-rw-r--r-- .github/workflows/premerge.yaml | 2
-rw-r--r-- clang/docs/InternalsManual.rst | 110
-rw-r--r-- clang/docs/OpenMPSupport.rst | 6
-rw-r--r-- clang/docs/ReleaseNotes.rst | 14
-rw-r--r-- clang/include/clang/Basic/Builtins.td | 12
-rw-r--r-- clang/include/clang/Basic/BuiltinsX86.td | 9
-rw-r--r-- clang/include/clang/Basic/riscv_vector.td | 83
-rw-r--r-- clang/include/clang/Basic/riscv_vector_common.td | 68
-rw-r--r-- clang/include/clang/Sema/Sema.h | 7
-rw-r--r-- clang/include/clang/StaticAnalyzer/Core/PathSensitive/CallEvent.h | 17
-rw-r--r-- clang/lib/AST/ByteCode/InterpBuiltin.cpp | 9
-rw-r--r-- clang/lib/AST/ExprConstant.cpp | 8
-rw-r--r-- clang/lib/CIR/CodeGen/CIRGenValue.h | 12
-rw-r--r-- clang/lib/CodeGen/CGHLSLBuiltins.cpp | 23
-rw-r--r-- clang/lib/CodeGen/CGHLSLRuntime.h | 1
-rw-r--r-- clang/lib/Format/ContinuationIndenter.cpp | 2
-rw-r--r-- clang/lib/Format/FormatToken.cpp | 3
-rw-r--r-- clang/lib/Format/TokenAnnotator.cpp | 10
-rw-r--r-- clang/lib/Headers/avx2intrin.h | 8
-rw-r--r-- clang/lib/Headers/avx512bwintrin.h | 15
-rw-r--r-- clang/lib/Headers/avx512vlbwintrin.h | 8
-rw-r--r-- clang/lib/Headers/tmmintrin.h | 13
-rw-r--r-- clang/lib/Parse/ParseTemplate.cpp | 6
-rw-r--r-- clang/lib/Sema/DeclSpec.cpp | 3
-rw-r--r-- clang/lib/Sema/HLSLBuiltinTypeDeclBuilder.cpp | 80
-rw-r--r-- clang/lib/Sema/HLSLBuiltinTypeDeclBuilder.h | 2
-rw-r--r-- clang/lib/Sema/HLSLExternalSemaSource.cpp | 11
-rw-r--r-- clang/lib/Sema/SemaDecl.cpp | 136
-rw-r--r-- clang/lib/Sema/SemaHLSL.cpp | 18
-rw-r--r-- clang/lib/Sema/SemaRISCV.cpp | 3
-rw-r--r-- clang/lib/StaticAnalyzer/Checkers/BlockInCriticalSectionChecker.cpp | 2
-rw-r--r-- clang/lib/StaticAnalyzer/Checkers/CheckObjCDealloc.cpp | 2
-rw-r--r-- clang/lib/StaticAnalyzer/Checkers/ObjCSuperDeallocChecker.cpp | 8
-rw-r--r-- clang/lib/StaticAnalyzer/Checkers/StdVariantChecker.cpp | 4
-rw-r--r-- clang/lib/StaticAnalyzer/Checkers/TaggedUnionModeling.h | 2
-rw-r--r-- clang/lib/StaticAnalyzer/Core/CheckerManager.cpp | 11
-rw-r--r-- clang/lib/StaticAnalyzer/Core/ExprEngineCallAndReturn.cpp | 62
-rw-r--r-- clang/lib/Support/RISCVVIntrinsicUtils.cpp | 5
-rw-r--r-- clang/test/AST/HLSL/ByteAddressBuffers-AST.hlsl | 14
-rw-r--r-- clang/test/AST/HLSL/StructuredBuffers-AST.hlsl | 22
-rw-r--r-- clang/test/AST/HLSL/TypedBuffers-AST.hlsl | 14
-rw-r--r-- clang/test/CodeGen/RISCV/rvv-intrinsics-autogenerated/zvfbfa/non-policy/non-overloaded/vfadd.c | 249
-rw-r--r-- clang/test/CodeGen/RISCV/rvv-intrinsics-autogenerated/zvfbfa/non-policy/non-overloaded/vfclass.c | 134
-rw-r--r-- clang/test/CodeGen/RISCV/rvv-intrinsics-autogenerated/zvfbfa/non-policy/non-overloaded/vfmacc.c | 249
-rw-r--r-- clang/test/CodeGen/RISCV/rvv-intrinsics-autogenerated/zvfbfa/non-policy/non-overloaded/vfmadd.c | 249
-rw-r--r-- clang/test/CodeGen/RISCV/rvv-intrinsics-autogenerated/zvfbfa/non-policy/non-overloaded/vfmax.c | 249
-rw-r--r-- clang/test/CodeGen/RISCV/rvv-intrinsics-autogenerated/zvfbfa/non-policy/non-overloaded/vfmerge.c | 69
-rw-r--r-- clang/test/CodeGen/RISCV/rvv-intrinsics-autogenerated/zvfbfa/non-policy/non-overloaded/vfmin.c | 249
-rw-r--r-- clang/test/CodeGen/RISCV/rvv-intrinsics-autogenerated/zvfbfa/non-policy/non-overloaded/vfmsac.c | 249
-rw-r--r-- clang/test/CodeGen/RISCV/rvv-intrinsics-autogenerated/zvfbfa/non-policy/non-overloaded/vfmsub.c | 249
-rw-r--r-- clang/test/CodeGen/RISCV/rvv-intrinsics-autogenerated/zvfbfa/non-policy/non-overloaded/vfmul.c | 249
-rw-r--r-- clang/test/CodeGen/RISCV/rvv-intrinsics-autogenerated/zvfbfa/non-policy/non-overloaded/vfmv.c | 189
-rw-r--r-- clang/test/CodeGen/RISCV/rvv-intrinsics-autogenerated/zvfbfa/non-policy/non-overloaded/vfncvt.c | 724
-rw-r--r-- clang/test/CodeGen/RISCV/rvv-intrinsics-autogenerated/zvfbfa/non-policy/non-overloaded/vfncvt_rod.c | 113
-rw-r--r-- clang/test/CodeGen/RISCV/rvv-intrinsics-autogenerated/zvfbfa/non-policy/non-overloaded/vfncvt_rtz.c | 267
-rw-r--r-- clang/test/CodeGen/RISCV/rvv-intrinsics-autogenerated/zvfbfa/non-policy/non-overloaded/vfnmacc.c | 249
-rw-r--r-- clang/test/CodeGen/RISCV/rvv-intrinsics-autogenerated/zvfbfa/non-policy/non-overloaded/vfnmadd.c | 249
-rw-r--r-- clang/test/CodeGen/RISCV/rvv-intrinsics-autogenerated/zvfbfa/non-policy/non-overloaded/vfnmsac.c | 249
-rw-r--r-- clang/test/CodeGen/RISCV/rvv-intrinsics-autogenerated/zvfbfa/non-policy/non-overloaded/vfnmsub.c | 249
-rw-r--r-- clang/test/CodeGen/RISCV/rvv-intrinsics-autogenerated/zvfbfa/non-policy/non-overloaded/vfrec7.c | 129
-rw-r--r-- clang/test/CodeGen/RISCV/rvv-intrinsics-autogenerated/zvfbfa/non-policy/non-overloaded/vfrsqrt7.c | 129
-rw-r--r-- clang/test/CodeGen/RISCV/rvv-intrinsics-autogenerated/zvfbfa/non-policy/non-overloaded/vfrsub.c | 129
-rw-r--r-- clang/test/CodeGen/RISCV/rvv-intrinsics-autogenerated/zvfbfa/non-policy/non-overloaded/vfsgnj.c | 249
-rw-r--r-- clang/test/CodeGen/RISCV/rvv-intrinsics-autogenerated/zvfbfa/non-policy/non-overloaded/vfsgnjn.c | 249
-rw-r--r-- clang/test/CodeGen/RISCV/rvv-intrinsics-autogenerated/zvfbfa/non-policy/non-overloaded/vfsgnjx.c | 249
-rw-r--r-- clang/test/CodeGen/RISCV/rvv-intrinsics-autogenerated/zvfbfa/non-policy/non-overloaded/vfslide1down.c | 129
-rw-r--r-- clang/test/CodeGen/RISCV/rvv-intrinsics-autogenerated/zvfbfa/non-policy/non-overloaded/vfslide1up.c | 129
-rw-r--r-- clang/test/CodeGen/RISCV/rvv-intrinsics-autogenerated/zvfbfa/non-policy/non-overloaded/vfsub.c | 249
-rw-r--r-- clang/test/CodeGen/RISCV/rvv-intrinsics-autogenerated/zvfbfa/non-policy/non-overloaded/vfwadd.c | 899
-rw-r--r-- clang/test/CodeGen/RISCV/rvv-intrinsics-autogenerated/zvfbfa/non-policy/non-overloaded/vfwcvt.c | 366
-rw-r--r-- clang/test/CodeGen/RISCV/rvv-intrinsics-autogenerated/zvfbfa/non-policy/non-overloaded/vfwmacc.c | 486
-rw-r--r-- clang/test/CodeGen/RISCV/rvv-intrinsics-autogenerated/zvfbfa/non-policy/non-overloaded/vfwmsac.c | 486
-rw-r--r-- clang/test/CodeGen/RISCV/rvv-intrinsics-autogenerated/zvfbfa/non-policy/non-overloaded/vfwmul.c | 455
-rw-r--r-- clang/test/CodeGen/RISCV/rvv-intrinsics-autogenerated/zvfbfa/non-policy/non-overloaded/vfwnmacc.c | 494
-rw-r--r-- clang/test/CodeGen/RISCV/rvv-intrinsics-autogenerated/zvfbfa/non-policy/non-overloaded/vfwnmsac.c | 494
-rw-r--r-- clang/test/CodeGen/RISCV/rvv-intrinsics-autogenerated/zvfbfa/non-policy/non-overloaded/vfwsub.c | 899
-rw-r--r-- clang/test/CodeGen/RISCV/rvv-intrinsics-autogenerated/zvfbfa/non-policy/non-overloaded/vmfeq.c | 249
-rw-r--r-- clang/test/CodeGen/RISCV/rvv-intrinsics-autogenerated/zvfbfa/non-policy/non-overloaded/vmfge.c | 249
-rw-r--r-- clang/test/CodeGen/RISCV/rvv-intrinsics-autogenerated/zvfbfa/non-policy/non-overloaded/vmfgt.c | 249
-rw-r--r-- clang/test/CodeGen/RISCV/rvv-intrinsics-autogenerated/zvfbfa/non-policy/non-overloaded/vmfle.c | 249
-rw-r--r-- clang/test/CodeGen/RISCV/rvv-intrinsics-autogenerated/zvfbfa/non-policy/non-overloaded/vmflt.c | 249
-rw-r--r-- clang/test/CodeGen/RISCV/rvv-intrinsics-autogenerated/zvfbfa/non-policy/non-overloaded/vmfne.c | 249
-rw-r--r-- clang/test/CodeGen/RISCV/rvv-intrinsics-autogenerated/zvfbfa/non-policy/overloaded/vfadd.c | 249
-rw-r--r-- clang/test/CodeGen/RISCV/rvv-intrinsics-autogenerated/zvfbfa/non-policy/overloaded/vfclass.c | 134
-rw-r--r-- clang/test/CodeGen/RISCV/rvv-intrinsics-autogenerated/zvfbfa/non-policy/overloaded/vfmacc.c | 249
-rw-r--r-- clang/test/CodeGen/RISCV/rvv-intrinsics-autogenerated/zvfbfa/non-policy/overloaded/vfmadd.c | 249
-rw-r--r-- clang/test/CodeGen/RISCV/rvv-intrinsics-autogenerated/zvfbfa/non-policy/overloaded/vfmax.c | 249
-rw-r--r-- clang/test/CodeGen/RISCV/rvv-intrinsics-autogenerated/zvfbfa/non-policy/overloaded/vfmerge.c | 69
-rw-r--r-- clang/test/CodeGen/RISCV/rvv-intrinsics-autogenerated/zvfbfa/non-policy/overloaded/vfmin.c | 249
-rw-r--r-- clang/test/CodeGen/RISCV/rvv-intrinsics-autogenerated/zvfbfa/non-policy/overloaded/vfmsac.c | 249
-rw-r--r-- clang/test/CodeGen/RISCV/rvv-intrinsics-autogenerated/zvfbfa/non-policy/overloaded/vfmsub.c | 249
-rw-r--r-- clang/test/CodeGen/RISCV/rvv-intrinsics-autogenerated/zvfbfa/non-policy/overloaded/vfmul.c | 249
-rw-r--r-- clang/test/CodeGen/RISCV/rvv-intrinsics-autogenerated/zvfbfa/non-policy/overloaded/vfmv.c | 69
-rw-r--r-- clang/test/CodeGen/RISCV/rvv-intrinsics-autogenerated/zvfbfa/non-policy/overloaded/vfncvt.c | 724
-rw-r--r-- clang/test/CodeGen/RISCV/rvv-intrinsics-autogenerated/zvfbfa/non-policy/overloaded/vfncvt_rod.c | 113
-rw-r--r-- clang/test/CodeGen/RISCV/rvv-intrinsics-autogenerated/zvfbfa/non-policy/overloaded/vfncvt_rtz.c | 267
-rw-r--r-- clang/test/CodeGen/RISCV/rvv-intrinsics-autogenerated/zvfbfa/non-policy/overloaded/vfnmacc.c | 249
-rw-r--r-- clang/test/CodeGen/RISCV/rvv-intrinsics-autogenerated/zvfbfa/non-policy/overloaded/vfnmadd.c | 249
-rw-r--r-- clang/test/CodeGen/RISCV/rvv-intrinsics-autogenerated/zvfbfa/non-policy/overloaded/vfnmsac.c | 249
-rw-r--r-- clang/test/CodeGen/RISCV/rvv-intrinsics-autogenerated/zvfbfa/non-policy/overloaded/vfnmsub.c | 249
-rw-r--r-- clang/test/CodeGen/RISCV/rvv-intrinsics-autogenerated/zvfbfa/non-policy/overloaded/vfrec7.c | 129
-rw-r--r-- clang/test/CodeGen/RISCV/rvv-intrinsics-autogenerated/zvfbfa/non-policy/overloaded/vfrsqrt7.c | 129
-rw-r--r-- clang/test/CodeGen/RISCV/rvv-intrinsics-autogenerated/zvfbfa/non-policy/overloaded/vfrsub.c | 129
-rw-r--r-- clang/test/CodeGen/RISCV/rvv-intrinsics-autogenerated/zvfbfa/non-policy/overloaded/vfsgnj.c | 249
-rw-r--r-- clang/test/CodeGen/RISCV/rvv-intrinsics-autogenerated/zvfbfa/non-policy/overloaded/vfsgnjn.c | 249
-rw-r--r-- clang/test/CodeGen/RISCV/rvv-intrinsics-autogenerated/zvfbfa/non-policy/overloaded/vfsgnjx.c | 249
-rw-r--r-- clang/test/CodeGen/RISCV/rvv-intrinsics-autogenerated/zvfbfa/non-policy/overloaded/vfslide1down.c | 129
-rw-r--r-- clang/test/CodeGen/RISCV/rvv-intrinsics-autogenerated/zvfbfa/non-policy/overloaded/vfslide1up.c | 129
-rw-r--r-- clang/test/CodeGen/RISCV/rvv-intrinsics-autogenerated/zvfbfa/non-policy/overloaded/vfsub.c | 249
-rw-r--r-- clang/test/CodeGen/RISCV/rvv-intrinsics-autogenerated/zvfbfa/non-policy/overloaded/vfwadd.c | 893
-rw-r--r-- clang/test/CodeGen/RISCV/rvv-intrinsics-autogenerated/zvfbfa/non-policy/overloaded/vfwcvt.c | 366
-rw-r--r-- clang/test/CodeGen/RISCV/rvv-intrinsics-autogenerated/zvfbfa/non-policy/overloaded/vfwmacc.c | 474
-rw-r--r-- clang/test/CodeGen/RISCV/rvv-intrinsics-autogenerated/zvfbfa/non-policy/overloaded/vfwmsac.c | 474
-rw-r--r-- clang/test/CodeGen/RISCV/rvv-intrinsics-autogenerated/zvfbfa/non-policy/overloaded/vfwmul.c | 451
-rw-r--r-- clang/test/CodeGen/RISCV/rvv-intrinsics-autogenerated/zvfbfa/non-policy/overloaded/vfwnmacc.c | 480
-rw-r--r-- clang/test/CodeGen/RISCV/rvv-intrinsics-autogenerated/zvfbfa/non-policy/overloaded/vfwnmsac.c | 480
-rw-r--r-- clang/test/CodeGen/RISCV/rvv-intrinsics-autogenerated/zvfbfa/non-policy/overloaded/vfwsub.c | 893
-rw-r--r-- clang/test/CodeGen/RISCV/rvv-intrinsics-autogenerated/zvfbfa/non-policy/overloaded/vmfeq.c | 249
-rw-r--r-- clang/test/CodeGen/RISCV/rvv-intrinsics-autogenerated/zvfbfa/non-policy/overloaded/vmfge.c | 249
-rw-r--r-- clang/test/CodeGen/RISCV/rvv-intrinsics-autogenerated/zvfbfa/non-policy/overloaded/vmfgt.c | 249
-rw-r--r-- clang/test/CodeGen/RISCV/rvv-intrinsics-autogenerated/zvfbfa/non-policy/overloaded/vmfle.c | 249
-rw-r--r-- clang/test/CodeGen/RISCV/rvv-intrinsics-autogenerated/zvfbfa/non-policy/overloaded/vmflt.c | 249
-rw-r--r-- clang/test/CodeGen/RISCV/rvv-intrinsics-autogenerated/zvfbfa/non-policy/overloaded/vmfne.c | 249
-rw-r--r-- clang/test/CodeGen/RISCV/rvv-intrinsics-autogenerated/zvfbfa/policy/non-overloaded/vfadd.c | 489
-rw-r--r-- clang/test/CodeGen/RISCV/rvv-intrinsics-autogenerated/zvfbfa/policy/non-overloaded/vfclass.c | 272
-rw-r--r-- clang/test/CodeGen/RISCV/rvv-intrinsics-autogenerated/zvfbfa/policy/non-overloaded/vfmacc.c | 489
-rw-r--r-- clang/test/CodeGen/RISCV/rvv-intrinsics-autogenerated/zvfbfa/policy/non-overloaded/vfmadd.c | 489
-rw-r--r-- clang/test/CodeGen/RISCV/rvv-intrinsics-autogenerated/zvfbfa/policy/non-overloaded/vfmax.c | 489
-rw-r--r-- clang/test/CodeGen/RISCV/rvv-intrinsics-autogenerated/zvfbfa/policy/non-overloaded/vfmerge.c | 69
-rw-r--r-- clang/test/CodeGen/RISCV/rvv-intrinsics-autogenerated/zvfbfa/policy/non-overloaded/vfmin.c | 489
-rw-r--r-- clang/test/CodeGen/RISCV/rvv-intrinsics-autogenerated/zvfbfa/policy/non-overloaded/vfmsac.c | 489
-rw-r--r-- clang/test/CodeGen/RISCV/rvv-intrinsics-autogenerated/zvfbfa/policy/non-overloaded/vfmsub.c | 489
-rw-r--r-- clang/test/CodeGen/RISCV/rvv-intrinsics-autogenerated/zvfbfa/policy/non-overloaded/vfmul.c | 489
-rw-r--r-- clang/test/CodeGen/RISCV/rvv-intrinsics-autogenerated/zvfbfa/policy/non-overloaded/vfmv.c | 129
-rw-r--r-- clang/test/CodeGen/RISCV/rvv-intrinsics-autogenerated/zvfbfa/policy/non-overloaded/vfncvt.c | 1577
-rw-r--r-- clang/test/CodeGen/RISCV/rvv-intrinsics-autogenerated/zvfbfa/policy/non-overloaded/vfncvt_rod.c | 233
-rw-r--r-- clang/test/CodeGen/RISCV/rvv-intrinsics-autogenerated/zvfbfa/policy/non-overloaded/vfncvt_rtz.c | 572
-rw-r--r-- clang/test/CodeGen/RISCV/rvv-intrinsics-autogenerated/zvfbfa/policy/non-overloaded/vfnmacc.c | 489
-rw-r--r-- clang/test/CodeGen/RISCV/rvv-intrinsics-autogenerated/zvfbfa/policy/non-overloaded/vfnmadd.c | 489
-rw-r--r-- clang/test/CodeGen/RISCV/rvv-intrinsics-autogenerated/zvfbfa/policy/non-overloaded/vfnmsac.c | 489
-rw-r--r-- clang/test/CodeGen/RISCV/rvv-intrinsics-autogenerated/zvfbfa/policy/non-overloaded/vfnmsub.c | 489
-rw-r--r-- clang/test/CodeGen/RISCV/rvv-intrinsics-autogenerated/zvfbfa/policy/non-overloaded/vfrec7.c | 249
-rw-r--r-- clang/test/CodeGen/RISCV/rvv-intrinsics-autogenerated/zvfbfa/policy/non-overloaded/vfrsqrt7.c | 249
-rw-r--r-- clang/test/CodeGen/RISCV/rvv-intrinsics-autogenerated/zvfbfa/policy/non-overloaded/vfrsub.c | 249
-rw-r--r-- clang/test/CodeGen/RISCV/rvv-intrinsics-autogenerated/zvfbfa/policy/non-overloaded/vfsgnj.c | 489
-rw-r--r-- clang/test/CodeGen/RISCV/rvv-intrinsics-autogenerated/zvfbfa/policy/non-overloaded/vfsgnjn.c | 489
-rw-r--r-- clang/test/CodeGen/RISCV/rvv-intrinsics-autogenerated/zvfbfa/policy/non-overloaded/vfsgnjx.c | 489
-rw-r--r-- clang/test/CodeGen/RISCV/rvv-intrinsics-autogenerated/zvfbfa/policy/non-overloaded/vfslide1down.c | 249
-rw-r--r-- clang/test/CodeGen/RISCV/rvv-intrinsics-autogenerated/zvfbfa/policy/non-overloaded/vfslide1up.c | 249
-rw-r--r-- clang/test/CodeGen/RISCV/rvv-intrinsics-autogenerated/zvfbfa/policy/non-overloaded/vfsub.c | 489
-rw-r--r-- clang/test/CodeGen/RISCV/rvv-intrinsics-autogenerated/zvfbfa/policy/non-overloaded/vfwadd.c | 2007
-rw-r--r-- clang/test/CodeGen/RISCV/rvv-intrinsics-autogenerated/zvfbfa/policy/non-overloaded/vfwcvt.c | 765
-rw-r--r-- clang/test/CodeGen/RISCV/rvv-intrinsics-autogenerated/zvfbfa/policy/non-overloaded/vfwmacc.c | 1017
-rw-r--r-- clang/test/CodeGen/RISCV/rvv-intrinsics-autogenerated/zvfbfa/policy/non-overloaded/vfwmsac.c | 1017
-rw-r--r-- clang/test/CodeGen/RISCV/rvv-intrinsics-autogenerated/zvfbfa/policy/non-overloaded/vfwmul.c | 1015
-rw-r--r-- clang/test/CodeGen/RISCV/rvv-intrinsics-autogenerated/zvfbfa/policy/non-overloaded/vfwnmacc.c | 1034
-rw-r--r-- clang/test/CodeGen/RISCV/rvv-intrinsics-autogenerated/zvfbfa/policy/non-overloaded/vfwnmsac.c | 1034
-rw-r--r-- clang/test/CodeGen/RISCV/rvv-intrinsics-autogenerated/zvfbfa/policy/non-overloaded/vfwsub.c | 2007
-rw-r--r-- clang/test/CodeGen/RISCV/rvv-intrinsics-autogenerated/zvfbfa/policy/non-overloaded/vmfeq.c | 129
-rw-r--r-- clang/test/CodeGen/RISCV/rvv-intrinsics-autogenerated/zvfbfa/policy/non-overloaded/vmfge.c | 129
-rw-r--r-- clang/test/CodeGen/RISCV/rvv-intrinsics-autogenerated/zvfbfa/policy/non-overloaded/vmfgt.c | 129
-rw-r--r-- clang/test/CodeGen/RISCV/rvv-intrinsics-autogenerated/zvfbfa/policy/non-overloaded/vmfle.c | 129
-rw-r--r-- clang/test/CodeGen/RISCV/rvv-intrinsics-autogenerated/zvfbfa/policy/non-overloaded/vmflt.c | 129
-rw-r--r-- clang/test/CodeGen/RISCV/rvv-intrinsics-autogenerated/zvfbfa/policy/non-overloaded/vmfne.c | 129
-rw-r--r-- clang/test/CodeGen/RISCV/rvv-intrinsics-autogenerated/zvfbfa/policy/overloaded/vfadd.c | 489
-rw-r--r-- clang/test/CodeGen/RISCV/rvv-intrinsics-autogenerated/zvfbfa/policy/overloaded/vfclass.c | 272
-rw-r--r-- clang/test/CodeGen/RISCV/rvv-intrinsics-autogenerated/zvfbfa/policy/overloaded/vfmacc.c | 489
-rw-r--r-- clang/test/CodeGen/RISCV/rvv-intrinsics-autogenerated/zvfbfa/policy/overloaded/vfmadd.c | 489
-rw-r--r-- clang/test/CodeGen/RISCV/rvv-intrinsics-autogenerated/zvfbfa/policy/overloaded/vfmax.c | 489
-rw-r--r-- clang/test/CodeGen/RISCV/rvv-intrinsics-autogenerated/zvfbfa/policy/overloaded/vfmerge.c | 69
-rw-r--r-- clang/test/CodeGen/RISCV/rvv-intrinsics-autogenerated/zvfbfa/policy/overloaded/vfmin.c | 489
-rw-r--r-- clang/test/CodeGen/RISCV/rvv-intrinsics-autogenerated/zvfbfa/policy/overloaded/vfmsac.c | 489
-rw-r--r-- clang/test/CodeGen/RISCV/rvv-intrinsics-autogenerated/zvfbfa/policy/overloaded/vfmsub.c | 489
-rw-r--r-- clang/test/CodeGen/RISCV/rvv-intrinsics-autogenerated/zvfbfa/policy/overloaded/vfmul.c | 489
-rw-r--r-- clang/test/CodeGen/RISCV/rvv-intrinsics-autogenerated/zvfbfa/policy/overloaded/vfmv.c | 129
-rw-r--r-- clang/test/CodeGen/RISCV/rvv-intrinsics-autogenerated/zvfbfa/policy/overloaded/vfncvt.c | 1539
-rw-r--r-- clang/test/CodeGen/RISCV/rvv-intrinsics-autogenerated/zvfbfa/policy/overloaded/vfncvt_rod.c | 233
-rw-r--r-- clang/test/CodeGen/RISCV/rvv-intrinsics-autogenerated/zvfbfa/policy/overloaded/vfncvt_rtz.c | 572
-rw-r--r-- clang/test/CodeGen/RISCV/rvv-intrinsics-autogenerated/zvfbfa/policy/overloaded/vfnmacc.c | 489
-rw-r--r-- clang/test/CodeGen/RISCV/rvv-intrinsics-autogenerated/zvfbfa/policy/overloaded/vfnmadd.c | 489
-rw-r--r-- clang/test/CodeGen/RISCV/rvv-intrinsics-autogenerated/zvfbfa/policy/overloaded/vfnmsac.c | 489
-rw-r--r-- clang/test/CodeGen/RISCV/rvv-intrinsics-autogenerated/zvfbfa/policy/overloaded/vfnmsub.c | 489
-rw-r--r-- clang/test/CodeGen/RISCV/rvv-intrinsics-autogenerated/zvfbfa/policy/overloaded/vfrec7.c | 249
-rw-r--r-- clang/test/CodeGen/RISCV/rvv-intrinsics-autogenerated/zvfbfa/policy/overloaded/vfrsqrt7.c | 249
-rw-r--r-- clang/test/CodeGen/RISCV/rvv-intrinsics-autogenerated/zvfbfa/policy/overloaded/vfrsub.c | 249
-rw-r--r-- clang/test/CodeGen/RISCV/rvv-intrinsics-autogenerated/zvfbfa/policy/overloaded/vfsgnj.c | 489
-rw-r--r-- clang/test/CodeGen/RISCV/rvv-intrinsics-autogenerated/zvfbfa/policy/overloaded/vfsgnjn.c | 489
-rw-r--r-- clang/test/CodeGen/RISCV/rvv-intrinsics-autogenerated/zvfbfa/policy/overloaded/vfsgnjx.c | 489
-rw-r--r-- clang/test/CodeGen/RISCV/rvv-intrinsics-autogenerated/zvfbfa/policy/overloaded/vfslide1down.c | 249
-rw-r--r-- clang/test/CodeGen/RISCV/rvv-intrinsics-autogenerated/zvfbfa/policy/overloaded/vfslide1up.c | 249
-rw-r--r-- clang/test/CodeGen/RISCV/rvv-intrinsics-autogenerated/zvfbfa/policy/overloaded/vfsub.c | 489
-rw-r--r-- clang/test/CodeGen/RISCV/rvv-intrinsics-autogenerated/zvfbfa/policy/overloaded/vfwadd.c | 1932
-rw-r--r-- clang/test/CodeGen/RISCV/rvv-intrinsics-autogenerated/zvfbfa/policy/overloaded/vfwcvt.c | 765
-rw-r--r-- clang/test/CodeGen/RISCV/rvv-intrinsics-autogenerated/zvfbfa/policy/overloaded/vfwmacc.c | 977
-rw-r--r-- clang/test/CodeGen/RISCV/rvv-intrinsics-autogenerated/zvfbfa/policy/overloaded/vfwmsac.c | 977
-rw-r--r-- clang/test/CodeGen/RISCV/rvv-intrinsics-autogenerated/zvfbfa/policy/overloaded/vfwmul.c | 975
-rw-r--r-- clang/test/CodeGen/RISCV/rvv-intrinsics-autogenerated/zvfbfa/policy/overloaded/vfwnmacc.c | 994
-rw-r--r-- clang/test/CodeGen/RISCV/rvv-intrinsics-autogenerated/zvfbfa/policy/overloaded/vfwnmsac.c | 994
-rw-r--r-- clang/test/CodeGen/RISCV/rvv-intrinsics-autogenerated/zvfbfa/policy/overloaded/vfwsub.c | 1932
-rw-r--r-- clang/test/CodeGen/RISCV/rvv-intrinsics-autogenerated/zvfbfa/policy/overloaded/vmfeq.c | 129
-rw-r--r-- clang/test/CodeGen/RISCV/rvv-intrinsics-autogenerated/zvfbfa/policy/overloaded/vmfge.c | 129
-rw-r--r-- clang/test/CodeGen/RISCV/rvv-intrinsics-autogenerated/zvfbfa/policy/overloaded/vmfgt.c | 129
-rw-r--r-- clang/test/CodeGen/RISCV/rvv-intrinsics-autogenerated/zvfbfa/policy/overloaded/vmfle.c | 129
-rw-r--r-- clang/test/CodeGen/RISCV/rvv-intrinsics-autogenerated/zvfbfa/policy/overloaded/vmflt.c | 129
-rw-r--r-- clang/test/CodeGen/RISCV/rvv-intrinsics-autogenerated/zvfbfa/policy/overloaded/vmfne.c | 129
-rw-r--r-- clang/test/CodeGen/X86/avx2-builtins.c | 1
-rw-r--r-- clang/test/CodeGen/X86/avx512bw-builtins.c | 6
-rw-r--r-- clang/test/CodeGen/X86/avx512vlbw-builtins.c | 4
-rw-r--r-- clang/test/CodeGen/X86/mmx-builtins.c | 1
-rw-r--r-- clang/test/CodeGen/X86/ssse3-builtins.c | 1
-rw-r--r-- clang/test/CodeGenHLSL/resources/ByteAddressBuffers-methods.hlsl | 45
-rw-r--r-- clang/test/CodeGenHLSL/resources/StructuredBuffers-methods-lib.hlsl | 48
-rw-r--r-- clang/test/CodeGenHLSL/resources/StructuredBuffers-methods-ps.hlsl | 35
-rw-r--r-- clang/test/CodeGenHLSL/resources/TypedBuffers-methods.hlsl | 32
-rw-r--r-- clang/test/Driver/linker-wrapper.c | 4
-rw-r--r-- clang/test/Parser/c2x-auto.c | 27
-rw-r--r-- clang/test/Sema/attr-print.c | 3
-rw-r--r-- clang/test/SemaTemplate/concepts.cpp | 25
-rw-r--r-- clang/tools/clang-linker-wrapper/ClangLinkerWrapper.cpp | 4
-rw-r--r-- clang/unittests/Format/AlignBracketsTest.cpp | 13
-rw-r--r-- clang/unittests/Format/TokenAnnotatorTest.cpp | 5
-rw-r--r-- compiler-rt/test/asan/TestCases/Windows/basic_exception_handling.cpp | 33
-rw-r--r-- flang/include/flang/Optimizer/OpenMP/Passes.td | 4
-rw-r--r-- flang/lib/Optimizer/OpenMP/CMakeLists.txt | 1
-rw-r--r-- flang/lib/Optimizer/OpenMP/LowerWorkdistribute.cpp | 1852
-rw-r--r-- flang/lib/Optimizer/Passes/Pipelines.cpp | 4
-rw-r--r-- flang/test/Fir/OpenACC/recipe-populate-firstprivate.mlir | 166
-rw-r--r-- flang/test/Fir/OpenACC/recipe-populate-private.mlir | 223
-rw-r--r-- flang/test/Fir/basic-program.fir | 1
-rw-r--r-- flang/test/Lower/OpenMP/workdistribute-multiple.f90 | 20
-rw-r--r-- flang/test/Lower/OpenMP/workdistribute-saxpy-1d.f90 | 39
-rw-r--r-- flang/test/Lower/OpenMP/workdistribute-saxpy-2d.f90 | 45
-rw-r--r-- flang/test/Lower/OpenMP/workdistribute-saxpy-3d.f90 | 47
-rw-r--r-- flang/test/Lower/OpenMP/workdistribute-saxpy-and-scalar-assign.f90 | 53
-rw-r--r-- flang/test/Lower/OpenMP/workdistribute-saxpy-two-2d.f90 | 68
-rw-r--r-- flang/test/Lower/OpenMP/workdistribute-scalar-assign.f90 | 29
-rw-r--r-- flang/test/Lower/OpenMP/workdistribute-target-teams-clauses.f90 | 32
-rw-r--r-- flang/test/Lower/OpenMP/workdistribute-teams-unsupported-after.f90 | 22
-rw-r--r-- flang/test/Lower/OpenMP/workdistribute-teams-unsupported-before.f90 | 22
-rw-r--r-- flang/test/Transforms/OpenMP/lower-workdistribute-doloop.mlir | 33
-rw-r--r-- flang/test/Transforms/OpenMP/lower-workdistribute-fission-host.mlir | 117
-rw-r--r-- flang/test/Transforms/OpenMP/lower-workdistribute-fission-target.mlir | 118
-rw-r--r-- flang/test/Transforms/OpenMP/lower-workdistribute-fission.mlir | 71
-rw-r--r-- flang/test/Transforms/OpenMP/lower-workdistribute-runtime-assign-scalar.mlir | 108
-rwxr-xr-x libcxx/utils/compare-benchmarks | 15
-rw-r--r-- lldb/include/lldb/Utility/DataExtractor.h | 2
-rw-r--r-- lldb/source/Plugins/LanguageRuntime/ObjC/ObjCLanguageRuntime.cpp | 65
-rw-r--r-- lldb/source/Plugins/LanguageRuntime/ObjC/ObjCLanguageRuntime.h | 4
-rw-r--r-- lldb/test/API/lang/objc/ivar-in-framework-base/Makefile | 6
-rw-r--r-- lldb/test/API/lang/objc/ivar-in-framework-base/TestIvarInFrameworkBase.py | 39
-rw-r--r-- lldb/test/API/lang/objc/ivar-in-framework-base/lib.h | 6
-rw-r--r-- lldb/test/API/lang/objc/ivar-in-framework-base/lib.m | 8
-rw-r--r-- lldb/test/API/lang/objc/ivar-in-framework-base/main.m | 22
-rw-r--r-- llvm/docs/AMDGPUUsage.rst | 2
-rw-r--r-- llvm/docs/CommandGuide/dsymutil.rst | 8
-rw-r--r-- llvm/include/llvm/ADT/APFloat.h | 152
-rw-r--r-- llvm/include/llvm/ADT/STLForwardCompat.h | 48
-rw-r--r-- llvm/include/llvm/ADT/bit.h | 29
-rw-r--r-- llvm/include/llvm/Analysis/LoopAnalysisManager.h | 2
-rw-r--r-- llvm/include/llvm/Analysis/ScalarEvolution.h | 1
-rw-r--r-- llvm/include/llvm/Support/DOTGraphTraits.h | 2
-rw-r--r-- llvm/include/llvm/Support/MathExtras.h | 53
-rw-r--r-- llvm/include/llvm/Transforms/Scalar/LoopPassManager.h | 15
-rw-r--r-- llvm/lib/Analysis/LazyValueInfo.cpp | 10
-rw-r--r-- llvm/lib/Analysis/ScalarEvolution.cpp | 53
-rw-r--r-- llvm/lib/CodeGen/AsmPrinter/DwarfDebug.cpp | 9
-rw-r--r-- llvm/lib/Passes/PassBuilder.cpp | 33
-rw-r--r-- llvm/lib/Passes/PassBuilderPipelines.cpp | 34
-rw-r--r-- llvm/lib/Support/APFloat.cpp | 538
-rw-r--r-- llvm/lib/Target/AMDGPU/AMDGPUISelLowering.cpp | 4
-rw-r--r-- llvm/lib/Target/AMDGPU/Disassembler/AMDGPUDisassembler.cpp | 1
-rw-r--r-- llvm/lib/Target/AMDGPU/SIInstrInfo.cpp | 61
-rw-r--r-- llvm/lib/Target/AMDGPU/SIPreEmitPeephole.cpp | 47
-rw-r--r-- llvm/lib/Target/RISCV/RISCVISelLowering.cpp | 3
-rw-r--r-- llvm/lib/Target/RISCV/RISCVInsertVSETVLI.cpp | 3
-rw-r--r-- llvm/lib/Target/RISCV/RISCVInstrInfo.cpp | 22
-rw-r--r-- llvm/lib/Target/RISCV/RISCVInstrInfoZvfbf.td | 675
-rw-r--r-- llvm/lib/Target/RISCV/RISCVSubtarget.h | 5
-rw-r--r-- llvm/lib/Target/SPIRV/SPIRVModuleAnalysis.cpp | 29
-rw-r--r-- llvm/lib/Target/X86/X86ISelLowering.cpp | 14
-rw-r--r-- llvm/lib/Target/X86/X86InstrInfo.cpp | 22
-rw-r--r-- llvm/lib/Transforms/InstCombine/InstCombineSelect.cpp | 42
-rw-r--r-- llvm/lib/Transforms/Instrumentation/AddressSanitizer.cpp | 20
-rw-r--r-- llvm/lib/Transforms/Scalar/LoopPassManager.cpp | 5
-rw-r--r-- llvm/lib/Transforms/Scalar/LowerMatrixIntrinsics.cpp | 42
-rw-r--r-- llvm/lib/Transforms/Scalar/SimpleLoopUnswitch.cpp | 40
-rw-r--r-- llvm/lib/Transforms/Utils/SCCPSolver.cpp | 96
-rw-r--r-- llvm/lib/Transforms/Vectorize/SLPVectorizer.cpp | 107
-rw-r--r-- llvm/lib/Transforms/Vectorize/VPlan.h | 12
-rw-r--r-- llvm/lib/Transforms/Vectorize/VPlanAnalysis.cpp | 2
-rw-r--r-- llvm/lib/Transforms/Vectorize/VPlanRecipes.cpp | 8
-rw-r--r-- llvm/lib/Transforms/Vectorize/VPlanTransforms.cpp | 9
-rw-r--r-- llvm/lib/Transforms/Vectorize/VPlanUtils.h | 2
-rw-r--r-- llvm/test/CodeGen/AMDGPU/abs_i16.ll | 980
-rw-r--r-- llvm/test/CodeGen/AMDGPU/add.v2i16.ll | 10
-rw-r--r-- llvm/test/CodeGen/AMDGPU/amdgcn.bitcast.1024bit.ll | 22342
-rw-r--r-- llvm/test/CodeGen/AMDGPU/amdgcn.bitcast.128bit.ll | 2356
-rw-r--r-- llvm/test/CodeGen/AMDGPU/amdgcn.bitcast.256bit.ll | 5894
-rw-r--r-- llvm/test/CodeGen/AMDGPU/amdgcn.bitcast.320bit.ll | 1242
-rw-r--r-- llvm/test/CodeGen/AMDGPU/amdgcn.bitcast.32bit.ll | 768
-rw-r--r-- llvm/test/CodeGen/AMDGPU/amdgcn.bitcast.48bit.ll | 362
-rw-r--r-- llvm/test/CodeGen/AMDGPU/amdgcn.bitcast.512bit.ll | 7815
-rw-r--r-- llvm/test/CodeGen/AMDGPU/amdgcn.bitcast.576bit.ll | 2484
-rw-r--r-- llvm/test/CodeGen/AMDGPU/amdgcn.bitcast.640bit.ll | 4594
-rw-r--r-- llvm/test/CodeGen/AMDGPU/amdgcn.bitcast.64bit.ll | 1340
-rw-r--r-- llvm/test/CodeGen/AMDGPU/amdgcn.bitcast.704bit.ll | 4962
-rw-r--r-- llvm/test/CodeGen/AMDGPU/amdgcn.bitcast.768bit.ll | 5336
-rw-r--r-- llvm/test/CodeGen/AMDGPU/amdgcn.bitcast.832bit.ll | 5688
-rw-r--r-- llvm/test/CodeGen/AMDGPU/amdgcn.bitcast.896bit.ll | 6014
-rw-r--r-- llvm/test/CodeGen/AMDGPU/amdgcn.bitcast.960bit.ll | 6338
-rw-r--r-- llvm/test/CodeGen/AMDGPU/amdgcn.bitcast.96bit.ll | 1411
-rw-r--r-- llvm/test/CodeGen/AMDGPU/amdgpu-codegenprepare-fold-binop-select.ll | 4
-rw-r--r-- llvm/test/CodeGen/AMDGPU/amdgpu-codegenprepare-idiv.ll | 140
-rw-r--r-- llvm/test/CodeGen/AMDGPU/build-vector-packed-partial-undef.ll | 28
-rw-r--r-- llvm/test/CodeGen/AMDGPU/bypass-div.ll | 8
-rw-r--r-- llvm/test/CodeGen/AMDGPU/divergence-driven-buildvector.ll | 5
-rw-r--r-- llvm/test/CodeGen/AMDGPU/fix-sgpr-copies-f16-true16.mir | 87
-rw-r--r-- llvm/test/CodeGen/AMDGPU/fneg-fabs.bf16.ll | 335
-rw-r--r-- llvm/test/CodeGen/AMDGPU/fneg.bf16.ll | 123
-rw-r--r-- llvm/test/CodeGen/AMDGPU/fptosi.f16.ll | 7
-rw-r--r-- llvm/test/CodeGen/AMDGPU/fptoui.f16.ll | 7
-rw-r--r-- llvm/test/CodeGen/AMDGPU/frem.ll | 65
-rw-r--r-- llvm/test/CodeGen/AMDGPU/insert_vector_elt.v2i16.ll | 8
-rw-r--r-- llvm/test/CodeGen/AMDGPU/llvm.amdgcn.raw.atomic.buffer.load.ll | 14
-rw-r--r-- llvm/test/CodeGen/AMDGPU/llvm.amdgcn.raw.ptr.atomic.buffer.load.ll | 14
-rw-r--r-- llvm/test/CodeGen/AMDGPU/llvm.amdgcn.struct.atomic.buffer.load.ll | 12
-rw-r--r-- llvm/test/CodeGen/AMDGPU/llvm.amdgcn.struct.ptr.atomic.buffer.load.ll | 12
-rw-r--r-- llvm/test/CodeGen/AMDGPU/llvm.maximum.f16.ll | 16
-rw-r--r-- llvm/test/CodeGen/AMDGPU/llvm.minimum.f16.ll | 16
-rw-r--r-- llvm/test/CodeGen/AMDGPU/llvm.sqrt.bf16.ll | 10
-rw-r--r-- llvm/test/CodeGen/AMDGPU/load-constant-i8.ll | 12
-rw-r--r-- llvm/test/CodeGen/AMDGPU/sdiv.ll | 788
-rw-r--r-- llvm/test/CodeGen/AMDGPU/select.f16.ll | 72
-rw-r--r-- llvm/test/CodeGen/AMDGPU/srem.ll | 26
-rw-r--r-- llvm/test/CodeGen/AMDGPU/strict_fsub.f16.ll | 6
-rw-r--r-- llvm/test/CodeGen/AMDGPU/sub.v2i16.ll | 10
-rw-r--r-- llvm/test/CodeGen/AMDGPU/v_sat_pk_u8_i16.ll | 22
-rw-r--r-- llvm/test/CodeGen/AMDGPU/vector-reduce-and.ll | 14
-rw-r--r-- llvm/test/CodeGen/AMDGPU/vector-reduce-mul.ll | 80
-rw-r--r-- llvm/test/CodeGen/AMDGPU/vector-reduce-or.ll | 18
-rw-r--r-- llvm/test/CodeGen/AMDGPU/vector-reduce-xor.ll | 18
-rw-r--r-- llvm/test/CodeGen/AMDGPU/vector_rebroadcast.ll | 29
-rw-r--r-- llvm/test/CodeGen/AMDGPU/vector_shuffle.packed.ll | 20
-rw-r--r-- llvm/test/CodeGen/RISCV/div_minsize.ll | 148
-rw-r--r-- llvm/test/CodeGen/RISCV/rvv/mixed-float-bf16-arith.ll | 186
-rw-r--r-- llvm/test/CodeGen/RISCV/rvv/vfadd-bf.ll | 607
-rw-r--r-- llvm/test/CodeGen/RISCV/rvv/vfclass-bf.ll | 294
-rw-r--r-- llvm/test/CodeGen/RISCV/rvv/vfmacc-bf.ll | 553
-rw-r--r-- llvm/test/CodeGen/RISCV/rvv/vfmadd-bf.ll | 553
-rw-r--r-- llvm/test/CodeGen/RISCV/rvv/vfmax-bf.ll | 571
-rw-r--r-- llvm/test/CodeGen/RISCV/rvv/vfmerge-bf.ll | 258
-rw-r--r-- llvm/test/CodeGen/RISCV/rvv/vfmin-bf.ll | 571
-rw-r--r-- llvm/test/CodeGen/RISCV/rvv/vfmsac-bf.ll | 553
-rw-r--r-- llvm/test/CodeGen/RISCV/rvv/vfmsub-bf.ll | 553
-rw-r--r-- llvm/test/CodeGen/RISCV/rvv/vfmul-bf.ll | 607
-rw-r--r-- llvm/test/CodeGen/RISCV/rvv/vfmv-bf-s.ll | 88
-rw-r--r-- llvm/test/CodeGen/RISCV/rvv/vfmv-s-bf.ll | 161
-rw-r--r-- llvm/test/CodeGen/RISCV/rvv/vfmv-v-bf.ll | 216
-rw-r--r-- llvm/test/CodeGen/RISCV/rvv/vfncvt-rod-bf-f.ll | 226
-rw-r--r-- llvm/test/CodeGen/RISCV/rvv/vfncvt-rtz-x-bf.ll | 270
-rw-r--r-- llvm/test/CodeGen/RISCV/rvv/vfncvt-rtz-xu-bf.ll | 270
-rw-r--r-- llvm/test/CodeGen/RISCV/rvv/vfncvt-x-bf.ll | 288
-rw-r--r-- llvm/test/CodeGen/RISCV/rvv/vfncvt-xu-bf.ll | 288
-rw-r--r-- llvm/test/CodeGen/RISCV/rvv/vfnmacc-bf.ll | 553
-rw-r--r-- llvm/test/CodeGen/RISCV/rvv/vfnmadd-bf.ll | 553
-rw-r--r-- llvm/test/CodeGen/RISCV/rvv/vfnmsac-bf.ll | 553
-rw-r--r-- llvm/test/CodeGen/RISCV/rvv/vfnmsub-bf.ll | 553
-rw-r--r-- llvm/test/CodeGen/RISCV/rvv/vfrec7-bf.ll | 282
-rw-r--r-- llvm/test/CodeGen/RISCV/rvv/vfrsqrt7-bf16.ll | 264
-rw-r--r-- llvm/test/CodeGen/RISCV/rvv/vfrsub-bf.ll | 282
-rw-r--r-- llvm/test/CodeGen/RISCV/rvv/vfsgnj-bf.ll | 571
-rw-r--r-- llvm/test/CodeGen/RISCV/rvv/vfsgnjn-bf.ll | 571
-rw-r--r-- llvm/test/CodeGen/RISCV/rvv/vfsgnjx-bf.ll | 571
-rw-r--r-- llvm/test/CodeGen/RISCV/rvv/vfslide1down-bf.ll | 288
-rw-r--r-- llvm/test/CodeGen/RISCV/rvv/vfslide1up-bf.ll | 294
-rw-r--r-- llvm/test/CodeGen/RISCV/rvv/vfsub-bf.ll | 559
-rw-r--r-- llvm/test/CodeGen/RISCV/rvv/vfwadd-bf.ll | 519
-rw-r--r-- llvm/test/CodeGen/RISCV/rvv/vfwadd-w-bf.ll | 773
-rw-r--r-- llvm/test/CodeGen/RISCV/rvv/vfwcvt-bf-x.ll | 264
-rw-r--r-- llvm/test/CodeGen/RISCV/rvv/vfwcvt-bf-xu.ll | 264
-rw-r--r-- llvm/test/CodeGen/RISCV/rvv/vfwmsac-bf.ll | 506
-rw-r--r-- llvm/test/CodeGen/RISCV/rvv/vfwmul-bf.ll | 519
-rw-r--r-- llvm/test/CodeGen/RISCV/rvv/vfwnmacc-bf.ll | 506
-rw-r--r-- llvm/test/CodeGen/RISCV/rvv/vfwnmsac-bf.ll | 506
-rw-r--r-- llvm/test/CodeGen/RISCV/rvv/vfwsub-bf.ll | 519
-rw-r--r-- llvm/test/CodeGen/RISCV/rvv/vfwsub-w-bf.ll | 773
-rw-r--r-- llvm/test/CodeGen/RISCV/rvv/vmfeq-bf.ll | 496
-rw-r--r-- llvm/test/CodeGen/RISCV/rvv/vmfge-bf.ll | 496
-rw-r--r-- llvm/test/CodeGen/RISCV/rvv/vmfgt-bf.ll | 496
-rw-r--r-- llvm/test/CodeGen/RISCV/rvv/vmfle-bf.ll | 496
-rw-r--r-- llvm/test/CodeGen/RISCV/rvv/vmflt-bf.ll | 496
-rw-r--r-- llvm/test/CodeGen/RISCV/rvv/vmfne-bf.ll | 496
-rw-r--r-- llvm/test/CodeGen/SPIRV/hlsl-resources/DynamicIdx/RWBufferDynamicIdx.ll | 22
-rw-r--r-- llvm/test/CodeGen/SPIRV/hlsl-resources/DynamicIdx/RWStructuredBufferDynamicIdx.ll | 21
-rw-r--r-- llvm/test/CodeGen/SPIRV/hlsl-resources/NonUniformIdx/RWBufferNonUniformIdx.ll (renamed from llvm/test/CodeGen/SPIRV/hlsl-resources/NonUniformIdx/StructuredBufferNonUniformIdx.ll) | 0
-rw-r--r-- llvm/test/CodeGen/SPIRV/hlsl-resources/NonUniformIdx/RWStructuredBufferNonUniformIdx.ll | 1
-rw-r--r-- llvm/test/CodeGen/SPIRV/hlsl-resources/StorageImageConstIdx.ll (renamed from llvm/test/CodeGen/SPIRV/hlsl-resources/StorageImageDynIdx.ll) | 2
-rw-r--r-- llvm/test/CodeGen/X86/combine-umax.ll | 2
-rw-r--r-- llvm/test/CodeGen/X86/combine-umin.ll | 2
-rw-r--r-- llvm/test/CodeGen/X86/vector-compress.ll | 70
-rw-r--r-- llvm/test/CodeGen/X86/vselect-avx.ll | 16
-rw-r--r-- llvm/test/CodeGen/X86/zero-call-used-regs-simd.ll | 216
-rw-r--r-- llvm/test/DebugInfo/Generic/compileunit-source-language-name.ll | 10
-rw-r--r-- llvm/test/Instrumentation/AddressSanitizer/asan-win-dont-instrument-catchpad.ll | 63
-rw-r--r-- llvm/test/MC/Disassembler/AMDGPU/gfx8_vop3cx_nowarn.txt | 422
-rw-r--r-- llvm/test/MC/Disassembler/AMDGPU/gfx9_vop3c_nowarn.txt | 402
-rw-r--r-- llvm/test/Other/loop-pm-invalidation.ll | 30
-rw-r--r-- llvm/test/Other/new-pm-defaults.ll | 1
-rw-r--r-- llvm/test/Other/new-pm-thinlto-postlink-defaults.ll | 1
-rw-r--r-- llvm/test/Other/new-pm-thinlto-postlink-pgo-defaults.ll | 1
-rw-r--r-- llvm/test/Other/new-pm-thinlto-postlink-samplepgo-defaults.ll | 1
-rw-r--r-- llvm/test/Other/new-pm-thinlto-prelink-defaults.ll | 1
-rw-r--r-- llvm/test/Other/new-pm-thinlto-prelink-pgo-defaults.ll | 1
-rw-r--r-- llvm/test/Other/new-pm-thinlto-prelink-samplepgo-defaults.ll | 1
-rw-r--r-- llvm/test/Transforms/IndVarSimplify/pointer-loop-guards.ll | 15
-rw-r--r-- llvm/test/Transforms/InstCombine/scmp.ll | 261
-rw-r--r-- llvm/test/Transforms/LoopPredication/preserve-bpi.ll | 60
-rw-r--r-- llvm/test/Transforms/LoopVectorize/SystemZ/addressing.ll | 12
-rw-r--r-- llvm/test/Transforms/LoopVectorize/pr48832.ll | 2
-rw-r--r-- llvm/test/Transforms/LowerMatrixIntrinsics/multiply-remainder-rm.ll | 96
-rw-r--r-- llvm/test/Transforms/LowerMatrixIntrinsics/multiply-remainder.ll | 96
-rw-r--r-- llvm/test/Transforms/PhaseOrdering/unswitch-cold-func.ll (renamed from llvm/test/Transforms/SimpleLoopUnswitch/PGO-nontrivial-unswitch.ll) | 9
-rw-r--r-- llvm/test/Transforms/SimpleLoopUnswitch/nontrivial-unswitch-markloopasdeleted.ll | 1
-rw-r--r-- llvm/test/tools/llvm-reduce/reduce-instructions-alloca.ll | 16
-rw-r--r-- llvm/tools/llvm-reduce/deltas/ReduceInstructions.cpp | 6
-rw-r--r-- llvm/utils/gn/secondary/lld/test/BUILD.gn | 7
-rw-r--r-- llvm/utils/profcheck-xfail.txt | 1
-rw-r--r-- mlir/Maintainers.md | 2
-rw-r--r-- mlir/include/mlir/Conversion/Passes.td | 2
-rw-r--r-- mlir/include/mlir/Dialect/Affine/IR/AffineOps.td | 1
-rw-r--r-- mlir/include/mlir/Dialect/GPU/Pipelines/Passes.h | 57
-rw-r--r-- mlir/include/mlir/Dialect/LLVMIR/ROCDLOps.td | 61
-rw-r--r-- mlir/include/mlir/Dialect/Linalg/Transforms/Transforms.h | 37
-rw-r--r-- mlir/lib/Dialect/Affine/IR/AffineOps.cpp | 171
-rw-r--r-- mlir/lib/Dialect/GPU/Pipelines/CMakeLists.txt | 6
-rw-r--r-- mlir/lib/Dialect/GPU/Pipelines/GPUToXeVMPipeline.cpp | 139
-rw-r--r-- mlir/lib/Dialect/Linalg/TransformOps/LinalgTransformOps.cpp | 16
-rw-r--r-- mlir/lib/Dialect/Linalg/Transforms/PadTilingInterface.cpp | 140
-rw-r--r-- mlir/lib/Dialect/OpenACC/IR/OpenACC.cpp | 152
-rw-r--r-- mlir/lib/Dialect/Vector/Transforms/VectorLinearize.cpp | 48
-rw-r--r-- mlir/lib/RegisterAllPasses.cpp | 1
-rw-r--r-- mlir/test/Conversion/MathToXeVM/math-to-xevm.mlir | 44
-rw-r--r-- mlir/test/Dialect/LLVMIR/rocdl.mlir | 51
-rw-r--r-- mlir/test/Dialect/Vector/linearize.mlir | 41
-rw-r--r-- mlir/test/Integration/Dialect/XeGPU/LANE/lit.local.cfg | 4
-rw-r--r-- mlir/test/Integration/Dialect/XeGPU/LANE/simple_gemm.mlir | 121
-rw-r--r-- mlir/test/Integration/Dialect/XeGPU/SG/lit.local.cfg | 4
-rw-r--r-- mlir/test/Integration/Dialect/XeGPU/SG/simple_gemm.mlir | 120
-rw-r--r-- mlir/test/Integration/Dialect/XeGPU/WG/lit.local.cfg | 4
-rw-r--r-- mlir/test/Integration/Dialect/XeGPU/WG/simple_gemm.mlir | 151
-rw-r--r-- mlir/test/Target/LLVMIR/rocdl.mlir | 129
-rw-r--r-- polly/lib/Transform/Canonicalization.cpp | 6
-rw-r--r-- utils/bazel/llvm-project-overlay/mlir/BUILD.bazel | 11
453 files changed, 152350 insertions, 29915 deletions
diff --git a/.github/workflows/premerge.yaml b/.github/workflows/premerge.yaml
index 03c0c01..951fc16 100644
--- a/.github/workflows/premerge.yaml
+++ b/.github/workflows/premerge.yaml
@@ -62,6 +62,7 @@ jobs:
with:
fetch-depth: 2
- name: Build and Test
+ timeout-minutes: 120
continue-on-error: ${{ runner.arch == 'ARM64' }}
run: |
git config --global --add safe.directory '*'
@@ -149,6 +150,7 @@ jobs:
echo "windows-runtimes=${runtimes_to_build}" >> $GITHUB_OUTPUT
echo "windows-runtimes-check-targets=${runtimes_check_targets}" >> $GITHUB_OUTPUT
- name: Build and Test
+ timeout-minutes: 180
if: ${{ steps.vars.outputs.windows-projects != '' }}
shell: cmd
run: |
diff --git a/clang/docs/InternalsManual.rst b/clang/docs/InternalsManual.rst
index c677ddfa..eff46ab 100644
--- a/clang/docs/InternalsManual.rst
+++ b/clang/docs/InternalsManual.rst
@@ -10,7 +10,7 @@ Introduction
This document describes some of the more important APIs and internal design
decisions made in the Clang C front-end. The purpose of this document is to
-both capture some of this high level information and also describe some of the
+both capture some of this high-level information and also describe some of the
design decisions behind it. This is meant for people interested in hacking on
Clang, not for end-users. The description below is categorized by libraries,
and does not describe any of the clients of the libraries.
@@ -20,7 +20,7 @@ LLVM Support Library
The LLVM ``libSupport`` library provides many underlying libraries and
`data-structures <https://llvm.org/docs/ProgrammersManual.html>`_, including
-command line option processing, various containers and a system abstraction
+command line option processing, various containers, and a system abstraction
layer, which is used for file system access.
The Clang "Basic" Library
@@ -34,7 +34,7 @@ and information about the subset of the language being compiled for.
Part of this infrastructure is specific to C (such as the ``TargetInfo``
class), other parts could be reused for other non-C-based languages
(``SourceLocation``, ``SourceManager``, ``Diagnostics``, ``FileManager``).
-When and if there is future demand we can figure out if it makes sense to
+When and if there is future demand, we can figure out if it makes sense to
introduce a new library, move the general classes somewhere else, or introduce
some other solution.
@@ -96,7 +96,7 @@ The ``EXTENSION`` and ``EXTWARN`` severities are used for extensions to the
language that Clang accepts. This means that Clang fully understands and can
represent them in the AST, but we produce diagnostics to tell the user their
code is non-portable. The difference is that the former are ignored by
-default, and the later warn by default. The ``WARNING`` severity is used for
+default, and the latter warn by default. The ``WARNING`` severity is used for
constructs that are valid in the currently selected source language but that
are dubious in some way. The ``REMARK`` severity provides generic information
about the compilation that is not necessarily related to any dubious code. The
@@ -106,7 +106,7 @@ These *severities* are mapped into a smaller set (the ``Diagnostic::Level``
enum, {``Ignored``, ``Note``, ``Remark``, ``Warning``, ``Error``, ``Fatal``}) of
output
*levels* by the diagnostics subsystem based on various configuration options.
-Clang internally supports a fully fine grained mapping mechanism that allows
+Clang internally supports a fully fine-grained mapping mechanism that allows
you to map almost any diagnostic to the output level that you want. The only
diagnostics that cannot be mapped are ``NOTE``\ s, which always follow the
severity of the previously emitted diagnostic and ``ERROR``\ s, which can only
@@ -116,18 +116,18 @@ example).
Diagnostic mappings are used in many ways. For example, if the user specifies
``-pedantic``, ``EXTENSION`` maps to ``Warning``, if they specify
``-pedantic-errors``, it turns into ``Error``. This is used to implement
-options like ``-Wunused_macros``, ``-Wundef`` etc.
+options like ``-Wunused_macros``, ``-Wundef``, etc.
Mapping to ``Fatal`` should only be used for diagnostics that are considered so
severe that error recovery won't be able to recover sensibly from them (thus
-spewing a ton of bogus errors). One example of this class of error are failure
+spewing a ton of bogus errors). One example of this class of error is failure
to ``#include`` a file.
Diagnostic Wording
^^^^^^^^^^^^^^^^^^
The wording used for a diagnostic is critical because it is the only way for a
user to know how to correct their code. Use the following suggestions when
-wording a diagnostic.
+wording a diagnostic:
* Diagnostics in Clang do not start with a capital letter and do not end with
punctuation.
@@ -162,7 +162,7 @@ wording a diagnostic.
cannot be null in well-defined C++ code``.
* Prefer diagnostic wording without contractions whenever possible. The single
quote in a contraction can be visually distracting due to its use with
- syntactic constructs and contractions can be harder to understand for non-
+ syntactic constructs, and contractions can be harder to understand for non-
native English speakers.
The Format String
@@ -195,14 +195,14 @@ the C++ code that :ref:`produces them <internals-producing-diag>`, and are
referenced by ``%0`` .. ``%9``. If you have more than 10 arguments to your
diagnostic, you are doing something wrong :). Unlike ``printf``, there is no
requirement that arguments to the diagnostic end up in the output in the same
-order as they are specified, you could have a format string with "``%1 %0``"
+order as they are specified; you could have a format string with "``%1 %0``"
that swaps them, for example. The text in between the percent and digit are
formatting instructions. If there are no instructions, the argument is just
turned into a string and substituted in.
Here are some "best practices" for writing the English format string:
-* Keep the string short. It should ideally fit in the 80 column limit of the
+* Keep the string short. It should ideally fit in the 80-column limit of the
``DiagnosticKinds.td`` file. This avoids the diagnostic wrapping when
printed, and forces you to think about the important point you are conveying
with the diagnostic.
@@ -227,7 +227,7 @@ used to achieve this sort of thing in a localizable way, see below.
Formatting a Diagnostic Argument
^^^^^^^^^^^^^^^^^^^^^^^^^^^^^^^^
-Arguments to diagnostics are fully typed internally, and come from a couple
+Arguments to diagnostics are fully typed internally and come from a couple of
different classes: integers, types, names, and random strings. Depending on
the class of the argument, it can be optionally formatted in different ways.
This gives the ``DiagnosticConsumer`` information about what the argument means
@@ -268,7 +268,7 @@ Description:
This format specifier is used to merge multiple related diagnostics together
into one common one, without requiring the difference to be specified as an
English string argument. Instead of specifying the string, the diagnostic
- gets an integer argument and the format string selects the numbered option.
+ gets an integer argument, and the format string selects the numbered option.
In this case, the "``%0``" value must be an integer in the range [0..2]. If
it is 0, it prints "unary", if it is 1 it prints "binary" if it is 2, it
prints "unary or binary". This allows other language translations to
@@ -287,7 +287,7 @@ Description:
additionally generates a namespace, enumeration, and enumerator list based on
the format string given. In the above case, a namespace is generated named
``FrobbleKind`` that has an unscoped enumeration with the enumerators
- ``VarDecl`` and ``FuncDecl`` which correspond to the values 0 and 1. This
+ ``VarDecl`` and ``FuncDecl``, which correspond to the values 0 and 1. This
permits a clearer use of the ``Diag`` in source code, as the above could be
called as: ``Diag(Loc, diag::frobble) << diag::FrobbleKind::VarDecl``.
@@ -407,7 +407,7 @@ Example:
def note_ovl_candidate : Note<
"candidate %sub{select_ovl_candidate}3,2,1 not viable">;
- and will act as if it was written
+ and will act as if it were written
``"candidate %select{function|constructor}3%select{| template| %1}2 not viable"``.
Description:
This format specifier is used to avoid repeating strings verbatim in multiple
@@ -447,7 +447,7 @@ For example, the binary expression error comes from code like this:
<< lex->getType() << rex->getType()
<< lex->getSourceRange() << rex->getSourceRange();
-This shows that use of the ``Diag`` method: it takes a location (a
+This shows the use of the ``Diag`` method: it takes a location (a
:ref:`SourceLocation <SourceLocation>` object) and a diagnostic enum value
(which matches the name from ``Diagnostic*Kinds.td``). If the diagnostic takes
arguments, they are specified with the ``<<`` operator: the first argument
@@ -586,7 +586,7 @@ Strangely enough, the ``SourceLocation`` class represents a location within the
source code of the program. Important design points include:
#. ``sizeof(SourceLocation)`` must be extremely small, as these are embedded
- into many AST nodes and are passed around often. Currently it is 32 bits.
+ into many AST nodes and are passed around often. Currently, it is 32 bits.
#. ``SourceLocation`` must be a simple value object that can be efficiently
copied.
#. We should be able to represent a source location for any byte of any input
@@ -605,7 +605,7 @@ In practice, the ``SourceLocation`` works together with the ``SourceManager``
class to encode two pieces of information about a location: its spelling
location and its expansion location. For most tokens, these will be the
same. However, for a macro expansion (or tokens that came from a ``_Pragma``
-directive) these will describe the location of the characters corresponding to
+directive), these will describe the location of the characters corresponding to
the token and the location where the token was used (i.e., the macro
expansion point or the location of the ``_Pragma`` itself).
@@ -621,7 +621,7 @@ token. This concept maps directly to the "spelling location" for the token.
.. mostly taken from https://discourse.llvm.org/t/code-ranges-of-tokens-ast-elements/16893/2
Clang represents most source ranges by [first, last], where "first" and "last"
-each point to the beginning of their respective tokens. For example consider
+each point to the beginning of their respective tokens. For example, consider
the ``SourceRange`` of the following statement:
.. code-block:: text
@@ -632,7 +632,7 @@ the ``SourceRange`` of the following statement:
To map from this representation to a character-based representation, the "last"
location needs to be adjusted to point to (or past) the end of that token with
either ``Lexer::MeasureTokenLength()`` or ``Lexer::getLocForEndOfToken()``. For
-the rare cases where character-level source ranges information is needed we use
+the rare cases where character-level source ranges information is needed, we use
the ``CharSourceRange`` class.
The Driver Library
@@ -651,17 +651,17 @@ The Frontend Library
====================
The Frontend library contains functionality useful for building tools on top of
-the Clang libraries, for example several methods for outputting diagnostics.
+the Clang libraries, including several methods for outputting diagnostics.
Compiler Invocation
-------------------
One of the classes provided by the Frontend library is ``CompilerInvocation``,
-which holds information that describe current invocation of the Clang ``-cc1``
+which holds information that describes the current invocation of the Clang ``-cc1``
frontend. The information typically comes from the command line constructed by
the Clang driver or from clients performing custom initialization. The data
structure is split into logical units used by different parts of the compiler,
-for example ``PreprocessorOptions``, ``LanguageOptions`` or ``CodeGenOptions``.
+for example, ``PreprocessorOptions``, ``LanguageOptions``, or ``CodeGenOptions``.
Command Line Interface
----------------------
@@ -698,7 +698,7 @@ Adding new Command Line Option
------------------------------
When adding a new command line option, the first place of interest is the header
-file declaring the corresponding options class (e.g. ``CodeGenOptions.h`` for
+file declaring the corresponding options class (e.g., ``CodeGenOptions.h`` for
command line option that affects the code generation). Create new member
variable for the option value:
@@ -739,7 +739,7 @@ The helper classes take a list of acceptable prefixes of the option (e.g.
Then, specify additional attributes via mix-ins:
* ``HelpText`` holds the text that will be printed besides the option name when
- the user requests help (e.g. via ``clang --help``).
+ the user requests help (e.g., via ``clang --help``).
* ``Group`` specifies the "category" of options this option belongs to. This is
used by various tools to categorize and sometimes filter options.
* ``Flags`` may contain "tags" associated with the option. These may affect how
@@ -779,7 +779,7 @@ use them to construct the ``-cc1`` job:
}
The last step is implementing the ``-cc1`` command line argument
-parsing/generation that initializes/serializes the option class (in our case
+parsing/generation that initializes/serializes the option class (in our case,
``CodeGenOptions``) stored within ``CompilerInvocation``. This can be done
automatically by using the marshalling annotations on the option definition:
@@ -946,13 +946,13 @@ described below. All of them take a key path argument and possibly other
information required for parsing or generating the command line argument.
**Note:** The marshalling infrastructure is not intended for driver-only
-options. Only options of the ``-cc1`` frontend need to be marshalled to/from
+options. Only options of the ``-cc1`` frontend need to be marshalled to/from a
``CompilerInvocation`` instance.
**Positive Flag**
The key path defaults to ``false`` and is set to ``true`` when the flag is
-present on command line.
+present on the command line.
.. code-block:: text
@@ -963,7 +963,7 @@ present on command line.
**Negative Flag**
The key path defaults to ``true`` and is set to ``false`` when the flag is
-present on command line.
+present on the command line.
.. code-block:: text
@@ -1041,7 +1041,7 @@ and the result is assigned to the key path on success.
The key path defaults to the value specified in ``MarshallingInfoEnum`` prefixed
by the contents of ``NormalizedValuesScope`` and ``::``. This ensures correct
-reference to an enum case is formed even if the enum resides in different
+reference to an enum case is formed even if the enum resides in a different
namespace or is an enum class. If the value present on the command line does not
match any of the comma-separated values from ``Values``, an error diagnostic is
issued. Otherwise, the corresponding element from ``NormalizedValues`` at the
@@ -1410,7 +1410,7 @@ or a clear engineering tradeoff -- should desugar minimally and wrap the result
in a construct representing the original source form.
For example, ``CXXForRangeStmt`` directly represents the syntactic form of a
-range-based for statement, but also holds a semantic representation of the
+range-based for statement but also holds a semantic representation of the
range declaration and iterator declarations. It does not contain a
fully-desugared ``ForStmt``, however.
@@ -1425,7 +1425,7 @@ with the same or similar semantics.
The ``Type`` class and its subclasses
-------------------------------------
-The ``Type`` class (and its subclasses) are an important part of the AST.
+The ``Type`` class (and its subclasses) is an important part of the AST.
Types are accessed through the ``ASTContext`` class, which implicitly creates
and uniques them as they are needed. Types have a couple of non-obvious
features: 1) they do not capture type qualifiers like ``const`` or ``volatile``
@@ -1474,7 +1474,7 @@ various operators (for example, the type of ``*Y`` is "``foo``", not
is an instance of the ``TypedefType`` class, which indicates that the type of
these expressions is a typedef for "``foo``".
-Representing types like this is great for diagnostics, because the
+Representing types like this is great for diagnostics because the
user-specified type is always immediately available. There are two problems
with this: first, various semantic checks need to make judgements about the
*actual structure* of a type, ignoring typedefs. Second, we need an efficient
@@ -1521,7 +1521,7 @@ know it exists. To continue the example, the result type of the indirection
operator is the pointee type of the subexpression. In order to determine the
type, we need to get the instance of ``PointerType`` that best captures the
typedef information in the program. If the type of the expression is literally
-a ``PointerType``, we can return that, otherwise we have to dig through the
+a ``PointerType``, we can return that; otherwise, we have to dig through the
typedefs to find the pointer type. For example, if the subexpression had type
"``foo*``", we could return that type as the result. If the subexpression had
type "``bar``", we want to return "``foo*``" (note that we do *not* want
@@ -1552,7 +1552,7 @@ that sets a bit), and remove one or more type qualifiers (just return a
``QualType`` with the bitfield set to empty).
Further, because the bits are stored outside of the type itself, we do not need
-to create duplicates of types with different sets of qualifiers (i.e. there is
+to create duplicates of types with different sets of qualifiers (i.e., there is
only a single heap allocated "``int``" type: "``const int``" and "``volatile
const int``" both point to the same heap allocated "``int``" type). This
reduces the heap size used to represent bits and also means we do not have to
@@ -1972,7 +1972,7 @@ and optimize code for it, but it's used as parsing continues to detect further
errors in the input. Clang-based tools also depend on such ASTs, and IDEs in
particular benefit from a high-quality AST for broken code.
-In presence of errors, clang uses a few error-recovery strategies to present the
+In the presence of errors, clang uses a few error-recovery strategies to present the
broken code in the AST:
- correcting errors: in cases where clang is confident about the fix, it
@@ -1981,7 +1981,7 @@ broken code in the AST:
provide more accurate subsequent diagnostics. Typo correction is a typical
example.
- representing invalid node: the invalid node is preserved in the AST in some
- form, e.g. when the "declaration" part of the declaration contains semantic
+ form, e.g., when the "declaration" part of the declaration contains semantic
errors, the Decl node is marked as invalid.
- dropping invalid node: this often happens for errors that we don’t have
graceful recovery. Prior to Recovery AST, a mismatched-argument function call
@@ -1994,9 +1994,9 @@ for broken code.
Recovery AST
^^^^^^^^^^^^
-The idea of Recovery AST is to use recovery nodes which act as a placeholder to
+The idea of Recovery AST is to use recovery nodes, which act as a placeholder to
maintain the rough structure of the parsing tree, preserve locations and
-children but have no language semantics attached to them.
+children, but have no language semantics attached to them.
For example, consider the following mismatched function call:
@@ -2031,10 +2031,10 @@ With Recovery AST, the AST looks like:
`-DeclRefExpr <col:9> 'int' lvalue ParmVar 'abc' 'int'
-An alternative is to use existing Exprs, e.g. CallExpr for the above example.
-This would capture more call details (e.g. locations of parentheses) and allow
+An alternative is to use existing Exprs, e.g., CallExpr for the above example.
+This would capture more call details (e.g., locations of parentheses) and allow
it to be treated uniformly with valid CallExprs. However, jamming the data we
-have into CallExpr forces us to weaken its invariants, e.g. arg count may be
+have into CallExpr forces us to weaken its invariants, e.g., arg count may be
wrong. This would introduce a huge burden on consumers of the AST to handle such
"impossible" cases. So when we're representing (rather than correcting) errors,
we use a distinct recovery node type with extremely weak invariants instead.
@@ -2048,7 +2048,7 @@ Types and dependence
^^^^^^^^^^^^^^^^^^^^
``RecoveryExpr`` is an ``Expr``, so it must have a type. In many cases the true
-type can't really be known until the code is corrected (e.g. a call to a
+type can't really be known until the code is corrected (e.g., a call to a
function that doesn't exist). And it means that we can't properly perform type
checks on some containing constructs, such as ``return 42 + unknownFunction()``.
@@ -2058,7 +2058,7 @@ mean dependence on a template parameter or how an error is repaired. The
``DependentTy``, and this suppresses type-based analysis in the same way it
would inside a template.
-In cases where we are confident about the concrete type (e.g. the return type
+In cases where we are confident about the concrete type (e.g., the return type
for a broken non-overloaded function call), the ``RecoveryExpr`` will have this
type. This allows more code to be typechecked, and produces a better AST and
more diagnostics. For example:
@@ -2071,7 +2071,7 @@ more diagnostics. For example:
Whether or not the ``RecoveryExpr`` has a dependent type, it is always
considered value-dependent, because its value isn't well-defined until the error
is resolved. Among other things, this means that clang doesn't emit more errors
-where a RecoveryExpr is used as a constant (e.g. array size), but also won't try
+where a RecoveryExpr is used as a constant (e.g., array size), but also won't try
to evaluate it.
ContainsErrors bit
@@ -2122,7 +2122,7 @@ cycles. One example of a cycle is the connection between a
``ClassTemplateDecl`` and its "templated" ``CXXRecordDecl``. The *templated*
``CXXRecordDecl`` represents all the fields and methods inside the class
template, while the ``ClassTemplateDecl`` holds the information which is
-related to being a template, i.e. template arguments, etc. We can get the
+related to being a template, i.e., template arguments, etc. We can get the
*templated* class (the ``CXXRecordDecl``) of a ``ClassTemplateDecl`` with
``ClassTemplateDecl::getTemplatedDecl()``. And we can get back a pointer of the
"described" class template from the *templated* class:
@@ -2145,7 +2145,7 @@ we skip the copy.
The informal definition of structural equivalency is the following:
Two nodes are **structurally equivalent** if they are
-- builtin types and refer to the same type, e.g. ``int`` and ``int`` are
+- builtin types and refer to the same type, e.g., ``int`` and ``int`` are
structurally equivalent,
- function types and all their parameters have structurally equivalent types,
- record types and all their fields in order of their definition have the same
@@ -2162,7 +2162,7 @@ mentioned properties, we have to check for equivalent template
parameters/arguments, etc.
The structural equivalent check can be and is used independently from the
-ASTImporter, e.g. the ``clang::Sema`` class uses it also.
+ASTImporter, e.g., the ``clang::Sema`` class uses it also.
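For instance, two independently parsed copies of the same definition are
structurally equivalent even though they are distinct types; a small
illustration where the namespaces merely stand in for two different ASTs:

  namespace from_tu { struct S { int x; double y; }; }
  namespace to_tu   { struct S { int x; double y; }; }
  // from_tu::S and to_tu::S have fields of the same builtin types in the same
  // order, so the structural equivalence check treats them as equivalent.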
The equivalence of nodes may depend on the equivalency of other pairs of nodes.
Thus, the check is implemented as a parallel graph traversal. We traverse
@@ -2195,7 +2195,7 @@ Redeclaration Chains
^^^^^^^^^^^^^^^^^^^^
The early version of the ``ASTImporter``'s merge mechanism squashed the
-declarations, i.e. it aimed to have only one declaration instead of maintaining
+declarations, i.e., it aimed to have only one declaration instead of maintaining
a whole redeclaration chain. This early approach simply skipped importing a
function prototype, but it imported a definition. To demonstrate the problem
with this approach let's consider an empty "to" context and the following
@@ -2225,7 +2225,7 @@ another definition, we will use the existing definition. However, we can import
prototype(s): we chain the newly imported prototype(s) to the existing
definition. Whenever we import a new prototype from a third context, that will
be added to the end of the redeclaration chain. This may result in long
-redeclaration chains in certain cases, e.g. if we import from several
+redeclaration chains in certain cases, e.g., if we import from several
translation units which include the same header with the prototype.
.. Squashing prototypes
@@ -2290,7 +2290,7 @@ Traversal during the Import
^^^^^^^^^^^^^^^^^^^^^^^^^^^
The node specific import mechanisms are implemented in
-``ASTNodeImporter::VisitNode()`` functions, e.g. ``VisitFunctionDecl()``.
+``ASTNodeImporter::VisitNode()`` functions, e.g., ``VisitFunctionDecl()``.
When we import a declaration then first we import everything which is needed to
call the constructor of that declaration node. Everything which can be set
later is set after the node is created. For example, in case of a
@@ -2490,7 +2490,7 @@ In case of LLDB, an implementation of the ``ExternalASTSource`` interface is
attached to the AST context which is related to the parsed expression. This
implementation of the ``ExternalASTSource`` interface is realized with the help
of the ``ASTImporter`` class. This way, LLDB can reuse Clang's parsing
-machinery while synthesizing the underlying AST from the debug data (e.g. from
+machinery while synthesizing the underlying AST from the debug data (e.g., from
DWARF). From the view of the ``ASTImporter`` this means both the "to" and the
"from" context may have declaration contexts with external lexical storage. If
a ``DeclContext`` in the "to" AST context has external lexical storage then we
@@ -2573,7 +2573,7 @@ conflict error (ODR violation in C++). In this case, we return with an
clients of the ``ASTImporter`` may require a different, perhaps less
conservative and more liberal error handling strategy.
-E.g. static analysis clients may benefit if the node is created even if there
+E.g., static analysis clients may benefit if the node is created even if there
is a name conflict. During the CTU analysis of certain projects, we recognized
that there are global declarations which collide with declarations from other
translation units, but they are not referenced outside from their translation
@@ -2916,7 +2916,7 @@ Any error during satisfaction is recorded in ``ConstraintSatisfaction``.
for nested requirements, ``ConstraintSatisfaction`` is stored (including
diagnostics) in the AST, which is something we might want to improve.
-When an atomic constraint is not satified, we try to substitute into any
+When an atomic constraint is not satisfied, we try to substitute into any
enclosing concept-id using the same mechanism described above, for
diagnostics purpose, and inject that in the ``ConstraintSatisfaction``.
@@ -3584,7 +3584,7 @@ be specified by appending a ``+`` to the number. For example:
void f(); // expected-note 0+ {{previous declaration is here}}
void g(); // expected-note 1+ {{previous declaration is here}}
-In the first example, the diagnostic becomes optional, i.e. it will be
+In the first example, the diagnostic becomes optional, i.e., it will be
swallowed if it occurs, but will not generate an error if it does not occur. In
the second example, the diagnostic must occur at least once. As a short-hand,
"one or more" can be specified simply by ``+``. For example:
diff --git a/clang/docs/OpenMPSupport.rst b/clang/docs/OpenMPSupport.rst
index 5c73e24..c75c170 100644
--- a/clang/docs/OpenMPSupport.rst
+++ b/clang/docs/OpenMPSupport.rst
@@ -193,7 +193,7 @@ implementation.
+------------------------------+--------------------------------------------------------------+--------------------------+-----------------------------------------------------------------------+
| device | support non-contiguous array sections for target update | :good:`done` | https://github.com/llvm/llvm-project/pull/144635 |
+------------------------------+--------------------------------------------------------------+--------------------------+-----------------------------------------------------------------------+
-| device | pointer attachment | :good:`done` | |
+| device | pointer attachment | :part:`being repaired` | @abhinavgaba (https://github.com/llvm/llvm-project/pull/153683) |
+------------------------------+--------------------------------------------------------------+--------------------------+-----------------------------------------------------------------------+
| atomic | hints for the atomic construct | :good:`done` | D51233 |
+------------------------------+--------------------------------------------------------------+--------------------------+-----------------------------------------------------------------------+
@@ -627,6 +627,10 @@ implementation.
+-------------------------------------------------------------+---------------------------+---------------------------+--------------------------------------------------------------------------+
| loop grid/tile modifiers for sizes clause | :none:`unclaimed` | :none:`unclaimed` | |
+-------------------------------------------------------------+---------------------------+---------------------------+--------------------------------------------------------------------------+
+| attach map-type modifier | :part:`In Progress` | :none:`unclaimed` | C/C++: @abhinavgaba; |
+| | | | RT: @abhinavgaba (https://github.com/llvm/llvm-project/pull/149036, |
+| | | | https://github.com/llvm/llvm-project/pull/158370) |
++-------------------------------------------------------------+---------------------------+---------------------------+--------------------------------------------------------------------------+
OpenMP Extensions
diff --git a/clang/docs/ReleaseNotes.rst b/clang/docs/ReleaseNotes.rst
index 4f62a67..fe77f91 100644
--- a/clang/docs/ReleaseNotes.rst
+++ b/clang/docs/ReleaseNotes.rst
@@ -128,6 +128,17 @@ AST Dumping Potentially Breaking Changes
- Default arguments of template template parameters are pretty-printed now.
+- Pretty-printing of ``asm`` attributes now always places them as the first
+ attribute on the right side of the declaration. Before, the output was, e.g.:
+
+ ``__attribute__(("visibility")) asm("string")``
+
+ Now it is:
+
+ ``asm("string") __attribute__(("visibility"))``
+
+ This form is accepted by both the clang and gcc parsers.
+
Clang Frontend Potentially Breaking Changes
-------------------------------------------
- Members of anonymous unions/structs are now injected as ``IndirectFieldDecl``
@@ -271,6 +282,8 @@ Non-comprehensive list of changes in this release
allocation functions with a token ID can be enabled via the
``-fsanitize=alloc-token`` flag.
+- Clang now rejects the invalid use of ``constexpr`` with ``auto`` and an explicit type in C. (#GH163090)
+
New Compiler Flags
------------------
- New option ``-fno-sanitize-debug-trap-reasons`` added to disable emitting trap reasons into the debug info when compiling with trapping UBSan (e.g. ``-fsanitize-trap=undefined``).
@@ -476,6 +489,7 @@ Bug Fixes to C++ Support
- Fix a crash when attempting to deduce a deduction guide from a non deducible template template parameter. (#130604)
- Fix for clang incorrectly rejecting the default construction of a union with
nontrivial member when another member has an initializer. (#GH81774)
+- Fixed a template depth issue when parsing lambdas inside a type constraint. (#GH162092)
- Diagnose unresolved overload sets in non-dependent compound requirements. (#GH51246) (#GH97753)
Bug Fixes to AST Handling
diff --git a/clang/include/clang/Basic/Builtins.td b/clang/include/clang/Basic/Builtins.td
index 792e2e0..a350acd 100644
--- a/clang/include/clang/Basic/Builtins.td
+++ b/clang/include/clang/Basic/Builtins.td
@@ -4957,6 +4957,18 @@ def HLSLResourceNonUniformIndex : LangBuiltin<"HLSL_LANG"> {
let Prototype = "uint32_t(uint32_t)";
}
+def HLSLResourceGetDimensionsX : LangBuiltin<"HLSL_LANG"> {
+ let Spellings = ["__builtin_hlsl_resource_getdimensions_x"];
+ let Attributes = [NoThrow];
+ let Prototype = "void(...)";
+}
+
+def HLSLResourceGetStride : LangBuiltin<"HLSL_LANG"> {
+ let Spellings = ["__builtin_hlsl_resource_getstride"];
+ let Attributes = [NoThrow];
+ let Prototype = "void(...)";
+}
+
def HLSLAll : LangBuiltin<"HLSL_LANG"> {
let Spellings = ["__builtin_hlsl_all"];
let Attributes = [NoThrow, Const];
diff --git a/clang/include/clang/Basic/BuiltinsX86.td b/clang/include/clang/Basic/BuiltinsX86.td
index 62c70fba..d03c778 100644
--- a/clang/include/clang/Basic/BuiltinsX86.td
+++ b/clang/include/clang/Basic/BuiltinsX86.td
@@ -124,13 +124,13 @@ let Attributes = [Const, NoThrow, RequiredVectorWidth<128>] in {
}
let Features = "ssse3" in {
- def pmulhrsw128 : X86Builtin<"_Vector<8, short>(_Vector<8, short>, _Vector<8, short>)">;
def psignb128 : X86Builtin<"_Vector<16, char>(_Vector<16, char>, _Vector<16, char>)">;
def psignw128 : X86Builtin<"_Vector<8, short>(_Vector<8, short>, _Vector<8, short>)">;
def psignd128 : X86Builtin<"_Vector<4, int>(_Vector<4, int>, _Vector<4, int>)">;
}
let Features = "ssse3", Attributes = [NoThrow, Const, Constexpr, RequiredVectorWidth<128>] in {
+ def pmulhrsw128 : X86Builtin<"_Vector<8, short>(_Vector<8, short>, _Vector<8, short>)">;
def pmaddubsw128 : X86Builtin<"_Vector<8, short>(_Vector<16, char>, _Vector<16, char>)">;
def pshufb128 : X86Builtin<"_Vector<16, char>(_Vector<16, char>, _Vector<16, char>)">;
}
@@ -608,7 +608,6 @@ let Features = "avx2", Attributes = [NoThrow, Const, RequiredVectorWidth<256>] i
def palignr256 : X86Builtin<"_Vector<32, char>(_Vector<32, char>, _Vector<32, char>, _Constant int)">;
def pmovmskb256 : X86Builtin<"int(_Vector<32, char>)">;
- def pmulhrsw256 : X86Builtin<"_Vector<16, short>(_Vector<16, short>, _Vector<16, short>)">;
def psadbw256 : X86Builtin<"_Vector<4, long long int>(_Vector<32, char>, _Vector<32, char>)">;
def psignb256 : X86Builtin<"_Vector<32, char>(_Vector<32, char>, _Vector<32, char>)">;
def psignw256 : X86Builtin<"_Vector<16, short>(_Vector<16, short>, _Vector<16, short>)">;
@@ -661,6 +660,7 @@ let Features = "avx2", Attributes = [NoThrow, Const, Constexpr, RequiredVectorWi
def psrawi256 : X86Builtin<"_Vector<16, short>(_Vector<16, short>, int)">;
def psradi256 : X86Builtin<"_Vector<8, int>(_Vector<8, int>, int)">;
+ def pmulhrsw256 : X86Builtin<"_Vector<16, short>(_Vector<16, short>, _Vector<16, short>)">;
def pmulhuw256 : X86Builtin<"_Vector<16, unsigned short>(_Vector<16, unsigned short>, _Vector<16, unsigned short>)">;
def pmulhw256 : X86Builtin<"_Vector<16, short>(_Vector<16, short>, _Vector<16, short>)">;
@@ -1386,13 +1386,10 @@ let Features = "avx512bitalg", Attributes = [NoThrow, Const, RequiredVectorWidth
def vpshufbitqmb512_mask : X86Builtin<"unsigned long long int(_Vector<64, char>, _Vector<64, char>, unsigned long long int)">;
}
-let Features = "avx512bw", Attributes = [NoThrow, Const, RequiredVectorWidth<512>] in {
- def pmulhrsw512 : X86Builtin<"_Vector<32, short>(_Vector<32, short>, _Vector<32, short>)">;
-}
-
let Features = "avx512bw", Attributes = [NoThrow, Const, Constexpr, RequiredVectorWidth<512>] in {
def pavgb512 : X86Builtin<"_Vector<64, unsigned char>(_Vector<64, unsigned char>, _Vector<64, unsigned char>)">;
def pavgw512 : X86Builtin<"_Vector<32, unsigned short>(_Vector<32, unsigned short>, _Vector<32, unsigned short>)">;
+ def pmulhrsw512 : X86Builtin<"_Vector<32, short>(_Vector<32, short>, _Vector<32, short>)">;
def pmulhuw512 : X86Builtin<"_Vector<32, unsigned short>(_Vector<32, unsigned short>, _Vector<32, unsigned short>)">;
def pmulhw512 : X86Builtin<"_Vector<32, short>(_Vector<32, short>, _Vector<32, short>)">;
}
diff --git a/clang/include/clang/Basic/riscv_vector.td b/clang/include/clang/Basic/riscv_vector.td
index 07a8724..96d8300 100644
--- a/clang/include/clang/Basic/riscv_vector.td
+++ b/clang/include/clang/Basic/riscv_vector.td
@@ -1013,9 +1013,9 @@ let ManualCodegen = [{
}] in {
let HasFRMRoundModeOp = true in {
// 13.2. Vector Single-Width Floating-Point Add/Subtract Instructions
- defm vfadd : RVVFloatingBinBuiltinSetRoundingMode;
- defm vfsub : RVVFloatingBinBuiltinSetRoundingMode;
- defm vfrsub : RVVFloatingBinVFBuiltinSetRoundingMode;
+ defm vfadd : RVVFloatingBinBuiltinSetRoundingMode<HasBF=1>;
+ defm vfsub : RVVFloatingBinBuiltinSetRoundingMode<HasBF=1>;
+ defm vfrsub : RVVFloatingBinVFBuiltinSetRoundingMode<HasBF=1>;
// 13.3. Vector Widening Floating-Point Add/Subtract Instructions
// Widening FP add/subtract, 2*SEW = 2*SEW +/- SEW
@@ -1023,14 +1023,14 @@ let ManualCodegen = [{
defm vfwsub : RVVFloatingWidenOp0BinBuiltinSetRoundingMode;
// 13.4. Vector Single-Width Floating-Point Multiply/Divide Instructions
- defm vfmul : RVVFloatingBinBuiltinSetRoundingMode;
+ defm vfmul : RVVFloatingBinBuiltinSetRoundingMode<HasBF=1>;
defm vfdiv : RVVFloatingBinBuiltinSetRoundingMode;
defm vfrdiv : RVVFloatingBinVFBuiltinSetRoundingMode;
}
// 13.2. Vector Single-Width Floating-Point Add/Subtract Instructions
- defm vfadd : RVVFloatingBinBuiltinSet;
- defm vfsub : RVVFloatingBinBuiltinSet;
- defm vfrsub : RVVFloatingBinVFBuiltinSet;
+ defm vfadd : RVVFloatingBinBuiltinSet<HasBF=1>;
+ defm vfsub : RVVFloatingBinBuiltinSet<HasBF=1>;
+ defm vfrsub : RVVFloatingBinVFBuiltinSet<HasBF=1>;
// 13.3. Vector Widening Floating-Point Add/Subtract Instructions
// Widening FP add/subtract, 2*SEW = 2*SEW +/- SEW
@@ -1038,7 +1038,7 @@ let ManualCodegen = [{
defm vfwsub : RVVFloatingWidenOp0BinBuiltinSet;
// 13.4. Vector Single-Width Floating-Point Multiply/Divide Instructions
- defm vfmul : RVVFloatingBinBuiltinSet;
+ defm vfmul : RVVFloatingBinBuiltinSet<HasBF=1>;
defm vfdiv : RVVFloatingBinBuiltinSet;
defm vfrdiv : RVVFloatingBinVFBuiltinSet;
}
@@ -1065,6 +1065,10 @@ let ManualCodegen = [{
defm vfwmul : RVVOutOp0Op1BuiltinSet<"vfwmul", "x",
[["vv", "w", "wvvu"],
["vf", "w", "wveu"]]>;
+ let RequiredFeatures = ["zvfbfa"] in
+ defm vfwmul : RVVOutOp0Op1BuiltinSet<"vfwmul", "y",
+ [["vv", "vw", "wvvu"],
+ ["vf", "vw", "wveu"]]>;
}
}
// 13.3. Vector Widening Floating-Point Add/Subtract Instructions
@@ -1081,6 +1085,10 @@ let ManualCodegen = [{
defm vfwmul : RVVOutOp0Op1BuiltinSet<"vfwmul", "x",
[["vv", "w", "wvv"],
["vf", "w", "wve"]]>;
+ let RequiredFeatures = ["zvfbfa"] in
+ defm vfwmul : RVVOutOp0Op1BuiltinSet<"vfwmul", "y",
+ [["vv", "vw", "wvv"],
+ ["vf", "vw", "wve"]]>;
}
}
}
@@ -1170,6 +1178,8 @@ let ManualCodegen = [{
defm vfrec7 : RVVOutBuiltinSet<"vfrec7", "fd", [["v", "v", "vvu"]]>;
let RequiredFeatures = ["zvfh"] in
defm vfrec7 : RVVOutBuiltinSet<"vfrec7", "x", [["v", "v", "vvu"]]>;
+ let RequiredFeatures = ["zvfbfa"] in
+ defm vfrec7 : RVVOutBuiltinSet<"vfrec7", "y", [["v", "v", "vvu"]]>;
}
// 13.8. Vector Floating-Point Square-Root Instruction
defm vfsqrt : RVVOutBuiltinSet<"vfsqrt", "fd", [["v", "v", "vv"]]>;
@@ -1180,21 +1190,26 @@ let ManualCodegen = [{
defm vfrec7 : RVVOutBuiltinSet<"vfrec7", "fd", [["v", "v", "vv"]]>;
let RequiredFeatures = ["zvfh"] in
defm vfrec7 : RVVOutBuiltinSet<"vfrec7", "x", [["v", "v", "vv"]]>;
+ let RequiredFeatures = ["zvfbfa"] in
+ defm vfrec7 : RVVOutBuiltinSet<"vfrec7", "y", [["v", "v", "vv"]]>;
}
// 13.9. Vector Floating-Point Reciprocal Square-Root Estimate Instruction
defm vfrsqrt7 : RVVOutBuiltinSet<"vfrsqrt7", "fd", [["v", "v", "vv"]]>;
let RequiredFeatures = ["zvfh"] in
defm vfrsqrt7 : RVVOutBuiltinSet<"vfrsqrt7", "x", [["v", "v", "vv"]]>;
+let RequiredFeatures = ["zvfbfa"] in
+ defm vfrsqrt7 : RVVOutBuiltinSet<"vfrsqrt7", "y", [["v", "v", "vv"]]>;
+
// 13.11. Vector Floating-Point MIN/MAX Instructions
-defm vfmin : RVVFloatingBinBuiltinSet;
-defm vfmax : RVVFloatingBinBuiltinSet;
+defm vfmin : RVVFloatingBinBuiltinSet<HasBF=1>;
+defm vfmax : RVVFloatingBinBuiltinSet<HasBF=1>;
// 13.12. Vector Floating-Point Sign-Injection Instructions
-defm vfsgnj : RVVFloatingBinBuiltinSet;
-defm vfsgnjn : RVVFloatingBinBuiltinSet;
-defm vfsgnjx : RVVFloatingBinBuiltinSet;
+defm vfsgnj : RVVFloatingBinBuiltinSet<HasBF=1>;
+defm vfsgnjn : RVVFloatingBinBuiltinSet<HasBF=1>;
+defm vfsgnjx : RVVFloatingBinBuiltinSet<HasBF=1>;
}
defm vfneg_v : RVVPseudoVFUnaryBuiltin<"vfsgnjn", "fd">;
let RequiredFeatures = ["zvfh"] in
@@ -1219,6 +1234,8 @@ let UnMaskedPolicyScheme = HasPassthruOperand in {
defm vfclass : RVVOp0BuiltinSet<"vfclass", "fd", [["v", "Uv", "Uvv"]]>;
let RequiredFeatures = ["zvfh"] in
defm vfclass : RVVOp0BuiltinSet<"vfclass", "x", [["v", "Uv", "Uvv"]]>;
+let RequiredFeatures = ["zvfbfa"] in
+ defm vfclass : RVVOp0BuiltinSet<"vfclass", "y", [["v", "vUv", "Uvv"]]>;
}
// 13.15. Vector Floating-Point Merge Instruction
@@ -1239,6 +1256,9 @@ let HasMasked = false,
let RequiredFeatures = ["zvfh"] in
defm vfmerge : RVVOutOp1BuiltinSet<"vfmerge", "x",
[["vfm", "v", "vvem"]]>;
+ let RequiredFeatures = ["zvfbfa"] in
+ defm vfmerge : RVVOutOp1BuiltinSet<"vfmerge", "y",
+ [["vfm", "v", "vvem"]]>;
}
// 13.16. Vector Floating-Point Move Instruction
@@ -1252,6 +1272,9 @@ let HasMasked = false,
let RequiredFeatures = ["zvfh"] in
defm vfmv_v : RVVOutBuiltinSet<"vfmv_v_f", "x",
[["f", "v", "ve"]]>;
+ let RequiredFeatures = ["zvfbfa"] in
+ defm vfmv_v : RVVOutBuiltinSet<"vfmv_v_f", "y",
+ [["f", "v", "ve"]]>;
}
// 13.17. Single-Width Floating-Point/Integer Type-Convert Instructions
@@ -1287,10 +1310,16 @@ let Log2LMUL = [-3, -2, -1, 0, 1, 2] in {
defm : RVVConvBuiltinSet<"vfwcvt_f_x_v", "c", [["Fw", "Fwv"]]>;
}
}
+ let RequiredFeatures = ["zvfbfa"], OverloadedName = "vfwcvt_f_bf16" in {
+ defm : RVVConvBuiltinSet<"vfwcvt_f_xu_v", "c", [["Yw", "YwUv"]]>;
+ defm : RVVConvBuiltinSet<"vfwcvt_f_x_v", "c", [["Yw", "Ywv"]]>;
+ }
let OverloadedName = "vfwcvt_f" in {
defm : RVVConvBuiltinSet<"vfwcvt_f_f_v", "f", [["w", "wv"]]>;
let RequiredFeatures = ["zvfhmin"] in
defm : RVVConvBuiltinSet<"vfwcvt_f_f_v", "x", [["w", "wv"]]>;
+ let RequiredFeatures = ["zvfbfa"] in
+ defm : RVVConvBuiltinSet<"vfwcvt_f_f_v", "y", [["vw", "wv"]]>;
}
}
@@ -1300,17 +1329,23 @@ let Log2LMUL = [-3, -2, -1, 0, 1, 2] in {
defm : RVVConvBuiltinSet<"vfncvt_rtz_xu_f_w", "si", [["Uv", "UvFw"]]>;
let RequiredFeatures = ["zvfh"] in
defm : RVVConvBuiltinSet<"vfncvt_rtz_xu_f_w", "c", [["Uv", "UvFw"]]>;
+ let RequiredFeatures = ["zvfbfa"] in
+ defm : RVVConvBuiltinSet<"vfncvt_rtz_xu_f_w", "c", [["YwUv", "UvYw"]]>;
}
let OverloadedName = "vfncvt_rtz_x" in {
defm : RVVConvBuiltinSet<"vfncvt_rtz_x_f_w", "si", [["Iv", "IvFw"]]>;
let RequiredFeatures = ["zvfh"] in
defm : RVVConvBuiltinSet<"vfncvt_rtz_x_f_w", "c", [["Iv", "IvFw"]]>;
+ let RequiredFeatures = ["zvfbfa"] in
+ defm : RVVConvBuiltinSet<"vfncvt_rtz_x_f_w", "c", [["YwIv", "IvYw"]]>;
}
let OverloadedName = "vfncvt_rod_f" in {
defm : RVVConvBuiltinSet<"vfncvt_rod_f_f_w", "f", [["v", "vw"]]>;
let RequiredFeatures = ["zvfh"] in
defm : RVVConvBuiltinSet<"vfncvt_rod_f_f_w", "x", [["v", "vw"]]>;
}
+ let RequiredFeatures = ["zvfbfa"], OverloadedName = "vfncvt_rod_f_bf16" in
+ defm : RVVConvBuiltinSet<"vfncvt_rod_f_f_w", "y", [["v", "vw"]]>;
}
// Zvfbfmin - Vector convert BF16 to FP32
@@ -1363,11 +1398,15 @@ let ManualCodegen = [{
defm : RVVConvBuiltinSet<"vfncvt_x_f_w", "si", [["Iv", "IvFwu"]]>;
let RequiredFeatures = ["zvfh"] in
defm : RVVConvBuiltinSet<"vfncvt_x_f_w", "c", [["Iv", "IvFwu"]]>;
+ let RequiredFeatures = ["zvfbfa"] in
+ defm : RVVConvBuiltinSet<"vfncvt_x_f_w", "c", [["YwIv", "IvYwu"]]>;
}
let OverloadedName = "vfncvt_xu" in {
defm : RVVConvBuiltinSet<"vfncvt_xu_f_w", "si", [["Uv", "UvFwu"]]>;
let RequiredFeatures = ["zvfh"] in
defm : RVVConvBuiltinSet<"vfncvt_xu_f_w", "c", [["Uv", "UvFwu"]]>;
+ let RequiredFeatures = ["zvfbfa"] in
+ defm : RVVConvBuiltinSet<"vfncvt_xu_f_w", "c", [["YwUv", "UvYwu"]]>;
}
let OverloadedName = "vfncvt_f" in {
defm : RVVConvBuiltinSet<"vfncvt_f_x_w", "f", [["v", "vIwu"]]>;
@@ -1382,6 +1421,8 @@ let ManualCodegen = [{
let RequiredFeatures = ["zvfhmin"] in
defm : RVVConvBuiltinSet<"vfncvt_f_f_w", "x", [["v", "vwu"]]>;
}
+ let RequiredFeatures = ["zvfbfa"], OverloadedName = "vfncvt_f_bf16" in
+ defm : RVVConvBuiltinSet<"vfncvt_f_f_w", "y", [["v", "vwu"]]>;
}
// Zvfbfmin - Vector convert FP32 to BF16
@@ -1430,11 +1471,15 @@ let ManualCodegen = [{
defm : RVVConvBuiltinSet<"vfncvt_x_f_w", "si", [["Iv", "IvFw"]]>;
let RequiredFeatures = ["zvfh"] in
defm : RVVConvBuiltinSet<"vfncvt_x_f_w", "c", [["Iv", "IvFw"]]>;
+ let RequiredFeatures = ["zvfbfa"] in
+ defm : RVVConvBuiltinSet<"vfncvt_x_f_w", "c", [["YwIv", "IvYw"]]>;
}
let OverloadedName = "vfncvt_xu" in {
defm : RVVConvBuiltinSet<"vfncvt_xu_f_w", "si", [["Uv", "UvFw"]]>;
let RequiredFeatures = ["zvfh"] in
defm : RVVConvBuiltinSet<"vfncvt_xu_f_w", "c", [["Uv", "UvFw"]]>;
+ let RequiredFeatures = ["zvfbfa"] in
+ defm : RVVConvBuiltinSet<"vfncvt_xu_f_w", "c", [["YwUv", "UvYw"]]>;
}
let OverloadedName = "vfncvt_f" in {
defm : RVVConvBuiltinSet<"vfncvt_f_x_w", "f", [["v", "vIw"]]>;
@@ -1449,6 +1494,8 @@ let ManualCodegen = [{
let RequiredFeatures = ["zvfhmin"] in
defm : RVVConvBuiltinSet<"vfncvt_f_f_w", "x", [["v", "vw"]]>;
}
+ let RequiredFeatures = ["zvfbfa"], OverloadedName = "vfncvt_f_bf16" in
+ defm : RVVConvBuiltinSet<"vfncvt_f_f_w", "y", [["v", "vw"]]>;
}
// Zvfbfmin - Vector convert FP32 to BF16
@@ -1578,6 +1625,9 @@ let HasMasked = false, MaskedPolicyScheme = NonePolicy in {
let RequiredFeatures = ["zvfh"] in
defm vfmv_f : RVVOp0BuiltinSet<"vfmv_f_s", "x",
[["s", "ve", "ev"]]>;
+ let RequiredFeatures = ["zvfbfa"] in
+ defm vfmv_f : RVVOp0BuiltinSet<"vfmv_f_s", "y",
+ [["s", "ve", "ev"]]>;
}
let OverloadedName = "vfmv_s",
UnMaskedPolicyScheme = HasPassthruOperand,
@@ -1589,6 +1639,9 @@ let HasMasked = false, MaskedPolicyScheme = NonePolicy in {
defm vfmv_s : RVVOutBuiltinSet<"vfmv_s_f", "x",
[["f", "v", "ve"],
["x", "Uv", "UvUe"]]>;
+ let RequiredFeatures = ["zvfbfa"] in
+ defm vfmv_s : RVVOutBuiltinSet<"vfmv_s_f", "y",
+ [["f", "v", "ve"]]>;
}
}
@@ -1601,11 +1654,11 @@ defm vslidedown : RVVSlideDownBuiltinSet;
// 16.3.3. Vector Slide1up Instructions
let UnMaskedPolicyScheme = HasPassthruOperand in {
defm vslide1up : RVVSlideOneBuiltinSet;
-defm vfslide1up : RVVFloatingBinVFBuiltinSet;
+defm vfslide1up : RVVFloatingBinVFBuiltinSet<HasBF=1>;
// 16.3.4. Vector Slide1down Instruction
defm vslide1down : RVVSlideOneBuiltinSet;
-defm vfslide1down : RVVFloatingBinVFBuiltinSet;
+defm vfslide1down : RVVFloatingBinVFBuiltinSet<HasBF=1>;
// 16.4. Vector Register Gather Instructions
// signed and floating type
diff --git a/clang/include/clang/Basic/riscv_vector_common.td b/clang/include/clang/Basic/riscv_vector_common.td
index 767bcee..eaa2ba4 100644
--- a/clang/include/clang/Basic/riscv_vector_common.td
+++ b/clang/include/clang/Basic/riscv_vector_common.td
@@ -83,6 +83,8 @@
// elements of the same width
// F: given a vector type, compute the vector type with floating-point type
// elements of the same width
+// Y: given a vector type, compute the vector type with bfloat16 type elements
+// of the same width
// S: given a vector type, computes its equivalent one for LMUL=1. This is a
// no-op if the vector was already LMUL=1
// (Log2EEW:Value): Log2EEW value could be 3/4/5/6 (8/16/32/64), given a
@@ -470,6 +472,10 @@ let HasMaskedOffOperand = false in {
defm "" : RVVOutOp1BuiltinSet<NAME, "x",
[["vv", "v", "vvvv"],
["vf", "v", "vvev"]]>;
+ let RequiredFeatures = ["zvfbfa"] in
+ defm "" : RVVOutOp1BuiltinSet<NAME, "y",
+ [["vv", "v", "vvvv"],
+ ["vf", "v", "vvev"]]>;
}
multiclass RVVFloatingTerBuiltinSetRoundingMode {
defm "" : RVVOutOp1BuiltinSet<NAME, "fd",
@@ -479,6 +485,10 @@ let HasMaskedOffOperand = false in {
defm "" : RVVOutOp1BuiltinSet<NAME, "x",
[["vv", "v", "vvvvu"],
["vf", "v", "vvevu"]]>;
+ let RequiredFeatures = ["zvfbfa"] in
+ defm "" : RVVOutOp1BuiltinSet<NAME, "y",
+ [["vv", "v", "vvvvu"],
+ ["vf", "v", "vvevu"]]>;
}
}
@@ -491,6 +501,10 @@ let HasMaskedOffOperand = false, Log2LMUL = [-2, -1, 0, 1, 2] in {
defm "" : RVVOutOp1Op2BuiltinSet<NAME, "x",
[["vv", "w", "wwvv"],
["vf", "w", "wwev"]]>;
+ let RequiredFeatures = ["zvfbfa"] in
+ defm "" : RVVOutOp1Op2BuiltinSet<NAME, "y",
+ [["vv", "vw", "wwvv"],
+ ["vf", "vw", "wwev"]]>;
}
multiclass RVVFloatingWidenTerBuiltinSetRoundingMode {
defm "" : RVVOutOp1Op2BuiltinSet<NAME, "f",
@@ -500,10 +514,14 @@ let HasMaskedOffOperand = false, Log2LMUL = [-2, -1, 0, 1, 2] in {
defm "" : RVVOutOp1Op2BuiltinSet<NAME, "x",
[["vv", "w", "wwvvu"],
["vf", "w", "wwevu"]]>;
+ let RequiredFeatures = ["zvfbfa"] in
+ defm "" : RVVOutOp1Op2BuiltinSet<NAME, "y",
+ [["vv", "vw", "wwvvu"],
+ ["vf", "vw", "wwevu"]]>;
}
}
-multiclass RVVFloatingBinBuiltinSet {
+multiclass RVVFloatingBinBuiltinSet<bit HasBF = 0> {
defm "" : RVVOutOp1BuiltinSet<NAME, "fd",
[["vv", "v", "vvv"],
["vf", "v", "vve"]]>;
@@ -511,9 +529,15 @@ multiclass RVVFloatingBinBuiltinSet {
defm "" : RVVOutOp1BuiltinSet<NAME, "x",
[["vv", "v", "vvv"],
["vf", "v", "vve"]]>;
+ if HasBF then {
+ let RequiredFeatures = ["zvfbfa"] in
+ defm "" : RVVOutOp1BuiltinSet<NAME, "y",
+ [["vv", "v", "vvv"],
+ ["vf", "v", "vve"]]>;
+ }
}
-multiclass RVVFloatingBinBuiltinSetRoundingMode {
+multiclass RVVFloatingBinBuiltinSetRoundingMode<bit HasBF = 0> {
defm "" : RVVOutOp1BuiltinSet<NAME, "fd",
[["vv", "v", "vvvu"],
["vf", "v", "vveu"]]>;
@@ -521,22 +545,38 @@ multiclass RVVFloatingBinBuiltinSetRoundingMode {
defm "" : RVVOutOp1BuiltinSet<NAME, "x",
[["vv", "v", "vvvu"],
["vf", "v", "vveu"]]>;
+ if HasBF then {
+ let RequiredFeatures = ["zvfbfa"] in
+ defm "" : RVVOutOp1BuiltinSet<NAME, "y",
+ [["vv", "v", "vvvu"],
+ ["vf", "v", "vveu"]]>;
+ }
}
-multiclass RVVFloatingBinVFBuiltinSet {
+multiclass RVVFloatingBinVFBuiltinSet<bit HasBF = 0> {
defm "" : RVVOutOp1BuiltinSet<NAME, "fd",
[["vf", "v", "vve"]]>;
let RequiredFeatures = ["zvfh"] in
defm "" : RVVOutOp1BuiltinSet<NAME, "x",
[["vf", "v", "vve"]]>;
+ if HasBF then {
+ let RequiredFeatures = ["zvfbfa"] in
+ defm "" : RVVOutOp1BuiltinSet<NAME, "y",
+ [["vf", "v", "vve"]]>;
+ }
}
-multiclass RVVFloatingBinVFBuiltinSetRoundingMode {
+multiclass RVVFloatingBinVFBuiltinSetRoundingMode<bit HasBF = 0> {
defm "" : RVVOutOp1BuiltinSet<NAME, "fd",
[["vf", "v", "vveu"]]>;
let RequiredFeatures = ["zvfh"] in
defm "" : RVVOutOp1BuiltinSet<NAME, "x",
[["vf", "v", "vveu"]]>;
+ if HasBF then {
+ let RequiredFeatures = ["zvfbfa"] in
+ defm "" : RVVOutOp1BuiltinSet<NAME, "y",
+ [["vf", "v", "vveu"]]>;
+ }
}
multiclass RVVFloatingMaskOutBuiltinSet {
@@ -547,6 +587,10 @@ multiclass RVVFloatingMaskOutBuiltinSet {
defm "" : RVVOp0Op1BuiltinSet<NAME, "x",
[["vv", "vm", "mvv"],
["vf", "vm", "mve"]]>;
+ let RequiredFeatures = ["zvfbfa"] in
+ defm "" : RVVOp0Op1BuiltinSet<NAME, "y",
+ [["vv", "vm", "mvv"],
+ ["vf", "vm", "mve"]]>;
}
multiclass RVVFloatingMaskOutVFBuiltinSet
@@ -748,6 +792,10 @@ multiclass RVVFloatingWidenBinBuiltinSet {
defm "" : RVVWidenBuiltinSet<NAME, "x",
[["vv", "w", "wvv"],
["vf", "w", "wve"]]>;
+ let RequiredFeatures = ["zvfbfa"] in
+ defm "" : RVVWidenBuiltinSet<NAME, "y",
+ [["vv", "vw", "wvv"],
+ ["vf", "vw", "wve"]]>;
}
multiclass RVVFloatingWidenBinBuiltinSetRoundingMode {
@@ -758,6 +806,10 @@ multiclass RVVFloatingWidenBinBuiltinSetRoundingMode {
defm "" : RVVWidenBuiltinSet<NAME, "x",
[["vv", "w", "wvvu"],
["vf", "w", "wveu"]]>;
+ let RequiredFeatures = ["zvfbfa"] in
+ defm "" : RVVWidenBuiltinSet<NAME, "y",
+ [["vv", "vw", "wvvu"],
+ ["vf", "vw", "wveu"]]>;
}
multiclass RVVFloatingWidenOp0BinBuiltinSet {
@@ -768,6 +820,10 @@ multiclass RVVFloatingWidenOp0BinBuiltinSet {
defm "" : RVVWidenWOp0BuiltinSet<NAME # "_w", "x",
[["wv", "w", "wwv"],
["wf", "w", "wwe"]]>;
+ let RequiredFeatures = ["zvfbfa"] in
+ defm "" : RVVWidenWOp0BuiltinSet<NAME # "_w", "y",
+ [["wv", "vw", "wwv"],
+ ["wf", "ew", "wwe"]]>;
}
multiclass RVVFloatingWidenOp0BinBuiltinSetRoundingMode {
@@ -778,4 +834,8 @@ multiclass RVVFloatingWidenOp0BinBuiltinSetRoundingMode {
defm "" : RVVWidenWOp0BuiltinSet<NAME # "_w", "x",
[["wv", "w", "wwvu"],
["wf", "w", "wweu"]]>;
+ let RequiredFeatures = ["zvfbfa"] in
+ defm "" : RVVWidenWOp0BuiltinSet<NAME # "_w", "y",
+ [["wv", "vw", "wwvu"],
+ ["wf", "ew", "wweu"]]>;
}
diff --git a/clang/include/clang/Sema/Sema.h b/clang/include/clang/Sema/Sema.h
index e3faaac..add4c15 100644
--- a/clang/include/clang/Sema/Sema.h
+++ b/clang/include/clang/Sema/Sema.h
@@ -3961,6 +3961,13 @@ public:
bool &AddToScope,
ArrayRef<BindingDecl *> Bindings = {});
+private:
+ // Perform a check on an AsmLabel to verify its consistency and emit
+ // diagnostics in case of an error.
+ void CheckAsmLabel(Scope *S, Expr *AsmLabelExpr, StorageClass SC,
+ TypeSourceInfo *TInfo, VarDecl *);
+
+public:
/// Perform semantic checking on a newly-created variable
/// declaration.
///
diff --git a/clang/include/clang/StaticAnalyzer/Core/PathSensitive/CallEvent.h b/clang/include/clang/StaticAnalyzer/Core/PathSensitive/CallEvent.h
index c233ca1..4aee165 100644
--- a/clang/include/clang/StaticAnalyzer/Core/PathSensitive/CallEvent.h
+++ b/clang/include/clang/StaticAnalyzer/Core/PathSensitive/CallEvent.h
@@ -211,6 +211,16 @@ protected:
getExtraInvalidatedValues(ValueList &Values,
RegionAndSymbolInvalidationTraits *ETraits) const {}
+ /// A state for looking up relevant Environment entries (arguments, return
+ /// value), dynamic type information and similar "stable" things.
+ /// WARNING: During the evaluation of a function call, several state
+ /// transitions happen, so this state can become partially obsolete!
+ ///
+ /// TODO: Instead of storing a complete state object in the CallEvent, only
+ /// store the relevant parts (such as argument/return SVals etc.) that aren't
+ /// allowed to become obsolete until the end of the call evaluation.
+ ProgramStateRef getState() const { return State; }
+
public:
CallEvent &operator=(const CallEvent &) = delete;
virtual ~CallEvent() = default;
@@ -231,8 +241,11 @@ public:
}
void setForeign(bool B) const { Foreign = B; }
- /// The state in which the call is being evaluated.
- const ProgramStateRef &getState() const { return State; }
+ /// NOTE: There are plans for refactoring that would eliminate this method.
+ /// Prefer to use CheckerContext::getASTContext if possible!
+ const ASTContext &getASTContext() const {
+ return getState()->getStateManager().getContext();
+ }
/// The context in which the call is being evaluated.
const LocationContext *getLocationContext() const { return LCtx; }
diff --git a/clang/lib/AST/ByteCode/InterpBuiltin.cpp b/clang/lib/AST/ByteCode/InterpBuiltin.cpp
index 5838cf8..0cb4910 100644
--- a/clang/lib/AST/ByteCode/InterpBuiltin.cpp
+++ b/clang/lib/AST/ByteCode/InterpBuiltin.cpp
@@ -3621,6 +3621,15 @@ bool InterpretBuiltin(InterpState &S, CodePtr OpPC, const CallExpr *Call,
return LHS.isSigned() ? LHS.ssub_sat(RHS) : LHS.usub_sat(RHS);
});
+ case clang::X86::BI__builtin_ia32_pmulhrsw128:
+ case clang::X86::BI__builtin_ia32_pmulhrsw256:
+ case clang::X86::BI__builtin_ia32_pmulhrsw512:
+ return interp__builtin_elementwise_int_binop(
+ S, OpPC, Call, [](const APSInt &LHS, const APSInt &RHS) {
+ return (llvm::APIntOps::mulsExtended(LHS, RHS).ashr(14) + 1)
+ .extractBits(16, 1);
+ });
+
case clang::X86::BI__builtin_ia32_pavgb128:
case clang::X86::BI__builtin_ia32_pavgw128:
case clang::X86::BI__builtin_ia32_pavgb256:
diff --git a/clang/lib/AST/ExprConstant.cpp b/clang/lib/AST/ExprConstant.cpp
index 16141b2..e308c17 100644
--- a/clang/lib/AST/ExprConstant.cpp
+++ b/clang/lib/AST/ExprConstant.cpp
@@ -11819,6 +11819,14 @@ bool VectorExprEvaluator::VisitCallExpr(const CallExpr *E) {
case clang::X86::BI__builtin_ia32_pavgw512:
return EvaluateBinOpExpr(llvm::APIntOps::avgCeilU);
+ case clang::X86::BI__builtin_ia32_pmulhrsw128:
+ case clang::X86::BI__builtin_ia32_pmulhrsw256:
+ case clang::X86::BI__builtin_ia32_pmulhrsw512:
+ return EvaluateBinOpExpr([](const APSInt &LHS, const APSInt &RHS) {
+ return (llvm::APIntOps::mulsExtended(LHS, RHS).ashr(14) + 1)
+ .extractBits(16, 1);
+ });
+
case clang::X86::BI__builtin_ia32_pmaddubsw128:
case clang::X86::BI__builtin_ia32_pmaddubsw256:
case clang::X86::BI__builtin_ia32_pmaddubsw512:
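For reference, the rounding these lambdas perform is the pmulhrsw
fixed-point multiply-high with round and scale; a minimal scalar sketch of
the per-element computation, assuming ordinary two's-complement arithmetic:

  #include <cstdint>

  // Widen to 32 bits, multiply, shift right by 14, add the rounding bit,
  // then drop the low bit and keep the low 16 bits of the result.
  inline int16_t mulhrs16(int16_t a, int16_t b) {
    int32_t prod = int32_t(a) * int32_t(b);
    return static_cast<int16_t>(((prod >> 14) + 1) >> 1);
  }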
diff --git a/clang/lib/CIR/CodeGen/CIRGenValue.h b/clang/lib/CIR/CodeGen/CIRGenValue.h
index 08d9913..c05142e 100644
--- a/clang/lib/CIR/CodeGen/CIRGenValue.h
+++ b/clang/lib/CIR/CodeGen/CIRGenValue.h
@@ -307,8 +307,8 @@ class AggValueSlot {
/// This is set to true if some external code is responsible for setting up a
/// destructor for the slot. Otherwise the code which constructs it should
/// push the appropriate cleanup.
- LLVM_PREFERRED_TYPE(bool)
- [[maybe_unused]] unsigned destructedFlag : 1;
+ [[maybe_unused]]
+ LLVM_PREFERRED_TYPE(bool) unsigned destructedFlag : 1;
/// This is set to true if the memory in the slot is known to be zero before
/// the assignment into it. This means that zero fields don't need to be set.
@@ -326,16 +326,16 @@ class AggValueSlot {
/// over. Since it's invalid in general to memcpy a non-POD C++
/// object, it's important that this flag never be set when
/// evaluating an expression which constructs such an object.
- LLVM_PREFERRED_TYPE(bool)
- [[maybe_unused]] unsigned aliasedFlag : 1;
+ [[maybe_unused]]
+ LLVM_PREFERRED_TYPE(bool) unsigned aliasedFlag : 1;
/// This is set to true if the tail padding of this slot might overlap
/// another object that may have already been initialized (and whose
/// value must be preserved by this initialization). If so, we may only
/// store up to the dsize of the type. Otherwise we can widen stores to
/// the size of the type.
- LLVM_PREFERRED_TYPE(bool)
- [[maybe_unused]] unsigned overlapFlag : 1;
+ [[maybe_unused]]
+ LLVM_PREFERRED_TYPE(bool) unsigned overlapFlag : 1;
public:
enum IsDestructed_t { IsNotDestructed, IsDestructed };
diff --git a/clang/lib/CodeGen/CGHLSLBuiltins.cpp b/clang/lib/CodeGen/CGHLSLBuiltins.cpp
index 4f2f5a76..384bd59 100644
--- a/clang/lib/CodeGen/CGHLSLBuiltins.cpp
+++ b/clang/lib/CodeGen/CGHLSLBuiltins.cpp
@@ -160,6 +160,16 @@ static Value *handleHlslSplitdouble(const CallExpr *E, CodeGenFunction *CGF) {
return LastInst;
}
+static Value *emitBufferStride(CodeGenFunction *CGF, const Expr *HandleExpr,
+ LValue &Stride) {
+ // Figure out the stride of the buffer elements from the handle type.
+ auto *HandleTy =
+ cast<HLSLAttributedResourceType>(HandleExpr->getType().getTypePtr());
+ QualType ElementTy = HandleTy->getContainedType();
+ Value *StrideValue = CGF->getTypeSize(ElementTy);
+ return CGF->Builder.CreateStore(StrideValue, Stride.getAddress());
+}
+
// Return dot product intrinsic that corresponds to the QT scalar type
static Intrinsic::ID getDotProductIntrinsic(CGHLSLRuntime &RT, QualType QT) {
if (QT->isFloatingType())
@@ -372,6 +382,19 @@ Value *CodeGenFunction::EmitHLSLBuiltinExpr(unsigned BuiltinID,
RetTy, CGM.getHLSLRuntime().getNonUniformResourceIndexIntrinsic(),
ArrayRef<Value *>{IndexOp});
}
+ case Builtin::BI__builtin_hlsl_resource_getdimensions_x: {
+ Value *Handle = EmitScalarExpr(E->getArg(0));
+ LValue Dim = EmitLValue(E->getArg(1));
+ llvm::Type *RetTy = llvm::Type::getInt32Ty(getLLVMContext());
+ Value *DimValue = Builder.CreateIntrinsic(
+ RetTy, CGM.getHLSLRuntime().getGetDimensionsXIntrinsic(),
+ ArrayRef<Value *>{Handle});
+ return Builder.CreateStore(DimValue, Dim.getAddress());
+ }
+ case Builtin::BI__builtin_hlsl_resource_getstride: {
+ LValue Stride = EmitLValue(E->getArg(1));
+ return emitBufferStride(this, E->getArg(0), Stride);
+ }
case Builtin::BI__builtin_hlsl_all: {
Value *Op0 = EmitScalarExpr(E->getArg(0));
return Builder.CreateIntrinsic(
diff --git a/clang/lib/CodeGen/CGHLSLRuntime.h b/clang/lib/CodeGen/CGHLSLRuntime.h
index 7c6c285..103b4a9 100644
--- a/clang/lib/CodeGen/CGHLSLRuntime.h
+++ b/clang/lib/CodeGen/CGHLSLRuntime.h
@@ -135,6 +135,7 @@ public:
GENERATE_HLSL_INTRINSIC_FUNCTION(BufferUpdateCounter, resource_updatecounter)
GENERATE_HLSL_INTRINSIC_FUNCTION(GroupMemoryBarrierWithGroupSync,
group_memory_barrier_with_group_sync)
+ GENERATE_HLSL_INTRINSIC_FUNCTION(GetDimensionsX, resource_getdimensions_x)
//===----------------------------------------------------------------------===//
// End of reserved area for HLSL intrinsic getters.
diff --git a/clang/lib/Format/ContinuationIndenter.cpp b/clang/lib/Format/ContinuationIndenter.cpp
index cd4c1aa..b7d8569 100644
--- a/clang/lib/Format/ContinuationIndenter.cpp
+++ b/clang/lib/Format/ContinuationIndenter.cpp
@@ -411,7 +411,7 @@ bool ContinuationIndenter::mustBreak(const LineState &State) {
}
if (CurrentState.BreakBeforeClosingBrace &&
(Current.closesBlockOrBlockTypeList(Style) ||
- (Current.is(tok::r_brace) &&
+ (Current.is(tok::r_brace) && Current.MatchingParen &&
Current.isBlockIndentedInitRBrace(Style)))) {
return true;
}
diff --git a/clang/lib/Format/FormatToken.cpp b/clang/lib/Format/FormatToken.cpp
index cb3fc1c..cf02280 100644
--- a/clang/lib/Format/FormatToken.cpp
+++ b/clang/lib/Format/FormatToken.cpp
@@ -65,12 +65,13 @@ bool FormatToken::isTypeOrIdentifier(const LangOptions &LangOpts) const {
bool FormatToken::isBlockIndentedInitRBrace(const FormatStyle &Style) const {
assert(is(tok::r_brace));
+ assert(MatchingParen);
+ assert(MatchingParen->is(tok::l_brace));
if (!Style.Cpp11BracedListStyle ||
Style.AlignAfterOpenBracket != FormatStyle::BAS_BlockIndent) {
return false;
}
const auto *LBrace = MatchingParen;
- assert(LBrace && LBrace->is(tok::l_brace));
if (LBrace->is(BK_BracedInit))
return true;
if (LBrace->Previous && LBrace->Previous->is(tok::equal))
diff --git a/clang/lib/Format/TokenAnnotator.cpp b/clang/lib/Format/TokenAnnotator.cpp
index 5b784ed..ffbd383 100644
--- a/clang/lib/Format/TokenAnnotator.cpp
+++ b/clang/lib/Format/TokenAnnotator.cpp
@@ -3794,18 +3794,12 @@ static bool isFunctionDeclarationName(const LangOptions &LangOpts,
if (Current.is(TT_FunctionDeclarationName))
return true;
- if (!Current.Tok.getIdentifierInfo())
+ if (Current.isNoneOf(tok::identifier, tok::kw_operator))
return false;
const auto *Prev = Current.getPreviousNonComment();
assert(Prev);
- if (Prev->is(tok::coloncolon))
- Prev = Prev->Previous;
-
- if (!Prev)
- return false;
-
const auto &Previous = *Prev;
if (const auto *PrevPrev = Previous.getPreviousNonComment();
@@ -3854,6 +3848,8 @@ static bool isFunctionDeclarationName(const LangOptions &LangOpts,
// Find parentheses of parameter list.
if (Current.is(tok::kw_operator)) {
+ if (Line.startsWith(tok::kw_friend))
+ return true;
if (Previous.Tok.getIdentifierInfo() &&
Previous.isNoneOf(tok::kw_return, tok::kw_co_return)) {
return true;
diff --git a/clang/lib/Headers/avx2intrin.h b/clang/lib/Headers/avx2intrin.h
index fa7f4c2..d35bc0e 100644
--- a/clang/lib/Headers/avx2intrin.h
+++ b/clang/lib/Headers/avx2intrin.h
@@ -1650,9 +1650,8 @@ _mm256_mul_epi32(__m256i __a, __m256i __b) {
/// \param __b
/// A 256-bit vector of [16 x i16] containing one of the source operands.
/// \returns A 256-bit vector of [16 x i16] containing the rounded products.
-static __inline__ __m256i __DEFAULT_FN_ATTRS256
-_mm256_mulhrs_epi16(__m256i __a, __m256i __b)
-{
+static __inline__ __m256i __DEFAULT_FN_ATTRS256_CONSTEXPR
+_mm256_mulhrs_epi16(__m256i __a, __m256i __b) {
return (__m256i)__builtin_ia32_pmulhrsw256((__v16hi)__a, (__v16hi)__b);
}
@@ -1670,8 +1669,7 @@ _mm256_mulhrs_epi16(__m256i __a, __m256i __b)
/// A 256-bit vector of [16 x i16] containing one of the source operands.
/// \returns A 256-bit vector of [16 x i16] containing the products.
static __inline__ __m256i __DEFAULT_FN_ATTRS256_CONSTEXPR
-_mm256_mulhi_epu16(__m256i __a, __m256i __b)
-{
+_mm256_mulhi_epu16(__m256i __a, __m256i __b) {
return (__m256i)__builtin_ia32_pmulhuw256((__v16hu)__a, (__v16hu)__b);
}
diff --git a/clang/lib/Headers/avx512bwintrin.h b/clang/lib/Headers/avx512bwintrin.h
index 23b2d29..ac75b6c 100644
--- a/clang/lib/Headers/avx512bwintrin.h
+++ b/clang/lib/Headers/avx512bwintrin.h
@@ -1003,23 +1003,20 @@ _mm512_maskz_permutex2var_epi16(__mmask32 __U, __m512i __A, __m512i __I,
(__v32hi)_mm512_setzero_si512());
}
-static __inline__ __m512i __DEFAULT_FN_ATTRS512
-_mm512_mulhrs_epi16(__m512i __A, __m512i __B)
-{
+static __inline__ __m512i __DEFAULT_FN_ATTRS512_CONSTEXPR
+_mm512_mulhrs_epi16(__m512i __A, __m512i __B) {
return (__m512i)__builtin_ia32_pmulhrsw512((__v32hi)__A, (__v32hi)__B);
}
-static __inline__ __m512i __DEFAULT_FN_ATTRS512
-_mm512_mask_mulhrs_epi16(__m512i __W, __mmask32 __U, __m512i __A, __m512i __B)
-{
+static __inline__ __m512i __DEFAULT_FN_ATTRS512_CONSTEXPR
+_mm512_mask_mulhrs_epi16(__m512i __W, __mmask32 __U, __m512i __A, __m512i __B) {
return (__m512i)__builtin_ia32_selectw_512((__mmask32)__U,
(__v32hi)_mm512_mulhrs_epi16(__A, __B),
(__v32hi)__W);
}
-static __inline__ __m512i __DEFAULT_FN_ATTRS512
-_mm512_maskz_mulhrs_epi16(__mmask32 __U, __m512i __A, __m512i __B)
-{
+static __inline__ __m512i __DEFAULT_FN_ATTRS512_CONSTEXPR
+_mm512_maskz_mulhrs_epi16(__mmask32 __U, __m512i __A, __m512i __B) {
return (__m512i)__builtin_ia32_selectw_512((__mmask32)__U,
(__v32hi)_mm512_mulhrs_epi16(__A, __B),
(__v32hi)_mm512_setzero_si512());
diff --git a/clang/lib/Headers/avx512vlbwintrin.h b/clang/lib/Headers/avx512vlbwintrin.h
index 639fb60..0fcfe37 100644
--- a/clang/lib/Headers/avx512vlbwintrin.h
+++ b/clang/lib/Headers/avx512vlbwintrin.h
@@ -1510,28 +1510,28 @@ _mm256_mask_cvtusepi16_storeu_epi8 (void * __P, __mmask16 __M, __m256i __A)
__builtin_ia32_pmovuswb256mem_mask ((__v16qi*) __P, (__v16hi) __A, __M);
}
-static __inline__ __m128i __DEFAULT_FN_ATTRS128
+static __inline__ __m128i __DEFAULT_FN_ATTRS128_CONSTEXPR
_mm_mask_mulhrs_epi16(__m128i __W, __mmask8 __U, __m128i __X, __m128i __Y) {
return (__m128i)__builtin_ia32_selectw_128((__mmask8)__U,
(__v8hi)_mm_mulhrs_epi16(__X, __Y),
(__v8hi)__W);
}
-static __inline__ __m128i __DEFAULT_FN_ATTRS128
+static __inline__ __m128i __DEFAULT_FN_ATTRS128_CONSTEXPR
_mm_maskz_mulhrs_epi16(__mmask8 __U, __m128i __X, __m128i __Y) {
return (__m128i)__builtin_ia32_selectw_128((__mmask8)__U,
(__v8hi)_mm_mulhrs_epi16(__X, __Y),
(__v8hi)_mm_setzero_si128());
}
-static __inline__ __m256i __DEFAULT_FN_ATTRS256
+static __inline__ __m256i __DEFAULT_FN_ATTRS256_CONSTEXPR
_mm256_mask_mulhrs_epi16(__m256i __W, __mmask16 __U, __m256i __X, __m256i __Y) {
return (__m256i)__builtin_ia32_selectw_256((__mmask16)__U,
(__v16hi)_mm256_mulhrs_epi16(__X, __Y),
(__v16hi)__W);
}
-static __inline__ __m256i __DEFAULT_FN_ATTRS256
+static __inline__ __m256i __DEFAULT_FN_ATTRS256_CONSTEXPR
_mm256_maskz_mulhrs_epi16(__mmask16 __U, __m256i __X, __m256i __Y) {
return (__m256i)__builtin_ia32_selectw_256((__mmask16)__U,
(__v16hi)_mm256_mulhrs_epi16(__X, __Y),
diff --git a/clang/lib/Headers/tmmintrin.h b/clang/lib/Headers/tmmintrin.h
index ee96caa..5d0f20f 100644
--- a/clang/lib/Headers/tmmintrin.h
+++ b/clang/lib/Headers/tmmintrin.h
@@ -544,8 +544,8 @@ _mm_maddubs_pi16(__m64 __a, __m64 __b) {
/// A 128-bit vector of [8 x i16] containing one of the source operands.
/// \returns A 128-bit vector of [8 x i16] containing the rounded and scaled
/// products of both operands.
-static __inline__ __m128i __DEFAULT_FN_ATTRS _mm_mulhrs_epi16(__m128i __a,
- __m128i __b) {
+static __inline__ __m128i __DEFAULT_FN_ATTRS_CONSTEXPR
+_mm_mulhrs_epi16(__m128i __a, __m128i __b) {
return (__m128i)__builtin_ia32_pmulhrsw128((__v8hi)__a, (__v8hi)__b);
}
@@ -563,11 +563,10 @@ static __inline__ __m128i __DEFAULT_FN_ATTRS _mm_mulhrs_epi16(__m128i __a,
/// A 64-bit vector of [4 x i16] containing one of the source operands.
/// \returns A 64-bit vector of [4 x i16] containing the rounded and scaled
/// products of both operands.
-static __inline__ __m64 __DEFAULT_FN_ATTRS
-_mm_mulhrs_pi16(__m64 __a, __m64 __b)
-{
- return __trunc64(__builtin_ia32_pmulhrsw128((__v8hi)__anyext128(__a),
- (__v8hi)__anyext128(__b)));
+static __inline__ __m64 __DEFAULT_FN_ATTRS_CONSTEXPR
+_mm_mulhrs_pi16(__m64 __a, __m64 __b) {
+ return __trunc64(__builtin_ia32_pmulhrsw128((__v8hi)__zext128(__a),
+ (__v8hi)__zext128(__b)));
}
/// Copies the 8-bit integers from a 128-bit integer vector to the
diff --git a/clang/lib/Parse/ParseTemplate.cpp b/clang/lib/Parse/ParseTemplate.cpp
index dbc7cbc..330a9c6 100644
--- a/clang/lib/Parse/ParseTemplate.cpp
+++ b/clang/lib/Parse/ParseTemplate.cpp
@@ -533,6 +533,12 @@ bool Parser::isTypeConstraintAnnotation() {
bool Parser::TryAnnotateTypeConstraint() {
if (!getLangOpts().CPlusPlus20)
return false;
+ // The type constraint may declare template parameters, notably
+ // if it contains a generic lambda, so we need to increment
+ // the template depth as these parameters would not be instantiated
+ // at the current depth.
+ TemplateParameterDepthRAII CurTemplateDepthTracker(TemplateParameterDepth);
+ ++CurTemplateDepthTracker;
CXXScopeSpec SS;
bool WasScopeAnnotation = Tok.is(tok::annot_cxxscope);
if (ParseOptionalCXXScopeSpecifier(SS, /*ObjectType=*/nullptr,
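One shape of code that exercises this path, with a generic lambda inside a
type-constraint's argument list, might look like the following hypothetical
reproducer (not the actual test from #GH162092):

  template <class, class>
  concept Unrelated = true; // trivial constraint, for illustration only

  // The constraint argument contains a generic lambda, whose 'auto' parameter
  // lives at a deeper template depth than T.
  template <Unrelated<decltype([](auto x) { return x; })> T>
  void g(T);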
diff --git a/clang/lib/Sema/DeclSpec.cpp b/clang/lib/Sema/DeclSpec.cpp
index 184d31e..9da3d0d 100644
--- a/clang/lib/Sema/DeclSpec.cpp
+++ b/clang/lib/Sema/DeclSpec.cpp
@@ -1369,7 +1369,8 @@ void DeclSpec::Finish(Sema &S, const PrintingPolicy &Policy) {
if (S.getLangOpts().C23 &&
getConstexprSpecifier() == ConstexprSpecKind::Constexpr &&
- StorageClassSpec == SCS_extern) {
+ getTypeSpecType() != TST_unspecified &&
+ (StorageClassSpec == SCS_extern || StorageClassSpec == SCS_auto)) {
S.Diag(ConstexprLoc, diag::err_invalid_decl_spec_combination)
<< DeclSpec::getSpecifierName(getStorageClassSpec())
<< SourceRange(getStorageClassSpecLoc());
diff --git a/clang/lib/Sema/HLSLBuiltinTypeDeclBuilder.cpp b/clang/lib/Sema/HLSLBuiltinTypeDeclBuilder.cpp
index 40c318a..066acf6 100644
--- a/clang/lib/Sema/HLSLBuiltinTypeDeclBuilder.cpp
+++ b/clang/lib/Sema/HLSLBuiltinTypeDeclBuilder.cpp
@@ -57,6 +57,29 @@ CXXConstructorDecl *lookupCopyConstructor(QualType ResTy) {
return CD;
return nullptr;
}
+
+ParameterABI
+convertParamModifierToParamABI(HLSLParamModifierAttr::Spelling Modifier) {
+ assert(Modifier != HLSLParamModifierAttr::Spelling::Keyword_in &&
+ "HLSL 'in' parameters modifier cannot be converted to ParameterABI");
+ switch (Modifier) {
+ case HLSLParamModifierAttr::Spelling::Keyword_out:
+ return ParameterABI::HLSLOut;
+ case HLSLParamModifierAttr::Spelling::Keyword_inout:
+ return ParameterABI::HLSLInOut;
+ default:
+ llvm_unreachable("Invalid HLSL parameter modifier");
+ }
+}
+
+QualType getInoutParameterType(ASTContext &AST, QualType Ty) {
+ assert(!Ty->isReferenceType() &&
+ "Pointer and reference types cannot be inout or out parameters");
+ Ty = AST.getLValueReferenceType(Ty);
+ Ty.addRestrict();
+ return Ty;
+}
+
} // namespace
// Builder for template arguments of builtin types. Used internally
@@ -430,19 +453,36 @@ BuiltinTypeMethodBuilder::addParam(StringRef Name, QualType Ty,
void BuiltinTypeMethodBuilder::createDecl() {
assert(Method == nullptr && "Method or constructor is already created");
- // create method or constructor type
+ // create function prototype
ASTContext &AST = DeclBuilder.SemaRef.getASTContext();
SmallVector<QualType> ParamTypes;
- for (Param &MP : Params)
+ SmallVector<FunctionType::ExtParameterInfo> ParamExtInfos(Params.size());
+ uint32_t ArgIndex = 0;
+
+ // Collect parameter types and extended parameter info for the prototype.
+ bool UseParamExtInfo = false;
+ for (Param &MP : Params) {
+ if (MP.Modifier != HLSLParamModifierAttr::Keyword_in) {
+ UseParamExtInfo = true;
+ FunctionType::ExtParameterInfo &PI = ParamExtInfos[ArgIndex];
+ ParamExtInfos[ArgIndex] =
+ PI.withABI(convertParamModifierToParamABI(MP.Modifier));
+ if (!MP.Ty->isDependentType())
+ MP.Ty = getInoutParameterType(AST, MP.Ty);
+ }
ParamTypes.emplace_back(MP.Ty);
+ ++ArgIndex;
+ }
FunctionProtoType::ExtProtoInfo ExtInfo;
+ if (UseParamExtInfo)
+ ExtInfo.ExtParameterInfos = ParamExtInfos.data();
if (IsConst)
ExtInfo.TypeQuals.addConst();
QualType FuncTy = AST.getFunctionType(ReturnTy, ParamTypes, ExtInfo);
- // create method or constructor decl
+ // Create method or constructor declaration.
auto *TSInfo = AST.getTrivialTypeSourceInfo(FuncTy, SourceLocation());
DeclarationNameInfo NameInfo = DeclarationNameInfo(Name, SourceLocation());
if (IsCtor)
@@ -455,7 +495,7 @@ void BuiltinTypeMethodBuilder::createDecl() {
AST, DeclBuilder.Record, SourceLocation(), NameInfo, FuncTy, TSInfo, SC,
false, false, ConstexprSpecKind::Unspecified, SourceLocation());
- // create params & set them to the function prototype
+ // Create params & set them to the method/constructor and function prototype.
SmallVector<ParmVarDecl *> ParmDecls;
unsigned CurScopeDepth = DeclBuilder.SemaRef.getCurScope()->getDepth();
auto FnProtoLoc =
@@ -1258,5 +1298,37 @@ BuiltinTypeDeclBuilder &BuiltinTypeDeclBuilder::addConsumeMethod() {
.finalize();
}
+BuiltinTypeDeclBuilder &
+BuiltinTypeDeclBuilder::addGetDimensionsMethodForBuffer() {
+ using PH = BuiltinTypeMethodBuilder::PlaceHolder;
+ ASTContext &AST = SemaRef.getASTContext();
+ QualType UIntTy = AST.UnsignedIntTy;
+
+ QualType HandleTy = getResourceHandleField()->getType();
+ auto *AttrResTy = cast<HLSLAttributedResourceType>(HandleTy.getTypePtr());
+
+ // Structured buffers (raw buffers other than {RW}ByteAddressBuffer) have
+ // the overload GetDimensions(out uint numStructs, out uint stride).
+ if (AttrResTy->getAttrs().RawBuffer &&
+ AttrResTy->getContainedType() != AST.Char8Ty) {
+ return BuiltinTypeMethodBuilder(*this, "GetDimensions", AST.VoidTy)
+ .addParam("numStructs", UIntTy, HLSLParamModifierAttr::Keyword_out)
+ .addParam("stride", UIntTy, HLSLParamModifierAttr::Keyword_out)
+ .callBuiltin("__builtin_hlsl_resource_getdimensions_x", QualType(),
+ PH::Handle, PH::_0)
+ .callBuiltin("__builtin_hlsl_resource_getstride", QualType(),
+ PH::Handle, PH::_1)
+ .finalize();
+ }
+
+ // Typed buffers and {RW}ByteAddressBuffer have the overload
+ // GetDimensions(out uint dim).
+ return BuiltinTypeMethodBuilder(*this, "GetDimensions", AST.VoidTy)
+ .addParam("dim", UIntTy, HLSLParamModifierAttr::Keyword_out)
+ .callBuiltin("__builtin_hlsl_resource_getdimensions_x", QualType(),
+ PH::Handle, PH::_0)
+ .finalize();
+}
+
} // namespace hlsl
} // namespace clang
diff --git a/clang/lib/Sema/HLSLBuiltinTypeDeclBuilder.h b/clang/lib/Sema/HLSLBuiltinTypeDeclBuilder.h
index 86cbd10..95e3a6c 100644
--- a/clang/lib/Sema/HLSLBuiltinTypeDeclBuilder.h
+++ b/clang/lib/Sema/HLSLBuiltinTypeDeclBuilder.h
@@ -94,6 +94,8 @@ public:
BuiltinTypeDeclBuilder &addAppendMethod();
BuiltinTypeDeclBuilder &addConsumeMethod();
+ BuiltinTypeDeclBuilder &addGetDimensionsMethodForBuffer();
+
private:
BuiltinTypeDeclBuilder &addCreateFromBinding();
BuiltinTypeDeclBuilder &addCreateFromImplicitBinding();
diff --git a/clang/lib/Sema/HLSLExternalSemaSource.cpp b/clang/lib/Sema/HLSLExternalSemaSource.cpp
index f28a037..6be84f1 100644
--- a/clang/lib/Sema/HLSLExternalSemaSource.cpp
+++ b/clang/lib/Sema/HLSLExternalSemaSource.cpp
@@ -380,6 +380,7 @@ void HLSLExternalSemaSource::defineHLSLTypesWithForwardDeclarations() {
/*RawBuffer=*/false, /*HasCounter=*/false)
.addArraySubscriptOperators()
.addLoadMethods()
+ .addGetDimensionsMethodForBuffer()
.completeDefinition();
});
@@ -392,6 +393,7 @@ void HLSLExternalSemaSource::defineHLSLTypesWithForwardDeclarations() {
/*RawBuffer=*/false, /*HasCounter=*/false)
.addArraySubscriptOperators()
.addLoadMethods()
+ .addGetDimensionsMethodForBuffer()
.completeDefinition();
});
@@ -404,6 +406,7 @@ void HLSLExternalSemaSource::defineHLSLTypesWithForwardDeclarations() {
/*RawBuffer=*/false, /*HasCounter=*/false)
.addArraySubscriptOperators()
.addLoadMethods()
+ .addGetDimensionsMethodForBuffer()
.completeDefinition();
});
@@ -415,6 +418,7 @@ void HLSLExternalSemaSource::defineHLSLTypesWithForwardDeclarations() {
/*RawBuffer=*/true, /*HasCounter=*/false)
.addArraySubscriptOperators()
.addLoadMethods()
+ .addGetDimensionsMethodForBuffer()
.completeDefinition();
});
@@ -428,6 +432,7 @@ void HLSLExternalSemaSource::defineHLSLTypesWithForwardDeclarations() {
.addLoadMethods()
.addIncrementCounterMethod()
.addDecrementCounterMethod()
+ .addGetDimensionsMethodForBuffer()
.completeDefinition();
});
@@ -439,6 +444,7 @@ void HLSLExternalSemaSource::defineHLSLTypesWithForwardDeclarations() {
setupBufferType(Decl, *SemaPtr, ResourceClass::UAV, /*IsROV=*/false,
/*RawBuffer=*/true, /*HasCounter=*/true)
.addAppendMethod()
+ .addGetDimensionsMethodForBuffer()
.completeDefinition();
});
@@ -450,6 +456,7 @@ void HLSLExternalSemaSource::defineHLSLTypesWithForwardDeclarations() {
setupBufferType(Decl, *SemaPtr, ResourceClass::UAV, /*IsROV=*/false,
/*RawBuffer=*/true, /*HasCounter=*/true)
.addConsumeMethod()
+ .addGetDimensionsMethodForBuffer()
.completeDefinition();
});
@@ -464,6 +471,7 @@ void HLSLExternalSemaSource::defineHLSLTypesWithForwardDeclarations() {
.addLoadMethods()
.addIncrementCounterMethod()
.addDecrementCounterMethod()
+ .addGetDimensionsMethodForBuffer()
.completeDefinition();
});
@@ -472,6 +480,7 @@ void HLSLExternalSemaSource::defineHLSLTypesWithForwardDeclarations() {
onCompletion(Decl, [this](CXXRecordDecl *Decl) {
setupBufferType(Decl, *SemaPtr, ResourceClass::SRV, /*IsROV=*/false,
/*RawBuffer=*/true, /*HasCounter=*/false)
+ .addGetDimensionsMethodForBuffer()
.completeDefinition();
});
Decl = BuiltinTypeDeclBuilder(*SemaPtr, HLSLNamespace, "RWByteAddressBuffer")
@@ -479,6 +488,7 @@ void HLSLExternalSemaSource::defineHLSLTypesWithForwardDeclarations() {
onCompletion(Decl, [this](CXXRecordDecl *Decl) {
setupBufferType(Decl, *SemaPtr, ResourceClass::UAV, /*IsROV=*/false,
/*RawBuffer=*/true, /*HasCounter=*/false)
+ .addGetDimensionsMethodForBuffer()
.completeDefinition();
});
Decl = BuiltinTypeDeclBuilder(*SemaPtr, HLSLNamespace,
@@ -487,6 +497,7 @@ void HLSLExternalSemaSource::defineHLSLTypesWithForwardDeclarations() {
onCompletion(Decl, [this](CXXRecordDecl *Decl) {
setupBufferType(Decl, *SemaPtr, ResourceClass::UAV, /*IsROV=*/true,
/*RawBuffer=*/true, /*HasCounter=*/false)
+ .addGetDimensionsMethodForBuffer()
.completeDefinition();
});
}
diff --git a/clang/lib/Sema/SemaDecl.cpp b/clang/lib/Sema/SemaDecl.cpp
index 04d46d6..fc3aabf 100644
--- a/clang/lib/Sema/SemaDecl.cpp
+++ b/clang/lib/Sema/SemaDecl.cpp
@@ -7640,6 +7640,58 @@ static bool isMainVar(DeclarationName Name, VarDecl *VD) {
VD->isExternC());
}
+void Sema::CheckAsmLabel(Scope *S, Expr *E, StorageClass SC,
+ TypeSourceInfo *TInfo, VarDecl *NewVD) {
+
+ // Return early if the declaration does not have an `asm` label.
+ if (E == nullptr)
+ return;
+
+ // The parser guarantees this is a string.
+ StringLiteral *SE = cast<StringLiteral>(E);
+ StringRef Label = SE->getString();
+ QualType R = TInfo->getType();
+ if (S->getFnParent() != nullptr) {
+ switch (SC) {
+ case SC_None:
+ case SC_Auto:
+ Diag(E->getExprLoc(), diag::warn_asm_label_on_auto_decl) << Label;
+ break;
+ case SC_Register:
+ // Local Named register
+ if (!Context.getTargetInfo().isValidGCCRegisterName(Label) &&
+ DeclAttrsMatchCUDAMode(getLangOpts(), getCurFunctionDecl()))
+ Diag(E->getExprLoc(), diag::err_asm_unknown_register_name) << Label;
+ break;
+ case SC_Static:
+ case SC_Extern:
+ case SC_PrivateExtern:
+ break;
+ }
+ } else if (SC == SC_Register) {
+ // Global Named register
+ if (DeclAttrsMatchCUDAMode(getLangOpts(), NewVD)) {
+ const auto &TI = Context.getTargetInfo();
+ bool HasSizeMismatch;
+
+ if (!TI.isValidGCCRegisterName(Label))
+ Diag(E->getExprLoc(), diag::err_asm_unknown_register_name) << Label;
+ else if (!TI.validateGlobalRegisterVariable(Label, Context.getTypeSize(R),
+ HasSizeMismatch))
+ Diag(E->getExprLoc(), diag::err_asm_invalid_global_var_reg) << Label;
+ else if (HasSizeMismatch)
+ Diag(E->getExprLoc(), diag::err_asm_register_size_mismatch) << Label;
+ }
+
+ if (!R->isIntegralType(Context) && !R->isPointerType()) {
+ Diag(TInfo->getTypeLoc().getBeginLoc(),
+ diag::err_asm_unsupported_register_type)
+ << TInfo->getTypeLoc().getSourceRange();
+ NewVD->setInvalidDecl(true);
+ }
+ }
+}
+
NamedDecl *Sema::ActOnVariableDeclarator(
Scope *S, Declarator &D, DeclContext *DC, TypeSourceInfo *TInfo,
LookupResult &Previous, MultiTemplateParamsArg TemplateParamLists,
@@ -8124,6 +8176,26 @@ NamedDecl *Sema::ActOnVariableDeclarator(
}
}
+ if (Expr *E = D.getAsmLabel()) {
+ // The parser guarantees this is a string.
+ StringLiteral *SE = cast<StringLiteral>(E);
+ StringRef Label = SE->getString();
+
+ // Insert the asm attribute.
+ NewVD->addAttr(AsmLabelAttr::Create(Context, Label, SE->getStrTokenLoc(0)));
+ } else if (!ExtnameUndeclaredIdentifiers.empty()) {
+ llvm::DenseMap<IdentifierInfo *, AsmLabelAttr *>::iterator I =
+ ExtnameUndeclaredIdentifiers.find(NewVD->getIdentifier());
+ if (I != ExtnameUndeclaredIdentifiers.end()) {
+ if (isDeclExternC(NewVD)) {
+ NewVD->addAttr(I->second);
+ ExtnameUndeclaredIdentifiers.erase(I);
+ } else
+ Diag(NewVD->getLocation(), diag::warn_redefine_extname_not_applied)
+ << /*Variable*/ 1 << NewVD;
+ }
+ }
+
// Handle attributes prior to checking for duplicates in MergeVarDecl
ProcessDeclAttributes(S, NewVD, D);
@@ -8174,65 +8246,11 @@ NamedDecl *Sema::ActOnVariableDeclarator(
if (getLangOpts().ObjCAutoRefCount && ObjC().inferObjCARCLifetime(NewVD))
NewVD->setInvalidDecl();
- // Handle GNU asm-label extension (encoded as an attribute).
- if (Expr *E = D.getAsmLabel()) {
- // The parser guarantees this is a string.
- StringLiteral *SE = cast<StringLiteral>(E);
- StringRef Label = SE->getString();
- if (S->getFnParent() != nullptr) {
- switch (SC) {
- case SC_None:
- case SC_Auto:
- Diag(E->getExprLoc(), diag::warn_asm_label_on_auto_decl) << Label;
- break;
- case SC_Register:
- // Local Named register
- if (!Context.getTargetInfo().isValidGCCRegisterName(Label) &&
- DeclAttrsMatchCUDAMode(getLangOpts(), getCurFunctionDecl()))
- Diag(E->getExprLoc(), diag::err_asm_unknown_register_name) << Label;
- break;
- case SC_Static:
- case SC_Extern:
- case SC_PrivateExtern:
- break;
- }
- } else if (SC == SC_Register) {
- // Global Named register
- if (DeclAttrsMatchCUDAMode(getLangOpts(), NewVD)) {
- const auto &TI = Context.getTargetInfo();
- bool HasSizeMismatch;
-
- if (!TI.isValidGCCRegisterName(Label))
- Diag(E->getExprLoc(), diag::err_asm_unknown_register_name) << Label;
- else if (!TI.validateGlobalRegisterVariable(Label,
- Context.getTypeSize(R),
- HasSizeMismatch))
- Diag(E->getExprLoc(), diag::err_asm_invalid_global_var_reg) << Label;
- else if (HasSizeMismatch)
- Diag(E->getExprLoc(), diag::err_asm_register_size_mismatch) << Label;
- }
-
- if (!R->isIntegralType(Context) && !R->isPointerType()) {
- Diag(TInfo->getTypeLoc().getBeginLoc(),
- diag::err_asm_unsupported_register_type)
- << TInfo->getTypeLoc().getSourceRange();
- NewVD->setInvalidDecl(true);
- }
- }
-
- NewVD->addAttr(AsmLabelAttr::Create(Context, Label, SE->getStrTokenLoc(0)));
- } else if (!ExtnameUndeclaredIdentifiers.empty()) {
- llvm::DenseMap<IdentifierInfo*,AsmLabelAttr*>::iterator I =
- ExtnameUndeclaredIdentifiers.find(NewVD->getIdentifier());
- if (I != ExtnameUndeclaredIdentifiers.end()) {
- if (isDeclExternC(NewVD)) {
- NewVD->addAttr(I->second);
- ExtnameUndeclaredIdentifiers.erase(I);
- } else
- Diag(NewVD->getLocation(), diag::warn_redefine_extname_not_applied)
- << /*Variable*/1 << NewVD;
- }
- }
+  // Check the ASM label here, as we need to know all other attributes of the
+  // Decl first. Otherwise, we cannot tell whether the asm label refers to the
+  // host or the device in a CUDA context: the device has different registers
+  // than the host, and we must know where the declaration will be placed.
+ CheckAsmLabel(S, D.getAsmLabel(), SC, TInfo, NewVD);
// Find the shadowed declaration before filtering for scope.
NamedDecl *ShadowedDecl = D.getCXXScopeSpec().isEmpty()
diff --git a/clang/lib/Sema/SemaHLSL.cpp b/clang/lib/Sema/SemaHLSL.cpp
index f347066..5b3e89f 100644
--- a/clang/lib/Sema/SemaHLSL.cpp
+++ b/clang/lib/Sema/SemaHLSL.cpp
@@ -3006,6 +3006,24 @@ bool SemaHLSL::CheckBuiltinFunctionCall(unsigned BuiltinID, CallExpr *TheCall) {
TheCall->setType(CounterHandleTy);
break;
}
+ case Builtin::BI__builtin_hlsl_resource_getdimensions_x: {
+ ASTContext &AST = SemaRef.getASTContext();
+ if (SemaRef.checkArgCount(TheCall, 2) ||
+ CheckResourceHandle(&SemaRef, TheCall, 0) ||
+ CheckArgTypeMatches(&SemaRef, TheCall->getArg(1), AST.UnsignedIntTy) ||
+ CheckModifiableLValue(&SemaRef, TheCall, 1))
+ return true;
+ break;
+ }
+ case Builtin::BI__builtin_hlsl_resource_getstride: {
+ ASTContext &AST = SemaRef.getASTContext();
+ if (SemaRef.checkArgCount(TheCall, 2) ||
+ CheckResourceHandle(&SemaRef, TheCall, 0) ||
+ CheckArgTypeMatches(&SemaRef, TheCall->getArg(1), AST.UnsignedIntTy) ||
+ CheckModifiableLValue(&SemaRef, TheCall, 1))
+ return true;
+ break;
+ }
case Builtin::BI__builtin_hlsl_and:
case Builtin::BI__builtin_hlsl_or: {
if (SemaRef.checkArgCount(TheCall, 2))
diff --git a/clang/lib/Sema/SemaRISCV.cpp b/clang/lib/Sema/SemaRISCV.cpp
index 3ba93ff9..c5ef0d5 100644
--- a/clang/lib/Sema/SemaRISCV.cpp
+++ b/clang/lib/Sema/SemaRISCV.cpp
@@ -1464,7 +1464,8 @@ void SemaRISCV::checkRVVTypeSupport(QualType Ty, SourceLocation Loc, Decl *D,
}
else if (Info.ElementType->isBFloat16Type() &&
!FeatureMap.lookup("zvfbfmin") &&
- !FeatureMap.lookup("xandesvbfhcvt"))
+ !FeatureMap.lookup("xandesvbfhcvt") &&
+ !FeatureMap.lookup("experimental-zvfbfa"))
if (DeclareAndesVectorBuiltins) {
Diag(Loc, diag::err_riscv_type_requires_extension, D)
<< Ty << "zvfbfmin or xandesvbfhcvt";
diff --git a/clang/lib/StaticAnalyzer/Checkers/BlockInCriticalSectionChecker.cpp b/clang/lib/StaticAnalyzer/Checkers/BlockInCriticalSectionChecker.cpp
index bf35bee..3ddd659 100644
--- a/clang/lib/StaticAnalyzer/Checkers/BlockInCriticalSectionChecker.cpp
+++ b/clang/lib/StaticAnalyzer/Checkers/BlockInCriticalSectionChecker.cpp
@@ -104,7 +104,7 @@ class RAIIMutexDescriptor {
// this function is called instead of early returning it. To avoid this, a
// bool variable (IdentifierInfoInitialized) is used and the function will
// be run only once.
- const auto &ASTCtx = Call.getState()->getStateManager().getContext();
+ const auto &ASTCtx = Call.getASTContext();
Guard = &ASTCtx.Idents.get(GuardName);
}
}
diff --git a/clang/lib/StaticAnalyzer/Checkers/CheckObjCDealloc.cpp b/clang/lib/StaticAnalyzer/Checkers/CheckObjCDealloc.cpp
index 9d3aeff..2420848 100644
--- a/clang/lib/StaticAnalyzer/Checkers/CheckObjCDealloc.cpp
+++ b/clang/lib/StaticAnalyzer/Checkers/CheckObjCDealloc.cpp
@@ -929,7 +929,7 @@ ObjCDeallocChecker::getValueReleasedByNillingOut(const ObjCMethodCall &M,
SVal Arg = M.getArgSVal(0);
ProgramStateRef notNilState, nilState;
std::tie(notNilState, nilState) =
- M.getState()->assume(Arg.castAs<DefinedOrUnknownSVal>());
+ C.getState()->assume(Arg.castAs<DefinedOrUnknownSVal>());
if (!(nilState && !notNilState))
return nullptr;
diff --git a/clang/lib/StaticAnalyzer/Checkers/ObjCSuperDeallocChecker.cpp b/clang/lib/StaticAnalyzer/Checkers/ObjCSuperDeallocChecker.cpp
index f984caf..227cbfa 100644
--- a/clang/lib/StaticAnalyzer/Checkers/ObjCSuperDeallocChecker.cpp
+++ b/clang/lib/StaticAnalyzer/Checkers/ObjCSuperDeallocChecker.cpp
@@ -34,7 +34,7 @@ class ObjCSuperDeallocChecker
this, "[super dealloc] should not be called more than once",
categories::CoreFoundationObjectiveC};
- void initIdentifierInfoAndSelectors(ASTContext &Ctx) const;
+ void initIdentifierInfoAndSelectors(const ASTContext &Ctx) const;
bool isSuperDeallocMessage(const ObjCMethodCall &M) const;
@@ -214,8 +214,8 @@ void ObjCSuperDeallocChecker::diagnoseCallArguments(const CallEvent &CE,
}
}
-void
-ObjCSuperDeallocChecker::initIdentifierInfoAndSelectors(ASTContext &Ctx) const {
+void ObjCSuperDeallocChecker::initIdentifierInfoAndSelectors(
+ const ASTContext &Ctx) const {
if (IIdealloc)
return;
@@ -230,7 +230,7 @@ ObjCSuperDeallocChecker::isSuperDeallocMessage(const ObjCMethodCall &M) const {
if (M.getOriginExpr()->getReceiverKind() != ObjCMessageExpr::SuperInstance)
return false;
- ASTContext &Ctx = M.getState()->getStateManager().getContext();
+ const ASTContext &Ctx = M.getASTContext();
initIdentifierInfoAndSelectors(Ctx);
return M.getSelector() == SELdealloc;
diff --git a/clang/lib/StaticAnalyzer/Checkers/StdVariantChecker.cpp b/clang/lib/StaticAnalyzer/Checkers/StdVariantChecker.cpp
index 4fc1c57..db8bbee 100644
--- a/clang/lib/StaticAnalyzer/Checkers/StdVariantChecker.cpp
+++ b/clang/lib/StaticAnalyzer/Checkers/StdVariantChecker.cpp
@@ -211,13 +211,13 @@ private:
if (!DefaultType)
return;
- ProgramStateRef State = ConstructorCall->getState();
+ ProgramStateRef State = C.getState();
State = State->set<VariantHeldTypeMap>(ThisMemRegion, *DefaultType);
C.addTransition(State);
}
bool handleStdGetCall(const CallEvent &Call, CheckerContext &C) const {
- ProgramStateRef State = Call.getState();
+ ProgramStateRef State = C.getState();
const auto &ArgType = Call.getArgSVal(0)
.getType(C.getASTContext())
diff --git a/clang/lib/StaticAnalyzer/Checkers/TaggedUnionModeling.h b/clang/lib/StaticAnalyzer/Checkers/TaggedUnionModeling.h
index dec4612..b8fb572 100644
--- a/clang/lib/StaticAnalyzer/Checkers/TaggedUnionModeling.h
+++ b/clang/lib/StaticAnalyzer/Checkers/TaggedUnionModeling.h
@@ -52,7 +52,7 @@ removeInformationStoredForDeadInstances(const CallEvent &Call,
template <class TypeMap>
void handleConstructorAndAssignment(const CallEvent &Call, CheckerContext &C,
SVal ThisSVal) {
- ProgramStateRef State = Call.getState();
+ ProgramStateRef State = C.getState();
if (!State)
return;
diff --git a/clang/lib/StaticAnalyzer/Core/CheckerManager.cpp b/clang/lib/StaticAnalyzer/Core/CheckerManager.cpp
index 44c6f9f..8ee4832 100644
--- a/clang/lib/StaticAnalyzer/Core/CheckerManager.cpp
+++ b/clang/lib/StaticAnalyzer/Core/CheckerManager.cpp
@@ -731,19 +731,22 @@ void CheckerManager::runCheckersForEvalCall(ExplodedNodeSet &Dst,
ExplodedNodeSet checkDst;
NodeBuilder B(Pred, checkDst, Eng.getBuilderContext());
+ ProgramStateRef State = Pred->getState();
+ CallEventRef<> UpdatedCall = Call.cloneWithState(State);
+
// Check if any of the EvalCall callbacks can evaluate the call.
for (const auto &EvalCallChecker : EvalCallCheckers) {
// TODO: Support the situation when the call doesn't correspond
// to any Expr.
ProgramPoint L = ProgramPoint::getProgramPoint(
- Call.getOriginExpr(), ProgramPoint::PostStmtKind,
+ UpdatedCall->getOriginExpr(), ProgramPoint::PostStmtKind,
Pred->getLocationContext(), EvalCallChecker.Checker);
bool evaluated = false;
- { // CheckerContext generates transitions(populates checkDest) on
+ { // CheckerContext generates transitions (populates checkDest) on
// destruction, so introduce the scope to make sure it gets properly
// populated.
CheckerContext C(B, Eng, Pred, L);
- evaluated = EvalCallChecker(Call, C);
+ evaluated = EvalCallChecker(*UpdatedCall, C);
}
#ifndef NDEBUG
if (evaluated && evaluatorChecker) {
@@ -774,7 +777,7 @@ void CheckerManager::runCheckersForEvalCall(ExplodedNodeSet &Dst,
// If none of the checkers evaluated the call, ask ExprEngine to handle it.
if (!evaluatorChecker) {
NodeBuilder B(Pred, Dst, Eng.getBuilderContext());
- Eng.defaultEvalCall(B, Pred, Call, CallOpts);
+ Eng.defaultEvalCall(B, Pred, *UpdatedCall, CallOpts);
}
}
}
diff --git a/clang/lib/StaticAnalyzer/Core/ExprEngineCallAndReturn.cpp b/clang/lib/StaticAnalyzer/Core/ExprEngineCallAndReturn.cpp
index 0c491b8..ac6c1d7 100644
--- a/clang/lib/StaticAnalyzer/Core/ExprEngineCallAndReturn.cpp
+++ b/clang/lib/StaticAnalyzer/Core/ExprEngineCallAndReturn.cpp
@@ -628,6 +628,8 @@ void ExprEngine::VisitCallExpr(const CallExpr *CE, ExplodedNode *Pred,
ProgramStateRef ExprEngine::finishArgumentConstruction(ProgramStateRef State,
const CallEvent &Call) {
+ // WARNING: The state attached to 'Call' may be obsolete, do not call any
+ // methods that rely on it!
const Expr *E = Call.getOriginExpr();
// FIXME: Constructors to placement arguments of operator new
// are not supported yet.
@@ -653,6 +655,8 @@ ProgramStateRef ExprEngine::finishArgumentConstruction(ProgramStateRef State,
void ExprEngine::finishArgumentConstruction(ExplodedNodeSet &Dst,
ExplodedNode *Pred,
const CallEvent &Call) {
+ // WARNING: The state attached to 'Call' may be obsolete, do not call any
+ // methods that rely on it!
ProgramStateRef State = Pred->getState();
ProgramStateRef CleanedState = finishArgumentConstruction(State, Call);
if (CleanedState == State) {
@@ -670,35 +674,33 @@ void ExprEngine::finishArgumentConstruction(ExplodedNodeSet &Dst,
}
void ExprEngine::evalCall(ExplodedNodeSet &Dst, ExplodedNode *Pred,
- const CallEvent &Call) {
- // WARNING: At this time, the state attached to 'Call' may be older than the
- // state in 'Pred'. This is a minor optimization since CheckerManager will
- // use an updated CallEvent instance when calling checkers, but if 'Call' is
- // ever used directly in this function all callers should be updated to pass
- // the most recent state. (It is probably not worth doing the work here since
- // for some callers this will not be necessary.)
+ const CallEvent &CallTemplate) {
+ // NOTE: CallTemplate is called a "template" because its attached state may
+ // be obsolete (compared to the state of Pred). The state-dependent methods
+ // of CallEvent should be used only after a `cloneWithState` call that
+ // attaches the up-to-date state to this template object.
// Run any pre-call checks using the generic call interface.
ExplodedNodeSet dstPreVisit;
- getCheckerManager().runCheckersForPreCall(dstPreVisit, Pred,
- Call, *this);
+ getCheckerManager().runCheckersForPreCall(dstPreVisit, Pred, CallTemplate,
+ *this);
// Actually evaluate the function call. We try each of the checkers
// to see if they can evaluate the function call, and get a callback at
// defaultEvalCall if all of them fail.
ExplodedNodeSet dstCallEvaluated;
- getCheckerManager().runCheckersForEvalCall(dstCallEvaluated, dstPreVisit,
- Call, *this, EvalCallOptions());
+ getCheckerManager().runCheckersForEvalCall(
+ dstCallEvaluated, dstPreVisit, CallTemplate, *this, EvalCallOptions());
// If there were other constructors called for object-type arguments
// of this call, clean them up.
ExplodedNodeSet dstArgumentCleanup;
for (ExplodedNode *I : dstCallEvaluated)
- finishArgumentConstruction(dstArgumentCleanup, I, Call);
+ finishArgumentConstruction(dstArgumentCleanup, I, CallTemplate);
ExplodedNodeSet dstPostCall;
getCheckerManager().runCheckersForPostCall(dstPostCall, dstArgumentCleanup,
- Call, *this);
+ CallTemplate, *this);
// Escaping symbols conjured during invalidating the regions above.
// Note that, for inlined calls the nodes were put back into the worklist,
@@ -708,12 +710,13 @@ void ExprEngine::evalCall(ExplodedNodeSet &Dst, ExplodedNode *Pred,
// Run pointerEscape callback with the newly conjured symbols.
SmallVector<std::pair<SVal, SVal>, 8> Escaped;
for (ExplodedNode *I : dstPostCall) {
- NodeBuilder B(I, Dst, *currBldrCtx);
ProgramStateRef State = I->getState();
+ CallEventRef<> Call = CallTemplate.cloneWithState(State);
+ NodeBuilder B(I, Dst, *currBldrCtx);
Escaped.clear();
{
unsigned Arg = -1;
- for (const ParmVarDecl *PVD : Call.parameters()) {
+ for (const ParmVarDecl *PVD : Call->parameters()) {
++Arg;
QualType ParamTy = PVD->getType();
if (ParamTy.isNull() ||
@@ -722,13 +725,13 @@ void ExprEngine::evalCall(ExplodedNodeSet &Dst, ExplodedNode *Pred,
QualType Pointee = ParamTy->getPointeeType();
if (Pointee.isConstQualified() || Pointee->isVoidType())
continue;
- if (const MemRegion *MR = Call.getArgSVal(Arg).getAsRegion())
+ if (const MemRegion *MR = Call->getArgSVal(Arg).getAsRegion())
Escaped.emplace_back(loc::MemRegionVal(MR), State->getSVal(MR, Pointee));
}
}
State = processPointerEscapedOnBind(State, Escaped, I->getLocationContext(),
- PSK_EscapeOutParameters, &Call);
+ PSK_EscapeOutParameters, &*Call);
if (State == I->getState())
Dst.insert(I);
@@ -1212,48 +1215,47 @@ static bool isTrivialObjectAssignment(const CallEvent &Call) {
}
void ExprEngine::defaultEvalCall(NodeBuilder &Bldr, ExplodedNode *Pred,
- const CallEvent &CallTemplate,
+ const CallEvent &Call,
const EvalCallOptions &CallOpts) {
// Make sure we have the most recent state attached to the call.
ProgramStateRef State = Pred->getState();
- CallEventRef<> Call = CallTemplate.cloneWithState(State);
// Special-case trivial assignment operators.
- if (isTrivialObjectAssignment(*Call)) {
- performTrivialCopy(Bldr, Pred, *Call);
+ if (isTrivialObjectAssignment(Call)) {
+ performTrivialCopy(Bldr, Pred, Call);
return;
}
// Try to inline the call.
// The origin expression here is just used as a kind of checksum;
// this should still be safe even for CallEvents that don't come from exprs.
- const Expr *E = Call->getOriginExpr();
+ const Expr *E = Call.getOriginExpr();
ProgramStateRef InlinedFailedState = getInlineFailedState(State, E);
if (InlinedFailedState) {
// If we already tried once and failed, make sure we don't retry later.
State = InlinedFailedState;
} else {
- RuntimeDefinition RD = Call->getRuntimeDefinition();
- Call->setForeign(RD.isForeign());
+ RuntimeDefinition RD = Call.getRuntimeDefinition();
+ Call.setForeign(RD.isForeign());
const Decl *D = RD.getDecl();
- if (shouldInlineCall(*Call, D, Pred, CallOpts)) {
+ if (shouldInlineCall(Call, D, Pred, CallOpts)) {
if (RD.mayHaveOtherDefinitions()) {
AnalyzerOptions &Options = getAnalysisManager().options;
// Explore with and without inlining the call.
if (Options.getIPAMode() == IPAK_DynamicDispatchBifurcate) {
- BifurcateCall(RD.getDispatchRegion(), *Call, D, Bldr, Pred);
+ BifurcateCall(RD.getDispatchRegion(), Call, D, Bldr, Pred);
return;
}
// Don't inline if we're not in any dynamic dispatch mode.
if (Options.getIPAMode() != IPAK_DynamicDispatch) {
- conservativeEvalCall(*Call, Bldr, Pred, State);
+ conservativeEvalCall(Call, Bldr, Pred, State);
return;
}
}
- ctuBifurcate(*Call, D, Bldr, Pred, State);
+ ctuBifurcate(Call, D, Bldr, Pred, State);
return;
}
}
@@ -1261,10 +1263,10 @@ void ExprEngine::defaultEvalCall(NodeBuilder &Bldr, ExplodedNode *Pred,
// If we can't inline it, clean up the state traits used only if the function
// is inlined.
State = removeStateTraitsUsedForArrayEvaluation(
- State, dyn_cast_or_null<CXXConstructExpr>(E), Call->getLocationContext());
+ State, dyn_cast_or_null<CXXConstructExpr>(E), Call.getLocationContext());
// Also handle the return value and invalidate the regions.
- conservativeEvalCall(*Call, Bldr, Pred, State);
+ conservativeEvalCall(Call, Bldr, Pred, State);
}
void ExprEngine::BifurcateCall(const MemRegion *BifurReg,
diff --git a/clang/lib/Support/RISCVVIntrinsicUtils.cpp b/clang/lib/Support/RISCVVIntrinsicUtils.cpp
index 5a4e805..dad3d0da 100644
--- a/clang/lib/Support/RISCVVIntrinsicUtils.cpp
+++ b/clang/lib/Support/RISCVVIntrinsicUtils.cpp
@@ -654,6 +654,9 @@ PrototypeDescriptor::parsePrototypeDescriptor(
case 'F':
TM |= TypeModifier::Float;
break;
+ case 'Y':
+ TM |= TypeModifier::BFloat;
+ break;
case 'S':
TM |= TypeModifier::LMUL1;
break;
@@ -704,6 +707,8 @@ void RVVType::applyModifier(const PrototypeDescriptor &Transformer) {
ElementBitwidth *= 2;
LMUL.MulLog2LMUL(1);
Scale = LMUL.getScale(ElementBitwidth);
+ if (ScalarType == ScalarTypeKind::BFloat)
+ ScalarType = ScalarTypeKind::Float;
break;
case VectorTypeModifier::Widening4XVector:
ElementBitwidth *= 4;
diff --git a/clang/test/AST/HLSL/ByteAddressBuffers-AST.hlsl b/clang/test/AST/HLSL/ByteAddressBuffers-AST.hlsl
index 43d8dde..61d5e5a 100644
--- a/clang/test/AST/HLSL/ByteAddressBuffers-AST.hlsl
+++ b/clang/test/AST/HLSL/ByteAddressBuffers-AST.hlsl
@@ -142,5 +142,19 @@ RESOURCE Buffer;
// CHECK-NEXT: DeclRefExpr {{.*}} 'hlsl::[[RESOURCE]]' lvalue Var {{.*}} 'tmp' 'hlsl::[[RESOURCE]]'
// CHECK-NEXT: AlwaysInlineAttr {{.*}} Implicit always_inline
+// GetDimensions method
+
+// CHECK-NEXT: CXXMethodDecl {{.*}} GetDimensions 'void (out unsigned int)'
+// CHECK-NEXT: ParmVarDecl {{.*}} dim 'unsigned int &__restrict'
+// CHECK-NEXT: HLSLParamModifierAttr {{.*}} out
+// CHECK-NEXT: CompoundStmt
+// CHECK-NEXT: CallExpr {{.*}} 'void'
+// CHECK-NEXT: ImplicitCastExpr {{.*}} 'void (*)(...) noexcept' <BuiltinFnToFnPtr>
+// CHECK-NEXT: DeclRefExpr {{.*}} '<builtin fn type>' Function {{.*}} '__builtin_hlsl_resource_getdimensions_x' 'void (...) noexcept'
+// CHECK-NEXT: MemberExpr {{.*}} '__hlsl_resource_t {{.*}}' lvalue .__handle {{.*}}
+// CHECK-NEXT: CXXThisExpr {{.*}} 'hlsl::[[RESOURCE]]' lvalue implicit this
+// CHECK-NEXT: DeclRefExpr {{.*}} 'unsigned int' ParmVar {{.*}} 'dim' 'unsigned int &__restrict'
+// CHECK-NEXT: AlwaysInlineAttr {{.*}} Implicit always_inline
+
// CHECK-NOSUBSCRIPT-NOT: CXXMethodDecl {{.*}} operator[] 'const char8_t &(unsigned int) const'
// CHECK-NOSUBSCRIPT-NOT: CXXMethodDecl {{.*}} operator[] 'char8_t &(unsigned int)'
diff --git a/clang/test/AST/HLSL/StructuredBuffers-AST.hlsl b/clang/test/AST/HLSL/StructuredBuffers-AST.hlsl
index e72207e..7a8c57c 100644
--- a/clang/test/AST/HLSL/StructuredBuffers-AST.hlsl
+++ b/clang/test/AST/HLSL/StructuredBuffers-AST.hlsl
@@ -408,6 +408,28 @@ RESOURCE<float> Buffer;
// CHECK-CONSUME-NEXT: CXXThisExpr {{.*}} 'hlsl::[[RESOURCE]]<element_type>' lvalue implicit this
// CHECK-CONSUME-NEXT: IntegerLiteral {{.*}} 'int' -1
+// GetDimensions method
+
+// CHECK: CXXMethodDecl {{.*}} GetDimensions 'void (out unsigned int, out unsigned int)'
+// CHECK-NEXT: ParmVarDecl {{.*}} numStructs 'unsigned int &__restrict'
+// CHECK-NEXT: HLSLParamModifierAttr {{.*}} out
+// CHECK-NEXT: ParmVarDecl {{.*}} stride 'unsigned int &__restrict'
+// CHECK-NEXT: HLSLParamModifierAttr {{.*}} out
+// CHECK-NEXT: CompoundStmt
+// CHECK-NEXT: CallExpr {{.*}} 'void'
+// CHECK-NEXT: ImplicitCastExpr {{.*}} 'void (*)(...) noexcept' <BuiltinFnToFnPtr>
+// CHECK-NEXT: DeclRefExpr {{.*}} '<builtin fn type>' Function {{.*}} '__builtin_hlsl_resource_getdimensions_x' 'void (...) noexcept'
+// CHECK-NEXT: MemberExpr {{.*}} '__hlsl_resource_t {{.*}}' lvalue .__handle {{.*}}
+// CHECK-NEXT: CXXThisExpr {{.*}} 'hlsl::[[RESOURCE]]<element_type>' lvalue implicit this
+// CHECK-NEXT: DeclRefExpr {{.*}} 'unsigned int' ParmVar {{.*}} 'numStructs' 'unsigned int &__restrict'
+// CHECK-NEXT: CallExpr {{.*}} 'void'
+// CHECK-NEXT: ImplicitCastExpr {{.*}} 'void (*)(...) noexcept' <BuiltinFnToFnPtr>
+// CHECK-NEXT: DeclRefExpr {{.*}} '<builtin fn type>' Function {{.*}} '__builtin_hlsl_resource_getstride' 'void (...) noexcept'
+// CHECK-NEXT: MemberExpr {{.*}} '__hlsl_resource_t {{.*}}' lvalue .__handle {{.*}}
+// CHECK-NEXT: CXXThisExpr {{.*}} 'hlsl::[[RESOURCE]]<element_type>' lvalue implicit this
+// CHECK-NEXT: DeclRefExpr {{.*}} 'unsigned int' ParmVar {{.*}} 'stride' 'unsigned int &__restrict'
+// CHECK-NEXT: AlwaysInlineAttr {{.*}} Implicit always_inline
+
// CHECK: ClassTemplateSpecializationDecl {{.*}} class [[RESOURCE]] definition
// CHECK: TemplateArgument type 'float'
// CHECK-NEXT: BuiltinType {{.*}} 'float'
diff --git a/clang/test/AST/HLSL/TypedBuffers-AST.hlsl b/clang/test/AST/HLSL/TypedBuffers-AST.hlsl
index 5182ce1..14e274d 100644
--- a/clang/test/AST/HLSL/TypedBuffers-AST.hlsl
+++ b/clang/test/AST/HLSL/TypedBuffers-AST.hlsl
@@ -214,6 +214,20 @@ RESOURCE<float> Buffer;
// CHECK-NEXT: DeclRefExpr {{.*}} 'unsigned int' ParmVar {{.*}} 'Index' 'unsigned int'
// CHECK-NEXT: AlwaysInlineAttr {{.*}} Implicit always_inline
+// GetDimensions method
+
+// CHECK-NEXT: CXXMethodDecl {{.*}} GetDimensions 'void (out unsigned int)'
+// CHECK-NEXT: ParmVarDecl {{.*}} dim 'unsigned int &__restrict'
+// CHECK-NEXT: HLSLParamModifierAttr {{.*}} out
+// CHECK-NEXT: CompoundStmt
+// CHECK-NEXT: CallExpr {{.*}} 'void'
+// CHECK-NEXT: ImplicitCastExpr {{.*}} 'void (*)(...) noexcept' <BuiltinFnToFnPtr>
+// CHECK-NEXT: DeclRefExpr {{.*}} '<builtin fn type>' Function {{.*}} '__builtin_hlsl_resource_getdimensions_x' 'void (...) noexcept'
+// CHECK-NEXT: MemberExpr {{.*}} '__hlsl_resource_t {{.*}}' lvalue .__handle {{.*}}
+// CHECK-NEXT: CXXThisExpr {{.*}} 'hlsl::[[RESOURCE]]<element_type>' lvalue implicit this
+// CHECK-NEXT: DeclRefExpr {{.*}} 'unsigned int' ParmVar {{.*}} 'dim' 'unsigned int &__restrict'
+// CHECK-NEXT: AlwaysInlineAttr {{.*}} Implicit always_inline
+
// CHECK: ClassTemplateSpecializationDecl {{.*}} class [[RESOURCE]] definition
// CHECK: TemplateArgument type 'float'
diff --git a/clang/test/CodeGen/RISCV/rvv-intrinsics-autogenerated/zvfbfa/non-policy/non-overloaded/vfadd.c b/clang/test/CodeGen/RISCV/rvv-intrinsics-autogenerated/zvfbfa/non-policy/non-overloaded/vfadd.c
new file mode 100644
index 0000000..d7734e0
--- /dev/null
+++ b/clang/test/CodeGen/RISCV/rvv-intrinsics-autogenerated/zvfbfa/non-policy/non-overloaded/vfadd.c
@@ -0,0 +1,249 @@
+// NOTE: Assertions have been autogenerated by utils/update_cc_test_checks.py UTC_ARGS: --version 5
+// REQUIRES: riscv-registered-target
+// RUN: %clang_cc1 -triple riscv64 -target-feature +v \
+// RUN: -target-feature +experimental-zvfbfa -disable-O0-optnone \
+// RUN: -emit-llvm %s -o - | opt -S -passes=mem2reg | \
+// RUN: FileCheck --check-prefix=CHECK-RV64 %s
+
+#include <riscv_vector.h>
+
+// CHECK-RV64-LABEL: define dso_local <vscale x 1 x bfloat> @test_vfadd_vv_bf16mf4(
+// CHECK-RV64-SAME: <vscale x 1 x bfloat> [[OP1:%.*]], <vscale x 1 x bfloat> [[OP2:%.*]], i64 noundef [[VL:%.*]]) #[[ATTR0:[0-9]+]] {
+// CHECK-RV64-NEXT: [[ENTRY:.*:]]
+// CHECK-RV64-NEXT: [[TMP0:%.*]] = call <vscale x 1 x bfloat> @llvm.riscv.vfadd.nxv1bf16.nxv1bf16.i64(<vscale x 1 x bfloat> poison, <vscale x 1 x bfloat> [[OP1]], <vscale x 1 x bfloat> [[OP2]], i64 7, i64 [[VL]])
+// CHECK-RV64-NEXT: ret <vscale x 1 x bfloat> [[TMP0]]
+//
+vbfloat16mf4_t test_vfadd_vv_bf16mf4(vbfloat16mf4_t op1, vbfloat16mf4_t op2, size_t vl) {
+ return __riscv_vfadd_vv_bf16mf4(op1, op2, vl);
+}
+
+// CHECK-RV64-LABEL: define dso_local <vscale x 1 x bfloat> @test_vfadd_vf_bf16mf4(
+// CHECK-RV64-SAME: <vscale x 1 x bfloat> [[OP1:%.*]], bfloat noundef [[OP2:%.*]], i64 noundef [[VL:%.*]]) #[[ATTR0]] {
+// CHECK-RV64-NEXT: [[ENTRY:.*:]]
+// CHECK-RV64-NEXT: [[TMP0:%.*]] = call <vscale x 1 x bfloat> @llvm.riscv.vfadd.nxv1bf16.bf16.i64(<vscale x 1 x bfloat> poison, <vscale x 1 x bfloat> [[OP1]], bfloat [[OP2]], i64 7, i64 [[VL]])
+// CHECK-RV64-NEXT: ret <vscale x 1 x bfloat> [[TMP0]]
+//
+vbfloat16mf4_t test_vfadd_vf_bf16mf4(vbfloat16mf4_t op1, __bf16 op2, size_t vl) {
+ return __riscv_vfadd_vf_bf16mf4(op1, op2, vl);
+}
+
+// CHECK-RV64-LABEL: define dso_local <vscale x 2 x bfloat> @test_vfadd_vv_bf16mf2(
+// CHECK-RV64-SAME: <vscale x 2 x bfloat> [[OP1:%.*]], <vscale x 2 x bfloat> [[OP2:%.*]], i64 noundef [[VL:%.*]]) #[[ATTR0]] {
+// CHECK-RV64-NEXT: [[ENTRY:.*:]]
+// CHECK-RV64-NEXT: [[TMP0:%.*]] = call <vscale x 2 x bfloat> @llvm.riscv.vfadd.nxv2bf16.nxv2bf16.i64(<vscale x 2 x bfloat> poison, <vscale x 2 x bfloat> [[OP1]], <vscale x 2 x bfloat> [[OP2]], i64 7, i64 [[VL]])
+// CHECK-RV64-NEXT: ret <vscale x 2 x bfloat> [[TMP0]]
+//
+vbfloat16mf2_t test_vfadd_vv_bf16mf2(vbfloat16mf2_t op1, vbfloat16mf2_t op2, size_t vl) {
+ return __riscv_vfadd_vv_bf16mf2(op1, op2, vl);
+}
+
+// CHECK-RV64-LABEL: define dso_local <vscale x 2 x bfloat> @test_vfadd_vf_bf16mf2(
+// CHECK-RV64-SAME: <vscale x 2 x bfloat> [[OP1:%.*]], bfloat noundef [[OP2:%.*]], i64 noundef [[VL:%.*]]) #[[ATTR0]] {
+// CHECK-RV64-NEXT: [[ENTRY:.*:]]
+// CHECK-RV64-NEXT: [[TMP0:%.*]] = call <vscale x 2 x bfloat> @llvm.riscv.vfadd.nxv2bf16.bf16.i64(<vscale x 2 x bfloat> poison, <vscale x 2 x bfloat> [[OP1]], bfloat [[OP2]], i64 7, i64 [[VL]])
+// CHECK-RV64-NEXT: ret <vscale x 2 x bfloat> [[TMP0]]
+//
+vbfloat16mf2_t test_vfadd_vf_bf16mf2(vbfloat16mf2_t op1, __bf16 op2, size_t vl) {
+ return __riscv_vfadd_vf_bf16mf2(op1, op2, vl);
+}
+
+// CHECK-RV64-LABEL: define dso_local <vscale x 4 x bfloat> @test_vfadd_vv_bf16m1(
+// CHECK-RV64-SAME: <vscale x 4 x bfloat> [[OP1:%.*]], <vscale x 4 x bfloat> [[OP2:%.*]], i64 noundef [[VL:%.*]]) #[[ATTR0]] {
+// CHECK-RV64-NEXT: [[ENTRY:.*:]]
+// CHECK-RV64-NEXT: [[TMP0:%.*]] = call <vscale x 4 x bfloat> @llvm.riscv.vfadd.nxv4bf16.nxv4bf16.i64(<vscale x 4 x bfloat> poison, <vscale x 4 x bfloat> [[OP1]], <vscale x 4 x bfloat> [[OP2]], i64 7, i64 [[VL]])
+// CHECK-RV64-NEXT: ret <vscale x 4 x bfloat> [[TMP0]]
+//
+vbfloat16m1_t test_vfadd_vv_bf16m1(vbfloat16m1_t op1, vbfloat16m1_t op2, size_t vl) {
+ return __riscv_vfadd_vv_bf16m1(op1, op2, vl);
+}
+
+// CHECK-RV64-LABEL: define dso_local <vscale x 4 x bfloat> @test_vfadd_vf_bf16m1(
+// CHECK-RV64-SAME: <vscale x 4 x bfloat> [[OP1:%.*]], bfloat noundef [[OP2:%.*]], i64 noundef [[VL:%.*]]) #[[ATTR0]] {
+// CHECK-RV64-NEXT: [[ENTRY:.*:]]
+// CHECK-RV64-NEXT: [[TMP0:%.*]] = call <vscale x 4 x bfloat> @llvm.riscv.vfadd.nxv4bf16.bf16.i64(<vscale x 4 x bfloat> poison, <vscale x 4 x bfloat> [[OP1]], bfloat [[OP2]], i64 7, i64 [[VL]])
+// CHECK-RV64-NEXT: ret <vscale x 4 x bfloat> [[TMP0]]
+//
+vbfloat16m1_t test_vfadd_vf_bf16m1(vbfloat16m1_t op1, __bf16 op2, size_t vl) {
+ return __riscv_vfadd_vf_bf16m1(op1, op2, vl);
+}
+
+// CHECK-RV64-LABEL: define dso_local <vscale x 8 x bfloat> @test_vfadd_vv_bf16m2(
+// CHECK-RV64-SAME: <vscale x 8 x bfloat> [[OP1:%.*]], <vscale x 8 x bfloat> [[OP2:%.*]], i64 noundef [[VL:%.*]]) #[[ATTR0]] {
+// CHECK-RV64-NEXT: [[ENTRY:.*:]]
+// CHECK-RV64-NEXT: [[TMP0:%.*]] = call <vscale x 8 x bfloat> @llvm.riscv.vfadd.nxv8bf16.nxv8bf16.i64(<vscale x 8 x bfloat> poison, <vscale x 8 x bfloat> [[OP1]], <vscale x 8 x bfloat> [[OP2]], i64 7, i64 [[VL]])
+// CHECK-RV64-NEXT: ret <vscale x 8 x bfloat> [[TMP0]]
+//
+vbfloat16m2_t test_vfadd_vv_bf16m2(vbfloat16m2_t op1, vbfloat16m2_t op2, size_t vl) {
+ return __riscv_vfadd_vv_bf16m2(op1, op2, vl);
+}
+
+// CHECK-RV64-LABEL: define dso_local <vscale x 8 x bfloat> @test_vfadd_vf_bf16m2(
+// CHECK-RV64-SAME: <vscale x 8 x bfloat> [[OP1:%.*]], bfloat noundef [[OP2:%.*]], i64 noundef [[VL:%.*]]) #[[ATTR0]] {
+// CHECK-RV64-NEXT: [[ENTRY:.*:]]
+// CHECK-RV64-NEXT: [[TMP0:%.*]] = call <vscale x 8 x bfloat> @llvm.riscv.vfadd.nxv8bf16.bf16.i64(<vscale x 8 x bfloat> poison, <vscale x 8 x bfloat> [[OP1]], bfloat [[OP2]], i64 7, i64 [[VL]])
+// CHECK-RV64-NEXT: ret <vscale x 8 x bfloat> [[TMP0]]
+//
+vbfloat16m2_t test_vfadd_vf_bf16m2(vbfloat16m2_t op1, __bf16 op2, size_t vl) {
+ return __riscv_vfadd_vf_bf16m2(op1, op2, vl);
+}
+
+// CHECK-RV64-LABEL: define dso_local <vscale x 16 x bfloat> @test_vfadd_vv_bf16m4(
+// CHECK-RV64-SAME: <vscale x 16 x bfloat> [[OP1:%.*]], <vscale x 16 x bfloat> [[OP2:%.*]], i64 noundef [[VL:%.*]]) #[[ATTR0]] {
+// CHECK-RV64-NEXT: [[ENTRY:.*:]]
+// CHECK-RV64-NEXT: [[TMP0:%.*]] = call <vscale x 16 x bfloat> @llvm.riscv.vfadd.nxv16bf16.nxv16bf16.i64(<vscale x 16 x bfloat> poison, <vscale x 16 x bfloat> [[OP1]], <vscale x 16 x bfloat> [[OP2]], i64 7, i64 [[VL]])
+// CHECK-RV64-NEXT: ret <vscale x 16 x bfloat> [[TMP0]]
+//
+vbfloat16m4_t test_vfadd_vv_bf16m4(vbfloat16m4_t op1, vbfloat16m4_t op2, size_t vl) {
+ return __riscv_vfadd_vv_bf16m4(op1, op2, vl);
+}
+
+// CHECK-RV64-LABEL: define dso_local <vscale x 16 x bfloat> @test_vfadd_vf_bf16m4(
+// CHECK-RV64-SAME: <vscale x 16 x bfloat> [[OP1:%.*]], bfloat noundef [[OP2:%.*]], i64 noundef [[VL:%.*]]) #[[ATTR0]] {
+// CHECK-RV64-NEXT: [[ENTRY:.*:]]
+// CHECK-RV64-NEXT: [[TMP0:%.*]] = call <vscale x 16 x bfloat> @llvm.riscv.vfadd.nxv16bf16.bf16.i64(<vscale x 16 x bfloat> poison, <vscale x 16 x bfloat> [[OP1]], bfloat [[OP2]], i64 7, i64 [[VL]])
+// CHECK-RV64-NEXT: ret <vscale x 16 x bfloat> [[TMP0]]
+//
+vbfloat16m4_t test_vfadd_vf_bf16m4(vbfloat16m4_t op1, __bf16 op2, size_t vl) {
+ return __riscv_vfadd_vf_bf16m4(op1, op2, vl);
+}
+
+// CHECK-RV64-LABEL: define dso_local <vscale x 32 x bfloat> @test_vfadd_vv_bf16m8(
+// CHECK-RV64-SAME: <vscale x 32 x bfloat> [[OP1:%.*]], <vscale x 32 x bfloat> [[OP2:%.*]], i64 noundef [[VL:%.*]]) #[[ATTR0]] {
+// CHECK-RV64-NEXT: [[ENTRY:.*:]]
+// CHECK-RV64-NEXT: [[TMP0:%.*]] = call <vscale x 32 x bfloat> @llvm.riscv.vfadd.nxv32bf16.nxv32bf16.i64(<vscale x 32 x bfloat> poison, <vscale x 32 x bfloat> [[OP1]], <vscale x 32 x bfloat> [[OP2]], i64 7, i64 [[VL]])
+// CHECK-RV64-NEXT: ret <vscale x 32 x bfloat> [[TMP0]]
+//
+vbfloat16m8_t test_vfadd_vv_bf16m8(vbfloat16m8_t op1, vbfloat16m8_t op2, size_t vl) {
+ return __riscv_vfadd_vv_bf16m8(op1, op2, vl);
+}
+
+// CHECK-RV64-LABEL: define dso_local <vscale x 32 x bfloat> @test_vfadd_vf_bf16m8(
+// CHECK-RV64-SAME: <vscale x 32 x bfloat> [[OP1:%.*]], bfloat noundef [[OP2:%.*]], i64 noundef [[VL:%.*]]) #[[ATTR0]] {
+// CHECK-RV64-NEXT: [[ENTRY:.*:]]
+// CHECK-RV64-NEXT: [[TMP0:%.*]] = call <vscale x 32 x bfloat> @llvm.riscv.vfadd.nxv32bf16.bf16.i64(<vscale x 32 x bfloat> poison, <vscale x 32 x bfloat> [[OP1]], bfloat [[OP2]], i64 7, i64 [[VL]])
+// CHECK-RV64-NEXT: ret <vscale x 32 x bfloat> [[TMP0]]
+//
+vbfloat16m8_t test_vfadd_vf_bf16m8(vbfloat16m8_t op1, __bf16 op2, size_t vl) {
+ return __riscv_vfadd_vf_bf16m8(op1, op2, vl);
+}
+
+// CHECK-RV64-LABEL: define dso_local <vscale x 1 x bfloat> @test_vfadd_vv_bf16mf4_m(
+// CHECK-RV64-SAME: <vscale x 1 x i1> [[MASK:%.*]], <vscale x 1 x bfloat> [[OP1:%.*]], <vscale x 1 x bfloat> [[OP2:%.*]], i64 noundef [[VL:%.*]]) #[[ATTR0]] {
+// CHECK-RV64-NEXT: [[ENTRY:.*:]]
+// CHECK-RV64-NEXT: [[TMP0:%.*]] = call <vscale x 1 x bfloat> @llvm.riscv.vfadd.mask.nxv1bf16.nxv1bf16.i64(<vscale x 1 x bfloat> poison, <vscale x 1 x bfloat> [[OP1]], <vscale x 1 x bfloat> [[OP2]], <vscale x 1 x i1> [[MASK]], i64 7, i64 [[VL]], i64 3)
+// CHECK-RV64-NEXT: ret <vscale x 1 x bfloat> [[TMP0]]
+//
+vbfloat16mf4_t test_vfadd_vv_bf16mf4_m(vbool64_t mask, vbfloat16mf4_t op1, vbfloat16mf4_t op2, size_t vl) {
+ return __riscv_vfadd_vv_bf16mf4_m(mask, op1, op2, vl);
+}
+
+// CHECK-RV64-LABEL: define dso_local <vscale x 1 x bfloat> @test_vfadd_vf_bf16mf4_m(
+// CHECK-RV64-SAME: <vscale x 1 x i1> [[MASK:%.*]], <vscale x 1 x bfloat> [[OP1:%.*]], bfloat noundef [[OP2:%.*]], i64 noundef [[VL:%.*]]) #[[ATTR0]] {
+// CHECK-RV64-NEXT: [[ENTRY:.*:]]
+// CHECK-RV64-NEXT: [[TMP0:%.*]] = call <vscale x 1 x bfloat> @llvm.riscv.vfadd.mask.nxv1bf16.bf16.i64(<vscale x 1 x bfloat> poison, <vscale x 1 x bfloat> [[OP1]], bfloat [[OP2]], <vscale x 1 x i1> [[MASK]], i64 7, i64 [[VL]], i64 3)
+// CHECK-RV64-NEXT: ret <vscale x 1 x bfloat> [[TMP0]]
+//
+vbfloat16mf4_t test_vfadd_vf_bf16mf4_m(vbool64_t mask, vbfloat16mf4_t op1, __bf16 op2, size_t vl) {
+ return __riscv_vfadd_vf_bf16mf4_m(mask, op1, op2, vl);
+}
+
+// CHECK-RV64-LABEL: define dso_local <vscale x 2 x bfloat> @test_vfadd_vv_bf16mf2_m(
+// CHECK-RV64-SAME: <vscale x 2 x i1> [[MASK:%.*]], <vscale x 2 x bfloat> [[OP1:%.*]], <vscale x 2 x bfloat> [[OP2:%.*]], i64 noundef [[VL:%.*]]) #[[ATTR0]] {
+// CHECK-RV64-NEXT: [[ENTRY:.*:]]
+// CHECK-RV64-NEXT: [[TMP0:%.*]] = call <vscale x 2 x bfloat> @llvm.riscv.vfadd.mask.nxv2bf16.nxv2bf16.i64(<vscale x 2 x bfloat> poison, <vscale x 2 x bfloat> [[OP1]], <vscale x 2 x bfloat> [[OP2]], <vscale x 2 x i1> [[MASK]], i64 7, i64 [[VL]], i64 3)
+// CHECK-RV64-NEXT: ret <vscale x 2 x bfloat> [[TMP0]]
+//
+vbfloat16mf2_t test_vfadd_vv_bf16mf2_m(vbool32_t mask, vbfloat16mf2_t op1, vbfloat16mf2_t op2, size_t vl) {
+ return __riscv_vfadd_vv_bf16mf2_m(mask, op1, op2, vl);
+}
+
+// CHECK-RV64-LABEL: define dso_local <vscale x 2 x bfloat> @test_vfadd_vf_bf16mf2_m(
+// CHECK-RV64-SAME: <vscale x 2 x i1> [[MASK:%.*]], <vscale x 2 x bfloat> [[OP1:%.*]], bfloat noundef [[OP2:%.*]], i64 noundef [[VL:%.*]]) #[[ATTR0]] {
+// CHECK-RV64-NEXT: [[ENTRY:.*:]]
+// CHECK-RV64-NEXT: [[TMP0:%.*]] = call <vscale x 2 x bfloat> @llvm.riscv.vfadd.mask.nxv2bf16.bf16.i64(<vscale x 2 x bfloat> poison, <vscale x 2 x bfloat> [[OP1]], bfloat [[OP2]], <vscale x 2 x i1> [[MASK]], i64 7, i64 [[VL]], i64 3)
+// CHECK-RV64-NEXT: ret <vscale x 2 x bfloat> [[TMP0]]
+//
+vbfloat16mf2_t test_vfadd_vf_bf16mf2_m(vbool32_t mask, vbfloat16mf2_t op1, __bf16 op2, size_t vl) {
+ return __riscv_vfadd_vf_bf16mf2_m(mask, op1, op2, vl);
+}
+
+// CHECK-RV64-LABEL: define dso_local <vscale x 4 x bfloat> @test_vfadd_vv_bf16m1_m(
+// CHECK-RV64-SAME: <vscale x 4 x i1> [[MASK:%.*]], <vscale x 4 x bfloat> [[OP1:%.*]], <vscale x 4 x bfloat> [[OP2:%.*]], i64 noundef [[VL:%.*]]) #[[ATTR0]] {
+// CHECK-RV64-NEXT: [[ENTRY:.*:]]
+// CHECK-RV64-NEXT: [[TMP0:%.*]] = call <vscale x 4 x bfloat> @llvm.riscv.vfadd.mask.nxv4bf16.nxv4bf16.i64(<vscale x 4 x bfloat> poison, <vscale x 4 x bfloat> [[OP1]], <vscale x 4 x bfloat> [[OP2]], <vscale x 4 x i1> [[MASK]], i64 7, i64 [[VL]], i64 3)
+// CHECK-RV64-NEXT: ret <vscale x 4 x bfloat> [[TMP0]]
+//
+vbfloat16m1_t test_vfadd_vv_bf16m1_m(vbool16_t mask, vbfloat16m1_t op1, vbfloat16m1_t op2, size_t vl) {
+ return __riscv_vfadd_vv_bf16m1_m(mask, op1, op2, vl);
+}
+
+// CHECK-RV64-LABEL: define dso_local <vscale x 4 x bfloat> @test_vfadd_vf_bf16m1_m(
+// CHECK-RV64-SAME: <vscale x 4 x i1> [[MASK:%.*]], <vscale x 4 x bfloat> [[OP1:%.*]], bfloat noundef [[OP2:%.*]], i64 noundef [[VL:%.*]]) #[[ATTR0]] {
+// CHECK-RV64-NEXT: [[ENTRY:.*:]]
+// CHECK-RV64-NEXT: [[TMP0:%.*]] = call <vscale x 4 x bfloat> @llvm.riscv.vfadd.mask.nxv4bf16.bf16.i64(<vscale x 4 x bfloat> poison, <vscale x 4 x bfloat> [[OP1]], bfloat [[OP2]], <vscale x 4 x i1> [[MASK]], i64 7, i64 [[VL]], i64 3)
+// CHECK-RV64-NEXT: ret <vscale x 4 x bfloat> [[TMP0]]
+//
+vbfloat16m1_t test_vfadd_vf_bf16m1_m(vbool16_t mask, vbfloat16m1_t op1, __bf16 op2, size_t vl) {
+ return __riscv_vfadd_vf_bf16m1_m(mask, op1, op2, vl);
+}
+
+// CHECK-RV64-LABEL: define dso_local <vscale x 8 x bfloat> @test_vfadd_vv_bf16m2_m(
+// CHECK-RV64-SAME: <vscale x 8 x i1> [[MASK:%.*]], <vscale x 8 x bfloat> [[OP1:%.*]], <vscale x 8 x bfloat> [[OP2:%.*]], i64 noundef [[VL:%.*]]) #[[ATTR0]] {
+// CHECK-RV64-NEXT: [[ENTRY:.*:]]
+// CHECK-RV64-NEXT: [[TMP0:%.*]] = call <vscale x 8 x bfloat> @llvm.riscv.vfadd.mask.nxv8bf16.nxv8bf16.i64(<vscale x 8 x bfloat> poison, <vscale x 8 x bfloat> [[OP1]], <vscale x 8 x bfloat> [[OP2]], <vscale x 8 x i1> [[MASK]], i64 7, i64 [[VL]], i64 3)
+// CHECK-RV64-NEXT: ret <vscale x 8 x bfloat> [[TMP0]]
+//
+vbfloat16m2_t test_vfadd_vv_bf16m2_m(vbool8_t mask, vbfloat16m2_t op1, vbfloat16m2_t op2, size_t vl) {
+ return __riscv_vfadd_vv_bf16m2_m(mask, op1, op2, vl);
+}
+
+// CHECK-RV64-LABEL: define dso_local <vscale x 8 x bfloat> @test_vfadd_vf_bf16m2_m(
+// CHECK-RV64-SAME: <vscale x 8 x i1> [[MASK:%.*]], <vscale x 8 x bfloat> [[OP1:%.*]], bfloat noundef [[OP2:%.*]], i64 noundef [[VL:%.*]]) #[[ATTR0]] {
+// CHECK-RV64-NEXT: [[ENTRY:.*:]]
+// CHECK-RV64-NEXT: [[TMP0:%.*]] = call <vscale x 8 x bfloat> @llvm.riscv.vfadd.mask.nxv8bf16.bf16.i64(<vscale x 8 x bfloat> poison, <vscale x 8 x bfloat> [[OP1]], bfloat [[OP2]], <vscale x 8 x i1> [[MASK]], i64 7, i64 [[VL]], i64 3)
+// CHECK-RV64-NEXT: ret <vscale x 8 x bfloat> [[TMP0]]
+//
+vbfloat16m2_t test_vfadd_vf_bf16m2_m(vbool8_t mask, vbfloat16m2_t op1, __bf16 op2, size_t vl) {
+ return __riscv_vfadd_vf_bf16m2_m(mask, op1, op2, vl);
+}
+
+// CHECK-RV64-LABEL: define dso_local <vscale x 16 x bfloat> @test_vfadd_vv_bf16m4_m(
+// CHECK-RV64-SAME: <vscale x 16 x i1> [[MASK:%.*]], <vscale x 16 x bfloat> [[OP1:%.*]], <vscale x 16 x bfloat> [[OP2:%.*]], i64 noundef [[VL:%.*]]) #[[ATTR0]] {
+// CHECK-RV64-NEXT: [[ENTRY:.*:]]
+// CHECK-RV64-NEXT: [[TMP0:%.*]] = call <vscale x 16 x bfloat> @llvm.riscv.vfadd.mask.nxv16bf16.nxv16bf16.i64(<vscale x 16 x bfloat> poison, <vscale x 16 x bfloat> [[OP1]], <vscale x 16 x bfloat> [[OP2]], <vscale x 16 x i1> [[MASK]], i64 7, i64 [[VL]], i64 3)
+// CHECK-RV64-NEXT: ret <vscale x 16 x bfloat> [[TMP0]]
+//
+vbfloat16m4_t test_vfadd_vv_bf16m4_m(vbool4_t mask, vbfloat16m4_t op1, vbfloat16m4_t op2, size_t vl) {
+ return __riscv_vfadd_vv_bf16m4_m(mask, op1, op2, vl);
+}
+
+// CHECK-RV64-LABEL: define dso_local <vscale x 16 x bfloat> @test_vfadd_vf_bf16m4_m(
+// CHECK-RV64-SAME: <vscale x 16 x i1> [[MASK:%.*]], <vscale x 16 x bfloat> [[OP1:%.*]], bfloat noundef [[OP2:%.*]], i64 noundef [[VL:%.*]]) #[[ATTR0]] {
+// CHECK-RV64-NEXT: [[ENTRY:.*:]]
+// CHECK-RV64-NEXT: [[TMP0:%.*]] = call <vscale x 16 x bfloat> @llvm.riscv.vfadd.mask.nxv16bf16.bf16.i64(<vscale x 16 x bfloat> poison, <vscale x 16 x bfloat> [[OP1]], bfloat [[OP2]], <vscale x 16 x i1> [[MASK]], i64 7, i64 [[VL]], i64 3)
+// CHECK-RV64-NEXT: ret <vscale x 16 x bfloat> [[TMP0]]
+//
+vbfloat16m4_t test_vfadd_vf_bf16m4_m(vbool4_t mask, vbfloat16m4_t op1, __bf16 op2, size_t vl) {
+ return __riscv_vfadd_vf_bf16m4_m(mask, op1, op2, vl);
+}
+
+// CHECK-RV64-LABEL: define dso_local <vscale x 32 x bfloat> @test_vfadd_vv_bf16m8_m(
+// CHECK-RV64-SAME: <vscale x 32 x i1> [[MASK:%.*]], <vscale x 32 x bfloat> [[OP1:%.*]], <vscale x 32 x bfloat> [[OP2:%.*]], i64 noundef [[VL:%.*]]) #[[ATTR0]] {
+// CHECK-RV64-NEXT: [[ENTRY:.*:]]
+// CHECK-RV64-NEXT: [[TMP0:%.*]] = call <vscale x 32 x bfloat> @llvm.riscv.vfadd.mask.nxv32bf16.nxv32bf16.i64(<vscale x 32 x bfloat> poison, <vscale x 32 x bfloat> [[OP1]], <vscale x 32 x bfloat> [[OP2]], <vscale x 32 x i1> [[MASK]], i64 7, i64 [[VL]], i64 3)
+// CHECK-RV64-NEXT: ret <vscale x 32 x bfloat> [[TMP0]]
+//
+vbfloat16m8_t test_vfadd_vv_bf16m8_m(vbool2_t mask, vbfloat16m8_t op1, vbfloat16m8_t op2, size_t vl) {
+ return __riscv_vfadd_vv_bf16m8_m(mask, op1, op2, vl);
+}
+
+// CHECK-RV64-LABEL: define dso_local <vscale x 32 x bfloat> @test_vfadd_vf_bf16m8_m(
+// CHECK-RV64-SAME: <vscale x 32 x i1> [[MASK:%.*]], <vscale x 32 x bfloat> [[OP1:%.*]], bfloat noundef [[OP2:%.*]], i64 noundef [[VL:%.*]]) #[[ATTR0]] {
+// CHECK-RV64-NEXT: [[ENTRY:.*:]]
+// CHECK-RV64-NEXT: [[TMP0:%.*]] = call <vscale x 32 x bfloat> @llvm.riscv.vfadd.mask.nxv32bf16.bf16.i64(<vscale x 32 x bfloat> poison, <vscale x 32 x bfloat> [[OP1]], bfloat [[OP2]], <vscale x 32 x i1> [[MASK]], i64 7, i64 [[VL]], i64 3)
+// CHECK-RV64-NEXT: ret <vscale x 32 x bfloat> [[TMP0]]
+//
+vbfloat16m8_t test_vfadd_vf_bf16m8_m(vbool2_t mask, vbfloat16m8_t op1, __bf16 op2, size_t vl) {
+ return __riscv_vfadd_vf_bf16m8_m(mask, op1, op2, vl);
+}
+
diff --git a/clang/test/CodeGen/RISCV/rvv-intrinsics-autogenerated/zvfbfa/non-policy/non-overloaded/vfclass.c b/clang/test/CodeGen/RISCV/rvv-intrinsics-autogenerated/zvfbfa/non-policy/non-overloaded/vfclass.c
new file mode 100644
index 0000000..68814f4
--- /dev/null
+++ b/clang/test/CodeGen/RISCV/rvv-intrinsics-autogenerated/zvfbfa/non-policy/non-overloaded/vfclass.c
@@ -0,0 +1,134 @@
+// NOTE: Assertions have been autogenerated by utils/update_cc_test_checks.py UTC_ARGS: --version 5
+// REQUIRES: riscv-registered-target
+// RUN: %clang_cc1 -triple riscv64 -target-feature +v \
+// RUN: -target-feature +experimental-zvfbfa -disable-O0-optnone \
+// RUN: -emit-llvm %s -o - | opt -S -passes=mem2reg | \
+// RUN: FileCheck --check-prefix=CHECK-RV64 %s
+
+#include <riscv_vector.h>
+
+// CHECK-RV64-LABEL: define dso_local <vscale x 1 x i16> @test_vfclass_v_bf16mf4_u16mf4(
+// CHECK-RV64-SAME: <vscale x 1 x bfloat> [[VS2:%.*]], i64 noundef [[VL:%.*]]) #[[ATTR0:[0-9]+]] {
+// CHECK-RV64-NEXT: [[ENTRY:.*:]]
+// CHECK-RV64-NEXT: [[TMP0:%.*]] = call <vscale x 1 x i16> @llvm.riscv.vfclass.nxv1bf16.i64(<vscale x 1 x i16> poison, <vscale x 1 x bfloat> [[VS2]], i64 [[VL]])
+// CHECK-RV64-NEXT: ret <vscale x 1 x i16> [[TMP0]]
+//
+vuint16mf4_t test_vfclass_v_bf16mf4_u16mf4(vbfloat16mf4_t vs2, size_t vl) {
+ return __riscv_vfclass_v_bf16mf4_u16mf4(vs2, vl);
+}
+
+// CHECK-RV64-LABEL: define dso_local <vscale x 2 x i16> @test_vfclass_v_bf16mf2_u16mf2(
+// CHECK-RV64-SAME: <vscale x 2 x bfloat> [[VS2:%.*]], i64 noundef [[VL:%.*]]) #[[ATTR0]] {
+// CHECK-RV64-NEXT: [[ENTRY:.*:]]
+// CHECK-RV64-NEXT: [[TMP0:%.*]] = call <vscale x 2 x i16> @llvm.riscv.vfclass.nxv2bf16.i64(<vscale x 2 x i16> poison, <vscale x 2 x bfloat> [[VS2]], i64 [[VL]])
+// CHECK-RV64-NEXT: ret <vscale x 2 x i16> [[TMP0]]
+//
+vuint16mf2_t test_vfclass_v_bf16mf2_u16mf2(vbfloat16mf2_t vs2, size_t vl) {
+ return __riscv_vfclass_v_bf16mf2_u16mf2(vs2, vl);
+}
+
+// CHECK-RV64-LABEL: define dso_local <vscale x 4 x i16> @test_vfclass_v_bf16m1_u16m1(
+// CHECK-RV64-SAME: <vscale x 4 x bfloat> [[VS2:%.*]], i64 noundef [[VL:%.*]]) #[[ATTR0]] {
+// CHECK-RV64-NEXT: [[ENTRY:.*:]]
+// CHECK-RV64-NEXT: [[TMP0:%.*]] = call <vscale x 4 x i16> @llvm.riscv.vfclass.nxv4bf16.i64(<vscale x 4 x i16> poison, <vscale x 4 x bfloat> [[VS2]], i64 [[VL]])
+// CHECK-RV64-NEXT: ret <vscale x 4 x i16> [[TMP0]]
+//
+vuint16m1_t test_vfclass_v_bf16m1_u16m1(vbfloat16m1_t vs2, size_t vl) {
+ return __riscv_vfclass_v_bf16m1_u16m1(vs2, vl);
+}
+
+// CHECK-RV64-LABEL: define dso_local <vscale x 8 x i16> @test_vfclass_v_bf16m2_u16m2(
+// CHECK-RV64-SAME: <vscale x 8 x bfloat> [[VS2:%.*]], i64 noundef [[VL:%.*]]) #[[ATTR0]] {
+// CHECK-RV64-NEXT: [[ENTRY:.*:]]
+// CHECK-RV64-NEXT: [[TMP0:%.*]] = call <vscale x 8 x i16> @llvm.riscv.vfclass.nxv8bf16.i64(<vscale x 8 x i16> poison, <vscale x 8 x bfloat> [[VS2]], i64 [[VL]])
+// CHECK-RV64-NEXT: ret <vscale x 8 x i16> [[TMP0]]
+//
+vuint16m2_t test_vfclass_v_bf16m2_u16m2(vbfloat16m2_t vs2, size_t vl) {
+ return __riscv_vfclass_v_bf16m2_u16m2(vs2, vl);
+}
+
+// CHECK-RV64-LABEL: define dso_local <vscale x 16 x i16> @test_vfclass_v_bf16m4_u16m4(
+// CHECK-RV64-SAME: <vscale x 16 x bfloat> [[VS2:%.*]], i64 noundef [[VL:%.*]]) #[[ATTR0]] {
+// CHECK-RV64-NEXT: [[ENTRY:.*:]]
+// CHECK-RV64-NEXT: [[TMP0:%.*]] = call <vscale x 16 x i16> @llvm.riscv.vfclass.nxv16bf16.i64(<vscale x 16 x i16> poison, <vscale x 16 x bfloat> [[VS2]], i64 [[VL]])
+// CHECK-RV64-NEXT: ret <vscale x 16 x i16> [[TMP0]]
+//
+vuint16m4_t test_vfclass_v_bf16m4_u16m4(vbfloat16m4_t vs2, size_t vl) {
+ return __riscv_vfclass_v_bf16m4_u16m4(vs2, vl);
+}
+
+// CHECK-RV64-LABEL: define dso_local <vscale x 32 x i16> @test_vfclass_v_bf16m8_u16m8(
+// CHECK-RV64-SAME: <vscale x 32 x bfloat> [[VS2:%.*]], i64 noundef [[VL:%.*]]) #[[ATTR0]] {
+// CHECK-RV64-NEXT: [[ENTRY:.*:]]
+// CHECK-RV64-NEXT: [[TMP0:%.*]] = call <vscale x 32 x i16> @llvm.riscv.vfclass.nxv32bf16.i64(<vscale x 32 x i16> poison, <vscale x 32 x bfloat> [[VS2]], i64 [[VL]])
+// CHECK-RV64-NEXT: ret <vscale x 32 x i16> [[TMP0]]
+//
+vuint16m8_t test_vfclass_v_bf16m8_u16m8(vbfloat16m8_t vs2, size_t vl) {
+ return __riscv_vfclass_v_bf16m8_u16m8(vs2, vl);
+}
+
+// CHECK-RV64-LABEL: define dso_local <vscale x 1 x i16> @test_vfclass_v_bf16mf4_u16mf4_m(
+// CHECK-RV64-SAME: <vscale x 1 x i1> [[VM:%.*]], <vscale x 1 x bfloat> [[VS2:%.*]], i64 noundef [[VL:%.*]]) #[[ATTR0]] {
+// CHECK-RV64-NEXT: [[ENTRY:.*:]]
+// CHECK-RV64-NEXT: [[TMP0:%.*]] = call <vscale x 1 x i16> @llvm.riscv.vfclass.mask.nxv1bf16.i64(<vscale x 1 x i16> poison, <vscale x 1 x bfloat> [[VS2]], <vscale x 1 x i1> [[VM]], i64 [[VL]], i64 3)
+// CHECK-RV64-NEXT: ret <vscale x 1 x i16> [[TMP0]]
+//
+vuint16mf4_t test_vfclass_v_bf16mf4_u16mf4_m(vbool64_t vm, vbfloat16mf4_t vs2,
+ size_t vl) {
+ return __riscv_vfclass_v_bf16mf4_u16mf4_m(vm, vs2, vl);
+}
+
+// CHECK-RV64-LABEL: define dso_local <vscale x 2 x i16> @test_vfclass_v_bf16mf2_u16mf2_m(
+// CHECK-RV64-SAME: <vscale x 2 x i1> [[VM:%.*]], <vscale x 2 x bfloat> [[VS2:%.*]], i64 noundef [[VL:%.*]]) #[[ATTR0]] {
+// CHECK-RV64-NEXT: [[ENTRY:.*:]]
+// CHECK-RV64-NEXT: [[TMP0:%.*]] = call <vscale x 2 x i16> @llvm.riscv.vfclass.mask.nxv2bf16.i64(<vscale x 2 x i16> poison, <vscale x 2 x bfloat> [[VS2]], <vscale x 2 x i1> [[VM]], i64 [[VL]], i64 3)
+// CHECK-RV64-NEXT: ret <vscale x 2 x i16> [[TMP0]]
+//
+vuint16mf2_t test_vfclass_v_bf16mf2_u16mf2_m(vbool32_t vm, vbfloat16mf2_t vs2,
+ size_t vl) {
+ return __riscv_vfclass_v_bf16mf2_u16mf2_m(vm, vs2, vl);
+}
+
+// CHECK-RV64-LABEL: define dso_local <vscale x 4 x i16> @test_vfclass_v_bf16m1_u16m1_m(
+// CHECK-RV64-SAME: <vscale x 4 x i1> [[VM:%.*]], <vscale x 4 x bfloat> [[VS2:%.*]], i64 noundef [[VL:%.*]]) #[[ATTR0]] {
+// CHECK-RV64-NEXT: [[ENTRY:.*:]]
+// CHECK-RV64-NEXT: [[TMP0:%.*]] = call <vscale x 4 x i16> @llvm.riscv.vfclass.mask.nxv4bf16.i64(<vscale x 4 x i16> poison, <vscale x 4 x bfloat> [[VS2]], <vscale x 4 x i1> [[VM]], i64 [[VL]], i64 3)
+// CHECK-RV64-NEXT: ret <vscale x 4 x i16> [[TMP0]]
+//
+vuint16m1_t test_vfclass_v_bf16m1_u16m1_m(vbool16_t vm, vbfloat16m1_t vs2,
+ size_t vl) {
+ return __riscv_vfclass_v_bf16m1_u16m1_m(vm, vs2, vl);
+}
+
+// CHECK-RV64-LABEL: define dso_local <vscale x 8 x i16> @test_vfclass_v_bf16m2_u16m2_m(
+// CHECK-RV64-SAME: <vscale x 8 x i1> [[VM:%.*]], <vscale x 8 x bfloat> [[VS2:%.*]], i64 noundef [[VL:%.*]]) #[[ATTR0]] {
+// CHECK-RV64-NEXT: [[ENTRY:.*:]]
+// CHECK-RV64-NEXT: [[TMP0:%.*]] = call <vscale x 8 x i16> @llvm.riscv.vfclass.mask.nxv8bf16.i64(<vscale x 8 x i16> poison, <vscale x 8 x bfloat> [[VS2]], <vscale x 8 x i1> [[VM]], i64 [[VL]], i64 3)
+// CHECK-RV64-NEXT: ret <vscale x 8 x i16> [[TMP0]]
+//
+vuint16m2_t test_vfclass_v_bf16m2_u16m2_m(vbool8_t vm, vbfloat16m2_t vs2,
+ size_t vl) {
+ return __riscv_vfclass_v_bf16m2_u16m2_m(vm, vs2, vl);
+}
+
+// CHECK-RV64-LABEL: define dso_local <vscale x 16 x i16> @test_vfclass_v_bf16m4_u16m4_m(
+// CHECK-RV64-SAME: <vscale x 16 x i1> [[VM:%.*]], <vscale x 16 x bfloat> [[VS2:%.*]], i64 noundef [[VL:%.*]]) #[[ATTR0]] {
+// CHECK-RV64-NEXT: [[ENTRY:.*:]]
+// CHECK-RV64-NEXT: [[TMP0:%.*]] = call <vscale x 16 x i16> @llvm.riscv.vfclass.mask.nxv16bf16.i64(<vscale x 16 x i16> poison, <vscale x 16 x bfloat> [[VS2]], <vscale x 16 x i1> [[VM]], i64 [[VL]], i64 3)
+// CHECK-RV64-NEXT: ret <vscale x 16 x i16> [[TMP0]]
+//
+vuint16m4_t test_vfclass_v_bf16m4_u16m4_m(vbool4_t vm, vbfloat16m4_t vs2,
+ size_t vl) {
+ return __riscv_vfclass_v_bf16m4_u16m4_m(vm, vs2, vl);
+}
+
+// CHECK-RV64-LABEL: define dso_local <vscale x 32 x i16> @test_vfclass_v_bf16m8_u16m8_m(
+// CHECK-RV64-SAME: <vscale x 32 x i1> [[VM:%.*]], <vscale x 32 x bfloat> [[VS2:%.*]], i64 noundef [[VL:%.*]]) #[[ATTR0]] {
+// CHECK-RV64-NEXT: [[ENTRY:.*:]]
+// CHECK-RV64-NEXT: [[TMP0:%.*]] = call <vscale x 32 x i16> @llvm.riscv.vfclass.mask.nxv32bf16.i64(<vscale x 32 x i16> poison, <vscale x 32 x bfloat> [[VS2]], <vscale x 32 x i1> [[VM]], i64 [[VL]], i64 3)
+// CHECK-RV64-NEXT: ret <vscale x 32 x i16> [[TMP0]]
+//
+vuint16m8_t test_vfclass_v_bf16m8_u16m8_m(vbool2_t vm, vbfloat16m8_t vs2,
+ size_t vl) {
+ return __riscv_vfclass_v_bf16m8_u16m8_m(vm, vs2, vl);
+}
diff --git a/clang/test/CodeGen/RISCV/rvv-intrinsics-autogenerated/zvfbfa/non-policy/non-overloaded/vfmacc.c b/clang/test/CodeGen/RISCV/rvv-intrinsics-autogenerated/zvfbfa/non-policy/non-overloaded/vfmacc.c
new file mode 100644
index 0000000..616455d
--- /dev/null
+++ b/clang/test/CodeGen/RISCV/rvv-intrinsics-autogenerated/zvfbfa/non-policy/non-overloaded/vfmacc.c
@@ -0,0 +1,249 @@
+// NOTE: Assertions have been autogenerated by utils/update_cc_test_checks.py UTC_ARGS: --version 5
+// REQUIRES: riscv-registered-target
+// RUN: %clang_cc1 -triple riscv64 -target-feature +v \
+// RUN: -target-feature +experimental-zvfbfa -disable-O0-optnone \
+// RUN: -emit-llvm %s -o - | opt -S -passes=mem2reg | \
+// RUN: FileCheck --check-prefix=CHECK-RV64 %s
+
+#include <riscv_vector.h>
+
+// CHECK-RV64-LABEL: define dso_local <vscale x 1 x bfloat> @test_vfmacc_vv_bf16mf4(
+// CHECK-RV64-SAME: <vscale x 1 x bfloat> [[VD:%.*]], <vscale x 1 x bfloat> [[VS1:%.*]], <vscale x 1 x bfloat> [[VS2:%.*]], i64 noundef [[VL:%.*]]) #[[ATTR0:[0-9]+]] {
+// CHECK-RV64-NEXT: [[ENTRY:.*:]]
+// CHECK-RV64-NEXT: [[TMP0:%.*]] = call <vscale x 1 x bfloat> @llvm.riscv.vfmacc.nxv1bf16.nxv1bf16.i64(<vscale x 1 x bfloat> [[VD]], <vscale x 1 x bfloat> [[VS1]], <vscale x 1 x bfloat> [[VS2]], i64 7, i64 [[VL]], i64 3)
+// CHECK-RV64-NEXT: ret <vscale x 1 x bfloat> [[TMP0]]
+//
+vbfloat16mf4_t test_vfmacc_vv_bf16mf4(vbfloat16mf4_t vd, vbfloat16mf4_t vs1, vbfloat16mf4_t vs2, size_t vl) {
+ return __riscv_vfmacc_vv_bf16mf4(vd, vs1, vs2, vl);
+}
+
+// CHECK-RV64-LABEL: define dso_local <vscale x 1 x bfloat> @test_vfmacc_vf_bf16mf4(
+// CHECK-RV64-SAME: <vscale x 1 x bfloat> [[VD:%.*]], bfloat noundef [[RS1:%.*]], <vscale x 1 x bfloat> [[VS2:%.*]], i64 noundef [[VL:%.*]]) #[[ATTR0]] {
+// CHECK-RV64-NEXT: [[ENTRY:.*:]]
+// CHECK-RV64-NEXT: [[TMP0:%.*]] = call <vscale x 1 x bfloat> @llvm.riscv.vfmacc.nxv1bf16.bf16.i64(<vscale x 1 x bfloat> [[VD]], bfloat [[RS1]], <vscale x 1 x bfloat> [[VS2]], i64 7, i64 [[VL]], i64 3)
+// CHECK-RV64-NEXT: ret <vscale x 1 x bfloat> [[TMP0]]
+//
+vbfloat16mf4_t test_vfmacc_vf_bf16mf4(vbfloat16mf4_t vd, __bf16 rs1, vbfloat16mf4_t vs2, size_t vl) {
+ return __riscv_vfmacc_vf_bf16mf4(vd, rs1, vs2, vl);
+}
+
+// CHECK-RV64-LABEL: define dso_local <vscale x 2 x bfloat> @test_vfmacc_vv_bf16mf2(
+// CHECK-RV64-SAME: <vscale x 2 x bfloat> [[VD:%.*]], <vscale x 2 x bfloat> [[VS1:%.*]], <vscale x 2 x bfloat> [[VS2:%.*]], i64 noundef [[VL:%.*]]) #[[ATTR0]] {
+// CHECK-RV64-NEXT: [[ENTRY:.*:]]
+// CHECK-RV64-NEXT: [[TMP0:%.*]] = call <vscale x 2 x bfloat> @llvm.riscv.vfmacc.nxv2bf16.nxv2bf16.i64(<vscale x 2 x bfloat> [[VD]], <vscale x 2 x bfloat> [[VS1]], <vscale x 2 x bfloat> [[VS2]], i64 7, i64 [[VL]], i64 3)
+// CHECK-RV64-NEXT: ret <vscale x 2 x bfloat> [[TMP0]]
+//
+vbfloat16mf2_t test_vfmacc_vv_bf16mf2(vbfloat16mf2_t vd, vbfloat16mf2_t vs1, vbfloat16mf2_t vs2, size_t vl) {
+ return __riscv_vfmacc_vv_bf16mf2(vd, vs1, vs2, vl);
+}
+
+// CHECK-RV64-LABEL: define dso_local <vscale x 2 x bfloat> @test_vfmacc_vf_bf16mf2(
+// CHECK-RV64-SAME: <vscale x 2 x bfloat> [[VD:%.*]], bfloat noundef [[RS1:%.*]], <vscale x 2 x bfloat> [[VS2:%.*]], i64 noundef [[VL:%.*]]) #[[ATTR0]] {
+// CHECK-RV64-NEXT: [[ENTRY:.*:]]
+// CHECK-RV64-NEXT: [[TMP0:%.*]] = call <vscale x 2 x bfloat> @llvm.riscv.vfmacc.nxv2bf16.bf16.i64(<vscale x 2 x bfloat> [[VD]], bfloat [[RS1]], <vscale x 2 x bfloat> [[VS2]], i64 7, i64 [[VL]], i64 3)
+// CHECK-RV64-NEXT: ret <vscale x 2 x bfloat> [[TMP0]]
+//
+vbfloat16mf2_t test_vfmacc_vf_bf16mf2(vbfloat16mf2_t vd, __bf16 rs1, vbfloat16mf2_t vs2, size_t vl) {
+ return __riscv_vfmacc_vf_bf16mf2(vd, rs1, vs2, vl);
+}
+
+// CHECK-RV64-LABEL: define dso_local <vscale x 4 x bfloat> @test_vfmacc_vv_bf16m1(
+// CHECK-RV64-SAME: <vscale x 4 x bfloat> [[VD:%.*]], <vscale x 4 x bfloat> [[VS1:%.*]], <vscale x 4 x bfloat> [[VS2:%.*]], i64 noundef [[VL:%.*]]) #[[ATTR0]] {
+// CHECK-RV64-NEXT: [[ENTRY:.*:]]
+// CHECK-RV64-NEXT: [[TMP0:%.*]] = call <vscale x 4 x bfloat> @llvm.riscv.vfmacc.nxv4bf16.nxv4bf16.i64(<vscale x 4 x bfloat> [[VD]], <vscale x 4 x bfloat> [[VS1]], <vscale x 4 x bfloat> [[VS2]], i64 7, i64 [[VL]], i64 3)
+// CHECK-RV64-NEXT: ret <vscale x 4 x bfloat> [[TMP0]]
+//
+vbfloat16m1_t test_vfmacc_vv_bf16m1(vbfloat16m1_t vd, vbfloat16m1_t vs1, vbfloat16m1_t vs2, size_t vl) {
+ return __riscv_vfmacc_vv_bf16m1(vd, vs1, vs2, vl);
+}
+
+// CHECK-RV64-LABEL: define dso_local <vscale x 4 x bfloat> @test_vfmacc_vf_bf16m1(
+// CHECK-RV64-SAME: <vscale x 4 x bfloat> [[VD:%.*]], bfloat noundef [[RS1:%.*]], <vscale x 4 x bfloat> [[VS2:%.*]], i64 noundef [[VL:%.*]]) #[[ATTR0]] {
+// CHECK-RV64-NEXT: [[ENTRY:.*:]]
+// CHECK-RV64-NEXT: [[TMP0:%.*]] = call <vscale x 4 x bfloat> @llvm.riscv.vfmacc.nxv4bf16.bf16.i64(<vscale x 4 x bfloat> [[VD]], bfloat [[RS1]], <vscale x 4 x bfloat> [[VS2]], i64 7, i64 [[VL]], i64 3)
+// CHECK-RV64-NEXT: ret <vscale x 4 x bfloat> [[TMP0]]
+//
+vbfloat16m1_t test_vfmacc_vf_bf16m1(vbfloat16m1_t vd, __bf16 rs1, vbfloat16m1_t vs2, size_t vl) {
+ return __riscv_vfmacc_vf_bf16m1(vd, rs1, vs2, vl);
+}
+
+// CHECK-RV64-LABEL: define dso_local <vscale x 8 x bfloat> @test_vfmacc_vv_bf16m2(
+// CHECK-RV64-SAME: <vscale x 8 x bfloat> [[VD:%.*]], <vscale x 8 x bfloat> [[VS1:%.*]], <vscale x 8 x bfloat> [[VS2:%.*]], i64 noundef [[VL:%.*]]) #[[ATTR0]] {
+// CHECK-RV64-NEXT: [[ENTRY:.*:]]
+// CHECK-RV64-NEXT: [[TMP0:%.*]] = call <vscale x 8 x bfloat> @llvm.riscv.vfmacc.nxv8bf16.nxv8bf16.i64(<vscale x 8 x bfloat> [[VD]], <vscale x 8 x bfloat> [[VS1]], <vscale x 8 x bfloat> [[VS2]], i64 7, i64 [[VL]], i64 3)
+// CHECK-RV64-NEXT: ret <vscale x 8 x bfloat> [[TMP0]]
+//
+vbfloat16m2_t test_vfmacc_vv_bf16m2(vbfloat16m2_t vd, vbfloat16m2_t vs1, vbfloat16m2_t vs2, size_t vl) {
+ return __riscv_vfmacc_vv_bf16m2(vd, vs1, vs2, vl);
+}
+
+// CHECK-RV64-LABEL: define dso_local <vscale x 8 x bfloat> @test_vfmacc_vf_bf16m2(
+// CHECK-RV64-SAME: <vscale x 8 x bfloat> [[VD:%.*]], bfloat noundef [[RS1:%.*]], <vscale x 8 x bfloat> [[VS2:%.*]], i64 noundef [[VL:%.*]]) #[[ATTR0]] {
+// CHECK-RV64-NEXT: [[ENTRY:.*:]]
+// CHECK-RV64-NEXT: [[TMP0:%.*]] = call <vscale x 8 x bfloat> @llvm.riscv.vfmacc.nxv8bf16.bf16.i64(<vscale x 8 x bfloat> [[VD]], bfloat [[RS1]], <vscale x 8 x bfloat> [[VS2]], i64 7, i64 [[VL]], i64 3)
+// CHECK-RV64-NEXT: ret <vscale x 8 x bfloat> [[TMP0]]
+//
+vbfloat16m2_t test_vfmacc_vf_bf16m2(vbfloat16m2_t vd, __bf16 rs1, vbfloat16m2_t vs2, size_t vl) {
+ return __riscv_vfmacc_vf_bf16m2(vd, rs1, vs2, vl);
+}
+
+// CHECK-RV64-LABEL: define dso_local <vscale x 16 x bfloat> @test_vfmacc_vv_bf16m4(
+// CHECK-RV64-SAME: <vscale x 16 x bfloat> [[VD:%.*]], <vscale x 16 x bfloat> [[VS1:%.*]], <vscale x 16 x bfloat> [[VS2:%.*]], i64 noundef [[VL:%.*]]) #[[ATTR0]] {
+// CHECK-RV64-NEXT: [[ENTRY:.*:]]
+// CHECK-RV64-NEXT: [[TMP0:%.*]] = call <vscale x 16 x bfloat> @llvm.riscv.vfmacc.nxv16bf16.nxv16bf16.i64(<vscale x 16 x bfloat> [[VD]], <vscale x 16 x bfloat> [[VS1]], <vscale x 16 x bfloat> [[VS2]], i64 7, i64 [[VL]], i64 3)
+// CHECK-RV64-NEXT: ret <vscale x 16 x bfloat> [[TMP0]]
+//
+vbfloat16m4_t test_vfmacc_vv_bf16m4(vbfloat16m4_t vd, vbfloat16m4_t vs1, vbfloat16m4_t vs2, size_t vl) {
+ return __riscv_vfmacc_vv_bf16m4(vd, vs1, vs2, vl);
+}
+
+// CHECK-RV64-LABEL: define dso_local <vscale x 16 x bfloat> @test_vfmacc_vf_bf16m4(
+// CHECK-RV64-SAME: <vscale x 16 x bfloat> [[VD:%.*]], bfloat noundef [[RS1:%.*]], <vscale x 16 x bfloat> [[VS2:%.*]], i64 noundef [[VL:%.*]]) #[[ATTR0]] {
+// CHECK-RV64-NEXT: [[ENTRY:.*:]]
+// CHECK-RV64-NEXT: [[TMP0:%.*]] = call <vscale x 16 x bfloat> @llvm.riscv.vfmacc.nxv16bf16.bf16.i64(<vscale x 16 x bfloat> [[VD]], bfloat [[RS1]], <vscale x 16 x bfloat> [[VS2]], i64 7, i64 [[VL]], i64 3)
+// CHECK-RV64-NEXT: ret <vscale x 16 x bfloat> [[TMP0]]
+//
+vbfloat16m4_t test_vfmacc_vf_bf16m4(vbfloat16m4_t vd, __bf16 rs1, vbfloat16m4_t vs2, size_t vl) {
+ return __riscv_vfmacc_vf_bf16m4(vd, rs1, vs2, vl);
+}
+
+// CHECK-RV64-LABEL: define dso_local <vscale x 32 x bfloat> @test_vfmacc_vv_bf16m8(
+// CHECK-RV64-SAME: <vscale x 32 x bfloat> [[VD:%.*]], <vscale x 32 x bfloat> [[VS1:%.*]], <vscale x 32 x bfloat> [[VS2:%.*]], i64 noundef [[VL:%.*]]) #[[ATTR0]] {
+// CHECK-RV64-NEXT: [[ENTRY:.*:]]
+// CHECK-RV64-NEXT: [[TMP0:%.*]] = call <vscale x 32 x bfloat> @llvm.riscv.vfmacc.nxv32bf16.nxv32bf16.i64(<vscale x 32 x bfloat> [[VD]], <vscale x 32 x bfloat> [[VS1]], <vscale x 32 x bfloat> [[VS2]], i64 7, i64 [[VL]], i64 3)
+// CHECK-RV64-NEXT: ret <vscale x 32 x bfloat> [[TMP0]]
+//
+vbfloat16m8_t test_vfmacc_vv_bf16m8(vbfloat16m8_t vd, vbfloat16m8_t vs1, vbfloat16m8_t vs2, size_t vl) {
+ return __riscv_vfmacc_vv_bf16m8(vd, vs1, vs2, vl);
+}
+
+// CHECK-RV64-LABEL: define dso_local <vscale x 32 x bfloat> @test_vfmacc_vf_bf16m8(
+// CHECK-RV64-SAME: <vscale x 32 x bfloat> [[VD:%.*]], bfloat noundef [[RS1:%.*]], <vscale x 32 x bfloat> [[VS2:%.*]], i64 noundef [[VL:%.*]]) #[[ATTR0]] {
+// CHECK-RV64-NEXT: [[ENTRY:.*:]]
+// CHECK-RV64-NEXT: [[TMP0:%.*]] = call <vscale x 32 x bfloat> @llvm.riscv.vfmacc.nxv32bf16.bf16.i64(<vscale x 32 x bfloat> [[VD]], bfloat [[RS1]], <vscale x 32 x bfloat> [[VS2]], i64 7, i64 [[VL]], i64 3)
+// CHECK-RV64-NEXT: ret <vscale x 32 x bfloat> [[TMP0]]
+//
+vbfloat16m8_t test_vfmacc_vf_bf16m8(vbfloat16m8_t vd, __bf16 rs1, vbfloat16m8_t vs2, size_t vl) {
+ return __riscv_vfmacc_vf_bf16m8(vd, rs1, vs2, vl);
+}
+
+// CHECK-RV64-LABEL: define dso_local <vscale x 1 x bfloat> @test_vfmacc_vv_bf16mf4_m(
+// CHECK-RV64-SAME: <vscale x 1 x i1> [[MASK:%.*]], <vscale x 1 x bfloat> [[VD:%.*]], <vscale x 1 x bfloat> [[VS1:%.*]], <vscale x 1 x bfloat> [[VS2:%.*]], i64 noundef [[VL:%.*]]) #[[ATTR0]] {
+// CHECK-RV64-NEXT: [[ENTRY:.*:]]
+// CHECK-RV64-NEXT: [[TMP0:%.*]] = call <vscale x 1 x bfloat> @llvm.riscv.vfmacc.mask.nxv1bf16.nxv1bf16.i64(<vscale x 1 x bfloat> [[VD]], <vscale x 1 x bfloat> [[VS1]], <vscale x 1 x bfloat> [[VS2]], <vscale x 1 x i1> [[MASK]], i64 7, i64 [[VL]], i64 3)
+// CHECK-RV64-NEXT: ret <vscale x 1 x bfloat> [[TMP0]]
+//
+vbfloat16mf4_t test_vfmacc_vv_bf16mf4_m(vbool64_t mask, vbfloat16mf4_t vd, vbfloat16mf4_t vs1, vbfloat16mf4_t vs2, size_t vl) {
+ return __riscv_vfmacc_vv_bf16mf4_m(mask, vd, vs1, vs2, vl);
+}
+
+// CHECK-RV64-LABEL: define dso_local <vscale x 1 x bfloat> @test_vfmacc_vf_bf16mf4_m(
+// CHECK-RV64-SAME: <vscale x 1 x i1> [[MASK:%.*]], <vscale x 1 x bfloat> [[VD:%.*]], bfloat noundef [[RS1:%.*]], <vscale x 1 x bfloat> [[VS2:%.*]], i64 noundef [[VL:%.*]]) #[[ATTR0]] {
+// CHECK-RV64-NEXT: [[ENTRY:.*:]]
+// CHECK-RV64-NEXT: [[TMP0:%.*]] = call <vscale x 1 x bfloat> @llvm.riscv.vfmacc.mask.nxv1bf16.bf16.i64(<vscale x 1 x bfloat> [[VD]], bfloat [[RS1]], <vscale x 1 x bfloat> [[VS2]], <vscale x 1 x i1> [[MASK]], i64 7, i64 [[VL]], i64 3)
+// CHECK-RV64-NEXT: ret <vscale x 1 x bfloat> [[TMP0]]
+//
+vbfloat16mf4_t test_vfmacc_vf_bf16mf4_m(vbool64_t mask, vbfloat16mf4_t vd, __bf16 rs1, vbfloat16mf4_t vs2, size_t vl) {
+ return __riscv_vfmacc_vf_bf16mf4_m(mask, vd, rs1, vs2, vl);
+}
+
+// CHECK-RV64-LABEL: define dso_local <vscale x 2 x bfloat> @test_vfmacc_vv_bf16mf2_m(
+// CHECK-RV64-SAME: <vscale x 2 x i1> [[MASK:%.*]], <vscale x 2 x bfloat> [[VD:%.*]], <vscale x 2 x bfloat> [[VS1:%.*]], <vscale x 2 x bfloat> [[VS2:%.*]], i64 noundef [[VL:%.*]]) #[[ATTR0]] {
+// CHECK-RV64-NEXT: [[ENTRY:.*:]]
+// CHECK-RV64-NEXT: [[TMP0:%.*]] = call <vscale x 2 x bfloat> @llvm.riscv.vfmacc.mask.nxv2bf16.nxv2bf16.i64(<vscale x 2 x bfloat> [[VD]], <vscale x 2 x bfloat> [[VS1]], <vscale x 2 x bfloat> [[VS2]], <vscale x 2 x i1> [[MASK]], i64 7, i64 [[VL]], i64 3)
+// CHECK-RV64-NEXT: ret <vscale x 2 x bfloat> [[TMP0]]
+//
+vbfloat16mf2_t test_vfmacc_vv_bf16mf2_m(vbool32_t mask, vbfloat16mf2_t vd, vbfloat16mf2_t vs1, vbfloat16mf2_t vs2, size_t vl) {
+ return __riscv_vfmacc_vv_bf16mf2_m(mask, vd, vs1, vs2, vl);
+}
+
+// CHECK-RV64-LABEL: define dso_local <vscale x 2 x bfloat> @test_vfmacc_vf_bf16mf2_m(
+// CHECK-RV64-SAME: <vscale x 2 x i1> [[MASK:%.*]], <vscale x 2 x bfloat> [[VD:%.*]], bfloat noundef [[RS1:%.*]], <vscale x 2 x bfloat> [[VS2:%.*]], i64 noundef [[VL:%.*]]) #[[ATTR0]] {
+// CHECK-RV64-NEXT: [[ENTRY:.*:]]
+// CHECK-RV64-NEXT: [[TMP0:%.*]] = call <vscale x 2 x bfloat> @llvm.riscv.vfmacc.mask.nxv2bf16.bf16.i64(<vscale x 2 x bfloat> [[VD]], bfloat [[RS1]], <vscale x 2 x bfloat> [[VS2]], <vscale x 2 x i1> [[MASK]], i64 7, i64 [[VL]], i64 3)
+// CHECK-RV64-NEXT: ret <vscale x 2 x bfloat> [[TMP0]]
+//
+vbfloat16mf2_t test_vfmacc_vf_bf16mf2_m(vbool32_t mask, vbfloat16mf2_t vd, __bf16 rs1, vbfloat16mf2_t vs2, size_t vl) {
+ return __riscv_vfmacc_vf_bf16mf2_m(mask, vd, rs1, vs2, vl);
+}
+
+// CHECK-RV64-LABEL: define dso_local <vscale x 4 x bfloat> @test_vfmacc_vv_bf16m1_m(
+// CHECK-RV64-SAME: <vscale x 4 x i1> [[MASK:%.*]], <vscale x 4 x bfloat> [[VD:%.*]], <vscale x 4 x bfloat> [[VS1:%.*]], <vscale x 4 x bfloat> [[VS2:%.*]], i64 noundef [[VL:%.*]]) #[[ATTR0]] {
+// CHECK-RV64-NEXT: [[ENTRY:.*:]]
+// CHECK-RV64-NEXT: [[TMP0:%.*]] = call <vscale x 4 x bfloat> @llvm.riscv.vfmacc.mask.nxv4bf16.nxv4bf16.i64(<vscale x 4 x bfloat> [[VD]], <vscale x 4 x bfloat> [[VS1]], <vscale x 4 x bfloat> [[VS2]], <vscale x 4 x i1> [[MASK]], i64 7, i64 [[VL]], i64 3)
+// CHECK-RV64-NEXT: ret <vscale x 4 x bfloat> [[TMP0]]
+//
+vbfloat16m1_t test_vfmacc_vv_bf16m1_m(vbool16_t mask, vbfloat16m1_t vd, vbfloat16m1_t vs1, vbfloat16m1_t vs2, size_t vl) {
+ return __riscv_vfmacc_vv_bf16m1_m(mask, vd, vs1, vs2, vl);
+}
+
+// CHECK-RV64-LABEL: define dso_local <vscale x 4 x bfloat> @test_vfmacc_vf_bf16m1_m(
+// CHECK-RV64-SAME: <vscale x 4 x i1> [[MASK:%.*]], <vscale x 4 x bfloat> [[VD:%.*]], bfloat noundef [[RS1:%.*]], <vscale x 4 x bfloat> [[VS2:%.*]], i64 noundef [[VL:%.*]]) #[[ATTR0]] {
+// CHECK-RV64-NEXT: [[ENTRY:.*:]]
+// CHECK-RV64-NEXT: [[TMP0:%.*]] = call <vscale x 4 x bfloat> @llvm.riscv.vfmacc.mask.nxv4bf16.bf16.i64(<vscale x 4 x bfloat> [[VD]], bfloat [[RS1]], <vscale x 4 x bfloat> [[VS2]], <vscale x 4 x i1> [[MASK]], i64 7, i64 [[VL]], i64 3)
+// CHECK-RV64-NEXT: ret <vscale x 4 x bfloat> [[TMP0]]
+//
+vbfloat16m1_t test_vfmacc_vf_bf16m1_m(vbool16_t mask, vbfloat16m1_t vd, __bf16 rs1, vbfloat16m1_t vs2, size_t vl) {
+ return __riscv_vfmacc_vf_bf16m1_m(mask, vd, rs1, vs2, vl);
+}
+
+// CHECK-RV64-LABEL: define dso_local <vscale x 8 x bfloat> @test_vfmacc_vv_bf16m2_m(
+// CHECK-RV64-SAME: <vscale x 8 x i1> [[MASK:%.*]], <vscale x 8 x bfloat> [[VD:%.*]], <vscale x 8 x bfloat> [[VS1:%.*]], <vscale x 8 x bfloat> [[VS2:%.*]], i64 noundef [[VL:%.*]]) #[[ATTR0]] {
+// CHECK-RV64-NEXT: [[ENTRY:.*:]]
+// CHECK-RV64-NEXT: [[TMP0:%.*]] = call <vscale x 8 x bfloat> @llvm.riscv.vfmacc.mask.nxv8bf16.nxv8bf16.i64(<vscale x 8 x bfloat> [[VD]], <vscale x 8 x bfloat> [[VS1]], <vscale x 8 x bfloat> [[VS2]], <vscale x 8 x i1> [[MASK]], i64 7, i64 [[VL]], i64 3)
+// CHECK-RV64-NEXT: ret <vscale x 8 x bfloat> [[TMP0]]
+//
+vbfloat16m2_t test_vfmacc_vv_bf16m2_m(vbool8_t mask, vbfloat16m2_t vd, vbfloat16m2_t vs1, vbfloat16m2_t vs2, size_t vl) {
+ return __riscv_vfmacc_vv_bf16m2_m(mask, vd, vs1, vs2, vl);
+}
+
+// CHECK-RV64-LABEL: define dso_local <vscale x 8 x bfloat> @test_vfmacc_vf_bf16m2_m(
+// CHECK-RV64-SAME: <vscale x 8 x i1> [[MASK:%.*]], <vscale x 8 x bfloat> [[VD:%.*]], bfloat noundef [[RS1:%.*]], <vscale x 8 x bfloat> [[VS2:%.*]], i64 noundef [[VL:%.*]]) #[[ATTR0]] {
+// CHECK-RV64-NEXT: [[ENTRY:.*:]]
+// CHECK-RV64-NEXT: [[TMP0:%.*]] = call <vscale x 8 x bfloat> @llvm.riscv.vfmacc.mask.nxv8bf16.bf16.i64(<vscale x 8 x bfloat> [[VD]], bfloat [[RS1]], <vscale x 8 x bfloat> [[VS2]], <vscale x 8 x i1> [[MASK]], i64 7, i64 [[VL]], i64 3)
+// CHECK-RV64-NEXT: ret <vscale x 8 x bfloat> [[TMP0]]
+//
+vbfloat16m2_t test_vfmacc_vf_bf16m2_m(vbool8_t mask, vbfloat16m2_t vd, __bf16 rs1, vbfloat16m2_t vs2, size_t vl) {
+ return __riscv_vfmacc_vf_bf16m2_m(mask, vd, rs1, vs2, vl);
+}
+
+// CHECK-RV64-LABEL: define dso_local <vscale x 16 x bfloat> @test_vfmacc_vv_bf16m4_m(
+// CHECK-RV64-SAME: <vscale x 16 x i1> [[MASK:%.*]], <vscale x 16 x bfloat> [[VD:%.*]], <vscale x 16 x bfloat> [[VS1:%.*]], <vscale x 16 x bfloat> [[VS2:%.*]], i64 noundef [[VL:%.*]]) #[[ATTR0]] {
+// CHECK-RV64-NEXT: [[ENTRY:.*:]]
+// CHECK-RV64-NEXT: [[TMP0:%.*]] = call <vscale x 16 x bfloat> @llvm.riscv.vfmacc.mask.nxv16bf16.nxv16bf16.i64(<vscale x 16 x bfloat> [[VD]], <vscale x 16 x bfloat> [[VS1]], <vscale x 16 x bfloat> [[VS2]], <vscale x 16 x i1> [[MASK]], i64 7, i64 [[VL]], i64 3)
+// CHECK-RV64-NEXT: ret <vscale x 16 x bfloat> [[TMP0]]
+//
+vbfloat16m4_t test_vfmacc_vv_bf16m4_m(vbool4_t mask, vbfloat16m4_t vd, vbfloat16m4_t vs1, vbfloat16m4_t vs2, size_t vl) {
+ return __riscv_vfmacc_vv_bf16m4_m(mask, vd, vs1, vs2, vl);
+}
+
+// CHECK-RV64-LABEL: define dso_local <vscale x 16 x bfloat> @test_vfmacc_vf_bf16m4_m(
+// CHECK-RV64-SAME: <vscale x 16 x i1> [[MASK:%.*]], <vscale x 16 x bfloat> [[VD:%.*]], bfloat noundef [[RS1:%.*]], <vscale x 16 x bfloat> [[VS2:%.*]], i64 noundef [[VL:%.*]]) #[[ATTR0]] {
+// CHECK-RV64-NEXT: [[ENTRY:.*:]]
+// CHECK-RV64-NEXT: [[TMP0:%.*]] = call <vscale x 16 x bfloat> @llvm.riscv.vfmacc.mask.nxv16bf16.bf16.i64(<vscale x 16 x bfloat> [[VD]], bfloat [[RS1]], <vscale x 16 x bfloat> [[VS2]], <vscale x 16 x i1> [[MASK]], i64 7, i64 [[VL]], i64 3)
+// CHECK-RV64-NEXT: ret <vscale x 16 x bfloat> [[TMP0]]
+//
+vbfloat16m4_t test_vfmacc_vf_bf16m4_m(vbool4_t mask, vbfloat16m4_t vd, __bf16 rs1, vbfloat16m4_t vs2, size_t vl) {
+ return __riscv_vfmacc_vf_bf16m4_m(mask, vd, rs1, vs2, vl);
+}
+
+// CHECK-RV64-LABEL: define dso_local <vscale x 32 x bfloat> @test_vfmacc_vv_bf16m8_m(
+// CHECK-RV64-SAME: <vscale x 32 x i1> [[MASK:%.*]], <vscale x 32 x bfloat> [[VD:%.*]], <vscale x 32 x bfloat> [[VS1:%.*]], <vscale x 32 x bfloat> [[VS2:%.*]], i64 noundef [[VL:%.*]]) #[[ATTR0]] {
+// CHECK-RV64-NEXT: [[ENTRY:.*:]]
+// CHECK-RV64-NEXT: [[TMP0:%.*]] = call <vscale x 32 x bfloat> @llvm.riscv.vfmacc.mask.nxv32bf16.nxv32bf16.i64(<vscale x 32 x bfloat> [[VD]], <vscale x 32 x bfloat> [[VS1]], <vscale x 32 x bfloat> [[VS2]], <vscale x 32 x i1> [[MASK]], i64 7, i64 [[VL]], i64 3)
+// CHECK-RV64-NEXT: ret <vscale x 32 x bfloat> [[TMP0]]
+//
+vbfloat16m8_t test_vfmacc_vv_bf16m8_m(vbool2_t mask, vbfloat16m8_t vd, vbfloat16m8_t vs1, vbfloat16m8_t vs2, size_t vl) {
+ return __riscv_vfmacc_vv_bf16m8_m(mask, vd, vs1, vs2, vl);
+}
+
+// CHECK-RV64-LABEL: define dso_local <vscale x 32 x bfloat> @test_vfmacc_vf_bf16m8_m(
+// CHECK-RV64-SAME: <vscale x 32 x i1> [[MASK:%.*]], <vscale x 32 x bfloat> [[VD:%.*]], bfloat noundef [[RS1:%.*]], <vscale x 32 x bfloat> [[VS2:%.*]], i64 noundef [[VL:%.*]]) #[[ATTR0]] {
+// CHECK-RV64-NEXT: [[ENTRY:.*:]]
+// CHECK-RV64-NEXT: [[TMP0:%.*]] = call <vscale x 32 x bfloat> @llvm.riscv.vfmacc.mask.nxv32bf16.bf16.i64(<vscale x 32 x bfloat> [[VD]], bfloat [[RS1]], <vscale x 32 x bfloat> [[VS2]], <vscale x 32 x i1> [[MASK]], i64 7, i64 [[VL]], i64 3)
+// CHECK-RV64-NEXT: ret <vscale x 32 x bfloat> [[TMP0]]
+//
+vbfloat16m8_t test_vfmacc_vf_bf16m8_m(vbool2_t mask, vbfloat16m8_t vd, __bf16 rs1, vbfloat16m8_t vs2, size_t vl) {
+ return __riscv_vfmacc_vf_bf16m8_m(mask, vd, rs1, vs2, vl);
+}
+
diff --git a/clang/test/CodeGen/RISCV/rvv-intrinsics-autogenerated/zvfbfa/non-policy/non-overloaded/vfmadd.c b/clang/test/CodeGen/RISCV/rvv-intrinsics-autogenerated/zvfbfa/non-policy/non-overloaded/vfmadd.c
new file mode 100644
index 0000000..eec662a
--- /dev/null
+++ b/clang/test/CodeGen/RISCV/rvv-intrinsics-autogenerated/zvfbfa/non-policy/non-overloaded/vfmadd.c
@@ -0,0 +1,249 @@
+// NOTE: Assertions have been autogenerated by utils/update_cc_test_checks.py UTC_ARGS: --version 5
+// REQUIRES: riscv-registered-target
+// RUN: %clang_cc1 -triple riscv64 -target-feature +v \
+// RUN: -target-feature +experimental-zvfbfa -disable-O0-optnone \
+// RUN: -emit-llvm %s -o - | opt -S -passes=mem2reg | \
+// RUN: FileCheck --check-prefix=CHECK-RV64 %s
+
+#include <riscv_vector.h>
+
+// CHECK-RV64-LABEL: define dso_local <vscale x 1 x bfloat> @test_vfmadd_vv_bf16mf4(
+// CHECK-RV64-SAME: <vscale x 1 x bfloat> [[VD:%.*]], <vscale x 1 x bfloat> [[VS1:%.*]], <vscale x 1 x bfloat> [[VS2:%.*]], i64 noundef [[VL:%.*]]) #[[ATTR0:[0-9]+]] {
+// CHECK-RV64-NEXT: [[ENTRY:.*:]]
+// CHECK-RV64-NEXT: [[TMP0:%.*]] = call <vscale x 1 x bfloat> @llvm.riscv.vfmadd.nxv1bf16.nxv1bf16.i64(<vscale x 1 x bfloat> [[VD]], <vscale x 1 x bfloat> [[VS1]], <vscale x 1 x bfloat> [[VS2]], i64 7, i64 [[VL]], i64 3)
+// CHECK-RV64-NEXT: ret <vscale x 1 x bfloat> [[TMP0]]
+//
+vbfloat16mf4_t test_vfmadd_vv_bf16mf4(vbfloat16mf4_t vd, vbfloat16mf4_t vs1, vbfloat16mf4_t vs2, size_t vl) {
+ return __riscv_vfmadd_vv_bf16mf4(vd, vs1, vs2, vl);
+}
+
+// CHECK-RV64-LABEL: define dso_local <vscale x 1 x bfloat> @test_vfmadd_vf_bf16mf4(
+// CHECK-RV64-SAME: <vscale x 1 x bfloat> [[VD:%.*]], bfloat noundef [[RS1:%.*]], <vscale x 1 x bfloat> [[VS2:%.*]], i64 noundef [[VL:%.*]]) #[[ATTR0]] {
+// CHECK-RV64-NEXT: [[ENTRY:.*:]]
+// CHECK-RV64-NEXT: [[TMP0:%.*]] = call <vscale x 1 x bfloat> @llvm.riscv.vfmadd.nxv1bf16.bf16.i64(<vscale x 1 x bfloat> [[VD]], bfloat [[RS1]], <vscale x 1 x bfloat> [[VS2]], i64 7, i64 [[VL]], i64 3)
+// CHECK-RV64-NEXT: ret <vscale x 1 x bfloat> [[TMP0]]
+//
+vbfloat16mf4_t test_vfmadd_vf_bf16mf4(vbfloat16mf4_t vd, __bf16 rs1, vbfloat16mf4_t vs2, size_t vl) {
+ return __riscv_vfmadd_vf_bf16mf4(vd, rs1, vs2, vl);
+}
+
+// CHECK-RV64-LABEL: define dso_local <vscale x 2 x bfloat> @test_vfmadd_vv_bf16mf2(
+// CHECK-RV64-SAME: <vscale x 2 x bfloat> [[VD:%.*]], <vscale x 2 x bfloat> [[VS1:%.*]], <vscale x 2 x bfloat> [[VS2:%.*]], i64 noundef [[VL:%.*]]) #[[ATTR0]] {
+// CHECK-RV64-NEXT: [[ENTRY:.*:]]
+// CHECK-RV64-NEXT: [[TMP0:%.*]] = call <vscale x 2 x bfloat> @llvm.riscv.vfmadd.nxv2bf16.nxv2bf16.i64(<vscale x 2 x bfloat> [[VD]], <vscale x 2 x bfloat> [[VS1]], <vscale x 2 x bfloat> [[VS2]], i64 7, i64 [[VL]], i64 3)
+// CHECK-RV64-NEXT: ret <vscale x 2 x bfloat> [[TMP0]]
+//
+vbfloat16mf2_t test_vfmadd_vv_bf16mf2(vbfloat16mf2_t vd, vbfloat16mf2_t vs1, vbfloat16mf2_t vs2, size_t vl) {
+ return __riscv_vfmadd_vv_bf16mf2(vd, vs1, vs2, vl);
+}
+
+// CHECK-RV64-LABEL: define dso_local <vscale x 2 x bfloat> @test_vfmadd_vf_bf16mf2(
+// CHECK-RV64-SAME: <vscale x 2 x bfloat> [[VD:%.*]], bfloat noundef [[RS1:%.*]], <vscale x 2 x bfloat> [[VS2:%.*]], i64 noundef [[VL:%.*]]) #[[ATTR0]] {
+// CHECK-RV64-NEXT: [[ENTRY:.*:]]
+// CHECK-RV64-NEXT: [[TMP0:%.*]] = call <vscale x 2 x bfloat> @llvm.riscv.vfmadd.nxv2bf16.bf16.i64(<vscale x 2 x bfloat> [[VD]], bfloat [[RS1]], <vscale x 2 x bfloat> [[VS2]], i64 7, i64 [[VL]], i64 3)
+// CHECK-RV64-NEXT: ret <vscale x 2 x bfloat> [[TMP0]]
+//
+vbfloat16mf2_t test_vfmadd_vf_bf16mf2(vbfloat16mf2_t vd, __bf16 rs1, vbfloat16mf2_t vs2, size_t vl) {
+ return __riscv_vfmadd_vf_bf16mf2(vd, rs1, vs2, vl);
+}
+
+// CHECK-RV64-LABEL: define dso_local <vscale x 4 x bfloat> @test_vfmadd_vv_bf16m1(
+// CHECK-RV64-SAME: <vscale x 4 x bfloat> [[VD:%.*]], <vscale x 4 x bfloat> [[VS1:%.*]], <vscale x 4 x bfloat> [[VS2:%.*]], i64 noundef [[VL:%.*]]) #[[ATTR0]] {
+// CHECK-RV64-NEXT: [[ENTRY:.*:]]
+// CHECK-RV64-NEXT: [[TMP0:%.*]] = call <vscale x 4 x bfloat> @llvm.riscv.vfmadd.nxv4bf16.nxv4bf16.i64(<vscale x 4 x bfloat> [[VD]], <vscale x 4 x bfloat> [[VS1]], <vscale x 4 x bfloat> [[VS2]], i64 7, i64 [[VL]], i64 3)
+// CHECK-RV64-NEXT: ret <vscale x 4 x bfloat> [[TMP0]]
+//
+vbfloat16m1_t test_vfmadd_vv_bf16m1(vbfloat16m1_t vd, vbfloat16m1_t vs1, vbfloat16m1_t vs2, size_t vl) {
+ return __riscv_vfmadd_vv_bf16m1(vd, vs1, vs2, vl);
+}
+
+// CHECK-RV64-LABEL: define dso_local <vscale x 4 x bfloat> @test_vfmadd_vf_bf16m1(
+// CHECK-RV64-SAME: <vscale x 4 x bfloat> [[VD:%.*]], bfloat noundef [[RS1:%.*]], <vscale x 4 x bfloat> [[VS2:%.*]], i64 noundef [[VL:%.*]]) #[[ATTR0]] {
+// CHECK-RV64-NEXT: [[ENTRY:.*:]]
+// CHECK-RV64-NEXT: [[TMP0:%.*]] = call <vscale x 4 x bfloat> @llvm.riscv.vfmadd.nxv4bf16.bf16.i64(<vscale x 4 x bfloat> [[VD]], bfloat [[RS1]], <vscale x 4 x bfloat> [[VS2]], i64 7, i64 [[VL]], i64 3)
+// CHECK-RV64-NEXT: ret <vscale x 4 x bfloat> [[TMP0]]
+//
+vbfloat16m1_t test_vfmadd_vf_bf16m1(vbfloat16m1_t vd, __bf16 rs1, vbfloat16m1_t vs2, size_t vl) {
+ return __riscv_vfmadd_vf_bf16m1(vd, rs1, vs2, vl);
+}
+
+// CHECK-RV64-LABEL: define dso_local <vscale x 8 x bfloat> @test_vfmadd_vv_bf16m2(
+// CHECK-RV64-SAME: <vscale x 8 x bfloat> [[VD:%.*]], <vscale x 8 x bfloat> [[VS1:%.*]], <vscale x 8 x bfloat> [[VS2:%.*]], i64 noundef [[VL:%.*]]) #[[ATTR0]] {
+// CHECK-RV64-NEXT: [[ENTRY:.*:]]
+// CHECK-RV64-NEXT: [[TMP0:%.*]] = call <vscale x 8 x bfloat> @llvm.riscv.vfmadd.nxv8bf16.nxv8bf16.i64(<vscale x 8 x bfloat> [[VD]], <vscale x 8 x bfloat> [[VS1]], <vscale x 8 x bfloat> [[VS2]], i64 7, i64 [[VL]], i64 3)
+// CHECK-RV64-NEXT: ret <vscale x 8 x bfloat> [[TMP0]]
+//
+vbfloat16m2_t test_vfmadd_vv_bf16m2(vbfloat16m2_t vd, vbfloat16m2_t vs1, vbfloat16m2_t vs2, size_t vl) {
+ return __riscv_vfmadd_vv_bf16m2(vd, vs1, vs2, vl);
+}
+
+// CHECK-RV64-LABEL: define dso_local <vscale x 8 x bfloat> @test_vfmadd_vf_bf16m2(
+// CHECK-RV64-SAME: <vscale x 8 x bfloat> [[VD:%.*]], bfloat noundef [[RS1:%.*]], <vscale x 8 x bfloat> [[VS2:%.*]], i64 noundef [[VL:%.*]]) #[[ATTR0]] {
+// CHECK-RV64-NEXT: [[ENTRY:.*:]]
+// CHECK-RV64-NEXT: [[TMP0:%.*]] = call <vscale x 8 x bfloat> @llvm.riscv.vfmadd.nxv8bf16.bf16.i64(<vscale x 8 x bfloat> [[VD]], bfloat [[RS1]], <vscale x 8 x bfloat> [[VS2]], i64 7, i64 [[VL]], i64 3)
+// CHECK-RV64-NEXT: ret <vscale x 8 x bfloat> [[TMP0]]
+//
+vbfloat16m2_t test_vfmadd_vf_bf16m2(vbfloat16m2_t vd, __bf16 rs1, vbfloat16m2_t vs2, size_t vl) {
+ return __riscv_vfmadd_vf_bf16m2(vd, rs1, vs2, vl);
+}
+
+// CHECK-RV64-LABEL: define dso_local <vscale x 16 x bfloat> @test_vfmadd_vv_bf16m4(
+// CHECK-RV64-SAME: <vscale x 16 x bfloat> [[VD:%.*]], <vscale x 16 x bfloat> [[VS1:%.*]], <vscale x 16 x bfloat> [[VS2:%.*]], i64 noundef [[VL:%.*]]) #[[ATTR0]] {
+// CHECK-RV64-NEXT: [[ENTRY:.*:]]
+// CHECK-RV64-NEXT: [[TMP0:%.*]] = call <vscale x 16 x bfloat> @llvm.riscv.vfmadd.nxv16bf16.nxv16bf16.i64(<vscale x 16 x bfloat> [[VD]], <vscale x 16 x bfloat> [[VS1]], <vscale x 16 x bfloat> [[VS2]], i64 7, i64 [[VL]], i64 3)
+// CHECK-RV64-NEXT: ret <vscale x 16 x bfloat> [[TMP0]]
+//
+vbfloat16m4_t test_vfmadd_vv_bf16m4(vbfloat16m4_t vd, vbfloat16m4_t vs1, vbfloat16m4_t vs2, size_t vl) {
+ return __riscv_vfmadd_vv_bf16m4(vd, vs1, vs2, vl);
+}
+
+// CHECK-RV64-LABEL: define dso_local <vscale x 16 x bfloat> @test_vfmadd_vf_bf16m4(
+// CHECK-RV64-SAME: <vscale x 16 x bfloat> [[VD:%.*]], bfloat noundef [[RS1:%.*]], <vscale x 16 x bfloat> [[VS2:%.*]], i64 noundef [[VL:%.*]]) #[[ATTR0]] {
+// CHECK-RV64-NEXT: [[ENTRY:.*:]]
+// CHECK-RV64-NEXT: [[TMP0:%.*]] = call <vscale x 16 x bfloat> @llvm.riscv.vfmadd.nxv16bf16.bf16.i64(<vscale x 16 x bfloat> [[VD]], bfloat [[RS1]], <vscale x 16 x bfloat> [[VS2]], i64 7, i64 [[VL]], i64 3)
+// CHECK-RV64-NEXT: ret <vscale x 16 x bfloat> [[TMP0]]
+//
+vbfloat16m4_t test_vfmadd_vf_bf16m4(vbfloat16m4_t vd, __bf16 rs1, vbfloat16m4_t vs2, size_t vl) {
+ return __riscv_vfmadd_vf_bf16m4(vd, rs1, vs2, vl);
+}
+
+// CHECK-RV64-LABEL: define dso_local <vscale x 32 x bfloat> @test_vfmadd_vv_bf16m8(
+// CHECK-RV64-SAME: <vscale x 32 x bfloat> [[VD:%.*]], <vscale x 32 x bfloat> [[VS1:%.*]], <vscale x 32 x bfloat> [[VS2:%.*]], i64 noundef [[VL:%.*]]) #[[ATTR0]] {
+// CHECK-RV64-NEXT: [[ENTRY:.*:]]
+// CHECK-RV64-NEXT: [[TMP0:%.*]] = call <vscale x 32 x bfloat> @llvm.riscv.vfmadd.nxv32bf16.nxv32bf16.i64(<vscale x 32 x bfloat> [[VD]], <vscale x 32 x bfloat> [[VS1]], <vscale x 32 x bfloat> [[VS2]], i64 7, i64 [[VL]], i64 3)
+// CHECK-RV64-NEXT: ret <vscale x 32 x bfloat> [[TMP0]]
+//
+vbfloat16m8_t test_vfmadd_vv_bf16m8(vbfloat16m8_t vd, vbfloat16m8_t vs1, vbfloat16m8_t vs2, size_t vl) {
+ return __riscv_vfmadd_vv_bf16m8(vd, vs1, vs2, vl);
+}
+
+// CHECK-RV64-LABEL: define dso_local <vscale x 32 x bfloat> @test_vfmadd_vf_bf16m8(
+// CHECK-RV64-SAME: <vscale x 32 x bfloat> [[VD:%.*]], bfloat noundef [[RS1:%.*]], <vscale x 32 x bfloat> [[VS2:%.*]], i64 noundef [[VL:%.*]]) #[[ATTR0]] {
+// CHECK-RV64-NEXT: [[ENTRY:.*:]]
+// CHECK-RV64-NEXT: [[TMP0:%.*]] = call <vscale x 32 x bfloat> @llvm.riscv.vfmadd.nxv32bf16.bf16.i64(<vscale x 32 x bfloat> [[VD]], bfloat [[RS1]], <vscale x 32 x bfloat> [[VS2]], i64 7, i64 [[VL]], i64 3)
+// CHECK-RV64-NEXT: ret <vscale x 32 x bfloat> [[TMP0]]
+//
+vbfloat16m8_t test_vfmadd_vf_bf16m8(vbfloat16m8_t vd, __bf16 rs1, vbfloat16m8_t vs2, size_t vl) {
+ return __riscv_vfmadd_vf_bf16m8(vd, rs1, vs2, vl);
+}
+
+// CHECK-RV64-LABEL: define dso_local <vscale x 1 x bfloat> @test_vfmadd_vv_bf16mf4_m(
+// CHECK-RV64-SAME: <vscale x 1 x i1> [[MASK:%.*]], <vscale x 1 x bfloat> [[VD:%.*]], <vscale x 1 x bfloat> [[VS1:%.*]], <vscale x 1 x bfloat> [[VS2:%.*]], i64 noundef [[VL:%.*]]) #[[ATTR0]] {
+// CHECK-RV64-NEXT: [[ENTRY:.*:]]
+// CHECK-RV64-NEXT: [[TMP0:%.*]] = call <vscale x 1 x bfloat> @llvm.riscv.vfmadd.mask.nxv1bf16.nxv1bf16.i64(<vscale x 1 x bfloat> [[VD]], <vscale x 1 x bfloat> [[VS1]], <vscale x 1 x bfloat> [[VS2]], <vscale x 1 x i1> [[MASK]], i64 7, i64 [[VL]], i64 3)
+// CHECK-RV64-NEXT: ret <vscale x 1 x bfloat> [[TMP0]]
+//
+vbfloat16mf4_t test_vfmadd_vv_bf16mf4_m(vbool64_t mask, vbfloat16mf4_t vd, vbfloat16mf4_t vs1, vbfloat16mf4_t vs2, size_t vl) {
+ return __riscv_vfmadd_vv_bf16mf4_m(mask, vd, vs1, vs2, vl);
+}
+
+// CHECK-RV64-LABEL: define dso_local <vscale x 1 x bfloat> @test_vfmadd_vf_bf16mf4_m(
+// CHECK-RV64-SAME: <vscale x 1 x i1> [[MASK:%.*]], <vscale x 1 x bfloat> [[VD:%.*]], bfloat noundef [[RS1:%.*]], <vscale x 1 x bfloat> [[VS2:%.*]], i64 noundef [[VL:%.*]]) #[[ATTR0]] {
+// CHECK-RV64-NEXT: [[ENTRY:.*:]]
+// CHECK-RV64-NEXT: [[TMP0:%.*]] = call <vscale x 1 x bfloat> @llvm.riscv.vfmadd.mask.nxv1bf16.bf16.i64(<vscale x 1 x bfloat> [[VD]], bfloat [[RS1]], <vscale x 1 x bfloat> [[VS2]], <vscale x 1 x i1> [[MASK]], i64 7, i64 [[VL]], i64 3)
+// CHECK-RV64-NEXT: ret <vscale x 1 x bfloat> [[TMP0]]
+//
+vbfloat16mf4_t test_vfmadd_vf_bf16mf4_m(vbool64_t mask, vbfloat16mf4_t vd, __bf16 rs1, vbfloat16mf4_t vs2, size_t vl) {
+ return __riscv_vfmadd_vf_bf16mf4_m(mask, vd, rs1, vs2, vl);
+}
+
+// CHECK-RV64-LABEL: define dso_local <vscale x 2 x bfloat> @test_vfmadd_vv_bf16mf2_m(
+// CHECK-RV64-SAME: <vscale x 2 x i1> [[MASK:%.*]], <vscale x 2 x bfloat> [[VD:%.*]], <vscale x 2 x bfloat> [[VS1:%.*]], <vscale x 2 x bfloat> [[VS2:%.*]], i64 noundef [[VL:%.*]]) #[[ATTR0]] {
+// CHECK-RV64-NEXT: [[ENTRY:.*:]]
+// CHECK-RV64-NEXT: [[TMP0:%.*]] = call <vscale x 2 x bfloat> @llvm.riscv.vfmadd.mask.nxv2bf16.nxv2bf16.i64(<vscale x 2 x bfloat> [[VD]], <vscale x 2 x bfloat> [[VS1]], <vscale x 2 x bfloat> [[VS2]], <vscale x 2 x i1> [[MASK]], i64 7, i64 [[VL]], i64 3)
+// CHECK-RV64-NEXT: ret <vscale x 2 x bfloat> [[TMP0]]
+//
+vbfloat16mf2_t test_vfmadd_vv_bf16mf2_m(vbool32_t mask, vbfloat16mf2_t vd, vbfloat16mf2_t vs1, vbfloat16mf2_t vs2, size_t vl) {
+ return __riscv_vfmadd_vv_bf16mf2_m(mask, vd, vs1, vs2, vl);
+}
+
+// CHECK-RV64-LABEL: define dso_local <vscale x 2 x bfloat> @test_vfmadd_vf_bf16mf2_m(
+// CHECK-RV64-SAME: <vscale x 2 x i1> [[MASK:%.*]], <vscale x 2 x bfloat> [[VD:%.*]], bfloat noundef [[RS1:%.*]], <vscale x 2 x bfloat> [[VS2:%.*]], i64 noundef [[VL:%.*]]) #[[ATTR0]] {
+// CHECK-RV64-NEXT: [[ENTRY:.*:]]
+// CHECK-RV64-NEXT: [[TMP0:%.*]] = call <vscale x 2 x bfloat> @llvm.riscv.vfmadd.mask.nxv2bf16.bf16.i64(<vscale x 2 x bfloat> [[VD]], bfloat [[RS1]], <vscale x 2 x bfloat> [[VS2]], <vscale x 2 x i1> [[MASK]], i64 7, i64 [[VL]], i64 3)
+// CHECK-RV64-NEXT: ret <vscale x 2 x bfloat> [[TMP0]]
+//
+vbfloat16mf2_t test_vfmadd_vf_bf16mf2_m(vbool32_t mask, vbfloat16mf2_t vd, __bf16 rs1, vbfloat16mf2_t vs2, size_t vl) {
+ return __riscv_vfmadd_vf_bf16mf2_m(mask, vd, rs1, vs2, vl);
+}
+
+// CHECK-RV64-LABEL: define dso_local <vscale x 4 x bfloat> @test_vfmadd_vv_bf16m1_m(
+// CHECK-RV64-SAME: <vscale x 4 x i1> [[MASK:%.*]], <vscale x 4 x bfloat> [[VD:%.*]], <vscale x 4 x bfloat> [[VS1:%.*]], <vscale x 4 x bfloat> [[VS2:%.*]], i64 noundef [[VL:%.*]]) #[[ATTR0]] {
+// CHECK-RV64-NEXT: [[ENTRY:.*:]]
+// CHECK-RV64-NEXT: [[TMP0:%.*]] = call <vscale x 4 x bfloat> @llvm.riscv.vfmadd.mask.nxv4bf16.nxv4bf16.i64(<vscale x 4 x bfloat> [[VD]], <vscale x 4 x bfloat> [[VS1]], <vscale x 4 x bfloat> [[VS2]], <vscale x 4 x i1> [[MASK]], i64 7, i64 [[VL]], i64 3)
+// CHECK-RV64-NEXT: ret <vscale x 4 x bfloat> [[TMP0]]
+//
+vbfloat16m1_t test_vfmadd_vv_bf16m1_m(vbool16_t mask, vbfloat16m1_t vd, vbfloat16m1_t vs1, vbfloat16m1_t vs2, size_t vl) {
+ return __riscv_vfmadd_vv_bf16m1_m(mask, vd, vs1, vs2, vl);
+}
+
+// CHECK-RV64-LABEL: define dso_local <vscale x 4 x bfloat> @test_vfmadd_vf_bf16m1_m(
+// CHECK-RV64-SAME: <vscale x 4 x i1> [[MASK:%.*]], <vscale x 4 x bfloat> [[VD:%.*]], bfloat noundef [[RS1:%.*]], <vscale x 4 x bfloat> [[VS2:%.*]], i64 noundef [[VL:%.*]]) #[[ATTR0]] {
+// CHECK-RV64-NEXT: [[ENTRY:.*:]]
+// CHECK-RV64-NEXT: [[TMP0:%.*]] = call <vscale x 4 x bfloat> @llvm.riscv.vfmadd.mask.nxv4bf16.bf16.i64(<vscale x 4 x bfloat> [[VD]], bfloat [[RS1]], <vscale x 4 x bfloat> [[VS2]], <vscale x 4 x i1> [[MASK]], i64 7, i64 [[VL]], i64 3)
+// CHECK-RV64-NEXT: ret <vscale x 4 x bfloat> [[TMP0]]
+//
+vbfloat16m1_t test_vfmadd_vf_bf16m1_m(vbool16_t mask, vbfloat16m1_t vd, __bf16 rs1, vbfloat16m1_t vs2, size_t vl) {
+ return __riscv_vfmadd_vf_bf16m1_m(mask, vd, rs1, vs2, vl);
+}
+
+// CHECK-RV64-LABEL: define dso_local <vscale x 8 x bfloat> @test_vfmadd_vv_bf16m2_m(
+// CHECK-RV64-SAME: <vscale x 8 x i1> [[MASK:%.*]], <vscale x 8 x bfloat> [[VD:%.*]], <vscale x 8 x bfloat> [[VS1:%.*]], <vscale x 8 x bfloat> [[VS2:%.*]], i64 noundef [[VL:%.*]]) #[[ATTR0]] {
+// CHECK-RV64-NEXT: [[ENTRY:.*:]]
+// CHECK-RV64-NEXT: [[TMP0:%.*]] = call <vscale x 8 x bfloat> @llvm.riscv.vfmadd.mask.nxv8bf16.nxv8bf16.i64(<vscale x 8 x bfloat> [[VD]], <vscale x 8 x bfloat> [[VS1]], <vscale x 8 x bfloat> [[VS2]], <vscale x 8 x i1> [[MASK]], i64 7, i64 [[VL]], i64 3)
+// CHECK-RV64-NEXT: ret <vscale x 8 x bfloat> [[TMP0]]
+//
+vbfloat16m2_t test_vfmadd_vv_bf16m2_m(vbool8_t mask, vbfloat16m2_t vd, vbfloat16m2_t vs1, vbfloat16m2_t vs2, size_t vl) {
+ return __riscv_vfmadd_vv_bf16m2_m(mask, vd, vs1, vs2, vl);
+}
+
+// CHECK-RV64-LABEL: define dso_local <vscale x 8 x bfloat> @test_vfmadd_vf_bf16m2_m(
+// CHECK-RV64-SAME: <vscale x 8 x i1> [[MASK:%.*]], <vscale x 8 x bfloat> [[VD:%.*]], bfloat noundef [[RS1:%.*]], <vscale x 8 x bfloat> [[VS2:%.*]], i64 noundef [[VL:%.*]]) #[[ATTR0]] {
+// CHECK-RV64-NEXT: [[ENTRY:.*:]]
+// CHECK-RV64-NEXT: [[TMP0:%.*]] = call <vscale x 8 x bfloat> @llvm.riscv.vfmadd.mask.nxv8bf16.bf16.i64(<vscale x 8 x bfloat> [[VD]], bfloat [[RS1]], <vscale x 8 x bfloat> [[VS2]], <vscale x 8 x i1> [[MASK]], i64 7, i64 [[VL]], i64 3)
+// CHECK-RV64-NEXT: ret <vscale x 8 x bfloat> [[TMP0]]
+//
+vbfloat16m2_t test_vfmadd_vf_bf16m2_m(vbool8_t mask, vbfloat16m2_t vd, __bf16 rs1, vbfloat16m2_t vs2, size_t vl) {
+ return __riscv_vfmadd_vf_bf16m2_m(mask, vd, rs1, vs2, vl);
+}
+
+// CHECK-RV64-LABEL: define dso_local <vscale x 16 x bfloat> @test_vfmadd_vv_bf16m4_m(
+// CHECK-RV64-SAME: <vscale x 16 x i1> [[MASK:%.*]], <vscale x 16 x bfloat> [[VD:%.*]], <vscale x 16 x bfloat> [[VS1:%.*]], <vscale x 16 x bfloat> [[VS2:%.*]], i64 noundef [[VL:%.*]]) #[[ATTR0]] {
+// CHECK-RV64-NEXT: [[ENTRY:.*:]]
+// CHECK-RV64-NEXT: [[TMP0:%.*]] = call <vscale x 16 x bfloat> @llvm.riscv.vfmadd.mask.nxv16bf16.nxv16bf16.i64(<vscale x 16 x bfloat> [[VD]], <vscale x 16 x bfloat> [[VS1]], <vscale x 16 x bfloat> [[VS2]], <vscale x 16 x i1> [[MASK]], i64 7, i64 [[VL]], i64 3)
+// CHECK-RV64-NEXT: ret <vscale x 16 x bfloat> [[TMP0]]
+//
+vbfloat16m4_t test_vfmadd_vv_bf16m4_m(vbool4_t mask, vbfloat16m4_t vd, vbfloat16m4_t vs1, vbfloat16m4_t vs2, size_t vl) {
+ return __riscv_vfmadd_vv_bf16m4_m(mask, vd, vs1, vs2, vl);
+}
+
+// CHECK-RV64-LABEL: define dso_local <vscale x 16 x bfloat> @test_vfmadd_vf_bf16m4_m(
+// CHECK-RV64-SAME: <vscale x 16 x i1> [[MASK:%.*]], <vscale x 16 x bfloat> [[VD:%.*]], bfloat noundef [[RS1:%.*]], <vscale x 16 x bfloat> [[VS2:%.*]], i64 noundef [[VL:%.*]]) #[[ATTR0]] {
+// CHECK-RV64-NEXT: [[ENTRY:.*:]]
+// CHECK-RV64-NEXT: [[TMP0:%.*]] = call <vscale x 16 x bfloat> @llvm.riscv.vfmadd.mask.nxv16bf16.bf16.i64(<vscale x 16 x bfloat> [[VD]], bfloat [[RS1]], <vscale x 16 x bfloat> [[VS2]], <vscale x 16 x i1> [[MASK]], i64 7, i64 [[VL]], i64 3)
+// CHECK-RV64-NEXT: ret <vscale x 16 x bfloat> [[TMP0]]
+//
+vbfloat16m4_t test_vfmadd_vf_bf16m4_m(vbool4_t mask, vbfloat16m4_t vd, __bf16 rs1, vbfloat16m4_t vs2, size_t vl) {
+ return __riscv_vfmadd_vf_bf16m4_m(mask, vd, rs1, vs2, vl);
+}
+
+// CHECK-RV64-LABEL: define dso_local <vscale x 32 x bfloat> @test_vfmadd_vv_bf16m8_m(
+// CHECK-RV64-SAME: <vscale x 32 x i1> [[MASK:%.*]], <vscale x 32 x bfloat> [[VD:%.*]], <vscale x 32 x bfloat> [[VS1:%.*]], <vscale x 32 x bfloat> [[VS2:%.*]], i64 noundef [[VL:%.*]]) #[[ATTR0]] {
+// CHECK-RV64-NEXT: [[ENTRY:.*:]]
+// CHECK-RV64-NEXT: [[TMP0:%.*]] = call <vscale x 32 x bfloat> @llvm.riscv.vfmadd.mask.nxv32bf16.nxv32bf16.i64(<vscale x 32 x bfloat> [[VD]], <vscale x 32 x bfloat> [[VS1]], <vscale x 32 x bfloat> [[VS2]], <vscale x 32 x i1> [[MASK]], i64 7, i64 [[VL]], i64 3)
+// CHECK-RV64-NEXT: ret <vscale x 32 x bfloat> [[TMP0]]
+//
+vbfloat16m8_t test_vfmadd_vv_bf16m8_m(vbool2_t mask, vbfloat16m8_t vd, vbfloat16m8_t vs1, vbfloat16m8_t vs2, size_t vl) {
+ return __riscv_vfmadd_vv_bf16m8_m(mask, vd, vs1, vs2, vl);
+}
+
+// CHECK-RV64-LABEL: define dso_local <vscale x 32 x bfloat> @test_vfmadd_vf_bf16m8_m(
+// CHECK-RV64-SAME: <vscale x 32 x i1> [[MASK:%.*]], <vscale x 32 x bfloat> [[VD:%.*]], bfloat noundef [[RS1:%.*]], <vscale x 32 x bfloat> [[VS2:%.*]], i64 noundef [[VL:%.*]]) #[[ATTR0]] {
+// CHECK-RV64-NEXT: [[ENTRY:.*:]]
+// CHECK-RV64-NEXT: [[TMP0:%.*]] = call <vscale x 32 x bfloat> @llvm.riscv.vfmadd.mask.nxv32bf16.bf16.i64(<vscale x 32 x bfloat> [[VD]], bfloat [[RS1]], <vscale x 32 x bfloat> [[VS2]], <vscale x 32 x i1> [[MASK]], i64 7, i64 [[VL]], i64 3)
+// CHECK-RV64-NEXT: ret <vscale x 32 x bfloat> [[TMP0]]
+//
+vbfloat16m8_t test_vfmadd_vf_bf16m8_m(vbool2_t mask, vbfloat16m8_t vd, __bf16 rs1, vbfloat16m8_t vs2, size_t vl) {
+ return __riscv_vfmadd_vf_bf16m8_m(mask, vd, rs1, vs2, vl);
+}
+
diff --git a/clang/test/CodeGen/RISCV/rvv-intrinsics-autogenerated/zvfbfa/non-policy/non-overloaded/vfmax.c b/clang/test/CodeGen/RISCV/rvv-intrinsics-autogenerated/zvfbfa/non-policy/non-overloaded/vfmax.c
new file mode 100644
index 0000000..dfdeb4e
--- /dev/null
+++ b/clang/test/CodeGen/RISCV/rvv-intrinsics-autogenerated/zvfbfa/non-policy/non-overloaded/vfmax.c
@@ -0,0 +1,249 @@
+// NOTE: Assertions have been autogenerated by utils/update_cc_test_checks.py UTC_ARGS: --version 5
+// REQUIRES: riscv-registered-target
+// RUN: %clang_cc1 -triple riscv64 -target-feature +v \
+// RUN: -target-feature +experimental-zvfbfa -disable-O0-optnone \
+// RUN: -emit-llvm %s -o - | opt -S -passes=mem2reg | \
+// RUN: FileCheck --check-prefix=CHECK-RV64 %s
+
+#include <riscv_vector.h>
+
+// CHECK-RV64-LABEL: define dso_local <vscale x 1 x bfloat> @test_vfmax_vv_bf16mf4(
+// CHECK-RV64-SAME: <vscale x 1 x bfloat> [[OP1:%.*]], <vscale x 1 x bfloat> [[OP2:%.*]], i64 noundef [[VL:%.*]]) #[[ATTR0:[0-9]+]] {
+// CHECK-RV64-NEXT: [[ENTRY:.*:]]
+// CHECK-RV64-NEXT: [[TMP0:%.*]] = call <vscale x 1 x bfloat> @llvm.riscv.vfmax.nxv1bf16.nxv1bf16.i64(<vscale x 1 x bfloat> poison, <vscale x 1 x bfloat> [[OP1]], <vscale x 1 x bfloat> [[OP2]], i64 [[VL]])
+// CHECK-RV64-NEXT: ret <vscale x 1 x bfloat> [[TMP0]]
+//
+vbfloat16mf4_t test_vfmax_vv_bf16mf4(vbfloat16mf4_t op1, vbfloat16mf4_t op2, size_t vl) {
+ return __riscv_vfmax_vv_bf16mf4(op1, op2, vl);
+}
+
+// CHECK-RV64-LABEL: define dso_local <vscale x 1 x bfloat> @test_vfmax_vf_bf16mf4(
+// CHECK-RV64-SAME: <vscale x 1 x bfloat> [[OP1:%.*]], bfloat noundef [[OP2:%.*]], i64 noundef [[VL:%.*]]) #[[ATTR0]] {
+// CHECK-RV64-NEXT: [[ENTRY:.*:]]
+// CHECK-RV64-NEXT: [[TMP0:%.*]] = call <vscale x 1 x bfloat> @llvm.riscv.vfmax.nxv1bf16.bf16.i64(<vscale x 1 x bfloat> poison, <vscale x 1 x bfloat> [[OP1]], bfloat [[OP2]], i64 [[VL]])
+// CHECK-RV64-NEXT: ret <vscale x 1 x bfloat> [[TMP0]]
+//
+vbfloat16mf4_t test_vfmax_vf_bf16mf4(vbfloat16mf4_t op1, __bf16 op2, size_t vl) {
+ return __riscv_vfmax_vf_bf16mf4(op1, op2, vl);
+}
+
+// CHECK-RV64-LABEL: define dso_local <vscale x 2 x bfloat> @test_vfmax_vv_bf16mf2(
+// CHECK-RV64-SAME: <vscale x 2 x bfloat> [[OP1:%.*]], <vscale x 2 x bfloat> [[OP2:%.*]], i64 noundef [[VL:%.*]]) #[[ATTR0]] {
+// CHECK-RV64-NEXT: [[ENTRY:.*:]]
+// CHECK-RV64-NEXT: [[TMP0:%.*]] = call <vscale x 2 x bfloat> @llvm.riscv.vfmax.nxv2bf16.nxv2bf16.i64(<vscale x 2 x bfloat> poison, <vscale x 2 x bfloat> [[OP1]], <vscale x 2 x bfloat> [[OP2]], i64 [[VL]])
+// CHECK-RV64-NEXT: ret <vscale x 2 x bfloat> [[TMP0]]
+//
+vbfloat16mf2_t test_vfmax_vv_bf16mf2(vbfloat16mf2_t op1, vbfloat16mf2_t op2, size_t vl) {
+ return __riscv_vfmax_vv_bf16mf2(op1, op2, vl);
+}
+
+// CHECK-RV64-LABEL: define dso_local <vscale x 2 x bfloat> @test_vfmax_vf_bf16mf2(
+// CHECK-RV64-SAME: <vscale x 2 x bfloat> [[OP1:%.*]], bfloat noundef [[OP2:%.*]], i64 noundef [[VL:%.*]]) #[[ATTR0]] {
+// CHECK-RV64-NEXT: [[ENTRY:.*:]]
+// CHECK-RV64-NEXT: [[TMP0:%.*]] = call <vscale x 2 x bfloat> @llvm.riscv.vfmax.nxv2bf16.bf16.i64(<vscale x 2 x bfloat> poison, <vscale x 2 x bfloat> [[OP1]], bfloat [[OP2]], i64 [[VL]])
+// CHECK-RV64-NEXT: ret <vscale x 2 x bfloat> [[TMP0]]
+//
+vbfloat16mf2_t test_vfmax_vf_bf16mf2(vbfloat16mf2_t op1, __bf16 op2, size_t vl) {
+ return __riscv_vfmax_vf_bf16mf2(op1, op2, vl);
+}
+
+// CHECK-RV64-LABEL: define dso_local <vscale x 4 x bfloat> @test_vfmax_vv_bf16m1(
+// CHECK-RV64-SAME: <vscale x 4 x bfloat> [[OP1:%.*]], <vscale x 4 x bfloat> [[OP2:%.*]], i64 noundef [[VL:%.*]]) #[[ATTR0]] {
+// CHECK-RV64-NEXT: [[ENTRY:.*:]]
+// CHECK-RV64-NEXT: [[TMP0:%.*]] = call <vscale x 4 x bfloat> @llvm.riscv.vfmax.nxv4bf16.nxv4bf16.i64(<vscale x 4 x bfloat> poison, <vscale x 4 x bfloat> [[OP1]], <vscale x 4 x bfloat> [[OP2]], i64 [[VL]])
+// CHECK-RV64-NEXT: ret <vscale x 4 x bfloat> [[TMP0]]
+//
+vbfloat16m1_t test_vfmax_vv_bf16m1(vbfloat16m1_t op1, vbfloat16m1_t op2, size_t vl) {
+ return __riscv_vfmax_vv_bf16m1(op1, op2, vl);
+}
+
+// CHECK-RV64-LABEL: define dso_local <vscale x 4 x bfloat> @test_vfmax_vf_bf16m1(
+// CHECK-RV64-SAME: <vscale x 4 x bfloat> [[OP1:%.*]], bfloat noundef [[OP2:%.*]], i64 noundef [[VL:%.*]]) #[[ATTR0]] {
+// CHECK-RV64-NEXT: [[ENTRY:.*:]]
+// CHECK-RV64-NEXT: [[TMP0:%.*]] = call <vscale x 4 x bfloat> @llvm.riscv.vfmax.nxv4bf16.bf16.i64(<vscale x 4 x bfloat> poison, <vscale x 4 x bfloat> [[OP1]], bfloat [[OP2]], i64 [[VL]])
+// CHECK-RV64-NEXT: ret <vscale x 4 x bfloat> [[TMP0]]
+//
+vbfloat16m1_t test_vfmax_vf_bf16m1(vbfloat16m1_t op1, __bf16 op2, size_t vl) {
+ return __riscv_vfmax_vf_bf16m1(op1, op2, vl);
+}
+
+// CHECK-RV64-LABEL: define dso_local <vscale x 8 x bfloat> @test_vfmax_vv_bf16m2(
+// CHECK-RV64-SAME: <vscale x 8 x bfloat> [[OP1:%.*]], <vscale x 8 x bfloat> [[OP2:%.*]], i64 noundef [[VL:%.*]]) #[[ATTR0]] {
+// CHECK-RV64-NEXT: [[ENTRY:.*:]]
+// CHECK-RV64-NEXT: [[TMP0:%.*]] = call <vscale x 8 x bfloat> @llvm.riscv.vfmax.nxv8bf16.nxv8bf16.i64(<vscale x 8 x bfloat> poison, <vscale x 8 x bfloat> [[OP1]], <vscale x 8 x bfloat> [[OP2]], i64 [[VL]])
+// CHECK-RV64-NEXT: ret <vscale x 8 x bfloat> [[TMP0]]
+//
+vbfloat16m2_t test_vfmax_vv_bf16m2(vbfloat16m2_t op1, vbfloat16m2_t op2, size_t vl) {
+ return __riscv_vfmax_vv_bf16m2(op1, op2, vl);
+}
+
+// CHECK-RV64-LABEL: define dso_local <vscale x 8 x bfloat> @test_vfmax_vf_bf16m2(
+// CHECK-RV64-SAME: <vscale x 8 x bfloat> [[OP1:%.*]], bfloat noundef [[OP2:%.*]], i64 noundef [[VL:%.*]]) #[[ATTR0]] {
+// CHECK-RV64-NEXT: [[ENTRY:.*:]]
+// CHECK-RV64-NEXT: [[TMP0:%.*]] = call <vscale x 8 x bfloat> @llvm.riscv.vfmax.nxv8bf16.bf16.i64(<vscale x 8 x bfloat> poison, <vscale x 8 x bfloat> [[OP1]], bfloat [[OP2]], i64 [[VL]])
+// CHECK-RV64-NEXT: ret <vscale x 8 x bfloat> [[TMP0]]
+//
+vbfloat16m2_t test_vfmax_vf_bf16m2(vbfloat16m2_t op1, __bf16 op2, size_t vl) {
+ return __riscv_vfmax_vf_bf16m2(op1, op2, vl);
+}
+
+// CHECK-RV64-LABEL: define dso_local <vscale x 16 x bfloat> @test_vfmax_vv_bf16m4(
+// CHECK-RV64-SAME: <vscale x 16 x bfloat> [[OP1:%.*]], <vscale x 16 x bfloat> [[OP2:%.*]], i64 noundef [[VL:%.*]]) #[[ATTR0]] {
+// CHECK-RV64-NEXT: [[ENTRY:.*:]]
+// CHECK-RV64-NEXT: [[TMP0:%.*]] = call <vscale x 16 x bfloat> @llvm.riscv.vfmax.nxv16bf16.nxv16bf16.i64(<vscale x 16 x bfloat> poison, <vscale x 16 x bfloat> [[OP1]], <vscale x 16 x bfloat> [[OP2]], i64 [[VL]])
+// CHECK-RV64-NEXT: ret <vscale x 16 x bfloat> [[TMP0]]
+//
+vbfloat16m4_t test_vfmax_vv_bf16m4(vbfloat16m4_t op1, vbfloat16m4_t op2, size_t vl) {
+ return __riscv_vfmax_vv_bf16m4(op1, op2, vl);
+}
+
+// CHECK-RV64-LABEL: define dso_local <vscale x 16 x bfloat> @test_vfmax_vf_bf16m4(
+// CHECK-RV64-SAME: <vscale x 16 x bfloat> [[OP1:%.*]], bfloat noundef [[OP2:%.*]], i64 noundef [[VL:%.*]]) #[[ATTR0]] {
+// CHECK-RV64-NEXT: [[ENTRY:.*:]]
+// CHECK-RV64-NEXT: [[TMP0:%.*]] = call <vscale x 16 x bfloat> @llvm.riscv.vfmax.nxv16bf16.bf16.i64(<vscale x 16 x bfloat> poison, <vscale x 16 x bfloat> [[OP1]], bfloat [[OP2]], i64 [[VL]])
+// CHECK-RV64-NEXT: ret <vscale x 16 x bfloat> [[TMP0]]
+//
+vbfloat16m4_t test_vfmax_vf_bf16m4(vbfloat16m4_t op1, __bf16 op2, size_t vl) {
+ return __riscv_vfmax_vf_bf16m4(op1, op2, vl);
+}
+
+// CHECK-RV64-LABEL: define dso_local <vscale x 32 x bfloat> @test_vfmax_vv_bf16m8(
+// CHECK-RV64-SAME: <vscale x 32 x bfloat> [[OP1:%.*]], <vscale x 32 x bfloat> [[OP2:%.*]], i64 noundef [[VL:%.*]]) #[[ATTR0]] {
+// CHECK-RV64-NEXT: [[ENTRY:.*:]]
+// CHECK-RV64-NEXT: [[TMP0:%.*]] = call <vscale x 32 x bfloat> @llvm.riscv.vfmax.nxv32bf16.nxv32bf16.i64(<vscale x 32 x bfloat> poison, <vscale x 32 x bfloat> [[OP1]], <vscale x 32 x bfloat> [[OP2]], i64 [[VL]])
+// CHECK-RV64-NEXT: ret <vscale x 32 x bfloat> [[TMP0]]
+//
+vbfloat16m8_t test_vfmax_vv_bf16m8(vbfloat16m8_t op1, vbfloat16m8_t op2, size_t vl) {
+ return __riscv_vfmax_vv_bf16m8(op1, op2, vl);
+}
+
+// CHECK-RV64-LABEL: define dso_local <vscale x 32 x bfloat> @test_vfmax_vf_bf16m8(
+// CHECK-RV64-SAME: <vscale x 32 x bfloat> [[OP1:%.*]], bfloat noundef [[OP2:%.*]], i64 noundef [[VL:%.*]]) #[[ATTR0]] {
+// CHECK-RV64-NEXT: [[ENTRY:.*:]]
+// CHECK-RV64-NEXT: [[TMP0:%.*]] = call <vscale x 32 x bfloat> @llvm.riscv.vfmax.nxv32bf16.bf16.i64(<vscale x 32 x bfloat> poison, <vscale x 32 x bfloat> [[OP1]], bfloat [[OP2]], i64 [[VL]])
+// CHECK-RV64-NEXT: ret <vscale x 32 x bfloat> [[TMP0]]
+//
+vbfloat16m8_t test_vfmax_vf_bf16m8(vbfloat16m8_t op1, __bf16 op2, size_t vl) {
+ return __riscv_vfmax_vf_bf16m8(op1, op2, vl);
+}
+
+// CHECK-RV64-LABEL: define dso_local <vscale x 1 x bfloat> @test_vfmax_vv_bf16mf4_m(
+// CHECK-RV64-SAME: <vscale x 1 x i1> [[MASK:%.*]], <vscale x 1 x bfloat> [[OP1:%.*]], <vscale x 1 x bfloat> [[OP2:%.*]], i64 noundef [[VL:%.*]]) #[[ATTR0]] {
+// CHECK-RV64-NEXT: [[ENTRY:.*:]]
+// CHECK-RV64-NEXT: [[TMP0:%.*]] = call <vscale x 1 x bfloat> @llvm.riscv.vfmax.mask.nxv1bf16.nxv1bf16.i64(<vscale x 1 x bfloat> poison, <vscale x 1 x bfloat> [[OP1]], <vscale x 1 x bfloat> [[OP2]], <vscale x 1 x i1> [[MASK]], i64 [[VL]], i64 3)
+// CHECK-RV64-NEXT: ret <vscale x 1 x bfloat> [[TMP0]]
+//
+vbfloat16mf4_t test_vfmax_vv_bf16mf4_m(vbool64_t mask, vbfloat16mf4_t op1, vbfloat16mf4_t op2, size_t vl) {
+ return __riscv_vfmax_vv_bf16mf4_m(mask, op1, op2, vl);
+}
+
+// CHECK-RV64-LABEL: define dso_local <vscale x 1 x bfloat> @test_vfmax_vf_bf16mf4_m(
+// CHECK-RV64-SAME: <vscale x 1 x i1> [[MASK:%.*]], <vscale x 1 x bfloat> [[OP1:%.*]], bfloat noundef [[OP2:%.*]], i64 noundef [[VL:%.*]]) #[[ATTR0]] {
+// CHECK-RV64-NEXT: [[ENTRY:.*:]]
+// CHECK-RV64-NEXT: [[TMP0:%.*]] = call <vscale x 1 x bfloat> @llvm.riscv.vfmax.mask.nxv1bf16.bf16.i64(<vscale x 1 x bfloat> poison, <vscale x 1 x bfloat> [[OP1]], bfloat [[OP2]], <vscale x 1 x i1> [[MASK]], i64 [[VL]], i64 3)
+// CHECK-RV64-NEXT: ret <vscale x 1 x bfloat> [[TMP0]]
+//
+vbfloat16mf4_t test_vfmax_vf_bf16mf4_m(vbool64_t mask, vbfloat16mf4_t op1, __bf16 op2, size_t vl) {
+ return __riscv_vfmax_vf_bf16mf4_m(mask, op1, op2, vl);
+}
+
+// CHECK-RV64-LABEL: define dso_local <vscale x 2 x bfloat> @test_vfmax_vv_bf16mf2_m(
+// CHECK-RV64-SAME: <vscale x 2 x i1> [[MASK:%.*]], <vscale x 2 x bfloat> [[OP1:%.*]], <vscale x 2 x bfloat> [[OP2:%.*]], i64 noundef [[VL:%.*]]) #[[ATTR0]] {
+// CHECK-RV64-NEXT: [[ENTRY:.*:]]
+// CHECK-RV64-NEXT: [[TMP0:%.*]] = call <vscale x 2 x bfloat> @llvm.riscv.vfmax.mask.nxv2bf16.nxv2bf16.i64(<vscale x 2 x bfloat> poison, <vscale x 2 x bfloat> [[OP1]], <vscale x 2 x bfloat> [[OP2]], <vscale x 2 x i1> [[MASK]], i64 [[VL]], i64 3)
+// CHECK-RV64-NEXT: ret <vscale x 2 x bfloat> [[TMP0]]
+//
+vbfloat16mf2_t test_vfmax_vv_bf16mf2_m(vbool32_t mask, vbfloat16mf2_t op1, vbfloat16mf2_t op2, size_t vl) {
+ return __riscv_vfmax_vv_bf16mf2_m(mask, op1, op2, vl);
+}
+
+// CHECK-RV64-LABEL: define dso_local <vscale x 2 x bfloat> @test_vfmax_vf_bf16mf2_m(
+// CHECK-RV64-SAME: <vscale x 2 x i1> [[MASK:%.*]], <vscale x 2 x bfloat> [[OP1:%.*]], bfloat noundef [[OP2:%.*]], i64 noundef [[VL:%.*]]) #[[ATTR0]] {
+// CHECK-RV64-NEXT: [[ENTRY:.*:]]
+// CHECK-RV64-NEXT: [[TMP0:%.*]] = call <vscale x 2 x bfloat> @llvm.riscv.vfmax.mask.nxv2bf16.bf16.i64(<vscale x 2 x bfloat> poison, <vscale x 2 x bfloat> [[OP1]], bfloat [[OP2]], <vscale x 2 x i1> [[MASK]], i64 [[VL]], i64 3)
+// CHECK-RV64-NEXT: ret <vscale x 2 x bfloat> [[TMP0]]
+//
+vbfloat16mf2_t test_vfmax_vf_bf16mf2_m(vbool32_t mask, vbfloat16mf2_t op1, __bf16 op2, size_t vl) {
+ return __riscv_vfmax_vf_bf16mf2_m(mask, op1, op2, vl);
+}
+
+// CHECK-RV64-LABEL: define dso_local <vscale x 4 x bfloat> @test_vfmax_vv_bf16m1_m(
+// CHECK-RV64-SAME: <vscale x 4 x i1> [[MASK:%.*]], <vscale x 4 x bfloat> [[OP1:%.*]], <vscale x 4 x bfloat> [[OP2:%.*]], i64 noundef [[VL:%.*]]) #[[ATTR0]] {
+// CHECK-RV64-NEXT: [[ENTRY:.*:]]
+// CHECK-RV64-NEXT: [[TMP0:%.*]] = call <vscale x 4 x bfloat> @llvm.riscv.vfmax.mask.nxv4bf16.nxv4bf16.i64(<vscale x 4 x bfloat> poison, <vscale x 4 x bfloat> [[OP1]], <vscale x 4 x bfloat> [[OP2]], <vscale x 4 x i1> [[MASK]], i64 [[VL]], i64 3)
+// CHECK-RV64-NEXT: ret <vscale x 4 x bfloat> [[TMP0]]
+//
+vbfloat16m1_t test_vfmax_vv_bf16m1_m(vbool16_t mask, vbfloat16m1_t op1, vbfloat16m1_t op2, size_t vl) {
+ return __riscv_vfmax_vv_bf16m1_m(mask, op1, op2, vl);
+}
+
+// CHECK-RV64-LABEL: define dso_local <vscale x 4 x bfloat> @test_vfmax_vf_bf16m1_m(
+// CHECK-RV64-SAME: <vscale x 4 x i1> [[MASK:%.*]], <vscale x 4 x bfloat> [[OP1:%.*]], bfloat noundef [[OP2:%.*]], i64 noundef [[VL:%.*]]) #[[ATTR0]] {
+// CHECK-RV64-NEXT: [[ENTRY:.*:]]
+// CHECK-RV64-NEXT: [[TMP0:%.*]] = call <vscale x 4 x bfloat> @llvm.riscv.vfmax.mask.nxv4bf16.bf16.i64(<vscale x 4 x bfloat> poison, <vscale x 4 x bfloat> [[OP1]], bfloat [[OP2]], <vscale x 4 x i1> [[MASK]], i64 [[VL]], i64 3)
+// CHECK-RV64-NEXT: ret <vscale x 4 x bfloat> [[TMP0]]
+//
+vbfloat16m1_t test_vfmax_vf_bf16m1_m(vbool16_t mask, vbfloat16m1_t op1, __bf16 op2, size_t vl) {
+ return __riscv_vfmax_vf_bf16m1_m(mask, op1, op2, vl);
+}
+
+// CHECK-RV64-LABEL: define dso_local <vscale x 8 x bfloat> @test_vfmax_vv_bf16m2_m(
+// CHECK-RV64-SAME: <vscale x 8 x i1> [[MASK:%.*]], <vscale x 8 x bfloat> [[OP1:%.*]], <vscale x 8 x bfloat> [[OP2:%.*]], i64 noundef [[VL:%.*]]) #[[ATTR0]] {
+// CHECK-RV64-NEXT: [[ENTRY:.*:]]
+// CHECK-RV64-NEXT: [[TMP0:%.*]] = call <vscale x 8 x bfloat> @llvm.riscv.vfmax.mask.nxv8bf16.nxv8bf16.i64(<vscale x 8 x bfloat> poison, <vscale x 8 x bfloat> [[OP1]], <vscale x 8 x bfloat> [[OP2]], <vscale x 8 x i1> [[MASK]], i64 [[VL]], i64 3)
+// CHECK-RV64-NEXT: ret <vscale x 8 x bfloat> [[TMP0]]
+//
+vbfloat16m2_t test_vfmax_vv_bf16m2_m(vbool8_t mask, vbfloat16m2_t op1, vbfloat16m2_t op2, size_t vl) {
+ return __riscv_vfmax_vv_bf16m2_m(mask, op1, op2, vl);
+}
+
+// CHECK-RV64-LABEL: define dso_local <vscale x 8 x bfloat> @test_vfmax_vf_bf16m2_m(
+// CHECK-RV64-SAME: <vscale x 8 x i1> [[MASK:%.*]], <vscale x 8 x bfloat> [[OP1:%.*]], bfloat noundef [[OP2:%.*]], i64 noundef [[VL:%.*]]) #[[ATTR0]] {
+// CHECK-RV64-NEXT: [[ENTRY:.*:]]
+// CHECK-RV64-NEXT: [[TMP0:%.*]] = call <vscale x 8 x bfloat> @llvm.riscv.vfmax.mask.nxv8bf16.bf16.i64(<vscale x 8 x bfloat> poison, <vscale x 8 x bfloat> [[OP1]], bfloat [[OP2]], <vscale x 8 x i1> [[MASK]], i64 [[VL]], i64 3)
+// CHECK-RV64-NEXT: ret <vscale x 8 x bfloat> [[TMP0]]
+//
+vbfloat16m2_t test_vfmax_vf_bf16m2_m(vbool8_t mask, vbfloat16m2_t op1, __bf16 op2, size_t vl) {
+ return __riscv_vfmax_vf_bf16m2_m(mask, op1, op2, vl);
+}
+
+// CHECK-RV64-LABEL: define dso_local <vscale x 16 x bfloat> @test_vfmax_vv_bf16m4_m(
+// CHECK-RV64-SAME: <vscale x 16 x i1> [[MASK:%.*]], <vscale x 16 x bfloat> [[OP1:%.*]], <vscale x 16 x bfloat> [[OP2:%.*]], i64 noundef [[VL:%.*]]) #[[ATTR0]] {
+// CHECK-RV64-NEXT: [[ENTRY:.*:]]
+// CHECK-RV64-NEXT: [[TMP0:%.*]] = call <vscale x 16 x bfloat> @llvm.riscv.vfmax.mask.nxv16bf16.nxv16bf16.i64(<vscale x 16 x bfloat> poison, <vscale x 16 x bfloat> [[OP1]], <vscale x 16 x bfloat> [[OP2]], <vscale x 16 x i1> [[MASK]], i64 [[VL]], i64 3)
+// CHECK-RV64-NEXT: ret <vscale x 16 x bfloat> [[TMP0]]
+//
+vbfloat16m4_t test_vfmax_vv_bf16m4_m(vbool4_t mask, vbfloat16m4_t op1, vbfloat16m4_t op2, size_t vl) {
+ return __riscv_vfmax_vv_bf16m4_m(mask, op1, op2, vl);
+}
+
+// CHECK-RV64-LABEL: define dso_local <vscale x 16 x bfloat> @test_vfmax_vf_bf16m4_m(
+// CHECK-RV64-SAME: <vscale x 16 x i1> [[MASK:%.*]], <vscale x 16 x bfloat> [[OP1:%.*]], bfloat noundef [[OP2:%.*]], i64 noundef [[VL:%.*]]) #[[ATTR0]] {
+// CHECK-RV64-NEXT: [[ENTRY:.*:]]
+// CHECK-RV64-NEXT: [[TMP0:%.*]] = call <vscale x 16 x bfloat> @llvm.riscv.vfmax.mask.nxv16bf16.bf16.i64(<vscale x 16 x bfloat> poison, <vscale x 16 x bfloat> [[OP1]], bfloat [[OP2]], <vscale x 16 x i1> [[MASK]], i64 [[VL]], i64 3)
+// CHECK-RV64-NEXT: ret <vscale x 16 x bfloat> [[TMP0]]
+//
+vbfloat16m4_t test_vfmax_vf_bf16m4_m(vbool4_t mask, vbfloat16m4_t op1, __bf16 op2, size_t vl) {
+ return __riscv_vfmax_vf_bf16m4_m(mask, op1, op2, vl);
+}
+
+// CHECK-RV64-LABEL: define dso_local <vscale x 32 x bfloat> @test_vfmax_vv_bf16m8_m(
+// CHECK-RV64-SAME: <vscale x 32 x i1> [[MASK:%.*]], <vscale x 32 x bfloat> [[OP1:%.*]], <vscale x 32 x bfloat> [[OP2:%.*]], i64 noundef [[VL:%.*]]) #[[ATTR0]] {
+// CHECK-RV64-NEXT: [[ENTRY:.*:]]
+// CHECK-RV64-NEXT: [[TMP0:%.*]] = call <vscale x 32 x bfloat> @llvm.riscv.vfmax.mask.nxv32bf16.nxv32bf16.i64(<vscale x 32 x bfloat> poison, <vscale x 32 x bfloat> [[OP1]], <vscale x 32 x bfloat> [[OP2]], <vscale x 32 x i1> [[MASK]], i64 [[VL]], i64 3)
+// CHECK-RV64-NEXT: ret <vscale x 32 x bfloat> [[TMP0]]
+//
+vbfloat16m8_t test_vfmax_vv_bf16m8_m(vbool2_t mask, vbfloat16m8_t op1, vbfloat16m8_t op2, size_t vl) {
+ return __riscv_vfmax_vv_bf16m8_m(mask, op1, op2, vl);
+}
+
+// CHECK-RV64-LABEL: define dso_local <vscale x 32 x bfloat> @test_vfmax_vf_bf16m8_m(
+// CHECK-RV64-SAME: <vscale x 32 x i1> [[MASK:%.*]], <vscale x 32 x bfloat> [[OP1:%.*]], bfloat noundef [[OP2:%.*]], i64 noundef [[VL:%.*]]) #[[ATTR0]] {
+// CHECK-RV64-NEXT: [[ENTRY:.*:]]
+// CHECK-RV64-NEXT: [[TMP0:%.*]] = call <vscale x 32 x bfloat> @llvm.riscv.vfmax.mask.nxv32bf16.bf16.i64(<vscale x 32 x bfloat> poison, <vscale x 32 x bfloat> [[OP1]], bfloat [[OP2]], <vscale x 32 x i1> [[MASK]], i64 [[VL]], i64 3)
+// CHECK-RV64-NEXT: ret <vscale x 32 x bfloat> [[TMP0]]
+//
+vbfloat16m8_t test_vfmax_vf_bf16m8_m(vbool2_t mask, vbfloat16m8_t op1, __bf16 op2, size_t vl) {
+ return __riscv_vfmax_vf_bf16m8_m(mask, op1, op2, vl);
+}
+
diff --git a/clang/test/CodeGen/RISCV/rvv-intrinsics-autogenerated/zvfbfa/non-policy/non-overloaded/vfmerge.c b/clang/test/CodeGen/RISCV/rvv-intrinsics-autogenerated/zvfbfa/non-policy/non-overloaded/vfmerge.c
new file mode 100644
index 0000000..96221c5
--- /dev/null
+++ b/clang/test/CodeGen/RISCV/rvv-intrinsics-autogenerated/zvfbfa/non-policy/non-overloaded/vfmerge.c
@@ -0,0 +1,69 @@
+// NOTE: Assertions have been autogenerated by utils/update_cc_test_checks.py UTC_ARGS: --version 5
+// REQUIRES: riscv-registered-target
+// RUN: %clang_cc1 -triple riscv64 -target-feature +v \
+// RUN: -target-feature +experimental-zvfbfa -disable-O0-optnone \
+// RUN: -emit-llvm %s -o - | opt -S -passes=mem2reg | \
+// RUN: FileCheck --check-prefix=CHECK-RV64 %s
+
+#include <riscv_vector.h>
+
+// CHECK-RV64-LABEL: define dso_local <vscale x 1 x bfloat> @test_vfmerge_vfm_bf16mf4(
+// CHECK-RV64-SAME: <vscale x 1 x bfloat> [[OP1:%.*]], bfloat noundef [[OP2:%.*]], <vscale x 1 x i1> [[MASK:%.*]], i64 noundef [[VL:%.*]]) #[[ATTR0:[0-9]+]] {
+// CHECK-RV64-NEXT: [[ENTRY:.*:]]
+// CHECK-RV64-NEXT: [[TMP0:%.*]] = call <vscale x 1 x bfloat> @llvm.riscv.vfmerge.nxv1bf16.bf16.i64(<vscale x 1 x bfloat> poison, <vscale x 1 x bfloat> [[OP1]], bfloat [[OP2]], <vscale x 1 x i1> [[MASK]], i64 [[VL]])
+// CHECK-RV64-NEXT: ret <vscale x 1 x bfloat> [[TMP0]]
+//
+vbfloat16mf4_t test_vfmerge_vfm_bf16mf4(vbfloat16mf4_t op1, __bf16 op2, vbool64_t mask, size_t vl) {
+ return __riscv_vfmerge_vfm_bf16mf4(op1, op2, mask, vl);
+}
+
+// CHECK-RV64-LABEL: define dso_local <vscale x 2 x bfloat> @test_vfmerge_vfm_bf16mf2(
+// CHECK-RV64-SAME: <vscale x 2 x bfloat> [[OP1:%.*]], bfloat noundef [[OP2:%.*]], <vscale x 2 x i1> [[MASK:%.*]], i64 noundef [[VL:%.*]]) #[[ATTR0]] {
+// CHECK-RV64-NEXT: [[ENTRY:.*:]]
+// CHECK-RV64-NEXT: [[TMP0:%.*]] = call <vscale x 2 x bfloat> @llvm.riscv.vfmerge.nxv2bf16.bf16.i64(<vscale x 2 x bfloat> poison, <vscale x 2 x bfloat> [[OP1]], bfloat [[OP2]], <vscale x 2 x i1> [[MASK]], i64 [[VL]])
+// CHECK-RV64-NEXT: ret <vscale x 2 x bfloat> [[TMP0]]
+//
+vbfloat16mf2_t test_vfmerge_vfm_bf16mf2(vbfloat16mf2_t op1, __bf16 op2, vbool32_t mask, size_t vl) {
+ return __riscv_vfmerge_vfm_bf16mf2(op1, op2, mask, vl);
+}
+
+// CHECK-RV64-LABEL: define dso_local <vscale x 4 x bfloat> @test_vfmerge_vfm_bf16m1(
+// CHECK-RV64-SAME: <vscale x 4 x bfloat> [[OP1:%.*]], bfloat noundef [[OP2:%.*]], <vscale x 4 x i1> [[MASK:%.*]], i64 noundef [[VL:%.*]]) #[[ATTR0]] {
+// CHECK-RV64-NEXT: [[ENTRY:.*:]]
+// CHECK-RV64-NEXT: [[TMP0:%.*]] = call <vscale x 4 x bfloat> @llvm.riscv.vfmerge.nxv4bf16.bf16.i64(<vscale x 4 x bfloat> poison, <vscale x 4 x bfloat> [[OP1]], bfloat [[OP2]], <vscale x 4 x i1> [[MASK]], i64 [[VL]])
+// CHECK-RV64-NEXT: ret <vscale x 4 x bfloat> [[TMP0]]
+//
+vbfloat16m1_t test_vfmerge_vfm_bf16m1(vbfloat16m1_t op1, __bf16 op2, vbool16_t mask, size_t vl) {
+ return __riscv_vfmerge_vfm_bf16m1(op1, op2, mask, vl);
+}
+
+// CHECK-RV64-LABEL: define dso_local <vscale x 8 x bfloat> @test_vfmerge_vfm_bf16m2(
+// CHECK-RV64-SAME: <vscale x 8 x bfloat> [[OP1:%.*]], bfloat noundef [[OP2:%.*]], <vscale x 8 x i1> [[MASK:%.*]], i64 noundef [[VL:%.*]]) #[[ATTR0]] {
+// CHECK-RV64-NEXT: [[ENTRY:.*:]]
+// CHECK-RV64-NEXT: [[TMP0:%.*]] = call <vscale x 8 x bfloat> @llvm.riscv.vfmerge.nxv8bf16.bf16.i64(<vscale x 8 x bfloat> poison, <vscale x 8 x bfloat> [[OP1]], bfloat [[OP2]], <vscale x 8 x i1> [[MASK]], i64 [[VL]])
+// CHECK-RV64-NEXT: ret <vscale x 8 x bfloat> [[TMP0]]
+//
+vbfloat16m2_t test_vfmerge_vfm_bf16m2(vbfloat16m2_t op1, __bf16 op2, vbool8_t mask, size_t vl) {
+ return __riscv_vfmerge_vfm_bf16m2(op1, op2, mask, vl);
+}
+
+// CHECK-RV64-LABEL: define dso_local <vscale x 16 x bfloat> @test_vfmerge_vfm_bf16m4(
+// CHECK-RV64-SAME: <vscale x 16 x bfloat> [[OP1:%.*]], bfloat noundef [[OP2:%.*]], <vscale x 16 x i1> [[MASK:%.*]], i64 noundef [[VL:%.*]]) #[[ATTR0]] {
+// CHECK-RV64-NEXT: [[ENTRY:.*:]]
+// CHECK-RV64-NEXT: [[TMP0:%.*]] = call <vscale x 16 x bfloat> @llvm.riscv.vfmerge.nxv16bf16.bf16.i64(<vscale x 16 x bfloat> poison, <vscale x 16 x bfloat> [[OP1]], bfloat [[OP2]], <vscale x 16 x i1> [[MASK]], i64 [[VL]])
+// CHECK-RV64-NEXT: ret <vscale x 16 x bfloat> [[TMP0]]
+//
+vbfloat16m4_t test_vfmerge_vfm_bf16m4(vbfloat16m4_t op1, __bf16 op2, vbool4_t mask, size_t vl) {
+ return __riscv_vfmerge_vfm_bf16m4(op1, op2, mask, vl);
+}
+
+// CHECK-RV64-LABEL: define dso_local <vscale x 32 x bfloat> @test_vfmerge_vfm_bf16m8(
+// CHECK-RV64-SAME: <vscale x 32 x bfloat> [[OP1:%.*]], bfloat noundef [[OP2:%.*]], <vscale x 32 x i1> [[MASK:%.*]], i64 noundef [[VL:%.*]]) #[[ATTR0]] {
+// CHECK-RV64-NEXT: [[ENTRY:.*:]]
+// CHECK-RV64-NEXT: [[TMP0:%.*]] = call <vscale x 32 x bfloat> @llvm.riscv.vfmerge.nxv32bf16.bf16.i64(<vscale x 32 x bfloat> poison, <vscale x 32 x bfloat> [[OP1]], bfloat [[OP2]], <vscale x 32 x i1> [[MASK]], i64 [[VL]])
+// CHECK-RV64-NEXT: ret <vscale x 32 x bfloat> [[TMP0]]
+//
+vbfloat16m8_t test_vfmerge_vfm_bf16m8(vbfloat16m8_t op1, __bf16 op2, vbool2_t mask, size_t vl) {
+ return __riscv_vfmerge_vfm_bf16m8(op1, op2, mask, vl);
+}
+
diff --git a/clang/test/CodeGen/RISCV/rvv-intrinsics-autogenerated/zvfbfa/non-policy/non-overloaded/vfmin.c b/clang/test/CodeGen/RISCV/rvv-intrinsics-autogenerated/zvfbfa/non-policy/non-overloaded/vfmin.c
new file mode 100644
index 0000000..8f8d82ba
--- /dev/null
+++ b/clang/test/CodeGen/RISCV/rvv-intrinsics-autogenerated/zvfbfa/non-policy/non-overloaded/vfmin.c
@@ -0,0 +1,249 @@
+// NOTE: Assertions have been autogenerated by utils/update_cc_test_checks.py UTC_ARGS: --version 5
+// REQUIRES: riscv-registered-target
+// RUN: %clang_cc1 -triple riscv64 -target-feature +v \
+// RUN: -target-feature +experimental-zvfbfa -disable-O0-optnone \
+// RUN: -emit-llvm %s -o - | opt -S -passes=mem2reg | \
+// RUN: FileCheck --check-prefix=CHECK-RV64 %s
+
+#include <riscv_vector.h>
+
+// CHECK-RV64-LABEL: define dso_local <vscale x 1 x bfloat> @test_vfmin_vv_bf16mf4(
+// CHECK-RV64-SAME: <vscale x 1 x bfloat> [[OP1:%.*]], <vscale x 1 x bfloat> [[OP2:%.*]], i64 noundef [[VL:%.*]]) #[[ATTR0:[0-9]+]] {
+// CHECK-RV64-NEXT: [[ENTRY:.*:]]
+// CHECK-RV64-NEXT: [[TMP0:%.*]] = call <vscale x 1 x bfloat> @llvm.riscv.vfmin.nxv1bf16.nxv1bf16.i64(<vscale x 1 x bfloat> poison, <vscale x 1 x bfloat> [[OP1]], <vscale x 1 x bfloat> [[OP2]], i64 [[VL]])
+// CHECK-RV64-NEXT: ret <vscale x 1 x bfloat> [[TMP0]]
+//
+vbfloat16mf4_t test_vfmin_vv_bf16mf4(vbfloat16mf4_t op1, vbfloat16mf4_t op2, size_t vl) {
+ return __riscv_vfmin_vv_bf16mf4(op1, op2, vl);
+}
+
+// CHECK-RV64-LABEL: define dso_local <vscale x 1 x bfloat> @test_vfmin_vf_bf16mf4(
+// CHECK-RV64-SAME: <vscale x 1 x bfloat> [[OP1:%.*]], bfloat noundef [[OP2:%.*]], i64 noundef [[VL:%.*]]) #[[ATTR0]] {
+// CHECK-RV64-NEXT: [[ENTRY:.*:]]
+// CHECK-RV64-NEXT: [[TMP0:%.*]] = call <vscale x 1 x bfloat> @llvm.riscv.vfmin.nxv1bf16.bf16.i64(<vscale x 1 x bfloat> poison, <vscale x 1 x bfloat> [[OP1]], bfloat [[OP2]], i64 [[VL]])
+// CHECK-RV64-NEXT: ret <vscale x 1 x bfloat> [[TMP0]]
+//
+vbfloat16mf4_t test_vfmin_vf_bf16mf4(vbfloat16mf4_t op1, __bf16 op2, size_t vl) {
+ return __riscv_vfmin_vf_bf16mf4(op1, op2, vl);
+}
+
+// CHECK-RV64-LABEL: define dso_local <vscale x 2 x bfloat> @test_vfmin_vv_bf16mf2(
+// CHECK-RV64-SAME: <vscale x 2 x bfloat> [[OP1:%.*]], <vscale x 2 x bfloat> [[OP2:%.*]], i64 noundef [[VL:%.*]]) #[[ATTR0]] {
+// CHECK-RV64-NEXT: [[ENTRY:.*:]]
+// CHECK-RV64-NEXT: [[TMP0:%.*]] = call <vscale x 2 x bfloat> @llvm.riscv.vfmin.nxv2bf16.nxv2bf16.i64(<vscale x 2 x bfloat> poison, <vscale x 2 x bfloat> [[OP1]], <vscale x 2 x bfloat> [[OP2]], i64 [[VL]])
+// CHECK-RV64-NEXT: ret <vscale x 2 x bfloat> [[TMP0]]
+//
+vbfloat16mf2_t test_vfmin_vv_bf16mf2(vbfloat16mf2_t op1, vbfloat16mf2_t op2, size_t vl) {
+ return __riscv_vfmin_vv_bf16mf2(op1, op2, vl);
+}
+
+// CHECK-RV64-LABEL: define dso_local <vscale x 2 x bfloat> @test_vfmin_vf_bf16mf2(
+// CHECK-RV64-SAME: <vscale x 2 x bfloat> [[OP1:%.*]], bfloat noundef [[OP2:%.*]], i64 noundef [[VL:%.*]]) #[[ATTR0]] {
+// CHECK-RV64-NEXT: [[ENTRY:.*:]]
+// CHECK-RV64-NEXT: [[TMP0:%.*]] = call <vscale x 2 x bfloat> @llvm.riscv.vfmin.nxv2bf16.bf16.i64(<vscale x 2 x bfloat> poison, <vscale x 2 x bfloat> [[OP1]], bfloat [[OP2]], i64 [[VL]])
+// CHECK-RV64-NEXT: ret <vscale x 2 x bfloat> [[TMP0]]
+//
+vbfloat16mf2_t test_vfmin_vf_bf16mf2(vbfloat16mf2_t op1, __bf16 op2, size_t vl) {
+ return __riscv_vfmin_vf_bf16mf2(op1, op2, vl);
+}
+
+// CHECK-RV64-LABEL: define dso_local <vscale x 4 x bfloat> @test_vfmin_vv_bf16m1(
+// CHECK-RV64-SAME: <vscale x 4 x bfloat> [[OP1:%.*]], <vscale x 4 x bfloat> [[OP2:%.*]], i64 noundef [[VL:%.*]]) #[[ATTR0]] {
+// CHECK-RV64-NEXT: [[ENTRY:.*:]]
+// CHECK-RV64-NEXT: [[TMP0:%.*]] = call <vscale x 4 x bfloat> @llvm.riscv.vfmin.nxv4bf16.nxv4bf16.i64(<vscale x 4 x bfloat> poison, <vscale x 4 x bfloat> [[OP1]], <vscale x 4 x bfloat> [[OP2]], i64 [[VL]])
+// CHECK-RV64-NEXT: ret <vscale x 4 x bfloat> [[TMP0]]
+//
+vbfloat16m1_t test_vfmin_vv_bf16m1(vbfloat16m1_t op1, vbfloat16m1_t op2, size_t vl) {
+ return __riscv_vfmin_vv_bf16m1(op1, op2, vl);
+}
+
+// CHECK-RV64-LABEL: define dso_local <vscale x 4 x bfloat> @test_vfmin_vf_bf16m1(
+// CHECK-RV64-SAME: <vscale x 4 x bfloat> [[OP1:%.*]], bfloat noundef [[OP2:%.*]], i64 noundef [[VL:%.*]]) #[[ATTR0]] {
+// CHECK-RV64-NEXT: [[ENTRY:.*:]]
+// CHECK-RV64-NEXT: [[TMP0:%.*]] = call <vscale x 4 x bfloat> @llvm.riscv.vfmin.nxv4bf16.bf16.i64(<vscale x 4 x bfloat> poison, <vscale x 4 x bfloat> [[OP1]], bfloat [[OP2]], i64 [[VL]])
+// CHECK-RV64-NEXT: ret <vscale x 4 x bfloat> [[TMP0]]
+//
+vbfloat16m1_t test_vfmin_vf_bf16m1(vbfloat16m1_t op1, __bf16 op2, size_t vl) {
+ return __riscv_vfmin_vf_bf16m1(op1, op2, vl);
+}
+
+// CHECK-RV64-LABEL: define dso_local <vscale x 8 x bfloat> @test_vfmin_vv_bf16m2(
+// CHECK-RV64-SAME: <vscale x 8 x bfloat> [[OP1:%.*]], <vscale x 8 x bfloat> [[OP2:%.*]], i64 noundef [[VL:%.*]]) #[[ATTR0]] {
+// CHECK-RV64-NEXT: [[ENTRY:.*:]]
+// CHECK-RV64-NEXT: [[TMP0:%.*]] = call <vscale x 8 x bfloat> @llvm.riscv.vfmin.nxv8bf16.nxv8bf16.i64(<vscale x 8 x bfloat> poison, <vscale x 8 x bfloat> [[OP1]], <vscale x 8 x bfloat> [[OP2]], i64 [[VL]])
+// CHECK-RV64-NEXT: ret <vscale x 8 x bfloat> [[TMP0]]
+//
+vbfloat16m2_t test_vfmin_vv_bf16m2(vbfloat16m2_t op1, vbfloat16m2_t op2, size_t vl) {
+ return __riscv_vfmin_vv_bf16m2(op1, op2, vl);
+}
+
+// CHECK-RV64-LABEL: define dso_local <vscale x 8 x bfloat> @test_vfmin_vf_bf16m2(
+// CHECK-RV64-SAME: <vscale x 8 x bfloat> [[OP1:%.*]], bfloat noundef [[OP2:%.*]], i64 noundef [[VL:%.*]]) #[[ATTR0]] {
+// CHECK-RV64-NEXT: [[ENTRY:.*:]]
+// CHECK-RV64-NEXT: [[TMP0:%.*]] = call <vscale x 8 x bfloat> @llvm.riscv.vfmin.nxv8bf16.bf16.i64(<vscale x 8 x bfloat> poison, <vscale x 8 x bfloat> [[OP1]], bfloat [[OP2]], i64 [[VL]])
+// CHECK-RV64-NEXT: ret <vscale x 8 x bfloat> [[TMP0]]
+//
+vbfloat16m2_t test_vfmin_vf_bf16m2(vbfloat16m2_t op1, __bf16 op2, size_t vl) {
+ return __riscv_vfmin_vf_bf16m2(op1, op2, vl);
+}
+
+// CHECK-RV64-LABEL: define dso_local <vscale x 16 x bfloat> @test_vfmin_vv_bf16m4(
+// CHECK-RV64-SAME: <vscale x 16 x bfloat> [[OP1:%.*]], <vscale x 16 x bfloat> [[OP2:%.*]], i64 noundef [[VL:%.*]]) #[[ATTR0]] {
+// CHECK-RV64-NEXT: [[ENTRY:.*:]]
+// CHECK-RV64-NEXT: [[TMP0:%.*]] = call <vscale x 16 x bfloat> @llvm.riscv.vfmin.nxv16bf16.nxv16bf16.i64(<vscale x 16 x bfloat> poison, <vscale x 16 x bfloat> [[OP1]], <vscale x 16 x bfloat> [[OP2]], i64 [[VL]])
+// CHECK-RV64-NEXT: ret <vscale x 16 x bfloat> [[TMP0]]
+//
+vbfloat16m4_t test_vfmin_vv_bf16m4(vbfloat16m4_t op1, vbfloat16m4_t op2, size_t vl) {
+ return __riscv_vfmin_vv_bf16m4(op1, op2, vl);
+}
+
+// CHECK-RV64-LABEL: define dso_local <vscale x 16 x bfloat> @test_vfmin_vf_bf16m4(
+// CHECK-RV64-SAME: <vscale x 16 x bfloat> [[OP1:%.*]], bfloat noundef [[OP2:%.*]], i64 noundef [[VL:%.*]]) #[[ATTR0]] {
+// CHECK-RV64-NEXT: [[ENTRY:.*:]]
+// CHECK-RV64-NEXT: [[TMP0:%.*]] = call <vscale x 16 x bfloat> @llvm.riscv.vfmin.nxv16bf16.bf16.i64(<vscale x 16 x bfloat> poison, <vscale x 16 x bfloat> [[OP1]], bfloat [[OP2]], i64 [[VL]])
+// CHECK-RV64-NEXT: ret <vscale x 16 x bfloat> [[TMP0]]
+//
+vbfloat16m4_t test_vfmin_vf_bf16m4(vbfloat16m4_t op1, __bf16 op2, size_t vl) {
+ return __riscv_vfmin_vf_bf16m4(op1, op2, vl);
+}
+
+// CHECK-RV64-LABEL: define dso_local <vscale x 32 x bfloat> @test_vfmin_vv_bf16m8(
+// CHECK-RV64-SAME: <vscale x 32 x bfloat> [[OP1:%.*]], <vscale x 32 x bfloat> [[OP2:%.*]], i64 noundef [[VL:%.*]]) #[[ATTR0]] {
+// CHECK-RV64-NEXT: [[ENTRY:.*:]]
+// CHECK-RV64-NEXT: [[TMP0:%.*]] = call <vscale x 32 x bfloat> @llvm.riscv.vfmin.nxv32bf16.nxv32bf16.i64(<vscale x 32 x bfloat> poison, <vscale x 32 x bfloat> [[OP1]], <vscale x 32 x bfloat> [[OP2]], i64 [[VL]])
+// CHECK-RV64-NEXT: ret <vscale x 32 x bfloat> [[TMP0]]
+//
+vbfloat16m8_t test_vfmin_vv_bf16m8(vbfloat16m8_t op1, vbfloat16m8_t op2, size_t vl) {
+ return __riscv_vfmin_vv_bf16m8(op1, op2, vl);
+}
+
+// CHECK-RV64-LABEL: define dso_local <vscale x 32 x bfloat> @test_vfmin_vf_bf16m8(
+// CHECK-RV64-SAME: <vscale x 32 x bfloat> [[OP1:%.*]], bfloat noundef [[OP2:%.*]], i64 noundef [[VL:%.*]]) #[[ATTR0]] {
+// CHECK-RV64-NEXT: [[ENTRY:.*:]]
+// CHECK-RV64-NEXT: [[TMP0:%.*]] = call <vscale x 32 x bfloat> @llvm.riscv.vfmin.nxv32bf16.bf16.i64(<vscale x 32 x bfloat> poison, <vscale x 32 x bfloat> [[OP1]], bfloat [[OP2]], i64 [[VL]])
+// CHECK-RV64-NEXT: ret <vscale x 32 x bfloat> [[TMP0]]
+//
+vbfloat16m8_t test_vfmin_vf_bf16m8(vbfloat16m8_t op1, __bf16 op2, size_t vl) {
+ return __riscv_vfmin_vf_bf16m8(op1, op2, vl);
+}
+
+// CHECK-RV64-LABEL: define dso_local <vscale x 1 x bfloat> @test_vfmin_vv_bf16mf4_m(
+// CHECK-RV64-SAME: <vscale x 1 x i1> [[MASK:%.*]], <vscale x 1 x bfloat> [[OP1:%.*]], <vscale x 1 x bfloat> [[OP2:%.*]], i64 noundef [[VL:%.*]]) #[[ATTR0]] {
+// CHECK-RV64-NEXT: [[ENTRY:.*:]]
+// CHECK-RV64-NEXT: [[TMP0:%.*]] = call <vscale x 1 x bfloat> @llvm.riscv.vfmin.mask.nxv1bf16.nxv1bf16.i64(<vscale x 1 x bfloat> poison, <vscale x 1 x bfloat> [[OP1]], <vscale x 1 x bfloat> [[OP2]], <vscale x 1 x i1> [[MASK]], i64 [[VL]], i64 3)
+// CHECK-RV64-NEXT: ret <vscale x 1 x bfloat> [[TMP0]]
+//
+vbfloat16mf4_t test_vfmin_vv_bf16mf4_m(vbool64_t mask, vbfloat16mf4_t op1, vbfloat16mf4_t op2, size_t vl) {
+ return __riscv_vfmin_vv_bf16mf4_m(mask, op1, op2, vl);
+}
+
+// CHECK-RV64-LABEL: define dso_local <vscale x 1 x bfloat> @test_vfmin_vf_bf16mf4_m(
+// CHECK-RV64-SAME: <vscale x 1 x i1> [[MASK:%.*]], <vscale x 1 x bfloat> [[OP1:%.*]], bfloat noundef [[OP2:%.*]], i64 noundef [[VL:%.*]]) #[[ATTR0]] {
+// CHECK-RV64-NEXT: [[ENTRY:.*:]]
+// CHECK-RV64-NEXT: [[TMP0:%.*]] = call <vscale x 1 x bfloat> @llvm.riscv.vfmin.mask.nxv1bf16.bf16.i64(<vscale x 1 x bfloat> poison, <vscale x 1 x bfloat> [[OP1]], bfloat [[OP2]], <vscale x 1 x i1> [[MASK]], i64 [[VL]], i64 3)
+// CHECK-RV64-NEXT: ret <vscale x 1 x bfloat> [[TMP0]]
+//
+vbfloat16mf4_t test_vfmin_vf_bf16mf4_m(vbool64_t mask, vbfloat16mf4_t op1, __bf16 op2, size_t vl) {
+ return __riscv_vfmin_vf_bf16mf4_m(mask, op1, op2, vl);
+}
+
+// CHECK-RV64-LABEL: define dso_local <vscale x 2 x bfloat> @test_vfmin_vv_bf16mf2_m(
+// CHECK-RV64-SAME: <vscale x 2 x i1> [[MASK:%.*]], <vscale x 2 x bfloat> [[OP1:%.*]], <vscale x 2 x bfloat> [[OP2:%.*]], i64 noundef [[VL:%.*]]) #[[ATTR0]] {
+// CHECK-RV64-NEXT: [[ENTRY:.*:]]
+// CHECK-RV64-NEXT: [[TMP0:%.*]] = call <vscale x 2 x bfloat> @llvm.riscv.vfmin.mask.nxv2bf16.nxv2bf16.i64(<vscale x 2 x bfloat> poison, <vscale x 2 x bfloat> [[OP1]], <vscale x 2 x bfloat> [[OP2]], <vscale x 2 x i1> [[MASK]], i64 [[VL]], i64 3)
+// CHECK-RV64-NEXT: ret <vscale x 2 x bfloat> [[TMP0]]
+//
+vbfloat16mf2_t test_vfmin_vv_bf16mf2_m(vbool32_t mask, vbfloat16mf2_t op1, vbfloat16mf2_t op2, size_t vl) {
+ return __riscv_vfmin_vv_bf16mf2_m(mask, op1, op2, vl);
+}
+
+// CHECK-RV64-LABEL: define dso_local <vscale x 2 x bfloat> @test_vfmin_vf_bf16mf2_m(
+// CHECK-RV64-SAME: <vscale x 2 x i1> [[MASK:%.*]], <vscale x 2 x bfloat> [[OP1:%.*]], bfloat noundef [[OP2:%.*]], i64 noundef [[VL:%.*]]) #[[ATTR0]] {
+// CHECK-RV64-NEXT: [[ENTRY:.*:]]
+// CHECK-RV64-NEXT: [[TMP0:%.*]] = call <vscale x 2 x bfloat> @llvm.riscv.vfmin.mask.nxv2bf16.bf16.i64(<vscale x 2 x bfloat> poison, <vscale x 2 x bfloat> [[OP1]], bfloat [[OP2]], <vscale x 2 x i1> [[MASK]], i64 [[VL]], i64 3)
+// CHECK-RV64-NEXT: ret <vscale x 2 x bfloat> [[TMP0]]
+//
+vbfloat16mf2_t test_vfmin_vf_bf16mf2_m(vbool32_t mask, vbfloat16mf2_t op1, __bf16 op2, size_t vl) {
+ return __riscv_vfmin_vf_bf16mf2_m(mask, op1, op2, vl);
+}
+
+// CHECK-RV64-LABEL: define dso_local <vscale x 4 x bfloat> @test_vfmin_vv_bf16m1_m(
+// CHECK-RV64-SAME: <vscale x 4 x i1> [[MASK:%.*]], <vscale x 4 x bfloat> [[OP1:%.*]], <vscale x 4 x bfloat> [[OP2:%.*]], i64 noundef [[VL:%.*]]) #[[ATTR0]] {
+// CHECK-RV64-NEXT: [[ENTRY:.*:]]
+// CHECK-RV64-NEXT: [[TMP0:%.*]] = call <vscale x 4 x bfloat> @llvm.riscv.vfmin.mask.nxv4bf16.nxv4bf16.i64(<vscale x 4 x bfloat> poison, <vscale x 4 x bfloat> [[OP1]], <vscale x 4 x bfloat> [[OP2]], <vscale x 4 x i1> [[MASK]], i64 [[VL]], i64 3)
+// CHECK-RV64-NEXT: ret <vscale x 4 x bfloat> [[TMP0]]
+//
+vbfloat16m1_t test_vfmin_vv_bf16m1_m(vbool16_t mask, vbfloat16m1_t op1, vbfloat16m1_t op2, size_t vl) {
+ return __riscv_vfmin_vv_bf16m1_m(mask, op1, op2, vl);
+}
+
+// CHECK-RV64-LABEL: define dso_local <vscale x 4 x bfloat> @test_vfmin_vf_bf16m1_m(
+// CHECK-RV64-SAME: <vscale x 4 x i1> [[MASK:%.*]], <vscale x 4 x bfloat> [[OP1:%.*]], bfloat noundef [[OP2:%.*]], i64 noundef [[VL:%.*]]) #[[ATTR0]] {
+// CHECK-RV64-NEXT: [[ENTRY:.*:]]
+// CHECK-RV64-NEXT: [[TMP0:%.*]] = call <vscale x 4 x bfloat> @llvm.riscv.vfmin.mask.nxv4bf16.bf16.i64(<vscale x 4 x bfloat> poison, <vscale x 4 x bfloat> [[OP1]], bfloat [[OP2]], <vscale x 4 x i1> [[MASK]], i64 [[VL]], i64 3)
+// CHECK-RV64-NEXT: ret <vscale x 4 x bfloat> [[TMP0]]
+//
+vbfloat16m1_t test_vfmin_vf_bf16m1_m(vbool16_t mask, vbfloat16m1_t op1, __bf16 op2, size_t vl) {
+ return __riscv_vfmin_vf_bf16m1_m(mask, op1, op2, vl);
+}
+
+// CHECK-RV64-LABEL: define dso_local <vscale x 8 x bfloat> @test_vfmin_vv_bf16m2_m(
+// CHECK-RV64-SAME: <vscale x 8 x i1> [[MASK:%.*]], <vscale x 8 x bfloat> [[OP1:%.*]], <vscale x 8 x bfloat> [[OP2:%.*]], i64 noundef [[VL:%.*]]) #[[ATTR0]] {
+// CHECK-RV64-NEXT: [[ENTRY:.*:]]
+// CHECK-RV64-NEXT: [[TMP0:%.*]] = call <vscale x 8 x bfloat> @llvm.riscv.vfmin.mask.nxv8bf16.nxv8bf16.i64(<vscale x 8 x bfloat> poison, <vscale x 8 x bfloat> [[OP1]], <vscale x 8 x bfloat> [[OP2]], <vscale x 8 x i1> [[MASK]], i64 [[VL]], i64 3)
+// CHECK-RV64-NEXT: ret <vscale x 8 x bfloat> [[TMP0]]
+//
+vbfloat16m2_t test_vfmin_vv_bf16m2_m(vbool8_t mask, vbfloat16m2_t op1, vbfloat16m2_t op2, size_t vl) {
+ return __riscv_vfmin_vv_bf16m2_m(mask, op1, op2, vl);
+}
+
+// CHECK-RV64-LABEL: define dso_local <vscale x 8 x bfloat> @test_vfmin_vf_bf16m2_m(
+// CHECK-RV64-SAME: <vscale x 8 x i1> [[MASK:%.*]], <vscale x 8 x bfloat> [[OP1:%.*]], bfloat noundef [[OP2:%.*]], i64 noundef [[VL:%.*]]) #[[ATTR0]] {
+// CHECK-RV64-NEXT: [[ENTRY:.*:]]
+// CHECK-RV64-NEXT: [[TMP0:%.*]] = call <vscale x 8 x bfloat> @llvm.riscv.vfmin.mask.nxv8bf16.bf16.i64(<vscale x 8 x bfloat> poison, <vscale x 8 x bfloat> [[OP1]], bfloat [[OP2]], <vscale x 8 x i1> [[MASK]], i64 [[VL]], i64 3)
+// CHECK-RV64-NEXT: ret <vscale x 8 x bfloat> [[TMP0]]
+//
+vbfloat16m2_t test_vfmin_vf_bf16m2_m(vbool8_t mask, vbfloat16m2_t op1, __bf16 op2, size_t vl) {
+ return __riscv_vfmin_vf_bf16m2_m(mask, op1, op2, vl);
+}
+
+// CHECK-RV64-LABEL: define dso_local <vscale x 16 x bfloat> @test_vfmin_vv_bf16m4_m(
+// CHECK-RV64-SAME: <vscale x 16 x i1> [[MASK:%.*]], <vscale x 16 x bfloat> [[OP1:%.*]], <vscale x 16 x bfloat> [[OP2:%.*]], i64 noundef [[VL:%.*]]) #[[ATTR0]] {
+// CHECK-RV64-NEXT: [[ENTRY:.*:]]
+// CHECK-RV64-NEXT: [[TMP0:%.*]] = call <vscale x 16 x bfloat> @llvm.riscv.vfmin.mask.nxv16bf16.nxv16bf16.i64(<vscale x 16 x bfloat> poison, <vscale x 16 x bfloat> [[OP1]], <vscale x 16 x bfloat> [[OP2]], <vscale x 16 x i1> [[MASK]], i64 [[VL]], i64 3)
+// CHECK-RV64-NEXT: ret <vscale x 16 x bfloat> [[TMP0]]
+//
+vbfloat16m4_t test_vfmin_vv_bf16m4_m(vbool4_t mask, vbfloat16m4_t op1, vbfloat16m4_t op2, size_t vl) {
+ return __riscv_vfmin_vv_bf16m4_m(mask, op1, op2, vl);
+}
+
+// CHECK-RV64-LABEL: define dso_local <vscale x 16 x bfloat> @test_vfmin_vf_bf16m4_m(
+// CHECK-RV64-SAME: <vscale x 16 x i1> [[MASK:%.*]], <vscale x 16 x bfloat> [[OP1:%.*]], bfloat noundef [[OP2:%.*]], i64 noundef [[VL:%.*]]) #[[ATTR0]] {
+// CHECK-RV64-NEXT: [[ENTRY:.*:]]
+// CHECK-RV64-NEXT: [[TMP0:%.*]] = call <vscale x 16 x bfloat> @llvm.riscv.vfmin.mask.nxv16bf16.bf16.i64(<vscale x 16 x bfloat> poison, <vscale x 16 x bfloat> [[OP1]], bfloat [[OP2]], <vscale x 16 x i1> [[MASK]], i64 [[VL]], i64 3)
+// CHECK-RV64-NEXT: ret <vscale x 16 x bfloat> [[TMP0]]
+//
+vbfloat16m4_t test_vfmin_vf_bf16m4_m(vbool4_t mask, vbfloat16m4_t op1, __bf16 op2, size_t vl) {
+ return __riscv_vfmin_vf_bf16m4_m(mask, op1, op2, vl);
+}
+
+// CHECK-RV64-LABEL: define dso_local <vscale x 32 x bfloat> @test_vfmin_vv_bf16m8_m(
+// CHECK-RV64-SAME: <vscale x 32 x i1> [[MASK:%.*]], <vscale x 32 x bfloat> [[OP1:%.*]], <vscale x 32 x bfloat> [[OP2:%.*]], i64 noundef [[VL:%.*]]) #[[ATTR0]] {
+// CHECK-RV64-NEXT: [[ENTRY:.*:]]
+// CHECK-RV64-NEXT: [[TMP0:%.*]] = call <vscale x 32 x bfloat> @llvm.riscv.vfmin.mask.nxv32bf16.nxv32bf16.i64(<vscale x 32 x bfloat> poison, <vscale x 32 x bfloat> [[OP1]], <vscale x 32 x bfloat> [[OP2]], <vscale x 32 x i1> [[MASK]], i64 [[VL]], i64 3)
+// CHECK-RV64-NEXT: ret <vscale x 32 x bfloat> [[TMP0]]
+//
+vbfloat16m8_t test_vfmin_vv_bf16m8_m(vbool2_t mask, vbfloat16m8_t op1, vbfloat16m8_t op2, size_t vl) {
+ return __riscv_vfmin_vv_bf16m8_m(mask, op1, op2, vl);
+}
+
+// CHECK-RV64-LABEL: define dso_local <vscale x 32 x bfloat> @test_vfmin_vf_bf16m8_m(
+// CHECK-RV64-SAME: <vscale x 32 x i1> [[MASK:%.*]], <vscale x 32 x bfloat> [[OP1:%.*]], bfloat noundef [[OP2:%.*]], i64 noundef [[VL:%.*]]) #[[ATTR0]] {
+// CHECK-RV64-NEXT: [[ENTRY:.*:]]
+// CHECK-RV64-NEXT: [[TMP0:%.*]] = call <vscale x 32 x bfloat> @llvm.riscv.vfmin.mask.nxv32bf16.bf16.i64(<vscale x 32 x bfloat> poison, <vscale x 32 x bfloat> [[OP1]], bfloat [[OP2]], <vscale x 32 x i1> [[MASK]], i64 [[VL]], i64 3)
+// CHECK-RV64-NEXT: ret <vscale x 32 x bfloat> [[TMP0]]
+//
+vbfloat16m8_t test_vfmin_vf_bf16m8_m(vbool2_t mask, vbfloat16m8_t op1, __bf16 op2, size_t vl) {
+ return __riscv_vfmin_vf_bf16m8_m(mask, op1, op2, vl);
+}
+
diff --git a/clang/test/CodeGen/RISCV/rvv-intrinsics-autogenerated/zvfbfa/non-policy/non-overloaded/vfmsac.c b/clang/test/CodeGen/RISCV/rvv-intrinsics-autogenerated/zvfbfa/non-policy/non-overloaded/vfmsac.c
new file mode 100644
index 0000000..f4644df
--- /dev/null
+++ b/clang/test/CodeGen/RISCV/rvv-intrinsics-autogenerated/zvfbfa/non-policy/non-overloaded/vfmsac.c
@@ -0,0 +1,249 @@
+// NOTE: Assertions have been autogenerated by utils/update_cc_test_checks.py UTC_ARGS: --version 5
+// REQUIRES: riscv-registered-target
+// RUN: %clang_cc1 -triple riscv64 -target-feature +v \
+// RUN: -target-feature +experimental-zvfbfa -disable-O0-optnone \
+// RUN: -emit-llvm %s -o - | opt -S -passes=mem2reg | \
+// RUN: FileCheck --check-prefix=CHECK-RV64 %s
+
+#include <riscv_vector.h>
+
+// CHECK-RV64-LABEL: define dso_local <vscale x 1 x bfloat> @test_vfmsac_vv_bf16mf4(
+// CHECK-RV64-SAME: <vscale x 1 x bfloat> [[VD:%.*]], <vscale x 1 x bfloat> [[VS1:%.*]], <vscale x 1 x bfloat> [[VS2:%.*]], i64 noundef [[VL:%.*]]) #[[ATTR0:[0-9]+]] {
+// CHECK-RV64-NEXT: [[ENTRY:.*:]]
+// CHECK-RV64-NEXT: [[TMP0:%.*]] = call <vscale x 1 x bfloat> @llvm.riscv.vfmsac.nxv1bf16.nxv1bf16.i64(<vscale x 1 x bfloat> [[VD]], <vscale x 1 x bfloat> [[VS1]], <vscale x 1 x bfloat> [[VS2]], i64 7, i64 [[VL]], i64 3)
+// CHECK-RV64-NEXT: ret <vscale x 1 x bfloat> [[TMP0]]
+//
+vbfloat16mf4_t test_vfmsac_vv_bf16mf4(vbfloat16mf4_t vd, vbfloat16mf4_t vs1, vbfloat16mf4_t vs2, size_t vl) {
+ return __riscv_vfmsac_vv_bf16mf4(vd, vs1, vs2, vl);
+}
+
+// CHECK-RV64-LABEL: define dso_local <vscale x 1 x bfloat> @test_vfmsac_vf_bf16mf4(
+// CHECK-RV64-SAME: <vscale x 1 x bfloat> [[VD:%.*]], bfloat noundef [[RS1:%.*]], <vscale x 1 x bfloat> [[VS2:%.*]], i64 noundef [[VL:%.*]]) #[[ATTR0]] {
+// CHECK-RV64-NEXT: [[ENTRY:.*:]]
+// CHECK-RV64-NEXT: [[TMP0:%.*]] = call <vscale x 1 x bfloat> @llvm.riscv.vfmsac.nxv1bf16.bf16.i64(<vscale x 1 x bfloat> [[VD]], bfloat [[RS1]], <vscale x 1 x bfloat> [[VS2]], i64 7, i64 [[VL]], i64 3)
+// CHECK-RV64-NEXT: ret <vscale x 1 x bfloat> [[TMP0]]
+//
+vbfloat16mf4_t test_vfmsac_vf_bf16mf4(vbfloat16mf4_t vd, __bf16 rs1, vbfloat16mf4_t vs2, size_t vl) {
+ return __riscv_vfmsac_vf_bf16mf4(vd, rs1, vs2, vl);
+}
+
+// CHECK-RV64-LABEL: define dso_local <vscale x 2 x bfloat> @test_vfmsac_vv_bf16mf2(
+// CHECK-RV64-SAME: <vscale x 2 x bfloat> [[VD:%.*]], <vscale x 2 x bfloat> [[VS1:%.*]], <vscale x 2 x bfloat> [[VS2:%.*]], i64 noundef [[VL:%.*]]) #[[ATTR0]] {
+// CHECK-RV64-NEXT: [[ENTRY:.*:]]
+// CHECK-RV64-NEXT: [[TMP0:%.*]] = call <vscale x 2 x bfloat> @llvm.riscv.vfmsac.nxv2bf16.nxv2bf16.i64(<vscale x 2 x bfloat> [[VD]], <vscale x 2 x bfloat> [[VS1]], <vscale x 2 x bfloat> [[VS2]], i64 7, i64 [[VL]], i64 3)
+// CHECK-RV64-NEXT: ret <vscale x 2 x bfloat> [[TMP0]]
+//
+vbfloat16mf2_t test_vfmsac_vv_bf16mf2(vbfloat16mf2_t vd, vbfloat16mf2_t vs1, vbfloat16mf2_t vs2, size_t vl) {
+ return __riscv_vfmsac_vv_bf16mf2(vd, vs1, vs2, vl);
+}
+
+// CHECK-RV64-LABEL: define dso_local <vscale x 2 x bfloat> @test_vfmsac_vf_bf16mf2(
+// CHECK-RV64-SAME: <vscale x 2 x bfloat> [[VD:%.*]], bfloat noundef [[RS1:%.*]], <vscale x 2 x bfloat> [[VS2:%.*]], i64 noundef [[VL:%.*]]) #[[ATTR0]] {
+// CHECK-RV64-NEXT: [[ENTRY:.*:]]
+// CHECK-RV64-NEXT: [[TMP0:%.*]] = call <vscale x 2 x bfloat> @llvm.riscv.vfmsac.nxv2bf16.bf16.i64(<vscale x 2 x bfloat> [[VD]], bfloat [[RS1]], <vscale x 2 x bfloat> [[VS2]], i64 7, i64 [[VL]], i64 3)
+// CHECK-RV64-NEXT: ret <vscale x 2 x bfloat> [[TMP0]]
+//
+vbfloat16mf2_t test_vfmsac_vf_bf16mf2(vbfloat16mf2_t vd, __bf16 rs1, vbfloat16mf2_t vs2, size_t vl) {
+ return __riscv_vfmsac_vf_bf16mf2(vd, rs1, vs2, vl);
+}
+
+// CHECK-RV64-LABEL: define dso_local <vscale x 4 x bfloat> @test_vfmsac_vv_bf16m1(
+// CHECK-RV64-SAME: <vscale x 4 x bfloat> [[VD:%.*]], <vscale x 4 x bfloat> [[VS1:%.*]], <vscale x 4 x bfloat> [[VS2:%.*]], i64 noundef [[VL:%.*]]) #[[ATTR0]] {
+// CHECK-RV64-NEXT: [[ENTRY:.*:]]
+// CHECK-RV64-NEXT: [[TMP0:%.*]] = call <vscale x 4 x bfloat> @llvm.riscv.vfmsac.nxv4bf16.nxv4bf16.i64(<vscale x 4 x bfloat> [[VD]], <vscale x 4 x bfloat> [[VS1]], <vscale x 4 x bfloat> [[VS2]], i64 7, i64 [[VL]], i64 3)
+// CHECK-RV64-NEXT: ret <vscale x 4 x bfloat> [[TMP0]]
+//
+vbfloat16m1_t test_vfmsac_vv_bf16m1(vbfloat16m1_t vd, vbfloat16m1_t vs1, vbfloat16m1_t vs2, size_t vl) {
+ return __riscv_vfmsac_vv_bf16m1(vd, vs1, vs2, vl);
+}
+
+// CHECK-RV64-LABEL: define dso_local <vscale x 4 x bfloat> @test_vfmsac_vf_bf16m1(
+// CHECK-RV64-SAME: <vscale x 4 x bfloat> [[VD:%.*]], bfloat noundef [[RS1:%.*]], <vscale x 4 x bfloat> [[VS2:%.*]], i64 noundef [[VL:%.*]]) #[[ATTR0]] {
+// CHECK-RV64-NEXT: [[ENTRY:.*:]]
+// CHECK-RV64-NEXT: [[TMP0:%.*]] = call <vscale x 4 x bfloat> @llvm.riscv.vfmsac.nxv4bf16.bf16.i64(<vscale x 4 x bfloat> [[VD]], bfloat [[RS1]], <vscale x 4 x bfloat> [[VS2]], i64 7, i64 [[VL]], i64 3)
+// CHECK-RV64-NEXT: ret <vscale x 4 x bfloat> [[TMP0]]
+//
+vbfloat16m1_t test_vfmsac_vf_bf16m1(vbfloat16m1_t vd, __bf16 rs1, vbfloat16m1_t vs2, size_t vl) {
+ return __riscv_vfmsac_vf_bf16m1(vd, rs1, vs2, vl);
+}
+
+// CHECK-RV64-LABEL: define dso_local <vscale x 8 x bfloat> @test_vfmsac_vv_bf16m2(
+// CHECK-RV64-SAME: <vscale x 8 x bfloat> [[VD:%.*]], <vscale x 8 x bfloat> [[VS1:%.*]], <vscale x 8 x bfloat> [[VS2:%.*]], i64 noundef [[VL:%.*]]) #[[ATTR0]] {
+// CHECK-RV64-NEXT: [[ENTRY:.*:]]
+// CHECK-RV64-NEXT: [[TMP0:%.*]] = call <vscale x 8 x bfloat> @llvm.riscv.vfmsac.nxv8bf16.nxv8bf16.i64(<vscale x 8 x bfloat> [[VD]], <vscale x 8 x bfloat> [[VS1]], <vscale x 8 x bfloat> [[VS2]], i64 7, i64 [[VL]], i64 3)
+// CHECK-RV64-NEXT: ret <vscale x 8 x bfloat> [[TMP0]]
+//
+vbfloat16m2_t test_vfmsac_vv_bf16m2(vbfloat16m2_t vd, vbfloat16m2_t vs1, vbfloat16m2_t vs2, size_t vl) {
+ return __riscv_vfmsac_vv_bf16m2(vd, vs1, vs2, vl);
+}
+
+// CHECK-RV64-LABEL: define dso_local <vscale x 8 x bfloat> @test_vfmsac_vf_bf16m2(
+// CHECK-RV64-SAME: <vscale x 8 x bfloat> [[VD:%.*]], bfloat noundef [[RS1:%.*]], <vscale x 8 x bfloat> [[VS2:%.*]], i64 noundef [[VL:%.*]]) #[[ATTR0]] {
+// CHECK-RV64-NEXT: [[ENTRY:.*:]]
+// CHECK-RV64-NEXT: [[TMP0:%.*]] = call <vscale x 8 x bfloat> @llvm.riscv.vfmsac.nxv8bf16.bf16.i64(<vscale x 8 x bfloat> [[VD]], bfloat [[RS1]], <vscale x 8 x bfloat> [[VS2]], i64 7, i64 [[VL]], i64 3)
+// CHECK-RV64-NEXT: ret <vscale x 8 x bfloat> [[TMP0]]
+//
+vbfloat16m2_t test_vfmsac_vf_bf16m2(vbfloat16m2_t vd, __bf16 rs1, vbfloat16m2_t vs2, size_t vl) {
+ return __riscv_vfmsac_vf_bf16m2(vd, rs1, vs2, vl);
+}
+
+// CHECK-RV64-LABEL: define dso_local <vscale x 16 x bfloat> @test_vfmsac_vv_bf16m4(
+// CHECK-RV64-SAME: <vscale x 16 x bfloat> [[VD:%.*]], <vscale x 16 x bfloat> [[VS1:%.*]], <vscale x 16 x bfloat> [[VS2:%.*]], i64 noundef [[VL:%.*]]) #[[ATTR0]] {
+// CHECK-RV64-NEXT: [[ENTRY:.*:]]
+// CHECK-RV64-NEXT: [[TMP0:%.*]] = call <vscale x 16 x bfloat> @llvm.riscv.vfmsac.nxv16bf16.nxv16bf16.i64(<vscale x 16 x bfloat> [[VD]], <vscale x 16 x bfloat> [[VS1]], <vscale x 16 x bfloat> [[VS2]], i64 7, i64 [[VL]], i64 3)
+// CHECK-RV64-NEXT: ret <vscale x 16 x bfloat> [[TMP0]]
+//
+vbfloat16m4_t test_vfmsac_vv_bf16m4(vbfloat16m4_t vd, vbfloat16m4_t vs1, vbfloat16m4_t vs2, size_t vl) {
+ return __riscv_vfmsac_vv_bf16m4(vd, vs1, vs2, vl);
+}
+
+// CHECK-RV64-LABEL: define dso_local <vscale x 16 x bfloat> @test_vfmsac_vf_bf16m4(
+// CHECK-RV64-SAME: <vscale x 16 x bfloat> [[VD:%.*]], bfloat noundef [[RS1:%.*]], <vscale x 16 x bfloat> [[VS2:%.*]], i64 noundef [[VL:%.*]]) #[[ATTR0]] {
+// CHECK-RV64-NEXT: [[ENTRY:.*:]]
+// CHECK-RV64-NEXT: [[TMP0:%.*]] = call <vscale x 16 x bfloat> @llvm.riscv.vfmsac.nxv16bf16.bf16.i64(<vscale x 16 x bfloat> [[VD]], bfloat [[RS1]], <vscale x 16 x bfloat> [[VS2]], i64 7, i64 [[VL]], i64 3)
+// CHECK-RV64-NEXT: ret <vscale x 16 x bfloat> [[TMP0]]
+//
+vbfloat16m4_t test_vfmsac_vf_bf16m4(vbfloat16m4_t vd, __bf16 rs1, vbfloat16m4_t vs2, size_t vl) {
+ return __riscv_vfmsac_vf_bf16m4(vd, rs1, vs2, vl);
+}
+
+// CHECK-RV64-LABEL: define dso_local <vscale x 32 x bfloat> @test_vfmsac_vv_bf16m8(
+// CHECK-RV64-SAME: <vscale x 32 x bfloat> [[VD:%.*]], <vscale x 32 x bfloat> [[VS1:%.*]], <vscale x 32 x bfloat> [[VS2:%.*]], i64 noundef [[VL:%.*]]) #[[ATTR0]] {
+// CHECK-RV64-NEXT: [[ENTRY:.*:]]
+// CHECK-RV64-NEXT: [[TMP0:%.*]] = call <vscale x 32 x bfloat> @llvm.riscv.vfmsac.nxv32bf16.nxv32bf16.i64(<vscale x 32 x bfloat> [[VD]], <vscale x 32 x bfloat> [[VS1]], <vscale x 32 x bfloat> [[VS2]], i64 7, i64 [[VL]], i64 3)
+// CHECK-RV64-NEXT: ret <vscale x 32 x bfloat> [[TMP0]]
+//
+vbfloat16m8_t test_vfmsac_vv_bf16m8(vbfloat16m8_t vd, vbfloat16m8_t vs1, vbfloat16m8_t vs2, size_t vl) {
+ return __riscv_vfmsac_vv_bf16m8(vd, vs1, vs2, vl);
+}
+
+// CHECK-RV64-LABEL: define dso_local <vscale x 32 x bfloat> @test_vfmsac_vf_bf16m8(
+// CHECK-RV64-SAME: <vscale x 32 x bfloat> [[VD:%.*]], bfloat noundef [[RS1:%.*]], <vscale x 32 x bfloat> [[VS2:%.*]], i64 noundef [[VL:%.*]]) #[[ATTR0]] {
+// CHECK-RV64-NEXT: [[ENTRY:.*:]]
+// CHECK-RV64-NEXT: [[TMP0:%.*]] = call <vscale x 32 x bfloat> @llvm.riscv.vfmsac.nxv32bf16.bf16.i64(<vscale x 32 x bfloat> [[VD]], bfloat [[RS1]], <vscale x 32 x bfloat> [[VS2]], i64 7, i64 [[VL]], i64 3)
+// CHECK-RV64-NEXT: ret <vscale x 32 x bfloat> [[TMP0]]
+//
+vbfloat16m8_t test_vfmsac_vf_bf16m8(vbfloat16m8_t vd, __bf16 rs1, vbfloat16m8_t vs2, size_t vl) {
+ return __riscv_vfmsac_vf_bf16m8(vd, rs1, vs2, vl);
+}
+
+// CHECK-RV64-LABEL: define dso_local <vscale x 1 x bfloat> @test_vfmsac_vv_bf16mf4_m(
+// CHECK-RV64-SAME: <vscale x 1 x i1> [[MASK:%.*]], <vscale x 1 x bfloat> [[VD:%.*]], <vscale x 1 x bfloat> [[VS1:%.*]], <vscale x 1 x bfloat> [[VS2:%.*]], i64 noundef [[VL:%.*]]) #[[ATTR0]] {
+// CHECK-RV64-NEXT: [[ENTRY:.*:]]
+// CHECK-RV64-NEXT: [[TMP0:%.*]] = call <vscale x 1 x bfloat> @llvm.riscv.vfmsac.mask.nxv1bf16.nxv1bf16.i64(<vscale x 1 x bfloat> [[VD]], <vscale x 1 x bfloat> [[VS1]], <vscale x 1 x bfloat> [[VS2]], <vscale x 1 x i1> [[MASK]], i64 7, i64 [[VL]], i64 3)
+// CHECK-RV64-NEXT: ret <vscale x 1 x bfloat> [[TMP0]]
+//
+vbfloat16mf4_t test_vfmsac_vv_bf16mf4_m(vbool64_t mask, vbfloat16mf4_t vd, vbfloat16mf4_t vs1, vbfloat16mf4_t vs2, size_t vl) {
+ return __riscv_vfmsac_vv_bf16mf4_m(mask, vd, vs1, vs2, vl);
+}
+
+// CHECK-RV64-LABEL: define dso_local <vscale x 1 x bfloat> @test_vfmsac_vf_bf16mf4_m(
+// CHECK-RV64-SAME: <vscale x 1 x i1> [[MASK:%.*]], <vscale x 1 x bfloat> [[VD:%.*]], bfloat noundef [[RS1:%.*]], <vscale x 1 x bfloat> [[VS2:%.*]], i64 noundef [[VL:%.*]]) #[[ATTR0]] {
+// CHECK-RV64-NEXT: [[ENTRY:.*:]]
+// CHECK-RV64-NEXT: [[TMP0:%.*]] = call <vscale x 1 x bfloat> @llvm.riscv.vfmsac.mask.nxv1bf16.bf16.i64(<vscale x 1 x bfloat> [[VD]], bfloat [[RS1]], <vscale x 1 x bfloat> [[VS2]], <vscale x 1 x i1> [[MASK]], i64 7, i64 [[VL]], i64 3)
+// CHECK-RV64-NEXT: ret <vscale x 1 x bfloat> [[TMP0]]
+//
+vbfloat16mf4_t test_vfmsac_vf_bf16mf4_m(vbool64_t mask, vbfloat16mf4_t vd, __bf16 rs1, vbfloat16mf4_t vs2, size_t vl) {
+ return __riscv_vfmsac_vf_bf16mf4_m(mask, vd, rs1, vs2, vl);
+}
+
+// CHECK-RV64-LABEL: define dso_local <vscale x 2 x bfloat> @test_vfmsac_vv_bf16mf2_m(
+// CHECK-RV64-SAME: <vscale x 2 x i1> [[MASK:%.*]], <vscale x 2 x bfloat> [[VD:%.*]], <vscale x 2 x bfloat> [[VS1:%.*]], <vscale x 2 x bfloat> [[VS2:%.*]], i64 noundef [[VL:%.*]]) #[[ATTR0]] {
+// CHECK-RV64-NEXT: [[ENTRY:.*:]]
+// CHECK-RV64-NEXT: [[TMP0:%.*]] = call <vscale x 2 x bfloat> @llvm.riscv.vfmsac.mask.nxv2bf16.nxv2bf16.i64(<vscale x 2 x bfloat> [[VD]], <vscale x 2 x bfloat> [[VS1]], <vscale x 2 x bfloat> [[VS2]], <vscale x 2 x i1> [[MASK]], i64 7, i64 [[VL]], i64 3)
+// CHECK-RV64-NEXT: ret <vscale x 2 x bfloat> [[TMP0]]
+//
+vbfloat16mf2_t test_vfmsac_vv_bf16mf2_m(vbool32_t mask, vbfloat16mf2_t vd, vbfloat16mf2_t vs1, vbfloat16mf2_t vs2, size_t vl) {
+ return __riscv_vfmsac_vv_bf16mf2_m(mask, vd, vs1, vs2, vl);
+}
+
+// CHECK-RV64-LABEL: define dso_local <vscale x 2 x bfloat> @test_vfmsac_vf_bf16mf2_m(
+// CHECK-RV64-SAME: <vscale x 2 x i1> [[MASK:%.*]], <vscale x 2 x bfloat> [[VD:%.*]], bfloat noundef [[RS1:%.*]], <vscale x 2 x bfloat> [[VS2:%.*]], i64 noundef [[VL:%.*]]) #[[ATTR0]] {
+// CHECK-RV64-NEXT: [[ENTRY:.*:]]
+// CHECK-RV64-NEXT: [[TMP0:%.*]] = call <vscale x 2 x bfloat> @llvm.riscv.vfmsac.mask.nxv2bf16.bf16.i64(<vscale x 2 x bfloat> [[VD]], bfloat [[RS1]], <vscale x 2 x bfloat> [[VS2]], <vscale x 2 x i1> [[MASK]], i64 7, i64 [[VL]], i64 3)
+// CHECK-RV64-NEXT: ret <vscale x 2 x bfloat> [[TMP0]]
+//
+vbfloat16mf2_t test_vfmsac_vf_bf16mf2_m(vbool32_t mask, vbfloat16mf2_t vd, __bf16 rs1, vbfloat16mf2_t vs2, size_t vl) {
+ return __riscv_vfmsac_vf_bf16mf2_m(mask, vd, rs1, vs2, vl);
+}
+
+// CHECK-RV64-LABEL: define dso_local <vscale x 4 x bfloat> @test_vfmsac_vv_bf16m1_m(
+// CHECK-RV64-SAME: <vscale x 4 x i1> [[MASK:%.*]], <vscale x 4 x bfloat> [[VD:%.*]], <vscale x 4 x bfloat> [[VS1:%.*]], <vscale x 4 x bfloat> [[VS2:%.*]], i64 noundef [[VL:%.*]]) #[[ATTR0]] {
+// CHECK-RV64-NEXT: [[ENTRY:.*:]]
+// CHECK-RV64-NEXT: [[TMP0:%.*]] = call <vscale x 4 x bfloat> @llvm.riscv.vfmsac.mask.nxv4bf16.nxv4bf16.i64(<vscale x 4 x bfloat> [[VD]], <vscale x 4 x bfloat> [[VS1]], <vscale x 4 x bfloat> [[VS2]], <vscale x 4 x i1> [[MASK]], i64 7, i64 [[VL]], i64 3)
+// CHECK-RV64-NEXT: ret <vscale x 4 x bfloat> [[TMP0]]
+//
+vbfloat16m1_t test_vfmsac_vv_bf16m1_m(vbool16_t mask, vbfloat16m1_t vd, vbfloat16m1_t vs1, vbfloat16m1_t vs2, size_t vl) {
+ return __riscv_vfmsac_vv_bf16m1_m(mask, vd, vs1, vs2, vl);
+}
+
+// CHECK-RV64-LABEL: define dso_local <vscale x 4 x bfloat> @test_vfmsac_vf_bf16m1_m(
+// CHECK-RV64-SAME: <vscale x 4 x i1> [[MASK:%.*]], <vscale x 4 x bfloat> [[VD:%.*]], bfloat noundef [[RS1:%.*]], <vscale x 4 x bfloat> [[VS2:%.*]], i64 noundef [[VL:%.*]]) #[[ATTR0]] {
+// CHECK-RV64-NEXT: [[ENTRY:.*:]]
+// CHECK-RV64-NEXT: [[TMP0:%.*]] = call <vscale x 4 x bfloat> @llvm.riscv.vfmsac.mask.nxv4bf16.bf16.i64(<vscale x 4 x bfloat> [[VD]], bfloat [[RS1]], <vscale x 4 x bfloat> [[VS2]], <vscale x 4 x i1> [[MASK]], i64 7, i64 [[VL]], i64 3)
+// CHECK-RV64-NEXT: ret <vscale x 4 x bfloat> [[TMP0]]
+//
+vbfloat16m1_t test_vfmsac_vf_bf16m1_m(vbool16_t mask, vbfloat16m1_t vd, __bf16 rs1, vbfloat16m1_t vs2, size_t vl) {
+ return __riscv_vfmsac_vf_bf16m1_m(mask, vd, rs1, vs2, vl);
+}
+
+// CHECK-RV64-LABEL: define dso_local <vscale x 8 x bfloat> @test_vfmsac_vv_bf16m2_m(
+// CHECK-RV64-SAME: <vscale x 8 x i1> [[MASK:%.*]], <vscale x 8 x bfloat> [[VD:%.*]], <vscale x 8 x bfloat> [[VS1:%.*]], <vscale x 8 x bfloat> [[VS2:%.*]], i64 noundef [[VL:%.*]]) #[[ATTR0]] {
+// CHECK-RV64-NEXT: [[ENTRY:.*:]]
+// CHECK-RV64-NEXT: [[TMP0:%.*]] = call <vscale x 8 x bfloat> @llvm.riscv.vfmsac.mask.nxv8bf16.nxv8bf16.i64(<vscale x 8 x bfloat> [[VD]], <vscale x 8 x bfloat> [[VS1]], <vscale x 8 x bfloat> [[VS2]], <vscale x 8 x i1> [[MASK]], i64 7, i64 [[VL]], i64 3)
+// CHECK-RV64-NEXT: ret <vscale x 8 x bfloat> [[TMP0]]
+//
+vbfloat16m2_t test_vfmsac_vv_bf16m2_m(vbool8_t mask, vbfloat16m2_t vd, vbfloat16m2_t vs1, vbfloat16m2_t vs2, size_t vl) {
+ return __riscv_vfmsac_vv_bf16m2_m(mask, vd, vs1, vs2, vl);
+}
+
+// CHECK-RV64-LABEL: define dso_local <vscale x 8 x bfloat> @test_vfmsac_vf_bf16m2_m(
+// CHECK-RV64-SAME: <vscale x 8 x i1> [[MASK:%.*]], <vscale x 8 x bfloat> [[VD:%.*]], bfloat noundef [[RS1:%.*]], <vscale x 8 x bfloat> [[VS2:%.*]], i64 noundef [[VL:%.*]]) #[[ATTR0]] {
+// CHECK-RV64-NEXT: [[ENTRY:.*:]]
+// CHECK-RV64-NEXT: [[TMP0:%.*]] = call <vscale x 8 x bfloat> @llvm.riscv.vfmsac.mask.nxv8bf16.bf16.i64(<vscale x 8 x bfloat> [[VD]], bfloat [[RS1]], <vscale x 8 x bfloat> [[VS2]], <vscale x 8 x i1> [[MASK]], i64 7, i64 [[VL]], i64 3)
+// CHECK-RV64-NEXT: ret <vscale x 8 x bfloat> [[TMP0]]
+//
+vbfloat16m2_t test_vfmsac_vf_bf16m2_m(vbool8_t mask, vbfloat16m2_t vd, __bf16 rs1, vbfloat16m2_t vs2, size_t vl) {
+ return __riscv_vfmsac_vf_bf16m2_m(mask, vd, rs1, vs2, vl);
+}
+
+// CHECK-RV64-LABEL: define dso_local <vscale x 16 x bfloat> @test_vfmsac_vv_bf16m4_m(
+// CHECK-RV64-SAME: <vscale x 16 x i1> [[MASK:%.*]], <vscale x 16 x bfloat> [[VD:%.*]], <vscale x 16 x bfloat> [[VS1:%.*]], <vscale x 16 x bfloat> [[VS2:%.*]], i64 noundef [[VL:%.*]]) #[[ATTR0]] {
+// CHECK-RV64-NEXT: [[ENTRY:.*:]]
+// CHECK-RV64-NEXT: [[TMP0:%.*]] = call <vscale x 16 x bfloat> @llvm.riscv.vfmsac.mask.nxv16bf16.nxv16bf16.i64(<vscale x 16 x bfloat> [[VD]], <vscale x 16 x bfloat> [[VS1]], <vscale x 16 x bfloat> [[VS2]], <vscale x 16 x i1> [[MASK]], i64 7, i64 [[VL]], i64 3)
+// CHECK-RV64-NEXT: ret <vscale x 16 x bfloat> [[TMP0]]
+//
+vbfloat16m4_t test_vfmsac_vv_bf16m4_m(vbool4_t mask, vbfloat16m4_t vd, vbfloat16m4_t vs1, vbfloat16m4_t vs2, size_t vl) {
+ return __riscv_vfmsac_vv_bf16m4_m(mask, vd, vs1, vs2, vl);
+}
+
+// CHECK-RV64-LABEL: define dso_local <vscale x 16 x bfloat> @test_vfmsac_vf_bf16m4_m(
+// CHECK-RV64-SAME: <vscale x 16 x i1> [[MASK:%.*]], <vscale x 16 x bfloat> [[VD:%.*]], bfloat noundef [[RS1:%.*]], <vscale x 16 x bfloat> [[VS2:%.*]], i64 noundef [[VL:%.*]]) #[[ATTR0]] {
+// CHECK-RV64-NEXT: [[ENTRY:.*:]]
+// CHECK-RV64-NEXT: [[TMP0:%.*]] = call <vscale x 16 x bfloat> @llvm.riscv.vfmsac.mask.nxv16bf16.bf16.i64(<vscale x 16 x bfloat> [[VD]], bfloat [[RS1]], <vscale x 16 x bfloat> [[VS2]], <vscale x 16 x i1> [[MASK]], i64 7, i64 [[VL]], i64 3)
+// CHECK-RV64-NEXT: ret <vscale x 16 x bfloat> [[TMP0]]
+//
+vbfloat16m4_t test_vfmsac_vf_bf16m4_m(vbool4_t mask, vbfloat16m4_t vd, __bf16 rs1, vbfloat16m4_t vs2, size_t vl) {
+ return __riscv_vfmsac_vf_bf16m4_m(mask, vd, rs1, vs2, vl);
+}
+
+// CHECK-RV64-LABEL: define dso_local <vscale x 32 x bfloat> @test_vfmsac_vv_bf16m8_m(
+// CHECK-RV64-SAME: <vscale x 32 x i1> [[MASK:%.*]], <vscale x 32 x bfloat> [[VD:%.*]], <vscale x 32 x bfloat> [[VS1:%.*]], <vscale x 32 x bfloat> [[VS2:%.*]], i64 noundef [[VL:%.*]]) #[[ATTR0]] {
+// CHECK-RV64-NEXT: [[ENTRY:.*:]]
+// CHECK-RV64-NEXT: [[TMP0:%.*]] = call <vscale x 32 x bfloat> @llvm.riscv.vfmsac.mask.nxv32bf16.nxv32bf16.i64(<vscale x 32 x bfloat> [[VD]], <vscale x 32 x bfloat> [[VS1]], <vscale x 32 x bfloat> [[VS2]], <vscale x 32 x i1> [[MASK]], i64 7, i64 [[VL]], i64 3)
+// CHECK-RV64-NEXT: ret <vscale x 32 x bfloat> [[TMP0]]
+//
+vbfloat16m8_t test_vfmsac_vv_bf16m8_m(vbool2_t mask, vbfloat16m8_t vd, vbfloat16m8_t vs1, vbfloat16m8_t vs2, size_t vl) {
+ return __riscv_vfmsac_vv_bf16m8_m(mask, vd, vs1, vs2, vl);
+}
+
+// CHECK-RV64-LABEL: define dso_local <vscale x 32 x bfloat> @test_vfmsac_vf_bf16m8_m(
+// CHECK-RV64-SAME: <vscale x 32 x i1> [[MASK:%.*]], <vscale x 32 x bfloat> [[VD:%.*]], bfloat noundef [[RS1:%.*]], <vscale x 32 x bfloat> [[VS2:%.*]], i64 noundef [[VL:%.*]]) #[[ATTR0]] {
+// CHECK-RV64-NEXT: [[ENTRY:.*:]]
+// CHECK-RV64-NEXT: [[TMP0:%.*]] = call <vscale x 32 x bfloat> @llvm.riscv.vfmsac.mask.nxv32bf16.bf16.i64(<vscale x 32 x bfloat> [[VD]], bfloat [[RS1]], <vscale x 32 x bfloat> [[VS2]], <vscale x 32 x i1> [[MASK]], i64 7, i64 [[VL]], i64 3)
+// CHECK-RV64-NEXT: ret <vscale x 32 x bfloat> [[TMP0]]
+//
+vbfloat16m8_t test_vfmsac_vf_bf16m8_m(vbool2_t mask, vbfloat16m8_t vd, __bf16 rs1, vbfloat16m8_t vs2, size_t vl) {
+ return __riscv_vfmsac_vf_bf16m8_m(mask, vd, rs1, vs2, vl);
+}
+
diff --git a/clang/test/CodeGen/RISCV/rvv-intrinsics-autogenerated/zvfbfa/non-policy/non-overloaded/vfmsub.c b/clang/test/CodeGen/RISCV/rvv-intrinsics-autogenerated/zvfbfa/non-policy/non-overloaded/vfmsub.c
new file mode 100644
index 0000000..07053afa
--- /dev/null
+++ b/clang/test/CodeGen/RISCV/rvv-intrinsics-autogenerated/zvfbfa/non-policy/non-overloaded/vfmsub.c
@@ -0,0 +1,249 @@
+// NOTE: Assertions have been autogenerated by utils/update_cc_test_checks.py UTC_ARGS: --version 5
+// REQUIRES: riscv-registered-target
+// RUN: %clang_cc1 -triple riscv64 -target-feature +v \
+// RUN: -target-feature +experimental-zvfbfa -disable-O0-optnone \
+// RUN: -emit-llvm %s -o - | opt -S -passes=mem2reg | \
+// RUN: FileCheck --check-prefix=CHECK-RV64 %s
+
+#include <riscv_vector.h>
+
+// CHECK-RV64-LABEL: define dso_local <vscale x 1 x bfloat> @test_vfmsub_vv_bf16mf4(
+// CHECK-RV64-SAME: <vscale x 1 x bfloat> [[VD:%.*]], <vscale x 1 x bfloat> [[VS1:%.*]], <vscale x 1 x bfloat> [[VS2:%.*]], i64 noundef [[VL:%.*]]) #[[ATTR0:[0-9]+]] {
+// CHECK-RV64-NEXT: [[ENTRY:.*:]]
+// CHECK-RV64-NEXT: [[TMP0:%.*]] = call <vscale x 1 x bfloat> @llvm.riscv.vfmsub.nxv1bf16.nxv1bf16.i64(<vscale x 1 x bfloat> [[VD]], <vscale x 1 x bfloat> [[VS1]], <vscale x 1 x bfloat> [[VS2]], i64 7, i64 [[VL]], i64 3)
+// CHECK-RV64-NEXT: ret <vscale x 1 x bfloat> [[TMP0]]
+//
+vbfloat16mf4_t test_vfmsub_vv_bf16mf4(vbfloat16mf4_t vd, vbfloat16mf4_t vs1, vbfloat16mf4_t vs2, size_t vl) {
+ return __riscv_vfmsub_vv_bf16mf4(vd, vs1, vs2, vl);
+}
+
+// CHECK-RV64-LABEL: define dso_local <vscale x 1 x bfloat> @test_vfmsub_vf_bf16mf4(
+// CHECK-RV64-SAME: <vscale x 1 x bfloat> [[VD:%.*]], bfloat noundef [[RS1:%.*]], <vscale x 1 x bfloat> [[VS2:%.*]], i64 noundef [[VL:%.*]]) #[[ATTR0]] {
+// CHECK-RV64-NEXT: [[ENTRY:.*:]]
+// CHECK-RV64-NEXT: [[TMP0:%.*]] = call <vscale x 1 x bfloat> @llvm.riscv.vfmsub.nxv1bf16.bf16.i64(<vscale x 1 x bfloat> [[VD]], bfloat [[RS1]], <vscale x 1 x bfloat> [[VS2]], i64 7, i64 [[VL]], i64 3)
+// CHECK-RV64-NEXT: ret <vscale x 1 x bfloat> [[TMP0]]
+//
+vbfloat16mf4_t test_vfmsub_vf_bf16mf4(vbfloat16mf4_t vd, __bf16 rs1, vbfloat16mf4_t vs2, size_t vl) {
+ return __riscv_vfmsub_vf_bf16mf4(vd, rs1, vs2, vl);
+}
+
+// CHECK-RV64-LABEL: define dso_local <vscale x 2 x bfloat> @test_vfmsub_vv_bf16mf2(
+// CHECK-RV64-SAME: <vscale x 2 x bfloat> [[VD:%.*]], <vscale x 2 x bfloat> [[VS1:%.*]], <vscale x 2 x bfloat> [[VS2:%.*]], i64 noundef [[VL:%.*]]) #[[ATTR0]] {
+// CHECK-RV64-NEXT: [[ENTRY:.*:]]
+// CHECK-RV64-NEXT: [[TMP0:%.*]] = call <vscale x 2 x bfloat> @llvm.riscv.vfmsub.nxv2bf16.nxv2bf16.i64(<vscale x 2 x bfloat> [[VD]], <vscale x 2 x bfloat> [[VS1]], <vscale x 2 x bfloat> [[VS2]], i64 7, i64 [[VL]], i64 3)
+// CHECK-RV64-NEXT: ret <vscale x 2 x bfloat> [[TMP0]]
+//
+vbfloat16mf2_t test_vfmsub_vv_bf16mf2(vbfloat16mf2_t vd, vbfloat16mf2_t vs1, vbfloat16mf2_t vs2, size_t vl) {
+ return __riscv_vfmsub_vv_bf16mf2(vd, vs1, vs2, vl);
+}
+
+// CHECK-RV64-LABEL: define dso_local <vscale x 2 x bfloat> @test_vfmsub_vf_bf16mf2(
+// CHECK-RV64-SAME: <vscale x 2 x bfloat> [[VD:%.*]], bfloat noundef [[RS1:%.*]], <vscale x 2 x bfloat> [[VS2:%.*]], i64 noundef [[VL:%.*]]) #[[ATTR0]] {
+// CHECK-RV64-NEXT: [[ENTRY:.*:]]
+// CHECK-RV64-NEXT: [[TMP0:%.*]] = call <vscale x 2 x bfloat> @llvm.riscv.vfmsub.nxv2bf16.bf16.i64(<vscale x 2 x bfloat> [[VD]], bfloat [[RS1]], <vscale x 2 x bfloat> [[VS2]], i64 7, i64 [[VL]], i64 3)
+// CHECK-RV64-NEXT: ret <vscale x 2 x bfloat> [[TMP0]]
+//
+vbfloat16mf2_t test_vfmsub_vf_bf16mf2(vbfloat16mf2_t vd, __bf16 rs1, vbfloat16mf2_t vs2, size_t vl) {
+ return __riscv_vfmsub_vf_bf16mf2(vd, rs1, vs2, vl);
+}
+
+// CHECK-RV64-LABEL: define dso_local <vscale x 4 x bfloat> @test_vfmsub_vv_bf16m1(
+// CHECK-RV64-SAME: <vscale x 4 x bfloat> [[VD:%.*]], <vscale x 4 x bfloat> [[VS1:%.*]], <vscale x 4 x bfloat> [[VS2:%.*]], i64 noundef [[VL:%.*]]) #[[ATTR0]] {
+// CHECK-RV64-NEXT: [[ENTRY:.*:]]
+// CHECK-RV64-NEXT: [[TMP0:%.*]] = call <vscale x 4 x bfloat> @llvm.riscv.vfmsub.nxv4bf16.nxv4bf16.i64(<vscale x 4 x bfloat> [[VD]], <vscale x 4 x bfloat> [[VS1]], <vscale x 4 x bfloat> [[VS2]], i64 7, i64 [[VL]], i64 3)
+// CHECK-RV64-NEXT: ret <vscale x 4 x bfloat> [[TMP0]]
+//
+vbfloat16m1_t test_vfmsub_vv_bf16m1(vbfloat16m1_t vd, vbfloat16m1_t vs1, vbfloat16m1_t vs2, size_t vl) {
+ return __riscv_vfmsub_vv_bf16m1(vd, vs1, vs2, vl);
+}
+
+// CHECK-RV64-LABEL: define dso_local <vscale x 4 x bfloat> @test_vfmsub_vf_bf16m1(
+// CHECK-RV64-SAME: <vscale x 4 x bfloat> [[VD:%.*]], bfloat noundef [[RS1:%.*]], <vscale x 4 x bfloat> [[VS2:%.*]], i64 noundef [[VL:%.*]]) #[[ATTR0]] {
+// CHECK-RV64-NEXT: [[ENTRY:.*:]]
+// CHECK-RV64-NEXT: [[TMP0:%.*]] = call <vscale x 4 x bfloat> @llvm.riscv.vfmsub.nxv4bf16.bf16.i64(<vscale x 4 x bfloat> [[VD]], bfloat [[RS1]], <vscale x 4 x bfloat> [[VS2]], i64 7, i64 [[VL]], i64 3)
+// CHECK-RV64-NEXT: ret <vscale x 4 x bfloat> [[TMP0]]
+//
+vbfloat16m1_t test_vfmsub_vf_bf16m1(vbfloat16m1_t vd, __bf16 rs1, vbfloat16m1_t vs2, size_t vl) {
+ return __riscv_vfmsub_vf_bf16m1(vd, rs1, vs2, vl);
+}
+
+// CHECK-RV64-LABEL: define dso_local <vscale x 8 x bfloat> @test_vfmsub_vv_bf16m2(
+// CHECK-RV64-SAME: <vscale x 8 x bfloat> [[VD:%.*]], <vscale x 8 x bfloat> [[VS1:%.*]], <vscale x 8 x bfloat> [[VS2:%.*]], i64 noundef [[VL:%.*]]) #[[ATTR0]] {
+// CHECK-RV64-NEXT: [[ENTRY:.*:]]
+// CHECK-RV64-NEXT: [[TMP0:%.*]] = call <vscale x 8 x bfloat> @llvm.riscv.vfmsub.nxv8bf16.nxv8bf16.i64(<vscale x 8 x bfloat> [[VD]], <vscale x 8 x bfloat> [[VS1]], <vscale x 8 x bfloat> [[VS2]], i64 7, i64 [[VL]], i64 3)
+// CHECK-RV64-NEXT: ret <vscale x 8 x bfloat> [[TMP0]]
+//
+vbfloat16m2_t test_vfmsub_vv_bf16m2(vbfloat16m2_t vd, vbfloat16m2_t vs1, vbfloat16m2_t vs2, size_t vl) {
+ return __riscv_vfmsub_vv_bf16m2(vd, vs1, vs2, vl);
+}
+
+// CHECK-RV64-LABEL: define dso_local <vscale x 8 x bfloat> @test_vfmsub_vf_bf16m2(
+// CHECK-RV64-SAME: <vscale x 8 x bfloat> [[VD:%.*]], bfloat noundef [[RS1:%.*]], <vscale x 8 x bfloat> [[VS2:%.*]], i64 noundef [[VL:%.*]]) #[[ATTR0]] {
+// CHECK-RV64-NEXT: [[ENTRY:.*:]]
+// CHECK-RV64-NEXT: [[TMP0:%.*]] = call <vscale x 8 x bfloat> @llvm.riscv.vfmsub.nxv8bf16.bf16.i64(<vscale x 8 x bfloat> [[VD]], bfloat [[RS1]], <vscale x 8 x bfloat> [[VS2]], i64 7, i64 [[VL]], i64 3)
+// CHECK-RV64-NEXT: ret <vscale x 8 x bfloat> [[TMP0]]
+//
+vbfloat16m2_t test_vfmsub_vf_bf16m2(vbfloat16m2_t vd, __bf16 rs1, vbfloat16m2_t vs2, size_t vl) {
+ return __riscv_vfmsub_vf_bf16m2(vd, rs1, vs2, vl);
+}
+
+// CHECK-RV64-LABEL: define dso_local <vscale x 16 x bfloat> @test_vfmsub_vv_bf16m4(
+// CHECK-RV64-SAME: <vscale x 16 x bfloat> [[VD:%.*]], <vscale x 16 x bfloat> [[VS1:%.*]], <vscale x 16 x bfloat> [[VS2:%.*]], i64 noundef [[VL:%.*]]) #[[ATTR0]] {
+// CHECK-RV64-NEXT: [[ENTRY:.*:]]
+// CHECK-RV64-NEXT: [[TMP0:%.*]] = call <vscale x 16 x bfloat> @llvm.riscv.vfmsub.nxv16bf16.nxv16bf16.i64(<vscale x 16 x bfloat> [[VD]], <vscale x 16 x bfloat> [[VS1]], <vscale x 16 x bfloat> [[VS2]], i64 7, i64 [[VL]], i64 3)
+// CHECK-RV64-NEXT: ret <vscale x 16 x bfloat> [[TMP0]]
+//
+vbfloat16m4_t test_vfmsub_vv_bf16m4(vbfloat16m4_t vd, vbfloat16m4_t vs1, vbfloat16m4_t vs2, size_t vl) {
+ return __riscv_vfmsub_vv_bf16m4(vd, vs1, vs2, vl);
+}
+
+// CHECK-RV64-LABEL: define dso_local <vscale x 16 x bfloat> @test_vfmsub_vf_bf16m4(
+// CHECK-RV64-SAME: <vscale x 16 x bfloat> [[VD:%.*]], bfloat noundef [[RS1:%.*]], <vscale x 16 x bfloat> [[VS2:%.*]], i64 noundef [[VL:%.*]]) #[[ATTR0]] {
+// CHECK-RV64-NEXT: [[ENTRY:.*:]]
+// CHECK-RV64-NEXT: [[TMP0:%.*]] = call <vscale x 16 x bfloat> @llvm.riscv.vfmsub.nxv16bf16.bf16.i64(<vscale x 16 x bfloat> [[VD]], bfloat [[RS1]], <vscale x 16 x bfloat> [[VS2]], i64 7, i64 [[VL]], i64 3)
+// CHECK-RV64-NEXT: ret <vscale x 16 x bfloat> [[TMP0]]
+//
+vbfloat16m4_t test_vfmsub_vf_bf16m4(vbfloat16m4_t vd, __bf16 rs1, vbfloat16m4_t vs2, size_t vl) {
+ return __riscv_vfmsub_vf_bf16m4(vd, rs1, vs2, vl);
+}
+
+// CHECK-RV64-LABEL: define dso_local <vscale x 32 x bfloat> @test_vfmsub_vv_bf16m8(
+// CHECK-RV64-SAME: <vscale x 32 x bfloat> [[VD:%.*]], <vscale x 32 x bfloat> [[VS1:%.*]], <vscale x 32 x bfloat> [[VS2:%.*]], i64 noundef [[VL:%.*]]) #[[ATTR0]] {
+// CHECK-RV64-NEXT: [[ENTRY:.*:]]
+// CHECK-RV64-NEXT: [[TMP0:%.*]] = call <vscale x 32 x bfloat> @llvm.riscv.vfmsub.nxv32bf16.nxv32bf16.i64(<vscale x 32 x bfloat> [[VD]], <vscale x 32 x bfloat> [[VS1]], <vscale x 32 x bfloat> [[VS2]], i64 7, i64 [[VL]], i64 3)
+// CHECK-RV64-NEXT: ret <vscale x 32 x bfloat> [[TMP0]]
+//
+vbfloat16m8_t test_vfmsub_vv_bf16m8(vbfloat16m8_t vd, vbfloat16m8_t vs1, vbfloat16m8_t vs2, size_t vl) {
+ return __riscv_vfmsub_vv_bf16m8(vd, vs1, vs2, vl);
+}
+
+// CHECK-RV64-LABEL: define dso_local <vscale x 32 x bfloat> @test_vfmsub_vf_bf16m8(
+// CHECK-RV64-SAME: <vscale x 32 x bfloat> [[VD:%.*]], bfloat noundef [[RS1:%.*]], <vscale x 32 x bfloat> [[VS2:%.*]], i64 noundef [[VL:%.*]]) #[[ATTR0]] {
+// CHECK-RV64-NEXT: [[ENTRY:.*:]]
+// CHECK-RV64-NEXT: [[TMP0:%.*]] = call <vscale x 32 x bfloat> @llvm.riscv.vfmsub.nxv32bf16.bf16.i64(<vscale x 32 x bfloat> [[VD]], bfloat [[RS1]], <vscale x 32 x bfloat> [[VS2]], i64 7, i64 [[VL]], i64 3)
+// CHECK-RV64-NEXT: ret <vscale x 32 x bfloat> [[TMP0]]
+//
+vbfloat16m8_t test_vfmsub_vf_bf16m8(vbfloat16m8_t vd, __bf16 rs1, vbfloat16m8_t vs2, size_t vl) {
+ return __riscv_vfmsub_vf_bf16m8(vd, rs1, vs2, vl);
+}
+
+// CHECK-RV64-LABEL: define dso_local <vscale x 1 x bfloat> @test_vfmsub_vv_bf16mf4_m(
+// CHECK-RV64-SAME: <vscale x 1 x i1> [[MASK:%.*]], <vscale x 1 x bfloat> [[VD:%.*]], <vscale x 1 x bfloat> [[VS1:%.*]], <vscale x 1 x bfloat> [[VS2:%.*]], i64 noundef [[VL:%.*]]) #[[ATTR0]] {
+// CHECK-RV64-NEXT: [[ENTRY:.*:]]
+// CHECK-RV64-NEXT: [[TMP0:%.*]] = call <vscale x 1 x bfloat> @llvm.riscv.vfmsub.mask.nxv1bf16.nxv1bf16.i64(<vscale x 1 x bfloat> [[VD]], <vscale x 1 x bfloat> [[VS1]], <vscale x 1 x bfloat> [[VS2]], <vscale x 1 x i1> [[MASK]], i64 7, i64 [[VL]], i64 3)
+// CHECK-RV64-NEXT: ret <vscale x 1 x bfloat> [[TMP0]]
+//
+vbfloat16mf4_t test_vfmsub_vv_bf16mf4_m(vbool64_t mask, vbfloat16mf4_t vd, vbfloat16mf4_t vs1, vbfloat16mf4_t vs2, size_t vl) {
+ return __riscv_vfmsub_vv_bf16mf4_m(mask, vd, vs1, vs2, vl);
+}
+
+// CHECK-RV64-LABEL: define dso_local <vscale x 1 x bfloat> @test_vfmsub_vf_bf16mf4_m(
+// CHECK-RV64-SAME: <vscale x 1 x i1> [[MASK:%.*]], <vscale x 1 x bfloat> [[VD:%.*]], bfloat noundef [[RS1:%.*]], <vscale x 1 x bfloat> [[VS2:%.*]], i64 noundef [[VL:%.*]]) #[[ATTR0]] {
+// CHECK-RV64-NEXT: [[ENTRY:.*:]]
+// CHECK-RV64-NEXT: [[TMP0:%.*]] = call <vscale x 1 x bfloat> @llvm.riscv.vfmsub.mask.nxv1bf16.bf16.i64(<vscale x 1 x bfloat> [[VD]], bfloat [[RS1]], <vscale x 1 x bfloat> [[VS2]], <vscale x 1 x i1> [[MASK]], i64 7, i64 [[VL]], i64 3)
+// CHECK-RV64-NEXT: ret <vscale x 1 x bfloat> [[TMP0]]
+//
+vbfloat16mf4_t test_vfmsub_vf_bf16mf4_m(vbool64_t mask, vbfloat16mf4_t vd, __bf16 rs1, vbfloat16mf4_t vs2, size_t vl) {
+ return __riscv_vfmsub_vf_bf16mf4_m(mask, vd, rs1, vs2, vl);
+}
+
+// CHECK-RV64-LABEL: define dso_local <vscale x 2 x bfloat> @test_vfmsub_vv_bf16mf2_m(
+// CHECK-RV64-SAME: <vscale x 2 x i1> [[MASK:%.*]], <vscale x 2 x bfloat> [[VD:%.*]], <vscale x 2 x bfloat> [[VS1:%.*]], <vscale x 2 x bfloat> [[VS2:%.*]], i64 noundef [[VL:%.*]]) #[[ATTR0]] {
+// CHECK-RV64-NEXT: [[ENTRY:.*:]]
+// CHECK-RV64-NEXT: [[TMP0:%.*]] = call <vscale x 2 x bfloat> @llvm.riscv.vfmsub.mask.nxv2bf16.nxv2bf16.i64(<vscale x 2 x bfloat> [[VD]], <vscale x 2 x bfloat> [[VS1]], <vscale x 2 x bfloat> [[VS2]], <vscale x 2 x i1> [[MASK]], i64 7, i64 [[VL]], i64 3)
+// CHECK-RV64-NEXT: ret <vscale x 2 x bfloat> [[TMP0]]
+//
+vbfloat16mf2_t test_vfmsub_vv_bf16mf2_m(vbool32_t mask, vbfloat16mf2_t vd, vbfloat16mf2_t vs1, vbfloat16mf2_t vs2, size_t vl) {
+ return __riscv_vfmsub_vv_bf16mf2_m(mask, vd, vs1, vs2, vl);
+}
+
+// CHECK-RV64-LABEL: define dso_local <vscale x 2 x bfloat> @test_vfmsub_vf_bf16mf2_m(
+// CHECK-RV64-SAME: <vscale x 2 x i1> [[MASK:%.*]], <vscale x 2 x bfloat> [[VD:%.*]], bfloat noundef [[RS1:%.*]], <vscale x 2 x bfloat> [[VS2:%.*]], i64 noundef [[VL:%.*]]) #[[ATTR0]] {
+// CHECK-RV64-NEXT: [[ENTRY:.*:]]
+// CHECK-RV64-NEXT: [[TMP0:%.*]] = call <vscale x 2 x bfloat> @llvm.riscv.vfmsub.mask.nxv2bf16.bf16.i64(<vscale x 2 x bfloat> [[VD]], bfloat [[RS1]], <vscale x 2 x bfloat> [[VS2]], <vscale x 2 x i1> [[MASK]], i64 7, i64 [[VL]], i64 3)
+// CHECK-RV64-NEXT: ret <vscale x 2 x bfloat> [[TMP0]]
+//
+vbfloat16mf2_t test_vfmsub_vf_bf16mf2_m(vbool32_t mask, vbfloat16mf2_t vd, __bf16 rs1, vbfloat16mf2_t vs2, size_t vl) {
+ return __riscv_vfmsub_vf_bf16mf2_m(mask, vd, rs1, vs2, vl);
+}
+
+// CHECK-RV64-LABEL: define dso_local <vscale x 4 x bfloat> @test_vfmsub_vv_bf16m1_m(
+// CHECK-RV64-SAME: <vscale x 4 x i1> [[MASK:%.*]], <vscale x 4 x bfloat> [[VD:%.*]], <vscale x 4 x bfloat> [[VS1:%.*]], <vscale x 4 x bfloat> [[VS2:%.*]], i64 noundef [[VL:%.*]]) #[[ATTR0]] {
+// CHECK-RV64-NEXT: [[ENTRY:.*:]]
+// CHECK-RV64-NEXT: [[TMP0:%.*]] = call <vscale x 4 x bfloat> @llvm.riscv.vfmsub.mask.nxv4bf16.nxv4bf16.i64(<vscale x 4 x bfloat> [[VD]], <vscale x 4 x bfloat> [[VS1]], <vscale x 4 x bfloat> [[VS2]], <vscale x 4 x i1> [[MASK]], i64 7, i64 [[VL]], i64 3)
+// CHECK-RV64-NEXT: ret <vscale x 4 x bfloat> [[TMP0]]
+//
+vbfloat16m1_t test_vfmsub_vv_bf16m1_m(vbool16_t mask, vbfloat16m1_t vd, vbfloat16m1_t vs1, vbfloat16m1_t vs2, size_t vl) {
+ return __riscv_vfmsub_vv_bf16m1_m(mask, vd, vs1, vs2, vl);
+}
+
+// CHECK-RV64-LABEL: define dso_local <vscale x 4 x bfloat> @test_vfmsub_vf_bf16m1_m(
+// CHECK-RV64-SAME: <vscale x 4 x i1> [[MASK:%.*]], <vscale x 4 x bfloat> [[VD:%.*]], bfloat noundef [[RS1:%.*]], <vscale x 4 x bfloat> [[VS2:%.*]], i64 noundef [[VL:%.*]]) #[[ATTR0]] {
+// CHECK-RV64-NEXT: [[ENTRY:.*:]]
+// CHECK-RV64-NEXT: [[TMP0:%.*]] = call <vscale x 4 x bfloat> @llvm.riscv.vfmsub.mask.nxv4bf16.bf16.i64(<vscale x 4 x bfloat> [[VD]], bfloat [[RS1]], <vscale x 4 x bfloat> [[VS2]], <vscale x 4 x i1> [[MASK]], i64 7, i64 [[VL]], i64 3)
+// CHECK-RV64-NEXT: ret <vscale x 4 x bfloat> [[TMP0]]
+//
+vbfloat16m1_t test_vfmsub_vf_bf16m1_m(vbool16_t mask, vbfloat16m1_t vd, __bf16 rs1, vbfloat16m1_t vs2, size_t vl) {
+ return __riscv_vfmsub_vf_bf16m1_m(mask, vd, rs1, vs2, vl);
+}
+
+// CHECK-RV64-LABEL: define dso_local <vscale x 8 x bfloat> @test_vfmsub_vv_bf16m2_m(
+// CHECK-RV64-SAME: <vscale x 8 x i1> [[MASK:%.*]], <vscale x 8 x bfloat> [[VD:%.*]], <vscale x 8 x bfloat> [[VS1:%.*]], <vscale x 8 x bfloat> [[VS2:%.*]], i64 noundef [[VL:%.*]]) #[[ATTR0]] {
+// CHECK-RV64-NEXT: [[ENTRY:.*:]]
+// CHECK-RV64-NEXT: [[TMP0:%.*]] = call <vscale x 8 x bfloat> @llvm.riscv.vfmsub.mask.nxv8bf16.nxv8bf16.i64(<vscale x 8 x bfloat> [[VD]], <vscale x 8 x bfloat> [[VS1]], <vscale x 8 x bfloat> [[VS2]], <vscale x 8 x i1> [[MASK]], i64 7, i64 [[VL]], i64 3)
+// CHECK-RV64-NEXT: ret <vscale x 8 x bfloat> [[TMP0]]
+//
+vbfloat16m2_t test_vfmsub_vv_bf16m2_m(vbool8_t mask, vbfloat16m2_t vd, vbfloat16m2_t vs1, vbfloat16m2_t vs2, size_t vl) {
+ return __riscv_vfmsub_vv_bf16m2_m(mask, vd, vs1, vs2, vl);
+}
+
+// CHECK-RV64-LABEL: define dso_local <vscale x 8 x bfloat> @test_vfmsub_vf_bf16m2_m(
+// CHECK-RV64-SAME: <vscale x 8 x i1> [[MASK:%.*]], <vscale x 8 x bfloat> [[VD:%.*]], bfloat noundef [[RS1:%.*]], <vscale x 8 x bfloat> [[VS2:%.*]], i64 noundef [[VL:%.*]]) #[[ATTR0]] {
+// CHECK-RV64-NEXT: [[ENTRY:.*:]]
+// CHECK-RV64-NEXT: [[TMP0:%.*]] = call <vscale x 8 x bfloat> @llvm.riscv.vfmsub.mask.nxv8bf16.bf16.i64(<vscale x 8 x bfloat> [[VD]], bfloat [[RS1]], <vscale x 8 x bfloat> [[VS2]], <vscale x 8 x i1> [[MASK]], i64 7, i64 [[VL]], i64 3)
+// CHECK-RV64-NEXT: ret <vscale x 8 x bfloat> [[TMP0]]
+//
+vbfloat16m2_t test_vfmsub_vf_bf16m2_m(vbool8_t mask, vbfloat16m2_t vd, __bf16 rs1, vbfloat16m2_t vs2, size_t vl) {
+ return __riscv_vfmsub_vf_bf16m2_m(mask, vd, rs1, vs2, vl);
+}
+
+// CHECK-RV64-LABEL: define dso_local <vscale x 16 x bfloat> @test_vfmsub_vv_bf16m4_m(
+// CHECK-RV64-SAME: <vscale x 16 x i1> [[MASK:%.*]], <vscale x 16 x bfloat> [[VD:%.*]], <vscale x 16 x bfloat> [[VS1:%.*]], <vscale x 16 x bfloat> [[VS2:%.*]], i64 noundef [[VL:%.*]]) #[[ATTR0]] {
+// CHECK-RV64-NEXT: [[ENTRY:.*:]]
+// CHECK-RV64-NEXT: [[TMP0:%.*]] = call <vscale x 16 x bfloat> @llvm.riscv.vfmsub.mask.nxv16bf16.nxv16bf16.i64(<vscale x 16 x bfloat> [[VD]], <vscale x 16 x bfloat> [[VS1]], <vscale x 16 x bfloat> [[VS2]], <vscale x 16 x i1> [[MASK]], i64 7, i64 [[VL]], i64 3)
+// CHECK-RV64-NEXT: ret <vscale x 16 x bfloat> [[TMP0]]
+//
+vbfloat16m4_t test_vfmsub_vv_bf16m4_m(vbool4_t mask, vbfloat16m4_t vd, vbfloat16m4_t vs1, vbfloat16m4_t vs2, size_t vl) {
+ return __riscv_vfmsub_vv_bf16m4_m(mask, vd, vs1, vs2, vl);
+}
+
+// CHECK-RV64-LABEL: define dso_local <vscale x 16 x bfloat> @test_vfmsub_vf_bf16m4_m(
+// CHECK-RV64-SAME: <vscale x 16 x i1> [[MASK:%.*]], <vscale x 16 x bfloat> [[VD:%.*]], bfloat noundef [[RS1:%.*]], <vscale x 16 x bfloat> [[VS2:%.*]], i64 noundef [[VL:%.*]]) #[[ATTR0]] {
+// CHECK-RV64-NEXT: [[ENTRY:.*:]]
+// CHECK-RV64-NEXT: [[TMP0:%.*]] = call <vscale x 16 x bfloat> @llvm.riscv.vfmsub.mask.nxv16bf16.bf16.i64(<vscale x 16 x bfloat> [[VD]], bfloat [[RS1]], <vscale x 16 x bfloat> [[VS2]], <vscale x 16 x i1> [[MASK]], i64 7, i64 [[VL]], i64 3)
+// CHECK-RV64-NEXT: ret <vscale x 16 x bfloat> [[TMP0]]
+//
+vbfloat16m4_t test_vfmsub_vf_bf16m4_m(vbool4_t mask, vbfloat16m4_t vd, __bf16 rs1, vbfloat16m4_t vs2, size_t vl) {
+ return __riscv_vfmsub_vf_bf16m4_m(mask, vd, rs1, vs2, vl);
+}
+
+// CHECK-RV64-LABEL: define dso_local <vscale x 32 x bfloat> @test_vfmsub_vv_bf16m8_m(
+// CHECK-RV64-SAME: <vscale x 32 x i1> [[MASK:%.*]], <vscale x 32 x bfloat> [[VD:%.*]], <vscale x 32 x bfloat> [[VS1:%.*]], <vscale x 32 x bfloat> [[VS2:%.*]], i64 noundef [[VL:%.*]]) #[[ATTR0]] {
+// CHECK-RV64-NEXT: [[ENTRY:.*:]]
+// CHECK-RV64-NEXT: [[TMP0:%.*]] = call <vscale x 32 x bfloat> @llvm.riscv.vfmsub.mask.nxv32bf16.nxv32bf16.i64(<vscale x 32 x bfloat> [[VD]], <vscale x 32 x bfloat> [[VS1]], <vscale x 32 x bfloat> [[VS2]], <vscale x 32 x i1> [[MASK]], i64 7, i64 [[VL]], i64 3)
+// CHECK-RV64-NEXT: ret <vscale x 32 x bfloat> [[TMP0]]
+//
+vbfloat16m8_t test_vfmsub_vv_bf16m8_m(vbool2_t mask, vbfloat16m8_t vd, vbfloat16m8_t vs1, vbfloat16m8_t vs2, size_t vl) {
+ return __riscv_vfmsub_vv_bf16m8_m(mask, vd, vs1, vs2, vl);
+}
+
+// CHECK-RV64-LABEL: define dso_local <vscale x 32 x bfloat> @test_vfmsub_vf_bf16m8_m(
+// CHECK-RV64-SAME: <vscale x 32 x i1> [[MASK:%.*]], <vscale x 32 x bfloat> [[VD:%.*]], bfloat noundef [[RS1:%.*]], <vscale x 32 x bfloat> [[VS2:%.*]], i64 noundef [[VL:%.*]]) #[[ATTR0]] {
+// CHECK-RV64-NEXT: [[ENTRY:.*:]]
+// CHECK-RV64-NEXT: [[TMP0:%.*]] = call <vscale x 32 x bfloat> @llvm.riscv.vfmsub.mask.nxv32bf16.bf16.i64(<vscale x 32 x bfloat> [[VD]], bfloat [[RS1]], <vscale x 32 x bfloat> [[VS2]], <vscale x 32 x i1> [[MASK]], i64 7, i64 [[VL]], i64 3)
+// CHECK-RV64-NEXT: ret <vscale x 32 x bfloat> [[TMP0]]
+//
+vbfloat16m8_t test_vfmsub_vf_bf16m8_m(vbool2_t mask, vbfloat16m8_t vd, __bf16 rs1, vbfloat16m8_t vs2, size_t vl) {
+ return __riscv_vfmsub_vf_bf16m8_m(mask, vd, rs1, vs2, vl);
+}
+
diff --git a/clang/test/CodeGen/RISCV/rvv-intrinsics-autogenerated/zvfbfa/non-policy/non-overloaded/vfmul.c b/clang/test/CodeGen/RISCV/rvv-intrinsics-autogenerated/zvfbfa/non-policy/non-overloaded/vfmul.c
new file mode 100644
index 0000000..88fb329
--- /dev/null
+++ b/clang/test/CodeGen/RISCV/rvv-intrinsics-autogenerated/zvfbfa/non-policy/non-overloaded/vfmul.c
@@ -0,0 +1,249 @@
+// NOTE: Assertions have been autogenerated by utils/update_cc_test_checks.py UTC_ARGS: --version 5
+// REQUIRES: riscv-registered-target
+// RUN: %clang_cc1 -triple riscv64 -target-feature +v \
+// RUN: -target-feature +experimental-zvfbfa -disable-O0-optnone \
+// RUN: -emit-llvm %s -o - | opt -S -passes=mem2reg | \
+// RUN: FileCheck --check-prefix=CHECK-RV64 %s
+
+#include <riscv_vector.h>
+
+// CHECK-RV64-LABEL: define dso_local <vscale x 1 x bfloat> @test_vfmul_vv_bf16mf4(
+// CHECK-RV64-SAME: <vscale x 1 x bfloat> [[OP1:%.*]], <vscale x 1 x bfloat> [[OP2:%.*]], i64 noundef [[VL:%.*]]) #[[ATTR0:[0-9]+]] {
+// CHECK-RV64-NEXT: [[ENTRY:.*:]]
+// CHECK-RV64-NEXT: [[TMP0:%.*]] = call <vscale x 1 x bfloat> @llvm.riscv.vfmul.nxv1bf16.nxv1bf16.i64(<vscale x 1 x bfloat> poison, <vscale x 1 x bfloat> [[OP1]], <vscale x 1 x bfloat> [[OP2]], i64 7, i64 [[VL]])
+// CHECK-RV64-NEXT: ret <vscale x 1 x bfloat> [[TMP0]]
+//
+vbfloat16mf4_t test_vfmul_vv_bf16mf4(vbfloat16mf4_t op1, vbfloat16mf4_t op2, size_t vl) {
+ return __riscv_vfmul_vv_bf16mf4(op1, op2, vl);
+}
+
+// CHECK-RV64-LABEL: define dso_local <vscale x 1 x bfloat> @test_vfmul_vf_bf16mf4(
+// CHECK-RV64-SAME: <vscale x 1 x bfloat> [[OP1:%.*]], bfloat noundef [[OP2:%.*]], i64 noundef [[VL:%.*]]) #[[ATTR0]] {
+// CHECK-RV64-NEXT: [[ENTRY:.*:]]
+// CHECK-RV64-NEXT: [[TMP0:%.*]] = call <vscale x 1 x bfloat> @llvm.riscv.vfmul.nxv1bf16.bf16.i64(<vscale x 1 x bfloat> poison, <vscale x 1 x bfloat> [[OP1]], bfloat [[OP2]], i64 7, i64 [[VL]])
+// CHECK-RV64-NEXT: ret <vscale x 1 x bfloat> [[TMP0]]
+//
+vbfloat16mf4_t test_vfmul_vf_bf16mf4(vbfloat16mf4_t op1, __bf16 op2, size_t vl) {
+ return __riscv_vfmul_vf_bf16mf4(op1, op2, vl);
+}
+
+// CHECK-RV64-LABEL: define dso_local <vscale x 2 x bfloat> @test_vfmul_vv_bf16mf2(
+// CHECK-RV64-SAME: <vscale x 2 x bfloat> [[OP1:%.*]], <vscale x 2 x bfloat> [[OP2:%.*]], i64 noundef [[VL:%.*]]) #[[ATTR0]] {
+// CHECK-RV64-NEXT: [[ENTRY:.*:]]
+// CHECK-RV64-NEXT: [[TMP0:%.*]] = call <vscale x 2 x bfloat> @llvm.riscv.vfmul.nxv2bf16.nxv2bf16.i64(<vscale x 2 x bfloat> poison, <vscale x 2 x bfloat> [[OP1]], <vscale x 2 x bfloat> [[OP2]], i64 7, i64 [[VL]])
+// CHECK-RV64-NEXT: ret <vscale x 2 x bfloat> [[TMP0]]
+//
+vbfloat16mf2_t test_vfmul_vv_bf16mf2(vbfloat16mf2_t op1, vbfloat16mf2_t op2, size_t vl) {
+ return __riscv_vfmul_vv_bf16mf2(op1, op2, vl);
+}
+
+// CHECK-RV64-LABEL: define dso_local <vscale x 2 x bfloat> @test_vfmul_vf_bf16mf2(
+// CHECK-RV64-SAME: <vscale x 2 x bfloat> [[OP1:%.*]], bfloat noundef [[OP2:%.*]], i64 noundef [[VL:%.*]]) #[[ATTR0]] {
+// CHECK-RV64-NEXT: [[ENTRY:.*:]]
+// CHECK-RV64-NEXT: [[TMP0:%.*]] = call <vscale x 2 x bfloat> @llvm.riscv.vfmul.nxv2bf16.bf16.i64(<vscale x 2 x bfloat> poison, <vscale x 2 x bfloat> [[OP1]], bfloat [[OP2]], i64 7, i64 [[VL]])
+// CHECK-RV64-NEXT: ret <vscale x 2 x bfloat> [[TMP0]]
+//
+vbfloat16mf2_t test_vfmul_vf_bf16mf2(vbfloat16mf2_t op1, __bf16 op2, size_t vl) {
+ return __riscv_vfmul_vf_bf16mf2(op1, op2, vl);
+}
+
+// CHECK-RV64-LABEL: define dso_local <vscale x 4 x bfloat> @test_vfmul_vv_bf16m1(
+// CHECK-RV64-SAME: <vscale x 4 x bfloat> [[OP1:%.*]], <vscale x 4 x bfloat> [[OP2:%.*]], i64 noundef [[VL:%.*]]) #[[ATTR0]] {
+// CHECK-RV64-NEXT: [[ENTRY:.*:]]
+// CHECK-RV64-NEXT: [[TMP0:%.*]] = call <vscale x 4 x bfloat> @llvm.riscv.vfmul.nxv4bf16.nxv4bf16.i64(<vscale x 4 x bfloat> poison, <vscale x 4 x bfloat> [[OP1]], <vscale x 4 x bfloat> [[OP2]], i64 7, i64 [[VL]])
+// CHECK-RV64-NEXT: ret <vscale x 4 x bfloat> [[TMP0]]
+//
+vbfloat16m1_t test_vfmul_vv_bf16m1(vbfloat16m1_t op1, vbfloat16m1_t op2, size_t vl) {
+ return __riscv_vfmul_vv_bf16m1(op1, op2, vl);
+}
+
+// CHECK-RV64-LABEL: define dso_local <vscale x 4 x bfloat> @test_vfmul_vf_bf16m1(
+// CHECK-RV64-SAME: <vscale x 4 x bfloat> [[OP1:%.*]], bfloat noundef [[OP2:%.*]], i64 noundef [[VL:%.*]]) #[[ATTR0]] {
+// CHECK-RV64-NEXT: [[ENTRY:.*:]]
+// CHECK-RV64-NEXT: [[TMP0:%.*]] = call <vscale x 4 x bfloat> @llvm.riscv.vfmul.nxv4bf16.bf16.i64(<vscale x 4 x bfloat> poison, <vscale x 4 x bfloat> [[OP1]], bfloat [[OP2]], i64 7, i64 [[VL]])
+// CHECK-RV64-NEXT: ret <vscale x 4 x bfloat> [[TMP0]]
+//
+vbfloat16m1_t test_vfmul_vf_bf16m1(vbfloat16m1_t op1, __bf16 op2, size_t vl) {
+ return __riscv_vfmul_vf_bf16m1(op1, op2, vl);
+}
+
+// CHECK-RV64-LABEL: define dso_local <vscale x 8 x bfloat> @test_vfmul_vv_bf16m2(
+// CHECK-RV64-SAME: <vscale x 8 x bfloat> [[OP1:%.*]], <vscale x 8 x bfloat> [[OP2:%.*]], i64 noundef [[VL:%.*]]) #[[ATTR0]] {
+// CHECK-RV64-NEXT: [[ENTRY:.*:]]
+// CHECK-RV64-NEXT: [[TMP0:%.*]] = call <vscale x 8 x bfloat> @llvm.riscv.vfmul.nxv8bf16.nxv8bf16.i64(<vscale x 8 x bfloat> poison, <vscale x 8 x bfloat> [[OP1]], <vscale x 8 x bfloat> [[OP2]], i64 7, i64 [[VL]])
+// CHECK-RV64-NEXT: ret <vscale x 8 x bfloat> [[TMP0]]
+//
+vbfloat16m2_t test_vfmul_vv_bf16m2(vbfloat16m2_t op1, vbfloat16m2_t op2, size_t vl) {
+ return __riscv_vfmul_vv_bf16m2(op1, op2, vl);
+}
+
+// CHECK-RV64-LABEL: define dso_local <vscale x 8 x bfloat> @test_vfmul_vf_bf16m2(
+// CHECK-RV64-SAME: <vscale x 8 x bfloat> [[OP1:%.*]], bfloat noundef [[OP2:%.*]], i64 noundef [[VL:%.*]]) #[[ATTR0]] {
+// CHECK-RV64-NEXT: [[ENTRY:.*:]]
+// CHECK-RV64-NEXT: [[TMP0:%.*]] = call <vscale x 8 x bfloat> @llvm.riscv.vfmul.nxv8bf16.bf16.i64(<vscale x 8 x bfloat> poison, <vscale x 8 x bfloat> [[OP1]], bfloat [[OP2]], i64 7, i64 [[VL]])
+// CHECK-RV64-NEXT: ret <vscale x 8 x bfloat> [[TMP0]]
+//
+vbfloat16m2_t test_vfmul_vf_bf16m2(vbfloat16m2_t op1, __bf16 op2, size_t vl) {
+ return __riscv_vfmul_vf_bf16m2(op1, op2, vl);
+}
+
+// CHECK-RV64-LABEL: define dso_local <vscale x 16 x bfloat> @test_vfmul_vv_bf16m4(
+// CHECK-RV64-SAME: <vscale x 16 x bfloat> [[OP1:%.*]], <vscale x 16 x bfloat> [[OP2:%.*]], i64 noundef [[VL:%.*]]) #[[ATTR0]] {
+// CHECK-RV64-NEXT: [[ENTRY:.*:]]
+// CHECK-RV64-NEXT: [[TMP0:%.*]] = call <vscale x 16 x bfloat> @llvm.riscv.vfmul.nxv16bf16.nxv16bf16.i64(<vscale x 16 x bfloat> poison, <vscale x 16 x bfloat> [[OP1]], <vscale x 16 x bfloat> [[OP2]], i64 7, i64 [[VL]])
+// CHECK-RV64-NEXT: ret <vscale x 16 x bfloat> [[TMP0]]
+//
+vbfloat16m4_t test_vfmul_vv_bf16m4(vbfloat16m4_t op1, vbfloat16m4_t op2, size_t vl) {
+ return __riscv_vfmul_vv_bf16m4(op1, op2, vl);
+}
+
+// CHECK-RV64-LABEL: define dso_local <vscale x 16 x bfloat> @test_vfmul_vf_bf16m4(
+// CHECK-RV64-SAME: <vscale x 16 x bfloat> [[OP1:%.*]], bfloat noundef [[OP2:%.*]], i64 noundef [[VL:%.*]]) #[[ATTR0]] {
+// CHECK-RV64-NEXT: [[ENTRY:.*:]]
+// CHECK-RV64-NEXT: [[TMP0:%.*]] = call <vscale x 16 x bfloat> @llvm.riscv.vfmul.nxv16bf16.bf16.i64(<vscale x 16 x bfloat> poison, <vscale x 16 x bfloat> [[OP1]], bfloat [[OP2]], i64 7, i64 [[VL]])
+// CHECK-RV64-NEXT: ret <vscale x 16 x bfloat> [[TMP0]]
+//
+vbfloat16m4_t test_vfmul_vf_bf16m4(vbfloat16m4_t op1, __bf16 op2, size_t vl) {
+ return __riscv_vfmul_vf_bf16m4(op1, op2, vl);
+}
+
+// CHECK-RV64-LABEL: define dso_local <vscale x 32 x bfloat> @test_vfmul_vv_bf16m8(
+// CHECK-RV64-SAME: <vscale x 32 x bfloat> [[OP1:%.*]], <vscale x 32 x bfloat> [[OP2:%.*]], i64 noundef [[VL:%.*]]) #[[ATTR0]] {
+// CHECK-RV64-NEXT: [[ENTRY:.*:]]
+// CHECK-RV64-NEXT: [[TMP0:%.*]] = call <vscale x 32 x bfloat> @llvm.riscv.vfmul.nxv32bf16.nxv32bf16.i64(<vscale x 32 x bfloat> poison, <vscale x 32 x bfloat> [[OP1]], <vscale x 32 x bfloat> [[OP2]], i64 7, i64 [[VL]])
+// CHECK-RV64-NEXT: ret <vscale x 32 x bfloat> [[TMP0]]
+//
+vbfloat16m8_t test_vfmul_vv_bf16m8(vbfloat16m8_t op1, vbfloat16m8_t op2, size_t vl) {
+ return __riscv_vfmul_vv_bf16m8(op1, op2, vl);
+}
+
+// CHECK-RV64-LABEL: define dso_local <vscale x 32 x bfloat> @test_vfmul_vf_bf16m8(
+// CHECK-RV64-SAME: <vscale x 32 x bfloat> [[OP1:%.*]], bfloat noundef [[OP2:%.*]], i64 noundef [[VL:%.*]]) #[[ATTR0]] {
+// CHECK-RV64-NEXT: [[ENTRY:.*:]]
+// CHECK-RV64-NEXT: [[TMP0:%.*]] = call <vscale x 32 x bfloat> @llvm.riscv.vfmul.nxv32bf16.bf16.i64(<vscale x 32 x bfloat> poison, <vscale x 32 x bfloat> [[OP1]], bfloat [[OP2]], i64 7, i64 [[VL]])
+// CHECK-RV64-NEXT: ret <vscale x 32 x bfloat> [[TMP0]]
+//
+vbfloat16m8_t test_vfmul_vf_bf16m8(vbfloat16m8_t op1, __bf16 op2, size_t vl) {
+ return __riscv_vfmul_vf_bf16m8(op1, op2, vl);
+}
+
+// CHECK-RV64-LABEL: define dso_local <vscale x 1 x bfloat> @test_vfmul_vv_bf16mf4_m(
+// CHECK-RV64-SAME: <vscale x 1 x i1> [[MASK:%.*]], <vscale x 1 x bfloat> [[OP1:%.*]], <vscale x 1 x bfloat> [[OP2:%.*]], i64 noundef [[VL:%.*]]) #[[ATTR0]] {
+// CHECK-RV64-NEXT: [[ENTRY:.*:]]
+// CHECK-RV64-NEXT: [[TMP0:%.*]] = call <vscale x 1 x bfloat> @llvm.riscv.vfmul.mask.nxv1bf16.nxv1bf16.i64(<vscale x 1 x bfloat> poison, <vscale x 1 x bfloat> [[OP1]], <vscale x 1 x bfloat> [[OP2]], <vscale x 1 x i1> [[MASK]], i64 7, i64 [[VL]], i64 3)
+// CHECK-RV64-NEXT: ret <vscale x 1 x bfloat> [[TMP0]]
+//
+vbfloat16mf4_t test_vfmul_vv_bf16mf4_m(vbool64_t mask, vbfloat16mf4_t op1, vbfloat16mf4_t op2, size_t vl) {
+ return __riscv_vfmul_vv_bf16mf4_m(mask, op1, op2, vl);
+}
+
+// CHECK-RV64-LABEL: define dso_local <vscale x 1 x bfloat> @test_vfmul_vf_bf16mf4_m(
+// CHECK-RV64-SAME: <vscale x 1 x i1> [[MASK:%.*]], <vscale x 1 x bfloat> [[OP1:%.*]], bfloat noundef [[OP2:%.*]], i64 noundef [[VL:%.*]]) #[[ATTR0]] {
+// CHECK-RV64-NEXT: [[ENTRY:.*:]]
+// CHECK-RV64-NEXT: [[TMP0:%.*]] = call <vscale x 1 x bfloat> @llvm.riscv.vfmul.mask.nxv1bf16.bf16.i64(<vscale x 1 x bfloat> poison, <vscale x 1 x bfloat> [[OP1]], bfloat [[OP2]], <vscale x 1 x i1> [[MASK]], i64 7, i64 [[VL]], i64 3)
+// CHECK-RV64-NEXT: ret <vscale x 1 x bfloat> [[TMP0]]
+//
+vbfloat16mf4_t test_vfmul_vf_bf16mf4_m(vbool64_t mask, vbfloat16mf4_t op1, __bf16 op2, size_t vl) {
+ return __riscv_vfmul_vf_bf16mf4_m(mask, op1, op2, vl);
+}
+
+// CHECK-RV64-LABEL: define dso_local <vscale x 2 x bfloat> @test_vfmul_vv_bf16mf2_m(
+// CHECK-RV64-SAME: <vscale x 2 x i1> [[MASK:%.*]], <vscale x 2 x bfloat> [[OP1:%.*]], <vscale x 2 x bfloat> [[OP2:%.*]], i64 noundef [[VL:%.*]]) #[[ATTR0]] {
+// CHECK-RV64-NEXT: [[ENTRY:.*:]]
+// CHECK-RV64-NEXT: [[TMP0:%.*]] = call <vscale x 2 x bfloat> @llvm.riscv.vfmul.mask.nxv2bf16.nxv2bf16.i64(<vscale x 2 x bfloat> poison, <vscale x 2 x bfloat> [[OP1]], <vscale x 2 x bfloat> [[OP2]], <vscale x 2 x i1> [[MASK]], i64 7, i64 [[VL]], i64 3)
+// CHECK-RV64-NEXT: ret <vscale x 2 x bfloat> [[TMP0]]
+//
+vbfloat16mf2_t test_vfmul_vv_bf16mf2_m(vbool32_t mask, vbfloat16mf2_t op1, vbfloat16mf2_t op2, size_t vl) {
+ return __riscv_vfmul_vv_bf16mf2_m(mask, op1, op2, vl);
+}
+
+// CHECK-RV64-LABEL: define dso_local <vscale x 2 x bfloat> @test_vfmul_vf_bf16mf2_m(
+// CHECK-RV64-SAME: <vscale x 2 x i1> [[MASK:%.*]], <vscale x 2 x bfloat> [[OP1:%.*]], bfloat noundef [[OP2:%.*]], i64 noundef [[VL:%.*]]) #[[ATTR0]] {
+// CHECK-RV64-NEXT: [[ENTRY:.*:]]
+// CHECK-RV64-NEXT: [[TMP0:%.*]] = call <vscale x 2 x bfloat> @llvm.riscv.vfmul.mask.nxv2bf16.bf16.i64(<vscale x 2 x bfloat> poison, <vscale x 2 x bfloat> [[OP1]], bfloat [[OP2]], <vscale x 2 x i1> [[MASK]], i64 7, i64 [[VL]], i64 3)
+// CHECK-RV64-NEXT: ret <vscale x 2 x bfloat> [[TMP0]]
+//
+vbfloat16mf2_t test_vfmul_vf_bf16mf2_m(vbool32_t mask, vbfloat16mf2_t op1, __bf16 op2, size_t vl) {
+ return __riscv_vfmul_vf_bf16mf2_m(mask, op1, op2, vl);
+}
+
+// CHECK-RV64-LABEL: define dso_local <vscale x 4 x bfloat> @test_vfmul_vv_bf16m1_m(
+// CHECK-RV64-SAME: <vscale x 4 x i1> [[MASK:%.*]], <vscale x 4 x bfloat> [[OP1:%.*]], <vscale x 4 x bfloat> [[OP2:%.*]], i64 noundef [[VL:%.*]]) #[[ATTR0]] {
+// CHECK-RV64-NEXT: [[ENTRY:.*:]]
+// CHECK-RV64-NEXT: [[TMP0:%.*]] = call <vscale x 4 x bfloat> @llvm.riscv.vfmul.mask.nxv4bf16.nxv4bf16.i64(<vscale x 4 x bfloat> poison, <vscale x 4 x bfloat> [[OP1]], <vscale x 4 x bfloat> [[OP2]], <vscale x 4 x i1> [[MASK]], i64 7, i64 [[VL]], i64 3)
+// CHECK-RV64-NEXT: ret <vscale x 4 x bfloat> [[TMP0]]
+//
+vbfloat16m1_t test_vfmul_vv_bf16m1_m(vbool16_t mask, vbfloat16m1_t op1, vbfloat16m1_t op2, size_t vl) {
+ return __riscv_vfmul_vv_bf16m1_m(mask, op1, op2, vl);
+}
+
+// CHECK-RV64-LABEL: define dso_local <vscale x 4 x bfloat> @test_vfmul_vf_bf16m1_m(
+// CHECK-RV64-SAME: <vscale x 4 x i1> [[MASK:%.*]], <vscale x 4 x bfloat> [[OP1:%.*]], bfloat noundef [[OP2:%.*]], i64 noundef [[VL:%.*]]) #[[ATTR0]] {
+// CHECK-RV64-NEXT: [[ENTRY:.*:]]
+// CHECK-RV64-NEXT: [[TMP0:%.*]] = call <vscale x 4 x bfloat> @llvm.riscv.vfmul.mask.nxv4bf16.bf16.i64(<vscale x 4 x bfloat> poison, <vscale x 4 x bfloat> [[OP1]], bfloat [[OP2]], <vscale x 4 x i1> [[MASK]], i64 7, i64 [[VL]], i64 3)
+// CHECK-RV64-NEXT: ret <vscale x 4 x bfloat> [[TMP0]]
+//
+vbfloat16m1_t test_vfmul_vf_bf16m1_m(vbool16_t mask, vbfloat16m1_t op1, __bf16 op2, size_t vl) {
+ return __riscv_vfmul_vf_bf16m1_m(mask, op1, op2, vl);
+}
+
+// CHECK-RV64-LABEL: define dso_local <vscale x 8 x bfloat> @test_vfmul_vv_bf16m2_m(
+// CHECK-RV64-SAME: <vscale x 8 x i1> [[MASK:%.*]], <vscale x 8 x bfloat> [[OP1:%.*]], <vscale x 8 x bfloat> [[OP2:%.*]], i64 noundef [[VL:%.*]]) #[[ATTR0]] {
+// CHECK-RV64-NEXT: [[ENTRY:.*:]]
+// CHECK-RV64-NEXT: [[TMP0:%.*]] = call <vscale x 8 x bfloat> @llvm.riscv.vfmul.mask.nxv8bf16.nxv8bf16.i64(<vscale x 8 x bfloat> poison, <vscale x 8 x bfloat> [[OP1]], <vscale x 8 x bfloat> [[OP2]], <vscale x 8 x i1> [[MASK]], i64 7, i64 [[VL]], i64 3)
+// CHECK-RV64-NEXT: ret <vscale x 8 x bfloat> [[TMP0]]
+//
+vbfloat16m2_t test_vfmul_vv_bf16m2_m(vbool8_t mask, vbfloat16m2_t op1, vbfloat16m2_t op2, size_t vl) {
+ return __riscv_vfmul_vv_bf16m2_m(mask, op1, op2, vl);
+}
+
+// CHECK-RV64-LABEL: define dso_local <vscale x 8 x bfloat> @test_vfmul_vf_bf16m2_m(
+// CHECK-RV64-SAME: <vscale x 8 x i1> [[MASK:%.*]], <vscale x 8 x bfloat> [[OP1:%.*]], bfloat noundef [[OP2:%.*]], i64 noundef [[VL:%.*]]) #[[ATTR0]] {
+// CHECK-RV64-NEXT: [[ENTRY:.*:]]
+// CHECK-RV64-NEXT: [[TMP0:%.*]] = call <vscale x 8 x bfloat> @llvm.riscv.vfmul.mask.nxv8bf16.bf16.i64(<vscale x 8 x bfloat> poison, <vscale x 8 x bfloat> [[OP1]], bfloat [[OP2]], <vscale x 8 x i1> [[MASK]], i64 7, i64 [[VL]], i64 3)
+// CHECK-RV64-NEXT: ret <vscale x 8 x bfloat> [[TMP0]]
+//
+vbfloat16m2_t test_vfmul_vf_bf16m2_m(vbool8_t mask, vbfloat16m2_t op1, __bf16 op2, size_t vl) {
+ return __riscv_vfmul_vf_bf16m2_m(mask, op1, op2, vl);
+}
+
+// CHECK-RV64-LABEL: define dso_local <vscale x 16 x bfloat> @test_vfmul_vv_bf16m4_m(
+// CHECK-RV64-SAME: <vscale x 16 x i1> [[MASK:%.*]], <vscale x 16 x bfloat> [[OP1:%.*]], <vscale x 16 x bfloat> [[OP2:%.*]], i64 noundef [[VL:%.*]]) #[[ATTR0]] {
+// CHECK-RV64-NEXT: [[ENTRY:.*:]]
+// CHECK-RV64-NEXT: [[TMP0:%.*]] = call <vscale x 16 x bfloat> @llvm.riscv.vfmul.mask.nxv16bf16.nxv16bf16.i64(<vscale x 16 x bfloat> poison, <vscale x 16 x bfloat> [[OP1]], <vscale x 16 x bfloat> [[OP2]], <vscale x 16 x i1> [[MASK]], i64 7, i64 [[VL]], i64 3)
+// CHECK-RV64-NEXT: ret <vscale x 16 x bfloat> [[TMP0]]
+//
+vbfloat16m4_t test_vfmul_vv_bf16m4_m(vbool4_t mask, vbfloat16m4_t op1, vbfloat16m4_t op2, size_t vl) {
+ return __riscv_vfmul_vv_bf16m4_m(mask, op1, op2, vl);
+}
+
+// CHECK-RV64-LABEL: define dso_local <vscale x 16 x bfloat> @test_vfmul_vf_bf16m4_m(
+// CHECK-RV64-SAME: <vscale x 16 x i1> [[MASK:%.*]], <vscale x 16 x bfloat> [[OP1:%.*]], bfloat noundef [[OP2:%.*]], i64 noundef [[VL:%.*]]) #[[ATTR0]] {
+// CHECK-RV64-NEXT: [[ENTRY:.*:]]
+// CHECK-RV64-NEXT: [[TMP0:%.*]] = call <vscale x 16 x bfloat> @llvm.riscv.vfmul.mask.nxv16bf16.bf16.i64(<vscale x 16 x bfloat> poison, <vscale x 16 x bfloat> [[OP1]], bfloat [[OP2]], <vscale x 16 x i1> [[MASK]], i64 7, i64 [[VL]], i64 3)
+// CHECK-RV64-NEXT: ret <vscale x 16 x bfloat> [[TMP0]]
+//
+vbfloat16m4_t test_vfmul_vf_bf16m4_m(vbool4_t mask, vbfloat16m4_t op1, __bf16 op2, size_t vl) {
+ return __riscv_vfmul_vf_bf16m4_m(mask, op1, op2, vl);
+}
+
+// CHECK-RV64-LABEL: define dso_local <vscale x 32 x bfloat> @test_vfmul_vv_bf16m8_m(
+// CHECK-RV64-SAME: <vscale x 32 x i1> [[MASK:%.*]], <vscale x 32 x bfloat> [[OP1:%.*]], <vscale x 32 x bfloat> [[OP2:%.*]], i64 noundef [[VL:%.*]]) #[[ATTR0]] {
+// CHECK-RV64-NEXT: [[ENTRY:.*:]]
+// CHECK-RV64-NEXT: [[TMP0:%.*]] = call <vscale x 32 x bfloat> @llvm.riscv.vfmul.mask.nxv32bf16.nxv32bf16.i64(<vscale x 32 x bfloat> poison, <vscale x 32 x bfloat> [[OP1]], <vscale x 32 x bfloat> [[OP2]], <vscale x 32 x i1> [[MASK]], i64 7, i64 [[VL]], i64 3)
+// CHECK-RV64-NEXT: ret <vscale x 32 x bfloat> [[TMP0]]
+//
+vbfloat16m8_t test_vfmul_vv_bf16m8_m(vbool2_t mask, vbfloat16m8_t op1, vbfloat16m8_t op2, size_t vl) {
+ return __riscv_vfmul_vv_bf16m8_m(mask, op1, op2, vl);
+}
+
+// CHECK-RV64-LABEL: define dso_local <vscale x 32 x bfloat> @test_vfmul_vf_bf16m8_m(
+// CHECK-RV64-SAME: <vscale x 32 x i1> [[MASK:%.*]], <vscale x 32 x bfloat> [[OP1:%.*]], bfloat noundef [[OP2:%.*]], i64 noundef [[VL:%.*]]) #[[ATTR0]] {
+// CHECK-RV64-NEXT: [[ENTRY:.*:]]
+// CHECK-RV64-NEXT: [[TMP0:%.*]] = call <vscale x 32 x bfloat> @llvm.riscv.vfmul.mask.nxv32bf16.bf16.i64(<vscale x 32 x bfloat> poison, <vscale x 32 x bfloat> [[OP1]], bfloat [[OP2]], <vscale x 32 x i1> [[MASK]], i64 7, i64 [[VL]], i64 3)
+// CHECK-RV64-NEXT: ret <vscale x 32 x bfloat> [[TMP0]]
+//
+vbfloat16m8_t test_vfmul_vf_bf16m8_m(vbool2_t mask, vbfloat16m8_t op1, __bf16 op2, size_t vl) {
+ return __riscv_vfmul_vf_bf16m8_m(mask, op1, op2, vl);
+}
+
diff --git a/clang/test/CodeGen/RISCV/rvv-intrinsics-autogenerated/zvfbfa/non-policy/non-overloaded/vfmv.c b/clang/test/CodeGen/RISCV/rvv-intrinsics-autogenerated/zvfbfa/non-policy/non-overloaded/vfmv.c
new file mode 100644
index 0000000..d80ec3d
--- /dev/null
+++ b/clang/test/CodeGen/RISCV/rvv-intrinsics-autogenerated/zvfbfa/non-policy/non-overloaded/vfmv.c
@@ -0,0 +1,189 @@
+// NOTE: Assertions have been autogenerated by utils/update_cc_test_checks.py UTC_ARGS: --version 5
+// REQUIRES: riscv-registered-target
+// RUN: %clang_cc1 -triple riscv64 -target-feature +v \
+// RUN: -target-feature +experimental-zvfbfa -disable-O0-optnone \
+// RUN: -emit-llvm %s -o - | opt -S -passes=mem2reg | \
+// RUN: FileCheck --check-prefix=CHECK-RV64 %s
+
+#include <riscv_vector.h>
+
+// CHECK-RV64-LABEL: define dso_local <vscale x 1 x bfloat> @test_vfmv_v_f_bf16mf4(
+// CHECK-RV64-SAME: bfloat noundef [[SRC:%.*]], i64 noundef [[VL:%.*]]) #[[ATTR0:[0-9]+]] {
+// CHECK-RV64-NEXT: [[ENTRY:.*:]]
+// CHECK-RV64-NEXT: [[TMP0:%.*]] = call <vscale x 1 x bfloat> @llvm.riscv.vfmv.v.f.nxv1bf16.i64(<vscale x 1 x bfloat> poison, bfloat [[SRC]], i64 [[VL]])
+// CHECK-RV64-NEXT: ret <vscale x 1 x bfloat> [[TMP0]]
+//
+vbfloat16mf4_t test_vfmv_v_f_bf16mf4(__bf16 src, size_t vl) {
+ return __riscv_vfmv_v_f_bf16mf4(src, vl);
+}
+
+// CHECK-RV64-LABEL: define dso_local <vscale x 2 x bfloat> @test_vfmv_v_f_bf16mf2(
+// CHECK-RV64-SAME: bfloat noundef [[SRC:%.*]], i64 noundef [[VL:%.*]]) #[[ATTR0]] {
+// CHECK-RV64-NEXT: [[ENTRY:.*:]]
+// CHECK-RV64-NEXT: [[TMP0:%.*]] = call <vscale x 2 x bfloat> @llvm.riscv.vfmv.v.f.nxv2bf16.i64(<vscale x 2 x bfloat> poison, bfloat [[SRC]], i64 [[VL]])
+// CHECK-RV64-NEXT: ret <vscale x 2 x bfloat> [[TMP0]]
+//
+vbfloat16mf2_t test_vfmv_v_f_bf16mf2(__bf16 src, size_t vl) {
+ return __riscv_vfmv_v_f_bf16mf2(src, vl);
+}
+
+// CHECK-RV64-LABEL: define dso_local <vscale x 4 x bfloat> @test_vfmv_v_f_bf16m1(
+// CHECK-RV64-SAME: bfloat noundef [[SRC:%.*]], i64 noundef [[VL:%.*]]) #[[ATTR0]] {
+// CHECK-RV64-NEXT: [[ENTRY:.*:]]
+// CHECK-RV64-NEXT: [[TMP0:%.*]] = call <vscale x 4 x bfloat> @llvm.riscv.vfmv.v.f.nxv4bf16.i64(<vscale x 4 x bfloat> poison, bfloat [[SRC]], i64 [[VL]])
+// CHECK-RV64-NEXT: ret <vscale x 4 x bfloat> [[TMP0]]
+//
+vbfloat16m1_t test_vfmv_v_f_bf16m1(__bf16 src, size_t vl) {
+ return __riscv_vfmv_v_f_bf16m1(src, vl);
+}
+
+// CHECK-RV64-LABEL: define dso_local <vscale x 8 x bfloat> @test_vfmv_v_f_bf16m2(
+// CHECK-RV64-SAME: bfloat noundef [[SRC:%.*]], i64 noundef [[VL:%.*]]) #[[ATTR0]] {
+// CHECK-RV64-NEXT: [[ENTRY:.*:]]
+// CHECK-RV64-NEXT: [[TMP0:%.*]] = call <vscale x 8 x bfloat> @llvm.riscv.vfmv.v.f.nxv8bf16.i64(<vscale x 8 x bfloat> poison, bfloat [[SRC]], i64 [[VL]])
+// CHECK-RV64-NEXT: ret <vscale x 8 x bfloat> [[TMP0]]
+//
+vbfloat16m2_t test_vfmv_v_f_bf16m2(__bf16 src, size_t vl) {
+ return __riscv_vfmv_v_f_bf16m2(src, vl);
+}
+
+// CHECK-RV64-LABEL: define dso_local <vscale x 16 x bfloat> @test_vfmv_v_f_bf16m4(
+// CHECK-RV64-SAME: bfloat noundef [[SRC:%.*]], i64 noundef [[VL:%.*]]) #[[ATTR0]] {
+// CHECK-RV64-NEXT: [[ENTRY:.*:]]
+// CHECK-RV64-NEXT: [[TMP0:%.*]] = call <vscale x 16 x bfloat> @llvm.riscv.vfmv.v.f.nxv16bf16.i64(<vscale x 16 x bfloat> poison, bfloat [[SRC]], i64 [[VL]])
+// CHECK-RV64-NEXT: ret <vscale x 16 x bfloat> [[TMP0]]
+//
+vbfloat16m4_t test_vfmv_v_f_bf16m4(__bf16 src, size_t vl) {
+ return __riscv_vfmv_v_f_bf16m4(src, vl);
+}
+
+// CHECK-RV64-LABEL: define dso_local <vscale x 32 x bfloat> @test_vfmv_v_f_bf16m8(
+// CHECK-RV64-SAME: bfloat noundef [[SRC:%.*]], i64 noundef [[VL:%.*]]) #[[ATTR0]] {
+// CHECK-RV64-NEXT: [[ENTRY:.*:]]
+// CHECK-RV64-NEXT: [[TMP0:%.*]] = call <vscale x 32 x bfloat> @llvm.riscv.vfmv.v.f.nxv32bf16.i64(<vscale x 32 x bfloat> poison, bfloat [[SRC]], i64 [[VL]])
+// CHECK-RV64-NEXT: ret <vscale x 32 x bfloat> [[TMP0]]
+//
+vbfloat16m8_t test_vfmv_v_f_bf16m8(__bf16 src, size_t vl) {
+ return __riscv_vfmv_v_f_bf16m8(src, vl);
+}
+
+// CHECK-RV64-LABEL: define dso_local bfloat @test_vfmv_f_s_bf16mf4_bf16(
+// CHECK-RV64-SAME: <vscale x 1 x bfloat> [[SRC:%.*]]) #[[ATTR0]] {
+// CHECK-RV64-NEXT: [[ENTRY:.*:]]
+// CHECK-RV64-NEXT: [[TMP0:%.*]] = call bfloat @llvm.riscv.vfmv.f.s.nxv1bf16(<vscale x 1 x bfloat> [[SRC]])
+// CHECK-RV64-NEXT: ret bfloat [[TMP0]]
+//
+__bf16 test_vfmv_f_s_bf16mf4_bf16(vbfloat16mf4_t src) {
+ return __riscv_vfmv_f_s_bf16mf4_bf16(src);
+}
+
+// CHECK-RV64-LABEL: define dso_local <vscale x 1 x bfloat> @test_vfmv_s_f_bf16mf4(
+// CHECK-RV64-SAME: bfloat noundef [[SRC:%.*]], i64 noundef [[VL:%.*]]) #[[ATTR0]] {
+// CHECK-RV64-NEXT: [[ENTRY:.*:]]
+// CHECK-RV64-NEXT: [[TMP0:%.*]] = call <vscale x 1 x bfloat> @llvm.riscv.vfmv.s.f.nxv1bf16.i64(<vscale x 1 x bfloat> poison, bfloat [[SRC]], i64 [[VL]])
+// CHECK-RV64-NEXT: ret <vscale x 1 x bfloat> [[TMP0]]
+//
+vbfloat16mf4_t test_vfmv_s_f_bf16mf4(__bf16 src, size_t vl) {
+ return __riscv_vfmv_s_f_bf16mf4(src, vl);
+}
+
+// CHECK-RV64-LABEL: define dso_local bfloat @test_vfmv_f_s_bf16mf2_bf16(
+// CHECK-RV64-SAME: <vscale x 2 x bfloat> [[SRC:%.*]]) #[[ATTR0]] {
+// CHECK-RV64-NEXT: [[ENTRY:.*:]]
+// CHECK-RV64-NEXT: [[TMP0:%.*]] = call bfloat @llvm.riscv.vfmv.f.s.nxv2bf16(<vscale x 2 x bfloat> [[SRC]])
+// CHECK-RV64-NEXT: ret bfloat [[TMP0]]
+//
+__bf16 test_vfmv_f_s_bf16mf2_bf16(vbfloat16mf2_t src) {
+ return __riscv_vfmv_f_s_bf16mf2_bf16(src);
+}
+
+// CHECK-RV64-LABEL: define dso_local <vscale x 2 x bfloat> @test_vfmv_s_f_bf16mf2(
+// CHECK-RV64-SAME: bfloat noundef [[SRC:%.*]], i64 noundef [[VL:%.*]]) #[[ATTR0]] {
+// CHECK-RV64-NEXT: [[ENTRY:.*:]]
+// CHECK-RV64-NEXT: [[TMP0:%.*]] = call <vscale x 2 x bfloat> @llvm.riscv.vfmv.s.f.nxv2bf16.i64(<vscale x 2 x bfloat> poison, bfloat [[SRC]], i64 [[VL]])
+// CHECK-RV64-NEXT: ret <vscale x 2 x bfloat> [[TMP0]]
+//
+vbfloat16mf2_t test_vfmv_s_f_bf16mf2(__bf16 src, size_t vl) {
+ return __riscv_vfmv_s_f_bf16mf2(src, vl);
+}
+
+// CHECK-RV64-LABEL: define dso_local bfloat @test_vfmv_f_s_bf16m1_bf16(
+// CHECK-RV64-SAME: <vscale x 4 x bfloat> [[SRC:%.*]]) #[[ATTR0]] {
+// CHECK-RV64-NEXT: [[ENTRY:.*:]]
+// CHECK-RV64-NEXT: [[TMP0:%.*]] = call bfloat @llvm.riscv.vfmv.f.s.nxv4bf16(<vscale x 4 x bfloat> [[SRC]])
+// CHECK-RV64-NEXT: ret bfloat [[TMP0]]
+//
+__bf16 test_vfmv_f_s_bf16m1_bf16(vbfloat16m1_t src) {
+ return __riscv_vfmv_f_s_bf16m1_bf16(src);
+}
+
+// CHECK-RV64-LABEL: define dso_local <vscale x 4 x bfloat> @test_vfmv_s_f_bf16m1(
+// CHECK-RV64-SAME: bfloat noundef [[SRC:%.*]], i64 noundef [[VL:%.*]]) #[[ATTR0]] {
+// CHECK-RV64-NEXT: [[ENTRY:.*:]]
+// CHECK-RV64-NEXT: [[TMP0:%.*]] = call <vscale x 4 x bfloat> @llvm.riscv.vfmv.s.f.nxv4bf16.i64(<vscale x 4 x bfloat> poison, bfloat [[SRC]], i64 [[VL]])
+// CHECK-RV64-NEXT: ret <vscale x 4 x bfloat> [[TMP0]]
+//
+vbfloat16m1_t test_vfmv_s_f_bf16m1(__bf16 src, size_t vl) {
+ return __riscv_vfmv_s_f_bf16m1(src, vl);
+}
+
+// CHECK-RV64-LABEL: define dso_local bfloat @test_vfmv_f_s_bf16m2_bf16(
+// CHECK-RV64-SAME: <vscale x 8 x bfloat> [[SRC:%.*]]) #[[ATTR0]] {
+// CHECK-RV64-NEXT: [[ENTRY:.*:]]
+// CHECK-RV64-NEXT: [[TMP0:%.*]] = call bfloat @llvm.riscv.vfmv.f.s.nxv8bf16(<vscale x 8 x bfloat> [[SRC]])
+// CHECK-RV64-NEXT: ret bfloat [[TMP0]]
+//
+__bf16 test_vfmv_f_s_bf16m2_bf16(vbfloat16m2_t src) {
+ return __riscv_vfmv_f_s_bf16m2_bf16(src);
+}
+
+// CHECK-RV64-LABEL: define dso_local <vscale x 8 x bfloat> @test_vfmv_s_f_bf16m2(
+// CHECK-RV64-SAME: bfloat noundef [[SRC:%.*]], i64 noundef [[VL:%.*]]) #[[ATTR0]] {
+// CHECK-RV64-NEXT: [[ENTRY:.*:]]
+// CHECK-RV64-NEXT: [[TMP0:%.*]] = call <vscale x 8 x bfloat> @llvm.riscv.vfmv.s.f.nxv8bf16.i64(<vscale x 8 x bfloat> poison, bfloat [[SRC]], i64 [[VL]])
+// CHECK-RV64-NEXT: ret <vscale x 8 x bfloat> [[TMP0]]
+//
+vbfloat16m2_t test_vfmv_s_f_bf16m2(__bf16 src, size_t vl) {
+ return __riscv_vfmv_s_f_bf16m2(src, vl);
+}
+
+// CHECK-RV64-LABEL: define dso_local bfloat @test_vfmv_f_s_bf16m4_bf16(
+// CHECK-RV64-SAME: <vscale x 16 x bfloat> [[SRC:%.*]]) #[[ATTR0]] {
+// CHECK-RV64-NEXT: [[ENTRY:.*:]]
+// CHECK-RV64-NEXT: [[TMP0:%.*]] = call bfloat @llvm.riscv.vfmv.f.s.nxv16bf16(<vscale x 16 x bfloat> [[SRC]])
+// CHECK-RV64-NEXT: ret bfloat [[TMP0]]
+//
+__bf16 test_vfmv_f_s_bf16m4_bf16(vbfloat16m4_t src) {
+ return __riscv_vfmv_f_s_bf16m4_bf16(src);
+}
+
+// CHECK-RV64-LABEL: define dso_local <vscale x 16 x bfloat> @test_vfmv_s_f_bf16m4(
+// CHECK-RV64-SAME: bfloat noundef [[SRC:%.*]], i64 noundef [[VL:%.*]]) #[[ATTR0]] {
+// CHECK-RV64-NEXT: [[ENTRY:.*:]]
+// CHECK-RV64-NEXT: [[TMP0:%.*]] = call <vscale x 16 x bfloat> @llvm.riscv.vfmv.s.f.nxv16bf16.i64(<vscale x 16 x bfloat> poison, bfloat [[SRC]], i64 [[VL]])
+// CHECK-RV64-NEXT: ret <vscale x 16 x bfloat> [[TMP0]]
+//
+vbfloat16m4_t test_vfmv_s_f_bf16m4(__bf16 src, size_t vl) {
+ return __riscv_vfmv_s_f_bf16m4(src, vl);
+}
+
+// CHECK-RV64-LABEL: define dso_local bfloat @test_vfmv_f_s_bf16m8_bf16(
+// CHECK-RV64-SAME: <vscale x 32 x bfloat> [[SRC:%.*]]) #[[ATTR0]] {
+// CHECK-RV64-NEXT: [[ENTRY:.*:]]
+// CHECK-RV64-NEXT: [[TMP0:%.*]] = call bfloat @llvm.riscv.vfmv.f.s.nxv32bf16(<vscale x 32 x bfloat> [[SRC]])
+// CHECK-RV64-NEXT: ret bfloat [[TMP0]]
+//
+__bf16 test_vfmv_f_s_bf16m8_bf16(vbfloat16m8_t src) {
+ return __riscv_vfmv_f_s_bf16m8_bf16(src);
+}
+
+// CHECK-RV64-LABEL: define dso_local <vscale x 32 x bfloat> @test_vfmv_s_f_bf16m8(
+// CHECK-RV64-SAME: bfloat noundef [[SRC:%.*]], i64 noundef [[VL:%.*]]) #[[ATTR0]] {
+// CHECK-RV64-NEXT: [[ENTRY:.*:]]
+// CHECK-RV64-NEXT: [[TMP0:%.*]] = call <vscale x 32 x bfloat> @llvm.riscv.vfmv.s.f.nxv32bf16.i64(<vscale x 32 x bfloat> poison, bfloat [[SRC]], i64 [[VL]])
+// CHECK-RV64-NEXT: ret <vscale x 32 x bfloat> [[TMP0]]
+//
+vbfloat16m8_t test_vfmv_s_f_bf16m8(__bf16 src, size_t vl) {
+ return __riscv_vfmv_s_f_bf16m8(src, vl);
+}
+
diff --git a/clang/test/CodeGen/RISCV/rvv-intrinsics-autogenerated/zvfbfa/non-policy/non-overloaded/vfncvt.c b/clang/test/CodeGen/RISCV/rvv-intrinsics-autogenerated/zvfbfa/non-policy/non-overloaded/vfncvt.c
new file mode 100644
index 0000000..a5afab9
--- /dev/null
+++ b/clang/test/CodeGen/RISCV/rvv-intrinsics-autogenerated/zvfbfa/non-policy/non-overloaded/vfncvt.c
@@ -0,0 +1,724 @@
+// NOTE: Assertions have been autogenerated by utils/update_cc_test_checks.py UTC_ARGS: --version 5
+// REQUIRES: riscv-registered-target
+// RUN: %clang_cc1 -triple riscv64 -target-feature +v \
+// RUN: -target-feature +experimental-zvfbfa -disable-O0-optnone \
+// RUN: -emit-llvm %s -o - | opt -S -passes=mem2reg | \
+// RUN: FileCheck --check-prefix=CHECK-RV64 %s
+
+#include <riscv_vector.h>
+
+// CHECK-RV64-LABEL: define dso_local <vscale x 1 x i8> @test_vfncvt_x_f_w_bf16mf4_i8mf8(
+// CHECK-RV64-SAME: <vscale x 1 x bfloat> [[VS2:%.*]], i64 noundef [[VL:%.*]]) #[[ATTR0:[0-9]+]] {
+// CHECK-RV64-NEXT: [[ENTRY:.*:]]
+// CHECK-RV64-NEXT: [[TMP0:%.*]] = call <vscale x 1 x i8> @llvm.riscv.vfncvt.x.f.w.nxv1i8.nxv1bf16.i64(<vscale x 1 x i8> poison, <vscale x 1 x bfloat> [[VS2]], i64 7, i64 [[VL]])
+// CHECK-RV64-NEXT: ret <vscale x 1 x i8> [[TMP0]]
+//
+vint8mf8_t test_vfncvt_x_f_w_bf16mf4_i8mf8(vbfloat16mf4_t vs2, size_t vl) {
+ return __riscv_vfncvt_x_f_w_bf16mf4_i8mf8(vs2, vl);
+}
+
+// CHECK-RV64-LABEL: define dso_local <vscale x 2 x i8> @test_vfncvt_x_f_w_bf16mf2_i8mf4(
+// CHECK-RV64-SAME: <vscale x 2 x bfloat> [[VS2:%.*]], i64 noundef [[VL:%.*]]) #[[ATTR0]] {
+// CHECK-RV64-NEXT: [[ENTRY:.*:]]
+// CHECK-RV64-NEXT: [[TMP0:%.*]] = call <vscale x 2 x i8> @llvm.riscv.vfncvt.x.f.w.nxv2i8.nxv2bf16.i64(<vscale x 2 x i8> poison, <vscale x 2 x bfloat> [[VS2]], i64 7, i64 [[VL]])
+// CHECK-RV64-NEXT: ret <vscale x 2 x i8> [[TMP0]]
+//
+vint8mf4_t test_vfncvt_x_f_w_bf16mf2_i8mf4(vbfloat16mf2_t vs2, size_t vl) {
+ return __riscv_vfncvt_x_f_w_bf16mf2_i8mf4(vs2, vl);
+}
+
+// CHECK-RV64-LABEL: define dso_local <vscale x 4 x i8> @test_vfncvt_x_f_w_bf16m1_i8mf2(
+// CHECK-RV64-SAME: <vscale x 4 x bfloat> [[VS2:%.*]], i64 noundef [[VL:%.*]]) #[[ATTR0]] {
+// CHECK-RV64-NEXT: [[ENTRY:.*:]]
+// CHECK-RV64-NEXT: [[TMP0:%.*]] = call <vscale x 4 x i8> @llvm.riscv.vfncvt.x.f.w.nxv4i8.nxv4bf16.i64(<vscale x 4 x i8> poison, <vscale x 4 x bfloat> [[VS2]], i64 7, i64 [[VL]])
+// CHECK-RV64-NEXT: ret <vscale x 4 x i8> [[TMP0]]
+//
+vint8mf2_t test_vfncvt_x_f_w_bf16m1_i8mf2(vbfloat16m1_t vs2, size_t vl) {
+ return __riscv_vfncvt_x_f_w_bf16m1_i8mf2(vs2, vl);
+}
+
+// CHECK-RV64-LABEL: define dso_local <vscale x 8 x i8> @test_vfncvt_x_f_w_bf16m2_i8m1(
+// CHECK-RV64-SAME: <vscale x 8 x bfloat> [[VS2:%.*]], i64 noundef [[VL:%.*]]) #[[ATTR0]] {
+// CHECK-RV64-NEXT: [[ENTRY:.*:]]
+// CHECK-RV64-NEXT: [[TMP0:%.*]] = call <vscale x 8 x i8> @llvm.riscv.vfncvt.x.f.w.nxv8i8.nxv8bf16.i64(<vscale x 8 x i8> poison, <vscale x 8 x bfloat> [[VS2]], i64 7, i64 [[VL]])
+// CHECK-RV64-NEXT: ret <vscale x 8 x i8> [[TMP0]]
+//
+vint8m1_t test_vfncvt_x_f_w_bf16m2_i8m1(vbfloat16m2_t vs2, size_t vl) {
+ return __riscv_vfncvt_x_f_w_bf16m2_i8m1(vs2, vl);
+}
+
+// CHECK-RV64-LABEL: define dso_local <vscale x 16 x i8> @test_vfncvt_x_f_w_bf16m4_i8m2(
+// CHECK-RV64-SAME: <vscale x 16 x bfloat> [[VS2:%.*]], i64 noundef [[VL:%.*]]) #[[ATTR0]] {
+// CHECK-RV64-NEXT: [[ENTRY:.*:]]
+// CHECK-RV64-NEXT: [[TMP0:%.*]] = call <vscale x 16 x i8> @llvm.riscv.vfncvt.x.f.w.nxv16i8.nxv16bf16.i64(<vscale x 16 x i8> poison, <vscale x 16 x bfloat> [[VS2]], i64 7, i64 [[VL]])
+// CHECK-RV64-NEXT: ret <vscale x 16 x i8> [[TMP0]]
+//
+vint8m2_t test_vfncvt_x_f_w_bf16m4_i8m2(vbfloat16m4_t vs2, size_t vl) {
+ return __riscv_vfncvt_x_f_w_bf16m4_i8m2(vs2, vl);
+}
+
+// CHECK-RV64-LABEL: define dso_local <vscale x 32 x i8> @test_vfncvt_x_f_w_bf16m8_i8m4(
+// CHECK-RV64-SAME: <vscale x 32 x bfloat> [[VS2:%.*]], i64 noundef [[VL:%.*]]) #[[ATTR0]] {
+// CHECK-RV64-NEXT: [[ENTRY:.*:]]
+// CHECK-RV64-NEXT: [[TMP0:%.*]] = call <vscale x 32 x i8> @llvm.riscv.vfncvt.x.f.w.nxv32i8.nxv32bf16.i64(<vscale x 32 x i8> poison, <vscale x 32 x bfloat> [[VS2]], i64 7, i64 [[VL]])
+// CHECK-RV64-NEXT: ret <vscale x 32 x i8> [[TMP0]]
+//
+vint8m4_t test_vfncvt_x_f_w_bf16m8_i8m4(vbfloat16m8_t vs2, size_t vl) {
+ return __riscv_vfncvt_x_f_w_bf16m8_i8m4(vs2, vl);
+}
+
+// CHECK-RV64-LABEL: define dso_local <vscale x 1 x i8> @test_vfncvt_xu_f_w_bf16mf4_u8mf8(
+// CHECK-RV64-SAME: <vscale x 1 x bfloat> [[VS2:%.*]], i64 noundef [[VL:%.*]]) #[[ATTR0]] {
+// CHECK-RV64-NEXT: [[ENTRY:.*:]]
+// CHECK-RV64-NEXT: [[TMP0:%.*]] = call <vscale x 1 x i8> @llvm.riscv.vfncvt.xu.f.w.nxv1i8.nxv1bf16.i64(<vscale x 1 x i8> poison, <vscale x 1 x bfloat> [[VS2]], i64 7, i64 [[VL]])
+// CHECK-RV64-NEXT: ret <vscale x 1 x i8> [[TMP0]]
+//
+vuint8mf8_t test_vfncvt_xu_f_w_bf16mf4_u8mf8(vbfloat16mf4_t vs2, size_t vl) {
+ return __riscv_vfncvt_xu_f_w_bf16mf4_u8mf8(vs2, vl);
+}
+
+// CHECK-RV64-LABEL: define dso_local <vscale x 2 x i8> @test_vfncvt_xu_f_w_bf16mf2_u8mf4(
+// CHECK-RV64-SAME: <vscale x 2 x bfloat> [[VS2:%.*]], i64 noundef [[VL:%.*]]) #[[ATTR0]] {
+// CHECK-RV64-NEXT: [[ENTRY:.*:]]
+// CHECK-RV64-NEXT: [[TMP0:%.*]] = call <vscale x 2 x i8> @llvm.riscv.vfncvt.xu.f.w.nxv2i8.nxv2bf16.i64(<vscale x 2 x i8> poison, <vscale x 2 x bfloat> [[VS2]], i64 7, i64 [[VL]])
+// CHECK-RV64-NEXT: ret <vscale x 2 x i8> [[TMP0]]
+//
+vuint8mf4_t test_vfncvt_xu_f_w_bf16mf2_u8mf4(vbfloat16mf2_t vs2, size_t vl) {
+ return __riscv_vfncvt_xu_f_w_bf16mf2_u8mf4(vs2, vl);
+}
+
+// CHECK-RV64-LABEL: define dso_local <vscale x 4 x i8> @test_vfncvt_xu_f_w_bf16m1_u8mf2(
+// CHECK-RV64-SAME: <vscale x 4 x bfloat> [[VS2:%.*]], i64 noundef [[VL:%.*]]) #[[ATTR0]] {
+// CHECK-RV64-NEXT: [[ENTRY:.*:]]
+// CHECK-RV64-NEXT: [[TMP0:%.*]] = call <vscale x 4 x i8> @llvm.riscv.vfncvt.xu.f.w.nxv4i8.nxv4bf16.i64(<vscale x 4 x i8> poison, <vscale x 4 x bfloat> [[VS2]], i64 7, i64 [[VL]])
+// CHECK-RV64-NEXT: ret <vscale x 4 x i8> [[TMP0]]
+//
+vuint8mf2_t test_vfncvt_xu_f_w_bf16m1_u8mf2(vbfloat16m1_t vs2, size_t vl) {
+ return __riscv_vfncvt_xu_f_w_bf16m1_u8mf2(vs2, vl);
+}
+
+// CHECK-RV64-LABEL: define dso_local <vscale x 8 x i8> @test_vfncvt_xu_f_w_bf16m2_u8m1(
+// CHECK-RV64-SAME: <vscale x 8 x bfloat> [[VS2:%.*]], i64 noundef [[VL:%.*]]) #[[ATTR0]] {
+// CHECK-RV64-NEXT: [[ENTRY:.*:]]
+// CHECK-RV64-NEXT: [[TMP0:%.*]] = call <vscale x 8 x i8> @llvm.riscv.vfncvt.xu.f.w.nxv8i8.nxv8bf16.i64(<vscale x 8 x i8> poison, <vscale x 8 x bfloat> [[VS2]], i64 7, i64 [[VL]])
+// CHECK-RV64-NEXT: ret <vscale x 8 x i8> [[TMP0]]
+//
+vuint8m1_t test_vfncvt_xu_f_w_bf16m2_u8m1(vbfloat16m2_t vs2, size_t vl) {
+ return __riscv_vfncvt_xu_f_w_bf16m2_u8m1(vs2, vl);
+}
+
+// CHECK-RV64-LABEL: define dso_local <vscale x 16 x i8> @test_vfncvt_xu_f_w_bf16m4_u8m2(
+// CHECK-RV64-SAME: <vscale x 16 x bfloat> [[VS2:%.*]], i64 noundef [[VL:%.*]]) #[[ATTR0]] {
+// CHECK-RV64-NEXT: [[ENTRY:.*:]]
+// CHECK-RV64-NEXT: [[TMP0:%.*]] = call <vscale x 16 x i8> @llvm.riscv.vfncvt.xu.f.w.nxv16i8.nxv16bf16.i64(<vscale x 16 x i8> poison, <vscale x 16 x bfloat> [[VS2]], i64 7, i64 [[VL]])
+// CHECK-RV64-NEXT: ret <vscale x 16 x i8> [[TMP0]]
+//
+vuint8m2_t test_vfncvt_xu_f_w_bf16m4_u8m2(vbfloat16m4_t vs2, size_t vl) {
+ return __riscv_vfncvt_xu_f_w_bf16m4_u8m2(vs2, vl);
+}
+
+// CHECK-RV64-LABEL: define dso_local <vscale x 32 x i8> @test_vfncvt_xu_f_w_bf16m8_u8m4(
+// CHECK-RV64-SAME: <vscale x 32 x bfloat> [[VS2:%.*]], i64 noundef [[VL:%.*]]) #[[ATTR0]] {
+// CHECK-RV64-NEXT: [[ENTRY:.*:]]
+// CHECK-RV64-NEXT: [[TMP0:%.*]] = call <vscale x 32 x i8> @llvm.riscv.vfncvt.xu.f.w.nxv32i8.nxv32bf16.i64(<vscale x 32 x i8> poison, <vscale x 32 x bfloat> [[VS2]], i64 7, i64 [[VL]])
+// CHECK-RV64-NEXT: ret <vscale x 32 x i8> [[TMP0]]
+//
+vuint8m4_t test_vfncvt_xu_f_w_bf16m8_u8m4(vbfloat16m8_t vs2, size_t vl) {
+ return __riscv_vfncvt_xu_f_w_bf16m8_u8m4(vs2, vl);
+}
+
+// CHECK-RV64-LABEL: define dso_local <vscale x 1 x bfloat> @test_vfncvt_f_f_w_bf16mf4(
+// CHECK-RV64-SAME: <vscale x 1 x float> [[VS2:%.*]], i64 noundef [[VL:%.*]]) #[[ATTR0]] {
+// CHECK-RV64-NEXT: [[ENTRY:.*:]]
+// CHECK-RV64-NEXT: [[TMP0:%.*]] = call <vscale x 1 x bfloat> @llvm.riscv.vfncvt.f.f.w.nxv1bf16.nxv1f32.i64(<vscale x 1 x bfloat> poison, <vscale x 1 x float> [[VS2]], i64 7, i64 [[VL]])
+// CHECK-RV64-NEXT: ret <vscale x 1 x bfloat> [[TMP0]]
+//
+vbfloat16mf4_t test_vfncvt_f_f_w_bf16mf4(vfloat32mf2_t vs2, size_t vl) {
+ return __riscv_vfncvt_f_f_w_bf16mf4(vs2, vl);
+}
+
+// CHECK-RV64-LABEL: define dso_local <vscale x 2 x bfloat> @test_vfncvt_f_f_w_bf16mf2(
+// CHECK-RV64-SAME: <vscale x 2 x float> [[VS2:%.*]], i64 noundef [[VL:%.*]]) #[[ATTR0]] {
+// CHECK-RV64-NEXT: [[ENTRY:.*:]]
+// CHECK-RV64-NEXT: [[TMP0:%.*]] = call <vscale x 2 x bfloat> @llvm.riscv.vfncvt.f.f.w.nxv2bf16.nxv2f32.i64(<vscale x 2 x bfloat> poison, <vscale x 2 x float> [[VS2]], i64 7, i64 [[VL]])
+// CHECK-RV64-NEXT: ret <vscale x 2 x bfloat> [[TMP0]]
+//
+vbfloat16mf2_t test_vfncvt_f_f_w_bf16mf2(vfloat32m1_t vs2, size_t vl) {
+ return __riscv_vfncvt_f_f_w_bf16mf2(vs2, vl);
+}
+
+// CHECK-RV64-LABEL: define dso_local <vscale x 4 x bfloat> @test_vfncvt_f_f_w_bf16m1(
+// CHECK-RV64-SAME: <vscale x 4 x float> [[VS2:%.*]], i64 noundef [[VL:%.*]]) #[[ATTR0]] {
+// CHECK-RV64-NEXT: [[ENTRY:.*:]]
+// CHECK-RV64-NEXT: [[TMP0:%.*]] = call <vscale x 4 x bfloat> @llvm.riscv.vfncvt.f.f.w.nxv4bf16.nxv4f32.i64(<vscale x 4 x bfloat> poison, <vscale x 4 x float> [[VS2]], i64 7, i64 [[VL]])
+// CHECK-RV64-NEXT: ret <vscale x 4 x bfloat> [[TMP0]]
+//
+vbfloat16m1_t test_vfncvt_f_f_w_bf16m1(vfloat32m2_t vs2, size_t vl) {
+ return __riscv_vfncvt_f_f_w_bf16m1(vs2, vl);
+}
+
+// CHECK-RV64-LABEL: define dso_local <vscale x 8 x bfloat> @test_vfncvt_f_f_w_bf16m2(
+// CHECK-RV64-SAME: <vscale x 8 x float> [[VS2:%.*]], i64 noundef [[VL:%.*]]) #[[ATTR0]] {
+// CHECK-RV64-NEXT: [[ENTRY:.*:]]
+// CHECK-RV64-NEXT: [[TMP0:%.*]] = call <vscale x 8 x bfloat> @llvm.riscv.vfncvt.f.f.w.nxv8bf16.nxv8f32.i64(<vscale x 8 x bfloat> poison, <vscale x 8 x float> [[VS2]], i64 7, i64 [[VL]])
+// CHECK-RV64-NEXT: ret <vscale x 8 x bfloat> [[TMP0]]
+//
+vbfloat16m2_t test_vfncvt_f_f_w_bf16m2(vfloat32m4_t vs2, size_t vl) {
+ return __riscv_vfncvt_f_f_w_bf16m2(vs2, vl);
+}
+
+// CHECK-RV64-LABEL: define dso_local <vscale x 16 x bfloat> @test_vfncvt_f_f_w_bf16m4(
+// CHECK-RV64-SAME: <vscale x 16 x float> [[VS2:%.*]], i64 noundef [[VL:%.*]]) #[[ATTR0]] {
+// CHECK-RV64-NEXT: [[ENTRY:.*:]]
+// CHECK-RV64-NEXT: [[TMP0:%.*]] = call <vscale x 16 x bfloat> @llvm.riscv.vfncvt.f.f.w.nxv16bf16.nxv16f32.i64(<vscale x 16 x bfloat> poison, <vscale x 16 x float> [[VS2]], i64 7, i64 [[VL]])
+// CHECK-RV64-NEXT: ret <vscale x 16 x bfloat> [[TMP0]]
+//
+vbfloat16m4_t test_vfncvt_f_f_w_bf16m4(vfloat32m8_t vs2, size_t vl) {
+ return __riscv_vfncvt_f_f_w_bf16m4(vs2, vl);
+}
+
+// CHECK-RV64-LABEL: define dso_local <vscale x 1 x i8> @test_vfncvt_x_f_w_bf16mf4_i8mf8_m(
+// CHECK-RV64-SAME: <vscale x 1 x i1> [[VM:%.*]], <vscale x 1 x bfloat> [[VS2:%.*]], i64 noundef [[VL:%.*]]) #[[ATTR0]] {
+// CHECK-RV64-NEXT: [[ENTRY:.*:]]
+// CHECK-RV64-NEXT: [[TMP0:%.*]] = call <vscale x 1 x i8> @llvm.riscv.vfncvt.x.f.w.mask.nxv1i8.nxv1bf16.i64(<vscale x 1 x i8> poison, <vscale x 1 x bfloat> [[VS2]], <vscale x 1 x i1> [[VM]], i64 7, i64 [[VL]], i64 3)
+// CHECK-RV64-NEXT: ret <vscale x 1 x i8> [[TMP0]]
+//
+vint8mf8_t test_vfncvt_x_f_w_bf16mf4_i8mf8_m(vbool64_t vm, vbfloat16mf4_t vs2,
+ size_t vl) {
+ return __riscv_vfncvt_x_f_w_bf16mf4_i8mf8_m(vm, vs2, vl);
+}
+
+// CHECK-RV64-LABEL: define dso_local <vscale x 2 x i8> @test_vfncvt_x_f_w_bf16mf2_i8mf4_m(
+// CHECK-RV64-SAME: <vscale x 2 x i1> [[VM:%.*]], <vscale x 2 x bfloat> [[VS2:%.*]], i64 noundef [[VL:%.*]]) #[[ATTR0]] {
+// CHECK-RV64-NEXT: [[ENTRY:.*:]]
+// CHECK-RV64-NEXT: [[TMP0:%.*]] = call <vscale x 2 x i8> @llvm.riscv.vfncvt.x.f.w.mask.nxv2i8.nxv2bf16.i64(<vscale x 2 x i8> poison, <vscale x 2 x bfloat> [[VS2]], <vscale x 2 x i1> [[VM]], i64 7, i64 [[VL]], i64 3)
+// CHECK-RV64-NEXT: ret <vscale x 2 x i8> [[TMP0]]
+//
+vint8mf4_t test_vfncvt_x_f_w_bf16mf2_i8mf4_m(vbool32_t vm, vbfloat16mf2_t vs2,
+ size_t vl) {
+ return __riscv_vfncvt_x_f_w_bf16mf2_i8mf4_m(vm, vs2, vl);
+}
+
+// CHECK-RV64-LABEL: define dso_local <vscale x 4 x i8> @test_vfncvt_x_f_w_bf16m1_i8mf2_m(
+// CHECK-RV64-SAME: <vscale x 4 x i1> [[VM:%.*]], <vscale x 4 x bfloat> [[VS2:%.*]], i64 noundef [[VL:%.*]]) #[[ATTR0]] {
+// CHECK-RV64-NEXT: [[ENTRY:.*:]]
+// CHECK-RV64-NEXT: [[TMP0:%.*]] = call <vscale x 4 x i8> @llvm.riscv.vfncvt.x.f.w.mask.nxv4i8.nxv4bf16.i64(<vscale x 4 x i8> poison, <vscale x 4 x bfloat> [[VS2]], <vscale x 4 x i1> [[VM]], i64 7, i64 [[VL]], i64 3)
+// CHECK-RV64-NEXT: ret <vscale x 4 x i8> [[TMP0]]
+//
+vint8mf2_t test_vfncvt_x_f_w_bf16m1_i8mf2_m(vbool16_t vm, vbfloat16m1_t vs2,
+ size_t vl) {
+ return __riscv_vfncvt_x_f_w_bf16m1_i8mf2_m(vm, vs2, vl);
+}
+
+// CHECK-RV64-LABEL: define dso_local <vscale x 8 x i8> @test_vfncvt_x_f_w_bf16m2_i8m1_m(
+// CHECK-RV64-SAME: <vscale x 8 x i1> [[VM:%.*]], <vscale x 8 x bfloat> [[VS2:%.*]], i64 noundef [[VL:%.*]]) #[[ATTR0]] {
+// CHECK-RV64-NEXT: [[ENTRY:.*:]]
+// CHECK-RV64-NEXT: [[TMP0:%.*]] = call <vscale x 8 x i8> @llvm.riscv.vfncvt.x.f.w.mask.nxv8i8.nxv8bf16.i64(<vscale x 8 x i8> poison, <vscale x 8 x bfloat> [[VS2]], <vscale x 8 x i1> [[VM]], i64 7, i64 [[VL]], i64 3)
+// CHECK-RV64-NEXT: ret <vscale x 8 x i8> [[TMP0]]
+//
+vint8m1_t test_vfncvt_x_f_w_bf16m2_i8m1_m(vbool8_t vm, vbfloat16m2_t vs2,
+ size_t vl) {
+ return __riscv_vfncvt_x_f_w_bf16m2_i8m1_m(vm, vs2, vl);
+}
+
+// CHECK-RV64-LABEL: define dso_local <vscale x 16 x i8> @test_vfncvt_x_f_w_bf16m4_i8m2_m(
+// CHECK-RV64-SAME: <vscale x 16 x i1> [[VM:%.*]], <vscale x 16 x bfloat> [[VS2:%.*]], i64 noundef [[VL:%.*]]) #[[ATTR0]] {
+// CHECK-RV64-NEXT: [[ENTRY:.*:]]
+// CHECK-RV64-NEXT: [[TMP0:%.*]] = call <vscale x 16 x i8> @llvm.riscv.vfncvt.x.f.w.mask.nxv16i8.nxv16bf16.i64(<vscale x 16 x i8> poison, <vscale x 16 x bfloat> [[VS2]], <vscale x 16 x i1> [[VM]], i64 7, i64 [[VL]], i64 3)
+// CHECK-RV64-NEXT: ret <vscale x 16 x i8> [[TMP0]]
+//
+vint8m2_t test_vfncvt_x_f_w_bf16m4_i8m2_m(vbool4_t vm, vbfloat16m4_t vs2,
+ size_t vl) {
+ return __riscv_vfncvt_x_f_w_bf16m4_i8m2_m(vm, vs2, vl);
+}
+
+// CHECK-RV64-LABEL: define dso_local <vscale x 32 x i8> @test_vfncvt_x_f_w_bf16m8_i8m4_m(
+// CHECK-RV64-SAME: <vscale x 32 x i1> [[VM:%.*]], <vscale x 32 x bfloat> [[VS2:%.*]], i64 noundef [[VL:%.*]]) #[[ATTR0]] {
+// CHECK-RV64-NEXT: [[ENTRY:.*:]]
+// CHECK-RV64-NEXT: [[TMP0:%.*]] = call <vscale x 32 x i8> @llvm.riscv.vfncvt.x.f.w.mask.nxv32i8.nxv32bf16.i64(<vscale x 32 x i8> poison, <vscale x 32 x bfloat> [[VS2]], <vscale x 32 x i1> [[VM]], i64 7, i64 [[VL]], i64 3)
+// CHECK-RV64-NEXT: ret <vscale x 32 x i8> [[TMP0]]
+//
+vint8m4_t test_vfncvt_x_f_w_bf16m8_i8m4_m(vbool2_t vm, vbfloat16m8_t vs2,
+ size_t vl) {
+ return __riscv_vfncvt_x_f_w_bf16m8_i8m4_m(vm, vs2, vl);
+}
+
+// CHECK-RV64-LABEL: define dso_local <vscale x 1 x i8> @test_vfncvt_xu_f_w_bf16mf4_u8mf8_m(
+// CHECK-RV64-SAME: <vscale x 1 x i1> [[VM:%.*]], <vscale x 1 x bfloat> [[VS2:%.*]], i64 noundef [[VL:%.*]]) #[[ATTR0]] {
+// CHECK-RV64-NEXT: [[ENTRY:.*:]]
+// CHECK-RV64-NEXT: [[TMP0:%.*]] = call <vscale x 1 x i8> @llvm.riscv.vfncvt.xu.f.w.mask.nxv1i8.nxv1bf16.i64(<vscale x 1 x i8> poison, <vscale x 1 x bfloat> [[VS2]], <vscale x 1 x i1> [[VM]], i64 7, i64 [[VL]], i64 3)
+// CHECK-RV64-NEXT: ret <vscale x 1 x i8> [[TMP0]]
+//
+vuint8mf8_t test_vfncvt_xu_f_w_bf16mf4_u8mf8_m(vbool64_t vm, vbfloat16mf4_t vs2,
+ size_t vl) {
+ return __riscv_vfncvt_xu_f_w_bf16mf4_u8mf8_m(vm, vs2, vl);
+}
+
+// CHECK-RV64-LABEL: define dso_local <vscale x 2 x i8> @test_vfncvt_xu_f_w_bf16mf2_u8mf4_m(
+// CHECK-RV64-SAME: <vscale x 2 x i1> [[VM:%.*]], <vscale x 2 x bfloat> [[VS2:%.*]], i64 noundef [[VL:%.*]]) #[[ATTR0]] {
+// CHECK-RV64-NEXT: [[ENTRY:.*:]]
+// CHECK-RV64-NEXT: [[TMP0:%.*]] = call <vscale x 2 x i8> @llvm.riscv.vfncvt.xu.f.w.mask.nxv2i8.nxv2bf16.i64(<vscale x 2 x i8> poison, <vscale x 2 x bfloat> [[VS2]], <vscale x 2 x i1> [[VM]], i64 7, i64 [[VL]], i64 3)
+// CHECK-RV64-NEXT: ret <vscale x 2 x i8> [[TMP0]]
+//
+vuint8mf4_t test_vfncvt_xu_f_w_bf16mf2_u8mf4_m(vbool32_t vm, vbfloat16mf2_t vs2,
+ size_t vl) {
+ return __riscv_vfncvt_xu_f_w_bf16mf2_u8mf4_m(vm, vs2, vl);
+}
+
+// CHECK-RV64-LABEL: define dso_local <vscale x 4 x i8> @test_vfncvt_xu_f_w_bf16m1_u8mf2_m(
+// CHECK-RV64-SAME: <vscale x 4 x i1> [[VM:%.*]], <vscale x 4 x bfloat> [[VS2:%.*]], i64 noundef [[VL:%.*]]) #[[ATTR0]] {
+// CHECK-RV64-NEXT: [[ENTRY:.*:]]
+// CHECK-RV64-NEXT: [[TMP0:%.*]] = call <vscale x 4 x i8> @llvm.riscv.vfncvt.xu.f.w.mask.nxv4i8.nxv4bf16.i64(<vscale x 4 x i8> poison, <vscale x 4 x bfloat> [[VS2]], <vscale x 4 x i1> [[VM]], i64 7, i64 [[VL]], i64 3)
+// CHECK-RV64-NEXT: ret <vscale x 4 x i8> [[TMP0]]
+//
+vuint8mf2_t test_vfncvt_xu_f_w_bf16m1_u8mf2_m(vbool16_t vm, vbfloat16m1_t vs2,
+ size_t vl) {
+ return __riscv_vfncvt_xu_f_w_bf16m1_u8mf2_m(vm, vs2, vl);
+}
+
+// CHECK-RV64-LABEL: define dso_local <vscale x 8 x i8> @test_vfncvt_xu_f_w_bf16m2_u8m1_m(
+// CHECK-RV64-SAME: <vscale x 8 x i1> [[VM:%.*]], <vscale x 8 x bfloat> [[VS2:%.*]], i64 noundef [[VL:%.*]]) #[[ATTR0]] {
+// CHECK-RV64-NEXT: [[ENTRY:.*:]]
+// CHECK-RV64-NEXT: [[TMP0:%.*]] = call <vscale x 8 x i8> @llvm.riscv.vfncvt.xu.f.w.mask.nxv8i8.nxv8bf16.i64(<vscale x 8 x i8> poison, <vscale x 8 x bfloat> [[VS2]], <vscale x 8 x i1> [[VM]], i64 7, i64 [[VL]], i64 3)
+// CHECK-RV64-NEXT: ret <vscale x 8 x i8> [[TMP0]]
+//
+vuint8m1_t test_vfncvt_xu_f_w_bf16m2_u8m1_m(vbool8_t vm, vbfloat16m2_t vs2,
+ size_t vl) {
+ return __riscv_vfncvt_xu_f_w_bf16m2_u8m1_m(vm, vs2, vl);
+}
+
+// CHECK-RV64-LABEL: define dso_local <vscale x 16 x i8> @test_vfncvt_xu_f_w_bf16m4_u8m2_m(
+// CHECK-RV64-SAME: <vscale x 16 x i1> [[VM:%.*]], <vscale x 16 x bfloat> [[VS2:%.*]], i64 noundef [[VL:%.*]]) #[[ATTR0]] {
+// CHECK-RV64-NEXT: [[ENTRY:.*:]]
+// CHECK-RV64-NEXT: [[TMP0:%.*]] = call <vscale x 16 x i8> @llvm.riscv.vfncvt.xu.f.w.mask.nxv16i8.nxv16bf16.i64(<vscale x 16 x i8> poison, <vscale x 16 x bfloat> [[VS2]], <vscale x 16 x i1> [[VM]], i64 7, i64 [[VL]], i64 3)
+// CHECK-RV64-NEXT: ret <vscale x 16 x i8> [[TMP0]]
+//
+vuint8m2_t test_vfncvt_xu_f_w_bf16m4_u8m2_m(vbool4_t vm, vbfloat16m4_t vs2,
+ size_t vl) {
+ return __riscv_vfncvt_xu_f_w_bf16m4_u8m2_m(vm, vs2, vl);
+}
+
+// CHECK-RV64-LABEL: define dso_local <vscale x 32 x i8> @test_vfncvt_xu_f_w_bf16m8_u8m4_m(
+// CHECK-RV64-SAME: <vscale x 32 x i1> [[VM:%.*]], <vscale x 32 x bfloat> [[VS2:%.*]], i64 noundef [[VL:%.*]]) #[[ATTR0]] {
+// CHECK-RV64-NEXT: [[ENTRY:.*:]]
+// CHECK-RV64-NEXT: [[TMP0:%.*]] = call <vscale x 32 x i8> @llvm.riscv.vfncvt.xu.f.w.mask.nxv32i8.nxv32bf16.i64(<vscale x 32 x i8> poison, <vscale x 32 x bfloat> [[VS2]], <vscale x 32 x i1> [[VM]], i64 7, i64 [[VL]], i64 3)
+// CHECK-RV64-NEXT: ret <vscale x 32 x i8> [[TMP0]]
+//
+vuint8m4_t test_vfncvt_xu_f_w_bf16m8_u8m4_m(vbool2_t vm, vbfloat16m8_t vs2,
+ size_t vl) {
+ return __riscv_vfncvt_xu_f_w_bf16m8_u8m4_m(vm, vs2, vl);
+}
+
+// CHECK-RV64-LABEL: define dso_local <vscale x 1 x bfloat> @test_vfncvt_f_f_w_bf16mf4_m(
+// CHECK-RV64-SAME: <vscale x 1 x i1> [[VM:%.*]], <vscale x 1 x float> [[VS2:%.*]], i64 noundef [[VL:%.*]]) #[[ATTR0]] {
+// CHECK-RV64-NEXT: [[ENTRY:.*:]]
+// CHECK-RV64-NEXT: [[TMP0:%.*]] = call <vscale x 1 x bfloat> @llvm.riscv.vfncvt.f.f.w.mask.nxv1bf16.nxv1f32.i64(<vscale x 1 x bfloat> poison, <vscale x 1 x float> [[VS2]], <vscale x 1 x i1> [[VM]], i64 7, i64 [[VL]], i64 3)
+// CHECK-RV64-NEXT: ret <vscale x 1 x bfloat> [[TMP0]]
+//
+vbfloat16mf4_t test_vfncvt_f_f_w_bf16mf4_m(vbool64_t vm, vfloat32mf2_t vs2,
+ size_t vl) {
+ return __riscv_vfncvt_f_f_w_bf16mf4_m(vm, vs2, vl);
+}
+
+// CHECK-RV64-LABEL: define dso_local <vscale x 2 x bfloat> @test_vfncvt_f_f_w_bf16mf2_m(
+// CHECK-RV64-SAME: <vscale x 2 x i1> [[VM:%.*]], <vscale x 2 x float> [[VS2:%.*]], i64 noundef [[VL:%.*]]) #[[ATTR0]] {
+// CHECK-RV64-NEXT: [[ENTRY:.*:]]
+// CHECK-RV64-NEXT: [[TMP0:%.*]] = call <vscale x 2 x bfloat> @llvm.riscv.vfncvt.f.f.w.mask.nxv2bf16.nxv2f32.i64(<vscale x 2 x bfloat> poison, <vscale x 2 x float> [[VS2]], <vscale x 2 x i1> [[VM]], i64 7, i64 [[VL]], i64 3)
+// CHECK-RV64-NEXT: ret <vscale x 2 x bfloat> [[TMP0]]
+//
+vbfloat16mf2_t test_vfncvt_f_f_w_bf16mf2_m(vbool32_t vm, vfloat32m1_t vs2,
+ size_t vl) {
+ return __riscv_vfncvt_f_f_w_bf16mf2_m(vm, vs2, vl);
+}
+
+// CHECK-RV64-LABEL: define dso_local <vscale x 4 x bfloat> @test_vfncvt_f_f_w_bf16m1_m(
+// CHECK-RV64-SAME: <vscale x 4 x i1> [[VM:%.*]], <vscale x 4 x float> [[VS2:%.*]], i64 noundef [[VL:%.*]]) #[[ATTR0]] {
+// CHECK-RV64-NEXT: [[ENTRY:.*:]]
+// CHECK-RV64-NEXT: [[TMP0:%.*]] = call <vscale x 4 x bfloat> @llvm.riscv.vfncvt.f.f.w.mask.nxv4bf16.nxv4f32.i64(<vscale x 4 x bfloat> poison, <vscale x 4 x float> [[VS2]], <vscale x 4 x i1> [[VM]], i64 7, i64 [[VL]], i64 3)
+// CHECK-RV64-NEXT: ret <vscale x 4 x bfloat> [[TMP0]]
+//
+vbfloat16m1_t test_vfncvt_f_f_w_bf16m1_m(vbool16_t vm, vfloat32m2_t vs2,
+ size_t vl) {
+ return __riscv_vfncvt_f_f_w_bf16m1_m(vm, vs2, vl);
+}
+
+// CHECK-RV64-LABEL: define dso_local <vscale x 8 x bfloat> @test_vfncvt_f_f_w_bf16m2_m(
+// CHECK-RV64-SAME: <vscale x 8 x i1> [[VM:%.*]], <vscale x 8 x float> [[VS2:%.*]], i64 noundef [[VL:%.*]]) #[[ATTR0]] {
+// CHECK-RV64-NEXT: [[ENTRY:.*:]]
+// CHECK-RV64-NEXT: [[TMP0:%.*]] = call <vscale x 8 x bfloat> @llvm.riscv.vfncvt.f.f.w.mask.nxv8bf16.nxv8f32.i64(<vscale x 8 x bfloat> poison, <vscale x 8 x float> [[VS2]], <vscale x 8 x i1> [[VM]], i64 7, i64 [[VL]], i64 3)
+// CHECK-RV64-NEXT: ret <vscale x 8 x bfloat> [[TMP0]]
+//
+vbfloat16m2_t test_vfncvt_f_f_w_bf16m2_m(vbool8_t vm, vfloat32m4_t vs2,
+ size_t vl) {
+ return __riscv_vfncvt_f_f_w_bf16m2_m(vm, vs2, vl);
+}
+
+// CHECK-RV64-LABEL: define dso_local <vscale x 16 x bfloat> @test_vfncvt_f_f_w_bf16m4_m(
+// CHECK-RV64-SAME: <vscale x 16 x i1> [[VM:%.*]], <vscale x 16 x float> [[VS2:%.*]], i64 noundef [[VL:%.*]]) #[[ATTR0]] {
+// CHECK-RV64-NEXT: [[ENTRY:.*:]]
+// CHECK-RV64-NEXT: [[TMP0:%.*]] = call <vscale x 16 x bfloat> @llvm.riscv.vfncvt.f.f.w.mask.nxv16bf16.nxv16f32.i64(<vscale x 16 x bfloat> poison, <vscale x 16 x float> [[VS2]], <vscale x 16 x i1> [[VM]], i64 7, i64 [[VL]], i64 3)
+// CHECK-RV64-NEXT: ret <vscale x 16 x bfloat> [[TMP0]]
+//
+vbfloat16m4_t test_vfncvt_f_f_w_bf16m4_m(vbool4_t vm, vfloat32m8_t vs2,
+ size_t vl) {
+ return __riscv_vfncvt_f_f_w_bf16m4_m(vm, vs2, vl);
+}
+
+// CHECK-RV64-LABEL: define dso_local <vscale x 1 x i8> @test_vfncvt_x_f_w_bf16mf4_i8mf8_rm(
+// CHECK-RV64-SAME: <vscale x 1 x bfloat> [[VS2:%.*]], i64 noundef [[VL:%.*]]) #[[ATTR0]] {
+// CHECK-RV64-NEXT: [[ENTRY:.*:]]
+// CHECK-RV64-NEXT: [[TMP0:%.*]] = call <vscale x 1 x i8> @llvm.riscv.vfncvt.x.f.w.nxv1i8.nxv1bf16.i64(<vscale x 1 x i8> poison, <vscale x 1 x bfloat> [[VS2]], i64 0, i64 [[VL]])
+// CHECK-RV64-NEXT: ret <vscale x 1 x i8> [[TMP0]]
+//
+vint8mf8_t test_vfncvt_x_f_w_bf16mf4_i8mf8_rm(vbfloat16mf4_t vs2, size_t vl) {
+ return __riscv_vfncvt_x_f_w_bf16mf4_i8mf8_rm(vs2, __RISCV_FRM_RNE, vl);
+}
+
+// CHECK-RV64-LABEL: define dso_local <vscale x 2 x i8> @test_vfncvt_x_f_w_bf16mf2_i8mf4_rm(
+// CHECK-RV64-SAME: <vscale x 2 x bfloat> [[VS2:%.*]], i64 noundef [[VL:%.*]]) #[[ATTR0]] {
+// CHECK-RV64-NEXT: [[ENTRY:.*:]]
+// CHECK-RV64-NEXT: [[TMP0:%.*]] = call <vscale x 2 x i8> @llvm.riscv.vfncvt.x.f.w.nxv2i8.nxv2bf16.i64(<vscale x 2 x i8> poison, <vscale x 2 x bfloat> [[VS2]], i64 0, i64 [[VL]])
+// CHECK-RV64-NEXT: ret <vscale x 2 x i8> [[TMP0]]
+//
+vint8mf4_t test_vfncvt_x_f_w_bf16mf2_i8mf4_rm(vbfloat16mf2_t vs2, size_t vl) {
+ return __riscv_vfncvt_x_f_w_bf16mf2_i8mf4_rm(vs2, __RISCV_FRM_RNE, vl);
+}
+
+// CHECK-RV64-LABEL: define dso_local <vscale x 4 x i8> @test_vfncvt_x_f_w_bf16m1_i8mf2_rm(
+// CHECK-RV64-SAME: <vscale x 4 x bfloat> [[VS2:%.*]], i64 noundef [[VL:%.*]]) #[[ATTR0]] {
+// CHECK-RV64-NEXT: [[ENTRY:.*:]]
+// CHECK-RV64-NEXT: [[TMP0:%.*]] = call <vscale x 4 x i8> @llvm.riscv.vfncvt.x.f.w.nxv4i8.nxv4bf16.i64(<vscale x 4 x i8> poison, <vscale x 4 x bfloat> [[VS2]], i64 0, i64 [[VL]])
+// CHECK-RV64-NEXT: ret <vscale x 4 x i8> [[TMP0]]
+//
+vint8mf2_t test_vfncvt_x_f_w_bf16m1_i8mf2_rm(vbfloat16m1_t vs2, size_t vl) {
+ return __riscv_vfncvt_x_f_w_bf16m1_i8mf2_rm(vs2, __RISCV_FRM_RNE, vl);
+}
+
+// CHECK-RV64-LABEL: define dso_local <vscale x 8 x i8> @test_vfncvt_x_f_w_bf16m2_i8m1_rm(
+// CHECK-RV64-SAME: <vscale x 8 x bfloat> [[VS2:%.*]], i64 noundef [[VL:%.*]]) #[[ATTR0]] {
+// CHECK-RV64-NEXT: [[ENTRY:.*:]]
+// CHECK-RV64-NEXT: [[TMP0:%.*]] = call <vscale x 8 x i8> @llvm.riscv.vfncvt.x.f.w.nxv8i8.nxv8bf16.i64(<vscale x 8 x i8> poison, <vscale x 8 x bfloat> [[VS2]], i64 0, i64 [[VL]])
+// CHECK-RV64-NEXT: ret <vscale x 8 x i8> [[TMP0]]
+//
+vint8m1_t test_vfncvt_x_f_w_bf16m2_i8m1_rm(vbfloat16m2_t vs2, size_t vl) {
+ return __riscv_vfncvt_x_f_w_bf16m2_i8m1_rm(vs2, __RISCV_FRM_RNE, vl);
+}
+
+// CHECK-RV64-LABEL: define dso_local <vscale x 16 x i8> @test_vfncvt_x_f_w_bf16m4_i8m2_rm(
+// CHECK-RV64-SAME: <vscale x 16 x bfloat> [[VS2:%.*]], i64 noundef [[VL:%.*]]) #[[ATTR0]] {
+// CHECK-RV64-NEXT: [[ENTRY:.*:]]
+// CHECK-RV64-NEXT: [[TMP0:%.*]] = call <vscale x 16 x i8> @llvm.riscv.vfncvt.x.f.w.nxv16i8.nxv16bf16.i64(<vscale x 16 x i8> poison, <vscale x 16 x bfloat> [[VS2]], i64 0, i64 [[VL]])
+// CHECK-RV64-NEXT: ret <vscale x 16 x i8> [[TMP0]]
+//
+vint8m2_t test_vfncvt_x_f_w_bf16m4_i8m2_rm(vbfloat16m4_t vs2, size_t vl) {
+ return __riscv_vfncvt_x_f_w_bf16m4_i8m2_rm(vs2, __RISCV_FRM_RNE, vl);
+}
+
+// CHECK-RV64-LABEL: define dso_local <vscale x 32 x i8> @test_vfncvt_x_f_w_bf16m8_i8m4_rm(
+// CHECK-RV64-SAME: <vscale x 32 x bfloat> [[VS2:%.*]], i64 noundef [[VL:%.*]]) #[[ATTR0]] {
+// CHECK-RV64-NEXT: [[ENTRY:.*:]]
+// CHECK-RV64-NEXT: [[TMP0:%.*]] = call <vscale x 32 x i8> @llvm.riscv.vfncvt.x.f.w.nxv32i8.nxv32bf16.i64(<vscale x 32 x i8> poison, <vscale x 32 x bfloat> [[VS2]], i64 0, i64 [[VL]])
+// CHECK-RV64-NEXT: ret <vscale x 32 x i8> [[TMP0]]
+//
+vint8m4_t test_vfncvt_x_f_w_bf16m8_i8m4_rm(vbfloat16m8_t vs2, size_t vl) {
+ return __riscv_vfncvt_x_f_w_bf16m8_i8m4_rm(vs2, __RISCV_FRM_RNE, vl);
+}
+
+// CHECK-RV64-LABEL: define dso_local <vscale x 1 x i8> @test_vfncvt_xu_f_w_bf16mf4_u8mf8_rm(
+// CHECK-RV64-SAME: <vscale x 1 x bfloat> [[VS2:%.*]], i64 noundef [[VL:%.*]]) #[[ATTR0]] {
+// CHECK-RV64-NEXT: [[ENTRY:.*:]]
+// CHECK-RV64-NEXT: [[TMP0:%.*]] = call <vscale x 1 x i8> @llvm.riscv.vfncvt.xu.f.w.nxv1i8.nxv1bf16.i64(<vscale x 1 x i8> poison, <vscale x 1 x bfloat> [[VS2]], i64 0, i64 [[VL]])
+// CHECK-RV64-NEXT: ret <vscale x 1 x i8> [[TMP0]]
+//
+vuint8mf8_t test_vfncvt_xu_f_w_bf16mf4_u8mf8_rm(vbfloat16mf4_t vs2, size_t vl) {
+ return __riscv_vfncvt_xu_f_w_bf16mf4_u8mf8_rm(vs2, __RISCV_FRM_RNE, vl);
+}
+
+// CHECK-RV64-LABEL: define dso_local <vscale x 2 x i8> @test_vfncvt_xu_f_w_bf16mf2_u8mf4_rm(
+// CHECK-RV64-SAME: <vscale x 2 x bfloat> [[VS2:%.*]], i64 noundef [[VL:%.*]]) #[[ATTR0]] {
+// CHECK-RV64-NEXT: [[ENTRY:.*:]]
+// CHECK-RV64-NEXT: [[TMP0:%.*]] = call <vscale x 2 x i8> @llvm.riscv.vfncvt.xu.f.w.nxv2i8.nxv2bf16.i64(<vscale x 2 x i8> poison, <vscale x 2 x bfloat> [[VS2]], i64 0, i64 [[VL]])
+// CHECK-RV64-NEXT: ret <vscale x 2 x i8> [[TMP0]]
+//
+vuint8mf4_t test_vfncvt_xu_f_w_bf16mf2_u8mf4_rm(vbfloat16mf2_t vs2, size_t vl) {
+ return __riscv_vfncvt_xu_f_w_bf16mf2_u8mf4_rm(vs2, __RISCV_FRM_RNE, vl);
+}
+
+// CHECK-RV64-LABEL: define dso_local <vscale x 4 x i8> @test_vfncvt_xu_f_w_bf16m1_u8mf2_rm(
+// CHECK-RV64-SAME: <vscale x 4 x bfloat> [[VS2:%.*]], i64 noundef [[VL:%.*]]) #[[ATTR0]] {
+// CHECK-RV64-NEXT: [[ENTRY:.*:]]
+// CHECK-RV64-NEXT: [[TMP0:%.*]] = call <vscale x 4 x i8> @llvm.riscv.vfncvt.xu.f.w.nxv4i8.nxv4bf16.i64(<vscale x 4 x i8> poison, <vscale x 4 x bfloat> [[VS2]], i64 0, i64 [[VL]])
+// CHECK-RV64-NEXT: ret <vscale x 4 x i8> [[TMP0]]
+//
+vuint8mf2_t test_vfncvt_xu_f_w_bf16m1_u8mf2_rm(vbfloat16m1_t vs2, size_t vl) {
+ return __riscv_vfncvt_xu_f_w_bf16m1_u8mf2_rm(vs2, __RISCV_FRM_RNE, vl);
+}
+
+// CHECK-RV64-LABEL: define dso_local <vscale x 8 x i8> @test_vfncvt_xu_f_w_bf16m2_u8m1_rm(
+// CHECK-RV64-SAME: <vscale x 8 x bfloat> [[VS2:%.*]], i64 noundef [[VL:%.*]]) #[[ATTR0]] {
+// CHECK-RV64-NEXT: [[ENTRY:.*:]]
+// CHECK-RV64-NEXT: [[TMP0:%.*]] = call <vscale x 8 x i8> @llvm.riscv.vfncvt.xu.f.w.nxv8i8.nxv8bf16.i64(<vscale x 8 x i8> poison, <vscale x 8 x bfloat> [[VS2]], i64 0, i64 [[VL]])
+// CHECK-RV64-NEXT: ret <vscale x 8 x i8> [[TMP0]]
+//
+vuint8m1_t test_vfncvt_xu_f_w_bf16m2_u8m1_rm(vbfloat16m2_t vs2, size_t vl) {
+ return __riscv_vfncvt_xu_f_w_bf16m2_u8m1_rm(vs2, __RISCV_FRM_RNE, vl);
+}
+
+// CHECK-RV64-LABEL: define dso_local <vscale x 16 x i8> @test_vfncvt_xu_f_w_bf16m4_u8m2_rm(
+// CHECK-RV64-SAME: <vscale x 16 x bfloat> [[VS2:%.*]], i64 noundef [[VL:%.*]]) #[[ATTR0]] {
+// CHECK-RV64-NEXT: [[ENTRY:.*:]]
+// CHECK-RV64-NEXT: [[TMP0:%.*]] = call <vscale x 16 x i8> @llvm.riscv.vfncvt.xu.f.w.nxv16i8.nxv16bf16.i64(<vscale x 16 x i8> poison, <vscale x 16 x bfloat> [[VS2]], i64 0, i64 [[VL]])
+// CHECK-RV64-NEXT: ret <vscale x 16 x i8> [[TMP0]]
+//
+vuint8m2_t test_vfncvt_xu_f_w_bf16m4_u8m2_rm(vbfloat16m4_t vs2, size_t vl) {
+ return __riscv_vfncvt_xu_f_w_bf16m4_u8m2_rm(vs2, __RISCV_FRM_RNE, vl);
+}
+
+// CHECK-RV64-LABEL: define dso_local <vscale x 32 x i8> @test_vfncvt_xu_f_w_bf16m8_u8m4_rm(
+// CHECK-RV64-SAME: <vscale x 32 x bfloat> [[VS2:%.*]], i64 noundef [[VL:%.*]]) #[[ATTR0]] {
+// CHECK-RV64-NEXT: [[ENTRY:.*:]]
+// CHECK-RV64-NEXT: [[TMP0:%.*]] = call <vscale x 32 x i8> @llvm.riscv.vfncvt.xu.f.w.nxv32i8.nxv32bf16.i64(<vscale x 32 x i8> poison, <vscale x 32 x bfloat> [[VS2]], i64 0, i64 [[VL]])
+// CHECK-RV64-NEXT: ret <vscale x 32 x i8> [[TMP0]]
+//
+vuint8m4_t test_vfncvt_xu_f_w_bf16m8_u8m4_rm(vbfloat16m8_t vs2, size_t vl) {
+ return __riscv_vfncvt_xu_f_w_bf16m8_u8m4_rm(vs2, __RISCV_FRM_RNE, vl);
+}
+
+// CHECK-RV64-LABEL: define dso_local <vscale x 1 x bfloat> @test_vfncvt_f_f_w_bf16mf4_rm(
+// CHECK-RV64-SAME: <vscale x 1 x float> [[VS2:%.*]], i64 noundef [[VL:%.*]]) #[[ATTR0]] {
+// CHECK-RV64-NEXT: [[ENTRY:.*:]]
+// CHECK-RV64-NEXT: [[TMP0:%.*]] = call <vscale x 1 x bfloat> @llvm.riscv.vfncvt.f.f.w.nxv1bf16.nxv1f32.i64(<vscale x 1 x bfloat> poison, <vscale x 1 x float> [[VS2]], i64 0, i64 [[VL]])
+// CHECK-RV64-NEXT: ret <vscale x 1 x bfloat> [[TMP0]]
+//
+vbfloat16mf4_t test_vfncvt_f_f_w_bf16mf4_rm(vfloat32mf2_t vs2, size_t vl) {
+ return __riscv_vfncvt_f_f_w_bf16mf4_rm(vs2, __RISCV_FRM_RNE, vl);
+}
+
+// CHECK-RV64-LABEL: define dso_local <vscale x 2 x bfloat> @test_vfncvt_f_f_w_bf16mf2_rm(
+// CHECK-RV64-SAME: <vscale x 2 x float> [[VS2:%.*]], i64 noundef [[VL:%.*]]) #[[ATTR0]] {
+// CHECK-RV64-NEXT: [[ENTRY:.*:]]
+// CHECK-RV64-NEXT: [[TMP0:%.*]] = call <vscale x 2 x bfloat> @llvm.riscv.vfncvt.f.f.w.nxv2bf16.nxv2f32.i64(<vscale x 2 x bfloat> poison, <vscale x 2 x float> [[VS2]], i64 0, i64 [[VL]])
+// CHECK-RV64-NEXT: ret <vscale x 2 x bfloat> [[TMP0]]
+//
+vbfloat16mf2_t test_vfncvt_f_f_w_bf16mf2_rm(vfloat32m1_t vs2, size_t vl) {
+ return __riscv_vfncvt_f_f_w_bf16mf2_rm(vs2, __RISCV_FRM_RNE, vl);
+}
+
+// CHECK-RV64-LABEL: define dso_local <vscale x 4 x bfloat> @test_vfncvt_f_f_w_bf16m1_rm(
+// CHECK-RV64-SAME: <vscale x 4 x float> [[VS2:%.*]], i64 noundef [[VL:%.*]]) #[[ATTR0]] {
+// CHECK-RV64-NEXT: [[ENTRY:.*:]]
+// CHECK-RV64-NEXT: [[TMP0:%.*]] = call <vscale x 4 x bfloat> @llvm.riscv.vfncvt.f.f.w.nxv4bf16.nxv4f32.i64(<vscale x 4 x bfloat> poison, <vscale x 4 x float> [[VS2]], i64 0, i64 [[VL]])
+// CHECK-RV64-NEXT: ret <vscale x 4 x bfloat> [[TMP0]]
+//
+vbfloat16m1_t test_vfncvt_f_f_w_bf16m1_rm(vfloat32m2_t vs2, size_t vl) {
+ return __riscv_vfncvt_f_f_w_bf16m1_rm(vs2, __RISCV_FRM_RNE, vl);
+}
+
+// CHECK-RV64-LABEL: define dso_local <vscale x 8 x bfloat> @test_vfncvt_f_f_w_bf16m2_rm(
+// CHECK-RV64-SAME: <vscale x 8 x float> [[VS2:%.*]], i64 noundef [[VL:%.*]]) #[[ATTR0]] {
+// CHECK-RV64-NEXT: [[ENTRY:.*:]]
+// CHECK-RV64-NEXT: [[TMP0:%.*]] = call <vscale x 8 x bfloat> @llvm.riscv.vfncvt.f.f.w.nxv8bf16.nxv8f32.i64(<vscale x 8 x bfloat> poison, <vscale x 8 x float> [[VS2]], i64 0, i64 [[VL]])
+// CHECK-RV64-NEXT: ret <vscale x 8 x bfloat> [[TMP0]]
+//
+vbfloat16m2_t test_vfncvt_f_f_w_bf16m2_rm(vfloat32m4_t vs2, size_t vl) {
+ return __riscv_vfncvt_f_f_w_bf16m2_rm(vs2, __RISCV_FRM_RNE, vl);
+}
+
+// CHECK-RV64-LABEL: define dso_local <vscale x 16 x bfloat> @test_vfncvt_f_f_w_bf16m4_rm(
+// CHECK-RV64-SAME: <vscale x 16 x float> [[VS2:%.*]], i64 noundef [[VL:%.*]]) #[[ATTR0]] {
+// CHECK-RV64-NEXT: [[ENTRY:.*:]]
+// CHECK-RV64-NEXT: [[TMP0:%.*]] = call <vscale x 16 x bfloat> @llvm.riscv.vfncvt.f.f.w.nxv16bf16.nxv16f32.i64(<vscale x 16 x bfloat> poison, <vscale x 16 x float> [[VS2]], i64 0, i64 [[VL]])
+// CHECK-RV64-NEXT: ret <vscale x 16 x bfloat> [[TMP0]]
+//
+vbfloat16m4_t test_vfncvt_f_f_w_bf16m4_rm(vfloat32m8_t vs2, size_t vl) {
+ return __riscv_vfncvt_f_f_w_bf16m4_rm(vs2, __RISCV_FRM_RNE, vl);
+}
+
+// CHECK-RV64-LABEL: define dso_local <vscale x 1 x i8> @test_vfncvt_x_f_w_bf16mf4_i8mf8_rm_m(
+// CHECK-RV64-SAME: <vscale x 1 x i1> [[VM:%.*]], <vscale x 1 x bfloat> [[VS2:%.*]], i64 noundef [[VL:%.*]]) #[[ATTR0]] {
+// CHECK-RV64-NEXT: [[ENTRY:.*:]]
+// CHECK-RV64-NEXT: [[TMP0:%.*]] = call <vscale x 1 x i8> @llvm.riscv.vfncvt.x.f.w.mask.nxv1i8.nxv1bf16.i64(<vscale x 1 x i8> poison, <vscale x 1 x bfloat> [[VS2]], <vscale x 1 x i1> [[VM]], i64 0, i64 [[VL]], i64 3)
+// CHECK-RV64-NEXT: ret <vscale x 1 x i8> [[TMP0]]
+//
+vint8mf8_t test_vfncvt_x_f_w_bf16mf4_i8mf8_rm_m(vbool64_t vm,
+ vbfloat16mf4_t vs2, size_t vl) {
+ return __riscv_vfncvt_x_f_w_bf16mf4_i8mf8_rm_m(vm, vs2, __RISCV_FRM_RNE, vl);
+}
+
+// CHECK-RV64-LABEL: define dso_local <vscale x 2 x i8> @test_vfncvt_x_f_w_bf16mf2_i8mf4_rm_m(
+// CHECK-RV64-SAME: <vscale x 2 x i1> [[VM:%.*]], <vscale x 2 x bfloat> [[VS2:%.*]], i64 noundef [[VL:%.*]]) #[[ATTR0]] {
+// CHECK-RV64-NEXT: [[ENTRY:.*:]]
+// CHECK-RV64-NEXT: [[TMP0:%.*]] = call <vscale x 2 x i8> @llvm.riscv.vfncvt.x.f.w.mask.nxv2i8.nxv2bf16.i64(<vscale x 2 x i8> poison, <vscale x 2 x bfloat> [[VS2]], <vscale x 2 x i1> [[VM]], i64 0, i64 [[VL]], i64 3)
+// CHECK-RV64-NEXT: ret <vscale x 2 x i8> [[TMP0]]
+//
+vint8mf4_t test_vfncvt_x_f_w_bf16mf2_i8mf4_rm_m(vbool32_t vm,
+ vbfloat16mf2_t vs2, size_t vl) {
+ return __riscv_vfncvt_x_f_w_bf16mf2_i8mf4_rm_m(vm, vs2, __RISCV_FRM_RNE, vl);
+}
+
+// CHECK-RV64-LABEL: define dso_local <vscale x 4 x i8> @test_vfncvt_x_f_w_bf16m1_i8mf2_rm_m(
+// CHECK-RV64-SAME: <vscale x 4 x i1> [[VM:%.*]], <vscale x 4 x bfloat> [[VS2:%.*]], i64 noundef [[VL:%.*]]) #[[ATTR0]] {
+// CHECK-RV64-NEXT: [[ENTRY:.*:]]
+// CHECK-RV64-NEXT: [[TMP0:%.*]] = call <vscale x 4 x i8> @llvm.riscv.vfncvt.x.f.w.mask.nxv4i8.nxv4bf16.i64(<vscale x 4 x i8> poison, <vscale x 4 x bfloat> [[VS2]], <vscale x 4 x i1> [[VM]], i64 0, i64 [[VL]], i64 3)
+// CHECK-RV64-NEXT: ret <vscale x 4 x i8> [[TMP0]]
+//
+vint8mf2_t test_vfncvt_x_f_w_bf16m1_i8mf2_rm_m(vbool16_t vm, vbfloat16m1_t vs2,
+ size_t vl) {
+ return __riscv_vfncvt_x_f_w_bf16m1_i8mf2_rm_m(vm, vs2, __RISCV_FRM_RNE, vl);
+}
+
+// CHECK-RV64-LABEL: define dso_local <vscale x 8 x i8> @test_vfncvt_x_f_w_bf16m2_i8m1_rm_m(
+// CHECK-RV64-SAME: <vscale x 8 x i1> [[VM:%.*]], <vscale x 8 x bfloat> [[VS2:%.*]], i64 noundef [[VL:%.*]]) #[[ATTR0]] {
+// CHECK-RV64-NEXT: [[ENTRY:.*:]]
+// CHECK-RV64-NEXT: [[TMP0:%.*]] = call <vscale x 8 x i8> @llvm.riscv.vfncvt.x.f.w.mask.nxv8i8.nxv8bf16.i64(<vscale x 8 x i8> poison, <vscale x 8 x bfloat> [[VS2]], <vscale x 8 x i1> [[VM]], i64 0, i64 [[VL]], i64 3)
+// CHECK-RV64-NEXT: ret <vscale x 8 x i8> [[TMP0]]
+//
+vint8m1_t test_vfncvt_x_f_w_bf16m2_i8m1_rm_m(vbool8_t vm, vbfloat16m2_t vs2,
+ size_t vl) {
+ return __riscv_vfncvt_x_f_w_bf16m2_i8m1_rm_m(vm, vs2, __RISCV_FRM_RNE, vl);
+}
+
+// CHECK-RV64-LABEL: define dso_local <vscale x 16 x i8> @test_vfncvt_x_f_w_bf16m4_i8m2_rm_m(
+// CHECK-RV64-SAME: <vscale x 16 x i1> [[VM:%.*]], <vscale x 16 x bfloat> [[VS2:%.*]], i64 noundef [[VL:%.*]]) #[[ATTR0]] {
+// CHECK-RV64-NEXT: [[ENTRY:.*:]]
+// CHECK-RV64-NEXT: [[TMP0:%.*]] = call <vscale x 16 x i8> @llvm.riscv.vfncvt.x.f.w.mask.nxv16i8.nxv16bf16.i64(<vscale x 16 x i8> poison, <vscale x 16 x bfloat> [[VS2]], <vscale x 16 x i1> [[VM]], i64 0, i64 [[VL]], i64 3)
+// CHECK-RV64-NEXT: ret <vscale x 16 x i8> [[TMP0]]
+//
+vint8m2_t test_vfncvt_x_f_w_bf16m4_i8m2_rm_m(vbool4_t vm, vbfloat16m4_t vs2,
+ size_t vl) {
+ return __riscv_vfncvt_x_f_w_bf16m4_i8m2_rm_m(vm, vs2, __RISCV_FRM_RNE, vl);
+}
+
+// CHECK-RV64-LABEL: define dso_local <vscale x 32 x i8> @test_vfncvt_x_f_w_bf16m8_i8m4_rm_m(
+// CHECK-RV64-SAME: <vscale x 32 x i1> [[VM:%.*]], <vscale x 32 x bfloat> [[VS2:%.*]], i64 noundef [[VL:%.*]]) #[[ATTR0]] {
+// CHECK-RV64-NEXT: [[ENTRY:.*:]]
+// CHECK-RV64-NEXT: [[TMP0:%.*]] = call <vscale x 32 x i8> @llvm.riscv.vfncvt.x.f.w.mask.nxv32i8.nxv32bf16.i64(<vscale x 32 x i8> poison, <vscale x 32 x bfloat> [[VS2]], <vscale x 32 x i1> [[VM]], i64 0, i64 [[VL]], i64 3)
+// CHECK-RV64-NEXT: ret <vscale x 32 x i8> [[TMP0]]
+//
+vint8m4_t test_vfncvt_x_f_w_bf16m8_i8m4_rm_m(vbool2_t vm, vbfloat16m8_t vs2,
+ size_t vl) {
+ return __riscv_vfncvt_x_f_w_bf16m8_i8m4_rm_m(vm, vs2, __RISCV_FRM_RNE, vl);
+}
+
+// CHECK-RV64-LABEL: define dso_local <vscale x 1 x i8> @test_vfncvt_xu_f_w_bf16mf4_u8mf8_rm_m(
+// CHECK-RV64-SAME: <vscale x 1 x i1> [[VM:%.*]], <vscale x 1 x bfloat> [[VS2:%.*]], i64 noundef [[VL:%.*]]) #[[ATTR0]] {
+// CHECK-RV64-NEXT: [[ENTRY:.*:]]
+// CHECK-RV64-NEXT: [[TMP0:%.*]] = call <vscale x 1 x i8> @llvm.riscv.vfncvt.xu.f.w.mask.nxv1i8.nxv1bf16.i64(<vscale x 1 x i8> poison, <vscale x 1 x bfloat> [[VS2]], <vscale x 1 x i1> [[VM]], i64 0, i64 [[VL]], i64 3)
+// CHECK-RV64-NEXT: ret <vscale x 1 x i8> [[TMP0]]
+//
+vuint8mf8_t test_vfncvt_xu_f_w_bf16mf4_u8mf8_rm_m(vbool64_t vm,
+ vbfloat16mf4_t vs2,
+ size_t vl) {
+ return __riscv_vfncvt_xu_f_w_bf16mf4_u8mf8_rm_m(vm, vs2, __RISCV_FRM_RNE, vl);
+}
+
+// CHECK-RV64-LABEL: define dso_local <vscale x 2 x i8> @test_vfncvt_xu_f_w_bf16mf2_u8mf4_rm_m(
+// CHECK-RV64-SAME: <vscale x 2 x i1> [[VM:%.*]], <vscale x 2 x bfloat> [[VS2:%.*]], i64 noundef [[VL:%.*]]) #[[ATTR0]] {
+// CHECK-RV64-NEXT: [[ENTRY:.*:]]
+// CHECK-RV64-NEXT: [[TMP0:%.*]] = call <vscale x 2 x i8> @llvm.riscv.vfncvt.xu.f.w.mask.nxv2i8.nxv2bf16.i64(<vscale x 2 x i8> poison, <vscale x 2 x bfloat> [[VS2]], <vscale x 2 x i1> [[VM]], i64 0, i64 [[VL]], i64 3)
+// CHECK-RV64-NEXT: ret <vscale x 2 x i8> [[TMP0]]
+//
+vuint8mf4_t test_vfncvt_xu_f_w_bf16mf2_u8mf4_rm_m(vbool32_t vm,
+ vbfloat16mf2_t vs2,
+ size_t vl) {
+ return __riscv_vfncvt_xu_f_w_bf16mf2_u8mf4_rm_m(vm, vs2, __RISCV_FRM_RNE, vl);
+}
+
+// CHECK-RV64-LABEL: define dso_local <vscale x 4 x i8> @test_vfncvt_xu_f_w_bf16m1_u8mf2_rm_m(
+// CHECK-RV64-SAME: <vscale x 4 x i1> [[VM:%.*]], <vscale x 4 x bfloat> [[VS2:%.*]], i64 noundef [[VL:%.*]]) #[[ATTR0]] {
+// CHECK-RV64-NEXT: [[ENTRY:.*:]]
+// CHECK-RV64-NEXT: [[TMP0:%.*]] = call <vscale x 4 x i8> @llvm.riscv.vfncvt.xu.f.w.mask.nxv4i8.nxv4bf16.i64(<vscale x 4 x i8> poison, <vscale x 4 x bfloat> [[VS2]], <vscale x 4 x i1> [[VM]], i64 0, i64 [[VL]], i64 3)
+// CHECK-RV64-NEXT: ret <vscale x 4 x i8> [[TMP0]]
+//
+vuint8mf2_t test_vfncvt_xu_f_w_bf16m1_u8mf2_rm_m(vbool16_t vm,
+ vbfloat16m1_t vs2, size_t vl) {
+ return __riscv_vfncvt_xu_f_w_bf16m1_u8mf2_rm_m(vm, vs2, __RISCV_FRM_RNE, vl);
+}
+
+// CHECK-RV64-LABEL: define dso_local <vscale x 8 x i8> @test_vfncvt_xu_f_w_bf16m2_u8m1_rm_m(
+// CHECK-RV64-SAME: <vscale x 8 x i1> [[VM:%.*]], <vscale x 8 x bfloat> [[VS2:%.*]], i64 noundef [[VL:%.*]]) #[[ATTR0]] {
+// CHECK-RV64-NEXT: [[ENTRY:.*:]]
+// CHECK-RV64-NEXT: [[TMP0:%.*]] = call <vscale x 8 x i8> @llvm.riscv.vfncvt.xu.f.w.mask.nxv8i8.nxv8bf16.i64(<vscale x 8 x i8> poison, <vscale x 8 x bfloat> [[VS2]], <vscale x 8 x i1> [[VM]], i64 0, i64 [[VL]], i64 3)
+// CHECK-RV64-NEXT: ret <vscale x 8 x i8> [[TMP0]]
+//
+vuint8m1_t test_vfncvt_xu_f_w_bf16m2_u8m1_rm_m(vbool8_t vm, vbfloat16m2_t vs2,
+ size_t vl) {
+ return __riscv_vfncvt_xu_f_w_bf16m2_u8m1_rm_m(vm, vs2, __RISCV_FRM_RNE, vl);
+}
+
+// CHECK-RV64-LABEL: define dso_local <vscale x 16 x i8> @test_vfncvt_xu_f_w_bf16m4_u8m2_rm_m(
+// CHECK-RV64-SAME: <vscale x 16 x i1> [[VM:%.*]], <vscale x 16 x bfloat> [[VS2:%.*]], i64 noundef [[VL:%.*]]) #[[ATTR0]] {
+// CHECK-RV64-NEXT: [[ENTRY:.*:]]
+// CHECK-RV64-NEXT: [[TMP0:%.*]] = call <vscale x 16 x i8> @llvm.riscv.vfncvt.xu.f.w.mask.nxv16i8.nxv16bf16.i64(<vscale x 16 x i8> poison, <vscale x 16 x bfloat> [[VS2]], <vscale x 16 x i1> [[VM]], i64 0, i64 [[VL]], i64 3)
+// CHECK-RV64-NEXT: ret <vscale x 16 x i8> [[TMP0]]
+//
+vuint8m2_t test_vfncvt_xu_f_w_bf16m4_u8m2_rm_m(vbool4_t vm, vbfloat16m4_t vs2,
+ size_t vl) {
+ return __riscv_vfncvt_xu_f_w_bf16m4_u8m2_rm_m(vm, vs2, __RISCV_FRM_RNE, vl);
+}
+
+// CHECK-RV64-LABEL: define dso_local <vscale x 32 x i8> @test_vfncvt_xu_f_w_bf16m8_u8m4_rm_m(
+// CHECK-RV64-SAME: <vscale x 32 x i1> [[VM:%.*]], <vscale x 32 x bfloat> [[VS2:%.*]], i64 noundef [[VL:%.*]]) #[[ATTR0]] {
+// CHECK-RV64-NEXT: [[ENTRY:.*:]]
+// CHECK-RV64-NEXT: [[TMP0:%.*]] = call <vscale x 32 x i8> @llvm.riscv.vfncvt.xu.f.w.mask.nxv32i8.nxv32bf16.i64(<vscale x 32 x i8> poison, <vscale x 32 x bfloat> [[VS2]], <vscale x 32 x i1> [[VM]], i64 0, i64 [[VL]], i64 3)
+// CHECK-RV64-NEXT: ret <vscale x 32 x i8> [[TMP0]]
+//
+vuint8m4_t test_vfncvt_xu_f_w_bf16m8_u8m4_rm_m(vbool2_t vm, vbfloat16m8_t vs2,
+ size_t vl) {
+ return __riscv_vfncvt_xu_f_w_bf16m8_u8m4_rm_m(vm, vs2, __RISCV_FRM_RNE, vl);
+}
+
+// CHECK-RV64-LABEL: define dso_local <vscale x 1 x bfloat> @test_vfncvt_f_f_w_bf16mf4_rm_m(
+// CHECK-RV64-SAME: <vscale x 1 x i1> [[VM:%.*]], <vscale x 1 x float> [[VS2:%.*]], i64 noundef [[VL:%.*]]) #[[ATTR0]] {
+// CHECK-RV64-NEXT: [[ENTRY:.*:]]
+// CHECK-RV64-NEXT: [[TMP0:%.*]] = call <vscale x 1 x bfloat> @llvm.riscv.vfncvt.f.f.w.mask.nxv1bf16.nxv1f32.i64(<vscale x 1 x bfloat> poison, <vscale x 1 x float> [[VS2]], <vscale x 1 x i1> [[VM]], i64 0, i64 [[VL]], i64 3)
+// CHECK-RV64-NEXT: ret <vscale x 1 x bfloat> [[TMP0]]
+//
+vbfloat16mf4_t test_vfncvt_f_f_w_bf16mf4_rm_m(vbool64_t vm, vfloat32mf2_t vs2,
+ size_t vl) {
+ return __riscv_vfncvt_f_f_w_bf16mf4_rm_m(vm, vs2, __RISCV_FRM_RNE, vl);
+}
+
+// CHECK-RV64-LABEL: define dso_local <vscale x 2 x bfloat> @test_vfncvt_f_f_w_bf16mf2_rm_m(
+// CHECK-RV64-SAME: <vscale x 2 x i1> [[VM:%.*]], <vscale x 2 x float> [[VS2:%.*]], i64 noundef [[VL:%.*]]) #[[ATTR0]] {
+// CHECK-RV64-NEXT: [[ENTRY:.*:]]
+// CHECK-RV64-NEXT: [[TMP0:%.*]] = call <vscale x 2 x bfloat> @llvm.riscv.vfncvt.f.f.w.mask.nxv2bf16.nxv2f32.i64(<vscale x 2 x bfloat> poison, <vscale x 2 x float> [[VS2]], <vscale x 2 x i1> [[VM]], i64 0, i64 [[VL]], i64 3)
+// CHECK-RV64-NEXT: ret <vscale x 2 x bfloat> [[TMP0]]
+//
+vbfloat16mf2_t test_vfncvt_f_f_w_bf16mf2_rm_m(vbool32_t vm, vfloat32m1_t vs2,
+ size_t vl) {
+ return __riscv_vfncvt_f_f_w_bf16mf2_rm_m(vm, vs2, __RISCV_FRM_RNE, vl);
+}
+
+// CHECK-RV64-LABEL: define dso_local <vscale x 4 x bfloat> @test_vfncvt_f_f_w_bf16m1_rm_m(
+// CHECK-RV64-SAME: <vscale x 4 x i1> [[VM:%.*]], <vscale x 4 x float> [[VS2:%.*]], i64 noundef [[VL:%.*]]) #[[ATTR0]] {
+// CHECK-RV64-NEXT: [[ENTRY:.*:]]
+// CHECK-RV64-NEXT: [[TMP0:%.*]] = call <vscale x 4 x bfloat> @llvm.riscv.vfncvt.f.f.w.mask.nxv4bf16.nxv4f32.i64(<vscale x 4 x bfloat> poison, <vscale x 4 x float> [[VS2]], <vscale x 4 x i1> [[VM]], i64 0, i64 [[VL]], i64 3)
+// CHECK-RV64-NEXT: ret <vscale x 4 x bfloat> [[TMP0]]
+//
+vbfloat16m1_t test_vfncvt_f_f_w_bf16m1_rm_m(vbool16_t vm, vfloat32m2_t vs2,
+ size_t vl) {
+ return __riscv_vfncvt_f_f_w_bf16m1_rm_m(vm, vs2, __RISCV_FRM_RNE, vl);
+}
+
+// CHECK-RV64-LABEL: define dso_local <vscale x 8 x bfloat> @test_vfncvt_f_f_w_bf16m2_rm_m(
+// CHECK-RV64-SAME: <vscale x 8 x i1> [[VM:%.*]], <vscale x 8 x float> [[VS2:%.*]], i64 noundef [[VL:%.*]]) #[[ATTR0]] {
+// CHECK-RV64-NEXT: [[ENTRY:.*:]]
+// CHECK-RV64-NEXT: [[TMP0:%.*]] = call <vscale x 8 x bfloat> @llvm.riscv.vfncvt.f.f.w.mask.nxv8bf16.nxv8f32.i64(<vscale x 8 x bfloat> poison, <vscale x 8 x float> [[VS2]], <vscale x 8 x i1> [[VM]], i64 0, i64 [[VL]], i64 3)
+// CHECK-RV64-NEXT: ret <vscale x 8 x bfloat> [[TMP0]]
+//
+vbfloat16m2_t test_vfncvt_f_f_w_bf16m2_rm_m(vbool8_t vm, vfloat32m4_t vs2,
+ size_t vl) {
+ return __riscv_vfncvt_f_f_w_bf16m2_rm_m(vm, vs2, __RISCV_FRM_RNE, vl);
+}
+
+// CHECK-RV64-LABEL: define dso_local <vscale x 16 x bfloat> @test_vfncvt_f_f_w_bf16m4_rm_m(
+// CHECK-RV64-SAME: <vscale x 16 x i1> [[VM:%.*]], <vscale x 16 x float> [[VS2:%.*]], i64 noundef [[VL:%.*]]) #[[ATTR0]] {
+// CHECK-RV64-NEXT: [[ENTRY:.*:]]
+// CHECK-RV64-NEXT: [[TMP0:%.*]] = call <vscale x 16 x bfloat> @llvm.riscv.vfncvt.f.f.w.mask.nxv16bf16.nxv16f32.i64(<vscale x 16 x bfloat> poison, <vscale x 16 x float> [[VS2]], <vscale x 16 x i1> [[VM]], i64 0, i64 [[VL]], i64 3)
+// CHECK-RV64-NEXT: ret <vscale x 16 x bfloat> [[TMP0]]
+//
+vbfloat16m4_t test_vfncvt_f_f_w_bf16m4_rm_m(vbool4_t vm, vfloat32m8_t vs2,
+ size_t vl) {
+ return __riscv_vfncvt_f_f_w_bf16m4_rm_m(vm, vs2, __RISCV_FRM_RNE, vl);
+}
diff --git a/clang/test/CodeGen/RISCV/rvv-intrinsics-autogenerated/zvfbfa/non-policy/non-overloaded/vfncvt_rod.c b/clang/test/CodeGen/RISCV/rvv-intrinsics-autogenerated/zvfbfa/non-policy/non-overloaded/vfncvt_rod.c
new file mode 100644
index 0000000..70c377b
--- /dev/null
+++ b/clang/test/CodeGen/RISCV/rvv-intrinsics-autogenerated/zvfbfa/non-policy/non-overloaded/vfncvt_rod.c
@@ -0,0 +1,113 @@
+// NOTE: Assertions have been autogenerated by utils/update_cc_test_checks.py UTC_ARGS: --version 5
+// REQUIRES: riscv-registered-target
+// RUN: %clang_cc1 -triple riscv64 -target-feature +v \
+// RUN: -target-feature +experimental-zvfbfa -disable-O0-optnone \
+// RUN: -emit-llvm %s -o - | opt -S -passes=mem2reg | \
+// RUN: FileCheck --check-prefix=CHECK-RV64 %s
+
+#include <riscv_vector.h>
+
+// CHECK-RV64-LABEL: define dso_local <vscale x 1 x bfloat> @test_vfncvt_rod_f_f_w_bf16mf4(
+// CHECK-RV64-SAME: <vscale x 1 x float> [[VS2:%.*]], i64 noundef [[VL:%.*]]) #[[ATTR0:[0-9]+]] {
+// CHECK-RV64-NEXT: [[ENTRY:.*:]]
+// CHECK-RV64-NEXT: [[TMP0:%.*]] = call <vscale x 1 x bfloat> @llvm.riscv.vfncvt.rod.f.f.w.nxv1bf16.nxv1f32.i64(<vscale x 1 x bfloat> poison, <vscale x 1 x float> [[VS2]], i64 [[VL]])
+// CHECK-RV64-NEXT: ret <vscale x 1 x bfloat> [[TMP0]]
+//
+vbfloat16mf4_t test_vfncvt_rod_f_f_w_bf16mf4(vfloat32mf2_t vs2, size_t vl) {
+ return __riscv_vfncvt_rod_f_f_w_bf16mf4(vs2, vl);
+}
+
+// CHECK-RV64-LABEL: define dso_local <vscale x 2 x bfloat> @test_vfncvt_rod_f_f_w_bf16mf2(
+// CHECK-RV64-SAME: <vscale x 2 x float> [[VS2:%.*]], i64 noundef [[VL:%.*]]) #[[ATTR0]] {
+// CHECK-RV64-NEXT: [[ENTRY:.*:]]
+// CHECK-RV64-NEXT: [[TMP0:%.*]] = call <vscale x 2 x bfloat> @llvm.riscv.vfncvt.rod.f.f.w.nxv2bf16.nxv2f32.i64(<vscale x 2 x bfloat> poison, <vscale x 2 x float> [[VS2]], i64 [[VL]])
+// CHECK-RV64-NEXT: ret <vscale x 2 x bfloat> [[TMP0]]
+//
+vbfloat16mf2_t test_vfncvt_rod_f_f_w_bf16mf2(vfloat32m1_t vs2, size_t vl) {
+ return __riscv_vfncvt_rod_f_f_w_bf16mf2(vs2, vl);
+}
+
+// CHECK-RV64-LABEL: define dso_local <vscale x 4 x bfloat> @test_vfncvt_rod_f_f_w_bf16m1(
+// CHECK-RV64-SAME: <vscale x 4 x float> [[VS2:%.*]], i64 noundef [[VL:%.*]]) #[[ATTR0]] {
+// CHECK-RV64-NEXT: [[ENTRY:.*:]]
+// CHECK-RV64-NEXT: [[TMP0:%.*]] = call <vscale x 4 x bfloat> @llvm.riscv.vfncvt.rod.f.f.w.nxv4bf16.nxv4f32.i64(<vscale x 4 x bfloat> poison, <vscale x 4 x float> [[VS2]], i64 [[VL]])
+// CHECK-RV64-NEXT: ret <vscale x 4 x bfloat> [[TMP0]]
+//
+vbfloat16m1_t test_vfncvt_rod_f_f_w_bf16m1(vfloat32m2_t vs2, size_t vl) {
+ return __riscv_vfncvt_rod_f_f_w_bf16m1(vs2, vl);
+}
+
+// CHECK-RV64-LABEL: define dso_local <vscale x 8 x bfloat> @test_vfncvt_rod_f_f_w_bf16m2(
+// CHECK-RV64-SAME: <vscale x 8 x float> [[VS2:%.*]], i64 noundef [[VL:%.*]]) #[[ATTR0]] {
+// CHECK-RV64-NEXT: [[ENTRY:.*:]]
+// CHECK-RV64-NEXT: [[TMP0:%.*]] = call <vscale x 8 x bfloat> @llvm.riscv.vfncvt.rod.f.f.w.nxv8bf16.nxv8f32.i64(<vscale x 8 x bfloat> poison, <vscale x 8 x float> [[VS2]], i64 [[VL]])
+// CHECK-RV64-NEXT: ret <vscale x 8 x bfloat> [[TMP0]]
+//
+vbfloat16m2_t test_vfncvt_rod_f_f_w_bf16m2(vfloat32m4_t vs2, size_t vl) {
+ return __riscv_vfncvt_rod_f_f_w_bf16m2(vs2, vl);
+}
+
+// CHECK-RV64-LABEL: define dso_local <vscale x 16 x bfloat> @test_vfncvt_rod_f_f_w_bf16m4(
+// CHECK-RV64-SAME: <vscale x 16 x float> [[VS2:%.*]], i64 noundef [[VL:%.*]]) #[[ATTR0]] {
+// CHECK-RV64-NEXT: [[ENTRY:.*:]]
+// CHECK-RV64-NEXT: [[TMP0:%.*]] = call <vscale x 16 x bfloat> @llvm.riscv.vfncvt.rod.f.f.w.nxv16bf16.nxv16f32.i64(<vscale x 16 x bfloat> poison, <vscale x 16 x float> [[VS2]], i64 [[VL]])
+// CHECK-RV64-NEXT: ret <vscale x 16 x bfloat> [[TMP0]]
+//
+vbfloat16m4_t test_vfncvt_rod_f_f_w_bf16m4(vfloat32m8_t vs2, size_t vl) {
+ return __riscv_vfncvt_rod_f_f_w_bf16m4(vs2, vl);
+}
+
+// CHECK-RV64-LABEL: define dso_local <vscale x 1 x bfloat> @test_vfncvt_rod_f_f_w_bf16mf4_m(
+// CHECK-RV64-SAME: <vscale x 1 x i1> [[VM:%.*]], <vscale x 1 x float> [[VS2:%.*]], i64 noundef [[VL:%.*]]) #[[ATTR0]] {
+// CHECK-RV64-NEXT: [[ENTRY:.*:]]
+// CHECK-RV64-NEXT: [[TMP0:%.*]] = call <vscale x 1 x bfloat> @llvm.riscv.vfncvt.rod.f.f.w.mask.nxv1bf16.nxv1f32.i64(<vscale x 1 x bfloat> poison, <vscale x 1 x float> [[VS2]], <vscale x 1 x i1> [[VM]], i64 [[VL]], i64 3)
+// CHECK-RV64-NEXT: ret <vscale x 1 x bfloat> [[TMP0]]
+//
+vbfloat16mf4_t test_vfncvt_rod_f_f_w_bf16mf4_m(vbool64_t vm, vfloat32mf2_t vs2,
+ size_t vl) {
+ return __riscv_vfncvt_rod_f_f_w_bf16mf4_m(vm, vs2, vl);
+}
+
+// CHECK-RV64-LABEL: define dso_local <vscale x 2 x bfloat> @test_vfncvt_rod_f_f_w_bf16mf2_m(
+// CHECK-RV64-SAME: <vscale x 2 x i1> [[VM:%.*]], <vscale x 2 x float> [[VS2:%.*]], i64 noundef [[VL:%.*]]) #[[ATTR0]] {
+// CHECK-RV64-NEXT: [[ENTRY:.*:]]
+// CHECK-RV64-NEXT: [[TMP0:%.*]] = call <vscale x 2 x bfloat> @llvm.riscv.vfncvt.rod.f.f.w.mask.nxv2bf16.nxv2f32.i64(<vscale x 2 x bfloat> poison, <vscale x 2 x float> [[VS2]], <vscale x 2 x i1> [[VM]], i64 [[VL]], i64 3)
+// CHECK-RV64-NEXT: ret <vscale x 2 x bfloat> [[TMP0]]
+//
+vbfloat16mf2_t test_vfncvt_rod_f_f_w_bf16mf2_m(vbool32_t vm, vfloat32m1_t vs2,
+ size_t vl) {
+ return __riscv_vfncvt_rod_f_f_w_bf16mf2_m(vm, vs2, vl);
+}
+
+// CHECK-RV64-LABEL: define dso_local <vscale x 4 x bfloat> @test_vfncvt_rod_f_f_w_bf16m1_m(
+// CHECK-RV64-SAME: <vscale x 4 x i1> [[VM:%.*]], <vscale x 4 x float> [[VS2:%.*]], i64 noundef [[VL:%.*]]) #[[ATTR0]] {
+// CHECK-RV64-NEXT: [[ENTRY:.*:]]
+// CHECK-RV64-NEXT: [[TMP0:%.*]] = call <vscale x 4 x bfloat> @llvm.riscv.vfncvt.rod.f.f.w.mask.nxv4bf16.nxv4f32.i64(<vscale x 4 x bfloat> poison, <vscale x 4 x float> [[VS2]], <vscale x 4 x i1> [[VM]], i64 [[VL]], i64 3)
+// CHECK-RV64-NEXT: ret <vscale x 4 x bfloat> [[TMP0]]
+//
+vbfloat16m1_t test_vfncvt_rod_f_f_w_bf16m1_m(vbool16_t vm, vfloat32m2_t vs2,
+ size_t vl) {
+ return __riscv_vfncvt_rod_f_f_w_bf16m1_m(vm, vs2, vl);
+}
+
+// CHECK-RV64-LABEL: define dso_local <vscale x 8 x bfloat> @test_vfncvt_rod_f_f_w_bf16m2_m(
+// CHECK-RV64-SAME: <vscale x 8 x i1> [[VM:%.*]], <vscale x 8 x float> [[VS2:%.*]], i64 noundef [[VL:%.*]]) #[[ATTR0]] {
+// CHECK-RV64-NEXT: [[ENTRY:.*:]]
+// CHECK-RV64-NEXT: [[TMP0:%.*]] = call <vscale x 8 x bfloat> @llvm.riscv.vfncvt.rod.f.f.w.mask.nxv8bf16.nxv8f32.i64(<vscale x 8 x bfloat> poison, <vscale x 8 x float> [[VS2]], <vscale x 8 x i1> [[VM]], i64 [[VL]], i64 3)
+// CHECK-RV64-NEXT: ret <vscale x 8 x bfloat> [[TMP0]]
+//
+vbfloat16m2_t test_vfncvt_rod_f_f_w_bf16m2_m(vbool8_t vm, vfloat32m4_t vs2,
+ size_t vl) {
+ return __riscv_vfncvt_rod_f_f_w_bf16m2_m(vm, vs2, vl);
+}
+
+// CHECK-RV64-LABEL: define dso_local <vscale x 16 x bfloat> @test_vfncvt_rod_f_f_w_bf16m4_m(
+// CHECK-RV64-SAME: <vscale x 16 x i1> [[VM:%.*]], <vscale x 16 x float> [[VS2:%.*]], i64 noundef [[VL:%.*]]) #[[ATTR0]] {
+// CHECK-RV64-NEXT: [[ENTRY:.*:]]
+// CHECK-RV64-NEXT: [[TMP0:%.*]] = call <vscale x 16 x bfloat> @llvm.riscv.vfncvt.rod.f.f.w.mask.nxv16bf16.nxv16f32.i64(<vscale x 16 x bfloat> poison, <vscale x 16 x float> [[VS2]], <vscale x 16 x i1> [[VM]], i64 [[VL]], i64 3)
+// CHECK-RV64-NEXT: ret <vscale x 16 x bfloat> [[TMP0]]
+//
+vbfloat16m4_t test_vfncvt_rod_f_f_w_bf16m4_m(vbool4_t vm, vfloat32m8_t vs2,
+ size_t vl) {
+ return __riscv_vfncvt_rod_f_f_w_bf16m4_m(vm, vs2, vl);
+}
diff --git a/clang/test/CodeGen/RISCV/rvv-intrinsics-autogenerated/zvfbfa/non-policy/non-overloaded/vfncvt_rtz.c b/clang/test/CodeGen/RISCV/rvv-intrinsics-autogenerated/zvfbfa/non-policy/non-overloaded/vfncvt_rtz.c
new file mode 100644
index 0000000..854e986
--- /dev/null
+++ b/clang/test/CodeGen/RISCV/rvv-intrinsics-autogenerated/zvfbfa/non-policy/non-overloaded/vfncvt_rtz.c
@@ -0,0 +1,267 @@
+// NOTE: Assertions have been autogenerated by utils/update_cc_test_checks.py UTC_ARGS: --version 5
+// REQUIRES: riscv-registered-target
+// RUN: %clang_cc1 -triple riscv64 -target-feature +v \
+// RUN: -target-feature +experimental-zvfbfa -disable-O0-optnone \
+// RUN: -emit-llvm %s -o - | opt -S -passes=mem2reg | \
+// RUN: FileCheck --check-prefix=CHECK-RV64 %s
+
+#include <riscv_vector.h>
+
+// CHECK-RV64-LABEL: define dso_local <vscale x 1 x i8> @test_vfncvt_rtz_x_f_w_bf16mf4_i8mf8(
+// CHECK-RV64-SAME: <vscale x 1 x bfloat> [[VS2:%.*]], i64 noundef [[VL:%.*]]) #[[ATTR0:[0-9]+]] {
+// CHECK-RV64-NEXT: [[ENTRY:.*:]]
+// CHECK-RV64-NEXT: [[TMP0:%.*]] = call <vscale x 1 x i8> @llvm.riscv.vfncvt.rtz.x.f.w.nxv1i8.nxv1bf16.i64(<vscale x 1 x i8> poison, <vscale x 1 x bfloat> [[VS2]], i64 [[VL]])
+// CHECK-RV64-NEXT: ret <vscale x 1 x i8> [[TMP0]]
+//
+vint8mf8_t test_vfncvt_rtz_x_f_w_bf16mf4_i8mf8(vbfloat16mf4_t vs2, size_t vl) {
+ return __riscv_vfncvt_rtz_x_f_w_bf16mf4_i8mf8(vs2, vl);
+}
+
+// CHECK-RV64-LABEL: define dso_local <vscale x 2 x i8> @test_vfncvt_rtz_x_f_w_bf16mf2_i8mf4(
+// CHECK-RV64-SAME: <vscale x 2 x bfloat> [[VS2:%.*]], i64 noundef [[VL:%.*]]) #[[ATTR0]] {
+// CHECK-RV64-NEXT: [[ENTRY:.*:]]
+// CHECK-RV64-NEXT: [[TMP0:%.*]] = call <vscale x 2 x i8> @llvm.riscv.vfncvt.rtz.x.f.w.nxv2i8.nxv2bf16.i64(<vscale x 2 x i8> poison, <vscale x 2 x bfloat> [[VS2]], i64 [[VL]])
+// CHECK-RV64-NEXT: ret <vscale x 2 x i8> [[TMP0]]
+//
+vint8mf4_t test_vfncvt_rtz_x_f_w_bf16mf2_i8mf4(vbfloat16mf2_t vs2, size_t vl) {
+ return __riscv_vfncvt_rtz_x_f_w_bf16mf2_i8mf4(vs2, vl);
+}
+
+// CHECK-RV64-LABEL: define dso_local <vscale x 4 x i8> @test_vfncvt_rtz_x_f_w_bf16m1_i8mf2(
+// CHECK-RV64-SAME: <vscale x 4 x bfloat> [[VS2:%.*]], i64 noundef [[VL:%.*]]) #[[ATTR0]] {
+// CHECK-RV64-NEXT: [[ENTRY:.*:]]
+// CHECK-RV64-NEXT: [[TMP0:%.*]] = call <vscale x 4 x i8> @llvm.riscv.vfncvt.rtz.x.f.w.nxv4i8.nxv4bf16.i64(<vscale x 4 x i8> poison, <vscale x 4 x bfloat> [[VS2]], i64 [[VL]])
+// CHECK-RV64-NEXT: ret <vscale x 4 x i8> [[TMP0]]
+//
+vint8mf2_t test_vfncvt_rtz_x_f_w_bf16m1_i8mf2(vbfloat16m1_t vs2, size_t vl) {
+ return __riscv_vfncvt_rtz_x_f_w_bf16m1_i8mf2(vs2, vl);
+}
+
+// CHECK-RV64-LABEL: define dso_local <vscale x 8 x i8> @test_vfncvt_rtz_x_f_w_bf16m2_i8m1(
+// CHECK-RV64-SAME: <vscale x 8 x bfloat> [[VS2:%.*]], i64 noundef [[VL:%.*]]) #[[ATTR0]] {
+// CHECK-RV64-NEXT: [[ENTRY:.*:]]
+// CHECK-RV64-NEXT: [[TMP0:%.*]] = call <vscale x 8 x i8> @llvm.riscv.vfncvt.rtz.x.f.w.nxv8i8.nxv8bf16.i64(<vscale x 8 x i8> poison, <vscale x 8 x bfloat> [[VS2]], i64 [[VL]])
+// CHECK-RV64-NEXT: ret <vscale x 8 x i8> [[TMP0]]
+//
+vint8m1_t test_vfncvt_rtz_x_f_w_bf16m2_i8m1(vbfloat16m2_t vs2, size_t vl) {
+ return __riscv_vfncvt_rtz_x_f_w_bf16m2_i8m1(vs2, vl);
+}
+
+// CHECK-RV64-LABEL: define dso_local <vscale x 16 x i8> @test_vfncvt_rtz_x_f_w_bf16m4_i8m2(
+// CHECK-RV64-SAME: <vscale x 16 x bfloat> [[VS2:%.*]], i64 noundef [[VL:%.*]]) #[[ATTR0]] {
+// CHECK-RV64-NEXT: [[ENTRY:.*:]]
+// CHECK-RV64-NEXT: [[TMP0:%.*]] = call <vscale x 16 x i8> @llvm.riscv.vfncvt.rtz.x.f.w.nxv16i8.nxv16bf16.i64(<vscale x 16 x i8> poison, <vscale x 16 x bfloat> [[VS2]], i64 [[VL]])
+// CHECK-RV64-NEXT: ret <vscale x 16 x i8> [[TMP0]]
+//
+vint8m2_t test_vfncvt_rtz_x_f_w_bf16m4_i8m2(vbfloat16m4_t vs2, size_t vl) {
+ return __riscv_vfncvt_rtz_x_f_w_bf16m4_i8m2(vs2, vl);
+}
+
+// CHECK-RV64-LABEL: define dso_local <vscale x 32 x i8> @test_vfncvt_rtz_x_f_w_bf16m8_i8m4(
+// CHECK-RV64-SAME: <vscale x 32 x bfloat> [[VS2:%.*]], i64 noundef [[VL:%.*]]) #[[ATTR0]] {
+// CHECK-RV64-NEXT: [[ENTRY:.*:]]
+// CHECK-RV64-NEXT: [[TMP0:%.*]] = call <vscale x 32 x i8> @llvm.riscv.vfncvt.rtz.x.f.w.nxv32i8.nxv32bf16.i64(<vscale x 32 x i8> poison, <vscale x 32 x bfloat> [[VS2]], i64 [[VL]])
+// CHECK-RV64-NEXT: ret <vscale x 32 x i8> [[TMP0]]
+//
+vint8m4_t test_vfncvt_rtz_x_f_w_bf16m8_i8m4(vbfloat16m8_t vs2, size_t vl) {
+ return __riscv_vfncvt_rtz_x_f_w_bf16m8_i8m4(vs2, vl);
+}
+
+// CHECK-RV64-LABEL: define dso_local <vscale x 1 x i8> @test_vfncvt_rtz_xu_f_w_bf16mf4_u8mf8(
+// CHECK-RV64-SAME: <vscale x 1 x bfloat> [[VS2:%.*]], i64 noundef [[VL:%.*]]) #[[ATTR0]] {
+// CHECK-RV64-NEXT: [[ENTRY:.*:]]
+// CHECK-RV64-NEXT: [[TMP0:%.*]] = call <vscale x 1 x i8> @llvm.riscv.vfncvt.rtz.xu.f.w.nxv1i8.nxv1bf16.i64(<vscale x 1 x i8> poison, <vscale x 1 x bfloat> [[VS2]], i64 [[VL]])
+// CHECK-RV64-NEXT: ret <vscale x 1 x i8> [[TMP0]]
+//
+vuint8mf8_t test_vfncvt_rtz_xu_f_w_bf16mf4_u8mf8(vbfloat16mf4_t vs2,
+ size_t vl) {
+ return __riscv_vfncvt_rtz_xu_f_w_bf16mf4_u8mf8(vs2, vl);
+}
+
+// CHECK-RV64-LABEL: define dso_local <vscale x 2 x i8> @test_vfncvt_rtz_xu_f_w_bf16mf2_u8mf4(
+// CHECK-RV64-SAME: <vscale x 2 x bfloat> [[VS2:%.*]], i64 noundef [[VL:%.*]]) #[[ATTR0]] {
+// CHECK-RV64-NEXT: [[ENTRY:.*:]]
+// CHECK-RV64-NEXT: [[TMP0:%.*]] = call <vscale x 2 x i8> @llvm.riscv.vfncvt.rtz.xu.f.w.nxv2i8.nxv2bf16.i64(<vscale x 2 x i8> poison, <vscale x 2 x bfloat> [[VS2]], i64 [[VL]])
+// CHECK-RV64-NEXT: ret <vscale x 2 x i8> [[TMP0]]
+//
+vuint8mf4_t test_vfncvt_rtz_xu_f_w_bf16mf2_u8mf4(vbfloat16mf2_t vs2,
+ size_t vl) {
+ return __riscv_vfncvt_rtz_xu_f_w_bf16mf2_u8mf4(vs2, vl);
+}
+
+// CHECK-RV64-LABEL: define dso_local <vscale x 4 x i8> @test_vfncvt_rtz_xu_f_w_bf16m1_u8mf2(
+// CHECK-RV64-SAME: <vscale x 4 x bfloat> [[VS2:%.*]], i64 noundef [[VL:%.*]]) #[[ATTR0]] {
+// CHECK-RV64-NEXT: [[ENTRY:.*:]]
+// CHECK-RV64-NEXT: [[TMP0:%.*]] = call <vscale x 4 x i8> @llvm.riscv.vfncvt.rtz.xu.f.w.nxv4i8.nxv4bf16.i64(<vscale x 4 x i8> poison, <vscale x 4 x bfloat> [[VS2]], i64 [[VL]])
+// CHECK-RV64-NEXT: ret <vscale x 4 x i8> [[TMP0]]
+//
+vuint8mf2_t test_vfncvt_rtz_xu_f_w_bf16m1_u8mf2(vbfloat16m1_t vs2, size_t vl) {
+ return __riscv_vfncvt_rtz_xu_f_w_bf16m1_u8mf2(vs2, vl);
+}
+
+// CHECK-RV64-LABEL: define dso_local <vscale x 8 x i8> @test_vfncvt_rtz_xu_f_w_bf16m2_u8m1(
+// CHECK-RV64-SAME: <vscale x 8 x bfloat> [[VS2:%.*]], i64 noundef [[VL:%.*]]) #[[ATTR0]] {
+// CHECK-RV64-NEXT: [[ENTRY:.*:]]
+// CHECK-RV64-NEXT: [[TMP0:%.*]] = call <vscale x 8 x i8> @llvm.riscv.vfncvt.rtz.xu.f.w.nxv8i8.nxv8bf16.i64(<vscale x 8 x i8> poison, <vscale x 8 x bfloat> [[VS2]], i64 [[VL]])
+// CHECK-RV64-NEXT: ret <vscale x 8 x i8> [[TMP0]]
+//
+vuint8m1_t test_vfncvt_rtz_xu_f_w_bf16m2_u8m1(vbfloat16m2_t vs2, size_t vl) {
+ return __riscv_vfncvt_rtz_xu_f_w_bf16m2_u8m1(vs2, vl);
+}
+
+// CHECK-RV64-LABEL: define dso_local <vscale x 16 x i8> @test_vfncvt_rtz_xu_f_w_bf16m4_u8m2(
+// CHECK-RV64-SAME: <vscale x 16 x bfloat> [[VS2:%.*]], i64 noundef [[VL:%.*]]) #[[ATTR0]] {
+// CHECK-RV64-NEXT: [[ENTRY:.*:]]
+// CHECK-RV64-NEXT: [[TMP0:%.*]] = call <vscale x 16 x i8> @llvm.riscv.vfncvt.rtz.xu.f.w.nxv16i8.nxv16bf16.i64(<vscale x 16 x i8> poison, <vscale x 16 x bfloat> [[VS2]], i64 [[VL]])
+// CHECK-RV64-NEXT: ret <vscale x 16 x i8> [[TMP0]]
+//
+vuint8m2_t test_vfncvt_rtz_xu_f_w_bf16m4_u8m2(vbfloat16m4_t vs2, size_t vl) {
+ return __riscv_vfncvt_rtz_xu_f_w_bf16m4_u8m2(vs2, vl);
+}
+
+// CHECK-RV64-LABEL: define dso_local <vscale x 32 x i8> @test_vfncvt_rtz_xu_f_w_bf16m8_u8m4(
+// CHECK-RV64-SAME: <vscale x 32 x bfloat> [[VS2:%.*]], i64 noundef [[VL:%.*]]) #[[ATTR0]] {
+// CHECK-RV64-NEXT: [[ENTRY:.*:]]
+// CHECK-RV64-NEXT: [[TMP0:%.*]] = call <vscale x 32 x i8> @llvm.riscv.vfncvt.rtz.xu.f.w.nxv32i8.nxv32bf16.i64(<vscale x 32 x i8> poison, <vscale x 32 x bfloat> [[VS2]], i64 [[VL]])
+// CHECK-RV64-NEXT: ret <vscale x 32 x i8> [[TMP0]]
+//
+vuint8m4_t test_vfncvt_rtz_xu_f_w_bf16m8_u8m4(vbfloat16m8_t vs2, size_t vl) {
+ return __riscv_vfncvt_rtz_xu_f_w_bf16m8_u8m4(vs2, vl);
+}
+
+// CHECK-RV64-LABEL: define dso_local <vscale x 1 x i8> @test_vfncvt_rtz_x_f_w_bf16mf4_i8mf8_m(
+// CHECK-RV64-SAME: <vscale x 1 x i1> [[VM:%.*]], <vscale x 1 x bfloat> [[VS2:%.*]], i64 noundef [[VL:%.*]]) #[[ATTR0]] {
+// CHECK-RV64-NEXT: [[ENTRY:.*:]]
+// CHECK-RV64-NEXT: [[TMP0:%.*]] = call <vscale x 1 x i8> @llvm.riscv.vfncvt.rtz.x.f.w.mask.nxv1i8.nxv1bf16.i64(<vscale x 1 x i8> poison, <vscale x 1 x bfloat> [[VS2]], <vscale x 1 x i1> [[VM]], i64 [[VL]], i64 3)
+// CHECK-RV64-NEXT: ret <vscale x 1 x i8> [[TMP0]]
+//
+vint8mf8_t test_vfncvt_rtz_x_f_w_bf16mf4_i8mf8_m(vbool64_t vm,
+ vbfloat16mf4_t vs2,
+ size_t vl) {
+ return __riscv_vfncvt_rtz_x_f_w_bf16mf4_i8mf8_m(vm, vs2, vl);
+}
+
+// CHECK-RV64-LABEL: define dso_local <vscale x 2 x i8> @test_vfncvt_rtz_x_f_w_bf16mf2_i8mf4_m(
+// CHECK-RV64-SAME: <vscale x 2 x i1> [[VM:%.*]], <vscale x 2 x bfloat> [[VS2:%.*]], i64 noundef [[VL:%.*]]) #[[ATTR0]] {
+// CHECK-RV64-NEXT: [[ENTRY:.*:]]
+// CHECK-RV64-NEXT: [[TMP0:%.*]] = call <vscale x 2 x i8> @llvm.riscv.vfncvt.rtz.x.f.w.mask.nxv2i8.nxv2bf16.i64(<vscale x 2 x i8> poison, <vscale x 2 x bfloat> [[VS2]], <vscale x 2 x i1> [[VM]], i64 [[VL]], i64 3)
+// CHECK-RV64-NEXT: ret <vscale x 2 x i8> [[TMP0]]
+//
+vint8mf4_t test_vfncvt_rtz_x_f_w_bf16mf2_i8mf4_m(vbool32_t vm,
+ vbfloat16mf2_t vs2,
+ size_t vl) {
+ return __riscv_vfncvt_rtz_x_f_w_bf16mf2_i8mf4_m(vm, vs2, vl);
+}
+
+// CHECK-RV64-LABEL: define dso_local <vscale x 4 x i8> @test_vfncvt_rtz_x_f_w_bf16m1_i8mf2_m(
+// CHECK-RV64-SAME: <vscale x 4 x i1> [[VM:%.*]], <vscale x 4 x bfloat> [[VS2:%.*]], i64 noundef [[VL:%.*]]) #[[ATTR0]] {
+// CHECK-RV64-NEXT: [[ENTRY:.*:]]
+// CHECK-RV64-NEXT: [[TMP0:%.*]] = call <vscale x 4 x i8> @llvm.riscv.vfncvt.rtz.x.f.w.mask.nxv4i8.nxv4bf16.i64(<vscale x 4 x i8> poison, <vscale x 4 x bfloat> [[VS2]], <vscale x 4 x i1> [[VM]], i64 [[VL]], i64 3)
+// CHECK-RV64-NEXT: ret <vscale x 4 x i8> [[TMP0]]
+//
+vint8mf2_t test_vfncvt_rtz_x_f_w_bf16m1_i8mf2_m(vbool16_t vm, vbfloat16m1_t vs2,
+ size_t vl) {
+ return __riscv_vfncvt_rtz_x_f_w_bf16m1_i8mf2_m(vm, vs2, vl);
+}
+
+// CHECK-RV64-LABEL: define dso_local <vscale x 8 x i8> @test_vfncvt_rtz_x_f_w_bf16m2_i8m1_m(
+// CHECK-RV64-SAME: <vscale x 8 x i1> [[VM:%.*]], <vscale x 8 x bfloat> [[VS2:%.*]], i64 noundef [[VL:%.*]]) #[[ATTR0]] {
+// CHECK-RV64-NEXT: [[ENTRY:.*:]]
+// CHECK-RV64-NEXT: [[TMP0:%.*]] = call <vscale x 8 x i8> @llvm.riscv.vfncvt.rtz.x.f.w.mask.nxv8i8.nxv8bf16.i64(<vscale x 8 x i8> poison, <vscale x 8 x bfloat> [[VS2]], <vscale x 8 x i1> [[VM]], i64 [[VL]], i64 3)
+// CHECK-RV64-NEXT: ret <vscale x 8 x i8> [[TMP0]]
+//
+vint8m1_t test_vfncvt_rtz_x_f_w_bf16m2_i8m1_m(vbool8_t vm, vbfloat16m2_t vs2,
+ size_t vl) {
+ return __riscv_vfncvt_rtz_x_f_w_bf16m2_i8m1_m(vm, vs2, vl);
+}
+
+// CHECK-RV64-LABEL: define dso_local <vscale x 16 x i8> @test_vfncvt_rtz_x_f_w_bf16m4_i8m2_m(
+// CHECK-RV64-SAME: <vscale x 16 x i1> [[VM:%.*]], <vscale x 16 x bfloat> [[VS2:%.*]], i64 noundef [[VL:%.*]]) #[[ATTR0]] {
+// CHECK-RV64-NEXT: [[ENTRY:.*:]]
+// CHECK-RV64-NEXT: [[TMP0:%.*]] = call <vscale x 16 x i8> @llvm.riscv.vfncvt.rtz.x.f.w.mask.nxv16i8.nxv16bf16.i64(<vscale x 16 x i8> poison, <vscale x 16 x bfloat> [[VS2]], <vscale x 16 x i1> [[VM]], i64 [[VL]], i64 3)
+// CHECK-RV64-NEXT: ret <vscale x 16 x i8> [[TMP0]]
+//
+vint8m2_t test_vfncvt_rtz_x_f_w_bf16m4_i8m2_m(vbool4_t vm, vbfloat16m4_t vs2,
+ size_t vl) {
+ return __riscv_vfncvt_rtz_x_f_w_bf16m4_i8m2_m(vm, vs2, vl);
+}
+
+// CHECK-RV64-LABEL: define dso_local <vscale x 32 x i8> @test_vfncvt_rtz_x_f_w_bf16m8_i8m4_m(
+// CHECK-RV64-SAME: <vscale x 32 x i1> [[VM:%.*]], <vscale x 32 x bfloat> [[VS2:%.*]], i64 noundef [[VL:%.*]]) #[[ATTR0]] {
+// CHECK-RV64-NEXT: [[ENTRY:.*:]]
+// CHECK-RV64-NEXT: [[TMP0:%.*]] = call <vscale x 32 x i8> @llvm.riscv.vfncvt.rtz.x.f.w.mask.nxv32i8.nxv32bf16.i64(<vscale x 32 x i8> poison, <vscale x 32 x bfloat> [[VS2]], <vscale x 32 x i1> [[VM]], i64 [[VL]], i64 3)
+// CHECK-RV64-NEXT: ret <vscale x 32 x i8> [[TMP0]]
+//
+vint8m4_t test_vfncvt_rtz_x_f_w_bf16m8_i8m4_m(vbool2_t vm, vbfloat16m8_t vs2,
+ size_t vl) {
+ return __riscv_vfncvt_rtz_x_f_w_bf16m8_i8m4_m(vm, vs2, vl);
+}
+
+// CHECK-RV64-LABEL: define dso_local <vscale x 1 x i8> @test_vfncvt_rtz_xu_f_w_bf16mf4_u8mf8_m(
+// CHECK-RV64-SAME: <vscale x 1 x i1> [[VM:%.*]], <vscale x 1 x bfloat> [[VS2:%.*]], i64 noundef [[VL:%.*]]) #[[ATTR0]] {
+// CHECK-RV64-NEXT: [[ENTRY:.*:]]
+// CHECK-RV64-NEXT: [[TMP0:%.*]] = call <vscale x 1 x i8> @llvm.riscv.vfncvt.rtz.xu.f.w.mask.nxv1i8.nxv1bf16.i64(<vscale x 1 x i8> poison, <vscale x 1 x bfloat> [[VS2]], <vscale x 1 x i1> [[VM]], i64 [[VL]], i64 3)
+// CHECK-RV64-NEXT: ret <vscale x 1 x i8> [[TMP0]]
+//
+vuint8mf8_t test_vfncvt_rtz_xu_f_w_bf16mf4_u8mf8_m(vbool64_t vm,
+ vbfloat16mf4_t vs2,
+ size_t vl) {
+ return __riscv_vfncvt_rtz_xu_f_w_bf16mf4_u8mf8_m(vm, vs2, vl);
+}
+
+// CHECK-RV64-LABEL: define dso_local <vscale x 2 x i8> @test_vfncvt_rtz_xu_f_w_bf16mf2_u8mf4_m(
+// CHECK-RV64-SAME: <vscale x 2 x i1> [[VM:%.*]], <vscale x 2 x bfloat> [[VS2:%.*]], i64 noundef [[VL:%.*]]) #[[ATTR0]] {
+// CHECK-RV64-NEXT: [[ENTRY:.*:]]
+// CHECK-RV64-NEXT: [[TMP0:%.*]] = call <vscale x 2 x i8> @llvm.riscv.vfncvt.rtz.xu.f.w.mask.nxv2i8.nxv2bf16.i64(<vscale x 2 x i8> poison, <vscale x 2 x bfloat> [[VS2]], <vscale x 2 x i1> [[VM]], i64 [[VL]], i64 3)
+// CHECK-RV64-NEXT: ret <vscale x 2 x i8> [[TMP0]]
+//
+vuint8mf4_t test_vfncvt_rtz_xu_f_w_bf16mf2_u8mf4_m(vbool32_t vm,
+ vbfloat16mf2_t vs2,
+ size_t vl) {
+ return __riscv_vfncvt_rtz_xu_f_w_bf16mf2_u8mf4_m(vm, vs2, vl);
+}
+
+// CHECK-RV64-LABEL: define dso_local <vscale x 4 x i8> @test_vfncvt_rtz_xu_f_w_bf16m1_u8mf2_m(
+// CHECK-RV64-SAME: <vscale x 4 x i1> [[VM:%.*]], <vscale x 4 x bfloat> [[VS2:%.*]], i64 noundef [[VL:%.*]]) #[[ATTR0]] {
+// CHECK-RV64-NEXT: [[ENTRY:.*:]]
+// CHECK-RV64-NEXT: [[TMP0:%.*]] = call <vscale x 4 x i8> @llvm.riscv.vfncvt.rtz.xu.f.w.mask.nxv4i8.nxv4bf16.i64(<vscale x 4 x i8> poison, <vscale x 4 x bfloat> [[VS2]], <vscale x 4 x i1> [[VM]], i64 [[VL]], i64 3)
+// CHECK-RV64-NEXT: ret <vscale x 4 x i8> [[TMP0]]
+//
+vuint8mf2_t test_vfncvt_rtz_xu_f_w_bf16m1_u8mf2_m(vbool16_t vm,
+ vbfloat16m1_t vs2,
+ size_t vl) {
+ return __riscv_vfncvt_rtz_xu_f_w_bf16m1_u8mf2_m(vm, vs2, vl);
+}
+
+// CHECK-RV64-LABEL: define dso_local <vscale x 8 x i8> @test_vfncvt_rtz_xu_f_w_bf16m2_u8m1_m(
+// CHECK-RV64-SAME: <vscale x 8 x i1> [[VM:%.*]], <vscale x 8 x bfloat> [[VS2:%.*]], i64 noundef [[VL:%.*]]) #[[ATTR0]] {
+// CHECK-RV64-NEXT: [[ENTRY:.*:]]
+// CHECK-RV64-NEXT: [[TMP0:%.*]] = call <vscale x 8 x i8> @llvm.riscv.vfncvt.rtz.xu.f.w.mask.nxv8i8.nxv8bf16.i64(<vscale x 8 x i8> poison, <vscale x 8 x bfloat> [[VS2]], <vscale x 8 x i1> [[VM]], i64 [[VL]], i64 3)
+// CHECK-RV64-NEXT: ret <vscale x 8 x i8> [[TMP0]]
+//
+vuint8m1_t test_vfncvt_rtz_xu_f_w_bf16m2_u8m1_m(vbool8_t vm, vbfloat16m2_t vs2,
+ size_t vl) {
+ return __riscv_vfncvt_rtz_xu_f_w_bf16m2_u8m1_m(vm, vs2, vl);
+}
+
+// CHECK-RV64-LABEL: define dso_local <vscale x 16 x i8> @test_vfncvt_rtz_xu_f_w_bf16m4_u8m2_m(
+// CHECK-RV64-SAME: <vscale x 16 x i1> [[VM:%.*]], <vscale x 16 x bfloat> [[VS2:%.*]], i64 noundef [[VL:%.*]]) #[[ATTR0]] {
+// CHECK-RV64-NEXT: [[ENTRY:.*:]]
+// CHECK-RV64-NEXT: [[TMP0:%.*]] = call <vscale x 16 x i8> @llvm.riscv.vfncvt.rtz.xu.f.w.mask.nxv16i8.nxv16bf16.i64(<vscale x 16 x i8> poison, <vscale x 16 x bfloat> [[VS2]], <vscale x 16 x i1> [[VM]], i64 [[VL]], i64 3)
+// CHECK-RV64-NEXT: ret <vscale x 16 x i8> [[TMP0]]
+//
+vuint8m2_t test_vfncvt_rtz_xu_f_w_bf16m4_u8m2_m(vbool4_t vm, vbfloat16m4_t vs2,
+ size_t vl) {
+ return __riscv_vfncvt_rtz_xu_f_w_bf16m4_u8m2_m(vm, vs2, vl);
+}
+
+// CHECK-RV64-LABEL: define dso_local <vscale x 32 x i8> @test_vfncvt_rtz_xu_f_w_bf16m8_u8m4_m(
+// CHECK-RV64-SAME: <vscale x 32 x i1> [[VM:%.*]], <vscale x 32 x bfloat> [[VS2:%.*]], i64 noundef [[VL:%.*]]) #[[ATTR0]] {
+// CHECK-RV64-NEXT: [[ENTRY:.*:]]
+// CHECK-RV64-NEXT: [[TMP0:%.*]] = call <vscale x 32 x i8> @llvm.riscv.vfncvt.rtz.xu.f.w.mask.nxv32i8.nxv32bf16.i64(<vscale x 32 x i8> poison, <vscale x 32 x bfloat> [[VS2]], <vscale x 32 x i1> [[VM]], i64 [[VL]], i64 3)
+// CHECK-RV64-NEXT: ret <vscale x 32 x i8> [[TMP0]]
+//
+vuint8m4_t test_vfncvt_rtz_xu_f_w_bf16m8_u8m4_m(vbool2_t vm, vbfloat16m8_t vs2,
+ size_t vl) {
+ return __riscv_vfncvt_rtz_xu_f_w_bf16m8_u8m4_m(vm, vs2, vl);
+}
diff --git a/clang/test/CodeGen/RISCV/rvv-intrinsics-autogenerated/zvfbfa/non-policy/non-overloaded/vfnmacc.c b/clang/test/CodeGen/RISCV/rvv-intrinsics-autogenerated/zvfbfa/non-policy/non-overloaded/vfnmacc.c
new file mode 100644
index 0000000..1848488
--- /dev/null
+++ b/clang/test/CodeGen/RISCV/rvv-intrinsics-autogenerated/zvfbfa/non-policy/non-overloaded/vfnmacc.c
@@ -0,0 +1,249 @@
+// NOTE: Assertions have been autogenerated by utils/update_cc_test_checks.py UTC_ARGS: --version 5
+// REQUIRES: riscv-registered-target
+// RUN: %clang_cc1 -triple riscv64 -target-feature +v \
+// RUN: -target-feature +experimental-zvfbfa -disable-O0-optnone \
+// RUN: -emit-llvm %s -o - | opt -S -passes=mem2reg | \
+// RUN: FileCheck --check-prefix=CHECK-RV64 %s
+
+#include <riscv_vector.h>
+
+// CHECK-RV64-LABEL: define dso_local <vscale x 1 x bfloat> @test_vfnmacc_vv_bf16mf4(
+// CHECK-RV64-SAME: <vscale x 1 x bfloat> [[VD:%.*]], <vscale x 1 x bfloat> [[VS1:%.*]], <vscale x 1 x bfloat> [[VS2:%.*]], i64 noundef [[VL:%.*]]) #[[ATTR0:[0-9]+]] {
+// CHECK-RV64-NEXT: [[ENTRY:.*:]]
+// CHECK-RV64-NEXT: [[TMP0:%.*]] = call <vscale x 1 x bfloat> @llvm.riscv.vfnmacc.nxv1bf16.nxv1bf16.i64(<vscale x 1 x bfloat> [[VD]], <vscale x 1 x bfloat> [[VS1]], <vscale x 1 x bfloat> [[VS2]], i64 7, i64 [[VL]], i64 3)
+// CHECK-RV64-NEXT: ret <vscale x 1 x bfloat> [[TMP0]]
+//
+vbfloat16mf4_t test_vfnmacc_vv_bf16mf4(vbfloat16mf4_t vd, vbfloat16mf4_t vs1, vbfloat16mf4_t vs2, size_t vl) {
+ return __riscv_vfnmacc_vv_bf16mf4(vd, vs1, vs2, vl);
+}
+
+// CHECK-RV64-LABEL: define dso_local <vscale x 1 x bfloat> @test_vfnmacc_vf_bf16mf4(
+// CHECK-RV64-SAME: <vscale x 1 x bfloat> [[VD:%.*]], bfloat noundef [[RS1:%.*]], <vscale x 1 x bfloat> [[VS2:%.*]], i64 noundef [[VL:%.*]]) #[[ATTR0]] {
+// CHECK-RV64-NEXT: [[ENTRY:.*:]]
+// CHECK-RV64-NEXT: [[TMP0:%.*]] = call <vscale x 1 x bfloat> @llvm.riscv.vfnmacc.nxv1bf16.bf16.i64(<vscale x 1 x bfloat> [[VD]], bfloat [[RS1]], <vscale x 1 x bfloat> [[VS2]], i64 7, i64 [[VL]], i64 3)
+// CHECK-RV64-NEXT: ret <vscale x 1 x bfloat> [[TMP0]]
+//
+vbfloat16mf4_t test_vfnmacc_vf_bf16mf4(vbfloat16mf4_t vd, __bf16 rs1, vbfloat16mf4_t vs2, size_t vl) {
+ return __riscv_vfnmacc_vf_bf16mf4(vd, rs1, vs2, vl);
+}
+
+// CHECK-RV64-LABEL: define dso_local <vscale x 2 x bfloat> @test_vfnmacc_vv_bf16mf2(
+// CHECK-RV64-SAME: <vscale x 2 x bfloat> [[VD:%.*]], <vscale x 2 x bfloat> [[VS1:%.*]], <vscale x 2 x bfloat> [[VS2:%.*]], i64 noundef [[VL:%.*]]) #[[ATTR0]] {
+// CHECK-RV64-NEXT: [[ENTRY:.*:]]
+// CHECK-RV64-NEXT: [[TMP0:%.*]] = call <vscale x 2 x bfloat> @llvm.riscv.vfnmacc.nxv2bf16.nxv2bf16.i64(<vscale x 2 x bfloat> [[VD]], <vscale x 2 x bfloat> [[VS1]], <vscale x 2 x bfloat> [[VS2]], i64 7, i64 [[VL]], i64 3)
+// CHECK-RV64-NEXT: ret <vscale x 2 x bfloat> [[TMP0]]
+//
+vbfloat16mf2_t test_vfnmacc_vv_bf16mf2(vbfloat16mf2_t vd, vbfloat16mf2_t vs1, vbfloat16mf2_t vs2, size_t vl) {
+ return __riscv_vfnmacc_vv_bf16mf2(vd, vs1, vs2, vl);
+}
+
+// CHECK-RV64-LABEL: define dso_local <vscale x 2 x bfloat> @test_vfnmacc_vf_bf16mf2(
+// CHECK-RV64-SAME: <vscale x 2 x bfloat> [[VD:%.*]], bfloat noundef [[RS1:%.*]], <vscale x 2 x bfloat> [[VS2:%.*]], i64 noundef [[VL:%.*]]) #[[ATTR0]] {
+// CHECK-RV64-NEXT: [[ENTRY:.*:]]
+// CHECK-RV64-NEXT: [[TMP0:%.*]] = call <vscale x 2 x bfloat> @llvm.riscv.vfnmacc.nxv2bf16.bf16.i64(<vscale x 2 x bfloat> [[VD]], bfloat [[RS1]], <vscale x 2 x bfloat> [[VS2]], i64 7, i64 [[VL]], i64 3)
+// CHECK-RV64-NEXT: ret <vscale x 2 x bfloat> [[TMP0]]
+//
+vbfloat16mf2_t test_vfnmacc_vf_bf16mf2(vbfloat16mf2_t vd, __bf16 rs1, vbfloat16mf2_t vs2, size_t vl) {
+ return __riscv_vfnmacc_vf_bf16mf2(vd, rs1, vs2, vl);
+}
+
+// CHECK-RV64-LABEL: define dso_local <vscale x 4 x bfloat> @test_vfnmacc_vv_bf16m1(
+// CHECK-RV64-SAME: <vscale x 4 x bfloat> [[VD:%.*]], <vscale x 4 x bfloat> [[VS1:%.*]], <vscale x 4 x bfloat> [[VS2:%.*]], i64 noundef [[VL:%.*]]) #[[ATTR0]] {
+// CHECK-RV64-NEXT: [[ENTRY:.*:]]
+// CHECK-RV64-NEXT: [[TMP0:%.*]] = call <vscale x 4 x bfloat> @llvm.riscv.vfnmacc.nxv4bf16.nxv4bf16.i64(<vscale x 4 x bfloat> [[VD]], <vscale x 4 x bfloat> [[VS1]], <vscale x 4 x bfloat> [[VS2]], i64 7, i64 [[VL]], i64 3)
+// CHECK-RV64-NEXT: ret <vscale x 4 x bfloat> [[TMP0]]
+//
+vbfloat16m1_t test_vfnmacc_vv_bf16m1(vbfloat16m1_t vd, vbfloat16m1_t vs1, vbfloat16m1_t vs2, size_t vl) {
+ return __riscv_vfnmacc_vv_bf16m1(vd, vs1, vs2, vl);
+}
+
+// CHECK-RV64-LABEL: define dso_local <vscale x 4 x bfloat> @test_vfnmacc_vf_bf16m1(
+// CHECK-RV64-SAME: <vscale x 4 x bfloat> [[VD:%.*]], bfloat noundef [[RS1:%.*]], <vscale x 4 x bfloat> [[VS2:%.*]], i64 noundef [[VL:%.*]]) #[[ATTR0]] {
+// CHECK-RV64-NEXT: [[ENTRY:.*:]]
+// CHECK-RV64-NEXT: [[TMP0:%.*]] = call <vscale x 4 x bfloat> @llvm.riscv.vfnmacc.nxv4bf16.bf16.i64(<vscale x 4 x bfloat> [[VD]], bfloat [[RS1]], <vscale x 4 x bfloat> [[VS2]], i64 7, i64 [[VL]], i64 3)
+// CHECK-RV64-NEXT: ret <vscale x 4 x bfloat> [[TMP0]]
+//
+vbfloat16m1_t test_vfnmacc_vf_bf16m1(vbfloat16m1_t vd, __bf16 rs1, vbfloat16m1_t vs2, size_t vl) {
+ return __riscv_vfnmacc_vf_bf16m1(vd, rs1, vs2, vl);
+}
+
+// CHECK-RV64-LABEL: define dso_local <vscale x 8 x bfloat> @test_vfnmacc_vv_bf16m2(
+// CHECK-RV64-SAME: <vscale x 8 x bfloat> [[VD:%.*]], <vscale x 8 x bfloat> [[VS1:%.*]], <vscale x 8 x bfloat> [[VS2:%.*]], i64 noundef [[VL:%.*]]) #[[ATTR0]] {
+// CHECK-RV64-NEXT: [[ENTRY:.*:]]
+// CHECK-RV64-NEXT: [[TMP0:%.*]] = call <vscale x 8 x bfloat> @llvm.riscv.vfnmacc.nxv8bf16.nxv8bf16.i64(<vscale x 8 x bfloat> [[VD]], <vscale x 8 x bfloat> [[VS1]], <vscale x 8 x bfloat> [[VS2]], i64 7, i64 [[VL]], i64 3)
+// CHECK-RV64-NEXT: ret <vscale x 8 x bfloat> [[TMP0]]
+//
+vbfloat16m2_t test_vfnmacc_vv_bf16m2(vbfloat16m2_t vd, vbfloat16m2_t vs1, vbfloat16m2_t vs2, size_t vl) {
+ return __riscv_vfnmacc_vv_bf16m2(vd, vs1, vs2, vl);
+}
+
+// CHECK-RV64-LABEL: define dso_local <vscale x 8 x bfloat> @test_vfnmacc_vf_bf16m2(
+// CHECK-RV64-SAME: <vscale x 8 x bfloat> [[VD:%.*]], bfloat noundef [[RS1:%.*]], <vscale x 8 x bfloat> [[VS2:%.*]], i64 noundef [[VL:%.*]]) #[[ATTR0]] {
+// CHECK-RV64-NEXT: [[ENTRY:.*:]]
+// CHECK-RV64-NEXT: [[TMP0:%.*]] = call <vscale x 8 x bfloat> @llvm.riscv.vfnmacc.nxv8bf16.bf16.i64(<vscale x 8 x bfloat> [[VD]], bfloat [[RS1]], <vscale x 8 x bfloat> [[VS2]], i64 7, i64 [[VL]], i64 3)
+// CHECK-RV64-NEXT: ret <vscale x 8 x bfloat> [[TMP0]]
+//
+vbfloat16m2_t test_vfnmacc_vf_bf16m2(vbfloat16m2_t vd, __bf16 rs1, vbfloat16m2_t vs2, size_t vl) {
+ return __riscv_vfnmacc_vf_bf16m2(vd, rs1, vs2, vl);
+}
+
+// CHECK-RV64-LABEL: define dso_local <vscale x 16 x bfloat> @test_vfnmacc_vv_bf16m4(
+// CHECK-RV64-SAME: <vscale x 16 x bfloat> [[VD:%.*]], <vscale x 16 x bfloat> [[VS1:%.*]], <vscale x 16 x bfloat> [[VS2:%.*]], i64 noundef [[VL:%.*]]) #[[ATTR0]] {
+// CHECK-RV64-NEXT: [[ENTRY:.*:]]
+// CHECK-RV64-NEXT: [[TMP0:%.*]] = call <vscale x 16 x bfloat> @llvm.riscv.vfnmacc.nxv16bf16.nxv16bf16.i64(<vscale x 16 x bfloat> [[VD]], <vscale x 16 x bfloat> [[VS1]], <vscale x 16 x bfloat> [[VS2]], i64 7, i64 [[VL]], i64 3)
+// CHECK-RV64-NEXT: ret <vscale x 16 x bfloat> [[TMP0]]
+//
+vbfloat16m4_t test_vfnmacc_vv_bf16m4(vbfloat16m4_t vd, vbfloat16m4_t vs1, vbfloat16m4_t vs2, size_t vl) {
+ return __riscv_vfnmacc_vv_bf16m4(vd, vs1, vs2, vl);
+}
+
+// CHECK-RV64-LABEL: define dso_local <vscale x 16 x bfloat> @test_vfnmacc_vf_bf16m4(
+// CHECK-RV64-SAME: <vscale x 16 x bfloat> [[VD:%.*]], bfloat noundef [[RS1:%.*]], <vscale x 16 x bfloat> [[VS2:%.*]], i64 noundef [[VL:%.*]]) #[[ATTR0]] {
+// CHECK-RV64-NEXT: [[ENTRY:.*:]]
+// CHECK-RV64-NEXT: [[TMP0:%.*]] = call <vscale x 16 x bfloat> @llvm.riscv.vfnmacc.nxv16bf16.bf16.i64(<vscale x 16 x bfloat> [[VD]], bfloat [[RS1]], <vscale x 16 x bfloat> [[VS2]], i64 7, i64 [[VL]], i64 3)
+// CHECK-RV64-NEXT: ret <vscale x 16 x bfloat> [[TMP0]]
+//
+vbfloat16m4_t test_vfnmacc_vf_bf16m4(vbfloat16m4_t vd, __bf16 rs1, vbfloat16m4_t vs2, size_t vl) {
+ return __riscv_vfnmacc_vf_bf16m4(vd, rs1, vs2, vl);
+}
+
+// CHECK-RV64-LABEL: define dso_local <vscale x 32 x bfloat> @test_vfnmacc_vv_bf16m8(
+// CHECK-RV64-SAME: <vscale x 32 x bfloat> [[VD:%.*]], <vscale x 32 x bfloat> [[VS1:%.*]], <vscale x 32 x bfloat> [[VS2:%.*]], i64 noundef [[VL:%.*]]) #[[ATTR0]] {
+// CHECK-RV64-NEXT: [[ENTRY:.*:]]
+// CHECK-RV64-NEXT: [[TMP0:%.*]] = call <vscale x 32 x bfloat> @llvm.riscv.vfnmacc.nxv32bf16.nxv32bf16.i64(<vscale x 32 x bfloat> [[VD]], <vscale x 32 x bfloat> [[VS1]], <vscale x 32 x bfloat> [[VS2]], i64 7, i64 [[VL]], i64 3)
+// CHECK-RV64-NEXT: ret <vscale x 32 x bfloat> [[TMP0]]
+//
+vbfloat16m8_t test_vfnmacc_vv_bf16m8(vbfloat16m8_t vd, vbfloat16m8_t vs1, vbfloat16m8_t vs2, size_t vl) {
+ return __riscv_vfnmacc_vv_bf16m8(vd, vs1, vs2, vl);
+}
+
+// CHECK-RV64-LABEL: define dso_local <vscale x 32 x bfloat> @test_vfnmacc_vf_bf16m8(
+// CHECK-RV64-SAME: <vscale x 32 x bfloat> [[VD:%.*]], bfloat noundef [[RS1:%.*]], <vscale x 32 x bfloat> [[VS2:%.*]], i64 noundef [[VL:%.*]]) #[[ATTR0]] {
+// CHECK-RV64-NEXT: [[ENTRY:.*:]]
+// CHECK-RV64-NEXT: [[TMP0:%.*]] = call <vscale x 32 x bfloat> @llvm.riscv.vfnmacc.nxv32bf16.bf16.i64(<vscale x 32 x bfloat> [[VD]], bfloat [[RS1]], <vscale x 32 x bfloat> [[VS2]], i64 7, i64 [[VL]], i64 3)
+// CHECK-RV64-NEXT: ret <vscale x 32 x bfloat> [[TMP0]]
+//
+vbfloat16m8_t test_vfnmacc_vf_bf16m8(vbfloat16m8_t vd, __bf16 rs1, vbfloat16m8_t vs2, size_t vl) {
+ return __riscv_vfnmacc_vf_bf16m8(vd, rs1, vs2, vl);
+}
+
+// CHECK-RV64-LABEL: define dso_local <vscale x 1 x bfloat> @test_vfnmacc_vv_bf16mf4_m(
+// CHECK-RV64-SAME: <vscale x 1 x i1> [[MASK:%.*]], <vscale x 1 x bfloat> [[VD:%.*]], <vscale x 1 x bfloat> [[VS1:%.*]], <vscale x 1 x bfloat> [[VS2:%.*]], i64 noundef [[VL:%.*]]) #[[ATTR0]] {
+// CHECK-RV64-NEXT: [[ENTRY:.*:]]
+// CHECK-RV64-NEXT: [[TMP0:%.*]] = call <vscale x 1 x bfloat> @llvm.riscv.vfnmacc.mask.nxv1bf16.nxv1bf16.i64(<vscale x 1 x bfloat> [[VD]], <vscale x 1 x bfloat> [[VS1]], <vscale x 1 x bfloat> [[VS2]], <vscale x 1 x i1> [[MASK]], i64 7, i64 [[VL]], i64 3)
+// CHECK-RV64-NEXT: ret <vscale x 1 x bfloat> [[TMP0]]
+//
+vbfloat16mf4_t test_vfnmacc_vv_bf16mf4_m(vbool64_t mask, vbfloat16mf4_t vd, vbfloat16mf4_t vs1, vbfloat16mf4_t vs2, size_t vl) {
+ return __riscv_vfnmacc_vv_bf16mf4_m(mask, vd, vs1, vs2, vl);
+}
+
+// CHECK-RV64-LABEL: define dso_local <vscale x 1 x bfloat> @test_vfnmacc_vf_bf16mf4_m(
+// CHECK-RV64-SAME: <vscale x 1 x i1> [[MASK:%.*]], <vscale x 1 x bfloat> [[VD:%.*]], bfloat noundef [[RS1:%.*]], <vscale x 1 x bfloat> [[VS2:%.*]], i64 noundef [[VL:%.*]]) #[[ATTR0]] {
+// CHECK-RV64-NEXT: [[ENTRY:.*:]]
+// CHECK-RV64-NEXT: [[TMP0:%.*]] = call <vscale x 1 x bfloat> @llvm.riscv.vfnmacc.mask.nxv1bf16.bf16.i64(<vscale x 1 x bfloat> [[VD]], bfloat [[RS1]], <vscale x 1 x bfloat> [[VS2]], <vscale x 1 x i1> [[MASK]], i64 7, i64 [[VL]], i64 3)
+// CHECK-RV64-NEXT: ret <vscale x 1 x bfloat> [[TMP0]]
+//
+vbfloat16mf4_t test_vfnmacc_vf_bf16mf4_m(vbool64_t mask, vbfloat16mf4_t vd, __bf16 rs1, vbfloat16mf4_t vs2, size_t vl) {
+ return __riscv_vfnmacc_vf_bf16mf4_m(mask, vd, rs1, vs2, vl);
+}
+
+// CHECK-RV64-LABEL: define dso_local <vscale x 2 x bfloat> @test_vfnmacc_vv_bf16mf2_m(
+// CHECK-RV64-SAME: <vscale x 2 x i1> [[MASK:%.*]], <vscale x 2 x bfloat> [[VD:%.*]], <vscale x 2 x bfloat> [[VS1:%.*]], <vscale x 2 x bfloat> [[VS2:%.*]], i64 noundef [[VL:%.*]]) #[[ATTR0]] {
+// CHECK-RV64-NEXT: [[ENTRY:.*:]]
+// CHECK-RV64-NEXT: [[TMP0:%.*]] = call <vscale x 2 x bfloat> @llvm.riscv.vfnmacc.mask.nxv2bf16.nxv2bf16.i64(<vscale x 2 x bfloat> [[VD]], <vscale x 2 x bfloat> [[VS1]], <vscale x 2 x bfloat> [[VS2]], <vscale x 2 x i1> [[MASK]], i64 7, i64 [[VL]], i64 3)
+// CHECK-RV64-NEXT: ret <vscale x 2 x bfloat> [[TMP0]]
+//
+vbfloat16mf2_t test_vfnmacc_vv_bf16mf2_m(vbool32_t mask, vbfloat16mf2_t vd, vbfloat16mf2_t vs1, vbfloat16mf2_t vs2, size_t vl) {
+ return __riscv_vfnmacc_vv_bf16mf2_m(mask, vd, vs1, vs2, vl);
+}
+
+// CHECK-RV64-LABEL: define dso_local <vscale x 2 x bfloat> @test_vfnmacc_vf_bf16mf2_m(
+// CHECK-RV64-SAME: <vscale x 2 x i1> [[MASK:%.*]], <vscale x 2 x bfloat> [[VD:%.*]], bfloat noundef [[RS1:%.*]], <vscale x 2 x bfloat> [[VS2:%.*]], i64 noundef [[VL:%.*]]) #[[ATTR0]] {
+// CHECK-RV64-NEXT: [[ENTRY:.*:]]
+// CHECK-RV64-NEXT: [[TMP0:%.*]] = call <vscale x 2 x bfloat> @llvm.riscv.vfnmacc.mask.nxv2bf16.bf16.i64(<vscale x 2 x bfloat> [[VD]], bfloat [[RS1]], <vscale x 2 x bfloat> [[VS2]], <vscale x 2 x i1> [[MASK]], i64 7, i64 [[VL]], i64 3)
+// CHECK-RV64-NEXT: ret <vscale x 2 x bfloat> [[TMP0]]
+//
+vbfloat16mf2_t test_vfnmacc_vf_bf16mf2_m(vbool32_t mask, vbfloat16mf2_t vd, __bf16 rs1, vbfloat16mf2_t vs2, size_t vl) {
+ return __riscv_vfnmacc_vf_bf16mf2_m(mask, vd, rs1, vs2, vl);
+}
+
+// CHECK-RV64-LABEL: define dso_local <vscale x 4 x bfloat> @test_vfnmacc_vv_bf16m1_m(
+// CHECK-RV64-SAME: <vscale x 4 x i1> [[MASK:%.*]], <vscale x 4 x bfloat> [[VD:%.*]], <vscale x 4 x bfloat> [[VS1:%.*]], <vscale x 4 x bfloat> [[VS2:%.*]], i64 noundef [[VL:%.*]]) #[[ATTR0]] {
+// CHECK-RV64-NEXT: [[ENTRY:.*:]]
+// CHECK-RV64-NEXT: [[TMP0:%.*]] = call <vscale x 4 x bfloat> @llvm.riscv.vfnmacc.mask.nxv4bf16.nxv4bf16.i64(<vscale x 4 x bfloat> [[VD]], <vscale x 4 x bfloat> [[VS1]], <vscale x 4 x bfloat> [[VS2]], <vscale x 4 x i1> [[MASK]], i64 7, i64 [[VL]], i64 3)
+// CHECK-RV64-NEXT: ret <vscale x 4 x bfloat> [[TMP0]]
+//
+vbfloat16m1_t test_vfnmacc_vv_bf16m1_m(vbool16_t mask, vbfloat16m1_t vd, vbfloat16m1_t vs1, vbfloat16m1_t vs2, size_t vl) {
+ return __riscv_vfnmacc_vv_bf16m1_m(mask, vd, vs1, vs2, vl);
+}
+
+// CHECK-RV64-LABEL: define dso_local <vscale x 4 x bfloat> @test_vfnmacc_vf_bf16m1_m(
+// CHECK-RV64-SAME: <vscale x 4 x i1> [[MASK:%.*]], <vscale x 4 x bfloat> [[VD:%.*]], bfloat noundef [[RS1:%.*]], <vscale x 4 x bfloat> [[VS2:%.*]], i64 noundef [[VL:%.*]]) #[[ATTR0]] {
+// CHECK-RV64-NEXT: [[ENTRY:.*:]]
+// CHECK-RV64-NEXT: [[TMP0:%.*]] = call <vscale x 4 x bfloat> @llvm.riscv.vfnmacc.mask.nxv4bf16.bf16.i64(<vscale x 4 x bfloat> [[VD]], bfloat [[RS1]], <vscale x 4 x bfloat> [[VS2]], <vscale x 4 x i1> [[MASK]], i64 7, i64 [[VL]], i64 3)
+// CHECK-RV64-NEXT: ret <vscale x 4 x bfloat> [[TMP0]]
+//
+vbfloat16m1_t test_vfnmacc_vf_bf16m1_m(vbool16_t mask, vbfloat16m1_t vd, __bf16 rs1, vbfloat16m1_t vs2, size_t vl) {
+ return __riscv_vfnmacc_vf_bf16m1_m(mask, vd, rs1, vs2, vl);
+}
+
+// CHECK-RV64-LABEL: define dso_local <vscale x 8 x bfloat> @test_vfnmacc_vv_bf16m2_m(
+// CHECK-RV64-SAME: <vscale x 8 x i1> [[MASK:%.*]], <vscale x 8 x bfloat> [[VD:%.*]], <vscale x 8 x bfloat> [[VS1:%.*]], <vscale x 8 x bfloat> [[VS2:%.*]], i64 noundef [[VL:%.*]]) #[[ATTR0]] {
+// CHECK-RV64-NEXT: [[ENTRY:.*:]]
+// CHECK-RV64-NEXT: [[TMP0:%.*]] = call <vscale x 8 x bfloat> @llvm.riscv.vfnmacc.mask.nxv8bf16.nxv8bf16.i64(<vscale x 8 x bfloat> [[VD]], <vscale x 8 x bfloat> [[VS1]], <vscale x 8 x bfloat> [[VS2]], <vscale x 8 x i1> [[MASK]], i64 7, i64 [[VL]], i64 3)
+// CHECK-RV64-NEXT: ret <vscale x 8 x bfloat> [[TMP0]]
+//
+vbfloat16m2_t test_vfnmacc_vv_bf16m2_m(vbool8_t mask, vbfloat16m2_t vd, vbfloat16m2_t vs1, vbfloat16m2_t vs2, size_t vl) {
+ return __riscv_vfnmacc_vv_bf16m2_m(mask, vd, vs1, vs2, vl);
+}
+
+// CHECK-RV64-LABEL: define dso_local <vscale x 8 x bfloat> @test_vfnmacc_vf_bf16m2_m(
+// CHECK-RV64-SAME: <vscale x 8 x i1> [[MASK:%.*]], <vscale x 8 x bfloat> [[VD:%.*]], bfloat noundef [[RS1:%.*]], <vscale x 8 x bfloat> [[VS2:%.*]], i64 noundef [[VL:%.*]]) #[[ATTR0]] {
+// CHECK-RV64-NEXT: [[ENTRY:.*:]]
+// CHECK-RV64-NEXT: [[TMP0:%.*]] = call <vscale x 8 x bfloat> @llvm.riscv.vfnmacc.mask.nxv8bf16.bf16.i64(<vscale x 8 x bfloat> [[VD]], bfloat [[RS1]], <vscale x 8 x bfloat> [[VS2]], <vscale x 8 x i1> [[MASK]], i64 7, i64 [[VL]], i64 3)
+// CHECK-RV64-NEXT: ret <vscale x 8 x bfloat> [[TMP0]]
+//
+vbfloat16m2_t test_vfnmacc_vf_bf16m2_m(vbool8_t mask, vbfloat16m2_t vd, __bf16 rs1, vbfloat16m2_t vs2, size_t vl) {
+ return __riscv_vfnmacc_vf_bf16m2_m(mask, vd, rs1, vs2, vl);
+}
+
+// CHECK-RV64-LABEL: define dso_local <vscale x 16 x bfloat> @test_vfnmacc_vv_bf16m4_m(
+// CHECK-RV64-SAME: <vscale x 16 x i1> [[MASK:%.*]], <vscale x 16 x bfloat> [[VD:%.*]], <vscale x 16 x bfloat> [[VS1:%.*]], <vscale x 16 x bfloat> [[VS2:%.*]], i64 noundef [[VL:%.*]]) #[[ATTR0]] {
+// CHECK-RV64-NEXT: [[ENTRY:.*:]]
+// CHECK-RV64-NEXT: [[TMP0:%.*]] = call <vscale x 16 x bfloat> @llvm.riscv.vfnmacc.mask.nxv16bf16.nxv16bf16.i64(<vscale x 16 x bfloat> [[VD]], <vscale x 16 x bfloat> [[VS1]], <vscale x 16 x bfloat> [[VS2]], <vscale x 16 x i1> [[MASK]], i64 7, i64 [[VL]], i64 3)
+// CHECK-RV64-NEXT: ret <vscale x 16 x bfloat> [[TMP0]]
+//
+vbfloat16m4_t test_vfnmacc_vv_bf16m4_m(vbool4_t mask, vbfloat16m4_t vd, vbfloat16m4_t vs1, vbfloat16m4_t vs2, size_t vl) {
+ return __riscv_vfnmacc_vv_bf16m4_m(mask, vd, vs1, vs2, vl);
+}
+
+// CHECK-RV64-LABEL: define dso_local <vscale x 16 x bfloat> @test_vfnmacc_vf_bf16m4_m(
+// CHECK-RV64-SAME: <vscale x 16 x i1> [[MASK:%.*]], <vscale x 16 x bfloat> [[VD:%.*]], bfloat noundef [[RS1:%.*]], <vscale x 16 x bfloat> [[VS2:%.*]], i64 noundef [[VL:%.*]]) #[[ATTR0]] {
+// CHECK-RV64-NEXT: [[ENTRY:.*:]]
+// CHECK-RV64-NEXT: [[TMP0:%.*]] = call <vscale x 16 x bfloat> @llvm.riscv.vfnmacc.mask.nxv16bf16.bf16.i64(<vscale x 16 x bfloat> [[VD]], bfloat [[RS1]], <vscale x 16 x bfloat> [[VS2]], <vscale x 16 x i1> [[MASK]], i64 7, i64 [[VL]], i64 3)
+// CHECK-RV64-NEXT: ret <vscale x 16 x bfloat> [[TMP0]]
+//
+vbfloat16m4_t test_vfnmacc_vf_bf16m4_m(vbool4_t mask, vbfloat16m4_t vd, __bf16 rs1, vbfloat16m4_t vs2, size_t vl) {
+ return __riscv_vfnmacc_vf_bf16m4_m(mask, vd, rs1, vs2, vl);
+}
+
+// CHECK-RV64-LABEL: define dso_local <vscale x 32 x bfloat> @test_vfnmacc_vv_bf16m8_m(
+// CHECK-RV64-SAME: <vscale x 32 x i1> [[MASK:%.*]], <vscale x 32 x bfloat> [[VD:%.*]], <vscale x 32 x bfloat> [[VS1:%.*]], <vscale x 32 x bfloat> [[VS2:%.*]], i64 noundef [[VL:%.*]]) #[[ATTR0]] {
+// CHECK-RV64-NEXT: [[ENTRY:.*:]]
+// CHECK-RV64-NEXT: [[TMP0:%.*]] = call <vscale x 32 x bfloat> @llvm.riscv.vfnmacc.mask.nxv32bf16.nxv32bf16.i64(<vscale x 32 x bfloat> [[VD]], <vscale x 32 x bfloat> [[VS1]], <vscale x 32 x bfloat> [[VS2]], <vscale x 32 x i1> [[MASK]], i64 7, i64 [[VL]], i64 3)
+// CHECK-RV64-NEXT: ret <vscale x 32 x bfloat> [[TMP0]]
+//
+vbfloat16m8_t test_vfnmacc_vv_bf16m8_m(vbool2_t mask, vbfloat16m8_t vd, vbfloat16m8_t vs1, vbfloat16m8_t vs2, size_t vl) {
+ return __riscv_vfnmacc_vv_bf16m8_m(mask, vd, vs1, vs2, vl);
+}
+
+// CHECK-RV64-LABEL: define dso_local <vscale x 32 x bfloat> @test_vfnmacc_vf_bf16m8_m(
+// CHECK-RV64-SAME: <vscale x 32 x i1> [[MASK:%.*]], <vscale x 32 x bfloat> [[VD:%.*]], bfloat noundef [[RS1:%.*]], <vscale x 32 x bfloat> [[VS2:%.*]], i64 noundef [[VL:%.*]]) #[[ATTR0]] {
+// CHECK-RV64-NEXT: [[ENTRY:.*:]]
+// CHECK-RV64-NEXT: [[TMP0:%.*]] = call <vscale x 32 x bfloat> @llvm.riscv.vfnmacc.mask.nxv32bf16.bf16.i64(<vscale x 32 x bfloat> [[VD]], bfloat [[RS1]], <vscale x 32 x bfloat> [[VS2]], <vscale x 32 x i1> [[MASK]], i64 7, i64 [[VL]], i64 3)
+// CHECK-RV64-NEXT: ret <vscale x 32 x bfloat> [[TMP0]]
+//
+vbfloat16m8_t test_vfnmacc_vf_bf16m8_m(vbool2_t mask, vbfloat16m8_t vd, __bf16 rs1, vbfloat16m8_t vs2, size_t vl) {
+ return __riscv_vfnmacc_vf_bf16m8_m(mask, vd, rs1, vs2, vl);
+}
+
diff --git a/clang/test/CodeGen/RISCV/rvv-intrinsics-autogenerated/zvfbfa/non-policy/non-overloaded/vfnmadd.c b/clang/test/CodeGen/RISCV/rvv-intrinsics-autogenerated/zvfbfa/non-policy/non-overloaded/vfnmadd.c
new file mode 100644
index 0000000..e519e5a
--- /dev/null
+++ b/clang/test/CodeGen/RISCV/rvv-intrinsics-autogenerated/zvfbfa/non-policy/non-overloaded/vfnmadd.c
@@ -0,0 +1,249 @@
+// NOTE: Assertions have been autogenerated by utils/update_cc_test_checks.py UTC_ARGS: --version 5
+// REQUIRES: riscv-registered-target
+// RUN: %clang_cc1 -triple riscv64 -target-feature +v \
+// RUN: -target-feature +experimental-zvfbfa -disable-O0-optnone \
+// RUN: -emit-llvm %s -o - | opt -S -passes=mem2reg | \
+// RUN: FileCheck --check-prefix=CHECK-RV64 %s
+
+#include <riscv_vector.h>
+
+// CHECK-RV64-LABEL: define dso_local <vscale x 1 x bfloat> @test_vfnmadd_vv_bf16mf4(
+// CHECK-RV64-SAME: <vscale x 1 x bfloat> [[VD:%.*]], <vscale x 1 x bfloat> [[VS1:%.*]], <vscale x 1 x bfloat> [[VS2:%.*]], i64 noundef [[VL:%.*]]) #[[ATTR0:[0-9]+]] {
+// CHECK-RV64-NEXT: [[ENTRY:.*:]]
+// CHECK-RV64-NEXT: [[TMP0:%.*]] = call <vscale x 1 x bfloat> @llvm.riscv.vfnmadd.nxv1bf16.nxv1bf16.i64(<vscale x 1 x bfloat> [[VD]], <vscale x 1 x bfloat> [[VS1]], <vscale x 1 x bfloat> [[VS2]], i64 7, i64 [[VL]], i64 3)
+// CHECK-RV64-NEXT: ret <vscale x 1 x bfloat> [[TMP0]]
+//
+vbfloat16mf4_t test_vfnmadd_vv_bf16mf4(vbfloat16mf4_t vd, vbfloat16mf4_t vs1, vbfloat16mf4_t vs2, size_t vl) {
+ return __riscv_vfnmadd_vv_bf16mf4(vd, vs1, vs2, vl);
+}
+
+// CHECK-RV64-LABEL: define dso_local <vscale x 1 x bfloat> @test_vfnmadd_vf_bf16mf4(
+// CHECK-RV64-SAME: <vscale x 1 x bfloat> [[VD:%.*]], bfloat noundef [[RS1:%.*]], <vscale x 1 x bfloat> [[VS2:%.*]], i64 noundef [[VL:%.*]]) #[[ATTR0]] {
+// CHECK-RV64-NEXT: [[ENTRY:.*:]]
+// CHECK-RV64-NEXT: [[TMP0:%.*]] = call <vscale x 1 x bfloat> @llvm.riscv.vfnmadd.nxv1bf16.bf16.i64(<vscale x 1 x bfloat> [[VD]], bfloat [[RS1]], <vscale x 1 x bfloat> [[VS2]], i64 7, i64 [[VL]], i64 3)
+// CHECK-RV64-NEXT: ret <vscale x 1 x bfloat> [[TMP0]]
+//
+vbfloat16mf4_t test_vfnmadd_vf_bf16mf4(vbfloat16mf4_t vd, __bf16 rs1, vbfloat16mf4_t vs2, size_t vl) {
+ return __riscv_vfnmadd_vf_bf16mf4(vd, rs1, vs2, vl);
+}
+
+// CHECK-RV64-LABEL: define dso_local <vscale x 2 x bfloat> @test_vfnmadd_vv_bf16mf2(
+// CHECK-RV64-SAME: <vscale x 2 x bfloat> [[VD:%.*]], <vscale x 2 x bfloat> [[VS1:%.*]], <vscale x 2 x bfloat> [[VS2:%.*]], i64 noundef [[VL:%.*]]) #[[ATTR0]] {
+// CHECK-RV64-NEXT: [[ENTRY:.*:]]
+// CHECK-RV64-NEXT: [[TMP0:%.*]] = call <vscale x 2 x bfloat> @llvm.riscv.vfnmadd.nxv2bf16.nxv2bf16.i64(<vscale x 2 x bfloat> [[VD]], <vscale x 2 x bfloat> [[VS1]], <vscale x 2 x bfloat> [[VS2]], i64 7, i64 [[VL]], i64 3)
+// CHECK-RV64-NEXT: ret <vscale x 2 x bfloat> [[TMP0]]
+//
+vbfloat16mf2_t test_vfnmadd_vv_bf16mf2(vbfloat16mf2_t vd, vbfloat16mf2_t vs1, vbfloat16mf2_t vs2, size_t vl) {
+ return __riscv_vfnmadd_vv_bf16mf2(vd, vs1, vs2, vl);
+}
+
+// CHECK-RV64-LABEL: define dso_local <vscale x 2 x bfloat> @test_vfnmadd_vf_bf16mf2(
+// CHECK-RV64-SAME: <vscale x 2 x bfloat> [[VD:%.*]], bfloat noundef [[RS1:%.*]], <vscale x 2 x bfloat> [[VS2:%.*]], i64 noundef [[VL:%.*]]) #[[ATTR0]] {
+// CHECK-RV64-NEXT: [[ENTRY:.*:]]
+// CHECK-RV64-NEXT: [[TMP0:%.*]] = call <vscale x 2 x bfloat> @llvm.riscv.vfnmadd.nxv2bf16.bf16.i64(<vscale x 2 x bfloat> [[VD]], bfloat [[RS1]], <vscale x 2 x bfloat> [[VS2]], i64 7, i64 [[VL]], i64 3)
+// CHECK-RV64-NEXT: ret <vscale x 2 x bfloat> [[TMP0]]
+//
+vbfloat16mf2_t test_vfnmadd_vf_bf16mf2(vbfloat16mf2_t vd, __bf16 rs1, vbfloat16mf2_t vs2, size_t vl) {
+ return __riscv_vfnmadd_vf_bf16mf2(vd, rs1, vs2, vl);
+}
+
+// CHECK-RV64-LABEL: define dso_local <vscale x 4 x bfloat> @test_vfnmadd_vv_bf16m1(
+// CHECK-RV64-SAME: <vscale x 4 x bfloat> [[VD:%.*]], <vscale x 4 x bfloat> [[VS1:%.*]], <vscale x 4 x bfloat> [[VS2:%.*]], i64 noundef [[VL:%.*]]) #[[ATTR0]] {
+// CHECK-RV64-NEXT: [[ENTRY:.*:]]
+// CHECK-RV64-NEXT: [[TMP0:%.*]] = call <vscale x 4 x bfloat> @llvm.riscv.vfnmadd.nxv4bf16.nxv4bf16.i64(<vscale x 4 x bfloat> [[VD]], <vscale x 4 x bfloat> [[VS1]], <vscale x 4 x bfloat> [[VS2]], i64 7, i64 [[VL]], i64 3)
+// CHECK-RV64-NEXT: ret <vscale x 4 x bfloat> [[TMP0]]
+//
+vbfloat16m1_t test_vfnmadd_vv_bf16m1(vbfloat16m1_t vd, vbfloat16m1_t vs1, vbfloat16m1_t vs2, size_t vl) {
+ return __riscv_vfnmadd_vv_bf16m1(vd, vs1, vs2, vl);
+}
+
+// CHECK-RV64-LABEL: define dso_local <vscale x 4 x bfloat> @test_vfnmadd_vf_bf16m1(
+// CHECK-RV64-SAME: <vscale x 4 x bfloat> [[VD:%.*]], bfloat noundef [[RS1:%.*]], <vscale x 4 x bfloat> [[VS2:%.*]], i64 noundef [[VL:%.*]]) #[[ATTR0]] {
+// CHECK-RV64-NEXT: [[ENTRY:.*:]]
+// CHECK-RV64-NEXT: [[TMP0:%.*]] = call <vscale x 4 x bfloat> @llvm.riscv.vfnmadd.nxv4bf16.bf16.i64(<vscale x 4 x bfloat> [[VD]], bfloat [[RS1]], <vscale x 4 x bfloat> [[VS2]], i64 7, i64 [[VL]], i64 3)
+// CHECK-RV64-NEXT: ret <vscale x 4 x bfloat> [[TMP0]]
+//
+vbfloat16m1_t test_vfnmadd_vf_bf16m1(vbfloat16m1_t vd, __bf16 rs1, vbfloat16m1_t vs2, size_t vl) {
+ return __riscv_vfnmadd_vf_bf16m1(vd, rs1, vs2, vl);
+}
+
+// CHECK-RV64-LABEL: define dso_local <vscale x 8 x bfloat> @test_vfnmadd_vv_bf16m2(
+// CHECK-RV64-SAME: <vscale x 8 x bfloat> [[VD:%.*]], <vscale x 8 x bfloat> [[VS1:%.*]], <vscale x 8 x bfloat> [[VS2:%.*]], i64 noundef [[VL:%.*]]) #[[ATTR0]] {
+// CHECK-RV64-NEXT: [[ENTRY:.*:]]
+// CHECK-RV64-NEXT: [[TMP0:%.*]] = call <vscale x 8 x bfloat> @llvm.riscv.vfnmadd.nxv8bf16.nxv8bf16.i64(<vscale x 8 x bfloat> [[VD]], <vscale x 8 x bfloat> [[VS1]], <vscale x 8 x bfloat> [[VS2]], i64 7, i64 [[VL]], i64 3)
+// CHECK-RV64-NEXT: ret <vscale x 8 x bfloat> [[TMP0]]
+//
+vbfloat16m2_t test_vfnmadd_vv_bf16m2(vbfloat16m2_t vd, vbfloat16m2_t vs1, vbfloat16m2_t vs2, size_t vl) {
+ return __riscv_vfnmadd_vv_bf16m2(vd, vs1, vs2, vl);
+}
+
+// CHECK-RV64-LABEL: define dso_local <vscale x 8 x bfloat> @test_vfnmadd_vf_bf16m2(
+// CHECK-RV64-SAME: <vscale x 8 x bfloat> [[VD:%.*]], bfloat noundef [[RS1:%.*]], <vscale x 8 x bfloat> [[VS2:%.*]], i64 noundef [[VL:%.*]]) #[[ATTR0]] {
+// CHECK-RV64-NEXT: [[ENTRY:.*:]]
+// CHECK-RV64-NEXT: [[TMP0:%.*]] = call <vscale x 8 x bfloat> @llvm.riscv.vfnmadd.nxv8bf16.bf16.i64(<vscale x 8 x bfloat> [[VD]], bfloat [[RS1]], <vscale x 8 x bfloat> [[VS2]], i64 7, i64 [[VL]], i64 3)
+// CHECK-RV64-NEXT: ret <vscale x 8 x bfloat> [[TMP0]]
+//
+vbfloat16m2_t test_vfnmadd_vf_bf16m2(vbfloat16m2_t vd, __bf16 rs1, vbfloat16m2_t vs2, size_t vl) {
+ return __riscv_vfnmadd_vf_bf16m2(vd, rs1, vs2, vl);
+}
+
+// CHECK-RV64-LABEL: define dso_local <vscale x 16 x bfloat> @test_vfnmadd_vv_bf16m4(
+// CHECK-RV64-SAME: <vscale x 16 x bfloat> [[VD:%.*]], <vscale x 16 x bfloat> [[VS1:%.*]], <vscale x 16 x bfloat> [[VS2:%.*]], i64 noundef [[VL:%.*]]) #[[ATTR0]] {
+// CHECK-RV64-NEXT: [[ENTRY:.*:]]
+// CHECK-RV64-NEXT: [[TMP0:%.*]] = call <vscale x 16 x bfloat> @llvm.riscv.vfnmadd.nxv16bf16.nxv16bf16.i64(<vscale x 16 x bfloat> [[VD]], <vscale x 16 x bfloat> [[VS1]], <vscale x 16 x bfloat> [[VS2]], i64 7, i64 [[VL]], i64 3)
+// CHECK-RV64-NEXT: ret <vscale x 16 x bfloat> [[TMP0]]
+//
+vbfloat16m4_t test_vfnmadd_vv_bf16m4(vbfloat16m4_t vd, vbfloat16m4_t vs1, vbfloat16m4_t vs2, size_t vl) {
+ return __riscv_vfnmadd_vv_bf16m4(vd, vs1, vs2, vl);
+}
+
+// CHECK-RV64-LABEL: define dso_local <vscale x 16 x bfloat> @test_vfnmadd_vf_bf16m4(
+// CHECK-RV64-SAME: <vscale x 16 x bfloat> [[VD:%.*]], bfloat noundef [[RS1:%.*]], <vscale x 16 x bfloat> [[VS2:%.*]], i64 noundef [[VL:%.*]]) #[[ATTR0]] {
+// CHECK-RV64-NEXT: [[ENTRY:.*:]]
+// CHECK-RV64-NEXT: [[TMP0:%.*]] = call <vscale x 16 x bfloat> @llvm.riscv.vfnmadd.nxv16bf16.bf16.i64(<vscale x 16 x bfloat> [[VD]], bfloat [[RS1]], <vscale x 16 x bfloat> [[VS2]], i64 7, i64 [[VL]], i64 3)
+// CHECK-RV64-NEXT: ret <vscale x 16 x bfloat> [[TMP0]]
+//
+vbfloat16m4_t test_vfnmadd_vf_bf16m4(vbfloat16m4_t vd, __bf16 rs1, vbfloat16m4_t vs2, size_t vl) {
+ return __riscv_vfnmadd_vf_bf16m4(vd, rs1, vs2, vl);
+}
+
+// CHECK-RV64-LABEL: define dso_local <vscale x 32 x bfloat> @test_vfnmadd_vv_bf16m8(
+// CHECK-RV64-SAME: <vscale x 32 x bfloat> [[VD:%.*]], <vscale x 32 x bfloat> [[VS1:%.*]], <vscale x 32 x bfloat> [[VS2:%.*]], i64 noundef [[VL:%.*]]) #[[ATTR0]] {
+// CHECK-RV64-NEXT: [[ENTRY:.*:]]
+// CHECK-RV64-NEXT: [[TMP0:%.*]] = call <vscale x 32 x bfloat> @llvm.riscv.vfnmadd.nxv32bf16.nxv32bf16.i64(<vscale x 32 x bfloat> [[VD]], <vscale x 32 x bfloat> [[VS1]], <vscale x 32 x bfloat> [[VS2]], i64 7, i64 [[VL]], i64 3)
+// CHECK-RV64-NEXT: ret <vscale x 32 x bfloat> [[TMP0]]
+//
+vbfloat16m8_t test_vfnmadd_vv_bf16m8(vbfloat16m8_t vd, vbfloat16m8_t vs1, vbfloat16m8_t vs2, size_t vl) {
+ return __riscv_vfnmadd_vv_bf16m8(vd, vs1, vs2, vl);
+}
+
+// CHECK-RV64-LABEL: define dso_local <vscale x 32 x bfloat> @test_vfnmadd_vf_bf16m8(
+// CHECK-RV64-SAME: <vscale x 32 x bfloat> [[VD:%.*]], bfloat noundef [[RS1:%.*]], <vscale x 32 x bfloat> [[VS2:%.*]], i64 noundef [[VL:%.*]]) #[[ATTR0]] {
+// CHECK-RV64-NEXT: [[ENTRY:.*:]]
+// CHECK-RV64-NEXT: [[TMP0:%.*]] = call <vscale x 32 x bfloat> @llvm.riscv.vfnmadd.nxv32bf16.bf16.i64(<vscale x 32 x bfloat> [[VD]], bfloat [[RS1]], <vscale x 32 x bfloat> [[VS2]], i64 7, i64 [[VL]], i64 3)
+// CHECK-RV64-NEXT: ret <vscale x 32 x bfloat> [[TMP0]]
+//
+vbfloat16m8_t test_vfnmadd_vf_bf16m8(vbfloat16m8_t vd, __bf16 rs1, vbfloat16m8_t vs2, size_t vl) {
+ return __riscv_vfnmadd_vf_bf16m8(vd, rs1, vs2, vl);
+}
+
+// CHECK-RV64-LABEL: define dso_local <vscale x 1 x bfloat> @test_vfnmadd_vv_bf16mf4_m(
+// CHECK-RV64-SAME: <vscale x 1 x i1> [[MASK:%.*]], <vscale x 1 x bfloat> [[VD:%.*]], <vscale x 1 x bfloat> [[VS1:%.*]], <vscale x 1 x bfloat> [[VS2:%.*]], i64 noundef [[VL:%.*]]) #[[ATTR0]] {
+// CHECK-RV64-NEXT: [[ENTRY:.*:]]
+// CHECK-RV64-NEXT: [[TMP0:%.*]] = call <vscale x 1 x bfloat> @llvm.riscv.vfnmadd.mask.nxv1bf16.nxv1bf16.i64(<vscale x 1 x bfloat> [[VD]], <vscale x 1 x bfloat> [[VS1]], <vscale x 1 x bfloat> [[VS2]], <vscale x 1 x i1> [[MASK]], i64 7, i64 [[VL]], i64 3)
+// CHECK-RV64-NEXT: ret <vscale x 1 x bfloat> [[TMP0]]
+//
+vbfloat16mf4_t test_vfnmadd_vv_bf16mf4_m(vbool64_t mask, vbfloat16mf4_t vd, vbfloat16mf4_t vs1, vbfloat16mf4_t vs2, size_t vl) {
+ return __riscv_vfnmadd_vv_bf16mf4_m(mask, vd, vs1, vs2, vl);
+}
+
+// CHECK-RV64-LABEL: define dso_local <vscale x 1 x bfloat> @test_vfnmadd_vf_bf16mf4_m(
+// CHECK-RV64-SAME: <vscale x 1 x i1> [[MASK:%.*]], <vscale x 1 x bfloat> [[VD:%.*]], bfloat noundef [[RS1:%.*]], <vscale x 1 x bfloat> [[VS2:%.*]], i64 noundef [[VL:%.*]]) #[[ATTR0]] {
+// CHECK-RV64-NEXT: [[ENTRY:.*:]]
+// CHECK-RV64-NEXT: [[TMP0:%.*]] = call <vscale x 1 x bfloat> @llvm.riscv.vfnmadd.mask.nxv1bf16.bf16.i64(<vscale x 1 x bfloat> [[VD]], bfloat [[RS1]], <vscale x 1 x bfloat> [[VS2]], <vscale x 1 x i1> [[MASK]], i64 7, i64 [[VL]], i64 3)
+// CHECK-RV64-NEXT: ret <vscale x 1 x bfloat> [[TMP0]]
+//
+vbfloat16mf4_t test_vfnmadd_vf_bf16mf4_m(vbool64_t mask, vbfloat16mf4_t vd, __bf16 rs1, vbfloat16mf4_t vs2, size_t vl) {
+ return __riscv_vfnmadd_vf_bf16mf4_m(mask, vd, rs1, vs2, vl);
+}
+
+// CHECK-RV64-LABEL: define dso_local <vscale x 2 x bfloat> @test_vfnmadd_vv_bf16mf2_m(
+// CHECK-RV64-SAME: <vscale x 2 x i1> [[MASK:%.*]], <vscale x 2 x bfloat> [[VD:%.*]], <vscale x 2 x bfloat> [[VS1:%.*]], <vscale x 2 x bfloat> [[VS2:%.*]], i64 noundef [[VL:%.*]]) #[[ATTR0]] {
+// CHECK-RV64-NEXT: [[ENTRY:.*:]]
+// CHECK-RV64-NEXT: [[TMP0:%.*]] = call <vscale x 2 x bfloat> @llvm.riscv.vfnmadd.mask.nxv2bf16.nxv2bf16.i64(<vscale x 2 x bfloat> [[VD]], <vscale x 2 x bfloat> [[VS1]], <vscale x 2 x bfloat> [[VS2]], <vscale x 2 x i1> [[MASK]], i64 7, i64 [[VL]], i64 3)
+// CHECK-RV64-NEXT: ret <vscale x 2 x bfloat> [[TMP0]]
+//
+vbfloat16mf2_t test_vfnmadd_vv_bf16mf2_m(vbool32_t mask, vbfloat16mf2_t vd, vbfloat16mf2_t vs1, vbfloat16mf2_t vs2, size_t vl) {
+ return __riscv_vfnmadd_vv_bf16mf2_m(mask, vd, vs1, vs2, vl);
+}
+
+// CHECK-RV64-LABEL: define dso_local <vscale x 2 x bfloat> @test_vfnmadd_vf_bf16mf2_m(
+// CHECK-RV64-SAME: <vscale x 2 x i1> [[MASK:%.*]], <vscale x 2 x bfloat> [[VD:%.*]], bfloat noundef [[RS1:%.*]], <vscale x 2 x bfloat> [[VS2:%.*]], i64 noundef [[VL:%.*]]) #[[ATTR0]] {
+// CHECK-RV64-NEXT: [[ENTRY:.*:]]
+// CHECK-RV64-NEXT: [[TMP0:%.*]] = call <vscale x 2 x bfloat> @llvm.riscv.vfnmadd.mask.nxv2bf16.bf16.i64(<vscale x 2 x bfloat> [[VD]], bfloat [[RS1]], <vscale x 2 x bfloat> [[VS2]], <vscale x 2 x i1> [[MASK]], i64 7, i64 [[VL]], i64 3)
+// CHECK-RV64-NEXT: ret <vscale x 2 x bfloat> [[TMP0]]
+//
+vbfloat16mf2_t test_vfnmadd_vf_bf16mf2_m(vbool32_t mask, vbfloat16mf2_t vd, __bf16 rs1, vbfloat16mf2_t vs2, size_t vl) {
+ return __riscv_vfnmadd_vf_bf16mf2_m(mask, vd, rs1, vs2, vl);
+}
+
+// CHECK-RV64-LABEL: define dso_local <vscale x 4 x bfloat> @test_vfnmadd_vv_bf16m1_m(
+// CHECK-RV64-SAME: <vscale x 4 x i1> [[MASK:%.*]], <vscale x 4 x bfloat> [[VD:%.*]], <vscale x 4 x bfloat> [[VS1:%.*]], <vscale x 4 x bfloat> [[VS2:%.*]], i64 noundef [[VL:%.*]]) #[[ATTR0]] {
+// CHECK-RV64-NEXT: [[ENTRY:.*:]]
+// CHECK-RV64-NEXT: [[TMP0:%.*]] = call <vscale x 4 x bfloat> @llvm.riscv.vfnmadd.mask.nxv4bf16.nxv4bf16.i64(<vscale x 4 x bfloat> [[VD]], <vscale x 4 x bfloat> [[VS1]], <vscale x 4 x bfloat> [[VS2]], <vscale x 4 x i1> [[MASK]], i64 7, i64 [[VL]], i64 3)
+// CHECK-RV64-NEXT: ret <vscale x 4 x bfloat> [[TMP0]]
+//
+vbfloat16m1_t test_vfnmadd_vv_bf16m1_m(vbool16_t mask, vbfloat16m1_t vd, vbfloat16m1_t vs1, vbfloat16m1_t vs2, size_t vl) {
+ return __riscv_vfnmadd_vv_bf16m1_m(mask, vd, vs1, vs2, vl);
+}
+
+// CHECK-RV64-LABEL: define dso_local <vscale x 4 x bfloat> @test_vfnmadd_vf_bf16m1_m(
+// CHECK-RV64-SAME: <vscale x 4 x i1> [[MASK:%.*]], <vscale x 4 x bfloat> [[VD:%.*]], bfloat noundef [[RS1:%.*]], <vscale x 4 x bfloat> [[VS2:%.*]], i64 noundef [[VL:%.*]]) #[[ATTR0]] {
+// CHECK-RV64-NEXT: [[ENTRY:.*:]]
+// CHECK-RV64-NEXT: [[TMP0:%.*]] = call <vscale x 4 x bfloat> @llvm.riscv.vfnmadd.mask.nxv4bf16.bf16.i64(<vscale x 4 x bfloat> [[VD]], bfloat [[RS1]], <vscale x 4 x bfloat> [[VS2]], <vscale x 4 x i1> [[MASK]], i64 7, i64 [[VL]], i64 3)
+// CHECK-RV64-NEXT: ret <vscale x 4 x bfloat> [[TMP0]]
+//
+vbfloat16m1_t test_vfnmadd_vf_bf16m1_m(vbool16_t mask, vbfloat16m1_t vd, __bf16 rs1, vbfloat16m1_t vs2, size_t vl) {
+ return __riscv_vfnmadd_vf_bf16m1_m(mask, vd, rs1, vs2, vl);
+}
+
+// CHECK-RV64-LABEL: define dso_local <vscale x 8 x bfloat> @test_vfnmadd_vv_bf16m2_m(
+// CHECK-RV64-SAME: <vscale x 8 x i1> [[MASK:%.*]], <vscale x 8 x bfloat> [[VD:%.*]], <vscale x 8 x bfloat> [[VS1:%.*]], <vscale x 8 x bfloat> [[VS2:%.*]], i64 noundef [[VL:%.*]]) #[[ATTR0]] {
+// CHECK-RV64-NEXT: [[ENTRY:.*:]]
+// CHECK-RV64-NEXT: [[TMP0:%.*]] = call <vscale x 8 x bfloat> @llvm.riscv.vfnmadd.mask.nxv8bf16.nxv8bf16.i64(<vscale x 8 x bfloat> [[VD]], <vscale x 8 x bfloat> [[VS1]], <vscale x 8 x bfloat> [[VS2]], <vscale x 8 x i1> [[MASK]], i64 7, i64 [[VL]], i64 3)
+// CHECK-RV64-NEXT: ret <vscale x 8 x bfloat> [[TMP0]]
+//
+vbfloat16m2_t test_vfnmadd_vv_bf16m2_m(vbool8_t mask, vbfloat16m2_t vd, vbfloat16m2_t vs1, vbfloat16m2_t vs2, size_t vl) {
+ return __riscv_vfnmadd_vv_bf16m2_m(mask, vd, vs1, vs2, vl);
+}
+
+// CHECK-RV64-LABEL: define dso_local <vscale x 8 x bfloat> @test_vfnmadd_vf_bf16m2_m(
+// CHECK-RV64-SAME: <vscale x 8 x i1> [[MASK:%.*]], <vscale x 8 x bfloat> [[VD:%.*]], bfloat noundef [[RS1:%.*]], <vscale x 8 x bfloat> [[VS2:%.*]], i64 noundef [[VL:%.*]]) #[[ATTR0]] {
+// CHECK-RV64-NEXT: [[ENTRY:.*:]]
+// CHECK-RV64-NEXT: [[TMP0:%.*]] = call <vscale x 8 x bfloat> @llvm.riscv.vfnmadd.mask.nxv8bf16.bf16.i64(<vscale x 8 x bfloat> [[VD]], bfloat [[RS1]], <vscale x 8 x bfloat> [[VS2]], <vscale x 8 x i1> [[MASK]], i64 7, i64 [[VL]], i64 3)
+// CHECK-RV64-NEXT: ret <vscale x 8 x bfloat> [[TMP0]]
+//
+vbfloat16m2_t test_vfnmadd_vf_bf16m2_m(vbool8_t mask, vbfloat16m2_t vd, __bf16 rs1, vbfloat16m2_t vs2, size_t vl) {
+ return __riscv_vfnmadd_vf_bf16m2_m(mask, vd, rs1, vs2, vl);
+}
+
+// CHECK-RV64-LABEL: define dso_local <vscale x 16 x bfloat> @test_vfnmadd_vv_bf16m4_m(
+// CHECK-RV64-SAME: <vscale x 16 x i1> [[MASK:%.*]], <vscale x 16 x bfloat> [[VD:%.*]], <vscale x 16 x bfloat> [[VS1:%.*]], <vscale x 16 x bfloat> [[VS2:%.*]], i64 noundef [[VL:%.*]]) #[[ATTR0]] {
+// CHECK-RV64-NEXT: [[ENTRY:.*:]]
+// CHECK-RV64-NEXT: [[TMP0:%.*]] = call <vscale x 16 x bfloat> @llvm.riscv.vfnmadd.mask.nxv16bf16.nxv16bf16.i64(<vscale x 16 x bfloat> [[VD]], <vscale x 16 x bfloat> [[VS1]], <vscale x 16 x bfloat> [[VS2]], <vscale x 16 x i1> [[MASK]], i64 7, i64 [[VL]], i64 3)
+// CHECK-RV64-NEXT: ret <vscale x 16 x bfloat> [[TMP0]]
+//
+vbfloat16m4_t test_vfnmadd_vv_bf16m4_m(vbool4_t mask, vbfloat16m4_t vd, vbfloat16m4_t vs1, vbfloat16m4_t vs2, size_t vl) {
+ return __riscv_vfnmadd_vv_bf16m4_m(mask, vd, vs1, vs2, vl);
+}
+
+// CHECK-RV64-LABEL: define dso_local <vscale x 16 x bfloat> @test_vfnmadd_vf_bf16m4_m(
+// CHECK-RV64-SAME: <vscale x 16 x i1> [[MASK:%.*]], <vscale x 16 x bfloat> [[VD:%.*]], bfloat noundef [[RS1:%.*]], <vscale x 16 x bfloat> [[VS2:%.*]], i64 noundef [[VL:%.*]]) #[[ATTR0]] {
+// CHECK-RV64-NEXT: [[ENTRY:.*:]]
+// CHECK-RV64-NEXT: [[TMP0:%.*]] = call <vscale x 16 x bfloat> @llvm.riscv.vfnmadd.mask.nxv16bf16.bf16.i64(<vscale x 16 x bfloat> [[VD]], bfloat [[RS1]], <vscale x 16 x bfloat> [[VS2]], <vscale x 16 x i1> [[MASK]], i64 7, i64 [[VL]], i64 3)
+// CHECK-RV64-NEXT: ret <vscale x 16 x bfloat> [[TMP0]]
+//
+vbfloat16m4_t test_vfnmadd_vf_bf16m4_m(vbool4_t mask, vbfloat16m4_t vd, __bf16 rs1, vbfloat16m4_t vs2, size_t vl) {
+ return __riscv_vfnmadd_vf_bf16m4_m(mask, vd, rs1, vs2, vl);
+}
+
+// CHECK-RV64-LABEL: define dso_local <vscale x 32 x bfloat> @test_vfnmadd_vv_bf16m8_m(
+// CHECK-RV64-SAME: <vscale x 32 x i1> [[MASK:%.*]], <vscale x 32 x bfloat> [[VD:%.*]], <vscale x 32 x bfloat> [[VS1:%.*]], <vscale x 32 x bfloat> [[VS2:%.*]], i64 noundef [[VL:%.*]]) #[[ATTR0]] {
+// CHECK-RV64-NEXT: [[ENTRY:.*:]]
+// CHECK-RV64-NEXT: [[TMP0:%.*]] = call <vscale x 32 x bfloat> @llvm.riscv.vfnmadd.mask.nxv32bf16.nxv32bf16.i64(<vscale x 32 x bfloat> [[VD]], <vscale x 32 x bfloat> [[VS1]], <vscale x 32 x bfloat> [[VS2]], <vscale x 32 x i1> [[MASK]], i64 7, i64 [[VL]], i64 3)
+// CHECK-RV64-NEXT: ret <vscale x 32 x bfloat> [[TMP0]]
+//
+vbfloat16m8_t test_vfnmadd_vv_bf16m8_m(vbool2_t mask, vbfloat16m8_t vd, vbfloat16m8_t vs1, vbfloat16m8_t vs2, size_t vl) {
+ return __riscv_vfnmadd_vv_bf16m8_m(mask, vd, vs1, vs2, vl);
+}
+
+// CHECK-RV64-LABEL: define dso_local <vscale x 32 x bfloat> @test_vfnmadd_vf_bf16m8_m(
+// CHECK-RV64-SAME: <vscale x 32 x i1> [[MASK:%.*]], <vscale x 32 x bfloat> [[VD:%.*]], bfloat noundef [[RS1:%.*]], <vscale x 32 x bfloat> [[VS2:%.*]], i64 noundef [[VL:%.*]]) #[[ATTR0]] {
+// CHECK-RV64-NEXT: [[ENTRY:.*:]]
+// CHECK-RV64-NEXT: [[TMP0:%.*]] = call <vscale x 32 x bfloat> @llvm.riscv.vfnmadd.mask.nxv32bf16.bf16.i64(<vscale x 32 x bfloat> [[VD]], bfloat [[RS1]], <vscale x 32 x bfloat> [[VS2]], <vscale x 32 x i1> [[MASK]], i64 7, i64 [[VL]], i64 3)
+// CHECK-RV64-NEXT: ret <vscale x 32 x bfloat> [[TMP0]]
+//
+vbfloat16m8_t test_vfnmadd_vf_bf16m8_m(vbool2_t mask, vbfloat16m8_t vd, __bf16 rs1, vbfloat16m8_t vs2, size_t vl) {
+ return __riscv_vfnmadd_vf_bf16m8_m(mask, vd, rs1, vs2, vl);
+}
+
diff --git a/clang/test/CodeGen/RISCV/rvv-intrinsics-autogenerated/zvfbfa/non-policy/non-overloaded/vfnmsac.c b/clang/test/CodeGen/RISCV/rvv-intrinsics-autogenerated/zvfbfa/non-policy/non-overloaded/vfnmsac.c
new file mode 100644
index 0000000..47e1f44
--- /dev/null
+++ b/clang/test/CodeGen/RISCV/rvv-intrinsics-autogenerated/zvfbfa/non-policy/non-overloaded/vfnmsac.c
@@ -0,0 +1,249 @@
+// NOTE: Assertions have been autogenerated by utils/update_cc_test_checks.py UTC_ARGS: --version 5
+// REQUIRES: riscv-registered-target
+// RUN: %clang_cc1 -triple riscv64 -target-feature +v \
+// RUN: -target-feature +experimental-zvfbfa -disable-O0-optnone \
+// RUN: -emit-llvm %s -o - | opt -S -passes=mem2reg | \
+// RUN: FileCheck --check-prefix=CHECK-RV64 %s
+
+#include <riscv_vector.h>
+
+// CHECK-RV64-LABEL: define dso_local <vscale x 1 x bfloat> @test_vfnmsac_vv_bf16mf4(
+// CHECK-RV64-SAME: <vscale x 1 x bfloat> [[VD:%.*]], <vscale x 1 x bfloat> [[VS1:%.*]], <vscale x 1 x bfloat> [[VS2:%.*]], i64 noundef [[VL:%.*]]) #[[ATTR0:[0-9]+]] {
+// CHECK-RV64-NEXT: [[ENTRY:.*:]]
+// CHECK-RV64-NEXT: [[TMP0:%.*]] = call <vscale x 1 x bfloat> @llvm.riscv.vfnmsac.nxv1bf16.nxv1bf16.i64(<vscale x 1 x bfloat> [[VD]], <vscale x 1 x bfloat> [[VS1]], <vscale x 1 x bfloat> [[VS2]], i64 7, i64 [[VL]], i64 3)
+// CHECK-RV64-NEXT: ret <vscale x 1 x bfloat> [[TMP0]]
+//
+vbfloat16mf4_t test_vfnmsac_vv_bf16mf4(vbfloat16mf4_t vd, vbfloat16mf4_t vs1, vbfloat16mf4_t vs2, size_t vl) {
+ return __riscv_vfnmsac_vv_bf16mf4(vd, vs1, vs2, vl);
+}
+
+// CHECK-RV64-LABEL: define dso_local <vscale x 1 x bfloat> @test_vfnmsac_vf_bf16mf4(
+// CHECK-RV64-SAME: <vscale x 1 x bfloat> [[VD:%.*]], bfloat noundef [[RS1:%.*]], <vscale x 1 x bfloat> [[VS2:%.*]], i64 noundef [[VL:%.*]]) #[[ATTR0]] {
+// CHECK-RV64-NEXT: [[ENTRY:.*:]]
+// CHECK-RV64-NEXT: [[TMP0:%.*]] = call <vscale x 1 x bfloat> @llvm.riscv.vfnmsac.nxv1bf16.bf16.i64(<vscale x 1 x bfloat> [[VD]], bfloat [[RS1]], <vscale x 1 x bfloat> [[VS2]], i64 7, i64 [[VL]], i64 3)
+// CHECK-RV64-NEXT: ret <vscale x 1 x bfloat> [[TMP0]]
+//
+vbfloat16mf4_t test_vfnmsac_vf_bf16mf4(vbfloat16mf4_t vd, __bf16 rs1, vbfloat16mf4_t vs2, size_t vl) {
+ return __riscv_vfnmsac_vf_bf16mf4(vd, rs1, vs2, vl);
+}
+
+// CHECK-RV64-LABEL: define dso_local <vscale x 2 x bfloat> @test_vfnmsac_vv_bf16mf2(
+// CHECK-RV64-SAME: <vscale x 2 x bfloat> [[VD:%.*]], <vscale x 2 x bfloat> [[VS1:%.*]], <vscale x 2 x bfloat> [[VS2:%.*]], i64 noundef [[VL:%.*]]) #[[ATTR0]] {
+// CHECK-RV64-NEXT: [[ENTRY:.*:]]
+// CHECK-RV64-NEXT: [[TMP0:%.*]] = call <vscale x 2 x bfloat> @llvm.riscv.vfnmsac.nxv2bf16.nxv2bf16.i64(<vscale x 2 x bfloat> [[VD]], <vscale x 2 x bfloat> [[VS1]], <vscale x 2 x bfloat> [[VS2]], i64 7, i64 [[VL]], i64 3)
+// CHECK-RV64-NEXT: ret <vscale x 2 x bfloat> [[TMP0]]
+//
+vbfloat16mf2_t test_vfnmsac_vv_bf16mf2(vbfloat16mf2_t vd, vbfloat16mf2_t vs1, vbfloat16mf2_t vs2, size_t vl) {
+ return __riscv_vfnmsac_vv_bf16mf2(vd, vs1, vs2, vl);
+}
+
+// CHECK-RV64-LABEL: define dso_local <vscale x 2 x bfloat> @test_vfnmsac_vf_bf16mf2(
+// CHECK-RV64-SAME: <vscale x 2 x bfloat> [[VD:%.*]], bfloat noundef [[RS1:%.*]], <vscale x 2 x bfloat> [[VS2:%.*]], i64 noundef [[VL:%.*]]) #[[ATTR0]] {
+// CHECK-RV64-NEXT: [[ENTRY:.*:]]
+// CHECK-RV64-NEXT: [[TMP0:%.*]] = call <vscale x 2 x bfloat> @llvm.riscv.vfnmsac.nxv2bf16.bf16.i64(<vscale x 2 x bfloat> [[VD]], bfloat [[RS1]], <vscale x 2 x bfloat> [[VS2]], i64 7, i64 [[VL]], i64 3)
+// CHECK-RV64-NEXT: ret <vscale x 2 x bfloat> [[TMP0]]
+//
+vbfloat16mf2_t test_vfnmsac_vf_bf16mf2(vbfloat16mf2_t vd, __bf16 rs1, vbfloat16mf2_t vs2, size_t vl) {
+ return __riscv_vfnmsac_vf_bf16mf2(vd, rs1, vs2, vl);
+}
+
+// CHECK-RV64-LABEL: define dso_local <vscale x 4 x bfloat> @test_vfnmsac_vv_bf16m1(
+// CHECK-RV64-SAME: <vscale x 4 x bfloat> [[VD:%.*]], <vscale x 4 x bfloat> [[VS1:%.*]], <vscale x 4 x bfloat> [[VS2:%.*]], i64 noundef [[VL:%.*]]) #[[ATTR0]] {
+// CHECK-RV64-NEXT: [[ENTRY:.*:]]
+// CHECK-RV64-NEXT: [[TMP0:%.*]] = call <vscale x 4 x bfloat> @llvm.riscv.vfnmsac.nxv4bf16.nxv4bf16.i64(<vscale x 4 x bfloat> [[VD]], <vscale x 4 x bfloat> [[VS1]], <vscale x 4 x bfloat> [[VS2]], i64 7, i64 [[VL]], i64 3)
+// CHECK-RV64-NEXT: ret <vscale x 4 x bfloat> [[TMP0]]
+//
+vbfloat16m1_t test_vfnmsac_vv_bf16m1(vbfloat16m1_t vd, vbfloat16m1_t vs1, vbfloat16m1_t vs2, size_t vl) {
+ return __riscv_vfnmsac_vv_bf16m1(vd, vs1, vs2, vl);
+}
+
+// CHECK-RV64-LABEL: define dso_local <vscale x 4 x bfloat> @test_vfnmsac_vf_bf16m1(
+// CHECK-RV64-SAME: <vscale x 4 x bfloat> [[VD:%.*]], bfloat noundef [[RS1:%.*]], <vscale x 4 x bfloat> [[VS2:%.*]], i64 noundef [[VL:%.*]]) #[[ATTR0]] {
+// CHECK-RV64-NEXT: [[ENTRY:.*:]]
+// CHECK-RV64-NEXT: [[TMP0:%.*]] = call <vscale x 4 x bfloat> @llvm.riscv.vfnmsac.nxv4bf16.bf16.i64(<vscale x 4 x bfloat> [[VD]], bfloat [[RS1]], <vscale x 4 x bfloat> [[VS2]], i64 7, i64 [[VL]], i64 3)
+// CHECK-RV64-NEXT: ret <vscale x 4 x bfloat> [[TMP0]]
+//
+vbfloat16m1_t test_vfnmsac_vf_bf16m1(vbfloat16m1_t vd, __bf16 rs1, vbfloat16m1_t vs2, size_t vl) {
+ return __riscv_vfnmsac_vf_bf16m1(vd, rs1, vs2, vl);
+}
+
+// CHECK-RV64-LABEL: define dso_local <vscale x 8 x bfloat> @test_vfnmsac_vv_bf16m2(
+// CHECK-RV64-SAME: <vscale x 8 x bfloat> [[VD:%.*]], <vscale x 8 x bfloat> [[VS1:%.*]], <vscale x 8 x bfloat> [[VS2:%.*]], i64 noundef [[VL:%.*]]) #[[ATTR0]] {
+// CHECK-RV64-NEXT: [[ENTRY:.*:]]
+// CHECK-RV64-NEXT: [[TMP0:%.*]] = call <vscale x 8 x bfloat> @llvm.riscv.vfnmsac.nxv8bf16.nxv8bf16.i64(<vscale x 8 x bfloat> [[VD]], <vscale x 8 x bfloat> [[VS1]], <vscale x 8 x bfloat> [[VS2]], i64 7, i64 [[VL]], i64 3)
+// CHECK-RV64-NEXT: ret <vscale x 8 x bfloat> [[TMP0]]
+//
+vbfloat16m2_t test_vfnmsac_vv_bf16m2(vbfloat16m2_t vd, vbfloat16m2_t vs1, vbfloat16m2_t vs2, size_t vl) {
+ return __riscv_vfnmsac_vv_bf16m2(vd, vs1, vs2, vl);
+}
+
+// CHECK-RV64-LABEL: define dso_local <vscale x 8 x bfloat> @test_vfnmsac_vf_bf16m2(
+// CHECK-RV64-SAME: <vscale x 8 x bfloat> [[VD:%.*]], bfloat noundef [[RS1:%.*]], <vscale x 8 x bfloat> [[VS2:%.*]], i64 noundef [[VL:%.*]]) #[[ATTR0]] {
+// CHECK-RV64-NEXT: [[ENTRY:.*:]]
+// CHECK-RV64-NEXT: [[TMP0:%.*]] = call <vscale x 8 x bfloat> @llvm.riscv.vfnmsac.nxv8bf16.bf16.i64(<vscale x 8 x bfloat> [[VD]], bfloat [[RS1]], <vscale x 8 x bfloat> [[VS2]], i64 7, i64 [[VL]], i64 3)
+// CHECK-RV64-NEXT: ret <vscale x 8 x bfloat> [[TMP0]]
+//
+vbfloat16m2_t test_vfnmsac_vf_bf16m2(vbfloat16m2_t vd, __bf16 rs1, vbfloat16m2_t vs2, size_t vl) {
+ return __riscv_vfnmsac_vf_bf16m2(vd, rs1, vs2, vl);
+}
+
+// CHECK-RV64-LABEL: define dso_local <vscale x 16 x bfloat> @test_vfnmsac_vv_bf16m4(
+// CHECK-RV64-SAME: <vscale x 16 x bfloat> [[VD:%.*]], <vscale x 16 x bfloat> [[VS1:%.*]], <vscale x 16 x bfloat> [[VS2:%.*]], i64 noundef [[VL:%.*]]) #[[ATTR0]] {
+// CHECK-RV64-NEXT: [[ENTRY:.*:]]
+// CHECK-RV64-NEXT: [[TMP0:%.*]] = call <vscale x 16 x bfloat> @llvm.riscv.vfnmsac.nxv16bf16.nxv16bf16.i64(<vscale x 16 x bfloat> [[VD]], <vscale x 16 x bfloat> [[VS1]], <vscale x 16 x bfloat> [[VS2]], i64 7, i64 [[VL]], i64 3)
+// CHECK-RV64-NEXT: ret <vscale x 16 x bfloat> [[TMP0]]
+//
+vbfloat16m4_t test_vfnmsac_vv_bf16m4(vbfloat16m4_t vd, vbfloat16m4_t vs1, vbfloat16m4_t vs2, size_t vl) {
+ return __riscv_vfnmsac_vv_bf16m4(vd, vs1, vs2, vl);
+}
+
+// CHECK-RV64-LABEL: define dso_local <vscale x 16 x bfloat> @test_vfnmsac_vf_bf16m4(
+// CHECK-RV64-SAME: <vscale x 16 x bfloat> [[VD:%.*]], bfloat noundef [[RS1:%.*]], <vscale x 16 x bfloat> [[VS2:%.*]], i64 noundef [[VL:%.*]]) #[[ATTR0]] {
+// CHECK-RV64-NEXT: [[ENTRY:.*:]]
+// CHECK-RV64-NEXT: [[TMP0:%.*]] = call <vscale x 16 x bfloat> @llvm.riscv.vfnmsac.nxv16bf16.bf16.i64(<vscale x 16 x bfloat> [[VD]], bfloat [[RS1]], <vscale x 16 x bfloat> [[VS2]], i64 7, i64 [[VL]], i64 3)
+// CHECK-RV64-NEXT: ret <vscale x 16 x bfloat> [[TMP0]]
+//
+vbfloat16m4_t test_vfnmsac_vf_bf16m4(vbfloat16m4_t vd, __bf16 rs1, vbfloat16m4_t vs2, size_t vl) {
+ return __riscv_vfnmsac_vf_bf16m4(vd, rs1, vs2, vl);
+}
+
+// CHECK-RV64-LABEL: define dso_local <vscale x 32 x bfloat> @test_vfnmsac_vv_bf16m8(
+// CHECK-RV64-SAME: <vscale x 32 x bfloat> [[VD:%.*]], <vscale x 32 x bfloat> [[VS1:%.*]], <vscale x 32 x bfloat> [[VS2:%.*]], i64 noundef [[VL:%.*]]) #[[ATTR0]] {
+// CHECK-RV64-NEXT: [[ENTRY:.*:]]
+// CHECK-RV64-NEXT: [[TMP0:%.*]] = call <vscale x 32 x bfloat> @llvm.riscv.vfnmsac.nxv32bf16.nxv32bf16.i64(<vscale x 32 x bfloat> [[VD]], <vscale x 32 x bfloat> [[VS1]], <vscale x 32 x bfloat> [[VS2]], i64 7, i64 [[VL]], i64 3)
+// CHECK-RV64-NEXT: ret <vscale x 32 x bfloat> [[TMP0]]
+//
+vbfloat16m8_t test_vfnmsac_vv_bf16m8(vbfloat16m8_t vd, vbfloat16m8_t vs1, vbfloat16m8_t vs2, size_t vl) {
+ return __riscv_vfnmsac_vv_bf16m8(vd, vs1, vs2, vl);
+}
+
+// CHECK-RV64-LABEL: define dso_local <vscale x 32 x bfloat> @test_vfnmsac_vf_bf16m8(
+// CHECK-RV64-SAME: <vscale x 32 x bfloat> [[VD:%.*]], bfloat noundef [[RS1:%.*]], <vscale x 32 x bfloat> [[VS2:%.*]], i64 noundef [[VL:%.*]]) #[[ATTR0]] {
+// CHECK-RV64-NEXT: [[ENTRY:.*:]]
+// CHECK-RV64-NEXT: [[TMP0:%.*]] = call <vscale x 32 x bfloat> @llvm.riscv.vfnmsac.nxv32bf16.bf16.i64(<vscale x 32 x bfloat> [[VD]], bfloat [[RS1]], <vscale x 32 x bfloat> [[VS2]], i64 7, i64 [[VL]], i64 3)
+// CHECK-RV64-NEXT: ret <vscale x 32 x bfloat> [[TMP0]]
+//
+vbfloat16m8_t test_vfnmsac_vf_bf16m8(vbfloat16m8_t vd, __bf16 rs1, vbfloat16m8_t vs2, size_t vl) {
+ return __riscv_vfnmsac_vf_bf16m8(vd, rs1, vs2, vl);
+}
+
+// CHECK-RV64-LABEL: define dso_local <vscale x 1 x bfloat> @test_vfnmsac_vv_bf16mf4_m(
+// CHECK-RV64-SAME: <vscale x 1 x i1> [[MASK:%.*]], <vscale x 1 x bfloat> [[VD:%.*]], <vscale x 1 x bfloat> [[VS1:%.*]], <vscale x 1 x bfloat> [[VS2:%.*]], i64 noundef [[VL:%.*]]) #[[ATTR0]] {
+// CHECK-RV64-NEXT: [[ENTRY:.*:]]
+// CHECK-RV64-NEXT: [[TMP0:%.*]] = call <vscale x 1 x bfloat> @llvm.riscv.vfnmsac.mask.nxv1bf16.nxv1bf16.i64(<vscale x 1 x bfloat> [[VD]], <vscale x 1 x bfloat> [[VS1]], <vscale x 1 x bfloat> [[VS2]], <vscale x 1 x i1> [[MASK]], i64 7, i64 [[VL]], i64 3)
+// CHECK-RV64-NEXT: ret <vscale x 1 x bfloat> [[TMP0]]
+//
+vbfloat16mf4_t test_vfnmsac_vv_bf16mf4_m(vbool64_t mask, vbfloat16mf4_t vd, vbfloat16mf4_t vs1, vbfloat16mf4_t vs2, size_t vl) {
+ return __riscv_vfnmsac_vv_bf16mf4_m(mask, vd, vs1, vs2, vl);
+}
+
+// CHECK-RV64-LABEL: define dso_local <vscale x 1 x bfloat> @test_vfnmsac_vf_bf16mf4_m(
+// CHECK-RV64-SAME: <vscale x 1 x i1> [[MASK:%.*]], <vscale x 1 x bfloat> [[VD:%.*]], bfloat noundef [[RS1:%.*]], <vscale x 1 x bfloat> [[VS2:%.*]], i64 noundef [[VL:%.*]]) #[[ATTR0]] {
+// CHECK-RV64-NEXT: [[ENTRY:.*:]]
+// CHECK-RV64-NEXT: [[TMP0:%.*]] = call <vscale x 1 x bfloat> @llvm.riscv.vfnmsac.mask.nxv1bf16.bf16.i64(<vscale x 1 x bfloat> [[VD]], bfloat [[RS1]], <vscale x 1 x bfloat> [[VS2]], <vscale x 1 x i1> [[MASK]], i64 7, i64 [[VL]], i64 3)
+// CHECK-RV64-NEXT: ret <vscale x 1 x bfloat> [[TMP0]]
+//
+vbfloat16mf4_t test_vfnmsac_vf_bf16mf4_m(vbool64_t mask, vbfloat16mf4_t vd, __bf16 rs1, vbfloat16mf4_t vs2, size_t vl) {
+ return __riscv_vfnmsac_vf_bf16mf4_m(mask, vd, rs1, vs2, vl);
+}
+
+// CHECK-RV64-LABEL: define dso_local <vscale x 2 x bfloat> @test_vfnmsac_vv_bf16mf2_m(
+// CHECK-RV64-SAME: <vscale x 2 x i1> [[MASK:%.*]], <vscale x 2 x bfloat> [[VD:%.*]], <vscale x 2 x bfloat> [[VS1:%.*]], <vscale x 2 x bfloat> [[VS2:%.*]], i64 noundef [[VL:%.*]]) #[[ATTR0]] {
+// CHECK-RV64-NEXT: [[ENTRY:.*:]]
+// CHECK-RV64-NEXT: [[TMP0:%.*]] = call <vscale x 2 x bfloat> @llvm.riscv.vfnmsac.mask.nxv2bf16.nxv2bf16.i64(<vscale x 2 x bfloat> [[VD]], <vscale x 2 x bfloat> [[VS1]], <vscale x 2 x bfloat> [[VS2]], <vscale x 2 x i1> [[MASK]], i64 7, i64 [[VL]], i64 3)
+// CHECK-RV64-NEXT: ret <vscale x 2 x bfloat> [[TMP0]]
+//
+vbfloat16mf2_t test_vfnmsac_vv_bf16mf2_m(vbool32_t mask, vbfloat16mf2_t vd, vbfloat16mf2_t vs1, vbfloat16mf2_t vs2, size_t vl) {
+ return __riscv_vfnmsac_vv_bf16mf2_m(mask, vd, vs1, vs2, vl);
+}
+
+// CHECK-RV64-LABEL: define dso_local <vscale x 2 x bfloat> @test_vfnmsac_vf_bf16mf2_m(
+// CHECK-RV64-SAME: <vscale x 2 x i1> [[MASK:%.*]], <vscale x 2 x bfloat> [[VD:%.*]], bfloat noundef [[RS1:%.*]], <vscale x 2 x bfloat> [[VS2:%.*]], i64 noundef [[VL:%.*]]) #[[ATTR0]] {
+// CHECK-RV64-NEXT: [[ENTRY:.*:]]
+// CHECK-RV64-NEXT: [[TMP0:%.*]] = call <vscale x 2 x bfloat> @llvm.riscv.vfnmsac.mask.nxv2bf16.bf16.i64(<vscale x 2 x bfloat> [[VD]], bfloat [[RS1]], <vscale x 2 x bfloat> [[VS2]], <vscale x 2 x i1> [[MASK]], i64 7, i64 [[VL]], i64 3)
+// CHECK-RV64-NEXT: ret <vscale x 2 x bfloat> [[TMP0]]
+//
+vbfloat16mf2_t test_vfnmsac_vf_bf16mf2_m(vbool32_t mask, vbfloat16mf2_t vd, __bf16 rs1, vbfloat16mf2_t vs2, size_t vl) {
+ return __riscv_vfnmsac_vf_bf16mf2_m(mask, vd, rs1, vs2, vl);
+}
+
+// CHECK-RV64-LABEL: define dso_local <vscale x 4 x bfloat> @test_vfnmsac_vv_bf16m1_m(
+// CHECK-RV64-SAME: <vscale x 4 x i1> [[MASK:%.*]], <vscale x 4 x bfloat> [[VD:%.*]], <vscale x 4 x bfloat> [[VS1:%.*]], <vscale x 4 x bfloat> [[VS2:%.*]], i64 noundef [[VL:%.*]]) #[[ATTR0]] {
+// CHECK-RV64-NEXT: [[ENTRY:.*:]]
+// CHECK-RV64-NEXT: [[TMP0:%.*]] = call <vscale x 4 x bfloat> @llvm.riscv.vfnmsac.mask.nxv4bf16.nxv4bf16.i64(<vscale x 4 x bfloat> [[VD]], <vscale x 4 x bfloat> [[VS1]], <vscale x 4 x bfloat> [[VS2]], <vscale x 4 x i1> [[MASK]], i64 7, i64 [[VL]], i64 3)
+// CHECK-RV64-NEXT: ret <vscale x 4 x bfloat> [[TMP0]]
+//
+vbfloat16m1_t test_vfnmsac_vv_bf16m1_m(vbool16_t mask, vbfloat16m1_t vd, vbfloat16m1_t vs1, vbfloat16m1_t vs2, size_t vl) {
+ return __riscv_vfnmsac_vv_bf16m1_m(mask, vd, vs1, vs2, vl);
+}
+
+// CHECK-RV64-LABEL: define dso_local <vscale x 4 x bfloat> @test_vfnmsac_vf_bf16m1_m(
+// CHECK-RV64-SAME: <vscale x 4 x i1> [[MASK:%.*]], <vscale x 4 x bfloat> [[VD:%.*]], bfloat noundef [[RS1:%.*]], <vscale x 4 x bfloat> [[VS2:%.*]], i64 noundef [[VL:%.*]]) #[[ATTR0]] {
+// CHECK-RV64-NEXT: [[ENTRY:.*:]]
+// CHECK-RV64-NEXT: [[TMP0:%.*]] = call <vscale x 4 x bfloat> @llvm.riscv.vfnmsac.mask.nxv4bf16.bf16.i64(<vscale x 4 x bfloat> [[VD]], bfloat [[RS1]], <vscale x 4 x bfloat> [[VS2]], <vscale x 4 x i1> [[MASK]], i64 7, i64 [[VL]], i64 3)
+// CHECK-RV64-NEXT: ret <vscale x 4 x bfloat> [[TMP0]]
+//
+vbfloat16m1_t test_vfnmsac_vf_bf16m1_m(vbool16_t mask, vbfloat16m1_t vd, __bf16 rs1, vbfloat16m1_t vs2, size_t vl) {
+ return __riscv_vfnmsac_vf_bf16m1_m(mask, vd, rs1, vs2, vl);
+}
+
+// CHECK-RV64-LABEL: define dso_local <vscale x 8 x bfloat> @test_vfnmsac_vv_bf16m2_m(
+// CHECK-RV64-SAME: <vscale x 8 x i1> [[MASK:%.*]], <vscale x 8 x bfloat> [[VD:%.*]], <vscale x 8 x bfloat> [[VS1:%.*]], <vscale x 8 x bfloat> [[VS2:%.*]], i64 noundef [[VL:%.*]]) #[[ATTR0]] {
+// CHECK-RV64-NEXT: [[ENTRY:.*:]]
+// CHECK-RV64-NEXT: [[TMP0:%.*]] = call <vscale x 8 x bfloat> @llvm.riscv.vfnmsac.mask.nxv8bf16.nxv8bf16.i64(<vscale x 8 x bfloat> [[VD]], <vscale x 8 x bfloat> [[VS1]], <vscale x 8 x bfloat> [[VS2]], <vscale x 8 x i1> [[MASK]], i64 7, i64 [[VL]], i64 3)
+// CHECK-RV64-NEXT: ret <vscale x 8 x bfloat> [[TMP0]]
+//
+vbfloat16m2_t test_vfnmsac_vv_bf16m2_m(vbool8_t mask, vbfloat16m2_t vd, vbfloat16m2_t vs1, vbfloat16m2_t vs2, size_t vl) {
+ return __riscv_vfnmsac_vv_bf16m2_m(mask, vd, vs1, vs2, vl);
+}
+
+// CHECK-RV64-LABEL: define dso_local <vscale x 8 x bfloat> @test_vfnmsac_vf_bf16m2_m(
+// CHECK-RV64-SAME: <vscale x 8 x i1> [[MASK:%.*]], <vscale x 8 x bfloat> [[VD:%.*]], bfloat noundef [[RS1:%.*]], <vscale x 8 x bfloat> [[VS2:%.*]], i64 noundef [[VL:%.*]]) #[[ATTR0]] {
+// CHECK-RV64-NEXT: [[ENTRY:.*:]]
+// CHECK-RV64-NEXT: [[TMP0:%.*]] = call <vscale x 8 x bfloat> @llvm.riscv.vfnmsac.mask.nxv8bf16.bf16.i64(<vscale x 8 x bfloat> [[VD]], bfloat [[RS1]], <vscale x 8 x bfloat> [[VS2]], <vscale x 8 x i1> [[MASK]], i64 7, i64 [[VL]], i64 3)
+// CHECK-RV64-NEXT: ret <vscale x 8 x bfloat> [[TMP0]]
+//
+vbfloat16m2_t test_vfnmsac_vf_bf16m2_m(vbool8_t mask, vbfloat16m2_t vd, __bf16 rs1, vbfloat16m2_t vs2, size_t vl) {
+ return __riscv_vfnmsac_vf_bf16m2_m(mask, vd, rs1, vs2, vl);
+}
+
+// CHECK-RV64-LABEL: define dso_local <vscale x 16 x bfloat> @test_vfnmsac_vv_bf16m4_m(
+// CHECK-RV64-SAME: <vscale x 16 x i1> [[MASK:%.*]], <vscale x 16 x bfloat> [[VD:%.*]], <vscale x 16 x bfloat> [[VS1:%.*]], <vscale x 16 x bfloat> [[VS2:%.*]], i64 noundef [[VL:%.*]]) #[[ATTR0]] {
+// CHECK-RV64-NEXT: [[ENTRY:.*:]]
+// CHECK-RV64-NEXT: [[TMP0:%.*]] = call <vscale x 16 x bfloat> @llvm.riscv.vfnmsac.mask.nxv16bf16.nxv16bf16.i64(<vscale x 16 x bfloat> [[VD]], <vscale x 16 x bfloat> [[VS1]], <vscale x 16 x bfloat> [[VS2]], <vscale x 16 x i1> [[MASK]], i64 7, i64 [[VL]], i64 3)
+// CHECK-RV64-NEXT: ret <vscale x 16 x bfloat> [[TMP0]]
+//
+vbfloat16m4_t test_vfnmsac_vv_bf16m4_m(vbool4_t mask, vbfloat16m4_t vd, vbfloat16m4_t vs1, vbfloat16m4_t vs2, size_t vl) {
+ return __riscv_vfnmsac_vv_bf16m4_m(mask, vd, vs1, vs2, vl);
+}
+
+// CHECK-RV64-LABEL: define dso_local <vscale x 16 x bfloat> @test_vfnmsac_vf_bf16m4_m(
+// CHECK-RV64-SAME: <vscale x 16 x i1> [[MASK:%.*]], <vscale x 16 x bfloat> [[VD:%.*]], bfloat noundef [[RS1:%.*]], <vscale x 16 x bfloat> [[VS2:%.*]], i64 noundef [[VL:%.*]]) #[[ATTR0]] {
+// CHECK-RV64-NEXT: [[ENTRY:.*:]]
+// CHECK-RV64-NEXT: [[TMP0:%.*]] = call <vscale x 16 x bfloat> @llvm.riscv.vfnmsac.mask.nxv16bf16.bf16.i64(<vscale x 16 x bfloat> [[VD]], bfloat [[RS1]], <vscale x 16 x bfloat> [[VS2]], <vscale x 16 x i1> [[MASK]], i64 7, i64 [[VL]], i64 3)
+// CHECK-RV64-NEXT: ret <vscale x 16 x bfloat> [[TMP0]]
+//
+vbfloat16m4_t test_vfnmsac_vf_bf16m4_m(vbool4_t mask, vbfloat16m4_t vd, __bf16 rs1, vbfloat16m4_t vs2, size_t vl) {
+ return __riscv_vfnmsac_vf_bf16m4_m(mask, vd, rs1, vs2, vl);
+}
+
+// CHECK-RV64-LABEL: define dso_local <vscale x 32 x bfloat> @test_vfnmsac_vv_bf16m8_m(
+// CHECK-RV64-SAME: <vscale x 32 x i1> [[MASK:%.*]], <vscale x 32 x bfloat> [[VD:%.*]], <vscale x 32 x bfloat> [[VS1:%.*]], <vscale x 32 x bfloat> [[VS2:%.*]], i64 noundef [[VL:%.*]]) #[[ATTR0]] {
+// CHECK-RV64-NEXT: [[ENTRY:.*:]]
+// CHECK-RV64-NEXT: [[TMP0:%.*]] = call <vscale x 32 x bfloat> @llvm.riscv.vfnmsac.mask.nxv32bf16.nxv32bf16.i64(<vscale x 32 x bfloat> [[VD]], <vscale x 32 x bfloat> [[VS1]], <vscale x 32 x bfloat> [[VS2]], <vscale x 32 x i1> [[MASK]], i64 7, i64 [[VL]], i64 3)
+// CHECK-RV64-NEXT: ret <vscale x 32 x bfloat> [[TMP0]]
+//
+vbfloat16m8_t test_vfnmsac_vv_bf16m8_m(vbool2_t mask, vbfloat16m8_t vd, vbfloat16m8_t vs1, vbfloat16m8_t vs2, size_t vl) {
+ return __riscv_vfnmsac_vv_bf16m8_m(mask, vd, vs1, vs2, vl);
+}
+
+// CHECK-RV64-LABEL: define dso_local <vscale x 32 x bfloat> @test_vfnmsac_vf_bf16m8_m(
+// CHECK-RV64-SAME: <vscale x 32 x i1> [[MASK:%.*]], <vscale x 32 x bfloat> [[VD:%.*]], bfloat noundef [[RS1:%.*]], <vscale x 32 x bfloat> [[VS2:%.*]], i64 noundef [[VL:%.*]]) #[[ATTR0]] {
+// CHECK-RV64-NEXT: [[ENTRY:.*:]]
+// CHECK-RV64-NEXT: [[TMP0:%.*]] = call <vscale x 32 x bfloat> @llvm.riscv.vfnmsac.mask.nxv32bf16.bf16.i64(<vscale x 32 x bfloat> [[VD]], bfloat [[RS1]], <vscale x 32 x bfloat> [[VS2]], <vscale x 32 x i1> [[MASK]], i64 7, i64 [[VL]], i64 3)
+// CHECK-RV64-NEXT: ret <vscale x 32 x bfloat> [[TMP0]]
+//
+vbfloat16m8_t test_vfnmsac_vf_bf16m8_m(vbool2_t mask, vbfloat16m8_t vd, __bf16 rs1, vbfloat16m8_t vs2, size_t vl) {
+ return __riscv_vfnmsac_vf_bf16m8_m(mask, vd, rs1, vs2, vl);
+}
+
diff --git a/clang/test/CodeGen/RISCV/rvv-intrinsics-autogenerated/zvfbfa/non-policy/non-overloaded/vfnmsub.c b/clang/test/CodeGen/RISCV/rvv-intrinsics-autogenerated/zvfbfa/non-policy/non-overloaded/vfnmsub.c
new file mode 100644
index 0000000..4b55b64
--- /dev/null
+++ b/clang/test/CodeGen/RISCV/rvv-intrinsics-autogenerated/zvfbfa/non-policy/non-overloaded/vfnmsub.c
@@ -0,0 +1,249 @@
+// NOTE: Assertions have been autogenerated by utils/update_cc_test_checks.py UTC_ARGS: --version 5
+// REQUIRES: riscv-registered-target
+// RUN: %clang_cc1 -triple riscv64 -target-feature +v \
+// RUN: -target-feature +experimental-zvfbfa -disable-O0-optnone \
+// RUN: -emit-llvm %s -o - | opt -S -passes=mem2reg | \
+// RUN: FileCheck --check-prefix=CHECK-RV64 %s
+
+#include <riscv_vector.h>
+
+// CHECK-RV64-LABEL: define dso_local <vscale x 1 x bfloat> @test_vfnmsub_vv_bf16mf4(
+// CHECK-RV64-SAME: <vscale x 1 x bfloat> [[VD:%.*]], <vscale x 1 x bfloat> [[VS1:%.*]], <vscale x 1 x bfloat> [[VS2:%.*]], i64 noundef [[VL:%.*]]) #[[ATTR0:[0-9]+]] {
+// CHECK-RV64-NEXT: [[ENTRY:.*:]]
+// CHECK-RV64-NEXT: [[TMP0:%.*]] = call <vscale x 1 x bfloat> @llvm.riscv.vfnmsub.nxv1bf16.nxv1bf16.i64(<vscale x 1 x bfloat> [[VD]], <vscale x 1 x bfloat> [[VS1]], <vscale x 1 x bfloat> [[VS2]], i64 7, i64 [[VL]], i64 3)
+// CHECK-RV64-NEXT: ret <vscale x 1 x bfloat> [[TMP0]]
+//
+vbfloat16mf4_t test_vfnmsub_vv_bf16mf4(vbfloat16mf4_t vd, vbfloat16mf4_t vs1, vbfloat16mf4_t vs2, size_t vl) {
+ return __riscv_vfnmsub_vv_bf16mf4(vd, vs1, vs2, vl);
+}
+
+// CHECK-RV64-LABEL: define dso_local <vscale x 1 x bfloat> @test_vfnmsub_vf_bf16mf4(
+// CHECK-RV64-SAME: <vscale x 1 x bfloat> [[VD:%.*]], bfloat noundef [[RS1:%.*]], <vscale x 1 x bfloat> [[VS2:%.*]], i64 noundef [[VL:%.*]]) #[[ATTR0]] {
+// CHECK-RV64-NEXT: [[ENTRY:.*:]]
+// CHECK-RV64-NEXT: [[TMP0:%.*]] = call <vscale x 1 x bfloat> @llvm.riscv.vfnmsub.nxv1bf16.bf16.i64(<vscale x 1 x bfloat> [[VD]], bfloat [[RS1]], <vscale x 1 x bfloat> [[VS2]], i64 7, i64 [[VL]], i64 3)
+// CHECK-RV64-NEXT: ret <vscale x 1 x bfloat> [[TMP0]]
+//
+vbfloat16mf4_t test_vfnmsub_vf_bf16mf4(vbfloat16mf4_t vd, __bf16 rs1, vbfloat16mf4_t vs2, size_t vl) {
+ return __riscv_vfnmsub_vf_bf16mf4(vd, rs1, vs2, vl);
+}
+
+// CHECK-RV64-LABEL: define dso_local <vscale x 2 x bfloat> @test_vfnmsub_vv_bf16mf2(
+// CHECK-RV64-SAME: <vscale x 2 x bfloat> [[VD:%.*]], <vscale x 2 x bfloat> [[VS1:%.*]], <vscale x 2 x bfloat> [[VS2:%.*]], i64 noundef [[VL:%.*]]) #[[ATTR0]] {
+// CHECK-RV64-NEXT: [[ENTRY:.*:]]
+// CHECK-RV64-NEXT: [[TMP0:%.*]] = call <vscale x 2 x bfloat> @llvm.riscv.vfnmsub.nxv2bf16.nxv2bf16.i64(<vscale x 2 x bfloat> [[VD]], <vscale x 2 x bfloat> [[VS1]], <vscale x 2 x bfloat> [[VS2]], i64 7, i64 [[VL]], i64 3)
+// CHECK-RV64-NEXT: ret <vscale x 2 x bfloat> [[TMP0]]
+//
+vbfloat16mf2_t test_vfnmsub_vv_bf16mf2(vbfloat16mf2_t vd, vbfloat16mf2_t vs1, vbfloat16mf2_t vs2, size_t vl) {
+ return __riscv_vfnmsub_vv_bf16mf2(vd, vs1, vs2, vl);
+}
+
+// CHECK-RV64-LABEL: define dso_local <vscale x 2 x bfloat> @test_vfnmsub_vf_bf16mf2(
+// CHECK-RV64-SAME: <vscale x 2 x bfloat> [[VD:%.*]], bfloat noundef [[RS1:%.*]], <vscale x 2 x bfloat> [[VS2:%.*]], i64 noundef [[VL:%.*]]) #[[ATTR0]] {
+// CHECK-RV64-NEXT: [[ENTRY:.*:]]
+// CHECK-RV64-NEXT: [[TMP0:%.*]] = call <vscale x 2 x bfloat> @llvm.riscv.vfnmsub.nxv2bf16.bf16.i64(<vscale x 2 x bfloat> [[VD]], bfloat [[RS1]], <vscale x 2 x bfloat> [[VS2]], i64 7, i64 [[VL]], i64 3)
+// CHECK-RV64-NEXT: ret <vscale x 2 x bfloat> [[TMP0]]
+//
+vbfloat16mf2_t test_vfnmsub_vf_bf16mf2(vbfloat16mf2_t vd, __bf16 rs1, vbfloat16mf2_t vs2, size_t vl) {
+ return __riscv_vfnmsub_vf_bf16mf2(vd, rs1, vs2, vl);
+}
+
+// CHECK-RV64-LABEL: define dso_local <vscale x 4 x bfloat> @test_vfnmsub_vv_bf16m1(
+// CHECK-RV64-SAME: <vscale x 4 x bfloat> [[VD:%.*]], <vscale x 4 x bfloat> [[VS1:%.*]], <vscale x 4 x bfloat> [[VS2:%.*]], i64 noundef [[VL:%.*]]) #[[ATTR0]] {
+// CHECK-RV64-NEXT: [[ENTRY:.*:]]
+// CHECK-RV64-NEXT: [[TMP0:%.*]] = call <vscale x 4 x bfloat> @llvm.riscv.vfnmsub.nxv4bf16.nxv4bf16.i64(<vscale x 4 x bfloat> [[VD]], <vscale x 4 x bfloat> [[VS1]], <vscale x 4 x bfloat> [[VS2]], i64 7, i64 [[VL]], i64 3)
+// CHECK-RV64-NEXT: ret <vscale x 4 x bfloat> [[TMP0]]
+//
+vbfloat16m1_t test_vfnmsub_vv_bf16m1(vbfloat16m1_t vd, vbfloat16m1_t vs1, vbfloat16m1_t vs2, size_t vl) {
+ return __riscv_vfnmsub_vv_bf16m1(vd, vs1, vs2, vl);
+}
+
+// CHECK-RV64-LABEL: define dso_local <vscale x 4 x bfloat> @test_vfnmsub_vf_bf16m1(
+// CHECK-RV64-SAME: <vscale x 4 x bfloat> [[VD:%.*]], bfloat noundef [[RS1:%.*]], <vscale x 4 x bfloat> [[VS2:%.*]], i64 noundef [[VL:%.*]]) #[[ATTR0]] {
+// CHECK-RV64-NEXT: [[ENTRY:.*:]]
+// CHECK-RV64-NEXT: [[TMP0:%.*]] = call <vscale x 4 x bfloat> @llvm.riscv.vfnmsub.nxv4bf16.bf16.i64(<vscale x 4 x bfloat> [[VD]], bfloat [[RS1]], <vscale x 4 x bfloat> [[VS2]], i64 7, i64 [[VL]], i64 3)
+// CHECK-RV64-NEXT: ret <vscale x 4 x bfloat> [[TMP0]]
+//
+vbfloat16m1_t test_vfnmsub_vf_bf16m1(vbfloat16m1_t vd, __bf16 rs1, vbfloat16m1_t vs2, size_t vl) {
+ return __riscv_vfnmsub_vf_bf16m1(vd, rs1, vs2, vl);
+}
+
+// CHECK-RV64-LABEL: define dso_local <vscale x 8 x bfloat> @test_vfnmsub_vv_bf16m2(
+// CHECK-RV64-SAME: <vscale x 8 x bfloat> [[VD:%.*]], <vscale x 8 x bfloat> [[VS1:%.*]], <vscale x 8 x bfloat> [[VS2:%.*]], i64 noundef [[VL:%.*]]) #[[ATTR0]] {
+// CHECK-RV64-NEXT: [[ENTRY:.*:]]
+// CHECK-RV64-NEXT: [[TMP0:%.*]] = call <vscale x 8 x bfloat> @llvm.riscv.vfnmsub.nxv8bf16.nxv8bf16.i64(<vscale x 8 x bfloat> [[VD]], <vscale x 8 x bfloat> [[VS1]], <vscale x 8 x bfloat> [[VS2]], i64 7, i64 [[VL]], i64 3)
+// CHECK-RV64-NEXT: ret <vscale x 8 x bfloat> [[TMP0]]
+//
+vbfloat16m2_t test_vfnmsub_vv_bf16m2(vbfloat16m2_t vd, vbfloat16m2_t vs1, vbfloat16m2_t vs2, size_t vl) {
+ return __riscv_vfnmsub_vv_bf16m2(vd, vs1, vs2, vl);
+}
+
+// CHECK-RV64-LABEL: define dso_local <vscale x 8 x bfloat> @test_vfnmsub_vf_bf16m2(
+// CHECK-RV64-SAME: <vscale x 8 x bfloat> [[VD:%.*]], bfloat noundef [[RS1:%.*]], <vscale x 8 x bfloat> [[VS2:%.*]], i64 noundef [[VL:%.*]]) #[[ATTR0]] {
+// CHECK-RV64-NEXT: [[ENTRY:.*:]]
+// CHECK-RV64-NEXT: [[TMP0:%.*]] = call <vscale x 8 x bfloat> @llvm.riscv.vfnmsub.nxv8bf16.bf16.i64(<vscale x 8 x bfloat> [[VD]], bfloat [[RS1]], <vscale x 8 x bfloat> [[VS2]], i64 7, i64 [[VL]], i64 3)
+// CHECK-RV64-NEXT: ret <vscale x 8 x bfloat> [[TMP0]]
+//
+vbfloat16m2_t test_vfnmsub_vf_bf16m2(vbfloat16m2_t vd, __bf16 rs1, vbfloat16m2_t vs2, size_t vl) {
+ return __riscv_vfnmsub_vf_bf16m2(vd, rs1, vs2, vl);
+}
+
+// CHECK-RV64-LABEL: define dso_local <vscale x 16 x bfloat> @test_vfnmsub_vv_bf16m4(
+// CHECK-RV64-SAME: <vscale x 16 x bfloat> [[VD:%.*]], <vscale x 16 x bfloat> [[VS1:%.*]], <vscale x 16 x bfloat> [[VS2:%.*]], i64 noundef [[VL:%.*]]) #[[ATTR0]] {
+// CHECK-RV64-NEXT: [[ENTRY:.*:]]
+// CHECK-RV64-NEXT: [[TMP0:%.*]] = call <vscale x 16 x bfloat> @llvm.riscv.vfnmsub.nxv16bf16.nxv16bf16.i64(<vscale x 16 x bfloat> [[VD]], <vscale x 16 x bfloat> [[VS1]], <vscale x 16 x bfloat> [[VS2]], i64 7, i64 [[VL]], i64 3)
+// CHECK-RV64-NEXT: ret <vscale x 16 x bfloat> [[TMP0]]
+//
+vbfloat16m4_t test_vfnmsub_vv_bf16m4(vbfloat16m4_t vd, vbfloat16m4_t vs1, vbfloat16m4_t vs2, size_t vl) {
+ return __riscv_vfnmsub_vv_bf16m4(vd, vs1, vs2, vl);
+}
+
+// CHECK-RV64-LABEL: define dso_local <vscale x 16 x bfloat> @test_vfnmsub_vf_bf16m4(
+// CHECK-RV64-SAME: <vscale x 16 x bfloat> [[VD:%.*]], bfloat noundef [[RS1:%.*]], <vscale x 16 x bfloat> [[VS2:%.*]], i64 noundef [[VL:%.*]]) #[[ATTR0]] {
+// CHECK-RV64-NEXT: [[ENTRY:.*:]]
+// CHECK-RV64-NEXT: [[TMP0:%.*]] = call <vscale x 16 x bfloat> @llvm.riscv.vfnmsub.nxv16bf16.bf16.i64(<vscale x 16 x bfloat> [[VD]], bfloat [[RS1]], <vscale x 16 x bfloat> [[VS2]], i64 7, i64 [[VL]], i64 3)
+// CHECK-RV64-NEXT: ret <vscale x 16 x bfloat> [[TMP0]]
+//
+vbfloat16m4_t test_vfnmsub_vf_bf16m4(vbfloat16m4_t vd, __bf16 rs1, vbfloat16m4_t vs2, size_t vl) {
+ return __riscv_vfnmsub_vf_bf16m4(vd, rs1, vs2, vl);
+}
+
+// CHECK-RV64-LABEL: define dso_local <vscale x 32 x bfloat> @test_vfnmsub_vv_bf16m8(
+// CHECK-RV64-SAME: <vscale x 32 x bfloat> [[VD:%.*]], <vscale x 32 x bfloat> [[VS1:%.*]], <vscale x 32 x bfloat> [[VS2:%.*]], i64 noundef [[VL:%.*]]) #[[ATTR0]] {
+// CHECK-RV64-NEXT: [[ENTRY:.*:]]
+// CHECK-RV64-NEXT: [[TMP0:%.*]] = call <vscale x 32 x bfloat> @llvm.riscv.vfnmsub.nxv32bf16.nxv32bf16.i64(<vscale x 32 x bfloat> [[VD]], <vscale x 32 x bfloat> [[VS1]], <vscale x 32 x bfloat> [[VS2]], i64 7, i64 [[VL]], i64 3)
+// CHECK-RV64-NEXT: ret <vscale x 32 x bfloat> [[TMP0]]
+//
+vbfloat16m8_t test_vfnmsub_vv_bf16m8(vbfloat16m8_t vd, vbfloat16m8_t vs1, vbfloat16m8_t vs2, size_t vl) {
+ return __riscv_vfnmsub_vv_bf16m8(vd, vs1, vs2, vl);
+}
+
+// CHECK-RV64-LABEL: define dso_local <vscale x 32 x bfloat> @test_vfnmsub_vf_bf16m8(
+// CHECK-RV64-SAME: <vscale x 32 x bfloat> [[VD:%.*]], bfloat noundef [[RS1:%.*]], <vscale x 32 x bfloat> [[VS2:%.*]], i64 noundef [[VL:%.*]]) #[[ATTR0]] {
+// CHECK-RV64-NEXT: [[ENTRY:.*:]]
+// CHECK-RV64-NEXT: [[TMP0:%.*]] = call <vscale x 32 x bfloat> @llvm.riscv.vfnmsub.nxv32bf16.bf16.i64(<vscale x 32 x bfloat> [[VD]], bfloat [[RS1]], <vscale x 32 x bfloat> [[VS2]], i64 7, i64 [[VL]], i64 3)
+// CHECK-RV64-NEXT: ret <vscale x 32 x bfloat> [[TMP0]]
+//
+vbfloat16m8_t test_vfnmsub_vf_bf16m8(vbfloat16m8_t vd, __bf16 rs1, vbfloat16m8_t vs2, size_t vl) {
+ return __riscv_vfnmsub_vf_bf16m8(vd, rs1, vs2, vl);
+}
+
+// CHECK-RV64-LABEL: define dso_local <vscale x 1 x bfloat> @test_vfnmsub_vv_bf16mf4_m(
+// CHECK-RV64-SAME: <vscale x 1 x i1> [[MASK:%.*]], <vscale x 1 x bfloat> [[VD:%.*]], <vscale x 1 x bfloat> [[VS1:%.*]], <vscale x 1 x bfloat> [[VS2:%.*]], i64 noundef [[VL:%.*]]) #[[ATTR0]] {
+// CHECK-RV64-NEXT: [[ENTRY:.*:]]
+// CHECK-RV64-NEXT: [[TMP0:%.*]] = call <vscale x 1 x bfloat> @llvm.riscv.vfnmsub.mask.nxv1bf16.nxv1bf16.i64(<vscale x 1 x bfloat> [[VD]], <vscale x 1 x bfloat> [[VS1]], <vscale x 1 x bfloat> [[VS2]], <vscale x 1 x i1> [[MASK]], i64 7, i64 [[VL]], i64 3)
+// CHECK-RV64-NEXT: ret <vscale x 1 x bfloat> [[TMP0]]
+//
+vbfloat16mf4_t test_vfnmsub_vv_bf16mf4_m(vbool64_t mask, vbfloat16mf4_t vd, vbfloat16mf4_t vs1, vbfloat16mf4_t vs2, size_t vl) {
+ return __riscv_vfnmsub_vv_bf16mf4_m(mask, vd, vs1, vs2, vl);
+}
+
+// CHECK-RV64-LABEL: define dso_local <vscale x 1 x bfloat> @test_vfnmsub_vf_bf16mf4_m(
+// CHECK-RV64-SAME: <vscale x 1 x i1> [[MASK:%.*]], <vscale x 1 x bfloat> [[VD:%.*]], bfloat noundef [[RS1:%.*]], <vscale x 1 x bfloat> [[VS2:%.*]], i64 noundef [[VL:%.*]]) #[[ATTR0]] {
+// CHECK-RV64-NEXT: [[ENTRY:.*:]]
+// CHECK-RV64-NEXT: [[TMP0:%.*]] = call <vscale x 1 x bfloat> @llvm.riscv.vfnmsub.mask.nxv1bf16.bf16.i64(<vscale x 1 x bfloat> [[VD]], bfloat [[RS1]], <vscale x 1 x bfloat> [[VS2]], <vscale x 1 x i1> [[MASK]], i64 7, i64 [[VL]], i64 3)
+// CHECK-RV64-NEXT: ret <vscale x 1 x bfloat> [[TMP0]]
+//
+vbfloat16mf4_t test_vfnmsub_vf_bf16mf4_m(vbool64_t mask, vbfloat16mf4_t vd, __bf16 rs1, vbfloat16mf4_t vs2, size_t vl) {
+ return __riscv_vfnmsub_vf_bf16mf4_m(mask, vd, rs1, vs2, vl);
+}
+
+// CHECK-RV64-LABEL: define dso_local <vscale x 2 x bfloat> @test_vfnmsub_vv_bf16mf2_m(
+// CHECK-RV64-SAME: <vscale x 2 x i1> [[MASK:%.*]], <vscale x 2 x bfloat> [[VD:%.*]], <vscale x 2 x bfloat> [[VS1:%.*]], <vscale x 2 x bfloat> [[VS2:%.*]], i64 noundef [[VL:%.*]]) #[[ATTR0]] {
+// CHECK-RV64-NEXT: [[ENTRY:.*:]]
+// CHECK-RV64-NEXT: [[TMP0:%.*]] = call <vscale x 2 x bfloat> @llvm.riscv.vfnmsub.mask.nxv2bf16.nxv2bf16.i64(<vscale x 2 x bfloat> [[VD]], <vscale x 2 x bfloat> [[VS1]], <vscale x 2 x bfloat> [[VS2]], <vscale x 2 x i1> [[MASK]], i64 7, i64 [[VL]], i64 3)
+// CHECK-RV64-NEXT: ret <vscale x 2 x bfloat> [[TMP0]]
+//
+vbfloat16mf2_t test_vfnmsub_vv_bf16mf2_m(vbool32_t mask, vbfloat16mf2_t vd, vbfloat16mf2_t vs1, vbfloat16mf2_t vs2, size_t vl) {
+ return __riscv_vfnmsub_vv_bf16mf2_m(mask, vd, vs1, vs2, vl);
+}
+
+// CHECK-RV64-LABEL: define dso_local <vscale x 2 x bfloat> @test_vfnmsub_vf_bf16mf2_m(
+// CHECK-RV64-SAME: <vscale x 2 x i1> [[MASK:%.*]], <vscale x 2 x bfloat> [[VD:%.*]], bfloat noundef [[RS1:%.*]], <vscale x 2 x bfloat> [[VS2:%.*]], i64 noundef [[VL:%.*]]) #[[ATTR0]] {
+// CHECK-RV64-NEXT: [[ENTRY:.*:]]
+// CHECK-RV64-NEXT: [[TMP0:%.*]] = call <vscale x 2 x bfloat> @llvm.riscv.vfnmsub.mask.nxv2bf16.bf16.i64(<vscale x 2 x bfloat> [[VD]], bfloat [[RS1]], <vscale x 2 x bfloat> [[VS2]], <vscale x 2 x i1> [[MASK]], i64 7, i64 [[VL]], i64 3)
+// CHECK-RV64-NEXT: ret <vscale x 2 x bfloat> [[TMP0]]
+//
+vbfloat16mf2_t test_vfnmsub_vf_bf16mf2_m(vbool32_t mask, vbfloat16mf2_t vd, __bf16 rs1, vbfloat16mf2_t vs2, size_t vl) {
+ return __riscv_vfnmsub_vf_bf16mf2_m(mask, vd, rs1, vs2, vl);
+}
+
+// CHECK-RV64-LABEL: define dso_local <vscale x 4 x bfloat> @test_vfnmsub_vv_bf16m1_m(
+// CHECK-RV64-SAME: <vscale x 4 x i1> [[MASK:%.*]], <vscale x 4 x bfloat> [[VD:%.*]], <vscale x 4 x bfloat> [[VS1:%.*]], <vscale x 4 x bfloat> [[VS2:%.*]], i64 noundef [[VL:%.*]]) #[[ATTR0]] {
+// CHECK-RV64-NEXT: [[ENTRY:.*:]]
+// CHECK-RV64-NEXT: [[TMP0:%.*]] = call <vscale x 4 x bfloat> @llvm.riscv.vfnmsub.mask.nxv4bf16.nxv4bf16.i64(<vscale x 4 x bfloat> [[VD]], <vscale x 4 x bfloat> [[VS1]], <vscale x 4 x bfloat> [[VS2]], <vscale x 4 x i1> [[MASK]], i64 7, i64 [[VL]], i64 3)
+// CHECK-RV64-NEXT: ret <vscale x 4 x bfloat> [[TMP0]]
+//
+vbfloat16m1_t test_vfnmsub_vv_bf16m1_m(vbool16_t mask, vbfloat16m1_t vd, vbfloat16m1_t vs1, vbfloat16m1_t vs2, size_t vl) {
+ return __riscv_vfnmsub_vv_bf16m1_m(mask, vd, vs1, vs2, vl);
+}
+
+// CHECK-RV64-LABEL: define dso_local <vscale x 4 x bfloat> @test_vfnmsub_vf_bf16m1_m(
+// CHECK-RV64-SAME: <vscale x 4 x i1> [[MASK:%.*]], <vscale x 4 x bfloat> [[VD:%.*]], bfloat noundef [[RS1:%.*]], <vscale x 4 x bfloat> [[VS2:%.*]], i64 noundef [[VL:%.*]]) #[[ATTR0]] {
+// CHECK-RV64-NEXT: [[ENTRY:.*:]]
+// CHECK-RV64-NEXT: [[TMP0:%.*]] = call <vscale x 4 x bfloat> @llvm.riscv.vfnmsub.mask.nxv4bf16.bf16.i64(<vscale x 4 x bfloat> [[VD]], bfloat [[RS1]], <vscale x 4 x bfloat> [[VS2]], <vscale x 4 x i1> [[MASK]], i64 7, i64 [[VL]], i64 3)
+// CHECK-RV64-NEXT: ret <vscale x 4 x bfloat> [[TMP0]]
+//
+vbfloat16m1_t test_vfnmsub_vf_bf16m1_m(vbool16_t mask, vbfloat16m1_t vd, __bf16 rs1, vbfloat16m1_t vs2, size_t vl) {
+ return __riscv_vfnmsub_vf_bf16m1_m(mask, vd, rs1, vs2, vl);
+}
+
+// CHECK-RV64-LABEL: define dso_local <vscale x 8 x bfloat> @test_vfnmsub_vv_bf16m2_m(
+// CHECK-RV64-SAME: <vscale x 8 x i1> [[MASK:%.*]], <vscale x 8 x bfloat> [[VD:%.*]], <vscale x 8 x bfloat> [[VS1:%.*]], <vscale x 8 x bfloat> [[VS2:%.*]], i64 noundef [[VL:%.*]]) #[[ATTR0]] {
+// CHECK-RV64-NEXT: [[ENTRY:.*:]]
+// CHECK-RV64-NEXT: [[TMP0:%.*]] = call <vscale x 8 x bfloat> @llvm.riscv.vfnmsub.mask.nxv8bf16.nxv8bf16.i64(<vscale x 8 x bfloat> [[VD]], <vscale x 8 x bfloat> [[VS1]], <vscale x 8 x bfloat> [[VS2]], <vscale x 8 x i1> [[MASK]], i64 7, i64 [[VL]], i64 3)
+// CHECK-RV64-NEXT: ret <vscale x 8 x bfloat> [[TMP0]]
+//
+vbfloat16m2_t test_vfnmsub_vv_bf16m2_m(vbool8_t mask, vbfloat16m2_t vd, vbfloat16m2_t vs1, vbfloat16m2_t vs2, size_t vl) {
+ return __riscv_vfnmsub_vv_bf16m2_m(mask, vd, vs1, vs2, vl);
+}
+
+// CHECK-RV64-LABEL: define dso_local <vscale x 8 x bfloat> @test_vfnmsub_vf_bf16m2_m(
+// CHECK-RV64-SAME: <vscale x 8 x i1> [[MASK:%.*]], <vscale x 8 x bfloat> [[VD:%.*]], bfloat noundef [[RS1:%.*]], <vscale x 8 x bfloat> [[VS2:%.*]], i64 noundef [[VL:%.*]]) #[[ATTR0]] {
+// CHECK-RV64-NEXT: [[ENTRY:.*:]]
+// CHECK-RV64-NEXT: [[TMP0:%.*]] = call <vscale x 8 x bfloat> @llvm.riscv.vfnmsub.mask.nxv8bf16.bf16.i64(<vscale x 8 x bfloat> [[VD]], bfloat [[RS1]], <vscale x 8 x bfloat> [[VS2]], <vscale x 8 x i1> [[MASK]], i64 7, i64 [[VL]], i64 3)
+// CHECK-RV64-NEXT: ret <vscale x 8 x bfloat> [[TMP0]]
+//
+vbfloat16m2_t test_vfnmsub_vf_bf16m2_m(vbool8_t mask, vbfloat16m2_t vd, __bf16 rs1, vbfloat16m2_t vs2, size_t vl) {
+ return __riscv_vfnmsub_vf_bf16m2_m(mask, vd, rs1, vs2, vl);
+}
+
+// CHECK-RV64-LABEL: define dso_local <vscale x 16 x bfloat> @test_vfnmsub_vv_bf16m4_m(
+// CHECK-RV64-SAME: <vscale x 16 x i1> [[MASK:%.*]], <vscale x 16 x bfloat> [[VD:%.*]], <vscale x 16 x bfloat> [[VS1:%.*]], <vscale x 16 x bfloat> [[VS2:%.*]], i64 noundef [[VL:%.*]]) #[[ATTR0]] {
+// CHECK-RV64-NEXT: [[ENTRY:.*:]]
+// CHECK-RV64-NEXT: [[TMP0:%.*]] = call <vscale x 16 x bfloat> @llvm.riscv.vfnmsub.mask.nxv16bf16.nxv16bf16.i64(<vscale x 16 x bfloat> [[VD]], <vscale x 16 x bfloat> [[VS1]], <vscale x 16 x bfloat> [[VS2]], <vscale x 16 x i1> [[MASK]], i64 7, i64 [[VL]], i64 3)
+// CHECK-RV64-NEXT: ret <vscale x 16 x bfloat> [[TMP0]]
+//
+vbfloat16m4_t test_vfnmsub_vv_bf16m4_m(vbool4_t mask, vbfloat16m4_t vd, vbfloat16m4_t vs1, vbfloat16m4_t vs2, size_t vl) {
+ return __riscv_vfnmsub_vv_bf16m4_m(mask, vd, vs1, vs2, vl);
+}
+
+// CHECK-RV64-LABEL: define dso_local <vscale x 16 x bfloat> @test_vfnmsub_vf_bf16m4_m(
+// CHECK-RV64-SAME: <vscale x 16 x i1> [[MASK:%.*]], <vscale x 16 x bfloat> [[VD:%.*]], bfloat noundef [[RS1:%.*]], <vscale x 16 x bfloat> [[VS2:%.*]], i64 noundef [[VL:%.*]]) #[[ATTR0]] {
+// CHECK-RV64-NEXT: [[ENTRY:.*:]]
+// CHECK-RV64-NEXT: [[TMP0:%.*]] = call <vscale x 16 x bfloat> @llvm.riscv.vfnmsub.mask.nxv16bf16.bf16.i64(<vscale x 16 x bfloat> [[VD]], bfloat [[RS1]], <vscale x 16 x bfloat> [[VS2]], <vscale x 16 x i1> [[MASK]], i64 7, i64 [[VL]], i64 3)
+// CHECK-RV64-NEXT: ret <vscale x 16 x bfloat> [[TMP0]]
+//
+vbfloat16m4_t test_vfnmsub_vf_bf16m4_m(vbool4_t mask, vbfloat16m4_t vd, __bf16 rs1, vbfloat16m4_t vs2, size_t vl) {
+ return __riscv_vfnmsub_vf_bf16m4_m(mask, vd, rs1, vs2, vl);
+}
+
+// CHECK-RV64-LABEL: define dso_local <vscale x 32 x bfloat> @test_vfnmsub_vv_bf16m8_m(
+// CHECK-RV64-SAME: <vscale x 32 x i1> [[MASK:%.*]], <vscale x 32 x bfloat> [[VD:%.*]], <vscale x 32 x bfloat> [[VS1:%.*]], <vscale x 32 x bfloat> [[VS2:%.*]], i64 noundef [[VL:%.*]]) #[[ATTR0]] {
+// CHECK-RV64-NEXT: [[ENTRY:.*:]]
+// CHECK-RV64-NEXT: [[TMP0:%.*]] = call <vscale x 32 x bfloat> @llvm.riscv.vfnmsub.mask.nxv32bf16.nxv32bf16.i64(<vscale x 32 x bfloat> [[VD]], <vscale x 32 x bfloat> [[VS1]], <vscale x 32 x bfloat> [[VS2]], <vscale x 32 x i1> [[MASK]], i64 7, i64 [[VL]], i64 3)
+// CHECK-RV64-NEXT: ret <vscale x 32 x bfloat> [[TMP0]]
+//
+vbfloat16m8_t test_vfnmsub_vv_bf16m8_m(vbool2_t mask, vbfloat16m8_t vd, vbfloat16m8_t vs1, vbfloat16m8_t vs2, size_t vl) {
+ return __riscv_vfnmsub_vv_bf16m8_m(mask, vd, vs1, vs2, vl);
+}
+
+// CHECK-RV64-LABEL: define dso_local <vscale x 32 x bfloat> @test_vfnmsub_vf_bf16m8_m(
+// CHECK-RV64-SAME: <vscale x 32 x i1> [[MASK:%.*]], <vscale x 32 x bfloat> [[VD:%.*]], bfloat noundef [[RS1:%.*]], <vscale x 32 x bfloat> [[VS2:%.*]], i64 noundef [[VL:%.*]]) #[[ATTR0]] {
+// CHECK-RV64-NEXT: [[ENTRY:.*:]]
+// CHECK-RV64-NEXT: [[TMP0:%.*]] = call <vscale x 32 x bfloat> @llvm.riscv.vfnmsub.mask.nxv32bf16.bf16.i64(<vscale x 32 x bfloat> [[VD]], bfloat [[RS1]], <vscale x 32 x bfloat> [[VS2]], <vscale x 32 x i1> [[MASK]], i64 7, i64 [[VL]], i64 3)
+// CHECK-RV64-NEXT: ret <vscale x 32 x bfloat> [[TMP0]]
+//
+vbfloat16m8_t test_vfnmsub_vf_bf16m8_m(vbool2_t mask, vbfloat16m8_t vd, __bf16 rs1, vbfloat16m8_t vs2, size_t vl) {
+ return __riscv_vfnmsub_vf_bf16m8_m(mask, vd, rs1, vs2, vl);
+}
+
diff --git a/clang/test/CodeGen/RISCV/rvv-intrinsics-autogenerated/zvfbfa/non-policy/non-overloaded/vfrec7.c b/clang/test/CodeGen/RISCV/rvv-intrinsics-autogenerated/zvfbfa/non-policy/non-overloaded/vfrec7.c
new file mode 100644
index 0000000..1ffee73
--- /dev/null
+++ b/clang/test/CodeGen/RISCV/rvv-intrinsics-autogenerated/zvfbfa/non-policy/non-overloaded/vfrec7.c
@@ -0,0 +1,129 @@
+// NOTE: Assertions have been autogenerated by utils/update_cc_test_checks.py UTC_ARGS: --version 5
+// REQUIRES: riscv-registered-target
+// RUN: %clang_cc1 -triple riscv64 -target-feature +v \
+// RUN: -target-feature +experimental-zvfbfa -disable-O0-optnone \
+// RUN: -emit-llvm %s -o - | opt -S -passes=mem2reg | \
+// RUN: FileCheck --check-prefix=CHECK-RV64 %s
+
+#include <riscv_vector.h>
+
+// CHECK-RV64-LABEL: define dso_local <vscale x 1 x bfloat> @test_vfrec7_v_bf16mf4(
+// CHECK-RV64-SAME: <vscale x 1 x bfloat> [[OP1:%.*]], i64 noundef [[VL:%.*]]) #[[ATTR0:[0-9]+]] {
+// CHECK-RV64-NEXT: [[ENTRY:.*:]]
+// CHECK-RV64-NEXT: [[TMP0:%.*]] = call <vscale x 1 x bfloat> @llvm.riscv.vfrec7.nxv1bf16.i64(<vscale x 1 x bfloat> poison, <vscale x 1 x bfloat> [[OP1]], i64 7, i64 [[VL]])
+// CHECK-RV64-NEXT: ret <vscale x 1 x bfloat> [[TMP0]]
+//
+vbfloat16mf4_t test_vfrec7_v_bf16mf4(vbfloat16mf4_t op1, size_t vl) {
+ return __riscv_vfrec7_v_bf16mf4(op1, vl);
+}
+
+// CHECK-RV64-LABEL: define dso_local <vscale x 2 x bfloat> @test_vfrec7_v_bf16mf2(
+// CHECK-RV64-SAME: <vscale x 2 x bfloat> [[OP1:%.*]], i64 noundef [[VL:%.*]]) #[[ATTR0]] {
+// CHECK-RV64-NEXT: [[ENTRY:.*:]]
+// CHECK-RV64-NEXT: [[TMP0:%.*]] = call <vscale x 2 x bfloat> @llvm.riscv.vfrec7.nxv2bf16.i64(<vscale x 2 x bfloat> poison, <vscale x 2 x bfloat> [[OP1]], i64 7, i64 [[VL]])
+// CHECK-RV64-NEXT: ret <vscale x 2 x bfloat> [[TMP0]]
+//
+vbfloat16mf2_t test_vfrec7_v_bf16mf2(vbfloat16mf2_t op1, size_t vl) {
+ return __riscv_vfrec7_v_bf16mf2(op1, vl);
+}
+
+// CHECK-RV64-LABEL: define dso_local <vscale x 4 x bfloat> @test_vfrec7_v_bf16m1(
+// CHECK-RV64-SAME: <vscale x 4 x bfloat> [[OP1:%.*]], i64 noundef [[VL:%.*]]) #[[ATTR0]] {
+// CHECK-RV64-NEXT: [[ENTRY:.*:]]
+// CHECK-RV64-NEXT: [[TMP0:%.*]] = call <vscale x 4 x bfloat> @llvm.riscv.vfrec7.nxv4bf16.i64(<vscale x 4 x bfloat> poison, <vscale x 4 x bfloat> [[OP1]], i64 7, i64 [[VL]])
+// CHECK-RV64-NEXT: ret <vscale x 4 x bfloat> [[TMP0]]
+//
+vbfloat16m1_t test_vfrec7_v_bf16m1(vbfloat16m1_t op1, size_t vl) {
+ return __riscv_vfrec7_v_bf16m1(op1, vl);
+}
+
+// CHECK-RV64-LABEL: define dso_local <vscale x 8 x bfloat> @test_vfrec7_v_bf16m2(
+// CHECK-RV64-SAME: <vscale x 8 x bfloat> [[OP1:%.*]], i64 noundef [[VL:%.*]]) #[[ATTR0]] {
+// CHECK-RV64-NEXT: [[ENTRY:.*:]]
+// CHECK-RV64-NEXT: [[TMP0:%.*]] = call <vscale x 8 x bfloat> @llvm.riscv.vfrec7.nxv8bf16.i64(<vscale x 8 x bfloat> poison, <vscale x 8 x bfloat> [[OP1]], i64 7, i64 [[VL]])
+// CHECK-RV64-NEXT: ret <vscale x 8 x bfloat> [[TMP0]]
+//
+vbfloat16m2_t test_vfrec7_v_bf16m2(vbfloat16m2_t op1, size_t vl) {
+ return __riscv_vfrec7_v_bf16m2(op1, vl);
+}
+
+// CHECK-RV64-LABEL: define dso_local <vscale x 16 x bfloat> @test_vfrec7_v_bf16m4(
+// CHECK-RV64-SAME: <vscale x 16 x bfloat> [[OP1:%.*]], i64 noundef [[VL:%.*]]) #[[ATTR0]] {
+// CHECK-RV64-NEXT: [[ENTRY:.*:]]
+// CHECK-RV64-NEXT: [[TMP0:%.*]] = call <vscale x 16 x bfloat> @llvm.riscv.vfrec7.nxv16bf16.i64(<vscale x 16 x bfloat> poison, <vscale x 16 x bfloat> [[OP1]], i64 7, i64 [[VL]])
+// CHECK-RV64-NEXT: ret <vscale x 16 x bfloat> [[TMP0]]
+//
+vbfloat16m4_t test_vfrec7_v_bf16m4(vbfloat16m4_t op1, size_t vl) {
+ return __riscv_vfrec7_v_bf16m4(op1, vl);
+}
+
+// CHECK-RV64-LABEL: define dso_local <vscale x 32 x bfloat> @test_vfrec7_v_bf16m8(
+// CHECK-RV64-SAME: <vscale x 32 x bfloat> [[OP1:%.*]], i64 noundef [[VL:%.*]]) #[[ATTR0]] {
+// CHECK-RV64-NEXT: [[ENTRY:.*:]]
+// CHECK-RV64-NEXT: [[TMP0:%.*]] = call <vscale x 32 x bfloat> @llvm.riscv.vfrec7.nxv32bf16.i64(<vscale x 32 x bfloat> poison, <vscale x 32 x bfloat> [[OP1]], i64 7, i64 [[VL]])
+// CHECK-RV64-NEXT: ret <vscale x 32 x bfloat> [[TMP0]]
+//
+vbfloat16m8_t test_vfrec7_v_bf16m8(vbfloat16m8_t op1, size_t vl) {
+ return __riscv_vfrec7_v_bf16m8(op1, vl);
+}
+
+// CHECK-RV64-LABEL: define dso_local <vscale x 1 x bfloat> @test_vfrec7_v_bf16mf4_m(
+// CHECK-RV64-SAME: <vscale x 1 x i1> [[MASK:%.*]], <vscale x 1 x bfloat> [[OP1:%.*]], i64 noundef [[VL:%.*]]) #[[ATTR0]] {
+// CHECK-RV64-NEXT: [[ENTRY:.*:]]
+// CHECK-RV64-NEXT: [[TMP0:%.*]] = call <vscale x 1 x bfloat> @llvm.riscv.vfrec7.mask.nxv1bf16.i64(<vscale x 1 x bfloat> poison, <vscale x 1 x bfloat> [[OP1]], <vscale x 1 x i1> [[MASK]], i64 7, i64 [[VL]], i64 3)
+// CHECK-RV64-NEXT: ret <vscale x 1 x bfloat> [[TMP0]]
+//
+vbfloat16mf4_t test_vfrec7_v_bf16mf4_m(vbool64_t mask, vbfloat16mf4_t op1, size_t vl) {
+ return __riscv_vfrec7_v_bf16mf4_m(mask, op1, vl);
+}
+
+// CHECK-RV64-LABEL: define dso_local <vscale x 2 x bfloat> @test_vfrec7_v_bf16mf2_m(
+// CHECK-RV64-SAME: <vscale x 2 x i1> [[MASK:%.*]], <vscale x 2 x bfloat> [[OP1:%.*]], i64 noundef [[VL:%.*]]) #[[ATTR0]] {
+// CHECK-RV64-NEXT: [[ENTRY:.*:]]
+// CHECK-RV64-NEXT: [[TMP0:%.*]] = call <vscale x 2 x bfloat> @llvm.riscv.vfrec7.mask.nxv2bf16.i64(<vscale x 2 x bfloat> poison, <vscale x 2 x bfloat> [[OP1]], <vscale x 2 x i1> [[MASK]], i64 7, i64 [[VL]], i64 3)
+// CHECK-RV64-NEXT: ret <vscale x 2 x bfloat> [[TMP0]]
+//
+vbfloat16mf2_t test_vfrec7_v_bf16mf2_m(vbool32_t mask, vbfloat16mf2_t op1, size_t vl) {
+ return __riscv_vfrec7_v_bf16mf2_m(mask, op1, vl);
+}
+
+// CHECK-RV64-LABEL: define dso_local <vscale x 4 x bfloat> @test_vfrec7_v_bf16m1_m(
+// CHECK-RV64-SAME: <vscale x 4 x i1> [[MASK:%.*]], <vscale x 4 x bfloat> [[OP1:%.*]], i64 noundef [[VL:%.*]]) #[[ATTR0]] {
+// CHECK-RV64-NEXT: [[ENTRY:.*:]]
+// CHECK-RV64-NEXT: [[TMP0:%.*]] = call <vscale x 4 x bfloat> @llvm.riscv.vfrec7.mask.nxv4bf16.i64(<vscale x 4 x bfloat> poison, <vscale x 4 x bfloat> [[OP1]], <vscale x 4 x i1> [[MASK]], i64 7, i64 [[VL]], i64 3)
+// CHECK-RV64-NEXT: ret <vscale x 4 x bfloat> [[TMP0]]
+//
+vbfloat16m1_t test_vfrec7_v_bf16m1_m(vbool16_t mask, vbfloat16m1_t op1, size_t vl) {
+ return __riscv_vfrec7_v_bf16m1_m(mask, op1, vl);
+}
+
+// CHECK-RV64-LABEL: define dso_local <vscale x 8 x bfloat> @test_vfrec7_v_bf16m2_m(
+// CHECK-RV64-SAME: <vscale x 8 x i1> [[MASK:%.*]], <vscale x 8 x bfloat> [[OP1:%.*]], i64 noundef [[VL:%.*]]) #[[ATTR0]] {
+// CHECK-RV64-NEXT: [[ENTRY:.*:]]
+// CHECK-RV64-NEXT: [[TMP0:%.*]] = call <vscale x 8 x bfloat> @llvm.riscv.vfrec7.mask.nxv8bf16.i64(<vscale x 8 x bfloat> poison, <vscale x 8 x bfloat> [[OP1]], <vscale x 8 x i1> [[MASK]], i64 7, i64 [[VL]], i64 3)
+// CHECK-RV64-NEXT: ret <vscale x 8 x bfloat> [[TMP0]]
+//
+vbfloat16m2_t test_vfrec7_v_bf16m2_m(vbool8_t mask, vbfloat16m2_t op1, size_t vl) {
+ return __riscv_vfrec7_v_bf16m2_m(mask, op1, vl);
+}
+
+// CHECK-RV64-LABEL: define dso_local <vscale x 16 x bfloat> @test_vfrec7_v_bf16m4_m(
+// CHECK-RV64-SAME: <vscale x 16 x i1> [[MASK:%.*]], <vscale x 16 x bfloat> [[OP1:%.*]], i64 noundef [[VL:%.*]]) #[[ATTR0]] {
+// CHECK-RV64-NEXT: [[ENTRY:.*:]]
+// CHECK-RV64-NEXT: [[TMP0:%.*]] = call <vscale x 16 x bfloat> @llvm.riscv.vfrec7.mask.nxv16bf16.i64(<vscale x 16 x bfloat> poison, <vscale x 16 x bfloat> [[OP1]], <vscale x 16 x i1> [[MASK]], i64 7, i64 [[VL]], i64 3)
+// CHECK-RV64-NEXT: ret <vscale x 16 x bfloat> [[TMP0]]
+//
+vbfloat16m4_t test_vfrec7_v_bf16m4_m(vbool4_t mask, vbfloat16m4_t op1, size_t vl) {
+ return __riscv_vfrec7_v_bf16m4_m(mask, op1, vl);
+}
+
+// CHECK-RV64-LABEL: define dso_local <vscale x 32 x bfloat> @test_vfrec7_v_bf16m8_m(
+// CHECK-RV64-SAME: <vscale x 32 x i1> [[MASK:%.*]], <vscale x 32 x bfloat> [[OP1:%.*]], i64 noundef [[VL:%.*]]) #[[ATTR0]] {
+// CHECK-RV64-NEXT: [[ENTRY:.*:]]
+// CHECK-RV64-NEXT: [[TMP0:%.*]] = call <vscale x 32 x bfloat> @llvm.riscv.vfrec7.mask.nxv32bf16.i64(<vscale x 32 x bfloat> poison, <vscale x 32 x bfloat> [[OP1]], <vscale x 32 x i1> [[MASK]], i64 7, i64 [[VL]], i64 3)
+// CHECK-RV64-NEXT: ret <vscale x 32 x bfloat> [[TMP0]]
+//
+vbfloat16m8_t test_vfrec7_v_bf16m8_m(vbool2_t mask, vbfloat16m8_t op1, size_t vl) {
+ return __riscv_vfrec7_v_bf16m8_m(mask, op1, vl);
+}
+
diff --git a/clang/test/CodeGen/RISCV/rvv-intrinsics-autogenerated/zvfbfa/non-policy/non-overloaded/vfrsqrt7.c b/clang/test/CodeGen/RISCV/rvv-intrinsics-autogenerated/zvfbfa/non-policy/non-overloaded/vfrsqrt7.c
new file mode 100644
index 0000000..964c486
--- /dev/null
+++ b/clang/test/CodeGen/RISCV/rvv-intrinsics-autogenerated/zvfbfa/non-policy/non-overloaded/vfrsqrt7.c
@@ -0,0 +1,129 @@
+// NOTE: Assertions have been autogenerated by utils/update_cc_test_checks.py UTC_ARGS: --version 5
+// REQUIRES: riscv-registered-target
+// RUN: %clang_cc1 -triple riscv64 -target-feature +v \
+// RUN: -target-feature +experimental-zvfbfa -disable-O0-optnone \
+// RUN: -emit-llvm %s -o - | opt -S -passes=mem2reg | \
+// RUN: FileCheck --check-prefix=CHECK-RV64 %s
+
+#include <riscv_vector.h>
+
+// CHECK-RV64-LABEL: define dso_local <vscale x 1 x bfloat> @test_vfrsqrt7_v_bf16mf4(
+// CHECK-RV64-SAME: <vscale x 1 x bfloat> [[OP1:%.*]], i64 noundef [[VL:%.*]]) #[[ATTR0:[0-9]+]] {
+// CHECK-RV64-NEXT: [[ENTRY:.*:]]
+// CHECK-RV64-NEXT: [[TMP0:%.*]] = call <vscale x 1 x bfloat> @llvm.riscv.vfrsqrt7.nxv1bf16.i64(<vscale x 1 x bfloat> poison, <vscale x 1 x bfloat> [[OP1]], i64 [[VL]])
+// CHECK-RV64-NEXT: ret <vscale x 1 x bfloat> [[TMP0]]
+//
+vbfloat16mf4_t test_vfrsqrt7_v_bf16mf4(vbfloat16mf4_t op1, size_t vl) {
+ return __riscv_vfrsqrt7_v_bf16mf4(op1, vl);
+}
+
+// CHECK-RV64-LABEL: define dso_local <vscale x 2 x bfloat> @test_vfrsqrt7_v_bf16mf2(
+// CHECK-RV64-SAME: <vscale x 2 x bfloat> [[OP1:%.*]], i64 noundef [[VL:%.*]]) #[[ATTR0]] {
+// CHECK-RV64-NEXT: [[ENTRY:.*:]]
+// CHECK-RV64-NEXT: [[TMP0:%.*]] = call <vscale x 2 x bfloat> @llvm.riscv.vfrsqrt7.nxv2bf16.i64(<vscale x 2 x bfloat> poison, <vscale x 2 x bfloat> [[OP1]], i64 [[VL]])
+// CHECK-RV64-NEXT: ret <vscale x 2 x bfloat> [[TMP0]]
+//
+vbfloat16mf2_t test_vfrsqrt7_v_bf16mf2(vbfloat16mf2_t op1, size_t vl) {
+ return __riscv_vfrsqrt7_v_bf16mf2(op1, vl);
+}
+
+// CHECK-RV64-LABEL: define dso_local <vscale x 4 x bfloat> @test_vfrsqrt7_v_bf16m1(
+// CHECK-RV64-SAME: <vscale x 4 x bfloat> [[OP1:%.*]], i64 noundef [[VL:%.*]]) #[[ATTR0]] {
+// CHECK-RV64-NEXT: [[ENTRY:.*:]]
+// CHECK-RV64-NEXT: [[TMP0:%.*]] = call <vscale x 4 x bfloat> @llvm.riscv.vfrsqrt7.nxv4bf16.i64(<vscale x 4 x bfloat> poison, <vscale x 4 x bfloat> [[OP1]], i64 [[VL]])
+// CHECK-RV64-NEXT: ret <vscale x 4 x bfloat> [[TMP0]]
+//
+vbfloat16m1_t test_vfrsqrt7_v_bf16m1(vbfloat16m1_t op1, size_t vl) {
+ return __riscv_vfrsqrt7_v_bf16m1(op1, vl);
+}
+
+// CHECK-RV64-LABEL: define dso_local <vscale x 8 x bfloat> @test_vfrsqrt7_v_bf16m2(
+// CHECK-RV64-SAME: <vscale x 8 x bfloat> [[OP1:%.*]], i64 noundef [[VL:%.*]]) #[[ATTR0]] {
+// CHECK-RV64-NEXT: [[ENTRY:.*:]]
+// CHECK-RV64-NEXT: [[TMP0:%.*]] = call <vscale x 8 x bfloat> @llvm.riscv.vfrsqrt7.nxv8bf16.i64(<vscale x 8 x bfloat> poison, <vscale x 8 x bfloat> [[OP1]], i64 [[VL]])
+// CHECK-RV64-NEXT: ret <vscale x 8 x bfloat> [[TMP0]]
+//
+vbfloat16m2_t test_vfrsqrt7_v_bf16m2(vbfloat16m2_t op1, size_t vl) {
+ return __riscv_vfrsqrt7_v_bf16m2(op1, vl);
+}
+
+// CHECK-RV64-LABEL: define dso_local <vscale x 16 x bfloat> @test_vfrsqrt7_v_bf16m4(
+// CHECK-RV64-SAME: <vscale x 16 x bfloat> [[OP1:%.*]], i64 noundef [[VL:%.*]]) #[[ATTR0]] {
+// CHECK-RV64-NEXT: [[ENTRY:.*:]]
+// CHECK-RV64-NEXT: [[TMP0:%.*]] = call <vscale x 16 x bfloat> @llvm.riscv.vfrsqrt7.nxv16bf16.i64(<vscale x 16 x bfloat> poison, <vscale x 16 x bfloat> [[OP1]], i64 [[VL]])
+// CHECK-RV64-NEXT: ret <vscale x 16 x bfloat> [[TMP0]]
+//
+vbfloat16m4_t test_vfrsqrt7_v_bf16m4(vbfloat16m4_t op1, size_t vl) {
+ return __riscv_vfrsqrt7_v_bf16m4(op1, vl);
+}
+
+// CHECK-RV64-LABEL: define dso_local <vscale x 32 x bfloat> @test_vfrsqrt7_v_bf16m8(
+// CHECK-RV64-SAME: <vscale x 32 x bfloat> [[OP1:%.*]], i64 noundef [[VL:%.*]]) #[[ATTR0]] {
+// CHECK-RV64-NEXT: [[ENTRY:.*:]]
+// CHECK-RV64-NEXT: [[TMP0:%.*]] = call <vscale x 32 x bfloat> @llvm.riscv.vfrsqrt7.nxv32bf16.i64(<vscale x 32 x bfloat> poison, <vscale x 32 x bfloat> [[OP1]], i64 [[VL]])
+// CHECK-RV64-NEXT: ret <vscale x 32 x bfloat> [[TMP0]]
+//
+vbfloat16m8_t test_vfrsqrt7_v_bf16m8(vbfloat16m8_t op1, size_t vl) {
+ return __riscv_vfrsqrt7_v_bf16m8(op1, vl);
+}
+
+// CHECK-RV64-LABEL: define dso_local <vscale x 1 x bfloat> @test_vfrsqrt7_v_bf16mf4_m(
+// CHECK-RV64-SAME: <vscale x 1 x i1> [[MASK:%.*]], <vscale x 1 x bfloat> [[OP1:%.*]], i64 noundef [[VL:%.*]]) #[[ATTR0]] {
+// CHECK-RV64-NEXT: [[ENTRY:.*:]]
+// CHECK-RV64-NEXT: [[TMP0:%.*]] = call <vscale x 1 x bfloat> @llvm.riscv.vfrsqrt7.mask.nxv1bf16.i64(<vscale x 1 x bfloat> poison, <vscale x 1 x bfloat> [[OP1]], <vscale x 1 x i1> [[MASK]], i64 [[VL]], i64 3)
+// CHECK-RV64-NEXT: ret <vscale x 1 x bfloat> [[TMP0]]
+//
+vbfloat16mf4_t test_vfrsqrt7_v_bf16mf4_m(vbool64_t mask, vbfloat16mf4_t op1, size_t vl) {
+ return __riscv_vfrsqrt7_v_bf16mf4_m(mask, op1, vl);
+}
+
+// CHECK-RV64-LABEL: define dso_local <vscale x 2 x bfloat> @test_vfrsqrt7_v_bf16mf2_m(
+// CHECK-RV64-SAME: <vscale x 2 x i1> [[MASK:%.*]], <vscale x 2 x bfloat> [[OP1:%.*]], i64 noundef [[VL:%.*]]) #[[ATTR0]] {
+// CHECK-RV64-NEXT: [[ENTRY:.*:]]
+// CHECK-RV64-NEXT: [[TMP0:%.*]] = call <vscale x 2 x bfloat> @llvm.riscv.vfrsqrt7.mask.nxv2bf16.i64(<vscale x 2 x bfloat> poison, <vscale x 2 x bfloat> [[OP1]], <vscale x 2 x i1> [[MASK]], i64 [[VL]], i64 3)
+// CHECK-RV64-NEXT: ret <vscale x 2 x bfloat> [[TMP0]]
+//
+vbfloat16mf2_t test_vfrsqrt7_v_bf16mf2_m(vbool32_t mask, vbfloat16mf2_t op1, size_t vl) {
+ return __riscv_vfrsqrt7_v_bf16mf2_m(mask, op1, vl);
+}
+
+// CHECK-RV64-LABEL: define dso_local <vscale x 4 x bfloat> @test_vfrsqrt7_v_bf16m1_m(
+// CHECK-RV64-SAME: <vscale x 4 x i1> [[MASK:%.*]], <vscale x 4 x bfloat> [[OP1:%.*]], i64 noundef [[VL:%.*]]) #[[ATTR0]] {
+// CHECK-RV64-NEXT: [[ENTRY:.*:]]
+// CHECK-RV64-NEXT: [[TMP0:%.*]] = call <vscale x 4 x bfloat> @llvm.riscv.vfrsqrt7.mask.nxv4bf16.i64(<vscale x 4 x bfloat> poison, <vscale x 4 x bfloat> [[OP1]], <vscale x 4 x i1> [[MASK]], i64 [[VL]], i64 3)
+// CHECK-RV64-NEXT: ret <vscale x 4 x bfloat> [[TMP0]]
+//
+vbfloat16m1_t test_vfrsqrt7_v_bf16m1_m(vbool16_t mask, vbfloat16m1_t op1, size_t vl) {
+ return __riscv_vfrsqrt7_v_bf16m1_m(mask, op1, vl);
+}
+
+// CHECK-RV64-LABEL: define dso_local <vscale x 8 x bfloat> @test_vfrsqrt7_v_bf16m2_m(
+// CHECK-RV64-SAME: <vscale x 8 x i1> [[MASK:%.*]], <vscale x 8 x bfloat> [[OP1:%.*]], i64 noundef [[VL:%.*]]) #[[ATTR0]] {
+// CHECK-RV64-NEXT: [[ENTRY:.*:]]
+// CHECK-RV64-NEXT: [[TMP0:%.*]] = call <vscale x 8 x bfloat> @llvm.riscv.vfrsqrt7.mask.nxv8bf16.i64(<vscale x 8 x bfloat> poison, <vscale x 8 x bfloat> [[OP1]], <vscale x 8 x i1> [[MASK]], i64 [[VL]], i64 3)
+// CHECK-RV64-NEXT: ret <vscale x 8 x bfloat> [[TMP0]]
+//
+vbfloat16m2_t test_vfrsqrt7_v_bf16m2_m(vbool8_t mask, vbfloat16m2_t op1, size_t vl) {
+ return __riscv_vfrsqrt7_v_bf16m2_m(mask, op1, vl);
+}
+
+// CHECK-RV64-LABEL: define dso_local <vscale x 16 x bfloat> @test_vfrsqrt7_v_bf16m4_m(
+// CHECK-RV64-SAME: <vscale x 16 x i1> [[MASK:%.*]], <vscale x 16 x bfloat> [[OP1:%.*]], i64 noundef [[VL:%.*]]) #[[ATTR0]] {
+// CHECK-RV64-NEXT: [[ENTRY:.*:]]
+// CHECK-RV64-NEXT: [[TMP0:%.*]] = call <vscale x 16 x bfloat> @llvm.riscv.vfrsqrt7.mask.nxv16bf16.i64(<vscale x 16 x bfloat> poison, <vscale x 16 x bfloat> [[OP1]], <vscale x 16 x i1> [[MASK]], i64 [[VL]], i64 3)
+// CHECK-RV64-NEXT: ret <vscale x 16 x bfloat> [[TMP0]]
+//
+vbfloat16m4_t test_vfrsqrt7_v_bf16m4_m(vbool4_t mask, vbfloat16m4_t op1, size_t vl) {
+ return __riscv_vfrsqrt7_v_bf16m4_m(mask, op1, vl);
+}
+
+// CHECK-RV64-LABEL: define dso_local <vscale x 32 x bfloat> @test_vfrsqrt7_v_bf16m8_m(
+// CHECK-RV64-SAME: <vscale x 32 x i1> [[MASK:%.*]], <vscale x 32 x bfloat> [[OP1:%.*]], i64 noundef [[VL:%.*]]) #[[ATTR0]] {
+// CHECK-RV64-NEXT: [[ENTRY:.*:]]
+// CHECK-RV64-NEXT: [[TMP0:%.*]] = call <vscale x 32 x bfloat> @llvm.riscv.vfrsqrt7.mask.nxv32bf16.i64(<vscale x 32 x bfloat> poison, <vscale x 32 x bfloat> [[OP1]], <vscale x 32 x i1> [[MASK]], i64 [[VL]], i64 3)
+// CHECK-RV64-NEXT: ret <vscale x 32 x bfloat> [[TMP0]]
+//
+vbfloat16m8_t test_vfrsqrt7_v_bf16m8_m(vbool2_t mask, vbfloat16m8_t op1, size_t vl) {
+ return __riscv_vfrsqrt7_v_bf16m8_m(mask, op1, vl);
+}
+
diff --git a/clang/test/CodeGen/RISCV/rvv-intrinsics-autogenerated/zvfbfa/non-policy/non-overloaded/vfrsub.c b/clang/test/CodeGen/RISCV/rvv-intrinsics-autogenerated/zvfbfa/non-policy/non-overloaded/vfrsub.c
new file mode 100644
index 0000000..c7c3869e
--- /dev/null
+++ b/clang/test/CodeGen/RISCV/rvv-intrinsics-autogenerated/zvfbfa/non-policy/non-overloaded/vfrsub.c
@@ -0,0 +1,129 @@
+// NOTE: Assertions have been autogenerated by utils/update_cc_test_checks.py UTC_ARGS: --version 5
+// REQUIRES: riscv-registered-target
+// RUN: %clang_cc1 -triple riscv64 -target-feature +v \
+// RUN: -target-feature +experimental-zvfbfa -disable-O0-optnone \
+// RUN: -emit-llvm %s -o - | opt -S -passes=mem2reg | \
+// RUN: FileCheck --check-prefix=CHECK-RV64 %s
+
+#include <riscv_vector.h>
+
+// CHECK-RV64-LABEL: define dso_local <vscale x 1 x bfloat> @test_vfrsub_vf_bf16mf4(
+// CHECK-RV64-SAME: <vscale x 1 x bfloat> [[OP1:%.*]], bfloat noundef [[OP2:%.*]], i64 noundef [[VL:%.*]]) #[[ATTR0:[0-9]+]] {
+// CHECK-RV64-NEXT: [[ENTRY:.*:]]
+// CHECK-RV64-NEXT: [[TMP0:%.*]] = call <vscale x 1 x bfloat> @llvm.riscv.vfrsub.nxv1bf16.bf16.i64(<vscale x 1 x bfloat> poison, <vscale x 1 x bfloat> [[OP1]], bfloat [[OP2]], i64 7, i64 [[VL]])
+// CHECK-RV64-NEXT: ret <vscale x 1 x bfloat> [[TMP0]]
+//
+vbfloat16mf4_t test_vfrsub_vf_bf16mf4(vbfloat16mf4_t op1, __bf16 op2, size_t vl) {
+ return __riscv_vfrsub_vf_bf16mf4(op1, op2, vl);
+}
+
+// CHECK-RV64-LABEL: define dso_local <vscale x 2 x bfloat> @test_vfrsub_vf_bf16mf2(
+// CHECK-RV64-SAME: <vscale x 2 x bfloat> [[OP1:%.*]], bfloat noundef [[OP2:%.*]], i64 noundef [[VL:%.*]]) #[[ATTR0]] {
+// CHECK-RV64-NEXT: [[ENTRY:.*:]]
+// CHECK-RV64-NEXT: [[TMP0:%.*]] = call <vscale x 2 x bfloat> @llvm.riscv.vfrsub.nxv2bf16.bf16.i64(<vscale x 2 x bfloat> poison, <vscale x 2 x bfloat> [[OP1]], bfloat [[OP2]], i64 7, i64 [[VL]])
+// CHECK-RV64-NEXT: ret <vscale x 2 x bfloat> [[TMP0]]
+//
+vbfloat16mf2_t test_vfrsub_vf_bf16mf2(vbfloat16mf2_t op1, __bf16 op2, size_t vl) {
+ return __riscv_vfrsub_vf_bf16mf2(op1, op2, vl);
+}
+
+// CHECK-RV64-LABEL: define dso_local <vscale x 4 x bfloat> @test_vfrsub_vf_bf16m1(
+// CHECK-RV64-SAME: <vscale x 4 x bfloat> [[OP1:%.*]], bfloat noundef [[OP2:%.*]], i64 noundef [[VL:%.*]]) #[[ATTR0]] {
+// CHECK-RV64-NEXT: [[ENTRY:.*:]]
+// CHECK-RV64-NEXT: [[TMP0:%.*]] = call <vscale x 4 x bfloat> @llvm.riscv.vfrsub.nxv4bf16.bf16.i64(<vscale x 4 x bfloat> poison, <vscale x 4 x bfloat> [[OP1]], bfloat [[OP2]], i64 7, i64 [[VL]])
+// CHECK-RV64-NEXT: ret <vscale x 4 x bfloat> [[TMP0]]
+//
+vbfloat16m1_t test_vfrsub_vf_bf16m1(vbfloat16m1_t op1, __bf16 op2, size_t vl) {
+ return __riscv_vfrsub_vf_bf16m1(op1, op2, vl);
+}
+
+// CHECK-RV64-LABEL: define dso_local <vscale x 8 x bfloat> @test_vfrsub_vf_bf16m2(
+// CHECK-RV64-SAME: <vscale x 8 x bfloat> [[OP1:%.*]], bfloat noundef [[OP2:%.*]], i64 noundef [[VL:%.*]]) #[[ATTR0]] {
+// CHECK-RV64-NEXT: [[ENTRY:.*:]]
+// CHECK-RV64-NEXT: [[TMP0:%.*]] = call <vscale x 8 x bfloat> @llvm.riscv.vfrsub.nxv8bf16.bf16.i64(<vscale x 8 x bfloat> poison, <vscale x 8 x bfloat> [[OP1]], bfloat [[OP2]], i64 7, i64 [[VL]])
+// CHECK-RV64-NEXT: ret <vscale x 8 x bfloat> [[TMP0]]
+//
+vbfloat16m2_t test_vfrsub_vf_bf16m2(vbfloat16m2_t op1, __bf16 op2, size_t vl) {
+ return __riscv_vfrsub_vf_bf16m2(op1, op2, vl);
+}
+
+// CHECK-RV64-LABEL: define dso_local <vscale x 16 x bfloat> @test_vfrsub_vf_bf16m4(
+// CHECK-RV64-SAME: <vscale x 16 x bfloat> [[OP1:%.*]], bfloat noundef [[OP2:%.*]], i64 noundef [[VL:%.*]]) #[[ATTR0]] {
+// CHECK-RV64-NEXT: [[ENTRY:.*:]]
+// CHECK-RV64-NEXT: [[TMP0:%.*]] = call <vscale x 16 x bfloat> @llvm.riscv.vfrsub.nxv16bf16.bf16.i64(<vscale x 16 x bfloat> poison, <vscale x 16 x bfloat> [[OP1]], bfloat [[OP2]], i64 7, i64 [[VL]])
+// CHECK-RV64-NEXT: ret <vscale x 16 x bfloat> [[TMP0]]
+//
+vbfloat16m4_t test_vfrsub_vf_bf16m4(vbfloat16m4_t op1, __bf16 op2, size_t vl) {
+ return __riscv_vfrsub_vf_bf16m4(op1, op2, vl);
+}
+
+// CHECK-RV64-LABEL: define dso_local <vscale x 32 x bfloat> @test_vfrsub_vf_bf16m8(
+// CHECK-RV64-SAME: <vscale x 32 x bfloat> [[OP1:%.*]], bfloat noundef [[OP2:%.*]], i64 noundef [[VL:%.*]]) #[[ATTR0]] {
+// CHECK-RV64-NEXT: [[ENTRY:.*:]]
+// CHECK-RV64-NEXT: [[TMP0:%.*]] = call <vscale x 32 x bfloat> @llvm.riscv.vfrsub.nxv32bf16.bf16.i64(<vscale x 32 x bfloat> poison, <vscale x 32 x bfloat> [[OP1]], bfloat [[OP2]], i64 7, i64 [[VL]])
+// CHECK-RV64-NEXT: ret <vscale x 32 x bfloat> [[TMP0]]
+//
+vbfloat16m8_t test_vfrsub_vf_bf16m8(vbfloat16m8_t op1, __bf16 op2, size_t vl) {
+ return __riscv_vfrsub_vf_bf16m8(op1, op2, vl);
+}
+
+// CHECK-RV64-LABEL: define dso_local <vscale x 1 x bfloat> @test_vfrsub_vf_bf16mf4_m(
+// CHECK-RV64-SAME: <vscale x 1 x i1> [[MASK:%.*]], <vscale x 1 x bfloat> [[OP1:%.*]], bfloat noundef [[OP2:%.*]], i64 noundef [[VL:%.*]]) #[[ATTR0]] {
+// CHECK-RV64-NEXT: [[ENTRY:.*:]]
+// CHECK-RV64-NEXT: [[TMP0:%.*]] = call <vscale x 1 x bfloat> @llvm.riscv.vfrsub.mask.nxv1bf16.bf16.i64(<vscale x 1 x bfloat> poison, <vscale x 1 x bfloat> [[OP1]], bfloat [[OP2]], <vscale x 1 x i1> [[MASK]], i64 7, i64 [[VL]], i64 3)
+// CHECK-RV64-NEXT: ret <vscale x 1 x bfloat> [[TMP0]]
+//
+vbfloat16mf4_t test_vfrsub_vf_bf16mf4_m(vbool64_t mask, vbfloat16mf4_t op1, __bf16 op2, size_t vl) {
+ return __riscv_vfrsub_vf_bf16mf4_m(mask, op1, op2, vl);
+}
+
+// CHECK-RV64-LABEL: define dso_local <vscale x 2 x bfloat> @test_vfrsub_vf_bf16mf2_m(
+// CHECK-RV64-SAME: <vscale x 2 x i1> [[MASK:%.*]], <vscale x 2 x bfloat> [[OP1:%.*]], bfloat noundef [[OP2:%.*]], i64 noundef [[VL:%.*]]) #[[ATTR0]] {
+// CHECK-RV64-NEXT: [[ENTRY:.*:]]
+// CHECK-RV64-NEXT: [[TMP0:%.*]] = call <vscale x 2 x bfloat> @llvm.riscv.vfrsub.mask.nxv2bf16.bf16.i64(<vscale x 2 x bfloat> poison, <vscale x 2 x bfloat> [[OP1]], bfloat [[OP2]], <vscale x 2 x i1> [[MASK]], i64 7, i64 [[VL]], i64 3)
+// CHECK-RV64-NEXT: ret <vscale x 2 x bfloat> [[TMP0]]
+//
+vbfloat16mf2_t test_vfrsub_vf_bf16mf2_m(vbool32_t mask, vbfloat16mf2_t op1, __bf16 op2, size_t vl) {
+ return __riscv_vfrsub_vf_bf16mf2_m(mask, op1, op2, vl);
+}
+
+// CHECK-RV64-LABEL: define dso_local <vscale x 4 x bfloat> @test_vfrsub_vf_bf16m1_m(
+// CHECK-RV64-SAME: <vscale x 4 x i1> [[MASK:%.*]], <vscale x 4 x bfloat> [[OP1:%.*]], bfloat noundef [[OP2:%.*]], i64 noundef [[VL:%.*]]) #[[ATTR0]] {
+// CHECK-RV64-NEXT: [[ENTRY:.*:]]
+// CHECK-RV64-NEXT: [[TMP0:%.*]] = call <vscale x 4 x bfloat> @llvm.riscv.vfrsub.mask.nxv4bf16.bf16.i64(<vscale x 4 x bfloat> poison, <vscale x 4 x bfloat> [[OP1]], bfloat [[OP2]], <vscale x 4 x i1> [[MASK]], i64 7, i64 [[VL]], i64 3)
+// CHECK-RV64-NEXT: ret <vscale x 4 x bfloat> [[TMP0]]
+//
+vbfloat16m1_t test_vfrsub_vf_bf16m1_m(vbool16_t mask, vbfloat16m1_t op1, __bf16 op2, size_t vl) {
+ return __riscv_vfrsub_vf_bf16m1_m(mask, op1, op2, vl);
+}
+
+// CHECK-RV64-LABEL: define dso_local <vscale x 8 x bfloat> @test_vfrsub_vf_bf16m2_m(
+// CHECK-RV64-SAME: <vscale x 8 x i1> [[MASK:%.*]], <vscale x 8 x bfloat> [[OP1:%.*]], bfloat noundef [[OP2:%.*]], i64 noundef [[VL:%.*]]) #[[ATTR0]] {
+// CHECK-RV64-NEXT: [[ENTRY:.*:]]
+// CHECK-RV64-NEXT: [[TMP0:%.*]] = call <vscale x 8 x bfloat> @llvm.riscv.vfrsub.mask.nxv8bf16.bf16.i64(<vscale x 8 x bfloat> poison, <vscale x 8 x bfloat> [[OP1]], bfloat [[OP2]], <vscale x 8 x i1> [[MASK]], i64 7, i64 [[VL]], i64 3)
+// CHECK-RV64-NEXT: ret <vscale x 8 x bfloat> [[TMP0]]
+//
+vbfloat16m2_t test_vfrsub_vf_bf16m2_m(vbool8_t mask, vbfloat16m2_t op1, __bf16 op2, size_t vl) {
+ return __riscv_vfrsub_vf_bf16m2_m(mask, op1, op2, vl);
+}
+
+// CHECK-RV64-LABEL: define dso_local <vscale x 16 x bfloat> @test_vfrsub_vf_bf16m4_m(
+// CHECK-RV64-SAME: <vscale x 16 x i1> [[MASK:%.*]], <vscale x 16 x bfloat> [[OP1:%.*]], bfloat noundef [[OP2:%.*]], i64 noundef [[VL:%.*]]) #[[ATTR0]] {
+// CHECK-RV64-NEXT: [[ENTRY:.*:]]
+// CHECK-RV64-NEXT: [[TMP0:%.*]] = call <vscale x 16 x bfloat> @llvm.riscv.vfrsub.mask.nxv16bf16.bf16.i64(<vscale x 16 x bfloat> poison, <vscale x 16 x bfloat> [[OP1]], bfloat [[OP2]], <vscale x 16 x i1> [[MASK]], i64 7, i64 [[VL]], i64 3)
+// CHECK-RV64-NEXT: ret <vscale x 16 x bfloat> [[TMP0]]
+//
+vbfloat16m4_t test_vfrsub_vf_bf16m4_m(vbool4_t mask, vbfloat16m4_t op1, __bf16 op2, size_t vl) {
+ return __riscv_vfrsub_vf_bf16m4_m(mask, op1, op2, vl);
+}
+
+// CHECK-RV64-LABEL: define dso_local <vscale x 32 x bfloat> @test_vfrsub_vf_bf16m8_m(
+// CHECK-RV64-SAME: <vscale x 32 x i1> [[MASK:%.*]], <vscale x 32 x bfloat> [[OP1:%.*]], bfloat noundef [[OP2:%.*]], i64 noundef [[VL:%.*]]) #[[ATTR0]] {
+// CHECK-RV64-NEXT: [[ENTRY:.*:]]
+// CHECK-RV64-NEXT: [[TMP0:%.*]] = call <vscale x 32 x bfloat> @llvm.riscv.vfrsub.mask.nxv32bf16.bf16.i64(<vscale x 32 x bfloat> poison, <vscale x 32 x bfloat> [[OP1]], bfloat [[OP2]], <vscale x 32 x i1> [[MASK]], i64 7, i64 [[VL]], i64 3)
+// CHECK-RV64-NEXT: ret <vscale x 32 x bfloat> [[TMP0]]
+//
+vbfloat16m8_t test_vfrsub_vf_bf16m8_m(vbool2_t mask, vbfloat16m8_t op1, __bf16 op2, size_t vl) {
+ return __riscv_vfrsub_vf_bf16m8_m(mask, op1, op2, vl);
+}
+
diff --git a/clang/test/CodeGen/RISCV/rvv-intrinsics-autogenerated/zvfbfa/non-policy/non-overloaded/vfsgnj.c b/clang/test/CodeGen/RISCV/rvv-intrinsics-autogenerated/zvfbfa/non-policy/non-overloaded/vfsgnj.c
new file mode 100644
index 0000000..778b8b83
--- /dev/null
+++ b/clang/test/CodeGen/RISCV/rvv-intrinsics-autogenerated/zvfbfa/non-policy/non-overloaded/vfsgnj.c
@@ -0,0 +1,249 @@
+// NOTE: Assertions have been autogenerated by utils/update_cc_test_checks.py UTC_ARGS: --version 5
+// REQUIRES: riscv-registered-target
+// RUN: %clang_cc1 -triple riscv64 -target-feature +v \
+// RUN: -target-feature +experimental-zvfbfa -disable-O0-optnone \
+// RUN: -emit-llvm %s -o - | opt -S -passes=mem2reg | \
+// RUN: FileCheck --check-prefix=CHECK-RV64 %s
+
+#include <riscv_vector.h>
+
+// CHECK-RV64-LABEL: define dso_local <vscale x 1 x bfloat> @test_vfsgnj_vv_bf16mf4(
+// CHECK-RV64-SAME: <vscale x 1 x bfloat> [[OP1:%.*]], <vscale x 1 x bfloat> [[OP2:%.*]], i64 noundef [[VL:%.*]]) #[[ATTR0:[0-9]+]] {
+// CHECK-RV64-NEXT: [[ENTRY:.*:]]
+// CHECK-RV64-NEXT: [[TMP0:%.*]] = call <vscale x 1 x bfloat> @llvm.riscv.vfsgnj.nxv1bf16.nxv1bf16.i64(<vscale x 1 x bfloat> poison, <vscale x 1 x bfloat> [[OP1]], <vscale x 1 x bfloat> [[OP2]], i64 [[VL]])
+// CHECK-RV64-NEXT: ret <vscale x 1 x bfloat> [[TMP0]]
+//
+vbfloat16mf4_t test_vfsgnj_vv_bf16mf4(vbfloat16mf4_t op1, vbfloat16mf4_t op2, size_t vl) {
+ return __riscv_vfsgnj_vv_bf16mf4(op1, op2, vl);
+}
+
+// CHECK-RV64-LABEL: define dso_local <vscale x 1 x bfloat> @test_vfsgnj_vf_bf16mf4(
+// CHECK-RV64-SAME: <vscale x 1 x bfloat> [[OP1:%.*]], bfloat noundef [[OP2:%.*]], i64 noundef [[VL:%.*]]) #[[ATTR0]] {
+// CHECK-RV64-NEXT: [[ENTRY:.*:]]
+// CHECK-RV64-NEXT: [[TMP0:%.*]] = call <vscale x 1 x bfloat> @llvm.riscv.vfsgnj.nxv1bf16.bf16.i64(<vscale x 1 x bfloat> poison, <vscale x 1 x bfloat> [[OP1]], bfloat [[OP2]], i64 [[VL]])
+// CHECK-RV64-NEXT: ret <vscale x 1 x bfloat> [[TMP0]]
+//
+vbfloat16mf4_t test_vfsgnj_vf_bf16mf4(vbfloat16mf4_t op1, __bf16 op2, size_t vl) {
+ return __riscv_vfsgnj_vf_bf16mf4(op1, op2, vl);
+}
+
+// CHECK-RV64-LABEL: define dso_local <vscale x 2 x bfloat> @test_vfsgnj_vv_bf16mf2(
+// CHECK-RV64-SAME: <vscale x 2 x bfloat> [[OP1:%.*]], <vscale x 2 x bfloat> [[OP2:%.*]], i64 noundef [[VL:%.*]]) #[[ATTR0]] {
+// CHECK-RV64-NEXT: [[ENTRY:.*:]]
+// CHECK-RV64-NEXT: [[TMP0:%.*]] = call <vscale x 2 x bfloat> @llvm.riscv.vfsgnj.nxv2bf16.nxv2bf16.i64(<vscale x 2 x bfloat> poison, <vscale x 2 x bfloat> [[OP1]], <vscale x 2 x bfloat> [[OP2]], i64 [[VL]])
+// CHECK-RV64-NEXT: ret <vscale x 2 x bfloat> [[TMP0]]
+//
+vbfloat16mf2_t test_vfsgnj_vv_bf16mf2(vbfloat16mf2_t op1, vbfloat16mf2_t op2, size_t vl) {
+ return __riscv_vfsgnj_vv_bf16mf2(op1, op2, vl);
+}
+
+// CHECK-RV64-LABEL: define dso_local <vscale x 2 x bfloat> @test_vfsgnj_vf_bf16mf2(
+// CHECK-RV64-SAME: <vscale x 2 x bfloat> [[OP1:%.*]], bfloat noundef [[OP2:%.*]], i64 noundef [[VL:%.*]]) #[[ATTR0]] {
+// CHECK-RV64-NEXT: [[ENTRY:.*:]]
+// CHECK-RV64-NEXT: [[TMP0:%.*]] = call <vscale x 2 x bfloat> @llvm.riscv.vfsgnj.nxv2bf16.bf16.i64(<vscale x 2 x bfloat> poison, <vscale x 2 x bfloat> [[OP1]], bfloat [[OP2]], i64 [[VL]])
+// CHECK-RV64-NEXT: ret <vscale x 2 x bfloat> [[TMP0]]
+//
+vbfloat16mf2_t test_vfsgnj_vf_bf16mf2(vbfloat16mf2_t op1, __bf16 op2, size_t vl) {
+ return __riscv_vfsgnj_vf_bf16mf2(op1, op2, vl);
+}
+
+// CHECK-RV64-LABEL: define dso_local <vscale x 4 x bfloat> @test_vfsgnj_vv_bf16m1(
+// CHECK-RV64-SAME: <vscale x 4 x bfloat> [[OP1:%.*]], <vscale x 4 x bfloat> [[OP2:%.*]], i64 noundef [[VL:%.*]]) #[[ATTR0]] {
+// CHECK-RV64-NEXT: [[ENTRY:.*:]]
+// CHECK-RV64-NEXT: [[TMP0:%.*]] = call <vscale x 4 x bfloat> @llvm.riscv.vfsgnj.nxv4bf16.nxv4bf16.i64(<vscale x 4 x bfloat> poison, <vscale x 4 x bfloat> [[OP1]], <vscale x 4 x bfloat> [[OP2]], i64 [[VL]])
+// CHECK-RV64-NEXT: ret <vscale x 4 x bfloat> [[TMP0]]
+//
+vbfloat16m1_t test_vfsgnj_vv_bf16m1(vbfloat16m1_t op1, vbfloat16m1_t op2, size_t vl) {
+ return __riscv_vfsgnj_vv_bf16m1(op1, op2, vl);
+}
+
+// CHECK-RV64-LABEL: define dso_local <vscale x 4 x bfloat> @test_vfsgnj_vf_bf16m1(
+// CHECK-RV64-SAME: <vscale x 4 x bfloat> [[OP1:%.*]], bfloat noundef [[OP2:%.*]], i64 noundef [[VL:%.*]]) #[[ATTR0]] {
+// CHECK-RV64-NEXT: [[ENTRY:.*:]]
+// CHECK-RV64-NEXT: [[TMP0:%.*]] = call <vscale x 4 x bfloat> @llvm.riscv.vfsgnj.nxv4bf16.bf16.i64(<vscale x 4 x bfloat> poison, <vscale x 4 x bfloat> [[OP1]], bfloat [[OP2]], i64 [[VL]])
+// CHECK-RV64-NEXT: ret <vscale x 4 x bfloat> [[TMP0]]
+//
+vbfloat16m1_t test_vfsgnj_vf_bf16m1(vbfloat16m1_t op1, __bf16 op2, size_t vl) {
+ return __riscv_vfsgnj_vf_bf16m1(op1, op2, vl);
+}
+
+// CHECK-RV64-LABEL: define dso_local <vscale x 8 x bfloat> @test_vfsgnj_vv_bf16m2(
+// CHECK-RV64-SAME: <vscale x 8 x bfloat> [[OP1:%.*]], <vscale x 8 x bfloat> [[OP2:%.*]], i64 noundef [[VL:%.*]]) #[[ATTR0]] {
+// CHECK-RV64-NEXT: [[ENTRY:.*:]]
+// CHECK-RV64-NEXT: [[TMP0:%.*]] = call <vscale x 8 x bfloat> @llvm.riscv.vfsgnj.nxv8bf16.nxv8bf16.i64(<vscale x 8 x bfloat> poison, <vscale x 8 x bfloat> [[OP1]], <vscale x 8 x bfloat> [[OP2]], i64 [[VL]])
+// CHECK-RV64-NEXT: ret <vscale x 8 x bfloat> [[TMP0]]
+//
+vbfloat16m2_t test_vfsgnj_vv_bf16m2(vbfloat16m2_t op1, vbfloat16m2_t op2, size_t vl) {
+ return __riscv_vfsgnj_vv_bf16m2(op1, op2, vl);
+}
+
+// CHECK-RV64-LABEL: define dso_local <vscale x 8 x bfloat> @test_vfsgnj_vf_bf16m2(
+// CHECK-RV64-SAME: <vscale x 8 x bfloat> [[OP1:%.*]], bfloat noundef [[OP2:%.*]], i64 noundef [[VL:%.*]]) #[[ATTR0]] {
+// CHECK-RV64-NEXT: [[ENTRY:.*:]]
+// CHECK-RV64-NEXT: [[TMP0:%.*]] = call <vscale x 8 x bfloat> @llvm.riscv.vfsgnj.nxv8bf16.bf16.i64(<vscale x 8 x bfloat> poison, <vscale x 8 x bfloat> [[OP1]], bfloat [[OP2]], i64 [[VL]])
+// CHECK-RV64-NEXT: ret <vscale x 8 x bfloat> [[TMP0]]
+//
+vbfloat16m2_t test_vfsgnj_vf_bf16m2(vbfloat16m2_t op1, __bf16 op2, size_t vl) {
+ return __riscv_vfsgnj_vf_bf16m2(op1, op2, vl);
+}
+
+// CHECK-RV64-LABEL: define dso_local <vscale x 16 x bfloat> @test_vfsgnj_vv_bf16m4(
+// CHECK-RV64-SAME: <vscale x 16 x bfloat> [[OP1:%.*]], <vscale x 16 x bfloat> [[OP2:%.*]], i64 noundef [[VL:%.*]]) #[[ATTR0]] {
+// CHECK-RV64-NEXT: [[ENTRY:.*:]]
+// CHECK-RV64-NEXT: [[TMP0:%.*]] = call <vscale x 16 x bfloat> @llvm.riscv.vfsgnj.nxv16bf16.nxv16bf16.i64(<vscale x 16 x bfloat> poison, <vscale x 16 x bfloat> [[OP1]], <vscale x 16 x bfloat> [[OP2]], i64 [[VL]])
+// CHECK-RV64-NEXT: ret <vscale x 16 x bfloat> [[TMP0]]
+//
+vbfloat16m4_t test_vfsgnj_vv_bf16m4(vbfloat16m4_t op1, vbfloat16m4_t op2, size_t vl) {
+ return __riscv_vfsgnj_vv_bf16m4(op1, op2, vl);
+}
+
+// CHECK-RV64-LABEL: define dso_local <vscale x 16 x bfloat> @test_vfsgnj_vf_bf16m4(
+// CHECK-RV64-SAME: <vscale x 16 x bfloat> [[OP1:%.*]], bfloat noundef [[OP2:%.*]], i64 noundef [[VL:%.*]]) #[[ATTR0]] {
+// CHECK-RV64-NEXT: [[ENTRY:.*:]]
+// CHECK-RV64-NEXT: [[TMP0:%.*]] = call <vscale x 16 x bfloat> @llvm.riscv.vfsgnj.nxv16bf16.bf16.i64(<vscale x 16 x bfloat> poison, <vscale x 16 x bfloat> [[OP1]], bfloat [[OP2]], i64 [[VL]])
+// CHECK-RV64-NEXT: ret <vscale x 16 x bfloat> [[TMP0]]
+//
+vbfloat16m4_t test_vfsgnj_vf_bf16m4(vbfloat16m4_t op1, __bf16 op2, size_t vl) {
+ return __riscv_vfsgnj_vf_bf16m4(op1, op2, vl);
+}
+
+// CHECK-RV64-LABEL: define dso_local <vscale x 32 x bfloat> @test_vfsgnj_vv_bf16m8(
+// CHECK-RV64-SAME: <vscale x 32 x bfloat> [[OP1:%.*]], <vscale x 32 x bfloat> [[OP2:%.*]], i64 noundef [[VL:%.*]]) #[[ATTR0]] {
+// CHECK-RV64-NEXT: [[ENTRY:.*:]]
+// CHECK-RV64-NEXT: [[TMP0:%.*]] = call <vscale x 32 x bfloat> @llvm.riscv.vfsgnj.nxv32bf16.nxv32bf16.i64(<vscale x 32 x bfloat> poison, <vscale x 32 x bfloat> [[OP1]], <vscale x 32 x bfloat> [[OP2]], i64 [[VL]])
+// CHECK-RV64-NEXT: ret <vscale x 32 x bfloat> [[TMP0]]
+//
+vbfloat16m8_t test_vfsgnj_vv_bf16m8(vbfloat16m8_t op1, vbfloat16m8_t op2, size_t vl) {
+ return __riscv_vfsgnj_vv_bf16m8(op1, op2, vl);
+}
+
+// CHECK-RV64-LABEL: define dso_local <vscale x 32 x bfloat> @test_vfsgnj_vf_bf16m8(
+// CHECK-RV64-SAME: <vscale x 32 x bfloat> [[OP1:%.*]], bfloat noundef [[OP2:%.*]], i64 noundef [[VL:%.*]]) #[[ATTR0]] {
+// CHECK-RV64-NEXT: [[ENTRY:.*:]]
+// CHECK-RV64-NEXT: [[TMP0:%.*]] = call <vscale x 32 x bfloat> @llvm.riscv.vfsgnj.nxv32bf16.bf16.i64(<vscale x 32 x bfloat> poison, <vscale x 32 x bfloat> [[OP1]], bfloat [[OP2]], i64 [[VL]])
+// CHECK-RV64-NEXT: ret <vscale x 32 x bfloat> [[TMP0]]
+//
+vbfloat16m8_t test_vfsgnj_vf_bf16m8(vbfloat16m8_t op1, __bf16 op2, size_t vl) {
+ return __riscv_vfsgnj_vf_bf16m8(op1, op2, vl);
+}
+
+// CHECK-RV64-LABEL: define dso_local <vscale x 1 x bfloat> @test_vfsgnj_vv_bf16mf4_m(
+// CHECK-RV64-SAME: <vscale x 1 x i1> [[MASK:%.*]], <vscale x 1 x bfloat> [[OP1:%.*]], <vscale x 1 x bfloat> [[OP2:%.*]], i64 noundef [[VL:%.*]]) #[[ATTR0]] {
+// CHECK-RV64-NEXT: [[ENTRY:.*:]]
+// CHECK-RV64-NEXT: [[TMP0:%.*]] = call <vscale x 1 x bfloat> @llvm.riscv.vfsgnj.mask.nxv1bf16.nxv1bf16.i64(<vscale x 1 x bfloat> poison, <vscale x 1 x bfloat> [[OP1]], <vscale x 1 x bfloat> [[OP2]], <vscale x 1 x i1> [[MASK]], i64 [[VL]], i64 3)
+// CHECK-RV64-NEXT: ret <vscale x 1 x bfloat> [[TMP0]]
+//
+vbfloat16mf4_t test_vfsgnj_vv_bf16mf4_m(vbool64_t mask, vbfloat16mf4_t op1, vbfloat16mf4_t op2, size_t vl) {
+ return __riscv_vfsgnj_vv_bf16mf4_m(mask, op1, op2, vl);
+}
+
+// CHECK-RV64-LABEL: define dso_local <vscale x 1 x bfloat> @test_vfsgnj_vf_bf16mf4_m(
+// CHECK-RV64-SAME: <vscale x 1 x i1> [[MASK:%.*]], <vscale x 1 x bfloat> [[OP1:%.*]], bfloat noundef [[OP2:%.*]], i64 noundef [[VL:%.*]]) #[[ATTR0]] {
+// CHECK-RV64-NEXT: [[ENTRY:.*:]]
+// CHECK-RV64-NEXT: [[TMP0:%.*]] = call <vscale x 1 x bfloat> @llvm.riscv.vfsgnj.mask.nxv1bf16.bf16.i64(<vscale x 1 x bfloat> poison, <vscale x 1 x bfloat> [[OP1]], bfloat [[OP2]], <vscale x 1 x i1> [[MASK]], i64 [[VL]], i64 3)
+// CHECK-RV64-NEXT: ret <vscale x 1 x bfloat> [[TMP0]]
+//
+vbfloat16mf4_t test_vfsgnj_vf_bf16mf4_m(vbool64_t mask, vbfloat16mf4_t op1, __bf16 op2, size_t vl) {
+ return __riscv_vfsgnj_vf_bf16mf4_m(mask, op1, op2, vl);
+}
+
+// CHECK-RV64-LABEL: define dso_local <vscale x 2 x bfloat> @test_vfsgnj_vv_bf16mf2_m(
+// CHECK-RV64-SAME: <vscale x 2 x i1> [[MASK:%.*]], <vscale x 2 x bfloat> [[OP1:%.*]], <vscale x 2 x bfloat> [[OP2:%.*]], i64 noundef [[VL:%.*]]) #[[ATTR0]] {
+// CHECK-RV64-NEXT: [[ENTRY:.*:]]
+// CHECK-RV64-NEXT: [[TMP0:%.*]] = call <vscale x 2 x bfloat> @llvm.riscv.vfsgnj.mask.nxv2bf16.nxv2bf16.i64(<vscale x 2 x bfloat> poison, <vscale x 2 x bfloat> [[OP1]], <vscale x 2 x bfloat> [[OP2]], <vscale x 2 x i1> [[MASK]], i64 [[VL]], i64 3)
+// CHECK-RV64-NEXT: ret <vscale x 2 x bfloat> [[TMP0]]
+//
+vbfloat16mf2_t test_vfsgnj_vv_bf16mf2_m(vbool32_t mask, vbfloat16mf2_t op1, vbfloat16mf2_t op2, size_t vl) {
+ return __riscv_vfsgnj_vv_bf16mf2_m(mask, op1, op2, vl);
+}
+
+// CHECK-RV64-LABEL: define dso_local <vscale x 2 x bfloat> @test_vfsgnj_vf_bf16mf2_m(
+// CHECK-RV64-SAME: <vscale x 2 x i1> [[MASK:%.*]], <vscale x 2 x bfloat> [[OP1:%.*]], bfloat noundef [[OP2:%.*]], i64 noundef [[VL:%.*]]) #[[ATTR0]] {
+// CHECK-RV64-NEXT: [[ENTRY:.*:]]
+// CHECK-RV64-NEXT: [[TMP0:%.*]] = call <vscale x 2 x bfloat> @llvm.riscv.vfsgnj.mask.nxv2bf16.bf16.i64(<vscale x 2 x bfloat> poison, <vscale x 2 x bfloat> [[OP1]], bfloat [[OP2]], <vscale x 2 x i1> [[MASK]], i64 [[VL]], i64 3)
+// CHECK-RV64-NEXT: ret <vscale x 2 x bfloat> [[TMP0]]
+//
+vbfloat16mf2_t test_vfsgnj_vf_bf16mf2_m(vbool32_t mask, vbfloat16mf2_t op1, __bf16 op2, size_t vl) {
+ return __riscv_vfsgnj_vf_bf16mf2_m(mask, op1, op2, vl);
+}
+
+// CHECK-RV64-LABEL: define dso_local <vscale x 4 x bfloat> @test_vfsgnj_vv_bf16m1_m(
+// CHECK-RV64-SAME: <vscale x 4 x i1> [[MASK:%.*]], <vscale x 4 x bfloat> [[OP1:%.*]], <vscale x 4 x bfloat> [[OP2:%.*]], i64 noundef [[VL:%.*]]) #[[ATTR0]] {
+// CHECK-RV64-NEXT: [[ENTRY:.*:]]
+// CHECK-RV64-NEXT: [[TMP0:%.*]] = call <vscale x 4 x bfloat> @llvm.riscv.vfsgnj.mask.nxv4bf16.nxv4bf16.i64(<vscale x 4 x bfloat> poison, <vscale x 4 x bfloat> [[OP1]], <vscale x 4 x bfloat> [[OP2]], <vscale x 4 x i1> [[MASK]], i64 [[VL]], i64 3)
+// CHECK-RV64-NEXT: ret <vscale x 4 x bfloat> [[TMP0]]
+//
+vbfloat16m1_t test_vfsgnj_vv_bf16m1_m(vbool16_t mask, vbfloat16m1_t op1, vbfloat16m1_t op2, size_t vl) {
+ return __riscv_vfsgnj_vv_bf16m1_m(mask, op1, op2, vl);
+}
+
+// CHECK-RV64-LABEL: define dso_local <vscale x 4 x bfloat> @test_vfsgnj_vf_bf16m1_m(
+// CHECK-RV64-SAME: <vscale x 4 x i1> [[MASK:%.*]], <vscale x 4 x bfloat> [[OP1:%.*]], bfloat noundef [[OP2:%.*]], i64 noundef [[VL:%.*]]) #[[ATTR0]] {
+// CHECK-RV64-NEXT: [[ENTRY:.*:]]
+// CHECK-RV64-NEXT: [[TMP0:%.*]] = call <vscale x 4 x bfloat> @llvm.riscv.vfsgnj.mask.nxv4bf16.bf16.i64(<vscale x 4 x bfloat> poison, <vscale x 4 x bfloat> [[OP1]], bfloat [[OP2]], <vscale x 4 x i1> [[MASK]], i64 [[VL]], i64 3)
+// CHECK-RV64-NEXT: ret <vscale x 4 x bfloat> [[TMP0]]
+//
+vbfloat16m1_t test_vfsgnj_vf_bf16m1_m(vbool16_t mask, vbfloat16m1_t op1, __bf16 op2, size_t vl) {
+ return __riscv_vfsgnj_vf_bf16m1_m(mask, op1, op2, vl);
+}
+
+// CHECK-RV64-LABEL: define dso_local <vscale x 8 x bfloat> @test_vfsgnj_vv_bf16m2_m(
+// CHECK-RV64-SAME: <vscale x 8 x i1> [[MASK:%.*]], <vscale x 8 x bfloat> [[OP1:%.*]], <vscale x 8 x bfloat> [[OP2:%.*]], i64 noundef [[VL:%.*]]) #[[ATTR0]] {
+// CHECK-RV64-NEXT: [[ENTRY:.*:]]
+// CHECK-RV64-NEXT: [[TMP0:%.*]] = call <vscale x 8 x bfloat> @llvm.riscv.vfsgnj.mask.nxv8bf16.nxv8bf16.i64(<vscale x 8 x bfloat> poison, <vscale x 8 x bfloat> [[OP1]], <vscale x 8 x bfloat> [[OP2]], <vscale x 8 x i1> [[MASK]], i64 [[VL]], i64 3)
+// CHECK-RV64-NEXT: ret <vscale x 8 x bfloat> [[TMP0]]
+//
+vbfloat16m2_t test_vfsgnj_vv_bf16m2_m(vbool8_t mask, vbfloat16m2_t op1, vbfloat16m2_t op2, size_t vl) {
+ return __riscv_vfsgnj_vv_bf16m2_m(mask, op1, op2, vl);
+}
+
+// CHECK-RV64-LABEL: define dso_local <vscale x 8 x bfloat> @test_vfsgnj_vf_bf16m2_m(
+// CHECK-RV64-SAME: <vscale x 8 x i1> [[MASK:%.*]], <vscale x 8 x bfloat> [[OP1:%.*]], bfloat noundef [[OP2:%.*]], i64 noundef [[VL:%.*]]) #[[ATTR0]] {
+// CHECK-RV64-NEXT: [[ENTRY:.*:]]
+// CHECK-RV64-NEXT: [[TMP0:%.*]] = call <vscale x 8 x bfloat> @llvm.riscv.vfsgnj.mask.nxv8bf16.bf16.i64(<vscale x 8 x bfloat> poison, <vscale x 8 x bfloat> [[OP1]], bfloat [[OP2]], <vscale x 8 x i1> [[MASK]], i64 [[VL]], i64 3)
+// CHECK-RV64-NEXT: ret <vscale x 8 x bfloat> [[TMP0]]
+//
+vbfloat16m2_t test_vfsgnj_vf_bf16m2_m(vbool8_t mask, vbfloat16m2_t op1, __bf16 op2, size_t vl) {
+ return __riscv_vfsgnj_vf_bf16m2_m(mask, op1, op2, vl);
+}
+
+// CHECK-RV64-LABEL: define dso_local <vscale x 16 x bfloat> @test_vfsgnj_vv_bf16m4_m(
+// CHECK-RV64-SAME: <vscale x 16 x i1> [[MASK:%.*]], <vscale x 16 x bfloat> [[OP1:%.*]], <vscale x 16 x bfloat> [[OP2:%.*]], i64 noundef [[VL:%.*]]) #[[ATTR0]] {
+// CHECK-RV64-NEXT: [[ENTRY:.*:]]
+// CHECK-RV64-NEXT: [[TMP0:%.*]] = call <vscale x 16 x bfloat> @llvm.riscv.vfsgnj.mask.nxv16bf16.nxv16bf16.i64(<vscale x 16 x bfloat> poison, <vscale x 16 x bfloat> [[OP1]], <vscale x 16 x bfloat> [[OP2]], <vscale x 16 x i1> [[MASK]], i64 [[VL]], i64 3)
+// CHECK-RV64-NEXT: ret <vscale x 16 x bfloat> [[TMP0]]
+//
+vbfloat16m4_t test_vfsgnj_vv_bf16m4_m(vbool4_t mask, vbfloat16m4_t op1, vbfloat16m4_t op2, size_t vl) {
+ return __riscv_vfsgnj_vv_bf16m4_m(mask, op1, op2, vl);
+}
+
+// CHECK-RV64-LABEL: define dso_local <vscale x 16 x bfloat> @test_vfsgnj_vf_bf16m4_m(
+// CHECK-RV64-SAME: <vscale x 16 x i1> [[MASK:%.*]], <vscale x 16 x bfloat> [[OP1:%.*]], bfloat noundef [[OP2:%.*]], i64 noundef [[VL:%.*]]) #[[ATTR0]] {
+// CHECK-RV64-NEXT: [[ENTRY:.*:]]
+// CHECK-RV64-NEXT: [[TMP0:%.*]] = call <vscale x 16 x bfloat> @llvm.riscv.vfsgnj.mask.nxv16bf16.bf16.i64(<vscale x 16 x bfloat> poison, <vscale x 16 x bfloat> [[OP1]], bfloat [[OP2]], <vscale x 16 x i1> [[MASK]], i64 [[VL]], i64 3)
+// CHECK-RV64-NEXT: ret <vscale x 16 x bfloat> [[TMP0]]
+//
+vbfloat16m4_t test_vfsgnj_vf_bf16m4_m(vbool4_t mask, vbfloat16m4_t op1, __bf16 op2, size_t vl) {
+ return __riscv_vfsgnj_vf_bf16m4_m(mask, op1, op2, vl);
+}
+
+// CHECK-RV64-LABEL: define dso_local <vscale x 32 x bfloat> @test_vfsgnj_vv_bf16m8_m(
+// CHECK-RV64-SAME: <vscale x 32 x i1> [[MASK:%.*]], <vscale x 32 x bfloat> [[OP1:%.*]], <vscale x 32 x bfloat> [[OP2:%.*]], i64 noundef [[VL:%.*]]) #[[ATTR0]] {
+// CHECK-RV64-NEXT: [[ENTRY:.*:]]
+// CHECK-RV64-NEXT: [[TMP0:%.*]] = call <vscale x 32 x bfloat> @llvm.riscv.vfsgnj.mask.nxv32bf16.nxv32bf16.i64(<vscale x 32 x bfloat> poison, <vscale x 32 x bfloat> [[OP1]], <vscale x 32 x bfloat> [[OP2]], <vscale x 32 x i1> [[MASK]], i64 [[VL]], i64 3)
+// CHECK-RV64-NEXT: ret <vscale x 32 x bfloat> [[TMP0]]
+//
+vbfloat16m8_t test_vfsgnj_vv_bf16m8_m(vbool2_t mask, vbfloat16m8_t op1, vbfloat16m8_t op2, size_t vl) {
+ return __riscv_vfsgnj_vv_bf16m8_m(mask, op1, op2, vl);
+}
+
+// CHECK-RV64-LABEL: define dso_local <vscale x 32 x bfloat> @test_vfsgnj_vf_bf16m8_m(
+// CHECK-RV64-SAME: <vscale x 32 x i1> [[MASK:%.*]], <vscale x 32 x bfloat> [[OP1:%.*]], bfloat noundef [[OP2:%.*]], i64 noundef [[VL:%.*]]) #[[ATTR0]] {
+// CHECK-RV64-NEXT: [[ENTRY:.*:]]
+// CHECK-RV64-NEXT: [[TMP0:%.*]] = call <vscale x 32 x bfloat> @llvm.riscv.vfsgnj.mask.nxv32bf16.bf16.i64(<vscale x 32 x bfloat> poison, <vscale x 32 x bfloat> [[OP1]], bfloat [[OP2]], <vscale x 32 x i1> [[MASK]], i64 [[VL]], i64 3)
+// CHECK-RV64-NEXT: ret <vscale x 32 x bfloat> [[TMP0]]
+//
+vbfloat16m8_t test_vfsgnj_vf_bf16m8_m(vbool2_t mask, vbfloat16m8_t op1, __bf16 op2, size_t vl) {
+ return __riscv_vfsgnj_vf_bf16m8_m(mask, op1, op2, vl);
+}
+
diff --git a/clang/test/CodeGen/RISCV/rvv-intrinsics-autogenerated/zvfbfa/non-policy/non-overloaded/vfsgnjn.c b/clang/test/CodeGen/RISCV/rvv-intrinsics-autogenerated/zvfbfa/non-policy/non-overloaded/vfsgnjn.c
new file mode 100644
index 0000000..7de3089
--- /dev/null
+++ b/clang/test/CodeGen/RISCV/rvv-intrinsics-autogenerated/zvfbfa/non-policy/non-overloaded/vfsgnjn.c
@@ -0,0 +1,249 @@
+// NOTE: Assertions have been autogenerated by utils/update_cc_test_checks.py UTC_ARGS: --version 5
+// REQUIRES: riscv-registered-target
+// RUN: %clang_cc1 -triple riscv64 -target-feature +v \
+// RUN: -target-feature +experimental-zvfbfa -disable-O0-optnone \
+// RUN: -emit-llvm %s -o - | opt -S -passes=mem2reg | \
+// RUN: FileCheck --check-prefix=CHECK-RV64 %s
+
+#include <riscv_vector.h>
+
+// CHECK-RV64-LABEL: define dso_local <vscale x 1 x bfloat> @test_vfsgnjn_vv_bf16mf4(
+// CHECK-RV64-SAME: <vscale x 1 x bfloat> [[OP1:%.*]], <vscale x 1 x bfloat> [[OP2:%.*]], i64 noundef [[VL:%.*]]) #[[ATTR0:[0-9]+]] {
+// CHECK-RV64-NEXT: [[ENTRY:.*:]]
+// CHECK-RV64-NEXT: [[TMP0:%.*]] = call <vscale x 1 x bfloat> @llvm.riscv.vfsgnjn.nxv1bf16.nxv1bf16.i64(<vscale x 1 x bfloat> poison, <vscale x 1 x bfloat> [[OP1]], <vscale x 1 x bfloat> [[OP2]], i64 [[VL]])
+// CHECK-RV64-NEXT: ret <vscale x 1 x bfloat> [[TMP0]]
+//
+vbfloat16mf4_t test_vfsgnjn_vv_bf16mf4(vbfloat16mf4_t op1, vbfloat16mf4_t op2, size_t vl) {
+ return __riscv_vfsgnjn_vv_bf16mf4(op1, op2, vl);
+}
+
+// CHECK-RV64-LABEL: define dso_local <vscale x 1 x bfloat> @test_vfsgnjn_vf_bf16mf4(
+// CHECK-RV64-SAME: <vscale x 1 x bfloat> [[OP1:%.*]], bfloat noundef [[OP2:%.*]], i64 noundef [[VL:%.*]]) #[[ATTR0]] {
+// CHECK-RV64-NEXT: [[ENTRY:.*:]]
+// CHECK-RV64-NEXT: [[TMP0:%.*]] = call <vscale x 1 x bfloat> @llvm.riscv.vfsgnjn.nxv1bf16.bf16.i64(<vscale x 1 x bfloat> poison, <vscale x 1 x bfloat> [[OP1]], bfloat [[OP2]], i64 [[VL]])
+// CHECK-RV64-NEXT: ret <vscale x 1 x bfloat> [[TMP0]]
+//
+vbfloat16mf4_t test_vfsgnjn_vf_bf16mf4(vbfloat16mf4_t op1, __bf16 op2, size_t vl) {
+ return __riscv_vfsgnjn_vf_bf16mf4(op1, op2, vl);
+}
+
+// CHECK-RV64-LABEL: define dso_local <vscale x 2 x bfloat> @test_vfsgnjn_vv_bf16mf2(
+// CHECK-RV64-SAME: <vscale x 2 x bfloat> [[OP1:%.*]], <vscale x 2 x bfloat> [[OP2:%.*]], i64 noundef [[VL:%.*]]) #[[ATTR0]] {
+// CHECK-RV64-NEXT: [[ENTRY:.*:]]
+// CHECK-RV64-NEXT: [[TMP0:%.*]] = call <vscale x 2 x bfloat> @llvm.riscv.vfsgnjn.nxv2bf16.nxv2bf16.i64(<vscale x 2 x bfloat> poison, <vscale x 2 x bfloat> [[OP1]], <vscale x 2 x bfloat> [[OP2]], i64 [[VL]])
+// CHECK-RV64-NEXT: ret <vscale x 2 x bfloat> [[TMP0]]
+//
+vbfloat16mf2_t test_vfsgnjn_vv_bf16mf2(vbfloat16mf2_t op1, vbfloat16mf2_t op2, size_t vl) {
+ return __riscv_vfsgnjn_vv_bf16mf2(op1, op2, vl);
+}
+
+// CHECK-RV64-LABEL: define dso_local <vscale x 2 x bfloat> @test_vfsgnjn_vf_bf16mf2(
+// CHECK-RV64-SAME: <vscale x 2 x bfloat> [[OP1:%.*]], bfloat noundef [[OP2:%.*]], i64 noundef [[VL:%.*]]) #[[ATTR0]] {
+// CHECK-RV64-NEXT: [[ENTRY:.*:]]
+// CHECK-RV64-NEXT: [[TMP0:%.*]] = call <vscale x 2 x bfloat> @llvm.riscv.vfsgnjn.nxv2bf16.bf16.i64(<vscale x 2 x bfloat> poison, <vscale x 2 x bfloat> [[OP1]], bfloat [[OP2]], i64 [[VL]])
+// CHECK-RV64-NEXT: ret <vscale x 2 x bfloat> [[TMP0]]
+//
+vbfloat16mf2_t test_vfsgnjn_vf_bf16mf2(vbfloat16mf2_t op1, __bf16 op2, size_t vl) {
+ return __riscv_vfsgnjn_vf_bf16mf2(op1, op2, vl);
+}
+
+// CHECK-RV64-LABEL: define dso_local <vscale x 4 x bfloat> @test_vfsgnjn_vv_bf16m1(
+// CHECK-RV64-SAME: <vscale x 4 x bfloat> [[OP1:%.*]], <vscale x 4 x bfloat> [[OP2:%.*]], i64 noundef [[VL:%.*]]) #[[ATTR0]] {
+// CHECK-RV64-NEXT: [[ENTRY:.*:]]
+// CHECK-RV64-NEXT: [[TMP0:%.*]] = call <vscale x 4 x bfloat> @llvm.riscv.vfsgnjn.nxv4bf16.nxv4bf16.i64(<vscale x 4 x bfloat> poison, <vscale x 4 x bfloat> [[OP1]], <vscale x 4 x bfloat> [[OP2]], i64 [[VL]])
+// CHECK-RV64-NEXT: ret <vscale x 4 x bfloat> [[TMP0]]
+//
+vbfloat16m1_t test_vfsgnjn_vv_bf16m1(vbfloat16m1_t op1, vbfloat16m1_t op2, size_t vl) {
+ return __riscv_vfsgnjn_vv_bf16m1(op1, op2, vl);
+}
+
+// CHECK-RV64-LABEL: define dso_local <vscale x 4 x bfloat> @test_vfsgnjn_vf_bf16m1(
+// CHECK-RV64-SAME: <vscale x 4 x bfloat> [[OP1:%.*]], bfloat noundef [[OP2:%.*]], i64 noundef [[VL:%.*]]) #[[ATTR0]] {
+// CHECK-RV64-NEXT: [[ENTRY:.*:]]
+// CHECK-RV64-NEXT: [[TMP0:%.*]] = call <vscale x 4 x bfloat> @llvm.riscv.vfsgnjn.nxv4bf16.bf16.i64(<vscale x 4 x bfloat> poison, <vscale x 4 x bfloat> [[OP1]], bfloat [[OP2]], i64 [[VL]])
+// CHECK-RV64-NEXT: ret <vscale x 4 x bfloat> [[TMP0]]
+//
+vbfloat16m1_t test_vfsgnjn_vf_bf16m1(vbfloat16m1_t op1, __bf16 op2, size_t vl) {
+ return __riscv_vfsgnjn_vf_bf16m1(op1, op2, vl);
+}
+
+// CHECK-RV64-LABEL: define dso_local <vscale x 8 x bfloat> @test_vfsgnjn_vv_bf16m2(
+// CHECK-RV64-SAME: <vscale x 8 x bfloat> [[OP1:%.*]], <vscale x 8 x bfloat> [[OP2:%.*]], i64 noundef [[VL:%.*]]) #[[ATTR0]] {
+// CHECK-RV64-NEXT: [[ENTRY:.*:]]
+// CHECK-RV64-NEXT: [[TMP0:%.*]] = call <vscale x 8 x bfloat> @llvm.riscv.vfsgnjn.nxv8bf16.nxv8bf16.i64(<vscale x 8 x bfloat> poison, <vscale x 8 x bfloat> [[OP1]], <vscale x 8 x bfloat> [[OP2]], i64 [[VL]])
+// CHECK-RV64-NEXT: ret <vscale x 8 x bfloat> [[TMP0]]
+//
+vbfloat16m2_t test_vfsgnjn_vv_bf16m2(vbfloat16m2_t op1, vbfloat16m2_t op2, size_t vl) {
+ return __riscv_vfsgnjn_vv_bf16m2(op1, op2, vl);
+}
+
+// CHECK-RV64-LABEL: define dso_local <vscale x 8 x bfloat> @test_vfsgnjn_vf_bf16m2(
+// CHECK-RV64-SAME: <vscale x 8 x bfloat> [[OP1:%.*]], bfloat noundef [[OP2:%.*]], i64 noundef [[VL:%.*]]) #[[ATTR0]] {
+// CHECK-RV64-NEXT: [[ENTRY:.*:]]
+// CHECK-RV64-NEXT: [[TMP0:%.*]] = call <vscale x 8 x bfloat> @llvm.riscv.vfsgnjn.nxv8bf16.bf16.i64(<vscale x 8 x bfloat> poison, <vscale x 8 x bfloat> [[OP1]], bfloat [[OP2]], i64 [[VL]])
+// CHECK-RV64-NEXT: ret <vscale x 8 x bfloat> [[TMP0]]
+//
+vbfloat16m2_t test_vfsgnjn_vf_bf16m2(vbfloat16m2_t op1, __bf16 op2, size_t vl) {
+ return __riscv_vfsgnjn_vf_bf16m2(op1, op2, vl);
+}
+
+// CHECK-RV64-LABEL: define dso_local <vscale x 16 x bfloat> @test_vfsgnjn_vv_bf16m4(
+// CHECK-RV64-SAME: <vscale x 16 x bfloat> [[OP1:%.*]], <vscale x 16 x bfloat> [[OP2:%.*]], i64 noundef [[VL:%.*]]) #[[ATTR0]] {
+// CHECK-RV64-NEXT: [[ENTRY:.*:]]
+// CHECK-RV64-NEXT: [[TMP0:%.*]] = call <vscale x 16 x bfloat> @llvm.riscv.vfsgnjn.nxv16bf16.nxv16bf16.i64(<vscale x 16 x bfloat> poison, <vscale x 16 x bfloat> [[OP1]], <vscale x 16 x bfloat> [[OP2]], i64 [[VL]])
+// CHECK-RV64-NEXT: ret <vscale x 16 x bfloat> [[TMP0]]
+//
+vbfloat16m4_t test_vfsgnjn_vv_bf16m4(vbfloat16m4_t op1, vbfloat16m4_t op2, size_t vl) {
+ return __riscv_vfsgnjn_vv_bf16m4(op1, op2, vl);
+}
+
+// CHECK-RV64-LABEL: define dso_local <vscale x 16 x bfloat> @test_vfsgnjn_vf_bf16m4(
+// CHECK-RV64-SAME: <vscale x 16 x bfloat> [[OP1:%.*]], bfloat noundef [[OP2:%.*]], i64 noundef [[VL:%.*]]) #[[ATTR0]] {
+// CHECK-RV64-NEXT: [[ENTRY:.*:]]
+// CHECK-RV64-NEXT: [[TMP0:%.*]] = call <vscale x 16 x bfloat> @llvm.riscv.vfsgnjn.nxv16bf16.bf16.i64(<vscale x 16 x bfloat> poison, <vscale x 16 x bfloat> [[OP1]], bfloat [[OP2]], i64 [[VL]])
+// CHECK-RV64-NEXT: ret <vscale x 16 x bfloat> [[TMP0]]
+//
+vbfloat16m4_t test_vfsgnjn_vf_bf16m4(vbfloat16m4_t op1, __bf16 op2, size_t vl) {
+ return __riscv_vfsgnjn_vf_bf16m4(op1, op2, vl);
+}
+
+// CHECK-RV64-LABEL: define dso_local <vscale x 32 x bfloat> @test_vfsgnjn_vv_bf16m8(
+// CHECK-RV64-SAME: <vscale x 32 x bfloat> [[OP1:%.*]], <vscale x 32 x bfloat> [[OP2:%.*]], i64 noundef [[VL:%.*]]) #[[ATTR0]] {
+// CHECK-RV64-NEXT: [[ENTRY:.*:]]
+// CHECK-RV64-NEXT: [[TMP0:%.*]] = call <vscale x 32 x bfloat> @llvm.riscv.vfsgnjn.nxv32bf16.nxv32bf16.i64(<vscale x 32 x bfloat> poison, <vscale x 32 x bfloat> [[OP1]], <vscale x 32 x bfloat> [[OP2]], i64 [[VL]])
+// CHECK-RV64-NEXT: ret <vscale x 32 x bfloat> [[TMP0]]
+//
+vbfloat16m8_t test_vfsgnjn_vv_bf16m8(vbfloat16m8_t op1, vbfloat16m8_t op2, size_t vl) {
+ return __riscv_vfsgnjn_vv_bf16m8(op1, op2, vl);
+}
+
+// CHECK-RV64-LABEL: define dso_local <vscale x 32 x bfloat> @test_vfsgnjn_vf_bf16m8(
+// CHECK-RV64-SAME: <vscale x 32 x bfloat> [[OP1:%.*]], bfloat noundef [[OP2:%.*]], i64 noundef [[VL:%.*]]) #[[ATTR0]] {
+// CHECK-RV64-NEXT: [[ENTRY:.*:]]
+// CHECK-RV64-NEXT: [[TMP0:%.*]] = call <vscale x 32 x bfloat> @llvm.riscv.vfsgnjn.nxv32bf16.bf16.i64(<vscale x 32 x bfloat> poison, <vscale x 32 x bfloat> [[OP1]], bfloat [[OP2]], i64 [[VL]])
+// CHECK-RV64-NEXT: ret <vscale x 32 x bfloat> [[TMP0]]
+//
+vbfloat16m8_t test_vfsgnjn_vf_bf16m8(vbfloat16m8_t op1, __bf16 op2, size_t vl) {
+ return __riscv_vfsgnjn_vf_bf16m8(op1, op2, vl);
+}
+
+// CHECK-RV64-LABEL: define dso_local <vscale x 1 x bfloat> @test_vfsgnjn_vv_bf16mf4_m(
+// CHECK-RV64-SAME: <vscale x 1 x i1> [[MASK:%.*]], <vscale x 1 x bfloat> [[OP1:%.*]], <vscale x 1 x bfloat> [[OP2:%.*]], i64 noundef [[VL:%.*]]) #[[ATTR0]] {
+// CHECK-RV64-NEXT: [[ENTRY:.*:]]
+// CHECK-RV64-NEXT: [[TMP0:%.*]] = call <vscale x 1 x bfloat> @llvm.riscv.vfsgnjn.mask.nxv1bf16.nxv1bf16.i64(<vscale x 1 x bfloat> poison, <vscale x 1 x bfloat> [[OP1]], <vscale x 1 x bfloat> [[OP2]], <vscale x 1 x i1> [[MASK]], i64 [[VL]], i64 3)
+// CHECK-RV64-NEXT: ret <vscale x 1 x bfloat> [[TMP0]]
+//
+vbfloat16mf4_t test_vfsgnjn_vv_bf16mf4_m(vbool64_t mask, vbfloat16mf4_t op1, vbfloat16mf4_t op2, size_t vl) {
+ return __riscv_vfsgnjn_vv_bf16mf4_m(mask, op1, op2, vl);
+}
+
+// CHECK-RV64-LABEL: define dso_local <vscale x 1 x bfloat> @test_vfsgnjn_vf_bf16mf4_m(
+// CHECK-RV64-SAME: <vscale x 1 x i1> [[MASK:%.*]], <vscale x 1 x bfloat> [[OP1:%.*]], bfloat noundef [[OP2:%.*]], i64 noundef [[VL:%.*]]) #[[ATTR0]] {
+// CHECK-RV64-NEXT: [[ENTRY:.*:]]
+// CHECK-RV64-NEXT: [[TMP0:%.*]] = call <vscale x 1 x bfloat> @llvm.riscv.vfsgnjn.mask.nxv1bf16.bf16.i64(<vscale x 1 x bfloat> poison, <vscale x 1 x bfloat> [[OP1]], bfloat [[OP2]], <vscale x 1 x i1> [[MASK]], i64 [[VL]], i64 3)
+// CHECK-RV64-NEXT: ret <vscale x 1 x bfloat> [[TMP0]]
+//
+vbfloat16mf4_t test_vfsgnjn_vf_bf16mf4_m(vbool64_t mask, vbfloat16mf4_t op1, __bf16 op2, size_t vl) {
+ return __riscv_vfsgnjn_vf_bf16mf4_m(mask, op1, op2, vl);
+}
+
+// CHECK-RV64-LABEL: define dso_local <vscale x 2 x bfloat> @test_vfsgnjn_vv_bf16mf2_m(
+// CHECK-RV64-SAME: <vscale x 2 x i1> [[MASK:%.*]], <vscale x 2 x bfloat> [[OP1:%.*]], <vscale x 2 x bfloat> [[OP2:%.*]], i64 noundef [[VL:%.*]]) #[[ATTR0]] {
+// CHECK-RV64-NEXT: [[ENTRY:.*:]]
+// CHECK-RV64-NEXT: [[TMP0:%.*]] = call <vscale x 2 x bfloat> @llvm.riscv.vfsgnjn.mask.nxv2bf16.nxv2bf16.i64(<vscale x 2 x bfloat> poison, <vscale x 2 x bfloat> [[OP1]], <vscale x 2 x bfloat> [[OP2]], <vscale x 2 x i1> [[MASK]], i64 [[VL]], i64 3)
+// CHECK-RV64-NEXT: ret <vscale x 2 x bfloat> [[TMP0]]
+//
+vbfloat16mf2_t test_vfsgnjn_vv_bf16mf2_m(vbool32_t mask, vbfloat16mf2_t op1, vbfloat16mf2_t op2, size_t vl) {
+ return __riscv_vfsgnjn_vv_bf16mf2_m(mask, op1, op2, vl);
+}
+
+// CHECK-RV64-LABEL: define dso_local <vscale x 2 x bfloat> @test_vfsgnjn_vf_bf16mf2_m(
+// CHECK-RV64-SAME: <vscale x 2 x i1> [[MASK:%.*]], <vscale x 2 x bfloat> [[OP1:%.*]], bfloat noundef [[OP2:%.*]], i64 noundef [[VL:%.*]]) #[[ATTR0]] {
+// CHECK-RV64-NEXT: [[ENTRY:.*:]]
+// CHECK-RV64-NEXT: [[TMP0:%.*]] = call <vscale x 2 x bfloat> @llvm.riscv.vfsgnjn.mask.nxv2bf16.bf16.i64(<vscale x 2 x bfloat> poison, <vscale x 2 x bfloat> [[OP1]], bfloat [[OP2]], <vscale x 2 x i1> [[MASK]], i64 [[VL]], i64 3)
+// CHECK-RV64-NEXT: ret <vscale x 2 x bfloat> [[TMP0]]
+//
+vbfloat16mf2_t test_vfsgnjn_vf_bf16mf2_m(vbool32_t mask, vbfloat16mf2_t op1, __bf16 op2, size_t vl) {
+ return __riscv_vfsgnjn_vf_bf16mf2_m(mask, op1, op2, vl);
+}
+
+// CHECK-RV64-LABEL: define dso_local <vscale x 4 x bfloat> @test_vfsgnjn_vv_bf16m1_m(
+// CHECK-RV64-SAME: <vscale x 4 x i1> [[MASK:%.*]], <vscale x 4 x bfloat> [[OP1:%.*]], <vscale x 4 x bfloat> [[OP2:%.*]], i64 noundef [[VL:%.*]]) #[[ATTR0]] {
+// CHECK-RV64-NEXT: [[ENTRY:.*:]]
+// CHECK-RV64-NEXT: [[TMP0:%.*]] = call <vscale x 4 x bfloat> @llvm.riscv.vfsgnjn.mask.nxv4bf16.nxv4bf16.i64(<vscale x 4 x bfloat> poison, <vscale x 4 x bfloat> [[OP1]], <vscale x 4 x bfloat> [[OP2]], <vscale x 4 x i1> [[MASK]], i64 [[VL]], i64 3)
+// CHECK-RV64-NEXT: ret <vscale x 4 x bfloat> [[TMP0]]
+//
+vbfloat16m1_t test_vfsgnjn_vv_bf16m1_m(vbool16_t mask, vbfloat16m1_t op1, vbfloat16m1_t op2, size_t vl) {
+ return __riscv_vfsgnjn_vv_bf16m1_m(mask, op1, op2, vl);
+}
+
+// CHECK-RV64-LABEL: define dso_local <vscale x 4 x bfloat> @test_vfsgnjn_vf_bf16m1_m(
+// CHECK-RV64-SAME: <vscale x 4 x i1> [[MASK:%.*]], <vscale x 4 x bfloat> [[OP1:%.*]], bfloat noundef [[OP2:%.*]], i64 noundef [[VL:%.*]]) #[[ATTR0]] {
+// CHECK-RV64-NEXT: [[ENTRY:.*:]]
+// CHECK-RV64-NEXT: [[TMP0:%.*]] = call <vscale x 4 x bfloat> @llvm.riscv.vfsgnjn.mask.nxv4bf16.bf16.i64(<vscale x 4 x bfloat> poison, <vscale x 4 x bfloat> [[OP1]], bfloat [[OP2]], <vscale x 4 x i1> [[MASK]], i64 [[VL]], i64 3)
+// CHECK-RV64-NEXT: ret <vscale x 4 x bfloat> [[TMP0]]
+//
+vbfloat16m1_t test_vfsgnjn_vf_bf16m1_m(vbool16_t mask, vbfloat16m1_t op1, __bf16 op2, size_t vl) {
+ return __riscv_vfsgnjn_vf_bf16m1_m(mask, op1, op2, vl);
+}
+
+// CHECK-RV64-LABEL: define dso_local <vscale x 8 x bfloat> @test_vfsgnjn_vv_bf16m2_m(
+// CHECK-RV64-SAME: <vscale x 8 x i1> [[MASK:%.*]], <vscale x 8 x bfloat> [[OP1:%.*]], <vscale x 8 x bfloat> [[OP2:%.*]], i64 noundef [[VL:%.*]]) #[[ATTR0]] {
+// CHECK-RV64-NEXT: [[ENTRY:.*:]]
+// CHECK-RV64-NEXT: [[TMP0:%.*]] = call <vscale x 8 x bfloat> @llvm.riscv.vfsgnjn.mask.nxv8bf16.nxv8bf16.i64(<vscale x 8 x bfloat> poison, <vscale x 8 x bfloat> [[OP1]], <vscale x 8 x bfloat> [[OP2]], <vscale x 8 x i1> [[MASK]], i64 [[VL]], i64 3)
+// CHECK-RV64-NEXT: ret <vscale x 8 x bfloat> [[TMP0]]
+//
+vbfloat16m2_t test_vfsgnjn_vv_bf16m2_m(vbool8_t mask, vbfloat16m2_t op1, vbfloat16m2_t op2, size_t vl) {
+ return __riscv_vfsgnjn_vv_bf16m2_m(mask, op1, op2, vl);
+}
+
+// CHECK-RV64-LABEL: define dso_local <vscale x 8 x bfloat> @test_vfsgnjn_vf_bf16m2_m(
+// CHECK-RV64-SAME: <vscale x 8 x i1> [[MASK:%.*]], <vscale x 8 x bfloat> [[OP1:%.*]], bfloat noundef [[OP2:%.*]], i64 noundef [[VL:%.*]]) #[[ATTR0]] {
+// CHECK-RV64-NEXT: [[ENTRY:.*:]]
+// CHECK-RV64-NEXT: [[TMP0:%.*]] = call <vscale x 8 x bfloat> @llvm.riscv.vfsgnjn.mask.nxv8bf16.bf16.i64(<vscale x 8 x bfloat> poison, <vscale x 8 x bfloat> [[OP1]], bfloat [[OP2]], <vscale x 8 x i1> [[MASK]], i64 [[VL]], i64 3)
+// CHECK-RV64-NEXT: ret <vscale x 8 x bfloat> [[TMP0]]
+//
+vbfloat16m2_t test_vfsgnjn_vf_bf16m2_m(vbool8_t mask, vbfloat16m2_t op1, __bf16 op2, size_t vl) {
+ return __riscv_vfsgnjn_vf_bf16m2_m(mask, op1, op2, vl);
+}
+
+// CHECK-RV64-LABEL: define dso_local <vscale x 16 x bfloat> @test_vfsgnjn_vv_bf16m4_m(
+// CHECK-RV64-SAME: <vscale x 16 x i1> [[MASK:%.*]], <vscale x 16 x bfloat> [[OP1:%.*]], <vscale x 16 x bfloat> [[OP2:%.*]], i64 noundef [[VL:%.*]]) #[[ATTR0]] {
+// CHECK-RV64-NEXT: [[ENTRY:.*:]]
+// CHECK-RV64-NEXT: [[TMP0:%.*]] = call <vscale x 16 x bfloat> @llvm.riscv.vfsgnjn.mask.nxv16bf16.nxv16bf16.i64(<vscale x 16 x bfloat> poison, <vscale x 16 x bfloat> [[OP1]], <vscale x 16 x bfloat> [[OP2]], <vscale x 16 x i1> [[MASK]], i64 [[VL]], i64 3)
+// CHECK-RV64-NEXT: ret <vscale x 16 x bfloat> [[TMP0]]
+//
+vbfloat16m4_t test_vfsgnjn_vv_bf16m4_m(vbool4_t mask, vbfloat16m4_t op1, vbfloat16m4_t op2, size_t vl) {
+ return __riscv_vfsgnjn_vv_bf16m4_m(mask, op1, op2, vl);
+}
+
+// CHECK-RV64-LABEL: define dso_local <vscale x 16 x bfloat> @test_vfsgnjn_vf_bf16m4_m(
+// CHECK-RV64-SAME: <vscale x 16 x i1> [[MASK:%.*]], <vscale x 16 x bfloat> [[OP1:%.*]], bfloat noundef [[OP2:%.*]], i64 noundef [[VL:%.*]]) #[[ATTR0]] {
+// CHECK-RV64-NEXT: [[ENTRY:.*:]]
+// CHECK-RV64-NEXT: [[TMP0:%.*]] = call <vscale x 16 x bfloat> @llvm.riscv.vfsgnjn.mask.nxv16bf16.bf16.i64(<vscale x 16 x bfloat> poison, <vscale x 16 x bfloat> [[OP1]], bfloat [[OP2]], <vscale x 16 x i1> [[MASK]], i64 [[VL]], i64 3)
+// CHECK-RV64-NEXT: ret <vscale x 16 x bfloat> [[TMP0]]
+//
+vbfloat16m4_t test_vfsgnjn_vf_bf16m4_m(vbool4_t mask, vbfloat16m4_t op1, __bf16 op2, size_t vl) {
+ return __riscv_vfsgnjn_vf_bf16m4_m(mask, op1, op2, vl);
+}
+
+// CHECK-RV64-LABEL: define dso_local <vscale x 32 x bfloat> @test_vfsgnjn_vv_bf16m8_m(
+// CHECK-RV64-SAME: <vscale x 32 x i1> [[MASK:%.*]], <vscale x 32 x bfloat> [[OP1:%.*]], <vscale x 32 x bfloat> [[OP2:%.*]], i64 noundef [[VL:%.*]]) #[[ATTR0]] {
+// CHECK-RV64-NEXT: [[ENTRY:.*:]]
+// CHECK-RV64-NEXT: [[TMP0:%.*]] = call <vscale x 32 x bfloat> @llvm.riscv.vfsgnjn.mask.nxv32bf16.nxv32bf16.i64(<vscale x 32 x bfloat> poison, <vscale x 32 x bfloat> [[OP1]], <vscale x 32 x bfloat> [[OP2]], <vscale x 32 x i1> [[MASK]], i64 [[VL]], i64 3)
+// CHECK-RV64-NEXT: ret <vscale x 32 x bfloat> [[TMP0]]
+//
+vbfloat16m8_t test_vfsgnjn_vv_bf16m8_m(vbool2_t mask, vbfloat16m8_t op1, vbfloat16m8_t op2, size_t vl) {
+ return __riscv_vfsgnjn_vv_bf16m8_m(mask, op1, op2, vl);
+}
+
+// CHECK-RV64-LABEL: define dso_local <vscale x 32 x bfloat> @test_vfsgnjn_vf_bf16m8_m(
+// CHECK-RV64-SAME: <vscale x 32 x i1> [[MASK:%.*]], <vscale x 32 x bfloat> [[OP1:%.*]], bfloat noundef [[OP2:%.*]], i64 noundef [[VL:%.*]]) #[[ATTR0]] {
+// CHECK-RV64-NEXT: [[ENTRY:.*:]]
+// CHECK-RV64-NEXT: [[TMP0:%.*]] = call <vscale x 32 x bfloat> @llvm.riscv.vfsgnjn.mask.nxv32bf16.bf16.i64(<vscale x 32 x bfloat> poison, <vscale x 32 x bfloat> [[OP1]], bfloat [[OP2]], <vscale x 32 x i1> [[MASK]], i64 [[VL]], i64 3)
+// CHECK-RV64-NEXT: ret <vscale x 32 x bfloat> [[TMP0]]
+//
+vbfloat16m8_t test_vfsgnjn_vf_bf16m8_m(vbool2_t mask, vbfloat16m8_t op1, __bf16 op2, size_t vl) {
+ return __riscv_vfsgnjn_vf_bf16m8_m(mask, op1, op2, vl);
+}
+
diff --git a/clang/test/CodeGen/RISCV/rvv-intrinsics-autogenerated/zvfbfa/non-policy/non-overloaded/vfsgnjx.c b/clang/test/CodeGen/RISCV/rvv-intrinsics-autogenerated/zvfbfa/non-policy/non-overloaded/vfsgnjx.c
new file mode 100644
index 0000000..5fa285cc
--- /dev/null
+++ b/clang/test/CodeGen/RISCV/rvv-intrinsics-autogenerated/zvfbfa/non-policy/non-overloaded/vfsgnjx.c
@@ -0,0 +1,249 @@
+// NOTE: Assertions have been autogenerated by utils/update_cc_test_checks.py UTC_ARGS: --version 5
+// REQUIRES: riscv-registered-target
+// RUN: %clang_cc1 -triple riscv64 -target-feature +v \
+// RUN: -target-feature +experimental-zvfbfa -disable-O0-optnone \
+// RUN: -emit-llvm %s -o - | opt -S -passes=mem2reg | \
+// RUN: FileCheck --check-prefix=CHECK-RV64 %s
+
+#include <riscv_vector.h>
+
+// CHECK-RV64-LABEL: define dso_local <vscale x 1 x bfloat> @test_vfsgnjx_vv_bf16mf4(
+// CHECK-RV64-SAME: <vscale x 1 x bfloat> [[OP1:%.*]], <vscale x 1 x bfloat> [[OP2:%.*]], i64 noundef [[VL:%.*]]) #[[ATTR0:[0-9]+]] {
+// CHECK-RV64-NEXT: [[ENTRY:.*:]]
+// CHECK-RV64-NEXT: [[TMP0:%.*]] = call <vscale x 1 x bfloat> @llvm.riscv.vfsgnjx.nxv1bf16.nxv1bf16.i64(<vscale x 1 x bfloat> poison, <vscale x 1 x bfloat> [[OP1]], <vscale x 1 x bfloat> [[OP2]], i64 [[VL]])
+// CHECK-RV64-NEXT: ret <vscale x 1 x bfloat> [[TMP0]]
+//
+vbfloat16mf4_t test_vfsgnjx_vv_bf16mf4(vbfloat16mf4_t op1, vbfloat16mf4_t op2, size_t vl) {
+ return __riscv_vfsgnjx_vv_bf16mf4(op1, op2, vl);
+}
+
+// CHECK-RV64-LABEL: define dso_local <vscale x 1 x bfloat> @test_vfsgnjx_vf_bf16mf4(
+// CHECK-RV64-SAME: <vscale x 1 x bfloat> [[OP1:%.*]], bfloat noundef [[OP2:%.*]], i64 noundef [[VL:%.*]]) #[[ATTR0]] {
+// CHECK-RV64-NEXT: [[ENTRY:.*:]]
+// CHECK-RV64-NEXT: [[TMP0:%.*]] = call <vscale x 1 x bfloat> @llvm.riscv.vfsgnjx.nxv1bf16.bf16.i64(<vscale x 1 x bfloat> poison, <vscale x 1 x bfloat> [[OP1]], bfloat [[OP2]], i64 [[VL]])
+// CHECK-RV64-NEXT: ret <vscale x 1 x bfloat> [[TMP0]]
+//
+vbfloat16mf4_t test_vfsgnjx_vf_bf16mf4(vbfloat16mf4_t op1, __bf16 op2, size_t vl) {
+ return __riscv_vfsgnjx_vf_bf16mf4(op1, op2, vl);
+}
+
+// CHECK-RV64-LABEL: define dso_local <vscale x 2 x bfloat> @test_vfsgnjx_vv_bf16mf2(
+// CHECK-RV64-SAME: <vscale x 2 x bfloat> [[OP1:%.*]], <vscale x 2 x bfloat> [[OP2:%.*]], i64 noundef [[VL:%.*]]) #[[ATTR0]] {
+// CHECK-RV64-NEXT: [[ENTRY:.*:]]
+// CHECK-RV64-NEXT: [[TMP0:%.*]] = call <vscale x 2 x bfloat> @llvm.riscv.vfsgnjx.nxv2bf16.nxv2bf16.i64(<vscale x 2 x bfloat> poison, <vscale x 2 x bfloat> [[OP1]], <vscale x 2 x bfloat> [[OP2]], i64 [[VL]])
+// CHECK-RV64-NEXT: ret <vscale x 2 x bfloat> [[TMP0]]
+//
+vbfloat16mf2_t test_vfsgnjx_vv_bf16mf2(vbfloat16mf2_t op1, vbfloat16mf2_t op2, size_t vl) {
+ return __riscv_vfsgnjx_vv_bf16mf2(op1, op2, vl);
+}
+
+// CHECK-RV64-LABEL: define dso_local <vscale x 2 x bfloat> @test_vfsgnjx_vf_bf16mf2(
+// CHECK-RV64-SAME: <vscale x 2 x bfloat> [[OP1:%.*]], bfloat noundef [[OP2:%.*]], i64 noundef [[VL:%.*]]) #[[ATTR0]] {
+// CHECK-RV64-NEXT: [[ENTRY:.*:]]
+// CHECK-RV64-NEXT: [[TMP0:%.*]] = call <vscale x 2 x bfloat> @llvm.riscv.vfsgnjx.nxv2bf16.bf16.i64(<vscale x 2 x bfloat> poison, <vscale x 2 x bfloat> [[OP1]], bfloat [[OP2]], i64 [[VL]])
+// CHECK-RV64-NEXT: ret <vscale x 2 x bfloat> [[TMP0]]
+//
+vbfloat16mf2_t test_vfsgnjx_vf_bf16mf2(vbfloat16mf2_t op1, __bf16 op2, size_t vl) {
+ return __riscv_vfsgnjx_vf_bf16mf2(op1, op2, vl);
+}
+
+// CHECK-RV64-LABEL: define dso_local <vscale x 4 x bfloat> @test_vfsgnjx_vv_bf16m1(
+// CHECK-RV64-SAME: <vscale x 4 x bfloat> [[OP1:%.*]], <vscale x 4 x bfloat> [[OP2:%.*]], i64 noundef [[VL:%.*]]) #[[ATTR0]] {
+// CHECK-RV64-NEXT: [[ENTRY:.*:]]
+// CHECK-RV64-NEXT: [[TMP0:%.*]] = call <vscale x 4 x bfloat> @llvm.riscv.vfsgnjx.nxv4bf16.nxv4bf16.i64(<vscale x 4 x bfloat> poison, <vscale x 4 x bfloat> [[OP1]], <vscale x 4 x bfloat> [[OP2]], i64 [[VL]])
+// CHECK-RV64-NEXT: ret <vscale x 4 x bfloat> [[TMP0]]
+//
+vbfloat16m1_t test_vfsgnjx_vv_bf16m1(vbfloat16m1_t op1, vbfloat16m1_t op2, size_t vl) {
+ return __riscv_vfsgnjx_vv_bf16m1(op1, op2, vl);
+}
+
+// CHECK-RV64-LABEL: define dso_local <vscale x 4 x bfloat> @test_vfsgnjx_vf_bf16m1(
+// CHECK-RV64-SAME: <vscale x 4 x bfloat> [[OP1:%.*]], bfloat noundef [[OP2:%.*]], i64 noundef [[VL:%.*]]) #[[ATTR0]] {
+// CHECK-RV64-NEXT: [[ENTRY:.*:]]
+// CHECK-RV64-NEXT: [[TMP0:%.*]] = call <vscale x 4 x bfloat> @llvm.riscv.vfsgnjx.nxv4bf16.bf16.i64(<vscale x 4 x bfloat> poison, <vscale x 4 x bfloat> [[OP1]], bfloat [[OP2]], i64 [[VL]])
+// CHECK-RV64-NEXT: ret <vscale x 4 x bfloat> [[TMP0]]
+//
+vbfloat16m1_t test_vfsgnjx_vf_bf16m1(vbfloat16m1_t op1, __bf16 op2, size_t vl) {
+ return __riscv_vfsgnjx_vf_bf16m1(op1, op2, vl);
+}
+
+// CHECK-RV64-LABEL: define dso_local <vscale x 8 x bfloat> @test_vfsgnjx_vv_bf16m2(
+// CHECK-RV64-SAME: <vscale x 8 x bfloat> [[OP1:%.*]], <vscale x 8 x bfloat> [[OP2:%.*]], i64 noundef [[VL:%.*]]) #[[ATTR0]] {
+// CHECK-RV64-NEXT: [[ENTRY:.*:]]
+// CHECK-RV64-NEXT: [[TMP0:%.*]] = call <vscale x 8 x bfloat> @llvm.riscv.vfsgnjx.nxv8bf16.nxv8bf16.i64(<vscale x 8 x bfloat> poison, <vscale x 8 x bfloat> [[OP1]], <vscale x 8 x bfloat> [[OP2]], i64 [[VL]])
+// CHECK-RV64-NEXT: ret <vscale x 8 x bfloat> [[TMP0]]
+//
+vbfloat16m2_t test_vfsgnjx_vv_bf16m2(vbfloat16m2_t op1, vbfloat16m2_t op2, size_t vl) {
+ return __riscv_vfsgnjx_vv_bf16m2(op1, op2, vl);
+}
+
+// CHECK-RV64-LABEL: define dso_local <vscale x 8 x bfloat> @test_vfsgnjx_vf_bf16m2(
+// CHECK-RV64-SAME: <vscale x 8 x bfloat> [[OP1:%.*]], bfloat noundef [[OP2:%.*]], i64 noundef [[VL:%.*]]) #[[ATTR0]] {
+// CHECK-RV64-NEXT: [[ENTRY:.*:]]
+// CHECK-RV64-NEXT: [[TMP0:%.*]] = call <vscale x 8 x bfloat> @llvm.riscv.vfsgnjx.nxv8bf16.bf16.i64(<vscale x 8 x bfloat> poison, <vscale x 8 x bfloat> [[OP1]], bfloat [[OP2]], i64 [[VL]])
+// CHECK-RV64-NEXT: ret <vscale x 8 x bfloat> [[TMP0]]
+//
+vbfloat16m2_t test_vfsgnjx_vf_bf16m2(vbfloat16m2_t op1, __bf16 op2, size_t vl) {
+ return __riscv_vfsgnjx_vf_bf16m2(op1, op2, vl);
+}
+
+// CHECK-RV64-LABEL: define dso_local <vscale x 16 x bfloat> @test_vfsgnjx_vv_bf16m4(
+// CHECK-RV64-SAME: <vscale x 16 x bfloat> [[OP1:%.*]], <vscale x 16 x bfloat> [[OP2:%.*]], i64 noundef [[VL:%.*]]) #[[ATTR0]] {
+// CHECK-RV64-NEXT: [[ENTRY:.*:]]
+// CHECK-RV64-NEXT: [[TMP0:%.*]] = call <vscale x 16 x bfloat> @llvm.riscv.vfsgnjx.nxv16bf16.nxv16bf16.i64(<vscale x 16 x bfloat> poison, <vscale x 16 x bfloat> [[OP1]], <vscale x 16 x bfloat> [[OP2]], i64 [[VL]])
+// CHECK-RV64-NEXT: ret <vscale x 16 x bfloat> [[TMP0]]
+//
+vbfloat16m4_t test_vfsgnjx_vv_bf16m4(vbfloat16m4_t op1, vbfloat16m4_t op2, size_t vl) {
+ return __riscv_vfsgnjx_vv_bf16m4(op1, op2, vl);
+}
+
+// CHECK-RV64-LABEL: define dso_local <vscale x 16 x bfloat> @test_vfsgnjx_vf_bf16m4(
+// CHECK-RV64-SAME: <vscale x 16 x bfloat> [[OP1:%.*]], bfloat noundef [[OP2:%.*]], i64 noundef [[VL:%.*]]) #[[ATTR0]] {
+// CHECK-RV64-NEXT: [[ENTRY:.*:]]
+// CHECK-RV64-NEXT: [[TMP0:%.*]] = call <vscale x 16 x bfloat> @llvm.riscv.vfsgnjx.nxv16bf16.bf16.i64(<vscale x 16 x bfloat> poison, <vscale x 16 x bfloat> [[OP1]], bfloat [[OP2]], i64 [[VL]])
+// CHECK-RV64-NEXT: ret <vscale x 16 x bfloat> [[TMP0]]
+//
+vbfloat16m4_t test_vfsgnjx_vf_bf16m4(vbfloat16m4_t op1, __bf16 op2, size_t vl) {
+ return __riscv_vfsgnjx_vf_bf16m4(op1, op2, vl);
+}
+
+// CHECK-RV64-LABEL: define dso_local <vscale x 32 x bfloat> @test_vfsgnjx_vv_bf16m8(
+// CHECK-RV64-SAME: <vscale x 32 x bfloat> [[OP1:%.*]], <vscale x 32 x bfloat> [[OP2:%.*]], i64 noundef [[VL:%.*]]) #[[ATTR0]] {
+// CHECK-RV64-NEXT: [[ENTRY:.*:]]
+// CHECK-RV64-NEXT: [[TMP0:%.*]] = call <vscale x 32 x bfloat> @llvm.riscv.vfsgnjx.nxv32bf16.nxv32bf16.i64(<vscale x 32 x bfloat> poison, <vscale x 32 x bfloat> [[OP1]], <vscale x 32 x bfloat> [[OP2]], i64 [[VL]])
+// CHECK-RV64-NEXT: ret <vscale x 32 x bfloat> [[TMP0]]
+//
+vbfloat16m8_t test_vfsgnjx_vv_bf16m8(vbfloat16m8_t op1, vbfloat16m8_t op2, size_t vl) {
+ return __riscv_vfsgnjx_vv_bf16m8(op1, op2, vl);
+}
+
+// CHECK-RV64-LABEL: define dso_local <vscale x 32 x bfloat> @test_vfsgnjx_vf_bf16m8(
+// CHECK-RV64-SAME: <vscale x 32 x bfloat> [[OP1:%.*]], bfloat noundef [[OP2:%.*]], i64 noundef [[VL:%.*]]) #[[ATTR0]] {
+// CHECK-RV64-NEXT: [[ENTRY:.*:]]
+// CHECK-RV64-NEXT: [[TMP0:%.*]] = call <vscale x 32 x bfloat> @llvm.riscv.vfsgnjx.nxv32bf16.bf16.i64(<vscale x 32 x bfloat> poison, <vscale x 32 x bfloat> [[OP1]], bfloat [[OP2]], i64 [[VL]])
+// CHECK-RV64-NEXT: ret <vscale x 32 x bfloat> [[TMP0]]
+//
+vbfloat16m8_t test_vfsgnjx_vf_bf16m8(vbfloat16m8_t op1, __bf16 op2, size_t vl) {
+ return __riscv_vfsgnjx_vf_bf16m8(op1, op2, vl);
+}
+
+// CHECK-RV64-LABEL: define dso_local <vscale x 1 x bfloat> @test_vfsgnjx_vv_bf16mf4_m(
+// CHECK-RV64-SAME: <vscale x 1 x i1> [[MASK:%.*]], <vscale x 1 x bfloat> [[OP1:%.*]], <vscale x 1 x bfloat> [[OP2:%.*]], i64 noundef [[VL:%.*]]) #[[ATTR0]] {
+// CHECK-RV64-NEXT: [[ENTRY:.*:]]
+// CHECK-RV64-NEXT: [[TMP0:%.*]] = call <vscale x 1 x bfloat> @llvm.riscv.vfsgnjx.mask.nxv1bf16.nxv1bf16.i64(<vscale x 1 x bfloat> poison, <vscale x 1 x bfloat> [[OP1]], <vscale x 1 x bfloat> [[OP2]], <vscale x 1 x i1> [[MASK]], i64 [[VL]], i64 3)
+// CHECK-RV64-NEXT: ret <vscale x 1 x bfloat> [[TMP0]]
+//
+vbfloat16mf4_t test_vfsgnjx_vv_bf16mf4_m(vbool64_t mask, vbfloat16mf4_t op1, vbfloat16mf4_t op2, size_t vl) {
+ return __riscv_vfsgnjx_vv_bf16mf4_m(mask, op1, op2, vl);
+}
+
+// CHECK-RV64-LABEL: define dso_local <vscale x 1 x bfloat> @test_vfsgnjx_vf_bf16mf4_m(
+// CHECK-RV64-SAME: <vscale x 1 x i1> [[MASK:%.*]], <vscale x 1 x bfloat> [[OP1:%.*]], bfloat noundef [[OP2:%.*]], i64 noundef [[VL:%.*]]) #[[ATTR0]] {
+// CHECK-RV64-NEXT: [[ENTRY:.*:]]
+// CHECK-RV64-NEXT: [[TMP0:%.*]] = call <vscale x 1 x bfloat> @llvm.riscv.vfsgnjx.mask.nxv1bf16.bf16.i64(<vscale x 1 x bfloat> poison, <vscale x 1 x bfloat> [[OP1]], bfloat [[OP2]], <vscale x 1 x i1> [[MASK]], i64 [[VL]], i64 3)
+// CHECK-RV64-NEXT: ret <vscale x 1 x bfloat> [[TMP0]]
+//
+vbfloat16mf4_t test_vfsgnjx_vf_bf16mf4_m(vbool64_t mask, vbfloat16mf4_t op1, __bf16 op2, size_t vl) {
+ return __riscv_vfsgnjx_vf_bf16mf4_m(mask, op1, op2, vl);
+}
+
+// CHECK-RV64-LABEL: define dso_local <vscale x 2 x bfloat> @test_vfsgnjx_vv_bf16mf2_m(
+// CHECK-RV64-SAME: <vscale x 2 x i1> [[MASK:%.*]], <vscale x 2 x bfloat> [[OP1:%.*]], <vscale x 2 x bfloat> [[OP2:%.*]], i64 noundef [[VL:%.*]]) #[[ATTR0]] {
+// CHECK-RV64-NEXT: [[ENTRY:.*:]]
+// CHECK-RV64-NEXT: [[TMP0:%.*]] = call <vscale x 2 x bfloat> @llvm.riscv.vfsgnjx.mask.nxv2bf16.nxv2bf16.i64(<vscale x 2 x bfloat> poison, <vscale x 2 x bfloat> [[OP1]], <vscale x 2 x bfloat> [[OP2]], <vscale x 2 x i1> [[MASK]], i64 [[VL]], i64 3)
+// CHECK-RV64-NEXT: ret <vscale x 2 x bfloat> [[TMP0]]
+//
+vbfloat16mf2_t test_vfsgnjx_vv_bf16mf2_m(vbool32_t mask, vbfloat16mf2_t op1, vbfloat16mf2_t op2, size_t vl) {
+ return __riscv_vfsgnjx_vv_bf16mf2_m(mask, op1, op2, vl);
+}
+
+// CHECK-RV64-LABEL: define dso_local <vscale x 2 x bfloat> @test_vfsgnjx_vf_bf16mf2_m(
+// CHECK-RV64-SAME: <vscale x 2 x i1> [[MASK:%.*]], <vscale x 2 x bfloat> [[OP1:%.*]], bfloat noundef [[OP2:%.*]], i64 noundef [[VL:%.*]]) #[[ATTR0]] {
+// CHECK-RV64-NEXT: [[ENTRY:.*:]]
+// CHECK-RV64-NEXT: [[TMP0:%.*]] = call <vscale x 2 x bfloat> @llvm.riscv.vfsgnjx.mask.nxv2bf16.bf16.i64(<vscale x 2 x bfloat> poison, <vscale x 2 x bfloat> [[OP1]], bfloat [[OP2]], <vscale x 2 x i1> [[MASK]], i64 [[VL]], i64 3)
+// CHECK-RV64-NEXT: ret <vscale x 2 x bfloat> [[TMP0]]
+//
+vbfloat16mf2_t test_vfsgnjx_vf_bf16mf2_m(vbool32_t mask, vbfloat16mf2_t op1, __bf16 op2, size_t vl) {
+ return __riscv_vfsgnjx_vf_bf16mf2_m(mask, op1, op2, vl);
+}
+
+// CHECK-RV64-LABEL: define dso_local <vscale x 4 x bfloat> @test_vfsgnjx_vv_bf16m1_m(
+// CHECK-RV64-SAME: <vscale x 4 x i1> [[MASK:%.*]], <vscale x 4 x bfloat> [[OP1:%.*]], <vscale x 4 x bfloat> [[OP2:%.*]], i64 noundef [[VL:%.*]]) #[[ATTR0]] {
+// CHECK-RV64-NEXT: [[ENTRY:.*:]]
+// CHECK-RV64-NEXT: [[TMP0:%.*]] = call <vscale x 4 x bfloat> @llvm.riscv.vfsgnjx.mask.nxv4bf16.nxv4bf16.i64(<vscale x 4 x bfloat> poison, <vscale x 4 x bfloat> [[OP1]], <vscale x 4 x bfloat> [[OP2]], <vscale x 4 x i1> [[MASK]], i64 [[VL]], i64 3)
+// CHECK-RV64-NEXT: ret <vscale x 4 x bfloat> [[TMP0]]
+//
+vbfloat16m1_t test_vfsgnjx_vv_bf16m1_m(vbool16_t mask, vbfloat16m1_t op1, vbfloat16m1_t op2, size_t vl) {
+ return __riscv_vfsgnjx_vv_bf16m1_m(mask, op1, op2, vl);
+}
+
+// CHECK-RV64-LABEL: define dso_local <vscale x 4 x bfloat> @test_vfsgnjx_vf_bf16m1_m(
+// CHECK-RV64-SAME: <vscale x 4 x i1> [[MASK:%.*]], <vscale x 4 x bfloat> [[OP1:%.*]], bfloat noundef [[OP2:%.*]], i64 noundef [[VL:%.*]]) #[[ATTR0]] {
+// CHECK-RV64-NEXT: [[ENTRY:.*:]]
+// CHECK-RV64-NEXT: [[TMP0:%.*]] = call <vscale x 4 x bfloat> @llvm.riscv.vfsgnjx.mask.nxv4bf16.bf16.i64(<vscale x 4 x bfloat> poison, <vscale x 4 x bfloat> [[OP1]], bfloat [[OP2]], <vscale x 4 x i1> [[MASK]], i64 [[VL]], i64 3)
+// CHECK-RV64-NEXT: ret <vscale x 4 x bfloat> [[TMP0]]
+//
+vbfloat16m1_t test_vfsgnjx_vf_bf16m1_m(vbool16_t mask, vbfloat16m1_t op1, __bf16 op2, size_t vl) {
+ return __riscv_vfsgnjx_vf_bf16m1_m(mask, op1, op2, vl);
+}
+
+// CHECK-RV64-LABEL: define dso_local <vscale x 8 x bfloat> @test_vfsgnjx_vv_bf16m2_m(
+// CHECK-RV64-SAME: <vscale x 8 x i1> [[MASK:%.*]], <vscale x 8 x bfloat> [[OP1:%.*]], <vscale x 8 x bfloat> [[OP2:%.*]], i64 noundef [[VL:%.*]]) #[[ATTR0]] {
+// CHECK-RV64-NEXT: [[ENTRY:.*:]]
+// CHECK-RV64-NEXT: [[TMP0:%.*]] = call <vscale x 8 x bfloat> @llvm.riscv.vfsgnjx.mask.nxv8bf16.nxv8bf16.i64(<vscale x 8 x bfloat> poison, <vscale x 8 x bfloat> [[OP1]], <vscale x 8 x bfloat> [[OP2]], <vscale x 8 x i1> [[MASK]], i64 [[VL]], i64 3)
+// CHECK-RV64-NEXT: ret <vscale x 8 x bfloat> [[TMP0]]
+//
+vbfloat16m2_t test_vfsgnjx_vv_bf16m2_m(vbool8_t mask, vbfloat16m2_t op1, vbfloat16m2_t op2, size_t vl) {
+ return __riscv_vfsgnjx_vv_bf16m2_m(mask, op1, op2, vl);
+}
+
+// CHECK-RV64-LABEL: define dso_local <vscale x 8 x bfloat> @test_vfsgnjx_vf_bf16m2_m(
+// CHECK-RV64-SAME: <vscale x 8 x i1> [[MASK:%.*]], <vscale x 8 x bfloat> [[OP1:%.*]], bfloat noundef [[OP2:%.*]], i64 noundef [[VL:%.*]]) #[[ATTR0]] {
+// CHECK-RV64-NEXT: [[ENTRY:.*:]]
+// CHECK-RV64-NEXT: [[TMP0:%.*]] = call <vscale x 8 x bfloat> @llvm.riscv.vfsgnjx.mask.nxv8bf16.bf16.i64(<vscale x 8 x bfloat> poison, <vscale x 8 x bfloat> [[OP1]], bfloat [[OP2]], <vscale x 8 x i1> [[MASK]], i64 [[VL]], i64 3)
+// CHECK-RV64-NEXT: ret <vscale x 8 x bfloat> [[TMP0]]
+//
+vbfloat16m2_t test_vfsgnjx_vf_bf16m2_m(vbool8_t mask, vbfloat16m2_t op1, __bf16 op2, size_t vl) {
+ return __riscv_vfsgnjx_vf_bf16m2_m(mask, op1, op2, vl);
+}
+
+// CHECK-RV64-LABEL: define dso_local <vscale x 16 x bfloat> @test_vfsgnjx_vv_bf16m4_m(
+// CHECK-RV64-SAME: <vscale x 16 x i1> [[MASK:%.*]], <vscale x 16 x bfloat> [[OP1:%.*]], <vscale x 16 x bfloat> [[OP2:%.*]], i64 noundef [[VL:%.*]]) #[[ATTR0]] {
+// CHECK-RV64-NEXT: [[ENTRY:.*:]]
+// CHECK-RV64-NEXT: [[TMP0:%.*]] = call <vscale x 16 x bfloat> @llvm.riscv.vfsgnjx.mask.nxv16bf16.nxv16bf16.i64(<vscale x 16 x bfloat> poison, <vscale x 16 x bfloat> [[OP1]], <vscale x 16 x bfloat> [[OP2]], <vscale x 16 x i1> [[MASK]], i64 [[VL]], i64 3)
+// CHECK-RV64-NEXT: ret <vscale x 16 x bfloat> [[TMP0]]
+//
+vbfloat16m4_t test_vfsgnjx_vv_bf16m4_m(vbool4_t mask, vbfloat16m4_t op1, vbfloat16m4_t op2, size_t vl) {
+ return __riscv_vfsgnjx_vv_bf16m4_m(mask, op1, op2, vl);
+}
+
+// CHECK-RV64-LABEL: define dso_local <vscale x 16 x bfloat> @test_vfsgnjx_vf_bf16m4_m(
+// CHECK-RV64-SAME: <vscale x 16 x i1> [[MASK:%.*]], <vscale x 16 x bfloat> [[OP1:%.*]], bfloat noundef [[OP2:%.*]], i64 noundef [[VL:%.*]]) #[[ATTR0]] {
+// CHECK-RV64-NEXT: [[ENTRY:.*:]]
+// CHECK-RV64-NEXT: [[TMP0:%.*]] = call <vscale x 16 x bfloat> @llvm.riscv.vfsgnjx.mask.nxv16bf16.bf16.i64(<vscale x 16 x bfloat> poison, <vscale x 16 x bfloat> [[OP1]], bfloat [[OP2]], <vscale x 16 x i1> [[MASK]], i64 [[VL]], i64 3)
+// CHECK-RV64-NEXT: ret <vscale x 16 x bfloat> [[TMP0]]
+//
+vbfloat16m4_t test_vfsgnjx_vf_bf16m4_m(vbool4_t mask, vbfloat16m4_t op1, __bf16 op2, size_t vl) {
+ return __riscv_vfsgnjx_vf_bf16m4_m(mask, op1, op2, vl);
+}
+
+// CHECK-RV64-LABEL: define dso_local <vscale x 32 x bfloat> @test_vfsgnjx_vv_bf16m8_m(
+// CHECK-RV64-SAME: <vscale x 32 x i1> [[MASK:%.*]], <vscale x 32 x bfloat> [[OP1:%.*]], <vscale x 32 x bfloat> [[OP2:%.*]], i64 noundef [[VL:%.*]]) #[[ATTR0]] {
+// CHECK-RV64-NEXT: [[ENTRY:.*:]]
+// CHECK-RV64-NEXT: [[TMP0:%.*]] = call <vscale x 32 x bfloat> @llvm.riscv.vfsgnjx.mask.nxv32bf16.nxv32bf16.i64(<vscale x 32 x bfloat> poison, <vscale x 32 x bfloat> [[OP1]], <vscale x 32 x bfloat> [[OP2]], <vscale x 32 x i1> [[MASK]], i64 [[VL]], i64 3)
+// CHECK-RV64-NEXT: ret <vscale x 32 x bfloat> [[TMP0]]
+//
+vbfloat16m8_t test_vfsgnjx_vv_bf16m8_m(vbool2_t mask, vbfloat16m8_t op1, vbfloat16m8_t op2, size_t vl) {
+ return __riscv_vfsgnjx_vv_bf16m8_m(mask, op1, op2, vl);
+}
+
+// CHECK-RV64-LABEL: define dso_local <vscale x 32 x bfloat> @test_vfsgnjx_vf_bf16m8_m(
+// CHECK-RV64-SAME: <vscale x 32 x i1> [[MASK:%.*]], <vscale x 32 x bfloat> [[OP1:%.*]], bfloat noundef [[OP2:%.*]], i64 noundef [[VL:%.*]]) #[[ATTR0]] {
+// CHECK-RV64-NEXT: [[ENTRY:.*:]]
+// CHECK-RV64-NEXT: [[TMP0:%.*]] = call <vscale x 32 x bfloat> @llvm.riscv.vfsgnjx.mask.nxv32bf16.bf16.i64(<vscale x 32 x bfloat> poison, <vscale x 32 x bfloat> [[OP1]], bfloat [[OP2]], <vscale x 32 x i1> [[MASK]], i64 [[VL]], i64 3)
+// CHECK-RV64-NEXT: ret <vscale x 32 x bfloat> [[TMP0]]
+//
+vbfloat16m8_t test_vfsgnjx_vf_bf16m8_m(vbool2_t mask, vbfloat16m8_t op1, __bf16 op2, size_t vl) {
+ return __riscv_vfsgnjx_vf_bf16m8_m(mask, op1, op2, vl);
+}
+
diff --git a/clang/test/CodeGen/RISCV/rvv-intrinsics-autogenerated/zvfbfa/non-policy/non-overloaded/vfslide1down.c b/clang/test/CodeGen/RISCV/rvv-intrinsics-autogenerated/zvfbfa/non-policy/non-overloaded/vfslide1down.c
new file mode 100644
index 0000000..b94d26b
--- /dev/null
+++ b/clang/test/CodeGen/RISCV/rvv-intrinsics-autogenerated/zvfbfa/non-policy/non-overloaded/vfslide1down.c
@@ -0,0 +1,129 @@
+// NOTE: Assertions have been autogenerated by utils/update_cc_test_checks.py UTC_ARGS: --version 5
+// REQUIRES: riscv-registered-target
+// RUN: %clang_cc1 -triple riscv64 -target-feature +v \
+// RUN: -target-feature +experimental-zvfbfa -disable-O0-optnone \
+// RUN: -emit-llvm %s -o - | opt -S -passes=mem2reg | \
+// RUN: FileCheck --check-prefix=CHECK-RV64 %s
+
+#include <riscv_vector.h>
+
+// CHECK-RV64-LABEL: define dso_local <vscale x 1 x bfloat> @test_vfslide1down_vf_bf16mf4(
+// CHECK-RV64-SAME: <vscale x 1 x bfloat> [[SRC:%.*]], bfloat noundef [[VALUE:%.*]], i64 noundef [[VL:%.*]]) #[[ATTR0:[0-9]+]] {
+// CHECK-RV64-NEXT: [[ENTRY:.*:]]
+// CHECK-RV64-NEXT: [[TMP0:%.*]] = call <vscale x 1 x bfloat> @llvm.riscv.vfslide1down.nxv1bf16.bf16.i64(<vscale x 1 x bfloat> poison, <vscale x 1 x bfloat> [[SRC]], bfloat [[VALUE]], i64 [[VL]])
+// CHECK-RV64-NEXT: ret <vscale x 1 x bfloat> [[TMP0]]
+//
+vbfloat16mf4_t test_vfslide1down_vf_bf16mf4(vbfloat16mf4_t src, __bf16 value, size_t vl) {
+ return __riscv_vfslide1down_vf_bf16mf4(src, value, vl);
+}
+
+// CHECK-RV64-LABEL: define dso_local <vscale x 2 x bfloat> @test_vfslide1down_vf_bf16mf2(
+// CHECK-RV64-SAME: <vscale x 2 x bfloat> [[SRC:%.*]], bfloat noundef [[VALUE:%.*]], i64 noundef [[VL:%.*]]) #[[ATTR0]] {
+// CHECK-RV64-NEXT: [[ENTRY:.*:]]
+// CHECK-RV64-NEXT: [[TMP0:%.*]] = call <vscale x 2 x bfloat> @llvm.riscv.vfslide1down.nxv2bf16.bf16.i64(<vscale x 2 x bfloat> poison, <vscale x 2 x bfloat> [[SRC]], bfloat [[VALUE]], i64 [[VL]])
+// CHECK-RV64-NEXT: ret <vscale x 2 x bfloat> [[TMP0]]
+//
+vbfloat16mf2_t test_vfslide1down_vf_bf16mf2(vbfloat16mf2_t src, __bf16 value, size_t vl) {
+ return __riscv_vfslide1down_vf_bf16mf2(src, value, vl);
+}
+
+// CHECK-RV64-LABEL: define dso_local <vscale x 4 x bfloat> @test_vfslide1down_vf_bf16m1(
+// CHECK-RV64-SAME: <vscale x 4 x bfloat> [[SRC:%.*]], bfloat noundef [[VALUE:%.*]], i64 noundef [[VL:%.*]]) #[[ATTR0]] {
+// CHECK-RV64-NEXT: [[ENTRY:.*:]]
+// CHECK-RV64-NEXT: [[TMP0:%.*]] = call <vscale x 4 x bfloat> @llvm.riscv.vfslide1down.nxv4bf16.bf16.i64(<vscale x 4 x bfloat> poison, <vscale x 4 x bfloat> [[SRC]], bfloat [[VALUE]], i64 [[VL]])
+// CHECK-RV64-NEXT: ret <vscale x 4 x bfloat> [[TMP0]]
+//
+vbfloat16m1_t test_vfslide1down_vf_bf16m1(vbfloat16m1_t src, __bf16 value, size_t vl) {
+ return __riscv_vfslide1down_vf_bf16m1(src, value, vl);
+}
+
+// CHECK-RV64-LABEL: define dso_local <vscale x 8 x bfloat> @test_vfslide1down_vf_bf16m2(
+// CHECK-RV64-SAME: <vscale x 8 x bfloat> [[SRC:%.*]], bfloat noundef [[VALUE:%.*]], i64 noundef [[VL:%.*]]) #[[ATTR0]] {
+// CHECK-RV64-NEXT: [[ENTRY:.*:]]
+// CHECK-RV64-NEXT: [[TMP0:%.*]] = call <vscale x 8 x bfloat> @llvm.riscv.vfslide1down.nxv8bf16.bf16.i64(<vscale x 8 x bfloat> poison, <vscale x 8 x bfloat> [[SRC]], bfloat [[VALUE]], i64 [[VL]])
+// CHECK-RV64-NEXT: ret <vscale x 8 x bfloat> [[TMP0]]
+//
+vbfloat16m2_t test_vfslide1down_vf_bf16m2(vbfloat16m2_t src, __bf16 value, size_t vl) {
+ return __riscv_vfslide1down_vf_bf16m2(src, value, vl);
+}
+
+// CHECK-RV64-LABEL: define dso_local <vscale x 16 x bfloat> @test_vfslide1down_vf_bf16m4(
+// CHECK-RV64-SAME: <vscale x 16 x bfloat> [[SRC:%.*]], bfloat noundef [[VALUE:%.*]], i64 noundef [[VL:%.*]]) #[[ATTR0]] {
+// CHECK-RV64-NEXT: [[ENTRY:.*:]]
+// CHECK-RV64-NEXT: [[TMP0:%.*]] = call <vscale x 16 x bfloat> @llvm.riscv.vfslide1down.nxv16bf16.bf16.i64(<vscale x 16 x bfloat> poison, <vscale x 16 x bfloat> [[SRC]], bfloat [[VALUE]], i64 [[VL]])
+// CHECK-RV64-NEXT: ret <vscale x 16 x bfloat> [[TMP0]]
+//
+vbfloat16m4_t test_vfslide1down_vf_bf16m4(vbfloat16m4_t src, __bf16 value, size_t vl) {
+ return __riscv_vfslide1down_vf_bf16m4(src, value, vl);
+}
+
+// CHECK-RV64-LABEL: define dso_local <vscale x 32 x bfloat> @test_vfslide1down_vf_bf16m8(
+// CHECK-RV64-SAME: <vscale x 32 x bfloat> [[SRC:%.*]], bfloat noundef [[VALUE:%.*]], i64 noundef [[VL:%.*]]) #[[ATTR0]] {
+// CHECK-RV64-NEXT: [[ENTRY:.*:]]
+// CHECK-RV64-NEXT: [[TMP0:%.*]] = call <vscale x 32 x bfloat> @llvm.riscv.vfslide1down.nxv32bf16.bf16.i64(<vscale x 32 x bfloat> poison, <vscale x 32 x bfloat> [[SRC]], bfloat [[VALUE]], i64 [[VL]])
+// CHECK-RV64-NEXT: ret <vscale x 32 x bfloat> [[TMP0]]
+//
+vbfloat16m8_t test_vfslide1down_vf_bf16m8(vbfloat16m8_t src, __bf16 value, size_t vl) {
+ return __riscv_vfslide1down_vf_bf16m8(src, value, vl);
+}
+
+// CHECK-RV64-LABEL: define dso_local <vscale x 1 x bfloat> @test_vfslide1down_vf_bf16mf4_m(
+// CHECK-RV64-SAME: <vscale x 1 x i1> [[MASK:%.*]], <vscale x 1 x bfloat> [[SRC:%.*]], bfloat noundef [[VALUE:%.*]], i64 noundef [[VL:%.*]]) #[[ATTR0]] {
+// CHECK-RV64-NEXT: [[ENTRY:.*:]]
+// CHECK-RV64-NEXT: [[TMP0:%.*]] = call <vscale x 1 x bfloat> @llvm.riscv.vfslide1down.mask.nxv1bf16.bf16.i64(<vscale x 1 x bfloat> poison, <vscale x 1 x bfloat> [[SRC]], bfloat [[VALUE]], <vscale x 1 x i1> [[MASK]], i64 [[VL]], i64 3)
+// CHECK-RV64-NEXT: ret <vscale x 1 x bfloat> [[TMP0]]
+//
+vbfloat16mf4_t test_vfslide1down_vf_bf16mf4_m(vbool64_t mask, vbfloat16mf4_t src, __bf16 value, size_t vl) {
+ return __riscv_vfslide1down_vf_bf16mf4_m(mask, src, value, vl);
+}
+
+// CHECK-RV64-LABEL: define dso_local <vscale x 2 x bfloat> @test_vfslide1down_vf_bf16mf2_m(
+// CHECK-RV64-SAME: <vscale x 2 x i1> [[MASK:%.*]], <vscale x 2 x bfloat> [[SRC:%.*]], bfloat noundef [[VALUE:%.*]], i64 noundef [[VL:%.*]]) #[[ATTR0]] {
+// CHECK-RV64-NEXT: [[ENTRY:.*:]]
+// CHECK-RV64-NEXT: [[TMP0:%.*]] = call <vscale x 2 x bfloat> @llvm.riscv.vfslide1down.mask.nxv2bf16.bf16.i64(<vscale x 2 x bfloat> poison, <vscale x 2 x bfloat> [[SRC]], bfloat [[VALUE]], <vscale x 2 x i1> [[MASK]], i64 [[VL]], i64 3)
+// CHECK-RV64-NEXT: ret <vscale x 2 x bfloat> [[TMP0]]
+//
+vbfloat16mf2_t test_vfslide1down_vf_bf16mf2_m(vbool32_t mask, vbfloat16mf2_t src, __bf16 value, size_t vl) {
+ return __riscv_vfslide1down_vf_bf16mf2_m(mask, src, value, vl);
+}
+
+// CHECK-RV64-LABEL: define dso_local <vscale x 4 x bfloat> @test_vfslide1down_vf_bf16m1_m(
+// CHECK-RV64-SAME: <vscale x 4 x i1> [[MASK:%.*]], <vscale x 4 x bfloat> [[SRC:%.*]], bfloat noundef [[VALUE:%.*]], i64 noundef [[VL:%.*]]) #[[ATTR0]] {
+// CHECK-RV64-NEXT: [[ENTRY:.*:]]
+// CHECK-RV64-NEXT: [[TMP0:%.*]] = call <vscale x 4 x bfloat> @llvm.riscv.vfslide1down.mask.nxv4bf16.bf16.i64(<vscale x 4 x bfloat> poison, <vscale x 4 x bfloat> [[SRC]], bfloat [[VALUE]], <vscale x 4 x i1> [[MASK]], i64 [[VL]], i64 3)
+// CHECK-RV64-NEXT: ret <vscale x 4 x bfloat> [[TMP0]]
+//
+vbfloat16m1_t test_vfslide1down_vf_bf16m1_m(vbool16_t mask, vbfloat16m1_t src, __bf16 value, size_t vl) {
+ return __riscv_vfslide1down_vf_bf16m1_m(mask, src, value, vl);
+}
+
+// CHECK-RV64-LABEL: define dso_local <vscale x 8 x bfloat> @test_vfslide1down_vf_bf16m2_m(
+// CHECK-RV64-SAME: <vscale x 8 x i1> [[MASK:%.*]], <vscale x 8 x bfloat> [[SRC:%.*]], bfloat noundef [[VALUE:%.*]], i64 noundef [[VL:%.*]]) #[[ATTR0]] {
+// CHECK-RV64-NEXT: [[ENTRY:.*:]]
+// CHECK-RV64-NEXT: [[TMP0:%.*]] = call <vscale x 8 x bfloat> @llvm.riscv.vfslide1down.mask.nxv8bf16.bf16.i64(<vscale x 8 x bfloat> poison, <vscale x 8 x bfloat> [[SRC]], bfloat [[VALUE]], <vscale x 8 x i1> [[MASK]], i64 [[VL]], i64 3)
+// CHECK-RV64-NEXT: ret <vscale x 8 x bfloat> [[TMP0]]
+//
+vbfloat16m2_t test_vfslide1down_vf_bf16m2_m(vbool8_t mask, vbfloat16m2_t src, __bf16 value, size_t vl) {
+ return __riscv_vfslide1down_vf_bf16m2_m(mask, src, value, vl);
+}
+
+// CHECK-RV64-LABEL: define dso_local <vscale x 16 x bfloat> @test_vfslide1down_vf_bf16m4_m(
+// CHECK-RV64-SAME: <vscale x 16 x i1> [[MASK:%.*]], <vscale x 16 x bfloat> [[SRC:%.*]], bfloat noundef [[VALUE:%.*]], i64 noundef [[VL:%.*]]) #[[ATTR0]] {
+// CHECK-RV64-NEXT: [[ENTRY:.*:]]
+// CHECK-RV64-NEXT: [[TMP0:%.*]] = call <vscale x 16 x bfloat> @llvm.riscv.vfslide1down.mask.nxv16bf16.bf16.i64(<vscale x 16 x bfloat> poison, <vscale x 16 x bfloat> [[SRC]], bfloat [[VALUE]], <vscale x 16 x i1> [[MASK]], i64 [[VL]], i64 3)
+// CHECK-RV64-NEXT: ret <vscale x 16 x bfloat> [[TMP0]]
+//
+vbfloat16m4_t test_vfslide1down_vf_bf16m4_m(vbool4_t mask, vbfloat16m4_t src, __bf16 value, size_t vl) {
+ return __riscv_vfslide1down_vf_bf16m4_m(mask, src, value, vl);
+}
+
+// CHECK-RV64-LABEL: define dso_local <vscale x 32 x bfloat> @test_vfslide1down_vf_bf16m8_m(
+// CHECK-RV64-SAME: <vscale x 32 x i1> [[MASK:%.*]], <vscale x 32 x bfloat> [[SRC:%.*]], bfloat noundef [[VALUE:%.*]], i64 noundef [[VL:%.*]]) #[[ATTR0]] {
+// CHECK-RV64-NEXT: [[ENTRY:.*:]]
+// CHECK-RV64-NEXT: [[TMP0:%.*]] = call <vscale x 32 x bfloat> @llvm.riscv.vfslide1down.mask.nxv32bf16.bf16.i64(<vscale x 32 x bfloat> poison, <vscale x 32 x bfloat> [[SRC]], bfloat [[VALUE]], <vscale x 32 x i1> [[MASK]], i64 [[VL]], i64 3)
+// CHECK-RV64-NEXT: ret <vscale x 32 x bfloat> [[TMP0]]
+//
+vbfloat16m8_t test_vfslide1down_vf_bf16m8_m(vbool2_t mask, vbfloat16m8_t src, __bf16 value, size_t vl) {
+ return __riscv_vfslide1down_vf_bf16m8_m(mask, src, value, vl);
+}
+
diff --git a/clang/test/CodeGen/RISCV/rvv-intrinsics-autogenerated/zvfbfa/non-policy/non-overloaded/vfslide1up.c b/clang/test/CodeGen/RISCV/rvv-intrinsics-autogenerated/zvfbfa/non-policy/non-overloaded/vfslide1up.c
new file mode 100644
index 0000000..06e8b49
--- /dev/null
+++ b/clang/test/CodeGen/RISCV/rvv-intrinsics-autogenerated/zvfbfa/non-policy/non-overloaded/vfslide1up.c
@@ -0,0 +1,129 @@
+// NOTE: Assertions have been autogenerated by utils/update_cc_test_checks.py UTC_ARGS: --version 5
+// REQUIRES: riscv-registered-target
+// RUN: %clang_cc1 -triple riscv64 -target-feature +v \
+// RUN: -target-feature +experimental-zvfbfa -disable-O0-optnone \
+// RUN: -emit-llvm %s -o - | opt -S -passes=mem2reg | \
+// RUN: FileCheck --check-prefix=CHECK-RV64 %s
+
+#include <riscv_vector.h>
+
+// CHECK-RV64-LABEL: define dso_local <vscale x 1 x bfloat> @test_vfslide1up_vf_bf16mf4(
+// CHECK-RV64-SAME: <vscale x 1 x bfloat> [[SRC:%.*]], bfloat noundef [[VALUE:%.*]], i64 noundef [[VL:%.*]]) #[[ATTR0:[0-9]+]] {
+// CHECK-RV64-NEXT: [[ENTRY:.*:]]
+// CHECK-RV64-NEXT: [[TMP0:%.*]] = call <vscale x 1 x bfloat> @llvm.riscv.vfslide1up.nxv1bf16.bf16.i64(<vscale x 1 x bfloat> poison, <vscale x 1 x bfloat> [[SRC]], bfloat [[VALUE]], i64 [[VL]])
+// CHECK-RV64-NEXT: ret <vscale x 1 x bfloat> [[TMP0]]
+//
+vbfloat16mf4_t test_vfslide1up_vf_bf16mf4(vbfloat16mf4_t src, __bf16 value, size_t vl) {
+ return __riscv_vfslide1up_vf_bf16mf4(src, value, vl);
+}
+
+// CHECK-RV64-LABEL: define dso_local <vscale x 2 x bfloat> @test_vfslide1up_vf_bf16mf2(
+// CHECK-RV64-SAME: <vscale x 2 x bfloat> [[SRC:%.*]], bfloat noundef [[VALUE:%.*]], i64 noundef [[VL:%.*]]) #[[ATTR0]] {
+// CHECK-RV64-NEXT: [[ENTRY:.*:]]
+// CHECK-RV64-NEXT: [[TMP0:%.*]] = call <vscale x 2 x bfloat> @llvm.riscv.vfslide1up.nxv2bf16.bf16.i64(<vscale x 2 x bfloat> poison, <vscale x 2 x bfloat> [[SRC]], bfloat [[VALUE]], i64 [[VL]])
+// CHECK-RV64-NEXT: ret <vscale x 2 x bfloat> [[TMP0]]
+//
+vbfloat16mf2_t test_vfslide1up_vf_bf16mf2(vbfloat16mf2_t src, __bf16 value, size_t vl) {
+ return __riscv_vfslide1up_vf_bf16mf2(src, value, vl);
+}
+
+// CHECK-RV64-LABEL: define dso_local <vscale x 4 x bfloat> @test_vfslide1up_vf_bf16m1(
+// CHECK-RV64-SAME: <vscale x 4 x bfloat> [[SRC:%.*]], bfloat noundef [[VALUE:%.*]], i64 noundef [[VL:%.*]]) #[[ATTR0]] {
+// CHECK-RV64-NEXT: [[ENTRY:.*:]]
+// CHECK-RV64-NEXT: [[TMP0:%.*]] = call <vscale x 4 x bfloat> @llvm.riscv.vfslide1up.nxv4bf16.bf16.i64(<vscale x 4 x bfloat> poison, <vscale x 4 x bfloat> [[SRC]], bfloat [[VALUE]], i64 [[VL]])
+// CHECK-RV64-NEXT: ret <vscale x 4 x bfloat> [[TMP0]]
+//
+vbfloat16m1_t test_vfslide1up_vf_bf16m1(vbfloat16m1_t src, __bf16 value, size_t vl) {
+ return __riscv_vfslide1up_vf_bf16m1(src, value, vl);
+}
+
+// CHECK-RV64-LABEL: define dso_local <vscale x 8 x bfloat> @test_vfslide1up_vf_bf16m2(
+// CHECK-RV64-SAME: <vscale x 8 x bfloat> [[SRC:%.*]], bfloat noundef [[VALUE:%.*]], i64 noundef [[VL:%.*]]) #[[ATTR0]] {
+// CHECK-RV64-NEXT: [[ENTRY:.*:]]
+// CHECK-RV64-NEXT: [[TMP0:%.*]] = call <vscale x 8 x bfloat> @llvm.riscv.vfslide1up.nxv8bf16.bf16.i64(<vscale x 8 x bfloat> poison, <vscale x 8 x bfloat> [[SRC]], bfloat [[VALUE]], i64 [[VL]])
+// CHECK-RV64-NEXT: ret <vscale x 8 x bfloat> [[TMP0]]
+//
+vbfloat16m2_t test_vfslide1up_vf_bf16m2(vbfloat16m2_t src, __bf16 value, size_t vl) {
+ return __riscv_vfslide1up_vf_bf16m2(src, value, vl);
+}
+
+// CHECK-RV64-LABEL: define dso_local <vscale x 16 x bfloat> @test_vfslide1up_vf_bf16m4(
+// CHECK-RV64-SAME: <vscale x 16 x bfloat> [[SRC:%.*]], bfloat noundef [[VALUE:%.*]], i64 noundef [[VL:%.*]]) #[[ATTR0]] {
+// CHECK-RV64-NEXT: [[ENTRY:.*:]]
+// CHECK-RV64-NEXT: [[TMP0:%.*]] = call <vscale x 16 x bfloat> @llvm.riscv.vfslide1up.nxv16bf16.bf16.i64(<vscale x 16 x bfloat> poison, <vscale x 16 x bfloat> [[SRC]], bfloat [[VALUE]], i64 [[VL]])
+// CHECK-RV64-NEXT: ret <vscale x 16 x bfloat> [[TMP0]]
+//
+vbfloat16m4_t test_vfslide1up_vf_bf16m4(vbfloat16m4_t src, __bf16 value, size_t vl) {
+ return __riscv_vfslide1up_vf_bf16m4(src, value, vl);
+}
+
+// CHECK-RV64-LABEL: define dso_local <vscale x 32 x bfloat> @test_vfslide1up_vf_bf16m8(
+// CHECK-RV64-SAME: <vscale x 32 x bfloat> [[SRC:%.*]], bfloat noundef [[VALUE:%.*]], i64 noundef [[VL:%.*]]) #[[ATTR0]] {
+// CHECK-RV64-NEXT: [[ENTRY:.*:]]
+// CHECK-RV64-NEXT: [[TMP0:%.*]] = call <vscale x 32 x bfloat> @llvm.riscv.vfslide1up.nxv32bf16.bf16.i64(<vscale x 32 x bfloat> poison, <vscale x 32 x bfloat> [[SRC]], bfloat [[VALUE]], i64 [[VL]])
+// CHECK-RV64-NEXT: ret <vscale x 32 x bfloat> [[TMP0]]
+//
+vbfloat16m8_t test_vfslide1up_vf_bf16m8(vbfloat16m8_t src, __bf16 value, size_t vl) {
+ return __riscv_vfslide1up_vf_bf16m8(src, value, vl);
+}
+
+// CHECK-RV64-LABEL: define dso_local <vscale x 1 x bfloat> @test_vfslide1up_vf_bf16mf4_m(
+// CHECK-RV64-SAME: <vscale x 1 x i1> [[MASK:%.*]], <vscale x 1 x bfloat> [[SRC:%.*]], bfloat noundef [[VALUE:%.*]], i64 noundef [[VL:%.*]]) #[[ATTR0]] {
+// CHECK-RV64-NEXT: [[ENTRY:.*:]]
+// CHECK-RV64-NEXT: [[TMP0:%.*]] = call <vscale x 1 x bfloat> @llvm.riscv.vfslide1up.mask.nxv1bf16.bf16.i64(<vscale x 1 x bfloat> poison, <vscale x 1 x bfloat> [[SRC]], bfloat [[VALUE]], <vscale x 1 x i1> [[MASK]], i64 [[VL]], i64 3)
+// CHECK-RV64-NEXT: ret <vscale x 1 x bfloat> [[TMP0]]
+//
+vbfloat16mf4_t test_vfslide1up_vf_bf16mf4_m(vbool64_t mask, vbfloat16mf4_t src, __bf16 value, size_t vl) {
+ return __riscv_vfslide1up_vf_bf16mf4_m(mask, src, value, vl);
+}
+
+// CHECK-RV64-LABEL: define dso_local <vscale x 2 x bfloat> @test_vfslide1up_vf_bf16mf2_m(
+// CHECK-RV64-SAME: <vscale x 2 x i1> [[MASK:%.*]], <vscale x 2 x bfloat> [[SRC:%.*]], bfloat noundef [[VALUE:%.*]], i64 noundef [[VL:%.*]]) #[[ATTR0]] {
+// CHECK-RV64-NEXT: [[ENTRY:.*:]]
+// CHECK-RV64-NEXT: [[TMP0:%.*]] = call <vscale x 2 x bfloat> @llvm.riscv.vfslide1up.mask.nxv2bf16.bf16.i64(<vscale x 2 x bfloat> poison, <vscale x 2 x bfloat> [[SRC]], bfloat [[VALUE]], <vscale x 2 x i1> [[MASK]], i64 [[VL]], i64 3)
+// CHECK-RV64-NEXT: ret <vscale x 2 x bfloat> [[TMP0]]
+//
+vbfloat16mf2_t test_vfslide1up_vf_bf16mf2_m(vbool32_t mask, vbfloat16mf2_t src, __bf16 value, size_t vl) {
+ return __riscv_vfslide1up_vf_bf16mf2_m(mask, src, value, vl);
+}
+
+// CHECK-RV64-LABEL: define dso_local <vscale x 4 x bfloat> @test_vfslide1up_vf_bf16m1_m(
+// CHECK-RV64-SAME: <vscale x 4 x i1> [[MASK:%.*]], <vscale x 4 x bfloat> [[SRC:%.*]], bfloat noundef [[VALUE:%.*]], i64 noundef [[VL:%.*]]) #[[ATTR0]] {
+// CHECK-RV64-NEXT: [[ENTRY:.*:]]
+// CHECK-RV64-NEXT: [[TMP0:%.*]] = call <vscale x 4 x bfloat> @llvm.riscv.vfslide1up.mask.nxv4bf16.bf16.i64(<vscale x 4 x bfloat> poison, <vscale x 4 x bfloat> [[SRC]], bfloat [[VALUE]], <vscale x 4 x i1> [[MASK]], i64 [[VL]], i64 3)
+// CHECK-RV64-NEXT: ret <vscale x 4 x bfloat> [[TMP0]]
+//
+vbfloat16m1_t test_vfslide1up_vf_bf16m1_m(vbool16_t mask, vbfloat16m1_t src, __bf16 value, size_t vl) {
+ return __riscv_vfslide1up_vf_bf16m1_m(mask, src, value, vl);
+}
+
+// CHECK-RV64-LABEL: define dso_local <vscale x 8 x bfloat> @test_vfslide1up_vf_bf16m2_m(
+// CHECK-RV64-SAME: <vscale x 8 x i1> [[MASK:%.*]], <vscale x 8 x bfloat> [[SRC:%.*]], bfloat noundef [[VALUE:%.*]], i64 noundef [[VL:%.*]]) #[[ATTR0]] {
+// CHECK-RV64-NEXT: [[ENTRY:.*:]]
+// CHECK-RV64-NEXT: [[TMP0:%.*]] = call <vscale x 8 x bfloat> @llvm.riscv.vfslide1up.mask.nxv8bf16.bf16.i64(<vscale x 8 x bfloat> poison, <vscale x 8 x bfloat> [[SRC]], bfloat [[VALUE]], <vscale x 8 x i1> [[MASK]], i64 [[VL]], i64 3)
+// CHECK-RV64-NEXT: ret <vscale x 8 x bfloat> [[TMP0]]
+//
+vbfloat16m2_t test_vfslide1up_vf_bf16m2_m(vbool8_t mask, vbfloat16m2_t src, __bf16 value, size_t vl) {
+ return __riscv_vfslide1up_vf_bf16m2_m(mask, src, value, vl);
+}
+
+// CHECK-RV64-LABEL: define dso_local <vscale x 16 x bfloat> @test_vfslide1up_vf_bf16m4_m(
+// CHECK-RV64-SAME: <vscale x 16 x i1> [[MASK:%.*]], <vscale x 16 x bfloat> [[SRC:%.*]], bfloat noundef [[VALUE:%.*]], i64 noundef [[VL:%.*]]) #[[ATTR0]] {
+// CHECK-RV64-NEXT: [[ENTRY:.*:]]
+// CHECK-RV64-NEXT: [[TMP0:%.*]] = call <vscale x 16 x bfloat> @llvm.riscv.vfslide1up.mask.nxv16bf16.bf16.i64(<vscale x 16 x bfloat> poison, <vscale x 16 x bfloat> [[SRC]], bfloat [[VALUE]], <vscale x 16 x i1> [[MASK]], i64 [[VL]], i64 3)
+// CHECK-RV64-NEXT: ret <vscale x 16 x bfloat> [[TMP0]]
+//
+vbfloat16m4_t test_vfslide1up_vf_bf16m4_m(vbool4_t mask, vbfloat16m4_t src, __bf16 value, size_t vl) {
+ return __riscv_vfslide1up_vf_bf16m4_m(mask, src, value, vl);
+}
+
+// CHECK-RV64-LABEL: define dso_local <vscale x 32 x bfloat> @test_vfslide1up_vf_bf16m8_m(
+// CHECK-RV64-SAME: <vscale x 32 x i1> [[MASK:%.*]], <vscale x 32 x bfloat> [[SRC:%.*]], bfloat noundef [[VALUE:%.*]], i64 noundef [[VL:%.*]]) #[[ATTR0]] {
+// CHECK-RV64-NEXT: [[ENTRY:.*:]]
+// CHECK-RV64-NEXT: [[TMP0:%.*]] = call <vscale x 32 x bfloat> @llvm.riscv.vfslide1up.mask.nxv32bf16.bf16.i64(<vscale x 32 x bfloat> poison, <vscale x 32 x bfloat> [[SRC]], bfloat [[VALUE]], <vscale x 32 x i1> [[MASK]], i64 [[VL]], i64 3)
+// CHECK-RV64-NEXT: ret <vscale x 32 x bfloat> [[TMP0]]
+//
+vbfloat16m8_t test_vfslide1up_vf_bf16m8_m(vbool2_t mask, vbfloat16m8_t src, __bf16 value, size_t vl) {
+ return __riscv_vfslide1up_vf_bf16m8_m(mask, src, value, vl);
+}
+
diff --git a/clang/test/CodeGen/RISCV/rvv-intrinsics-autogenerated/zvfbfa/non-policy/non-overloaded/vfsub.c b/clang/test/CodeGen/RISCV/rvv-intrinsics-autogenerated/zvfbfa/non-policy/non-overloaded/vfsub.c
new file mode 100644
index 0000000..2423b0b
--- /dev/null
+++ b/clang/test/CodeGen/RISCV/rvv-intrinsics-autogenerated/zvfbfa/non-policy/non-overloaded/vfsub.c
@@ -0,0 +1,249 @@
+// NOTE: Assertions have been autogenerated by utils/update_cc_test_checks.py UTC_ARGS: --version 5
+// REQUIRES: riscv-registered-target
+// RUN: %clang_cc1 -triple riscv64 -target-feature +v \
+// RUN: -target-feature +experimental-zvfbfa -disable-O0-optnone \
+// RUN: -emit-llvm %s -o - | opt -S -passes=mem2reg | \
+// RUN: FileCheck --check-prefix=CHECK-RV64 %s
+
+#include <riscv_vector.h>
+
+// CHECK-RV64-LABEL: define dso_local <vscale x 1 x bfloat> @test_vfsub_vv_bf16mf4(
+// CHECK-RV64-SAME: <vscale x 1 x bfloat> [[OP1:%.*]], <vscale x 1 x bfloat> [[OP2:%.*]], i64 noundef [[VL:%.*]]) #[[ATTR0:[0-9]+]] {
+// CHECK-RV64-NEXT: [[ENTRY:.*:]]
+// CHECK-RV64-NEXT: [[TMP0:%.*]] = call <vscale x 1 x bfloat> @llvm.riscv.vfsub.nxv1bf16.nxv1bf16.i64(<vscale x 1 x bfloat> poison, <vscale x 1 x bfloat> [[OP1]], <vscale x 1 x bfloat> [[OP2]], i64 7, i64 [[VL]])
+// CHECK-RV64-NEXT: ret <vscale x 1 x bfloat> [[TMP0]]
+//
+vbfloat16mf4_t test_vfsub_vv_bf16mf4(vbfloat16mf4_t op1, vbfloat16mf4_t op2, size_t vl) {
+ return __riscv_vfsub_vv_bf16mf4(op1, op2, vl);
+}
+
+// CHECK-RV64-LABEL: define dso_local <vscale x 1 x bfloat> @test_vfsub_vf_bf16mf4(
+// CHECK-RV64-SAME: <vscale x 1 x bfloat> [[OP1:%.*]], bfloat noundef [[OP2:%.*]], i64 noundef [[VL:%.*]]) #[[ATTR0]] {
+// CHECK-RV64-NEXT: [[ENTRY:.*:]]
+// CHECK-RV64-NEXT: [[TMP0:%.*]] = call <vscale x 1 x bfloat> @llvm.riscv.vfsub.nxv1bf16.bf16.i64(<vscale x 1 x bfloat> poison, <vscale x 1 x bfloat> [[OP1]], bfloat [[OP2]], i64 7, i64 [[VL]])
+// CHECK-RV64-NEXT: ret <vscale x 1 x bfloat> [[TMP0]]
+//
+vbfloat16mf4_t test_vfsub_vf_bf16mf4(vbfloat16mf4_t op1, __bf16 op2, size_t vl) {
+ return __riscv_vfsub_vf_bf16mf4(op1, op2, vl);
+}
+
+// CHECK-RV64-LABEL: define dso_local <vscale x 2 x bfloat> @test_vfsub_vv_bf16mf2(
+// CHECK-RV64-SAME: <vscale x 2 x bfloat> [[OP1:%.*]], <vscale x 2 x bfloat> [[OP2:%.*]], i64 noundef [[VL:%.*]]) #[[ATTR0]] {
+// CHECK-RV64-NEXT: [[ENTRY:.*:]]
+// CHECK-RV64-NEXT: [[TMP0:%.*]] = call <vscale x 2 x bfloat> @llvm.riscv.vfsub.nxv2bf16.nxv2bf16.i64(<vscale x 2 x bfloat> poison, <vscale x 2 x bfloat> [[OP1]], <vscale x 2 x bfloat> [[OP2]], i64 7, i64 [[VL]])
+// CHECK-RV64-NEXT: ret <vscale x 2 x bfloat> [[TMP0]]
+//
+vbfloat16mf2_t test_vfsub_vv_bf16mf2(vbfloat16mf2_t op1, vbfloat16mf2_t op2, size_t vl) {
+ return __riscv_vfsub_vv_bf16mf2(op1, op2, vl);
+}
+
+// CHECK-RV64-LABEL: define dso_local <vscale x 2 x bfloat> @test_vfsub_vf_bf16mf2(
+// CHECK-RV64-SAME: <vscale x 2 x bfloat> [[OP1:%.*]], bfloat noundef [[OP2:%.*]], i64 noundef [[VL:%.*]]) #[[ATTR0]] {
+// CHECK-RV64-NEXT: [[ENTRY:.*:]]
+// CHECK-RV64-NEXT: [[TMP0:%.*]] = call <vscale x 2 x bfloat> @llvm.riscv.vfsub.nxv2bf16.bf16.i64(<vscale x 2 x bfloat> poison, <vscale x 2 x bfloat> [[OP1]], bfloat [[OP2]], i64 7, i64 [[VL]])
+// CHECK-RV64-NEXT: ret <vscale x 2 x bfloat> [[TMP0]]
+//
+vbfloat16mf2_t test_vfsub_vf_bf16mf2(vbfloat16mf2_t op1, __bf16 op2, size_t vl) {
+ return __riscv_vfsub_vf_bf16mf2(op1, op2, vl);
+}
+
+// CHECK-RV64-LABEL: define dso_local <vscale x 4 x bfloat> @test_vfsub_vv_bf16m1(
+// CHECK-RV64-SAME: <vscale x 4 x bfloat> [[OP1:%.*]], <vscale x 4 x bfloat> [[OP2:%.*]], i64 noundef [[VL:%.*]]) #[[ATTR0]] {
+// CHECK-RV64-NEXT: [[ENTRY:.*:]]
+// CHECK-RV64-NEXT: [[TMP0:%.*]] = call <vscale x 4 x bfloat> @llvm.riscv.vfsub.nxv4bf16.nxv4bf16.i64(<vscale x 4 x bfloat> poison, <vscale x 4 x bfloat> [[OP1]], <vscale x 4 x bfloat> [[OP2]], i64 7, i64 [[VL]])
+// CHECK-RV64-NEXT: ret <vscale x 4 x bfloat> [[TMP0]]
+//
+vbfloat16m1_t test_vfsub_vv_bf16m1(vbfloat16m1_t op1, vbfloat16m1_t op2, size_t vl) {
+ return __riscv_vfsub_vv_bf16m1(op1, op2, vl);
+}
+
+// CHECK-RV64-LABEL: define dso_local <vscale x 4 x bfloat> @test_vfsub_vf_bf16m1(
+// CHECK-RV64-SAME: <vscale x 4 x bfloat> [[OP1:%.*]], bfloat noundef [[OP2:%.*]], i64 noundef [[VL:%.*]]) #[[ATTR0]] {
+// CHECK-RV64-NEXT: [[ENTRY:.*:]]
+// CHECK-RV64-NEXT: [[TMP0:%.*]] = call <vscale x 4 x bfloat> @llvm.riscv.vfsub.nxv4bf16.bf16.i64(<vscale x 4 x bfloat> poison, <vscale x 4 x bfloat> [[OP1]], bfloat [[OP2]], i64 7, i64 [[VL]])
+// CHECK-RV64-NEXT: ret <vscale x 4 x bfloat> [[TMP0]]
+//
+vbfloat16m1_t test_vfsub_vf_bf16m1(vbfloat16m1_t op1, __bf16 op2, size_t vl) {
+ return __riscv_vfsub_vf_bf16m1(op1, op2, vl);
+}
+
+// CHECK-RV64-LABEL: define dso_local <vscale x 8 x bfloat> @test_vfsub_vv_bf16m2(
+// CHECK-RV64-SAME: <vscale x 8 x bfloat> [[OP1:%.*]], <vscale x 8 x bfloat> [[OP2:%.*]], i64 noundef [[VL:%.*]]) #[[ATTR0]] {
+// CHECK-RV64-NEXT: [[ENTRY:.*:]]
+// CHECK-RV64-NEXT: [[TMP0:%.*]] = call <vscale x 8 x bfloat> @llvm.riscv.vfsub.nxv8bf16.nxv8bf16.i64(<vscale x 8 x bfloat> poison, <vscale x 8 x bfloat> [[OP1]], <vscale x 8 x bfloat> [[OP2]], i64 7, i64 [[VL]])
+// CHECK-RV64-NEXT: ret <vscale x 8 x bfloat> [[TMP0]]
+//
+vbfloat16m2_t test_vfsub_vv_bf16m2(vbfloat16m2_t op1, vbfloat16m2_t op2, size_t vl) {
+ return __riscv_vfsub_vv_bf16m2(op1, op2, vl);
+}
+
+// CHECK-RV64-LABEL: define dso_local <vscale x 8 x bfloat> @test_vfsub_vf_bf16m2(
+// CHECK-RV64-SAME: <vscale x 8 x bfloat> [[OP1:%.*]], bfloat noundef [[OP2:%.*]], i64 noundef [[VL:%.*]]) #[[ATTR0]] {
+// CHECK-RV64-NEXT: [[ENTRY:.*:]]
+// CHECK-RV64-NEXT: [[TMP0:%.*]] = call <vscale x 8 x bfloat> @llvm.riscv.vfsub.nxv8bf16.bf16.i64(<vscale x 8 x bfloat> poison, <vscale x 8 x bfloat> [[OP1]], bfloat [[OP2]], i64 7, i64 [[VL]])
+// CHECK-RV64-NEXT: ret <vscale x 8 x bfloat> [[TMP0]]
+//
+vbfloat16m2_t test_vfsub_vf_bf16m2(vbfloat16m2_t op1, __bf16 op2, size_t vl) {
+ return __riscv_vfsub_vf_bf16m2(op1, op2, vl);
+}
+
+// CHECK-RV64-LABEL: define dso_local <vscale x 16 x bfloat> @test_vfsub_vv_bf16m4(
+// CHECK-RV64-SAME: <vscale x 16 x bfloat> [[OP1:%.*]], <vscale x 16 x bfloat> [[OP2:%.*]], i64 noundef [[VL:%.*]]) #[[ATTR0]] {
+// CHECK-RV64-NEXT: [[ENTRY:.*:]]
+// CHECK-RV64-NEXT: [[TMP0:%.*]] = call <vscale x 16 x bfloat> @llvm.riscv.vfsub.nxv16bf16.nxv16bf16.i64(<vscale x 16 x bfloat> poison, <vscale x 16 x bfloat> [[OP1]], <vscale x 16 x bfloat> [[OP2]], i64 7, i64 [[VL]])
+// CHECK-RV64-NEXT: ret <vscale x 16 x bfloat> [[TMP0]]
+//
+vbfloat16m4_t test_vfsub_vv_bf16m4(vbfloat16m4_t op1, vbfloat16m4_t op2, size_t vl) {
+ return __riscv_vfsub_vv_bf16m4(op1, op2, vl);
+}
+
+// CHECK-RV64-LABEL: define dso_local <vscale x 16 x bfloat> @test_vfsub_vf_bf16m4(
+// CHECK-RV64-SAME: <vscale x 16 x bfloat> [[OP1:%.*]], bfloat noundef [[OP2:%.*]], i64 noundef [[VL:%.*]]) #[[ATTR0]] {
+// CHECK-RV64-NEXT: [[ENTRY:.*:]]
+// CHECK-RV64-NEXT: [[TMP0:%.*]] = call <vscale x 16 x bfloat> @llvm.riscv.vfsub.nxv16bf16.bf16.i64(<vscale x 16 x bfloat> poison, <vscale x 16 x bfloat> [[OP1]], bfloat [[OP2]], i64 7, i64 [[VL]])
+// CHECK-RV64-NEXT: ret <vscale x 16 x bfloat> [[TMP0]]
+//
+vbfloat16m4_t test_vfsub_vf_bf16m4(vbfloat16m4_t op1, __bf16 op2, size_t vl) {
+ return __riscv_vfsub_vf_bf16m4(op1, op2, vl);
+}
+
+// CHECK-RV64-LABEL: define dso_local <vscale x 32 x bfloat> @test_vfsub_vv_bf16m8(
+// CHECK-RV64-SAME: <vscale x 32 x bfloat> [[OP1:%.*]], <vscale x 32 x bfloat> [[OP2:%.*]], i64 noundef [[VL:%.*]]) #[[ATTR0]] {
+// CHECK-RV64-NEXT: [[ENTRY:.*:]]
+// CHECK-RV64-NEXT: [[TMP0:%.*]] = call <vscale x 32 x bfloat> @llvm.riscv.vfsub.nxv32bf16.nxv32bf16.i64(<vscale x 32 x bfloat> poison, <vscale x 32 x bfloat> [[OP1]], <vscale x 32 x bfloat> [[OP2]], i64 7, i64 [[VL]])
+// CHECK-RV64-NEXT: ret <vscale x 32 x bfloat> [[TMP0]]
+//
+vbfloat16m8_t test_vfsub_vv_bf16m8(vbfloat16m8_t op1, vbfloat16m8_t op2, size_t vl) {
+ return __riscv_vfsub_vv_bf16m8(op1, op2, vl);
+}
+
+// CHECK-RV64-LABEL: define dso_local <vscale x 32 x bfloat> @test_vfsub_vf_bf16m8(
+// CHECK-RV64-SAME: <vscale x 32 x bfloat> [[OP1:%.*]], bfloat noundef [[OP2:%.*]], i64 noundef [[VL:%.*]]) #[[ATTR0]] {
+// CHECK-RV64-NEXT: [[ENTRY:.*:]]
+// CHECK-RV64-NEXT: [[TMP0:%.*]] = call <vscale x 32 x bfloat> @llvm.riscv.vfsub.nxv32bf16.bf16.i64(<vscale x 32 x bfloat> poison, <vscale x 32 x bfloat> [[OP1]], bfloat [[OP2]], i64 7, i64 [[VL]])
+// CHECK-RV64-NEXT: ret <vscale x 32 x bfloat> [[TMP0]]
+//
+vbfloat16m8_t test_vfsub_vf_bf16m8(vbfloat16m8_t op1, __bf16 op2, size_t vl) {
+ return __riscv_vfsub_vf_bf16m8(op1, op2, vl);
+}
+
+// CHECK-RV64-LABEL: define dso_local <vscale x 1 x bfloat> @test_vfsub_vv_bf16mf4_m(
+// CHECK-RV64-SAME: <vscale x 1 x i1> [[MASK:%.*]], <vscale x 1 x bfloat> [[OP1:%.*]], <vscale x 1 x bfloat> [[OP2:%.*]], i64 noundef [[VL:%.*]]) #[[ATTR0]] {
+// CHECK-RV64-NEXT: [[ENTRY:.*:]]
+// CHECK-RV64-NEXT: [[TMP0:%.*]] = call <vscale x 1 x bfloat> @llvm.riscv.vfsub.mask.nxv1bf16.nxv1bf16.i64(<vscale x 1 x bfloat> poison, <vscale x 1 x bfloat> [[OP1]], <vscale x 1 x bfloat> [[OP2]], <vscale x 1 x i1> [[MASK]], i64 7, i64 [[VL]], i64 3)
+// CHECK-RV64-NEXT: ret <vscale x 1 x bfloat> [[TMP0]]
+//
+vbfloat16mf4_t test_vfsub_vv_bf16mf4_m(vbool64_t mask, vbfloat16mf4_t op1, vbfloat16mf4_t op2, size_t vl) {
+ return __riscv_vfsub_vv_bf16mf4_m(mask, op1, op2, vl);
+}
+
+// CHECK-RV64-LABEL: define dso_local <vscale x 1 x bfloat> @test_vfsub_vf_bf16mf4_m(
+// CHECK-RV64-SAME: <vscale x 1 x i1> [[MASK:%.*]], <vscale x 1 x bfloat> [[OP1:%.*]], bfloat noundef [[OP2:%.*]], i64 noundef [[VL:%.*]]) #[[ATTR0]] {
+// CHECK-RV64-NEXT: [[ENTRY:.*:]]
+// CHECK-RV64-NEXT: [[TMP0:%.*]] = call <vscale x 1 x bfloat> @llvm.riscv.vfsub.mask.nxv1bf16.bf16.i64(<vscale x 1 x bfloat> poison, <vscale x 1 x bfloat> [[OP1]], bfloat [[OP2]], <vscale x 1 x i1> [[MASK]], i64 7, i64 [[VL]], i64 3)
+// CHECK-RV64-NEXT: ret <vscale x 1 x bfloat> [[TMP0]]
+//
+vbfloat16mf4_t test_vfsub_vf_bf16mf4_m(vbool64_t mask, vbfloat16mf4_t op1, __bf16 op2, size_t vl) {
+ return __riscv_vfsub_vf_bf16mf4_m(mask, op1, op2, vl);
+}
+
+// CHECK-RV64-LABEL: define dso_local <vscale x 2 x bfloat> @test_vfsub_vv_bf16mf2_m(
+// CHECK-RV64-SAME: <vscale x 2 x i1> [[MASK:%.*]], <vscale x 2 x bfloat> [[OP1:%.*]], <vscale x 2 x bfloat> [[OP2:%.*]], i64 noundef [[VL:%.*]]) #[[ATTR0]] {
+// CHECK-RV64-NEXT: [[ENTRY:.*:]]
+// CHECK-RV64-NEXT: [[TMP0:%.*]] = call <vscale x 2 x bfloat> @llvm.riscv.vfsub.mask.nxv2bf16.nxv2bf16.i64(<vscale x 2 x bfloat> poison, <vscale x 2 x bfloat> [[OP1]], <vscale x 2 x bfloat> [[OP2]], <vscale x 2 x i1> [[MASK]], i64 7, i64 [[VL]], i64 3)
+// CHECK-RV64-NEXT: ret <vscale x 2 x bfloat> [[TMP0]]
+//
+vbfloat16mf2_t test_vfsub_vv_bf16mf2_m(vbool32_t mask, vbfloat16mf2_t op1, vbfloat16mf2_t op2, size_t vl) {
+ return __riscv_vfsub_vv_bf16mf2_m(mask, op1, op2, vl);
+}
+
+// CHECK-RV64-LABEL: define dso_local <vscale x 2 x bfloat> @test_vfsub_vf_bf16mf2_m(
+// CHECK-RV64-SAME: <vscale x 2 x i1> [[MASK:%.*]], <vscale x 2 x bfloat> [[OP1:%.*]], bfloat noundef [[OP2:%.*]], i64 noundef [[VL:%.*]]) #[[ATTR0]] {
+// CHECK-RV64-NEXT: [[ENTRY:.*:]]
+// CHECK-RV64-NEXT: [[TMP0:%.*]] = call <vscale x 2 x bfloat> @llvm.riscv.vfsub.mask.nxv2bf16.bf16.i64(<vscale x 2 x bfloat> poison, <vscale x 2 x bfloat> [[OP1]], bfloat [[OP2]], <vscale x 2 x i1> [[MASK]], i64 7, i64 [[VL]], i64 3)
+// CHECK-RV64-NEXT: ret <vscale x 2 x bfloat> [[TMP0]]
+//
+vbfloat16mf2_t test_vfsub_vf_bf16mf2_m(vbool32_t mask, vbfloat16mf2_t op1, __bf16 op2, size_t vl) {
+ return __riscv_vfsub_vf_bf16mf2_m(mask, op1, op2, vl);
+}
+
+// CHECK-RV64-LABEL: define dso_local <vscale x 4 x bfloat> @test_vfsub_vv_bf16m1_m(
+// CHECK-RV64-SAME: <vscale x 4 x i1> [[MASK:%.*]], <vscale x 4 x bfloat> [[OP1:%.*]], <vscale x 4 x bfloat> [[OP2:%.*]], i64 noundef [[VL:%.*]]) #[[ATTR0]] {
+// CHECK-RV64-NEXT: [[ENTRY:.*:]]
+// CHECK-RV64-NEXT: [[TMP0:%.*]] = call <vscale x 4 x bfloat> @llvm.riscv.vfsub.mask.nxv4bf16.nxv4bf16.i64(<vscale x 4 x bfloat> poison, <vscale x 4 x bfloat> [[OP1]], <vscale x 4 x bfloat> [[OP2]], <vscale x 4 x i1> [[MASK]], i64 7, i64 [[VL]], i64 3)
+// CHECK-RV64-NEXT: ret <vscale x 4 x bfloat> [[TMP0]]
+//
+vbfloat16m1_t test_vfsub_vv_bf16m1_m(vbool16_t mask, vbfloat16m1_t op1, vbfloat16m1_t op2, size_t vl) {
+ return __riscv_vfsub_vv_bf16m1_m(mask, op1, op2, vl);
+}
+
+// CHECK-RV64-LABEL: define dso_local <vscale x 4 x bfloat> @test_vfsub_vf_bf16m1_m(
+// CHECK-RV64-SAME: <vscale x 4 x i1> [[MASK:%.*]], <vscale x 4 x bfloat> [[OP1:%.*]], bfloat noundef [[OP2:%.*]], i64 noundef [[VL:%.*]]) #[[ATTR0]] {
+// CHECK-RV64-NEXT: [[ENTRY:.*:]]
+// CHECK-RV64-NEXT: [[TMP0:%.*]] = call <vscale x 4 x bfloat> @llvm.riscv.vfsub.mask.nxv4bf16.bf16.i64(<vscale x 4 x bfloat> poison, <vscale x 4 x bfloat> [[OP1]], bfloat [[OP2]], <vscale x 4 x i1> [[MASK]], i64 7, i64 [[VL]], i64 3)
+// CHECK-RV64-NEXT: ret <vscale x 4 x bfloat> [[TMP0]]
+//
+vbfloat16m1_t test_vfsub_vf_bf16m1_m(vbool16_t mask, vbfloat16m1_t op1, __bf16 op2, size_t vl) {
+ return __riscv_vfsub_vf_bf16m1_m(mask, op1, op2, vl);
+}
+
+// CHECK-RV64-LABEL: define dso_local <vscale x 8 x bfloat> @test_vfsub_vv_bf16m2_m(
+// CHECK-RV64-SAME: <vscale x 8 x i1> [[MASK:%.*]], <vscale x 8 x bfloat> [[OP1:%.*]], <vscale x 8 x bfloat> [[OP2:%.*]], i64 noundef [[VL:%.*]]) #[[ATTR0]] {
+// CHECK-RV64-NEXT: [[ENTRY:.*:]]
+// CHECK-RV64-NEXT: [[TMP0:%.*]] = call <vscale x 8 x bfloat> @llvm.riscv.vfsub.mask.nxv8bf16.nxv8bf16.i64(<vscale x 8 x bfloat> poison, <vscale x 8 x bfloat> [[OP1]], <vscale x 8 x bfloat> [[OP2]], <vscale x 8 x i1> [[MASK]], i64 7, i64 [[VL]], i64 3)
+// CHECK-RV64-NEXT: ret <vscale x 8 x bfloat> [[TMP0]]
+//
+vbfloat16m2_t test_vfsub_vv_bf16m2_m(vbool8_t mask, vbfloat16m2_t op1, vbfloat16m2_t op2, size_t vl) {
+ return __riscv_vfsub_vv_bf16m2_m(mask, op1, op2, vl);
+}
+
+// CHECK-RV64-LABEL: define dso_local <vscale x 8 x bfloat> @test_vfsub_vf_bf16m2_m(
+// CHECK-RV64-SAME: <vscale x 8 x i1> [[MASK:%.*]], <vscale x 8 x bfloat> [[OP1:%.*]], bfloat noundef [[OP2:%.*]], i64 noundef [[VL:%.*]]) #[[ATTR0]] {
+// CHECK-RV64-NEXT: [[ENTRY:.*:]]
+// CHECK-RV64-NEXT: [[TMP0:%.*]] = call <vscale x 8 x bfloat> @llvm.riscv.vfsub.mask.nxv8bf16.bf16.i64(<vscale x 8 x bfloat> poison, <vscale x 8 x bfloat> [[OP1]], bfloat [[OP2]], <vscale x 8 x i1> [[MASK]], i64 7, i64 [[VL]], i64 3)
+// CHECK-RV64-NEXT: ret <vscale x 8 x bfloat> [[TMP0]]
+//
+vbfloat16m2_t test_vfsub_vf_bf16m2_m(vbool8_t mask, vbfloat16m2_t op1, __bf16 op2, size_t vl) {
+ return __riscv_vfsub_vf_bf16m2_m(mask, op1, op2, vl);
+}
+
+// CHECK-RV64-LABEL: define dso_local <vscale x 16 x bfloat> @test_vfsub_vv_bf16m4_m(
+// CHECK-RV64-SAME: <vscale x 16 x i1> [[MASK:%.*]], <vscale x 16 x bfloat> [[OP1:%.*]], <vscale x 16 x bfloat> [[OP2:%.*]], i64 noundef [[VL:%.*]]) #[[ATTR0]] {
+// CHECK-RV64-NEXT: [[ENTRY:.*:]]
+// CHECK-RV64-NEXT: [[TMP0:%.*]] = call <vscale x 16 x bfloat> @llvm.riscv.vfsub.mask.nxv16bf16.nxv16bf16.i64(<vscale x 16 x bfloat> poison, <vscale x 16 x bfloat> [[OP1]], <vscale x 16 x bfloat> [[OP2]], <vscale x 16 x i1> [[MASK]], i64 7, i64 [[VL]], i64 3)
+// CHECK-RV64-NEXT: ret <vscale x 16 x bfloat> [[TMP0]]
+//
+vbfloat16m4_t test_vfsub_vv_bf16m4_m(vbool4_t mask, vbfloat16m4_t op1, vbfloat16m4_t op2, size_t vl) {
+ return __riscv_vfsub_vv_bf16m4_m(mask, op1, op2, vl);
+}
+
+// CHECK-RV64-LABEL: define dso_local <vscale x 16 x bfloat> @test_vfsub_vf_bf16m4_m(
+// CHECK-RV64-SAME: <vscale x 16 x i1> [[MASK:%.*]], <vscale x 16 x bfloat> [[OP1:%.*]], bfloat noundef [[OP2:%.*]], i64 noundef [[VL:%.*]]) #[[ATTR0]] {
+// CHECK-RV64-NEXT: [[ENTRY:.*:]]
+// CHECK-RV64-NEXT: [[TMP0:%.*]] = call <vscale x 16 x bfloat> @llvm.riscv.vfsub.mask.nxv16bf16.bf16.i64(<vscale x 16 x bfloat> poison, <vscale x 16 x bfloat> [[OP1]], bfloat [[OP2]], <vscale x 16 x i1> [[MASK]], i64 7, i64 [[VL]], i64 3)
+// CHECK-RV64-NEXT: ret <vscale x 16 x bfloat> [[TMP0]]
+//
+vbfloat16m4_t test_vfsub_vf_bf16m4_m(vbool4_t mask, vbfloat16m4_t op1, __bf16 op2, size_t vl) {
+ return __riscv_vfsub_vf_bf16m4_m(mask, op1, op2, vl);
+}
+
+// CHECK-RV64-LABEL: define dso_local <vscale x 32 x bfloat> @test_vfsub_vv_bf16m8_m(
+// CHECK-RV64-SAME: <vscale x 32 x i1> [[MASK:%.*]], <vscale x 32 x bfloat> [[OP1:%.*]], <vscale x 32 x bfloat> [[OP2:%.*]], i64 noundef [[VL:%.*]]) #[[ATTR0]] {
+// CHECK-RV64-NEXT: [[ENTRY:.*:]]
+// CHECK-RV64-NEXT: [[TMP0:%.*]] = call <vscale x 32 x bfloat> @llvm.riscv.vfsub.mask.nxv32bf16.nxv32bf16.i64(<vscale x 32 x bfloat> poison, <vscale x 32 x bfloat> [[OP1]], <vscale x 32 x bfloat> [[OP2]], <vscale x 32 x i1> [[MASK]], i64 7, i64 [[VL]], i64 3)
+// CHECK-RV64-NEXT: ret <vscale x 32 x bfloat> [[TMP0]]
+//
+vbfloat16m8_t test_vfsub_vv_bf16m8_m(vbool2_t mask, vbfloat16m8_t op1, vbfloat16m8_t op2, size_t vl) {
+ return __riscv_vfsub_vv_bf16m8_m(mask, op1, op2, vl);
+}
+
+// CHECK-RV64-LABEL: define dso_local <vscale x 32 x bfloat> @test_vfsub_vf_bf16m8_m(
+// CHECK-RV64-SAME: <vscale x 32 x i1> [[MASK:%.*]], <vscale x 32 x bfloat> [[OP1:%.*]], bfloat noundef [[OP2:%.*]], i64 noundef [[VL:%.*]]) #[[ATTR0]] {
+// CHECK-RV64-NEXT: [[ENTRY:.*:]]
+// CHECK-RV64-NEXT: [[TMP0:%.*]] = call <vscale x 32 x bfloat> @llvm.riscv.vfsub.mask.nxv32bf16.bf16.i64(<vscale x 32 x bfloat> poison, <vscale x 32 x bfloat> [[OP1]], bfloat [[OP2]], <vscale x 32 x i1> [[MASK]], i64 7, i64 [[VL]], i64 3)
+// CHECK-RV64-NEXT: ret <vscale x 32 x bfloat> [[TMP0]]
+//
+vbfloat16m8_t test_vfsub_vf_bf16m8_m(vbool2_t mask, vbfloat16m8_t op1, __bf16 op2, size_t vl) {
+ return __riscv_vfsub_vf_bf16m8_m(mask, op1, op2, vl);
+}
+
diff --git a/clang/test/CodeGen/RISCV/rvv-intrinsics-autogenerated/zvfbfa/non-policy/non-overloaded/vfwadd.c b/clang/test/CodeGen/RISCV/rvv-intrinsics-autogenerated/zvfbfa/non-policy/non-overloaded/vfwadd.c
new file mode 100644
index 0000000..24d34f4
--- /dev/null
+++ b/clang/test/CodeGen/RISCV/rvv-intrinsics-autogenerated/zvfbfa/non-policy/non-overloaded/vfwadd.c
@@ -0,0 +1,899 @@
+// NOTE: Assertions have been autogenerated by utils/update_cc_test_checks.py UTC_ARGS: --version 5
+// REQUIRES: riscv-registered-target
+// RUN: %clang_cc1 -triple riscv64 -target-feature +v \
+// RUN: -target-feature +experimental-zvfbfa -disable-O0-optnone \
+// RUN: -emit-llvm %s -o - | opt -S -passes=mem2reg | \
+// RUN: FileCheck --check-prefix=CHECK-RV64 %s
+
+#include <riscv_vector.h>
+
+// CHECK-RV64-LABEL: define dso_local <vscale x 1 x float> @test_vfwadd_vv_bf16mf4_f32mf2(
+// CHECK-RV64-SAME: <vscale x 1 x bfloat> [[VS2:%.*]], <vscale x 1 x bfloat> [[VS1:%.*]], i64 noundef [[VL:%.*]]) #[[ATTR0:[0-9]+]] {
+// CHECK-RV64-NEXT: [[ENTRY:.*:]]
+// CHECK-RV64-NEXT: [[TMP0:%.*]] = call <vscale x 1 x float> @llvm.riscv.vfwadd.nxv1f32.nxv1bf16.nxv1bf16.i64(<vscale x 1 x float> poison, <vscale x 1 x bfloat> [[VS2]], <vscale x 1 x bfloat> [[VS1]], i64 7, i64 [[VL]])
+// CHECK-RV64-NEXT: ret <vscale x 1 x float> [[TMP0]]
+//
+vfloat32mf2_t test_vfwadd_vv_bf16mf4_f32mf2(vbfloat16mf4_t vs2,
+ vbfloat16mf4_t vs1, size_t vl) {
+ return __riscv_vfwadd_vv_bf16mf4_f32mf2(vs2, vs1, vl);
+}
+
+// CHECK-RV64-LABEL: define dso_local <vscale x 1 x float> @test_vfwadd_vf_bf16mf4_f32mf2(
+// CHECK-RV64-SAME: <vscale x 1 x bfloat> [[VS2:%.*]], bfloat noundef [[RS1:%.*]], i64 noundef [[VL:%.*]]) #[[ATTR0]] {
+// CHECK-RV64-NEXT: [[ENTRY:.*:]]
+// CHECK-RV64-NEXT: [[TMP0:%.*]] = call <vscale x 1 x float> @llvm.riscv.vfwadd.nxv1f32.nxv1bf16.bf16.i64(<vscale x 1 x float> poison, <vscale x 1 x bfloat> [[VS2]], bfloat [[RS1]], i64 7, i64 [[VL]])
+// CHECK-RV64-NEXT: ret <vscale x 1 x float> [[TMP0]]
+//
+vfloat32mf2_t test_vfwadd_vf_bf16mf4_f32mf2(vbfloat16mf4_t vs2, __bf16 rs1,
+ size_t vl) {
+ return __riscv_vfwadd_vf_bf16mf4_f32mf2(vs2, rs1, vl);
+}
+
+// CHECK-RV64-LABEL: define dso_local <vscale x 1 x float> @test_vfwadd_wv_bf16mf4_f32mf2(
+// CHECK-RV64-SAME: <vscale x 1 x float> [[VS2:%.*]], <vscale x 1 x bfloat> [[VS1:%.*]], i64 noundef [[VL:%.*]]) #[[ATTR0]] {
+// CHECK-RV64-NEXT: [[ENTRY:.*:]]
+// CHECK-RV64-NEXT: [[TMP0:%.*]] = call <vscale x 1 x float> @llvm.riscv.vfwadd.w.nxv1f32.nxv1bf16.i64(<vscale x 1 x float> poison, <vscale x 1 x float> [[VS2]], <vscale x 1 x bfloat> [[VS1]], i64 7, i64 [[VL]])
+// CHECK-RV64-NEXT: ret <vscale x 1 x float> [[TMP0]]
+//
+vfloat32mf2_t test_vfwadd_wv_bf16mf4_f32mf2(vfloat32mf2_t vs2,
+ vbfloat16mf4_t vs1, size_t vl) {
+ return __riscv_vfwadd_wv_bf16mf4_f32mf2(vs2, vs1, vl);
+}
+
+// CHECK-RV64-LABEL: define dso_local <vscale x 1 x float> @test_vfwadd_wf_bf16_f32mf2(
+// CHECK-RV64-SAME: <vscale x 1 x float> [[VS2:%.*]], bfloat noundef [[RS1:%.*]], i64 noundef [[VL:%.*]]) #[[ATTR0]] {
+// CHECK-RV64-NEXT: [[ENTRY:.*:]]
+// CHECK-RV64-NEXT: [[TMP0:%.*]] = call <vscale x 1 x float> @llvm.riscv.vfwadd.w.nxv1f32.bf16.i64(<vscale x 1 x float> poison, <vscale x 1 x float> [[VS2]], bfloat [[RS1]], i64 7, i64 [[VL]])
+// CHECK-RV64-NEXT: ret <vscale x 1 x float> [[TMP0]]
+//
+vfloat32mf2_t test_vfwadd_wf_bf16_f32mf2(vfloat32mf2_t vs2, __bf16 rs1,
+ size_t vl) {
+ return __riscv_vfwadd_wf_bf16_f32mf2(vs2, rs1, vl);
+}
+
+// CHECK-RV64-LABEL: define dso_local <vscale x 2 x float> @test_vfwadd_vv_bf16mf2_f32m1(
+// CHECK-RV64-SAME: <vscale x 2 x bfloat> [[VS2:%.*]], <vscale x 2 x bfloat> [[VS1:%.*]], i64 noundef [[VL:%.*]]) #[[ATTR0]] {
+// CHECK-RV64-NEXT: [[ENTRY:.*:]]
+// CHECK-RV64-NEXT: [[TMP0:%.*]] = call <vscale x 2 x float> @llvm.riscv.vfwadd.nxv2f32.nxv2bf16.nxv2bf16.i64(<vscale x 2 x float> poison, <vscale x 2 x bfloat> [[VS2]], <vscale x 2 x bfloat> [[VS1]], i64 7, i64 [[VL]])
+// CHECK-RV64-NEXT: ret <vscale x 2 x float> [[TMP0]]
+//
+vfloat32m1_t test_vfwadd_vv_bf16mf2_f32m1(vbfloat16mf2_t vs2,
+ vbfloat16mf2_t vs1, size_t vl) {
+ return __riscv_vfwadd_vv_bf16mf2_f32m1(vs2, vs1, vl);
+}
+
+// CHECK-RV64-LABEL: define dso_local <vscale x 2 x float> @test_vfwadd_vf_bf16mf2_f32m1(
+// CHECK-RV64-SAME: <vscale x 2 x bfloat> [[VS2:%.*]], bfloat noundef [[RS1:%.*]], i64 noundef [[VL:%.*]]) #[[ATTR0]] {
+// CHECK-RV64-NEXT: [[ENTRY:.*:]]
+// CHECK-RV64-NEXT: [[TMP0:%.*]] = call <vscale x 2 x float> @llvm.riscv.vfwadd.nxv2f32.nxv2bf16.bf16.i64(<vscale x 2 x float> poison, <vscale x 2 x bfloat> [[VS2]], bfloat [[RS1]], i64 7, i64 [[VL]])
+// CHECK-RV64-NEXT: ret <vscale x 2 x float> [[TMP0]]
+//
+vfloat32m1_t test_vfwadd_vf_bf16mf2_f32m1(vbfloat16mf2_t vs2, __bf16 rs1,
+ size_t vl) {
+ return __riscv_vfwadd_vf_bf16mf2_f32m1(vs2, rs1, vl);
+}
+
+// CHECK-RV64-LABEL: define dso_local <vscale x 2 x float> @test_vfwadd_wv_bf16mf2_f32m1(
+// CHECK-RV64-SAME: <vscale x 2 x float> [[VS2:%.*]], <vscale x 2 x bfloat> [[VS1:%.*]], i64 noundef [[VL:%.*]]) #[[ATTR0]] {
+// CHECK-RV64-NEXT: [[ENTRY:.*:]]
+// CHECK-RV64-NEXT: [[TMP0:%.*]] = call <vscale x 2 x float> @llvm.riscv.vfwadd.w.nxv2f32.nxv2bf16.i64(<vscale x 2 x float> poison, <vscale x 2 x float> [[VS2]], <vscale x 2 x bfloat> [[VS1]], i64 7, i64 [[VL]])
+// CHECK-RV64-NEXT: ret <vscale x 2 x float> [[TMP0]]
+//
+vfloat32m1_t test_vfwadd_wv_bf16mf2_f32m1(vfloat32m1_t vs2, vbfloat16mf2_t vs1,
+ size_t vl) {
+ return __riscv_vfwadd_wv_bf16mf2_f32m1(vs2, vs1, vl);
+}
+
+// CHECK-RV64-LABEL: define dso_local <vscale x 2 x float> @test_vfwadd_wf_bf16_f32m1(
+// CHECK-RV64-SAME: <vscale x 2 x float> [[VS2:%.*]], bfloat noundef [[RS1:%.*]], i64 noundef [[VL:%.*]]) #[[ATTR0]] {
+// CHECK-RV64-NEXT: [[ENTRY:.*:]]
+// CHECK-RV64-NEXT: [[TMP0:%.*]] = call <vscale x 2 x float> @llvm.riscv.vfwadd.w.nxv2f32.bf16.i64(<vscale x 2 x float> poison, <vscale x 2 x float> [[VS2]], bfloat [[RS1]], i64 7, i64 [[VL]])
+// CHECK-RV64-NEXT: ret <vscale x 2 x float> [[TMP0]]
+//
+vfloat32m1_t test_vfwadd_wf_bf16_f32m1(vfloat32m1_t vs2, __bf16 rs1,
+ size_t vl) {
+ return __riscv_vfwadd_wf_bf16_f32m1(vs2, rs1, vl);
+}
+
+// CHECK-RV64-LABEL: define dso_local <vscale x 4 x float> @test_vfwadd_vv_bf16m1_f32m2(
+// CHECK-RV64-SAME: <vscale x 4 x bfloat> [[VS2:%.*]], <vscale x 4 x bfloat> [[VS1:%.*]], i64 noundef [[VL:%.*]]) #[[ATTR0]] {
+// CHECK-RV64-NEXT: [[ENTRY:.*:]]
+// CHECK-RV64-NEXT: [[TMP0:%.*]] = call <vscale x 4 x float> @llvm.riscv.vfwadd.nxv4f32.nxv4bf16.nxv4bf16.i64(<vscale x 4 x float> poison, <vscale x 4 x bfloat> [[VS2]], <vscale x 4 x bfloat> [[VS1]], i64 7, i64 [[VL]])
+// CHECK-RV64-NEXT: ret <vscale x 4 x float> [[TMP0]]
+//
+vfloat32m2_t test_vfwadd_vv_bf16m1_f32m2(vbfloat16m1_t vs2, vbfloat16m1_t vs1,
+ size_t vl) {
+ return __riscv_vfwadd_vv_bf16m1_f32m2(vs2, vs1, vl);
+}
+
+// CHECK-RV64-LABEL: define dso_local <vscale x 4 x float> @test_vfwadd_vf_bf16m1_f32m2(
+// CHECK-RV64-SAME: <vscale x 4 x bfloat> [[VS2:%.*]], bfloat noundef [[RS1:%.*]], i64 noundef [[VL:%.*]]) #[[ATTR0]] {
+// CHECK-RV64-NEXT: [[ENTRY:.*:]]
+// CHECK-RV64-NEXT: [[TMP0:%.*]] = call <vscale x 4 x float> @llvm.riscv.vfwadd.nxv4f32.nxv4bf16.bf16.i64(<vscale x 4 x float> poison, <vscale x 4 x bfloat> [[VS2]], bfloat [[RS1]], i64 7, i64 [[VL]])
+// CHECK-RV64-NEXT: ret <vscale x 4 x float> [[TMP0]]
+//
+vfloat32m2_t test_vfwadd_vf_bf16m1_f32m2(vbfloat16m1_t vs2, __bf16 rs1,
+ size_t vl) {
+ return __riscv_vfwadd_vf_bf16m1_f32m2(vs2, rs1, vl);
+}
+
+// CHECK-RV64-LABEL: define dso_local <vscale x 4 x float> @test_vfwadd_wv_bf16m1_f32m2(
+// CHECK-RV64-SAME: <vscale x 4 x float> [[VS2:%.*]], <vscale x 4 x bfloat> [[VS1:%.*]], i64 noundef [[VL:%.*]]) #[[ATTR0]] {
+// CHECK-RV64-NEXT: [[ENTRY:.*:]]
+// CHECK-RV64-NEXT: [[TMP0:%.*]] = call <vscale x 4 x float> @llvm.riscv.vfwadd.w.nxv4f32.nxv4bf16.i64(<vscale x 4 x float> poison, <vscale x 4 x float> [[VS2]], <vscale x 4 x bfloat> [[VS1]], i64 7, i64 [[VL]])
+// CHECK-RV64-NEXT: ret <vscale x 4 x float> [[TMP0]]
+//
+vfloat32m2_t test_vfwadd_wv_bf16m1_f32m2(vfloat32m2_t vs2, vbfloat16m1_t vs1,
+ size_t vl) {
+ return __riscv_vfwadd_wv_bf16m1_f32m2(vs2, vs1, vl);
+}
+
+// CHECK-RV64-LABEL: define dso_local <vscale x 4 x float> @test_vfwadd_wf_bf16_f32m2(
+// CHECK-RV64-SAME: <vscale x 4 x float> [[VS2:%.*]], bfloat noundef [[RS1:%.*]], i64 noundef [[VL:%.*]]) #[[ATTR0]] {
+// CHECK-RV64-NEXT: [[ENTRY:.*:]]
+// CHECK-RV64-NEXT: [[TMP0:%.*]] = call <vscale x 4 x float> @llvm.riscv.vfwadd.w.nxv4f32.bf16.i64(<vscale x 4 x float> poison, <vscale x 4 x float> [[VS2]], bfloat [[RS1]], i64 7, i64 [[VL]])
+// CHECK-RV64-NEXT: ret <vscale x 4 x float> [[TMP0]]
+//
+vfloat32m2_t test_vfwadd_wf_bf16_f32m2(vfloat32m2_t vs2, __bf16 rs1,
+ size_t vl) {
+ return __riscv_vfwadd_wf_bf16_f32m2(vs2, rs1, vl);
+}
+
+// CHECK-RV64-LABEL: define dso_local <vscale x 8 x float> @test_vfwadd_vv_bf16m2_f32m4(
+// CHECK-RV64-SAME: <vscale x 8 x bfloat> [[VS2:%.*]], <vscale x 8 x bfloat> [[VS1:%.*]], i64 noundef [[VL:%.*]]) #[[ATTR0]] {
+// CHECK-RV64-NEXT: [[ENTRY:.*:]]
+// CHECK-RV64-NEXT: [[TMP0:%.*]] = call <vscale x 8 x float> @llvm.riscv.vfwadd.nxv8f32.nxv8bf16.nxv8bf16.i64(<vscale x 8 x float> poison, <vscale x 8 x bfloat> [[VS2]], <vscale x 8 x bfloat> [[VS1]], i64 7, i64 [[VL]])
+// CHECK-RV64-NEXT: ret <vscale x 8 x float> [[TMP0]]
+//
+vfloat32m4_t test_vfwadd_vv_bf16m2_f32m4(vbfloat16m2_t vs2, vbfloat16m2_t vs1,
+ size_t vl) {
+ return __riscv_vfwadd_vv_bf16m2_f32m4(vs2, vs1, vl);
+}
+
+// CHECK-RV64-LABEL: define dso_local <vscale x 8 x float> @test_vfwadd_vf_bf16m2_f32m4(
+// CHECK-RV64-SAME: <vscale x 8 x bfloat> [[VS2:%.*]], bfloat noundef [[RS1:%.*]], i64 noundef [[VL:%.*]]) #[[ATTR0]] {
+// CHECK-RV64-NEXT: [[ENTRY:.*:]]
+// CHECK-RV64-NEXT: [[TMP0:%.*]] = call <vscale x 8 x float> @llvm.riscv.vfwadd.nxv8f32.nxv8bf16.bf16.i64(<vscale x 8 x float> poison, <vscale x 8 x bfloat> [[VS2]], bfloat [[RS1]], i64 7, i64 [[VL]])
+// CHECK-RV64-NEXT: ret <vscale x 8 x float> [[TMP0]]
+//
+vfloat32m4_t test_vfwadd_vf_bf16m2_f32m4(vbfloat16m2_t vs2, __bf16 rs1,
+ size_t vl) {
+ return __riscv_vfwadd_vf_bf16m2_f32m4(vs2, rs1, vl);
+}
+
+// CHECK-RV64-LABEL: define dso_local <vscale x 8 x float> @test_vfwadd_wv_bf16m2_f32m4(
+// CHECK-RV64-SAME: <vscale x 8 x float> [[VS2:%.*]], <vscale x 8 x bfloat> [[VS1:%.*]], i64 noundef [[VL:%.*]]) #[[ATTR0]] {
+// CHECK-RV64-NEXT: [[ENTRY:.*:]]
+// CHECK-RV64-NEXT: [[TMP0:%.*]] = call <vscale x 8 x float> @llvm.riscv.vfwadd.w.nxv8f32.nxv8bf16.i64(<vscale x 8 x float> poison, <vscale x 8 x float> [[VS2]], <vscale x 8 x bfloat> [[VS1]], i64 7, i64 [[VL]])
+// CHECK-RV64-NEXT: ret <vscale x 8 x float> [[TMP0]]
+//
+vfloat32m4_t test_vfwadd_wv_bf16m2_f32m4(vfloat32m4_t vs2, vbfloat16m2_t vs1,
+ size_t vl) {
+ return __riscv_vfwadd_wv_bf16m2_f32m4(vs2, vs1, vl);
+}
+
+// CHECK-RV64-LABEL: define dso_local <vscale x 8 x float> @test_vfwadd_wf_bf16_f32m4(
+// CHECK-RV64-SAME: <vscale x 8 x float> [[VS2:%.*]], bfloat noundef [[RS1:%.*]], i64 noundef [[VL:%.*]]) #[[ATTR0]] {
+// CHECK-RV64-NEXT: [[ENTRY:.*:]]
+// CHECK-RV64-NEXT: [[TMP0:%.*]] = call <vscale x 8 x float> @llvm.riscv.vfwadd.w.nxv8f32.bf16.i64(<vscale x 8 x float> poison, <vscale x 8 x float> [[VS2]], bfloat [[RS1]], i64 7, i64 [[VL]])
+// CHECK-RV64-NEXT: ret <vscale x 8 x float> [[TMP0]]
+//
+vfloat32m4_t test_vfwadd_wf_bf16_f32m4(vfloat32m4_t vs2, __bf16 rs1,
+ size_t vl) {
+ return __riscv_vfwadd_wf_bf16_f32m4(vs2, rs1, vl);
+}
+
+// CHECK-RV64-LABEL: define dso_local <vscale x 16 x float> @test_vfwadd_vv_bf16m4_f32m8(
+// CHECK-RV64-SAME: <vscale x 16 x bfloat> [[VS2:%.*]], <vscale x 16 x bfloat> [[VS1:%.*]], i64 noundef [[VL:%.*]]) #[[ATTR0]] {
+// CHECK-RV64-NEXT: [[ENTRY:.*:]]
+// CHECK-RV64-NEXT: [[TMP0:%.*]] = call <vscale x 16 x float> @llvm.riscv.vfwadd.nxv16f32.nxv16bf16.nxv16bf16.i64(<vscale x 16 x float> poison, <vscale x 16 x bfloat> [[VS2]], <vscale x 16 x bfloat> [[VS1]], i64 7, i64 [[VL]])
+// CHECK-RV64-NEXT: ret <vscale x 16 x float> [[TMP0]]
+//
+vfloat32m8_t test_vfwadd_vv_bf16m4_f32m8(vbfloat16m4_t vs2, vbfloat16m4_t vs1,
+ size_t vl) {
+ return __riscv_vfwadd_vv_bf16m4_f32m8(vs2, vs1, vl);
+}
+
+// CHECK-RV64-LABEL: define dso_local <vscale x 16 x float> @test_vfwadd_vf_bf16m4_f32m8(
+// CHECK-RV64-SAME: <vscale x 16 x bfloat> [[VS2:%.*]], bfloat noundef [[RS1:%.*]], i64 noundef [[VL:%.*]]) #[[ATTR0]] {
+// CHECK-RV64-NEXT: [[ENTRY:.*:]]
+// CHECK-RV64-NEXT: [[TMP0:%.*]] = call <vscale x 16 x float> @llvm.riscv.vfwadd.nxv16f32.nxv16bf16.bf16.i64(<vscale x 16 x float> poison, <vscale x 16 x bfloat> [[VS2]], bfloat [[RS1]], i64 7, i64 [[VL]])
+// CHECK-RV64-NEXT: ret <vscale x 16 x float> [[TMP0]]
+//
+vfloat32m8_t test_vfwadd_vf_bf16m4_f32m8(vbfloat16m4_t vs2, __bf16 rs1,
+ size_t vl) {
+ return __riscv_vfwadd_vf_bf16m4_f32m8(vs2, rs1, vl);
+}
+
+// CHECK-RV64-LABEL: define dso_local <vscale x 16 x float> @test_vfwadd_wv_bf16m4_f32m8(
+// CHECK-RV64-SAME: <vscale x 16 x float> [[VS2:%.*]], <vscale x 16 x bfloat> [[VS1:%.*]], i64 noundef [[VL:%.*]]) #[[ATTR0]] {
+// CHECK-RV64-NEXT: [[ENTRY:.*:]]
+// CHECK-RV64-NEXT: [[TMP0:%.*]] = call <vscale x 16 x float> @llvm.riscv.vfwadd.w.nxv16f32.nxv16bf16.i64(<vscale x 16 x float> poison, <vscale x 16 x float> [[VS2]], <vscale x 16 x bfloat> [[VS1]], i64 7, i64 [[VL]])
+// CHECK-RV64-NEXT: ret <vscale x 16 x float> [[TMP0]]
+//
+vfloat32m8_t test_vfwadd_wv_bf16m4_f32m8(vfloat32m8_t vs2, vbfloat16m4_t vs1,
+ size_t vl) {
+ return __riscv_vfwadd_wv_bf16m4_f32m8(vs2, vs1, vl);
+}
+
+// CHECK-RV64-LABEL: define dso_local <vscale x 16 x float> @test_vfwadd_wf_bf16_f32m8(
+// CHECK-RV64-SAME: <vscale x 16 x float> [[VS2:%.*]], bfloat noundef [[RS1:%.*]], i64 noundef [[VL:%.*]]) #[[ATTR0]] {
+// CHECK-RV64-NEXT: [[ENTRY:.*:]]
+// CHECK-RV64-NEXT: [[TMP0:%.*]] = call <vscale x 16 x float> @llvm.riscv.vfwadd.w.nxv16f32.bf16.i64(<vscale x 16 x float> poison, <vscale x 16 x float> [[VS2]], bfloat [[RS1]], i64 7, i64 [[VL]])
+// CHECK-RV64-NEXT: ret <vscale x 16 x float> [[TMP0]]
+//
+vfloat32m8_t test_vfwadd_wf_bf16_f32m8(vfloat32m8_t vs2, __bf16 rs1,
+ size_t vl) {
+ return __riscv_vfwadd_wf_bf16_f32m8(vs2, rs1, vl);
+}
+
+// CHECK-RV64-LABEL: define dso_local <vscale x 1 x float> @test_vfwadd_vv_bf16mf4_f32mf2_m(
+// CHECK-RV64-SAME: <vscale x 1 x i1> [[VM:%.*]], <vscale x 1 x bfloat> [[VS2:%.*]], <vscale x 1 x bfloat> [[VS1:%.*]], i64 noundef [[VL:%.*]]) #[[ATTR0]] {
+// CHECK-RV64-NEXT: [[ENTRY:.*:]]
+// CHECK-RV64-NEXT: [[TMP0:%.*]] = call <vscale x 1 x float> @llvm.riscv.vfwadd.mask.nxv1f32.nxv1bf16.nxv1bf16.i64(<vscale x 1 x float> poison, <vscale x 1 x bfloat> [[VS2]], <vscale x 1 x bfloat> [[VS1]], <vscale x 1 x i1> [[VM]], i64 7, i64 [[VL]], i64 3)
+// CHECK-RV64-NEXT: ret <vscale x 1 x float> [[TMP0]]
+//
+vfloat32mf2_t test_vfwadd_vv_bf16mf4_f32mf2_m(vbool64_t vm, vbfloat16mf4_t vs2,
+ vbfloat16mf4_t vs1, size_t vl) {
+ return __riscv_vfwadd_vv_bf16mf4_f32mf2_m(vm, vs2, vs1, vl);
+}
+
+// CHECK-RV64-LABEL: define dso_local <vscale x 1 x float> @test_vfwadd_vf_bf16mf4_f32mf2_m(
+// CHECK-RV64-SAME: <vscale x 1 x i1> [[VM:%.*]], <vscale x 1 x bfloat> [[VS2:%.*]], bfloat noundef [[RS1:%.*]], i64 noundef [[VL:%.*]]) #[[ATTR0]] {
+// CHECK-RV64-NEXT: [[ENTRY:.*:]]
+// CHECK-RV64-NEXT: [[TMP0:%.*]] = call <vscale x 1 x float> @llvm.riscv.vfwadd.mask.nxv1f32.nxv1bf16.bf16.i64(<vscale x 1 x float> poison, <vscale x 1 x bfloat> [[VS2]], bfloat [[RS1]], <vscale x 1 x i1> [[VM]], i64 7, i64 [[VL]], i64 3)
+// CHECK-RV64-NEXT: ret <vscale x 1 x float> [[TMP0]]
+//
+vfloat32mf2_t test_vfwadd_vf_bf16mf4_f32mf2_m(vbool64_t vm, vbfloat16mf4_t vs2,
+ __bf16 rs1, size_t vl) {
+ return __riscv_vfwadd_vf_bf16mf4_f32mf2_m(vm, vs2, rs1, vl);
+}
+
+// CHECK-RV64-LABEL: define dso_local <vscale x 1 x float> @test_vfwadd_wv_bf16mf4_f32mf2_m(
+// CHECK-RV64-SAME: <vscale x 1 x i1> [[VM:%.*]], <vscale x 1 x float> [[VS2:%.*]], <vscale x 1 x bfloat> [[VS1:%.*]], i64 noundef [[VL:%.*]]) #[[ATTR0]] {
+// CHECK-RV64-NEXT: [[ENTRY:.*:]]
+// CHECK-RV64-NEXT: [[TMP0:%.*]] = call <vscale x 1 x float> @llvm.riscv.vfwadd.w.mask.nxv1f32.nxv1bf16.i64(<vscale x 1 x float> poison, <vscale x 1 x float> [[VS2]], <vscale x 1 x bfloat> [[VS1]], <vscale x 1 x i1> [[VM]], i64 7, i64 [[VL]], i64 3)
+// CHECK-RV64-NEXT: ret <vscale x 1 x float> [[TMP0]]
+//
+vfloat32mf2_t test_vfwadd_wv_bf16mf4_f32mf2_m(vbool64_t vm, vfloat32mf2_t vs2,
+ vbfloat16mf4_t vs1, size_t vl) {
+ return __riscv_vfwadd_wv_bf16mf4_f32mf2_m(vm, vs2, vs1, vl);
+}
+
+// CHECK-RV64-LABEL: define dso_local <vscale x 1 x float> @test_vfwadd_wf_bf16_f32mf2_m(
+// CHECK-RV64-SAME: <vscale x 1 x i1> [[VM:%.*]], <vscale x 1 x float> [[VS2:%.*]], bfloat noundef [[RS1:%.*]], i64 noundef [[VL:%.*]]) #[[ATTR0]] {
+// CHECK-RV64-NEXT: [[ENTRY:.*:]]
+// CHECK-RV64-NEXT: [[TMP0:%.*]] = call <vscale x 1 x float> @llvm.riscv.vfwadd.w.mask.nxv1f32.bf16.i64(<vscale x 1 x float> poison, <vscale x 1 x float> [[VS2]], bfloat [[RS1]], <vscale x 1 x i1> [[VM]], i64 7, i64 [[VL]], i64 3)
+// CHECK-RV64-NEXT: ret <vscale x 1 x float> [[TMP0]]
+//
+vfloat32mf2_t test_vfwadd_wf_bf16_f32mf2_m(vbool64_t vm, vfloat32mf2_t vs2,
+ __bf16 rs1, size_t vl) {
+ return __riscv_vfwadd_wf_bf16_f32mf2_m(vm, vs2, rs1, vl);
+}
+
+// CHECK-RV64-LABEL: define dso_local <vscale x 2 x float> @test_vfwadd_vv_bf16mf2_f32m1_m(
+// CHECK-RV64-SAME: <vscale x 2 x i1> [[VM:%.*]], <vscale x 2 x bfloat> [[VS2:%.*]], <vscale x 2 x bfloat> [[VS1:%.*]], i64 noundef [[VL:%.*]]) #[[ATTR0]] {
+// CHECK-RV64-NEXT: [[ENTRY:.*:]]
+// CHECK-RV64-NEXT: [[TMP0:%.*]] = call <vscale x 2 x float> @llvm.riscv.vfwadd.mask.nxv2f32.nxv2bf16.nxv2bf16.i64(<vscale x 2 x float> poison, <vscale x 2 x bfloat> [[VS2]], <vscale x 2 x bfloat> [[VS1]], <vscale x 2 x i1> [[VM]], i64 7, i64 [[VL]], i64 3)
+// CHECK-RV64-NEXT: ret <vscale x 2 x float> [[TMP0]]
+//
+vfloat32m1_t test_vfwadd_vv_bf16mf2_f32m1_m(vbool32_t vm, vbfloat16mf2_t vs2,
+ vbfloat16mf2_t vs1, size_t vl) {
+ return __riscv_vfwadd_vv_bf16mf2_f32m1_m(vm, vs2, vs1, vl);
+}
+
+// CHECK-RV64-LABEL: define dso_local <vscale x 2 x float> @test_vfwadd_vf_bf16mf2_f32m1_m(
+// CHECK-RV64-SAME: <vscale x 2 x i1> [[VM:%.*]], <vscale x 2 x bfloat> [[VS2:%.*]], bfloat noundef [[RS1:%.*]], i64 noundef [[VL:%.*]]) #[[ATTR0]] {
+// CHECK-RV64-NEXT: [[ENTRY:.*:]]
+// CHECK-RV64-NEXT: [[TMP0:%.*]] = call <vscale x 2 x float> @llvm.riscv.vfwadd.mask.nxv2f32.nxv2bf16.bf16.i64(<vscale x 2 x float> poison, <vscale x 2 x bfloat> [[VS2]], bfloat [[RS1]], <vscale x 2 x i1> [[VM]], i64 7, i64 [[VL]], i64 3)
+// CHECK-RV64-NEXT: ret <vscale x 2 x float> [[TMP0]]
+//
+vfloat32m1_t test_vfwadd_vf_bf16mf2_f32m1_m(vbool32_t vm, vbfloat16mf2_t vs2,
+ __bf16 rs1, size_t vl) {
+ return __riscv_vfwadd_vf_bf16mf2_f32m1_m(vm, vs2, rs1, vl);
+}
+
+// CHECK-RV64-LABEL: define dso_local <vscale x 2 x float> @test_vfwadd_wv_bf16mf2_f32m1_m(
+// CHECK-RV64-SAME: <vscale x 2 x i1> [[VM:%.*]], <vscale x 2 x float> [[VS2:%.*]], <vscale x 2 x bfloat> [[VS1:%.*]], i64 noundef [[VL:%.*]]) #[[ATTR0]] {
+// CHECK-RV64-NEXT: [[ENTRY:.*:]]
+// CHECK-RV64-NEXT: [[TMP0:%.*]] = call <vscale x 2 x float> @llvm.riscv.vfwadd.w.mask.nxv2f32.nxv2bf16.i64(<vscale x 2 x float> poison, <vscale x 2 x float> [[VS2]], <vscale x 2 x bfloat> [[VS1]], <vscale x 2 x i1> [[VM]], i64 7, i64 [[VL]], i64 3)
+// CHECK-RV64-NEXT: ret <vscale x 2 x float> [[TMP0]]
+//
+vfloat32m1_t test_vfwadd_wv_bf16mf2_f32m1_m(vbool32_t vm, vfloat32m1_t vs2,
+ vbfloat16mf2_t vs1, size_t vl) {
+ return __riscv_vfwadd_wv_bf16mf2_f32m1_m(vm, vs2, vs1, vl);
+}
+
+// CHECK-RV64-LABEL: define dso_local <vscale x 2 x float> @test_vfwadd_wf_bf16_f32m1_m(
+// CHECK-RV64-SAME: <vscale x 2 x i1> [[VM:%.*]], <vscale x 2 x float> [[VS2:%.*]], bfloat noundef [[RS1:%.*]], i64 noundef [[VL:%.*]]) #[[ATTR0]] {
+// CHECK-RV64-NEXT: [[ENTRY:.*:]]
+// CHECK-RV64-NEXT: [[TMP0:%.*]] = call <vscale x 2 x float> @llvm.riscv.vfwadd.w.mask.nxv2f32.bf16.i64(<vscale x 2 x float> poison, <vscale x 2 x float> [[VS2]], bfloat [[RS1]], <vscale x 2 x i1> [[VM]], i64 7, i64 [[VL]], i64 3)
+// CHECK-RV64-NEXT: ret <vscale x 2 x float> [[TMP0]]
+//
+vfloat32m1_t test_vfwadd_wf_bf16_f32m1_m(vbool32_t vm, vfloat32m1_t vs2,
+ __bf16 rs1, size_t vl) {
+ return __riscv_vfwadd_wf_bf16_f32m1_m(vm, vs2, rs1, vl);
+}
+
+// CHECK-RV64-LABEL: define dso_local <vscale x 4 x float> @test_vfwadd_vv_bf16m1_f32m2_m(
+// CHECK-RV64-SAME: <vscale x 4 x i1> [[VM:%.*]], <vscale x 4 x bfloat> [[VS2:%.*]], <vscale x 4 x bfloat> [[VS1:%.*]], i64 noundef [[VL:%.*]]) #[[ATTR0]] {
+// CHECK-RV64-NEXT: [[ENTRY:.*:]]
+// CHECK-RV64-NEXT: [[TMP0:%.*]] = call <vscale x 4 x float> @llvm.riscv.vfwadd.mask.nxv4f32.nxv4bf16.nxv4bf16.i64(<vscale x 4 x float> poison, <vscale x 4 x bfloat> [[VS2]], <vscale x 4 x bfloat> [[VS1]], <vscale x 4 x i1> [[VM]], i64 7, i64 [[VL]], i64 3)
+// CHECK-RV64-NEXT: ret <vscale x 4 x float> [[TMP0]]
+//
+vfloat32m2_t test_vfwadd_vv_bf16m1_f32m2_m(vbool16_t vm, vbfloat16m1_t vs2,
+ vbfloat16m1_t vs1, size_t vl) {
+ return __riscv_vfwadd_vv_bf16m1_f32m2_m(vm, vs2, vs1, vl);
+}
+
+// CHECK-RV64-LABEL: define dso_local <vscale x 4 x float> @test_vfwadd_vf_bf16m1_f32m2_m(
+// CHECK-RV64-SAME: <vscale x 4 x i1> [[VM:%.*]], <vscale x 4 x bfloat> [[VS2:%.*]], bfloat noundef [[RS1:%.*]], i64 noundef [[VL:%.*]]) #[[ATTR0]] {
+// CHECK-RV64-NEXT: [[ENTRY:.*:]]
+// CHECK-RV64-NEXT: [[TMP0:%.*]] = call <vscale x 4 x float> @llvm.riscv.vfwadd.mask.nxv4f32.nxv4bf16.bf16.i64(<vscale x 4 x float> poison, <vscale x 4 x bfloat> [[VS2]], bfloat [[RS1]], <vscale x 4 x i1> [[VM]], i64 7, i64 [[VL]], i64 3)
+// CHECK-RV64-NEXT: ret <vscale x 4 x float> [[TMP0]]
+//
+vfloat32m2_t test_vfwadd_vf_bf16m1_f32m2_m(vbool16_t vm, vbfloat16m1_t vs2,
+ __bf16 rs1, size_t vl) {
+ return __riscv_vfwadd_vf_bf16m1_f32m2_m(vm, vs2, rs1, vl);
+}
+
+// CHECK-RV64-LABEL: define dso_local <vscale x 4 x float> @test_vfwadd_wv_bf16m1_f32m2_m(
+// CHECK-RV64-SAME: <vscale x 4 x i1> [[VM:%.*]], <vscale x 4 x float> [[VS2:%.*]], <vscale x 4 x bfloat> [[VS1:%.*]], i64 noundef [[VL:%.*]]) #[[ATTR0]] {
+// CHECK-RV64-NEXT: [[ENTRY:.*:]]
+// CHECK-RV64-NEXT: [[TMP0:%.*]] = call <vscale x 4 x float> @llvm.riscv.vfwadd.w.mask.nxv4f32.nxv4bf16.i64(<vscale x 4 x float> poison, <vscale x 4 x float> [[VS2]], <vscale x 4 x bfloat> [[VS1]], <vscale x 4 x i1> [[VM]], i64 7, i64 [[VL]], i64 3)
+// CHECK-RV64-NEXT: ret <vscale x 4 x float> [[TMP0]]
+//
+vfloat32m2_t test_vfwadd_wv_bf16m1_f32m2_m(vbool16_t vm, vfloat32m2_t vs2,
+ vbfloat16m1_t vs1, size_t vl) {
+ return __riscv_vfwadd_wv_bf16m1_f32m2_m(vm, vs2, vs1, vl);
+}
+
+// CHECK-RV64-LABEL: define dso_local <vscale x 4 x float> @test_vfwadd_wf_bf16_f32m2_m(
+// CHECK-RV64-SAME: <vscale x 4 x i1> [[VM:%.*]], <vscale x 4 x float> [[VS2:%.*]], bfloat noundef [[RS1:%.*]], i64 noundef [[VL:%.*]]) #[[ATTR0]] {
+// CHECK-RV64-NEXT: [[ENTRY:.*:]]
+// CHECK-RV64-NEXT: [[TMP0:%.*]] = call <vscale x 4 x float> @llvm.riscv.vfwadd.w.mask.nxv4f32.bf16.i64(<vscale x 4 x float> poison, <vscale x 4 x float> [[VS2]], bfloat [[RS1]], <vscale x 4 x i1> [[VM]], i64 7, i64 [[VL]], i64 3)
+// CHECK-RV64-NEXT: ret <vscale x 4 x float> [[TMP0]]
+//
+vfloat32m2_t test_vfwadd_wf_bf16_f32m2_m(vbool16_t vm, vfloat32m2_t vs2,
+ __bf16 rs1, size_t vl) {
+ return __riscv_vfwadd_wf_bf16_f32m2_m(vm, vs2, rs1, vl);
+}
+
+// CHECK-RV64-LABEL: define dso_local <vscale x 8 x float> @test_vfwadd_vv_bf16m2_f32m4_m(
+// CHECK-RV64-SAME: <vscale x 8 x i1> [[VM:%.*]], <vscale x 8 x bfloat> [[VS2:%.*]], <vscale x 8 x bfloat> [[VS1:%.*]], i64 noundef [[VL:%.*]]) #[[ATTR0]] {
+// CHECK-RV64-NEXT: [[ENTRY:.*:]]
+// CHECK-RV64-NEXT: [[TMP0:%.*]] = call <vscale x 8 x float> @llvm.riscv.vfwadd.mask.nxv8f32.nxv8bf16.nxv8bf16.i64(<vscale x 8 x float> poison, <vscale x 8 x bfloat> [[VS2]], <vscale x 8 x bfloat> [[VS1]], <vscale x 8 x i1> [[VM]], i64 7, i64 [[VL]], i64 3)
+// CHECK-RV64-NEXT: ret <vscale x 8 x float> [[TMP0]]
+//
+vfloat32m4_t test_vfwadd_vv_bf16m2_f32m4_m(vbool8_t vm, vbfloat16m2_t vs2,
+ vbfloat16m2_t vs1, size_t vl) {
+ return __riscv_vfwadd_vv_bf16m2_f32m4_m(vm, vs2, vs1, vl);
+}
+
+// CHECK-RV64-LABEL: define dso_local <vscale x 8 x float> @test_vfwadd_vf_bf16m2_f32m4_m(
+// CHECK-RV64-SAME: <vscale x 8 x i1> [[VM:%.*]], <vscale x 8 x bfloat> [[VS2:%.*]], bfloat noundef [[RS1:%.*]], i64 noundef [[VL:%.*]]) #[[ATTR0]] {
+// CHECK-RV64-NEXT: [[ENTRY:.*:]]
+// CHECK-RV64-NEXT: [[TMP0:%.*]] = call <vscale x 8 x float> @llvm.riscv.vfwadd.mask.nxv8f32.nxv8bf16.bf16.i64(<vscale x 8 x float> poison, <vscale x 8 x bfloat> [[VS2]], bfloat [[RS1]], <vscale x 8 x i1> [[VM]], i64 7, i64 [[VL]], i64 3)
+// CHECK-RV64-NEXT: ret <vscale x 8 x float> [[TMP0]]
+//
+vfloat32m4_t test_vfwadd_vf_bf16m2_f32m4_m(vbool8_t vm, vbfloat16m2_t vs2,
+ __bf16 rs1, size_t vl) {
+ return __riscv_vfwadd_vf_bf16m2_f32m4_m(vm, vs2, rs1, vl);
+}
+
+// CHECK-RV64-LABEL: define dso_local <vscale x 8 x float> @test_vfwadd_wv_bf16m2_f32m4_m(
+// CHECK-RV64-SAME: <vscale x 8 x i1> [[VM:%.*]], <vscale x 8 x float> [[VS2:%.*]], <vscale x 8 x bfloat> [[VS1:%.*]], i64 noundef [[VL:%.*]]) #[[ATTR0]] {
+// CHECK-RV64-NEXT: [[ENTRY:.*:]]
+// CHECK-RV64-NEXT: [[TMP0:%.*]] = call <vscale x 8 x float> @llvm.riscv.vfwadd.w.mask.nxv8f32.nxv8bf16.i64(<vscale x 8 x float> poison, <vscale x 8 x float> [[VS2]], <vscale x 8 x bfloat> [[VS1]], <vscale x 8 x i1> [[VM]], i64 7, i64 [[VL]], i64 3)
+// CHECK-RV64-NEXT: ret <vscale x 8 x float> [[TMP0]]
+//
+vfloat32m4_t test_vfwadd_wv_bf16m2_f32m4_m(vbool8_t vm, vfloat32m4_t vs2,
+ vbfloat16m2_t vs1, size_t vl) {
+ return __riscv_vfwadd_wv_bf16m2_f32m4_m(vm, vs2, vs1, vl);
+}
+
+// CHECK-RV64-LABEL: define dso_local <vscale x 8 x float> @test_vfwadd_wf_bf16_f32m4_m(
+// CHECK-RV64-SAME: <vscale x 8 x i1> [[VM:%.*]], <vscale x 8 x float> [[VS2:%.*]], bfloat noundef [[RS1:%.*]], i64 noundef [[VL:%.*]]) #[[ATTR0]] {
+// CHECK-RV64-NEXT: [[ENTRY:.*:]]
+// CHECK-RV64-NEXT: [[TMP0:%.*]] = call <vscale x 8 x float> @llvm.riscv.vfwadd.w.mask.nxv8f32.bf16.i64(<vscale x 8 x float> poison, <vscale x 8 x float> [[VS2]], bfloat [[RS1]], <vscale x 8 x i1> [[VM]], i64 7, i64 [[VL]], i64 3)
+// CHECK-RV64-NEXT: ret <vscale x 8 x float> [[TMP0]]
+//
+vfloat32m4_t test_vfwadd_wf_bf16_f32m4_m(vbool8_t vm, vfloat32m4_t vs2,
+ __bf16 rs1, size_t vl) {
+ return __riscv_vfwadd_wf_bf16_f32m4_m(vm, vs2, rs1, vl);
+}
+
+// CHECK-RV64-LABEL: define dso_local <vscale x 16 x float> @test_vfwadd_vv_bf16m4_f32m8_m(
+// CHECK-RV64-SAME: <vscale x 16 x i1> [[VM:%.*]], <vscale x 16 x bfloat> [[VS2:%.*]], <vscale x 16 x bfloat> [[VS1:%.*]], i64 noundef [[VL:%.*]]) #[[ATTR0]] {
+// CHECK-RV64-NEXT: [[ENTRY:.*:]]
+// CHECK-RV64-NEXT: [[TMP0:%.*]] = call <vscale x 16 x float> @llvm.riscv.vfwadd.mask.nxv16f32.nxv16bf16.nxv16bf16.i64(<vscale x 16 x float> poison, <vscale x 16 x bfloat> [[VS2]], <vscale x 16 x bfloat> [[VS1]], <vscale x 16 x i1> [[VM]], i64 7, i64 [[VL]], i64 3)
+// CHECK-RV64-NEXT: ret <vscale x 16 x float> [[TMP0]]
+//
+vfloat32m8_t test_vfwadd_vv_bf16m4_f32m8_m(vbool4_t vm, vbfloat16m4_t vs2,
+ vbfloat16m4_t vs1, size_t vl) {
+ return __riscv_vfwadd_vv_bf16m4_f32m8_m(vm, vs2, vs1, vl);
+}
+
+// CHECK-RV64-LABEL: define dso_local <vscale x 16 x float> @test_vfwadd_vf_bf16m4_f32m8_m(
+// CHECK-RV64-SAME: <vscale x 16 x i1> [[VM:%.*]], <vscale x 16 x bfloat> [[VS2:%.*]], bfloat noundef [[RS1:%.*]], i64 noundef [[VL:%.*]]) #[[ATTR0]] {
+// CHECK-RV64-NEXT: [[ENTRY:.*:]]
+// CHECK-RV64-NEXT: [[TMP0:%.*]] = call <vscale x 16 x float> @llvm.riscv.vfwadd.mask.nxv16f32.nxv16bf16.bf16.i64(<vscale x 16 x float> poison, <vscale x 16 x bfloat> [[VS2]], bfloat [[RS1]], <vscale x 16 x i1> [[VM]], i64 7, i64 [[VL]], i64 3)
+// CHECK-RV64-NEXT: ret <vscale x 16 x float> [[TMP0]]
+//
+vfloat32m8_t test_vfwadd_vf_bf16m4_f32m8_m(vbool4_t vm, vbfloat16m4_t vs2,
+ __bf16 rs1, size_t vl) {
+ return __riscv_vfwadd_vf_bf16m4_f32m8_m(vm, vs2, rs1, vl);
+}
+
+// CHECK-RV64-LABEL: define dso_local <vscale x 16 x float> @test_vfwadd_wv_bf16m4_f32m8_m(
+// CHECK-RV64-SAME: <vscale x 16 x i1> [[VM:%.*]], <vscale x 16 x float> [[VS2:%.*]], <vscale x 16 x bfloat> [[VS1:%.*]], i64 noundef [[VL:%.*]]) #[[ATTR0]] {
+// CHECK-RV64-NEXT: [[ENTRY:.*:]]
+// CHECK-RV64-NEXT: [[TMP0:%.*]] = call <vscale x 16 x float> @llvm.riscv.vfwadd.w.mask.nxv16f32.nxv16bf16.i64(<vscale x 16 x float> poison, <vscale x 16 x float> [[VS2]], <vscale x 16 x bfloat> [[VS1]], <vscale x 16 x i1> [[VM]], i64 7, i64 [[VL]], i64 3)
+// CHECK-RV64-NEXT: ret <vscale x 16 x float> [[TMP0]]
+//
+vfloat32m8_t test_vfwadd_wv_bf16m4_f32m8_m(vbool4_t vm, vfloat32m8_t vs2,
+ vbfloat16m4_t vs1, size_t vl) {
+ return __riscv_vfwadd_wv_bf16m4_f32m8_m(vm, vs2, vs1, vl);
+}
+
+// CHECK-RV64-LABEL: define dso_local <vscale x 16 x float> @test_vfwadd_wf_bf16_f32m8_m(
+// CHECK-RV64-SAME: <vscale x 16 x i1> [[VM:%.*]], <vscale x 16 x float> [[VS2:%.*]], bfloat noundef [[RS1:%.*]], i64 noundef [[VL:%.*]]) #[[ATTR0]] {
+// CHECK-RV64-NEXT: [[ENTRY:.*:]]
+// CHECK-RV64-NEXT: [[TMP0:%.*]] = call <vscale x 16 x float> @llvm.riscv.vfwadd.w.mask.nxv16f32.bf16.i64(<vscale x 16 x float> poison, <vscale x 16 x float> [[VS2]], bfloat [[RS1]], <vscale x 16 x i1> [[VM]], i64 7, i64 [[VL]], i64 3)
+// CHECK-RV64-NEXT: ret <vscale x 16 x float> [[TMP0]]
+//
+vfloat32m8_t test_vfwadd_wf_bf16_f32m8_m(vbool4_t vm, vfloat32m8_t vs2,
+ __bf16 rs1, size_t vl) {
+ return __riscv_vfwadd_wf_bf16_f32m8_m(vm, vs2, rs1, vl);
+}
+
+// CHECK-RV64-LABEL: define dso_local <vscale x 1 x float> @test_vfwadd_vv_bf16mf4_f32mf2_rm(
+// CHECK-RV64-SAME: <vscale x 1 x bfloat> [[VS2:%.*]], <vscale x 1 x bfloat> [[VS1:%.*]], i64 noundef [[VL:%.*]]) #[[ATTR0]] {
+// CHECK-RV64-NEXT: [[ENTRY:.*:]]
+// CHECK-RV64-NEXT: [[TMP0:%.*]] = call <vscale x 1 x float> @llvm.riscv.vfwadd.nxv1f32.nxv1bf16.nxv1bf16.i64(<vscale x 1 x float> poison, <vscale x 1 x bfloat> [[VS2]], <vscale x 1 x bfloat> [[VS1]], i64 0, i64 [[VL]])
+// CHECK-RV64-NEXT: ret <vscale x 1 x float> [[TMP0]]
+//
+vfloat32mf2_t test_vfwadd_vv_bf16mf4_f32mf2_rm(vbfloat16mf4_t vs2,
+ vbfloat16mf4_t vs1, size_t vl) {
+ return __riscv_vfwadd_vv_bf16mf4_f32mf2_rm(vs2, vs1, __RISCV_FRM_RNE, vl);
+}
+
+// CHECK-RV64-LABEL: define dso_local <vscale x 1 x float> @test_vfwadd_vf_bf16mf4_f32mf2_rm(
+// CHECK-RV64-SAME: <vscale x 1 x bfloat> [[VS2:%.*]], bfloat noundef [[RS1:%.*]], i64 noundef [[VL:%.*]]) #[[ATTR0]] {
+// CHECK-RV64-NEXT: [[ENTRY:.*:]]
+// CHECK-RV64-NEXT: [[TMP0:%.*]] = call <vscale x 1 x float> @llvm.riscv.vfwadd.nxv1f32.nxv1bf16.bf16.i64(<vscale x 1 x float> poison, <vscale x 1 x bfloat> [[VS2]], bfloat [[RS1]], i64 0, i64 [[VL]])
+// CHECK-RV64-NEXT: ret <vscale x 1 x float> [[TMP0]]
+//
+vfloat32mf2_t test_vfwadd_vf_bf16mf4_f32mf2_rm(vbfloat16mf4_t vs2, __bf16 rs1,
+ size_t vl) {
+ return __riscv_vfwadd_vf_bf16mf4_f32mf2_rm(vs2, rs1, __RISCV_FRM_RNE, vl);
+}
+
+// CHECK-RV64-LABEL: define dso_local <vscale x 1 x float> @test_vfwadd_wv_bf16mf4_f32mf2_rm(
+// CHECK-RV64-SAME: <vscale x 1 x float> [[VS2:%.*]], <vscale x 1 x bfloat> [[VS1:%.*]], i64 noundef [[VL:%.*]]) #[[ATTR0]] {
+// CHECK-RV64-NEXT: [[ENTRY:.*:]]
+// CHECK-RV64-NEXT: [[TMP0:%.*]] = call <vscale x 1 x float> @llvm.riscv.vfwadd.w.nxv1f32.nxv1bf16.i64(<vscale x 1 x float> poison, <vscale x 1 x float> [[VS2]], <vscale x 1 x bfloat> [[VS1]], i64 0, i64 [[VL]])
+// CHECK-RV64-NEXT: ret <vscale x 1 x float> [[TMP0]]
+//
+vfloat32mf2_t test_vfwadd_wv_bf16mf4_f32mf2_rm(vfloat32mf2_t vs2,
+ vbfloat16mf4_t vs1, size_t vl) {
+ return __riscv_vfwadd_wv_bf16mf4_f32mf2_rm(vs2, vs1, __RISCV_FRM_RNE, vl);
+}
+
+// CHECK-RV64-LABEL: define dso_local <vscale x 1 x float> @test_vfwadd_wf_bf16_f32mf2_rm(
+// CHECK-RV64-SAME: <vscale x 1 x float> [[VS2:%.*]], bfloat noundef [[RS1:%.*]], i64 noundef [[VL:%.*]]) #[[ATTR0]] {
+// CHECK-RV64-NEXT: [[ENTRY:.*:]]
+// CHECK-RV64-NEXT: [[TMP0:%.*]] = call <vscale x 1 x float> @llvm.riscv.vfwadd.w.nxv1f32.bf16.i64(<vscale x 1 x float> poison, <vscale x 1 x float> [[VS2]], bfloat [[RS1]], i64 0, i64 [[VL]])
+// CHECK-RV64-NEXT: ret <vscale x 1 x float> [[TMP0]]
+//
+vfloat32mf2_t test_vfwadd_wf_bf16_f32mf2_rm(vfloat32mf2_t vs2, __bf16 rs1,
+ size_t vl) {
+ return __riscv_vfwadd_wf_bf16_f32mf2_rm(vs2, rs1, __RISCV_FRM_RNE, vl);
+}
+
+// CHECK-RV64-LABEL: define dso_local <vscale x 2 x float> @test_vfwadd_vv_bf16mf2_f32m1_rm(
+// CHECK-RV64-SAME: <vscale x 2 x bfloat> [[VS2:%.*]], <vscale x 2 x bfloat> [[VS1:%.*]], i64 noundef [[VL:%.*]]) #[[ATTR0]] {
+// CHECK-RV64-NEXT: [[ENTRY:.*:]]
+// CHECK-RV64-NEXT: [[TMP0:%.*]] = call <vscale x 2 x float> @llvm.riscv.vfwadd.nxv2f32.nxv2bf16.nxv2bf16.i64(<vscale x 2 x float> poison, <vscale x 2 x bfloat> [[VS2]], <vscale x 2 x bfloat> [[VS1]], i64 0, i64 [[VL]])
+// CHECK-RV64-NEXT: ret <vscale x 2 x float> [[TMP0]]
+//
+vfloat32m1_t test_vfwadd_vv_bf16mf2_f32m1_rm(vbfloat16mf2_t vs2,
+ vbfloat16mf2_t vs1, size_t vl) {
+ return __riscv_vfwadd_vv_bf16mf2_f32m1_rm(vs2, vs1, __RISCV_FRM_RNE, vl);
+}
+
+// CHECK-RV64-LABEL: define dso_local <vscale x 2 x float> @test_vfwadd_vf_bf16mf2_f32m1_rm(
+// CHECK-RV64-SAME: <vscale x 2 x bfloat> [[VS2:%.*]], bfloat noundef [[RS1:%.*]], i64 noundef [[VL:%.*]]) #[[ATTR0]] {
+// CHECK-RV64-NEXT: [[ENTRY:.*:]]
+// CHECK-RV64-NEXT: [[TMP0:%.*]] = call <vscale x 2 x float> @llvm.riscv.vfwadd.nxv2f32.nxv2bf16.bf16.i64(<vscale x 2 x float> poison, <vscale x 2 x bfloat> [[VS2]], bfloat [[RS1]], i64 0, i64 [[VL]])
+// CHECK-RV64-NEXT: ret <vscale x 2 x float> [[TMP0]]
+//
+vfloat32m1_t test_vfwadd_vf_bf16mf2_f32m1_rm(vbfloat16mf2_t vs2, __bf16 rs1,
+ size_t vl) {
+ return __riscv_vfwadd_vf_bf16mf2_f32m1_rm(vs2, rs1, __RISCV_FRM_RNE, vl);
+}
+
+// CHECK-RV64-LABEL: define dso_local <vscale x 2 x float> @test_vfwadd_wv_bf16mf2_f32m1_rm(
+// CHECK-RV64-SAME: <vscale x 2 x float> [[VS2:%.*]], <vscale x 2 x bfloat> [[VS1:%.*]], i64 noundef [[VL:%.*]]) #[[ATTR0]] {
+// CHECK-RV64-NEXT: [[ENTRY:.*:]]
+// CHECK-RV64-NEXT: [[TMP0:%.*]] = call <vscale x 2 x float> @llvm.riscv.vfwadd.w.nxv2f32.nxv2bf16.i64(<vscale x 2 x float> poison, <vscale x 2 x float> [[VS2]], <vscale x 2 x bfloat> [[VS1]], i64 0, i64 [[VL]])
+// CHECK-RV64-NEXT: ret <vscale x 2 x float> [[TMP0]]
+//
+vfloat32m1_t test_vfwadd_wv_bf16mf2_f32m1_rm(vfloat32m1_t vs2,
+ vbfloat16mf2_t vs1, size_t vl) {
+ return __riscv_vfwadd_wv_bf16mf2_f32m1_rm(vs2, vs1, __RISCV_FRM_RNE, vl);
+}
+
+// CHECK-RV64-LABEL: define dso_local <vscale x 2 x float> @test_vfwadd_wf_bf16_f32m1_rm(
+// CHECK-RV64-SAME: <vscale x 2 x float> [[VS2:%.*]], bfloat noundef [[RS1:%.*]], i64 noundef [[VL:%.*]]) #[[ATTR0]] {
+// CHECK-RV64-NEXT: [[ENTRY:.*:]]
+// CHECK-RV64-NEXT: [[TMP0:%.*]] = call <vscale x 2 x float> @llvm.riscv.vfwadd.w.nxv2f32.bf16.i64(<vscale x 2 x float> poison, <vscale x 2 x float> [[VS2]], bfloat [[RS1]], i64 0, i64 [[VL]])
+// CHECK-RV64-NEXT: ret <vscale x 2 x float> [[TMP0]]
+//
+vfloat32m1_t test_vfwadd_wf_bf16_f32m1_rm(vfloat32m1_t vs2, __bf16 rs1,
+ size_t vl) {
+ return __riscv_vfwadd_wf_bf16_f32m1_rm(vs2, rs1, __RISCV_FRM_RNE, vl);
+}
+
+// CHECK-RV64-LABEL: define dso_local <vscale x 4 x float> @test_vfwadd_vv_bf16m1_f32m2_rm(
+// CHECK-RV64-SAME: <vscale x 4 x bfloat> [[VS2:%.*]], <vscale x 4 x bfloat> [[VS1:%.*]], i64 noundef [[VL:%.*]]) #[[ATTR0]] {
+// CHECK-RV64-NEXT: [[ENTRY:.*:]]
+// CHECK-RV64-NEXT: [[TMP0:%.*]] = call <vscale x 4 x float> @llvm.riscv.vfwadd.nxv4f32.nxv4bf16.nxv4bf16.i64(<vscale x 4 x float> poison, <vscale x 4 x bfloat> [[VS2]], <vscale x 4 x bfloat> [[VS1]], i64 0, i64 [[VL]])
+// CHECK-RV64-NEXT: ret <vscale x 4 x float> [[TMP0]]
+//
+vfloat32m2_t test_vfwadd_vv_bf16m1_f32m2_rm(vbfloat16m1_t vs2,
+ vbfloat16m1_t vs1, size_t vl) {
+ return __riscv_vfwadd_vv_bf16m1_f32m2_rm(vs2, vs1, __RISCV_FRM_RNE, vl);
+}
+
+// CHECK-RV64-LABEL: define dso_local <vscale x 4 x float> @test_vfwadd_vf_bf16m1_f32m2_rm(
+// CHECK-RV64-SAME: <vscale x 4 x bfloat> [[VS2:%.*]], bfloat noundef [[RS1:%.*]], i64 noundef [[VL:%.*]]) #[[ATTR0]] {
+// CHECK-RV64-NEXT: [[ENTRY:.*:]]
+// CHECK-RV64-NEXT: [[TMP0:%.*]] = call <vscale x 4 x float> @llvm.riscv.vfwadd.nxv4f32.nxv4bf16.bf16.i64(<vscale x 4 x float> poison, <vscale x 4 x bfloat> [[VS2]], bfloat [[RS1]], i64 0, i64 [[VL]])
+// CHECK-RV64-NEXT: ret <vscale x 4 x float> [[TMP0]]
+//
+vfloat32m2_t test_vfwadd_vf_bf16m1_f32m2_rm(vbfloat16m1_t vs2, __bf16 rs1,
+ size_t vl) {
+ return __riscv_vfwadd_vf_bf16m1_f32m2_rm(vs2, rs1, __RISCV_FRM_RNE, vl);
+}
+
+// CHECK-RV64-LABEL: define dso_local <vscale x 4 x float> @test_vfwadd_wv_bf16m1_f32m2_rm(
+// CHECK-RV64-SAME: <vscale x 4 x float> [[VS2:%.*]], <vscale x 4 x bfloat> [[VS1:%.*]], i64 noundef [[VL:%.*]]) #[[ATTR0]] {
+// CHECK-RV64-NEXT: [[ENTRY:.*:]]
+// CHECK-RV64-NEXT: [[TMP0:%.*]] = call <vscale x 4 x float> @llvm.riscv.vfwadd.w.nxv4f32.nxv4bf16.i64(<vscale x 4 x float> poison, <vscale x 4 x float> [[VS2]], <vscale x 4 x bfloat> [[VS1]], i64 0, i64 [[VL]])
+// CHECK-RV64-NEXT: ret <vscale x 4 x float> [[TMP0]]
+//
+vfloat32m2_t test_vfwadd_wv_bf16m1_f32m2_rm(vfloat32m2_t vs2, vbfloat16m1_t vs1,
+ size_t vl) {
+ return __riscv_vfwadd_wv_bf16m1_f32m2_rm(vs2, vs1, __RISCV_FRM_RNE, vl);
+}
+
+// CHECK-RV64-LABEL: define dso_local <vscale x 4 x float> @test_vfwadd_wf_bf16_f32m2_rm(
+// CHECK-RV64-SAME: <vscale x 4 x float> [[VS2:%.*]], bfloat noundef [[RS1:%.*]], i64 noundef [[VL:%.*]]) #[[ATTR0]] {
+// CHECK-RV64-NEXT: [[ENTRY:.*:]]
+// CHECK-RV64-NEXT: [[TMP0:%.*]] = call <vscale x 4 x float> @llvm.riscv.vfwadd.w.nxv4f32.bf16.i64(<vscale x 4 x float> poison, <vscale x 4 x float> [[VS2]], bfloat [[RS1]], i64 0, i64 [[VL]])
+// CHECK-RV64-NEXT: ret <vscale x 4 x float> [[TMP0]]
+//
+vfloat32m2_t test_vfwadd_wf_bf16_f32m2_rm(vfloat32m2_t vs2, __bf16 rs1,
+ size_t vl) {
+ return __riscv_vfwadd_wf_bf16_f32m2_rm(vs2, rs1, __RISCV_FRM_RNE, vl);
+}
+
+// CHECK-RV64-LABEL: define dso_local <vscale x 8 x float> @test_vfwadd_vv_bf16m2_f32m4_rm(
+// CHECK-RV64-SAME: <vscale x 8 x bfloat> [[VS2:%.*]], <vscale x 8 x bfloat> [[VS1:%.*]], i64 noundef [[VL:%.*]]) #[[ATTR0]] {
+// CHECK-RV64-NEXT: [[ENTRY:.*:]]
+// CHECK-RV64-NEXT: [[TMP0:%.*]] = call <vscale x 8 x float> @llvm.riscv.vfwadd.nxv8f32.nxv8bf16.nxv8bf16.i64(<vscale x 8 x float> poison, <vscale x 8 x bfloat> [[VS2]], <vscale x 8 x bfloat> [[VS1]], i64 0, i64 [[VL]])
+// CHECK-RV64-NEXT: ret <vscale x 8 x float> [[TMP0]]
+//
+vfloat32m4_t test_vfwadd_vv_bf16m2_f32m4_rm(vbfloat16m2_t vs2,
+ vbfloat16m2_t vs1, size_t vl) {
+ return __riscv_vfwadd_vv_bf16m2_f32m4_rm(vs2, vs1, __RISCV_FRM_RNE, vl);
+}
+
+// CHECK-RV64-LABEL: define dso_local <vscale x 8 x float> @test_vfwadd_vf_bf16m2_f32m4_rm(
+// CHECK-RV64-SAME: <vscale x 8 x bfloat> [[VS2:%.*]], bfloat noundef [[RS1:%.*]], i64 noundef [[VL:%.*]]) #[[ATTR0]] {
+// CHECK-RV64-NEXT: [[ENTRY:.*:]]
+// CHECK-RV64-NEXT: [[TMP0:%.*]] = call <vscale x 8 x float> @llvm.riscv.vfwadd.nxv8f32.nxv8bf16.bf16.i64(<vscale x 8 x float> poison, <vscale x 8 x bfloat> [[VS2]], bfloat [[RS1]], i64 0, i64 [[VL]])
+// CHECK-RV64-NEXT: ret <vscale x 8 x float> [[TMP0]]
+//
+vfloat32m4_t test_vfwadd_vf_bf16m2_f32m4_rm(vbfloat16m2_t vs2, __bf16 rs1,
+ size_t vl) {
+ return __riscv_vfwadd_vf_bf16m2_f32m4_rm(vs2, rs1, __RISCV_FRM_RNE, vl);
+}
+
+// CHECK-RV64-LABEL: define dso_local <vscale x 8 x float> @test_vfwadd_wv_bf16m2_f32m4_rm(
+// CHECK-RV64-SAME: <vscale x 8 x float> [[VS2:%.*]], <vscale x 8 x bfloat> [[VS1:%.*]], i64 noundef [[VL:%.*]]) #[[ATTR0]] {
+// CHECK-RV64-NEXT: [[ENTRY:.*:]]
+// CHECK-RV64-NEXT: [[TMP0:%.*]] = call <vscale x 8 x float> @llvm.riscv.vfwadd.w.nxv8f32.nxv8bf16.i64(<vscale x 8 x float> poison, <vscale x 8 x float> [[VS2]], <vscale x 8 x bfloat> [[VS1]], i64 0, i64 [[VL]])
+// CHECK-RV64-NEXT: ret <vscale x 8 x float> [[TMP0]]
+//
+vfloat32m4_t test_vfwadd_wv_bf16m2_f32m4_rm(vfloat32m4_t vs2, vbfloat16m2_t vs1,
+ size_t vl) {
+ return __riscv_vfwadd_wv_bf16m2_f32m4_rm(vs2, vs1, __RISCV_FRM_RNE, vl);
+}
+
+// CHECK-RV64-LABEL: define dso_local <vscale x 8 x float> @test_vfwadd_wf_bf16_f32m4_rm(
+// CHECK-RV64-SAME: <vscale x 8 x float> [[VS2:%.*]], bfloat noundef [[RS1:%.*]], i64 noundef [[VL:%.*]]) #[[ATTR0]] {
+// CHECK-RV64-NEXT: [[ENTRY:.*:]]
+// CHECK-RV64-NEXT: [[TMP0:%.*]] = call <vscale x 8 x float> @llvm.riscv.vfwadd.w.nxv8f32.bf16.i64(<vscale x 8 x float> poison, <vscale x 8 x float> [[VS2]], bfloat [[RS1]], i64 0, i64 [[VL]])
+// CHECK-RV64-NEXT: ret <vscale x 8 x float> [[TMP0]]
+//
+vfloat32m4_t test_vfwadd_wf_bf16_f32m4_rm(vfloat32m4_t vs2, __bf16 rs1,
+ size_t vl) {
+ return __riscv_vfwadd_wf_bf16_f32m4_rm(vs2, rs1, __RISCV_FRM_RNE, vl);
+}
+
+// CHECK-RV64-LABEL: define dso_local <vscale x 16 x float> @test_vfwadd_vv_bf16m4_f32m8_rm(
+// CHECK-RV64-SAME: <vscale x 16 x bfloat> [[VS2:%.*]], <vscale x 16 x bfloat> [[VS1:%.*]], i64 noundef [[VL:%.*]]) #[[ATTR0]] {
+// CHECK-RV64-NEXT: [[ENTRY:.*:]]
+// CHECK-RV64-NEXT: [[TMP0:%.*]] = call <vscale x 16 x float> @llvm.riscv.vfwadd.nxv16f32.nxv16bf16.nxv16bf16.i64(<vscale x 16 x float> poison, <vscale x 16 x bfloat> [[VS2]], <vscale x 16 x bfloat> [[VS1]], i64 0, i64 [[VL]])
+// CHECK-RV64-NEXT: ret <vscale x 16 x float> [[TMP0]]
+//
+vfloat32m8_t test_vfwadd_vv_bf16m4_f32m8_rm(vbfloat16m4_t vs2,
+ vbfloat16m4_t vs1, size_t vl) {
+ return __riscv_vfwadd_vv_bf16m4_f32m8_rm(vs2, vs1, __RISCV_FRM_RNE, vl);
+}
+
+// CHECK-RV64-LABEL: define dso_local <vscale x 16 x float> @test_vfwadd_vf_bf16m4_f32m8_rm(
+// CHECK-RV64-SAME: <vscale x 16 x bfloat> [[VS2:%.*]], bfloat noundef [[RS1:%.*]], i64 noundef [[VL:%.*]]) #[[ATTR0]] {
+// CHECK-RV64-NEXT: [[ENTRY:.*:]]
+// CHECK-RV64-NEXT: [[TMP0:%.*]] = call <vscale x 16 x float> @llvm.riscv.vfwadd.nxv16f32.nxv16bf16.bf16.i64(<vscale x 16 x float> poison, <vscale x 16 x bfloat> [[VS2]], bfloat [[RS1]], i64 0, i64 [[VL]])
+// CHECK-RV64-NEXT: ret <vscale x 16 x float> [[TMP0]]
+//
+vfloat32m8_t test_vfwadd_vf_bf16m4_f32m8_rm(vbfloat16m4_t vs2, __bf16 rs1,
+ size_t vl) {
+ return __riscv_vfwadd_vf_bf16m4_f32m8_rm(vs2, rs1, __RISCV_FRM_RNE, vl);
+}
+
+// CHECK-RV64-LABEL: define dso_local <vscale x 16 x float> @test_vfwadd_wv_bf16m4_f32m8_rm(
+// CHECK-RV64-SAME: <vscale x 16 x float> [[VS2:%.*]], <vscale x 16 x bfloat> [[VS1:%.*]], i64 noundef [[VL:%.*]]) #[[ATTR0]] {
+// CHECK-RV64-NEXT: [[ENTRY:.*:]]
+// CHECK-RV64-NEXT: [[TMP0:%.*]] = call <vscale x 16 x float> @llvm.riscv.vfwadd.w.nxv16f32.nxv16bf16.i64(<vscale x 16 x float> poison, <vscale x 16 x float> [[VS2]], <vscale x 16 x bfloat> [[VS1]], i64 0, i64 [[VL]])
+// CHECK-RV64-NEXT: ret <vscale x 16 x float> [[TMP0]]
+//
+vfloat32m8_t test_vfwadd_wv_bf16m4_f32m8_rm(vfloat32m8_t vs2, vbfloat16m4_t vs1,
+ size_t vl) {
+ return __riscv_vfwadd_wv_bf16m4_f32m8_rm(vs2, vs1, __RISCV_FRM_RNE, vl);
+}
+
+// CHECK-RV64-LABEL: define dso_local <vscale x 16 x float> @test_vfwadd_wf_bf16_f32m8_rm(
+// CHECK-RV64-SAME: <vscale x 16 x float> [[VS2:%.*]], bfloat noundef [[RS1:%.*]], i64 noundef [[VL:%.*]]) #[[ATTR0]] {
+// CHECK-RV64-NEXT: [[ENTRY:.*:]]
+// CHECK-RV64-NEXT: [[TMP0:%.*]] = call <vscale x 16 x float> @llvm.riscv.vfwadd.w.nxv16f32.bf16.i64(<vscale x 16 x float> poison, <vscale x 16 x float> [[VS2]], bfloat [[RS1]], i64 0, i64 [[VL]])
+// CHECK-RV64-NEXT: ret <vscale x 16 x float> [[TMP0]]
+//
+vfloat32m8_t test_vfwadd_wf_bf16_f32m8_rm(vfloat32m8_t vs2, __bf16 rs1,
+ size_t vl) {
+ return __riscv_vfwadd_wf_bf16_f32m8_rm(vs2, rs1, __RISCV_FRM_RNE, vl);
+}
+
+// CHECK-RV64-LABEL: define dso_local <vscale x 1 x float> @test_vfwadd_vv_bf16mf4_f32mf2_rm_m(
+// CHECK-RV64-SAME: <vscale x 1 x i1> [[VM:%.*]], <vscale x 1 x bfloat> [[VS2:%.*]], <vscale x 1 x bfloat> [[VS1:%.*]], i64 noundef [[VL:%.*]]) #[[ATTR0]] {
+// CHECK-RV64-NEXT: [[ENTRY:.*:]]
+// CHECK-RV64-NEXT: [[TMP0:%.*]] = call <vscale x 1 x float> @llvm.riscv.vfwadd.mask.nxv1f32.nxv1bf16.nxv1bf16.i64(<vscale x 1 x float> poison, <vscale x 1 x bfloat> [[VS2]], <vscale x 1 x bfloat> [[VS1]], <vscale x 1 x i1> [[VM]], i64 0, i64 [[VL]], i64 3)
+// CHECK-RV64-NEXT: ret <vscale x 1 x float> [[TMP0]]
+//
+vfloat32mf2_t test_vfwadd_vv_bf16mf4_f32mf2_rm_m(vbool64_t vm,
+ vbfloat16mf4_t vs2,
+ vbfloat16mf4_t vs1,
+ size_t vl) {
+ return __riscv_vfwadd_vv_bf16mf4_f32mf2_rm_m(vm, vs2, vs1, __RISCV_FRM_RNE,
+ vl);
+}
+
+// CHECK-RV64-LABEL: define dso_local <vscale x 1 x float> @test_vfwadd_vf_bf16mf4_f32mf2_rm_m(
+// CHECK-RV64-SAME: <vscale x 1 x i1> [[VM:%.*]], <vscale x 1 x bfloat> [[VS2:%.*]], bfloat noundef [[RS1:%.*]], i64 noundef [[VL:%.*]]) #[[ATTR0]] {
+// CHECK-RV64-NEXT: [[ENTRY:.*:]]
+// CHECK-RV64-NEXT: [[TMP0:%.*]] = call <vscale x 1 x float> @llvm.riscv.vfwadd.mask.nxv1f32.nxv1bf16.bf16.i64(<vscale x 1 x float> poison, <vscale x 1 x bfloat> [[VS2]], bfloat [[RS1]], <vscale x 1 x i1> [[VM]], i64 0, i64 [[VL]], i64 3)
+// CHECK-RV64-NEXT: ret <vscale x 1 x float> [[TMP0]]
+//
+vfloat32mf2_t test_vfwadd_vf_bf16mf4_f32mf2_rm_m(vbool64_t vm,
+ vbfloat16mf4_t vs2, __bf16 rs1,
+ size_t vl) {
+ return __riscv_vfwadd_vf_bf16mf4_f32mf2_rm_m(vm, vs2, rs1, __RISCV_FRM_RNE,
+ vl);
+}
+
+// CHECK-RV64-LABEL: define dso_local <vscale x 1 x float> @test_vfwadd_wv_bf16mf4_f32mf2_rm_m(
+// CHECK-RV64-SAME: <vscale x 1 x i1> [[VM:%.*]], <vscale x 1 x float> [[VS2:%.*]], <vscale x 1 x bfloat> [[VS1:%.*]], i64 noundef [[VL:%.*]]) #[[ATTR0]] {
+// CHECK-RV64-NEXT: [[ENTRY:.*:]]
+// CHECK-RV64-NEXT: [[TMP0:%.*]] = call <vscale x 1 x float> @llvm.riscv.vfwadd.w.mask.nxv1f32.nxv1bf16.i64(<vscale x 1 x float> poison, <vscale x 1 x float> [[VS2]], <vscale x 1 x bfloat> [[VS1]], <vscale x 1 x i1> [[VM]], i64 0, i64 [[VL]], i64 3)
+// CHECK-RV64-NEXT: ret <vscale x 1 x float> [[TMP0]]
+//
+vfloat32mf2_t test_vfwadd_wv_bf16mf4_f32mf2_rm_m(vbool64_t vm,
+ vfloat32mf2_t vs2,
+ vbfloat16mf4_t vs1,
+ size_t vl) {
+ return __riscv_vfwadd_wv_bf16mf4_f32mf2_rm_m(vm, vs2, vs1, __RISCV_FRM_RNE,
+ vl);
+}
+
+// CHECK-RV64-LABEL: define dso_local <vscale x 1 x float> @test_vfwadd_wf_bf16_f32mf2_rm_m(
+// CHECK-RV64-SAME: <vscale x 1 x i1> [[VM:%.*]], <vscale x 1 x float> [[VS2:%.*]], bfloat noundef [[RS1:%.*]], i64 noundef [[VL:%.*]]) #[[ATTR0]] {
+// CHECK-RV64-NEXT: [[ENTRY:.*:]]
+// CHECK-RV64-NEXT: [[TMP0:%.*]] = call <vscale x 1 x float> @llvm.riscv.vfwadd.w.mask.nxv1f32.bf16.i64(<vscale x 1 x float> poison, <vscale x 1 x float> [[VS2]], bfloat [[RS1]], <vscale x 1 x i1> [[VM]], i64 0, i64 [[VL]], i64 3)
+// CHECK-RV64-NEXT: ret <vscale x 1 x float> [[TMP0]]
+//
+vfloat32mf2_t test_vfwadd_wf_bf16_f32mf2_rm_m(vbool64_t vm, vfloat32mf2_t vs2,
+ __bf16 rs1, size_t vl) {
+ return __riscv_vfwadd_wf_bf16_f32mf2_rm_m(vm, vs2, rs1, __RISCV_FRM_RNE, vl);
+}
+
+// CHECK-RV64-LABEL: define dso_local <vscale x 2 x float> @test_vfwadd_vv_bf16mf2_f32m1_rm_m(
+// CHECK-RV64-SAME: <vscale x 2 x i1> [[VM:%.*]], <vscale x 2 x bfloat> [[VS2:%.*]], <vscale x 2 x bfloat> [[VS1:%.*]], i64 noundef [[VL:%.*]]) #[[ATTR0]] {
+// CHECK-RV64-NEXT: [[ENTRY:.*:]]
+// CHECK-RV64-NEXT: [[TMP0:%.*]] = call <vscale x 2 x float> @llvm.riscv.vfwadd.mask.nxv2f32.nxv2bf16.nxv2bf16.i64(<vscale x 2 x float> poison, <vscale x 2 x bfloat> [[VS2]], <vscale x 2 x bfloat> [[VS1]], <vscale x 2 x i1> [[VM]], i64 0, i64 [[VL]], i64 3)
+// CHECK-RV64-NEXT: ret <vscale x 2 x float> [[TMP0]]
+//
+vfloat32m1_t test_vfwadd_vv_bf16mf2_f32m1_rm_m(vbool32_t vm, vbfloat16mf2_t vs2,
+ vbfloat16mf2_t vs1, size_t vl) {
+ return __riscv_vfwadd_vv_bf16mf2_f32m1_rm_m(vm, vs2, vs1, __RISCV_FRM_RNE,
+ vl);
+}
+
+// CHECK-RV64-LABEL: define dso_local <vscale x 2 x float> @test_vfwadd_vf_bf16mf2_f32m1_rm_m(
+// CHECK-RV64-SAME: <vscale x 2 x i1> [[VM:%.*]], <vscale x 2 x bfloat> [[VS2:%.*]], bfloat noundef [[RS1:%.*]], i64 noundef [[VL:%.*]]) #[[ATTR0]] {
+// CHECK-RV64-NEXT: [[ENTRY:.*:]]
+// CHECK-RV64-NEXT: [[TMP0:%.*]] = call <vscale x 2 x float> @llvm.riscv.vfwadd.mask.nxv2f32.nxv2bf16.bf16.i64(<vscale x 2 x float> poison, <vscale x 2 x bfloat> [[VS2]], bfloat [[RS1]], <vscale x 2 x i1> [[VM]], i64 0, i64 [[VL]], i64 3)
+// CHECK-RV64-NEXT: ret <vscale x 2 x float> [[TMP0]]
+//
+vfloat32m1_t test_vfwadd_vf_bf16mf2_f32m1_rm_m(vbool32_t vm, vbfloat16mf2_t vs2,
+ __bf16 rs1, size_t vl) {
+ return __riscv_vfwadd_vf_bf16mf2_f32m1_rm_m(vm, vs2, rs1, __RISCV_FRM_RNE,
+ vl);
+}
+
+// CHECK-RV64-LABEL: define dso_local <vscale x 2 x float> @test_vfwadd_wv_bf16mf2_f32m1_rm_m(
+// CHECK-RV64-SAME: <vscale x 2 x i1> [[VM:%.*]], <vscale x 2 x float> [[VS2:%.*]], <vscale x 2 x bfloat> [[VS1:%.*]], i64 noundef [[VL:%.*]]) #[[ATTR0]] {
+// CHECK-RV64-NEXT: [[ENTRY:.*:]]
+// CHECK-RV64-NEXT: [[TMP0:%.*]] = call <vscale x 2 x float> @llvm.riscv.vfwadd.w.mask.nxv2f32.nxv2bf16.i64(<vscale x 2 x float> poison, <vscale x 2 x float> [[VS2]], <vscale x 2 x bfloat> [[VS1]], <vscale x 2 x i1> [[VM]], i64 0, i64 [[VL]], i64 3)
+// CHECK-RV64-NEXT: ret <vscale x 2 x float> [[TMP0]]
+//
+vfloat32m1_t test_vfwadd_wv_bf16mf2_f32m1_rm_m(vbool32_t vm, vfloat32m1_t vs2,
+ vbfloat16mf2_t vs1, size_t vl) {
+ return __riscv_vfwadd_wv_bf16mf2_f32m1_rm_m(vm, vs2, vs1, __RISCV_FRM_RNE,
+ vl);
+}
+
+// CHECK-RV64-LABEL: define dso_local <vscale x 2 x float> @test_vfwadd_wf_bf16_f32m1_rm_m(
+// CHECK-RV64-SAME: <vscale x 2 x i1> [[VM:%.*]], <vscale x 2 x float> [[VS2:%.*]], bfloat noundef [[RS1:%.*]], i64 noundef [[VL:%.*]]) #[[ATTR0]] {
+// CHECK-RV64-NEXT: [[ENTRY:.*:]]
+// CHECK-RV64-NEXT: [[TMP0:%.*]] = call <vscale x 2 x float> @llvm.riscv.vfwadd.w.mask.nxv2f32.bf16.i64(<vscale x 2 x float> poison, <vscale x 2 x float> [[VS2]], bfloat [[RS1]], <vscale x 2 x i1> [[VM]], i64 0, i64 [[VL]], i64 3)
+// CHECK-RV64-NEXT: ret <vscale x 2 x float> [[TMP0]]
+//
+vfloat32m1_t test_vfwadd_wf_bf16_f32m1_rm_m(vbool32_t vm, vfloat32m1_t vs2,
+ __bf16 rs1, size_t vl) {
+ return __riscv_vfwadd_wf_bf16_f32m1_rm_m(vm, vs2, rs1, __RISCV_FRM_RNE, vl);
+}
+
+// CHECK-RV64-LABEL: define dso_local <vscale x 4 x float> @test_vfwadd_vv_bf16m1_f32m2_rm_m(
+// CHECK-RV64-SAME: <vscale x 4 x i1> [[VM:%.*]], <vscale x 4 x bfloat> [[VS2:%.*]], <vscale x 4 x bfloat> [[VS1:%.*]], i64 noundef [[VL:%.*]]) #[[ATTR0]] {
+// CHECK-RV64-NEXT: [[ENTRY:.*:]]
+// CHECK-RV64-NEXT: [[TMP0:%.*]] = call <vscale x 4 x float> @llvm.riscv.vfwadd.mask.nxv4f32.nxv4bf16.nxv4bf16.i64(<vscale x 4 x float> poison, <vscale x 4 x bfloat> [[VS2]], <vscale x 4 x bfloat> [[VS1]], <vscale x 4 x i1> [[VM]], i64 0, i64 [[VL]], i64 3)
+// CHECK-RV64-NEXT: ret <vscale x 4 x float> [[TMP0]]
+//
+vfloat32m2_t test_vfwadd_vv_bf16m1_f32m2_rm_m(vbool16_t vm, vbfloat16m1_t vs2,
+ vbfloat16m1_t vs1, size_t vl) {
+ return __riscv_vfwadd_vv_bf16m1_f32m2_rm_m(vm, vs2, vs1, __RISCV_FRM_RNE, vl);
+}
+
+// CHECK-RV64-LABEL: define dso_local <vscale x 4 x float> @test_vfwadd_vf_bf16m1_f32m2_rm_m(
+// CHECK-RV64-SAME: <vscale x 4 x i1> [[VM:%.*]], <vscale x 4 x bfloat> [[VS2:%.*]], bfloat noundef [[RS1:%.*]], i64 noundef [[VL:%.*]]) #[[ATTR0]] {
+// CHECK-RV64-NEXT: [[ENTRY:.*:]]
+// CHECK-RV64-NEXT: [[TMP0:%.*]] = call <vscale x 4 x float> @llvm.riscv.vfwadd.mask.nxv4f32.nxv4bf16.bf16.i64(<vscale x 4 x float> poison, <vscale x 4 x bfloat> [[VS2]], bfloat [[RS1]], <vscale x 4 x i1> [[VM]], i64 0, i64 [[VL]], i64 3)
+// CHECK-RV64-NEXT: ret <vscale x 4 x float> [[TMP0]]
+//
+vfloat32m2_t test_vfwadd_vf_bf16m1_f32m2_rm_m(vbool16_t vm, vbfloat16m1_t vs2,
+ __bf16 rs1, size_t vl) {
+ return __riscv_vfwadd_vf_bf16m1_f32m2_rm_m(vm, vs2, rs1, __RISCV_FRM_RNE, vl);
+}
+
+// CHECK-RV64-LABEL: define dso_local <vscale x 4 x float> @test_vfwadd_wv_bf16m1_f32m2_rm_m(
+// CHECK-RV64-SAME: <vscale x 4 x i1> [[VM:%.*]], <vscale x 4 x float> [[VS2:%.*]], <vscale x 4 x bfloat> [[VS1:%.*]], i64 noundef [[VL:%.*]]) #[[ATTR0]] {
+// CHECK-RV64-NEXT: [[ENTRY:.*:]]
+// CHECK-RV64-NEXT: [[TMP0:%.*]] = call <vscale x 4 x float> @llvm.riscv.vfwadd.w.mask.nxv4f32.nxv4bf16.i64(<vscale x 4 x float> poison, <vscale x 4 x float> [[VS2]], <vscale x 4 x bfloat> [[VS1]], <vscale x 4 x i1> [[VM]], i64 0, i64 [[VL]], i64 3)
+// CHECK-RV64-NEXT: ret <vscale x 4 x float> [[TMP0]]
+//
+vfloat32m2_t test_vfwadd_wv_bf16m1_f32m2_rm_m(vbool16_t vm, vfloat32m2_t vs2,
+ vbfloat16m1_t vs1, size_t vl) {
+ return __riscv_vfwadd_wv_bf16m1_f32m2_rm_m(vm, vs2, vs1, __RISCV_FRM_RNE, vl);
+}
+
+// CHECK-RV64-LABEL: define dso_local <vscale x 4 x float> @test_vfwadd_wf_bf16_f32m2_rm_m(
+// CHECK-RV64-SAME: <vscale x 4 x i1> [[VM:%.*]], <vscale x 4 x float> [[VS2:%.*]], bfloat noundef [[RS1:%.*]], i64 noundef [[VL:%.*]]) #[[ATTR0]] {
+// CHECK-RV64-NEXT: [[ENTRY:.*:]]
+// CHECK-RV64-NEXT: [[TMP0:%.*]] = call <vscale x 4 x float> @llvm.riscv.vfwadd.w.mask.nxv4f32.bf16.i64(<vscale x 4 x float> poison, <vscale x 4 x float> [[VS2]], bfloat [[RS1]], <vscale x 4 x i1> [[VM]], i64 0, i64 [[VL]], i64 3)
+// CHECK-RV64-NEXT: ret <vscale x 4 x float> [[TMP0]]
+//
+vfloat32m2_t test_vfwadd_wf_bf16_f32m2_rm_m(vbool16_t vm, vfloat32m2_t vs2,
+ __bf16 rs1, size_t vl) {
+ return __riscv_vfwadd_wf_bf16_f32m2_rm_m(vm, vs2, rs1, __RISCV_FRM_RNE, vl);
+}
+
+// CHECK-RV64-LABEL: define dso_local <vscale x 8 x float> @test_vfwadd_vv_bf16m2_f32m4_rm_m(
+// CHECK-RV64-SAME: <vscale x 8 x i1> [[VM:%.*]], <vscale x 8 x bfloat> [[VS2:%.*]], <vscale x 8 x bfloat> [[VS1:%.*]], i64 noundef [[VL:%.*]]) #[[ATTR0]] {
+// CHECK-RV64-NEXT: [[ENTRY:.*:]]
+// CHECK-RV64-NEXT: [[TMP0:%.*]] = call <vscale x 8 x float> @llvm.riscv.vfwadd.mask.nxv8f32.nxv8bf16.nxv8bf16.i64(<vscale x 8 x float> poison, <vscale x 8 x bfloat> [[VS2]], <vscale x 8 x bfloat> [[VS1]], <vscale x 8 x i1> [[VM]], i64 0, i64 [[VL]], i64 3)
+// CHECK-RV64-NEXT: ret <vscale x 8 x float> [[TMP0]]
+//
+vfloat32m4_t test_vfwadd_vv_bf16m2_f32m4_rm_m(vbool8_t vm, vbfloat16m2_t vs2,
+ vbfloat16m2_t vs1, size_t vl) {
+ return __riscv_vfwadd_vv_bf16m2_f32m4_rm_m(vm, vs2, vs1, __RISCV_FRM_RNE, vl);
+}
+
+// CHECK-RV64-LABEL: define dso_local <vscale x 8 x float> @test_vfwadd_vf_bf16m2_f32m4_rm_m(
+// CHECK-RV64-SAME: <vscale x 8 x i1> [[VM:%.*]], <vscale x 8 x bfloat> [[VS2:%.*]], bfloat noundef [[RS1:%.*]], i64 noundef [[VL:%.*]]) #[[ATTR0]] {
+// CHECK-RV64-NEXT: [[ENTRY:.*:]]
+// CHECK-RV64-NEXT: [[TMP0:%.*]] = call <vscale x 8 x float> @llvm.riscv.vfwadd.mask.nxv8f32.nxv8bf16.bf16.i64(<vscale x 8 x float> poison, <vscale x 8 x bfloat> [[VS2]], bfloat [[RS1]], <vscale x 8 x i1> [[VM]], i64 0, i64 [[VL]], i64 3)
+// CHECK-RV64-NEXT: ret <vscale x 8 x float> [[TMP0]]
+//
+vfloat32m4_t test_vfwadd_vf_bf16m2_f32m4_rm_m(vbool8_t vm, vbfloat16m2_t vs2,
+ __bf16 rs1, size_t vl) {
+ return __riscv_vfwadd_vf_bf16m2_f32m4_rm_m(vm, vs2, rs1, __RISCV_FRM_RNE, vl);
+}
+
+// CHECK-RV64-LABEL: define dso_local <vscale x 8 x float> @test_vfwadd_wv_bf16m2_f32m4_rm_m(
+// CHECK-RV64-SAME: <vscale x 8 x i1> [[VM:%.*]], <vscale x 8 x float> [[VS2:%.*]], <vscale x 8 x bfloat> [[VS1:%.*]], i64 noundef [[VL:%.*]]) #[[ATTR0]] {
+// CHECK-RV64-NEXT: [[ENTRY:.*:]]
+// CHECK-RV64-NEXT: [[TMP0:%.*]] = call <vscale x 8 x float> @llvm.riscv.vfwadd.w.mask.nxv8f32.nxv8bf16.i64(<vscale x 8 x float> poison, <vscale x 8 x float> [[VS2]], <vscale x 8 x bfloat> [[VS1]], <vscale x 8 x i1> [[VM]], i64 0, i64 [[VL]], i64 3)
+// CHECK-RV64-NEXT: ret <vscale x 8 x float> [[TMP0]]
+//
+vfloat32m4_t test_vfwadd_wv_bf16m2_f32m4_rm_m(vbool8_t vm, vfloat32m4_t vs2,
+ vbfloat16m2_t vs1, size_t vl) {
+ return __riscv_vfwadd_wv_bf16m2_f32m4_rm_m(vm, vs2, vs1, __RISCV_FRM_RNE, vl);
+}
+
+// CHECK-RV64-LABEL: define dso_local <vscale x 8 x float> @test_vfwadd_wf_bf16_f32m4_rm_m(
+// CHECK-RV64-SAME: <vscale x 8 x i1> [[VM:%.*]], <vscale x 8 x float> [[VS2:%.*]], bfloat noundef [[RS1:%.*]], i64 noundef [[VL:%.*]]) #[[ATTR0]] {
+// CHECK-RV64-NEXT: [[ENTRY:.*:]]
+// CHECK-RV64-NEXT: [[TMP0:%.*]] = call <vscale x 8 x float> @llvm.riscv.vfwadd.w.mask.nxv8f32.bf16.i64(<vscale x 8 x float> poison, <vscale x 8 x float> [[VS2]], bfloat [[RS1]], <vscale x 8 x i1> [[VM]], i64 0, i64 [[VL]], i64 3)
+// CHECK-RV64-NEXT: ret <vscale x 8 x float> [[TMP0]]
+//
+vfloat32m4_t test_vfwadd_wf_bf16_f32m4_rm_m(vbool8_t vm, vfloat32m4_t vs2,
+ __bf16 rs1, size_t vl) {
+ return __riscv_vfwadd_wf_bf16_f32m4_rm_m(vm, vs2, rs1, __RISCV_FRM_RNE, vl);
+}
+
+// CHECK-RV64-LABEL: define dso_local <vscale x 16 x float> @test_vfwadd_vv_bf16m4_f32m8_rm_m(
+// CHECK-RV64-SAME: <vscale x 16 x i1> [[VM:%.*]], <vscale x 16 x bfloat> [[VS2:%.*]], <vscale x 16 x bfloat> [[VS1:%.*]], i64 noundef [[VL:%.*]]) #[[ATTR0]] {
+// CHECK-RV64-NEXT: [[ENTRY:.*:]]
+// CHECK-RV64-NEXT: [[TMP0:%.*]] = call <vscale x 16 x float> @llvm.riscv.vfwadd.mask.nxv16f32.nxv16bf16.nxv16bf16.i64(<vscale x 16 x float> poison, <vscale x 16 x bfloat> [[VS2]], <vscale x 16 x bfloat> [[VS1]], <vscale x 16 x i1> [[VM]], i64 0, i64 [[VL]], i64 3)
+// CHECK-RV64-NEXT: ret <vscale x 16 x float> [[TMP0]]
+//
+vfloat32m8_t test_vfwadd_vv_bf16m4_f32m8_rm_m(vbool4_t vm, vbfloat16m4_t vs2,
+ vbfloat16m4_t vs1, size_t vl) {
+ return __riscv_vfwadd_vv_bf16m4_f32m8_rm_m(vm, vs2, vs1, __RISCV_FRM_RNE, vl);
+}
+
+// CHECK-RV64-LABEL: define dso_local <vscale x 16 x float> @test_vfwadd_vf_bf16m4_f32m8_rm_m(
+// CHECK-RV64-SAME: <vscale x 16 x i1> [[VM:%.*]], <vscale x 16 x bfloat> [[VS2:%.*]], bfloat noundef [[RS1:%.*]], i64 noundef [[VL:%.*]]) #[[ATTR0]] {
+// CHECK-RV64-NEXT: [[ENTRY:.*:]]
+// CHECK-RV64-NEXT: [[TMP0:%.*]] = call <vscale x 16 x float> @llvm.riscv.vfwadd.mask.nxv16f32.nxv16bf16.bf16.i64(<vscale x 16 x float> poison, <vscale x 16 x bfloat> [[VS2]], bfloat [[RS1]], <vscale x 16 x i1> [[VM]], i64 0, i64 [[VL]], i64 3)
+// CHECK-RV64-NEXT: ret <vscale x 16 x float> [[TMP0]]
+//
+vfloat32m8_t test_vfwadd_vf_bf16m4_f32m8_rm_m(vbool4_t vm, vbfloat16m4_t vs2,
+ __bf16 rs1, size_t vl) {
+ return __riscv_vfwadd_vf_bf16m4_f32m8_rm_m(vm, vs2, rs1, __RISCV_FRM_RNE, vl);
+}
+
+// CHECK-RV64-LABEL: define dso_local <vscale x 16 x float> @test_vfwadd_wv_bf16m4_f32m8_rm_m(
+// CHECK-RV64-SAME: <vscale x 16 x i1> [[VM:%.*]], <vscale x 16 x float> [[VS2:%.*]], <vscale x 16 x bfloat> [[VS1:%.*]], i64 noundef [[VL:%.*]]) #[[ATTR0]] {
+// CHECK-RV64-NEXT: [[ENTRY:.*:]]
+// CHECK-RV64-NEXT: [[TMP0:%.*]] = call <vscale x 16 x float> @llvm.riscv.vfwadd.w.mask.nxv16f32.nxv16bf16.i64(<vscale x 16 x float> poison, <vscale x 16 x float> [[VS2]], <vscale x 16 x bfloat> [[VS1]], <vscale x 16 x i1> [[VM]], i64 0, i64 [[VL]], i64 3)
+// CHECK-RV64-NEXT: ret <vscale x 16 x float> [[TMP0]]
+//
+vfloat32m8_t test_vfwadd_wv_bf16m4_f32m8_rm_m(vbool4_t vm, vfloat32m8_t vs2,
+ vbfloat16m4_t vs1, size_t vl) {
+ return __riscv_vfwadd_wv_bf16m4_f32m8_rm_m(vm, vs2, vs1, __RISCV_FRM_RNE, vl);
+}
+
+// CHECK-RV64-LABEL: define dso_local <vscale x 16 x float> @test_vfwadd_wf_bf16_f32m8_rm_m(
+// CHECK-RV64-SAME: <vscale x 16 x i1> [[VM:%.*]], <vscale x 16 x float> [[VS2:%.*]], bfloat noundef [[RS1:%.*]], i64 noundef [[VL:%.*]]) #[[ATTR0]] {
+// CHECK-RV64-NEXT: [[ENTRY:.*:]]
+// CHECK-RV64-NEXT: [[TMP0:%.*]] = call <vscale x 16 x float> @llvm.riscv.vfwadd.w.mask.nxv16f32.bf16.i64(<vscale x 16 x float> poison, <vscale x 16 x float> [[VS2]], bfloat [[RS1]], <vscale x 16 x i1> [[VM]], i64 0, i64 [[VL]], i64 3)
+// CHECK-RV64-NEXT: ret <vscale x 16 x float> [[TMP0]]
+//
+vfloat32m8_t test_vfwadd_wf_bf16_f32m8_rm_m(vbool4_t vm, vfloat32m8_t vs2,
+ __bf16 rs1, size_t vl) {
+ return __riscv_vfwadd_wf_bf16_f32m8_rm_m(vm, vs2, rs1, __RISCV_FRM_RNE, vl);
+}
diff --git a/clang/test/CodeGen/RISCV/rvv-intrinsics-autogenerated/zvfbfa/non-policy/non-overloaded/vfwcvt.c b/clang/test/CodeGen/RISCV/rvv-intrinsics-autogenerated/zvfbfa/non-policy/non-overloaded/vfwcvt.c
new file mode 100644
index 0000000..fb3e003
--- /dev/null
+++ b/clang/test/CodeGen/RISCV/rvv-intrinsics-autogenerated/zvfbfa/non-policy/non-overloaded/vfwcvt.c
@@ -0,0 +1,366 @@
+// NOTE: Assertions have been autogenerated by utils/update_cc_test_checks.py UTC_ARGS: --version 5
+// REQUIRES: riscv-registered-target
+// RUN: %clang_cc1 -triple riscv64 -target-feature +v \
+// RUN: -target-feature +experimental-zvfbfa -disable-O0-optnone \
+// RUN: -emit-llvm %s -o - | opt -S -passes=mem2reg | \
+// RUN: FileCheck --check-prefix=CHECK-RV64 %s
+
+#include <riscv_vector.h>
+
+// CHECK-RV64-LABEL: define dso_local <vscale x 1 x bfloat> @test_vfwcvt_f_x_v_bf16mf4(
+// CHECK-RV64-SAME: <vscale x 1 x i8> [[VS2:%.*]], i64 noundef [[VL:%.*]]) #[[ATTR0:[0-9]+]] {
+// CHECK-RV64-NEXT: [[ENTRY:.*:]]
+// CHECK-RV64-NEXT: [[TMP0:%.*]] = call <vscale x 1 x bfloat> @llvm.riscv.vfwcvt.f.x.v.nxv1bf16.nxv1i8.i64(<vscale x 1 x bfloat> poison, <vscale x 1 x i8> [[VS2]], i64 [[VL]])
+// CHECK-RV64-NEXT: ret <vscale x 1 x bfloat> [[TMP0]]
+//
+vbfloat16mf4_t test_vfwcvt_f_x_v_bf16mf4(vint8mf8_t vs2, size_t vl) {
+ return __riscv_vfwcvt_f_x_v_bf16mf4(vs2, vl);
+}
+
+// CHECK-RV64-LABEL: define dso_local <vscale x 2 x bfloat> @test_vfwcvt_f_x_v_bf16mf2(
+// CHECK-RV64-SAME: <vscale x 2 x i8> [[VS2:%.*]], i64 noundef [[VL:%.*]]) #[[ATTR0]] {
+// CHECK-RV64-NEXT: [[ENTRY:.*:]]
+// CHECK-RV64-NEXT: [[TMP0:%.*]] = call <vscale x 2 x bfloat> @llvm.riscv.vfwcvt.f.x.v.nxv2bf16.nxv2i8.i64(<vscale x 2 x bfloat> poison, <vscale x 2 x i8> [[VS2]], i64 [[VL]])
+// CHECK-RV64-NEXT: ret <vscale x 2 x bfloat> [[TMP0]]
+//
+vbfloat16mf2_t test_vfwcvt_f_x_v_bf16mf2(vint8mf4_t vs2, size_t vl) {
+ return __riscv_vfwcvt_f_x_v_bf16mf2(vs2, vl);
+}
+
+// CHECK-RV64-LABEL: define dso_local <vscale x 4 x bfloat> @test_vfwcvt_f_x_v_bf16m1(
+// CHECK-RV64-SAME: <vscale x 4 x i8> [[VS2:%.*]], i64 noundef [[VL:%.*]]) #[[ATTR0]] {
+// CHECK-RV64-NEXT: [[ENTRY:.*:]]
+// CHECK-RV64-NEXT: [[TMP0:%.*]] = call <vscale x 4 x bfloat> @llvm.riscv.vfwcvt.f.x.v.nxv4bf16.nxv4i8.i64(<vscale x 4 x bfloat> poison, <vscale x 4 x i8> [[VS2]], i64 [[VL]])
+// CHECK-RV64-NEXT: ret <vscale x 4 x bfloat> [[TMP0]]
+//
+vbfloat16m1_t test_vfwcvt_f_x_v_bf16m1(vint8mf2_t vs2, size_t vl) {
+ return __riscv_vfwcvt_f_x_v_bf16m1(vs2, vl);
+}
+
+// CHECK-RV64-LABEL: define dso_local <vscale x 8 x bfloat> @test_vfwcvt_f_x_v_bf16m2(
+// CHECK-RV64-SAME: <vscale x 8 x i8> [[VS2:%.*]], i64 noundef [[VL:%.*]]) #[[ATTR0]] {
+// CHECK-RV64-NEXT: [[ENTRY:.*:]]
+// CHECK-RV64-NEXT: [[TMP0:%.*]] = call <vscale x 8 x bfloat> @llvm.riscv.vfwcvt.f.x.v.nxv8bf16.nxv8i8.i64(<vscale x 8 x bfloat> poison, <vscale x 8 x i8> [[VS2]], i64 [[VL]])
+// CHECK-RV64-NEXT: ret <vscale x 8 x bfloat> [[TMP0]]
+//
+vbfloat16m2_t test_vfwcvt_f_x_v_bf16m2(vint8m1_t vs2, size_t vl) {
+ return __riscv_vfwcvt_f_x_v_bf16m2(vs2, vl);
+}
+
+// CHECK-RV64-LABEL: define dso_local <vscale x 16 x bfloat> @test_vfwcvt_f_x_v_bf16m4(
+// CHECK-RV64-SAME: <vscale x 16 x i8> [[VS2:%.*]], i64 noundef [[VL:%.*]]) #[[ATTR0]] {
+// CHECK-RV64-NEXT: [[ENTRY:.*:]]
+// CHECK-RV64-NEXT: [[TMP0:%.*]] = call <vscale x 16 x bfloat> @llvm.riscv.vfwcvt.f.x.v.nxv16bf16.nxv16i8.i64(<vscale x 16 x bfloat> poison, <vscale x 16 x i8> [[VS2]], i64 [[VL]])
+// CHECK-RV64-NEXT: ret <vscale x 16 x bfloat> [[TMP0]]
+//
+vbfloat16m4_t test_vfwcvt_f_x_v_bf16m4(vint8m2_t vs2, size_t vl) {
+ return __riscv_vfwcvt_f_x_v_bf16m4(vs2, vl);
+}
+
+// CHECK-RV64-LABEL: define dso_local <vscale x 32 x bfloat> @test_vfwcvt_f_x_v_bf16m8(
+// CHECK-RV64-SAME: <vscale x 32 x i8> [[VS2:%.*]], i64 noundef [[VL:%.*]]) #[[ATTR0]] {
+// CHECK-RV64-NEXT: [[ENTRY:.*:]]
+// CHECK-RV64-NEXT: [[TMP0:%.*]] = call <vscale x 32 x bfloat> @llvm.riscv.vfwcvt.f.x.v.nxv32bf16.nxv32i8.i64(<vscale x 32 x bfloat> poison, <vscale x 32 x i8> [[VS2]], i64 [[VL]])
+// CHECK-RV64-NEXT: ret <vscale x 32 x bfloat> [[TMP0]]
+//
+vbfloat16m8_t test_vfwcvt_f_x_v_bf16m8(vint8m4_t vs2, size_t vl) {
+ return __riscv_vfwcvt_f_x_v_bf16m8(vs2, vl);
+}
+
+// CHECK-RV64-LABEL: define dso_local <vscale x 1 x bfloat> @test_vfwcvt_f_xu_v_bf16mf4(
+// CHECK-RV64-SAME: <vscale x 1 x i8> [[VS2:%.*]], i64 noundef [[VL:%.*]]) #[[ATTR0]] {
+// CHECK-RV64-NEXT: [[ENTRY:.*:]]
+// CHECK-RV64-NEXT: [[TMP0:%.*]] = call <vscale x 1 x bfloat> @llvm.riscv.vfwcvt.f.xu.v.nxv1bf16.nxv1i8.i64(<vscale x 1 x bfloat> poison, <vscale x 1 x i8> [[VS2]], i64 [[VL]])
+// CHECK-RV64-NEXT: ret <vscale x 1 x bfloat> [[TMP0]]
+//
+vbfloat16mf4_t test_vfwcvt_f_xu_v_bf16mf4(vuint8mf8_t vs2, size_t vl) {
+ return __riscv_vfwcvt_f_xu_v_bf16mf4(vs2, vl);
+}
+
+// CHECK-RV64-LABEL: define dso_local <vscale x 2 x bfloat> @test_vfwcvt_f_xu_v_bf16mf2(
+// CHECK-RV64-SAME: <vscale x 2 x i8> [[VS2:%.*]], i64 noundef [[VL:%.*]]) #[[ATTR0]] {
+// CHECK-RV64-NEXT: [[ENTRY:.*:]]
+// CHECK-RV64-NEXT: [[TMP0:%.*]] = call <vscale x 2 x bfloat> @llvm.riscv.vfwcvt.f.xu.v.nxv2bf16.nxv2i8.i64(<vscale x 2 x bfloat> poison, <vscale x 2 x i8> [[VS2]], i64 [[VL]])
+// CHECK-RV64-NEXT: ret <vscale x 2 x bfloat> [[TMP0]]
+//
+vbfloat16mf2_t test_vfwcvt_f_xu_v_bf16mf2(vuint8mf4_t vs2, size_t vl) {
+ return __riscv_vfwcvt_f_xu_v_bf16mf2(vs2, vl);
+}
+
+// CHECK-RV64-LABEL: define dso_local <vscale x 4 x bfloat> @test_vfwcvt_f_xu_v_bf16m1(
+// CHECK-RV64-SAME: <vscale x 4 x i8> [[VS2:%.*]], i64 noundef [[VL:%.*]]) #[[ATTR0]] {
+// CHECK-RV64-NEXT: [[ENTRY:.*:]]
+// CHECK-RV64-NEXT: [[TMP0:%.*]] = call <vscale x 4 x bfloat> @llvm.riscv.vfwcvt.f.xu.v.nxv4bf16.nxv4i8.i64(<vscale x 4 x bfloat> poison, <vscale x 4 x i8> [[VS2]], i64 [[VL]])
+// CHECK-RV64-NEXT: ret <vscale x 4 x bfloat> [[TMP0]]
+//
+vbfloat16m1_t test_vfwcvt_f_xu_v_bf16m1(vuint8mf2_t vs2, size_t vl) {
+ return __riscv_vfwcvt_f_xu_v_bf16m1(vs2, vl);
+}
+
+// CHECK-RV64-LABEL: define dso_local <vscale x 8 x bfloat> @test_vfwcvt_f_xu_v_bf16m2(
+// CHECK-RV64-SAME: <vscale x 8 x i8> [[VS2:%.*]], i64 noundef [[VL:%.*]]) #[[ATTR0]] {
+// CHECK-RV64-NEXT: [[ENTRY:.*:]]
+// CHECK-RV64-NEXT: [[TMP0:%.*]] = call <vscale x 8 x bfloat> @llvm.riscv.vfwcvt.f.xu.v.nxv8bf16.nxv8i8.i64(<vscale x 8 x bfloat> poison, <vscale x 8 x i8> [[VS2]], i64 [[VL]])
+// CHECK-RV64-NEXT: ret <vscale x 8 x bfloat> [[TMP0]]
+//
+vbfloat16m2_t test_vfwcvt_f_xu_v_bf16m2(vuint8m1_t vs2, size_t vl) {
+ return __riscv_vfwcvt_f_xu_v_bf16m2(vs2, vl);
+}
+
+// CHECK-RV64-LABEL: define dso_local <vscale x 16 x bfloat> @test_vfwcvt_f_xu_v_bf16m4(
+// CHECK-RV64-SAME: <vscale x 16 x i8> [[VS2:%.*]], i64 noundef [[VL:%.*]]) #[[ATTR0]] {
+// CHECK-RV64-NEXT: [[ENTRY:.*:]]
+// CHECK-RV64-NEXT: [[TMP0:%.*]] = call <vscale x 16 x bfloat> @llvm.riscv.vfwcvt.f.xu.v.nxv16bf16.nxv16i8.i64(<vscale x 16 x bfloat> poison, <vscale x 16 x i8> [[VS2]], i64 [[VL]])
+// CHECK-RV64-NEXT: ret <vscale x 16 x bfloat> [[TMP0]]
+//
+vbfloat16m4_t test_vfwcvt_f_xu_v_bf16m4(vuint8m2_t vs2, size_t vl) {
+ return __riscv_vfwcvt_f_xu_v_bf16m4(vs2, vl);
+}
+
+// CHECK-RV64-LABEL: define dso_local <vscale x 32 x bfloat> @test_vfwcvt_f_xu_v_bf16m8(
+// CHECK-RV64-SAME: <vscale x 32 x i8> [[VS2:%.*]], i64 noundef [[VL:%.*]]) #[[ATTR0]] {
+// CHECK-RV64-NEXT: [[ENTRY:.*:]]
+// CHECK-RV64-NEXT: [[TMP0:%.*]] = call <vscale x 32 x bfloat> @llvm.riscv.vfwcvt.f.xu.v.nxv32bf16.nxv32i8.i64(<vscale x 32 x bfloat> poison, <vscale x 32 x i8> [[VS2]], i64 [[VL]])
+// CHECK-RV64-NEXT: ret <vscale x 32 x bfloat> [[TMP0]]
+//
+vbfloat16m8_t test_vfwcvt_f_xu_v_bf16m8(vuint8m4_t vs2, size_t vl) {
+ return __riscv_vfwcvt_f_xu_v_bf16m8(vs2, vl);
+}
+
+// CHECK-RV64-LABEL: define dso_local <vscale x 1 x float> @test_vfwcvt_f_f_v_bf16mf4_f32mf2(
+// CHECK-RV64-SAME: <vscale x 1 x bfloat> [[VS2:%.*]], i64 noundef [[VL:%.*]]) #[[ATTR0]] {
+// CHECK-RV64-NEXT: [[ENTRY:.*:]]
+// CHECK-RV64-NEXT: [[TMP0:%.*]] = call <vscale x 1 x float> @llvm.riscv.vfwcvt.f.f.v.nxv1f32.nxv1bf16.i64(<vscale x 1 x float> poison, <vscale x 1 x bfloat> [[VS2]], i64 [[VL]])
+// CHECK-RV64-NEXT: ret <vscale x 1 x float> [[TMP0]]
+//
+vfloat32mf2_t test_vfwcvt_f_f_v_bf16mf4_f32mf2(vbfloat16mf4_t vs2, size_t vl) {
+ return __riscv_vfwcvt_f_f_v_bf16mf4_f32mf2(vs2, vl);
+}
+
+// CHECK-RV64-LABEL: define dso_local <vscale x 2 x float> @test_vfwcvt_f_f_v_bf16mf2_f32m1(
+// CHECK-RV64-SAME: <vscale x 2 x bfloat> [[VS2:%.*]], i64 noundef [[VL:%.*]]) #[[ATTR0]] {
+// CHECK-RV64-NEXT: [[ENTRY:.*:]]
+// CHECK-RV64-NEXT: [[TMP0:%.*]] = call <vscale x 2 x float> @llvm.riscv.vfwcvt.f.f.v.nxv2f32.nxv2bf16.i64(<vscale x 2 x float> poison, <vscale x 2 x bfloat> [[VS2]], i64 [[VL]])
+// CHECK-RV64-NEXT: ret <vscale x 2 x float> [[TMP0]]
+//
+vfloat32m1_t test_vfwcvt_f_f_v_bf16mf2_f32m1(vbfloat16mf2_t vs2, size_t vl) {
+ return __riscv_vfwcvt_f_f_v_bf16mf2_f32m1(vs2, vl);
+}
+
+// CHECK-RV64-LABEL: define dso_local <vscale x 4 x float> @test_vfwcvt_f_f_v_bf16m1_f32m2(
+// CHECK-RV64-SAME: <vscale x 4 x bfloat> [[VS2:%.*]], i64 noundef [[VL:%.*]]) #[[ATTR0]] {
+// CHECK-RV64-NEXT: [[ENTRY:.*:]]
+// CHECK-RV64-NEXT: [[TMP0:%.*]] = call <vscale x 4 x float> @llvm.riscv.vfwcvt.f.f.v.nxv4f32.nxv4bf16.i64(<vscale x 4 x float> poison, <vscale x 4 x bfloat> [[VS2]], i64 [[VL]])
+// CHECK-RV64-NEXT: ret <vscale x 4 x float> [[TMP0]]
+//
+vfloat32m2_t test_vfwcvt_f_f_v_bf16m1_f32m2(vbfloat16m1_t vs2, size_t vl) {
+ return __riscv_vfwcvt_f_f_v_bf16m1_f32m2(vs2, vl);
+}
+
+// CHECK-RV64-LABEL: define dso_local <vscale x 8 x float> @test_vfwcvt_f_f_v_bf16m2_f32m4(
+// CHECK-RV64-SAME: <vscale x 8 x bfloat> [[VS2:%.*]], i64 noundef [[VL:%.*]]) #[[ATTR0]] {
+// CHECK-RV64-NEXT: [[ENTRY:.*:]]
+// CHECK-RV64-NEXT: [[TMP0:%.*]] = call <vscale x 8 x float> @llvm.riscv.vfwcvt.f.f.v.nxv8f32.nxv8bf16.i64(<vscale x 8 x float> poison, <vscale x 8 x bfloat> [[VS2]], i64 [[VL]])
+// CHECK-RV64-NEXT: ret <vscale x 8 x float> [[TMP0]]
+//
+vfloat32m4_t test_vfwcvt_f_f_v_bf16m2_f32m4(vbfloat16m2_t vs2, size_t vl) {
+ return __riscv_vfwcvt_f_f_v_bf16m2_f32m4(vs2, vl);
+}
+
+// CHECK-RV64-LABEL: define dso_local <vscale x 16 x float> @test_vfwcvt_f_f_v_bf16m4_f32m8(
+// CHECK-RV64-SAME: <vscale x 16 x bfloat> [[VS2:%.*]], i64 noundef [[VL:%.*]]) #[[ATTR0]] {
+// CHECK-RV64-NEXT: [[ENTRY:.*:]]
+// CHECK-RV64-NEXT: [[TMP0:%.*]] = call <vscale x 16 x float> @llvm.riscv.vfwcvt.f.f.v.nxv16f32.nxv16bf16.i64(<vscale x 16 x float> poison, <vscale x 16 x bfloat> [[VS2]], i64 [[VL]])
+// CHECK-RV64-NEXT: ret <vscale x 16 x float> [[TMP0]]
+//
+vfloat32m8_t test_vfwcvt_f_f_v_bf16m4_f32m8(vbfloat16m4_t vs2, size_t vl) {
+ return __riscv_vfwcvt_f_f_v_bf16m4_f32m8(vs2, vl);
+}
+
+// CHECK-RV64-LABEL: define dso_local <vscale x 1 x bfloat> @test_vfwcvt_f_x_v_bf16mf4_m(
+// CHECK-RV64-SAME: <vscale x 1 x i1> [[VM:%.*]], <vscale x 1 x i8> [[VS2:%.*]], i64 noundef [[VL:%.*]]) #[[ATTR0]] {
+// CHECK-RV64-NEXT: [[ENTRY:.*:]]
+// CHECK-RV64-NEXT: [[TMP0:%.*]] = call <vscale x 1 x bfloat> @llvm.riscv.vfwcvt.f.x.v.mask.nxv1bf16.nxv1i8.i64(<vscale x 1 x bfloat> poison, <vscale x 1 x i8> [[VS2]], <vscale x 1 x i1> [[VM]], i64 [[VL]], i64 3)
+// CHECK-RV64-NEXT: ret <vscale x 1 x bfloat> [[TMP0]]
+//
+vbfloat16mf4_t test_vfwcvt_f_x_v_bf16mf4_m(vbool64_t vm, vint8mf8_t vs2,
+ size_t vl) {
+ return __riscv_vfwcvt_f_x_v_bf16mf4_m(vm, vs2, vl);
+}
+
+// CHECK-RV64-LABEL: define dso_local <vscale x 2 x bfloat> @test_vfwcvt_f_x_v_bf16mf2_m(
+// CHECK-RV64-SAME: <vscale x 2 x i1> [[VM:%.*]], <vscale x 2 x i8> [[VS2:%.*]], i64 noundef [[VL:%.*]]) #[[ATTR0]] {
+// CHECK-RV64-NEXT: [[ENTRY:.*:]]
+// CHECK-RV64-NEXT: [[TMP0:%.*]] = call <vscale x 2 x bfloat> @llvm.riscv.vfwcvt.f.x.v.mask.nxv2bf16.nxv2i8.i64(<vscale x 2 x bfloat> poison, <vscale x 2 x i8> [[VS2]], <vscale x 2 x i1> [[VM]], i64 [[VL]], i64 3)
+// CHECK-RV64-NEXT: ret <vscale x 2 x bfloat> [[TMP0]]
+//
+vbfloat16mf2_t test_vfwcvt_f_x_v_bf16mf2_m(vbool32_t vm, vint8mf4_t vs2,
+ size_t vl) {
+ return __riscv_vfwcvt_f_x_v_bf16mf2_m(vm, vs2, vl);
+}
+
+// CHECK-RV64-LABEL: define dso_local <vscale x 4 x bfloat> @test_vfwcvt_f_x_v_bf16m1_m(
+// CHECK-RV64-SAME: <vscale x 4 x i1> [[VM:%.*]], <vscale x 4 x i8> [[VS2:%.*]], i64 noundef [[VL:%.*]]) #[[ATTR0]] {
+// CHECK-RV64-NEXT: [[ENTRY:.*:]]
+// CHECK-RV64-NEXT: [[TMP0:%.*]] = call <vscale x 4 x bfloat> @llvm.riscv.vfwcvt.f.x.v.mask.nxv4bf16.nxv4i8.i64(<vscale x 4 x bfloat> poison, <vscale x 4 x i8> [[VS2]], <vscale x 4 x i1> [[VM]], i64 [[VL]], i64 3)
+// CHECK-RV64-NEXT: ret <vscale x 4 x bfloat> [[TMP0]]
+//
+vbfloat16m1_t test_vfwcvt_f_x_v_bf16m1_m(vbool16_t vm, vint8mf2_t vs2,
+ size_t vl) {
+ return __riscv_vfwcvt_f_x_v_bf16m1_m(vm, vs2, vl);
+}
+
+// CHECK-RV64-LABEL: define dso_local <vscale x 8 x bfloat> @test_vfwcvt_f_x_v_bf16m2_m(
+// CHECK-RV64-SAME: <vscale x 8 x i1> [[VM:%.*]], <vscale x 8 x i8> [[VS2:%.*]], i64 noundef [[VL:%.*]]) #[[ATTR0]] {
+// CHECK-RV64-NEXT: [[ENTRY:.*:]]
+// CHECK-RV64-NEXT: [[TMP0:%.*]] = call <vscale x 8 x bfloat> @llvm.riscv.vfwcvt.f.x.v.mask.nxv8bf16.nxv8i8.i64(<vscale x 8 x bfloat> poison, <vscale x 8 x i8> [[VS2]], <vscale x 8 x i1> [[VM]], i64 [[VL]], i64 3)
+// CHECK-RV64-NEXT: ret <vscale x 8 x bfloat> [[TMP0]]
+//
+vbfloat16m2_t test_vfwcvt_f_x_v_bf16m2_m(vbool8_t vm, vint8m1_t vs2,
+ size_t vl) {
+ return __riscv_vfwcvt_f_x_v_bf16m2_m(vm, vs2, vl);
+}
+
+// CHECK-RV64-LABEL: define dso_local <vscale x 16 x bfloat> @test_vfwcvt_f_x_v_bf16m4_m(
+// CHECK-RV64-SAME: <vscale x 16 x i1> [[VM:%.*]], <vscale x 16 x i8> [[VS2:%.*]], i64 noundef [[VL:%.*]]) #[[ATTR0]] {
+// CHECK-RV64-NEXT: [[ENTRY:.*:]]
+// CHECK-RV64-NEXT: [[TMP0:%.*]] = call <vscale x 16 x bfloat> @llvm.riscv.vfwcvt.f.x.v.mask.nxv16bf16.nxv16i8.i64(<vscale x 16 x bfloat> poison, <vscale x 16 x i8> [[VS2]], <vscale x 16 x i1> [[VM]], i64 [[VL]], i64 3)
+// CHECK-RV64-NEXT: ret <vscale x 16 x bfloat> [[TMP0]]
+//
+vbfloat16m4_t test_vfwcvt_f_x_v_bf16m4_m(vbool4_t vm, vint8m2_t vs2,
+ size_t vl) {
+ return __riscv_vfwcvt_f_x_v_bf16m4_m(vm, vs2, vl);
+}
+
+// CHECK-RV64-LABEL: define dso_local <vscale x 32 x bfloat> @test_vfwcvt_f_x_v_bf16m8_m(
+// CHECK-RV64-SAME: <vscale x 32 x i1> [[VM:%.*]], <vscale x 32 x i8> [[VS2:%.*]], i64 noundef [[VL:%.*]]) #[[ATTR0]] {
+// CHECK-RV64-NEXT: [[ENTRY:.*:]]
+// CHECK-RV64-NEXT: [[TMP0:%.*]] = call <vscale x 32 x bfloat> @llvm.riscv.vfwcvt.f.x.v.mask.nxv32bf16.nxv32i8.i64(<vscale x 32 x bfloat> poison, <vscale x 32 x i8> [[VS2]], <vscale x 32 x i1> [[VM]], i64 [[VL]], i64 3)
+// CHECK-RV64-NEXT: ret <vscale x 32 x bfloat> [[TMP0]]
+//
+vbfloat16m8_t test_vfwcvt_f_x_v_bf16m8_m(vbool2_t vm, vint8m4_t vs2,
+ size_t vl) {
+ return __riscv_vfwcvt_f_x_v_bf16m8_m(vm, vs2, vl);
+}
+
+// CHECK-RV64-LABEL: define dso_local <vscale x 1 x bfloat> @test_vfwcvt_f_xu_v_bf16mf4_m(
+// CHECK-RV64-SAME: <vscale x 1 x i1> [[VM:%.*]], <vscale x 1 x i8> [[VS2:%.*]], i64 noundef [[VL:%.*]]) #[[ATTR0]] {
+// CHECK-RV64-NEXT: [[ENTRY:.*:]]
+// CHECK-RV64-NEXT: [[TMP0:%.*]] = call <vscale x 1 x bfloat> @llvm.riscv.vfwcvt.f.xu.v.mask.nxv1bf16.nxv1i8.i64(<vscale x 1 x bfloat> poison, <vscale x 1 x i8> [[VS2]], <vscale x 1 x i1> [[VM]], i64 [[VL]], i64 3)
+// CHECK-RV64-NEXT: ret <vscale x 1 x bfloat> [[TMP0]]
+//
+vbfloat16mf4_t test_vfwcvt_f_xu_v_bf16mf4_m(vbool64_t vm, vuint8mf8_t vs2,
+ size_t vl) {
+ return __riscv_vfwcvt_f_xu_v_bf16mf4_m(vm, vs2, vl);
+}
+
+// CHECK-RV64-LABEL: define dso_local <vscale x 2 x bfloat> @test_vfwcvt_f_xu_v_bf16mf2_m(
+// CHECK-RV64-SAME: <vscale x 2 x i1> [[VM:%.*]], <vscale x 2 x i8> [[VS2:%.*]], i64 noundef [[VL:%.*]]) #[[ATTR0]] {
+// CHECK-RV64-NEXT: [[ENTRY:.*:]]
+// CHECK-RV64-NEXT: [[TMP0:%.*]] = call <vscale x 2 x bfloat> @llvm.riscv.vfwcvt.f.xu.v.mask.nxv2bf16.nxv2i8.i64(<vscale x 2 x bfloat> poison, <vscale x 2 x i8> [[VS2]], <vscale x 2 x i1> [[VM]], i64 [[VL]], i64 3)
+// CHECK-RV64-NEXT: ret <vscale x 2 x bfloat> [[TMP0]]
+//
+vbfloat16mf2_t test_vfwcvt_f_xu_v_bf16mf2_m(vbool32_t vm, vuint8mf4_t vs2,
+ size_t vl) {
+ return __riscv_vfwcvt_f_xu_v_bf16mf2_m(vm, vs2, vl);
+}
+
+// CHECK-RV64-LABEL: define dso_local <vscale x 4 x bfloat> @test_vfwcvt_f_xu_v_bf16m1_m(
+// CHECK-RV64-SAME: <vscale x 4 x i1> [[VM:%.*]], <vscale x 4 x i8> [[VS2:%.*]], i64 noundef [[VL:%.*]]) #[[ATTR0]] {
+// CHECK-RV64-NEXT: [[ENTRY:.*:]]
+// CHECK-RV64-NEXT: [[TMP0:%.*]] = call <vscale x 4 x bfloat> @llvm.riscv.vfwcvt.f.xu.v.mask.nxv4bf16.nxv4i8.i64(<vscale x 4 x bfloat> poison, <vscale x 4 x i8> [[VS2]], <vscale x 4 x i1> [[VM]], i64 [[VL]], i64 3)
+// CHECK-RV64-NEXT: ret <vscale x 4 x bfloat> [[TMP0]]
+//
+vbfloat16m1_t test_vfwcvt_f_xu_v_bf16m1_m(vbool16_t vm, vuint8mf2_t vs2,
+ size_t vl) {
+ return __riscv_vfwcvt_f_xu_v_bf16m1_m(vm, vs2, vl);
+}
+
+// CHECK-RV64-LABEL: define dso_local <vscale x 8 x bfloat> @test_vfwcvt_f_xu_v_bf16m2_m(
+// CHECK-RV64-SAME: <vscale x 8 x i1> [[VM:%.*]], <vscale x 8 x i8> [[VS2:%.*]], i64 noundef [[VL:%.*]]) #[[ATTR0]] {
+// CHECK-RV64-NEXT: [[ENTRY:.*:]]
+// CHECK-RV64-NEXT: [[TMP0:%.*]] = call <vscale x 8 x bfloat> @llvm.riscv.vfwcvt.f.xu.v.mask.nxv8bf16.nxv8i8.i64(<vscale x 8 x bfloat> poison, <vscale x 8 x i8> [[VS2]], <vscale x 8 x i1> [[VM]], i64 [[VL]], i64 3)
+// CHECK-RV64-NEXT: ret <vscale x 8 x bfloat> [[TMP0]]
+//
+vbfloat16m2_t test_vfwcvt_f_xu_v_bf16m2_m(vbool8_t vm, vuint8m1_t vs2,
+ size_t vl) {
+ return __riscv_vfwcvt_f_xu_v_bf16m2_m(vm, vs2, vl);
+}
+
+// CHECK-RV64-LABEL: define dso_local <vscale x 16 x bfloat> @test_vfwcvt_f_xu_v_bf16m4_m(
+// CHECK-RV64-SAME: <vscale x 16 x i1> [[VM:%.*]], <vscale x 16 x i8> [[VS2:%.*]], i64 noundef [[VL:%.*]]) #[[ATTR0]] {
+// CHECK-RV64-NEXT: [[ENTRY:.*:]]
+// CHECK-RV64-NEXT: [[TMP0:%.*]] = call <vscale x 16 x bfloat> @llvm.riscv.vfwcvt.f.xu.v.mask.nxv16bf16.nxv16i8.i64(<vscale x 16 x bfloat> poison, <vscale x 16 x i8> [[VS2]], <vscale x 16 x i1> [[VM]], i64 [[VL]], i64 3)
+// CHECK-RV64-NEXT: ret <vscale x 16 x bfloat> [[TMP0]]
+//
+vbfloat16m4_t test_vfwcvt_f_xu_v_bf16m4_m(vbool4_t vm, vuint8m2_t vs2,
+ size_t vl) {
+ return __riscv_vfwcvt_f_xu_v_bf16m4_m(vm, vs2, vl);
+}
+
+// CHECK-RV64-LABEL: define dso_local <vscale x 32 x bfloat> @test_vfwcvt_f_xu_v_bf16m8_m(
+// CHECK-RV64-SAME: <vscale x 32 x i1> [[VM:%.*]], <vscale x 32 x i8> [[VS2:%.*]], i64 noundef [[VL:%.*]]) #[[ATTR0]] {
+// CHECK-RV64-NEXT: [[ENTRY:.*:]]
+// CHECK-RV64-NEXT: [[TMP0:%.*]] = call <vscale x 32 x bfloat> @llvm.riscv.vfwcvt.f.xu.v.mask.nxv32bf16.nxv32i8.i64(<vscale x 32 x bfloat> poison, <vscale x 32 x i8> [[VS2]], <vscale x 32 x i1> [[VM]], i64 [[VL]], i64 3)
+// CHECK-RV64-NEXT: ret <vscale x 32 x bfloat> [[TMP0]]
+//
+vbfloat16m8_t test_vfwcvt_f_xu_v_bf16m8_m(vbool2_t vm, vuint8m4_t vs2,
+ size_t vl) {
+ return __riscv_vfwcvt_f_xu_v_bf16m8_m(vm, vs2, vl);
+}
+
+// CHECK-RV64-LABEL: define dso_local <vscale x 1 x float> @test_vfwcvt_f_f_v_bf16mf4_f32mf2_m(
+// CHECK-RV64-SAME: <vscale x 1 x i1> [[VM:%.*]], <vscale x 1 x bfloat> [[VS2:%.*]], i64 noundef [[VL:%.*]]) #[[ATTR0]] {
+// CHECK-RV64-NEXT: [[ENTRY:.*:]]
+// CHECK-RV64-NEXT: [[TMP0:%.*]] = call <vscale x 1 x float> @llvm.riscv.vfwcvt.f.f.v.mask.nxv1f32.nxv1bf16.i64(<vscale x 1 x float> poison, <vscale x 1 x bfloat> [[VS2]], <vscale x 1 x i1> [[VM]], i64 [[VL]], i64 3)
+// CHECK-RV64-NEXT: ret <vscale x 1 x float> [[TMP0]]
+//
+vfloat32mf2_t test_vfwcvt_f_f_v_bf16mf4_f32mf2_m(vbool64_t vm,
+ vbfloat16mf4_t vs2,
+ size_t vl) {
+ return __riscv_vfwcvt_f_f_v_bf16mf4_f32mf2_m(vm, vs2, vl);
+}
+
+// CHECK-RV64-LABEL: define dso_local <vscale x 2 x float> @test_vfwcvt_f_f_v_bf16mf2_f32m1_m(
+// CHECK-RV64-SAME: <vscale x 2 x i1> [[VM:%.*]], <vscale x 2 x bfloat> [[VS2:%.*]], i64 noundef [[VL:%.*]]) #[[ATTR0]] {
+// CHECK-RV64-NEXT: [[ENTRY:.*:]]
+// CHECK-RV64-NEXT: [[TMP0:%.*]] = call <vscale x 2 x float> @llvm.riscv.vfwcvt.f.f.v.mask.nxv2f32.nxv2bf16.i64(<vscale x 2 x float> poison, <vscale x 2 x bfloat> [[VS2]], <vscale x 2 x i1> [[VM]], i64 [[VL]], i64 3)
+// CHECK-RV64-NEXT: ret <vscale x 2 x float> [[TMP0]]
+//
+vfloat32m1_t test_vfwcvt_f_f_v_bf16mf2_f32m1_m(vbool32_t vm, vbfloat16mf2_t vs2,
+ size_t vl) {
+ return __riscv_vfwcvt_f_f_v_bf16mf2_f32m1_m(vm, vs2, vl);
+}
+
+// CHECK-RV64-LABEL: define dso_local <vscale x 4 x float> @test_vfwcvt_f_f_v_bf16m1_f32m2_m(
+// CHECK-RV64-SAME: <vscale x 4 x i1> [[VM:%.*]], <vscale x 4 x bfloat> [[VS2:%.*]], i64 noundef [[VL:%.*]]) #[[ATTR0]] {
+// CHECK-RV64-NEXT: [[ENTRY:.*:]]
+// CHECK-RV64-NEXT: [[TMP0:%.*]] = call <vscale x 4 x float> @llvm.riscv.vfwcvt.f.f.v.mask.nxv4f32.nxv4bf16.i64(<vscale x 4 x float> poison, <vscale x 4 x bfloat> [[VS2]], <vscale x 4 x i1> [[VM]], i64 [[VL]], i64 3)
+// CHECK-RV64-NEXT: ret <vscale x 4 x float> [[TMP0]]
+//
+vfloat32m2_t test_vfwcvt_f_f_v_bf16m1_f32m2_m(vbool16_t vm, vbfloat16m1_t vs2,
+ size_t vl) {
+ return __riscv_vfwcvt_f_f_v_bf16m1_f32m2_m(vm, vs2, vl);
+}
+
+// CHECK-RV64-LABEL: define dso_local <vscale x 8 x float> @test_vfwcvt_f_f_v_bf16m2_f32m4_m(
+// CHECK-RV64-SAME: <vscale x 8 x i1> [[VM:%.*]], <vscale x 8 x bfloat> [[VS2:%.*]], i64 noundef [[VL:%.*]]) #[[ATTR0]] {
+// CHECK-RV64-NEXT: [[ENTRY:.*:]]
+// CHECK-RV64-NEXT: [[TMP0:%.*]] = call <vscale x 8 x float> @llvm.riscv.vfwcvt.f.f.v.mask.nxv8f32.nxv8bf16.i64(<vscale x 8 x float> poison, <vscale x 8 x bfloat> [[VS2]], <vscale x 8 x i1> [[VM]], i64 [[VL]], i64 3)
+// CHECK-RV64-NEXT: ret <vscale x 8 x float> [[TMP0]]
+//
+vfloat32m4_t test_vfwcvt_f_f_v_bf16m2_f32m4_m(vbool8_t vm, vbfloat16m2_t vs2,
+ size_t vl) {
+ return __riscv_vfwcvt_f_f_v_bf16m2_f32m4_m(vm, vs2, vl);
+}
+
+// CHECK-RV64-LABEL: define dso_local <vscale x 16 x float> @test_vfwcvt_f_f_v_bf16m4_f32m8_m(
+// CHECK-RV64-SAME: <vscale x 16 x i1> [[VM:%.*]], <vscale x 16 x bfloat> [[VS2:%.*]], i64 noundef [[VL:%.*]]) #[[ATTR0]] {
+// CHECK-RV64-NEXT: [[ENTRY:.*:]]
+// CHECK-RV64-NEXT: [[TMP0:%.*]] = call <vscale x 16 x float> @llvm.riscv.vfwcvt.f.f.v.mask.nxv16f32.nxv16bf16.i64(<vscale x 16 x float> poison, <vscale x 16 x bfloat> [[VS2]], <vscale x 16 x i1> [[VM]], i64 [[VL]], i64 3)
+// CHECK-RV64-NEXT: ret <vscale x 16 x float> [[TMP0]]
+//
+vfloat32m8_t test_vfwcvt_f_f_v_bf16m4_f32m8_m(vbool4_t vm, vbfloat16m4_t vs2,
+ size_t vl) {
+ return __riscv_vfwcvt_f_f_v_bf16m4_f32m8_m(vm, vs2, vl);
+}
diff --git a/clang/test/CodeGen/RISCV/rvv-intrinsics-autogenerated/zvfbfa/non-policy/non-overloaded/vfwmacc.c b/clang/test/CodeGen/RISCV/rvv-intrinsics-autogenerated/zvfbfa/non-policy/non-overloaded/vfwmacc.c
new file mode 100644
index 0000000..be09003
--- /dev/null
+++ b/clang/test/CodeGen/RISCV/rvv-intrinsics-autogenerated/zvfbfa/non-policy/non-overloaded/vfwmacc.c
@@ -0,0 +1,486 @@
+// NOTE: Assertions have been autogenerated by utils/update_cc_test_checks.py UTC_ARGS: --version 5
+// REQUIRES: riscv-registered-target
+// RUN: %clang_cc1 -triple riscv64 -target-feature +v \
+// RUN: -target-feature +experimental-zvfbfa -disable-O0-optnone \
+// RUN: -emit-llvm %s -o - | opt -S -passes=mem2reg | \
+// RUN: FileCheck --check-prefix=CHECK-RV64 %s
+
+#include <riscv_vector.h>
+
+// CHECK-RV64-LABEL: define dso_local <vscale x 1 x float> @test_vfwmacc_vv_bf16mf4_f32mf2(
+// CHECK-RV64-SAME: <vscale x 1 x float> [[VD:%.*]], <vscale x 1 x bfloat> [[VS1:%.*]], <vscale x 1 x bfloat> [[VS2:%.*]], i64 noundef [[VL:%.*]]) #[[ATTR0:[0-9]+]] {
+// CHECK-RV64-NEXT: [[ENTRY:.*:]]
+// CHECK-RV64-NEXT: [[TMP0:%.*]] = call <vscale x 1 x float> @llvm.riscv.vfwmacc.nxv1f32.nxv1bf16.nxv1bf16.i64(<vscale x 1 x float> [[VD]], <vscale x 1 x bfloat> [[VS1]], <vscale x 1 x bfloat> [[VS2]], i64 7, i64 [[VL]], i64 3)
+// CHECK-RV64-NEXT: ret <vscale x 1 x float> [[TMP0]]
+//
+vfloat32mf2_t test_vfwmacc_vv_bf16mf4_f32mf2(vfloat32mf2_t vd,
+ vbfloat16mf4_t vs1,
+ vbfloat16mf4_t vs2, size_t vl) {
+ return __riscv_vfwmacc_vv_bf16mf4_f32mf2(vd, vs1, vs2, vl);
+}
+
+// CHECK-RV64-LABEL: define dso_local <vscale x 1 x float> @test_vfwmacc_vf_bf16mf4_f32mf2(
+// CHECK-RV64-SAME: <vscale x 1 x float> [[VD:%.*]], bfloat noundef [[VS1:%.*]], <vscale x 1 x bfloat> [[VS2:%.*]], i64 noundef [[VL:%.*]]) #[[ATTR0]] {
+// CHECK-RV64-NEXT: [[ENTRY:.*:]]
+// CHECK-RV64-NEXT: [[TMP0:%.*]] = call <vscale x 1 x float> @llvm.riscv.vfwmacc.nxv1f32.bf16.nxv1bf16.i64(<vscale x 1 x float> [[VD]], bfloat [[VS1]], <vscale x 1 x bfloat> [[VS2]], i64 7, i64 [[VL]], i64 3)
+// CHECK-RV64-NEXT: ret <vscale x 1 x float> [[TMP0]]
+//
+vfloat32mf2_t test_vfwmacc_vf_bf16mf4_f32mf2(vfloat32mf2_t vd, __bf16 vs1,
+ vbfloat16mf4_t vs2, size_t vl) {
+ return __riscv_vfwmacc_vf_bf16mf4_f32mf2(vd, vs1, vs2, vl);
+}
+
+// CHECK-RV64-LABEL: define dso_local <vscale x 2 x float> @test_vfwmacc_vv_bf16mf2_f32m1(
+// CHECK-RV64-SAME: <vscale x 2 x float> [[VD:%.*]], <vscale x 2 x bfloat> [[VS1:%.*]], <vscale x 2 x bfloat> [[VS2:%.*]], i64 noundef [[VL:%.*]]) #[[ATTR0]] {
+// CHECK-RV64-NEXT: [[ENTRY:.*:]]
+// CHECK-RV64-NEXT: [[TMP0:%.*]] = call <vscale x 2 x float> @llvm.riscv.vfwmacc.nxv2f32.nxv2bf16.nxv2bf16.i64(<vscale x 2 x float> [[VD]], <vscale x 2 x bfloat> [[VS1]], <vscale x 2 x bfloat> [[VS2]], i64 7, i64 [[VL]], i64 3)
+// CHECK-RV64-NEXT: ret <vscale x 2 x float> [[TMP0]]
+//
+vfloat32m1_t test_vfwmacc_vv_bf16mf2_f32m1(vfloat32m1_t vd, vbfloat16mf2_t vs1,
+ vbfloat16mf2_t vs2, size_t vl) {
+ return __riscv_vfwmacc_vv_bf16mf2_f32m1(vd, vs1, vs2, vl);
+}
+
+// CHECK-RV64-LABEL: define dso_local <vscale x 2 x float> @test_vfwmacc_vf_bf16mf2_f32m1(
+// CHECK-RV64-SAME: <vscale x 2 x float> [[VD:%.*]], bfloat noundef [[VS1:%.*]], <vscale x 2 x bfloat> [[VS2:%.*]], i64 noundef [[VL:%.*]]) #[[ATTR0]] {
+// CHECK-RV64-NEXT: [[ENTRY:.*:]]
+// CHECK-RV64-NEXT: [[TMP0:%.*]] = call <vscale x 2 x float> @llvm.riscv.vfwmacc.nxv2f32.bf16.nxv2bf16.i64(<vscale x 2 x float> [[VD]], bfloat [[VS1]], <vscale x 2 x bfloat> [[VS2]], i64 7, i64 [[VL]], i64 3)
+// CHECK-RV64-NEXT: ret <vscale x 2 x float> [[TMP0]]
+//
+vfloat32m1_t test_vfwmacc_vf_bf16mf2_f32m1(vfloat32m1_t vd, __bf16 vs1,
+ vbfloat16mf2_t vs2, size_t vl) {
+ return __riscv_vfwmacc_vf_bf16mf2_f32m1(vd, vs1, vs2, vl);
+}
+
+// CHECK-RV64-LABEL: define dso_local <vscale x 4 x float> @test_vfwmacc_vv_bf16m1_f32m2(
+// CHECK-RV64-SAME: <vscale x 4 x float> [[VD:%.*]], <vscale x 4 x bfloat> [[VS1:%.*]], <vscale x 4 x bfloat> [[VS2:%.*]], i64 noundef [[VL:%.*]]) #[[ATTR0]] {
+// CHECK-RV64-NEXT: [[ENTRY:.*:]]
+// CHECK-RV64-NEXT: [[TMP0:%.*]] = call <vscale x 4 x float> @llvm.riscv.vfwmacc.nxv4f32.nxv4bf16.nxv4bf16.i64(<vscale x 4 x float> [[VD]], <vscale x 4 x bfloat> [[VS1]], <vscale x 4 x bfloat> [[VS2]], i64 7, i64 [[VL]], i64 3)
+// CHECK-RV64-NEXT: ret <vscale x 4 x float> [[TMP0]]
+//
+vfloat32m2_t test_vfwmacc_vv_bf16m1_f32m2(vfloat32m2_t vd, vbfloat16m1_t vs1,
+ vbfloat16m1_t vs2, size_t vl) {
+ return __riscv_vfwmacc_vv_bf16m1_f32m2(vd, vs1, vs2, vl);
+}
+
+// CHECK-RV64-LABEL: define dso_local <vscale x 4 x float> @test_vfwmacc_vf_bf16m1_f32m2(
+// CHECK-RV64-SAME: <vscale x 4 x float> [[VD:%.*]], bfloat noundef [[VS1:%.*]], <vscale x 4 x bfloat> [[VS2:%.*]], i64 noundef [[VL:%.*]]) #[[ATTR0]] {
+// CHECK-RV64-NEXT: [[ENTRY:.*:]]
+// CHECK-RV64-NEXT: [[TMP0:%.*]] = call <vscale x 4 x float> @llvm.riscv.vfwmacc.nxv4f32.bf16.nxv4bf16.i64(<vscale x 4 x float> [[VD]], bfloat [[VS1]], <vscale x 4 x bfloat> [[VS2]], i64 7, i64 [[VL]], i64 3)
+// CHECK-RV64-NEXT: ret <vscale x 4 x float> [[TMP0]]
+//
+vfloat32m2_t test_vfwmacc_vf_bf16m1_f32m2(vfloat32m2_t vd, __bf16 vs1,
+ vbfloat16m1_t vs2, size_t vl) {
+ return __riscv_vfwmacc_vf_bf16m1_f32m2(vd, vs1, vs2, vl);
+}
+
+// CHECK-RV64-LABEL: define dso_local <vscale x 8 x float> @test_vfwmacc_vv_bf16m2_f32m4(
+// CHECK-RV64-SAME: <vscale x 8 x float> [[VD:%.*]], <vscale x 8 x bfloat> [[VS1:%.*]], <vscale x 8 x bfloat> [[VS2:%.*]], i64 noundef [[VL:%.*]]) #[[ATTR0]] {
+// CHECK-RV64-NEXT: [[ENTRY:.*:]]
+// CHECK-RV64-NEXT: [[TMP0:%.*]] = call <vscale x 8 x float> @llvm.riscv.vfwmacc.nxv8f32.nxv8bf16.nxv8bf16.i64(<vscale x 8 x float> [[VD]], <vscale x 8 x bfloat> [[VS1]], <vscale x 8 x bfloat> [[VS2]], i64 7, i64 [[VL]], i64 3)
+// CHECK-RV64-NEXT: ret <vscale x 8 x float> [[TMP0]]
+//
+vfloat32m4_t test_vfwmacc_vv_bf16m2_f32m4(vfloat32m4_t vd, vbfloat16m2_t vs1,
+ vbfloat16m2_t vs2, size_t vl) {
+ return __riscv_vfwmacc_vv_bf16m2_f32m4(vd, vs1, vs2, vl);
+}
+
+// CHECK-RV64-LABEL: define dso_local <vscale x 8 x float> @test_vfwmacc_vf_bf16m2_f32m4(
+// CHECK-RV64-SAME: <vscale x 8 x float> [[VD:%.*]], bfloat noundef [[VS1:%.*]], <vscale x 8 x bfloat> [[VS2:%.*]], i64 noundef [[VL:%.*]]) #[[ATTR0]] {
+// CHECK-RV64-NEXT: [[ENTRY:.*:]]
+// CHECK-RV64-NEXT: [[TMP0:%.*]] = call <vscale x 8 x float> @llvm.riscv.vfwmacc.nxv8f32.bf16.nxv8bf16.i64(<vscale x 8 x float> [[VD]], bfloat [[VS1]], <vscale x 8 x bfloat> [[VS2]], i64 7, i64 [[VL]], i64 3)
+// CHECK-RV64-NEXT: ret <vscale x 8 x float> [[TMP0]]
+//
+vfloat32m4_t test_vfwmacc_vf_bf16m2_f32m4(vfloat32m4_t vd, __bf16 vs1,
+ vbfloat16m2_t vs2, size_t vl) {
+ return __riscv_vfwmacc_vf_bf16m2_f32m4(vd, vs1, vs2, vl);
+}
+
+// CHECK-RV64-LABEL: define dso_local <vscale x 16 x float> @test_vfwmacc_vv_bf16m4_f32m8(
+// CHECK-RV64-SAME: <vscale x 16 x float> [[VD:%.*]], <vscale x 16 x bfloat> [[VS1:%.*]], <vscale x 16 x bfloat> [[VS2:%.*]], i64 noundef [[VL:%.*]]) #[[ATTR0]] {
+// CHECK-RV64-NEXT: [[ENTRY:.*:]]
+// CHECK-RV64-NEXT: [[TMP0:%.*]] = call <vscale x 16 x float> @llvm.riscv.vfwmacc.nxv16f32.nxv16bf16.nxv16bf16.i64(<vscale x 16 x float> [[VD]], <vscale x 16 x bfloat> [[VS1]], <vscale x 16 x bfloat> [[VS2]], i64 7, i64 [[VL]], i64 3)
+// CHECK-RV64-NEXT: ret <vscale x 16 x float> [[TMP0]]
+//
+vfloat32m8_t test_vfwmacc_vv_bf16m4_f32m8(vfloat32m8_t vd, vbfloat16m4_t vs1,
+ vbfloat16m4_t vs2, size_t vl) {
+ return __riscv_vfwmacc_vv_bf16m4_f32m8(vd, vs1, vs2, vl);
+}
+
+// CHECK-RV64-LABEL: define dso_local <vscale x 16 x float> @test_vfwmacc_vf_bf16m4_f32m8(
+// CHECK-RV64-SAME: <vscale x 16 x float> [[VD:%.*]], bfloat noundef [[VS1:%.*]], <vscale x 16 x bfloat> [[VS2:%.*]], i64 noundef [[VL:%.*]]) #[[ATTR0]] {
+// CHECK-RV64-NEXT: [[ENTRY:.*:]]
+// CHECK-RV64-NEXT: [[TMP0:%.*]] = call <vscale x 16 x float> @llvm.riscv.vfwmacc.nxv16f32.bf16.nxv16bf16.i64(<vscale x 16 x float> [[VD]], bfloat [[VS1]], <vscale x 16 x bfloat> [[VS2]], i64 7, i64 [[VL]], i64 3)
+// CHECK-RV64-NEXT: ret <vscale x 16 x float> [[TMP0]]
+//
+vfloat32m8_t test_vfwmacc_vf_bf16m4_f32m8(vfloat32m8_t vd, __bf16 vs1,
+ vbfloat16m4_t vs2, size_t vl) {
+ return __riscv_vfwmacc_vf_bf16m4_f32m8(vd, vs1, vs2, vl);
+}
+
+// CHECK-RV64-LABEL: define dso_local <vscale x 1 x float> @test_vfwmacc_vv_bf16mf4_f32mf2_m(
+// CHECK-RV64-SAME: <vscale x 1 x i1> [[VM:%.*]], <vscale x 1 x float> [[VD:%.*]], <vscale x 1 x bfloat> [[VS1:%.*]], <vscale x 1 x bfloat> [[VS2:%.*]], i64 noundef [[VL:%.*]]) #[[ATTR0]] {
+// CHECK-RV64-NEXT: [[ENTRY:.*:]]
+// CHECK-RV64-NEXT: [[TMP0:%.*]] = call <vscale x 1 x float> @llvm.riscv.vfwmacc.mask.nxv1f32.nxv1bf16.nxv1bf16.i64(<vscale x 1 x float> [[VD]], <vscale x 1 x bfloat> [[VS1]], <vscale x 1 x bfloat> [[VS2]], <vscale x 1 x i1> [[VM]], i64 7, i64 [[VL]], i64 3)
+// CHECK-RV64-NEXT: ret <vscale x 1 x float> [[TMP0]]
+//
+vfloat32mf2_t test_vfwmacc_vv_bf16mf4_f32mf2_m(vbool64_t vm, vfloat32mf2_t vd,
+ vbfloat16mf4_t vs1,
+ vbfloat16mf4_t vs2, size_t vl) {
+ return __riscv_vfwmacc_vv_bf16mf4_f32mf2_m(vm, vd, vs1, vs2, vl);
+}
+
+// CHECK-RV64-LABEL: define dso_local <vscale x 1 x float> @test_vfwmacc_vf_bf16mf4_f32mf2_m(
+// CHECK-RV64-SAME: <vscale x 1 x i1> [[VM:%.*]], <vscale x 1 x float> [[VD:%.*]], bfloat noundef [[VS1:%.*]], <vscale x 1 x bfloat> [[VS2:%.*]], i64 noundef [[VL:%.*]]) #[[ATTR0]] {
+// CHECK-RV64-NEXT: [[ENTRY:.*:]]
+// CHECK-RV64-NEXT: [[TMP0:%.*]] = call <vscale x 1 x float> @llvm.riscv.vfwmacc.mask.nxv1f32.bf16.nxv1bf16.i64(<vscale x 1 x float> [[VD]], bfloat [[VS1]], <vscale x 1 x bfloat> [[VS2]], <vscale x 1 x i1> [[VM]], i64 7, i64 [[VL]], i64 3)
+// CHECK-RV64-NEXT: ret <vscale x 1 x float> [[TMP0]]
+//
+vfloat32mf2_t test_vfwmacc_vf_bf16mf4_f32mf2_m(vbool64_t vm, vfloat32mf2_t vd,
+ __bf16 vs1, vbfloat16mf4_t vs2,
+ size_t vl) {
+ return __riscv_vfwmacc_vf_bf16mf4_f32mf2_m(vm, vd, vs1, vs2, vl);
+}
+
+// CHECK-RV64-LABEL: define dso_local <vscale x 2 x float> @test_vfwmacc_vv_bf16mf2_f32m1_m(
+// CHECK-RV64-SAME: <vscale x 2 x i1> [[VM:%.*]], <vscale x 2 x float> [[VD:%.*]], <vscale x 2 x bfloat> [[VS1:%.*]], <vscale x 2 x bfloat> [[VS2:%.*]], i64 noundef [[VL:%.*]]) #[[ATTR0]] {
+// CHECK-RV64-NEXT: [[ENTRY:.*:]]
+// CHECK-RV64-NEXT: [[TMP0:%.*]] = call <vscale x 2 x float> @llvm.riscv.vfwmacc.mask.nxv2f32.nxv2bf16.nxv2bf16.i64(<vscale x 2 x float> [[VD]], <vscale x 2 x bfloat> [[VS1]], <vscale x 2 x bfloat> [[VS2]], <vscale x 2 x i1> [[VM]], i64 7, i64 [[VL]], i64 3)
+// CHECK-RV64-NEXT: ret <vscale x 2 x float> [[TMP0]]
+//
+vfloat32m1_t test_vfwmacc_vv_bf16mf2_f32m1_m(vbool32_t vm, vfloat32m1_t vd,
+ vbfloat16mf2_t vs1,
+ vbfloat16mf2_t vs2, size_t vl) {
+ return __riscv_vfwmacc_vv_bf16mf2_f32m1_m(vm, vd, vs1, vs2, vl);
+}
+
+// CHECK-RV64-LABEL: define dso_local <vscale x 2 x float> @test_vfwmacc_vf_bf16mf2_f32m1_m(
+// CHECK-RV64-SAME: <vscale x 2 x i1> [[VM:%.*]], <vscale x 2 x float> [[VD:%.*]], bfloat noundef [[VS1:%.*]], <vscale x 2 x bfloat> [[VS2:%.*]], i64 noundef [[VL:%.*]]) #[[ATTR0]] {
+// CHECK-RV64-NEXT: [[ENTRY:.*:]]
+// CHECK-RV64-NEXT: [[TMP0:%.*]] = call <vscale x 2 x float> @llvm.riscv.vfwmacc.mask.nxv2f32.bf16.nxv2bf16.i64(<vscale x 2 x float> [[VD]], bfloat [[VS1]], <vscale x 2 x bfloat> [[VS2]], <vscale x 2 x i1> [[VM]], i64 7, i64 [[VL]], i64 3)
+// CHECK-RV64-NEXT: ret <vscale x 2 x float> [[TMP0]]
+//
+vfloat32m1_t test_vfwmacc_vf_bf16mf2_f32m1_m(vbool32_t vm, vfloat32m1_t vd,
+ __bf16 vs1, vbfloat16mf2_t vs2,
+ size_t vl) {
+ return __riscv_vfwmacc_vf_bf16mf2_f32m1_m(vm, vd, vs1, vs2, vl);
+}
+
+// CHECK-RV64-LABEL: define dso_local <vscale x 4 x float> @test_vfwmacc_vv_bf16m1_f32m2_m(
+// CHECK-RV64-SAME: <vscale x 4 x i1> [[VM:%.*]], <vscale x 4 x float> [[VD:%.*]], <vscale x 4 x bfloat> [[VS1:%.*]], <vscale x 4 x bfloat> [[VS2:%.*]], i64 noundef [[VL:%.*]]) #[[ATTR0]] {
+// CHECK-RV64-NEXT: [[ENTRY:.*:]]
+// CHECK-RV64-NEXT: [[TMP0:%.*]] = call <vscale x 4 x float> @llvm.riscv.vfwmacc.mask.nxv4f32.nxv4bf16.nxv4bf16.i64(<vscale x 4 x float> [[VD]], <vscale x 4 x bfloat> [[VS1]], <vscale x 4 x bfloat> [[VS2]], <vscale x 4 x i1> [[VM]], i64 7, i64 [[VL]], i64 3)
+// CHECK-RV64-NEXT: ret <vscale x 4 x float> [[TMP0]]
+//
+vfloat32m2_t test_vfwmacc_vv_bf16m1_f32m2_m(vbool16_t vm, vfloat32m2_t vd,
+ vbfloat16m1_t vs1,
+ vbfloat16m1_t vs2, size_t vl) {
+ return __riscv_vfwmacc_vv_bf16m1_f32m2_m(vm, vd, vs1, vs2, vl);
+}
+
+// CHECK-RV64-LABEL: define dso_local <vscale x 4 x float> @test_vfwmacc_vf_bf16m1_f32m2_m(
+// CHECK-RV64-SAME: <vscale x 4 x i1> [[VM:%.*]], <vscale x 4 x float> [[VD:%.*]], bfloat noundef [[VS1:%.*]], <vscale x 4 x bfloat> [[VS2:%.*]], i64 noundef [[VL:%.*]]) #[[ATTR0]] {
+// CHECK-RV64-NEXT: [[ENTRY:.*:]]
+// CHECK-RV64-NEXT: [[TMP0:%.*]] = call <vscale x 4 x float> @llvm.riscv.vfwmacc.mask.nxv4f32.bf16.nxv4bf16.i64(<vscale x 4 x float> [[VD]], bfloat [[VS1]], <vscale x 4 x bfloat> [[VS2]], <vscale x 4 x i1> [[VM]], i64 7, i64 [[VL]], i64 3)
+// CHECK-RV64-NEXT: ret <vscale x 4 x float> [[TMP0]]
+//
+vfloat32m2_t test_vfwmacc_vf_bf16m1_f32m2_m(vbool16_t vm, vfloat32m2_t vd,
+ __bf16 vs1, vbfloat16m1_t vs2,
+ size_t vl) {
+ return __riscv_vfwmacc_vf_bf16m1_f32m2_m(vm, vd, vs1, vs2, vl);
+}
+
+// CHECK-RV64-LABEL: define dso_local <vscale x 8 x float> @test_vfwmacc_vv_bf16m2_f32m4_m(
+// CHECK-RV64-SAME: <vscale x 8 x i1> [[VM:%.*]], <vscale x 8 x float> [[VD:%.*]], <vscale x 8 x bfloat> [[VS1:%.*]], <vscale x 8 x bfloat> [[VS2:%.*]], i64 noundef [[VL:%.*]]) #[[ATTR0]] {
+// CHECK-RV64-NEXT: [[ENTRY:.*:]]
+// CHECK-RV64-NEXT: [[TMP0:%.*]] = call <vscale x 8 x float> @llvm.riscv.vfwmacc.mask.nxv8f32.nxv8bf16.nxv8bf16.i64(<vscale x 8 x float> [[VD]], <vscale x 8 x bfloat> [[VS1]], <vscale x 8 x bfloat> [[VS2]], <vscale x 8 x i1> [[VM]], i64 7, i64 [[VL]], i64 3)
+// CHECK-RV64-NEXT: ret <vscale x 8 x float> [[TMP0]]
+//
+vfloat32m4_t test_vfwmacc_vv_bf16m2_f32m4_m(vbool8_t vm, vfloat32m4_t vd,
+ vbfloat16m2_t vs1,
+ vbfloat16m2_t vs2, size_t vl) {
+ return __riscv_vfwmacc_vv_bf16m2_f32m4_m(vm, vd, vs1, vs2, vl);
+}
+
+// CHECK-RV64-LABEL: define dso_local <vscale x 8 x float> @test_vfwmacc_vf_bf16m2_f32m4_m(
+// CHECK-RV64-SAME: <vscale x 8 x i1> [[VM:%.*]], <vscale x 8 x float> [[VD:%.*]], bfloat noundef [[VS1:%.*]], <vscale x 8 x bfloat> [[VS2:%.*]], i64 noundef [[VL:%.*]]) #[[ATTR0]] {
+// CHECK-RV64-NEXT: [[ENTRY:.*:]]
+// CHECK-RV64-NEXT: [[TMP0:%.*]] = call <vscale x 8 x float> @llvm.riscv.vfwmacc.mask.nxv8f32.bf16.nxv8bf16.i64(<vscale x 8 x float> [[VD]], bfloat [[VS1]], <vscale x 8 x bfloat> [[VS2]], <vscale x 8 x i1> [[VM]], i64 7, i64 [[VL]], i64 3)
+// CHECK-RV64-NEXT: ret <vscale x 8 x float> [[TMP0]]
+//
+vfloat32m4_t test_vfwmacc_vf_bf16m2_f32m4_m(vbool8_t vm, vfloat32m4_t vd,
+ __bf16 vs1, vbfloat16m2_t vs2,
+ size_t vl) {
+ return __riscv_vfwmacc_vf_bf16m2_f32m4_m(vm, vd, vs1, vs2, vl);
+}
+
+// CHECK-RV64-LABEL: define dso_local <vscale x 16 x float> @test_vfwmacc_vv_bf16m4_f32m8_m(
+// CHECK-RV64-SAME: <vscale x 16 x i1> [[VM:%.*]], <vscale x 16 x float> [[VD:%.*]], <vscale x 16 x bfloat> [[VS1:%.*]], <vscale x 16 x bfloat> [[VS2:%.*]], i64 noundef [[VL:%.*]]) #[[ATTR0]] {
+// CHECK-RV64-NEXT: [[ENTRY:.*:]]
+// CHECK-RV64-NEXT: [[TMP0:%.*]] = call <vscale x 16 x float> @llvm.riscv.vfwmacc.mask.nxv16f32.nxv16bf16.nxv16bf16.i64(<vscale x 16 x float> [[VD]], <vscale x 16 x bfloat> [[VS1]], <vscale x 16 x bfloat> [[VS2]], <vscale x 16 x i1> [[VM]], i64 7, i64 [[VL]], i64 3)
+// CHECK-RV64-NEXT: ret <vscale x 16 x float> [[TMP0]]
+//
+vfloat32m8_t test_vfwmacc_vv_bf16m4_f32m8_m(vbool4_t vm, vfloat32m8_t vd,
+ vbfloat16m4_t vs1,
+ vbfloat16m4_t vs2, size_t vl) {
+ return __riscv_vfwmacc_vv_bf16m4_f32m8_m(vm, vd, vs1, vs2, vl);
+}
+
+// CHECK-RV64-LABEL: define dso_local <vscale x 16 x float> @test_vfwmacc_vf_bf16m4_f32m8_m(
+// CHECK-RV64-SAME: <vscale x 16 x i1> [[VM:%.*]], <vscale x 16 x float> [[VD:%.*]], bfloat noundef [[VS1:%.*]], <vscale x 16 x bfloat> [[VS2:%.*]], i64 noundef [[VL:%.*]]) #[[ATTR0]] {
+// CHECK-RV64-NEXT: [[ENTRY:.*:]]
+// CHECK-RV64-NEXT: [[TMP0:%.*]] = call <vscale x 16 x float> @llvm.riscv.vfwmacc.mask.nxv16f32.bf16.nxv16bf16.i64(<vscale x 16 x float> [[VD]], bfloat [[VS1]], <vscale x 16 x bfloat> [[VS2]], <vscale x 16 x i1> [[VM]], i64 7, i64 [[VL]], i64 3)
+// CHECK-RV64-NEXT: ret <vscale x 16 x float> [[TMP0]]
+//
+vfloat32m8_t test_vfwmacc_vf_bf16m4_f32m8_m(vbool4_t vm, vfloat32m8_t vd,
+ __bf16 vs1, vbfloat16m4_t vs2,
+ size_t vl) {
+ return __riscv_vfwmacc_vf_bf16m4_f32m8_m(vm, vd, vs1, vs2, vl);
+}
+
+// CHECK-RV64-LABEL: define dso_local <vscale x 1 x float> @test_vfwmacc_vv_bf16mf4_f32mf2_rm(
+// CHECK-RV64-SAME: <vscale x 1 x float> [[VD:%.*]], <vscale x 1 x bfloat> [[VS1:%.*]], <vscale x 1 x bfloat> [[VS2:%.*]], i64 noundef [[VL:%.*]]) #[[ATTR0]] {
+// CHECK-RV64-NEXT: [[ENTRY:.*:]]
+// CHECK-RV64-NEXT: [[TMP0:%.*]] = call <vscale x 1 x float> @llvm.riscv.vfwmacc.nxv1f32.nxv1bf16.nxv1bf16.i64(<vscale x 1 x float> [[VD]], <vscale x 1 x bfloat> [[VS1]], <vscale x 1 x bfloat> [[VS2]], i64 0, i64 [[VL]], i64 3)
+// CHECK-RV64-NEXT: ret <vscale x 1 x float> [[TMP0]]
+//
+vfloat32mf2_t test_vfwmacc_vv_bf16mf4_f32mf2_rm(vfloat32mf2_t vd,
+ vbfloat16mf4_t vs1,
+ vbfloat16mf4_t vs2, size_t vl) {
+ return __riscv_vfwmacc_vv_bf16mf4_f32mf2_rm(vd, vs1, vs2, __RISCV_FRM_RNE,
+ vl);
+}
+
+// CHECK-RV64-LABEL: define dso_local <vscale x 1 x float> @test_vfwmacc_vf_bf16mf4_f32mf2_rm(
+// CHECK-RV64-SAME: <vscale x 1 x float> [[VD:%.*]], bfloat noundef [[VS1:%.*]], <vscale x 1 x bfloat> [[VS2:%.*]], i64 noundef [[VL:%.*]]) #[[ATTR0]] {
+// CHECK-RV64-NEXT: [[ENTRY:.*:]]
+// CHECK-RV64-NEXT: [[TMP0:%.*]] = call <vscale x 1 x float> @llvm.riscv.vfwmacc.nxv1f32.bf16.nxv1bf16.i64(<vscale x 1 x float> [[VD]], bfloat [[VS1]], <vscale x 1 x bfloat> [[VS2]], i64 0, i64 [[VL]], i64 3)
+// CHECK-RV64-NEXT: ret <vscale x 1 x float> [[TMP0]]
+//
+vfloat32mf2_t test_vfwmacc_vf_bf16mf4_f32mf2_rm(vfloat32mf2_t vd, __bf16 vs1,
+ vbfloat16mf4_t vs2, size_t vl) {
+ return __riscv_vfwmacc_vf_bf16mf4_f32mf2_rm(vd, vs1, vs2, __RISCV_FRM_RNE,
+ vl);
+}
+
+// CHECK-RV64-LABEL: define dso_local <vscale x 2 x float> @test_vfwmacc_vv_bf16mf2_f32m1_rm(
+// CHECK-RV64-SAME: <vscale x 2 x float> [[VD:%.*]], <vscale x 2 x bfloat> [[VS1:%.*]], <vscale x 2 x bfloat> [[VS2:%.*]], i64 noundef [[VL:%.*]]) #[[ATTR0]] {
+// CHECK-RV64-NEXT: [[ENTRY:.*:]]
+// CHECK-RV64-NEXT: [[TMP0:%.*]] = call <vscale x 2 x float> @llvm.riscv.vfwmacc.nxv2f32.nxv2bf16.nxv2bf16.i64(<vscale x 2 x float> [[VD]], <vscale x 2 x bfloat> [[VS1]], <vscale x 2 x bfloat> [[VS2]], i64 0, i64 [[VL]], i64 3)
+// CHECK-RV64-NEXT: ret <vscale x 2 x float> [[TMP0]]
+//
+vfloat32m1_t test_vfwmacc_vv_bf16mf2_f32m1_rm(vfloat32m1_t vd,
+ vbfloat16mf2_t vs1,
+ vbfloat16mf2_t vs2, size_t vl) {
+ return __riscv_vfwmacc_vv_bf16mf2_f32m1_rm(vd, vs1, vs2, __RISCV_FRM_RNE, vl);
+}
+
+// CHECK-RV64-LABEL: define dso_local <vscale x 2 x float> @test_vfwmacc_vf_bf16mf2_f32m1_rm(
+// CHECK-RV64-SAME: <vscale x 2 x float> [[VD:%.*]], bfloat noundef [[VS1:%.*]], <vscale x 2 x bfloat> [[VS2:%.*]], i64 noundef [[VL:%.*]]) #[[ATTR0]] {
+// CHECK-RV64-NEXT: [[ENTRY:.*:]]
+// CHECK-RV64-NEXT: [[TMP0:%.*]] = call <vscale x 2 x float> @llvm.riscv.vfwmacc.nxv2f32.bf16.nxv2bf16.i64(<vscale x 2 x float> [[VD]], bfloat [[VS1]], <vscale x 2 x bfloat> [[VS2]], i64 0, i64 [[VL]], i64 3)
+// CHECK-RV64-NEXT: ret <vscale x 2 x float> [[TMP0]]
+//
+vfloat32m1_t test_vfwmacc_vf_bf16mf2_f32m1_rm(vfloat32m1_t vd, __bf16 vs1,
+ vbfloat16mf2_t vs2, size_t vl) {
+ return __riscv_vfwmacc_vf_bf16mf2_f32m1_rm(vd, vs1, vs2, __RISCV_FRM_RNE, vl);
+}
+
+// CHECK-RV64-LABEL: define dso_local <vscale x 4 x float> @test_vfwmacc_vv_bf16m1_f32m2_rm(
+// CHECK-RV64-SAME: <vscale x 4 x float> [[VD:%.*]], <vscale x 4 x bfloat> [[VS1:%.*]], <vscale x 4 x bfloat> [[VS2:%.*]], i64 noundef [[VL:%.*]]) #[[ATTR0]] {
+// CHECK-RV64-NEXT: [[ENTRY:.*:]]
+// CHECK-RV64-NEXT: [[TMP0:%.*]] = call <vscale x 4 x float> @llvm.riscv.vfwmacc.nxv4f32.nxv4bf16.nxv4bf16.i64(<vscale x 4 x float> [[VD]], <vscale x 4 x bfloat> [[VS1]], <vscale x 4 x bfloat> [[VS2]], i64 0, i64 [[VL]], i64 3)
+// CHECK-RV64-NEXT: ret <vscale x 4 x float> [[TMP0]]
+//
+vfloat32m2_t test_vfwmacc_vv_bf16m1_f32m2_rm(vfloat32m2_t vd, vbfloat16m1_t vs1,
+ vbfloat16m1_t vs2, size_t vl) {
+ return __riscv_vfwmacc_vv_bf16m1_f32m2_rm(vd, vs1, vs2, __RISCV_FRM_RNE, vl);
+}
+
+// CHECK-RV64-LABEL: define dso_local <vscale x 4 x float> @test_vfwmacc_vf_bf16m1_f32m2_rm(
+// CHECK-RV64-SAME: <vscale x 4 x float> [[VD:%.*]], bfloat noundef [[VS1:%.*]], <vscale x 4 x bfloat> [[VS2:%.*]], i64 noundef [[VL:%.*]]) #[[ATTR0]] {
+// CHECK-RV64-NEXT: [[ENTRY:.*:]]
+// CHECK-RV64-NEXT: [[TMP0:%.*]] = call <vscale x 4 x float> @llvm.riscv.vfwmacc.nxv4f32.bf16.nxv4bf16.i64(<vscale x 4 x float> [[VD]], bfloat [[VS1]], <vscale x 4 x bfloat> [[VS2]], i64 0, i64 [[VL]], i64 3)
+// CHECK-RV64-NEXT: ret <vscale x 4 x float> [[TMP0]]
+//
+vfloat32m2_t test_vfwmacc_vf_bf16m1_f32m2_rm(vfloat32m2_t vd, __bf16 vs1,
+ vbfloat16m1_t vs2, size_t vl) {
+ return __riscv_vfwmacc_vf_bf16m1_f32m2_rm(vd, vs1, vs2, __RISCV_FRM_RNE, vl);
+}
+
+// CHECK-RV64-LABEL: define dso_local <vscale x 8 x float> @test_vfwmacc_vv_bf16m2_f32m4_rm(
+// CHECK-RV64-SAME: <vscale x 8 x float> [[VD:%.*]], <vscale x 8 x bfloat> [[VS1:%.*]], <vscale x 8 x bfloat> [[VS2:%.*]], i64 noundef [[VL:%.*]]) #[[ATTR0]] {
+// CHECK-RV64-NEXT: [[ENTRY:.*:]]
+// CHECK-RV64-NEXT: [[TMP0:%.*]] = call <vscale x 8 x float> @llvm.riscv.vfwmacc.nxv8f32.nxv8bf16.nxv8bf16.i64(<vscale x 8 x float> [[VD]], <vscale x 8 x bfloat> [[VS1]], <vscale x 8 x bfloat> [[VS2]], i64 0, i64 [[VL]], i64 3)
+// CHECK-RV64-NEXT: ret <vscale x 8 x float> [[TMP0]]
+//
+vfloat32m4_t test_vfwmacc_vv_bf16m2_f32m4_rm(vfloat32m4_t vd, vbfloat16m2_t vs1,
+ vbfloat16m2_t vs2, size_t vl) {
+ return __riscv_vfwmacc_vv_bf16m2_f32m4_rm(vd, vs1, vs2, __RISCV_FRM_RNE, vl);
+}
+
+// CHECK-RV64-LABEL: define dso_local <vscale x 8 x float> @test_vfwmacc_vf_bf16m2_f32m4_rm(
+// CHECK-RV64-SAME: <vscale x 8 x float> [[VD:%.*]], bfloat noundef [[VS1:%.*]], <vscale x 8 x bfloat> [[VS2:%.*]], i64 noundef [[VL:%.*]]) #[[ATTR0]] {
+// CHECK-RV64-NEXT: [[ENTRY:.*:]]
+// CHECK-RV64-NEXT: [[TMP0:%.*]] = call <vscale x 8 x float> @llvm.riscv.vfwmacc.nxv8f32.bf16.nxv8bf16.i64(<vscale x 8 x float> [[VD]], bfloat [[VS1]], <vscale x 8 x bfloat> [[VS2]], i64 0, i64 [[VL]], i64 3)
+// CHECK-RV64-NEXT: ret <vscale x 8 x float> [[TMP0]]
+//
+vfloat32m4_t test_vfwmacc_vf_bf16m2_f32m4_rm(vfloat32m4_t vd, __bf16 vs1,
+ vbfloat16m2_t vs2, size_t vl) {
+ return __riscv_vfwmacc_vf_bf16m2_f32m4_rm(vd, vs1, vs2, __RISCV_FRM_RNE, vl);
+}
+
+// CHECK-RV64-LABEL: define dso_local <vscale x 16 x float> @test_vfwmacc_vv_bf16m4_f32m8_rm(
+// CHECK-RV64-SAME: <vscale x 16 x float> [[VD:%.*]], <vscale x 16 x bfloat> [[VS1:%.*]], <vscale x 16 x bfloat> [[VS2:%.*]], i64 noundef [[VL:%.*]]) #[[ATTR0]] {
+// CHECK-RV64-NEXT: [[ENTRY:.*:]]
+// CHECK-RV64-NEXT: [[TMP0:%.*]] = call <vscale x 16 x float> @llvm.riscv.vfwmacc.nxv16f32.nxv16bf16.nxv16bf16.i64(<vscale x 16 x float> [[VD]], <vscale x 16 x bfloat> [[VS1]], <vscale x 16 x bfloat> [[VS2]], i64 0, i64 [[VL]], i64 3)
+// CHECK-RV64-NEXT: ret <vscale x 16 x float> [[TMP0]]
+//
+vfloat32m8_t test_vfwmacc_vv_bf16m4_f32m8_rm(vfloat32m8_t vd, vbfloat16m4_t vs1,
+ vbfloat16m4_t vs2, size_t vl) {
+ return __riscv_vfwmacc_vv_bf16m4_f32m8_rm(vd, vs1, vs2, __RISCV_FRM_RNE, vl);
+}
+
+// CHECK-RV64-LABEL: define dso_local <vscale x 16 x float> @test_vfwmacc_vf_bf16m4_f32m8_rm(
+// CHECK-RV64-SAME: <vscale x 16 x float> [[VD:%.*]], bfloat noundef [[VS1:%.*]], <vscale x 16 x bfloat> [[VS2:%.*]], i64 noundef [[VL:%.*]]) #[[ATTR0]] {
+// CHECK-RV64-NEXT: [[ENTRY:.*:]]
+// CHECK-RV64-NEXT: [[TMP0:%.*]] = call <vscale x 16 x float> @llvm.riscv.vfwmacc.nxv16f32.bf16.nxv16bf16.i64(<vscale x 16 x float> [[VD]], bfloat [[VS1]], <vscale x 16 x bfloat> [[VS2]], i64 0, i64 [[VL]], i64 3)
+// CHECK-RV64-NEXT: ret <vscale x 16 x float> [[TMP0]]
+//
+vfloat32m8_t test_vfwmacc_vf_bf16m4_f32m8_rm(vfloat32m8_t vd, __bf16 vs1,
+ vbfloat16m4_t vs2, size_t vl) {
+ return __riscv_vfwmacc_vf_bf16m4_f32m8_rm(vd, vs1, vs2, __RISCV_FRM_RNE, vl);
+}
+
+// CHECK-RV64-LABEL: define dso_local <vscale x 1 x float> @test_vfwmacc_vv_bf16mf4_f32mf2_rm_m(
+// CHECK-RV64-SAME: <vscale x 1 x i1> [[VM:%.*]], <vscale x 1 x float> [[VD:%.*]], <vscale x 1 x bfloat> [[VS1:%.*]], <vscale x 1 x bfloat> [[VS2:%.*]], i64 noundef [[VL:%.*]]) #[[ATTR0]] {
+// CHECK-RV64-NEXT: [[ENTRY:.*:]]
+// CHECK-RV64-NEXT: [[TMP0:%.*]] = call <vscale x 1 x float> @llvm.riscv.vfwmacc.mask.nxv1f32.nxv1bf16.nxv1bf16.i64(<vscale x 1 x float> [[VD]], <vscale x 1 x bfloat> [[VS1]], <vscale x 1 x bfloat> [[VS2]], <vscale x 1 x i1> [[VM]], i64 0, i64 [[VL]], i64 3)
+// CHECK-RV64-NEXT: ret <vscale x 1 x float> [[TMP0]]
+//
+vfloat32mf2_t test_vfwmacc_vv_bf16mf4_f32mf2_rm_m(vbool64_t vm,
+ vfloat32mf2_t vd,
+ vbfloat16mf4_t vs1,
+ vbfloat16mf4_t vs2,
+ size_t vl) {
+ return __riscv_vfwmacc_vv_bf16mf4_f32mf2_rm_m(vm, vd, vs1, vs2,
+ __RISCV_FRM_RNE, vl);
+}
+
+// CHECK-RV64-LABEL: define dso_local <vscale x 1 x float> @test_vfwmacc_vf_bf16mf4_f32mf2_rm_m(
+// CHECK-RV64-SAME: <vscale x 1 x i1> [[VM:%.*]], <vscale x 1 x float> [[VD:%.*]], bfloat noundef [[VS1:%.*]], <vscale x 1 x bfloat> [[VS2:%.*]], i64 noundef [[VL:%.*]]) #[[ATTR0]] {
+// CHECK-RV64-NEXT: [[ENTRY:.*:]]
+// CHECK-RV64-NEXT: [[TMP0:%.*]] = call <vscale x 1 x float> @llvm.riscv.vfwmacc.mask.nxv1f32.bf16.nxv1bf16.i64(<vscale x 1 x float> [[VD]], bfloat [[VS1]], <vscale x 1 x bfloat> [[VS2]], <vscale x 1 x i1> [[VM]], i64 0, i64 [[VL]], i64 3)
+// CHECK-RV64-NEXT: ret <vscale x 1 x float> [[TMP0]]
+//
+vfloat32mf2_t test_vfwmacc_vf_bf16mf4_f32mf2_rm_m(vbool64_t vm,
+ vfloat32mf2_t vd, __bf16 vs1,
+ vbfloat16mf4_t vs2,
+ size_t vl) {
+ return __riscv_vfwmacc_vf_bf16mf4_f32mf2_rm_m(vm, vd, vs1, vs2,
+ __RISCV_FRM_RNE, vl);
+}
+
+// CHECK-RV64-LABEL: define dso_local <vscale x 2 x float> @test_vfwmacc_vv_bf16mf2_f32m1_rm_m(
+// CHECK-RV64-SAME: <vscale x 2 x i1> [[VM:%.*]], <vscale x 2 x float> [[VD:%.*]], <vscale x 2 x bfloat> [[VS1:%.*]], <vscale x 2 x bfloat> [[VS2:%.*]], i64 noundef [[VL:%.*]]) #[[ATTR0]] {
+// CHECK-RV64-NEXT: [[ENTRY:.*:]]
+// CHECK-RV64-NEXT: [[TMP0:%.*]] = call <vscale x 2 x float> @llvm.riscv.vfwmacc.mask.nxv2f32.nxv2bf16.nxv2bf16.i64(<vscale x 2 x float> [[VD]], <vscale x 2 x bfloat> [[VS1]], <vscale x 2 x bfloat> [[VS2]], <vscale x 2 x i1> [[VM]], i64 0, i64 [[VL]], i64 3)
+// CHECK-RV64-NEXT: ret <vscale x 2 x float> [[TMP0]]
+//
+vfloat32m1_t test_vfwmacc_vv_bf16mf2_f32m1_rm_m(vbool32_t vm, vfloat32m1_t vd,
+ vbfloat16mf2_t vs1,
+ vbfloat16mf2_t vs2, size_t vl) {
+ return __riscv_vfwmacc_vv_bf16mf2_f32m1_rm_m(vm, vd, vs1, vs2,
+ __RISCV_FRM_RNE, vl);
+}
+
+// CHECK-RV64-LABEL: define dso_local <vscale x 2 x float> @test_vfwmacc_vf_bf16mf2_f32m1_rm_m(
+// CHECK-RV64-SAME: <vscale x 2 x i1> [[VM:%.*]], <vscale x 2 x float> [[VD:%.*]], bfloat noundef [[VS1:%.*]], <vscale x 2 x bfloat> [[VS2:%.*]], i64 noundef [[VL:%.*]]) #[[ATTR0]] {
+// CHECK-RV64-NEXT: [[ENTRY:.*:]]
+// CHECK-RV64-NEXT: [[TMP0:%.*]] = call <vscale x 2 x float> @llvm.riscv.vfwmacc.mask.nxv2f32.bf16.nxv2bf16.i64(<vscale x 2 x float> [[VD]], bfloat [[VS1]], <vscale x 2 x bfloat> [[VS2]], <vscale x 2 x i1> [[VM]], i64 0, i64 [[VL]], i64 3)
+// CHECK-RV64-NEXT: ret <vscale x 2 x float> [[TMP0]]
+//
+vfloat32m1_t test_vfwmacc_vf_bf16mf2_f32m1_rm_m(vbool32_t vm, vfloat32m1_t vd,
+ __bf16 vs1, vbfloat16mf2_t vs2,
+ size_t vl) {
+ return __riscv_vfwmacc_vf_bf16mf2_f32m1_rm_m(vm, vd, vs1, vs2,
+ __RISCV_FRM_RNE, vl);
+}
+
+// CHECK-RV64-LABEL: define dso_local <vscale x 4 x float> @test_vfwmacc_vv_bf16m1_f32m2_rm_m(
+// CHECK-RV64-SAME: <vscale x 4 x i1> [[VM:%.*]], <vscale x 4 x float> [[VD:%.*]], <vscale x 4 x bfloat> [[VS1:%.*]], <vscale x 4 x bfloat> [[VS2:%.*]], i64 noundef [[VL:%.*]]) #[[ATTR0]] {
+// CHECK-RV64-NEXT: [[ENTRY:.*:]]
+// CHECK-RV64-NEXT: [[TMP0:%.*]] = call <vscale x 4 x float> @llvm.riscv.vfwmacc.mask.nxv4f32.nxv4bf16.nxv4bf16.i64(<vscale x 4 x float> [[VD]], <vscale x 4 x bfloat> [[VS1]], <vscale x 4 x bfloat> [[VS2]], <vscale x 4 x i1> [[VM]], i64 0, i64 [[VL]], i64 3)
+// CHECK-RV64-NEXT: ret <vscale x 4 x float> [[TMP0]]
+//
+vfloat32m2_t test_vfwmacc_vv_bf16m1_f32m2_rm_m(vbool16_t vm, vfloat32m2_t vd,
+ vbfloat16m1_t vs1,
+ vbfloat16m1_t vs2, size_t vl) {
+ return __riscv_vfwmacc_vv_bf16m1_f32m2_rm_m(vm, vd, vs1, vs2, __RISCV_FRM_RNE,
+ vl);
+}
+
+// CHECK-RV64-LABEL: define dso_local <vscale x 4 x float> @test_vfwmacc_vf_bf16m1_f32m2_rm_m(
+// CHECK-RV64-SAME: <vscale x 4 x i1> [[VM:%.*]], <vscale x 4 x float> [[VD:%.*]], bfloat noundef [[VS1:%.*]], <vscale x 4 x bfloat> [[VS2:%.*]], i64 noundef [[VL:%.*]]) #[[ATTR0]] {
+// CHECK-RV64-NEXT: [[ENTRY:.*:]]
+// CHECK-RV64-NEXT: [[TMP0:%.*]] = call <vscale x 4 x float> @llvm.riscv.vfwmacc.mask.nxv4f32.bf16.nxv4bf16.i64(<vscale x 4 x float> [[VD]], bfloat [[VS1]], <vscale x 4 x bfloat> [[VS2]], <vscale x 4 x i1> [[VM]], i64 0, i64 [[VL]], i64 3)
+// CHECK-RV64-NEXT: ret <vscale x 4 x float> [[TMP0]]
+//
+vfloat32m2_t test_vfwmacc_vf_bf16m1_f32m2_rm_m(vbool16_t vm, vfloat32m2_t vd,
+ __bf16 vs1, vbfloat16m1_t vs2,
+ size_t vl) {
+ return __riscv_vfwmacc_vf_bf16m1_f32m2_rm_m(vm, vd, vs1, vs2, __RISCV_FRM_RNE,
+ vl);
+}
+
+// CHECK-RV64-LABEL: define dso_local <vscale x 8 x float> @test_vfwmacc_vv_bf16m2_f32m4_rm_m(
+// CHECK-RV64-SAME: <vscale x 8 x i1> [[VM:%.*]], <vscale x 8 x float> [[VD:%.*]], <vscale x 8 x bfloat> [[VS1:%.*]], <vscale x 8 x bfloat> [[VS2:%.*]], i64 noundef [[VL:%.*]]) #[[ATTR0]] {
+// CHECK-RV64-NEXT: [[ENTRY:.*:]]
+// CHECK-RV64-NEXT: [[TMP0:%.*]] = call <vscale x 8 x float> @llvm.riscv.vfwmacc.mask.nxv8f32.nxv8bf16.nxv8bf16.i64(<vscale x 8 x float> [[VD]], <vscale x 8 x bfloat> [[VS1]], <vscale x 8 x bfloat> [[VS2]], <vscale x 8 x i1> [[VM]], i64 0, i64 [[VL]], i64 3)
+// CHECK-RV64-NEXT: ret <vscale x 8 x float> [[TMP0]]
+//
+vfloat32m4_t test_vfwmacc_vv_bf16m2_f32m4_rm_m(vbool8_t vm, vfloat32m4_t vd,
+ vbfloat16m2_t vs1,
+ vbfloat16m2_t vs2, size_t vl) {
+ return __riscv_vfwmacc_vv_bf16m2_f32m4_rm_m(vm, vd, vs1, vs2, __RISCV_FRM_RNE,
+ vl);
+}
+
+// CHECK-RV64-LABEL: define dso_local <vscale x 8 x float> @test_vfwmacc_vf_bf16m2_f32m4_rm_m(
+// CHECK-RV64-SAME: <vscale x 8 x i1> [[VM:%.*]], <vscale x 8 x float> [[VD:%.*]], bfloat noundef [[VS1:%.*]], <vscale x 8 x bfloat> [[VS2:%.*]], i64 noundef [[VL:%.*]]) #[[ATTR0]] {
+// CHECK-RV64-NEXT: [[ENTRY:.*:]]
+// CHECK-RV64-NEXT: [[TMP0:%.*]] = call <vscale x 8 x float> @llvm.riscv.vfwmacc.mask.nxv8f32.bf16.nxv8bf16.i64(<vscale x 8 x float> [[VD]], bfloat [[VS1]], <vscale x 8 x bfloat> [[VS2]], <vscale x 8 x i1> [[VM]], i64 0, i64 [[VL]], i64 3)
+// CHECK-RV64-NEXT: ret <vscale x 8 x float> [[TMP0]]
+//
+vfloat32m4_t test_vfwmacc_vf_bf16m2_f32m4_rm_m(vbool8_t vm, vfloat32m4_t vd,
+ __bf16 vs1, vbfloat16m2_t vs2,
+ size_t vl) {
+ return __riscv_vfwmacc_vf_bf16m2_f32m4_rm_m(vm, vd, vs1, vs2, __RISCV_FRM_RNE,
+ vl);
+}
+
+// CHECK-RV64-LABEL: define dso_local <vscale x 16 x float> @test_vfwmacc_vv_bf16m4_f32m8_rm_m(
+// CHECK-RV64-SAME: <vscale x 16 x i1> [[VM:%.*]], <vscale x 16 x float> [[VD:%.*]], <vscale x 16 x bfloat> [[VS1:%.*]], <vscale x 16 x bfloat> [[VS2:%.*]], i64 noundef [[VL:%.*]]) #[[ATTR0]] {
+// CHECK-RV64-NEXT: [[ENTRY:.*:]]
+// CHECK-RV64-NEXT: [[TMP0:%.*]] = call <vscale x 16 x float> @llvm.riscv.vfwmacc.mask.nxv16f32.nxv16bf16.nxv16bf16.i64(<vscale x 16 x float> [[VD]], <vscale x 16 x bfloat> [[VS1]], <vscale x 16 x bfloat> [[VS2]], <vscale x 16 x i1> [[VM]], i64 0, i64 [[VL]], i64 3)
+// CHECK-RV64-NEXT: ret <vscale x 16 x float> [[TMP0]]
+//
+vfloat32m8_t test_vfwmacc_vv_bf16m4_f32m8_rm_m(vbool4_t vm, vfloat32m8_t vd,
+ vbfloat16m4_t vs1,
+ vbfloat16m4_t vs2, size_t vl) {
+ return __riscv_vfwmacc_vv_bf16m4_f32m8_rm_m(vm, vd, vs1, vs2, __RISCV_FRM_RNE,
+ vl);
+}
+
+// CHECK-RV64-LABEL: define dso_local <vscale x 16 x float> @test_vfwmacc_vf_bf16m4_f32m8_rm_m(
+// CHECK-RV64-SAME: <vscale x 16 x i1> [[VM:%.*]], <vscale x 16 x float> [[VD:%.*]], bfloat noundef [[VS1:%.*]], <vscale x 16 x bfloat> [[VS2:%.*]], i64 noundef [[VL:%.*]]) #[[ATTR0]] {
+// CHECK-RV64-NEXT: [[ENTRY:.*:]]
+// CHECK-RV64-NEXT: [[TMP0:%.*]] = call <vscale x 16 x float> @llvm.riscv.vfwmacc.mask.nxv16f32.bf16.nxv16bf16.i64(<vscale x 16 x float> [[VD]], bfloat [[VS1]], <vscale x 16 x bfloat> [[VS2]], <vscale x 16 x i1> [[VM]], i64 0, i64 [[VL]], i64 3)
+// CHECK-RV64-NEXT: ret <vscale x 16 x float> [[TMP0]]
+//
+vfloat32m8_t test_vfwmacc_vf_bf16m4_f32m8_rm_m(vbool4_t vm, vfloat32m8_t vd,
+ __bf16 vs1, vbfloat16m4_t vs2,
+ size_t vl) {
+ return __riscv_vfwmacc_vf_bf16m4_f32m8_rm_m(vm, vd, vs1, vs2, __RISCV_FRM_RNE,
+ vl);
+}
diff --git a/clang/test/CodeGen/RISCV/rvv-intrinsics-autogenerated/zvfbfa/non-policy/non-overloaded/vfwmsac.c b/clang/test/CodeGen/RISCV/rvv-intrinsics-autogenerated/zvfbfa/non-policy/non-overloaded/vfwmsac.c
new file mode 100644
index 0000000..7490813
--- /dev/null
+++ b/clang/test/CodeGen/RISCV/rvv-intrinsics-autogenerated/zvfbfa/non-policy/non-overloaded/vfwmsac.c
@@ -0,0 +1,486 @@
+// NOTE: Assertions have been autogenerated by utils/update_cc_test_checks.py UTC_ARGS: --version 5
+// REQUIRES: riscv-registered-target
+// RUN: %clang_cc1 -triple riscv64 -target-feature +v \
+// RUN: -target-feature +experimental-zvfbfa -disable-O0-optnone \
+// RUN: -emit-llvm %s -o - | opt -S -passes=mem2reg | \
+// RUN: FileCheck --check-prefix=CHECK-RV64 %s
+
+#include <riscv_vector.h>
+
+// CHECK-RV64-LABEL: define dso_local <vscale x 1 x float> @test_vfwmsac_vv_bf16mf4_f32mf2(
+// CHECK-RV64-SAME: <vscale x 1 x float> [[VD:%.*]], <vscale x 1 x bfloat> [[VS1:%.*]], <vscale x 1 x bfloat> [[VS2:%.*]], i64 noundef [[VL:%.*]]) #[[ATTR0:[0-9]+]] {
+// CHECK-RV64-NEXT: [[ENTRY:.*:]]
+// CHECK-RV64-NEXT: [[TMP0:%.*]] = call <vscale x 1 x float> @llvm.riscv.vfwmsac.nxv1f32.nxv1bf16.nxv1bf16.i64(<vscale x 1 x float> [[VD]], <vscale x 1 x bfloat> [[VS1]], <vscale x 1 x bfloat> [[VS2]], i64 7, i64 [[VL]], i64 3)
+// CHECK-RV64-NEXT: ret <vscale x 1 x float> [[TMP0]]
+//
+vfloat32mf2_t test_vfwmsac_vv_bf16mf4_f32mf2(vfloat32mf2_t vd,
+ vbfloat16mf4_t vs1,
+ vbfloat16mf4_t vs2, size_t vl) {
+ return __riscv_vfwmsac_vv_bf16mf4_f32mf2(vd, vs1, vs2, vl);
+}
+
+// CHECK-RV64-LABEL: define dso_local <vscale x 1 x float> @test_vfwmsac_vf_bf16mf4_f32mf2(
+// CHECK-RV64-SAME: <vscale x 1 x float> [[VD:%.*]], bfloat noundef [[VS1:%.*]], <vscale x 1 x bfloat> [[VS2:%.*]], i64 noundef [[VL:%.*]]) #[[ATTR0]] {
+// CHECK-RV64-NEXT: [[ENTRY:.*:]]
+// CHECK-RV64-NEXT: [[TMP0:%.*]] = call <vscale x 1 x float> @llvm.riscv.vfwmsac.nxv1f32.bf16.nxv1bf16.i64(<vscale x 1 x float> [[VD]], bfloat [[VS1]], <vscale x 1 x bfloat> [[VS2]], i64 7, i64 [[VL]], i64 3)
+// CHECK-RV64-NEXT: ret <vscale x 1 x float> [[TMP0]]
+//
+vfloat32mf2_t test_vfwmsac_vf_bf16mf4_f32mf2(vfloat32mf2_t vd, __bf16 vs1,
+ vbfloat16mf4_t vs2, size_t vl) {
+ return __riscv_vfwmsac_vf_bf16mf4_f32mf2(vd, vs1, vs2, vl);
+}
+
+// CHECK-RV64-LABEL: define dso_local <vscale x 2 x float> @test_vfwmsac_vv_bf16mf2_f32m1(
+// CHECK-RV64-SAME: <vscale x 2 x float> [[VD:%.*]], <vscale x 2 x bfloat> [[VS1:%.*]], <vscale x 2 x bfloat> [[VS2:%.*]], i64 noundef [[VL:%.*]]) #[[ATTR0]] {
+// CHECK-RV64-NEXT: [[ENTRY:.*:]]
+// CHECK-RV64-NEXT: [[TMP0:%.*]] = call <vscale x 2 x float> @llvm.riscv.vfwmsac.nxv2f32.nxv2bf16.nxv2bf16.i64(<vscale x 2 x float> [[VD]], <vscale x 2 x bfloat> [[VS1]], <vscale x 2 x bfloat> [[VS2]], i64 7, i64 [[VL]], i64 3)
+// CHECK-RV64-NEXT: ret <vscale x 2 x float> [[TMP0]]
+//
+vfloat32m1_t test_vfwmsac_vv_bf16mf2_f32m1(vfloat32m1_t vd, vbfloat16mf2_t vs1,
+ vbfloat16mf2_t vs2, size_t vl) {
+ return __riscv_vfwmsac_vv_bf16mf2_f32m1(vd, vs1, vs2, vl);
+}
+
+// CHECK-RV64-LABEL: define dso_local <vscale x 2 x float> @test_vfwmsac_vf_bf16mf2_f32m1(
+// CHECK-RV64-SAME: <vscale x 2 x float> [[VD:%.*]], bfloat noundef [[VS1:%.*]], <vscale x 2 x bfloat> [[VS2:%.*]], i64 noundef [[VL:%.*]]) #[[ATTR0]] {
+// CHECK-RV64-NEXT: [[ENTRY:.*:]]
+// CHECK-RV64-NEXT: [[TMP0:%.*]] = call <vscale x 2 x float> @llvm.riscv.vfwmsac.nxv2f32.bf16.nxv2bf16.i64(<vscale x 2 x float> [[VD]], bfloat [[VS1]], <vscale x 2 x bfloat> [[VS2]], i64 7, i64 [[VL]], i64 3)
+// CHECK-RV64-NEXT: ret <vscale x 2 x float> [[TMP0]]
+//
+vfloat32m1_t test_vfwmsac_vf_bf16mf2_f32m1(vfloat32m1_t vd, __bf16 vs1,
+ vbfloat16mf2_t vs2, size_t vl) {
+ return __riscv_vfwmsac_vf_bf16mf2_f32m1(vd, vs1, vs2, vl);
+}
+
+// CHECK-RV64-LABEL: define dso_local <vscale x 4 x float> @test_vfwmsac_vv_bf16m1_f32m2(
+// CHECK-RV64-SAME: <vscale x 4 x float> [[VD:%.*]], <vscale x 4 x bfloat> [[VS1:%.*]], <vscale x 4 x bfloat> [[VS2:%.*]], i64 noundef [[VL:%.*]]) #[[ATTR0]] {
+// CHECK-RV64-NEXT: [[ENTRY:.*:]]
+// CHECK-RV64-NEXT: [[TMP0:%.*]] = call <vscale x 4 x float> @llvm.riscv.vfwmsac.nxv4f32.nxv4bf16.nxv4bf16.i64(<vscale x 4 x float> [[VD]], <vscale x 4 x bfloat> [[VS1]], <vscale x 4 x bfloat> [[VS2]], i64 7, i64 [[VL]], i64 3)
+// CHECK-RV64-NEXT: ret <vscale x 4 x float> [[TMP0]]
+//
+vfloat32m2_t test_vfwmsac_vv_bf16m1_f32m2(vfloat32m2_t vd, vbfloat16m1_t vs1,
+ vbfloat16m1_t vs2, size_t vl) {
+ return __riscv_vfwmsac_vv_bf16m1_f32m2(vd, vs1, vs2, vl);
+}
+
+// CHECK-RV64-LABEL: define dso_local <vscale x 4 x float> @test_vfwmsac_vf_bf16m1_f32m2(
+// CHECK-RV64-SAME: <vscale x 4 x float> [[VD:%.*]], bfloat noundef [[VS1:%.*]], <vscale x 4 x bfloat> [[VS2:%.*]], i64 noundef [[VL:%.*]]) #[[ATTR0]] {
+// CHECK-RV64-NEXT: [[ENTRY:.*:]]
+// CHECK-RV64-NEXT: [[TMP0:%.*]] = call <vscale x 4 x float> @llvm.riscv.vfwmsac.nxv4f32.bf16.nxv4bf16.i64(<vscale x 4 x float> [[VD]], bfloat [[VS1]], <vscale x 4 x bfloat> [[VS2]], i64 7, i64 [[VL]], i64 3)
+// CHECK-RV64-NEXT: ret <vscale x 4 x float> [[TMP0]]
+//
+vfloat32m2_t test_vfwmsac_vf_bf16m1_f32m2(vfloat32m2_t vd, __bf16 vs1,
+ vbfloat16m1_t vs2, size_t vl) {
+ return __riscv_vfwmsac_vf_bf16m1_f32m2(vd, vs1, vs2, vl);
+}
+
+// CHECK-RV64-LABEL: define dso_local <vscale x 8 x float> @test_vfwmsac_vv_bf16m2_f32m4(
+// CHECK-RV64-SAME: <vscale x 8 x float> [[VD:%.*]], <vscale x 8 x bfloat> [[VS1:%.*]], <vscale x 8 x bfloat> [[VS2:%.*]], i64 noundef [[VL:%.*]]) #[[ATTR0]] {
+// CHECK-RV64-NEXT: [[ENTRY:.*:]]
+// CHECK-RV64-NEXT: [[TMP0:%.*]] = call <vscale x 8 x float> @llvm.riscv.vfwmsac.nxv8f32.nxv8bf16.nxv8bf16.i64(<vscale x 8 x float> [[VD]], <vscale x 8 x bfloat> [[VS1]], <vscale x 8 x bfloat> [[VS2]], i64 7, i64 [[VL]], i64 3)
+// CHECK-RV64-NEXT: ret <vscale x 8 x float> [[TMP0]]
+//
+vfloat32m4_t test_vfwmsac_vv_bf16m2_f32m4(vfloat32m4_t vd, vbfloat16m2_t vs1,
+ vbfloat16m2_t vs2, size_t vl) {
+ return __riscv_vfwmsac_vv_bf16m2_f32m4(vd, vs1, vs2, vl);
+}
+
+// CHECK-RV64-LABEL: define dso_local <vscale x 8 x float> @test_vfwmsac_vf_bf16m2_f32m4(
+// CHECK-RV64-SAME: <vscale x 8 x float> [[VD:%.*]], bfloat noundef [[VS1:%.*]], <vscale x 8 x bfloat> [[VS2:%.*]], i64 noundef [[VL:%.*]]) #[[ATTR0]] {
+// CHECK-RV64-NEXT: [[ENTRY:.*:]]
+// CHECK-RV64-NEXT: [[TMP0:%.*]] = call <vscale x 8 x float> @llvm.riscv.vfwmsac.nxv8f32.bf16.nxv8bf16.i64(<vscale x 8 x float> [[VD]], bfloat [[VS1]], <vscale x 8 x bfloat> [[VS2]], i64 7, i64 [[VL]], i64 3)
+// CHECK-RV64-NEXT: ret <vscale x 8 x float> [[TMP0]]
+//
+vfloat32m4_t test_vfwmsac_vf_bf16m2_f32m4(vfloat32m4_t vd, __bf16 vs1,
+ vbfloat16m2_t vs2, size_t vl) {
+ return __riscv_vfwmsac_vf_bf16m2_f32m4(vd, vs1, vs2, vl);
+}
+
+// CHECK-RV64-LABEL: define dso_local <vscale x 16 x float> @test_vfwmsac_vv_bf16m4_f32m8(
+// CHECK-RV64-SAME: <vscale x 16 x float> [[VD:%.*]], <vscale x 16 x bfloat> [[VS1:%.*]], <vscale x 16 x bfloat> [[VS2:%.*]], i64 noundef [[VL:%.*]]) #[[ATTR0]] {
+// CHECK-RV64-NEXT: [[ENTRY:.*:]]
+// CHECK-RV64-NEXT: [[TMP0:%.*]] = call <vscale x 16 x float> @llvm.riscv.vfwmsac.nxv16f32.nxv16bf16.nxv16bf16.i64(<vscale x 16 x float> [[VD]], <vscale x 16 x bfloat> [[VS1]], <vscale x 16 x bfloat> [[VS2]], i64 7, i64 [[VL]], i64 3)
+// CHECK-RV64-NEXT: ret <vscale x 16 x float> [[TMP0]]
+//
+vfloat32m8_t test_vfwmsac_vv_bf16m4_f32m8(vfloat32m8_t vd, vbfloat16m4_t vs1,
+ vbfloat16m4_t vs2, size_t vl) {
+ return __riscv_vfwmsac_vv_bf16m4_f32m8(vd, vs1, vs2, vl);
+}
+
+// CHECK-RV64-LABEL: define dso_local <vscale x 16 x float> @test_vfwmsac_vf_bf16m4_f32m8(
+// CHECK-RV64-SAME: <vscale x 16 x float> [[VD:%.*]], bfloat noundef [[VS1:%.*]], <vscale x 16 x bfloat> [[VS2:%.*]], i64 noundef [[VL:%.*]]) #[[ATTR0]] {
+// CHECK-RV64-NEXT: [[ENTRY:.*:]]
+// CHECK-RV64-NEXT: [[TMP0:%.*]] = call <vscale x 16 x float> @llvm.riscv.vfwmsac.nxv16f32.bf16.nxv16bf16.i64(<vscale x 16 x float> [[VD]], bfloat [[VS1]], <vscale x 16 x bfloat> [[VS2]], i64 7, i64 [[VL]], i64 3)
+// CHECK-RV64-NEXT: ret <vscale x 16 x float> [[TMP0]]
+//
+vfloat32m8_t test_vfwmsac_vf_bf16m4_f32m8(vfloat32m8_t vd, __bf16 vs1,
+ vbfloat16m4_t vs2, size_t vl) {
+ return __riscv_vfwmsac_vf_bf16m4_f32m8(vd, vs1, vs2, vl);
+}
+
+// CHECK-RV64-LABEL: define dso_local <vscale x 1 x float> @test_vfwmsac_vv_bf16mf4_f32mf2_m(
+// CHECK-RV64-SAME: <vscale x 1 x i1> [[VM:%.*]], <vscale x 1 x float> [[VD:%.*]], <vscale x 1 x bfloat> [[VS1:%.*]], <vscale x 1 x bfloat> [[VS2:%.*]], i64 noundef [[VL:%.*]]) #[[ATTR0]] {
+// CHECK-RV64-NEXT: [[ENTRY:.*:]]
+// CHECK-RV64-NEXT: [[TMP0:%.*]] = call <vscale x 1 x float> @llvm.riscv.vfwmsac.mask.nxv1f32.nxv1bf16.nxv1bf16.i64(<vscale x 1 x float> [[VD]], <vscale x 1 x bfloat> [[VS1]], <vscale x 1 x bfloat> [[VS2]], <vscale x 1 x i1> [[VM]], i64 7, i64 [[VL]], i64 3)
+// CHECK-RV64-NEXT: ret <vscale x 1 x float> [[TMP0]]
+//
+vfloat32mf2_t test_vfwmsac_vv_bf16mf4_f32mf2_m(vbool64_t vm, vfloat32mf2_t vd,
+ vbfloat16mf4_t vs1,
+ vbfloat16mf4_t vs2, size_t vl) {
+ return __riscv_vfwmsac_vv_bf16mf4_f32mf2_m(vm, vd, vs1, vs2, vl);
+}
+
+// CHECK-RV64-LABEL: define dso_local <vscale x 1 x float> @test_vfwmsac_vf_bf16mf4_f32mf2_m(
+// CHECK-RV64-SAME: <vscale x 1 x i1> [[VM:%.*]], <vscale x 1 x float> [[VD:%.*]], bfloat noundef [[VS1:%.*]], <vscale x 1 x bfloat> [[VS2:%.*]], i64 noundef [[VL:%.*]]) #[[ATTR0]] {
+// CHECK-RV64-NEXT: [[ENTRY:.*:]]
+// CHECK-RV64-NEXT: [[TMP0:%.*]] = call <vscale x 1 x float> @llvm.riscv.vfwmsac.mask.nxv1f32.bf16.nxv1bf16.i64(<vscale x 1 x float> [[VD]], bfloat [[VS1]], <vscale x 1 x bfloat> [[VS2]], <vscale x 1 x i1> [[VM]], i64 7, i64 [[VL]], i64 3)
+// CHECK-RV64-NEXT: ret <vscale x 1 x float> [[TMP0]]
+//
+vfloat32mf2_t test_vfwmsac_vf_bf16mf4_f32mf2_m(vbool64_t vm, vfloat32mf2_t vd,
+ __bf16 vs1, vbfloat16mf4_t vs2,
+ size_t vl) {
+ return __riscv_vfwmsac_vf_bf16mf4_f32mf2_m(vm, vd, vs1, vs2, vl);
+}
+
+// CHECK-RV64-LABEL: define dso_local <vscale x 2 x float> @test_vfwmsac_vv_bf16mf2_f32m1_m(
+// CHECK-RV64-SAME: <vscale x 2 x i1> [[VM:%.*]], <vscale x 2 x float> [[VD:%.*]], <vscale x 2 x bfloat> [[VS1:%.*]], <vscale x 2 x bfloat> [[VS2:%.*]], i64 noundef [[VL:%.*]]) #[[ATTR0]] {
+// CHECK-RV64-NEXT: [[ENTRY:.*:]]
+// CHECK-RV64-NEXT: [[TMP0:%.*]] = call <vscale x 2 x float> @llvm.riscv.vfwmsac.mask.nxv2f32.nxv2bf16.nxv2bf16.i64(<vscale x 2 x float> [[VD]], <vscale x 2 x bfloat> [[VS1]], <vscale x 2 x bfloat> [[VS2]], <vscale x 2 x i1> [[VM]], i64 7, i64 [[VL]], i64 3)
+// CHECK-RV64-NEXT: ret <vscale x 2 x float> [[TMP0]]
+//
+vfloat32m1_t test_vfwmsac_vv_bf16mf2_f32m1_m(vbool32_t vm, vfloat32m1_t vd,
+ vbfloat16mf2_t vs1,
+ vbfloat16mf2_t vs2, size_t vl) {
+ return __riscv_vfwmsac_vv_bf16mf2_f32m1_m(vm, vd, vs1, vs2, vl);
+}
+
+// CHECK-RV64-LABEL: define dso_local <vscale x 2 x float> @test_vfwmsac_vf_bf16mf2_f32m1_m(
+// CHECK-RV64-SAME: <vscale x 2 x i1> [[VM:%.*]], <vscale x 2 x float> [[VD:%.*]], bfloat noundef [[VS1:%.*]], <vscale x 2 x bfloat> [[VS2:%.*]], i64 noundef [[VL:%.*]]) #[[ATTR0]] {
+// CHECK-RV64-NEXT: [[ENTRY:.*:]]
+// CHECK-RV64-NEXT: [[TMP0:%.*]] = call <vscale x 2 x float> @llvm.riscv.vfwmsac.mask.nxv2f32.bf16.nxv2bf16.i64(<vscale x 2 x float> [[VD]], bfloat [[VS1]], <vscale x 2 x bfloat> [[VS2]], <vscale x 2 x i1> [[VM]], i64 7, i64 [[VL]], i64 3)
+// CHECK-RV64-NEXT: ret <vscale x 2 x float> [[TMP0]]
+//
+vfloat32m1_t test_vfwmsac_vf_bf16mf2_f32m1_m(vbool32_t vm, vfloat32m1_t vd,
+ __bf16 vs1, vbfloat16mf2_t vs2,
+ size_t vl) {
+ return __riscv_vfwmsac_vf_bf16mf2_f32m1_m(vm, vd, vs1, vs2, vl);
+}
+
+// CHECK-RV64-LABEL: define dso_local <vscale x 4 x float> @test_vfwmsac_vv_bf16m1_f32m2_m(
+// CHECK-RV64-SAME: <vscale x 4 x i1> [[VM:%.*]], <vscale x 4 x float> [[VD:%.*]], <vscale x 4 x bfloat> [[VS1:%.*]], <vscale x 4 x bfloat> [[VS2:%.*]], i64 noundef [[VL:%.*]]) #[[ATTR0]] {
+// CHECK-RV64-NEXT: [[ENTRY:.*:]]
+// CHECK-RV64-NEXT: [[TMP0:%.*]] = call <vscale x 4 x float> @llvm.riscv.vfwmsac.mask.nxv4f32.nxv4bf16.nxv4bf16.i64(<vscale x 4 x float> [[VD]], <vscale x 4 x bfloat> [[VS1]], <vscale x 4 x bfloat> [[VS2]], <vscale x 4 x i1> [[VM]], i64 7, i64 [[VL]], i64 3)
+// CHECK-RV64-NEXT: ret <vscale x 4 x float> [[TMP0]]
+//
+vfloat32m2_t test_vfwmsac_vv_bf16m1_f32m2_m(vbool16_t vm, vfloat32m2_t vd,
+ vbfloat16m1_t vs1,
+ vbfloat16m1_t vs2, size_t vl) {
+ return __riscv_vfwmsac_vv_bf16m1_f32m2_m(vm, vd, vs1, vs2, vl);
+}
+
+// CHECK-RV64-LABEL: define dso_local <vscale x 4 x float> @test_vfwmsac_vf_bf16m1_f32m2_m(
+// CHECK-RV64-SAME: <vscale x 4 x i1> [[VM:%.*]], <vscale x 4 x float> [[VD:%.*]], bfloat noundef [[VS1:%.*]], <vscale x 4 x bfloat> [[VS2:%.*]], i64 noundef [[VL:%.*]]) #[[ATTR0]] {
+// CHECK-RV64-NEXT: [[ENTRY:.*:]]
+// CHECK-RV64-NEXT: [[TMP0:%.*]] = call <vscale x 4 x float> @llvm.riscv.vfwmsac.mask.nxv4f32.bf16.nxv4bf16.i64(<vscale x 4 x float> [[VD]], bfloat [[VS1]], <vscale x 4 x bfloat> [[VS2]], <vscale x 4 x i1> [[VM]], i64 7, i64 [[VL]], i64 3)
+// CHECK-RV64-NEXT: ret <vscale x 4 x float> [[TMP0]]
+//
+vfloat32m2_t test_vfwmsac_vf_bf16m1_f32m2_m(vbool16_t vm, vfloat32m2_t vd,
+ __bf16 vs1, vbfloat16m1_t vs2,
+ size_t vl) {
+ return __riscv_vfwmsac_vf_bf16m1_f32m2_m(vm, vd, vs1, vs2, vl);
+}
+
+// CHECK-RV64-LABEL: define dso_local <vscale x 8 x float> @test_vfwmsac_vv_bf16m2_f32m4_m(
+// CHECK-RV64-SAME: <vscale x 8 x i1> [[VM:%.*]], <vscale x 8 x float> [[VD:%.*]], <vscale x 8 x bfloat> [[VS1:%.*]], <vscale x 8 x bfloat> [[VS2:%.*]], i64 noundef [[VL:%.*]]) #[[ATTR0]] {
+// CHECK-RV64-NEXT: [[ENTRY:.*:]]
+// CHECK-RV64-NEXT: [[TMP0:%.*]] = call <vscale x 8 x float> @llvm.riscv.vfwmsac.mask.nxv8f32.nxv8bf16.nxv8bf16.i64(<vscale x 8 x float> [[VD]], <vscale x 8 x bfloat> [[VS1]], <vscale x 8 x bfloat> [[VS2]], <vscale x 8 x i1> [[VM]], i64 7, i64 [[VL]], i64 3)
+// CHECK-RV64-NEXT: ret <vscale x 8 x float> [[TMP0]]
+//
+vfloat32m4_t test_vfwmsac_vv_bf16m2_f32m4_m(vbool8_t vm, vfloat32m4_t vd,
+ vbfloat16m2_t vs1,
+ vbfloat16m2_t vs2, size_t vl) {
+ return __riscv_vfwmsac_vv_bf16m2_f32m4_m(vm, vd, vs1, vs2, vl);
+}
+
+// CHECK-RV64-LABEL: define dso_local <vscale x 8 x float> @test_vfwmsac_vf_bf16m2_f32m4_m(
+// CHECK-RV64-SAME: <vscale x 8 x i1> [[VM:%.*]], <vscale x 8 x float> [[VD:%.*]], bfloat noundef [[VS1:%.*]], <vscale x 8 x bfloat> [[VS2:%.*]], i64 noundef [[VL:%.*]]) #[[ATTR0]] {
+// CHECK-RV64-NEXT: [[ENTRY:.*:]]
+// CHECK-RV64-NEXT: [[TMP0:%.*]] = call <vscale x 8 x float> @llvm.riscv.vfwmsac.mask.nxv8f32.bf16.nxv8bf16.i64(<vscale x 8 x float> [[VD]], bfloat [[VS1]], <vscale x 8 x bfloat> [[VS2]], <vscale x 8 x i1> [[VM]], i64 7, i64 [[VL]], i64 3)
+// CHECK-RV64-NEXT: ret <vscale x 8 x float> [[TMP0]]
+//
+vfloat32m4_t test_vfwmsac_vf_bf16m2_f32m4_m(vbool8_t vm, vfloat32m4_t vd,
+ __bf16 vs1, vbfloat16m2_t vs2,
+ size_t vl) {
+ return __riscv_vfwmsac_vf_bf16m2_f32m4_m(vm, vd, vs1, vs2, vl);
+}
+
+// CHECK-RV64-LABEL: define dso_local <vscale x 16 x float> @test_vfwmsac_vv_bf16m4_f32m8_m(
+// CHECK-RV64-SAME: <vscale x 16 x i1> [[VM:%.*]], <vscale x 16 x float> [[VD:%.*]], <vscale x 16 x bfloat> [[VS1:%.*]], <vscale x 16 x bfloat> [[VS2:%.*]], i64 noundef [[VL:%.*]]) #[[ATTR0]] {
+// CHECK-RV64-NEXT: [[ENTRY:.*:]]
+// CHECK-RV64-NEXT: [[TMP0:%.*]] = call <vscale x 16 x float> @llvm.riscv.vfwmsac.mask.nxv16f32.nxv16bf16.nxv16bf16.i64(<vscale x 16 x float> [[VD]], <vscale x 16 x bfloat> [[VS1]], <vscale x 16 x bfloat> [[VS2]], <vscale x 16 x i1> [[VM]], i64 7, i64 [[VL]], i64 3)
+// CHECK-RV64-NEXT: ret <vscale x 16 x float> [[TMP0]]
+//
+vfloat32m8_t test_vfwmsac_vv_bf16m4_f32m8_m(vbool4_t vm, vfloat32m8_t vd,
+ vbfloat16m4_t vs1,
+ vbfloat16m4_t vs2, size_t vl) {
+ return __riscv_vfwmsac_vv_bf16m4_f32m8_m(vm, vd, vs1, vs2, vl);
+}
+
+// CHECK-RV64-LABEL: define dso_local <vscale x 16 x float> @test_vfwmsac_vf_bf16m4_f32m8_m(
+// CHECK-RV64-SAME: <vscale x 16 x i1> [[VM:%.*]], <vscale x 16 x float> [[VD:%.*]], bfloat noundef [[VS1:%.*]], <vscale x 16 x bfloat> [[VS2:%.*]], i64 noundef [[VL:%.*]]) #[[ATTR0]] {
+// CHECK-RV64-NEXT: [[ENTRY:.*:]]
+// CHECK-RV64-NEXT: [[TMP0:%.*]] = call <vscale x 16 x float> @llvm.riscv.vfwmsac.mask.nxv16f32.bf16.nxv16bf16.i64(<vscale x 16 x float> [[VD]], bfloat [[VS1]], <vscale x 16 x bfloat> [[VS2]], <vscale x 16 x i1> [[VM]], i64 7, i64 [[VL]], i64 3)
+// CHECK-RV64-NEXT: ret <vscale x 16 x float> [[TMP0]]
+//
+vfloat32m8_t test_vfwmsac_vf_bf16m4_f32m8_m(vbool4_t vm, vfloat32m8_t vd,
+ __bf16 vs1, vbfloat16m4_t vs2,
+ size_t vl) {
+ return __riscv_vfwmsac_vf_bf16m4_f32m8_m(vm, vd, vs1, vs2, vl);
+}
+
+// CHECK-RV64-LABEL: define dso_local <vscale x 1 x float> @test_vfwmsac_vv_bf16mf4_f32mf2_rm(
+// CHECK-RV64-SAME: <vscale x 1 x float> [[VD:%.*]], <vscale x 1 x bfloat> [[VS1:%.*]], <vscale x 1 x bfloat> [[VS2:%.*]], i64 noundef [[VL:%.*]]) #[[ATTR0]] {
+// CHECK-RV64-NEXT: [[ENTRY:.*:]]
+// CHECK-RV64-NEXT: [[TMP0:%.*]] = call <vscale x 1 x float> @llvm.riscv.vfwmsac.nxv1f32.nxv1bf16.nxv1bf16.i64(<vscale x 1 x float> [[VD]], <vscale x 1 x bfloat> [[VS1]], <vscale x 1 x bfloat> [[VS2]], i64 0, i64 [[VL]], i64 3)
+// CHECK-RV64-NEXT: ret <vscale x 1 x float> [[TMP0]]
+//
+vfloat32mf2_t test_vfwmsac_vv_bf16mf4_f32mf2_rm(vfloat32mf2_t vd,
+ vbfloat16mf4_t vs1,
+ vbfloat16mf4_t vs2, size_t vl) {
+ return __riscv_vfwmsac_vv_bf16mf4_f32mf2_rm(vd, vs1, vs2, __RISCV_FRM_RNE,
+ vl);
+}
+
+// CHECK-RV64-LABEL: define dso_local <vscale x 1 x float> @test_vfwmsac_vf_bf16mf4_f32mf2_rm(
+// CHECK-RV64-SAME: <vscale x 1 x float> [[VD:%.*]], bfloat noundef [[VS1:%.*]], <vscale x 1 x bfloat> [[VS2:%.*]], i64 noundef [[VL:%.*]]) #[[ATTR0]] {
+// CHECK-RV64-NEXT: [[ENTRY:.*:]]
+// CHECK-RV64-NEXT: [[TMP0:%.*]] = call <vscale x 1 x float> @llvm.riscv.vfwmsac.nxv1f32.bf16.nxv1bf16.i64(<vscale x 1 x float> [[VD]], bfloat [[VS1]], <vscale x 1 x bfloat> [[VS2]], i64 0, i64 [[VL]], i64 3)
+// CHECK-RV64-NEXT: ret <vscale x 1 x float> [[TMP0]]
+//
+vfloat32mf2_t test_vfwmsac_vf_bf16mf4_f32mf2_rm(vfloat32mf2_t vd, __bf16 vs1,
+ vbfloat16mf4_t vs2, size_t vl) {
+ return __riscv_vfwmsac_vf_bf16mf4_f32mf2_rm(vd, vs1, vs2, __RISCV_FRM_RNE,
+ vl);
+}
+
+// CHECK-RV64-LABEL: define dso_local <vscale x 2 x float> @test_vfwmsac_vv_bf16mf2_f32m1_rm(
+// CHECK-RV64-SAME: <vscale x 2 x float> [[VD:%.*]], <vscale x 2 x bfloat> [[VS1:%.*]], <vscale x 2 x bfloat> [[VS2:%.*]], i64 noundef [[VL:%.*]]) #[[ATTR0]] {
+// CHECK-RV64-NEXT: [[ENTRY:.*:]]
+// CHECK-RV64-NEXT: [[TMP0:%.*]] = call <vscale x 2 x float> @llvm.riscv.vfwmsac.nxv2f32.nxv2bf16.nxv2bf16.i64(<vscale x 2 x float> [[VD]], <vscale x 2 x bfloat> [[VS1]], <vscale x 2 x bfloat> [[VS2]], i64 0, i64 [[VL]], i64 3)
+// CHECK-RV64-NEXT: ret <vscale x 2 x float> [[TMP0]]
+//
+vfloat32m1_t test_vfwmsac_vv_bf16mf2_f32m1_rm(vfloat32m1_t vd,
+ vbfloat16mf2_t vs1,
+ vbfloat16mf2_t vs2, size_t vl) {
+ return __riscv_vfwmsac_vv_bf16mf2_f32m1_rm(vd, vs1, vs2, __RISCV_FRM_RNE, vl);
+}
+
+// CHECK-RV64-LABEL: define dso_local <vscale x 2 x float> @test_vfwmsac_vf_bf16mf2_f32m1_rm(
+// CHECK-RV64-SAME: <vscale x 2 x float> [[VD:%.*]], bfloat noundef [[VS1:%.*]], <vscale x 2 x bfloat> [[VS2:%.*]], i64 noundef [[VL:%.*]]) #[[ATTR0]] {
+// CHECK-RV64-NEXT: [[ENTRY:.*:]]
+// CHECK-RV64-NEXT: [[TMP0:%.*]] = call <vscale x 2 x float> @llvm.riscv.vfwmsac.nxv2f32.bf16.nxv2bf16.i64(<vscale x 2 x float> [[VD]], bfloat [[VS1]], <vscale x 2 x bfloat> [[VS2]], i64 0, i64 [[VL]], i64 3)
+// CHECK-RV64-NEXT: ret <vscale x 2 x float> [[TMP0]]
+//
+vfloat32m1_t test_vfwmsac_vf_bf16mf2_f32m1_rm(vfloat32m1_t vd, __bf16 vs1,
+ vbfloat16mf2_t vs2, size_t vl) {
+ return __riscv_vfwmsac_vf_bf16mf2_f32m1_rm(vd, vs1, vs2, __RISCV_FRM_RNE, vl);
+}
+
+// CHECK-RV64-LABEL: define dso_local <vscale x 4 x float> @test_vfwmsac_vv_bf16m1_f32m2_rm(
+// CHECK-RV64-SAME: <vscale x 4 x float> [[VD:%.*]], <vscale x 4 x bfloat> [[VS1:%.*]], <vscale x 4 x bfloat> [[VS2:%.*]], i64 noundef [[VL:%.*]]) #[[ATTR0]] {
+// CHECK-RV64-NEXT: [[ENTRY:.*:]]
+// CHECK-RV64-NEXT: [[TMP0:%.*]] = call <vscale x 4 x float> @llvm.riscv.vfwmsac.nxv4f32.nxv4bf16.nxv4bf16.i64(<vscale x 4 x float> [[VD]], <vscale x 4 x bfloat> [[VS1]], <vscale x 4 x bfloat> [[VS2]], i64 0, i64 [[VL]], i64 3)
+// CHECK-RV64-NEXT: ret <vscale x 4 x float> [[TMP0]]
+//
+vfloat32m2_t test_vfwmsac_vv_bf16m1_f32m2_rm(vfloat32m2_t vd, vbfloat16m1_t vs1,
+ vbfloat16m1_t vs2, size_t vl) {
+ return __riscv_vfwmsac_vv_bf16m1_f32m2_rm(vd, vs1, vs2, __RISCV_FRM_RNE, vl);
+}
+
+// CHECK-RV64-LABEL: define dso_local <vscale x 4 x float> @test_vfwmsac_vf_bf16m1_f32m2_rm(
+// CHECK-RV64-SAME: <vscale x 4 x float> [[VD:%.*]], bfloat noundef [[VS1:%.*]], <vscale x 4 x bfloat> [[VS2:%.*]], i64 noundef [[VL:%.*]]) #[[ATTR0]] {
+// CHECK-RV64-NEXT: [[ENTRY:.*:]]
+// CHECK-RV64-NEXT: [[TMP0:%.*]] = call <vscale x 4 x float> @llvm.riscv.vfwmsac.nxv4f32.bf16.nxv4bf16.i64(<vscale x 4 x float> [[VD]], bfloat [[VS1]], <vscale x 4 x bfloat> [[VS2]], i64 0, i64 [[VL]], i64 3)
+// CHECK-RV64-NEXT: ret <vscale x 4 x float> [[TMP0]]
+//
+vfloat32m2_t test_vfwmsac_vf_bf16m1_f32m2_rm(vfloat32m2_t vd, __bf16 vs1,
+ vbfloat16m1_t vs2, size_t vl) {
+ return __riscv_vfwmsac_vf_bf16m1_f32m2_rm(vd, vs1, vs2, __RISCV_FRM_RNE, vl);
+}
+
+// CHECK-RV64-LABEL: define dso_local <vscale x 8 x float> @test_vfwmsac_vv_bf16m2_f32m4_rm(
+// CHECK-RV64-SAME: <vscale x 8 x float> [[VD:%.*]], <vscale x 8 x bfloat> [[VS1:%.*]], <vscale x 8 x bfloat> [[VS2:%.*]], i64 noundef [[VL:%.*]]) #[[ATTR0]] {
+// CHECK-RV64-NEXT: [[ENTRY:.*:]]
+// CHECK-RV64-NEXT: [[TMP0:%.*]] = call <vscale x 8 x float> @llvm.riscv.vfwmsac.nxv8f32.nxv8bf16.nxv8bf16.i64(<vscale x 8 x float> [[VD]], <vscale x 8 x bfloat> [[VS1]], <vscale x 8 x bfloat> [[VS2]], i64 0, i64 [[VL]], i64 3)
+// CHECK-RV64-NEXT: ret <vscale x 8 x float> [[TMP0]]
+//
+vfloat32m4_t test_vfwmsac_vv_bf16m2_f32m4_rm(vfloat32m4_t vd, vbfloat16m2_t vs1,
+ vbfloat16m2_t vs2, size_t vl) {
+ return __riscv_vfwmsac_vv_bf16m2_f32m4_rm(vd, vs1, vs2, __RISCV_FRM_RNE, vl);
+}
+
+// CHECK-RV64-LABEL: define dso_local <vscale x 8 x float> @test_vfwmsac_vf_bf16m2_f32m4_rm(
+// CHECK-RV64-SAME: <vscale x 8 x float> [[VD:%.*]], bfloat noundef [[VS1:%.*]], <vscale x 8 x bfloat> [[VS2:%.*]], i64 noundef [[VL:%.*]]) #[[ATTR0]] {
+// CHECK-RV64-NEXT: [[ENTRY:.*:]]
+// CHECK-RV64-NEXT: [[TMP0:%.*]] = call <vscale x 8 x float> @llvm.riscv.vfwmsac.nxv8f32.bf16.nxv8bf16.i64(<vscale x 8 x float> [[VD]], bfloat [[VS1]], <vscale x 8 x bfloat> [[VS2]], i64 0, i64 [[VL]], i64 3)
+// CHECK-RV64-NEXT: ret <vscale x 8 x float> [[TMP0]]
+//
+vfloat32m4_t test_vfwmsac_vf_bf16m2_f32m4_rm(vfloat32m4_t vd, __bf16 vs1,
+ vbfloat16m2_t vs2, size_t vl) {
+ return __riscv_vfwmsac_vf_bf16m2_f32m4_rm(vd, vs1, vs2, __RISCV_FRM_RNE, vl);
+}
+
+// CHECK-RV64-LABEL: define dso_local <vscale x 16 x float> @test_vfwmsac_vv_bf16m4_f32m8_rm(
+// CHECK-RV64-SAME: <vscale x 16 x float> [[VD:%.*]], <vscale x 16 x bfloat> [[VS1:%.*]], <vscale x 16 x bfloat> [[VS2:%.*]], i64 noundef [[VL:%.*]]) #[[ATTR0]] {
+// CHECK-RV64-NEXT: [[ENTRY:.*:]]
+// CHECK-RV64-NEXT: [[TMP0:%.*]] = call <vscale x 16 x float> @llvm.riscv.vfwmsac.nxv16f32.nxv16bf16.nxv16bf16.i64(<vscale x 16 x float> [[VD]], <vscale x 16 x bfloat> [[VS1]], <vscale x 16 x bfloat> [[VS2]], i64 0, i64 [[VL]], i64 3)
+// CHECK-RV64-NEXT: ret <vscale x 16 x float> [[TMP0]]
+//
+vfloat32m8_t test_vfwmsac_vv_bf16m4_f32m8_rm(vfloat32m8_t vd, vbfloat16m4_t vs1,
+ vbfloat16m4_t vs2, size_t vl) {
+ return __riscv_vfwmsac_vv_bf16m4_f32m8_rm(vd, vs1, vs2, __RISCV_FRM_RNE, vl);
+}
+
+// CHECK-RV64-LABEL: define dso_local <vscale x 16 x float> @test_vfwmsac_vf_bf16m4_f32m8_rm(
+// CHECK-RV64-SAME: <vscale x 16 x float> [[VD:%.*]], bfloat noundef [[VS1:%.*]], <vscale x 16 x bfloat> [[VS2:%.*]], i64 noundef [[VL:%.*]]) #[[ATTR0]] {
+// CHECK-RV64-NEXT: [[ENTRY:.*:]]
+// CHECK-RV64-NEXT: [[TMP0:%.*]] = call <vscale x 16 x float> @llvm.riscv.vfwmsac.nxv16f32.bf16.nxv16bf16.i64(<vscale x 16 x float> [[VD]], bfloat [[VS1]], <vscale x 16 x bfloat> [[VS2]], i64 0, i64 [[VL]], i64 3)
+// CHECK-RV64-NEXT: ret <vscale x 16 x float> [[TMP0]]
+//
+vfloat32m8_t test_vfwmsac_vf_bf16m4_f32m8_rm(vfloat32m8_t vd, __bf16 vs1,
+ vbfloat16m4_t vs2, size_t vl) {
+ return __riscv_vfwmsac_vf_bf16m4_f32m8_rm(vd, vs1, vs2, __RISCV_FRM_RNE, vl);
+}
+
+// CHECK-RV64-LABEL: define dso_local <vscale x 1 x float> @test_vfwmsac_vv_bf16mf4_f32mf2_rm_m(
+// CHECK-RV64-SAME: <vscale x 1 x i1> [[VM:%.*]], <vscale x 1 x float> [[VD:%.*]], <vscale x 1 x bfloat> [[VS1:%.*]], <vscale x 1 x bfloat> [[VS2:%.*]], i64 noundef [[VL:%.*]]) #[[ATTR0]] {
+// CHECK-RV64-NEXT: [[ENTRY:.*:]]
+// CHECK-RV64-NEXT: [[TMP0:%.*]] = call <vscale x 1 x float> @llvm.riscv.vfwmsac.mask.nxv1f32.nxv1bf16.nxv1bf16.i64(<vscale x 1 x float> [[VD]], <vscale x 1 x bfloat> [[VS1]], <vscale x 1 x bfloat> [[VS2]], <vscale x 1 x i1> [[VM]], i64 0, i64 [[VL]], i64 3)
+// CHECK-RV64-NEXT: ret <vscale x 1 x float> [[TMP0]]
+//
+vfloat32mf2_t test_vfwmsac_vv_bf16mf4_f32mf2_rm_m(vbool64_t vm,
+ vfloat32mf2_t vd,
+ vbfloat16mf4_t vs1,
+ vbfloat16mf4_t vs2,
+ size_t vl) {
+ return __riscv_vfwmsac_vv_bf16mf4_f32mf2_rm_m(vm, vd, vs1, vs2,
+ __RISCV_FRM_RNE, vl);
+}
+
+// CHECK-RV64-LABEL: define dso_local <vscale x 1 x float> @test_vfwmsac_vf_bf16mf4_f32mf2_rm_m(
+// CHECK-RV64-SAME: <vscale x 1 x i1> [[VM:%.*]], <vscale x 1 x float> [[VD:%.*]], bfloat noundef [[VS1:%.*]], <vscale x 1 x bfloat> [[VS2:%.*]], i64 noundef [[VL:%.*]]) #[[ATTR0]] {
+// CHECK-RV64-NEXT: [[ENTRY:.*:]]
+// CHECK-RV64-NEXT: [[TMP0:%.*]] = call <vscale x 1 x float> @llvm.riscv.vfwmsac.mask.nxv1f32.bf16.nxv1bf16.i64(<vscale x 1 x float> [[VD]], bfloat [[VS1]], <vscale x 1 x bfloat> [[VS2]], <vscale x 1 x i1> [[VM]], i64 0, i64 [[VL]], i64 3)
+// CHECK-RV64-NEXT: ret <vscale x 1 x float> [[TMP0]]
+//
+vfloat32mf2_t test_vfwmsac_vf_bf16mf4_f32mf2_rm_m(vbool64_t vm,
+ vfloat32mf2_t vd, __bf16 vs1,
+ vbfloat16mf4_t vs2,
+ size_t vl) {
+ return __riscv_vfwmsac_vf_bf16mf4_f32mf2_rm_m(vm, vd, vs1, vs2,
+ __RISCV_FRM_RNE, vl);
+}
+
+// CHECK-RV64-LABEL: define dso_local <vscale x 2 x float> @test_vfwmsac_vv_bf16mf2_f32m1_rm_m(
+// CHECK-RV64-SAME: <vscale x 2 x i1> [[VM:%.*]], <vscale x 2 x float> [[VD:%.*]], <vscale x 2 x bfloat> [[VS1:%.*]], <vscale x 2 x bfloat> [[VS2:%.*]], i64 noundef [[VL:%.*]]) #[[ATTR0]] {
+// CHECK-RV64-NEXT: [[ENTRY:.*:]]
+// CHECK-RV64-NEXT: [[TMP0:%.*]] = call <vscale x 2 x float> @llvm.riscv.vfwmsac.mask.nxv2f32.nxv2bf16.nxv2bf16.i64(<vscale x 2 x float> [[VD]], <vscale x 2 x bfloat> [[VS1]], <vscale x 2 x bfloat> [[VS2]], <vscale x 2 x i1> [[VM]], i64 0, i64 [[VL]], i64 3)
+// CHECK-RV64-NEXT: ret <vscale x 2 x float> [[TMP0]]
+//
+vfloat32m1_t test_vfwmsac_vv_bf16mf2_f32m1_rm_m(vbool32_t vm, vfloat32m1_t vd,
+ vbfloat16mf2_t vs1,
+ vbfloat16mf2_t vs2, size_t vl) {
+ return __riscv_vfwmsac_vv_bf16mf2_f32m1_rm_m(vm, vd, vs1, vs2,
+ __RISCV_FRM_RNE, vl);
+}
+
+// CHECK-RV64-LABEL: define dso_local <vscale x 2 x float> @test_vfwmsac_vf_bf16mf2_f32m1_rm_m(
+// CHECK-RV64-SAME: <vscale x 2 x i1> [[VM:%.*]], <vscale x 2 x float> [[VD:%.*]], bfloat noundef [[VS1:%.*]], <vscale x 2 x bfloat> [[VS2:%.*]], i64 noundef [[VL:%.*]]) #[[ATTR0]] {
+// CHECK-RV64-NEXT: [[ENTRY:.*:]]
+// CHECK-RV64-NEXT: [[TMP0:%.*]] = call <vscale x 2 x float> @llvm.riscv.vfwmsac.mask.nxv2f32.bf16.nxv2bf16.i64(<vscale x 2 x float> [[VD]], bfloat [[VS1]], <vscale x 2 x bfloat> [[VS2]], <vscale x 2 x i1> [[VM]], i64 0, i64 [[VL]], i64 3)
+// CHECK-RV64-NEXT: ret <vscale x 2 x float> [[TMP0]]
+//
+vfloat32m1_t test_vfwmsac_vf_bf16mf2_f32m1_rm_m(vbool32_t vm, vfloat32m1_t vd,
+ __bf16 vs1, vbfloat16mf2_t vs2,
+ size_t vl) {
+ return __riscv_vfwmsac_vf_bf16mf2_f32m1_rm_m(vm, vd, vs1, vs2,
+ __RISCV_FRM_RNE, vl);
+}
+
+// CHECK-RV64-LABEL: define dso_local <vscale x 4 x float> @test_vfwmsac_vv_bf16m1_f32m2_rm_m(
+// CHECK-RV64-SAME: <vscale x 4 x i1> [[VM:%.*]], <vscale x 4 x float> [[VD:%.*]], <vscale x 4 x bfloat> [[VS1:%.*]], <vscale x 4 x bfloat> [[VS2:%.*]], i64 noundef [[VL:%.*]]) #[[ATTR0]] {
+// CHECK-RV64-NEXT: [[ENTRY:.*:]]
+// CHECK-RV64-NEXT: [[TMP0:%.*]] = call <vscale x 4 x float> @llvm.riscv.vfwmsac.mask.nxv4f32.nxv4bf16.nxv4bf16.i64(<vscale x 4 x float> [[VD]], <vscale x 4 x bfloat> [[VS1]], <vscale x 4 x bfloat> [[VS2]], <vscale x 4 x i1> [[VM]], i64 0, i64 [[VL]], i64 3)
+// CHECK-RV64-NEXT: ret <vscale x 4 x float> [[TMP0]]
+//
+vfloat32m2_t test_vfwmsac_vv_bf16m1_f32m2_rm_m(vbool16_t vm, vfloat32m2_t vd,
+ vbfloat16m1_t vs1,
+ vbfloat16m1_t vs2, size_t vl) {
+ return __riscv_vfwmsac_vv_bf16m1_f32m2_rm_m(vm, vd, vs1, vs2, __RISCV_FRM_RNE,
+ vl);
+}
+
+// CHECK-RV64-LABEL: define dso_local <vscale x 4 x float> @test_vfwmsac_vf_bf16m1_f32m2_rm_m(
+// CHECK-RV64-SAME: <vscale x 4 x i1> [[VM:%.*]], <vscale x 4 x float> [[VD:%.*]], bfloat noundef [[VS1:%.*]], <vscale x 4 x bfloat> [[VS2:%.*]], i64 noundef [[VL:%.*]]) #[[ATTR0]] {
+// CHECK-RV64-NEXT: [[ENTRY:.*:]]
+// CHECK-RV64-NEXT: [[TMP0:%.*]] = call <vscale x 4 x float> @llvm.riscv.vfwmsac.mask.nxv4f32.bf16.nxv4bf16.i64(<vscale x 4 x float> [[VD]], bfloat [[VS1]], <vscale x 4 x bfloat> [[VS2]], <vscale x 4 x i1> [[VM]], i64 0, i64 [[VL]], i64 3)
+// CHECK-RV64-NEXT: ret <vscale x 4 x float> [[TMP0]]
+//
+vfloat32m2_t test_vfwmsac_vf_bf16m1_f32m2_rm_m(vbool16_t vm, vfloat32m2_t vd,
+ __bf16 vs1, vbfloat16m1_t vs2,
+ size_t vl) {
+ return __riscv_vfwmsac_vf_bf16m1_f32m2_rm_m(vm, vd, vs1, vs2, __RISCV_FRM_RNE,
+ vl);
+}
+
+// CHECK-RV64-LABEL: define dso_local <vscale x 8 x float> @test_vfwmsac_vv_bf16m2_f32m4_rm_m(
+// CHECK-RV64-SAME: <vscale x 8 x i1> [[VM:%.*]], <vscale x 8 x float> [[VD:%.*]], <vscale x 8 x bfloat> [[VS1:%.*]], <vscale x 8 x bfloat> [[VS2:%.*]], i64 noundef [[VL:%.*]]) #[[ATTR0]] {
+// CHECK-RV64-NEXT: [[ENTRY:.*:]]
+// CHECK-RV64-NEXT: [[TMP0:%.*]] = call <vscale x 8 x float> @llvm.riscv.vfwmsac.mask.nxv8f32.nxv8bf16.nxv8bf16.i64(<vscale x 8 x float> [[VD]], <vscale x 8 x bfloat> [[VS1]], <vscale x 8 x bfloat> [[VS2]], <vscale x 8 x i1> [[VM]], i64 0, i64 [[VL]], i64 3)
+// CHECK-RV64-NEXT: ret <vscale x 8 x float> [[TMP0]]
+//
+vfloat32m4_t test_vfwmsac_vv_bf16m2_f32m4_rm_m(vbool8_t vm, vfloat32m4_t vd,
+ vbfloat16m2_t vs1,
+ vbfloat16m2_t vs2, size_t vl) {
+ return __riscv_vfwmsac_vv_bf16m2_f32m4_rm_m(vm, vd, vs1, vs2, __RISCV_FRM_RNE,
+ vl);
+}
+
+// CHECK-RV64-LABEL: define dso_local <vscale x 8 x float> @test_vfwmsac_vf_bf16m2_f32m4_rm_m(
+// CHECK-RV64-SAME: <vscale x 8 x i1> [[VM:%.*]], <vscale x 8 x float> [[VD:%.*]], bfloat noundef [[VS1:%.*]], <vscale x 8 x bfloat> [[VS2:%.*]], i64 noundef [[VL:%.*]]) #[[ATTR0]] {
+// CHECK-RV64-NEXT: [[ENTRY:.*:]]
+// CHECK-RV64-NEXT: [[TMP0:%.*]] = call <vscale x 8 x float> @llvm.riscv.vfwmsac.mask.nxv8f32.bf16.nxv8bf16.i64(<vscale x 8 x float> [[VD]], bfloat [[VS1]], <vscale x 8 x bfloat> [[VS2]], <vscale x 8 x i1> [[VM]], i64 0, i64 [[VL]], i64 3)
+// CHECK-RV64-NEXT: ret <vscale x 8 x float> [[TMP0]]
+//
+vfloat32m4_t test_vfwmsac_vf_bf16m2_f32m4_rm_m(vbool8_t vm, vfloat32m4_t vd,
+ __bf16 vs1, vbfloat16m2_t vs2,
+ size_t vl) {
+ return __riscv_vfwmsac_vf_bf16m2_f32m4_rm_m(vm, vd, vs1, vs2, __RISCV_FRM_RNE,
+ vl);
+}
+
+// CHECK-RV64-LABEL: define dso_local <vscale x 16 x float> @test_vfwmsac_vv_bf16m4_f32m8_rm_m(
+// CHECK-RV64-SAME: <vscale x 16 x i1> [[VM:%.*]], <vscale x 16 x float> [[VD:%.*]], <vscale x 16 x bfloat> [[VS1:%.*]], <vscale x 16 x bfloat> [[VS2:%.*]], i64 noundef [[VL:%.*]]) #[[ATTR0]] {
+// CHECK-RV64-NEXT: [[ENTRY:.*:]]
+// CHECK-RV64-NEXT: [[TMP0:%.*]] = call <vscale x 16 x float> @llvm.riscv.vfwmsac.mask.nxv16f32.nxv16bf16.nxv16bf16.i64(<vscale x 16 x float> [[VD]], <vscale x 16 x bfloat> [[VS1]], <vscale x 16 x bfloat> [[VS2]], <vscale x 16 x i1> [[VM]], i64 0, i64 [[VL]], i64 3)
+// CHECK-RV64-NEXT: ret <vscale x 16 x float> [[TMP0]]
+//
+vfloat32m8_t test_vfwmsac_vv_bf16m4_f32m8_rm_m(vbool4_t vm, vfloat32m8_t vd,
+ vbfloat16m4_t vs1,
+ vbfloat16m4_t vs2, size_t vl) {
+ return __riscv_vfwmsac_vv_bf16m4_f32m8_rm_m(vm, vd, vs1, vs2, __RISCV_FRM_RNE,
+ vl);
+}
+
+// CHECK-RV64-LABEL: define dso_local <vscale x 16 x float> @test_vfwmsac_vf_bf16m4_f32m8_rm_m(
+// CHECK-RV64-SAME: <vscale x 16 x i1> [[VM:%.*]], <vscale x 16 x float> [[VD:%.*]], bfloat noundef [[VS1:%.*]], <vscale x 16 x bfloat> [[VS2:%.*]], i64 noundef [[VL:%.*]]) #[[ATTR0]] {
+// CHECK-RV64-NEXT: [[ENTRY:.*:]]
+// CHECK-RV64-NEXT: [[TMP0:%.*]] = call <vscale x 16 x float> @llvm.riscv.vfwmsac.mask.nxv16f32.bf16.nxv16bf16.i64(<vscale x 16 x float> [[VD]], bfloat [[VS1]], <vscale x 16 x bfloat> [[VS2]], <vscale x 16 x i1> [[VM]], i64 0, i64 [[VL]], i64 3)
+// CHECK-RV64-NEXT: ret <vscale x 16 x float> [[TMP0]]
+//
+vfloat32m8_t test_vfwmsac_vf_bf16m4_f32m8_rm_m(vbool4_t vm, vfloat32m8_t vd,
+ __bf16 vs1, vbfloat16m4_t vs2,
+ size_t vl) {
+ return __riscv_vfwmsac_vf_bf16m4_f32m8_rm_m(vm, vd, vs1, vs2, __RISCV_FRM_RNE,
+ vl);
+}
diff --git a/clang/test/CodeGen/RISCV/rvv-intrinsics-autogenerated/zvfbfa/non-policy/non-overloaded/vfwmul.c b/clang/test/CodeGen/RISCV/rvv-intrinsics-autogenerated/zvfbfa/non-policy/non-overloaded/vfwmul.c
new file mode 100644
index 0000000..6783ba4
--- /dev/null
+++ b/clang/test/CodeGen/RISCV/rvv-intrinsics-autogenerated/zvfbfa/non-policy/non-overloaded/vfwmul.c
@@ -0,0 +1,455 @@
+// NOTE: Assertions have been autogenerated by utils/update_cc_test_checks.py UTC_ARGS: --version 5
+// REQUIRES: riscv-registered-target
+// RUN: %clang_cc1 -triple riscv64 -target-feature +v \
+// RUN: -target-feature +experimental-zvfbfa -disable-O0-optnone \
+// RUN: -emit-llvm %s -o - | opt -S -passes=mem2reg | \
+// RUN: FileCheck --check-prefix=CHECK-RV64 %s
+
+#include <riscv_vector.h>
+
+// CHECK-RV64-LABEL: define dso_local <vscale x 1 x float> @test_vfwmul_vv_bf16mf4_f32mf2(
+// CHECK-RV64-SAME: <vscale x 1 x bfloat> [[VS2:%.*]], <vscale x 1 x bfloat> [[VS1:%.*]], i64 noundef [[VL:%.*]]) #[[ATTR0:[0-9]+]] {
+// CHECK-RV64-NEXT: [[ENTRY:.*:]]
+// CHECK-RV64-NEXT: [[TMP0:%.*]] = call <vscale x 1 x float> @llvm.riscv.vfwmul.nxv1f32.nxv1bf16.nxv1bf16.i64(<vscale x 1 x float> poison, <vscale x 1 x bfloat> [[VS2]], <vscale x 1 x bfloat> [[VS1]], i64 7, i64 [[VL]])
+// CHECK-RV64-NEXT: ret <vscale x 1 x float> [[TMP0]]
+//
+vfloat32mf2_t test_vfwmul_vv_bf16mf4_f32mf2(vbfloat16mf4_t vs2,
+ vbfloat16mf4_t vs1, size_t vl) {
+ return __riscv_vfwmul_vv_bf16mf4_f32mf2(vs2, vs1, vl);
+}
+
+// CHECK-RV64-LABEL: define dso_local <vscale x 1 x float> @test_vfwmul_vf_bf16mf4_f32mf2(
+// CHECK-RV64-SAME: <vscale x 1 x bfloat> [[VS2:%.*]], bfloat noundef [[RS1:%.*]], i64 noundef [[VL:%.*]]) #[[ATTR0]] {
+// CHECK-RV64-NEXT: [[ENTRY:.*:]]
+// CHECK-RV64-NEXT: [[TMP0:%.*]] = call <vscale x 1 x float> @llvm.riscv.vfwmul.nxv1f32.nxv1bf16.bf16.i64(<vscale x 1 x float> poison, <vscale x 1 x bfloat> [[VS2]], bfloat [[RS1]], i64 7, i64 [[VL]])
+// CHECK-RV64-NEXT: ret <vscale x 1 x float> [[TMP0]]
+//
+vfloat32mf2_t test_vfwmul_vf_bf16mf4_f32mf2(vbfloat16mf4_t vs2, __bf16 rs1,
+ size_t vl) {
+ return __riscv_vfwmul_vf_bf16mf4_f32mf2(vs2, rs1, vl);
+}
+
+// CHECK-RV64-LABEL: define dso_local <vscale x 2 x float> @test_vfwmul_vv_bf16mf2_f32m1(
+// CHECK-RV64-SAME: <vscale x 2 x bfloat> [[VS2:%.*]], <vscale x 2 x bfloat> [[VS1:%.*]], i64 noundef [[VL:%.*]]) #[[ATTR0]] {
+// CHECK-RV64-NEXT: [[ENTRY:.*:]]
+// CHECK-RV64-NEXT: [[TMP0:%.*]] = call <vscale x 2 x float> @llvm.riscv.vfwmul.nxv2f32.nxv2bf16.nxv2bf16.i64(<vscale x 2 x float> poison, <vscale x 2 x bfloat> [[VS2]], <vscale x 2 x bfloat> [[VS1]], i64 7, i64 [[VL]])
+// CHECK-RV64-NEXT: ret <vscale x 2 x float> [[TMP0]]
+//
+vfloat32m1_t test_vfwmul_vv_bf16mf2_f32m1(vbfloat16mf2_t vs2,
+ vbfloat16mf2_t vs1, size_t vl) {
+ return __riscv_vfwmul_vv_bf16mf2_f32m1(vs2, vs1, vl);
+}
+
+// CHECK-RV64-LABEL: define dso_local <vscale x 2 x float> @test_vfwmul_vf_bf16mf2_f32m1(
+// CHECK-RV64-SAME: <vscale x 2 x bfloat> [[VS2:%.*]], bfloat noundef [[RS1:%.*]], i64 noundef [[VL:%.*]]) #[[ATTR0]] {
+// CHECK-RV64-NEXT: [[ENTRY:.*:]]
+// CHECK-RV64-NEXT: [[TMP0:%.*]] = call <vscale x 2 x float> @llvm.riscv.vfwmul.nxv2f32.nxv2bf16.bf16.i64(<vscale x 2 x float> poison, <vscale x 2 x bfloat> [[VS2]], bfloat [[RS1]], i64 7, i64 [[VL]])
+// CHECK-RV64-NEXT: ret <vscale x 2 x float> [[TMP0]]
+//
+vfloat32m1_t test_vfwmul_vf_bf16mf2_f32m1(vbfloat16mf2_t vs2, __bf16 rs1,
+ size_t vl) {
+ return __riscv_vfwmul_vf_bf16mf2_f32m1(vs2, rs1, vl);
+}
+
+// CHECK-RV64-LABEL: define dso_local <vscale x 4 x float> @test_vfwmul_vv_bf16m1_f32m2(
+// CHECK-RV64-SAME: <vscale x 4 x bfloat> [[VS2:%.*]], <vscale x 4 x bfloat> [[VS1:%.*]], i64 noundef [[VL:%.*]]) #[[ATTR0]] {
+// CHECK-RV64-NEXT: [[ENTRY:.*:]]
+// CHECK-RV64-NEXT: [[TMP0:%.*]] = call <vscale x 4 x float> @llvm.riscv.vfwmul.nxv4f32.nxv4bf16.nxv4bf16.i64(<vscale x 4 x float> poison, <vscale x 4 x bfloat> [[VS2]], <vscale x 4 x bfloat> [[VS1]], i64 7, i64 [[VL]])
+// CHECK-RV64-NEXT: ret <vscale x 4 x float> [[TMP0]]
+//
+vfloat32m2_t test_vfwmul_vv_bf16m1_f32m2(vbfloat16m1_t vs2, vbfloat16m1_t vs1,
+ size_t vl) {
+ return __riscv_vfwmul_vv_bf16m1_f32m2(vs2, vs1, vl);
+}
+
+// CHECK-RV64-LABEL: define dso_local <vscale x 4 x float> @test_vfwmul_vf_bf16m1_f32m2(
+// CHECK-RV64-SAME: <vscale x 4 x bfloat> [[VS2:%.*]], bfloat noundef [[RS1:%.*]], i64 noundef [[VL:%.*]]) #[[ATTR0]] {
+// CHECK-RV64-NEXT: [[ENTRY:.*:]]
+// CHECK-RV64-NEXT: [[TMP0:%.*]] = call <vscale x 4 x float> @llvm.riscv.vfwmul.nxv4f32.nxv4bf16.bf16.i64(<vscale x 4 x float> poison, <vscale x 4 x bfloat> [[VS2]], bfloat [[RS1]], i64 7, i64 [[VL]])
+// CHECK-RV64-NEXT: ret <vscale x 4 x float> [[TMP0]]
+//
+vfloat32m2_t test_vfwmul_vf_bf16m1_f32m2(vbfloat16m1_t vs2, __bf16 rs1,
+ size_t vl) {
+ return __riscv_vfwmul_vf_bf16m1_f32m2(vs2, rs1, vl);
+}
+
+// CHECK-RV64-LABEL: define dso_local <vscale x 8 x float> @test_vfwmul_vv_bf16m2_f32m4(
+// CHECK-RV64-SAME: <vscale x 8 x bfloat> [[VS2:%.*]], <vscale x 8 x bfloat> [[VS1:%.*]], i64 noundef [[VL:%.*]]) #[[ATTR0]] {
+// CHECK-RV64-NEXT: [[ENTRY:.*:]]
+// CHECK-RV64-NEXT: [[TMP0:%.*]] = call <vscale x 8 x float> @llvm.riscv.vfwmul.nxv8f32.nxv8bf16.nxv8bf16.i64(<vscale x 8 x float> poison, <vscale x 8 x bfloat> [[VS2]], <vscale x 8 x bfloat> [[VS1]], i64 7, i64 [[VL]])
+// CHECK-RV64-NEXT: ret <vscale x 8 x float> [[TMP0]]
+//
+vfloat32m4_t test_vfwmul_vv_bf16m2_f32m4(vbfloat16m2_t vs2, vbfloat16m2_t vs1,
+ size_t vl) {
+ return __riscv_vfwmul_vv_bf16m2_f32m4(vs2, vs1, vl);
+}
+
+// CHECK-RV64-LABEL: define dso_local <vscale x 8 x float> @test_vfwmul_vf_bf16m2_f32m4(
+// CHECK-RV64-SAME: <vscale x 8 x bfloat> [[VS2:%.*]], bfloat noundef [[RS1:%.*]], i64 noundef [[VL:%.*]]) #[[ATTR0]] {
+// CHECK-RV64-NEXT: [[ENTRY:.*:]]
+// CHECK-RV64-NEXT: [[TMP0:%.*]] = call <vscale x 8 x float> @llvm.riscv.vfwmul.nxv8f32.nxv8bf16.bf16.i64(<vscale x 8 x float> poison, <vscale x 8 x bfloat> [[VS2]], bfloat [[RS1]], i64 7, i64 [[VL]])
+// CHECK-RV64-NEXT: ret <vscale x 8 x float> [[TMP0]]
+//
+vfloat32m4_t test_vfwmul_vf_bf16m2_f32m4(vbfloat16m2_t vs2, __bf16 rs1,
+ size_t vl) {
+ return __riscv_vfwmul_vf_bf16m2_f32m4(vs2, rs1, vl);
+}
+
+// CHECK-RV64-LABEL: define dso_local <vscale x 16 x float> @test_vfwmul_vv_bf16m4_f32m8(
+// CHECK-RV64-SAME: <vscale x 16 x bfloat> [[VS2:%.*]], <vscale x 16 x bfloat> [[VS1:%.*]], i64 noundef [[VL:%.*]]) #[[ATTR0]] {
+// CHECK-RV64-NEXT: [[ENTRY:.*:]]
+// CHECK-RV64-NEXT: [[TMP0:%.*]] = call <vscale x 16 x float> @llvm.riscv.vfwmul.nxv16f32.nxv16bf16.nxv16bf16.i64(<vscale x 16 x float> poison, <vscale x 16 x bfloat> [[VS2]], <vscale x 16 x bfloat> [[VS1]], i64 7, i64 [[VL]])
+// CHECK-RV64-NEXT: ret <vscale x 16 x float> [[TMP0]]
+//
+vfloat32m8_t test_vfwmul_vv_bf16m4_f32m8(vbfloat16m4_t vs2, vbfloat16m4_t vs1,
+ size_t vl) {
+ return __riscv_vfwmul_vv_bf16m4_f32m8(vs2, vs1, vl);
+}
+
+// CHECK-RV64-LABEL: define dso_local <vscale x 16 x float> @test_vfwmul_vf_bf16m4_f32m8(
+// CHECK-RV64-SAME: <vscale x 16 x bfloat> [[VS2:%.*]], bfloat noundef [[RS1:%.*]], i64 noundef [[VL:%.*]]) #[[ATTR0]] {
+// CHECK-RV64-NEXT: [[ENTRY:.*:]]
+// CHECK-RV64-NEXT: [[TMP0:%.*]] = call <vscale x 16 x float> @llvm.riscv.vfwmul.nxv16f32.nxv16bf16.bf16.i64(<vscale x 16 x float> poison, <vscale x 16 x bfloat> [[VS2]], bfloat [[RS1]], i64 7, i64 [[VL]])
+// CHECK-RV64-NEXT: ret <vscale x 16 x float> [[TMP0]]
+//
+vfloat32m8_t test_vfwmul_vf_bf16m4_f32m8(vbfloat16m4_t vs2, __bf16 rs1,
+ size_t vl) {
+ return __riscv_vfwmul_vf_bf16m4_f32m8(vs2, rs1, vl);
+}
+
+// CHECK-RV64-LABEL: define dso_local <vscale x 1 x float> @test_vfwmul_vv_bf16mf4_f32mf2_m(
+// CHECK-RV64-SAME: <vscale x 1 x i1> [[VM:%.*]], <vscale x 1 x bfloat> [[VS2:%.*]], <vscale x 1 x bfloat> [[VS1:%.*]], i64 noundef [[VL:%.*]]) #[[ATTR0]] {
+// CHECK-RV64-NEXT: [[ENTRY:.*:]]
+// CHECK-RV64-NEXT: [[TMP0:%.*]] = call <vscale x 1 x float> @llvm.riscv.vfwmul.mask.nxv1f32.nxv1bf16.nxv1bf16.i64(<vscale x 1 x float> poison, <vscale x 1 x bfloat> [[VS2]], <vscale x 1 x bfloat> [[VS1]], <vscale x 1 x i1> [[VM]], i64 7, i64 [[VL]], i64 3)
+// CHECK-RV64-NEXT: ret <vscale x 1 x float> [[TMP0]]
+//
+vfloat32mf2_t test_vfwmul_vv_bf16mf4_f32mf2_m(vbool64_t vm, vbfloat16mf4_t vs2,
+ vbfloat16mf4_t vs1, size_t vl) {
+ return __riscv_vfwmul_vv_bf16mf4_f32mf2_m(vm, vs2, vs1, vl);
+}
+
+// CHECK-RV64-LABEL: define dso_local <vscale x 1 x float> @test_vfwmul_vf_bf16mf4_f32mf2_m(
+// CHECK-RV64-SAME: <vscale x 1 x i1> [[VM:%.*]], <vscale x 1 x bfloat> [[VS2:%.*]], bfloat noundef [[RS1:%.*]], i64 noundef [[VL:%.*]]) #[[ATTR0]] {
+// CHECK-RV64-NEXT: [[ENTRY:.*:]]
+// CHECK-RV64-NEXT: [[TMP0:%.*]] = call <vscale x 1 x float> @llvm.riscv.vfwmul.mask.nxv1f32.nxv1bf16.bf16.i64(<vscale x 1 x float> poison, <vscale x 1 x bfloat> [[VS2]], bfloat [[RS1]], <vscale x 1 x i1> [[VM]], i64 7, i64 [[VL]], i64 3)
+// CHECK-RV64-NEXT: ret <vscale x 1 x float> [[TMP0]]
+//
+vfloat32mf2_t test_vfwmul_vf_bf16mf4_f32mf2_m(vbool64_t vm, vbfloat16mf4_t vs2,
+ __bf16 rs1, size_t vl) {
+ return __riscv_vfwmul_vf_bf16mf4_f32mf2_m(vm, vs2, rs1, vl);
+}
+
+// CHECK-RV64-LABEL: define dso_local <vscale x 2 x float> @test_vfwmul_vv_bf16mf2_f32m1_m(
+// CHECK-RV64-SAME: <vscale x 2 x i1> [[VM:%.*]], <vscale x 2 x bfloat> [[VS2:%.*]], <vscale x 2 x bfloat> [[VS1:%.*]], i64 noundef [[VL:%.*]]) #[[ATTR0]] {
+// CHECK-RV64-NEXT: [[ENTRY:.*:]]
+// CHECK-RV64-NEXT: [[TMP0:%.*]] = call <vscale x 2 x float> @llvm.riscv.vfwmul.mask.nxv2f32.nxv2bf16.nxv2bf16.i64(<vscale x 2 x float> poison, <vscale x 2 x bfloat> [[VS2]], <vscale x 2 x bfloat> [[VS1]], <vscale x 2 x i1> [[VM]], i64 7, i64 [[VL]], i64 3)
+// CHECK-RV64-NEXT: ret <vscale x 2 x float> [[TMP0]]
+//
+vfloat32m1_t test_vfwmul_vv_bf16mf2_f32m1_m(vbool32_t vm, vbfloat16mf2_t vs2,
+ vbfloat16mf2_t vs1, size_t vl) {
+ return __riscv_vfwmul_vv_bf16mf2_f32m1_m(vm, vs2, vs1, vl);
+}
+
+// CHECK-RV64-LABEL: define dso_local <vscale x 2 x float> @test_vfwmul_vf_bf16mf2_f32m1_m(
+// CHECK-RV64-SAME: <vscale x 2 x i1> [[VM:%.*]], <vscale x 2 x bfloat> [[VS2:%.*]], bfloat noundef [[RS1:%.*]], i64 noundef [[VL:%.*]]) #[[ATTR0]] {
+// CHECK-RV64-NEXT: [[ENTRY:.*:]]
+// CHECK-RV64-NEXT: [[TMP0:%.*]] = call <vscale x 2 x float> @llvm.riscv.vfwmul.mask.nxv2f32.nxv2bf16.bf16.i64(<vscale x 2 x float> poison, <vscale x 2 x bfloat> [[VS2]], bfloat [[RS1]], <vscale x 2 x i1> [[VM]], i64 7, i64 [[VL]], i64 3)
+// CHECK-RV64-NEXT: ret <vscale x 2 x float> [[TMP0]]
+//
+vfloat32m1_t test_vfwmul_vf_bf16mf2_f32m1_m(vbool32_t vm, vbfloat16mf2_t vs2,
+ __bf16 rs1, size_t vl) {
+ return __riscv_vfwmul_vf_bf16mf2_f32m1_m(vm, vs2, rs1, vl);
+}
+
+// CHECK-RV64-LABEL: define dso_local <vscale x 4 x float> @test_vfwmul_vv_bf16m1_f32m2_m(
+// CHECK-RV64-SAME: <vscale x 4 x i1> [[VM:%.*]], <vscale x 4 x bfloat> [[VS2:%.*]], <vscale x 4 x bfloat> [[VS1:%.*]], i64 noundef [[VL:%.*]]) #[[ATTR0]] {
+// CHECK-RV64-NEXT: [[ENTRY:.*:]]
+// CHECK-RV64-NEXT: [[TMP0:%.*]] = call <vscale x 4 x float> @llvm.riscv.vfwmul.mask.nxv4f32.nxv4bf16.nxv4bf16.i64(<vscale x 4 x float> poison, <vscale x 4 x bfloat> [[VS2]], <vscale x 4 x bfloat> [[VS1]], <vscale x 4 x i1> [[VM]], i64 7, i64 [[VL]], i64 3)
+// CHECK-RV64-NEXT: ret <vscale x 4 x float> [[TMP0]]
+//
+vfloat32m2_t test_vfwmul_vv_bf16m1_f32m2_m(vbool16_t vm, vbfloat16m1_t vs2,
+ vbfloat16m1_t vs1, size_t vl) {
+ return __riscv_vfwmul_vv_bf16m1_f32m2_m(vm, vs2, vs1, vl);
+}
+
+// CHECK-RV64-LABEL: define dso_local <vscale x 4 x float> @test_vfwmul_vf_bf16m1_f32m2_m(
+// CHECK-RV64-SAME: <vscale x 4 x i1> [[VM:%.*]], <vscale x 4 x bfloat> [[VS2:%.*]], bfloat noundef [[RS1:%.*]], i64 noundef [[VL:%.*]]) #[[ATTR0]] {
+// CHECK-RV64-NEXT: [[ENTRY:.*:]]
+// CHECK-RV64-NEXT: [[TMP0:%.*]] = call <vscale x 4 x float> @llvm.riscv.vfwmul.mask.nxv4f32.nxv4bf16.bf16.i64(<vscale x 4 x float> poison, <vscale x 4 x bfloat> [[VS2]], bfloat [[RS1]], <vscale x 4 x i1> [[VM]], i64 7, i64 [[VL]], i64 3)
+// CHECK-RV64-NEXT: ret <vscale x 4 x float> [[TMP0]]
+//
+vfloat32m2_t test_vfwmul_vf_bf16m1_f32m2_m(vbool16_t vm, vbfloat16m1_t vs2,
+ __bf16 rs1, size_t vl) {
+ return __riscv_vfwmul_vf_bf16m1_f32m2_m(vm, vs2, rs1, vl);
+}
+
+// CHECK-RV64-LABEL: define dso_local <vscale x 8 x float> @test_vfwmul_vv_bf16m2_f32m4_m(
+// CHECK-RV64-SAME: <vscale x 8 x i1> [[VM:%.*]], <vscale x 8 x bfloat> [[VS2:%.*]], <vscale x 8 x bfloat> [[VS1:%.*]], i64 noundef [[VL:%.*]]) #[[ATTR0]] {
+// CHECK-RV64-NEXT: [[ENTRY:.*:]]
+// CHECK-RV64-NEXT: [[TMP0:%.*]] = call <vscale x 8 x float> @llvm.riscv.vfwmul.mask.nxv8f32.nxv8bf16.nxv8bf16.i64(<vscale x 8 x float> poison, <vscale x 8 x bfloat> [[VS2]], <vscale x 8 x bfloat> [[VS1]], <vscale x 8 x i1> [[VM]], i64 7, i64 [[VL]], i64 3)
+// CHECK-RV64-NEXT: ret <vscale x 8 x float> [[TMP0]]
+//
+vfloat32m4_t test_vfwmul_vv_bf16m2_f32m4_m(vbool8_t vm, vbfloat16m2_t vs2,
+ vbfloat16m2_t vs1, size_t vl) {
+ return __riscv_vfwmul_vv_bf16m2_f32m4_m(vm, vs2, vs1, vl);
+}
+
+// CHECK-RV64-LABEL: define dso_local <vscale x 8 x float> @test_vfwmul_vf_bf16m2_f32m4_m(
+// CHECK-RV64-SAME: <vscale x 8 x i1> [[VM:%.*]], <vscale x 8 x bfloat> [[VS2:%.*]], bfloat noundef [[RS1:%.*]], i64 noundef [[VL:%.*]]) #[[ATTR0]] {
+// CHECK-RV64-NEXT: [[ENTRY:.*:]]
+// CHECK-RV64-NEXT: [[TMP0:%.*]] = call <vscale x 8 x float> @llvm.riscv.vfwmul.mask.nxv8f32.nxv8bf16.bf16.i64(<vscale x 8 x float> poison, <vscale x 8 x bfloat> [[VS2]], bfloat [[RS1]], <vscale x 8 x i1> [[VM]], i64 7, i64 [[VL]], i64 3)
+// CHECK-RV64-NEXT: ret <vscale x 8 x float> [[TMP0]]
+//
+vfloat32m4_t test_vfwmul_vf_bf16m2_f32m4_m(vbool8_t vm, vbfloat16m2_t vs2,
+ __bf16 rs1, size_t vl) {
+ return __riscv_vfwmul_vf_bf16m2_f32m4_m(vm, vs2, rs1, vl);
+}
+
+// CHECK-RV64-LABEL: define dso_local <vscale x 16 x float> @test_vfwmul_vv_bf16m4_f32m8_m(
+// CHECK-RV64-SAME: <vscale x 16 x i1> [[VM:%.*]], <vscale x 16 x bfloat> [[VS2:%.*]], <vscale x 16 x bfloat> [[VS1:%.*]], i64 noundef [[VL:%.*]]) #[[ATTR0]] {
+// CHECK-RV64-NEXT: [[ENTRY:.*:]]
+// CHECK-RV64-NEXT: [[TMP0:%.*]] = call <vscale x 16 x float> @llvm.riscv.vfwmul.mask.nxv16f32.nxv16bf16.nxv16bf16.i64(<vscale x 16 x float> poison, <vscale x 16 x bfloat> [[VS2]], <vscale x 16 x bfloat> [[VS1]], <vscale x 16 x i1> [[VM]], i64 7, i64 [[VL]], i64 3)
+// CHECK-RV64-NEXT: ret <vscale x 16 x float> [[TMP0]]
+//
+vfloat32m8_t test_vfwmul_vv_bf16m4_f32m8_m(vbool4_t vm, vbfloat16m4_t vs2,
+ vbfloat16m4_t vs1, size_t vl) {
+ return __riscv_vfwmul_vv_bf16m4_f32m8_m(vm, vs2, vs1, vl);
+}
+
+// CHECK-RV64-LABEL: define dso_local <vscale x 16 x float> @test_vfwmul_vf_bf16m4_f32m8_m(
+// CHECK-RV64-SAME: <vscale x 16 x i1> [[VM:%.*]], <vscale x 16 x bfloat> [[VS2:%.*]], bfloat noundef [[RS1:%.*]], i64 noundef [[VL:%.*]]) #[[ATTR0]] {
+// CHECK-RV64-NEXT: [[ENTRY:.*:]]
+// CHECK-RV64-NEXT: [[TMP0:%.*]] = call <vscale x 16 x float> @llvm.riscv.vfwmul.mask.nxv16f32.nxv16bf16.bf16.i64(<vscale x 16 x float> poison, <vscale x 16 x bfloat> [[VS2]], bfloat [[RS1]], <vscale x 16 x i1> [[VM]], i64 7, i64 [[VL]], i64 3)
+// CHECK-RV64-NEXT: ret <vscale x 16 x float> [[TMP0]]
+//
+vfloat32m8_t test_vfwmul_vf_bf16m4_f32m8_m(vbool4_t vm, vbfloat16m4_t vs2,
+ __bf16 rs1, size_t vl) {
+ return __riscv_vfwmul_vf_bf16m4_f32m8_m(vm, vs2, rs1, vl);
+}
+
+// CHECK-RV64-LABEL: define dso_local <vscale x 1 x float> @test_vfwmul_vv_bf16mf4_f32mf2_rm(
+// CHECK-RV64-SAME: <vscale x 1 x bfloat> [[VS2:%.*]], <vscale x 1 x bfloat> [[VS1:%.*]], i64 noundef [[VL:%.*]]) #[[ATTR0]] {
+// CHECK-RV64-NEXT: [[ENTRY:.*:]]
+// CHECK-RV64-NEXT: [[TMP0:%.*]] = call <vscale x 1 x float> @llvm.riscv.vfwmul.nxv1f32.nxv1bf16.nxv1bf16.i64(<vscale x 1 x float> poison, <vscale x 1 x bfloat> [[VS2]], <vscale x 1 x bfloat> [[VS1]], i64 0, i64 [[VL]])
+// CHECK-RV64-NEXT: ret <vscale x 1 x float> [[TMP0]]
+//
+vfloat32mf2_t test_vfwmul_vv_bf16mf4_f32mf2_rm(vbfloat16mf4_t vs2,
+ vbfloat16mf4_t vs1, size_t vl) {
+ return __riscv_vfwmul_vv_bf16mf4_f32mf2_rm(vs2, vs1, __RISCV_FRM_RNE, vl);
+}
+
+// CHECK-RV64-LABEL: define dso_local <vscale x 1 x float> @test_vfwmul_vf_bf16mf4_f32mf2_rm(
+// CHECK-RV64-SAME: <vscale x 1 x bfloat> [[VS2:%.*]], bfloat noundef [[RS1:%.*]], i64 noundef [[VL:%.*]]) #[[ATTR0]] {
+// CHECK-RV64-NEXT: [[ENTRY:.*:]]
+// CHECK-RV64-NEXT: [[TMP0:%.*]] = call <vscale x 1 x float> @llvm.riscv.vfwmul.nxv1f32.nxv1bf16.bf16.i64(<vscale x 1 x float> poison, <vscale x 1 x bfloat> [[VS2]], bfloat [[RS1]], i64 0, i64 [[VL]])
+// CHECK-RV64-NEXT: ret <vscale x 1 x float> [[TMP0]]
+//
+vfloat32mf2_t test_vfwmul_vf_bf16mf4_f32mf2_rm(vbfloat16mf4_t vs2, __bf16 rs1,
+ size_t vl) {
+ return __riscv_vfwmul_vf_bf16mf4_f32mf2_rm(vs2, rs1, __RISCV_FRM_RNE, vl);
+}
+
+// CHECK-RV64-LABEL: define dso_local <vscale x 2 x float> @test_vfwmul_vv_bf16mf2_f32m1_rm(
+// CHECK-RV64-SAME: <vscale x 2 x bfloat> [[VS2:%.*]], <vscale x 2 x bfloat> [[VS1:%.*]], i64 noundef [[VL:%.*]]) #[[ATTR0]] {
+// CHECK-RV64-NEXT: [[ENTRY:.*:]]
+// CHECK-RV64-NEXT: [[TMP0:%.*]] = call <vscale x 2 x float> @llvm.riscv.vfwmul.nxv2f32.nxv2bf16.nxv2bf16.i64(<vscale x 2 x float> poison, <vscale x 2 x bfloat> [[VS2]], <vscale x 2 x bfloat> [[VS1]], i64 0, i64 [[VL]])
+// CHECK-RV64-NEXT: ret <vscale x 2 x float> [[TMP0]]
+//
+vfloat32m1_t test_vfwmul_vv_bf16mf2_f32m1_rm(vbfloat16mf2_t vs2,
+ vbfloat16mf2_t vs1, size_t vl) {
+ return __riscv_vfwmul_vv_bf16mf2_f32m1_rm(vs2, vs1, __RISCV_FRM_RNE, vl);
+}
+
+// CHECK-RV64-LABEL: define dso_local <vscale x 2 x float> @test_vfwmul_vf_bf16mf2_f32m1_rm(
+// CHECK-RV64-SAME: <vscale x 2 x bfloat> [[VS2:%.*]], bfloat noundef [[RS1:%.*]], i64 noundef [[VL:%.*]]) #[[ATTR0]] {
+// CHECK-RV64-NEXT: [[ENTRY:.*:]]
+// CHECK-RV64-NEXT: [[TMP0:%.*]] = call <vscale x 2 x float> @llvm.riscv.vfwmul.nxv2f32.nxv2bf16.bf16.i64(<vscale x 2 x float> poison, <vscale x 2 x bfloat> [[VS2]], bfloat [[RS1]], i64 0, i64 [[VL]])
+// CHECK-RV64-NEXT: ret <vscale x 2 x float> [[TMP0]]
+//
+vfloat32m1_t test_vfwmul_vf_bf16mf2_f32m1_rm(vbfloat16mf2_t vs2, __bf16 rs1,
+ size_t vl) {
+ return __riscv_vfwmul_vf_bf16mf2_f32m1_rm(vs2, rs1, __RISCV_FRM_RNE, vl);
+}
+
+// CHECK-RV64-LABEL: define dso_local <vscale x 4 x float> @test_vfwmul_vv_bf16m1_f32m2_rm(
+// CHECK-RV64-SAME: <vscale x 4 x bfloat> [[VS2:%.*]], <vscale x 4 x bfloat> [[VS1:%.*]], i64 noundef [[VL:%.*]]) #[[ATTR0]] {
+// CHECK-RV64-NEXT: [[ENTRY:.*:]]
+// CHECK-RV64-NEXT: [[TMP0:%.*]] = call <vscale x 4 x float> @llvm.riscv.vfwmul.nxv4f32.nxv4bf16.nxv4bf16.i64(<vscale x 4 x float> poison, <vscale x 4 x bfloat> [[VS2]], <vscale x 4 x bfloat> [[VS1]], i64 0, i64 [[VL]])
+// CHECK-RV64-NEXT: ret <vscale x 4 x float> [[TMP0]]
+//
+vfloat32m2_t test_vfwmul_vv_bf16m1_f32m2_rm(vbfloat16m1_t vs2,
+ vbfloat16m1_t vs1, size_t vl) {
+ return __riscv_vfwmul_vv_bf16m1_f32m2_rm(vs2, vs1, __RISCV_FRM_RNE, vl);
+}
+
+// CHECK-RV64-LABEL: define dso_local <vscale x 4 x float> @test_vfwmul_vf_bf16m1_f32m2_rm(
+// CHECK-RV64-SAME: <vscale x 4 x bfloat> [[VS2:%.*]], bfloat noundef [[RS1:%.*]], i64 noundef [[VL:%.*]]) #[[ATTR0]] {
+// CHECK-RV64-NEXT: [[ENTRY:.*:]]
+// CHECK-RV64-NEXT: [[TMP0:%.*]] = call <vscale x 4 x float> @llvm.riscv.vfwmul.nxv4f32.nxv4bf16.bf16.i64(<vscale x 4 x float> poison, <vscale x 4 x bfloat> [[VS2]], bfloat [[RS1]], i64 0, i64 [[VL]])
+// CHECK-RV64-NEXT: ret <vscale x 4 x float> [[TMP0]]
+//
+vfloat32m2_t test_vfwmul_vf_bf16m1_f32m2_rm(vbfloat16m1_t vs2, __bf16 rs1,
+ size_t vl) {
+ return __riscv_vfwmul_vf_bf16m1_f32m2_rm(vs2, rs1, __RISCV_FRM_RNE, vl);
+}
+
+// CHECK-RV64-LABEL: define dso_local <vscale x 8 x float> @test_vfwmul_vv_bf16m2_f32m4_rm(
+// CHECK-RV64-SAME: <vscale x 8 x bfloat> [[VS2:%.*]], <vscale x 8 x bfloat> [[VS1:%.*]], i64 noundef [[VL:%.*]]) #[[ATTR0]] {
+// CHECK-RV64-NEXT: [[ENTRY:.*:]]
+// CHECK-RV64-NEXT: [[TMP0:%.*]] = call <vscale x 8 x float> @llvm.riscv.vfwmul.nxv8f32.nxv8bf16.nxv8bf16.i64(<vscale x 8 x float> poison, <vscale x 8 x bfloat> [[VS2]], <vscale x 8 x bfloat> [[VS1]], i64 0, i64 [[VL]])
+// CHECK-RV64-NEXT: ret <vscale x 8 x float> [[TMP0]]
+//
+vfloat32m4_t test_vfwmul_vv_bf16m2_f32m4_rm(vbfloat16m2_t vs2,
+ vbfloat16m2_t vs1, size_t vl) {
+ return __riscv_vfwmul_vv_bf16m2_f32m4_rm(vs2, vs1, __RISCV_FRM_RNE, vl);
+}
+
+// CHECK-RV64-LABEL: define dso_local <vscale x 8 x float> @test_vfwmul_vf_bf16m2_f32m4_rm(
+// CHECK-RV64-SAME: <vscale x 8 x bfloat> [[VS2:%.*]], bfloat noundef [[RS1:%.*]], i64 noundef [[VL:%.*]]) #[[ATTR0]] {
+// CHECK-RV64-NEXT: [[ENTRY:.*:]]
+// CHECK-RV64-NEXT: [[TMP0:%.*]] = call <vscale x 8 x float> @llvm.riscv.vfwmul.nxv8f32.nxv8bf16.bf16.i64(<vscale x 8 x float> poison, <vscale x 8 x bfloat> [[VS2]], bfloat [[RS1]], i64 0, i64 [[VL]])
+// CHECK-RV64-NEXT: ret <vscale x 8 x float> [[TMP0]]
+//
+vfloat32m4_t test_vfwmul_vf_bf16m2_f32m4_rm(vbfloat16m2_t vs2, __bf16 rs1,
+ size_t vl) {
+ return __riscv_vfwmul_vf_bf16m2_f32m4_rm(vs2, rs1, __RISCV_FRM_RNE, vl);
+}
+
+// CHECK-RV64-LABEL: define dso_local <vscale x 16 x float> @test_vfwmul_vv_bf16m4_f32m8_rm(
+// CHECK-RV64-SAME: <vscale x 16 x bfloat> [[VS2:%.*]], <vscale x 16 x bfloat> [[VS1:%.*]], i64 noundef [[VL:%.*]]) #[[ATTR0]] {
+// CHECK-RV64-NEXT: [[ENTRY:.*:]]
+// CHECK-RV64-NEXT: [[TMP0:%.*]] = call <vscale x 16 x float> @llvm.riscv.vfwmul.nxv16f32.nxv16bf16.nxv16bf16.i64(<vscale x 16 x float> poison, <vscale x 16 x bfloat> [[VS2]], <vscale x 16 x bfloat> [[VS1]], i64 0, i64 [[VL]])
+// CHECK-RV64-NEXT: ret <vscale x 16 x float> [[TMP0]]
+//
+vfloat32m8_t test_vfwmul_vv_bf16m4_f32m8_rm(vbfloat16m4_t vs2,
+ vbfloat16m4_t vs1, size_t vl) {
+ return __riscv_vfwmul_vv_bf16m4_f32m8_rm(vs2, vs1, __RISCV_FRM_RNE, vl);
+}
+
+// CHECK-RV64-LABEL: define dso_local <vscale x 16 x float> @test_vfwmul_vf_bf16m4_f32m8_rm(
+// CHECK-RV64-SAME: <vscale x 16 x bfloat> [[VS2:%.*]], bfloat noundef [[RS1:%.*]], i64 noundef [[VL:%.*]]) #[[ATTR0]] {
+// CHECK-RV64-NEXT: [[ENTRY:.*:]]
+// CHECK-RV64-NEXT: [[TMP0:%.*]] = call <vscale x 16 x float> @llvm.riscv.vfwmul.nxv16f32.nxv16bf16.bf16.i64(<vscale x 16 x float> poison, <vscale x 16 x bfloat> [[VS2]], bfloat [[RS1]], i64 0, i64 [[VL]])
+// CHECK-RV64-NEXT: ret <vscale x 16 x float> [[TMP0]]
+//
+vfloat32m8_t test_vfwmul_vf_bf16m4_f32m8_rm(vbfloat16m4_t vs2, __bf16 rs1,
+ size_t vl) {
+ return __riscv_vfwmul_vf_bf16m4_f32m8_rm(vs2, rs1, __RISCV_FRM_RNE, vl);
+}
+
+// CHECK-RV64-LABEL: define dso_local <vscale x 1 x float> @test_vfwmul_vv_bf16mf4_f32mf2_rm_m(
+// CHECK-RV64-SAME: <vscale x 1 x i1> [[VM:%.*]], <vscale x 1 x bfloat> [[VS2:%.*]], <vscale x 1 x bfloat> [[VS1:%.*]], i64 noundef [[VL:%.*]]) #[[ATTR0]] {
+// CHECK-RV64-NEXT: [[ENTRY:.*:]]
+// CHECK-RV64-NEXT: [[TMP0:%.*]] = call <vscale x 1 x float> @llvm.riscv.vfwmul.mask.nxv1f32.nxv1bf16.nxv1bf16.i64(<vscale x 1 x float> poison, <vscale x 1 x bfloat> [[VS2]], <vscale x 1 x bfloat> [[VS1]], <vscale x 1 x i1> [[VM]], i64 0, i64 [[VL]], i64 3)
+// CHECK-RV64-NEXT: ret <vscale x 1 x float> [[TMP0]]
+//
+vfloat32mf2_t test_vfwmul_vv_bf16mf4_f32mf2_rm_m(vbool64_t vm,
+ vbfloat16mf4_t vs2,
+ vbfloat16mf4_t vs1,
+ size_t vl) {
+ return __riscv_vfwmul_vv_bf16mf4_f32mf2_rm_m(vm, vs2, vs1, __RISCV_FRM_RNE,
+ vl);
+}
+
+// CHECK-RV64-LABEL: define dso_local <vscale x 1 x float> @test_vfwmul_vf_bf16mf4_f32mf2_rm_m(
+// CHECK-RV64-SAME: <vscale x 1 x i1> [[VM:%.*]], <vscale x 1 x bfloat> [[VS2:%.*]], bfloat noundef [[RS1:%.*]], i64 noundef [[VL:%.*]]) #[[ATTR0]] {
+// CHECK-RV64-NEXT: [[ENTRY:.*:]]
+// CHECK-RV64-NEXT: [[TMP0:%.*]] = call <vscale x 1 x float> @llvm.riscv.vfwmul.mask.nxv1f32.nxv1bf16.bf16.i64(<vscale x 1 x float> poison, <vscale x 1 x bfloat> [[VS2]], bfloat [[RS1]], <vscale x 1 x i1> [[VM]], i64 0, i64 [[VL]], i64 3)
+// CHECK-RV64-NEXT: ret <vscale x 1 x float> [[TMP0]]
+//
+vfloat32mf2_t test_vfwmul_vf_bf16mf4_f32mf2_rm_m(vbool64_t vm,
+ vbfloat16mf4_t vs2, __bf16 rs1,
+ size_t vl) {
+ return __riscv_vfwmul_vf_bf16mf4_f32mf2_rm_m(vm, vs2, rs1, __RISCV_FRM_RNE,
+ vl);
+}
+
+// CHECK-RV64-LABEL: define dso_local <vscale x 2 x float> @test_vfwmul_vv_bf16mf2_f32m1_rm_m(
+// CHECK-RV64-SAME: <vscale x 2 x i1> [[VM:%.*]], <vscale x 2 x bfloat> [[VS2:%.*]], <vscale x 2 x bfloat> [[VS1:%.*]], i64 noundef [[VL:%.*]]) #[[ATTR0]] {
+// CHECK-RV64-NEXT: [[ENTRY:.*:]]
+// CHECK-RV64-NEXT: [[TMP0:%.*]] = call <vscale x 2 x float> @llvm.riscv.vfwmul.mask.nxv2f32.nxv2bf16.nxv2bf16.i64(<vscale x 2 x float> poison, <vscale x 2 x bfloat> [[VS2]], <vscale x 2 x bfloat> [[VS1]], <vscale x 2 x i1> [[VM]], i64 0, i64 [[VL]], i64 3)
+// CHECK-RV64-NEXT: ret <vscale x 2 x float> [[TMP0]]
+//
+vfloat32m1_t test_vfwmul_vv_bf16mf2_f32m1_rm_m(vbool32_t vm, vbfloat16mf2_t vs2,
+ vbfloat16mf2_t vs1, size_t vl) {
+ return __riscv_vfwmul_vv_bf16mf2_f32m1_rm_m(vm, vs2, vs1, __RISCV_FRM_RNE,
+ vl);
+}
+
+// CHECK-RV64-LABEL: define dso_local <vscale x 2 x float> @test_vfwmul_vf_bf16mf2_f32m1_rm_m(
+// CHECK-RV64-SAME: <vscale x 2 x i1> [[VM:%.*]], <vscale x 2 x bfloat> [[VS2:%.*]], bfloat noundef [[RS1:%.*]], i64 noundef [[VL:%.*]]) #[[ATTR0]] {
+// CHECK-RV64-NEXT: [[ENTRY:.*:]]
+// CHECK-RV64-NEXT: [[TMP0:%.*]] = call <vscale x 2 x float> @llvm.riscv.vfwmul.mask.nxv2f32.nxv2bf16.bf16.i64(<vscale x 2 x float> poison, <vscale x 2 x bfloat> [[VS2]], bfloat [[RS1]], <vscale x 2 x i1> [[VM]], i64 0, i64 [[VL]], i64 3)
+// CHECK-RV64-NEXT: ret <vscale x 2 x float> [[TMP0]]
+//
+vfloat32m1_t test_vfwmul_vf_bf16mf2_f32m1_rm_m(vbool32_t vm, vbfloat16mf2_t vs2,
+ __bf16 rs1, size_t vl) {
+ return __riscv_vfwmul_vf_bf16mf2_f32m1_rm_m(vm, vs2, rs1, __RISCV_FRM_RNE,
+ vl);
+}
+
+// CHECK-RV64-LABEL: define dso_local <vscale x 4 x float> @test_vfwmul_vv_bf16m1_f32m2_rm_m(
+// CHECK-RV64-SAME: <vscale x 4 x i1> [[VM:%.*]], <vscale x 4 x bfloat> [[VS2:%.*]], <vscale x 4 x bfloat> [[VS1:%.*]], i64 noundef [[VL:%.*]]) #[[ATTR0]] {
+// CHECK-RV64-NEXT: [[ENTRY:.*:]]
+// CHECK-RV64-NEXT: [[TMP0:%.*]] = call <vscale x 4 x float> @llvm.riscv.vfwmul.mask.nxv4f32.nxv4bf16.nxv4bf16.i64(<vscale x 4 x float> poison, <vscale x 4 x bfloat> [[VS2]], <vscale x 4 x bfloat> [[VS1]], <vscale x 4 x i1> [[VM]], i64 0, i64 [[VL]], i64 3)
+// CHECK-RV64-NEXT: ret <vscale x 4 x float> [[TMP0]]
+//
+vfloat32m2_t test_vfwmul_vv_bf16m1_f32m2_rm_m(vbool16_t vm, vbfloat16m1_t vs2,
+ vbfloat16m1_t vs1, size_t vl) {
+ return __riscv_vfwmul_vv_bf16m1_f32m2_rm_m(vm, vs2, vs1, __RISCV_FRM_RNE, vl);
+}
+
+// CHECK-RV64-LABEL: define dso_local <vscale x 4 x float> @test_vfwmul_vf_bf16m1_f32m2_rm_m(
+// CHECK-RV64-SAME: <vscale x 4 x i1> [[VM:%.*]], <vscale x 4 x bfloat> [[VS2:%.*]], bfloat noundef [[RS1:%.*]], i64 noundef [[VL:%.*]]) #[[ATTR0]] {
+// CHECK-RV64-NEXT: [[ENTRY:.*:]]
+// CHECK-RV64-NEXT: [[TMP0:%.*]] = call <vscale x 4 x float> @llvm.riscv.vfwmul.mask.nxv4f32.nxv4bf16.bf16.i64(<vscale x 4 x float> poison, <vscale x 4 x bfloat> [[VS2]], bfloat [[RS1]], <vscale x 4 x i1> [[VM]], i64 0, i64 [[VL]], i64 3)
+// CHECK-RV64-NEXT: ret <vscale x 4 x float> [[TMP0]]
+//
+vfloat32m2_t test_vfwmul_vf_bf16m1_f32m2_rm_m(vbool16_t vm, vbfloat16m1_t vs2,
+ __bf16 rs1, size_t vl) {
+ return __riscv_vfwmul_vf_bf16m1_f32m2_rm_m(vm, vs2, rs1, __RISCV_FRM_RNE, vl);
+}
+
+// CHECK-RV64-LABEL: define dso_local <vscale x 8 x float> @test_vfwmul_vv_bf16m2_f32m4_rm_m(
+// CHECK-RV64-SAME: <vscale x 8 x i1> [[VM:%.*]], <vscale x 8 x bfloat> [[VS2:%.*]], <vscale x 8 x bfloat> [[VS1:%.*]], i64 noundef [[VL:%.*]]) #[[ATTR0]] {
+// CHECK-RV64-NEXT: [[ENTRY:.*:]]
+// CHECK-RV64-NEXT: [[TMP0:%.*]] = call <vscale x 8 x float> @llvm.riscv.vfwmul.mask.nxv8f32.nxv8bf16.nxv8bf16.i64(<vscale x 8 x float> poison, <vscale x 8 x bfloat> [[VS2]], <vscale x 8 x bfloat> [[VS1]], <vscale x 8 x i1> [[VM]], i64 0, i64 [[VL]], i64 3)
+// CHECK-RV64-NEXT: ret <vscale x 8 x float> [[TMP0]]
+//
+vfloat32m4_t test_vfwmul_vv_bf16m2_f32m4_rm_m(vbool8_t vm, vbfloat16m2_t vs2,
+ vbfloat16m2_t vs1, size_t vl) {
+ return __riscv_vfwmul_vv_bf16m2_f32m4_rm_m(vm, vs2, vs1, __RISCV_FRM_RNE, vl);
+}
+
+// CHECK-RV64-LABEL: define dso_local <vscale x 8 x float> @test_vfwmul_vf_bf16m2_f32m4_rm_m(
+// CHECK-RV64-SAME: <vscale x 8 x i1> [[VM:%.*]], <vscale x 8 x bfloat> [[VS2:%.*]], bfloat noundef [[RS1:%.*]], i64 noundef [[VL:%.*]]) #[[ATTR0]] {
+// CHECK-RV64-NEXT: [[ENTRY:.*:]]
+// CHECK-RV64-NEXT: [[TMP0:%.*]] = call <vscale x 8 x float> @llvm.riscv.vfwmul.mask.nxv8f32.nxv8bf16.bf16.i64(<vscale x 8 x float> poison, <vscale x 8 x bfloat> [[VS2]], bfloat [[RS1]], <vscale x 8 x i1> [[VM]], i64 0, i64 [[VL]], i64 3)
+// CHECK-RV64-NEXT: ret <vscale x 8 x float> [[TMP0]]
+//
+vfloat32m4_t test_vfwmul_vf_bf16m2_f32m4_rm_m(vbool8_t vm, vbfloat16m2_t vs2,
+ __bf16 rs1, size_t vl) {
+ return __riscv_vfwmul_vf_bf16m2_f32m4_rm_m(vm, vs2, rs1, __RISCV_FRM_RNE, vl);
+}
+
+// CHECK-RV64-LABEL: define dso_local <vscale x 16 x float> @test_vfwmul_vv_bf16m4_f32m8_rm_m(
+// CHECK-RV64-SAME: <vscale x 16 x i1> [[VM:%.*]], <vscale x 16 x bfloat> [[VS2:%.*]], <vscale x 16 x bfloat> [[VS1:%.*]], i64 noundef [[VL:%.*]]) #[[ATTR0]] {
+// CHECK-RV64-NEXT: [[ENTRY:.*:]]
+// CHECK-RV64-NEXT: [[TMP0:%.*]] = call <vscale x 16 x float> @llvm.riscv.vfwmul.mask.nxv16f32.nxv16bf16.nxv16bf16.i64(<vscale x 16 x float> poison, <vscale x 16 x bfloat> [[VS2]], <vscale x 16 x bfloat> [[VS1]], <vscale x 16 x i1> [[VM]], i64 0, i64 [[VL]], i64 3)
+// CHECK-RV64-NEXT: ret <vscale x 16 x float> [[TMP0]]
+//
+vfloat32m8_t test_vfwmul_vv_bf16m4_f32m8_rm_m(vbool4_t vm, vbfloat16m4_t vs2,
+ vbfloat16m4_t vs1, size_t vl) {
+ return __riscv_vfwmul_vv_bf16m4_f32m8_rm_m(vm, vs2, vs1, __RISCV_FRM_RNE, vl);
+}
+
+// CHECK-RV64-LABEL: define dso_local <vscale x 16 x float> @test_vfwmul_vf_bf16m4_f32m8_rm_m(
+// CHECK-RV64-SAME: <vscale x 16 x i1> [[VM:%.*]], <vscale x 16 x bfloat> [[VS2:%.*]], bfloat noundef [[RS1:%.*]], i64 noundef [[VL:%.*]]) #[[ATTR0]] {
+// CHECK-RV64-NEXT: [[ENTRY:.*:]]
+// CHECK-RV64-NEXT: [[TMP0:%.*]] = call <vscale x 16 x float> @llvm.riscv.vfwmul.mask.nxv16f32.nxv16bf16.bf16.i64(<vscale x 16 x float> poison, <vscale x 16 x bfloat> [[VS2]], bfloat [[RS1]], <vscale x 16 x i1> [[VM]], i64 0, i64 [[VL]], i64 3)
+// CHECK-RV64-NEXT: ret <vscale x 16 x float> [[TMP0]]
+//
+vfloat32m8_t test_vfwmul_vf_bf16m4_f32m8_rm_m(vbool4_t vm, vbfloat16m4_t vs2,
+ __bf16 rs1, size_t vl) {
+ return __riscv_vfwmul_vf_bf16m4_f32m8_rm_m(vm, vs2, rs1, __RISCV_FRM_RNE, vl);
+}
diff --git a/clang/test/CodeGen/RISCV/rvv-intrinsics-autogenerated/zvfbfa/non-policy/non-overloaded/vfwnmacc.c b/clang/test/CodeGen/RISCV/rvv-intrinsics-autogenerated/zvfbfa/non-policy/non-overloaded/vfwnmacc.c
new file mode 100644
index 0000000..6127a94
--- /dev/null
+++ b/clang/test/CodeGen/RISCV/rvv-intrinsics-autogenerated/zvfbfa/non-policy/non-overloaded/vfwnmacc.c
@@ -0,0 +1,494 @@
+// NOTE: Assertions have been autogenerated by utils/update_cc_test_checks.py UTC_ARGS: --version 5
+// REQUIRES: riscv-registered-target
+// RUN: %clang_cc1 -triple riscv64 -target-feature +v \
+// RUN: -target-feature +experimental-zvfbfa -disable-O0-optnone \
+// RUN: -emit-llvm %s -o - | opt -S -passes=mem2reg | \
+// RUN: FileCheck --check-prefix=CHECK-RV64 %s
+
+#include <riscv_vector.h>
+
+// CHECK-RV64-LABEL: define dso_local <vscale x 1 x float> @test_vfwnmacc_vv_bf16mf4_f32mf2(
+// CHECK-RV64-SAME: <vscale x 1 x float> [[VD:%.*]], <vscale x 1 x bfloat> [[VS1:%.*]], <vscale x 1 x bfloat> [[VS2:%.*]], i64 noundef [[VL:%.*]]) #[[ATTR0:[0-9]+]] {
+// CHECK-RV64-NEXT: [[ENTRY:.*:]]
+// CHECK-RV64-NEXT: [[TMP0:%.*]] = call <vscale x 1 x float> @llvm.riscv.vfwnmacc.nxv1f32.nxv1bf16.nxv1bf16.i64(<vscale x 1 x float> [[VD]], <vscale x 1 x bfloat> [[VS1]], <vscale x 1 x bfloat> [[VS2]], i64 7, i64 [[VL]], i64 3)
+// CHECK-RV64-NEXT: ret <vscale x 1 x float> [[TMP0]]
+//
+vfloat32mf2_t test_vfwnmacc_vv_bf16mf4_f32mf2(vfloat32mf2_t vd,
+ vbfloat16mf4_t vs1,
+ vbfloat16mf4_t vs2, size_t vl) {
+ return __riscv_vfwnmacc_vv_bf16mf4_f32mf2(vd, vs1, vs2, vl);
+}
+
+// CHECK-RV64-LABEL: define dso_local <vscale x 1 x float> @test_vfwnmacc_vf_bf16mf4_f32mf2(
+// CHECK-RV64-SAME: <vscale x 1 x float> [[VD:%.*]], bfloat noundef [[VS1:%.*]], <vscale x 1 x bfloat> [[VS2:%.*]], i64 noundef [[VL:%.*]]) #[[ATTR0]] {
+// CHECK-RV64-NEXT: [[ENTRY:.*:]]
+// CHECK-RV64-NEXT: [[TMP0:%.*]] = call <vscale x 1 x float> @llvm.riscv.vfwnmacc.nxv1f32.bf16.nxv1bf16.i64(<vscale x 1 x float> [[VD]], bfloat [[VS1]], <vscale x 1 x bfloat> [[VS2]], i64 7, i64 [[VL]], i64 3)
+// CHECK-RV64-NEXT: ret <vscale x 1 x float> [[TMP0]]
+//
+vfloat32mf2_t test_vfwnmacc_vf_bf16mf4_f32mf2(vfloat32mf2_t vd, __bf16 vs1,
+ vbfloat16mf4_t vs2, size_t vl) {
+ return __riscv_vfwnmacc_vf_bf16mf4_f32mf2(vd, vs1, vs2, vl);
+}
+
+// CHECK-RV64-LABEL: define dso_local <vscale x 2 x float> @test_vfwnmacc_vv_bf16mf2_f32m1(
+// CHECK-RV64-SAME: <vscale x 2 x float> [[VD:%.*]], <vscale x 2 x bfloat> [[VS1:%.*]], <vscale x 2 x bfloat> [[VS2:%.*]], i64 noundef [[VL:%.*]]) #[[ATTR0]] {
+// CHECK-RV64-NEXT: [[ENTRY:.*:]]
+// CHECK-RV64-NEXT: [[TMP0:%.*]] = call <vscale x 2 x float> @llvm.riscv.vfwnmacc.nxv2f32.nxv2bf16.nxv2bf16.i64(<vscale x 2 x float> [[VD]], <vscale x 2 x bfloat> [[VS1]], <vscale x 2 x bfloat> [[VS2]], i64 7, i64 [[VL]], i64 3)
+// CHECK-RV64-NEXT: ret <vscale x 2 x float> [[TMP0]]
+//
+vfloat32m1_t test_vfwnmacc_vv_bf16mf2_f32m1(vfloat32m1_t vd, vbfloat16mf2_t vs1,
+ vbfloat16mf2_t vs2, size_t vl) {
+ return __riscv_vfwnmacc_vv_bf16mf2_f32m1(vd, vs1, vs2, vl);
+}
+
+// CHECK-RV64-LABEL: define dso_local <vscale x 2 x float> @test_vfwnmacc_vf_bf16mf2_f32m1(
+// CHECK-RV64-SAME: <vscale x 2 x float> [[VD:%.*]], bfloat noundef [[VS1:%.*]], <vscale x 2 x bfloat> [[VS2:%.*]], i64 noundef [[VL:%.*]]) #[[ATTR0]] {
+// CHECK-RV64-NEXT: [[ENTRY:.*:]]
+// CHECK-RV64-NEXT: [[TMP0:%.*]] = call <vscale x 2 x float> @llvm.riscv.vfwnmacc.nxv2f32.bf16.nxv2bf16.i64(<vscale x 2 x float> [[VD]], bfloat [[VS1]], <vscale x 2 x bfloat> [[VS2]], i64 7, i64 [[VL]], i64 3)
+// CHECK-RV64-NEXT: ret <vscale x 2 x float> [[TMP0]]
+//
+vfloat32m1_t test_vfwnmacc_vf_bf16mf2_f32m1(vfloat32m1_t vd, __bf16 vs1,
+ vbfloat16mf2_t vs2, size_t vl) {
+ return __riscv_vfwnmacc_vf_bf16mf2_f32m1(vd, vs1, vs2, vl);
+}
+
+// CHECK-RV64-LABEL: define dso_local <vscale x 4 x float> @test_vfwnmacc_vv_bf16m1_f32m2(
+// CHECK-RV64-SAME: <vscale x 4 x float> [[VD:%.*]], <vscale x 4 x bfloat> [[VS1:%.*]], <vscale x 4 x bfloat> [[VS2:%.*]], i64 noundef [[VL:%.*]]) #[[ATTR0]] {
+// CHECK-RV64-NEXT: [[ENTRY:.*:]]
+// CHECK-RV64-NEXT: [[TMP0:%.*]] = call <vscale x 4 x float> @llvm.riscv.vfwnmacc.nxv4f32.nxv4bf16.nxv4bf16.i64(<vscale x 4 x float> [[VD]], <vscale x 4 x bfloat> [[VS1]], <vscale x 4 x bfloat> [[VS2]], i64 7, i64 [[VL]], i64 3)
+// CHECK-RV64-NEXT: ret <vscale x 4 x float> [[TMP0]]
+//
+vfloat32m2_t test_vfwnmacc_vv_bf16m1_f32m2(vfloat32m2_t vd, vbfloat16m1_t vs1,
+ vbfloat16m1_t vs2, size_t vl) {
+ return __riscv_vfwnmacc_vv_bf16m1_f32m2(vd, vs1, vs2, vl);
+}
+
+// CHECK-RV64-LABEL: define dso_local <vscale x 4 x float> @test_vfwnmacc_vf_bf16m1_f32m2(
+// CHECK-RV64-SAME: <vscale x 4 x float> [[VD:%.*]], bfloat noundef [[VS1:%.*]], <vscale x 4 x bfloat> [[VS2:%.*]], i64 noundef [[VL:%.*]]) #[[ATTR0]] {
+// CHECK-RV64-NEXT: [[ENTRY:.*:]]
+// CHECK-RV64-NEXT: [[TMP0:%.*]] = call <vscale x 4 x float> @llvm.riscv.vfwnmacc.nxv4f32.bf16.nxv4bf16.i64(<vscale x 4 x float> [[VD]], bfloat [[VS1]], <vscale x 4 x bfloat> [[VS2]], i64 7, i64 [[VL]], i64 3)
+// CHECK-RV64-NEXT: ret <vscale x 4 x float> [[TMP0]]
+//
+vfloat32m2_t test_vfwnmacc_vf_bf16m1_f32m2(vfloat32m2_t vd, __bf16 vs1,
+ vbfloat16m1_t vs2, size_t vl) {
+ return __riscv_vfwnmacc_vf_bf16m1_f32m2(vd, vs1, vs2, vl);
+}
+
+// CHECK-RV64-LABEL: define dso_local <vscale x 8 x float> @test_vfwnmacc_vv_bf16m2_f32m4(
+// CHECK-RV64-SAME: <vscale x 8 x float> [[VD:%.*]], <vscale x 8 x bfloat> [[VS1:%.*]], <vscale x 8 x bfloat> [[VS2:%.*]], i64 noundef [[VL:%.*]]) #[[ATTR0]] {
+// CHECK-RV64-NEXT: [[ENTRY:.*:]]
+// CHECK-RV64-NEXT: [[TMP0:%.*]] = call <vscale x 8 x float> @llvm.riscv.vfwnmacc.nxv8f32.nxv8bf16.nxv8bf16.i64(<vscale x 8 x float> [[VD]], <vscale x 8 x bfloat> [[VS1]], <vscale x 8 x bfloat> [[VS2]], i64 7, i64 [[VL]], i64 3)
+// CHECK-RV64-NEXT: ret <vscale x 8 x float> [[TMP0]]
+//
+vfloat32m4_t test_vfwnmacc_vv_bf16m2_f32m4(vfloat32m4_t vd, vbfloat16m2_t vs1,
+ vbfloat16m2_t vs2, size_t vl) {
+ return __riscv_vfwnmacc_vv_bf16m2_f32m4(vd, vs1, vs2, vl);
+}
+
+// CHECK-RV64-LABEL: define dso_local <vscale x 8 x float> @test_vfwnmacc_vf_bf16m2_f32m4(
+// CHECK-RV64-SAME: <vscale x 8 x float> [[VD:%.*]], bfloat noundef [[VS1:%.*]], <vscale x 8 x bfloat> [[VS2:%.*]], i64 noundef [[VL:%.*]]) #[[ATTR0]] {
+// CHECK-RV64-NEXT: [[ENTRY:.*:]]
+// CHECK-RV64-NEXT: [[TMP0:%.*]] = call <vscale x 8 x float> @llvm.riscv.vfwnmacc.nxv8f32.bf16.nxv8bf16.i64(<vscale x 8 x float> [[VD]], bfloat [[VS1]], <vscale x 8 x bfloat> [[VS2]], i64 7, i64 [[VL]], i64 3)
+// CHECK-RV64-NEXT: ret <vscale x 8 x float> [[TMP0]]
+//
+vfloat32m4_t test_vfwnmacc_vf_bf16m2_f32m4(vfloat32m4_t vd, __bf16 vs1,
+ vbfloat16m2_t vs2, size_t vl) {
+ return __riscv_vfwnmacc_vf_bf16m2_f32m4(vd, vs1, vs2, vl);
+}
+
+// CHECK-RV64-LABEL: define dso_local <vscale x 16 x float> @test_vfwnmacc_vv_bf16m4_f32m8(
+// CHECK-RV64-SAME: <vscale x 16 x float> [[VD:%.*]], <vscale x 16 x bfloat> [[VS1:%.*]], <vscale x 16 x bfloat> [[VS2:%.*]], i64 noundef [[VL:%.*]]) #[[ATTR0]] {
+// CHECK-RV64-NEXT: [[ENTRY:.*:]]
+// CHECK-RV64-NEXT: [[TMP0:%.*]] = call <vscale x 16 x float> @llvm.riscv.vfwnmacc.nxv16f32.nxv16bf16.nxv16bf16.i64(<vscale x 16 x float> [[VD]], <vscale x 16 x bfloat> [[VS1]], <vscale x 16 x bfloat> [[VS2]], i64 7, i64 [[VL]], i64 3)
+// CHECK-RV64-NEXT: ret <vscale x 16 x float> [[TMP0]]
+//
+vfloat32m8_t test_vfwnmacc_vv_bf16m4_f32m8(vfloat32m8_t vd, vbfloat16m4_t vs1,
+ vbfloat16m4_t vs2, size_t vl) {
+ return __riscv_vfwnmacc_vv_bf16m4_f32m8(vd, vs1, vs2, vl);
+}
+
+// CHECK-RV64-LABEL: define dso_local <vscale x 16 x float> @test_vfwnmacc_vf_bf16m4_f32m8(
+// CHECK-RV64-SAME: <vscale x 16 x float> [[VD:%.*]], bfloat noundef [[VS1:%.*]], <vscale x 16 x bfloat> [[VS2:%.*]], i64 noundef [[VL:%.*]]) #[[ATTR0]] {
+// CHECK-RV64-NEXT: [[ENTRY:.*:]]
+// CHECK-RV64-NEXT: [[TMP0:%.*]] = call <vscale x 16 x float> @llvm.riscv.vfwnmacc.nxv16f32.bf16.nxv16bf16.i64(<vscale x 16 x float> [[VD]], bfloat [[VS1]], <vscale x 16 x bfloat> [[VS2]], i64 7, i64 [[VL]], i64 3)
+// CHECK-RV64-NEXT: ret <vscale x 16 x float> [[TMP0]]
+//
+vfloat32m8_t test_vfwnmacc_vf_bf16m4_f32m8(vfloat32m8_t vd, __bf16 vs1,
+ vbfloat16m4_t vs2, size_t vl) {
+ return __riscv_vfwnmacc_vf_bf16m4_f32m8(vd, vs1, vs2, vl);
+}
+
+// CHECK-RV64-LABEL: define dso_local <vscale x 1 x float> @test_vfwnmacc_vv_bf16mf4_f32mf2_m(
+// CHECK-RV64-SAME: <vscale x 1 x i1> [[VM:%.*]], <vscale x 1 x float> [[VD:%.*]], <vscale x 1 x bfloat> [[VS1:%.*]], <vscale x 1 x bfloat> [[VS2:%.*]], i64 noundef [[VL:%.*]]) #[[ATTR0]] {
+// CHECK-RV64-NEXT: [[ENTRY:.*:]]
+// CHECK-RV64-NEXT: [[TMP0:%.*]] = call <vscale x 1 x float> @llvm.riscv.vfwnmacc.mask.nxv1f32.nxv1bf16.nxv1bf16.i64(<vscale x 1 x float> [[VD]], <vscale x 1 x bfloat> [[VS1]], <vscale x 1 x bfloat> [[VS2]], <vscale x 1 x i1> [[VM]], i64 7, i64 [[VL]], i64 3)
+// CHECK-RV64-NEXT: ret <vscale x 1 x float> [[TMP0]]
+//
+vfloat32mf2_t test_vfwnmacc_vv_bf16mf4_f32mf2_m(vbool64_t vm, vfloat32mf2_t vd,
+ vbfloat16mf4_t vs1,
+ vbfloat16mf4_t vs2, size_t vl) {
+ return __riscv_vfwnmacc_vv_bf16mf4_f32mf2_m(vm, vd, vs1, vs2, vl);
+}
+
+// CHECK-RV64-LABEL: define dso_local <vscale x 1 x float> @test_vfwnmacc_vf_bf16mf4_f32mf2_m(
+// CHECK-RV64-SAME: <vscale x 1 x i1> [[VM:%.*]], <vscale x 1 x float> [[VD:%.*]], bfloat noundef [[VS1:%.*]], <vscale x 1 x bfloat> [[VS2:%.*]], i64 noundef [[VL:%.*]]) #[[ATTR0]] {
+// CHECK-RV64-NEXT: [[ENTRY:.*:]]
+// CHECK-RV64-NEXT: [[TMP0:%.*]] = call <vscale x 1 x float> @llvm.riscv.vfwnmacc.mask.nxv1f32.bf16.nxv1bf16.i64(<vscale x 1 x float> [[VD]], bfloat [[VS1]], <vscale x 1 x bfloat> [[VS2]], <vscale x 1 x i1> [[VM]], i64 7, i64 [[VL]], i64 3)
+// CHECK-RV64-NEXT: ret <vscale x 1 x float> [[TMP0]]
+//
+vfloat32mf2_t test_vfwnmacc_vf_bf16mf4_f32mf2_m(vbool64_t vm, vfloat32mf2_t vd,
+ __bf16 vs1, vbfloat16mf4_t vs2,
+ size_t vl) {
+ return __riscv_vfwnmacc_vf_bf16mf4_f32mf2_m(vm, vd, vs1, vs2, vl);
+}
+
+// CHECK-RV64-LABEL: define dso_local <vscale x 2 x float> @test_vfwnmacc_vv_bf16mf2_f32m1_m(
+// CHECK-RV64-SAME: <vscale x 2 x i1> [[VM:%.*]], <vscale x 2 x float> [[VD:%.*]], <vscale x 2 x bfloat> [[VS1:%.*]], <vscale x 2 x bfloat> [[VS2:%.*]], i64 noundef [[VL:%.*]]) #[[ATTR0]] {
+// CHECK-RV64-NEXT: [[ENTRY:.*:]]
+// CHECK-RV64-NEXT: [[TMP0:%.*]] = call <vscale x 2 x float> @llvm.riscv.vfwnmacc.mask.nxv2f32.nxv2bf16.nxv2bf16.i64(<vscale x 2 x float> [[VD]], <vscale x 2 x bfloat> [[VS1]], <vscale x 2 x bfloat> [[VS2]], <vscale x 2 x i1> [[VM]], i64 7, i64 [[VL]], i64 3)
+// CHECK-RV64-NEXT: ret <vscale x 2 x float> [[TMP0]]
+//
+vfloat32m1_t test_vfwnmacc_vv_bf16mf2_f32m1_m(vbool32_t vm, vfloat32m1_t vd,
+ vbfloat16mf2_t vs1,
+ vbfloat16mf2_t vs2, size_t vl) {
+ return __riscv_vfwnmacc_vv_bf16mf2_f32m1_m(vm, vd, vs1, vs2, vl);
+}
+
+// CHECK-RV64-LABEL: define dso_local <vscale x 2 x float> @test_vfwnmacc_vf_bf16mf2_f32m1_m(
+// CHECK-RV64-SAME: <vscale x 2 x i1> [[VM:%.*]], <vscale x 2 x float> [[VD:%.*]], bfloat noundef [[VS1:%.*]], <vscale x 2 x bfloat> [[VS2:%.*]], i64 noundef [[VL:%.*]]) #[[ATTR0]] {
+// CHECK-RV64-NEXT: [[ENTRY:.*:]]
+// CHECK-RV64-NEXT: [[TMP0:%.*]] = call <vscale x 2 x float> @llvm.riscv.vfwnmacc.mask.nxv2f32.bf16.nxv2bf16.i64(<vscale x 2 x float> [[VD]], bfloat [[VS1]], <vscale x 2 x bfloat> [[VS2]], <vscale x 2 x i1> [[VM]], i64 7, i64 [[VL]], i64 3)
+// CHECK-RV64-NEXT: ret <vscale x 2 x float> [[TMP0]]
+//
+vfloat32m1_t test_vfwnmacc_vf_bf16mf2_f32m1_m(vbool32_t vm, vfloat32m1_t vd,
+ __bf16 vs1, vbfloat16mf2_t vs2,
+ size_t vl) {
+ return __riscv_vfwnmacc_vf_bf16mf2_f32m1_m(vm, vd, vs1, vs2, vl);
+}
+
+// CHECK-RV64-LABEL: define dso_local <vscale x 4 x float> @test_vfwnmacc_vv_bf16m1_f32m2_m(
+// CHECK-RV64-SAME: <vscale x 4 x i1> [[VM:%.*]], <vscale x 4 x float> [[VD:%.*]], <vscale x 4 x bfloat> [[VS1:%.*]], <vscale x 4 x bfloat> [[VS2:%.*]], i64 noundef [[VL:%.*]]) #[[ATTR0]] {
+// CHECK-RV64-NEXT: [[ENTRY:.*:]]
+// CHECK-RV64-NEXT: [[TMP0:%.*]] = call <vscale x 4 x float> @llvm.riscv.vfwnmacc.mask.nxv4f32.nxv4bf16.nxv4bf16.i64(<vscale x 4 x float> [[VD]], <vscale x 4 x bfloat> [[VS1]], <vscale x 4 x bfloat> [[VS2]], <vscale x 4 x i1> [[VM]], i64 7, i64 [[VL]], i64 3)
+// CHECK-RV64-NEXT: ret <vscale x 4 x float> [[TMP0]]
+//
+vfloat32m2_t test_vfwnmacc_vv_bf16m1_f32m2_m(vbool16_t vm, vfloat32m2_t vd,
+ vbfloat16m1_t vs1,
+ vbfloat16m1_t vs2, size_t vl) {
+ return __riscv_vfwnmacc_vv_bf16m1_f32m2_m(vm, vd, vs1, vs2, vl);
+}
+
+// CHECK-RV64-LABEL: define dso_local <vscale x 4 x float> @test_vfwnmacc_vf_bf16m1_f32m2_m(
+// CHECK-RV64-SAME: <vscale x 4 x i1> [[VM:%.*]], <vscale x 4 x float> [[VD:%.*]], bfloat noundef [[VS1:%.*]], <vscale x 4 x bfloat> [[VS2:%.*]], i64 noundef [[VL:%.*]]) #[[ATTR0]] {
+// CHECK-RV64-NEXT: [[ENTRY:.*:]]
+// CHECK-RV64-NEXT: [[TMP0:%.*]] = call <vscale x 4 x float> @llvm.riscv.vfwnmacc.mask.nxv4f32.bf16.nxv4bf16.i64(<vscale x 4 x float> [[VD]], bfloat [[VS1]], <vscale x 4 x bfloat> [[VS2]], <vscale x 4 x i1> [[VM]], i64 7, i64 [[VL]], i64 3)
+// CHECK-RV64-NEXT: ret <vscale x 4 x float> [[TMP0]]
+//
+vfloat32m2_t test_vfwnmacc_vf_bf16m1_f32m2_m(vbool16_t vm, vfloat32m2_t vd,
+ __bf16 vs1, vbfloat16m1_t vs2,
+ size_t vl) {
+ return __riscv_vfwnmacc_vf_bf16m1_f32m2_m(vm, vd, vs1, vs2, vl);
+}
+
+// CHECK-RV64-LABEL: define dso_local <vscale x 8 x float> @test_vfwnmacc_vv_bf16m2_f32m4_m(
+// CHECK-RV64-SAME: <vscale x 8 x i1> [[VM:%.*]], <vscale x 8 x float> [[VD:%.*]], <vscale x 8 x bfloat> [[VS1:%.*]], <vscale x 8 x bfloat> [[VS2:%.*]], i64 noundef [[VL:%.*]]) #[[ATTR0]] {
+// CHECK-RV64-NEXT: [[ENTRY:.*:]]
+// CHECK-RV64-NEXT: [[TMP0:%.*]] = call <vscale x 8 x float> @llvm.riscv.vfwnmacc.mask.nxv8f32.nxv8bf16.nxv8bf16.i64(<vscale x 8 x float> [[VD]], <vscale x 8 x bfloat> [[VS1]], <vscale x 8 x bfloat> [[VS2]], <vscale x 8 x i1> [[VM]], i64 7, i64 [[VL]], i64 3)
+// CHECK-RV64-NEXT: ret <vscale x 8 x float> [[TMP0]]
+//
+vfloat32m4_t test_vfwnmacc_vv_bf16m2_f32m4_m(vbool8_t vm, vfloat32m4_t vd,
+ vbfloat16m2_t vs1,
+ vbfloat16m2_t vs2, size_t vl) {
+ return __riscv_vfwnmacc_vv_bf16m2_f32m4_m(vm, vd, vs1, vs2, vl);
+}
+
+// CHECK-RV64-LABEL: define dso_local <vscale x 8 x float> @test_vfwnmacc_vf_bf16m2_f32m4_m(
+// CHECK-RV64-SAME: <vscale x 8 x i1> [[VM:%.*]], <vscale x 8 x float> [[VD:%.*]], bfloat noundef [[VS1:%.*]], <vscale x 8 x bfloat> [[VS2:%.*]], i64 noundef [[VL:%.*]]) #[[ATTR0]] {
+// CHECK-RV64-NEXT: [[ENTRY:.*:]]
+// CHECK-RV64-NEXT: [[TMP0:%.*]] = call <vscale x 8 x float> @llvm.riscv.vfwnmacc.mask.nxv8f32.bf16.nxv8bf16.i64(<vscale x 8 x float> [[VD]], bfloat [[VS1]], <vscale x 8 x bfloat> [[VS2]], <vscale x 8 x i1> [[VM]], i64 7, i64 [[VL]], i64 3)
+// CHECK-RV64-NEXT: ret <vscale x 8 x float> [[TMP0]]
+//
+vfloat32m4_t test_vfwnmacc_vf_bf16m2_f32m4_m(vbool8_t vm, vfloat32m4_t vd,
+ __bf16 vs1, vbfloat16m2_t vs2,
+ size_t vl) {
+ return __riscv_vfwnmacc_vf_bf16m2_f32m4_m(vm, vd, vs1, vs2, vl);
+}
+
+// CHECK-RV64-LABEL: define dso_local <vscale x 16 x float> @test_vfwnmacc_vv_bf16m4_f32m8_m(
+// CHECK-RV64-SAME: <vscale x 16 x i1> [[VM:%.*]], <vscale x 16 x float> [[VD:%.*]], <vscale x 16 x bfloat> [[VS1:%.*]], <vscale x 16 x bfloat> [[VS2:%.*]], i64 noundef [[VL:%.*]]) #[[ATTR0]] {
+// CHECK-RV64-NEXT: [[ENTRY:.*:]]
+// CHECK-RV64-NEXT: [[TMP0:%.*]] = call <vscale x 16 x float> @llvm.riscv.vfwnmacc.mask.nxv16f32.nxv16bf16.nxv16bf16.i64(<vscale x 16 x float> [[VD]], <vscale x 16 x bfloat> [[VS1]], <vscale x 16 x bfloat> [[VS2]], <vscale x 16 x i1> [[VM]], i64 7, i64 [[VL]], i64 3)
+// CHECK-RV64-NEXT: ret <vscale x 16 x float> [[TMP0]]
+//
+vfloat32m8_t test_vfwnmacc_vv_bf16m4_f32m8_m(vbool4_t vm, vfloat32m8_t vd,
+ vbfloat16m4_t vs1,
+ vbfloat16m4_t vs2, size_t vl) {
+ return __riscv_vfwnmacc_vv_bf16m4_f32m8_m(vm, vd, vs1, vs2, vl);
+}
+
+// CHECK-RV64-LABEL: define dso_local <vscale x 16 x float> @test_vfwnmacc_vf_bf16m4_f32m8_m(
+// CHECK-RV64-SAME: <vscale x 16 x i1> [[VM:%.*]], <vscale x 16 x float> [[VD:%.*]], bfloat noundef [[VS1:%.*]], <vscale x 16 x bfloat> [[VS2:%.*]], i64 noundef [[VL:%.*]]) #[[ATTR0]] {
+// CHECK-RV64-NEXT: [[ENTRY:.*:]]
+// CHECK-RV64-NEXT: [[TMP0:%.*]] = call <vscale x 16 x float> @llvm.riscv.vfwnmacc.mask.nxv16f32.bf16.nxv16bf16.i64(<vscale x 16 x float> [[VD]], bfloat [[VS1]], <vscale x 16 x bfloat> [[VS2]], <vscale x 16 x i1> [[VM]], i64 7, i64 [[VL]], i64 3)
+// CHECK-RV64-NEXT: ret <vscale x 16 x float> [[TMP0]]
+//
+vfloat32m8_t test_vfwnmacc_vf_bf16m4_f32m8_m(vbool4_t vm, vfloat32m8_t vd,
+ __bf16 vs1, vbfloat16m4_t vs2,
+ size_t vl) {
+ return __riscv_vfwnmacc_vf_bf16m4_f32m8_m(vm, vd, vs1, vs2, vl);
+}
+
+// CHECK-RV64-LABEL: define dso_local <vscale x 1 x float> @test_vfwnmacc_vv_bf16mf4_f32mf2_rm(
+// CHECK-RV64-SAME: <vscale x 1 x float> [[VD:%.*]], <vscale x 1 x bfloat> [[VS1:%.*]], <vscale x 1 x bfloat> [[VS2:%.*]], i64 noundef [[VL:%.*]]) #[[ATTR0]] {
+// CHECK-RV64-NEXT: [[ENTRY:.*:]]
+// CHECK-RV64-NEXT: [[TMP0:%.*]] = call <vscale x 1 x float> @llvm.riscv.vfwnmacc.nxv1f32.nxv1bf16.nxv1bf16.i64(<vscale x 1 x float> [[VD]], <vscale x 1 x bfloat> [[VS1]], <vscale x 1 x bfloat> [[VS2]], i64 0, i64 [[VL]], i64 3)
+// CHECK-RV64-NEXT: ret <vscale x 1 x float> [[TMP0]]
+//
+vfloat32mf2_t test_vfwnmacc_vv_bf16mf4_f32mf2_rm(vfloat32mf2_t vd,
+ vbfloat16mf4_t vs1,
+ vbfloat16mf4_t vs2,
+ size_t vl) {
+ return __riscv_vfwnmacc_vv_bf16mf4_f32mf2_rm(vd, vs1, vs2, __RISCV_FRM_RNE,
+ vl);
+}
+
+// CHECK-RV64-LABEL: define dso_local <vscale x 1 x float> @test_vfwnmacc_vf_bf16mf4_f32mf2_rm(
+// CHECK-RV64-SAME: <vscale x 1 x float> [[VD:%.*]], bfloat noundef [[VS1:%.*]], <vscale x 1 x bfloat> [[VS2:%.*]], i64 noundef [[VL:%.*]]) #[[ATTR0]] {
+// CHECK-RV64-NEXT: [[ENTRY:.*:]]
+// CHECK-RV64-NEXT: [[TMP0:%.*]] = call <vscale x 1 x float> @llvm.riscv.vfwnmacc.nxv1f32.bf16.nxv1bf16.i64(<vscale x 1 x float> [[VD]], bfloat [[VS1]], <vscale x 1 x bfloat> [[VS2]], i64 0, i64 [[VL]], i64 3)
+// CHECK-RV64-NEXT: ret <vscale x 1 x float> [[TMP0]]
+//
+vfloat32mf2_t test_vfwnmacc_vf_bf16mf4_f32mf2_rm(vfloat32mf2_t vd, __bf16 vs1,
+ vbfloat16mf4_t vs2,
+ size_t vl) {
+ return __riscv_vfwnmacc_vf_bf16mf4_f32mf2_rm(vd, vs1, vs2, __RISCV_FRM_RNE,
+ vl);
+}
+
+// CHECK-RV64-LABEL: define dso_local <vscale x 2 x float> @test_vfwnmacc_vv_bf16mf2_f32m1_rm(
+// CHECK-RV64-SAME: <vscale x 2 x float> [[VD:%.*]], <vscale x 2 x bfloat> [[VS1:%.*]], <vscale x 2 x bfloat> [[VS2:%.*]], i64 noundef [[VL:%.*]]) #[[ATTR0]] {
+// CHECK-RV64-NEXT: [[ENTRY:.*:]]
+// CHECK-RV64-NEXT: [[TMP0:%.*]] = call <vscale x 2 x float> @llvm.riscv.vfwnmacc.nxv2f32.nxv2bf16.nxv2bf16.i64(<vscale x 2 x float> [[VD]], <vscale x 2 x bfloat> [[VS1]], <vscale x 2 x bfloat> [[VS2]], i64 0, i64 [[VL]], i64 3)
+// CHECK-RV64-NEXT: ret <vscale x 2 x float> [[TMP0]]
+//
+vfloat32m1_t test_vfwnmacc_vv_bf16mf2_f32m1_rm(vfloat32m1_t vd,
+ vbfloat16mf2_t vs1,
+ vbfloat16mf2_t vs2, size_t vl) {
+ return __riscv_vfwnmacc_vv_bf16mf2_f32m1_rm(vd, vs1, vs2, __RISCV_FRM_RNE,
+ vl);
+}
+
+// CHECK-RV64-LABEL: define dso_local <vscale x 2 x float> @test_vfwnmacc_vf_bf16mf2_f32m1_rm(
+// CHECK-RV64-SAME: <vscale x 2 x float> [[VD:%.*]], bfloat noundef [[VS1:%.*]], <vscale x 2 x bfloat> [[VS2:%.*]], i64 noundef [[VL:%.*]]) #[[ATTR0]] {
+// CHECK-RV64-NEXT: [[ENTRY:.*:]]
+// CHECK-RV64-NEXT: [[TMP0:%.*]] = call <vscale x 2 x float> @llvm.riscv.vfwnmacc.nxv2f32.bf16.nxv2bf16.i64(<vscale x 2 x float> [[VD]], bfloat [[VS1]], <vscale x 2 x bfloat> [[VS2]], i64 0, i64 [[VL]], i64 3)
+// CHECK-RV64-NEXT: ret <vscale x 2 x float> [[TMP0]]
+//
+vfloat32m1_t test_vfwnmacc_vf_bf16mf2_f32m1_rm(vfloat32m1_t vd, __bf16 vs1,
+ vbfloat16mf2_t vs2, size_t vl) {
+ return __riscv_vfwnmacc_vf_bf16mf2_f32m1_rm(vd, vs1, vs2, __RISCV_FRM_RNE,
+ vl);
+}
+
+// CHECK-RV64-LABEL: define dso_local <vscale x 4 x float> @test_vfwnmacc_vv_bf16m1_f32m2_rm(
+// CHECK-RV64-SAME: <vscale x 4 x float> [[VD:%.*]], <vscale x 4 x bfloat> [[VS1:%.*]], <vscale x 4 x bfloat> [[VS2:%.*]], i64 noundef [[VL:%.*]]) #[[ATTR0]] {
+// CHECK-RV64-NEXT: [[ENTRY:.*:]]
+// CHECK-RV64-NEXT: [[TMP0:%.*]] = call <vscale x 4 x float> @llvm.riscv.vfwnmacc.nxv4f32.nxv4bf16.nxv4bf16.i64(<vscale x 4 x float> [[VD]], <vscale x 4 x bfloat> [[VS1]], <vscale x 4 x bfloat> [[VS2]], i64 0, i64 [[VL]], i64 3)
+// CHECK-RV64-NEXT: ret <vscale x 4 x float> [[TMP0]]
+//
+vfloat32m2_t test_vfwnmacc_vv_bf16m1_f32m2_rm(vfloat32m2_t vd,
+ vbfloat16m1_t vs1,
+ vbfloat16m1_t vs2, size_t vl) {
+ return __riscv_vfwnmacc_vv_bf16m1_f32m2_rm(vd, vs1, vs2, __RISCV_FRM_RNE, vl);
+}
+
+// CHECK-RV64-LABEL: define dso_local <vscale x 4 x float> @test_vfwnmacc_vf_bf16m1_f32m2_rm(
+// CHECK-RV64-SAME: <vscale x 4 x float> [[VD:%.*]], bfloat noundef [[VS1:%.*]], <vscale x 4 x bfloat> [[VS2:%.*]], i64 noundef [[VL:%.*]]) #[[ATTR0]] {
+// CHECK-RV64-NEXT: [[ENTRY:.*:]]
+// CHECK-RV64-NEXT: [[TMP0:%.*]] = call <vscale x 4 x float> @llvm.riscv.vfwnmacc.nxv4f32.bf16.nxv4bf16.i64(<vscale x 4 x float> [[VD]], bfloat [[VS1]], <vscale x 4 x bfloat> [[VS2]], i64 0, i64 [[VL]], i64 3)
+// CHECK-RV64-NEXT: ret <vscale x 4 x float> [[TMP0]]
+//
+vfloat32m2_t test_vfwnmacc_vf_bf16m1_f32m2_rm(vfloat32m2_t vd, __bf16 vs1,
+ vbfloat16m1_t vs2, size_t vl) {
+ return __riscv_vfwnmacc_vf_bf16m1_f32m2_rm(vd, vs1, vs2, __RISCV_FRM_RNE, vl);
+}
+
+// CHECK-RV64-LABEL: define dso_local <vscale x 8 x float> @test_vfwnmacc_vv_bf16m2_f32m4_rm(
+// CHECK-RV64-SAME: <vscale x 8 x float> [[VD:%.*]], <vscale x 8 x bfloat> [[VS1:%.*]], <vscale x 8 x bfloat> [[VS2:%.*]], i64 noundef [[VL:%.*]]) #[[ATTR0]] {
+// CHECK-RV64-NEXT: [[ENTRY:.*:]]
+// CHECK-RV64-NEXT: [[TMP0:%.*]] = call <vscale x 8 x float> @llvm.riscv.vfwnmacc.nxv8f32.nxv8bf16.nxv8bf16.i64(<vscale x 8 x float> [[VD]], <vscale x 8 x bfloat> [[VS1]], <vscale x 8 x bfloat> [[VS2]], i64 0, i64 [[VL]], i64 3)
+// CHECK-RV64-NEXT: ret <vscale x 8 x float> [[TMP0]]
+//
+vfloat32m4_t test_vfwnmacc_vv_bf16m2_f32m4_rm(vfloat32m4_t vd,
+ vbfloat16m2_t vs1,
+ vbfloat16m2_t vs2, size_t vl) {
+ return __riscv_vfwnmacc_vv_bf16m2_f32m4_rm(vd, vs1, vs2, __RISCV_FRM_RNE, vl);
+}
+
+// CHECK-RV64-LABEL: define dso_local <vscale x 8 x float> @test_vfwnmacc_vf_bf16m2_f32m4_rm(
+// CHECK-RV64-SAME: <vscale x 8 x float> [[VD:%.*]], bfloat noundef [[VS1:%.*]], <vscale x 8 x bfloat> [[VS2:%.*]], i64 noundef [[VL:%.*]]) #[[ATTR0]] {
+// CHECK-RV64-NEXT: [[ENTRY:.*:]]
+// CHECK-RV64-NEXT: [[TMP0:%.*]] = call <vscale x 8 x float> @llvm.riscv.vfwnmacc.nxv8f32.bf16.nxv8bf16.i64(<vscale x 8 x float> [[VD]], bfloat [[VS1]], <vscale x 8 x bfloat> [[VS2]], i64 0, i64 [[VL]], i64 3)
+// CHECK-RV64-NEXT: ret <vscale x 8 x float> [[TMP0]]
+//
+vfloat32m4_t test_vfwnmacc_vf_bf16m2_f32m4_rm(vfloat32m4_t vd, __bf16 vs1,
+ vbfloat16m2_t vs2, size_t vl) {
+ return __riscv_vfwnmacc_vf_bf16m2_f32m4_rm(vd, vs1, vs2, __RISCV_FRM_RNE, vl);
+}
+
+// CHECK-RV64-LABEL: define dso_local <vscale x 16 x float> @test_vfwnmacc_vv_bf16m4_f32m8_rm(
+// CHECK-RV64-SAME: <vscale x 16 x float> [[VD:%.*]], <vscale x 16 x bfloat> [[VS1:%.*]], <vscale x 16 x bfloat> [[VS2:%.*]], i64 noundef [[VL:%.*]]) #[[ATTR0]] {
+// CHECK-RV64-NEXT: [[ENTRY:.*:]]
+// CHECK-RV64-NEXT: [[TMP0:%.*]] = call <vscale x 16 x float> @llvm.riscv.vfwnmacc.nxv16f32.nxv16bf16.nxv16bf16.i64(<vscale x 16 x float> [[VD]], <vscale x 16 x bfloat> [[VS1]], <vscale x 16 x bfloat> [[VS2]], i64 0, i64 [[VL]], i64 3)
+// CHECK-RV64-NEXT: ret <vscale x 16 x float> [[TMP0]]
+//
+vfloat32m8_t test_vfwnmacc_vv_bf16m4_f32m8_rm(vfloat32m8_t vd,
+ vbfloat16m4_t vs1,
+ vbfloat16m4_t vs2, size_t vl) {
+ return __riscv_vfwnmacc_vv_bf16m4_f32m8_rm(vd, vs1, vs2, __RISCV_FRM_RNE, vl);
+}
+
+// CHECK-RV64-LABEL: define dso_local <vscale x 16 x float> @test_vfwnmacc_vf_bf16m4_f32m8_rm(
+// CHECK-RV64-SAME: <vscale x 16 x float> [[VD:%.*]], bfloat noundef [[VS1:%.*]], <vscale x 16 x bfloat> [[VS2:%.*]], i64 noundef [[VL:%.*]]) #[[ATTR0]] {
+// CHECK-RV64-NEXT: [[ENTRY:.*:]]
+// CHECK-RV64-NEXT: [[TMP0:%.*]] = call <vscale x 16 x float> @llvm.riscv.vfwnmacc.nxv16f32.bf16.nxv16bf16.i64(<vscale x 16 x float> [[VD]], bfloat [[VS1]], <vscale x 16 x bfloat> [[VS2]], i64 0, i64 [[VL]], i64 3)
+// CHECK-RV64-NEXT: ret <vscale x 16 x float> [[TMP0]]
+//
+vfloat32m8_t test_vfwnmacc_vf_bf16m4_f32m8_rm(vfloat32m8_t vd, __bf16 vs1,
+ vbfloat16m4_t vs2, size_t vl) {
+ return __riscv_vfwnmacc_vf_bf16m4_f32m8_rm(vd, vs1, vs2, __RISCV_FRM_RNE, vl);
+}
+
+// CHECK-RV64-LABEL: define dso_local <vscale x 1 x float> @test_vfwnmacc_vv_bf16mf4_f32mf2_rm_m(
+// CHECK-RV64-SAME: <vscale x 1 x i1> [[VM:%.*]], <vscale x 1 x float> [[VD:%.*]], <vscale x 1 x bfloat> [[VS1:%.*]], <vscale x 1 x bfloat> [[VS2:%.*]], i64 noundef [[VL:%.*]]) #[[ATTR0]] {
+// CHECK-RV64-NEXT: [[ENTRY:.*:]]
+// CHECK-RV64-NEXT: [[TMP0:%.*]] = call <vscale x 1 x float> @llvm.riscv.vfwnmacc.mask.nxv1f32.nxv1bf16.nxv1bf16.i64(<vscale x 1 x float> [[VD]], <vscale x 1 x bfloat> [[VS1]], <vscale x 1 x bfloat> [[VS2]], <vscale x 1 x i1> [[VM]], i64 0, i64 [[VL]], i64 3)
+// CHECK-RV64-NEXT: ret <vscale x 1 x float> [[TMP0]]
+//
+vfloat32mf2_t test_vfwnmacc_vv_bf16mf4_f32mf2_rm_m(vbool64_t vm,
+ vfloat32mf2_t vd,
+ vbfloat16mf4_t vs1,
+ vbfloat16mf4_t vs2,
+ size_t vl) {
+ return __riscv_vfwnmacc_vv_bf16mf4_f32mf2_rm_m(vm, vd, vs1, vs2,
+ __RISCV_FRM_RNE, vl);
+}
+
+// CHECK-RV64-LABEL: define dso_local <vscale x 1 x float> @test_vfwnmacc_vf_bf16mf4_f32mf2_rm_m(
+// CHECK-RV64-SAME: <vscale x 1 x i1> [[VM:%.*]], <vscale x 1 x float> [[VD:%.*]], bfloat noundef [[VS1:%.*]], <vscale x 1 x bfloat> [[VS2:%.*]], i64 noundef [[VL:%.*]]) #[[ATTR0]] {
+// CHECK-RV64-NEXT: [[ENTRY:.*:]]
+// CHECK-RV64-NEXT: [[TMP0:%.*]] = call <vscale x 1 x float> @llvm.riscv.vfwnmacc.mask.nxv1f32.bf16.nxv1bf16.i64(<vscale x 1 x float> [[VD]], bfloat [[VS1]], <vscale x 1 x bfloat> [[VS2]], <vscale x 1 x i1> [[VM]], i64 0, i64 [[VL]], i64 3)
+// CHECK-RV64-NEXT: ret <vscale x 1 x float> [[TMP0]]
+//
+vfloat32mf2_t test_vfwnmacc_vf_bf16mf4_f32mf2_rm_m(vbool64_t vm,
+ vfloat32mf2_t vd, __bf16 vs1,
+ vbfloat16mf4_t vs2,
+ size_t vl) {
+ return __riscv_vfwnmacc_vf_bf16mf4_f32mf2_rm_m(vm, vd, vs1, vs2,
+ __RISCV_FRM_RNE, vl);
+}
+
+// CHECK-RV64-LABEL: define dso_local <vscale x 2 x float> @test_vfwnmacc_vv_bf16mf2_f32m1_rm_m(
+// CHECK-RV64-SAME: <vscale x 2 x i1> [[VM:%.*]], <vscale x 2 x float> [[VD:%.*]], <vscale x 2 x bfloat> [[VS1:%.*]], <vscale x 2 x bfloat> [[VS2:%.*]], i64 noundef [[VL:%.*]]) #[[ATTR0]] {
+// CHECK-RV64-NEXT: [[ENTRY:.*:]]
+// CHECK-RV64-NEXT: [[TMP0:%.*]] = call <vscale x 2 x float> @llvm.riscv.vfwnmacc.mask.nxv2f32.nxv2bf16.nxv2bf16.i64(<vscale x 2 x float> [[VD]], <vscale x 2 x bfloat> [[VS1]], <vscale x 2 x bfloat> [[VS2]], <vscale x 2 x i1> [[VM]], i64 0, i64 [[VL]], i64 3)
+// CHECK-RV64-NEXT: ret <vscale x 2 x float> [[TMP0]]
+//
+vfloat32m1_t test_vfwnmacc_vv_bf16mf2_f32m1_rm_m(vbool32_t vm, vfloat32m1_t vd,
+ vbfloat16mf2_t vs1,
+ vbfloat16mf2_t vs2,
+ size_t vl) {
+ return __riscv_vfwnmacc_vv_bf16mf2_f32m1_rm_m(vm, vd, vs1, vs2,
+ __RISCV_FRM_RNE, vl);
+}
+
+// CHECK-RV64-LABEL: define dso_local <vscale x 2 x float> @test_vfwnmacc_vf_bf16mf2_f32m1_rm_m(
+// CHECK-RV64-SAME: <vscale x 2 x i1> [[VM:%.*]], <vscale x 2 x float> [[VD:%.*]], bfloat noundef [[VS1:%.*]], <vscale x 2 x bfloat> [[VS2:%.*]], i64 noundef [[VL:%.*]]) #[[ATTR0]] {
+// CHECK-RV64-NEXT: [[ENTRY:.*:]]
+// CHECK-RV64-NEXT: [[TMP0:%.*]] = call <vscale x 2 x float> @llvm.riscv.vfwnmacc.mask.nxv2f32.bf16.nxv2bf16.i64(<vscale x 2 x float> [[VD]], bfloat [[VS1]], <vscale x 2 x bfloat> [[VS2]], <vscale x 2 x i1> [[VM]], i64 0, i64 [[VL]], i64 3)
+// CHECK-RV64-NEXT: ret <vscale x 2 x float> [[TMP0]]
+//
+vfloat32m1_t test_vfwnmacc_vf_bf16mf2_f32m1_rm_m(vbool32_t vm, vfloat32m1_t vd,
+ __bf16 vs1, vbfloat16mf2_t vs2,
+ size_t vl) {
+ return __riscv_vfwnmacc_vf_bf16mf2_f32m1_rm_m(vm, vd, vs1, vs2,
+ __RISCV_FRM_RNE, vl);
+}
+
+// CHECK-RV64-LABEL: define dso_local <vscale x 4 x float> @test_vfwnmacc_vv_bf16m1_f32m2_rm_m(
+// CHECK-RV64-SAME: <vscale x 4 x i1> [[VM:%.*]], <vscale x 4 x float> [[VD:%.*]], <vscale x 4 x bfloat> [[VS1:%.*]], <vscale x 4 x bfloat> [[VS2:%.*]], i64 noundef [[VL:%.*]]) #[[ATTR0]] {
+// CHECK-RV64-NEXT: [[ENTRY:.*:]]
+// CHECK-RV64-NEXT: [[TMP0:%.*]] = call <vscale x 4 x float> @llvm.riscv.vfwnmacc.mask.nxv4f32.nxv4bf16.nxv4bf16.i64(<vscale x 4 x float> [[VD]], <vscale x 4 x bfloat> [[VS1]], <vscale x 4 x bfloat> [[VS2]], <vscale x 4 x i1> [[VM]], i64 0, i64 [[VL]], i64 3)
+// CHECK-RV64-NEXT: ret <vscale x 4 x float> [[TMP0]]
+//
+vfloat32m2_t test_vfwnmacc_vv_bf16m1_f32m2_rm_m(vbool16_t vm, vfloat32m2_t vd,
+ vbfloat16m1_t vs1,
+ vbfloat16m1_t vs2, size_t vl) {
+ return __riscv_vfwnmacc_vv_bf16m1_f32m2_rm_m(vm, vd, vs1, vs2,
+ __RISCV_FRM_RNE, vl);
+}
+
+// CHECK-RV64-LABEL: define dso_local <vscale x 4 x float> @test_vfwnmacc_vf_bf16m1_f32m2_rm_m(
+// CHECK-RV64-SAME: <vscale x 4 x i1> [[VM:%.*]], <vscale x 4 x float> [[VD:%.*]], bfloat noundef [[VS1:%.*]], <vscale x 4 x bfloat> [[VS2:%.*]], i64 noundef [[VL:%.*]]) #[[ATTR0]] {
+// CHECK-RV64-NEXT: [[ENTRY:.*:]]
+// CHECK-RV64-NEXT: [[TMP0:%.*]] = call <vscale x 4 x float> @llvm.riscv.vfwnmacc.mask.nxv4f32.bf16.nxv4bf16.i64(<vscale x 4 x float> [[VD]], bfloat [[VS1]], <vscale x 4 x bfloat> [[VS2]], <vscale x 4 x i1> [[VM]], i64 0, i64 [[VL]], i64 3)
+// CHECK-RV64-NEXT: ret <vscale x 4 x float> [[TMP0]]
+//
+vfloat32m2_t test_vfwnmacc_vf_bf16m1_f32m2_rm_m(vbool16_t vm, vfloat32m2_t vd,
+ __bf16 vs1, vbfloat16m1_t vs2,
+ size_t vl) {
+ return __riscv_vfwnmacc_vf_bf16m1_f32m2_rm_m(vm, vd, vs1, vs2,
+ __RISCV_FRM_RNE, vl);
+}
+
+// CHECK-RV64-LABEL: define dso_local <vscale x 8 x float> @test_vfwnmacc_vv_bf16m2_f32m4_rm_m(
+// CHECK-RV64-SAME: <vscale x 8 x i1> [[VM:%.*]], <vscale x 8 x float> [[VD:%.*]], <vscale x 8 x bfloat> [[VS1:%.*]], <vscale x 8 x bfloat> [[VS2:%.*]], i64 noundef [[VL:%.*]]) #[[ATTR0]] {
+// CHECK-RV64-NEXT: [[ENTRY:.*:]]
+// CHECK-RV64-NEXT: [[TMP0:%.*]] = call <vscale x 8 x float> @llvm.riscv.vfwnmacc.mask.nxv8f32.nxv8bf16.nxv8bf16.i64(<vscale x 8 x float> [[VD]], <vscale x 8 x bfloat> [[VS1]], <vscale x 8 x bfloat> [[VS2]], <vscale x 8 x i1> [[VM]], i64 0, i64 [[VL]], i64 3)
+// CHECK-RV64-NEXT: ret <vscale x 8 x float> [[TMP0]]
+//
+vfloat32m4_t test_vfwnmacc_vv_bf16m2_f32m4_rm_m(vbool8_t vm, vfloat32m4_t vd,
+ vbfloat16m2_t vs1,
+ vbfloat16m2_t vs2, size_t vl) {
+ return __riscv_vfwnmacc_vv_bf16m2_f32m4_rm_m(vm, vd, vs1, vs2,
+ __RISCV_FRM_RNE, vl);
+}
+
+// CHECK-RV64-LABEL: define dso_local <vscale x 8 x float> @test_vfwnmacc_vf_bf16m2_f32m4_rm_m(
+// CHECK-RV64-SAME: <vscale x 8 x i1> [[VM:%.*]], <vscale x 8 x float> [[VD:%.*]], bfloat noundef [[VS1:%.*]], <vscale x 8 x bfloat> [[VS2:%.*]], i64 noundef [[VL:%.*]]) #[[ATTR0]] {
+// CHECK-RV64-NEXT: [[ENTRY:.*:]]
+// CHECK-RV64-NEXT: [[TMP0:%.*]] = call <vscale x 8 x float> @llvm.riscv.vfwnmacc.mask.nxv8f32.bf16.nxv8bf16.i64(<vscale x 8 x float> [[VD]], bfloat [[VS1]], <vscale x 8 x bfloat> [[VS2]], <vscale x 8 x i1> [[VM]], i64 0, i64 [[VL]], i64 3)
+// CHECK-RV64-NEXT: ret <vscale x 8 x float> [[TMP0]]
+//
+vfloat32m4_t test_vfwnmacc_vf_bf16m2_f32m4_rm_m(vbool8_t vm, vfloat32m4_t vd,
+ __bf16 vs1, vbfloat16m2_t vs2,
+ size_t vl) {
+ return __riscv_vfwnmacc_vf_bf16m2_f32m4_rm_m(vm, vd, vs1, vs2,
+ __RISCV_FRM_RNE, vl);
+}
+
+// CHECK-RV64-LABEL: define dso_local <vscale x 16 x float> @test_vfwnmacc_vv_bf16m4_f32m8_rm_m(
+// CHECK-RV64-SAME: <vscale x 16 x i1> [[VM:%.*]], <vscale x 16 x float> [[VD:%.*]], <vscale x 16 x bfloat> [[VS1:%.*]], <vscale x 16 x bfloat> [[VS2:%.*]], i64 noundef [[VL:%.*]]) #[[ATTR0]] {
+// CHECK-RV64-NEXT: [[ENTRY:.*:]]
+// CHECK-RV64-NEXT: [[TMP0:%.*]] = call <vscale x 16 x float> @llvm.riscv.vfwnmacc.mask.nxv16f32.nxv16bf16.nxv16bf16.i64(<vscale x 16 x float> [[VD]], <vscale x 16 x bfloat> [[VS1]], <vscale x 16 x bfloat> [[VS2]], <vscale x 16 x i1> [[VM]], i64 0, i64 [[VL]], i64 3)
+// CHECK-RV64-NEXT: ret <vscale x 16 x float> [[TMP0]]
+//
+vfloat32m8_t test_vfwnmacc_vv_bf16m4_f32m8_rm_m(vbool4_t vm, vfloat32m8_t vd,
+ vbfloat16m4_t vs1,
+ vbfloat16m4_t vs2, size_t vl) {
+ return __riscv_vfwnmacc_vv_bf16m4_f32m8_rm_m(vm, vd, vs1, vs2,
+ __RISCV_FRM_RNE, vl);
+}
+
+// CHECK-RV64-LABEL: define dso_local <vscale x 16 x float> @test_vfwnmacc_vf_bf16m4_f32m8_rm_m(
+// CHECK-RV64-SAME: <vscale x 16 x i1> [[VM:%.*]], <vscale x 16 x float> [[VD:%.*]], bfloat noundef [[VS1:%.*]], <vscale x 16 x bfloat> [[VS2:%.*]], i64 noundef [[VL:%.*]]) #[[ATTR0]] {
+// CHECK-RV64-NEXT: [[ENTRY:.*:]]
+// CHECK-RV64-NEXT: [[TMP0:%.*]] = call <vscale x 16 x float> @llvm.riscv.vfwnmacc.mask.nxv16f32.bf16.nxv16bf16.i64(<vscale x 16 x float> [[VD]], bfloat [[VS1]], <vscale x 16 x bfloat> [[VS2]], <vscale x 16 x i1> [[VM]], i64 0, i64 [[VL]], i64 3)
+// CHECK-RV64-NEXT: ret <vscale x 16 x float> [[TMP0]]
+//
+vfloat32m8_t test_vfwnmacc_vf_bf16m4_f32m8_rm_m(vbool4_t vm, vfloat32m8_t vd,
+ __bf16 vs1, vbfloat16m4_t vs2,
+ size_t vl) {
+ return __riscv_vfwnmacc_vf_bf16m4_f32m8_rm_m(vm, vd, vs1, vs2,
+ __RISCV_FRM_RNE, vl);
+}
diff --git a/clang/test/CodeGen/RISCV/rvv-intrinsics-autogenerated/zvfbfa/non-policy/non-overloaded/vfwnmsac.c b/clang/test/CodeGen/RISCV/rvv-intrinsics-autogenerated/zvfbfa/non-policy/non-overloaded/vfwnmsac.c
new file mode 100644
index 0000000..f37dd31
--- /dev/null
+++ b/clang/test/CodeGen/RISCV/rvv-intrinsics-autogenerated/zvfbfa/non-policy/non-overloaded/vfwnmsac.c
@@ -0,0 +1,494 @@
+// NOTE: Assertions have been autogenerated by utils/update_cc_test_checks.py UTC_ARGS: --version 5
+// REQUIRES: riscv-registered-target
+// RUN: %clang_cc1 -triple riscv64 -target-feature +v \
+// RUN: -target-feature +experimental-zvfbfa -disable-O0-optnone \
+// RUN: -emit-llvm %s -o - | opt -S -passes=mem2reg | \
+// RUN: FileCheck --check-prefix=CHECK-RV64 %s
+
+#include <riscv_vector.h>
+
+// CHECK-RV64-LABEL: define dso_local <vscale x 1 x float> @test_vfwnmsac_vv_bf16mf4_f32mf2(
+// CHECK-RV64-SAME: <vscale x 1 x float> [[VD:%.*]], <vscale x 1 x bfloat> [[VS1:%.*]], <vscale x 1 x bfloat> [[VS2:%.*]], i64 noundef [[VL:%.*]]) #[[ATTR0:[0-9]+]] {
+// CHECK-RV64-NEXT: [[ENTRY:.*:]]
+// CHECK-RV64-NEXT: [[TMP0:%.*]] = call <vscale x 1 x float> @llvm.riscv.vfwnmsac.nxv1f32.nxv1bf16.nxv1bf16.i64(<vscale x 1 x float> [[VD]], <vscale x 1 x bfloat> [[VS1]], <vscale x 1 x bfloat> [[VS2]], i64 7, i64 [[VL]], i64 3)
+// CHECK-RV64-NEXT: ret <vscale x 1 x float> [[TMP0]]
+//
+vfloat32mf2_t test_vfwnmsac_vv_bf16mf4_f32mf2(vfloat32mf2_t vd,
+ vbfloat16mf4_t vs1,
+ vbfloat16mf4_t vs2, size_t vl) {
+ return __riscv_vfwnmsac_vv_bf16mf4_f32mf2(vd, vs1, vs2, vl);
+}
+
+// CHECK-RV64-LABEL: define dso_local <vscale x 1 x float> @test_vfwnmsac_vf_bf16mf4_f32mf2(
+// CHECK-RV64-SAME: <vscale x 1 x float> [[VD:%.*]], bfloat noundef [[VS1:%.*]], <vscale x 1 x bfloat> [[VS2:%.*]], i64 noundef [[VL:%.*]]) #[[ATTR0]] {
+// CHECK-RV64-NEXT: [[ENTRY:.*:]]
+// CHECK-RV64-NEXT: [[TMP0:%.*]] = call <vscale x 1 x float> @llvm.riscv.vfwnmsac.nxv1f32.bf16.nxv1bf16.i64(<vscale x 1 x float> [[VD]], bfloat [[VS1]], <vscale x 1 x bfloat> [[VS2]], i64 7, i64 [[VL]], i64 3)
+// CHECK-RV64-NEXT: ret <vscale x 1 x float> [[TMP0]]
+//
+vfloat32mf2_t test_vfwnmsac_vf_bf16mf4_f32mf2(vfloat32mf2_t vd, __bf16 vs1,
+ vbfloat16mf4_t vs2, size_t vl) {
+ return __riscv_vfwnmsac_vf_bf16mf4_f32mf2(vd, vs1, vs2, vl);
+}
+
+// CHECK-RV64-LABEL: define dso_local <vscale x 2 x float> @test_vfwnmsac_vv_bf16mf2_f32m1(
+// CHECK-RV64-SAME: <vscale x 2 x float> [[VD:%.*]], <vscale x 2 x bfloat> [[VS1:%.*]], <vscale x 2 x bfloat> [[VS2:%.*]], i64 noundef [[VL:%.*]]) #[[ATTR0]] {
+// CHECK-RV64-NEXT: [[ENTRY:.*:]]
+// CHECK-RV64-NEXT: [[TMP0:%.*]] = call <vscale x 2 x float> @llvm.riscv.vfwnmsac.nxv2f32.nxv2bf16.nxv2bf16.i64(<vscale x 2 x float> [[VD]], <vscale x 2 x bfloat> [[VS1]], <vscale x 2 x bfloat> [[VS2]], i64 7, i64 [[VL]], i64 3)
+// CHECK-RV64-NEXT: ret <vscale x 2 x float> [[TMP0]]
+//
+vfloat32m1_t test_vfwnmsac_vv_bf16mf2_f32m1(vfloat32m1_t vd, vbfloat16mf2_t vs1,
+ vbfloat16mf2_t vs2, size_t vl) {
+ return __riscv_vfwnmsac_vv_bf16mf2_f32m1(vd, vs1, vs2, vl);
+}
+
+// CHECK-RV64-LABEL: define dso_local <vscale x 2 x float> @test_vfwnmsac_vf_bf16mf2_f32m1(
+// CHECK-RV64-SAME: <vscale x 2 x float> [[VD:%.*]], bfloat noundef [[VS1:%.*]], <vscale x 2 x bfloat> [[VS2:%.*]], i64 noundef [[VL:%.*]]) #[[ATTR0]] {
+// CHECK-RV64-NEXT: [[ENTRY:.*:]]
+// CHECK-RV64-NEXT: [[TMP0:%.*]] = call <vscale x 2 x float> @llvm.riscv.vfwnmsac.nxv2f32.bf16.nxv2bf16.i64(<vscale x 2 x float> [[VD]], bfloat [[VS1]], <vscale x 2 x bfloat> [[VS2]], i64 7, i64 [[VL]], i64 3)
+// CHECK-RV64-NEXT: ret <vscale x 2 x float> [[TMP0]]
+//
+vfloat32m1_t test_vfwnmsac_vf_bf16mf2_f32m1(vfloat32m1_t vd, __bf16 vs1,
+ vbfloat16mf2_t vs2, size_t vl) {
+ return __riscv_vfwnmsac_vf_bf16mf2_f32m1(vd, vs1, vs2, vl);
+}
+
+// CHECK-RV64-LABEL: define dso_local <vscale x 4 x float> @test_vfwnmsac_vv_bf16m1_f32m2(
+// CHECK-RV64-SAME: <vscale x 4 x float> [[VD:%.*]], <vscale x 4 x bfloat> [[VS1:%.*]], <vscale x 4 x bfloat> [[VS2:%.*]], i64 noundef [[VL:%.*]]) #[[ATTR0]] {
+// CHECK-RV64-NEXT: [[ENTRY:.*:]]
+// CHECK-RV64-NEXT: [[TMP0:%.*]] = call <vscale x 4 x float> @llvm.riscv.vfwnmsac.nxv4f32.nxv4bf16.nxv4bf16.i64(<vscale x 4 x float> [[VD]], <vscale x 4 x bfloat> [[VS1]], <vscale x 4 x bfloat> [[VS2]], i64 7, i64 [[VL]], i64 3)
+// CHECK-RV64-NEXT: ret <vscale x 4 x float> [[TMP0]]
+//
+vfloat32m2_t test_vfwnmsac_vv_bf16m1_f32m2(vfloat32m2_t vd, vbfloat16m1_t vs1,
+ vbfloat16m1_t vs2, size_t vl) {
+ return __riscv_vfwnmsac_vv_bf16m1_f32m2(vd, vs1, vs2, vl);
+}
+
+// CHECK-RV64-LABEL: define dso_local <vscale x 4 x float> @test_vfwnmsac_vf_bf16m1_f32m2(
+// CHECK-RV64-SAME: <vscale x 4 x float> [[VD:%.*]], bfloat noundef [[VS1:%.*]], <vscale x 4 x bfloat> [[VS2:%.*]], i64 noundef [[VL:%.*]]) #[[ATTR0]] {
+// CHECK-RV64-NEXT: [[ENTRY:.*:]]
+// CHECK-RV64-NEXT: [[TMP0:%.*]] = call <vscale x 4 x float> @llvm.riscv.vfwnmsac.nxv4f32.bf16.nxv4bf16.i64(<vscale x 4 x float> [[VD]], bfloat [[VS1]], <vscale x 4 x bfloat> [[VS2]], i64 7, i64 [[VL]], i64 3)
+// CHECK-RV64-NEXT: ret <vscale x 4 x float> [[TMP0]]
+//
+vfloat32m2_t test_vfwnmsac_vf_bf16m1_f32m2(vfloat32m2_t vd, __bf16 vs1,
+ vbfloat16m1_t vs2, size_t vl) {
+ return __riscv_vfwnmsac_vf_bf16m1_f32m2(vd, vs1, vs2, vl);
+}
+
+// CHECK-RV64-LABEL: define dso_local <vscale x 8 x float> @test_vfwnmsac_vv_bf16m2_f32m4(
+// CHECK-RV64-SAME: <vscale x 8 x float> [[VD:%.*]], <vscale x 8 x bfloat> [[VS1:%.*]], <vscale x 8 x bfloat> [[VS2:%.*]], i64 noundef [[VL:%.*]]) #[[ATTR0]] {
+// CHECK-RV64-NEXT: [[ENTRY:.*:]]
+// CHECK-RV64-NEXT: [[TMP0:%.*]] = call <vscale x 8 x float> @llvm.riscv.vfwnmsac.nxv8f32.nxv8bf16.nxv8bf16.i64(<vscale x 8 x float> [[VD]], <vscale x 8 x bfloat> [[VS1]], <vscale x 8 x bfloat> [[VS2]], i64 7, i64 [[VL]], i64 3)
+// CHECK-RV64-NEXT: ret <vscale x 8 x float> [[TMP0]]
+//
+vfloat32m4_t test_vfwnmsac_vv_bf16m2_f32m4(vfloat32m4_t vd, vbfloat16m2_t vs1,
+ vbfloat16m2_t vs2, size_t vl) {
+ return __riscv_vfwnmsac_vv_bf16m2_f32m4(vd, vs1, vs2, vl);
+}
+
+// CHECK-RV64-LABEL: define dso_local <vscale x 8 x float> @test_vfwnmsac_vf_bf16m2_f32m4(
+// CHECK-RV64-SAME: <vscale x 8 x float> [[VD:%.*]], bfloat noundef [[VS1:%.*]], <vscale x 8 x bfloat> [[VS2:%.*]], i64 noundef [[VL:%.*]]) #[[ATTR0]] {
+// CHECK-RV64-NEXT: [[ENTRY:.*:]]
+// CHECK-RV64-NEXT: [[TMP0:%.*]] = call <vscale x 8 x float> @llvm.riscv.vfwnmsac.nxv8f32.bf16.nxv8bf16.i64(<vscale x 8 x float> [[VD]], bfloat [[VS1]], <vscale x 8 x bfloat> [[VS2]], i64 7, i64 [[VL]], i64 3)
+// CHECK-RV64-NEXT: ret <vscale x 8 x float> [[TMP0]]
+//
+vfloat32m4_t test_vfwnmsac_vf_bf16m2_f32m4(vfloat32m4_t vd, __bf16 vs1,
+ vbfloat16m2_t vs2, size_t vl) {
+ return __riscv_vfwnmsac_vf_bf16m2_f32m4(vd, vs1, vs2, vl);
+}
+
+// CHECK-RV64-LABEL: define dso_local <vscale x 16 x float> @test_vfwnmsac_vv_bf16m4_f32m8(
+// CHECK-RV64-SAME: <vscale x 16 x float> [[VD:%.*]], <vscale x 16 x bfloat> [[VS1:%.*]], <vscale x 16 x bfloat> [[VS2:%.*]], i64 noundef [[VL:%.*]]) #[[ATTR0]] {
+// CHECK-RV64-NEXT: [[ENTRY:.*:]]
+// CHECK-RV64-NEXT: [[TMP0:%.*]] = call <vscale x 16 x float> @llvm.riscv.vfwnmsac.nxv16f32.nxv16bf16.nxv16bf16.i64(<vscale x 16 x float> [[VD]], <vscale x 16 x bfloat> [[VS1]], <vscale x 16 x bfloat> [[VS2]], i64 7, i64 [[VL]], i64 3)
+// CHECK-RV64-NEXT: ret <vscale x 16 x float> [[TMP0]]
+//
+vfloat32m8_t test_vfwnmsac_vv_bf16m4_f32m8(vfloat32m8_t vd, vbfloat16m4_t vs1,
+ vbfloat16m4_t vs2, size_t vl) {
+ return __riscv_vfwnmsac_vv_bf16m4_f32m8(vd, vs1, vs2, vl);
+}
+
+// CHECK-RV64-LABEL: define dso_local <vscale x 16 x float> @test_vfwnmsac_vf_bf16m4_f32m8(
+// CHECK-RV64-SAME: <vscale x 16 x float> [[VD:%.*]], bfloat noundef [[VS1:%.*]], <vscale x 16 x bfloat> [[VS2:%.*]], i64 noundef [[VL:%.*]]) #[[ATTR0]] {
+// CHECK-RV64-NEXT: [[ENTRY:.*:]]
+// CHECK-RV64-NEXT: [[TMP0:%.*]] = call <vscale x 16 x float> @llvm.riscv.vfwnmsac.nxv16f32.bf16.nxv16bf16.i64(<vscale x 16 x float> [[VD]], bfloat [[VS1]], <vscale x 16 x bfloat> [[VS2]], i64 7, i64 [[VL]], i64 3)
+// CHECK-RV64-NEXT: ret <vscale x 16 x float> [[TMP0]]
+//
+vfloat32m8_t test_vfwnmsac_vf_bf16m4_f32m8(vfloat32m8_t vd, __bf16 vs1,
+ vbfloat16m4_t vs2, size_t vl) {
+ return __riscv_vfwnmsac_vf_bf16m4_f32m8(vd, vs1, vs2, vl);
+}
+
+// CHECK-RV64-LABEL: define dso_local <vscale x 1 x float> @test_vfwnmsac_vv_bf16mf4_f32mf2_m(
+// CHECK-RV64-SAME: <vscale x 1 x i1> [[VM:%.*]], <vscale x 1 x float> [[VD:%.*]], <vscale x 1 x bfloat> [[VS1:%.*]], <vscale x 1 x bfloat> [[VS2:%.*]], i64 noundef [[VL:%.*]]) #[[ATTR0]] {
+// CHECK-RV64-NEXT: [[ENTRY:.*:]]
+// CHECK-RV64-NEXT: [[TMP0:%.*]] = call <vscale x 1 x float> @llvm.riscv.vfwnmsac.mask.nxv1f32.nxv1bf16.nxv1bf16.i64(<vscale x 1 x float> [[VD]], <vscale x 1 x bfloat> [[VS1]], <vscale x 1 x bfloat> [[VS2]], <vscale x 1 x i1> [[VM]], i64 7, i64 [[VL]], i64 3)
+// CHECK-RV64-NEXT: ret <vscale x 1 x float> [[TMP0]]
+//
+vfloat32mf2_t test_vfwnmsac_vv_bf16mf4_f32mf2_m(vbool64_t vm, vfloat32mf2_t vd,
+ vbfloat16mf4_t vs1,
+ vbfloat16mf4_t vs2, size_t vl) {
+ return __riscv_vfwnmsac_vv_bf16mf4_f32mf2_m(vm, vd, vs1, vs2, vl);
+}
+
+// CHECK-RV64-LABEL: define dso_local <vscale x 1 x float> @test_vfwnmsac_vf_bf16mf4_f32mf2_m(
+// CHECK-RV64-SAME: <vscale x 1 x i1> [[VM:%.*]], <vscale x 1 x float> [[VD:%.*]], bfloat noundef [[VS1:%.*]], <vscale x 1 x bfloat> [[VS2:%.*]], i64 noundef [[VL:%.*]]) #[[ATTR0]] {
+// CHECK-RV64-NEXT: [[ENTRY:.*:]]
+// CHECK-RV64-NEXT: [[TMP0:%.*]] = call <vscale x 1 x float> @llvm.riscv.vfwnmsac.mask.nxv1f32.bf16.nxv1bf16.i64(<vscale x 1 x float> [[VD]], bfloat [[VS1]], <vscale x 1 x bfloat> [[VS2]], <vscale x 1 x i1> [[VM]], i64 7, i64 [[VL]], i64 3)
+// CHECK-RV64-NEXT: ret <vscale x 1 x float> [[TMP0]]
+//
+vfloat32mf2_t test_vfwnmsac_vf_bf16mf4_f32mf2_m(vbool64_t vm, vfloat32mf2_t vd,
+ __bf16 vs1, vbfloat16mf4_t vs2,
+ size_t vl) {
+ return __riscv_vfwnmsac_vf_bf16mf4_f32mf2_m(vm, vd, vs1, vs2, vl);
+}
+
+// CHECK-RV64-LABEL: define dso_local <vscale x 2 x float> @test_vfwnmsac_vv_bf16mf2_f32m1_m(
+// CHECK-RV64-SAME: <vscale x 2 x i1> [[VM:%.*]], <vscale x 2 x float> [[VD:%.*]], <vscale x 2 x bfloat> [[VS1:%.*]], <vscale x 2 x bfloat> [[VS2:%.*]], i64 noundef [[VL:%.*]]) #[[ATTR0]] {
+// CHECK-RV64-NEXT: [[ENTRY:.*:]]
+// CHECK-RV64-NEXT: [[TMP0:%.*]] = call <vscale x 2 x float> @llvm.riscv.vfwnmsac.mask.nxv2f32.nxv2bf16.nxv2bf16.i64(<vscale x 2 x float> [[VD]], <vscale x 2 x bfloat> [[VS1]], <vscale x 2 x bfloat> [[VS2]], <vscale x 2 x i1> [[VM]], i64 7, i64 [[VL]], i64 3)
+// CHECK-RV64-NEXT: ret <vscale x 2 x float> [[TMP0]]
+//
+vfloat32m1_t test_vfwnmsac_vv_bf16mf2_f32m1_m(vbool32_t vm, vfloat32m1_t vd,
+ vbfloat16mf2_t vs1,
+ vbfloat16mf2_t vs2, size_t vl) {
+ return __riscv_vfwnmsac_vv_bf16mf2_f32m1_m(vm, vd, vs1, vs2, vl);
+}
+
+// CHECK-RV64-LABEL: define dso_local <vscale x 2 x float> @test_vfwnmsac_vf_bf16mf2_f32m1_m(
+// CHECK-RV64-SAME: <vscale x 2 x i1> [[VM:%.*]], <vscale x 2 x float> [[VD:%.*]], bfloat noundef [[VS1:%.*]], <vscale x 2 x bfloat> [[VS2:%.*]], i64 noundef [[VL:%.*]]) #[[ATTR0]] {
+// CHECK-RV64-NEXT: [[ENTRY:.*:]]
+// CHECK-RV64-NEXT: [[TMP0:%.*]] = call <vscale x 2 x float> @llvm.riscv.vfwnmsac.mask.nxv2f32.bf16.nxv2bf16.i64(<vscale x 2 x float> [[VD]], bfloat [[VS1]], <vscale x 2 x bfloat> [[VS2]], <vscale x 2 x i1> [[VM]], i64 7, i64 [[VL]], i64 3)
+// CHECK-RV64-NEXT: ret <vscale x 2 x float> [[TMP0]]
+//
+vfloat32m1_t test_vfwnmsac_vf_bf16mf2_f32m1_m(vbool32_t vm, vfloat32m1_t vd,
+ __bf16 vs1, vbfloat16mf2_t vs2,
+ size_t vl) {
+ return __riscv_vfwnmsac_vf_bf16mf2_f32m1_m(vm, vd, vs1, vs2, vl);
+}
+
+// CHECK-RV64-LABEL: define dso_local <vscale x 4 x float> @test_vfwnmsac_vv_bf16m1_f32m2_m(
+// CHECK-RV64-SAME: <vscale x 4 x i1> [[VM:%.*]], <vscale x 4 x float> [[VD:%.*]], <vscale x 4 x bfloat> [[VS1:%.*]], <vscale x 4 x bfloat> [[VS2:%.*]], i64 noundef [[VL:%.*]]) #[[ATTR0]] {
+// CHECK-RV64-NEXT: [[ENTRY:.*:]]
+// CHECK-RV64-NEXT: [[TMP0:%.*]] = call <vscale x 4 x float> @llvm.riscv.vfwnmsac.mask.nxv4f32.nxv4bf16.nxv4bf16.i64(<vscale x 4 x float> [[VD]], <vscale x 4 x bfloat> [[VS1]], <vscale x 4 x bfloat> [[VS2]], <vscale x 4 x i1> [[VM]], i64 7, i64 [[VL]], i64 3)
+// CHECK-RV64-NEXT: ret <vscale x 4 x float> [[TMP0]]
+//
+vfloat32m2_t test_vfwnmsac_vv_bf16m1_f32m2_m(vbool16_t vm, vfloat32m2_t vd,
+ vbfloat16m1_t vs1,
+ vbfloat16m1_t vs2, size_t vl) {
+ return __riscv_vfwnmsac_vv_bf16m1_f32m2_m(vm, vd, vs1, vs2, vl);
+}
+
+// CHECK-RV64-LABEL: define dso_local <vscale x 4 x float> @test_vfwnmsac_vf_bf16m1_f32m2_m(
+// CHECK-RV64-SAME: <vscale x 4 x i1> [[VM:%.*]], <vscale x 4 x float> [[VD:%.*]], bfloat noundef [[VS1:%.*]], <vscale x 4 x bfloat> [[VS2:%.*]], i64 noundef [[VL:%.*]]) #[[ATTR0]] {
+// CHECK-RV64-NEXT: [[ENTRY:.*:]]
+// CHECK-RV64-NEXT: [[TMP0:%.*]] = call <vscale x 4 x float> @llvm.riscv.vfwnmsac.mask.nxv4f32.bf16.nxv4bf16.i64(<vscale x 4 x float> [[VD]], bfloat [[VS1]], <vscale x 4 x bfloat> [[VS2]], <vscale x 4 x i1> [[VM]], i64 7, i64 [[VL]], i64 3)
+// CHECK-RV64-NEXT: ret <vscale x 4 x float> [[TMP0]]
+//
+vfloat32m2_t test_vfwnmsac_vf_bf16m1_f32m2_m(vbool16_t vm, vfloat32m2_t vd,
+ __bf16 vs1, vbfloat16m1_t vs2,
+ size_t vl) {
+ return __riscv_vfwnmsac_vf_bf16m1_f32m2_m(vm, vd, vs1, vs2, vl);
+}
+
+// CHECK-RV64-LABEL: define dso_local <vscale x 8 x float> @test_vfwnmsac_vv_bf16m2_f32m4_m(
+// CHECK-RV64-SAME: <vscale x 8 x i1> [[VM:%.*]], <vscale x 8 x float> [[VD:%.*]], <vscale x 8 x bfloat> [[VS1:%.*]], <vscale x 8 x bfloat> [[VS2:%.*]], i64 noundef [[VL:%.*]]) #[[ATTR0]] {
+// CHECK-RV64-NEXT: [[ENTRY:.*:]]
+// CHECK-RV64-NEXT: [[TMP0:%.*]] = call <vscale x 8 x float> @llvm.riscv.vfwnmsac.mask.nxv8f32.nxv8bf16.nxv8bf16.i64(<vscale x 8 x float> [[VD]], <vscale x 8 x bfloat> [[VS1]], <vscale x 8 x bfloat> [[VS2]], <vscale x 8 x i1> [[VM]], i64 7, i64 [[VL]], i64 3)
+// CHECK-RV64-NEXT: ret <vscale x 8 x float> [[TMP0]]
+//
+vfloat32m4_t test_vfwnmsac_vv_bf16m2_f32m4_m(vbool8_t vm, vfloat32m4_t vd,
+ vbfloat16m2_t vs1,
+ vbfloat16m2_t vs2, size_t vl) {
+ return __riscv_vfwnmsac_vv_bf16m2_f32m4_m(vm, vd, vs1, vs2, vl);
+}
+
+// CHECK-RV64-LABEL: define dso_local <vscale x 8 x float> @test_vfwnmsac_vf_bf16m2_f32m4_m(
+// CHECK-RV64-SAME: <vscale x 8 x i1> [[VM:%.*]], <vscale x 8 x float> [[VD:%.*]], bfloat noundef [[VS1:%.*]], <vscale x 8 x bfloat> [[VS2:%.*]], i64 noundef [[VL:%.*]]) #[[ATTR0]] {
+// CHECK-RV64-NEXT: [[ENTRY:.*:]]
+// CHECK-RV64-NEXT: [[TMP0:%.*]] = call <vscale x 8 x float> @llvm.riscv.vfwnmsac.mask.nxv8f32.bf16.nxv8bf16.i64(<vscale x 8 x float> [[VD]], bfloat [[VS1]], <vscale x 8 x bfloat> [[VS2]], <vscale x 8 x i1> [[VM]], i64 7, i64 [[VL]], i64 3)
+// CHECK-RV64-NEXT: ret <vscale x 8 x float> [[TMP0]]
+//
+vfloat32m4_t test_vfwnmsac_vf_bf16m2_f32m4_m(vbool8_t vm, vfloat32m4_t vd,
+ __bf16 vs1, vbfloat16m2_t vs2,
+ size_t vl) {
+ return __riscv_vfwnmsac_vf_bf16m2_f32m4_m(vm, vd, vs1, vs2, vl);
+}
+
+// CHECK-RV64-LABEL: define dso_local <vscale x 16 x float> @test_vfwnmsac_vv_bf16m4_f32m8_m(
+// CHECK-RV64-SAME: <vscale x 16 x i1> [[VM:%.*]], <vscale x 16 x float> [[VD:%.*]], <vscale x 16 x bfloat> [[VS1:%.*]], <vscale x 16 x bfloat> [[VS2:%.*]], i64 noundef [[VL:%.*]]) #[[ATTR0]] {
+// CHECK-RV64-NEXT: [[ENTRY:.*:]]
+// CHECK-RV64-NEXT: [[TMP0:%.*]] = call <vscale x 16 x float> @llvm.riscv.vfwnmsac.mask.nxv16f32.nxv16bf16.nxv16bf16.i64(<vscale x 16 x float> [[VD]], <vscale x 16 x bfloat> [[VS1]], <vscale x 16 x bfloat> [[VS2]], <vscale x 16 x i1> [[VM]], i64 7, i64 [[VL]], i64 3)
+// CHECK-RV64-NEXT: ret <vscale x 16 x float> [[TMP0]]
+//
+vfloat32m8_t test_vfwnmsac_vv_bf16m4_f32m8_m(vbool4_t vm, vfloat32m8_t vd,
+ vbfloat16m4_t vs1,
+ vbfloat16m4_t vs2, size_t vl) {
+ return __riscv_vfwnmsac_vv_bf16m4_f32m8_m(vm, vd, vs1, vs2, vl);
+}
+
+// CHECK-RV64-LABEL: define dso_local <vscale x 16 x float> @test_vfwnmsac_vf_bf16m4_f32m8_m(
+// CHECK-RV64-SAME: <vscale x 16 x i1> [[VM:%.*]], <vscale x 16 x float> [[VD:%.*]], bfloat noundef [[VS1:%.*]], <vscale x 16 x bfloat> [[VS2:%.*]], i64 noundef [[VL:%.*]]) #[[ATTR0]] {
+// CHECK-RV64-NEXT: [[ENTRY:.*:]]
+// CHECK-RV64-NEXT: [[TMP0:%.*]] = call <vscale x 16 x float> @llvm.riscv.vfwnmsac.mask.nxv16f32.bf16.nxv16bf16.i64(<vscale x 16 x float> [[VD]], bfloat [[VS1]], <vscale x 16 x bfloat> [[VS2]], <vscale x 16 x i1> [[VM]], i64 7, i64 [[VL]], i64 3)
+// CHECK-RV64-NEXT: ret <vscale x 16 x float> [[TMP0]]
+//
+vfloat32m8_t test_vfwnmsac_vf_bf16m4_f32m8_m(vbool4_t vm, vfloat32m8_t vd,
+ __bf16 vs1, vbfloat16m4_t vs2,
+ size_t vl) {
+ return __riscv_vfwnmsac_vf_bf16m4_f32m8_m(vm, vd, vs1, vs2, vl);
+}
+
+// CHECK-RV64-LABEL: define dso_local <vscale x 1 x float> @test_vfwnmsac_vv_bf16mf4_f32mf2_rm(
+// CHECK-RV64-SAME: <vscale x 1 x float> [[VD:%.*]], <vscale x 1 x bfloat> [[VS1:%.*]], <vscale x 1 x bfloat> [[VS2:%.*]], i64 noundef [[VL:%.*]]) #[[ATTR0]] {
+// CHECK-RV64-NEXT: [[ENTRY:.*:]]
+// CHECK-RV64-NEXT: [[TMP0:%.*]] = call <vscale x 1 x float> @llvm.riscv.vfwnmsac.nxv1f32.nxv1bf16.nxv1bf16.i64(<vscale x 1 x float> [[VD]], <vscale x 1 x bfloat> [[VS1]], <vscale x 1 x bfloat> [[VS2]], i64 0, i64 [[VL]], i64 3)
+// CHECK-RV64-NEXT: ret <vscale x 1 x float> [[TMP0]]
+//
+vfloat32mf2_t test_vfwnmsac_vv_bf16mf4_f32mf2_rm(vfloat32mf2_t vd,
+ vbfloat16mf4_t vs1,
+ vbfloat16mf4_t vs2,
+ size_t vl) {
+ return __riscv_vfwnmsac_vv_bf16mf4_f32mf2_rm(vd, vs1, vs2, __RISCV_FRM_RNE,
+ vl);
+}
+
+// CHECK-RV64-LABEL: define dso_local <vscale x 1 x float> @test_vfwnmsac_vf_bf16mf4_f32mf2_rm(
+// CHECK-RV64-SAME: <vscale x 1 x float> [[VD:%.*]], bfloat noundef [[VS1:%.*]], <vscale x 1 x bfloat> [[VS2:%.*]], i64 noundef [[VL:%.*]]) #[[ATTR0]] {
+// CHECK-RV64-NEXT: [[ENTRY:.*:]]
+// CHECK-RV64-NEXT: [[TMP0:%.*]] = call <vscale x 1 x float> @llvm.riscv.vfwnmsac.nxv1f32.bf16.nxv1bf16.i64(<vscale x 1 x float> [[VD]], bfloat [[VS1]], <vscale x 1 x bfloat> [[VS2]], i64 0, i64 [[VL]], i64 3)
+// CHECK-RV64-NEXT: ret <vscale x 1 x float> [[TMP0]]
+//
+vfloat32mf2_t test_vfwnmsac_vf_bf16mf4_f32mf2_rm(vfloat32mf2_t vd, __bf16 vs1,
+ vbfloat16mf4_t vs2,
+ size_t vl) {
+ return __riscv_vfwnmsac_vf_bf16mf4_f32mf2_rm(vd, vs1, vs2, __RISCV_FRM_RNE,
+ vl);
+}
+
+// CHECK-RV64-LABEL: define dso_local <vscale x 2 x float> @test_vfwnmsac_vv_bf16mf2_f32m1_rm(
+// CHECK-RV64-SAME: <vscale x 2 x float> [[VD:%.*]], <vscale x 2 x bfloat> [[VS1:%.*]], <vscale x 2 x bfloat> [[VS2:%.*]], i64 noundef [[VL:%.*]]) #[[ATTR0]] {
+// CHECK-RV64-NEXT: [[ENTRY:.*:]]
+// CHECK-RV64-NEXT: [[TMP0:%.*]] = call <vscale x 2 x float> @llvm.riscv.vfwnmsac.nxv2f32.nxv2bf16.nxv2bf16.i64(<vscale x 2 x float> [[VD]], <vscale x 2 x bfloat> [[VS1]], <vscale x 2 x bfloat> [[VS2]], i64 0, i64 [[VL]], i64 3)
+// CHECK-RV64-NEXT: ret <vscale x 2 x float> [[TMP0]]
+//
+vfloat32m1_t test_vfwnmsac_vv_bf16mf2_f32m1_rm(vfloat32m1_t vd,
+ vbfloat16mf2_t vs1,
+ vbfloat16mf2_t vs2, size_t vl) {
+ return __riscv_vfwnmsac_vv_bf16mf2_f32m1_rm(vd, vs1, vs2, __RISCV_FRM_RNE,
+ vl);
+}
+
+// CHECK-RV64-LABEL: define dso_local <vscale x 2 x float> @test_vfwnmsac_vf_bf16mf2_f32m1_rm(
+// CHECK-RV64-SAME: <vscale x 2 x float> [[VD:%.*]], bfloat noundef [[VS1:%.*]], <vscale x 2 x bfloat> [[VS2:%.*]], i64 noundef [[VL:%.*]]) #[[ATTR0]] {
+// CHECK-RV64-NEXT: [[ENTRY:.*:]]
+// CHECK-RV64-NEXT: [[TMP0:%.*]] = call <vscale x 2 x float> @llvm.riscv.vfwnmsac.nxv2f32.bf16.nxv2bf16.i64(<vscale x 2 x float> [[VD]], bfloat [[VS1]], <vscale x 2 x bfloat> [[VS2]], i64 0, i64 [[VL]], i64 3)
+// CHECK-RV64-NEXT: ret <vscale x 2 x float> [[TMP0]]
+//
+vfloat32m1_t test_vfwnmsac_vf_bf16mf2_f32m1_rm(vfloat32m1_t vd, __bf16 vs1,
+ vbfloat16mf2_t vs2, size_t vl) {
+ return __riscv_vfwnmsac_vf_bf16mf2_f32m1_rm(vd, vs1, vs2, __RISCV_FRM_RNE,
+ vl);
+}
+
+// CHECK-RV64-LABEL: define dso_local <vscale x 4 x float> @test_vfwnmsac_vv_bf16m1_f32m2_rm(
+// CHECK-RV64-SAME: <vscale x 4 x float> [[VD:%.*]], <vscale x 4 x bfloat> [[VS1:%.*]], <vscale x 4 x bfloat> [[VS2:%.*]], i64 noundef [[VL:%.*]]) #[[ATTR0]] {
+// CHECK-RV64-NEXT: [[ENTRY:.*:]]
+// CHECK-RV64-NEXT: [[TMP0:%.*]] = call <vscale x 4 x float> @llvm.riscv.vfwnmsac.nxv4f32.nxv4bf16.nxv4bf16.i64(<vscale x 4 x float> [[VD]], <vscale x 4 x bfloat> [[VS1]], <vscale x 4 x bfloat> [[VS2]], i64 0, i64 [[VL]], i64 3)
+// CHECK-RV64-NEXT: ret <vscale x 4 x float> [[TMP0]]
+//
+vfloat32m2_t test_vfwnmsac_vv_bf16m1_f32m2_rm(vfloat32m2_t vd,
+ vbfloat16m1_t vs1,
+ vbfloat16m1_t vs2, size_t vl) {
+ return __riscv_vfwnmsac_vv_bf16m1_f32m2_rm(vd, vs1, vs2, __RISCV_FRM_RNE, vl);
+}
+
+// CHECK-RV64-LABEL: define dso_local <vscale x 4 x float> @test_vfwnmsac_vf_bf16m1_f32m2_rm(
+// CHECK-RV64-SAME: <vscale x 4 x float> [[VD:%.*]], bfloat noundef [[VS1:%.*]], <vscale x 4 x bfloat> [[VS2:%.*]], i64 noundef [[VL:%.*]]) #[[ATTR0]] {
+// CHECK-RV64-NEXT: [[ENTRY:.*:]]
+// CHECK-RV64-NEXT: [[TMP0:%.*]] = call <vscale x 4 x float> @llvm.riscv.vfwnmsac.nxv4f32.bf16.nxv4bf16.i64(<vscale x 4 x float> [[VD]], bfloat [[VS1]], <vscale x 4 x bfloat> [[VS2]], i64 0, i64 [[VL]], i64 3)
+// CHECK-RV64-NEXT: ret <vscale x 4 x float> [[TMP0]]
+//
+vfloat32m2_t test_vfwnmsac_vf_bf16m1_f32m2_rm(vfloat32m2_t vd, __bf16 vs1,
+ vbfloat16m1_t vs2, size_t vl) {
+ return __riscv_vfwnmsac_vf_bf16m1_f32m2_rm(vd, vs1, vs2, __RISCV_FRM_RNE, vl);
+}
+
+// CHECK-RV64-LABEL: define dso_local <vscale x 8 x float> @test_vfwnmsac_vv_bf16m2_f32m4_rm(
+// CHECK-RV64-SAME: <vscale x 8 x float> [[VD:%.*]], <vscale x 8 x bfloat> [[VS1:%.*]], <vscale x 8 x bfloat> [[VS2:%.*]], i64 noundef [[VL:%.*]]) #[[ATTR0]] {
+// CHECK-RV64-NEXT: [[ENTRY:.*:]]
+// CHECK-RV64-NEXT: [[TMP0:%.*]] = call <vscale x 8 x float> @llvm.riscv.vfwnmsac.nxv8f32.nxv8bf16.nxv8bf16.i64(<vscale x 8 x float> [[VD]], <vscale x 8 x bfloat> [[VS1]], <vscale x 8 x bfloat> [[VS2]], i64 0, i64 [[VL]], i64 3)
+// CHECK-RV64-NEXT: ret <vscale x 8 x float> [[TMP0]]
+//
+vfloat32m4_t test_vfwnmsac_vv_bf16m2_f32m4_rm(vfloat32m4_t vd,
+ vbfloat16m2_t vs1,
+ vbfloat16m2_t vs2, size_t vl) {
+ return __riscv_vfwnmsac_vv_bf16m2_f32m4_rm(vd, vs1, vs2, __RISCV_FRM_RNE, vl);
+}
+
+// CHECK-RV64-LABEL: define dso_local <vscale x 8 x float> @test_vfwnmsac_vf_bf16m2_f32m4_rm(
+// CHECK-RV64-SAME: <vscale x 8 x float> [[VD:%.*]], bfloat noundef [[VS1:%.*]], <vscale x 8 x bfloat> [[VS2:%.*]], i64 noundef [[VL:%.*]]) #[[ATTR0]] {
+// CHECK-RV64-NEXT: [[ENTRY:.*:]]
+// CHECK-RV64-NEXT: [[TMP0:%.*]] = call <vscale x 8 x float> @llvm.riscv.vfwnmsac.nxv8f32.bf16.nxv8bf16.i64(<vscale x 8 x float> [[VD]], bfloat [[VS1]], <vscale x 8 x bfloat> [[VS2]], i64 0, i64 [[VL]], i64 3)
+// CHECK-RV64-NEXT: ret <vscale x 8 x float> [[TMP0]]
+//
+vfloat32m4_t test_vfwnmsac_vf_bf16m2_f32m4_rm(vfloat32m4_t vd, __bf16 vs1,
+ vbfloat16m2_t vs2, size_t vl) {
+ return __riscv_vfwnmsac_vf_bf16m2_f32m4_rm(vd, vs1, vs2, __RISCV_FRM_RNE, vl);
+}
+
+// CHECK-RV64-LABEL: define dso_local <vscale x 16 x float> @test_vfwnmsac_vv_bf16m4_f32m8_rm(
+// CHECK-RV64-SAME: <vscale x 16 x float> [[VD:%.*]], <vscale x 16 x bfloat> [[VS1:%.*]], <vscale x 16 x bfloat> [[VS2:%.*]], i64 noundef [[VL:%.*]]) #[[ATTR0]] {
+// CHECK-RV64-NEXT: [[ENTRY:.*:]]
+// CHECK-RV64-NEXT: [[TMP0:%.*]] = call <vscale x 16 x float> @llvm.riscv.vfwnmsac.nxv16f32.nxv16bf16.nxv16bf16.i64(<vscale x 16 x float> [[VD]], <vscale x 16 x bfloat> [[VS1]], <vscale x 16 x bfloat> [[VS2]], i64 0, i64 [[VL]], i64 3)
+// CHECK-RV64-NEXT: ret <vscale x 16 x float> [[TMP0]]
+//
+vfloat32m8_t test_vfwnmsac_vv_bf16m4_f32m8_rm(vfloat32m8_t vd,
+ vbfloat16m4_t vs1,
+ vbfloat16m4_t vs2, size_t vl) {
+ return __riscv_vfwnmsac_vv_bf16m4_f32m8_rm(vd, vs1, vs2, __RISCV_FRM_RNE, vl);
+}
+
+// CHECK-RV64-LABEL: define dso_local <vscale x 16 x float> @test_vfwnmsac_vf_bf16m4_f32m8_rm(
+// CHECK-RV64-SAME: <vscale x 16 x float> [[VD:%.*]], bfloat noundef [[VS1:%.*]], <vscale x 16 x bfloat> [[VS2:%.*]], i64 noundef [[VL:%.*]]) #[[ATTR0]] {
+// CHECK-RV64-NEXT: [[ENTRY:.*:]]
+// CHECK-RV64-NEXT: [[TMP0:%.*]] = call <vscale x 16 x float> @llvm.riscv.vfwnmsac.nxv16f32.bf16.nxv16bf16.i64(<vscale x 16 x float> [[VD]], bfloat [[VS1]], <vscale x 16 x bfloat> [[VS2]], i64 0, i64 [[VL]], i64 3)
+// CHECK-RV64-NEXT: ret <vscale x 16 x float> [[TMP0]]
+//
+vfloat32m8_t test_vfwnmsac_vf_bf16m4_f32m8_rm(vfloat32m8_t vd, __bf16 vs1,
+ vbfloat16m4_t vs2, size_t vl) {
+ return __riscv_vfwnmsac_vf_bf16m4_f32m8_rm(vd, vs1, vs2, __RISCV_FRM_RNE, vl);
+}
+
+// CHECK-RV64-LABEL: define dso_local <vscale x 1 x float> @test_vfwnmsac_vv_bf16mf4_f32mf2_rm_m(
+// CHECK-RV64-SAME: <vscale x 1 x i1> [[VM:%.*]], <vscale x 1 x float> [[VD:%.*]], <vscale x 1 x bfloat> [[VS1:%.*]], <vscale x 1 x bfloat> [[VS2:%.*]], i64 noundef [[VL:%.*]]) #[[ATTR0]] {
+// CHECK-RV64-NEXT: [[ENTRY:.*:]]
+// CHECK-RV64-NEXT: [[TMP0:%.*]] = call <vscale x 1 x float> @llvm.riscv.vfwnmsac.mask.nxv1f32.nxv1bf16.nxv1bf16.i64(<vscale x 1 x float> [[VD]], <vscale x 1 x bfloat> [[VS1]], <vscale x 1 x bfloat> [[VS2]], <vscale x 1 x i1> [[VM]], i64 0, i64 [[VL]], i64 3)
+// CHECK-RV64-NEXT: ret <vscale x 1 x float> [[TMP0]]
+//
+vfloat32mf2_t test_vfwnmsac_vv_bf16mf4_f32mf2_rm_m(vbool64_t vm,
+ vfloat32mf2_t vd,
+ vbfloat16mf4_t vs1,
+ vbfloat16mf4_t vs2,
+ size_t vl) {
+ return __riscv_vfwnmsac_vv_bf16mf4_f32mf2_rm_m(vm, vd, vs1, vs2,
+ __RISCV_FRM_RNE, vl);
+}
+
+// CHECK-RV64-LABEL: define dso_local <vscale x 1 x float> @test_vfwnmsac_vf_bf16mf4_f32mf2_rm_m(
+// CHECK-RV64-SAME: <vscale x 1 x i1> [[VM:%.*]], <vscale x 1 x float> [[VD:%.*]], bfloat noundef [[VS1:%.*]], <vscale x 1 x bfloat> [[VS2:%.*]], i64 noundef [[VL:%.*]]) #[[ATTR0]] {
+// CHECK-RV64-NEXT: [[ENTRY:.*:]]
+// CHECK-RV64-NEXT: [[TMP0:%.*]] = call <vscale x 1 x float> @llvm.riscv.vfwnmsac.mask.nxv1f32.bf16.nxv1bf16.i64(<vscale x 1 x float> [[VD]], bfloat [[VS1]], <vscale x 1 x bfloat> [[VS2]], <vscale x 1 x i1> [[VM]], i64 0, i64 [[VL]], i64 3)
+// CHECK-RV64-NEXT: ret <vscale x 1 x float> [[TMP0]]
+//
+vfloat32mf2_t test_vfwnmsac_vf_bf16mf4_f32mf2_rm_m(vbool64_t vm,
+ vfloat32mf2_t vd, __bf16 vs1,
+ vbfloat16mf4_t vs2,
+ size_t vl) {
+ return __riscv_vfwnmsac_vf_bf16mf4_f32mf2_rm_m(vm, vd, vs1, vs2,
+ __RISCV_FRM_RNE, vl);
+}
+
+// CHECK-RV64-LABEL: define dso_local <vscale x 2 x float> @test_vfwnmsac_vv_bf16mf2_f32m1_rm_m(
+// CHECK-RV64-SAME: <vscale x 2 x i1> [[VM:%.*]], <vscale x 2 x float> [[VD:%.*]], <vscale x 2 x bfloat> [[VS1:%.*]], <vscale x 2 x bfloat> [[VS2:%.*]], i64 noundef [[VL:%.*]]) #[[ATTR0]] {
+// CHECK-RV64-NEXT: [[ENTRY:.*:]]
+// CHECK-RV64-NEXT: [[TMP0:%.*]] = call <vscale x 2 x float> @llvm.riscv.vfwnmsac.mask.nxv2f32.nxv2bf16.nxv2bf16.i64(<vscale x 2 x float> [[VD]], <vscale x 2 x bfloat> [[VS1]], <vscale x 2 x bfloat> [[VS2]], <vscale x 2 x i1> [[VM]], i64 0, i64 [[VL]], i64 3)
+// CHECK-RV64-NEXT: ret <vscale x 2 x float> [[TMP0]]
+//
+vfloat32m1_t test_vfwnmsac_vv_bf16mf2_f32m1_rm_m(vbool32_t vm, vfloat32m1_t vd,
+ vbfloat16mf2_t vs1,
+ vbfloat16mf2_t vs2,
+ size_t vl) {
+ return __riscv_vfwnmsac_vv_bf16mf2_f32m1_rm_m(vm, vd, vs1, vs2,
+ __RISCV_FRM_RNE, vl);
+}
+
+// CHECK-RV64-LABEL: define dso_local <vscale x 2 x float> @test_vfwnmsac_vf_bf16mf2_f32m1_rm_m(
+// CHECK-RV64-SAME: <vscale x 2 x i1> [[VM:%.*]], <vscale x 2 x float> [[VD:%.*]], bfloat noundef [[VS1:%.*]], <vscale x 2 x bfloat> [[VS2:%.*]], i64 noundef [[VL:%.*]]) #[[ATTR0]] {
+// CHECK-RV64-NEXT: [[ENTRY:.*:]]
+// CHECK-RV64-NEXT: [[TMP0:%.*]] = call <vscale x 2 x float> @llvm.riscv.vfwnmsac.mask.nxv2f32.bf16.nxv2bf16.i64(<vscale x 2 x float> [[VD]], bfloat [[VS1]], <vscale x 2 x bfloat> [[VS2]], <vscale x 2 x i1> [[VM]], i64 0, i64 [[VL]], i64 3)
+// CHECK-RV64-NEXT: ret <vscale x 2 x float> [[TMP0]]
+//
+vfloat32m1_t test_vfwnmsac_vf_bf16mf2_f32m1_rm_m(vbool32_t vm, vfloat32m1_t vd,
+ __bf16 vs1, vbfloat16mf2_t vs2,
+ size_t vl) {
+ return __riscv_vfwnmsac_vf_bf16mf2_f32m1_rm_m(vm, vd, vs1, vs2,
+ __RISCV_FRM_RNE, vl);
+}
+
+// CHECK-RV64-LABEL: define dso_local <vscale x 4 x float> @test_vfwnmsac_vv_bf16m1_f32m2_rm_m(
+// CHECK-RV64-SAME: <vscale x 4 x i1> [[VM:%.*]], <vscale x 4 x float> [[VD:%.*]], <vscale x 4 x bfloat> [[VS1:%.*]], <vscale x 4 x bfloat> [[VS2:%.*]], i64 noundef [[VL:%.*]]) #[[ATTR0]] {
+// CHECK-RV64-NEXT: [[ENTRY:.*:]]
+// CHECK-RV64-NEXT: [[TMP0:%.*]] = call <vscale x 4 x float> @llvm.riscv.vfwnmsac.mask.nxv4f32.nxv4bf16.nxv4bf16.i64(<vscale x 4 x float> [[VD]], <vscale x 4 x bfloat> [[VS1]], <vscale x 4 x bfloat> [[VS2]], <vscale x 4 x i1> [[VM]], i64 0, i64 [[VL]], i64 3)
+// CHECK-RV64-NEXT: ret <vscale x 4 x float> [[TMP0]]
+//
+vfloat32m2_t test_vfwnmsac_vv_bf16m1_f32m2_rm_m(vbool16_t vm, vfloat32m2_t vd,
+ vbfloat16m1_t vs1,
+ vbfloat16m1_t vs2, size_t vl) {
+ return __riscv_vfwnmsac_vv_bf16m1_f32m2_rm_m(vm, vd, vs1, vs2,
+ __RISCV_FRM_RNE, vl);
+}
+
+// CHECK-RV64-LABEL: define dso_local <vscale x 4 x float> @test_vfwnmsac_vf_bf16m1_f32m2_rm_m(
+// CHECK-RV64-SAME: <vscale x 4 x i1> [[VM:%.*]], <vscale x 4 x float> [[VD:%.*]], bfloat noundef [[VS1:%.*]], <vscale x 4 x bfloat> [[VS2:%.*]], i64 noundef [[VL:%.*]]) #[[ATTR0]] {
+// CHECK-RV64-NEXT: [[ENTRY:.*:]]
+// CHECK-RV64-NEXT: [[TMP0:%.*]] = call <vscale x 4 x float> @llvm.riscv.vfwnmsac.mask.nxv4f32.bf16.nxv4bf16.i64(<vscale x 4 x float> [[VD]], bfloat [[VS1]], <vscale x 4 x bfloat> [[VS2]], <vscale x 4 x i1> [[VM]], i64 0, i64 [[VL]], i64 3)
+// CHECK-RV64-NEXT: ret <vscale x 4 x float> [[TMP0]]
+//
+vfloat32m2_t test_vfwnmsac_vf_bf16m1_f32m2_rm_m(vbool16_t vm, vfloat32m2_t vd,
+ __bf16 vs1, vbfloat16m1_t vs2,
+ size_t vl) {
+ return __riscv_vfwnmsac_vf_bf16m1_f32m2_rm_m(vm, vd, vs1, vs2,
+ __RISCV_FRM_RNE, vl);
+}
+
+// CHECK-RV64-LABEL: define dso_local <vscale x 8 x float> @test_vfwnmsac_vv_bf16m2_f32m4_rm_m(
+// CHECK-RV64-SAME: <vscale x 8 x i1> [[VM:%.*]], <vscale x 8 x float> [[VD:%.*]], <vscale x 8 x bfloat> [[VS1:%.*]], <vscale x 8 x bfloat> [[VS2:%.*]], i64 noundef [[VL:%.*]]) #[[ATTR0]] {
+// CHECK-RV64-NEXT: [[ENTRY:.*:]]
+// CHECK-RV64-NEXT: [[TMP0:%.*]] = call <vscale x 8 x float> @llvm.riscv.vfwnmsac.mask.nxv8f32.nxv8bf16.nxv8bf16.i64(<vscale x 8 x float> [[VD]], <vscale x 8 x bfloat> [[VS1]], <vscale x 8 x bfloat> [[VS2]], <vscale x 8 x i1> [[VM]], i64 0, i64 [[VL]], i64 3)
+// CHECK-RV64-NEXT: ret <vscale x 8 x float> [[TMP0]]
+//
+vfloat32m4_t test_vfwnmsac_vv_bf16m2_f32m4_rm_m(vbool8_t vm, vfloat32m4_t vd,
+ vbfloat16m2_t vs1,
+ vbfloat16m2_t vs2, size_t vl) {
+ return __riscv_vfwnmsac_vv_bf16m2_f32m4_rm_m(vm, vd, vs1, vs2,
+ __RISCV_FRM_RNE, vl);
+}
+
+// CHECK-RV64-LABEL: define dso_local <vscale x 8 x float> @test_vfwnmsac_vf_bf16m2_f32m4_rm_m(
+// CHECK-RV64-SAME: <vscale x 8 x i1> [[VM:%.*]], <vscale x 8 x float> [[VD:%.*]], bfloat noundef [[VS1:%.*]], <vscale x 8 x bfloat> [[VS2:%.*]], i64 noundef [[VL:%.*]]) #[[ATTR0]] {
+// CHECK-RV64-NEXT: [[ENTRY:.*:]]
+// CHECK-RV64-NEXT: [[TMP0:%.*]] = call <vscale x 8 x float> @llvm.riscv.vfwnmsac.mask.nxv8f32.bf16.nxv8bf16.i64(<vscale x 8 x float> [[VD]], bfloat [[VS1]], <vscale x 8 x bfloat> [[VS2]], <vscale x 8 x i1> [[VM]], i64 0, i64 [[VL]], i64 3)
+// CHECK-RV64-NEXT: ret <vscale x 8 x float> [[TMP0]]
+//
+vfloat32m4_t test_vfwnmsac_vf_bf16m2_f32m4_rm_m(vbool8_t vm, vfloat32m4_t vd,
+ __bf16 vs1, vbfloat16m2_t vs2,
+ size_t vl) {
+ return __riscv_vfwnmsac_vf_bf16m2_f32m4_rm_m(vm, vd, vs1, vs2,
+ __RISCV_FRM_RNE, vl);
+}
+
+// CHECK-RV64-LABEL: define dso_local <vscale x 16 x float> @test_vfwnmsac_vv_bf16m4_f32m8_rm_m(
+// CHECK-RV64-SAME: <vscale x 16 x i1> [[VM:%.*]], <vscale x 16 x float> [[VD:%.*]], <vscale x 16 x bfloat> [[VS1:%.*]], <vscale x 16 x bfloat> [[VS2:%.*]], i64 noundef [[VL:%.*]]) #[[ATTR0]] {
+// CHECK-RV64-NEXT: [[ENTRY:.*:]]
+// CHECK-RV64-NEXT: [[TMP0:%.*]] = call <vscale x 16 x float> @llvm.riscv.vfwnmsac.mask.nxv16f32.nxv16bf16.nxv16bf16.i64(<vscale x 16 x float> [[VD]], <vscale x 16 x bfloat> [[VS1]], <vscale x 16 x bfloat> [[VS2]], <vscale x 16 x i1> [[VM]], i64 0, i64 [[VL]], i64 3)
+// CHECK-RV64-NEXT: ret <vscale x 16 x float> [[TMP0]]
+//
+vfloat32m8_t test_vfwnmsac_vv_bf16m4_f32m8_rm_m(vbool4_t vm, vfloat32m8_t vd,
+ vbfloat16m4_t vs1,
+ vbfloat16m4_t vs2, size_t vl) {
+ return __riscv_vfwnmsac_vv_bf16m4_f32m8_rm_m(vm, vd, vs1, vs2,
+ __RISCV_FRM_RNE, vl);
+}
+
+// CHECK-RV64-LABEL: define dso_local <vscale x 16 x float> @test_vfwnmsac_vf_bf16m4_f32m8_rm_m(
+// CHECK-RV64-SAME: <vscale x 16 x i1> [[VM:%.*]], <vscale x 16 x float> [[VD:%.*]], bfloat noundef [[VS1:%.*]], <vscale x 16 x bfloat> [[VS2:%.*]], i64 noundef [[VL:%.*]]) #[[ATTR0]] {
+// CHECK-RV64-NEXT: [[ENTRY:.*:]]
+// CHECK-RV64-NEXT: [[TMP0:%.*]] = call <vscale x 16 x float> @llvm.riscv.vfwnmsac.mask.nxv16f32.bf16.nxv16bf16.i64(<vscale x 16 x float> [[VD]], bfloat [[VS1]], <vscale x 16 x bfloat> [[VS2]], <vscale x 16 x i1> [[VM]], i64 0, i64 [[VL]], i64 3)
+// CHECK-RV64-NEXT: ret <vscale x 16 x float> [[TMP0]]
+//
+vfloat32m8_t test_vfwnmsac_vf_bf16m4_f32m8_rm_m(vbool4_t vm, vfloat32m8_t vd,
+ __bf16 vs1, vbfloat16m4_t vs2,
+ size_t vl) {
+ return __riscv_vfwnmsac_vf_bf16m4_f32m8_rm_m(vm, vd, vs1, vs2,
+ __RISCV_FRM_RNE, vl);
+}
diff --git a/clang/test/CodeGen/RISCV/rvv-intrinsics-autogenerated/zvfbfa/non-policy/non-overloaded/vfwsub.c b/clang/test/CodeGen/RISCV/rvv-intrinsics-autogenerated/zvfbfa/non-policy/non-overloaded/vfwsub.c
new file mode 100644
index 0000000..510ff91
--- /dev/null
+++ b/clang/test/CodeGen/RISCV/rvv-intrinsics-autogenerated/zvfbfa/non-policy/non-overloaded/vfwsub.c
@@ -0,0 +1,899 @@
+// NOTE: Assertions have been autogenerated by utils/update_cc_test_checks.py UTC_ARGS: --version 5
+// REQUIRES: riscv-registered-target
+// RUN: %clang_cc1 -triple riscv64 -target-feature +v \
+// RUN: -target-feature +experimental-zvfbfa -disable-O0-optnone \
+// RUN: -emit-llvm %s -o - | opt -S -passes=mem2reg | \
+// RUN: FileCheck --check-prefix=CHECK-RV64 %s
+
+#include <riscv_vector.h>
+
+// CHECK-RV64-LABEL: define dso_local <vscale x 1 x float> @test_vfwsub_vv_bf16mf4_f32mf2(
+// CHECK-RV64-SAME: <vscale x 1 x bfloat> [[VS2:%.*]], <vscale x 1 x bfloat> [[VS1:%.*]], i64 noundef [[VL:%.*]]) #[[ATTR0:[0-9]+]] {
+// CHECK-RV64-NEXT: [[ENTRY:.*:]]
+// CHECK-RV64-NEXT: [[TMP0:%.*]] = call <vscale x 1 x float> @llvm.riscv.vfwsub.nxv1f32.nxv1bf16.nxv1bf16.i64(<vscale x 1 x float> poison, <vscale x 1 x bfloat> [[VS2]], <vscale x 1 x bfloat> [[VS1]], i64 7, i64 [[VL]])
+// CHECK-RV64-NEXT: ret <vscale x 1 x float> [[TMP0]]
+//
+vfloat32mf2_t test_vfwsub_vv_bf16mf4_f32mf2(vbfloat16mf4_t vs2,
+ vbfloat16mf4_t vs1, size_t vl) {
+ return __riscv_vfwsub_vv_bf16mf4_f32mf2(vs2, vs1, vl);
+}
+
+// CHECK-RV64-LABEL: define dso_local <vscale x 1 x float> @test_vfwsub_vf_bf16mf4_f32mf2(
+// CHECK-RV64-SAME: <vscale x 1 x bfloat> [[VS2:%.*]], bfloat noundef [[RS1:%.*]], i64 noundef [[VL:%.*]]) #[[ATTR0]] {
+// CHECK-RV64-NEXT: [[ENTRY:.*:]]
+// CHECK-RV64-NEXT: [[TMP0:%.*]] = call <vscale x 1 x float> @llvm.riscv.vfwsub.nxv1f32.nxv1bf16.bf16.i64(<vscale x 1 x float> poison, <vscale x 1 x bfloat> [[VS2]], bfloat [[RS1]], i64 7, i64 [[VL]])
+// CHECK-RV64-NEXT: ret <vscale x 1 x float> [[TMP0]]
+//
+vfloat32mf2_t test_vfwsub_vf_bf16mf4_f32mf2(vbfloat16mf4_t vs2, __bf16 rs1,
+ size_t vl) {
+ return __riscv_vfwsub_vf_bf16mf4_f32mf2(vs2, rs1, vl);
+}
+
+// CHECK-RV64-LABEL: define dso_local <vscale x 1 x float> @test_vfwsub_wv_bf16mf4_f32mf2(
+// CHECK-RV64-SAME: <vscale x 1 x float> [[VS2:%.*]], <vscale x 1 x bfloat> [[VS1:%.*]], i64 noundef [[VL:%.*]]) #[[ATTR0]] {
+// CHECK-RV64-NEXT: [[ENTRY:.*:]]
+// CHECK-RV64-NEXT: [[TMP0:%.*]] = call <vscale x 1 x float> @llvm.riscv.vfwsub.w.nxv1f32.nxv1bf16.i64(<vscale x 1 x float> poison, <vscale x 1 x float> [[VS2]], <vscale x 1 x bfloat> [[VS1]], i64 7, i64 [[VL]])
+// CHECK-RV64-NEXT: ret <vscale x 1 x float> [[TMP0]]
+//
+vfloat32mf2_t test_vfwsub_wv_bf16mf4_f32mf2(vfloat32mf2_t vs2,
+ vbfloat16mf4_t vs1, size_t vl) {
+ return __riscv_vfwsub_wv_bf16mf4_f32mf2(vs2, vs1, vl);
+}
+
+// CHECK-RV64-LABEL: define dso_local <vscale x 1 x float> @test_vfwsub_wf_bf16_f32mf2(
+// CHECK-RV64-SAME: <vscale x 1 x float> [[VS2:%.*]], bfloat noundef [[RS1:%.*]], i64 noundef [[VL:%.*]]) #[[ATTR0]] {
+// CHECK-RV64-NEXT: [[ENTRY:.*:]]
+// CHECK-RV64-NEXT: [[TMP0:%.*]] = call <vscale x 1 x float> @llvm.riscv.vfwsub.w.nxv1f32.bf16.i64(<vscale x 1 x float> poison, <vscale x 1 x float> [[VS2]], bfloat [[RS1]], i64 7, i64 [[VL]])
+// CHECK-RV64-NEXT: ret <vscale x 1 x float> [[TMP0]]
+//
+vfloat32mf2_t test_vfwsub_wf_bf16_f32mf2(vfloat32mf2_t vs2, __bf16 rs1,
+ size_t vl) {
+ return __riscv_vfwsub_wf_bf16_f32mf2(vs2, rs1, vl);
+}
+
+// CHECK-RV64-LABEL: define dso_local <vscale x 2 x float> @test_vfwsub_vv_bf16mf2_f32m1(
+// CHECK-RV64-SAME: <vscale x 2 x bfloat> [[VS2:%.*]], <vscale x 2 x bfloat> [[VS1:%.*]], i64 noundef [[VL:%.*]]) #[[ATTR0]] {
+// CHECK-RV64-NEXT: [[ENTRY:.*:]]
+// CHECK-RV64-NEXT: [[TMP0:%.*]] = call <vscale x 2 x float> @llvm.riscv.vfwsub.nxv2f32.nxv2bf16.nxv2bf16.i64(<vscale x 2 x float> poison, <vscale x 2 x bfloat> [[VS2]], <vscale x 2 x bfloat> [[VS1]], i64 7, i64 [[VL]])
+// CHECK-RV64-NEXT: ret <vscale x 2 x float> [[TMP0]]
+//
+vfloat32m1_t test_vfwsub_vv_bf16mf2_f32m1(vbfloat16mf2_t vs2,
+ vbfloat16mf2_t vs1, size_t vl) {
+ return __riscv_vfwsub_vv_bf16mf2_f32m1(vs2, vs1, vl);
+}
+
+// CHECK-RV64-LABEL: define dso_local <vscale x 2 x float> @test_vfwsub_vf_bf16mf2_f32m1(
+// CHECK-RV64-SAME: <vscale x 2 x bfloat> [[VS2:%.*]], bfloat noundef [[RS1:%.*]], i64 noundef [[VL:%.*]]) #[[ATTR0]] {
+// CHECK-RV64-NEXT: [[ENTRY:.*:]]
+// CHECK-RV64-NEXT: [[TMP0:%.*]] = call <vscale x 2 x float> @llvm.riscv.vfwsub.nxv2f32.nxv2bf16.bf16.i64(<vscale x 2 x float> poison, <vscale x 2 x bfloat> [[VS2]], bfloat [[RS1]], i64 7, i64 [[VL]])
+// CHECK-RV64-NEXT: ret <vscale x 2 x float> [[TMP0]]
+//
+vfloat32m1_t test_vfwsub_vf_bf16mf2_f32m1(vbfloat16mf2_t vs2, __bf16 rs1,
+ size_t vl) {
+ return __riscv_vfwsub_vf_bf16mf2_f32m1(vs2, rs1, vl);
+}
+
+// CHECK-RV64-LABEL: define dso_local <vscale x 2 x float> @test_vfwsub_wv_bf16mf2_f32m1(
+// CHECK-RV64-SAME: <vscale x 2 x float> [[VS2:%.*]], <vscale x 2 x bfloat> [[VS1:%.*]], i64 noundef [[VL:%.*]]) #[[ATTR0]] {
+// CHECK-RV64-NEXT: [[ENTRY:.*:]]
+// CHECK-RV64-NEXT: [[TMP0:%.*]] = call <vscale x 2 x float> @llvm.riscv.vfwsub.w.nxv2f32.nxv2bf16.i64(<vscale x 2 x float> poison, <vscale x 2 x float> [[VS2]], <vscale x 2 x bfloat> [[VS1]], i64 7, i64 [[VL]])
+// CHECK-RV64-NEXT: ret <vscale x 2 x float> [[TMP0]]
+//
+vfloat32m1_t test_vfwsub_wv_bf16mf2_f32m1(vfloat32m1_t vs2, vbfloat16mf2_t vs1,
+ size_t vl) {
+ return __riscv_vfwsub_wv_bf16mf2_f32m1(vs2, vs1, vl);
+}
+
+// CHECK-RV64-LABEL: define dso_local <vscale x 2 x float> @test_vfwsub_wf_bf16_f32m1(
+// CHECK-RV64-SAME: <vscale x 2 x float> [[VS2:%.*]], bfloat noundef [[RS1:%.*]], i64 noundef [[VL:%.*]]) #[[ATTR0]] {
+// CHECK-RV64-NEXT: [[ENTRY:.*:]]
+// CHECK-RV64-NEXT: [[TMP0:%.*]] = call <vscale x 2 x float> @llvm.riscv.vfwsub.w.nxv2f32.bf16.i64(<vscale x 2 x float> poison, <vscale x 2 x float> [[VS2]], bfloat [[RS1]], i64 7, i64 [[VL]])
+// CHECK-RV64-NEXT: ret <vscale x 2 x float> [[TMP0]]
+//
+vfloat32m1_t test_vfwsub_wf_bf16_f32m1(vfloat32m1_t vs2, __bf16 rs1,
+ size_t vl) {
+ return __riscv_vfwsub_wf_bf16_f32m1(vs2, rs1, vl);
+}
+
+// CHECK-RV64-LABEL: define dso_local <vscale x 4 x float> @test_vfwsub_vv_bf16m1_f32m2(
+// CHECK-RV64-SAME: <vscale x 4 x bfloat> [[VS2:%.*]], <vscale x 4 x bfloat> [[VS1:%.*]], i64 noundef [[VL:%.*]]) #[[ATTR0]] {
+// CHECK-RV64-NEXT: [[ENTRY:.*:]]
+// CHECK-RV64-NEXT: [[TMP0:%.*]] = call <vscale x 4 x float> @llvm.riscv.vfwsub.nxv4f32.nxv4bf16.nxv4bf16.i64(<vscale x 4 x float> poison, <vscale x 4 x bfloat> [[VS2]], <vscale x 4 x bfloat> [[VS1]], i64 7, i64 [[VL]])
+// CHECK-RV64-NEXT: ret <vscale x 4 x float> [[TMP0]]
+//
+vfloat32m2_t test_vfwsub_vv_bf16m1_f32m2(vbfloat16m1_t vs2, vbfloat16m1_t vs1,
+ size_t vl) {
+ return __riscv_vfwsub_vv_bf16m1_f32m2(vs2, vs1, vl);
+}
+
+// CHECK-RV64-LABEL: define dso_local <vscale x 4 x float> @test_vfwsub_vf_bf16m1_f32m2(
+// CHECK-RV64-SAME: <vscale x 4 x bfloat> [[VS2:%.*]], bfloat noundef [[RS1:%.*]], i64 noundef [[VL:%.*]]) #[[ATTR0]] {
+// CHECK-RV64-NEXT: [[ENTRY:.*:]]
+// CHECK-RV64-NEXT: [[TMP0:%.*]] = call <vscale x 4 x float> @llvm.riscv.vfwsub.nxv4f32.nxv4bf16.bf16.i64(<vscale x 4 x float> poison, <vscale x 4 x bfloat> [[VS2]], bfloat [[RS1]], i64 7, i64 [[VL]])
+// CHECK-RV64-NEXT: ret <vscale x 4 x float> [[TMP0]]
+//
+vfloat32m2_t test_vfwsub_vf_bf16m1_f32m2(vbfloat16m1_t vs2, __bf16 rs1,
+ size_t vl) {
+ return __riscv_vfwsub_vf_bf16m1_f32m2(vs2, rs1, vl);
+}
+
+// CHECK-RV64-LABEL: define dso_local <vscale x 4 x float> @test_vfwsub_wv_bf16m1_f32m2(
+// CHECK-RV64-SAME: <vscale x 4 x float> [[VS2:%.*]], <vscale x 4 x bfloat> [[VS1:%.*]], i64 noundef [[VL:%.*]]) #[[ATTR0]] {
+// CHECK-RV64-NEXT: [[ENTRY:.*:]]
+// CHECK-RV64-NEXT: [[TMP0:%.*]] = call <vscale x 4 x float> @llvm.riscv.vfwsub.w.nxv4f32.nxv4bf16.i64(<vscale x 4 x float> poison, <vscale x 4 x float> [[VS2]], <vscale x 4 x bfloat> [[VS1]], i64 7, i64 [[VL]])
+// CHECK-RV64-NEXT: ret <vscale x 4 x float> [[TMP0]]
+//
+vfloat32m2_t test_vfwsub_wv_bf16m1_f32m2(vfloat32m2_t vs2, vbfloat16m1_t vs1,
+ size_t vl) {
+ return __riscv_vfwsub_wv_bf16m1_f32m2(vs2, vs1, vl);
+}
+
+// CHECK-RV64-LABEL: define dso_local <vscale x 4 x float> @test_vfwsub_wf_bf16_f32m2(
+// CHECK-RV64-SAME: <vscale x 4 x float> [[VS2:%.*]], bfloat noundef [[RS1:%.*]], i64 noundef [[VL:%.*]]) #[[ATTR0]] {
+// CHECK-RV64-NEXT: [[ENTRY:.*:]]
+// CHECK-RV64-NEXT: [[TMP0:%.*]] = call <vscale x 4 x float> @llvm.riscv.vfwsub.w.nxv4f32.bf16.i64(<vscale x 4 x float> poison, <vscale x 4 x float> [[VS2]], bfloat [[RS1]], i64 7, i64 [[VL]])
+// CHECK-RV64-NEXT: ret <vscale x 4 x float> [[TMP0]]
+//
+vfloat32m2_t test_vfwsub_wf_bf16_f32m2(vfloat32m2_t vs2, __bf16 rs1,
+ size_t vl) {
+ return __riscv_vfwsub_wf_bf16_f32m2(vs2, rs1, vl);
+}
+
+// CHECK-RV64-LABEL: define dso_local <vscale x 8 x float> @test_vfwsub_vv_bf16m2_f32m4(
+// CHECK-RV64-SAME: <vscale x 8 x bfloat> [[VS2:%.*]], <vscale x 8 x bfloat> [[VS1:%.*]], i64 noundef [[VL:%.*]]) #[[ATTR0]] {
+// CHECK-RV64-NEXT: [[ENTRY:.*:]]
+// CHECK-RV64-NEXT: [[TMP0:%.*]] = call <vscale x 8 x float> @llvm.riscv.vfwsub.nxv8f32.nxv8bf16.nxv8bf16.i64(<vscale x 8 x float> poison, <vscale x 8 x bfloat> [[VS2]], <vscale x 8 x bfloat> [[VS1]], i64 7, i64 [[VL]])
+// CHECK-RV64-NEXT: ret <vscale x 8 x float> [[TMP0]]
+//
+vfloat32m4_t test_vfwsub_vv_bf16m2_f32m4(vbfloat16m2_t vs2, vbfloat16m2_t vs1,
+ size_t vl) {
+ return __riscv_vfwsub_vv_bf16m2_f32m4(vs2, vs1, vl);
+}
+
+// CHECK-RV64-LABEL: define dso_local <vscale x 8 x float> @test_vfwsub_vf_bf16m2_f32m4(
+// CHECK-RV64-SAME: <vscale x 8 x bfloat> [[VS2:%.*]], bfloat noundef [[RS1:%.*]], i64 noundef [[VL:%.*]]) #[[ATTR0]] {
+// CHECK-RV64-NEXT: [[ENTRY:.*:]]
+// CHECK-RV64-NEXT: [[TMP0:%.*]] = call <vscale x 8 x float> @llvm.riscv.vfwsub.nxv8f32.nxv8bf16.bf16.i64(<vscale x 8 x float> poison, <vscale x 8 x bfloat> [[VS2]], bfloat [[RS1]], i64 7, i64 [[VL]])
+// CHECK-RV64-NEXT: ret <vscale x 8 x float> [[TMP0]]
+//
+vfloat32m4_t test_vfwsub_vf_bf16m2_f32m4(vbfloat16m2_t vs2, __bf16 rs1,
+ size_t vl) {
+ return __riscv_vfwsub_vf_bf16m2_f32m4(vs2, rs1, vl);
+}
+
+// CHECK-RV64-LABEL: define dso_local <vscale x 8 x float> @test_vfwsub_wv_bf16m2_f32m4(
+// CHECK-RV64-SAME: <vscale x 8 x float> [[VS2:%.*]], <vscale x 8 x bfloat> [[VS1:%.*]], i64 noundef [[VL:%.*]]) #[[ATTR0]] {
+// CHECK-RV64-NEXT: [[ENTRY:.*:]]
+// CHECK-RV64-NEXT: [[TMP0:%.*]] = call <vscale x 8 x float> @llvm.riscv.vfwsub.w.nxv8f32.nxv8bf16.i64(<vscale x 8 x float> poison, <vscale x 8 x float> [[VS2]], <vscale x 8 x bfloat> [[VS1]], i64 7, i64 [[VL]])
+// CHECK-RV64-NEXT: ret <vscale x 8 x float> [[TMP0]]
+//
+vfloat32m4_t test_vfwsub_wv_bf16m2_f32m4(vfloat32m4_t vs2, vbfloat16m2_t vs1,
+ size_t vl) {
+ return __riscv_vfwsub_wv_bf16m2_f32m4(vs2, vs1, vl);
+}
+
+// CHECK-RV64-LABEL: define dso_local <vscale x 8 x float> @test_vfwsub_wf_bf16_f32m4(
+// CHECK-RV64-SAME: <vscale x 8 x float> [[VS2:%.*]], bfloat noundef [[RS1:%.*]], i64 noundef [[VL:%.*]]) #[[ATTR0]] {
+// CHECK-RV64-NEXT: [[ENTRY:.*:]]
+// CHECK-RV64-NEXT: [[TMP0:%.*]] = call <vscale x 8 x float> @llvm.riscv.vfwsub.w.nxv8f32.bf16.i64(<vscale x 8 x float> poison, <vscale x 8 x float> [[VS2]], bfloat [[RS1]], i64 7, i64 [[VL]])
+// CHECK-RV64-NEXT: ret <vscale x 8 x float> [[TMP0]]
+//
+vfloat32m4_t test_vfwsub_wf_bf16_f32m4(vfloat32m4_t vs2, __bf16 rs1,
+ size_t vl) {
+ return __riscv_vfwsub_wf_bf16_f32m4(vs2, rs1, vl);
+}
+
+// CHECK-RV64-LABEL: define dso_local <vscale x 16 x float> @test_vfwsub_vv_bf16m4_f32m8(
+// CHECK-RV64-SAME: <vscale x 16 x bfloat> [[VS2:%.*]], <vscale x 16 x bfloat> [[VS1:%.*]], i64 noundef [[VL:%.*]]) #[[ATTR0]] {
+// CHECK-RV64-NEXT: [[ENTRY:.*:]]
+// CHECK-RV64-NEXT: [[TMP0:%.*]] = call <vscale x 16 x float> @llvm.riscv.vfwsub.nxv16f32.nxv16bf16.nxv16bf16.i64(<vscale x 16 x float> poison, <vscale x 16 x bfloat> [[VS2]], <vscale x 16 x bfloat> [[VS1]], i64 7, i64 [[VL]])
+// CHECK-RV64-NEXT: ret <vscale x 16 x float> [[TMP0]]
+//
+vfloat32m8_t test_vfwsub_vv_bf16m4_f32m8(vbfloat16m4_t vs2, vbfloat16m4_t vs1,
+ size_t vl) {
+ return __riscv_vfwsub_vv_bf16m4_f32m8(vs2, vs1, vl);
+}
+
+// CHECK-RV64-LABEL: define dso_local <vscale x 16 x float> @test_vfwsub_vf_bf16m4_f32m8(
+// CHECK-RV64-SAME: <vscale x 16 x bfloat> [[VS2:%.*]], bfloat noundef [[RS1:%.*]], i64 noundef [[VL:%.*]]) #[[ATTR0]] {
+// CHECK-RV64-NEXT: [[ENTRY:.*:]]
+// CHECK-RV64-NEXT: [[TMP0:%.*]] = call <vscale x 16 x float> @llvm.riscv.vfwsub.nxv16f32.nxv16bf16.bf16.i64(<vscale x 16 x float> poison, <vscale x 16 x bfloat> [[VS2]], bfloat [[RS1]], i64 7, i64 [[VL]])
+// CHECK-RV64-NEXT: ret <vscale x 16 x float> [[TMP0]]
+//
+vfloat32m8_t test_vfwsub_vf_bf16m4_f32m8(vbfloat16m4_t vs2, __bf16 rs1,
+ size_t vl) {
+ return __riscv_vfwsub_vf_bf16m4_f32m8(vs2, rs1, vl);
+}
+
+// CHECK-RV64-LABEL: define dso_local <vscale x 16 x float> @test_vfwsub_wv_bf16m4_f32m8(
+// CHECK-RV64-SAME: <vscale x 16 x float> [[VS2:%.*]], <vscale x 16 x bfloat> [[VS1:%.*]], i64 noundef [[VL:%.*]]) #[[ATTR0]] {
+// CHECK-RV64-NEXT: [[ENTRY:.*:]]
+// CHECK-RV64-NEXT: [[TMP0:%.*]] = call <vscale x 16 x float> @llvm.riscv.vfwsub.w.nxv16f32.nxv16bf16.i64(<vscale x 16 x float> poison, <vscale x 16 x float> [[VS2]], <vscale x 16 x bfloat> [[VS1]], i64 7, i64 [[VL]])
+// CHECK-RV64-NEXT: ret <vscale x 16 x float> [[TMP0]]
+//
+vfloat32m8_t test_vfwsub_wv_bf16m4_f32m8(vfloat32m8_t vs2, vbfloat16m4_t vs1,
+ size_t vl) {
+ return __riscv_vfwsub_wv_bf16m4_f32m8(vs2, vs1, vl);
+}
+
+// CHECK-RV64-LABEL: define dso_local <vscale x 16 x float> @test_vfwsub_wf_bf16_f32m8(
+// CHECK-RV64-SAME: <vscale x 16 x float> [[VS2:%.*]], bfloat noundef [[RS1:%.*]], i64 noundef [[VL:%.*]]) #[[ATTR0]] {
+// CHECK-RV64-NEXT: [[ENTRY:.*:]]
+// CHECK-RV64-NEXT: [[TMP0:%.*]] = call <vscale x 16 x float> @llvm.riscv.vfwsub.w.nxv16f32.bf16.i64(<vscale x 16 x float> poison, <vscale x 16 x float> [[VS2]], bfloat [[RS1]], i64 7, i64 [[VL]])
+// CHECK-RV64-NEXT: ret <vscale x 16 x float> [[TMP0]]
+//
+vfloat32m8_t test_vfwsub_wf_bf16_f32m8(vfloat32m8_t vs2, __bf16 rs1,
+ size_t vl) {
+ return __riscv_vfwsub_wf_bf16_f32m8(vs2, rs1, vl);
+}
+
+// CHECK-RV64-LABEL: define dso_local <vscale x 1 x float> @test_vfwsub_vv_bf16mf4_f32mf2_m(
+// CHECK-RV64-SAME: <vscale x 1 x i1> [[VM:%.*]], <vscale x 1 x bfloat> [[VS2:%.*]], <vscale x 1 x bfloat> [[VS1:%.*]], i64 noundef [[VL:%.*]]) #[[ATTR0]] {
+// CHECK-RV64-NEXT: [[ENTRY:.*:]]
+// CHECK-RV64-NEXT: [[TMP0:%.*]] = call <vscale x 1 x float> @llvm.riscv.vfwsub.mask.nxv1f32.nxv1bf16.nxv1bf16.i64(<vscale x 1 x float> poison, <vscale x 1 x bfloat> [[VS2]], <vscale x 1 x bfloat> [[VS1]], <vscale x 1 x i1> [[VM]], i64 7, i64 [[VL]], i64 3)
+// CHECK-RV64-NEXT: ret <vscale x 1 x float> [[TMP0]]
+//
+vfloat32mf2_t test_vfwsub_vv_bf16mf4_f32mf2_m(vbool64_t vm, vbfloat16mf4_t vs2,
+ vbfloat16mf4_t vs1, size_t vl) {
+ return __riscv_vfwsub_vv_bf16mf4_f32mf2_m(vm, vs2, vs1, vl);
+}
+
+// CHECK-RV64-LABEL: define dso_local <vscale x 1 x float> @test_vfwsub_vf_bf16mf4_f32mf2_m(
+// CHECK-RV64-SAME: <vscale x 1 x i1> [[VM:%.*]], <vscale x 1 x bfloat> [[VS2:%.*]], bfloat noundef [[RS1:%.*]], i64 noundef [[VL:%.*]]) #[[ATTR0]] {
+// CHECK-RV64-NEXT: [[ENTRY:.*:]]
+// CHECK-RV64-NEXT: [[TMP0:%.*]] = call <vscale x 1 x float> @llvm.riscv.vfwsub.mask.nxv1f32.nxv1bf16.bf16.i64(<vscale x 1 x float> poison, <vscale x 1 x bfloat> [[VS2]], bfloat [[RS1]], <vscale x 1 x i1> [[VM]], i64 7, i64 [[VL]], i64 3)
+// CHECK-RV64-NEXT: ret <vscale x 1 x float> [[TMP0]]
+//
+vfloat32mf2_t test_vfwsub_vf_bf16mf4_f32mf2_m(vbool64_t vm, vbfloat16mf4_t vs2,
+ __bf16 rs1, size_t vl) {
+ return __riscv_vfwsub_vf_bf16mf4_f32mf2_m(vm, vs2, rs1, vl);
+}
+
+// CHECK-RV64-LABEL: define dso_local <vscale x 1 x float> @test_vfwsub_wv_bf16mf4_f32mf2_m(
+// CHECK-RV64-SAME: <vscale x 1 x i1> [[VM:%.*]], <vscale x 1 x float> [[VS2:%.*]], <vscale x 1 x bfloat> [[VS1:%.*]], i64 noundef [[VL:%.*]]) #[[ATTR0]] {
+// CHECK-RV64-NEXT: [[ENTRY:.*:]]
+// CHECK-RV64-NEXT: [[TMP0:%.*]] = call <vscale x 1 x float> @llvm.riscv.vfwsub.w.mask.nxv1f32.nxv1bf16.i64(<vscale x 1 x float> poison, <vscale x 1 x float> [[VS2]], <vscale x 1 x bfloat> [[VS1]], <vscale x 1 x i1> [[VM]], i64 7, i64 [[VL]], i64 3)
+// CHECK-RV64-NEXT: ret <vscale x 1 x float> [[TMP0]]
+//
+vfloat32mf2_t test_vfwsub_wv_bf16mf4_f32mf2_m(vbool64_t vm, vfloat32mf2_t vs2,
+ vbfloat16mf4_t vs1, size_t vl) {
+ return __riscv_vfwsub_wv_bf16mf4_f32mf2_m(vm, vs2, vs1, vl);
+}
+
+// CHECK-RV64-LABEL: define dso_local <vscale x 1 x float> @test_vfwsub_wf_bf16_f32mf2_m(
+// CHECK-RV64-SAME: <vscale x 1 x i1> [[VM:%.*]], <vscale x 1 x float> [[VS2:%.*]], bfloat noundef [[RS1:%.*]], i64 noundef [[VL:%.*]]) #[[ATTR0]] {
+// CHECK-RV64-NEXT: [[ENTRY:.*:]]
+// CHECK-RV64-NEXT: [[TMP0:%.*]] = call <vscale x 1 x float> @llvm.riscv.vfwsub.w.mask.nxv1f32.bf16.i64(<vscale x 1 x float> poison, <vscale x 1 x float> [[VS2]], bfloat [[RS1]], <vscale x 1 x i1> [[VM]], i64 7, i64 [[VL]], i64 3)
+// CHECK-RV64-NEXT: ret <vscale x 1 x float> [[TMP0]]
+//
+vfloat32mf2_t test_vfwsub_wf_bf16_f32mf2_m(vbool64_t vm, vfloat32mf2_t vs2,
+ __bf16 rs1, size_t vl) {
+ return __riscv_vfwsub_wf_bf16_f32mf2_m(vm, vs2, rs1, vl);
+}
+
+// CHECK-RV64-LABEL: define dso_local <vscale x 2 x float> @test_vfwsub_vv_bf16mf2_f32m1_m(
+// CHECK-RV64-SAME: <vscale x 2 x i1> [[VM:%.*]], <vscale x 2 x bfloat> [[VS2:%.*]], <vscale x 2 x bfloat> [[VS1:%.*]], i64 noundef [[VL:%.*]]) #[[ATTR0]] {
+// CHECK-RV64-NEXT: [[ENTRY:.*:]]
+// CHECK-RV64-NEXT: [[TMP0:%.*]] = call <vscale x 2 x float> @llvm.riscv.vfwsub.mask.nxv2f32.nxv2bf16.nxv2bf16.i64(<vscale x 2 x float> poison, <vscale x 2 x bfloat> [[VS2]], <vscale x 2 x bfloat> [[VS1]], <vscale x 2 x i1> [[VM]], i64 7, i64 [[VL]], i64 3)
+// CHECK-RV64-NEXT: ret <vscale x 2 x float> [[TMP0]]
+//
+vfloat32m1_t test_vfwsub_vv_bf16mf2_f32m1_m(vbool32_t vm, vbfloat16mf2_t vs2,
+ vbfloat16mf2_t vs1, size_t vl) {
+ return __riscv_vfwsub_vv_bf16mf2_f32m1_m(vm, vs2, vs1, vl);
+}
+
+// CHECK-RV64-LABEL: define dso_local <vscale x 2 x float> @test_vfwsub_vf_bf16mf2_f32m1_m(
+// CHECK-RV64-SAME: <vscale x 2 x i1> [[VM:%.*]], <vscale x 2 x bfloat> [[VS2:%.*]], bfloat noundef [[RS1:%.*]], i64 noundef [[VL:%.*]]) #[[ATTR0]] {
+// CHECK-RV64-NEXT: [[ENTRY:.*:]]
+// CHECK-RV64-NEXT: [[TMP0:%.*]] = call <vscale x 2 x float> @llvm.riscv.vfwsub.mask.nxv2f32.nxv2bf16.bf16.i64(<vscale x 2 x float> poison, <vscale x 2 x bfloat> [[VS2]], bfloat [[RS1]], <vscale x 2 x i1> [[VM]], i64 7, i64 [[VL]], i64 3)
+// CHECK-RV64-NEXT: ret <vscale x 2 x float> [[TMP0]]
+//
+vfloat32m1_t test_vfwsub_vf_bf16mf2_f32m1_m(vbool32_t vm, vbfloat16mf2_t vs2,
+ __bf16 rs1, size_t vl) {
+ return __riscv_vfwsub_vf_bf16mf2_f32m1_m(vm, vs2, rs1, vl);
+}
+
+// CHECK-RV64-LABEL: define dso_local <vscale x 2 x float> @test_vfwsub_wv_bf16mf2_f32m1_m(
+// CHECK-RV64-SAME: <vscale x 2 x i1> [[VM:%.*]], <vscale x 2 x float> [[VS2:%.*]], <vscale x 2 x bfloat> [[VS1:%.*]], i64 noundef [[VL:%.*]]) #[[ATTR0]] {
+// CHECK-RV64-NEXT: [[ENTRY:.*:]]
+// CHECK-RV64-NEXT: [[TMP0:%.*]] = call <vscale x 2 x float> @llvm.riscv.vfwsub.w.mask.nxv2f32.nxv2bf16.i64(<vscale x 2 x float> poison, <vscale x 2 x float> [[VS2]], <vscale x 2 x bfloat> [[VS1]], <vscale x 2 x i1> [[VM]], i64 7, i64 [[VL]], i64 3)
+// CHECK-RV64-NEXT: ret <vscale x 2 x float> [[TMP0]]
+//
+vfloat32m1_t test_vfwsub_wv_bf16mf2_f32m1_m(vbool32_t vm, vfloat32m1_t vs2,
+ vbfloat16mf2_t vs1, size_t vl) {
+ return __riscv_vfwsub_wv_bf16mf2_f32m1_m(vm, vs2, vs1, vl);
+}
+
+// CHECK-RV64-LABEL: define dso_local <vscale x 2 x float> @test_vfwsub_wf_bf16_f32m1_m(
+// CHECK-RV64-SAME: <vscale x 2 x i1> [[VM:%.*]], <vscale x 2 x float> [[VS2:%.*]], bfloat noundef [[RS1:%.*]], i64 noundef [[VL:%.*]]) #[[ATTR0]] {
+// CHECK-RV64-NEXT: [[ENTRY:.*:]]
+// CHECK-RV64-NEXT: [[TMP0:%.*]] = call <vscale x 2 x float> @llvm.riscv.vfwsub.w.mask.nxv2f32.bf16.i64(<vscale x 2 x float> poison, <vscale x 2 x float> [[VS2]], bfloat [[RS1]], <vscale x 2 x i1> [[VM]], i64 7, i64 [[VL]], i64 3)
+// CHECK-RV64-NEXT: ret <vscale x 2 x float> [[TMP0]]
+//
+vfloat32m1_t test_vfwsub_wf_bf16_f32m1_m(vbool32_t vm, vfloat32m1_t vs2,
+ __bf16 rs1, size_t vl) {
+ return __riscv_vfwsub_wf_bf16_f32m1_m(vm, vs2, rs1, vl);
+}
+
+// CHECK-RV64-LABEL: define dso_local <vscale x 4 x float> @test_vfwsub_vv_bf16m1_f32m2_m(
+// CHECK-RV64-SAME: <vscale x 4 x i1> [[VM:%.*]], <vscale x 4 x bfloat> [[VS2:%.*]], <vscale x 4 x bfloat> [[VS1:%.*]], i64 noundef [[VL:%.*]]) #[[ATTR0]] {
+// CHECK-RV64-NEXT: [[ENTRY:.*:]]
+// CHECK-RV64-NEXT: [[TMP0:%.*]] = call <vscale x 4 x float> @llvm.riscv.vfwsub.mask.nxv4f32.nxv4bf16.nxv4bf16.i64(<vscale x 4 x float> poison, <vscale x 4 x bfloat> [[VS2]], <vscale x 4 x bfloat> [[VS1]], <vscale x 4 x i1> [[VM]], i64 7, i64 [[VL]], i64 3)
+// CHECK-RV64-NEXT: ret <vscale x 4 x float> [[TMP0]]
+//
+vfloat32m2_t test_vfwsub_vv_bf16m1_f32m2_m(vbool16_t vm, vbfloat16m1_t vs2,
+ vbfloat16m1_t vs1, size_t vl) {
+ return __riscv_vfwsub_vv_bf16m1_f32m2_m(vm, vs2, vs1, vl);
+}
+
+// CHECK-RV64-LABEL: define dso_local <vscale x 4 x float> @test_vfwsub_vf_bf16m1_f32m2_m(
+// CHECK-RV64-SAME: <vscale x 4 x i1> [[VM:%.*]], <vscale x 4 x bfloat> [[VS2:%.*]], bfloat noundef [[RS1:%.*]], i64 noundef [[VL:%.*]]) #[[ATTR0]] {
+// CHECK-RV64-NEXT: [[ENTRY:.*:]]
+// CHECK-RV64-NEXT: [[TMP0:%.*]] = call <vscale x 4 x float> @llvm.riscv.vfwsub.mask.nxv4f32.nxv4bf16.bf16.i64(<vscale x 4 x float> poison, <vscale x 4 x bfloat> [[VS2]], bfloat [[RS1]], <vscale x 4 x i1> [[VM]], i64 7, i64 [[VL]], i64 3)
+// CHECK-RV64-NEXT: ret <vscale x 4 x float> [[TMP0]]
+//
+vfloat32m2_t test_vfwsub_vf_bf16m1_f32m2_m(vbool16_t vm, vbfloat16m1_t vs2,
+ __bf16 rs1, size_t vl) {
+ return __riscv_vfwsub_vf_bf16m1_f32m2_m(vm, vs2, rs1, vl);
+}
+
+// CHECK-RV64-LABEL: define dso_local <vscale x 4 x float> @test_vfwsub_wv_bf16m1_f32m2_m(
+// CHECK-RV64-SAME: <vscale x 4 x i1> [[VM:%.*]], <vscale x 4 x float> [[VS2:%.*]], <vscale x 4 x bfloat> [[VS1:%.*]], i64 noundef [[VL:%.*]]) #[[ATTR0]] {
+// CHECK-RV64-NEXT: [[ENTRY:.*:]]
+// CHECK-RV64-NEXT: [[TMP0:%.*]] = call <vscale x 4 x float> @llvm.riscv.vfwsub.w.mask.nxv4f32.nxv4bf16.i64(<vscale x 4 x float> poison, <vscale x 4 x float> [[VS2]], <vscale x 4 x bfloat> [[VS1]], <vscale x 4 x i1> [[VM]], i64 7, i64 [[VL]], i64 3)
+// CHECK-RV64-NEXT: ret <vscale x 4 x float> [[TMP0]]
+//
+vfloat32m2_t test_vfwsub_wv_bf16m1_f32m2_m(vbool16_t vm, vfloat32m2_t vs2,
+ vbfloat16m1_t vs1, size_t vl) {
+ return __riscv_vfwsub_wv_bf16m1_f32m2_m(vm, vs2, vs1, vl);
+}
+
+// CHECK-RV64-LABEL: define dso_local <vscale x 4 x float> @test_vfwsub_wf_bf16_f32m2_m(
+// CHECK-RV64-SAME: <vscale x 4 x i1> [[VM:%.*]], <vscale x 4 x float> [[VS2:%.*]], bfloat noundef [[RS1:%.*]], i64 noundef [[VL:%.*]]) #[[ATTR0]] {
+// CHECK-RV64-NEXT: [[ENTRY:.*:]]
+// CHECK-RV64-NEXT: [[TMP0:%.*]] = call <vscale x 4 x float> @llvm.riscv.vfwsub.w.mask.nxv4f32.bf16.i64(<vscale x 4 x float> poison, <vscale x 4 x float> [[VS2]], bfloat [[RS1]], <vscale x 4 x i1> [[VM]], i64 7, i64 [[VL]], i64 3)
+// CHECK-RV64-NEXT: ret <vscale x 4 x float> [[TMP0]]
+//
+vfloat32m2_t test_vfwsub_wf_bf16_f32m2_m(vbool16_t vm, vfloat32m2_t vs2,
+ __bf16 rs1, size_t vl) {
+ return __riscv_vfwsub_wf_bf16_f32m2_m(vm, vs2, rs1, vl);
+}
+
+// CHECK-RV64-LABEL: define dso_local <vscale x 8 x float> @test_vfwsub_vv_bf16m2_f32m4_m(
+// CHECK-RV64-SAME: <vscale x 8 x i1> [[VM:%.*]], <vscale x 8 x bfloat> [[VS2:%.*]], <vscale x 8 x bfloat> [[VS1:%.*]], i64 noundef [[VL:%.*]]) #[[ATTR0]] {
+// CHECK-RV64-NEXT: [[ENTRY:.*:]]
+// CHECK-RV64-NEXT: [[TMP0:%.*]] = call <vscale x 8 x float> @llvm.riscv.vfwsub.mask.nxv8f32.nxv8bf16.nxv8bf16.i64(<vscale x 8 x float> poison, <vscale x 8 x bfloat> [[VS2]], <vscale x 8 x bfloat> [[VS1]], <vscale x 8 x i1> [[VM]], i64 7, i64 [[VL]], i64 3)
+// CHECK-RV64-NEXT: ret <vscale x 8 x float> [[TMP0]]
+//
+vfloat32m4_t test_vfwsub_vv_bf16m2_f32m4_m(vbool8_t vm, vbfloat16m2_t vs2,
+ vbfloat16m2_t vs1, size_t vl) {
+ return __riscv_vfwsub_vv_bf16m2_f32m4_m(vm, vs2, vs1, vl);
+}
+
+// CHECK-RV64-LABEL: define dso_local <vscale x 8 x float> @test_vfwsub_vf_bf16m2_f32m4_m(
+// CHECK-RV64-SAME: <vscale x 8 x i1> [[VM:%.*]], <vscale x 8 x bfloat> [[VS2:%.*]], bfloat noundef [[RS1:%.*]], i64 noundef [[VL:%.*]]) #[[ATTR0]] {
+// CHECK-RV64-NEXT: [[ENTRY:.*:]]
+// CHECK-RV64-NEXT: [[TMP0:%.*]] = call <vscale x 8 x float> @llvm.riscv.vfwsub.mask.nxv8f32.nxv8bf16.bf16.i64(<vscale x 8 x float> poison, <vscale x 8 x bfloat> [[VS2]], bfloat [[RS1]], <vscale x 8 x i1> [[VM]], i64 7, i64 [[VL]], i64 3)
+// CHECK-RV64-NEXT: ret <vscale x 8 x float> [[TMP0]]
+//
+vfloat32m4_t test_vfwsub_vf_bf16m2_f32m4_m(vbool8_t vm, vbfloat16m2_t vs2,
+ __bf16 rs1, size_t vl) {
+ return __riscv_vfwsub_vf_bf16m2_f32m4_m(vm, vs2, rs1, vl);
+}
+
+// CHECK-RV64-LABEL: define dso_local <vscale x 8 x float> @test_vfwsub_wv_bf16m2_f32m4_m(
+// CHECK-RV64-SAME: <vscale x 8 x i1> [[VM:%.*]], <vscale x 8 x float> [[VS2:%.*]], <vscale x 8 x bfloat> [[VS1:%.*]], i64 noundef [[VL:%.*]]) #[[ATTR0]] {
+// CHECK-RV64-NEXT: [[ENTRY:.*:]]
+// CHECK-RV64-NEXT: [[TMP0:%.*]] = call <vscale x 8 x float> @llvm.riscv.vfwsub.w.mask.nxv8f32.nxv8bf16.i64(<vscale x 8 x float> poison, <vscale x 8 x float> [[VS2]], <vscale x 8 x bfloat> [[VS1]], <vscale x 8 x i1> [[VM]], i64 7, i64 [[VL]], i64 3)
+// CHECK-RV64-NEXT: ret <vscale x 8 x float> [[TMP0]]
+//
+vfloat32m4_t test_vfwsub_wv_bf16m2_f32m4_m(vbool8_t vm, vfloat32m4_t vs2,
+ vbfloat16m2_t vs1, size_t vl) {
+ return __riscv_vfwsub_wv_bf16m2_f32m4_m(vm, vs2, vs1, vl);
+}
+
+// CHECK-RV64-LABEL: define dso_local <vscale x 8 x float> @test_vfwsub_wf_bf16_f32m4_m(
+// CHECK-RV64-SAME: <vscale x 8 x i1> [[VM:%.*]], <vscale x 8 x float> [[VS2:%.*]], bfloat noundef [[RS1:%.*]], i64 noundef [[VL:%.*]]) #[[ATTR0]] {
+// CHECK-RV64-NEXT: [[ENTRY:.*:]]
+// CHECK-RV64-NEXT: [[TMP0:%.*]] = call <vscale x 8 x float> @llvm.riscv.vfwsub.w.mask.nxv8f32.bf16.i64(<vscale x 8 x float> poison, <vscale x 8 x float> [[VS2]], bfloat [[RS1]], <vscale x 8 x i1> [[VM]], i64 7, i64 [[VL]], i64 3)
+// CHECK-RV64-NEXT: ret <vscale x 8 x float> [[TMP0]]
+//
+vfloat32m4_t test_vfwsub_wf_bf16_f32m4_m(vbool8_t vm, vfloat32m4_t vs2,
+ __bf16 rs1, size_t vl) {
+ return __riscv_vfwsub_wf_bf16_f32m4_m(vm, vs2, rs1, vl);
+}
+
+// CHECK-RV64-LABEL: define dso_local <vscale x 16 x float> @test_vfwsub_vv_bf16m4_f32m8_m(
+// CHECK-RV64-SAME: <vscale x 16 x i1> [[VM:%.*]], <vscale x 16 x bfloat> [[VS2:%.*]], <vscale x 16 x bfloat> [[VS1:%.*]], i64 noundef [[VL:%.*]]) #[[ATTR0]] {
+// CHECK-RV64-NEXT: [[ENTRY:.*:]]
+// CHECK-RV64-NEXT: [[TMP0:%.*]] = call <vscale x 16 x float> @llvm.riscv.vfwsub.mask.nxv16f32.nxv16bf16.nxv16bf16.i64(<vscale x 16 x float> poison, <vscale x 16 x bfloat> [[VS2]], <vscale x 16 x bfloat> [[VS1]], <vscale x 16 x i1> [[VM]], i64 7, i64 [[VL]], i64 3)
+// CHECK-RV64-NEXT: ret <vscale x 16 x float> [[TMP0]]
+//
+vfloat32m8_t test_vfwsub_vv_bf16m4_f32m8_m(vbool4_t vm, vbfloat16m4_t vs2,
+ vbfloat16m4_t vs1, size_t vl) {
+ return __riscv_vfwsub_vv_bf16m4_f32m8_m(vm, vs2, vs1, vl);
+}
+
+// CHECK-RV64-LABEL: define dso_local <vscale x 16 x float> @test_vfwsub_vf_bf16m4_f32m8_m(
+// CHECK-RV64-SAME: <vscale x 16 x i1> [[VM:%.*]], <vscale x 16 x bfloat> [[VS2:%.*]], bfloat noundef [[RS1:%.*]], i64 noundef [[VL:%.*]]) #[[ATTR0]] {
+// CHECK-RV64-NEXT: [[ENTRY:.*:]]
+// CHECK-RV64-NEXT: [[TMP0:%.*]] = call <vscale x 16 x float> @llvm.riscv.vfwsub.mask.nxv16f32.nxv16bf16.bf16.i64(<vscale x 16 x float> poison, <vscale x 16 x bfloat> [[VS2]], bfloat [[RS1]], <vscale x 16 x i1> [[VM]], i64 7, i64 [[VL]], i64 3)
+// CHECK-RV64-NEXT: ret <vscale x 16 x float> [[TMP0]]
+//
+vfloat32m8_t test_vfwsub_vf_bf16m4_f32m8_m(vbool4_t vm, vbfloat16m4_t vs2,
+ __bf16 rs1, size_t vl) {
+ return __riscv_vfwsub_vf_bf16m4_f32m8_m(vm, vs2, rs1, vl);
+}
+
+// CHECK-RV64-LABEL: define dso_local <vscale x 16 x float> @test_vfwsub_wv_bf16m4_f32m8_m(
+// CHECK-RV64-SAME: <vscale x 16 x i1> [[VM:%.*]], <vscale x 16 x float> [[VS2:%.*]], <vscale x 16 x bfloat> [[VS1:%.*]], i64 noundef [[VL:%.*]]) #[[ATTR0]] {
+// CHECK-RV64-NEXT: [[ENTRY:.*:]]
+// CHECK-RV64-NEXT: [[TMP0:%.*]] = call <vscale x 16 x float> @llvm.riscv.vfwsub.w.mask.nxv16f32.nxv16bf16.i64(<vscale x 16 x float> poison, <vscale x 16 x float> [[VS2]], <vscale x 16 x bfloat> [[VS1]], <vscale x 16 x i1> [[VM]], i64 7, i64 [[VL]], i64 3)
+// CHECK-RV64-NEXT: ret <vscale x 16 x float> [[TMP0]]
+//
+vfloat32m8_t test_vfwsub_wv_bf16m4_f32m8_m(vbool4_t vm, vfloat32m8_t vs2,
+ vbfloat16m4_t vs1, size_t vl) {
+ return __riscv_vfwsub_wv_bf16m4_f32m8_m(vm, vs2, vs1, vl);
+}
+
+// CHECK-RV64-LABEL: define dso_local <vscale x 16 x float> @test_vfwsub_wf_bf16_f32m8_m(
+// CHECK-RV64-SAME: <vscale x 16 x i1> [[VM:%.*]], <vscale x 16 x float> [[VS2:%.*]], bfloat noundef [[RS1:%.*]], i64 noundef [[VL:%.*]]) #[[ATTR0]] {
+// CHECK-RV64-NEXT: [[ENTRY:.*:]]
+// CHECK-RV64-NEXT: [[TMP0:%.*]] = call <vscale x 16 x float> @llvm.riscv.vfwsub.w.mask.nxv16f32.bf16.i64(<vscale x 16 x float> poison, <vscale x 16 x float> [[VS2]], bfloat [[RS1]], <vscale x 16 x i1> [[VM]], i64 7, i64 [[VL]], i64 3)
+// CHECK-RV64-NEXT: ret <vscale x 16 x float> [[TMP0]]
+//
+vfloat32m8_t test_vfwsub_wf_bf16_f32m8_m(vbool4_t vm, vfloat32m8_t vs2,
+ __bf16 rs1, size_t vl) {
+ return __riscv_vfwsub_wf_bf16_f32m8_m(vm, vs2, rs1, vl);
+}
+
+// CHECK-RV64-LABEL: define dso_local <vscale x 1 x float> @test_vfwsub_vv_bf16mf4_f32mf2_rm(
+// CHECK-RV64-SAME: <vscale x 1 x bfloat> [[VS2:%.*]], <vscale x 1 x bfloat> [[VS1:%.*]], i64 noundef [[VL:%.*]]) #[[ATTR0]] {
+// CHECK-RV64-NEXT: [[ENTRY:.*:]]
+// CHECK-RV64-NEXT: [[TMP0:%.*]] = call <vscale x 1 x float> @llvm.riscv.vfwsub.nxv1f32.nxv1bf16.nxv1bf16.i64(<vscale x 1 x float> poison, <vscale x 1 x bfloat> [[VS2]], <vscale x 1 x bfloat> [[VS1]], i64 0, i64 [[VL]])
+// CHECK-RV64-NEXT: ret <vscale x 1 x float> [[TMP0]]
+//
+vfloat32mf2_t test_vfwsub_vv_bf16mf4_f32mf2_rm(vbfloat16mf4_t vs2,
+ vbfloat16mf4_t vs1, size_t vl) {
+ return __riscv_vfwsub_vv_bf16mf4_f32mf2_rm(vs2, vs1, __RISCV_FRM_RNE, vl);
+}
+
+// CHECK-RV64-LABEL: define dso_local <vscale x 1 x float> @test_vfwsub_vf_bf16mf4_f32mf2_rm(
+// CHECK-RV64-SAME: <vscale x 1 x bfloat> [[VS2:%.*]], bfloat noundef [[RS1:%.*]], i64 noundef [[VL:%.*]]) #[[ATTR0]] {
+// CHECK-RV64-NEXT: [[ENTRY:.*:]]
+// CHECK-RV64-NEXT: [[TMP0:%.*]] = call <vscale x 1 x float> @llvm.riscv.vfwsub.nxv1f32.nxv1bf16.bf16.i64(<vscale x 1 x float> poison, <vscale x 1 x bfloat> [[VS2]], bfloat [[RS1]], i64 0, i64 [[VL]])
+// CHECK-RV64-NEXT: ret <vscale x 1 x float> [[TMP0]]
+//
+vfloat32mf2_t test_vfwsub_vf_bf16mf4_f32mf2_rm(vbfloat16mf4_t vs2, __bf16 rs1,
+ size_t vl) {
+ return __riscv_vfwsub_vf_bf16mf4_f32mf2_rm(vs2, rs1, __RISCV_FRM_RNE, vl);
+}
+
+// CHECK-RV64-LABEL: define dso_local <vscale x 1 x float> @test_vfwsub_wv_bf16mf4_f32mf2_rm(
+// CHECK-RV64-SAME: <vscale x 1 x float> [[VS2:%.*]], <vscale x 1 x bfloat> [[VS1:%.*]], i64 noundef [[VL:%.*]]) #[[ATTR0]] {
+// CHECK-RV64-NEXT: [[ENTRY:.*:]]
+// CHECK-RV64-NEXT: [[TMP0:%.*]] = call <vscale x 1 x float> @llvm.riscv.vfwsub.w.nxv1f32.nxv1bf16.i64(<vscale x 1 x float> poison, <vscale x 1 x float> [[VS2]], <vscale x 1 x bfloat> [[VS1]], i64 0, i64 [[VL]])
+// CHECK-RV64-NEXT: ret <vscale x 1 x float> [[TMP0]]
+//
+vfloat32mf2_t test_vfwsub_wv_bf16mf4_f32mf2_rm(vfloat32mf2_t vs2,
+ vbfloat16mf4_t vs1, size_t vl) {
+ return __riscv_vfwsub_wv_bf16mf4_f32mf2_rm(vs2, vs1, __RISCV_FRM_RNE, vl);
+}
+
+// CHECK-RV64-LABEL: define dso_local <vscale x 1 x float> @test_vfwsub_wf_bf16_f32mf2_rm(
+// CHECK-RV64-SAME: <vscale x 1 x float> [[VS2:%.*]], bfloat noundef [[RS1:%.*]], i64 noundef [[VL:%.*]]) #[[ATTR0]] {
+// CHECK-RV64-NEXT: [[ENTRY:.*:]]
+// CHECK-RV64-NEXT: [[TMP0:%.*]] = call <vscale x 1 x float> @llvm.riscv.vfwsub.w.nxv1f32.bf16.i64(<vscale x 1 x float> poison, <vscale x 1 x float> [[VS2]], bfloat [[RS1]], i64 0, i64 [[VL]])
+// CHECK-RV64-NEXT: ret <vscale x 1 x float> [[TMP0]]
+//
+vfloat32mf2_t test_vfwsub_wf_bf16_f32mf2_rm(vfloat32mf2_t vs2, __bf16 rs1,
+ size_t vl) {
+ return __riscv_vfwsub_wf_bf16_f32mf2_rm(vs2, rs1, __RISCV_FRM_RNE, vl);
+}
+
+// CHECK-RV64-LABEL: define dso_local <vscale x 2 x float> @test_vfwsub_vv_bf16mf2_f32m1_rm(
+// CHECK-RV64-SAME: <vscale x 2 x bfloat> [[VS2:%.*]], <vscale x 2 x bfloat> [[VS1:%.*]], i64 noundef [[VL:%.*]]) #[[ATTR0]] {
+// CHECK-RV64-NEXT: [[ENTRY:.*:]]
+// CHECK-RV64-NEXT: [[TMP0:%.*]] = call <vscale x 2 x float> @llvm.riscv.vfwsub.nxv2f32.nxv2bf16.nxv2bf16.i64(<vscale x 2 x float> poison, <vscale x 2 x bfloat> [[VS2]], <vscale x 2 x bfloat> [[VS1]], i64 0, i64 [[VL]])
+// CHECK-RV64-NEXT: ret <vscale x 2 x float> [[TMP0]]
+//
+vfloat32m1_t test_vfwsub_vv_bf16mf2_f32m1_rm(vbfloat16mf2_t vs2,
+ vbfloat16mf2_t vs1, size_t vl) {
+ return __riscv_vfwsub_vv_bf16mf2_f32m1_rm(vs2, vs1, __RISCV_FRM_RNE, vl);
+}
+
+// CHECK-RV64-LABEL: define dso_local <vscale x 2 x float> @test_vfwsub_vf_bf16mf2_f32m1_rm(
+// CHECK-RV64-SAME: <vscale x 2 x bfloat> [[VS2:%.*]], bfloat noundef [[RS1:%.*]], i64 noundef [[VL:%.*]]) #[[ATTR0]] {
+// CHECK-RV64-NEXT: [[ENTRY:.*:]]
+// CHECK-RV64-NEXT: [[TMP0:%.*]] = call <vscale x 2 x float> @llvm.riscv.vfwsub.nxv2f32.nxv2bf16.bf16.i64(<vscale x 2 x float> poison, <vscale x 2 x bfloat> [[VS2]], bfloat [[RS1]], i64 0, i64 [[VL]])
+// CHECK-RV64-NEXT: ret <vscale x 2 x float> [[TMP0]]
+//
+vfloat32m1_t test_vfwsub_vf_bf16mf2_f32m1_rm(vbfloat16mf2_t vs2, __bf16 rs1,
+ size_t vl) {
+ return __riscv_vfwsub_vf_bf16mf2_f32m1_rm(vs2, rs1, __RISCV_FRM_RNE, vl);
+}
+
+// CHECK-RV64-LABEL: define dso_local <vscale x 2 x float> @test_vfwsub_wv_bf16mf2_f32m1_rm(
+// CHECK-RV64-SAME: <vscale x 2 x float> [[VS2:%.*]], <vscale x 2 x bfloat> [[VS1:%.*]], i64 noundef [[VL:%.*]]) #[[ATTR0]] {
+// CHECK-RV64-NEXT: [[ENTRY:.*:]]
+// CHECK-RV64-NEXT: [[TMP0:%.*]] = call <vscale x 2 x float> @llvm.riscv.vfwsub.w.nxv2f32.nxv2bf16.i64(<vscale x 2 x float> poison, <vscale x 2 x float> [[VS2]], <vscale x 2 x bfloat> [[VS1]], i64 0, i64 [[VL]])
+// CHECK-RV64-NEXT: ret <vscale x 2 x float> [[TMP0]]
+//
+vfloat32m1_t test_vfwsub_wv_bf16mf2_f32m1_rm(vfloat32m1_t vs2,
+ vbfloat16mf2_t vs1, size_t vl) {
+ return __riscv_vfwsub_wv_bf16mf2_f32m1_rm(vs2, vs1, __RISCV_FRM_RNE, vl);
+}
+
+// CHECK-RV64-LABEL: define dso_local <vscale x 2 x float> @test_vfwsub_wf_bf16_f32m1_rm(
+// CHECK-RV64-SAME: <vscale x 2 x float> [[VS2:%.*]], bfloat noundef [[RS1:%.*]], i64 noundef [[VL:%.*]]) #[[ATTR0]] {
+// CHECK-RV64-NEXT: [[ENTRY:.*:]]
+// CHECK-RV64-NEXT: [[TMP0:%.*]] = call <vscale x 2 x float> @llvm.riscv.vfwsub.w.nxv2f32.bf16.i64(<vscale x 2 x float> poison, <vscale x 2 x float> [[VS2]], bfloat [[RS1]], i64 0, i64 [[VL]])
+// CHECK-RV64-NEXT: ret <vscale x 2 x float> [[TMP0]]
+//
+vfloat32m1_t test_vfwsub_wf_bf16_f32m1_rm(vfloat32m1_t vs2, __bf16 rs1,
+ size_t vl) {
+ return __riscv_vfwsub_wf_bf16_f32m1_rm(vs2, rs1, __RISCV_FRM_RNE, vl);
+}
+
+// CHECK-RV64-LABEL: define dso_local <vscale x 4 x float> @test_vfwsub_vv_bf16m1_f32m2_rm(
+// CHECK-RV64-SAME: <vscale x 4 x bfloat> [[VS2:%.*]], <vscale x 4 x bfloat> [[VS1:%.*]], i64 noundef [[VL:%.*]]) #[[ATTR0]] {
+// CHECK-RV64-NEXT: [[ENTRY:.*:]]
+// CHECK-RV64-NEXT: [[TMP0:%.*]] = call <vscale x 4 x float> @llvm.riscv.vfwsub.nxv4f32.nxv4bf16.nxv4bf16.i64(<vscale x 4 x float> poison, <vscale x 4 x bfloat> [[VS2]], <vscale x 4 x bfloat> [[VS1]], i64 0, i64 [[VL]])
+// CHECK-RV64-NEXT: ret <vscale x 4 x float> [[TMP0]]
+//
+vfloat32m2_t test_vfwsub_vv_bf16m1_f32m2_rm(vbfloat16m1_t vs2,
+ vbfloat16m1_t vs1, size_t vl) {
+ return __riscv_vfwsub_vv_bf16m1_f32m2_rm(vs2, vs1, __RISCV_FRM_RNE, vl);
+}
+
+// CHECK-RV64-LABEL: define dso_local <vscale x 4 x float> @test_vfwsub_vf_bf16m1_f32m2_rm(
+// CHECK-RV64-SAME: <vscale x 4 x bfloat> [[VS2:%.*]], bfloat noundef [[RS1:%.*]], i64 noundef [[VL:%.*]]) #[[ATTR0]] {
+// CHECK-RV64-NEXT: [[ENTRY:.*:]]
+// CHECK-RV64-NEXT: [[TMP0:%.*]] = call <vscale x 4 x float> @llvm.riscv.vfwsub.nxv4f32.nxv4bf16.bf16.i64(<vscale x 4 x float> poison, <vscale x 4 x bfloat> [[VS2]], bfloat [[RS1]], i64 0, i64 [[VL]])
+// CHECK-RV64-NEXT: ret <vscale x 4 x float> [[TMP0]]
+//
+vfloat32m2_t test_vfwsub_vf_bf16m1_f32m2_rm(vbfloat16m1_t vs2, __bf16 rs1,
+ size_t vl) {
+ return __riscv_vfwsub_vf_bf16m1_f32m2_rm(vs2, rs1, __RISCV_FRM_RNE, vl);
+}
+
+// CHECK-RV64-LABEL: define dso_local <vscale x 4 x float> @test_vfwsub_wv_bf16m1_f32m2_rm(
+// CHECK-RV64-SAME: <vscale x 4 x float> [[VS2:%.*]], <vscale x 4 x bfloat> [[VS1:%.*]], i64 noundef [[VL:%.*]]) #[[ATTR0]] {
+// CHECK-RV64-NEXT: [[ENTRY:.*:]]
+// CHECK-RV64-NEXT: [[TMP0:%.*]] = call <vscale x 4 x float> @llvm.riscv.vfwsub.w.nxv4f32.nxv4bf16.i64(<vscale x 4 x float> poison, <vscale x 4 x float> [[VS2]], <vscale x 4 x bfloat> [[VS1]], i64 0, i64 [[VL]])
+// CHECK-RV64-NEXT: ret <vscale x 4 x float> [[TMP0]]
+//
+vfloat32m2_t test_vfwsub_wv_bf16m1_f32m2_rm(vfloat32m2_t vs2, vbfloat16m1_t vs1,
+ size_t vl) {
+ return __riscv_vfwsub_wv_bf16m1_f32m2_rm(vs2, vs1, __RISCV_FRM_RNE, vl);
+}
+
+// CHECK-RV64-LABEL: define dso_local <vscale x 4 x float> @test_vfwsub_wf_bf16_f32m2_rm(
+// CHECK-RV64-SAME: <vscale x 4 x float> [[VS2:%.*]], bfloat noundef [[RS1:%.*]], i64 noundef [[VL:%.*]]) #[[ATTR0]] {
+// CHECK-RV64-NEXT: [[ENTRY:.*:]]
+// CHECK-RV64-NEXT: [[TMP0:%.*]] = call <vscale x 4 x float> @llvm.riscv.vfwsub.w.nxv4f32.bf16.i64(<vscale x 4 x float> poison, <vscale x 4 x float> [[VS2]], bfloat [[RS1]], i64 0, i64 [[VL]])
+// CHECK-RV64-NEXT: ret <vscale x 4 x float> [[TMP0]]
+//
+vfloat32m2_t test_vfwsub_wf_bf16_f32m2_rm(vfloat32m2_t vs2, __bf16 rs1,
+ size_t vl) {
+ return __riscv_vfwsub_wf_bf16_f32m2_rm(vs2, rs1, __RISCV_FRM_RNE, vl);
+}
+
+// CHECK-RV64-LABEL: define dso_local <vscale x 8 x float> @test_vfwsub_vv_bf16m2_f32m4_rm(
+// CHECK-RV64-SAME: <vscale x 8 x bfloat> [[VS2:%.*]], <vscale x 8 x bfloat> [[VS1:%.*]], i64 noundef [[VL:%.*]]) #[[ATTR0]] {
+// CHECK-RV64-NEXT: [[ENTRY:.*:]]
+// CHECK-RV64-NEXT: [[TMP0:%.*]] = call <vscale x 8 x float> @llvm.riscv.vfwsub.nxv8f32.nxv8bf16.nxv8bf16.i64(<vscale x 8 x float> poison, <vscale x 8 x bfloat> [[VS2]], <vscale x 8 x bfloat> [[VS1]], i64 0, i64 [[VL]])
+// CHECK-RV64-NEXT: ret <vscale x 8 x float> [[TMP0]]
+//
+vfloat32m4_t test_vfwsub_vv_bf16m2_f32m4_rm(vbfloat16m2_t vs2,
+ vbfloat16m2_t vs1, size_t vl) {
+ return __riscv_vfwsub_vv_bf16m2_f32m4_rm(vs2, vs1, __RISCV_FRM_RNE, vl);
+}
+
+// CHECK-RV64-LABEL: define dso_local <vscale x 8 x float> @test_vfwsub_vf_bf16m2_f32m4_rm(
+// CHECK-RV64-SAME: <vscale x 8 x bfloat> [[VS2:%.*]], bfloat noundef [[RS1:%.*]], i64 noundef [[VL:%.*]]) #[[ATTR0]] {
+// CHECK-RV64-NEXT: [[ENTRY:.*:]]
+// CHECK-RV64-NEXT: [[TMP0:%.*]] = call <vscale x 8 x float> @llvm.riscv.vfwsub.nxv8f32.nxv8bf16.bf16.i64(<vscale x 8 x float> poison, <vscale x 8 x bfloat> [[VS2]], bfloat [[RS1]], i64 0, i64 [[VL]])
+// CHECK-RV64-NEXT: ret <vscale x 8 x float> [[TMP0]]
+//
+vfloat32m4_t test_vfwsub_vf_bf16m2_f32m4_rm(vbfloat16m2_t vs2, __bf16 rs1,
+ size_t vl) {
+ return __riscv_vfwsub_vf_bf16m2_f32m4_rm(vs2, rs1, __RISCV_FRM_RNE, vl);
+}
+
+// CHECK-RV64-LABEL: define dso_local <vscale x 8 x float> @test_vfwsub_wv_bf16m2_f32m4_rm(
+// CHECK-RV64-SAME: <vscale x 8 x float> [[VS2:%.*]], <vscale x 8 x bfloat> [[VS1:%.*]], i64 noundef [[VL:%.*]]) #[[ATTR0]] {
+// CHECK-RV64-NEXT: [[ENTRY:.*:]]
+// CHECK-RV64-NEXT: [[TMP0:%.*]] = call <vscale x 8 x float> @llvm.riscv.vfwsub.w.nxv8f32.nxv8bf16.i64(<vscale x 8 x float> poison, <vscale x 8 x float> [[VS2]], <vscale x 8 x bfloat> [[VS1]], i64 0, i64 [[VL]])
+// CHECK-RV64-NEXT: ret <vscale x 8 x float> [[TMP0]]
+//
+vfloat32m4_t test_vfwsub_wv_bf16m2_f32m4_rm(vfloat32m4_t vs2, vbfloat16m2_t vs1,
+ size_t vl) {
+ return __riscv_vfwsub_wv_bf16m2_f32m4_rm(vs2, vs1, __RISCV_FRM_RNE, vl);
+}
+
+// CHECK-RV64-LABEL: define dso_local <vscale x 8 x float> @test_vfwsub_wf_bf16_f32m4_rm(
+// CHECK-RV64-SAME: <vscale x 8 x float> [[VS2:%.*]], bfloat noundef [[RS1:%.*]], i64 noundef [[VL:%.*]]) #[[ATTR0]] {
+// CHECK-RV64-NEXT: [[ENTRY:.*:]]
+// CHECK-RV64-NEXT: [[TMP0:%.*]] = call <vscale x 8 x float> @llvm.riscv.vfwsub.w.nxv8f32.bf16.i64(<vscale x 8 x float> poison, <vscale x 8 x float> [[VS2]], bfloat [[RS1]], i64 0, i64 [[VL]])
+// CHECK-RV64-NEXT: ret <vscale x 8 x float> [[TMP0]]
+//
+vfloat32m4_t test_vfwsub_wf_bf16_f32m4_rm(vfloat32m4_t vs2, __bf16 rs1,
+ size_t vl) {
+ return __riscv_vfwsub_wf_bf16_f32m4_rm(vs2, rs1, __RISCV_FRM_RNE, vl);
+}
+
+// CHECK-RV64-LABEL: define dso_local <vscale x 16 x float> @test_vfwsub_vv_bf16m4_f32m8_rm(
+// CHECK-RV64-SAME: <vscale x 16 x bfloat> [[VS2:%.*]], <vscale x 16 x bfloat> [[VS1:%.*]], i64 noundef [[VL:%.*]]) #[[ATTR0]] {
+// CHECK-RV64-NEXT: [[ENTRY:.*:]]
+// CHECK-RV64-NEXT: [[TMP0:%.*]] = call <vscale x 16 x float> @llvm.riscv.vfwsub.nxv16f32.nxv16bf16.nxv16bf16.i64(<vscale x 16 x float> poison, <vscale x 16 x bfloat> [[VS2]], <vscale x 16 x bfloat> [[VS1]], i64 0, i64 [[VL]])
+// CHECK-RV64-NEXT: ret <vscale x 16 x float> [[TMP0]]
+//
+vfloat32m8_t test_vfwsub_vv_bf16m4_f32m8_rm(vbfloat16m4_t vs2,
+ vbfloat16m4_t vs1, size_t vl) {
+ return __riscv_vfwsub_vv_bf16m4_f32m8_rm(vs2, vs1, __RISCV_FRM_RNE, vl);
+}
+
+// CHECK-RV64-LABEL: define dso_local <vscale x 16 x float> @test_vfwsub_vf_bf16m4_f32m8_rm(
+// CHECK-RV64-SAME: <vscale x 16 x bfloat> [[VS2:%.*]], bfloat noundef [[RS1:%.*]], i64 noundef [[VL:%.*]]) #[[ATTR0]] {
+// CHECK-RV64-NEXT: [[ENTRY:.*:]]
+// CHECK-RV64-NEXT: [[TMP0:%.*]] = call <vscale x 16 x float> @llvm.riscv.vfwsub.nxv16f32.nxv16bf16.bf16.i64(<vscale x 16 x float> poison, <vscale x 16 x bfloat> [[VS2]], bfloat [[RS1]], i64 0, i64 [[VL]])
+// CHECK-RV64-NEXT: ret <vscale x 16 x float> [[TMP0]]
+//
+vfloat32m8_t test_vfwsub_vf_bf16m4_f32m8_rm(vbfloat16m4_t vs2, __bf16 rs1,
+ size_t vl) {
+ return __riscv_vfwsub_vf_bf16m4_f32m8_rm(vs2, rs1, __RISCV_FRM_RNE, vl);
+}
+
+// CHECK-RV64-LABEL: define dso_local <vscale x 16 x float> @test_vfwsub_wv_bf16m4_f32m8_rm(
+// CHECK-RV64-SAME: <vscale x 16 x float> [[VS2:%.*]], <vscale x 16 x bfloat> [[VS1:%.*]], i64 noundef [[VL:%.*]]) #[[ATTR0]] {
+// CHECK-RV64-NEXT: [[ENTRY:.*:]]
+// CHECK-RV64-NEXT: [[TMP0:%.*]] = call <vscale x 16 x float> @llvm.riscv.vfwsub.w.nxv16f32.nxv16bf16.i64(<vscale x 16 x float> poison, <vscale x 16 x float> [[VS2]], <vscale x 16 x bfloat> [[VS1]], i64 0, i64 [[VL]])
+// CHECK-RV64-NEXT: ret <vscale x 16 x float> [[TMP0]]
+//
+vfloat32m8_t test_vfwsub_wv_bf16m4_f32m8_rm(vfloat32m8_t vs2, vbfloat16m4_t vs1,
+ size_t vl) {
+ return __riscv_vfwsub_wv_bf16m4_f32m8_rm(vs2, vs1, __RISCV_FRM_RNE, vl);
+}
+
+// CHECK-RV64-LABEL: define dso_local <vscale x 16 x float> @test_vfwsub_wf_bf16_f32m8_rm(
+// CHECK-RV64-SAME: <vscale x 16 x float> [[VS2:%.*]], bfloat noundef [[RS1:%.*]], i64 noundef [[VL:%.*]]) #[[ATTR0]] {
+// CHECK-RV64-NEXT: [[ENTRY:.*:]]
+// CHECK-RV64-NEXT: [[TMP0:%.*]] = call <vscale x 16 x float> @llvm.riscv.vfwsub.w.nxv16f32.bf16.i64(<vscale x 16 x float> poison, <vscale x 16 x float> [[VS2]], bfloat [[RS1]], i64 0, i64 [[VL]])
+// CHECK-RV64-NEXT: ret <vscale x 16 x float> [[TMP0]]
+//
+vfloat32m8_t test_vfwsub_wf_bf16_f32m8_rm(vfloat32m8_t vs2, __bf16 rs1,
+ size_t vl) {
+ return __riscv_vfwsub_wf_bf16_f32m8_rm(vs2, rs1, __RISCV_FRM_RNE, vl);
+}
+
+// CHECK-RV64-LABEL: define dso_local <vscale x 1 x float> @test_vfwsub_vv_bf16mf4_f32mf2_rm_m(
+// CHECK-RV64-SAME: <vscale x 1 x i1> [[VM:%.*]], <vscale x 1 x bfloat> [[VS2:%.*]], <vscale x 1 x bfloat> [[VS1:%.*]], i64 noundef [[VL:%.*]]) #[[ATTR0]] {
+// CHECK-RV64-NEXT: [[ENTRY:.*:]]
+// CHECK-RV64-NEXT: [[TMP0:%.*]] = call <vscale x 1 x float> @llvm.riscv.vfwsub.mask.nxv1f32.nxv1bf16.nxv1bf16.i64(<vscale x 1 x float> poison, <vscale x 1 x bfloat> [[VS2]], <vscale x 1 x bfloat> [[VS1]], <vscale x 1 x i1> [[VM]], i64 0, i64 [[VL]], i64 3)
+// CHECK-RV64-NEXT: ret <vscale x 1 x float> [[TMP0]]
+//
+vfloat32mf2_t test_vfwsub_vv_bf16mf4_f32mf2_rm_m(vbool64_t vm,
+ vbfloat16mf4_t vs2,
+ vbfloat16mf4_t vs1,
+ size_t vl) {
+ return __riscv_vfwsub_vv_bf16mf4_f32mf2_rm_m(vm, vs2, vs1, __RISCV_FRM_RNE,
+ vl);
+}
+
+// CHECK-RV64-LABEL: define dso_local <vscale x 1 x float> @test_vfwsub_vf_bf16mf4_f32mf2_rm_m(
+// CHECK-RV64-SAME: <vscale x 1 x i1> [[VM:%.*]], <vscale x 1 x bfloat> [[VS2:%.*]], bfloat noundef [[RS1:%.*]], i64 noundef [[VL:%.*]]) #[[ATTR0]] {
+// CHECK-RV64-NEXT: [[ENTRY:.*:]]
+// CHECK-RV64-NEXT: [[TMP0:%.*]] = call <vscale x 1 x float> @llvm.riscv.vfwsub.mask.nxv1f32.nxv1bf16.bf16.i64(<vscale x 1 x float> poison, <vscale x 1 x bfloat> [[VS2]], bfloat [[RS1]], <vscale x 1 x i1> [[VM]], i64 0, i64 [[VL]], i64 3)
+// CHECK-RV64-NEXT: ret <vscale x 1 x float> [[TMP0]]
+//
+vfloat32mf2_t test_vfwsub_vf_bf16mf4_f32mf2_rm_m(vbool64_t vm,
+ vbfloat16mf4_t vs2, __bf16 rs1,
+ size_t vl) {
+ return __riscv_vfwsub_vf_bf16mf4_f32mf2_rm_m(vm, vs2, rs1, __RISCV_FRM_RNE,
+ vl);
+}
+
+// CHECK-RV64-LABEL: define dso_local <vscale x 1 x float> @test_vfwsub_wv_bf16mf4_f32mf2_rm_m(
+// CHECK-RV64-SAME: <vscale x 1 x i1> [[VM:%.*]], <vscale x 1 x float> [[VS2:%.*]], <vscale x 1 x bfloat> [[VS1:%.*]], i64 noundef [[VL:%.*]]) #[[ATTR0]] {
+// CHECK-RV64-NEXT: [[ENTRY:.*:]]
+// CHECK-RV64-NEXT: [[TMP0:%.*]] = call <vscale x 1 x float> @llvm.riscv.vfwsub.w.mask.nxv1f32.nxv1bf16.i64(<vscale x 1 x float> poison, <vscale x 1 x float> [[VS2]], <vscale x 1 x bfloat> [[VS1]], <vscale x 1 x i1> [[VM]], i64 0, i64 [[VL]], i64 3)
+// CHECK-RV64-NEXT: ret <vscale x 1 x float> [[TMP0]]
+//
+vfloat32mf2_t test_vfwsub_wv_bf16mf4_f32mf2_rm_m(vbool64_t vm,
+ vfloat32mf2_t vs2,
+ vbfloat16mf4_t vs1,
+ size_t vl) {
+ return __riscv_vfwsub_wv_bf16mf4_f32mf2_rm_m(vm, vs2, vs1, __RISCV_FRM_RNE,
+ vl);
+}
+
+// CHECK-RV64-LABEL: define dso_local <vscale x 1 x float> @test_vfwsub_wf_bf16_f32mf2_rm_m(
+// CHECK-RV64-SAME: <vscale x 1 x i1> [[VM:%.*]], <vscale x 1 x float> [[VS2:%.*]], bfloat noundef [[RS1:%.*]], i64 noundef [[VL:%.*]]) #[[ATTR0]] {
+// CHECK-RV64-NEXT: [[ENTRY:.*:]]
+// CHECK-RV64-NEXT: [[TMP0:%.*]] = call <vscale x 1 x float> @llvm.riscv.vfwsub.w.mask.nxv1f32.bf16.i64(<vscale x 1 x float> poison, <vscale x 1 x float> [[VS2]], bfloat [[RS1]], <vscale x 1 x i1> [[VM]], i64 0, i64 [[VL]], i64 3)
+// CHECK-RV64-NEXT: ret <vscale x 1 x float> [[TMP0]]
+//
+vfloat32mf2_t test_vfwsub_wf_bf16_f32mf2_rm_m(vbool64_t vm, vfloat32mf2_t vs2,
+ __bf16 rs1, size_t vl) {
+ return __riscv_vfwsub_wf_bf16_f32mf2_rm_m(vm, vs2, rs1, __RISCV_FRM_RNE, vl);
+}
+
+// CHECK-RV64-LABEL: define dso_local <vscale x 2 x float> @test_vfwsub_vv_bf16mf2_f32m1_rm_m(
+// CHECK-RV64-SAME: <vscale x 2 x i1> [[VM:%.*]], <vscale x 2 x bfloat> [[VS2:%.*]], <vscale x 2 x bfloat> [[VS1:%.*]], i64 noundef [[VL:%.*]]) #[[ATTR0]] {
+// CHECK-RV64-NEXT: [[ENTRY:.*:]]
+// CHECK-RV64-NEXT: [[TMP0:%.*]] = call <vscale x 2 x float> @llvm.riscv.vfwsub.mask.nxv2f32.nxv2bf16.nxv2bf16.i64(<vscale x 2 x float> poison, <vscale x 2 x bfloat> [[VS2]], <vscale x 2 x bfloat> [[VS1]], <vscale x 2 x i1> [[VM]], i64 0, i64 [[VL]], i64 3)
+// CHECK-RV64-NEXT: ret <vscale x 2 x float> [[TMP0]]
+//
+vfloat32m1_t test_vfwsub_vv_bf16mf2_f32m1_rm_m(vbool32_t vm, vbfloat16mf2_t vs2,
+ vbfloat16mf2_t vs1, size_t vl) {
+ return __riscv_vfwsub_vv_bf16mf2_f32m1_rm_m(vm, vs2, vs1, __RISCV_FRM_RNE,
+ vl);
+}
+
+// CHECK-RV64-LABEL: define dso_local <vscale x 2 x float> @test_vfwsub_vf_bf16mf2_f32m1_rm_m(
+// CHECK-RV64-SAME: <vscale x 2 x i1> [[VM:%.*]], <vscale x 2 x bfloat> [[VS2:%.*]], bfloat noundef [[RS1:%.*]], i64 noundef [[VL:%.*]]) #[[ATTR0]] {
+// CHECK-RV64-NEXT: [[ENTRY:.*:]]
+// CHECK-RV64-NEXT: [[TMP0:%.*]] = call <vscale x 2 x float> @llvm.riscv.vfwsub.mask.nxv2f32.nxv2bf16.bf16.i64(<vscale x 2 x float> poison, <vscale x 2 x bfloat> [[VS2]], bfloat [[RS1]], <vscale x 2 x i1> [[VM]], i64 0, i64 [[VL]], i64 3)
+// CHECK-RV64-NEXT: ret <vscale x 2 x float> [[TMP0]]
+//
+vfloat32m1_t test_vfwsub_vf_bf16mf2_f32m1_rm_m(vbool32_t vm, vbfloat16mf2_t vs2,
+ __bf16 rs1, size_t vl) {
+ return __riscv_vfwsub_vf_bf16mf2_f32m1_rm_m(vm, vs2, rs1, __RISCV_FRM_RNE,
+ vl);
+}
+
+// CHECK-RV64-LABEL: define dso_local <vscale x 2 x float> @test_vfwsub_wv_bf16mf2_f32m1_rm_m(
+// CHECK-RV64-SAME: <vscale x 2 x i1> [[VM:%.*]], <vscale x 2 x float> [[VS2:%.*]], <vscale x 2 x bfloat> [[VS1:%.*]], i64 noundef [[VL:%.*]]) #[[ATTR0]] {
+// CHECK-RV64-NEXT: [[ENTRY:.*:]]
+// CHECK-RV64-NEXT: [[TMP0:%.*]] = call <vscale x 2 x float> @llvm.riscv.vfwsub.w.mask.nxv2f32.nxv2bf16.i64(<vscale x 2 x float> poison, <vscale x 2 x float> [[VS2]], <vscale x 2 x bfloat> [[VS1]], <vscale x 2 x i1> [[VM]], i64 0, i64 [[VL]], i64 3)
+// CHECK-RV64-NEXT: ret <vscale x 2 x float> [[TMP0]]
+//
+vfloat32m1_t test_vfwsub_wv_bf16mf2_f32m1_rm_m(vbool32_t vm, vfloat32m1_t vs2,
+ vbfloat16mf2_t vs1, size_t vl) {
+ return __riscv_vfwsub_wv_bf16mf2_f32m1_rm_m(vm, vs2, vs1, __RISCV_FRM_RNE,
+ vl);
+}
+
+// CHECK-RV64-LABEL: define dso_local <vscale x 2 x float> @test_vfwsub_wf_bf16_f32m1_rm_m(
+// CHECK-RV64-SAME: <vscale x 2 x i1> [[VM:%.*]], <vscale x 2 x float> [[VS2:%.*]], bfloat noundef [[RS1:%.*]], i64 noundef [[VL:%.*]]) #[[ATTR0]] {
+// CHECK-RV64-NEXT: [[ENTRY:.*:]]
+// CHECK-RV64-NEXT: [[TMP0:%.*]] = call <vscale x 2 x float> @llvm.riscv.vfwsub.w.mask.nxv2f32.bf16.i64(<vscale x 2 x float> poison, <vscale x 2 x float> [[VS2]], bfloat [[RS1]], <vscale x 2 x i1> [[VM]], i64 0, i64 [[VL]], i64 3)
+// CHECK-RV64-NEXT: ret <vscale x 2 x float> [[TMP0]]
+//
+vfloat32m1_t test_vfwsub_wf_bf16_f32m1_rm_m(vbool32_t vm, vfloat32m1_t vs2,
+ __bf16 rs1, size_t vl) {
+ return __riscv_vfwsub_wf_bf16_f32m1_rm_m(vm, vs2, rs1, __RISCV_FRM_RNE, vl);
+}
+
+// CHECK-RV64-LABEL: define dso_local <vscale x 4 x float> @test_vfwsub_vv_bf16m1_f32m2_rm_m(
+// CHECK-RV64-SAME: <vscale x 4 x i1> [[VM:%.*]], <vscale x 4 x bfloat> [[VS2:%.*]], <vscale x 4 x bfloat> [[VS1:%.*]], i64 noundef [[VL:%.*]]) #[[ATTR0]] {
+// CHECK-RV64-NEXT: [[ENTRY:.*:]]
+// CHECK-RV64-NEXT: [[TMP0:%.*]] = call <vscale x 4 x float> @llvm.riscv.vfwsub.mask.nxv4f32.nxv4bf16.nxv4bf16.i64(<vscale x 4 x float> poison, <vscale x 4 x bfloat> [[VS2]], <vscale x 4 x bfloat> [[VS1]], <vscale x 4 x i1> [[VM]], i64 0, i64 [[VL]], i64 3)
+// CHECK-RV64-NEXT: ret <vscale x 4 x float> [[TMP0]]
+//
+vfloat32m2_t test_vfwsub_vv_bf16m1_f32m2_rm_m(vbool16_t vm, vbfloat16m1_t vs2,
+ vbfloat16m1_t vs1, size_t vl) {
+ return __riscv_vfwsub_vv_bf16m1_f32m2_rm_m(vm, vs2, vs1, __RISCV_FRM_RNE, vl);
+}
+
+// CHECK-RV64-LABEL: define dso_local <vscale x 4 x float> @test_vfwsub_vf_bf16m1_f32m2_rm_m(
+// CHECK-RV64-SAME: <vscale x 4 x i1> [[VM:%.*]], <vscale x 4 x bfloat> [[VS2:%.*]], bfloat noundef [[RS1:%.*]], i64 noundef [[VL:%.*]]) #[[ATTR0]] {
+// CHECK-RV64-NEXT: [[ENTRY:.*:]]
+// CHECK-RV64-NEXT: [[TMP0:%.*]] = call <vscale x 4 x float> @llvm.riscv.vfwsub.mask.nxv4f32.nxv4bf16.bf16.i64(<vscale x 4 x float> poison, <vscale x 4 x bfloat> [[VS2]], bfloat [[RS1]], <vscale x 4 x i1> [[VM]], i64 0, i64 [[VL]], i64 3)
+// CHECK-RV64-NEXT: ret <vscale x 4 x float> [[TMP0]]
+//
+vfloat32m2_t test_vfwsub_vf_bf16m1_f32m2_rm_m(vbool16_t vm, vbfloat16m1_t vs2,
+ __bf16 rs1, size_t vl) {
+ return __riscv_vfwsub_vf_bf16m1_f32m2_rm_m(vm, vs2, rs1, __RISCV_FRM_RNE, vl);
+}
+
+// CHECK-RV64-LABEL: define dso_local <vscale x 4 x float> @test_vfwsub_wv_bf16m1_f32m2_rm_m(
+// CHECK-RV64-SAME: <vscale x 4 x i1> [[VM:%.*]], <vscale x 4 x float> [[VS2:%.*]], <vscale x 4 x bfloat> [[VS1:%.*]], i64 noundef [[VL:%.*]]) #[[ATTR0]] {
+// CHECK-RV64-NEXT: [[ENTRY:.*:]]
+// CHECK-RV64-NEXT: [[TMP0:%.*]] = call <vscale x 4 x float> @llvm.riscv.vfwsub.w.mask.nxv4f32.nxv4bf16.i64(<vscale x 4 x float> poison, <vscale x 4 x float> [[VS2]], <vscale x 4 x bfloat> [[VS1]], <vscale x 4 x i1> [[VM]], i64 0, i64 [[VL]], i64 3)
+// CHECK-RV64-NEXT: ret <vscale x 4 x float> [[TMP0]]
+//
+vfloat32m2_t test_vfwsub_wv_bf16m1_f32m2_rm_m(vbool16_t vm, vfloat32m2_t vs2,
+ vbfloat16m1_t vs1, size_t vl) {
+ return __riscv_vfwsub_wv_bf16m1_f32m2_rm_m(vm, vs2, vs1, __RISCV_FRM_RNE, vl);
+}
+
+// CHECK-RV64-LABEL: define dso_local <vscale x 4 x float> @test_vfwsub_wf_bf16_f32m2_rm_m(
+// CHECK-RV64-SAME: <vscale x 4 x i1> [[VM:%.*]], <vscale x 4 x float> [[VS2:%.*]], bfloat noundef [[RS1:%.*]], i64 noundef [[VL:%.*]]) #[[ATTR0]] {
+// CHECK-RV64-NEXT: [[ENTRY:.*:]]
+// CHECK-RV64-NEXT: [[TMP0:%.*]] = call <vscale x 4 x float> @llvm.riscv.vfwsub.w.mask.nxv4f32.bf16.i64(<vscale x 4 x float> poison, <vscale x 4 x float> [[VS2]], bfloat [[RS1]], <vscale x 4 x i1> [[VM]], i64 0, i64 [[VL]], i64 3)
+// CHECK-RV64-NEXT: ret <vscale x 4 x float> [[TMP0]]
+//
+vfloat32m2_t test_vfwsub_wf_bf16_f32m2_rm_m(vbool16_t vm, vfloat32m2_t vs2,
+ __bf16 rs1, size_t vl) {
+ return __riscv_vfwsub_wf_bf16_f32m2_rm_m(vm, vs2, rs1, __RISCV_FRM_RNE, vl);
+}
+
+// CHECK-RV64-LABEL: define dso_local <vscale x 8 x float> @test_vfwsub_vv_bf16m2_f32m4_rm_m(
+// CHECK-RV64-SAME: <vscale x 8 x i1> [[VM:%.*]], <vscale x 8 x bfloat> [[VS2:%.*]], <vscale x 8 x bfloat> [[VS1:%.*]], i64 noundef [[VL:%.*]]) #[[ATTR0]] {
+// CHECK-RV64-NEXT: [[ENTRY:.*:]]
+// CHECK-RV64-NEXT: [[TMP0:%.*]] = call <vscale x 8 x float> @llvm.riscv.vfwsub.mask.nxv8f32.nxv8bf16.nxv8bf16.i64(<vscale x 8 x float> poison, <vscale x 8 x bfloat> [[VS2]], <vscale x 8 x bfloat> [[VS1]], <vscale x 8 x i1> [[VM]], i64 0, i64 [[VL]], i64 3)
+// CHECK-RV64-NEXT: ret <vscale x 8 x float> [[TMP0]]
+//
+vfloat32m4_t test_vfwsub_vv_bf16m2_f32m4_rm_m(vbool8_t vm, vbfloat16m2_t vs2,
+ vbfloat16m2_t vs1, size_t vl) {
+ return __riscv_vfwsub_vv_bf16m2_f32m4_rm_m(vm, vs2, vs1, __RISCV_FRM_RNE, vl);
+}
+
+// CHECK-RV64-LABEL: define dso_local <vscale x 8 x float> @test_vfwsub_vf_bf16m2_f32m4_rm_m(
+// CHECK-RV64-SAME: <vscale x 8 x i1> [[VM:%.*]], <vscale x 8 x bfloat> [[VS2:%.*]], bfloat noundef [[RS1:%.*]], i64 noundef [[VL:%.*]]) #[[ATTR0]] {
+// CHECK-RV64-NEXT: [[ENTRY:.*:]]
+// CHECK-RV64-NEXT: [[TMP0:%.*]] = call <vscale x 8 x float> @llvm.riscv.vfwsub.mask.nxv8f32.nxv8bf16.bf16.i64(<vscale x 8 x float> poison, <vscale x 8 x bfloat> [[VS2]], bfloat [[RS1]], <vscale x 8 x i1> [[VM]], i64 0, i64 [[VL]], i64 3)
+// CHECK-RV64-NEXT: ret <vscale x 8 x float> [[TMP0]]
+//
+vfloat32m4_t test_vfwsub_vf_bf16m2_f32m4_rm_m(vbool8_t vm, vbfloat16m2_t vs2,
+ __bf16 rs1, size_t vl) {
+ return __riscv_vfwsub_vf_bf16m2_f32m4_rm_m(vm, vs2, rs1, __RISCV_FRM_RNE, vl);
+}
+
+// CHECK-RV64-LABEL: define dso_local <vscale x 8 x float> @test_vfwsub_wv_bf16m2_f32m4_rm_m(
+// CHECK-RV64-SAME: <vscale x 8 x i1> [[VM:%.*]], <vscale x 8 x float> [[VS2:%.*]], <vscale x 8 x bfloat> [[VS1:%.*]], i64 noundef [[VL:%.*]]) #[[ATTR0]] {
+// CHECK-RV64-NEXT: [[ENTRY:.*:]]
+// CHECK-RV64-NEXT: [[TMP0:%.*]] = call <vscale x 8 x float> @llvm.riscv.vfwsub.w.mask.nxv8f32.nxv8bf16.i64(<vscale x 8 x float> poison, <vscale x 8 x float> [[VS2]], <vscale x 8 x bfloat> [[VS1]], <vscale x 8 x i1> [[VM]], i64 0, i64 [[VL]], i64 3)
+// CHECK-RV64-NEXT: ret <vscale x 8 x float> [[TMP0]]
+//
+vfloat32m4_t test_vfwsub_wv_bf16m2_f32m4_rm_m(vbool8_t vm, vfloat32m4_t vs2,
+ vbfloat16m2_t vs1, size_t vl) {
+ return __riscv_vfwsub_wv_bf16m2_f32m4_rm_m(vm, vs2, vs1, __RISCV_FRM_RNE, vl);
+}
+
+// CHECK-RV64-LABEL: define dso_local <vscale x 8 x float> @test_vfwsub_wf_bf16_f32m4_rm_m(
+// CHECK-RV64-SAME: <vscale x 8 x i1> [[VM:%.*]], <vscale x 8 x float> [[VS2:%.*]], bfloat noundef [[RS1:%.*]], i64 noundef [[VL:%.*]]) #[[ATTR0]] {
+// CHECK-RV64-NEXT: [[ENTRY:.*:]]
+// CHECK-RV64-NEXT: [[TMP0:%.*]] = call <vscale x 8 x float> @llvm.riscv.vfwsub.w.mask.nxv8f32.bf16.i64(<vscale x 8 x float> poison, <vscale x 8 x float> [[VS2]], bfloat [[RS1]], <vscale x 8 x i1> [[VM]], i64 0, i64 [[VL]], i64 3)
+// CHECK-RV64-NEXT: ret <vscale x 8 x float> [[TMP0]]
+//
+vfloat32m4_t test_vfwsub_wf_bf16_f32m4_rm_m(vbool8_t vm, vfloat32m4_t vs2,
+ __bf16 rs1, size_t vl) {
+ return __riscv_vfwsub_wf_bf16_f32m4_rm_m(vm, vs2, rs1, __RISCV_FRM_RNE, vl);
+}
+
+// CHECK-RV64-LABEL: define dso_local <vscale x 16 x float> @test_vfwsub_vv_bf16m4_f32m8_rm_m(
+// CHECK-RV64-SAME: <vscale x 16 x i1> [[VM:%.*]], <vscale x 16 x bfloat> [[VS2:%.*]], <vscale x 16 x bfloat> [[VS1:%.*]], i64 noundef [[VL:%.*]]) #[[ATTR0]] {
+// CHECK-RV64-NEXT: [[ENTRY:.*:]]
+// CHECK-RV64-NEXT: [[TMP0:%.*]] = call <vscale x 16 x float> @llvm.riscv.vfwsub.mask.nxv16f32.nxv16bf16.nxv16bf16.i64(<vscale x 16 x float> poison, <vscale x 16 x bfloat> [[VS2]], <vscale x 16 x bfloat> [[VS1]], <vscale x 16 x i1> [[VM]], i64 0, i64 [[VL]], i64 3)
+// CHECK-RV64-NEXT: ret <vscale x 16 x float> [[TMP0]]
+//
+vfloat32m8_t test_vfwsub_vv_bf16m4_f32m8_rm_m(vbool4_t vm, vbfloat16m4_t vs2,
+ vbfloat16m4_t vs1, size_t vl) {
+ return __riscv_vfwsub_vv_bf16m4_f32m8_rm_m(vm, vs2, vs1, __RISCV_FRM_RNE, vl);
+}
+
+// CHECK-RV64-LABEL: define dso_local <vscale x 16 x float> @test_vfwsub_vf_bf16m4_f32m8_rm_m(
+// CHECK-RV64-SAME: <vscale x 16 x i1> [[VM:%.*]], <vscale x 16 x bfloat> [[VS2:%.*]], bfloat noundef [[RS1:%.*]], i64 noundef [[VL:%.*]]) #[[ATTR0]] {
+// CHECK-RV64-NEXT: [[ENTRY:.*:]]
+// CHECK-RV64-NEXT: [[TMP0:%.*]] = call <vscale x 16 x float> @llvm.riscv.vfwsub.mask.nxv16f32.nxv16bf16.bf16.i64(<vscale x 16 x float> poison, <vscale x 16 x bfloat> [[VS2]], bfloat [[RS1]], <vscale x 16 x i1> [[VM]], i64 0, i64 [[VL]], i64 3)
+// CHECK-RV64-NEXT: ret <vscale x 16 x float> [[TMP0]]
+//
+vfloat32m8_t test_vfwsub_vf_bf16m4_f32m8_rm_m(vbool4_t vm, vbfloat16m4_t vs2,
+ __bf16 rs1, size_t vl) {
+ return __riscv_vfwsub_vf_bf16m4_f32m8_rm_m(vm, vs2, rs1, __RISCV_FRM_RNE, vl);
+}
+
+// CHECK-RV64-LABEL: define dso_local <vscale x 16 x float> @test_vfwsub_wv_bf16m4_f32m8_rm_m(
+// CHECK-RV64-SAME: <vscale x 16 x i1> [[VM:%.*]], <vscale x 16 x float> [[VS2:%.*]], <vscale x 16 x bfloat> [[VS1:%.*]], i64 noundef [[VL:%.*]]) #[[ATTR0]] {
+// CHECK-RV64-NEXT: [[ENTRY:.*:]]
+// CHECK-RV64-NEXT: [[TMP0:%.*]] = call <vscale x 16 x float> @llvm.riscv.vfwsub.w.mask.nxv16f32.nxv16bf16.i64(<vscale x 16 x float> poison, <vscale x 16 x float> [[VS2]], <vscale x 16 x bfloat> [[VS1]], <vscale x 16 x i1> [[VM]], i64 0, i64 [[VL]], i64 3)
+// CHECK-RV64-NEXT: ret <vscale x 16 x float> [[TMP0]]
+//
+vfloat32m8_t test_vfwsub_wv_bf16m4_f32m8_rm_m(vbool4_t vm, vfloat32m8_t vs2,
+ vbfloat16m4_t vs1, size_t vl) {
+ return __riscv_vfwsub_wv_bf16m4_f32m8_rm_m(vm, vs2, vs1, __RISCV_FRM_RNE, vl);
+}
+
+// CHECK-RV64-LABEL: define dso_local <vscale x 16 x float> @test_vfwsub_wf_bf16_f32m8_rm_m(
+// CHECK-RV64-SAME: <vscale x 16 x i1> [[VM:%.*]], <vscale x 16 x float> [[VS2:%.*]], bfloat noundef [[RS1:%.*]], i64 noundef [[VL:%.*]]) #[[ATTR0]] {
+// CHECK-RV64-NEXT: [[ENTRY:.*:]]
+// CHECK-RV64-NEXT: [[TMP0:%.*]] = call <vscale x 16 x float> @llvm.riscv.vfwsub.w.mask.nxv16f32.bf16.i64(<vscale x 16 x float> poison, <vscale x 16 x float> [[VS2]], bfloat [[RS1]], <vscale x 16 x i1> [[VM]], i64 0, i64 [[VL]], i64 3)
+// CHECK-RV64-NEXT: ret <vscale x 16 x float> [[TMP0]]
+//
+vfloat32m8_t test_vfwsub_wf_bf16_f32m8_rm_m(vbool4_t vm, vfloat32m8_t vs2,
+ __bf16 rs1, size_t vl) {
+ return __riscv_vfwsub_wf_bf16_f32m8_rm_m(vm, vs2, rs1, __RISCV_FRM_RNE, vl);
+}
diff --git a/clang/test/CodeGen/RISCV/rvv-intrinsics-autogenerated/zvfbfa/non-policy/non-overloaded/vmfeq.c b/clang/test/CodeGen/RISCV/rvv-intrinsics-autogenerated/zvfbfa/non-policy/non-overloaded/vmfeq.c
new file mode 100644
index 0000000..669d042
--- /dev/null
+++ b/clang/test/CodeGen/RISCV/rvv-intrinsics-autogenerated/zvfbfa/non-policy/non-overloaded/vmfeq.c
@@ -0,0 +1,249 @@
+// NOTE: Assertions have been autogenerated by utils/update_cc_test_checks.py UTC_ARGS: --version 5
+// REQUIRES: riscv-registered-target
+// RUN: %clang_cc1 -triple riscv64 -target-feature +v \
+// RUN: -target-feature +experimental-zvfbfa -disable-O0-optnone \
+// RUN: -emit-llvm %s -o - | opt -S -passes=mem2reg | \
+// RUN: FileCheck --check-prefix=CHECK-RV64 %s
+
+#include <riscv_vector.h>
+
+// CHECK-RV64-LABEL: define dso_local <vscale x 1 x i1> @test_vmfeq_vv_bf16mf4_b64(
+// CHECK-RV64-SAME: <vscale x 1 x bfloat> [[OP1:%.*]], <vscale x 1 x bfloat> [[OP2:%.*]], i64 noundef [[VL:%.*]]) #[[ATTR0:[0-9]+]] {
+// CHECK-RV64-NEXT: [[ENTRY:.*:]]
+// CHECK-RV64-NEXT: [[TMP0:%.*]] = call <vscale x 1 x i1> @llvm.riscv.vmfeq.nxv1bf16.nxv1bf16.i64(<vscale x 1 x bfloat> [[OP1]], <vscale x 1 x bfloat> [[OP2]], i64 [[VL]])
+// CHECK-RV64-NEXT: ret <vscale x 1 x i1> [[TMP0]]
+//
+vbool64_t test_vmfeq_vv_bf16mf4_b64(vbfloat16mf4_t op1, vbfloat16mf4_t op2, size_t vl) {
+ return __riscv_vmfeq_vv_bf16mf4_b64(op1, op2, vl);
+}
+
+// CHECK-RV64-LABEL: define dso_local <vscale x 1 x i1> @test_vmfeq_vf_bf16mf4_b64(
+// CHECK-RV64-SAME: <vscale x 1 x bfloat> [[OP1:%.*]], bfloat noundef [[OP2:%.*]], i64 noundef [[VL:%.*]]) #[[ATTR0]] {
+// CHECK-RV64-NEXT: [[ENTRY:.*:]]
+// CHECK-RV64-NEXT: [[TMP0:%.*]] = call <vscale x 1 x i1> @llvm.riscv.vmfeq.nxv1bf16.bf16.i64(<vscale x 1 x bfloat> [[OP1]], bfloat [[OP2]], i64 [[VL]])
+// CHECK-RV64-NEXT: ret <vscale x 1 x i1> [[TMP0]]
+//
+vbool64_t test_vmfeq_vf_bf16mf4_b64(vbfloat16mf4_t op1, __bf16 op2, size_t vl) {
+ return __riscv_vmfeq_vf_bf16mf4_b64(op1, op2, vl);
+}
+
+// CHECK-RV64-LABEL: define dso_local <vscale x 2 x i1> @test_vmfeq_vv_bf16mf2_b32(
+// CHECK-RV64-SAME: <vscale x 2 x bfloat> [[OP1:%.*]], <vscale x 2 x bfloat> [[OP2:%.*]], i64 noundef [[VL:%.*]]) #[[ATTR0]] {
+// CHECK-RV64-NEXT: [[ENTRY:.*:]]
+// CHECK-RV64-NEXT: [[TMP0:%.*]] = call <vscale x 2 x i1> @llvm.riscv.vmfeq.nxv2bf16.nxv2bf16.i64(<vscale x 2 x bfloat> [[OP1]], <vscale x 2 x bfloat> [[OP2]], i64 [[VL]])
+// CHECK-RV64-NEXT: ret <vscale x 2 x i1> [[TMP0]]
+//
+vbool32_t test_vmfeq_vv_bf16mf2_b32(vbfloat16mf2_t op1, vbfloat16mf2_t op2, size_t vl) {
+ return __riscv_vmfeq_vv_bf16mf2_b32(op1, op2, vl);
+}
+
+// CHECK-RV64-LABEL: define dso_local <vscale x 2 x i1> @test_vmfeq_vf_bf16mf2_b32(
+// CHECK-RV64-SAME: <vscale x 2 x bfloat> [[OP1:%.*]], bfloat noundef [[OP2:%.*]], i64 noundef [[VL:%.*]]) #[[ATTR0]] {
+// CHECK-RV64-NEXT: [[ENTRY:.*:]]
+// CHECK-RV64-NEXT: [[TMP0:%.*]] = call <vscale x 2 x i1> @llvm.riscv.vmfeq.nxv2bf16.bf16.i64(<vscale x 2 x bfloat> [[OP1]], bfloat [[OP2]], i64 [[VL]])
+// CHECK-RV64-NEXT: ret <vscale x 2 x i1> [[TMP0]]
+//
+vbool32_t test_vmfeq_vf_bf16mf2_b32(vbfloat16mf2_t op1, __bf16 op2, size_t vl) {
+ return __riscv_vmfeq_vf_bf16mf2_b32(op1, op2, vl);
+}
+
+// CHECK-RV64-LABEL: define dso_local <vscale x 4 x i1> @test_vmfeq_vv_bf16m1_b16(
+// CHECK-RV64-SAME: <vscale x 4 x bfloat> [[OP1:%.*]], <vscale x 4 x bfloat> [[OP2:%.*]], i64 noundef [[VL:%.*]]) #[[ATTR0]] {
+// CHECK-RV64-NEXT: [[ENTRY:.*:]]
+// CHECK-RV64-NEXT: [[TMP0:%.*]] = call <vscale x 4 x i1> @llvm.riscv.vmfeq.nxv4bf16.nxv4bf16.i64(<vscale x 4 x bfloat> [[OP1]], <vscale x 4 x bfloat> [[OP2]], i64 [[VL]])
+// CHECK-RV64-NEXT: ret <vscale x 4 x i1> [[TMP0]]
+//
+vbool16_t test_vmfeq_vv_bf16m1_b16(vbfloat16m1_t op1, vbfloat16m1_t op2, size_t vl) {
+ return __riscv_vmfeq_vv_bf16m1_b16(op1, op2, vl);
+}
+
+// CHECK-RV64-LABEL: define dso_local <vscale x 4 x i1> @test_vmfeq_vf_bf16m1_b16(
+// CHECK-RV64-SAME: <vscale x 4 x bfloat> [[OP1:%.*]], bfloat noundef [[OP2:%.*]], i64 noundef [[VL:%.*]]) #[[ATTR0]] {
+// CHECK-RV64-NEXT: [[ENTRY:.*:]]
+// CHECK-RV64-NEXT: [[TMP0:%.*]] = call <vscale x 4 x i1> @llvm.riscv.vmfeq.nxv4bf16.bf16.i64(<vscale x 4 x bfloat> [[OP1]], bfloat [[OP2]], i64 [[VL]])
+// CHECK-RV64-NEXT: ret <vscale x 4 x i1> [[TMP0]]
+//
+vbool16_t test_vmfeq_vf_bf16m1_b16(vbfloat16m1_t op1, __bf16 op2, size_t vl) {
+ return __riscv_vmfeq_vf_bf16m1_b16(op1, op2, vl);
+}
+
+// CHECK-RV64-LABEL: define dso_local <vscale x 8 x i1> @test_vmfeq_vv_bf16m2_b8(
+// CHECK-RV64-SAME: <vscale x 8 x bfloat> [[OP1:%.*]], <vscale x 8 x bfloat> [[OP2:%.*]], i64 noundef [[VL:%.*]]) #[[ATTR0]] {
+// CHECK-RV64-NEXT: [[ENTRY:.*:]]
+// CHECK-RV64-NEXT: [[TMP0:%.*]] = call <vscale x 8 x i1> @llvm.riscv.vmfeq.nxv8bf16.nxv8bf16.i64(<vscale x 8 x bfloat> [[OP1]], <vscale x 8 x bfloat> [[OP2]], i64 [[VL]])
+// CHECK-RV64-NEXT: ret <vscale x 8 x i1> [[TMP0]]
+//
+vbool8_t test_vmfeq_vv_bf16m2_b8(vbfloat16m2_t op1, vbfloat16m2_t op2, size_t vl) {
+ return __riscv_vmfeq_vv_bf16m2_b8(op1, op2, vl);
+}
+
+// CHECK-RV64-LABEL: define dso_local <vscale x 8 x i1> @test_vmfeq_vf_bf16m2_b8(
+// CHECK-RV64-SAME: <vscale x 8 x bfloat> [[OP1:%.*]], bfloat noundef [[OP2:%.*]], i64 noundef [[VL:%.*]]) #[[ATTR0]] {
+// CHECK-RV64-NEXT: [[ENTRY:.*:]]
+// CHECK-RV64-NEXT: [[TMP0:%.*]] = call <vscale x 8 x i1> @llvm.riscv.vmfeq.nxv8bf16.bf16.i64(<vscale x 8 x bfloat> [[OP1]], bfloat [[OP2]], i64 [[VL]])
+// CHECK-RV64-NEXT: ret <vscale x 8 x i1> [[TMP0]]
+//
+vbool8_t test_vmfeq_vf_bf16m2_b8(vbfloat16m2_t op1, __bf16 op2, size_t vl) {
+ return __riscv_vmfeq_vf_bf16m2_b8(op1, op2, vl);
+}
+
+// CHECK-RV64-LABEL: define dso_local <vscale x 16 x i1> @test_vmfeq_vv_bf16m4_b4(
+// CHECK-RV64-SAME: <vscale x 16 x bfloat> [[OP1:%.*]], <vscale x 16 x bfloat> [[OP2:%.*]], i64 noundef [[VL:%.*]]) #[[ATTR0]] {
+// CHECK-RV64-NEXT: [[ENTRY:.*:]]
+// CHECK-RV64-NEXT: [[TMP0:%.*]] = call <vscale x 16 x i1> @llvm.riscv.vmfeq.nxv16bf16.nxv16bf16.i64(<vscale x 16 x bfloat> [[OP1]], <vscale x 16 x bfloat> [[OP2]], i64 [[VL]])
+// CHECK-RV64-NEXT: ret <vscale x 16 x i1> [[TMP0]]
+//
+vbool4_t test_vmfeq_vv_bf16m4_b4(vbfloat16m4_t op1, vbfloat16m4_t op2, size_t vl) {
+ return __riscv_vmfeq_vv_bf16m4_b4(op1, op2, vl);
+}
+
+// CHECK-RV64-LABEL: define dso_local <vscale x 16 x i1> @test_vmfeq_vf_bf16m4_b4(
+// CHECK-RV64-SAME: <vscale x 16 x bfloat> [[OP1:%.*]], bfloat noundef [[OP2:%.*]], i64 noundef [[VL:%.*]]) #[[ATTR0]] {
+// CHECK-RV64-NEXT: [[ENTRY:.*:]]
+// CHECK-RV64-NEXT: [[TMP0:%.*]] = call <vscale x 16 x i1> @llvm.riscv.vmfeq.nxv16bf16.bf16.i64(<vscale x 16 x bfloat> [[OP1]], bfloat [[OP2]], i64 [[VL]])
+// CHECK-RV64-NEXT: ret <vscale x 16 x i1> [[TMP0]]
+//
+vbool4_t test_vmfeq_vf_bf16m4_b4(vbfloat16m4_t op1, __bf16 op2, size_t vl) {
+ return __riscv_vmfeq_vf_bf16m4_b4(op1, op2, vl);
+}
+
+// CHECK-RV64-LABEL: define dso_local <vscale x 32 x i1> @test_vmfeq_vv_bf16m8_b2(
+// CHECK-RV64-SAME: <vscale x 32 x bfloat> [[OP1:%.*]], <vscale x 32 x bfloat> [[OP2:%.*]], i64 noundef [[VL:%.*]]) #[[ATTR0]] {
+// CHECK-RV64-NEXT: [[ENTRY:.*:]]
+// CHECK-RV64-NEXT: [[TMP0:%.*]] = call <vscale x 32 x i1> @llvm.riscv.vmfeq.nxv32bf16.nxv32bf16.i64(<vscale x 32 x bfloat> [[OP1]], <vscale x 32 x bfloat> [[OP2]], i64 [[VL]])
+// CHECK-RV64-NEXT: ret <vscale x 32 x i1> [[TMP0]]
+//
+vbool2_t test_vmfeq_vv_bf16m8_b2(vbfloat16m8_t op1, vbfloat16m8_t op2, size_t vl) {
+ return __riscv_vmfeq_vv_bf16m8_b2(op1, op2, vl);
+}
+
+// CHECK-RV64-LABEL: define dso_local <vscale x 32 x i1> @test_vmfeq_vf_bf16m8_b2(
+// CHECK-RV64-SAME: <vscale x 32 x bfloat> [[OP1:%.*]], bfloat noundef [[OP2:%.*]], i64 noundef [[VL:%.*]]) #[[ATTR0]] {
+// CHECK-RV64-NEXT: [[ENTRY:.*:]]
+// CHECK-RV64-NEXT: [[TMP0:%.*]] = call <vscale x 32 x i1> @llvm.riscv.vmfeq.nxv32bf16.bf16.i64(<vscale x 32 x bfloat> [[OP1]], bfloat [[OP2]], i64 [[VL]])
+// CHECK-RV64-NEXT: ret <vscale x 32 x i1> [[TMP0]]
+//
+vbool2_t test_vmfeq_vf_bf16m8_b2(vbfloat16m8_t op1, __bf16 op2, size_t vl) {
+ return __riscv_vmfeq_vf_bf16m8_b2(op1, op2, vl);
+}
+
+// CHECK-RV64-LABEL: define dso_local <vscale x 1 x i1> @test_vmfeq_vv_bf16mf4_b64_m(
+// CHECK-RV64-SAME: <vscale x 1 x i1> [[MASK:%.*]], <vscale x 1 x bfloat> [[OP1:%.*]], <vscale x 1 x bfloat> [[OP2:%.*]], i64 noundef [[VL:%.*]]) #[[ATTR0]] {
+// CHECK-RV64-NEXT: [[ENTRY:.*:]]
+// CHECK-RV64-NEXT: [[TMP0:%.*]] = call <vscale x 1 x i1> @llvm.riscv.vmfeq.mask.nxv1bf16.nxv1bf16.i64(<vscale x 1 x i1> poison, <vscale x 1 x bfloat> [[OP1]], <vscale x 1 x bfloat> [[OP2]], <vscale x 1 x i1> [[MASK]], i64 [[VL]])
+// CHECK-RV64-NEXT: ret <vscale x 1 x i1> [[TMP0]]
+//
+vbool64_t test_vmfeq_vv_bf16mf4_b64_m(vbool64_t mask, vbfloat16mf4_t op1, vbfloat16mf4_t op2, size_t vl) {
+ return __riscv_vmfeq_vv_bf16mf4_b64_m(mask, op1, op2, vl);
+}
+
+// CHECK-RV64-LABEL: define dso_local <vscale x 1 x i1> @test_vmfeq_vf_bf16mf4_b64_m(
+// CHECK-RV64-SAME: <vscale x 1 x i1> [[MASK:%.*]], <vscale x 1 x bfloat> [[OP1:%.*]], bfloat noundef [[OP2:%.*]], i64 noundef [[VL:%.*]]) #[[ATTR0]] {
+// CHECK-RV64-NEXT: [[ENTRY:.*:]]
+// CHECK-RV64-NEXT: [[TMP0:%.*]] = call <vscale x 1 x i1> @llvm.riscv.vmfeq.mask.nxv1bf16.bf16.i64(<vscale x 1 x i1> poison, <vscale x 1 x bfloat> [[OP1]], bfloat [[OP2]], <vscale x 1 x i1> [[MASK]], i64 [[VL]])
+// CHECK-RV64-NEXT: ret <vscale x 1 x i1> [[TMP0]]
+//
+vbool64_t test_vmfeq_vf_bf16mf4_b64_m(vbool64_t mask, vbfloat16mf4_t op1, __bf16 op2, size_t vl) {
+ return __riscv_vmfeq_vf_bf16mf4_b64_m(mask, op1, op2, vl);
+}
+
+// CHECK-RV64-LABEL: define dso_local <vscale x 2 x i1> @test_vmfeq_vv_bf16mf2_b32_m(
+// CHECK-RV64-SAME: <vscale x 2 x i1> [[MASK:%.*]], <vscale x 2 x bfloat> [[OP1:%.*]], <vscale x 2 x bfloat> [[OP2:%.*]], i64 noundef [[VL:%.*]]) #[[ATTR0]] {
+// CHECK-RV64-NEXT: [[ENTRY:.*:]]
+// CHECK-RV64-NEXT: [[TMP0:%.*]] = call <vscale x 2 x i1> @llvm.riscv.vmfeq.mask.nxv2bf16.nxv2bf16.i64(<vscale x 2 x i1> poison, <vscale x 2 x bfloat> [[OP1]], <vscale x 2 x bfloat> [[OP2]], <vscale x 2 x i1> [[MASK]], i64 [[VL]])
+// CHECK-RV64-NEXT: ret <vscale x 2 x i1> [[TMP0]]
+//
+vbool32_t test_vmfeq_vv_bf16mf2_b32_m(vbool32_t mask, vbfloat16mf2_t op1, vbfloat16mf2_t op2, size_t vl) {
+ return __riscv_vmfeq_vv_bf16mf2_b32_m(mask, op1, op2, vl);
+}
+
+// CHECK-RV64-LABEL: define dso_local <vscale x 2 x i1> @test_vmfeq_vf_bf16mf2_b32_m(
+// CHECK-RV64-SAME: <vscale x 2 x i1> [[MASK:%.*]], <vscale x 2 x bfloat> [[OP1:%.*]], bfloat noundef [[OP2:%.*]], i64 noundef [[VL:%.*]]) #[[ATTR0]] {
+// CHECK-RV64-NEXT: [[ENTRY:.*:]]
+// CHECK-RV64-NEXT: [[TMP0:%.*]] = call <vscale x 2 x i1> @llvm.riscv.vmfeq.mask.nxv2bf16.bf16.i64(<vscale x 2 x i1> poison, <vscale x 2 x bfloat> [[OP1]], bfloat [[OP2]], <vscale x 2 x i1> [[MASK]], i64 [[VL]])
+// CHECK-RV64-NEXT: ret <vscale x 2 x i1> [[TMP0]]
+//
+vbool32_t test_vmfeq_vf_bf16mf2_b32_m(vbool32_t mask, vbfloat16mf2_t op1, __bf16 op2, size_t vl) {
+ return __riscv_vmfeq_vf_bf16mf2_b32_m(mask, op1, op2, vl);
+}
+
+// CHECK-RV64-LABEL: define dso_local <vscale x 4 x i1> @test_vmfeq_vv_bf16m1_b16_m(
+// CHECK-RV64-SAME: <vscale x 4 x i1> [[MASK:%.*]], <vscale x 4 x bfloat> [[OP1:%.*]], <vscale x 4 x bfloat> [[OP2:%.*]], i64 noundef [[VL:%.*]]) #[[ATTR0]] {
+// CHECK-RV64-NEXT: [[ENTRY:.*:]]
+// CHECK-RV64-NEXT: [[TMP0:%.*]] = call <vscale x 4 x i1> @llvm.riscv.vmfeq.mask.nxv4bf16.nxv4bf16.i64(<vscale x 4 x i1> poison, <vscale x 4 x bfloat> [[OP1]], <vscale x 4 x bfloat> [[OP2]], <vscale x 4 x i1> [[MASK]], i64 [[VL]])
+// CHECK-RV64-NEXT: ret <vscale x 4 x i1> [[TMP0]]
+//
+vbool16_t test_vmfeq_vv_bf16m1_b16_m(vbool16_t mask, vbfloat16m1_t op1, vbfloat16m1_t op2, size_t vl) {
+ return __riscv_vmfeq_vv_bf16m1_b16_m(mask, op1, op2, vl);
+}
+
+// CHECK-RV64-LABEL: define dso_local <vscale x 4 x i1> @test_vmfeq_vf_bf16m1_b16_m(
+// CHECK-RV64-SAME: <vscale x 4 x i1> [[MASK:%.*]], <vscale x 4 x bfloat> [[OP1:%.*]], bfloat noundef [[OP2:%.*]], i64 noundef [[VL:%.*]]) #[[ATTR0]] {
+// CHECK-RV64-NEXT: [[ENTRY:.*:]]
+// CHECK-RV64-NEXT: [[TMP0:%.*]] = call <vscale x 4 x i1> @llvm.riscv.vmfeq.mask.nxv4bf16.bf16.i64(<vscale x 4 x i1> poison, <vscale x 4 x bfloat> [[OP1]], bfloat [[OP2]], <vscale x 4 x i1> [[MASK]], i64 [[VL]])
+// CHECK-RV64-NEXT: ret <vscale x 4 x i1> [[TMP0]]
+//
+vbool16_t test_vmfeq_vf_bf16m1_b16_m(vbool16_t mask, vbfloat16m1_t op1, __bf16 op2, size_t vl) {
+ return __riscv_vmfeq_vf_bf16m1_b16_m(mask, op1, op2, vl);
+}
+
+// CHECK-RV64-LABEL: define dso_local <vscale x 8 x i1> @test_vmfeq_vv_bf16m2_b8_m(
+// CHECK-RV64-SAME: <vscale x 8 x i1> [[MASK:%.*]], <vscale x 8 x bfloat> [[OP1:%.*]], <vscale x 8 x bfloat> [[OP2:%.*]], i64 noundef [[VL:%.*]]) #[[ATTR0]] {
+// CHECK-RV64-NEXT: [[ENTRY:.*:]]
+// CHECK-RV64-NEXT: [[TMP0:%.*]] = call <vscale x 8 x i1> @llvm.riscv.vmfeq.mask.nxv8bf16.nxv8bf16.i64(<vscale x 8 x i1> poison, <vscale x 8 x bfloat> [[OP1]], <vscale x 8 x bfloat> [[OP2]], <vscale x 8 x i1> [[MASK]], i64 [[VL]])
+// CHECK-RV64-NEXT: ret <vscale x 8 x i1> [[TMP0]]
+//
+vbool8_t test_vmfeq_vv_bf16m2_b8_m(vbool8_t mask, vbfloat16m2_t op1, vbfloat16m2_t op2, size_t vl) {
+ return __riscv_vmfeq_vv_bf16m2_b8_m(mask, op1, op2, vl);
+}
+
+// CHECK-RV64-LABEL: define dso_local <vscale x 8 x i1> @test_vmfeq_vf_bf16m2_b8_m(
+// CHECK-RV64-SAME: <vscale x 8 x i1> [[MASK:%.*]], <vscale x 8 x bfloat> [[OP1:%.*]], bfloat noundef [[OP2:%.*]], i64 noundef [[VL:%.*]]) #[[ATTR0]] {
+// CHECK-RV64-NEXT: [[ENTRY:.*:]]
+// CHECK-RV64-NEXT: [[TMP0:%.*]] = call <vscale x 8 x i1> @llvm.riscv.vmfeq.mask.nxv8bf16.bf16.i64(<vscale x 8 x i1> poison, <vscale x 8 x bfloat> [[OP1]], bfloat [[OP2]], <vscale x 8 x i1> [[MASK]], i64 [[VL]])
+// CHECK-RV64-NEXT: ret <vscale x 8 x i1> [[TMP0]]
+//
+vbool8_t test_vmfeq_vf_bf16m2_b8_m(vbool8_t mask, vbfloat16m2_t op1, __bf16 op2, size_t vl) {
+ return __riscv_vmfeq_vf_bf16m2_b8_m(mask, op1, op2, vl);
+}
+
+// CHECK-RV64-LABEL: define dso_local <vscale x 16 x i1> @test_vmfeq_vv_bf16m4_b4_m(
+// CHECK-RV64-SAME: <vscale x 16 x i1> [[MASK:%.*]], <vscale x 16 x bfloat> [[OP1:%.*]], <vscale x 16 x bfloat> [[OP2:%.*]], i64 noundef [[VL:%.*]]) #[[ATTR0]] {
+// CHECK-RV64-NEXT: [[ENTRY:.*:]]
+// CHECK-RV64-NEXT: [[TMP0:%.*]] = call <vscale x 16 x i1> @llvm.riscv.vmfeq.mask.nxv16bf16.nxv16bf16.i64(<vscale x 16 x i1> poison, <vscale x 16 x bfloat> [[OP1]], <vscale x 16 x bfloat> [[OP2]], <vscale x 16 x i1> [[MASK]], i64 [[VL]])
+// CHECK-RV64-NEXT: ret <vscale x 16 x i1> [[TMP0]]
+//
+vbool4_t test_vmfeq_vv_bf16m4_b4_m(vbool4_t mask, vbfloat16m4_t op1, vbfloat16m4_t op2, size_t vl) {
+ return __riscv_vmfeq_vv_bf16m4_b4_m(mask, op1, op2, vl);
+}
+
+// CHECK-RV64-LABEL: define dso_local <vscale x 16 x i1> @test_vmfeq_vf_bf16m4_b4_m(
+// CHECK-RV64-SAME: <vscale x 16 x i1> [[MASK:%.*]], <vscale x 16 x bfloat> [[OP1:%.*]], bfloat noundef [[OP2:%.*]], i64 noundef [[VL:%.*]]) #[[ATTR0]] {
+// CHECK-RV64-NEXT: [[ENTRY:.*:]]
+// CHECK-RV64-NEXT: [[TMP0:%.*]] = call <vscale x 16 x i1> @llvm.riscv.vmfeq.mask.nxv16bf16.bf16.i64(<vscale x 16 x i1> poison, <vscale x 16 x bfloat> [[OP1]], bfloat [[OP2]], <vscale x 16 x i1> [[MASK]], i64 [[VL]])
+// CHECK-RV64-NEXT: ret <vscale x 16 x i1> [[TMP0]]
+//
+vbool4_t test_vmfeq_vf_bf16m4_b4_m(vbool4_t mask, vbfloat16m4_t op1, __bf16 op2, size_t vl) {
+ return __riscv_vmfeq_vf_bf16m4_b4_m(mask, op1, op2, vl);
+}
+
+// CHECK-RV64-LABEL: define dso_local <vscale x 32 x i1> @test_vmfeq_vv_bf16m8_b2_m(
+// CHECK-RV64-SAME: <vscale x 32 x i1> [[MASK:%.*]], <vscale x 32 x bfloat> [[OP1:%.*]], <vscale x 32 x bfloat> [[OP2:%.*]], i64 noundef [[VL:%.*]]) #[[ATTR0]] {
+// CHECK-RV64-NEXT: [[ENTRY:.*:]]
+// CHECK-RV64-NEXT: [[TMP0:%.*]] = call <vscale x 32 x i1> @llvm.riscv.vmfeq.mask.nxv32bf16.nxv32bf16.i64(<vscale x 32 x i1> poison, <vscale x 32 x bfloat> [[OP1]], <vscale x 32 x bfloat> [[OP2]], <vscale x 32 x i1> [[MASK]], i64 [[VL]])
+// CHECK-RV64-NEXT: ret <vscale x 32 x i1> [[TMP0]]
+//
+vbool2_t test_vmfeq_vv_bf16m8_b2_m(vbool2_t mask, vbfloat16m8_t op1, vbfloat16m8_t op2, size_t vl) {
+ return __riscv_vmfeq_vv_bf16m8_b2_m(mask, op1, op2, vl);
+}
+
+// CHECK-RV64-LABEL: define dso_local <vscale x 32 x i1> @test_vmfeq_vf_bf16m8_b2_m(
+// CHECK-RV64-SAME: <vscale x 32 x i1> [[MASK:%.*]], <vscale x 32 x bfloat> [[OP1:%.*]], bfloat noundef [[OP2:%.*]], i64 noundef [[VL:%.*]]) #[[ATTR0]] {
+// CHECK-RV64-NEXT: [[ENTRY:.*:]]
+// CHECK-RV64-NEXT: [[TMP0:%.*]] = call <vscale x 32 x i1> @llvm.riscv.vmfeq.mask.nxv32bf16.bf16.i64(<vscale x 32 x i1> poison, <vscale x 32 x bfloat> [[OP1]], bfloat [[OP2]], <vscale x 32 x i1> [[MASK]], i64 [[VL]])
+// CHECK-RV64-NEXT: ret <vscale x 32 x i1> [[TMP0]]
+//
+vbool2_t test_vmfeq_vf_bf16m8_b2_m(vbool2_t mask, vbfloat16m8_t op1, __bf16 op2, size_t vl) {
+ return __riscv_vmfeq_vf_bf16m8_b2_m(mask, op1, op2, vl);
+}
+
diff --git a/clang/test/CodeGen/RISCV/rvv-intrinsics-autogenerated/zvfbfa/non-policy/non-overloaded/vmfge.c b/clang/test/CodeGen/RISCV/rvv-intrinsics-autogenerated/zvfbfa/non-policy/non-overloaded/vmfge.c
new file mode 100644
index 0000000..b169efd
--- /dev/null
+++ b/clang/test/CodeGen/RISCV/rvv-intrinsics-autogenerated/zvfbfa/non-policy/non-overloaded/vmfge.c
@@ -0,0 +1,249 @@
+// NOTE: Assertions have been autogenerated by utils/update_cc_test_checks.py UTC_ARGS: --version 5
+// REQUIRES: riscv-registered-target
+// RUN: %clang_cc1 -triple riscv64 -target-feature +v \
+// RUN: -target-feature +experimental-zvfbfa -disable-O0-optnone \
+// RUN: -emit-llvm %s -o - | opt -S -passes=mem2reg | \
+// RUN: FileCheck --check-prefix=CHECK-RV64 %s
+
+#include <riscv_vector.h>
+
+// CHECK-RV64-LABEL: define dso_local <vscale x 1 x i1> @test_vmfge_vv_bf16mf4_b64(
+// CHECK-RV64-SAME: <vscale x 1 x bfloat> [[OP1:%.*]], <vscale x 1 x bfloat> [[OP2:%.*]], i64 noundef [[VL:%.*]]) #[[ATTR0:[0-9]+]] {
+// CHECK-RV64-NEXT: [[ENTRY:.*:]]
+// CHECK-RV64-NEXT: [[TMP0:%.*]] = call <vscale x 1 x i1> @llvm.riscv.vmfge.nxv1bf16.nxv1bf16.i64(<vscale x 1 x bfloat> [[OP1]], <vscale x 1 x bfloat> [[OP2]], i64 [[VL]])
+// CHECK-RV64-NEXT: ret <vscale x 1 x i1> [[TMP0]]
+//
+vbool64_t test_vmfge_vv_bf16mf4_b64(vbfloat16mf4_t op1, vbfloat16mf4_t op2, size_t vl) {
+ return __riscv_vmfge_vv_bf16mf4_b64(op1, op2, vl);
+}
+
+// CHECK-RV64-LABEL: define dso_local <vscale x 1 x i1> @test_vmfge_vf_bf16mf4_b64(
+// CHECK-RV64-SAME: <vscale x 1 x bfloat> [[OP1:%.*]], bfloat noundef [[OP2:%.*]], i64 noundef [[VL:%.*]]) #[[ATTR0]] {
+// CHECK-RV64-NEXT: [[ENTRY:.*:]]
+// CHECK-RV64-NEXT: [[TMP0:%.*]] = call <vscale x 1 x i1> @llvm.riscv.vmfge.nxv1bf16.bf16.i64(<vscale x 1 x bfloat> [[OP1]], bfloat [[OP2]], i64 [[VL]])
+// CHECK-RV64-NEXT: ret <vscale x 1 x i1> [[TMP0]]
+//
+vbool64_t test_vmfge_vf_bf16mf4_b64(vbfloat16mf4_t op1, __bf16 op2, size_t vl) {
+ return __riscv_vmfge_vf_bf16mf4_b64(op1, op2, vl);
+}
+
+// CHECK-RV64-LABEL: define dso_local <vscale x 2 x i1> @test_vmfge_vv_bf16mf2_b32(
+// CHECK-RV64-SAME: <vscale x 2 x bfloat> [[OP1:%.*]], <vscale x 2 x bfloat> [[OP2:%.*]], i64 noundef [[VL:%.*]]) #[[ATTR0]] {
+// CHECK-RV64-NEXT: [[ENTRY:.*:]]
+// CHECK-RV64-NEXT: [[TMP0:%.*]] = call <vscale x 2 x i1> @llvm.riscv.vmfge.nxv2bf16.nxv2bf16.i64(<vscale x 2 x bfloat> [[OP1]], <vscale x 2 x bfloat> [[OP2]], i64 [[VL]])
+// CHECK-RV64-NEXT: ret <vscale x 2 x i1> [[TMP0]]
+//
+vbool32_t test_vmfge_vv_bf16mf2_b32(vbfloat16mf2_t op1, vbfloat16mf2_t op2, size_t vl) {
+ return __riscv_vmfge_vv_bf16mf2_b32(op1, op2, vl);
+}
+
+// CHECK-RV64-LABEL: define dso_local <vscale x 2 x i1> @test_vmfge_vf_bf16mf2_b32(
+// CHECK-RV64-SAME: <vscale x 2 x bfloat> [[OP1:%.*]], bfloat noundef [[OP2:%.*]], i64 noundef [[VL:%.*]]) #[[ATTR0]] {
+// CHECK-RV64-NEXT: [[ENTRY:.*:]]
+// CHECK-RV64-NEXT: [[TMP0:%.*]] = call <vscale x 2 x i1> @llvm.riscv.vmfge.nxv2bf16.bf16.i64(<vscale x 2 x bfloat> [[OP1]], bfloat [[OP2]], i64 [[VL]])
+// CHECK-RV64-NEXT: ret <vscale x 2 x i1> [[TMP0]]
+//
+vbool32_t test_vmfge_vf_bf16mf2_b32(vbfloat16mf2_t op1, __bf16 op2, size_t vl) {
+ return __riscv_vmfge_vf_bf16mf2_b32(op1, op2, vl);
+}
+
+// CHECK-RV64-LABEL: define dso_local <vscale x 4 x i1> @test_vmfge_vv_bf16m1_b16(
+// CHECK-RV64-SAME: <vscale x 4 x bfloat> [[OP1:%.*]], <vscale x 4 x bfloat> [[OP2:%.*]], i64 noundef [[VL:%.*]]) #[[ATTR0]] {
+// CHECK-RV64-NEXT: [[ENTRY:.*:]]
+// CHECK-RV64-NEXT: [[TMP0:%.*]] = call <vscale x 4 x i1> @llvm.riscv.vmfge.nxv4bf16.nxv4bf16.i64(<vscale x 4 x bfloat> [[OP1]], <vscale x 4 x bfloat> [[OP2]], i64 [[VL]])
+// CHECK-RV64-NEXT: ret <vscale x 4 x i1> [[TMP0]]
+//
+vbool16_t test_vmfge_vv_bf16m1_b16(vbfloat16m1_t op1, vbfloat16m1_t op2, size_t vl) {
+ return __riscv_vmfge_vv_bf16m1_b16(op1, op2, vl);
+}
+
+// CHECK-RV64-LABEL: define dso_local <vscale x 4 x i1> @test_vmfge_vf_bf16m1_b16(
+// CHECK-RV64-SAME: <vscale x 4 x bfloat> [[OP1:%.*]], bfloat noundef [[OP2:%.*]], i64 noundef [[VL:%.*]]) #[[ATTR0]] {
+// CHECK-RV64-NEXT: [[ENTRY:.*:]]
+// CHECK-RV64-NEXT: [[TMP0:%.*]] = call <vscale x 4 x i1> @llvm.riscv.vmfge.nxv4bf16.bf16.i64(<vscale x 4 x bfloat> [[OP1]], bfloat [[OP2]], i64 [[VL]])
+// CHECK-RV64-NEXT: ret <vscale x 4 x i1> [[TMP0]]
+//
+vbool16_t test_vmfge_vf_bf16m1_b16(vbfloat16m1_t op1, __bf16 op2, size_t vl) {
+ return __riscv_vmfge_vf_bf16m1_b16(op1, op2, vl);
+}
+
+// CHECK-RV64-LABEL: define dso_local <vscale x 8 x i1> @test_vmfge_vv_bf16m2_b8(
+// CHECK-RV64-SAME: <vscale x 8 x bfloat> [[OP1:%.*]], <vscale x 8 x bfloat> [[OP2:%.*]], i64 noundef [[VL:%.*]]) #[[ATTR0]] {
+// CHECK-RV64-NEXT: [[ENTRY:.*:]]
+// CHECK-RV64-NEXT: [[TMP0:%.*]] = call <vscale x 8 x i1> @llvm.riscv.vmfge.nxv8bf16.nxv8bf16.i64(<vscale x 8 x bfloat> [[OP1]], <vscale x 8 x bfloat> [[OP2]], i64 [[VL]])
+// CHECK-RV64-NEXT: ret <vscale x 8 x i1> [[TMP0]]
+//
+vbool8_t test_vmfge_vv_bf16m2_b8(vbfloat16m2_t op1, vbfloat16m2_t op2, size_t vl) {
+ return __riscv_vmfge_vv_bf16m2_b8(op1, op2, vl);
+}
+
+// CHECK-RV64-LABEL: define dso_local <vscale x 8 x i1> @test_vmfge_vf_bf16m2_b8(
+// CHECK-RV64-SAME: <vscale x 8 x bfloat> [[OP1:%.*]], bfloat noundef [[OP2:%.*]], i64 noundef [[VL:%.*]]) #[[ATTR0]] {
+// CHECK-RV64-NEXT: [[ENTRY:.*:]]
+// CHECK-RV64-NEXT: [[TMP0:%.*]] = call <vscale x 8 x i1> @llvm.riscv.vmfge.nxv8bf16.bf16.i64(<vscale x 8 x bfloat> [[OP1]], bfloat [[OP2]], i64 [[VL]])
+// CHECK-RV64-NEXT: ret <vscale x 8 x i1> [[TMP0]]
+//
+vbool8_t test_vmfge_vf_bf16m2_b8(vbfloat16m2_t op1, __bf16 op2, size_t vl) {
+ return __riscv_vmfge_vf_bf16m2_b8(op1, op2, vl);
+}
+
+// CHECK-RV64-LABEL: define dso_local <vscale x 16 x i1> @test_vmfge_vv_bf16m4_b4(
+// CHECK-RV64-SAME: <vscale x 16 x bfloat> [[OP1:%.*]], <vscale x 16 x bfloat> [[OP2:%.*]], i64 noundef [[VL:%.*]]) #[[ATTR0]] {
+// CHECK-RV64-NEXT: [[ENTRY:.*:]]
+// CHECK-RV64-NEXT: [[TMP0:%.*]] = call <vscale x 16 x i1> @llvm.riscv.vmfge.nxv16bf16.nxv16bf16.i64(<vscale x 16 x bfloat> [[OP1]], <vscale x 16 x bfloat> [[OP2]], i64 [[VL]])
+// CHECK-RV64-NEXT: ret <vscale x 16 x i1> [[TMP0]]
+//
+vbool4_t test_vmfge_vv_bf16m4_b4(vbfloat16m4_t op1, vbfloat16m4_t op2, size_t vl) {
+ return __riscv_vmfge_vv_bf16m4_b4(op1, op2, vl);
+}
+
+// CHECK-RV64-LABEL: define dso_local <vscale x 16 x i1> @test_vmfge_vf_bf16m4_b4(
+// CHECK-RV64-SAME: <vscale x 16 x bfloat> [[OP1:%.*]], bfloat noundef [[OP2:%.*]], i64 noundef [[VL:%.*]]) #[[ATTR0]] {
+// CHECK-RV64-NEXT: [[ENTRY:.*:]]
+// CHECK-RV64-NEXT: [[TMP0:%.*]] = call <vscale x 16 x i1> @llvm.riscv.vmfge.nxv16bf16.bf16.i64(<vscale x 16 x bfloat> [[OP1]], bfloat [[OP2]], i64 [[VL]])
+// CHECK-RV64-NEXT: ret <vscale x 16 x i1> [[TMP0]]
+//
+vbool4_t test_vmfge_vf_bf16m4_b4(vbfloat16m4_t op1, __bf16 op2, size_t vl) {
+ return __riscv_vmfge_vf_bf16m4_b4(op1, op2, vl);
+}
+
+// CHECK-RV64-LABEL: define dso_local <vscale x 32 x i1> @test_vmfge_vv_bf16m8_b2(
+// CHECK-RV64-SAME: <vscale x 32 x bfloat> [[OP1:%.*]], <vscale x 32 x bfloat> [[OP2:%.*]], i64 noundef [[VL:%.*]]) #[[ATTR0]] {
+// CHECK-RV64-NEXT: [[ENTRY:.*:]]
+// CHECK-RV64-NEXT: [[TMP0:%.*]] = call <vscale x 32 x i1> @llvm.riscv.vmfge.nxv32bf16.nxv32bf16.i64(<vscale x 32 x bfloat> [[OP1]], <vscale x 32 x bfloat> [[OP2]], i64 [[VL]])
+// CHECK-RV64-NEXT: ret <vscale x 32 x i1> [[TMP0]]
+//
+vbool2_t test_vmfge_vv_bf16m8_b2(vbfloat16m8_t op1, vbfloat16m8_t op2, size_t vl) {
+ return __riscv_vmfge_vv_bf16m8_b2(op1, op2, vl);
+}
+
+// CHECK-RV64-LABEL: define dso_local <vscale x 32 x i1> @test_vmfge_vf_bf16m8_b2(
+// CHECK-RV64-SAME: <vscale x 32 x bfloat> [[OP1:%.*]], bfloat noundef [[OP2:%.*]], i64 noundef [[VL:%.*]]) #[[ATTR0]] {
+// CHECK-RV64-NEXT: [[ENTRY:.*:]]
+// CHECK-RV64-NEXT: [[TMP0:%.*]] = call <vscale x 32 x i1> @llvm.riscv.vmfge.nxv32bf16.bf16.i64(<vscale x 32 x bfloat> [[OP1]], bfloat [[OP2]], i64 [[VL]])
+// CHECK-RV64-NEXT: ret <vscale x 32 x i1> [[TMP0]]
+//
+vbool2_t test_vmfge_vf_bf16m8_b2(vbfloat16m8_t op1, __bf16 op2, size_t vl) {
+ return __riscv_vmfge_vf_bf16m8_b2(op1, op2, vl);
+}
+
+// CHECK-RV64-LABEL: define dso_local <vscale x 1 x i1> @test_vmfge_vv_bf16mf4_b64_m(
+// CHECK-RV64-SAME: <vscale x 1 x i1> [[MASK:%.*]], <vscale x 1 x bfloat> [[OP1:%.*]], <vscale x 1 x bfloat> [[OP2:%.*]], i64 noundef [[VL:%.*]]) #[[ATTR0]] {
+// CHECK-RV64-NEXT: [[ENTRY:.*:]]
+// CHECK-RV64-NEXT: [[TMP0:%.*]] = call <vscale x 1 x i1> @llvm.riscv.vmfge.mask.nxv1bf16.nxv1bf16.i64(<vscale x 1 x i1> poison, <vscale x 1 x bfloat> [[OP1]], <vscale x 1 x bfloat> [[OP2]], <vscale x 1 x i1> [[MASK]], i64 [[VL]])
+// CHECK-RV64-NEXT: ret <vscale x 1 x i1> [[TMP0]]
+//
+vbool64_t test_vmfge_vv_bf16mf4_b64_m(vbool64_t mask, vbfloat16mf4_t op1, vbfloat16mf4_t op2, size_t vl) {
+ return __riscv_vmfge_vv_bf16mf4_b64_m(mask, op1, op2, vl);
+}
+
+// CHECK-RV64-LABEL: define dso_local <vscale x 1 x i1> @test_vmfge_vf_bf16mf4_b64_m(
+// CHECK-RV64-SAME: <vscale x 1 x i1> [[MASK:%.*]], <vscale x 1 x bfloat> [[OP1:%.*]], bfloat noundef [[OP2:%.*]], i64 noundef [[VL:%.*]]) #[[ATTR0]] {
+// CHECK-RV64-NEXT: [[ENTRY:.*:]]
+// CHECK-RV64-NEXT: [[TMP0:%.*]] = call <vscale x 1 x i1> @llvm.riscv.vmfge.mask.nxv1bf16.bf16.i64(<vscale x 1 x i1> poison, <vscale x 1 x bfloat> [[OP1]], bfloat [[OP2]], <vscale x 1 x i1> [[MASK]], i64 [[VL]])
+// CHECK-RV64-NEXT: ret <vscale x 1 x i1> [[TMP0]]
+//
+vbool64_t test_vmfge_vf_bf16mf4_b64_m(vbool64_t mask, vbfloat16mf4_t op1, __bf16 op2, size_t vl) {
+ return __riscv_vmfge_vf_bf16mf4_b64_m(mask, op1, op2, vl);
+}
+
+// CHECK-RV64-LABEL: define dso_local <vscale x 2 x i1> @test_vmfge_vv_bf16mf2_b32_m(
+// CHECK-RV64-SAME: <vscale x 2 x i1> [[MASK:%.*]], <vscale x 2 x bfloat> [[OP1:%.*]], <vscale x 2 x bfloat> [[OP2:%.*]], i64 noundef [[VL:%.*]]) #[[ATTR0]] {
+// CHECK-RV64-NEXT: [[ENTRY:.*:]]
+// CHECK-RV64-NEXT: [[TMP0:%.*]] = call <vscale x 2 x i1> @llvm.riscv.vmfge.mask.nxv2bf16.nxv2bf16.i64(<vscale x 2 x i1> poison, <vscale x 2 x bfloat> [[OP1]], <vscale x 2 x bfloat> [[OP2]], <vscale x 2 x i1> [[MASK]], i64 [[VL]])
+// CHECK-RV64-NEXT: ret <vscale x 2 x i1> [[TMP0]]
+//
+vbool32_t test_vmfge_vv_bf16mf2_b32_m(vbool32_t mask, vbfloat16mf2_t op1, vbfloat16mf2_t op2, size_t vl) {
+ return __riscv_vmfge_vv_bf16mf2_b32_m(mask, op1, op2, vl);
+}
+
+// CHECK-RV64-LABEL: define dso_local <vscale x 2 x i1> @test_vmfge_vf_bf16mf2_b32_m(
+// CHECK-RV64-SAME: <vscale x 2 x i1> [[MASK:%.*]], <vscale x 2 x bfloat> [[OP1:%.*]], bfloat noundef [[OP2:%.*]], i64 noundef [[VL:%.*]]) #[[ATTR0]] {
+// CHECK-RV64-NEXT: [[ENTRY:.*:]]
+// CHECK-RV64-NEXT: [[TMP0:%.*]] = call <vscale x 2 x i1> @llvm.riscv.vmfge.mask.nxv2bf16.bf16.i64(<vscale x 2 x i1> poison, <vscale x 2 x bfloat> [[OP1]], bfloat [[OP2]], <vscale x 2 x i1> [[MASK]], i64 [[VL]])
+// CHECK-RV64-NEXT: ret <vscale x 2 x i1> [[TMP0]]
+//
+vbool32_t test_vmfge_vf_bf16mf2_b32_m(vbool32_t mask, vbfloat16mf2_t op1, __bf16 op2, size_t vl) {
+ return __riscv_vmfge_vf_bf16mf2_b32_m(mask, op1, op2, vl);
+}
+
+// CHECK-RV64-LABEL: define dso_local <vscale x 4 x i1> @test_vmfge_vv_bf16m1_b16_m(
+// CHECK-RV64-SAME: <vscale x 4 x i1> [[MASK:%.*]], <vscale x 4 x bfloat> [[OP1:%.*]], <vscale x 4 x bfloat> [[OP2:%.*]], i64 noundef [[VL:%.*]]) #[[ATTR0]] {
+// CHECK-RV64-NEXT: [[ENTRY:.*:]]
+// CHECK-RV64-NEXT: [[TMP0:%.*]] = call <vscale x 4 x i1> @llvm.riscv.vmfge.mask.nxv4bf16.nxv4bf16.i64(<vscale x 4 x i1> poison, <vscale x 4 x bfloat> [[OP1]], <vscale x 4 x bfloat> [[OP2]], <vscale x 4 x i1> [[MASK]], i64 [[VL]])
+// CHECK-RV64-NEXT: ret <vscale x 4 x i1> [[TMP0]]
+//
+vbool16_t test_vmfge_vv_bf16m1_b16_m(vbool16_t mask, vbfloat16m1_t op1, vbfloat16m1_t op2, size_t vl) {
+ return __riscv_vmfge_vv_bf16m1_b16_m(mask, op1, op2, vl);
+}
+
+// CHECK-RV64-LABEL: define dso_local <vscale x 4 x i1> @test_vmfge_vf_bf16m1_b16_m(
+// CHECK-RV64-SAME: <vscale x 4 x i1> [[MASK:%.*]], <vscale x 4 x bfloat> [[OP1:%.*]], bfloat noundef [[OP2:%.*]], i64 noundef [[VL:%.*]]) #[[ATTR0]] {
+// CHECK-RV64-NEXT: [[ENTRY:.*:]]
+// CHECK-RV64-NEXT: [[TMP0:%.*]] = call <vscale x 4 x i1> @llvm.riscv.vmfge.mask.nxv4bf16.bf16.i64(<vscale x 4 x i1> poison, <vscale x 4 x bfloat> [[OP1]], bfloat [[OP2]], <vscale x 4 x i1> [[MASK]], i64 [[VL]])
+// CHECK-RV64-NEXT: ret <vscale x 4 x i1> [[TMP0]]
+//
+vbool16_t test_vmfge_vf_bf16m1_b16_m(vbool16_t mask, vbfloat16m1_t op1, __bf16 op2, size_t vl) {
+ return __riscv_vmfge_vf_bf16m1_b16_m(mask, op1, op2, vl);
+}
+
+// CHECK-RV64-LABEL: define dso_local <vscale x 8 x i1> @test_vmfge_vv_bf16m2_b8_m(
+// CHECK-RV64-SAME: <vscale x 8 x i1> [[MASK:%.*]], <vscale x 8 x bfloat> [[OP1:%.*]], <vscale x 8 x bfloat> [[OP2:%.*]], i64 noundef [[VL:%.*]]) #[[ATTR0]] {
+// CHECK-RV64-NEXT: [[ENTRY:.*:]]
+// CHECK-RV64-NEXT: [[TMP0:%.*]] = call <vscale x 8 x i1> @llvm.riscv.vmfge.mask.nxv8bf16.nxv8bf16.i64(<vscale x 8 x i1> poison, <vscale x 8 x bfloat> [[OP1]], <vscale x 8 x bfloat> [[OP2]], <vscale x 8 x i1> [[MASK]], i64 [[VL]])
+// CHECK-RV64-NEXT: ret <vscale x 8 x i1> [[TMP0]]
+//
+vbool8_t test_vmfge_vv_bf16m2_b8_m(vbool8_t mask, vbfloat16m2_t op1, vbfloat16m2_t op2, size_t vl) {
+ return __riscv_vmfge_vv_bf16m2_b8_m(mask, op1, op2, vl);
+}
+
+// CHECK-RV64-LABEL: define dso_local <vscale x 8 x i1> @test_vmfge_vf_bf16m2_b8_m(
+// CHECK-RV64-SAME: <vscale x 8 x i1> [[MASK:%.*]], <vscale x 8 x bfloat> [[OP1:%.*]], bfloat noundef [[OP2:%.*]], i64 noundef [[VL:%.*]]) #[[ATTR0]] {
+// CHECK-RV64-NEXT: [[ENTRY:.*:]]
+// CHECK-RV64-NEXT: [[TMP0:%.*]] = call <vscale x 8 x i1> @llvm.riscv.vmfge.mask.nxv8bf16.bf16.i64(<vscale x 8 x i1> poison, <vscale x 8 x bfloat> [[OP1]], bfloat [[OP2]], <vscale x 8 x i1> [[MASK]], i64 [[VL]])
+// CHECK-RV64-NEXT: ret <vscale x 8 x i1> [[TMP0]]
+//
+vbool8_t test_vmfge_vf_bf16m2_b8_m(vbool8_t mask, vbfloat16m2_t op1, __bf16 op2, size_t vl) {
+ return __riscv_vmfge_vf_bf16m2_b8_m(mask, op1, op2, vl);
+}
+
+// CHECK-RV64-LABEL: define dso_local <vscale x 16 x i1> @test_vmfge_vv_bf16m4_b4_m(
+// CHECK-RV64-SAME: <vscale x 16 x i1> [[MASK:%.*]], <vscale x 16 x bfloat> [[OP1:%.*]], <vscale x 16 x bfloat> [[OP2:%.*]], i64 noundef [[VL:%.*]]) #[[ATTR0]] {
+// CHECK-RV64-NEXT: [[ENTRY:.*:]]
+// CHECK-RV64-NEXT: [[TMP0:%.*]] = call <vscale x 16 x i1> @llvm.riscv.vmfge.mask.nxv16bf16.nxv16bf16.i64(<vscale x 16 x i1> poison, <vscale x 16 x bfloat> [[OP1]], <vscale x 16 x bfloat> [[OP2]], <vscale x 16 x i1> [[MASK]], i64 [[VL]])
+// CHECK-RV64-NEXT: ret <vscale x 16 x i1> [[TMP0]]
+//
+vbool4_t test_vmfge_vv_bf16m4_b4_m(vbool4_t mask, vbfloat16m4_t op1, vbfloat16m4_t op2, size_t vl) {
+ return __riscv_vmfge_vv_bf16m4_b4_m(mask, op1, op2, vl);
+}
+
+// CHECK-RV64-LABEL: define dso_local <vscale x 16 x i1> @test_vmfge_vf_bf16m4_b4_m(
+// CHECK-RV64-SAME: <vscale x 16 x i1> [[MASK:%.*]], <vscale x 16 x bfloat> [[OP1:%.*]], bfloat noundef [[OP2:%.*]], i64 noundef [[VL:%.*]]) #[[ATTR0]] {
+// CHECK-RV64-NEXT: [[ENTRY:.*:]]
+// CHECK-RV64-NEXT: [[TMP0:%.*]] = call <vscale x 16 x i1> @llvm.riscv.vmfge.mask.nxv16bf16.bf16.i64(<vscale x 16 x i1> poison, <vscale x 16 x bfloat> [[OP1]], bfloat [[OP2]], <vscale x 16 x i1> [[MASK]], i64 [[VL]])
+// CHECK-RV64-NEXT: ret <vscale x 16 x i1> [[TMP0]]
+//
+vbool4_t test_vmfge_vf_bf16m4_b4_m(vbool4_t mask, vbfloat16m4_t op1, __bf16 op2, size_t vl) {
+ return __riscv_vmfge_vf_bf16m4_b4_m(mask, op1, op2, vl);
+}
+
+// CHECK-RV64-LABEL: define dso_local <vscale x 32 x i1> @test_vmfge_vv_bf16m8_b2_m(
+// CHECK-RV64-SAME: <vscale x 32 x i1> [[MASK:%.*]], <vscale x 32 x bfloat> [[OP1:%.*]], <vscale x 32 x bfloat> [[OP2:%.*]], i64 noundef [[VL:%.*]]) #[[ATTR0]] {
+// CHECK-RV64-NEXT: [[ENTRY:.*:]]
+// CHECK-RV64-NEXT: [[TMP0:%.*]] = call <vscale x 32 x i1> @llvm.riscv.vmfge.mask.nxv32bf16.nxv32bf16.i64(<vscale x 32 x i1> poison, <vscale x 32 x bfloat> [[OP1]], <vscale x 32 x bfloat> [[OP2]], <vscale x 32 x i1> [[MASK]], i64 [[VL]])
+// CHECK-RV64-NEXT: ret <vscale x 32 x i1> [[TMP0]]
+//
+vbool2_t test_vmfge_vv_bf16m8_b2_m(vbool2_t mask, vbfloat16m8_t op1, vbfloat16m8_t op2, size_t vl) {
+ return __riscv_vmfge_vv_bf16m8_b2_m(mask, op1, op2, vl);
+}
+
+// CHECK-RV64-LABEL: define dso_local <vscale x 32 x i1> @test_vmfge_vf_bf16m8_b2_m(
+// CHECK-RV64-SAME: <vscale x 32 x i1> [[MASK:%.*]], <vscale x 32 x bfloat> [[OP1:%.*]], bfloat noundef [[OP2:%.*]], i64 noundef [[VL:%.*]]) #[[ATTR0]] {
+// CHECK-RV64-NEXT: [[ENTRY:.*:]]
+// CHECK-RV64-NEXT: [[TMP0:%.*]] = call <vscale x 32 x i1> @llvm.riscv.vmfge.mask.nxv32bf16.bf16.i64(<vscale x 32 x i1> poison, <vscale x 32 x bfloat> [[OP1]], bfloat [[OP2]], <vscale x 32 x i1> [[MASK]], i64 [[VL]])
+// CHECK-RV64-NEXT: ret <vscale x 32 x i1> [[TMP0]]
+//
+vbool2_t test_vmfge_vf_bf16m8_b2_m(vbool2_t mask, vbfloat16m8_t op1, __bf16 op2, size_t vl) {
+ return __riscv_vmfge_vf_bf16m8_b2_m(mask, op1, op2, vl);
+}
+
diff --git a/clang/test/CodeGen/RISCV/rvv-intrinsics-autogenerated/zvfbfa/non-policy/non-overloaded/vmfgt.c b/clang/test/CodeGen/RISCV/rvv-intrinsics-autogenerated/zvfbfa/non-policy/non-overloaded/vmfgt.c
new file mode 100644
index 0000000..9aea7d2
--- /dev/null
+++ b/clang/test/CodeGen/RISCV/rvv-intrinsics-autogenerated/zvfbfa/non-policy/non-overloaded/vmfgt.c
@@ -0,0 +1,249 @@
+// NOTE: Assertions have been autogenerated by utils/update_cc_test_checks.py UTC_ARGS: --version 5
+// REQUIRES: riscv-registered-target
+// RUN: %clang_cc1 -triple riscv64 -target-feature +v \
+// RUN: -target-feature +experimental-zvfbfa -disable-O0-optnone \
+// RUN: -emit-llvm %s -o - | opt -S -passes=mem2reg | \
+// RUN: FileCheck --check-prefix=CHECK-RV64 %s
+
+#include <riscv_vector.h>
+
+// CHECK-RV64-LABEL: define dso_local <vscale x 1 x i1> @test_vmfgt_vv_bf16mf4_b64(
+// CHECK-RV64-SAME: <vscale x 1 x bfloat> [[OP1:%.*]], <vscale x 1 x bfloat> [[OP2:%.*]], i64 noundef [[VL:%.*]]) #[[ATTR0:[0-9]+]] {
+// CHECK-RV64-NEXT: [[ENTRY:.*:]]
+// CHECK-RV64-NEXT: [[TMP0:%.*]] = call <vscale x 1 x i1> @llvm.riscv.vmfgt.nxv1bf16.nxv1bf16.i64(<vscale x 1 x bfloat> [[OP1]], <vscale x 1 x bfloat> [[OP2]], i64 [[VL]])
+// CHECK-RV64-NEXT: ret <vscale x 1 x i1> [[TMP0]]
+//
+vbool64_t test_vmfgt_vv_bf16mf4_b64(vbfloat16mf4_t op1, vbfloat16mf4_t op2, size_t vl) {
+ return __riscv_vmfgt_vv_bf16mf4_b64(op1, op2, vl);
+}
+
+// CHECK-RV64-LABEL: define dso_local <vscale x 1 x i1> @test_vmfgt_vf_bf16mf4_b64(
+// CHECK-RV64-SAME: <vscale x 1 x bfloat> [[OP1:%.*]], bfloat noundef [[OP2:%.*]], i64 noundef [[VL:%.*]]) #[[ATTR0]] {
+// CHECK-RV64-NEXT: [[ENTRY:.*:]]
+// CHECK-RV64-NEXT: [[TMP0:%.*]] = call <vscale x 1 x i1> @llvm.riscv.vmfgt.nxv1bf16.bf16.i64(<vscale x 1 x bfloat> [[OP1]], bfloat [[OP2]], i64 [[VL]])
+// CHECK-RV64-NEXT: ret <vscale x 1 x i1> [[TMP0]]
+//
+vbool64_t test_vmfgt_vf_bf16mf4_b64(vbfloat16mf4_t op1, __bf16 op2, size_t vl) {
+ return __riscv_vmfgt_vf_bf16mf4_b64(op1, op2, vl);
+}
+
+// CHECK-RV64-LABEL: define dso_local <vscale x 2 x i1> @test_vmfgt_vv_bf16mf2_b32(
+// CHECK-RV64-SAME: <vscale x 2 x bfloat> [[OP1:%.*]], <vscale x 2 x bfloat> [[OP2:%.*]], i64 noundef [[VL:%.*]]) #[[ATTR0]] {
+// CHECK-RV64-NEXT: [[ENTRY:.*:]]
+// CHECK-RV64-NEXT: [[TMP0:%.*]] = call <vscale x 2 x i1> @llvm.riscv.vmfgt.nxv2bf16.nxv2bf16.i64(<vscale x 2 x bfloat> [[OP1]], <vscale x 2 x bfloat> [[OP2]], i64 [[VL]])
+// CHECK-RV64-NEXT: ret <vscale x 2 x i1> [[TMP0]]
+//
+vbool32_t test_vmfgt_vv_bf16mf2_b32(vbfloat16mf2_t op1, vbfloat16mf2_t op2, size_t vl) {
+ return __riscv_vmfgt_vv_bf16mf2_b32(op1, op2, vl);
+}
+
+// CHECK-RV64-LABEL: define dso_local <vscale x 2 x i1> @test_vmfgt_vf_bf16mf2_b32(
+// CHECK-RV64-SAME: <vscale x 2 x bfloat> [[OP1:%.*]], bfloat noundef [[OP2:%.*]], i64 noundef [[VL:%.*]]) #[[ATTR0]] {
+// CHECK-RV64-NEXT: [[ENTRY:.*:]]
+// CHECK-RV64-NEXT: [[TMP0:%.*]] = call <vscale x 2 x i1> @llvm.riscv.vmfgt.nxv2bf16.bf16.i64(<vscale x 2 x bfloat> [[OP1]], bfloat [[OP2]], i64 [[VL]])
+// CHECK-RV64-NEXT: ret <vscale x 2 x i1> [[TMP0]]
+//
+vbool32_t test_vmfgt_vf_bf16mf2_b32(vbfloat16mf2_t op1, __bf16 op2, size_t vl) {
+ return __riscv_vmfgt_vf_bf16mf2_b32(op1, op2, vl);
+}
+
+// CHECK-RV64-LABEL: define dso_local <vscale x 4 x i1> @test_vmfgt_vv_bf16m1_b16(
+// CHECK-RV64-SAME: <vscale x 4 x bfloat> [[OP1:%.*]], <vscale x 4 x bfloat> [[OP2:%.*]], i64 noundef [[VL:%.*]]) #[[ATTR0]] {
+// CHECK-RV64-NEXT: [[ENTRY:.*:]]
+// CHECK-RV64-NEXT: [[TMP0:%.*]] = call <vscale x 4 x i1> @llvm.riscv.vmfgt.nxv4bf16.nxv4bf16.i64(<vscale x 4 x bfloat> [[OP1]], <vscale x 4 x bfloat> [[OP2]], i64 [[VL]])
+// CHECK-RV64-NEXT: ret <vscale x 4 x i1> [[TMP0]]
+//
+vbool16_t test_vmfgt_vv_bf16m1_b16(vbfloat16m1_t op1, vbfloat16m1_t op2, size_t vl) {
+ return __riscv_vmfgt_vv_bf16m1_b16(op1, op2, vl);
+}
+
+// CHECK-RV64-LABEL: define dso_local <vscale x 4 x i1> @test_vmfgt_vf_bf16m1_b16(
+// CHECK-RV64-SAME: <vscale x 4 x bfloat> [[OP1:%.*]], bfloat noundef [[OP2:%.*]], i64 noundef [[VL:%.*]]) #[[ATTR0]] {
+// CHECK-RV64-NEXT: [[ENTRY:.*:]]
+// CHECK-RV64-NEXT: [[TMP0:%.*]] = call <vscale x 4 x i1> @llvm.riscv.vmfgt.nxv4bf16.bf16.i64(<vscale x 4 x bfloat> [[OP1]], bfloat [[OP2]], i64 [[VL]])
+// CHECK-RV64-NEXT: ret <vscale x 4 x i1> [[TMP0]]
+//
+vbool16_t test_vmfgt_vf_bf16m1_b16(vbfloat16m1_t op1, __bf16 op2, size_t vl) {
+ return __riscv_vmfgt_vf_bf16m1_b16(op1, op2, vl);
+}
+
+// CHECK-RV64-LABEL: define dso_local <vscale x 8 x i1> @test_vmfgt_vv_bf16m2_b8(
+// CHECK-RV64-SAME: <vscale x 8 x bfloat> [[OP1:%.*]], <vscale x 8 x bfloat> [[OP2:%.*]], i64 noundef [[VL:%.*]]) #[[ATTR0]] {
+// CHECK-RV64-NEXT: [[ENTRY:.*:]]
+// CHECK-RV64-NEXT: [[TMP0:%.*]] = call <vscale x 8 x i1> @llvm.riscv.vmfgt.nxv8bf16.nxv8bf16.i64(<vscale x 8 x bfloat> [[OP1]], <vscale x 8 x bfloat> [[OP2]], i64 [[VL]])
+// CHECK-RV64-NEXT: ret <vscale x 8 x i1> [[TMP0]]
+//
+vbool8_t test_vmfgt_vv_bf16m2_b8(vbfloat16m2_t op1, vbfloat16m2_t op2, size_t vl) {
+ return __riscv_vmfgt_vv_bf16m2_b8(op1, op2, vl);
+}
+
+// CHECK-RV64-LABEL: define dso_local <vscale x 8 x i1> @test_vmfgt_vf_bf16m2_b8(
+// CHECK-RV64-SAME: <vscale x 8 x bfloat> [[OP1:%.*]], bfloat noundef [[OP2:%.*]], i64 noundef [[VL:%.*]]) #[[ATTR0]] {
+// CHECK-RV64-NEXT: [[ENTRY:.*:]]
+// CHECK-RV64-NEXT: [[TMP0:%.*]] = call <vscale x 8 x i1> @llvm.riscv.vmfgt.nxv8bf16.bf16.i64(<vscale x 8 x bfloat> [[OP1]], bfloat [[OP2]], i64 [[VL]])
+// CHECK-RV64-NEXT: ret <vscale x 8 x i1> [[TMP0]]
+//
+vbool8_t test_vmfgt_vf_bf16m2_b8(vbfloat16m2_t op1, __bf16 op2, size_t vl) {
+ return __riscv_vmfgt_vf_bf16m2_b8(op1, op2, vl);
+}
+
+// CHECK-RV64-LABEL: define dso_local <vscale x 16 x i1> @test_vmfgt_vv_bf16m4_b4(
+// CHECK-RV64-SAME: <vscale x 16 x bfloat> [[OP1:%.*]], <vscale x 16 x bfloat> [[OP2:%.*]], i64 noundef [[VL:%.*]]) #[[ATTR0]] {
+// CHECK-RV64-NEXT: [[ENTRY:.*:]]
+// CHECK-RV64-NEXT: [[TMP0:%.*]] = call <vscale x 16 x i1> @llvm.riscv.vmfgt.nxv16bf16.nxv16bf16.i64(<vscale x 16 x bfloat> [[OP1]], <vscale x 16 x bfloat> [[OP2]], i64 [[VL]])
+// CHECK-RV64-NEXT: ret <vscale x 16 x i1> [[TMP0]]
+//
+vbool4_t test_vmfgt_vv_bf16m4_b4(vbfloat16m4_t op1, vbfloat16m4_t op2, size_t vl) {
+ return __riscv_vmfgt_vv_bf16m4_b4(op1, op2, vl);
+}
+
+// CHECK-RV64-LABEL: define dso_local <vscale x 16 x i1> @test_vmfgt_vf_bf16m4_b4(
+// CHECK-RV64-SAME: <vscale x 16 x bfloat> [[OP1:%.*]], bfloat noundef [[OP2:%.*]], i64 noundef [[VL:%.*]]) #[[ATTR0]] {
+// CHECK-RV64-NEXT: [[ENTRY:.*:]]
+// CHECK-RV64-NEXT: [[TMP0:%.*]] = call <vscale x 16 x i1> @llvm.riscv.vmfgt.nxv16bf16.bf16.i64(<vscale x 16 x bfloat> [[OP1]], bfloat [[OP2]], i64 [[VL]])
+// CHECK-RV64-NEXT: ret <vscale x 16 x i1> [[TMP0]]
+//
+vbool4_t test_vmfgt_vf_bf16m4_b4(vbfloat16m4_t op1, __bf16 op2, size_t vl) {
+ return __riscv_vmfgt_vf_bf16m4_b4(op1, op2, vl);
+}
+
+// CHECK-RV64-LABEL: define dso_local <vscale x 32 x i1> @test_vmfgt_vv_bf16m8_b2(
+// CHECK-RV64-SAME: <vscale x 32 x bfloat> [[OP1:%.*]], <vscale x 32 x bfloat> [[OP2:%.*]], i64 noundef [[VL:%.*]]) #[[ATTR0]] {
+// CHECK-RV64-NEXT: [[ENTRY:.*:]]
+// CHECK-RV64-NEXT: [[TMP0:%.*]] = call <vscale x 32 x i1> @llvm.riscv.vmfgt.nxv32bf16.nxv32bf16.i64(<vscale x 32 x bfloat> [[OP1]], <vscale x 32 x bfloat> [[OP2]], i64 [[VL]])
+// CHECK-RV64-NEXT: ret <vscale x 32 x i1> [[TMP0]]
+//
+vbool2_t test_vmfgt_vv_bf16m8_b2(vbfloat16m8_t op1, vbfloat16m8_t op2, size_t vl) {
+ return __riscv_vmfgt_vv_bf16m8_b2(op1, op2, vl);
+}
+
+// CHECK-RV64-LABEL: define dso_local <vscale x 32 x i1> @test_vmfgt_vf_bf16m8_b2(
+// CHECK-RV64-SAME: <vscale x 32 x bfloat> [[OP1:%.*]], bfloat noundef [[OP2:%.*]], i64 noundef [[VL:%.*]]) #[[ATTR0]] {
+// CHECK-RV64-NEXT: [[ENTRY:.*:]]
+// CHECK-RV64-NEXT: [[TMP0:%.*]] = call <vscale x 32 x i1> @llvm.riscv.vmfgt.nxv32bf16.bf16.i64(<vscale x 32 x bfloat> [[OP1]], bfloat [[OP2]], i64 [[VL]])
+// CHECK-RV64-NEXT: ret <vscale x 32 x i1> [[TMP0]]
+//
+vbool2_t test_vmfgt_vf_bf16m8_b2(vbfloat16m8_t op1, __bf16 op2, size_t vl) {
+ return __riscv_vmfgt_vf_bf16m8_b2(op1, op2, vl);
+}
+
+// CHECK-RV64-LABEL: define dso_local <vscale x 1 x i1> @test_vmfgt_vv_bf16mf4_b64_m(
+// CHECK-RV64-SAME: <vscale x 1 x i1> [[MASK:%.*]], <vscale x 1 x bfloat> [[OP1:%.*]], <vscale x 1 x bfloat> [[OP2:%.*]], i64 noundef [[VL:%.*]]) #[[ATTR0]] {
+// CHECK-RV64-NEXT: [[ENTRY:.*:]]
+// CHECK-RV64-NEXT: [[TMP0:%.*]] = call <vscale x 1 x i1> @llvm.riscv.vmfgt.mask.nxv1bf16.nxv1bf16.i64(<vscale x 1 x i1> poison, <vscale x 1 x bfloat> [[OP1]], <vscale x 1 x bfloat> [[OP2]], <vscale x 1 x i1> [[MASK]], i64 [[VL]])
+// CHECK-RV64-NEXT: ret <vscale x 1 x i1> [[TMP0]]
+//
+vbool64_t test_vmfgt_vv_bf16mf4_b64_m(vbool64_t mask, vbfloat16mf4_t op1, vbfloat16mf4_t op2, size_t vl) {
+ return __riscv_vmfgt_vv_bf16mf4_b64_m(mask, op1, op2, vl);
+}
+
+// CHECK-RV64-LABEL: define dso_local <vscale x 1 x i1> @test_vmfgt_vf_bf16mf4_b64_m(
+// CHECK-RV64-SAME: <vscale x 1 x i1> [[MASK:%.*]], <vscale x 1 x bfloat> [[OP1:%.*]], bfloat noundef [[OP2:%.*]], i64 noundef [[VL:%.*]]) #[[ATTR0]] {
+// CHECK-RV64-NEXT: [[ENTRY:.*:]]
+// CHECK-RV64-NEXT: [[TMP0:%.*]] = call <vscale x 1 x i1> @llvm.riscv.vmfgt.mask.nxv1bf16.bf16.i64(<vscale x 1 x i1> poison, <vscale x 1 x bfloat> [[OP1]], bfloat [[OP2]], <vscale x 1 x i1> [[MASK]], i64 [[VL]])
+// CHECK-RV64-NEXT: ret <vscale x 1 x i1> [[TMP0]]
+//
+vbool64_t test_vmfgt_vf_bf16mf4_b64_m(vbool64_t mask, vbfloat16mf4_t op1, __bf16 op2, size_t vl) {
+ return __riscv_vmfgt_vf_bf16mf4_b64_m(mask, op1, op2, vl);
+}
+
+// CHECK-RV64-LABEL: define dso_local <vscale x 2 x i1> @test_vmfgt_vv_bf16mf2_b32_m(
+// CHECK-RV64-SAME: <vscale x 2 x i1> [[MASK:%.*]], <vscale x 2 x bfloat> [[OP1:%.*]], <vscale x 2 x bfloat> [[OP2:%.*]], i64 noundef [[VL:%.*]]) #[[ATTR0]] {
+// CHECK-RV64-NEXT: [[ENTRY:.*:]]
+// CHECK-RV64-NEXT: [[TMP0:%.*]] = call <vscale x 2 x i1> @llvm.riscv.vmfgt.mask.nxv2bf16.nxv2bf16.i64(<vscale x 2 x i1> poison, <vscale x 2 x bfloat> [[OP1]], <vscale x 2 x bfloat> [[OP2]], <vscale x 2 x i1> [[MASK]], i64 [[VL]])
+// CHECK-RV64-NEXT: ret <vscale x 2 x i1> [[TMP0]]
+//
+vbool32_t test_vmfgt_vv_bf16mf2_b32_m(vbool32_t mask, vbfloat16mf2_t op1, vbfloat16mf2_t op2, size_t vl) {
+ return __riscv_vmfgt_vv_bf16mf2_b32_m(mask, op1, op2, vl);
+}
+
+// CHECK-RV64-LABEL: define dso_local <vscale x 2 x i1> @test_vmfgt_vf_bf16mf2_b32_m(
+// CHECK-RV64-SAME: <vscale x 2 x i1> [[MASK:%.*]], <vscale x 2 x bfloat> [[OP1:%.*]], bfloat noundef [[OP2:%.*]], i64 noundef [[VL:%.*]]) #[[ATTR0]] {
+// CHECK-RV64-NEXT: [[ENTRY:.*:]]
+// CHECK-RV64-NEXT: [[TMP0:%.*]] = call <vscale x 2 x i1> @llvm.riscv.vmfgt.mask.nxv2bf16.bf16.i64(<vscale x 2 x i1> poison, <vscale x 2 x bfloat> [[OP1]], bfloat [[OP2]], <vscale x 2 x i1> [[MASK]], i64 [[VL]])
+// CHECK-RV64-NEXT: ret <vscale x 2 x i1> [[TMP0]]
+//
+vbool32_t test_vmfgt_vf_bf16mf2_b32_m(vbool32_t mask, vbfloat16mf2_t op1, __bf16 op2, size_t vl) {
+ return __riscv_vmfgt_vf_bf16mf2_b32_m(mask, op1, op2, vl);
+}
+
+// CHECK-RV64-LABEL: define dso_local <vscale x 4 x i1> @test_vmfgt_vv_bf16m1_b16_m(
+// CHECK-RV64-SAME: <vscale x 4 x i1> [[MASK:%.*]], <vscale x 4 x bfloat> [[OP1:%.*]], <vscale x 4 x bfloat> [[OP2:%.*]], i64 noundef [[VL:%.*]]) #[[ATTR0]] {
+// CHECK-RV64-NEXT: [[ENTRY:.*:]]
+// CHECK-RV64-NEXT: [[TMP0:%.*]] = call <vscale x 4 x i1> @llvm.riscv.vmfgt.mask.nxv4bf16.nxv4bf16.i64(<vscale x 4 x i1> poison, <vscale x 4 x bfloat> [[OP1]], <vscale x 4 x bfloat> [[OP2]], <vscale x 4 x i1> [[MASK]], i64 [[VL]])
+// CHECK-RV64-NEXT: ret <vscale x 4 x i1> [[TMP0]]
+//
+vbool16_t test_vmfgt_vv_bf16m1_b16_m(vbool16_t mask, vbfloat16m1_t op1, vbfloat16m1_t op2, size_t vl) {
+ return __riscv_vmfgt_vv_bf16m1_b16_m(mask, op1, op2, vl);
+}
+
+// CHECK-RV64-LABEL: define dso_local <vscale x 4 x i1> @test_vmfgt_vf_bf16m1_b16_m(
+// CHECK-RV64-SAME: <vscale x 4 x i1> [[MASK:%.*]], <vscale x 4 x bfloat> [[OP1:%.*]], bfloat noundef [[OP2:%.*]], i64 noundef [[VL:%.*]]) #[[ATTR0]] {
+// CHECK-RV64-NEXT: [[ENTRY:.*:]]
+// CHECK-RV64-NEXT: [[TMP0:%.*]] = call <vscale x 4 x i1> @llvm.riscv.vmfgt.mask.nxv4bf16.bf16.i64(<vscale x 4 x i1> poison, <vscale x 4 x bfloat> [[OP1]], bfloat [[OP2]], <vscale x 4 x i1> [[MASK]], i64 [[VL]])
+// CHECK-RV64-NEXT: ret <vscale x 4 x i1> [[TMP0]]
+//
+vbool16_t test_vmfgt_vf_bf16m1_b16_m(vbool16_t mask, vbfloat16m1_t op1, __bf16 op2, size_t vl) {
+ return __riscv_vmfgt_vf_bf16m1_b16_m(mask, op1, op2, vl);
+}
+
+// CHECK-RV64-LABEL: define dso_local <vscale x 8 x i1> @test_vmfgt_vv_bf16m2_b8_m(
+// CHECK-RV64-SAME: <vscale x 8 x i1> [[MASK:%.*]], <vscale x 8 x bfloat> [[OP1:%.*]], <vscale x 8 x bfloat> [[OP2:%.*]], i64 noundef [[VL:%.*]]) #[[ATTR0]] {
+// CHECK-RV64-NEXT: [[ENTRY:.*:]]
+// CHECK-RV64-NEXT: [[TMP0:%.*]] = call <vscale x 8 x i1> @llvm.riscv.vmfgt.mask.nxv8bf16.nxv8bf16.i64(<vscale x 8 x i1> poison, <vscale x 8 x bfloat> [[OP1]], <vscale x 8 x bfloat> [[OP2]], <vscale x 8 x i1> [[MASK]], i64 [[VL]])
+// CHECK-RV64-NEXT: ret <vscale x 8 x i1> [[TMP0]]
+//
+vbool8_t test_vmfgt_vv_bf16m2_b8_m(vbool8_t mask, vbfloat16m2_t op1, vbfloat16m2_t op2, size_t vl) {
+ return __riscv_vmfgt_vv_bf16m2_b8_m(mask, op1, op2, vl);
+}
+
+// CHECK-RV64-LABEL: define dso_local <vscale x 8 x i1> @test_vmfgt_vf_bf16m2_b8_m(
+// CHECK-RV64-SAME: <vscale x 8 x i1> [[MASK:%.*]], <vscale x 8 x bfloat> [[OP1:%.*]], bfloat noundef [[OP2:%.*]], i64 noundef [[VL:%.*]]) #[[ATTR0]] {
+// CHECK-RV64-NEXT: [[ENTRY:.*:]]
+// CHECK-RV64-NEXT: [[TMP0:%.*]] = call <vscale x 8 x i1> @llvm.riscv.vmfgt.mask.nxv8bf16.bf16.i64(<vscale x 8 x i1> poison, <vscale x 8 x bfloat> [[OP1]], bfloat [[OP2]], <vscale x 8 x i1> [[MASK]], i64 [[VL]])
+// CHECK-RV64-NEXT: ret <vscale x 8 x i1> [[TMP0]]
+//
+vbool8_t test_vmfgt_vf_bf16m2_b8_m(vbool8_t mask, vbfloat16m2_t op1, __bf16 op2, size_t vl) {
+ return __riscv_vmfgt_vf_bf16m2_b8_m(mask, op1, op2, vl);
+}
+
+// CHECK-RV64-LABEL: define dso_local <vscale x 16 x i1> @test_vmfgt_vv_bf16m4_b4_m(
+// CHECK-RV64-SAME: <vscale x 16 x i1> [[MASK:%.*]], <vscale x 16 x bfloat> [[OP1:%.*]], <vscale x 16 x bfloat> [[OP2:%.*]], i64 noundef [[VL:%.*]]) #[[ATTR0]] {
+// CHECK-RV64-NEXT: [[ENTRY:.*:]]
+// CHECK-RV64-NEXT: [[TMP0:%.*]] = call <vscale x 16 x i1> @llvm.riscv.vmfgt.mask.nxv16bf16.nxv16bf16.i64(<vscale x 16 x i1> poison, <vscale x 16 x bfloat> [[OP1]], <vscale x 16 x bfloat> [[OP2]], <vscale x 16 x i1> [[MASK]], i64 [[VL]])
+// CHECK-RV64-NEXT: ret <vscale x 16 x i1> [[TMP0]]
+//
+vbool4_t test_vmfgt_vv_bf16m4_b4_m(vbool4_t mask, vbfloat16m4_t op1, vbfloat16m4_t op2, size_t vl) {
+ return __riscv_vmfgt_vv_bf16m4_b4_m(mask, op1, op2, vl);
+}
+
+// CHECK-RV64-LABEL: define dso_local <vscale x 16 x i1> @test_vmfgt_vf_bf16m4_b4_m(
+// CHECK-RV64-SAME: <vscale x 16 x i1> [[MASK:%.*]], <vscale x 16 x bfloat> [[OP1:%.*]], bfloat noundef [[OP2:%.*]], i64 noundef [[VL:%.*]]) #[[ATTR0]] {
+// CHECK-RV64-NEXT: [[ENTRY:.*:]]
+// CHECK-RV64-NEXT: [[TMP0:%.*]] = call <vscale x 16 x i1> @llvm.riscv.vmfgt.mask.nxv16bf16.bf16.i64(<vscale x 16 x i1> poison, <vscale x 16 x bfloat> [[OP1]], bfloat [[OP2]], <vscale x 16 x i1> [[MASK]], i64 [[VL]])
+// CHECK-RV64-NEXT: ret <vscale x 16 x i1> [[TMP0]]
+//
+vbool4_t test_vmfgt_vf_bf16m4_b4_m(vbool4_t mask, vbfloat16m4_t op1, __bf16 op2, size_t vl) {
+ return __riscv_vmfgt_vf_bf16m4_b4_m(mask, op1, op2, vl);
+}
+
+// CHECK-RV64-LABEL: define dso_local <vscale x 32 x i1> @test_vmfgt_vv_bf16m8_b2_m(
+// CHECK-RV64-SAME: <vscale x 32 x i1> [[MASK:%.*]], <vscale x 32 x bfloat> [[OP1:%.*]], <vscale x 32 x bfloat> [[OP2:%.*]], i64 noundef [[VL:%.*]]) #[[ATTR0]] {
+// CHECK-RV64-NEXT: [[ENTRY:.*:]]
+// CHECK-RV64-NEXT: [[TMP0:%.*]] = call <vscale x 32 x i1> @llvm.riscv.vmfgt.mask.nxv32bf16.nxv32bf16.i64(<vscale x 32 x i1> poison, <vscale x 32 x bfloat> [[OP1]], <vscale x 32 x bfloat> [[OP2]], <vscale x 32 x i1> [[MASK]], i64 [[VL]])
+// CHECK-RV64-NEXT: ret <vscale x 32 x i1> [[TMP0]]
+//
+vbool2_t test_vmfgt_vv_bf16m8_b2_m(vbool2_t mask, vbfloat16m8_t op1, vbfloat16m8_t op2, size_t vl) {
+ return __riscv_vmfgt_vv_bf16m8_b2_m(mask, op1, op2, vl);
+}
+
+// CHECK-RV64-LABEL: define dso_local <vscale x 32 x i1> @test_vmfgt_vf_bf16m8_b2_m(
+// CHECK-RV64-SAME: <vscale x 32 x i1> [[MASK:%.*]], <vscale x 32 x bfloat> [[OP1:%.*]], bfloat noundef [[OP2:%.*]], i64 noundef [[VL:%.*]]) #[[ATTR0]] {
+// CHECK-RV64-NEXT: [[ENTRY:.*:]]
+// CHECK-RV64-NEXT: [[TMP0:%.*]] = call <vscale x 32 x i1> @llvm.riscv.vmfgt.mask.nxv32bf16.bf16.i64(<vscale x 32 x i1> poison, <vscale x 32 x bfloat> [[OP1]], bfloat [[OP2]], <vscale x 32 x i1> [[MASK]], i64 [[VL]])
+// CHECK-RV64-NEXT: ret <vscale x 32 x i1> [[TMP0]]
+//
+vbool2_t test_vmfgt_vf_bf16m8_b2_m(vbool2_t mask, vbfloat16m8_t op1, __bf16 op2, size_t vl) {
+ return __riscv_vmfgt_vf_bf16m8_b2_m(mask, op1, op2, vl);
+}
+
diff --git a/clang/test/CodeGen/RISCV/rvv-intrinsics-autogenerated/zvfbfa/non-policy/non-overloaded/vmfle.c b/clang/test/CodeGen/RISCV/rvv-intrinsics-autogenerated/zvfbfa/non-policy/non-overloaded/vmfle.c
new file mode 100644
index 0000000..40f0c27
--- /dev/null
+++ b/clang/test/CodeGen/RISCV/rvv-intrinsics-autogenerated/zvfbfa/non-policy/non-overloaded/vmfle.c
@@ -0,0 +1,249 @@
+// NOTE: Assertions have been autogenerated by utils/update_cc_test_checks.py UTC_ARGS: --version 5
+// REQUIRES: riscv-registered-target
+// RUN: %clang_cc1 -triple riscv64 -target-feature +v \
+// RUN: -target-feature +experimental-zvfbfa -disable-O0-optnone \
+// RUN: -emit-llvm %s -o - | opt -S -passes=mem2reg | \
+// RUN: FileCheck --check-prefix=CHECK-RV64 %s
+
+#include <riscv_vector.h>
+
+// CHECK-RV64-LABEL: define dso_local <vscale x 1 x i1> @test_vmfle_vv_bf16mf4_b64(
+// CHECK-RV64-SAME: <vscale x 1 x bfloat> [[OP1:%.*]], <vscale x 1 x bfloat> [[OP2:%.*]], i64 noundef [[VL:%.*]]) #[[ATTR0:[0-9]+]] {
+// CHECK-RV64-NEXT: [[ENTRY:.*:]]
+// CHECK-RV64-NEXT: [[TMP0:%.*]] = call <vscale x 1 x i1> @llvm.riscv.vmfle.nxv1bf16.nxv1bf16.i64(<vscale x 1 x bfloat> [[OP1]], <vscale x 1 x bfloat> [[OP2]], i64 [[VL]])
+// CHECK-RV64-NEXT: ret <vscale x 1 x i1> [[TMP0]]
+//
+vbool64_t test_vmfle_vv_bf16mf4_b64(vbfloat16mf4_t op1, vbfloat16mf4_t op2, size_t vl) {
+ return __riscv_vmfle_vv_bf16mf4_b64(op1, op2, vl);
+}
+
+// CHECK-RV64-LABEL: define dso_local <vscale x 1 x i1> @test_vmfle_vf_bf16mf4_b64(
+// CHECK-RV64-SAME: <vscale x 1 x bfloat> [[OP1:%.*]], bfloat noundef [[OP2:%.*]], i64 noundef [[VL:%.*]]) #[[ATTR0]] {
+// CHECK-RV64-NEXT: [[ENTRY:.*:]]
+// CHECK-RV64-NEXT: [[TMP0:%.*]] = call <vscale x 1 x i1> @llvm.riscv.vmfle.nxv1bf16.bf16.i64(<vscale x 1 x bfloat> [[OP1]], bfloat [[OP2]], i64 [[VL]])
+// CHECK-RV64-NEXT: ret <vscale x 1 x i1> [[TMP0]]
+//
+vbool64_t test_vmfle_vf_bf16mf4_b64(vbfloat16mf4_t op1, __bf16 op2, size_t vl) {
+ return __riscv_vmfle_vf_bf16mf4_b64(op1, op2, vl);
+}
+
+// CHECK-RV64-LABEL: define dso_local <vscale x 2 x i1> @test_vmfle_vv_bf16mf2_b32(
+// CHECK-RV64-SAME: <vscale x 2 x bfloat> [[OP1:%.*]], <vscale x 2 x bfloat> [[OP2:%.*]], i64 noundef [[VL:%.*]]) #[[ATTR0]] {
+// CHECK-RV64-NEXT: [[ENTRY:.*:]]
+// CHECK-RV64-NEXT: [[TMP0:%.*]] = call <vscale x 2 x i1> @llvm.riscv.vmfle.nxv2bf16.nxv2bf16.i64(<vscale x 2 x bfloat> [[OP1]], <vscale x 2 x bfloat> [[OP2]], i64 [[VL]])
+// CHECK-RV64-NEXT: ret <vscale x 2 x i1> [[TMP0]]
+//
+vbool32_t test_vmfle_vv_bf16mf2_b32(vbfloat16mf2_t op1, vbfloat16mf2_t op2, size_t vl) {
+ return __riscv_vmfle_vv_bf16mf2_b32(op1, op2, vl);
+}
+
+// CHECK-RV64-LABEL: define dso_local <vscale x 2 x i1> @test_vmfle_vf_bf16mf2_b32(
+// CHECK-RV64-SAME: <vscale x 2 x bfloat> [[OP1:%.*]], bfloat noundef [[OP2:%.*]], i64 noundef [[VL:%.*]]) #[[ATTR0]] {
+// CHECK-RV64-NEXT: [[ENTRY:.*:]]
+// CHECK-RV64-NEXT: [[TMP0:%.*]] = call <vscale x 2 x i1> @llvm.riscv.vmfle.nxv2bf16.bf16.i64(<vscale x 2 x bfloat> [[OP1]], bfloat [[OP2]], i64 [[VL]])
+// CHECK-RV64-NEXT: ret <vscale x 2 x i1> [[TMP0]]
+//
+vbool32_t test_vmfle_vf_bf16mf2_b32(vbfloat16mf2_t op1, __bf16 op2, size_t vl) {
+ return __riscv_vmfle_vf_bf16mf2_b32(op1, op2, vl);
+}
+
+// CHECK-RV64-LABEL: define dso_local <vscale x 4 x i1> @test_vmfle_vv_bf16m1_b16(
+// CHECK-RV64-SAME: <vscale x 4 x bfloat> [[OP1:%.*]], <vscale x 4 x bfloat> [[OP2:%.*]], i64 noundef [[VL:%.*]]) #[[ATTR0]] {
+// CHECK-RV64-NEXT: [[ENTRY:.*:]]
+// CHECK-RV64-NEXT: [[TMP0:%.*]] = call <vscale x 4 x i1> @llvm.riscv.vmfle.nxv4bf16.nxv4bf16.i64(<vscale x 4 x bfloat> [[OP1]], <vscale x 4 x bfloat> [[OP2]], i64 [[VL]])
+// CHECK-RV64-NEXT: ret <vscale x 4 x i1> [[TMP0]]
+//
+vbool16_t test_vmfle_vv_bf16m1_b16(vbfloat16m1_t op1, vbfloat16m1_t op2, size_t vl) {
+ return __riscv_vmfle_vv_bf16m1_b16(op1, op2, vl);
+}
+
+// CHECK-RV64-LABEL: define dso_local <vscale x 4 x i1> @test_vmfle_vf_bf16m1_b16(
+// CHECK-RV64-SAME: <vscale x 4 x bfloat> [[OP1:%.*]], bfloat noundef [[OP2:%.*]], i64 noundef [[VL:%.*]]) #[[ATTR0]] {
+// CHECK-RV64-NEXT: [[ENTRY:.*:]]
+// CHECK-RV64-NEXT: [[TMP0:%.*]] = call <vscale x 4 x i1> @llvm.riscv.vmfle.nxv4bf16.bf16.i64(<vscale x 4 x bfloat> [[OP1]], bfloat [[OP2]], i64 [[VL]])
+// CHECK-RV64-NEXT: ret <vscale x 4 x i1> [[TMP0]]
+//
+vbool16_t test_vmfle_vf_bf16m1_b16(vbfloat16m1_t op1, __bf16 op2, size_t vl) {
+ return __riscv_vmfle_vf_bf16m1_b16(op1, op2, vl);
+}
+
+// CHECK-RV64-LABEL: define dso_local <vscale x 8 x i1> @test_vmfle_vv_bf16m2_b8(
+// CHECK-RV64-SAME: <vscale x 8 x bfloat> [[OP1:%.*]], <vscale x 8 x bfloat> [[OP2:%.*]], i64 noundef [[VL:%.*]]) #[[ATTR0]] {
+// CHECK-RV64-NEXT: [[ENTRY:.*:]]
+// CHECK-RV64-NEXT: [[TMP0:%.*]] = call <vscale x 8 x i1> @llvm.riscv.vmfle.nxv8bf16.nxv8bf16.i64(<vscale x 8 x bfloat> [[OP1]], <vscale x 8 x bfloat> [[OP2]], i64 [[VL]])
+// CHECK-RV64-NEXT: ret <vscale x 8 x i1> [[TMP0]]
+//
+vbool8_t test_vmfle_vv_bf16m2_b8(vbfloat16m2_t op1, vbfloat16m2_t op2, size_t vl) {
+ return __riscv_vmfle_vv_bf16m2_b8(op1, op2, vl);
+}
+
+// CHECK-RV64-LABEL: define dso_local <vscale x 8 x i1> @test_vmfle_vf_bf16m2_b8(
+// CHECK-RV64-SAME: <vscale x 8 x bfloat> [[OP1:%.*]], bfloat noundef [[OP2:%.*]], i64 noundef [[VL:%.*]]) #[[ATTR0]] {
+// CHECK-RV64-NEXT: [[ENTRY:.*:]]
+// CHECK-RV64-NEXT: [[TMP0:%.*]] = call <vscale x 8 x i1> @llvm.riscv.vmfle.nxv8bf16.bf16.i64(<vscale x 8 x bfloat> [[OP1]], bfloat [[OP2]], i64 [[VL]])
+// CHECK-RV64-NEXT: ret <vscale x 8 x i1> [[TMP0]]
+//
+vbool8_t test_vmfle_vf_bf16m2_b8(vbfloat16m2_t op1, __bf16 op2, size_t vl) {
+ return __riscv_vmfle_vf_bf16m2_b8(op1, op2, vl);
+}
+
+// CHECK-RV64-LABEL: define dso_local <vscale x 16 x i1> @test_vmfle_vv_bf16m4_b4(
+// CHECK-RV64-SAME: <vscale x 16 x bfloat> [[OP1:%.*]], <vscale x 16 x bfloat> [[OP2:%.*]], i64 noundef [[VL:%.*]]) #[[ATTR0]] {
+// CHECK-RV64-NEXT: [[ENTRY:.*:]]
+// CHECK-RV64-NEXT: [[TMP0:%.*]] = call <vscale x 16 x i1> @llvm.riscv.vmfle.nxv16bf16.nxv16bf16.i64(<vscale x 16 x bfloat> [[OP1]], <vscale x 16 x bfloat> [[OP2]], i64 [[VL]])
+// CHECK-RV64-NEXT: ret <vscale x 16 x i1> [[TMP0]]
+//
+vbool4_t test_vmfle_vv_bf16m4_b4(vbfloat16m4_t op1, vbfloat16m4_t op2, size_t vl) {
+ return __riscv_vmfle_vv_bf16m4_b4(op1, op2, vl);
+}
+
+// CHECK-RV64-LABEL: define dso_local <vscale x 16 x i1> @test_vmfle_vf_bf16m4_b4(
+// CHECK-RV64-SAME: <vscale x 16 x bfloat> [[OP1:%.*]], bfloat noundef [[OP2:%.*]], i64 noundef [[VL:%.*]]) #[[ATTR0]] {
+// CHECK-RV64-NEXT: [[ENTRY:.*:]]
+// CHECK-RV64-NEXT: [[TMP0:%.*]] = call <vscale x 16 x i1> @llvm.riscv.vmfle.nxv16bf16.bf16.i64(<vscale x 16 x bfloat> [[OP1]], bfloat [[OP2]], i64 [[VL]])
+// CHECK-RV64-NEXT: ret <vscale x 16 x i1> [[TMP0]]
+//
+vbool4_t test_vmfle_vf_bf16m4_b4(vbfloat16m4_t op1, __bf16 op2, size_t vl) {
+ return __riscv_vmfle_vf_bf16m4_b4(op1, op2, vl);
+}
+
+// CHECK-RV64-LABEL: define dso_local <vscale x 32 x i1> @test_vmfle_vv_bf16m8_b2(
+// CHECK-RV64-SAME: <vscale x 32 x bfloat> [[OP1:%.*]], <vscale x 32 x bfloat> [[OP2:%.*]], i64 noundef [[VL:%.*]]) #[[ATTR0]] {
+// CHECK-RV64-NEXT: [[ENTRY:.*:]]
+// CHECK-RV64-NEXT: [[TMP0:%.*]] = call <vscale x 32 x i1> @llvm.riscv.vmfle.nxv32bf16.nxv32bf16.i64(<vscale x 32 x bfloat> [[OP1]], <vscale x 32 x bfloat> [[OP2]], i64 [[VL]])
+// CHECK-RV64-NEXT: ret <vscale x 32 x i1> [[TMP0]]
+//
+vbool2_t test_vmfle_vv_bf16m8_b2(vbfloat16m8_t op1, vbfloat16m8_t op2, size_t vl) {
+ return __riscv_vmfle_vv_bf16m8_b2(op1, op2, vl);
+}
+
+// CHECK-RV64-LABEL: define dso_local <vscale x 32 x i1> @test_vmfle_vf_bf16m8_b2(
+// CHECK-RV64-SAME: <vscale x 32 x bfloat> [[OP1:%.*]], bfloat noundef [[OP2:%.*]], i64 noundef [[VL:%.*]]) #[[ATTR0]] {
+// CHECK-RV64-NEXT: [[ENTRY:.*:]]
+// CHECK-RV64-NEXT: [[TMP0:%.*]] = call <vscale x 32 x i1> @llvm.riscv.vmfle.nxv32bf16.bf16.i64(<vscale x 32 x bfloat> [[OP1]], bfloat [[OP2]], i64 [[VL]])
+// CHECK-RV64-NEXT: ret <vscale x 32 x i1> [[TMP0]]
+//
+vbool2_t test_vmfle_vf_bf16m8_b2(vbfloat16m8_t op1, __bf16 op2, size_t vl) {
+ return __riscv_vmfle_vf_bf16m8_b2(op1, op2, vl);
+}
+
+// CHECK-RV64-LABEL: define dso_local <vscale x 1 x i1> @test_vmfle_vv_bf16mf4_b64_m(
+// CHECK-RV64-SAME: <vscale x 1 x i1> [[MASK:%.*]], <vscale x 1 x bfloat> [[OP1:%.*]], <vscale x 1 x bfloat> [[OP2:%.*]], i64 noundef [[VL:%.*]]) #[[ATTR0]] {
+// CHECK-RV64-NEXT: [[ENTRY:.*:]]
+// CHECK-RV64-NEXT: [[TMP0:%.*]] = call <vscale x 1 x i1> @llvm.riscv.vmfle.mask.nxv1bf16.nxv1bf16.i64(<vscale x 1 x i1> poison, <vscale x 1 x bfloat> [[OP1]], <vscale x 1 x bfloat> [[OP2]], <vscale x 1 x i1> [[MASK]], i64 [[VL]])
+// CHECK-RV64-NEXT: ret <vscale x 1 x i1> [[TMP0]]
+//
+vbool64_t test_vmfle_vv_bf16mf4_b64_m(vbool64_t mask, vbfloat16mf4_t op1, vbfloat16mf4_t op2, size_t vl) {
+ return __riscv_vmfle_vv_bf16mf4_b64_m(mask, op1, op2, vl);
+}
+
+// CHECK-RV64-LABEL: define dso_local <vscale x 1 x i1> @test_vmfle_vf_bf16mf4_b64_m(
+// CHECK-RV64-SAME: <vscale x 1 x i1> [[MASK:%.*]], <vscale x 1 x bfloat> [[OP1:%.*]], bfloat noundef [[OP2:%.*]], i64 noundef [[VL:%.*]]) #[[ATTR0]] {
+// CHECK-RV64-NEXT: [[ENTRY:.*:]]
+// CHECK-RV64-NEXT: [[TMP0:%.*]] = call <vscale x 1 x i1> @llvm.riscv.vmfle.mask.nxv1bf16.bf16.i64(<vscale x 1 x i1> poison, <vscale x 1 x bfloat> [[OP1]], bfloat [[OP2]], <vscale x 1 x i1> [[MASK]], i64 [[VL]])
+// CHECK-RV64-NEXT: ret <vscale x 1 x i1> [[TMP0]]
+//
+vbool64_t test_vmfle_vf_bf16mf4_b64_m(vbool64_t mask, vbfloat16mf4_t op1, __bf16 op2, size_t vl) {
+ return __riscv_vmfle_vf_bf16mf4_b64_m(mask, op1, op2, vl);
+}
+
+// CHECK-RV64-LABEL: define dso_local <vscale x 2 x i1> @test_vmfle_vv_bf16mf2_b32_m(
+// CHECK-RV64-SAME: <vscale x 2 x i1> [[MASK:%.*]], <vscale x 2 x bfloat> [[OP1:%.*]], <vscale x 2 x bfloat> [[OP2:%.*]], i64 noundef [[VL:%.*]]) #[[ATTR0]] {
+// CHECK-RV64-NEXT: [[ENTRY:.*:]]
+// CHECK-RV64-NEXT: [[TMP0:%.*]] = call <vscale x 2 x i1> @llvm.riscv.vmfle.mask.nxv2bf16.nxv2bf16.i64(<vscale x 2 x i1> poison, <vscale x 2 x bfloat> [[OP1]], <vscale x 2 x bfloat> [[OP2]], <vscale x 2 x i1> [[MASK]], i64 [[VL]])
+// CHECK-RV64-NEXT: ret <vscale x 2 x i1> [[TMP0]]
+//
+vbool32_t test_vmfle_vv_bf16mf2_b32_m(vbool32_t mask, vbfloat16mf2_t op1, vbfloat16mf2_t op2, size_t vl) {
+ return __riscv_vmfle_vv_bf16mf2_b32_m(mask, op1, op2, vl);
+}
+
+// CHECK-RV64-LABEL: define dso_local <vscale x 2 x i1> @test_vmfle_vf_bf16mf2_b32_m(
+// CHECK-RV64-SAME: <vscale x 2 x i1> [[MASK:%.*]], <vscale x 2 x bfloat> [[OP1:%.*]], bfloat noundef [[OP2:%.*]], i64 noundef [[VL:%.*]]) #[[ATTR0]] {
+// CHECK-RV64-NEXT: [[ENTRY:.*:]]
+// CHECK-RV64-NEXT: [[TMP0:%.*]] = call <vscale x 2 x i1> @llvm.riscv.vmfle.mask.nxv2bf16.bf16.i64(<vscale x 2 x i1> poison, <vscale x 2 x bfloat> [[OP1]], bfloat [[OP2]], <vscale x 2 x i1> [[MASK]], i64 [[VL]])
+// CHECK-RV64-NEXT: ret <vscale x 2 x i1> [[TMP0]]
+//
+vbool32_t test_vmfle_vf_bf16mf2_b32_m(vbool32_t mask, vbfloat16mf2_t op1, __bf16 op2, size_t vl) {
+ return __riscv_vmfle_vf_bf16mf2_b32_m(mask, op1, op2, vl);
+}
+
+// CHECK-RV64-LABEL: define dso_local <vscale x 4 x i1> @test_vmfle_vv_bf16m1_b16_m(
+// CHECK-RV64-SAME: <vscale x 4 x i1> [[MASK:%.*]], <vscale x 4 x bfloat> [[OP1:%.*]], <vscale x 4 x bfloat> [[OP2:%.*]], i64 noundef [[VL:%.*]]) #[[ATTR0]] {
+// CHECK-RV64-NEXT: [[ENTRY:.*:]]
+// CHECK-RV64-NEXT: [[TMP0:%.*]] = call <vscale x 4 x i1> @llvm.riscv.vmfle.mask.nxv4bf16.nxv4bf16.i64(<vscale x 4 x i1> poison, <vscale x 4 x bfloat> [[OP1]], <vscale x 4 x bfloat> [[OP2]], <vscale x 4 x i1> [[MASK]], i64 [[VL]])
+// CHECK-RV64-NEXT: ret <vscale x 4 x i1> [[TMP0]]
+//
+vbool16_t test_vmfle_vv_bf16m1_b16_m(vbool16_t mask, vbfloat16m1_t op1, vbfloat16m1_t op2, size_t vl) {
+ return __riscv_vmfle_vv_bf16m1_b16_m(mask, op1, op2, vl);
+}
+
+// CHECK-RV64-LABEL: define dso_local <vscale x 4 x i1> @test_vmfle_vf_bf16m1_b16_m(
+// CHECK-RV64-SAME: <vscale x 4 x i1> [[MASK:%.*]], <vscale x 4 x bfloat> [[OP1:%.*]], bfloat noundef [[OP2:%.*]], i64 noundef [[VL:%.*]]) #[[ATTR0]] {
+// CHECK-RV64-NEXT: [[ENTRY:.*:]]
+// CHECK-RV64-NEXT: [[TMP0:%.*]] = call <vscale x 4 x i1> @llvm.riscv.vmfle.mask.nxv4bf16.bf16.i64(<vscale x 4 x i1> poison, <vscale x 4 x bfloat> [[OP1]], bfloat [[OP2]], <vscale x 4 x i1> [[MASK]], i64 [[VL]])
+// CHECK-RV64-NEXT: ret <vscale x 4 x i1> [[TMP0]]
+//
+vbool16_t test_vmfle_vf_bf16m1_b16_m(vbool16_t mask, vbfloat16m1_t op1, __bf16 op2, size_t vl) {
+ return __riscv_vmfle_vf_bf16m1_b16_m(mask, op1, op2, vl);
+}
+
+// CHECK-RV64-LABEL: define dso_local <vscale x 8 x i1> @test_vmfle_vv_bf16m2_b8_m(
+// CHECK-RV64-SAME: <vscale x 8 x i1> [[MASK:%.*]], <vscale x 8 x bfloat> [[OP1:%.*]], <vscale x 8 x bfloat> [[OP2:%.*]], i64 noundef [[VL:%.*]]) #[[ATTR0]] {
+// CHECK-RV64-NEXT: [[ENTRY:.*:]]
+// CHECK-RV64-NEXT: [[TMP0:%.*]] = call <vscale x 8 x i1> @llvm.riscv.vmfle.mask.nxv8bf16.nxv8bf16.i64(<vscale x 8 x i1> poison, <vscale x 8 x bfloat> [[OP1]], <vscale x 8 x bfloat> [[OP2]], <vscale x 8 x i1> [[MASK]], i64 [[VL]])
+// CHECK-RV64-NEXT: ret <vscale x 8 x i1> [[TMP0]]
+//
+vbool8_t test_vmfle_vv_bf16m2_b8_m(vbool8_t mask, vbfloat16m2_t op1, vbfloat16m2_t op2, size_t vl) {
+ return __riscv_vmfle_vv_bf16m2_b8_m(mask, op1, op2, vl);
+}
+
+// CHECK-RV64-LABEL: define dso_local <vscale x 8 x i1> @test_vmfle_vf_bf16m2_b8_m(
+// CHECK-RV64-SAME: <vscale x 8 x i1> [[MASK:%.*]], <vscale x 8 x bfloat> [[OP1:%.*]], bfloat noundef [[OP2:%.*]], i64 noundef [[VL:%.*]]) #[[ATTR0]] {
+// CHECK-RV64-NEXT: [[ENTRY:.*:]]
+// CHECK-RV64-NEXT: [[TMP0:%.*]] = call <vscale x 8 x i1> @llvm.riscv.vmfle.mask.nxv8bf16.bf16.i64(<vscale x 8 x i1> poison, <vscale x 8 x bfloat> [[OP1]], bfloat [[OP2]], <vscale x 8 x i1> [[MASK]], i64 [[VL]])
+// CHECK-RV64-NEXT: ret <vscale x 8 x i1> [[TMP0]]
+//
+vbool8_t test_vmfle_vf_bf16m2_b8_m(vbool8_t mask, vbfloat16m2_t op1, __bf16 op2, size_t vl) {
+ return __riscv_vmfle_vf_bf16m2_b8_m(mask, op1, op2, vl);
+}
+
+// CHECK-RV64-LABEL: define dso_local <vscale x 16 x i1> @test_vmfle_vv_bf16m4_b4_m(
+// CHECK-RV64-SAME: <vscale x 16 x i1> [[MASK:%.*]], <vscale x 16 x bfloat> [[OP1:%.*]], <vscale x 16 x bfloat> [[OP2:%.*]], i64 noundef [[VL:%.*]]) #[[ATTR0]] {
+// CHECK-RV64-NEXT: [[ENTRY:.*:]]
+// CHECK-RV64-NEXT: [[TMP0:%.*]] = call <vscale x 16 x i1> @llvm.riscv.vmfle.mask.nxv16bf16.nxv16bf16.i64(<vscale x 16 x i1> poison, <vscale x 16 x bfloat> [[OP1]], <vscale x 16 x bfloat> [[OP2]], <vscale x 16 x i1> [[MASK]], i64 [[VL]])
+// CHECK-RV64-NEXT: ret <vscale x 16 x i1> [[TMP0]]
+//
+vbool4_t test_vmfle_vv_bf16m4_b4_m(vbool4_t mask, vbfloat16m4_t op1, vbfloat16m4_t op2, size_t vl) {
+ return __riscv_vmfle_vv_bf16m4_b4_m(mask, op1, op2, vl);
+}
+
+// CHECK-RV64-LABEL: define dso_local <vscale x 16 x i1> @test_vmfle_vf_bf16m4_b4_m(
+// CHECK-RV64-SAME: <vscale x 16 x i1> [[MASK:%.*]], <vscale x 16 x bfloat> [[OP1:%.*]], bfloat noundef [[OP2:%.*]], i64 noundef [[VL:%.*]]) #[[ATTR0]] {
+// CHECK-RV64-NEXT: [[ENTRY:.*:]]
+// CHECK-RV64-NEXT: [[TMP0:%.*]] = call <vscale x 16 x i1> @llvm.riscv.vmfle.mask.nxv16bf16.bf16.i64(<vscale x 16 x i1> poison, <vscale x 16 x bfloat> [[OP1]], bfloat [[OP2]], <vscale x 16 x i1> [[MASK]], i64 [[VL]])
+// CHECK-RV64-NEXT: ret <vscale x 16 x i1> [[TMP0]]
+//
+vbool4_t test_vmfle_vf_bf16m4_b4_m(vbool4_t mask, vbfloat16m4_t op1, __bf16 op2, size_t vl) {
+ return __riscv_vmfle_vf_bf16m4_b4_m(mask, op1, op2, vl);
+}
+
+// CHECK-RV64-LABEL: define dso_local <vscale x 32 x i1> @test_vmfle_vv_bf16m8_b2_m(
+// CHECK-RV64-SAME: <vscale x 32 x i1> [[MASK:%.*]], <vscale x 32 x bfloat> [[OP1:%.*]], <vscale x 32 x bfloat> [[OP2:%.*]], i64 noundef [[VL:%.*]]) #[[ATTR0]] {
+// CHECK-RV64-NEXT: [[ENTRY:.*:]]
+// CHECK-RV64-NEXT: [[TMP0:%.*]] = call <vscale x 32 x i1> @llvm.riscv.vmfle.mask.nxv32bf16.nxv32bf16.i64(<vscale x 32 x i1> poison, <vscale x 32 x bfloat> [[OP1]], <vscale x 32 x bfloat> [[OP2]], <vscale x 32 x i1> [[MASK]], i64 [[VL]])
+// CHECK-RV64-NEXT: ret <vscale x 32 x i1> [[TMP0]]
+//
+vbool2_t test_vmfle_vv_bf16m8_b2_m(vbool2_t mask, vbfloat16m8_t op1, vbfloat16m8_t op2, size_t vl) {
+ return __riscv_vmfle_vv_bf16m8_b2_m(mask, op1, op2, vl);
+}
+
+// CHECK-RV64-LABEL: define dso_local <vscale x 32 x i1> @test_vmfle_vf_bf16m8_b2_m(
+// CHECK-RV64-SAME: <vscale x 32 x i1> [[MASK:%.*]], <vscale x 32 x bfloat> [[OP1:%.*]], bfloat noundef [[OP2:%.*]], i64 noundef [[VL:%.*]]) #[[ATTR0]] {
+// CHECK-RV64-NEXT: [[ENTRY:.*:]]
+// CHECK-RV64-NEXT: [[TMP0:%.*]] = call <vscale x 32 x i1> @llvm.riscv.vmfle.mask.nxv32bf16.bf16.i64(<vscale x 32 x i1> poison, <vscale x 32 x bfloat> [[OP1]], bfloat [[OP2]], <vscale x 32 x i1> [[MASK]], i64 [[VL]])
+// CHECK-RV64-NEXT: ret <vscale x 32 x i1> [[TMP0]]
+//
+vbool2_t test_vmfle_vf_bf16m8_b2_m(vbool2_t mask, vbfloat16m8_t op1, __bf16 op2, size_t vl) {
+ return __riscv_vmfle_vf_bf16m8_b2_m(mask, op1, op2, vl);
+}
+
diff --git a/clang/test/CodeGen/RISCV/rvv-intrinsics-autogenerated/zvfbfa/non-policy/non-overloaded/vmflt.c b/clang/test/CodeGen/RISCV/rvv-intrinsics-autogenerated/zvfbfa/non-policy/non-overloaded/vmflt.c
new file mode 100644
index 0000000..f64eee3
--- /dev/null
+++ b/clang/test/CodeGen/RISCV/rvv-intrinsics-autogenerated/zvfbfa/non-policy/non-overloaded/vmflt.c
@@ -0,0 +1,249 @@
+// NOTE: Assertions have been autogenerated by utils/update_cc_test_checks.py UTC_ARGS: --version 5
+// REQUIRES: riscv-registered-target
+// RUN: %clang_cc1 -triple riscv64 -target-feature +v \
+// RUN: -target-feature +experimental-zvfbfa -disable-O0-optnone \
+// RUN: -emit-llvm %s -o - | opt -S -passes=mem2reg | \
+// RUN: FileCheck --check-prefix=CHECK-RV64 %s
+
+#include <riscv_vector.h>
+
+// CHECK-RV64-LABEL: define dso_local <vscale x 1 x i1> @test_vmflt_vv_bf16mf4_b64(
+// CHECK-RV64-SAME: <vscale x 1 x bfloat> [[OP1:%.*]], <vscale x 1 x bfloat> [[OP2:%.*]], i64 noundef [[VL:%.*]]) #[[ATTR0:[0-9]+]] {
+// CHECK-RV64-NEXT: [[ENTRY:.*:]]
+// CHECK-RV64-NEXT: [[TMP0:%.*]] = call <vscale x 1 x i1> @llvm.riscv.vmflt.nxv1bf16.nxv1bf16.i64(<vscale x 1 x bfloat> [[OP1]], <vscale x 1 x bfloat> [[OP2]], i64 [[VL]])
+// CHECK-RV64-NEXT: ret <vscale x 1 x i1> [[TMP0]]
+//
+vbool64_t test_vmflt_vv_bf16mf4_b64(vbfloat16mf4_t op1, vbfloat16mf4_t op2, size_t vl) {
+ return __riscv_vmflt_vv_bf16mf4_b64(op1, op2, vl);
+}
+
+// CHECK-RV64-LABEL: define dso_local <vscale x 1 x i1> @test_vmflt_vf_bf16mf4_b64(
+// CHECK-RV64-SAME: <vscale x 1 x bfloat> [[OP1:%.*]], bfloat noundef [[OP2:%.*]], i64 noundef [[VL:%.*]]) #[[ATTR0]] {
+// CHECK-RV64-NEXT: [[ENTRY:.*:]]
+// CHECK-RV64-NEXT: [[TMP0:%.*]] = call <vscale x 1 x i1> @llvm.riscv.vmflt.nxv1bf16.bf16.i64(<vscale x 1 x bfloat> [[OP1]], bfloat [[OP2]], i64 [[VL]])
+// CHECK-RV64-NEXT: ret <vscale x 1 x i1> [[TMP0]]
+//
+vbool64_t test_vmflt_vf_bf16mf4_b64(vbfloat16mf4_t op1, __bf16 op2, size_t vl) {
+ return __riscv_vmflt_vf_bf16mf4_b64(op1, op2, vl);
+}
+
+// CHECK-RV64-LABEL: define dso_local <vscale x 2 x i1> @test_vmflt_vv_bf16mf2_b32(
+// CHECK-RV64-SAME: <vscale x 2 x bfloat> [[OP1:%.*]], <vscale x 2 x bfloat> [[OP2:%.*]], i64 noundef [[VL:%.*]]) #[[ATTR0]] {
+// CHECK-RV64-NEXT: [[ENTRY:.*:]]
+// CHECK-RV64-NEXT: [[TMP0:%.*]] = call <vscale x 2 x i1> @llvm.riscv.vmflt.nxv2bf16.nxv2bf16.i64(<vscale x 2 x bfloat> [[OP1]], <vscale x 2 x bfloat> [[OP2]], i64 [[VL]])
+// CHECK-RV64-NEXT: ret <vscale x 2 x i1> [[TMP0]]
+//
+vbool32_t test_vmflt_vv_bf16mf2_b32(vbfloat16mf2_t op1, vbfloat16mf2_t op2, size_t vl) {
+ return __riscv_vmflt_vv_bf16mf2_b32(op1, op2, vl);
+}
+
+// CHECK-RV64-LABEL: define dso_local <vscale x 2 x i1> @test_vmflt_vf_bf16mf2_b32(
+// CHECK-RV64-SAME: <vscale x 2 x bfloat> [[OP1:%.*]], bfloat noundef [[OP2:%.*]], i64 noundef [[VL:%.*]]) #[[ATTR0]] {
+// CHECK-RV64-NEXT: [[ENTRY:.*:]]
+// CHECK-RV64-NEXT: [[TMP0:%.*]] = call <vscale x 2 x i1> @llvm.riscv.vmflt.nxv2bf16.bf16.i64(<vscale x 2 x bfloat> [[OP1]], bfloat [[OP2]], i64 [[VL]])
+// CHECK-RV64-NEXT: ret <vscale x 2 x i1> [[TMP0]]
+//
+vbool32_t test_vmflt_vf_bf16mf2_b32(vbfloat16mf2_t op1, __bf16 op2, size_t vl) {
+ return __riscv_vmflt_vf_bf16mf2_b32(op1, op2, vl);
+}
+
+// CHECK-RV64-LABEL: define dso_local <vscale x 4 x i1> @test_vmflt_vv_bf16m1_b16(
+// CHECK-RV64-SAME: <vscale x 4 x bfloat> [[OP1:%.*]], <vscale x 4 x bfloat> [[OP2:%.*]], i64 noundef [[VL:%.*]]) #[[ATTR0]] {
+// CHECK-RV64-NEXT: [[ENTRY:.*:]]
+// CHECK-RV64-NEXT: [[TMP0:%.*]] = call <vscale x 4 x i1> @llvm.riscv.vmflt.nxv4bf16.nxv4bf16.i64(<vscale x 4 x bfloat> [[OP1]], <vscale x 4 x bfloat> [[OP2]], i64 [[VL]])
+// CHECK-RV64-NEXT: ret <vscale x 4 x i1> [[TMP0]]
+//
+vbool16_t test_vmflt_vv_bf16m1_b16(vbfloat16m1_t op1, vbfloat16m1_t op2, size_t vl) {
+ return __riscv_vmflt_vv_bf16m1_b16(op1, op2, vl);
+}
+
+// CHECK-RV64-LABEL: define dso_local <vscale x 4 x i1> @test_vmflt_vf_bf16m1_b16(
+// CHECK-RV64-SAME: <vscale x 4 x bfloat> [[OP1:%.*]], bfloat noundef [[OP2:%.*]], i64 noundef [[VL:%.*]]) #[[ATTR0]] {
+// CHECK-RV64-NEXT: [[ENTRY:.*:]]
+// CHECK-RV64-NEXT: [[TMP0:%.*]] = call <vscale x 4 x i1> @llvm.riscv.vmflt.nxv4bf16.bf16.i64(<vscale x 4 x bfloat> [[OP1]], bfloat [[OP2]], i64 [[VL]])
+// CHECK-RV64-NEXT: ret <vscale x 4 x i1> [[TMP0]]
+//
+vbool16_t test_vmflt_vf_bf16m1_b16(vbfloat16m1_t op1, __bf16 op2, size_t vl) {
+ return __riscv_vmflt_vf_bf16m1_b16(op1, op2, vl);
+}
+
+// CHECK-RV64-LABEL: define dso_local <vscale x 8 x i1> @test_vmflt_vv_bf16m2_b8(
+// CHECK-RV64-SAME: <vscale x 8 x bfloat> [[OP1:%.*]], <vscale x 8 x bfloat> [[OP2:%.*]], i64 noundef [[VL:%.*]]) #[[ATTR0]] {
+// CHECK-RV64-NEXT: [[ENTRY:.*:]]
+// CHECK-RV64-NEXT: [[TMP0:%.*]] = call <vscale x 8 x i1> @llvm.riscv.vmflt.nxv8bf16.nxv8bf16.i64(<vscale x 8 x bfloat> [[OP1]], <vscale x 8 x bfloat> [[OP2]], i64 [[VL]])
+// CHECK-RV64-NEXT: ret <vscale x 8 x i1> [[TMP0]]
+//
+vbool8_t test_vmflt_vv_bf16m2_b8(vbfloat16m2_t op1, vbfloat16m2_t op2, size_t vl) {
+ return __riscv_vmflt_vv_bf16m2_b8(op1, op2, vl);
+}
+
+// CHECK-RV64-LABEL: define dso_local <vscale x 8 x i1> @test_vmflt_vf_bf16m2_b8(
+// CHECK-RV64-SAME: <vscale x 8 x bfloat> [[OP1:%.*]], bfloat noundef [[OP2:%.*]], i64 noundef [[VL:%.*]]) #[[ATTR0]] {
+// CHECK-RV64-NEXT: [[ENTRY:.*:]]
+// CHECK-RV64-NEXT: [[TMP0:%.*]] = call <vscale x 8 x i1> @llvm.riscv.vmflt.nxv8bf16.bf16.i64(<vscale x 8 x bfloat> [[OP1]], bfloat [[OP2]], i64 [[VL]])
+// CHECK-RV64-NEXT: ret <vscale x 8 x i1> [[TMP0]]
+//
+vbool8_t test_vmflt_vf_bf16m2_b8(vbfloat16m2_t op1, __bf16 op2, size_t vl) {
+ return __riscv_vmflt_vf_bf16m2_b8(op1, op2, vl);
+}
+
+// CHECK-RV64-LABEL: define dso_local <vscale x 16 x i1> @test_vmflt_vv_bf16m4_b4(
+// CHECK-RV64-SAME: <vscale x 16 x bfloat> [[OP1:%.*]], <vscale x 16 x bfloat> [[OP2:%.*]], i64 noundef [[VL:%.*]]) #[[ATTR0]] {
+// CHECK-RV64-NEXT: [[ENTRY:.*:]]
+// CHECK-RV64-NEXT: [[TMP0:%.*]] = call <vscale x 16 x i1> @llvm.riscv.vmflt.nxv16bf16.nxv16bf16.i64(<vscale x 16 x bfloat> [[OP1]], <vscale x 16 x bfloat> [[OP2]], i64 [[VL]])
+// CHECK-RV64-NEXT: ret <vscale x 16 x i1> [[TMP0]]
+//
+vbool4_t test_vmflt_vv_bf16m4_b4(vbfloat16m4_t op1, vbfloat16m4_t op2, size_t vl) {
+ return __riscv_vmflt_vv_bf16m4_b4(op1, op2, vl);
+}
+
+// CHECK-RV64-LABEL: define dso_local <vscale x 16 x i1> @test_vmflt_vf_bf16m4_b4(
+// CHECK-RV64-SAME: <vscale x 16 x bfloat> [[OP1:%.*]], bfloat noundef [[OP2:%.*]], i64 noundef [[VL:%.*]]) #[[ATTR0]] {
+// CHECK-RV64-NEXT: [[ENTRY:.*:]]
+// CHECK-RV64-NEXT: [[TMP0:%.*]] = call <vscale x 16 x i1> @llvm.riscv.vmflt.nxv16bf16.bf16.i64(<vscale x 16 x bfloat> [[OP1]], bfloat [[OP2]], i64 [[VL]])
+// CHECK-RV64-NEXT: ret <vscale x 16 x i1> [[TMP0]]
+//
+vbool4_t test_vmflt_vf_bf16m4_b4(vbfloat16m4_t op1, __bf16 op2, size_t vl) {
+ return __riscv_vmflt_vf_bf16m4_b4(op1, op2, vl);
+}
+
+// CHECK-RV64-LABEL: define dso_local <vscale x 32 x i1> @test_vmflt_vv_bf16m8_b2(
+// CHECK-RV64-SAME: <vscale x 32 x bfloat> [[OP1:%.*]], <vscale x 32 x bfloat> [[OP2:%.*]], i64 noundef [[VL:%.*]]) #[[ATTR0]] {
+// CHECK-RV64-NEXT: [[ENTRY:.*:]]
+// CHECK-RV64-NEXT: [[TMP0:%.*]] = call <vscale x 32 x i1> @llvm.riscv.vmflt.nxv32bf16.nxv32bf16.i64(<vscale x 32 x bfloat> [[OP1]], <vscale x 32 x bfloat> [[OP2]], i64 [[VL]])
+// CHECK-RV64-NEXT: ret <vscale x 32 x i1> [[TMP0]]
+//
+vbool2_t test_vmflt_vv_bf16m8_b2(vbfloat16m8_t op1, vbfloat16m8_t op2, size_t vl) {
+ return __riscv_vmflt_vv_bf16m8_b2(op1, op2, vl);
+}
+
+// CHECK-RV64-LABEL: define dso_local <vscale x 32 x i1> @test_vmflt_vf_bf16m8_b2(
+// CHECK-RV64-SAME: <vscale x 32 x bfloat> [[OP1:%.*]], bfloat noundef [[OP2:%.*]], i64 noundef [[VL:%.*]]) #[[ATTR0]] {
+// CHECK-RV64-NEXT: [[ENTRY:.*:]]
+// CHECK-RV64-NEXT: [[TMP0:%.*]] = call <vscale x 32 x i1> @llvm.riscv.vmflt.nxv32bf16.bf16.i64(<vscale x 32 x bfloat> [[OP1]], bfloat [[OP2]], i64 [[VL]])
+// CHECK-RV64-NEXT: ret <vscale x 32 x i1> [[TMP0]]
+//
+vbool2_t test_vmflt_vf_bf16m8_b2(vbfloat16m8_t op1, __bf16 op2, size_t vl) {
+ return __riscv_vmflt_vf_bf16m8_b2(op1, op2, vl);
+}
+
+// CHECK-RV64-LABEL: define dso_local <vscale x 1 x i1> @test_vmflt_vv_bf16mf4_b64_m(
+// CHECK-RV64-SAME: <vscale x 1 x i1> [[MASK:%.*]], <vscale x 1 x bfloat> [[OP1:%.*]], <vscale x 1 x bfloat> [[OP2:%.*]], i64 noundef [[VL:%.*]]) #[[ATTR0]] {
+// CHECK-RV64-NEXT: [[ENTRY:.*:]]
+// CHECK-RV64-NEXT: [[TMP0:%.*]] = call <vscale x 1 x i1> @llvm.riscv.vmflt.mask.nxv1bf16.nxv1bf16.i64(<vscale x 1 x i1> poison, <vscale x 1 x bfloat> [[OP1]], <vscale x 1 x bfloat> [[OP2]], <vscale x 1 x i1> [[MASK]], i64 [[VL]])
+// CHECK-RV64-NEXT: ret <vscale x 1 x i1> [[TMP0]]
+//
+vbool64_t test_vmflt_vv_bf16mf4_b64_m(vbool64_t mask, vbfloat16mf4_t op1, vbfloat16mf4_t op2, size_t vl) {
+ return __riscv_vmflt_vv_bf16mf4_b64_m(mask, op1, op2, vl);
+}
+
+// CHECK-RV64-LABEL: define dso_local <vscale x 1 x i1> @test_vmflt_vf_bf16mf4_b64_m(
+// CHECK-RV64-SAME: <vscale x 1 x i1> [[MASK:%.*]], <vscale x 1 x bfloat> [[OP1:%.*]], bfloat noundef [[OP2:%.*]], i64 noundef [[VL:%.*]]) #[[ATTR0]] {
+// CHECK-RV64-NEXT: [[ENTRY:.*:]]
+// CHECK-RV64-NEXT: [[TMP0:%.*]] = call <vscale x 1 x i1> @llvm.riscv.vmflt.mask.nxv1bf16.bf16.i64(<vscale x 1 x i1> poison, <vscale x 1 x bfloat> [[OP1]], bfloat [[OP2]], <vscale x 1 x i1> [[MASK]], i64 [[VL]])
+// CHECK-RV64-NEXT: ret <vscale x 1 x i1> [[TMP0]]
+//
+vbool64_t test_vmflt_vf_bf16mf4_b64_m(vbool64_t mask, vbfloat16mf4_t op1, __bf16 op2, size_t vl) {
+ return __riscv_vmflt_vf_bf16mf4_b64_m(mask, op1, op2, vl);
+}
+
+// CHECK-RV64-LABEL: define dso_local <vscale x 2 x i1> @test_vmflt_vv_bf16mf2_b32_m(
+// CHECK-RV64-SAME: <vscale x 2 x i1> [[MASK:%.*]], <vscale x 2 x bfloat> [[OP1:%.*]], <vscale x 2 x bfloat> [[OP2:%.*]], i64 noundef [[VL:%.*]]) #[[ATTR0]] {
+// CHECK-RV64-NEXT: [[ENTRY:.*:]]
+// CHECK-RV64-NEXT: [[TMP0:%.*]] = call <vscale x 2 x i1> @llvm.riscv.vmflt.mask.nxv2bf16.nxv2bf16.i64(<vscale x 2 x i1> poison, <vscale x 2 x bfloat> [[OP1]], <vscale x 2 x bfloat> [[OP2]], <vscale x 2 x i1> [[MASK]], i64 [[VL]])
+// CHECK-RV64-NEXT: ret <vscale x 2 x i1> [[TMP0]]
+//
+vbool32_t test_vmflt_vv_bf16mf2_b32_m(vbool32_t mask, vbfloat16mf2_t op1, vbfloat16mf2_t op2, size_t vl) {
+ return __riscv_vmflt_vv_bf16mf2_b32_m(mask, op1, op2, vl);
+}
+
+// CHECK-RV64-LABEL: define dso_local <vscale x 2 x i1> @test_vmflt_vf_bf16mf2_b32_m(
+// CHECK-RV64-SAME: <vscale x 2 x i1> [[MASK:%.*]], <vscale x 2 x bfloat> [[OP1:%.*]], bfloat noundef [[OP2:%.*]], i64 noundef [[VL:%.*]]) #[[ATTR0]] {
+// CHECK-RV64-NEXT: [[ENTRY:.*:]]
+// CHECK-RV64-NEXT: [[TMP0:%.*]] = call <vscale x 2 x i1> @llvm.riscv.vmflt.mask.nxv2bf16.bf16.i64(<vscale x 2 x i1> poison, <vscale x 2 x bfloat> [[OP1]], bfloat [[OP2]], <vscale x 2 x i1> [[MASK]], i64 [[VL]])
+// CHECK-RV64-NEXT: ret <vscale x 2 x i1> [[TMP0]]
+//
+vbool32_t test_vmflt_vf_bf16mf2_b32_m(vbool32_t mask, vbfloat16mf2_t op1, __bf16 op2, size_t vl) {
+ return __riscv_vmflt_vf_bf16mf2_b32_m(mask, op1, op2, vl);
+}
+
+// CHECK-RV64-LABEL: define dso_local <vscale x 4 x i1> @test_vmflt_vv_bf16m1_b16_m(
+// CHECK-RV64-SAME: <vscale x 4 x i1> [[MASK:%.*]], <vscale x 4 x bfloat> [[OP1:%.*]], <vscale x 4 x bfloat> [[OP2:%.*]], i64 noundef [[VL:%.*]]) #[[ATTR0]] {
+// CHECK-RV64-NEXT: [[ENTRY:.*:]]
+// CHECK-RV64-NEXT: [[TMP0:%.*]] = call <vscale x 4 x i1> @llvm.riscv.vmflt.mask.nxv4bf16.nxv4bf16.i64(<vscale x 4 x i1> poison, <vscale x 4 x bfloat> [[OP1]], <vscale x 4 x bfloat> [[OP2]], <vscale x 4 x i1> [[MASK]], i64 [[VL]])
+// CHECK-RV64-NEXT: ret <vscale x 4 x i1> [[TMP0]]
+//
+vbool16_t test_vmflt_vv_bf16m1_b16_m(vbool16_t mask, vbfloat16m1_t op1, vbfloat16m1_t op2, size_t vl) {
+ return __riscv_vmflt_vv_bf16m1_b16_m(mask, op1, op2, vl);
+}
+
+// CHECK-RV64-LABEL: define dso_local <vscale x 4 x i1> @test_vmflt_vf_bf16m1_b16_m(
+// CHECK-RV64-SAME: <vscale x 4 x i1> [[MASK:%.*]], <vscale x 4 x bfloat> [[OP1:%.*]], bfloat noundef [[OP2:%.*]], i64 noundef [[VL:%.*]]) #[[ATTR0]] {
+// CHECK-RV64-NEXT: [[ENTRY:.*:]]
+// CHECK-RV64-NEXT: [[TMP0:%.*]] = call <vscale x 4 x i1> @llvm.riscv.vmflt.mask.nxv4bf16.bf16.i64(<vscale x 4 x i1> poison, <vscale x 4 x bfloat> [[OP1]], bfloat [[OP2]], <vscale x 4 x i1> [[MASK]], i64 [[VL]])
+// CHECK-RV64-NEXT: ret <vscale x 4 x i1> [[TMP0]]
+//
+vbool16_t test_vmflt_vf_bf16m1_b16_m(vbool16_t mask, vbfloat16m1_t op1, __bf16 op2, size_t vl) {
+ return __riscv_vmflt_vf_bf16m1_b16_m(mask, op1, op2, vl);
+}
+
+// CHECK-RV64-LABEL: define dso_local <vscale x 8 x i1> @test_vmflt_vv_bf16m2_b8_m(
+// CHECK-RV64-SAME: <vscale x 8 x i1> [[MASK:%.*]], <vscale x 8 x bfloat> [[OP1:%.*]], <vscale x 8 x bfloat> [[OP2:%.*]], i64 noundef [[VL:%.*]]) #[[ATTR0]] {
+// CHECK-RV64-NEXT: [[ENTRY:.*:]]
+// CHECK-RV64-NEXT: [[TMP0:%.*]] = call <vscale x 8 x i1> @llvm.riscv.vmflt.mask.nxv8bf16.nxv8bf16.i64(<vscale x 8 x i1> poison, <vscale x 8 x bfloat> [[OP1]], <vscale x 8 x bfloat> [[OP2]], <vscale x 8 x i1> [[MASK]], i64 [[VL]])
+// CHECK-RV64-NEXT: ret <vscale x 8 x i1> [[TMP0]]
+//
+vbool8_t test_vmflt_vv_bf16m2_b8_m(vbool8_t mask, vbfloat16m2_t op1, vbfloat16m2_t op2, size_t vl) {
+ return __riscv_vmflt_vv_bf16m2_b8_m(mask, op1, op2, vl);
+}
+
+// CHECK-RV64-LABEL: define dso_local <vscale x 8 x i1> @test_vmflt_vf_bf16m2_b8_m(
+// CHECK-RV64-SAME: <vscale x 8 x i1> [[MASK:%.*]], <vscale x 8 x bfloat> [[OP1:%.*]], bfloat noundef [[OP2:%.*]], i64 noundef [[VL:%.*]]) #[[ATTR0]] {
+// CHECK-RV64-NEXT: [[ENTRY:.*:]]
+// CHECK-RV64-NEXT: [[TMP0:%.*]] = call <vscale x 8 x i1> @llvm.riscv.vmflt.mask.nxv8bf16.bf16.i64(<vscale x 8 x i1> poison, <vscale x 8 x bfloat> [[OP1]], bfloat [[OP2]], <vscale x 8 x i1> [[MASK]], i64 [[VL]])
+// CHECK-RV64-NEXT: ret <vscale x 8 x i1> [[TMP0]]
+//
+vbool8_t test_vmflt_vf_bf16m2_b8_m(vbool8_t mask, vbfloat16m2_t op1, __bf16 op2, size_t vl) {
+ return __riscv_vmflt_vf_bf16m2_b8_m(mask, op1, op2, vl);
+}
+
+// CHECK-RV64-LABEL: define dso_local <vscale x 16 x i1> @test_vmflt_vv_bf16m4_b4_m(
+// CHECK-RV64-SAME: <vscale x 16 x i1> [[MASK:%.*]], <vscale x 16 x bfloat> [[OP1:%.*]], <vscale x 16 x bfloat> [[OP2:%.*]], i64 noundef [[VL:%.*]]) #[[ATTR0]] {
+// CHECK-RV64-NEXT: [[ENTRY:.*:]]
+// CHECK-RV64-NEXT: [[TMP0:%.*]] = call <vscale x 16 x i1> @llvm.riscv.vmflt.mask.nxv16bf16.nxv16bf16.i64(<vscale x 16 x i1> poison, <vscale x 16 x bfloat> [[OP1]], <vscale x 16 x bfloat> [[OP2]], <vscale x 16 x i1> [[MASK]], i64 [[VL]])
+// CHECK-RV64-NEXT: ret <vscale x 16 x i1> [[TMP0]]
+//
+vbool4_t test_vmflt_vv_bf16m4_b4_m(vbool4_t mask, vbfloat16m4_t op1, vbfloat16m4_t op2, size_t vl) {
+ return __riscv_vmflt_vv_bf16m4_b4_m(mask, op1, op2, vl);
+}
+
+// CHECK-RV64-LABEL: define dso_local <vscale x 16 x i1> @test_vmflt_vf_bf16m4_b4_m(
+// CHECK-RV64-SAME: <vscale x 16 x i1> [[MASK:%.*]], <vscale x 16 x bfloat> [[OP1:%.*]], bfloat noundef [[OP2:%.*]], i64 noundef [[VL:%.*]]) #[[ATTR0]] {
+// CHECK-RV64-NEXT: [[ENTRY:.*:]]
+// CHECK-RV64-NEXT: [[TMP0:%.*]] = call <vscale x 16 x i1> @llvm.riscv.vmflt.mask.nxv16bf16.bf16.i64(<vscale x 16 x i1> poison, <vscale x 16 x bfloat> [[OP1]], bfloat [[OP2]], <vscale x 16 x i1> [[MASK]], i64 [[VL]])
+// CHECK-RV64-NEXT: ret <vscale x 16 x i1> [[TMP0]]
+//
+vbool4_t test_vmflt_vf_bf16m4_b4_m(vbool4_t mask, vbfloat16m4_t op1, __bf16 op2, size_t vl) {
+ return __riscv_vmflt_vf_bf16m4_b4_m(mask, op1, op2, vl);
+}
+
+// CHECK-RV64-LABEL: define dso_local <vscale x 32 x i1> @test_vmflt_vv_bf16m8_b2_m(
+// CHECK-RV64-SAME: <vscale x 32 x i1> [[MASK:%.*]], <vscale x 32 x bfloat> [[OP1:%.*]], <vscale x 32 x bfloat> [[OP2:%.*]], i64 noundef [[VL:%.*]]) #[[ATTR0]] {
+// CHECK-RV64-NEXT: [[ENTRY:.*:]]
+// CHECK-RV64-NEXT: [[TMP0:%.*]] = call <vscale x 32 x i1> @llvm.riscv.vmflt.mask.nxv32bf16.nxv32bf16.i64(<vscale x 32 x i1> poison, <vscale x 32 x bfloat> [[OP1]], <vscale x 32 x bfloat> [[OP2]], <vscale x 32 x i1> [[MASK]], i64 [[VL]])
+// CHECK-RV64-NEXT: ret <vscale x 32 x i1> [[TMP0]]
+//
+vbool2_t test_vmflt_vv_bf16m8_b2_m(vbool2_t mask, vbfloat16m8_t op1, vbfloat16m8_t op2, size_t vl) {
+ return __riscv_vmflt_vv_bf16m8_b2_m(mask, op1, op2, vl);
+}
+
+// CHECK-RV64-LABEL: define dso_local <vscale x 32 x i1> @test_vmflt_vf_bf16m8_b2_m(
+// CHECK-RV64-SAME: <vscale x 32 x i1> [[MASK:%.*]], <vscale x 32 x bfloat> [[OP1:%.*]], bfloat noundef [[OP2:%.*]], i64 noundef [[VL:%.*]]) #[[ATTR0]] {
+// CHECK-RV64-NEXT: [[ENTRY:.*:]]
+// CHECK-RV64-NEXT: [[TMP0:%.*]] = call <vscale x 32 x i1> @llvm.riscv.vmflt.mask.nxv32bf16.bf16.i64(<vscale x 32 x i1> poison, <vscale x 32 x bfloat> [[OP1]], bfloat [[OP2]], <vscale x 32 x i1> [[MASK]], i64 [[VL]])
+// CHECK-RV64-NEXT: ret <vscale x 32 x i1> [[TMP0]]
+//
+vbool2_t test_vmflt_vf_bf16m8_b2_m(vbool2_t mask, vbfloat16m8_t op1, __bf16 op2, size_t vl) {
+ return __riscv_vmflt_vf_bf16m8_b2_m(mask, op1, op2, vl);
+}
+
diff --git a/clang/test/CodeGen/RISCV/rvv-intrinsics-autogenerated/zvfbfa/non-policy/non-overloaded/vmfne.c b/clang/test/CodeGen/RISCV/rvv-intrinsics-autogenerated/zvfbfa/non-policy/non-overloaded/vmfne.c
new file mode 100644
index 0000000..809ea56
--- /dev/null
+++ b/clang/test/CodeGen/RISCV/rvv-intrinsics-autogenerated/zvfbfa/non-policy/non-overloaded/vmfne.c
@@ -0,0 +1,249 @@
+// NOTE: Assertions have been autogenerated by utils/update_cc_test_checks.py UTC_ARGS: --version 5
+// REQUIRES: riscv-registered-target
+// RUN: %clang_cc1 -triple riscv64 -target-feature +v \
+// RUN: -target-feature +experimental-zvfbfa -disable-O0-optnone \
+// RUN: -emit-llvm %s -o - | opt -S -passes=mem2reg | \
+// RUN: FileCheck --check-prefix=CHECK-RV64 %s
+
+#include <riscv_vector.h>
+
+// CHECK-RV64-LABEL: define dso_local <vscale x 1 x i1> @test_vmfne_vv_bf16mf4_b64(
+// CHECK-RV64-SAME: <vscale x 1 x bfloat> [[OP1:%.*]], <vscale x 1 x bfloat> [[OP2:%.*]], i64 noundef [[VL:%.*]]) #[[ATTR0:[0-9]+]] {
+// CHECK-RV64-NEXT: [[ENTRY:.*:]]
+// CHECK-RV64-NEXT: [[TMP0:%.*]] = call <vscale x 1 x i1> @llvm.riscv.vmfne.nxv1bf16.nxv1bf16.i64(<vscale x 1 x bfloat> [[OP1]], <vscale x 1 x bfloat> [[OP2]], i64 [[VL]])
+// CHECK-RV64-NEXT: ret <vscale x 1 x i1> [[TMP0]]
+//
+vbool64_t test_vmfne_vv_bf16mf4_b64(vbfloat16mf4_t op1, vbfloat16mf4_t op2, size_t vl) {
+ return __riscv_vmfne_vv_bf16mf4_b64(op1, op2, vl);
+}
+
+// CHECK-RV64-LABEL: define dso_local <vscale x 1 x i1> @test_vmfne_vf_bf16mf4_b64(
+// CHECK-RV64-SAME: <vscale x 1 x bfloat> [[OP1:%.*]], bfloat noundef [[OP2:%.*]], i64 noundef [[VL:%.*]]) #[[ATTR0]] {
+// CHECK-RV64-NEXT: [[ENTRY:.*:]]
+// CHECK-RV64-NEXT: [[TMP0:%.*]] = call <vscale x 1 x i1> @llvm.riscv.vmfne.nxv1bf16.bf16.i64(<vscale x 1 x bfloat> [[OP1]], bfloat [[OP2]], i64 [[VL]])
+// CHECK-RV64-NEXT: ret <vscale x 1 x i1> [[TMP0]]
+//
+vbool64_t test_vmfne_vf_bf16mf4_b64(vbfloat16mf4_t op1, __bf16 op2, size_t vl) {
+ return __riscv_vmfne_vf_bf16mf4_b64(op1, op2, vl);
+}
+
+// CHECK-RV64-LABEL: define dso_local <vscale x 2 x i1> @test_vmfne_vv_bf16mf2_b32(
+// CHECK-RV64-SAME: <vscale x 2 x bfloat> [[OP1:%.*]], <vscale x 2 x bfloat> [[OP2:%.*]], i64 noundef [[VL:%.*]]) #[[ATTR0]] {
+// CHECK-RV64-NEXT: [[ENTRY:.*:]]
+// CHECK-RV64-NEXT: [[TMP0:%.*]] = call <vscale x 2 x i1> @llvm.riscv.vmfne.nxv2bf16.nxv2bf16.i64(<vscale x 2 x bfloat> [[OP1]], <vscale x 2 x bfloat> [[OP2]], i64 [[VL]])
+// CHECK-RV64-NEXT: ret <vscale x 2 x i1> [[TMP0]]
+//
+vbool32_t test_vmfne_vv_bf16mf2_b32(vbfloat16mf2_t op1, vbfloat16mf2_t op2, size_t vl) {
+ return __riscv_vmfne_vv_bf16mf2_b32(op1, op2, vl);
+}
+
+// CHECK-RV64-LABEL: define dso_local <vscale x 2 x i1> @test_vmfne_vf_bf16mf2_b32(
+// CHECK-RV64-SAME: <vscale x 2 x bfloat> [[OP1:%.*]], bfloat noundef [[OP2:%.*]], i64 noundef [[VL:%.*]]) #[[ATTR0]] {
+// CHECK-RV64-NEXT: [[ENTRY:.*:]]
+// CHECK-RV64-NEXT: [[TMP0:%.*]] = call <vscale x 2 x i1> @llvm.riscv.vmfne.nxv2bf16.bf16.i64(<vscale x 2 x bfloat> [[OP1]], bfloat [[OP2]], i64 [[VL]])
+// CHECK-RV64-NEXT: ret <vscale x 2 x i1> [[TMP0]]
+//
+vbool32_t test_vmfne_vf_bf16mf2_b32(vbfloat16mf2_t op1, __bf16 op2, size_t vl) {
+ return __riscv_vmfne_vf_bf16mf2_b32(op1, op2, vl);
+}
+
+// CHECK-RV64-LABEL: define dso_local <vscale x 4 x i1> @test_vmfne_vv_bf16m1_b16(
+// CHECK-RV64-SAME: <vscale x 4 x bfloat> [[OP1:%.*]], <vscale x 4 x bfloat> [[OP2:%.*]], i64 noundef [[VL:%.*]]) #[[ATTR0]] {
+// CHECK-RV64-NEXT: [[ENTRY:.*:]]
+// CHECK-RV64-NEXT: [[TMP0:%.*]] = call <vscale x 4 x i1> @llvm.riscv.vmfne.nxv4bf16.nxv4bf16.i64(<vscale x 4 x bfloat> [[OP1]], <vscale x 4 x bfloat> [[OP2]], i64 [[VL]])
+// CHECK-RV64-NEXT: ret <vscale x 4 x i1> [[TMP0]]
+//
+vbool16_t test_vmfne_vv_bf16m1_b16(vbfloat16m1_t op1, vbfloat16m1_t op2, size_t vl) {
+ return __riscv_vmfne_vv_bf16m1_b16(op1, op2, vl);
+}
+
+// CHECK-RV64-LABEL: define dso_local <vscale x 4 x i1> @test_vmfne_vf_bf16m1_b16(
+// CHECK-RV64-SAME: <vscale x 4 x bfloat> [[OP1:%.*]], bfloat noundef [[OP2:%.*]], i64 noundef [[VL:%.*]]) #[[ATTR0]] {
+// CHECK-RV64-NEXT: [[ENTRY:.*:]]
+// CHECK-RV64-NEXT: [[TMP0:%.*]] = call <vscale x 4 x i1> @llvm.riscv.vmfne.nxv4bf16.bf16.i64(<vscale x 4 x bfloat> [[OP1]], bfloat [[OP2]], i64 [[VL]])
+// CHECK-RV64-NEXT: ret <vscale x 4 x i1> [[TMP0]]
+//
+vbool16_t test_vmfne_vf_bf16m1_b16(vbfloat16m1_t op1, __bf16 op2, size_t vl) {
+ return __riscv_vmfne_vf_bf16m1_b16(op1, op2, vl);
+}
+
+// CHECK-RV64-LABEL: define dso_local <vscale x 8 x i1> @test_vmfne_vv_bf16m2_b8(
+// CHECK-RV64-SAME: <vscale x 8 x bfloat> [[OP1:%.*]], <vscale x 8 x bfloat> [[OP2:%.*]], i64 noundef [[VL:%.*]]) #[[ATTR0]] {
+// CHECK-RV64-NEXT: [[ENTRY:.*:]]
+// CHECK-RV64-NEXT: [[TMP0:%.*]] = call <vscale x 8 x i1> @llvm.riscv.vmfne.nxv8bf16.nxv8bf16.i64(<vscale x 8 x bfloat> [[OP1]], <vscale x 8 x bfloat> [[OP2]], i64 [[VL]])
+// CHECK-RV64-NEXT: ret <vscale x 8 x i1> [[TMP0]]
+//
+vbool8_t test_vmfne_vv_bf16m2_b8(vbfloat16m2_t op1, vbfloat16m2_t op2, size_t vl) {
+ return __riscv_vmfne_vv_bf16m2_b8(op1, op2, vl);
+}
+
+// CHECK-RV64-LABEL: define dso_local <vscale x 8 x i1> @test_vmfne_vf_bf16m2_b8(
+// CHECK-RV64-SAME: <vscale x 8 x bfloat> [[OP1:%.*]], bfloat noundef [[OP2:%.*]], i64 noundef [[VL:%.*]]) #[[ATTR0]] {
+// CHECK-RV64-NEXT: [[ENTRY:.*:]]
+// CHECK-RV64-NEXT: [[TMP0:%.*]] = call <vscale x 8 x i1> @llvm.riscv.vmfne.nxv8bf16.bf16.i64(<vscale x 8 x bfloat> [[OP1]], bfloat [[OP2]], i64 [[VL]])
+// CHECK-RV64-NEXT: ret <vscale x 8 x i1> [[TMP0]]
+//
+vbool8_t test_vmfne_vf_bf16m2_b8(vbfloat16m2_t op1, __bf16 op2, size_t vl) {
+ return __riscv_vmfne_vf_bf16m2_b8(op1, op2, vl);
+}
+
+// CHECK-RV64-LABEL: define dso_local <vscale x 16 x i1> @test_vmfne_vv_bf16m4_b4(
+// CHECK-RV64-SAME: <vscale x 16 x bfloat> [[OP1:%.*]], <vscale x 16 x bfloat> [[OP2:%.*]], i64 noundef [[VL:%.*]]) #[[ATTR0]] {
+// CHECK-RV64-NEXT: [[ENTRY:.*:]]
+// CHECK-RV64-NEXT: [[TMP0:%.*]] = call <vscale x 16 x i1> @llvm.riscv.vmfne.nxv16bf16.nxv16bf16.i64(<vscale x 16 x bfloat> [[OP1]], <vscale x 16 x bfloat> [[OP2]], i64 [[VL]])
+// CHECK-RV64-NEXT: ret <vscale x 16 x i1> [[TMP0]]
+//
+vbool4_t test_vmfne_vv_bf16m4_b4(vbfloat16m4_t op1, vbfloat16m4_t op2, size_t vl) {
+ return __riscv_vmfne_vv_bf16m4_b4(op1, op2, vl);
+}
+
+// CHECK-RV64-LABEL: define dso_local <vscale x 16 x i1> @test_vmfne_vf_bf16m4_b4(
+// CHECK-RV64-SAME: <vscale x 16 x bfloat> [[OP1:%.*]], bfloat noundef [[OP2:%.*]], i64 noundef [[VL:%.*]]) #[[ATTR0]] {
+// CHECK-RV64-NEXT: [[ENTRY:.*:]]
+// CHECK-RV64-NEXT: [[TMP0:%.*]] = call <vscale x 16 x i1> @llvm.riscv.vmfne.nxv16bf16.bf16.i64(<vscale x 16 x bfloat> [[OP1]], bfloat [[OP2]], i64 [[VL]])
+// CHECK-RV64-NEXT: ret <vscale x 16 x i1> [[TMP0]]
+//
+vbool4_t test_vmfne_vf_bf16m4_b4(vbfloat16m4_t op1, __bf16 op2, size_t vl) {
+ return __riscv_vmfne_vf_bf16m4_b4(op1, op2, vl);
+}
+
+// CHECK-RV64-LABEL: define dso_local <vscale x 32 x i1> @test_vmfne_vv_bf16m8_b2(
+// CHECK-RV64-SAME: <vscale x 32 x bfloat> [[OP1:%.*]], <vscale x 32 x bfloat> [[OP2:%.*]], i64 noundef [[VL:%.*]]) #[[ATTR0]] {
+// CHECK-RV64-NEXT: [[ENTRY:.*:]]
+// CHECK-RV64-NEXT: [[TMP0:%.*]] = call <vscale x 32 x i1> @llvm.riscv.vmfne.nxv32bf16.nxv32bf16.i64(<vscale x 32 x bfloat> [[OP1]], <vscale x 32 x bfloat> [[OP2]], i64 [[VL]])
+// CHECK-RV64-NEXT: ret <vscale x 32 x i1> [[TMP0]]
+//
+vbool2_t test_vmfne_vv_bf16m8_b2(vbfloat16m8_t op1, vbfloat16m8_t op2, size_t vl) {
+ return __riscv_vmfne_vv_bf16m8_b2(op1, op2, vl);
+}
+
+// CHECK-RV64-LABEL: define dso_local <vscale x 32 x i1> @test_vmfne_vf_bf16m8_b2(
+// CHECK-RV64-SAME: <vscale x 32 x bfloat> [[OP1:%.*]], bfloat noundef [[OP2:%.*]], i64 noundef [[VL:%.*]]) #[[ATTR0]] {
+// CHECK-RV64-NEXT: [[ENTRY:.*:]]
+// CHECK-RV64-NEXT: [[TMP0:%.*]] = call <vscale x 32 x i1> @llvm.riscv.vmfne.nxv32bf16.bf16.i64(<vscale x 32 x bfloat> [[OP1]], bfloat [[OP2]], i64 [[VL]])
+// CHECK-RV64-NEXT: ret <vscale x 32 x i1> [[TMP0]]
+//
+vbool2_t test_vmfne_vf_bf16m8_b2(vbfloat16m8_t op1, __bf16 op2, size_t vl) {
+ return __riscv_vmfne_vf_bf16m8_b2(op1, op2, vl);
+}
+
+// CHECK-RV64-LABEL: define dso_local <vscale x 1 x i1> @test_vmfne_vv_bf16mf4_b64_m(
+// CHECK-RV64-SAME: <vscale x 1 x i1> [[MASK:%.*]], <vscale x 1 x bfloat> [[OP1:%.*]], <vscale x 1 x bfloat> [[OP2:%.*]], i64 noundef [[VL:%.*]]) #[[ATTR0]] {
+// CHECK-RV64-NEXT: [[ENTRY:.*:]]
+// CHECK-RV64-NEXT: [[TMP0:%.*]] = call <vscale x 1 x i1> @llvm.riscv.vmfne.mask.nxv1bf16.nxv1bf16.i64(<vscale x 1 x i1> poison, <vscale x 1 x bfloat> [[OP1]], <vscale x 1 x bfloat> [[OP2]], <vscale x 1 x i1> [[MASK]], i64 [[VL]])
+// CHECK-RV64-NEXT: ret <vscale x 1 x i1> [[TMP0]]
+//
+vbool64_t test_vmfne_vv_bf16mf4_b64_m(vbool64_t mask, vbfloat16mf4_t op1, vbfloat16mf4_t op2, size_t vl) {
+ return __riscv_vmfne_vv_bf16mf4_b64_m(mask, op1, op2, vl);
+}
+
+// CHECK-RV64-LABEL: define dso_local <vscale x 1 x i1> @test_vmfne_vf_bf16mf4_b64_m(
+// CHECK-RV64-SAME: <vscale x 1 x i1> [[MASK:%.*]], <vscale x 1 x bfloat> [[OP1:%.*]], bfloat noundef [[OP2:%.*]], i64 noundef [[VL:%.*]]) #[[ATTR0]] {
+// CHECK-RV64-NEXT: [[ENTRY:.*:]]
+// CHECK-RV64-NEXT: [[TMP0:%.*]] = call <vscale x 1 x i1> @llvm.riscv.vmfne.mask.nxv1bf16.bf16.i64(<vscale x 1 x i1> poison, <vscale x 1 x bfloat> [[OP1]], bfloat [[OP2]], <vscale x 1 x i1> [[MASK]], i64 [[VL]])
+// CHECK-RV64-NEXT: ret <vscale x 1 x i1> [[TMP0]]
+//
+vbool64_t test_vmfne_vf_bf16mf4_b64_m(vbool64_t mask, vbfloat16mf4_t op1, __bf16 op2, size_t vl) {
+ return __riscv_vmfne_vf_bf16mf4_b64_m(mask, op1, op2, vl);
+}
+
+// CHECK-RV64-LABEL: define dso_local <vscale x 2 x i1> @test_vmfne_vv_bf16mf2_b32_m(
+// CHECK-RV64-SAME: <vscale x 2 x i1> [[MASK:%.*]], <vscale x 2 x bfloat> [[OP1:%.*]], <vscale x 2 x bfloat> [[OP2:%.*]], i64 noundef [[VL:%.*]]) #[[ATTR0]] {
+// CHECK-RV64-NEXT: [[ENTRY:.*:]]
+// CHECK-RV64-NEXT: [[TMP0:%.*]] = call <vscale x 2 x i1> @llvm.riscv.vmfne.mask.nxv2bf16.nxv2bf16.i64(<vscale x 2 x i1> poison, <vscale x 2 x bfloat> [[OP1]], <vscale x 2 x bfloat> [[OP2]], <vscale x 2 x i1> [[MASK]], i64 [[VL]])
+// CHECK-RV64-NEXT: ret <vscale x 2 x i1> [[TMP0]]
+//
+vbool32_t test_vmfne_vv_bf16mf2_b32_m(vbool32_t mask, vbfloat16mf2_t op1, vbfloat16mf2_t op2, size_t vl) {
+ return __riscv_vmfne_vv_bf16mf2_b32_m(mask, op1, op2, vl);
+}
+
+// CHECK-RV64-LABEL: define dso_local <vscale x 2 x i1> @test_vmfne_vf_bf16mf2_b32_m(
+// CHECK-RV64-SAME: <vscale x 2 x i1> [[MASK:%.*]], <vscale x 2 x bfloat> [[OP1:%.*]], bfloat noundef [[OP2:%.*]], i64 noundef [[VL:%.*]]) #[[ATTR0]] {
+// CHECK-RV64-NEXT: [[ENTRY:.*:]]
+// CHECK-RV64-NEXT: [[TMP0:%.*]] = call <vscale x 2 x i1> @llvm.riscv.vmfne.mask.nxv2bf16.bf16.i64(<vscale x 2 x i1> poison, <vscale x 2 x bfloat> [[OP1]], bfloat [[OP2]], <vscale x 2 x i1> [[MASK]], i64 [[VL]])
+// CHECK-RV64-NEXT: ret <vscale x 2 x i1> [[TMP0]]
+//
+vbool32_t test_vmfne_vf_bf16mf2_b32_m(vbool32_t mask, vbfloat16mf2_t op1, __bf16 op2, size_t vl) {
+ return __riscv_vmfne_vf_bf16mf2_b32_m(mask, op1, op2, vl);
+}
+
+// CHECK-RV64-LABEL: define dso_local <vscale x 4 x i1> @test_vmfne_vv_bf16m1_b16_m(
+// CHECK-RV64-SAME: <vscale x 4 x i1> [[MASK:%.*]], <vscale x 4 x bfloat> [[OP1:%.*]], <vscale x 4 x bfloat> [[OP2:%.*]], i64 noundef [[VL:%.*]]) #[[ATTR0]] {
+// CHECK-RV64-NEXT: [[ENTRY:.*:]]
+// CHECK-RV64-NEXT: [[TMP0:%.*]] = call <vscale x 4 x i1> @llvm.riscv.vmfne.mask.nxv4bf16.nxv4bf16.i64(<vscale x 4 x i1> poison, <vscale x 4 x bfloat> [[OP1]], <vscale x 4 x bfloat> [[OP2]], <vscale x 4 x i1> [[MASK]], i64 [[VL]])
+// CHECK-RV64-NEXT: ret <vscale x 4 x i1> [[TMP0]]
+//
+vbool16_t test_vmfne_vv_bf16m1_b16_m(vbool16_t mask, vbfloat16m1_t op1, vbfloat16m1_t op2, size_t vl) {
+ return __riscv_vmfne_vv_bf16m1_b16_m(mask, op1, op2, vl);
+}
+
+// CHECK-RV64-LABEL: define dso_local <vscale x 4 x i1> @test_vmfne_vf_bf16m1_b16_m(
+// CHECK-RV64-SAME: <vscale x 4 x i1> [[MASK:%.*]], <vscale x 4 x bfloat> [[OP1:%.*]], bfloat noundef [[OP2:%.*]], i64 noundef [[VL:%.*]]) #[[ATTR0]] {
+// CHECK-RV64-NEXT: [[ENTRY:.*:]]
+// CHECK-RV64-NEXT: [[TMP0:%.*]] = call <vscale x 4 x i1> @llvm.riscv.vmfne.mask.nxv4bf16.bf16.i64(<vscale x 4 x i1> poison, <vscale x 4 x bfloat> [[OP1]], bfloat [[OP2]], <vscale x 4 x i1> [[MASK]], i64 [[VL]])
+// CHECK-RV64-NEXT: ret <vscale x 4 x i1> [[TMP0]]
+//
+vbool16_t test_vmfne_vf_bf16m1_b16_m(vbool16_t mask, vbfloat16m1_t op1, __bf16 op2, size_t vl) {
+ return __riscv_vmfne_vf_bf16m1_b16_m(mask, op1, op2, vl);
+}
+
+// CHECK-RV64-LABEL: define dso_local <vscale x 8 x i1> @test_vmfne_vv_bf16m2_b8_m(
+// CHECK-RV64-SAME: <vscale x 8 x i1> [[MASK:%.*]], <vscale x 8 x bfloat> [[OP1:%.*]], <vscale x 8 x bfloat> [[OP2:%.*]], i64 noundef [[VL:%.*]]) #[[ATTR0]] {
+// CHECK-RV64-NEXT: [[ENTRY:.*:]]
+// CHECK-RV64-NEXT: [[TMP0:%.*]] = call <vscale x 8 x i1> @llvm.riscv.vmfne.mask.nxv8bf16.nxv8bf16.i64(<vscale x 8 x i1> poison, <vscale x 8 x bfloat> [[OP1]], <vscale x 8 x bfloat> [[OP2]], <vscale x 8 x i1> [[MASK]], i64 [[VL]])
+// CHECK-RV64-NEXT: ret <vscale x 8 x i1> [[TMP0]]
+//
+vbool8_t test_vmfne_vv_bf16m2_b8_m(vbool8_t mask, vbfloat16m2_t op1, vbfloat16m2_t op2, size_t vl) {
+ return __riscv_vmfne_vv_bf16m2_b8_m(mask, op1, op2, vl);
+}
+
+// CHECK-RV64-LABEL: define dso_local <vscale x 8 x i1> @test_vmfne_vf_bf16m2_b8_m(
+// CHECK-RV64-SAME: <vscale x 8 x i1> [[MASK:%.*]], <vscale x 8 x bfloat> [[OP1:%.*]], bfloat noundef [[OP2:%.*]], i64 noundef [[VL:%.*]]) #[[ATTR0]] {
+// CHECK-RV64-NEXT: [[ENTRY:.*:]]
+// CHECK-RV64-NEXT: [[TMP0:%.*]] = call <vscale x 8 x i1> @llvm.riscv.vmfne.mask.nxv8bf16.bf16.i64(<vscale x 8 x i1> poison, <vscale x 8 x bfloat> [[OP1]], bfloat [[OP2]], <vscale x 8 x i1> [[MASK]], i64 [[VL]])
+// CHECK-RV64-NEXT: ret <vscale x 8 x i1> [[TMP0]]
+//
+vbool8_t test_vmfne_vf_bf16m2_b8_m(vbool8_t mask, vbfloat16m2_t op1, __bf16 op2, size_t vl) {
+ return __riscv_vmfne_vf_bf16m2_b8_m(mask, op1, op2, vl);
+}
+
+// CHECK-RV64-LABEL: define dso_local <vscale x 16 x i1> @test_vmfne_vv_bf16m4_b4_m(
+// CHECK-RV64-SAME: <vscale x 16 x i1> [[MASK:%.*]], <vscale x 16 x bfloat> [[OP1:%.*]], <vscale x 16 x bfloat> [[OP2:%.*]], i64 noundef [[VL:%.*]]) #[[ATTR0]] {
+// CHECK-RV64-NEXT: [[ENTRY:.*:]]
+// CHECK-RV64-NEXT: [[TMP0:%.*]] = call <vscale x 16 x i1> @llvm.riscv.vmfne.mask.nxv16bf16.nxv16bf16.i64(<vscale x 16 x i1> poison, <vscale x 16 x bfloat> [[OP1]], <vscale x 16 x bfloat> [[OP2]], <vscale x 16 x i1> [[MASK]], i64 [[VL]])
+// CHECK-RV64-NEXT: ret <vscale x 16 x i1> [[TMP0]]
+//
+vbool4_t test_vmfne_vv_bf16m4_b4_m(vbool4_t mask, vbfloat16m4_t op1, vbfloat16m4_t op2, size_t vl) {
+ return __riscv_vmfne_vv_bf16m4_b4_m(mask, op1, op2, vl);
+}
+
+// CHECK-RV64-LABEL: define dso_local <vscale x 16 x i1> @test_vmfne_vf_bf16m4_b4_m(
+// CHECK-RV64-SAME: <vscale x 16 x i1> [[MASK:%.*]], <vscale x 16 x bfloat> [[OP1:%.*]], bfloat noundef [[OP2:%.*]], i64 noundef [[VL:%.*]]) #[[ATTR0]] {
+// CHECK-RV64-NEXT: [[ENTRY:.*:]]
+// CHECK-RV64-NEXT: [[TMP0:%.*]] = call <vscale x 16 x i1> @llvm.riscv.vmfne.mask.nxv16bf16.bf16.i64(<vscale x 16 x i1> poison, <vscale x 16 x bfloat> [[OP1]], bfloat [[OP2]], <vscale x 16 x i1> [[MASK]], i64 [[VL]])
+// CHECK-RV64-NEXT: ret <vscale x 16 x i1> [[TMP0]]
+//
+vbool4_t test_vmfne_vf_bf16m4_b4_m(vbool4_t mask, vbfloat16m4_t op1, __bf16 op2, size_t vl) {
+ return __riscv_vmfne_vf_bf16m4_b4_m(mask, op1, op2, vl);
+}
+
+// CHECK-RV64-LABEL: define dso_local <vscale x 32 x i1> @test_vmfne_vv_bf16m8_b2_m(
+// CHECK-RV64-SAME: <vscale x 32 x i1> [[MASK:%.*]], <vscale x 32 x bfloat> [[OP1:%.*]], <vscale x 32 x bfloat> [[OP2:%.*]], i64 noundef [[VL:%.*]]) #[[ATTR0]] {
+// CHECK-RV64-NEXT: [[ENTRY:.*:]]
+// CHECK-RV64-NEXT: [[TMP0:%.*]] = call <vscale x 32 x i1> @llvm.riscv.vmfne.mask.nxv32bf16.nxv32bf16.i64(<vscale x 32 x i1> poison, <vscale x 32 x bfloat> [[OP1]], <vscale x 32 x bfloat> [[OP2]], <vscale x 32 x i1> [[MASK]], i64 [[VL]])
+// CHECK-RV64-NEXT: ret <vscale x 32 x i1> [[TMP0]]
+//
+vbool2_t test_vmfne_vv_bf16m8_b2_m(vbool2_t mask, vbfloat16m8_t op1, vbfloat16m8_t op2, size_t vl) {
+ return __riscv_vmfne_vv_bf16m8_b2_m(mask, op1, op2, vl);
+}
+
+// CHECK-RV64-LABEL: define dso_local <vscale x 32 x i1> @test_vmfne_vf_bf16m8_b2_m(
+// CHECK-RV64-SAME: <vscale x 32 x i1> [[MASK:%.*]], <vscale x 32 x bfloat> [[OP1:%.*]], bfloat noundef [[OP2:%.*]], i64 noundef [[VL:%.*]]) #[[ATTR0]] {
+// CHECK-RV64-NEXT: [[ENTRY:.*:]]
+// CHECK-RV64-NEXT: [[TMP0:%.*]] = call <vscale x 32 x i1> @llvm.riscv.vmfne.mask.nxv32bf16.bf16.i64(<vscale x 32 x i1> poison, <vscale x 32 x bfloat> [[OP1]], bfloat [[OP2]], <vscale x 32 x i1> [[MASK]], i64 [[VL]])
+// CHECK-RV64-NEXT: ret <vscale x 32 x i1> [[TMP0]]
+//
+vbool2_t test_vmfne_vf_bf16m8_b2_m(vbool2_t mask, vbfloat16m8_t op1, __bf16 op2, size_t vl) {
+ return __riscv_vmfne_vf_bf16m8_b2_m(mask, op1, op2, vl);
+}
+
diff --git a/clang/test/CodeGen/RISCV/rvv-intrinsics-autogenerated/zvfbfa/non-policy/overloaded/vfadd.c b/clang/test/CodeGen/RISCV/rvv-intrinsics-autogenerated/zvfbfa/non-policy/overloaded/vfadd.c
new file mode 100644
index 0000000..9d6b071c
--- /dev/null
+++ b/clang/test/CodeGen/RISCV/rvv-intrinsics-autogenerated/zvfbfa/non-policy/overloaded/vfadd.c
@@ -0,0 +1,249 @@
+// NOTE: Assertions have been autogenerated by utils/update_cc_test_checks.py UTC_ARGS: --version 5
+// REQUIRES: riscv-registered-target
+// RUN: %clang_cc1 -triple riscv64 -target-feature +v \
+// RUN: -target-feature +experimental-zvfbfa -disable-O0-optnone \
+// RUN: -emit-llvm %s -o - | opt -S -passes=mem2reg | \
+// RUN: FileCheck --check-prefix=CHECK-RV64 %s
+
+#include <riscv_vector.h>
+
+// CHECK-RV64-LABEL: define dso_local <vscale x 1 x bfloat> @test_vfadd_vv_bf16mf4(
+// CHECK-RV64-SAME: <vscale x 1 x bfloat> [[OP1:%.*]], <vscale x 1 x bfloat> [[OP2:%.*]], i64 noundef [[VL:%.*]]) #[[ATTR0:[0-9]+]] {
+// CHECK-RV64-NEXT: [[ENTRY:.*:]]
+// CHECK-RV64-NEXT: [[TMP0:%.*]] = call <vscale x 1 x bfloat> @llvm.riscv.vfadd.nxv1bf16.nxv1bf16.i64(<vscale x 1 x bfloat> poison, <vscale x 1 x bfloat> [[OP1]], <vscale x 1 x bfloat> [[OP2]], i64 7, i64 [[VL]])
+// CHECK-RV64-NEXT: ret <vscale x 1 x bfloat> [[TMP0]]
+//
+vbfloat16mf4_t test_vfadd_vv_bf16mf4(vbfloat16mf4_t op1, vbfloat16mf4_t op2, size_t vl) {
+ return __riscv_vfadd(op1, op2, vl);
+}
+
+// CHECK-RV64-LABEL: define dso_local <vscale x 1 x bfloat> @test_vfadd_vf_bf16mf4(
+// CHECK-RV64-SAME: <vscale x 1 x bfloat> [[OP1:%.*]], bfloat noundef [[OP2:%.*]], i64 noundef [[VL:%.*]]) #[[ATTR0]] {
+// CHECK-RV64-NEXT: [[ENTRY:.*:]]
+// CHECK-RV64-NEXT: [[TMP0:%.*]] = call <vscale x 1 x bfloat> @llvm.riscv.vfadd.nxv1bf16.bf16.i64(<vscale x 1 x bfloat> poison, <vscale x 1 x bfloat> [[OP1]], bfloat [[OP2]], i64 7, i64 [[VL]])
+// CHECK-RV64-NEXT: ret <vscale x 1 x bfloat> [[TMP0]]
+//
+vbfloat16mf4_t test_vfadd_vf_bf16mf4(vbfloat16mf4_t op1, __bf16 op2, size_t vl) {
+ return __riscv_vfadd(op1, op2, vl);
+}
+
+// CHECK-RV64-LABEL: define dso_local <vscale x 2 x bfloat> @test_vfadd_vv_bf16mf2(
+// CHECK-RV64-SAME: <vscale x 2 x bfloat> [[OP1:%.*]], <vscale x 2 x bfloat> [[OP2:%.*]], i64 noundef [[VL:%.*]]) #[[ATTR0]] {
+// CHECK-RV64-NEXT: [[ENTRY:.*:]]
+// CHECK-RV64-NEXT: [[TMP0:%.*]] = call <vscale x 2 x bfloat> @llvm.riscv.vfadd.nxv2bf16.nxv2bf16.i64(<vscale x 2 x bfloat> poison, <vscale x 2 x bfloat> [[OP1]], <vscale x 2 x bfloat> [[OP2]], i64 7, i64 [[VL]])
+// CHECK-RV64-NEXT: ret <vscale x 2 x bfloat> [[TMP0]]
+//
+vbfloat16mf2_t test_vfadd_vv_bf16mf2(vbfloat16mf2_t op1, vbfloat16mf2_t op2, size_t vl) {
+ return __riscv_vfadd(op1, op2, vl);
+}
+
+// CHECK-RV64-LABEL: define dso_local <vscale x 2 x bfloat> @test_vfadd_vf_bf16mf2(
+// CHECK-RV64-SAME: <vscale x 2 x bfloat> [[OP1:%.*]], bfloat noundef [[OP2:%.*]], i64 noundef [[VL:%.*]]) #[[ATTR0]] {
+// CHECK-RV64-NEXT: [[ENTRY:.*:]]
+// CHECK-RV64-NEXT: [[TMP0:%.*]] = call <vscale x 2 x bfloat> @llvm.riscv.vfadd.nxv2bf16.bf16.i64(<vscale x 2 x bfloat> poison, <vscale x 2 x bfloat> [[OP1]], bfloat [[OP2]], i64 7, i64 [[VL]])
+// CHECK-RV64-NEXT: ret <vscale x 2 x bfloat> [[TMP0]]
+//
+vbfloat16mf2_t test_vfadd_vf_bf16mf2(vbfloat16mf2_t op1, __bf16 op2, size_t vl) {
+ return __riscv_vfadd(op1, op2, vl);
+}
+
+// CHECK-RV64-LABEL: define dso_local <vscale x 4 x bfloat> @test_vfadd_vv_bf16m1(
+// CHECK-RV64-SAME: <vscale x 4 x bfloat> [[OP1:%.*]], <vscale x 4 x bfloat> [[OP2:%.*]], i64 noundef [[VL:%.*]]) #[[ATTR0]] {
+// CHECK-RV64-NEXT: [[ENTRY:.*:]]
+// CHECK-RV64-NEXT: [[TMP0:%.*]] = call <vscale x 4 x bfloat> @llvm.riscv.vfadd.nxv4bf16.nxv4bf16.i64(<vscale x 4 x bfloat> poison, <vscale x 4 x bfloat> [[OP1]], <vscale x 4 x bfloat> [[OP2]], i64 7, i64 [[VL]])
+// CHECK-RV64-NEXT: ret <vscale x 4 x bfloat> [[TMP0]]
+//
+vbfloat16m1_t test_vfadd_vv_bf16m1(vbfloat16m1_t op1, vbfloat16m1_t op2, size_t vl) {
+ return __riscv_vfadd(op1, op2, vl);
+}
+
+// CHECK-RV64-LABEL: define dso_local <vscale x 4 x bfloat> @test_vfadd_vf_bf16m1(
+// CHECK-RV64-SAME: <vscale x 4 x bfloat> [[OP1:%.*]], bfloat noundef [[OP2:%.*]], i64 noundef [[VL:%.*]]) #[[ATTR0]] {
+// CHECK-RV64-NEXT: [[ENTRY:.*:]]
+// CHECK-RV64-NEXT: [[TMP0:%.*]] = call <vscale x 4 x bfloat> @llvm.riscv.vfadd.nxv4bf16.bf16.i64(<vscale x 4 x bfloat> poison, <vscale x 4 x bfloat> [[OP1]], bfloat [[OP2]], i64 7, i64 [[VL]])
+// CHECK-RV64-NEXT: ret <vscale x 4 x bfloat> [[TMP0]]
+//
+vbfloat16m1_t test_vfadd_vf_bf16m1(vbfloat16m1_t op1, __bf16 op2, size_t vl) {
+ return __riscv_vfadd(op1, op2, vl);
+}
+
+// CHECK-RV64-LABEL: define dso_local <vscale x 8 x bfloat> @test_vfadd_vv_bf16m2(
+// CHECK-RV64-SAME: <vscale x 8 x bfloat> [[OP1:%.*]], <vscale x 8 x bfloat> [[OP2:%.*]], i64 noundef [[VL:%.*]]) #[[ATTR0]] {
+// CHECK-RV64-NEXT: [[ENTRY:.*:]]
+// CHECK-RV64-NEXT: [[TMP0:%.*]] = call <vscale x 8 x bfloat> @llvm.riscv.vfadd.nxv8bf16.nxv8bf16.i64(<vscale x 8 x bfloat> poison, <vscale x 8 x bfloat> [[OP1]], <vscale x 8 x bfloat> [[OP2]], i64 7, i64 [[VL]])
+// CHECK-RV64-NEXT: ret <vscale x 8 x bfloat> [[TMP0]]
+//
+vbfloat16m2_t test_vfadd_vv_bf16m2(vbfloat16m2_t op1, vbfloat16m2_t op2, size_t vl) {
+ return __riscv_vfadd(op1, op2, vl);
+}
+
+// CHECK-RV64-LABEL: define dso_local <vscale x 8 x bfloat> @test_vfadd_vf_bf16m2(
+// CHECK-RV64-SAME: <vscale x 8 x bfloat> [[OP1:%.*]], bfloat noundef [[OP2:%.*]], i64 noundef [[VL:%.*]]) #[[ATTR0]] {
+// CHECK-RV64-NEXT: [[ENTRY:.*:]]
+// CHECK-RV64-NEXT: [[TMP0:%.*]] = call <vscale x 8 x bfloat> @llvm.riscv.vfadd.nxv8bf16.bf16.i64(<vscale x 8 x bfloat> poison, <vscale x 8 x bfloat> [[OP1]], bfloat [[OP2]], i64 7, i64 [[VL]])
+// CHECK-RV64-NEXT: ret <vscale x 8 x bfloat> [[TMP0]]
+//
+vbfloat16m2_t test_vfadd_vf_bf16m2(vbfloat16m2_t op1, __bf16 op2, size_t vl) {
+ return __riscv_vfadd(op1, op2, vl);
+}
+
+// CHECK-RV64-LABEL: define dso_local <vscale x 16 x bfloat> @test_vfadd_vv_bf16m4(
+// CHECK-RV64-SAME: <vscale x 16 x bfloat> [[OP1:%.*]], <vscale x 16 x bfloat> [[OP2:%.*]], i64 noundef [[VL:%.*]]) #[[ATTR0]] {
+// CHECK-RV64-NEXT: [[ENTRY:.*:]]
+// CHECK-RV64-NEXT: [[TMP0:%.*]] = call <vscale x 16 x bfloat> @llvm.riscv.vfadd.nxv16bf16.nxv16bf16.i64(<vscale x 16 x bfloat> poison, <vscale x 16 x bfloat> [[OP1]], <vscale x 16 x bfloat> [[OP2]], i64 7, i64 [[VL]])
+// CHECK-RV64-NEXT: ret <vscale x 16 x bfloat> [[TMP0]]
+//
+vbfloat16m4_t test_vfadd_vv_bf16m4(vbfloat16m4_t op1, vbfloat16m4_t op2, size_t vl) {
+ return __riscv_vfadd(op1, op2, vl);
+}
+
+// CHECK-RV64-LABEL: define dso_local <vscale x 16 x bfloat> @test_vfadd_vf_bf16m4(
+// CHECK-RV64-SAME: <vscale x 16 x bfloat> [[OP1:%.*]], bfloat noundef [[OP2:%.*]], i64 noundef [[VL:%.*]]) #[[ATTR0]] {
+// CHECK-RV64-NEXT: [[ENTRY:.*:]]
+// CHECK-RV64-NEXT: [[TMP0:%.*]] = call <vscale x 16 x bfloat> @llvm.riscv.vfadd.nxv16bf16.bf16.i64(<vscale x 16 x bfloat> poison, <vscale x 16 x bfloat> [[OP1]], bfloat [[OP2]], i64 7, i64 [[VL]])
+// CHECK-RV64-NEXT: ret <vscale x 16 x bfloat> [[TMP0]]
+//
+vbfloat16m4_t test_vfadd_vf_bf16m4(vbfloat16m4_t op1, __bf16 op2, size_t vl) {
+ return __riscv_vfadd(op1, op2, vl);
+}
+
+// CHECK-RV64-LABEL: define dso_local <vscale x 32 x bfloat> @test_vfadd_vv_bf16m8(
+// CHECK-RV64-SAME: <vscale x 32 x bfloat> [[OP1:%.*]], <vscale x 32 x bfloat> [[OP2:%.*]], i64 noundef [[VL:%.*]]) #[[ATTR0]] {
+// CHECK-RV64-NEXT: [[ENTRY:.*:]]
+// CHECK-RV64-NEXT: [[TMP0:%.*]] = call <vscale x 32 x bfloat> @llvm.riscv.vfadd.nxv32bf16.nxv32bf16.i64(<vscale x 32 x bfloat> poison, <vscale x 32 x bfloat> [[OP1]], <vscale x 32 x bfloat> [[OP2]], i64 7, i64 [[VL]])
+// CHECK-RV64-NEXT: ret <vscale x 32 x bfloat> [[TMP0]]
+//
+vbfloat16m8_t test_vfadd_vv_bf16m8(vbfloat16m8_t op1, vbfloat16m8_t op2, size_t vl) {
+ return __riscv_vfadd(op1, op2, vl);
+}
+
+// CHECK-RV64-LABEL: define dso_local <vscale x 32 x bfloat> @test_vfadd_vf_bf16m8(
+// CHECK-RV64-SAME: <vscale x 32 x bfloat> [[OP1:%.*]], bfloat noundef [[OP2:%.*]], i64 noundef [[VL:%.*]]) #[[ATTR0]] {
+// CHECK-RV64-NEXT: [[ENTRY:.*:]]
+// CHECK-RV64-NEXT: [[TMP0:%.*]] = call <vscale x 32 x bfloat> @llvm.riscv.vfadd.nxv32bf16.bf16.i64(<vscale x 32 x bfloat> poison, <vscale x 32 x bfloat> [[OP1]], bfloat [[OP2]], i64 7, i64 [[VL]])
+// CHECK-RV64-NEXT: ret <vscale x 32 x bfloat> [[TMP0]]
+//
+vbfloat16m8_t test_vfadd_vf_bf16m8(vbfloat16m8_t op1, __bf16 op2, size_t vl) {
+ return __riscv_vfadd(op1, op2, vl);
+}
+
+// CHECK-RV64-LABEL: define dso_local <vscale x 1 x bfloat> @test_vfadd_vv_bf16mf4_m(
+// CHECK-RV64-SAME: <vscale x 1 x i1> [[MASK:%.*]], <vscale x 1 x bfloat> [[OP1:%.*]], <vscale x 1 x bfloat> [[OP2:%.*]], i64 noundef [[VL:%.*]]) #[[ATTR0]] {
+// CHECK-RV64-NEXT: [[ENTRY:.*:]]
+// CHECK-RV64-NEXT: [[TMP0:%.*]] = call <vscale x 1 x bfloat> @llvm.riscv.vfadd.mask.nxv1bf16.nxv1bf16.i64(<vscale x 1 x bfloat> poison, <vscale x 1 x bfloat> [[OP1]], <vscale x 1 x bfloat> [[OP2]], <vscale x 1 x i1> [[MASK]], i64 7, i64 [[VL]], i64 3)
+// CHECK-RV64-NEXT: ret <vscale x 1 x bfloat> [[TMP0]]
+//
+vbfloat16mf4_t test_vfadd_vv_bf16mf4_m(vbool64_t mask, vbfloat16mf4_t op1, vbfloat16mf4_t op2, size_t vl) {
+ return __riscv_vfadd(mask, op1, op2, vl);
+}
+
+// CHECK-RV64-LABEL: define dso_local <vscale x 1 x bfloat> @test_vfadd_vf_bf16mf4_m(
+// CHECK-RV64-SAME: <vscale x 1 x i1> [[MASK:%.*]], <vscale x 1 x bfloat> [[OP1:%.*]], bfloat noundef [[OP2:%.*]], i64 noundef [[VL:%.*]]) #[[ATTR0]] {
+// CHECK-RV64-NEXT: [[ENTRY:.*:]]
+// CHECK-RV64-NEXT: [[TMP0:%.*]] = call <vscale x 1 x bfloat> @llvm.riscv.vfadd.mask.nxv1bf16.bf16.i64(<vscale x 1 x bfloat> poison, <vscale x 1 x bfloat> [[OP1]], bfloat [[OP2]], <vscale x 1 x i1> [[MASK]], i64 7, i64 [[VL]], i64 3)
+// CHECK-RV64-NEXT: ret <vscale x 1 x bfloat> [[TMP0]]
+//
+vbfloat16mf4_t test_vfadd_vf_bf16mf4_m(vbool64_t mask, vbfloat16mf4_t op1, __bf16 op2, size_t vl) {
+ return __riscv_vfadd(mask, op1, op2, vl);
+}
+
+// CHECK-RV64-LABEL: define dso_local <vscale x 2 x bfloat> @test_vfadd_vv_bf16mf2_m(
+// CHECK-RV64-SAME: <vscale x 2 x i1> [[MASK:%.*]], <vscale x 2 x bfloat> [[OP1:%.*]], <vscale x 2 x bfloat> [[OP2:%.*]], i64 noundef [[VL:%.*]]) #[[ATTR0]] {
+// CHECK-RV64-NEXT: [[ENTRY:.*:]]
+// CHECK-RV64-NEXT: [[TMP0:%.*]] = call <vscale x 2 x bfloat> @llvm.riscv.vfadd.mask.nxv2bf16.nxv2bf16.i64(<vscale x 2 x bfloat> poison, <vscale x 2 x bfloat> [[OP1]], <vscale x 2 x bfloat> [[OP2]], <vscale x 2 x i1> [[MASK]], i64 7, i64 [[VL]], i64 3)
+// CHECK-RV64-NEXT: ret <vscale x 2 x bfloat> [[TMP0]]
+//
+vbfloat16mf2_t test_vfadd_vv_bf16mf2_m(vbool32_t mask, vbfloat16mf2_t op1, vbfloat16mf2_t op2, size_t vl) {
+ return __riscv_vfadd(mask, op1, op2, vl);
+}
+
+// CHECK-RV64-LABEL: define dso_local <vscale x 2 x bfloat> @test_vfadd_vf_bf16mf2_m(
+// CHECK-RV64-SAME: <vscale x 2 x i1> [[MASK:%.*]], <vscale x 2 x bfloat> [[OP1:%.*]], bfloat noundef [[OP2:%.*]], i64 noundef [[VL:%.*]]) #[[ATTR0]] {
+// CHECK-RV64-NEXT: [[ENTRY:.*:]]
+// CHECK-RV64-NEXT: [[TMP0:%.*]] = call <vscale x 2 x bfloat> @llvm.riscv.vfadd.mask.nxv2bf16.bf16.i64(<vscale x 2 x bfloat> poison, <vscale x 2 x bfloat> [[OP1]], bfloat [[OP2]], <vscale x 2 x i1> [[MASK]], i64 7, i64 [[VL]], i64 3)
+// CHECK-RV64-NEXT: ret <vscale x 2 x bfloat> [[TMP0]]
+//
+vbfloat16mf2_t test_vfadd_vf_bf16mf2_m(vbool32_t mask, vbfloat16mf2_t op1, __bf16 op2, size_t vl) {
+ return __riscv_vfadd(mask, op1, op2, vl);
+}
+
+// CHECK-RV64-LABEL: define dso_local <vscale x 4 x bfloat> @test_vfadd_vv_bf16m1_m(
+// CHECK-RV64-SAME: <vscale x 4 x i1> [[MASK:%.*]], <vscale x 4 x bfloat> [[OP1:%.*]], <vscale x 4 x bfloat> [[OP2:%.*]], i64 noundef [[VL:%.*]]) #[[ATTR0]] {
+// CHECK-RV64-NEXT: [[ENTRY:.*:]]
+// CHECK-RV64-NEXT: [[TMP0:%.*]] = call <vscale x 4 x bfloat> @llvm.riscv.vfadd.mask.nxv4bf16.nxv4bf16.i64(<vscale x 4 x bfloat> poison, <vscale x 4 x bfloat> [[OP1]], <vscale x 4 x bfloat> [[OP2]], <vscale x 4 x i1> [[MASK]], i64 7, i64 [[VL]], i64 3)
+// CHECK-RV64-NEXT: ret <vscale x 4 x bfloat> [[TMP0]]
+//
+vbfloat16m1_t test_vfadd_vv_bf16m1_m(vbool16_t mask, vbfloat16m1_t op1, vbfloat16m1_t op2, size_t vl) {
+ return __riscv_vfadd(mask, op1, op2, vl);
+}
+
+// CHECK-RV64-LABEL: define dso_local <vscale x 4 x bfloat> @test_vfadd_vf_bf16m1_m(
+// CHECK-RV64-SAME: <vscale x 4 x i1> [[MASK:%.*]], <vscale x 4 x bfloat> [[OP1:%.*]], bfloat noundef [[OP2:%.*]], i64 noundef [[VL:%.*]]) #[[ATTR0]] {
+// CHECK-RV64-NEXT: [[ENTRY:.*:]]
+// CHECK-RV64-NEXT: [[TMP0:%.*]] = call <vscale x 4 x bfloat> @llvm.riscv.vfadd.mask.nxv4bf16.bf16.i64(<vscale x 4 x bfloat> poison, <vscale x 4 x bfloat> [[OP1]], bfloat [[OP2]], <vscale x 4 x i1> [[MASK]], i64 7, i64 [[VL]], i64 3)
+// CHECK-RV64-NEXT: ret <vscale x 4 x bfloat> [[TMP0]]
+//
+vbfloat16m1_t test_vfadd_vf_bf16m1_m(vbool16_t mask, vbfloat16m1_t op1, __bf16 op2, size_t vl) {
+ return __riscv_vfadd(mask, op1, op2, vl);
+}
+
+// CHECK-RV64-LABEL: define dso_local <vscale x 8 x bfloat> @test_vfadd_vv_bf16m2_m(
+// CHECK-RV64-SAME: <vscale x 8 x i1> [[MASK:%.*]], <vscale x 8 x bfloat> [[OP1:%.*]], <vscale x 8 x bfloat> [[OP2:%.*]], i64 noundef [[VL:%.*]]) #[[ATTR0]] {
+// CHECK-RV64-NEXT: [[ENTRY:.*:]]
+// CHECK-RV64-NEXT: [[TMP0:%.*]] = call <vscale x 8 x bfloat> @llvm.riscv.vfadd.mask.nxv8bf16.nxv8bf16.i64(<vscale x 8 x bfloat> poison, <vscale x 8 x bfloat> [[OP1]], <vscale x 8 x bfloat> [[OP2]], <vscale x 8 x i1> [[MASK]], i64 7, i64 [[VL]], i64 3)
+// CHECK-RV64-NEXT: ret <vscale x 8 x bfloat> [[TMP0]]
+//
+vbfloat16m2_t test_vfadd_vv_bf16m2_m(vbool8_t mask, vbfloat16m2_t op1, vbfloat16m2_t op2, size_t vl) {
+ return __riscv_vfadd(mask, op1, op2, vl);
+}
+
+// CHECK-RV64-LABEL: define dso_local <vscale x 8 x bfloat> @test_vfadd_vf_bf16m2_m(
+// CHECK-RV64-SAME: <vscale x 8 x i1> [[MASK:%.*]], <vscale x 8 x bfloat> [[OP1:%.*]], bfloat noundef [[OP2:%.*]], i64 noundef [[VL:%.*]]) #[[ATTR0]] {
+// CHECK-RV64-NEXT: [[ENTRY:.*:]]
+// CHECK-RV64-NEXT: [[TMP0:%.*]] = call <vscale x 8 x bfloat> @llvm.riscv.vfadd.mask.nxv8bf16.bf16.i64(<vscale x 8 x bfloat> poison, <vscale x 8 x bfloat> [[OP1]], bfloat [[OP2]], <vscale x 8 x i1> [[MASK]], i64 7, i64 [[VL]], i64 3)
+// CHECK-RV64-NEXT: ret <vscale x 8 x bfloat> [[TMP0]]
+//
+vbfloat16m2_t test_vfadd_vf_bf16m2_m(vbool8_t mask, vbfloat16m2_t op1, __bf16 op2, size_t vl) {
+ return __riscv_vfadd(mask, op1, op2, vl);
+}
+
+// CHECK-RV64-LABEL: define dso_local <vscale x 16 x bfloat> @test_vfadd_vv_bf16m4_m(
+// CHECK-RV64-SAME: <vscale x 16 x i1> [[MASK:%.*]], <vscale x 16 x bfloat> [[OP1:%.*]], <vscale x 16 x bfloat> [[OP2:%.*]], i64 noundef [[VL:%.*]]) #[[ATTR0]] {
+// CHECK-RV64-NEXT: [[ENTRY:.*:]]
+// CHECK-RV64-NEXT: [[TMP0:%.*]] = call <vscale x 16 x bfloat> @llvm.riscv.vfadd.mask.nxv16bf16.nxv16bf16.i64(<vscale x 16 x bfloat> poison, <vscale x 16 x bfloat> [[OP1]], <vscale x 16 x bfloat> [[OP2]], <vscale x 16 x i1> [[MASK]], i64 7, i64 [[VL]], i64 3)
+// CHECK-RV64-NEXT: ret <vscale x 16 x bfloat> [[TMP0]]
+//
+vbfloat16m4_t test_vfadd_vv_bf16m4_m(vbool4_t mask, vbfloat16m4_t op1, vbfloat16m4_t op2, size_t vl) {
+ return __riscv_vfadd(mask, op1, op2, vl);
+}
+
+// CHECK-RV64-LABEL: define dso_local <vscale x 16 x bfloat> @test_vfadd_vf_bf16m4_m(
+// CHECK-RV64-SAME: <vscale x 16 x i1> [[MASK:%.*]], <vscale x 16 x bfloat> [[OP1:%.*]], bfloat noundef [[OP2:%.*]], i64 noundef [[VL:%.*]]) #[[ATTR0]] {
+// CHECK-RV64-NEXT: [[ENTRY:.*:]]
+// CHECK-RV64-NEXT: [[TMP0:%.*]] = call <vscale x 16 x bfloat> @llvm.riscv.vfadd.mask.nxv16bf16.bf16.i64(<vscale x 16 x bfloat> poison, <vscale x 16 x bfloat> [[OP1]], bfloat [[OP2]], <vscale x 16 x i1> [[MASK]], i64 7, i64 [[VL]], i64 3)
+// CHECK-RV64-NEXT: ret <vscale x 16 x bfloat> [[TMP0]]
+//
+vbfloat16m4_t test_vfadd_vf_bf16m4_m(vbool4_t mask, vbfloat16m4_t op1, __bf16 op2, size_t vl) {
+ return __riscv_vfadd(mask, op1, op2, vl);
+}
+
+// CHECK-RV64-LABEL: define dso_local <vscale x 32 x bfloat> @test_vfadd_vv_bf16m8_m(
+// CHECK-RV64-SAME: <vscale x 32 x i1> [[MASK:%.*]], <vscale x 32 x bfloat> [[OP1:%.*]], <vscale x 32 x bfloat> [[OP2:%.*]], i64 noundef [[VL:%.*]]) #[[ATTR0]] {
+// CHECK-RV64-NEXT: [[ENTRY:.*:]]
+// CHECK-RV64-NEXT: [[TMP0:%.*]] = call <vscale x 32 x bfloat> @llvm.riscv.vfadd.mask.nxv32bf16.nxv32bf16.i64(<vscale x 32 x bfloat> poison, <vscale x 32 x bfloat> [[OP1]], <vscale x 32 x bfloat> [[OP2]], <vscale x 32 x i1> [[MASK]], i64 7, i64 [[VL]], i64 3)
+// CHECK-RV64-NEXT: ret <vscale x 32 x bfloat> [[TMP0]]
+//
+vbfloat16m8_t test_vfadd_vv_bf16m8_m(vbool2_t mask, vbfloat16m8_t op1, vbfloat16m8_t op2, size_t vl) {
+ return __riscv_vfadd(mask, op1, op2, vl);
+}
+
+// CHECK-RV64-LABEL: define dso_local <vscale x 32 x bfloat> @test_vfadd_vf_bf16m8_m(
+// CHECK-RV64-SAME: <vscale x 32 x i1> [[MASK:%.*]], <vscale x 32 x bfloat> [[OP1:%.*]], bfloat noundef [[OP2:%.*]], i64 noundef [[VL:%.*]]) #[[ATTR0]] {
+// CHECK-RV64-NEXT: [[ENTRY:.*:]]
+// CHECK-RV64-NEXT: [[TMP0:%.*]] = call <vscale x 32 x bfloat> @llvm.riscv.vfadd.mask.nxv32bf16.bf16.i64(<vscale x 32 x bfloat> poison, <vscale x 32 x bfloat> [[OP1]], bfloat [[OP2]], <vscale x 32 x i1> [[MASK]], i64 7, i64 [[VL]], i64 3)
+// CHECK-RV64-NEXT: ret <vscale x 32 x bfloat> [[TMP0]]
+//
+vbfloat16m8_t test_vfadd_vf_bf16m8_m(vbool2_t mask, vbfloat16m8_t op1, __bf16 op2, size_t vl) {
+ return __riscv_vfadd(mask, op1, op2, vl);
+}
+
diff --git a/clang/test/CodeGen/RISCV/rvv-intrinsics-autogenerated/zvfbfa/non-policy/overloaded/vfclass.c b/clang/test/CodeGen/RISCV/rvv-intrinsics-autogenerated/zvfbfa/non-policy/overloaded/vfclass.c
new file mode 100644
index 0000000..2760f85
--- /dev/null
+++ b/clang/test/CodeGen/RISCV/rvv-intrinsics-autogenerated/zvfbfa/non-policy/overloaded/vfclass.c
@@ -0,0 +1,134 @@
+// NOTE: Assertions have been autogenerated by utils/update_cc_test_checks.py UTC_ARGS: --version 5
+// REQUIRES: riscv-registered-target
+// RUN: %clang_cc1 -triple riscv64 -target-feature +v \
+// RUN: -target-feature +experimental-zvfbfa -disable-O0-optnone \
+// RUN: -emit-llvm %s -o - | opt -S -passes=mem2reg | \
+// RUN: FileCheck --check-prefix=CHECK-RV64 %s
+
+#include <riscv_vector.h>
+
+// CHECK-RV64-LABEL: define dso_local <vscale x 1 x i16> @test_vfclass_v_bf16mf4_u16mf4(
+// CHECK-RV64-SAME: <vscale x 1 x bfloat> [[VS2:%.*]], i64 noundef [[VL:%.*]]) #[[ATTR0:[0-9]+]] {
+// CHECK-RV64-NEXT: [[ENTRY:.*:]]
+// CHECK-RV64-NEXT: [[TMP0:%.*]] = call <vscale x 1 x i16> @llvm.riscv.vfclass.nxv1bf16.i64(<vscale x 1 x i16> poison, <vscale x 1 x bfloat> [[VS2]], i64 [[VL]])
+// CHECK-RV64-NEXT: ret <vscale x 1 x i16> [[TMP0]]
+//
+vuint16mf4_t test_vfclass_v_bf16mf4_u16mf4(vbfloat16mf4_t vs2, size_t vl) {
+ return __riscv_vfclass(vs2, vl);
+}
+
+// CHECK-RV64-LABEL: define dso_local <vscale x 2 x i16> @test_vfclass_v_bf16mf2_u16mf2(
+// CHECK-RV64-SAME: <vscale x 2 x bfloat> [[VS2:%.*]], i64 noundef [[VL:%.*]]) #[[ATTR0]] {
+// CHECK-RV64-NEXT: [[ENTRY:.*:]]
+// CHECK-RV64-NEXT: [[TMP0:%.*]] = call <vscale x 2 x i16> @llvm.riscv.vfclass.nxv2bf16.i64(<vscale x 2 x i16> poison, <vscale x 2 x bfloat> [[VS2]], i64 [[VL]])
+// CHECK-RV64-NEXT: ret <vscale x 2 x i16> [[TMP0]]
+//
+vuint16mf2_t test_vfclass_v_bf16mf2_u16mf2(vbfloat16mf2_t vs2, size_t vl) {
+ return __riscv_vfclass(vs2, vl);
+}
+
+// CHECK-RV64-LABEL: define dso_local <vscale x 4 x i16> @test_vfclass_v_bf16m1_u16m1(
+// CHECK-RV64-SAME: <vscale x 4 x bfloat> [[VS2:%.*]], i64 noundef [[VL:%.*]]) #[[ATTR0]] {
+// CHECK-RV64-NEXT: [[ENTRY:.*:]]
+// CHECK-RV64-NEXT: [[TMP0:%.*]] = call <vscale x 4 x i16> @llvm.riscv.vfclass.nxv4bf16.i64(<vscale x 4 x i16> poison, <vscale x 4 x bfloat> [[VS2]], i64 [[VL]])
+// CHECK-RV64-NEXT: ret <vscale x 4 x i16> [[TMP0]]
+//
+vuint16m1_t test_vfclass_v_bf16m1_u16m1(vbfloat16m1_t vs2, size_t vl) {
+ return __riscv_vfclass(vs2, vl);
+}
+
+// CHECK-RV64-LABEL: define dso_local <vscale x 8 x i16> @test_vfclass_v_bf16m2_u16m2(
+// CHECK-RV64-SAME: <vscale x 8 x bfloat> [[VS2:%.*]], i64 noundef [[VL:%.*]]) #[[ATTR0]] {
+// CHECK-RV64-NEXT: [[ENTRY:.*:]]
+// CHECK-RV64-NEXT: [[TMP0:%.*]] = call <vscale x 8 x i16> @llvm.riscv.vfclass.nxv8bf16.i64(<vscale x 8 x i16> poison, <vscale x 8 x bfloat> [[VS2]], i64 [[VL]])
+// CHECK-RV64-NEXT: ret <vscale x 8 x i16> [[TMP0]]
+//
+vuint16m2_t test_vfclass_v_bf16m2_u16m2(vbfloat16m2_t vs2, size_t vl) {
+ return __riscv_vfclass(vs2, vl);
+}
+
+// CHECK-RV64-LABEL: define dso_local <vscale x 16 x i16> @test_vfclass_v_bf16m4_u16m4(
+// CHECK-RV64-SAME: <vscale x 16 x bfloat> [[VS2:%.*]], i64 noundef [[VL:%.*]]) #[[ATTR0]] {
+// CHECK-RV64-NEXT: [[ENTRY:.*:]]
+// CHECK-RV64-NEXT: [[TMP0:%.*]] = call <vscale x 16 x i16> @llvm.riscv.vfclass.nxv16bf16.i64(<vscale x 16 x i16> poison, <vscale x 16 x bfloat> [[VS2]], i64 [[VL]])
+// CHECK-RV64-NEXT: ret <vscale x 16 x i16> [[TMP0]]
+//
+vuint16m4_t test_vfclass_v_bf16m4_u16m4(vbfloat16m4_t vs2, size_t vl) {
+ return __riscv_vfclass(vs2, vl);
+}
+
+// CHECK-RV64-LABEL: define dso_local <vscale x 32 x i16> @test_vfclass_v_bf16m8_u16m8(
+// CHECK-RV64-SAME: <vscale x 32 x bfloat> [[VS2:%.*]], i64 noundef [[VL:%.*]]) #[[ATTR0]] {
+// CHECK-RV64-NEXT: [[ENTRY:.*:]]
+// CHECK-RV64-NEXT: [[TMP0:%.*]] = call <vscale x 32 x i16> @llvm.riscv.vfclass.nxv32bf16.i64(<vscale x 32 x i16> poison, <vscale x 32 x bfloat> [[VS2]], i64 [[VL]])
+// CHECK-RV64-NEXT: ret <vscale x 32 x i16> [[TMP0]]
+//
+vuint16m8_t test_vfclass_v_bf16m8_u16m8(vbfloat16m8_t vs2, size_t vl) {
+ return __riscv_vfclass(vs2, vl);
+}
+
+// CHECK-RV64-LABEL: define dso_local <vscale x 1 x i16> @test_vfclass_v_bf16mf4_u16mf4_m(
+// CHECK-RV64-SAME: <vscale x 1 x i1> [[VM:%.*]], <vscale x 1 x bfloat> [[VS2:%.*]], i64 noundef [[VL:%.*]]) #[[ATTR0]] {
+// CHECK-RV64-NEXT: [[ENTRY:.*:]]
+// CHECK-RV64-NEXT: [[TMP0:%.*]] = call <vscale x 1 x i16> @llvm.riscv.vfclass.mask.nxv1bf16.i64(<vscale x 1 x i16> poison, <vscale x 1 x bfloat> [[VS2]], <vscale x 1 x i1> [[VM]], i64 [[VL]], i64 3)
+// CHECK-RV64-NEXT: ret <vscale x 1 x i16> [[TMP0]]
+//
+vuint16mf4_t test_vfclass_v_bf16mf4_u16mf4_m(vbool64_t vm, vbfloat16mf4_t vs2,
+ size_t vl) {
+ return __riscv_vfclass(vm, vs2, vl);
+}
+
+// CHECK-RV64-LABEL: define dso_local <vscale x 2 x i16> @test_vfclass_v_bf16mf2_u16mf2_m(
+// CHECK-RV64-SAME: <vscale x 2 x i1> [[VM:%.*]], <vscale x 2 x bfloat> [[VS2:%.*]], i64 noundef [[VL:%.*]]) #[[ATTR0]] {
+// CHECK-RV64-NEXT: [[ENTRY:.*:]]
+// CHECK-RV64-NEXT: [[TMP0:%.*]] = call <vscale x 2 x i16> @llvm.riscv.vfclass.mask.nxv2bf16.i64(<vscale x 2 x i16> poison, <vscale x 2 x bfloat> [[VS2]], <vscale x 2 x i1> [[VM]], i64 [[VL]], i64 3)
+// CHECK-RV64-NEXT: ret <vscale x 2 x i16> [[TMP0]]
+//
+vuint16mf2_t test_vfclass_v_bf16mf2_u16mf2_m(vbool32_t vm, vbfloat16mf2_t vs2,
+ size_t vl) {
+ return __riscv_vfclass(vm, vs2, vl);
+}
+
+// CHECK-RV64-LABEL: define dso_local <vscale x 4 x i16> @test_vfclass_v_bf16m1_u16m1_m(
+// CHECK-RV64-SAME: <vscale x 4 x i1> [[VM:%.*]], <vscale x 4 x bfloat> [[VS2:%.*]], i64 noundef [[VL:%.*]]) #[[ATTR0]] {
+// CHECK-RV64-NEXT: [[ENTRY:.*:]]
+// CHECK-RV64-NEXT: [[TMP0:%.*]] = call <vscale x 4 x i16> @llvm.riscv.vfclass.mask.nxv4bf16.i64(<vscale x 4 x i16> poison, <vscale x 4 x bfloat> [[VS2]], <vscale x 4 x i1> [[VM]], i64 [[VL]], i64 3)
+// CHECK-RV64-NEXT: ret <vscale x 4 x i16> [[TMP0]]
+//
+vuint16m1_t test_vfclass_v_bf16m1_u16m1_m(vbool16_t vm, vbfloat16m1_t vs2,
+ size_t vl) {
+ return __riscv_vfclass(vm, vs2, vl);
+}
+
+// CHECK-RV64-LABEL: define dso_local <vscale x 8 x i16> @test_vfclass_v_bf16m2_u16m2_m(
+// CHECK-RV64-SAME: <vscale x 8 x i1> [[VM:%.*]], <vscale x 8 x bfloat> [[VS2:%.*]], i64 noundef [[VL:%.*]]) #[[ATTR0]] {
+// CHECK-RV64-NEXT: [[ENTRY:.*:]]
+// CHECK-RV64-NEXT: [[TMP0:%.*]] = call <vscale x 8 x i16> @llvm.riscv.vfclass.mask.nxv8bf16.i64(<vscale x 8 x i16> poison, <vscale x 8 x bfloat> [[VS2]], <vscale x 8 x i1> [[VM]], i64 [[VL]], i64 3)
+// CHECK-RV64-NEXT: ret <vscale x 8 x i16> [[TMP0]]
+//
+vuint16m2_t test_vfclass_v_bf16m2_u16m2_m(vbool8_t vm, vbfloat16m2_t vs2,
+ size_t vl) {
+ return __riscv_vfclass(vm, vs2, vl);
+}
+
+// CHECK-RV64-LABEL: define dso_local <vscale x 16 x i16> @test_vfclass_v_bf16m4_u16m4_m(
+// CHECK-RV64-SAME: <vscale x 16 x i1> [[VM:%.*]], <vscale x 16 x bfloat> [[VS2:%.*]], i64 noundef [[VL:%.*]]) #[[ATTR0]] {
+// CHECK-RV64-NEXT: [[ENTRY:.*:]]
+// CHECK-RV64-NEXT: [[TMP0:%.*]] = call <vscale x 16 x i16> @llvm.riscv.vfclass.mask.nxv16bf16.i64(<vscale x 16 x i16> poison, <vscale x 16 x bfloat> [[VS2]], <vscale x 16 x i1> [[VM]], i64 [[VL]], i64 3)
+// CHECK-RV64-NEXT: ret <vscale x 16 x i16> [[TMP0]]
+//
+vuint16m4_t test_vfclass_v_bf16m4_u16m4_m(vbool4_t vm, vbfloat16m4_t vs2,
+ size_t vl) {
+ return __riscv_vfclass(vm, vs2, vl);
+}
+
+// CHECK-RV64-LABEL: define dso_local <vscale x 32 x i16> @test_vfclass_v_bf16m8_u16m8_m(
+// CHECK-RV64-SAME: <vscale x 32 x i1> [[VM:%.*]], <vscale x 32 x bfloat> [[VS2:%.*]], i64 noundef [[VL:%.*]]) #[[ATTR0]] {
+// CHECK-RV64-NEXT: [[ENTRY:.*:]]
+// CHECK-RV64-NEXT: [[TMP0:%.*]] = call <vscale x 32 x i16> @llvm.riscv.vfclass.mask.nxv32bf16.i64(<vscale x 32 x i16> poison, <vscale x 32 x bfloat> [[VS2]], <vscale x 32 x i1> [[VM]], i64 [[VL]], i64 3)
+// CHECK-RV64-NEXT: ret <vscale x 32 x i16> [[TMP0]]
+//
+vuint16m8_t test_vfclass_v_bf16m8_u16m8_m(vbool2_t vm, vbfloat16m8_t vs2,
+ size_t vl) {
+ return __riscv_vfclass(vm, vs2, vl);
+}
diff --git a/clang/test/CodeGen/RISCV/rvv-intrinsics-autogenerated/zvfbfa/non-policy/overloaded/vfmacc.c b/clang/test/CodeGen/RISCV/rvv-intrinsics-autogenerated/zvfbfa/non-policy/overloaded/vfmacc.c
new file mode 100644
index 0000000..ae3f1f2
--- /dev/null
+++ b/clang/test/CodeGen/RISCV/rvv-intrinsics-autogenerated/zvfbfa/non-policy/overloaded/vfmacc.c
@@ -0,0 +1,249 @@
+// NOTE: Assertions have been autogenerated by utils/update_cc_test_checks.py UTC_ARGS: --version 5
+// REQUIRES: riscv-registered-target
+// RUN: %clang_cc1 -triple riscv64 -target-feature +v \
+// RUN: -target-feature +experimental-zvfbfa -disable-O0-optnone \
+// RUN: -emit-llvm %s -o - | opt -S -passes=mem2reg | \
+// RUN: FileCheck --check-prefix=CHECK-RV64 %s
+
+#include <riscv_vector.h>
+
+// CHECK-RV64-LABEL: define dso_local <vscale x 1 x bfloat> @test_vfmacc_vv_bf16mf4(
+// CHECK-RV64-SAME: <vscale x 1 x bfloat> [[VD:%.*]], <vscale x 1 x bfloat> [[VS1:%.*]], <vscale x 1 x bfloat> [[VS2:%.*]], i64 noundef [[VL:%.*]]) #[[ATTR0:[0-9]+]] {
+// CHECK-RV64-NEXT: [[ENTRY:.*:]]
+// CHECK-RV64-NEXT: [[TMP0:%.*]] = call <vscale x 1 x bfloat> @llvm.riscv.vfmacc.nxv1bf16.nxv1bf16.i64(<vscale x 1 x bfloat> [[VD]], <vscale x 1 x bfloat> [[VS1]], <vscale x 1 x bfloat> [[VS2]], i64 7, i64 [[VL]], i64 3)
+// CHECK-RV64-NEXT: ret <vscale x 1 x bfloat> [[TMP0]]
+//
+vbfloat16mf4_t test_vfmacc_vv_bf16mf4(vbfloat16mf4_t vd, vbfloat16mf4_t vs1, vbfloat16mf4_t vs2, size_t vl) {
+ return __riscv_vfmacc(vd, vs1, vs2, vl);
+}
+
+// CHECK-RV64-LABEL: define dso_local <vscale x 1 x bfloat> @test_vfmacc_vf_bf16mf4(
+// CHECK-RV64-SAME: <vscale x 1 x bfloat> [[VD:%.*]], bfloat noundef [[RS1:%.*]], <vscale x 1 x bfloat> [[VS2:%.*]], i64 noundef [[VL:%.*]]) #[[ATTR0]] {
+// CHECK-RV64-NEXT: [[ENTRY:.*:]]
+// CHECK-RV64-NEXT: [[TMP0:%.*]] = call <vscale x 1 x bfloat> @llvm.riscv.vfmacc.nxv1bf16.bf16.i64(<vscale x 1 x bfloat> [[VD]], bfloat [[RS1]], <vscale x 1 x bfloat> [[VS2]], i64 7, i64 [[VL]], i64 3)
+// CHECK-RV64-NEXT: ret <vscale x 1 x bfloat> [[TMP0]]
+//
+vbfloat16mf4_t test_vfmacc_vf_bf16mf4(vbfloat16mf4_t vd, __bf16 rs1, vbfloat16mf4_t vs2, size_t vl) {
+ return __riscv_vfmacc(vd, rs1, vs2, vl);
+}
+
+// CHECK-RV64-LABEL: define dso_local <vscale x 2 x bfloat> @test_vfmacc_vv_bf16mf2(
+// CHECK-RV64-SAME: <vscale x 2 x bfloat> [[VD:%.*]], <vscale x 2 x bfloat> [[VS1:%.*]], <vscale x 2 x bfloat> [[VS2:%.*]], i64 noundef [[VL:%.*]]) #[[ATTR0]] {
+// CHECK-RV64-NEXT: [[ENTRY:.*:]]
+// CHECK-RV64-NEXT: [[TMP0:%.*]] = call <vscale x 2 x bfloat> @llvm.riscv.vfmacc.nxv2bf16.nxv2bf16.i64(<vscale x 2 x bfloat> [[VD]], <vscale x 2 x bfloat> [[VS1]], <vscale x 2 x bfloat> [[VS2]], i64 7, i64 [[VL]], i64 3)
+// CHECK-RV64-NEXT: ret <vscale x 2 x bfloat> [[TMP0]]
+//
+vbfloat16mf2_t test_vfmacc_vv_bf16mf2(vbfloat16mf2_t vd, vbfloat16mf2_t vs1, vbfloat16mf2_t vs2, size_t vl) {
+ return __riscv_vfmacc(vd, vs1, vs2, vl);
+}
+
+// CHECK-RV64-LABEL: define dso_local <vscale x 2 x bfloat> @test_vfmacc_vf_bf16mf2(
+// CHECK-RV64-SAME: <vscale x 2 x bfloat> [[VD:%.*]], bfloat noundef [[RS1:%.*]], <vscale x 2 x bfloat> [[VS2:%.*]], i64 noundef [[VL:%.*]]) #[[ATTR0]] {
+// CHECK-RV64-NEXT: [[ENTRY:.*:]]
+// CHECK-RV64-NEXT: [[TMP0:%.*]] = call <vscale x 2 x bfloat> @llvm.riscv.vfmacc.nxv2bf16.bf16.i64(<vscale x 2 x bfloat> [[VD]], bfloat [[RS1]], <vscale x 2 x bfloat> [[VS2]], i64 7, i64 [[VL]], i64 3)
+// CHECK-RV64-NEXT: ret <vscale x 2 x bfloat> [[TMP0]]
+//
+vbfloat16mf2_t test_vfmacc_vf_bf16mf2(vbfloat16mf2_t vd, __bf16 rs1, vbfloat16mf2_t vs2, size_t vl) {
+ return __riscv_vfmacc(vd, rs1, vs2, vl);
+}
+
+// CHECK-RV64-LABEL: define dso_local <vscale x 4 x bfloat> @test_vfmacc_vv_bf16m1(
+// CHECK-RV64-SAME: <vscale x 4 x bfloat> [[VD:%.*]], <vscale x 4 x bfloat> [[VS1:%.*]], <vscale x 4 x bfloat> [[VS2:%.*]], i64 noundef [[VL:%.*]]) #[[ATTR0]] {
+// CHECK-RV64-NEXT: [[ENTRY:.*:]]
+// CHECK-RV64-NEXT: [[TMP0:%.*]] = call <vscale x 4 x bfloat> @llvm.riscv.vfmacc.nxv4bf16.nxv4bf16.i64(<vscale x 4 x bfloat> [[VD]], <vscale x 4 x bfloat> [[VS1]], <vscale x 4 x bfloat> [[VS2]], i64 7, i64 [[VL]], i64 3)
+// CHECK-RV64-NEXT: ret <vscale x 4 x bfloat> [[TMP0]]
+//
+vbfloat16m1_t test_vfmacc_vv_bf16m1(vbfloat16m1_t vd, vbfloat16m1_t vs1, vbfloat16m1_t vs2, size_t vl) {
+ return __riscv_vfmacc(vd, vs1, vs2, vl);
+}
+
+// CHECK-RV64-LABEL: define dso_local <vscale x 4 x bfloat> @test_vfmacc_vf_bf16m1(
+// CHECK-RV64-SAME: <vscale x 4 x bfloat> [[VD:%.*]], bfloat noundef [[RS1:%.*]], <vscale x 4 x bfloat> [[VS2:%.*]], i64 noundef [[VL:%.*]]) #[[ATTR0]] {
+// CHECK-RV64-NEXT: [[ENTRY:.*:]]
+// CHECK-RV64-NEXT: [[TMP0:%.*]] = call <vscale x 4 x bfloat> @llvm.riscv.vfmacc.nxv4bf16.bf16.i64(<vscale x 4 x bfloat> [[VD]], bfloat [[RS1]], <vscale x 4 x bfloat> [[VS2]], i64 7, i64 [[VL]], i64 3)
+// CHECK-RV64-NEXT: ret <vscale x 4 x bfloat> [[TMP0]]
+//
+vbfloat16m1_t test_vfmacc_vf_bf16m1(vbfloat16m1_t vd, __bf16 rs1, vbfloat16m1_t vs2, size_t vl) {
+ return __riscv_vfmacc(vd, rs1, vs2, vl);
+}
+
+// CHECK-RV64-LABEL: define dso_local <vscale x 8 x bfloat> @test_vfmacc_vv_bf16m2(
+// CHECK-RV64-SAME: <vscale x 8 x bfloat> [[VD:%.*]], <vscale x 8 x bfloat> [[VS1:%.*]], <vscale x 8 x bfloat> [[VS2:%.*]], i64 noundef [[VL:%.*]]) #[[ATTR0]] {
+// CHECK-RV64-NEXT: [[ENTRY:.*:]]
+// CHECK-RV64-NEXT: [[TMP0:%.*]] = call <vscale x 8 x bfloat> @llvm.riscv.vfmacc.nxv8bf16.nxv8bf16.i64(<vscale x 8 x bfloat> [[VD]], <vscale x 8 x bfloat> [[VS1]], <vscale x 8 x bfloat> [[VS2]], i64 7, i64 [[VL]], i64 3)
+// CHECK-RV64-NEXT: ret <vscale x 8 x bfloat> [[TMP0]]
+//
+vbfloat16m2_t test_vfmacc_vv_bf16m2(vbfloat16m2_t vd, vbfloat16m2_t vs1, vbfloat16m2_t vs2, size_t vl) {
+ return __riscv_vfmacc(vd, vs1, vs2, vl);
+}
+
+// CHECK-RV64-LABEL: define dso_local <vscale x 8 x bfloat> @test_vfmacc_vf_bf16m2(
+// CHECK-RV64-SAME: <vscale x 8 x bfloat> [[VD:%.*]], bfloat noundef [[RS1:%.*]], <vscale x 8 x bfloat> [[VS2:%.*]], i64 noundef [[VL:%.*]]) #[[ATTR0]] {
+// CHECK-RV64-NEXT: [[ENTRY:.*:]]
+// CHECK-RV64-NEXT: [[TMP0:%.*]] = call <vscale x 8 x bfloat> @llvm.riscv.vfmacc.nxv8bf16.bf16.i64(<vscale x 8 x bfloat> [[VD]], bfloat [[RS1]], <vscale x 8 x bfloat> [[VS2]], i64 7, i64 [[VL]], i64 3)
+// CHECK-RV64-NEXT: ret <vscale x 8 x bfloat> [[TMP0]]
+//
+vbfloat16m2_t test_vfmacc_vf_bf16m2(vbfloat16m2_t vd, __bf16 rs1, vbfloat16m2_t vs2, size_t vl) {
+ return __riscv_vfmacc(vd, rs1, vs2, vl);
+}
+
+// CHECK-RV64-LABEL: define dso_local <vscale x 16 x bfloat> @test_vfmacc_vv_bf16m4(
+// CHECK-RV64-SAME: <vscale x 16 x bfloat> [[VD:%.*]], <vscale x 16 x bfloat> [[VS1:%.*]], <vscale x 16 x bfloat> [[VS2:%.*]], i64 noundef [[VL:%.*]]) #[[ATTR0]] {
+// CHECK-RV64-NEXT: [[ENTRY:.*:]]
+// CHECK-RV64-NEXT: [[TMP0:%.*]] = call <vscale x 16 x bfloat> @llvm.riscv.vfmacc.nxv16bf16.nxv16bf16.i64(<vscale x 16 x bfloat> [[VD]], <vscale x 16 x bfloat> [[VS1]], <vscale x 16 x bfloat> [[VS2]], i64 7, i64 [[VL]], i64 3)
+// CHECK-RV64-NEXT: ret <vscale x 16 x bfloat> [[TMP0]]
+//
+vbfloat16m4_t test_vfmacc_vv_bf16m4(vbfloat16m4_t vd, vbfloat16m4_t vs1, vbfloat16m4_t vs2, size_t vl) {
+ return __riscv_vfmacc(vd, vs1, vs2, vl);
+}
+
+// CHECK-RV64-LABEL: define dso_local <vscale x 16 x bfloat> @test_vfmacc_vf_bf16m4(
+// CHECK-RV64-SAME: <vscale x 16 x bfloat> [[VD:%.*]], bfloat noundef [[RS1:%.*]], <vscale x 16 x bfloat> [[VS2:%.*]], i64 noundef [[VL:%.*]]) #[[ATTR0]] {
+// CHECK-RV64-NEXT: [[ENTRY:.*:]]
+// CHECK-RV64-NEXT: [[TMP0:%.*]] = call <vscale x 16 x bfloat> @llvm.riscv.vfmacc.nxv16bf16.bf16.i64(<vscale x 16 x bfloat> [[VD]], bfloat [[RS1]], <vscale x 16 x bfloat> [[VS2]], i64 7, i64 [[VL]], i64 3)
+// CHECK-RV64-NEXT: ret <vscale x 16 x bfloat> [[TMP0]]
+//
+vbfloat16m4_t test_vfmacc_vf_bf16m4(vbfloat16m4_t vd, __bf16 rs1, vbfloat16m4_t vs2, size_t vl) {
+ return __riscv_vfmacc(vd, rs1, vs2, vl);
+}
+
+// CHECK-RV64-LABEL: define dso_local <vscale x 32 x bfloat> @test_vfmacc_vv_bf16m8(
+// CHECK-RV64-SAME: <vscale x 32 x bfloat> [[VD:%.*]], <vscale x 32 x bfloat> [[VS1:%.*]], <vscale x 32 x bfloat> [[VS2:%.*]], i64 noundef [[VL:%.*]]) #[[ATTR0]] {
+// CHECK-RV64-NEXT: [[ENTRY:.*:]]
+// CHECK-RV64-NEXT: [[TMP0:%.*]] = call <vscale x 32 x bfloat> @llvm.riscv.vfmacc.nxv32bf16.nxv32bf16.i64(<vscale x 32 x bfloat> [[VD]], <vscale x 32 x bfloat> [[VS1]], <vscale x 32 x bfloat> [[VS2]], i64 7, i64 [[VL]], i64 3)
+// CHECK-RV64-NEXT: ret <vscale x 32 x bfloat> [[TMP0]]
+//
+vbfloat16m8_t test_vfmacc_vv_bf16m8(vbfloat16m8_t vd, vbfloat16m8_t vs1, vbfloat16m8_t vs2, size_t vl) {
+ return __riscv_vfmacc(vd, vs1, vs2, vl);
+}
+
+// CHECK-RV64-LABEL: define dso_local <vscale x 32 x bfloat> @test_vfmacc_vf_bf16m8(
+// CHECK-RV64-SAME: <vscale x 32 x bfloat> [[VD:%.*]], bfloat noundef [[RS1:%.*]], <vscale x 32 x bfloat> [[VS2:%.*]], i64 noundef [[VL:%.*]]) #[[ATTR0]] {
+// CHECK-RV64-NEXT: [[ENTRY:.*:]]
+// CHECK-RV64-NEXT: [[TMP0:%.*]] = call <vscale x 32 x bfloat> @llvm.riscv.vfmacc.nxv32bf16.bf16.i64(<vscale x 32 x bfloat> [[VD]], bfloat [[RS1]], <vscale x 32 x bfloat> [[VS2]], i64 7, i64 [[VL]], i64 3)
+// CHECK-RV64-NEXT: ret <vscale x 32 x bfloat> [[TMP0]]
+//
+vbfloat16m8_t test_vfmacc_vf_bf16m8(vbfloat16m8_t vd, __bf16 rs1, vbfloat16m8_t vs2, size_t vl) {
+ return __riscv_vfmacc(vd, rs1, vs2, vl);
+}
+
+// CHECK-RV64-LABEL: define dso_local <vscale x 1 x bfloat> @test_vfmacc_vv_bf16mf4_m(
+// CHECK-RV64-SAME: <vscale x 1 x i1> [[MASK:%.*]], <vscale x 1 x bfloat> [[VD:%.*]], <vscale x 1 x bfloat> [[VS1:%.*]], <vscale x 1 x bfloat> [[VS2:%.*]], i64 noundef [[VL:%.*]]) #[[ATTR0]] {
+// CHECK-RV64-NEXT: [[ENTRY:.*:]]
+// CHECK-RV64-NEXT: [[TMP0:%.*]] = call <vscale x 1 x bfloat> @llvm.riscv.vfmacc.mask.nxv1bf16.nxv1bf16.i64(<vscale x 1 x bfloat> [[VD]], <vscale x 1 x bfloat> [[VS1]], <vscale x 1 x bfloat> [[VS2]], <vscale x 1 x i1> [[MASK]], i64 7, i64 [[VL]], i64 3)
+// CHECK-RV64-NEXT: ret <vscale x 1 x bfloat> [[TMP0]]
+//
+vbfloat16mf4_t test_vfmacc_vv_bf16mf4_m(vbool64_t mask, vbfloat16mf4_t vd, vbfloat16mf4_t vs1, vbfloat16mf4_t vs2, size_t vl) {
+ return __riscv_vfmacc(mask, vd, vs1, vs2, vl);
+}
+
+// CHECK-RV64-LABEL: define dso_local <vscale x 1 x bfloat> @test_vfmacc_vf_bf16mf4_m(
+// CHECK-RV64-SAME: <vscale x 1 x i1> [[MASK:%.*]], <vscale x 1 x bfloat> [[VD:%.*]], bfloat noundef [[RS1:%.*]], <vscale x 1 x bfloat> [[VS2:%.*]], i64 noundef [[VL:%.*]]) #[[ATTR0]] {
+// CHECK-RV64-NEXT: [[ENTRY:.*:]]
+// CHECK-RV64-NEXT: [[TMP0:%.*]] = call <vscale x 1 x bfloat> @llvm.riscv.vfmacc.mask.nxv1bf16.bf16.i64(<vscale x 1 x bfloat> [[VD]], bfloat [[RS1]], <vscale x 1 x bfloat> [[VS2]], <vscale x 1 x i1> [[MASK]], i64 7, i64 [[VL]], i64 3)
+// CHECK-RV64-NEXT: ret <vscale x 1 x bfloat> [[TMP0]]
+//
+vbfloat16mf4_t test_vfmacc_vf_bf16mf4_m(vbool64_t mask, vbfloat16mf4_t vd, __bf16 rs1, vbfloat16mf4_t vs2, size_t vl) {
+ return __riscv_vfmacc(mask, vd, rs1, vs2, vl);
+}
+
+// CHECK-RV64-LABEL: define dso_local <vscale x 2 x bfloat> @test_vfmacc_vv_bf16mf2_m(
+// CHECK-RV64-SAME: <vscale x 2 x i1> [[MASK:%.*]], <vscale x 2 x bfloat> [[VD:%.*]], <vscale x 2 x bfloat> [[VS1:%.*]], <vscale x 2 x bfloat> [[VS2:%.*]], i64 noundef [[VL:%.*]]) #[[ATTR0]] {
+// CHECK-RV64-NEXT: [[ENTRY:.*:]]
+// CHECK-RV64-NEXT: [[TMP0:%.*]] = call <vscale x 2 x bfloat> @llvm.riscv.vfmacc.mask.nxv2bf16.nxv2bf16.i64(<vscale x 2 x bfloat> [[VD]], <vscale x 2 x bfloat> [[VS1]], <vscale x 2 x bfloat> [[VS2]], <vscale x 2 x i1> [[MASK]], i64 7, i64 [[VL]], i64 3)
+// CHECK-RV64-NEXT: ret <vscale x 2 x bfloat> [[TMP0]]
+//
+vbfloat16mf2_t test_vfmacc_vv_bf16mf2_m(vbool32_t mask, vbfloat16mf2_t vd, vbfloat16mf2_t vs1, vbfloat16mf2_t vs2, size_t vl) {
+ return __riscv_vfmacc(mask, vd, vs1, vs2, vl);
+}
+
+// CHECK-RV64-LABEL: define dso_local <vscale x 2 x bfloat> @test_vfmacc_vf_bf16mf2_m(
+// CHECK-RV64-SAME: <vscale x 2 x i1> [[MASK:%.*]], <vscale x 2 x bfloat> [[VD:%.*]], bfloat noundef [[RS1:%.*]], <vscale x 2 x bfloat> [[VS2:%.*]], i64 noundef [[VL:%.*]]) #[[ATTR0]] {
+// CHECK-RV64-NEXT: [[ENTRY:.*:]]
+// CHECK-RV64-NEXT: [[TMP0:%.*]] = call <vscale x 2 x bfloat> @llvm.riscv.vfmacc.mask.nxv2bf16.bf16.i64(<vscale x 2 x bfloat> [[VD]], bfloat [[RS1]], <vscale x 2 x bfloat> [[VS2]], <vscale x 2 x i1> [[MASK]], i64 7, i64 [[VL]], i64 3)
+// CHECK-RV64-NEXT: ret <vscale x 2 x bfloat> [[TMP0]]
+//
+vbfloat16mf2_t test_vfmacc_vf_bf16mf2_m(vbool32_t mask, vbfloat16mf2_t vd, __bf16 rs1, vbfloat16mf2_t vs2, size_t vl) {
+ return __riscv_vfmacc(mask, vd, rs1, vs2, vl);
+}
+
+// CHECK-RV64-LABEL: define dso_local <vscale x 4 x bfloat> @test_vfmacc_vv_bf16m1_m(
+// CHECK-RV64-SAME: <vscale x 4 x i1> [[MASK:%.*]], <vscale x 4 x bfloat> [[VD:%.*]], <vscale x 4 x bfloat> [[VS1:%.*]], <vscale x 4 x bfloat> [[VS2:%.*]], i64 noundef [[VL:%.*]]) #[[ATTR0]] {
+// CHECK-RV64-NEXT: [[ENTRY:.*:]]
+// CHECK-RV64-NEXT: [[TMP0:%.*]] = call <vscale x 4 x bfloat> @llvm.riscv.vfmacc.mask.nxv4bf16.nxv4bf16.i64(<vscale x 4 x bfloat> [[VD]], <vscale x 4 x bfloat> [[VS1]], <vscale x 4 x bfloat> [[VS2]], <vscale x 4 x i1> [[MASK]], i64 7, i64 [[VL]], i64 3)
+// CHECK-RV64-NEXT: ret <vscale x 4 x bfloat> [[TMP0]]
+//
+vbfloat16m1_t test_vfmacc_vv_bf16m1_m(vbool16_t mask, vbfloat16m1_t vd, vbfloat16m1_t vs1, vbfloat16m1_t vs2, size_t vl) {
+ return __riscv_vfmacc(mask, vd, vs1, vs2, vl);
+}
+
+// CHECK-RV64-LABEL: define dso_local <vscale x 4 x bfloat> @test_vfmacc_vf_bf16m1_m(
+// CHECK-RV64-SAME: <vscale x 4 x i1> [[MASK:%.*]], <vscale x 4 x bfloat> [[VD:%.*]], bfloat noundef [[RS1:%.*]], <vscale x 4 x bfloat> [[VS2:%.*]], i64 noundef [[VL:%.*]]) #[[ATTR0]] {
+// CHECK-RV64-NEXT: [[ENTRY:.*:]]
+// CHECK-RV64-NEXT: [[TMP0:%.*]] = call <vscale x 4 x bfloat> @llvm.riscv.vfmacc.mask.nxv4bf16.bf16.i64(<vscale x 4 x bfloat> [[VD]], bfloat [[RS1]], <vscale x 4 x bfloat> [[VS2]], <vscale x 4 x i1> [[MASK]], i64 7, i64 [[VL]], i64 3)
+// CHECK-RV64-NEXT: ret <vscale x 4 x bfloat> [[TMP0]]
+//
+vbfloat16m1_t test_vfmacc_vf_bf16m1_m(vbool16_t mask, vbfloat16m1_t vd, __bf16 rs1, vbfloat16m1_t vs2, size_t vl) {
+ return __riscv_vfmacc(mask, vd, rs1, vs2, vl);
+}
+
+// CHECK-RV64-LABEL: define dso_local <vscale x 8 x bfloat> @test_vfmacc_vv_bf16m2_m(
+// CHECK-RV64-SAME: <vscale x 8 x i1> [[MASK:%.*]], <vscale x 8 x bfloat> [[VD:%.*]], <vscale x 8 x bfloat> [[VS1:%.*]], <vscale x 8 x bfloat> [[VS2:%.*]], i64 noundef [[VL:%.*]]) #[[ATTR0]] {
+// CHECK-RV64-NEXT: [[ENTRY:.*:]]
+// CHECK-RV64-NEXT: [[TMP0:%.*]] = call <vscale x 8 x bfloat> @llvm.riscv.vfmacc.mask.nxv8bf16.nxv8bf16.i64(<vscale x 8 x bfloat> [[VD]], <vscale x 8 x bfloat> [[VS1]], <vscale x 8 x bfloat> [[VS2]], <vscale x 8 x i1> [[MASK]], i64 7, i64 [[VL]], i64 3)
+// CHECK-RV64-NEXT: ret <vscale x 8 x bfloat> [[TMP0]]
+//
+vbfloat16m2_t test_vfmacc_vv_bf16m2_m(vbool8_t mask, vbfloat16m2_t vd, vbfloat16m2_t vs1, vbfloat16m2_t vs2, size_t vl) {
+ return __riscv_vfmacc(mask, vd, vs1, vs2, vl);
+}
+
+// CHECK-RV64-LABEL: define dso_local <vscale x 8 x bfloat> @test_vfmacc_vf_bf16m2_m(
+// CHECK-RV64-SAME: <vscale x 8 x i1> [[MASK:%.*]], <vscale x 8 x bfloat> [[VD:%.*]], bfloat noundef [[RS1:%.*]], <vscale x 8 x bfloat> [[VS2:%.*]], i64 noundef [[VL:%.*]]) #[[ATTR0]] {
+// CHECK-RV64-NEXT: [[ENTRY:.*:]]
+// CHECK-RV64-NEXT: [[TMP0:%.*]] = call <vscale x 8 x bfloat> @llvm.riscv.vfmacc.mask.nxv8bf16.bf16.i64(<vscale x 8 x bfloat> [[VD]], bfloat [[RS1]], <vscale x 8 x bfloat> [[VS2]], <vscale x 8 x i1> [[MASK]], i64 7, i64 [[VL]], i64 3)
+// CHECK-RV64-NEXT: ret <vscale x 8 x bfloat> [[TMP0]]
+//
+vbfloat16m2_t test_vfmacc_vf_bf16m2_m(vbool8_t mask, vbfloat16m2_t vd, __bf16 rs1, vbfloat16m2_t vs2, size_t vl) {
+ return __riscv_vfmacc(mask, vd, rs1, vs2, vl);
+}
+
+// CHECK-RV64-LABEL: define dso_local <vscale x 16 x bfloat> @test_vfmacc_vv_bf16m4_m(
+// CHECK-RV64-SAME: <vscale x 16 x i1> [[MASK:%.*]], <vscale x 16 x bfloat> [[VD:%.*]], <vscale x 16 x bfloat> [[VS1:%.*]], <vscale x 16 x bfloat> [[VS2:%.*]], i64 noundef [[VL:%.*]]) #[[ATTR0]] {
+// CHECK-RV64-NEXT: [[ENTRY:.*:]]
+// CHECK-RV64-NEXT: [[TMP0:%.*]] = call <vscale x 16 x bfloat> @llvm.riscv.vfmacc.mask.nxv16bf16.nxv16bf16.i64(<vscale x 16 x bfloat> [[VD]], <vscale x 16 x bfloat> [[VS1]], <vscale x 16 x bfloat> [[VS2]], <vscale x 16 x i1> [[MASK]], i64 7, i64 [[VL]], i64 3)
+// CHECK-RV64-NEXT: ret <vscale x 16 x bfloat> [[TMP0]]
+//
+vbfloat16m4_t test_vfmacc_vv_bf16m4_m(vbool4_t mask, vbfloat16m4_t vd, vbfloat16m4_t vs1, vbfloat16m4_t vs2, size_t vl) {
+ return __riscv_vfmacc(mask, vd, vs1, vs2, vl);
+}
+
+// CHECK-RV64-LABEL: define dso_local <vscale x 16 x bfloat> @test_vfmacc_vf_bf16m4_m(
+// CHECK-RV64-SAME: <vscale x 16 x i1> [[MASK:%.*]], <vscale x 16 x bfloat> [[VD:%.*]], bfloat noundef [[RS1:%.*]], <vscale x 16 x bfloat> [[VS2:%.*]], i64 noundef [[VL:%.*]]) #[[ATTR0]] {
+// CHECK-RV64-NEXT: [[ENTRY:.*:]]
+// CHECK-RV64-NEXT: [[TMP0:%.*]] = call <vscale x 16 x bfloat> @llvm.riscv.vfmacc.mask.nxv16bf16.bf16.i64(<vscale x 16 x bfloat> [[VD]], bfloat [[RS1]], <vscale x 16 x bfloat> [[VS2]], <vscale x 16 x i1> [[MASK]], i64 7, i64 [[VL]], i64 3)
+// CHECK-RV64-NEXT: ret <vscale x 16 x bfloat> [[TMP0]]
+//
+vbfloat16m4_t test_vfmacc_vf_bf16m4_m(vbool4_t mask, vbfloat16m4_t vd, __bf16 rs1, vbfloat16m4_t vs2, size_t vl) {
+ return __riscv_vfmacc(mask, vd, rs1, vs2, vl);
+}
+
+// CHECK-RV64-LABEL: define dso_local <vscale x 32 x bfloat> @test_vfmacc_vv_bf16m8_m(
+// CHECK-RV64-SAME: <vscale x 32 x i1> [[MASK:%.*]], <vscale x 32 x bfloat> [[VD:%.*]], <vscale x 32 x bfloat> [[VS1:%.*]], <vscale x 32 x bfloat> [[VS2:%.*]], i64 noundef [[VL:%.*]]) #[[ATTR0]] {
+// CHECK-RV64-NEXT: [[ENTRY:.*:]]
+// CHECK-RV64-NEXT: [[TMP0:%.*]] = call <vscale x 32 x bfloat> @llvm.riscv.vfmacc.mask.nxv32bf16.nxv32bf16.i64(<vscale x 32 x bfloat> [[VD]], <vscale x 32 x bfloat> [[VS1]], <vscale x 32 x bfloat> [[VS2]], <vscale x 32 x i1> [[MASK]], i64 7, i64 [[VL]], i64 3)
+// CHECK-RV64-NEXT: ret <vscale x 32 x bfloat> [[TMP0]]
+//
+vbfloat16m8_t test_vfmacc_vv_bf16m8_m(vbool2_t mask, vbfloat16m8_t vd, vbfloat16m8_t vs1, vbfloat16m8_t vs2, size_t vl) {
+ return __riscv_vfmacc(mask, vd, vs1, vs2, vl);
+}
+
+// CHECK-RV64-LABEL: define dso_local <vscale x 32 x bfloat> @test_vfmacc_vf_bf16m8_m(
+// CHECK-RV64-SAME: <vscale x 32 x i1> [[MASK:%.*]], <vscale x 32 x bfloat> [[VD:%.*]], bfloat noundef [[RS1:%.*]], <vscale x 32 x bfloat> [[VS2:%.*]], i64 noundef [[VL:%.*]]) #[[ATTR0]] {
+// CHECK-RV64-NEXT: [[ENTRY:.*:]]
+// CHECK-RV64-NEXT: [[TMP0:%.*]] = call <vscale x 32 x bfloat> @llvm.riscv.vfmacc.mask.nxv32bf16.bf16.i64(<vscale x 32 x bfloat> [[VD]], bfloat [[RS1]], <vscale x 32 x bfloat> [[VS2]], <vscale x 32 x i1> [[MASK]], i64 7, i64 [[VL]], i64 3)
+// CHECK-RV64-NEXT: ret <vscale x 32 x bfloat> [[TMP0]]
+//
+vbfloat16m8_t test_vfmacc_vf_bf16m8_m(vbool2_t mask, vbfloat16m8_t vd, __bf16 rs1, vbfloat16m8_t vs2, size_t vl) {
+ return __riscv_vfmacc(mask, vd, rs1, vs2, vl);
+}
+
diff --git a/clang/test/CodeGen/RISCV/rvv-intrinsics-autogenerated/zvfbfa/non-policy/overloaded/vfmadd.c b/clang/test/CodeGen/RISCV/rvv-intrinsics-autogenerated/zvfbfa/non-policy/overloaded/vfmadd.c
new file mode 100644
index 0000000..db2184c
--- /dev/null
+++ b/clang/test/CodeGen/RISCV/rvv-intrinsics-autogenerated/zvfbfa/non-policy/overloaded/vfmadd.c
@@ -0,0 +1,249 @@
+// NOTE: Assertions have been autogenerated by utils/update_cc_test_checks.py UTC_ARGS: --version 5
+// REQUIRES: riscv-registered-target
+// RUN: %clang_cc1 -triple riscv64 -target-feature +v \
+// RUN: -target-feature +experimental-zvfbfa -disable-O0-optnone \
+// RUN: -emit-llvm %s -o - | opt -S -passes=mem2reg | \
+// RUN: FileCheck --check-prefix=CHECK-RV64 %s
+
+#include <riscv_vector.h>
+
+// CHECK-RV64-LABEL: define dso_local <vscale x 1 x bfloat> @test_vfmadd_vv_bf16mf4(
+// CHECK-RV64-SAME: <vscale x 1 x bfloat> [[VD:%.*]], <vscale x 1 x bfloat> [[VS1:%.*]], <vscale x 1 x bfloat> [[VS2:%.*]], i64 noundef [[VL:%.*]]) #[[ATTR0:[0-9]+]] {
+// CHECK-RV64-NEXT: [[ENTRY:.*:]]
+// CHECK-RV64-NEXT: [[TMP0:%.*]] = call <vscale x 1 x bfloat> @llvm.riscv.vfmadd.nxv1bf16.nxv1bf16.i64(<vscale x 1 x bfloat> [[VD]], <vscale x 1 x bfloat> [[VS1]], <vscale x 1 x bfloat> [[VS2]], i64 7, i64 [[VL]], i64 3)
+// CHECK-RV64-NEXT: ret <vscale x 1 x bfloat> [[TMP0]]
+//
+vbfloat16mf4_t test_vfmadd_vv_bf16mf4(vbfloat16mf4_t vd, vbfloat16mf4_t vs1, vbfloat16mf4_t vs2, size_t vl) {
+ return __riscv_vfmadd(vd, vs1, vs2, vl);
+}
+
+// CHECK-RV64-LABEL: define dso_local <vscale x 1 x bfloat> @test_vfmadd_vf_bf16mf4(
+// CHECK-RV64-SAME: <vscale x 1 x bfloat> [[VD:%.*]], bfloat noundef [[RS1:%.*]], <vscale x 1 x bfloat> [[VS2:%.*]], i64 noundef [[VL:%.*]]) #[[ATTR0]] {
+// CHECK-RV64-NEXT: [[ENTRY:.*:]]
+// CHECK-RV64-NEXT: [[TMP0:%.*]] = call <vscale x 1 x bfloat> @llvm.riscv.vfmadd.nxv1bf16.bf16.i64(<vscale x 1 x bfloat> [[VD]], bfloat [[RS1]], <vscale x 1 x bfloat> [[VS2]], i64 7, i64 [[VL]], i64 3)
+// CHECK-RV64-NEXT: ret <vscale x 1 x bfloat> [[TMP0]]
+//
+vbfloat16mf4_t test_vfmadd_vf_bf16mf4(vbfloat16mf4_t vd, __bf16 rs1, vbfloat16mf4_t vs2, size_t vl) {
+ return __riscv_vfmadd(vd, rs1, vs2, vl);
+}
+
+// CHECK-RV64-LABEL: define dso_local <vscale x 2 x bfloat> @test_vfmadd_vv_bf16mf2(
+// CHECK-RV64-SAME: <vscale x 2 x bfloat> [[VD:%.*]], <vscale x 2 x bfloat> [[VS1:%.*]], <vscale x 2 x bfloat> [[VS2:%.*]], i64 noundef [[VL:%.*]]) #[[ATTR0]] {
+// CHECK-RV64-NEXT: [[ENTRY:.*:]]
+// CHECK-RV64-NEXT: [[TMP0:%.*]] = call <vscale x 2 x bfloat> @llvm.riscv.vfmadd.nxv2bf16.nxv2bf16.i64(<vscale x 2 x bfloat> [[VD]], <vscale x 2 x bfloat> [[VS1]], <vscale x 2 x bfloat> [[VS2]], i64 7, i64 [[VL]], i64 3)
+// CHECK-RV64-NEXT: ret <vscale x 2 x bfloat> [[TMP0]]
+//
+vbfloat16mf2_t test_vfmadd_vv_bf16mf2(vbfloat16mf2_t vd, vbfloat16mf2_t vs1, vbfloat16mf2_t vs2, size_t vl) {
+ return __riscv_vfmadd(vd, vs1, vs2, vl);
+}
+
+// CHECK-RV64-LABEL: define dso_local <vscale x 2 x bfloat> @test_vfmadd_vf_bf16mf2(
+// CHECK-RV64-SAME: <vscale x 2 x bfloat> [[VD:%.*]], bfloat noundef [[RS1:%.*]], <vscale x 2 x bfloat> [[VS2:%.*]], i64 noundef [[VL:%.*]]) #[[ATTR0]] {
+// CHECK-RV64-NEXT: [[ENTRY:.*:]]
+// CHECK-RV64-NEXT: [[TMP0:%.*]] = call <vscale x 2 x bfloat> @llvm.riscv.vfmadd.nxv2bf16.bf16.i64(<vscale x 2 x bfloat> [[VD]], bfloat [[RS1]], <vscale x 2 x bfloat> [[VS2]], i64 7, i64 [[VL]], i64 3)
+// CHECK-RV64-NEXT: ret <vscale x 2 x bfloat> [[TMP0]]
+//
+vbfloat16mf2_t test_vfmadd_vf_bf16mf2(vbfloat16mf2_t vd, __bf16 rs1, vbfloat16mf2_t vs2, size_t vl) {
+ return __riscv_vfmadd(vd, rs1, vs2, vl);
+}
+
+// CHECK-RV64-LABEL: define dso_local <vscale x 4 x bfloat> @test_vfmadd_vv_bf16m1(
+// CHECK-RV64-SAME: <vscale x 4 x bfloat> [[VD:%.*]], <vscale x 4 x bfloat> [[VS1:%.*]], <vscale x 4 x bfloat> [[VS2:%.*]], i64 noundef [[VL:%.*]]) #[[ATTR0]] {
+// CHECK-RV64-NEXT: [[ENTRY:.*:]]
+// CHECK-RV64-NEXT: [[TMP0:%.*]] = call <vscale x 4 x bfloat> @llvm.riscv.vfmadd.nxv4bf16.nxv4bf16.i64(<vscale x 4 x bfloat> [[VD]], <vscale x 4 x bfloat> [[VS1]], <vscale x 4 x bfloat> [[VS2]], i64 7, i64 [[VL]], i64 3)
+// CHECK-RV64-NEXT: ret <vscale x 4 x bfloat> [[TMP0]]
+//
+vbfloat16m1_t test_vfmadd_vv_bf16m1(vbfloat16m1_t vd, vbfloat16m1_t vs1, vbfloat16m1_t vs2, size_t vl) {
+ return __riscv_vfmadd(vd, vs1, vs2, vl);
+}
+
+// CHECK-RV64-LABEL: define dso_local <vscale x 4 x bfloat> @test_vfmadd_vf_bf16m1(
+// CHECK-RV64-SAME: <vscale x 4 x bfloat> [[VD:%.*]], bfloat noundef [[RS1:%.*]], <vscale x 4 x bfloat> [[VS2:%.*]], i64 noundef [[VL:%.*]]) #[[ATTR0]] {
+// CHECK-RV64-NEXT: [[ENTRY:.*:]]
+// CHECK-RV64-NEXT: [[TMP0:%.*]] = call <vscale x 4 x bfloat> @llvm.riscv.vfmadd.nxv4bf16.bf16.i64(<vscale x 4 x bfloat> [[VD]], bfloat [[RS1]], <vscale x 4 x bfloat> [[VS2]], i64 7, i64 [[VL]], i64 3)
+// CHECK-RV64-NEXT: ret <vscale x 4 x bfloat> [[TMP0]]
+//
+vbfloat16m1_t test_vfmadd_vf_bf16m1(vbfloat16m1_t vd, __bf16 rs1, vbfloat16m1_t vs2, size_t vl) {
+ return __riscv_vfmadd(vd, rs1, vs2, vl);
+}
+
+// CHECK-RV64-LABEL: define dso_local <vscale x 8 x bfloat> @test_vfmadd_vv_bf16m2(
+// CHECK-RV64-SAME: <vscale x 8 x bfloat> [[VD:%.*]], <vscale x 8 x bfloat> [[VS1:%.*]], <vscale x 8 x bfloat> [[VS2:%.*]], i64 noundef [[VL:%.*]]) #[[ATTR0]] {
+// CHECK-RV64-NEXT: [[ENTRY:.*:]]
+// CHECK-RV64-NEXT: [[TMP0:%.*]] = call <vscale x 8 x bfloat> @llvm.riscv.vfmadd.nxv8bf16.nxv8bf16.i64(<vscale x 8 x bfloat> [[VD]], <vscale x 8 x bfloat> [[VS1]], <vscale x 8 x bfloat> [[VS2]], i64 7, i64 [[VL]], i64 3)
+// CHECK-RV64-NEXT: ret <vscale x 8 x bfloat> [[TMP0]]
+//
+vbfloat16m2_t test_vfmadd_vv_bf16m2(vbfloat16m2_t vd, vbfloat16m2_t vs1, vbfloat16m2_t vs2, size_t vl) {
+ return __riscv_vfmadd(vd, vs1, vs2, vl);
+}
+
+// CHECK-RV64-LABEL: define dso_local <vscale x 8 x bfloat> @test_vfmadd_vf_bf16m2(
+// CHECK-RV64-SAME: <vscale x 8 x bfloat> [[VD:%.*]], bfloat noundef [[RS1:%.*]], <vscale x 8 x bfloat> [[VS2:%.*]], i64 noundef [[VL:%.*]]) #[[ATTR0]] {
+// CHECK-RV64-NEXT: [[ENTRY:.*:]]
+// CHECK-RV64-NEXT: [[TMP0:%.*]] = call <vscale x 8 x bfloat> @llvm.riscv.vfmadd.nxv8bf16.bf16.i64(<vscale x 8 x bfloat> [[VD]], bfloat [[RS1]], <vscale x 8 x bfloat> [[VS2]], i64 7, i64 [[VL]], i64 3)
+// CHECK-RV64-NEXT: ret <vscale x 8 x bfloat> [[TMP0]]
+//
+vbfloat16m2_t test_vfmadd_vf_bf16m2(vbfloat16m2_t vd, __bf16 rs1, vbfloat16m2_t vs2, size_t vl) {
+ return __riscv_vfmadd(vd, rs1, vs2, vl);
+}
+
+// CHECK-RV64-LABEL: define dso_local <vscale x 16 x bfloat> @test_vfmadd_vv_bf16m4(
+// CHECK-RV64-SAME: <vscale x 16 x bfloat> [[VD:%.*]], <vscale x 16 x bfloat> [[VS1:%.*]], <vscale x 16 x bfloat> [[VS2:%.*]], i64 noundef [[VL:%.*]]) #[[ATTR0]] {
+// CHECK-RV64-NEXT: [[ENTRY:.*:]]
+// CHECK-RV64-NEXT: [[TMP0:%.*]] = call <vscale x 16 x bfloat> @llvm.riscv.vfmadd.nxv16bf16.nxv16bf16.i64(<vscale x 16 x bfloat> [[VD]], <vscale x 16 x bfloat> [[VS1]], <vscale x 16 x bfloat> [[VS2]], i64 7, i64 [[VL]], i64 3)
+// CHECK-RV64-NEXT: ret <vscale x 16 x bfloat> [[TMP0]]
+//
+vbfloat16m4_t test_vfmadd_vv_bf16m4(vbfloat16m4_t vd, vbfloat16m4_t vs1, vbfloat16m4_t vs2, size_t vl) {
+ return __riscv_vfmadd(vd, vs1, vs2, vl);
+}
+
+// CHECK-RV64-LABEL: define dso_local <vscale x 16 x bfloat> @test_vfmadd_vf_bf16m4(
+// CHECK-RV64-SAME: <vscale x 16 x bfloat> [[VD:%.*]], bfloat noundef [[RS1:%.*]], <vscale x 16 x bfloat> [[VS2:%.*]], i64 noundef [[VL:%.*]]) #[[ATTR0]] {
+// CHECK-RV64-NEXT: [[ENTRY:.*:]]
+// CHECK-RV64-NEXT: [[TMP0:%.*]] = call <vscale x 16 x bfloat> @llvm.riscv.vfmadd.nxv16bf16.bf16.i64(<vscale x 16 x bfloat> [[VD]], bfloat [[RS1]], <vscale x 16 x bfloat> [[VS2]], i64 7, i64 [[VL]], i64 3)
+// CHECK-RV64-NEXT: ret <vscale x 16 x bfloat> [[TMP0]]
+//
+vbfloat16m4_t test_vfmadd_vf_bf16m4(vbfloat16m4_t vd, __bf16 rs1, vbfloat16m4_t vs2, size_t vl) {
+ return __riscv_vfmadd(vd, rs1, vs2, vl);
+}
+
+// CHECK-RV64-LABEL: define dso_local <vscale x 32 x bfloat> @test_vfmadd_vv_bf16m8(
+// CHECK-RV64-SAME: <vscale x 32 x bfloat> [[VD:%.*]], <vscale x 32 x bfloat> [[VS1:%.*]], <vscale x 32 x bfloat> [[VS2:%.*]], i64 noundef [[VL:%.*]]) #[[ATTR0]] {
+// CHECK-RV64-NEXT: [[ENTRY:.*:]]
+// CHECK-RV64-NEXT: [[TMP0:%.*]] = call <vscale x 32 x bfloat> @llvm.riscv.vfmadd.nxv32bf16.nxv32bf16.i64(<vscale x 32 x bfloat> [[VD]], <vscale x 32 x bfloat> [[VS1]], <vscale x 32 x bfloat> [[VS2]], i64 7, i64 [[VL]], i64 3)
+// CHECK-RV64-NEXT: ret <vscale x 32 x bfloat> [[TMP0]]
+//
+vbfloat16m8_t test_vfmadd_vv_bf16m8(vbfloat16m8_t vd, vbfloat16m8_t vs1, vbfloat16m8_t vs2, size_t vl) {
+ return __riscv_vfmadd(vd, vs1, vs2, vl);
+}
+
+// CHECK-RV64-LABEL: define dso_local <vscale x 32 x bfloat> @test_vfmadd_vf_bf16m8(
+// CHECK-RV64-SAME: <vscale x 32 x bfloat> [[VD:%.*]], bfloat noundef [[RS1:%.*]], <vscale x 32 x bfloat> [[VS2:%.*]], i64 noundef [[VL:%.*]]) #[[ATTR0]] {
+// CHECK-RV64-NEXT: [[ENTRY:.*:]]
+// CHECK-RV64-NEXT: [[TMP0:%.*]] = call <vscale x 32 x bfloat> @llvm.riscv.vfmadd.nxv32bf16.bf16.i64(<vscale x 32 x bfloat> [[VD]], bfloat [[RS1]], <vscale x 32 x bfloat> [[VS2]], i64 7, i64 [[VL]], i64 3)
+// CHECK-RV64-NEXT: ret <vscale x 32 x bfloat> [[TMP0]]
+//
+vbfloat16m8_t test_vfmadd_vf_bf16m8(vbfloat16m8_t vd, __bf16 rs1, vbfloat16m8_t vs2, size_t vl) {
+ return __riscv_vfmadd(vd, rs1, vs2, vl);
+}
+
+// CHECK-RV64-LABEL: define dso_local <vscale x 1 x bfloat> @test_vfmadd_vv_bf16mf4_m(
+// CHECK-RV64-SAME: <vscale x 1 x i1> [[MASK:%.*]], <vscale x 1 x bfloat> [[VD:%.*]], <vscale x 1 x bfloat> [[VS1:%.*]], <vscale x 1 x bfloat> [[VS2:%.*]], i64 noundef [[VL:%.*]]) #[[ATTR0]] {
+// CHECK-RV64-NEXT: [[ENTRY:.*:]]
+// CHECK-RV64-NEXT: [[TMP0:%.*]] = call <vscale x 1 x bfloat> @llvm.riscv.vfmadd.mask.nxv1bf16.nxv1bf16.i64(<vscale x 1 x bfloat> [[VD]], <vscale x 1 x bfloat> [[VS1]], <vscale x 1 x bfloat> [[VS2]], <vscale x 1 x i1> [[MASK]], i64 7, i64 [[VL]], i64 3)
+// CHECK-RV64-NEXT: ret <vscale x 1 x bfloat> [[TMP0]]
+//
+vbfloat16mf4_t test_vfmadd_vv_bf16mf4_m(vbool64_t mask, vbfloat16mf4_t vd, vbfloat16mf4_t vs1, vbfloat16mf4_t vs2, size_t vl) {
+ return __riscv_vfmadd(mask, vd, vs1, vs2, vl);
+}
+
+// CHECK-RV64-LABEL: define dso_local <vscale x 1 x bfloat> @test_vfmadd_vf_bf16mf4_m(
+// CHECK-RV64-SAME: <vscale x 1 x i1> [[MASK:%.*]], <vscale x 1 x bfloat> [[VD:%.*]], bfloat noundef [[RS1:%.*]], <vscale x 1 x bfloat> [[VS2:%.*]], i64 noundef [[VL:%.*]]) #[[ATTR0]] {
+// CHECK-RV64-NEXT: [[ENTRY:.*:]]
+// CHECK-RV64-NEXT: [[TMP0:%.*]] = call <vscale x 1 x bfloat> @llvm.riscv.vfmadd.mask.nxv1bf16.bf16.i64(<vscale x 1 x bfloat> [[VD]], bfloat [[RS1]], <vscale x 1 x bfloat> [[VS2]], <vscale x 1 x i1> [[MASK]], i64 7, i64 [[VL]], i64 3)
+// CHECK-RV64-NEXT: ret <vscale x 1 x bfloat> [[TMP0]]
+//
+vbfloat16mf4_t test_vfmadd_vf_bf16mf4_m(vbool64_t mask, vbfloat16mf4_t vd, __bf16 rs1, vbfloat16mf4_t vs2, size_t vl) {
+ return __riscv_vfmadd(mask, vd, rs1, vs2, vl);
+}
+
+// CHECK-RV64-LABEL: define dso_local <vscale x 2 x bfloat> @test_vfmadd_vv_bf16mf2_m(
+// CHECK-RV64-SAME: <vscale x 2 x i1> [[MASK:%.*]], <vscale x 2 x bfloat> [[VD:%.*]], <vscale x 2 x bfloat> [[VS1:%.*]], <vscale x 2 x bfloat> [[VS2:%.*]], i64 noundef [[VL:%.*]]) #[[ATTR0]] {
+// CHECK-RV64-NEXT: [[ENTRY:.*:]]
+// CHECK-RV64-NEXT: [[TMP0:%.*]] = call <vscale x 2 x bfloat> @llvm.riscv.vfmadd.mask.nxv2bf16.nxv2bf16.i64(<vscale x 2 x bfloat> [[VD]], <vscale x 2 x bfloat> [[VS1]], <vscale x 2 x bfloat> [[VS2]], <vscale x 2 x i1> [[MASK]], i64 7, i64 [[VL]], i64 3)
+// CHECK-RV64-NEXT: ret <vscale x 2 x bfloat> [[TMP0]]
+//
+vbfloat16mf2_t test_vfmadd_vv_bf16mf2_m(vbool32_t mask, vbfloat16mf2_t vd, vbfloat16mf2_t vs1, vbfloat16mf2_t vs2, size_t vl) {
+ return __riscv_vfmadd(mask, vd, vs1, vs2, vl);
+}
+
+// CHECK-RV64-LABEL: define dso_local <vscale x 2 x bfloat> @test_vfmadd_vf_bf16mf2_m(
+// CHECK-RV64-SAME: <vscale x 2 x i1> [[MASK:%.*]], <vscale x 2 x bfloat> [[VD:%.*]], bfloat noundef [[RS1:%.*]], <vscale x 2 x bfloat> [[VS2:%.*]], i64 noundef [[VL:%.*]]) #[[ATTR0]] {
+// CHECK-RV64-NEXT: [[ENTRY:.*:]]
+// CHECK-RV64-NEXT: [[TMP0:%.*]] = call <vscale x 2 x bfloat> @llvm.riscv.vfmadd.mask.nxv2bf16.bf16.i64(<vscale x 2 x bfloat> [[VD]], bfloat [[RS1]], <vscale x 2 x bfloat> [[VS2]], <vscale x 2 x i1> [[MASK]], i64 7, i64 [[VL]], i64 3)
+// CHECK-RV64-NEXT: ret <vscale x 2 x bfloat> [[TMP0]]
+//
+vbfloat16mf2_t test_vfmadd_vf_bf16mf2_m(vbool32_t mask, vbfloat16mf2_t vd, __bf16 rs1, vbfloat16mf2_t vs2, size_t vl) {
+ return __riscv_vfmadd(mask, vd, rs1, vs2, vl);
+}
+
+// CHECK-RV64-LABEL: define dso_local <vscale x 4 x bfloat> @test_vfmadd_vv_bf16m1_m(
+// CHECK-RV64-SAME: <vscale x 4 x i1> [[MASK:%.*]], <vscale x 4 x bfloat> [[VD:%.*]], <vscale x 4 x bfloat> [[VS1:%.*]], <vscale x 4 x bfloat> [[VS2:%.*]], i64 noundef [[VL:%.*]]) #[[ATTR0]] {
+// CHECK-RV64-NEXT: [[ENTRY:.*:]]
+// CHECK-RV64-NEXT: [[TMP0:%.*]] = call <vscale x 4 x bfloat> @llvm.riscv.vfmadd.mask.nxv4bf16.nxv4bf16.i64(<vscale x 4 x bfloat> [[VD]], <vscale x 4 x bfloat> [[VS1]], <vscale x 4 x bfloat> [[VS2]], <vscale x 4 x i1> [[MASK]], i64 7, i64 [[VL]], i64 3)
+// CHECK-RV64-NEXT: ret <vscale x 4 x bfloat> [[TMP0]]
+//
+vbfloat16m1_t test_vfmadd_vv_bf16m1_m(vbool16_t mask, vbfloat16m1_t vd, vbfloat16m1_t vs1, vbfloat16m1_t vs2, size_t vl) {
+ return __riscv_vfmadd(mask, vd, vs1, vs2, vl);
+}
+
+// CHECK-RV64-LABEL: define dso_local <vscale x 4 x bfloat> @test_vfmadd_vf_bf16m1_m(
+// CHECK-RV64-SAME: <vscale x 4 x i1> [[MASK:%.*]], <vscale x 4 x bfloat> [[VD:%.*]], bfloat noundef [[RS1:%.*]], <vscale x 4 x bfloat> [[VS2:%.*]], i64 noundef [[VL:%.*]]) #[[ATTR0]] {
+// CHECK-RV64-NEXT: [[ENTRY:.*:]]
+// CHECK-RV64-NEXT: [[TMP0:%.*]] = call <vscale x 4 x bfloat> @llvm.riscv.vfmadd.mask.nxv4bf16.bf16.i64(<vscale x 4 x bfloat> [[VD]], bfloat [[RS1]], <vscale x 4 x bfloat> [[VS2]], <vscale x 4 x i1> [[MASK]], i64 7, i64 [[VL]], i64 3)
+// CHECK-RV64-NEXT: ret <vscale x 4 x bfloat> [[TMP0]]
+//
+vbfloat16m1_t test_vfmadd_vf_bf16m1_m(vbool16_t mask, vbfloat16m1_t vd, __bf16 rs1, vbfloat16m1_t vs2, size_t vl) {
+ return __riscv_vfmadd(mask, vd, rs1, vs2, vl);
+}
+
+// CHECK-RV64-LABEL: define dso_local <vscale x 8 x bfloat> @test_vfmadd_vv_bf16m2_m(
+// CHECK-RV64-SAME: <vscale x 8 x i1> [[MASK:%.*]], <vscale x 8 x bfloat> [[VD:%.*]], <vscale x 8 x bfloat> [[VS1:%.*]], <vscale x 8 x bfloat> [[VS2:%.*]], i64 noundef [[VL:%.*]]) #[[ATTR0]] {
+// CHECK-RV64-NEXT: [[ENTRY:.*:]]
+// CHECK-RV64-NEXT: [[TMP0:%.*]] = call <vscale x 8 x bfloat> @llvm.riscv.vfmadd.mask.nxv8bf16.nxv8bf16.i64(<vscale x 8 x bfloat> [[VD]], <vscale x 8 x bfloat> [[VS1]], <vscale x 8 x bfloat> [[VS2]], <vscale x 8 x i1> [[MASK]], i64 7, i64 [[VL]], i64 3)
+// CHECK-RV64-NEXT: ret <vscale x 8 x bfloat> [[TMP0]]
+//
+vbfloat16m2_t test_vfmadd_vv_bf16m2_m(vbool8_t mask, vbfloat16m2_t vd, vbfloat16m2_t vs1, vbfloat16m2_t vs2, size_t vl) {
+ return __riscv_vfmadd(mask, vd, vs1, vs2, vl);
+}
+
+// CHECK-RV64-LABEL: define dso_local <vscale x 8 x bfloat> @test_vfmadd_vf_bf16m2_m(
+// CHECK-RV64-SAME: <vscale x 8 x i1> [[MASK:%.*]], <vscale x 8 x bfloat> [[VD:%.*]], bfloat noundef [[RS1:%.*]], <vscale x 8 x bfloat> [[VS2:%.*]], i64 noundef [[VL:%.*]]) #[[ATTR0]] {
+// CHECK-RV64-NEXT: [[ENTRY:.*:]]
+// CHECK-RV64-NEXT: [[TMP0:%.*]] = call <vscale x 8 x bfloat> @llvm.riscv.vfmadd.mask.nxv8bf16.bf16.i64(<vscale x 8 x bfloat> [[VD]], bfloat [[RS1]], <vscale x 8 x bfloat> [[VS2]], <vscale x 8 x i1> [[MASK]], i64 7, i64 [[VL]], i64 3)
+// CHECK-RV64-NEXT: ret <vscale x 8 x bfloat> [[TMP0]]
+//
+vbfloat16m2_t test_vfmadd_vf_bf16m2_m(vbool8_t mask, vbfloat16m2_t vd, __bf16 rs1, vbfloat16m2_t vs2, size_t vl) {
+ return __riscv_vfmadd(mask, vd, rs1, vs2, vl);
+}
+
+// CHECK-RV64-LABEL: define dso_local <vscale x 16 x bfloat> @test_vfmadd_vv_bf16m4_m(
+// CHECK-RV64-SAME: <vscale x 16 x i1> [[MASK:%.*]], <vscale x 16 x bfloat> [[VD:%.*]], <vscale x 16 x bfloat> [[VS1:%.*]], <vscale x 16 x bfloat> [[VS2:%.*]], i64 noundef [[VL:%.*]]) #[[ATTR0]] {
+// CHECK-RV64-NEXT: [[ENTRY:.*:]]
+// CHECK-RV64-NEXT: [[TMP0:%.*]] = call <vscale x 16 x bfloat> @llvm.riscv.vfmadd.mask.nxv16bf16.nxv16bf16.i64(<vscale x 16 x bfloat> [[VD]], <vscale x 16 x bfloat> [[VS1]], <vscale x 16 x bfloat> [[VS2]], <vscale x 16 x i1> [[MASK]], i64 7, i64 [[VL]], i64 3)
+// CHECK-RV64-NEXT: ret <vscale x 16 x bfloat> [[TMP0]]
+//
+vbfloat16m4_t test_vfmadd_vv_bf16m4_m(vbool4_t mask, vbfloat16m4_t vd, vbfloat16m4_t vs1, vbfloat16m4_t vs2, size_t vl) {
+ return __riscv_vfmadd(mask, vd, vs1, vs2, vl);
+}
+
+// CHECK-RV64-LABEL: define dso_local <vscale x 16 x bfloat> @test_vfmadd_vf_bf16m4_m(
+// CHECK-RV64-SAME: <vscale x 16 x i1> [[MASK:%.*]], <vscale x 16 x bfloat> [[VD:%.*]], bfloat noundef [[RS1:%.*]], <vscale x 16 x bfloat> [[VS2:%.*]], i64 noundef [[VL:%.*]]) #[[ATTR0]] {
+// CHECK-RV64-NEXT: [[ENTRY:.*:]]
+// CHECK-RV64-NEXT: [[TMP0:%.*]] = call <vscale x 16 x bfloat> @llvm.riscv.vfmadd.mask.nxv16bf16.bf16.i64(<vscale x 16 x bfloat> [[VD]], bfloat [[RS1]], <vscale x 16 x bfloat> [[VS2]], <vscale x 16 x i1> [[MASK]], i64 7, i64 [[VL]], i64 3)
+// CHECK-RV64-NEXT: ret <vscale x 16 x bfloat> [[TMP0]]
+//
+vbfloat16m4_t test_vfmadd_vf_bf16m4_m(vbool4_t mask, vbfloat16m4_t vd, __bf16 rs1, vbfloat16m4_t vs2, size_t vl) {
+ return __riscv_vfmadd(mask, vd, rs1, vs2, vl);
+}
+
+// CHECK-RV64-LABEL: define dso_local <vscale x 32 x bfloat> @test_vfmadd_vv_bf16m8_m(
+// CHECK-RV64-SAME: <vscale x 32 x i1> [[MASK:%.*]], <vscale x 32 x bfloat> [[VD:%.*]], <vscale x 32 x bfloat> [[VS1:%.*]], <vscale x 32 x bfloat> [[VS2:%.*]], i64 noundef [[VL:%.*]]) #[[ATTR0]] {
+// CHECK-RV64-NEXT: [[ENTRY:.*:]]
+// CHECK-RV64-NEXT: [[TMP0:%.*]] = call <vscale x 32 x bfloat> @llvm.riscv.vfmadd.mask.nxv32bf16.nxv32bf16.i64(<vscale x 32 x bfloat> [[VD]], <vscale x 32 x bfloat> [[VS1]], <vscale x 32 x bfloat> [[VS2]], <vscale x 32 x i1> [[MASK]], i64 7, i64 [[VL]], i64 3)
+// CHECK-RV64-NEXT: ret <vscale x 32 x bfloat> [[TMP0]]
+//
+vbfloat16m8_t test_vfmadd_vv_bf16m8_m(vbool2_t mask, vbfloat16m8_t vd, vbfloat16m8_t vs1, vbfloat16m8_t vs2, size_t vl) {
+ return __riscv_vfmadd(mask, vd, vs1, vs2, vl);
+}
+
+// CHECK-RV64-LABEL: define dso_local <vscale x 32 x bfloat> @test_vfmadd_vf_bf16m8_m(
+// CHECK-RV64-SAME: <vscale x 32 x i1> [[MASK:%.*]], <vscale x 32 x bfloat> [[VD:%.*]], bfloat noundef [[RS1:%.*]], <vscale x 32 x bfloat> [[VS2:%.*]], i64 noundef [[VL:%.*]]) #[[ATTR0]] {
+// CHECK-RV64-NEXT: [[ENTRY:.*:]]
+// CHECK-RV64-NEXT: [[TMP0:%.*]] = call <vscale x 32 x bfloat> @llvm.riscv.vfmadd.mask.nxv32bf16.bf16.i64(<vscale x 32 x bfloat> [[VD]], bfloat [[RS1]], <vscale x 32 x bfloat> [[VS2]], <vscale x 32 x i1> [[MASK]], i64 7, i64 [[VL]], i64 3)
+// CHECK-RV64-NEXT: ret <vscale x 32 x bfloat> [[TMP0]]
+//
+vbfloat16m8_t test_vfmadd_vf_bf16m8_m(vbool2_t mask, vbfloat16m8_t vd, __bf16 rs1, vbfloat16m8_t vs2, size_t vl) {
+ return __riscv_vfmadd(mask, vd, rs1, vs2, vl);
+}
+
diff --git a/clang/test/CodeGen/RISCV/rvv-intrinsics-autogenerated/zvfbfa/non-policy/overloaded/vfmax.c b/clang/test/CodeGen/RISCV/rvv-intrinsics-autogenerated/zvfbfa/non-policy/overloaded/vfmax.c
new file mode 100644
index 0000000..66497bf
--- /dev/null
+++ b/clang/test/CodeGen/RISCV/rvv-intrinsics-autogenerated/zvfbfa/non-policy/overloaded/vfmax.c
@@ -0,0 +1,249 @@
+// NOTE: Assertions have been autogenerated by utils/update_cc_test_checks.py UTC_ARGS: --version 5
+// REQUIRES: riscv-registered-target
+// RUN: %clang_cc1 -triple riscv64 -target-feature +v \
+// RUN: -target-feature +experimental-zvfbfa -disable-O0-optnone \
+// RUN: -emit-llvm %s -o - | opt -S -passes=mem2reg | \
+// RUN: FileCheck --check-prefix=CHECK-RV64 %s
+
+#include <riscv_vector.h>
+
+// CHECK-RV64-LABEL: define dso_local <vscale x 1 x bfloat> @test_vfmax_vv_bf16mf4(
+// CHECK-RV64-SAME: <vscale x 1 x bfloat> [[OP1:%.*]], <vscale x 1 x bfloat> [[OP2:%.*]], i64 noundef [[VL:%.*]]) #[[ATTR0:[0-9]+]] {
+// CHECK-RV64-NEXT: [[ENTRY:.*:]]
+// CHECK-RV64-NEXT: [[TMP0:%.*]] = call <vscale x 1 x bfloat> @llvm.riscv.vfmax.nxv1bf16.nxv1bf16.i64(<vscale x 1 x bfloat> poison, <vscale x 1 x bfloat> [[OP1]], <vscale x 1 x bfloat> [[OP2]], i64 [[VL]])
+// CHECK-RV64-NEXT: ret <vscale x 1 x bfloat> [[TMP0]]
+//
+vbfloat16mf4_t test_vfmax_vv_bf16mf4(vbfloat16mf4_t op1, vbfloat16mf4_t op2, size_t vl) {
+ return __riscv_vfmax(op1, op2, vl);
+}
+
+// CHECK-RV64-LABEL: define dso_local <vscale x 1 x bfloat> @test_vfmax_vf_bf16mf4(
+// CHECK-RV64-SAME: <vscale x 1 x bfloat> [[OP1:%.*]], bfloat noundef [[OP2:%.*]], i64 noundef [[VL:%.*]]) #[[ATTR0]] {
+// CHECK-RV64-NEXT: [[ENTRY:.*:]]
+// CHECK-RV64-NEXT: [[TMP0:%.*]] = call <vscale x 1 x bfloat> @llvm.riscv.vfmax.nxv1bf16.bf16.i64(<vscale x 1 x bfloat> poison, <vscale x 1 x bfloat> [[OP1]], bfloat [[OP2]], i64 [[VL]])
+// CHECK-RV64-NEXT: ret <vscale x 1 x bfloat> [[TMP0]]
+//
+vbfloat16mf4_t test_vfmax_vf_bf16mf4(vbfloat16mf4_t op1, __bf16 op2, size_t vl) {
+ return __riscv_vfmax(op1, op2, vl);
+}
+
+// CHECK-RV64-LABEL: define dso_local <vscale x 2 x bfloat> @test_vfmax_vv_bf16mf2(
+// CHECK-RV64-SAME: <vscale x 2 x bfloat> [[OP1:%.*]], <vscale x 2 x bfloat> [[OP2:%.*]], i64 noundef [[VL:%.*]]) #[[ATTR0]] {
+// CHECK-RV64-NEXT: [[ENTRY:.*:]]
+// CHECK-RV64-NEXT: [[TMP0:%.*]] = call <vscale x 2 x bfloat> @llvm.riscv.vfmax.nxv2bf16.nxv2bf16.i64(<vscale x 2 x bfloat> poison, <vscale x 2 x bfloat> [[OP1]], <vscale x 2 x bfloat> [[OP2]], i64 [[VL]])
+// CHECK-RV64-NEXT: ret <vscale x 2 x bfloat> [[TMP0]]
+//
+vbfloat16mf2_t test_vfmax_vv_bf16mf2(vbfloat16mf2_t op1, vbfloat16mf2_t op2, size_t vl) {
+ return __riscv_vfmax(op1, op2, vl);
+}
+
+// CHECK-RV64-LABEL: define dso_local <vscale x 2 x bfloat> @test_vfmax_vf_bf16mf2(
+// CHECK-RV64-SAME: <vscale x 2 x bfloat> [[OP1:%.*]], bfloat noundef [[OP2:%.*]], i64 noundef [[VL:%.*]]) #[[ATTR0]] {
+// CHECK-RV64-NEXT: [[ENTRY:.*:]]
+// CHECK-RV64-NEXT: [[TMP0:%.*]] = call <vscale x 2 x bfloat> @llvm.riscv.vfmax.nxv2bf16.bf16.i64(<vscale x 2 x bfloat> poison, <vscale x 2 x bfloat> [[OP1]], bfloat [[OP2]], i64 [[VL]])
+// CHECK-RV64-NEXT: ret <vscale x 2 x bfloat> [[TMP0]]
+//
+vbfloat16mf2_t test_vfmax_vf_bf16mf2(vbfloat16mf2_t op1, __bf16 op2, size_t vl) {
+ return __riscv_vfmax(op1, op2, vl);
+}
+
+// CHECK-RV64-LABEL: define dso_local <vscale x 4 x bfloat> @test_vfmax_vv_bf16m1(
+// CHECK-RV64-SAME: <vscale x 4 x bfloat> [[OP1:%.*]], <vscale x 4 x bfloat> [[OP2:%.*]], i64 noundef [[VL:%.*]]) #[[ATTR0]] {
+// CHECK-RV64-NEXT: [[ENTRY:.*:]]
+// CHECK-RV64-NEXT: [[TMP0:%.*]] = call <vscale x 4 x bfloat> @llvm.riscv.vfmax.nxv4bf16.nxv4bf16.i64(<vscale x 4 x bfloat> poison, <vscale x 4 x bfloat> [[OP1]], <vscale x 4 x bfloat> [[OP2]], i64 [[VL]])
+// CHECK-RV64-NEXT: ret <vscale x 4 x bfloat> [[TMP0]]
+//
+vbfloat16m1_t test_vfmax_vv_bf16m1(vbfloat16m1_t op1, vbfloat16m1_t op2, size_t vl) {
+ return __riscv_vfmax(op1, op2, vl);
+}
+
+// CHECK-RV64-LABEL: define dso_local <vscale x 4 x bfloat> @test_vfmax_vf_bf16m1(
+// CHECK-RV64-SAME: <vscale x 4 x bfloat> [[OP1:%.*]], bfloat noundef [[OP2:%.*]], i64 noundef [[VL:%.*]]) #[[ATTR0]] {
+// CHECK-RV64-NEXT: [[ENTRY:.*:]]
+// CHECK-RV64-NEXT: [[TMP0:%.*]] = call <vscale x 4 x bfloat> @llvm.riscv.vfmax.nxv4bf16.bf16.i64(<vscale x 4 x bfloat> poison, <vscale x 4 x bfloat> [[OP1]], bfloat [[OP2]], i64 [[VL]])
+// CHECK-RV64-NEXT: ret <vscale x 4 x bfloat> [[TMP0]]
+//
+vbfloat16m1_t test_vfmax_vf_bf16m1(vbfloat16m1_t op1, __bf16 op2, size_t vl) {
+ return __riscv_vfmax(op1, op2, vl);
+}
+
+// CHECK-RV64-LABEL: define dso_local <vscale x 8 x bfloat> @test_vfmax_vv_bf16m2(
+// CHECK-RV64-SAME: <vscale x 8 x bfloat> [[OP1:%.*]], <vscale x 8 x bfloat> [[OP2:%.*]], i64 noundef [[VL:%.*]]) #[[ATTR0]] {
+// CHECK-RV64-NEXT: [[ENTRY:.*:]]
+// CHECK-RV64-NEXT: [[TMP0:%.*]] = call <vscale x 8 x bfloat> @llvm.riscv.vfmax.nxv8bf16.nxv8bf16.i64(<vscale x 8 x bfloat> poison, <vscale x 8 x bfloat> [[OP1]], <vscale x 8 x bfloat> [[OP2]], i64 [[VL]])
+// CHECK-RV64-NEXT: ret <vscale x 8 x bfloat> [[TMP0]]
+//
+vbfloat16m2_t test_vfmax_vv_bf16m2(vbfloat16m2_t op1, vbfloat16m2_t op2, size_t vl) {
+ return __riscv_vfmax(op1, op2, vl);
+}
+
+// CHECK-RV64-LABEL: define dso_local <vscale x 8 x bfloat> @test_vfmax_vf_bf16m2(
+// CHECK-RV64-SAME: <vscale x 8 x bfloat> [[OP1:%.*]], bfloat noundef [[OP2:%.*]], i64 noundef [[VL:%.*]]) #[[ATTR0]] {
+// CHECK-RV64-NEXT: [[ENTRY:.*:]]
+// CHECK-RV64-NEXT: [[TMP0:%.*]] = call <vscale x 8 x bfloat> @llvm.riscv.vfmax.nxv8bf16.bf16.i64(<vscale x 8 x bfloat> poison, <vscale x 8 x bfloat> [[OP1]], bfloat [[OP2]], i64 [[VL]])
+// CHECK-RV64-NEXT: ret <vscale x 8 x bfloat> [[TMP0]]
+//
+vbfloat16m2_t test_vfmax_vf_bf16m2(vbfloat16m2_t op1, __bf16 op2, size_t vl) {
+ return __riscv_vfmax(op1, op2, vl);
+}
+
+// CHECK-RV64-LABEL: define dso_local <vscale x 16 x bfloat> @test_vfmax_vv_bf16m4(
+// CHECK-RV64-SAME: <vscale x 16 x bfloat> [[OP1:%.*]], <vscale x 16 x bfloat> [[OP2:%.*]], i64 noundef [[VL:%.*]]) #[[ATTR0]] {
+// CHECK-RV64-NEXT: [[ENTRY:.*:]]
+// CHECK-RV64-NEXT: [[TMP0:%.*]] = call <vscale x 16 x bfloat> @llvm.riscv.vfmax.nxv16bf16.nxv16bf16.i64(<vscale x 16 x bfloat> poison, <vscale x 16 x bfloat> [[OP1]], <vscale x 16 x bfloat> [[OP2]], i64 [[VL]])
+// CHECK-RV64-NEXT: ret <vscale x 16 x bfloat> [[TMP0]]
+//
+vbfloat16m4_t test_vfmax_vv_bf16m4(vbfloat16m4_t op1, vbfloat16m4_t op2, size_t vl) {
+ return __riscv_vfmax(op1, op2, vl);
+}
+
+// CHECK-RV64-LABEL: define dso_local <vscale x 16 x bfloat> @test_vfmax_vf_bf16m4(
+// CHECK-RV64-SAME: <vscale x 16 x bfloat> [[OP1:%.*]], bfloat noundef [[OP2:%.*]], i64 noundef [[VL:%.*]]) #[[ATTR0]] {
+// CHECK-RV64-NEXT: [[ENTRY:.*:]]
+// CHECK-RV64-NEXT: [[TMP0:%.*]] = call <vscale x 16 x bfloat> @llvm.riscv.vfmax.nxv16bf16.bf16.i64(<vscale x 16 x bfloat> poison, <vscale x 16 x bfloat> [[OP1]], bfloat [[OP2]], i64 [[VL]])
+// CHECK-RV64-NEXT: ret <vscale x 16 x bfloat> [[TMP0]]
+//
+vbfloat16m4_t test_vfmax_vf_bf16m4(vbfloat16m4_t op1, __bf16 op2, size_t vl) {
+ return __riscv_vfmax(op1, op2, vl);
+}
+
+// CHECK-RV64-LABEL: define dso_local <vscale x 32 x bfloat> @test_vfmax_vv_bf16m8(
+// CHECK-RV64-SAME: <vscale x 32 x bfloat> [[OP1:%.*]], <vscale x 32 x bfloat> [[OP2:%.*]], i64 noundef [[VL:%.*]]) #[[ATTR0]] {
+// CHECK-RV64-NEXT: [[ENTRY:.*:]]
+// CHECK-RV64-NEXT: [[TMP0:%.*]] = call <vscale x 32 x bfloat> @llvm.riscv.vfmax.nxv32bf16.nxv32bf16.i64(<vscale x 32 x bfloat> poison, <vscale x 32 x bfloat> [[OP1]], <vscale x 32 x bfloat> [[OP2]], i64 [[VL]])
+// CHECK-RV64-NEXT: ret <vscale x 32 x bfloat> [[TMP0]]
+//
+vbfloat16m8_t test_vfmax_vv_bf16m8(vbfloat16m8_t op1, vbfloat16m8_t op2, size_t vl) {
+ return __riscv_vfmax(op1, op2, vl);
+}
+
+// CHECK-RV64-LABEL: define dso_local <vscale x 32 x bfloat> @test_vfmax_vf_bf16m8(
+// CHECK-RV64-SAME: <vscale x 32 x bfloat> [[OP1:%.*]], bfloat noundef [[OP2:%.*]], i64 noundef [[VL:%.*]]) #[[ATTR0]] {
+// CHECK-RV64-NEXT: [[ENTRY:.*:]]
+// CHECK-RV64-NEXT: [[TMP0:%.*]] = call <vscale x 32 x bfloat> @llvm.riscv.vfmax.nxv32bf16.bf16.i64(<vscale x 32 x bfloat> poison, <vscale x 32 x bfloat> [[OP1]], bfloat [[OP2]], i64 [[VL]])
+// CHECK-RV64-NEXT: ret <vscale x 32 x bfloat> [[TMP0]]
+//
+vbfloat16m8_t test_vfmax_vf_bf16m8(vbfloat16m8_t op1, __bf16 op2, size_t vl) {
+ return __riscv_vfmax(op1, op2, vl);
+}
+
+// CHECK-RV64-LABEL: define dso_local <vscale x 1 x bfloat> @test_vfmax_vv_bf16mf4_m(
+// CHECK-RV64-SAME: <vscale x 1 x i1> [[MASK:%.*]], <vscale x 1 x bfloat> [[OP1:%.*]], <vscale x 1 x bfloat> [[OP2:%.*]], i64 noundef [[VL:%.*]]) #[[ATTR0]] {
+// CHECK-RV64-NEXT: [[ENTRY:.*:]]
+// CHECK-RV64-NEXT: [[TMP0:%.*]] = call <vscale x 1 x bfloat> @llvm.riscv.vfmax.mask.nxv1bf16.nxv1bf16.i64(<vscale x 1 x bfloat> poison, <vscale x 1 x bfloat> [[OP1]], <vscale x 1 x bfloat> [[OP2]], <vscale x 1 x i1> [[MASK]], i64 [[VL]], i64 3)
+// CHECK-RV64-NEXT: ret <vscale x 1 x bfloat> [[TMP0]]
+//
+vbfloat16mf4_t test_vfmax_vv_bf16mf4_m(vbool64_t mask, vbfloat16mf4_t op1, vbfloat16mf4_t op2, size_t vl) {
+ return __riscv_vfmax(mask, op1, op2, vl);
+}
+
+// CHECK-RV64-LABEL: define dso_local <vscale x 1 x bfloat> @test_vfmax_vf_bf16mf4_m(
+// CHECK-RV64-SAME: <vscale x 1 x i1> [[MASK:%.*]], <vscale x 1 x bfloat> [[OP1:%.*]], bfloat noundef [[OP2:%.*]], i64 noundef [[VL:%.*]]) #[[ATTR0]] {
+// CHECK-RV64-NEXT: [[ENTRY:.*:]]
+// CHECK-RV64-NEXT: [[TMP0:%.*]] = call <vscale x 1 x bfloat> @llvm.riscv.vfmax.mask.nxv1bf16.bf16.i64(<vscale x 1 x bfloat> poison, <vscale x 1 x bfloat> [[OP1]], bfloat [[OP2]], <vscale x 1 x i1> [[MASK]], i64 [[VL]], i64 3)
+// CHECK-RV64-NEXT: ret <vscale x 1 x bfloat> [[TMP0]]
+//
+vbfloat16mf4_t test_vfmax_vf_bf16mf4_m(vbool64_t mask, vbfloat16mf4_t op1, __bf16 op2, size_t vl) {
+ return __riscv_vfmax(mask, op1, op2, vl);
+}
+
+// CHECK-RV64-LABEL: define dso_local <vscale x 2 x bfloat> @test_vfmax_vv_bf16mf2_m(
+// CHECK-RV64-SAME: <vscale x 2 x i1> [[MASK:%.*]], <vscale x 2 x bfloat> [[OP1:%.*]], <vscale x 2 x bfloat> [[OP2:%.*]], i64 noundef [[VL:%.*]]) #[[ATTR0]] {
+// CHECK-RV64-NEXT: [[ENTRY:.*:]]
+// CHECK-RV64-NEXT: [[TMP0:%.*]] = call <vscale x 2 x bfloat> @llvm.riscv.vfmax.mask.nxv2bf16.nxv2bf16.i64(<vscale x 2 x bfloat> poison, <vscale x 2 x bfloat> [[OP1]], <vscale x 2 x bfloat> [[OP2]], <vscale x 2 x i1> [[MASK]], i64 [[VL]], i64 3)
+// CHECK-RV64-NEXT: ret <vscale x 2 x bfloat> [[TMP0]]
+//
+vbfloat16mf2_t test_vfmax_vv_bf16mf2_m(vbool32_t mask, vbfloat16mf2_t op1, vbfloat16mf2_t op2, size_t vl) {
+ return __riscv_vfmax(mask, op1, op2, vl);
+}
+
+// CHECK-RV64-LABEL: define dso_local <vscale x 2 x bfloat> @test_vfmax_vf_bf16mf2_m(
+// CHECK-RV64-SAME: <vscale x 2 x i1> [[MASK:%.*]], <vscale x 2 x bfloat> [[OP1:%.*]], bfloat noundef [[OP2:%.*]], i64 noundef [[VL:%.*]]) #[[ATTR0]] {
+// CHECK-RV64-NEXT: [[ENTRY:.*:]]
+// CHECK-RV64-NEXT: [[TMP0:%.*]] = call <vscale x 2 x bfloat> @llvm.riscv.vfmax.mask.nxv2bf16.bf16.i64(<vscale x 2 x bfloat> poison, <vscale x 2 x bfloat> [[OP1]], bfloat [[OP2]], <vscale x 2 x i1> [[MASK]], i64 [[VL]], i64 3)
+// CHECK-RV64-NEXT: ret <vscale x 2 x bfloat> [[TMP0]]
+//
+vbfloat16mf2_t test_vfmax_vf_bf16mf2_m(vbool32_t mask, vbfloat16mf2_t op1, __bf16 op2, size_t vl) {
+ return __riscv_vfmax(mask, op1, op2, vl);
+}
+
+// CHECK-RV64-LABEL: define dso_local <vscale x 4 x bfloat> @test_vfmax_vv_bf16m1_m(
+// CHECK-RV64-SAME: <vscale x 4 x i1> [[MASK:%.*]], <vscale x 4 x bfloat> [[OP1:%.*]], <vscale x 4 x bfloat> [[OP2:%.*]], i64 noundef [[VL:%.*]]) #[[ATTR0]] {
+// CHECK-RV64-NEXT: [[ENTRY:.*:]]
+// CHECK-RV64-NEXT: [[TMP0:%.*]] = call <vscale x 4 x bfloat> @llvm.riscv.vfmax.mask.nxv4bf16.nxv4bf16.i64(<vscale x 4 x bfloat> poison, <vscale x 4 x bfloat> [[OP1]], <vscale x 4 x bfloat> [[OP2]], <vscale x 4 x i1> [[MASK]], i64 [[VL]], i64 3)
+// CHECK-RV64-NEXT: ret <vscale x 4 x bfloat> [[TMP0]]
+//
+vbfloat16m1_t test_vfmax_vv_bf16m1_m(vbool16_t mask, vbfloat16m1_t op1, vbfloat16m1_t op2, size_t vl) {
+ return __riscv_vfmax(mask, op1, op2, vl);
+}
+
+// CHECK-RV64-LABEL: define dso_local <vscale x 4 x bfloat> @test_vfmax_vf_bf16m1_m(
+// CHECK-RV64-SAME: <vscale x 4 x i1> [[MASK:%.*]], <vscale x 4 x bfloat> [[OP1:%.*]], bfloat noundef [[OP2:%.*]], i64 noundef [[VL:%.*]]) #[[ATTR0]] {
+// CHECK-RV64-NEXT: [[ENTRY:.*:]]
+// CHECK-RV64-NEXT: [[TMP0:%.*]] = call <vscale x 4 x bfloat> @llvm.riscv.vfmax.mask.nxv4bf16.bf16.i64(<vscale x 4 x bfloat> poison, <vscale x 4 x bfloat> [[OP1]], bfloat [[OP2]], <vscale x 4 x i1> [[MASK]], i64 [[VL]], i64 3)
+// CHECK-RV64-NEXT: ret <vscale x 4 x bfloat> [[TMP0]]
+//
+vbfloat16m1_t test_vfmax_vf_bf16m1_m(vbool16_t mask, vbfloat16m1_t op1, __bf16 op2, size_t vl) {
+ return __riscv_vfmax(mask, op1, op2, vl);
+}
+
+// CHECK-RV64-LABEL: define dso_local <vscale x 8 x bfloat> @test_vfmax_vv_bf16m2_m(
+// CHECK-RV64-SAME: <vscale x 8 x i1> [[MASK:%.*]], <vscale x 8 x bfloat> [[OP1:%.*]], <vscale x 8 x bfloat> [[OP2:%.*]], i64 noundef [[VL:%.*]]) #[[ATTR0]] {
+// CHECK-RV64-NEXT: [[ENTRY:.*:]]
+// CHECK-RV64-NEXT: [[TMP0:%.*]] = call <vscale x 8 x bfloat> @llvm.riscv.vfmax.mask.nxv8bf16.nxv8bf16.i64(<vscale x 8 x bfloat> poison, <vscale x 8 x bfloat> [[OP1]], <vscale x 8 x bfloat> [[OP2]], <vscale x 8 x i1> [[MASK]], i64 [[VL]], i64 3)
+// CHECK-RV64-NEXT: ret <vscale x 8 x bfloat> [[TMP0]]
+//
+vbfloat16m2_t test_vfmax_vv_bf16m2_m(vbool8_t mask, vbfloat16m2_t op1, vbfloat16m2_t op2, size_t vl) {
+ return __riscv_vfmax(mask, op1, op2, vl);
+}
+
+// CHECK-RV64-LABEL: define dso_local <vscale x 8 x bfloat> @test_vfmax_vf_bf16m2_m(
+// CHECK-RV64-SAME: <vscale x 8 x i1> [[MASK:%.*]], <vscale x 8 x bfloat> [[OP1:%.*]], bfloat noundef [[OP2:%.*]], i64 noundef [[VL:%.*]]) #[[ATTR0]] {
+// CHECK-RV64-NEXT: [[ENTRY:.*:]]
+// CHECK-RV64-NEXT: [[TMP0:%.*]] = call <vscale x 8 x bfloat> @llvm.riscv.vfmax.mask.nxv8bf16.bf16.i64(<vscale x 8 x bfloat> poison, <vscale x 8 x bfloat> [[OP1]], bfloat [[OP2]], <vscale x 8 x i1> [[MASK]], i64 [[VL]], i64 3)
+// CHECK-RV64-NEXT: ret <vscale x 8 x bfloat> [[TMP0]]
+//
+vbfloat16m2_t test_vfmax_vf_bf16m2_m(vbool8_t mask, vbfloat16m2_t op1, __bf16 op2, size_t vl) {
+ return __riscv_vfmax(mask, op1, op2, vl);
+}
+
+// CHECK-RV64-LABEL: define dso_local <vscale x 16 x bfloat> @test_vfmax_vv_bf16m4_m(
+// CHECK-RV64-SAME: <vscale x 16 x i1> [[MASK:%.*]], <vscale x 16 x bfloat> [[OP1:%.*]], <vscale x 16 x bfloat> [[OP2:%.*]], i64 noundef [[VL:%.*]]) #[[ATTR0]] {
+// CHECK-RV64-NEXT: [[ENTRY:.*:]]
+// CHECK-RV64-NEXT: [[TMP0:%.*]] = call <vscale x 16 x bfloat> @llvm.riscv.vfmax.mask.nxv16bf16.nxv16bf16.i64(<vscale x 16 x bfloat> poison, <vscale x 16 x bfloat> [[OP1]], <vscale x 16 x bfloat> [[OP2]], <vscale x 16 x i1> [[MASK]], i64 [[VL]], i64 3)
+// CHECK-RV64-NEXT: ret <vscale x 16 x bfloat> [[TMP0]]
+//
+vbfloat16m4_t test_vfmax_vv_bf16m4_m(vbool4_t mask, vbfloat16m4_t op1, vbfloat16m4_t op2, size_t vl) {
+ return __riscv_vfmax(mask, op1, op2, vl);
+}
+
+// CHECK-RV64-LABEL: define dso_local <vscale x 16 x bfloat> @test_vfmax_vf_bf16m4_m(
+// CHECK-RV64-SAME: <vscale x 16 x i1> [[MASK:%.*]], <vscale x 16 x bfloat> [[OP1:%.*]], bfloat noundef [[OP2:%.*]], i64 noundef [[VL:%.*]]) #[[ATTR0]] {
+// CHECK-RV64-NEXT: [[ENTRY:.*:]]
+// CHECK-RV64-NEXT: [[TMP0:%.*]] = call <vscale x 16 x bfloat> @llvm.riscv.vfmax.mask.nxv16bf16.bf16.i64(<vscale x 16 x bfloat> poison, <vscale x 16 x bfloat> [[OP1]], bfloat [[OP2]], <vscale x 16 x i1> [[MASK]], i64 [[VL]], i64 3)
+// CHECK-RV64-NEXT: ret <vscale x 16 x bfloat> [[TMP0]]
+//
+vbfloat16m4_t test_vfmax_vf_bf16m4_m(vbool4_t mask, vbfloat16m4_t op1, __bf16 op2, size_t vl) {
+ return __riscv_vfmax(mask, op1, op2, vl);
+}
+
+// CHECK-RV64-LABEL: define dso_local <vscale x 32 x bfloat> @test_vfmax_vv_bf16m8_m(
+// CHECK-RV64-SAME: <vscale x 32 x i1> [[MASK:%.*]], <vscale x 32 x bfloat> [[OP1:%.*]], <vscale x 32 x bfloat> [[OP2:%.*]], i64 noundef [[VL:%.*]]) #[[ATTR0]] {
+// CHECK-RV64-NEXT: [[ENTRY:.*:]]
+// CHECK-RV64-NEXT: [[TMP0:%.*]] = call <vscale x 32 x bfloat> @llvm.riscv.vfmax.mask.nxv32bf16.nxv32bf16.i64(<vscale x 32 x bfloat> poison, <vscale x 32 x bfloat> [[OP1]], <vscale x 32 x bfloat> [[OP2]], <vscale x 32 x i1> [[MASK]], i64 [[VL]], i64 3)
+// CHECK-RV64-NEXT: ret <vscale x 32 x bfloat> [[TMP0]]
+//
+vbfloat16m8_t test_vfmax_vv_bf16m8_m(vbool2_t mask, vbfloat16m8_t op1, vbfloat16m8_t op2, size_t vl) {
+ return __riscv_vfmax(mask, op1, op2, vl);
+}
+
+// CHECK-RV64-LABEL: define dso_local <vscale x 32 x bfloat> @test_vfmax_vf_bf16m8_m(
+// CHECK-RV64-SAME: <vscale x 32 x i1> [[MASK:%.*]], <vscale x 32 x bfloat> [[OP1:%.*]], bfloat noundef [[OP2:%.*]], i64 noundef [[VL:%.*]]) #[[ATTR0]] {
+// CHECK-RV64-NEXT: [[ENTRY:.*:]]
+// CHECK-RV64-NEXT: [[TMP0:%.*]] = call <vscale x 32 x bfloat> @llvm.riscv.vfmax.mask.nxv32bf16.bf16.i64(<vscale x 32 x bfloat> poison, <vscale x 32 x bfloat> [[OP1]], bfloat [[OP2]], <vscale x 32 x i1> [[MASK]], i64 [[VL]], i64 3)
+// CHECK-RV64-NEXT: ret <vscale x 32 x bfloat> [[TMP0]]
+//
+vbfloat16m8_t test_vfmax_vf_bf16m8_m(vbool2_t mask, vbfloat16m8_t op1, __bf16 op2, size_t vl) {
+ return __riscv_vfmax(mask, op1, op2, vl);
+}
+
diff --git a/clang/test/CodeGen/RISCV/rvv-intrinsics-autogenerated/zvfbfa/non-policy/overloaded/vfmerge.c b/clang/test/CodeGen/RISCV/rvv-intrinsics-autogenerated/zvfbfa/non-policy/overloaded/vfmerge.c
new file mode 100644
index 0000000..1dc290b
--- /dev/null
+++ b/clang/test/CodeGen/RISCV/rvv-intrinsics-autogenerated/zvfbfa/non-policy/overloaded/vfmerge.c
@@ -0,0 +1,69 @@
+// NOTE: Assertions have been autogenerated by utils/update_cc_test_checks.py UTC_ARGS: --version 5
+// REQUIRES: riscv-registered-target
+// RUN: %clang_cc1 -triple riscv64 -target-feature +v \
+// RUN: -target-feature +experimental-zvfbfa -disable-O0-optnone \
+// RUN: -emit-llvm %s -o - | opt -S -passes=mem2reg | \
+// RUN: FileCheck --check-prefix=CHECK-RV64 %s
+
+#include <riscv_vector.h>
+
+// CHECK-RV64-LABEL: define dso_local <vscale x 1 x bfloat> @test_vfmerge_vfm_bf16mf4(
+// CHECK-RV64-SAME: <vscale x 1 x bfloat> [[OP1:%.*]], bfloat noundef [[OP2:%.*]], <vscale x 1 x i1> [[MASK:%.*]], i64 noundef [[VL:%.*]]) #[[ATTR0:[0-9]+]] {
+// CHECK-RV64-NEXT: [[ENTRY:.*:]]
+// CHECK-RV64-NEXT: [[TMP0:%.*]] = call <vscale x 1 x bfloat> @llvm.riscv.vfmerge.nxv1bf16.bf16.i64(<vscale x 1 x bfloat> poison, <vscale x 1 x bfloat> [[OP1]], bfloat [[OP2]], <vscale x 1 x i1> [[MASK]], i64 [[VL]])
+// CHECK-RV64-NEXT: ret <vscale x 1 x bfloat> [[TMP0]]
+//
+vbfloat16mf4_t test_vfmerge_vfm_bf16mf4(vbfloat16mf4_t op1, __bf16 op2, vbool64_t mask, size_t vl) {
+ return __riscv_vfmerge(op1, op2, mask, vl);
+}
+
+// CHECK-RV64-LABEL: define dso_local <vscale x 2 x bfloat> @test_vfmerge_vfm_bf16mf2(
+// CHECK-RV64-SAME: <vscale x 2 x bfloat> [[OP1:%.*]], bfloat noundef [[OP2:%.*]], <vscale x 2 x i1> [[MASK:%.*]], i64 noundef [[VL:%.*]]) #[[ATTR0]] {
+// CHECK-RV64-NEXT: [[ENTRY:.*:]]
+// CHECK-RV64-NEXT: [[TMP0:%.*]] = call <vscale x 2 x bfloat> @llvm.riscv.vfmerge.nxv2bf16.bf16.i64(<vscale x 2 x bfloat> poison, <vscale x 2 x bfloat> [[OP1]], bfloat [[OP2]], <vscale x 2 x i1> [[MASK]], i64 [[VL]])
+// CHECK-RV64-NEXT: ret <vscale x 2 x bfloat> [[TMP0]]
+//
+vbfloat16mf2_t test_vfmerge_vfm_bf16mf2(vbfloat16mf2_t op1, __bf16 op2, vbool32_t mask, size_t vl) {
+ return __riscv_vfmerge(op1, op2, mask, vl);
+}
+
+// CHECK-RV64-LABEL: define dso_local <vscale x 4 x bfloat> @test_vfmerge_vfm_bf16m1(
+// CHECK-RV64-SAME: <vscale x 4 x bfloat> [[OP1:%.*]], bfloat noundef [[OP2:%.*]], <vscale x 4 x i1> [[MASK:%.*]], i64 noundef [[VL:%.*]]) #[[ATTR0]] {
+// CHECK-RV64-NEXT: [[ENTRY:.*:]]
+// CHECK-RV64-NEXT: [[TMP0:%.*]] = call <vscale x 4 x bfloat> @llvm.riscv.vfmerge.nxv4bf16.bf16.i64(<vscale x 4 x bfloat> poison, <vscale x 4 x bfloat> [[OP1]], bfloat [[OP2]], <vscale x 4 x i1> [[MASK]], i64 [[VL]])
+// CHECK-RV64-NEXT: ret <vscale x 4 x bfloat> [[TMP0]]
+//
+vbfloat16m1_t test_vfmerge_vfm_bf16m1(vbfloat16m1_t op1, __bf16 op2, vbool16_t mask, size_t vl) {
+ return __riscv_vfmerge(op1, op2, mask, vl);
+}
+
+// CHECK-RV64-LABEL: define dso_local <vscale x 8 x bfloat> @test_vfmerge_vfm_bf16m2(
+// CHECK-RV64-SAME: <vscale x 8 x bfloat> [[OP1:%.*]], bfloat noundef [[OP2:%.*]], <vscale x 8 x i1> [[MASK:%.*]], i64 noundef [[VL:%.*]]) #[[ATTR0]] {
+// CHECK-RV64-NEXT: [[ENTRY:.*:]]
+// CHECK-RV64-NEXT: [[TMP0:%.*]] = call <vscale x 8 x bfloat> @llvm.riscv.vfmerge.nxv8bf16.bf16.i64(<vscale x 8 x bfloat> poison, <vscale x 8 x bfloat> [[OP1]], bfloat [[OP2]], <vscale x 8 x i1> [[MASK]], i64 [[VL]])
+// CHECK-RV64-NEXT: ret <vscale x 8 x bfloat> [[TMP0]]
+//
+vbfloat16m2_t test_vfmerge_vfm_bf16m2(vbfloat16m2_t op1, __bf16 op2, vbool8_t mask, size_t vl) {
+ return __riscv_vfmerge(op1, op2, mask, vl);
+}
+
+// CHECK-RV64-LABEL: define dso_local <vscale x 16 x bfloat> @test_vfmerge_vfm_bf16m4(
+// CHECK-RV64-SAME: <vscale x 16 x bfloat> [[OP1:%.*]], bfloat noundef [[OP2:%.*]], <vscale x 16 x i1> [[MASK:%.*]], i64 noundef [[VL:%.*]]) #[[ATTR0]] {
+// CHECK-RV64-NEXT: [[ENTRY:.*:]]
+// CHECK-RV64-NEXT: [[TMP0:%.*]] = call <vscale x 16 x bfloat> @llvm.riscv.vfmerge.nxv16bf16.bf16.i64(<vscale x 16 x bfloat> poison, <vscale x 16 x bfloat> [[OP1]], bfloat [[OP2]], <vscale x 16 x i1> [[MASK]], i64 [[VL]])
+// CHECK-RV64-NEXT: ret <vscale x 16 x bfloat> [[TMP0]]
+//
+vbfloat16m4_t test_vfmerge_vfm_bf16m4(vbfloat16m4_t op1, __bf16 op2, vbool4_t mask, size_t vl) {
+ return __riscv_vfmerge(op1, op2, mask, vl);
+}
+
+// CHECK-RV64-LABEL: define dso_local <vscale x 32 x bfloat> @test_vfmerge_vfm_bf16m8(
+// CHECK-RV64-SAME: <vscale x 32 x bfloat> [[OP1:%.*]], bfloat noundef [[OP2:%.*]], <vscale x 32 x i1> [[MASK:%.*]], i64 noundef [[VL:%.*]]) #[[ATTR0]] {
+// CHECK-RV64-NEXT: [[ENTRY:.*:]]
+// CHECK-RV64-NEXT: [[TMP0:%.*]] = call <vscale x 32 x bfloat> @llvm.riscv.vfmerge.nxv32bf16.bf16.i64(<vscale x 32 x bfloat> poison, <vscale x 32 x bfloat> [[OP1]], bfloat [[OP2]], <vscale x 32 x i1> [[MASK]], i64 [[VL]])
+// CHECK-RV64-NEXT: ret <vscale x 32 x bfloat> [[TMP0]]
+//
+vbfloat16m8_t test_vfmerge_vfm_bf16m8(vbfloat16m8_t op1, __bf16 op2, vbool2_t mask, size_t vl) {
+ return __riscv_vfmerge(op1, op2, mask, vl);
+}
+
diff --git a/clang/test/CodeGen/RISCV/rvv-intrinsics-autogenerated/zvfbfa/non-policy/overloaded/vfmin.c b/clang/test/CodeGen/RISCV/rvv-intrinsics-autogenerated/zvfbfa/non-policy/overloaded/vfmin.c
new file mode 100644
index 0000000..1564d11
--- /dev/null
+++ b/clang/test/CodeGen/RISCV/rvv-intrinsics-autogenerated/zvfbfa/non-policy/overloaded/vfmin.c
@@ -0,0 +1,249 @@
+// NOTE: Assertions have been autogenerated by utils/update_cc_test_checks.py UTC_ARGS: --version 5
+// REQUIRES: riscv-registered-target
+// RUN: %clang_cc1 -triple riscv64 -target-feature +v \
+// RUN: -target-feature +experimental-zvfbfa -disable-O0-optnone \
+// RUN: -emit-llvm %s -o - | opt -S -passes=mem2reg | \
+// RUN: FileCheck --check-prefix=CHECK-RV64 %s
+
+#include <riscv_vector.h>
+
+// CHECK-RV64-LABEL: define dso_local <vscale x 1 x bfloat> @test_vfmin_vv_bf16mf4(
+// CHECK-RV64-SAME: <vscale x 1 x bfloat> [[OP1:%.*]], <vscale x 1 x bfloat> [[OP2:%.*]], i64 noundef [[VL:%.*]]) #[[ATTR0:[0-9]+]] {
+// CHECK-RV64-NEXT: [[ENTRY:.*:]]
+// CHECK-RV64-NEXT: [[TMP0:%.*]] = call <vscale x 1 x bfloat> @llvm.riscv.vfmin.nxv1bf16.nxv1bf16.i64(<vscale x 1 x bfloat> poison, <vscale x 1 x bfloat> [[OP1]], <vscale x 1 x bfloat> [[OP2]], i64 [[VL]])
+// CHECK-RV64-NEXT: ret <vscale x 1 x bfloat> [[TMP0]]
+//
+vbfloat16mf4_t test_vfmin_vv_bf16mf4(vbfloat16mf4_t op1, vbfloat16mf4_t op2, size_t vl) {
+ return __riscv_vfmin(op1, op2, vl);
+}
+
+// CHECK-RV64-LABEL: define dso_local <vscale x 1 x bfloat> @test_vfmin_vf_bf16mf4(
+// CHECK-RV64-SAME: <vscale x 1 x bfloat> [[OP1:%.*]], bfloat noundef [[OP2:%.*]], i64 noundef [[VL:%.*]]) #[[ATTR0]] {
+// CHECK-RV64-NEXT: [[ENTRY:.*:]]
+// CHECK-RV64-NEXT: [[TMP0:%.*]] = call <vscale x 1 x bfloat> @llvm.riscv.vfmin.nxv1bf16.bf16.i64(<vscale x 1 x bfloat> poison, <vscale x 1 x bfloat> [[OP1]], bfloat [[OP2]], i64 [[VL]])
+// CHECK-RV64-NEXT: ret <vscale x 1 x bfloat> [[TMP0]]
+//
+vbfloat16mf4_t test_vfmin_vf_bf16mf4(vbfloat16mf4_t op1, __bf16 op2, size_t vl) {
+ return __riscv_vfmin(op1, op2, vl);
+}
+
+// CHECK-RV64-LABEL: define dso_local <vscale x 2 x bfloat> @test_vfmin_vv_bf16mf2(
+// CHECK-RV64-SAME: <vscale x 2 x bfloat> [[OP1:%.*]], <vscale x 2 x bfloat> [[OP2:%.*]], i64 noundef [[VL:%.*]]) #[[ATTR0]] {
+// CHECK-RV64-NEXT: [[ENTRY:.*:]]
+// CHECK-RV64-NEXT: [[TMP0:%.*]] = call <vscale x 2 x bfloat> @llvm.riscv.vfmin.nxv2bf16.nxv2bf16.i64(<vscale x 2 x bfloat> poison, <vscale x 2 x bfloat> [[OP1]], <vscale x 2 x bfloat> [[OP2]], i64 [[VL]])
+// CHECK-RV64-NEXT: ret <vscale x 2 x bfloat> [[TMP0]]
+//
+vbfloat16mf2_t test_vfmin_vv_bf16mf2(vbfloat16mf2_t op1, vbfloat16mf2_t op2, size_t vl) {
+ return __riscv_vfmin(op1, op2, vl);
+}
+
+// CHECK-RV64-LABEL: define dso_local <vscale x 2 x bfloat> @test_vfmin_vf_bf16mf2(
+// CHECK-RV64-SAME: <vscale x 2 x bfloat> [[OP1:%.*]], bfloat noundef [[OP2:%.*]], i64 noundef [[VL:%.*]]) #[[ATTR0]] {
+// CHECK-RV64-NEXT: [[ENTRY:.*:]]
+// CHECK-RV64-NEXT: [[TMP0:%.*]] = call <vscale x 2 x bfloat> @llvm.riscv.vfmin.nxv2bf16.bf16.i64(<vscale x 2 x bfloat> poison, <vscale x 2 x bfloat> [[OP1]], bfloat [[OP2]], i64 [[VL]])
+// CHECK-RV64-NEXT: ret <vscale x 2 x bfloat> [[TMP0]]
+//
+vbfloat16mf2_t test_vfmin_vf_bf16mf2(vbfloat16mf2_t op1, __bf16 op2, size_t vl) {
+ return __riscv_vfmin(op1, op2, vl);
+}
+
+// CHECK-RV64-LABEL: define dso_local <vscale x 4 x bfloat> @test_vfmin_vv_bf16m1(
+// CHECK-RV64-SAME: <vscale x 4 x bfloat> [[OP1:%.*]], <vscale x 4 x bfloat> [[OP2:%.*]], i64 noundef [[VL:%.*]]) #[[ATTR0]] {
+// CHECK-RV64-NEXT: [[ENTRY:.*:]]
+// CHECK-RV64-NEXT: [[TMP0:%.*]] = call <vscale x 4 x bfloat> @llvm.riscv.vfmin.nxv4bf16.nxv4bf16.i64(<vscale x 4 x bfloat> poison, <vscale x 4 x bfloat> [[OP1]], <vscale x 4 x bfloat> [[OP2]], i64 [[VL]])
+// CHECK-RV64-NEXT: ret <vscale x 4 x bfloat> [[TMP0]]
+//
+vbfloat16m1_t test_vfmin_vv_bf16m1(vbfloat16m1_t op1, vbfloat16m1_t op2, size_t vl) {
+ return __riscv_vfmin(op1, op2, vl);
+}
+
+// CHECK-RV64-LABEL: define dso_local <vscale x 4 x bfloat> @test_vfmin_vf_bf16m1(
+// CHECK-RV64-SAME: <vscale x 4 x bfloat> [[OP1:%.*]], bfloat noundef [[OP2:%.*]], i64 noundef [[VL:%.*]]) #[[ATTR0]] {
+// CHECK-RV64-NEXT: [[ENTRY:.*:]]
+// CHECK-RV64-NEXT: [[TMP0:%.*]] = call <vscale x 4 x bfloat> @llvm.riscv.vfmin.nxv4bf16.bf16.i64(<vscale x 4 x bfloat> poison, <vscale x 4 x bfloat> [[OP1]], bfloat [[OP2]], i64 [[VL]])
+// CHECK-RV64-NEXT: ret <vscale x 4 x bfloat> [[TMP0]]
+//
+vbfloat16m1_t test_vfmin_vf_bf16m1(vbfloat16m1_t op1, __bf16 op2, size_t vl) {
+ return __riscv_vfmin(op1, op2, vl);
+}
+
+// CHECK-RV64-LABEL: define dso_local <vscale x 8 x bfloat> @test_vfmin_vv_bf16m2(
+// CHECK-RV64-SAME: <vscale x 8 x bfloat> [[OP1:%.*]], <vscale x 8 x bfloat> [[OP2:%.*]], i64 noundef [[VL:%.*]]) #[[ATTR0]] {
+// CHECK-RV64-NEXT: [[ENTRY:.*:]]
+// CHECK-RV64-NEXT: [[TMP0:%.*]] = call <vscale x 8 x bfloat> @llvm.riscv.vfmin.nxv8bf16.nxv8bf16.i64(<vscale x 8 x bfloat> poison, <vscale x 8 x bfloat> [[OP1]], <vscale x 8 x bfloat> [[OP2]], i64 [[VL]])
+// CHECK-RV64-NEXT: ret <vscale x 8 x bfloat> [[TMP0]]
+//
+vbfloat16m2_t test_vfmin_vv_bf16m2(vbfloat16m2_t op1, vbfloat16m2_t op2, size_t vl) {
+ return __riscv_vfmin(op1, op2, vl);
+}
+
+// CHECK-RV64-LABEL: define dso_local <vscale x 8 x bfloat> @test_vfmin_vf_bf16m2(
+// CHECK-RV64-SAME: <vscale x 8 x bfloat> [[OP1:%.*]], bfloat noundef [[OP2:%.*]], i64 noundef [[VL:%.*]]) #[[ATTR0]] {
+// CHECK-RV64-NEXT: [[ENTRY:.*:]]
+// CHECK-RV64-NEXT: [[TMP0:%.*]] = call <vscale x 8 x bfloat> @llvm.riscv.vfmin.nxv8bf16.bf16.i64(<vscale x 8 x bfloat> poison, <vscale x 8 x bfloat> [[OP1]], bfloat [[OP2]], i64 [[VL]])
+// CHECK-RV64-NEXT: ret <vscale x 8 x bfloat> [[TMP0]]
+//
+vbfloat16m2_t test_vfmin_vf_bf16m2(vbfloat16m2_t op1, __bf16 op2, size_t vl) {
+ return __riscv_vfmin(op1, op2, vl);
+}
+
+// CHECK-RV64-LABEL: define dso_local <vscale x 16 x bfloat> @test_vfmin_vv_bf16m4(
+// CHECK-RV64-SAME: <vscale x 16 x bfloat> [[OP1:%.*]], <vscale x 16 x bfloat> [[OP2:%.*]], i64 noundef [[VL:%.*]]) #[[ATTR0]] {
+// CHECK-RV64-NEXT: [[ENTRY:.*:]]
+// CHECK-RV64-NEXT: [[TMP0:%.*]] = call <vscale x 16 x bfloat> @llvm.riscv.vfmin.nxv16bf16.nxv16bf16.i64(<vscale x 16 x bfloat> poison, <vscale x 16 x bfloat> [[OP1]], <vscale x 16 x bfloat> [[OP2]], i64 [[VL]])
+// CHECK-RV64-NEXT: ret <vscale x 16 x bfloat> [[TMP0]]
+//
+vbfloat16m4_t test_vfmin_vv_bf16m4(vbfloat16m4_t op1, vbfloat16m4_t op2, size_t vl) {
+ return __riscv_vfmin(op1, op2, vl);
+}
+
+// CHECK-RV64-LABEL: define dso_local <vscale x 16 x bfloat> @test_vfmin_vf_bf16m4(
+// CHECK-RV64-SAME: <vscale x 16 x bfloat> [[OP1:%.*]], bfloat noundef [[OP2:%.*]], i64 noundef [[VL:%.*]]) #[[ATTR0]] {
+// CHECK-RV64-NEXT: [[ENTRY:.*:]]
+// CHECK-RV64-NEXT: [[TMP0:%.*]] = call <vscale x 16 x bfloat> @llvm.riscv.vfmin.nxv16bf16.bf16.i64(<vscale x 16 x bfloat> poison, <vscale x 16 x bfloat> [[OP1]], bfloat [[OP2]], i64 [[VL]])
+// CHECK-RV64-NEXT: ret <vscale x 16 x bfloat> [[TMP0]]
+//
+vbfloat16m4_t test_vfmin_vf_bf16m4(vbfloat16m4_t op1, __bf16 op2, size_t vl) {
+ return __riscv_vfmin(op1, op2, vl);
+}
+
+// CHECK-RV64-LABEL: define dso_local <vscale x 32 x bfloat> @test_vfmin_vv_bf16m8(
+// CHECK-RV64-SAME: <vscale x 32 x bfloat> [[OP1:%.*]], <vscale x 32 x bfloat> [[OP2:%.*]], i64 noundef [[VL:%.*]]) #[[ATTR0]] {
+// CHECK-RV64-NEXT: [[ENTRY:.*:]]
+// CHECK-RV64-NEXT: [[TMP0:%.*]] = call <vscale x 32 x bfloat> @llvm.riscv.vfmin.nxv32bf16.nxv32bf16.i64(<vscale x 32 x bfloat> poison, <vscale x 32 x bfloat> [[OP1]], <vscale x 32 x bfloat> [[OP2]], i64 [[VL]])
+// CHECK-RV64-NEXT: ret <vscale x 32 x bfloat> [[TMP0]]
+//
+vbfloat16m8_t test_vfmin_vv_bf16m8(vbfloat16m8_t op1, vbfloat16m8_t op2, size_t vl) {
+ return __riscv_vfmin(op1, op2, vl);
+}
+
+// CHECK-RV64-LABEL: define dso_local <vscale x 32 x bfloat> @test_vfmin_vf_bf16m8(
+// CHECK-RV64-SAME: <vscale x 32 x bfloat> [[OP1:%.*]], bfloat noundef [[OP2:%.*]], i64 noundef [[VL:%.*]]) #[[ATTR0]] {
+// CHECK-RV64-NEXT: [[ENTRY:.*:]]
+// CHECK-RV64-NEXT: [[TMP0:%.*]] = call <vscale x 32 x bfloat> @llvm.riscv.vfmin.nxv32bf16.bf16.i64(<vscale x 32 x bfloat> poison, <vscale x 32 x bfloat> [[OP1]], bfloat [[OP2]], i64 [[VL]])
+// CHECK-RV64-NEXT: ret <vscale x 32 x bfloat> [[TMP0]]
+//
+vbfloat16m8_t test_vfmin_vf_bf16m8(vbfloat16m8_t op1, __bf16 op2, size_t vl) {
+ return __riscv_vfmin(op1, op2, vl);
+}
+
+// CHECK-RV64-LABEL: define dso_local <vscale x 1 x bfloat> @test_vfmin_vv_bf16mf4_m(
+// CHECK-RV64-SAME: <vscale x 1 x i1> [[MASK:%.*]], <vscale x 1 x bfloat> [[OP1:%.*]], <vscale x 1 x bfloat> [[OP2:%.*]], i64 noundef [[VL:%.*]]) #[[ATTR0]] {
+// CHECK-RV64-NEXT: [[ENTRY:.*:]]
+// CHECK-RV64-NEXT: [[TMP0:%.*]] = call <vscale x 1 x bfloat> @llvm.riscv.vfmin.mask.nxv1bf16.nxv1bf16.i64(<vscale x 1 x bfloat> poison, <vscale x 1 x bfloat> [[OP1]], <vscale x 1 x bfloat> [[OP2]], <vscale x 1 x i1> [[MASK]], i64 [[VL]], i64 3)
+// CHECK-RV64-NEXT: ret <vscale x 1 x bfloat> [[TMP0]]
+//
+vbfloat16mf4_t test_vfmin_vv_bf16mf4_m(vbool64_t mask, vbfloat16mf4_t op1, vbfloat16mf4_t op2, size_t vl) {
+ return __riscv_vfmin(mask, op1, op2, vl);
+}
+
+// CHECK-RV64-LABEL: define dso_local <vscale x 1 x bfloat> @test_vfmin_vf_bf16mf4_m(
+// CHECK-RV64-SAME: <vscale x 1 x i1> [[MASK:%.*]], <vscale x 1 x bfloat> [[OP1:%.*]], bfloat noundef [[OP2:%.*]], i64 noundef [[VL:%.*]]) #[[ATTR0]] {
+// CHECK-RV64-NEXT: [[ENTRY:.*:]]
+// CHECK-RV64-NEXT: [[TMP0:%.*]] = call <vscale x 1 x bfloat> @llvm.riscv.vfmin.mask.nxv1bf16.bf16.i64(<vscale x 1 x bfloat> poison, <vscale x 1 x bfloat> [[OP1]], bfloat [[OP2]], <vscale x 1 x i1> [[MASK]], i64 [[VL]], i64 3)
+// CHECK-RV64-NEXT: ret <vscale x 1 x bfloat> [[TMP0]]
+//
+vbfloat16mf4_t test_vfmin_vf_bf16mf4_m(vbool64_t mask, vbfloat16mf4_t op1, __bf16 op2, size_t vl) {
+ return __riscv_vfmin(mask, op1, op2, vl);
+}
+
+// CHECK-RV64-LABEL: define dso_local <vscale x 2 x bfloat> @test_vfmin_vv_bf16mf2_m(
+// CHECK-RV64-SAME: <vscale x 2 x i1> [[MASK:%.*]], <vscale x 2 x bfloat> [[OP1:%.*]], <vscale x 2 x bfloat> [[OP2:%.*]], i64 noundef [[VL:%.*]]) #[[ATTR0]] {
+// CHECK-RV64-NEXT: [[ENTRY:.*:]]
+// CHECK-RV64-NEXT: [[TMP0:%.*]] = call <vscale x 2 x bfloat> @llvm.riscv.vfmin.mask.nxv2bf16.nxv2bf16.i64(<vscale x 2 x bfloat> poison, <vscale x 2 x bfloat> [[OP1]], <vscale x 2 x bfloat> [[OP2]], <vscale x 2 x i1> [[MASK]], i64 [[VL]], i64 3)
+// CHECK-RV64-NEXT: ret <vscale x 2 x bfloat> [[TMP0]]
+//
+vbfloat16mf2_t test_vfmin_vv_bf16mf2_m(vbool32_t mask, vbfloat16mf2_t op1, vbfloat16mf2_t op2, size_t vl) {
+ return __riscv_vfmin(mask, op1, op2, vl);
+}
+
+// CHECK-RV64-LABEL: define dso_local <vscale x 2 x bfloat> @test_vfmin_vf_bf16mf2_m(
+// CHECK-RV64-SAME: <vscale x 2 x i1> [[MASK:%.*]], <vscale x 2 x bfloat> [[OP1:%.*]], bfloat noundef [[OP2:%.*]], i64 noundef [[VL:%.*]]) #[[ATTR0]] {
+// CHECK-RV64-NEXT: [[ENTRY:.*:]]
+// CHECK-RV64-NEXT: [[TMP0:%.*]] = call <vscale x 2 x bfloat> @llvm.riscv.vfmin.mask.nxv2bf16.bf16.i64(<vscale x 2 x bfloat> poison, <vscale x 2 x bfloat> [[OP1]], bfloat [[OP2]], <vscale x 2 x i1> [[MASK]], i64 [[VL]], i64 3)
+// CHECK-RV64-NEXT: ret <vscale x 2 x bfloat> [[TMP0]]
+//
+vbfloat16mf2_t test_vfmin_vf_bf16mf2_m(vbool32_t mask, vbfloat16mf2_t op1, __bf16 op2, size_t vl) {
+ return __riscv_vfmin(mask, op1, op2, vl);
+}
+
+// CHECK-RV64-LABEL: define dso_local <vscale x 4 x bfloat> @test_vfmin_vv_bf16m1_m(
+// CHECK-RV64-SAME: <vscale x 4 x i1> [[MASK:%.*]], <vscale x 4 x bfloat> [[OP1:%.*]], <vscale x 4 x bfloat> [[OP2:%.*]], i64 noundef [[VL:%.*]]) #[[ATTR0]] {
+// CHECK-RV64-NEXT: [[ENTRY:.*:]]
+// CHECK-RV64-NEXT: [[TMP0:%.*]] = call <vscale x 4 x bfloat> @llvm.riscv.vfmin.mask.nxv4bf16.nxv4bf16.i64(<vscale x 4 x bfloat> poison, <vscale x 4 x bfloat> [[OP1]], <vscale x 4 x bfloat> [[OP2]], <vscale x 4 x i1> [[MASK]], i64 [[VL]], i64 3)
+// CHECK-RV64-NEXT: ret <vscale x 4 x bfloat> [[TMP0]]
+//
+vbfloat16m1_t test_vfmin_vv_bf16m1_m(vbool16_t mask, vbfloat16m1_t op1, vbfloat16m1_t op2, size_t vl) {
+ return __riscv_vfmin(mask, op1, op2, vl);
+}
+
+// CHECK-RV64-LABEL: define dso_local <vscale x 4 x bfloat> @test_vfmin_vf_bf16m1_m(
+// CHECK-RV64-SAME: <vscale x 4 x i1> [[MASK:%.*]], <vscale x 4 x bfloat> [[OP1:%.*]], bfloat noundef [[OP2:%.*]], i64 noundef [[VL:%.*]]) #[[ATTR0]] {
+// CHECK-RV64-NEXT: [[ENTRY:.*:]]
+// CHECK-RV64-NEXT: [[TMP0:%.*]] = call <vscale x 4 x bfloat> @llvm.riscv.vfmin.mask.nxv4bf16.bf16.i64(<vscale x 4 x bfloat> poison, <vscale x 4 x bfloat> [[OP1]], bfloat [[OP2]], <vscale x 4 x i1> [[MASK]], i64 [[VL]], i64 3)
+// CHECK-RV64-NEXT: ret <vscale x 4 x bfloat> [[TMP0]]
+//
+vbfloat16m1_t test_vfmin_vf_bf16m1_m(vbool16_t mask, vbfloat16m1_t op1, __bf16 op2, size_t vl) {
+ return __riscv_vfmin(mask, op1, op2, vl);
+}
+
+// CHECK-RV64-LABEL: define dso_local <vscale x 8 x bfloat> @test_vfmin_vv_bf16m2_m(
+// CHECK-RV64-SAME: <vscale x 8 x i1> [[MASK:%.*]], <vscale x 8 x bfloat> [[OP1:%.*]], <vscale x 8 x bfloat> [[OP2:%.*]], i64 noundef [[VL:%.*]]) #[[ATTR0]] {
+// CHECK-RV64-NEXT: [[ENTRY:.*:]]
+// CHECK-RV64-NEXT: [[TMP0:%.*]] = call <vscale x 8 x bfloat> @llvm.riscv.vfmin.mask.nxv8bf16.nxv8bf16.i64(<vscale x 8 x bfloat> poison, <vscale x 8 x bfloat> [[OP1]], <vscale x 8 x bfloat> [[OP2]], <vscale x 8 x i1> [[MASK]], i64 [[VL]], i64 3)
+// CHECK-RV64-NEXT: ret <vscale x 8 x bfloat> [[TMP0]]
+//
+vbfloat16m2_t test_vfmin_vv_bf16m2_m(vbool8_t mask, vbfloat16m2_t op1, vbfloat16m2_t op2, size_t vl) {
+ return __riscv_vfmin(mask, op1, op2, vl);
+}
+
+// CHECK-RV64-LABEL: define dso_local <vscale x 8 x bfloat> @test_vfmin_vf_bf16m2_m(
+// CHECK-RV64-SAME: <vscale x 8 x i1> [[MASK:%.*]], <vscale x 8 x bfloat> [[OP1:%.*]], bfloat noundef [[OP2:%.*]], i64 noundef [[VL:%.*]]) #[[ATTR0]] {
+// CHECK-RV64-NEXT: [[ENTRY:.*:]]
+// CHECK-RV64-NEXT: [[TMP0:%.*]] = call <vscale x 8 x bfloat> @llvm.riscv.vfmin.mask.nxv8bf16.bf16.i64(<vscale x 8 x bfloat> poison, <vscale x 8 x bfloat> [[OP1]], bfloat [[OP2]], <vscale x 8 x i1> [[MASK]], i64 [[VL]], i64 3)
+// CHECK-RV64-NEXT: ret <vscale x 8 x bfloat> [[TMP0]]
+//
+vbfloat16m2_t test_vfmin_vf_bf16m2_m(vbool8_t mask, vbfloat16m2_t op1, __bf16 op2, size_t vl) {
+ return __riscv_vfmin(mask, op1, op2, vl);
+}
+
+// CHECK-RV64-LABEL: define dso_local <vscale x 16 x bfloat> @test_vfmin_vv_bf16m4_m(
+// CHECK-RV64-SAME: <vscale x 16 x i1> [[MASK:%.*]], <vscale x 16 x bfloat> [[OP1:%.*]], <vscale x 16 x bfloat> [[OP2:%.*]], i64 noundef [[VL:%.*]]) #[[ATTR0]] {
+// CHECK-RV64-NEXT: [[ENTRY:.*:]]
+// CHECK-RV64-NEXT: [[TMP0:%.*]] = call <vscale x 16 x bfloat> @llvm.riscv.vfmin.mask.nxv16bf16.nxv16bf16.i64(<vscale x 16 x bfloat> poison, <vscale x 16 x bfloat> [[OP1]], <vscale x 16 x bfloat> [[OP2]], <vscale x 16 x i1> [[MASK]], i64 [[VL]], i64 3)
+// CHECK-RV64-NEXT: ret <vscale x 16 x bfloat> [[TMP0]]
+//
+vbfloat16m4_t test_vfmin_vv_bf16m4_m(vbool4_t mask, vbfloat16m4_t op1, vbfloat16m4_t op2, size_t vl) {
+ return __riscv_vfmin(mask, op1, op2, vl);
+}
+
+// CHECK-RV64-LABEL: define dso_local <vscale x 16 x bfloat> @test_vfmin_vf_bf16m4_m(
+// CHECK-RV64-SAME: <vscale x 16 x i1> [[MASK:%.*]], <vscale x 16 x bfloat> [[OP1:%.*]], bfloat noundef [[OP2:%.*]], i64 noundef [[VL:%.*]]) #[[ATTR0]] {
+// CHECK-RV64-NEXT: [[ENTRY:.*:]]
+// CHECK-RV64-NEXT: [[TMP0:%.*]] = call <vscale x 16 x bfloat> @llvm.riscv.vfmin.mask.nxv16bf16.bf16.i64(<vscale x 16 x bfloat> poison, <vscale x 16 x bfloat> [[OP1]], bfloat [[OP2]], <vscale x 16 x i1> [[MASK]], i64 [[VL]], i64 3)
+// CHECK-RV64-NEXT: ret <vscale x 16 x bfloat> [[TMP0]]
+//
+vbfloat16m4_t test_vfmin_vf_bf16m4_m(vbool4_t mask, vbfloat16m4_t op1, __bf16 op2, size_t vl) {
+ return __riscv_vfmin(mask, op1, op2, vl);
+}
+
+// CHECK-RV64-LABEL: define dso_local <vscale x 32 x bfloat> @test_vfmin_vv_bf16m8_m(
+// CHECK-RV64-SAME: <vscale x 32 x i1> [[MASK:%.*]], <vscale x 32 x bfloat> [[OP1:%.*]], <vscale x 32 x bfloat> [[OP2:%.*]], i64 noundef [[VL:%.*]]) #[[ATTR0]] {
+// CHECK-RV64-NEXT: [[ENTRY:.*:]]
+// CHECK-RV64-NEXT: [[TMP0:%.*]] = call <vscale x 32 x bfloat> @llvm.riscv.vfmin.mask.nxv32bf16.nxv32bf16.i64(<vscale x 32 x bfloat> poison, <vscale x 32 x bfloat> [[OP1]], <vscale x 32 x bfloat> [[OP2]], <vscale x 32 x i1> [[MASK]], i64 [[VL]], i64 3)
+// CHECK-RV64-NEXT: ret <vscale x 32 x bfloat> [[TMP0]]
+//
+vbfloat16m8_t test_vfmin_vv_bf16m8_m(vbool2_t mask, vbfloat16m8_t op1, vbfloat16m8_t op2, size_t vl) {
+ return __riscv_vfmin(mask, op1, op2, vl);
+}
+
+// CHECK-RV64-LABEL: define dso_local <vscale x 32 x bfloat> @test_vfmin_vf_bf16m8_m(
+// CHECK-RV64-SAME: <vscale x 32 x i1> [[MASK:%.*]], <vscale x 32 x bfloat> [[OP1:%.*]], bfloat noundef [[OP2:%.*]], i64 noundef [[VL:%.*]]) #[[ATTR0]] {
+// CHECK-RV64-NEXT: [[ENTRY:.*:]]
+// CHECK-RV64-NEXT: [[TMP0:%.*]] = call <vscale x 32 x bfloat> @llvm.riscv.vfmin.mask.nxv32bf16.bf16.i64(<vscale x 32 x bfloat> poison, <vscale x 32 x bfloat> [[OP1]], bfloat [[OP2]], <vscale x 32 x i1> [[MASK]], i64 [[VL]], i64 3)
+// CHECK-RV64-NEXT: ret <vscale x 32 x bfloat> [[TMP0]]
+//
+vbfloat16m8_t test_vfmin_vf_bf16m8_m(vbool2_t mask, vbfloat16m8_t op1, __bf16 op2, size_t vl) {
+ return __riscv_vfmin(mask, op1, op2, vl);
+}
+
diff --git a/clang/test/CodeGen/RISCV/rvv-intrinsics-autogenerated/zvfbfa/non-policy/overloaded/vfmsac.c b/clang/test/CodeGen/RISCV/rvv-intrinsics-autogenerated/zvfbfa/non-policy/overloaded/vfmsac.c
new file mode 100644
index 0000000..0384e7d
--- /dev/null
+++ b/clang/test/CodeGen/RISCV/rvv-intrinsics-autogenerated/zvfbfa/non-policy/overloaded/vfmsac.c
@@ -0,0 +1,249 @@
+// NOTE: Assertions have been autogenerated by utils/update_cc_test_checks.py UTC_ARGS: --version 5
+// REQUIRES: riscv-registered-target
+// RUN: %clang_cc1 -triple riscv64 -target-feature +v \
+// RUN: -target-feature +experimental-zvfbfa -disable-O0-optnone \
+// RUN: -emit-llvm %s -o - | opt -S -passes=mem2reg | \
+// RUN: FileCheck --check-prefix=CHECK-RV64 %s
+
+#include <riscv_vector.h>
+
+// CHECK-RV64-LABEL: define dso_local <vscale x 1 x bfloat> @test_vfmsac_vv_bf16mf4(
+// CHECK-RV64-SAME: <vscale x 1 x bfloat> [[VD:%.*]], <vscale x 1 x bfloat> [[VS1:%.*]], <vscale x 1 x bfloat> [[VS2:%.*]], i64 noundef [[VL:%.*]]) #[[ATTR0:[0-9]+]] {
+// CHECK-RV64-NEXT: [[ENTRY:.*:]]
+// CHECK-RV64-NEXT: [[TMP0:%.*]] = call <vscale x 1 x bfloat> @llvm.riscv.vfmsac.nxv1bf16.nxv1bf16.i64(<vscale x 1 x bfloat> [[VD]], <vscale x 1 x bfloat> [[VS1]], <vscale x 1 x bfloat> [[VS2]], i64 7, i64 [[VL]], i64 3)
+// CHECK-RV64-NEXT: ret <vscale x 1 x bfloat> [[TMP0]]
+//
+vbfloat16mf4_t test_vfmsac_vv_bf16mf4(vbfloat16mf4_t vd, vbfloat16mf4_t vs1, vbfloat16mf4_t vs2, size_t vl) {
+ return __riscv_vfmsac(vd, vs1, vs2, vl);
+}
+
+// CHECK-RV64-LABEL: define dso_local <vscale x 1 x bfloat> @test_vfmsac_vf_bf16mf4(
+// CHECK-RV64-SAME: <vscale x 1 x bfloat> [[VD:%.*]], bfloat noundef [[RS1:%.*]], <vscale x 1 x bfloat> [[VS2:%.*]], i64 noundef [[VL:%.*]]) #[[ATTR0]] {
+// CHECK-RV64-NEXT: [[ENTRY:.*:]]
+// CHECK-RV64-NEXT: [[TMP0:%.*]] = call <vscale x 1 x bfloat> @llvm.riscv.vfmsac.nxv1bf16.bf16.i64(<vscale x 1 x bfloat> [[VD]], bfloat [[RS1]], <vscale x 1 x bfloat> [[VS2]], i64 7, i64 [[VL]], i64 3)
+// CHECK-RV64-NEXT: ret <vscale x 1 x bfloat> [[TMP0]]
+//
+vbfloat16mf4_t test_vfmsac_vf_bf16mf4(vbfloat16mf4_t vd, __bf16 rs1, vbfloat16mf4_t vs2, size_t vl) {
+ return __riscv_vfmsac(vd, rs1, vs2, vl);
+}
+
+// CHECK-RV64-LABEL: define dso_local <vscale x 2 x bfloat> @test_vfmsac_vv_bf16mf2(
+// CHECK-RV64-SAME: <vscale x 2 x bfloat> [[VD:%.*]], <vscale x 2 x bfloat> [[VS1:%.*]], <vscale x 2 x bfloat> [[VS2:%.*]], i64 noundef [[VL:%.*]]) #[[ATTR0]] {
+// CHECK-RV64-NEXT: [[ENTRY:.*:]]
+// CHECK-RV64-NEXT: [[TMP0:%.*]] = call <vscale x 2 x bfloat> @llvm.riscv.vfmsac.nxv2bf16.nxv2bf16.i64(<vscale x 2 x bfloat> [[VD]], <vscale x 2 x bfloat> [[VS1]], <vscale x 2 x bfloat> [[VS2]], i64 7, i64 [[VL]], i64 3)
+// CHECK-RV64-NEXT: ret <vscale x 2 x bfloat> [[TMP0]]
+//
+vbfloat16mf2_t test_vfmsac_vv_bf16mf2(vbfloat16mf2_t vd, vbfloat16mf2_t vs1, vbfloat16mf2_t vs2, size_t vl) {
+ return __riscv_vfmsac(vd, vs1, vs2, vl);
+}
+
+// CHECK-RV64-LABEL: define dso_local <vscale x 2 x bfloat> @test_vfmsac_vf_bf16mf2(
+// CHECK-RV64-SAME: <vscale x 2 x bfloat> [[VD:%.*]], bfloat noundef [[RS1:%.*]], <vscale x 2 x bfloat> [[VS2:%.*]], i64 noundef [[VL:%.*]]) #[[ATTR0]] {
+// CHECK-RV64-NEXT: [[ENTRY:.*:]]
+// CHECK-RV64-NEXT: [[TMP0:%.*]] = call <vscale x 2 x bfloat> @llvm.riscv.vfmsac.nxv2bf16.bf16.i64(<vscale x 2 x bfloat> [[VD]], bfloat [[RS1]], <vscale x 2 x bfloat> [[VS2]], i64 7, i64 [[VL]], i64 3)
+// CHECK-RV64-NEXT: ret <vscale x 2 x bfloat> [[TMP0]]
+//
+vbfloat16mf2_t test_vfmsac_vf_bf16mf2(vbfloat16mf2_t vd, __bf16 rs1, vbfloat16mf2_t vs2, size_t vl) {
+ return __riscv_vfmsac(vd, rs1, vs2, vl);
+}
+
+// CHECK-RV64-LABEL: define dso_local <vscale x 4 x bfloat> @test_vfmsac_vv_bf16m1(
+// CHECK-RV64-SAME: <vscale x 4 x bfloat> [[VD:%.*]], <vscale x 4 x bfloat> [[VS1:%.*]], <vscale x 4 x bfloat> [[VS2:%.*]], i64 noundef [[VL:%.*]]) #[[ATTR0]] {
+// CHECK-RV64-NEXT: [[ENTRY:.*:]]
+// CHECK-RV64-NEXT: [[TMP0:%.*]] = call <vscale x 4 x bfloat> @llvm.riscv.vfmsac.nxv4bf16.nxv4bf16.i64(<vscale x 4 x bfloat> [[VD]], <vscale x 4 x bfloat> [[VS1]], <vscale x 4 x bfloat> [[VS2]], i64 7, i64 [[VL]], i64 3)
+// CHECK-RV64-NEXT: ret <vscale x 4 x bfloat> [[TMP0]]
+//
+vbfloat16m1_t test_vfmsac_vv_bf16m1(vbfloat16m1_t vd, vbfloat16m1_t vs1, vbfloat16m1_t vs2, size_t vl) {
+ return __riscv_vfmsac(vd, vs1, vs2, vl);
+}
+
+// CHECK-RV64-LABEL: define dso_local <vscale x 4 x bfloat> @test_vfmsac_vf_bf16m1(
+// CHECK-RV64-SAME: <vscale x 4 x bfloat> [[VD:%.*]], bfloat noundef [[RS1:%.*]], <vscale x 4 x bfloat> [[VS2:%.*]], i64 noundef [[VL:%.*]]) #[[ATTR0]] {
+// CHECK-RV64-NEXT: [[ENTRY:.*:]]
+// CHECK-RV64-NEXT: [[TMP0:%.*]] = call <vscale x 4 x bfloat> @llvm.riscv.vfmsac.nxv4bf16.bf16.i64(<vscale x 4 x bfloat> [[VD]], bfloat [[RS1]], <vscale x 4 x bfloat> [[VS2]], i64 7, i64 [[VL]], i64 3)
+// CHECK-RV64-NEXT: ret <vscale x 4 x bfloat> [[TMP0]]
+//
+vbfloat16m1_t test_vfmsac_vf_bf16m1(vbfloat16m1_t vd, __bf16 rs1, vbfloat16m1_t vs2, size_t vl) {
+ return __riscv_vfmsac(vd, rs1, vs2, vl);
+}
+
+// CHECK-RV64-LABEL: define dso_local <vscale x 8 x bfloat> @test_vfmsac_vv_bf16m2(
+// CHECK-RV64-SAME: <vscale x 8 x bfloat> [[VD:%.*]], <vscale x 8 x bfloat> [[VS1:%.*]], <vscale x 8 x bfloat> [[VS2:%.*]], i64 noundef [[VL:%.*]]) #[[ATTR0]] {
+// CHECK-RV64-NEXT: [[ENTRY:.*:]]
+// CHECK-RV64-NEXT: [[TMP0:%.*]] = call <vscale x 8 x bfloat> @llvm.riscv.vfmsac.nxv8bf16.nxv8bf16.i64(<vscale x 8 x bfloat> [[VD]], <vscale x 8 x bfloat> [[VS1]], <vscale x 8 x bfloat> [[VS2]], i64 7, i64 [[VL]], i64 3)
+// CHECK-RV64-NEXT: ret <vscale x 8 x bfloat> [[TMP0]]
+//
+vbfloat16m2_t test_vfmsac_vv_bf16m2(vbfloat16m2_t vd, vbfloat16m2_t vs1, vbfloat16m2_t vs2, size_t vl) {
+ return __riscv_vfmsac(vd, vs1, vs2, vl);
+}
+
+// CHECK-RV64-LABEL: define dso_local <vscale x 8 x bfloat> @test_vfmsac_vf_bf16m2(
+// CHECK-RV64-SAME: <vscale x 8 x bfloat> [[VD:%.*]], bfloat noundef [[RS1:%.*]], <vscale x 8 x bfloat> [[VS2:%.*]], i64 noundef [[VL:%.*]]) #[[ATTR0]] {
+// CHECK-RV64-NEXT: [[ENTRY:.*:]]
+// CHECK-RV64-NEXT: [[TMP0:%.*]] = call <vscale x 8 x bfloat> @llvm.riscv.vfmsac.nxv8bf16.bf16.i64(<vscale x 8 x bfloat> [[VD]], bfloat [[RS1]], <vscale x 8 x bfloat> [[VS2]], i64 7, i64 [[VL]], i64 3)
+// CHECK-RV64-NEXT: ret <vscale x 8 x bfloat> [[TMP0]]
+//
+vbfloat16m2_t test_vfmsac_vf_bf16m2(vbfloat16m2_t vd, __bf16 rs1, vbfloat16m2_t vs2, size_t vl) {
+ return __riscv_vfmsac(vd, rs1, vs2, vl);
+}
+
+// CHECK-RV64-LABEL: define dso_local <vscale x 16 x bfloat> @test_vfmsac_vv_bf16m4(
+// CHECK-RV64-SAME: <vscale x 16 x bfloat> [[VD:%.*]], <vscale x 16 x bfloat> [[VS1:%.*]], <vscale x 16 x bfloat> [[VS2:%.*]], i64 noundef [[VL:%.*]]) #[[ATTR0]] {
+// CHECK-RV64-NEXT: [[ENTRY:.*:]]
+// CHECK-RV64-NEXT: [[TMP0:%.*]] = call <vscale x 16 x bfloat> @llvm.riscv.vfmsac.nxv16bf16.nxv16bf16.i64(<vscale x 16 x bfloat> [[VD]], <vscale x 16 x bfloat> [[VS1]], <vscale x 16 x bfloat> [[VS2]], i64 7, i64 [[VL]], i64 3)
+// CHECK-RV64-NEXT: ret <vscale x 16 x bfloat> [[TMP0]]
+//
+vbfloat16m4_t test_vfmsac_vv_bf16m4(vbfloat16m4_t vd, vbfloat16m4_t vs1, vbfloat16m4_t vs2, size_t vl) {
+ return __riscv_vfmsac(vd, vs1, vs2, vl);
+}
+
+// CHECK-RV64-LABEL: define dso_local <vscale x 16 x bfloat> @test_vfmsac_vf_bf16m4(
+// CHECK-RV64-SAME: <vscale x 16 x bfloat> [[VD:%.*]], bfloat noundef [[RS1:%.*]], <vscale x 16 x bfloat> [[VS2:%.*]], i64 noundef [[VL:%.*]]) #[[ATTR0]] {
+// CHECK-RV64-NEXT: [[ENTRY:.*:]]
+// CHECK-RV64-NEXT: [[TMP0:%.*]] = call <vscale x 16 x bfloat> @llvm.riscv.vfmsac.nxv16bf16.bf16.i64(<vscale x 16 x bfloat> [[VD]], bfloat [[RS1]], <vscale x 16 x bfloat> [[VS2]], i64 7, i64 [[VL]], i64 3)
+// CHECK-RV64-NEXT: ret <vscale x 16 x bfloat> [[TMP0]]
+//
+vbfloat16m4_t test_vfmsac_vf_bf16m4(vbfloat16m4_t vd, __bf16 rs1, vbfloat16m4_t vs2, size_t vl) {
+ return __riscv_vfmsac(vd, rs1, vs2, vl);
+}
+
+// CHECK-RV64-LABEL: define dso_local <vscale x 32 x bfloat> @test_vfmsac_vv_bf16m8(
+// CHECK-RV64-SAME: <vscale x 32 x bfloat> [[VD:%.*]], <vscale x 32 x bfloat> [[VS1:%.*]], <vscale x 32 x bfloat> [[VS2:%.*]], i64 noundef [[VL:%.*]]) #[[ATTR0]] {
+// CHECK-RV64-NEXT: [[ENTRY:.*:]]
+// CHECK-RV64-NEXT: [[TMP0:%.*]] = call <vscale x 32 x bfloat> @llvm.riscv.vfmsac.nxv32bf16.nxv32bf16.i64(<vscale x 32 x bfloat> [[VD]], <vscale x 32 x bfloat> [[VS1]], <vscale x 32 x bfloat> [[VS2]], i64 7, i64 [[VL]], i64 3)
+// CHECK-RV64-NEXT: ret <vscale x 32 x bfloat> [[TMP0]]
+//
+vbfloat16m8_t test_vfmsac_vv_bf16m8(vbfloat16m8_t vd, vbfloat16m8_t vs1, vbfloat16m8_t vs2, size_t vl) {
+ return __riscv_vfmsac(vd, vs1, vs2, vl);
+}
+
+// CHECK-RV64-LABEL: define dso_local <vscale x 32 x bfloat> @test_vfmsac_vf_bf16m8(
+// CHECK-RV64-SAME: <vscale x 32 x bfloat> [[VD:%.*]], bfloat noundef [[RS1:%.*]], <vscale x 32 x bfloat> [[VS2:%.*]], i64 noundef [[VL:%.*]]) #[[ATTR0]] {
+// CHECK-RV64-NEXT: [[ENTRY:.*:]]
+// CHECK-RV64-NEXT: [[TMP0:%.*]] = call <vscale x 32 x bfloat> @llvm.riscv.vfmsac.nxv32bf16.bf16.i64(<vscale x 32 x bfloat> [[VD]], bfloat [[RS1]], <vscale x 32 x bfloat> [[VS2]], i64 7, i64 [[VL]], i64 3)
+// CHECK-RV64-NEXT: ret <vscale x 32 x bfloat> [[TMP0]]
+//
+vbfloat16m8_t test_vfmsac_vf_bf16m8(vbfloat16m8_t vd, __bf16 rs1, vbfloat16m8_t vs2, size_t vl) {
+ return __riscv_vfmsac(vd, rs1, vs2, vl);
+}
+
+// CHECK-RV64-LABEL: define dso_local <vscale x 1 x bfloat> @test_vfmsac_vv_bf16mf4_m(
+// CHECK-RV64-SAME: <vscale x 1 x i1> [[MASK:%.*]], <vscale x 1 x bfloat> [[VD:%.*]], <vscale x 1 x bfloat> [[VS1:%.*]], <vscale x 1 x bfloat> [[VS2:%.*]], i64 noundef [[VL:%.*]]) #[[ATTR0]] {
+// CHECK-RV64-NEXT: [[ENTRY:.*:]]
+// CHECK-RV64-NEXT: [[TMP0:%.*]] = call <vscale x 1 x bfloat> @llvm.riscv.vfmsac.mask.nxv1bf16.nxv1bf16.i64(<vscale x 1 x bfloat> [[VD]], <vscale x 1 x bfloat> [[VS1]], <vscale x 1 x bfloat> [[VS2]], <vscale x 1 x i1> [[MASK]], i64 7, i64 [[VL]], i64 3)
+// CHECK-RV64-NEXT: ret <vscale x 1 x bfloat> [[TMP0]]
+//
+vbfloat16mf4_t test_vfmsac_vv_bf16mf4_m(vbool64_t mask, vbfloat16mf4_t vd, vbfloat16mf4_t vs1, vbfloat16mf4_t vs2, size_t vl) {
+ return __riscv_vfmsac(mask, vd, vs1, vs2, vl);
+}
+
+// CHECK-RV64-LABEL: define dso_local <vscale x 1 x bfloat> @test_vfmsac_vf_bf16mf4_m(
+// CHECK-RV64-SAME: <vscale x 1 x i1> [[MASK:%.*]], <vscale x 1 x bfloat> [[VD:%.*]], bfloat noundef [[RS1:%.*]], <vscale x 1 x bfloat> [[VS2:%.*]], i64 noundef [[VL:%.*]]) #[[ATTR0]] {
+// CHECK-RV64-NEXT: [[ENTRY:.*:]]
+// CHECK-RV64-NEXT: [[TMP0:%.*]] = call <vscale x 1 x bfloat> @llvm.riscv.vfmsac.mask.nxv1bf16.bf16.i64(<vscale x 1 x bfloat> [[VD]], bfloat [[RS1]], <vscale x 1 x bfloat> [[VS2]], <vscale x 1 x i1> [[MASK]], i64 7, i64 [[VL]], i64 3)
+// CHECK-RV64-NEXT: ret <vscale x 1 x bfloat> [[TMP0]]
+//
+vbfloat16mf4_t test_vfmsac_vf_bf16mf4_m(vbool64_t mask, vbfloat16mf4_t vd, __bf16 rs1, vbfloat16mf4_t vs2, size_t vl) {
+ return __riscv_vfmsac(mask, vd, rs1, vs2, vl);
+}
+
+// CHECK-RV64-LABEL: define dso_local <vscale x 2 x bfloat> @test_vfmsac_vv_bf16mf2_m(
+// CHECK-RV64-SAME: <vscale x 2 x i1> [[MASK:%.*]], <vscale x 2 x bfloat> [[VD:%.*]], <vscale x 2 x bfloat> [[VS1:%.*]], <vscale x 2 x bfloat> [[VS2:%.*]], i64 noundef [[VL:%.*]]) #[[ATTR0]] {
+// CHECK-RV64-NEXT: [[ENTRY:.*:]]
+// CHECK-RV64-NEXT: [[TMP0:%.*]] = call <vscale x 2 x bfloat> @llvm.riscv.vfmsac.mask.nxv2bf16.nxv2bf16.i64(<vscale x 2 x bfloat> [[VD]], <vscale x 2 x bfloat> [[VS1]], <vscale x 2 x bfloat> [[VS2]], <vscale x 2 x i1> [[MASK]], i64 7, i64 [[VL]], i64 3)
+// CHECK-RV64-NEXT: ret <vscale x 2 x bfloat> [[TMP0]]
+//
+vbfloat16mf2_t test_vfmsac_vv_bf16mf2_m(vbool32_t mask, vbfloat16mf2_t vd, vbfloat16mf2_t vs1, vbfloat16mf2_t vs2, size_t vl) {
+ return __riscv_vfmsac(mask, vd, vs1, vs2, vl);
+}
+
+// CHECK-RV64-LABEL: define dso_local <vscale x 2 x bfloat> @test_vfmsac_vf_bf16mf2_m(
+// CHECK-RV64-SAME: <vscale x 2 x i1> [[MASK:%.*]], <vscale x 2 x bfloat> [[VD:%.*]], bfloat noundef [[RS1:%.*]], <vscale x 2 x bfloat> [[VS2:%.*]], i64 noundef [[VL:%.*]]) #[[ATTR0]] {
+// CHECK-RV64-NEXT: [[ENTRY:.*:]]
+// CHECK-RV64-NEXT: [[TMP0:%.*]] = call <vscale x 2 x bfloat> @llvm.riscv.vfmsac.mask.nxv2bf16.bf16.i64(<vscale x 2 x bfloat> [[VD]], bfloat [[RS1]], <vscale x 2 x bfloat> [[VS2]], <vscale x 2 x i1> [[MASK]], i64 7, i64 [[VL]], i64 3)
+// CHECK-RV64-NEXT: ret <vscale x 2 x bfloat> [[TMP0]]
+//
+vbfloat16mf2_t test_vfmsac_vf_bf16mf2_m(vbool32_t mask, vbfloat16mf2_t vd, __bf16 rs1, vbfloat16mf2_t vs2, size_t vl) {
+ return __riscv_vfmsac(mask, vd, rs1, vs2, vl);
+}
+
+// CHECK-RV64-LABEL: define dso_local <vscale x 4 x bfloat> @test_vfmsac_vv_bf16m1_m(
+// CHECK-RV64-SAME: <vscale x 4 x i1> [[MASK:%.*]], <vscale x 4 x bfloat> [[VD:%.*]], <vscale x 4 x bfloat> [[VS1:%.*]], <vscale x 4 x bfloat> [[VS2:%.*]], i64 noundef [[VL:%.*]]) #[[ATTR0]] {
+// CHECK-RV64-NEXT: [[ENTRY:.*:]]
+// CHECK-RV64-NEXT: [[TMP0:%.*]] = call <vscale x 4 x bfloat> @llvm.riscv.vfmsac.mask.nxv4bf16.nxv4bf16.i64(<vscale x 4 x bfloat> [[VD]], <vscale x 4 x bfloat> [[VS1]], <vscale x 4 x bfloat> [[VS2]], <vscale x 4 x i1> [[MASK]], i64 7, i64 [[VL]], i64 3)
+// CHECK-RV64-NEXT: ret <vscale x 4 x bfloat> [[TMP0]]
+//
+vbfloat16m1_t test_vfmsac_vv_bf16m1_m(vbool16_t mask, vbfloat16m1_t vd, vbfloat16m1_t vs1, vbfloat16m1_t vs2, size_t vl) {
+ return __riscv_vfmsac(mask, vd, vs1, vs2, vl);
+}
+
+// CHECK-RV64-LABEL: define dso_local <vscale x 4 x bfloat> @test_vfmsac_vf_bf16m1_m(
+// CHECK-RV64-SAME: <vscale x 4 x i1> [[MASK:%.*]], <vscale x 4 x bfloat> [[VD:%.*]], bfloat noundef [[RS1:%.*]], <vscale x 4 x bfloat> [[VS2:%.*]], i64 noundef [[VL:%.*]]) #[[ATTR0]] {
+// CHECK-RV64-NEXT: [[ENTRY:.*:]]
+// CHECK-RV64-NEXT: [[TMP0:%.*]] = call <vscale x 4 x bfloat> @llvm.riscv.vfmsac.mask.nxv4bf16.bf16.i64(<vscale x 4 x bfloat> [[VD]], bfloat [[RS1]], <vscale x 4 x bfloat> [[VS2]], <vscale x 4 x i1> [[MASK]], i64 7, i64 [[VL]], i64 3)
+// CHECK-RV64-NEXT: ret <vscale x 4 x bfloat> [[TMP0]]
+//
+vbfloat16m1_t test_vfmsac_vf_bf16m1_m(vbool16_t mask, vbfloat16m1_t vd, __bf16 rs1, vbfloat16m1_t vs2, size_t vl) {
+ return __riscv_vfmsac(mask, vd, rs1, vs2, vl);
+}
+
+// CHECK-RV64-LABEL: define dso_local <vscale x 8 x bfloat> @test_vfmsac_vv_bf16m2_m(
+// CHECK-RV64-SAME: <vscale x 8 x i1> [[MASK:%.*]], <vscale x 8 x bfloat> [[VD:%.*]], <vscale x 8 x bfloat> [[VS1:%.*]], <vscale x 8 x bfloat> [[VS2:%.*]], i64 noundef [[VL:%.*]]) #[[ATTR0]] {
+// CHECK-RV64-NEXT: [[ENTRY:.*:]]
+// CHECK-RV64-NEXT: [[TMP0:%.*]] = call <vscale x 8 x bfloat> @llvm.riscv.vfmsac.mask.nxv8bf16.nxv8bf16.i64(<vscale x 8 x bfloat> [[VD]], <vscale x 8 x bfloat> [[VS1]], <vscale x 8 x bfloat> [[VS2]], <vscale x 8 x i1> [[MASK]], i64 7, i64 [[VL]], i64 3)
+// CHECK-RV64-NEXT: ret <vscale x 8 x bfloat> [[TMP0]]
+//
+vbfloat16m2_t test_vfmsac_vv_bf16m2_m(vbool8_t mask, vbfloat16m2_t vd, vbfloat16m2_t vs1, vbfloat16m2_t vs2, size_t vl) {
+ return __riscv_vfmsac(mask, vd, vs1, vs2, vl);
+}
+
+// CHECK-RV64-LABEL: define dso_local <vscale x 8 x bfloat> @test_vfmsac_vf_bf16m2_m(
+// CHECK-RV64-SAME: <vscale x 8 x i1> [[MASK:%.*]], <vscale x 8 x bfloat> [[VD:%.*]], bfloat noundef [[RS1:%.*]], <vscale x 8 x bfloat> [[VS2:%.*]], i64 noundef [[VL:%.*]]) #[[ATTR0]] {
+// CHECK-RV64-NEXT: [[ENTRY:.*:]]
+// CHECK-RV64-NEXT: [[TMP0:%.*]] = call <vscale x 8 x bfloat> @llvm.riscv.vfmsac.mask.nxv8bf16.bf16.i64(<vscale x 8 x bfloat> [[VD]], bfloat [[RS1]], <vscale x 8 x bfloat> [[VS2]], <vscale x 8 x i1> [[MASK]], i64 7, i64 [[VL]], i64 3)
+// CHECK-RV64-NEXT: ret <vscale x 8 x bfloat> [[TMP0]]
+//
+vbfloat16m2_t test_vfmsac_vf_bf16m2_m(vbool8_t mask, vbfloat16m2_t vd, __bf16 rs1, vbfloat16m2_t vs2, size_t vl) {
+ return __riscv_vfmsac(mask, vd, rs1, vs2, vl);
+}
+
+// CHECK-RV64-LABEL: define dso_local <vscale x 16 x bfloat> @test_vfmsac_vv_bf16m4_m(
+// CHECK-RV64-SAME: <vscale x 16 x i1> [[MASK:%.*]], <vscale x 16 x bfloat> [[VD:%.*]], <vscale x 16 x bfloat> [[VS1:%.*]], <vscale x 16 x bfloat> [[VS2:%.*]], i64 noundef [[VL:%.*]]) #[[ATTR0]] {
+// CHECK-RV64-NEXT: [[ENTRY:.*:]]
+// CHECK-RV64-NEXT: [[TMP0:%.*]] = call <vscale x 16 x bfloat> @llvm.riscv.vfmsac.mask.nxv16bf16.nxv16bf16.i64(<vscale x 16 x bfloat> [[VD]], <vscale x 16 x bfloat> [[VS1]], <vscale x 16 x bfloat> [[VS2]], <vscale x 16 x i1> [[MASK]], i64 7, i64 [[VL]], i64 3)
+// CHECK-RV64-NEXT: ret <vscale x 16 x bfloat> [[TMP0]]
+//
+vbfloat16m4_t test_vfmsac_vv_bf16m4_m(vbool4_t mask, vbfloat16m4_t vd, vbfloat16m4_t vs1, vbfloat16m4_t vs2, size_t vl) {
+ return __riscv_vfmsac(mask, vd, vs1, vs2, vl);
+}
+
+// CHECK-RV64-LABEL: define dso_local <vscale x 16 x bfloat> @test_vfmsac_vf_bf16m4_m(
+// CHECK-RV64-SAME: <vscale x 16 x i1> [[MASK:%.*]], <vscale x 16 x bfloat> [[VD:%.*]], bfloat noundef [[RS1:%.*]], <vscale x 16 x bfloat> [[VS2:%.*]], i64 noundef [[VL:%.*]]) #[[ATTR0]] {
+// CHECK-RV64-NEXT: [[ENTRY:.*:]]
+// CHECK-RV64-NEXT: [[TMP0:%.*]] = call <vscale x 16 x bfloat> @llvm.riscv.vfmsac.mask.nxv16bf16.bf16.i64(<vscale x 16 x bfloat> [[VD]], bfloat [[RS1]], <vscale x 16 x bfloat> [[VS2]], <vscale x 16 x i1> [[MASK]], i64 7, i64 [[VL]], i64 3)
+// CHECK-RV64-NEXT: ret <vscale x 16 x bfloat> [[TMP0]]
+//
+vbfloat16m4_t test_vfmsac_vf_bf16m4_m(vbool4_t mask, vbfloat16m4_t vd, __bf16 rs1, vbfloat16m4_t vs2, size_t vl) {
+ return __riscv_vfmsac(mask, vd, rs1, vs2, vl);
+}
+
+// CHECK-RV64-LABEL: define dso_local <vscale x 32 x bfloat> @test_vfmsac_vv_bf16m8_m(
+// CHECK-RV64-SAME: <vscale x 32 x i1> [[MASK:%.*]], <vscale x 32 x bfloat> [[VD:%.*]], <vscale x 32 x bfloat> [[VS1:%.*]], <vscale x 32 x bfloat> [[VS2:%.*]], i64 noundef [[VL:%.*]]) #[[ATTR0]] {
+// CHECK-RV64-NEXT: [[ENTRY:.*:]]
+// CHECK-RV64-NEXT: [[TMP0:%.*]] = call <vscale x 32 x bfloat> @llvm.riscv.vfmsac.mask.nxv32bf16.nxv32bf16.i64(<vscale x 32 x bfloat> [[VD]], <vscale x 32 x bfloat> [[VS1]], <vscale x 32 x bfloat> [[VS2]], <vscale x 32 x i1> [[MASK]], i64 7, i64 [[VL]], i64 3)
+// CHECK-RV64-NEXT: ret <vscale x 32 x bfloat> [[TMP0]]
+//
+vbfloat16m8_t test_vfmsac_vv_bf16m8_m(vbool2_t mask, vbfloat16m8_t vd, vbfloat16m8_t vs1, vbfloat16m8_t vs2, size_t vl) {
+ return __riscv_vfmsac(mask, vd, vs1, vs2, vl);
+}
+
+// CHECK-RV64-LABEL: define dso_local <vscale x 32 x bfloat> @test_vfmsac_vf_bf16m8_m(
+// CHECK-RV64-SAME: <vscale x 32 x i1> [[MASK:%.*]], <vscale x 32 x bfloat> [[VD:%.*]], bfloat noundef [[RS1:%.*]], <vscale x 32 x bfloat> [[VS2:%.*]], i64 noundef [[VL:%.*]]) #[[ATTR0]] {
+// CHECK-RV64-NEXT: [[ENTRY:.*:]]
+// CHECK-RV64-NEXT: [[TMP0:%.*]] = call <vscale x 32 x bfloat> @llvm.riscv.vfmsac.mask.nxv32bf16.bf16.i64(<vscale x 32 x bfloat> [[VD]], bfloat [[RS1]], <vscale x 32 x bfloat> [[VS2]], <vscale x 32 x i1> [[MASK]], i64 7, i64 [[VL]], i64 3)
+// CHECK-RV64-NEXT: ret <vscale x 32 x bfloat> [[TMP0]]
+//
+vbfloat16m8_t test_vfmsac_vf_bf16m8_m(vbool2_t mask, vbfloat16m8_t vd, __bf16 rs1, vbfloat16m8_t vs2, size_t vl) {
+ return __riscv_vfmsac(mask, vd, rs1, vs2, vl);
+}
+
diff --git a/clang/test/CodeGen/RISCV/rvv-intrinsics-autogenerated/zvfbfa/non-policy/overloaded/vfmsub.c b/clang/test/CodeGen/RISCV/rvv-intrinsics-autogenerated/zvfbfa/non-policy/overloaded/vfmsub.c
new file mode 100644
index 0000000..306f189
--- /dev/null
+++ b/clang/test/CodeGen/RISCV/rvv-intrinsics-autogenerated/zvfbfa/non-policy/overloaded/vfmsub.c
@@ -0,0 +1,249 @@
+// NOTE: Assertions have been autogenerated by utils/update_cc_test_checks.py UTC_ARGS: --version 5
+// REQUIRES: riscv-registered-target
+// RUN: %clang_cc1 -triple riscv64 -target-feature +v \
+// RUN: -target-feature +experimental-zvfbfa -disable-O0-optnone \
+// RUN: -emit-llvm %s -o - | opt -S -passes=mem2reg | \
+// RUN: FileCheck --check-prefix=CHECK-RV64 %s
+
+#include <riscv_vector.h>
+
+// CHECK-RV64-LABEL: define dso_local <vscale x 1 x bfloat> @test_vfmsub_vv_bf16mf4(
+// CHECK-RV64-SAME: <vscale x 1 x bfloat> [[VD:%.*]], <vscale x 1 x bfloat> [[VS1:%.*]], <vscale x 1 x bfloat> [[VS2:%.*]], i64 noundef [[VL:%.*]]) #[[ATTR0:[0-9]+]] {
+// CHECK-RV64-NEXT: [[ENTRY:.*:]]
+// CHECK-RV64-NEXT: [[TMP0:%.*]] = call <vscale x 1 x bfloat> @llvm.riscv.vfmsub.nxv1bf16.nxv1bf16.i64(<vscale x 1 x bfloat> [[VD]], <vscale x 1 x bfloat> [[VS1]], <vscale x 1 x bfloat> [[VS2]], i64 7, i64 [[VL]], i64 3)
+// CHECK-RV64-NEXT: ret <vscale x 1 x bfloat> [[TMP0]]
+//
+vbfloat16mf4_t test_vfmsub_vv_bf16mf4(vbfloat16mf4_t vd, vbfloat16mf4_t vs1, vbfloat16mf4_t vs2, size_t vl) {
+ return __riscv_vfmsub(vd, vs1, vs2, vl);
+}
+
+// CHECK-RV64-LABEL: define dso_local <vscale x 1 x bfloat> @test_vfmsub_vf_bf16mf4(
+// CHECK-RV64-SAME: <vscale x 1 x bfloat> [[VD:%.*]], bfloat noundef [[RS1:%.*]], <vscale x 1 x bfloat> [[VS2:%.*]], i64 noundef [[VL:%.*]]) #[[ATTR0]] {
+// CHECK-RV64-NEXT: [[ENTRY:.*:]]
+// CHECK-RV64-NEXT: [[TMP0:%.*]] = call <vscale x 1 x bfloat> @llvm.riscv.vfmsub.nxv1bf16.bf16.i64(<vscale x 1 x bfloat> [[VD]], bfloat [[RS1]], <vscale x 1 x bfloat> [[VS2]], i64 7, i64 [[VL]], i64 3)
+// CHECK-RV64-NEXT: ret <vscale x 1 x bfloat> [[TMP0]]
+//
+vbfloat16mf4_t test_vfmsub_vf_bf16mf4(vbfloat16mf4_t vd, __bf16 rs1, vbfloat16mf4_t vs2, size_t vl) {
+ return __riscv_vfmsub(vd, rs1, vs2, vl);
+}
+
+// CHECK-RV64-LABEL: define dso_local <vscale x 2 x bfloat> @test_vfmsub_vv_bf16mf2(
+// CHECK-RV64-SAME: <vscale x 2 x bfloat> [[VD:%.*]], <vscale x 2 x bfloat> [[VS1:%.*]], <vscale x 2 x bfloat> [[VS2:%.*]], i64 noundef [[VL:%.*]]) #[[ATTR0]] {
+// CHECK-RV64-NEXT: [[ENTRY:.*:]]
+// CHECK-RV64-NEXT: [[TMP0:%.*]] = call <vscale x 2 x bfloat> @llvm.riscv.vfmsub.nxv2bf16.nxv2bf16.i64(<vscale x 2 x bfloat> [[VD]], <vscale x 2 x bfloat> [[VS1]], <vscale x 2 x bfloat> [[VS2]], i64 7, i64 [[VL]], i64 3)
+// CHECK-RV64-NEXT: ret <vscale x 2 x bfloat> [[TMP0]]
+//
+vbfloat16mf2_t test_vfmsub_vv_bf16mf2(vbfloat16mf2_t vd, vbfloat16mf2_t vs1, vbfloat16mf2_t vs2, size_t vl) {
+ return __riscv_vfmsub(vd, vs1, vs2, vl);
+}
+
+// CHECK-RV64-LABEL: define dso_local <vscale x 2 x bfloat> @test_vfmsub_vf_bf16mf2(
+// CHECK-RV64-SAME: <vscale x 2 x bfloat> [[VD:%.*]], bfloat noundef [[RS1:%.*]], <vscale x 2 x bfloat> [[VS2:%.*]], i64 noundef [[VL:%.*]]) #[[ATTR0]] {
+// CHECK-RV64-NEXT: [[ENTRY:.*:]]
+// CHECK-RV64-NEXT: [[TMP0:%.*]] = call <vscale x 2 x bfloat> @llvm.riscv.vfmsub.nxv2bf16.bf16.i64(<vscale x 2 x bfloat> [[VD]], bfloat [[RS1]], <vscale x 2 x bfloat> [[VS2]], i64 7, i64 [[VL]], i64 3)
+// CHECK-RV64-NEXT: ret <vscale x 2 x bfloat> [[TMP0]]
+//
+vbfloat16mf2_t test_vfmsub_vf_bf16mf2(vbfloat16mf2_t vd, __bf16 rs1, vbfloat16mf2_t vs2, size_t vl) {
+ return __riscv_vfmsub(vd, rs1, vs2, vl);
+}
+
+// CHECK-RV64-LABEL: define dso_local <vscale x 4 x bfloat> @test_vfmsub_vv_bf16m1(
+// CHECK-RV64-SAME: <vscale x 4 x bfloat> [[VD:%.*]], <vscale x 4 x bfloat> [[VS1:%.*]], <vscale x 4 x bfloat> [[VS2:%.*]], i64 noundef [[VL:%.*]]) #[[ATTR0]] {
+// CHECK-RV64-NEXT: [[ENTRY:.*:]]
+// CHECK-RV64-NEXT: [[TMP0:%.*]] = call <vscale x 4 x bfloat> @llvm.riscv.vfmsub.nxv4bf16.nxv4bf16.i64(<vscale x 4 x bfloat> [[VD]], <vscale x 4 x bfloat> [[VS1]], <vscale x 4 x bfloat> [[VS2]], i64 7, i64 [[VL]], i64 3)
+// CHECK-RV64-NEXT: ret <vscale x 4 x bfloat> [[TMP0]]
+//
+vbfloat16m1_t test_vfmsub_vv_bf16m1(vbfloat16m1_t vd, vbfloat16m1_t vs1, vbfloat16m1_t vs2, size_t vl) {
+ return __riscv_vfmsub(vd, vs1, vs2, vl);
+}
+
+// CHECK-RV64-LABEL: define dso_local <vscale x 4 x bfloat> @test_vfmsub_vf_bf16m1(
+// CHECK-RV64-SAME: <vscale x 4 x bfloat> [[VD:%.*]], bfloat noundef [[RS1:%.*]], <vscale x 4 x bfloat> [[VS2:%.*]], i64 noundef [[VL:%.*]]) #[[ATTR0]] {
+// CHECK-RV64-NEXT: [[ENTRY:.*:]]
+// CHECK-RV64-NEXT: [[TMP0:%.*]] = call <vscale x 4 x bfloat> @llvm.riscv.vfmsub.nxv4bf16.bf16.i64(<vscale x 4 x bfloat> [[VD]], bfloat [[RS1]], <vscale x 4 x bfloat> [[VS2]], i64 7, i64 [[VL]], i64 3)
+// CHECK-RV64-NEXT: ret <vscale x 4 x bfloat> [[TMP0]]
+//
+vbfloat16m1_t test_vfmsub_vf_bf16m1(vbfloat16m1_t vd, __bf16 rs1, vbfloat16m1_t vs2, size_t vl) {
+ return __riscv_vfmsub(vd, rs1, vs2, vl);
+}
+
+// CHECK-RV64-LABEL: define dso_local <vscale x 8 x bfloat> @test_vfmsub_vv_bf16m2(
+// CHECK-RV64-SAME: <vscale x 8 x bfloat> [[VD:%.*]], <vscale x 8 x bfloat> [[VS1:%.*]], <vscale x 8 x bfloat> [[VS2:%.*]], i64 noundef [[VL:%.*]]) #[[ATTR0]] {
+// CHECK-RV64-NEXT: [[ENTRY:.*:]]
+// CHECK-RV64-NEXT: [[TMP0:%.*]] = call <vscale x 8 x bfloat> @llvm.riscv.vfmsub.nxv8bf16.nxv8bf16.i64(<vscale x 8 x bfloat> [[VD]], <vscale x 8 x bfloat> [[VS1]], <vscale x 8 x bfloat> [[VS2]], i64 7, i64 [[VL]], i64 3)
+// CHECK-RV64-NEXT: ret <vscale x 8 x bfloat> [[TMP0]]
+//
+vbfloat16m2_t test_vfmsub_vv_bf16m2(vbfloat16m2_t vd, vbfloat16m2_t vs1, vbfloat16m2_t vs2, size_t vl) {
+ return __riscv_vfmsub(vd, vs1, vs2, vl);
+}
+
+// CHECK-RV64-LABEL: define dso_local <vscale x 8 x bfloat> @test_vfmsub_vf_bf16m2(
+// CHECK-RV64-SAME: <vscale x 8 x bfloat> [[VD:%.*]], bfloat noundef [[RS1:%.*]], <vscale x 8 x bfloat> [[VS2:%.*]], i64 noundef [[VL:%.*]]) #[[ATTR0]] {
+// CHECK-RV64-NEXT: [[ENTRY:.*:]]
+// CHECK-RV64-NEXT: [[TMP0:%.*]] = call <vscale x 8 x bfloat> @llvm.riscv.vfmsub.nxv8bf16.bf16.i64(<vscale x 8 x bfloat> [[VD]], bfloat [[RS1]], <vscale x 8 x bfloat> [[VS2]], i64 7, i64 [[VL]], i64 3)
+// CHECK-RV64-NEXT: ret <vscale x 8 x bfloat> [[TMP0]]
+//
+vbfloat16m2_t test_vfmsub_vf_bf16m2(vbfloat16m2_t vd, __bf16 rs1, vbfloat16m2_t vs2, size_t vl) {
+ return __riscv_vfmsub(vd, rs1, vs2, vl);
+}
+
+// CHECK-RV64-LABEL: define dso_local <vscale x 16 x bfloat> @test_vfmsub_vv_bf16m4(
+// CHECK-RV64-SAME: <vscale x 16 x bfloat> [[VD:%.*]], <vscale x 16 x bfloat> [[VS1:%.*]], <vscale x 16 x bfloat> [[VS2:%.*]], i64 noundef [[VL:%.*]]) #[[ATTR0]] {
+// CHECK-RV64-NEXT: [[ENTRY:.*:]]
+// CHECK-RV64-NEXT: [[TMP0:%.*]] = call <vscale x 16 x bfloat> @llvm.riscv.vfmsub.nxv16bf16.nxv16bf16.i64(<vscale x 16 x bfloat> [[VD]], <vscale x 16 x bfloat> [[VS1]], <vscale x 16 x bfloat> [[VS2]], i64 7, i64 [[VL]], i64 3)
+// CHECK-RV64-NEXT: ret <vscale x 16 x bfloat> [[TMP0]]
+//
+vbfloat16m4_t test_vfmsub_vv_bf16m4(vbfloat16m4_t vd, vbfloat16m4_t vs1, vbfloat16m4_t vs2, size_t vl) {
+ return __riscv_vfmsub(vd, vs1, vs2, vl);
+}
+
+// CHECK-RV64-LABEL: define dso_local <vscale x 16 x bfloat> @test_vfmsub_vf_bf16m4(
+// CHECK-RV64-SAME: <vscale x 16 x bfloat> [[VD:%.*]], bfloat noundef [[RS1:%.*]], <vscale x 16 x bfloat> [[VS2:%.*]], i64 noundef [[VL:%.*]]) #[[ATTR0]] {
+// CHECK-RV64-NEXT: [[ENTRY:.*:]]
+// CHECK-RV64-NEXT: [[TMP0:%.*]] = call <vscale x 16 x bfloat> @llvm.riscv.vfmsub.nxv16bf16.bf16.i64(<vscale x 16 x bfloat> [[VD]], bfloat [[RS1]], <vscale x 16 x bfloat> [[VS2]], i64 7, i64 [[VL]], i64 3)
+// CHECK-RV64-NEXT: ret <vscale x 16 x bfloat> [[TMP0]]
+//
+vbfloat16m4_t test_vfmsub_vf_bf16m4(vbfloat16m4_t vd, __bf16 rs1, vbfloat16m4_t vs2, size_t vl) {
+ return __riscv_vfmsub(vd, rs1, vs2, vl);
+}
+
+// CHECK-RV64-LABEL: define dso_local <vscale x 32 x bfloat> @test_vfmsub_vv_bf16m8(
+// CHECK-RV64-SAME: <vscale x 32 x bfloat> [[VD:%.*]], <vscale x 32 x bfloat> [[VS1:%.*]], <vscale x 32 x bfloat> [[VS2:%.*]], i64 noundef [[VL:%.*]]) #[[ATTR0]] {
+// CHECK-RV64-NEXT: [[ENTRY:.*:]]
+// CHECK-RV64-NEXT: [[TMP0:%.*]] = call <vscale x 32 x bfloat> @llvm.riscv.vfmsub.nxv32bf16.nxv32bf16.i64(<vscale x 32 x bfloat> [[VD]], <vscale x 32 x bfloat> [[VS1]], <vscale x 32 x bfloat> [[VS2]], i64 7, i64 [[VL]], i64 3)
+// CHECK-RV64-NEXT: ret <vscale x 32 x bfloat> [[TMP0]]
+//
+vbfloat16m8_t test_vfmsub_vv_bf16m8(vbfloat16m8_t vd, vbfloat16m8_t vs1, vbfloat16m8_t vs2, size_t vl) {
+ return __riscv_vfmsub(vd, vs1, vs2, vl);
+}
+
+// CHECK-RV64-LABEL: define dso_local <vscale x 32 x bfloat> @test_vfmsub_vf_bf16m8(
+// CHECK-RV64-SAME: <vscale x 32 x bfloat> [[VD:%.*]], bfloat noundef [[RS1:%.*]], <vscale x 32 x bfloat> [[VS2:%.*]], i64 noundef [[VL:%.*]]) #[[ATTR0]] {
+// CHECK-RV64-NEXT: [[ENTRY:.*:]]
+// CHECK-RV64-NEXT: [[TMP0:%.*]] = call <vscale x 32 x bfloat> @llvm.riscv.vfmsub.nxv32bf16.bf16.i64(<vscale x 32 x bfloat> [[VD]], bfloat [[RS1]], <vscale x 32 x bfloat> [[VS2]], i64 7, i64 [[VL]], i64 3)
+// CHECK-RV64-NEXT: ret <vscale x 32 x bfloat> [[TMP0]]
+//
+vbfloat16m8_t test_vfmsub_vf_bf16m8(vbfloat16m8_t vd, __bf16 rs1, vbfloat16m8_t vs2, size_t vl) {
+ return __riscv_vfmsub(vd, rs1, vs2, vl);
+}
+
+// CHECK-RV64-LABEL: define dso_local <vscale x 1 x bfloat> @test_vfmsub_vv_bf16mf4_m(
+// CHECK-RV64-SAME: <vscale x 1 x i1> [[MASK:%.*]], <vscale x 1 x bfloat> [[VD:%.*]], <vscale x 1 x bfloat> [[VS1:%.*]], <vscale x 1 x bfloat> [[VS2:%.*]], i64 noundef [[VL:%.*]]) #[[ATTR0]] {
+// CHECK-RV64-NEXT: [[ENTRY:.*:]]
+// CHECK-RV64-NEXT: [[TMP0:%.*]] = call <vscale x 1 x bfloat> @llvm.riscv.vfmsub.mask.nxv1bf16.nxv1bf16.i64(<vscale x 1 x bfloat> [[VD]], <vscale x 1 x bfloat> [[VS1]], <vscale x 1 x bfloat> [[VS2]], <vscale x 1 x i1> [[MASK]], i64 7, i64 [[VL]], i64 3)
+// CHECK-RV64-NEXT: ret <vscale x 1 x bfloat> [[TMP0]]
+//
+vbfloat16mf4_t test_vfmsub_vv_bf16mf4_m(vbool64_t mask, vbfloat16mf4_t vd, vbfloat16mf4_t vs1, vbfloat16mf4_t vs2, size_t vl) {
+ return __riscv_vfmsub(mask, vd, vs1, vs2, vl);
+}
+
+// CHECK-RV64-LABEL: define dso_local <vscale x 1 x bfloat> @test_vfmsub_vf_bf16mf4_m(
+// CHECK-RV64-SAME: <vscale x 1 x i1> [[MASK:%.*]], <vscale x 1 x bfloat> [[VD:%.*]], bfloat noundef [[RS1:%.*]], <vscale x 1 x bfloat> [[VS2:%.*]], i64 noundef [[VL:%.*]]) #[[ATTR0]] {
+// CHECK-RV64-NEXT: [[ENTRY:.*:]]
+// CHECK-RV64-NEXT: [[TMP0:%.*]] = call <vscale x 1 x bfloat> @llvm.riscv.vfmsub.mask.nxv1bf16.bf16.i64(<vscale x 1 x bfloat> [[VD]], bfloat [[RS1]], <vscale x 1 x bfloat> [[VS2]], <vscale x 1 x i1> [[MASK]], i64 7, i64 [[VL]], i64 3)
+// CHECK-RV64-NEXT: ret <vscale x 1 x bfloat> [[TMP0]]
+//
+vbfloat16mf4_t test_vfmsub_vf_bf16mf4_m(vbool64_t mask, vbfloat16mf4_t vd, __bf16 rs1, vbfloat16mf4_t vs2, size_t vl) {
+ return __riscv_vfmsub(mask, vd, rs1, vs2, vl);
+}
+
+// CHECK-RV64-LABEL: define dso_local <vscale x 2 x bfloat> @test_vfmsub_vv_bf16mf2_m(
+// CHECK-RV64-SAME: <vscale x 2 x i1> [[MASK:%.*]], <vscale x 2 x bfloat> [[VD:%.*]], <vscale x 2 x bfloat> [[VS1:%.*]], <vscale x 2 x bfloat> [[VS2:%.*]], i64 noundef [[VL:%.*]]) #[[ATTR0]] {
+// CHECK-RV64-NEXT: [[ENTRY:.*:]]
+// CHECK-RV64-NEXT: [[TMP0:%.*]] = call <vscale x 2 x bfloat> @llvm.riscv.vfmsub.mask.nxv2bf16.nxv2bf16.i64(<vscale x 2 x bfloat> [[VD]], <vscale x 2 x bfloat> [[VS1]], <vscale x 2 x bfloat> [[VS2]], <vscale x 2 x i1> [[MASK]], i64 7, i64 [[VL]], i64 3)
+// CHECK-RV64-NEXT: ret <vscale x 2 x bfloat> [[TMP0]]
+//
+vbfloat16mf2_t test_vfmsub_vv_bf16mf2_m(vbool32_t mask, vbfloat16mf2_t vd, vbfloat16mf2_t vs1, vbfloat16mf2_t vs2, size_t vl) {
+ return __riscv_vfmsub(mask, vd, vs1, vs2, vl);
+}
+
+// CHECK-RV64-LABEL: define dso_local <vscale x 2 x bfloat> @test_vfmsub_vf_bf16mf2_m(
+// CHECK-RV64-SAME: <vscale x 2 x i1> [[MASK:%.*]], <vscale x 2 x bfloat> [[VD:%.*]], bfloat noundef [[RS1:%.*]], <vscale x 2 x bfloat> [[VS2:%.*]], i64 noundef [[VL:%.*]]) #[[ATTR0]] {
+// CHECK-RV64-NEXT: [[ENTRY:.*:]]
+// CHECK-RV64-NEXT: [[TMP0:%.*]] = call <vscale x 2 x bfloat> @llvm.riscv.vfmsub.mask.nxv2bf16.bf16.i64(<vscale x 2 x bfloat> [[VD]], bfloat [[RS1]], <vscale x 2 x bfloat> [[VS2]], <vscale x 2 x i1> [[MASK]], i64 7, i64 [[VL]], i64 3)
+// CHECK-RV64-NEXT: ret <vscale x 2 x bfloat> [[TMP0]]
+//
+vbfloat16mf2_t test_vfmsub_vf_bf16mf2_m(vbool32_t mask, vbfloat16mf2_t vd, __bf16 rs1, vbfloat16mf2_t vs2, size_t vl) {
+ return __riscv_vfmsub(mask, vd, rs1, vs2, vl);
+}
+
+// CHECK-RV64-LABEL: define dso_local <vscale x 4 x bfloat> @test_vfmsub_vv_bf16m1_m(
+// CHECK-RV64-SAME: <vscale x 4 x i1> [[MASK:%.*]], <vscale x 4 x bfloat> [[VD:%.*]], <vscale x 4 x bfloat> [[VS1:%.*]], <vscale x 4 x bfloat> [[VS2:%.*]], i64 noundef [[VL:%.*]]) #[[ATTR0]] {
+// CHECK-RV64-NEXT: [[ENTRY:.*:]]
+// CHECK-RV64-NEXT: [[TMP0:%.*]] = call <vscale x 4 x bfloat> @llvm.riscv.vfmsub.mask.nxv4bf16.nxv4bf16.i64(<vscale x 4 x bfloat> [[VD]], <vscale x 4 x bfloat> [[VS1]], <vscale x 4 x bfloat> [[VS2]], <vscale x 4 x i1> [[MASK]], i64 7, i64 [[VL]], i64 3)
+// CHECK-RV64-NEXT: ret <vscale x 4 x bfloat> [[TMP0]]
+//
+vbfloat16m1_t test_vfmsub_vv_bf16m1_m(vbool16_t mask, vbfloat16m1_t vd, vbfloat16m1_t vs1, vbfloat16m1_t vs2, size_t vl) {
+ return __riscv_vfmsub(mask, vd, vs1, vs2, vl);
+}
+
+// CHECK-RV64-LABEL: define dso_local <vscale x 4 x bfloat> @test_vfmsub_vf_bf16m1_m(
+// CHECK-RV64-SAME: <vscale x 4 x i1> [[MASK:%.*]], <vscale x 4 x bfloat> [[VD:%.*]], bfloat noundef [[RS1:%.*]], <vscale x 4 x bfloat> [[VS2:%.*]], i64 noundef [[VL:%.*]]) #[[ATTR0]] {
+// CHECK-RV64-NEXT: [[ENTRY:.*:]]
+// CHECK-RV64-NEXT: [[TMP0:%.*]] = call <vscale x 4 x bfloat> @llvm.riscv.vfmsub.mask.nxv4bf16.bf16.i64(<vscale x 4 x bfloat> [[VD]], bfloat [[RS1]], <vscale x 4 x bfloat> [[VS2]], <vscale x 4 x i1> [[MASK]], i64 7, i64 [[VL]], i64 3)
+// CHECK-RV64-NEXT: ret <vscale x 4 x bfloat> [[TMP0]]
+//
+vbfloat16m1_t test_vfmsub_vf_bf16m1_m(vbool16_t mask, vbfloat16m1_t vd, __bf16 rs1, vbfloat16m1_t vs2, size_t vl) {
+ return __riscv_vfmsub(mask, vd, rs1, vs2, vl);
+}
+
+// CHECK-RV64-LABEL: define dso_local <vscale x 8 x bfloat> @test_vfmsub_vv_bf16m2_m(
+// CHECK-RV64-SAME: <vscale x 8 x i1> [[MASK:%.*]], <vscale x 8 x bfloat> [[VD:%.*]], <vscale x 8 x bfloat> [[VS1:%.*]], <vscale x 8 x bfloat> [[VS2:%.*]], i64 noundef [[VL:%.*]]) #[[ATTR0]] {
+// CHECK-RV64-NEXT: [[ENTRY:.*:]]
+// CHECK-RV64-NEXT: [[TMP0:%.*]] = call <vscale x 8 x bfloat> @llvm.riscv.vfmsub.mask.nxv8bf16.nxv8bf16.i64(<vscale x 8 x bfloat> [[VD]], <vscale x 8 x bfloat> [[VS1]], <vscale x 8 x bfloat> [[VS2]], <vscale x 8 x i1> [[MASK]], i64 7, i64 [[VL]], i64 3)
+// CHECK-RV64-NEXT: ret <vscale x 8 x bfloat> [[TMP0]]
+//
+vbfloat16m2_t test_vfmsub_vv_bf16m2_m(vbool8_t mask, vbfloat16m2_t vd, vbfloat16m2_t vs1, vbfloat16m2_t vs2, size_t vl) {
+ return __riscv_vfmsub(mask, vd, vs1, vs2, vl);
+}
+
+// CHECK-RV64-LABEL: define dso_local <vscale x 8 x bfloat> @test_vfmsub_vf_bf16m2_m(
+// CHECK-RV64-SAME: <vscale x 8 x i1> [[MASK:%.*]], <vscale x 8 x bfloat> [[VD:%.*]], bfloat noundef [[RS1:%.*]], <vscale x 8 x bfloat> [[VS2:%.*]], i64 noundef [[VL:%.*]]) #[[ATTR0]] {
+// CHECK-RV64-NEXT: [[ENTRY:.*:]]
+// CHECK-RV64-NEXT: [[TMP0:%.*]] = call <vscale x 8 x bfloat> @llvm.riscv.vfmsub.mask.nxv8bf16.bf16.i64(<vscale x 8 x bfloat> [[VD]], bfloat [[RS1]], <vscale x 8 x bfloat> [[VS2]], <vscale x 8 x i1> [[MASK]], i64 7, i64 [[VL]], i64 3)
+// CHECK-RV64-NEXT: ret <vscale x 8 x bfloat> [[TMP0]]
+//
+vbfloat16m2_t test_vfmsub_vf_bf16m2_m(vbool8_t mask, vbfloat16m2_t vd, __bf16 rs1, vbfloat16m2_t vs2, size_t vl) {
+ return __riscv_vfmsub(mask, vd, rs1, vs2, vl);
+}
+
+// CHECK-RV64-LABEL: define dso_local <vscale x 16 x bfloat> @test_vfmsub_vv_bf16m4_m(
+// CHECK-RV64-SAME: <vscale x 16 x i1> [[MASK:%.*]], <vscale x 16 x bfloat> [[VD:%.*]], <vscale x 16 x bfloat> [[VS1:%.*]], <vscale x 16 x bfloat> [[VS2:%.*]], i64 noundef [[VL:%.*]]) #[[ATTR0]] {
+// CHECK-RV64-NEXT: [[ENTRY:.*:]]
+// CHECK-RV64-NEXT: [[TMP0:%.*]] = call <vscale x 16 x bfloat> @llvm.riscv.vfmsub.mask.nxv16bf16.nxv16bf16.i64(<vscale x 16 x bfloat> [[VD]], <vscale x 16 x bfloat> [[VS1]], <vscale x 16 x bfloat> [[VS2]], <vscale x 16 x i1> [[MASK]], i64 7, i64 [[VL]], i64 3)
+// CHECK-RV64-NEXT: ret <vscale x 16 x bfloat> [[TMP0]]
+//
+vbfloat16m4_t test_vfmsub_vv_bf16m4_m(vbool4_t mask, vbfloat16m4_t vd, vbfloat16m4_t vs1, vbfloat16m4_t vs2, size_t vl) {
+ return __riscv_vfmsub(mask, vd, vs1, vs2, vl);
+}
+
+// CHECK-RV64-LABEL: define dso_local <vscale x 16 x bfloat> @test_vfmsub_vf_bf16m4_m(
+// CHECK-RV64-SAME: <vscale x 16 x i1> [[MASK:%.*]], <vscale x 16 x bfloat> [[VD:%.*]], bfloat noundef [[RS1:%.*]], <vscale x 16 x bfloat> [[VS2:%.*]], i64 noundef [[VL:%.*]]) #[[ATTR0]] {
+// CHECK-RV64-NEXT: [[ENTRY:.*:]]
+// CHECK-RV64-NEXT: [[TMP0:%.*]] = call <vscale x 16 x bfloat> @llvm.riscv.vfmsub.mask.nxv16bf16.bf16.i64(<vscale x 16 x bfloat> [[VD]], bfloat [[RS1]], <vscale x 16 x bfloat> [[VS2]], <vscale x 16 x i1> [[MASK]], i64 7, i64 [[VL]], i64 3)
+// CHECK-RV64-NEXT: ret <vscale x 16 x bfloat> [[TMP0]]
+//
+vbfloat16m4_t test_vfmsub_vf_bf16m4_m(vbool4_t mask, vbfloat16m4_t vd, __bf16 rs1, vbfloat16m4_t vs2, size_t vl) {
+ return __riscv_vfmsub(mask, vd, rs1, vs2, vl);
+}
+
+// CHECK-RV64-LABEL: define dso_local <vscale x 32 x bfloat> @test_vfmsub_vv_bf16m8_m(
+// CHECK-RV64-SAME: <vscale x 32 x i1> [[MASK:%.*]], <vscale x 32 x bfloat> [[VD:%.*]], <vscale x 32 x bfloat> [[VS1:%.*]], <vscale x 32 x bfloat> [[VS2:%.*]], i64 noundef [[VL:%.*]]) #[[ATTR0]] {
+// CHECK-RV64-NEXT: [[ENTRY:.*:]]
+// CHECK-RV64-NEXT: [[TMP0:%.*]] = call <vscale x 32 x bfloat> @llvm.riscv.vfmsub.mask.nxv32bf16.nxv32bf16.i64(<vscale x 32 x bfloat> [[VD]], <vscale x 32 x bfloat> [[VS1]], <vscale x 32 x bfloat> [[VS2]], <vscale x 32 x i1> [[MASK]], i64 7, i64 [[VL]], i64 3)
+// CHECK-RV64-NEXT: ret <vscale x 32 x bfloat> [[TMP0]]
+//
+vbfloat16m8_t test_vfmsub_vv_bf16m8_m(vbool2_t mask, vbfloat16m8_t vd, vbfloat16m8_t vs1, vbfloat16m8_t vs2, size_t vl) {
+ return __riscv_vfmsub(mask, vd, vs1, vs2, vl);
+}
+
+// CHECK-RV64-LABEL: define dso_local <vscale x 32 x bfloat> @test_vfmsub_vf_bf16m8_m(
+// CHECK-RV64-SAME: <vscale x 32 x i1> [[MASK:%.*]], <vscale x 32 x bfloat> [[VD:%.*]], bfloat noundef [[RS1:%.*]], <vscale x 32 x bfloat> [[VS2:%.*]], i64 noundef [[VL:%.*]]) #[[ATTR0]] {
+// CHECK-RV64-NEXT: [[ENTRY:.*:]]
+// CHECK-RV64-NEXT: [[TMP0:%.*]] = call <vscale x 32 x bfloat> @llvm.riscv.vfmsub.mask.nxv32bf16.bf16.i64(<vscale x 32 x bfloat> [[VD]], bfloat [[RS1]], <vscale x 32 x bfloat> [[VS2]], <vscale x 32 x i1> [[MASK]], i64 7, i64 [[VL]], i64 3)
+// CHECK-RV64-NEXT: ret <vscale x 32 x bfloat> [[TMP0]]
+//
+vbfloat16m8_t test_vfmsub_vf_bf16m8_m(vbool2_t mask, vbfloat16m8_t vd, __bf16 rs1, vbfloat16m8_t vs2, size_t vl) {
+ return __riscv_vfmsub(mask, vd, rs1, vs2, vl);
+}
+
diff --git a/clang/test/CodeGen/RISCV/rvv-intrinsics-autogenerated/zvfbfa/non-policy/overloaded/vfmul.c b/clang/test/CodeGen/RISCV/rvv-intrinsics-autogenerated/zvfbfa/non-policy/overloaded/vfmul.c
new file mode 100644
index 0000000..fffd83a
--- /dev/null
+++ b/clang/test/CodeGen/RISCV/rvv-intrinsics-autogenerated/zvfbfa/non-policy/overloaded/vfmul.c
@@ -0,0 +1,249 @@
+// NOTE: Assertions have been autogenerated by utils/update_cc_test_checks.py UTC_ARGS: --version 5
+// REQUIRES: riscv-registered-target
+// RUN: %clang_cc1 -triple riscv64 -target-feature +v \
+// RUN: -target-feature +experimental-zvfbfa -disable-O0-optnone \
+// RUN: -emit-llvm %s -o - | opt -S -passes=mem2reg | \
+// RUN: FileCheck --check-prefix=CHECK-RV64 %s
+
+#include <riscv_vector.h>
+
+// CHECK-RV64-LABEL: define dso_local <vscale x 1 x bfloat> @test_vfmul_vv_bf16mf4(
+// CHECK-RV64-SAME: <vscale x 1 x bfloat> [[OP1:%.*]], <vscale x 1 x bfloat> [[OP2:%.*]], i64 noundef [[VL:%.*]]) #[[ATTR0:[0-9]+]] {
+// CHECK-RV64-NEXT: [[ENTRY:.*:]]
+// CHECK-RV64-NEXT: [[TMP0:%.*]] = call <vscale x 1 x bfloat> @llvm.riscv.vfmul.nxv1bf16.nxv1bf16.i64(<vscale x 1 x bfloat> poison, <vscale x 1 x bfloat> [[OP1]], <vscale x 1 x bfloat> [[OP2]], i64 7, i64 [[VL]])
+// CHECK-RV64-NEXT: ret <vscale x 1 x bfloat> [[TMP0]]
+//
+vbfloat16mf4_t test_vfmul_vv_bf16mf4(vbfloat16mf4_t op1, vbfloat16mf4_t op2, size_t vl) {
+ return __riscv_vfmul(op1, op2, vl);
+}
+
+// CHECK-RV64-LABEL: define dso_local <vscale x 1 x bfloat> @test_vfmul_vf_bf16mf4(
+// CHECK-RV64-SAME: <vscale x 1 x bfloat> [[OP1:%.*]], bfloat noundef [[OP2:%.*]], i64 noundef [[VL:%.*]]) #[[ATTR0]] {
+// CHECK-RV64-NEXT: [[ENTRY:.*:]]
+// CHECK-RV64-NEXT: [[TMP0:%.*]] = call <vscale x 1 x bfloat> @llvm.riscv.vfmul.nxv1bf16.bf16.i64(<vscale x 1 x bfloat> poison, <vscale x 1 x bfloat> [[OP1]], bfloat [[OP2]], i64 7, i64 [[VL]])
+// CHECK-RV64-NEXT: ret <vscale x 1 x bfloat> [[TMP0]]
+//
+vbfloat16mf4_t test_vfmul_vf_bf16mf4(vbfloat16mf4_t op1, __bf16 op2, size_t vl) {
+ return __riscv_vfmul(op1, op2, vl);
+}
+
+// CHECK-RV64-LABEL: define dso_local <vscale x 2 x bfloat> @test_vfmul_vv_bf16mf2(
+// CHECK-RV64-SAME: <vscale x 2 x bfloat> [[OP1:%.*]], <vscale x 2 x bfloat> [[OP2:%.*]], i64 noundef [[VL:%.*]]) #[[ATTR0]] {
+// CHECK-RV64-NEXT: [[ENTRY:.*:]]
+// CHECK-RV64-NEXT: [[TMP0:%.*]] = call <vscale x 2 x bfloat> @llvm.riscv.vfmul.nxv2bf16.nxv2bf16.i64(<vscale x 2 x bfloat> poison, <vscale x 2 x bfloat> [[OP1]], <vscale x 2 x bfloat> [[OP2]], i64 7, i64 [[VL]])
+// CHECK-RV64-NEXT: ret <vscale x 2 x bfloat> [[TMP0]]
+//
+vbfloat16mf2_t test_vfmul_vv_bf16mf2(vbfloat16mf2_t op1, vbfloat16mf2_t op2, size_t vl) {
+ return __riscv_vfmul(op1, op2, vl);
+}
+
+// CHECK-RV64-LABEL: define dso_local <vscale x 2 x bfloat> @test_vfmul_vf_bf16mf2(
+// CHECK-RV64-SAME: <vscale x 2 x bfloat> [[OP1:%.*]], bfloat noundef [[OP2:%.*]], i64 noundef [[VL:%.*]]) #[[ATTR0]] {
+// CHECK-RV64-NEXT: [[ENTRY:.*:]]
+// CHECK-RV64-NEXT: [[TMP0:%.*]] = call <vscale x 2 x bfloat> @llvm.riscv.vfmul.nxv2bf16.bf16.i64(<vscale x 2 x bfloat> poison, <vscale x 2 x bfloat> [[OP1]], bfloat [[OP2]], i64 7, i64 [[VL]])
+// CHECK-RV64-NEXT: ret <vscale x 2 x bfloat> [[TMP0]]
+//
+vbfloat16mf2_t test_vfmul_vf_bf16mf2(vbfloat16mf2_t op1, __bf16 op2, size_t vl) {
+ return __riscv_vfmul(op1, op2, vl);
+}
+
+// CHECK-RV64-LABEL: define dso_local <vscale x 4 x bfloat> @test_vfmul_vv_bf16m1(
+// CHECK-RV64-SAME: <vscale x 4 x bfloat> [[OP1:%.*]], <vscale x 4 x bfloat> [[OP2:%.*]], i64 noundef [[VL:%.*]]) #[[ATTR0]] {
+// CHECK-RV64-NEXT: [[ENTRY:.*:]]
+// CHECK-RV64-NEXT: [[TMP0:%.*]] = call <vscale x 4 x bfloat> @llvm.riscv.vfmul.nxv4bf16.nxv4bf16.i64(<vscale x 4 x bfloat> poison, <vscale x 4 x bfloat> [[OP1]], <vscale x 4 x bfloat> [[OP2]], i64 7, i64 [[VL]])
+// CHECK-RV64-NEXT: ret <vscale x 4 x bfloat> [[TMP0]]
+//
+vbfloat16m1_t test_vfmul_vv_bf16m1(vbfloat16m1_t op1, vbfloat16m1_t op2, size_t vl) {
+ return __riscv_vfmul(op1, op2, vl);
+}
+
+// CHECK-RV64-LABEL: define dso_local <vscale x 4 x bfloat> @test_vfmul_vf_bf16m1(
+// CHECK-RV64-SAME: <vscale x 4 x bfloat> [[OP1:%.*]], bfloat noundef [[OP2:%.*]], i64 noundef [[VL:%.*]]) #[[ATTR0]] {
+// CHECK-RV64-NEXT: [[ENTRY:.*:]]
+// CHECK-RV64-NEXT: [[TMP0:%.*]] = call <vscale x 4 x bfloat> @llvm.riscv.vfmul.nxv4bf16.bf16.i64(<vscale x 4 x bfloat> poison, <vscale x 4 x bfloat> [[OP1]], bfloat [[OP2]], i64 7, i64 [[VL]])
+// CHECK-RV64-NEXT: ret <vscale x 4 x bfloat> [[TMP0]]
+//
+vbfloat16m1_t test_vfmul_vf_bf16m1(vbfloat16m1_t op1, __bf16 op2, size_t vl) {
+ return __riscv_vfmul(op1, op2, vl);
+}
+
+// CHECK-RV64-LABEL: define dso_local <vscale x 8 x bfloat> @test_vfmul_vv_bf16m2(
+// CHECK-RV64-SAME: <vscale x 8 x bfloat> [[OP1:%.*]], <vscale x 8 x bfloat> [[OP2:%.*]], i64 noundef [[VL:%.*]]) #[[ATTR0]] {
+// CHECK-RV64-NEXT: [[ENTRY:.*:]]
+// CHECK-RV64-NEXT: [[TMP0:%.*]] = call <vscale x 8 x bfloat> @llvm.riscv.vfmul.nxv8bf16.nxv8bf16.i64(<vscale x 8 x bfloat> poison, <vscale x 8 x bfloat> [[OP1]], <vscale x 8 x bfloat> [[OP2]], i64 7, i64 [[VL]])
+// CHECK-RV64-NEXT: ret <vscale x 8 x bfloat> [[TMP0]]
+//
+vbfloat16m2_t test_vfmul_vv_bf16m2(vbfloat16m2_t op1, vbfloat16m2_t op2, size_t vl) {
+ return __riscv_vfmul(op1, op2, vl);
+}
+
+// CHECK-RV64-LABEL: define dso_local <vscale x 8 x bfloat> @test_vfmul_vf_bf16m2(
+// CHECK-RV64-SAME: <vscale x 8 x bfloat> [[OP1:%.*]], bfloat noundef [[OP2:%.*]], i64 noundef [[VL:%.*]]) #[[ATTR0]] {
+// CHECK-RV64-NEXT: [[ENTRY:.*:]]
+// CHECK-RV64-NEXT: [[TMP0:%.*]] = call <vscale x 8 x bfloat> @llvm.riscv.vfmul.nxv8bf16.bf16.i64(<vscale x 8 x bfloat> poison, <vscale x 8 x bfloat> [[OP1]], bfloat [[OP2]], i64 7, i64 [[VL]])
+// CHECK-RV64-NEXT: ret <vscale x 8 x bfloat> [[TMP0]]
+//
+vbfloat16m2_t test_vfmul_vf_bf16m2(vbfloat16m2_t op1, __bf16 op2, size_t vl) {
+ return __riscv_vfmul(op1, op2, vl);
+}
+
+// CHECK-RV64-LABEL: define dso_local <vscale x 16 x bfloat> @test_vfmul_vv_bf16m4(
+// CHECK-RV64-SAME: <vscale x 16 x bfloat> [[OP1:%.*]], <vscale x 16 x bfloat> [[OP2:%.*]], i64 noundef [[VL:%.*]]) #[[ATTR0]] {
+// CHECK-RV64-NEXT: [[ENTRY:.*:]]
+// CHECK-RV64-NEXT: [[TMP0:%.*]] = call <vscale x 16 x bfloat> @llvm.riscv.vfmul.nxv16bf16.nxv16bf16.i64(<vscale x 16 x bfloat> poison, <vscale x 16 x bfloat> [[OP1]], <vscale x 16 x bfloat> [[OP2]], i64 7, i64 [[VL]])
+// CHECK-RV64-NEXT: ret <vscale x 16 x bfloat> [[TMP0]]
+//
+vbfloat16m4_t test_vfmul_vv_bf16m4(vbfloat16m4_t op1, vbfloat16m4_t op2, size_t vl) {
+ return __riscv_vfmul(op1, op2, vl);
+}
+
+// CHECK-RV64-LABEL: define dso_local <vscale x 16 x bfloat> @test_vfmul_vf_bf16m4(
+// CHECK-RV64-SAME: <vscale x 16 x bfloat> [[OP1:%.*]], bfloat noundef [[OP2:%.*]], i64 noundef [[VL:%.*]]) #[[ATTR0]] {
+// CHECK-RV64-NEXT: [[ENTRY:.*:]]
+// CHECK-RV64-NEXT: [[TMP0:%.*]] = call <vscale x 16 x bfloat> @llvm.riscv.vfmul.nxv16bf16.bf16.i64(<vscale x 16 x bfloat> poison, <vscale x 16 x bfloat> [[OP1]], bfloat [[OP2]], i64 7, i64 [[VL]])
+// CHECK-RV64-NEXT: ret <vscale x 16 x bfloat> [[TMP0]]
+//
+vbfloat16m4_t test_vfmul_vf_bf16m4(vbfloat16m4_t op1, __bf16 op2, size_t vl) {
+ return __riscv_vfmul(op1, op2, vl);
+}
+
+// CHECK-RV64-LABEL: define dso_local <vscale x 32 x bfloat> @test_vfmul_vv_bf16m8(
+// CHECK-RV64-SAME: <vscale x 32 x bfloat> [[OP1:%.*]], <vscale x 32 x bfloat> [[OP2:%.*]], i64 noundef [[VL:%.*]]) #[[ATTR0]] {
+// CHECK-RV64-NEXT: [[ENTRY:.*:]]
+// CHECK-RV64-NEXT: [[TMP0:%.*]] = call <vscale x 32 x bfloat> @llvm.riscv.vfmul.nxv32bf16.nxv32bf16.i64(<vscale x 32 x bfloat> poison, <vscale x 32 x bfloat> [[OP1]], <vscale x 32 x bfloat> [[OP2]], i64 7, i64 [[VL]])
+// CHECK-RV64-NEXT: ret <vscale x 32 x bfloat> [[TMP0]]
+//
+vbfloat16m8_t test_vfmul_vv_bf16m8(vbfloat16m8_t op1, vbfloat16m8_t op2, size_t vl) {
+ return __riscv_vfmul(op1, op2, vl);
+}
+
+// CHECK-RV64-LABEL: define dso_local <vscale x 32 x bfloat> @test_vfmul_vf_bf16m8(
+// CHECK-RV64-SAME: <vscale x 32 x bfloat> [[OP1:%.*]], bfloat noundef [[OP2:%.*]], i64 noundef [[VL:%.*]]) #[[ATTR0]] {
+// CHECK-RV64-NEXT: [[ENTRY:.*:]]
+// CHECK-RV64-NEXT: [[TMP0:%.*]] = call <vscale x 32 x bfloat> @llvm.riscv.vfmul.nxv32bf16.bf16.i64(<vscale x 32 x bfloat> poison, <vscale x 32 x bfloat> [[OP1]], bfloat [[OP2]], i64 7, i64 [[VL]])
+// CHECK-RV64-NEXT: ret <vscale x 32 x bfloat> [[TMP0]]
+//
+vbfloat16m8_t test_vfmul_vf_bf16m8(vbfloat16m8_t op1, __bf16 op2, size_t vl) {
+ return __riscv_vfmul(op1, op2, vl);
+}
+
+// CHECK-RV64-LABEL: define dso_local <vscale x 1 x bfloat> @test_vfmul_vv_bf16mf4_m(
+// CHECK-RV64-SAME: <vscale x 1 x i1> [[MASK:%.*]], <vscale x 1 x bfloat> [[OP1:%.*]], <vscale x 1 x bfloat> [[OP2:%.*]], i64 noundef [[VL:%.*]]) #[[ATTR0]] {
+// CHECK-RV64-NEXT: [[ENTRY:.*:]]
+// CHECK-RV64-NEXT: [[TMP0:%.*]] = call <vscale x 1 x bfloat> @llvm.riscv.vfmul.mask.nxv1bf16.nxv1bf16.i64(<vscale x 1 x bfloat> poison, <vscale x 1 x bfloat> [[OP1]], <vscale x 1 x bfloat> [[OP2]], <vscale x 1 x i1> [[MASK]], i64 7, i64 [[VL]], i64 3)
+// CHECK-RV64-NEXT: ret <vscale x 1 x bfloat> [[TMP0]]
+//
+vbfloat16mf4_t test_vfmul_vv_bf16mf4_m(vbool64_t mask, vbfloat16mf4_t op1, vbfloat16mf4_t op2, size_t vl) {
+ return __riscv_vfmul(mask, op1, op2, vl);
+}
+
+// CHECK-RV64-LABEL: define dso_local <vscale x 1 x bfloat> @test_vfmul_vf_bf16mf4_m(
+// CHECK-RV64-SAME: <vscale x 1 x i1> [[MASK:%.*]], <vscale x 1 x bfloat> [[OP1:%.*]], bfloat noundef [[OP2:%.*]], i64 noundef [[VL:%.*]]) #[[ATTR0]] {
+// CHECK-RV64-NEXT: [[ENTRY:.*:]]
+// CHECK-RV64-NEXT: [[TMP0:%.*]] = call <vscale x 1 x bfloat> @llvm.riscv.vfmul.mask.nxv1bf16.bf16.i64(<vscale x 1 x bfloat> poison, <vscale x 1 x bfloat> [[OP1]], bfloat [[OP2]], <vscale x 1 x i1> [[MASK]], i64 7, i64 [[VL]], i64 3)
+// CHECK-RV64-NEXT: ret <vscale x 1 x bfloat> [[TMP0]]
+//
+vbfloat16mf4_t test_vfmul_vf_bf16mf4_m(vbool64_t mask, vbfloat16mf4_t op1, __bf16 op2, size_t vl) {
+ return __riscv_vfmul(mask, op1, op2, vl);
+}
+
+// CHECK-RV64-LABEL: define dso_local <vscale x 2 x bfloat> @test_vfmul_vv_bf16mf2_m(
+// CHECK-RV64-SAME: <vscale x 2 x i1> [[MASK:%.*]], <vscale x 2 x bfloat> [[OP1:%.*]], <vscale x 2 x bfloat> [[OP2:%.*]], i64 noundef [[VL:%.*]]) #[[ATTR0]] {
+// CHECK-RV64-NEXT: [[ENTRY:.*:]]
+// CHECK-RV64-NEXT: [[TMP0:%.*]] = call <vscale x 2 x bfloat> @llvm.riscv.vfmul.mask.nxv2bf16.nxv2bf16.i64(<vscale x 2 x bfloat> poison, <vscale x 2 x bfloat> [[OP1]], <vscale x 2 x bfloat> [[OP2]], <vscale x 2 x i1> [[MASK]], i64 7, i64 [[VL]], i64 3)
+// CHECK-RV64-NEXT: ret <vscale x 2 x bfloat> [[TMP0]]
+//
+vbfloat16mf2_t test_vfmul_vv_bf16mf2_m(vbool32_t mask, vbfloat16mf2_t op1, vbfloat16mf2_t op2, size_t vl) {
+ return __riscv_vfmul(mask, op1, op2, vl);
+}
+
+// CHECK-RV64-LABEL: define dso_local <vscale x 2 x bfloat> @test_vfmul_vf_bf16mf2_m(
+// CHECK-RV64-SAME: <vscale x 2 x i1> [[MASK:%.*]], <vscale x 2 x bfloat> [[OP1:%.*]], bfloat noundef [[OP2:%.*]], i64 noundef [[VL:%.*]]) #[[ATTR0]] {
+// CHECK-RV64-NEXT: [[ENTRY:.*:]]
+// CHECK-RV64-NEXT: [[TMP0:%.*]] = call <vscale x 2 x bfloat> @llvm.riscv.vfmul.mask.nxv2bf16.bf16.i64(<vscale x 2 x bfloat> poison, <vscale x 2 x bfloat> [[OP1]], bfloat [[OP2]], <vscale x 2 x i1> [[MASK]], i64 7, i64 [[VL]], i64 3)
+// CHECK-RV64-NEXT: ret <vscale x 2 x bfloat> [[TMP0]]
+//
+vbfloat16mf2_t test_vfmul_vf_bf16mf2_m(vbool32_t mask, vbfloat16mf2_t op1, __bf16 op2, size_t vl) {
+ return __riscv_vfmul(mask, op1, op2, vl);
+}
+
+// CHECK-RV64-LABEL: define dso_local <vscale x 4 x bfloat> @test_vfmul_vv_bf16m1_m(
+// CHECK-RV64-SAME: <vscale x 4 x i1> [[MASK:%.*]], <vscale x 4 x bfloat> [[OP1:%.*]], <vscale x 4 x bfloat> [[OP2:%.*]], i64 noundef [[VL:%.*]]) #[[ATTR0]] {
+// CHECK-RV64-NEXT: [[ENTRY:.*:]]
+// CHECK-RV64-NEXT: [[TMP0:%.*]] = call <vscale x 4 x bfloat> @llvm.riscv.vfmul.mask.nxv4bf16.nxv4bf16.i64(<vscale x 4 x bfloat> poison, <vscale x 4 x bfloat> [[OP1]], <vscale x 4 x bfloat> [[OP2]], <vscale x 4 x i1> [[MASK]], i64 7, i64 [[VL]], i64 3)
+// CHECK-RV64-NEXT: ret <vscale x 4 x bfloat> [[TMP0]]
+//
+vbfloat16m1_t test_vfmul_vv_bf16m1_m(vbool16_t mask, vbfloat16m1_t op1, vbfloat16m1_t op2, size_t vl) {
+ return __riscv_vfmul(mask, op1, op2, vl);
+}
+
+// CHECK-RV64-LABEL: define dso_local <vscale x 4 x bfloat> @test_vfmul_vf_bf16m1_m(
+// CHECK-RV64-SAME: <vscale x 4 x i1> [[MASK:%.*]], <vscale x 4 x bfloat> [[OP1:%.*]], bfloat noundef [[OP2:%.*]], i64 noundef [[VL:%.*]]) #[[ATTR0]] {
+// CHECK-RV64-NEXT: [[ENTRY:.*:]]
+// CHECK-RV64-NEXT: [[TMP0:%.*]] = call <vscale x 4 x bfloat> @llvm.riscv.vfmul.mask.nxv4bf16.bf16.i64(<vscale x 4 x bfloat> poison, <vscale x 4 x bfloat> [[OP1]], bfloat [[OP2]], <vscale x 4 x i1> [[MASK]], i64 7, i64 [[VL]], i64 3)
+// CHECK-RV64-NEXT: ret <vscale x 4 x bfloat> [[TMP0]]
+//
+vbfloat16m1_t test_vfmul_vf_bf16m1_m(vbool16_t mask, vbfloat16m1_t op1, __bf16 op2, size_t vl) {
+ return __riscv_vfmul(mask, op1, op2, vl);
+}
+
+// CHECK-RV64-LABEL: define dso_local <vscale x 8 x bfloat> @test_vfmul_vv_bf16m2_m(
+// CHECK-RV64-SAME: <vscale x 8 x i1> [[MASK:%.*]], <vscale x 8 x bfloat> [[OP1:%.*]], <vscale x 8 x bfloat> [[OP2:%.*]], i64 noundef [[VL:%.*]]) #[[ATTR0]] {
+// CHECK-RV64-NEXT: [[ENTRY:.*:]]
+// CHECK-RV64-NEXT: [[TMP0:%.*]] = call <vscale x 8 x bfloat> @llvm.riscv.vfmul.mask.nxv8bf16.nxv8bf16.i64(<vscale x 8 x bfloat> poison, <vscale x 8 x bfloat> [[OP1]], <vscale x 8 x bfloat> [[OP2]], <vscale x 8 x i1> [[MASK]], i64 7, i64 [[VL]], i64 3)
+// CHECK-RV64-NEXT: ret <vscale x 8 x bfloat> [[TMP0]]
+//
+vbfloat16m2_t test_vfmul_vv_bf16m2_m(vbool8_t mask, vbfloat16m2_t op1, vbfloat16m2_t op2, size_t vl) {
+ return __riscv_vfmul(mask, op1, op2, vl);
+}
+
+// CHECK-RV64-LABEL: define dso_local <vscale x 8 x bfloat> @test_vfmul_vf_bf16m2_m(
+// CHECK-RV64-SAME: <vscale x 8 x i1> [[MASK:%.*]], <vscale x 8 x bfloat> [[OP1:%.*]], bfloat noundef [[OP2:%.*]], i64 noundef [[VL:%.*]]) #[[ATTR0]] {
+// CHECK-RV64-NEXT: [[ENTRY:.*:]]
+// CHECK-RV64-NEXT: [[TMP0:%.*]] = call <vscale x 8 x bfloat> @llvm.riscv.vfmul.mask.nxv8bf16.bf16.i64(<vscale x 8 x bfloat> poison, <vscale x 8 x bfloat> [[OP1]], bfloat [[OP2]], <vscale x 8 x i1> [[MASK]], i64 7, i64 [[VL]], i64 3)
+// CHECK-RV64-NEXT: ret <vscale x 8 x bfloat> [[TMP0]]
+//
+vbfloat16m2_t test_vfmul_vf_bf16m2_m(vbool8_t mask, vbfloat16m2_t op1, __bf16 op2, size_t vl) {
+ return __riscv_vfmul(mask, op1, op2, vl);
+}
+
+// CHECK-RV64-LABEL: define dso_local <vscale x 16 x bfloat> @test_vfmul_vv_bf16m4_m(
+// CHECK-RV64-SAME: <vscale x 16 x i1> [[MASK:%.*]], <vscale x 16 x bfloat> [[OP1:%.*]], <vscale x 16 x bfloat> [[OP2:%.*]], i64 noundef [[VL:%.*]]) #[[ATTR0]] {
+// CHECK-RV64-NEXT: [[ENTRY:.*:]]
+// CHECK-RV64-NEXT: [[TMP0:%.*]] = call <vscale x 16 x bfloat> @llvm.riscv.vfmul.mask.nxv16bf16.nxv16bf16.i64(<vscale x 16 x bfloat> poison, <vscale x 16 x bfloat> [[OP1]], <vscale x 16 x bfloat> [[OP2]], <vscale x 16 x i1> [[MASK]], i64 7, i64 [[VL]], i64 3)
+// CHECK-RV64-NEXT: ret <vscale x 16 x bfloat> [[TMP0]]
+//
+vbfloat16m4_t test_vfmul_vv_bf16m4_m(vbool4_t mask, vbfloat16m4_t op1, vbfloat16m4_t op2, size_t vl) {
+ return __riscv_vfmul(mask, op1, op2, vl);
+}
+
+// CHECK-RV64-LABEL: define dso_local <vscale x 16 x bfloat> @test_vfmul_vf_bf16m4_m(
+// CHECK-RV64-SAME: <vscale x 16 x i1> [[MASK:%.*]], <vscale x 16 x bfloat> [[OP1:%.*]], bfloat noundef [[OP2:%.*]], i64 noundef [[VL:%.*]]) #[[ATTR0]] {
+// CHECK-RV64-NEXT: [[ENTRY:.*:]]
+// CHECK-RV64-NEXT: [[TMP0:%.*]] = call <vscale x 16 x bfloat> @llvm.riscv.vfmul.mask.nxv16bf16.bf16.i64(<vscale x 16 x bfloat> poison, <vscale x 16 x bfloat> [[OP1]], bfloat [[OP2]], <vscale x 16 x i1> [[MASK]], i64 7, i64 [[VL]], i64 3)
+// CHECK-RV64-NEXT: ret <vscale x 16 x bfloat> [[TMP0]]
+//
+vbfloat16m4_t test_vfmul_vf_bf16m4_m(vbool4_t mask, vbfloat16m4_t op1, __bf16 op2, size_t vl) {
+ return __riscv_vfmul(mask, op1, op2, vl);
+}
+
+// CHECK-RV64-LABEL: define dso_local <vscale x 32 x bfloat> @test_vfmul_vv_bf16m8_m(
+// CHECK-RV64-SAME: <vscale x 32 x i1> [[MASK:%.*]], <vscale x 32 x bfloat> [[OP1:%.*]], <vscale x 32 x bfloat> [[OP2:%.*]], i64 noundef [[VL:%.*]]) #[[ATTR0]] {
+// CHECK-RV64-NEXT: [[ENTRY:.*:]]
+// CHECK-RV64-NEXT: [[TMP0:%.*]] = call <vscale x 32 x bfloat> @llvm.riscv.vfmul.mask.nxv32bf16.nxv32bf16.i64(<vscale x 32 x bfloat> poison, <vscale x 32 x bfloat> [[OP1]], <vscale x 32 x bfloat> [[OP2]], <vscale x 32 x i1> [[MASK]], i64 7, i64 [[VL]], i64 3)
+// CHECK-RV64-NEXT: ret <vscale x 32 x bfloat> [[TMP0]]
+//
+vbfloat16m8_t test_vfmul_vv_bf16m8_m(vbool2_t mask, vbfloat16m8_t op1, vbfloat16m8_t op2, size_t vl) {
+ return __riscv_vfmul(mask, op1, op2, vl);
+}
+
+// CHECK-RV64-LABEL: define dso_local <vscale x 32 x bfloat> @test_vfmul_vf_bf16m8_m(
+// CHECK-RV64-SAME: <vscale x 32 x i1> [[MASK:%.*]], <vscale x 32 x bfloat> [[OP1:%.*]], bfloat noundef [[OP2:%.*]], i64 noundef [[VL:%.*]]) #[[ATTR0]] {
+// CHECK-RV64-NEXT: [[ENTRY:.*:]]
+// CHECK-RV64-NEXT: [[TMP0:%.*]] = call <vscale x 32 x bfloat> @llvm.riscv.vfmul.mask.nxv32bf16.bf16.i64(<vscale x 32 x bfloat> poison, <vscale x 32 x bfloat> [[OP1]], bfloat [[OP2]], <vscale x 32 x i1> [[MASK]], i64 7, i64 [[VL]], i64 3)
+// CHECK-RV64-NEXT: ret <vscale x 32 x bfloat> [[TMP0]]
+//
+vbfloat16m8_t test_vfmul_vf_bf16m8_m(vbool2_t mask, vbfloat16m8_t op1, __bf16 op2, size_t vl) {
+ return __riscv_vfmul(mask, op1, op2, vl);
+}
+
diff --git a/clang/test/CodeGen/RISCV/rvv-intrinsics-autogenerated/zvfbfa/non-policy/overloaded/vfmv.c b/clang/test/CodeGen/RISCV/rvv-intrinsics-autogenerated/zvfbfa/non-policy/overloaded/vfmv.c
new file mode 100644
index 0000000..f85378f
--- /dev/null
+++ b/clang/test/CodeGen/RISCV/rvv-intrinsics-autogenerated/zvfbfa/non-policy/overloaded/vfmv.c
@@ -0,0 +1,69 @@
+// NOTE: Assertions have been autogenerated by utils/update_cc_test_checks.py UTC_ARGS: --version 5
+// REQUIRES: riscv-registered-target
+// RUN: %clang_cc1 -triple riscv64 -target-feature +v \
+// RUN: -target-feature +experimental-zvfbfa -disable-O0-optnone \
+// RUN: -emit-llvm %s -o - | opt -S -passes=mem2reg | \
+// RUN: FileCheck --check-prefix=CHECK-RV64 %s
+
+#include <riscv_vector.h>
+
+// CHECK-RV64-LABEL: define dso_local bfloat @test_vfmv_f_s_bf16mf4_bf16(
+// CHECK-RV64-SAME: <vscale x 1 x bfloat> [[SRC:%.*]]) #[[ATTR0:[0-9]+]] {
+// CHECK-RV64-NEXT: [[ENTRY:.*:]]
+// CHECK-RV64-NEXT: [[TMP0:%.*]] = call bfloat @llvm.riscv.vfmv.f.s.nxv1bf16(<vscale x 1 x bfloat> [[SRC]])
+// CHECK-RV64-NEXT: ret bfloat [[TMP0]]
+//
+__bf16 test_vfmv_f_s_bf16mf4_bf16(vbfloat16mf4_t src) {
+ return __riscv_vfmv_f(src);
+}
+
+// CHECK-RV64-LABEL: define dso_local bfloat @test_vfmv_f_s_bf16mf2_bf16(
+// CHECK-RV64-SAME: <vscale x 2 x bfloat> [[SRC:%.*]]) #[[ATTR0]] {
+// CHECK-RV64-NEXT: [[ENTRY:.*:]]
+// CHECK-RV64-NEXT: [[TMP0:%.*]] = call bfloat @llvm.riscv.vfmv.f.s.nxv2bf16(<vscale x 2 x bfloat> [[SRC]])
+// CHECK-RV64-NEXT: ret bfloat [[TMP0]]
+//
+__bf16 test_vfmv_f_s_bf16mf2_bf16(vbfloat16mf2_t src) {
+ return __riscv_vfmv_f(src);
+}
+
+// CHECK-RV64-LABEL: define dso_local bfloat @test_vfmv_f_s_bf16m1_bf16(
+// CHECK-RV64-SAME: <vscale x 4 x bfloat> [[SRC:%.*]]) #[[ATTR0]] {
+// CHECK-RV64-NEXT: [[ENTRY:.*:]]
+// CHECK-RV64-NEXT: [[TMP0:%.*]] = call bfloat @llvm.riscv.vfmv.f.s.nxv4bf16(<vscale x 4 x bfloat> [[SRC]])
+// CHECK-RV64-NEXT: ret bfloat [[TMP0]]
+//
+__bf16 test_vfmv_f_s_bf16m1_bf16(vbfloat16m1_t src) {
+ return __riscv_vfmv_f(src);
+}
+
+// CHECK-RV64-LABEL: define dso_local bfloat @test_vfmv_f_s_bf16m2_bf16(
+// CHECK-RV64-SAME: <vscale x 8 x bfloat> [[SRC:%.*]]) #[[ATTR0]] {
+// CHECK-RV64-NEXT: [[ENTRY:.*:]]
+// CHECK-RV64-NEXT: [[TMP0:%.*]] = call bfloat @llvm.riscv.vfmv.f.s.nxv8bf16(<vscale x 8 x bfloat> [[SRC]])
+// CHECK-RV64-NEXT: ret bfloat [[TMP0]]
+//
+__bf16 test_vfmv_f_s_bf16m2_bf16(vbfloat16m2_t src) {
+ return __riscv_vfmv_f(src);
+}
+
+// CHECK-RV64-LABEL: define dso_local bfloat @test_vfmv_f_s_bf16m4_bf16(
+// CHECK-RV64-SAME: <vscale x 16 x bfloat> [[SRC:%.*]]) #[[ATTR0]] {
+// CHECK-RV64-NEXT: [[ENTRY:.*:]]
+// CHECK-RV64-NEXT: [[TMP0:%.*]] = call bfloat @llvm.riscv.vfmv.f.s.nxv16bf16(<vscale x 16 x bfloat> [[SRC]])
+// CHECK-RV64-NEXT: ret bfloat [[TMP0]]
+//
+__bf16 test_vfmv_f_s_bf16m4_bf16(vbfloat16m4_t src) {
+ return __riscv_vfmv_f(src);
+}
+
+// CHECK-RV64-LABEL: define dso_local bfloat @test_vfmv_f_s_bf16m8_bf16(
+// CHECK-RV64-SAME: <vscale x 32 x bfloat> [[SRC:%.*]]) #[[ATTR0]] {
+// CHECK-RV64-NEXT: [[ENTRY:.*:]]
+// CHECK-RV64-NEXT: [[TMP0:%.*]] = call bfloat @llvm.riscv.vfmv.f.s.nxv32bf16(<vscale x 32 x bfloat> [[SRC]])
+// CHECK-RV64-NEXT: ret bfloat [[TMP0]]
+//
+__bf16 test_vfmv_f_s_bf16m8_bf16(vbfloat16m8_t src) {
+ return __riscv_vfmv_f(src);
+}
+
diff --git a/clang/test/CodeGen/RISCV/rvv-intrinsics-autogenerated/zvfbfa/non-policy/overloaded/vfncvt.c b/clang/test/CodeGen/RISCV/rvv-intrinsics-autogenerated/zvfbfa/non-policy/overloaded/vfncvt.c
new file mode 100644
index 0000000..fb635d6
--- /dev/null
+++ b/clang/test/CodeGen/RISCV/rvv-intrinsics-autogenerated/zvfbfa/non-policy/overloaded/vfncvt.c
@@ -0,0 +1,724 @@
+// NOTE: Assertions have been autogenerated by utils/update_cc_test_checks.py UTC_ARGS: --version 5
+// REQUIRES: riscv-registered-target
+// RUN: %clang_cc1 -triple riscv64 -target-feature +v \
+// RUN: -target-feature +experimental-zvfbfa -disable-O0-optnone \
+// RUN: -emit-llvm %s -o - | opt -S -passes=mem2reg | \
+// RUN: FileCheck --check-prefix=CHECK-RV64 %s
+
+#include <riscv_vector.h>
+
+// CHECK-RV64-LABEL: define dso_local <vscale x 1 x i8> @test_vfncvt_x_f_w_bf16mf4_i8mf8(
+// CHECK-RV64-SAME: <vscale x 1 x bfloat> [[VS2:%.*]], i64 noundef [[VL:%.*]]) #[[ATTR0:[0-9]+]] {
+// CHECK-RV64-NEXT: [[ENTRY:.*:]]
+// CHECK-RV64-NEXT: [[TMP0:%.*]] = call <vscale x 1 x i8> @llvm.riscv.vfncvt.x.f.w.nxv1i8.nxv1bf16.i64(<vscale x 1 x i8> poison, <vscale x 1 x bfloat> [[VS2]], i64 7, i64 [[VL]])
+// CHECK-RV64-NEXT: ret <vscale x 1 x i8> [[TMP0]]
+//
+vint8mf8_t test_vfncvt_x_f_w_bf16mf4_i8mf8(vbfloat16mf4_t vs2, size_t vl) {
+ return __riscv_vfncvt_x(vs2, vl);
+}
+
+// CHECK-RV64-LABEL: define dso_local <vscale x 2 x i8> @test_vfncvt_x_f_w_bf16mf2_i8mf4(
+// CHECK-RV64-SAME: <vscale x 2 x bfloat> [[VS2:%.*]], i64 noundef [[VL:%.*]]) #[[ATTR0]] {
+// CHECK-RV64-NEXT: [[ENTRY:.*:]]
+// CHECK-RV64-NEXT: [[TMP0:%.*]] = call <vscale x 2 x i8> @llvm.riscv.vfncvt.x.f.w.nxv2i8.nxv2bf16.i64(<vscale x 2 x i8> poison, <vscale x 2 x bfloat> [[VS2]], i64 7, i64 [[VL]])
+// CHECK-RV64-NEXT: ret <vscale x 2 x i8> [[TMP0]]
+//
+vint8mf4_t test_vfncvt_x_f_w_bf16mf2_i8mf4(vbfloat16mf2_t vs2, size_t vl) {
+ return __riscv_vfncvt_x(vs2, vl);
+}
+
+// CHECK-RV64-LABEL: define dso_local <vscale x 4 x i8> @test_vfncvt_x_f_w_bf16m1_i8mf2(
+// CHECK-RV64-SAME: <vscale x 4 x bfloat> [[VS2:%.*]], i64 noundef [[VL:%.*]]) #[[ATTR0]] {
+// CHECK-RV64-NEXT: [[ENTRY:.*:]]
+// CHECK-RV64-NEXT: [[TMP0:%.*]] = call <vscale x 4 x i8> @llvm.riscv.vfncvt.x.f.w.nxv4i8.nxv4bf16.i64(<vscale x 4 x i8> poison, <vscale x 4 x bfloat> [[VS2]], i64 7, i64 [[VL]])
+// CHECK-RV64-NEXT: ret <vscale x 4 x i8> [[TMP0]]
+//
+vint8mf2_t test_vfncvt_x_f_w_bf16m1_i8mf2(vbfloat16m1_t vs2, size_t vl) {
+ return __riscv_vfncvt_x(vs2, vl);
+}
+
+// CHECK-RV64-LABEL: define dso_local <vscale x 8 x i8> @test_vfncvt_x_f_w_bf16m2_i8m1(
+// CHECK-RV64-SAME: <vscale x 8 x bfloat> [[VS2:%.*]], i64 noundef [[VL:%.*]]) #[[ATTR0]] {
+// CHECK-RV64-NEXT: [[ENTRY:.*:]]
+// CHECK-RV64-NEXT: [[TMP0:%.*]] = call <vscale x 8 x i8> @llvm.riscv.vfncvt.x.f.w.nxv8i8.nxv8bf16.i64(<vscale x 8 x i8> poison, <vscale x 8 x bfloat> [[VS2]], i64 7, i64 [[VL]])
+// CHECK-RV64-NEXT: ret <vscale x 8 x i8> [[TMP0]]
+//
+vint8m1_t test_vfncvt_x_f_w_bf16m2_i8m1(vbfloat16m2_t vs2, size_t vl) {
+ return __riscv_vfncvt_x(vs2, vl);
+}
+
+// CHECK-RV64-LABEL: define dso_local <vscale x 16 x i8> @test_vfncvt_x_f_w_bf16m4_i8m2(
+// CHECK-RV64-SAME: <vscale x 16 x bfloat> [[VS2:%.*]], i64 noundef [[VL:%.*]]) #[[ATTR0]] {
+// CHECK-RV64-NEXT: [[ENTRY:.*:]]
+// CHECK-RV64-NEXT: [[TMP0:%.*]] = call <vscale x 16 x i8> @llvm.riscv.vfncvt.x.f.w.nxv16i8.nxv16bf16.i64(<vscale x 16 x i8> poison, <vscale x 16 x bfloat> [[VS2]], i64 7, i64 [[VL]])
+// CHECK-RV64-NEXT: ret <vscale x 16 x i8> [[TMP0]]
+//
+vint8m2_t test_vfncvt_x_f_w_bf16m4_i8m2(vbfloat16m4_t vs2, size_t vl) {
+ return __riscv_vfncvt_x(vs2, vl);
+}
+
+// CHECK-RV64-LABEL: define dso_local <vscale x 32 x i8> @test_vfncvt_x_f_w_bf16m8_i8m4(
+// CHECK-RV64-SAME: <vscale x 32 x bfloat> [[VS2:%.*]], i64 noundef [[VL:%.*]]) #[[ATTR0]] {
+// CHECK-RV64-NEXT: [[ENTRY:.*:]]
+// CHECK-RV64-NEXT: [[TMP0:%.*]] = call <vscale x 32 x i8> @llvm.riscv.vfncvt.x.f.w.nxv32i8.nxv32bf16.i64(<vscale x 32 x i8> poison, <vscale x 32 x bfloat> [[VS2]], i64 7, i64 [[VL]])
+// CHECK-RV64-NEXT: ret <vscale x 32 x i8> [[TMP0]]
+//
+vint8m4_t test_vfncvt_x_f_w_bf16m8_i8m4(vbfloat16m8_t vs2, size_t vl) {
+ return __riscv_vfncvt_x(vs2, vl);
+}
+
+// CHECK-RV64-LABEL: define dso_local <vscale x 1 x i8> @test_vfncvt_xu_f_w_bf16mf4_u8mf8(
+// CHECK-RV64-SAME: <vscale x 1 x bfloat> [[VS2:%.*]], i64 noundef [[VL:%.*]]) #[[ATTR0]] {
+// CHECK-RV64-NEXT: [[ENTRY:.*:]]
+// CHECK-RV64-NEXT: [[TMP0:%.*]] = call <vscale x 1 x i8> @llvm.riscv.vfncvt.xu.f.w.nxv1i8.nxv1bf16.i64(<vscale x 1 x i8> poison, <vscale x 1 x bfloat> [[VS2]], i64 7, i64 [[VL]])
+// CHECK-RV64-NEXT: ret <vscale x 1 x i8> [[TMP0]]
+//
+vuint8mf8_t test_vfncvt_xu_f_w_bf16mf4_u8mf8(vbfloat16mf4_t vs2, size_t vl) {
+ return __riscv_vfncvt_xu(vs2, vl);
+}
+
+// CHECK-RV64-LABEL: define dso_local <vscale x 2 x i8> @test_vfncvt_xu_f_w_bf16mf2_u8mf4(
+// CHECK-RV64-SAME: <vscale x 2 x bfloat> [[VS2:%.*]], i64 noundef [[VL:%.*]]) #[[ATTR0]] {
+// CHECK-RV64-NEXT: [[ENTRY:.*:]]
+// CHECK-RV64-NEXT: [[TMP0:%.*]] = call <vscale x 2 x i8> @llvm.riscv.vfncvt.xu.f.w.nxv2i8.nxv2bf16.i64(<vscale x 2 x i8> poison, <vscale x 2 x bfloat> [[VS2]], i64 7, i64 [[VL]])
+// CHECK-RV64-NEXT: ret <vscale x 2 x i8> [[TMP0]]
+//
+vuint8mf4_t test_vfncvt_xu_f_w_bf16mf2_u8mf4(vbfloat16mf2_t vs2, size_t vl) {
+ return __riscv_vfncvt_xu(vs2, vl);
+}
+
+// CHECK-RV64-LABEL: define dso_local <vscale x 4 x i8> @test_vfncvt_xu_f_w_bf16m1_u8mf2(
+// CHECK-RV64-SAME: <vscale x 4 x bfloat> [[VS2:%.*]], i64 noundef [[VL:%.*]]) #[[ATTR0]] {
+// CHECK-RV64-NEXT: [[ENTRY:.*:]]
+// CHECK-RV64-NEXT: [[TMP0:%.*]] = call <vscale x 4 x i8> @llvm.riscv.vfncvt.xu.f.w.nxv4i8.nxv4bf16.i64(<vscale x 4 x i8> poison, <vscale x 4 x bfloat> [[VS2]], i64 7, i64 [[VL]])
+// CHECK-RV64-NEXT: ret <vscale x 4 x i8> [[TMP0]]
+//
+vuint8mf2_t test_vfncvt_xu_f_w_bf16m1_u8mf2(vbfloat16m1_t vs2, size_t vl) {
+ return __riscv_vfncvt_xu(vs2, vl);
+}
+
+// CHECK-RV64-LABEL: define dso_local <vscale x 8 x i8> @test_vfncvt_xu_f_w_bf16m2_u8m1(
+// CHECK-RV64-SAME: <vscale x 8 x bfloat> [[VS2:%.*]], i64 noundef [[VL:%.*]]) #[[ATTR0]] {
+// CHECK-RV64-NEXT: [[ENTRY:.*:]]
+// CHECK-RV64-NEXT: [[TMP0:%.*]] = call <vscale x 8 x i8> @llvm.riscv.vfncvt.xu.f.w.nxv8i8.nxv8bf16.i64(<vscale x 8 x i8> poison, <vscale x 8 x bfloat> [[VS2]], i64 7, i64 [[VL]])
+// CHECK-RV64-NEXT: ret <vscale x 8 x i8> [[TMP0]]
+//
+vuint8m1_t test_vfncvt_xu_f_w_bf16m2_u8m1(vbfloat16m2_t vs2, size_t vl) {
+ return __riscv_vfncvt_xu(vs2, vl);
+}
+
+// CHECK-RV64-LABEL: define dso_local <vscale x 16 x i8> @test_vfncvt_xu_f_w_bf16m4_u8m2(
+// CHECK-RV64-SAME: <vscale x 16 x bfloat> [[VS2:%.*]], i64 noundef [[VL:%.*]]) #[[ATTR0]] {
+// CHECK-RV64-NEXT: [[ENTRY:.*:]]
+// CHECK-RV64-NEXT: [[TMP0:%.*]] = call <vscale x 16 x i8> @llvm.riscv.vfncvt.xu.f.w.nxv16i8.nxv16bf16.i64(<vscale x 16 x i8> poison, <vscale x 16 x bfloat> [[VS2]], i64 7, i64 [[VL]])
+// CHECK-RV64-NEXT: ret <vscale x 16 x i8> [[TMP0]]
+//
+vuint8m2_t test_vfncvt_xu_f_w_bf16m4_u8m2(vbfloat16m4_t vs2, size_t vl) {
+ return __riscv_vfncvt_xu(vs2, vl);
+}
+
+// CHECK-RV64-LABEL: define dso_local <vscale x 32 x i8> @test_vfncvt_xu_f_w_bf16m8_u8m4(
+// CHECK-RV64-SAME: <vscale x 32 x bfloat> [[VS2:%.*]], i64 noundef [[VL:%.*]]) #[[ATTR0]] {
+// CHECK-RV64-NEXT: [[ENTRY:.*:]]
+// CHECK-RV64-NEXT: [[TMP0:%.*]] = call <vscale x 32 x i8> @llvm.riscv.vfncvt.xu.f.w.nxv32i8.nxv32bf16.i64(<vscale x 32 x i8> poison, <vscale x 32 x bfloat> [[VS2]], i64 7, i64 [[VL]])
+// CHECK-RV64-NEXT: ret <vscale x 32 x i8> [[TMP0]]
+//
+vuint8m4_t test_vfncvt_xu_f_w_bf16m8_u8m4(vbfloat16m8_t vs2, size_t vl) {
+ return __riscv_vfncvt_xu(vs2, vl);
+}
+
+// CHECK-RV64-LABEL: define dso_local <vscale x 1 x bfloat> @test_vfncvt_f_f_w_bf16mf4(
+// CHECK-RV64-SAME: <vscale x 1 x float> [[VS2:%.*]], i64 noundef [[VL:%.*]]) #[[ATTR0]] {
+// CHECK-RV64-NEXT: [[ENTRY:.*:]]
+// CHECK-RV64-NEXT: [[TMP0:%.*]] = call <vscale x 1 x bfloat> @llvm.riscv.vfncvt.f.f.w.nxv1bf16.nxv1f32.i64(<vscale x 1 x bfloat> poison, <vscale x 1 x float> [[VS2]], i64 7, i64 [[VL]])
+// CHECK-RV64-NEXT: ret <vscale x 1 x bfloat> [[TMP0]]
+//
+vbfloat16mf4_t test_vfncvt_f_f_w_bf16mf4(vfloat32mf2_t vs2, size_t vl) {
+ return __riscv_vfncvt_f_bf16(vs2, vl);
+}
+
+// CHECK-RV64-LABEL: define dso_local <vscale x 2 x bfloat> @test_vfncvt_f_f_w_bf16mf2(
+// CHECK-RV64-SAME: <vscale x 2 x float> [[VS2:%.*]], i64 noundef [[VL:%.*]]) #[[ATTR0]] {
+// CHECK-RV64-NEXT: [[ENTRY:.*:]]
+// CHECK-RV64-NEXT: [[TMP0:%.*]] = call <vscale x 2 x bfloat> @llvm.riscv.vfncvt.f.f.w.nxv2bf16.nxv2f32.i64(<vscale x 2 x bfloat> poison, <vscale x 2 x float> [[VS2]], i64 7, i64 [[VL]])
+// CHECK-RV64-NEXT: ret <vscale x 2 x bfloat> [[TMP0]]
+//
+vbfloat16mf2_t test_vfncvt_f_f_w_bf16mf2(vfloat32m1_t vs2, size_t vl) {
+ return __riscv_vfncvt_f_bf16(vs2, vl);
+}
+
+// CHECK-RV64-LABEL: define dso_local <vscale x 4 x bfloat> @test_vfncvt_f_f_w_bf16m1(
+// CHECK-RV64-SAME: <vscale x 4 x float> [[VS2:%.*]], i64 noundef [[VL:%.*]]) #[[ATTR0]] {
+// CHECK-RV64-NEXT: [[ENTRY:.*:]]
+// CHECK-RV64-NEXT: [[TMP0:%.*]] = call <vscale x 4 x bfloat> @llvm.riscv.vfncvt.f.f.w.nxv4bf16.nxv4f32.i64(<vscale x 4 x bfloat> poison, <vscale x 4 x float> [[VS2]], i64 7, i64 [[VL]])
+// CHECK-RV64-NEXT: ret <vscale x 4 x bfloat> [[TMP0]]
+//
+vbfloat16m1_t test_vfncvt_f_f_w_bf16m1(vfloat32m2_t vs2, size_t vl) {
+ return __riscv_vfncvt_f_bf16(vs2, vl);
+}
+
+// CHECK-RV64-LABEL: define dso_local <vscale x 8 x bfloat> @test_vfncvt_f_f_w_bf16m2(
+// CHECK-RV64-SAME: <vscale x 8 x float> [[VS2:%.*]], i64 noundef [[VL:%.*]]) #[[ATTR0]] {
+// CHECK-RV64-NEXT: [[ENTRY:.*:]]
+// CHECK-RV64-NEXT: [[TMP0:%.*]] = call <vscale x 8 x bfloat> @llvm.riscv.vfncvt.f.f.w.nxv8bf16.nxv8f32.i64(<vscale x 8 x bfloat> poison, <vscale x 8 x float> [[VS2]], i64 7, i64 [[VL]])
+// CHECK-RV64-NEXT: ret <vscale x 8 x bfloat> [[TMP0]]
+//
+vbfloat16m2_t test_vfncvt_f_f_w_bf16m2(vfloat32m4_t vs2, size_t vl) {
+ return __riscv_vfncvt_f_bf16(vs2, vl);
+}
+
+// CHECK-RV64-LABEL: define dso_local <vscale x 16 x bfloat> @test_vfncvt_f_f_w_bf16m4(
+// CHECK-RV64-SAME: <vscale x 16 x float> [[VS2:%.*]], i64 noundef [[VL:%.*]]) #[[ATTR0]] {
+// CHECK-RV64-NEXT: [[ENTRY:.*:]]
+// CHECK-RV64-NEXT: [[TMP0:%.*]] = call <vscale x 16 x bfloat> @llvm.riscv.vfncvt.f.f.w.nxv16bf16.nxv16f32.i64(<vscale x 16 x bfloat> poison, <vscale x 16 x float> [[VS2]], i64 7, i64 [[VL]])
+// CHECK-RV64-NEXT: ret <vscale x 16 x bfloat> [[TMP0]]
+//
+vbfloat16m4_t test_vfncvt_f_f_w_bf16m4(vfloat32m8_t vs2, size_t vl) {
+ return __riscv_vfncvt_f_bf16(vs2, vl);
+}
+
+// CHECK-RV64-LABEL: define dso_local <vscale x 1 x i8> @test_vfncvt_x_f_w_bf16mf4_i8mf8_m(
+// CHECK-RV64-SAME: <vscale x 1 x i1> [[VM:%.*]], <vscale x 1 x bfloat> [[VS2:%.*]], i64 noundef [[VL:%.*]]) #[[ATTR0]] {
+// CHECK-RV64-NEXT: [[ENTRY:.*:]]
+// CHECK-RV64-NEXT: [[TMP0:%.*]] = call <vscale x 1 x i8> @llvm.riscv.vfncvt.x.f.w.mask.nxv1i8.nxv1bf16.i64(<vscale x 1 x i8> poison, <vscale x 1 x bfloat> [[VS2]], <vscale x 1 x i1> [[VM]], i64 7, i64 [[VL]], i64 3)
+// CHECK-RV64-NEXT: ret <vscale x 1 x i8> [[TMP0]]
+//
+vint8mf8_t test_vfncvt_x_f_w_bf16mf4_i8mf8_m(vbool64_t vm, vbfloat16mf4_t vs2,
+ size_t vl) {
+ return __riscv_vfncvt_x(vm, vs2, vl);
+}
+
+// CHECK-RV64-LABEL: define dso_local <vscale x 2 x i8> @test_vfncvt_x_f_w_bf16mf2_i8mf4_m(
+// CHECK-RV64-SAME: <vscale x 2 x i1> [[VM:%.*]], <vscale x 2 x bfloat> [[VS2:%.*]], i64 noundef [[VL:%.*]]) #[[ATTR0]] {
+// CHECK-RV64-NEXT: [[ENTRY:.*:]]
+// CHECK-RV64-NEXT: [[TMP0:%.*]] = call <vscale x 2 x i8> @llvm.riscv.vfncvt.x.f.w.mask.nxv2i8.nxv2bf16.i64(<vscale x 2 x i8> poison, <vscale x 2 x bfloat> [[VS2]], <vscale x 2 x i1> [[VM]], i64 7, i64 [[VL]], i64 3)
+// CHECK-RV64-NEXT: ret <vscale x 2 x i8> [[TMP0]]
+//
+vint8mf4_t test_vfncvt_x_f_w_bf16mf2_i8mf4_m(vbool32_t vm, vbfloat16mf2_t vs2,
+ size_t vl) {
+ return __riscv_vfncvt_x(vm, vs2, vl);
+}
+
+// CHECK-RV64-LABEL: define dso_local <vscale x 4 x i8> @test_vfncvt_x_f_w_bf16m1_i8mf2_m(
+// CHECK-RV64-SAME: <vscale x 4 x i1> [[VM:%.*]], <vscale x 4 x bfloat> [[VS2:%.*]], i64 noundef [[VL:%.*]]) #[[ATTR0]] {
+// CHECK-RV64-NEXT: [[ENTRY:.*:]]
+// CHECK-RV64-NEXT: [[TMP0:%.*]] = call <vscale x 4 x i8> @llvm.riscv.vfncvt.x.f.w.mask.nxv4i8.nxv4bf16.i64(<vscale x 4 x i8> poison, <vscale x 4 x bfloat> [[VS2]], <vscale x 4 x i1> [[VM]], i64 7, i64 [[VL]], i64 3)
+// CHECK-RV64-NEXT: ret <vscale x 4 x i8> [[TMP0]]
+//
+vint8mf2_t test_vfncvt_x_f_w_bf16m1_i8mf2_m(vbool16_t vm, vbfloat16m1_t vs2,
+ size_t vl) {
+ return __riscv_vfncvt_x(vm, vs2, vl);
+}
+
+// CHECK-RV64-LABEL: define dso_local <vscale x 8 x i8> @test_vfncvt_x_f_w_bf16m2_i8m1_m(
+// CHECK-RV64-SAME: <vscale x 8 x i1> [[VM:%.*]], <vscale x 8 x bfloat> [[VS2:%.*]], i64 noundef [[VL:%.*]]) #[[ATTR0]] {
+// CHECK-RV64-NEXT: [[ENTRY:.*:]]
+// CHECK-RV64-NEXT: [[TMP0:%.*]] = call <vscale x 8 x i8> @llvm.riscv.vfncvt.x.f.w.mask.nxv8i8.nxv8bf16.i64(<vscale x 8 x i8> poison, <vscale x 8 x bfloat> [[VS2]], <vscale x 8 x i1> [[VM]], i64 7, i64 [[VL]], i64 3)
+// CHECK-RV64-NEXT: ret <vscale x 8 x i8> [[TMP0]]
+//
+vint8m1_t test_vfncvt_x_f_w_bf16m2_i8m1_m(vbool8_t vm, vbfloat16m2_t vs2,
+ size_t vl) {
+ return __riscv_vfncvt_x(vm, vs2, vl);
+}
+
+// CHECK-RV64-LABEL: define dso_local <vscale x 16 x i8> @test_vfncvt_x_f_w_bf16m4_i8m2_m(
+// CHECK-RV64-SAME: <vscale x 16 x i1> [[VM:%.*]], <vscale x 16 x bfloat> [[VS2:%.*]], i64 noundef [[VL:%.*]]) #[[ATTR0]] {
+// CHECK-RV64-NEXT: [[ENTRY:.*:]]
+// CHECK-RV64-NEXT: [[TMP0:%.*]] = call <vscale x 16 x i8> @llvm.riscv.vfncvt.x.f.w.mask.nxv16i8.nxv16bf16.i64(<vscale x 16 x i8> poison, <vscale x 16 x bfloat> [[VS2]], <vscale x 16 x i1> [[VM]], i64 7, i64 [[VL]], i64 3)
+// CHECK-RV64-NEXT: ret <vscale x 16 x i8> [[TMP0]]
+//
+vint8m2_t test_vfncvt_x_f_w_bf16m4_i8m2_m(vbool4_t vm, vbfloat16m4_t vs2,
+ size_t vl) {
+ return __riscv_vfncvt_x(vm, vs2, vl);
+}
+
+// CHECK-RV64-LABEL: define dso_local <vscale x 32 x i8> @test_vfncvt_x_f_w_bf16m8_i8m4_m(
+// CHECK-RV64-SAME: <vscale x 32 x i1> [[VM:%.*]], <vscale x 32 x bfloat> [[VS2:%.*]], i64 noundef [[VL:%.*]]) #[[ATTR0]] {
+// CHECK-RV64-NEXT: [[ENTRY:.*:]]
+// CHECK-RV64-NEXT: [[TMP0:%.*]] = call <vscale x 32 x i8> @llvm.riscv.vfncvt.x.f.w.mask.nxv32i8.nxv32bf16.i64(<vscale x 32 x i8> poison, <vscale x 32 x bfloat> [[VS2]], <vscale x 32 x i1> [[VM]], i64 7, i64 [[VL]], i64 3)
+// CHECK-RV64-NEXT: ret <vscale x 32 x i8> [[TMP0]]
+//
+vint8m4_t test_vfncvt_x_f_w_bf16m8_i8m4_m(vbool2_t vm, vbfloat16m8_t vs2,
+ size_t vl) {
+ return __riscv_vfncvt_x(vm, vs2, vl);
+}
+
+// CHECK-RV64-LABEL: define dso_local <vscale x 1 x i8> @test_vfncvt_xu_f_w_bf16mf4_u8mf8_m(
+// CHECK-RV64-SAME: <vscale x 1 x i1> [[VM:%.*]], <vscale x 1 x bfloat> [[VS2:%.*]], i64 noundef [[VL:%.*]]) #[[ATTR0]] {
+// CHECK-RV64-NEXT: [[ENTRY:.*:]]
+// CHECK-RV64-NEXT: [[TMP0:%.*]] = call <vscale x 1 x i8> @llvm.riscv.vfncvt.xu.f.w.mask.nxv1i8.nxv1bf16.i64(<vscale x 1 x i8> poison, <vscale x 1 x bfloat> [[VS2]], <vscale x 1 x i1> [[VM]], i64 7, i64 [[VL]], i64 3)
+// CHECK-RV64-NEXT: ret <vscale x 1 x i8> [[TMP0]]
+//
+vuint8mf8_t test_vfncvt_xu_f_w_bf16mf4_u8mf8_m(vbool64_t vm, vbfloat16mf4_t vs2,
+ size_t vl) {
+ return __riscv_vfncvt_xu(vm, vs2, vl);
+}
+
+// CHECK-RV64-LABEL: define dso_local <vscale x 2 x i8> @test_vfncvt_xu_f_w_bf16mf2_u8mf4_m(
+// CHECK-RV64-SAME: <vscale x 2 x i1> [[VM:%.*]], <vscale x 2 x bfloat> [[VS2:%.*]], i64 noundef [[VL:%.*]]) #[[ATTR0]] {
+// CHECK-RV64-NEXT: [[ENTRY:.*:]]
+// CHECK-RV64-NEXT: [[TMP0:%.*]] = call <vscale x 2 x i8> @llvm.riscv.vfncvt.xu.f.w.mask.nxv2i8.nxv2bf16.i64(<vscale x 2 x i8> poison, <vscale x 2 x bfloat> [[VS2]], <vscale x 2 x i1> [[VM]], i64 7, i64 [[VL]], i64 3)
+// CHECK-RV64-NEXT: ret <vscale x 2 x i8> [[TMP0]]
+//
+vuint8mf4_t test_vfncvt_xu_f_w_bf16mf2_u8mf4_m(vbool32_t vm, vbfloat16mf2_t vs2,
+ size_t vl) {
+ return __riscv_vfncvt_xu(vm, vs2, vl);
+}
+
+// CHECK-RV64-LABEL: define dso_local <vscale x 4 x i8> @test_vfncvt_xu_f_w_bf16m1_u8mf2_m(
+// CHECK-RV64-SAME: <vscale x 4 x i1> [[VM:%.*]], <vscale x 4 x bfloat> [[VS2:%.*]], i64 noundef [[VL:%.*]]) #[[ATTR0]] {
+// CHECK-RV64-NEXT: [[ENTRY:.*:]]
+// CHECK-RV64-NEXT: [[TMP0:%.*]] = call <vscale x 4 x i8> @llvm.riscv.vfncvt.xu.f.w.mask.nxv4i8.nxv4bf16.i64(<vscale x 4 x i8> poison, <vscale x 4 x bfloat> [[VS2]], <vscale x 4 x i1> [[VM]], i64 7, i64 [[VL]], i64 3)
+// CHECK-RV64-NEXT: ret <vscale x 4 x i8> [[TMP0]]
+//
+vuint8mf2_t test_vfncvt_xu_f_w_bf16m1_u8mf2_m(vbool16_t vm, vbfloat16m1_t vs2,
+ size_t vl) {
+ return __riscv_vfncvt_xu(vm, vs2, vl);
+}
+
+// CHECK-RV64-LABEL: define dso_local <vscale x 8 x i8> @test_vfncvt_xu_f_w_bf16m2_u8m1_m(
+// CHECK-RV64-SAME: <vscale x 8 x i1> [[VM:%.*]], <vscale x 8 x bfloat> [[VS2:%.*]], i64 noundef [[VL:%.*]]) #[[ATTR0]] {
+// CHECK-RV64-NEXT: [[ENTRY:.*:]]
+// CHECK-RV64-NEXT: [[TMP0:%.*]] = call <vscale x 8 x i8> @llvm.riscv.vfncvt.xu.f.w.mask.nxv8i8.nxv8bf16.i64(<vscale x 8 x i8> poison, <vscale x 8 x bfloat> [[VS2]], <vscale x 8 x i1> [[VM]], i64 7, i64 [[VL]], i64 3)
+// CHECK-RV64-NEXT: ret <vscale x 8 x i8> [[TMP0]]
+//
+vuint8m1_t test_vfncvt_xu_f_w_bf16m2_u8m1_m(vbool8_t vm, vbfloat16m2_t vs2,
+ size_t vl) {
+ return __riscv_vfncvt_xu(vm, vs2, vl);
+}
+
+// CHECK-RV64-LABEL: define dso_local <vscale x 16 x i8> @test_vfncvt_xu_f_w_bf16m4_u8m2_m(
+// CHECK-RV64-SAME: <vscale x 16 x i1> [[VM:%.*]], <vscale x 16 x bfloat> [[VS2:%.*]], i64 noundef [[VL:%.*]]) #[[ATTR0]] {
+// CHECK-RV64-NEXT: [[ENTRY:.*:]]
+// CHECK-RV64-NEXT: [[TMP0:%.*]] = call <vscale x 16 x i8> @llvm.riscv.vfncvt.xu.f.w.mask.nxv16i8.nxv16bf16.i64(<vscale x 16 x i8> poison, <vscale x 16 x bfloat> [[VS2]], <vscale x 16 x i1> [[VM]], i64 7, i64 [[VL]], i64 3)
+// CHECK-RV64-NEXT: ret <vscale x 16 x i8> [[TMP0]]
+//
+vuint8m2_t test_vfncvt_xu_f_w_bf16m4_u8m2_m(vbool4_t vm, vbfloat16m4_t vs2,
+ size_t vl) {
+ return __riscv_vfncvt_xu(vm, vs2, vl);
+}
+
+// CHECK-RV64-LABEL: define dso_local <vscale x 32 x i8> @test_vfncvt_xu_f_w_bf16m8_u8m4_m(
+// CHECK-RV64-SAME: <vscale x 32 x i1> [[VM:%.*]], <vscale x 32 x bfloat> [[VS2:%.*]], i64 noundef [[VL:%.*]]) #[[ATTR0]] {
+// CHECK-RV64-NEXT: [[ENTRY:.*:]]
+// CHECK-RV64-NEXT: [[TMP0:%.*]] = call <vscale x 32 x i8> @llvm.riscv.vfncvt.xu.f.w.mask.nxv32i8.nxv32bf16.i64(<vscale x 32 x i8> poison, <vscale x 32 x bfloat> [[VS2]], <vscale x 32 x i1> [[VM]], i64 7, i64 [[VL]], i64 3)
+// CHECK-RV64-NEXT: ret <vscale x 32 x i8> [[TMP0]]
+//
+vuint8m4_t test_vfncvt_xu_f_w_bf16m8_u8m4_m(vbool2_t vm, vbfloat16m8_t vs2,
+ size_t vl) {
+ return __riscv_vfncvt_xu(vm, vs2, vl);
+}
+
+// CHECK-RV64-LABEL: define dso_local <vscale x 1 x bfloat> @test_vfncvt_f_f_w_bf16mf4_m(
+// CHECK-RV64-SAME: <vscale x 1 x i1> [[VM:%.*]], <vscale x 1 x float> [[VS2:%.*]], i64 noundef [[VL:%.*]]) #[[ATTR0]] {
+// CHECK-RV64-NEXT: [[ENTRY:.*:]]
+// CHECK-RV64-NEXT: [[TMP0:%.*]] = call <vscale x 1 x bfloat> @llvm.riscv.vfncvt.f.f.w.mask.nxv1bf16.nxv1f32.i64(<vscale x 1 x bfloat> poison, <vscale x 1 x float> [[VS2]], <vscale x 1 x i1> [[VM]], i64 7, i64 [[VL]], i64 3)
+// CHECK-RV64-NEXT: ret <vscale x 1 x bfloat> [[TMP0]]
+//
+vbfloat16mf4_t test_vfncvt_f_f_w_bf16mf4_m(vbool64_t vm, vfloat32mf2_t vs2,
+ size_t vl) {
+ return __riscv_vfncvt_f_bf16(vm, vs2, vl);
+}
+
+// CHECK-RV64-LABEL: define dso_local <vscale x 2 x bfloat> @test_vfncvt_f_f_w_bf16mf2_m(
+// CHECK-RV64-SAME: <vscale x 2 x i1> [[VM:%.*]], <vscale x 2 x float> [[VS2:%.*]], i64 noundef [[VL:%.*]]) #[[ATTR0]] {
+// CHECK-RV64-NEXT: [[ENTRY:.*:]]
+// CHECK-RV64-NEXT: [[TMP0:%.*]] = call <vscale x 2 x bfloat> @llvm.riscv.vfncvt.f.f.w.mask.nxv2bf16.nxv2f32.i64(<vscale x 2 x bfloat> poison, <vscale x 2 x float> [[VS2]], <vscale x 2 x i1> [[VM]], i64 7, i64 [[VL]], i64 3)
+// CHECK-RV64-NEXT: ret <vscale x 2 x bfloat> [[TMP0]]
+//
+vbfloat16mf2_t test_vfncvt_f_f_w_bf16mf2_m(vbool32_t vm, vfloat32m1_t vs2,
+ size_t vl) {
+ return __riscv_vfncvt_f_bf16(vm, vs2, vl);
+}
+
+// CHECK-RV64-LABEL: define dso_local <vscale x 4 x bfloat> @test_vfncvt_f_f_w_bf16m1_m(
+// CHECK-RV64-SAME: <vscale x 4 x i1> [[VM:%.*]], <vscale x 4 x float> [[VS2:%.*]], i64 noundef [[VL:%.*]]) #[[ATTR0]] {
+// CHECK-RV64-NEXT: [[ENTRY:.*:]]
+// CHECK-RV64-NEXT: [[TMP0:%.*]] = call <vscale x 4 x bfloat> @llvm.riscv.vfncvt.f.f.w.mask.nxv4bf16.nxv4f32.i64(<vscale x 4 x bfloat> poison, <vscale x 4 x float> [[VS2]], <vscale x 4 x i1> [[VM]], i64 7, i64 [[VL]], i64 3)
+// CHECK-RV64-NEXT: ret <vscale x 4 x bfloat> [[TMP0]]
+//
+vbfloat16m1_t test_vfncvt_f_f_w_bf16m1_m(vbool16_t vm, vfloat32m2_t vs2,
+ size_t vl) {
+ return __riscv_vfncvt_f_bf16(vm, vs2, vl);
+}
+
+// CHECK-RV64-LABEL: define dso_local <vscale x 8 x bfloat> @test_vfncvt_f_f_w_bf16m2_m(
+// CHECK-RV64-SAME: <vscale x 8 x i1> [[VM:%.*]], <vscale x 8 x float> [[VS2:%.*]], i64 noundef [[VL:%.*]]) #[[ATTR0]] {
+// CHECK-RV64-NEXT: [[ENTRY:.*:]]
+// CHECK-RV64-NEXT: [[TMP0:%.*]] = call <vscale x 8 x bfloat> @llvm.riscv.vfncvt.f.f.w.mask.nxv8bf16.nxv8f32.i64(<vscale x 8 x bfloat> poison, <vscale x 8 x float> [[VS2]], <vscale x 8 x i1> [[VM]], i64 7, i64 [[VL]], i64 3)
+// CHECK-RV64-NEXT: ret <vscale x 8 x bfloat> [[TMP0]]
+//
+vbfloat16m2_t test_vfncvt_f_f_w_bf16m2_m(vbool8_t vm, vfloat32m4_t vs2,
+ size_t vl) {
+ return __riscv_vfncvt_f_bf16(vm, vs2, vl);
+}
+
+// CHECK-RV64-LABEL: define dso_local <vscale x 16 x bfloat> @test_vfncvt_f_f_w_bf16m4_m(
+// CHECK-RV64-SAME: <vscale x 16 x i1> [[VM:%.*]], <vscale x 16 x float> [[VS2:%.*]], i64 noundef [[VL:%.*]]) #[[ATTR0]] {
+// CHECK-RV64-NEXT: [[ENTRY:.*:]]
+// CHECK-RV64-NEXT: [[TMP0:%.*]] = call <vscale x 16 x bfloat> @llvm.riscv.vfncvt.f.f.w.mask.nxv16bf16.nxv16f32.i64(<vscale x 16 x bfloat> poison, <vscale x 16 x float> [[VS2]], <vscale x 16 x i1> [[VM]], i64 7, i64 [[VL]], i64 3)
+// CHECK-RV64-NEXT: ret <vscale x 16 x bfloat> [[TMP0]]
+//
+vbfloat16m4_t test_vfncvt_f_f_w_bf16m4_m(vbool4_t vm, vfloat32m8_t vs2,
+ size_t vl) {
+ return __riscv_vfncvt_f_bf16(vm, vs2, vl);
+}
+
+// CHECK-RV64-LABEL: define dso_local <vscale x 1 x i8> @test_vfncvt_x_f_w_bf16mf4_i8mf8_rm(
+// CHECK-RV64-SAME: <vscale x 1 x bfloat> [[VS2:%.*]], i64 noundef [[VL:%.*]]) #[[ATTR0]] {
+// CHECK-RV64-NEXT: [[ENTRY:.*:]]
+// CHECK-RV64-NEXT: [[TMP0:%.*]] = call <vscale x 1 x i8> @llvm.riscv.vfncvt.x.f.w.nxv1i8.nxv1bf16.i64(<vscale x 1 x i8> poison, <vscale x 1 x bfloat> [[VS2]], i64 0, i64 [[VL]])
+// CHECK-RV64-NEXT: ret <vscale x 1 x i8> [[TMP0]]
+//
+vint8mf8_t test_vfncvt_x_f_w_bf16mf4_i8mf8_rm(vbfloat16mf4_t vs2, size_t vl) {
+ return __riscv_vfncvt_x(vs2, __RISCV_FRM_RNE, vl);
+}
+
+// CHECK-RV64-LABEL: define dso_local <vscale x 2 x i8> @test_vfncvt_x_f_w_bf16mf2_i8mf4_rm(
+// CHECK-RV64-SAME: <vscale x 2 x bfloat> [[VS2:%.*]], i64 noundef [[VL:%.*]]) #[[ATTR0]] {
+// CHECK-RV64-NEXT: [[ENTRY:.*:]]
+// CHECK-RV64-NEXT: [[TMP0:%.*]] = call <vscale x 2 x i8> @llvm.riscv.vfncvt.x.f.w.nxv2i8.nxv2bf16.i64(<vscale x 2 x i8> poison, <vscale x 2 x bfloat> [[VS2]], i64 0, i64 [[VL]])
+// CHECK-RV64-NEXT: ret <vscale x 2 x i8> [[TMP0]]
+//
+vint8mf4_t test_vfncvt_x_f_w_bf16mf2_i8mf4_rm(vbfloat16mf2_t vs2, size_t vl) {
+ return __riscv_vfncvt_x(vs2, __RISCV_FRM_RNE, vl);
+}
+
+// CHECK-RV64-LABEL: define dso_local <vscale x 4 x i8> @test_vfncvt_x_f_w_bf16m1_i8mf2_rm(
+// CHECK-RV64-SAME: <vscale x 4 x bfloat> [[VS2:%.*]], i64 noundef [[VL:%.*]]) #[[ATTR0]] {
+// CHECK-RV64-NEXT: [[ENTRY:.*:]]
+// CHECK-RV64-NEXT: [[TMP0:%.*]] = call <vscale x 4 x i8> @llvm.riscv.vfncvt.x.f.w.nxv4i8.nxv4bf16.i64(<vscale x 4 x i8> poison, <vscale x 4 x bfloat> [[VS2]], i64 0, i64 [[VL]])
+// CHECK-RV64-NEXT: ret <vscale x 4 x i8> [[TMP0]]
+//
+vint8mf2_t test_vfncvt_x_f_w_bf16m1_i8mf2_rm(vbfloat16m1_t vs2, size_t vl) {
+ return __riscv_vfncvt_x(vs2, __RISCV_FRM_RNE, vl);
+}
+
+// CHECK-RV64-LABEL: define dso_local <vscale x 8 x i8> @test_vfncvt_x_f_w_bf16m2_i8m1_rm(
+// CHECK-RV64-SAME: <vscale x 8 x bfloat> [[VS2:%.*]], i64 noundef [[VL:%.*]]) #[[ATTR0]] {
+// CHECK-RV64-NEXT: [[ENTRY:.*:]]
+// CHECK-RV64-NEXT: [[TMP0:%.*]] = call <vscale x 8 x i8> @llvm.riscv.vfncvt.x.f.w.nxv8i8.nxv8bf16.i64(<vscale x 8 x i8> poison, <vscale x 8 x bfloat> [[VS2]], i64 0, i64 [[VL]])
+// CHECK-RV64-NEXT: ret <vscale x 8 x i8> [[TMP0]]
+//
+vint8m1_t test_vfncvt_x_f_w_bf16m2_i8m1_rm(vbfloat16m2_t vs2, size_t vl) {
+ return __riscv_vfncvt_x(vs2, __RISCV_FRM_RNE, vl);
+}
+
+// CHECK-RV64-LABEL: define dso_local <vscale x 16 x i8> @test_vfncvt_x_f_w_bf16m4_i8m2_rm(
+// CHECK-RV64-SAME: <vscale x 16 x bfloat> [[VS2:%.*]], i64 noundef [[VL:%.*]]) #[[ATTR0]] {
+// CHECK-RV64-NEXT: [[ENTRY:.*:]]
+// CHECK-RV64-NEXT: [[TMP0:%.*]] = call <vscale x 16 x i8> @llvm.riscv.vfncvt.x.f.w.nxv16i8.nxv16bf16.i64(<vscale x 16 x i8> poison, <vscale x 16 x bfloat> [[VS2]], i64 0, i64 [[VL]])
+// CHECK-RV64-NEXT: ret <vscale x 16 x i8> [[TMP0]]
+//
+vint8m2_t test_vfncvt_x_f_w_bf16m4_i8m2_rm(vbfloat16m4_t vs2, size_t vl) {
+ return __riscv_vfncvt_x(vs2, __RISCV_FRM_RNE, vl);
+}
+
+// CHECK-RV64-LABEL: define dso_local <vscale x 32 x i8> @test_vfncvt_x_f_w_bf16m8_i8m4_rm(
+// CHECK-RV64-SAME: <vscale x 32 x bfloat> [[VS2:%.*]], i64 noundef [[VL:%.*]]) #[[ATTR0]] {
+// CHECK-RV64-NEXT: [[ENTRY:.*:]]
+// CHECK-RV64-NEXT: [[TMP0:%.*]] = call <vscale x 32 x i8> @llvm.riscv.vfncvt.x.f.w.nxv32i8.nxv32bf16.i64(<vscale x 32 x i8> poison, <vscale x 32 x bfloat> [[VS2]], i64 0, i64 [[VL]])
+// CHECK-RV64-NEXT: ret <vscale x 32 x i8> [[TMP0]]
+//
+vint8m4_t test_vfncvt_x_f_w_bf16m8_i8m4_rm(vbfloat16m8_t vs2, size_t vl) {
+ return __riscv_vfncvt_x(vs2, __RISCV_FRM_RNE, vl);
+}
+
+// CHECK-RV64-LABEL: define dso_local <vscale x 1 x i8> @test_vfncvt_xu_f_w_bf16mf4_u8mf8_rm(
+// CHECK-RV64-SAME: <vscale x 1 x bfloat> [[VS2:%.*]], i64 noundef [[VL:%.*]]) #[[ATTR0]] {
+// CHECK-RV64-NEXT: [[ENTRY:.*:]]
+// CHECK-RV64-NEXT: [[TMP0:%.*]] = call <vscale x 1 x i8> @llvm.riscv.vfncvt.xu.f.w.nxv1i8.nxv1bf16.i64(<vscale x 1 x i8> poison, <vscale x 1 x bfloat> [[VS2]], i64 0, i64 [[VL]])
+// CHECK-RV64-NEXT: ret <vscale x 1 x i8> [[TMP0]]
+//
+vuint8mf8_t test_vfncvt_xu_f_w_bf16mf4_u8mf8_rm(vbfloat16mf4_t vs2, size_t vl) {
+ return __riscv_vfncvt_xu(vs2, __RISCV_FRM_RNE, vl);
+}
+
+// CHECK-RV64-LABEL: define dso_local <vscale x 2 x i8> @test_vfncvt_xu_f_w_bf16mf2_u8mf4_rm(
+// CHECK-RV64-SAME: <vscale x 2 x bfloat> [[VS2:%.*]], i64 noundef [[VL:%.*]]) #[[ATTR0]] {
+// CHECK-RV64-NEXT: [[ENTRY:.*:]]
+// CHECK-RV64-NEXT: [[TMP0:%.*]] = call <vscale x 2 x i8> @llvm.riscv.vfncvt.xu.f.w.nxv2i8.nxv2bf16.i64(<vscale x 2 x i8> poison, <vscale x 2 x bfloat> [[VS2]], i64 0, i64 [[VL]])
+// CHECK-RV64-NEXT: ret <vscale x 2 x i8> [[TMP0]]
+//
+vuint8mf4_t test_vfncvt_xu_f_w_bf16mf2_u8mf4_rm(vbfloat16mf2_t vs2, size_t vl) {
+ return __riscv_vfncvt_xu(vs2, __RISCV_FRM_RNE, vl);
+}
+
+// CHECK-RV64-LABEL: define dso_local <vscale x 4 x i8> @test_vfncvt_xu_f_w_bf16m1_u8mf2_rm(
+// CHECK-RV64-SAME: <vscale x 4 x bfloat> [[VS2:%.*]], i64 noundef [[VL:%.*]]) #[[ATTR0]] {
+// CHECK-RV64-NEXT: [[ENTRY:.*:]]
+// CHECK-RV64-NEXT: [[TMP0:%.*]] = call <vscale x 4 x i8> @llvm.riscv.vfncvt.xu.f.w.nxv4i8.nxv4bf16.i64(<vscale x 4 x i8> poison, <vscale x 4 x bfloat> [[VS2]], i64 0, i64 [[VL]])
+// CHECK-RV64-NEXT: ret <vscale x 4 x i8> [[TMP0]]
+//
+vuint8mf2_t test_vfncvt_xu_f_w_bf16m1_u8mf2_rm(vbfloat16m1_t vs2, size_t vl) {
+ return __riscv_vfncvt_xu(vs2, __RISCV_FRM_RNE, vl);
+}
+
+// CHECK-RV64-LABEL: define dso_local <vscale x 8 x i8> @test_vfncvt_xu_f_w_bf16m2_u8m1_rm(
+// CHECK-RV64-SAME: <vscale x 8 x bfloat> [[VS2:%.*]], i64 noundef [[VL:%.*]]) #[[ATTR0]] {
+// CHECK-RV64-NEXT: [[ENTRY:.*:]]
+// CHECK-RV64-NEXT: [[TMP0:%.*]] = call <vscale x 8 x i8> @llvm.riscv.vfncvt.xu.f.w.nxv8i8.nxv8bf16.i64(<vscale x 8 x i8> poison, <vscale x 8 x bfloat> [[VS2]], i64 0, i64 [[VL]])
+// CHECK-RV64-NEXT: ret <vscale x 8 x i8> [[TMP0]]
+//
+vuint8m1_t test_vfncvt_xu_f_w_bf16m2_u8m1_rm(vbfloat16m2_t vs2, size_t vl) {
+ return __riscv_vfncvt_xu(vs2, __RISCV_FRM_RNE, vl);
+}
+
+// CHECK-RV64-LABEL: define dso_local <vscale x 16 x i8> @test_vfncvt_xu_f_w_bf16m4_u8m2_rm(
+// CHECK-RV64-SAME: <vscale x 16 x bfloat> [[VS2:%.*]], i64 noundef [[VL:%.*]]) #[[ATTR0]] {
+// CHECK-RV64-NEXT: [[ENTRY:.*:]]
+// CHECK-RV64-NEXT: [[TMP0:%.*]] = call <vscale x 16 x i8> @llvm.riscv.vfncvt.xu.f.w.nxv16i8.nxv16bf16.i64(<vscale x 16 x i8> poison, <vscale x 16 x bfloat> [[VS2]], i64 0, i64 [[VL]])
+// CHECK-RV64-NEXT: ret <vscale x 16 x i8> [[TMP0]]
+//
+vuint8m2_t test_vfncvt_xu_f_w_bf16m4_u8m2_rm(vbfloat16m4_t vs2, size_t vl) {
+ return __riscv_vfncvt_xu(vs2, __RISCV_FRM_RNE, vl);
+}
+
+// CHECK-RV64-LABEL: define dso_local <vscale x 32 x i8> @test_vfncvt_xu_f_w_bf16m8_u8m4_rm(
+// CHECK-RV64-SAME: <vscale x 32 x bfloat> [[VS2:%.*]], i64 noundef [[VL:%.*]]) #[[ATTR0]] {
+// CHECK-RV64-NEXT: [[ENTRY:.*:]]
+// CHECK-RV64-NEXT: [[TMP0:%.*]] = call <vscale x 32 x i8> @llvm.riscv.vfncvt.xu.f.w.nxv32i8.nxv32bf16.i64(<vscale x 32 x i8> poison, <vscale x 32 x bfloat> [[VS2]], i64 0, i64 [[VL]])
+// CHECK-RV64-NEXT: ret <vscale x 32 x i8> [[TMP0]]
+//
+vuint8m4_t test_vfncvt_xu_f_w_bf16m8_u8m4_rm(vbfloat16m8_t vs2, size_t vl) {
+ return __riscv_vfncvt_xu(vs2, __RISCV_FRM_RNE, vl);
+}
+
+// CHECK-RV64-LABEL: define dso_local <vscale x 1 x bfloat> @test_vfncvt_f_f_w_bf16mf4_rm(
+// CHECK-RV64-SAME: <vscale x 1 x float> [[VS2:%.*]], i64 noundef [[VL:%.*]]) #[[ATTR0]] {
+// CHECK-RV64-NEXT: [[ENTRY:.*:]]
+// CHECK-RV64-NEXT: [[TMP0:%.*]] = call <vscale x 1 x bfloat> @llvm.riscv.vfncvt.f.f.w.nxv1bf16.nxv1f32.i64(<vscale x 1 x bfloat> poison, <vscale x 1 x float> [[VS2]], i64 0, i64 [[VL]])
+// CHECK-RV64-NEXT: ret <vscale x 1 x bfloat> [[TMP0]]
+//
+vbfloat16mf4_t test_vfncvt_f_f_w_bf16mf4_rm(vfloat32mf2_t vs2, size_t vl) {
+ return __riscv_vfncvt_f_bf16(vs2, __RISCV_FRM_RNE, vl);
+}
+
+// CHECK-RV64-LABEL: define dso_local <vscale x 2 x bfloat> @test_vfncvt_f_f_w_bf16mf2_rm(
+// CHECK-RV64-SAME: <vscale x 2 x float> [[VS2:%.*]], i64 noundef [[VL:%.*]]) #[[ATTR0]] {
+// CHECK-RV64-NEXT: [[ENTRY:.*:]]
+// CHECK-RV64-NEXT: [[TMP0:%.*]] = call <vscale x 2 x bfloat> @llvm.riscv.vfncvt.f.f.w.nxv2bf16.nxv2f32.i64(<vscale x 2 x bfloat> poison, <vscale x 2 x float> [[VS2]], i64 0, i64 [[VL]])
+// CHECK-RV64-NEXT: ret <vscale x 2 x bfloat> [[TMP0]]
+//
+vbfloat16mf2_t test_vfncvt_f_f_w_bf16mf2_rm(vfloat32m1_t vs2, size_t vl) {
+ return __riscv_vfncvt_f_bf16(vs2, __RISCV_FRM_RNE, vl);
+}
+
+// CHECK-RV64-LABEL: define dso_local <vscale x 4 x bfloat> @test_vfncvt_f_f_w_bf16m1_rm(
+// CHECK-RV64-SAME: <vscale x 4 x float> [[VS2:%.*]], i64 noundef [[VL:%.*]]) #[[ATTR0]] {
+// CHECK-RV64-NEXT: [[ENTRY:.*:]]
+// CHECK-RV64-NEXT: [[TMP0:%.*]] = call <vscale x 4 x bfloat> @llvm.riscv.vfncvt.f.f.w.nxv4bf16.nxv4f32.i64(<vscale x 4 x bfloat> poison, <vscale x 4 x float> [[VS2]], i64 0, i64 [[VL]])
+// CHECK-RV64-NEXT: ret <vscale x 4 x bfloat> [[TMP0]]
+//
+vbfloat16m1_t test_vfncvt_f_f_w_bf16m1_rm(vfloat32m2_t vs2, size_t vl) {
+ return __riscv_vfncvt_f_bf16(vs2, __RISCV_FRM_RNE, vl);
+}
+
+// CHECK-RV64-LABEL: define dso_local <vscale x 8 x bfloat> @test_vfncvt_f_f_w_bf16m2_rm(
+// CHECK-RV64-SAME: <vscale x 8 x float> [[VS2:%.*]], i64 noundef [[VL:%.*]]) #[[ATTR0]] {
+// CHECK-RV64-NEXT: [[ENTRY:.*:]]
+// CHECK-RV64-NEXT: [[TMP0:%.*]] = call <vscale x 8 x bfloat> @llvm.riscv.vfncvt.f.f.w.nxv8bf16.nxv8f32.i64(<vscale x 8 x bfloat> poison, <vscale x 8 x float> [[VS2]], i64 0, i64 [[VL]])
+// CHECK-RV64-NEXT: ret <vscale x 8 x bfloat> [[TMP0]]
+//
+vbfloat16m2_t test_vfncvt_f_f_w_bf16m2_rm(vfloat32m4_t vs2, size_t vl) {
+ return __riscv_vfncvt_f_bf16(vs2, __RISCV_FRM_RNE, vl);
+}
+
+// CHECK-RV64-LABEL: define dso_local <vscale x 16 x bfloat> @test_vfncvt_f_f_w_bf16m4_rm(
+// CHECK-RV64-SAME: <vscale x 16 x float> [[VS2:%.*]], i64 noundef [[VL:%.*]]) #[[ATTR0]] {
+// CHECK-RV64-NEXT: [[ENTRY:.*:]]
+// CHECK-RV64-NEXT: [[TMP0:%.*]] = call <vscale x 16 x bfloat> @llvm.riscv.vfncvt.f.f.w.nxv16bf16.nxv16f32.i64(<vscale x 16 x bfloat> poison, <vscale x 16 x float> [[VS2]], i64 0, i64 [[VL]])
+// CHECK-RV64-NEXT: ret <vscale x 16 x bfloat> [[TMP0]]
+//
+vbfloat16m4_t test_vfncvt_f_f_w_bf16m4_rm(vfloat32m8_t vs2, size_t vl) {
+ return __riscv_vfncvt_f_bf16(vs2, __RISCV_FRM_RNE, vl);
+}
+
+// CHECK-RV64-LABEL: define dso_local <vscale x 1 x i8> @test_vfncvt_x_f_w_bf16mf4_i8mf8_rm_m(
+// CHECK-RV64-SAME: <vscale x 1 x i1> [[VM:%.*]], <vscale x 1 x bfloat> [[VS2:%.*]], i64 noundef [[VL:%.*]]) #[[ATTR0]] {
+// CHECK-RV64-NEXT: [[ENTRY:.*:]]
+// CHECK-RV64-NEXT: [[TMP0:%.*]] = call <vscale x 1 x i8> @llvm.riscv.vfncvt.x.f.w.mask.nxv1i8.nxv1bf16.i64(<vscale x 1 x i8> poison, <vscale x 1 x bfloat> [[VS2]], <vscale x 1 x i1> [[VM]], i64 0, i64 [[VL]], i64 3)
+// CHECK-RV64-NEXT: ret <vscale x 1 x i8> [[TMP0]]
+//
+vint8mf8_t test_vfncvt_x_f_w_bf16mf4_i8mf8_rm_m(vbool64_t vm,
+ vbfloat16mf4_t vs2, size_t vl) {
+ return __riscv_vfncvt_x(vm, vs2, __RISCV_FRM_RNE, vl);
+}
+
+// CHECK-RV64-LABEL: define dso_local <vscale x 2 x i8> @test_vfncvt_x_f_w_bf16mf2_i8mf4_rm_m(
+// CHECK-RV64-SAME: <vscale x 2 x i1> [[VM:%.*]], <vscale x 2 x bfloat> [[VS2:%.*]], i64 noundef [[VL:%.*]]) #[[ATTR0]] {
+// CHECK-RV64-NEXT: [[ENTRY:.*:]]
+// CHECK-RV64-NEXT: [[TMP0:%.*]] = call <vscale x 2 x i8> @llvm.riscv.vfncvt.x.f.w.mask.nxv2i8.nxv2bf16.i64(<vscale x 2 x i8> poison, <vscale x 2 x bfloat> [[VS2]], <vscale x 2 x i1> [[VM]], i64 0, i64 [[VL]], i64 3)
+// CHECK-RV64-NEXT: ret <vscale x 2 x i8> [[TMP0]]
+//
+vint8mf4_t test_vfncvt_x_f_w_bf16mf2_i8mf4_rm_m(vbool32_t vm,
+ vbfloat16mf2_t vs2, size_t vl) {
+ return __riscv_vfncvt_x(vm, vs2, __RISCV_FRM_RNE, vl);
+}
+
+// CHECK-RV64-LABEL: define dso_local <vscale x 4 x i8> @test_vfncvt_x_f_w_bf16m1_i8mf2_rm_m(
+// CHECK-RV64-SAME: <vscale x 4 x i1> [[VM:%.*]], <vscale x 4 x bfloat> [[VS2:%.*]], i64 noundef [[VL:%.*]]) #[[ATTR0]] {
+// CHECK-RV64-NEXT: [[ENTRY:.*:]]
+// CHECK-RV64-NEXT: [[TMP0:%.*]] = call <vscale x 4 x i8> @llvm.riscv.vfncvt.x.f.w.mask.nxv4i8.nxv4bf16.i64(<vscale x 4 x i8> poison, <vscale x 4 x bfloat> [[VS2]], <vscale x 4 x i1> [[VM]], i64 0, i64 [[VL]], i64 3)
+// CHECK-RV64-NEXT: ret <vscale x 4 x i8> [[TMP0]]
+//
+vint8mf2_t test_vfncvt_x_f_w_bf16m1_i8mf2_rm_m(vbool16_t vm, vbfloat16m1_t vs2,
+ size_t vl) {
+ return __riscv_vfncvt_x(vm, vs2, __RISCV_FRM_RNE, vl);
+}
+
+// CHECK-RV64-LABEL: define dso_local <vscale x 8 x i8> @test_vfncvt_x_f_w_bf16m2_i8m1_rm_m(
+// CHECK-RV64-SAME: <vscale x 8 x i1> [[VM:%.*]], <vscale x 8 x bfloat> [[VS2:%.*]], i64 noundef [[VL:%.*]]) #[[ATTR0]] {
+// CHECK-RV64-NEXT: [[ENTRY:.*:]]
+// CHECK-RV64-NEXT: [[TMP0:%.*]] = call <vscale x 8 x i8> @llvm.riscv.vfncvt.x.f.w.mask.nxv8i8.nxv8bf16.i64(<vscale x 8 x i8> poison, <vscale x 8 x bfloat> [[VS2]], <vscale x 8 x i1> [[VM]], i64 0, i64 [[VL]], i64 3)
+// CHECK-RV64-NEXT: ret <vscale x 8 x i8> [[TMP0]]
+//
+vint8m1_t test_vfncvt_x_f_w_bf16m2_i8m1_rm_m(vbool8_t vm, vbfloat16m2_t vs2,
+ size_t vl) {
+ return __riscv_vfncvt_x(vm, vs2, __RISCV_FRM_RNE, vl);
+}
+
+// CHECK-RV64-LABEL: define dso_local <vscale x 16 x i8> @test_vfncvt_x_f_w_bf16m4_i8m2_rm_m(
+// CHECK-RV64-SAME: <vscale x 16 x i1> [[VM:%.*]], <vscale x 16 x bfloat> [[VS2:%.*]], i64 noundef [[VL:%.*]]) #[[ATTR0]] {
+// CHECK-RV64-NEXT: [[ENTRY:.*:]]
+// CHECK-RV64-NEXT: [[TMP0:%.*]] = call <vscale x 16 x i8> @llvm.riscv.vfncvt.x.f.w.mask.nxv16i8.nxv16bf16.i64(<vscale x 16 x i8> poison, <vscale x 16 x bfloat> [[VS2]], <vscale x 16 x i1> [[VM]], i64 0, i64 [[VL]], i64 3)
+// CHECK-RV64-NEXT: ret <vscale x 16 x i8> [[TMP0]]
+//
+vint8m2_t test_vfncvt_x_f_w_bf16m4_i8m2_rm_m(vbool4_t vm, vbfloat16m4_t vs2,
+ size_t vl) {
+ return __riscv_vfncvt_x(vm, vs2, __RISCV_FRM_RNE, vl);
+}
+
+// CHECK-RV64-LABEL: define dso_local <vscale x 32 x i8> @test_vfncvt_x_f_w_bf16m8_i8m4_rm_m(
+// CHECK-RV64-SAME: <vscale x 32 x i1> [[VM:%.*]], <vscale x 32 x bfloat> [[VS2:%.*]], i64 noundef [[VL:%.*]]) #[[ATTR0]] {
+// CHECK-RV64-NEXT: [[ENTRY:.*:]]
+// CHECK-RV64-NEXT: [[TMP0:%.*]] = call <vscale x 32 x i8> @llvm.riscv.vfncvt.x.f.w.mask.nxv32i8.nxv32bf16.i64(<vscale x 32 x i8> poison, <vscale x 32 x bfloat> [[VS2]], <vscale x 32 x i1> [[VM]], i64 0, i64 [[VL]], i64 3)
+// CHECK-RV64-NEXT: ret <vscale x 32 x i8> [[TMP0]]
+//
+vint8m4_t test_vfncvt_x_f_w_bf16m8_i8m4_rm_m(vbool2_t vm, vbfloat16m8_t vs2,
+ size_t vl) {
+ return __riscv_vfncvt_x(vm, vs2, __RISCV_FRM_RNE, vl);
+}
+
+// CHECK-RV64-LABEL: define dso_local <vscale x 1 x i8> @test_vfncvt_xu_f_w_bf16mf4_u8mf8_rm_m(
+// CHECK-RV64-SAME: <vscale x 1 x i1> [[VM:%.*]], <vscale x 1 x bfloat> [[VS2:%.*]], i64 noundef [[VL:%.*]]) #[[ATTR0]] {
+// CHECK-RV64-NEXT: [[ENTRY:.*:]]
+// CHECK-RV64-NEXT: [[TMP0:%.*]] = call <vscale x 1 x i8> @llvm.riscv.vfncvt.xu.f.w.mask.nxv1i8.nxv1bf16.i64(<vscale x 1 x i8> poison, <vscale x 1 x bfloat> [[VS2]], <vscale x 1 x i1> [[VM]], i64 0, i64 [[VL]], i64 3)
+// CHECK-RV64-NEXT: ret <vscale x 1 x i8> [[TMP0]]
+//
+vuint8mf8_t test_vfncvt_xu_f_w_bf16mf4_u8mf8_rm_m(vbool64_t vm,
+ vbfloat16mf4_t vs2,
+ size_t vl) {
+ return __riscv_vfncvt_xu(vm, vs2, __RISCV_FRM_RNE, vl);
+}
+
+// CHECK-RV64-LABEL: define dso_local <vscale x 2 x i8> @test_vfncvt_xu_f_w_bf16mf2_u8mf4_rm_m(
+// CHECK-RV64-SAME: <vscale x 2 x i1> [[VM:%.*]], <vscale x 2 x bfloat> [[VS2:%.*]], i64 noundef [[VL:%.*]]) #[[ATTR0]] {
+// CHECK-RV64-NEXT: [[ENTRY:.*:]]
+// CHECK-RV64-NEXT: [[TMP0:%.*]] = call <vscale x 2 x i8> @llvm.riscv.vfncvt.xu.f.w.mask.nxv2i8.nxv2bf16.i64(<vscale x 2 x i8> poison, <vscale x 2 x bfloat> [[VS2]], <vscale x 2 x i1> [[VM]], i64 0, i64 [[VL]], i64 3)
+// CHECK-RV64-NEXT: ret <vscale x 2 x i8> [[TMP0]]
+//
+vuint8mf4_t test_vfncvt_xu_f_w_bf16mf2_u8mf4_rm_m(vbool32_t vm,
+ vbfloat16mf2_t vs2,
+ size_t vl) {
+ return __riscv_vfncvt_xu(vm, vs2, __RISCV_FRM_RNE, vl);
+}
+
+// CHECK-RV64-LABEL: define dso_local <vscale x 4 x i8> @test_vfncvt_xu_f_w_bf16m1_u8mf2_rm_m(
+// CHECK-RV64-SAME: <vscale x 4 x i1> [[VM:%.*]], <vscale x 4 x bfloat> [[VS2:%.*]], i64 noundef [[VL:%.*]]) #[[ATTR0]] {
+// CHECK-RV64-NEXT: [[ENTRY:.*:]]
+// CHECK-RV64-NEXT: [[TMP0:%.*]] = call <vscale x 4 x i8> @llvm.riscv.vfncvt.xu.f.w.mask.nxv4i8.nxv4bf16.i64(<vscale x 4 x i8> poison, <vscale x 4 x bfloat> [[VS2]], <vscale x 4 x i1> [[VM]], i64 0, i64 [[VL]], i64 3)
+// CHECK-RV64-NEXT: ret <vscale x 4 x i8> [[TMP0]]
+//
+vuint8mf2_t test_vfncvt_xu_f_w_bf16m1_u8mf2_rm_m(vbool16_t vm,
+ vbfloat16m1_t vs2, size_t vl) {
+ return __riscv_vfncvt_xu(vm, vs2, __RISCV_FRM_RNE, vl);
+}
+
+// CHECK-RV64-LABEL: define dso_local <vscale x 8 x i8> @test_vfncvt_xu_f_w_bf16m2_u8m1_rm_m(
+// CHECK-RV64-SAME: <vscale x 8 x i1> [[VM:%.*]], <vscale x 8 x bfloat> [[VS2:%.*]], i64 noundef [[VL:%.*]]) #[[ATTR0]] {
+// CHECK-RV64-NEXT: [[ENTRY:.*:]]
+// CHECK-RV64-NEXT: [[TMP0:%.*]] = call <vscale x 8 x i8> @llvm.riscv.vfncvt.xu.f.w.mask.nxv8i8.nxv8bf16.i64(<vscale x 8 x i8> poison, <vscale x 8 x bfloat> [[VS2]], <vscale x 8 x i1> [[VM]], i64 0, i64 [[VL]], i64 3)
+// CHECK-RV64-NEXT: ret <vscale x 8 x i8> [[TMP0]]
+//
+vuint8m1_t test_vfncvt_xu_f_w_bf16m2_u8m1_rm_m(vbool8_t vm, vbfloat16m2_t vs2,
+ size_t vl) {
+ return __riscv_vfncvt_xu(vm, vs2, __RISCV_FRM_RNE, vl);
+}
+
+// CHECK-RV64-LABEL: define dso_local <vscale x 16 x i8> @test_vfncvt_xu_f_w_bf16m4_u8m2_rm_m(
+// CHECK-RV64-SAME: <vscale x 16 x i1> [[VM:%.*]], <vscale x 16 x bfloat> [[VS2:%.*]], i64 noundef [[VL:%.*]]) #[[ATTR0]] {
+// CHECK-RV64-NEXT: [[ENTRY:.*:]]
+// CHECK-RV64-NEXT: [[TMP0:%.*]] = call <vscale x 16 x i8> @llvm.riscv.vfncvt.xu.f.w.mask.nxv16i8.nxv16bf16.i64(<vscale x 16 x i8> poison, <vscale x 16 x bfloat> [[VS2]], <vscale x 16 x i1> [[VM]], i64 0, i64 [[VL]], i64 3)
+// CHECK-RV64-NEXT: ret <vscale x 16 x i8> [[TMP0]]
+//
+vuint8m2_t test_vfncvt_xu_f_w_bf16m4_u8m2_rm_m(vbool4_t vm, vbfloat16m4_t vs2,
+ size_t vl) {
+ return __riscv_vfncvt_xu(vm, vs2, __RISCV_FRM_RNE, vl);
+}
+
+// CHECK-RV64-LABEL: define dso_local <vscale x 32 x i8> @test_vfncvt_xu_f_w_bf16m8_u8m4_rm_m(
+// CHECK-RV64-SAME: <vscale x 32 x i1> [[VM:%.*]], <vscale x 32 x bfloat> [[VS2:%.*]], i64 noundef [[VL:%.*]]) #[[ATTR0]] {
+// CHECK-RV64-NEXT: [[ENTRY:.*:]]
+// CHECK-RV64-NEXT: [[TMP0:%.*]] = call <vscale x 32 x i8> @llvm.riscv.vfncvt.xu.f.w.mask.nxv32i8.nxv32bf16.i64(<vscale x 32 x i8> poison, <vscale x 32 x bfloat> [[VS2]], <vscale x 32 x i1> [[VM]], i64 0, i64 [[VL]], i64 3)
+// CHECK-RV64-NEXT: ret <vscale x 32 x i8> [[TMP0]]
+//
+vuint8m4_t test_vfncvt_xu_f_w_bf16m8_u8m4_rm_m(vbool2_t vm, vbfloat16m8_t vs2,
+ size_t vl) {
+ return __riscv_vfncvt_xu(vm, vs2, __RISCV_FRM_RNE, vl);
+}
+
+// CHECK-RV64-LABEL: define dso_local <vscale x 1 x bfloat> @test_vfncvt_f_f_w_bf16mf4_rm_m(
+// CHECK-RV64-SAME: <vscale x 1 x i1> [[VM:%.*]], <vscale x 1 x float> [[VS2:%.*]], i64 noundef [[VL:%.*]]) #[[ATTR0]] {
+// CHECK-RV64-NEXT: [[ENTRY:.*:]]
+// CHECK-RV64-NEXT: [[TMP0:%.*]] = call <vscale x 1 x bfloat> @llvm.riscv.vfncvt.f.f.w.mask.nxv1bf16.nxv1f32.i64(<vscale x 1 x bfloat> poison, <vscale x 1 x float> [[VS2]], <vscale x 1 x i1> [[VM]], i64 0, i64 [[VL]], i64 3)
+// CHECK-RV64-NEXT: ret <vscale x 1 x bfloat> [[TMP0]]
+//
+vbfloat16mf4_t test_vfncvt_f_f_w_bf16mf4_rm_m(vbool64_t vm, vfloat32mf2_t vs2,
+ size_t vl) {
+ return __riscv_vfncvt_f_bf16(vm, vs2, __RISCV_FRM_RNE, vl);
+}
+
+// CHECK-RV64-LABEL: define dso_local <vscale x 2 x bfloat> @test_vfncvt_f_f_w_bf16mf2_rm_m(
+// CHECK-RV64-SAME: <vscale x 2 x i1> [[VM:%.*]], <vscale x 2 x float> [[VS2:%.*]], i64 noundef [[VL:%.*]]) #[[ATTR0]] {
+// CHECK-RV64-NEXT: [[ENTRY:.*:]]
+// CHECK-RV64-NEXT: [[TMP0:%.*]] = call <vscale x 2 x bfloat> @llvm.riscv.vfncvt.f.f.w.mask.nxv2bf16.nxv2f32.i64(<vscale x 2 x bfloat> poison, <vscale x 2 x float> [[VS2]], <vscale x 2 x i1> [[VM]], i64 0, i64 [[VL]], i64 3)
+// CHECK-RV64-NEXT: ret <vscale x 2 x bfloat> [[TMP0]]
+//
+vbfloat16mf2_t test_vfncvt_f_f_w_bf16mf2_rm_m(vbool32_t vm, vfloat32m1_t vs2,
+ size_t vl) {
+ return __riscv_vfncvt_f_bf16(vm, vs2, __RISCV_FRM_RNE, vl);
+}
+
+// CHECK-RV64-LABEL: define dso_local <vscale x 4 x bfloat> @test_vfncvt_f_f_w_bf16m1_rm_m(
+// CHECK-RV64-SAME: <vscale x 4 x i1> [[VM:%.*]], <vscale x 4 x float> [[VS2:%.*]], i64 noundef [[VL:%.*]]) #[[ATTR0]] {
+// CHECK-RV64-NEXT: [[ENTRY:.*:]]
+// CHECK-RV64-NEXT: [[TMP0:%.*]] = call <vscale x 4 x bfloat> @llvm.riscv.vfncvt.f.f.w.mask.nxv4bf16.nxv4f32.i64(<vscale x 4 x bfloat> poison, <vscale x 4 x float> [[VS2]], <vscale x 4 x i1> [[VM]], i64 0, i64 [[VL]], i64 3)
+// CHECK-RV64-NEXT: ret <vscale x 4 x bfloat> [[TMP0]]
+//
+vbfloat16m1_t test_vfncvt_f_f_w_bf16m1_rm_m(vbool16_t vm, vfloat32m2_t vs2,
+ size_t vl) {
+ return __riscv_vfncvt_f_bf16(vm, vs2, __RISCV_FRM_RNE, vl);
+}
+
+// CHECK-RV64-LABEL: define dso_local <vscale x 8 x bfloat> @test_vfncvt_f_f_w_bf16m2_rm_m(
+// CHECK-RV64-SAME: <vscale x 8 x i1> [[VM:%.*]], <vscale x 8 x float> [[VS2:%.*]], i64 noundef [[VL:%.*]]) #[[ATTR0]] {
+// CHECK-RV64-NEXT: [[ENTRY:.*:]]
+// CHECK-RV64-NEXT: [[TMP0:%.*]] = call <vscale x 8 x bfloat> @llvm.riscv.vfncvt.f.f.w.mask.nxv8bf16.nxv8f32.i64(<vscale x 8 x bfloat> poison, <vscale x 8 x float> [[VS2]], <vscale x 8 x i1> [[VM]], i64 0, i64 [[VL]], i64 3)
+// CHECK-RV64-NEXT: ret <vscale x 8 x bfloat> [[TMP0]]
+//
+vbfloat16m2_t test_vfncvt_f_f_w_bf16m2_rm_m(vbool8_t vm, vfloat32m4_t vs2,
+ size_t vl) {
+ return __riscv_vfncvt_f_bf16(vm, vs2, __RISCV_FRM_RNE, vl);
+}
+
+// CHECK-RV64-LABEL: define dso_local <vscale x 16 x bfloat> @test_vfncvt_f_f_w_bf16m4_rm_m(
+// CHECK-RV64-SAME: <vscale x 16 x i1> [[VM:%.*]], <vscale x 16 x float> [[VS2:%.*]], i64 noundef [[VL:%.*]]) #[[ATTR0]] {
+// CHECK-RV64-NEXT: [[ENTRY:.*:]]
+// CHECK-RV64-NEXT: [[TMP0:%.*]] = call <vscale x 16 x bfloat> @llvm.riscv.vfncvt.f.f.w.mask.nxv16bf16.nxv16f32.i64(<vscale x 16 x bfloat> poison, <vscale x 16 x float> [[VS2]], <vscale x 16 x i1> [[VM]], i64 0, i64 [[VL]], i64 3)
+// CHECK-RV64-NEXT: ret <vscale x 16 x bfloat> [[TMP0]]
+//
+vbfloat16m4_t test_vfncvt_f_f_w_bf16m4_rm_m(vbool4_t vm, vfloat32m8_t vs2,
+ size_t vl) {
+ return __riscv_vfncvt_f_bf16(vm, vs2, __RISCV_FRM_RNE, vl);
+}
diff --git a/clang/test/CodeGen/RISCV/rvv-intrinsics-autogenerated/zvfbfa/non-policy/overloaded/vfncvt_rod.c b/clang/test/CodeGen/RISCV/rvv-intrinsics-autogenerated/zvfbfa/non-policy/overloaded/vfncvt_rod.c
new file mode 100644
index 0000000..1ad856d
--- /dev/null
+++ b/clang/test/CodeGen/RISCV/rvv-intrinsics-autogenerated/zvfbfa/non-policy/overloaded/vfncvt_rod.c
@@ -0,0 +1,113 @@
+// NOTE: Assertions have been autogenerated by utils/update_cc_test_checks.py UTC_ARGS: --version 5
+// REQUIRES: riscv-registered-target
+// RUN: %clang_cc1 -triple riscv64 -target-feature +v \
+// RUN: -target-feature +experimental-zvfbfa -disable-O0-optnone \
+// RUN: -emit-llvm %s -o - | opt -S -passes=mem2reg | \
+// RUN: FileCheck --check-prefix=CHECK-RV64 %s
+
+#include <riscv_vector.h>
+
+// CHECK-RV64-LABEL: define dso_local <vscale x 1 x bfloat> @test_vfncvt_rod_f_f_w_bf16mf4(
+// CHECK-RV64-SAME: <vscale x 1 x float> [[VS2:%.*]], i64 noundef [[VL:%.*]]) #[[ATTR0:[0-9]+]] {
+// CHECK-RV64-NEXT: [[ENTRY:.*:]]
+// CHECK-RV64-NEXT: [[TMP0:%.*]] = call <vscale x 1 x bfloat> @llvm.riscv.vfncvt.rod.f.f.w.nxv1bf16.nxv1f32.i64(<vscale x 1 x bfloat> poison, <vscale x 1 x float> [[VS2]], i64 [[VL]])
+// CHECK-RV64-NEXT: ret <vscale x 1 x bfloat> [[TMP0]]
+//
+vbfloat16mf4_t test_vfncvt_rod_f_f_w_bf16mf4(vfloat32mf2_t vs2, size_t vl) {
+ return __riscv_vfncvt_rod_f_bf16(vs2, vl);
+}
+
+// CHECK-RV64-LABEL: define dso_local <vscale x 2 x bfloat> @test_vfncvt_rod_f_f_w_bf16mf2(
+// CHECK-RV64-SAME: <vscale x 2 x float> [[VS2:%.*]], i64 noundef [[VL:%.*]]) #[[ATTR0]] {
+// CHECK-RV64-NEXT: [[ENTRY:.*:]]
+// CHECK-RV64-NEXT: [[TMP0:%.*]] = call <vscale x 2 x bfloat> @llvm.riscv.vfncvt.rod.f.f.w.nxv2bf16.nxv2f32.i64(<vscale x 2 x bfloat> poison, <vscale x 2 x float> [[VS2]], i64 [[VL]])
+// CHECK-RV64-NEXT: ret <vscale x 2 x bfloat> [[TMP0]]
+//
+vbfloat16mf2_t test_vfncvt_rod_f_f_w_bf16mf2(vfloat32m1_t vs2, size_t vl) {
+ return __riscv_vfncvt_rod_f_bf16(vs2, vl);
+}
+
+// CHECK-RV64-LABEL: define dso_local <vscale x 4 x bfloat> @test_vfncvt_rod_f_f_w_bf16m1(
+// CHECK-RV64-SAME: <vscale x 4 x float> [[VS2:%.*]], i64 noundef [[VL:%.*]]) #[[ATTR0]] {
+// CHECK-RV64-NEXT: [[ENTRY:.*:]]
+// CHECK-RV64-NEXT: [[TMP0:%.*]] = call <vscale x 4 x bfloat> @llvm.riscv.vfncvt.rod.f.f.w.nxv4bf16.nxv4f32.i64(<vscale x 4 x bfloat> poison, <vscale x 4 x float> [[VS2]], i64 [[VL]])
+// CHECK-RV64-NEXT: ret <vscale x 4 x bfloat> [[TMP0]]
+//
+vbfloat16m1_t test_vfncvt_rod_f_f_w_bf16m1(vfloat32m2_t vs2, size_t vl) {
+ return __riscv_vfncvt_rod_f_bf16(vs2, vl);
+}
+
+// CHECK-RV64-LABEL: define dso_local <vscale x 8 x bfloat> @test_vfncvt_rod_f_f_w_bf16m2(
+// CHECK-RV64-SAME: <vscale x 8 x float> [[VS2:%.*]], i64 noundef [[VL:%.*]]) #[[ATTR0]] {
+// CHECK-RV64-NEXT: [[ENTRY:.*:]]
+// CHECK-RV64-NEXT: [[TMP0:%.*]] = call <vscale x 8 x bfloat> @llvm.riscv.vfncvt.rod.f.f.w.nxv8bf16.nxv8f32.i64(<vscale x 8 x bfloat> poison, <vscale x 8 x float> [[VS2]], i64 [[VL]])
+// CHECK-RV64-NEXT: ret <vscale x 8 x bfloat> [[TMP0]]
+//
+vbfloat16m2_t test_vfncvt_rod_f_f_w_bf16m2(vfloat32m4_t vs2, size_t vl) {
+ return __riscv_vfncvt_rod_f_bf16(vs2, vl);
+}
+
+// CHECK-RV64-LABEL: define dso_local <vscale x 16 x bfloat> @test_vfncvt_rod_f_f_w_bf16m4(
+// CHECK-RV64-SAME: <vscale x 16 x float> [[VS2:%.*]], i64 noundef [[VL:%.*]]) #[[ATTR0]] {
+// CHECK-RV64-NEXT: [[ENTRY:.*:]]
+// CHECK-RV64-NEXT: [[TMP0:%.*]] = call <vscale x 16 x bfloat> @llvm.riscv.vfncvt.rod.f.f.w.nxv16bf16.nxv16f32.i64(<vscale x 16 x bfloat> poison, <vscale x 16 x float> [[VS2]], i64 [[VL]])
+// CHECK-RV64-NEXT: ret <vscale x 16 x bfloat> [[TMP0]]
+//
+vbfloat16m4_t test_vfncvt_rod_f_f_w_bf16m4(vfloat32m8_t vs2, size_t vl) {
+ return __riscv_vfncvt_rod_f_bf16(vs2, vl);
+}
+
+// CHECK-RV64-LABEL: define dso_local <vscale x 1 x bfloat> @test_vfncvt_rod_f_f_w_bf16mf4_m(
+// CHECK-RV64-SAME: <vscale x 1 x i1> [[VM:%.*]], <vscale x 1 x float> [[VS2:%.*]], i64 noundef [[VL:%.*]]) #[[ATTR0]] {
+// CHECK-RV64-NEXT: [[ENTRY:.*:]]
+// CHECK-RV64-NEXT: [[TMP0:%.*]] = call <vscale x 1 x bfloat> @llvm.riscv.vfncvt.rod.f.f.w.mask.nxv1bf16.nxv1f32.i64(<vscale x 1 x bfloat> poison, <vscale x 1 x float> [[VS2]], <vscale x 1 x i1> [[VM]], i64 [[VL]], i64 3)
+// CHECK-RV64-NEXT: ret <vscale x 1 x bfloat> [[TMP0]]
+//
+vbfloat16mf4_t test_vfncvt_rod_f_f_w_bf16mf4_m(vbool64_t vm, vfloat32mf2_t vs2,
+ size_t vl) {
+ return __riscv_vfncvt_rod_f_bf16(vm, vs2, vl);
+}
+
+// CHECK-RV64-LABEL: define dso_local <vscale x 2 x bfloat> @test_vfncvt_rod_f_f_w_bf16mf2_m(
+// CHECK-RV64-SAME: <vscale x 2 x i1> [[VM:%.*]], <vscale x 2 x float> [[VS2:%.*]], i64 noundef [[VL:%.*]]) #[[ATTR0]] {
+// CHECK-RV64-NEXT: [[ENTRY:.*:]]
+// CHECK-RV64-NEXT: [[TMP0:%.*]] = call <vscale x 2 x bfloat> @llvm.riscv.vfncvt.rod.f.f.w.mask.nxv2bf16.nxv2f32.i64(<vscale x 2 x bfloat> poison, <vscale x 2 x float> [[VS2]], <vscale x 2 x i1> [[VM]], i64 [[VL]], i64 3)
+// CHECK-RV64-NEXT: ret <vscale x 2 x bfloat> [[TMP0]]
+//
+vbfloat16mf2_t test_vfncvt_rod_f_f_w_bf16mf2_m(vbool32_t vm, vfloat32m1_t vs2,
+ size_t vl) {
+ return __riscv_vfncvt_rod_f_bf16(vm, vs2, vl);
+}
+
+// CHECK-RV64-LABEL: define dso_local <vscale x 4 x bfloat> @test_vfncvt_rod_f_f_w_bf16m1_m(
+// CHECK-RV64-SAME: <vscale x 4 x i1> [[VM:%.*]], <vscale x 4 x float> [[VS2:%.*]], i64 noundef [[VL:%.*]]) #[[ATTR0]] {
+// CHECK-RV64-NEXT: [[ENTRY:.*:]]
+// CHECK-RV64-NEXT: [[TMP0:%.*]] = call <vscale x 4 x bfloat> @llvm.riscv.vfncvt.rod.f.f.w.mask.nxv4bf16.nxv4f32.i64(<vscale x 4 x bfloat> poison, <vscale x 4 x float> [[VS2]], <vscale x 4 x i1> [[VM]], i64 [[VL]], i64 3)
+// CHECK-RV64-NEXT: ret <vscale x 4 x bfloat> [[TMP0]]
+//
+vbfloat16m1_t test_vfncvt_rod_f_f_w_bf16m1_m(vbool16_t vm, vfloat32m2_t vs2,
+ size_t vl) {
+ return __riscv_vfncvt_rod_f_bf16(vm, vs2, vl);
+}
+
+// CHECK-RV64-LABEL: define dso_local <vscale x 8 x bfloat> @test_vfncvt_rod_f_f_w_bf16m2_m(
+// CHECK-RV64-SAME: <vscale x 8 x i1> [[VM:%.*]], <vscale x 8 x float> [[VS2:%.*]], i64 noundef [[VL:%.*]]) #[[ATTR0]] {
+// CHECK-RV64-NEXT: [[ENTRY:.*:]]
+// CHECK-RV64-NEXT: [[TMP0:%.*]] = call <vscale x 8 x bfloat> @llvm.riscv.vfncvt.rod.f.f.w.mask.nxv8bf16.nxv8f32.i64(<vscale x 8 x bfloat> poison, <vscale x 8 x float> [[VS2]], <vscale x 8 x i1> [[VM]], i64 [[VL]], i64 3)
+// CHECK-RV64-NEXT: ret <vscale x 8 x bfloat> [[TMP0]]
+//
+vbfloat16m2_t test_vfncvt_rod_f_f_w_bf16m2_m(vbool8_t vm, vfloat32m4_t vs2,
+ size_t vl) {
+ return __riscv_vfncvt_rod_f_bf16(vm, vs2, vl);
+}
+
+// CHECK-RV64-LABEL: define dso_local <vscale x 16 x bfloat> @test_vfncvt_rod_f_f_w_bf16m4_m(
+// CHECK-RV64-SAME: <vscale x 16 x i1> [[VM:%.*]], <vscale x 16 x float> [[VS2:%.*]], i64 noundef [[VL:%.*]]) #[[ATTR0]] {
+// CHECK-RV64-NEXT: [[ENTRY:.*:]]
+// CHECK-RV64-NEXT: [[TMP0:%.*]] = call <vscale x 16 x bfloat> @llvm.riscv.vfncvt.rod.f.f.w.mask.nxv16bf16.nxv16f32.i64(<vscale x 16 x bfloat> poison, <vscale x 16 x float> [[VS2]], <vscale x 16 x i1> [[VM]], i64 [[VL]], i64 3)
+// CHECK-RV64-NEXT: ret <vscale x 16 x bfloat> [[TMP0]]
+//
+vbfloat16m4_t test_vfncvt_rod_f_f_w_bf16m4_m(vbool4_t vm, vfloat32m8_t vs2,
+ size_t vl) {
+ return __riscv_vfncvt_rod_f_bf16(vm, vs2, vl);
+}
diff --git a/clang/test/CodeGen/RISCV/rvv-intrinsics-autogenerated/zvfbfa/non-policy/overloaded/vfncvt_rtz.c b/clang/test/CodeGen/RISCV/rvv-intrinsics-autogenerated/zvfbfa/non-policy/overloaded/vfncvt_rtz.c
new file mode 100644
index 0000000..12d0893
--- /dev/null
+++ b/clang/test/CodeGen/RISCV/rvv-intrinsics-autogenerated/zvfbfa/non-policy/overloaded/vfncvt_rtz.c
@@ -0,0 +1,267 @@
+// NOTE: Assertions have been autogenerated by utils/update_cc_test_checks.py UTC_ARGS: --version 5
+// REQUIRES: riscv-registered-target
+// RUN: %clang_cc1 -triple riscv64 -target-feature +v \
+// RUN: -target-feature +experimental-zvfbfa -disable-O0-optnone \
+// RUN: -emit-llvm %s -o - | opt -S -passes=mem2reg | \
+// RUN: FileCheck --check-prefix=CHECK-RV64 %s
+
+#include <riscv_vector.h>
+
+// CHECK-RV64-LABEL: define dso_local <vscale x 1 x i8> @test_vfncvt_rtz_x_f_w_bf16mf4_i8mf8(
+// CHECK-RV64-SAME: <vscale x 1 x bfloat> [[VS2:%.*]], i64 noundef [[VL:%.*]]) #[[ATTR0:[0-9]+]] {
+// CHECK-RV64-NEXT: [[ENTRY:.*:]]
+// CHECK-RV64-NEXT: [[TMP0:%.*]] = call <vscale x 1 x i8> @llvm.riscv.vfncvt.rtz.x.f.w.nxv1i8.nxv1bf16.i64(<vscale x 1 x i8> poison, <vscale x 1 x bfloat> [[VS2]], i64 [[VL]])
+// CHECK-RV64-NEXT: ret <vscale x 1 x i8> [[TMP0]]
+//
+vint8mf8_t test_vfncvt_rtz_x_f_w_bf16mf4_i8mf8(vbfloat16mf4_t vs2, size_t vl) {
+ return __riscv_vfncvt_rtz_x(vs2, vl);
+}
+
+// CHECK-RV64-LABEL: define dso_local <vscale x 2 x i8> @test_vfncvt_rtz_x_f_w_bf16mf2_i8mf4(
+// CHECK-RV64-SAME: <vscale x 2 x bfloat> [[VS2:%.*]], i64 noundef [[VL:%.*]]) #[[ATTR0]] {
+// CHECK-RV64-NEXT: [[ENTRY:.*:]]
+// CHECK-RV64-NEXT: [[TMP0:%.*]] = call <vscale x 2 x i8> @llvm.riscv.vfncvt.rtz.x.f.w.nxv2i8.nxv2bf16.i64(<vscale x 2 x i8> poison, <vscale x 2 x bfloat> [[VS2]], i64 [[VL]])
+// CHECK-RV64-NEXT: ret <vscale x 2 x i8> [[TMP0]]
+//
+vint8mf4_t test_vfncvt_rtz_x_f_w_bf16mf2_i8mf4(vbfloat16mf2_t vs2, size_t vl) {
+ return __riscv_vfncvt_rtz_x(vs2, vl);
+}
+
+// CHECK-RV64-LABEL: define dso_local <vscale x 4 x i8> @test_vfncvt_rtz_x_f_w_bf16m1_i8mf2(
+// CHECK-RV64-SAME: <vscale x 4 x bfloat> [[VS2:%.*]], i64 noundef [[VL:%.*]]) #[[ATTR0]] {
+// CHECK-RV64-NEXT: [[ENTRY:.*:]]
+// CHECK-RV64-NEXT: [[TMP0:%.*]] = call <vscale x 4 x i8> @llvm.riscv.vfncvt.rtz.x.f.w.nxv4i8.nxv4bf16.i64(<vscale x 4 x i8> poison, <vscale x 4 x bfloat> [[VS2]], i64 [[VL]])
+// CHECK-RV64-NEXT: ret <vscale x 4 x i8> [[TMP0]]
+//
+vint8mf2_t test_vfncvt_rtz_x_f_w_bf16m1_i8mf2(vbfloat16m1_t vs2, size_t vl) {
+ return __riscv_vfncvt_rtz_x(vs2, vl);
+}
+
+// CHECK-RV64-LABEL: define dso_local <vscale x 8 x i8> @test_vfncvt_rtz_x_f_w_bf16m2_i8m1(
+// CHECK-RV64-SAME: <vscale x 8 x bfloat> [[VS2:%.*]], i64 noundef [[VL:%.*]]) #[[ATTR0]] {
+// CHECK-RV64-NEXT: [[ENTRY:.*:]]
+// CHECK-RV64-NEXT: [[TMP0:%.*]] = call <vscale x 8 x i8> @llvm.riscv.vfncvt.rtz.x.f.w.nxv8i8.nxv8bf16.i64(<vscale x 8 x i8> poison, <vscale x 8 x bfloat> [[VS2]], i64 [[VL]])
+// CHECK-RV64-NEXT: ret <vscale x 8 x i8> [[TMP0]]
+//
+vint8m1_t test_vfncvt_rtz_x_f_w_bf16m2_i8m1(vbfloat16m2_t vs2, size_t vl) {
+ return __riscv_vfncvt_rtz_x(vs2, vl);
+}
+
+// CHECK-RV64-LABEL: define dso_local <vscale x 16 x i8> @test_vfncvt_rtz_x_f_w_bf16m4_i8m2(
+// CHECK-RV64-SAME: <vscale x 16 x bfloat> [[VS2:%.*]], i64 noundef [[VL:%.*]]) #[[ATTR0]] {
+// CHECK-RV64-NEXT: [[ENTRY:.*:]]
+// CHECK-RV64-NEXT: [[TMP0:%.*]] = call <vscale x 16 x i8> @llvm.riscv.vfncvt.rtz.x.f.w.nxv16i8.nxv16bf16.i64(<vscale x 16 x i8> poison, <vscale x 16 x bfloat> [[VS2]], i64 [[VL]])
+// CHECK-RV64-NEXT: ret <vscale x 16 x i8> [[TMP0]]
+//
+vint8m2_t test_vfncvt_rtz_x_f_w_bf16m4_i8m2(vbfloat16m4_t vs2, size_t vl) {
+ return __riscv_vfncvt_rtz_x(vs2, vl);
+}
+
+// CHECK-RV64-LABEL: define dso_local <vscale x 32 x i8> @test_vfncvt_rtz_x_f_w_bf16m8_i8m4(
+// CHECK-RV64-SAME: <vscale x 32 x bfloat> [[VS2:%.*]], i64 noundef [[VL:%.*]]) #[[ATTR0]] {
+// CHECK-RV64-NEXT: [[ENTRY:.*:]]
+// CHECK-RV64-NEXT: [[TMP0:%.*]] = call <vscale x 32 x i8> @llvm.riscv.vfncvt.rtz.x.f.w.nxv32i8.nxv32bf16.i64(<vscale x 32 x i8> poison, <vscale x 32 x bfloat> [[VS2]], i64 [[VL]])
+// CHECK-RV64-NEXT: ret <vscale x 32 x i8> [[TMP0]]
+//
+vint8m4_t test_vfncvt_rtz_x_f_w_bf16m8_i8m4(vbfloat16m8_t vs2, size_t vl) {
+ return __riscv_vfncvt_rtz_x(vs2, vl);
+}
+
+// CHECK-RV64-LABEL: define dso_local <vscale x 1 x i8> @test_vfncvt_rtz_xu_f_w_bf16mf4_u8mf8(
+// CHECK-RV64-SAME: <vscale x 1 x bfloat> [[VS2:%.*]], i64 noundef [[VL:%.*]]) #[[ATTR0]] {
+// CHECK-RV64-NEXT: [[ENTRY:.*:]]
+// CHECK-RV64-NEXT: [[TMP0:%.*]] = call <vscale x 1 x i8> @llvm.riscv.vfncvt.rtz.xu.f.w.nxv1i8.nxv1bf16.i64(<vscale x 1 x i8> poison, <vscale x 1 x bfloat> [[VS2]], i64 [[VL]])
+// CHECK-RV64-NEXT: ret <vscale x 1 x i8> [[TMP0]]
+//
+vuint8mf8_t test_vfncvt_rtz_xu_f_w_bf16mf4_u8mf8(vbfloat16mf4_t vs2,
+ size_t vl) {
+ return __riscv_vfncvt_rtz_xu(vs2, vl);
+}
+
+// CHECK-RV64-LABEL: define dso_local <vscale x 2 x i8> @test_vfncvt_rtz_xu_f_w_bf16mf2_u8mf4(
+// CHECK-RV64-SAME: <vscale x 2 x bfloat> [[VS2:%.*]], i64 noundef [[VL:%.*]]) #[[ATTR0]] {
+// CHECK-RV64-NEXT: [[ENTRY:.*:]]
+// CHECK-RV64-NEXT: [[TMP0:%.*]] = call <vscale x 2 x i8> @llvm.riscv.vfncvt.rtz.xu.f.w.nxv2i8.nxv2bf16.i64(<vscale x 2 x i8> poison, <vscale x 2 x bfloat> [[VS2]], i64 [[VL]])
+// CHECK-RV64-NEXT: ret <vscale x 2 x i8> [[TMP0]]
+//
+vuint8mf4_t test_vfncvt_rtz_xu_f_w_bf16mf2_u8mf4(vbfloat16mf2_t vs2,
+ size_t vl) {
+ return __riscv_vfncvt_rtz_xu(vs2, vl);
+}
+
+// CHECK-RV64-LABEL: define dso_local <vscale x 4 x i8> @test_vfncvt_rtz_xu_f_w_bf16m1_u8mf2(
+// CHECK-RV64-SAME: <vscale x 4 x bfloat> [[VS2:%.*]], i64 noundef [[VL:%.*]]) #[[ATTR0]] {
+// CHECK-RV64-NEXT: [[ENTRY:.*:]]
+// CHECK-RV64-NEXT: [[TMP0:%.*]] = call <vscale x 4 x i8> @llvm.riscv.vfncvt.rtz.xu.f.w.nxv4i8.nxv4bf16.i64(<vscale x 4 x i8> poison, <vscale x 4 x bfloat> [[VS2]], i64 [[VL]])
+// CHECK-RV64-NEXT: ret <vscale x 4 x i8> [[TMP0]]
+//
+vuint8mf2_t test_vfncvt_rtz_xu_f_w_bf16m1_u8mf2(vbfloat16m1_t vs2, size_t vl) {
+ return __riscv_vfncvt_rtz_xu(vs2, vl);
+}
+
+// CHECK-RV64-LABEL: define dso_local <vscale x 8 x i8> @test_vfncvt_rtz_xu_f_w_bf16m2_u8m1(
+// CHECK-RV64-SAME: <vscale x 8 x bfloat> [[VS2:%.*]], i64 noundef [[VL:%.*]]) #[[ATTR0]] {
+// CHECK-RV64-NEXT: [[ENTRY:.*:]]
+// CHECK-RV64-NEXT: [[TMP0:%.*]] = call <vscale x 8 x i8> @llvm.riscv.vfncvt.rtz.xu.f.w.nxv8i8.nxv8bf16.i64(<vscale x 8 x i8> poison, <vscale x 8 x bfloat> [[VS2]], i64 [[VL]])
+// CHECK-RV64-NEXT: ret <vscale x 8 x i8> [[TMP0]]
+//
+vuint8m1_t test_vfncvt_rtz_xu_f_w_bf16m2_u8m1(vbfloat16m2_t vs2, size_t vl) {
+ return __riscv_vfncvt_rtz_xu(vs2, vl);
+}
+
+// CHECK-RV64-LABEL: define dso_local <vscale x 16 x i8> @test_vfncvt_rtz_xu_f_w_bf16m4_u8m2(
+// CHECK-RV64-SAME: <vscale x 16 x bfloat> [[VS2:%.*]], i64 noundef [[VL:%.*]]) #[[ATTR0]] {
+// CHECK-RV64-NEXT: [[ENTRY:.*:]]
+// CHECK-RV64-NEXT: [[TMP0:%.*]] = call <vscale x 16 x i8> @llvm.riscv.vfncvt.rtz.xu.f.w.nxv16i8.nxv16bf16.i64(<vscale x 16 x i8> poison, <vscale x 16 x bfloat> [[VS2]], i64 [[VL]])
+// CHECK-RV64-NEXT: ret <vscale x 16 x i8> [[TMP0]]
+//
+vuint8m2_t test_vfncvt_rtz_xu_f_w_bf16m4_u8m2(vbfloat16m4_t vs2, size_t vl) {
+ return __riscv_vfncvt_rtz_xu(vs2, vl);
+}
+
+// CHECK-RV64-LABEL: define dso_local <vscale x 32 x i8> @test_vfncvt_rtz_xu_f_w_bf16m8_u8m4(
+// CHECK-RV64-SAME: <vscale x 32 x bfloat> [[VS2:%.*]], i64 noundef [[VL:%.*]]) #[[ATTR0]] {
+// CHECK-RV64-NEXT: [[ENTRY:.*:]]
+// CHECK-RV64-NEXT: [[TMP0:%.*]] = call <vscale x 32 x i8> @llvm.riscv.vfncvt.rtz.xu.f.w.nxv32i8.nxv32bf16.i64(<vscale x 32 x i8> poison, <vscale x 32 x bfloat> [[VS2]], i64 [[VL]])
+// CHECK-RV64-NEXT: ret <vscale x 32 x i8> [[TMP0]]
+//
+vuint8m4_t test_vfncvt_rtz_xu_f_w_bf16m8_u8m4(vbfloat16m8_t vs2, size_t vl) {
+ return __riscv_vfncvt_rtz_xu(vs2, vl);
+}
+
+// CHECK-RV64-LABEL: define dso_local <vscale x 1 x i8> @test_vfncvt_rtz_x_f_w_bf16mf4_i8mf8_m(
+// CHECK-RV64-SAME: <vscale x 1 x i1> [[VM:%.*]], <vscale x 1 x bfloat> [[VS2:%.*]], i64 noundef [[VL:%.*]]) #[[ATTR0]] {
+// CHECK-RV64-NEXT: [[ENTRY:.*:]]
+// CHECK-RV64-NEXT: [[TMP0:%.*]] = call <vscale x 1 x i8> @llvm.riscv.vfncvt.rtz.x.f.w.mask.nxv1i8.nxv1bf16.i64(<vscale x 1 x i8> poison, <vscale x 1 x bfloat> [[VS2]], <vscale x 1 x i1> [[VM]], i64 [[VL]], i64 3)
+// CHECK-RV64-NEXT: ret <vscale x 1 x i8> [[TMP0]]
+//
+vint8mf8_t test_vfncvt_rtz_x_f_w_bf16mf4_i8mf8_m(vbool64_t vm,
+ vbfloat16mf4_t vs2,
+ size_t vl) {
+ return __riscv_vfncvt_rtz_x(vm, vs2, vl);
+}
+
+// CHECK-RV64-LABEL: define dso_local <vscale x 2 x i8> @test_vfncvt_rtz_x_f_w_bf16mf2_i8mf4_m(
+// CHECK-RV64-SAME: <vscale x 2 x i1> [[VM:%.*]], <vscale x 2 x bfloat> [[VS2:%.*]], i64 noundef [[VL:%.*]]) #[[ATTR0]] {
+// CHECK-RV64-NEXT: [[ENTRY:.*:]]
+// CHECK-RV64-NEXT: [[TMP0:%.*]] = call <vscale x 2 x i8> @llvm.riscv.vfncvt.rtz.x.f.w.mask.nxv2i8.nxv2bf16.i64(<vscale x 2 x i8> poison, <vscale x 2 x bfloat> [[VS2]], <vscale x 2 x i1> [[VM]], i64 [[VL]], i64 3)
+// CHECK-RV64-NEXT: ret <vscale x 2 x i8> [[TMP0]]
+//
+vint8mf4_t test_vfncvt_rtz_x_f_w_bf16mf2_i8mf4_m(vbool32_t vm,
+ vbfloat16mf2_t vs2,
+ size_t vl) {
+ return __riscv_vfncvt_rtz_x(vm, vs2, vl);
+}
+
+// CHECK-RV64-LABEL: define dso_local <vscale x 4 x i8> @test_vfncvt_rtz_x_f_w_bf16m1_i8mf2_m(
+// CHECK-RV64-SAME: <vscale x 4 x i1> [[VM:%.*]], <vscale x 4 x bfloat> [[VS2:%.*]], i64 noundef [[VL:%.*]]) #[[ATTR0]] {
+// CHECK-RV64-NEXT: [[ENTRY:.*:]]
+// CHECK-RV64-NEXT: [[TMP0:%.*]] = call <vscale x 4 x i8> @llvm.riscv.vfncvt.rtz.x.f.w.mask.nxv4i8.nxv4bf16.i64(<vscale x 4 x i8> poison, <vscale x 4 x bfloat> [[VS2]], <vscale x 4 x i1> [[VM]], i64 [[VL]], i64 3)
+// CHECK-RV64-NEXT: ret <vscale x 4 x i8> [[TMP0]]
+//
+vint8mf2_t test_vfncvt_rtz_x_f_w_bf16m1_i8mf2_m(vbool16_t vm, vbfloat16m1_t vs2,
+ size_t vl) {
+ return __riscv_vfncvt_rtz_x(vm, vs2, vl);
+}
+
+// CHECK-RV64-LABEL: define dso_local <vscale x 8 x i8> @test_vfncvt_rtz_x_f_w_bf16m2_i8m1_m(
+// CHECK-RV64-SAME: <vscale x 8 x i1> [[VM:%.*]], <vscale x 8 x bfloat> [[VS2:%.*]], i64 noundef [[VL:%.*]]) #[[ATTR0]] {
+// CHECK-RV64-NEXT: [[ENTRY:.*:]]
+// CHECK-RV64-NEXT: [[TMP0:%.*]] = call <vscale x 8 x i8> @llvm.riscv.vfncvt.rtz.x.f.w.mask.nxv8i8.nxv8bf16.i64(<vscale x 8 x i8> poison, <vscale x 8 x bfloat> [[VS2]], <vscale x 8 x i1> [[VM]], i64 [[VL]], i64 3)
+// CHECK-RV64-NEXT: ret <vscale x 8 x i8> [[TMP0]]
+//
+vint8m1_t test_vfncvt_rtz_x_f_w_bf16m2_i8m1_m(vbool8_t vm, vbfloat16m2_t vs2,
+ size_t vl) {
+ return __riscv_vfncvt_rtz_x(vm, vs2, vl);
+}
+
+// CHECK-RV64-LABEL: define dso_local <vscale x 16 x i8> @test_vfncvt_rtz_x_f_w_bf16m4_i8m2_m(
+// CHECK-RV64-SAME: <vscale x 16 x i1> [[VM:%.*]], <vscale x 16 x bfloat> [[VS2:%.*]], i64 noundef [[VL:%.*]]) #[[ATTR0]] {
+// CHECK-RV64-NEXT: [[ENTRY:.*:]]
+// CHECK-RV64-NEXT: [[TMP0:%.*]] = call <vscale x 16 x i8> @llvm.riscv.vfncvt.rtz.x.f.w.mask.nxv16i8.nxv16bf16.i64(<vscale x 16 x i8> poison, <vscale x 16 x bfloat> [[VS2]], <vscale x 16 x i1> [[VM]], i64 [[VL]], i64 3)
+// CHECK-RV64-NEXT: ret <vscale x 16 x i8> [[TMP0]]
+//
+vint8m2_t test_vfncvt_rtz_x_f_w_bf16m4_i8m2_m(vbool4_t vm, vbfloat16m4_t vs2,
+ size_t vl) {
+ return __riscv_vfncvt_rtz_x(vm, vs2, vl);
+}
+
+// CHECK-RV64-LABEL: define dso_local <vscale x 32 x i8> @test_vfncvt_rtz_x_f_w_bf16m8_i8m4_m(
+// CHECK-RV64-SAME: <vscale x 32 x i1> [[VM:%.*]], <vscale x 32 x bfloat> [[VS2:%.*]], i64 noundef [[VL:%.*]]) #[[ATTR0]] {
+// CHECK-RV64-NEXT: [[ENTRY:.*:]]
+// CHECK-RV64-NEXT: [[TMP0:%.*]] = call <vscale x 32 x i8> @llvm.riscv.vfncvt.rtz.x.f.w.mask.nxv32i8.nxv32bf16.i64(<vscale x 32 x i8> poison, <vscale x 32 x bfloat> [[VS2]], <vscale x 32 x i1> [[VM]], i64 [[VL]], i64 3)
+// CHECK-RV64-NEXT: ret <vscale x 32 x i8> [[TMP0]]
+//
+vint8m4_t test_vfncvt_rtz_x_f_w_bf16m8_i8m4_m(vbool2_t vm, vbfloat16m8_t vs2,
+ size_t vl) {
+ return __riscv_vfncvt_rtz_x(vm, vs2, vl);
+}
+
+// CHECK-RV64-LABEL: define dso_local <vscale x 1 x i8> @test_vfncvt_rtz_xu_f_w_bf16mf4_u8mf8_m(
+// CHECK-RV64-SAME: <vscale x 1 x i1> [[VM:%.*]], <vscale x 1 x bfloat> [[VS2:%.*]], i64 noundef [[VL:%.*]]) #[[ATTR0]] {
+// CHECK-RV64-NEXT: [[ENTRY:.*:]]
+// CHECK-RV64-NEXT: [[TMP0:%.*]] = call <vscale x 1 x i8> @llvm.riscv.vfncvt.rtz.xu.f.w.mask.nxv1i8.nxv1bf16.i64(<vscale x 1 x i8> poison, <vscale x 1 x bfloat> [[VS2]], <vscale x 1 x i1> [[VM]], i64 [[VL]], i64 3)
+// CHECK-RV64-NEXT: ret <vscale x 1 x i8> [[TMP0]]
+//
+vuint8mf8_t test_vfncvt_rtz_xu_f_w_bf16mf4_u8mf8_m(vbool64_t vm,
+ vbfloat16mf4_t vs2,
+ size_t vl) {
+ return __riscv_vfncvt_rtz_xu(vm, vs2, vl);
+}
+
+// CHECK-RV64-LABEL: define dso_local <vscale x 2 x i8> @test_vfncvt_rtz_xu_f_w_bf16mf2_u8mf4_m(
+// CHECK-RV64-SAME: <vscale x 2 x i1> [[VM:%.*]], <vscale x 2 x bfloat> [[VS2:%.*]], i64 noundef [[VL:%.*]]) #[[ATTR0]] {
+// CHECK-RV64-NEXT: [[ENTRY:.*:]]
+// CHECK-RV64-NEXT: [[TMP0:%.*]] = call <vscale x 2 x i8> @llvm.riscv.vfncvt.rtz.xu.f.w.mask.nxv2i8.nxv2bf16.i64(<vscale x 2 x i8> poison, <vscale x 2 x bfloat> [[VS2]], <vscale x 2 x i1> [[VM]], i64 [[VL]], i64 3)
+// CHECK-RV64-NEXT: ret <vscale x 2 x i8> [[TMP0]]
+//
+vuint8mf4_t test_vfncvt_rtz_xu_f_w_bf16mf2_u8mf4_m(vbool32_t vm,
+ vbfloat16mf2_t vs2,
+ size_t vl) {
+ return __riscv_vfncvt_rtz_xu(vm, vs2, vl);
+}
+
+// CHECK-RV64-LABEL: define dso_local <vscale x 4 x i8> @test_vfncvt_rtz_xu_f_w_bf16m1_u8mf2_m(
+// CHECK-RV64-SAME: <vscale x 4 x i1> [[VM:%.*]], <vscale x 4 x bfloat> [[VS2:%.*]], i64 noundef [[VL:%.*]]) #[[ATTR0]] {
+// CHECK-RV64-NEXT: [[ENTRY:.*:]]
+// CHECK-RV64-NEXT: [[TMP0:%.*]] = call <vscale x 4 x i8> @llvm.riscv.vfncvt.rtz.xu.f.w.mask.nxv4i8.nxv4bf16.i64(<vscale x 4 x i8> poison, <vscale x 4 x bfloat> [[VS2]], <vscale x 4 x i1> [[VM]], i64 [[VL]], i64 3)
+// CHECK-RV64-NEXT: ret <vscale x 4 x i8> [[TMP0]]
+//
+vuint8mf2_t test_vfncvt_rtz_xu_f_w_bf16m1_u8mf2_m(vbool16_t vm,
+ vbfloat16m1_t vs2,
+ size_t vl) {
+ return __riscv_vfncvt_rtz_xu(vm, vs2, vl);
+}
+
+// CHECK-RV64-LABEL: define dso_local <vscale x 8 x i8> @test_vfncvt_rtz_xu_f_w_bf16m2_u8m1_m(
+// CHECK-RV64-SAME: <vscale x 8 x i1> [[VM:%.*]], <vscale x 8 x bfloat> [[VS2:%.*]], i64 noundef [[VL:%.*]]) #[[ATTR0]] {
+// CHECK-RV64-NEXT: [[ENTRY:.*:]]
+// CHECK-RV64-NEXT: [[TMP0:%.*]] = call <vscale x 8 x i8> @llvm.riscv.vfncvt.rtz.xu.f.w.mask.nxv8i8.nxv8bf16.i64(<vscale x 8 x i8> poison, <vscale x 8 x bfloat> [[VS2]], <vscale x 8 x i1> [[VM]], i64 [[VL]], i64 3)
+// CHECK-RV64-NEXT: ret <vscale x 8 x i8> [[TMP0]]
+//
+vuint8m1_t test_vfncvt_rtz_xu_f_w_bf16m2_u8m1_m(vbool8_t vm, vbfloat16m2_t vs2,
+ size_t vl) {
+ return __riscv_vfncvt_rtz_xu(vm, vs2, vl);
+}
+
+// CHECK-RV64-LABEL: define dso_local <vscale x 16 x i8> @test_vfncvt_rtz_xu_f_w_bf16m4_u8m2_m(
+// CHECK-RV64-SAME: <vscale x 16 x i1> [[VM:%.*]], <vscale x 16 x bfloat> [[VS2:%.*]], i64 noundef [[VL:%.*]]) #[[ATTR0]] {
+// CHECK-RV64-NEXT: [[ENTRY:.*:]]
+// CHECK-RV64-NEXT: [[TMP0:%.*]] = call <vscale x 16 x i8> @llvm.riscv.vfncvt.rtz.xu.f.w.mask.nxv16i8.nxv16bf16.i64(<vscale x 16 x i8> poison, <vscale x 16 x bfloat> [[VS2]], <vscale x 16 x i1> [[VM]], i64 [[VL]], i64 3)
+// CHECK-RV64-NEXT: ret <vscale x 16 x i8> [[TMP0]]
+//
+vuint8m2_t test_vfncvt_rtz_xu_f_w_bf16m4_u8m2_m(vbool4_t vm, vbfloat16m4_t vs2,
+ size_t vl) {
+ return __riscv_vfncvt_rtz_xu(vm, vs2, vl);
+}
+
+// CHECK-RV64-LABEL: define dso_local <vscale x 32 x i8> @test_vfncvt_rtz_xu_f_w_bf16m8_u8m4_m(
+// CHECK-RV64-SAME: <vscale x 32 x i1> [[VM:%.*]], <vscale x 32 x bfloat> [[VS2:%.*]], i64 noundef [[VL:%.*]]) #[[ATTR0]] {
+// CHECK-RV64-NEXT: [[ENTRY:.*:]]
+// CHECK-RV64-NEXT: [[TMP0:%.*]] = call <vscale x 32 x i8> @llvm.riscv.vfncvt.rtz.xu.f.w.mask.nxv32i8.nxv32bf16.i64(<vscale x 32 x i8> poison, <vscale x 32 x bfloat> [[VS2]], <vscale x 32 x i1> [[VM]], i64 [[VL]], i64 3)
+// CHECK-RV64-NEXT: ret <vscale x 32 x i8> [[TMP0]]
+//
+vuint8m4_t test_vfncvt_rtz_xu_f_w_bf16m8_u8m4_m(vbool2_t vm, vbfloat16m8_t vs2,
+ size_t vl) {
+ return __riscv_vfncvt_rtz_xu(vm, vs2, vl);
+}
diff --git a/clang/test/CodeGen/RISCV/rvv-intrinsics-autogenerated/zvfbfa/non-policy/overloaded/vfnmacc.c b/clang/test/CodeGen/RISCV/rvv-intrinsics-autogenerated/zvfbfa/non-policy/overloaded/vfnmacc.c
new file mode 100644
index 0000000..6f7928b
--- /dev/null
+++ b/clang/test/CodeGen/RISCV/rvv-intrinsics-autogenerated/zvfbfa/non-policy/overloaded/vfnmacc.c
@@ -0,0 +1,249 @@
+// NOTE: Assertions have been autogenerated by utils/update_cc_test_checks.py UTC_ARGS: --version 5
+// REQUIRES: riscv-registered-target
+// RUN: %clang_cc1 -triple riscv64 -target-feature +v \
+// RUN: -target-feature +experimental-zvfbfa -disable-O0-optnone \
+// RUN: -emit-llvm %s -o - | opt -S -passes=mem2reg | \
+// RUN: FileCheck --check-prefix=CHECK-RV64 %s
+
+#include <riscv_vector.h>
+
+// CHECK-RV64-LABEL: define dso_local <vscale x 1 x bfloat> @test_vfnmacc_vv_bf16mf4(
+// CHECK-RV64-SAME: <vscale x 1 x bfloat> [[VD:%.*]], <vscale x 1 x bfloat> [[VS1:%.*]], <vscale x 1 x bfloat> [[VS2:%.*]], i64 noundef [[VL:%.*]]) #[[ATTR0:[0-9]+]] {
+// CHECK-RV64-NEXT: [[ENTRY:.*:]]
+// CHECK-RV64-NEXT: [[TMP0:%.*]] = call <vscale x 1 x bfloat> @llvm.riscv.vfnmacc.nxv1bf16.nxv1bf16.i64(<vscale x 1 x bfloat> [[VD]], <vscale x 1 x bfloat> [[VS1]], <vscale x 1 x bfloat> [[VS2]], i64 7, i64 [[VL]], i64 3)
+// CHECK-RV64-NEXT: ret <vscale x 1 x bfloat> [[TMP0]]
+//
+vbfloat16mf4_t test_vfnmacc_vv_bf16mf4(vbfloat16mf4_t vd, vbfloat16mf4_t vs1, vbfloat16mf4_t vs2, size_t vl) {
+ return __riscv_vfnmacc(vd, vs1, vs2, vl);
+}
+
+// CHECK-RV64-LABEL: define dso_local <vscale x 1 x bfloat> @test_vfnmacc_vf_bf16mf4(
+// CHECK-RV64-SAME: <vscale x 1 x bfloat> [[VD:%.*]], bfloat noundef [[RS1:%.*]], <vscale x 1 x bfloat> [[VS2:%.*]], i64 noundef [[VL:%.*]]) #[[ATTR0]] {
+// CHECK-RV64-NEXT: [[ENTRY:.*:]]
+// CHECK-RV64-NEXT: [[TMP0:%.*]] = call <vscale x 1 x bfloat> @llvm.riscv.vfnmacc.nxv1bf16.bf16.i64(<vscale x 1 x bfloat> [[VD]], bfloat [[RS1]], <vscale x 1 x bfloat> [[VS2]], i64 7, i64 [[VL]], i64 3)
+// CHECK-RV64-NEXT: ret <vscale x 1 x bfloat> [[TMP0]]
+//
+vbfloat16mf4_t test_vfnmacc_vf_bf16mf4(vbfloat16mf4_t vd, __bf16 rs1, vbfloat16mf4_t vs2, size_t vl) {
+ return __riscv_vfnmacc(vd, rs1, vs2, vl);
+}
+
+// CHECK-RV64-LABEL: define dso_local <vscale x 2 x bfloat> @test_vfnmacc_vv_bf16mf2(
+// CHECK-RV64-SAME: <vscale x 2 x bfloat> [[VD:%.*]], <vscale x 2 x bfloat> [[VS1:%.*]], <vscale x 2 x bfloat> [[VS2:%.*]], i64 noundef [[VL:%.*]]) #[[ATTR0]] {
+// CHECK-RV64-NEXT: [[ENTRY:.*:]]
+// CHECK-RV64-NEXT: [[TMP0:%.*]] = call <vscale x 2 x bfloat> @llvm.riscv.vfnmacc.nxv2bf16.nxv2bf16.i64(<vscale x 2 x bfloat> [[VD]], <vscale x 2 x bfloat> [[VS1]], <vscale x 2 x bfloat> [[VS2]], i64 7, i64 [[VL]], i64 3)
+// CHECK-RV64-NEXT: ret <vscale x 2 x bfloat> [[TMP0]]
+//
+vbfloat16mf2_t test_vfnmacc_vv_bf16mf2(vbfloat16mf2_t vd, vbfloat16mf2_t vs1, vbfloat16mf2_t vs2, size_t vl) {
+ return __riscv_vfnmacc(vd, vs1, vs2, vl);
+}
+
+// CHECK-RV64-LABEL: define dso_local <vscale x 2 x bfloat> @test_vfnmacc_vf_bf16mf2(
+// CHECK-RV64-SAME: <vscale x 2 x bfloat> [[VD:%.*]], bfloat noundef [[RS1:%.*]], <vscale x 2 x bfloat> [[VS2:%.*]], i64 noundef [[VL:%.*]]) #[[ATTR0]] {
+// CHECK-RV64-NEXT: [[ENTRY:.*:]]
+// CHECK-RV64-NEXT: [[TMP0:%.*]] = call <vscale x 2 x bfloat> @llvm.riscv.vfnmacc.nxv2bf16.bf16.i64(<vscale x 2 x bfloat> [[VD]], bfloat [[RS1]], <vscale x 2 x bfloat> [[VS2]], i64 7, i64 [[VL]], i64 3)
+// CHECK-RV64-NEXT: ret <vscale x 2 x bfloat> [[TMP0]]
+//
+vbfloat16mf2_t test_vfnmacc_vf_bf16mf2(vbfloat16mf2_t vd, __bf16 rs1, vbfloat16mf2_t vs2, size_t vl) {
+ return __riscv_vfnmacc(vd, rs1, vs2, vl);
+}
+
+// CHECK-RV64-LABEL: define dso_local <vscale x 4 x bfloat> @test_vfnmacc_vv_bf16m1(
+// CHECK-RV64-SAME: <vscale x 4 x bfloat> [[VD:%.*]], <vscale x 4 x bfloat> [[VS1:%.*]], <vscale x 4 x bfloat> [[VS2:%.*]], i64 noundef [[VL:%.*]]) #[[ATTR0]] {
+// CHECK-RV64-NEXT: [[ENTRY:.*:]]
+// CHECK-RV64-NEXT: [[TMP0:%.*]] = call <vscale x 4 x bfloat> @llvm.riscv.vfnmacc.nxv4bf16.nxv4bf16.i64(<vscale x 4 x bfloat> [[VD]], <vscale x 4 x bfloat> [[VS1]], <vscale x 4 x bfloat> [[VS2]], i64 7, i64 [[VL]], i64 3)
+// CHECK-RV64-NEXT: ret <vscale x 4 x bfloat> [[TMP0]]
+//
+vbfloat16m1_t test_vfnmacc_vv_bf16m1(vbfloat16m1_t vd, vbfloat16m1_t vs1, vbfloat16m1_t vs2, size_t vl) {
+ return __riscv_vfnmacc(vd, vs1, vs2, vl);
+}
+
+// CHECK-RV64-LABEL: define dso_local <vscale x 4 x bfloat> @test_vfnmacc_vf_bf16m1(
+// CHECK-RV64-SAME: <vscale x 4 x bfloat> [[VD:%.*]], bfloat noundef [[RS1:%.*]], <vscale x 4 x bfloat> [[VS2:%.*]], i64 noundef [[VL:%.*]]) #[[ATTR0]] {
+// CHECK-RV64-NEXT: [[ENTRY:.*:]]
+// CHECK-RV64-NEXT: [[TMP0:%.*]] = call <vscale x 4 x bfloat> @llvm.riscv.vfnmacc.nxv4bf16.bf16.i64(<vscale x 4 x bfloat> [[VD]], bfloat [[RS1]], <vscale x 4 x bfloat> [[VS2]], i64 7, i64 [[VL]], i64 3)
+// CHECK-RV64-NEXT: ret <vscale x 4 x bfloat> [[TMP0]]
+//
+vbfloat16m1_t test_vfnmacc_vf_bf16m1(vbfloat16m1_t vd, __bf16 rs1, vbfloat16m1_t vs2, size_t vl) {
+ return __riscv_vfnmacc(vd, rs1, vs2, vl);
+}
+
+// CHECK-RV64-LABEL: define dso_local <vscale x 8 x bfloat> @test_vfnmacc_vv_bf16m2(
+// CHECK-RV64-SAME: <vscale x 8 x bfloat> [[VD:%.*]], <vscale x 8 x bfloat> [[VS1:%.*]], <vscale x 8 x bfloat> [[VS2:%.*]], i64 noundef [[VL:%.*]]) #[[ATTR0]] {
+// CHECK-RV64-NEXT: [[ENTRY:.*:]]
+// CHECK-RV64-NEXT: [[TMP0:%.*]] = call <vscale x 8 x bfloat> @llvm.riscv.vfnmacc.nxv8bf16.nxv8bf16.i64(<vscale x 8 x bfloat> [[VD]], <vscale x 8 x bfloat> [[VS1]], <vscale x 8 x bfloat> [[VS2]], i64 7, i64 [[VL]], i64 3)
+// CHECK-RV64-NEXT: ret <vscale x 8 x bfloat> [[TMP0]]
+//
+vbfloat16m2_t test_vfnmacc_vv_bf16m2(vbfloat16m2_t vd, vbfloat16m2_t vs1, vbfloat16m2_t vs2, size_t vl) {
+ return __riscv_vfnmacc(vd, vs1, vs2, vl);
+}
+
+// CHECK-RV64-LABEL: define dso_local <vscale x 8 x bfloat> @test_vfnmacc_vf_bf16m2(
+// CHECK-RV64-SAME: <vscale x 8 x bfloat> [[VD:%.*]], bfloat noundef [[RS1:%.*]], <vscale x 8 x bfloat> [[VS2:%.*]], i64 noundef [[VL:%.*]]) #[[ATTR0]] {
+// CHECK-RV64-NEXT: [[ENTRY:.*:]]
+// CHECK-RV64-NEXT: [[TMP0:%.*]] = call <vscale x 8 x bfloat> @llvm.riscv.vfnmacc.nxv8bf16.bf16.i64(<vscale x 8 x bfloat> [[VD]], bfloat [[RS1]], <vscale x 8 x bfloat> [[VS2]], i64 7, i64 [[VL]], i64 3)
+// CHECK-RV64-NEXT: ret <vscale x 8 x bfloat> [[TMP0]]
+//
+vbfloat16m2_t test_vfnmacc_vf_bf16m2(vbfloat16m2_t vd, __bf16 rs1, vbfloat16m2_t vs2, size_t vl) {
+ return __riscv_vfnmacc(vd, rs1, vs2, vl);
+}
+
+// CHECK-RV64-LABEL: define dso_local <vscale x 16 x bfloat> @test_vfnmacc_vv_bf16m4(
+// CHECK-RV64-SAME: <vscale x 16 x bfloat> [[VD:%.*]], <vscale x 16 x bfloat> [[VS1:%.*]], <vscale x 16 x bfloat> [[VS2:%.*]], i64 noundef [[VL:%.*]]) #[[ATTR0]] {
+// CHECK-RV64-NEXT: [[ENTRY:.*:]]
+// CHECK-RV64-NEXT: [[TMP0:%.*]] = call <vscale x 16 x bfloat> @llvm.riscv.vfnmacc.nxv16bf16.nxv16bf16.i64(<vscale x 16 x bfloat> [[VD]], <vscale x 16 x bfloat> [[VS1]], <vscale x 16 x bfloat> [[VS2]], i64 7, i64 [[VL]], i64 3)
+// CHECK-RV64-NEXT: ret <vscale x 16 x bfloat> [[TMP0]]
+//
+vbfloat16m4_t test_vfnmacc_vv_bf16m4(vbfloat16m4_t vd, vbfloat16m4_t vs1, vbfloat16m4_t vs2, size_t vl) {
+ return __riscv_vfnmacc(vd, vs1, vs2, vl);
+}
+
+// CHECK-RV64-LABEL: define dso_local <vscale x 16 x bfloat> @test_vfnmacc_vf_bf16m4(
+// CHECK-RV64-SAME: <vscale x 16 x bfloat> [[VD:%.*]], bfloat noundef [[RS1:%.*]], <vscale x 16 x bfloat> [[VS2:%.*]], i64 noundef [[VL:%.*]]) #[[ATTR0]] {
+// CHECK-RV64-NEXT: [[ENTRY:.*:]]
+// CHECK-RV64-NEXT: [[TMP0:%.*]] = call <vscale x 16 x bfloat> @llvm.riscv.vfnmacc.nxv16bf16.bf16.i64(<vscale x 16 x bfloat> [[VD]], bfloat [[RS1]], <vscale x 16 x bfloat> [[VS2]], i64 7, i64 [[VL]], i64 3)
+// CHECK-RV64-NEXT: ret <vscale x 16 x bfloat> [[TMP0]]
+//
+vbfloat16m4_t test_vfnmacc_vf_bf16m4(vbfloat16m4_t vd, __bf16 rs1, vbfloat16m4_t vs2, size_t vl) {
+ return __riscv_vfnmacc(vd, rs1, vs2, vl);
+}
+
+// CHECK-RV64-LABEL: define dso_local <vscale x 32 x bfloat> @test_vfnmacc_vv_bf16m8(
+// CHECK-RV64-SAME: <vscale x 32 x bfloat> [[VD:%.*]], <vscale x 32 x bfloat> [[VS1:%.*]], <vscale x 32 x bfloat> [[VS2:%.*]], i64 noundef [[VL:%.*]]) #[[ATTR0]] {
+// CHECK-RV64-NEXT: [[ENTRY:.*:]]
+// CHECK-RV64-NEXT: [[TMP0:%.*]] = call <vscale x 32 x bfloat> @llvm.riscv.vfnmacc.nxv32bf16.nxv32bf16.i64(<vscale x 32 x bfloat> [[VD]], <vscale x 32 x bfloat> [[VS1]], <vscale x 32 x bfloat> [[VS2]], i64 7, i64 [[VL]], i64 3)
+// CHECK-RV64-NEXT: ret <vscale x 32 x bfloat> [[TMP0]]
+//
+vbfloat16m8_t test_vfnmacc_vv_bf16m8(vbfloat16m8_t vd, vbfloat16m8_t vs1, vbfloat16m8_t vs2, size_t vl) {
+ return __riscv_vfnmacc(vd, vs1, vs2, vl);
+}
+
+// CHECK-RV64-LABEL: define dso_local <vscale x 32 x bfloat> @test_vfnmacc_vf_bf16m8(
+// CHECK-RV64-SAME: <vscale x 32 x bfloat> [[VD:%.*]], bfloat noundef [[RS1:%.*]], <vscale x 32 x bfloat> [[VS2:%.*]], i64 noundef [[VL:%.*]]) #[[ATTR0]] {
+// CHECK-RV64-NEXT: [[ENTRY:.*:]]
+// CHECK-RV64-NEXT: [[TMP0:%.*]] = call <vscale x 32 x bfloat> @llvm.riscv.vfnmacc.nxv32bf16.bf16.i64(<vscale x 32 x bfloat> [[VD]], bfloat [[RS1]], <vscale x 32 x bfloat> [[VS2]], i64 7, i64 [[VL]], i64 3)
+// CHECK-RV64-NEXT: ret <vscale x 32 x bfloat> [[TMP0]]
+//
+vbfloat16m8_t test_vfnmacc_vf_bf16m8(vbfloat16m8_t vd, __bf16 rs1, vbfloat16m8_t vs2, size_t vl) {
+ return __riscv_vfnmacc(vd, rs1, vs2, vl);
+}
+
+// CHECK-RV64-LABEL: define dso_local <vscale x 1 x bfloat> @test_vfnmacc_vv_bf16mf4_m(
+// CHECK-RV64-SAME: <vscale x 1 x i1> [[MASK:%.*]], <vscale x 1 x bfloat> [[VD:%.*]], <vscale x 1 x bfloat> [[VS1:%.*]], <vscale x 1 x bfloat> [[VS2:%.*]], i64 noundef [[VL:%.*]]) #[[ATTR0]] {
+// CHECK-RV64-NEXT: [[ENTRY:.*:]]
+// CHECK-RV64-NEXT: [[TMP0:%.*]] = call <vscale x 1 x bfloat> @llvm.riscv.vfnmacc.mask.nxv1bf16.nxv1bf16.i64(<vscale x 1 x bfloat> [[VD]], <vscale x 1 x bfloat> [[VS1]], <vscale x 1 x bfloat> [[VS2]], <vscale x 1 x i1> [[MASK]], i64 7, i64 [[VL]], i64 3)
+// CHECK-RV64-NEXT: ret <vscale x 1 x bfloat> [[TMP0]]
+//
+vbfloat16mf4_t test_vfnmacc_vv_bf16mf4_m(vbool64_t mask, vbfloat16mf4_t vd, vbfloat16mf4_t vs1, vbfloat16mf4_t vs2, size_t vl) {
+ return __riscv_vfnmacc(mask, vd, vs1, vs2, vl);
+}
+
+// CHECK-RV64-LABEL: define dso_local <vscale x 1 x bfloat> @test_vfnmacc_vf_bf16mf4_m(
+// CHECK-RV64-SAME: <vscale x 1 x i1> [[MASK:%.*]], <vscale x 1 x bfloat> [[VD:%.*]], bfloat noundef [[RS1:%.*]], <vscale x 1 x bfloat> [[VS2:%.*]], i64 noundef [[VL:%.*]]) #[[ATTR0]] {
+// CHECK-RV64-NEXT: [[ENTRY:.*:]]
+// CHECK-RV64-NEXT: [[TMP0:%.*]] = call <vscale x 1 x bfloat> @llvm.riscv.vfnmacc.mask.nxv1bf16.bf16.i64(<vscale x 1 x bfloat> [[VD]], bfloat [[RS1]], <vscale x 1 x bfloat> [[VS2]], <vscale x 1 x i1> [[MASK]], i64 7, i64 [[VL]], i64 3)
+// CHECK-RV64-NEXT: ret <vscale x 1 x bfloat> [[TMP0]]
+//
+vbfloat16mf4_t test_vfnmacc_vf_bf16mf4_m(vbool64_t mask, vbfloat16mf4_t vd, __bf16 rs1, vbfloat16mf4_t vs2, size_t vl) {
+ return __riscv_vfnmacc(mask, vd, rs1, vs2, vl);
+}
+
+// CHECK-RV64-LABEL: define dso_local <vscale x 2 x bfloat> @test_vfnmacc_vv_bf16mf2_m(
+// CHECK-RV64-SAME: <vscale x 2 x i1> [[MASK:%.*]], <vscale x 2 x bfloat> [[VD:%.*]], <vscale x 2 x bfloat> [[VS1:%.*]], <vscale x 2 x bfloat> [[VS2:%.*]], i64 noundef [[VL:%.*]]) #[[ATTR0]] {
+// CHECK-RV64-NEXT: [[ENTRY:.*:]]
+// CHECK-RV64-NEXT: [[TMP0:%.*]] = call <vscale x 2 x bfloat> @llvm.riscv.vfnmacc.mask.nxv2bf16.nxv2bf16.i64(<vscale x 2 x bfloat> [[VD]], <vscale x 2 x bfloat> [[VS1]], <vscale x 2 x bfloat> [[VS2]], <vscale x 2 x i1> [[MASK]], i64 7, i64 [[VL]], i64 3)
+// CHECK-RV64-NEXT: ret <vscale x 2 x bfloat> [[TMP0]]
+//
+vbfloat16mf2_t test_vfnmacc_vv_bf16mf2_m(vbool32_t mask, vbfloat16mf2_t vd, vbfloat16mf2_t vs1, vbfloat16mf2_t vs2, size_t vl) {
+ return __riscv_vfnmacc(mask, vd, vs1, vs2, vl);
+}
+
+// CHECK-RV64-LABEL: define dso_local <vscale x 2 x bfloat> @test_vfnmacc_vf_bf16mf2_m(
+// CHECK-RV64-SAME: <vscale x 2 x i1> [[MASK:%.*]], <vscale x 2 x bfloat> [[VD:%.*]], bfloat noundef [[RS1:%.*]], <vscale x 2 x bfloat> [[VS2:%.*]], i64 noundef [[VL:%.*]]) #[[ATTR0]] {
+// CHECK-RV64-NEXT: [[ENTRY:.*:]]
+// CHECK-RV64-NEXT: [[TMP0:%.*]] = call <vscale x 2 x bfloat> @llvm.riscv.vfnmacc.mask.nxv2bf16.bf16.i64(<vscale x 2 x bfloat> [[VD]], bfloat [[RS1]], <vscale x 2 x bfloat> [[VS2]], <vscale x 2 x i1> [[MASK]], i64 7, i64 [[VL]], i64 3)
+// CHECK-RV64-NEXT: ret <vscale x 2 x bfloat> [[TMP0]]
+//
+vbfloat16mf2_t test_vfnmacc_vf_bf16mf2_m(vbool32_t mask, vbfloat16mf2_t vd, __bf16 rs1, vbfloat16mf2_t vs2, size_t vl) {
+ return __riscv_vfnmacc(mask, vd, rs1, vs2, vl);
+}
+
+// CHECK-RV64-LABEL: define dso_local <vscale x 4 x bfloat> @test_vfnmacc_vv_bf16m1_m(
+// CHECK-RV64-SAME: <vscale x 4 x i1> [[MASK:%.*]], <vscale x 4 x bfloat> [[VD:%.*]], <vscale x 4 x bfloat> [[VS1:%.*]], <vscale x 4 x bfloat> [[VS2:%.*]], i64 noundef [[VL:%.*]]) #[[ATTR0]] {
+// CHECK-RV64-NEXT: [[ENTRY:.*:]]
+// CHECK-RV64-NEXT: [[TMP0:%.*]] = call <vscale x 4 x bfloat> @llvm.riscv.vfnmacc.mask.nxv4bf16.nxv4bf16.i64(<vscale x 4 x bfloat> [[VD]], <vscale x 4 x bfloat> [[VS1]], <vscale x 4 x bfloat> [[VS2]], <vscale x 4 x i1> [[MASK]], i64 7, i64 [[VL]], i64 3)
+// CHECK-RV64-NEXT: ret <vscale x 4 x bfloat> [[TMP0]]
+//
+vbfloat16m1_t test_vfnmacc_vv_bf16m1_m(vbool16_t mask, vbfloat16m1_t vd, vbfloat16m1_t vs1, vbfloat16m1_t vs2, size_t vl) {
+ return __riscv_vfnmacc(mask, vd, vs1, vs2, vl);
+}
+
+// CHECK-RV64-LABEL: define dso_local <vscale x 4 x bfloat> @test_vfnmacc_vf_bf16m1_m(
+// CHECK-RV64-SAME: <vscale x 4 x i1> [[MASK:%.*]], <vscale x 4 x bfloat> [[VD:%.*]], bfloat noundef [[RS1:%.*]], <vscale x 4 x bfloat> [[VS2:%.*]], i64 noundef [[VL:%.*]]) #[[ATTR0]] {
+// CHECK-RV64-NEXT: [[ENTRY:.*:]]
+// CHECK-RV64-NEXT: [[TMP0:%.*]] = call <vscale x 4 x bfloat> @llvm.riscv.vfnmacc.mask.nxv4bf16.bf16.i64(<vscale x 4 x bfloat> [[VD]], bfloat [[RS1]], <vscale x 4 x bfloat> [[VS2]], <vscale x 4 x i1> [[MASK]], i64 7, i64 [[VL]], i64 3)
+// CHECK-RV64-NEXT: ret <vscale x 4 x bfloat> [[TMP0]]
+//
+vbfloat16m1_t test_vfnmacc_vf_bf16m1_m(vbool16_t mask, vbfloat16m1_t vd, __bf16 rs1, vbfloat16m1_t vs2, size_t vl) {
+ return __riscv_vfnmacc(mask, vd, rs1, vs2, vl);
+}
+
+// CHECK-RV64-LABEL: define dso_local <vscale x 8 x bfloat> @test_vfnmacc_vv_bf16m2_m(
+// CHECK-RV64-SAME: <vscale x 8 x i1> [[MASK:%.*]], <vscale x 8 x bfloat> [[VD:%.*]], <vscale x 8 x bfloat> [[VS1:%.*]], <vscale x 8 x bfloat> [[VS2:%.*]], i64 noundef [[VL:%.*]]) #[[ATTR0]] {
+// CHECK-RV64-NEXT: [[ENTRY:.*:]]
+// CHECK-RV64-NEXT: [[TMP0:%.*]] = call <vscale x 8 x bfloat> @llvm.riscv.vfnmacc.mask.nxv8bf16.nxv8bf16.i64(<vscale x 8 x bfloat> [[VD]], <vscale x 8 x bfloat> [[VS1]], <vscale x 8 x bfloat> [[VS2]], <vscale x 8 x i1> [[MASK]], i64 7, i64 [[VL]], i64 3)
+// CHECK-RV64-NEXT: ret <vscale x 8 x bfloat> [[TMP0]]
+//
+vbfloat16m2_t test_vfnmacc_vv_bf16m2_m(vbool8_t mask, vbfloat16m2_t vd, vbfloat16m2_t vs1, vbfloat16m2_t vs2, size_t vl) {
+ return __riscv_vfnmacc(mask, vd, vs1, vs2, vl);
+}
+
+// CHECK-RV64-LABEL: define dso_local <vscale x 8 x bfloat> @test_vfnmacc_vf_bf16m2_m(
+// CHECK-RV64-SAME: <vscale x 8 x i1> [[MASK:%.*]], <vscale x 8 x bfloat> [[VD:%.*]], bfloat noundef [[RS1:%.*]], <vscale x 8 x bfloat> [[VS2:%.*]], i64 noundef [[VL:%.*]]) #[[ATTR0]] {
+// CHECK-RV64-NEXT: [[ENTRY:.*:]]
+// CHECK-RV64-NEXT: [[TMP0:%.*]] = call <vscale x 8 x bfloat> @llvm.riscv.vfnmacc.mask.nxv8bf16.bf16.i64(<vscale x 8 x bfloat> [[VD]], bfloat [[RS1]], <vscale x 8 x bfloat> [[VS2]], <vscale x 8 x i1> [[MASK]], i64 7, i64 [[VL]], i64 3)
+// CHECK-RV64-NEXT: ret <vscale x 8 x bfloat> [[TMP0]]
+//
+vbfloat16m2_t test_vfnmacc_vf_bf16m2_m(vbool8_t mask, vbfloat16m2_t vd, __bf16 rs1, vbfloat16m2_t vs2, size_t vl) {
+ return __riscv_vfnmacc(mask, vd, rs1, vs2, vl);
+}
+
+// CHECK-RV64-LABEL: define dso_local <vscale x 16 x bfloat> @test_vfnmacc_vv_bf16m4_m(
+// CHECK-RV64-SAME: <vscale x 16 x i1> [[MASK:%.*]], <vscale x 16 x bfloat> [[VD:%.*]], <vscale x 16 x bfloat> [[VS1:%.*]], <vscale x 16 x bfloat> [[VS2:%.*]], i64 noundef [[VL:%.*]]) #[[ATTR0]] {
+// CHECK-RV64-NEXT: [[ENTRY:.*:]]
+// CHECK-RV64-NEXT: [[TMP0:%.*]] = call <vscale x 16 x bfloat> @llvm.riscv.vfnmacc.mask.nxv16bf16.nxv16bf16.i64(<vscale x 16 x bfloat> [[VD]], <vscale x 16 x bfloat> [[VS1]], <vscale x 16 x bfloat> [[VS2]], <vscale x 16 x i1> [[MASK]], i64 7, i64 [[VL]], i64 3)
+// CHECK-RV64-NEXT: ret <vscale x 16 x bfloat> [[TMP0]]
+//
+vbfloat16m4_t test_vfnmacc_vv_bf16m4_m(vbool4_t mask, vbfloat16m4_t vd, vbfloat16m4_t vs1, vbfloat16m4_t vs2, size_t vl) {
+ return __riscv_vfnmacc(mask, vd, vs1, vs2, vl);
+}
+
+// CHECK-RV64-LABEL: define dso_local <vscale x 16 x bfloat> @test_vfnmacc_vf_bf16m4_m(
+// CHECK-RV64-SAME: <vscale x 16 x i1> [[MASK:%.*]], <vscale x 16 x bfloat> [[VD:%.*]], bfloat noundef [[RS1:%.*]], <vscale x 16 x bfloat> [[VS2:%.*]], i64 noundef [[VL:%.*]]) #[[ATTR0]] {
+// CHECK-RV64-NEXT: [[ENTRY:.*:]]
+// CHECK-RV64-NEXT: [[TMP0:%.*]] = call <vscale x 16 x bfloat> @llvm.riscv.vfnmacc.mask.nxv16bf16.bf16.i64(<vscale x 16 x bfloat> [[VD]], bfloat [[RS1]], <vscale x 16 x bfloat> [[VS2]], <vscale x 16 x i1> [[MASK]], i64 7, i64 [[VL]], i64 3)
+// CHECK-RV64-NEXT: ret <vscale x 16 x bfloat> [[TMP0]]
+//
+vbfloat16m4_t test_vfnmacc_vf_bf16m4_m(vbool4_t mask, vbfloat16m4_t vd, __bf16 rs1, vbfloat16m4_t vs2, size_t vl) {
+ return __riscv_vfnmacc(mask, vd, rs1, vs2, vl);
+}
+
+// CHECK-RV64-LABEL: define dso_local <vscale x 32 x bfloat> @test_vfnmacc_vv_bf16m8_m(
+// CHECK-RV64-SAME: <vscale x 32 x i1> [[MASK:%.*]], <vscale x 32 x bfloat> [[VD:%.*]], <vscale x 32 x bfloat> [[VS1:%.*]], <vscale x 32 x bfloat> [[VS2:%.*]], i64 noundef [[VL:%.*]]) #[[ATTR0]] {
+// CHECK-RV64-NEXT: [[ENTRY:.*:]]
+// CHECK-RV64-NEXT: [[TMP0:%.*]] = call <vscale x 32 x bfloat> @llvm.riscv.vfnmacc.mask.nxv32bf16.nxv32bf16.i64(<vscale x 32 x bfloat> [[VD]], <vscale x 32 x bfloat> [[VS1]], <vscale x 32 x bfloat> [[VS2]], <vscale x 32 x i1> [[MASK]], i64 7, i64 [[VL]], i64 3)
+// CHECK-RV64-NEXT: ret <vscale x 32 x bfloat> [[TMP0]]
+//
+vbfloat16m8_t test_vfnmacc_vv_bf16m8_m(vbool2_t mask, vbfloat16m8_t vd, vbfloat16m8_t vs1, vbfloat16m8_t vs2, size_t vl) {
+ return __riscv_vfnmacc(mask, vd, vs1, vs2, vl);
+}
+
+// CHECK-RV64-LABEL: define dso_local <vscale x 32 x bfloat> @test_vfnmacc_vf_bf16m8_m(
+// CHECK-RV64-SAME: <vscale x 32 x i1> [[MASK:%.*]], <vscale x 32 x bfloat> [[VD:%.*]], bfloat noundef [[RS1:%.*]], <vscale x 32 x bfloat> [[VS2:%.*]], i64 noundef [[VL:%.*]]) #[[ATTR0]] {
+// CHECK-RV64-NEXT: [[ENTRY:.*:]]
+// CHECK-RV64-NEXT: [[TMP0:%.*]] = call <vscale x 32 x bfloat> @llvm.riscv.vfnmacc.mask.nxv32bf16.bf16.i64(<vscale x 32 x bfloat> [[VD]], bfloat [[RS1]], <vscale x 32 x bfloat> [[VS2]], <vscale x 32 x i1> [[MASK]], i64 7, i64 [[VL]], i64 3)
+// CHECK-RV64-NEXT: ret <vscale x 32 x bfloat> [[TMP0]]
+//
+vbfloat16m8_t test_vfnmacc_vf_bf16m8_m(vbool2_t mask, vbfloat16m8_t vd, __bf16 rs1, vbfloat16m8_t vs2, size_t vl) {
+ return __riscv_vfnmacc(mask, vd, rs1, vs2, vl);
+}
+
diff --git a/clang/test/CodeGen/RISCV/rvv-intrinsics-autogenerated/zvfbfa/non-policy/overloaded/vfnmadd.c b/clang/test/CodeGen/RISCV/rvv-intrinsics-autogenerated/zvfbfa/non-policy/overloaded/vfnmadd.c
new file mode 100644
index 0000000..97d2070
--- /dev/null
+++ b/clang/test/CodeGen/RISCV/rvv-intrinsics-autogenerated/zvfbfa/non-policy/overloaded/vfnmadd.c
@@ -0,0 +1,249 @@
+// NOTE: Assertions have been autogenerated by utils/update_cc_test_checks.py UTC_ARGS: --version 5
+// REQUIRES: riscv-registered-target
+// RUN: %clang_cc1 -triple riscv64 -target-feature +v \
+// RUN: -target-feature +experimental-zvfbfa -disable-O0-optnone \
+// RUN: -emit-llvm %s -o - | opt -S -passes=mem2reg | \
+// RUN: FileCheck --check-prefix=CHECK-RV64 %s
+
+#include <riscv_vector.h>
+
+// CHECK-RV64-LABEL: define dso_local <vscale x 1 x bfloat> @test_vfnmadd_vv_bf16mf4(
+// CHECK-RV64-SAME: <vscale x 1 x bfloat> [[VD:%.*]], <vscale x 1 x bfloat> [[VS1:%.*]], <vscale x 1 x bfloat> [[VS2:%.*]], i64 noundef [[VL:%.*]]) #[[ATTR0:[0-9]+]] {
+// CHECK-RV64-NEXT: [[ENTRY:.*:]]
+// CHECK-RV64-NEXT: [[TMP0:%.*]] = call <vscale x 1 x bfloat> @llvm.riscv.vfnmadd.nxv1bf16.nxv1bf16.i64(<vscale x 1 x bfloat> [[VD]], <vscale x 1 x bfloat> [[VS1]], <vscale x 1 x bfloat> [[VS2]], i64 7, i64 [[VL]], i64 3)
+// CHECK-RV64-NEXT: ret <vscale x 1 x bfloat> [[TMP0]]
+//
+vbfloat16mf4_t test_vfnmadd_vv_bf16mf4(vbfloat16mf4_t vd, vbfloat16mf4_t vs1, vbfloat16mf4_t vs2, size_t vl) {
+ return __riscv_vfnmadd(vd, vs1, vs2, vl);
+}
+
+// CHECK-RV64-LABEL: define dso_local <vscale x 1 x bfloat> @test_vfnmadd_vf_bf16mf4(
+// CHECK-RV64-SAME: <vscale x 1 x bfloat> [[VD:%.*]], bfloat noundef [[RS1:%.*]], <vscale x 1 x bfloat> [[VS2:%.*]], i64 noundef [[VL:%.*]]) #[[ATTR0]] {
+// CHECK-RV64-NEXT: [[ENTRY:.*:]]
+// CHECK-RV64-NEXT: [[TMP0:%.*]] = call <vscale x 1 x bfloat> @llvm.riscv.vfnmadd.nxv1bf16.bf16.i64(<vscale x 1 x bfloat> [[VD]], bfloat [[RS1]], <vscale x 1 x bfloat> [[VS2]], i64 7, i64 [[VL]], i64 3)
+// CHECK-RV64-NEXT: ret <vscale x 1 x bfloat> [[TMP0]]
+//
+vbfloat16mf4_t test_vfnmadd_vf_bf16mf4(vbfloat16mf4_t vd, __bf16 rs1, vbfloat16mf4_t vs2, size_t vl) {
+ return __riscv_vfnmadd(vd, rs1, vs2, vl);
+}
+
+// CHECK-RV64-LABEL: define dso_local <vscale x 2 x bfloat> @test_vfnmadd_vv_bf16mf2(
+// CHECK-RV64-SAME: <vscale x 2 x bfloat> [[VD:%.*]], <vscale x 2 x bfloat> [[VS1:%.*]], <vscale x 2 x bfloat> [[VS2:%.*]], i64 noundef [[VL:%.*]]) #[[ATTR0]] {
+// CHECK-RV64-NEXT: [[ENTRY:.*:]]
+// CHECK-RV64-NEXT: [[TMP0:%.*]] = call <vscale x 2 x bfloat> @llvm.riscv.vfnmadd.nxv2bf16.nxv2bf16.i64(<vscale x 2 x bfloat> [[VD]], <vscale x 2 x bfloat> [[VS1]], <vscale x 2 x bfloat> [[VS2]], i64 7, i64 [[VL]], i64 3)
+// CHECK-RV64-NEXT: ret <vscale x 2 x bfloat> [[TMP0]]
+//
+vbfloat16mf2_t test_vfnmadd_vv_bf16mf2(vbfloat16mf2_t vd, vbfloat16mf2_t vs1, vbfloat16mf2_t vs2, size_t vl) {
+ return __riscv_vfnmadd(vd, vs1, vs2, vl);
+}
+
+// CHECK-RV64-LABEL: define dso_local <vscale x 2 x bfloat> @test_vfnmadd_vf_bf16mf2(
+// CHECK-RV64-SAME: <vscale x 2 x bfloat> [[VD:%.*]], bfloat noundef [[RS1:%.*]], <vscale x 2 x bfloat> [[VS2:%.*]], i64 noundef [[VL:%.*]]) #[[ATTR0]] {
+// CHECK-RV64-NEXT: [[ENTRY:.*:]]
+// CHECK-RV64-NEXT: [[TMP0:%.*]] = call <vscale x 2 x bfloat> @llvm.riscv.vfnmadd.nxv2bf16.bf16.i64(<vscale x 2 x bfloat> [[VD]], bfloat [[RS1]], <vscale x 2 x bfloat> [[VS2]], i64 7, i64 [[VL]], i64 3)
+// CHECK-RV64-NEXT: ret <vscale x 2 x bfloat> [[TMP0]]
+//
+vbfloat16mf2_t test_vfnmadd_vf_bf16mf2(vbfloat16mf2_t vd, __bf16 rs1, vbfloat16mf2_t vs2, size_t vl) {
+ return __riscv_vfnmadd(vd, rs1, vs2, vl);
+}
+
+// CHECK-RV64-LABEL: define dso_local <vscale x 4 x bfloat> @test_vfnmadd_vv_bf16m1(
+// CHECK-RV64-SAME: <vscale x 4 x bfloat> [[VD:%.*]], <vscale x 4 x bfloat> [[VS1:%.*]], <vscale x 4 x bfloat> [[VS2:%.*]], i64 noundef [[VL:%.*]]) #[[ATTR0]] {
+// CHECK-RV64-NEXT: [[ENTRY:.*:]]
+// CHECK-RV64-NEXT: [[TMP0:%.*]] = call <vscale x 4 x bfloat> @llvm.riscv.vfnmadd.nxv4bf16.nxv4bf16.i64(<vscale x 4 x bfloat> [[VD]], <vscale x 4 x bfloat> [[VS1]], <vscale x 4 x bfloat> [[VS2]], i64 7, i64 [[VL]], i64 3)
+// CHECK-RV64-NEXT: ret <vscale x 4 x bfloat> [[TMP0]]
+//
+vbfloat16m1_t test_vfnmadd_vv_bf16m1(vbfloat16m1_t vd, vbfloat16m1_t vs1, vbfloat16m1_t vs2, size_t vl) {
+ return __riscv_vfnmadd(vd, vs1, vs2, vl);
+}
+
+// CHECK-RV64-LABEL: define dso_local <vscale x 4 x bfloat> @test_vfnmadd_vf_bf16m1(
+// CHECK-RV64-SAME: <vscale x 4 x bfloat> [[VD:%.*]], bfloat noundef [[RS1:%.*]], <vscale x 4 x bfloat> [[VS2:%.*]], i64 noundef [[VL:%.*]]) #[[ATTR0]] {
+// CHECK-RV64-NEXT: [[ENTRY:.*:]]
+// CHECK-RV64-NEXT: [[TMP0:%.*]] = call <vscale x 4 x bfloat> @llvm.riscv.vfnmadd.nxv4bf16.bf16.i64(<vscale x 4 x bfloat> [[VD]], bfloat [[RS1]], <vscale x 4 x bfloat> [[VS2]], i64 7, i64 [[VL]], i64 3)
+// CHECK-RV64-NEXT: ret <vscale x 4 x bfloat> [[TMP0]]
+//
+vbfloat16m1_t test_vfnmadd_vf_bf16m1(vbfloat16m1_t vd, __bf16 rs1, vbfloat16m1_t vs2, size_t vl) {
+ return __riscv_vfnmadd(vd, rs1, vs2, vl);
+}
+
+// CHECK-RV64-LABEL: define dso_local <vscale x 8 x bfloat> @test_vfnmadd_vv_bf16m2(
+// CHECK-RV64-SAME: <vscale x 8 x bfloat> [[VD:%.*]], <vscale x 8 x bfloat> [[VS1:%.*]], <vscale x 8 x bfloat> [[VS2:%.*]], i64 noundef [[VL:%.*]]) #[[ATTR0]] {
+// CHECK-RV64-NEXT: [[ENTRY:.*:]]
+// CHECK-RV64-NEXT: [[TMP0:%.*]] = call <vscale x 8 x bfloat> @llvm.riscv.vfnmadd.nxv8bf16.nxv8bf16.i64(<vscale x 8 x bfloat> [[VD]], <vscale x 8 x bfloat> [[VS1]], <vscale x 8 x bfloat> [[VS2]], i64 7, i64 [[VL]], i64 3)
+// CHECK-RV64-NEXT: ret <vscale x 8 x bfloat> [[TMP0]]
+//
+vbfloat16m2_t test_vfnmadd_vv_bf16m2(vbfloat16m2_t vd, vbfloat16m2_t vs1, vbfloat16m2_t vs2, size_t vl) {
+ return __riscv_vfnmadd(vd, vs1, vs2, vl);
+}
+
+// CHECK-RV64-LABEL: define dso_local <vscale x 8 x bfloat> @test_vfnmadd_vf_bf16m2(
+// CHECK-RV64-SAME: <vscale x 8 x bfloat> [[VD:%.*]], bfloat noundef [[RS1:%.*]], <vscale x 8 x bfloat> [[VS2:%.*]], i64 noundef [[VL:%.*]]) #[[ATTR0]] {
+// CHECK-RV64-NEXT: [[ENTRY:.*:]]
+// CHECK-RV64-NEXT: [[TMP0:%.*]] = call <vscale x 8 x bfloat> @llvm.riscv.vfnmadd.nxv8bf16.bf16.i64(<vscale x 8 x bfloat> [[VD]], bfloat [[RS1]], <vscale x 8 x bfloat> [[VS2]], i64 7, i64 [[VL]], i64 3)
+// CHECK-RV64-NEXT: ret <vscale x 8 x bfloat> [[TMP0]]
+//
+vbfloat16m2_t test_vfnmadd_vf_bf16m2(vbfloat16m2_t vd, __bf16 rs1, vbfloat16m2_t vs2, size_t vl) {
+ return __riscv_vfnmadd(vd, rs1, vs2, vl);
+}
+
+// CHECK-RV64-LABEL: define dso_local <vscale x 16 x bfloat> @test_vfnmadd_vv_bf16m4(
+// CHECK-RV64-SAME: <vscale x 16 x bfloat> [[VD:%.*]], <vscale x 16 x bfloat> [[VS1:%.*]], <vscale x 16 x bfloat> [[VS2:%.*]], i64 noundef [[VL:%.*]]) #[[ATTR0]] {
+// CHECK-RV64-NEXT: [[ENTRY:.*:]]
+// CHECK-RV64-NEXT: [[TMP0:%.*]] = call <vscale x 16 x bfloat> @llvm.riscv.vfnmadd.nxv16bf16.nxv16bf16.i64(<vscale x 16 x bfloat> [[VD]], <vscale x 16 x bfloat> [[VS1]], <vscale x 16 x bfloat> [[VS2]], i64 7, i64 [[VL]], i64 3)
+// CHECK-RV64-NEXT: ret <vscale x 16 x bfloat> [[TMP0]]
+//
+vbfloat16m4_t test_vfnmadd_vv_bf16m4(vbfloat16m4_t vd, vbfloat16m4_t vs1, vbfloat16m4_t vs2, size_t vl) {
+ return __riscv_vfnmadd(vd, vs1, vs2, vl);
+}
+
+// CHECK-RV64-LABEL: define dso_local <vscale x 16 x bfloat> @test_vfnmadd_vf_bf16m4(
+// CHECK-RV64-SAME: <vscale x 16 x bfloat> [[VD:%.*]], bfloat noundef [[RS1:%.*]], <vscale x 16 x bfloat> [[VS2:%.*]], i64 noundef [[VL:%.*]]) #[[ATTR0]] {
+// CHECK-RV64-NEXT: [[ENTRY:.*:]]
+// CHECK-RV64-NEXT: [[TMP0:%.*]] = call <vscale x 16 x bfloat> @llvm.riscv.vfnmadd.nxv16bf16.bf16.i64(<vscale x 16 x bfloat> [[VD]], bfloat [[RS1]], <vscale x 16 x bfloat> [[VS2]], i64 7, i64 [[VL]], i64 3)
+// CHECK-RV64-NEXT: ret <vscale x 16 x bfloat> [[TMP0]]
+//
+vbfloat16m4_t test_vfnmadd_vf_bf16m4(vbfloat16m4_t vd, __bf16 rs1, vbfloat16m4_t vs2, size_t vl) {
+ return __riscv_vfnmadd(vd, rs1, vs2, vl);
+}
+
+// CHECK-RV64-LABEL: define dso_local <vscale x 32 x bfloat> @test_vfnmadd_vv_bf16m8(
+// CHECK-RV64-SAME: <vscale x 32 x bfloat> [[VD:%.*]], <vscale x 32 x bfloat> [[VS1:%.*]], <vscale x 32 x bfloat> [[VS2:%.*]], i64 noundef [[VL:%.*]]) #[[ATTR0]] {
+// CHECK-RV64-NEXT: [[ENTRY:.*:]]
+// CHECK-RV64-NEXT: [[TMP0:%.*]] = call <vscale x 32 x bfloat> @llvm.riscv.vfnmadd.nxv32bf16.nxv32bf16.i64(<vscale x 32 x bfloat> [[VD]], <vscale x 32 x bfloat> [[VS1]], <vscale x 32 x bfloat> [[VS2]], i64 7, i64 [[VL]], i64 3)
+// CHECK-RV64-NEXT: ret <vscale x 32 x bfloat> [[TMP0]]
+//
+vbfloat16m8_t test_vfnmadd_vv_bf16m8(vbfloat16m8_t vd, vbfloat16m8_t vs1, vbfloat16m8_t vs2, size_t vl) {
+ return __riscv_vfnmadd(vd, vs1, vs2, vl);
+}
+
+// CHECK-RV64-LABEL: define dso_local <vscale x 32 x bfloat> @test_vfnmadd_vf_bf16m8(
+// CHECK-RV64-SAME: <vscale x 32 x bfloat> [[VD:%.*]], bfloat noundef [[RS1:%.*]], <vscale x 32 x bfloat> [[VS2:%.*]], i64 noundef [[VL:%.*]]) #[[ATTR0]] {
+// CHECK-RV64-NEXT: [[ENTRY:.*:]]
+// CHECK-RV64-NEXT: [[TMP0:%.*]] = call <vscale x 32 x bfloat> @llvm.riscv.vfnmadd.nxv32bf16.bf16.i64(<vscale x 32 x bfloat> [[VD]], bfloat [[RS1]], <vscale x 32 x bfloat> [[VS2]], i64 7, i64 [[VL]], i64 3)
+// CHECK-RV64-NEXT: ret <vscale x 32 x bfloat> [[TMP0]]
+//
+vbfloat16m8_t test_vfnmadd_vf_bf16m8(vbfloat16m8_t vd, __bf16 rs1, vbfloat16m8_t vs2, size_t vl) {
+ return __riscv_vfnmadd(vd, rs1, vs2, vl);
+}
+
+// CHECK-RV64-LABEL: define dso_local <vscale x 1 x bfloat> @test_vfnmadd_vv_bf16mf4_m(
+// CHECK-RV64-SAME: <vscale x 1 x i1> [[MASK:%.*]], <vscale x 1 x bfloat> [[VD:%.*]], <vscale x 1 x bfloat> [[VS1:%.*]], <vscale x 1 x bfloat> [[VS2:%.*]], i64 noundef [[VL:%.*]]) #[[ATTR0]] {
+// CHECK-RV64-NEXT: [[ENTRY:.*:]]
+// CHECK-RV64-NEXT: [[TMP0:%.*]] = call <vscale x 1 x bfloat> @llvm.riscv.vfnmadd.mask.nxv1bf16.nxv1bf16.i64(<vscale x 1 x bfloat> [[VD]], <vscale x 1 x bfloat> [[VS1]], <vscale x 1 x bfloat> [[VS2]], <vscale x 1 x i1> [[MASK]], i64 7, i64 [[VL]], i64 3)
+// CHECK-RV64-NEXT: ret <vscale x 1 x bfloat> [[TMP0]]
+//
+vbfloat16mf4_t test_vfnmadd_vv_bf16mf4_m(vbool64_t mask, vbfloat16mf4_t vd, vbfloat16mf4_t vs1, vbfloat16mf4_t vs2, size_t vl) {
+ return __riscv_vfnmadd(mask, vd, vs1, vs2, vl);
+}
+
+// CHECK-RV64-LABEL: define dso_local <vscale x 1 x bfloat> @test_vfnmadd_vf_bf16mf4_m(
+// CHECK-RV64-SAME: <vscale x 1 x i1> [[MASK:%.*]], <vscale x 1 x bfloat> [[VD:%.*]], bfloat noundef [[RS1:%.*]], <vscale x 1 x bfloat> [[VS2:%.*]], i64 noundef [[VL:%.*]]) #[[ATTR0]] {
+// CHECK-RV64-NEXT: [[ENTRY:.*:]]
+// CHECK-RV64-NEXT: [[TMP0:%.*]] = call <vscale x 1 x bfloat> @llvm.riscv.vfnmadd.mask.nxv1bf16.bf16.i64(<vscale x 1 x bfloat> [[VD]], bfloat [[RS1]], <vscale x 1 x bfloat> [[VS2]], <vscale x 1 x i1> [[MASK]], i64 7, i64 [[VL]], i64 3)
+// CHECK-RV64-NEXT: ret <vscale x 1 x bfloat> [[TMP0]]
+//
+vbfloat16mf4_t test_vfnmadd_vf_bf16mf4_m(vbool64_t mask, vbfloat16mf4_t vd, __bf16 rs1, vbfloat16mf4_t vs2, size_t vl) {
+ return __riscv_vfnmadd(mask, vd, rs1, vs2, vl);
+}
+
+// CHECK-RV64-LABEL: define dso_local <vscale x 2 x bfloat> @test_vfnmadd_vv_bf16mf2_m(
+// CHECK-RV64-SAME: <vscale x 2 x i1> [[MASK:%.*]], <vscale x 2 x bfloat> [[VD:%.*]], <vscale x 2 x bfloat> [[VS1:%.*]], <vscale x 2 x bfloat> [[VS2:%.*]], i64 noundef [[VL:%.*]]) #[[ATTR0]] {
+// CHECK-RV64-NEXT: [[ENTRY:.*:]]
+// CHECK-RV64-NEXT: [[TMP0:%.*]] = call <vscale x 2 x bfloat> @llvm.riscv.vfnmadd.mask.nxv2bf16.nxv2bf16.i64(<vscale x 2 x bfloat> [[VD]], <vscale x 2 x bfloat> [[VS1]], <vscale x 2 x bfloat> [[VS2]], <vscale x 2 x i1> [[MASK]], i64 7, i64 [[VL]], i64 3)
+// CHECK-RV64-NEXT: ret <vscale x 2 x bfloat> [[TMP0]]
+//
+vbfloat16mf2_t test_vfnmadd_vv_bf16mf2_m(vbool32_t mask, vbfloat16mf2_t vd, vbfloat16mf2_t vs1, vbfloat16mf2_t vs2, size_t vl) {
+ return __riscv_vfnmadd(mask, vd, vs1, vs2, vl);
+}
+
+// CHECK-RV64-LABEL: define dso_local <vscale x 2 x bfloat> @test_vfnmadd_vf_bf16mf2_m(
+// CHECK-RV64-SAME: <vscale x 2 x i1> [[MASK:%.*]], <vscale x 2 x bfloat> [[VD:%.*]], bfloat noundef [[RS1:%.*]], <vscale x 2 x bfloat> [[VS2:%.*]], i64 noundef [[VL:%.*]]) #[[ATTR0]] {
+// CHECK-RV64-NEXT: [[ENTRY:.*:]]
+// CHECK-RV64-NEXT: [[TMP0:%.*]] = call <vscale x 2 x bfloat> @llvm.riscv.vfnmadd.mask.nxv2bf16.bf16.i64(<vscale x 2 x bfloat> [[VD]], bfloat [[RS1]], <vscale x 2 x bfloat> [[VS2]], <vscale x 2 x i1> [[MASK]], i64 7, i64 [[VL]], i64 3)
+// CHECK-RV64-NEXT: ret <vscale x 2 x bfloat> [[TMP0]]
+//
+vbfloat16mf2_t test_vfnmadd_vf_bf16mf2_m(vbool32_t mask, vbfloat16mf2_t vd, __bf16 rs1, vbfloat16mf2_t vs2, size_t vl) {
+ return __riscv_vfnmadd(mask, vd, rs1, vs2, vl);
+}
+
+// CHECK-RV64-LABEL: define dso_local <vscale x 4 x bfloat> @test_vfnmadd_vv_bf16m1_m(
+// CHECK-RV64-SAME: <vscale x 4 x i1> [[MASK:%.*]], <vscale x 4 x bfloat> [[VD:%.*]], <vscale x 4 x bfloat> [[VS1:%.*]], <vscale x 4 x bfloat> [[VS2:%.*]], i64 noundef [[VL:%.*]]) #[[ATTR0]] {
+// CHECK-RV64-NEXT: [[ENTRY:.*:]]
+// CHECK-RV64-NEXT: [[TMP0:%.*]] = call <vscale x 4 x bfloat> @llvm.riscv.vfnmadd.mask.nxv4bf16.nxv4bf16.i64(<vscale x 4 x bfloat> [[VD]], <vscale x 4 x bfloat> [[VS1]], <vscale x 4 x bfloat> [[VS2]], <vscale x 4 x i1> [[MASK]], i64 7, i64 [[VL]], i64 3)
+// CHECK-RV64-NEXT: ret <vscale x 4 x bfloat> [[TMP0]]
+//
+vbfloat16m1_t test_vfnmadd_vv_bf16m1_m(vbool16_t mask, vbfloat16m1_t vd, vbfloat16m1_t vs1, vbfloat16m1_t vs2, size_t vl) {
+ return __riscv_vfnmadd(mask, vd, vs1, vs2, vl);
+}
+
+// CHECK-RV64-LABEL: define dso_local <vscale x 4 x bfloat> @test_vfnmadd_vf_bf16m1_m(
+// CHECK-RV64-SAME: <vscale x 4 x i1> [[MASK:%.*]], <vscale x 4 x bfloat> [[VD:%.*]], bfloat noundef [[RS1:%.*]], <vscale x 4 x bfloat> [[VS2:%.*]], i64 noundef [[VL:%.*]]) #[[ATTR0]] {
+// CHECK-RV64-NEXT: [[ENTRY:.*:]]
+// CHECK-RV64-NEXT: [[TMP0:%.*]] = call <vscale x 4 x bfloat> @llvm.riscv.vfnmadd.mask.nxv4bf16.bf16.i64(<vscale x 4 x bfloat> [[VD]], bfloat [[RS1]], <vscale x 4 x bfloat> [[VS2]], <vscale x 4 x i1> [[MASK]], i64 7, i64 [[VL]], i64 3)
+// CHECK-RV64-NEXT: ret <vscale x 4 x bfloat> [[TMP0]]
+//
+vbfloat16m1_t test_vfnmadd_vf_bf16m1_m(vbool16_t mask, vbfloat16m1_t vd, __bf16 rs1, vbfloat16m1_t vs2, size_t vl) {
+ return __riscv_vfnmadd(mask, vd, rs1, vs2, vl);
+}
+
+// CHECK-RV64-LABEL: define dso_local <vscale x 8 x bfloat> @test_vfnmadd_vv_bf16m2_m(
+// CHECK-RV64-SAME: <vscale x 8 x i1> [[MASK:%.*]], <vscale x 8 x bfloat> [[VD:%.*]], <vscale x 8 x bfloat> [[VS1:%.*]], <vscale x 8 x bfloat> [[VS2:%.*]], i64 noundef [[VL:%.*]]) #[[ATTR0]] {
+// CHECK-RV64-NEXT: [[ENTRY:.*:]]
+// CHECK-RV64-NEXT: [[TMP0:%.*]] = call <vscale x 8 x bfloat> @llvm.riscv.vfnmadd.mask.nxv8bf16.nxv8bf16.i64(<vscale x 8 x bfloat> [[VD]], <vscale x 8 x bfloat> [[VS1]], <vscale x 8 x bfloat> [[VS2]], <vscale x 8 x i1> [[MASK]], i64 7, i64 [[VL]], i64 3)
+// CHECK-RV64-NEXT: ret <vscale x 8 x bfloat> [[TMP0]]
+//
+vbfloat16m2_t test_vfnmadd_vv_bf16m2_m(vbool8_t mask, vbfloat16m2_t vd, vbfloat16m2_t vs1, vbfloat16m2_t vs2, size_t vl) {
+ return __riscv_vfnmadd(mask, vd, vs1, vs2, vl);
+}
+
+// CHECK-RV64-LABEL: define dso_local <vscale x 8 x bfloat> @test_vfnmadd_vf_bf16m2_m(
+// CHECK-RV64-SAME: <vscale x 8 x i1> [[MASK:%.*]], <vscale x 8 x bfloat> [[VD:%.*]], bfloat noundef [[RS1:%.*]], <vscale x 8 x bfloat> [[VS2:%.*]], i64 noundef [[VL:%.*]]) #[[ATTR0]] {
+// CHECK-RV64-NEXT: [[ENTRY:.*:]]
+// CHECK-RV64-NEXT: [[TMP0:%.*]] = call <vscale x 8 x bfloat> @llvm.riscv.vfnmadd.mask.nxv8bf16.bf16.i64(<vscale x 8 x bfloat> [[VD]], bfloat [[RS1]], <vscale x 8 x bfloat> [[VS2]], <vscale x 8 x i1> [[MASK]], i64 7, i64 [[VL]], i64 3)
+// CHECK-RV64-NEXT: ret <vscale x 8 x bfloat> [[TMP0]]
+//
+vbfloat16m2_t test_vfnmadd_vf_bf16m2_m(vbool8_t mask, vbfloat16m2_t vd, __bf16 rs1, vbfloat16m2_t vs2, size_t vl) {
+ return __riscv_vfnmadd(mask, vd, rs1, vs2, vl);
+}
+
+// CHECK-RV64-LABEL: define dso_local <vscale x 16 x bfloat> @test_vfnmadd_vv_bf16m4_m(
+// CHECK-RV64-SAME: <vscale x 16 x i1> [[MASK:%.*]], <vscale x 16 x bfloat> [[VD:%.*]], <vscale x 16 x bfloat> [[VS1:%.*]], <vscale x 16 x bfloat> [[VS2:%.*]], i64 noundef [[VL:%.*]]) #[[ATTR0]] {
+// CHECK-RV64-NEXT: [[ENTRY:.*:]]
+// CHECK-RV64-NEXT: [[TMP0:%.*]] = call <vscale x 16 x bfloat> @llvm.riscv.vfnmadd.mask.nxv16bf16.nxv16bf16.i64(<vscale x 16 x bfloat> [[VD]], <vscale x 16 x bfloat> [[VS1]], <vscale x 16 x bfloat> [[VS2]], <vscale x 16 x i1> [[MASK]], i64 7, i64 [[VL]], i64 3)
+// CHECK-RV64-NEXT: ret <vscale x 16 x bfloat> [[TMP0]]
+//
+vbfloat16m4_t test_vfnmadd_vv_bf16m4_m(vbool4_t mask, vbfloat16m4_t vd, vbfloat16m4_t vs1, vbfloat16m4_t vs2, size_t vl) {
+ return __riscv_vfnmadd(mask, vd, vs1, vs2, vl);
+}
+
+// CHECK-RV64-LABEL: define dso_local <vscale x 16 x bfloat> @test_vfnmadd_vf_bf16m4_m(
+// CHECK-RV64-SAME: <vscale x 16 x i1> [[MASK:%.*]], <vscale x 16 x bfloat> [[VD:%.*]], bfloat noundef [[RS1:%.*]], <vscale x 16 x bfloat> [[VS2:%.*]], i64 noundef [[VL:%.*]]) #[[ATTR0]] {
+// CHECK-RV64-NEXT: [[ENTRY:.*:]]
+// CHECK-RV64-NEXT: [[TMP0:%.*]] = call <vscale x 16 x bfloat> @llvm.riscv.vfnmadd.mask.nxv16bf16.bf16.i64(<vscale x 16 x bfloat> [[VD]], bfloat [[RS1]], <vscale x 16 x bfloat> [[VS2]], <vscale x 16 x i1> [[MASK]], i64 7, i64 [[VL]], i64 3)
+// CHECK-RV64-NEXT: ret <vscale x 16 x bfloat> [[TMP0]]
+//
+vbfloat16m4_t test_vfnmadd_vf_bf16m4_m(vbool4_t mask, vbfloat16m4_t vd, __bf16 rs1, vbfloat16m4_t vs2, size_t vl) {
+ return __riscv_vfnmadd(mask, vd, rs1, vs2, vl);
+}
+
+// CHECK-RV64-LABEL: define dso_local <vscale x 32 x bfloat> @test_vfnmadd_vv_bf16m8_m(
+// CHECK-RV64-SAME: <vscale x 32 x i1> [[MASK:%.*]], <vscale x 32 x bfloat> [[VD:%.*]], <vscale x 32 x bfloat> [[VS1:%.*]], <vscale x 32 x bfloat> [[VS2:%.*]], i64 noundef [[VL:%.*]]) #[[ATTR0]] {
+// CHECK-RV64-NEXT: [[ENTRY:.*:]]
+// CHECK-RV64-NEXT: [[TMP0:%.*]] = call <vscale x 32 x bfloat> @llvm.riscv.vfnmadd.mask.nxv32bf16.nxv32bf16.i64(<vscale x 32 x bfloat> [[VD]], <vscale x 32 x bfloat> [[VS1]], <vscale x 32 x bfloat> [[VS2]], <vscale x 32 x i1> [[MASK]], i64 7, i64 [[VL]], i64 3)
+// CHECK-RV64-NEXT: ret <vscale x 32 x bfloat> [[TMP0]]
+//
+vbfloat16m8_t test_vfnmadd_vv_bf16m8_m(vbool2_t mask, vbfloat16m8_t vd, vbfloat16m8_t vs1, vbfloat16m8_t vs2, size_t vl) {
+ return __riscv_vfnmadd(mask, vd, vs1, vs2, vl);
+}
+
+// CHECK-RV64-LABEL: define dso_local <vscale x 32 x bfloat> @test_vfnmadd_vf_bf16m8_m(
+// CHECK-RV64-SAME: <vscale x 32 x i1> [[MASK:%.*]], <vscale x 32 x bfloat> [[VD:%.*]], bfloat noundef [[RS1:%.*]], <vscale x 32 x bfloat> [[VS2:%.*]], i64 noundef [[VL:%.*]]) #[[ATTR0]] {
+// CHECK-RV64-NEXT: [[ENTRY:.*:]]
+// CHECK-RV64-NEXT: [[TMP0:%.*]] = call <vscale x 32 x bfloat> @llvm.riscv.vfnmadd.mask.nxv32bf16.bf16.i64(<vscale x 32 x bfloat> [[VD]], bfloat [[RS1]], <vscale x 32 x bfloat> [[VS2]], <vscale x 32 x i1> [[MASK]], i64 7, i64 [[VL]], i64 3)
+// CHECK-RV64-NEXT: ret <vscale x 32 x bfloat> [[TMP0]]
+//
+vbfloat16m8_t test_vfnmadd_vf_bf16m8_m(vbool2_t mask, vbfloat16m8_t vd, __bf16 rs1, vbfloat16m8_t vs2, size_t vl) {
+ return __riscv_vfnmadd(mask, vd, rs1, vs2, vl);
+}
+
diff --git a/clang/test/CodeGen/RISCV/rvv-intrinsics-autogenerated/zvfbfa/non-policy/overloaded/vfnmsac.c b/clang/test/CodeGen/RISCV/rvv-intrinsics-autogenerated/zvfbfa/non-policy/overloaded/vfnmsac.c
new file mode 100644
index 0000000..404b4f8
--- /dev/null
+++ b/clang/test/CodeGen/RISCV/rvv-intrinsics-autogenerated/zvfbfa/non-policy/overloaded/vfnmsac.c
@@ -0,0 +1,249 @@
+// NOTE: Assertions have been autogenerated by utils/update_cc_test_checks.py UTC_ARGS: --version 5
+// REQUIRES: riscv-registered-target
+// RUN: %clang_cc1 -triple riscv64 -target-feature +v \
+// RUN: -target-feature +experimental-zvfbfa -disable-O0-optnone \
+// RUN: -emit-llvm %s -o - | opt -S -passes=mem2reg | \
+// RUN: FileCheck --check-prefix=CHECK-RV64 %s
+
+#include <riscv_vector.h>
+
+// CHECK-RV64-LABEL: define dso_local <vscale x 1 x bfloat> @test_vfnmsac_vv_bf16mf4(
+// CHECK-RV64-SAME: <vscale x 1 x bfloat> [[VD:%.*]], <vscale x 1 x bfloat> [[VS1:%.*]], <vscale x 1 x bfloat> [[VS2:%.*]], i64 noundef [[VL:%.*]]) #[[ATTR0:[0-9]+]] {
+// CHECK-RV64-NEXT: [[ENTRY:.*:]]
+// CHECK-RV64-NEXT: [[TMP0:%.*]] = call <vscale x 1 x bfloat> @llvm.riscv.vfnmsac.nxv1bf16.nxv1bf16.i64(<vscale x 1 x bfloat> [[VD]], <vscale x 1 x bfloat> [[VS1]], <vscale x 1 x bfloat> [[VS2]], i64 7, i64 [[VL]], i64 3)
+// CHECK-RV64-NEXT: ret <vscale x 1 x bfloat> [[TMP0]]
+//
+vbfloat16mf4_t test_vfnmsac_vv_bf16mf4(vbfloat16mf4_t vd, vbfloat16mf4_t vs1, vbfloat16mf4_t vs2, size_t vl) {
+ return __riscv_vfnmsac(vd, vs1, vs2, vl);
+}
+
+// CHECK-RV64-LABEL: define dso_local <vscale x 1 x bfloat> @test_vfnmsac_vf_bf16mf4(
+// CHECK-RV64-SAME: <vscale x 1 x bfloat> [[VD:%.*]], bfloat noundef [[RS1:%.*]], <vscale x 1 x bfloat> [[VS2:%.*]], i64 noundef [[VL:%.*]]) #[[ATTR0]] {
+// CHECK-RV64-NEXT: [[ENTRY:.*:]]
+// CHECK-RV64-NEXT: [[TMP0:%.*]] = call <vscale x 1 x bfloat> @llvm.riscv.vfnmsac.nxv1bf16.bf16.i64(<vscale x 1 x bfloat> [[VD]], bfloat [[RS1]], <vscale x 1 x bfloat> [[VS2]], i64 7, i64 [[VL]], i64 3)
+// CHECK-RV64-NEXT: ret <vscale x 1 x bfloat> [[TMP0]]
+//
+vbfloat16mf4_t test_vfnmsac_vf_bf16mf4(vbfloat16mf4_t vd, __bf16 rs1, vbfloat16mf4_t vs2, size_t vl) {
+ return __riscv_vfnmsac(vd, rs1, vs2, vl);
+}
+
+// CHECK-RV64-LABEL: define dso_local <vscale x 2 x bfloat> @test_vfnmsac_vv_bf16mf2(
+// CHECK-RV64-SAME: <vscale x 2 x bfloat> [[VD:%.*]], <vscale x 2 x bfloat> [[VS1:%.*]], <vscale x 2 x bfloat> [[VS2:%.*]], i64 noundef [[VL:%.*]]) #[[ATTR0]] {
+// CHECK-RV64-NEXT: [[ENTRY:.*:]]
+// CHECK-RV64-NEXT: [[TMP0:%.*]] = call <vscale x 2 x bfloat> @llvm.riscv.vfnmsac.nxv2bf16.nxv2bf16.i64(<vscale x 2 x bfloat> [[VD]], <vscale x 2 x bfloat> [[VS1]], <vscale x 2 x bfloat> [[VS2]], i64 7, i64 [[VL]], i64 3)
+// CHECK-RV64-NEXT: ret <vscale x 2 x bfloat> [[TMP0]]
+//
+vbfloat16mf2_t test_vfnmsac_vv_bf16mf2(vbfloat16mf2_t vd, vbfloat16mf2_t vs1, vbfloat16mf2_t vs2, size_t vl) {
+ return __riscv_vfnmsac(vd, vs1, vs2, vl);
+}
+
+// CHECK-RV64-LABEL: define dso_local <vscale x 2 x bfloat> @test_vfnmsac_vf_bf16mf2(
+// CHECK-RV64-SAME: <vscale x 2 x bfloat> [[VD:%.*]], bfloat noundef [[RS1:%.*]], <vscale x 2 x bfloat> [[VS2:%.*]], i64 noundef [[VL:%.*]]) #[[ATTR0]] {
+// CHECK-RV64-NEXT: [[ENTRY:.*:]]
+// CHECK-RV64-NEXT: [[TMP0:%.*]] = call <vscale x 2 x bfloat> @llvm.riscv.vfnmsac.nxv2bf16.bf16.i64(<vscale x 2 x bfloat> [[VD]], bfloat [[RS1]], <vscale x 2 x bfloat> [[VS2]], i64 7, i64 [[VL]], i64 3)
+// CHECK-RV64-NEXT: ret <vscale x 2 x bfloat> [[TMP0]]
+//
+vbfloat16mf2_t test_vfnmsac_vf_bf16mf2(vbfloat16mf2_t vd, __bf16 rs1, vbfloat16mf2_t vs2, size_t vl) {
+ return __riscv_vfnmsac(vd, rs1, vs2, vl);
+}
+
+// CHECK-RV64-LABEL: define dso_local <vscale x 4 x bfloat> @test_vfnmsac_vv_bf16m1(
+// CHECK-RV64-SAME: <vscale x 4 x bfloat> [[VD:%.*]], <vscale x 4 x bfloat> [[VS1:%.*]], <vscale x 4 x bfloat> [[VS2:%.*]], i64 noundef [[VL:%.*]]) #[[ATTR0]] {
+// CHECK-RV64-NEXT: [[ENTRY:.*:]]
+// CHECK-RV64-NEXT: [[TMP0:%.*]] = call <vscale x 4 x bfloat> @llvm.riscv.vfnmsac.nxv4bf16.nxv4bf16.i64(<vscale x 4 x bfloat> [[VD]], <vscale x 4 x bfloat> [[VS1]], <vscale x 4 x bfloat> [[VS2]], i64 7, i64 [[VL]], i64 3)
+// CHECK-RV64-NEXT: ret <vscale x 4 x bfloat> [[TMP0]]
+//
+vbfloat16m1_t test_vfnmsac_vv_bf16m1(vbfloat16m1_t vd, vbfloat16m1_t vs1, vbfloat16m1_t vs2, size_t vl) {
+ return __riscv_vfnmsac(vd, vs1, vs2, vl);
+}
+
+// CHECK-RV64-LABEL: define dso_local <vscale x 4 x bfloat> @test_vfnmsac_vf_bf16m1(
+// CHECK-RV64-SAME: <vscale x 4 x bfloat> [[VD:%.*]], bfloat noundef [[RS1:%.*]], <vscale x 4 x bfloat> [[VS2:%.*]], i64 noundef [[VL:%.*]]) #[[ATTR0]] {
+// CHECK-RV64-NEXT: [[ENTRY:.*:]]
+// CHECK-RV64-NEXT: [[TMP0:%.*]] = call <vscale x 4 x bfloat> @llvm.riscv.vfnmsac.nxv4bf16.bf16.i64(<vscale x 4 x bfloat> [[VD]], bfloat [[RS1]], <vscale x 4 x bfloat> [[VS2]], i64 7, i64 [[VL]], i64 3)
+// CHECK-RV64-NEXT: ret <vscale x 4 x bfloat> [[TMP0]]
+//
+vbfloat16m1_t test_vfnmsac_vf_bf16m1(vbfloat16m1_t vd, __bf16 rs1, vbfloat16m1_t vs2, size_t vl) {
+ return __riscv_vfnmsac(vd, rs1, vs2, vl);
+}
+
+// CHECK-RV64-LABEL: define dso_local <vscale x 8 x bfloat> @test_vfnmsac_vv_bf16m2(
+// CHECK-RV64-SAME: <vscale x 8 x bfloat> [[VD:%.*]], <vscale x 8 x bfloat> [[VS1:%.*]], <vscale x 8 x bfloat> [[VS2:%.*]], i64 noundef [[VL:%.*]]) #[[ATTR0]] {
+// CHECK-RV64-NEXT: [[ENTRY:.*:]]
+// CHECK-RV64-NEXT: [[TMP0:%.*]] = call <vscale x 8 x bfloat> @llvm.riscv.vfnmsac.nxv8bf16.nxv8bf16.i64(<vscale x 8 x bfloat> [[VD]], <vscale x 8 x bfloat> [[VS1]], <vscale x 8 x bfloat> [[VS2]], i64 7, i64 [[VL]], i64 3)
+// CHECK-RV64-NEXT: ret <vscale x 8 x bfloat> [[TMP0]]
+//
+vbfloat16m2_t test_vfnmsac_vv_bf16m2(vbfloat16m2_t vd, vbfloat16m2_t vs1, vbfloat16m2_t vs2, size_t vl) {
+ return __riscv_vfnmsac(vd, vs1, vs2, vl);
+}
+
+// CHECK-RV64-LABEL: define dso_local <vscale x 8 x bfloat> @test_vfnmsac_vf_bf16m2(
+// CHECK-RV64-SAME: <vscale x 8 x bfloat> [[VD:%.*]], bfloat noundef [[RS1:%.*]], <vscale x 8 x bfloat> [[VS2:%.*]], i64 noundef [[VL:%.*]]) #[[ATTR0]] {
+// CHECK-RV64-NEXT: [[ENTRY:.*:]]
+// CHECK-RV64-NEXT: [[TMP0:%.*]] = call <vscale x 8 x bfloat> @llvm.riscv.vfnmsac.nxv8bf16.bf16.i64(<vscale x 8 x bfloat> [[VD]], bfloat [[RS1]], <vscale x 8 x bfloat> [[VS2]], i64 7, i64 [[VL]], i64 3)
+// CHECK-RV64-NEXT: ret <vscale x 8 x bfloat> [[TMP0]]
+//
+vbfloat16m2_t test_vfnmsac_vf_bf16m2(vbfloat16m2_t vd, __bf16 rs1, vbfloat16m2_t vs2, size_t vl) {
+ return __riscv_vfnmsac(vd, rs1, vs2, vl);
+}
+
+// CHECK-RV64-LABEL: define dso_local <vscale x 16 x bfloat> @test_vfnmsac_vv_bf16m4(
+// CHECK-RV64-SAME: <vscale x 16 x bfloat> [[VD:%.*]], <vscale x 16 x bfloat> [[VS1:%.*]], <vscale x 16 x bfloat> [[VS2:%.*]], i64 noundef [[VL:%.*]]) #[[ATTR0]] {
+// CHECK-RV64-NEXT: [[ENTRY:.*:]]
+// CHECK-RV64-NEXT: [[TMP0:%.*]] = call <vscale x 16 x bfloat> @llvm.riscv.vfnmsac.nxv16bf16.nxv16bf16.i64(<vscale x 16 x bfloat> [[VD]], <vscale x 16 x bfloat> [[VS1]], <vscale x 16 x bfloat> [[VS2]], i64 7, i64 [[VL]], i64 3)
+// CHECK-RV64-NEXT: ret <vscale x 16 x bfloat> [[TMP0]]
+//
+vbfloat16m4_t test_vfnmsac_vv_bf16m4(vbfloat16m4_t vd, vbfloat16m4_t vs1, vbfloat16m4_t vs2, size_t vl) {
+ return __riscv_vfnmsac(vd, vs1, vs2, vl);
+}
+
+// CHECK-RV64-LABEL: define dso_local <vscale x 16 x bfloat> @test_vfnmsac_vf_bf16m4(
+// CHECK-RV64-SAME: <vscale x 16 x bfloat> [[VD:%.*]], bfloat noundef [[RS1:%.*]], <vscale x 16 x bfloat> [[VS2:%.*]], i64 noundef [[VL:%.*]]) #[[ATTR0]] {
+// CHECK-RV64-NEXT: [[ENTRY:.*:]]
+// CHECK-RV64-NEXT: [[TMP0:%.*]] = call <vscale x 16 x bfloat> @llvm.riscv.vfnmsac.nxv16bf16.bf16.i64(<vscale x 16 x bfloat> [[VD]], bfloat [[RS1]], <vscale x 16 x bfloat> [[VS2]], i64 7, i64 [[VL]], i64 3)
+// CHECK-RV64-NEXT: ret <vscale x 16 x bfloat> [[TMP0]]
+//
+vbfloat16m4_t test_vfnmsac_vf_bf16m4(vbfloat16m4_t vd, __bf16 rs1, vbfloat16m4_t vs2, size_t vl) {
+ return __riscv_vfnmsac(vd, rs1, vs2, vl);
+}
+
+// CHECK-RV64-LABEL: define dso_local <vscale x 32 x bfloat> @test_vfnmsac_vv_bf16m8(
+// CHECK-RV64-SAME: <vscale x 32 x bfloat> [[VD:%.*]], <vscale x 32 x bfloat> [[VS1:%.*]], <vscale x 32 x bfloat> [[VS2:%.*]], i64 noundef [[VL:%.*]]) #[[ATTR0]] {
+// CHECK-RV64-NEXT: [[ENTRY:.*:]]
+// CHECK-RV64-NEXT: [[TMP0:%.*]] = call <vscale x 32 x bfloat> @llvm.riscv.vfnmsac.nxv32bf16.nxv32bf16.i64(<vscale x 32 x bfloat> [[VD]], <vscale x 32 x bfloat> [[VS1]], <vscale x 32 x bfloat> [[VS2]], i64 7, i64 [[VL]], i64 3)
+// CHECK-RV64-NEXT: ret <vscale x 32 x bfloat> [[TMP0]]
+//
+vbfloat16m8_t test_vfnmsac_vv_bf16m8(vbfloat16m8_t vd, vbfloat16m8_t vs1, vbfloat16m8_t vs2, size_t vl) {
+ return __riscv_vfnmsac(vd, vs1, vs2, vl);
+}
+
+// CHECK-RV64-LABEL: define dso_local <vscale x 32 x bfloat> @test_vfnmsac_vf_bf16m8(
+// CHECK-RV64-SAME: <vscale x 32 x bfloat> [[VD:%.*]], bfloat noundef [[RS1:%.*]], <vscale x 32 x bfloat> [[VS2:%.*]], i64 noundef [[VL:%.*]]) #[[ATTR0]] {
+// CHECK-RV64-NEXT: [[ENTRY:.*:]]
+// CHECK-RV64-NEXT: [[TMP0:%.*]] = call <vscale x 32 x bfloat> @llvm.riscv.vfnmsac.nxv32bf16.bf16.i64(<vscale x 32 x bfloat> [[VD]], bfloat [[RS1]], <vscale x 32 x bfloat> [[VS2]], i64 7, i64 [[VL]], i64 3)
+// CHECK-RV64-NEXT: ret <vscale x 32 x bfloat> [[TMP0]]
+//
+vbfloat16m8_t test_vfnmsac_vf_bf16m8(vbfloat16m8_t vd, __bf16 rs1, vbfloat16m8_t vs2, size_t vl) {
+ return __riscv_vfnmsac(vd, rs1, vs2, vl);
+}
+
+// CHECK-RV64-LABEL: define dso_local <vscale x 1 x bfloat> @test_vfnmsac_vv_bf16mf4_m(
+// CHECK-RV64-SAME: <vscale x 1 x i1> [[MASK:%.*]], <vscale x 1 x bfloat> [[VD:%.*]], <vscale x 1 x bfloat> [[VS1:%.*]], <vscale x 1 x bfloat> [[VS2:%.*]], i64 noundef [[VL:%.*]]) #[[ATTR0]] {
+// CHECK-RV64-NEXT: [[ENTRY:.*:]]
+// CHECK-RV64-NEXT: [[TMP0:%.*]] = call <vscale x 1 x bfloat> @llvm.riscv.vfnmsac.mask.nxv1bf16.nxv1bf16.i64(<vscale x 1 x bfloat> [[VD]], <vscale x 1 x bfloat> [[VS1]], <vscale x 1 x bfloat> [[VS2]], <vscale x 1 x i1> [[MASK]], i64 7, i64 [[VL]], i64 3)
+// CHECK-RV64-NEXT: ret <vscale x 1 x bfloat> [[TMP0]]
+//
+vbfloat16mf4_t test_vfnmsac_vv_bf16mf4_m(vbool64_t mask, vbfloat16mf4_t vd, vbfloat16mf4_t vs1, vbfloat16mf4_t vs2, size_t vl) {
+ return __riscv_vfnmsac(mask, vd, vs1, vs2, vl);
+}
+
+// CHECK-RV64-LABEL: define dso_local <vscale x 1 x bfloat> @test_vfnmsac_vf_bf16mf4_m(
+// CHECK-RV64-SAME: <vscale x 1 x i1> [[MASK:%.*]], <vscale x 1 x bfloat> [[VD:%.*]], bfloat noundef [[RS1:%.*]], <vscale x 1 x bfloat> [[VS2:%.*]], i64 noundef [[VL:%.*]]) #[[ATTR0]] {
+// CHECK-RV64-NEXT: [[ENTRY:.*:]]
+// CHECK-RV64-NEXT: [[TMP0:%.*]] = call <vscale x 1 x bfloat> @llvm.riscv.vfnmsac.mask.nxv1bf16.bf16.i64(<vscale x 1 x bfloat> [[VD]], bfloat [[RS1]], <vscale x 1 x bfloat> [[VS2]], <vscale x 1 x i1> [[MASK]], i64 7, i64 [[VL]], i64 3)
+// CHECK-RV64-NEXT: ret <vscale x 1 x bfloat> [[TMP0]]
+//
+vbfloat16mf4_t test_vfnmsac_vf_bf16mf4_m(vbool64_t mask, vbfloat16mf4_t vd, __bf16 rs1, vbfloat16mf4_t vs2, size_t vl) {
+ return __riscv_vfnmsac(mask, vd, rs1, vs2, vl);
+}
+
+// CHECK-RV64-LABEL: define dso_local <vscale x 2 x bfloat> @test_vfnmsac_vv_bf16mf2_m(
+// CHECK-RV64-SAME: <vscale x 2 x i1> [[MASK:%.*]], <vscale x 2 x bfloat> [[VD:%.*]], <vscale x 2 x bfloat> [[VS1:%.*]], <vscale x 2 x bfloat> [[VS2:%.*]], i64 noundef [[VL:%.*]]) #[[ATTR0]] {
+// CHECK-RV64-NEXT: [[ENTRY:.*:]]
+// CHECK-RV64-NEXT: [[TMP0:%.*]] = call <vscale x 2 x bfloat> @llvm.riscv.vfnmsac.mask.nxv2bf16.nxv2bf16.i64(<vscale x 2 x bfloat> [[VD]], <vscale x 2 x bfloat> [[VS1]], <vscale x 2 x bfloat> [[VS2]], <vscale x 2 x i1> [[MASK]], i64 7, i64 [[VL]], i64 3)
+// CHECK-RV64-NEXT: ret <vscale x 2 x bfloat> [[TMP0]]
+//
+vbfloat16mf2_t test_vfnmsac_vv_bf16mf2_m(vbool32_t mask, vbfloat16mf2_t vd, vbfloat16mf2_t vs1, vbfloat16mf2_t vs2, size_t vl) {
+ return __riscv_vfnmsac(mask, vd, vs1, vs2, vl);
+}
+
+// CHECK-RV64-LABEL: define dso_local <vscale x 2 x bfloat> @test_vfnmsac_vf_bf16mf2_m(
+// CHECK-RV64-SAME: <vscale x 2 x i1> [[MASK:%.*]], <vscale x 2 x bfloat> [[VD:%.*]], bfloat noundef [[RS1:%.*]], <vscale x 2 x bfloat> [[VS2:%.*]], i64 noundef [[VL:%.*]]) #[[ATTR0]] {
+// CHECK-RV64-NEXT: [[ENTRY:.*:]]
+// CHECK-RV64-NEXT: [[TMP0:%.*]] = call <vscale x 2 x bfloat> @llvm.riscv.vfnmsac.mask.nxv2bf16.bf16.i64(<vscale x 2 x bfloat> [[VD]], bfloat [[RS1]], <vscale x 2 x bfloat> [[VS2]], <vscale x 2 x i1> [[MASK]], i64 7, i64 [[VL]], i64 3)
+// CHECK-RV64-NEXT: ret <vscale x 2 x bfloat> [[TMP0]]
+//
+vbfloat16mf2_t test_vfnmsac_vf_bf16mf2_m(vbool32_t mask, vbfloat16mf2_t vd, __bf16 rs1, vbfloat16mf2_t vs2, size_t vl) {
+ return __riscv_vfnmsac(mask, vd, rs1, vs2, vl);
+}
+
+// CHECK-RV64-LABEL: define dso_local <vscale x 4 x bfloat> @test_vfnmsac_vv_bf16m1_m(
+// CHECK-RV64-SAME: <vscale x 4 x i1> [[MASK:%.*]], <vscale x 4 x bfloat> [[VD:%.*]], <vscale x 4 x bfloat> [[VS1:%.*]], <vscale x 4 x bfloat> [[VS2:%.*]], i64 noundef [[VL:%.*]]) #[[ATTR0]] {
+// CHECK-RV64-NEXT: [[ENTRY:.*:]]
+// CHECK-RV64-NEXT: [[TMP0:%.*]] = call <vscale x 4 x bfloat> @llvm.riscv.vfnmsac.mask.nxv4bf16.nxv4bf16.i64(<vscale x 4 x bfloat> [[VD]], <vscale x 4 x bfloat> [[VS1]], <vscale x 4 x bfloat> [[VS2]], <vscale x 4 x i1> [[MASK]], i64 7, i64 [[VL]], i64 3)
+// CHECK-RV64-NEXT: ret <vscale x 4 x bfloat> [[TMP0]]
+//
+vbfloat16m1_t test_vfnmsac_vv_bf16m1_m(vbool16_t mask, vbfloat16m1_t vd, vbfloat16m1_t vs1, vbfloat16m1_t vs2, size_t vl) {
+ return __riscv_vfnmsac(mask, vd, vs1, vs2, vl);
+}
+
+// CHECK-RV64-LABEL: define dso_local <vscale x 4 x bfloat> @test_vfnmsac_vf_bf16m1_m(
+// CHECK-RV64-SAME: <vscale x 4 x i1> [[MASK:%.*]], <vscale x 4 x bfloat> [[VD:%.*]], bfloat noundef [[RS1:%.*]], <vscale x 4 x bfloat> [[VS2:%.*]], i64 noundef [[VL:%.*]]) #[[ATTR0]] {
+// CHECK-RV64-NEXT: [[ENTRY:.*:]]
+// CHECK-RV64-NEXT: [[TMP0:%.*]] = call <vscale x 4 x bfloat> @llvm.riscv.vfnmsac.mask.nxv4bf16.bf16.i64(<vscale x 4 x bfloat> [[VD]], bfloat [[RS1]], <vscale x 4 x bfloat> [[VS2]], <vscale x 4 x i1> [[MASK]], i64 7, i64 [[VL]], i64 3)
+// CHECK-RV64-NEXT: ret <vscale x 4 x bfloat> [[TMP0]]
+//
+vbfloat16m1_t test_vfnmsac_vf_bf16m1_m(vbool16_t mask, vbfloat16m1_t vd, __bf16 rs1, vbfloat16m1_t vs2, size_t vl) {
+ return __riscv_vfnmsac(mask, vd, rs1, vs2, vl);
+}
+
+// CHECK-RV64-LABEL: define dso_local <vscale x 8 x bfloat> @test_vfnmsac_vv_bf16m2_m(
+// CHECK-RV64-SAME: <vscale x 8 x i1> [[MASK:%.*]], <vscale x 8 x bfloat> [[VD:%.*]], <vscale x 8 x bfloat> [[VS1:%.*]], <vscale x 8 x bfloat> [[VS2:%.*]], i64 noundef [[VL:%.*]]) #[[ATTR0]] {
+// CHECK-RV64-NEXT: [[ENTRY:.*:]]
+// CHECK-RV64-NEXT: [[TMP0:%.*]] = call <vscale x 8 x bfloat> @llvm.riscv.vfnmsac.mask.nxv8bf16.nxv8bf16.i64(<vscale x 8 x bfloat> [[VD]], <vscale x 8 x bfloat> [[VS1]], <vscale x 8 x bfloat> [[VS2]], <vscale x 8 x i1> [[MASK]], i64 7, i64 [[VL]], i64 3)
+// CHECK-RV64-NEXT: ret <vscale x 8 x bfloat> [[TMP0]]
+//
+vbfloat16m2_t test_vfnmsac_vv_bf16m2_m(vbool8_t mask, vbfloat16m2_t vd, vbfloat16m2_t vs1, vbfloat16m2_t vs2, size_t vl) {
+ return __riscv_vfnmsac(mask, vd, vs1, vs2, vl);
+}
+
+// CHECK-RV64-LABEL: define dso_local <vscale x 8 x bfloat> @test_vfnmsac_vf_bf16m2_m(
+// CHECK-RV64-SAME: <vscale x 8 x i1> [[MASK:%.*]], <vscale x 8 x bfloat> [[VD:%.*]], bfloat noundef [[RS1:%.*]], <vscale x 8 x bfloat> [[VS2:%.*]], i64 noundef [[VL:%.*]]) #[[ATTR0]] {
+// CHECK-RV64-NEXT: [[ENTRY:.*:]]
+// CHECK-RV64-NEXT: [[TMP0:%.*]] = call <vscale x 8 x bfloat> @llvm.riscv.vfnmsac.mask.nxv8bf16.bf16.i64(<vscale x 8 x bfloat> [[VD]], bfloat [[RS1]], <vscale x 8 x bfloat> [[VS2]], <vscale x 8 x i1> [[MASK]], i64 7, i64 [[VL]], i64 3)
+// CHECK-RV64-NEXT: ret <vscale x 8 x bfloat> [[TMP0]]
+//
+vbfloat16m2_t test_vfnmsac_vf_bf16m2_m(vbool8_t mask, vbfloat16m2_t vd, __bf16 rs1, vbfloat16m2_t vs2, size_t vl) {
+ return __riscv_vfnmsac(mask, vd, rs1, vs2, vl);
+}
+
+// CHECK-RV64-LABEL: define dso_local <vscale x 16 x bfloat> @test_vfnmsac_vv_bf16m4_m(
+// CHECK-RV64-SAME: <vscale x 16 x i1> [[MASK:%.*]], <vscale x 16 x bfloat> [[VD:%.*]], <vscale x 16 x bfloat> [[VS1:%.*]], <vscale x 16 x bfloat> [[VS2:%.*]], i64 noundef [[VL:%.*]]) #[[ATTR0]] {
+// CHECK-RV64-NEXT: [[ENTRY:.*:]]
+// CHECK-RV64-NEXT: [[TMP0:%.*]] = call <vscale x 16 x bfloat> @llvm.riscv.vfnmsac.mask.nxv16bf16.nxv16bf16.i64(<vscale x 16 x bfloat> [[VD]], <vscale x 16 x bfloat> [[VS1]], <vscale x 16 x bfloat> [[VS2]], <vscale x 16 x i1> [[MASK]], i64 7, i64 [[VL]], i64 3)
+// CHECK-RV64-NEXT: ret <vscale x 16 x bfloat> [[TMP0]]
+//
+vbfloat16m4_t test_vfnmsac_vv_bf16m4_m(vbool4_t mask, vbfloat16m4_t vd, vbfloat16m4_t vs1, vbfloat16m4_t vs2, size_t vl) {
+ return __riscv_vfnmsac(mask, vd, vs1, vs2, vl);
+}
+
+// CHECK-RV64-LABEL: define dso_local <vscale x 16 x bfloat> @test_vfnmsac_vf_bf16m4_m(
+// CHECK-RV64-SAME: <vscale x 16 x i1> [[MASK:%.*]], <vscale x 16 x bfloat> [[VD:%.*]], bfloat noundef [[RS1:%.*]], <vscale x 16 x bfloat> [[VS2:%.*]], i64 noundef [[VL:%.*]]) #[[ATTR0]] {
+// CHECK-RV64-NEXT: [[ENTRY:.*:]]
+// CHECK-RV64-NEXT: [[TMP0:%.*]] = call <vscale x 16 x bfloat> @llvm.riscv.vfnmsac.mask.nxv16bf16.bf16.i64(<vscale x 16 x bfloat> [[VD]], bfloat [[RS1]], <vscale x 16 x bfloat> [[VS2]], <vscale x 16 x i1> [[MASK]], i64 7, i64 [[VL]], i64 3)
+// CHECK-RV64-NEXT: ret <vscale x 16 x bfloat> [[TMP0]]
+//
+vbfloat16m4_t test_vfnmsac_vf_bf16m4_m(vbool4_t mask, vbfloat16m4_t vd, __bf16 rs1, vbfloat16m4_t vs2, size_t vl) {
+ return __riscv_vfnmsac(mask, vd, rs1, vs2, vl);
+}
+
+// CHECK-RV64-LABEL: define dso_local <vscale x 32 x bfloat> @test_vfnmsac_vv_bf16m8_m(
+// CHECK-RV64-SAME: <vscale x 32 x i1> [[MASK:%.*]], <vscale x 32 x bfloat> [[VD:%.*]], <vscale x 32 x bfloat> [[VS1:%.*]], <vscale x 32 x bfloat> [[VS2:%.*]], i64 noundef [[VL:%.*]]) #[[ATTR0]] {
+// CHECK-RV64-NEXT: [[ENTRY:.*:]]
+// CHECK-RV64-NEXT: [[TMP0:%.*]] = call <vscale x 32 x bfloat> @llvm.riscv.vfnmsac.mask.nxv32bf16.nxv32bf16.i64(<vscale x 32 x bfloat> [[VD]], <vscale x 32 x bfloat> [[VS1]], <vscale x 32 x bfloat> [[VS2]], <vscale x 32 x i1> [[MASK]], i64 7, i64 [[VL]], i64 3)
+// CHECK-RV64-NEXT: ret <vscale x 32 x bfloat> [[TMP0]]
+//
+vbfloat16m8_t test_vfnmsac_vv_bf16m8_m(vbool2_t mask, vbfloat16m8_t vd, vbfloat16m8_t vs1, vbfloat16m8_t vs2, size_t vl) {
+ return __riscv_vfnmsac(mask, vd, vs1, vs2, vl);
+}
+
+// CHECK-RV64-LABEL: define dso_local <vscale x 32 x bfloat> @test_vfnmsac_vf_bf16m8_m(
+// CHECK-RV64-SAME: <vscale x 32 x i1> [[MASK:%.*]], <vscale x 32 x bfloat> [[VD:%.*]], bfloat noundef [[RS1:%.*]], <vscale x 32 x bfloat> [[VS2:%.*]], i64 noundef [[VL:%.*]]) #[[ATTR0]] {
+// CHECK-RV64-NEXT: [[ENTRY:.*:]]
+// CHECK-RV64-NEXT: [[TMP0:%.*]] = call <vscale x 32 x bfloat> @llvm.riscv.vfnmsac.mask.nxv32bf16.bf16.i64(<vscale x 32 x bfloat> [[VD]], bfloat [[RS1]], <vscale x 32 x bfloat> [[VS2]], <vscale x 32 x i1> [[MASK]], i64 7, i64 [[VL]], i64 3)
+// CHECK-RV64-NEXT: ret <vscale x 32 x bfloat> [[TMP0]]
+//
+vbfloat16m8_t test_vfnmsac_vf_bf16m8_m(vbool2_t mask, vbfloat16m8_t vd, __bf16 rs1, vbfloat16m8_t vs2, size_t vl) {
+ return __riscv_vfnmsac(mask, vd, rs1, vs2, vl);
+}
+
diff --git a/clang/test/CodeGen/RISCV/rvv-intrinsics-autogenerated/zvfbfa/non-policy/overloaded/vfnmsub.c b/clang/test/CodeGen/RISCV/rvv-intrinsics-autogenerated/zvfbfa/non-policy/overloaded/vfnmsub.c
new file mode 100644
index 0000000..3a520dd
--- /dev/null
+++ b/clang/test/CodeGen/RISCV/rvv-intrinsics-autogenerated/zvfbfa/non-policy/overloaded/vfnmsub.c
@@ -0,0 +1,249 @@
+// NOTE: Assertions have been autogenerated by utils/update_cc_test_checks.py UTC_ARGS: --version 5
+// REQUIRES: riscv-registered-target
+// RUN: %clang_cc1 -triple riscv64 -target-feature +v \
+// RUN: -target-feature +experimental-zvfbfa -disable-O0-optnone \
+// RUN: -emit-llvm %s -o - | opt -S -passes=mem2reg | \
+// RUN: FileCheck --check-prefix=CHECK-RV64 %s
+
+#include <riscv_vector.h>
+
+// CHECK-RV64-LABEL: define dso_local <vscale x 1 x bfloat> @test_vfnmsub_vv_bf16mf4(
+// CHECK-RV64-SAME: <vscale x 1 x bfloat> [[VD:%.*]], <vscale x 1 x bfloat> [[VS1:%.*]], <vscale x 1 x bfloat> [[VS2:%.*]], i64 noundef [[VL:%.*]]) #[[ATTR0:[0-9]+]] {
+// CHECK-RV64-NEXT: [[ENTRY:.*:]]
+// CHECK-RV64-NEXT: [[TMP0:%.*]] = call <vscale x 1 x bfloat> @llvm.riscv.vfnmsub.nxv1bf16.nxv1bf16.i64(<vscale x 1 x bfloat> [[VD]], <vscale x 1 x bfloat> [[VS1]], <vscale x 1 x bfloat> [[VS2]], i64 7, i64 [[VL]], i64 3)
+// CHECK-RV64-NEXT: ret <vscale x 1 x bfloat> [[TMP0]]
+//
+vbfloat16mf4_t test_vfnmsub_vv_bf16mf4(vbfloat16mf4_t vd, vbfloat16mf4_t vs1, vbfloat16mf4_t vs2, size_t vl) {
+ return __riscv_vfnmsub(vd, vs1, vs2, vl);
+}
+
+// CHECK-RV64-LABEL: define dso_local <vscale x 1 x bfloat> @test_vfnmsub_vf_bf16mf4(
+// CHECK-RV64-SAME: <vscale x 1 x bfloat> [[VD:%.*]], bfloat noundef [[RS1:%.*]], <vscale x 1 x bfloat> [[VS2:%.*]], i64 noundef [[VL:%.*]]) #[[ATTR0]] {
+// CHECK-RV64-NEXT: [[ENTRY:.*:]]
+// CHECK-RV64-NEXT: [[TMP0:%.*]] = call <vscale x 1 x bfloat> @llvm.riscv.vfnmsub.nxv1bf16.bf16.i64(<vscale x 1 x bfloat> [[VD]], bfloat [[RS1]], <vscale x 1 x bfloat> [[VS2]], i64 7, i64 [[VL]], i64 3)
+// CHECK-RV64-NEXT: ret <vscale x 1 x bfloat> [[TMP0]]
+//
+vbfloat16mf4_t test_vfnmsub_vf_bf16mf4(vbfloat16mf4_t vd, __bf16 rs1, vbfloat16mf4_t vs2, size_t vl) {
+ return __riscv_vfnmsub(vd, rs1, vs2, vl);
+}
+
+// CHECK-RV64-LABEL: define dso_local <vscale x 2 x bfloat> @test_vfnmsub_vv_bf16mf2(
+// CHECK-RV64-SAME: <vscale x 2 x bfloat> [[VD:%.*]], <vscale x 2 x bfloat> [[VS1:%.*]], <vscale x 2 x bfloat> [[VS2:%.*]], i64 noundef [[VL:%.*]]) #[[ATTR0]] {
+// CHECK-RV64-NEXT: [[ENTRY:.*:]]
+// CHECK-RV64-NEXT: [[TMP0:%.*]] = call <vscale x 2 x bfloat> @llvm.riscv.vfnmsub.nxv2bf16.nxv2bf16.i64(<vscale x 2 x bfloat> [[VD]], <vscale x 2 x bfloat> [[VS1]], <vscale x 2 x bfloat> [[VS2]], i64 7, i64 [[VL]], i64 3)
+// CHECK-RV64-NEXT: ret <vscale x 2 x bfloat> [[TMP0]]
+//
+vbfloat16mf2_t test_vfnmsub_vv_bf16mf2(vbfloat16mf2_t vd, vbfloat16mf2_t vs1, vbfloat16mf2_t vs2, size_t vl) {
+ return __riscv_vfnmsub(vd, vs1, vs2, vl);
+}
+
+// CHECK-RV64-LABEL: define dso_local <vscale x 2 x bfloat> @test_vfnmsub_vf_bf16mf2(
+// CHECK-RV64-SAME: <vscale x 2 x bfloat> [[VD:%.*]], bfloat noundef [[RS1:%.*]], <vscale x 2 x bfloat> [[VS2:%.*]], i64 noundef [[VL:%.*]]) #[[ATTR0]] {
+// CHECK-RV64-NEXT: [[ENTRY:.*:]]
+// CHECK-RV64-NEXT: [[TMP0:%.*]] = call <vscale x 2 x bfloat> @llvm.riscv.vfnmsub.nxv2bf16.bf16.i64(<vscale x 2 x bfloat> [[VD]], bfloat [[RS1]], <vscale x 2 x bfloat> [[VS2]], i64 7, i64 [[VL]], i64 3)
+// CHECK-RV64-NEXT: ret <vscale x 2 x bfloat> [[TMP0]]
+//
+vbfloat16mf2_t test_vfnmsub_vf_bf16mf2(vbfloat16mf2_t vd, __bf16 rs1, vbfloat16mf2_t vs2, size_t vl) {
+ return __riscv_vfnmsub(vd, rs1, vs2, vl);
+}
+
+// CHECK-RV64-LABEL: define dso_local <vscale x 4 x bfloat> @test_vfnmsub_vv_bf16m1(
+// CHECK-RV64-SAME: <vscale x 4 x bfloat> [[VD:%.*]], <vscale x 4 x bfloat> [[VS1:%.*]], <vscale x 4 x bfloat> [[VS2:%.*]], i64 noundef [[VL:%.*]]) #[[ATTR0]] {
+// CHECK-RV64-NEXT: [[ENTRY:.*:]]
+// CHECK-RV64-NEXT: [[TMP0:%.*]] = call <vscale x 4 x bfloat> @llvm.riscv.vfnmsub.nxv4bf16.nxv4bf16.i64(<vscale x 4 x bfloat> [[VD]], <vscale x 4 x bfloat> [[VS1]], <vscale x 4 x bfloat> [[VS2]], i64 7, i64 [[VL]], i64 3)
+// CHECK-RV64-NEXT: ret <vscale x 4 x bfloat> [[TMP0]]
+//
+vbfloat16m1_t test_vfnmsub_vv_bf16m1(vbfloat16m1_t vd, vbfloat16m1_t vs1, vbfloat16m1_t vs2, size_t vl) {
+ return __riscv_vfnmsub(vd, vs1, vs2, vl);
+}
+
+// CHECK-RV64-LABEL: define dso_local <vscale x 4 x bfloat> @test_vfnmsub_vf_bf16m1(
+// CHECK-RV64-SAME: <vscale x 4 x bfloat> [[VD:%.*]], bfloat noundef [[RS1:%.*]], <vscale x 4 x bfloat> [[VS2:%.*]], i64 noundef [[VL:%.*]]) #[[ATTR0]] {
+// CHECK-RV64-NEXT: [[ENTRY:.*:]]
+// CHECK-RV64-NEXT: [[TMP0:%.*]] = call <vscale x 4 x bfloat> @llvm.riscv.vfnmsub.nxv4bf16.bf16.i64(<vscale x 4 x bfloat> [[VD]], bfloat [[RS1]], <vscale x 4 x bfloat> [[VS2]], i64 7, i64 [[VL]], i64 3)
+// CHECK-RV64-NEXT: ret <vscale x 4 x bfloat> [[TMP0]]
+//
+vbfloat16m1_t test_vfnmsub_vf_bf16m1(vbfloat16m1_t vd, __bf16 rs1, vbfloat16m1_t vs2, size_t vl) {
+ return __riscv_vfnmsub(vd, rs1, vs2, vl);
+}
+
+// CHECK-RV64-LABEL: define dso_local <vscale x 8 x bfloat> @test_vfnmsub_vv_bf16m2(
+// CHECK-RV64-SAME: <vscale x 8 x bfloat> [[VD:%.*]], <vscale x 8 x bfloat> [[VS1:%.*]], <vscale x 8 x bfloat> [[VS2:%.*]], i64 noundef [[VL:%.*]]) #[[ATTR0]] {
+// CHECK-RV64-NEXT: [[ENTRY:.*:]]
+// CHECK-RV64-NEXT: [[TMP0:%.*]] = call <vscale x 8 x bfloat> @llvm.riscv.vfnmsub.nxv8bf16.nxv8bf16.i64(<vscale x 8 x bfloat> [[VD]], <vscale x 8 x bfloat> [[VS1]], <vscale x 8 x bfloat> [[VS2]], i64 7, i64 [[VL]], i64 3)
+// CHECK-RV64-NEXT: ret <vscale x 8 x bfloat> [[TMP0]]
+//
+vbfloat16m2_t test_vfnmsub_vv_bf16m2(vbfloat16m2_t vd, vbfloat16m2_t vs1, vbfloat16m2_t vs2, size_t vl) {
+ return __riscv_vfnmsub(vd, vs1, vs2, vl);
+}
+
+// CHECK-RV64-LABEL: define dso_local <vscale x 8 x bfloat> @test_vfnmsub_vf_bf16m2(
+// CHECK-RV64-SAME: <vscale x 8 x bfloat> [[VD:%.*]], bfloat noundef [[RS1:%.*]], <vscale x 8 x bfloat> [[VS2:%.*]], i64 noundef [[VL:%.*]]) #[[ATTR0]] {
+// CHECK-RV64-NEXT: [[ENTRY:.*:]]
+// CHECK-RV64-NEXT: [[TMP0:%.*]] = call <vscale x 8 x bfloat> @llvm.riscv.vfnmsub.nxv8bf16.bf16.i64(<vscale x 8 x bfloat> [[VD]], bfloat [[RS1]], <vscale x 8 x bfloat> [[VS2]], i64 7, i64 [[VL]], i64 3)
+// CHECK-RV64-NEXT: ret <vscale x 8 x bfloat> [[TMP0]]
+//
+vbfloat16m2_t test_vfnmsub_vf_bf16m2(vbfloat16m2_t vd, __bf16 rs1, vbfloat16m2_t vs2, size_t vl) {
+ return __riscv_vfnmsub(vd, rs1, vs2, vl);
+}
+
+// CHECK-RV64-LABEL: define dso_local <vscale x 16 x bfloat> @test_vfnmsub_vv_bf16m4(
+// CHECK-RV64-SAME: <vscale x 16 x bfloat> [[VD:%.*]], <vscale x 16 x bfloat> [[VS1:%.*]], <vscale x 16 x bfloat> [[VS2:%.*]], i64 noundef [[VL:%.*]]) #[[ATTR0]] {
+// CHECK-RV64-NEXT: [[ENTRY:.*:]]
+// CHECK-RV64-NEXT: [[TMP0:%.*]] = call <vscale x 16 x bfloat> @llvm.riscv.vfnmsub.nxv16bf16.nxv16bf16.i64(<vscale x 16 x bfloat> [[VD]], <vscale x 16 x bfloat> [[VS1]], <vscale x 16 x bfloat> [[VS2]], i64 7, i64 [[VL]], i64 3)
+// CHECK-RV64-NEXT: ret <vscale x 16 x bfloat> [[TMP0]]
+//
+vbfloat16m4_t test_vfnmsub_vv_bf16m4(vbfloat16m4_t vd, vbfloat16m4_t vs1, vbfloat16m4_t vs2, size_t vl) {
+ return __riscv_vfnmsub(vd, vs1, vs2, vl);
+}
+
+// CHECK-RV64-LABEL: define dso_local <vscale x 16 x bfloat> @test_vfnmsub_vf_bf16m4(
+// CHECK-RV64-SAME: <vscale x 16 x bfloat> [[VD:%.*]], bfloat noundef [[RS1:%.*]], <vscale x 16 x bfloat> [[VS2:%.*]], i64 noundef [[VL:%.*]]) #[[ATTR0]] {
+// CHECK-RV64-NEXT: [[ENTRY:.*:]]
+// CHECK-RV64-NEXT: [[TMP0:%.*]] = call <vscale x 16 x bfloat> @llvm.riscv.vfnmsub.nxv16bf16.bf16.i64(<vscale x 16 x bfloat> [[VD]], bfloat [[RS1]], <vscale x 16 x bfloat> [[VS2]], i64 7, i64 [[VL]], i64 3)
+// CHECK-RV64-NEXT: ret <vscale x 16 x bfloat> [[TMP0]]
+//
+vbfloat16m4_t test_vfnmsub_vf_bf16m4(vbfloat16m4_t vd, __bf16 rs1, vbfloat16m4_t vs2, size_t vl) {
+ return __riscv_vfnmsub(vd, rs1, vs2, vl);
+}
+
+// CHECK-RV64-LABEL: define dso_local <vscale x 32 x bfloat> @test_vfnmsub_vv_bf16m8(
+// CHECK-RV64-SAME: <vscale x 32 x bfloat> [[VD:%.*]], <vscale x 32 x bfloat> [[VS1:%.*]], <vscale x 32 x bfloat> [[VS2:%.*]], i64 noundef [[VL:%.*]]) #[[ATTR0]] {
+// CHECK-RV64-NEXT: [[ENTRY:.*:]]
+// CHECK-RV64-NEXT: [[TMP0:%.*]] = call <vscale x 32 x bfloat> @llvm.riscv.vfnmsub.nxv32bf16.nxv32bf16.i64(<vscale x 32 x bfloat> [[VD]], <vscale x 32 x bfloat> [[VS1]], <vscale x 32 x bfloat> [[VS2]], i64 7, i64 [[VL]], i64 3)
+// CHECK-RV64-NEXT: ret <vscale x 32 x bfloat> [[TMP0]]
+//
+vbfloat16m8_t test_vfnmsub_vv_bf16m8(vbfloat16m8_t vd, vbfloat16m8_t vs1, vbfloat16m8_t vs2, size_t vl) {
+ return __riscv_vfnmsub(vd, vs1, vs2, vl);
+}
+
+// CHECK-RV64-LABEL: define dso_local <vscale x 32 x bfloat> @test_vfnmsub_vf_bf16m8(
+// CHECK-RV64-SAME: <vscale x 32 x bfloat> [[VD:%.*]], bfloat noundef [[RS1:%.*]], <vscale x 32 x bfloat> [[VS2:%.*]], i64 noundef [[VL:%.*]]) #[[ATTR0]] {
+// CHECK-RV64-NEXT: [[ENTRY:.*:]]
+// CHECK-RV64-NEXT: [[TMP0:%.*]] = call <vscale x 32 x bfloat> @llvm.riscv.vfnmsub.nxv32bf16.bf16.i64(<vscale x 32 x bfloat> [[VD]], bfloat [[RS1]], <vscale x 32 x bfloat> [[VS2]], i64 7, i64 [[VL]], i64 3)
+// CHECK-RV64-NEXT: ret <vscale x 32 x bfloat> [[TMP0]]
+//
+vbfloat16m8_t test_vfnmsub_vf_bf16m8(vbfloat16m8_t vd, __bf16 rs1, vbfloat16m8_t vs2, size_t vl) {
+ return __riscv_vfnmsub(vd, rs1, vs2, vl);
+}
+
+// CHECK-RV64-LABEL: define dso_local <vscale x 1 x bfloat> @test_vfnmsub_vv_bf16mf4_m(
+// CHECK-RV64-SAME: <vscale x 1 x i1> [[MASK:%.*]], <vscale x 1 x bfloat> [[VD:%.*]], <vscale x 1 x bfloat> [[VS1:%.*]], <vscale x 1 x bfloat> [[VS2:%.*]], i64 noundef [[VL:%.*]]) #[[ATTR0]] {
+// CHECK-RV64-NEXT: [[ENTRY:.*:]]
+// CHECK-RV64-NEXT: [[TMP0:%.*]] = call <vscale x 1 x bfloat> @llvm.riscv.vfnmsub.mask.nxv1bf16.nxv1bf16.i64(<vscale x 1 x bfloat> [[VD]], <vscale x 1 x bfloat> [[VS1]], <vscale x 1 x bfloat> [[VS2]], <vscale x 1 x i1> [[MASK]], i64 7, i64 [[VL]], i64 3)
+// CHECK-RV64-NEXT: ret <vscale x 1 x bfloat> [[TMP0]]
+//
+vbfloat16mf4_t test_vfnmsub_vv_bf16mf4_m(vbool64_t mask, vbfloat16mf4_t vd, vbfloat16mf4_t vs1, vbfloat16mf4_t vs2, size_t vl) {
+ return __riscv_vfnmsub(mask, vd, vs1, vs2, vl);
+}
+
+// CHECK-RV64-LABEL: define dso_local <vscale x 1 x bfloat> @test_vfnmsub_vf_bf16mf4_m(
+// CHECK-RV64-SAME: <vscale x 1 x i1> [[MASK:%.*]], <vscale x 1 x bfloat> [[VD:%.*]], bfloat noundef [[RS1:%.*]], <vscale x 1 x bfloat> [[VS2:%.*]], i64 noundef [[VL:%.*]]) #[[ATTR0]] {
+// CHECK-RV64-NEXT: [[ENTRY:.*:]]
+// CHECK-RV64-NEXT: [[TMP0:%.*]] = call <vscale x 1 x bfloat> @llvm.riscv.vfnmsub.mask.nxv1bf16.bf16.i64(<vscale x 1 x bfloat> [[VD]], bfloat [[RS1]], <vscale x 1 x bfloat> [[VS2]], <vscale x 1 x i1> [[MASK]], i64 7, i64 [[VL]], i64 3)
+// CHECK-RV64-NEXT: ret <vscale x 1 x bfloat> [[TMP0]]
+//
+vbfloat16mf4_t test_vfnmsub_vf_bf16mf4_m(vbool64_t mask, vbfloat16mf4_t vd, __bf16 rs1, vbfloat16mf4_t vs2, size_t vl) {
+ return __riscv_vfnmsub(mask, vd, rs1, vs2, vl);
+}
+
+// CHECK-RV64-LABEL: define dso_local <vscale x 2 x bfloat> @test_vfnmsub_vv_bf16mf2_m(
+// CHECK-RV64-SAME: <vscale x 2 x i1> [[MASK:%.*]], <vscale x 2 x bfloat> [[VD:%.*]], <vscale x 2 x bfloat> [[VS1:%.*]], <vscale x 2 x bfloat> [[VS2:%.*]], i64 noundef [[VL:%.*]]) #[[ATTR0]] {
+// CHECK-RV64-NEXT: [[ENTRY:.*:]]
+// CHECK-RV64-NEXT: [[TMP0:%.*]] = call <vscale x 2 x bfloat> @llvm.riscv.vfnmsub.mask.nxv2bf16.nxv2bf16.i64(<vscale x 2 x bfloat> [[VD]], <vscale x 2 x bfloat> [[VS1]], <vscale x 2 x bfloat> [[VS2]], <vscale x 2 x i1> [[MASK]], i64 7, i64 [[VL]], i64 3)
+// CHECK-RV64-NEXT: ret <vscale x 2 x bfloat> [[TMP0]]
+//
+vbfloat16mf2_t test_vfnmsub_vv_bf16mf2_m(vbool32_t mask, vbfloat16mf2_t vd, vbfloat16mf2_t vs1, vbfloat16mf2_t vs2, size_t vl) {
+ return __riscv_vfnmsub(mask, vd, vs1, vs2, vl);
+}
+
+// CHECK-RV64-LABEL: define dso_local <vscale x 2 x bfloat> @test_vfnmsub_vf_bf16mf2_m(
+// CHECK-RV64-SAME: <vscale x 2 x i1> [[MASK:%.*]], <vscale x 2 x bfloat> [[VD:%.*]], bfloat noundef [[RS1:%.*]], <vscale x 2 x bfloat> [[VS2:%.*]], i64 noundef [[VL:%.*]]) #[[ATTR0]] {
+// CHECK-RV64-NEXT: [[ENTRY:.*:]]
+// CHECK-RV64-NEXT: [[TMP0:%.*]] = call <vscale x 2 x bfloat> @llvm.riscv.vfnmsub.mask.nxv2bf16.bf16.i64(<vscale x 2 x bfloat> [[VD]], bfloat [[RS1]], <vscale x 2 x bfloat> [[VS2]], <vscale x 2 x i1> [[MASK]], i64 7, i64 [[VL]], i64 3)
+// CHECK-RV64-NEXT: ret <vscale x 2 x bfloat> [[TMP0]]
+//
+vbfloat16mf2_t test_vfnmsub_vf_bf16mf2_m(vbool32_t mask, vbfloat16mf2_t vd, __bf16 rs1, vbfloat16mf2_t vs2, size_t vl) {
+ return __riscv_vfnmsub(mask, vd, rs1, vs2, vl);
+}
+
+// CHECK-RV64-LABEL: define dso_local <vscale x 4 x bfloat> @test_vfnmsub_vv_bf16m1_m(
+// CHECK-RV64-SAME: <vscale x 4 x i1> [[MASK:%.*]], <vscale x 4 x bfloat> [[VD:%.*]], <vscale x 4 x bfloat> [[VS1:%.*]], <vscale x 4 x bfloat> [[VS2:%.*]], i64 noundef [[VL:%.*]]) #[[ATTR0]] {
+// CHECK-RV64-NEXT: [[ENTRY:.*:]]
+// CHECK-RV64-NEXT: [[TMP0:%.*]] = call <vscale x 4 x bfloat> @llvm.riscv.vfnmsub.mask.nxv4bf16.nxv4bf16.i64(<vscale x 4 x bfloat> [[VD]], <vscale x 4 x bfloat> [[VS1]], <vscale x 4 x bfloat> [[VS2]], <vscale x 4 x i1> [[MASK]], i64 7, i64 [[VL]], i64 3)
+// CHECK-RV64-NEXT: ret <vscale x 4 x bfloat> [[TMP0]]
+//
+vbfloat16m1_t test_vfnmsub_vv_bf16m1_m(vbool16_t mask, vbfloat16m1_t vd, vbfloat16m1_t vs1, vbfloat16m1_t vs2, size_t vl) {
+ return __riscv_vfnmsub(mask, vd, vs1, vs2, vl);
+}
+
+// CHECK-RV64-LABEL: define dso_local <vscale x 4 x bfloat> @test_vfnmsub_vf_bf16m1_m(
+// CHECK-RV64-SAME: <vscale x 4 x i1> [[MASK:%.*]], <vscale x 4 x bfloat> [[VD:%.*]], bfloat noundef [[RS1:%.*]], <vscale x 4 x bfloat> [[VS2:%.*]], i64 noundef [[VL:%.*]]) #[[ATTR0]] {
+// CHECK-RV64-NEXT: [[ENTRY:.*:]]
+// CHECK-RV64-NEXT: [[TMP0:%.*]] = call <vscale x 4 x bfloat> @llvm.riscv.vfnmsub.mask.nxv4bf16.bf16.i64(<vscale x 4 x bfloat> [[VD]], bfloat [[RS1]], <vscale x 4 x bfloat> [[VS2]], <vscale x 4 x i1> [[MASK]], i64 7, i64 [[VL]], i64 3)
+// CHECK-RV64-NEXT: ret <vscale x 4 x bfloat> [[TMP0]]
+//
+vbfloat16m1_t test_vfnmsub_vf_bf16m1_m(vbool16_t mask, vbfloat16m1_t vd, __bf16 rs1, vbfloat16m1_t vs2, size_t vl) {
+ return __riscv_vfnmsub(mask, vd, rs1, vs2, vl);
+}
+
+// CHECK-RV64-LABEL: define dso_local <vscale x 8 x bfloat> @test_vfnmsub_vv_bf16m2_m(
+// CHECK-RV64-SAME: <vscale x 8 x i1> [[MASK:%.*]], <vscale x 8 x bfloat> [[VD:%.*]], <vscale x 8 x bfloat> [[VS1:%.*]], <vscale x 8 x bfloat> [[VS2:%.*]], i64 noundef [[VL:%.*]]) #[[ATTR0]] {
+// CHECK-RV64-NEXT: [[ENTRY:.*:]]
+// CHECK-RV64-NEXT: [[TMP0:%.*]] = call <vscale x 8 x bfloat> @llvm.riscv.vfnmsub.mask.nxv8bf16.nxv8bf16.i64(<vscale x 8 x bfloat> [[VD]], <vscale x 8 x bfloat> [[VS1]], <vscale x 8 x bfloat> [[VS2]], <vscale x 8 x i1> [[MASK]], i64 7, i64 [[VL]], i64 3)
+// CHECK-RV64-NEXT: ret <vscale x 8 x bfloat> [[TMP0]]
+//
+vbfloat16m2_t test_vfnmsub_vv_bf16m2_m(vbool8_t mask, vbfloat16m2_t vd, vbfloat16m2_t vs1, vbfloat16m2_t vs2, size_t vl) {
+ return __riscv_vfnmsub(mask, vd, vs1, vs2, vl);
+}
+
+// CHECK-RV64-LABEL: define dso_local <vscale x 8 x bfloat> @test_vfnmsub_vf_bf16m2_m(
+// CHECK-RV64-SAME: <vscale x 8 x i1> [[MASK:%.*]], <vscale x 8 x bfloat> [[VD:%.*]], bfloat noundef [[RS1:%.*]], <vscale x 8 x bfloat> [[VS2:%.*]], i64 noundef [[VL:%.*]]) #[[ATTR0]] {
+// CHECK-RV64-NEXT: [[ENTRY:.*:]]
+// CHECK-RV64-NEXT: [[TMP0:%.*]] = call <vscale x 8 x bfloat> @llvm.riscv.vfnmsub.mask.nxv8bf16.bf16.i64(<vscale x 8 x bfloat> [[VD]], bfloat [[RS1]], <vscale x 8 x bfloat> [[VS2]], <vscale x 8 x i1> [[MASK]], i64 7, i64 [[VL]], i64 3)
+// CHECK-RV64-NEXT: ret <vscale x 8 x bfloat> [[TMP0]]
+//
+vbfloat16m2_t test_vfnmsub_vf_bf16m2_m(vbool8_t mask, vbfloat16m2_t vd, __bf16 rs1, vbfloat16m2_t vs2, size_t vl) {
+ return __riscv_vfnmsub(mask, vd, rs1, vs2, vl);
+}
+
+// CHECK-RV64-LABEL: define dso_local <vscale x 16 x bfloat> @test_vfnmsub_vv_bf16m4_m(
+// CHECK-RV64-SAME: <vscale x 16 x i1> [[MASK:%.*]], <vscale x 16 x bfloat> [[VD:%.*]], <vscale x 16 x bfloat> [[VS1:%.*]], <vscale x 16 x bfloat> [[VS2:%.*]], i64 noundef [[VL:%.*]]) #[[ATTR0]] {
+// CHECK-RV64-NEXT: [[ENTRY:.*:]]
+// CHECK-RV64-NEXT: [[TMP0:%.*]] = call <vscale x 16 x bfloat> @llvm.riscv.vfnmsub.mask.nxv16bf16.nxv16bf16.i64(<vscale x 16 x bfloat> [[VD]], <vscale x 16 x bfloat> [[VS1]], <vscale x 16 x bfloat> [[VS2]], <vscale x 16 x i1> [[MASK]], i64 7, i64 [[VL]], i64 3)
+// CHECK-RV64-NEXT: ret <vscale x 16 x bfloat> [[TMP0]]
+//
+vbfloat16m4_t test_vfnmsub_vv_bf16m4_m(vbool4_t mask, vbfloat16m4_t vd, vbfloat16m4_t vs1, vbfloat16m4_t vs2, size_t vl) {
+ return __riscv_vfnmsub(mask, vd, vs1, vs2, vl);
+}
+
+// CHECK-RV64-LABEL: define dso_local <vscale x 16 x bfloat> @test_vfnmsub_vf_bf16m4_m(
+// CHECK-RV64-SAME: <vscale x 16 x i1> [[MASK:%.*]], <vscale x 16 x bfloat> [[VD:%.*]], bfloat noundef [[RS1:%.*]], <vscale x 16 x bfloat> [[VS2:%.*]], i64 noundef [[VL:%.*]]) #[[ATTR0]] {
+// CHECK-RV64-NEXT: [[ENTRY:.*:]]
+// CHECK-RV64-NEXT: [[TMP0:%.*]] = call <vscale x 16 x bfloat> @llvm.riscv.vfnmsub.mask.nxv16bf16.bf16.i64(<vscale x 16 x bfloat> [[VD]], bfloat [[RS1]], <vscale x 16 x bfloat> [[VS2]], <vscale x 16 x i1> [[MASK]], i64 7, i64 [[VL]], i64 3)
+// CHECK-RV64-NEXT: ret <vscale x 16 x bfloat> [[TMP0]]
+//
+vbfloat16m4_t test_vfnmsub_vf_bf16m4_m(vbool4_t mask, vbfloat16m4_t vd, __bf16 rs1, vbfloat16m4_t vs2, size_t vl) {
+ return __riscv_vfnmsub(mask, vd, rs1, vs2, vl);
+}
+
+// CHECK-RV64-LABEL: define dso_local <vscale x 32 x bfloat> @test_vfnmsub_vv_bf16m8_m(
+// CHECK-RV64-SAME: <vscale x 32 x i1> [[MASK:%.*]], <vscale x 32 x bfloat> [[VD:%.*]], <vscale x 32 x bfloat> [[VS1:%.*]], <vscale x 32 x bfloat> [[VS2:%.*]], i64 noundef [[VL:%.*]]) #[[ATTR0]] {
+// CHECK-RV64-NEXT: [[ENTRY:.*:]]
+// CHECK-RV64-NEXT: [[TMP0:%.*]] = call <vscale x 32 x bfloat> @llvm.riscv.vfnmsub.mask.nxv32bf16.nxv32bf16.i64(<vscale x 32 x bfloat> [[VD]], <vscale x 32 x bfloat> [[VS1]], <vscale x 32 x bfloat> [[VS2]], <vscale x 32 x i1> [[MASK]], i64 7, i64 [[VL]], i64 3)
+// CHECK-RV64-NEXT: ret <vscale x 32 x bfloat> [[TMP0]]
+//
+vbfloat16m8_t test_vfnmsub_vv_bf16m8_m(vbool2_t mask, vbfloat16m8_t vd, vbfloat16m8_t vs1, vbfloat16m8_t vs2, size_t vl) {
+ return __riscv_vfnmsub(mask, vd, vs1, vs2, vl);
+}
+
+// CHECK-RV64-LABEL: define dso_local <vscale x 32 x bfloat> @test_vfnmsub_vf_bf16m8_m(
+// CHECK-RV64-SAME: <vscale x 32 x i1> [[MASK:%.*]], <vscale x 32 x bfloat> [[VD:%.*]], bfloat noundef [[RS1:%.*]], <vscale x 32 x bfloat> [[VS2:%.*]], i64 noundef [[VL:%.*]]) #[[ATTR0]] {
+// CHECK-RV64-NEXT: [[ENTRY:.*:]]
+// CHECK-RV64-NEXT: [[TMP0:%.*]] = call <vscale x 32 x bfloat> @llvm.riscv.vfnmsub.mask.nxv32bf16.bf16.i64(<vscale x 32 x bfloat> [[VD]], bfloat [[RS1]], <vscale x 32 x bfloat> [[VS2]], <vscale x 32 x i1> [[MASK]], i64 7, i64 [[VL]], i64 3)
+// CHECK-RV64-NEXT: ret <vscale x 32 x bfloat> [[TMP0]]
+//
+vbfloat16m8_t test_vfnmsub_vf_bf16m8_m(vbool2_t mask, vbfloat16m8_t vd, __bf16 rs1, vbfloat16m8_t vs2, size_t vl) {
+ return __riscv_vfnmsub(mask, vd, rs1, vs2, vl);
+}
+
diff --git a/clang/test/CodeGen/RISCV/rvv-intrinsics-autogenerated/zvfbfa/non-policy/overloaded/vfrec7.c b/clang/test/CodeGen/RISCV/rvv-intrinsics-autogenerated/zvfbfa/non-policy/overloaded/vfrec7.c
new file mode 100644
index 0000000..462b6ac
--- /dev/null
+++ b/clang/test/CodeGen/RISCV/rvv-intrinsics-autogenerated/zvfbfa/non-policy/overloaded/vfrec7.c
@@ -0,0 +1,129 @@
+// NOTE: Assertions have been autogenerated by utils/update_cc_test_checks.py UTC_ARGS: --version 5
+// REQUIRES: riscv-registered-target
+// RUN: %clang_cc1 -triple riscv64 -target-feature +v \
+// RUN: -target-feature +experimental-zvfbfa -disable-O0-optnone \
+// RUN: -emit-llvm %s -o - | opt -S -passes=mem2reg | \
+// RUN: FileCheck --check-prefix=CHECK-RV64 %s
+
+#include <riscv_vector.h>
+
+// CHECK-RV64-LABEL: define dso_local <vscale x 1 x bfloat> @test_vfrec7_v_bf16mf4(
+// CHECK-RV64-SAME: <vscale x 1 x bfloat> [[OP1:%.*]], i64 noundef [[VL:%.*]]) #[[ATTR0:[0-9]+]] {
+// CHECK-RV64-NEXT: [[ENTRY:.*:]]
+// CHECK-RV64-NEXT: [[TMP0:%.*]] = call <vscale x 1 x bfloat> @llvm.riscv.vfrec7.nxv1bf16.i64(<vscale x 1 x bfloat> poison, <vscale x 1 x bfloat> [[OP1]], i64 7, i64 [[VL]])
+// CHECK-RV64-NEXT: ret <vscale x 1 x bfloat> [[TMP0]]
+//
+vbfloat16mf4_t test_vfrec7_v_bf16mf4(vbfloat16mf4_t op1, size_t vl) {
+ return __riscv_vfrec7(op1, vl);
+}
+
+// CHECK-RV64-LABEL: define dso_local <vscale x 2 x bfloat> @test_vfrec7_v_bf16mf2(
+// CHECK-RV64-SAME: <vscale x 2 x bfloat> [[OP1:%.*]], i64 noundef [[VL:%.*]]) #[[ATTR0]] {
+// CHECK-RV64-NEXT: [[ENTRY:.*:]]
+// CHECK-RV64-NEXT: [[TMP0:%.*]] = call <vscale x 2 x bfloat> @llvm.riscv.vfrec7.nxv2bf16.i64(<vscale x 2 x bfloat> poison, <vscale x 2 x bfloat> [[OP1]], i64 7, i64 [[VL]])
+// CHECK-RV64-NEXT: ret <vscale x 2 x bfloat> [[TMP0]]
+//
+vbfloat16mf2_t test_vfrec7_v_bf16mf2(vbfloat16mf2_t op1, size_t vl) {
+ return __riscv_vfrec7(op1, vl);
+}
+
+// CHECK-RV64-LABEL: define dso_local <vscale x 4 x bfloat> @test_vfrec7_v_bf16m1(
+// CHECK-RV64-SAME: <vscale x 4 x bfloat> [[OP1:%.*]], i64 noundef [[VL:%.*]]) #[[ATTR0]] {
+// CHECK-RV64-NEXT: [[ENTRY:.*:]]
+// CHECK-RV64-NEXT: [[TMP0:%.*]] = call <vscale x 4 x bfloat> @llvm.riscv.vfrec7.nxv4bf16.i64(<vscale x 4 x bfloat> poison, <vscale x 4 x bfloat> [[OP1]], i64 7, i64 [[VL]])
+// CHECK-RV64-NEXT: ret <vscale x 4 x bfloat> [[TMP0]]
+//
+vbfloat16m1_t test_vfrec7_v_bf16m1(vbfloat16m1_t op1, size_t vl) {
+ return __riscv_vfrec7(op1, vl);
+}
+
+// CHECK-RV64-LABEL: define dso_local <vscale x 8 x bfloat> @test_vfrec7_v_bf16m2(
+// CHECK-RV64-SAME: <vscale x 8 x bfloat> [[OP1:%.*]], i64 noundef [[VL:%.*]]) #[[ATTR0]] {
+// CHECK-RV64-NEXT: [[ENTRY:.*:]]
+// CHECK-RV64-NEXT: [[TMP0:%.*]] = call <vscale x 8 x bfloat> @llvm.riscv.vfrec7.nxv8bf16.i64(<vscale x 8 x bfloat> poison, <vscale x 8 x bfloat> [[OP1]], i64 7, i64 [[VL]])
+// CHECK-RV64-NEXT: ret <vscale x 8 x bfloat> [[TMP0]]
+//
+vbfloat16m2_t test_vfrec7_v_bf16m2(vbfloat16m2_t op1, size_t vl) {
+ return __riscv_vfrec7(op1, vl);
+}
+
+// CHECK-RV64-LABEL: define dso_local <vscale x 16 x bfloat> @test_vfrec7_v_bf16m4(
+// CHECK-RV64-SAME: <vscale x 16 x bfloat> [[OP1:%.*]], i64 noundef [[VL:%.*]]) #[[ATTR0]] {
+// CHECK-RV64-NEXT: [[ENTRY:.*:]]
+// CHECK-RV64-NEXT: [[TMP0:%.*]] = call <vscale x 16 x bfloat> @llvm.riscv.vfrec7.nxv16bf16.i64(<vscale x 16 x bfloat> poison, <vscale x 16 x bfloat> [[OP1]], i64 7, i64 [[VL]])
+// CHECK-RV64-NEXT: ret <vscale x 16 x bfloat> [[TMP0]]
+//
+vbfloat16m4_t test_vfrec7_v_bf16m4(vbfloat16m4_t op1, size_t vl) {
+ return __riscv_vfrec7(op1, vl);
+}
+
+// CHECK-RV64-LABEL: define dso_local <vscale x 32 x bfloat> @test_vfrec7_v_bf16m8(
+// CHECK-RV64-SAME: <vscale x 32 x bfloat> [[OP1:%.*]], i64 noundef [[VL:%.*]]) #[[ATTR0]] {
+// CHECK-RV64-NEXT: [[ENTRY:.*:]]
+// CHECK-RV64-NEXT: [[TMP0:%.*]] = call <vscale x 32 x bfloat> @llvm.riscv.vfrec7.nxv32bf16.i64(<vscale x 32 x bfloat> poison, <vscale x 32 x bfloat> [[OP1]], i64 7, i64 [[VL]])
+// CHECK-RV64-NEXT: ret <vscale x 32 x bfloat> [[TMP0]]
+//
+vbfloat16m8_t test_vfrec7_v_bf16m8(vbfloat16m8_t op1, size_t vl) {
+ return __riscv_vfrec7(op1, vl);
+}
+
+// CHECK-RV64-LABEL: define dso_local <vscale x 1 x bfloat> @test_vfrec7_v_bf16mf4_m(
+// CHECK-RV64-SAME: <vscale x 1 x i1> [[MASK:%.*]], <vscale x 1 x bfloat> [[OP1:%.*]], i64 noundef [[VL:%.*]]) #[[ATTR0]] {
+// CHECK-RV64-NEXT: [[ENTRY:.*:]]
+// CHECK-RV64-NEXT: [[TMP0:%.*]] = call <vscale x 1 x bfloat> @llvm.riscv.vfrec7.mask.nxv1bf16.i64(<vscale x 1 x bfloat> poison, <vscale x 1 x bfloat> [[OP1]], <vscale x 1 x i1> [[MASK]], i64 7, i64 [[VL]], i64 3)
+// CHECK-RV64-NEXT: ret <vscale x 1 x bfloat> [[TMP0]]
+//
+vbfloat16mf4_t test_vfrec7_v_bf16mf4_m(vbool64_t mask, vbfloat16mf4_t op1, size_t vl) {
+ return __riscv_vfrec7(mask, op1, vl);
+}
+
+// CHECK-RV64-LABEL: define dso_local <vscale x 2 x bfloat> @test_vfrec7_v_bf16mf2_m(
+// CHECK-RV64-SAME: <vscale x 2 x i1> [[MASK:%.*]], <vscale x 2 x bfloat> [[OP1:%.*]], i64 noundef [[VL:%.*]]) #[[ATTR0]] {
+// CHECK-RV64-NEXT: [[ENTRY:.*:]]
+// CHECK-RV64-NEXT: [[TMP0:%.*]] = call <vscale x 2 x bfloat> @llvm.riscv.vfrec7.mask.nxv2bf16.i64(<vscale x 2 x bfloat> poison, <vscale x 2 x bfloat> [[OP1]], <vscale x 2 x i1> [[MASK]], i64 7, i64 [[VL]], i64 3)
+// CHECK-RV64-NEXT: ret <vscale x 2 x bfloat> [[TMP0]]
+//
+vbfloat16mf2_t test_vfrec7_v_bf16mf2_m(vbool32_t mask, vbfloat16mf2_t op1, size_t vl) {
+ return __riscv_vfrec7(mask, op1, vl);
+}
+
+// CHECK-RV64-LABEL: define dso_local <vscale x 4 x bfloat> @test_vfrec7_v_bf16m1_m(
+// CHECK-RV64-SAME: <vscale x 4 x i1> [[MASK:%.*]], <vscale x 4 x bfloat> [[OP1:%.*]], i64 noundef [[VL:%.*]]) #[[ATTR0]] {
+// CHECK-RV64-NEXT: [[ENTRY:.*:]]
+// CHECK-RV64-NEXT: [[TMP0:%.*]] = call <vscale x 4 x bfloat> @llvm.riscv.vfrec7.mask.nxv4bf16.i64(<vscale x 4 x bfloat> poison, <vscale x 4 x bfloat> [[OP1]], <vscale x 4 x i1> [[MASK]], i64 7, i64 [[VL]], i64 3)
+// CHECK-RV64-NEXT: ret <vscale x 4 x bfloat> [[TMP0]]
+//
+vbfloat16m1_t test_vfrec7_v_bf16m1_m(vbool16_t mask, vbfloat16m1_t op1, size_t vl) {
+ return __riscv_vfrec7(mask, op1, vl);
+}
+
+// CHECK-RV64-LABEL: define dso_local <vscale x 8 x bfloat> @test_vfrec7_v_bf16m2_m(
+// CHECK-RV64-SAME: <vscale x 8 x i1> [[MASK:%.*]], <vscale x 8 x bfloat> [[OP1:%.*]], i64 noundef [[VL:%.*]]) #[[ATTR0]] {
+// CHECK-RV64-NEXT: [[ENTRY:.*:]]
+// CHECK-RV64-NEXT: [[TMP0:%.*]] = call <vscale x 8 x bfloat> @llvm.riscv.vfrec7.mask.nxv8bf16.i64(<vscale x 8 x bfloat> poison, <vscale x 8 x bfloat> [[OP1]], <vscale x 8 x i1> [[MASK]], i64 7, i64 [[VL]], i64 3)
+// CHECK-RV64-NEXT: ret <vscale x 8 x bfloat> [[TMP0]]
+//
+vbfloat16m2_t test_vfrec7_v_bf16m2_m(vbool8_t mask, vbfloat16m2_t op1, size_t vl) {
+ return __riscv_vfrec7(mask, op1, vl);
+}
+
+// CHECK-RV64-LABEL: define dso_local <vscale x 16 x bfloat> @test_vfrec7_v_bf16m4_m(
+// CHECK-RV64-SAME: <vscale x 16 x i1> [[MASK:%.*]], <vscale x 16 x bfloat> [[OP1:%.*]], i64 noundef [[VL:%.*]]) #[[ATTR0]] {
+// CHECK-RV64-NEXT: [[ENTRY:.*:]]
+// CHECK-RV64-NEXT: [[TMP0:%.*]] = call <vscale x 16 x bfloat> @llvm.riscv.vfrec7.mask.nxv16bf16.i64(<vscale x 16 x bfloat> poison, <vscale x 16 x bfloat> [[OP1]], <vscale x 16 x i1> [[MASK]], i64 7, i64 [[VL]], i64 3)
+// CHECK-RV64-NEXT: ret <vscale x 16 x bfloat> [[TMP0]]
+//
+vbfloat16m4_t test_vfrec7_v_bf16m4_m(vbool4_t mask, vbfloat16m4_t op1, size_t vl) {
+ return __riscv_vfrec7(mask, op1, vl);
+}
+
+// CHECK-RV64-LABEL: define dso_local <vscale x 32 x bfloat> @test_vfrec7_v_bf16m8_m(
+// CHECK-RV64-SAME: <vscale x 32 x i1> [[MASK:%.*]], <vscale x 32 x bfloat> [[OP1:%.*]], i64 noundef [[VL:%.*]]) #[[ATTR0]] {
+// CHECK-RV64-NEXT: [[ENTRY:.*:]]
+// CHECK-RV64-NEXT: [[TMP0:%.*]] = call <vscale x 32 x bfloat> @llvm.riscv.vfrec7.mask.nxv32bf16.i64(<vscale x 32 x bfloat> poison, <vscale x 32 x bfloat> [[OP1]], <vscale x 32 x i1> [[MASK]], i64 7, i64 [[VL]], i64 3)
+// CHECK-RV64-NEXT: ret <vscale x 32 x bfloat> [[TMP0]]
+//
+vbfloat16m8_t test_vfrec7_v_bf16m8_m(vbool2_t mask, vbfloat16m8_t op1, size_t vl) {
+ return __riscv_vfrec7(mask, op1, vl);
+}
+
diff --git a/clang/test/CodeGen/RISCV/rvv-intrinsics-autogenerated/zvfbfa/non-policy/overloaded/vfrsqrt7.c b/clang/test/CodeGen/RISCV/rvv-intrinsics-autogenerated/zvfbfa/non-policy/overloaded/vfrsqrt7.c
new file mode 100644
index 0000000..051fde7
--- /dev/null
+++ b/clang/test/CodeGen/RISCV/rvv-intrinsics-autogenerated/zvfbfa/non-policy/overloaded/vfrsqrt7.c
@@ -0,0 +1,129 @@
+// NOTE: Assertions have been autogenerated by utils/update_cc_test_checks.py UTC_ARGS: --version 5
+// REQUIRES: riscv-registered-target
+// RUN: %clang_cc1 -triple riscv64 -target-feature +v \
+// RUN: -target-feature +experimental-zvfbfa -disable-O0-optnone \
+// RUN: -emit-llvm %s -o - | opt -S -passes=mem2reg | \
+// RUN: FileCheck --check-prefix=CHECK-RV64 %s
+
+#include <riscv_vector.h>
+
+// CHECK-RV64-LABEL: define dso_local <vscale x 1 x bfloat> @test_vfrsqrt7_v_bf16mf4(
+// CHECK-RV64-SAME: <vscale x 1 x bfloat> [[OP1:%.*]], i64 noundef [[VL:%.*]]) #[[ATTR0:[0-9]+]] {
+// CHECK-RV64-NEXT: [[ENTRY:.*:]]
+// CHECK-RV64-NEXT: [[TMP0:%.*]] = call <vscale x 1 x bfloat> @llvm.riscv.vfrsqrt7.nxv1bf16.i64(<vscale x 1 x bfloat> poison, <vscale x 1 x bfloat> [[OP1]], i64 [[VL]])
+// CHECK-RV64-NEXT: ret <vscale x 1 x bfloat> [[TMP0]]
+//
+vbfloat16mf4_t test_vfrsqrt7_v_bf16mf4(vbfloat16mf4_t op1, size_t vl) {
+ return __riscv_vfrsqrt7(op1, vl);
+}
+
+// CHECK-RV64-LABEL: define dso_local <vscale x 2 x bfloat> @test_vfrsqrt7_v_bf16mf2(
+// CHECK-RV64-SAME: <vscale x 2 x bfloat> [[OP1:%.*]], i64 noundef [[VL:%.*]]) #[[ATTR0]] {
+// CHECK-RV64-NEXT: [[ENTRY:.*:]]
+// CHECK-RV64-NEXT: [[TMP0:%.*]] = call <vscale x 2 x bfloat> @llvm.riscv.vfrsqrt7.nxv2bf16.i64(<vscale x 2 x bfloat> poison, <vscale x 2 x bfloat> [[OP1]], i64 [[VL]])
+// CHECK-RV64-NEXT: ret <vscale x 2 x bfloat> [[TMP0]]
+//
+vbfloat16mf2_t test_vfrsqrt7_v_bf16mf2(vbfloat16mf2_t op1, size_t vl) {
+ return __riscv_vfrsqrt7(op1, vl);
+}
+
+// CHECK-RV64-LABEL: define dso_local <vscale x 4 x bfloat> @test_vfrsqrt7_v_bf16m1(
+// CHECK-RV64-SAME: <vscale x 4 x bfloat> [[OP1:%.*]], i64 noundef [[VL:%.*]]) #[[ATTR0]] {
+// CHECK-RV64-NEXT: [[ENTRY:.*:]]
+// CHECK-RV64-NEXT: [[TMP0:%.*]] = call <vscale x 4 x bfloat> @llvm.riscv.vfrsqrt7.nxv4bf16.i64(<vscale x 4 x bfloat> poison, <vscale x 4 x bfloat> [[OP1]], i64 [[VL]])
+// CHECK-RV64-NEXT: ret <vscale x 4 x bfloat> [[TMP0]]
+//
+vbfloat16m1_t test_vfrsqrt7_v_bf16m1(vbfloat16m1_t op1, size_t vl) {
+ return __riscv_vfrsqrt7(op1, vl);
+}
+
+// CHECK-RV64-LABEL: define dso_local <vscale x 8 x bfloat> @test_vfrsqrt7_v_bf16m2(
+// CHECK-RV64-SAME: <vscale x 8 x bfloat> [[OP1:%.*]], i64 noundef [[VL:%.*]]) #[[ATTR0]] {
+// CHECK-RV64-NEXT: [[ENTRY:.*:]]
+// CHECK-RV64-NEXT: [[TMP0:%.*]] = call <vscale x 8 x bfloat> @llvm.riscv.vfrsqrt7.nxv8bf16.i64(<vscale x 8 x bfloat> poison, <vscale x 8 x bfloat> [[OP1]], i64 [[VL]])
+// CHECK-RV64-NEXT: ret <vscale x 8 x bfloat> [[TMP0]]
+//
+vbfloat16m2_t test_vfrsqrt7_v_bf16m2(vbfloat16m2_t op1, size_t vl) {
+ return __riscv_vfrsqrt7(op1, vl);
+}
+
+// CHECK-RV64-LABEL: define dso_local <vscale x 16 x bfloat> @test_vfrsqrt7_v_bf16m4(
+// CHECK-RV64-SAME: <vscale x 16 x bfloat> [[OP1:%.*]], i64 noundef [[VL:%.*]]) #[[ATTR0]] {
+// CHECK-RV64-NEXT: [[ENTRY:.*:]]
+// CHECK-RV64-NEXT: [[TMP0:%.*]] = call <vscale x 16 x bfloat> @llvm.riscv.vfrsqrt7.nxv16bf16.i64(<vscale x 16 x bfloat> poison, <vscale x 16 x bfloat> [[OP1]], i64 [[VL]])
+// CHECK-RV64-NEXT: ret <vscale x 16 x bfloat> [[TMP0]]
+//
+vbfloat16m4_t test_vfrsqrt7_v_bf16m4(vbfloat16m4_t op1, size_t vl) {
+ return __riscv_vfrsqrt7(op1, vl);
+}
+
+// CHECK-RV64-LABEL: define dso_local <vscale x 32 x bfloat> @test_vfrsqrt7_v_bf16m8(
+// CHECK-RV64-SAME: <vscale x 32 x bfloat> [[OP1:%.*]], i64 noundef [[VL:%.*]]) #[[ATTR0]] {
+// CHECK-RV64-NEXT: [[ENTRY:.*:]]
+// CHECK-RV64-NEXT: [[TMP0:%.*]] = call <vscale x 32 x bfloat> @llvm.riscv.vfrsqrt7.nxv32bf16.i64(<vscale x 32 x bfloat> poison, <vscale x 32 x bfloat> [[OP1]], i64 [[VL]])
+// CHECK-RV64-NEXT: ret <vscale x 32 x bfloat> [[TMP0]]
+//
+vbfloat16m8_t test_vfrsqrt7_v_bf16m8(vbfloat16m8_t op1, size_t vl) {
+ return __riscv_vfrsqrt7(op1, vl);
+}
+
+// CHECK-RV64-LABEL: define dso_local <vscale x 1 x bfloat> @test_vfrsqrt7_v_bf16mf4_m(
+// CHECK-RV64-SAME: <vscale x 1 x i1> [[MASK:%.*]], <vscale x 1 x bfloat> [[OP1:%.*]], i64 noundef [[VL:%.*]]) #[[ATTR0]] {
+// CHECK-RV64-NEXT: [[ENTRY:.*:]]
+// CHECK-RV64-NEXT: [[TMP0:%.*]] = call <vscale x 1 x bfloat> @llvm.riscv.vfrsqrt7.mask.nxv1bf16.i64(<vscale x 1 x bfloat> poison, <vscale x 1 x bfloat> [[OP1]], <vscale x 1 x i1> [[MASK]], i64 [[VL]], i64 3)
+// CHECK-RV64-NEXT: ret <vscale x 1 x bfloat> [[TMP0]]
+//
+vbfloat16mf4_t test_vfrsqrt7_v_bf16mf4_m(vbool64_t mask, vbfloat16mf4_t op1, size_t vl) {
+ return __riscv_vfrsqrt7(mask, op1, vl);
+}
+
+// CHECK-RV64-LABEL: define dso_local <vscale x 2 x bfloat> @test_vfrsqrt7_v_bf16mf2_m(
+// CHECK-RV64-SAME: <vscale x 2 x i1> [[MASK:%.*]], <vscale x 2 x bfloat> [[OP1:%.*]], i64 noundef [[VL:%.*]]) #[[ATTR0]] {
+// CHECK-RV64-NEXT: [[ENTRY:.*:]]
+// CHECK-RV64-NEXT: [[TMP0:%.*]] = call <vscale x 2 x bfloat> @llvm.riscv.vfrsqrt7.mask.nxv2bf16.i64(<vscale x 2 x bfloat> poison, <vscale x 2 x bfloat> [[OP1]], <vscale x 2 x i1> [[MASK]], i64 [[VL]], i64 3)
+// CHECK-RV64-NEXT: ret <vscale x 2 x bfloat> [[TMP0]]
+//
+vbfloat16mf2_t test_vfrsqrt7_v_bf16mf2_m(vbool32_t mask, vbfloat16mf2_t op1, size_t vl) {
+ return __riscv_vfrsqrt7(mask, op1, vl);
+}
+
+// CHECK-RV64-LABEL: define dso_local <vscale x 4 x bfloat> @test_vfrsqrt7_v_bf16m1_m(
+// CHECK-RV64-SAME: <vscale x 4 x i1> [[MASK:%.*]], <vscale x 4 x bfloat> [[OP1:%.*]], i64 noundef [[VL:%.*]]) #[[ATTR0]] {
+// CHECK-RV64-NEXT: [[ENTRY:.*:]]
+// CHECK-RV64-NEXT: [[TMP0:%.*]] = call <vscale x 4 x bfloat> @llvm.riscv.vfrsqrt7.mask.nxv4bf16.i64(<vscale x 4 x bfloat> poison, <vscale x 4 x bfloat> [[OP1]], <vscale x 4 x i1> [[MASK]], i64 [[VL]], i64 3)
+// CHECK-RV64-NEXT: ret <vscale x 4 x bfloat> [[TMP0]]
+//
+vbfloat16m1_t test_vfrsqrt7_v_bf16m1_m(vbool16_t mask, vbfloat16m1_t op1, size_t vl) {
+ return __riscv_vfrsqrt7(mask, op1, vl);
+}
+
+// CHECK-RV64-LABEL: define dso_local <vscale x 8 x bfloat> @test_vfrsqrt7_v_bf16m2_m(
+// CHECK-RV64-SAME: <vscale x 8 x i1> [[MASK:%.*]], <vscale x 8 x bfloat> [[OP1:%.*]], i64 noundef [[VL:%.*]]) #[[ATTR0]] {
+// CHECK-RV64-NEXT: [[ENTRY:.*:]]
+// CHECK-RV64-NEXT: [[TMP0:%.*]] = call <vscale x 8 x bfloat> @llvm.riscv.vfrsqrt7.mask.nxv8bf16.i64(<vscale x 8 x bfloat> poison, <vscale x 8 x bfloat> [[OP1]], <vscale x 8 x i1> [[MASK]], i64 [[VL]], i64 3)
+// CHECK-RV64-NEXT: ret <vscale x 8 x bfloat> [[TMP0]]
+//
+vbfloat16m2_t test_vfrsqrt7_v_bf16m2_m(vbool8_t mask, vbfloat16m2_t op1, size_t vl) {
+ return __riscv_vfrsqrt7(mask, op1, vl);
+}
+
+// CHECK-RV64-LABEL: define dso_local <vscale x 16 x bfloat> @test_vfrsqrt7_v_bf16m4_m(
+// CHECK-RV64-SAME: <vscale x 16 x i1> [[MASK:%.*]], <vscale x 16 x bfloat> [[OP1:%.*]], i64 noundef [[VL:%.*]]) #[[ATTR0]] {
+// CHECK-RV64-NEXT: [[ENTRY:.*:]]
+// CHECK-RV64-NEXT: [[TMP0:%.*]] = call <vscale x 16 x bfloat> @llvm.riscv.vfrsqrt7.mask.nxv16bf16.i64(<vscale x 16 x bfloat> poison, <vscale x 16 x bfloat> [[OP1]], <vscale x 16 x i1> [[MASK]], i64 [[VL]], i64 3)
+// CHECK-RV64-NEXT: ret <vscale x 16 x bfloat> [[TMP0]]
+//
+vbfloat16m4_t test_vfrsqrt7_v_bf16m4_m(vbool4_t mask, vbfloat16m4_t op1, size_t vl) {
+ return __riscv_vfrsqrt7(mask, op1, vl);
+}
+
+// CHECK-RV64-LABEL: define dso_local <vscale x 32 x bfloat> @test_vfrsqrt7_v_bf16m8_m(
+// CHECK-RV64-SAME: <vscale x 32 x i1> [[MASK:%.*]], <vscale x 32 x bfloat> [[OP1:%.*]], i64 noundef [[VL:%.*]]) #[[ATTR0]] {
+// CHECK-RV64-NEXT: [[ENTRY:.*:]]
+// CHECK-RV64-NEXT: [[TMP0:%.*]] = call <vscale x 32 x bfloat> @llvm.riscv.vfrsqrt7.mask.nxv32bf16.i64(<vscale x 32 x bfloat> poison, <vscale x 32 x bfloat> [[OP1]], <vscale x 32 x i1> [[MASK]], i64 [[VL]], i64 3)
+// CHECK-RV64-NEXT: ret <vscale x 32 x bfloat> [[TMP0]]
+//
+vbfloat16m8_t test_vfrsqrt7_v_bf16m8_m(vbool2_t mask, vbfloat16m8_t op1, size_t vl) {
+ return __riscv_vfrsqrt7(mask, op1, vl);
+}
+
diff --git a/clang/test/CodeGen/RISCV/rvv-intrinsics-autogenerated/zvfbfa/non-policy/overloaded/vfrsub.c b/clang/test/CodeGen/RISCV/rvv-intrinsics-autogenerated/zvfbfa/non-policy/overloaded/vfrsub.c
new file mode 100644
index 0000000..0494182
--- /dev/null
+++ b/clang/test/CodeGen/RISCV/rvv-intrinsics-autogenerated/zvfbfa/non-policy/overloaded/vfrsub.c
@@ -0,0 +1,129 @@
+// NOTE: Assertions have been autogenerated by utils/update_cc_test_checks.py UTC_ARGS: --version 5
+// REQUIRES: riscv-registered-target
+// RUN: %clang_cc1 -triple riscv64 -target-feature +v \
+// RUN: -target-feature +experimental-zvfbfa -disable-O0-optnone \
+// RUN: -emit-llvm %s -o - | opt -S -passes=mem2reg | \
+// RUN: FileCheck --check-prefix=CHECK-RV64 %s
+
+#include <riscv_vector.h>
+
+// CHECK-RV64-LABEL: define dso_local <vscale x 1 x bfloat> @test_vfrsub_vf_bf16mf4(
+// CHECK-RV64-SAME: <vscale x 1 x bfloat> [[OP1:%.*]], bfloat noundef [[OP2:%.*]], i64 noundef [[VL:%.*]]) #[[ATTR0:[0-9]+]] {
+// CHECK-RV64-NEXT: [[ENTRY:.*:]]
+// CHECK-RV64-NEXT: [[TMP0:%.*]] = call <vscale x 1 x bfloat> @llvm.riscv.vfrsub.nxv1bf16.bf16.i64(<vscale x 1 x bfloat> poison, <vscale x 1 x bfloat> [[OP1]], bfloat [[OP2]], i64 7, i64 [[VL]])
+// CHECK-RV64-NEXT: ret <vscale x 1 x bfloat> [[TMP0]]
+//
+vbfloat16mf4_t test_vfrsub_vf_bf16mf4(vbfloat16mf4_t op1, __bf16 op2, size_t vl) {
+ return __riscv_vfrsub(op1, op2, vl);
+}
+
+// CHECK-RV64-LABEL: define dso_local <vscale x 2 x bfloat> @test_vfrsub_vf_bf16mf2(
+// CHECK-RV64-SAME: <vscale x 2 x bfloat> [[OP1:%.*]], bfloat noundef [[OP2:%.*]], i64 noundef [[VL:%.*]]) #[[ATTR0]] {
+// CHECK-RV64-NEXT: [[ENTRY:.*:]]
+// CHECK-RV64-NEXT: [[TMP0:%.*]] = call <vscale x 2 x bfloat> @llvm.riscv.vfrsub.nxv2bf16.bf16.i64(<vscale x 2 x bfloat> poison, <vscale x 2 x bfloat> [[OP1]], bfloat [[OP2]], i64 7, i64 [[VL]])
+// CHECK-RV64-NEXT: ret <vscale x 2 x bfloat> [[TMP0]]
+//
+vbfloat16mf2_t test_vfrsub_vf_bf16mf2(vbfloat16mf2_t op1, __bf16 op2, size_t vl) {
+ return __riscv_vfrsub(op1, op2, vl);
+}
+
+// CHECK-RV64-LABEL: define dso_local <vscale x 4 x bfloat> @test_vfrsub_vf_bf16m1(
+// CHECK-RV64-SAME: <vscale x 4 x bfloat> [[OP1:%.*]], bfloat noundef [[OP2:%.*]], i64 noundef [[VL:%.*]]) #[[ATTR0]] {
+// CHECK-RV64-NEXT: [[ENTRY:.*:]]
+// CHECK-RV64-NEXT: [[TMP0:%.*]] = call <vscale x 4 x bfloat> @llvm.riscv.vfrsub.nxv4bf16.bf16.i64(<vscale x 4 x bfloat> poison, <vscale x 4 x bfloat> [[OP1]], bfloat [[OP2]], i64 7, i64 [[VL]])
+// CHECK-RV64-NEXT: ret <vscale x 4 x bfloat> [[TMP0]]
+//
+vbfloat16m1_t test_vfrsub_vf_bf16m1(vbfloat16m1_t op1, __bf16 op2, size_t vl) {
+ return __riscv_vfrsub(op1, op2, vl);
+}
+
+// CHECK-RV64-LABEL: define dso_local <vscale x 8 x bfloat> @test_vfrsub_vf_bf16m2(
+// CHECK-RV64-SAME: <vscale x 8 x bfloat> [[OP1:%.*]], bfloat noundef [[OP2:%.*]], i64 noundef [[VL:%.*]]) #[[ATTR0]] {
+// CHECK-RV64-NEXT: [[ENTRY:.*:]]
+// CHECK-RV64-NEXT: [[TMP0:%.*]] = call <vscale x 8 x bfloat> @llvm.riscv.vfrsub.nxv8bf16.bf16.i64(<vscale x 8 x bfloat> poison, <vscale x 8 x bfloat> [[OP1]], bfloat [[OP2]], i64 7, i64 [[VL]])
+// CHECK-RV64-NEXT: ret <vscale x 8 x bfloat> [[TMP0]]
+//
+vbfloat16m2_t test_vfrsub_vf_bf16m2(vbfloat16m2_t op1, __bf16 op2, size_t vl) {
+ return __riscv_vfrsub(op1, op2, vl);
+}
+
+// CHECK-RV64-LABEL: define dso_local <vscale x 16 x bfloat> @test_vfrsub_vf_bf16m4(
+// CHECK-RV64-SAME: <vscale x 16 x bfloat> [[OP1:%.*]], bfloat noundef [[OP2:%.*]], i64 noundef [[VL:%.*]]) #[[ATTR0]] {
+// CHECK-RV64-NEXT: [[ENTRY:.*:]]
+// CHECK-RV64-NEXT: [[TMP0:%.*]] = call <vscale x 16 x bfloat> @llvm.riscv.vfrsub.nxv16bf16.bf16.i64(<vscale x 16 x bfloat> poison, <vscale x 16 x bfloat> [[OP1]], bfloat [[OP2]], i64 7, i64 [[VL]])
+// CHECK-RV64-NEXT: ret <vscale x 16 x bfloat> [[TMP0]]
+//
+vbfloat16m4_t test_vfrsub_vf_bf16m4(vbfloat16m4_t op1, __bf16 op2, size_t vl) {
+ return __riscv_vfrsub(op1, op2, vl);
+}
+
+// CHECK-RV64-LABEL: define dso_local <vscale x 32 x bfloat> @test_vfrsub_vf_bf16m8(
+// CHECK-RV64-SAME: <vscale x 32 x bfloat> [[OP1:%.*]], bfloat noundef [[OP2:%.*]], i64 noundef [[VL:%.*]]) #[[ATTR0]] {
+// CHECK-RV64-NEXT: [[ENTRY:.*:]]
+// CHECK-RV64-NEXT: [[TMP0:%.*]] = call <vscale x 32 x bfloat> @llvm.riscv.vfrsub.nxv32bf16.bf16.i64(<vscale x 32 x bfloat> poison, <vscale x 32 x bfloat> [[OP1]], bfloat [[OP2]], i64 7, i64 [[VL]])
+// CHECK-RV64-NEXT: ret <vscale x 32 x bfloat> [[TMP0]]
+//
+vbfloat16m8_t test_vfrsub_vf_bf16m8(vbfloat16m8_t op1, __bf16 op2, size_t vl) {
+ return __riscv_vfrsub(op1, op2, vl);
+}
+
+// CHECK-RV64-LABEL: define dso_local <vscale x 1 x bfloat> @test_vfrsub_vf_bf16mf4_m(
+// CHECK-RV64-SAME: <vscale x 1 x i1> [[MASK:%.*]], <vscale x 1 x bfloat> [[OP1:%.*]], bfloat noundef [[OP2:%.*]], i64 noundef [[VL:%.*]]) #[[ATTR0]] {
+// CHECK-RV64-NEXT: [[ENTRY:.*:]]
+// CHECK-RV64-NEXT: [[TMP0:%.*]] = call <vscale x 1 x bfloat> @llvm.riscv.vfrsub.mask.nxv1bf16.bf16.i64(<vscale x 1 x bfloat> poison, <vscale x 1 x bfloat> [[OP1]], bfloat [[OP2]], <vscale x 1 x i1> [[MASK]], i64 7, i64 [[VL]], i64 3)
+// CHECK-RV64-NEXT: ret <vscale x 1 x bfloat> [[TMP0]]
+//
+vbfloat16mf4_t test_vfrsub_vf_bf16mf4_m(vbool64_t mask, vbfloat16mf4_t op1, __bf16 op2, size_t vl) {
+ return __riscv_vfrsub(mask, op1, op2, vl);
+}
+
+// CHECK-RV64-LABEL: define dso_local <vscale x 2 x bfloat> @test_vfrsub_vf_bf16mf2_m(
+// CHECK-RV64-SAME: <vscale x 2 x i1> [[MASK:%.*]], <vscale x 2 x bfloat> [[OP1:%.*]], bfloat noundef [[OP2:%.*]], i64 noundef [[VL:%.*]]) #[[ATTR0]] {
+// CHECK-RV64-NEXT: [[ENTRY:.*:]]
+// CHECK-RV64-NEXT: [[TMP0:%.*]] = call <vscale x 2 x bfloat> @llvm.riscv.vfrsub.mask.nxv2bf16.bf16.i64(<vscale x 2 x bfloat> poison, <vscale x 2 x bfloat> [[OP1]], bfloat [[OP2]], <vscale x 2 x i1> [[MASK]], i64 7, i64 [[VL]], i64 3)
+// CHECK-RV64-NEXT: ret <vscale x 2 x bfloat> [[TMP0]]
+//
+vbfloat16mf2_t test_vfrsub_vf_bf16mf2_m(vbool32_t mask, vbfloat16mf2_t op1, __bf16 op2, size_t vl) {
+ return __riscv_vfrsub(mask, op1, op2, vl);
+}
+
+// CHECK-RV64-LABEL: define dso_local <vscale x 4 x bfloat> @test_vfrsub_vf_bf16m1_m(
+// CHECK-RV64-SAME: <vscale x 4 x i1> [[MASK:%.*]], <vscale x 4 x bfloat> [[OP1:%.*]], bfloat noundef [[OP2:%.*]], i64 noundef [[VL:%.*]]) #[[ATTR0]] {
+// CHECK-RV64-NEXT: [[ENTRY:.*:]]
+// CHECK-RV64-NEXT: [[TMP0:%.*]] = call <vscale x 4 x bfloat> @llvm.riscv.vfrsub.mask.nxv4bf16.bf16.i64(<vscale x 4 x bfloat> poison, <vscale x 4 x bfloat> [[OP1]], bfloat [[OP2]], <vscale x 4 x i1> [[MASK]], i64 7, i64 [[VL]], i64 3)
+// CHECK-RV64-NEXT: ret <vscale x 4 x bfloat> [[TMP0]]
+//
+vbfloat16m1_t test_vfrsub_vf_bf16m1_m(vbool16_t mask, vbfloat16m1_t op1, __bf16 op2, size_t vl) {
+ return __riscv_vfrsub(mask, op1, op2, vl);
+}
+
+// CHECK-RV64-LABEL: define dso_local <vscale x 8 x bfloat> @test_vfrsub_vf_bf16m2_m(
+// CHECK-RV64-SAME: <vscale x 8 x i1> [[MASK:%.*]], <vscale x 8 x bfloat> [[OP1:%.*]], bfloat noundef [[OP2:%.*]], i64 noundef [[VL:%.*]]) #[[ATTR0]] {
+// CHECK-RV64-NEXT: [[ENTRY:.*:]]
+// CHECK-RV64-NEXT: [[TMP0:%.*]] = call <vscale x 8 x bfloat> @llvm.riscv.vfrsub.mask.nxv8bf16.bf16.i64(<vscale x 8 x bfloat> poison, <vscale x 8 x bfloat> [[OP1]], bfloat [[OP2]], <vscale x 8 x i1> [[MASK]], i64 7, i64 [[VL]], i64 3)
+// CHECK-RV64-NEXT: ret <vscale x 8 x bfloat> [[TMP0]]
+//
+vbfloat16m2_t test_vfrsub_vf_bf16m2_m(vbool8_t mask, vbfloat16m2_t op1, __bf16 op2, size_t vl) {
+ return __riscv_vfrsub(mask, op1, op2, vl);
+}
+
+// CHECK-RV64-LABEL: define dso_local <vscale x 16 x bfloat> @test_vfrsub_vf_bf16m4_m(
+// CHECK-RV64-SAME: <vscale x 16 x i1> [[MASK:%.*]], <vscale x 16 x bfloat> [[OP1:%.*]], bfloat noundef [[OP2:%.*]], i64 noundef [[VL:%.*]]) #[[ATTR0]] {
+// CHECK-RV64-NEXT: [[ENTRY:.*:]]
+// CHECK-RV64-NEXT: [[TMP0:%.*]] = call <vscale x 16 x bfloat> @llvm.riscv.vfrsub.mask.nxv16bf16.bf16.i64(<vscale x 16 x bfloat> poison, <vscale x 16 x bfloat> [[OP1]], bfloat [[OP2]], <vscale x 16 x i1> [[MASK]], i64 7, i64 [[VL]], i64 3)
+// CHECK-RV64-NEXT: ret <vscale x 16 x bfloat> [[TMP0]]
+//
+vbfloat16m4_t test_vfrsub_vf_bf16m4_m(vbool4_t mask, vbfloat16m4_t op1, __bf16 op2, size_t vl) {
+ return __riscv_vfrsub(mask, op1, op2, vl);
+}
+
+// CHECK-RV64-LABEL: define dso_local <vscale x 32 x bfloat> @test_vfrsub_vf_bf16m8_m(
+// CHECK-RV64-SAME: <vscale x 32 x i1> [[MASK:%.*]], <vscale x 32 x bfloat> [[OP1:%.*]], bfloat noundef [[OP2:%.*]], i64 noundef [[VL:%.*]]) #[[ATTR0]] {
+// CHECK-RV64-NEXT: [[ENTRY:.*:]]
+// CHECK-RV64-NEXT: [[TMP0:%.*]] = call <vscale x 32 x bfloat> @llvm.riscv.vfrsub.mask.nxv32bf16.bf16.i64(<vscale x 32 x bfloat> poison, <vscale x 32 x bfloat> [[OP1]], bfloat [[OP2]], <vscale x 32 x i1> [[MASK]], i64 7, i64 [[VL]], i64 3)
+// CHECK-RV64-NEXT: ret <vscale x 32 x bfloat> [[TMP0]]
+//
+vbfloat16m8_t test_vfrsub_vf_bf16m8_m(vbool2_t mask, vbfloat16m8_t op1, __bf16 op2, size_t vl) {
+ return __riscv_vfrsub(mask, op1, op2, vl);
+}
+
diff --git a/clang/test/CodeGen/RISCV/rvv-intrinsics-autogenerated/zvfbfa/non-policy/overloaded/vfsgnj.c b/clang/test/CodeGen/RISCV/rvv-intrinsics-autogenerated/zvfbfa/non-policy/overloaded/vfsgnj.c
new file mode 100644
index 0000000..615dedd
--- /dev/null
+++ b/clang/test/CodeGen/RISCV/rvv-intrinsics-autogenerated/zvfbfa/non-policy/overloaded/vfsgnj.c
@@ -0,0 +1,249 @@
+// NOTE: Assertions have been autogenerated by utils/update_cc_test_checks.py UTC_ARGS: --version 5
+// REQUIRES: riscv-registered-target
+// RUN: %clang_cc1 -triple riscv64 -target-feature +v \
+// RUN: -target-feature +experimental-zvfbfa -disable-O0-optnone \
+// RUN: -emit-llvm %s -o - | opt -S -passes=mem2reg | \
+// RUN: FileCheck --check-prefix=CHECK-RV64 %s
+
+#include <riscv_vector.h>
+
+// CHECK-RV64-LABEL: define dso_local <vscale x 1 x bfloat> @test_vfsgnj_vv_bf16mf4(
+// CHECK-RV64-SAME: <vscale x 1 x bfloat> [[OP1:%.*]], <vscale x 1 x bfloat> [[OP2:%.*]], i64 noundef [[VL:%.*]]) #[[ATTR0:[0-9]+]] {
+// CHECK-RV64-NEXT: [[ENTRY:.*:]]
+// CHECK-RV64-NEXT: [[TMP0:%.*]] = call <vscale x 1 x bfloat> @llvm.riscv.vfsgnj.nxv1bf16.nxv1bf16.i64(<vscale x 1 x bfloat> poison, <vscale x 1 x bfloat> [[OP1]], <vscale x 1 x bfloat> [[OP2]], i64 [[VL]])
+// CHECK-RV64-NEXT: ret <vscale x 1 x bfloat> [[TMP0]]
+//
+vbfloat16mf4_t test_vfsgnj_vv_bf16mf4(vbfloat16mf4_t op1, vbfloat16mf4_t op2, size_t vl) {
+ return __riscv_vfsgnj(op1, op2, vl);
+}
+
+// CHECK-RV64-LABEL: define dso_local <vscale x 1 x bfloat> @test_vfsgnj_vf_bf16mf4(
+// CHECK-RV64-SAME: <vscale x 1 x bfloat> [[OP1:%.*]], bfloat noundef [[OP2:%.*]], i64 noundef [[VL:%.*]]) #[[ATTR0]] {
+// CHECK-RV64-NEXT: [[ENTRY:.*:]]
+// CHECK-RV64-NEXT: [[TMP0:%.*]] = call <vscale x 1 x bfloat> @llvm.riscv.vfsgnj.nxv1bf16.bf16.i64(<vscale x 1 x bfloat> poison, <vscale x 1 x bfloat> [[OP1]], bfloat [[OP2]], i64 [[VL]])
+// CHECK-RV64-NEXT: ret <vscale x 1 x bfloat> [[TMP0]]
+//
+vbfloat16mf4_t test_vfsgnj_vf_bf16mf4(vbfloat16mf4_t op1, __bf16 op2, size_t vl) {
+ return __riscv_vfsgnj(op1, op2, vl);
+}
+
+// CHECK-RV64-LABEL: define dso_local <vscale x 2 x bfloat> @test_vfsgnj_vv_bf16mf2(
+// CHECK-RV64-SAME: <vscale x 2 x bfloat> [[OP1:%.*]], <vscale x 2 x bfloat> [[OP2:%.*]], i64 noundef [[VL:%.*]]) #[[ATTR0]] {
+// CHECK-RV64-NEXT: [[ENTRY:.*:]]
+// CHECK-RV64-NEXT: [[TMP0:%.*]] = call <vscale x 2 x bfloat> @llvm.riscv.vfsgnj.nxv2bf16.nxv2bf16.i64(<vscale x 2 x bfloat> poison, <vscale x 2 x bfloat> [[OP1]], <vscale x 2 x bfloat> [[OP2]], i64 [[VL]])
+// CHECK-RV64-NEXT: ret <vscale x 2 x bfloat> [[TMP0]]
+//
+vbfloat16mf2_t test_vfsgnj_vv_bf16mf2(vbfloat16mf2_t op1, vbfloat16mf2_t op2, size_t vl) {
+ return __riscv_vfsgnj(op1, op2, vl);
+}
+
+// CHECK-RV64-LABEL: define dso_local <vscale x 2 x bfloat> @test_vfsgnj_vf_bf16mf2(
+// CHECK-RV64-SAME: <vscale x 2 x bfloat> [[OP1:%.*]], bfloat noundef [[OP2:%.*]], i64 noundef [[VL:%.*]]) #[[ATTR0]] {
+// CHECK-RV64-NEXT: [[ENTRY:.*:]]
+// CHECK-RV64-NEXT: [[TMP0:%.*]] = call <vscale x 2 x bfloat> @llvm.riscv.vfsgnj.nxv2bf16.bf16.i64(<vscale x 2 x bfloat> poison, <vscale x 2 x bfloat> [[OP1]], bfloat [[OP2]], i64 [[VL]])
+// CHECK-RV64-NEXT: ret <vscale x 2 x bfloat> [[TMP0]]
+//
+vbfloat16mf2_t test_vfsgnj_vf_bf16mf2(vbfloat16mf2_t op1, __bf16 op2, size_t vl) {
+ return __riscv_vfsgnj(op1, op2, vl);
+}
+
+// CHECK-RV64-LABEL: define dso_local <vscale x 4 x bfloat> @test_vfsgnj_vv_bf16m1(
+// CHECK-RV64-SAME: <vscale x 4 x bfloat> [[OP1:%.*]], <vscale x 4 x bfloat> [[OP2:%.*]], i64 noundef [[VL:%.*]]) #[[ATTR0]] {
+// CHECK-RV64-NEXT: [[ENTRY:.*:]]
+// CHECK-RV64-NEXT: [[TMP0:%.*]] = call <vscale x 4 x bfloat> @llvm.riscv.vfsgnj.nxv4bf16.nxv4bf16.i64(<vscale x 4 x bfloat> poison, <vscale x 4 x bfloat> [[OP1]], <vscale x 4 x bfloat> [[OP2]], i64 [[VL]])
+// CHECK-RV64-NEXT: ret <vscale x 4 x bfloat> [[TMP0]]
+//
+vbfloat16m1_t test_vfsgnj_vv_bf16m1(vbfloat16m1_t op1, vbfloat16m1_t op2, size_t vl) {
+ return __riscv_vfsgnj(op1, op2, vl);
+}
+
+// CHECK-RV64-LABEL: define dso_local <vscale x 4 x bfloat> @test_vfsgnj_vf_bf16m1(
+// CHECK-RV64-SAME: <vscale x 4 x bfloat> [[OP1:%.*]], bfloat noundef [[OP2:%.*]], i64 noundef [[VL:%.*]]) #[[ATTR0]] {
+// CHECK-RV64-NEXT: [[ENTRY:.*:]]
+// CHECK-RV64-NEXT: [[TMP0:%.*]] = call <vscale x 4 x bfloat> @llvm.riscv.vfsgnj.nxv4bf16.bf16.i64(<vscale x 4 x bfloat> poison, <vscale x 4 x bfloat> [[OP1]], bfloat [[OP2]], i64 [[VL]])
+// CHECK-RV64-NEXT: ret <vscale x 4 x bfloat> [[TMP0]]
+//
+vbfloat16m1_t test_vfsgnj_vf_bf16m1(vbfloat16m1_t op1, __bf16 op2, size_t vl) {
+ return __riscv_vfsgnj(op1, op2, vl);
+}
+
+// CHECK-RV64-LABEL: define dso_local <vscale x 8 x bfloat> @test_vfsgnj_vv_bf16m2(
+// CHECK-RV64-SAME: <vscale x 8 x bfloat> [[OP1:%.*]], <vscale x 8 x bfloat> [[OP2:%.*]], i64 noundef [[VL:%.*]]) #[[ATTR0]] {
+// CHECK-RV64-NEXT: [[ENTRY:.*:]]
+// CHECK-RV64-NEXT: [[TMP0:%.*]] = call <vscale x 8 x bfloat> @llvm.riscv.vfsgnj.nxv8bf16.nxv8bf16.i64(<vscale x 8 x bfloat> poison, <vscale x 8 x bfloat> [[OP1]], <vscale x 8 x bfloat> [[OP2]], i64 [[VL]])
+// CHECK-RV64-NEXT: ret <vscale x 8 x bfloat> [[TMP0]]
+//
+vbfloat16m2_t test_vfsgnj_vv_bf16m2(vbfloat16m2_t op1, vbfloat16m2_t op2, size_t vl) {
+ return __riscv_vfsgnj(op1, op2, vl);
+}
+
+// CHECK-RV64-LABEL: define dso_local <vscale x 8 x bfloat> @test_vfsgnj_vf_bf16m2(
+// CHECK-RV64-SAME: <vscale x 8 x bfloat> [[OP1:%.*]], bfloat noundef [[OP2:%.*]], i64 noundef [[VL:%.*]]) #[[ATTR0]] {
+// CHECK-RV64-NEXT: [[ENTRY:.*:]]
+// CHECK-RV64-NEXT: [[TMP0:%.*]] = call <vscale x 8 x bfloat> @llvm.riscv.vfsgnj.nxv8bf16.bf16.i64(<vscale x 8 x bfloat> poison, <vscale x 8 x bfloat> [[OP1]], bfloat [[OP2]], i64 [[VL]])
+// CHECK-RV64-NEXT: ret <vscale x 8 x bfloat> [[TMP0]]
+//
+vbfloat16m2_t test_vfsgnj_vf_bf16m2(vbfloat16m2_t op1, __bf16 op2, size_t vl) {
+ return __riscv_vfsgnj(op1, op2, vl);
+}
+
+// CHECK-RV64-LABEL: define dso_local <vscale x 16 x bfloat> @test_vfsgnj_vv_bf16m4(
+// CHECK-RV64-SAME: <vscale x 16 x bfloat> [[OP1:%.*]], <vscale x 16 x bfloat> [[OP2:%.*]], i64 noundef [[VL:%.*]]) #[[ATTR0]] {
+// CHECK-RV64-NEXT: [[ENTRY:.*:]]
+// CHECK-RV64-NEXT: [[TMP0:%.*]] = call <vscale x 16 x bfloat> @llvm.riscv.vfsgnj.nxv16bf16.nxv16bf16.i64(<vscale x 16 x bfloat> poison, <vscale x 16 x bfloat> [[OP1]], <vscale x 16 x bfloat> [[OP2]], i64 [[VL]])
+// CHECK-RV64-NEXT: ret <vscale x 16 x bfloat> [[TMP0]]
+//
+vbfloat16m4_t test_vfsgnj_vv_bf16m4(vbfloat16m4_t op1, vbfloat16m4_t op2, size_t vl) {
+ return __riscv_vfsgnj(op1, op2, vl);
+}
+
+// CHECK-RV64-LABEL: define dso_local <vscale x 16 x bfloat> @test_vfsgnj_vf_bf16m4(
+// CHECK-RV64-SAME: <vscale x 16 x bfloat> [[OP1:%.*]], bfloat noundef [[OP2:%.*]], i64 noundef [[VL:%.*]]) #[[ATTR0]] {
+// CHECK-RV64-NEXT: [[ENTRY:.*:]]
+// CHECK-RV64-NEXT: [[TMP0:%.*]] = call <vscale x 16 x bfloat> @llvm.riscv.vfsgnj.nxv16bf16.bf16.i64(<vscale x 16 x bfloat> poison, <vscale x 16 x bfloat> [[OP1]], bfloat [[OP2]], i64 [[VL]])
+// CHECK-RV64-NEXT: ret <vscale x 16 x bfloat> [[TMP0]]
+//
+vbfloat16m4_t test_vfsgnj_vf_bf16m4(vbfloat16m4_t op1, __bf16 op2, size_t vl) {
+ return __riscv_vfsgnj(op1, op2, vl);
+}
+
+// CHECK-RV64-LABEL: define dso_local <vscale x 32 x bfloat> @test_vfsgnj_vv_bf16m8(
+// CHECK-RV64-SAME: <vscale x 32 x bfloat> [[OP1:%.*]], <vscale x 32 x bfloat> [[OP2:%.*]], i64 noundef [[VL:%.*]]) #[[ATTR0]] {
+// CHECK-RV64-NEXT: [[ENTRY:.*:]]
+// CHECK-RV64-NEXT: [[TMP0:%.*]] = call <vscale x 32 x bfloat> @llvm.riscv.vfsgnj.nxv32bf16.nxv32bf16.i64(<vscale x 32 x bfloat> poison, <vscale x 32 x bfloat> [[OP1]], <vscale x 32 x bfloat> [[OP2]], i64 [[VL]])
+// CHECK-RV64-NEXT: ret <vscale x 32 x bfloat> [[TMP0]]
+//
+vbfloat16m8_t test_vfsgnj_vv_bf16m8(vbfloat16m8_t op1, vbfloat16m8_t op2, size_t vl) {
+ return __riscv_vfsgnj(op1, op2, vl);
+}
+
+// CHECK-RV64-LABEL: define dso_local <vscale x 32 x bfloat> @test_vfsgnj_vf_bf16m8(
+// CHECK-RV64-SAME: <vscale x 32 x bfloat> [[OP1:%.*]], bfloat noundef [[OP2:%.*]], i64 noundef [[VL:%.*]]) #[[ATTR0]] {
+// CHECK-RV64-NEXT: [[ENTRY:.*:]]
+// CHECK-RV64-NEXT: [[TMP0:%.*]] = call <vscale x 32 x bfloat> @llvm.riscv.vfsgnj.nxv32bf16.bf16.i64(<vscale x 32 x bfloat> poison, <vscale x 32 x bfloat> [[OP1]], bfloat [[OP2]], i64 [[VL]])
+// CHECK-RV64-NEXT: ret <vscale x 32 x bfloat> [[TMP0]]
+//
+vbfloat16m8_t test_vfsgnj_vf_bf16m8(vbfloat16m8_t op1, __bf16 op2, size_t vl) {
+ return __riscv_vfsgnj(op1, op2, vl);
+}
+
+// CHECK-RV64-LABEL: define dso_local <vscale x 1 x bfloat> @test_vfsgnj_vv_bf16mf4_m(
+// CHECK-RV64-SAME: <vscale x 1 x i1> [[MASK:%.*]], <vscale x 1 x bfloat> [[OP1:%.*]], <vscale x 1 x bfloat> [[OP2:%.*]], i64 noundef [[VL:%.*]]) #[[ATTR0]] {
+// CHECK-RV64-NEXT: [[ENTRY:.*:]]
+// CHECK-RV64-NEXT: [[TMP0:%.*]] = call <vscale x 1 x bfloat> @llvm.riscv.vfsgnj.mask.nxv1bf16.nxv1bf16.i64(<vscale x 1 x bfloat> poison, <vscale x 1 x bfloat> [[OP1]], <vscale x 1 x bfloat> [[OP2]], <vscale x 1 x i1> [[MASK]], i64 [[VL]], i64 3)
+// CHECK-RV64-NEXT: ret <vscale x 1 x bfloat> [[TMP0]]
+//
+vbfloat16mf4_t test_vfsgnj_vv_bf16mf4_m(vbool64_t mask, vbfloat16mf4_t op1, vbfloat16mf4_t op2, size_t vl) {
+ return __riscv_vfsgnj(mask, op1, op2, vl);
+}
+
+// CHECK-RV64-LABEL: define dso_local <vscale x 1 x bfloat> @test_vfsgnj_vf_bf16mf4_m(
+// CHECK-RV64-SAME: <vscale x 1 x i1> [[MASK:%.*]], <vscale x 1 x bfloat> [[OP1:%.*]], bfloat noundef [[OP2:%.*]], i64 noundef [[VL:%.*]]) #[[ATTR0]] {
+// CHECK-RV64-NEXT: [[ENTRY:.*:]]
+// CHECK-RV64-NEXT: [[TMP0:%.*]] = call <vscale x 1 x bfloat> @llvm.riscv.vfsgnj.mask.nxv1bf16.bf16.i64(<vscale x 1 x bfloat> poison, <vscale x 1 x bfloat> [[OP1]], bfloat [[OP2]], <vscale x 1 x i1> [[MASK]], i64 [[VL]], i64 3)
+// CHECK-RV64-NEXT: ret <vscale x 1 x bfloat> [[TMP0]]
+//
+vbfloat16mf4_t test_vfsgnj_vf_bf16mf4_m(vbool64_t mask, vbfloat16mf4_t op1, __bf16 op2, size_t vl) {
+ return __riscv_vfsgnj(mask, op1, op2, vl);
+}
+
+// CHECK-RV64-LABEL: define dso_local <vscale x 2 x bfloat> @test_vfsgnj_vv_bf16mf2_m(
+// CHECK-RV64-SAME: <vscale x 2 x i1> [[MASK:%.*]], <vscale x 2 x bfloat> [[OP1:%.*]], <vscale x 2 x bfloat> [[OP2:%.*]], i64 noundef [[VL:%.*]]) #[[ATTR0]] {
+// CHECK-RV64-NEXT: [[ENTRY:.*:]]
+// CHECK-RV64-NEXT: [[TMP0:%.*]] = call <vscale x 2 x bfloat> @llvm.riscv.vfsgnj.mask.nxv2bf16.nxv2bf16.i64(<vscale x 2 x bfloat> poison, <vscale x 2 x bfloat> [[OP1]], <vscale x 2 x bfloat> [[OP2]], <vscale x 2 x i1> [[MASK]], i64 [[VL]], i64 3)
+// CHECK-RV64-NEXT: ret <vscale x 2 x bfloat> [[TMP0]]
+//
+vbfloat16mf2_t test_vfsgnj_vv_bf16mf2_m(vbool32_t mask, vbfloat16mf2_t op1, vbfloat16mf2_t op2, size_t vl) {
+ return __riscv_vfsgnj(mask, op1, op2, vl);
+}
+
+// CHECK-RV64-LABEL: define dso_local <vscale x 2 x bfloat> @test_vfsgnj_vf_bf16mf2_m(
+// CHECK-RV64-SAME: <vscale x 2 x i1> [[MASK:%.*]], <vscale x 2 x bfloat> [[OP1:%.*]], bfloat noundef [[OP2:%.*]], i64 noundef [[VL:%.*]]) #[[ATTR0]] {
+// CHECK-RV64-NEXT: [[ENTRY:.*:]]
+// CHECK-RV64-NEXT: [[TMP0:%.*]] = call <vscale x 2 x bfloat> @llvm.riscv.vfsgnj.mask.nxv2bf16.bf16.i64(<vscale x 2 x bfloat> poison, <vscale x 2 x bfloat> [[OP1]], bfloat [[OP2]], <vscale x 2 x i1> [[MASK]], i64 [[VL]], i64 3)
+// CHECK-RV64-NEXT: ret <vscale x 2 x bfloat> [[TMP0]]
+//
+vbfloat16mf2_t test_vfsgnj_vf_bf16mf2_m(vbool32_t mask, vbfloat16mf2_t op1, __bf16 op2, size_t vl) {
+ return __riscv_vfsgnj(mask, op1, op2, vl);
+}
+
+// CHECK-RV64-LABEL: define dso_local <vscale x 4 x bfloat> @test_vfsgnj_vv_bf16m1_m(
+// CHECK-RV64-SAME: <vscale x 4 x i1> [[MASK:%.*]], <vscale x 4 x bfloat> [[OP1:%.*]], <vscale x 4 x bfloat> [[OP2:%.*]], i64 noundef [[VL:%.*]]) #[[ATTR0]] {
+// CHECK-RV64-NEXT: [[ENTRY:.*:]]
+// CHECK-RV64-NEXT: [[TMP0:%.*]] = call <vscale x 4 x bfloat> @llvm.riscv.vfsgnj.mask.nxv4bf16.nxv4bf16.i64(<vscale x 4 x bfloat> poison, <vscale x 4 x bfloat> [[OP1]], <vscale x 4 x bfloat> [[OP2]], <vscale x 4 x i1> [[MASK]], i64 [[VL]], i64 3)
+// CHECK-RV64-NEXT: ret <vscale x 4 x bfloat> [[TMP0]]
+//
+vbfloat16m1_t test_vfsgnj_vv_bf16m1_m(vbool16_t mask, vbfloat16m1_t op1, vbfloat16m1_t op2, size_t vl) {
+ return __riscv_vfsgnj(mask, op1, op2, vl);
+}
+
+// CHECK-RV64-LABEL: define dso_local <vscale x 4 x bfloat> @test_vfsgnj_vf_bf16m1_m(
+// CHECK-RV64-SAME: <vscale x 4 x i1> [[MASK:%.*]], <vscale x 4 x bfloat> [[OP1:%.*]], bfloat noundef [[OP2:%.*]], i64 noundef [[VL:%.*]]) #[[ATTR0]] {
+// CHECK-RV64-NEXT: [[ENTRY:.*:]]
+// CHECK-RV64-NEXT: [[TMP0:%.*]] = call <vscale x 4 x bfloat> @llvm.riscv.vfsgnj.mask.nxv4bf16.bf16.i64(<vscale x 4 x bfloat> poison, <vscale x 4 x bfloat> [[OP1]], bfloat [[OP2]], <vscale x 4 x i1> [[MASK]], i64 [[VL]], i64 3)
+// CHECK-RV64-NEXT: ret <vscale x 4 x bfloat> [[TMP0]]
+//
+vbfloat16m1_t test_vfsgnj_vf_bf16m1_m(vbool16_t mask, vbfloat16m1_t op1, __bf16 op2, size_t vl) {
+ return __riscv_vfsgnj(mask, op1, op2, vl);
+}
+
+// CHECK-RV64-LABEL: define dso_local <vscale x 8 x bfloat> @test_vfsgnj_vv_bf16m2_m(
+// CHECK-RV64-SAME: <vscale x 8 x i1> [[MASK:%.*]], <vscale x 8 x bfloat> [[OP1:%.*]], <vscale x 8 x bfloat> [[OP2:%.*]], i64 noundef [[VL:%.*]]) #[[ATTR0]] {
+// CHECK-RV64-NEXT: [[ENTRY:.*:]]
+// CHECK-RV64-NEXT: [[TMP0:%.*]] = call <vscale x 8 x bfloat> @llvm.riscv.vfsgnj.mask.nxv8bf16.nxv8bf16.i64(<vscale x 8 x bfloat> poison, <vscale x 8 x bfloat> [[OP1]], <vscale x 8 x bfloat> [[OP2]], <vscale x 8 x i1> [[MASK]], i64 [[VL]], i64 3)
+// CHECK-RV64-NEXT: ret <vscale x 8 x bfloat> [[TMP0]]
+//
+vbfloat16m2_t test_vfsgnj_vv_bf16m2_m(vbool8_t mask, vbfloat16m2_t op1, vbfloat16m2_t op2, size_t vl) {
+ return __riscv_vfsgnj(mask, op1, op2, vl);
+}
+
+// CHECK-RV64-LABEL: define dso_local <vscale x 8 x bfloat> @test_vfsgnj_vf_bf16m2_m(
+// CHECK-RV64-SAME: <vscale x 8 x i1> [[MASK:%.*]], <vscale x 8 x bfloat> [[OP1:%.*]], bfloat noundef [[OP2:%.*]], i64 noundef [[VL:%.*]]) #[[ATTR0]] {
+// CHECK-RV64-NEXT: [[ENTRY:.*:]]
+// CHECK-RV64-NEXT: [[TMP0:%.*]] = call <vscale x 8 x bfloat> @llvm.riscv.vfsgnj.mask.nxv8bf16.bf16.i64(<vscale x 8 x bfloat> poison, <vscale x 8 x bfloat> [[OP1]], bfloat [[OP2]], <vscale x 8 x i1> [[MASK]], i64 [[VL]], i64 3)
+// CHECK-RV64-NEXT: ret <vscale x 8 x bfloat> [[TMP0]]
+//
+vbfloat16m2_t test_vfsgnj_vf_bf16m2_m(vbool8_t mask, vbfloat16m2_t op1, __bf16 op2, size_t vl) {
+ return __riscv_vfsgnj(mask, op1, op2, vl);
+}
+
+// CHECK-RV64-LABEL: define dso_local <vscale x 16 x bfloat> @test_vfsgnj_vv_bf16m4_m(
+// CHECK-RV64-SAME: <vscale x 16 x i1> [[MASK:%.*]], <vscale x 16 x bfloat> [[OP1:%.*]], <vscale x 16 x bfloat> [[OP2:%.*]], i64 noundef [[VL:%.*]]) #[[ATTR0]] {
+// CHECK-RV64-NEXT: [[ENTRY:.*:]]
+// CHECK-RV64-NEXT: [[TMP0:%.*]] = call <vscale x 16 x bfloat> @llvm.riscv.vfsgnj.mask.nxv16bf16.nxv16bf16.i64(<vscale x 16 x bfloat> poison, <vscale x 16 x bfloat> [[OP1]], <vscale x 16 x bfloat> [[OP2]], <vscale x 16 x i1> [[MASK]], i64 [[VL]], i64 3)
+// CHECK-RV64-NEXT: ret <vscale x 16 x bfloat> [[TMP0]]
+//
+vbfloat16m4_t test_vfsgnj_vv_bf16m4_m(vbool4_t mask, vbfloat16m4_t op1, vbfloat16m4_t op2, size_t vl) {
+ return __riscv_vfsgnj(mask, op1, op2, vl);
+}
+
+// CHECK-RV64-LABEL: define dso_local <vscale x 16 x bfloat> @test_vfsgnj_vf_bf16m4_m(
+// CHECK-RV64-SAME: <vscale x 16 x i1> [[MASK:%.*]], <vscale x 16 x bfloat> [[OP1:%.*]], bfloat noundef [[OP2:%.*]], i64 noundef [[VL:%.*]]) #[[ATTR0]] {
+// CHECK-RV64-NEXT: [[ENTRY:.*:]]
+// CHECK-RV64-NEXT: [[TMP0:%.*]] = call <vscale x 16 x bfloat> @llvm.riscv.vfsgnj.mask.nxv16bf16.bf16.i64(<vscale x 16 x bfloat> poison, <vscale x 16 x bfloat> [[OP1]], bfloat [[OP2]], <vscale x 16 x i1> [[MASK]], i64 [[VL]], i64 3)
+// CHECK-RV64-NEXT: ret <vscale x 16 x bfloat> [[TMP0]]
+//
+vbfloat16m4_t test_vfsgnj_vf_bf16m4_m(vbool4_t mask, vbfloat16m4_t op1, __bf16 op2, size_t vl) {
+ return __riscv_vfsgnj(mask, op1, op2, vl);
+}
+
+// CHECK-RV64-LABEL: define dso_local <vscale x 32 x bfloat> @test_vfsgnj_vv_bf16m8_m(
+// CHECK-RV64-SAME: <vscale x 32 x i1> [[MASK:%.*]], <vscale x 32 x bfloat> [[OP1:%.*]], <vscale x 32 x bfloat> [[OP2:%.*]], i64 noundef [[VL:%.*]]) #[[ATTR0]] {
+// CHECK-RV64-NEXT: [[ENTRY:.*:]]
+// CHECK-RV64-NEXT: [[TMP0:%.*]] = call <vscale x 32 x bfloat> @llvm.riscv.vfsgnj.mask.nxv32bf16.nxv32bf16.i64(<vscale x 32 x bfloat> poison, <vscale x 32 x bfloat> [[OP1]], <vscale x 32 x bfloat> [[OP2]], <vscale x 32 x i1> [[MASK]], i64 [[VL]], i64 3)
+// CHECK-RV64-NEXT: ret <vscale x 32 x bfloat> [[TMP0]]
+//
+vbfloat16m8_t test_vfsgnj_vv_bf16m8_m(vbool2_t mask, vbfloat16m8_t op1, vbfloat16m8_t op2, size_t vl) {
+ return __riscv_vfsgnj(mask, op1, op2, vl);
+}
+
+// CHECK-RV64-LABEL: define dso_local <vscale x 32 x bfloat> @test_vfsgnj_vf_bf16m8_m(
+// CHECK-RV64-SAME: <vscale x 32 x i1> [[MASK:%.*]], <vscale x 32 x bfloat> [[OP1:%.*]], bfloat noundef [[OP2:%.*]], i64 noundef [[VL:%.*]]) #[[ATTR0]] {
+// CHECK-RV64-NEXT: [[ENTRY:.*:]]
+// CHECK-RV64-NEXT: [[TMP0:%.*]] = call <vscale x 32 x bfloat> @llvm.riscv.vfsgnj.mask.nxv32bf16.bf16.i64(<vscale x 32 x bfloat> poison, <vscale x 32 x bfloat> [[OP1]], bfloat [[OP2]], <vscale x 32 x i1> [[MASK]], i64 [[VL]], i64 3)
+// CHECK-RV64-NEXT: ret <vscale x 32 x bfloat> [[TMP0]]
+//
+vbfloat16m8_t test_vfsgnj_vf_bf16m8_m(vbool2_t mask, vbfloat16m8_t op1, __bf16 op2, size_t vl) {
+ return __riscv_vfsgnj(mask, op1, op2, vl);
+}
+
diff --git a/clang/test/CodeGen/RISCV/rvv-intrinsics-autogenerated/zvfbfa/non-policy/overloaded/vfsgnjn.c b/clang/test/CodeGen/RISCV/rvv-intrinsics-autogenerated/zvfbfa/non-policy/overloaded/vfsgnjn.c
new file mode 100644
index 0000000..a895e5f
--- /dev/null
+++ b/clang/test/CodeGen/RISCV/rvv-intrinsics-autogenerated/zvfbfa/non-policy/overloaded/vfsgnjn.c
@@ -0,0 +1,249 @@
+// NOTE: Assertions have been autogenerated by utils/update_cc_test_checks.py UTC_ARGS: --version 5
+// REQUIRES: riscv-registered-target
+// RUN: %clang_cc1 -triple riscv64 -target-feature +v \
+// RUN: -target-feature +experimental-zvfbfa -disable-O0-optnone \
+// RUN: -emit-llvm %s -o - | opt -S -passes=mem2reg | \
+// RUN: FileCheck --check-prefix=CHECK-RV64 %s
+
+#include <riscv_vector.h>
+
+// CHECK-RV64-LABEL: define dso_local <vscale x 1 x bfloat> @test_vfsgnjn_vv_bf16mf4(
+// CHECK-RV64-SAME: <vscale x 1 x bfloat> [[OP1:%.*]], <vscale x 1 x bfloat> [[OP2:%.*]], i64 noundef [[VL:%.*]]) #[[ATTR0:[0-9]+]] {
+// CHECK-RV64-NEXT: [[ENTRY:.*:]]
+// CHECK-RV64-NEXT: [[TMP0:%.*]] = call <vscale x 1 x bfloat> @llvm.riscv.vfsgnjn.nxv1bf16.nxv1bf16.i64(<vscale x 1 x bfloat> poison, <vscale x 1 x bfloat> [[OP1]], <vscale x 1 x bfloat> [[OP2]], i64 [[VL]])
+// CHECK-RV64-NEXT: ret <vscale x 1 x bfloat> [[TMP0]]
+//
+vbfloat16mf4_t test_vfsgnjn_vv_bf16mf4(vbfloat16mf4_t op1, vbfloat16mf4_t op2, size_t vl) {
+ return __riscv_vfsgnjn(op1, op2, vl);
+}
+
+// CHECK-RV64-LABEL: define dso_local <vscale x 1 x bfloat> @test_vfsgnjn_vf_bf16mf4(
+// CHECK-RV64-SAME: <vscale x 1 x bfloat> [[OP1:%.*]], bfloat noundef [[OP2:%.*]], i64 noundef [[VL:%.*]]) #[[ATTR0]] {
+// CHECK-RV64-NEXT: [[ENTRY:.*:]]
+// CHECK-RV64-NEXT: [[TMP0:%.*]] = call <vscale x 1 x bfloat> @llvm.riscv.vfsgnjn.nxv1bf16.bf16.i64(<vscale x 1 x bfloat> poison, <vscale x 1 x bfloat> [[OP1]], bfloat [[OP2]], i64 [[VL]])
+// CHECK-RV64-NEXT: ret <vscale x 1 x bfloat> [[TMP0]]
+//
+vbfloat16mf4_t test_vfsgnjn_vf_bf16mf4(vbfloat16mf4_t op1, __bf16 op2, size_t vl) {
+ return __riscv_vfsgnjn(op1, op2, vl);
+}
+
+// CHECK-RV64-LABEL: define dso_local <vscale x 2 x bfloat> @test_vfsgnjn_vv_bf16mf2(
+// CHECK-RV64-SAME: <vscale x 2 x bfloat> [[OP1:%.*]], <vscale x 2 x bfloat> [[OP2:%.*]], i64 noundef [[VL:%.*]]) #[[ATTR0]] {
+// CHECK-RV64-NEXT: [[ENTRY:.*:]]
+// CHECK-RV64-NEXT: [[TMP0:%.*]] = call <vscale x 2 x bfloat> @llvm.riscv.vfsgnjn.nxv2bf16.nxv2bf16.i64(<vscale x 2 x bfloat> poison, <vscale x 2 x bfloat> [[OP1]], <vscale x 2 x bfloat> [[OP2]], i64 [[VL]])
+// CHECK-RV64-NEXT: ret <vscale x 2 x bfloat> [[TMP0]]
+//
+vbfloat16mf2_t test_vfsgnjn_vv_bf16mf2(vbfloat16mf2_t op1, vbfloat16mf2_t op2, size_t vl) {
+ return __riscv_vfsgnjn(op1, op2, vl);
+}
+
+// CHECK-RV64-LABEL: define dso_local <vscale x 2 x bfloat> @test_vfsgnjn_vf_bf16mf2(
+// CHECK-RV64-SAME: <vscale x 2 x bfloat> [[OP1:%.*]], bfloat noundef [[OP2:%.*]], i64 noundef [[VL:%.*]]) #[[ATTR0]] {
+// CHECK-RV64-NEXT: [[ENTRY:.*:]]
+// CHECK-RV64-NEXT: [[TMP0:%.*]] = call <vscale x 2 x bfloat> @llvm.riscv.vfsgnjn.nxv2bf16.bf16.i64(<vscale x 2 x bfloat> poison, <vscale x 2 x bfloat> [[OP1]], bfloat [[OP2]], i64 [[VL]])
+// CHECK-RV64-NEXT: ret <vscale x 2 x bfloat> [[TMP0]]
+//
+vbfloat16mf2_t test_vfsgnjn_vf_bf16mf2(vbfloat16mf2_t op1, __bf16 op2, size_t vl) {
+ return __riscv_vfsgnjn(op1, op2, vl);
+}
+
+// CHECK-RV64-LABEL: define dso_local <vscale x 4 x bfloat> @test_vfsgnjn_vv_bf16m1(
+// CHECK-RV64-SAME: <vscale x 4 x bfloat> [[OP1:%.*]], <vscale x 4 x bfloat> [[OP2:%.*]], i64 noundef [[VL:%.*]]) #[[ATTR0]] {
+// CHECK-RV64-NEXT: [[ENTRY:.*:]]
+// CHECK-RV64-NEXT: [[TMP0:%.*]] = call <vscale x 4 x bfloat> @llvm.riscv.vfsgnjn.nxv4bf16.nxv4bf16.i64(<vscale x 4 x bfloat> poison, <vscale x 4 x bfloat> [[OP1]], <vscale x 4 x bfloat> [[OP2]], i64 [[VL]])
+// CHECK-RV64-NEXT: ret <vscale x 4 x bfloat> [[TMP0]]
+//
+vbfloat16m1_t test_vfsgnjn_vv_bf16m1(vbfloat16m1_t op1, vbfloat16m1_t op2, size_t vl) {
+ return __riscv_vfsgnjn(op1, op2, vl);
+}
+
+// CHECK-RV64-LABEL: define dso_local <vscale x 4 x bfloat> @test_vfsgnjn_vf_bf16m1(
+// CHECK-RV64-SAME: <vscale x 4 x bfloat> [[OP1:%.*]], bfloat noundef [[OP2:%.*]], i64 noundef [[VL:%.*]]) #[[ATTR0]] {
+// CHECK-RV64-NEXT: [[ENTRY:.*:]]
+// CHECK-RV64-NEXT: [[TMP0:%.*]] = call <vscale x 4 x bfloat> @llvm.riscv.vfsgnjn.nxv4bf16.bf16.i64(<vscale x 4 x bfloat> poison, <vscale x 4 x bfloat> [[OP1]], bfloat [[OP2]], i64 [[VL]])
+// CHECK-RV64-NEXT: ret <vscale x 4 x bfloat> [[TMP0]]
+//
+vbfloat16m1_t test_vfsgnjn_vf_bf16m1(vbfloat16m1_t op1, __bf16 op2, size_t vl) {
+ return __riscv_vfsgnjn(op1, op2, vl);
+}
+
+// CHECK-RV64-LABEL: define dso_local <vscale x 8 x bfloat> @test_vfsgnjn_vv_bf16m2(
+// CHECK-RV64-SAME: <vscale x 8 x bfloat> [[OP1:%.*]], <vscale x 8 x bfloat> [[OP2:%.*]], i64 noundef [[VL:%.*]]) #[[ATTR0]] {
+// CHECK-RV64-NEXT: [[ENTRY:.*:]]
+// CHECK-RV64-NEXT: [[TMP0:%.*]] = call <vscale x 8 x bfloat> @llvm.riscv.vfsgnjn.nxv8bf16.nxv8bf16.i64(<vscale x 8 x bfloat> poison, <vscale x 8 x bfloat> [[OP1]], <vscale x 8 x bfloat> [[OP2]], i64 [[VL]])
+// CHECK-RV64-NEXT: ret <vscale x 8 x bfloat> [[TMP0]]
+//
+vbfloat16m2_t test_vfsgnjn_vv_bf16m2(vbfloat16m2_t op1, vbfloat16m2_t op2, size_t vl) {
+ return __riscv_vfsgnjn(op1, op2, vl);
+}
+
+// CHECK-RV64-LABEL: define dso_local <vscale x 8 x bfloat> @test_vfsgnjn_vf_bf16m2(
+// CHECK-RV64-SAME: <vscale x 8 x bfloat> [[OP1:%.*]], bfloat noundef [[OP2:%.*]], i64 noundef [[VL:%.*]]) #[[ATTR0]] {
+// CHECK-RV64-NEXT: [[ENTRY:.*:]]
+// CHECK-RV64-NEXT: [[TMP0:%.*]] = call <vscale x 8 x bfloat> @llvm.riscv.vfsgnjn.nxv8bf16.bf16.i64(<vscale x 8 x bfloat> poison, <vscale x 8 x bfloat> [[OP1]], bfloat [[OP2]], i64 [[VL]])
+// CHECK-RV64-NEXT: ret <vscale x 8 x bfloat> [[TMP0]]
+//
+vbfloat16m2_t test_vfsgnjn_vf_bf16m2(vbfloat16m2_t op1, __bf16 op2, size_t vl) {
+ return __riscv_vfsgnjn(op1, op2, vl);
+}
+
+// CHECK-RV64-LABEL: define dso_local <vscale x 16 x bfloat> @test_vfsgnjn_vv_bf16m4(
+// CHECK-RV64-SAME: <vscale x 16 x bfloat> [[OP1:%.*]], <vscale x 16 x bfloat> [[OP2:%.*]], i64 noundef [[VL:%.*]]) #[[ATTR0]] {
+// CHECK-RV64-NEXT: [[ENTRY:.*:]]
+// CHECK-RV64-NEXT: [[TMP0:%.*]] = call <vscale x 16 x bfloat> @llvm.riscv.vfsgnjn.nxv16bf16.nxv16bf16.i64(<vscale x 16 x bfloat> poison, <vscale x 16 x bfloat> [[OP1]], <vscale x 16 x bfloat> [[OP2]], i64 [[VL]])
+// CHECK-RV64-NEXT: ret <vscale x 16 x bfloat> [[TMP0]]
+//
+vbfloat16m4_t test_vfsgnjn_vv_bf16m4(vbfloat16m4_t op1, vbfloat16m4_t op2, size_t vl) {
+ return __riscv_vfsgnjn(op1, op2, vl);
+}
+
+// CHECK-RV64-LABEL: define dso_local <vscale x 16 x bfloat> @test_vfsgnjn_vf_bf16m4(
+// CHECK-RV64-SAME: <vscale x 16 x bfloat> [[OP1:%.*]], bfloat noundef [[OP2:%.*]], i64 noundef [[VL:%.*]]) #[[ATTR0]] {
+// CHECK-RV64-NEXT: [[ENTRY:.*:]]
+// CHECK-RV64-NEXT: [[TMP0:%.*]] = call <vscale x 16 x bfloat> @llvm.riscv.vfsgnjn.nxv16bf16.bf16.i64(<vscale x 16 x bfloat> poison, <vscale x 16 x bfloat> [[OP1]], bfloat [[OP2]], i64 [[VL]])
+// CHECK-RV64-NEXT: ret <vscale x 16 x bfloat> [[TMP0]]
+//
+vbfloat16m4_t test_vfsgnjn_vf_bf16m4(vbfloat16m4_t op1, __bf16 op2, size_t vl) {
+ return __riscv_vfsgnjn(op1, op2, vl);
+}
+
+// CHECK-RV64-LABEL: define dso_local <vscale x 32 x bfloat> @test_vfsgnjn_vv_bf16m8(
+// CHECK-RV64-SAME: <vscale x 32 x bfloat> [[OP1:%.*]], <vscale x 32 x bfloat> [[OP2:%.*]], i64 noundef [[VL:%.*]]) #[[ATTR0]] {
+// CHECK-RV64-NEXT: [[ENTRY:.*:]]
+// CHECK-RV64-NEXT: [[TMP0:%.*]] = call <vscale x 32 x bfloat> @llvm.riscv.vfsgnjn.nxv32bf16.nxv32bf16.i64(<vscale x 32 x bfloat> poison, <vscale x 32 x bfloat> [[OP1]], <vscale x 32 x bfloat> [[OP2]], i64 [[VL]])
+// CHECK-RV64-NEXT: ret <vscale x 32 x bfloat> [[TMP0]]
+//
+vbfloat16m8_t test_vfsgnjn_vv_bf16m8(vbfloat16m8_t op1, vbfloat16m8_t op2, size_t vl) {
+ return __riscv_vfsgnjn(op1, op2, vl);
+}
+
+// CHECK-RV64-LABEL: define dso_local <vscale x 32 x bfloat> @test_vfsgnjn_vf_bf16m8(
+// CHECK-RV64-SAME: <vscale x 32 x bfloat> [[OP1:%.*]], bfloat noundef [[OP2:%.*]], i64 noundef [[VL:%.*]]) #[[ATTR0]] {
+// CHECK-RV64-NEXT: [[ENTRY:.*:]]
+// CHECK-RV64-NEXT: [[TMP0:%.*]] = call <vscale x 32 x bfloat> @llvm.riscv.vfsgnjn.nxv32bf16.bf16.i64(<vscale x 32 x bfloat> poison, <vscale x 32 x bfloat> [[OP1]], bfloat [[OP2]], i64 [[VL]])
+// CHECK-RV64-NEXT: ret <vscale x 32 x bfloat> [[TMP0]]
+//
+vbfloat16m8_t test_vfsgnjn_vf_bf16m8(vbfloat16m8_t op1, __bf16 op2, size_t vl) {
+ return __riscv_vfsgnjn(op1, op2, vl);
+}
+
+// CHECK-RV64-LABEL: define dso_local <vscale x 1 x bfloat> @test_vfsgnjn_vv_bf16mf4_m(
+// CHECK-RV64-SAME: <vscale x 1 x i1> [[MASK:%.*]], <vscale x 1 x bfloat> [[OP1:%.*]], <vscale x 1 x bfloat> [[OP2:%.*]], i64 noundef [[VL:%.*]]) #[[ATTR0]] {
+// CHECK-RV64-NEXT: [[ENTRY:.*:]]
+// CHECK-RV64-NEXT: [[TMP0:%.*]] = call <vscale x 1 x bfloat> @llvm.riscv.vfsgnjn.mask.nxv1bf16.nxv1bf16.i64(<vscale x 1 x bfloat> poison, <vscale x 1 x bfloat> [[OP1]], <vscale x 1 x bfloat> [[OP2]], <vscale x 1 x i1> [[MASK]], i64 [[VL]], i64 3)
+// CHECK-RV64-NEXT: ret <vscale x 1 x bfloat> [[TMP0]]
+//
+vbfloat16mf4_t test_vfsgnjn_vv_bf16mf4_m(vbool64_t mask, vbfloat16mf4_t op1, vbfloat16mf4_t op2, size_t vl) {
+ return __riscv_vfsgnjn(mask, op1, op2, vl);
+}
+
+// CHECK-RV64-LABEL: define dso_local <vscale x 1 x bfloat> @test_vfsgnjn_vf_bf16mf4_m(
+// CHECK-RV64-SAME: <vscale x 1 x i1> [[MASK:%.*]], <vscale x 1 x bfloat> [[OP1:%.*]], bfloat noundef [[OP2:%.*]], i64 noundef [[VL:%.*]]) #[[ATTR0]] {
+// CHECK-RV64-NEXT: [[ENTRY:.*:]]
+// CHECK-RV64-NEXT: [[TMP0:%.*]] = call <vscale x 1 x bfloat> @llvm.riscv.vfsgnjn.mask.nxv1bf16.bf16.i64(<vscale x 1 x bfloat> poison, <vscale x 1 x bfloat> [[OP1]], bfloat [[OP2]], <vscale x 1 x i1> [[MASK]], i64 [[VL]], i64 3)
+// CHECK-RV64-NEXT: ret <vscale x 1 x bfloat> [[TMP0]]
+//
+vbfloat16mf4_t test_vfsgnjn_vf_bf16mf4_m(vbool64_t mask, vbfloat16mf4_t op1, __bf16 op2, size_t vl) {
+ return __riscv_vfsgnjn(mask, op1, op2, vl);
+}
+
+// CHECK-RV64-LABEL: define dso_local <vscale x 2 x bfloat> @test_vfsgnjn_vv_bf16mf2_m(
+// CHECK-RV64-SAME: <vscale x 2 x i1> [[MASK:%.*]], <vscale x 2 x bfloat> [[OP1:%.*]], <vscale x 2 x bfloat> [[OP2:%.*]], i64 noundef [[VL:%.*]]) #[[ATTR0]] {
+// CHECK-RV64-NEXT: [[ENTRY:.*:]]
+// CHECK-RV64-NEXT: [[TMP0:%.*]] = call <vscale x 2 x bfloat> @llvm.riscv.vfsgnjn.mask.nxv2bf16.nxv2bf16.i64(<vscale x 2 x bfloat> poison, <vscale x 2 x bfloat> [[OP1]], <vscale x 2 x bfloat> [[OP2]], <vscale x 2 x i1> [[MASK]], i64 [[VL]], i64 3)
+// CHECK-RV64-NEXT: ret <vscale x 2 x bfloat> [[TMP0]]
+//
+vbfloat16mf2_t test_vfsgnjn_vv_bf16mf2_m(vbool32_t mask, vbfloat16mf2_t op1, vbfloat16mf2_t op2, size_t vl) {
+ return __riscv_vfsgnjn(mask, op1, op2, vl);
+}
+
+// CHECK-RV64-LABEL: define dso_local <vscale x 2 x bfloat> @test_vfsgnjn_vf_bf16mf2_m(
+// CHECK-RV64-SAME: <vscale x 2 x i1> [[MASK:%.*]], <vscale x 2 x bfloat> [[OP1:%.*]], bfloat noundef [[OP2:%.*]], i64 noundef [[VL:%.*]]) #[[ATTR0]] {
+// CHECK-RV64-NEXT: [[ENTRY:.*:]]
+// CHECK-RV64-NEXT: [[TMP0:%.*]] = call <vscale x 2 x bfloat> @llvm.riscv.vfsgnjn.mask.nxv2bf16.bf16.i64(<vscale x 2 x bfloat> poison, <vscale x 2 x bfloat> [[OP1]], bfloat [[OP2]], <vscale x 2 x i1> [[MASK]], i64 [[VL]], i64 3)
+// CHECK-RV64-NEXT: ret <vscale x 2 x bfloat> [[TMP0]]
+//
+vbfloat16mf2_t test_vfsgnjn_vf_bf16mf2_m(vbool32_t mask, vbfloat16mf2_t op1, __bf16 op2, size_t vl) {
+ return __riscv_vfsgnjn(mask, op1, op2, vl);
+}
+
+// CHECK-RV64-LABEL: define dso_local <vscale x 4 x bfloat> @test_vfsgnjn_vv_bf16m1_m(
+// CHECK-RV64-SAME: <vscale x 4 x i1> [[MASK:%.*]], <vscale x 4 x bfloat> [[OP1:%.*]], <vscale x 4 x bfloat> [[OP2:%.*]], i64 noundef [[VL:%.*]]) #[[ATTR0]] {
+// CHECK-RV64-NEXT: [[ENTRY:.*:]]
+// CHECK-RV64-NEXT: [[TMP0:%.*]] = call <vscale x 4 x bfloat> @llvm.riscv.vfsgnjn.mask.nxv4bf16.nxv4bf16.i64(<vscale x 4 x bfloat> poison, <vscale x 4 x bfloat> [[OP1]], <vscale x 4 x bfloat> [[OP2]], <vscale x 4 x i1> [[MASK]], i64 [[VL]], i64 3)
+// CHECK-RV64-NEXT: ret <vscale x 4 x bfloat> [[TMP0]]
+//
+vbfloat16m1_t test_vfsgnjn_vv_bf16m1_m(vbool16_t mask, vbfloat16m1_t op1, vbfloat16m1_t op2, size_t vl) {
+ return __riscv_vfsgnjn(mask, op1, op2, vl);
+}
+
+// CHECK-RV64-LABEL: define dso_local <vscale x 4 x bfloat> @test_vfsgnjn_vf_bf16m1_m(
+// CHECK-RV64-SAME: <vscale x 4 x i1> [[MASK:%.*]], <vscale x 4 x bfloat> [[OP1:%.*]], bfloat noundef [[OP2:%.*]], i64 noundef [[VL:%.*]]) #[[ATTR0]] {
+// CHECK-RV64-NEXT: [[ENTRY:.*:]]
+// CHECK-RV64-NEXT: [[TMP0:%.*]] = call <vscale x 4 x bfloat> @llvm.riscv.vfsgnjn.mask.nxv4bf16.bf16.i64(<vscale x 4 x bfloat> poison, <vscale x 4 x bfloat> [[OP1]], bfloat [[OP2]], <vscale x 4 x i1> [[MASK]], i64 [[VL]], i64 3)
+// CHECK-RV64-NEXT: ret <vscale x 4 x bfloat> [[TMP0]]
+//
+vbfloat16m1_t test_vfsgnjn_vf_bf16m1_m(vbool16_t mask, vbfloat16m1_t op1, __bf16 op2, size_t vl) {
+ return __riscv_vfsgnjn(mask, op1, op2, vl);
+}
+
+// CHECK-RV64-LABEL: define dso_local <vscale x 8 x bfloat> @test_vfsgnjn_vv_bf16m2_m(
+// CHECK-RV64-SAME: <vscale x 8 x i1> [[MASK:%.*]], <vscale x 8 x bfloat> [[OP1:%.*]], <vscale x 8 x bfloat> [[OP2:%.*]], i64 noundef [[VL:%.*]]) #[[ATTR0]] {
+// CHECK-RV64-NEXT: [[ENTRY:.*:]]
+// CHECK-RV64-NEXT: [[TMP0:%.*]] = call <vscale x 8 x bfloat> @llvm.riscv.vfsgnjn.mask.nxv8bf16.nxv8bf16.i64(<vscale x 8 x bfloat> poison, <vscale x 8 x bfloat> [[OP1]], <vscale x 8 x bfloat> [[OP2]], <vscale x 8 x i1> [[MASK]], i64 [[VL]], i64 3)
+// CHECK-RV64-NEXT: ret <vscale x 8 x bfloat> [[TMP0]]
+//
+vbfloat16m2_t test_vfsgnjn_vv_bf16m2_m(vbool8_t mask, vbfloat16m2_t op1, vbfloat16m2_t op2, size_t vl) {
+ return __riscv_vfsgnjn(mask, op1, op2, vl);
+}
+
+// CHECK-RV64-LABEL: define dso_local <vscale x 8 x bfloat> @test_vfsgnjn_vf_bf16m2_m(
+// CHECK-RV64-SAME: <vscale x 8 x i1> [[MASK:%.*]], <vscale x 8 x bfloat> [[OP1:%.*]], bfloat noundef [[OP2:%.*]], i64 noundef [[VL:%.*]]) #[[ATTR0]] {
+// CHECK-RV64-NEXT: [[ENTRY:.*:]]
+// CHECK-RV64-NEXT: [[TMP0:%.*]] = call <vscale x 8 x bfloat> @llvm.riscv.vfsgnjn.mask.nxv8bf16.bf16.i64(<vscale x 8 x bfloat> poison, <vscale x 8 x bfloat> [[OP1]], bfloat [[OP2]], <vscale x 8 x i1> [[MASK]], i64 [[VL]], i64 3)
+// CHECK-RV64-NEXT: ret <vscale x 8 x bfloat> [[TMP0]]
+//
+vbfloat16m2_t test_vfsgnjn_vf_bf16m2_m(vbool8_t mask, vbfloat16m2_t op1, __bf16 op2, size_t vl) {
+ return __riscv_vfsgnjn(mask, op1, op2, vl);
+}
+
+// CHECK-RV64-LABEL: define dso_local <vscale x 16 x bfloat> @test_vfsgnjn_vv_bf16m4_m(
+// CHECK-RV64-SAME: <vscale x 16 x i1> [[MASK:%.*]], <vscale x 16 x bfloat> [[OP1:%.*]], <vscale x 16 x bfloat> [[OP2:%.*]], i64 noundef [[VL:%.*]]) #[[ATTR0]] {
+// CHECK-RV64-NEXT: [[ENTRY:.*:]]
+// CHECK-RV64-NEXT: [[TMP0:%.*]] = call <vscale x 16 x bfloat> @llvm.riscv.vfsgnjn.mask.nxv16bf16.nxv16bf16.i64(<vscale x 16 x bfloat> poison, <vscale x 16 x bfloat> [[OP1]], <vscale x 16 x bfloat> [[OP2]], <vscale x 16 x i1> [[MASK]], i64 [[VL]], i64 3)
+// CHECK-RV64-NEXT: ret <vscale x 16 x bfloat> [[TMP0]]
+//
+vbfloat16m4_t test_vfsgnjn_vv_bf16m4_m(vbool4_t mask, vbfloat16m4_t op1, vbfloat16m4_t op2, size_t vl) {
+ return __riscv_vfsgnjn(mask, op1, op2, vl);
+}
+
+// CHECK-RV64-LABEL: define dso_local <vscale x 16 x bfloat> @test_vfsgnjn_vf_bf16m4_m(
+// CHECK-RV64-SAME: <vscale x 16 x i1> [[MASK:%.*]], <vscale x 16 x bfloat> [[OP1:%.*]], bfloat noundef [[OP2:%.*]], i64 noundef [[VL:%.*]]) #[[ATTR0]] {
+// CHECK-RV64-NEXT: [[ENTRY:.*:]]
+// CHECK-RV64-NEXT: [[TMP0:%.*]] = call <vscale x 16 x bfloat> @llvm.riscv.vfsgnjn.mask.nxv16bf16.bf16.i64(<vscale x 16 x bfloat> poison, <vscale x 16 x bfloat> [[OP1]], bfloat [[OP2]], <vscale x 16 x i1> [[MASK]], i64 [[VL]], i64 3)
+// CHECK-RV64-NEXT: ret <vscale x 16 x bfloat> [[TMP0]]
+//
+vbfloat16m4_t test_vfsgnjn_vf_bf16m4_m(vbool4_t mask, vbfloat16m4_t op1, __bf16 op2, size_t vl) {
+ return __riscv_vfsgnjn(mask, op1, op2, vl);
+}
+
+// CHECK-RV64-LABEL: define dso_local <vscale x 32 x bfloat> @test_vfsgnjn_vv_bf16m8_m(
+// CHECK-RV64-SAME: <vscale x 32 x i1> [[MASK:%.*]], <vscale x 32 x bfloat> [[OP1:%.*]], <vscale x 32 x bfloat> [[OP2:%.*]], i64 noundef [[VL:%.*]]) #[[ATTR0]] {
+// CHECK-RV64-NEXT: [[ENTRY:.*:]]
+// CHECK-RV64-NEXT: [[TMP0:%.*]] = call <vscale x 32 x bfloat> @llvm.riscv.vfsgnjn.mask.nxv32bf16.nxv32bf16.i64(<vscale x 32 x bfloat> poison, <vscale x 32 x bfloat> [[OP1]], <vscale x 32 x bfloat> [[OP2]], <vscale x 32 x i1> [[MASK]], i64 [[VL]], i64 3)
+// CHECK-RV64-NEXT: ret <vscale x 32 x bfloat> [[TMP0]]
+//
+vbfloat16m8_t test_vfsgnjn_vv_bf16m8_m(vbool2_t mask, vbfloat16m8_t op1, vbfloat16m8_t op2, size_t vl) {
+ return __riscv_vfsgnjn(mask, op1, op2, vl);
+}
+
+// CHECK-RV64-LABEL: define dso_local <vscale x 32 x bfloat> @test_vfsgnjn_vf_bf16m8_m(
+// CHECK-RV64-SAME: <vscale x 32 x i1> [[MASK:%.*]], <vscale x 32 x bfloat> [[OP1:%.*]], bfloat noundef [[OP2:%.*]], i64 noundef [[VL:%.*]]) #[[ATTR0]] {
+// CHECK-RV64-NEXT: [[ENTRY:.*:]]
+// CHECK-RV64-NEXT: [[TMP0:%.*]] = call <vscale x 32 x bfloat> @llvm.riscv.vfsgnjn.mask.nxv32bf16.bf16.i64(<vscale x 32 x bfloat> poison, <vscale x 32 x bfloat> [[OP1]], bfloat [[OP2]], <vscale x 32 x i1> [[MASK]], i64 [[VL]], i64 3)
+// CHECK-RV64-NEXT: ret <vscale x 32 x bfloat> [[TMP0]]
+//
+vbfloat16m8_t test_vfsgnjn_vf_bf16m8_m(vbool2_t mask, vbfloat16m8_t op1, __bf16 op2, size_t vl) {
+ return __riscv_vfsgnjn(mask, op1, op2, vl);
+}
+
diff --git a/clang/test/CodeGen/RISCV/rvv-intrinsics-autogenerated/zvfbfa/non-policy/overloaded/vfsgnjx.c b/clang/test/CodeGen/RISCV/rvv-intrinsics-autogenerated/zvfbfa/non-policy/overloaded/vfsgnjx.c
new file mode 100644
index 0000000..0187516
--- /dev/null
+++ b/clang/test/CodeGen/RISCV/rvv-intrinsics-autogenerated/zvfbfa/non-policy/overloaded/vfsgnjx.c
@@ -0,0 +1,249 @@
+// NOTE: Assertions have been autogenerated by utils/update_cc_test_checks.py UTC_ARGS: --version 5
+// REQUIRES: riscv-registered-target
+// RUN: %clang_cc1 -triple riscv64 -target-feature +v \
+// RUN: -target-feature +experimental-zvfbfa -disable-O0-optnone \
+// RUN: -emit-llvm %s -o - | opt -S -passes=mem2reg | \
+// RUN: FileCheck --check-prefix=CHECK-RV64 %s
+
+#include <riscv_vector.h>
+
+// CHECK-RV64-LABEL: define dso_local <vscale x 1 x bfloat> @test_vfsgnjx_vv_bf16mf4(
+// CHECK-RV64-SAME: <vscale x 1 x bfloat> [[OP1:%.*]], <vscale x 1 x bfloat> [[OP2:%.*]], i64 noundef [[VL:%.*]]) #[[ATTR0:[0-9]+]] {
+// CHECK-RV64-NEXT: [[ENTRY:.*:]]
+// CHECK-RV64-NEXT: [[TMP0:%.*]] = call <vscale x 1 x bfloat> @llvm.riscv.vfsgnjx.nxv1bf16.nxv1bf16.i64(<vscale x 1 x bfloat> poison, <vscale x 1 x bfloat> [[OP1]], <vscale x 1 x bfloat> [[OP2]], i64 [[VL]])
+// CHECK-RV64-NEXT: ret <vscale x 1 x bfloat> [[TMP0]]
+//
+vbfloat16mf4_t test_vfsgnjx_vv_bf16mf4(vbfloat16mf4_t op1, vbfloat16mf4_t op2, size_t vl) {
+ return __riscv_vfsgnjx(op1, op2, vl);
+}
+
+// CHECK-RV64-LABEL: define dso_local <vscale x 1 x bfloat> @test_vfsgnjx_vf_bf16mf4(
+// CHECK-RV64-SAME: <vscale x 1 x bfloat> [[OP1:%.*]], bfloat noundef [[OP2:%.*]], i64 noundef [[VL:%.*]]) #[[ATTR0]] {
+// CHECK-RV64-NEXT: [[ENTRY:.*:]]
+// CHECK-RV64-NEXT: [[TMP0:%.*]] = call <vscale x 1 x bfloat> @llvm.riscv.vfsgnjx.nxv1bf16.bf16.i64(<vscale x 1 x bfloat> poison, <vscale x 1 x bfloat> [[OP1]], bfloat [[OP2]], i64 [[VL]])
+// CHECK-RV64-NEXT: ret <vscale x 1 x bfloat> [[TMP0]]
+//
+vbfloat16mf4_t test_vfsgnjx_vf_bf16mf4(vbfloat16mf4_t op1, __bf16 op2, size_t vl) {
+ return __riscv_vfsgnjx(op1, op2, vl);
+}
+
+// CHECK-RV64-LABEL: define dso_local <vscale x 2 x bfloat> @test_vfsgnjx_vv_bf16mf2(
+// CHECK-RV64-SAME: <vscale x 2 x bfloat> [[OP1:%.*]], <vscale x 2 x bfloat> [[OP2:%.*]], i64 noundef [[VL:%.*]]) #[[ATTR0]] {
+// CHECK-RV64-NEXT: [[ENTRY:.*:]]
+// CHECK-RV64-NEXT: [[TMP0:%.*]] = call <vscale x 2 x bfloat> @llvm.riscv.vfsgnjx.nxv2bf16.nxv2bf16.i64(<vscale x 2 x bfloat> poison, <vscale x 2 x bfloat> [[OP1]], <vscale x 2 x bfloat> [[OP2]], i64 [[VL]])
+// CHECK-RV64-NEXT: ret <vscale x 2 x bfloat> [[TMP0]]
+//
+vbfloat16mf2_t test_vfsgnjx_vv_bf16mf2(vbfloat16mf2_t op1, vbfloat16mf2_t op2, size_t vl) {
+ return __riscv_vfsgnjx(op1, op2, vl);
+}
+
+// CHECK-RV64-LABEL: define dso_local <vscale x 2 x bfloat> @test_vfsgnjx_vf_bf16mf2(
+// CHECK-RV64-SAME: <vscale x 2 x bfloat> [[OP1:%.*]], bfloat noundef [[OP2:%.*]], i64 noundef [[VL:%.*]]) #[[ATTR0]] {
+// CHECK-RV64-NEXT: [[ENTRY:.*:]]
+// CHECK-RV64-NEXT: [[TMP0:%.*]] = call <vscale x 2 x bfloat> @llvm.riscv.vfsgnjx.nxv2bf16.bf16.i64(<vscale x 2 x bfloat> poison, <vscale x 2 x bfloat> [[OP1]], bfloat [[OP2]], i64 [[VL]])
+// CHECK-RV64-NEXT: ret <vscale x 2 x bfloat> [[TMP0]]
+//
+vbfloat16mf2_t test_vfsgnjx_vf_bf16mf2(vbfloat16mf2_t op1, __bf16 op2, size_t vl) {
+ return __riscv_vfsgnjx(op1, op2, vl);
+}
+
+// CHECK-RV64-LABEL: define dso_local <vscale x 4 x bfloat> @test_vfsgnjx_vv_bf16m1(
+// CHECK-RV64-SAME: <vscale x 4 x bfloat> [[OP1:%.*]], <vscale x 4 x bfloat> [[OP2:%.*]], i64 noundef [[VL:%.*]]) #[[ATTR0]] {
+// CHECK-RV64-NEXT: [[ENTRY:.*:]]
+// CHECK-RV64-NEXT: [[TMP0:%.*]] = call <vscale x 4 x bfloat> @llvm.riscv.vfsgnjx.nxv4bf16.nxv4bf16.i64(<vscale x 4 x bfloat> poison, <vscale x 4 x bfloat> [[OP1]], <vscale x 4 x bfloat> [[OP2]], i64 [[VL]])
+// CHECK-RV64-NEXT: ret <vscale x 4 x bfloat> [[TMP0]]
+//
+vbfloat16m1_t test_vfsgnjx_vv_bf16m1(vbfloat16m1_t op1, vbfloat16m1_t op2, size_t vl) {
+ return __riscv_vfsgnjx(op1, op2, vl);
+}
+
+// CHECK-RV64-LABEL: define dso_local <vscale x 4 x bfloat> @test_vfsgnjx_vf_bf16m1(
+// CHECK-RV64-SAME: <vscale x 4 x bfloat> [[OP1:%.*]], bfloat noundef [[OP2:%.*]], i64 noundef [[VL:%.*]]) #[[ATTR0]] {
+// CHECK-RV64-NEXT: [[ENTRY:.*:]]
+// CHECK-RV64-NEXT: [[TMP0:%.*]] = call <vscale x 4 x bfloat> @llvm.riscv.vfsgnjx.nxv4bf16.bf16.i64(<vscale x 4 x bfloat> poison, <vscale x 4 x bfloat> [[OP1]], bfloat [[OP2]], i64 [[VL]])
+// CHECK-RV64-NEXT: ret <vscale x 4 x bfloat> [[TMP0]]
+//
+vbfloat16m1_t test_vfsgnjx_vf_bf16m1(vbfloat16m1_t op1, __bf16 op2, size_t vl) {
+ return __riscv_vfsgnjx(op1, op2, vl);
+}
+
+// CHECK-RV64-LABEL: define dso_local <vscale x 8 x bfloat> @test_vfsgnjx_vv_bf16m2(
+// CHECK-RV64-SAME: <vscale x 8 x bfloat> [[OP1:%.*]], <vscale x 8 x bfloat> [[OP2:%.*]], i64 noundef [[VL:%.*]]) #[[ATTR0]] {
+// CHECK-RV64-NEXT: [[ENTRY:.*:]]
+// CHECK-RV64-NEXT: [[TMP0:%.*]] = call <vscale x 8 x bfloat> @llvm.riscv.vfsgnjx.nxv8bf16.nxv8bf16.i64(<vscale x 8 x bfloat> poison, <vscale x 8 x bfloat> [[OP1]], <vscale x 8 x bfloat> [[OP2]], i64 [[VL]])
+// CHECK-RV64-NEXT: ret <vscale x 8 x bfloat> [[TMP0]]
+//
+vbfloat16m2_t test_vfsgnjx_vv_bf16m2(vbfloat16m2_t op1, vbfloat16m2_t op2, size_t vl) {
+ return __riscv_vfsgnjx(op1, op2, vl);
+}
+
+// CHECK-RV64-LABEL: define dso_local <vscale x 8 x bfloat> @test_vfsgnjx_vf_bf16m2(
+// CHECK-RV64-SAME: <vscale x 8 x bfloat> [[OP1:%.*]], bfloat noundef [[OP2:%.*]], i64 noundef [[VL:%.*]]) #[[ATTR0]] {
+// CHECK-RV64-NEXT: [[ENTRY:.*:]]
+// CHECK-RV64-NEXT: [[TMP0:%.*]] = call <vscale x 8 x bfloat> @llvm.riscv.vfsgnjx.nxv8bf16.bf16.i64(<vscale x 8 x bfloat> poison, <vscale x 8 x bfloat> [[OP1]], bfloat [[OP2]], i64 [[VL]])
+// CHECK-RV64-NEXT: ret <vscale x 8 x bfloat> [[TMP0]]
+//
+vbfloat16m2_t test_vfsgnjx_vf_bf16m2(vbfloat16m2_t op1, __bf16 op2, size_t vl) {
+ return __riscv_vfsgnjx(op1, op2, vl);
+}
+
+// CHECK-RV64-LABEL: define dso_local <vscale x 16 x bfloat> @test_vfsgnjx_vv_bf16m4(
+// CHECK-RV64-SAME: <vscale x 16 x bfloat> [[OP1:%.*]], <vscale x 16 x bfloat> [[OP2:%.*]], i64 noundef [[VL:%.*]]) #[[ATTR0]] {
+// CHECK-RV64-NEXT: [[ENTRY:.*:]]
+// CHECK-RV64-NEXT: [[TMP0:%.*]] = call <vscale x 16 x bfloat> @llvm.riscv.vfsgnjx.nxv16bf16.nxv16bf16.i64(<vscale x 16 x bfloat> poison, <vscale x 16 x bfloat> [[OP1]], <vscale x 16 x bfloat> [[OP2]], i64 [[VL]])
+// CHECK-RV64-NEXT: ret <vscale x 16 x bfloat> [[TMP0]]
+//
+vbfloat16m4_t test_vfsgnjx_vv_bf16m4(vbfloat16m4_t op1, vbfloat16m4_t op2, size_t vl) {
+ return __riscv_vfsgnjx(op1, op2, vl);
+}
+
+// CHECK-RV64-LABEL: define dso_local <vscale x 16 x bfloat> @test_vfsgnjx_vf_bf16m4(
+// CHECK-RV64-SAME: <vscale x 16 x bfloat> [[OP1:%.*]], bfloat noundef [[OP2:%.*]], i64 noundef [[VL:%.*]]) #[[ATTR0]] {
+// CHECK-RV64-NEXT: [[ENTRY:.*:]]
+// CHECK-RV64-NEXT: [[TMP0:%.*]] = call <vscale x 16 x bfloat> @llvm.riscv.vfsgnjx.nxv16bf16.bf16.i64(<vscale x 16 x bfloat> poison, <vscale x 16 x bfloat> [[OP1]], bfloat [[OP2]], i64 [[VL]])
+// CHECK-RV64-NEXT: ret <vscale x 16 x bfloat> [[TMP0]]
+//
+vbfloat16m4_t test_vfsgnjx_vf_bf16m4(vbfloat16m4_t op1, __bf16 op2, size_t vl) {
+ return __riscv_vfsgnjx(op1, op2, vl);
+}
+
+// CHECK-RV64-LABEL: define dso_local <vscale x 32 x bfloat> @test_vfsgnjx_vv_bf16m8(
+// CHECK-RV64-SAME: <vscale x 32 x bfloat> [[OP1:%.*]], <vscale x 32 x bfloat> [[OP2:%.*]], i64 noundef [[VL:%.*]]) #[[ATTR0]] {
+// CHECK-RV64-NEXT: [[ENTRY:.*:]]
+// CHECK-RV64-NEXT: [[TMP0:%.*]] = call <vscale x 32 x bfloat> @llvm.riscv.vfsgnjx.nxv32bf16.nxv32bf16.i64(<vscale x 32 x bfloat> poison, <vscale x 32 x bfloat> [[OP1]], <vscale x 32 x bfloat> [[OP2]], i64 [[VL]])
+// CHECK-RV64-NEXT: ret <vscale x 32 x bfloat> [[TMP0]]
+//
+vbfloat16m8_t test_vfsgnjx_vv_bf16m8(vbfloat16m8_t op1, vbfloat16m8_t op2, size_t vl) {
+ return __riscv_vfsgnjx(op1, op2, vl);
+}
+
+// CHECK-RV64-LABEL: define dso_local <vscale x 32 x bfloat> @test_vfsgnjx_vf_bf16m8(
+// CHECK-RV64-SAME: <vscale x 32 x bfloat> [[OP1:%.*]], bfloat noundef [[OP2:%.*]], i64 noundef [[VL:%.*]]) #[[ATTR0]] {
+// CHECK-RV64-NEXT: [[ENTRY:.*:]]
+// CHECK-RV64-NEXT: [[TMP0:%.*]] = call <vscale x 32 x bfloat> @llvm.riscv.vfsgnjx.nxv32bf16.bf16.i64(<vscale x 32 x bfloat> poison, <vscale x 32 x bfloat> [[OP1]], bfloat [[OP2]], i64 [[VL]])
+// CHECK-RV64-NEXT: ret <vscale x 32 x bfloat> [[TMP0]]
+//
+vbfloat16m8_t test_vfsgnjx_vf_bf16m8(vbfloat16m8_t op1, __bf16 op2, size_t vl) {
+ return __riscv_vfsgnjx(op1, op2, vl);
+}
+
+// CHECK-RV64-LABEL: define dso_local <vscale x 1 x bfloat> @test_vfsgnjx_vv_bf16mf4_m(
+// CHECK-RV64-SAME: <vscale x 1 x i1> [[MASK:%.*]], <vscale x 1 x bfloat> [[OP1:%.*]], <vscale x 1 x bfloat> [[OP2:%.*]], i64 noundef [[VL:%.*]]) #[[ATTR0]] {
+// CHECK-RV64-NEXT: [[ENTRY:.*:]]
+// CHECK-RV64-NEXT: [[TMP0:%.*]] = call <vscale x 1 x bfloat> @llvm.riscv.vfsgnjx.mask.nxv1bf16.nxv1bf16.i64(<vscale x 1 x bfloat> poison, <vscale x 1 x bfloat> [[OP1]], <vscale x 1 x bfloat> [[OP2]], <vscale x 1 x i1> [[MASK]], i64 [[VL]], i64 3)
+// CHECK-RV64-NEXT: ret <vscale x 1 x bfloat> [[TMP0]]
+//
+vbfloat16mf4_t test_vfsgnjx_vv_bf16mf4_m(vbool64_t mask, vbfloat16mf4_t op1, vbfloat16mf4_t op2, size_t vl) {
+ return __riscv_vfsgnjx(mask, op1, op2, vl);
+}
+
+// CHECK-RV64-LABEL: define dso_local <vscale x 1 x bfloat> @test_vfsgnjx_vf_bf16mf4_m(
+// CHECK-RV64-SAME: <vscale x 1 x i1> [[MASK:%.*]], <vscale x 1 x bfloat> [[OP1:%.*]], bfloat noundef [[OP2:%.*]], i64 noundef [[VL:%.*]]) #[[ATTR0]] {
+// CHECK-RV64-NEXT: [[ENTRY:.*:]]
+// CHECK-RV64-NEXT: [[TMP0:%.*]] = call <vscale x 1 x bfloat> @llvm.riscv.vfsgnjx.mask.nxv1bf16.bf16.i64(<vscale x 1 x bfloat> poison, <vscale x 1 x bfloat> [[OP1]], bfloat [[OP2]], <vscale x 1 x i1> [[MASK]], i64 [[VL]], i64 3)
+// CHECK-RV64-NEXT: ret <vscale x 1 x bfloat> [[TMP0]]
+//
+vbfloat16mf4_t test_vfsgnjx_vf_bf16mf4_m(vbool64_t mask, vbfloat16mf4_t op1, __bf16 op2, size_t vl) {
+ return __riscv_vfsgnjx(mask, op1, op2, vl);
+}
+
+// CHECK-RV64-LABEL: define dso_local <vscale x 2 x bfloat> @test_vfsgnjx_vv_bf16mf2_m(
+// CHECK-RV64-SAME: <vscale x 2 x i1> [[MASK:%.*]], <vscale x 2 x bfloat> [[OP1:%.*]], <vscale x 2 x bfloat> [[OP2:%.*]], i64 noundef [[VL:%.*]]) #[[ATTR0]] {
+// CHECK-RV64-NEXT: [[ENTRY:.*:]]
+// CHECK-RV64-NEXT: [[TMP0:%.*]] = call <vscale x 2 x bfloat> @llvm.riscv.vfsgnjx.mask.nxv2bf16.nxv2bf16.i64(<vscale x 2 x bfloat> poison, <vscale x 2 x bfloat> [[OP1]], <vscale x 2 x bfloat> [[OP2]], <vscale x 2 x i1> [[MASK]], i64 [[VL]], i64 3)
+// CHECK-RV64-NEXT: ret <vscale x 2 x bfloat> [[TMP0]]
+//
+vbfloat16mf2_t test_vfsgnjx_vv_bf16mf2_m(vbool32_t mask, vbfloat16mf2_t op1, vbfloat16mf2_t op2, size_t vl) {
+ return __riscv_vfsgnjx(mask, op1, op2, vl);
+}
+
+// CHECK-RV64-LABEL: define dso_local <vscale x 2 x bfloat> @test_vfsgnjx_vf_bf16mf2_m(
+// CHECK-RV64-SAME: <vscale x 2 x i1> [[MASK:%.*]], <vscale x 2 x bfloat> [[OP1:%.*]], bfloat noundef [[OP2:%.*]], i64 noundef [[VL:%.*]]) #[[ATTR0]] {
+// CHECK-RV64-NEXT: [[ENTRY:.*:]]
+// CHECK-RV64-NEXT: [[TMP0:%.*]] = call <vscale x 2 x bfloat> @llvm.riscv.vfsgnjx.mask.nxv2bf16.bf16.i64(<vscale x 2 x bfloat> poison, <vscale x 2 x bfloat> [[OP1]], bfloat [[OP2]], <vscale x 2 x i1> [[MASK]], i64 [[VL]], i64 3)
+// CHECK-RV64-NEXT: ret <vscale x 2 x bfloat> [[TMP0]]
+//
+vbfloat16mf2_t test_vfsgnjx_vf_bf16mf2_m(vbool32_t mask, vbfloat16mf2_t op1, __bf16 op2, size_t vl) {
+ return __riscv_vfsgnjx(mask, op1, op2, vl);
+}
+
+// CHECK-RV64-LABEL: define dso_local <vscale x 4 x bfloat> @test_vfsgnjx_vv_bf16m1_m(
+// CHECK-RV64-SAME: <vscale x 4 x i1> [[MASK:%.*]], <vscale x 4 x bfloat> [[OP1:%.*]], <vscale x 4 x bfloat> [[OP2:%.*]], i64 noundef [[VL:%.*]]) #[[ATTR0]] {
+// CHECK-RV64-NEXT: [[ENTRY:.*:]]
+// CHECK-RV64-NEXT: [[TMP0:%.*]] = call <vscale x 4 x bfloat> @llvm.riscv.vfsgnjx.mask.nxv4bf16.nxv4bf16.i64(<vscale x 4 x bfloat> poison, <vscale x 4 x bfloat> [[OP1]], <vscale x 4 x bfloat> [[OP2]], <vscale x 4 x i1> [[MASK]], i64 [[VL]], i64 3)
+// CHECK-RV64-NEXT: ret <vscale x 4 x bfloat> [[TMP0]]
+//
+vbfloat16m1_t test_vfsgnjx_vv_bf16m1_m(vbool16_t mask, vbfloat16m1_t op1, vbfloat16m1_t op2, size_t vl) {
+ return __riscv_vfsgnjx(mask, op1, op2, vl);
+}
+
+// CHECK-RV64-LABEL: define dso_local <vscale x 4 x bfloat> @test_vfsgnjx_vf_bf16m1_m(
+// CHECK-RV64-SAME: <vscale x 4 x i1> [[MASK:%.*]], <vscale x 4 x bfloat> [[OP1:%.*]], bfloat noundef [[OP2:%.*]], i64 noundef [[VL:%.*]]) #[[ATTR0]] {
+// CHECK-RV64-NEXT: [[ENTRY:.*:]]
+// CHECK-RV64-NEXT: [[TMP0:%.*]] = call <vscale x 4 x bfloat> @llvm.riscv.vfsgnjx.mask.nxv4bf16.bf16.i64(<vscale x 4 x bfloat> poison, <vscale x 4 x bfloat> [[OP1]], bfloat [[OP2]], <vscale x 4 x i1> [[MASK]], i64 [[VL]], i64 3)
+// CHECK-RV64-NEXT: ret <vscale x 4 x bfloat> [[TMP0]]
+//
+vbfloat16m1_t test_vfsgnjx_vf_bf16m1_m(vbool16_t mask, vbfloat16m1_t op1, __bf16 op2, size_t vl) {
+ return __riscv_vfsgnjx(mask, op1, op2, vl);
+}
+
+// CHECK-RV64-LABEL: define dso_local <vscale x 8 x bfloat> @test_vfsgnjx_vv_bf16m2_m(
+// CHECK-RV64-SAME: <vscale x 8 x i1> [[MASK:%.*]], <vscale x 8 x bfloat> [[OP1:%.*]], <vscale x 8 x bfloat> [[OP2:%.*]], i64 noundef [[VL:%.*]]) #[[ATTR0]] {
+// CHECK-RV64-NEXT: [[ENTRY:.*:]]
+// CHECK-RV64-NEXT: [[TMP0:%.*]] = call <vscale x 8 x bfloat> @llvm.riscv.vfsgnjx.mask.nxv8bf16.nxv8bf16.i64(<vscale x 8 x bfloat> poison, <vscale x 8 x bfloat> [[OP1]], <vscale x 8 x bfloat> [[OP2]], <vscale x 8 x i1> [[MASK]], i64 [[VL]], i64 3)
+// CHECK-RV64-NEXT: ret <vscale x 8 x bfloat> [[TMP0]]
+//
+vbfloat16m2_t test_vfsgnjx_vv_bf16m2_m(vbool8_t mask, vbfloat16m2_t op1, vbfloat16m2_t op2, size_t vl) {
+ return __riscv_vfsgnjx(mask, op1, op2, vl);
+}
+
+// CHECK-RV64-LABEL: define dso_local <vscale x 8 x bfloat> @test_vfsgnjx_vf_bf16m2_m(
+// CHECK-RV64-SAME: <vscale x 8 x i1> [[MASK:%.*]], <vscale x 8 x bfloat> [[OP1:%.*]], bfloat noundef [[OP2:%.*]], i64 noundef [[VL:%.*]]) #[[ATTR0]] {
+// CHECK-RV64-NEXT: [[ENTRY:.*:]]
+// CHECK-RV64-NEXT: [[TMP0:%.*]] = call <vscale x 8 x bfloat> @llvm.riscv.vfsgnjx.mask.nxv8bf16.bf16.i64(<vscale x 8 x bfloat> poison, <vscale x 8 x bfloat> [[OP1]], bfloat [[OP2]], <vscale x 8 x i1> [[MASK]], i64 [[VL]], i64 3)
+// CHECK-RV64-NEXT: ret <vscale x 8 x bfloat> [[TMP0]]
+//
+vbfloat16m2_t test_vfsgnjx_vf_bf16m2_m(vbool8_t mask, vbfloat16m2_t op1, __bf16 op2, size_t vl) {
+ return __riscv_vfsgnjx(mask, op1, op2, vl);
+}
+
+// CHECK-RV64-LABEL: define dso_local <vscale x 16 x bfloat> @test_vfsgnjx_vv_bf16m4_m(
+// CHECK-RV64-SAME: <vscale x 16 x i1> [[MASK:%.*]], <vscale x 16 x bfloat> [[OP1:%.*]], <vscale x 16 x bfloat> [[OP2:%.*]], i64 noundef [[VL:%.*]]) #[[ATTR0]] {
+// CHECK-RV64-NEXT: [[ENTRY:.*:]]
+// CHECK-RV64-NEXT: [[TMP0:%.*]] = call <vscale x 16 x bfloat> @llvm.riscv.vfsgnjx.mask.nxv16bf16.nxv16bf16.i64(<vscale x 16 x bfloat> poison, <vscale x 16 x bfloat> [[OP1]], <vscale x 16 x bfloat> [[OP2]], <vscale x 16 x i1> [[MASK]], i64 [[VL]], i64 3)
+// CHECK-RV64-NEXT: ret <vscale x 16 x bfloat> [[TMP0]]
+//
+vbfloat16m4_t test_vfsgnjx_vv_bf16m4_m(vbool4_t mask, vbfloat16m4_t op1, vbfloat16m4_t op2, size_t vl) {
+ return __riscv_vfsgnjx(mask, op1, op2, vl);
+}
+
+// CHECK-RV64-LABEL: define dso_local <vscale x 16 x bfloat> @test_vfsgnjx_vf_bf16m4_m(
+// CHECK-RV64-SAME: <vscale x 16 x i1> [[MASK:%.*]], <vscale x 16 x bfloat> [[OP1:%.*]], bfloat noundef [[OP2:%.*]], i64 noundef [[VL:%.*]]) #[[ATTR0]] {
+// CHECK-RV64-NEXT: [[ENTRY:.*:]]
+// CHECK-RV64-NEXT: [[TMP0:%.*]] = call <vscale x 16 x bfloat> @llvm.riscv.vfsgnjx.mask.nxv16bf16.bf16.i64(<vscale x 16 x bfloat> poison, <vscale x 16 x bfloat> [[OP1]], bfloat [[OP2]], <vscale x 16 x i1> [[MASK]], i64 [[VL]], i64 3)
+// CHECK-RV64-NEXT: ret <vscale x 16 x bfloat> [[TMP0]]
+//
+vbfloat16m4_t test_vfsgnjx_vf_bf16m4_m(vbool4_t mask, vbfloat16m4_t op1, __bf16 op2, size_t vl) {
+ return __riscv_vfsgnjx(mask, op1, op2, vl);
+}
+
+// CHECK-RV64-LABEL: define dso_local <vscale x 32 x bfloat> @test_vfsgnjx_vv_bf16m8_m(
+// CHECK-RV64-SAME: <vscale x 32 x i1> [[MASK:%.*]], <vscale x 32 x bfloat> [[OP1:%.*]], <vscale x 32 x bfloat> [[OP2:%.*]], i64 noundef [[VL:%.*]]) #[[ATTR0]] {
+// CHECK-RV64-NEXT: [[ENTRY:.*:]]
+// CHECK-RV64-NEXT: [[TMP0:%.*]] = call <vscale x 32 x bfloat> @llvm.riscv.vfsgnjx.mask.nxv32bf16.nxv32bf16.i64(<vscale x 32 x bfloat> poison, <vscale x 32 x bfloat> [[OP1]], <vscale x 32 x bfloat> [[OP2]], <vscale x 32 x i1> [[MASK]], i64 [[VL]], i64 3)
+// CHECK-RV64-NEXT: ret <vscale x 32 x bfloat> [[TMP0]]
+//
+vbfloat16m8_t test_vfsgnjx_vv_bf16m8_m(vbool2_t mask, vbfloat16m8_t op1, vbfloat16m8_t op2, size_t vl) {
+ return __riscv_vfsgnjx(mask, op1, op2, vl);
+}
+
+// CHECK-RV64-LABEL: define dso_local <vscale x 32 x bfloat> @test_vfsgnjx_vf_bf16m8_m(
+// CHECK-RV64-SAME: <vscale x 32 x i1> [[MASK:%.*]], <vscale x 32 x bfloat> [[OP1:%.*]], bfloat noundef [[OP2:%.*]], i64 noundef [[VL:%.*]]) #[[ATTR0]] {
+// CHECK-RV64-NEXT: [[ENTRY:.*:]]
+// CHECK-RV64-NEXT: [[TMP0:%.*]] = call <vscale x 32 x bfloat> @llvm.riscv.vfsgnjx.mask.nxv32bf16.bf16.i64(<vscale x 32 x bfloat> poison, <vscale x 32 x bfloat> [[OP1]], bfloat [[OP2]], <vscale x 32 x i1> [[MASK]], i64 [[VL]], i64 3)
+// CHECK-RV64-NEXT: ret <vscale x 32 x bfloat> [[TMP0]]
+//
+vbfloat16m8_t test_vfsgnjx_vf_bf16m8_m(vbool2_t mask, vbfloat16m8_t op1, __bf16 op2, size_t vl) {
+ return __riscv_vfsgnjx(mask, op1, op2, vl);
+}
+
diff --git a/clang/test/CodeGen/RISCV/rvv-intrinsics-autogenerated/zvfbfa/non-policy/overloaded/vfslide1down.c b/clang/test/CodeGen/RISCV/rvv-intrinsics-autogenerated/zvfbfa/non-policy/overloaded/vfslide1down.c
new file mode 100644
index 0000000..4a76894
--- /dev/null
+++ b/clang/test/CodeGen/RISCV/rvv-intrinsics-autogenerated/zvfbfa/non-policy/overloaded/vfslide1down.c
@@ -0,0 +1,129 @@
+// NOTE: Assertions have been autogenerated by utils/update_cc_test_checks.py UTC_ARGS: --version 5
+// REQUIRES: riscv-registered-target
+// RUN: %clang_cc1 -triple riscv64 -target-feature +v \
+// RUN: -target-feature +experimental-zvfbfa -disable-O0-optnone \
+// RUN: -emit-llvm %s -o - | opt -S -passes=mem2reg | \
+// RUN: FileCheck --check-prefix=CHECK-RV64 %s
+
+#include <riscv_vector.h>
+
+// CHECK-RV64-LABEL: define dso_local <vscale x 1 x bfloat> @test_vfslide1down_vf_bf16mf4(
+// CHECK-RV64-SAME: <vscale x 1 x bfloat> [[SRC:%.*]], bfloat noundef [[VALUE:%.*]], i64 noundef [[VL:%.*]]) #[[ATTR0:[0-9]+]] {
+// CHECK-RV64-NEXT: [[ENTRY:.*:]]
+// CHECK-RV64-NEXT: [[TMP0:%.*]] = call <vscale x 1 x bfloat> @llvm.riscv.vfslide1down.nxv1bf16.bf16.i64(<vscale x 1 x bfloat> poison, <vscale x 1 x bfloat> [[SRC]], bfloat [[VALUE]], i64 [[VL]])
+// CHECK-RV64-NEXT: ret <vscale x 1 x bfloat> [[TMP0]]
+//
+vbfloat16mf4_t test_vfslide1down_vf_bf16mf4(vbfloat16mf4_t src, __bf16 value, size_t vl) {
+ return __riscv_vfslide1down(src, value, vl);
+}
+
+// CHECK-RV64-LABEL: define dso_local <vscale x 2 x bfloat> @test_vfslide1down_vf_bf16mf2(
+// CHECK-RV64-SAME: <vscale x 2 x bfloat> [[SRC:%.*]], bfloat noundef [[VALUE:%.*]], i64 noundef [[VL:%.*]]) #[[ATTR0]] {
+// CHECK-RV64-NEXT: [[ENTRY:.*:]]
+// CHECK-RV64-NEXT: [[TMP0:%.*]] = call <vscale x 2 x bfloat> @llvm.riscv.vfslide1down.nxv2bf16.bf16.i64(<vscale x 2 x bfloat> poison, <vscale x 2 x bfloat> [[SRC]], bfloat [[VALUE]], i64 [[VL]])
+// CHECK-RV64-NEXT: ret <vscale x 2 x bfloat> [[TMP0]]
+//
+vbfloat16mf2_t test_vfslide1down_vf_bf16mf2(vbfloat16mf2_t src, __bf16 value, size_t vl) {
+ return __riscv_vfslide1down(src, value, vl);
+}
+
+// CHECK-RV64-LABEL: define dso_local <vscale x 4 x bfloat> @test_vfslide1down_vf_bf16m1(
+// CHECK-RV64-SAME: <vscale x 4 x bfloat> [[SRC:%.*]], bfloat noundef [[VALUE:%.*]], i64 noundef [[VL:%.*]]) #[[ATTR0]] {
+// CHECK-RV64-NEXT: [[ENTRY:.*:]]
+// CHECK-RV64-NEXT: [[TMP0:%.*]] = call <vscale x 4 x bfloat> @llvm.riscv.vfslide1down.nxv4bf16.bf16.i64(<vscale x 4 x bfloat> poison, <vscale x 4 x bfloat> [[SRC]], bfloat [[VALUE]], i64 [[VL]])
+// CHECK-RV64-NEXT: ret <vscale x 4 x bfloat> [[TMP0]]
+//
+vbfloat16m1_t test_vfslide1down_vf_bf16m1(vbfloat16m1_t src, __bf16 value, size_t vl) {
+ return __riscv_vfslide1down(src, value, vl);
+}
+
+// CHECK-RV64-LABEL: define dso_local <vscale x 8 x bfloat> @test_vfslide1down_vf_bf16m2(
+// CHECK-RV64-SAME: <vscale x 8 x bfloat> [[SRC:%.*]], bfloat noundef [[VALUE:%.*]], i64 noundef [[VL:%.*]]) #[[ATTR0]] {
+// CHECK-RV64-NEXT: [[ENTRY:.*:]]
+// CHECK-RV64-NEXT: [[TMP0:%.*]] = call <vscale x 8 x bfloat> @llvm.riscv.vfslide1down.nxv8bf16.bf16.i64(<vscale x 8 x bfloat> poison, <vscale x 8 x bfloat> [[SRC]], bfloat [[VALUE]], i64 [[VL]])
+// CHECK-RV64-NEXT: ret <vscale x 8 x bfloat> [[TMP0]]
+//
+vbfloat16m2_t test_vfslide1down_vf_bf16m2(vbfloat16m2_t src, __bf16 value, size_t vl) {
+ return __riscv_vfslide1down(src, value, vl);
+}
+
+// CHECK-RV64-LABEL: define dso_local <vscale x 16 x bfloat> @test_vfslide1down_vf_bf16m4(
+// CHECK-RV64-SAME: <vscale x 16 x bfloat> [[SRC:%.*]], bfloat noundef [[VALUE:%.*]], i64 noundef [[VL:%.*]]) #[[ATTR0]] {
+// CHECK-RV64-NEXT: [[ENTRY:.*:]]
+// CHECK-RV64-NEXT: [[TMP0:%.*]] = call <vscale x 16 x bfloat> @llvm.riscv.vfslide1down.nxv16bf16.bf16.i64(<vscale x 16 x bfloat> poison, <vscale x 16 x bfloat> [[SRC]], bfloat [[VALUE]], i64 [[VL]])
+// CHECK-RV64-NEXT: ret <vscale x 16 x bfloat> [[TMP0]]
+//
+vbfloat16m4_t test_vfslide1down_vf_bf16m4(vbfloat16m4_t src, __bf16 value, size_t vl) {
+ return __riscv_vfslide1down(src, value, vl);
+}
+
+// CHECK-RV64-LABEL: define dso_local <vscale x 32 x bfloat> @test_vfslide1down_vf_bf16m8(
+// CHECK-RV64-SAME: <vscale x 32 x bfloat> [[SRC:%.*]], bfloat noundef [[VALUE:%.*]], i64 noundef [[VL:%.*]]) #[[ATTR0]] {
+// CHECK-RV64-NEXT: [[ENTRY:.*:]]
+// CHECK-RV64-NEXT: [[TMP0:%.*]] = call <vscale x 32 x bfloat> @llvm.riscv.vfslide1down.nxv32bf16.bf16.i64(<vscale x 32 x bfloat> poison, <vscale x 32 x bfloat> [[SRC]], bfloat [[VALUE]], i64 [[VL]])
+// CHECK-RV64-NEXT: ret <vscale x 32 x bfloat> [[TMP0]]
+//
+vbfloat16m8_t test_vfslide1down_vf_bf16m8(vbfloat16m8_t src, __bf16 value, size_t vl) {
+ return __riscv_vfslide1down(src, value, vl);
+}
+
+// CHECK-RV64-LABEL: define dso_local <vscale x 1 x bfloat> @test_vfslide1down_vf_bf16mf4_m(
+// CHECK-RV64-SAME: <vscale x 1 x i1> [[MASK:%.*]], <vscale x 1 x bfloat> [[SRC:%.*]], bfloat noundef [[VALUE:%.*]], i64 noundef [[VL:%.*]]) #[[ATTR0]] {
+// CHECK-RV64-NEXT: [[ENTRY:.*:]]
+// CHECK-RV64-NEXT: [[TMP0:%.*]] = call <vscale x 1 x bfloat> @llvm.riscv.vfslide1down.mask.nxv1bf16.bf16.i64(<vscale x 1 x bfloat> poison, <vscale x 1 x bfloat> [[SRC]], bfloat [[VALUE]], <vscale x 1 x i1> [[MASK]], i64 [[VL]], i64 3)
+// CHECK-RV64-NEXT: ret <vscale x 1 x bfloat> [[TMP0]]
+//
+vbfloat16mf4_t test_vfslide1down_vf_bf16mf4_m(vbool64_t mask, vbfloat16mf4_t src, __bf16 value, size_t vl) {
+ return __riscv_vfslide1down(mask, src, value, vl);
+}
+
+// CHECK-RV64-LABEL: define dso_local <vscale x 2 x bfloat> @test_vfslide1down_vf_bf16mf2_m(
+// CHECK-RV64-SAME: <vscale x 2 x i1> [[MASK:%.*]], <vscale x 2 x bfloat> [[SRC:%.*]], bfloat noundef [[VALUE:%.*]], i64 noundef [[VL:%.*]]) #[[ATTR0]] {
+// CHECK-RV64-NEXT: [[ENTRY:.*:]]
+// CHECK-RV64-NEXT: [[TMP0:%.*]] = call <vscale x 2 x bfloat> @llvm.riscv.vfslide1down.mask.nxv2bf16.bf16.i64(<vscale x 2 x bfloat> poison, <vscale x 2 x bfloat> [[SRC]], bfloat [[VALUE]], <vscale x 2 x i1> [[MASK]], i64 [[VL]], i64 3)
+// CHECK-RV64-NEXT: ret <vscale x 2 x bfloat> [[TMP0]]
+//
+vbfloat16mf2_t test_vfslide1down_vf_bf16mf2_m(vbool32_t mask, vbfloat16mf2_t src, __bf16 value, size_t vl) {
+ return __riscv_vfslide1down(mask, src, value, vl);
+}
+
+// CHECK-RV64-LABEL: define dso_local <vscale x 4 x bfloat> @test_vfslide1down_vf_bf16m1_m(
+// CHECK-RV64-SAME: <vscale x 4 x i1> [[MASK:%.*]], <vscale x 4 x bfloat> [[SRC:%.*]], bfloat noundef [[VALUE:%.*]], i64 noundef [[VL:%.*]]) #[[ATTR0]] {
+// CHECK-RV64-NEXT: [[ENTRY:.*:]]
+// CHECK-RV64-NEXT: [[TMP0:%.*]] = call <vscale x 4 x bfloat> @llvm.riscv.vfslide1down.mask.nxv4bf16.bf16.i64(<vscale x 4 x bfloat> poison, <vscale x 4 x bfloat> [[SRC]], bfloat [[VALUE]], <vscale x 4 x i1> [[MASK]], i64 [[VL]], i64 3)
+// CHECK-RV64-NEXT: ret <vscale x 4 x bfloat> [[TMP0]]
+//
+vbfloat16m1_t test_vfslide1down_vf_bf16m1_m(vbool16_t mask, vbfloat16m1_t src, __bf16 value, size_t vl) {
+ return __riscv_vfslide1down(mask, src, value, vl);
+}
+
+// CHECK-RV64-LABEL: define dso_local <vscale x 8 x bfloat> @test_vfslide1down_vf_bf16m2_m(
+// CHECK-RV64-SAME: <vscale x 8 x i1> [[MASK:%.*]], <vscale x 8 x bfloat> [[SRC:%.*]], bfloat noundef [[VALUE:%.*]], i64 noundef [[VL:%.*]]) #[[ATTR0]] {
+// CHECK-RV64-NEXT: [[ENTRY:.*:]]
+// CHECK-RV64-NEXT: [[TMP0:%.*]] = call <vscale x 8 x bfloat> @llvm.riscv.vfslide1down.mask.nxv8bf16.bf16.i64(<vscale x 8 x bfloat> poison, <vscale x 8 x bfloat> [[SRC]], bfloat [[VALUE]], <vscale x 8 x i1> [[MASK]], i64 [[VL]], i64 3)
+// CHECK-RV64-NEXT: ret <vscale x 8 x bfloat> [[TMP0]]
+//
+vbfloat16m2_t test_vfslide1down_vf_bf16m2_m(vbool8_t mask, vbfloat16m2_t src, __bf16 value, size_t vl) {
+ return __riscv_vfslide1down(mask, src, value, vl);
+}
+
+// CHECK-RV64-LABEL: define dso_local <vscale x 16 x bfloat> @test_vfslide1down_vf_bf16m4_m(
+// CHECK-RV64-SAME: <vscale x 16 x i1> [[MASK:%.*]], <vscale x 16 x bfloat> [[SRC:%.*]], bfloat noundef [[VALUE:%.*]], i64 noundef [[VL:%.*]]) #[[ATTR0]] {
+// CHECK-RV64-NEXT: [[ENTRY:.*:]]
+// CHECK-RV64-NEXT: [[TMP0:%.*]] = call <vscale x 16 x bfloat> @llvm.riscv.vfslide1down.mask.nxv16bf16.bf16.i64(<vscale x 16 x bfloat> poison, <vscale x 16 x bfloat> [[SRC]], bfloat [[VALUE]], <vscale x 16 x i1> [[MASK]], i64 [[VL]], i64 3)
+// CHECK-RV64-NEXT: ret <vscale x 16 x bfloat> [[TMP0]]
+//
+vbfloat16m4_t test_vfslide1down_vf_bf16m4_m(vbool4_t mask, vbfloat16m4_t src, __bf16 value, size_t vl) {
+ return __riscv_vfslide1down(mask, src, value, vl);
+}
+
+// CHECK-RV64-LABEL: define dso_local <vscale x 32 x bfloat> @test_vfslide1down_vf_bf16m8_m(
+// CHECK-RV64-SAME: <vscale x 32 x i1> [[MASK:%.*]], <vscale x 32 x bfloat> [[SRC:%.*]], bfloat noundef [[VALUE:%.*]], i64 noundef [[VL:%.*]]) #[[ATTR0]] {
+// CHECK-RV64-NEXT: [[ENTRY:.*:]]
+// CHECK-RV64-NEXT: [[TMP0:%.*]] = call <vscale x 32 x bfloat> @llvm.riscv.vfslide1down.mask.nxv32bf16.bf16.i64(<vscale x 32 x bfloat> poison, <vscale x 32 x bfloat> [[SRC]], bfloat [[VALUE]], <vscale x 32 x i1> [[MASK]], i64 [[VL]], i64 3)
+// CHECK-RV64-NEXT: ret <vscale x 32 x bfloat> [[TMP0]]
+//
+vbfloat16m8_t test_vfslide1down_vf_bf16m8_m(vbool2_t mask, vbfloat16m8_t src, __bf16 value, size_t vl) {
+ return __riscv_vfslide1down(mask, src, value, vl);
+}
+
diff --git a/clang/test/CodeGen/RISCV/rvv-intrinsics-autogenerated/zvfbfa/non-policy/overloaded/vfslide1up.c b/clang/test/CodeGen/RISCV/rvv-intrinsics-autogenerated/zvfbfa/non-policy/overloaded/vfslide1up.c
new file mode 100644
index 0000000..f9f2dc0
--- /dev/null
+++ b/clang/test/CodeGen/RISCV/rvv-intrinsics-autogenerated/zvfbfa/non-policy/overloaded/vfslide1up.c
@@ -0,0 +1,129 @@
+// NOTE: Assertions have been autogenerated by utils/update_cc_test_checks.py UTC_ARGS: --version 5
+// REQUIRES: riscv-registered-target
+// RUN: %clang_cc1 -triple riscv64 -target-feature +v \
+// RUN: -target-feature +experimental-zvfbfa -disable-O0-optnone \
+// RUN: -emit-llvm %s -o - | opt -S -passes=mem2reg | \
+// RUN: FileCheck --check-prefix=CHECK-RV64 %s
+
+#include <riscv_vector.h>
+
+// CHECK-RV64-LABEL: define dso_local <vscale x 1 x bfloat> @test_vfslide1up_vf_bf16mf4(
+// CHECK-RV64-SAME: <vscale x 1 x bfloat> [[SRC:%.*]], bfloat noundef [[VALUE:%.*]], i64 noundef [[VL:%.*]]) #[[ATTR0:[0-9]+]] {
+// CHECK-RV64-NEXT: [[ENTRY:.*:]]
+// CHECK-RV64-NEXT: [[TMP0:%.*]] = call <vscale x 1 x bfloat> @llvm.riscv.vfslide1up.nxv1bf16.bf16.i64(<vscale x 1 x bfloat> poison, <vscale x 1 x bfloat> [[SRC]], bfloat [[VALUE]], i64 [[VL]])
+// CHECK-RV64-NEXT: ret <vscale x 1 x bfloat> [[TMP0]]
+//
+vbfloat16mf4_t test_vfslide1up_vf_bf16mf4(vbfloat16mf4_t src, __bf16 value, size_t vl) {
+ return __riscv_vfslide1up(src, value, vl);
+}
+
+// CHECK-RV64-LABEL: define dso_local <vscale x 2 x bfloat> @test_vfslide1up_vf_bf16mf2(
+// CHECK-RV64-SAME: <vscale x 2 x bfloat> [[SRC:%.*]], bfloat noundef [[VALUE:%.*]], i64 noundef [[VL:%.*]]) #[[ATTR0]] {
+// CHECK-RV64-NEXT: [[ENTRY:.*:]]
+// CHECK-RV64-NEXT: [[TMP0:%.*]] = call <vscale x 2 x bfloat> @llvm.riscv.vfslide1up.nxv2bf16.bf16.i64(<vscale x 2 x bfloat> poison, <vscale x 2 x bfloat> [[SRC]], bfloat [[VALUE]], i64 [[VL]])
+// CHECK-RV64-NEXT: ret <vscale x 2 x bfloat> [[TMP0]]
+//
+vbfloat16mf2_t test_vfslide1up_vf_bf16mf2(vbfloat16mf2_t src, __bf16 value, size_t vl) {
+ return __riscv_vfslide1up(src, value, vl);
+}
+
+// CHECK-RV64-LABEL: define dso_local <vscale x 4 x bfloat> @test_vfslide1up_vf_bf16m1(
+// CHECK-RV64-SAME: <vscale x 4 x bfloat> [[SRC:%.*]], bfloat noundef [[VALUE:%.*]], i64 noundef [[VL:%.*]]) #[[ATTR0]] {
+// CHECK-RV64-NEXT: [[ENTRY:.*:]]
+// CHECK-RV64-NEXT: [[TMP0:%.*]] = call <vscale x 4 x bfloat> @llvm.riscv.vfslide1up.nxv4bf16.bf16.i64(<vscale x 4 x bfloat> poison, <vscale x 4 x bfloat> [[SRC]], bfloat [[VALUE]], i64 [[VL]])
+// CHECK-RV64-NEXT: ret <vscale x 4 x bfloat> [[TMP0]]
+//
+vbfloat16m1_t test_vfslide1up_vf_bf16m1(vbfloat16m1_t src, __bf16 value, size_t vl) {
+ return __riscv_vfslide1up(src, value, vl);
+}
+
+// CHECK-RV64-LABEL: define dso_local <vscale x 8 x bfloat> @test_vfslide1up_vf_bf16m2(
+// CHECK-RV64-SAME: <vscale x 8 x bfloat> [[SRC:%.*]], bfloat noundef [[VALUE:%.*]], i64 noundef [[VL:%.*]]) #[[ATTR0]] {
+// CHECK-RV64-NEXT: [[ENTRY:.*:]]
+// CHECK-RV64-NEXT: [[TMP0:%.*]] = call <vscale x 8 x bfloat> @llvm.riscv.vfslide1up.nxv8bf16.bf16.i64(<vscale x 8 x bfloat> poison, <vscale x 8 x bfloat> [[SRC]], bfloat [[VALUE]], i64 [[VL]])
+// CHECK-RV64-NEXT: ret <vscale x 8 x bfloat> [[TMP0]]
+//
+vbfloat16m2_t test_vfslide1up_vf_bf16m2(vbfloat16m2_t src, __bf16 value, size_t vl) {
+ return __riscv_vfslide1up(src, value, vl);
+}
+
+// CHECK-RV64-LABEL: define dso_local <vscale x 16 x bfloat> @test_vfslide1up_vf_bf16m4(
+// CHECK-RV64-SAME: <vscale x 16 x bfloat> [[SRC:%.*]], bfloat noundef [[VALUE:%.*]], i64 noundef [[VL:%.*]]) #[[ATTR0]] {
+// CHECK-RV64-NEXT: [[ENTRY:.*:]]
+// CHECK-RV64-NEXT: [[TMP0:%.*]] = call <vscale x 16 x bfloat> @llvm.riscv.vfslide1up.nxv16bf16.bf16.i64(<vscale x 16 x bfloat> poison, <vscale x 16 x bfloat> [[SRC]], bfloat [[VALUE]], i64 [[VL]])
+// CHECK-RV64-NEXT: ret <vscale x 16 x bfloat> [[TMP0]]
+//
+vbfloat16m4_t test_vfslide1up_vf_bf16m4(vbfloat16m4_t src, __bf16 value, size_t vl) {
+ return __riscv_vfslide1up(src, value, vl);
+}
+
+// CHECK-RV64-LABEL: define dso_local <vscale x 32 x bfloat> @test_vfslide1up_vf_bf16m8(
+// CHECK-RV64-SAME: <vscale x 32 x bfloat> [[SRC:%.*]], bfloat noundef [[VALUE:%.*]], i64 noundef [[VL:%.*]]) #[[ATTR0]] {
+// CHECK-RV64-NEXT: [[ENTRY:.*:]]
+// CHECK-RV64-NEXT: [[TMP0:%.*]] = call <vscale x 32 x bfloat> @llvm.riscv.vfslide1up.nxv32bf16.bf16.i64(<vscale x 32 x bfloat> poison, <vscale x 32 x bfloat> [[SRC]], bfloat [[VALUE]], i64 [[VL]])
+// CHECK-RV64-NEXT: ret <vscale x 32 x bfloat> [[TMP0]]
+//
+vbfloat16m8_t test_vfslide1up_vf_bf16m8(vbfloat16m8_t src, __bf16 value, size_t vl) {
+ return __riscv_vfslide1up(src, value, vl);
+}
+
+// CHECK-RV64-LABEL: define dso_local <vscale x 1 x bfloat> @test_vfslide1up_vf_bf16mf4_m(
+// CHECK-RV64-SAME: <vscale x 1 x i1> [[MASK:%.*]], <vscale x 1 x bfloat> [[SRC:%.*]], bfloat noundef [[VALUE:%.*]], i64 noundef [[VL:%.*]]) #[[ATTR0]] {
+// CHECK-RV64-NEXT: [[ENTRY:.*:]]
+// CHECK-RV64-NEXT: [[TMP0:%.*]] = call <vscale x 1 x bfloat> @llvm.riscv.vfslide1up.mask.nxv1bf16.bf16.i64(<vscale x 1 x bfloat> poison, <vscale x 1 x bfloat> [[SRC]], bfloat [[VALUE]], <vscale x 1 x i1> [[MASK]], i64 [[VL]], i64 3)
+// CHECK-RV64-NEXT: ret <vscale x 1 x bfloat> [[TMP0]]
+//
+vbfloat16mf4_t test_vfslide1up_vf_bf16mf4_m(vbool64_t mask, vbfloat16mf4_t src, __bf16 value, size_t vl) {
+ return __riscv_vfslide1up(mask, src, value, vl);
+}
+
+// CHECK-RV64-LABEL: define dso_local <vscale x 2 x bfloat> @test_vfslide1up_vf_bf16mf2_m(
+// CHECK-RV64-SAME: <vscale x 2 x i1> [[MASK:%.*]], <vscale x 2 x bfloat> [[SRC:%.*]], bfloat noundef [[VALUE:%.*]], i64 noundef [[VL:%.*]]) #[[ATTR0]] {
+// CHECK-RV64-NEXT: [[ENTRY:.*:]]
+// CHECK-RV64-NEXT: [[TMP0:%.*]] = call <vscale x 2 x bfloat> @llvm.riscv.vfslide1up.mask.nxv2bf16.bf16.i64(<vscale x 2 x bfloat> poison, <vscale x 2 x bfloat> [[SRC]], bfloat [[VALUE]], <vscale x 2 x i1> [[MASK]], i64 [[VL]], i64 3)
+// CHECK-RV64-NEXT: ret <vscale x 2 x bfloat> [[TMP0]]
+//
+vbfloat16mf2_t test_vfslide1up_vf_bf16mf2_m(vbool32_t mask, vbfloat16mf2_t src, __bf16 value, size_t vl) {
+ return __riscv_vfslide1up(mask, src, value, vl);
+}
+
+// CHECK-RV64-LABEL: define dso_local <vscale x 4 x bfloat> @test_vfslide1up_vf_bf16m1_m(
+// CHECK-RV64-SAME: <vscale x 4 x i1> [[MASK:%.*]], <vscale x 4 x bfloat> [[SRC:%.*]], bfloat noundef [[VALUE:%.*]], i64 noundef [[VL:%.*]]) #[[ATTR0]] {
+// CHECK-RV64-NEXT: [[ENTRY:.*:]]
+// CHECK-RV64-NEXT: [[TMP0:%.*]] = call <vscale x 4 x bfloat> @llvm.riscv.vfslide1up.mask.nxv4bf16.bf16.i64(<vscale x 4 x bfloat> poison, <vscale x 4 x bfloat> [[SRC]], bfloat [[VALUE]], <vscale x 4 x i1> [[MASK]], i64 [[VL]], i64 3)
+// CHECK-RV64-NEXT: ret <vscale x 4 x bfloat> [[TMP0]]
+//
+vbfloat16m1_t test_vfslide1up_vf_bf16m1_m(vbool16_t mask, vbfloat16m1_t src, __bf16 value, size_t vl) {
+ return __riscv_vfslide1up(mask, src, value, vl);
+}
+
+// CHECK-RV64-LABEL: define dso_local <vscale x 8 x bfloat> @test_vfslide1up_vf_bf16m2_m(
+// CHECK-RV64-SAME: <vscale x 8 x i1> [[MASK:%.*]], <vscale x 8 x bfloat> [[SRC:%.*]], bfloat noundef [[VALUE:%.*]], i64 noundef [[VL:%.*]]) #[[ATTR0]] {
+// CHECK-RV64-NEXT: [[ENTRY:.*:]]
+// CHECK-RV64-NEXT: [[TMP0:%.*]] = call <vscale x 8 x bfloat> @llvm.riscv.vfslide1up.mask.nxv8bf16.bf16.i64(<vscale x 8 x bfloat> poison, <vscale x 8 x bfloat> [[SRC]], bfloat [[VALUE]], <vscale x 8 x i1> [[MASK]], i64 [[VL]], i64 3)
+// CHECK-RV64-NEXT: ret <vscale x 8 x bfloat> [[TMP0]]
+//
+vbfloat16m2_t test_vfslide1up_vf_bf16m2_m(vbool8_t mask, vbfloat16m2_t src, __bf16 value, size_t vl) {
+ return __riscv_vfslide1up(mask, src, value, vl);
+}
+
+// CHECK-RV64-LABEL: define dso_local <vscale x 16 x bfloat> @test_vfslide1up_vf_bf16m4_m(
+// CHECK-RV64-SAME: <vscale x 16 x i1> [[MASK:%.*]], <vscale x 16 x bfloat> [[SRC:%.*]], bfloat noundef [[VALUE:%.*]], i64 noundef [[VL:%.*]]) #[[ATTR0]] {
+// CHECK-RV64-NEXT: [[ENTRY:.*:]]
+// CHECK-RV64-NEXT: [[TMP0:%.*]] = call <vscale x 16 x bfloat> @llvm.riscv.vfslide1up.mask.nxv16bf16.bf16.i64(<vscale x 16 x bfloat> poison, <vscale x 16 x bfloat> [[SRC]], bfloat [[VALUE]], <vscale x 16 x i1> [[MASK]], i64 [[VL]], i64 3)
+// CHECK-RV64-NEXT: ret <vscale x 16 x bfloat> [[TMP0]]
+//
+vbfloat16m4_t test_vfslide1up_vf_bf16m4_m(vbool4_t mask, vbfloat16m4_t src, __bf16 value, size_t vl) {
+ return __riscv_vfslide1up(mask, src, value, vl);
+}
+
+// CHECK-RV64-LABEL: define dso_local <vscale x 32 x bfloat> @test_vfslide1up_vf_bf16m8_m(
+// CHECK-RV64-SAME: <vscale x 32 x i1> [[MASK:%.*]], <vscale x 32 x bfloat> [[SRC:%.*]], bfloat noundef [[VALUE:%.*]], i64 noundef [[VL:%.*]]) #[[ATTR0]] {
+// CHECK-RV64-NEXT: [[ENTRY:.*:]]
+// CHECK-RV64-NEXT: [[TMP0:%.*]] = call <vscale x 32 x bfloat> @llvm.riscv.vfslide1up.mask.nxv32bf16.bf16.i64(<vscale x 32 x bfloat> poison, <vscale x 32 x bfloat> [[SRC]], bfloat [[VALUE]], <vscale x 32 x i1> [[MASK]], i64 [[VL]], i64 3)
+// CHECK-RV64-NEXT: ret <vscale x 32 x bfloat> [[TMP0]]
+//
+vbfloat16m8_t test_vfslide1up_vf_bf16m8_m(vbool2_t mask, vbfloat16m8_t src, __bf16 value, size_t vl) {
+ return __riscv_vfslide1up(mask, src, value, vl);
+}
+
diff --git a/clang/test/CodeGen/RISCV/rvv-intrinsics-autogenerated/zvfbfa/non-policy/overloaded/vfsub.c b/clang/test/CodeGen/RISCV/rvv-intrinsics-autogenerated/zvfbfa/non-policy/overloaded/vfsub.c
new file mode 100644
index 0000000..ebcf6fa
--- /dev/null
+++ b/clang/test/CodeGen/RISCV/rvv-intrinsics-autogenerated/zvfbfa/non-policy/overloaded/vfsub.c
@@ -0,0 +1,249 @@
+// NOTE: Assertions have been autogenerated by utils/update_cc_test_checks.py UTC_ARGS: --version 5
+// REQUIRES: riscv-registered-target
+// RUN: %clang_cc1 -triple riscv64 -target-feature +v \
+// RUN: -target-feature +experimental-zvfbfa -disable-O0-optnone \
+// RUN: -emit-llvm %s -o - | opt -S -passes=mem2reg | \
+// RUN: FileCheck --check-prefix=CHECK-RV64 %s
+
+#include <riscv_vector.h>
+
+// CHECK-RV64-LABEL: define dso_local <vscale x 1 x bfloat> @test_vfsub_vv_bf16mf4(
+// CHECK-RV64-SAME: <vscale x 1 x bfloat> [[OP1:%.*]], <vscale x 1 x bfloat> [[OP2:%.*]], i64 noundef [[VL:%.*]]) #[[ATTR0:[0-9]+]] {
+// CHECK-RV64-NEXT: [[ENTRY:.*:]]
+// CHECK-RV64-NEXT: [[TMP0:%.*]] = call <vscale x 1 x bfloat> @llvm.riscv.vfsub.nxv1bf16.nxv1bf16.i64(<vscale x 1 x bfloat> poison, <vscale x 1 x bfloat> [[OP1]], <vscale x 1 x bfloat> [[OP2]], i64 7, i64 [[VL]])
+// CHECK-RV64-NEXT: ret <vscale x 1 x bfloat> [[TMP0]]
+//
+vbfloat16mf4_t test_vfsub_vv_bf16mf4(vbfloat16mf4_t op1, vbfloat16mf4_t op2, size_t vl) {
+ return __riscv_vfsub(op1, op2, vl);
+}
+
+// CHECK-RV64-LABEL: define dso_local <vscale x 1 x bfloat> @test_vfsub_vf_bf16mf4(
+// CHECK-RV64-SAME: <vscale x 1 x bfloat> [[OP1:%.*]], bfloat noundef [[OP2:%.*]], i64 noundef [[VL:%.*]]) #[[ATTR0]] {
+// CHECK-RV64-NEXT: [[ENTRY:.*:]]
+// CHECK-RV64-NEXT: [[TMP0:%.*]] = call <vscale x 1 x bfloat> @llvm.riscv.vfsub.nxv1bf16.bf16.i64(<vscale x 1 x bfloat> poison, <vscale x 1 x bfloat> [[OP1]], bfloat [[OP2]], i64 7, i64 [[VL]])
+// CHECK-RV64-NEXT: ret <vscale x 1 x bfloat> [[TMP0]]
+//
+vbfloat16mf4_t test_vfsub_vf_bf16mf4(vbfloat16mf4_t op1, __bf16 op2, size_t vl) {
+ return __riscv_vfsub(op1, op2, vl);
+}
+
+// CHECK-RV64-LABEL: define dso_local <vscale x 2 x bfloat> @test_vfsub_vv_bf16mf2(
+// CHECK-RV64-SAME: <vscale x 2 x bfloat> [[OP1:%.*]], <vscale x 2 x bfloat> [[OP2:%.*]], i64 noundef [[VL:%.*]]) #[[ATTR0]] {
+// CHECK-RV64-NEXT: [[ENTRY:.*:]]
+// CHECK-RV64-NEXT: [[TMP0:%.*]] = call <vscale x 2 x bfloat> @llvm.riscv.vfsub.nxv2bf16.nxv2bf16.i64(<vscale x 2 x bfloat> poison, <vscale x 2 x bfloat> [[OP1]], <vscale x 2 x bfloat> [[OP2]], i64 7, i64 [[VL]])
+// CHECK-RV64-NEXT: ret <vscale x 2 x bfloat> [[TMP0]]
+//
+vbfloat16mf2_t test_vfsub_vv_bf16mf2(vbfloat16mf2_t op1, vbfloat16mf2_t op2, size_t vl) {
+ return __riscv_vfsub(op1, op2, vl);
+}
+
+// CHECK-RV64-LABEL: define dso_local <vscale x 2 x bfloat> @test_vfsub_vf_bf16mf2(
+// CHECK-RV64-SAME: <vscale x 2 x bfloat> [[OP1:%.*]], bfloat noundef [[OP2:%.*]], i64 noundef [[VL:%.*]]) #[[ATTR0]] {
+// CHECK-RV64-NEXT: [[ENTRY:.*:]]
+// CHECK-RV64-NEXT: [[TMP0:%.*]] = call <vscale x 2 x bfloat> @llvm.riscv.vfsub.nxv2bf16.bf16.i64(<vscale x 2 x bfloat> poison, <vscale x 2 x bfloat> [[OP1]], bfloat [[OP2]], i64 7, i64 [[VL]])
+// CHECK-RV64-NEXT: ret <vscale x 2 x bfloat> [[TMP0]]
+//
+vbfloat16mf2_t test_vfsub_vf_bf16mf2(vbfloat16mf2_t op1, __bf16 op2, size_t vl) {
+ return __riscv_vfsub(op1, op2, vl);
+}
+
+// CHECK-RV64-LABEL: define dso_local <vscale x 4 x bfloat> @test_vfsub_vv_bf16m1(
+// CHECK-RV64-SAME: <vscale x 4 x bfloat> [[OP1:%.*]], <vscale x 4 x bfloat> [[OP2:%.*]], i64 noundef [[VL:%.*]]) #[[ATTR0]] {
+// CHECK-RV64-NEXT: [[ENTRY:.*:]]
+// CHECK-RV64-NEXT: [[TMP0:%.*]] = call <vscale x 4 x bfloat> @llvm.riscv.vfsub.nxv4bf16.nxv4bf16.i64(<vscale x 4 x bfloat> poison, <vscale x 4 x bfloat> [[OP1]], <vscale x 4 x bfloat> [[OP2]], i64 7, i64 [[VL]])
+// CHECK-RV64-NEXT: ret <vscale x 4 x bfloat> [[TMP0]]
+//
+vbfloat16m1_t test_vfsub_vv_bf16m1(vbfloat16m1_t op1, vbfloat16m1_t op2, size_t vl) {
+ return __riscv_vfsub(op1, op2, vl);
+}
+
+// CHECK-RV64-LABEL: define dso_local <vscale x 4 x bfloat> @test_vfsub_vf_bf16m1(
+// CHECK-RV64-SAME: <vscale x 4 x bfloat> [[OP1:%.*]], bfloat noundef [[OP2:%.*]], i64 noundef [[VL:%.*]]) #[[ATTR0]] {
+// CHECK-RV64-NEXT: [[ENTRY:.*:]]
+// CHECK-RV64-NEXT: [[TMP0:%.*]] = call <vscale x 4 x bfloat> @llvm.riscv.vfsub.nxv4bf16.bf16.i64(<vscale x 4 x bfloat> poison, <vscale x 4 x bfloat> [[OP1]], bfloat [[OP2]], i64 7, i64 [[VL]])
+// CHECK-RV64-NEXT: ret <vscale x 4 x bfloat> [[TMP0]]
+//
+vbfloat16m1_t test_vfsub_vf_bf16m1(vbfloat16m1_t op1, __bf16 op2, size_t vl) {
+ return __riscv_vfsub(op1, op2, vl);
+}
+
+// CHECK-RV64-LABEL: define dso_local <vscale x 8 x bfloat> @test_vfsub_vv_bf16m2(
+// CHECK-RV64-SAME: <vscale x 8 x bfloat> [[OP1:%.*]], <vscale x 8 x bfloat> [[OP2:%.*]], i64 noundef [[VL:%.*]]) #[[ATTR0]] {
+// CHECK-RV64-NEXT: [[ENTRY:.*:]]
+// CHECK-RV64-NEXT: [[TMP0:%.*]] = call <vscale x 8 x bfloat> @llvm.riscv.vfsub.nxv8bf16.nxv8bf16.i64(<vscale x 8 x bfloat> poison, <vscale x 8 x bfloat> [[OP1]], <vscale x 8 x bfloat> [[OP2]], i64 7, i64 [[VL]])
+// CHECK-RV64-NEXT: ret <vscale x 8 x bfloat> [[TMP0]]
+//
+vbfloat16m2_t test_vfsub_vv_bf16m2(vbfloat16m2_t op1, vbfloat16m2_t op2, size_t vl) {
+ return __riscv_vfsub(op1, op2, vl);
+}
+
+// CHECK-RV64-LABEL: define dso_local <vscale x 8 x bfloat> @test_vfsub_vf_bf16m2(
+// CHECK-RV64-SAME: <vscale x 8 x bfloat> [[OP1:%.*]], bfloat noundef [[OP2:%.*]], i64 noundef [[VL:%.*]]) #[[ATTR0]] {
+// CHECK-RV64-NEXT: [[ENTRY:.*:]]
+// CHECK-RV64-NEXT: [[TMP0:%.*]] = call <vscale x 8 x bfloat> @llvm.riscv.vfsub.nxv8bf16.bf16.i64(<vscale x 8 x bfloat> poison, <vscale x 8 x bfloat> [[OP1]], bfloat [[OP2]], i64 7, i64 [[VL]])
+// CHECK-RV64-NEXT: ret <vscale x 8 x bfloat> [[TMP0]]
+//
+vbfloat16m2_t test_vfsub_vf_bf16m2(vbfloat16m2_t op1, __bf16 op2, size_t vl) {
+ return __riscv_vfsub(op1, op2, vl);
+}
+
+// CHECK-RV64-LABEL: define dso_local <vscale x 16 x bfloat> @test_vfsub_vv_bf16m4(
+// CHECK-RV64-SAME: <vscale x 16 x bfloat> [[OP1:%.*]], <vscale x 16 x bfloat> [[OP2:%.*]], i64 noundef [[VL:%.*]]) #[[ATTR0]] {
+// CHECK-RV64-NEXT: [[ENTRY:.*:]]
+// CHECK-RV64-NEXT: [[TMP0:%.*]] = call <vscale x 16 x bfloat> @llvm.riscv.vfsub.nxv16bf16.nxv16bf16.i64(<vscale x 16 x bfloat> poison, <vscale x 16 x bfloat> [[OP1]], <vscale x 16 x bfloat> [[OP2]], i64 7, i64 [[VL]])
+// CHECK-RV64-NEXT: ret <vscale x 16 x bfloat> [[TMP0]]
+//
+vbfloat16m4_t test_vfsub_vv_bf16m4(vbfloat16m4_t op1, vbfloat16m4_t op2, size_t vl) {
+ return __riscv_vfsub(op1, op2, vl);
+}
+
+// CHECK-RV64-LABEL: define dso_local <vscale x 16 x bfloat> @test_vfsub_vf_bf16m4(
+// CHECK-RV64-SAME: <vscale x 16 x bfloat> [[OP1:%.*]], bfloat noundef [[OP2:%.*]], i64 noundef [[VL:%.*]]) #[[ATTR0]] {
+// CHECK-RV64-NEXT: [[ENTRY:.*:]]
+// CHECK-RV64-NEXT: [[TMP0:%.*]] = call <vscale x 16 x bfloat> @llvm.riscv.vfsub.nxv16bf16.bf16.i64(<vscale x 16 x bfloat> poison, <vscale x 16 x bfloat> [[OP1]], bfloat [[OP2]], i64 7, i64 [[VL]])
+// CHECK-RV64-NEXT: ret <vscale x 16 x bfloat> [[TMP0]]
+//
+vbfloat16m4_t test_vfsub_vf_bf16m4(vbfloat16m4_t op1, __bf16 op2, size_t vl) {
+ return __riscv_vfsub(op1, op2, vl);
+}
+
+// CHECK-RV64-LABEL: define dso_local <vscale x 32 x bfloat> @test_vfsub_vv_bf16m8(
+// CHECK-RV64-SAME: <vscale x 32 x bfloat> [[OP1:%.*]], <vscale x 32 x bfloat> [[OP2:%.*]], i64 noundef [[VL:%.*]]) #[[ATTR0]] {
+// CHECK-RV64-NEXT: [[ENTRY:.*:]]
+// CHECK-RV64-NEXT: [[TMP0:%.*]] = call <vscale x 32 x bfloat> @llvm.riscv.vfsub.nxv32bf16.nxv32bf16.i64(<vscale x 32 x bfloat> poison, <vscale x 32 x bfloat> [[OP1]], <vscale x 32 x bfloat> [[OP2]], i64 7, i64 [[VL]])
+// CHECK-RV64-NEXT: ret <vscale x 32 x bfloat> [[TMP0]]
+//
+vbfloat16m8_t test_vfsub_vv_bf16m8(vbfloat16m8_t op1, vbfloat16m8_t op2, size_t vl) {
+ return __riscv_vfsub(op1, op2, vl);
+}
+
+// CHECK-RV64-LABEL: define dso_local <vscale x 32 x bfloat> @test_vfsub_vf_bf16m8(
+// CHECK-RV64-SAME: <vscale x 32 x bfloat> [[OP1:%.*]], bfloat noundef [[OP2:%.*]], i64 noundef [[VL:%.*]]) #[[ATTR0]] {
+// CHECK-RV64-NEXT: [[ENTRY:.*:]]
+// CHECK-RV64-NEXT: [[TMP0:%.*]] = call <vscale x 32 x bfloat> @llvm.riscv.vfsub.nxv32bf16.bf16.i64(<vscale x 32 x bfloat> poison, <vscale x 32 x bfloat> [[OP1]], bfloat [[OP2]], i64 7, i64 [[VL]])
+// CHECK-RV64-NEXT: ret <vscale x 32 x bfloat> [[TMP0]]
+//
+vbfloat16m8_t test_vfsub_vf_bf16m8(vbfloat16m8_t op1, __bf16 op2, size_t vl) {
+ return __riscv_vfsub(op1, op2, vl);
+}
+
+// CHECK-RV64-LABEL: define dso_local <vscale x 1 x bfloat> @test_vfsub_vv_bf16mf4_m(
+// CHECK-RV64-SAME: <vscale x 1 x i1> [[MASK:%.*]], <vscale x 1 x bfloat> [[OP1:%.*]], <vscale x 1 x bfloat> [[OP2:%.*]], i64 noundef [[VL:%.*]]) #[[ATTR0]] {
+// CHECK-RV64-NEXT: [[ENTRY:.*:]]
+// CHECK-RV64-NEXT: [[TMP0:%.*]] = call <vscale x 1 x bfloat> @llvm.riscv.vfsub.mask.nxv1bf16.nxv1bf16.i64(<vscale x 1 x bfloat> poison, <vscale x 1 x bfloat> [[OP1]], <vscale x 1 x bfloat> [[OP2]], <vscale x 1 x i1> [[MASK]], i64 7, i64 [[VL]], i64 3)
+// CHECK-RV64-NEXT: ret <vscale x 1 x bfloat> [[TMP0]]
+//
+vbfloat16mf4_t test_vfsub_vv_bf16mf4_m(vbool64_t mask, vbfloat16mf4_t op1, vbfloat16mf4_t op2, size_t vl) {
+ return __riscv_vfsub(mask, op1, op2, vl);
+}
+
+// CHECK-RV64-LABEL: define dso_local <vscale x 1 x bfloat> @test_vfsub_vf_bf16mf4_m(
+// CHECK-RV64-SAME: <vscale x 1 x i1> [[MASK:%.*]], <vscale x 1 x bfloat> [[OP1:%.*]], bfloat noundef [[OP2:%.*]], i64 noundef [[VL:%.*]]) #[[ATTR0]] {
+// CHECK-RV64-NEXT: [[ENTRY:.*:]]
+// CHECK-RV64-NEXT: [[TMP0:%.*]] = call <vscale x 1 x bfloat> @llvm.riscv.vfsub.mask.nxv1bf16.bf16.i64(<vscale x 1 x bfloat> poison, <vscale x 1 x bfloat> [[OP1]], bfloat [[OP2]], <vscale x 1 x i1> [[MASK]], i64 7, i64 [[VL]], i64 3)
+// CHECK-RV64-NEXT: ret <vscale x 1 x bfloat> [[TMP0]]
+//
+vbfloat16mf4_t test_vfsub_vf_bf16mf4_m(vbool64_t mask, vbfloat16mf4_t op1, __bf16 op2, size_t vl) {
+ return __riscv_vfsub(mask, op1, op2, vl);
+}
+
+// CHECK-RV64-LABEL: define dso_local <vscale x 2 x bfloat> @test_vfsub_vv_bf16mf2_m(
+// CHECK-RV64-SAME: <vscale x 2 x i1> [[MASK:%.*]], <vscale x 2 x bfloat> [[OP1:%.*]], <vscale x 2 x bfloat> [[OP2:%.*]], i64 noundef [[VL:%.*]]) #[[ATTR0]] {
+// CHECK-RV64-NEXT: [[ENTRY:.*:]]
+// CHECK-RV64-NEXT: [[TMP0:%.*]] = call <vscale x 2 x bfloat> @llvm.riscv.vfsub.mask.nxv2bf16.nxv2bf16.i64(<vscale x 2 x bfloat> poison, <vscale x 2 x bfloat> [[OP1]], <vscale x 2 x bfloat> [[OP2]], <vscale x 2 x i1> [[MASK]], i64 7, i64 [[VL]], i64 3)
+// CHECK-RV64-NEXT: ret <vscale x 2 x bfloat> [[TMP0]]
+//
+vbfloat16mf2_t test_vfsub_vv_bf16mf2_m(vbool32_t mask, vbfloat16mf2_t op1, vbfloat16mf2_t op2, size_t vl) {
+ return __riscv_vfsub(mask, op1, op2, vl);
+}
+
+// CHECK-RV64-LABEL: define dso_local <vscale x 2 x bfloat> @test_vfsub_vf_bf16mf2_m(
+// CHECK-RV64-SAME: <vscale x 2 x i1> [[MASK:%.*]], <vscale x 2 x bfloat> [[OP1:%.*]], bfloat noundef [[OP2:%.*]], i64 noundef [[VL:%.*]]) #[[ATTR0]] {
+// CHECK-RV64-NEXT: [[ENTRY:.*:]]
+// CHECK-RV64-NEXT: [[TMP0:%.*]] = call <vscale x 2 x bfloat> @llvm.riscv.vfsub.mask.nxv2bf16.bf16.i64(<vscale x 2 x bfloat> poison, <vscale x 2 x bfloat> [[OP1]], bfloat [[OP2]], <vscale x 2 x i1> [[MASK]], i64 7, i64 [[VL]], i64 3)
+// CHECK-RV64-NEXT: ret <vscale x 2 x bfloat> [[TMP0]]
+//
+vbfloat16mf2_t test_vfsub_vf_bf16mf2_m(vbool32_t mask, vbfloat16mf2_t op1, __bf16 op2, size_t vl) {
+ return __riscv_vfsub(mask, op1, op2, vl);
+}
+
+// CHECK-RV64-LABEL: define dso_local <vscale x 4 x bfloat> @test_vfsub_vv_bf16m1_m(
+// CHECK-RV64-SAME: <vscale x 4 x i1> [[MASK:%.*]], <vscale x 4 x bfloat> [[OP1:%.*]], <vscale x 4 x bfloat> [[OP2:%.*]], i64 noundef [[VL:%.*]]) #[[ATTR0]] {
+// CHECK-RV64-NEXT: [[ENTRY:.*:]]
+// CHECK-RV64-NEXT: [[TMP0:%.*]] = call <vscale x 4 x bfloat> @llvm.riscv.vfsub.mask.nxv4bf16.nxv4bf16.i64(<vscale x 4 x bfloat> poison, <vscale x 4 x bfloat> [[OP1]], <vscale x 4 x bfloat> [[OP2]], <vscale x 4 x i1> [[MASK]], i64 7, i64 [[VL]], i64 3)
+// CHECK-RV64-NEXT: ret <vscale x 4 x bfloat> [[TMP0]]
+//
+vbfloat16m1_t test_vfsub_vv_bf16m1_m(vbool16_t mask, vbfloat16m1_t op1, vbfloat16m1_t op2, size_t vl) {
+ return __riscv_vfsub(mask, op1, op2, vl);
+}
+
+// CHECK-RV64-LABEL: define dso_local <vscale x 4 x bfloat> @test_vfsub_vf_bf16m1_m(
+// CHECK-RV64-SAME: <vscale x 4 x i1> [[MASK:%.*]], <vscale x 4 x bfloat> [[OP1:%.*]], bfloat noundef [[OP2:%.*]], i64 noundef [[VL:%.*]]) #[[ATTR0]] {
+// CHECK-RV64-NEXT: [[ENTRY:.*:]]
+// CHECK-RV64-NEXT: [[TMP0:%.*]] = call <vscale x 4 x bfloat> @llvm.riscv.vfsub.mask.nxv4bf16.bf16.i64(<vscale x 4 x bfloat> poison, <vscale x 4 x bfloat> [[OP1]], bfloat [[OP2]], <vscale x 4 x i1> [[MASK]], i64 7, i64 [[VL]], i64 3)
+// CHECK-RV64-NEXT: ret <vscale x 4 x bfloat> [[TMP0]]
+//
+vbfloat16m1_t test_vfsub_vf_bf16m1_m(vbool16_t mask, vbfloat16m1_t op1, __bf16 op2, size_t vl) {
+ return __riscv_vfsub(mask, op1, op2, vl);
+}
+
+// CHECK-RV64-LABEL: define dso_local <vscale x 8 x bfloat> @test_vfsub_vv_bf16m2_m(
+// CHECK-RV64-SAME: <vscale x 8 x i1> [[MASK:%.*]], <vscale x 8 x bfloat> [[OP1:%.*]], <vscale x 8 x bfloat> [[OP2:%.*]], i64 noundef [[VL:%.*]]) #[[ATTR0]] {
+// CHECK-RV64-NEXT: [[ENTRY:.*:]]
+// CHECK-RV64-NEXT: [[TMP0:%.*]] = call <vscale x 8 x bfloat> @llvm.riscv.vfsub.mask.nxv8bf16.nxv8bf16.i64(<vscale x 8 x bfloat> poison, <vscale x 8 x bfloat> [[OP1]], <vscale x 8 x bfloat> [[OP2]], <vscale x 8 x i1> [[MASK]], i64 7, i64 [[VL]], i64 3)
+// CHECK-RV64-NEXT: ret <vscale x 8 x bfloat> [[TMP0]]
+//
+vbfloat16m2_t test_vfsub_vv_bf16m2_m(vbool8_t mask, vbfloat16m2_t op1, vbfloat16m2_t op2, size_t vl) {
+ return __riscv_vfsub(mask, op1, op2, vl);
+}
+
+// CHECK-RV64-LABEL: define dso_local <vscale x 8 x bfloat> @test_vfsub_vf_bf16m2_m(
+// CHECK-RV64-SAME: <vscale x 8 x i1> [[MASK:%.*]], <vscale x 8 x bfloat> [[OP1:%.*]], bfloat noundef [[OP2:%.*]], i64 noundef [[VL:%.*]]) #[[ATTR0]] {
+// CHECK-RV64-NEXT: [[ENTRY:.*:]]
+// CHECK-RV64-NEXT: [[TMP0:%.*]] = call <vscale x 8 x bfloat> @llvm.riscv.vfsub.mask.nxv8bf16.bf16.i64(<vscale x 8 x bfloat> poison, <vscale x 8 x bfloat> [[OP1]], bfloat [[OP2]], <vscale x 8 x i1> [[MASK]], i64 7, i64 [[VL]], i64 3)
+// CHECK-RV64-NEXT: ret <vscale x 8 x bfloat> [[TMP0]]
+//
+vbfloat16m2_t test_vfsub_vf_bf16m2_m(vbool8_t mask, vbfloat16m2_t op1, __bf16 op2, size_t vl) {
+ return __riscv_vfsub(mask, op1, op2, vl);
+}
+
+// CHECK-RV64-LABEL: define dso_local <vscale x 16 x bfloat> @test_vfsub_vv_bf16m4_m(
+// CHECK-RV64-SAME: <vscale x 16 x i1> [[MASK:%.*]], <vscale x 16 x bfloat> [[OP1:%.*]], <vscale x 16 x bfloat> [[OP2:%.*]], i64 noundef [[VL:%.*]]) #[[ATTR0]] {
+// CHECK-RV64-NEXT: [[ENTRY:.*:]]
+// CHECK-RV64-NEXT: [[TMP0:%.*]] = call <vscale x 16 x bfloat> @llvm.riscv.vfsub.mask.nxv16bf16.nxv16bf16.i64(<vscale x 16 x bfloat> poison, <vscale x 16 x bfloat> [[OP1]], <vscale x 16 x bfloat> [[OP2]], <vscale x 16 x i1> [[MASK]], i64 7, i64 [[VL]], i64 3)
+// CHECK-RV64-NEXT: ret <vscale x 16 x bfloat> [[TMP0]]
+//
+vbfloat16m4_t test_vfsub_vv_bf16m4_m(vbool4_t mask, vbfloat16m4_t op1, vbfloat16m4_t op2, size_t vl) {
+ return __riscv_vfsub(mask, op1, op2, vl);
+}
+
+// CHECK-RV64-LABEL: define dso_local <vscale x 16 x bfloat> @test_vfsub_vf_bf16m4_m(
+// CHECK-RV64-SAME: <vscale x 16 x i1> [[MASK:%.*]], <vscale x 16 x bfloat> [[OP1:%.*]], bfloat noundef [[OP2:%.*]], i64 noundef [[VL:%.*]]) #[[ATTR0]] {
+// CHECK-RV64-NEXT: [[ENTRY:.*:]]
+// CHECK-RV64-NEXT: [[TMP0:%.*]] = call <vscale x 16 x bfloat> @llvm.riscv.vfsub.mask.nxv16bf16.bf16.i64(<vscale x 16 x bfloat> poison, <vscale x 16 x bfloat> [[OP1]], bfloat [[OP2]], <vscale x 16 x i1> [[MASK]], i64 7, i64 [[VL]], i64 3)
+// CHECK-RV64-NEXT: ret <vscale x 16 x bfloat> [[TMP0]]
+//
+vbfloat16m4_t test_vfsub_vf_bf16m4_m(vbool4_t mask, vbfloat16m4_t op1, __bf16 op2, size_t vl) {
+ return __riscv_vfsub(mask, op1, op2, vl);
+}
+
+// CHECK-RV64-LABEL: define dso_local <vscale x 32 x bfloat> @test_vfsub_vv_bf16m8_m(
+// CHECK-RV64-SAME: <vscale x 32 x i1> [[MASK:%.*]], <vscale x 32 x bfloat> [[OP1:%.*]], <vscale x 32 x bfloat> [[OP2:%.*]], i64 noundef [[VL:%.*]]) #[[ATTR0]] {
+// CHECK-RV64-NEXT: [[ENTRY:.*:]]
+// CHECK-RV64-NEXT: [[TMP0:%.*]] = call <vscale x 32 x bfloat> @llvm.riscv.vfsub.mask.nxv32bf16.nxv32bf16.i64(<vscale x 32 x bfloat> poison, <vscale x 32 x bfloat> [[OP1]], <vscale x 32 x bfloat> [[OP2]], <vscale x 32 x i1> [[MASK]], i64 7, i64 [[VL]], i64 3)
+// CHECK-RV64-NEXT: ret <vscale x 32 x bfloat> [[TMP0]]
+//
+vbfloat16m8_t test_vfsub_vv_bf16m8_m(vbool2_t mask, vbfloat16m8_t op1, vbfloat16m8_t op2, size_t vl) {
+ return __riscv_vfsub(mask, op1, op2, vl);
+}
+
+// CHECK-RV64-LABEL: define dso_local <vscale x 32 x bfloat> @test_vfsub_vf_bf16m8_m(
+// CHECK-RV64-SAME: <vscale x 32 x i1> [[MASK:%.*]], <vscale x 32 x bfloat> [[OP1:%.*]], bfloat noundef [[OP2:%.*]], i64 noundef [[VL:%.*]]) #[[ATTR0]] {
+// CHECK-RV64-NEXT: [[ENTRY:.*:]]
+// CHECK-RV64-NEXT: [[TMP0:%.*]] = call <vscale x 32 x bfloat> @llvm.riscv.vfsub.mask.nxv32bf16.bf16.i64(<vscale x 32 x bfloat> poison, <vscale x 32 x bfloat> [[OP1]], bfloat [[OP2]], <vscale x 32 x i1> [[MASK]], i64 7, i64 [[VL]], i64 3)
+// CHECK-RV64-NEXT: ret <vscale x 32 x bfloat> [[TMP0]]
+//
+vbfloat16m8_t test_vfsub_vf_bf16m8_m(vbool2_t mask, vbfloat16m8_t op1, __bf16 op2, size_t vl) {
+ return __riscv_vfsub(mask, op1, op2, vl);
+}
+
diff --git a/clang/test/CodeGen/RISCV/rvv-intrinsics-autogenerated/zvfbfa/non-policy/overloaded/vfwadd.c b/clang/test/CodeGen/RISCV/rvv-intrinsics-autogenerated/zvfbfa/non-policy/overloaded/vfwadd.c
new file mode 100644
index 0000000..124e7fb
--- /dev/null
+++ b/clang/test/CodeGen/RISCV/rvv-intrinsics-autogenerated/zvfbfa/non-policy/overloaded/vfwadd.c
@@ -0,0 +1,893 @@
+// NOTE: Assertions have been autogenerated by utils/update_cc_test_checks.py UTC_ARGS: --version 5
+// REQUIRES: riscv-registered-target
+// RUN: %clang_cc1 -triple riscv64 -target-feature +v \
+// RUN: -target-feature +experimental-zvfbfa -disable-O0-optnone \
+// RUN: -emit-llvm %s -o - | opt -S -passes=mem2reg | \
+// RUN: FileCheck --check-prefix=CHECK-RV64 %s
+
+#include <riscv_vector.h>
+
+// CHECK-RV64-LABEL: define dso_local <vscale x 1 x float> @test_vfwadd_vv_bf16mf4_f32mf2(
+// CHECK-RV64-SAME: <vscale x 1 x bfloat> [[VS2:%.*]], <vscale x 1 x bfloat> [[VS1:%.*]], i64 noundef [[VL:%.*]]) #[[ATTR0:[0-9]+]] {
+// CHECK-RV64-NEXT: [[ENTRY:.*:]]
+// CHECK-RV64-NEXT: [[TMP0:%.*]] = call <vscale x 1 x float> @llvm.riscv.vfwadd.nxv1f32.nxv1bf16.nxv1bf16.i64(<vscale x 1 x float> poison, <vscale x 1 x bfloat> [[VS2]], <vscale x 1 x bfloat> [[VS1]], i64 7, i64 [[VL]])
+// CHECK-RV64-NEXT: ret <vscale x 1 x float> [[TMP0]]
+//
+vfloat32mf2_t test_vfwadd_vv_bf16mf4_f32mf2(vbfloat16mf4_t vs2,
+ vbfloat16mf4_t vs1, size_t vl) {
+ return __riscv_vfwadd_vv(vs2, vs1, vl);
+}
+
+// CHECK-RV64-LABEL: define dso_local <vscale x 1 x float> @test_vfwadd_vf_bf16mf4_f32mf2(
+// CHECK-RV64-SAME: <vscale x 1 x bfloat> [[VS2:%.*]], bfloat noundef [[RS1:%.*]], i64 noundef [[VL:%.*]]) #[[ATTR0]] {
+// CHECK-RV64-NEXT: [[ENTRY:.*:]]
+// CHECK-RV64-NEXT: [[TMP0:%.*]] = call <vscale x 1 x float> @llvm.riscv.vfwadd.nxv1f32.nxv1bf16.bf16.i64(<vscale x 1 x float> poison, <vscale x 1 x bfloat> [[VS2]], bfloat [[RS1]], i64 7, i64 [[VL]])
+// CHECK-RV64-NEXT: ret <vscale x 1 x float> [[TMP0]]
+//
+vfloat32mf2_t test_vfwadd_vf_bf16mf4_f32mf2(vbfloat16mf4_t vs2, __bf16 rs1,
+ size_t vl) {
+ return __riscv_vfwadd_vf(vs2, rs1, vl);
+}
+
+// CHECK-RV64-LABEL: define dso_local <vscale x 1 x float> @test_vfwadd_wv_bf16mf4_f32mf2(
+// CHECK-RV64-SAME: <vscale x 1 x float> [[VS2:%.*]], <vscale x 1 x bfloat> [[VS1:%.*]], i64 noundef [[VL:%.*]]) #[[ATTR0]] {
+// CHECK-RV64-NEXT: [[ENTRY:.*:]]
+// CHECK-RV64-NEXT: [[TMP0:%.*]] = call <vscale x 1 x float> @llvm.riscv.vfwadd.w.nxv1f32.nxv1bf16.i64(<vscale x 1 x float> poison, <vscale x 1 x float> [[VS2]], <vscale x 1 x bfloat> [[VS1]], i64 7, i64 [[VL]])
+// CHECK-RV64-NEXT: ret <vscale x 1 x float> [[TMP0]]
+//
+vfloat32mf2_t test_vfwadd_wv_bf16mf4_f32mf2(vfloat32mf2_t vs2,
+ vbfloat16mf4_t vs1, size_t vl) {
+ return __riscv_vfwadd_wv(vs2, vs1, vl);
+}
+
+// CHECK-RV64-LABEL: define dso_local <vscale x 1 x float> @test_vfwadd_wf_bf16_f32mf2(
+// CHECK-RV64-SAME: <vscale x 1 x float> [[VS2:%.*]], bfloat noundef [[RS1:%.*]], i64 noundef [[VL:%.*]]) #[[ATTR0]] {
+// CHECK-RV64-NEXT: [[ENTRY:.*:]]
+// CHECK-RV64-NEXT: [[TMP0:%.*]] = call <vscale x 1 x float> @llvm.riscv.vfwadd.w.nxv1f32.bf16.i64(<vscale x 1 x float> poison, <vscale x 1 x float> [[VS2]], bfloat [[RS1]], i64 7, i64 [[VL]])
+// CHECK-RV64-NEXT: ret <vscale x 1 x float> [[TMP0]]
+//
+vfloat32mf2_t test_vfwadd_wf_bf16_f32mf2(vfloat32mf2_t vs2, __bf16 rs1,
+ size_t vl) {
+ return __riscv_vfwadd_wf(vs2, rs1, vl);
+}
+
+// CHECK-RV64-LABEL: define dso_local <vscale x 2 x float> @test_vfwadd_vv_bf16mf2_f32m1(
+// CHECK-RV64-SAME: <vscale x 2 x bfloat> [[VS2:%.*]], <vscale x 2 x bfloat> [[VS1:%.*]], i64 noundef [[VL:%.*]]) #[[ATTR0]] {
+// CHECK-RV64-NEXT: [[ENTRY:.*:]]
+// CHECK-RV64-NEXT: [[TMP0:%.*]] = call <vscale x 2 x float> @llvm.riscv.vfwadd.nxv2f32.nxv2bf16.nxv2bf16.i64(<vscale x 2 x float> poison, <vscale x 2 x bfloat> [[VS2]], <vscale x 2 x bfloat> [[VS1]], i64 7, i64 [[VL]])
+// CHECK-RV64-NEXT: ret <vscale x 2 x float> [[TMP0]]
+//
+vfloat32m1_t test_vfwadd_vv_bf16mf2_f32m1(vbfloat16mf2_t vs2,
+ vbfloat16mf2_t vs1, size_t vl) {
+ return __riscv_vfwadd_vv(vs2, vs1, vl);
+}
+
+// CHECK-RV64-LABEL: define dso_local <vscale x 2 x float> @test_vfwadd_vf_bf16mf2_f32m1(
+// CHECK-RV64-SAME: <vscale x 2 x bfloat> [[VS2:%.*]], bfloat noundef [[RS1:%.*]], i64 noundef [[VL:%.*]]) #[[ATTR0]] {
+// CHECK-RV64-NEXT: [[ENTRY:.*:]]
+// CHECK-RV64-NEXT: [[TMP0:%.*]] = call <vscale x 2 x float> @llvm.riscv.vfwadd.nxv2f32.nxv2bf16.bf16.i64(<vscale x 2 x float> poison, <vscale x 2 x bfloat> [[VS2]], bfloat [[RS1]], i64 7, i64 [[VL]])
+// CHECK-RV64-NEXT: ret <vscale x 2 x float> [[TMP0]]
+//
+vfloat32m1_t test_vfwadd_vf_bf16mf2_f32m1(vbfloat16mf2_t vs2, __bf16 rs1,
+ size_t vl) {
+ return __riscv_vfwadd_vf(vs2, rs1, vl);
+}
+
+// CHECK-RV64-LABEL: define dso_local <vscale x 2 x float> @test_vfwadd_wv_bf16mf2_f32m1(
+// CHECK-RV64-SAME: <vscale x 2 x float> [[VS2:%.*]], <vscale x 2 x bfloat> [[VS1:%.*]], i64 noundef [[VL:%.*]]) #[[ATTR0]] {
+// CHECK-RV64-NEXT: [[ENTRY:.*:]]
+// CHECK-RV64-NEXT: [[TMP0:%.*]] = call <vscale x 2 x float> @llvm.riscv.vfwadd.w.nxv2f32.nxv2bf16.i64(<vscale x 2 x float> poison, <vscale x 2 x float> [[VS2]], <vscale x 2 x bfloat> [[VS1]], i64 7, i64 [[VL]])
+// CHECK-RV64-NEXT: ret <vscale x 2 x float> [[TMP0]]
+//
+vfloat32m1_t test_vfwadd_wv_bf16mf2_f32m1(vfloat32m1_t vs2, vbfloat16mf2_t vs1,
+ size_t vl) {
+ return __riscv_vfwadd_wv(vs2, vs1, vl);
+}
+
+// CHECK-RV64-LABEL: define dso_local <vscale x 2 x float> @test_vfwadd_wf_bf16_f32m1(
+// CHECK-RV64-SAME: <vscale x 2 x float> [[VS2:%.*]], bfloat noundef [[RS1:%.*]], i64 noundef [[VL:%.*]]) #[[ATTR0]] {
+// CHECK-RV64-NEXT: [[ENTRY:.*:]]
+// CHECK-RV64-NEXT: [[TMP0:%.*]] = call <vscale x 2 x float> @llvm.riscv.vfwadd.w.nxv2f32.bf16.i64(<vscale x 2 x float> poison, <vscale x 2 x float> [[VS2]], bfloat [[RS1]], i64 7, i64 [[VL]])
+// CHECK-RV64-NEXT: ret <vscale x 2 x float> [[TMP0]]
+//
+vfloat32m1_t test_vfwadd_wf_bf16_f32m1(vfloat32m1_t vs2, __bf16 rs1,
+ size_t vl) {
+ return __riscv_vfwadd_wf(vs2, rs1, vl);
+}
+
+// CHECK-RV64-LABEL: define dso_local <vscale x 4 x float> @test_vfwadd_vv_bf16m1_f32m2(
+// CHECK-RV64-SAME: <vscale x 4 x bfloat> [[VS2:%.*]], <vscale x 4 x bfloat> [[VS1:%.*]], i64 noundef [[VL:%.*]]) #[[ATTR0]] {
+// CHECK-RV64-NEXT: [[ENTRY:.*:]]
+// CHECK-RV64-NEXT: [[TMP0:%.*]] = call <vscale x 4 x float> @llvm.riscv.vfwadd.nxv4f32.nxv4bf16.nxv4bf16.i64(<vscale x 4 x float> poison, <vscale x 4 x bfloat> [[VS2]], <vscale x 4 x bfloat> [[VS1]], i64 7, i64 [[VL]])
+// CHECK-RV64-NEXT: ret <vscale x 4 x float> [[TMP0]]
+//
+vfloat32m2_t test_vfwadd_vv_bf16m1_f32m2(vbfloat16m1_t vs2, vbfloat16m1_t vs1,
+ size_t vl) {
+ return __riscv_vfwadd_vv(vs2, vs1, vl);
+}
+
+// CHECK-RV64-LABEL: define dso_local <vscale x 4 x float> @test_vfwadd_vf_bf16m1_f32m2(
+// CHECK-RV64-SAME: <vscale x 4 x bfloat> [[VS2:%.*]], bfloat noundef [[RS1:%.*]], i64 noundef [[VL:%.*]]) #[[ATTR0]] {
+// CHECK-RV64-NEXT: [[ENTRY:.*:]]
+// CHECK-RV64-NEXT: [[TMP0:%.*]] = call <vscale x 4 x float> @llvm.riscv.vfwadd.nxv4f32.nxv4bf16.bf16.i64(<vscale x 4 x float> poison, <vscale x 4 x bfloat> [[VS2]], bfloat [[RS1]], i64 7, i64 [[VL]])
+// CHECK-RV64-NEXT: ret <vscale x 4 x float> [[TMP0]]
+//
+vfloat32m2_t test_vfwadd_vf_bf16m1_f32m2(vbfloat16m1_t vs2, __bf16 rs1,
+ size_t vl) {
+ return __riscv_vfwadd_vf(vs2, rs1, vl);
+}
+
+// CHECK-RV64-LABEL: define dso_local <vscale x 4 x float> @test_vfwadd_wv_bf16m1_f32m2(
+// CHECK-RV64-SAME: <vscale x 4 x float> [[VS2:%.*]], <vscale x 4 x bfloat> [[VS1:%.*]], i64 noundef [[VL:%.*]]) #[[ATTR0]] {
+// CHECK-RV64-NEXT: [[ENTRY:.*:]]
+// CHECK-RV64-NEXT: [[TMP0:%.*]] = call <vscale x 4 x float> @llvm.riscv.vfwadd.w.nxv4f32.nxv4bf16.i64(<vscale x 4 x float> poison, <vscale x 4 x float> [[VS2]], <vscale x 4 x bfloat> [[VS1]], i64 7, i64 [[VL]])
+// CHECK-RV64-NEXT: ret <vscale x 4 x float> [[TMP0]]
+//
+vfloat32m2_t test_vfwadd_wv_bf16m1_f32m2(vfloat32m2_t vs2, vbfloat16m1_t vs1,
+ size_t vl) {
+ return __riscv_vfwadd_wv(vs2, vs1, vl);
+}
+
+// CHECK-RV64-LABEL: define dso_local <vscale x 4 x float> @test_vfwadd_wf_bf16_f32m2(
+// CHECK-RV64-SAME: <vscale x 4 x float> [[VS2:%.*]], bfloat noundef [[RS1:%.*]], i64 noundef [[VL:%.*]]) #[[ATTR0]] {
+// CHECK-RV64-NEXT: [[ENTRY:.*:]]
+// CHECK-RV64-NEXT: [[TMP0:%.*]] = call <vscale x 4 x float> @llvm.riscv.vfwadd.w.nxv4f32.bf16.i64(<vscale x 4 x float> poison, <vscale x 4 x float> [[VS2]], bfloat [[RS1]], i64 7, i64 [[VL]])
+// CHECK-RV64-NEXT: ret <vscale x 4 x float> [[TMP0]]
+//
+vfloat32m2_t test_vfwadd_wf_bf16_f32m2(vfloat32m2_t vs2, __bf16 rs1,
+ size_t vl) {
+ return __riscv_vfwadd_wf(vs2, rs1, vl);
+}
+
+// CHECK-RV64-LABEL: define dso_local <vscale x 8 x float> @test_vfwadd_vv_bf16m2_f32m4(
+// CHECK-RV64-SAME: <vscale x 8 x bfloat> [[VS2:%.*]], <vscale x 8 x bfloat> [[VS1:%.*]], i64 noundef [[VL:%.*]]) #[[ATTR0]] {
+// CHECK-RV64-NEXT: [[ENTRY:.*:]]
+// CHECK-RV64-NEXT: [[TMP0:%.*]] = call <vscale x 8 x float> @llvm.riscv.vfwadd.nxv8f32.nxv8bf16.nxv8bf16.i64(<vscale x 8 x float> poison, <vscale x 8 x bfloat> [[VS2]], <vscale x 8 x bfloat> [[VS1]], i64 7, i64 [[VL]])
+// CHECK-RV64-NEXT: ret <vscale x 8 x float> [[TMP0]]
+//
+vfloat32m4_t test_vfwadd_vv_bf16m2_f32m4(vbfloat16m2_t vs2, vbfloat16m2_t vs1,
+ size_t vl) {
+ return __riscv_vfwadd_vv(vs2, vs1, vl);
+}
+
+// CHECK-RV64-LABEL: define dso_local <vscale x 8 x float> @test_vfwadd_vf_bf16m2_f32m4(
+// CHECK-RV64-SAME: <vscale x 8 x bfloat> [[VS2:%.*]], bfloat noundef [[RS1:%.*]], i64 noundef [[VL:%.*]]) #[[ATTR0]] {
+// CHECK-RV64-NEXT: [[ENTRY:.*:]]
+// CHECK-RV64-NEXT: [[TMP0:%.*]] = call <vscale x 8 x float> @llvm.riscv.vfwadd.nxv8f32.nxv8bf16.bf16.i64(<vscale x 8 x float> poison, <vscale x 8 x bfloat> [[VS2]], bfloat [[RS1]], i64 7, i64 [[VL]])
+// CHECK-RV64-NEXT: ret <vscale x 8 x float> [[TMP0]]
+//
+vfloat32m4_t test_vfwadd_vf_bf16m2_f32m4(vbfloat16m2_t vs2, __bf16 rs1,
+ size_t vl) {
+ return __riscv_vfwadd_vf(vs2, rs1, vl);
+}
+
+// CHECK-RV64-LABEL: define dso_local <vscale x 8 x float> @test_vfwadd_wv_bf16m2_f32m4(
+// CHECK-RV64-SAME: <vscale x 8 x float> [[VS2:%.*]], <vscale x 8 x bfloat> [[VS1:%.*]], i64 noundef [[VL:%.*]]) #[[ATTR0]] {
+// CHECK-RV64-NEXT: [[ENTRY:.*:]]
+// CHECK-RV64-NEXT: [[TMP0:%.*]] = call <vscale x 8 x float> @llvm.riscv.vfwadd.w.nxv8f32.nxv8bf16.i64(<vscale x 8 x float> poison, <vscale x 8 x float> [[VS2]], <vscale x 8 x bfloat> [[VS1]], i64 7, i64 [[VL]])
+// CHECK-RV64-NEXT: ret <vscale x 8 x float> [[TMP0]]
+//
+vfloat32m4_t test_vfwadd_wv_bf16m2_f32m4(vfloat32m4_t vs2, vbfloat16m2_t vs1,
+ size_t vl) {
+ return __riscv_vfwadd_wv(vs2, vs1, vl);
+}
+
+// CHECK-RV64-LABEL: define dso_local <vscale x 8 x float> @test_vfwadd_wf_bf16_f32m4(
+// CHECK-RV64-SAME: <vscale x 8 x float> [[VS2:%.*]], bfloat noundef [[RS1:%.*]], i64 noundef [[VL:%.*]]) #[[ATTR0]] {
+// CHECK-RV64-NEXT: [[ENTRY:.*:]]
+// CHECK-RV64-NEXT: [[TMP0:%.*]] = call <vscale x 8 x float> @llvm.riscv.vfwadd.w.nxv8f32.bf16.i64(<vscale x 8 x float> poison, <vscale x 8 x float> [[VS2]], bfloat [[RS1]], i64 7, i64 [[VL]])
+// CHECK-RV64-NEXT: ret <vscale x 8 x float> [[TMP0]]
+//
+vfloat32m4_t test_vfwadd_wf_bf16_f32m4(vfloat32m4_t vs2, __bf16 rs1,
+ size_t vl) {
+ return __riscv_vfwadd_wf(vs2, rs1, vl);
+}
+
+// CHECK-RV64-LABEL: define dso_local <vscale x 16 x float> @test_vfwadd_vv_bf16m4_f32m8(
+// CHECK-RV64-SAME: <vscale x 16 x bfloat> [[VS2:%.*]], <vscale x 16 x bfloat> [[VS1:%.*]], i64 noundef [[VL:%.*]]) #[[ATTR0]] {
+// CHECK-RV64-NEXT: [[ENTRY:.*:]]
+// CHECK-RV64-NEXT: [[TMP0:%.*]] = call <vscale x 16 x float> @llvm.riscv.vfwadd.nxv16f32.nxv16bf16.nxv16bf16.i64(<vscale x 16 x float> poison, <vscale x 16 x bfloat> [[VS2]], <vscale x 16 x bfloat> [[VS1]], i64 7, i64 [[VL]])
+// CHECK-RV64-NEXT: ret <vscale x 16 x float> [[TMP0]]
+//
+vfloat32m8_t test_vfwadd_vv_bf16m4_f32m8(vbfloat16m4_t vs2, vbfloat16m4_t vs1,
+ size_t vl) {
+ return __riscv_vfwadd_vv(vs2, vs1, vl);
+}
+
+// CHECK-RV64-LABEL: define dso_local <vscale x 16 x float> @test_vfwadd_vf_bf16m4_f32m8(
+// CHECK-RV64-SAME: <vscale x 16 x bfloat> [[VS2:%.*]], bfloat noundef [[RS1:%.*]], i64 noundef [[VL:%.*]]) #[[ATTR0]] {
+// CHECK-RV64-NEXT: [[ENTRY:.*:]]
+// CHECK-RV64-NEXT: [[TMP0:%.*]] = call <vscale x 16 x float> @llvm.riscv.vfwadd.nxv16f32.nxv16bf16.bf16.i64(<vscale x 16 x float> poison, <vscale x 16 x bfloat> [[VS2]], bfloat [[RS1]], i64 7, i64 [[VL]])
+// CHECK-RV64-NEXT: ret <vscale x 16 x float> [[TMP0]]
+//
+vfloat32m8_t test_vfwadd_vf_bf16m4_f32m8(vbfloat16m4_t vs2, __bf16 rs1,
+ size_t vl) {
+ return __riscv_vfwadd_vf(vs2, rs1, vl);
+}
+
+// CHECK-RV64-LABEL: define dso_local <vscale x 16 x float> @test_vfwadd_wv_bf16m4_f32m8(
+// CHECK-RV64-SAME: <vscale x 16 x float> [[VS2:%.*]], <vscale x 16 x bfloat> [[VS1:%.*]], i64 noundef [[VL:%.*]]) #[[ATTR0]] {
+// CHECK-RV64-NEXT: [[ENTRY:.*:]]
+// CHECK-RV64-NEXT: [[TMP0:%.*]] = call <vscale x 16 x float> @llvm.riscv.vfwadd.w.nxv16f32.nxv16bf16.i64(<vscale x 16 x float> poison, <vscale x 16 x float> [[VS2]], <vscale x 16 x bfloat> [[VS1]], i64 7, i64 [[VL]])
+// CHECK-RV64-NEXT: ret <vscale x 16 x float> [[TMP0]]
+//
+vfloat32m8_t test_vfwadd_wv_bf16m4_f32m8(vfloat32m8_t vs2, vbfloat16m4_t vs1,
+ size_t vl) {
+ return __riscv_vfwadd_wv(vs2, vs1, vl);
+}
+
+// CHECK-RV64-LABEL: define dso_local <vscale x 16 x float> @test_vfwadd_wf_bf16_f32m8(
+// CHECK-RV64-SAME: <vscale x 16 x float> [[VS2:%.*]], bfloat noundef [[RS1:%.*]], i64 noundef [[VL:%.*]]) #[[ATTR0]] {
+// CHECK-RV64-NEXT: [[ENTRY:.*:]]
+// CHECK-RV64-NEXT: [[TMP0:%.*]] = call <vscale x 16 x float> @llvm.riscv.vfwadd.w.nxv16f32.bf16.i64(<vscale x 16 x float> poison, <vscale x 16 x float> [[VS2]], bfloat [[RS1]], i64 7, i64 [[VL]])
+// CHECK-RV64-NEXT: ret <vscale x 16 x float> [[TMP0]]
+//
+vfloat32m8_t test_vfwadd_wf_bf16_f32m8(vfloat32m8_t vs2, __bf16 rs1,
+ size_t vl) {
+ return __riscv_vfwadd_wf(vs2, rs1, vl);
+}
+
+// CHECK-RV64-LABEL: define dso_local <vscale x 1 x float> @test_vfwadd_vv_bf16mf4_f32mf2_m(
+// CHECK-RV64-SAME: <vscale x 1 x i1> [[VM:%.*]], <vscale x 1 x bfloat> [[VS2:%.*]], <vscale x 1 x bfloat> [[VS1:%.*]], i64 noundef [[VL:%.*]]) #[[ATTR0]] {
+// CHECK-RV64-NEXT: [[ENTRY:.*:]]
+// CHECK-RV64-NEXT: [[TMP0:%.*]] = call <vscale x 1 x float> @llvm.riscv.vfwadd.mask.nxv1f32.nxv1bf16.nxv1bf16.i64(<vscale x 1 x float> poison, <vscale x 1 x bfloat> [[VS2]], <vscale x 1 x bfloat> [[VS1]], <vscale x 1 x i1> [[VM]], i64 7, i64 [[VL]], i64 3)
+// CHECK-RV64-NEXT: ret <vscale x 1 x float> [[TMP0]]
+//
+vfloat32mf2_t test_vfwadd_vv_bf16mf4_f32mf2_m(vbool64_t vm, vbfloat16mf4_t vs2,
+ vbfloat16mf4_t vs1, size_t vl) {
+ return __riscv_vfwadd_vv(vm, vs2, vs1, vl);
+}
+
+// CHECK-RV64-LABEL: define dso_local <vscale x 1 x float> @test_vfwadd_vf_bf16mf4_f32mf2_m(
+// CHECK-RV64-SAME: <vscale x 1 x i1> [[VM:%.*]], <vscale x 1 x bfloat> [[VS2:%.*]], bfloat noundef [[RS1:%.*]], i64 noundef [[VL:%.*]]) #[[ATTR0]] {
+// CHECK-RV64-NEXT: [[ENTRY:.*:]]
+// CHECK-RV64-NEXT: [[TMP0:%.*]] = call <vscale x 1 x float> @llvm.riscv.vfwadd.mask.nxv1f32.nxv1bf16.bf16.i64(<vscale x 1 x float> poison, <vscale x 1 x bfloat> [[VS2]], bfloat [[RS1]], <vscale x 1 x i1> [[VM]], i64 7, i64 [[VL]], i64 3)
+// CHECK-RV64-NEXT: ret <vscale x 1 x float> [[TMP0]]
+//
+vfloat32mf2_t test_vfwadd_vf_bf16mf4_f32mf2_m(vbool64_t vm, vbfloat16mf4_t vs2,
+ __bf16 rs1, size_t vl) {
+ return __riscv_vfwadd_vf(vm, vs2, rs1, vl);
+}
+
+// CHECK-RV64-LABEL: define dso_local <vscale x 1 x float> @test_vfwadd_wv_bf16mf4_f32mf2_m(
+// CHECK-RV64-SAME: <vscale x 1 x i1> [[VM:%.*]], <vscale x 1 x float> [[VS2:%.*]], <vscale x 1 x bfloat> [[VS1:%.*]], i64 noundef [[VL:%.*]]) #[[ATTR0]] {
+// CHECK-RV64-NEXT: [[ENTRY:.*:]]
+// CHECK-RV64-NEXT: [[TMP0:%.*]] = call <vscale x 1 x float> @llvm.riscv.vfwadd.w.mask.nxv1f32.nxv1bf16.i64(<vscale x 1 x float> poison, <vscale x 1 x float> [[VS2]], <vscale x 1 x bfloat> [[VS1]], <vscale x 1 x i1> [[VM]], i64 7, i64 [[VL]], i64 3)
+// CHECK-RV64-NEXT: ret <vscale x 1 x float> [[TMP0]]
+//
+vfloat32mf2_t test_vfwadd_wv_bf16mf4_f32mf2_m(vbool64_t vm, vfloat32mf2_t vs2,
+ vbfloat16mf4_t vs1, size_t vl) {
+ return __riscv_vfwadd_wv(vm, vs2, vs1, vl);
+}
+
+// CHECK-RV64-LABEL: define dso_local <vscale x 1 x float> @test_vfwadd_wf_bf16_f32mf2_m(
+// CHECK-RV64-SAME: <vscale x 1 x i1> [[VM:%.*]], <vscale x 1 x float> [[VS2:%.*]], bfloat noundef [[RS1:%.*]], i64 noundef [[VL:%.*]]) #[[ATTR0]] {
+// CHECK-RV64-NEXT: [[ENTRY:.*:]]
+// CHECK-RV64-NEXT: [[TMP0:%.*]] = call <vscale x 1 x float> @llvm.riscv.vfwadd.w.mask.nxv1f32.bf16.i64(<vscale x 1 x float> poison, <vscale x 1 x float> [[VS2]], bfloat [[RS1]], <vscale x 1 x i1> [[VM]], i64 7, i64 [[VL]], i64 3)
+// CHECK-RV64-NEXT: ret <vscale x 1 x float> [[TMP0]]
+//
+vfloat32mf2_t test_vfwadd_wf_bf16_f32mf2_m(vbool64_t vm, vfloat32mf2_t vs2,
+ __bf16 rs1, size_t vl) {
+ return __riscv_vfwadd_wf(vm, vs2, rs1, vl);
+}
+
+// CHECK-RV64-LABEL: define dso_local <vscale x 2 x float> @test_vfwadd_vv_bf16mf2_f32m1_m(
+// CHECK-RV64-SAME: <vscale x 2 x i1> [[VM:%.*]], <vscale x 2 x bfloat> [[VS2:%.*]], <vscale x 2 x bfloat> [[VS1:%.*]], i64 noundef [[VL:%.*]]) #[[ATTR0]] {
+// CHECK-RV64-NEXT: [[ENTRY:.*:]]
+// CHECK-RV64-NEXT: [[TMP0:%.*]] = call <vscale x 2 x float> @llvm.riscv.vfwadd.mask.nxv2f32.nxv2bf16.nxv2bf16.i64(<vscale x 2 x float> poison, <vscale x 2 x bfloat> [[VS2]], <vscale x 2 x bfloat> [[VS1]], <vscale x 2 x i1> [[VM]], i64 7, i64 [[VL]], i64 3)
+// CHECK-RV64-NEXT: ret <vscale x 2 x float> [[TMP0]]
+//
+vfloat32m1_t test_vfwadd_vv_bf16mf2_f32m1_m(vbool32_t vm, vbfloat16mf2_t vs2,
+ vbfloat16mf2_t vs1, size_t vl) {
+ return __riscv_vfwadd_vv(vm, vs2, vs1, vl);
+}
+
+// CHECK-RV64-LABEL: define dso_local <vscale x 2 x float> @test_vfwadd_vf_bf16mf2_f32m1_m(
+// CHECK-RV64-SAME: <vscale x 2 x i1> [[VM:%.*]], <vscale x 2 x bfloat> [[VS2:%.*]], bfloat noundef [[RS1:%.*]], i64 noundef [[VL:%.*]]) #[[ATTR0]] {
+// CHECK-RV64-NEXT: [[ENTRY:.*:]]
+// CHECK-RV64-NEXT: [[TMP0:%.*]] = call <vscale x 2 x float> @llvm.riscv.vfwadd.mask.nxv2f32.nxv2bf16.bf16.i64(<vscale x 2 x float> poison, <vscale x 2 x bfloat> [[VS2]], bfloat [[RS1]], <vscale x 2 x i1> [[VM]], i64 7, i64 [[VL]], i64 3)
+// CHECK-RV64-NEXT: ret <vscale x 2 x float> [[TMP0]]
+//
+vfloat32m1_t test_vfwadd_vf_bf16mf2_f32m1_m(vbool32_t vm, vbfloat16mf2_t vs2,
+ __bf16 rs1, size_t vl) {
+ return __riscv_vfwadd_vf(vm, vs2, rs1, vl);
+}
+
+// CHECK-RV64-LABEL: define dso_local <vscale x 2 x float> @test_vfwadd_wv_bf16mf2_f32m1_m(
+// CHECK-RV64-SAME: <vscale x 2 x i1> [[VM:%.*]], <vscale x 2 x float> [[VS2:%.*]], <vscale x 2 x bfloat> [[VS1:%.*]], i64 noundef [[VL:%.*]]) #[[ATTR0]] {
+// CHECK-RV64-NEXT: [[ENTRY:.*:]]
+// CHECK-RV64-NEXT: [[TMP0:%.*]] = call <vscale x 2 x float> @llvm.riscv.vfwadd.w.mask.nxv2f32.nxv2bf16.i64(<vscale x 2 x float> poison, <vscale x 2 x float> [[VS2]], <vscale x 2 x bfloat> [[VS1]], <vscale x 2 x i1> [[VM]], i64 7, i64 [[VL]], i64 3)
+// CHECK-RV64-NEXT: ret <vscale x 2 x float> [[TMP0]]
+//
+vfloat32m1_t test_vfwadd_wv_bf16mf2_f32m1_m(vbool32_t vm, vfloat32m1_t vs2,
+ vbfloat16mf2_t vs1, size_t vl) {
+ return __riscv_vfwadd_wv(vm, vs2, vs1, vl);
+}
+
+// CHECK-RV64-LABEL: define dso_local <vscale x 2 x float> @test_vfwadd_wf_bf16_f32m1_m(
+// CHECK-RV64-SAME: <vscale x 2 x i1> [[VM:%.*]], <vscale x 2 x float> [[VS2:%.*]], bfloat noundef [[RS1:%.*]], i64 noundef [[VL:%.*]]) #[[ATTR0]] {
+// CHECK-RV64-NEXT: [[ENTRY:.*:]]
+// CHECK-RV64-NEXT: [[TMP0:%.*]] = call <vscale x 2 x float> @llvm.riscv.vfwadd.w.mask.nxv2f32.bf16.i64(<vscale x 2 x float> poison, <vscale x 2 x float> [[VS2]], bfloat [[RS1]], <vscale x 2 x i1> [[VM]], i64 7, i64 [[VL]], i64 3)
+// CHECK-RV64-NEXT: ret <vscale x 2 x float> [[TMP0]]
+//
+vfloat32m1_t test_vfwadd_wf_bf16_f32m1_m(vbool32_t vm, vfloat32m1_t vs2,
+ __bf16 rs1, size_t vl) {
+ return __riscv_vfwadd_wf(vm, vs2, rs1, vl);
+}
+
+// CHECK-RV64-LABEL: define dso_local <vscale x 4 x float> @test_vfwadd_vv_bf16m1_f32m2_m(
+// CHECK-RV64-SAME: <vscale x 4 x i1> [[VM:%.*]], <vscale x 4 x bfloat> [[VS2:%.*]], <vscale x 4 x bfloat> [[VS1:%.*]], i64 noundef [[VL:%.*]]) #[[ATTR0]] {
+// CHECK-RV64-NEXT: [[ENTRY:.*:]]
+// CHECK-RV64-NEXT: [[TMP0:%.*]] = call <vscale x 4 x float> @llvm.riscv.vfwadd.mask.nxv4f32.nxv4bf16.nxv4bf16.i64(<vscale x 4 x float> poison, <vscale x 4 x bfloat> [[VS2]], <vscale x 4 x bfloat> [[VS1]], <vscale x 4 x i1> [[VM]], i64 7, i64 [[VL]], i64 3)
+// CHECK-RV64-NEXT: ret <vscale x 4 x float> [[TMP0]]
+//
+vfloat32m2_t test_vfwadd_vv_bf16m1_f32m2_m(vbool16_t vm, vbfloat16m1_t vs2,
+ vbfloat16m1_t vs1, size_t vl) {
+ return __riscv_vfwadd_vv(vm, vs2, vs1, vl);
+}
+
+// CHECK-RV64-LABEL: define dso_local <vscale x 4 x float> @test_vfwadd_vf_bf16m1_f32m2_m(
+// CHECK-RV64-SAME: <vscale x 4 x i1> [[VM:%.*]], <vscale x 4 x bfloat> [[VS2:%.*]], bfloat noundef [[RS1:%.*]], i64 noundef [[VL:%.*]]) #[[ATTR0]] {
+// CHECK-RV64-NEXT: [[ENTRY:.*:]]
+// CHECK-RV64-NEXT: [[TMP0:%.*]] = call <vscale x 4 x float> @llvm.riscv.vfwadd.mask.nxv4f32.nxv4bf16.bf16.i64(<vscale x 4 x float> poison, <vscale x 4 x bfloat> [[VS2]], bfloat [[RS1]], <vscale x 4 x i1> [[VM]], i64 7, i64 [[VL]], i64 3)
+// CHECK-RV64-NEXT: ret <vscale x 4 x float> [[TMP0]]
+//
+vfloat32m2_t test_vfwadd_vf_bf16m1_f32m2_m(vbool16_t vm, vbfloat16m1_t vs2,
+ __bf16 rs1, size_t vl) {
+ return __riscv_vfwadd_vf(vm, vs2, rs1, vl);
+}
+
+// CHECK-RV64-LABEL: define dso_local <vscale x 4 x float> @test_vfwadd_wv_bf16m1_f32m2_m(
+// CHECK-RV64-SAME: <vscale x 4 x i1> [[VM:%.*]], <vscale x 4 x float> [[VS2:%.*]], <vscale x 4 x bfloat> [[VS1:%.*]], i64 noundef [[VL:%.*]]) #[[ATTR0]] {
+// CHECK-RV64-NEXT: [[ENTRY:.*:]]
+// CHECK-RV64-NEXT: [[TMP0:%.*]] = call <vscale x 4 x float> @llvm.riscv.vfwadd.w.mask.nxv4f32.nxv4bf16.i64(<vscale x 4 x float> poison, <vscale x 4 x float> [[VS2]], <vscale x 4 x bfloat> [[VS1]], <vscale x 4 x i1> [[VM]], i64 7, i64 [[VL]], i64 3)
+// CHECK-RV64-NEXT: ret <vscale x 4 x float> [[TMP0]]
+//
+vfloat32m2_t test_vfwadd_wv_bf16m1_f32m2_m(vbool16_t vm, vfloat32m2_t vs2,
+ vbfloat16m1_t vs1, size_t vl) {
+ return __riscv_vfwadd_wv(vm, vs2, vs1, vl);
+}
+
+// CHECK-RV64-LABEL: define dso_local <vscale x 4 x float> @test_vfwadd_wf_bf16_f32m2_m(
+// CHECK-RV64-SAME: <vscale x 4 x i1> [[VM:%.*]], <vscale x 4 x float> [[VS2:%.*]], bfloat noundef [[RS1:%.*]], i64 noundef [[VL:%.*]]) #[[ATTR0]] {
+// CHECK-RV64-NEXT: [[ENTRY:.*:]]
+// CHECK-RV64-NEXT: [[TMP0:%.*]] = call <vscale x 4 x float> @llvm.riscv.vfwadd.w.mask.nxv4f32.bf16.i64(<vscale x 4 x float> poison, <vscale x 4 x float> [[VS2]], bfloat [[RS1]], <vscale x 4 x i1> [[VM]], i64 7, i64 [[VL]], i64 3)
+// CHECK-RV64-NEXT: ret <vscale x 4 x float> [[TMP0]]
+//
+vfloat32m2_t test_vfwadd_wf_bf16_f32m2_m(vbool16_t vm, vfloat32m2_t vs2,
+ __bf16 rs1, size_t vl) {
+ return __riscv_vfwadd_wf(vm, vs2, rs1, vl);
+}
+
+// CHECK-RV64-LABEL: define dso_local <vscale x 8 x float> @test_vfwadd_vv_bf16m2_f32m4_m(
+// CHECK-RV64-SAME: <vscale x 8 x i1> [[VM:%.*]], <vscale x 8 x bfloat> [[VS2:%.*]], <vscale x 8 x bfloat> [[VS1:%.*]], i64 noundef [[VL:%.*]]) #[[ATTR0]] {
+// CHECK-RV64-NEXT: [[ENTRY:.*:]]
+// CHECK-RV64-NEXT: [[TMP0:%.*]] = call <vscale x 8 x float> @llvm.riscv.vfwadd.mask.nxv8f32.nxv8bf16.nxv8bf16.i64(<vscale x 8 x float> poison, <vscale x 8 x bfloat> [[VS2]], <vscale x 8 x bfloat> [[VS1]], <vscale x 8 x i1> [[VM]], i64 7, i64 [[VL]], i64 3)
+// CHECK-RV64-NEXT: ret <vscale x 8 x float> [[TMP0]]
+//
+vfloat32m4_t test_vfwadd_vv_bf16m2_f32m4_m(vbool8_t vm, vbfloat16m2_t vs2,
+ vbfloat16m2_t vs1, size_t vl) {
+ return __riscv_vfwadd_vv(vm, vs2, vs1, vl);
+}
+
+// CHECK-RV64-LABEL: define dso_local <vscale x 8 x float> @test_vfwadd_vf_bf16m2_f32m4_m(
+// CHECK-RV64-SAME: <vscale x 8 x i1> [[VM:%.*]], <vscale x 8 x bfloat> [[VS2:%.*]], bfloat noundef [[RS1:%.*]], i64 noundef [[VL:%.*]]) #[[ATTR0]] {
+// CHECK-RV64-NEXT: [[ENTRY:.*:]]
+// CHECK-RV64-NEXT: [[TMP0:%.*]] = call <vscale x 8 x float> @llvm.riscv.vfwadd.mask.nxv8f32.nxv8bf16.bf16.i64(<vscale x 8 x float> poison, <vscale x 8 x bfloat> [[VS2]], bfloat [[RS1]], <vscale x 8 x i1> [[VM]], i64 7, i64 [[VL]], i64 3)
+// CHECK-RV64-NEXT: ret <vscale x 8 x float> [[TMP0]]
+//
+vfloat32m4_t test_vfwadd_vf_bf16m2_f32m4_m(vbool8_t vm, vbfloat16m2_t vs2,
+ __bf16 rs1, size_t vl) {
+ return __riscv_vfwadd_vf(vm, vs2, rs1, vl);
+}
+
+// CHECK-RV64-LABEL: define dso_local <vscale x 8 x float> @test_vfwadd_wv_bf16m2_f32m4_m(
+// CHECK-RV64-SAME: <vscale x 8 x i1> [[VM:%.*]], <vscale x 8 x float> [[VS2:%.*]], <vscale x 8 x bfloat> [[VS1:%.*]], i64 noundef [[VL:%.*]]) #[[ATTR0]] {
+// CHECK-RV64-NEXT: [[ENTRY:.*:]]
+// CHECK-RV64-NEXT: [[TMP0:%.*]] = call <vscale x 8 x float> @llvm.riscv.vfwadd.w.mask.nxv8f32.nxv8bf16.i64(<vscale x 8 x float> poison, <vscale x 8 x float> [[VS2]], <vscale x 8 x bfloat> [[VS1]], <vscale x 8 x i1> [[VM]], i64 7, i64 [[VL]], i64 3)
+// CHECK-RV64-NEXT: ret <vscale x 8 x float> [[TMP0]]
+//
+vfloat32m4_t test_vfwadd_wv_bf16m2_f32m4_m(vbool8_t vm, vfloat32m4_t vs2,
+ vbfloat16m2_t vs1, size_t vl) {
+ return __riscv_vfwadd_wv(vm, vs2, vs1, vl);
+}
+
+// CHECK-RV64-LABEL: define dso_local <vscale x 8 x float> @test_vfwadd_wf_bf16_f32m4_m(
+// CHECK-RV64-SAME: <vscale x 8 x i1> [[VM:%.*]], <vscale x 8 x float> [[VS2:%.*]], bfloat noundef [[RS1:%.*]], i64 noundef [[VL:%.*]]) #[[ATTR0]] {
+// CHECK-RV64-NEXT: [[ENTRY:.*:]]
+// CHECK-RV64-NEXT: [[TMP0:%.*]] = call <vscale x 8 x float> @llvm.riscv.vfwadd.w.mask.nxv8f32.bf16.i64(<vscale x 8 x float> poison, <vscale x 8 x float> [[VS2]], bfloat [[RS1]], <vscale x 8 x i1> [[VM]], i64 7, i64 [[VL]], i64 3)
+// CHECK-RV64-NEXT: ret <vscale x 8 x float> [[TMP0]]
+//
+vfloat32m4_t test_vfwadd_wf_bf16_f32m4_m(vbool8_t vm, vfloat32m4_t vs2,
+ __bf16 rs1, size_t vl) {
+ return __riscv_vfwadd_wf(vm, vs2, rs1, vl);
+}
+
+// CHECK-RV64-LABEL: define dso_local <vscale x 16 x float> @test_vfwadd_vv_bf16m4_f32m8_m(
+// CHECK-RV64-SAME: <vscale x 16 x i1> [[VM:%.*]], <vscale x 16 x bfloat> [[VS2:%.*]], <vscale x 16 x bfloat> [[VS1:%.*]], i64 noundef [[VL:%.*]]) #[[ATTR0]] {
+// CHECK-RV64-NEXT: [[ENTRY:.*:]]
+// CHECK-RV64-NEXT: [[TMP0:%.*]] = call <vscale x 16 x float> @llvm.riscv.vfwadd.mask.nxv16f32.nxv16bf16.nxv16bf16.i64(<vscale x 16 x float> poison, <vscale x 16 x bfloat> [[VS2]], <vscale x 16 x bfloat> [[VS1]], <vscale x 16 x i1> [[VM]], i64 7, i64 [[VL]], i64 3)
+// CHECK-RV64-NEXT: ret <vscale x 16 x float> [[TMP0]]
+//
+vfloat32m8_t test_vfwadd_vv_bf16m4_f32m8_m(vbool4_t vm, vbfloat16m4_t vs2,
+ vbfloat16m4_t vs1, size_t vl) {
+ return __riscv_vfwadd_vv(vm, vs2, vs1, vl);
+}
+
+// CHECK-RV64-LABEL: define dso_local <vscale x 16 x float> @test_vfwadd_vf_bf16m4_f32m8_m(
+// CHECK-RV64-SAME: <vscale x 16 x i1> [[VM:%.*]], <vscale x 16 x bfloat> [[VS2:%.*]], bfloat noundef [[RS1:%.*]], i64 noundef [[VL:%.*]]) #[[ATTR0]] {
+// CHECK-RV64-NEXT: [[ENTRY:.*:]]
+// CHECK-RV64-NEXT: [[TMP0:%.*]] = call <vscale x 16 x float> @llvm.riscv.vfwadd.mask.nxv16f32.nxv16bf16.bf16.i64(<vscale x 16 x float> poison, <vscale x 16 x bfloat> [[VS2]], bfloat [[RS1]], <vscale x 16 x i1> [[VM]], i64 7, i64 [[VL]], i64 3)
+// CHECK-RV64-NEXT: ret <vscale x 16 x float> [[TMP0]]
+//
+vfloat32m8_t test_vfwadd_vf_bf16m4_f32m8_m(vbool4_t vm, vbfloat16m4_t vs2,
+ __bf16 rs1, size_t vl) {
+ return __riscv_vfwadd_vf(vm, vs2, rs1, vl);
+}
+
+// CHECK-RV64-LABEL: define dso_local <vscale x 16 x float> @test_vfwadd_wv_bf16m4_f32m8_m(
+// CHECK-RV64-SAME: <vscale x 16 x i1> [[VM:%.*]], <vscale x 16 x float> [[VS2:%.*]], <vscale x 16 x bfloat> [[VS1:%.*]], i64 noundef [[VL:%.*]]) #[[ATTR0]] {
+// CHECK-RV64-NEXT: [[ENTRY:.*:]]
+// CHECK-RV64-NEXT: [[TMP0:%.*]] = call <vscale x 16 x float> @llvm.riscv.vfwadd.w.mask.nxv16f32.nxv16bf16.i64(<vscale x 16 x float> poison, <vscale x 16 x float> [[VS2]], <vscale x 16 x bfloat> [[VS1]], <vscale x 16 x i1> [[VM]], i64 7, i64 [[VL]], i64 3)
+// CHECK-RV64-NEXT: ret <vscale x 16 x float> [[TMP0]]
+//
+vfloat32m8_t test_vfwadd_wv_bf16m4_f32m8_m(vbool4_t vm, vfloat32m8_t vs2,
+ vbfloat16m4_t vs1, size_t vl) {
+ return __riscv_vfwadd_wv(vm, vs2, vs1, vl);
+}
+
+// CHECK-RV64-LABEL: define dso_local <vscale x 16 x float> @test_vfwadd_wf_bf16_f32m8_m(
+// CHECK-RV64-SAME: <vscale x 16 x i1> [[VM:%.*]], <vscale x 16 x float> [[VS2:%.*]], bfloat noundef [[RS1:%.*]], i64 noundef [[VL:%.*]]) #[[ATTR0]] {
+// CHECK-RV64-NEXT: [[ENTRY:.*:]]
+// CHECK-RV64-NEXT: [[TMP0:%.*]] = call <vscale x 16 x float> @llvm.riscv.vfwadd.w.mask.nxv16f32.bf16.i64(<vscale x 16 x float> poison, <vscale x 16 x float> [[VS2]], bfloat [[RS1]], <vscale x 16 x i1> [[VM]], i64 7, i64 [[VL]], i64 3)
+// CHECK-RV64-NEXT: ret <vscale x 16 x float> [[TMP0]]
+//
+vfloat32m8_t test_vfwadd_wf_bf16_f32m8_m(vbool4_t vm, vfloat32m8_t vs2,
+ __bf16 rs1, size_t vl) {
+ return __riscv_vfwadd_wf(vm, vs2, rs1, vl);
+}
+
+// CHECK-RV64-LABEL: define dso_local <vscale x 1 x float> @test_vfwadd_vv_bf16mf4_f32mf2_rm(
+// CHECK-RV64-SAME: <vscale x 1 x bfloat> [[VS2:%.*]], <vscale x 1 x bfloat> [[VS1:%.*]], i64 noundef [[VL:%.*]]) #[[ATTR0]] {
+// CHECK-RV64-NEXT: [[ENTRY:.*:]]
+// CHECK-RV64-NEXT: [[TMP0:%.*]] = call <vscale x 1 x float> @llvm.riscv.vfwadd.nxv1f32.nxv1bf16.nxv1bf16.i64(<vscale x 1 x float> poison, <vscale x 1 x bfloat> [[VS2]], <vscale x 1 x bfloat> [[VS1]], i64 0, i64 [[VL]])
+// CHECK-RV64-NEXT: ret <vscale x 1 x float> [[TMP0]]
+//
+vfloat32mf2_t test_vfwadd_vv_bf16mf4_f32mf2_rm(vbfloat16mf4_t vs2,
+ vbfloat16mf4_t vs1, size_t vl) {
+ return __riscv_vfwadd_vv(vs2, vs1, __RISCV_FRM_RNE, vl);
+}
+
+// CHECK-RV64-LABEL: define dso_local <vscale x 1 x float> @test_vfwadd_vf_bf16mf4_f32mf2_rm(
+// CHECK-RV64-SAME: <vscale x 1 x bfloat> [[VS2:%.*]], bfloat noundef [[RS1:%.*]], i64 noundef [[VL:%.*]]) #[[ATTR0]] {
+// CHECK-RV64-NEXT: [[ENTRY:.*:]]
+// CHECK-RV64-NEXT: [[TMP0:%.*]] = call <vscale x 1 x float> @llvm.riscv.vfwadd.nxv1f32.nxv1bf16.bf16.i64(<vscale x 1 x float> poison, <vscale x 1 x bfloat> [[VS2]], bfloat [[RS1]], i64 0, i64 [[VL]])
+// CHECK-RV64-NEXT: ret <vscale x 1 x float> [[TMP0]]
+//
+vfloat32mf2_t test_vfwadd_vf_bf16mf4_f32mf2_rm(vbfloat16mf4_t vs2, __bf16 rs1,
+ size_t vl) {
+ return __riscv_vfwadd_vf(vs2, rs1, __RISCV_FRM_RNE, vl);
+}
+
+// CHECK-RV64-LABEL: define dso_local <vscale x 1 x float> @test_vfwadd_wv_bf16mf4_f32mf2_rm(
+// CHECK-RV64-SAME: <vscale x 1 x float> [[VS2:%.*]], <vscale x 1 x bfloat> [[VS1:%.*]], i64 noundef [[VL:%.*]]) #[[ATTR0]] {
+// CHECK-RV64-NEXT: [[ENTRY:.*:]]
+// CHECK-RV64-NEXT: [[TMP0:%.*]] = call <vscale x 1 x float> @llvm.riscv.vfwadd.w.nxv1f32.nxv1bf16.i64(<vscale x 1 x float> poison, <vscale x 1 x float> [[VS2]], <vscale x 1 x bfloat> [[VS1]], i64 0, i64 [[VL]])
+// CHECK-RV64-NEXT: ret <vscale x 1 x float> [[TMP0]]
+//
+vfloat32mf2_t test_vfwadd_wv_bf16mf4_f32mf2_rm(vfloat32mf2_t vs2,
+ vbfloat16mf4_t vs1, size_t vl) {
+ return __riscv_vfwadd_wv(vs2, vs1, __RISCV_FRM_RNE, vl);
+}
+
+// CHECK-RV64-LABEL: define dso_local <vscale x 1 x float> @test_vfwadd_wf_bf16_f32mf2_rm(
+// CHECK-RV64-SAME: <vscale x 1 x float> [[VS2:%.*]], bfloat noundef [[RS1:%.*]], i64 noundef [[VL:%.*]]) #[[ATTR0]] {
+// CHECK-RV64-NEXT: [[ENTRY:.*:]]
+// CHECK-RV64-NEXT: [[TMP0:%.*]] = call <vscale x 1 x float> @llvm.riscv.vfwadd.w.nxv1f32.bf16.i64(<vscale x 1 x float> poison, <vscale x 1 x float> [[VS2]], bfloat [[RS1]], i64 0, i64 [[VL]])
+// CHECK-RV64-NEXT: ret <vscale x 1 x float> [[TMP0]]
+//
+vfloat32mf2_t test_vfwadd_wf_bf16_f32mf2_rm(vfloat32mf2_t vs2, __bf16 rs1,
+ size_t vl) {
+ return __riscv_vfwadd_wf(vs2, rs1, __RISCV_FRM_RNE, vl);
+}
+
+// CHECK-RV64-LABEL: define dso_local <vscale x 2 x float> @test_vfwadd_vv_bf16mf2_f32m1_rm(
+// CHECK-RV64-SAME: <vscale x 2 x bfloat> [[VS2:%.*]], <vscale x 2 x bfloat> [[VS1:%.*]], i64 noundef [[VL:%.*]]) #[[ATTR0]] {
+// CHECK-RV64-NEXT: [[ENTRY:.*:]]
+// CHECK-RV64-NEXT: [[TMP0:%.*]] = call <vscale x 2 x float> @llvm.riscv.vfwadd.nxv2f32.nxv2bf16.nxv2bf16.i64(<vscale x 2 x float> poison, <vscale x 2 x bfloat> [[VS2]], <vscale x 2 x bfloat> [[VS1]], i64 0, i64 [[VL]])
+// CHECK-RV64-NEXT: ret <vscale x 2 x float> [[TMP0]]
+//
+vfloat32m1_t test_vfwadd_vv_bf16mf2_f32m1_rm(vbfloat16mf2_t vs2,
+ vbfloat16mf2_t vs1, size_t vl) {
+ return __riscv_vfwadd_vv(vs2, vs1, __RISCV_FRM_RNE, vl);
+}
+
+// CHECK-RV64-LABEL: define dso_local <vscale x 2 x float> @test_vfwadd_vf_bf16mf2_f32m1_rm(
+// CHECK-RV64-SAME: <vscale x 2 x bfloat> [[VS2:%.*]], bfloat noundef [[RS1:%.*]], i64 noundef [[VL:%.*]]) #[[ATTR0]] {
+// CHECK-RV64-NEXT: [[ENTRY:.*:]]
+// CHECK-RV64-NEXT: [[TMP0:%.*]] = call <vscale x 2 x float> @llvm.riscv.vfwadd.nxv2f32.nxv2bf16.bf16.i64(<vscale x 2 x float> poison, <vscale x 2 x bfloat> [[VS2]], bfloat [[RS1]], i64 0, i64 [[VL]])
+// CHECK-RV64-NEXT: ret <vscale x 2 x float> [[TMP0]]
+//
+vfloat32m1_t test_vfwadd_vf_bf16mf2_f32m1_rm(vbfloat16mf2_t vs2, __bf16 rs1,
+ size_t vl) {
+ return __riscv_vfwadd_vf(vs2, rs1, __RISCV_FRM_RNE, vl);
+}
+
+// CHECK-RV64-LABEL: define dso_local <vscale x 2 x float> @test_vfwadd_wv_bf16mf2_f32m1_rm(
+// CHECK-RV64-SAME: <vscale x 2 x float> [[VS2:%.*]], <vscale x 2 x bfloat> [[VS1:%.*]], i64 noundef [[VL:%.*]]) #[[ATTR0]] {
+// CHECK-RV64-NEXT: [[ENTRY:.*:]]
+// CHECK-RV64-NEXT: [[TMP0:%.*]] = call <vscale x 2 x float> @llvm.riscv.vfwadd.w.nxv2f32.nxv2bf16.i64(<vscale x 2 x float> poison, <vscale x 2 x float> [[VS2]], <vscale x 2 x bfloat> [[VS1]], i64 0, i64 [[VL]])
+// CHECK-RV64-NEXT: ret <vscale x 2 x float> [[TMP0]]
+//
+vfloat32m1_t test_vfwadd_wv_bf16mf2_f32m1_rm(vfloat32m1_t vs2,
+ vbfloat16mf2_t vs1, size_t vl) {
+ return __riscv_vfwadd_wv(vs2, vs1, __RISCV_FRM_RNE, vl);
+}
+
+// CHECK-RV64-LABEL: define dso_local <vscale x 2 x float> @test_vfwadd_wf_bf16_f32m1_rm(
+// CHECK-RV64-SAME: <vscale x 2 x float> [[VS2:%.*]], bfloat noundef [[RS1:%.*]], i64 noundef [[VL:%.*]]) #[[ATTR0]] {
+// CHECK-RV64-NEXT: [[ENTRY:.*:]]
+// CHECK-RV64-NEXT: [[TMP0:%.*]] = call <vscale x 2 x float> @llvm.riscv.vfwadd.w.nxv2f32.bf16.i64(<vscale x 2 x float> poison, <vscale x 2 x float> [[VS2]], bfloat [[RS1]], i64 0, i64 [[VL]])
+// CHECK-RV64-NEXT: ret <vscale x 2 x float> [[TMP0]]
+//
+vfloat32m1_t test_vfwadd_wf_bf16_f32m1_rm(vfloat32m1_t vs2, __bf16 rs1,
+ size_t vl) {
+ return __riscv_vfwadd_wf(vs2, rs1, __RISCV_FRM_RNE, vl);
+}
+
+// CHECK-RV64-LABEL: define dso_local <vscale x 4 x float> @test_vfwadd_vv_bf16m1_f32m2_rm(
+// CHECK-RV64-SAME: <vscale x 4 x bfloat> [[VS2:%.*]], <vscale x 4 x bfloat> [[VS1:%.*]], i64 noundef [[VL:%.*]]) #[[ATTR0]] {
+// CHECK-RV64-NEXT: [[ENTRY:.*:]]
+// CHECK-RV64-NEXT: [[TMP0:%.*]] = call <vscale x 4 x float> @llvm.riscv.vfwadd.nxv4f32.nxv4bf16.nxv4bf16.i64(<vscale x 4 x float> poison, <vscale x 4 x bfloat> [[VS2]], <vscale x 4 x bfloat> [[VS1]], i64 0, i64 [[VL]])
+// CHECK-RV64-NEXT: ret <vscale x 4 x float> [[TMP0]]
+//
+vfloat32m2_t test_vfwadd_vv_bf16m1_f32m2_rm(vbfloat16m1_t vs2,
+ vbfloat16m1_t vs1, size_t vl) {
+ return __riscv_vfwadd_vv(vs2, vs1, __RISCV_FRM_RNE, vl);
+}
+
+// CHECK-RV64-LABEL: define dso_local <vscale x 4 x float> @test_vfwadd_vf_bf16m1_f32m2_rm(
+// CHECK-RV64-SAME: <vscale x 4 x bfloat> [[VS2:%.*]], bfloat noundef [[RS1:%.*]], i64 noundef [[VL:%.*]]) #[[ATTR0]] {
+// CHECK-RV64-NEXT: [[ENTRY:.*:]]
+// CHECK-RV64-NEXT: [[TMP0:%.*]] = call <vscale x 4 x float> @llvm.riscv.vfwadd.nxv4f32.nxv4bf16.bf16.i64(<vscale x 4 x float> poison, <vscale x 4 x bfloat> [[VS2]], bfloat [[RS1]], i64 0, i64 [[VL]])
+// CHECK-RV64-NEXT: ret <vscale x 4 x float> [[TMP0]]
+//
+vfloat32m2_t test_vfwadd_vf_bf16m1_f32m2_rm(vbfloat16m1_t vs2, __bf16 rs1,
+ size_t vl) {
+ return __riscv_vfwadd_vf(vs2, rs1, __RISCV_FRM_RNE, vl);
+}
+
+// CHECK-RV64-LABEL: define dso_local <vscale x 4 x float> @test_vfwadd_wv_bf16m1_f32m2_rm(
+// CHECK-RV64-SAME: <vscale x 4 x float> [[VS2:%.*]], <vscale x 4 x bfloat> [[VS1:%.*]], i64 noundef [[VL:%.*]]) #[[ATTR0]] {
+// CHECK-RV64-NEXT: [[ENTRY:.*:]]
+// CHECK-RV64-NEXT: [[TMP0:%.*]] = call <vscale x 4 x float> @llvm.riscv.vfwadd.w.nxv4f32.nxv4bf16.i64(<vscale x 4 x float> poison, <vscale x 4 x float> [[VS2]], <vscale x 4 x bfloat> [[VS1]], i64 0, i64 [[VL]])
+// CHECK-RV64-NEXT: ret <vscale x 4 x float> [[TMP0]]
+//
+vfloat32m2_t test_vfwadd_wv_bf16m1_f32m2_rm(vfloat32m2_t vs2, vbfloat16m1_t vs1,
+ size_t vl) {
+ return __riscv_vfwadd_wv(vs2, vs1, __RISCV_FRM_RNE, vl);
+}
+
+// CHECK-RV64-LABEL: define dso_local <vscale x 4 x float> @test_vfwadd_wf_bf16_f32m2_rm(
+// CHECK-RV64-SAME: <vscale x 4 x float> [[VS2:%.*]], bfloat noundef [[RS1:%.*]], i64 noundef [[VL:%.*]]) #[[ATTR0]] {
+// CHECK-RV64-NEXT: [[ENTRY:.*:]]
+// CHECK-RV64-NEXT: [[TMP0:%.*]] = call <vscale x 4 x float> @llvm.riscv.vfwadd.w.nxv4f32.bf16.i64(<vscale x 4 x float> poison, <vscale x 4 x float> [[VS2]], bfloat [[RS1]], i64 0, i64 [[VL]])
+// CHECK-RV64-NEXT: ret <vscale x 4 x float> [[TMP0]]
+//
+vfloat32m2_t test_vfwadd_wf_bf16_f32m2_rm(vfloat32m2_t vs2, __bf16 rs1,
+ size_t vl) {
+ return __riscv_vfwadd_wf(vs2, rs1, __RISCV_FRM_RNE, vl);
+}
+
+// CHECK-RV64-LABEL: define dso_local <vscale x 8 x float> @test_vfwadd_vv_bf16m2_f32m4_rm(
+// CHECK-RV64-SAME: <vscale x 8 x bfloat> [[VS2:%.*]], <vscale x 8 x bfloat> [[VS1:%.*]], i64 noundef [[VL:%.*]]) #[[ATTR0]] {
+// CHECK-RV64-NEXT: [[ENTRY:.*:]]
+// CHECK-RV64-NEXT: [[TMP0:%.*]] = call <vscale x 8 x float> @llvm.riscv.vfwadd.nxv8f32.nxv8bf16.nxv8bf16.i64(<vscale x 8 x float> poison, <vscale x 8 x bfloat> [[VS2]], <vscale x 8 x bfloat> [[VS1]], i64 0, i64 [[VL]])
+// CHECK-RV64-NEXT: ret <vscale x 8 x float> [[TMP0]]
+//
+vfloat32m4_t test_vfwadd_vv_bf16m2_f32m4_rm(vbfloat16m2_t vs2,
+ vbfloat16m2_t vs1, size_t vl) {
+ return __riscv_vfwadd_vv(vs2, vs1, __RISCV_FRM_RNE, vl);
+}
+
+// CHECK-RV64-LABEL: define dso_local <vscale x 8 x float> @test_vfwadd_vf_bf16m2_f32m4_rm(
+// CHECK-RV64-SAME: <vscale x 8 x bfloat> [[VS2:%.*]], bfloat noundef [[RS1:%.*]], i64 noundef [[VL:%.*]]) #[[ATTR0]] {
+// CHECK-RV64-NEXT: [[ENTRY:.*:]]
+// CHECK-RV64-NEXT: [[TMP0:%.*]] = call <vscale x 8 x float> @llvm.riscv.vfwadd.nxv8f32.nxv8bf16.bf16.i64(<vscale x 8 x float> poison, <vscale x 8 x bfloat> [[VS2]], bfloat [[RS1]], i64 0, i64 [[VL]])
+// CHECK-RV64-NEXT: ret <vscale x 8 x float> [[TMP0]]
+//
+vfloat32m4_t test_vfwadd_vf_bf16m2_f32m4_rm(vbfloat16m2_t vs2, __bf16 rs1,
+ size_t vl) {
+ return __riscv_vfwadd_vf(vs2, rs1, __RISCV_FRM_RNE, vl);
+}
+
+// CHECK-RV64-LABEL: define dso_local <vscale x 8 x float> @test_vfwadd_wv_bf16m2_f32m4_rm(
+// CHECK-RV64-SAME: <vscale x 8 x float> [[VS2:%.*]], <vscale x 8 x bfloat> [[VS1:%.*]], i64 noundef [[VL:%.*]]) #[[ATTR0]] {
+// CHECK-RV64-NEXT: [[ENTRY:.*:]]
+// CHECK-RV64-NEXT: [[TMP0:%.*]] = call <vscale x 8 x float> @llvm.riscv.vfwadd.w.nxv8f32.nxv8bf16.i64(<vscale x 8 x float> poison, <vscale x 8 x float> [[VS2]], <vscale x 8 x bfloat> [[VS1]], i64 0, i64 [[VL]])
+// CHECK-RV64-NEXT: ret <vscale x 8 x float> [[TMP0]]
+//
+vfloat32m4_t test_vfwadd_wv_bf16m2_f32m4_rm(vfloat32m4_t vs2, vbfloat16m2_t vs1,
+ size_t vl) {
+ return __riscv_vfwadd_wv(vs2, vs1, __RISCV_FRM_RNE, vl);
+}
+
+// CHECK-RV64-LABEL: define dso_local <vscale x 8 x float> @test_vfwadd_wf_bf16_f32m4_rm(
+// CHECK-RV64-SAME: <vscale x 8 x float> [[VS2:%.*]], bfloat noundef [[RS1:%.*]], i64 noundef [[VL:%.*]]) #[[ATTR0]] {
+// CHECK-RV64-NEXT: [[ENTRY:.*:]]
+// CHECK-RV64-NEXT: [[TMP0:%.*]] = call <vscale x 8 x float> @llvm.riscv.vfwadd.w.nxv8f32.bf16.i64(<vscale x 8 x float> poison, <vscale x 8 x float> [[VS2]], bfloat [[RS1]], i64 0, i64 [[VL]])
+// CHECK-RV64-NEXT: ret <vscale x 8 x float> [[TMP0]]
+//
+vfloat32m4_t test_vfwadd_wf_bf16_f32m4_rm(vfloat32m4_t vs2, __bf16 rs1,
+ size_t vl) {
+ return __riscv_vfwadd_wf(vs2, rs1, __RISCV_FRM_RNE, vl);
+}
+
+// CHECK-RV64-LABEL: define dso_local <vscale x 16 x float> @test_vfwadd_vv_bf16m4_f32m8_rm(
+// CHECK-RV64-SAME: <vscale x 16 x bfloat> [[VS2:%.*]], <vscale x 16 x bfloat> [[VS1:%.*]], i64 noundef [[VL:%.*]]) #[[ATTR0]] {
+// CHECK-RV64-NEXT: [[ENTRY:.*:]]
+// CHECK-RV64-NEXT: [[TMP0:%.*]] = call <vscale x 16 x float> @llvm.riscv.vfwadd.nxv16f32.nxv16bf16.nxv16bf16.i64(<vscale x 16 x float> poison, <vscale x 16 x bfloat> [[VS2]], <vscale x 16 x bfloat> [[VS1]], i64 0, i64 [[VL]])
+// CHECK-RV64-NEXT: ret <vscale x 16 x float> [[TMP0]]
+//
+vfloat32m8_t test_vfwadd_vv_bf16m4_f32m8_rm(vbfloat16m4_t vs2,
+ vbfloat16m4_t vs1, size_t vl) {
+ return __riscv_vfwadd_vv(vs2, vs1, __RISCV_FRM_RNE, vl);
+}
+
+// CHECK-RV64-LABEL: define dso_local <vscale x 16 x float> @test_vfwadd_vf_bf16m4_f32m8_rm(
+// CHECK-RV64-SAME: <vscale x 16 x bfloat> [[VS2:%.*]], bfloat noundef [[RS1:%.*]], i64 noundef [[VL:%.*]]) #[[ATTR0]] {
+// CHECK-RV64-NEXT: [[ENTRY:.*:]]
+// CHECK-RV64-NEXT: [[TMP0:%.*]] = call <vscale x 16 x float> @llvm.riscv.vfwadd.nxv16f32.nxv16bf16.bf16.i64(<vscale x 16 x float> poison, <vscale x 16 x bfloat> [[VS2]], bfloat [[RS1]], i64 0, i64 [[VL]])
+// CHECK-RV64-NEXT: ret <vscale x 16 x float> [[TMP0]]
+//
+vfloat32m8_t test_vfwadd_vf_bf16m4_f32m8_rm(vbfloat16m4_t vs2, __bf16 rs1,
+ size_t vl) {
+ return __riscv_vfwadd_vf(vs2, rs1, __RISCV_FRM_RNE, vl);
+}
+
+// CHECK-RV64-LABEL: define dso_local <vscale x 16 x float> @test_vfwadd_wv_bf16m4_f32m8_rm(
+// CHECK-RV64-SAME: <vscale x 16 x float> [[VS2:%.*]], <vscale x 16 x bfloat> [[VS1:%.*]], i64 noundef [[VL:%.*]]) #[[ATTR0]] {
+// CHECK-RV64-NEXT: [[ENTRY:.*:]]
+// CHECK-RV64-NEXT: [[TMP0:%.*]] = call <vscale x 16 x float> @llvm.riscv.vfwadd.w.nxv16f32.nxv16bf16.i64(<vscale x 16 x float> poison, <vscale x 16 x float> [[VS2]], <vscale x 16 x bfloat> [[VS1]], i64 0, i64 [[VL]])
+// CHECK-RV64-NEXT: ret <vscale x 16 x float> [[TMP0]]
+//
+vfloat32m8_t test_vfwadd_wv_bf16m4_f32m8_rm(vfloat32m8_t vs2, vbfloat16m4_t vs1,
+ size_t vl) {
+ return __riscv_vfwadd_wv(vs2, vs1, __RISCV_FRM_RNE, vl);
+}
+
+// CHECK-RV64-LABEL: define dso_local <vscale x 16 x float> @test_vfwadd_wf_bf16_f32m8_rm(
+// CHECK-RV64-SAME: <vscale x 16 x float> [[VS2:%.*]], bfloat noundef [[RS1:%.*]], i64 noundef [[VL:%.*]]) #[[ATTR0]] {
+// CHECK-RV64-NEXT: [[ENTRY:.*:]]
+// CHECK-RV64-NEXT: [[TMP0:%.*]] = call <vscale x 16 x float> @llvm.riscv.vfwadd.w.nxv16f32.bf16.i64(<vscale x 16 x float> poison, <vscale x 16 x float> [[VS2]], bfloat [[RS1]], i64 0, i64 [[VL]])
+// CHECK-RV64-NEXT: ret <vscale x 16 x float> [[TMP0]]
+//
+vfloat32m8_t test_vfwadd_wf_bf16_f32m8_rm(vfloat32m8_t vs2, __bf16 rs1,
+ size_t vl) {
+ return __riscv_vfwadd_wf(vs2, rs1, __RISCV_FRM_RNE, vl);
+}
+
+// CHECK-RV64-LABEL: define dso_local <vscale x 1 x float> @test_vfwadd_vv_bf16mf4_f32mf2_rm_m(
+// CHECK-RV64-SAME: <vscale x 1 x i1> [[VM:%.*]], <vscale x 1 x bfloat> [[VS2:%.*]], <vscale x 1 x bfloat> [[VS1:%.*]], i64 noundef [[VL:%.*]]) #[[ATTR0]] {
+// CHECK-RV64-NEXT: [[ENTRY:.*:]]
+// CHECK-RV64-NEXT: [[TMP0:%.*]] = call <vscale x 1 x float> @llvm.riscv.vfwadd.mask.nxv1f32.nxv1bf16.nxv1bf16.i64(<vscale x 1 x float> poison, <vscale x 1 x bfloat> [[VS2]], <vscale x 1 x bfloat> [[VS1]], <vscale x 1 x i1> [[VM]], i64 0, i64 [[VL]], i64 3)
+// CHECK-RV64-NEXT: ret <vscale x 1 x float> [[TMP0]]
+//
+vfloat32mf2_t test_vfwadd_vv_bf16mf4_f32mf2_rm_m(vbool64_t vm,
+ vbfloat16mf4_t vs2,
+ vbfloat16mf4_t vs1,
+ size_t vl) {
+ return __riscv_vfwadd_vv(vm, vs2, vs1, __RISCV_FRM_RNE, vl);
+}
+
+// CHECK-RV64-LABEL: define dso_local <vscale x 1 x float> @test_vfwadd_vf_bf16mf4_f32mf2_rm_m(
+// CHECK-RV64-SAME: <vscale x 1 x i1> [[VM:%.*]], <vscale x 1 x bfloat> [[VS2:%.*]], bfloat noundef [[RS1:%.*]], i64 noundef [[VL:%.*]]) #[[ATTR0]] {
+// CHECK-RV64-NEXT: [[ENTRY:.*:]]
+// CHECK-RV64-NEXT: [[TMP0:%.*]] = call <vscale x 1 x float> @llvm.riscv.vfwadd.mask.nxv1f32.nxv1bf16.bf16.i64(<vscale x 1 x float> poison, <vscale x 1 x bfloat> [[VS2]], bfloat [[RS1]], <vscale x 1 x i1> [[VM]], i64 0, i64 [[VL]], i64 3)
+// CHECK-RV64-NEXT: ret <vscale x 1 x float> [[TMP0]]
+//
+vfloat32mf2_t test_vfwadd_vf_bf16mf4_f32mf2_rm_m(vbool64_t vm,
+ vbfloat16mf4_t vs2, __bf16 rs1,
+ size_t vl) {
+ return __riscv_vfwadd_vf(vm, vs2, rs1, __RISCV_FRM_RNE, vl);
+}
+
+// CHECK-RV64-LABEL: define dso_local <vscale x 1 x float> @test_vfwadd_wv_bf16mf4_f32mf2_rm_m(
+// CHECK-RV64-SAME: <vscale x 1 x i1> [[VM:%.*]], <vscale x 1 x float> [[VS2:%.*]], <vscale x 1 x bfloat> [[VS1:%.*]], i64 noundef [[VL:%.*]]) #[[ATTR0]] {
+// CHECK-RV64-NEXT: [[ENTRY:.*:]]
+// CHECK-RV64-NEXT: [[TMP0:%.*]] = call <vscale x 1 x float> @llvm.riscv.vfwadd.w.mask.nxv1f32.nxv1bf16.i64(<vscale x 1 x float> poison, <vscale x 1 x float> [[VS2]], <vscale x 1 x bfloat> [[VS1]], <vscale x 1 x i1> [[VM]], i64 0, i64 [[VL]], i64 3)
+// CHECK-RV64-NEXT: ret <vscale x 1 x float> [[TMP0]]
+//
+vfloat32mf2_t test_vfwadd_wv_bf16mf4_f32mf2_rm_m(vbool64_t vm,
+ vfloat32mf2_t vs2,
+ vbfloat16mf4_t vs1,
+ size_t vl) {
+ return __riscv_vfwadd_wv(vm, vs2, vs1, __RISCV_FRM_RNE, vl);
+}
+
+// CHECK-RV64-LABEL: define dso_local <vscale x 1 x float> @test_vfwadd_wf_bf16_f32mf2_rm_m(
+// CHECK-RV64-SAME: <vscale x 1 x i1> [[VM:%.*]], <vscale x 1 x float> [[VS2:%.*]], bfloat noundef [[RS1:%.*]], i64 noundef [[VL:%.*]]) #[[ATTR0]] {
+// CHECK-RV64-NEXT: [[ENTRY:.*:]]
+// CHECK-RV64-NEXT: [[TMP0:%.*]] = call <vscale x 1 x float> @llvm.riscv.vfwadd.w.mask.nxv1f32.bf16.i64(<vscale x 1 x float> poison, <vscale x 1 x float> [[VS2]], bfloat [[RS1]], <vscale x 1 x i1> [[VM]], i64 0, i64 [[VL]], i64 3)
+// CHECK-RV64-NEXT: ret <vscale x 1 x float> [[TMP0]]
+//
+vfloat32mf2_t test_vfwadd_wf_bf16_f32mf2_rm_m(vbool64_t vm, vfloat32mf2_t vs2,
+ __bf16 rs1, size_t vl) {
+ return __riscv_vfwadd_wf(vm, vs2, rs1, __RISCV_FRM_RNE, vl);
+}
+
+// CHECK-RV64-LABEL: define dso_local <vscale x 2 x float> @test_vfwadd_vv_bf16mf2_f32m1_rm_m(
+// CHECK-RV64-SAME: <vscale x 2 x i1> [[VM:%.*]], <vscale x 2 x bfloat> [[VS2:%.*]], <vscale x 2 x bfloat> [[VS1:%.*]], i64 noundef [[VL:%.*]]) #[[ATTR0]] {
+// CHECK-RV64-NEXT: [[ENTRY:.*:]]
+// CHECK-RV64-NEXT: [[TMP0:%.*]] = call <vscale x 2 x float> @llvm.riscv.vfwadd.mask.nxv2f32.nxv2bf16.nxv2bf16.i64(<vscale x 2 x float> poison, <vscale x 2 x bfloat> [[VS2]], <vscale x 2 x bfloat> [[VS1]], <vscale x 2 x i1> [[VM]], i64 0, i64 [[VL]], i64 3)
+// CHECK-RV64-NEXT: ret <vscale x 2 x float> [[TMP0]]
+//
+vfloat32m1_t test_vfwadd_vv_bf16mf2_f32m1_rm_m(vbool32_t vm, vbfloat16mf2_t vs2,
+ vbfloat16mf2_t vs1, size_t vl) {
+ return __riscv_vfwadd_vv(vm, vs2, vs1, __RISCV_FRM_RNE, vl);
+}
+
+// CHECK-RV64-LABEL: define dso_local <vscale x 2 x float> @test_vfwadd_vf_bf16mf2_f32m1_rm_m(
+// CHECK-RV64-SAME: <vscale x 2 x i1> [[VM:%.*]], <vscale x 2 x bfloat> [[VS2:%.*]], bfloat noundef [[RS1:%.*]], i64 noundef [[VL:%.*]]) #[[ATTR0]] {
+// CHECK-RV64-NEXT: [[ENTRY:.*:]]
+// CHECK-RV64-NEXT: [[TMP0:%.*]] = call <vscale x 2 x float> @llvm.riscv.vfwadd.mask.nxv2f32.nxv2bf16.bf16.i64(<vscale x 2 x float> poison, <vscale x 2 x bfloat> [[VS2]], bfloat [[RS1]], <vscale x 2 x i1> [[VM]], i64 0, i64 [[VL]], i64 3)
+// CHECK-RV64-NEXT: ret <vscale x 2 x float> [[TMP0]]
+//
+vfloat32m1_t test_vfwadd_vf_bf16mf2_f32m1_rm_m(vbool32_t vm, vbfloat16mf2_t vs2,
+ __bf16 rs1, size_t vl) {
+ return __riscv_vfwadd_vf(vm, vs2, rs1, __RISCV_FRM_RNE, vl);
+}
+
+// CHECK-RV64-LABEL: define dso_local <vscale x 2 x float> @test_vfwadd_wv_bf16mf2_f32m1_rm_m(
+// CHECK-RV64-SAME: <vscale x 2 x i1> [[VM:%.*]], <vscale x 2 x float> [[VS2:%.*]], <vscale x 2 x bfloat> [[VS1:%.*]], i64 noundef [[VL:%.*]]) #[[ATTR0]] {
+// CHECK-RV64-NEXT: [[ENTRY:.*:]]
+// CHECK-RV64-NEXT: [[TMP0:%.*]] = call <vscale x 2 x float> @llvm.riscv.vfwadd.w.mask.nxv2f32.nxv2bf16.i64(<vscale x 2 x float> poison, <vscale x 2 x float> [[VS2]], <vscale x 2 x bfloat> [[VS1]], <vscale x 2 x i1> [[VM]], i64 0, i64 [[VL]], i64 3)
+// CHECK-RV64-NEXT: ret <vscale x 2 x float> [[TMP0]]
+//
+vfloat32m1_t test_vfwadd_wv_bf16mf2_f32m1_rm_m(vbool32_t vm, vfloat32m1_t vs2,
+ vbfloat16mf2_t vs1, size_t vl) {
+ return __riscv_vfwadd_wv(vm, vs2, vs1, __RISCV_FRM_RNE, vl);
+}
+
+// CHECK-RV64-LABEL: define dso_local <vscale x 2 x float> @test_vfwadd_wf_bf16_f32m1_rm_m(
+// CHECK-RV64-SAME: <vscale x 2 x i1> [[VM:%.*]], <vscale x 2 x float> [[VS2:%.*]], bfloat noundef [[RS1:%.*]], i64 noundef [[VL:%.*]]) #[[ATTR0]] {
+// CHECK-RV64-NEXT: [[ENTRY:.*:]]
+// CHECK-RV64-NEXT: [[TMP0:%.*]] = call <vscale x 2 x float> @llvm.riscv.vfwadd.w.mask.nxv2f32.bf16.i64(<vscale x 2 x float> poison, <vscale x 2 x float> [[VS2]], bfloat [[RS1]], <vscale x 2 x i1> [[VM]], i64 0, i64 [[VL]], i64 3)
+// CHECK-RV64-NEXT: ret <vscale x 2 x float> [[TMP0]]
+//
+vfloat32m1_t test_vfwadd_wf_bf16_f32m1_rm_m(vbool32_t vm, vfloat32m1_t vs2,
+ __bf16 rs1, size_t vl) {
+ return __riscv_vfwadd_wf(vm, vs2, rs1, __RISCV_FRM_RNE, vl);
+}
+
+// CHECK-RV64-LABEL: define dso_local <vscale x 4 x float> @test_vfwadd_vv_bf16m1_f32m2_rm_m(
+// CHECK-RV64-SAME: <vscale x 4 x i1> [[VM:%.*]], <vscale x 4 x bfloat> [[VS2:%.*]], <vscale x 4 x bfloat> [[VS1:%.*]], i64 noundef [[VL:%.*]]) #[[ATTR0]] {
+// CHECK-RV64-NEXT: [[ENTRY:.*:]]
+// CHECK-RV64-NEXT: [[TMP0:%.*]] = call <vscale x 4 x float> @llvm.riscv.vfwadd.mask.nxv4f32.nxv4bf16.nxv4bf16.i64(<vscale x 4 x float> poison, <vscale x 4 x bfloat> [[VS2]], <vscale x 4 x bfloat> [[VS1]], <vscale x 4 x i1> [[VM]], i64 0, i64 [[VL]], i64 3)
+// CHECK-RV64-NEXT: ret <vscale x 4 x float> [[TMP0]]
+//
+vfloat32m2_t test_vfwadd_vv_bf16m1_f32m2_rm_m(vbool16_t vm, vbfloat16m1_t vs2,
+ vbfloat16m1_t vs1, size_t vl) {
+ return __riscv_vfwadd_vv(vm, vs2, vs1, __RISCV_FRM_RNE, vl);
+}
+
+// CHECK-RV64-LABEL: define dso_local <vscale x 4 x float> @test_vfwadd_vf_bf16m1_f32m2_rm_m(
+// CHECK-RV64-SAME: <vscale x 4 x i1> [[VM:%.*]], <vscale x 4 x bfloat> [[VS2:%.*]], bfloat noundef [[RS1:%.*]], i64 noundef [[VL:%.*]]) #[[ATTR0]] {
+// CHECK-RV64-NEXT: [[ENTRY:.*:]]
+// CHECK-RV64-NEXT: [[TMP0:%.*]] = call <vscale x 4 x float> @llvm.riscv.vfwadd.mask.nxv4f32.nxv4bf16.bf16.i64(<vscale x 4 x float> poison, <vscale x 4 x bfloat> [[VS2]], bfloat [[RS1]], <vscale x 4 x i1> [[VM]], i64 0, i64 [[VL]], i64 3)
+// CHECK-RV64-NEXT: ret <vscale x 4 x float> [[TMP0]]
+//
+vfloat32m2_t test_vfwadd_vf_bf16m1_f32m2_rm_m(vbool16_t vm, vbfloat16m1_t vs2,
+ __bf16 rs1, size_t vl) {
+ return __riscv_vfwadd_vf(vm, vs2, rs1, __RISCV_FRM_RNE, vl);
+}
+
+// CHECK-RV64-LABEL: define dso_local <vscale x 4 x float> @test_vfwadd_wv_bf16m1_f32m2_rm_m(
+// CHECK-RV64-SAME: <vscale x 4 x i1> [[VM:%.*]], <vscale x 4 x float> [[VS2:%.*]], <vscale x 4 x bfloat> [[VS1:%.*]], i64 noundef [[VL:%.*]]) #[[ATTR0]] {
+// CHECK-RV64-NEXT: [[ENTRY:.*:]]
+// CHECK-RV64-NEXT: [[TMP0:%.*]] = call <vscale x 4 x float> @llvm.riscv.vfwadd.w.mask.nxv4f32.nxv4bf16.i64(<vscale x 4 x float> poison, <vscale x 4 x float> [[VS2]], <vscale x 4 x bfloat> [[VS1]], <vscale x 4 x i1> [[VM]], i64 0, i64 [[VL]], i64 3)
+// CHECK-RV64-NEXT: ret <vscale x 4 x float> [[TMP0]]
+//
+vfloat32m2_t test_vfwadd_wv_bf16m1_f32m2_rm_m(vbool16_t vm, vfloat32m2_t vs2,
+ vbfloat16m1_t vs1, size_t vl) {
+ return __riscv_vfwadd_wv(vm, vs2, vs1, __RISCV_FRM_RNE, vl);
+}
+
+// CHECK-RV64-LABEL: define dso_local <vscale x 4 x float> @test_vfwadd_wf_bf16_f32m2_rm_m(
+// CHECK-RV64-SAME: <vscale x 4 x i1> [[VM:%.*]], <vscale x 4 x float> [[VS2:%.*]], bfloat noundef [[RS1:%.*]], i64 noundef [[VL:%.*]]) #[[ATTR0]] {
+// CHECK-RV64-NEXT: [[ENTRY:.*:]]
+// CHECK-RV64-NEXT: [[TMP0:%.*]] = call <vscale x 4 x float> @llvm.riscv.vfwadd.w.mask.nxv4f32.bf16.i64(<vscale x 4 x float> poison, <vscale x 4 x float> [[VS2]], bfloat [[RS1]], <vscale x 4 x i1> [[VM]], i64 0, i64 [[VL]], i64 3)
+// CHECK-RV64-NEXT: ret <vscale x 4 x float> [[TMP0]]
+//
+vfloat32m2_t test_vfwadd_wf_bf16_f32m2_rm_m(vbool16_t vm, vfloat32m2_t vs2,
+ __bf16 rs1, size_t vl) {
+ return __riscv_vfwadd_wf(vm, vs2, rs1, __RISCV_FRM_RNE, vl);
+}
+
+// CHECK-RV64-LABEL: define dso_local <vscale x 8 x float> @test_vfwadd_vv_bf16m2_f32m4_rm_m(
+// CHECK-RV64-SAME: <vscale x 8 x i1> [[VM:%.*]], <vscale x 8 x bfloat> [[VS2:%.*]], <vscale x 8 x bfloat> [[VS1:%.*]], i64 noundef [[VL:%.*]]) #[[ATTR0]] {
+// CHECK-RV64-NEXT: [[ENTRY:.*:]]
+// CHECK-RV64-NEXT: [[TMP0:%.*]] = call <vscale x 8 x float> @llvm.riscv.vfwadd.mask.nxv8f32.nxv8bf16.nxv8bf16.i64(<vscale x 8 x float> poison, <vscale x 8 x bfloat> [[VS2]], <vscale x 8 x bfloat> [[VS1]], <vscale x 8 x i1> [[VM]], i64 0, i64 [[VL]], i64 3)
+// CHECK-RV64-NEXT: ret <vscale x 8 x float> [[TMP0]]
+//
+vfloat32m4_t test_vfwadd_vv_bf16m2_f32m4_rm_m(vbool8_t vm, vbfloat16m2_t vs2,
+ vbfloat16m2_t vs1, size_t vl) {
+ return __riscv_vfwadd_vv(vm, vs2, vs1, __RISCV_FRM_RNE, vl);
+}
+
+// CHECK-RV64-LABEL: define dso_local <vscale x 8 x float> @test_vfwadd_vf_bf16m2_f32m4_rm_m(
+// CHECK-RV64-SAME: <vscale x 8 x i1> [[VM:%.*]], <vscale x 8 x bfloat> [[VS2:%.*]], bfloat noundef [[RS1:%.*]], i64 noundef [[VL:%.*]]) #[[ATTR0]] {
+// CHECK-RV64-NEXT: [[ENTRY:.*:]]
+// CHECK-RV64-NEXT: [[TMP0:%.*]] = call <vscale x 8 x float> @llvm.riscv.vfwadd.mask.nxv8f32.nxv8bf16.bf16.i64(<vscale x 8 x float> poison, <vscale x 8 x bfloat> [[VS2]], bfloat [[RS1]], <vscale x 8 x i1> [[VM]], i64 0, i64 [[VL]], i64 3)
+// CHECK-RV64-NEXT: ret <vscale x 8 x float> [[TMP0]]
+//
+vfloat32m4_t test_vfwadd_vf_bf16m2_f32m4_rm_m(vbool8_t vm, vbfloat16m2_t vs2,
+ __bf16 rs1, size_t vl) {
+ return __riscv_vfwadd_vf(vm, vs2, rs1, __RISCV_FRM_RNE, vl);
+}
+
+// CHECK-RV64-LABEL: define dso_local <vscale x 8 x float> @test_vfwadd_wv_bf16m2_f32m4_rm_m(
+// CHECK-RV64-SAME: <vscale x 8 x i1> [[VM:%.*]], <vscale x 8 x float> [[VS2:%.*]], <vscale x 8 x bfloat> [[VS1:%.*]], i64 noundef [[VL:%.*]]) #[[ATTR0]] {
+// CHECK-RV64-NEXT: [[ENTRY:.*:]]
+// CHECK-RV64-NEXT: [[TMP0:%.*]] = call <vscale x 8 x float> @llvm.riscv.vfwadd.w.mask.nxv8f32.nxv8bf16.i64(<vscale x 8 x float> poison, <vscale x 8 x float> [[VS2]], <vscale x 8 x bfloat> [[VS1]], <vscale x 8 x i1> [[VM]], i64 0, i64 [[VL]], i64 3)
+// CHECK-RV64-NEXT: ret <vscale x 8 x float> [[TMP0]]
+//
+vfloat32m4_t test_vfwadd_wv_bf16m2_f32m4_rm_m(vbool8_t vm, vfloat32m4_t vs2,
+ vbfloat16m2_t vs1, size_t vl) {
+ return __riscv_vfwadd_wv(vm, vs2, vs1, __RISCV_FRM_RNE, vl);
+}
+
+// CHECK-RV64-LABEL: define dso_local <vscale x 8 x float> @test_vfwadd_wf_bf16_f32m4_rm_m(
+// CHECK-RV64-SAME: <vscale x 8 x i1> [[VM:%.*]], <vscale x 8 x float> [[VS2:%.*]], bfloat noundef [[RS1:%.*]], i64 noundef [[VL:%.*]]) #[[ATTR0]] {
+// CHECK-RV64-NEXT: [[ENTRY:.*:]]
+// CHECK-RV64-NEXT: [[TMP0:%.*]] = call <vscale x 8 x float> @llvm.riscv.vfwadd.w.mask.nxv8f32.bf16.i64(<vscale x 8 x float> poison, <vscale x 8 x float> [[VS2]], bfloat [[RS1]], <vscale x 8 x i1> [[VM]], i64 0, i64 [[VL]], i64 3)
+// CHECK-RV64-NEXT: ret <vscale x 8 x float> [[TMP0]]
+//
+vfloat32m4_t test_vfwadd_wf_bf16_f32m4_rm_m(vbool8_t vm, vfloat32m4_t vs2,
+ __bf16 rs1, size_t vl) {
+ return __riscv_vfwadd_wf(vm, vs2, rs1, __RISCV_FRM_RNE, vl);
+}
+
+// CHECK-RV64-LABEL: define dso_local <vscale x 16 x float> @test_vfwadd_vv_bf16m4_f32m8_rm_m(
+// CHECK-RV64-SAME: <vscale x 16 x i1> [[VM:%.*]], <vscale x 16 x bfloat> [[VS2:%.*]], <vscale x 16 x bfloat> [[VS1:%.*]], i64 noundef [[VL:%.*]]) #[[ATTR0]] {
+// CHECK-RV64-NEXT: [[ENTRY:.*:]]
+// CHECK-RV64-NEXT: [[TMP0:%.*]] = call <vscale x 16 x float> @llvm.riscv.vfwadd.mask.nxv16f32.nxv16bf16.nxv16bf16.i64(<vscale x 16 x float> poison, <vscale x 16 x bfloat> [[VS2]], <vscale x 16 x bfloat> [[VS1]], <vscale x 16 x i1> [[VM]], i64 0, i64 [[VL]], i64 3)
+// CHECK-RV64-NEXT: ret <vscale x 16 x float> [[TMP0]]
+//
+vfloat32m8_t test_vfwadd_vv_bf16m4_f32m8_rm_m(vbool4_t vm, vbfloat16m4_t vs2,
+ vbfloat16m4_t vs1, size_t vl) {
+ return __riscv_vfwadd_vv(vm, vs2, vs1, __RISCV_FRM_RNE, vl);
+}
+
+// CHECK-RV64-LABEL: define dso_local <vscale x 16 x float> @test_vfwadd_vf_bf16m4_f32m8_rm_m(
+// CHECK-RV64-SAME: <vscale x 16 x i1> [[VM:%.*]], <vscale x 16 x bfloat> [[VS2:%.*]], bfloat noundef [[RS1:%.*]], i64 noundef [[VL:%.*]]) #[[ATTR0]] {
+// CHECK-RV64-NEXT: [[ENTRY:.*:]]
+// CHECK-RV64-NEXT: [[TMP0:%.*]] = call <vscale x 16 x float> @llvm.riscv.vfwadd.mask.nxv16f32.nxv16bf16.bf16.i64(<vscale x 16 x float> poison, <vscale x 16 x bfloat> [[VS2]], bfloat [[RS1]], <vscale x 16 x i1> [[VM]], i64 0, i64 [[VL]], i64 3)
+// CHECK-RV64-NEXT: ret <vscale x 16 x float> [[TMP0]]
+//
+vfloat32m8_t test_vfwadd_vf_bf16m4_f32m8_rm_m(vbool4_t vm, vbfloat16m4_t vs2,
+ __bf16 rs1, size_t vl) {
+ return __riscv_vfwadd_vf(vm, vs2, rs1, __RISCV_FRM_RNE, vl);
+}
+
+// CHECK-RV64-LABEL: define dso_local <vscale x 16 x float> @test_vfwadd_wv_bf16m4_f32m8_rm_m(
+// CHECK-RV64-SAME: <vscale x 16 x i1> [[VM:%.*]], <vscale x 16 x float> [[VS2:%.*]], <vscale x 16 x bfloat> [[VS1:%.*]], i64 noundef [[VL:%.*]]) #[[ATTR0]] {
+// CHECK-RV64-NEXT: [[ENTRY:.*:]]
+// CHECK-RV64-NEXT: [[TMP0:%.*]] = call <vscale x 16 x float> @llvm.riscv.vfwadd.w.mask.nxv16f32.nxv16bf16.i64(<vscale x 16 x float> poison, <vscale x 16 x float> [[VS2]], <vscale x 16 x bfloat> [[VS1]], <vscale x 16 x i1> [[VM]], i64 0, i64 [[VL]], i64 3)
+// CHECK-RV64-NEXT: ret <vscale x 16 x float> [[TMP0]]
+//
+vfloat32m8_t test_vfwadd_wv_bf16m4_f32m8_rm_m(vbool4_t vm, vfloat32m8_t vs2,
+ vbfloat16m4_t vs1, size_t vl) {
+ return __riscv_vfwadd_wv(vm, vs2, vs1, __RISCV_FRM_RNE, vl);
+}
+
+// CHECK-RV64-LABEL: define dso_local <vscale x 16 x float> @test_vfwadd_wf_bf16_f32m8_rm_m(
+// CHECK-RV64-SAME: <vscale x 16 x i1> [[VM:%.*]], <vscale x 16 x float> [[VS2:%.*]], bfloat noundef [[RS1:%.*]], i64 noundef [[VL:%.*]]) #[[ATTR0]] {
+// CHECK-RV64-NEXT: [[ENTRY:.*:]]
+// CHECK-RV64-NEXT: [[TMP0:%.*]] = call <vscale x 16 x float> @llvm.riscv.vfwadd.w.mask.nxv16f32.bf16.i64(<vscale x 16 x float> poison, <vscale x 16 x float> [[VS2]], bfloat [[RS1]], <vscale x 16 x i1> [[VM]], i64 0, i64 [[VL]], i64 3)
+// CHECK-RV64-NEXT: ret <vscale x 16 x float> [[TMP0]]
+//
+vfloat32m8_t test_vfwadd_wf_bf16_f32m8_rm_m(vbool4_t vm, vfloat32m8_t vs2,
+ __bf16 rs1, size_t vl) {
+ return __riscv_vfwadd_wf(vm, vs2, rs1, __RISCV_FRM_RNE, vl);
+}
diff --git a/clang/test/CodeGen/RISCV/rvv-intrinsics-autogenerated/zvfbfa/non-policy/overloaded/vfwcvt.c b/clang/test/CodeGen/RISCV/rvv-intrinsics-autogenerated/zvfbfa/non-policy/overloaded/vfwcvt.c
new file mode 100644
index 0000000..0399a63
--- /dev/null
+++ b/clang/test/CodeGen/RISCV/rvv-intrinsics-autogenerated/zvfbfa/non-policy/overloaded/vfwcvt.c
@@ -0,0 +1,366 @@
+// NOTE: Assertions have been autogenerated by utils/update_cc_test_checks.py UTC_ARGS: --version 5
+// REQUIRES: riscv-registered-target
+// RUN: %clang_cc1 -triple riscv64 -target-feature +v \
+// RUN: -target-feature +experimental-zvfbfa -disable-O0-optnone \
+// RUN: -emit-llvm %s -o - | opt -S -passes=mem2reg | \
+// RUN: FileCheck --check-prefix=CHECK-RV64 %s
+
+#include <riscv_vector.h>
+
+// CHECK-RV64-LABEL: define dso_local <vscale x 1 x bfloat> @test_vfwcvt_f_x_v_bf16mf4(
+// CHECK-RV64-SAME: <vscale x 1 x i8> [[VS2:%.*]], i64 noundef [[VL:%.*]]) #[[ATTR0:[0-9]+]] {
+// CHECK-RV64-NEXT: [[ENTRY:.*:]]
+// CHECK-RV64-NEXT: [[TMP0:%.*]] = call <vscale x 1 x bfloat> @llvm.riscv.vfwcvt.f.x.v.nxv1bf16.nxv1i8.i64(<vscale x 1 x bfloat> poison, <vscale x 1 x i8> [[VS2]], i64 [[VL]])
+// CHECK-RV64-NEXT: ret <vscale x 1 x bfloat> [[TMP0]]
+//
+vbfloat16mf4_t test_vfwcvt_f_x_v_bf16mf4(vint8mf8_t vs2, size_t vl) {
+ return __riscv_vfwcvt_f_bf16(vs2, vl);
+}
+
+// CHECK-RV64-LABEL: define dso_local <vscale x 2 x bfloat> @test_vfwcvt_f_x_v_bf16mf2(
+// CHECK-RV64-SAME: <vscale x 2 x i8> [[VS2:%.*]], i64 noundef [[VL:%.*]]) #[[ATTR0]] {
+// CHECK-RV64-NEXT: [[ENTRY:.*:]]
+// CHECK-RV64-NEXT: [[TMP0:%.*]] = call <vscale x 2 x bfloat> @llvm.riscv.vfwcvt.f.x.v.nxv2bf16.nxv2i8.i64(<vscale x 2 x bfloat> poison, <vscale x 2 x i8> [[VS2]], i64 [[VL]])
+// CHECK-RV64-NEXT: ret <vscale x 2 x bfloat> [[TMP0]]
+//
+vbfloat16mf2_t test_vfwcvt_f_x_v_bf16mf2(vint8mf4_t vs2, size_t vl) {
+ return __riscv_vfwcvt_f_bf16(vs2, vl);
+}
+
+// CHECK-RV64-LABEL: define dso_local <vscale x 4 x bfloat> @test_vfwcvt_f_x_v_bf16m1(
+// CHECK-RV64-SAME: <vscale x 4 x i8> [[VS2:%.*]], i64 noundef [[VL:%.*]]) #[[ATTR0]] {
+// CHECK-RV64-NEXT: [[ENTRY:.*:]]
+// CHECK-RV64-NEXT: [[TMP0:%.*]] = call <vscale x 4 x bfloat> @llvm.riscv.vfwcvt.f.x.v.nxv4bf16.nxv4i8.i64(<vscale x 4 x bfloat> poison, <vscale x 4 x i8> [[VS2]], i64 [[VL]])
+// CHECK-RV64-NEXT: ret <vscale x 4 x bfloat> [[TMP0]]
+//
+vbfloat16m1_t test_vfwcvt_f_x_v_bf16m1(vint8mf2_t vs2, size_t vl) {
+ return __riscv_vfwcvt_f_bf16(vs2, vl);
+}
+
+// CHECK-RV64-LABEL: define dso_local <vscale x 8 x bfloat> @test_vfwcvt_f_x_v_bf16m2(
+// CHECK-RV64-SAME: <vscale x 8 x i8> [[VS2:%.*]], i64 noundef [[VL:%.*]]) #[[ATTR0]] {
+// CHECK-RV64-NEXT: [[ENTRY:.*:]]
+// CHECK-RV64-NEXT: [[TMP0:%.*]] = call <vscale x 8 x bfloat> @llvm.riscv.vfwcvt.f.x.v.nxv8bf16.nxv8i8.i64(<vscale x 8 x bfloat> poison, <vscale x 8 x i8> [[VS2]], i64 [[VL]])
+// CHECK-RV64-NEXT: ret <vscale x 8 x bfloat> [[TMP0]]
+//
+vbfloat16m2_t test_vfwcvt_f_x_v_bf16m2(vint8m1_t vs2, size_t vl) {
+ return __riscv_vfwcvt_f_bf16(vs2, vl);
+}
+
+// CHECK-RV64-LABEL: define dso_local <vscale x 16 x bfloat> @test_vfwcvt_f_x_v_bf16m4(
+// CHECK-RV64-SAME: <vscale x 16 x i8> [[VS2:%.*]], i64 noundef [[VL:%.*]]) #[[ATTR0]] {
+// CHECK-RV64-NEXT: [[ENTRY:.*:]]
+// CHECK-RV64-NEXT: [[TMP0:%.*]] = call <vscale x 16 x bfloat> @llvm.riscv.vfwcvt.f.x.v.nxv16bf16.nxv16i8.i64(<vscale x 16 x bfloat> poison, <vscale x 16 x i8> [[VS2]], i64 [[VL]])
+// CHECK-RV64-NEXT: ret <vscale x 16 x bfloat> [[TMP0]]
+//
+vbfloat16m4_t test_vfwcvt_f_x_v_bf16m4(vint8m2_t vs2, size_t vl) {
+ return __riscv_vfwcvt_f_bf16(vs2, vl);
+}
+
+// CHECK-RV64-LABEL: define dso_local <vscale x 32 x bfloat> @test_vfwcvt_f_x_v_bf16m8(
+// CHECK-RV64-SAME: <vscale x 32 x i8> [[VS2:%.*]], i64 noundef [[VL:%.*]]) #[[ATTR0]] {
+// CHECK-RV64-NEXT: [[ENTRY:.*:]]
+// CHECK-RV64-NEXT: [[TMP0:%.*]] = call <vscale x 32 x bfloat> @llvm.riscv.vfwcvt.f.x.v.nxv32bf16.nxv32i8.i64(<vscale x 32 x bfloat> poison, <vscale x 32 x i8> [[VS2]], i64 [[VL]])
+// CHECK-RV64-NEXT: ret <vscale x 32 x bfloat> [[TMP0]]
+//
+vbfloat16m8_t test_vfwcvt_f_x_v_bf16m8(vint8m4_t vs2, size_t vl) {
+ return __riscv_vfwcvt_f_bf16(vs2, vl);
+}
+
+// CHECK-RV64-LABEL: define dso_local <vscale x 1 x bfloat> @test_vfwcvt_f_xu_v_bf16mf4(
+// CHECK-RV64-SAME: <vscale x 1 x i8> [[VS2:%.*]], i64 noundef [[VL:%.*]]) #[[ATTR0]] {
+// CHECK-RV64-NEXT: [[ENTRY:.*:]]
+// CHECK-RV64-NEXT: [[TMP0:%.*]] = call <vscale x 1 x bfloat> @llvm.riscv.vfwcvt.f.xu.v.nxv1bf16.nxv1i8.i64(<vscale x 1 x bfloat> poison, <vscale x 1 x i8> [[VS2]], i64 [[VL]])
+// CHECK-RV64-NEXT: ret <vscale x 1 x bfloat> [[TMP0]]
+//
+vbfloat16mf4_t test_vfwcvt_f_xu_v_bf16mf4(vuint8mf8_t vs2, size_t vl) {
+ return __riscv_vfwcvt_f_bf16(vs2, vl);
+}
+
+// CHECK-RV64-LABEL: define dso_local <vscale x 2 x bfloat> @test_vfwcvt_f_xu_v_bf16mf2(
+// CHECK-RV64-SAME: <vscale x 2 x i8> [[VS2:%.*]], i64 noundef [[VL:%.*]]) #[[ATTR0]] {
+// CHECK-RV64-NEXT: [[ENTRY:.*:]]
+// CHECK-RV64-NEXT: [[TMP0:%.*]] = call <vscale x 2 x bfloat> @llvm.riscv.vfwcvt.f.xu.v.nxv2bf16.nxv2i8.i64(<vscale x 2 x bfloat> poison, <vscale x 2 x i8> [[VS2]], i64 [[VL]])
+// CHECK-RV64-NEXT: ret <vscale x 2 x bfloat> [[TMP0]]
+//
+vbfloat16mf2_t test_vfwcvt_f_xu_v_bf16mf2(vuint8mf4_t vs2, size_t vl) {
+ return __riscv_vfwcvt_f_bf16(vs2, vl);
+}
+
+// CHECK-RV64-LABEL: define dso_local <vscale x 4 x bfloat> @test_vfwcvt_f_xu_v_bf16m1(
+// CHECK-RV64-SAME: <vscale x 4 x i8> [[VS2:%.*]], i64 noundef [[VL:%.*]]) #[[ATTR0]] {
+// CHECK-RV64-NEXT: [[ENTRY:.*:]]
+// CHECK-RV64-NEXT: [[TMP0:%.*]] = call <vscale x 4 x bfloat> @llvm.riscv.vfwcvt.f.xu.v.nxv4bf16.nxv4i8.i64(<vscale x 4 x bfloat> poison, <vscale x 4 x i8> [[VS2]], i64 [[VL]])
+// CHECK-RV64-NEXT: ret <vscale x 4 x bfloat> [[TMP0]]
+//
+vbfloat16m1_t test_vfwcvt_f_xu_v_bf16m1(vuint8mf2_t vs2, size_t vl) {
+ return __riscv_vfwcvt_f_bf16(vs2, vl);
+}
+
+// CHECK-RV64-LABEL: define dso_local <vscale x 8 x bfloat> @test_vfwcvt_f_xu_v_bf16m2(
+// CHECK-RV64-SAME: <vscale x 8 x i8> [[VS2:%.*]], i64 noundef [[VL:%.*]]) #[[ATTR0]] {
+// CHECK-RV64-NEXT: [[ENTRY:.*:]]
+// CHECK-RV64-NEXT: [[TMP0:%.*]] = call <vscale x 8 x bfloat> @llvm.riscv.vfwcvt.f.xu.v.nxv8bf16.nxv8i8.i64(<vscale x 8 x bfloat> poison, <vscale x 8 x i8> [[VS2]], i64 [[VL]])
+// CHECK-RV64-NEXT: ret <vscale x 8 x bfloat> [[TMP0]]
+//
+vbfloat16m2_t test_vfwcvt_f_xu_v_bf16m2(vuint8m1_t vs2, size_t vl) {
+ return __riscv_vfwcvt_f_bf16(vs2, vl);
+}
+
+// CHECK-RV64-LABEL: define dso_local <vscale x 16 x bfloat> @test_vfwcvt_f_xu_v_bf16m4(
+// CHECK-RV64-SAME: <vscale x 16 x i8> [[VS2:%.*]], i64 noundef [[VL:%.*]]) #[[ATTR0]] {
+// CHECK-RV64-NEXT: [[ENTRY:.*:]]
+// CHECK-RV64-NEXT: [[TMP0:%.*]] = call <vscale x 16 x bfloat> @llvm.riscv.vfwcvt.f.xu.v.nxv16bf16.nxv16i8.i64(<vscale x 16 x bfloat> poison, <vscale x 16 x i8> [[VS2]], i64 [[VL]])
+// CHECK-RV64-NEXT: ret <vscale x 16 x bfloat> [[TMP0]]
+//
+vbfloat16m4_t test_vfwcvt_f_xu_v_bf16m4(vuint8m2_t vs2, size_t vl) {
+ return __riscv_vfwcvt_f_bf16(vs2, vl);
+}
+
+// CHECK-RV64-LABEL: define dso_local <vscale x 32 x bfloat> @test_vfwcvt_f_xu_v_bf16m8(
+// CHECK-RV64-SAME: <vscale x 32 x i8> [[VS2:%.*]], i64 noundef [[VL:%.*]]) #[[ATTR0]] {
+// CHECK-RV64-NEXT: [[ENTRY:.*:]]
+// CHECK-RV64-NEXT: [[TMP0:%.*]] = call <vscale x 32 x bfloat> @llvm.riscv.vfwcvt.f.xu.v.nxv32bf16.nxv32i8.i64(<vscale x 32 x bfloat> poison, <vscale x 32 x i8> [[VS2]], i64 [[VL]])
+// CHECK-RV64-NEXT: ret <vscale x 32 x bfloat> [[TMP0]]
+//
+vbfloat16m8_t test_vfwcvt_f_xu_v_bf16m8(vuint8m4_t vs2, size_t vl) {
+ return __riscv_vfwcvt_f_bf16(vs2, vl);
+}
+
+// CHECK-RV64-LABEL: define dso_local <vscale x 1 x float> @test_vfwcvt_f_f_v_bf16mf4_f32mf2(
+// CHECK-RV64-SAME: <vscale x 1 x bfloat> [[VS2:%.*]], i64 noundef [[VL:%.*]]) #[[ATTR0]] {
+// CHECK-RV64-NEXT: [[ENTRY:.*:]]
+// CHECK-RV64-NEXT: [[TMP0:%.*]] = call <vscale x 1 x float> @llvm.riscv.vfwcvt.f.f.v.nxv1f32.nxv1bf16.i64(<vscale x 1 x float> poison, <vscale x 1 x bfloat> [[VS2]], i64 [[VL]])
+// CHECK-RV64-NEXT: ret <vscale x 1 x float> [[TMP0]]
+//
+vfloat32mf2_t test_vfwcvt_f_f_v_bf16mf4_f32mf2(vbfloat16mf4_t vs2, size_t vl) {
+ return __riscv_vfwcvt_f(vs2, vl);
+}
+
+// CHECK-RV64-LABEL: define dso_local <vscale x 2 x float> @test_vfwcvt_f_f_v_bf16mf2_f32m1(
+// CHECK-RV64-SAME: <vscale x 2 x bfloat> [[VS2:%.*]], i64 noundef [[VL:%.*]]) #[[ATTR0]] {
+// CHECK-RV64-NEXT: [[ENTRY:.*:]]
+// CHECK-RV64-NEXT: [[TMP0:%.*]] = call <vscale x 2 x float> @llvm.riscv.vfwcvt.f.f.v.nxv2f32.nxv2bf16.i64(<vscale x 2 x float> poison, <vscale x 2 x bfloat> [[VS2]], i64 [[VL]])
+// CHECK-RV64-NEXT: ret <vscale x 2 x float> [[TMP0]]
+//
+vfloat32m1_t test_vfwcvt_f_f_v_bf16mf2_f32m1(vbfloat16mf2_t vs2, size_t vl) {
+ return __riscv_vfwcvt_f(vs2, vl);
+}
+
+// CHECK-RV64-LABEL: define dso_local <vscale x 4 x float> @test_vfwcvt_f_f_v_bf16m1_f32m2(
+// CHECK-RV64-SAME: <vscale x 4 x bfloat> [[VS2:%.*]], i64 noundef [[VL:%.*]]) #[[ATTR0]] {
+// CHECK-RV64-NEXT: [[ENTRY:.*:]]
+// CHECK-RV64-NEXT: [[TMP0:%.*]] = call <vscale x 4 x float> @llvm.riscv.vfwcvt.f.f.v.nxv4f32.nxv4bf16.i64(<vscale x 4 x float> poison, <vscale x 4 x bfloat> [[VS2]], i64 [[VL]])
+// CHECK-RV64-NEXT: ret <vscale x 4 x float> [[TMP0]]
+//
+vfloat32m2_t test_vfwcvt_f_f_v_bf16m1_f32m2(vbfloat16m1_t vs2, size_t vl) {
+ return __riscv_vfwcvt_f(vs2, vl);
+}
+
+// CHECK-RV64-LABEL: define dso_local <vscale x 8 x float> @test_vfwcvt_f_f_v_bf16m2_f32m4(
+// CHECK-RV64-SAME: <vscale x 8 x bfloat> [[VS2:%.*]], i64 noundef [[VL:%.*]]) #[[ATTR0]] {
+// CHECK-RV64-NEXT: [[ENTRY:.*:]]
+// CHECK-RV64-NEXT: [[TMP0:%.*]] = call <vscale x 8 x float> @llvm.riscv.vfwcvt.f.f.v.nxv8f32.nxv8bf16.i64(<vscale x 8 x float> poison, <vscale x 8 x bfloat> [[VS2]], i64 [[VL]])
+// CHECK-RV64-NEXT: ret <vscale x 8 x float> [[TMP0]]
+//
+vfloat32m4_t test_vfwcvt_f_f_v_bf16m2_f32m4(vbfloat16m2_t vs2, size_t vl) {
+ return __riscv_vfwcvt_f(vs2, vl);
+}
+
+// CHECK-RV64-LABEL: define dso_local <vscale x 16 x float> @test_vfwcvt_f_f_v_bf16m4_f32m8(
+// CHECK-RV64-SAME: <vscale x 16 x bfloat> [[VS2:%.*]], i64 noundef [[VL:%.*]]) #[[ATTR0]] {
+// CHECK-RV64-NEXT: [[ENTRY:.*:]]
+// CHECK-RV64-NEXT: [[TMP0:%.*]] = call <vscale x 16 x float> @llvm.riscv.vfwcvt.f.f.v.nxv16f32.nxv16bf16.i64(<vscale x 16 x float> poison, <vscale x 16 x bfloat> [[VS2]], i64 [[VL]])
+// CHECK-RV64-NEXT: ret <vscale x 16 x float> [[TMP0]]
+//
+vfloat32m8_t test_vfwcvt_f_f_v_bf16m4_f32m8(vbfloat16m4_t vs2, size_t vl) {
+ return __riscv_vfwcvt_f(vs2, vl);
+}
+
+// CHECK-RV64-LABEL: define dso_local <vscale x 1 x bfloat> @test_vfwcvt_f_x_v_bf16mf4_m(
+// CHECK-RV64-SAME: <vscale x 1 x i1> [[VM:%.*]], <vscale x 1 x i8> [[VS2:%.*]], i64 noundef [[VL:%.*]]) #[[ATTR0]] {
+// CHECK-RV64-NEXT: [[ENTRY:.*:]]
+// CHECK-RV64-NEXT: [[TMP0:%.*]] = call <vscale x 1 x bfloat> @llvm.riscv.vfwcvt.f.x.v.mask.nxv1bf16.nxv1i8.i64(<vscale x 1 x bfloat> poison, <vscale x 1 x i8> [[VS2]], <vscale x 1 x i1> [[VM]], i64 [[VL]], i64 3)
+// CHECK-RV64-NEXT: ret <vscale x 1 x bfloat> [[TMP0]]
+//
+vbfloat16mf4_t test_vfwcvt_f_x_v_bf16mf4_m(vbool64_t vm, vint8mf8_t vs2,
+ size_t vl) {
+ return __riscv_vfwcvt_f_bf16(vm, vs2, vl);
+}
+
+// CHECK-RV64-LABEL: define dso_local <vscale x 2 x bfloat> @test_vfwcvt_f_x_v_bf16mf2_m(
+// CHECK-RV64-SAME: <vscale x 2 x i1> [[VM:%.*]], <vscale x 2 x i8> [[VS2:%.*]], i64 noundef [[VL:%.*]]) #[[ATTR0]] {
+// CHECK-RV64-NEXT: [[ENTRY:.*:]]
+// CHECK-RV64-NEXT: [[TMP0:%.*]] = call <vscale x 2 x bfloat> @llvm.riscv.vfwcvt.f.x.v.mask.nxv2bf16.nxv2i8.i64(<vscale x 2 x bfloat> poison, <vscale x 2 x i8> [[VS2]], <vscale x 2 x i1> [[VM]], i64 [[VL]], i64 3)
+// CHECK-RV64-NEXT: ret <vscale x 2 x bfloat> [[TMP0]]
+//
+vbfloat16mf2_t test_vfwcvt_f_x_v_bf16mf2_m(vbool32_t vm, vint8mf4_t vs2,
+ size_t vl) {
+ return __riscv_vfwcvt_f_bf16(vm, vs2, vl);
+}
+
+// CHECK-RV64-LABEL: define dso_local <vscale x 4 x bfloat> @test_vfwcvt_f_x_v_bf16m1_m(
+// CHECK-RV64-SAME: <vscale x 4 x i1> [[VM:%.*]], <vscale x 4 x i8> [[VS2:%.*]], i64 noundef [[VL:%.*]]) #[[ATTR0]] {
+// CHECK-RV64-NEXT: [[ENTRY:.*:]]
+// CHECK-RV64-NEXT: [[TMP0:%.*]] = call <vscale x 4 x bfloat> @llvm.riscv.vfwcvt.f.x.v.mask.nxv4bf16.nxv4i8.i64(<vscale x 4 x bfloat> poison, <vscale x 4 x i8> [[VS2]], <vscale x 4 x i1> [[VM]], i64 [[VL]], i64 3)
+// CHECK-RV64-NEXT: ret <vscale x 4 x bfloat> [[TMP0]]
+//
+vbfloat16m1_t test_vfwcvt_f_x_v_bf16m1_m(vbool16_t vm, vint8mf2_t vs2,
+ size_t vl) {
+ return __riscv_vfwcvt_f_bf16(vm, vs2, vl);
+}
+
+// CHECK-RV64-LABEL: define dso_local <vscale x 8 x bfloat> @test_vfwcvt_f_x_v_bf16m2_m(
+// CHECK-RV64-SAME: <vscale x 8 x i1> [[VM:%.*]], <vscale x 8 x i8> [[VS2:%.*]], i64 noundef [[VL:%.*]]) #[[ATTR0]] {
+// CHECK-RV64-NEXT: [[ENTRY:.*:]]
+// CHECK-RV64-NEXT: [[TMP0:%.*]] = call <vscale x 8 x bfloat> @llvm.riscv.vfwcvt.f.x.v.mask.nxv8bf16.nxv8i8.i64(<vscale x 8 x bfloat> poison, <vscale x 8 x i8> [[VS2]], <vscale x 8 x i1> [[VM]], i64 [[VL]], i64 3)
+// CHECK-RV64-NEXT: ret <vscale x 8 x bfloat> [[TMP0]]
+//
+vbfloat16m2_t test_vfwcvt_f_x_v_bf16m2_m(vbool8_t vm, vint8m1_t vs2,
+ size_t vl) {
+ return __riscv_vfwcvt_f_bf16(vm, vs2, vl);
+}
+
+// CHECK-RV64-LABEL: define dso_local <vscale x 16 x bfloat> @test_vfwcvt_f_x_v_bf16m4_m(
+// CHECK-RV64-SAME: <vscale x 16 x i1> [[VM:%.*]], <vscale x 16 x i8> [[VS2:%.*]], i64 noundef [[VL:%.*]]) #[[ATTR0]] {
+// CHECK-RV64-NEXT: [[ENTRY:.*:]]
+// CHECK-RV64-NEXT: [[TMP0:%.*]] = call <vscale x 16 x bfloat> @llvm.riscv.vfwcvt.f.x.v.mask.nxv16bf16.nxv16i8.i64(<vscale x 16 x bfloat> poison, <vscale x 16 x i8> [[VS2]], <vscale x 16 x i1> [[VM]], i64 [[VL]], i64 3)
+// CHECK-RV64-NEXT: ret <vscale x 16 x bfloat> [[TMP0]]
+//
+vbfloat16m4_t test_vfwcvt_f_x_v_bf16m4_m(vbool4_t vm, vint8m2_t vs2,
+ size_t vl) {
+ return __riscv_vfwcvt_f_bf16(vm, vs2, vl);
+}
+
+// CHECK-RV64-LABEL: define dso_local <vscale x 32 x bfloat> @test_vfwcvt_f_x_v_bf16m8_m(
+// CHECK-RV64-SAME: <vscale x 32 x i1> [[VM:%.*]], <vscale x 32 x i8> [[VS2:%.*]], i64 noundef [[VL:%.*]]) #[[ATTR0]] {
+// CHECK-RV64-NEXT: [[ENTRY:.*:]]
+// CHECK-RV64-NEXT: [[TMP0:%.*]] = call <vscale x 32 x bfloat> @llvm.riscv.vfwcvt.f.x.v.mask.nxv32bf16.nxv32i8.i64(<vscale x 32 x bfloat> poison, <vscale x 32 x i8> [[VS2]], <vscale x 32 x i1> [[VM]], i64 [[VL]], i64 3)
+// CHECK-RV64-NEXT: ret <vscale x 32 x bfloat> [[TMP0]]
+//
+vbfloat16m8_t test_vfwcvt_f_x_v_bf16m8_m(vbool2_t vm, vint8m4_t vs2,
+ size_t vl) {
+ return __riscv_vfwcvt_f_bf16(vm, vs2, vl);
+}
+
+// CHECK-RV64-LABEL: define dso_local <vscale x 1 x bfloat> @test_vfwcvt_f_xu_v_bf16mf4_m(
+// CHECK-RV64-SAME: <vscale x 1 x i1> [[VM:%.*]], <vscale x 1 x i8> [[VS2:%.*]], i64 noundef [[VL:%.*]]) #[[ATTR0]] {
+// CHECK-RV64-NEXT: [[ENTRY:.*:]]
+// CHECK-RV64-NEXT: [[TMP0:%.*]] = call <vscale x 1 x bfloat> @llvm.riscv.vfwcvt.f.xu.v.mask.nxv1bf16.nxv1i8.i64(<vscale x 1 x bfloat> poison, <vscale x 1 x i8> [[VS2]], <vscale x 1 x i1> [[VM]], i64 [[VL]], i64 3)
+// CHECK-RV64-NEXT: ret <vscale x 1 x bfloat> [[TMP0]]
+//
+vbfloat16mf4_t test_vfwcvt_f_xu_v_bf16mf4_m(vbool64_t vm, vuint8mf8_t vs2,
+ size_t vl) {
+ return __riscv_vfwcvt_f_bf16(vm, vs2, vl);
+}
+
+// CHECK-RV64-LABEL: define dso_local <vscale x 2 x bfloat> @test_vfwcvt_f_xu_v_bf16mf2_m(
+// CHECK-RV64-SAME: <vscale x 2 x i1> [[VM:%.*]], <vscale x 2 x i8> [[VS2:%.*]], i64 noundef [[VL:%.*]]) #[[ATTR0]] {
+// CHECK-RV64-NEXT: [[ENTRY:.*:]]
+// CHECK-RV64-NEXT: [[TMP0:%.*]] = call <vscale x 2 x bfloat> @llvm.riscv.vfwcvt.f.xu.v.mask.nxv2bf16.nxv2i8.i64(<vscale x 2 x bfloat> poison, <vscale x 2 x i8> [[VS2]], <vscale x 2 x i1> [[VM]], i64 [[VL]], i64 3)
+// CHECK-RV64-NEXT: ret <vscale x 2 x bfloat> [[TMP0]]
+//
+vbfloat16mf2_t test_vfwcvt_f_xu_v_bf16mf2_m(vbool32_t vm, vuint8mf4_t vs2,
+ size_t vl) {
+ return __riscv_vfwcvt_f_bf16(vm, vs2, vl);
+}
+
+// CHECK-RV64-LABEL: define dso_local <vscale x 4 x bfloat> @test_vfwcvt_f_xu_v_bf16m1_m(
+// CHECK-RV64-SAME: <vscale x 4 x i1> [[VM:%.*]], <vscale x 4 x i8> [[VS2:%.*]], i64 noundef [[VL:%.*]]) #[[ATTR0]] {
+// CHECK-RV64-NEXT: [[ENTRY:.*:]]
+// CHECK-RV64-NEXT: [[TMP0:%.*]] = call <vscale x 4 x bfloat> @llvm.riscv.vfwcvt.f.xu.v.mask.nxv4bf16.nxv4i8.i64(<vscale x 4 x bfloat> poison, <vscale x 4 x i8> [[VS2]], <vscale x 4 x i1> [[VM]], i64 [[VL]], i64 3)
+// CHECK-RV64-NEXT: ret <vscale x 4 x bfloat> [[TMP0]]
+//
+vbfloat16m1_t test_vfwcvt_f_xu_v_bf16m1_m(vbool16_t vm, vuint8mf2_t vs2,
+ size_t vl) {
+ return __riscv_vfwcvt_f_bf16(vm, vs2, vl);
+}
+
+// CHECK-RV64-LABEL: define dso_local <vscale x 8 x bfloat> @test_vfwcvt_f_xu_v_bf16m2_m(
+// CHECK-RV64-SAME: <vscale x 8 x i1> [[VM:%.*]], <vscale x 8 x i8> [[VS2:%.*]], i64 noundef [[VL:%.*]]) #[[ATTR0]] {
+// CHECK-RV64-NEXT: [[ENTRY:.*:]]
+// CHECK-RV64-NEXT: [[TMP0:%.*]] = call <vscale x 8 x bfloat> @llvm.riscv.vfwcvt.f.xu.v.mask.nxv8bf16.nxv8i8.i64(<vscale x 8 x bfloat> poison, <vscale x 8 x i8> [[VS2]], <vscale x 8 x i1> [[VM]], i64 [[VL]], i64 3)
+// CHECK-RV64-NEXT: ret <vscale x 8 x bfloat> [[TMP0]]
+//
+vbfloat16m2_t test_vfwcvt_f_xu_v_bf16m2_m(vbool8_t vm, vuint8m1_t vs2,
+ size_t vl) {
+ return __riscv_vfwcvt_f_bf16(vm, vs2, vl);
+}
+
+// CHECK-RV64-LABEL: define dso_local <vscale x 16 x bfloat> @test_vfwcvt_f_xu_v_bf16m4_m(
+// CHECK-RV64-SAME: <vscale x 16 x i1> [[VM:%.*]], <vscale x 16 x i8> [[VS2:%.*]], i64 noundef [[VL:%.*]]) #[[ATTR0]] {
+// CHECK-RV64-NEXT: [[ENTRY:.*:]]
+// CHECK-RV64-NEXT: [[TMP0:%.*]] = call <vscale x 16 x bfloat> @llvm.riscv.vfwcvt.f.xu.v.mask.nxv16bf16.nxv16i8.i64(<vscale x 16 x bfloat> poison, <vscale x 16 x i8> [[VS2]], <vscale x 16 x i1> [[VM]], i64 [[VL]], i64 3)
+// CHECK-RV64-NEXT: ret <vscale x 16 x bfloat> [[TMP0]]
+//
+vbfloat16m4_t test_vfwcvt_f_xu_v_bf16m4_m(vbool4_t vm, vuint8m2_t vs2,
+ size_t vl) {
+ return __riscv_vfwcvt_f_bf16(vm, vs2, vl);
+}
+
+// CHECK-RV64-LABEL: define dso_local <vscale x 32 x bfloat> @test_vfwcvt_f_xu_v_bf16m8_m(
+// CHECK-RV64-SAME: <vscale x 32 x i1> [[VM:%.*]], <vscale x 32 x i8> [[VS2:%.*]], i64 noundef [[VL:%.*]]) #[[ATTR0]] {
+// CHECK-RV64-NEXT: [[ENTRY:.*:]]
+// CHECK-RV64-NEXT: [[TMP0:%.*]] = call <vscale x 32 x bfloat> @llvm.riscv.vfwcvt.f.xu.v.mask.nxv32bf16.nxv32i8.i64(<vscale x 32 x bfloat> poison, <vscale x 32 x i8> [[VS2]], <vscale x 32 x i1> [[VM]], i64 [[VL]], i64 3)
+// CHECK-RV64-NEXT: ret <vscale x 32 x bfloat> [[TMP0]]
+//
+vbfloat16m8_t test_vfwcvt_f_xu_v_bf16m8_m(vbool2_t vm, vuint8m4_t vs2,
+ size_t vl) {
+ return __riscv_vfwcvt_f_bf16(vm, vs2, vl);
+}
+
+// CHECK-RV64-LABEL: define dso_local <vscale x 1 x float> @test_vfwcvt_f_f_v_bf16mf4_f32mf2_m(
+// CHECK-RV64-SAME: <vscale x 1 x i1> [[VM:%.*]], <vscale x 1 x bfloat> [[VS2:%.*]], i64 noundef [[VL:%.*]]) #[[ATTR0]] {
+// CHECK-RV64-NEXT: [[ENTRY:.*:]]
+// CHECK-RV64-NEXT: [[TMP0:%.*]] = call <vscale x 1 x float> @llvm.riscv.vfwcvt.f.f.v.mask.nxv1f32.nxv1bf16.i64(<vscale x 1 x float> poison, <vscale x 1 x bfloat> [[VS2]], <vscale x 1 x i1> [[VM]], i64 [[VL]], i64 3)
+// CHECK-RV64-NEXT: ret <vscale x 1 x float> [[TMP0]]
+//
+vfloat32mf2_t test_vfwcvt_f_f_v_bf16mf4_f32mf2_m(vbool64_t vm,
+ vbfloat16mf4_t vs2,
+ size_t vl) {
+ return __riscv_vfwcvt_f(vm, vs2, vl);
+}
+
+// CHECK-RV64-LABEL: define dso_local <vscale x 2 x float> @test_vfwcvt_f_f_v_bf16mf2_f32m1_m(
+// CHECK-RV64-SAME: <vscale x 2 x i1> [[VM:%.*]], <vscale x 2 x bfloat> [[VS2:%.*]], i64 noundef [[VL:%.*]]) #[[ATTR0]] {
+// CHECK-RV64-NEXT: [[ENTRY:.*:]]
+// CHECK-RV64-NEXT: [[TMP0:%.*]] = call <vscale x 2 x float> @llvm.riscv.vfwcvt.f.f.v.mask.nxv2f32.nxv2bf16.i64(<vscale x 2 x float> poison, <vscale x 2 x bfloat> [[VS2]], <vscale x 2 x i1> [[VM]], i64 [[VL]], i64 3)
+// CHECK-RV64-NEXT: ret <vscale x 2 x float> [[TMP0]]
+//
+vfloat32m1_t test_vfwcvt_f_f_v_bf16mf2_f32m1_m(vbool32_t vm, vbfloat16mf2_t vs2,
+ size_t vl) {
+ return __riscv_vfwcvt_f(vm, vs2, vl);
+}
+
+// CHECK-RV64-LABEL: define dso_local <vscale x 4 x float> @test_vfwcvt_f_f_v_bf16m1_f32m2_m(
+// CHECK-RV64-SAME: <vscale x 4 x i1> [[VM:%.*]], <vscale x 4 x bfloat> [[VS2:%.*]], i64 noundef [[VL:%.*]]) #[[ATTR0]] {
+// CHECK-RV64-NEXT: [[ENTRY:.*:]]
+// CHECK-RV64-NEXT: [[TMP0:%.*]] = call <vscale x 4 x float> @llvm.riscv.vfwcvt.f.f.v.mask.nxv4f32.nxv4bf16.i64(<vscale x 4 x float> poison, <vscale x 4 x bfloat> [[VS2]], <vscale x 4 x i1> [[VM]], i64 [[VL]], i64 3)
+// CHECK-RV64-NEXT: ret <vscale x 4 x float> [[TMP0]]
+//
+vfloat32m2_t test_vfwcvt_f_f_v_bf16m1_f32m2_m(vbool16_t vm, vbfloat16m1_t vs2,
+ size_t vl) {
+ return __riscv_vfwcvt_f(vm, vs2, vl);
+}
+
+// CHECK-RV64-LABEL: define dso_local <vscale x 8 x float> @test_vfwcvt_f_f_v_bf16m2_f32m4_m(
+// CHECK-RV64-SAME: <vscale x 8 x i1> [[VM:%.*]], <vscale x 8 x bfloat> [[VS2:%.*]], i64 noundef [[VL:%.*]]) #[[ATTR0]] {
+// CHECK-RV64-NEXT: [[ENTRY:.*:]]
+// CHECK-RV64-NEXT: [[TMP0:%.*]] = call <vscale x 8 x float> @llvm.riscv.vfwcvt.f.f.v.mask.nxv8f32.nxv8bf16.i64(<vscale x 8 x float> poison, <vscale x 8 x bfloat> [[VS2]], <vscale x 8 x i1> [[VM]], i64 [[VL]], i64 3)
+// CHECK-RV64-NEXT: ret <vscale x 8 x float> [[TMP0]]
+//
+vfloat32m4_t test_vfwcvt_f_f_v_bf16m2_f32m4_m(vbool8_t vm, vbfloat16m2_t vs2,
+ size_t vl) {
+ return __riscv_vfwcvt_f(vm, vs2, vl);
+}
+
+// CHECK-RV64-LABEL: define dso_local <vscale x 16 x float> @test_vfwcvt_f_f_v_bf16m4_f32m8_m(
+// CHECK-RV64-SAME: <vscale x 16 x i1> [[VM:%.*]], <vscale x 16 x bfloat> [[VS2:%.*]], i64 noundef [[VL:%.*]]) #[[ATTR0]] {
+// CHECK-RV64-NEXT: [[ENTRY:.*:]]
+// CHECK-RV64-NEXT: [[TMP0:%.*]] = call <vscale x 16 x float> @llvm.riscv.vfwcvt.f.f.v.mask.nxv16f32.nxv16bf16.i64(<vscale x 16 x float> poison, <vscale x 16 x bfloat> [[VS2]], <vscale x 16 x i1> [[VM]], i64 [[VL]], i64 3)
+// CHECK-RV64-NEXT: ret <vscale x 16 x float> [[TMP0]]
+//
+vfloat32m8_t test_vfwcvt_f_f_v_bf16m4_f32m8_m(vbool4_t vm, vbfloat16m4_t vs2,
+ size_t vl) {
+ return __riscv_vfwcvt_f(vm, vs2, vl);
+}
diff --git a/clang/test/CodeGen/RISCV/rvv-intrinsics-autogenerated/zvfbfa/non-policy/overloaded/vfwmacc.c b/clang/test/CodeGen/RISCV/rvv-intrinsics-autogenerated/zvfbfa/non-policy/overloaded/vfwmacc.c
new file mode 100644
index 0000000..2eb7fc8
--- /dev/null
+++ b/clang/test/CodeGen/RISCV/rvv-intrinsics-autogenerated/zvfbfa/non-policy/overloaded/vfwmacc.c
@@ -0,0 +1,474 @@
+// NOTE: Assertions have been autogenerated by utils/update_cc_test_checks.py UTC_ARGS: --version 5
+// REQUIRES: riscv-registered-target
+// RUN: %clang_cc1 -triple riscv64 -target-feature +v \
+// RUN: -target-feature +experimental-zvfbfa -disable-O0-optnone \
+// RUN: -emit-llvm %s -o - | opt -S -passes=mem2reg | \
+// RUN: FileCheck --check-prefix=CHECK-RV64 %s
+
+#include <riscv_vector.h>
+
+// CHECK-RV64-LABEL: define dso_local <vscale x 1 x float> @test_vfwmacc_vv_bf16mf4_f32mf2(
+// CHECK-RV64-SAME: <vscale x 1 x float> [[VD:%.*]], <vscale x 1 x bfloat> [[VS1:%.*]], <vscale x 1 x bfloat> [[VS2:%.*]], i64 noundef [[VL:%.*]]) #[[ATTR0:[0-9]+]] {
+// CHECK-RV64-NEXT: [[ENTRY:.*:]]
+// CHECK-RV64-NEXT: [[TMP0:%.*]] = call <vscale x 1 x float> @llvm.riscv.vfwmacc.nxv1f32.nxv1bf16.nxv1bf16.i64(<vscale x 1 x float> [[VD]], <vscale x 1 x bfloat> [[VS1]], <vscale x 1 x bfloat> [[VS2]], i64 7, i64 [[VL]], i64 3)
+// CHECK-RV64-NEXT: ret <vscale x 1 x float> [[TMP0]]
+//
+vfloat32mf2_t test_vfwmacc_vv_bf16mf4_f32mf2(vfloat32mf2_t vd,
+ vbfloat16mf4_t vs1,
+ vbfloat16mf4_t vs2, size_t vl) {
+ return __riscv_vfwmacc(vd, vs1, vs2, vl);
+}
+
+// CHECK-RV64-LABEL: define dso_local <vscale x 1 x float> @test_vfwmacc_vf_bf16mf4_f32mf2(
+// CHECK-RV64-SAME: <vscale x 1 x float> [[VD:%.*]], bfloat noundef [[VS1:%.*]], <vscale x 1 x bfloat> [[VS2:%.*]], i64 noundef [[VL:%.*]]) #[[ATTR0]] {
+// CHECK-RV64-NEXT: [[ENTRY:.*:]]
+// CHECK-RV64-NEXT: [[TMP0:%.*]] = call <vscale x 1 x float> @llvm.riscv.vfwmacc.nxv1f32.bf16.nxv1bf16.i64(<vscale x 1 x float> [[VD]], bfloat [[VS1]], <vscale x 1 x bfloat> [[VS2]], i64 7, i64 [[VL]], i64 3)
+// CHECK-RV64-NEXT: ret <vscale x 1 x float> [[TMP0]]
+//
+vfloat32mf2_t test_vfwmacc_vf_bf16mf4_f32mf2(vfloat32mf2_t vd, __bf16 vs1,
+ vbfloat16mf4_t vs2, size_t vl) {
+ return __riscv_vfwmacc(vd, vs1, vs2, vl);
+}
+
+// CHECK-RV64-LABEL: define dso_local <vscale x 2 x float> @test_vfwmacc_vv_bf16mf2_f32m1(
+// CHECK-RV64-SAME: <vscale x 2 x float> [[VD:%.*]], <vscale x 2 x bfloat> [[VS1:%.*]], <vscale x 2 x bfloat> [[VS2:%.*]], i64 noundef [[VL:%.*]]) #[[ATTR0]] {
+// CHECK-RV64-NEXT: [[ENTRY:.*:]]
+// CHECK-RV64-NEXT: [[TMP0:%.*]] = call <vscale x 2 x float> @llvm.riscv.vfwmacc.nxv2f32.nxv2bf16.nxv2bf16.i64(<vscale x 2 x float> [[VD]], <vscale x 2 x bfloat> [[VS1]], <vscale x 2 x bfloat> [[VS2]], i64 7, i64 [[VL]], i64 3)
+// CHECK-RV64-NEXT: ret <vscale x 2 x float> [[TMP0]]
+//
+vfloat32m1_t test_vfwmacc_vv_bf16mf2_f32m1(vfloat32m1_t vd, vbfloat16mf2_t vs1,
+ vbfloat16mf2_t vs2, size_t vl) {
+ return __riscv_vfwmacc(vd, vs1, vs2, vl);
+}
+
+// CHECK-RV64-LABEL: define dso_local <vscale x 2 x float> @test_vfwmacc_vf_bf16mf2_f32m1(
+// CHECK-RV64-SAME: <vscale x 2 x float> [[VD:%.*]], bfloat noundef [[VS1:%.*]], <vscale x 2 x bfloat> [[VS2:%.*]], i64 noundef [[VL:%.*]]) #[[ATTR0]] {
+// CHECK-RV64-NEXT: [[ENTRY:.*:]]
+// CHECK-RV64-NEXT: [[TMP0:%.*]] = call <vscale x 2 x float> @llvm.riscv.vfwmacc.nxv2f32.bf16.nxv2bf16.i64(<vscale x 2 x float> [[VD]], bfloat [[VS1]], <vscale x 2 x bfloat> [[VS2]], i64 7, i64 [[VL]], i64 3)
+// CHECK-RV64-NEXT: ret <vscale x 2 x float> [[TMP0]]
+//
+vfloat32m1_t test_vfwmacc_vf_bf16mf2_f32m1(vfloat32m1_t vd, __bf16 vs1,
+ vbfloat16mf2_t vs2, size_t vl) {
+ return __riscv_vfwmacc(vd, vs1, vs2, vl);
+}
+
+// CHECK-RV64-LABEL: define dso_local <vscale x 4 x float> @test_vfwmacc_vv_bf16m1_f32m2(
+// CHECK-RV64-SAME: <vscale x 4 x float> [[VD:%.*]], <vscale x 4 x bfloat> [[VS1:%.*]], <vscale x 4 x bfloat> [[VS2:%.*]], i64 noundef [[VL:%.*]]) #[[ATTR0]] {
+// CHECK-RV64-NEXT: [[ENTRY:.*:]]
+// CHECK-RV64-NEXT: [[TMP0:%.*]] = call <vscale x 4 x float> @llvm.riscv.vfwmacc.nxv4f32.nxv4bf16.nxv4bf16.i64(<vscale x 4 x float> [[VD]], <vscale x 4 x bfloat> [[VS1]], <vscale x 4 x bfloat> [[VS2]], i64 7, i64 [[VL]], i64 3)
+// CHECK-RV64-NEXT: ret <vscale x 4 x float> [[TMP0]]
+//
+vfloat32m2_t test_vfwmacc_vv_bf16m1_f32m2(vfloat32m2_t vd, vbfloat16m1_t vs1,
+ vbfloat16m1_t vs2, size_t vl) {
+ return __riscv_vfwmacc(vd, vs1, vs2, vl);
+}
+
+// CHECK-RV64-LABEL: define dso_local <vscale x 4 x float> @test_vfwmacc_vf_bf16m1_f32m2(
+// CHECK-RV64-SAME: <vscale x 4 x float> [[VD:%.*]], bfloat noundef [[VS1:%.*]], <vscale x 4 x bfloat> [[VS2:%.*]], i64 noundef [[VL:%.*]]) #[[ATTR0]] {
+// CHECK-RV64-NEXT: [[ENTRY:.*:]]
+// CHECK-RV64-NEXT: [[TMP0:%.*]] = call <vscale x 4 x float> @llvm.riscv.vfwmacc.nxv4f32.bf16.nxv4bf16.i64(<vscale x 4 x float> [[VD]], bfloat [[VS1]], <vscale x 4 x bfloat> [[VS2]], i64 7, i64 [[VL]], i64 3)
+// CHECK-RV64-NEXT: ret <vscale x 4 x float> [[TMP0]]
+//
+vfloat32m2_t test_vfwmacc_vf_bf16m1_f32m2(vfloat32m2_t vd, __bf16 vs1,
+ vbfloat16m1_t vs2, size_t vl) {
+ return __riscv_vfwmacc(vd, vs1, vs2, vl);
+}
+
+// CHECK-RV64-LABEL: define dso_local <vscale x 8 x float> @test_vfwmacc_vv_bf16m2_f32m4(
+// CHECK-RV64-SAME: <vscale x 8 x float> [[VD:%.*]], <vscale x 8 x bfloat> [[VS1:%.*]], <vscale x 8 x bfloat> [[VS2:%.*]], i64 noundef [[VL:%.*]]) #[[ATTR0]] {
+// CHECK-RV64-NEXT: [[ENTRY:.*:]]
+// CHECK-RV64-NEXT: [[TMP0:%.*]] = call <vscale x 8 x float> @llvm.riscv.vfwmacc.nxv8f32.nxv8bf16.nxv8bf16.i64(<vscale x 8 x float> [[VD]], <vscale x 8 x bfloat> [[VS1]], <vscale x 8 x bfloat> [[VS2]], i64 7, i64 [[VL]], i64 3)
+// CHECK-RV64-NEXT: ret <vscale x 8 x float> [[TMP0]]
+//
+vfloat32m4_t test_vfwmacc_vv_bf16m2_f32m4(vfloat32m4_t vd, vbfloat16m2_t vs1,
+ vbfloat16m2_t vs2, size_t vl) {
+ return __riscv_vfwmacc(vd, vs1, vs2, vl);
+}
+
+// CHECK-RV64-LABEL: define dso_local <vscale x 8 x float> @test_vfwmacc_vf_bf16m2_f32m4(
+// CHECK-RV64-SAME: <vscale x 8 x float> [[VD:%.*]], bfloat noundef [[VS1:%.*]], <vscale x 8 x bfloat> [[VS2:%.*]], i64 noundef [[VL:%.*]]) #[[ATTR0]] {
+// CHECK-RV64-NEXT: [[ENTRY:.*:]]
+// CHECK-RV64-NEXT: [[TMP0:%.*]] = call <vscale x 8 x float> @llvm.riscv.vfwmacc.nxv8f32.bf16.nxv8bf16.i64(<vscale x 8 x float> [[VD]], bfloat [[VS1]], <vscale x 8 x bfloat> [[VS2]], i64 7, i64 [[VL]], i64 3)
+// CHECK-RV64-NEXT: ret <vscale x 8 x float> [[TMP0]]
+//
+vfloat32m4_t test_vfwmacc_vf_bf16m2_f32m4(vfloat32m4_t vd, __bf16 vs1,
+ vbfloat16m2_t vs2, size_t vl) {
+ return __riscv_vfwmacc(vd, vs1, vs2, vl);
+}
+
+// CHECK-RV64-LABEL: define dso_local <vscale x 16 x float> @test_vfwmacc_vv_bf16m4_f32m8(
+// CHECK-RV64-SAME: <vscale x 16 x float> [[VD:%.*]], <vscale x 16 x bfloat> [[VS1:%.*]], <vscale x 16 x bfloat> [[VS2:%.*]], i64 noundef [[VL:%.*]]) #[[ATTR0]] {
+// CHECK-RV64-NEXT: [[ENTRY:.*:]]
+// CHECK-RV64-NEXT: [[TMP0:%.*]] = call <vscale x 16 x float> @llvm.riscv.vfwmacc.nxv16f32.nxv16bf16.nxv16bf16.i64(<vscale x 16 x float> [[VD]], <vscale x 16 x bfloat> [[VS1]], <vscale x 16 x bfloat> [[VS2]], i64 7, i64 [[VL]], i64 3)
+// CHECK-RV64-NEXT: ret <vscale x 16 x float> [[TMP0]]
+//
+vfloat32m8_t test_vfwmacc_vv_bf16m4_f32m8(vfloat32m8_t vd, vbfloat16m4_t vs1,
+ vbfloat16m4_t vs2, size_t vl) {
+ return __riscv_vfwmacc(vd, vs1, vs2, vl);
+}
+
+// CHECK-RV64-LABEL: define dso_local <vscale x 16 x float> @test_vfwmacc_vf_bf16m4_f32m8(
+// CHECK-RV64-SAME: <vscale x 16 x float> [[VD:%.*]], bfloat noundef [[VS1:%.*]], <vscale x 16 x bfloat> [[VS2:%.*]], i64 noundef [[VL:%.*]]) #[[ATTR0]] {
+// CHECK-RV64-NEXT: [[ENTRY:.*:]]
+// CHECK-RV64-NEXT: [[TMP0:%.*]] = call <vscale x 16 x float> @llvm.riscv.vfwmacc.nxv16f32.bf16.nxv16bf16.i64(<vscale x 16 x float> [[VD]], bfloat [[VS1]], <vscale x 16 x bfloat> [[VS2]], i64 7, i64 [[VL]], i64 3)
+// CHECK-RV64-NEXT: ret <vscale x 16 x float> [[TMP0]]
+//
+vfloat32m8_t test_vfwmacc_vf_bf16m4_f32m8(vfloat32m8_t vd, __bf16 vs1,
+ vbfloat16m4_t vs2, size_t vl) {
+ return __riscv_vfwmacc(vd, vs1, vs2, vl);
+}
+
+// CHECK-RV64-LABEL: define dso_local <vscale x 1 x float> @test_vfwmacc_vv_bf16mf4_f32mf2_m(
+// CHECK-RV64-SAME: <vscale x 1 x i1> [[VM:%.*]], <vscale x 1 x float> [[VD:%.*]], <vscale x 1 x bfloat> [[VS1:%.*]], <vscale x 1 x bfloat> [[VS2:%.*]], i64 noundef [[VL:%.*]]) #[[ATTR0]] {
+// CHECK-RV64-NEXT: [[ENTRY:.*:]]
+// CHECK-RV64-NEXT: [[TMP0:%.*]] = call <vscale x 1 x float> @llvm.riscv.vfwmacc.mask.nxv1f32.nxv1bf16.nxv1bf16.i64(<vscale x 1 x float> [[VD]], <vscale x 1 x bfloat> [[VS1]], <vscale x 1 x bfloat> [[VS2]], <vscale x 1 x i1> [[VM]], i64 7, i64 [[VL]], i64 3)
+// CHECK-RV64-NEXT: ret <vscale x 1 x float> [[TMP0]]
+//
+vfloat32mf2_t test_vfwmacc_vv_bf16mf4_f32mf2_m(vbool64_t vm, vfloat32mf2_t vd,
+ vbfloat16mf4_t vs1,
+ vbfloat16mf4_t vs2, size_t vl) {
+ return __riscv_vfwmacc(vm, vd, vs1, vs2, vl);
+}
+
+// CHECK-RV64-LABEL: define dso_local <vscale x 1 x float> @test_vfwmacc_vf_bf16mf4_f32mf2_m(
+// CHECK-RV64-SAME: <vscale x 1 x i1> [[VM:%.*]], <vscale x 1 x float> [[VD:%.*]], bfloat noundef [[VS1:%.*]], <vscale x 1 x bfloat> [[VS2:%.*]], i64 noundef [[VL:%.*]]) #[[ATTR0]] {
+// CHECK-RV64-NEXT: [[ENTRY:.*:]]
+// CHECK-RV64-NEXT: [[TMP0:%.*]] = call <vscale x 1 x float> @llvm.riscv.vfwmacc.mask.nxv1f32.bf16.nxv1bf16.i64(<vscale x 1 x float> [[VD]], bfloat [[VS1]], <vscale x 1 x bfloat> [[VS2]], <vscale x 1 x i1> [[VM]], i64 7, i64 [[VL]], i64 3)
+// CHECK-RV64-NEXT: ret <vscale x 1 x float> [[TMP0]]
+//
+vfloat32mf2_t test_vfwmacc_vf_bf16mf4_f32mf2_m(vbool64_t vm, vfloat32mf2_t vd,
+ __bf16 vs1, vbfloat16mf4_t vs2,
+ size_t vl) {
+ return __riscv_vfwmacc(vm, vd, vs1, vs2, vl);
+}
+
+// CHECK-RV64-LABEL: define dso_local <vscale x 2 x float> @test_vfwmacc_vv_bf16mf2_f32m1_m(
+// CHECK-RV64-SAME: <vscale x 2 x i1> [[VM:%.*]], <vscale x 2 x float> [[VD:%.*]], <vscale x 2 x bfloat> [[VS1:%.*]], <vscale x 2 x bfloat> [[VS2:%.*]], i64 noundef [[VL:%.*]]) #[[ATTR0]] {
+// CHECK-RV64-NEXT: [[ENTRY:.*:]]
+// CHECK-RV64-NEXT: [[TMP0:%.*]] = call <vscale x 2 x float> @llvm.riscv.vfwmacc.mask.nxv2f32.nxv2bf16.nxv2bf16.i64(<vscale x 2 x float> [[VD]], <vscale x 2 x bfloat> [[VS1]], <vscale x 2 x bfloat> [[VS2]], <vscale x 2 x i1> [[VM]], i64 7, i64 [[VL]], i64 3)
+// CHECK-RV64-NEXT: ret <vscale x 2 x float> [[TMP0]]
+//
+vfloat32m1_t test_vfwmacc_vv_bf16mf2_f32m1_m(vbool32_t vm, vfloat32m1_t vd,
+ vbfloat16mf2_t vs1,
+ vbfloat16mf2_t vs2, size_t vl) {
+ return __riscv_vfwmacc(vm, vd, vs1, vs2, vl);
+}
+
+// CHECK-RV64-LABEL: define dso_local <vscale x 2 x float> @test_vfwmacc_vf_bf16mf2_f32m1_m(
+// CHECK-RV64-SAME: <vscale x 2 x i1> [[VM:%.*]], <vscale x 2 x float> [[VD:%.*]], bfloat noundef [[VS1:%.*]], <vscale x 2 x bfloat> [[VS2:%.*]], i64 noundef [[VL:%.*]]) #[[ATTR0]] {
+// CHECK-RV64-NEXT: [[ENTRY:.*:]]
+// CHECK-RV64-NEXT: [[TMP0:%.*]] = call <vscale x 2 x float> @llvm.riscv.vfwmacc.mask.nxv2f32.bf16.nxv2bf16.i64(<vscale x 2 x float> [[VD]], bfloat [[VS1]], <vscale x 2 x bfloat> [[VS2]], <vscale x 2 x i1> [[VM]], i64 7, i64 [[VL]], i64 3)
+// CHECK-RV64-NEXT: ret <vscale x 2 x float> [[TMP0]]
+//
+vfloat32m1_t test_vfwmacc_vf_bf16mf2_f32m1_m(vbool32_t vm, vfloat32m1_t vd,
+ __bf16 vs1, vbfloat16mf2_t vs2,
+ size_t vl) {
+ return __riscv_vfwmacc(vm, vd, vs1, vs2, vl);
+}
+
+// CHECK-RV64-LABEL: define dso_local <vscale x 4 x float> @test_vfwmacc_vv_bf16m1_f32m2_m(
+// CHECK-RV64-SAME: <vscale x 4 x i1> [[VM:%.*]], <vscale x 4 x float> [[VD:%.*]], <vscale x 4 x bfloat> [[VS1:%.*]], <vscale x 4 x bfloat> [[VS2:%.*]], i64 noundef [[VL:%.*]]) #[[ATTR0]] {
+// CHECK-RV64-NEXT: [[ENTRY:.*:]]
+// CHECK-RV64-NEXT: [[TMP0:%.*]] = call <vscale x 4 x float> @llvm.riscv.vfwmacc.mask.nxv4f32.nxv4bf16.nxv4bf16.i64(<vscale x 4 x float> [[VD]], <vscale x 4 x bfloat> [[VS1]], <vscale x 4 x bfloat> [[VS2]], <vscale x 4 x i1> [[VM]], i64 7, i64 [[VL]], i64 3)
+// CHECK-RV64-NEXT: ret <vscale x 4 x float> [[TMP0]]
+//
+vfloat32m2_t test_vfwmacc_vv_bf16m1_f32m2_m(vbool16_t vm, vfloat32m2_t vd,
+ vbfloat16m1_t vs1,
+ vbfloat16m1_t vs2, size_t vl) {
+ return __riscv_vfwmacc(vm, vd, vs1, vs2, vl);
+}
+
+// CHECK-RV64-LABEL: define dso_local <vscale x 4 x float> @test_vfwmacc_vf_bf16m1_f32m2_m(
+// CHECK-RV64-SAME: <vscale x 4 x i1> [[VM:%.*]], <vscale x 4 x float> [[VD:%.*]], bfloat noundef [[VS1:%.*]], <vscale x 4 x bfloat> [[VS2:%.*]], i64 noundef [[VL:%.*]]) #[[ATTR0]] {
+// CHECK-RV64-NEXT: [[ENTRY:.*:]]
+// CHECK-RV64-NEXT: [[TMP0:%.*]] = call <vscale x 4 x float> @llvm.riscv.vfwmacc.mask.nxv4f32.bf16.nxv4bf16.i64(<vscale x 4 x float> [[VD]], bfloat [[VS1]], <vscale x 4 x bfloat> [[VS2]], <vscale x 4 x i1> [[VM]], i64 7, i64 [[VL]], i64 3)
+// CHECK-RV64-NEXT: ret <vscale x 4 x float> [[TMP0]]
+//
+vfloat32m2_t test_vfwmacc_vf_bf16m1_f32m2_m(vbool16_t vm, vfloat32m2_t vd,
+ __bf16 vs1, vbfloat16m1_t vs2,
+ size_t vl) {
+ return __riscv_vfwmacc(vm, vd, vs1, vs2, vl);
+}
+
+// CHECK-RV64-LABEL: define dso_local <vscale x 8 x float> @test_vfwmacc_vv_bf16m2_f32m4_m(
+// CHECK-RV64-SAME: <vscale x 8 x i1> [[VM:%.*]], <vscale x 8 x float> [[VD:%.*]], <vscale x 8 x bfloat> [[VS1:%.*]], <vscale x 8 x bfloat> [[VS2:%.*]], i64 noundef [[VL:%.*]]) #[[ATTR0]] {
+// CHECK-RV64-NEXT: [[ENTRY:.*:]]
+// CHECK-RV64-NEXT: [[TMP0:%.*]] = call <vscale x 8 x float> @llvm.riscv.vfwmacc.mask.nxv8f32.nxv8bf16.nxv8bf16.i64(<vscale x 8 x float> [[VD]], <vscale x 8 x bfloat> [[VS1]], <vscale x 8 x bfloat> [[VS2]], <vscale x 8 x i1> [[VM]], i64 7, i64 [[VL]], i64 3)
+// CHECK-RV64-NEXT: ret <vscale x 8 x float> [[TMP0]]
+//
+vfloat32m4_t test_vfwmacc_vv_bf16m2_f32m4_m(vbool8_t vm, vfloat32m4_t vd,
+ vbfloat16m2_t vs1,
+ vbfloat16m2_t vs2, size_t vl) {
+ return __riscv_vfwmacc(vm, vd, vs1, vs2, vl);
+}
+
+// CHECK-RV64-LABEL: define dso_local <vscale x 8 x float> @test_vfwmacc_vf_bf16m2_f32m4_m(
+// CHECK-RV64-SAME: <vscale x 8 x i1> [[VM:%.*]], <vscale x 8 x float> [[VD:%.*]], bfloat noundef [[VS1:%.*]], <vscale x 8 x bfloat> [[VS2:%.*]], i64 noundef [[VL:%.*]]) #[[ATTR0]] {
+// CHECK-RV64-NEXT: [[ENTRY:.*:]]
+// CHECK-RV64-NEXT: [[TMP0:%.*]] = call <vscale x 8 x float> @llvm.riscv.vfwmacc.mask.nxv8f32.bf16.nxv8bf16.i64(<vscale x 8 x float> [[VD]], bfloat [[VS1]], <vscale x 8 x bfloat> [[VS2]], <vscale x 8 x i1> [[VM]], i64 7, i64 [[VL]], i64 3)
+// CHECK-RV64-NEXT: ret <vscale x 8 x float> [[TMP0]]
+//
+vfloat32m4_t test_vfwmacc_vf_bf16m2_f32m4_m(vbool8_t vm, vfloat32m4_t vd,
+ __bf16 vs1, vbfloat16m2_t vs2,
+ size_t vl) {
+ return __riscv_vfwmacc(vm, vd, vs1, vs2, vl);
+}
+
+// CHECK-RV64-LABEL: define dso_local <vscale x 16 x float> @test_vfwmacc_vv_bf16m4_f32m8_m(
+// CHECK-RV64-SAME: <vscale x 16 x i1> [[VM:%.*]], <vscale x 16 x float> [[VD:%.*]], <vscale x 16 x bfloat> [[VS1:%.*]], <vscale x 16 x bfloat> [[VS2:%.*]], i64 noundef [[VL:%.*]]) #[[ATTR0]] {
+// CHECK-RV64-NEXT: [[ENTRY:.*:]]
+// CHECK-RV64-NEXT: [[TMP0:%.*]] = call <vscale x 16 x float> @llvm.riscv.vfwmacc.mask.nxv16f32.nxv16bf16.nxv16bf16.i64(<vscale x 16 x float> [[VD]], <vscale x 16 x bfloat> [[VS1]], <vscale x 16 x bfloat> [[VS2]], <vscale x 16 x i1> [[VM]], i64 7, i64 [[VL]], i64 3)
+// CHECK-RV64-NEXT: ret <vscale x 16 x float> [[TMP0]]
+//
+vfloat32m8_t test_vfwmacc_vv_bf16m4_f32m8_m(vbool4_t vm, vfloat32m8_t vd,
+ vbfloat16m4_t vs1,
+ vbfloat16m4_t vs2, size_t vl) {
+ return __riscv_vfwmacc(vm, vd, vs1, vs2, vl);
+}
+
+// CHECK-RV64-LABEL: define dso_local <vscale x 16 x float> @test_vfwmacc_vf_bf16m4_f32m8_m(
+// CHECK-RV64-SAME: <vscale x 16 x i1> [[VM:%.*]], <vscale x 16 x float> [[VD:%.*]], bfloat noundef [[VS1:%.*]], <vscale x 16 x bfloat> [[VS2:%.*]], i64 noundef [[VL:%.*]]) #[[ATTR0]] {
+// CHECK-RV64-NEXT: [[ENTRY:.*:]]
+// CHECK-RV64-NEXT: [[TMP0:%.*]] = call <vscale x 16 x float> @llvm.riscv.vfwmacc.mask.nxv16f32.bf16.nxv16bf16.i64(<vscale x 16 x float> [[VD]], bfloat [[VS1]], <vscale x 16 x bfloat> [[VS2]], <vscale x 16 x i1> [[VM]], i64 7, i64 [[VL]], i64 3)
+// CHECK-RV64-NEXT: ret <vscale x 16 x float> [[TMP0]]
+//
+vfloat32m8_t test_vfwmacc_vf_bf16m4_f32m8_m(vbool4_t vm, vfloat32m8_t vd,
+ __bf16 vs1, vbfloat16m4_t vs2,
+ size_t vl) {
+ return __riscv_vfwmacc(vm, vd, vs1, vs2, vl);
+}
+
+// CHECK-RV64-LABEL: define dso_local <vscale x 1 x float> @test_vfwmacc_vv_bf16mf4_f32mf2_rm(
+// CHECK-RV64-SAME: <vscale x 1 x float> [[VD:%.*]], <vscale x 1 x bfloat> [[VS1:%.*]], <vscale x 1 x bfloat> [[VS2:%.*]], i64 noundef [[VL:%.*]]) #[[ATTR0]] {
+// CHECK-RV64-NEXT: [[ENTRY:.*:]]
+// CHECK-RV64-NEXT: [[TMP0:%.*]] = call <vscale x 1 x float> @llvm.riscv.vfwmacc.nxv1f32.nxv1bf16.nxv1bf16.i64(<vscale x 1 x float> [[VD]], <vscale x 1 x bfloat> [[VS1]], <vscale x 1 x bfloat> [[VS2]], i64 0, i64 [[VL]], i64 3)
+// CHECK-RV64-NEXT: ret <vscale x 1 x float> [[TMP0]]
+//
+vfloat32mf2_t test_vfwmacc_vv_bf16mf4_f32mf2_rm(vfloat32mf2_t vd,
+ vbfloat16mf4_t vs1,
+ vbfloat16mf4_t vs2, size_t vl) {
+ return __riscv_vfwmacc(vd, vs1, vs2, __RISCV_FRM_RNE, vl);
+}
+
+// CHECK-RV64-LABEL: define dso_local <vscale x 1 x float> @test_vfwmacc_vf_bf16mf4_f32mf2_rm(
+// CHECK-RV64-SAME: <vscale x 1 x float> [[VD:%.*]], bfloat noundef [[VS1:%.*]], <vscale x 1 x bfloat> [[VS2:%.*]], i64 noundef [[VL:%.*]]) #[[ATTR0]] {
+// CHECK-RV64-NEXT: [[ENTRY:.*:]]
+// CHECK-RV64-NEXT: [[TMP0:%.*]] = call <vscale x 1 x float> @llvm.riscv.vfwmacc.nxv1f32.bf16.nxv1bf16.i64(<vscale x 1 x float> [[VD]], bfloat [[VS1]], <vscale x 1 x bfloat> [[VS2]], i64 0, i64 [[VL]], i64 3)
+// CHECK-RV64-NEXT: ret <vscale x 1 x float> [[TMP0]]
+//
+vfloat32mf2_t test_vfwmacc_vf_bf16mf4_f32mf2_rm(vfloat32mf2_t vd, __bf16 vs1,
+ vbfloat16mf4_t vs2, size_t vl) {
+ return __riscv_vfwmacc(vd, vs1, vs2, __RISCV_FRM_RNE, vl);
+}
+
+// CHECK-RV64-LABEL: define dso_local <vscale x 2 x float> @test_vfwmacc_vv_bf16mf2_f32m1_rm(
+// CHECK-RV64-SAME: <vscale x 2 x float> [[VD:%.*]], <vscale x 2 x bfloat> [[VS1:%.*]], <vscale x 2 x bfloat> [[VS2:%.*]], i64 noundef [[VL:%.*]]) #[[ATTR0]] {
+// CHECK-RV64-NEXT: [[ENTRY:.*:]]
+// CHECK-RV64-NEXT: [[TMP0:%.*]] = call <vscale x 2 x float> @llvm.riscv.vfwmacc.nxv2f32.nxv2bf16.nxv2bf16.i64(<vscale x 2 x float> [[VD]], <vscale x 2 x bfloat> [[VS1]], <vscale x 2 x bfloat> [[VS2]], i64 0, i64 [[VL]], i64 3)
+// CHECK-RV64-NEXT: ret <vscale x 2 x float> [[TMP0]]
+//
+vfloat32m1_t test_vfwmacc_vv_bf16mf2_f32m1_rm(vfloat32m1_t vd,
+ vbfloat16mf2_t vs1,
+ vbfloat16mf2_t vs2, size_t vl) {
+ return __riscv_vfwmacc(vd, vs1, vs2, __RISCV_FRM_RNE, vl);
+}
+
+// CHECK-RV64-LABEL: define dso_local <vscale x 2 x float> @test_vfwmacc_vf_bf16mf2_f32m1_rm(
+// CHECK-RV64-SAME: <vscale x 2 x float> [[VD:%.*]], bfloat noundef [[VS1:%.*]], <vscale x 2 x bfloat> [[VS2:%.*]], i64 noundef [[VL:%.*]]) #[[ATTR0]] {
+// CHECK-RV64-NEXT: [[ENTRY:.*:]]
+// CHECK-RV64-NEXT: [[TMP0:%.*]] = call <vscale x 2 x float> @llvm.riscv.vfwmacc.nxv2f32.bf16.nxv2bf16.i64(<vscale x 2 x float> [[VD]], bfloat [[VS1]], <vscale x 2 x bfloat> [[VS2]], i64 0, i64 [[VL]], i64 3)
+// CHECK-RV64-NEXT: ret <vscale x 2 x float> [[TMP0]]
+//
+vfloat32m1_t test_vfwmacc_vf_bf16mf2_f32m1_rm(vfloat32m1_t vd, __bf16 vs1,
+ vbfloat16mf2_t vs2, size_t vl) {
+ return __riscv_vfwmacc(vd, vs1, vs2, __RISCV_FRM_RNE, vl);
+}
+
+// CHECK-RV64-LABEL: define dso_local <vscale x 4 x float> @test_vfwmacc_vv_bf16m1_f32m2_rm(
+// CHECK-RV64-SAME: <vscale x 4 x float> [[VD:%.*]], <vscale x 4 x bfloat> [[VS1:%.*]], <vscale x 4 x bfloat> [[VS2:%.*]], i64 noundef [[VL:%.*]]) #[[ATTR0]] {
+// CHECK-RV64-NEXT: [[ENTRY:.*:]]
+// CHECK-RV64-NEXT: [[TMP0:%.*]] = call <vscale x 4 x float> @llvm.riscv.vfwmacc.nxv4f32.nxv4bf16.nxv4bf16.i64(<vscale x 4 x float> [[VD]], <vscale x 4 x bfloat> [[VS1]], <vscale x 4 x bfloat> [[VS2]], i64 0, i64 [[VL]], i64 3)
+// CHECK-RV64-NEXT: ret <vscale x 4 x float> [[TMP0]]
+//
+vfloat32m2_t test_vfwmacc_vv_bf16m1_f32m2_rm(vfloat32m2_t vd, vbfloat16m1_t vs1,
+ vbfloat16m1_t vs2, size_t vl) {
+ return __riscv_vfwmacc(vd, vs1, vs2, __RISCV_FRM_RNE, vl);
+}
+
+// CHECK-RV64-LABEL: define dso_local <vscale x 4 x float> @test_vfwmacc_vf_bf16m1_f32m2_rm(
+// CHECK-RV64-SAME: <vscale x 4 x float> [[VD:%.*]], bfloat noundef [[VS1:%.*]], <vscale x 4 x bfloat> [[VS2:%.*]], i64 noundef [[VL:%.*]]) #[[ATTR0]] {
+// CHECK-RV64-NEXT: [[ENTRY:.*:]]
+// CHECK-RV64-NEXT: [[TMP0:%.*]] = call <vscale x 4 x float> @llvm.riscv.vfwmacc.nxv4f32.bf16.nxv4bf16.i64(<vscale x 4 x float> [[VD]], bfloat [[VS1]], <vscale x 4 x bfloat> [[VS2]], i64 0, i64 [[VL]], i64 3)
+// CHECK-RV64-NEXT: ret <vscale x 4 x float> [[TMP0]]
+//
+vfloat32m2_t test_vfwmacc_vf_bf16m1_f32m2_rm(vfloat32m2_t vd, __bf16 vs1,
+ vbfloat16m1_t vs2, size_t vl) {
+ return __riscv_vfwmacc(vd, vs1, vs2, __RISCV_FRM_RNE, vl);
+}
+
+// CHECK-RV64-LABEL: define dso_local <vscale x 8 x float> @test_vfwmacc_vv_bf16m2_f32m4_rm(
+// CHECK-RV64-SAME: <vscale x 8 x float> [[VD:%.*]], <vscale x 8 x bfloat> [[VS1:%.*]], <vscale x 8 x bfloat> [[VS2:%.*]], i64 noundef [[VL:%.*]]) #[[ATTR0]] {
+// CHECK-RV64-NEXT: [[ENTRY:.*:]]
+// CHECK-RV64-NEXT: [[TMP0:%.*]] = call <vscale x 8 x float> @llvm.riscv.vfwmacc.nxv8f32.nxv8bf16.nxv8bf16.i64(<vscale x 8 x float> [[VD]], <vscale x 8 x bfloat> [[VS1]], <vscale x 8 x bfloat> [[VS2]], i64 0, i64 [[VL]], i64 3)
+// CHECK-RV64-NEXT: ret <vscale x 8 x float> [[TMP0]]
+//
+vfloat32m4_t test_vfwmacc_vv_bf16m2_f32m4_rm(vfloat32m4_t vd, vbfloat16m2_t vs1,
+ vbfloat16m2_t vs2, size_t vl) {
+ return __riscv_vfwmacc(vd, vs1, vs2, __RISCV_FRM_RNE, vl);
+}
+
+// CHECK-RV64-LABEL: define dso_local <vscale x 8 x float> @test_vfwmacc_vf_bf16m2_f32m4_rm(
+// CHECK-RV64-SAME: <vscale x 8 x float> [[VD:%.*]], bfloat noundef [[VS1:%.*]], <vscale x 8 x bfloat> [[VS2:%.*]], i64 noundef [[VL:%.*]]) #[[ATTR0]] {
+// CHECK-RV64-NEXT: [[ENTRY:.*:]]
+// CHECK-RV64-NEXT: [[TMP0:%.*]] = call <vscale x 8 x float> @llvm.riscv.vfwmacc.nxv8f32.bf16.nxv8bf16.i64(<vscale x 8 x float> [[VD]], bfloat [[VS1]], <vscale x 8 x bfloat> [[VS2]], i64 0, i64 [[VL]], i64 3)
+// CHECK-RV64-NEXT: ret <vscale x 8 x float> [[TMP0]]
+//
+vfloat32m4_t test_vfwmacc_vf_bf16m2_f32m4_rm(vfloat32m4_t vd, __bf16 vs1,
+ vbfloat16m2_t vs2, size_t vl) {
+ return __riscv_vfwmacc(vd, vs1, vs2, __RISCV_FRM_RNE, vl);
+}
+
+// CHECK-RV64-LABEL: define dso_local <vscale x 16 x float> @test_vfwmacc_vv_bf16m4_f32m8_rm(
+// CHECK-RV64-SAME: <vscale x 16 x float> [[VD:%.*]], <vscale x 16 x bfloat> [[VS1:%.*]], <vscale x 16 x bfloat> [[VS2:%.*]], i64 noundef [[VL:%.*]]) #[[ATTR0]] {
+// CHECK-RV64-NEXT: [[ENTRY:.*:]]
+// CHECK-RV64-NEXT: [[TMP0:%.*]] = call <vscale x 16 x float> @llvm.riscv.vfwmacc.nxv16f32.nxv16bf16.nxv16bf16.i64(<vscale x 16 x float> [[VD]], <vscale x 16 x bfloat> [[VS1]], <vscale x 16 x bfloat> [[VS2]], i64 0, i64 [[VL]], i64 3)
+// CHECK-RV64-NEXT: ret <vscale x 16 x float> [[TMP0]]
+//
+vfloat32m8_t test_vfwmacc_vv_bf16m4_f32m8_rm(vfloat32m8_t vd, vbfloat16m4_t vs1,
+ vbfloat16m4_t vs2, size_t vl) {
+ return __riscv_vfwmacc(vd, vs1, vs2, __RISCV_FRM_RNE, vl);
+}
+
+// CHECK-RV64-LABEL: define dso_local <vscale x 16 x float> @test_vfwmacc_vf_bf16m4_f32m8_rm(
+// CHECK-RV64-SAME: <vscale x 16 x float> [[VD:%.*]], bfloat noundef [[VS1:%.*]], <vscale x 16 x bfloat> [[VS2:%.*]], i64 noundef [[VL:%.*]]) #[[ATTR0]] {
+// CHECK-RV64-NEXT: [[ENTRY:.*:]]
+// CHECK-RV64-NEXT: [[TMP0:%.*]] = call <vscale x 16 x float> @llvm.riscv.vfwmacc.nxv16f32.bf16.nxv16bf16.i64(<vscale x 16 x float> [[VD]], bfloat [[VS1]], <vscale x 16 x bfloat> [[VS2]], i64 0, i64 [[VL]], i64 3)
+// CHECK-RV64-NEXT: ret <vscale x 16 x float> [[TMP0]]
+//
+vfloat32m8_t test_vfwmacc_vf_bf16m4_f32m8_rm(vfloat32m8_t vd, __bf16 vs1,
+ vbfloat16m4_t vs2, size_t vl) {
+ return __riscv_vfwmacc(vd, vs1, vs2, __RISCV_FRM_RNE, vl);
+}
+
+// CHECK-RV64-LABEL: define dso_local <vscale x 1 x float> @test_vfwmacc_vv_bf16mf4_f32mf2_rm_m(
+// CHECK-RV64-SAME: <vscale x 1 x i1> [[VM:%.*]], <vscale x 1 x float> [[VD:%.*]], <vscale x 1 x bfloat> [[VS1:%.*]], <vscale x 1 x bfloat> [[VS2:%.*]], i64 noundef [[VL:%.*]]) #[[ATTR0]] {
+// CHECK-RV64-NEXT: [[ENTRY:.*:]]
+// CHECK-RV64-NEXT: [[TMP0:%.*]] = call <vscale x 1 x float> @llvm.riscv.vfwmacc.mask.nxv1f32.nxv1bf16.nxv1bf16.i64(<vscale x 1 x float> [[VD]], <vscale x 1 x bfloat> [[VS1]], <vscale x 1 x bfloat> [[VS2]], <vscale x 1 x i1> [[VM]], i64 0, i64 [[VL]], i64 3)
+// CHECK-RV64-NEXT: ret <vscale x 1 x float> [[TMP0]]
+//
+vfloat32mf2_t test_vfwmacc_vv_bf16mf4_f32mf2_rm_m(vbool64_t vm,
+ vfloat32mf2_t vd,
+ vbfloat16mf4_t vs1,
+ vbfloat16mf4_t vs2,
+ size_t vl) {
+ return __riscv_vfwmacc(vm, vd, vs1, vs2, __RISCV_FRM_RNE, vl);
+}
+
+// CHECK-RV64-LABEL: define dso_local <vscale x 1 x float> @test_vfwmacc_vf_bf16mf4_f32mf2_rm_m(
+// CHECK-RV64-SAME: <vscale x 1 x i1> [[VM:%.*]], <vscale x 1 x float> [[VD:%.*]], bfloat noundef [[VS1:%.*]], <vscale x 1 x bfloat> [[VS2:%.*]], i64 noundef [[VL:%.*]]) #[[ATTR0]] {
+// CHECK-RV64-NEXT: [[ENTRY:.*:]]
+// CHECK-RV64-NEXT: [[TMP0:%.*]] = call <vscale x 1 x float> @llvm.riscv.vfwmacc.mask.nxv1f32.bf16.nxv1bf16.i64(<vscale x 1 x float> [[VD]], bfloat [[VS1]], <vscale x 1 x bfloat> [[VS2]], <vscale x 1 x i1> [[VM]], i64 0, i64 [[VL]], i64 3)
+// CHECK-RV64-NEXT: ret <vscale x 1 x float> [[TMP0]]
+//
+vfloat32mf2_t test_vfwmacc_vf_bf16mf4_f32mf2_rm_m(vbool64_t vm,
+ vfloat32mf2_t vd, __bf16 vs1,
+ vbfloat16mf4_t vs2,
+ size_t vl) {
+ return __riscv_vfwmacc(vm, vd, vs1, vs2, __RISCV_FRM_RNE, vl);
+}
+
+// CHECK-RV64-LABEL: define dso_local <vscale x 2 x float> @test_vfwmacc_vv_bf16mf2_f32m1_rm_m(
+// CHECK-RV64-SAME: <vscale x 2 x i1> [[VM:%.*]], <vscale x 2 x float> [[VD:%.*]], <vscale x 2 x bfloat> [[VS1:%.*]], <vscale x 2 x bfloat> [[VS2:%.*]], i64 noundef [[VL:%.*]]) #[[ATTR0]] {
+// CHECK-RV64-NEXT: [[ENTRY:.*:]]
+// CHECK-RV64-NEXT: [[TMP0:%.*]] = call <vscale x 2 x float> @llvm.riscv.vfwmacc.mask.nxv2f32.nxv2bf16.nxv2bf16.i64(<vscale x 2 x float> [[VD]], <vscale x 2 x bfloat> [[VS1]], <vscale x 2 x bfloat> [[VS2]], <vscale x 2 x i1> [[VM]], i64 0, i64 [[VL]], i64 3)
+// CHECK-RV64-NEXT: ret <vscale x 2 x float> [[TMP0]]
+//
+vfloat32m1_t test_vfwmacc_vv_bf16mf2_f32m1_rm_m(vbool32_t vm, vfloat32m1_t vd,
+ vbfloat16mf2_t vs1,
+ vbfloat16mf2_t vs2, size_t vl) {
+ return __riscv_vfwmacc(vm, vd, vs1, vs2, __RISCV_FRM_RNE, vl);
+}
+
+// CHECK-RV64-LABEL: define dso_local <vscale x 2 x float> @test_vfwmacc_vf_bf16mf2_f32m1_rm_m(
+// CHECK-RV64-SAME: <vscale x 2 x i1> [[VM:%.*]], <vscale x 2 x float> [[VD:%.*]], bfloat noundef [[VS1:%.*]], <vscale x 2 x bfloat> [[VS2:%.*]], i64 noundef [[VL:%.*]]) #[[ATTR0]] {
+// CHECK-RV64-NEXT: [[ENTRY:.*:]]
+// CHECK-RV64-NEXT: [[TMP0:%.*]] = call <vscale x 2 x float> @llvm.riscv.vfwmacc.mask.nxv2f32.bf16.nxv2bf16.i64(<vscale x 2 x float> [[VD]], bfloat [[VS1]], <vscale x 2 x bfloat> [[VS2]], <vscale x 2 x i1> [[VM]], i64 0, i64 [[VL]], i64 3)
+// CHECK-RV64-NEXT: ret <vscale x 2 x float> [[TMP0]]
+//
+vfloat32m1_t test_vfwmacc_vf_bf16mf2_f32m1_rm_m(vbool32_t vm, vfloat32m1_t vd,
+ __bf16 vs1, vbfloat16mf2_t vs2,
+ size_t vl) {
+ return __riscv_vfwmacc(vm, vd, vs1, vs2, __RISCV_FRM_RNE, vl);
+}
+
+// CHECK-RV64-LABEL: define dso_local <vscale x 4 x float> @test_vfwmacc_vv_bf16m1_f32m2_rm_m(
+// CHECK-RV64-SAME: <vscale x 4 x i1> [[VM:%.*]], <vscale x 4 x float> [[VD:%.*]], <vscale x 4 x bfloat> [[VS1:%.*]], <vscale x 4 x bfloat> [[VS2:%.*]], i64 noundef [[VL:%.*]]) #[[ATTR0]] {
+// CHECK-RV64-NEXT: [[ENTRY:.*:]]
+// CHECK-RV64-NEXT: [[TMP0:%.*]] = call <vscale x 4 x float> @llvm.riscv.vfwmacc.mask.nxv4f32.nxv4bf16.nxv4bf16.i64(<vscale x 4 x float> [[VD]], <vscale x 4 x bfloat> [[VS1]], <vscale x 4 x bfloat> [[VS2]], <vscale x 4 x i1> [[VM]], i64 0, i64 [[VL]], i64 3)
+// CHECK-RV64-NEXT: ret <vscale x 4 x float> [[TMP0]]
+//
+vfloat32m2_t test_vfwmacc_vv_bf16m1_f32m2_rm_m(vbool16_t vm, vfloat32m2_t vd,
+ vbfloat16m1_t vs1,
+ vbfloat16m1_t vs2, size_t vl) {
+ return __riscv_vfwmacc(vm, vd, vs1, vs2, __RISCV_FRM_RNE, vl);
+}
+
+// CHECK-RV64-LABEL: define dso_local <vscale x 4 x float> @test_vfwmacc_vf_bf16m1_f32m2_rm_m(
+// CHECK-RV64-SAME: <vscale x 4 x i1> [[VM:%.*]], <vscale x 4 x float> [[VD:%.*]], bfloat noundef [[VS1:%.*]], <vscale x 4 x bfloat> [[VS2:%.*]], i64 noundef [[VL:%.*]]) #[[ATTR0]] {
+// CHECK-RV64-NEXT: [[ENTRY:.*:]]
+// CHECK-RV64-NEXT: [[TMP0:%.*]] = call <vscale x 4 x float> @llvm.riscv.vfwmacc.mask.nxv4f32.bf16.nxv4bf16.i64(<vscale x 4 x float> [[VD]], bfloat [[VS1]], <vscale x 4 x bfloat> [[VS2]], <vscale x 4 x i1> [[VM]], i64 0, i64 [[VL]], i64 3)
+// CHECK-RV64-NEXT: ret <vscale x 4 x float> [[TMP0]]
+//
+vfloat32m2_t test_vfwmacc_vf_bf16m1_f32m2_rm_m(vbool16_t vm, vfloat32m2_t vd,
+ __bf16 vs1, vbfloat16m1_t vs2,
+ size_t vl) {
+ return __riscv_vfwmacc(vm, vd, vs1, vs2, __RISCV_FRM_RNE, vl);
+}
+
+// CHECK-RV64-LABEL: define dso_local <vscale x 8 x float> @test_vfwmacc_vv_bf16m2_f32m4_rm_m(
+// CHECK-RV64-SAME: <vscale x 8 x i1> [[VM:%.*]], <vscale x 8 x float> [[VD:%.*]], <vscale x 8 x bfloat> [[VS1:%.*]], <vscale x 8 x bfloat> [[VS2:%.*]], i64 noundef [[VL:%.*]]) #[[ATTR0]] {
+// CHECK-RV64-NEXT: [[ENTRY:.*:]]
+// CHECK-RV64-NEXT: [[TMP0:%.*]] = call <vscale x 8 x float> @llvm.riscv.vfwmacc.mask.nxv8f32.nxv8bf16.nxv8bf16.i64(<vscale x 8 x float> [[VD]], <vscale x 8 x bfloat> [[VS1]], <vscale x 8 x bfloat> [[VS2]], <vscale x 8 x i1> [[VM]], i64 0, i64 [[VL]], i64 3)
+// CHECK-RV64-NEXT: ret <vscale x 8 x float> [[TMP0]]
+//
+vfloat32m4_t test_vfwmacc_vv_bf16m2_f32m4_rm_m(vbool8_t vm, vfloat32m4_t vd,
+ vbfloat16m2_t vs1,
+ vbfloat16m2_t vs2, size_t vl) {
+ return __riscv_vfwmacc(vm, vd, vs1, vs2, __RISCV_FRM_RNE, vl);
+}
+
+// CHECK-RV64-LABEL: define dso_local <vscale x 8 x float> @test_vfwmacc_vf_bf16m2_f32m4_rm_m(
+// CHECK-RV64-SAME: <vscale x 8 x i1> [[VM:%.*]], <vscale x 8 x float> [[VD:%.*]], bfloat noundef [[VS1:%.*]], <vscale x 8 x bfloat> [[VS2:%.*]], i64 noundef [[VL:%.*]]) #[[ATTR0]] {
+// CHECK-RV64-NEXT: [[ENTRY:.*:]]
+// CHECK-RV64-NEXT: [[TMP0:%.*]] = call <vscale x 8 x float> @llvm.riscv.vfwmacc.mask.nxv8f32.bf16.nxv8bf16.i64(<vscale x 8 x float> [[VD]], bfloat [[VS1]], <vscale x 8 x bfloat> [[VS2]], <vscale x 8 x i1> [[VM]], i64 0, i64 [[VL]], i64 3)
+// CHECK-RV64-NEXT: ret <vscale x 8 x float> [[TMP0]]
+//
+vfloat32m4_t test_vfwmacc_vf_bf16m2_f32m4_rm_m(vbool8_t vm, vfloat32m4_t vd,
+ __bf16 vs1, vbfloat16m2_t vs2,
+ size_t vl) {
+ return __riscv_vfwmacc(vm, vd, vs1, vs2, __RISCV_FRM_RNE, vl);
+}
+
+// CHECK-RV64-LABEL: define dso_local <vscale x 16 x float> @test_vfwmacc_vv_bf16m4_f32m8_rm_m(
+// CHECK-RV64-SAME: <vscale x 16 x i1> [[VM:%.*]], <vscale x 16 x float> [[VD:%.*]], <vscale x 16 x bfloat> [[VS1:%.*]], <vscale x 16 x bfloat> [[VS2:%.*]], i64 noundef [[VL:%.*]]) #[[ATTR0]] {
+// CHECK-RV64-NEXT: [[ENTRY:.*:]]
+// CHECK-RV64-NEXT: [[TMP0:%.*]] = call <vscale x 16 x float> @llvm.riscv.vfwmacc.mask.nxv16f32.nxv16bf16.nxv16bf16.i64(<vscale x 16 x float> [[VD]], <vscale x 16 x bfloat> [[VS1]], <vscale x 16 x bfloat> [[VS2]], <vscale x 16 x i1> [[VM]], i64 0, i64 [[VL]], i64 3)
+// CHECK-RV64-NEXT: ret <vscale x 16 x float> [[TMP0]]
+//
+vfloat32m8_t test_vfwmacc_vv_bf16m4_f32m8_rm_m(vbool4_t vm, vfloat32m8_t vd,
+ vbfloat16m4_t vs1,
+ vbfloat16m4_t vs2, size_t vl) {
+ return __riscv_vfwmacc(vm, vd, vs1, vs2, __RISCV_FRM_RNE, vl);
+}
+
+// CHECK-RV64-LABEL: define dso_local <vscale x 16 x float> @test_vfwmacc_vf_bf16m4_f32m8_rm_m(
+// CHECK-RV64-SAME: <vscale x 16 x i1> [[VM:%.*]], <vscale x 16 x float> [[VD:%.*]], bfloat noundef [[VS1:%.*]], <vscale x 16 x bfloat> [[VS2:%.*]], i64 noundef [[VL:%.*]]) #[[ATTR0]] {
+// CHECK-RV64-NEXT: [[ENTRY:.*:]]
+// CHECK-RV64-NEXT: [[TMP0:%.*]] = call <vscale x 16 x float> @llvm.riscv.vfwmacc.mask.nxv16f32.bf16.nxv16bf16.i64(<vscale x 16 x float> [[VD]], bfloat [[VS1]], <vscale x 16 x bfloat> [[VS2]], <vscale x 16 x i1> [[VM]], i64 0, i64 [[VL]], i64 3)
+// CHECK-RV64-NEXT: ret <vscale x 16 x float> [[TMP0]]
+//
+vfloat32m8_t test_vfwmacc_vf_bf16m4_f32m8_rm_m(vbool4_t vm, vfloat32m8_t vd,
+ __bf16 vs1, vbfloat16m4_t vs2,
+ size_t vl) {
+ return __riscv_vfwmacc(vm, vd, vs1, vs2, __RISCV_FRM_RNE, vl);
+}
diff --git a/clang/test/CodeGen/RISCV/rvv-intrinsics-autogenerated/zvfbfa/non-policy/overloaded/vfwmsac.c b/clang/test/CodeGen/RISCV/rvv-intrinsics-autogenerated/zvfbfa/non-policy/overloaded/vfwmsac.c
new file mode 100644
index 0000000..28f5076
--- /dev/null
+++ b/clang/test/CodeGen/RISCV/rvv-intrinsics-autogenerated/zvfbfa/non-policy/overloaded/vfwmsac.c
@@ -0,0 +1,474 @@
+// NOTE: Assertions have been autogenerated by utils/update_cc_test_checks.py UTC_ARGS: --version 5
+// REQUIRES: riscv-registered-target
+// RUN: %clang_cc1 -triple riscv64 -target-feature +v \
+// RUN: -target-feature +experimental-zvfbfa -disable-O0-optnone \
+// RUN: -emit-llvm %s -o - | opt -S -passes=mem2reg | \
+// RUN: FileCheck --check-prefix=CHECK-RV64 %s
+
+#include <riscv_vector.h>
+
+// CHECK-RV64-LABEL: define dso_local <vscale x 1 x float> @test_vfwmsac_vv_bf16mf4_f32mf2(
+// CHECK-RV64-SAME: <vscale x 1 x float> [[VD:%.*]], <vscale x 1 x bfloat> [[VS1:%.*]], <vscale x 1 x bfloat> [[VS2:%.*]], i64 noundef [[VL:%.*]]) #[[ATTR0:[0-9]+]] {
+// CHECK-RV64-NEXT: [[ENTRY:.*:]]
+// CHECK-RV64-NEXT: [[TMP0:%.*]] = call <vscale x 1 x float> @llvm.riscv.vfwmsac.nxv1f32.nxv1bf16.nxv1bf16.i64(<vscale x 1 x float> [[VD]], <vscale x 1 x bfloat> [[VS1]], <vscale x 1 x bfloat> [[VS2]], i64 7, i64 [[VL]], i64 3)
+// CHECK-RV64-NEXT: ret <vscale x 1 x float> [[TMP0]]
+//
+vfloat32mf2_t test_vfwmsac_vv_bf16mf4_f32mf2(vfloat32mf2_t vd,
+ vbfloat16mf4_t vs1,
+ vbfloat16mf4_t vs2, size_t vl) {
+ return __riscv_vfwmsac(vd, vs1, vs2, vl);
+}
+
+// CHECK-RV64-LABEL: define dso_local <vscale x 1 x float> @test_vfwmsac_vf_bf16mf4_f32mf2(
+// CHECK-RV64-SAME: <vscale x 1 x float> [[VD:%.*]], bfloat noundef [[VS1:%.*]], <vscale x 1 x bfloat> [[VS2:%.*]], i64 noundef [[VL:%.*]]) #[[ATTR0]] {
+// CHECK-RV64-NEXT: [[ENTRY:.*:]]
+// CHECK-RV64-NEXT: [[TMP0:%.*]] = call <vscale x 1 x float> @llvm.riscv.vfwmsac.nxv1f32.bf16.nxv1bf16.i64(<vscale x 1 x float> [[VD]], bfloat [[VS1]], <vscale x 1 x bfloat> [[VS2]], i64 7, i64 [[VL]], i64 3)
+// CHECK-RV64-NEXT: ret <vscale x 1 x float> [[TMP0]]
+//
+vfloat32mf2_t test_vfwmsac_vf_bf16mf4_f32mf2(vfloat32mf2_t vd, __bf16 vs1,
+ vbfloat16mf4_t vs2, size_t vl) {
+ return __riscv_vfwmsac(vd, vs1, vs2, vl);
+}
+
+// CHECK-RV64-LABEL: define dso_local <vscale x 2 x float> @test_vfwmsac_vv_bf16mf2_f32m1(
+// CHECK-RV64-SAME: <vscale x 2 x float> [[VD:%.*]], <vscale x 2 x bfloat> [[VS1:%.*]], <vscale x 2 x bfloat> [[VS2:%.*]], i64 noundef [[VL:%.*]]) #[[ATTR0]] {
+// CHECK-RV64-NEXT: [[ENTRY:.*:]]
+// CHECK-RV64-NEXT: [[TMP0:%.*]] = call <vscale x 2 x float> @llvm.riscv.vfwmsac.nxv2f32.nxv2bf16.nxv2bf16.i64(<vscale x 2 x float> [[VD]], <vscale x 2 x bfloat> [[VS1]], <vscale x 2 x bfloat> [[VS2]], i64 7, i64 [[VL]], i64 3)
+// CHECK-RV64-NEXT: ret <vscale x 2 x float> [[TMP0]]
+//
+vfloat32m1_t test_vfwmsac_vv_bf16mf2_f32m1(vfloat32m1_t vd, vbfloat16mf2_t vs1,
+ vbfloat16mf2_t vs2, size_t vl) {
+ return __riscv_vfwmsac(vd, vs1, vs2, vl);
+}
+
+// CHECK-RV64-LABEL: define dso_local <vscale x 2 x float> @test_vfwmsac_vf_bf16mf2_f32m1(
+// CHECK-RV64-SAME: <vscale x 2 x float> [[VD:%.*]], bfloat noundef [[VS1:%.*]], <vscale x 2 x bfloat> [[VS2:%.*]], i64 noundef [[VL:%.*]]) #[[ATTR0]] {
+// CHECK-RV64-NEXT: [[ENTRY:.*:]]
+// CHECK-RV64-NEXT: [[TMP0:%.*]] = call <vscale x 2 x float> @llvm.riscv.vfwmsac.nxv2f32.bf16.nxv2bf16.i64(<vscale x 2 x float> [[VD]], bfloat [[VS1]], <vscale x 2 x bfloat> [[VS2]], i64 7, i64 [[VL]], i64 3)
+// CHECK-RV64-NEXT: ret <vscale x 2 x float> [[TMP0]]
+//
+vfloat32m1_t test_vfwmsac_vf_bf16mf2_f32m1(vfloat32m1_t vd, __bf16 vs1,
+ vbfloat16mf2_t vs2, size_t vl) {
+ return __riscv_vfwmsac(vd, vs1, vs2, vl);
+}
+
+// CHECK-RV64-LABEL: define dso_local <vscale x 4 x float> @test_vfwmsac_vv_bf16m1_f32m2(
+// CHECK-RV64-SAME: <vscale x 4 x float> [[VD:%.*]], <vscale x 4 x bfloat> [[VS1:%.*]], <vscale x 4 x bfloat> [[VS2:%.*]], i64 noundef [[VL:%.*]]) #[[ATTR0]] {
+// CHECK-RV64-NEXT: [[ENTRY:.*:]]
+// CHECK-RV64-NEXT: [[TMP0:%.*]] = call <vscale x 4 x float> @llvm.riscv.vfwmsac.nxv4f32.nxv4bf16.nxv4bf16.i64(<vscale x 4 x float> [[VD]], <vscale x 4 x bfloat> [[VS1]], <vscale x 4 x bfloat> [[VS2]], i64 7, i64 [[VL]], i64 3)
+// CHECK-RV64-NEXT: ret <vscale x 4 x float> [[TMP0]]
+//
+vfloat32m2_t test_vfwmsac_vv_bf16m1_f32m2(vfloat32m2_t vd, vbfloat16m1_t vs1,
+ vbfloat16m1_t vs2, size_t vl) {
+ return __riscv_vfwmsac(vd, vs1, vs2, vl);
+}
+
+// CHECK-RV64-LABEL: define dso_local <vscale x 4 x float> @test_vfwmsac_vf_bf16m1_f32m2(
+// CHECK-RV64-SAME: <vscale x 4 x float> [[VD:%.*]], bfloat noundef [[VS1:%.*]], <vscale x 4 x bfloat> [[VS2:%.*]], i64 noundef [[VL:%.*]]) #[[ATTR0]] {
+// CHECK-RV64-NEXT: [[ENTRY:.*:]]
+// CHECK-RV64-NEXT: [[TMP0:%.*]] = call <vscale x 4 x float> @llvm.riscv.vfwmsac.nxv4f32.bf16.nxv4bf16.i64(<vscale x 4 x float> [[VD]], bfloat [[VS1]], <vscale x 4 x bfloat> [[VS2]], i64 7, i64 [[VL]], i64 3)
+// CHECK-RV64-NEXT: ret <vscale x 4 x float> [[TMP0]]
+//
+vfloat32m2_t test_vfwmsac_vf_bf16m1_f32m2(vfloat32m2_t vd, __bf16 vs1,
+ vbfloat16m1_t vs2, size_t vl) {
+ return __riscv_vfwmsac(vd, vs1, vs2, vl);
+}
+
+// CHECK-RV64-LABEL: define dso_local <vscale x 8 x float> @test_vfwmsac_vv_bf16m2_f32m4(
+// CHECK-RV64-SAME: <vscale x 8 x float> [[VD:%.*]], <vscale x 8 x bfloat> [[VS1:%.*]], <vscale x 8 x bfloat> [[VS2:%.*]], i64 noundef [[VL:%.*]]) #[[ATTR0]] {
+// CHECK-RV64-NEXT: [[ENTRY:.*:]]
+// CHECK-RV64-NEXT: [[TMP0:%.*]] = call <vscale x 8 x float> @llvm.riscv.vfwmsac.nxv8f32.nxv8bf16.nxv8bf16.i64(<vscale x 8 x float> [[VD]], <vscale x 8 x bfloat> [[VS1]], <vscale x 8 x bfloat> [[VS2]], i64 7, i64 [[VL]], i64 3)
+// CHECK-RV64-NEXT: ret <vscale x 8 x float> [[TMP0]]
+//
+vfloat32m4_t test_vfwmsac_vv_bf16m2_f32m4(vfloat32m4_t vd, vbfloat16m2_t vs1,
+ vbfloat16m2_t vs2, size_t vl) {
+ return __riscv_vfwmsac(vd, vs1, vs2, vl);
+}
+
+// CHECK-RV64-LABEL: define dso_local <vscale x 8 x float> @test_vfwmsac_vf_bf16m2_f32m4(
+// CHECK-RV64-SAME: <vscale x 8 x float> [[VD:%.*]], bfloat noundef [[VS1:%.*]], <vscale x 8 x bfloat> [[VS2:%.*]], i64 noundef [[VL:%.*]]) #[[ATTR0]] {
+// CHECK-RV64-NEXT: [[ENTRY:.*:]]
+// CHECK-RV64-NEXT: [[TMP0:%.*]] = call <vscale x 8 x float> @llvm.riscv.vfwmsac.nxv8f32.bf16.nxv8bf16.i64(<vscale x 8 x float> [[VD]], bfloat [[VS1]], <vscale x 8 x bfloat> [[VS2]], i64 7, i64 [[VL]], i64 3)
+// CHECK-RV64-NEXT: ret <vscale x 8 x float> [[TMP0]]
+//
+vfloat32m4_t test_vfwmsac_vf_bf16m2_f32m4(vfloat32m4_t vd, __bf16 vs1,
+ vbfloat16m2_t vs2, size_t vl) {
+ return __riscv_vfwmsac(vd, vs1, vs2, vl);
+}
+
+// CHECK-RV64-LABEL: define dso_local <vscale x 16 x float> @test_vfwmsac_vv_bf16m4_f32m8(
+// CHECK-RV64-SAME: <vscale x 16 x float> [[VD:%.*]], <vscale x 16 x bfloat> [[VS1:%.*]], <vscale x 16 x bfloat> [[VS2:%.*]], i64 noundef [[VL:%.*]]) #[[ATTR0]] {
+// CHECK-RV64-NEXT: [[ENTRY:.*:]]
+// CHECK-RV64-NEXT: [[TMP0:%.*]] = call <vscale x 16 x float> @llvm.riscv.vfwmsac.nxv16f32.nxv16bf16.nxv16bf16.i64(<vscale x 16 x float> [[VD]], <vscale x 16 x bfloat> [[VS1]], <vscale x 16 x bfloat> [[VS2]], i64 7, i64 [[VL]], i64 3)
+// CHECK-RV64-NEXT: ret <vscale x 16 x float> [[TMP0]]
+//
+vfloat32m8_t test_vfwmsac_vv_bf16m4_f32m8(vfloat32m8_t vd, vbfloat16m4_t vs1,
+ vbfloat16m4_t vs2, size_t vl) {
+ return __riscv_vfwmsac(vd, vs1, vs2, vl);
+}
+
+// CHECK-RV64-LABEL: define dso_local <vscale x 16 x float> @test_vfwmsac_vf_bf16m4_f32m8(
+// CHECK-RV64-SAME: <vscale x 16 x float> [[VD:%.*]], bfloat noundef [[VS1:%.*]], <vscale x 16 x bfloat> [[VS2:%.*]], i64 noundef [[VL:%.*]]) #[[ATTR0]] {
+// CHECK-RV64-NEXT: [[ENTRY:.*:]]
+// CHECK-RV64-NEXT: [[TMP0:%.*]] = call <vscale x 16 x float> @llvm.riscv.vfwmsac.nxv16f32.bf16.nxv16bf16.i64(<vscale x 16 x float> [[VD]], bfloat [[VS1]], <vscale x 16 x bfloat> [[VS2]], i64 7, i64 [[VL]], i64 3)
+// CHECK-RV64-NEXT: ret <vscale x 16 x float> [[TMP0]]
+//
+vfloat32m8_t test_vfwmsac_vf_bf16m4_f32m8(vfloat32m8_t vd, __bf16 vs1,
+ vbfloat16m4_t vs2, size_t vl) {
+ return __riscv_vfwmsac(vd, vs1, vs2, vl);
+}
+
+// CHECK-RV64-LABEL: define dso_local <vscale x 1 x float> @test_vfwmsac_vv_bf16mf4_f32mf2_m(
+// CHECK-RV64-SAME: <vscale x 1 x i1> [[VM:%.*]], <vscale x 1 x float> [[VD:%.*]], <vscale x 1 x bfloat> [[VS1:%.*]], <vscale x 1 x bfloat> [[VS2:%.*]], i64 noundef [[VL:%.*]]) #[[ATTR0]] {
+// CHECK-RV64-NEXT: [[ENTRY:.*:]]
+// CHECK-RV64-NEXT: [[TMP0:%.*]] = call <vscale x 1 x float> @llvm.riscv.vfwmsac.mask.nxv1f32.nxv1bf16.nxv1bf16.i64(<vscale x 1 x float> [[VD]], <vscale x 1 x bfloat> [[VS1]], <vscale x 1 x bfloat> [[VS2]], <vscale x 1 x i1> [[VM]], i64 7, i64 [[VL]], i64 3)
+// CHECK-RV64-NEXT: ret <vscale x 1 x float> [[TMP0]]
+//
+vfloat32mf2_t test_vfwmsac_vv_bf16mf4_f32mf2_m(vbool64_t vm, vfloat32mf2_t vd,
+ vbfloat16mf4_t vs1,
+ vbfloat16mf4_t vs2, size_t vl) {
+ return __riscv_vfwmsac(vm, vd, vs1, vs2, vl);
+}
+
+// CHECK-RV64-LABEL: define dso_local <vscale x 1 x float> @test_vfwmsac_vf_bf16mf4_f32mf2_m(
+// CHECK-RV64-SAME: <vscale x 1 x i1> [[VM:%.*]], <vscale x 1 x float> [[VD:%.*]], bfloat noundef [[VS1:%.*]], <vscale x 1 x bfloat> [[VS2:%.*]], i64 noundef [[VL:%.*]]) #[[ATTR0]] {
+// CHECK-RV64-NEXT: [[ENTRY:.*:]]
+// CHECK-RV64-NEXT: [[TMP0:%.*]] = call <vscale x 1 x float> @llvm.riscv.vfwmsac.mask.nxv1f32.bf16.nxv1bf16.i64(<vscale x 1 x float> [[VD]], bfloat [[VS1]], <vscale x 1 x bfloat> [[VS2]], <vscale x 1 x i1> [[VM]], i64 7, i64 [[VL]], i64 3)
+// CHECK-RV64-NEXT: ret <vscale x 1 x float> [[TMP0]]
+//
+vfloat32mf2_t test_vfwmsac_vf_bf16mf4_f32mf2_m(vbool64_t vm, vfloat32mf2_t vd,
+ __bf16 vs1, vbfloat16mf4_t vs2,
+ size_t vl) {
+ return __riscv_vfwmsac(vm, vd, vs1, vs2, vl);
+}
+
+// CHECK-RV64-LABEL: define dso_local <vscale x 2 x float> @test_vfwmsac_vv_bf16mf2_f32m1_m(
+// CHECK-RV64-SAME: <vscale x 2 x i1> [[VM:%.*]], <vscale x 2 x float> [[VD:%.*]], <vscale x 2 x bfloat> [[VS1:%.*]], <vscale x 2 x bfloat> [[VS2:%.*]], i64 noundef [[VL:%.*]]) #[[ATTR0]] {
+// CHECK-RV64-NEXT: [[ENTRY:.*:]]
+// CHECK-RV64-NEXT: [[TMP0:%.*]] = call <vscale x 2 x float> @llvm.riscv.vfwmsac.mask.nxv2f32.nxv2bf16.nxv2bf16.i64(<vscale x 2 x float> [[VD]], <vscale x 2 x bfloat> [[VS1]], <vscale x 2 x bfloat> [[VS2]], <vscale x 2 x i1> [[VM]], i64 7, i64 [[VL]], i64 3)
+// CHECK-RV64-NEXT: ret <vscale x 2 x float> [[TMP0]]
+//
+vfloat32m1_t test_vfwmsac_vv_bf16mf2_f32m1_m(vbool32_t vm, vfloat32m1_t vd,
+ vbfloat16mf2_t vs1,
+ vbfloat16mf2_t vs2, size_t vl) {
+ return __riscv_vfwmsac(vm, vd, vs1, vs2, vl);
+}
+
+// CHECK-RV64-LABEL: define dso_local <vscale x 2 x float> @test_vfwmsac_vf_bf16mf2_f32m1_m(
+// CHECK-RV64-SAME: <vscale x 2 x i1> [[VM:%.*]], <vscale x 2 x float> [[VD:%.*]], bfloat noundef [[VS1:%.*]], <vscale x 2 x bfloat> [[VS2:%.*]], i64 noundef [[VL:%.*]]) #[[ATTR0]] {
+// CHECK-RV64-NEXT: [[ENTRY:.*:]]
+// CHECK-RV64-NEXT: [[TMP0:%.*]] = call <vscale x 2 x float> @llvm.riscv.vfwmsac.mask.nxv2f32.bf16.nxv2bf16.i64(<vscale x 2 x float> [[VD]], bfloat [[VS1]], <vscale x 2 x bfloat> [[VS2]], <vscale x 2 x i1> [[VM]], i64 7, i64 [[VL]], i64 3)
+// CHECK-RV64-NEXT: ret <vscale x 2 x float> [[TMP0]]
+//
+vfloat32m1_t test_vfwmsac_vf_bf16mf2_f32m1_m(vbool32_t vm, vfloat32m1_t vd,
+ __bf16 vs1, vbfloat16mf2_t vs2,
+ size_t vl) {
+ return __riscv_vfwmsac(vm, vd, vs1, vs2, vl);
+}
+
+// CHECK-RV64-LABEL: define dso_local <vscale x 4 x float> @test_vfwmsac_vv_bf16m1_f32m2_m(
+// CHECK-RV64-SAME: <vscale x 4 x i1> [[VM:%.*]], <vscale x 4 x float> [[VD:%.*]], <vscale x 4 x bfloat> [[VS1:%.*]], <vscale x 4 x bfloat> [[VS2:%.*]], i64 noundef [[VL:%.*]]) #[[ATTR0]] {
+// CHECK-RV64-NEXT: [[ENTRY:.*:]]
+// CHECK-RV64-NEXT: [[TMP0:%.*]] = call <vscale x 4 x float> @llvm.riscv.vfwmsac.mask.nxv4f32.nxv4bf16.nxv4bf16.i64(<vscale x 4 x float> [[VD]], <vscale x 4 x bfloat> [[VS1]], <vscale x 4 x bfloat> [[VS2]], <vscale x 4 x i1> [[VM]], i64 7, i64 [[VL]], i64 3)
+// CHECK-RV64-NEXT: ret <vscale x 4 x float> [[TMP0]]
+//
+vfloat32m2_t test_vfwmsac_vv_bf16m1_f32m2_m(vbool16_t vm, vfloat32m2_t vd,
+ vbfloat16m1_t vs1,
+ vbfloat16m1_t vs2, size_t vl) {
+ return __riscv_vfwmsac(vm, vd, vs1, vs2, vl);
+}
+
+// CHECK-RV64-LABEL: define dso_local <vscale x 4 x float> @test_vfwmsac_vf_bf16m1_f32m2_m(
+// CHECK-RV64-SAME: <vscale x 4 x i1> [[VM:%.*]], <vscale x 4 x float> [[VD:%.*]], bfloat noundef [[VS1:%.*]], <vscale x 4 x bfloat> [[VS2:%.*]], i64 noundef [[VL:%.*]]) #[[ATTR0]] {
+// CHECK-RV64-NEXT: [[ENTRY:.*:]]
+// CHECK-RV64-NEXT: [[TMP0:%.*]] = call <vscale x 4 x float> @llvm.riscv.vfwmsac.mask.nxv4f32.bf16.nxv4bf16.i64(<vscale x 4 x float> [[VD]], bfloat [[VS1]], <vscale x 4 x bfloat> [[VS2]], <vscale x 4 x i1> [[VM]], i64 7, i64 [[VL]], i64 3)
+// CHECK-RV64-NEXT: ret <vscale x 4 x float> [[TMP0]]
+//
+vfloat32m2_t test_vfwmsac_vf_bf16m1_f32m2_m(vbool16_t vm, vfloat32m2_t vd,
+ __bf16 vs1, vbfloat16m1_t vs2,
+ size_t vl) {
+ return __riscv_vfwmsac(vm, vd, vs1, vs2, vl);
+}
+
+// CHECK-RV64-LABEL: define dso_local <vscale x 8 x float> @test_vfwmsac_vv_bf16m2_f32m4_m(
+// CHECK-RV64-SAME: <vscale x 8 x i1> [[VM:%.*]], <vscale x 8 x float> [[VD:%.*]], <vscale x 8 x bfloat> [[VS1:%.*]], <vscale x 8 x bfloat> [[VS2:%.*]], i64 noundef [[VL:%.*]]) #[[ATTR0]] {
+// CHECK-RV64-NEXT: [[ENTRY:.*:]]
+// CHECK-RV64-NEXT: [[TMP0:%.*]] = call <vscale x 8 x float> @llvm.riscv.vfwmsac.mask.nxv8f32.nxv8bf16.nxv8bf16.i64(<vscale x 8 x float> [[VD]], <vscale x 8 x bfloat> [[VS1]], <vscale x 8 x bfloat> [[VS2]], <vscale x 8 x i1> [[VM]], i64 7, i64 [[VL]], i64 3)
+// CHECK-RV64-NEXT: ret <vscale x 8 x float> [[TMP0]]
+//
+vfloat32m4_t test_vfwmsac_vv_bf16m2_f32m4_m(vbool8_t vm, vfloat32m4_t vd,
+ vbfloat16m2_t vs1,
+ vbfloat16m2_t vs2, size_t vl) {
+ return __riscv_vfwmsac(vm, vd, vs1, vs2, vl);
+}
+
+// CHECK-RV64-LABEL: define dso_local <vscale x 8 x float> @test_vfwmsac_vf_bf16m2_f32m4_m(
+// CHECK-RV64-SAME: <vscale x 8 x i1> [[VM:%.*]], <vscale x 8 x float> [[VD:%.*]], bfloat noundef [[VS1:%.*]], <vscale x 8 x bfloat> [[VS2:%.*]], i64 noundef [[VL:%.*]]) #[[ATTR0]] {
+// CHECK-RV64-NEXT: [[ENTRY:.*:]]
+// CHECK-RV64-NEXT: [[TMP0:%.*]] = call <vscale x 8 x float> @llvm.riscv.vfwmsac.mask.nxv8f32.bf16.nxv8bf16.i64(<vscale x 8 x float> [[VD]], bfloat [[VS1]], <vscale x 8 x bfloat> [[VS2]], <vscale x 8 x i1> [[VM]], i64 7, i64 [[VL]], i64 3)
+// CHECK-RV64-NEXT: ret <vscale x 8 x float> [[TMP0]]
+//
+vfloat32m4_t test_vfwmsac_vf_bf16m2_f32m4_m(vbool8_t vm, vfloat32m4_t vd,
+ __bf16 vs1, vbfloat16m2_t vs2,
+ size_t vl) {
+ return __riscv_vfwmsac(vm, vd, vs1, vs2, vl);
+}
+
+// CHECK-RV64-LABEL: define dso_local <vscale x 16 x float> @test_vfwmsac_vv_bf16m4_f32m8_m(
+// CHECK-RV64-SAME: <vscale x 16 x i1> [[VM:%.*]], <vscale x 16 x float> [[VD:%.*]], <vscale x 16 x bfloat> [[VS1:%.*]], <vscale x 16 x bfloat> [[VS2:%.*]], i64 noundef [[VL:%.*]]) #[[ATTR0]] {
+// CHECK-RV64-NEXT: [[ENTRY:.*:]]
+// CHECK-RV64-NEXT: [[TMP0:%.*]] = call <vscale x 16 x float> @llvm.riscv.vfwmsac.mask.nxv16f32.nxv16bf16.nxv16bf16.i64(<vscale x 16 x float> [[VD]], <vscale x 16 x bfloat> [[VS1]], <vscale x 16 x bfloat> [[VS2]], <vscale x 16 x i1> [[VM]], i64 7, i64 [[VL]], i64 3)
+// CHECK-RV64-NEXT: ret <vscale x 16 x float> [[TMP0]]
+//
+vfloat32m8_t test_vfwmsac_vv_bf16m4_f32m8_m(vbool4_t vm, vfloat32m8_t vd,
+ vbfloat16m4_t vs1,
+ vbfloat16m4_t vs2, size_t vl) {
+ return __riscv_vfwmsac(vm, vd, vs1, vs2, vl);
+}
+
+// CHECK-RV64-LABEL: define dso_local <vscale x 16 x float> @test_vfwmsac_vf_bf16m4_f32m8_m(
+// CHECK-RV64-SAME: <vscale x 16 x i1> [[VM:%.*]], <vscale x 16 x float> [[VD:%.*]], bfloat noundef [[VS1:%.*]], <vscale x 16 x bfloat> [[VS2:%.*]], i64 noundef [[VL:%.*]]) #[[ATTR0]] {
+// CHECK-RV64-NEXT: [[ENTRY:.*:]]
+// CHECK-RV64-NEXT: [[TMP0:%.*]] = call <vscale x 16 x float> @llvm.riscv.vfwmsac.mask.nxv16f32.bf16.nxv16bf16.i64(<vscale x 16 x float> [[VD]], bfloat [[VS1]], <vscale x 16 x bfloat> [[VS2]], <vscale x 16 x i1> [[VM]], i64 7, i64 [[VL]], i64 3)
+// CHECK-RV64-NEXT: ret <vscale x 16 x float> [[TMP0]]
+//
+vfloat32m8_t test_vfwmsac_vf_bf16m4_f32m8_m(vbool4_t vm, vfloat32m8_t vd,
+ __bf16 vs1, vbfloat16m4_t vs2,
+ size_t vl) {
+ return __riscv_vfwmsac(vm, vd, vs1, vs2, vl);
+}
+
+// CHECK-RV64-LABEL: define dso_local <vscale x 1 x float> @test_vfwmsac_vv_bf16mf4_f32mf2_rm(
+// CHECK-RV64-SAME: <vscale x 1 x float> [[VD:%.*]], <vscale x 1 x bfloat> [[VS1:%.*]], <vscale x 1 x bfloat> [[VS2:%.*]], i64 noundef [[VL:%.*]]) #[[ATTR0]] {
+// CHECK-RV64-NEXT: [[ENTRY:.*:]]
+// CHECK-RV64-NEXT: [[TMP0:%.*]] = call <vscale x 1 x float> @llvm.riscv.vfwmsac.nxv1f32.nxv1bf16.nxv1bf16.i64(<vscale x 1 x float> [[VD]], <vscale x 1 x bfloat> [[VS1]], <vscale x 1 x bfloat> [[VS2]], i64 0, i64 [[VL]], i64 3)
+// CHECK-RV64-NEXT: ret <vscale x 1 x float> [[TMP0]]
+//
+vfloat32mf2_t test_vfwmsac_vv_bf16mf4_f32mf2_rm(vfloat32mf2_t vd,
+ vbfloat16mf4_t vs1,
+ vbfloat16mf4_t vs2, size_t vl) {
+ return __riscv_vfwmsac(vd, vs1, vs2, __RISCV_FRM_RNE, vl);
+}
+
+// CHECK-RV64-LABEL: define dso_local <vscale x 1 x float> @test_vfwmsac_vf_bf16mf4_f32mf2_rm(
+// CHECK-RV64-SAME: <vscale x 1 x float> [[VD:%.*]], bfloat noundef [[VS1:%.*]], <vscale x 1 x bfloat> [[VS2:%.*]], i64 noundef [[VL:%.*]]) #[[ATTR0]] {
+// CHECK-RV64-NEXT: [[ENTRY:.*:]]
+// CHECK-RV64-NEXT: [[TMP0:%.*]] = call <vscale x 1 x float> @llvm.riscv.vfwmsac.nxv1f32.bf16.nxv1bf16.i64(<vscale x 1 x float> [[VD]], bfloat [[VS1]], <vscale x 1 x bfloat> [[VS2]], i64 0, i64 [[VL]], i64 3)
+// CHECK-RV64-NEXT: ret <vscale x 1 x float> [[TMP0]]
+//
+vfloat32mf2_t test_vfwmsac_vf_bf16mf4_f32mf2_rm(vfloat32mf2_t vd, __bf16 vs1,
+ vbfloat16mf4_t vs2, size_t vl) {
+ return __riscv_vfwmsac(vd, vs1, vs2, __RISCV_FRM_RNE, vl);
+}
+
+// CHECK-RV64-LABEL: define dso_local <vscale x 2 x float> @test_vfwmsac_vv_bf16mf2_f32m1_rm(
+// CHECK-RV64-SAME: <vscale x 2 x float> [[VD:%.*]], <vscale x 2 x bfloat> [[VS1:%.*]], <vscale x 2 x bfloat> [[VS2:%.*]], i64 noundef [[VL:%.*]]) #[[ATTR0]] {
+// CHECK-RV64-NEXT: [[ENTRY:.*:]]
+// CHECK-RV64-NEXT: [[TMP0:%.*]] = call <vscale x 2 x float> @llvm.riscv.vfwmsac.nxv2f32.nxv2bf16.nxv2bf16.i64(<vscale x 2 x float> [[VD]], <vscale x 2 x bfloat> [[VS1]], <vscale x 2 x bfloat> [[VS2]], i64 0, i64 [[VL]], i64 3)
+// CHECK-RV64-NEXT: ret <vscale x 2 x float> [[TMP0]]
+//
+vfloat32m1_t test_vfwmsac_vv_bf16mf2_f32m1_rm(vfloat32m1_t vd,
+ vbfloat16mf2_t vs1,
+ vbfloat16mf2_t vs2, size_t vl) {
+ return __riscv_vfwmsac(vd, vs1, vs2, __RISCV_FRM_RNE, vl);
+}
+
+// CHECK-RV64-LABEL: define dso_local <vscale x 2 x float> @test_vfwmsac_vf_bf16mf2_f32m1_rm(
+// CHECK-RV64-SAME: <vscale x 2 x float> [[VD:%.*]], bfloat noundef [[VS1:%.*]], <vscale x 2 x bfloat> [[VS2:%.*]], i64 noundef [[VL:%.*]]) #[[ATTR0]] {
+// CHECK-RV64-NEXT: [[ENTRY:.*:]]
+// CHECK-RV64-NEXT: [[TMP0:%.*]] = call <vscale x 2 x float> @llvm.riscv.vfwmsac.nxv2f32.bf16.nxv2bf16.i64(<vscale x 2 x float> [[VD]], bfloat [[VS1]], <vscale x 2 x bfloat> [[VS2]], i64 0, i64 [[VL]], i64 3)
+// CHECK-RV64-NEXT: ret <vscale x 2 x float> [[TMP0]]
+//
+vfloat32m1_t test_vfwmsac_vf_bf16mf2_f32m1_rm(vfloat32m1_t vd, __bf16 vs1,
+ vbfloat16mf2_t vs2, size_t vl) {
+ return __riscv_vfwmsac(vd, vs1, vs2, __RISCV_FRM_RNE, vl);
+}
+
+// CHECK-RV64-LABEL: define dso_local <vscale x 4 x float> @test_vfwmsac_vv_bf16m1_f32m2_rm(
+// CHECK-RV64-SAME: <vscale x 4 x float> [[VD:%.*]], <vscale x 4 x bfloat> [[VS1:%.*]], <vscale x 4 x bfloat> [[VS2:%.*]], i64 noundef [[VL:%.*]]) #[[ATTR0]] {
+// CHECK-RV64-NEXT: [[ENTRY:.*:]]
+// CHECK-RV64-NEXT: [[TMP0:%.*]] = call <vscale x 4 x float> @llvm.riscv.vfwmsac.nxv4f32.nxv4bf16.nxv4bf16.i64(<vscale x 4 x float> [[VD]], <vscale x 4 x bfloat> [[VS1]], <vscale x 4 x bfloat> [[VS2]], i64 0, i64 [[VL]], i64 3)
+// CHECK-RV64-NEXT: ret <vscale x 4 x float> [[TMP0]]
+//
+vfloat32m2_t test_vfwmsac_vv_bf16m1_f32m2_rm(vfloat32m2_t vd, vbfloat16m1_t vs1,
+ vbfloat16m1_t vs2, size_t vl) {
+ return __riscv_vfwmsac(vd, vs1, vs2, __RISCV_FRM_RNE, vl);
+}
+
+// CHECK-RV64-LABEL: define dso_local <vscale x 4 x float> @test_vfwmsac_vf_bf16m1_f32m2_rm(
+// CHECK-RV64-SAME: <vscale x 4 x float> [[VD:%.*]], bfloat noundef [[VS1:%.*]], <vscale x 4 x bfloat> [[VS2:%.*]], i64 noundef [[VL:%.*]]) #[[ATTR0]] {
+// CHECK-RV64-NEXT: [[ENTRY:.*:]]
+// CHECK-RV64-NEXT: [[TMP0:%.*]] = call <vscale x 4 x float> @llvm.riscv.vfwmsac.nxv4f32.bf16.nxv4bf16.i64(<vscale x 4 x float> [[VD]], bfloat [[VS1]], <vscale x 4 x bfloat> [[VS2]], i64 0, i64 [[VL]], i64 3)
+// CHECK-RV64-NEXT: ret <vscale x 4 x float> [[TMP0]]
+//
+vfloat32m2_t test_vfwmsac_vf_bf16m1_f32m2_rm(vfloat32m2_t vd, __bf16 vs1,
+ vbfloat16m1_t vs2, size_t vl) {
+ return __riscv_vfwmsac(vd, vs1, vs2, __RISCV_FRM_RNE, vl);
+}
+
+// CHECK-RV64-LABEL: define dso_local <vscale x 8 x float> @test_vfwmsac_vv_bf16m2_f32m4_rm(
+// CHECK-RV64-SAME: <vscale x 8 x float> [[VD:%.*]], <vscale x 8 x bfloat> [[VS1:%.*]], <vscale x 8 x bfloat> [[VS2:%.*]], i64 noundef [[VL:%.*]]) #[[ATTR0]] {
+// CHECK-RV64-NEXT: [[ENTRY:.*:]]
+// CHECK-RV64-NEXT: [[TMP0:%.*]] = call <vscale x 8 x float> @llvm.riscv.vfwmsac.nxv8f32.nxv8bf16.nxv8bf16.i64(<vscale x 8 x float> [[VD]], <vscale x 8 x bfloat> [[VS1]], <vscale x 8 x bfloat> [[VS2]], i64 0, i64 [[VL]], i64 3)
+// CHECK-RV64-NEXT: ret <vscale x 8 x float> [[TMP0]]
+//
+vfloat32m4_t test_vfwmsac_vv_bf16m2_f32m4_rm(vfloat32m4_t vd, vbfloat16m2_t vs1,
+ vbfloat16m2_t vs2, size_t vl) {
+ return __riscv_vfwmsac(vd, vs1, vs2, __RISCV_FRM_RNE, vl);
+}
+
+// CHECK-RV64-LABEL: define dso_local <vscale x 8 x float> @test_vfwmsac_vf_bf16m2_f32m4_rm(
+// CHECK-RV64-SAME: <vscale x 8 x float> [[VD:%.*]], bfloat noundef [[VS1:%.*]], <vscale x 8 x bfloat> [[VS2:%.*]], i64 noundef [[VL:%.*]]) #[[ATTR0]] {
+// CHECK-RV64-NEXT: [[ENTRY:.*:]]
+// CHECK-RV64-NEXT: [[TMP0:%.*]] = call <vscale x 8 x float> @llvm.riscv.vfwmsac.nxv8f32.bf16.nxv8bf16.i64(<vscale x 8 x float> [[VD]], bfloat [[VS1]], <vscale x 8 x bfloat> [[VS2]], i64 0, i64 [[VL]], i64 3)
+// CHECK-RV64-NEXT: ret <vscale x 8 x float> [[TMP0]]
+//
+vfloat32m4_t test_vfwmsac_vf_bf16m2_f32m4_rm(vfloat32m4_t vd, __bf16 vs1,
+ vbfloat16m2_t vs2, size_t vl) {
+ return __riscv_vfwmsac(vd, vs1, vs2, __RISCV_FRM_RNE, vl);
+}
+
+// CHECK-RV64-LABEL: define dso_local <vscale x 16 x float> @test_vfwmsac_vv_bf16m4_f32m8_rm(
+// CHECK-RV64-SAME: <vscale x 16 x float> [[VD:%.*]], <vscale x 16 x bfloat> [[VS1:%.*]], <vscale x 16 x bfloat> [[VS2:%.*]], i64 noundef [[VL:%.*]]) #[[ATTR0]] {
+// CHECK-RV64-NEXT: [[ENTRY:.*:]]
+// CHECK-RV64-NEXT: [[TMP0:%.*]] = call <vscale x 16 x float> @llvm.riscv.vfwmsac.nxv16f32.nxv16bf16.nxv16bf16.i64(<vscale x 16 x float> [[VD]], <vscale x 16 x bfloat> [[VS1]], <vscale x 16 x bfloat> [[VS2]], i64 0, i64 [[VL]], i64 3)
+// CHECK-RV64-NEXT: ret <vscale x 16 x float> [[TMP0]]
+//
+vfloat32m8_t test_vfwmsac_vv_bf16m4_f32m8_rm(vfloat32m8_t vd, vbfloat16m4_t vs1,
+ vbfloat16m4_t vs2, size_t vl) {
+ return __riscv_vfwmsac(vd, vs1, vs2, __RISCV_FRM_RNE, vl);
+}
+
+// CHECK-RV64-LABEL: define dso_local <vscale x 16 x float> @test_vfwmsac_vf_bf16m4_f32m8_rm(
+// CHECK-RV64-SAME: <vscale x 16 x float> [[VD:%.*]], bfloat noundef [[VS1:%.*]], <vscale x 16 x bfloat> [[VS2:%.*]], i64 noundef [[VL:%.*]]) #[[ATTR0]] {
+// CHECK-RV64-NEXT: [[ENTRY:.*:]]
+// CHECK-RV64-NEXT: [[TMP0:%.*]] = call <vscale x 16 x float> @llvm.riscv.vfwmsac.nxv16f32.bf16.nxv16bf16.i64(<vscale x 16 x float> [[VD]], bfloat [[VS1]], <vscale x 16 x bfloat> [[VS2]], i64 0, i64 [[VL]], i64 3)
+// CHECK-RV64-NEXT: ret <vscale x 16 x float> [[TMP0]]
+//
+vfloat32m8_t test_vfwmsac_vf_bf16m4_f32m8_rm(vfloat32m8_t vd, __bf16 vs1,
+ vbfloat16m4_t vs2, size_t vl) {
+ return __riscv_vfwmsac(vd, vs1, vs2, __RISCV_FRM_RNE, vl);
+}
+
+// CHECK-RV64-LABEL: define dso_local <vscale x 1 x float> @test_vfwmsac_vv_bf16mf4_f32mf2_rm_m(
+// CHECK-RV64-SAME: <vscale x 1 x i1> [[VM:%.*]], <vscale x 1 x float> [[VD:%.*]], <vscale x 1 x bfloat> [[VS1:%.*]], <vscale x 1 x bfloat> [[VS2:%.*]], i64 noundef [[VL:%.*]]) #[[ATTR0]] {
+// CHECK-RV64-NEXT: [[ENTRY:.*:]]
+// CHECK-RV64-NEXT: [[TMP0:%.*]] = call <vscale x 1 x float> @llvm.riscv.vfwmsac.mask.nxv1f32.nxv1bf16.nxv1bf16.i64(<vscale x 1 x float> [[VD]], <vscale x 1 x bfloat> [[VS1]], <vscale x 1 x bfloat> [[VS2]], <vscale x 1 x i1> [[VM]], i64 0, i64 [[VL]], i64 3)
+// CHECK-RV64-NEXT: ret <vscale x 1 x float> [[TMP0]]
+//
+vfloat32mf2_t test_vfwmsac_vv_bf16mf4_f32mf2_rm_m(vbool64_t vm,
+ vfloat32mf2_t vd,
+ vbfloat16mf4_t vs1,
+ vbfloat16mf4_t vs2,
+ size_t vl) {
+ return __riscv_vfwmsac(vm, vd, vs1, vs2, __RISCV_FRM_RNE, vl);
+}
+
+// CHECK-RV64-LABEL: define dso_local <vscale x 1 x float> @test_vfwmsac_vf_bf16mf4_f32mf2_rm_m(
+// CHECK-RV64-SAME: <vscale x 1 x i1> [[VM:%.*]], <vscale x 1 x float> [[VD:%.*]], bfloat noundef [[VS1:%.*]], <vscale x 1 x bfloat> [[VS2:%.*]], i64 noundef [[VL:%.*]]) #[[ATTR0]] {
+// CHECK-RV64-NEXT: [[ENTRY:.*:]]
+// CHECK-RV64-NEXT: [[TMP0:%.*]] = call <vscale x 1 x float> @llvm.riscv.vfwmsac.mask.nxv1f32.bf16.nxv1bf16.i64(<vscale x 1 x float> [[VD]], bfloat [[VS1]], <vscale x 1 x bfloat> [[VS2]], <vscale x 1 x i1> [[VM]], i64 0, i64 [[VL]], i64 3)
+// CHECK-RV64-NEXT: ret <vscale x 1 x float> [[TMP0]]
+//
+vfloat32mf2_t test_vfwmsac_vf_bf16mf4_f32mf2_rm_m(vbool64_t vm,
+ vfloat32mf2_t vd, __bf16 vs1,
+ vbfloat16mf4_t vs2,
+ size_t vl) {
+ return __riscv_vfwmsac(vm, vd, vs1, vs2, __RISCV_FRM_RNE, vl);
+}
+
+// CHECK-RV64-LABEL: define dso_local <vscale x 2 x float> @test_vfwmsac_vv_bf16mf2_f32m1_rm_m(
+// CHECK-RV64-SAME: <vscale x 2 x i1> [[VM:%.*]], <vscale x 2 x float> [[VD:%.*]], <vscale x 2 x bfloat> [[VS1:%.*]], <vscale x 2 x bfloat> [[VS2:%.*]], i64 noundef [[VL:%.*]]) #[[ATTR0]] {
+// CHECK-RV64-NEXT: [[ENTRY:.*:]]
+// CHECK-RV64-NEXT: [[TMP0:%.*]] = call <vscale x 2 x float> @llvm.riscv.vfwmsac.mask.nxv2f32.nxv2bf16.nxv2bf16.i64(<vscale x 2 x float> [[VD]], <vscale x 2 x bfloat> [[VS1]], <vscale x 2 x bfloat> [[VS2]], <vscale x 2 x i1> [[VM]], i64 0, i64 [[VL]], i64 3)
+// CHECK-RV64-NEXT: ret <vscale x 2 x float> [[TMP0]]
+//
+vfloat32m1_t test_vfwmsac_vv_bf16mf2_f32m1_rm_m(vbool32_t vm, vfloat32m1_t vd,
+ vbfloat16mf2_t vs1,
+ vbfloat16mf2_t vs2, size_t vl) {
+ return __riscv_vfwmsac(vm, vd, vs1, vs2, __RISCV_FRM_RNE, vl);
+}
+
+// CHECK-RV64-LABEL: define dso_local <vscale x 2 x float> @test_vfwmsac_vf_bf16mf2_f32m1_rm_m(
+// CHECK-RV64-SAME: <vscale x 2 x i1> [[VM:%.*]], <vscale x 2 x float> [[VD:%.*]], bfloat noundef [[VS1:%.*]], <vscale x 2 x bfloat> [[VS2:%.*]], i64 noundef [[VL:%.*]]) #[[ATTR0]] {
+// CHECK-RV64-NEXT: [[ENTRY:.*:]]
+// CHECK-RV64-NEXT: [[TMP0:%.*]] = call <vscale x 2 x float> @llvm.riscv.vfwmsac.mask.nxv2f32.bf16.nxv2bf16.i64(<vscale x 2 x float> [[VD]], bfloat [[VS1]], <vscale x 2 x bfloat> [[VS2]], <vscale x 2 x i1> [[VM]], i64 0, i64 [[VL]], i64 3)
+// CHECK-RV64-NEXT: ret <vscale x 2 x float> [[TMP0]]
+//
+vfloat32m1_t test_vfwmsac_vf_bf16mf2_f32m1_rm_m(vbool32_t vm, vfloat32m1_t vd,
+ __bf16 vs1, vbfloat16mf2_t vs2,
+ size_t vl) {
+ return __riscv_vfwmsac(vm, vd, vs1, vs2, __RISCV_FRM_RNE, vl);
+}
+
+// CHECK-RV64-LABEL: define dso_local <vscale x 4 x float> @test_vfwmsac_vv_bf16m1_f32m2_rm_m(
+// CHECK-RV64-SAME: <vscale x 4 x i1> [[VM:%.*]], <vscale x 4 x float> [[VD:%.*]], <vscale x 4 x bfloat> [[VS1:%.*]], <vscale x 4 x bfloat> [[VS2:%.*]], i64 noundef [[VL:%.*]]) #[[ATTR0]] {
+// CHECK-RV64-NEXT: [[ENTRY:.*:]]
+// CHECK-RV64-NEXT: [[TMP0:%.*]] = call <vscale x 4 x float> @llvm.riscv.vfwmsac.mask.nxv4f32.nxv4bf16.nxv4bf16.i64(<vscale x 4 x float> [[VD]], <vscale x 4 x bfloat> [[VS1]], <vscale x 4 x bfloat> [[VS2]], <vscale x 4 x i1> [[VM]], i64 0, i64 [[VL]], i64 3)
+// CHECK-RV64-NEXT: ret <vscale x 4 x float> [[TMP0]]
+//
+vfloat32m2_t test_vfwmsac_vv_bf16m1_f32m2_rm_m(vbool16_t vm, vfloat32m2_t vd,
+ vbfloat16m1_t vs1,
+ vbfloat16m1_t vs2, size_t vl) {
+ return __riscv_vfwmsac(vm, vd, vs1, vs2, __RISCV_FRM_RNE, vl);
+}
+
+// CHECK-RV64-LABEL: define dso_local <vscale x 4 x float> @test_vfwmsac_vf_bf16m1_f32m2_rm_m(
+// CHECK-RV64-SAME: <vscale x 4 x i1> [[VM:%.*]], <vscale x 4 x float> [[VD:%.*]], bfloat noundef [[VS1:%.*]], <vscale x 4 x bfloat> [[VS2:%.*]], i64 noundef [[VL:%.*]]) #[[ATTR0]] {
+// CHECK-RV64-NEXT: [[ENTRY:.*:]]
+// CHECK-RV64-NEXT: [[TMP0:%.*]] = call <vscale x 4 x float> @llvm.riscv.vfwmsac.mask.nxv4f32.bf16.nxv4bf16.i64(<vscale x 4 x float> [[VD]], bfloat [[VS1]], <vscale x 4 x bfloat> [[VS2]], <vscale x 4 x i1> [[VM]], i64 0, i64 [[VL]], i64 3)
+// CHECK-RV64-NEXT: ret <vscale x 4 x float> [[TMP0]]
+//
+vfloat32m2_t test_vfwmsac_vf_bf16m1_f32m2_rm_m(vbool16_t vm, vfloat32m2_t vd,
+ __bf16 vs1, vbfloat16m1_t vs2,
+ size_t vl) {
+ return __riscv_vfwmsac(vm, vd, vs1, vs2, __RISCV_FRM_RNE, vl);
+}
+
+// CHECK-RV64-LABEL: define dso_local <vscale x 8 x float> @test_vfwmsac_vv_bf16m2_f32m4_rm_m(
+// CHECK-RV64-SAME: <vscale x 8 x i1> [[VM:%.*]], <vscale x 8 x float> [[VD:%.*]], <vscale x 8 x bfloat> [[VS1:%.*]], <vscale x 8 x bfloat> [[VS2:%.*]], i64 noundef [[VL:%.*]]) #[[ATTR0]] {
+// CHECK-RV64-NEXT: [[ENTRY:.*:]]
+// CHECK-RV64-NEXT: [[TMP0:%.*]] = call <vscale x 8 x float> @llvm.riscv.vfwmsac.mask.nxv8f32.nxv8bf16.nxv8bf16.i64(<vscale x 8 x float> [[VD]], <vscale x 8 x bfloat> [[VS1]], <vscale x 8 x bfloat> [[VS2]], <vscale x 8 x i1> [[VM]], i64 0, i64 [[VL]], i64 3)
+// CHECK-RV64-NEXT: ret <vscale x 8 x float> [[TMP0]]
+//
+vfloat32m4_t test_vfwmsac_vv_bf16m2_f32m4_rm_m(vbool8_t vm, vfloat32m4_t vd,
+ vbfloat16m2_t vs1,
+ vbfloat16m2_t vs2, size_t vl) {
+ return __riscv_vfwmsac(vm, vd, vs1, vs2, __RISCV_FRM_RNE, vl);
+}
+
+// CHECK-RV64-LABEL: define dso_local <vscale x 8 x float> @test_vfwmsac_vf_bf16m2_f32m4_rm_m(
+// CHECK-RV64-SAME: <vscale x 8 x i1> [[VM:%.*]], <vscale x 8 x float> [[VD:%.*]], bfloat noundef [[VS1:%.*]], <vscale x 8 x bfloat> [[VS2:%.*]], i64 noundef [[VL:%.*]]) #[[ATTR0]] {
+// CHECK-RV64-NEXT: [[ENTRY:.*:]]
+// CHECK-RV64-NEXT: [[TMP0:%.*]] = call <vscale x 8 x float> @llvm.riscv.vfwmsac.mask.nxv8f32.bf16.nxv8bf16.i64(<vscale x 8 x float> [[VD]], bfloat [[VS1]], <vscale x 8 x bfloat> [[VS2]], <vscale x 8 x i1> [[VM]], i64 0, i64 [[VL]], i64 3)
+// CHECK-RV64-NEXT: ret <vscale x 8 x float> [[TMP0]]
+//
+vfloat32m4_t test_vfwmsac_vf_bf16m2_f32m4_rm_m(vbool8_t vm, vfloat32m4_t vd,
+ __bf16 vs1, vbfloat16m2_t vs2,
+ size_t vl) {
+ return __riscv_vfwmsac(vm, vd, vs1, vs2, __RISCV_FRM_RNE, vl);
+}
+
+// CHECK-RV64-LABEL: define dso_local <vscale x 16 x float> @test_vfwmsac_vv_bf16m4_f32m8_rm_m(
+// CHECK-RV64-SAME: <vscale x 16 x i1> [[VM:%.*]], <vscale x 16 x float> [[VD:%.*]], <vscale x 16 x bfloat> [[VS1:%.*]], <vscale x 16 x bfloat> [[VS2:%.*]], i64 noundef [[VL:%.*]]) #[[ATTR0]] {
+// CHECK-RV64-NEXT: [[ENTRY:.*:]]
+// CHECK-RV64-NEXT: [[TMP0:%.*]] = call <vscale x 16 x float> @llvm.riscv.vfwmsac.mask.nxv16f32.nxv16bf16.nxv16bf16.i64(<vscale x 16 x float> [[VD]], <vscale x 16 x bfloat> [[VS1]], <vscale x 16 x bfloat> [[VS2]], <vscale x 16 x i1> [[VM]], i64 0, i64 [[VL]], i64 3)
+// CHECK-RV64-NEXT: ret <vscale x 16 x float> [[TMP0]]
+//
+vfloat32m8_t test_vfwmsac_vv_bf16m4_f32m8_rm_m(vbool4_t vm, vfloat32m8_t vd,
+ vbfloat16m4_t vs1,
+ vbfloat16m4_t vs2, size_t vl) {
+ return __riscv_vfwmsac(vm, vd, vs1, vs2, __RISCV_FRM_RNE, vl);
+}
+
+// CHECK-RV64-LABEL: define dso_local <vscale x 16 x float> @test_vfwmsac_vf_bf16m4_f32m8_rm_m(
+// CHECK-RV64-SAME: <vscale x 16 x i1> [[VM:%.*]], <vscale x 16 x float> [[VD:%.*]], bfloat noundef [[VS1:%.*]], <vscale x 16 x bfloat> [[VS2:%.*]], i64 noundef [[VL:%.*]]) #[[ATTR0]] {
+// CHECK-RV64-NEXT: [[ENTRY:.*:]]
+// CHECK-RV64-NEXT: [[TMP0:%.*]] = call <vscale x 16 x float> @llvm.riscv.vfwmsac.mask.nxv16f32.bf16.nxv16bf16.i64(<vscale x 16 x float> [[VD]], bfloat [[VS1]], <vscale x 16 x bfloat> [[VS2]], <vscale x 16 x i1> [[VM]], i64 0, i64 [[VL]], i64 3)
+// CHECK-RV64-NEXT: ret <vscale x 16 x float> [[TMP0]]
+//
+vfloat32m8_t test_vfwmsac_vf_bf16m4_f32m8_rm_m(vbool4_t vm, vfloat32m8_t vd,
+ __bf16 vs1, vbfloat16m4_t vs2,
+ size_t vl) {
+ return __riscv_vfwmsac(vm, vd, vs1, vs2, __RISCV_FRM_RNE, vl);
+}
diff --git a/clang/test/CodeGen/RISCV/rvv-intrinsics-autogenerated/zvfbfa/non-policy/overloaded/vfwmul.c b/clang/test/CodeGen/RISCV/rvv-intrinsics-autogenerated/zvfbfa/non-policy/overloaded/vfwmul.c
new file mode 100644
index 0000000..8de49fa
--- /dev/null
+++ b/clang/test/CodeGen/RISCV/rvv-intrinsics-autogenerated/zvfbfa/non-policy/overloaded/vfwmul.c
@@ -0,0 +1,451 @@
+// NOTE: Assertions have been autogenerated by utils/update_cc_test_checks.py UTC_ARGS: --version 5
+// REQUIRES: riscv-registered-target
+// RUN: %clang_cc1 -triple riscv64 -target-feature +v \
+// RUN: -target-feature +experimental-zvfbfa -disable-O0-optnone \
+// RUN: -emit-llvm %s -o - | opt -S -passes=mem2reg | \
+// RUN: FileCheck --check-prefix=CHECK-RV64 %s
+
+#include <riscv_vector.h>
+
+// CHECK-RV64-LABEL: define dso_local <vscale x 1 x float> @test_vfwmul_vv_bf16mf4_f32mf2(
+// CHECK-RV64-SAME: <vscale x 1 x bfloat> [[VS2:%.*]], <vscale x 1 x bfloat> [[VS1:%.*]], i64 noundef [[VL:%.*]]) #[[ATTR0:[0-9]+]] {
+// CHECK-RV64-NEXT: [[ENTRY:.*:]]
+// CHECK-RV64-NEXT: [[TMP0:%.*]] = call <vscale x 1 x float> @llvm.riscv.vfwmul.nxv1f32.nxv1bf16.nxv1bf16.i64(<vscale x 1 x float> poison, <vscale x 1 x bfloat> [[VS2]], <vscale x 1 x bfloat> [[VS1]], i64 7, i64 [[VL]])
+// CHECK-RV64-NEXT: ret <vscale x 1 x float> [[TMP0]]
+//
+vfloat32mf2_t test_vfwmul_vv_bf16mf4_f32mf2(vbfloat16mf4_t vs2,
+ vbfloat16mf4_t vs1, size_t vl) {
+ return __riscv_vfwmul(vs2, vs1, vl);
+}
+
+// CHECK-RV64-LABEL: define dso_local <vscale x 1 x float> @test_vfwmul_vf_bf16mf4_f32mf2(
+// CHECK-RV64-SAME: <vscale x 1 x bfloat> [[VS2:%.*]], bfloat noundef [[RS1:%.*]], i64 noundef [[VL:%.*]]) #[[ATTR0]] {
+// CHECK-RV64-NEXT: [[ENTRY:.*:]]
+// CHECK-RV64-NEXT: [[TMP0:%.*]] = call <vscale x 1 x float> @llvm.riscv.vfwmul.nxv1f32.nxv1bf16.bf16.i64(<vscale x 1 x float> poison, <vscale x 1 x bfloat> [[VS2]], bfloat [[RS1]], i64 7, i64 [[VL]])
+// CHECK-RV64-NEXT: ret <vscale x 1 x float> [[TMP0]]
+//
+vfloat32mf2_t test_vfwmul_vf_bf16mf4_f32mf2(vbfloat16mf4_t vs2, __bf16 rs1,
+ size_t vl) {
+ return __riscv_vfwmul(vs2, rs1, vl);
+}
+
+// CHECK-RV64-LABEL: define dso_local <vscale x 2 x float> @test_vfwmul_vv_bf16mf2_f32m1(
+// CHECK-RV64-SAME: <vscale x 2 x bfloat> [[VS2:%.*]], <vscale x 2 x bfloat> [[VS1:%.*]], i64 noundef [[VL:%.*]]) #[[ATTR0]] {
+// CHECK-RV64-NEXT: [[ENTRY:.*:]]
+// CHECK-RV64-NEXT: [[TMP0:%.*]] = call <vscale x 2 x float> @llvm.riscv.vfwmul.nxv2f32.nxv2bf16.nxv2bf16.i64(<vscale x 2 x float> poison, <vscale x 2 x bfloat> [[VS2]], <vscale x 2 x bfloat> [[VS1]], i64 7, i64 [[VL]])
+// CHECK-RV64-NEXT: ret <vscale x 2 x float> [[TMP0]]
+//
+vfloat32m1_t test_vfwmul_vv_bf16mf2_f32m1(vbfloat16mf2_t vs2,
+ vbfloat16mf2_t vs1, size_t vl) {
+ return __riscv_vfwmul(vs2, vs1, vl);
+}
+
+// CHECK-RV64-LABEL: define dso_local <vscale x 2 x float> @test_vfwmul_vf_bf16mf2_f32m1(
+// CHECK-RV64-SAME: <vscale x 2 x bfloat> [[VS2:%.*]], bfloat noundef [[RS1:%.*]], i64 noundef [[VL:%.*]]) #[[ATTR0]] {
+// CHECK-RV64-NEXT: [[ENTRY:.*:]]
+// CHECK-RV64-NEXT: [[TMP0:%.*]] = call <vscale x 2 x float> @llvm.riscv.vfwmul.nxv2f32.nxv2bf16.bf16.i64(<vscale x 2 x float> poison, <vscale x 2 x bfloat> [[VS2]], bfloat [[RS1]], i64 7, i64 [[VL]])
+// CHECK-RV64-NEXT: ret <vscale x 2 x float> [[TMP0]]
+//
+vfloat32m1_t test_vfwmul_vf_bf16mf2_f32m1(vbfloat16mf2_t vs2, __bf16 rs1,
+ size_t vl) {
+ return __riscv_vfwmul(vs2, rs1, vl);
+}
+
+// CHECK-RV64-LABEL: define dso_local <vscale x 4 x float> @test_vfwmul_vv_bf16m1_f32m2(
+// CHECK-RV64-SAME: <vscale x 4 x bfloat> [[VS2:%.*]], <vscale x 4 x bfloat> [[VS1:%.*]], i64 noundef [[VL:%.*]]) #[[ATTR0]] {
+// CHECK-RV64-NEXT: [[ENTRY:.*:]]
+// CHECK-RV64-NEXT: [[TMP0:%.*]] = call <vscale x 4 x float> @llvm.riscv.vfwmul.nxv4f32.nxv4bf16.nxv4bf16.i64(<vscale x 4 x float> poison, <vscale x 4 x bfloat> [[VS2]], <vscale x 4 x bfloat> [[VS1]], i64 7, i64 [[VL]])
+// CHECK-RV64-NEXT: ret <vscale x 4 x float> [[TMP0]]
+//
+vfloat32m2_t test_vfwmul_vv_bf16m1_f32m2(vbfloat16m1_t vs2, vbfloat16m1_t vs1,
+ size_t vl) {
+ return __riscv_vfwmul(vs2, vs1, vl);
+}
+
+// CHECK-RV64-LABEL: define dso_local <vscale x 4 x float> @test_vfwmul_vf_bf16m1_f32m2(
+// CHECK-RV64-SAME: <vscale x 4 x bfloat> [[VS2:%.*]], bfloat noundef [[RS1:%.*]], i64 noundef [[VL:%.*]]) #[[ATTR0]] {
+// CHECK-RV64-NEXT: [[ENTRY:.*:]]
+// CHECK-RV64-NEXT: [[TMP0:%.*]] = call <vscale x 4 x float> @llvm.riscv.vfwmul.nxv4f32.nxv4bf16.bf16.i64(<vscale x 4 x float> poison, <vscale x 4 x bfloat> [[VS2]], bfloat [[RS1]], i64 7, i64 [[VL]])
+// CHECK-RV64-NEXT: ret <vscale x 4 x float> [[TMP0]]
+//
+vfloat32m2_t test_vfwmul_vf_bf16m1_f32m2(vbfloat16m1_t vs2, __bf16 rs1,
+ size_t vl) {
+ return __riscv_vfwmul(vs2, rs1, vl);
+}
+
+// CHECK-RV64-LABEL: define dso_local <vscale x 8 x float> @test_vfwmul_vv_bf16m2_f32m4(
+// CHECK-RV64-SAME: <vscale x 8 x bfloat> [[VS2:%.*]], <vscale x 8 x bfloat> [[VS1:%.*]], i64 noundef [[VL:%.*]]) #[[ATTR0]] {
+// CHECK-RV64-NEXT: [[ENTRY:.*:]]
+// CHECK-RV64-NEXT: [[TMP0:%.*]] = call <vscale x 8 x float> @llvm.riscv.vfwmul.nxv8f32.nxv8bf16.nxv8bf16.i64(<vscale x 8 x float> poison, <vscale x 8 x bfloat> [[VS2]], <vscale x 8 x bfloat> [[VS1]], i64 7, i64 [[VL]])
+// CHECK-RV64-NEXT: ret <vscale x 8 x float> [[TMP0]]
+//
+vfloat32m4_t test_vfwmul_vv_bf16m2_f32m4(vbfloat16m2_t vs2, vbfloat16m2_t vs1,
+ size_t vl) {
+ return __riscv_vfwmul(vs2, vs1, vl);
+}
+
+// CHECK-RV64-LABEL: define dso_local <vscale x 8 x float> @test_vfwmul_vf_bf16m2_f32m4(
+// CHECK-RV64-SAME: <vscale x 8 x bfloat> [[VS2:%.*]], bfloat noundef [[RS1:%.*]], i64 noundef [[VL:%.*]]) #[[ATTR0]] {
+// CHECK-RV64-NEXT: [[ENTRY:.*:]]
+// CHECK-RV64-NEXT: [[TMP0:%.*]] = call <vscale x 8 x float> @llvm.riscv.vfwmul.nxv8f32.nxv8bf16.bf16.i64(<vscale x 8 x float> poison, <vscale x 8 x bfloat> [[VS2]], bfloat [[RS1]], i64 7, i64 [[VL]])
+// CHECK-RV64-NEXT: ret <vscale x 8 x float> [[TMP0]]
+//
+vfloat32m4_t test_vfwmul_vf_bf16m2_f32m4(vbfloat16m2_t vs2, __bf16 rs1,
+ size_t vl) {
+ return __riscv_vfwmul(vs2, rs1, vl);
+}
+
+// CHECK-RV64-LABEL: define dso_local <vscale x 16 x float> @test_vfwmul_vv_bf16m4_f32m8(
+// CHECK-RV64-SAME: <vscale x 16 x bfloat> [[VS2:%.*]], <vscale x 16 x bfloat> [[VS1:%.*]], i64 noundef [[VL:%.*]]) #[[ATTR0]] {
+// CHECK-RV64-NEXT: [[ENTRY:.*:]]
+// CHECK-RV64-NEXT: [[TMP0:%.*]] = call <vscale x 16 x float> @llvm.riscv.vfwmul.nxv16f32.nxv16bf16.nxv16bf16.i64(<vscale x 16 x float> poison, <vscale x 16 x bfloat> [[VS2]], <vscale x 16 x bfloat> [[VS1]], i64 7, i64 [[VL]])
+// CHECK-RV64-NEXT: ret <vscale x 16 x float> [[TMP0]]
+//
+vfloat32m8_t test_vfwmul_vv_bf16m4_f32m8(vbfloat16m4_t vs2, vbfloat16m4_t vs1,
+ size_t vl) {
+ return __riscv_vfwmul(vs2, vs1, vl);
+}
+
+// CHECK-RV64-LABEL: define dso_local <vscale x 16 x float> @test_vfwmul_vf_bf16m4_f32m8(
+// CHECK-RV64-SAME: <vscale x 16 x bfloat> [[VS2:%.*]], bfloat noundef [[RS1:%.*]], i64 noundef [[VL:%.*]]) #[[ATTR0]] {
+// CHECK-RV64-NEXT: [[ENTRY:.*:]]
+// CHECK-RV64-NEXT: [[TMP0:%.*]] = call <vscale x 16 x float> @llvm.riscv.vfwmul.nxv16f32.nxv16bf16.bf16.i64(<vscale x 16 x float> poison, <vscale x 16 x bfloat> [[VS2]], bfloat [[RS1]], i64 7, i64 [[VL]])
+// CHECK-RV64-NEXT: ret <vscale x 16 x float> [[TMP0]]
+//
+vfloat32m8_t test_vfwmul_vf_bf16m4_f32m8(vbfloat16m4_t vs2, __bf16 rs1,
+ size_t vl) {
+ return __riscv_vfwmul(vs2, rs1, vl);
+}
+
+// CHECK-RV64-LABEL: define dso_local <vscale x 1 x float> @test_vfwmul_vv_bf16mf4_f32mf2_m(
+// CHECK-RV64-SAME: <vscale x 1 x i1> [[VM:%.*]], <vscale x 1 x bfloat> [[VS2:%.*]], <vscale x 1 x bfloat> [[VS1:%.*]], i64 noundef [[VL:%.*]]) #[[ATTR0]] {
+// CHECK-RV64-NEXT: [[ENTRY:.*:]]
+// CHECK-RV64-NEXT: [[TMP0:%.*]] = call <vscale x 1 x float> @llvm.riscv.vfwmul.mask.nxv1f32.nxv1bf16.nxv1bf16.i64(<vscale x 1 x float> poison, <vscale x 1 x bfloat> [[VS2]], <vscale x 1 x bfloat> [[VS1]], <vscale x 1 x i1> [[VM]], i64 7, i64 [[VL]], i64 3)
+// CHECK-RV64-NEXT: ret <vscale x 1 x float> [[TMP0]]
+//
+vfloat32mf2_t test_vfwmul_vv_bf16mf4_f32mf2_m(vbool64_t vm, vbfloat16mf4_t vs2,
+ vbfloat16mf4_t vs1, size_t vl) {
+ return __riscv_vfwmul(vm, vs2, vs1, vl);
+}
+
+// CHECK-RV64-LABEL: define dso_local <vscale x 1 x float> @test_vfwmul_vf_bf16mf4_f32mf2_m(
+// CHECK-RV64-SAME: <vscale x 1 x i1> [[VM:%.*]], <vscale x 1 x bfloat> [[VS2:%.*]], bfloat noundef [[RS1:%.*]], i64 noundef [[VL:%.*]]) #[[ATTR0]] {
+// CHECK-RV64-NEXT: [[ENTRY:.*:]]
+// CHECK-RV64-NEXT: [[TMP0:%.*]] = call <vscale x 1 x float> @llvm.riscv.vfwmul.mask.nxv1f32.nxv1bf16.bf16.i64(<vscale x 1 x float> poison, <vscale x 1 x bfloat> [[VS2]], bfloat [[RS1]], <vscale x 1 x i1> [[VM]], i64 7, i64 [[VL]], i64 3)
+// CHECK-RV64-NEXT: ret <vscale x 1 x float> [[TMP0]]
+//
+vfloat32mf2_t test_vfwmul_vf_bf16mf4_f32mf2_m(vbool64_t vm, vbfloat16mf4_t vs2,
+ __bf16 rs1, size_t vl) {
+ return __riscv_vfwmul(vm, vs2, rs1, vl);
+}
+
+// CHECK-RV64-LABEL: define dso_local <vscale x 2 x float> @test_vfwmul_vv_bf16mf2_f32m1_m(
+// CHECK-RV64-SAME: <vscale x 2 x i1> [[VM:%.*]], <vscale x 2 x bfloat> [[VS2:%.*]], <vscale x 2 x bfloat> [[VS1:%.*]], i64 noundef [[VL:%.*]]) #[[ATTR0]] {
+// CHECK-RV64-NEXT: [[ENTRY:.*:]]
+// CHECK-RV64-NEXT: [[TMP0:%.*]] = call <vscale x 2 x float> @llvm.riscv.vfwmul.mask.nxv2f32.nxv2bf16.nxv2bf16.i64(<vscale x 2 x float> poison, <vscale x 2 x bfloat> [[VS2]], <vscale x 2 x bfloat> [[VS1]], <vscale x 2 x i1> [[VM]], i64 7, i64 [[VL]], i64 3)
+// CHECK-RV64-NEXT: ret <vscale x 2 x float> [[TMP0]]
+//
+vfloat32m1_t test_vfwmul_vv_bf16mf2_f32m1_m(vbool32_t vm, vbfloat16mf2_t vs2,
+ vbfloat16mf2_t vs1, size_t vl) {
+ return __riscv_vfwmul(vm, vs2, vs1, vl);
+}
+
+// CHECK-RV64-LABEL: define dso_local <vscale x 2 x float> @test_vfwmul_vf_bf16mf2_f32m1_m(
+// CHECK-RV64-SAME: <vscale x 2 x i1> [[VM:%.*]], <vscale x 2 x bfloat> [[VS2:%.*]], bfloat noundef [[RS1:%.*]], i64 noundef [[VL:%.*]]) #[[ATTR0]] {
+// CHECK-RV64-NEXT: [[ENTRY:.*:]]
+// CHECK-RV64-NEXT: [[TMP0:%.*]] = call <vscale x 2 x float> @llvm.riscv.vfwmul.mask.nxv2f32.nxv2bf16.bf16.i64(<vscale x 2 x float> poison, <vscale x 2 x bfloat> [[VS2]], bfloat [[RS1]], <vscale x 2 x i1> [[VM]], i64 7, i64 [[VL]], i64 3)
+// CHECK-RV64-NEXT: ret <vscale x 2 x float> [[TMP0]]
+//
+vfloat32m1_t test_vfwmul_vf_bf16mf2_f32m1_m(vbool32_t vm, vbfloat16mf2_t vs2,
+ __bf16 rs1, size_t vl) {
+ return __riscv_vfwmul(vm, vs2, rs1, vl);
+}
+
+// CHECK-RV64-LABEL: define dso_local <vscale x 4 x float> @test_vfwmul_vv_bf16m1_f32m2_m(
+// CHECK-RV64-SAME: <vscale x 4 x i1> [[VM:%.*]], <vscale x 4 x bfloat> [[VS2:%.*]], <vscale x 4 x bfloat> [[VS1:%.*]], i64 noundef [[VL:%.*]]) #[[ATTR0]] {
+// CHECK-RV64-NEXT: [[ENTRY:.*:]]
+// CHECK-RV64-NEXT: [[TMP0:%.*]] = call <vscale x 4 x float> @llvm.riscv.vfwmul.mask.nxv4f32.nxv4bf16.nxv4bf16.i64(<vscale x 4 x float> poison, <vscale x 4 x bfloat> [[VS2]], <vscale x 4 x bfloat> [[VS1]], <vscale x 4 x i1> [[VM]], i64 7, i64 [[VL]], i64 3)
+// CHECK-RV64-NEXT: ret <vscale x 4 x float> [[TMP0]]
+//
+vfloat32m2_t test_vfwmul_vv_bf16m1_f32m2_m(vbool16_t vm, vbfloat16m1_t vs2,
+ vbfloat16m1_t vs1, size_t vl) {
+ return __riscv_vfwmul(vm, vs2, vs1, vl);
+}
+
+// CHECK-RV64-LABEL: define dso_local <vscale x 4 x float> @test_vfwmul_vf_bf16m1_f32m2_m(
+// CHECK-RV64-SAME: <vscale x 4 x i1> [[VM:%.*]], <vscale x 4 x bfloat> [[VS2:%.*]], bfloat noundef [[RS1:%.*]], i64 noundef [[VL:%.*]]) #[[ATTR0]] {
+// CHECK-RV64-NEXT: [[ENTRY:.*:]]
+// CHECK-RV64-NEXT: [[TMP0:%.*]] = call <vscale x 4 x float> @llvm.riscv.vfwmul.mask.nxv4f32.nxv4bf16.bf16.i64(<vscale x 4 x float> poison, <vscale x 4 x bfloat> [[VS2]], bfloat [[RS1]], <vscale x 4 x i1> [[VM]], i64 7, i64 [[VL]], i64 3)
+// CHECK-RV64-NEXT: ret <vscale x 4 x float> [[TMP0]]
+//
+vfloat32m2_t test_vfwmul_vf_bf16m1_f32m2_m(vbool16_t vm, vbfloat16m1_t vs2,
+ __bf16 rs1, size_t vl) {
+ return __riscv_vfwmul(vm, vs2, rs1, vl);
+}
+
+// CHECK-RV64-LABEL: define dso_local <vscale x 8 x float> @test_vfwmul_vv_bf16m2_f32m4_m(
+// CHECK-RV64-SAME: <vscale x 8 x i1> [[VM:%.*]], <vscale x 8 x bfloat> [[VS2:%.*]], <vscale x 8 x bfloat> [[VS1:%.*]], i64 noundef [[VL:%.*]]) #[[ATTR0]] {
+// CHECK-RV64-NEXT: [[ENTRY:.*:]]
+// CHECK-RV64-NEXT: [[TMP0:%.*]] = call <vscale x 8 x float> @llvm.riscv.vfwmul.mask.nxv8f32.nxv8bf16.nxv8bf16.i64(<vscale x 8 x float> poison, <vscale x 8 x bfloat> [[VS2]], <vscale x 8 x bfloat> [[VS1]], <vscale x 8 x i1> [[VM]], i64 7, i64 [[VL]], i64 3)
+// CHECK-RV64-NEXT: ret <vscale x 8 x float> [[TMP0]]
+//
+vfloat32m4_t test_vfwmul_vv_bf16m2_f32m4_m(vbool8_t vm, vbfloat16m2_t vs2,
+ vbfloat16m2_t vs1, size_t vl) {
+ return __riscv_vfwmul(vm, vs2, vs1, vl);
+}
+
+// CHECK-RV64-LABEL: define dso_local <vscale x 8 x float> @test_vfwmul_vf_bf16m2_f32m4_m(
+// CHECK-RV64-SAME: <vscale x 8 x i1> [[VM:%.*]], <vscale x 8 x bfloat> [[VS2:%.*]], bfloat noundef [[RS1:%.*]], i64 noundef [[VL:%.*]]) #[[ATTR0]] {
+// CHECK-RV64-NEXT: [[ENTRY:.*:]]
+// CHECK-RV64-NEXT: [[TMP0:%.*]] = call <vscale x 8 x float> @llvm.riscv.vfwmul.mask.nxv8f32.nxv8bf16.bf16.i64(<vscale x 8 x float> poison, <vscale x 8 x bfloat> [[VS2]], bfloat [[RS1]], <vscale x 8 x i1> [[VM]], i64 7, i64 [[VL]], i64 3)
+// CHECK-RV64-NEXT: ret <vscale x 8 x float> [[TMP0]]
+//
+vfloat32m4_t test_vfwmul_vf_bf16m2_f32m4_m(vbool8_t vm, vbfloat16m2_t vs2,
+ __bf16 rs1, size_t vl) {
+ return __riscv_vfwmul(vm, vs2, rs1, vl);
+}
+
+// CHECK-RV64-LABEL: define dso_local <vscale x 16 x float> @test_vfwmul_vv_bf16m4_f32m8_m(
+// CHECK-RV64-SAME: <vscale x 16 x i1> [[VM:%.*]], <vscale x 16 x bfloat> [[VS2:%.*]], <vscale x 16 x bfloat> [[VS1:%.*]], i64 noundef [[VL:%.*]]) #[[ATTR0]] {
+// CHECK-RV64-NEXT: [[ENTRY:.*:]]
+// CHECK-RV64-NEXT: [[TMP0:%.*]] = call <vscale x 16 x float> @llvm.riscv.vfwmul.mask.nxv16f32.nxv16bf16.nxv16bf16.i64(<vscale x 16 x float> poison, <vscale x 16 x bfloat> [[VS2]], <vscale x 16 x bfloat> [[VS1]], <vscale x 16 x i1> [[VM]], i64 7, i64 [[VL]], i64 3)
+// CHECK-RV64-NEXT: ret <vscale x 16 x float> [[TMP0]]
+//
+vfloat32m8_t test_vfwmul_vv_bf16m4_f32m8_m(vbool4_t vm, vbfloat16m4_t vs2,
+ vbfloat16m4_t vs1, size_t vl) {
+ return __riscv_vfwmul(vm, vs2, vs1, vl);
+}
+
+// CHECK-RV64-LABEL: define dso_local <vscale x 16 x float> @test_vfwmul_vf_bf16m4_f32m8_m(
+// CHECK-RV64-SAME: <vscale x 16 x i1> [[VM:%.*]], <vscale x 16 x bfloat> [[VS2:%.*]], bfloat noundef [[RS1:%.*]], i64 noundef [[VL:%.*]]) #[[ATTR0]] {
+// CHECK-RV64-NEXT: [[ENTRY:.*:]]
+// CHECK-RV64-NEXT: [[TMP0:%.*]] = call <vscale x 16 x float> @llvm.riscv.vfwmul.mask.nxv16f32.nxv16bf16.bf16.i64(<vscale x 16 x float> poison, <vscale x 16 x bfloat> [[VS2]], bfloat [[RS1]], <vscale x 16 x i1> [[VM]], i64 7, i64 [[VL]], i64 3)
+// CHECK-RV64-NEXT: ret <vscale x 16 x float> [[TMP0]]
+//
+vfloat32m8_t test_vfwmul_vf_bf16m4_f32m8_m(vbool4_t vm, vbfloat16m4_t vs2,
+ __bf16 rs1, size_t vl) {
+ return __riscv_vfwmul(vm, vs2, rs1, vl);
+}
+
+// CHECK-RV64-LABEL: define dso_local <vscale x 1 x float> @test_vfwmul_vv_bf16mf4_f32mf2_rm(
+// CHECK-RV64-SAME: <vscale x 1 x bfloat> [[VS2:%.*]], <vscale x 1 x bfloat> [[VS1:%.*]], i64 noundef [[VL:%.*]]) #[[ATTR0]] {
+// CHECK-RV64-NEXT: [[ENTRY:.*:]]
+// CHECK-RV64-NEXT: [[TMP0:%.*]] = call <vscale x 1 x float> @llvm.riscv.vfwmul.nxv1f32.nxv1bf16.nxv1bf16.i64(<vscale x 1 x float> poison, <vscale x 1 x bfloat> [[VS2]], <vscale x 1 x bfloat> [[VS1]], i64 0, i64 [[VL]])
+// CHECK-RV64-NEXT: ret <vscale x 1 x float> [[TMP0]]
+//
+vfloat32mf2_t test_vfwmul_vv_bf16mf4_f32mf2_rm(vbfloat16mf4_t vs2,
+ vbfloat16mf4_t vs1, size_t vl) {
+ return __riscv_vfwmul(vs2, vs1, __RISCV_FRM_RNE, vl);
+}
+
+// CHECK-RV64-LABEL: define dso_local <vscale x 1 x float> @test_vfwmul_vf_bf16mf4_f32mf2_rm(
+// CHECK-RV64-SAME: <vscale x 1 x bfloat> [[VS2:%.*]], bfloat noundef [[RS1:%.*]], i64 noundef [[VL:%.*]]) #[[ATTR0]] {
+// CHECK-RV64-NEXT: [[ENTRY:.*:]]
+// CHECK-RV64-NEXT: [[TMP0:%.*]] = call <vscale x 1 x float> @llvm.riscv.vfwmul.nxv1f32.nxv1bf16.bf16.i64(<vscale x 1 x float> poison, <vscale x 1 x bfloat> [[VS2]], bfloat [[RS1]], i64 0, i64 [[VL]])
+// CHECK-RV64-NEXT: ret <vscale x 1 x float> [[TMP0]]
+//
+vfloat32mf2_t test_vfwmul_vf_bf16mf4_f32mf2_rm(vbfloat16mf4_t vs2, __bf16 rs1,
+ size_t vl) {
+ return __riscv_vfwmul(vs2, rs1, __RISCV_FRM_RNE, vl);
+}
+
+// CHECK-RV64-LABEL: define dso_local <vscale x 2 x float> @test_vfwmul_vv_bf16mf2_f32m1_rm(
+// CHECK-RV64-SAME: <vscale x 2 x bfloat> [[VS2:%.*]], <vscale x 2 x bfloat> [[VS1:%.*]], i64 noundef [[VL:%.*]]) #[[ATTR0]] {
+// CHECK-RV64-NEXT: [[ENTRY:.*:]]
+// CHECK-RV64-NEXT: [[TMP0:%.*]] = call <vscale x 2 x float> @llvm.riscv.vfwmul.nxv2f32.nxv2bf16.nxv2bf16.i64(<vscale x 2 x float> poison, <vscale x 2 x bfloat> [[VS2]], <vscale x 2 x bfloat> [[VS1]], i64 0, i64 [[VL]])
+// CHECK-RV64-NEXT: ret <vscale x 2 x float> [[TMP0]]
+//
+vfloat32m1_t test_vfwmul_vv_bf16mf2_f32m1_rm(vbfloat16mf2_t vs2,
+ vbfloat16mf2_t vs1, size_t vl) {
+ return __riscv_vfwmul(vs2, vs1, __RISCV_FRM_RNE, vl);
+}
+
+// CHECK-RV64-LABEL: define dso_local <vscale x 2 x float> @test_vfwmul_vf_bf16mf2_f32m1_rm(
+// CHECK-RV64-SAME: <vscale x 2 x bfloat> [[VS2:%.*]], bfloat noundef [[RS1:%.*]], i64 noundef [[VL:%.*]]) #[[ATTR0]] {
+// CHECK-RV64-NEXT: [[ENTRY:.*:]]
+// CHECK-RV64-NEXT: [[TMP0:%.*]] = call <vscale x 2 x float> @llvm.riscv.vfwmul.nxv2f32.nxv2bf16.bf16.i64(<vscale x 2 x float> poison, <vscale x 2 x bfloat> [[VS2]], bfloat [[RS1]], i64 0, i64 [[VL]])
+// CHECK-RV64-NEXT: ret <vscale x 2 x float> [[TMP0]]
+//
+vfloat32m1_t test_vfwmul_vf_bf16mf2_f32m1_rm(vbfloat16mf2_t vs2, __bf16 rs1,
+ size_t vl) {
+ return __riscv_vfwmul(vs2, rs1, __RISCV_FRM_RNE, vl);
+}
+
+// CHECK-RV64-LABEL: define dso_local <vscale x 4 x float> @test_vfwmul_vv_bf16m1_f32m2_rm(
+// CHECK-RV64-SAME: <vscale x 4 x bfloat> [[VS2:%.*]], <vscale x 4 x bfloat> [[VS1:%.*]], i64 noundef [[VL:%.*]]) #[[ATTR0]] {
+// CHECK-RV64-NEXT: [[ENTRY:.*:]]
+// CHECK-RV64-NEXT: [[TMP0:%.*]] = call <vscale x 4 x float> @llvm.riscv.vfwmul.nxv4f32.nxv4bf16.nxv4bf16.i64(<vscale x 4 x float> poison, <vscale x 4 x bfloat> [[VS2]], <vscale x 4 x bfloat> [[VS1]], i64 0, i64 [[VL]])
+// CHECK-RV64-NEXT: ret <vscale x 4 x float> [[TMP0]]
+//
+vfloat32m2_t test_vfwmul_vv_bf16m1_f32m2_rm(vbfloat16m1_t vs2,
+ vbfloat16m1_t vs1, size_t vl) {
+ return __riscv_vfwmul(vs2, vs1, __RISCV_FRM_RNE, vl);
+}
+
+// CHECK-RV64-LABEL: define dso_local <vscale x 4 x float> @test_vfwmul_vf_bf16m1_f32m2_rm(
+// CHECK-RV64-SAME: <vscale x 4 x bfloat> [[VS2:%.*]], bfloat noundef [[RS1:%.*]], i64 noundef [[VL:%.*]]) #[[ATTR0]] {
+// CHECK-RV64-NEXT: [[ENTRY:.*:]]
+// CHECK-RV64-NEXT: [[TMP0:%.*]] = call <vscale x 4 x float> @llvm.riscv.vfwmul.nxv4f32.nxv4bf16.bf16.i64(<vscale x 4 x float> poison, <vscale x 4 x bfloat> [[VS2]], bfloat [[RS1]], i64 0, i64 [[VL]])
+// CHECK-RV64-NEXT: ret <vscale x 4 x float> [[TMP0]]
+//
+vfloat32m2_t test_vfwmul_vf_bf16m1_f32m2_rm(vbfloat16m1_t vs2, __bf16 rs1,
+ size_t vl) {
+ return __riscv_vfwmul(vs2, rs1, __RISCV_FRM_RNE, vl);
+}
+
+// CHECK-RV64-LABEL: define dso_local <vscale x 8 x float> @test_vfwmul_vv_bf16m2_f32m4_rm(
+// CHECK-RV64-SAME: <vscale x 8 x bfloat> [[VS2:%.*]], <vscale x 8 x bfloat> [[VS1:%.*]], i64 noundef [[VL:%.*]]) #[[ATTR0]] {
+// CHECK-RV64-NEXT: [[ENTRY:.*:]]
+// CHECK-RV64-NEXT: [[TMP0:%.*]] = call <vscale x 8 x float> @llvm.riscv.vfwmul.nxv8f32.nxv8bf16.nxv8bf16.i64(<vscale x 8 x float> poison, <vscale x 8 x bfloat> [[VS2]], <vscale x 8 x bfloat> [[VS1]], i64 0, i64 [[VL]])
+// CHECK-RV64-NEXT: ret <vscale x 8 x float> [[TMP0]]
+//
+vfloat32m4_t test_vfwmul_vv_bf16m2_f32m4_rm(vbfloat16m2_t vs2,
+ vbfloat16m2_t vs1, size_t vl) {
+ return __riscv_vfwmul(vs2, vs1, __RISCV_FRM_RNE, vl);
+}
+
+// CHECK-RV64-LABEL: define dso_local <vscale x 8 x float> @test_vfwmul_vf_bf16m2_f32m4_rm(
+// CHECK-RV64-SAME: <vscale x 8 x bfloat> [[VS2:%.*]], bfloat noundef [[RS1:%.*]], i64 noundef [[VL:%.*]]) #[[ATTR0]] {
+// CHECK-RV64-NEXT: [[ENTRY:.*:]]
+// CHECK-RV64-NEXT: [[TMP0:%.*]] = call <vscale x 8 x float> @llvm.riscv.vfwmul.nxv8f32.nxv8bf16.bf16.i64(<vscale x 8 x float> poison, <vscale x 8 x bfloat> [[VS2]], bfloat [[RS1]], i64 0, i64 [[VL]])
+// CHECK-RV64-NEXT: ret <vscale x 8 x float> [[TMP0]]
+//
+vfloat32m4_t test_vfwmul_vf_bf16m2_f32m4_rm(vbfloat16m2_t vs2, __bf16 rs1,
+ size_t vl) {
+ return __riscv_vfwmul(vs2, rs1, __RISCV_FRM_RNE, vl);
+}
+
+// CHECK-RV64-LABEL: define dso_local <vscale x 16 x float> @test_vfwmul_vv_bf16m4_f32m8_rm(
+// CHECK-RV64-SAME: <vscale x 16 x bfloat> [[VS2:%.*]], <vscale x 16 x bfloat> [[VS1:%.*]], i64 noundef [[VL:%.*]]) #[[ATTR0]] {
+// CHECK-RV64-NEXT: [[ENTRY:.*:]]
+// CHECK-RV64-NEXT: [[TMP0:%.*]] = call <vscale x 16 x float> @llvm.riscv.vfwmul.nxv16f32.nxv16bf16.nxv16bf16.i64(<vscale x 16 x float> poison, <vscale x 16 x bfloat> [[VS2]], <vscale x 16 x bfloat> [[VS1]], i64 0, i64 [[VL]])
+// CHECK-RV64-NEXT: ret <vscale x 16 x float> [[TMP0]]
+//
+vfloat32m8_t test_vfwmul_vv_bf16m4_f32m8_rm(vbfloat16m4_t vs2,
+ vbfloat16m4_t vs1, size_t vl) {
+ return __riscv_vfwmul(vs2, vs1, __RISCV_FRM_RNE, vl);
+}
+
+// CHECK-RV64-LABEL: define dso_local <vscale x 16 x float> @test_vfwmul_vf_bf16m4_f32m8_rm(
+// CHECK-RV64-SAME: <vscale x 16 x bfloat> [[VS2:%.*]], bfloat noundef [[RS1:%.*]], i64 noundef [[VL:%.*]]) #[[ATTR0]] {
+// CHECK-RV64-NEXT: [[ENTRY:.*:]]
+// CHECK-RV64-NEXT: [[TMP0:%.*]] = call <vscale x 16 x float> @llvm.riscv.vfwmul.nxv16f32.nxv16bf16.bf16.i64(<vscale x 16 x float> poison, <vscale x 16 x bfloat> [[VS2]], bfloat [[RS1]], i64 0, i64 [[VL]])
+// CHECK-RV64-NEXT: ret <vscale x 16 x float> [[TMP0]]
+//
+vfloat32m8_t test_vfwmul_vf_bf16m4_f32m8_rm(vbfloat16m4_t vs2, __bf16 rs1,
+ size_t vl) {
+ return __riscv_vfwmul(vs2, rs1, __RISCV_FRM_RNE, vl);
+}
+
+// CHECK-RV64-LABEL: define dso_local <vscale x 1 x float> @test_vfwmul_vv_bf16mf4_f32mf2_rm_m(
+// CHECK-RV64-SAME: <vscale x 1 x i1> [[VM:%.*]], <vscale x 1 x bfloat> [[VS2:%.*]], <vscale x 1 x bfloat> [[VS1:%.*]], i64 noundef [[VL:%.*]]) #[[ATTR0]] {
+// CHECK-RV64-NEXT: [[ENTRY:.*:]]
+// CHECK-RV64-NEXT: [[TMP0:%.*]] = call <vscale x 1 x float> @llvm.riscv.vfwmul.mask.nxv1f32.nxv1bf16.nxv1bf16.i64(<vscale x 1 x float> poison, <vscale x 1 x bfloat> [[VS2]], <vscale x 1 x bfloat> [[VS1]], <vscale x 1 x i1> [[VM]], i64 0, i64 [[VL]], i64 3)
+// CHECK-RV64-NEXT: ret <vscale x 1 x float> [[TMP0]]
+//
+vfloat32mf2_t test_vfwmul_vv_bf16mf4_f32mf2_rm_m(vbool64_t vm,
+ vbfloat16mf4_t vs2,
+ vbfloat16mf4_t vs1,
+ size_t vl) {
+ return __riscv_vfwmul(vm, vs2, vs1, __RISCV_FRM_RNE, vl);
+}
+
+// CHECK-RV64-LABEL: define dso_local <vscale x 1 x float> @test_vfwmul_vf_bf16mf4_f32mf2_rm_m(
+// CHECK-RV64-SAME: <vscale x 1 x i1> [[VM:%.*]], <vscale x 1 x bfloat> [[VS2:%.*]], bfloat noundef [[RS1:%.*]], i64 noundef [[VL:%.*]]) #[[ATTR0]] {
+// CHECK-RV64-NEXT: [[ENTRY:.*:]]
+// CHECK-RV64-NEXT: [[TMP0:%.*]] = call <vscale x 1 x float> @llvm.riscv.vfwmul.mask.nxv1f32.nxv1bf16.bf16.i64(<vscale x 1 x float> poison, <vscale x 1 x bfloat> [[VS2]], bfloat [[RS1]], <vscale x 1 x i1> [[VM]], i64 0, i64 [[VL]], i64 3)
+// CHECK-RV64-NEXT: ret <vscale x 1 x float> [[TMP0]]
+//
+vfloat32mf2_t test_vfwmul_vf_bf16mf4_f32mf2_rm_m(vbool64_t vm,
+ vbfloat16mf4_t vs2, __bf16 rs1,
+ size_t vl) {
+ return __riscv_vfwmul(vm, vs2, rs1, __RISCV_FRM_RNE, vl);
+}
+
+// CHECK-RV64-LABEL: define dso_local <vscale x 2 x float> @test_vfwmul_vv_bf16mf2_f32m1_rm_m(
+// CHECK-RV64-SAME: <vscale x 2 x i1> [[VM:%.*]], <vscale x 2 x bfloat> [[VS2:%.*]], <vscale x 2 x bfloat> [[VS1:%.*]], i64 noundef [[VL:%.*]]) #[[ATTR0]] {
+// CHECK-RV64-NEXT: [[ENTRY:.*:]]
+// CHECK-RV64-NEXT: [[TMP0:%.*]] = call <vscale x 2 x float> @llvm.riscv.vfwmul.mask.nxv2f32.nxv2bf16.nxv2bf16.i64(<vscale x 2 x float> poison, <vscale x 2 x bfloat> [[VS2]], <vscale x 2 x bfloat> [[VS1]], <vscale x 2 x i1> [[VM]], i64 0, i64 [[VL]], i64 3)
+// CHECK-RV64-NEXT: ret <vscale x 2 x float> [[TMP0]]
+//
+vfloat32m1_t test_vfwmul_vv_bf16mf2_f32m1_rm_m(vbool32_t vm, vbfloat16mf2_t vs2,
+ vbfloat16mf2_t vs1, size_t vl) {
+ return __riscv_vfwmul(vm, vs2, vs1, __RISCV_FRM_RNE, vl);
+}
+
+// CHECK-RV64-LABEL: define dso_local <vscale x 2 x float> @test_vfwmul_vf_bf16mf2_f32m1_rm_m(
+// CHECK-RV64-SAME: <vscale x 2 x i1> [[VM:%.*]], <vscale x 2 x bfloat> [[VS2:%.*]], bfloat noundef [[RS1:%.*]], i64 noundef [[VL:%.*]]) #[[ATTR0]] {
+// CHECK-RV64-NEXT: [[ENTRY:.*:]]
+// CHECK-RV64-NEXT: [[TMP0:%.*]] = call <vscale x 2 x float> @llvm.riscv.vfwmul.mask.nxv2f32.nxv2bf16.bf16.i64(<vscale x 2 x float> poison, <vscale x 2 x bfloat> [[VS2]], bfloat [[RS1]], <vscale x 2 x i1> [[VM]], i64 0, i64 [[VL]], i64 3)
+// CHECK-RV64-NEXT: ret <vscale x 2 x float> [[TMP0]]
+//
+vfloat32m1_t test_vfwmul_vf_bf16mf2_f32m1_rm_m(vbool32_t vm, vbfloat16mf2_t vs2,
+ __bf16 rs1, size_t vl) {
+ return __riscv_vfwmul(vm, vs2, rs1, __RISCV_FRM_RNE, vl);
+}
+
+// CHECK-RV64-LABEL: define dso_local <vscale x 4 x float> @test_vfwmul_vv_bf16m1_f32m2_rm_m(
+// CHECK-RV64-SAME: <vscale x 4 x i1> [[VM:%.*]], <vscale x 4 x bfloat> [[VS2:%.*]], <vscale x 4 x bfloat> [[VS1:%.*]], i64 noundef [[VL:%.*]]) #[[ATTR0]] {
+// CHECK-RV64-NEXT: [[ENTRY:.*:]]
+// CHECK-RV64-NEXT: [[TMP0:%.*]] = call <vscale x 4 x float> @llvm.riscv.vfwmul.mask.nxv4f32.nxv4bf16.nxv4bf16.i64(<vscale x 4 x float> poison, <vscale x 4 x bfloat> [[VS2]], <vscale x 4 x bfloat> [[VS1]], <vscale x 4 x i1> [[VM]], i64 0, i64 [[VL]], i64 3)
+// CHECK-RV64-NEXT: ret <vscale x 4 x float> [[TMP0]]
+//
+vfloat32m2_t test_vfwmul_vv_bf16m1_f32m2_rm_m(vbool16_t vm, vbfloat16m1_t vs2,
+ vbfloat16m1_t vs1, size_t vl) {
+ return __riscv_vfwmul(vm, vs2, vs1, __RISCV_FRM_RNE, vl);
+}
+
+// CHECK-RV64-LABEL: define dso_local <vscale x 4 x float> @test_vfwmul_vf_bf16m1_f32m2_rm_m(
+// CHECK-RV64-SAME: <vscale x 4 x i1> [[VM:%.*]], <vscale x 4 x bfloat> [[VS2:%.*]], bfloat noundef [[RS1:%.*]], i64 noundef [[VL:%.*]]) #[[ATTR0]] {
+// CHECK-RV64-NEXT: [[ENTRY:.*:]]
+// CHECK-RV64-NEXT: [[TMP0:%.*]] = call <vscale x 4 x float> @llvm.riscv.vfwmul.mask.nxv4f32.nxv4bf16.bf16.i64(<vscale x 4 x float> poison, <vscale x 4 x bfloat> [[VS2]], bfloat [[RS1]], <vscale x 4 x i1> [[VM]], i64 0, i64 [[VL]], i64 3)
+// CHECK-RV64-NEXT: ret <vscale x 4 x float> [[TMP0]]
+//
+vfloat32m2_t test_vfwmul_vf_bf16m1_f32m2_rm_m(vbool16_t vm, vbfloat16m1_t vs2,
+ __bf16 rs1, size_t vl) {
+ return __riscv_vfwmul(vm, vs2, rs1, __RISCV_FRM_RNE, vl);
+}
+
+// CHECK-RV64-LABEL: define dso_local <vscale x 8 x float> @test_vfwmul_vv_bf16m2_f32m4_rm_m(
+// CHECK-RV64-SAME: <vscale x 8 x i1> [[VM:%.*]], <vscale x 8 x bfloat> [[VS2:%.*]], <vscale x 8 x bfloat> [[VS1:%.*]], i64 noundef [[VL:%.*]]) #[[ATTR0]] {
+// CHECK-RV64-NEXT: [[ENTRY:.*:]]
+// CHECK-RV64-NEXT: [[TMP0:%.*]] = call <vscale x 8 x float> @llvm.riscv.vfwmul.mask.nxv8f32.nxv8bf16.nxv8bf16.i64(<vscale x 8 x float> poison, <vscale x 8 x bfloat> [[VS2]], <vscale x 8 x bfloat> [[VS1]], <vscale x 8 x i1> [[VM]], i64 0, i64 [[VL]], i64 3)
+// CHECK-RV64-NEXT: ret <vscale x 8 x float> [[TMP0]]
+//
+vfloat32m4_t test_vfwmul_vv_bf16m2_f32m4_rm_m(vbool8_t vm, vbfloat16m2_t vs2,
+ vbfloat16m2_t vs1, size_t vl) {
+ return __riscv_vfwmul(vm, vs2, vs1, __RISCV_FRM_RNE, vl);
+}
+
+// CHECK-RV64-LABEL: define dso_local <vscale x 8 x float> @test_vfwmul_vf_bf16m2_f32m4_rm_m(
+// CHECK-RV64-SAME: <vscale x 8 x i1> [[VM:%.*]], <vscale x 8 x bfloat> [[VS2:%.*]], bfloat noundef [[RS1:%.*]], i64 noundef [[VL:%.*]]) #[[ATTR0]] {
+// CHECK-RV64-NEXT: [[ENTRY:.*:]]
+// CHECK-RV64-NEXT: [[TMP0:%.*]] = call <vscale x 8 x float> @llvm.riscv.vfwmul.mask.nxv8f32.nxv8bf16.bf16.i64(<vscale x 8 x float> poison, <vscale x 8 x bfloat> [[VS2]], bfloat [[RS1]], <vscale x 8 x i1> [[VM]], i64 0, i64 [[VL]], i64 3)
+// CHECK-RV64-NEXT: ret <vscale x 8 x float> [[TMP0]]
+//
+vfloat32m4_t test_vfwmul_vf_bf16m2_f32m4_rm_m(vbool8_t vm, vbfloat16m2_t vs2,
+ __bf16 rs1, size_t vl) {
+ return __riscv_vfwmul(vm, vs2, rs1, __RISCV_FRM_RNE, vl);
+}
+
+// CHECK-RV64-LABEL: define dso_local <vscale x 16 x float> @test_vfwmul_vv_bf16m4_f32m8_rm_m(
+// CHECK-RV64-SAME: <vscale x 16 x i1> [[VM:%.*]], <vscale x 16 x bfloat> [[VS2:%.*]], <vscale x 16 x bfloat> [[VS1:%.*]], i64 noundef [[VL:%.*]]) #[[ATTR0]] {
+// CHECK-RV64-NEXT: [[ENTRY:.*:]]
+// CHECK-RV64-NEXT: [[TMP0:%.*]] = call <vscale x 16 x float> @llvm.riscv.vfwmul.mask.nxv16f32.nxv16bf16.nxv16bf16.i64(<vscale x 16 x float> poison, <vscale x 16 x bfloat> [[VS2]], <vscale x 16 x bfloat> [[VS1]], <vscale x 16 x i1> [[VM]], i64 0, i64 [[VL]], i64 3)
+// CHECK-RV64-NEXT: ret <vscale x 16 x float> [[TMP0]]
+//
+vfloat32m8_t test_vfwmul_vv_bf16m4_f32m8_rm_m(vbool4_t vm, vbfloat16m4_t vs2,
+ vbfloat16m4_t vs1, size_t vl) {
+ return __riscv_vfwmul(vm, vs2, vs1, __RISCV_FRM_RNE, vl);
+}
+
+// CHECK-RV64-LABEL: define dso_local <vscale x 16 x float> @test_vfwmul_vf_bf16m4_f32m8_rm_m(
+// CHECK-RV64-SAME: <vscale x 16 x i1> [[VM:%.*]], <vscale x 16 x bfloat> [[VS2:%.*]], bfloat noundef [[RS1:%.*]], i64 noundef [[VL:%.*]]) #[[ATTR0]] {
+// CHECK-RV64-NEXT: [[ENTRY:.*:]]
+// CHECK-RV64-NEXT: [[TMP0:%.*]] = call <vscale x 16 x float> @llvm.riscv.vfwmul.mask.nxv16f32.nxv16bf16.bf16.i64(<vscale x 16 x float> poison, <vscale x 16 x bfloat> [[VS2]], bfloat [[RS1]], <vscale x 16 x i1> [[VM]], i64 0, i64 [[VL]], i64 3)
+// CHECK-RV64-NEXT: ret <vscale x 16 x float> [[TMP0]]
+//
+vfloat32m8_t test_vfwmul_vf_bf16m4_f32m8_rm_m(vbool4_t vm, vbfloat16m4_t vs2,
+ __bf16 rs1, size_t vl) {
+ return __riscv_vfwmul(vm, vs2, rs1, __RISCV_FRM_RNE, vl);
+}
diff --git a/clang/test/CodeGen/RISCV/rvv-intrinsics-autogenerated/zvfbfa/non-policy/overloaded/vfwnmacc.c b/clang/test/CodeGen/RISCV/rvv-intrinsics-autogenerated/zvfbfa/non-policy/overloaded/vfwnmacc.c
new file mode 100644
index 0000000..7836931
--- /dev/null
+++ b/clang/test/CodeGen/RISCV/rvv-intrinsics-autogenerated/zvfbfa/non-policy/overloaded/vfwnmacc.c
@@ -0,0 +1,480 @@
+// NOTE: Assertions have been autogenerated by utils/update_cc_test_checks.py UTC_ARGS: --version 5
+// REQUIRES: riscv-registered-target
+// RUN: %clang_cc1 -triple riscv64 -target-feature +v \
+// RUN: -target-feature +experimental-zvfbfa -disable-O0-optnone \
+// RUN: -emit-llvm %s -o - | opt -S -passes=mem2reg | \
+// RUN: FileCheck --check-prefix=CHECK-RV64 %s
+
+#include <riscv_vector.h>
+
+// CHECK-RV64-LABEL: define dso_local <vscale x 1 x float> @test_vfwnmacc_vv_bf16mf4_f32mf2(
+// CHECK-RV64-SAME: <vscale x 1 x float> [[VD:%.*]], <vscale x 1 x bfloat> [[VS1:%.*]], <vscale x 1 x bfloat> [[VS2:%.*]], i64 noundef [[VL:%.*]]) #[[ATTR0:[0-9]+]] {
+// CHECK-RV64-NEXT: [[ENTRY:.*:]]
+// CHECK-RV64-NEXT: [[TMP0:%.*]] = call <vscale x 1 x float> @llvm.riscv.vfwnmacc.nxv1f32.nxv1bf16.nxv1bf16.i64(<vscale x 1 x float> [[VD]], <vscale x 1 x bfloat> [[VS1]], <vscale x 1 x bfloat> [[VS2]], i64 7, i64 [[VL]], i64 3)
+// CHECK-RV64-NEXT: ret <vscale x 1 x float> [[TMP0]]
+//
+vfloat32mf2_t test_vfwnmacc_vv_bf16mf4_f32mf2(vfloat32mf2_t vd,
+ vbfloat16mf4_t vs1,
+ vbfloat16mf4_t vs2, size_t vl) {
+ return __riscv_vfwnmacc(vd, vs1, vs2, vl);
+}
+
+// CHECK-RV64-LABEL: define dso_local <vscale x 1 x float> @test_vfwnmacc_vf_bf16mf4_f32mf2(
+// CHECK-RV64-SAME: <vscale x 1 x float> [[VD:%.*]], bfloat noundef [[VS1:%.*]], <vscale x 1 x bfloat> [[VS2:%.*]], i64 noundef [[VL:%.*]]) #[[ATTR0]] {
+// CHECK-RV64-NEXT: [[ENTRY:.*:]]
+// CHECK-RV64-NEXT: [[TMP0:%.*]] = call <vscale x 1 x float> @llvm.riscv.vfwnmacc.nxv1f32.bf16.nxv1bf16.i64(<vscale x 1 x float> [[VD]], bfloat [[VS1]], <vscale x 1 x bfloat> [[VS2]], i64 7, i64 [[VL]], i64 3)
+// CHECK-RV64-NEXT: ret <vscale x 1 x float> [[TMP0]]
+//
+vfloat32mf2_t test_vfwnmacc_vf_bf16mf4_f32mf2(vfloat32mf2_t vd, __bf16 vs1,
+ vbfloat16mf4_t vs2, size_t vl) {
+ return __riscv_vfwnmacc(vd, vs1, vs2, vl);
+}
+
+// CHECK-RV64-LABEL: define dso_local <vscale x 2 x float> @test_vfwnmacc_vv_bf16mf2_f32m1(
+// CHECK-RV64-SAME: <vscale x 2 x float> [[VD:%.*]], <vscale x 2 x bfloat> [[VS1:%.*]], <vscale x 2 x bfloat> [[VS2:%.*]], i64 noundef [[VL:%.*]]) #[[ATTR0]] {
+// CHECK-RV64-NEXT: [[ENTRY:.*:]]
+// CHECK-RV64-NEXT: [[TMP0:%.*]] = call <vscale x 2 x float> @llvm.riscv.vfwnmacc.nxv2f32.nxv2bf16.nxv2bf16.i64(<vscale x 2 x float> [[VD]], <vscale x 2 x bfloat> [[VS1]], <vscale x 2 x bfloat> [[VS2]], i64 7, i64 [[VL]], i64 3)
+// CHECK-RV64-NEXT: ret <vscale x 2 x float> [[TMP0]]
+//
+vfloat32m1_t test_vfwnmacc_vv_bf16mf2_f32m1(vfloat32m1_t vd, vbfloat16mf2_t vs1,
+ vbfloat16mf2_t vs2, size_t vl) {
+ return __riscv_vfwnmacc(vd, vs1, vs2, vl);
+}
+
+// CHECK-RV64-LABEL: define dso_local <vscale x 2 x float> @test_vfwnmacc_vf_bf16mf2_f32m1(
+// CHECK-RV64-SAME: <vscale x 2 x float> [[VD:%.*]], bfloat noundef [[VS1:%.*]], <vscale x 2 x bfloat> [[VS2:%.*]], i64 noundef [[VL:%.*]]) #[[ATTR0]] {
+// CHECK-RV64-NEXT: [[ENTRY:.*:]]
+// CHECK-RV64-NEXT: [[TMP0:%.*]] = call <vscale x 2 x float> @llvm.riscv.vfwnmacc.nxv2f32.bf16.nxv2bf16.i64(<vscale x 2 x float> [[VD]], bfloat [[VS1]], <vscale x 2 x bfloat> [[VS2]], i64 7, i64 [[VL]], i64 3)
+// CHECK-RV64-NEXT: ret <vscale x 2 x float> [[TMP0]]
+//
+vfloat32m1_t test_vfwnmacc_vf_bf16mf2_f32m1(vfloat32m1_t vd, __bf16 vs1,
+ vbfloat16mf2_t vs2, size_t vl) {
+ return __riscv_vfwnmacc(vd, vs1, vs2, vl);
+}
+
+// CHECK-RV64-LABEL: define dso_local <vscale x 4 x float> @test_vfwnmacc_vv_bf16m1_f32m2(
+// CHECK-RV64-SAME: <vscale x 4 x float> [[VD:%.*]], <vscale x 4 x bfloat> [[VS1:%.*]], <vscale x 4 x bfloat> [[VS2:%.*]], i64 noundef [[VL:%.*]]) #[[ATTR0]] {
+// CHECK-RV64-NEXT: [[ENTRY:.*:]]
+// CHECK-RV64-NEXT: [[TMP0:%.*]] = call <vscale x 4 x float> @llvm.riscv.vfwnmacc.nxv4f32.nxv4bf16.nxv4bf16.i64(<vscale x 4 x float> [[VD]], <vscale x 4 x bfloat> [[VS1]], <vscale x 4 x bfloat> [[VS2]], i64 7, i64 [[VL]], i64 3)
+// CHECK-RV64-NEXT: ret <vscale x 4 x float> [[TMP0]]
+//
+vfloat32m2_t test_vfwnmacc_vv_bf16m1_f32m2(vfloat32m2_t vd, vbfloat16m1_t vs1,
+ vbfloat16m1_t vs2, size_t vl) {
+ return __riscv_vfwnmacc(vd, vs1, vs2, vl);
+}
+
+// CHECK-RV64-LABEL: define dso_local <vscale x 4 x float> @test_vfwnmacc_vf_bf16m1_f32m2(
+// CHECK-RV64-SAME: <vscale x 4 x float> [[VD:%.*]], bfloat noundef [[VS1:%.*]], <vscale x 4 x bfloat> [[VS2:%.*]], i64 noundef [[VL:%.*]]) #[[ATTR0]] {
+// CHECK-RV64-NEXT: [[ENTRY:.*:]]
+// CHECK-RV64-NEXT: [[TMP0:%.*]] = call <vscale x 4 x float> @llvm.riscv.vfwnmacc.nxv4f32.bf16.nxv4bf16.i64(<vscale x 4 x float> [[VD]], bfloat [[VS1]], <vscale x 4 x bfloat> [[VS2]], i64 7, i64 [[VL]], i64 3)
+// CHECK-RV64-NEXT: ret <vscale x 4 x float> [[TMP0]]
+//
+vfloat32m2_t test_vfwnmacc_vf_bf16m1_f32m2(vfloat32m2_t vd, __bf16 vs1,
+ vbfloat16m1_t vs2, size_t vl) {
+ return __riscv_vfwnmacc(vd, vs1, vs2, vl);
+}
+
+// CHECK-RV64-LABEL: define dso_local <vscale x 8 x float> @test_vfwnmacc_vv_bf16m2_f32m4(
+// CHECK-RV64-SAME: <vscale x 8 x float> [[VD:%.*]], <vscale x 8 x bfloat> [[VS1:%.*]], <vscale x 8 x bfloat> [[VS2:%.*]], i64 noundef [[VL:%.*]]) #[[ATTR0]] {
+// CHECK-RV64-NEXT: [[ENTRY:.*:]]
+// CHECK-RV64-NEXT: [[TMP0:%.*]] = call <vscale x 8 x float> @llvm.riscv.vfwnmacc.nxv8f32.nxv8bf16.nxv8bf16.i64(<vscale x 8 x float> [[VD]], <vscale x 8 x bfloat> [[VS1]], <vscale x 8 x bfloat> [[VS2]], i64 7, i64 [[VL]], i64 3)
+// CHECK-RV64-NEXT: ret <vscale x 8 x float> [[TMP0]]
+//
+vfloat32m4_t test_vfwnmacc_vv_bf16m2_f32m4(vfloat32m4_t vd, vbfloat16m2_t vs1,
+ vbfloat16m2_t vs2, size_t vl) {
+ return __riscv_vfwnmacc(vd, vs1, vs2, vl);
+}
+
+// CHECK-RV64-LABEL: define dso_local <vscale x 8 x float> @test_vfwnmacc_vf_bf16m2_f32m4(
+// CHECK-RV64-SAME: <vscale x 8 x float> [[VD:%.*]], bfloat noundef [[VS1:%.*]], <vscale x 8 x bfloat> [[VS2:%.*]], i64 noundef [[VL:%.*]]) #[[ATTR0]] {
+// CHECK-RV64-NEXT: [[ENTRY:.*:]]
+// CHECK-RV64-NEXT: [[TMP0:%.*]] = call <vscale x 8 x float> @llvm.riscv.vfwnmacc.nxv8f32.bf16.nxv8bf16.i64(<vscale x 8 x float> [[VD]], bfloat [[VS1]], <vscale x 8 x bfloat> [[VS2]], i64 7, i64 [[VL]], i64 3)
+// CHECK-RV64-NEXT: ret <vscale x 8 x float> [[TMP0]]
+//
+vfloat32m4_t test_vfwnmacc_vf_bf16m2_f32m4(vfloat32m4_t vd, __bf16 vs1,
+ vbfloat16m2_t vs2, size_t vl) {
+ return __riscv_vfwnmacc(vd, vs1, vs2, vl);
+}
+
+// CHECK-RV64-LABEL: define dso_local <vscale x 16 x float> @test_vfwnmacc_vv_bf16m4_f32m8(
+// CHECK-RV64-SAME: <vscale x 16 x float> [[VD:%.*]], <vscale x 16 x bfloat> [[VS1:%.*]], <vscale x 16 x bfloat> [[VS2:%.*]], i64 noundef [[VL:%.*]]) #[[ATTR0]] {
+// CHECK-RV64-NEXT: [[ENTRY:.*:]]
+// CHECK-RV64-NEXT: [[TMP0:%.*]] = call <vscale x 16 x float> @llvm.riscv.vfwnmacc.nxv16f32.nxv16bf16.nxv16bf16.i64(<vscale x 16 x float> [[VD]], <vscale x 16 x bfloat> [[VS1]], <vscale x 16 x bfloat> [[VS2]], i64 7, i64 [[VL]], i64 3)
+// CHECK-RV64-NEXT: ret <vscale x 16 x float> [[TMP0]]
+//
+vfloat32m8_t test_vfwnmacc_vv_bf16m4_f32m8(vfloat32m8_t vd, vbfloat16m4_t vs1,
+ vbfloat16m4_t vs2, size_t vl) {
+ return __riscv_vfwnmacc(vd, vs1, vs2, vl);
+}
+
+// CHECK-RV64-LABEL: define dso_local <vscale x 16 x float> @test_vfwnmacc_vf_bf16m4_f32m8(
+// CHECK-RV64-SAME: <vscale x 16 x float> [[VD:%.*]], bfloat noundef [[VS1:%.*]], <vscale x 16 x bfloat> [[VS2:%.*]], i64 noundef [[VL:%.*]]) #[[ATTR0]] {
+// CHECK-RV64-NEXT: [[ENTRY:.*:]]
+// CHECK-RV64-NEXT: [[TMP0:%.*]] = call <vscale x 16 x float> @llvm.riscv.vfwnmacc.nxv16f32.bf16.nxv16bf16.i64(<vscale x 16 x float> [[VD]], bfloat [[VS1]], <vscale x 16 x bfloat> [[VS2]], i64 7, i64 [[VL]], i64 3)
+// CHECK-RV64-NEXT: ret <vscale x 16 x float> [[TMP0]]
+//
+vfloat32m8_t test_vfwnmacc_vf_bf16m4_f32m8(vfloat32m8_t vd, __bf16 vs1,
+ vbfloat16m4_t vs2, size_t vl) {
+ return __riscv_vfwnmacc(vd, vs1, vs2, vl);
+}
+
+// CHECK-RV64-LABEL: define dso_local <vscale x 1 x float> @test_vfwnmacc_vv_bf16mf4_f32mf2_m(
+// CHECK-RV64-SAME: <vscale x 1 x i1> [[VM:%.*]], <vscale x 1 x float> [[VD:%.*]], <vscale x 1 x bfloat> [[VS1:%.*]], <vscale x 1 x bfloat> [[VS2:%.*]], i64 noundef [[VL:%.*]]) #[[ATTR0]] {
+// CHECK-RV64-NEXT: [[ENTRY:.*:]]
+// CHECK-RV64-NEXT: [[TMP0:%.*]] = call <vscale x 1 x float> @llvm.riscv.vfwnmacc.mask.nxv1f32.nxv1bf16.nxv1bf16.i64(<vscale x 1 x float> [[VD]], <vscale x 1 x bfloat> [[VS1]], <vscale x 1 x bfloat> [[VS2]], <vscale x 1 x i1> [[VM]], i64 7, i64 [[VL]], i64 3)
+// CHECK-RV64-NEXT: ret <vscale x 1 x float> [[TMP0]]
+//
+vfloat32mf2_t test_vfwnmacc_vv_bf16mf4_f32mf2_m(vbool64_t vm, vfloat32mf2_t vd,
+ vbfloat16mf4_t vs1,
+ vbfloat16mf4_t vs2, size_t vl) {
+ return __riscv_vfwnmacc(vm, vd, vs1, vs2, vl);
+}
+
+// CHECK-RV64-LABEL: define dso_local <vscale x 1 x float> @test_vfwnmacc_vf_bf16mf4_f32mf2_m(
+// CHECK-RV64-SAME: <vscale x 1 x i1> [[VM:%.*]], <vscale x 1 x float> [[VD:%.*]], bfloat noundef [[VS1:%.*]], <vscale x 1 x bfloat> [[VS2:%.*]], i64 noundef [[VL:%.*]]) #[[ATTR0]] {
+// CHECK-RV64-NEXT: [[ENTRY:.*:]]
+// CHECK-RV64-NEXT: [[TMP0:%.*]] = call <vscale x 1 x float> @llvm.riscv.vfwnmacc.mask.nxv1f32.bf16.nxv1bf16.i64(<vscale x 1 x float> [[VD]], bfloat [[VS1]], <vscale x 1 x bfloat> [[VS2]], <vscale x 1 x i1> [[VM]], i64 7, i64 [[VL]], i64 3)
+// CHECK-RV64-NEXT: ret <vscale x 1 x float> [[TMP0]]
+//
+vfloat32mf2_t test_vfwnmacc_vf_bf16mf4_f32mf2_m(vbool64_t vm, vfloat32mf2_t vd,
+ __bf16 vs1, vbfloat16mf4_t vs2,
+ size_t vl) {
+ return __riscv_vfwnmacc(vm, vd, vs1, vs2, vl);
+}
+
+// CHECK-RV64-LABEL: define dso_local <vscale x 2 x float> @test_vfwnmacc_vv_bf16mf2_f32m1_m(
+// CHECK-RV64-SAME: <vscale x 2 x i1> [[VM:%.*]], <vscale x 2 x float> [[VD:%.*]], <vscale x 2 x bfloat> [[VS1:%.*]], <vscale x 2 x bfloat> [[VS2:%.*]], i64 noundef [[VL:%.*]]) #[[ATTR0]] {
+// CHECK-RV64-NEXT: [[ENTRY:.*:]]
+// CHECK-RV64-NEXT: [[TMP0:%.*]] = call <vscale x 2 x float> @llvm.riscv.vfwnmacc.mask.nxv2f32.nxv2bf16.nxv2bf16.i64(<vscale x 2 x float> [[VD]], <vscale x 2 x bfloat> [[VS1]], <vscale x 2 x bfloat> [[VS2]], <vscale x 2 x i1> [[VM]], i64 7, i64 [[VL]], i64 3)
+// CHECK-RV64-NEXT: ret <vscale x 2 x float> [[TMP0]]
+//
+vfloat32m1_t test_vfwnmacc_vv_bf16mf2_f32m1_m(vbool32_t vm, vfloat32m1_t vd,
+ vbfloat16mf2_t vs1,
+ vbfloat16mf2_t vs2, size_t vl) {
+ return __riscv_vfwnmacc(vm, vd, vs1, vs2, vl);
+}
+
+// CHECK-RV64-LABEL: define dso_local <vscale x 2 x float> @test_vfwnmacc_vf_bf16mf2_f32m1_m(
+// CHECK-RV64-SAME: <vscale x 2 x i1> [[VM:%.*]], <vscale x 2 x float> [[VD:%.*]], bfloat noundef [[VS1:%.*]], <vscale x 2 x bfloat> [[VS2:%.*]], i64 noundef [[VL:%.*]]) #[[ATTR0]] {
+// CHECK-RV64-NEXT: [[ENTRY:.*:]]
+// CHECK-RV64-NEXT: [[TMP0:%.*]] = call <vscale x 2 x float> @llvm.riscv.vfwnmacc.mask.nxv2f32.bf16.nxv2bf16.i64(<vscale x 2 x float> [[VD]], bfloat [[VS1]], <vscale x 2 x bfloat> [[VS2]], <vscale x 2 x i1> [[VM]], i64 7, i64 [[VL]], i64 3)
+// CHECK-RV64-NEXT: ret <vscale x 2 x float> [[TMP0]]
+//
+vfloat32m1_t test_vfwnmacc_vf_bf16mf2_f32m1_m(vbool32_t vm, vfloat32m1_t vd,
+ __bf16 vs1, vbfloat16mf2_t vs2,
+ size_t vl) {
+ return __riscv_vfwnmacc(vm, vd, vs1, vs2, vl);
+}
+
+// CHECK-RV64-LABEL: define dso_local <vscale x 4 x float> @test_vfwnmacc_vv_bf16m1_f32m2_m(
+// CHECK-RV64-SAME: <vscale x 4 x i1> [[VM:%.*]], <vscale x 4 x float> [[VD:%.*]], <vscale x 4 x bfloat> [[VS1:%.*]], <vscale x 4 x bfloat> [[VS2:%.*]], i64 noundef [[VL:%.*]]) #[[ATTR0]] {
+// CHECK-RV64-NEXT: [[ENTRY:.*:]]
+// CHECK-RV64-NEXT: [[TMP0:%.*]] = call <vscale x 4 x float> @llvm.riscv.vfwnmacc.mask.nxv4f32.nxv4bf16.nxv4bf16.i64(<vscale x 4 x float> [[VD]], <vscale x 4 x bfloat> [[VS1]], <vscale x 4 x bfloat> [[VS2]], <vscale x 4 x i1> [[VM]], i64 7, i64 [[VL]], i64 3)
+// CHECK-RV64-NEXT: ret <vscale x 4 x float> [[TMP0]]
+//
+vfloat32m2_t test_vfwnmacc_vv_bf16m1_f32m2_m(vbool16_t vm, vfloat32m2_t vd,
+ vbfloat16m1_t vs1,
+ vbfloat16m1_t vs2, size_t vl) {
+ return __riscv_vfwnmacc(vm, vd, vs1, vs2, vl);
+}
+
+// CHECK-RV64-LABEL: define dso_local <vscale x 4 x float> @test_vfwnmacc_vf_bf16m1_f32m2_m(
+// CHECK-RV64-SAME: <vscale x 4 x i1> [[VM:%.*]], <vscale x 4 x float> [[VD:%.*]], bfloat noundef [[VS1:%.*]], <vscale x 4 x bfloat> [[VS2:%.*]], i64 noundef [[VL:%.*]]) #[[ATTR0]] {
+// CHECK-RV64-NEXT: [[ENTRY:.*:]]
+// CHECK-RV64-NEXT: [[TMP0:%.*]] = call <vscale x 4 x float> @llvm.riscv.vfwnmacc.mask.nxv4f32.bf16.nxv4bf16.i64(<vscale x 4 x float> [[VD]], bfloat [[VS1]], <vscale x 4 x bfloat> [[VS2]], <vscale x 4 x i1> [[VM]], i64 7, i64 [[VL]], i64 3)
+// CHECK-RV64-NEXT: ret <vscale x 4 x float> [[TMP0]]
+//
+vfloat32m2_t test_vfwnmacc_vf_bf16m1_f32m2_m(vbool16_t vm, vfloat32m2_t vd,
+ __bf16 vs1, vbfloat16m1_t vs2,
+ size_t vl) {
+ return __riscv_vfwnmacc(vm, vd, vs1, vs2, vl);
+}
+
+// CHECK-RV64-LABEL: define dso_local <vscale x 8 x float> @test_vfwnmacc_vv_bf16m2_f32m4_m(
+// CHECK-RV64-SAME: <vscale x 8 x i1> [[VM:%.*]], <vscale x 8 x float> [[VD:%.*]], <vscale x 8 x bfloat> [[VS1:%.*]], <vscale x 8 x bfloat> [[VS2:%.*]], i64 noundef [[VL:%.*]]) #[[ATTR0]] {
+// CHECK-RV64-NEXT: [[ENTRY:.*:]]
+// CHECK-RV64-NEXT: [[TMP0:%.*]] = call <vscale x 8 x float> @llvm.riscv.vfwnmacc.mask.nxv8f32.nxv8bf16.nxv8bf16.i64(<vscale x 8 x float> [[VD]], <vscale x 8 x bfloat> [[VS1]], <vscale x 8 x bfloat> [[VS2]], <vscale x 8 x i1> [[VM]], i64 7, i64 [[VL]], i64 3)
+// CHECK-RV64-NEXT: ret <vscale x 8 x float> [[TMP0]]
+//
+vfloat32m4_t test_vfwnmacc_vv_bf16m2_f32m4_m(vbool8_t vm, vfloat32m4_t vd,
+ vbfloat16m2_t vs1,
+ vbfloat16m2_t vs2, size_t vl) {
+ return __riscv_vfwnmacc(vm, vd, vs1, vs2, vl);
+}
+
+// CHECK-RV64-LABEL: define dso_local <vscale x 8 x float> @test_vfwnmacc_vf_bf16m2_f32m4_m(
+// CHECK-RV64-SAME: <vscale x 8 x i1> [[VM:%.*]], <vscale x 8 x float> [[VD:%.*]], bfloat noundef [[VS1:%.*]], <vscale x 8 x bfloat> [[VS2:%.*]], i64 noundef [[VL:%.*]]) #[[ATTR0]] {
+// CHECK-RV64-NEXT: [[ENTRY:.*:]]
+// CHECK-RV64-NEXT: [[TMP0:%.*]] = call <vscale x 8 x float> @llvm.riscv.vfwnmacc.mask.nxv8f32.bf16.nxv8bf16.i64(<vscale x 8 x float> [[VD]], bfloat [[VS1]], <vscale x 8 x bfloat> [[VS2]], <vscale x 8 x i1> [[VM]], i64 7, i64 [[VL]], i64 3)
+// CHECK-RV64-NEXT: ret <vscale x 8 x float> [[TMP0]]
+//
+vfloat32m4_t test_vfwnmacc_vf_bf16m2_f32m4_m(vbool8_t vm, vfloat32m4_t vd,
+ __bf16 vs1, vbfloat16m2_t vs2,
+ size_t vl) {
+ return __riscv_vfwnmacc(vm, vd, vs1, vs2, vl);
+}
+
+// CHECK-RV64-LABEL: define dso_local <vscale x 16 x float> @test_vfwnmacc_vv_bf16m4_f32m8_m(
+// CHECK-RV64-SAME: <vscale x 16 x i1> [[VM:%.*]], <vscale x 16 x float> [[VD:%.*]], <vscale x 16 x bfloat> [[VS1:%.*]], <vscale x 16 x bfloat> [[VS2:%.*]], i64 noundef [[VL:%.*]]) #[[ATTR0]] {
+// CHECK-RV64-NEXT: [[ENTRY:.*:]]
+// CHECK-RV64-NEXT: [[TMP0:%.*]] = call <vscale x 16 x float> @llvm.riscv.vfwnmacc.mask.nxv16f32.nxv16bf16.nxv16bf16.i64(<vscale x 16 x float> [[VD]], <vscale x 16 x bfloat> [[VS1]], <vscale x 16 x bfloat> [[VS2]], <vscale x 16 x i1> [[VM]], i64 7, i64 [[VL]], i64 3)
+// CHECK-RV64-NEXT: ret <vscale x 16 x float> [[TMP0]]
+//
+vfloat32m8_t test_vfwnmacc_vv_bf16m4_f32m8_m(vbool4_t vm, vfloat32m8_t vd,
+ vbfloat16m4_t vs1,
+ vbfloat16m4_t vs2, size_t vl) {
+ return __riscv_vfwnmacc(vm, vd, vs1, vs2, vl);
+}
+
+// CHECK-RV64-LABEL: define dso_local <vscale x 16 x float> @test_vfwnmacc_vf_bf16m4_f32m8_m(
+// CHECK-RV64-SAME: <vscale x 16 x i1> [[VM:%.*]], <vscale x 16 x float> [[VD:%.*]], bfloat noundef [[VS1:%.*]], <vscale x 16 x bfloat> [[VS2:%.*]], i64 noundef [[VL:%.*]]) #[[ATTR0]] {
+// CHECK-RV64-NEXT: [[ENTRY:.*:]]
+// CHECK-RV64-NEXT: [[TMP0:%.*]] = call <vscale x 16 x float> @llvm.riscv.vfwnmacc.mask.nxv16f32.bf16.nxv16bf16.i64(<vscale x 16 x float> [[VD]], bfloat [[VS1]], <vscale x 16 x bfloat> [[VS2]], <vscale x 16 x i1> [[VM]], i64 7, i64 [[VL]], i64 3)
+// CHECK-RV64-NEXT: ret <vscale x 16 x float> [[TMP0]]
+//
+vfloat32m8_t test_vfwnmacc_vf_bf16m4_f32m8_m(vbool4_t vm, vfloat32m8_t vd,
+ __bf16 vs1, vbfloat16m4_t vs2,
+ size_t vl) {
+ return __riscv_vfwnmacc(vm, vd, vs1, vs2, vl);
+}
+
+// CHECK-RV64-LABEL: define dso_local <vscale x 1 x float> @test_vfwnmacc_vv_bf16mf4_f32mf2_rm(
+// CHECK-RV64-SAME: <vscale x 1 x float> [[VD:%.*]], <vscale x 1 x bfloat> [[VS1:%.*]], <vscale x 1 x bfloat> [[VS2:%.*]], i64 noundef [[VL:%.*]]) #[[ATTR0]] {
+// CHECK-RV64-NEXT: [[ENTRY:.*:]]
+// CHECK-RV64-NEXT: [[TMP0:%.*]] = call <vscale x 1 x float> @llvm.riscv.vfwnmacc.nxv1f32.nxv1bf16.nxv1bf16.i64(<vscale x 1 x float> [[VD]], <vscale x 1 x bfloat> [[VS1]], <vscale x 1 x bfloat> [[VS2]], i64 0, i64 [[VL]], i64 3)
+// CHECK-RV64-NEXT: ret <vscale x 1 x float> [[TMP0]]
+//
+vfloat32mf2_t test_vfwnmacc_vv_bf16mf4_f32mf2_rm(vfloat32mf2_t vd,
+ vbfloat16mf4_t vs1,
+ vbfloat16mf4_t vs2,
+ size_t vl) {
+ return __riscv_vfwnmacc(vd, vs1, vs2, __RISCV_FRM_RNE, vl);
+}
+
+// CHECK-RV64-LABEL: define dso_local <vscale x 1 x float> @test_vfwnmacc_vf_bf16mf4_f32mf2_rm(
+// CHECK-RV64-SAME: <vscale x 1 x float> [[VD:%.*]], bfloat noundef [[VS1:%.*]], <vscale x 1 x bfloat> [[VS2:%.*]], i64 noundef [[VL:%.*]]) #[[ATTR0]] {
+// CHECK-RV64-NEXT: [[ENTRY:.*:]]
+// CHECK-RV64-NEXT: [[TMP0:%.*]] = call <vscale x 1 x float> @llvm.riscv.vfwnmacc.nxv1f32.bf16.nxv1bf16.i64(<vscale x 1 x float> [[VD]], bfloat [[VS1]], <vscale x 1 x bfloat> [[VS2]], i64 0, i64 [[VL]], i64 3)
+// CHECK-RV64-NEXT: ret <vscale x 1 x float> [[TMP0]]
+//
+vfloat32mf2_t test_vfwnmacc_vf_bf16mf4_f32mf2_rm(vfloat32mf2_t vd, __bf16 vs1,
+ vbfloat16mf4_t vs2,
+ size_t vl) {
+ return __riscv_vfwnmacc(vd, vs1, vs2, __RISCV_FRM_RNE, vl);
+}
+
+// CHECK-RV64-LABEL: define dso_local <vscale x 2 x float> @test_vfwnmacc_vv_bf16mf2_f32m1_rm(
+// CHECK-RV64-SAME: <vscale x 2 x float> [[VD:%.*]], <vscale x 2 x bfloat> [[VS1:%.*]], <vscale x 2 x bfloat> [[VS2:%.*]], i64 noundef [[VL:%.*]]) #[[ATTR0]] {
+// CHECK-RV64-NEXT: [[ENTRY:.*:]]
+// CHECK-RV64-NEXT: [[TMP0:%.*]] = call <vscale x 2 x float> @llvm.riscv.vfwnmacc.nxv2f32.nxv2bf16.nxv2bf16.i64(<vscale x 2 x float> [[VD]], <vscale x 2 x bfloat> [[VS1]], <vscale x 2 x bfloat> [[VS2]], i64 0, i64 [[VL]], i64 3)
+// CHECK-RV64-NEXT: ret <vscale x 2 x float> [[TMP0]]
+//
+vfloat32m1_t test_vfwnmacc_vv_bf16mf2_f32m1_rm(vfloat32m1_t vd,
+ vbfloat16mf2_t vs1,
+ vbfloat16mf2_t vs2, size_t vl) {
+ return __riscv_vfwnmacc(vd, vs1, vs2, __RISCV_FRM_RNE, vl);
+}
+
+// CHECK-RV64-LABEL: define dso_local <vscale x 2 x float> @test_vfwnmacc_vf_bf16mf2_f32m1_rm(
+// CHECK-RV64-SAME: <vscale x 2 x float> [[VD:%.*]], bfloat noundef [[VS1:%.*]], <vscale x 2 x bfloat> [[VS2:%.*]], i64 noundef [[VL:%.*]]) #[[ATTR0]] {
+// CHECK-RV64-NEXT: [[ENTRY:.*:]]
+// CHECK-RV64-NEXT: [[TMP0:%.*]] = call <vscale x 2 x float> @llvm.riscv.vfwnmacc.nxv2f32.bf16.nxv2bf16.i64(<vscale x 2 x float> [[VD]], bfloat [[VS1]], <vscale x 2 x bfloat> [[VS2]], i64 0, i64 [[VL]], i64 3)
+// CHECK-RV64-NEXT: ret <vscale x 2 x float> [[TMP0]]
+//
+vfloat32m1_t test_vfwnmacc_vf_bf16mf2_f32m1_rm(vfloat32m1_t vd, __bf16 vs1,
+ vbfloat16mf2_t vs2, size_t vl) {
+ return __riscv_vfwnmacc(vd, vs1, vs2, __RISCV_FRM_RNE, vl);
+}
+
+// CHECK-RV64-LABEL: define dso_local <vscale x 4 x float> @test_vfwnmacc_vv_bf16m1_f32m2_rm(
+// CHECK-RV64-SAME: <vscale x 4 x float> [[VD:%.*]], <vscale x 4 x bfloat> [[VS1:%.*]], <vscale x 4 x bfloat> [[VS2:%.*]], i64 noundef [[VL:%.*]]) #[[ATTR0]] {
+// CHECK-RV64-NEXT: [[ENTRY:.*:]]
+// CHECK-RV64-NEXT: [[TMP0:%.*]] = call <vscale x 4 x float> @llvm.riscv.vfwnmacc.nxv4f32.nxv4bf16.nxv4bf16.i64(<vscale x 4 x float> [[VD]], <vscale x 4 x bfloat> [[VS1]], <vscale x 4 x bfloat> [[VS2]], i64 0, i64 [[VL]], i64 3)
+// CHECK-RV64-NEXT: ret <vscale x 4 x float> [[TMP0]]
+//
+vfloat32m2_t test_vfwnmacc_vv_bf16m1_f32m2_rm(vfloat32m2_t vd,
+ vbfloat16m1_t vs1,
+ vbfloat16m1_t vs2, size_t vl) {
+ return __riscv_vfwnmacc(vd, vs1, vs2, __RISCV_FRM_RNE, vl);
+}
+
+// CHECK-RV64-LABEL: define dso_local <vscale x 4 x float> @test_vfwnmacc_vf_bf16m1_f32m2_rm(
+// CHECK-RV64-SAME: <vscale x 4 x float> [[VD:%.*]], bfloat noundef [[VS1:%.*]], <vscale x 4 x bfloat> [[VS2:%.*]], i64 noundef [[VL:%.*]]) #[[ATTR0]] {
+// CHECK-RV64-NEXT: [[ENTRY:.*:]]
+// CHECK-RV64-NEXT: [[TMP0:%.*]] = call <vscale x 4 x float> @llvm.riscv.vfwnmacc.nxv4f32.bf16.nxv4bf16.i64(<vscale x 4 x float> [[VD]], bfloat [[VS1]], <vscale x 4 x bfloat> [[VS2]], i64 0, i64 [[VL]], i64 3)
+// CHECK-RV64-NEXT: ret <vscale x 4 x float> [[TMP0]]
+//
+vfloat32m2_t test_vfwnmacc_vf_bf16m1_f32m2_rm(vfloat32m2_t vd, __bf16 vs1,
+ vbfloat16m1_t vs2, size_t vl) {
+ return __riscv_vfwnmacc(vd, vs1, vs2, __RISCV_FRM_RNE, vl);
+}
+
+// CHECK-RV64-LABEL: define dso_local <vscale x 8 x float> @test_vfwnmacc_vv_bf16m2_f32m4_rm(
+// CHECK-RV64-SAME: <vscale x 8 x float> [[VD:%.*]], <vscale x 8 x bfloat> [[VS1:%.*]], <vscale x 8 x bfloat> [[VS2:%.*]], i64 noundef [[VL:%.*]]) #[[ATTR0]] {
+// CHECK-RV64-NEXT: [[ENTRY:.*:]]
+// CHECK-RV64-NEXT: [[TMP0:%.*]] = call <vscale x 8 x float> @llvm.riscv.vfwnmacc.nxv8f32.nxv8bf16.nxv8bf16.i64(<vscale x 8 x float> [[VD]], <vscale x 8 x bfloat> [[VS1]], <vscale x 8 x bfloat> [[VS2]], i64 0, i64 [[VL]], i64 3)
+// CHECK-RV64-NEXT: ret <vscale x 8 x float> [[TMP0]]
+//
+vfloat32m4_t test_vfwnmacc_vv_bf16m2_f32m4_rm(vfloat32m4_t vd,
+ vbfloat16m2_t vs1,
+ vbfloat16m2_t vs2, size_t vl) {
+ return __riscv_vfwnmacc(vd, vs1, vs2, __RISCV_FRM_RNE, vl);
+}
+
+// CHECK-RV64-LABEL: define dso_local <vscale x 8 x float> @test_vfwnmacc_vf_bf16m2_f32m4_rm(
+// CHECK-RV64-SAME: <vscale x 8 x float> [[VD:%.*]], bfloat noundef [[VS1:%.*]], <vscale x 8 x bfloat> [[VS2:%.*]], i64 noundef [[VL:%.*]]) #[[ATTR0]] {
+// CHECK-RV64-NEXT: [[ENTRY:.*:]]
+// CHECK-RV64-NEXT: [[TMP0:%.*]] = call <vscale x 8 x float> @llvm.riscv.vfwnmacc.nxv8f32.bf16.nxv8bf16.i64(<vscale x 8 x float> [[VD]], bfloat [[VS1]], <vscale x 8 x bfloat> [[VS2]], i64 0, i64 [[VL]], i64 3)
+// CHECK-RV64-NEXT: ret <vscale x 8 x float> [[TMP0]]
+//
+vfloat32m4_t test_vfwnmacc_vf_bf16m2_f32m4_rm(vfloat32m4_t vd, __bf16 vs1,
+ vbfloat16m2_t vs2, size_t vl) {
+ return __riscv_vfwnmacc(vd, vs1, vs2, __RISCV_FRM_RNE, vl);
+}
+
+// CHECK-RV64-LABEL: define dso_local <vscale x 16 x float> @test_vfwnmacc_vv_bf16m4_f32m8_rm(
+// CHECK-RV64-SAME: <vscale x 16 x float> [[VD:%.*]], <vscale x 16 x bfloat> [[VS1:%.*]], <vscale x 16 x bfloat> [[VS2:%.*]], i64 noundef [[VL:%.*]]) #[[ATTR0]] {
+// CHECK-RV64-NEXT: [[ENTRY:.*:]]
+// CHECK-RV64-NEXT: [[TMP0:%.*]] = call <vscale x 16 x float> @llvm.riscv.vfwnmacc.nxv16f32.nxv16bf16.nxv16bf16.i64(<vscale x 16 x float> [[VD]], <vscale x 16 x bfloat> [[VS1]], <vscale x 16 x bfloat> [[VS2]], i64 0, i64 [[VL]], i64 3)
+// CHECK-RV64-NEXT: ret <vscale x 16 x float> [[TMP0]]
+//
+vfloat32m8_t test_vfwnmacc_vv_bf16m4_f32m8_rm(vfloat32m8_t vd,
+ vbfloat16m4_t vs1,
+ vbfloat16m4_t vs2, size_t vl) {
+ return __riscv_vfwnmacc(vd, vs1, vs2, __RISCV_FRM_RNE, vl);
+}
+
+// CHECK-RV64-LABEL: define dso_local <vscale x 16 x float> @test_vfwnmacc_vf_bf16m4_f32m8_rm(
+// CHECK-RV64-SAME: <vscale x 16 x float> [[VD:%.*]], bfloat noundef [[VS1:%.*]], <vscale x 16 x bfloat> [[VS2:%.*]], i64 noundef [[VL:%.*]]) #[[ATTR0]] {
+// CHECK-RV64-NEXT: [[ENTRY:.*:]]
+// CHECK-RV64-NEXT: [[TMP0:%.*]] = call <vscale x 16 x float> @llvm.riscv.vfwnmacc.nxv16f32.bf16.nxv16bf16.i64(<vscale x 16 x float> [[VD]], bfloat [[VS1]], <vscale x 16 x bfloat> [[VS2]], i64 0, i64 [[VL]], i64 3)
+// CHECK-RV64-NEXT: ret <vscale x 16 x float> [[TMP0]]
+//
+vfloat32m8_t test_vfwnmacc_vf_bf16m4_f32m8_rm(vfloat32m8_t vd, __bf16 vs1,
+ vbfloat16m4_t vs2, size_t vl) {
+ return __riscv_vfwnmacc(vd, vs1, vs2, __RISCV_FRM_RNE, vl);
+}
+
+// CHECK-RV64-LABEL: define dso_local <vscale x 1 x float> @test_vfwnmacc_vv_bf16mf4_f32mf2_rm_m(
+// CHECK-RV64-SAME: <vscale x 1 x i1> [[VM:%.*]], <vscale x 1 x float> [[VD:%.*]], <vscale x 1 x bfloat> [[VS1:%.*]], <vscale x 1 x bfloat> [[VS2:%.*]], i64 noundef [[VL:%.*]]) #[[ATTR0]] {
+// CHECK-RV64-NEXT: [[ENTRY:.*:]]
+// CHECK-RV64-NEXT: [[TMP0:%.*]] = call <vscale x 1 x float> @llvm.riscv.vfwnmacc.mask.nxv1f32.nxv1bf16.nxv1bf16.i64(<vscale x 1 x float> [[VD]], <vscale x 1 x bfloat> [[VS1]], <vscale x 1 x bfloat> [[VS2]], <vscale x 1 x i1> [[VM]], i64 0, i64 [[VL]], i64 3)
+// CHECK-RV64-NEXT: ret <vscale x 1 x float> [[TMP0]]
+//
+vfloat32mf2_t test_vfwnmacc_vv_bf16mf4_f32mf2_rm_m(vbool64_t vm,
+ vfloat32mf2_t vd,
+ vbfloat16mf4_t vs1,
+ vbfloat16mf4_t vs2,
+ size_t vl) {
+ return __riscv_vfwnmacc(vm, vd, vs1, vs2, __RISCV_FRM_RNE, vl);
+}
+
+// CHECK-RV64-LABEL: define dso_local <vscale x 1 x float> @test_vfwnmacc_vf_bf16mf4_f32mf2_rm_m(
+// CHECK-RV64-SAME: <vscale x 1 x i1> [[VM:%.*]], <vscale x 1 x float> [[VD:%.*]], bfloat noundef [[VS1:%.*]], <vscale x 1 x bfloat> [[VS2:%.*]], i64 noundef [[VL:%.*]]) #[[ATTR0]] {
+// CHECK-RV64-NEXT: [[ENTRY:.*:]]
+// CHECK-RV64-NEXT: [[TMP0:%.*]] = call <vscale x 1 x float> @llvm.riscv.vfwnmacc.mask.nxv1f32.bf16.nxv1bf16.i64(<vscale x 1 x float> [[VD]], bfloat [[VS1]], <vscale x 1 x bfloat> [[VS2]], <vscale x 1 x i1> [[VM]], i64 0, i64 [[VL]], i64 3)
+// CHECK-RV64-NEXT: ret <vscale x 1 x float> [[TMP0]]
+//
+vfloat32mf2_t test_vfwnmacc_vf_bf16mf4_f32mf2_rm_m(vbool64_t vm,
+ vfloat32mf2_t vd, __bf16 vs1,
+ vbfloat16mf4_t vs2,
+ size_t vl) {
+ return __riscv_vfwnmacc(vm, vd, vs1, vs2, __RISCV_FRM_RNE, vl);
+}
+
+// CHECK-RV64-LABEL: define dso_local <vscale x 2 x float> @test_vfwnmacc_vv_bf16mf2_f32m1_rm_m(
+// CHECK-RV64-SAME: <vscale x 2 x i1> [[VM:%.*]], <vscale x 2 x float> [[VD:%.*]], <vscale x 2 x bfloat> [[VS1:%.*]], <vscale x 2 x bfloat> [[VS2:%.*]], i64 noundef [[VL:%.*]]) #[[ATTR0]] {
+// CHECK-RV64-NEXT: [[ENTRY:.*:]]
+// CHECK-RV64-NEXT: [[TMP0:%.*]] = call <vscale x 2 x float> @llvm.riscv.vfwnmacc.mask.nxv2f32.nxv2bf16.nxv2bf16.i64(<vscale x 2 x float> [[VD]], <vscale x 2 x bfloat> [[VS1]], <vscale x 2 x bfloat> [[VS2]], <vscale x 2 x i1> [[VM]], i64 0, i64 [[VL]], i64 3)
+// CHECK-RV64-NEXT: ret <vscale x 2 x float> [[TMP0]]
+//
+vfloat32m1_t test_vfwnmacc_vv_bf16mf2_f32m1_rm_m(vbool32_t vm, vfloat32m1_t vd,
+ vbfloat16mf2_t vs1,
+ vbfloat16mf2_t vs2,
+ size_t vl) {
+ return __riscv_vfwnmacc(vm, vd, vs1, vs2, __RISCV_FRM_RNE, vl);
+}
+
+// CHECK-RV64-LABEL: define dso_local <vscale x 2 x float> @test_vfwnmacc_vf_bf16mf2_f32m1_rm_m(
+// CHECK-RV64-SAME: <vscale x 2 x i1> [[VM:%.*]], <vscale x 2 x float> [[VD:%.*]], bfloat noundef [[VS1:%.*]], <vscale x 2 x bfloat> [[VS2:%.*]], i64 noundef [[VL:%.*]]) #[[ATTR0]] {
+// CHECK-RV64-NEXT: [[ENTRY:.*:]]
+// CHECK-RV64-NEXT: [[TMP0:%.*]] = call <vscale x 2 x float> @llvm.riscv.vfwnmacc.mask.nxv2f32.bf16.nxv2bf16.i64(<vscale x 2 x float> [[VD]], bfloat [[VS1]], <vscale x 2 x bfloat> [[VS2]], <vscale x 2 x i1> [[VM]], i64 0, i64 [[VL]], i64 3)
+// CHECK-RV64-NEXT: ret <vscale x 2 x float> [[TMP0]]
+//
+vfloat32m1_t test_vfwnmacc_vf_bf16mf2_f32m1_rm_m(vbool32_t vm, vfloat32m1_t vd,
+ __bf16 vs1, vbfloat16mf2_t vs2,
+ size_t vl) {
+ return __riscv_vfwnmacc(vm, vd, vs1, vs2, __RISCV_FRM_RNE, vl);
+}
+
+// CHECK-RV64-LABEL: define dso_local <vscale x 4 x float> @test_vfwnmacc_vv_bf16m1_f32m2_rm_m(
+// CHECK-RV64-SAME: <vscale x 4 x i1> [[VM:%.*]], <vscale x 4 x float> [[VD:%.*]], <vscale x 4 x bfloat> [[VS1:%.*]], <vscale x 4 x bfloat> [[VS2:%.*]], i64 noundef [[VL:%.*]]) #[[ATTR0]] {
+// CHECK-RV64-NEXT: [[ENTRY:.*:]]
+// CHECK-RV64-NEXT: [[TMP0:%.*]] = call <vscale x 4 x float> @llvm.riscv.vfwnmacc.mask.nxv4f32.nxv4bf16.nxv4bf16.i64(<vscale x 4 x float> [[VD]], <vscale x 4 x bfloat> [[VS1]], <vscale x 4 x bfloat> [[VS2]], <vscale x 4 x i1> [[VM]], i64 0, i64 [[VL]], i64 3)
+// CHECK-RV64-NEXT: ret <vscale x 4 x float> [[TMP0]]
+//
+vfloat32m2_t test_vfwnmacc_vv_bf16m1_f32m2_rm_m(vbool16_t vm, vfloat32m2_t vd,
+ vbfloat16m1_t vs1,
+ vbfloat16m1_t vs2, size_t vl) {
+ return __riscv_vfwnmacc(vm, vd, vs1, vs2, __RISCV_FRM_RNE, vl);
+}
+
+// CHECK-RV64-LABEL: define dso_local <vscale x 4 x float> @test_vfwnmacc_vf_bf16m1_f32m2_rm_m(
+// CHECK-RV64-SAME: <vscale x 4 x i1> [[VM:%.*]], <vscale x 4 x float> [[VD:%.*]], bfloat noundef [[VS1:%.*]], <vscale x 4 x bfloat> [[VS2:%.*]], i64 noundef [[VL:%.*]]) #[[ATTR0]] {
+// CHECK-RV64-NEXT: [[ENTRY:.*:]]
+// CHECK-RV64-NEXT: [[TMP0:%.*]] = call <vscale x 4 x float> @llvm.riscv.vfwnmacc.mask.nxv4f32.bf16.nxv4bf16.i64(<vscale x 4 x float> [[VD]], bfloat [[VS1]], <vscale x 4 x bfloat> [[VS2]], <vscale x 4 x i1> [[VM]], i64 0, i64 [[VL]], i64 3)
+// CHECK-RV64-NEXT: ret <vscale x 4 x float> [[TMP0]]
+//
+vfloat32m2_t test_vfwnmacc_vf_bf16m1_f32m2_rm_m(vbool16_t vm, vfloat32m2_t vd,
+ __bf16 vs1, vbfloat16m1_t vs2,
+ size_t vl) {
+ return __riscv_vfwnmacc(vm, vd, vs1, vs2, __RISCV_FRM_RNE, vl);
+}
+
+// CHECK-RV64-LABEL: define dso_local <vscale x 8 x float> @test_vfwnmacc_vv_bf16m2_f32m4_rm_m(
+// CHECK-RV64-SAME: <vscale x 8 x i1> [[VM:%.*]], <vscale x 8 x float> [[VD:%.*]], <vscale x 8 x bfloat> [[VS1:%.*]], <vscale x 8 x bfloat> [[VS2:%.*]], i64 noundef [[VL:%.*]]) #[[ATTR0]] {
+// CHECK-RV64-NEXT: [[ENTRY:.*:]]
+// CHECK-RV64-NEXT: [[TMP0:%.*]] = call <vscale x 8 x float> @llvm.riscv.vfwnmacc.mask.nxv8f32.nxv8bf16.nxv8bf16.i64(<vscale x 8 x float> [[VD]], <vscale x 8 x bfloat> [[VS1]], <vscale x 8 x bfloat> [[VS2]], <vscale x 8 x i1> [[VM]], i64 0, i64 [[VL]], i64 3)
+// CHECK-RV64-NEXT: ret <vscale x 8 x float> [[TMP0]]
+//
+vfloat32m4_t test_vfwnmacc_vv_bf16m2_f32m4_rm_m(vbool8_t vm, vfloat32m4_t vd,
+ vbfloat16m2_t vs1,
+ vbfloat16m2_t vs2, size_t vl) {
+ return __riscv_vfwnmacc(vm, vd, vs1, vs2, __RISCV_FRM_RNE, vl);
+}
+
+// CHECK-RV64-LABEL: define dso_local <vscale x 8 x float> @test_vfwnmacc_vf_bf16m2_f32m4_rm_m(
+// CHECK-RV64-SAME: <vscale x 8 x i1> [[VM:%.*]], <vscale x 8 x float> [[VD:%.*]], bfloat noundef [[VS1:%.*]], <vscale x 8 x bfloat> [[VS2:%.*]], i64 noundef [[VL:%.*]]) #[[ATTR0]] {
+// CHECK-RV64-NEXT: [[ENTRY:.*:]]
+// CHECK-RV64-NEXT: [[TMP0:%.*]] = call <vscale x 8 x float> @llvm.riscv.vfwnmacc.mask.nxv8f32.bf16.nxv8bf16.i64(<vscale x 8 x float> [[VD]], bfloat [[VS1]], <vscale x 8 x bfloat> [[VS2]], <vscale x 8 x i1> [[VM]], i64 0, i64 [[VL]], i64 3)
+// CHECK-RV64-NEXT: ret <vscale x 8 x float> [[TMP0]]
+//
+vfloat32m4_t test_vfwnmacc_vf_bf16m2_f32m4_rm_m(vbool8_t vm, vfloat32m4_t vd,
+ __bf16 vs1, vbfloat16m2_t vs2,
+ size_t vl) {
+ return __riscv_vfwnmacc(vm, vd, vs1, vs2, __RISCV_FRM_RNE, vl);
+}
+
+// CHECK-RV64-LABEL: define dso_local <vscale x 16 x float> @test_vfwnmacc_vv_bf16m4_f32m8_rm_m(
+// CHECK-RV64-SAME: <vscale x 16 x i1> [[VM:%.*]], <vscale x 16 x float> [[VD:%.*]], <vscale x 16 x bfloat> [[VS1:%.*]], <vscale x 16 x bfloat> [[VS2:%.*]], i64 noundef [[VL:%.*]]) #[[ATTR0]] {
+// CHECK-RV64-NEXT: [[ENTRY:.*:]]
+// CHECK-RV64-NEXT: [[TMP0:%.*]] = call <vscale x 16 x float> @llvm.riscv.vfwnmacc.mask.nxv16f32.nxv16bf16.nxv16bf16.i64(<vscale x 16 x float> [[VD]], <vscale x 16 x bfloat> [[VS1]], <vscale x 16 x bfloat> [[VS2]], <vscale x 16 x i1> [[VM]], i64 0, i64 [[VL]], i64 3)
+// CHECK-RV64-NEXT: ret <vscale x 16 x float> [[TMP0]]
+//
+vfloat32m8_t test_vfwnmacc_vv_bf16m4_f32m8_rm_m(vbool4_t vm, vfloat32m8_t vd,
+ vbfloat16m4_t vs1,
+ vbfloat16m4_t vs2, size_t vl) {
+ return __riscv_vfwnmacc(vm, vd, vs1, vs2, __RISCV_FRM_RNE, vl);
+}
+
+// CHECK-RV64-LABEL: define dso_local <vscale x 16 x float> @test_vfwnmacc_vf_bf16m4_f32m8_rm_m(
+// CHECK-RV64-SAME: <vscale x 16 x i1> [[VM:%.*]], <vscale x 16 x float> [[VD:%.*]], bfloat noundef [[VS1:%.*]], <vscale x 16 x bfloat> [[VS2:%.*]], i64 noundef [[VL:%.*]]) #[[ATTR0]] {
+// CHECK-RV64-NEXT: [[ENTRY:.*:]]
+// CHECK-RV64-NEXT: [[TMP0:%.*]] = call <vscale x 16 x float> @llvm.riscv.vfwnmacc.mask.nxv16f32.bf16.nxv16bf16.i64(<vscale x 16 x float> [[VD]], bfloat [[VS1]], <vscale x 16 x bfloat> [[VS2]], <vscale x 16 x i1> [[VM]], i64 0, i64 [[VL]], i64 3)
+// CHECK-RV64-NEXT: ret <vscale x 16 x float> [[TMP0]]
+//
+vfloat32m8_t test_vfwnmacc_vf_bf16m4_f32m8_rm_m(vbool4_t vm, vfloat32m8_t vd,
+ __bf16 vs1, vbfloat16m4_t vs2,
+ size_t vl) {
+ return __riscv_vfwnmacc(vm, vd, vs1, vs2, __RISCV_FRM_RNE, vl);
+}
diff --git a/clang/test/CodeGen/RISCV/rvv-intrinsics-autogenerated/zvfbfa/non-policy/overloaded/vfwnmsac.c b/clang/test/CodeGen/RISCV/rvv-intrinsics-autogenerated/zvfbfa/non-policy/overloaded/vfwnmsac.c
new file mode 100644
index 0000000..ca936af
--- /dev/null
+++ b/clang/test/CodeGen/RISCV/rvv-intrinsics-autogenerated/zvfbfa/non-policy/overloaded/vfwnmsac.c
@@ -0,0 +1,480 @@
+// NOTE: Assertions have been autogenerated by utils/update_cc_test_checks.py UTC_ARGS: --version 5
+// REQUIRES: riscv-registered-target
+// RUN: %clang_cc1 -triple riscv64 -target-feature +v \
+// RUN: -target-feature +experimental-zvfbfa -disable-O0-optnone \
+// RUN: -emit-llvm %s -o - | opt -S -passes=mem2reg | \
+// RUN: FileCheck --check-prefix=CHECK-RV64 %s
+
+#include <riscv_vector.h>
+
+// CHECK-RV64-LABEL: define dso_local <vscale x 1 x float> @test_vfwnmsac_vv_bf16mf4_f32mf2(
+// CHECK-RV64-SAME: <vscale x 1 x float> [[VD:%.*]], <vscale x 1 x bfloat> [[VS1:%.*]], <vscale x 1 x bfloat> [[VS2:%.*]], i64 noundef [[VL:%.*]]) #[[ATTR0:[0-9]+]] {
+// CHECK-RV64-NEXT: [[ENTRY:.*:]]
+// CHECK-RV64-NEXT: [[TMP0:%.*]] = call <vscale x 1 x float> @llvm.riscv.vfwnmsac.nxv1f32.nxv1bf16.nxv1bf16.i64(<vscale x 1 x float> [[VD]], <vscale x 1 x bfloat> [[VS1]], <vscale x 1 x bfloat> [[VS2]], i64 7, i64 [[VL]], i64 3)
+// CHECK-RV64-NEXT: ret <vscale x 1 x float> [[TMP0]]
+//
+vfloat32mf2_t test_vfwnmsac_vv_bf16mf4_f32mf2(vfloat32mf2_t vd,
+ vbfloat16mf4_t vs1,
+ vbfloat16mf4_t vs2, size_t vl) {
+ return __riscv_vfwnmsac(vd, vs1, vs2, vl);
+}
+
+// CHECK-RV64-LABEL: define dso_local <vscale x 1 x float> @test_vfwnmsac_vf_bf16mf4_f32mf2(
+// CHECK-RV64-SAME: <vscale x 1 x float> [[VD:%.*]], bfloat noundef [[VS1:%.*]], <vscale x 1 x bfloat> [[VS2:%.*]], i64 noundef [[VL:%.*]]) #[[ATTR0]] {
+// CHECK-RV64-NEXT: [[ENTRY:.*:]]
+// CHECK-RV64-NEXT: [[TMP0:%.*]] = call <vscale x 1 x float> @llvm.riscv.vfwnmsac.nxv1f32.bf16.nxv1bf16.i64(<vscale x 1 x float> [[VD]], bfloat [[VS1]], <vscale x 1 x bfloat> [[VS2]], i64 7, i64 [[VL]], i64 3)
+// CHECK-RV64-NEXT: ret <vscale x 1 x float> [[TMP0]]
+//
+vfloat32mf2_t test_vfwnmsac_vf_bf16mf4_f32mf2(vfloat32mf2_t vd, __bf16 vs1,
+ vbfloat16mf4_t vs2, size_t vl) {
+ return __riscv_vfwnmsac(vd, vs1, vs2, vl);
+}
+
+// CHECK-RV64-LABEL: define dso_local <vscale x 2 x float> @test_vfwnmsac_vv_bf16mf2_f32m1(
+// CHECK-RV64-SAME: <vscale x 2 x float> [[VD:%.*]], <vscale x 2 x bfloat> [[VS1:%.*]], <vscale x 2 x bfloat> [[VS2:%.*]], i64 noundef [[VL:%.*]]) #[[ATTR0]] {
+// CHECK-RV64-NEXT: [[ENTRY:.*:]]
+// CHECK-RV64-NEXT: [[TMP0:%.*]] = call <vscale x 2 x float> @llvm.riscv.vfwnmsac.nxv2f32.nxv2bf16.nxv2bf16.i64(<vscale x 2 x float> [[VD]], <vscale x 2 x bfloat> [[VS1]], <vscale x 2 x bfloat> [[VS2]], i64 7, i64 [[VL]], i64 3)
+// CHECK-RV64-NEXT: ret <vscale x 2 x float> [[TMP0]]
+//
+vfloat32m1_t test_vfwnmsac_vv_bf16mf2_f32m1(vfloat32m1_t vd, vbfloat16mf2_t vs1,
+ vbfloat16mf2_t vs2, size_t vl) {
+ return __riscv_vfwnmsac(vd, vs1, vs2, vl);
+}
+
+// CHECK-RV64-LABEL: define dso_local <vscale x 2 x float> @test_vfwnmsac_vf_bf16mf2_f32m1(
+// CHECK-RV64-SAME: <vscale x 2 x float> [[VD:%.*]], bfloat noundef [[VS1:%.*]], <vscale x 2 x bfloat> [[VS2:%.*]], i64 noundef [[VL:%.*]]) #[[ATTR0]] {
+// CHECK-RV64-NEXT: [[ENTRY:.*:]]
+// CHECK-RV64-NEXT: [[TMP0:%.*]] = call <vscale x 2 x float> @llvm.riscv.vfwnmsac.nxv2f32.bf16.nxv2bf16.i64(<vscale x 2 x float> [[VD]], bfloat [[VS1]], <vscale x 2 x bfloat> [[VS2]], i64 7, i64 [[VL]], i64 3)
+// CHECK-RV64-NEXT: ret <vscale x 2 x float> [[TMP0]]
+//
+vfloat32m1_t test_vfwnmsac_vf_bf16mf2_f32m1(vfloat32m1_t vd, __bf16 vs1,
+ vbfloat16mf2_t vs2, size_t vl) {
+ return __riscv_vfwnmsac(vd, vs1, vs2, vl);
+}
+
+// CHECK-RV64-LABEL: define dso_local <vscale x 4 x float> @test_vfwnmsac_vv_bf16m1_f32m2(
+// CHECK-RV64-SAME: <vscale x 4 x float> [[VD:%.*]], <vscale x 4 x bfloat> [[VS1:%.*]], <vscale x 4 x bfloat> [[VS2:%.*]], i64 noundef [[VL:%.*]]) #[[ATTR0]] {
+// CHECK-RV64-NEXT: [[ENTRY:.*:]]
+// CHECK-RV64-NEXT: [[TMP0:%.*]] = call <vscale x 4 x float> @llvm.riscv.vfwnmsac.nxv4f32.nxv4bf16.nxv4bf16.i64(<vscale x 4 x float> [[VD]], <vscale x 4 x bfloat> [[VS1]], <vscale x 4 x bfloat> [[VS2]], i64 7, i64 [[VL]], i64 3)
+// CHECK-RV64-NEXT: ret <vscale x 4 x float> [[TMP0]]
+//
+vfloat32m2_t test_vfwnmsac_vv_bf16m1_f32m2(vfloat32m2_t vd, vbfloat16m1_t vs1,
+ vbfloat16m1_t vs2, size_t vl) {
+ return __riscv_vfwnmsac(vd, vs1, vs2, vl);
+}
+
+// CHECK-RV64-LABEL: define dso_local <vscale x 4 x float> @test_vfwnmsac_vf_bf16m1_f32m2(
+// CHECK-RV64-SAME: <vscale x 4 x float> [[VD:%.*]], bfloat noundef [[VS1:%.*]], <vscale x 4 x bfloat> [[VS2:%.*]], i64 noundef [[VL:%.*]]) #[[ATTR0]] {
+// CHECK-RV64-NEXT: [[ENTRY:.*:]]
+// CHECK-RV64-NEXT: [[TMP0:%.*]] = call <vscale x 4 x float> @llvm.riscv.vfwnmsac.nxv4f32.bf16.nxv4bf16.i64(<vscale x 4 x float> [[VD]], bfloat [[VS1]], <vscale x 4 x bfloat> [[VS2]], i64 7, i64 [[VL]], i64 3)
+// CHECK-RV64-NEXT: ret <vscale x 4 x float> [[TMP0]]
+//
+vfloat32m2_t test_vfwnmsac_vf_bf16m1_f32m2(vfloat32m2_t vd, __bf16 vs1,
+ vbfloat16m1_t vs2, size_t vl) {
+ return __riscv_vfwnmsac(vd, vs1, vs2, vl);
+}
+
+// CHECK-RV64-LABEL: define dso_local <vscale x 8 x float> @test_vfwnmsac_vv_bf16m2_f32m4(
+// CHECK-RV64-SAME: <vscale x 8 x float> [[VD:%.*]], <vscale x 8 x bfloat> [[VS1:%.*]], <vscale x 8 x bfloat> [[VS2:%.*]], i64 noundef [[VL:%.*]]) #[[ATTR0]] {
+// CHECK-RV64-NEXT: [[ENTRY:.*:]]
+// CHECK-RV64-NEXT: [[TMP0:%.*]] = call <vscale x 8 x float> @llvm.riscv.vfwnmsac.nxv8f32.nxv8bf16.nxv8bf16.i64(<vscale x 8 x float> [[VD]], <vscale x 8 x bfloat> [[VS1]], <vscale x 8 x bfloat> [[VS2]], i64 7, i64 [[VL]], i64 3)
+// CHECK-RV64-NEXT: ret <vscale x 8 x float> [[TMP0]]
+//
+vfloat32m4_t test_vfwnmsac_vv_bf16m2_f32m4(vfloat32m4_t vd, vbfloat16m2_t vs1,
+ vbfloat16m2_t vs2, size_t vl) {
+ return __riscv_vfwnmsac(vd, vs1, vs2, vl);
+}
+
+// CHECK-RV64-LABEL: define dso_local <vscale x 8 x float> @test_vfwnmsac_vf_bf16m2_f32m4(
+// CHECK-RV64-SAME: <vscale x 8 x float> [[VD:%.*]], bfloat noundef [[VS1:%.*]], <vscale x 8 x bfloat> [[VS2:%.*]], i64 noundef [[VL:%.*]]) #[[ATTR0]] {
+// CHECK-RV64-NEXT: [[ENTRY:.*:]]
+// CHECK-RV64-NEXT: [[TMP0:%.*]] = call <vscale x 8 x float> @llvm.riscv.vfwnmsac.nxv8f32.bf16.nxv8bf16.i64(<vscale x 8 x float> [[VD]], bfloat [[VS1]], <vscale x 8 x bfloat> [[VS2]], i64 7, i64 [[VL]], i64 3)
+// CHECK-RV64-NEXT: ret <vscale x 8 x float> [[TMP0]]
+//
+vfloat32m4_t test_vfwnmsac_vf_bf16m2_f32m4(vfloat32m4_t vd, __bf16 vs1,
+ vbfloat16m2_t vs2, size_t vl) {
+ return __riscv_vfwnmsac(vd, vs1, vs2, vl);
+}
+
+// CHECK-RV64-LABEL: define dso_local <vscale x 16 x float> @test_vfwnmsac_vv_bf16m4_f32m8(
+// CHECK-RV64-SAME: <vscale x 16 x float> [[VD:%.*]], <vscale x 16 x bfloat> [[VS1:%.*]], <vscale x 16 x bfloat> [[VS2:%.*]], i64 noundef [[VL:%.*]]) #[[ATTR0]] {
+// CHECK-RV64-NEXT: [[ENTRY:.*:]]
+// CHECK-RV64-NEXT: [[TMP0:%.*]] = call <vscale x 16 x float> @llvm.riscv.vfwnmsac.nxv16f32.nxv16bf16.nxv16bf16.i64(<vscale x 16 x float> [[VD]], <vscale x 16 x bfloat> [[VS1]], <vscale x 16 x bfloat> [[VS2]], i64 7, i64 [[VL]], i64 3)
+// CHECK-RV64-NEXT: ret <vscale x 16 x float> [[TMP0]]
+//
+vfloat32m8_t test_vfwnmsac_vv_bf16m4_f32m8(vfloat32m8_t vd, vbfloat16m4_t vs1,
+ vbfloat16m4_t vs2, size_t vl) {
+ return __riscv_vfwnmsac(vd, vs1, vs2, vl);
+}
+
+// CHECK-RV64-LABEL: define dso_local <vscale x 16 x float> @test_vfwnmsac_vf_bf16m4_f32m8(
+// CHECK-RV64-SAME: <vscale x 16 x float> [[VD:%.*]], bfloat noundef [[VS1:%.*]], <vscale x 16 x bfloat> [[VS2:%.*]], i64 noundef [[VL:%.*]]) #[[ATTR0]] {
+// CHECK-RV64-NEXT: [[ENTRY:.*:]]
+// CHECK-RV64-NEXT: [[TMP0:%.*]] = call <vscale x 16 x float> @llvm.riscv.vfwnmsac.nxv16f32.bf16.nxv16bf16.i64(<vscale x 16 x float> [[VD]], bfloat [[VS1]], <vscale x 16 x bfloat> [[VS2]], i64 7, i64 [[VL]], i64 3)
+// CHECK-RV64-NEXT: ret <vscale x 16 x float> [[TMP0]]
+//
+vfloat32m8_t test_vfwnmsac_vf_bf16m4_f32m8(vfloat32m8_t vd, __bf16 vs1,
+ vbfloat16m4_t vs2, size_t vl) {
+ return __riscv_vfwnmsac(vd, vs1, vs2, vl);
+}
+
+// CHECK-RV64-LABEL: define dso_local <vscale x 1 x float> @test_vfwnmsac_vv_bf16mf4_f32mf2_m(
+// CHECK-RV64-SAME: <vscale x 1 x i1> [[VM:%.*]], <vscale x 1 x float> [[VD:%.*]], <vscale x 1 x bfloat> [[VS1:%.*]], <vscale x 1 x bfloat> [[VS2:%.*]], i64 noundef [[VL:%.*]]) #[[ATTR0]] {
+// CHECK-RV64-NEXT: [[ENTRY:.*:]]
+// CHECK-RV64-NEXT: [[TMP0:%.*]] = call <vscale x 1 x float> @llvm.riscv.vfwnmsac.mask.nxv1f32.nxv1bf16.nxv1bf16.i64(<vscale x 1 x float> [[VD]], <vscale x 1 x bfloat> [[VS1]], <vscale x 1 x bfloat> [[VS2]], <vscale x 1 x i1> [[VM]], i64 7, i64 [[VL]], i64 3)
+// CHECK-RV64-NEXT: ret <vscale x 1 x float> [[TMP0]]
+//
+vfloat32mf2_t test_vfwnmsac_vv_bf16mf4_f32mf2_m(vbool64_t vm, vfloat32mf2_t vd,
+ vbfloat16mf4_t vs1,
+ vbfloat16mf4_t vs2, size_t vl) {
+ return __riscv_vfwnmsac(vm, vd, vs1, vs2, vl);
+}
+
+// CHECK-RV64-LABEL: define dso_local <vscale x 1 x float> @test_vfwnmsac_vf_bf16mf4_f32mf2_m(
+// CHECK-RV64-SAME: <vscale x 1 x i1> [[VM:%.*]], <vscale x 1 x float> [[VD:%.*]], bfloat noundef [[VS1:%.*]], <vscale x 1 x bfloat> [[VS2:%.*]], i64 noundef [[VL:%.*]]) #[[ATTR0]] {
+// CHECK-RV64-NEXT: [[ENTRY:.*:]]
+// CHECK-RV64-NEXT: [[TMP0:%.*]] = call <vscale x 1 x float> @llvm.riscv.vfwnmsac.mask.nxv1f32.bf16.nxv1bf16.i64(<vscale x 1 x float> [[VD]], bfloat [[VS1]], <vscale x 1 x bfloat> [[VS2]], <vscale x 1 x i1> [[VM]], i64 7, i64 [[VL]], i64 3)
+// CHECK-RV64-NEXT: ret <vscale x 1 x float> [[TMP0]]
+//
+vfloat32mf2_t test_vfwnmsac_vf_bf16mf4_f32mf2_m(vbool64_t vm, vfloat32mf2_t vd,
+ __bf16 vs1, vbfloat16mf4_t vs2,
+ size_t vl) {
+ return __riscv_vfwnmsac(vm, vd, vs1, vs2, vl);
+}
+
+// CHECK-RV64-LABEL: define dso_local <vscale x 2 x float> @test_vfwnmsac_vv_bf16mf2_f32m1_m(
+// CHECK-RV64-SAME: <vscale x 2 x i1> [[VM:%.*]], <vscale x 2 x float> [[VD:%.*]], <vscale x 2 x bfloat> [[VS1:%.*]], <vscale x 2 x bfloat> [[VS2:%.*]], i64 noundef [[VL:%.*]]) #[[ATTR0]] {
+// CHECK-RV64-NEXT: [[ENTRY:.*:]]
+// CHECK-RV64-NEXT: [[TMP0:%.*]] = call <vscale x 2 x float> @llvm.riscv.vfwnmsac.mask.nxv2f32.nxv2bf16.nxv2bf16.i64(<vscale x 2 x float> [[VD]], <vscale x 2 x bfloat> [[VS1]], <vscale x 2 x bfloat> [[VS2]], <vscale x 2 x i1> [[VM]], i64 7, i64 [[VL]], i64 3)
+// CHECK-RV64-NEXT: ret <vscale x 2 x float> [[TMP0]]
+//
+vfloat32m1_t test_vfwnmsac_vv_bf16mf2_f32m1_m(vbool32_t vm, vfloat32m1_t vd,
+ vbfloat16mf2_t vs1,
+ vbfloat16mf2_t vs2, size_t vl) {
+ return __riscv_vfwnmsac(vm, vd, vs1, vs2, vl);
+}
+
+// CHECK-RV64-LABEL: define dso_local <vscale x 2 x float> @test_vfwnmsac_vf_bf16mf2_f32m1_m(
+// CHECK-RV64-SAME: <vscale x 2 x i1> [[VM:%.*]], <vscale x 2 x float> [[VD:%.*]], bfloat noundef [[VS1:%.*]], <vscale x 2 x bfloat> [[VS2:%.*]], i64 noundef [[VL:%.*]]) #[[ATTR0]] {
+// CHECK-RV64-NEXT: [[ENTRY:.*:]]
+// CHECK-RV64-NEXT: [[TMP0:%.*]] = call <vscale x 2 x float> @llvm.riscv.vfwnmsac.mask.nxv2f32.bf16.nxv2bf16.i64(<vscale x 2 x float> [[VD]], bfloat [[VS1]], <vscale x 2 x bfloat> [[VS2]], <vscale x 2 x i1> [[VM]], i64 7, i64 [[VL]], i64 3)
+// CHECK-RV64-NEXT: ret <vscale x 2 x float> [[TMP0]]
+//
+vfloat32m1_t test_vfwnmsac_vf_bf16mf2_f32m1_m(vbool32_t vm, vfloat32m1_t vd,
+ __bf16 vs1, vbfloat16mf2_t vs2,
+ size_t vl) {
+ return __riscv_vfwnmsac(vm, vd, vs1, vs2, vl);
+}
+
+// CHECK-RV64-LABEL: define dso_local <vscale x 4 x float> @test_vfwnmsac_vv_bf16m1_f32m2_m(
+// CHECK-RV64-SAME: <vscale x 4 x i1> [[VM:%.*]], <vscale x 4 x float> [[VD:%.*]], <vscale x 4 x bfloat> [[VS1:%.*]], <vscale x 4 x bfloat> [[VS2:%.*]], i64 noundef [[VL:%.*]]) #[[ATTR0]] {
+// CHECK-RV64-NEXT: [[ENTRY:.*:]]
+// CHECK-RV64-NEXT: [[TMP0:%.*]] = call <vscale x 4 x float> @llvm.riscv.vfwnmsac.mask.nxv4f32.nxv4bf16.nxv4bf16.i64(<vscale x 4 x float> [[VD]], <vscale x 4 x bfloat> [[VS1]], <vscale x 4 x bfloat> [[VS2]], <vscale x 4 x i1> [[VM]], i64 7, i64 [[VL]], i64 3)
+// CHECK-RV64-NEXT: ret <vscale x 4 x float> [[TMP0]]
+//
+vfloat32m2_t test_vfwnmsac_vv_bf16m1_f32m2_m(vbool16_t vm, vfloat32m2_t vd,
+ vbfloat16m1_t vs1,
+ vbfloat16m1_t vs2, size_t vl) {
+ return __riscv_vfwnmsac(vm, vd, vs1, vs2, vl);
+}
+
+// CHECK-RV64-LABEL: define dso_local <vscale x 4 x float> @test_vfwnmsac_vf_bf16m1_f32m2_m(
+// CHECK-RV64-SAME: <vscale x 4 x i1> [[VM:%.*]], <vscale x 4 x float> [[VD:%.*]], bfloat noundef [[VS1:%.*]], <vscale x 4 x bfloat> [[VS2:%.*]], i64 noundef [[VL:%.*]]) #[[ATTR0]] {
+// CHECK-RV64-NEXT: [[ENTRY:.*:]]
+// CHECK-RV64-NEXT: [[TMP0:%.*]] = call <vscale x 4 x float> @llvm.riscv.vfwnmsac.mask.nxv4f32.bf16.nxv4bf16.i64(<vscale x 4 x float> [[VD]], bfloat [[VS1]], <vscale x 4 x bfloat> [[VS2]], <vscale x 4 x i1> [[VM]], i64 7, i64 [[VL]], i64 3)
+// CHECK-RV64-NEXT: ret <vscale x 4 x float> [[TMP0]]
+//
+vfloat32m2_t test_vfwnmsac_vf_bf16m1_f32m2_m(vbool16_t vm, vfloat32m2_t vd,
+ __bf16 vs1, vbfloat16m1_t vs2,
+ size_t vl) {
+ return __riscv_vfwnmsac(vm, vd, vs1, vs2, vl);
+}
+
+// CHECK-RV64-LABEL: define dso_local <vscale x 8 x float> @test_vfwnmsac_vv_bf16m2_f32m4_m(
+// CHECK-RV64-SAME: <vscale x 8 x i1> [[VM:%.*]], <vscale x 8 x float> [[VD:%.*]], <vscale x 8 x bfloat> [[VS1:%.*]], <vscale x 8 x bfloat> [[VS2:%.*]], i64 noundef [[VL:%.*]]) #[[ATTR0]] {
+// CHECK-RV64-NEXT: [[ENTRY:.*:]]
+// CHECK-RV64-NEXT: [[TMP0:%.*]] = call <vscale x 8 x float> @llvm.riscv.vfwnmsac.mask.nxv8f32.nxv8bf16.nxv8bf16.i64(<vscale x 8 x float> [[VD]], <vscale x 8 x bfloat> [[VS1]], <vscale x 8 x bfloat> [[VS2]], <vscale x 8 x i1> [[VM]], i64 7, i64 [[VL]], i64 3)
+// CHECK-RV64-NEXT: ret <vscale x 8 x float> [[TMP0]]
+//
+vfloat32m4_t test_vfwnmsac_vv_bf16m2_f32m4_m(vbool8_t vm, vfloat32m4_t vd,
+ vbfloat16m2_t vs1,
+ vbfloat16m2_t vs2, size_t vl) {
+ return __riscv_vfwnmsac(vm, vd, vs1, vs2, vl);
+}
+
+// CHECK-RV64-LABEL: define dso_local <vscale x 8 x float> @test_vfwnmsac_vf_bf16m2_f32m4_m(
+// CHECK-RV64-SAME: <vscale x 8 x i1> [[VM:%.*]], <vscale x 8 x float> [[VD:%.*]], bfloat noundef [[VS1:%.*]], <vscale x 8 x bfloat> [[VS2:%.*]], i64 noundef [[VL:%.*]]) #[[ATTR0]] {
+// CHECK-RV64-NEXT: [[ENTRY:.*:]]
+// CHECK-RV64-NEXT: [[TMP0:%.*]] = call <vscale x 8 x float> @llvm.riscv.vfwnmsac.mask.nxv8f32.bf16.nxv8bf16.i64(<vscale x 8 x float> [[VD]], bfloat [[VS1]], <vscale x 8 x bfloat> [[VS2]], <vscale x 8 x i1> [[VM]], i64 7, i64 [[VL]], i64 3)
+// CHECK-RV64-NEXT: ret <vscale x 8 x float> [[TMP0]]
+//
+vfloat32m4_t test_vfwnmsac_vf_bf16m2_f32m4_m(vbool8_t vm, vfloat32m4_t vd,
+ __bf16 vs1, vbfloat16m2_t vs2,
+ size_t vl) {
+ return __riscv_vfwnmsac(vm, vd, vs1, vs2, vl);
+}
+
+// CHECK-RV64-LABEL: define dso_local <vscale x 16 x float> @test_vfwnmsac_vv_bf16m4_f32m8_m(
+// CHECK-RV64-SAME: <vscale x 16 x i1> [[VM:%.*]], <vscale x 16 x float> [[VD:%.*]], <vscale x 16 x bfloat> [[VS1:%.*]], <vscale x 16 x bfloat> [[VS2:%.*]], i64 noundef [[VL:%.*]]) #[[ATTR0]] {
+// CHECK-RV64-NEXT: [[ENTRY:.*:]]
+// CHECK-RV64-NEXT: [[TMP0:%.*]] = call <vscale x 16 x float> @llvm.riscv.vfwnmsac.mask.nxv16f32.nxv16bf16.nxv16bf16.i64(<vscale x 16 x float> [[VD]], <vscale x 16 x bfloat> [[VS1]], <vscale x 16 x bfloat> [[VS2]], <vscale x 16 x i1> [[VM]], i64 7, i64 [[VL]], i64 3)
+// CHECK-RV64-NEXT: ret <vscale x 16 x float> [[TMP0]]
+//
+vfloat32m8_t test_vfwnmsac_vv_bf16m4_f32m8_m(vbool4_t vm, vfloat32m8_t vd,
+ vbfloat16m4_t vs1,
+ vbfloat16m4_t vs2, size_t vl) {
+ return __riscv_vfwnmsac(vm, vd, vs1, vs2, vl);
+}
+
+// CHECK-RV64-LABEL: define dso_local <vscale x 16 x float> @test_vfwnmsac_vf_bf16m4_f32m8_m(
+// CHECK-RV64-SAME: <vscale x 16 x i1> [[VM:%.*]], <vscale x 16 x float> [[VD:%.*]], bfloat noundef [[VS1:%.*]], <vscale x 16 x bfloat> [[VS2:%.*]], i64 noundef [[VL:%.*]]) #[[ATTR0]] {
+// CHECK-RV64-NEXT: [[ENTRY:.*:]]
+// CHECK-RV64-NEXT: [[TMP0:%.*]] = call <vscale x 16 x float> @llvm.riscv.vfwnmsac.mask.nxv16f32.bf16.nxv16bf16.i64(<vscale x 16 x float> [[VD]], bfloat [[VS1]], <vscale x 16 x bfloat> [[VS2]], <vscale x 16 x i1> [[VM]], i64 7, i64 [[VL]], i64 3)
+// CHECK-RV64-NEXT: ret <vscale x 16 x float> [[TMP0]]
+//
+vfloat32m8_t test_vfwnmsac_vf_bf16m4_f32m8_m(vbool4_t vm, vfloat32m8_t vd,
+ __bf16 vs1, vbfloat16m4_t vs2,
+ size_t vl) {
+ return __riscv_vfwnmsac(vm, vd, vs1, vs2, vl);
+}
+
+// CHECK-RV64-LABEL: define dso_local <vscale x 1 x float> @test_vfwnmsac_vv_bf16mf4_f32mf2_rm(
+// CHECK-RV64-SAME: <vscale x 1 x float> [[VD:%.*]], <vscale x 1 x bfloat> [[VS1:%.*]], <vscale x 1 x bfloat> [[VS2:%.*]], i64 noundef [[VL:%.*]]) #[[ATTR0]] {
+// CHECK-RV64-NEXT: [[ENTRY:.*:]]
+// CHECK-RV64-NEXT: [[TMP0:%.*]] = call <vscale x 1 x float> @llvm.riscv.vfwnmsac.nxv1f32.nxv1bf16.nxv1bf16.i64(<vscale x 1 x float> [[VD]], <vscale x 1 x bfloat> [[VS1]], <vscale x 1 x bfloat> [[VS2]], i64 0, i64 [[VL]], i64 3)
+// CHECK-RV64-NEXT: ret <vscale x 1 x float> [[TMP0]]
+//
+vfloat32mf2_t test_vfwnmsac_vv_bf16mf4_f32mf2_rm(vfloat32mf2_t vd,
+ vbfloat16mf4_t vs1,
+ vbfloat16mf4_t vs2,
+ size_t vl) {
+ return __riscv_vfwnmsac(vd, vs1, vs2, __RISCV_FRM_RNE, vl);
+}
+
+// CHECK-RV64-LABEL: define dso_local <vscale x 1 x float> @test_vfwnmsac_vf_bf16mf4_f32mf2_rm(
+// CHECK-RV64-SAME: <vscale x 1 x float> [[VD:%.*]], bfloat noundef [[VS1:%.*]], <vscale x 1 x bfloat> [[VS2:%.*]], i64 noundef [[VL:%.*]]) #[[ATTR0]] {
+// CHECK-RV64-NEXT: [[ENTRY:.*:]]
+// CHECK-RV64-NEXT: [[TMP0:%.*]] = call <vscale x 1 x float> @llvm.riscv.vfwnmsac.nxv1f32.bf16.nxv1bf16.i64(<vscale x 1 x float> [[VD]], bfloat [[VS1]], <vscale x 1 x bfloat> [[VS2]], i64 0, i64 [[VL]], i64 3)
+// CHECK-RV64-NEXT: ret <vscale x 1 x float> [[TMP0]]
+//
+vfloat32mf2_t test_vfwnmsac_vf_bf16mf4_f32mf2_rm(vfloat32mf2_t vd, __bf16 vs1,
+ vbfloat16mf4_t vs2,
+ size_t vl) {
+ return __riscv_vfwnmsac(vd, vs1, vs2, __RISCV_FRM_RNE, vl);
+}
+
+// CHECK-RV64-LABEL: define dso_local <vscale x 2 x float> @test_vfwnmsac_vv_bf16mf2_f32m1_rm(
+// CHECK-RV64-SAME: <vscale x 2 x float> [[VD:%.*]], <vscale x 2 x bfloat> [[VS1:%.*]], <vscale x 2 x bfloat> [[VS2:%.*]], i64 noundef [[VL:%.*]]) #[[ATTR0]] {
+// CHECK-RV64-NEXT: [[ENTRY:.*:]]
+// CHECK-RV64-NEXT: [[TMP0:%.*]] = call <vscale x 2 x float> @llvm.riscv.vfwnmsac.nxv2f32.nxv2bf16.nxv2bf16.i64(<vscale x 2 x float> [[VD]], <vscale x 2 x bfloat> [[VS1]], <vscale x 2 x bfloat> [[VS2]], i64 0, i64 [[VL]], i64 3)
+// CHECK-RV64-NEXT: ret <vscale x 2 x float> [[TMP0]]
+//
+vfloat32m1_t test_vfwnmsac_vv_bf16mf2_f32m1_rm(vfloat32m1_t vd,
+ vbfloat16mf2_t vs1,
+ vbfloat16mf2_t vs2, size_t vl) {
+ return __riscv_vfwnmsac(vd, vs1, vs2, __RISCV_FRM_RNE, vl);
+}
+
+// CHECK-RV64-LABEL: define dso_local <vscale x 2 x float> @test_vfwnmsac_vf_bf16mf2_f32m1_rm(
+// CHECK-RV64-SAME: <vscale x 2 x float> [[VD:%.*]], bfloat noundef [[VS1:%.*]], <vscale x 2 x bfloat> [[VS2:%.*]], i64 noundef [[VL:%.*]]) #[[ATTR0]] {
+// CHECK-RV64-NEXT: [[ENTRY:.*:]]
+// CHECK-RV64-NEXT: [[TMP0:%.*]] = call <vscale x 2 x float> @llvm.riscv.vfwnmsac.nxv2f32.bf16.nxv2bf16.i64(<vscale x 2 x float> [[VD]], bfloat [[VS1]], <vscale x 2 x bfloat> [[VS2]], i64 0, i64 [[VL]], i64 3)
+// CHECK-RV64-NEXT: ret <vscale x 2 x float> [[TMP0]]
+//
+vfloat32m1_t test_vfwnmsac_vf_bf16mf2_f32m1_rm(vfloat32m1_t vd, __bf16 vs1,
+ vbfloat16mf2_t vs2, size_t vl) {
+ return __riscv_vfwnmsac(vd, vs1, vs2, __RISCV_FRM_RNE, vl);
+}
+
+// CHECK-RV64-LABEL: define dso_local <vscale x 4 x float> @test_vfwnmsac_vv_bf16m1_f32m2_rm(
+// CHECK-RV64-SAME: <vscale x 4 x float> [[VD:%.*]], <vscale x 4 x bfloat> [[VS1:%.*]], <vscale x 4 x bfloat> [[VS2:%.*]], i64 noundef [[VL:%.*]]) #[[ATTR0]] {
+// CHECK-RV64-NEXT: [[ENTRY:.*:]]
+// CHECK-RV64-NEXT: [[TMP0:%.*]] = call <vscale x 4 x float> @llvm.riscv.vfwnmsac.nxv4f32.nxv4bf16.nxv4bf16.i64(<vscale x 4 x float> [[VD]], <vscale x 4 x bfloat> [[VS1]], <vscale x 4 x bfloat> [[VS2]], i64 0, i64 [[VL]], i64 3)
+// CHECK-RV64-NEXT: ret <vscale x 4 x float> [[TMP0]]
+//
+vfloat32m2_t test_vfwnmsac_vv_bf16m1_f32m2_rm(vfloat32m2_t vd,
+ vbfloat16m1_t vs1,
+ vbfloat16m1_t vs2, size_t vl) {
+ return __riscv_vfwnmsac(vd, vs1, vs2, __RISCV_FRM_RNE, vl);
+}
+
+// CHECK-RV64-LABEL: define dso_local <vscale x 4 x float> @test_vfwnmsac_vf_bf16m1_f32m2_rm(
+// CHECK-RV64-SAME: <vscale x 4 x float> [[VD:%.*]], bfloat noundef [[VS1:%.*]], <vscale x 4 x bfloat> [[VS2:%.*]], i64 noundef [[VL:%.*]]) #[[ATTR0]] {
+// CHECK-RV64-NEXT: [[ENTRY:.*:]]
+// CHECK-RV64-NEXT: [[TMP0:%.*]] = call <vscale x 4 x float> @llvm.riscv.vfwnmsac.nxv4f32.bf16.nxv4bf16.i64(<vscale x 4 x float> [[VD]], bfloat [[VS1]], <vscale x 4 x bfloat> [[VS2]], i64 0, i64 [[VL]], i64 3)
+// CHECK-RV64-NEXT: ret <vscale x 4 x float> [[TMP0]]
+//
+vfloat32m2_t test_vfwnmsac_vf_bf16m1_f32m2_rm(vfloat32m2_t vd, __bf16 vs1,
+ vbfloat16m1_t vs2, size_t vl) {
+ return __riscv_vfwnmsac(vd, vs1, vs2, __RISCV_FRM_RNE, vl);
+}
+
+// CHECK-RV64-LABEL: define dso_local <vscale x 8 x float> @test_vfwnmsac_vv_bf16m2_f32m4_rm(
+// CHECK-RV64-SAME: <vscale x 8 x float> [[VD:%.*]], <vscale x 8 x bfloat> [[VS1:%.*]], <vscale x 8 x bfloat> [[VS2:%.*]], i64 noundef [[VL:%.*]]) #[[ATTR0]] {
+// CHECK-RV64-NEXT: [[ENTRY:.*:]]
+// CHECK-RV64-NEXT: [[TMP0:%.*]] = call <vscale x 8 x float> @llvm.riscv.vfwnmsac.nxv8f32.nxv8bf16.nxv8bf16.i64(<vscale x 8 x float> [[VD]], <vscale x 8 x bfloat> [[VS1]], <vscale x 8 x bfloat> [[VS2]], i64 0, i64 [[VL]], i64 3)
+// CHECK-RV64-NEXT: ret <vscale x 8 x float> [[TMP0]]
+//
+vfloat32m4_t test_vfwnmsac_vv_bf16m2_f32m4_rm(vfloat32m4_t vd,
+ vbfloat16m2_t vs1,
+ vbfloat16m2_t vs2, size_t vl) {
+ return __riscv_vfwnmsac(vd, vs1, vs2, __RISCV_FRM_RNE, vl);
+}
+
+// CHECK-RV64-LABEL: define dso_local <vscale x 8 x float> @test_vfwnmsac_vf_bf16m2_f32m4_rm(
+// CHECK-RV64-SAME: <vscale x 8 x float> [[VD:%.*]], bfloat noundef [[VS1:%.*]], <vscale x 8 x bfloat> [[VS2:%.*]], i64 noundef [[VL:%.*]]) #[[ATTR0]] {
+// CHECK-RV64-NEXT: [[ENTRY:.*:]]
+// CHECK-RV64-NEXT: [[TMP0:%.*]] = call <vscale x 8 x float> @llvm.riscv.vfwnmsac.nxv8f32.bf16.nxv8bf16.i64(<vscale x 8 x float> [[VD]], bfloat [[VS1]], <vscale x 8 x bfloat> [[VS2]], i64 0, i64 [[VL]], i64 3)
+// CHECK-RV64-NEXT: ret <vscale x 8 x float> [[TMP0]]
+//
+vfloat32m4_t test_vfwnmsac_vf_bf16m2_f32m4_rm(vfloat32m4_t vd, __bf16 vs1,
+ vbfloat16m2_t vs2, size_t vl) {
+ return __riscv_vfwnmsac(vd, vs1, vs2, __RISCV_FRM_RNE, vl);
+}
+
+// CHECK-RV64-LABEL: define dso_local <vscale x 16 x float> @test_vfwnmsac_vv_bf16m4_f32m8_rm(
+// CHECK-RV64-SAME: <vscale x 16 x float> [[VD:%.*]], <vscale x 16 x bfloat> [[VS1:%.*]], <vscale x 16 x bfloat> [[VS2:%.*]], i64 noundef [[VL:%.*]]) #[[ATTR0]] {
+// CHECK-RV64-NEXT: [[ENTRY:.*:]]
+// CHECK-RV64-NEXT: [[TMP0:%.*]] = call <vscale x 16 x float> @llvm.riscv.vfwnmsac.nxv16f32.nxv16bf16.nxv16bf16.i64(<vscale x 16 x float> [[VD]], <vscale x 16 x bfloat> [[VS1]], <vscale x 16 x bfloat> [[VS2]], i64 0, i64 [[VL]], i64 3)
+// CHECK-RV64-NEXT: ret <vscale x 16 x float> [[TMP0]]
+//
+vfloat32m8_t test_vfwnmsac_vv_bf16m4_f32m8_rm(vfloat32m8_t vd,
+ vbfloat16m4_t vs1,
+ vbfloat16m4_t vs2, size_t vl) {
+ return __riscv_vfwnmsac(vd, vs1, vs2, __RISCV_FRM_RNE, vl);
+}
+
+// CHECK-RV64-LABEL: define dso_local <vscale x 16 x float> @test_vfwnmsac_vf_bf16m4_f32m8_rm(
+// CHECK-RV64-SAME: <vscale x 16 x float> [[VD:%.*]], bfloat noundef [[VS1:%.*]], <vscale x 16 x bfloat> [[VS2:%.*]], i64 noundef [[VL:%.*]]) #[[ATTR0]] {
+// CHECK-RV64-NEXT: [[ENTRY:.*:]]
+// CHECK-RV64-NEXT: [[TMP0:%.*]] = call <vscale x 16 x float> @llvm.riscv.vfwnmsac.nxv16f32.bf16.nxv16bf16.i64(<vscale x 16 x float> [[VD]], bfloat [[VS1]], <vscale x 16 x bfloat> [[VS2]], i64 0, i64 [[VL]], i64 3)
+// CHECK-RV64-NEXT: ret <vscale x 16 x float> [[TMP0]]
+//
+vfloat32m8_t test_vfwnmsac_vf_bf16m4_f32m8_rm(vfloat32m8_t vd, __bf16 vs1,
+ vbfloat16m4_t vs2, size_t vl) {
+ return __riscv_vfwnmsac(vd, vs1, vs2, __RISCV_FRM_RNE, vl);
+}
+
+// CHECK-RV64-LABEL: define dso_local <vscale x 1 x float> @test_vfwnmsac_vv_bf16mf4_f32mf2_rm_m(
+// CHECK-RV64-SAME: <vscale x 1 x i1> [[VM:%.*]], <vscale x 1 x float> [[VD:%.*]], <vscale x 1 x bfloat> [[VS1:%.*]], <vscale x 1 x bfloat> [[VS2:%.*]], i64 noundef [[VL:%.*]]) #[[ATTR0]] {
+// CHECK-RV64-NEXT: [[ENTRY:.*:]]
+// CHECK-RV64-NEXT: [[TMP0:%.*]] = call <vscale x 1 x float> @llvm.riscv.vfwnmsac.mask.nxv1f32.nxv1bf16.nxv1bf16.i64(<vscale x 1 x float> [[VD]], <vscale x 1 x bfloat> [[VS1]], <vscale x 1 x bfloat> [[VS2]], <vscale x 1 x i1> [[VM]], i64 0, i64 [[VL]], i64 3)
+// CHECK-RV64-NEXT: ret <vscale x 1 x float> [[TMP0]]
+//
+vfloat32mf2_t test_vfwnmsac_vv_bf16mf4_f32mf2_rm_m(vbool64_t vm,
+ vfloat32mf2_t vd,
+ vbfloat16mf4_t vs1,
+ vbfloat16mf4_t vs2,
+ size_t vl) {
+ return __riscv_vfwnmsac(vm, vd, vs1, vs2, __RISCV_FRM_RNE, vl);
+}
+
+// CHECK-RV64-LABEL: define dso_local <vscale x 1 x float> @test_vfwnmsac_vf_bf16mf4_f32mf2_rm_m(
+// CHECK-RV64-SAME: <vscale x 1 x i1> [[VM:%.*]], <vscale x 1 x float> [[VD:%.*]], bfloat noundef [[VS1:%.*]], <vscale x 1 x bfloat> [[VS2:%.*]], i64 noundef [[VL:%.*]]) #[[ATTR0]] {
+// CHECK-RV64-NEXT: [[ENTRY:.*:]]
+// CHECK-RV64-NEXT: [[TMP0:%.*]] = call <vscale x 1 x float> @llvm.riscv.vfwnmsac.mask.nxv1f32.bf16.nxv1bf16.i64(<vscale x 1 x float> [[VD]], bfloat [[VS1]], <vscale x 1 x bfloat> [[VS2]], <vscale x 1 x i1> [[VM]], i64 0, i64 [[VL]], i64 3)
+// CHECK-RV64-NEXT: ret <vscale x 1 x float> [[TMP0]]
+//
+vfloat32mf2_t test_vfwnmsac_vf_bf16mf4_f32mf2_rm_m(vbool64_t vm,
+ vfloat32mf2_t vd, __bf16 vs1,
+ vbfloat16mf4_t vs2,
+ size_t vl) {
+ return __riscv_vfwnmsac(vm, vd, vs1, vs2, __RISCV_FRM_RNE, vl);
+}
+
+// CHECK-RV64-LABEL: define dso_local <vscale x 2 x float> @test_vfwnmsac_vv_bf16mf2_f32m1_rm_m(
+// CHECK-RV64-SAME: <vscale x 2 x i1> [[VM:%.*]], <vscale x 2 x float> [[VD:%.*]], <vscale x 2 x bfloat> [[VS1:%.*]], <vscale x 2 x bfloat> [[VS2:%.*]], i64 noundef [[VL:%.*]]) #[[ATTR0]] {
+// CHECK-RV64-NEXT: [[ENTRY:.*:]]
+// CHECK-RV64-NEXT: [[TMP0:%.*]] = call <vscale x 2 x float> @llvm.riscv.vfwnmsac.mask.nxv2f32.nxv2bf16.nxv2bf16.i64(<vscale x 2 x float> [[VD]], <vscale x 2 x bfloat> [[VS1]], <vscale x 2 x bfloat> [[VS2]], <vscale x 2 x i1> [[VM]], i64 0, i64 [[VL]], i64 3)
+// CHECK-RV64-NEXT: ret <vscale x 2 x float> [[TMP0]]
+//
+vfloat32m1_t test_vfwnmsac_vv_bf16mf2_f32m1_rm_m(vbool32_t vm, vfloat32m1_t vd,
+ vbfloat16mf2_t vs1,
+ vbfloat16mf2_t vs2,
+ size_t vl) {
+ return __riscv_vfwnmsac(vm, vd, vs1, vs2, __RISCV_FRM_RNE, vl);
+}
+
+// CHECK-RV64-LABEL: define dso_local <vscale x 2 x float> @test_vfwnmsac_vf_bf16mf2_f32m1_rm_m(
+// CHECK-RV64-SAME: <vscale x 2 x i1> [[VM:%.*]], <vscale x 2 x float> [[VD:%.*]], bfloat noundef [[VS1:%.*]], <vscale x 2 x bfloat> [[VS2:%.*]], i64 noundef [[VL:%.*]]) #[[ATTR0]] {
+// CHECK-RV64-NEXT: [[ENTRY:.*:]]
+// CHECK-RV64-NEXT: [[TMP0:%.*]] = call <vscale x 2 x float> @llvm.riscv.vfwnmsac.mask.nxv2f32.bf16.nxv2bf16.i64(<vscale x 2 x float> [[VD]], bfloat [[VS1]], <vscale x 2 x bfloat> [[VS2]], <vscale x 2 x i1> [[VM]], i64 0, i64 [[VL]], i64 3)
+// CHECK-RV64-NEXT: ret <vscale x 2 x float> [[TMP0]]
+//
+vfloat32m1_t test_vfwnmsac_vf_bf16mf2_f32m1_rm_m(vbool32_t vm, vfloat32m1_t vd,
+ __bf16 vs1, vbfloat16mf2_t vs2,
+ size_t vl) {
+ return __riscv_vfwnmsac(vm, vd, vs1, vs2, __RISCV_FRM_RNE, vl);
+}
+
+// CHECK-RV64-LABEL: define dso_local <vscale x 4 x float> @test_vfwnmsac_vv_bf16m1_f32m2_rm_m(
+// CHECK-RV64-SAME: <vscale x 4 x i1> [[VM:%.*]], <vscale x 4 x float> [[VD:%.*]], <vscale x 4 x bfloat> [[VS1:%.*]], <vscale x 4 x bfloat> [[VS2:%.*]], i64 noundef [[VL:%.*]]) #[[ATTR0]] {
+// CHECK-RV64-NEXT: [[ENTRY:.*:]]
+// CHECK-RV64-NEXT: [[TMP0:%.*]] = call <vscale x 4 x float> @llvm.riscv.vfwnmsac.mask.nxv4f32.nxv4bf16.nxv4bf16.i64(<vscale x 4 x float> [[VD]], <vscale x 4 x bfloat> [[VS1]], <vscale x 4 x bfloat> [[VS2]], <vscale x 4 x i1> [[VM]], i64 0, i64 [[VL]], i64 3)
+// CHECK-RV64-NEXT: ret <vscale x 4 x float> [[TMP0]]
+//
+vfloat32m2_t test_vfwnmsac_vv_bf16m1_f32m2_rm_m(vbool16_t vm, vfloat32m2_t vd,
+ vbfloat16m1_t vs1,
+ vbfloat16m1_t vs2, size_t vl) {
+ return __riscv_vfwnmsac(vm, vd, vs1, vs2, __RISCV_FRM_RNE, vl);
+}
+
+// CHECK-RV64-LABEL: define dso_local <vscale x 4 x float> @test_vfwnmsac_vf_bf16m1_f32m2_rm_m(
+// CHECK-RV64-SAME: <vscale x 4 x i1> [[VM:%.*]], <vscale x 4 x float> [[VD:%.*]], bfloat noundef [[VS1:%.*]], <vscale x 4 x bfloat> [[VS2:%.*]], i64 noundef [[VL:%.*]]) #[[ATTR0]] {
+// CHECK-RV64-NEXT: [[ENTRY:.*:]]
+// CHECK-RV64-NEXT: [[TMP0:%.*]] = call <vscale x 4 x float> @llvm.riscv.vfwnmsac.mask.nxv4f32.bf16.nxv4bf16.i64(<vscale x 4 x float> [[VD]], bfloat [[VS1]], <vscale x 4 x bfloat> [[VS2]], <vscale x 4 x i1> [[VM]], i64 0, i64 [[VL]], i64 3)
+// CHECK-RV64-NEXT: ret <vscale x 4 x float> [[TMP0]]
+//
+vfloat32m2_t test_vfwnmsac_vf_bf16m1_f32m2_rm_m(vbool16_t vm, vfloat32m2_t vd,
+ __bf16 vs1, vbfloat16m1_t vs2,
+ size_t vl) {
+ return __riscv_vfwnmsac(vm, vd, vs1, vs2, __RISCV_FRM_RNE, vl);
+}
+
+// CHECK-RV64-LABEL: define dso_local <vscale x 8 x float> @test_vfwnmsac_vv_bf16m2_f32m4_rm_m(
+// CHECK-RV64-SAME: <vscale x 8 x i1> [[VM:%.*]], <vscale x 8 x float> [[VD:%.*]], <vscale x 8 x bfloat> [[VS1:%.*]], <vscale x 8 x bfloat> [[VS2:%.*]], i64 noundef [[VL:%.*]]) #[[ATTR0]] {
+// CHECK-RV64-NEXT: [[ENTRY:.*:]]
+// CHECK-RV64-NEXT: [[TMP0:%.*]] = call <vscale x 8 x float> @llvm.riscv.vfwnmsac.mask.nxv8f32.nxv8bf16.nxv8bf16.i64(<vscale x 8 x float> [[VD]], <vscale x 8 x bfloat> [[VS1]], <vscale x 8 x bfloat> [[VS2]], <vscale x 8 x i1> [[VM]], i64 0, i64 [[VL]], i64 3)
+// CHECK-RV64-NEXT: ret <vscale x 8 x float> [[TMP0]]
+//
+vfloat32m4_t test_vfwnmsac_vv_bf16m2_f32m4_rm_m(vbool8_t vm, vfloat32m4_t vd,
+ vbfloat16m2_t vs1,
+ vbfloat16m2_t vs2, size_t vl) {
+ return __riscv_vfwnmsac(vm, vd, vs1, vs2, __RISCV_FRM_RNE, vl);
+}
+
+// CHECK-RV64-LABEL: define dso_local <vscale x 8 x float> @test_vfwnmsac_vf_bf16m2_f32m4_rm_m(
+// CHECK-RV64-SAME: <vscale x 8 x i1> [[VM:%.*]], <vscale x 8 x float> [[VD:%.*]], bfloat noundef [[VS1:%.*]], <vscale x 8 x bfloat> [[VS2:%.*]], i64 noundef [[VL:%.*]]) #[[ATTR0]] {
+// CHECK-RV64-NEXT: [[ENTRY:.*:]]
+// CHECK-RV64-NEXT: [[TMP0:%.*]] = call <vscale x 8 x float> @llvm.riscv.vfwnmsac.mask.nxv8f32.bf16.nxv8bf16.i64(<vscale x 8 x float> [[VD]], bfloat [[VS1]], <vscale x 8 x bfloat> [[VS2]], <vscale x 8 x i1> [[VM]], i64 0, i64 [[VL]], i64 3)
+// CHECK-RV64-NEXT: ret <vscale x 8 x float> [[TMP0]]
+//
+vfloat32m4_t test_vfwnmsac_vf_bf16m2_f32m4_rm_m(vbool8_t vm, vfloat32m4_t vd,
+ __bf16 vs1, vbfloat16m2_t vs2,
+ size_t vl) {
+ return __riscv_vfwnmsac(vm, vd, vs1, vs2, __RISCV_FRM_RNE, vl);
+}
+
+// CHECK-RV64-LABEL: define dso_local <vscale x 16 x float> @test_vfwnmsac_vv_bf16m4_f32m8_rm_m(
+// CHECK-RV64-SAME: <vscale x 16 x i1> [[VM:%.*]], <vscale x 16 x float> [[VD:%.*]], <vscale x 16 x bfloat> [[VS1:%.*]], <vscale x 16 x bfloat> [[VS2:%.*]], i64 noundef [[VL:%.*]]) #[[ATTR0]] {
+// CHECK-RV64-NEXT: [[ENTRY:.*:]]
+// CHECK-RV64-NEXT: [[TMP0:%.*]] = call <vscale x 16 x float> @llvm.riscv.vfwnmsac.mask.nxv16f32.nxv16bf16.nxv16bf16.i64(<vscale x 16 x float> [[VD]], <vscale x 16 x bfloat> [[VS1]], <vscale x 16 x bfloat> [[VS2]], <vscale x 16 x i1> [[VM]], i64 0, i64 [[VL]], i64 3)
+// CHECK-RV64-NEXT: ret <vscale x 16 x float> [[TMP0]]
+//
+vfloat32m8_t test_vfwnmsac_vv_bf16m4_f32m8_rm_m(vbool4_t vm, vfloat32m8_t vd,
+ vbfloat16m4_t vs1,
+ vbfloat16m4_t vs2, size_t vl) {
+ return __riscv_vfwnmsac(vm, vd, vs1, vs2, __RISCV_FRM_RNE, vl);
+}
+
+// CHECK-RV64-LABEL: define dso_local <vscale x 16 x float> @test_vfwnmsac_vf_bf16m4_f32m8_rm_m(
+// CHECK-RV64-SAME: <vscale x 16 x i1> [[VM:%.*]], <vscale x 16 x float> [[VD:%.*]], bfloat noundef [[VS1:%.*]], <vscale x 16 x bfloat> [[VS2:%.*]], i64 noundef [[VL:%.*]]) #[[ATTR0]] {
+// CHECK-RV64-NEXT: [[ENTRY:.*:]]
+// CHECK-RV64-NEXT: [[TMP0:%.*]] = call <vscale x 16 x float> @llvm.riscv.vfwnmsac.mask.nxv16f32.bf16.nxv16bf16.i64(<vscale x 16 x float> [[VD]], bfloat [[VS1]], <vscale x 16 x bfloat> [[VS2]], <vscale x 16 x i1> [[VM]], i64 0, i64 [[VL]], i64 3)
+// CHECK-RV64-NEXT: ret <vscale x 16 x float> [[TMP0]]
+//
+vfloat32m8_t test_vfwnmsac_vf_bf16m4_f32m8_rm_m(vbool4_t vm, vfloat32m8_t vd,
+ __bf16 vs1, vbfloat16m4_t vs2,
+ size_t vl) {
+ return __riscv_vfwnmsac(vm, vd, vs1, vs2, __RISCV_FRM_RNE, vl);
+}
diff --git a/clang/test/CodeGen/RISCV/rvv-intrinsics-autogenerated/zvfbfa/non-policy/overloaded/vfwsub.c b/clang/test/CodeGen/RISCV/rvv-intrinsics-autogenerated/zvfbfa/non-policy/overloaded/vfwsub.c
new file mode 100644
index 0000000..2e22e22
--- /dev/null
+++ b/clang/test/CodeGen/RISCV/rvv-intrinsics-autogenerated/zvfbfa/non-policy/overloaded/vfwsub.c
@@ -0,0 +1,893 @@
+// NOTE: Assertions have been autogenerated by utils/update_cc_test_checks.py UTC_ARGS: --version 5
+// REQUIRES: riscv-registered-target
+// RUN: %clang_cc1 -triple riscv64 -target-feature +v \
+// RUN: -target-feature +experimental-zvfbfa -disable-O0-optnone \
+// RUN: -emit-llvm %s -o - | opt -S -passes=mem2reg | \
+// RUN: FileCheck --check-prefix=CHECK-RV64 %s
+
+#include <riscv_vector.h>
+
+// CHECK-RV64-LABEL: define dso_local <vscale x 1 x float> @test_vfwsub_vv_bf16mf4_f32mf2(
+// CHECK-RV64-SAME: <vscale x 1 x bfloat> [[VS2:%.*]], <vscale x 1 x bfloat> [[VS1:%.*]], i64 noundef [[VL:%.*]]) #[[ATTR0:[0-9]+]] {
+// CHECK-RV64-NEXT: [[ENTRY:.*:]]
+// CHECK-RV64-NEXT: [[TMP0:%.*]] = call <vscale x 1 x float> @llvm.riscv.vfwsub.nxv1f32.nxv1bf16.nxv1bf16.i64(<vscale x 1 x float> poison, <vscale x 1 x bfloat> [[VS2]], <vscale x 1 x bfloat> [[VS1]], i64 7, i64 [[VL]])
+// CHECK-RV64-NEXT: ret <vscale x 1 x float> [[TMP0]]
+//
+vfloat32mf2_t test_vfwsub_vv_bf16mf4_f32mf2(vbfloat16mf4_t vs2,
+ vbfloat16mf4_t vs1, size_t vl) {
+ return __riscv_vfwsub_vv(vs2, vs1, vl);
+}
+
+// CHECK-RV64-LABEL: define dso_local <vscale x 1 x float> @test_vfwsub_vf_bf16mf4_f32mf2(
+// CHECK-RV64-SAME: <vscale x 1 x bfloat> [[VS2:%.*]], bfloat noundef [[RS1:%.*]], i64 noundef [[VL:%.*]]) #[[ATTR0]] {
+// CHECK-RV64-NEXT: [[ENTRY:.*:]]
+// CHECK-RV64-NEXT: [[TMP0:%.*]] = call <vscale x 1 x float> @llvm.riscv.vfwsub.nxv1f32.nxv1bf16.bf16.i64(<vscale x 1 x float> poison, <vscale x 1 x bfloat> [[VS2]], bfloat [[RS1]], i64 7, i64 [[VL]])
+// CHECK-RV64-NEXT: ret <vscale x 1 x float> [[TMP0]]
+//
+vfloat32mf2_t test_vfwsub_vf_bf16mf4_f32mf2(vbfloat16mf4_t vs2, __bf16 rs1,
+ size_t vl) {
+ return __riscv_vfwsub_vf(vs2, rs1, vl);
+}
+
+// CHECK-RV64-LABEL: define dso_local <vscale x 1 x float> @test_vfwsub_wv_bf16mf4_f32mf2(
+// CHECK-RV64-SAME: <vscale x 1 x float> [[VS2:%.*]], <vscale x 1 x bfloat> [[VS1:%.*]], i64 noundef [[VL:%.*]]) #[[ATTR0]] {
+// CHECK-RV64-NEXT: [[ENTRY:.*:]]
+// CHECK-RV64-NEXT: [[TMP0:%.*]] = call <vscale x 1 x float> @llvm.riscv.vfwsub.w.nxv1f32.nxv1bf16.i64(<vscale x 1 x float> poison, <vscale x 1 x float> [[VS2]], <vscale x 1 x bfloat> [[VS1]], i64 7, i64 [[VL]])
+// CHECK-RV64-NEXT: ret <vscale x 1 x float> [[TMP0]]
+//
+vfloat32mf2_t test_vfwsub_wv_bf16mf4_f32mf2(vfloat32mf2_t vs2,
+ vbfloat16mf4_t vs1, size_t vl) {
+ return __riscv_vfwsub_wv(vs2, vs1, vl);
+}
+
+// CHECK-RV64-LABEL: define dso_local <vscale x 1 x float> @test_vfwsub_wf_bf16_f32mf2(
+// CHECK-RV64-SAME: <vscale x 1 x float> [[VS2:%.*]], bfloat noundef [[RS1:%.*]], i64 noundef [[VL:%.*]]) #[[ATTR0]] {
+// CHECK-RV64-NEXT: [[ENTRY:.*:]]
+// CHECK-RV64-NEXT: [[TMP0:%.*]] = call <vscale x 1 x float> @llvm.riscv.vfwsub.w.nxv1f32.bf16.i64(<vscale x 1 x float> poison, <vscale x 1 x float> [[VS2]], bfloat [[RS1]], i64 7, i64 [[VL]])
+// CHECK-RV64-NEXT: ret <vscale x 1 x float> [[TMP0]]
+//
+vfloat32mf2_t test_vfwsub_wf_bf16_f32mf2(vfloat32mf2_t vs2, __bf16 rs1,
+ size_t vl) {
+ return __riscv_vfwsub_wf(vs2, rs1, vl);
+}
+
+// CHECK-RV64-LABEL: define dso_local <vscale x 2 x float> @test_vfwsub_vv_bf16mf2_f32m1(
+// CHECK-RV64-SAME: <vscale x 2 x bfloat> [[VS2:%.*]], <vscale x 2 x bfloat> [[VS1:%.*]], i64 noundef [[VL:%.*]]) #[[ATTR0]] {
+// CHECK-RV64-NEXT: [[ENTRY:.*:]]
+// CHECK-RV64-NEXT: [[TMP0:%.*]] = call <vscale x 2 x float> @llvm.riscv.vfwsub.nxv2f32.nxv2bf16.nxv2bf16.i64(<vscale x 2 x float> poison, <vscale x 2 x bfloat> [[VS2]], <vscale x 2 x bfloat> [[VS1]], i64 7, i64 [[VL]])
+// CHECK-RV64-NEXT: ret <vscale x 2 x float> [[TMP0]]
+//
+vfloat32m1_t test_vfwsub_vv_bf16mf2_f32m1(vbfloat16mf2_t vs2,
+ vbfloat16mf2_t vs1, size_t vl) {
+ return __riscv_vfwsub_vv(vs2, vs1, vl);
+}
+
+// CHECK-RV64-LABEL: define dso_local <vscale x 2 x float> @test_vfwsub_vf_bf16mf2_f32m1(
+// CHECK-RV64-SAME: <vscale x 2 x bfloat> [[VS2:%.*]], bfloat noundef [[RS1:%.*]], i64 noundef [[VL:%.*]]) #[[ATTR0]] {
+// CHECK-RV64-NEXT: [[ENTRY:.*:]]
+// CHECK-RV64-NEXT: [[TMP0:%.*]] = call <vscale x 2 x float> @llvm.riscv.vfwsub.nxv2f32.nxv2bf16.bf16.i64(<vscale x 2 x float> poison, <vscale x 2 x bfloat> [[VS2]], bfloat [[RS1]], i64 7, i64 [[VL]])
+// CHECK-RV64-NEXT: ret <vscale x 2 x float> [[TMP0]]
+//
+vfloat32m1_t test_vfwsub_vf_bf16mf2_f32m1(vbfloat16mf2_t vs2, __bf16 rs1,
+ size_t vl) {
+ return __riscv_vfwsub_vf(vs2, rs1, vl);
+}
+
+// CHECK-RV64-LABEL: define dso_local <vscale x 2 x float> @test_vfwsub_wv_bf16mf2_f32m1(
+// CHECK-RV64-SAME: <vscale x 2 x float> [[VS2:%.*]], <vscale x 2 x bfloat> [[VS1:%.*]], i64 noundef [[VL:%.*]]) #[[ATTR0]] {
+// CHECK-RV64-NEXT: [[ENTRY:.*:]]
+// CHECK-RV64-NEXT: [[TMP0:%.*]] = call <vscale x 2 x float> @llvm.riscv.vfwsub.w.nxv2f32.nxv2bf16.i64(<vscale x 2 x float> poison, <vscale x 2 x float> [[VS2]], <vscale x 2 x bfloat> [[VS1]], i64 7, i64 [[VL]])
+// CHECK-RV64-NEXT: ret <vscale x 2 x float> [[TMP0]]
+//
+vfloat32m1_t test_vfwsub_wv_bf16mf2_f32m1(vfloat32m1_t vs2, vbfloat16mf2_t vs1,
+ size_t vl) {
+ return __riscv_vfwsub_wv(vs2, vs1, vl);
+}
+
+// CHECK-RV64-LABEL: define dso_local <vscale x 2 x float> @test_vfwsub_wf_bf16_f32m1(
+// CHECK-RV64-SAME: <vscale x 2 x float> [[VS2:%.*]], bfloat noundef [[RS1:%.*]], i64 noundef [[VL:%.*]]) #[[ATTR0]] {
+// CHECK-RV64-NEXT: [[ENTRY:.*:]]
+// CHECK-RV64-NEXT: [[TMP0:%.*]] = call <vscale x 2 x float> @llvm.riscv.vfwsub.w.nxv2f32.bf16.i64(<vscale x 2 x float> poison, <vscale x 2 x float> [[VS2]], bfloat [[RS1]], i64 7, i64 [[VL]])
+// CHECK-RV64-NEXT: ret <vscale x 2 x float> [[TMP0]]
+//
+vfloat32m1_t test_vfwsub_wf_bf16_f32m1(vfloat32m1_t vs2, __bf16 rs1,
+ size_t vl) {
+ return __riscv_vfwsub_wf(vs2, rs1, vl);
+}
+
+// CHECK-RV64-LABEL: define dso_local <vscale x 4 x float> @test_vfwsub_vv_bf16m1_f32m2(
+// CHECK-RV64-SAME: <vscale x 4 x bfloat> [[VS2:%.*]], <vscale x 4 x bfloat> [[VS1:%.*]], i64 noundef [[VL:%.*]]) #[[ATTR0]] {
+// CHECK-RV64-NEXT: [[ENTRY:.*:]]
+// CHECK-RV64-NEXT: [[TMP0:%.*]] = call <vscale x 4 x float> @llvm.riscv.vfwsub.nxv4f32.nxv4bf16.nxv4bf16.i64(<vscale x 4 x float> poison, <vscale x 4 x bfloat> [[VS2]], <vscale x 4 x bfloat> [[VS1]], i64 7, i64 [[VL]])
+// CHECK-RV64-NEXT: ret <vscale x 4 x float> [[TMP0]]
+//
+vfloat32m2_t test_vfwsub_vv_bf16m1_f32m2(vbfloat16m1_t vs2, vbfloat16m1_t vs1,
+ size_t vl) {
+ return __riscv_vfwsub_vv(vs2, vs1, vl);
+}
+
+// CHECK-RV64-LABEL: define dso_local <vscale x 4 x float> @test_vfwsub_vf_bf16m1_f32m2(
+// CHECK-RV64-SAME: <vscale x 4 x bfloat> [[VS2:%.*]], bfloat noundef [[RS1:%.*]], i64 noundef [[VL:%.*]]) #[[ATTR0]] {
+// CHECK-RV64-NEXT: [[ENTRY:.*:]]
+// CHECK-RV64-NEXT: [[TMP0:%.*]] = call <vscale x 4 x float> @llvm.riscv.vfwsub.nxv4f32.nxv4bf16.bf16.i64(<vscale x 4 x float> poison, <vscale x 4 x bfloat> [[VS2]], bfloat [[RS1]], i64 7, i64 [[VL]])
+// CHECK-RV64-NEXT: ret <vscale x 4 x float> [[TMP0]]
+//
+vfloat32m2_t test_vfwsub_vf_bf16m1_f32m2(vbfloat16m1_t vs2, __bf16 rs1,
+ size_t vl) {
+ return __riscv_vfwsub_vf(vs2, rs1, vl);
+}
+
+// CHECK-RV64-LABEL: define dso_local <vscale x 4 x float> @test_vfwsub_wv_bf16m1_f32m2(
+// CHECK-RV64-SAME: <vscale x 4 x float> [[VS2:%.*]], <vscale x 4 x bfloat> [[VS1:%.*]], i64 noundef [[VL:%.*]]) #[[ATTR0]] {
+// CHECK-RV64-NEXT: [[ENTRY:.*:]]
+// CHECK-RV64-NEXT: [[TMP0:%.*]] = call <vscale x 4 x float> @llvm.riscv.vfwsub.w.nxv4f32.nxv4bf16.i64(<vscale x 4 x float> poison, <vscale x 4 x float> [[VS2]], <vscale x 4 x bfloat> [[VS1]], i64 7, i64 [[VL]])
+// CHECK-RV64-NEXT: ret <vscale x 4 x float> [[TMP0]]
+//
+vfloat32m2_t test_vfwsub_wv_bf16m1_f32m2(vfloat32m2_t vs2, vbfloat16m1_t vs1,
+ size_t vl) {
+ return __riscv_vfwsub_wv(vs2, vs1, vl);
+}
+
+// CHECK-RV64-LABEL: define dso_local <vscale x 4 x float> @test_vfwsub_wf_bf16_f32m2(
+// CHECK-RV64-SAME: <vscale x 4 x float> [[VS2:%.*]], bfloat noundef [[RS1:%.*]], i64 noundef [[VL:%.*]]) #[[ATTR0]] {
+// CHECK-RV64-NEXT: [[ENTRY:.*:]]
+// CHECK-RV64-NEXT: [[TMP0:%.*]] = call <vscale x 4 x float> @llvm.riscv.vfwsub.w.nxv4f32.bf16.i64(<vscale x 4 x float> poison, <vscale x 4 x float> [[VS2]], bfloat [[RS1]], i64 7, i64 [[VL]])
+// CHECK-RV64-NEXT: ret <vscale x 4 x float> [[TMP0]]
+//
+vfloat32m2_t test_vfwsub_wf_bf16_f32m2(vfloat32m2_t vs2, __bf16 rs1,
+ size_t vl) {
+ return __riscv_vfwsub_wf(vs2, rs1, vl);
+}
+
+// CHECK-RV64-LABEL: define dso_local <vscale x 8 x float> @test_vfwsub_vv_bf16m2_f32m4(
+// CHECK-RV64-SAME: <vscale x 8 x bfloat> [[VS2:%.*]], <vscale x 8 x bfloat> [[VS1:%.*]], i64 noundef [[VL:%.*]]) #[[ATTR0]] {
+// CHECK-RV64-NEXT: [[ENTRY:.*:]]
+// CHECK-RV64-NEXT: [[TMP0:%.*]] = call <vscale x 8 x float> @llvm.riscv.vfwsub.nxv8f32.nxv8bf16.nxv8bf16.i64(<vscale x 8 x float> poison, <vscale x 8 x bfloat> [[VS2]], <vscale x 8 x bfloat> [[VS1]], i64 7, i64 [[VL]])
+// CHECK-RV64-NEXT: ret <vscale x 8 x float> [[TMP0]]
+//
+vfloat32m4_t test_vfwsub_vv_bf16m2_f32m4(vbfloat16m2_t vs2, vbfloat16m2_t vs1,
+ size_t vl) {
+ return __riscv_vfwsub_vv(vs2, vs1, vl);
+}
+
+// CHECK-RV64-LABEL: define dso_local <vscale x 8 x float> @test_vfwsub_vf_bf16m2_f32m4(
+// CHECK-RV64-SAME: <vscale x 8 x bfloat> [[VS2:%.*]], bfloat noundef [[RS1:%.*]], i64 noundef [[VL:%.*]]) #[[ATTR0]] {
+// CHECK-RV64-NEXT: [[ENTRY:.*:]]
+// CHECK-RV64-NEXT: [[TMP0:%.*]] = call <vscale x 8 x float> @llvm.riscv.vfwsub.nxv8f32.nxv8bf16.bf16.i64(<vscale x 8 x float> poison, <vscale x 8 x bfloat> [[VS2]], bfloat [[RS1]], i64 7, i64 [[VL]])
+// CHECK-RV64-NEXT: ret <vscale x 8 x float> [[TMP0]]
+//
+vfloat32m4_t test_vfwsub_vf_bf16m2_f32m4(vbfloat16m2_t vs2, __bf16 rs1,
+ size_t vl) {
+ return __riscv_vfwsub_vf(vs2, rs1, vl);
+}
+
+// CHECK-RV64-LABEL: define dso_local <vscale x 8 x float> @test_vfwsub_wv_bf16m2_f32m4(
+// CHECK-RV64-SAME: <vscale x 8 x float> [[VS2:%.*]], <vscale x 8 x bfloat> [[VS1:%.*]], i64 noundef [[VL:%.*]]) #[[ATTR0]] {
+// CHECK-RV64-NEXT: [[ENTRY:.*:]]
+// CHECK-RV64-NEXT: [[TMP0:%.*]] = call <vscale x 8 x float> @llvm.riscv.vfwsub.w.nxv8f32.nxv8bf16.i64(<vscale x 8 x float> poison, <vscale x 8 x float> [[VS2]], <vscale x 8 x bfloat> [[VS1]], i64 7, i64 [[VL]])
+// CHECK-RV64-NEXT: ret <vscale x 8 x float> [[TMP0]]
+//
+vfloat32m4_t test_vfwsub_wv_bf16m2_f32m4(vfloat32m4_t vs2, vbfloat16m2_t vs1,
+ size_t vl) {
+ return __riscv_vfwsub_wv(vs2, vs1, vl);
+}
+
+// CHECK-RV64-LABEL: define dso_local <vscale x 8 x float> @test_vfwsub_wf_bf16_f32m4(
+// CHECK-RV64-SAME: <vscale x 8 x float> [[VS2:%.*]], bfloat noundef [[RS1:%.*]], i64 noundef [[VL:%.*]]) #[[ATTR0]] {
+// CHECK-RV64-NEXT: [[ENTRY:.*:]]
+// CHECK-RV64-NEXT: [[TMP0:%.*]] = call <vscale x 8 x float> @llvm.riscv.vfwsub.w.nxv8f32.bf16.i64(<vscale x 8 x float> poison, <vscale x 8 x float> [[VS2]], bfloat [[RS1]], i64 7, i64 [[VL]])
+// CHECK-RV64-NEXT: ret <vscale x 8 x float> [[TMP0]]
+//
+vfloat32m4_t test_vfwsub_wf_bf16_f32m4(vfloat32m4_t vs2, __bf16 rs1,
+ size_t vl) {
+ return __riscv_vfwsub_wf(vs2, rs1, vl);
+}
+
+// CHECK-RV64-LABEL: define dso_local <vscale x 16 x float> @test_vfwsub_vv_bf16m4_f32m8(
+// CHECK-RV64-SAME: <vscale x 16 x bfloat> [[VS2:%.*]], <vscale x 16 x bfloat> [[VS1:%.*]], i64 noundef [[VL:%.*]]) #[[ATTR0]] {
+// CHECK-RV64-NEXT: [[ENTRY:.*:]]
+// CHECK-RV64-NEXT: [[TMP0:%.*]] = call <vscale x 16 x float> @llvm.riscv.vfwsub.nxv16f32.nxv16bf16.nxv16bf16.i64(<vscale x 16 x float> poison, <vscale x 16 x bfloat> [[VS2]], <vscale x 16 x bfloat> [[VS1]], i64 7, i64 [[VL]])
+// CHECK-RV64-NEXT: ret <vscale x 16 x float> [[TMP0]]
+//
+vfloat32m8_t test_vfwsub_vv_bf16m4_f32m8(vbfloat16m4_t vs2, vbfloat16m4_t vs1,
+ size_t vl) {
+ return __riscv_vfwsub_vv(vs2, vs1, vl);
+}
+
+// CHECK-RV64-LABEL: define dso_local <vscale x 16 x float> @test_vfwsub_vf_bf16m4_f32m8(
+// CHECK-RV64-SAME: <vscale x 16 x bfloat> [[VS2:%.*]], bfloat noundef [[RS1:%.*]], i64 noundef [[VL:%.*]]) #[[ATTR0]] {
+// CHECK-RV64-NEXT: [[ENTRY:.*:]]
+// CHECK-RV64-NEXT: [[TMP0:%.*]] = call <vscale x 16 x float> @llvm.riscv.vfwsub.nxv16f32.nxv16bf16.bf16.i64(<vscale x 16 x float> poison, <vscale x 16 x bfloat> [[VS2]], bfloat [[RS1]], i64 7, i64 [[VL]])
+// CHECK-RV64-NEXT: ret <vscale x 16 x float> [[TMP0]]
+//
+vfloat32m8_t test_vfwsub_vf_bf16m4_f32m8(vbfloat16m4_t vs2, __bf16 rs1,
+ size_t vl) {
+ return __riscv_vfwsub_vf(vs2, rs1, vl);
+}
+
+// CHECK-RV64-LABEL: define dso_local <vscale x 16 x float> @test_vfwsub_wv_bf16m4_f32m8(
+// CHECK-RV64-SAME: <vscale x 16 x float> [[VS2:%.*]], <vscale x 16 x bfloat> [[VS1:%.*]], i64 noundef [[VL:%.*]]) #[[ATTR0]] {
+// CHECK-RV64-NEXT: [[ENTRY:.*:]]
+// CHECK-RV64-NEXT: [[TMP0:%.*]] = call <vscale x 16 x float> @llvm.riscv.vfwsub.w.nxv16f32.nxv16bf16.i64(<vscale x 16 x float> poison, <vscale x 16 x float> [[VS2]], <vscale x 16 x bfloat> [[VS1]], i64 7, i64 [[VL]])
+// CHECK-RV64-NEXT: ret <vscale x 16 x float> [[TMP0]]
+//
+vfloat32m8_t test_vfwsub_wv_bf16m4_f32m8(vfloat32m8_t vs2, vbfloat16m4_t vs1,
+ size_t vl) {
+ return __riscv_vfwsub_wv(vs2, vs1, vl);
+}
+
+// CHECK-RV64-LABEL: define dso_local <vscale x 16 x float> @test_vfwsub_wf_bf16_f32m8(
+// CHECK-RV64-SAME: <vscale x 16 x float> [[VS2:%.*]], bfloat noundef [[RS1:%.*]], i64 noundef [[VL:%.*]]) #[[ATTR0]] {
+// CHECK-RV64-NEXT: [[ENTRY:.*:]]
+// CHECK-RV64-NEXT: [[TMP0:%.*]] = call <vscale x 16 x float> @llvm.riscv.vfwsub.w.nxv16f32.bf16.i64(<vscale x 16 x float> poison, <vscale x 16 x float> [[VS2]], bfloat [[RS1]], i64 7, i64 [[VL]])
+// CHECK-RV64-NEXT: ret <vscale x 16 x float> [[TMP0]]
+//
+vfloat32m8_t test_vfwsub_wf_bf16_f32m8(vfloat32m8_t vs2, __bf16 rs1,
+ size_t vl) {
+ return __riscv_vfwsub_wf(vs2, rs1, vl);
+}
+
+// CHECK-RV64-LABEL: define dso_local <vscale x 1 x float> @test_vfwsub_vv_bf16mf4_f32mf2_m(
+// CHECK-RV64-SAME: <vscale x 1 x i1> [[VM:%.*]], <vscale x 1 x bfloat> [[VS2:%.*]], <vscale x 1 x bfloat> [[VS1:%.*]], i64 noundef [[VL:%.*]]) #[[ATTR0]] {
+// CHECK-RV64-NEXT: [[ENTRY:.*:]]
+// CHECK-RV64-NEXT: [[TMP0:%.*]] = call <vscale x 1 x float> @llvm.riscv.vfwsub.mask.nxv1f32.nxv1bf16.nxv1bf16.i64(<vscale x 1 x float> poison, <vscale x 1 x bfloat> [[VS2]], <vscale x 1 x bfloat> [[VS1]], <vscale x 1 x i1> [[VM]], i64 7, i64 [[VL]], i64 3)
+// CHECK-RV64-NEXT: ret <vscale x 1 x float> [[TMP0]]
+//
+vfloat32mf2_t test_vfwsub_vv_bf16mf4_f32mf2_m(vbool64_t vm, vbfloat16mf4_t vs2,
+ vbfloat16mf4_t vs1, size_t vl) {
+ return __riscv_vfwsub_vv(vm, vs2, vs1, vl);
+}
+
+// CHECK-RV64-LABEL: define dso_local <vscale x 1 x float> @test_vfwsub_vf_bf16mf4_f32mf2_m(
+// CHECK-RV64-SAME: <vscale x 1 x i1> [[VM:%.*]], <vscale x 1 x bfloat> [[VS2:%.*]], bfloat noundef [[RS1:%.*]], i64 noundef [[VL:%.*]]) #[[ATTR0]] {
+// CHECK-RV64-NEXT: [[ENTRY:.*:]]
+// CHECK-RV64-NEXT: [[TMP0:%.*]] = call <vscale x 1 x float> @llvm.riscv.vfwsub.mask.nxv1f32.nxv1bf16.bf16.i64(<vscale x 1 x float> poison, <vscale x 1 x bfloat> [[VS2]], bfloat [[RS1]], <vscale x 1 x i1> [[VM]], i64 7, i64 [[VL]], i64 3)
+// CHECK-RV64-NEXT: ret <vscale x 1 x float> [[TMP0]]
+//
+vfloat32mf2_t test_vfwsub_vf_bf16mf4_f32mf2_m(vbool64_t vm, vbfloat16mf4_t vs2,
+ __bf16 rs1, size_t vl) {
+ return __riscv_vfwsub_vf(vm, vs2, rs1, vl);
+}
+
+// CHECK-RV64-LABEL: define dso_local <vscale x 1 x float> @test_vfwsub_wv_bf16mf4_f32mf2_m(
+// CHECK-RV64-SAME: <vscale x 1 x i1> [[VM:%.*]], <vscale x 1 x float> [[VS2:%.*]], <vscale x 1 x bfloat> [[VS1:%.*]], i64 noundef [[VL:%.*]]) #[[ATTR0]] {
+// CHECK-RV64-NEXT: [[ENTRY:.*:]]
+// CHECK-RV64-NEXT: [[TMP0:%.*]] = call <vscale x 1 x float> @llvm.riscv.vfwsub.w.mask.nxv1f32.nxv1bf16.i64(<vscale x 1 x float> poison, <vscale x 1 x float> [[VS2]], <vscale x 1 x bfloat> [[VS1]], <vscale x 1 x i1> [[VM]], i64 7, i64 [[VL]], i64 3)
+// CHECK-RV64-NEXT: ret <vscale x 1 x float> [[TMP0]]
+//
+vfloat32mf2_t test_vfwsub_wv_bf16mf4_f32mf2_m(vbool64_t vm, vfloat32mf2_t vs2,
+ vbfloat16mf4_t vs1, size_t vl) {
+ return __riscv_vfwsub_wv(vm, vs2, vs1, vl);
+}
+
+// CHECK-RV64-LABEL: define dso_local <vscale x 1 x float> @test_vfwsub_wf_bf16_f32mf2_m(
+// CHECK-RV64-SAME: <vscale x 1 x i1> [[VM:%.*]], <vscale x 1 x float> [[VS2:%.*]], bfloat noundef [[RS1:%.*]], i64 noundef [[VL:%.*]]) #[[ATTR0]] {
+// CHECK-RV64-NEXT: [[ENTRY:.*:]]
+// CHECK-RV64-NEXT: [[TMP0:%.*]] = call <vscale x 1 x float> @llvm.riscv.vfwsub.w.mask.nxv1f32.bf16.i64(<vscale x 1 x float> poison, <vscale x 1 x float> [[VS2]], bfloat [[RS1]], <vscale x 1 x i1> [[VM]], i64 7, i64 [[VL]], i64 3)
+// CHECK-RV64-NEXT: ret <vscale x 1 x float> [[TMP0]]
+//
+vfloat32mf2_t test_vfwsub_wf_bf16_f32mf2_m(vbool64_t vm, vfloat32mf2_t vs2,
+ __bf16 rs1, size_t vl) {
+ return __riscv_vfwsub_wf(vm, vs2, rs1, vl);
+}
+
+// CHECK-RV64-LABEL: define dso_local <vscale x 2 x float> @test_vfwsub_vv_bf16mf2_f32m1_m(
+// CHECK-RV64-SAME: <vscale x 2 x i1> [[VM:%.*]], <vscale x 2 x bfloat> [[VS2:%.*]], <vscale x 2 x bfloat> [[VS1:%.*]], i64 noundef [[VL:%.*]]) #[[ATTR0]] {
+// CHECK-RV64-NEXT: [[ENTRY:.*:]]
+// CHECK-RV64-NEXT: [[TMP0:%.*]] = call <vscale x 2 x float> @llvm.riscv.vfwsub.mask.nxv2f32.nxv2bf16.nxv2bf16.i64(<vscale x 2 x float> poison, <vscale x 2 x bfloat> [[VS2]], <vscale x 2 x bfloat> [[VS1]], <vscale x 2 x i1> [[VM]], i64 7, i64 [[VL]], i64 3)
+// CHECK-RV64-NEXT: ret <vscale x 2 x float> [[TMP0]]
+//
+vfloat32m1_t test_vfwsub_vv_bf16mf2_f32m1_m(vbool32_t vm, vbfloat16mf2_t vs2,
+ vbfloat16mf2_t vs1, size_t vl) {
+ return __riscv_vfwsub_vv(vm, vs2, vs1, vl);
+}
+
+// CHECK-RV64-LABEL: define dso_local <vscale x 2 x float> @test_vfwsub_vf_bf16mf2_f32m1_m(
+// CHECK-RV64-SAME: <vscale x 2 x i1> [[VM:%.*]], <vscale x 2 x bfloat> [[VS2:%.*]], bfloat noundef [[RS1:%.*]], i64 noundef [[VL:%.*]]) #[[ATTR0]] {
+// CHECK-RV64-NEXT: [[ENTRY:.*:]]
+// CHECK-RV64-NEXT: [[TMP0:%.*]] = call <vscale x 2 x float> @llvm.riscv.vfwsub.mask.nxv2f32.nxv2bf16.bf16.i64(<vscale x 2 x float> poison, <vscale x 2 x bfloat> [[VS2]], bfloat [[RS1]], <vscale x 2 x i1> [[VM]], i64 7, i64 [[VL]], i64 3)
+// CHECK-RV64-NEXT: ret <vscale x 2 x float> [[TMP0]]
+//
+vfloat32m1_t test_vfwsub_vf_bf16mf2_f32m1_m(vbool32_t vm, vbfloat16mf2_t vs2,
+ __bf16 rs1, size_t vl) {
+ return __riscv_vfwsub_vf(vm, vs2, rs1, vl);
+}
+
+// CHECK-RV64-LABEL: define dso_local <vscale x 2 x float> @test_vfwsub_wv_bf16mf2_f32m1_m(
+// CHECK-RV64-SAME: <vscale x 2 x i1> [[VM:%.*]], <vscale x 2 x float> [[VS2:%.*]], <vscale x 2 x bfloat> [[VS1:%.*]], i64 noundef [[VL:%.*]]) #[[ATTR0]] {
+// CHECK-RV64-NEXT: [[ENTRY:.*:]]
+// CHECK-RV64-NEXT: [[TMP0:%.*]] = call <vscale x 2 x float> @llvm.riscv.vfwsub.w.mask.nxv2f32.nxv2bf16.i64(<vscale x 2 x float> poison, <vscale x 2 x float> [[VS2]], <vscale x 2 x bfloat> [[VS1]], <vscale x 2 x i1> [[VM]], i64 7, i64 [[VL]], i64 3)
+// CHECK-RV64-NEXT: ret <vscale x 2 x float> [[TMP0]]
+//
+vfloat32m1_t test_vfwsub_wv_bf16mf2_f32m1_m(vbool32_t vm, vfloat32m1_t vs2,
+ vbfloat16mf2_t vs1, size_t vl) {
+ return __riscv_vfwsub_wv(vm, vs2, vs1, vl);
+}
+
+// CHECK-RV64-LABEL: define dso_local <vscale x 2 x float> @test_vfwsub_wf_bf16_f32m1_m(
+// CHECK-RV64-SAME: <vscale x 2 x i1> [[VM:%.*]], <vscale x 2 x float> [[VS2:%.*]], bfloat noundef [[RS1:%.*]], i64 noundef [[VL:%.*]]) #[[ATTR0]] {
+// CHECK-RV64-NEXT: [[ENTRY:.*:]]
+// CHECK-RV64-NEXT: [[TMP0:%.*]] = call <vscale x 2 x float> @llvm.riscv.vfwsub.w.mask.nxv2f32.bf16.i64(<vscale x 2 x float> poison, <vscale x 2 x float> [[VS2]], bfloat [[RS1]], <vscale x 2 x i1> [[VM]], i64 7, i64 [[VL]], i64 3)
+// CHECK-RV64-NEXT: ret <vscale x 2 x float> [[TMP0]]
+//
+vfloat32m1_t test_vfwsub_wf_bf16_f32m1_m(vbool32_t vm, vfloat32m1_t vs2,
+ __bf16 rs1, size_t vl) {
+ return __riscv_vfwsub_wf(vm, vs2, rs1, vl);
+}
+
+// CHECK-RV64-LABEL: define dso_local <vscale x 4 x float> @test_vfwsub_vv_bf16m1_f32m2_m(
+// CHECK-RV64-SAME: <vscale x 4 x i1> [[VM:%.*]], <vscale x 4 x bfloat> [[VS2:%.*]], <vscale x 4 x bfloat> [[VS1:%.*]], i64 noundef [[VL:%.*]]) #[[ATTR0]] {
+// CHECK-RV64-NEXT: [[ENTRY:.*:]]
+// CHECK-RV64-NEXT: [[TMP0:%.*]] = call <vscale x 4 x float> @llvm.riscv.vfwsub.mask.nxv4f32.nxv4bf16.nxv4bf16.i64(<vscale x 4 x float> poison, <vscale x 4 x bfloat> [[VS2]], <vscale x 4 x bfloat> [[VS1]], <vscale x 4 x i1> [[VM]], i64 7, i64 [[VL]], i64 3)
+// CHECK-RV64-NEXT: ret <vscale x 4 x float> [[TMP0]]
+//
+vfloat32m2_t test_vfwsub_vv_bf16m1_f32m2_m(vbool16_t vm, vbfloat16m1_t vs2,
+ vbfloat16m1_t vs1, size_t vl) {
+ return __riscv_vfwsub_vv(vm, vs2, vs1, vl);
+}
+
+// CHECK-RV64-LABEL: define dso_local <vscale x 4 x float> @test_vfwsub_vf_bf16m1_f32m2_m(
+// CHECK-RV64-SAME: <vscale x 4 x i1> [[VM:%.*]], <vscale x 4 x bfloat> [[VS2:%.*]], bfloat noundef [[RS1:%.*]], i64 noundef [[VL:%.*]]) #[[ATTR0]] {
+// CHECK-RV64-NEXT: [[ENTRY:.*:]]
+// CHECK-RV64-NEXT: [[TMP0:%.*]] = call <vscale x 4 x float> @llvm.riscv.vfwsub.mask.nxv4f32.nxv4bf16.bf16.i64(<vscale x 4 x float> poison, <vscale x 4 x bfloat> [[VS2]], bfloat [[RS1]], <vscale x 4 x i1> [[VM]], i64 7, i64 [[VL]], i64 3)
+// CHECK-RV64-NEXT: ret <vscale x 4 x float> [[TMP0]]
+//
+vfloat32m2_t test_vfwsub_vf_bf16m1_f32m2_m(vbool16_t vm, vbfloat16m1_t vs2,
+ __bf16 rs1, size_t vl) {
+ return __riscv_vfwsub_vf(vm, vs2, rs1, vl);
+}
+
+// CHECK-RV64-LABEL: define dso_local <vscale x 4 x float> @test_vfwsub_wv_bf16m1_f32m2_m(
+// CHECK-RV64-SAME: <vscale x 4 x i1> [[VM:%.*]], <vscale x 4 x float> [[VS2:%.*]], <vscale x 4 x bfloat> [[VS1:%.*]], i64 noundef [[VL:%.*]]) #[[ATTR0]] {
+// CHECK-RV64-NEXT: [[ENTRY:.*:]]
+// CHECK-RV64-NEXT: [[TMP0:%.*]] = call <vscale x 4 x float> @llvm.riscv.vfwsub.w.mask.nxv4f32.nxv4bf16.i64(<vscale x 4 x float> poison, <vscale x 4 x float> [[VS2]], <vscale x 4 x bfloat> [[VS1]], <vscale x 4 x i1> [[VM]], i64 7, i64 [[VL]], i64 3)
+// CHECK-RV64-NEXT: ret <vscale x 4 x float> [[TMP0]]
+//
+vfloat32m2_t test_vfwsub_wv_bf16m1_f32m2_m(vbool16_t vm, vfloat32m2_t vs2,
+ vbfloat16m1_t vs1, size_t vl) {
+ return __riscv_vfwsub_wv(vm, vs2, vs1, vl);
+}
+
+// CHECK-RV64-LABEL: define dso_local <vscale x 4 x float> @test_vfwsub_wf_bf16_f32m2_m(
+// CHECK-RV64-SAME: <vscale x 4 x i1> [[VM:%.*]], <vscale x 4 x float> [[VS2:%.*]], bfloat noundef [[RS1:%.*]], i64 noundef [[VL:%.*]]) #[[ATTR0]] {
+// CHECK-RV64-NEXT: [[ENTRY:.*:]]
+// CHECK-RV64-NEXT: [[TMP0:%.*]] = call <vscale x 4 x float> @llvm.riscv.vfwsub.w.mask.nxv4f32.bf16.i64(<vscale x 4 x float> poison, <vscale x 4 x float> [[VS2]], bfloat [[RS1]], <vscale x 4 x i1> [[VM]], i64 7, i64 [[VL]], i64 3)
+// CHECK-RV64-NEXT: ret <vscale x 4 x float> [[TMP0]]
+//
+vfloat32m2_t test_vfwsub_wf_bf16_f32m2_m(vbool16_t vm, vfloat32m2_t vs2,
+ __bf16 rs1, size_t vl) {
+ return __riscv_vfwsub_wf(vm, vs2, rs1, vl);
+}
+
+// CHECK-RV64-LABEL: define dso_local <vscale x 8 x float> @test_vfwsub_vv_bf16m2_f32m4_m(
+// CHECK-RV64-SAME: <vscale x 8 x i1> [[VM:%.*]], <vscale x 8 x bfloat> [[VS2:%.*]], <vscale x 8 x bfloat> [[VS1:%.*]], i64 noundef [[VL:%.*]]) #[[ATTR0]] {
+// CHECK-RV64-NEXT: [[ENTRY:.*:]]
+// CHECK-RV64-NEXT: [[TMP0:%.*]] = call <vscale x 8 x float> @llvm.riscv.vfwsub.mask.nxv8f32.nxv8bf16.nxv8bf16.i64(<vscale x 8 x float> poison, <vscale x 8 x bfloat> [[VS2]], <vscale x 8 x bfloat> [[VS1]], <vscale x 8 x i1> [[VM]], i64 7, i64 [[VL]], i64 3)
+// CHECK-RV64-NEXT: ret <vscale x 8 x float> [[TMP0]]
+//
+vfloat32m4_t test_vfwsub_vv_bf16m2_f32m4_m(vbool8_t vm, vbfloat16m2_t vs2,
+ vbfloat16m2_t vs1, size_t vl) {
+ return __riscv_vfwsub_vv(vm, vs2, vs1, vl);
+}
+
+// CHECK-RV64-LABEL: define dso_local <vscale x 8 x float> @test_vfwsub_vf_bf16m2_f32m4_m(
+// CHECK-RV64-SAME: <vscale x 8 x i1> [[VM:%.*]], <vscale x 8 x bfloat> [[VS2:%.*]], bfloat noundef [[RS1:%.*]], i64 noundef [[VL:%.*]]) #[[ATTR0]] {
+// CHECK-RV64-NEXT: [[ENTRY:.*:]]
+// CHECK-RV64-NEXT: [[TMP0:%.*]] = call <vscale x 8 x float> @llvm.riscv.vfwsub.mask.nxv8f32.nxv8bf16.bf16.i64(<vscale x 8 x float> poison, <vscale x 8 x bfloat> [[VS2]], bfloat [[RS1]], <vscale x 8 x i1> [[VM]], i64 7, i64 [[VL]], i64 3)
+// CHECK-RV64-NEXT: ret <vscale x 8 x float> [[TMP0]]
+//
+vfloat32m4_t test_vfwsub_vf_bf16m2_f32m4_m(vbool8_t vm, vbfloat16m2_t vs2,
+ __bf16 rs1, size_t vl) {
+ return __riscv_vfwsub_vf(vm, vs2, rs1, vl);
+}
+
+// CHECK-RV64-LABEL: define dso_local <vscale x 8 x float> @test_vfwsub_wv_bf16m2_f32m4_m(
+// CHECK-RV64-SAME: <vscale x 8 x i1> [[VM:%.*]], <vscale x 8 x float> [[VS2:%.*]], <vscale x 8 x bfloat> [[VS1:%.*]], i64 noundef [[VL:%.*]]) #[[ATTR0]] {
+// CHECK-RV64-NEXT: [[ENTRY:.*:]]
+// CHECK-RV64-NEXT: [[TMP0:%.*]] = call <vscale x 8 x float> @llvm.riscv.vfwsub.w.mask.nxv8f32.nxv8bf16.i64(<vscale x 8 x float> poison, <vscale x 8 x float> [[VS2]], <vscale x 8 x bfloat> [[VS1]], <vscale x 8 x i1> [[VM]], i64 7, i64 [[VL]], i64 3)
+// CHECK-RV64-NEXT: ret <vscale x 8 x float> [[TMP0]]
+//
+vfloat32m4_t test_vfwsub_wv_bf16m2_f32m4_m(vbool8_t vm, vfloat32m4_t vs2,
+ vbfloat16m2_t vs1, size_t vl) {
+ return __riscv_vfwsub_wv(vm, vs2, vs1, vl);
+}
+
+// CHECK-RV64-LABEL: define dso_local <vscale x 8 x float> @test_vfwsub_wf_bf16_f32m4_m(
+// CHECK-RV64-SAME: <vscale x 8 x i1> [[VM:%.*]], <vscale x 8 x float> [[VS2:%.*]], bfloat noundef [[RS1:%.*]], i64 noundef [[VL:%.*]]) #[[ATTR0]] {
+// CHECK-RV64-NEXT: [[ENTRY:.*:]]
+// CHECK-RV64-NEXT: [[TMP0:%.*]] = call <vscale x 8 x float> @llvm.riscv.vfwsub.w.mask.nxv8f32.bf16.i64(<vscale x 8 x float> poison, <vscale x 8 x float> [[VS2]], bfloat [[RS1]], <vscale x 8 x i1> [[VM]], i64 7, i64 [[VL]], i64 3)
+// CHECK-RV64-NEXT: ret <vscale x 8 x float> [[TMP0]]
+//
+vfloat32m4_t test_vfwsub_wf_bf16_f32m4_m(vbool8_t vm, vfloat32m4_t vs2,
+ __bf16 rs1, size_t vl) {
+ return __riscv_vfwsub_wf(vm, vs2, rs1, vl);
+}
+
+// CHECK-RV64-LABEL: define dso_local <vscale x 16 x float> @test_vfwsub_vv_bf16m4_f32m8_m(
+// CHECK-RV64-SAME: <vscale x 16 x i1> [[VM:%.*]], <vscale x 16 x bfloat> [[VS2:%.*]], <vscale x 16 x bfloat> [[VS1:%.*]], i64 noundef [[VL:%.*]]) #[[ATTR0]] {
+// CHECK-RV64-NEXT: [[ENTRY:.*:]]
+// CHECK-RV64-NEXT: [[TMP0:%.*]] = call <vscale x 16 x float> @llvm.riscv.vfwsub.mask.nxv16f32.nxv16bf16.nxv16bf16.i64(<vscale x 16 x float> poison, <vscale x 16 x bfloat> [[VS2]], <vscale x 16 x bfloat> [[VS1]], <vscale x 16 x i1> [[VM]], i64 7, i64 [[VL]], i64 3)
+// CHECK-RV64-NEXT: ret <vscale x 16 x float> [[TMP0]]
+//
+vfloat32m8_t test_vfwsub_vv_bf16m4_f32m8_m(vbool4_t vm, vbfloat16m4_t vs2,
+ vbfloat16m4_t vs1, size_t vl) {
+ return __riscv_vfwsub_vv(vm, vs2, vs1, vl);
+}
+
+// CHECK-RV64-LABEL: define dso_local <vscale x 16 x float> @test_vfwsub_vf_bf16m4_f32m8_m(
+// CHECK-RV64-SAME: <vscale x 16 x i1> [[VM:%.*]], <vscale x 16 x bfloat> [[VS2:%.*]], bfloat noundef [[RS1:%.*]], i64 noundef [[VL:%.*]]) #[[ATTR0]] {
+// CHECK-RV64-NEXT: [[ENTRY:.*:]]
+// CHECK-RV64-NEXT: [[TMP0:%.*]] = call <vscale x 16 x float> @llvm.riscv.vfwsub.mask.nxv16f32.nxv16bf16.bf16.i64(<vscale x 16 x float> poison, <vscale x 16 x bfloat> [[VS2]], bfloat [[RS1]], <vscale x 16 x i1> [[VM]], i64 7, i64 [[VL]], i64 3)
+// CHECK-RV64-NEXT: ret <vscale x 16 x float> [[TMP0]]
+//
+vfloat32m8_t test_vfwsub_vf_bf16m4_f32m8_m(vbool4_t vm, vbfloat16m4_t vs2,
+ __bf16 rs1, size_t vl) {
+ return __riscv_vfwsub_vf(vm, vs2, rs1, vl);
+}
+
+// CHECK-RV64-LABEL: define dso_local <vscale x 16 x float> @test_vfwsub_wv_bf16m4_f32m8_m(
+// CHECK-RV64-SAME: <vscale x 16 x i1> [[VM:%.*]], <vscale x 16 x float> [[VS2:%.*]], <vscale x 16 x bfloat> [[VS1:%.*]], i64 noundef [[VL:%.*]]) #[[ATTR0]] {
+// CHECK-RV64-NEXT: [[ENTRY:.*:]]
+// CHECK-RV64-NEXT: [[TMP0:%.*]] = call <vscale x 16 x float> @llvm.riscv.vfwsub.w.mask.nxv16f32.nxv16bf16.i64(<vscale x 16 x float> poison, <vscale x 16 x float> [[VS2]], <vscale x 16 x bfloat> [[VS1]], <vscale x 16 x i1> [[VM]], i64 7, i64 [[VL]], i64 3)
+// CHECK-RV64-NEXT: ret <vscale x 16 x float> [[TMP0]]
+//
+vfloat32m8_t test_vfwsub_wv_bf16m4_f32m8_m(vbool4_t vm, vfloat32m8_t vs2,
+ vbfloat16m4_t vs1, size_t vl) {
+ return __riscv_vfwsub_wv(vm, vs2, vs1, vl);
+}
+
+// CHECK-RV64-LABEL: define dso_local <vscale x 16 x float> @test_vfwsub_wf_bf16_f32m8_m(
+// CHECK-RV64-SAME: <vscale x 16 x i1> [[VM:%.*]], <vscale x 16 x float> [[VS2:%.*]], bfloat noundef [[RS1:%.*]], i64 noundef [[VL:%.*]]) #[[ATTR0]] {
+// CHECK-RV64-NEXT: [[ENTRY:.*:]]
+// CHECK-RV64-NEXT: [[TMP0:%.*]] = call <vscale x 16 x float> @llvm.riscv.vfwsub.w.mask.nxv16f32.bf16.i64(<vscale x 16 x float> poison, <vscale x 16 x float> [[VS2]], bfloat [[RS1]], <vscale x 16 x i1> [[VM]], i64 7, i64 [[VL]], i64 3)
+// CHECK-RV64-NEXT: ret <vscale x 16 x float> [[TMP0]]
+//
+vfloat32m8_t test_vfwsub_wf_bf16_f32m8_m(vbool4_t vm, vfloat32m8_t vs2,
+ __bf16 rs1, size_t vl) {
+ return __riscv_vfwsub_wf(vm, vs2, rs1, vl);
+}
+
+// CHECK-RV64-LABEL: define dso_local <vscale x 1 x float> @test_vfwsub_vv_bf16mf4_f32mf2_rm(
+// CHECK-RV64-SAME: <vscale x 1 x bfloat> [[VS2:%.*]], <vscale x 1 x bfloat> [[VS1:%.*]], i64 noundef [[VL:%.*]]) #[[ATTR0]] {
+// CHECK-RV64-NEXT: [[ENTRY:.*:]]
+// CHECK-RV64-NEXT: [[TMP0:%.*]] = call <vscale x 1 x float> @llvm.riscv.vfwsub.nxv1f32.nxv1bf16.nxv1bf16.i64(<vscale x 1 x float> poison, <vscale x 1 x bfloat> [[VS2]], <vscale x 1 x bfloat> [[VS1]], i64 0, i64 [[VL]])
+// CHECK-RV64-NEXT: ret <vscale x 1 x float> [[TMP0]]
+//
+vfloat32mf2_t test_vfwsub_vv_bf16mf4_f32mf2_rm(vbfloat16mf4_t vs2,
+ vbfloat16mf4_t vs1, size_t vl) {
+ return __riscv_vfwsub_vv(vs2, vs1, __RISCV_FRM_RNE, vl);
+}
+
+// CHECK-RV64-LABEL: define dso_local <vscale x 1 x float> @test_vfwsub_vf_bf16mf4_f32mf2_rm(
+// CHECK-RV64-SAME: <vscale x 1 x bfloat> [[VS2:%.*]], bfloat noundef [[RS1:%.*]], i64 noundef [[VL:%.*]]) #[[ATTR0]] {
+// CHECK-RV64-NEXT: [[ENTRY:.*:]]
+// CHECK-RV64-NEXT: [[TMP0:%.*]] = call <vscale x 1 x float> @llvm.riscv.vfwsub.nxv1f32.nxv1bf16.bf16.i64(<vscale x 1 x float> poison, <vscale x 1 x bfloat> [[VS2]], bfloat [[RS1]], i64 0, i64 [[VL]])
+// CHECK-RV64-NEXT: ret <vscale x 1 x float> [[TMP0]]
+//
+vfloat32mf2_t test_vfwsub_vf_bf16mf4_f32mf2_rm(vbfloat16mf4_t vs2, __bf16 rs1,
+ size_t vl) {
+ return __riscv_vfwsub_vf(vs2, rs1, __RISCV_FRM_RNE, vl);
+}
+
+// CHECK-RV64-LABEL: define dso_local <vscale x 1 x float> @test_vfwsub_wv_bf16mf4_f32mf2_rm(
+// CHECK-RV64-SAME: <vscale x 1 x float> [[VS2:%.*]], <vscale x 1 x bfloat> [[VS1:%.*]], i64 noundef [[VL:%.*]]) #[[ATTR0]] {
+// CHECK-RV64-NEXT: [[ENTRY:.*:]]
+// CHECK-RV64-NEXT: [[TMP0:%.*]] = call <vscale x 1 x float> @llvm.riscv.vfwsub.w.nxv1f32.nxv1bf16.i64(<vscale x 1 x float> poison, <vscale x 1 x float> [[VS2]], <vscale x 1 x bfloat> [[VS1]], i64 0, i64 [[VL]])
+// CHECK-RV64-NEXT: ret <vscale x 1 x float> [[TMP0]]
+//
+vfloat32mf2_t test_vfwsub_wv_bf16mf4_f32mf2_rm(vfloat32mf2_t vs2,
+ vbfloat16mf4_t vs1, size_t vl) {
+ return __riscv_vfwsub_wv(vs2, vs1, __RISCV_FRM_RNE, vl);
+}
+
+// CHECK-RV64-LABEL: define dso_local <vscale x 1 x float> @test_vfwsub_wf_bf16_f32mf2_rm(
+// CHECK-RV64-SAME: <vscale x 1 x float> [[VS2:%.*]], bfloat noundef [[RS1:%.*]], i64 noundef [[VL:%.*]]) #[[ATTR0]] {
+// CHECK-RV64-NEXT: [[ENTRY:.*:]]
+// CHECK-RV64-NEXT: [[TMP0:%.*]] = call <vscale x 1 x float> @llvm.riscv.vfwsub.w.nxv1f32.bf16.i64(<vscale x 1 x float> poison, <vscale x 1 x float> [[VS2]], bfloat [[RS1]], i64 0, i64 [[VL]])
+// CHECK-RV64-NEXT: ret <vscale x 1 x float> [[TMP0]]
+//
+vfloat32mf2_t test_vfwsub_wf_bf16_f32mf2_rm(vfloat32mf2_t vs2, __bf16 rs1,
+ size_t vl) {
+ return __riscv_vfwsub_wf(vs2, rs1, __RISCV_FRM_RNE, vl);
+}
+
+// CHECK-RV64-LABEL: define dso_local <vscale x 2 x float> @test_vfwsub_vv_bf16mf2_f32m1_rm(
+// CHECK-RV64-SAME: <vscale x 2 x bfloat> [[VS2:%.*]], <vscale x 2 x bfloat> [[VS1:%.*]], i64 noundef [[VL:%.*]]) #[[ATTR0]] {
+// CHECK-RV64-NEXT: [[ENTRY:.*:]]
+// CHECK-RV64-NEXT: [[TMP0:%.*]] = call <vscale x 2 x float> @llvm.riscv.vfwsub.nxv2f32.nxv2bf16.nxv2bf16.i64(<vscale x 2 x float> poison, <vscale x 2 x bfloat> [[VS2]], <vscale x 2 x bfloat> [[VS1]], i64 0, i64 [[VL]])
+// CHECK-RV64-NEXT: ret <vscale x 2 x float> [[TMP0]]
+//
+vfloat32m1_t test_vfwsub_vv_bf16mf2_f32m1_rm(vbfloat16mf2_t vs2,
+ vbfloat16mf2_t vs1, size_t vl) {
+ return __riscv_vfwsub_vv(vs2, vs1, __RISCV_FRM_RNE, vl);
+}
+
+// CHECK-RV64-LABEL: define dso_local <vscale x 2 x float> @test_vfwsub_vf_bf16mf2_f32m1_rm(
+// CHECK-RV64-SAME: <vscale x 2 x bfloat> [[VS2:%.*]], bfloat noundef [[RS1:%.*]], i64 noundef [[VL:%.*]]) #[[ATTR0]] {
+// CHECK-RV64-NEXT: [[ENTRY:.*:]]
+// CHECK-RV64-NEXT: [[TMP0:%.*]] = call <vscale x 2 x float> @llvm.riscv.vfwsub.nxv2f32.nxv2bf16.bf16.i64(<vscale x 2 x float> poison, <vscale x 2 x bfloat> [[VS2]], bfloat [[RS1]], i64 0, i64 [[VL]])
+// CHECK-RV64-NEXT: ret <vscale x 2 x float> [[TMP0]]
+//
+vfloat32m1_t test_vfwsub_vf_bf16mf2_f32m1_rm(vbfloat16mf2_t vs2, __bf16 rs1,
+ size_t vl) {
+ return __riscv_vfwsub_vf(vs2, rs1, __RISCV_FRM_RNE, vl);
+}
+
+// CHECK-RV64-LABEL: define dso_local <vscale x 2 x float> @test_vfwsub_wv_bf16mf2_f32m1_rm(
+// CHECK-RV64-SAME: <vscale x 2 x float> [[VS2:%.*]], <vscale x 2 x bfloat> [[VS1:%.*]], i64 noundef [[VL:%.*]]) #[[ATTR0]] {
+// CHECK-RV64-NEXT: [[ENTRY:.*:]]
+// CHECK-RV64-NEXT: [[TMP0:%.*]] = call <vscale x 2 x float> @llvm.riscv.vfwsub.w.nxv2f32.nxv2bf16.i64(<vscale x 2 x float> poison, <vscale x 2 x float> [[VS2]], <vscale x 2 x bfloat> [[VS1]], i64 0, i64 [[VL]])
+// CHECK-RV64-NEXT: ret <vscale x 2 x float> [[TMP0]]
+//
+vfloat32m1_t test_vfwsub_wv_bf16mf2_f32m1_rm(vfloat32m1_t vs2,
+ vbfloat16mf2_t vs1, size_t vl) {
+ return __riscv_vfwsub_wv(vs2, vs1, __RISCV_FRM_RNE, vl);
+}
+
+// CHECK-RV64-LABEL: define dso_local <vscale x 2 x float> @test_vfwsub_wf_bf16_f32m1_rm(
+// CHECK-RV64-SAME: <vscale x 2 x float> [[VS2:%.*]], bfloat noundef [[RS1:%.*]], i64 noundef [[VL:%.*]]) #[[ATTR0]] {
+// CHECK-RV64-NEXT: [[ENTRY:.*:]]
+// CHECK-RV64-NEXT: [[TMP0:%.*]] = call <vscale x 2 x float> @llvm.riscv.vfwsub.w.nxv2f32.bf16.i64(<vscale x 2 x float> poison, <vscale x 2 x float> [[VS2]], bfloat [[RS1]], i64 0, i64 [[VL]])
+// CHECK-RV64-NEXT: ret <vscale x 2 x float> [[TMP0]]
+//
+vfloat32m1_t test_vfwsub_wf_bf16_f32m1_rm(vfloat32m1_t vs2, __bf16 rs1,
+ size_t vl) {
+ return __riscv_vfwsub_wf(vs2, rs1, __RISCV_FRM_RNE, vl);
+}
+
+// CHECK-RV64-LABEL: define dso_local <vscale x 4 x float> @test_vfwsub_vv_bf16m1_f32m2_rm(
+// CHECK-RV64-SAME: <vscale x 4 x bfloat> [[VS2:%.*]], <vscale x 4 x bfloat> [[VS1:%.*]], i64 noundef [[VL:%.*]]) #[[ATTR0]] {
+// CHECK-RV64-NEXT: [[ENTRY:.*:]]
+// CHECK-RV64-NEXT: [[TMP0:%.*]] = call <vscale x 4 x float> @llvm.riscv.vfwsub.nxv4f32.nxv4bf16.nxv4bf16.i64(<vscale x 4 x float> poison, <vscale x 4 x bfloat> [[VS2]], <vscale x 4 x bfloat> [[VS1]], i64 0, i64 [[VL]])
+// CHECK-RV64-NEXT: ret <vscale x 4 x float> [[TMP0]]
+//
+vfloat32m2_t test_vfwsub_vv_bf16m1_f32m2_rm(vbfloat16m1_t vs2,
+ vbfloat16m1_t vs1, size_t vl) {
+ return __riscv_vfwsub_vv(vs2, vs1, __RISCV_FRM_RNE, vl);
+}
+
+// CHECK-RV64-LABEL: define dso_local <vscale x 4 x float> @test_vfwsub_vf_bf16m1_f32m2_rm(
+// CHECK-RV64-SAME: <vscale x 4 x bfloat> [[VS2:%.*]], bfloat noundef [[RS1:%.*]], i64 noundef [[VL:%.*]]) #[[ATTR0]] {
+// CHECK-RV64-NEXT: [[ENTRY:.*:]]
+// CHECK-RV64-NEXT: [[TMP0:%.*]] = call <vscale x 4 x float> @llvm.riscv.vfwsub.nxv4f32.nxv4bf16.bf16.i64(<vscale x 4 x float> poison, <vscale x 4 x bfloat> [[VS2]], bfloat [[RS1]], i64 0, i64 [[VL]])
+// CHECK-RV64-NEXT: ret <vscale x 4 x float> [[TMP0]]
+//
+vfloat32m2_t test_vfwsub_vf_bf16m1_f32m2_rm(vbfloat16m1_t vs2, __bf16 rs1,
+ size_t vl) {
+ return __riscv_vfwsub_vf(vs2, rs1, __RISCV_FRM_RNE, vl);
+}
+
+// CHECK-RV64-LABEL: define dso_local <vscale x 4 x float> @test_vfwsub_wv_bf16m1_f32m2_rm(
+// CHECK-RV64-SAME: <vscale x 4 x float> [[VS2:%.*]], <vscale x 4 x bfloat> [[VS1:%.*]], i64 noundef [[VL:%.*]]) #[[ATTR0]] {
+// CHECK-RV64-NEXT: [[ENTRY:.*:]]
+// CHECK-RV64-NEXT: [[TMP0:%.*]] = call <vscale x 4 x float> @llvm.riscv.vfwsub.w.nxv4f32.nxv4bf16.i64(<vscale x 4 x float> poison, <vscale x 4 x float> [[VS2]], <vscale x 4 x bfloat> [[VS1]], i64 0, i64 [[VL]])
+// CHECK-RV64-NEXT: ret <vscale x 4 x float> [[TMP0]]
+//
+vfloat32m2_t test_vfwsub_wv_bf16m1_f32m2_rm(vfloat32m2_t vs2, vbfloat16m1_t vs1,
+ size_t vl) {
+ return __riscv_vfwsub_wv(vs2, vs1, __RISCV_FRM_RNE, vl);
+}
+
+// CHECK-RV64-LABEL: define dso_local <vscale x 4 x float> @test_vfwsub_wf_bf16_f32m2_rm(
+// CHECK-RV64-SAME: <vscale x 4 x float> [[VS2:%.*]], bfloat noundef [[RS1:%.*]], i64 noundef [[VL:%.*]]) #[[ATTR0]] {
+// CHECK-RV64-NEXT: [[ENTRY:.*:]]
+// CHECK-RV64-NEXT: [[TMP0:%.*]] = call <vscale x 4 x float> @llvm.riscv.vfwsub.w.nxv4f32.bf16.i64(<vscale x 4 x float> poison, <vscale x 4 x float> [[VS2]], bfloat [[RS1]], i64 0, i64 [[VL]])
+// CHECK-RV64-NEXT: ret <vscale x 4 x float> [[TMP0]]
+//
+vfloat32m2_t test_vfwsub_wf_bf16_f32m2_rm(vfloat32m2_t vs2, __bf16 rs1,
+ size_t vl) {
+ return __riscv_vfwsub_wf(vs2, rs1, __RISCV_FRM_RNE, vl);
+}
+
+// CHECK-RV64-LABEL: define dso_local <vscale x 8 x float> @test_vfwsub_vv_bf16m2_f32m4_rm(
+// CHECK-RV64-SAME: <vscale x 8 x bfloat> [[VS2:%.*]], <vscale x 8 x bfloat> [[VS1:%.*]], i64 noundef [[VL:%.*]]) #[[ATTR0]] {
+// CHECK-RV64-NEXT: [[ENTRY:.*:]]
+// CHECK-RV64-NEXT: [[TMP0:%.*]] = call <vscale x 8 x float> @llvm.riscv.vfwsub.nxv8f32.nxv8bf16.nxv8bf16.i64(<vscale x 8 x float> poison, <vscale x 8 x bfloat> [[VS2]], <vscale x 8 x bfloat> [[VS1]], i64 0, i64 [[VL]])
+// CHECK-RV64-NEXT: ret <vscale x 8 x float> [[TMP0]]
+//
+vfloat32m4_t test_vfwsub_vv_bf16m2_f32m4_rm(vbfloat16m2_t vs2,
+ vbfloat16m2_t vs1, size_t vl) {
+ return __riscv_vfwsub_vv(vs2, vs1, __RISCV_FRM_RNE, vl);
+}
+
+// CHECK-RV64-LABEL: define dso_local <vscale x 8 x float> @test_vfwsub_vf_bf16m2_f32m4_rm(
+// CHECK-RV64-SAME: <vscale x 8 x bfloat> [[VS2:%.*]], bfloat noundef [[RS1:%.*]], i64 noundef [[VL:%.*]]) #[[ATTR0]] {
+// CHECK-RV64-NEXT: [[ENTRY:.*:]]
+// CHECK-RV64-NEXT: [[TMP0:%.*]] = call <vscale x 8 x float> @llvm.riscv.vfwsub.nxv8f32.nxv8bf16.bf16.i64(<vscale x 8 x float> poison, <vscale x 8 x bfloat> [[VS2]], bfloat [[RS1]], i64 0, i64 [[VL]])
+// CHECK-RV64-NEXT: ret <vscale x 8 x float> [[TMP0]]
+//
+vfloat32m4_t test_vfwsub_vf_bf16m2_f32m4_rm(vbfloat16m2_t vs2, __bf16 rs1,
+ size_t vl) {
+ return __riscv_vfwsub_vf(vs2, rs1, __RISCV_FRM_RNE, vl);
+}
+
+// CHECK-RV64-LABEL: define dso_local <vscale x 8 x float> @test_vfwsub_wv_bf16m2_f32m4_rm(
+// CHECK-RV64-SAME: <vscale x 8 x float> [[VS2:%.*]], <vscale x 8 x bfloat> [[VS1:%.*]], i64 noundef [[VL:%.*]]) #[[ATTR0]] {
+// CHECK-RV64-NEXT: [[ENTRY:.*:]]
+// CHECK-RV64-NEXT: [[TMP0:%.*]] = call <vscale x 8 x float> @llvm.riscv.vfwsub.w.nxv8f32.nxv8bf16.i64(<vscale x 8 x float> poison, <vscale x 8 x float> [[VS2]], <vscale x 8 x bfloat> [[VS1]], i64 0, i64 [[VL]])
+// CHECK-RV64-NEXT: ret <vscale x 8 x float> [[TMP0]]
+//
+vfloat32m4_t test_vfwsub_wv_bf16m2_f32m4_rm(vfloat32m4_t vs2, vbfloat16m2_t vs1,
+ size_t vl) {
+ return __riscv_vfwsub_wv(vs2, vs1, __RISCV_FRM_RNE, vl);
+}
+
+// CHECK-RV64-LABEL: define dso_local <vscale x 8 x float> @test_vfwsub_wf_bf16_f32m4_rm(
+// CHECK-RV64-SAME: <vscale x 8 x float> [[VS2:%.*]], bfloat noundef [[RS1:%.*]], i64 noundef [[VL:%.*]]) #[[ATTR0]] {
+// CHECK-RV64-NEXT: [[ENTRY:.*:]]
+// CHECK-RV64-NEXT: [[TMP0:%.*]] = call <vscale x 8 x float> @llvm.riscv.vfwsub.w.nxv8f32.bf16.i64(<vscale x 8 x float> poison, <vscale x 8 x float> [[VS2]], bfloat [[RS1]], i64 0, i64 [[VL]])
+// CHECK-RV64-NEXT: ret <vscale x 8 x float> [[TMP0]]
+//
+vfloat32m4_t test_vfwsub_wf_bf16_f32m4_rm(vfloat32m4_t vs2, __bf16 rs1,
+ size_t vl) {
+ return __riscv_vfwsub_wf(vs2, rs1, __RISCV_FRM_RNE, vl);
+}
+
+// CHECK-RV64-LABEL: define dso_local <vscale x 16 x float> @test_vfwsub_vv_bf16m4_f32m8_rm(
+// CHECK-RV64-SAME: <vscale x 16 x bfloat> [[VS2:%.*]], <vscale x 16 x bfloat> [[VS1:%.*]], i64 noundef [[VL:%.*]]) #[[ATTR0]] {
+// CHECK-RV64-NEXT: [[ENTRY:.*:]]
+// CHECK-RV64-NEXT: [[TMP0:%.*]] = call <vscale x 16 x float> @llvm.riscv.vfwsub.nxv16f32.nxv16bf16.nxv16bf16.i64(<vscale x 16 x float> poison, <vscale x 16 x bfloat> [[VS2]], <vscale x 16 x bfloat> [[VS1]], i64 0, i64 [[VL]])
+// CHECK-RV64-NEXT: ret <vscale x 16 x float> [[TMP0]]
+//
+vfloat32m8_t test_vfwsub_vv_bf16m4_f32m8_rm(vbfloat16m4_t vs2,
+ vbfloat16m4_t vs1, size_t vl) {
+ return __riscv_vfwsub_vv(vs2, vs1, __RISCV_FRM_RNE, vl);
+}
+
+// CHECK-RV64-LABEL: define dso_local <vscale x 16 x float> @test_vfwsub_vf_bf16m4_f32m8_rm(
+// CHECK-RV64-SAME: <vscale x 16 x bfloat> [[VS2:%.*]], bfloat noundef [[RS1:%.*]], i64 noundef [[VL:%.*]]) #[[ATTR0]] {
+// CHECK-RV64-NEXT: [[ENTRY:.*:]]
+// CHECK-RV64-NEXT: [[TMP0:%.*]] = call <vscale x 16 x float> @llvm.riscv.vfwsub.nxv16f32.nxv16bf16.bf16.i64(<vscale x 16 x float> poison, <vscale x 16 x bfloat> [[VS2]], bfloat [[RS1]], i64 0, i64 [[VL]])
+// CHECK-RV64-NEXT: ret <vscale x 16 x float> [[TMP0]]
+//
+vfloat32m8_t test_vfwsub_vf_bf16m4_f32m8_rm(vbfloat16m4_t vs2, __bf16 rs1,
+ size_t vl) {
+ return __riscv_vfwsub_vf(vs2, rs1, __RISCV_FRM_RNE, vl);
+}
+
+// CHECK-RV64-LABEL: define dso_local <vscale x 16 x float> @test_vfwsub_wv_bf16m4_f32m8_rm(
+// CHECK-RV64-SAME: <vscale x 16 x float> [[VS2:%.*]], <vscale x 16 x bfloat> [[VS1:%.*]], i64 noundef [[VL:%.*]]) #[[ATTR0]] {
+// CHECK-RV64-NEXT: [[ENTRY:.*:]]
+// CHECK-RV64-NEXT: [[TMP0:%.*]] = call <vscale x 16 x float> @llvm.riscv.vfwsub.w.nxv16f32.nxv16bf16.i64(<vscale x 16 x float> poison, <vscale x 16 x float> [[VS2]], <vscale x 16 x bfloat> [[VS1]], i64 0, i64 [[VL]])
+// CHECK-RV64-NEXT: ret <vscale x 16 x float> [[TMP0]]
+//
+vfloat32m8_t test_vfwsub_wv_bf16m4_f32m8_rm(vfloat32m8_t vs2, vbfloat16m4_t vs1,
+ size_t vl) {
+ return __riscv_vfwsub_wv(vs2, vs1, __RISCV_FRM_RNE, vl);
+}
+
+// CHECK-RV64-LABEL: define dso_local <vscale x 16 x float> @test_vfwsub_wf_bf16_f32m8_rm(
+// CHECK-RV64-SAME: <vscale x 16 x float> [[VS2:%.*]], bfloat noundef [[RS1:%.*]], i64 noundef [[VL:%.*]]) #[[ATTR0]] {
+// CHECK-RV64-NEXT: [[ENTRY:.*:]]
+// CHECK-RV64-NEXT: [[TMP0:%.*]] = call <vscale x 16 x float> @llvm.riscv.vfwsub.w.nxv16f32.bf16.i64(<vscale x 16 x float> poison, <vscale x 16 x float> [[VS2]], bfloat [[RS1]], i64 0, i64 [[VL]])
+// CHECK-RV64-NEXT: ret <vscale x 16 x float> [[TMP0]]
+//
+vfloat32m8_t test_vfwsub_wf_bf16_f32m8_rm(vfloat32m8_t vs2, __bf16 rs1,
+ size_t vl) {
+ return __riscv_vfwsub_wf(vs2, rs1, __RISCV_FRM_RNE, vl);
+}
+
+// CHECK-RV64-LABEL: define dso_local <vscale x 1 x float> @test_vfwsub_vv_bf16mf4_f32mf2_rm_m(
+// CHECK-RV64-SAME: <vscale x 1 x i1> [[VM:%.*]], <vscale x 1 x bfloat> [[VS2:%.*]], <vscale x 1 x bfloat> [[VS1:%.*]], i64 noundef [[VL:%.*]]) #[[ATTR0]] {
+// CHECK-RV64-NEXT: [[ENTRY:.*:]]
+// CHECK-RV64-NEXT: [[TMP0:%.*]] = call <vscale x 1 x float> @llvm.riscv.vfwsub.mask.nxv1f32.nxv1bf16.nxv1bf16.i64(<vscale x 1 x float> poison, <vscale x 1 x bfloat> [[VS2]], <vscale x 1 x bfloat> [[VS1]], <vscale x 1 x i1> [[VM]], i64 0, i64 [[VL]], i64 3)
+// CHECK-RV64-NEXT: ret <vscale x 1 x float> [[TMP0]]
+//
+vfloat32mf2_t test_vfwsub_vv_bf16mf4_f32mf2_rm_m(vbool64_t vm,
+ vbfloat16mf4_t vs2,
+ vbfloat16mf4_t vs1,
+ size_t vl) {
+ return __riscv_vfwsub_vv(vm, vs2, vs1, __RISCV_FRM_RNE, vl);
+}
+
+// CHECK-RV64-LABEL: define dso_local <vscale x 1 x float> @test_vfwsub_vf_bf16mf4_f32mf2_rm_m(
+// CHECK-RV64-SAME: <vscale x 1 x i1> [[VM:%.*]], <vscale x 1 x bfloat> [[VS2:%.*]], bfloat noundef [[RS1:%.*]], i64 noundef [[VL:%.*]]) #[[ATTR0]] {
+// CHECK-RV64-NEXT: [[ENTRY:.*:]]
+// CHECK-RV64-NEXT: [[TMP0:%.*]] = call <vscale x 1 x float> @llvm.riscv.vfwsub.mask.nxv1f32.nxv1bf16.bf16.i64(<vscale x 1 x float> poison, <vscale x 1 x bfloat> [[VS2]], bfloat [[RS1]], <vscale x 1 x i1> [[VM]], i64 0, i64 [[VL]], i64 3)
+// CHECK-RV64-NEXT: ret <vscale x 1 x float> [[TMP0]]
+//
+vfloat32mf2_t test_vfwsub_vf_bf16mf4_f32mf2_rm_m(vbool64_t vm,
+ vbfloat16mf4_t vs2, __bf16 rs1,
+ size_t vl) {
+ return __riscv_vfwsub_vf(vm, vs2, rs1, __RISCV_FRM_RNE, vl);
+}
+
+// CHECK-RV64-LABEL: define dso_local <vscale x 1 x float> @test_vfwsub_wv_bf16mf4_f32mf2_rm_m(
+// CHECK-RV64-SAME: <vscale x 1 x i1> [[VM:%.*]], <vscale x 1 x float> [[VS2:%.*]], <vscale x 1 x bfloat> [[VS1:%.*]], i64 noundef [[VL:%.*]]) #[[ATTR0]] {
+// CHECK-RV64-NEXT: [[ENTRY:.*:]]
+// CHECK-RV64-NEXT: [[TMP0:%.*]] = call <vscale x 1 x float> @llvm.riscv.vfwsub.w.mask.nxv1f32.nxv1bf16.i64(<vscale x 1 x float> poison, <vscale x 1 x float> [[VS2]], <vscale x 1 x bfloat> [[VS1]], <vscale x 1 x i1> [[VM]], i64 0, i64 [[VL]], i64 3)
+// CHECK-RV64-NEXT: ret <vscale x 1 x float> [[TMP0]]
+//
+vfloat32mf2_t test_vfwsub_wv_bf16mf4_f32mf2_rm_m(vbool64_t vm,
+ vfloat32mf2_t vs2,
+ vbfloat16mf4_t vs1,
+ size_t vl) {
+ return __riscv_vfwsub_wv(vm, vs2, vs1, __RISCV_FRM_RNE, vl);
+}
+
+// CHECK-RV64-LABEL: define dso_local <vscale x 1 x float> @test_vfwsub_wf_bf16_f32mf2_rm_m(
+// CHECK-RV64-SAME: <vscale x 1 x i1> [[VM:%.*]], <vscale x 1 x float> [[VS2:%.*]], bfloat noundef [[RS1:%.*]], i64 noundef [[VL:%.*]]) #[[ATTR0]] {
+// CHECK-RV64-NEXT: [[ENTRY:.*:]]
+// CHECK-RV64-NEXT: [[TMP0:%.*]] = call <vscale x 1 x float> @llvm.riscv.vfwsub.w.mask.nxv1f32.bf16.i64(<vscale x 1 x float> poison, <vscale x 1 x float> [[VS2]], bfloat [[RS1]], <vscale x 1 x i1> [[VM]], i64 0, i64 [[VL]], i64 3)
+// CHECK-RV64-NEXT: ret <vscale x 1 x float> [[TMP0]]
+//
+vfloat32mf2_t test_vfwsub_wf_bf16_f32mf2_rm_m(vbool64_t vm, vfloat32mf2_t vs2,
+ __bf16 rs1, size_t vl) {
+ return __riscv_vfwsub_wf(vm, vs2, rs1, __RISCV_FRM_RNE, vl);
+}
+
+// CHECK-RV64-LABEL: define dso_local <vscale x 2 x float> @test_vfwsub_vv_bf16mf2_f32m1_rm_m(
+// CHECK-RV64-SAME: <vscale x 2 x i1> [[VM:%.*]], <vscale x 2 x bfloat> [[VS2:%.*]], <vscale x 2 x bfloat> [[VS1:%.*]], i64 noundef [[VL:%.*]]) #[[ATTR0]] {
+// CHECK-RV64-NEXT: [[ENTRY:.*:]]
+// CHECK-RV64-NEXT: [[TMP0:%.*]] = call <vscale x 2 x float> @llvm.riscv.vfwsub.mask.nxv2f32.nxv2bf16.nxv2bf16.i64(<vscale x 2 x float> poison, <vscale x 2 x bfloat> [[VS2]], <vscale x 2 x bfloat> [[VS1]], <vscale x 2 x i1> [[VM]], i64 0, i64 [[VL]], i64 3)
+// CHECK-RV64-NEXT: ret <vscale x 2 x float> [[TMP0]]
+//
+vfloat32m1_t test_vfwsub_vv_bf16mf2_f32m1_rm_m(vbool32_t vm, vbfloat16mf2_t vs2,
+ vbfloat16mf2_t vs1, size_t vl) {
+ return __riscv_vfwsub_vv(vm, vs2, vs1, __RISCV_FRM_RNE, vl);
+}
+
+// CHECK-RV64-LABEL: define dso_local <vscale x 2 x float> @test_vfwsub_vf_bf16mf2_f32m1_rm_m(
+// CHECK-RV64-SAME: <vscale x 2 x i1> [[VM:%.*]], <vscale x 2 x bfloat> [[VS2:%.*]], bfloat noundef [[RS1:%.*]], i64 noundef [[VL:%.*]]) #[[ATTR0]] {
+// CHECK-RV64-NEXT: [[ENTRY:.*:]]
+// CHECK-RV64-NEXT: [[TMP0:%.*]] = call <vscale x 2 x float> @llvm.riscv.vfwsub.mask.nxv2f32.nxv2bf16.bf16.i64(<vscale x 2 x float> poison, <vscale x 2 x bfloat> [[VS2]], bfloat [[RS1]], <vscale x 2 x i1> [[VM]], i64 0, i64 [[VL]], i64 3)
+// CHECK-RV64-NEXT: ret <vscale x 2 x float> [[TMP0]]
+//
+vfloat32m1_t test_vfwsub_vf_bf16mf2_f32m1_rm_m(vbool32_t vm, vbfloat16mf2_t vs2,
+ __bf16 rs1, size_t vl) {
+ return __riscv_vfwsub_vf(vm, vs2, rs1, __RISCV_FRM_RNE, vl);
+}
+
+// CHECK-RV64-LABEL: define dso_local <vscale x 2 x float> @test_vfwsub_wv_bf16mf2_f32m1_rm_m(
+// CHECK-RV64-SAME: <vscale x 2 x i1> [[VM:%.*]], <vscale x 2 x float> [[VS2:%.*]], <vscale x 2 x bfloat> [[VS1:%.*]], i64 noundef [[VL:%.*]]) #[[ATTR0]] {
+// CHECK-RV64-NEXT: [[ENTRY:.*:]]
+// CHECK-RV64-NEXT: [[TMP0:%.*]] = call <vscale x 2 x float> @llvm.riscv.vfwsub.w.mask.nxv2f32.nxv2bf16.i64(<vscale x 2 x float> poison, <vscale x 2 x float> [[VS2]], <vscale x 2 x bfloat> [[VS1]], <vscale x 2 x i1> [[VM]], i64 0, i64 [[VL]], i64 3)
+// CHECK-RV64-NEXT: ret <vscale x 2 x float> [[TMP0]]
+//
+vfloat32m1_t test_vfwsub_wv_bf16mf2_f32m1_rm_m(vbool32_t vm, vfloat32m1_t vs2,
+ vbfloat16mf2_t vs1, size_t vl) {
+ return __riscv_vfwsub_wv(vm, vs2, vs1, __RISCV_FRM_RNE, vl);
+}
+
+// CHECK-RV64-LABEL: define dso_local <vscale x 2 x float> @test_vfwsub_wf_bf16_f32m1_rm_m(
+// CHECK-RV64-SAME: <vscale x 2 x i1> [[VM:%.*]], <vscale x 2 x float> [[VS2:%.*]], bfloat noundef [[RS1:%.*]], i64 noundef [[VL:%.*]]) #[[ATTR0]] {
+// CHECK-RV64-NEXT: [[ENTRY:.*:]]
+// CHECK-RV64-NEXT: [[TMP0:%.*]] = call <vscale x 2 x float> @llvm.riscv.vfwsub.w.mask.nxv2f32.bf16.i64(<vscale x 2 x float> poison, <vscale x 2 x float> [[VS2]], bfloat [[RS1]], <vscale x 2 x i1> [[VM]], i64 0, i64 [[VL]], i64 3)
+// CHECK-RV64-NEXT: ret <vscale x 2 x float> [[TMP0]]
+//
+vfloat32m1_t test_vfwsub_wf_bf16_f32m1_rm_m(vbool32_t vm, vfloat32m1_t vs2,
+ __bf16 rs1, size_t vl) {
+ return __riscv_vfwsub_wf(vm, vs2, rs1, __RISCV_FRM_RNE, vl);
+}
+
+// CHECK-RV64-LABEL: define dso_local <vscale x 4 x float> @test_vfwsub_vv_bf16m1_f32m2_rm_m(
+// CHECK-RV64-SAME: <vscale x 4 x i1> [[VM:%.*]], <vscale x 4 x bfloat> [[VS2:%.*]], <vscale x 4 x bfloat> [[VS1:%.*]], i64 noundef [[VL:%.*]]) #[[ATTR0]] {
+// CHECK-RV64-NEXT: [[ENTRY:.*:]]
+// CHECK-RV64-NEXT: [[TMP0:%.*]] = call <vscale x 4 x float> @llvm.riscv.vfwsub.mask.nxv4f32.nxv4bf16.nxv4bf16.i64(<vscale x 4 x float> poison, <vscale x 4 x bfloat> [[VS2]], <vscale x 4 x bfloat> [[VS1]], <vscale x 4 x i1> [[VM]], i64 0, i64 [[VL]], i64 3)
+// CHECK-RV64-NEXT: ret <vscale x 4 x float> [[TMP0]]
+//
+vfloat32m2_t test_vfwsub_vv_bf16m1_f32m2_rm_m(vbool16_t vm, vbfloat16m1_t vs2,
+ vbfloat16m1_t vs1, size_t vl) {
+ return __riscv_vfwsub_vv(vm, vs2, vs1, __RISCV_FRM_RNE, vl);
+}
+
+// CHECK-RV64-LABEL: define dso_local <vscale x 4 x float> @test_vfwsub_vf_bf16m1_f32m2_rm_m(
+// CHECK-RV64-SAME: <vscale x 4 x i1> [[VM:%.*]], <vscale x 4 x bfloat> [[VS2:%.*]], bfloat noundef [[RS1:%.*]], i64 noundef [[VL:%.*]]) #[[ATTR0]] {
+// CHECK-RV64-NEXT: [[ENTRY:.*:]]
+// CHECK-RV64-NEXT: [[TMP0:%.*]] = call <vscale x 4 x float> @llvm.riscv.vfwsub.mask.nxv4f32.nxv4bf16.bf16.i64(<vscale x 4 x float> poison, <vscale x 4 x bfloat> [[VS2]], bfloat [[RS1]], <vscale x 4 x i1> [[VM]], i64 0, i64 [[VL]], i64 3)
+// CHECK-RV64-NEXT: ret <vscale x 4 x float> [[TMP0]]
+//
+vfloat32m2_t test_vfwsub_vf_bf16m1_f32m2_rm_m(vbool16_t vm, vbfloat16m1_t vs2,
+ __bf16 rs1, size_t vl) {
+ return __riscv_vfwsub_vf(vm, vs2, rs1, __RISCV_FRM_RNE, vl);
+}
+
+// CHECK-RV64-LABEL: define dso_local <vscale x 4 x float> @test_vfwsub_wv_bf16m1_f32m2_rm_m(
+// CHECK-RV64-SAME: <vscale x 4 x i1> [[VM:%.*]], <vscale x 4 x float> [[VS2:%.*]], <vscale x 4 x bfloat> [[VS1:%.*]], i64 noundef [[VL:%.*]]) #[[ATTR0]] {
+// CHECK-RV64-NEXT: [[ENTRY:.*:]]
+// CHECK-RV64-NEXT: [[TMP0:%.*]] = call <vscale x 4 x float> @llvm.riscv.vfwsub.w.mask.nxv4f32.nxv4bf16.i64(<vscale x 4 x float> poison, <vscale x 4 x float> [[VS2]], <vscale x 4 x bfloat> [[VS1]], <vscale x 4 x i1> [[VM]], i64 0, i64 [[VL]], i64 3)
+// CHECK-RV64-NEXT: ret <vscale x 4 x float> [[TMP0]]
+//
+vfloat32m2_t test_vfwsub_wv_bf16m1_f32m2_rm_m(vbool16_t vm, vfloat32m2_t vs2,
+ vbfloat16m1_t vs1, size_t vl) {
+ return __riscv_vfwsub_wv(vm, vs2, vs1, __RISCV_FRM_RNE, vl);
+}
+
+// CHECK-RV64-LABEL: define dso_local <vscale x 4 x float> @test_vfwsub_wf_bf16_f32m2_rm_m(
+// CHECK-RV64-SAME: <vscale x 4 x i1> [[VM:%.*]], <vscale x 4 x float> [[VS2:%.*]], bfloat noundef [[RS1:%.*]], i64 noundef [[VL:%.*]]) #[[ATTR0]] {
+// CHECK-RV64-NEXT: [[ENTRY:.*:]]
+// CHECK-RV64-NEXT: [[TMP0:%.*]] = call <vscale x 4 x float> @llvm.riscv.vfwsub.w.mask.nxv4f32.bf16.i64(<vscale x 4 x float> poison, <vscale x 4 x float> [[VS2]], bfloat [[RS1]], <vscale x 4 x i1> [[VM]], i64 0, i64 [[VL]], i64 3)
+// CHECK-RV64-NEXT: ret <vscale x 4 x float> [[TMP0]]
+//
+vfloat32m2_t test_vfwsub_wf_bf16_f32m2_rm_m(vbool16_t vm, vfloat32m2_t vs2,
+ __bf16 rs1, size_t vl) {
+ return __riscv_vfwsub_wf(vm, vs2, rs1, __RISCV_FRM_RNE, vl);
+}
+
+// CHECK-RV64-LABEL: define dso_local <vscale x 8 x float> @test_vfwsub_vv_bf16m2_f32m4_rm_m(
+// CHECK-RV64-SAME: <vscale x 8 x i1> [[VM:%.*]], <vscale x 8 x bfloat> [[VS2:%.*]], <vscale x 8 x bfloat> [[VS1:%.*]], i64 noundef [[VL:%.*]]) #[[ATTR0]] {
+// CHECK-RV64-NEXT: [[ENTRY:.*:]]
+// CHECK-RV64-NEXT: [[TMP0:%.*]] = call <vscale x 8 x float> @llvm.riscv.vfwsub.mask.nxv8f32.nxv8bf16.nxv8bf16.i64(<vscale x 8 x float> poison, <vscale x 8 x bfloat> [[VS2]], <vscale x 8 x bfloat> [[VS1]], <vscale x 8 x i1> [[VM]], i64 0, i64 [[VL]], i64 3)
+// CHECK-RV64-NEXT: ret <vscale x 8 x float> [[TMP0]]
+//
+vfloat32m4_t test_vfwsub_vv_bf16m2_f32m4_rm_m(vbool8_t vm, vbfloat16m2_t vs2,
+ vbfloat16m2_t vs1, size_t vl) {
+ return __riscv_vfwsub_vv(vm, vs2, vs1, __RISCV_FRM_RNE, vl);
+}
+
+// CHECK-RV64-LABEL: define dso_local <vscale x 8 x float> @test_vfwsub_vf_bf16m2_f32m4_rm_m(
+// CHECK-RV64-SAME: <vscale x 8 x i1> [[VM:%.*]], <vscale x 8 x bfloat> [[VS2:%.*]], bfloat noundef [[RS1:%.*]], i64 noundef [[VL:%.*]]) #[[ATTR0]] {
+// CHECK-RV64-NEXT: [[ENTRY:.*:]]
+// CHECK-RV64-NEXT: [[TMP0:%.*]] = call <vscale x 8 x float> @llvm.riscv.vfwsub.mask.nxv8f32.nxv8bf16.bf16.i64(<vscale x 8 x float> poison, <vscale x 8 x bfloat> [[VS2]], bfloat [[RS1]], <vscale x 8 x i1> [[VM]], i64 0, i64 [[VL]], i64 3)
+// CHECK-RV64-NEXT: ret <vscale x 8 x float> [[TMP0]]
+//
+vfloat32m4_t test_vfwsub_vf_bf16m2_f32m4_rm_m(vbool8_t vm, vbfloat16m2_t vs2,
+ __bf16 rs1, size_t vl) {
+ return __riscv_vfwsub_vf(vm, vs2, rs1, __RISCV_FRM_RNE, vl);
+}
+
+// CHECK-RV64-LABEL: define dso_local <vscale x 8 x float> @test_vfwsub_wv_bf16m2_f32m4_rm_m(
+// CHECK-RV64-SAME: <vscale x 8 x i1> [[VM:%.*]], <vscale x 8 x float> [[VS2:%.*]], <vscale x 8 x bfloat> [[VS1:%.*]], i64 noundef [[VL:%.*]]) #[[ATTR0]] {
+// CHECK-RV64-NEXT: [[ENTRY:.*:]]
+// CHECK-RV64-NEXT: [[TMP0:%.*]] = call <vscale x 8 x float> @llvm.riscv.vfwsub.w.mask.nxv8f32.nxv8bf16.i64(<vscale x 8 x float> poison, <vscale x 8 x float> [[VS2]], <vscale x 8 x bfloat> [[VS1]], <vscale x 8 x i1> [[VM]], i64 0, i64 [[VL]], i64 3)
+// CHECK-RV64-NEXT: ret <vscale x 8 x float> [[TMP0]]
+//
+vfloat32m4_t test_vfwsub_wv_bf16m2_f32m4_rm_m(vbool8_t vm, vfloat32m4_t vs2,
+ vbfloat16m2_t vs1, size_t vl) {
+ return __riscv_vfwsub_wv(vm, vs2, vs1, __RISCV_FRM_RNE, vl);
+}
+
+// CHECK-RV64-LABEL: define dso_local <vscale x 8 x float> @test_vfwsub_wf_bf16_f32m4_rm_m(
+// CHECK-RV64-SAME: <vscale x 8 x i1> [[VM:%.*]], <vscale x 8 x float> [[VS2:%.*]], bfloat noundef [[RS1:%.*]], i64 noundef [[VL:%.*]]) #[[ATTR0]] {
+// CHECK-RV64-NEXT: [[ENTRY:.*:]]
+// CHECK-RV64-NEXT: [[TMP0:%.*]] = call <vscale x 8 x float> @llvm.riscv.vfwsub.w.mask.nxv8f32.bf16.i64(<vscale x 8 x float> poison, <vscale x 8 x float> [[VS2]], bfloat [[RS1]], <vscale x 8 x i1> [[VM]], i64 0, i64 [[VL]], i64 3)
+// CHECK-RV64-NEXT: ret <vscale x 8 x float> [[TMP0]]
+//
+vfloat32m4_t test_vfwsub_wf_bf16_f32m4_rm_m(vbool8_t vm, vfloat32m4_t vs2,
+ __bf16 rs1, size_t vl) {
+ return __riscv_vfwsub_wf(vm, vs2, rs1, __RISCV_FRM_RNE, vl);
+}
+
+// CHECK-RV64-LABEL: define dso_local <vscale x 16 x float> @test_vfwsub_vv_bf16m4_f32m8_rm_m(
+// CHECK-RV64-SAME: <vscale x 16 x i1> [[VM:%.*]], <vscale x 16 x bfloat> [[VS2:%.*]], <vscale x 16 x bfloat> [[VS1:%.*]], i64 noundef [[VL:%.*]]) #[[ATTR0]] {
+// CHECK-RV64-NEXT: [[ENTRY:.*:]]
+// CHECK-RV64-NEXT: [[TMP0:%.*]] = call <vscale x 16 x float> @llvm.riscv.vfwsub.mask.nxv16f32.nxv16bf16.nxv16bf16.i64(<vscale x 16 x float> poison, <vscale x 16 x bfloat> [[VS2]], <vscale x 16 x bfloat> [[VS1]], <vscale x 16 x i1> [[VM]], i64 0, i64 [[VL]], i64 3)
+// CHECK-RV64-NEXT: ret <vscale x 16 x float> [[TMP0]]
+//
+vfloat32m8_t test_vfwsub_vv_bf16m4_f32m8_rm_m(vbool4_t vm, vbfloat16m4_t vs2,
+ vbfloat16m4_t vs1, size_t vl) {
+ return __riscv_vfwsub_vv(vm, vs2, vs1, __RISCV_FRM_RNE, vl);
+}
+
+// CHECK-RV64-LABEL: define dso_local <vscale x 16 x float> @test_vfwsub_vf_bf16m4_f32m8_rm_m(
+// CHECK-RV64-SAME: <vscale x 16 x i1> [[VM:%.*]], <vscale x 16 x bfloat> [[VS2:%.*]], bfloat noundef [[RS1:%.*]], i64 noundef [[VL:%.*]]) #[[ATTR0]] {
+// CHECK-RV64-NEXT: [[ENTRY:.*:]]
+// CHECK-RV64-NEXT: [[TMP0:%.*]] = call <vscale x 16 x float> @llvm.riscv.vfwsub.mask.nxv16f32.nxv16bf16.bf16.i64(<vscale x 16 x float> poison, <vscale x 16 x bfloat> [[VS2]], bfloat [[RS1]], <vscale x 16 x i1> [[VM]], i64 0, i64 [[VL]], i64 3)
+// CHECK-RV64-NEXT: ret <vscale x 16 x float> [[TMP0]]
+//
+vfloat32m8_t test_vfwsub_vf_bf16m4_f32m8_rm_m(vbool4_t vm, vbfloat16m4_t vs2,
+ __bf16 rs1, size_t vl) {
+ return __riscv_vfwsub_vf(vm, vs2, rs1, __RISCV_FRM_RNE, vl);
+}
+
+// CHECK-RV64-LABEL: define dso_local <vscale x 16 x float> @test_vfwsub_wv_bf16m4_f32m8_rm_m(
+// CHECK-RV64-SAME: <vscale x 16 x i1> [[VM:%.*]], <vscale x 16 x float> [[VS2:%.*]], <vscale x 16 x bfloat> [[VS1:%.*]], i64 noundef [[VL:%.*]]) #[[ATTR0]] {
+// CHECK-RV64-NEXT: [[ENTRY:.*:]]
+// CHECK-RV64-NEXT: [[TMP0:%.*]] = call <vscale x 16 x float> @llvm.riscv.vfwsub.w.mask.nxv16f32.nxv16bf16.i64(<vscale x 16 x float> poison, <vscale x 16 x float> [[VS2]], <vscale x 16 x bfloat> [[VS1]], <vscale x 16 x i1> [[VM]], i64 0, i64 [[VL]], i64 3)
+// CHECK-RV64-NEXT: ret <vscale x 16 x float> [[TMP0]]
+//
+vfloat32m8_t test_vfwsub_wv_bf16m4_f32m8_rm_m(vbool4_t vm, vfloat32m8_t vs2,
+ vbfloat16m4_t vs1, size_t vl) {
+ return __riscv_vfwsub_wv(vm, vs2, vs1, __RISCV_FRM_RNE, vl);
+}
+
+// CHECK-RV64-LABEL: define dso_local <vscale x 16 x float> @test_vfwsub_wf_bf16_f32m8_rm_m(
+// CHECK-RV64-SAME: <vscale x 16 x i1> [[VM:%.*]], <vscale x 16 x float> [[VS2:%.*]], bfloat noundef [[RS1:%.*]], i64 noundef [[VL:%.*]]) #[[ATTR0]] {
+// CHECK-RV64-NEXT: [[ENTRY:.*:]]
+// CHECK-RV64-NEXT: [[TMP0:%.*]] = call <vscale x 16 x float> @llvm.riscv.vfwsub.w.mask.nxv16f32.bf16.i64(<vscale x 16 x float> poison, <vscale x 16 x float> [[VS2]], bfloat [[RS1]], <vscale x 16 x i1> [[VM]], i64 0, i64 [[VL]], i64 3)
+// CHECK-RV64-NEXT: ret <vscale x 16 x float> [[TMP0]]
+//
+vfloat32m8_t test_vfwsub_wf_bf16_f32m8_rm_m(vbool4_t vm, vfloat32m8_t vs2,
+ __bf16 rs1, size_t vl) {
+ return __riscv_vfwsub_wf(vm, vs2, rs1, __RISCV_FRM_RNE, vl);
+}
diff --git a/clang/test/CodeGen/RISCV/rvv-intrinsics-autogenerated/zvfbfa/non-policy/overloaded/vmfeq.c b/clang/test/CodeGen/RISCV/rvv-intrinsics-autogenerated/zvfbfa/non-policy/overloaded/vmfeq.c
new file mode 100644
index 0000000..29881c9
--- /dev/null
+++ b/clang/test/CodeGen/RISCV/rvv-intrinsics-autogenerated/zvfbfa/non-policy/overloaded/vmfeq.c
@@ -0,0 +1,249 @@
+// NOTE: Assertions have been autogenerated by utils/update_cc_test_checks.py UTC_ARGS: --version 5
+// REQUIRES: riscv-registered-target
+// RUN: %clang_cc1 -triple riscv64 -target-feature +v \
+// RUN: -target-feature +experimental-zvfbfa -disable-O0-optnone \
+// RUN: -emit-llvm %s -o - | opt -S -passes=mem2reg | \
+// RUN: FileCheck --check-prefix=CHECK-RV64 %s
+
+#include <riscv_vector.h>
+
+// CHECK-RV64-LABEL: define dso_local <vscale x 1 x i1> @test_vmfeq_vv_bf16mf4_b64(
+// CHECK-RV64-SAME: <vscale x 1 x bfloat> [[OP1:%.*]], <vscale x 1 x bfloat> [[OP2:%.*]], i64 noundef [[VL:%.*]]) #[[ATTR0:[0-9]+]] {
+// CHECK-RV64-NEXT: [[ENTRY:.*:]]
+// CHECK-RV64-NEXT: [[TMP0:%.*]] = call <vscale x 1 x i1> @llvm.riscv.vmfeq.nxv1bf16.nxv1bf16.i64(<vscale x 1 x bfloat> [[OP1]], <vscale x 1 x bfloat> [[OP2]], i64 [[VL]])
+// CHECK-RV64-NEXT: ret <vscale x 1 x i1> [[TMP0]]
+//
+vbool64_t test_vmfeq_vv_bf16mf4_b64(vbfloat16mf4_t op1, vbfloat16mf4_t op2, size_t vl) {
+ return __riscv_vmfeq(op1, op2, vl);
+}
+
+// CHECK-RV64-LABEL: define dso_local <vscale x 1 x i1> @test_vmfeq_vf_bf16mf4_b64(
+// CHECK-RV64-SAME: <vscale x 1 x bfloat> [[OP1:%.*]], bfloat noundef [[OP2:%.*]], i64 noundef [[VL:%.*]]) #[[ATTR0]] {
+// CHECK-RV64-NEXT: [[ENTRY:.*:]]
+// CHECK-RV64-NEXT: [[TMP0:%.*]] = call <vscale x 1 x i1> @llvm.riscv.vmfeq.nxv1bf16.bf16.i64(<vscale x 1 x bfloat> [[OP1]], bfloat [[OP2]], i64 [[VL]])
+// CHECK-RV64-NEXT: ret <vscale x 1 x i1> [[TMP0]]
+//
+vbool64_t test_vmfeq_vf_bf16mf4_b64(vbfloat16mf4_t op1, __bf16 op2, size_t vl) {
+ return __riscv_vmfeq(op1, op2, vl);
+}
+
+// CHECK-RV64-LABEL: define dso_local <vscale x 2 x i1> @test_vmfeq_vv_bf16mf2_b32(
+// CHECK-RV64-SAME: <vscale x 2 x bfloat> [[OP1:%.*]], <vscale x 2 x bfloat> [[OP2:%.*]], i64 noundef [[VL:%.*]]) #[[ATTR0]] {
+// CHECK-RV64-NEXT: [[ENTRY:.*:]]
+// CHECK-RV64-NEXT: [[TMP0:%.*]] = call <vscale x 2 x i1> @llvm.riscv.vmfeq.nxv2bf16.nxv2bf16.i64(<vscale x 2 x bfloat> [[OP1]], <vscale x 2 x bfloat> [[OP2]], i64 [[VL]])
+// CHECK-RV64-NEXT: ret <vscale x 2 x i1> [[TMP0]]
+//
+vbool32_t test_vmfeq_vv_bf16mf2_b32(vbfloat16mf2_t op1, vbfloat16mf2_t op2, size_t vl) {
+ return __riscv_vmfeq(op1, op2, vl);
+}
+
+// CHECK-RV64-LABEL: define dso_local <vscale x 2 x i1> @test_vmfeq_vf_bf16mf2_b32(
+// CHECK-RV64-SAME: <vscale x 2 x bfloat> [[OP1:%.*]], bfloat noundef [[OP2:%.*]], i64 noundef [[VL:%.*]]) #[[ATTR0]] {
+// CHECK-RV64-NEXT: [[ENTRY:.*:]]
+// CHECK-RV64-NEXT: [[TMP0:%.*]] = call <vscale x 2 x i1> @llvm.riscv.vmfeq.nxv2bf16.bf16.i64(<vscale x 2 x bfloat> [[OP1]], bfloat [[OP2]], i64 [[VL]])
+// CHECK-RV64-NEXT: ret <vscale x 2 x i1> [[TMP0]]
+//
+vbool32_t test_vmfeq_vf_bf16mf2_b32(vbfloat16mf2_t op1, __bf16 op2, size_t vl) {
+ return __riscv_vmfeq(op1, op2, vl);
+}
+
+// CHECK-RV64-LABEL: define dso_local <vscale x 4 x i1> @test_vmfeq_vv_bf16m1_b16(
+// CHECK-RV64-SAME: <vscale x 4 x bfloat> [[OP1:%.*]], <vscale x 4 x bfloat> [[OP2:%.*]], i64 noundef [[VL:%.*]]) #[[ATTR0]] {
+// CHECK-RV64-NEXT: [[ENTRY:.*:]]
+// CHECK-RV64-NEXT: [[TMP0:%.*]] = call <vscale x 4 x i1> @llvm.riscv.vmfeq.nxv4bf16.nxv4bf16.i64(<vscale x 4 x bfloat> [[OP1]], <vscale x 4 x bfloat> [[OP2]], i64 [[VL]])
+// CHECK-RV64-NEXT: ret <vscale x 4 x i1> [[TMP0]]
+//
+vbool16_t test_vmfeq_vv_bf16m1_b16(vbfloat16m1_t op1, vbfloat16m1_t op2, size_t vl) {
+ return __riscv_vmfeq(op1, op2, vl);
+}
+
+// CHECK-RV64-LABEL: define dso_local <vscale x 4 x i1> @test_vmfeq_vf_bf16m1_b16(
+// CHECK-RV64-SAME: <vscale x 4 x bfloat> [[OP1:%.*]], bfloat noundef [[OP2:%.*]], i64 noundef [[VL:%.*]]) #[[ATTR0]] {
+// CHECK-RV64-NEXT: [[ENTRY:.*:]]
+// CHECK-RV64-NEXT: [[TMP0:%.*]] = call <vscale x 4 x i1> @llvm.riscv.vmfeq.nxv4bf16.bf16.i64(<vscale x 4 x bfloat> [[OP1]], bfloat [[OP2]], i64 [[VL]])
+// CHECK-RV64-NEXT: ret <vscale x 4 x i1> [[TMP0]]
+//
+vbool16_t test_vmfeq_vf_bf16m1_b16(vbfloat16m1_t op1, __bf16 op2, size_t vl) {
+ return __riscv_vmfeq(op1, op2, vl);
+}
+
+// CHECK-RV64-LABEL: define dso_local <vscale x 8 x i1> @test_vmfeq_vv_bf16m2_b8(
+// CHECK-RV64-SAME: <vscale x 8 x bfloat> [[OP1:%.*]], <vscale x 8 x bfloat> [[OP2:%.*]], i64 noundef [[VL:%.*]]) #[[ATTR0]] {
+// CHECK-RV64-NEXT: [[ENTRY:.*:]]
+// CHECK-RV64-NEXT: [[TMP0:%.*]] = call <vscale x 8 x i1> @llvm.riscv.vmfeq.nxv8bf16.nxv8bf16.i64(<vscale x 8 x bfloat> [[OP1]], <vscale x 8 x bfloat> [[OP2]], i64 [[VL]])
+// CHECK-RV64-NEXT: ret <vscale x 8 x i1> [[TMP0]]
+//
+vbool8_t test_vmfeq_vv_bf16m2_b8(vbfloat16m2_t op1, vbfloat16m2_t op2, size_t vl) {
+ return __riscv_vmfeq(op1, op2, vl);
+}
+
+// CHECK-RV64-LABEL: define dso_local <vscale x 8 x i1> @test_vmfeq_vf_bf16m2_b8(
+// CHECK-RV64-SAME: <vscale x 8 x bfloat> [[OP1:%.*]], bfloat noundef [[OP2:%.*]], i64 noundef [[VL:%.*]]) #[[ATTR0]] {
+// CHECK-RV64-NEXT: [[ENTRY:.*:]]
+// CHECK-RV64-NEXT: [[TMP0:%.*]] = call <vscale x 8 x i1> @llvm.riscv.vmfeq.nxv8bf16.bf16.i64(<vscale x 8 x bfloat> [[OP1]], bfloat [[OP2]], i64 [[VL]])
+// CHECK-RV64-NEXT: ret <vscale x 8 x i1> [[TMP0]]
+//
+vbool8_t test_vmfeq_vf_bf16m2_b8(vbfloat16m2_t op1, __bf16 op2, size_t vl) {
+ return __riscv_vmfeq(op1, op2, vl);
+}
+
+// CHECK-RV64-LABEL: define dso_local <vscale x 16 x i1> @test_vmfeq_vv_bf16m4_b4(
+// CHECK-RV64-SAME: <vscale x 16 x bfloat> [[OP1:%.*]], <vscale x 16 x bfloat> [[OP2:%.*]], i64 noundef [[VL:%.*]]) #[[ATTR0]] {
+// CHECK-RV64-NEXT: [[ENTRY:.*:]]
+// CHECK-RV64-NEXT: [[TMP0:%.*]] = call <vscale x 16 x i1> @llvm.riscv.vmfeq.nxv16bf16.nxv16bf16.i64(<vscale x 16 x bfloat> [[OP1]], <vscale x 16 x bfloat> [[OP2]], i64 [[VL]])
+// CHECK-RV64-NEXT: ret <vscale x 16 x i1> [[TMP0]]
+//
+vbool4_t test_vmfeq_vv_bf16m4_b4(vbfloat16m4_t op1, vbfloat16m4_t op2, size_t vl) {
+ return __riscv_vmfeq(op1, op2, vl);
+}
+
+// CHECK-RV64-LABEL: define dso_local <vscale x 16 x i1> @test_vmfeq_vf_bf16m4_b4(
+// CHECK-RV64-SAME: <vscale x 16 x bfloat> [[OP1:%.*]], bfloat noundef [[OP2:%.*]], i64 noundef [[VL:%.*]]) #[[ATTR0]] {
+// CHECK-RV64-NEXT: [[ENTRY:.*:]]
+// CHECK-RV64-NEXT: [[TMP0:%.*]] = call <vscale x 16 x i1> @llvm.riscv.vmfeq.nxv16bf16.bf16.i64(<vscale x 16 x bfloat> [[OP1]], bfloat [[OP2]], i64 [[VL]])
+// CHECK-RV64-NEXT: ret <vscale x 16 x i1> [[TMP0]]
+//
+vbool4_t test_vmfeq_vf_bf16m4_b4(vbfloat16m4_t op1, __bf16 op2, size_t vl) {
+ return __riscv_vmfeq(op1, op2, vl);
+}
+
+// CHECK-RV64-LABEL: define dso_local <vscale x 32 x i1> @test_vmfeq_vv_bf16m8_b2(
+// CHECK-RV64-SAME: <vscale x 32 x bfloat> [[OP1:%.*]], <vscale x 32 x bfloat> [[OP2:%.*]], i64 noundef [[VL:%.*]]) #[[ATTR0]] {
+// CHECK-RV64-NEXT: [[ENTRY:.*:]]
+// CHECK-RV64-NEXT: [[TMP0:%.*]] = call <vscale x 32 x i1> @llvm.riscv.vmfeq.nxv32bf16.nxv32bf16.i64(<vscale x 32 x bfloat> [[OP1]], <vscale x 32 x bfloat> [[OP2]], i64 [[VL]])
+// CHECK-RV64-NEXT: ret <vscale x 32 x i1> [[TMP0]]
+//
+vbool2_t test_vmfeq_vv_bf16m8_b2(vbfloat16m8_t op1, vbfloat16m8_t op2, size_t vl) {
+ return __riscv_vmfeq(op1, op2, vl);
+}
+
+// CHECK-RV64-LABEL: define dso_local <vscale x 32 x i1> @test_vmfeq_vf_bf16m8_b2(
+// CHECK-RV64-SAME: <vscale x 32 x bfloat> [[OP1:%.*]], bfloat noundef [[OP2:%.*]], i64 noundef [[VL:%.*]]) #[[ATTR0]] {
+// CHECK-RV64-NEXT: [[ENTRY:.*:]]
+// CHECK-RV64-NEXT: [[TMP0:%.*]] = call <vscale x 32 x i1> @llvm.riscv.vmfeq.nxv32bf16.bf16.i64(<vscale x 32 x bfloat> [[OP1]], bfloat [[OP2]], i64 [[VL]])
+// CHECK-RV64-NEXT: ret <vscale x 32 x i1> [[TMP0]]
+//
+vbool2_t test_vmfeq_vf_bf16m8_b2(vbfloat16m8_t op1, __bf16 op2, size_t vl) {
+ return __riscv_vmfeq(op1, op2, vl);
+}
+
+// CHECK-RV64-LABEL: define dso_local <vscale x 1 x i1> @test_vmfeq_vv_bf16mf4_b64_m(
+// CHECK-RV64-SAME: <vscale x 1 x i1> [[MASK:%.*]], <vscale x 1 x bfloat> [[OP1:%.*]], <vscale x 1 x bfloat> [[OP2:%.*]], i64 noundef [[VL:%.*]]) #[[ATTR0]] {
+// CHECK-RV64-NEXT: [[ENTRY:.*:]]
+// CHECK-RV64-NEXT: [[TMP0:%.*]] = call <vscale x 1 x i1> @llvm.riscv.vmfeq.mask.nxv1bf16.nxv1bf16.i64(<vscale x 1 x i1> poison, <vscale x 1 x bfloat> [[OP1]], <vscale x 1 x bfloat> [[OP2]], <vscale x 1 x i1> [[MASK]], i64 [[VL]])
+// CHECK-RV64-NEXT: ret <vscale x 1 x i1> [[TMP0]]
+//
+vbool64_t test_vmfeq_vv_bf16mf4_b64_m(vbool64_t mask, vbfloat16mf4_t op1, vbfloat16mf4_t op2, size_t vl) {
+ return __riscv_vmfeq(mask, op1, op2, vl);
+}
+
+// CHECK-RV64-LABEL: define dso_local <vscale x 1 x i1> @test_vmfeq_vf_bf16mf4_b64_m(
+// CHECK-RV64-SAME: <vscale x 1 x i1> [[MASK:%.*]], <vscale x 1 x bfloat> [[OP1:%.*]], bfloat noundef [[OP2:%.*]], i64 noundef [[VL:%.*]]) #[[ATTR0]] {
+// CHECK-RV64-NEXT: [[ENTRY:.*:]]
+// CHECK-RV64-NEXT: [[TMP0:%.*]] = call <vscale x 1 x i1> @llvm.riscv.vmfeq.mask.nxv1bf16.bf16.i64(<vscale x 1 x i1> poison, <vscale x 1 x bfloat> [[OP1]], bfloat [[OP2]], <vscale x 1 x i1> [[MASK]], i64 [[VL]])
+// CHECK-RV64-NEXT: ret <vscale x 1 x i1> [[TMP0]]
+//
+vbool64_t test_vmfeq_vf_bf16mf4_b64_m(vbool64_t mask, vbfloat16mf4_t op1, __bf16 op2, size_t vl) {
+ return __riscv_vmfeq(mask, op1, op2, vl);
+}
+
+// CHECK-RV64-LABEL: define dso_local <vscale x 2 x i1> @test_vmfeq_vv_bf16mf2_b32_m(
+// CHECK-RV64-SAME: <vscale x 2 x i1> [[MASK:%.*]], <vscale x 2 x bfloat> [[OP1:%.*]], <vscale x 2 x bfloat> [[OP2:%.*]], i64 noundef [[VL:%.*]]) #[[ATTR0]] {
+// CHECK-RV64-NEXT: [[ENTRY:.*:]]
+// CHECK-RV64-NEXT: [[TMP0:%.*]] = call <vscale x 2 x i1> @llvm.riscv.vmfeq.mask.nxv2bf16.nxv2bf16.i64(<vscale x 2 x i1> poison, <vscale x 2 x bfloat> [[OP1]], <vscale x 2 x bfloat> [[OP2]], <vscale x 2 x i1> [[MASK]], i64 [[VL]])
+// CHECK-RV64-NEXT: ret <vscale x 2 x i1> [[TMP0]]
+//
+vbool32_t test_vmfeq_vv_bf16mf2_b32_m(vbool32_t mask, vbfloat16mf2_t op1, vbfloat16mf2_t op2, size_t vl) {
+ return __riscv_vmfeq(mask, op1, op2, vl);
+}
+
+// CHECK-RV64-LABEL: define dso_local <vscale x 2 x i1> @test_vmfeq_vf_bf16mf2_b32_m(
+// CHECK-RV64-SAME: <vscale x 2 x i1> [[MASK:%.*]], <vscale x 2 x bfloat> [[OP1:%.*]], bfloat noundef [[OP2:%.*]], i64 noundef [[VL:%.*]]) #[[ATTR0]] {
+// CHECK-RV64-NEXT: [[ENTRY:.*:]]
+// CHECK-RV64-NEXT: [[TMP0:%.*]] = call <vscale x 2 x i1> @llvm.riscv.vmfeq.mask.nxv2bf16.bf16.i64(<vscale x 2 x i1> poison, <vscale x 2 x bfloat> [[OP1]], bfloat [[OP2]], <vscale x 2 x i1> [[MASK]], i64 [[VL]])
+// CHECK-RV64-NEXT: ret <vscale x 2 x i1> [[TMP0]]
+//
+vbool32_t test_vmfeq_vf_bf16mf2_b32_m(vbool32_t mask, vbfloat16mf2_t op1, __bf16 op2, size_t vl) {
+ return __riscv_vmfeq(mask, op1, op2, vl);
+}
+
+// CHECK-RV64-LABEL: define dso_local <vscale x 4 x i1> @test_vmfeq_vv_bf16m1_b16_m(
+// CHECK-RV64-SAME: <vscale x 4 x i1> [[MASK:%.*]], <vscale x 4 x bfloat> [[OP1:%.*]], <vscale x 4 x bfloat> [[OP2:%.*]], i64 noundef [[VL:%.*]]) #[[ATTR0]] {
+// CHECK-RV64-NEXT: [[ENTRY:.*:]]
+// CHECK-RV64-NEXT: [[TMP0:%.*]] = call <vscale x 4 x i1> @llvm.riscv.vmfeq.mask.nxv4bf16.nxv4bf16.i64(<vscale x 4 x i1> poison, <vscale x 4 x bfloat> [[OP1]], <vscale x 4 x bfloat> [[OP2]], <vscale x 4 x i1> [[MASK]], i64 [[VL]])
+// CHECK-RV64-NEXT: ret <vscale x 4 x i1> [[TMP0]]
+//
+vbool16_t test_vmfeq_vv_bf16m1_b16_m(vbool16_t mask, vbfloat16m1_t op1, vbfloat16m1_t op2, size_t vl) {
+ return __riscv_vmfeq(mask, op1, op2, vl);
+}
+
+// CHECK-RV64-LABEL: define dso_local <vscale x 4 x i1> @test_vmfeq_vf_bf16m1_b16_m(
+// CHECK-RV64-SAME: <vscale x 4 x i1> [[MASK:%.*]], <vscale x 4 x bfloat> [[OP1:%.*]], bfloat noundef [[OP2:%.*]], i64 noundef [[VL:%.*]]) #[[ATTR0]] {
+// CHECK-RV64-NEXT: [[ENTRY:.*:]]
+// CHECK-RV64-NEXT: [[TMP0:%.*]] = call <vscale x 4 x i1> @llvm.riscv.vmfeq.mask.nxv4bf16.bf16.i64(<vscale x 4 x i1> poison, <vscale x 4 x bfloat> [[OP1]], bfloat [[OP2]], <vscale x 4 x i1> [[MASK]], i64 [[VL]])
+// CHECK-RV64-NEXT: ret <vscale x 4 x i1> [[TMP0]]
+//
+vbool16_t test_vmfeq_vf_bf16m1_b16_m(vbool16_t mask, vbfloat16m1_t op1, __bf16 op2, size_t vl) {
+ return __riscv_vmfeq(mask, op1, op2, vl);
+}
+
+// CHECK-RV64-LABEL: define dso_local <vscale x 8 x i1> @test_vmfeq_vv_bf16m2_b8_m(
+// CHECK-RV64-SAME: <vscale x 8 x i1> [[MASK:%.*]], <vscale x 8 x bfloat> [[OP1:%.*]], <vscale x 8 x bfloat> [[OP2:%.*]], i64 noundef [[VL:%.*]]) #[[ATTR0]] {
+// CHECK-RV64-NEXT: [[ENTRY:.*:]]
+// CHECK-RV64-NEXT: [[TMP0:%.*]] = call <vscale x 8 x i1> @llvm.riscv.vmfeq.mask.nxv8bf16.nxv8bf16.i64(<vscale x 8 x i1> poison, <vscale x 8 x bfloat> [[OP1]], <vscale x 8 x bfloat> [[OP2]], <vscale x 8 x i1> [[MASK]], i64 [[VL]])
+// CHECK-RV64-NEXT: ret <vscale x 8 x i1> [[TMP0]]
+//
+vbool8_t test_vmfeq_vv_bf16m2_b8_m(vbool8_t mask, vbfloat16m2_t op1, vbfloat16m2_t op2, size_t vl) {
+ return __riscv_vmfeq(mask, op1, op2, vl);
+}
+
+// CHECK-RV64-LABEL: define dso_local <vscale x 8 x i1> @test_vmfeq_vf_bf16m2_b8_m(
+// CHECK-RV64-SAME: <vscale x 8 x i1> [[MASK:%.*]], <vscale x 8 x bfloat> [[OP1:%.*]], bfloat noundef [[OP2:%.*]], i64 noundef [[VL:%.*]]) #[[ATTR0]] {
+// CHECK-RV64-NEXT: [[ENTRY:.*:]]
+// CHECK-RV64-NEXT: [[TMP0:%.*]] = call <vscale x 8 x i1> @llvm.riscv.vmfeq.mask.nxv8bf16.bf16.i64(<vscale x 8 x i1> poison, <vscale x 8 x bfloat> [[OP1]], bfloat [[OP2]], <vscale x 8 x i1> [[MASK]], i64 [[VL]])
+// CHECK-RV64-NEXT: ret <vscale x 8 x i1> [[TMP0]]
+//
+vbool8_t test_vmfeq_vf_bf16m2_b8_m(vbool8_t mask, vbfloat16m2_t op1, __bf16 op2, size_t vl) {
+ return __riscv_vmfeq(mask, op1, op2, vl);
+}
+
+// CHECK-RV64-LABEL: define dso_local <vscale x 16 x i1> @test_vmfeq_vv_bf16m4_b4_m(
+// CHECK-RV64-SAME: <vscale x 16 x i1> [[MASK:%.*]], <vscale x 16 x bfloat> [[OP1:%.*]], <vscale x 16 x bfloat> [[OP2:%.*]], i64 noundef [[VL:%.*]]) #[[ATTR0]] {
+// CHECK-RV64-NEXT: [[ENTRY:.*:]]
+// CHECK-RV64-NEXT: [[TMP0:%.*]] = call <vscale x 16 x i1> @llvm.riscv.vmfeq.mask.nxv16bf16.nxv16bf16.i64(<vscale x 16 x i1> poison, <vscale x 16 x bfloat> [[OP1]], <vscale x 16 x bfloat> [[OP2]], <vscale x 16 x i1> [[MASK]], i64 [[VL]])
+// CHECK-RV64-NEXT: ret <vscale x 16 x i1> [[TMP0]]
+//
+vbool4_t test_vmfeq_vv_bf16m4_b4_m(vbool4_t mask, vbfloat16m4_t op1, vbfloat16m4_t op2, size_t vl) {
+ return __riscv_vmfeq(mask, op1, op2, vl);
+}
+
+// CHECK-RV64-LABEL: define dso_local <vscale x 16 x i1> @test_vmfeq_vf_bf16m4_b4_m(
+// CHECK-RV64-SAME: <vscale x 16 x i1> [[MASK:%.*]], <vscale x 16 x bfloat> [[OP1:%.*]], bfloat noundef [[OP2:%.*]], i64 noundef [[VL:%.*]]) #[[ATTR0]] {
+// CHECK-RV64-NEXT: [[ENTRY:.*:]]
+// CHECK-RV64-NEXT: [[TMP0:%.*]] = call <vscale x 16 x i1> @llvm.riscv.vmfeq.mask.nxv16bf16.bf16.i64(<vscale x 16 x i1> poison, <vscale x 16 x bfloat> [[OP1]], bfloat [[OP2]], <vscale x 16 x i1> [[MASK]], i64 [[VL]])
+// CHECK-RV64-NEXT: ret <vscale x 16 x i1> [[TMP0]]
+//
+vbool4_t test_vmfeq_vf_bf16m4_b4_m(vbool4_t mask, vbfloat16m4_t op1, __bf16 op2, size_t vl) {
+ return __riscv_vmfeq(mask, op1, op2, vl);
+}
+
+// CHECK-RV64-LABEL: define dso_local <vscale x 32 x i1> @test_vmfeq_vv_bf16m8_b2_m(
+// CHECK-RV64-SAME: <vscale x 32 x i1> [[MASK:%.*]], <vscale x 32 x bfloat> [[OP1:%.*]], <vscale x 32 x bfloat> [[OP2:%.*]], i64 noundef [[VL:%.*]]) #[[ATTR0]] {
+// CHECK-RV64-NEXT: [[ENTRY:.*:]]
+// CHECK-RV64-NEXT: [[TMP0:%.*]] = call <vscale x 32 x i1> @llvm.riscv.vmfeq.mask.nxv32bf16.nxv32bf16.i64(<vscale x 32 x i1> poison, <vscale x 32 x bfloat> [[OP1]], <vscale x 32 x bfloat> [[OP2]], <vscale x 32 x i1> [[MASK]], i64 [[VL]])
+// CHECK-RV64-NEXT: ret <vscale x 32 x i1> [[TMP0]]
+//
+vbool2_t test_vmfeq_vv_bf16m8_b2_m(vbool2_t mask, vbfloat16m8_t op1, vbfloat16m8_t op2, size_t vl) {
+ return __riscv_vmfeq(mask, op1, op2, vl);
+}
+
+// CHECK-RV64-LABEL: define dso_local <vscale x 32 x i1> @test_vmfeq_vf_bf16m8_b2_m(
+// CHECK-RV64-SAME: <vscale x 32 x i1> [[MASK:%.*]], <vscale x 32 x bfloat> [[OP1:%.*]], bfloat noundef [[OP2:%.*]], i64 noundef [[VL:%.*]]) #[[ATTR0]] {
+// CHECK-RV64-NEXT: [[ENTRY:.*:]]
+// CHECK-RV64-NEXT: [[TMP0:%.*]] = call <vscale x 32 x i1> @llvm.riscv.vmfeq.mask.nxv32bf16.bf16.i64(<vscale x 32 x i1> poison, <vscale x 32 x bfloat> [[OP1]], bfloat [[OP2]], <vscale x 32 x i1> [[MASK]], i64 [[VL]])
+// CHECK-RV64-NEXT: ret <vscale x 32 x i1> [[TMP0]]
+//
+vbool2_t test_vmfeq_vf_bf16m8_b2_m(vbool2_t mask, vbfloat16m8_t op1, __bf16 op2, size_t vl) {
+ return __riscv_vmfeq(mask, op1, op2, vl);
+}
+
diff --git a/clang/test/CodeGen/RISCV/rvv-intrinsics-autogenerated/zvfbfa/non-policy/overloaded/vmfge.c b/clang/test/CodeGen/RISCV/rvv-intrinsics-autogenerated/zvfbfa/non-policy/overloaded/vmfge.c
new file mode 100644
index 0000000..b8083c5e
--- /dev/null
+++ b/clang/test/CodeGen/RISCV/rvv-intrinsics-autogenerated/zvfbfa/non-policy/overloaded/vmfge.c
@@ -0,0 +1,249 @@
+// NOTE: Assertions have been autogenerated by utils/update_cc_test_checks.py UTC_ARGS: --version 5
+// REQUIRES: riscv-registered-target
+// RUN: %clang_cc1 -triple riscv64 -target-feature +v \
+// RUN: -target-feature +experimental-zvfbfa -disable-O0-optnone \
+// RUN: -emit-llvm %s -o - | opt -S -passes=mem2reg | \
+// RUN: FileCheck --check-prefix=CHECK-RV64 %s
+
+#include <riscv_vector.h>
+
+// CHECK-RV64-LABEL: define dso_local <vscale x 1 x i1> @test_vmfge_vv_bf16mf4_b64(
+// CHECK-RV64-SAME: <vscale x 1 x bfloat> [[OP1:%.*]], <vscale x 1 x bfloat> [[OP2:%.*]], i64 noundef [[VL:%.*]]) #[[ATTR0:[0-9]+]] {
+// CHECK-RV64-NEXT: [[ENTRY:.*:]]
+// CHECK-RV64-NEXT: [[TMP0:%.*]] = call <vscale x 1 x i1> @llvm.riscv.vmfge.nxv1bf16.nxv1bf16.i64(<vscale x 1 x bfloat> [[OP1]], <vscale x 1 x bfloat> [[OP2]], i64 [[VL]])
+// CHECK-RV64-NEXT: ret <vscale x 1 x i1> [[TMP0]]
+//
+vbool64_t test_vmfge_vv_bf16mf4_b64(vbfloat16mf4_t op1, vbfloat16mf4_t op2, size_t vl) {
+ return __riscv_vmfge(op1, op2, vl);
+}
+
+// CHECK-RV64-LABEL: define dso_local <vscale x 1 x i1> @test_vmfge_vf_bf16mf4_b64(
+// CHECK-RV64-SAME: <vscale x 1 x bfloat> [[OP1:%.*]], bfloat noundef [[OP2:%.*]], i64 noundef [[VL:%.*]]) #[[ATTR0]] {
+// CHECK-RV64-NEXT: [[ENTRY:.*:]]
+// CHECK-RV64-NEXT: [[TMP0:%.*]] = call <vscale x 1 x i1> @llvm.riscv.vmfge.nxv1bf16.bf16.i64(<vscale x 1 x bfloat> [[OP1]], bfloat [[OP2]], i64 [[VL]])
+// CHECK-RV64-NEXT: ret <vscale x 1 x i1> [[TMP0]]
+//
+vbool64_t test_vmfge_vf_bf16mf4_b64(vbfloat16mf4_t op1, __bf16 op2, size_t vl) {
+ return __riscv_vmfge(op1, op2, vl);
+}
+
+// CHECK-RV64-LABEL: define dso_local <vscale x 2 x i1> @test_vmfge_vv_bf16mf2_b32(
+// CHECK-RV64-SAME: <vscale x 2 x bfloat> [[OP1:%.*]], <vscale x 2 x bfloat> [[OP2:%.*]], i64 noundef [[VL:%.*]]) #[[ATTR0]] {
+// CHECK-RV64-NEXT: [[ENTRY:.*:]]
+// CHECK-RV64-NEXT: [[TMP0:%.*]] = call <vscale x 2 x i1> @llvm.riscv.vmfge.nxv2bf16.nxv2bf16.i64(<vscale x 2 x bfloat> [[OP1]], <vscale x 2 x bfloat> [[OP2]], i64 [[VL]])
+// CHECK-RV64-NEXT: ret <vscale x 2 x i1> [[TMP0]]
+//
+vbool32_t test_vmfge_vv_bf16mf2_b32(vbfloat16mf2_t op1, vbfloat16mf2_t op2, size_t vl) {
+ return __riscv_vmfge(op1, op2, vl);
+}
+
+// CHECK-RV64-LABEL: define dso_local <vscale x 2 x i1> @test_vmfge_vf_bf16mf2_b32(
+// CHECK-RV64-SAME: <vscale x 2 x bfloat> [[OP1:%.*]], bfloat noundef [[OP2:%.*]], i64 noundef [[VL:%.*]]) #[[ATTR0]] {
+// CHECK-RV64-NEXT: [[ENTRY:.*:]]
+// CHECK-RV64-NEXT: [[TMP0:%.*]] = call <vscale x 2 x i1> @llvm.riscv.vmfge.nxv2bf16.bf16.i64(<vscale x 2 x bfloat> [[OP1]], bfloat [[OP2]], i64 [[VL]])
+// CHECK-RV64-NEXT: ret <vscale x 2 x i1> [[TMP0]]
+//
+vbool32_t test_vmfge_vf_bf16mf2_b32(vbfloat16mf2_t op1, __bf16 op2, size_t vl) {
+ return __riscv_vmfge(op1, op2, vl);
+}
+
+// CHECK-RV64-LABEL: define dso_local <vscale x 4 x i1> @test_vmfge_vv_bf16m1_b16(
+// CHECK-RV64-SAME: <vscale x 4 x bfloat> [[OP1:%.*]], <vscale x 4 x bfloat> [[OP2:%.*]], i64 noundef [[VL:%.*]]) #[[ATTR0]] {
+// CHECK-RV64-NEXT: [[ENTRY:.*:]]
+// CHECK-RV64-NEXT: [[TMP0:%.*]] = call <vscale x 4 x i1> @llvm.riscv.vmfge.nxv4bf16.nxv4bf16.i64(<vscale x 4 x bfloat> [[OP1]], <vscale x 4 x bfloat> [[OP2]], i64 [[VL]])
+// CHECK-RV64-NEXT: ret <vscale x 4 x i1> [[TMP0]]
+//
+vbool16_t test_vmfge_vv_bf16m1_b16(vbfloat16m1_t op1, vbfloat16m1_t op2, size_t vl) {
+ return __riscv_vmfge(op1, op2, vl);
+}
+
+// CHECK-RV64-LABEL: define dso_local <vscale x 4 x i1> @test_vmfge_vf_bf16m1_b16(
+// CHECK-RV64-SAME: <vscale x 4 x bfloat> [[OP1:%.*]], bfloat noundef [[OP2:%.*]], i64 noundef [[VL:%.*]]) #[[ATTR0]] {
+// CHECK-RV64-NEXT: [[ENTRY:.*:]]
+// CHECK-RV64-NEXT: [[TMP0:%.*]] = call <vscale x 4 x i1> @llvm.riscv.vmfge.nxv4bf16.bf16.i64(<vscale x 4 x bfloat> [[OP1]], bfloat [[OP2]], i64 [[VL]])
+// CHECK-RV64-NEXT: ret <vscale x 4 x i1> [[TMP0]]
+//
+vbool16_t test_vmfge_vf_bf16m1_b16(vbfloat16m1_t op1, __bf16 op2, size_t vl) {
+ return __riscv_vmfge(op1, op2, vl);
+}
+
+// CHECK-RV64-LABEL: define dso_local <vscale x 8 x i1> @test_vmfge_vv_bf16m2_b8(
+// CHECK-RV64-SAME: <vscale x 8 x bfloat> [[OP1:%.*]], <vscale x 8 x bfloat> [[OP2:%.*]], i64 noundef [[VL:%.*]]) #[[ATTR0]] {
+// CHECK-RV64-NEXT: [[ENTRY:.*:]]
+// CHECK-RV64-NEXT: [[TMP0:%.*]] = call <vscale x 8 x i1> @llvm.riscv.vmfge.nxv8bf16.nxv8bf16.i64(<vscale x 8 x bfloat> [[OP1]], <vscale x 8 x bfloat> [[OP2]], i64 [[VL]])
+// CHECK-RV64-NEXT: ret <vscale x 8 x i1> [[TMP0]]
+//
+vbool8_t test_vmfge_vv_bf16m2_b8(vbfloat16m2_t op1, vbfloat16m2_t op2, size_t vl) {
+ return __riscv_vmfge(op1, op2, vl);
+}
+
+// CHECK-RV64-LABEL: define dso_local <vscale x 8 x i1> @test_vmfge_vf_bf16m2_b8(
+// CHECK-RV64-SAME: <vscale x 8 x bfloat> [[OP1:%.*]], bfloat noundef [[OP2:%.*]], i64 noundef [[VL:%.*]]) #[[ATTR0]] {
+// CHECK-RV64-NEXT: [[ENTRY:.*:]]
+// CHECK-RV64-NEXT: [[TMP0:%.*]] = call <vscale x 8 x i1> @llvm.riscv.vmfge.nxv8bf16.bf16.i64(<vscale x 8 x bfloat> [[OP1]], bfloat [[OP2]], i64 [[VL]])
+// CHECK-RV64-NEXT: ret <vscale x 8 x i1> [[TMP0]]
+//
+vbool8_t test_vmfge_vf_bf16m2_b8(vbfloat16m2_t op1, __bf16 op2, size_t vl) {
+ return __riscv_vmfge(op1, op2, vl);
+}
+
+// CHECK-RV64-LABEL: define dso_local <vscale x 16 x i1> @test_vmfge_vv_bf16m4_b4(
+// CHECK-RV64-SAME: <vscale x 16 x bfloat> [[OP1:%.*]], <vscale x 16 x bfloat> [[OP2:%.*]], i64 noundef [[VL:%.*]]) #[[ATTR0]] {
+// CHECK-RV64-NEXT: [[ENTRY:.*:]]
+// CHECK-RV64-NEXT: [[TMP0:%.*]] = call <vscale x 16 x i1> @llvm.riscv.vmfge.nxv16bf16.nxv16bf16.i64(<vscale x 16 x bfloat> [[OP1]], <vscale x 16 x bfloat> [[OP2]], i64 [[VL]])
+// CHECK-RV64-NEXT: ret <vscale x 16 x i1> [[TMP0]]
+//
+vbool4_t test_vmfge_vv_bf16m4_b4(vbfloat16m4_t op1, vbfloat16m4_t op2, size_t vl) {
+ return __riscv_vmfge(op1, op2, vl);
+}
+
+// CHECK-RV64-LABEL: define dso_local <vscale x 16 x i1> @test_vmfge_vf_bf16m4_b4(
+// CHECK-RV64-SAME: <vscale x 16 x bfloat> [[OP1:%.*]], bfloat noundef [[OP2:%.*]], i64 noundef [[VL:%.*]]) #[[ATTR0]] {
+// CHECK-RV64-NEXT: [[ENTRY:.*:]]
+// CHECK-RV64-NEXT: [[TMP0:%.*]] = call <vscale x 16 x i1> @llvm.riscv.vmfge.nxv16bf16.bf16.i64(<vscale x 16 x bfloat> [[OP1]], bfloat [[OP2]], i64 [[VL]])
+// CHECK-RV64-NEXT: ret <vscale x 16 x i1> [[TMP0]]
+//
+vbool4_t test_vmfge_vf_bf16m4_b4(vbfloat16m4_t op1, __bf16 op2, size_t vl) {
+ return __riscv_vmfge(op1, op2, vl);
+}
+
+// CHECK-RV64-LABEL: define dso_local <vscale x 32 x i1> @test_vmfge_vv_bf16m8_b2(
+// CHECK-RV64-SAME: <vscale x 32 x bfloat> [[OP1:%.*]], <vscale x 32 x bfloat> [[OP2:%.*]], i64 noundef [[VL:%.*]]) #[[ATTR0]] {
+// CHECK-RV64-NEXT: [[ENTRY:.*:]]
+// CHECK-RV64-NEXT: [[TMP0:%.*]] = call <vscale x 32 x i1> @llvm.riscv.vmfge.nxv32bf16.nxv32bf16.i64(<vscale x 32 x bfloat> [[OP1]], <vscale x 32 x bfloat> [[OP2]], i64 [[VL]])
+// CHECK-RV64-NEXT: ret <vscale x 32 x i1> [[TMP0]]
+//
+vbool2_t test_vmfge_vv_bf16m8_b2(vbfloat16m8_t op1, vbfloat16m8_t op2, size_t vl) {
+ return __riscv_vmfge(op1, op2, vl);
+}
+
+// CHECK-RV64-LABEL: define dso_local <vscale x 32 x i1> @test_vmfge_vf_bf16m8_b2(
+// CHECK-RV64-SAME: <vscale x 32 x bfloat> [[OP1:%.*]], bfloat noundef [[OP2:%.*]], i64 noundef [[VL:%.*]]) #[[ATTR0]] {
+// CHECK-RV64-NEXT: [[ENTRY:.*:]]
+// CHECK-RV64-NEXT: [[TMP0:%.*]] = call <vscale x 32 x i1> @llvm.riscv.vmfge.nxv32bf16.bf16.i64(<vscale x 32 x bfloat> [[OP1]], bfloat [[OP2]], i64 [[VL]])
+// CHECK-RV64-NEXT: ret <vscale x 32 x i1> [[TMP0]]
+//
+vbool2_t test_vmfge_vf_bf16m8_b2(vbfloat16m8_t op1, __bf16 op2, size_t vl) {
+ return __riscv_vmfge(op1, op2, vl);
+}
+
+// CHECK-RV64-LABEL: define dso_local <vscale x 1 x i1> @test_vmfge_vv_bf16mf4_b64_m(
+// CHECK-RV64-SAME: <vscale x 1 x i1> [[MASK:%.*]], <vscale x 1 x bfloat> [[OP1:%.*]], <vscale x 1 x bfloat> [[OP2:%.*]], i64 noundef [[VL:%.*]]) #[[ATTR0]] {
+// CHECK-RV64-NEXT: [[ENTRY:.*:]]
+// CHECK-RV64-NEXT: [[TMP0:%.*]] = call <vscale x 1 x i1> @llvm.riscv.vmfge.mask.nxv1bf16.nxv1bf16.i64(<vscale x 1 x i1> poison, <vscale x 1 x bfloat> [[OP1]], <vscale x 1 x bfloat> [[OP2]], <vscale x 1 x i1> [[MASK]], i64 [[VL]])
+// CHECK-RV64-NEXT: ret <vscale x 1 x i1> [[TMP0]]
+//
+vbool64_t test_vmfge_vv_bf16mf4_b64_m(vbool64_t mask, vbfloat16mf4_t op1, vbfloat16mf4_t op2, size_t vl) {
+ return __riscv_vmfge(mask, op1, op2, vl);
+}
+
+// CHECK-RV64-LABEL: define dso_local <vscale x 1 x i1> @test_vmfge_vf_bf16mf4_b64_m(
+// CHECK-RV64-SAME: <vscale x 1 x i1> [[MASK:%.*]], <vscale x 1 x bfloat> [[OP1:%.*]], bfloat noundef [[OP2:%.*]], i64 noundef [[VL:%.*]]) #[[ATTR0]] {
+// CHECK-RV64-NEXT: [[ENTRY:.*:]]
+// CHECK-RV64-NEXT: [[TMP0:%.*]] = call <vscale x 1 x i1> @llvm.riscv.vmfge.mask.nxv1bf16.bf16.i64(<vscale x 1 x i1> poison, <vscale x 1 x bfloat> [[OP1]], bfloat [[OP2]], <vscale x 1 x i1> [[MASK]], i64 [[VL]])
+// CHECK-RV64-NEXT: ret <vscale x 1 x i1> [[TMP0]]
+//
+vbool64_t test_vmfge_vf_bf16mf4_b64_m(vbool64_t mask, vbfloat16mf4_t op1, __bf16 op2, size_t vl) {
+ return __riscv_vmfge(mask, op1, op2, vl);
+}
+
+// CHECK-RV64-LABEL: define dso_local <vscale x 2 x i1> @test_vmfge_vv_bf16mf2_b32_m(
+// CHECK-RV64-SAME: <vscale x 2 x i1> [[MASK:%.*]], <vscale x 2 x bfloat> [[OP1:%.*]], <vscale x 2 x bfloat> [[OP2:%.*]], i64 noundef [[VL:%.*]]) #[[ATTR0]] {
+// CHECK-RV64-NEXT: [[ENTRY:.*:]]
+// CHECK-RV64-NEXT: [[TMP0:%.*]] = call <vscale x 2 x i1> @llvm.riscv.vmfge.mask.nxv2bf16.nxv2bf16.i64(<vscale x 2 x i1> poison, <vscale x 2 x bfloat> [[OP1]], <vscale x 2 x bfloat> [[OP2]], <vscale x 2 x i1> [[MASK]], i64 [[VL]])
+// CHECK-RV64-NEXT: ret <vscale x 2 x i1> [[TMP0]]
+//
+vbool32_t test_vmfge_vv_bf16mf2_b32_m(vbool32_t mask, vbfloat16mf2_t op1, vbfloat16mf2_t op2, size_t vl) {
+ return __riscv_vmfge(mask, op1, op2, vl);
+}
+
+// CHECK-RV64-LABEL: define dso_local <vscale x 2 x i1> @test_vmfge_vf_bf16mf2_b32_m(
+// CHECK-RV64-SAME: <vscale x 2 x i1> [[MASK:%.*]], <vscale x 2 x bfloat> [[OP1:%.*]], bfloat noundef [[OP2:%.*]], i64 noundef [[VL:%.*]]) #[[ATTR0]] {
+// CHECK-RV64-NEXT: [[ENTRY:.*:]]
+// CHECK-RV64-NEXT: [[TMP0:%.*]] = call <vscale x 2 x i1> @llvm.riscv.vmfge.mask.nxv2bf16.bf16.i64(<vscale x 2 x i1> poison, <vscale x 2 x bfloat> [[OP1]], bfloat [[OP2]], <vscale x 2 x i1> [[MASK]], i64 [[VL]])
+// CHECK-RV64-NEXT: ret <vscale x 2 x i1> [[TMP0]]
+//
+vbool32_t test_vmfge_vf_bf16mf2_b32_m(vbool32_t mask, vbfloat16mf2_t op1, __bf16 op2, size_t vl) {
+ return __riscv_vmfge(mask, op1, op2, vl);
+}
+
+// CHECK-RV64-LABEL: define dso_local <vscale x 4 x i1> @test_vmfge_vv_bf16m1_b16_m(
+// CHECK-RV64-SAME: <vscale x 4 x i1> [[MASK:%.*]], <vscale x 4 x bfloat> [[OP1:%.*]], <vscale x 4 x bfloat> [[OP2:%.*]], i64 noundef [[VL:%.*]]) #[[ATTR0]] {
+// CHECK-RV64-NEXT: [[ENTRY:.*:]]
+// CHECK-RV64-NEXT: [[TMP0:%.*]] = call <vscale x 4 x i1> @llvm.riscv.vmfge.mask.nxv4bf16.nxv4bf16.i64(<vscale x 4 x i1> poison, <vscale x 4 x bfloat> [[OP1]], <vscale x 4 x bfloat> [[OP2]], <vscale x 4 x i1> [[MASK]], i64 [[VL]])
+// CHECK-RV64-NEXT: ret <vscale x 4 x i1> [[TMP0]]
+//
+vbool16_t test_vmfge_vv_bf16m1_b16_m(vbool16_t mask, vbfloat16m1_t op1, vbfloat16m1_t op2, size_t vl) {
+ return __riscv_vmfge(mask, op1, op2, vl);
+}
+
+// CHECK-RV64-LABEL: define dso_local <vscale x 4 x i1> @test_vmfge_vf_bf16m1_b16_m(
+// CHECK-RV64-SAME: <vscale x 4 x i1> [[MASK:%.*]], <vscale x 4 x bfloat> [[OP1:%.*]], bfloat noundef [[OP2:%.*]], i64 noundef [[VL:%.*]]) #[[ATTR0]] {
+// CHECK-RV64-NEXT: [[ENTRY:.*:]]
+// CHECK-RV64-NEXT: [[TMP0:%.*]] = call <vscale x 4 x i1> @llvm.riscv.vmfge.mask.nxv4bf16.bf16.i64(<vscale x 4 x i1> poison, <vscale x 4 x bfloat> [[OP1]], bfloat [[OP2]], <vscale x 4 x i1> [[MASK]], i64 [[VL]])
+// CHECK-RV64-NEXT: ret <vscale x 4 x i1> [[TMP0]]
+//
+vbool16_t test_vmfge_vf_bf16m1_b16_m(vbool16_t mask, vbfloat16m1_t op1, __bf16 op2, size_t vl) {
+ return __riscv_vmfge(mask, op1, op2, vl);
+}
+
+// CHECK-RV64-LABEL: define dso_local <vscale x 8 x i1> @test_vmfge_vv_bf16m2_b8_m(
+// CHECK-RV64-SAME: <vscale x 8 x i1> [[MASK:%.*]], <vscale x 8 x bfloat> [[OP1:%.*]], <vscale x 8 x bfloat> [[OP2:%.*]], i64 noundef [[VL:%.*]]) #[[ATTR0]] {
+// CHECK-RV64-NEXT: [[ENTRY:.*:]]
+// CHECK-RV64-NEXT: [[TMP0:%.*]] = call <vscale x 8 x i1> @llvm.riscv.vmfge.mask.nxv8bf16.nxv8bf16.i64(<vscale x 8 x i1> poison, <vscale x 8 x bfloat> [[OP1]], <vscale x 8 x bfloat> [[OP2]], <vscale x 8 x i1> [[MASK]], i64 [[VL]])
+// CHECK-RV64-NEXT: ret <vscale x 8 x i1> [[TMP0]]
+//
+vbool8_t test_vmfge_vv_bf16m2_b8_m(vbool8_t mask, vbfloat16m2_t op1, vbfloat16m2_t op2, size_t vl) {
+ return __riscv_vmfge(mask, op1, op2, vl);
+}
+
+// CHECK-RV64-LABEL: define dso_local <vscale x 8 x i1> @test_vmfge_vf_bf16m2_b8_m(
+// CHECK-RV64-SAME: <vscale x 8 x i1> [[MASK:%.*]], <vscale x 8 x bfloat> [[OP1:%.*]], bfloat noundef [[OP2:%.*]], i64 noundef [[VL:%.*]]) #[[ATTR0]] {
+// CHECK-RV64-NEXT: [[ENTRY:.*:]]
+// CHECK-RV64-NEXT: [[TMP0:%.*]] = call <vscale x 8 x i1> @llvm.riscv.vmfge.mask.nxv8bf16.bf16.i64(<vscale x 8 x i1> poison, <vscale x 8 x bfloat> [[OP1]], bfloat [[OP2]], <vscale x 8 x i1> [[MASK]], i64 [[VL]])
+// CHECK-RV64-NEXT: ret <vscale x 8 x i1> [[TMP0]]
+//
+vbool8_t test_vmfge_vf_bf16m2_b8_m(vbool8_t mask, vbfloat16m2_t op1, __bf16 op2, size_t vl) {
+ return __riscv_vmfge(mask, op1, op2, vl);
+}
+
+// CHECK-RV64-LABEL: define dso_local <vscale x 16 x i1> @test_vmfge_vv_bf16m4_b4_m(
+// CHECK-RV64-SAME: <vscale x 16 x i1> [[MASK:%.*]], <vscale x 16 x bfloat> [[OP1:%.*]], <vscale x 16 x bfloat> [[OP2:%.*]], i64 noundef [[VL:%.*]]) #[[ATTR0]] {
+// CHECK-RV64-NEXT: [[ENTRY:.*:]]
+// CHECK-RV64-NEXT: [[TMP0:%.*]] = call <vscale x 16 x i1> @llvm.riscv.vmfge.mask.nxv16bf16.nxv16bf16.i64(<vscale x 16 x i1> poison, <vscale x 16 x bfloat> [[OP1]], <vscale x 16 x bfloat> [[OP2]], <vscale x 16 x i1> [[MASK]], i64 [[VL]])
+// CHECK-RV64-NEXT: ret <vscale x 16 x i1> [[TMP0]]
+//
+vbool4_t test_vmfge_vv_bf16m4_b4_m(vbool4_t mask, vbfloat16m4_t op1, vbfloat16m4_t op2, size_t vl) {
+ return __riscv_vmfge(mask, op1, op2, vl);
+}
+
+// CHECK-RV64-LABEL: define dso_local <vscale x 16 x i1> @test_vmfge_vf_bf16m4_b4_m(
+// CHECK-RV64-SAME: <vscale x 16 x i1> [[MASK:%.*]], <vscale x 16 x bfloat> [[OP1:%.*]], bfloat noundef [[OP2:%.*]], i64 noundef [[VL:%.*]]) #[[ATTR0]] {
+// CHECK-RV64-NEXT: [[ENTRY:.*:]]
+// CHECK-RV64-NEXT: [[TMP0:%.*]] = call <vscale x 16 x i1> @llvm.riscv.vmfge.mask.nxv16bf16.bf16.i64(<vscale x 16 x i1> poison, <vscale x 16 x bfloat> [[OP1]], bfloat [[OP2]], <vscale x 16 x i1> [[MASK]], i64 [[VL]])
+// CHECK-RV64-NEXT: ret <vscale x 16 x i1> [[TMP0]]
+//
+vbool4_t test_vmfge_vf_bf16m4_b4_m(vbool4_t mask, vbfloat16m4_t op1, __bf16 op2, size_t vl) {
+ return __riscv_vmfge(mask, op1, op2, vl);
+}
+
+// CHECK-RV64-LABEL: define dso_local <vscale x 32 x i1> @test_vmfge_vv_bf16m8_b2_m(
+// CHECK-RV64-SAME: <vscale x 32 x i1> [[MASK:%.*]], <vscale x 32 x bfloat> [[OP1:%.*]], <vscale x 32 x bfloat> [[OP2:%.*]], i64 noundef [[VL:%.*]]) #[[ATTR0]] {
+// CHECK-RV64-NEXT: [[ENTRY:.*:]]
+// CHECK-RV64-NEXT: [[TMP0:%.*]] = call <vscale x 32 x i1> @llvm.riscv.vmfge.mask.nxv32bf16.nxv32bf16.i64(<vscale x 32 x i1> poison, <vscale x 32 x bfloat> [[OP1]], <vscale x 32 x bfloat> [[OP2]], <vscale x 32 x i1> [[MASK]], i64 [[VL]])
+// CHECK-RV64-NEXT: ret <vscale x 32 x i1> [[TMP0]]
+//
+vbool2_t test_vmfge_vv_bf16m8_b2_m(vbool2_t mask, vbfloat16m8_t op1, vbfloat16m8_t op2, size_t vl) {
+ return __riscv_vmfge(mask, op1, op2, vl);
+}
+
+// CHECK-RV64-LABEL: define dso_local <vscale x 32 x i1> @test_vmfge_vf_bf16m8_b2_m(
+// CHECK-RV64-SAME: <vscale x 32 x i1> [[MASK:%.*]], <vscale x 32 x bfloat> [[OP1:%.*]], bfloat noundef [[OP2:%.*]], i64 noundef [[VL:%.*]]) #[[ATTR0]] {
+// CHECK-RV64-NEXT: [[ENTRY:.*:]]
+// CHECK-RV64-NEXT: [[TMP0:%.*]] = call <vscale x 32 x i1> @llvm.riscv.vmfge.mask.nxv32bf16.bf16.i64(<vscale x 32 x i1> poison, <vscale x 32 x bfloat> [[OP1]], bfloat [[OP2]], <vscale x 32 x i1> [[MASK]], i64 [[VL]])
+// CHECK-RV64-NEXT: ret <vscale x 32 x i1> [[TMP0]]
+//
+vbool2_t test_vmfge_vf_bf16m8_b2_m(vbool2_t mask, vbfloat16m8_t op1, __bf16 op2, size_t vl) {
+ return __riscv_vmfge(mask, op1, op2, vl);
+}
+
diff --git a/clang/test/CodeGen/RISCV/rvv-intrinsics-autogenerated/zvfbfa/non-policy/overloaded/vmfgt.c b/clang/test/CodeGen/RISCV/rvv-intrinsics-autogenerated/zvfbfa/non-policy/overloaded/vmfgt.c
new file mode 100644
index 0000000..b8749b3
--- /dev/null
+++ b/clang/test/CodeGen/RISCV/rvv-intrinsics-autogenerated/zvfbfa/non-policy/overloaded/vmfgt.c
@@ -0,0 +1,249 @@
+// NOTE: Assertions have been autogenerated by utils/update_cc_test_checks.py UTC_ARGS: --version 5
+// REQUIRES: riscv-registered-target
+// RUN: %clang_cc1 -triple riscv64 -target-feature +v \
+// RUN: -target-feature +experimental-zvfbfa -disable-O0-optnone \
+// RUN: -emit-llvm %s -o - | opt -S -passes=mem2reg | \
+// RUN: FileCheck --check-prefix=CHECK-RV64 %s
+
+#include <riscv_vector.h>
+
+// CHECK-RV64-LABEL: define dso_local <vscale x 1 x i1> @test_vmfgt_vv_bf16mf4_b64(
+// CHECK-RV64-SAME: <vscale x 1 x bfloat> [[OP1:%.*]], <vscale x 1 x bfloat> [[OP2:%.*]], i64 noundef [[VL:%.*]]) #[[ATTR0:[0-9]+]] {
+// CHECK-RV64-NEXT: [[ENTRY:.*:]]
+// CHECK-RV64-NEXT: [[TMP0:%.*]] = call <vscale x 1 x i1> @llvm.riscv.vmfgt.nxv1bf16.nxv1bf16.i64(<vscale x 1 x bfloat> [[OP1]], <vscale x 1 x bfloat> [[OP2]], i64 [[VL]])
+// CHECK-RV64-NEXT: ret <vscale x 1 x i1> [[TMP0]]
+//
+vbool64_t test_vmfgt_vv_bf16mf4_b64(vbfloat16mf4_t op1, vbfloat16mf4_t op2, size_t vl) {
+ return __riscv_vmfgt(op1, op2, vl);
+}
+
+// CHECK-RV64-LABEL: define dso_local <vscale x 1 x i1> @test_vmfgt_vf_bf16mf4_b64(
+// CHECK-RV64-SAME: <vscale x 1 x bfloat> [[OP1:%.*]], bfloat noundef [[OP2:%.*]], i64 noundef [[VL:%.*]]) #[[ATTR0]] {
+// CHECK-RV64-NEXT: [[ENTRY:.*:]]
+// CHECK-RV64-NEXT: [[TMP0:%.*]] = call <vscale x 1 x i1> @llvm.riscv.vmfgt.nxv1bf16.bf16.i64(<vscale x 1 x bfloat> [[OP1]], bfloat [[OP2]], i64 [[VL]])
+// CHECK-RV64-NEXT: ret <vscale x 1 x i1> [[TMP0]]
+//
+vbool64_t test_vmfgt_vf_bf16mf4_b64(vbfloat16mf4_t op1, __bf16 op2, size_t vl) {
+ return __riscv_vmfgt(op1, op2, vl);
+}
+
+// CHECK-RV64-LABEL: define dso_local <vscale x 2 x i1> @test_vmfgt_vv_bf16mf2_b32(
+// CHECK-RV64-SAME: <vscale x 2 x bfloat> [[OP1:%.*]], <vscale x 2 x bfloat> [[OP2:%.*]], i64 noundef [[VL:%.*]]) #[[ATTR0]] {
+// CHECK-RV64-NEXT: [[ENTRY:.*:]]
+// CHECK-RV64-NEXT: [[TMP0:%.*]] = call <vscale x 2 x i1> @llvm.riscv.vmfgt.nxv2bf16.nxv2bf16.i64(<vscale x 2 x bfloat> [[OP1]], <vscale x 2 x bfloat> [[OP2]], i64 [[VL]])
+// CHECK-RV64-NEXT: ret <vscale x 2 x i1> [[TMP0]]
+//
+vbool32_t test_vmfgt_vv_bf16mf2_b32(vbfloat16mf2_t op1, vbfloat16mf2_t op2, size_t vl) {
+ return __riscv_vmfgt(op1, op2, vl);
+}
+
+// CHECK-RV64-LABEL: define dso_local <vscale x 2 x i1> @test_vmfgt_vf_bf16mf2_b32(
+// CHECK-RV64-SAME: <vscale x 2 x bfloat> [[OP1:%.*]], bfloat noundef [[OP2:%.*]], i64 noundef [[VL:%.*]]) #[[ATTR0]] {
+// CHECK-RV64-NEXT: [[ENTRY:.*:]]
+// CHECK-RV64-NEXT: [[TMP0:%.*]] = call <vscale x 2 x i1> @llvm.riscv.vmfgt.nxv2bf16.bf16.i64(<vscale x 2 x bfloat> [[OP1]], bfloat [[OP2]], i64 [[VL]])
+// CHECK-RV64-NEXT: ret <vscale x 2 x i1> [[TMP0]]
+//
+vbool32_t test_vmfgt_vf_bf16mf2_b32(vbfloat16mf2_t op1, __bf16 op2, size_t vl) {
+ return __riscv_vmfgt(op1, op2, vl);
+}
+
+// CHECK-RV64-LABEL: define dso_local <vscale x 4 x i1> @test_vmfgt_vv_bf16m1_b16(
+// CHECK-RV64-SAME: <vscale x 4 x bfloat> [[OP1:%.*]], <vscale x 4 x bfloat> [[OP2:%.*]], i64 noundef [[VL:%.*]]) #[[ATTR0]] {
+// CHECK-RV64-NEXT: [[ENTRY:.*:]]
+// CHECK-RV64-NEXT: [[TMP0:%.*]] = call <vscale x 4 x i1> @llvm.riscv.vmfgt.nxv4bf16.nxv4bf16.i64(<vscale x 4 x bfloat> [[OP1]], <vscale x 4 x bfloat> [[OP2]], i64 [[VL]])
+// CHECK-RV64-NEXT: ret <vscale x 4 x i1> [[TMP0]]
+//
+vbool16_t test_vmfgt_vv_bf16m1_b16(vbfloat16m1_t op1, vbfloat16m1_t op2, size_t vl) {
+ return __riscv_vmfgt(op1, op2, vl);
+}
+
+// CHECK-RV64-LABEL: define dso_local <vscale x 4 x i1> @test_vmfgt_vf_bf16m1_b16(
+// CHECK-RV64-SAME: <vscale x 4 x bfloat> [[OP1:%.*]], bfloat noundef [[OP2:%.*]], i64 noundef [[VL:%.*]]) #[[ATTR0]] {
+// CHECK-RV64-NEXT: [[ENTRY:.*:]]
+// CHECK-RV64-NEXT: [[TMP0:%.*]] = call <vscale x 4 x i1> @llvm.riscv.vmfgt.nxv4bf16.bf16.i64(<vscale x 4 x bfloat> [[OP1]], bfloat [[OP2]], i64 [[VL]])
+// CHECK-RV64-NEXT: ret <vscale x 4 x i1> [[TMP0]]
+//
+vbool16_t test_vmfgt_vf_bf16m1_b16(vbfloat16m1_t op1, __bf16 op2, size_t vl) {
+ return __riscv_vmfgt(op1, op2, vl);
+}
+
+// CHECK-RV64-LABEL: define dso_local <vscale x 8 x i1> @test_vmfgt_vv_bf16m2_b8(
+// CHECK-RV64-SAME: <vscale x 8 x bfloat> [[OP1:%.*]], <vscale x 8 x bfloat> [[OP2:%.*]], i64 noundef [[VL:%.*]]) #[[ATTR0]] {
+// CHECK-RV64-NEXT: [[ENTRY:.*:]]
+// CHECK-RV64-NEXT: [[TMP0:%.*]] = call <vscale x 8 x i1> @llvm.riscv.vmfgt.nxv8bf16.nxv8bf16.i64(<vscale x 8 x bfloat> [[OP1]], <vscale x 8 x bfloat> [[OP2]], i64 [[VL]])
+// CHECK-RV64-NEXT: ret <vscale x 8 x i1> [[TMP0]]
+//
+vbool8_t test_vmfgt_vv_bf16m2_b8(vbfloat16m2_t op1, vbfloat16m2_t op2, size_t vl) {
+ return __riscv_vmfgt(op1, op2, vl);
+}
+
+// CHECK-RV64-LABEL: define dso_local <vscale x 8 x i1> @test_vmfgt_vf_bf16m2_b8(
+// CHECK-RV64-SAME: <vscale x 8 x bfloat> [[OP1:%.*]], bfloat noundef [[OP2:%.*]], i64 noundef [[VL:%.*]]) #[[ATTR0]] {
+// CHECK-RV64-NEXT: [[ENTRY:.*:]]
+// CHECK-RV64-NEXT: [[TMP0:%.*]] = call <vscale x 8 x i1> @llvm.riscv.vmfgt.nxv8bf16.bf16.i64(<vscale x 8 x bfloat> [[OP1]], bfloat [[OP2]], i64 [[VL]])
+// CHECK-RV64-NEXT: ret <vscale x 8 x i1> [[TMP0]]
+//
+vbool8_t test_vmfgt_vf_bf16m2_b8(vbfloat16m2_t op1, __bf16 op2, size_t vl) {
+ return __riscv_vmfgt(op1, op2, vl);
+}
+
+// CHECK-RV64-LABEL: define dso_local <vscale x 16 x i1> @test_vmfgt_vv_bf16m4_b4(
+// CHECK-RV64-SAME: <vscale x 16 x bfloat> [[OP1:%.*]], <vscale x 16 x bfloat> [[OP2:%.*]], i64 noundef [[VL:%.*]]) #[[ATTR0]] {
+// CHECK-RV64-NEXT: [[ENTRY:.*:]]
+// CHECK-RV64-NEXT: [[TMP0:%.*]] = call <vscale x 16 x i1> @llvm.riscv.vmfgt.nxv16bf16.nxv16bf16.i64(<vscale x 16 x bfloat> [[OP1]], <vscale x 16 x bfloat> [[OP2]], i64 [[VL]])
+// CHECK-RV64-NEXT: ret <vscale x 16 x i1> [[TMP0]]
+//
+vbool4_t test_vmfgt_vv_bf16m4_b4(vbfloat16m4_t op1, vbfloat16m4_t op2, size_t vl) {
+ return __riscv_vmfgt(op1, op2, vl);
+}
+
+// CHECK-RV64-LABEL: define dso_local <vscale x 16 x i1> @test_vmfgt_vf_bf16m4_b4(
+// CHECK-RV64-SAME: <vscale x 16 x bfloat> [[OP1:%.*]], bfloat noundef [[OP2:%.*]], i64 noundef [[VL:%.*]]) #[[ATTR0]] {
+// CHECK-RV64-NEXT: [[ENTRY:.*:]]
+// CHECK-RV64-NEXT: [[TMP0:%.*]] = call <vscale x 16 x i1> @llvm.riscv.vmfgt.nxv16bf16.bf16.i64(<vscale x 16 x bfloat> [[OP1]], bfloat [[OP2]], i64 [[VL]])
+// CHECK-RV64-NEXT: ret <vscale x 16 x i1> [[TMP0]]
+//
+vbool4_t test_vmfgt_vf_bf16m4_b4(vbfloat16m4_t op1, __bf16 op2, size_t vl) {
+ return __riscv_vmfgt(op1, op2, vl);
+}
+
+// CHECK-RV64-LABEL: define dso_local <vscale x 32 x i1> @test_vmfgt_vv_bf16m8_b2(
+// CHECK-RV64-SAME: <vscale x 32 x bfloat> [[OP1:%.*]], <vscale x 32 x bfloat> [[OP2:%.*]], i64 noundef [[VL:%.*]]) #[[ATTR0]] {
+// CHECK-RV64-NEXT: [[ENTRY:.*:]]
+// CHECK-RV64-NEXT: [[TMP0:%.*]] = call <vscale x 32 x i1> @llvm.riscv.vmfgt.nxv32bf16.nxv32bf16.i64(<vscale x 32 x bfloat> [[OP1]], <vscale x 32 x bfloat> [[OP2]], i64 [[VL]])
+// CHECK-RV64-NEXT: ret <vscale x 32 x i1> [[TMP0]]
+//
+vbool2_t test_vmfgt_vv_bf16m8_b2(vbfloat16m8_t op1, vbfloat16m8_t op2, size_t vl) {
+ return __riscv_vmfgt(op1, op2, vl);
+}
+
+// CHECK-RV64-LABEL: define dso_local <vscale x 32 x i1> @test_vmfgt_vf_bf16m8_b2(
+// CHECK-RV64-SAME: <vscale x 32 x bfloat> [[OP1:%.*]], bfloat noundef [[OP2:%.*]], i64 noundef [[VL:%.*]]) #[[ATTR0]] {
+// CHECK-RV64-NEXT: [[ENTRY:.*:]]
+// CHECK-RV64-NEXT: [[TMP0:%.*]] = call <vscale x 32 x i1> @llvm.riscv.vmfgt.nxv32bf16.bf16.i64(<vscale x 32 x bfloat> [[OP1]], bfloat [[OP2]], i64 [[VL]])
+// CHECK-RV64-NEXT: ret <vscale x 32 x i1> [[TMP0]]
+//
+vbool2_t test_vmfgt_vf_bf16m8_b2(vbfloat16m8_t op1, __bf16 op2, size_t vl) {
+ return __riscv_vmfgt(op1, op2, vl);
+}
+
+// CHECK-RV64-LABEL: define dso_local <vscale x 1 x i1> @test_vmfgt_vv_bf16mf4_b64_m(
+// CHECK-RV64-SAME: <vscale x 1 x i1> [[MASK:%.*]], <vscale x 1 x bfloat> [[OP1:%.*]], <vscale x 1 x bfloat> [[OP2:%.*]], i64 noundef [[VL:%.*]]) #[[ATTR0]] {
+// CHECK-RV64-NEXT: [[ENTRY:.*:]]
+// CHECK-RV64-NEXT: [[TMP0:%.*]] = call <vscale x 1 x i1> @llvm.riscv.vmfgt.mask.nxv1bf16.nxv1bf16.i64(<vscale x 1 x i1> poison, <vscale x 1 x bfloat> [[OP1]], <vscale x 1 x bfloat> [[OP2]], <vscale x 1 x i1> [[MASK]], i64 [[VL]])
+// CHECK-RV64-NEXT: ret <vscale x 1 x i1> [[TMP0]]
+//
+vbool64_t test_vmfgt_vv_bf16mf4_b64_m(vbool64_t mask, vbfloat16mf4_t op1, vbfloat16mf4_t op2, size_t vl) {
+ return __riscv_vmfgt(mask, op1, op2, vl);
+}
+
+// CHECK-RV64-LABEL: define dso_local <vscale x 1 x i1> @test_vmfgt_vf_bf16mf4_b64_m(
+// CHECK-RV64-SAME: <vscale x 1 x i1> [[MASK:%.*]], <vscale x 1 x bfloat> [[OP1:%.*]], bfloat noundef [[OP2:%.*]], i64 noundef [[VL:%.*]]) #[[ATTR0]] {
+// CHECK-RV64-NEXT: [[ENTRY:.*:]]
+// CHECK-RV64-NEXT: [[TMP0:%.*]] = call <vscale x 1 x i1> @llvm.riscv.vmfgt.mask.nxv1bf16.bf16.i64(<vscale x 1 x i1> poison, <vscale x 1 x bfloat> [[OP1]], bfloat [[OP2]], <vscale x 1 x i1> [[MASK]], i64 [[VL]])
+// CHECK-RV64-NEXT: ret <vscale x 1 x i1> [[TMP0]]
+//
+vbool64_t test_vmfgt_vf_bf16mf4_b64_m(vbool64_t mask, vbfloat16mf4_t op1, __bf16 op2, size_t vl) {
+ return __riscv_vmfgt(mask, op1, op2, vl);
+}
+
+// CHECK-RV64-LABEL: define dso_local <vscale x 2 x i1> @test_vmfgt_vv_bf16mf2_b32_m(
+// CHECK-RV64-SAME: <vscale x 2 x i1> [[MASK:%.*]], <vscale x 2 x bfloat> [[OP1:%.*]], <vscale x 2 x bfloat> [[OP2:%.*]], i64 noundef [[VL:%.*]]) #[[ATTR0]] {
+// CHECK-RV64-NEXT: [[ENTRY:.*:]]
+// CHECK-RV64-NEXT: [[TMP0:%.*]] = call <vscale x 2 x i1> @llvm.riscv.vmfgt.mask.nxv2bf16.nxv2bf16.i64(<vscale x 2 x i1> poison, <vscale x 2 x bfloat> [[OP1]], <vscale x 2 x bfloat> [[OP2]], <vscale x 2 x i1> [[MASK]], i64 [[VL]])
+// CHECK-RV64-NEXT: ret <vscale x 2 x i1> [[TMP0]]
+//
+vbool32_t test_vmfgt_vv_bf16mf2_b32_m(vbool32_t mask, vbfloat16mf2_t op1, vbfloat16mf2_t op2, size_t vl) {
+ return __riscv_vmfgt(mask, op1, op2, vl);
+}
+
+// CHECK-RV64-LABEL: define dso_local <vscale x 2 x i1> @test_vmfgt_vf_bf16mf2_b32_m(
+// CHECK-RV64-SAME: <vscale x 2 x i1> [[MASK:%.*]], <vscale x 2 x bfloat> [[OP1:%.*]], bfloat noundef [[OP2:%.*]], i64 noundef [[VL:%.*]]) #[[ATTR0]] {
+// CHECK-RV64-NEXT: [[ENTRY:.*:]]
+// CHECK-RV64-NEXT: [[TMP0:%.*]] = call <vscale x 2 x i1> @llvm.riscv.vmfgt.mask.nxv2bf16.bf16.i64(<vscale x 2 x i1> poison, <vscale x 2 x bfloat> [[OP1]], bfloat [[OP2]], <vscale x 2 x i1> [[MASK]], i64 [[VL]])
+// CHECK-RV64-NEXT: ret <vscale x 2 x i1> [[TMP0]]
+//
+vbool32_t test_vmfgt_vf_bf16mf2_b32_m(vbool32_t mask, vbfloat16mf2_t op1, __bf16 op2, size_t vl) {
+ return __riscv_vmfgt(mask, op1, op2, vl);
+}
+
+// CHECK-RV64-LABEL: define dso_local <vscale x 4 x i1> @test_vmfgt_vv_bf16m1_b16_m(
+// CHECK-RV64-SAME: <vscale x 4 x i1> [[MASK:%.*]], <vscale x 4 x bfloat> [[OP1:%.*]], <vscale x 4 x bfloat> [[OP2:%.*]], i64 noundef [[VL:%.*]]) #[[ATTR0]] {
+// CHECK-RV64-NEXT: [[ENTRY:.*:]]
+// CHECK-RV64-NEXT: [[TMP0:%.*]] = call <vscale x 4 x i1> @llvm.riscv.vmfgt.mask.nxv4bf16.nxv4bf16.i64(<vscale x 4 x i1> poison, <vscale x 4 x bfloat> [[OP1]], <vscale x 4 x bfloat> [[OP2]], <vscale x 4 x i1> [[MASK]], i64 [[VL]])
+// CHECK-RV64-NEXT: ret <vscale x 4 x i1> [[TMP0]]
+//
+vbool16_t test_vmfgt_vv_bf16m1_b16_m(vbool16_t mask, vbfloat16m1_t op1, vbfloat16m1_t op2, size_t vl) {
+ return __riscv_vmfgt(mask, op1, op2, vl);
+}
+
+// CHECK-RV64-LABEL: define dso_local <vscale x 4 x i1> @test_vmfgt_vf_bf16m1_b16_m(
+// CHECK-RV64-SAME: <vscale x 4 x i1> [[MASK:%.*]], <vscale x 4 x bfloat> [[OP1:%.*]], bfloat noundef [[OP2:%.*]], i64 noundef [[VL:%.*]]) #[[ATTR0]] {
+// CHECK-RV64-NEXT: [[ENTRY:.*:]]
+// CHECK-RV64-NEXT: [[TMP0:%.*]] = call <vscale x 4 x i1> @llvm.riscv.vmfgt.mask.nxv4bf16.bf16.i64(<vscale x 4 x i1> poison, <vscale x 4 x bfloat> [[OP1]], bfloat [[OP2]], <vscale x 4 x i1> [[MASK]], i64 [[VL]])
+// CHECK-RV64-NEXT: ret <vscale x 4 x i1> [[TMP0]]
+//
+vbool16_t test_vmfgt_vf_bf16m1_b16_m(vbool16_t mask, vbfloat16m1_t op1, __bf16 op2, size_t vl) {
+ return __riscv_vmfgt(mask, op1, op2, vl);
+}
+
+// CHECK-RV64-LABEL: define dso_local <vscale x 8 x i1> @test_vmfgt_vv_bf16m2_b8_m(
+// CHECK-RV64-SAME: <vscale x 8 x i1> [[MASK:%.*]], <vscale x 8 x bfloat> [[OP1:%.*]], <vscale x 8 x bfloat> [[OP2:%.*]], i64 noundef [[VL:%.*]]) #[[ATTR0]] {
+// CHECK-RV64-NEXT: [[ENTRY:.*:]]
+// CHECK-RV64-NEXT: [[TMP0:%.*]] = call <vscale x 8 x i1> @llvm.riscv.vmfgt.mask.nxv8bf16.nxv8bf16.i64(<vscale x 8 x i1> poison, <vscale x 8 x bfloat> [[OP1]], <vscale x 8 x bfloat> [[OP2]], <vscale x 8 x i1> [[MASK]], i64 [[VL]])
+// CHECK-RV64-NEXT: ret <vscale x 8 x i1> [[TMP0]]
+//
+vbool8_t test_vmfgt_vv_bf16m2_b8_m(vbool8_t mask, vbfloat16m2_t op1, vbfloat16m2_t op2, size_t vl) {
+ return __riscv_vmfgt(mask, op1, op2, vl);
+}
+
+// CHECK-RV64-LABEL: define dso_local <vscale x 8 x i1> @test_vmfgt_vf_bf16m2_b8_m(
+// CHECK-RV64-SAME: <vscale x 8 x i1> [[MASK:%.*]], <vscale x 8 x bfloat> [[OP1:%.*]], bfloat noundef [[OP2:%.*]], i64 noundef [[VL:%.*]]) #[[ATTR0]] {
+// CHECK-RV64-NEXT: [[ENTRY:.*:]]
+// CHECK-RV64-NEXT: [[TMP0:%.*]] = call <vscale x 8 x i1> @llvm.riscv.vmfgt.mask.nxv8bf16.bf16.i64(<vscale x 8 x i1> poison, <vscale x 8 x bfloat> [[OP1]], bfloat [[OP2]], <vscale x 8 x i1> [[MASK]], i64 [[VL]])
+// CHECK-RV64-NEXT: ret <vscale x 8 x i1> [[TMP0]]
+//
+vbool8_t test_vmfgt_vf_bf16m2_b8_m(vbool8_t mask, vbfloat16m2_t op1, __bf16 op2, size_t vl) {
+ return __riscv_vmfgt(mask, op1, op2, vl);
+}
+
+// CHECK-RV64-LABEL: define dso_local <vscale x 16 x i1> @test_vmfgt_vv_bf16m4_b4_m(
+// CHECK-RV64-SAME: <vscale x 16 x i1> [[MASK:%.*]], <vscale x 16 x bfloat> [[OP1:%.*]], <vscale x 16 x bfloat> [[OP2:%.*]], i64 noundef [[VL:%.*]]) #[[ATTR0]] {
+// CHECK-RV64-NEXT: [[ENTRY:.*:]]
+// CHECK-RV64-NEXT: [[TMP0:%.*]] = call <vscale x 16 x i1> @llvm.riscv.vmfgt.mask.nxv16bf16.nxv16bf16.i64(<vscale x 16 x i1> poison, <vscale x 16 x bfloat> [[OP1]], <vscale x 16 x bfloat> [[OP2]], <vscale x 16 x i1> [[MASK]], i64 [[VL]])
+// CHECK-RV64-NEXT: ret <vscale x 16 x i1> [[TMP0]]
+//
+vbool4_t test_vmfgt_vv_bf16m4_b4_m(vbool4_t mask, vbfloat16m4_t op1, vbfloat16m4_t op2, size_t vl) {
+ return __riscv_vmfgt(mask, op1, op2, vl);
+}
+
+// CHECK-RV64-LABEL: define dso_local <vscale x 16 x i1> @test_vmfgt_vf_bf16m4_b4_m(
+// CHECK-RV64-SAME: <vscale x 16 x i1> [[MASK:%.*]], <vscale x 16 x bfloat> [[OP1:%.*]], bfloat noundef [[OP2:%.*]], i64 noundef [[VL:%.*]]) #[[ATTR0]] {
+// CHECK-RV64-NEXT: [[ENTRY:.*:]]
+// CHECK-RV64-NEXT: [[TMP0:%.*]] = call <vscale x 16 x i1> @llvm.riscv.vmfgt.mask.nxv16bf16.bf16.i64(<vscale x 16 x i1> poison, <vscale x 16 x bfloat> [[OP1]], bfloat [[OP2]], <vscale x 16 x i1> [[MASK]], i64 [[VL]])
+// CHECK-RV64-NEXT: ret <vscale x 16 x i1> [[TMP0]]
+//
+vbool4_t test_vmfgt_vf_bf16m4_b4_m(vbool4_t mask, vbfloat16m4_t op1, __bf16 op2, size_t vl) {
+ return __riscv_vmfgt(mask, op1, op2, vl);
+}
+
+// CHECK-RV64-LABEL: define dso_local <vscale x 32 x i1> @test_vmfgt_vv_bf16m8_b2_m(
+// CHECK-RV64-SAME: <vscale x 32 x i1> [[MASK:%.*]], <vscale x 32 x bfloat> [[OP1:%.*]], <vscale x 32 x bfloat> [[OP2:%.*]], i64 noundef [[VL:%.*]]) #[[ATTR0]] {
+// CHECK-RV64-NEXT: [[ENTRY:.*:]]
+// CHECK-RV64-NEXT: [[TMP0:%.*]] = call <vscale x 32 x i1> @llvm.riscv.vmfgt.mask.nxv32bf16.nxv32bf16.i64(<vscale x 32 x i1> poison, <vscale x 32 x bfloat> [[OP1]], <vscale x 32 x bfloat> [[OP2]], <vscale x 32 x i1> [[MASK]], i64 [[VL]])
+// CHECK-RV64-NEXT: ret <vscale x 32 x i1> [[TMP0]]
+//
+vbool2_t test_vmfgt_vv_bf16m8_b2_m(vbool2_t mask, vbfloat16m8_t op1, vbfloat16m8_t op2, size_t vl) {
+ return __riscv_vmfgt(mask, op1, op2, vl);
+}
+
+// CHECK-RV64-LABEL: define dso_local <vscale x 32 x i1> @test_vmfgt_vf_bf16m8_b2_m(
+// CHECK-RV64-SAME: <vscale x 32 x i1> [[MASK:%.*]], <vscale x 32 x bfloat> [[OP1:%.*]], bfloat noundef [[OP2:%.*]], i64 noundef [[VL:%.*]]) #[[ATTR0]] {
+// CHECK-RV64-NEXT: [[ENTRY:.*:]]
+// CHECK-RV64-NEXT: [[TMP0:%.*]] = call <vscale x 32 x i1> @llvm.riscv.vmfgt.mask.nxv32bf16.bf16.i64(<vscale x 32 x i1> poison, <vscale x 32 x bfloat> [[OP1]], bfloat [[OP2]], <vscale x 32 x i1> [[MASK]], i64 [[VL]])
+// CHECK-RV64-NEXT: ret <vscale x 32 x i1> [[TMP0]]
+//
+vbool2_t test_vmfgt_vf_bf16m8_b2_m(vbool2_t mask, vbfloat16m8_t op1, __bf16 op2, size_t vl) {
+ return __riscv_vmfgt(mask, op1, op2, vl);
+}
+
diff --git a/clang/test/CodeGen/RISCV/rvv-intrinsics-autogenerated/zvfbfa/non-policy/overloaded/vmfle.c b/clang/test/CodeGen/RISCV/rvv-intrinsics-autogenerated/zvfbfa/non-policy/overloaded/vmfle.c
new file mode 100644
index 0000000..724608c
--- /dev/null
+++ b/clang/test/CodeGen/RISCV/rvv-intrinsics-autogenerated/zvfbfa/non-policy/overloaded/vmfle.c
@@ -0,0 +1,249 @@
+// NOTE: Assertions have been autogenerated by utils/update_cc_test_checks.py UTC_ARGS: --version 5
+// REQUIRES: riscv-registered-target
+// RUN: %clang_cc1 -triple riscv64 -target-feature +v \
+// RUN: -target-feature +experimental-zvfbfa -disable-O0-optnone \
+// RUN: -emit-llvm %s -o - | opt -S -passes=mem2reg | \
+// RUN: FileCheck --check-prefix=CHECK-RV64 %s
+
+#include <riscv_vector.h>
+
+// CHECK-RV64-LABEL: define dso_local <vscale x 1 x i1> @test_vmfle_vv_bf16mf4_b64(
+// CHECK-RV64-SAME: <vscale x 1 x bfloat> [[OP1:%.*]], <vscale x 1 x bfloat> [[OP2:%.*]], i64 noundef [[VL:%.*]]) #[[ATTR0:[0-9]+]] {
+// CHECK-RV64-NEXT: [[ENTRY:.*:]]
+// CHECK-RV64-NEXT: [[TMP0:%.*]] = call <vscale x 1 x i1> @llvm.riscv.vmfle.nxv1bf16.nxv1bf16.i64(<vscale x 1 x bfloat> [[OP1]], <vscale x 1 x bfloat> [[OP2]], i64 [[VL]])
+// CHECK-RV64-NEXT: ret <vscale x 1 x i1> [[TMP0]]
+//
+vbool64_t test_vmfle_vv_bf16mf4_b64(vbfloat16mf4_t op1, vbfloat16mf4_t op2, size_t vl) {
+ return __riscv_vmfle(op1, op2, vl);
+}
+
+// CHECK-RV64-LABEL: define dso_local <vscale x 1 x i1> @test_vmfle_vf_bf16mf4_b64(
+// CHECK-RV64-SAME: <vscale x 1 x bfloat> [[OP1:%.*]], bfloat noundef [[OP2:%.*]], i64 noundef [[VL:%.*]]) #[[ATTR0]] {
+// CHECK-RV64-NEXT: [[ENTRY:.*:]]
+// CHECK-RV64-NEXT: [[TMP0:%.*]] = call <vscale x 1 x i1> @llvm.riscv.vmfle.nxv1bf16.bf16.i64(<vscale x 1 x bfloat> [[OP1]], bfloat [[OP2]], i64 [[VL]])
+// CHECK-RV64-NEXT: ret <vscale x 1 x i1> [[TMP0]]
+//
+vbool64_t test_vmfle_vf_bf16mf4_b64(vbfloat16mf4_t op1, __bf16 op2, size_t vl) {
+ return __riscv_vmfle(op1, op2, vl);
+}
+
+// CHECK-RV64-LABEL: define dso_local <vscale x 2 x i1> @test_vmfle_vv_bf16mf2_b32(
+// CHECK-RV64-SAME: <vscale x 2 x bfloat> [[OP1:%.*]], <vscale x 2 x bfloat> [[OP2:%.*]], i64 noundef [[VL:%.*]]) #[[ATTR0]] {
+// CHECK-RV64-NEXT: [[ENTRY:.*:]]
+// CHECK-RV64-NEXT: [[TMP0:%.*]] = call <vscale x 2 x i1> @llvm.riscv.vmfle.nxv2bf16.nxv2bf16.i64(<vscale x 2 x bfloat> [[OP1]], <vscale x 2 x bfloat> [[OP2]], i64 [[VL]])
+// CHECK-RV64-NEXT: ret <vscale x 2 x i1> [[TMP0]]
+//
+vbool32_t test_vmfle_vv_bf16mf2_b32(vbfloat16mf2_t op1, vbfloat16mf2_t op2, size_t vl) {
+ return __riscv_vmfle(op1, op2, vl);
+}
+
+// CHECK-RV64-LABEL: define dso_local <vscale x 2 x i1> @test_vmfle_vf_bf16mf2_b32(
+// CHECK-RV64-SAME: <vscale x 2 x bfloat> [[OP1:%.*]], bfloat noundef [[OP2:%.*]], i64 noundef [[VL:%.*]]) #[[ATTR0]] {
+// CHECK-RV64-NEXT: [[ENTRY:.*:]]
+// CHECK-RV64-NEXT: [[TMP0:%.*]] = call <vscale x 2 x i1> @llvm.riscv.vmfle.nxv2bf16.bf16.i64(<vscale x 2 x bfloat> [[OP1]], bfloat [[OP2]], i64 [[VL]])
+// CHECK-RV64-NEXT: ret <vscale x 2 x i1> [[TMP0]]
+//
+vbool32_t test_vmfle_vf_bf16mf2_b32(vbfloat16mf2_t op1, __bf16 op2, size_t vl) {
+ return __riscv_vmfle(op1, op2, vl);
+}
+
+// CHECK-RV64-LABEL: define dso_local <vscale x 4 x i1> @test_vmfle_vv_bf16m1_b16(
+// CHECK-RV64-SAME: <vscale x 4 x bfloat> [[OP1:%.*]], <vscale x 4 x bfloat> [[OP2:%.*]], i64 noundef [[VL:%.*]]) #[[ATTR0]] {
+// CHECK-RV64-NEXT: [[ENTRY:.*:]]
+// CHECK-RV64-NEXT: [[TMP0:%.*]] = call <vscale x 4 x i1> @llvm.riscv.vmfle.nxv4bf16.nxv4bf16.i64(<vscale x 4 x bfloat> [[OP1]], <vscale x 4 x bfloat> [[OP2]], i64 [[VL]])
+// CHECK-RV64-NEXT: ret <vscale x 4 x i1> [[TMP0]]
+//
+vbool16_t test_vmfle_vv_bf16m1_b16(vbfloat16m1_t op1, vbfloat16m1_t op2, size_t vl) {
+ return __riscv_vmfle(op1, op2, vl);
+}
+
+// CHECK-RV64-LABEL: define dso_local <vscale x 4 x i1> @test_vmfle_vf_bf16m1_b16(
+// CHECK-RV64-SAME: <vscale x 4 x bfloat> [[OP1:%.*]], bfloat noundef [[OP2:%.*]], i64 noundef [[VL:%.*]]) #[[ATTR0]] {
+// CHECK-RV64-NEXT: [[ENTRY:.*:]]
+// CHECK-RV64-NEXT: [[TMP0:%.*]] = call <vscale x 4 x i1> @llvm.riscv.vmfle.nxv4bf16.bf16.i64(<vscale x 4 x bfloat> [[OP1]], bfloat [[OP2]], i64 [[VL]])
+// CHECK-RV64-NEXT: ret <vscale x 4 x i1> [[TMP0]]
+//
+vbool16_t test_vmfle_vf_bf16m1_b16(vbfloat16m1_t op1, __bf16 op2, size_t vl) {
+ return __riscv_vmfle(op1, op2, vl);
+}
+
+// CHECK-RV64-LABEL: define dso_local <vscale x 8 x i1> @test_vmfle_vv_bf16m2_b8(
+// CHECK-RV64-SAME: <vscale x 8 x bfloat> [[OP1:%.*]], <vscale x 8 x bfloat> [[OP2:%.*]], i64 noundef [[VL:%.*]]) #[[ATTR0]] {
+// CHECK-RV64-NEXT: [[ENTRY:.*:]]
+// CHECK-RV64-NEXT: [[TMP0:%.*]] = call <vscale x 8 x i1> @llvm.riscv.vmfle.nxv8bf16.nxv8bf16.i64(<vscale x 8 x bfloat> [[OP1]], <vscale x 8 x bfloat> [[OP2]], i64 [[VL]])
+// CHECK-RV64-NEXT: ret <vscale x 8 x i1> [[TMP0]]
+//
+vbool8_t test_vmfle_vv_bf16m2_b8(vbfloat16m2_t op1, vbfloat16m2_t op2, size_t vl) {
+ return __riscv_vmfle(op1, op2, vl);
+}
+
+// CHECK-RV64-LABEL: define dso_local <vscale x 8 x i1> @test_vmfle_vf_bf16m2_b8(
+// CHECK-RV64-SAME: <vscale x 8 x bfloat> [[OP1:%.*]], bfloat noundef [[OP2:%.*]], i64 noundef [[VL:%.*]]) #[[ATTR0]] {
+// CHECK-RV64-NEXT: [[ENTRY:.*:]]
+// CHECK-RV64-NEXT: [[TMP0:%.*]] = call <vscale x 8 x i1> @llvm.riscv.vmfle.nxv8bf16.bf16.i64(<vscale x 8 x bfloat> [[OP1]], bfloat [[OP2]], i64 [[VL]])
+// CHECK-RV64-NEXT: ret <vscale x 8 x i1> [[TMP0]]
+//
+vbool8_t test_vmfle_vf_bf16m2_b8(vbfloat16m2_t op1, __bf16 op2, size_t vl) {
+ return __riscv_vmfle(op1, op2, vl);
+}
+
+// CHECK-RV64-LABEL: define dso_local <vscale x 16 x i1> @test_vmfle_vv_bf16m4_b4(
+// CHECK-RV64-SAME: <vscale x 16 x bfloat> [[OP1:%.*]], <vscale x 16 x bfloat> [[OP2:%.*]], i64 noundef [[VL:%.*]]) #[[ATTR0]] {
+// CHECK-RV64-NEXT: [[ENTRY:.*:]]
+// CHECK-RV64-NEXT: [[TMP0:%.*]] = call <vscale x 16 x i1> @llvm.riscv.vmfle.nxv16bf16.nxv16bf16.i64(<vscale x 16 x bfloat> [[OP1]], <vscale x 16 x bfloat> [[OP2]], i64 [[VL]])
+// CHECK-RV64-NEXT: ret <vscale x 16 x i1> [[TMP0]]
+//
+vbool4_t test_vmfle_vv_bf16m4_b4(vbfloat16m4_t op1, vbfloat16m4_t op2, size_t vl) {
+ return __riscv_vmfle(op1, op2, vl);
+}
+
+// CHECK-RV64-LABEL: define dso_local <vscale x 16 x i1> @test_vmfle_vf_bf16m4_b4(
+// CHECK-RV64-SAME: <vscale x 16 x bfloat> [[OP1:%.*]], bfloat noundef [[OP2:%.*]], i64 noundef [[VL:%.*]]) #[[ATTR0]] {
+// CHECK-RV64-NEXT: [[ENTRY:.*:]]
+// CHECK-RV64-NEXT: [[TMP0:%.*]] = call <vscale x 16 x i1> @llvm.riscv.vmfle.nxv16bf16.bf16.i64(<vscale x 16 x bfloat> [[OP1]], bfloat [[OP2]], i64 [[VL]])
+// CHECK-RV64-NEXT: ret <vscale x 16 x i1> [[TMP0]]
+//
+vbool4_t test_vmfle_vf_bf16m4_b4(vbfloat16m4_t op1, __bf16 op2, size_t vl) {
+ return __riscv_vmfle(op1, op2, vl);
+}
+
+// CHECK-RV64-LABEL: define dso_local <vscale x 32 x i1> @test_vmfle_vv_bf16m8_b2(
+// CHECK-RV64-SAME: <vscale x 32 x bfloat> [[OP1:%.*]], <vscale x 32 x bfloat> [[OP2:%.*]], i64 noundef [[VL:%.*]]) #[[ATTR0]] {
+// CHECK-RV64-NEXT: [[ENTRY:.*:]]
+// CHECK-RV64-NEXT: [[TMP0:%.*]] = call <vscale x 32 x i1> @llvm.riscv.vmfle.nxv32bf16.nxv32bf16.i64(<vscale x 32 x bfloat> [[OP1]], <vscale x 32 x bfloat> [[OP2]], i64 [[VL]])
+// CHECK-RV64-NEXT: ret <vscale x 32 x i1> [[TMP0]]
+//
+vbool2_t test_vmfle_vv_bf16m8_b2(vbfloat16m8_t op1, vbfloat16m8_t op2, size_t vl) {
+ return __riscv_vmfle(op1, op2, vl);
+}
+
+// CHECK-RV64-LABEL: define dso_local <vscale x 32 x i1> @test_vmfle_vf_bf16m8_b2(
+// CHECK-RV64-SAME: <vscale x 32 x bfloat> [[OP1:%.*]], bfloat noundef [[OP2:%.*]], i64 noundef [[VL:%.*]]) #[[ATTR0]] {
+// CHECK-RV64-NEXT: [[ENTRY:.*:]]
+// CHECK-RV64-NEXT: [[TMP0:%.*]] = call <vscale x 32 x i1> @llvm.riscv.vmfle.nxv32bf16.bf16.i64(<vscale x 32 x bfloat> [[OP1]], bfloat [[OP2]], i64 [[VL]])
+// CHECK-RV64-NEXT: ret <vscale x 32 x i1> [[TMP0]]
+//
+vbool2_t test_vmfle_vf_bf16m8_b2(vbfloat16m8_t op1, __bf16 op2, size_t vl) {
+ return __riscv_vmfle(op1, op2, vl);
+}
+
+// CHECK-RV64-LABEL: define dso_local <vscale x 1 x i1> @test_vmfle_vv_bf16mf4_b64_m(
+// CHECK-RV64-SAME: <vscale x 1 x i1> [[MASK:%.*]], <vscale x 1 x bfloat> [[OP1:%.*]], <vscale x 1 x bfloat> [[OP2:%.*]], i64 noundef [[VL:%.*]]) #[[ATTR0]] {
+// CHECK-RV64-NEXT: [[ENTRY:.*:]]
+// CHECK-RV64-NEXT: [[TMP0:%.*]] = call <vscale x 1 x i1> @llvm.riscv.vmfle.mask.nxv1bf16.nxv1bf16.i64(<vscale x 1 x i1> poison, <vscale x 1 x bfloat> [[OP1]], <vscale x 1 x bfloat> [[OP2]], <vscale x 1 x i1> [[MASK]], i64 [[VL]])
+// CHECK-RV64-NEXT: ret <vscale x 1 x i1> [[TMP0]]
+//
+vbool64_t test_vmfle_vv_bf16mf4_b64_m(vbool64_t mask, vbfloat16mf4_t op1, vbfloat16mf4_t op2, size_t vl) {
+ return __riscv_vmfle(mask, op1, op2, vl);
+}
+
+// CHECK-RV64-LABEL: define dso_local <vscale x 1 x i1> @test_vmfle_vf_bf16mf4_b64_m(
+// CHECK-RV64-SAME: <vscale x 1 x i1> [[MASK:%.*]], <vscale x 1 x bfloat> [[OP1:%.*]], bfloat noundef [[OP2:%.*]], i64 noundef [[VL:%.*]]) #[[ATTR0]] {
+// CHECK-RV64-NEXT: [[ENTRY:.*:]]
+// CHECK-RV64-NEXT: [[TMP0:%.*]] = call <vscale x 1 x i1> @llvm.riscv.vmfle.mask.nxv1bf16.bf16.i64(<vscale x 1 x i1> poison, <vscale x 1 x bfloat> [[OP1]], bfloat [[OP2]], <vscale x 1 x i1> [[MASK]], i64 [[VL]])
+// CHECK-RV64-NEXT: ret <vscale x 1 x i1> [[TMP0]]
+//
+vbool64_t test_vmfle_vf_bf16mf4_b64_m(vbool64_t mask, vbfloat16mf4_t op1, __bf16 op2, size_t vl) {
+ return __riscv_vmfle(mask, op1, op2, vl);
+}
+
+// CHECK-RV64-LABEL: define dso_local <vscale x 2 x i1> @test_vmfle_vv_bf16mf2_b32_m(
+// CHECK-RV64-SAME: <vscale x 2 x i1> [[MASK:%.*]], <vscale x 2 x bfloat> [[OP1:%.*]], <vscale x 2 x bfloat> [[OP2:%.*]], i64 noundef [[VL:%.*]]) #[[ATTR0]] {
+// CHECK-RV64-NEXT: [[ENTRY:.*:]]
+// CHECK-RV64-NEXT: [[TMP0:%.*]] = call <vscale x 2 x i1> @llvm.riscv.vmfle.mask.nxv2bf16.nxv2bf16.i64(<vscale x 2 x i1> poison, <vscale x 2 x bfloat> [[OP1]], <vscale x 2 x bfloat> [[OP2]], <vscale x 2 x i1> [[MASK]], i64 [[VL]])
+// CHECK-RV64-NEXT: ret <vscale x 2 x i1> [[TMP0]]
+//
+vbool32_t test_vmfle_vv_bf16mf2_b32_m(vbool32_t mask, vbfloat16mf2_t op1, vbfloat16mf2_t op2, size_t vl) {
+ return __riscv_vmfle(mask, op1, op2, vl);
+}
+
+// CHECK-RV64-LABEL: define dso_local <vscale x 2 x i1> @test_vmfle_vf_bf16mf2_b32_m(
+// CHECK-RV64-SAME: <vscale x 2 x i1> [[MASK:%.*]], <vscale x 2 x bfloat> [[OP1:%.*]], bfloat noundef [[OP2:%.*]], i64 noundef [[VL:%.*]]) #[[ATTR0]] {
+// CHECK-RV64-NEXT: [[ENTRY:.*:]]
+// CHECK-RV64-NEXT: [[TMP0:%.*]] = call <vscale x 2 x i1> @llvm.riscv.vmfle.mask.nxv2bf16.bf16.i64(<vscale x 2 x i1> poison, <vscale x 2 x bfloat> [[OP1]], bfloat [[OP2]], <vscale x 2 x i1> [[MASK]], i64 [[VL]])
+// CHECK-RV64-NEXT: ret <vscale x 2 x i1> [[TMP0]]
+//
+vbool32_t test_vmfle_vf_bf16mf2_b32_m(vbool32_t mask, vbfloat16mf2_t op1, __bf16 op2, size_t vl) {
+ return __riscv_vmfle(mask, op1, op2, vl);
+}
+
+// CHECK-RV64-LABEL: define dso_local <vscale x 4 x i1> @test_vmfle_vv_bf16m1_b16_m(
+// CHECK-RV64-SAME: <vscale x 4 x i1> [[MASK:%.*]], <vscale x 4 x bfloat> [[OP1:%.*]], <vscale x 4 x bfloat> [[OP2:%.*]], i64 noundef [[VL:%.*]]) #[[ATTR0]] {
+// CHECK-RV64-NEXT: [[ENTRY:.*:]]
+// CHECK-RV64-NEXT: [[TMP0:%.*]] = call <vscale x 4 x i1> @llvm.riscv.vmfle.mask.nxv4bf16.nxv4bf16.i64(<vscale x 4 x i1> poison, <vscale x 4 x bfloat> [[OP1]], <vscale x 4 x bfloat> [[OP2]], <vscale x 4 x i1> [[MASK]], i64 [[VL]])
+// CHECK-RV64-NEXT: ret <vscale x 4 x i1> [[TMP0]]
+//
+vbool16_t test_vmfle_vv_bf16m1_b16_m(vbool16_t mask, vbfloat16m1_t op1, vbfloat16m1_t op2, size_t vl) {
+ return __riscv_vmfle(mask, op1, op2, vl);
+}
+
+// CHECK-RV64-LABEL: define dso_local <vscale x 4 x i1> @test_vmfle_vf_bf16m1_b16_m(
+// CHECK-RV64-SAME: <vscale x 4 x i1> [[MASK:%.*]], <vscale x 4 x bfloat> [[OP1:%.*]], bfloat noundef [[OP2:%.*]], i64 noundef [[VL:%.*]]) #[[ATTR0]] {
+// CHECK-RV64-NEXT: [[ENTRY:.*:]]
+// CHECK-RV64-NEXT: [[TMP0:%.*]] = call <vscale x 4 x i1> @llvm.riscv.vmfle.mask.nxv4bf16.bf16.i64(<vscale x 4 x i1> poison, <vscale x 4 x bfloat> [[OP1]], bfloat [[OP2]], <vscale x 4 x i1> [[MASK]], i64 [[VL]])
+// CHECK-RV64-NEXT: ret <vscale x 4 x i1> [[TMP0]]
+//
+vbool16_t test_vmfle_vf_bf16m1_b16_m(vbool16_t mask, vbfloat16m1_t op1, __bf16 op2, size_t vl) {
+ return __riscv_vmfle(mask, op1, op2, vl);
+}
+
+// CHECK-RV64-LABEL: define dso_local <vscale x 8 x i1> @test_vmfle_vv_bf16m2_b8_m(
+// CHECK-RV64-SAME: <vscale x 8 x i1> [[MASK:%.*]], <vscale x 8 x bfloat> [[OP1:%.*]], <vscale x 8 x bfloat> [[OP2:%.*]], i64 noundef [[VL:%.*]]) #[[ATTR0]] {
+// CHECK-RV64-NEXT: [[ENTRY:.*:]]
+// CHECK-RV64-NEXT: [[TMP0:%.*]] = call <vscale x 8 x i1> @llvm.riscv.vmfle.mask.nxv8bf16.nxv8bf16.i64(<vscale x 8 x i1> poison, <vscale x 8 x bfloat> [[OP1]], <vscale x 8 x bfloat> [[OP2]], <vscale x 8 x i1> [[MASK]], i64 [[VL]])
+// CHECK-RV64-NEXT: ret <vscale x 8 x i1> [[TMP0]]
+//
+vbool8_t test_vmfle_vv_bf16m2_b8_m(vbool8_t mask, vbfloat16m2_t op1, vbfloat16m2_t op2, size_t vl) {
+ return __riscv_vmfle(mask, op1, op2, vl);
+}
+
+// CHECK-RV64-LABEL: define dso_local <vscale x 8 x i1> @test_vmfle_vf_bf16m2_b8_m(
+// CHECK-RV64-SAME: <vscale x 8 x i1> [[MASK:%.*]], <vscale x 8 x bfloat> [[OP1:%.*]], bfloat noundef [[OP2:%.*]], i64 noundef [[VL:%.*]]) #[[ATTR0]] {
+// CHECK-RV64-NEXT: [[ENTRY:.*:]]
+// CHECK-RV64-NEXT: [[TMP0:%.*]] = call <vscale x 8 x i1> @llvm.riscv.vmfle.mask.nxv8bf16.bf16.i64(<vscale x 8 x i1> poison, <vscale x 8 x bfloat> [[OP1]], bfloat [[OP2]], <vscale x 8 x i1> [[MASK]], i64 [[VL]])
+// CHECK-RV64-NEXT: ret <vscale x 8 x i1> [[TMP0]]
+//
+vbool8_t test_vmfle_vf_bf16m2_b8_m(vbool8_t mask, vbfloat16m2_t op1, __bf16 op2, size_t vl) {
+ return __riscv_vmfle(mask, op1, op2, vl);
+}
+
+// CHECK-RV64-LABEL: define dso_local <vscale x 16 x i1> @test_vmfle_vv_bf16m4_b4_m(
+// CHECK-RV64-SAME: <vscale x 16 x i1> [[MASK:%.*]], <vscale x 16 x bfloat> [[OP1:%.*]], <vscale x 16 x bfloat> [[OP2:%.*]], i64 noundef [[VL:%.*]]) #[[ATTR0]] {
+// CHECK-RV64-NEXT: [[ENTRY:.*:]]
+// CHECK-RV64-NEXT: [[TMP0:%.*]] = call <vscale x 16 x i1> @llvm.riscv.vmfle.mask.nxv16bf16.nxv16bf16.i64(<vscale x 16 x i1> poison, <vscale x 16 x bfloat> [[OP1]], <vscale x 16 x bfloat> [[OP2]], <vscale x 16 x i1> [[MASK]], i64 [[VL]])
+// CHECK-RV64-NEXT: ret <vscale x 16 x i1> [[TMP0]]
+//
+vbool4_t test_vmfle_vv_bf16m4_b4_m(vbool4_t mask, vbfloat16m4_t op1, vbfloat16m4_t op2, size_t vl) {
+ return __riscv_vmfle(mask, op1, op2, vl);
+}
+
+// CHECK-RV64-LABEL: define dso_local <vscale x 16 x i1> @test_vmfle_vf_bf16m4_b4_m(
+// CHECK-RV64-SAME: <vscale x 16 x i1> [[MASK:%.*]], <vscale x 16 x bfloat> [[OP1:%.*]], bfloat noundef [[OP2:%.*]], i64 noundef [[VL:%.*]]) #[[ATTR0]] {
+// CHECK-RV64-NEXT: [[ENTRY:.*:]]
+// CHECK-RV64-NEXT: [[TMP0:%.*]] = call <vscale x 16 x i1> @llvm.riscv.vmfle.mask.nxv16bf16.bf16.i64(<vscale x 16 x i1> poison, <vscale x 16 x bfloat> [[OP1]], bfloat [[OP2]], <vscale x 16 x i1> [[MASK]], i64 [[VL]])
+// CHECK-RV64-NEXT: ret <vscale x 16 x i1> [[TMP0]]
+//
+vbool4_t test_vmfle_vf_bf16m4_b4_m(vbool4_t mask, vbfloat16m4_t op1, __bf16 op2, size_t vl) {
+ return __riscv_vmfle(mask, op1, op2, vl);
+}
+
+// CHECK-RV64-LABEL: define dso_local <vscale x 32 x i1> @test_vmfle_vv_bf16m8_b2_m(
+// CHECK-RV64-SAME: <vscale x 32 x i1> [[MASK:%.*]], <vscale x 32 x bfloat> [[OP1:%.*]], <vscale x 32 x bfloat> [[OP2:%.*]], i64 noundef [[VL:%.*]]) #[[ATTR0]] {
+// CHECK-RV64-NEXT: [[ENTRY:.*:]]
+// CHECK-RV64-NEXT: [[TMP0:%.*]] = call <vscale x 32 x i1> @llvm.riscv.vmfle.mask.nxv32bf16.nxv32bf16.i64(<vscale x 32 x i1> poison, <vscale x 32 x bfloat> [[OP1]], <vscale x 32 x bfloat> [[OP2]], <vscale x 32 x i1> [[MASK]], i64 [[VL]])
+// CHECK-RV64-NEXT: ret <vscale x 32 x i1> [[TMP0]]
+//
+vbool2_t test_vmfle_vv_bf16m8_b2_m(vbool2_t mask, vbfloat16m8_t op1, vbfloat16m8_t op2, size_t vl) {
+ return __riscv_vmfle(mask, op1, op2, vl);
+}
+
+// CHECK-RV64-LABEL: define dso_local <vscale x 32 x i1> @test_vmfle_vf_bf16m8_b2_m(
+// CHECK-RV64-SAME: <vscale x 32 x i1> [[MASK:%.*]], <vscale x 32 x bfloat> [[OP1:%.*]], bfloat noundef [[OP2:%.*]], i64 noundef [[VL:%.*]]) #[[ATTR0]] {
+// CHECK-RV64-NEXT: [[ENTRY:.*:]]
+// CHECK-RV64-NEXT: [[TMP0:%.*]] = call <vscale x 32 x i1> @llvm.riscv.vmfle.mask.nxv32bf16.bf16.i64(<vscale x 32 x i1> poison, <vscale x 32 x bfloat> [[OP1]], bfloat [[OP2]], <vscale x 32 x i1> [[MASK]], i64 [[VL]])
+// CHECK-RV64-NEXT: ret <vscale x 32 x i1> [[TMP0]]
+//
+vbool2_t test_vmfle_vf_bf16m8_b2_m(vbool2_t mask, vbfloat16m8_t op1, __bf16 op2, size_t vl) {
+ return __riscv_vmfle(mask, op1, op2, vl);
+}
+
diff --git a/clang/test/CodeGen/RISCV/rvv-intrinsics-autogenerated/zvfbfa/non-policy/overloaded/vmflt.c b/clang/test/CodeGen/RISCV/rvv-intrinsics-autogenerated/zvfbfa/non-policy/overloaded/vmflt.c
new file mode 100644
index 0000000..1b0b898
--- /dev/null
+++ b/clang/test/CodeGen/RISCV/rvv-intrinsics-autogenerated/zvfbfa/non-policy/overloaded/vmflt.c
@@ -0,0 +1,249 @@
+// NOTE: Assertions have been autogenerated by utils/update_cc_test_checks.py UTC_ARGS: --version 5
+// REQUIRES: riscv-registered-target
+// RUN: %clang_cc1 -triple riscv64 -target-feature +v \
+// RUN: -target-feature +experimental-zvfbfa -disable-O0-optnone \
+// RUN: -emit-llvm %s -o - | opt -S -passes=mem2reg | \
+// RUN: FileCheck --check-prefix=CHECK-RV64 %s
+
+#include <riscv_vector.h>
+
+// CHECK-RV64-LABEL: define dso_local <vscale x 1 x i1> @test_vmflt_vv_bf16mf4_b64(
+// CHECK-RV64-SAME: <vscale x 1 x bfloat> [[OP1:%.*]], <vscale x 1 x bfloat> [[OP2:%.*]], i64 noundef [[VL:%.*]]) #[[ATTR0:[0-9]+]] {
+// CHECK-RV64-NEXT: [[ENTRY:.*:]]
+// CHECK-RV64-NEXT: [[TMP0:%.*]] = call <vscale x 1 x i1> @llvm.riscv.vmflt.nxv1bf16.nxv1bf16.i64(<vscale x 1 x bfloat> [[OP1]], <vscale x 1 x bfloat> [[OP2]], i64 [[VL]])
+// CHECK-RV64-NEXT: ret <vscale x 1 x i1> [[TMP0]]
+//
+vbool64_t test_vmflt_vv_bf16mf4_b64(vbfloat16mf4_t op1, vbfloat16mf4_t op2, size_t vl) {
+ return __riscv_vmflt(op1, op2, vl);
+}
+
+// CHECK-RV64-LABEL: define dso_local <vscale x 1 x i1> @test_vmflt_vf_bf16mf4_b64(
+// CHECK-RV64-SAME: <vscale x 1 x bfloat> [[OP1:%.*]], bfloat noundef [[OP2:%.*]], i64 noundef [[VL:%.*]]) #[[ATTR0]] {
+// CHECK-RV64-NEXT: [[ENTRY:.*:]]
+// CHECK-RV64-NEXT: [[TMP0:%.*]] = call <vscale x 1 x i1> @llvm.riscv.vmflt.nxv1bf16.bf16.i64(<vscale x 1 x bfloat> [[OP1]], bfloat [[OP2]], i64 [[VL]])
+// CHECK-RV64-NEXT: ret <vscale x 1 x i1> [[TMP0]]
+//
+vbool64_t test_vmflt_vf_bf16mf4_b64(vbfloat16mf4_t op1, __bf16 op2, size_t vl) {
+ return __riscv_vmflt(op1, op2, vl);
+}
+
+// CHECK-RV64-LABEL: define dso_local <vscale x 2 x i1> @test_vmflt_vv_bf16mf2_b32(
+// CHECK-RV64-SAME: <vscale x 2 x bfloat> [[OP1:%.*]], <vscale x 2 x bfloat> [[OP2:%.*]], i64 noundef [[VL:%.*]]) #[[ATTR0]] {
+// CHECK-RV64-NEXT: [[ENTRY:.*:]]
+// CHECK-RV64-NEXT: [[TMP0:%.*]] = call <vscale x 2 x i1> @llvm.riscv.vmflt.nxv2bf16.nxv2bf16.i64(<vscale x 2 x bfloat> [[OP1]], <vscale x 2 x bfloat> [[OP2]], i64 [[VL]])
+// CHECK-RV64-NEXT: ret <vscale x 2 x i1> [[TMP0]]
+//
+vbool32_t test_vmflt_vv_bf16mf2_b32(vbfloat16mf2_t op1, vbfloat16mf2_t op2, size_t vl) {
+ return __riscv_vmflt(op1, op2, vl);
+}
+
+// CHECK-RV64-LABEL: define dso_local <vscale x 2 x i1> @test_vmflt_vf_bf16mf2_b32(
+// CHECK-RV64-SAME: <vscale x 2 x bfloat> [[OP1:%.*]], bfloat noundef [[OP2:%.*]], i64 noundef [[VL:%.*]]) #[[ATTR0]] {
+// CHECK-RV64-NEXT: [[ENTRY:.*:]]
+// CHECK-RV64-NEXT: [[TMP0:%.*]] = call <vscale x 2 x i1> @llvm.riscv.vmflt.nxv2bf16.bf16.i64(<vscale x 2 x bfloat> [[OP1]], bfloat [[OP2]], i64 [[VL]])
+// CHECK-RV64-NEXT: ret <vscale x 2 x i1> [[TMP0]]
+//
+vbool32_t test_vmflt_vf_bf16mf2_b32(vbfloat16mf2_t op1, __bf16 op2, size_t vl) {
+ return __riscv_vmflt(op1, op2, vl);
+}
+
+// CHECK-RV64-LABEL: define dso_local <vscale x 4 x i1> @test_vmflt_vv_bf16m1_b16(
+// CHECK-RV64-SAME: <vscale x 4 x bfloat> [[OP1:%.*]], <vscale x 4 x bfloat> [[OP2:%.*]], i64 noundef [[VL:%.*]]) #[[ATTR0]] {
+// CHECK-RV64-NEXT: [[ENTRY:.*:]]
+// CHECK-RV64-NEXT: [[TMP0:%.*]] = call <vscale x 4 x i1> @llvm.riscv.vmflt.nxv4bf16.nxv4bf16.i64(<vscale x 4 x bfloat> [[OP1]], <vscale x 4 x bfloat> [[OP2]], i64 [[VL]])
+// CHECK-RV64-NEXT: ret <vscale x 4 x i1> [[TMP0]]
+//
+vbool16_t test_vmflt_vv_bf16m1_b16(vbfloat16m1_t op1, vbfloat16m1_t op2, size_t vl) {
+ return __riscv_vmflt(op1, op2, vl);
+}
+
+// CHECK-RV64-LABEL: define dso_local <vscale x 4 x i1> @test_vmflt_vf_bf16m1_b16(
+// CHECK-RV64-SAME: <vscale x 4 x bfloat> [[OP1:%.*]], bfloat noundef [[OP2:%.*]], i64 noundef [[VL:%.*]]) #[[ATTR0]] {
+// CHECK-RV64-NEXT: [[ENTRY:.*:]]
+// CHECK-RV64-NEXT: [[TMP0:%.*]] = call <vscale x 4 x i1> @llvm.riscv.vmflt.nxv4bf16.bf16.i64(<vscale x 4 x bfloat> [[OP1]], bfloat [[OP2]], i64 [[VL]])
+// CHECK-RV64-NEXT: ret <vscale x 4 x i1> [[TMP0]]
+//
+vbool16_t test_vmflt_vf_bf16m1_b16(vbfloat16m1_t op1, __bf16 op2, size_t vl) {
+ return __riscv_vmflt(op1, op2, vl);
+}
+
+// CHECK-RV64-LABEL: define dso_local <vscale x 8 x i1> @test_vmflt_vv_bf16m2_b8(
+// CHECK-RV64-SAME: <vscale x 8 x bfloat> [[OP1:%.*]], <vscale x 8 x bfloat> [[OP2:%.*]], i64 noundef [[VL:%.*]]) #[[ATTR0]] {
+// CHECK-RV64-NEXT: [[ENTRY:.*:]]
+// CHECK-RV64-NEXT: [[TMP0:%.*]] = call <vscale x 8 x i1> @llvm.riscv.vmflt.nxv8bf16.nxv8bf16.i64(<vscale x 8 x bfloat> [[OP1]], <vscale x 8 x bfloat> [[OP2]], i64 [[VL]])
+// CHECK-RV64-NEXT: ret <vscale x 8 x i1> [[TMP0]]
+//
+vbool8_t test_vmflt_vv_bf16m2_b8(vbfloat16m2_t op1, vbfloat16m2_t op2, size_t vl) {
+ return __riscv_vmflt(op1, op2, vl);
+}
+
+// CHECK-RV64-LABEL: define dso_local <vscale x 8 x i1> @test_vmflt_vf_bf16m2_b8(
+// CHECK-RV64-SAME: <vscale x 8 x bfloat> [[OP1:%.*]], bfloat noundef [[OP2:%.*]], i64 noundef [[VL:%.*]]) #[[ATTR0]] {
+// CHECK-RV64-NEXT: [[ENTRY:.*:]]
+// CHECK-RV64-NEXT: [[TMP0:%.*]] = call <vscale x 8 x i1> @llvm.riscv.vmflt.nxv8bf16.bf16.i64(<vscale x 8 x bfloat> [[OP1]], bfloat [[OP2]], i64 [[VL]])
+// CHECK-RV64-NEXT: ret <vscale x 8 x i1> [[TMP0]]
+//
+vbool8_t test_vmflt_vf_bf16m2_b8(vbfloat16m2_t op1, __bf16 op2, size_t vl) {
+ return __riscv_vmflt(op1, op2, vl);
+}
+
+// CHECK-RV64-LABEL: define dso_local <vscale x 16 x i1> @test_vmflt_vv_bf16m4_b4(
+// CHECK-RV64-SAME: <vscale x 16 x bfloat> [[OP1:%.*]], <vscale x 16 x bfloat> [[OP2:%.*]], i64 noundef [[VL:%.*]]) #[[ATTR0]] {
+// CHECK-RV64-NEXT: [[ENTRY:.*:]]
+// CHECK-RV64-NEXT: [[TMP0:%.*]] = call <vscale x 16 x i1> @llvm.riscv.vmflt.nxv16bf16.nxv16bf16.i64(<vscale x 16 x bfloat> [[OP1]], <vscale x 16 x bfloat> [[OP2]], i64 [[VL]])
+// CHECK-RV64-NEXT: ret <vscale x 16 x i1> [[TMP0]]
+//
+vbool4_t test_vmflt_vv_bf16m4_b4(vbfloat16m4_t op1, vbfloat16m4_t op2, size_t vl) {
+ return __riscv_vmflt(op1, op2, vl);
+}
+
+// CHECK-RV64-LABEL: define dso_local <vscale x 16 x i1> @test_vmflt_vf_bf16m4_b4(
+// CHECK-RV64-SAME: <vscale x 16 x bfloat> [[OP1:%.*]], bfloat noundef [[OP2:%.*]], i64 noundef [[VL:%.*]]) #[[ATTR0]] {
+// CHECK-RV64-NEXT: [[ENTRY:.*:]]
+// CHECK-RV64-NEXT: [[TMP0:%.*]] = call <vscale x 16 x i1> @llvm.riscv.vmflt.nxv16bf16.bf16.i64(<vscale x 16 x bfloat> [[OP1]], bfloat [[OP2]], i64 [[VL]])
+// CHECK-RV64-NEXT: ret <vscale x 16 x i1> [[TMP0]]
+//
+vbool4_t test_vmflt_vf_bf16m4_b4(vbfloat16m4_t op1, __bf16 op2, size_t vl) {
+ return __riscv_vmflt(op1, op2, vl);
+}
+
+// CHECK-RV64-LABEL: define dso_local <vscale x 32 x i1> @test_vmflt_vv_bf16m8_b2(
+// CHECK-RV64-SAME: <vscale x 32 x bfloat> [[OP1:%.*]], <vscale x 32 x bfloat> [[OP2:%.*]], i64 noundef [[VL:%.*]]) #[[ATTR0]] {
+// CHECK-RV64-NEXT: [[ENTRY:.*:]]
+// CHECK-RV64-NEXT: [[TMP0:%.*]] = call <vscale x 32 x i1> @llvm.riscv.vmflt.nxv32bf16.nxv32bf16.i64(<vscale x 32 x bfloat> [[OP1]], <vscale x 32 x bfloat> [[OP2]], i64 [[VL]])
+// CHECK-RV64-NEXT: ret <vscale x 32 x i1> [[TMP0]]
+//
+vbool2_t test_vmflt_vv_bf16m8_b2(vbfloat16m8_t op1, vbfloat16m8_t op2, size_t vl) {
+ return __riscv_vmflt(op1, op2, vl);
+}
+
+// CHECK-RV64-LABEL: define dso_local <vscale x 32 x i1> @test_vmflt_vf_bf16m8_b2(
+// CHECK-RV64-SAME: <vscale x 32 x bfloat> [[OP1:%.*]], bfloat noundef [[OP2:%.*]], i64 noundef [[VL:%.*]]) #[[ATTR0]] {
+// CHECK-RV64-NEXT: [[ENTRY:.*:]]
+// CHECK-RV64-NEXT: [[TMP0:%.*]] = call <vscale x 32 x i1> @llvm.riscv.vmflt.nxv32bf16.bf16.i64(<vscale x 32 x bfloat> [[OP1]], bfloat [[OP2]], i64 [[VL]])
+// CHECK-RV64-NEXT: ret <vscale x 32 x i1> [[TMP0]]
+//
+vbool2_t test_vmflt_vf_bf16m8_b2(vbfloat16m8_t op1, __bf16 op2, size_t vl) {
+ return __riscv_vmflt(op1, op2, vl);
+}
+
+// CHECK-RV64-LABEL: define dso_local <vscale x 1 x i1> @test_vmflt_vv_bf16mf4_b64_m(
+// CHECK-RV64-SAME: <vscale x 1 x i1> [[MASK:%.*]], <vscale x 1 x bfloat> [[OP1:%.*]], <vscale x 1 x bfloat> [[OP2:%.*]], i64 noundef [[VL:%.*]]) #[[ATTR0]] {
+// CHECK-RV64-NEXT: [[ENTRY:.*:]]
+// CHECK-RV64-NEXT: [[TMP0:%.*]] = call <vscale x 1 x i1> @llvm.riscv.vmflt.mask.nxv1bf16.nxv1bf16.i64(<vscale x 1 x i1> poison, <vscale x 1 x bfloat> [[OP1]], <vscale x 1 x bfloat> [[OP2]], <vscale x 1 x i1> [[MASK]], i64 [[VL]])
+// CHECK-RV64-NEXT: ret <vscale x 1 x i1> [[TMP0]]
+//
+vbool64_t test_vmflt_vv_bf16mf4_b64_m(vbool64_t mask, vbfloat16mf4_t op1, vbfloat16mf4_t op2, size_t vl) {
+ return __riscv_vmflt(mask, op1, op2, vl);
+}
+
+// CHECK-RV64-LABEL: define dso_local <vscale x 1 x i1> @test_vmflt_vf_bf16mf4_b64_m(
+// CHECK-RV64-SAME: <vscale x 1 x i1> [[MASK:%.*]], <vscale x 1 x bfloat> [[OP1:%.*]], bfloat noundef [[OP2:%.*]], i64 noundef [[VL:%.*]]) #[[ATTR0]] {
+// CHECK-RV64-NEXT: [[ENTRY:.*:]]
+// CHECK-RV64-NEXT: [[TMP0:%.*]] = call <vscale x 1 x i1> @llvm.riscv.vmflt.mask.nxv1bf16.bf16.i64(<vscale x 1 x i1> poison, <vscale x 1 x bfloat> [[OP1]], bfloat [[OP2]], <vscale x 1 x i1> [[MASK]], i64 [[VL]])
+// CHECK-RV64-NEXT: ret <vscale x 1 x i1> [[TMP0]]
+//
+vbool64_t test_vmflt_vf_bf16mf4_b64_m(vbool64_t mask, vbfloat16mf4_t op1, __bf16 op2, size_t vl) {
+ return __riscv_vmflt(mask, op1, op2, vl);
+}
+
+// CHECK-RV64-LABEL: define dso_local <vscale x 2 x i1> @test_vmflt_vv_bf16mf2_b32_m(
+// CHECK-RV64-SAME: <vscale x 2 x i1> [[MASK:%.*]], <vscale x 2 x bfloat> [[OP1:%.*]], <vscale x 2 x bfloat> [[OP2:%.*]], i64 noundef [[VL:%.*]]) #[[ATTR0]] {
+// CHECK-RV64-NEXT: [[ENTRY:.*:]]
+// CHECK-RV64-NEXT: [[TMP0:%.*]] = call <vscale x 2 x i1> @llvm.riscv.vmflt.mask.nxv2bf16.nxv2bf16.i64(<vscale x 2 x i1> poison, <vscale x 2 x bfloat> [[OP1]], <vscale x 2 x bfloat> [[OP2]], <vscale x 2 x i1> [[MASK]], i64 [[VL]])
+// CHECK-RV64-NEXT: ret <vscale x 2 x i1> [[TMP0]]
+//
+vbool32_t test_vmflt_vv_bf16mf2_b32_m(vbool32_t mask, vbfloat16mf2_t op1, vbfloat16mf2_t op2, size_t vl) {
+ return __riscv_vmflt(mask, op1, op2, vl);
+}
+
+// CHECK-RV64-LABEL: define dso_local <vscale x 2 x i1> @test_vmflt_vf_bf16mf2_b32_m(
+// CHECK-RV64-SAME: <vscale x 2 x i1> [[MASK:%.*]], <vscale x 2 x bfloat> [[OP1:%.*]], bfloat noundef [[OP2:%.*]], i64 noundef [[VL:%.*]]) #[[ATTR0]] {
+// CHECK-RV64-NEXT: [[ENTRY:.*:]]
+// CHECK-RV64-NEXT: [[TMP0:%.*]] = call <vscale x 2 x i1> @llvm.riscv.vmflt.mask.nxv2bf16.bf16.i64(<vscale x 2 x i1> poison, <vscale x 2 x bfloat> [[OP1]], bfloat [[OP2]], <vscale x 2 x i1> [[MASK]], i64 [[VL]])
+// CHECK-RV64-NEXT: ret <vscale x 2 x i1> [[TMP0]]
+//
+vbool32_t test_vmflt_vf_bf16mf2_b32_m(vbool32_t mask, vbfloat16mf2_t op1, __bf16 op2, size_t vl) {
+ return __riscv_vmflt(mask, op1, op2, vl);
+}
+
+// CHECK-RV64-LABEL: define dso_local <vscale x 4 x i1> @test_vmflt_vv_bf16m1_b16_m(
+// CHECK-RV64-SAME: <vscale x 4 x i1> [[MASK:%.*]], <vscale x 4 x bfloat> [[OP1:%.*]], <vscale x 4 x bfloat> [[OP2:%.*]], i64 noundef [[VL:%.*]]) #[[ATTR0]] {
+// CHECK-RV64-NEXT: [[ENTRY:.*:]]
+// CHECK-RV64-NEXT: [[TMP0:%.*]] = call <vscale x 4 x i1> @llvm.riscv.vmflt.mask.nxv4bf16.nxv4bf16.i64(<vscale x 4 x i1> poison, <vscale x 4 x bfloat> [[OP1]], <vscale x 4 x bfloat> [[OP2]], <vscale x 4 x i1> [[MASK]], i64 [[VL]])
+// CHECK-RV64-NEXT: ret <vscale x 4 x i1> [[TMP0]]
+//
+vbool16_t test_vmflt_vv_bf16m1_b16_m(vbool16_t mask, vbfloat16m1_t op1, vbfloat16m1_t op2, size_t vl) {
+ return __riscv_vmflt(mask, op1, op2, vl);
+}
+
+// CHECK-RV64-LABEL: define dso_local <vscale x 4 x i1> @test_vmflt_vf_bf16m1_b16_m(
+// CHECK-RV64-SAME: <vscale x 4 x i1> [[MASK:%.*]], <vscale x 4 x bfloat> [[OP1:%.*]], bfloat noundef [[OP2:%.*]], i64 noundef [[VL:%.*]]) #[[ATTR0]] {
+// CHECK-RV64-NEXT: [[ENTRY:.*:]]
+// CHECK-RV64-NEXT: [[TMP0:%.*]] = call <vscale x 4 x i1> @llvm.riscv.vmflt.mask.nxv4bf16.bf16.i64(<vscale x 4 x i1> poison, <vscale x 4 x bfloat> [[OP1]], bfloat [[OP2]], <vscale x 4 x i1> [[MASK]], i64 [[VL]])
+// CHECK-RV64-NEXT: ret <vscale x 4 x i1> [[TMP0]]
+//
+vbool16_t test_vmflt_vf_bf16m1_b16_m(vbool16_t mask, vbfloat16m1_t op1, __bf16 op2, size_t vl) {
+ return __riscv_vmflt(mask, op1, op2, vl);
+}
+
+// CHECK-RV64-LABEL: define dso_local <vscale x 8 x i1> @test_vmflt_vv_bf16m2_b8_m(
+// CHECK-RV64-SAME: <vscale x 8 x i1> [[MASK:%.*]], <vscale x 8 x bfloat> [[OP1:%.*]], <vscale x 8 x bfloat> [[OP2:%.*]], i64 noundef [[VL:%.*]]) #[[ATTR0]] {
+// CHECK-RV64-NEXT: [[ENTRY:.*:]]
+// CHECK-RV64-NEXT: [[TMP0:%.*]] = call <vscale x 8 x i1> @llvm.riscv.vmflt.mask.nxv8bf16.nxv8bf16.i64(<vscale x 8 x i1> poison, <vscale x 8 x bfloat> [[OP1]], <vscale x 8 x bfloat> [[OP2]], <vscale x 8 x i1> [[MASK]], i64 [[VL]])
+// CHECK-RV64-NEXT: ret <vscale x 8 x i1> [[TMP0]]
+//
+vbool8_t test_vmflt_vv_bf16m2_b8_m(vbool8_t mask, vbfloat16m2_t op1, vbfloat16m2_t op2, size_t vl) {
+ return __riscv_vmflt(mask, op1, op2, vl);
+}
+
+// CHECK-RV64-LABEL: define dso_local <vscale x 8 x i1> @test_vmflt_vf_bf16m2_b8_m(
+// CHECK-RV64-SAME: <vscale x 8 x i1> [[MASK:%.*]], <vscale x 8 x bfloat> [[OP1:%.*]], bfloat noundef [[OP2:%.*]], i64 noundef [[VL:%.*]]) #[[ATTR0]] {
+// CHECK-RV64-NEXT: [[ENTRY:.*:]]
+// CHECK-RV64-NEXT: [[TMP0:%.*]] = call <vscale x 8 x i1> @llvm.riscv.vmflt.mask.nxv8bf16.bf16.i64(<vscale x 8 x i1> poison, <vscale x 8 x bfloat> [[OP1]], bfloat [[OP2]], <vscale x 8 x i1> [[MASK]], i64 [[VL]])
+// CHECK-RV64-NEXT: ret <vscale x 8 x i1> [[TMP0]]
+//
+vbool8_t test_vmflt_vf_bf16m2_b8_m(vbool8_t mask, vbfloat16m2_t op1, __bf16 op2, size_t vl) {
+ return __riscv_vmflt(mask, op1, op2, vl);
+}
+
+// CHECK-RV64-LABEL: define dso_local <vscale x 16 x i1> @test_vmflt_vv_bf16m4_b4_m(
+// CHECK-RV64-SAME: <vscale x 16 x i1> [[MASK:%.*]], <vscale x 16 x bfloat> [[OP1:%.*]], <vscale x 16 x bfloat> [[OP2:%.*]], i64 noundef [[VL:%.*]]) #[[ATTR0]] {
+// CHECK-RV64-NEXT: [[ENTRY:.*:]]
+// CHECK-RV64-NEXT: [[TMP0:%.*]] = call <vscale x 16 x i1> @llvm.riscv.vmflt.mask.nxv16bf16.nxv16bf16.i64(<vscale x 16 x i1> poison, <vscale x 16 x bfloat> [[OP1]], <vscale x 16 x bfloat> [[OP2]], <vscale x 16 x i1> [[MASK]], i64 [[VL]])
+// CHECK-RV64-NEXT: ret <vscale x 16 x i1> [[TMP0]]
+//
+vbool4_t test_vmflt_vv_bf16m4_b4_m(vbool4_t mask, vbfloat16m4_t op1, vbfloat16m4_t op2, size_t vl) {
+ return __riscv_vmflt(mask, op1, op2, vl);
+}
+
+// CHECK-RV64-LABEL: define dso_local <vscale x 16 x i1> @test_vmflt_vf_bf16m4_b4_m(
+// CHECK-RV64-SAME: <vscale x 16 x i1> [[MASK:%.*]], <vscale x 16 x bfloat> [[OP1:%.*]], bfloat noundef [[OP2:%.*]], i64 noundef [[VL:%.*]]) #[[ATTR0]] {
+// CHECK-RV64-NEXT: [[ENTRY:.*:]]
+// CHECK-RV64-NEXT: [[TMP0:%.*]] = call <vscale x 16 x i1> @llvm.riscv.vmflt.mask.nxv16bf16.bf16.i64(<vscale x 16 x i1> poison, <vscale x 16 x bfloat> [[OP1]], bfloat [[OP2]], <vscale x 16 x i1> [[MASK]], i64 [[VL]])
+// CHECK-RV64-NEXT: ret <vscale x 16 x i1> [[TMP0]]
+//
+vbool4_t test_vmflt_vf_bf16m4_b4_m(vbool4_t mask, vbfloat16m4_t op1, __bf16 op2, size_t vl) {
+ return __riscv_vmflt(mask, op1, op2, vl);
+}
+
+// CHECK-RV64-LABEL: define dso_local <vscale x 32 x i1> @test_vmflt_vv_bf16m8_b2_m(
+// CHECK-RV64-SAME: <vscale x 32 x i1> [[MASK:%.*]], <vscale x 32 x bfloat> [[OP1:%.*]], <vscale x 32 x bfloat> [[OP2:%.*]], i64 noundef [[VL:%.*]]) #[[ATTR0]] {
+// CHECK-RV64-NEXT: [[ENTRY:.*:]]
+// CHECK-RV64-NEXT: [[TMP0:%.*]] = call <vscale x 32 x i1> @llvm.riscv.vmflt.mask.nxv32bf16.nxv32bf16.i64(<vscale x 32 x i1> poison, <vscale x 32 x bfloat> [[OP1]], <vscale x 32 x bfloat> [[OP2]], <vscale x 32 x i1> [[MASK]], i64 [[VL]])
+// CHECK-RV64-NEXT: ret <vscale x 32 x i1> [[TMP0]]
+//
+vbool2_t test_vmflt_vv_bf16m8_b2_m(vbool2_t mask, vbfloat16m8_t op1, vbfloat16m8_t op2, size_t vl) {
+ return __riscv_vmflt(mask, op1, op2, vl);
+}
+
+// CHECK-RV64-LABEL: define dso_local <vscale x 32 x i1> @test_vmflt_vf_bf16m8_b2_m(
+// CHECK-RV64-SAME: <vscale x 32 x i1> [[MASK:%.*]], <vscale x 32 x bfloat> [[OP1:%.*]], bfloat noundef [[OP2:%.*]], i64 noundef [[VL:%.*]]) #[[ATTR0]] {
+// CHECK-RV64-NEXT: [[ENTRY:.*:]]
+// CHECK-RV64-NEXT: [[TMP0:%.*]] = call <vscale x 32 x i1> @llvm.riscv.vmflt.mask.nxv32bf16.bf16.i64(<vscale x 32 x i1> poison, <vscale x 32 x bfloat> [[OP1]], bfloat [[OP2]], <vscale x 32 x i1> [[MASK]], i64 [[VL]])
+// CHECK-RV64-NEXT: ret <vscale x 32 x i1> [[TMP0]]
+//
+vbool2_t test_vmflt_vf_bf16m8_b2_m(vbool2_t mask, vbfloat16m8_t op1, __bf16 op2, size_t vl) {
+ return __riscv_vmflt(mask, op1, op2, vl);
+}
+
diff --git a/clang/test/CodeGen/RISCV/rvv-intrinsics-autogenerated/zvfbfa/non-policy/overloaded/vmfne.c b/clang/test/CodeGen/RISCV/rvv-intrinsics-autogenerated/zvfbfa/non-policy/overloaded/vmfne.c
new file mode 100644
index 0000000..672c150
--- /dev/null
+++ b/clang/test/CodeGen/RISCV/rvv-intrinsics-autogenerated/zvfbfa/non-policy/overloaded/vmfne.c
@@ -0,0 +1,249 @@
+// NOTE: Assertions have been autogenerated by utils/update_cc_test_checks.py UTC_ARGS: --version 5
+// REQUIRES: riscv-registered-target
+// RUN: %clang_cc1 -triple riscv64 -target-feature +v \
+// RUN: -target-feature +experimental-zvfbfa -disable-O0-optnone \
+// RUN: -emit-llvm %s -o - | opt -S -passes=mem2reg | \
+// RUN: FileCheck --check-prefix=CHECK-RV64 %s
+
+#include <riscv_vector.h>
+
+// CHECK-RV64-LABEL: define dso_local <vscale x 1 x i1> @test_vmfne_vv_bf16mf4_b64(
+// CHECK-RV64-SAME: <vscale x 1 x bfloat> [[OP1:%.*]], <vscale x 1 x bfloat> [[OP2:%.*]], i64 noundef [[VL:%.*]]) #[[ATTR0:[0-9]+]] {
+// CHECK-RV64-NEXT: [[ENTRY:.*:]]
+// CHECK-RV64-NEXT: [[TMP0:%.*]] = call <vscale x 1 x i1> @llvm.riscv.vmfne.nxv1bf16.nxv1bf16.i64(<vscale x 1 x bfloat> [[OP1]], <vscale x 1 x bfloat> [[OP2]], i64 [[VL]])
+// CHECK-RV64-NEXT: ret <vscale x 1 x i1> [[TMP0]]
+//
+vbool64_t test_vmfne_vv_bf16mf4_b64(vbfloat16mf4_t op1, vbfloat16mf4_t op2, size_t vl) {
+ return __riscv_vmfne(op1, op2, vl);
+}
+
+// CHECK-RV64-LABEL: define dso_local <vscale x 1 x i1> @test_vmfne_vf_bf16mf4_b64(
+// CHECK-RV64-SAME: <vscale x 1 x bfloat> [[OP1:%.*]], bfloat noundef [[OP2:%.*]], i64 noundef [[VL:%.*]]) #[[ATTR0]] {
+// CHECK-RV64-NEXT: [[ENTRY:.*:]]
+// CHECK-RV64-NEXT: [[TMP0:%.*]] = call <vscale x 1 x i1> @llvm.riscv.vmfne.nxv1bf16.bf16.i64(<vscale x 1 x bfloat> [[OP1]], bfloat [[OP2]], i64 [[VL]])
+// CHECK-RV64-NEXT: ret <vscale x 1 x i1> [[TMP0]]
+//
+vbool64_t test_vmfne_vf_bf16mf4_b64(vbfloat16mf4_t op1, __bf16 op2, size_t vl) {
+ return __riscv_vmfne(op1, op2, vl);
+}
+
+// CHECK-RV64-LABEL: define dso_local <vscale x 2 x i1> @test_vmfne_vv_bf16mf2_b32(
+// CHECK-RV64-SAME: <vscale x 2 x bfloat> [[OP1:%.*]], <vscale x 2 x bfloat> [[OP2:%.*]], i64 noundef [[VL:%.*]]) #[[ATTR0]] {
+// CHECK-RV64-NEXT: [[ENTRY:.*:]]
+// CHECK-RV64-NEXT: [[TMP0:%.*]] = call <vscale x 2 x i1> @llvm.riscv.vmfne.nxv2bf16.nxv2bf16.i64(<vscale x 2 x bfloat> [[OP1]], <vscale x 2 x bfloat> [[OP2]], i64 [[VL]])
+// CHECK-RV64-NEXT: ret <vscale x 2 x i1> [[TMP0]]
+//
+vbool32_t test_vmfne_vv_bf16mf2_b32(vbfloat16mf2_t op1, vbfloat16mf2_t op2, size_t vl) {
+ return __riscv_vmfne(op1, op2, vl);
+}
+
+// CHECK-RV64-LABEL: define dso_local <vscale x 2 x i1> @test_vmfne_vf_bf16mf2_b32(
+// CHECK-RV64-SAME: <vscale x 2 x bfloat> [[OP1:%.*]], bfloat noundef [[OP2:%.*]], i64 noundef [[VL:%.*]]) #[[ATTR0]] {
+// CHECK-RV64-NEXT: [[ENTRY:.*:]]
+// CHECK-RV64-NEXT: [[TMP0:%.*]] = call <vscale x 2 x i1> @llvm.riscv.vmfne.nxv2bf16.bf16.i64(<vscale x 2 x bfloat> [[OP1]], bfloat [[OP2]], i64 [[VL]])
+// CHECK-RV64-NEXT: ret <vscale x 2 x i1> [[TMP0]]
+//
+vbool32_t test_vmfne_vf_bf16mf2_b32(vbfloat16mf2_t op1, __bf16 op2, size_t vl) {
+ return __riscv_vmfne(op1, op2, vl);
+}
+
+// CHECK-RV64-LABEL: define dso_local <vscale x 4 x i1> @test_vmfne_vv_bf16m1_b16(
+// CHECK-RV64-SAME: <vscale x 4 x bfloat> [[OP1:%.*]], <vscale x 4 x bfloat> [[OP2:%.*]], i64 noundef [[VL:%.*]]) #[[ATTR0]] {
+// CHECK-RV64-NEXT: [[ENTRY:.*:]]
+// CHECK-RV64-NEXT: [[TMP0:%.*]] = call <vscale x 4 x i1> @llvm.riscv.vmfne.nxv4bf16.nxv4bf16.i64(<vscale x 4 x bfloat> [[OP1]], <vscale x 4 x bfloat> [[OP2]], i64 [[VL]])
+// CHECK-RV64-NEXT: ret <vscale x 4 x i1> [[TMP0]]
+//
+vbool16_t test_vmfne_vv_bf16m1_b16(vbfloat16m1_t op1, vbfloat16m1_t op2, size_t vl) {
+ return __riscv_vmfne(op1, op2, vl);
+}
+
+// CHECK-RV64-LABEL: define dso_local <vscale x 4 x i1> @test_vmfne_vf_bf16m1_b16(
+// CHECK-RV64-SAME: <vscale x 4 x bfloat> [[OP1:%.*]], bfloat noundef [[OP2:%.*]], i64 noundef [[VL:%.*]]) #[[ATTR0]] {
+// CHECK-RV64-NEXT: [[ENTRY:.*:]]
+// CHECK-RV64-NEXT: [[TMP0:%.*]] = call <vscale x 4 x i1> @llvm.riscv.vmfne.nxv4bf16.bf16.i64(<vscale x 4 x bfloat> [[OP1]], bfloat [[OP2]], i64 [[VL]])
+// CHECK-RV64-NEXT: ret <vscale x 4 x i1> [[TMP0]]
+//
+vbool16_t test_vmfne_vf_bf16m1_b16(vbfloat16m1_t op1, __bf16 op2, size_t vl) {
+ return __riscv_vmfne(op1, op2, vl);
+}
+
+// CHECK-RV64-LABEL: define dso_local <vscale x 8 x i1> @test_vmfne_vv_bf16m2_b8(
+// CHECK-RV64-SAME: <vscale x 8 x bfloat> [[OP1:%.*]], <vscale x 8 x bfloat> [[OP2:%.*]], i64 noundef [[VL:%.*]]) #[[ATTR0]] {
+// CHECK-RV64-NEXT: [[ENTRY:.*:]]
+// CHECK-RV64-NEXT: [[TMP0:%.*]] = call <vscale x 8 x i1> @llvm.riscv.vmfne.nxv8bf16.nxv8bf16.i64(<vscale x 8 x bfloat> [[OP1]], <vscale x 8 x bfloat> [[OP2]], i64 [[VL]])
+// CHECK-RV64-NEXT: ret <vscale x 8 x i1> [[TMP0]]
+//
+vbool8_t test_vmfne_vv_bf16m2_b8(vbfloat16m2_t op1, vbfloat16m2_t op2, size_t vl) {
+ return __riscv_vmfne(op1, op2, vl);
+}
+
+// CHECK-RV64-LABEL: define dso_local <vscale x 8 x i1> @test_vmfne_vf_bf16m2_b8(
+// CHECK-RV64-SAME: <vscale x 8 x bfloat> [[OP1:%.*]], bfloat noundef [[OP2:%.*]], i64 noundef [[VL:%.*]]) #[[ATTR0]] {
+// CHECK-RV64-NEXT: [[ENTRY:.*:]]
+// CHECK-RV64-NEXT: [[TMP0:%.*]] = call <vscale x 8 x i1> @llvm.riscv.vmfne.nxv8bf16.bf16.i64(<vscale x 8 x bfloat> [[OP1]], bfloat [[OP2]], i64 [[VL]])
+// CHECK-RV64-NEXT: ret <vscale x 8 x i1> [[TMP0]]
+//
+vbool8_t test_vmfne_vf_bf16m2_b8(vbfloat16m2_t op1, __bf16 op2, size_t vl) {
+ return __riscv_vmfne(op1, op2, vl);
+}
+
+// CHECK-RV64-LABEL: define dso_local <vscale x 16 x i1> @test_vmfne_vv_bf16m4_b4(
+// CHECK-RV64-SAME: <vscale x 16 x bfloat> [[OP1:%.*]], <vscale x 16 x bfloat> [[OP2:%.*]], i64 noundef [[VL:%.*]]) #[[ATTR0]] {
+// CHECK-RV64-NEXT: [[ENTRY:.*:]]
+// CHECK-RV64-NEXT: [[TMP0:%.*]] = call <vscale x 16 x i1> @llvm.riscv.vmfne.nxv16bf16.nxv16bf16.i64(<vscale x 16 x bfloat> [[OP1]], <vscale x 16 x bfloat> [[OP2]], i64 [[VL]])
+// CHECK-RV64-NEXT: ret <vscale x 16 x i1> [[TMP0]]
+//
+vbool4_t test_vmfne_vv_bf16m4_b4(vbfloat16m4_t op1, vbfloat16m4_t op2, size_t vl) {
+ return __riscv_vmfne(op1, op2, vl);
+}
+
+// CHECK-RV64-LABEL: define dso_local <vscale x 16 x i1> @test_vmfne_vf_bf16m4_b4(
+// CHECK-RV64-SAME: <vscale x 16 x bfloat> [[OP1:%.*]], bfloat noundef [[OP2:%.*]], i64 noundef [[VL:%.*]]) #[[ATTR0]] {
+// CHECK-RV64-NEXT: [[ENTRY:.*:]]
+// CHECK-RV64-NEXT: [[TMP0:%.*]] = call <vscale x 16 x i1> @llvm.riscv.vmfne.nxv16bf16.bf16.i64(<vscale x 16 x bfloat> [[OP1]], bfloat [[OP2]], i64 [[VL]])
+// CHECK-RV64-NEXT: ret <vscale x 16 x i1> [[TMP0]]
+//
+vbool4_t test_vmfne_vf_bf16m4_b4(vbfloat16m4_t op1, __bf16 op2, size_t vl) {
+ return __riscv_vmfne(op1, op2, vl);
+}
+
+// CHECK-RV64-LABEL: define dso_local <vscale x 32 x i1> @test_vmfne_vv_bf16m8_b2(
+// CHECK-RV64-SAME: <vscale x 32 x bfloat> [[OP1:%.*]], <vscale x 32 x bfloat> [[OP2:%.*]], i64 noundef [[VL:%.*]]) #[[ATTR0]] {
+// CHECK-RV64-NEXT: [[ENTRY:.*:]]
+// CHECK-RV64-NEXT: [[TMP0:%.*]] = call <vscale x 32 x i1> @llvm.riscv.vmfne.nxv32bf16.nxv32bf16.i64(<vscale x 32 x bfloat> [[OP1]], <vscale x 32 x bfloat> [[OP2]], i64 [[VL]])
+// CHECK-RV64-NEXT: ret <vscale x 32 x i1> [[TMP0]]
+//
+vbool2_t test_vmfne_vv_bf16m8_b2(vbfloat16m8_t op1, vbfloat16m8_t op2, size_t vl) {
+ return __riscv_vmfne(op1, op2, vl);
+}
+
+// CHECK-RV64-LABEL: define dso_local <vscale x 32 x i1> @test_vmfne_vf_bf16m8_b2(
+// CHECK-RV64-SAME: <vscale x 32 x bfloat> [[OP1:%.*]], bfloat noundef [[OP2:%.*]], i64 noundef [[VL:%.*]]) #[[ATTR0]] {
+// CHECK-RV64-NEXT: [[ENTRY:.*:]]
+// CHECK-RV64-NEXT: [[TMP0:%.*]] = call <vscale x 32 x i1> @llvm.riscv.vmfne.nxv32bf16.bf16.i64(<vscale x 32 x bfloat> [[OP1]], bfloat [[OP2]], i64 [[VL]])
+// CHECK-RV64-NEXT: ret <vscale x 32 x i1> [[TMP0]]
+//
+vbool2_t test_vmfne_vf_bf16m8_b2(vbfloat16m8_t op1, __bf16 op2, size_t vl) {
+ return __riscv_vmfne(op1, op2, vl);
+}
+
+// CHECK-RV64-LABEL: define dso_local <vscale x 1 x i1> @test_vmfne_vv_bf16mf4_b64_m(
+// CHECK-RV64-SAME: <vscale x 1 x i1> [[MASK:%.*]], <vscale x 1 x bfloat> [[OP1:%.*]], <vscale x 1 x bfloat> [[OP2:%.*]], i64 noundef [[VL:%.*]]) #[[ATTR0]] {
+// CHECK-RV64-NEXT: [[ENTRY:.*:]]
+// CHECK-RV64-NEXT: [[TMP0:%.*]] = call <vscale x 1 x i1> @llvm.riscv.vmfne.mask.nxv1bf16.nxv1bf16.i64(<vscale x 1 x i1> poison, <vscale x 1 x bfloat> [[OP1]], <vscale x 1 x bfloat> [[OP2]], <vscale x 1 x i1> [[MASK]], i64 [[VL]])
+// CHECK-RV64-NEXT: ret <vscale x 1 x i1> [[TMP0]]
+//
+vbool64_t test_vmfne_vv_bf16mf4_b64_m(vbool64_t mask, vbfloat16mf4_t op1, vbfloat16mf4_t op2, size_t vl) {
+ return __riscv_vmfne(mask, op1, op2, vl);
+}
+
+// CHECK-RV64-LABEL: define dso_local <vscale x 1 x i1> @test_vmfne_vf_bf16mf4_b64_m(
+// CHECK-RV64-SAME: <vscale x 1 x i1> [[MASK:%.*]], <vscale x 1 x bfloat> [[OP1:%.*]], bfloat noundef [[OP2:%.*]], i64 noundef [[VL:%.*]]) #[[ATTR0]] {
+// CHECK-RV64-NEXT: [[ENTRY:.*:]]
+// CHECK-RV64-NEXT: [[TMP0:%.*]] = call <vscale x 1 x i1> @llvm.riscv.vmfne.mask.nxv1bf16.bf16.i64(<vscale x 1 x i1> poison, <vscale x 1 x bfloat> [[OP1]], bfloat [[OP2]], <vscale x 1 x i1> [[MASK]], i64 [[VL]])
+// CHECK-RV64-NEXT: ret <vscale x 1 x i1> [[TMP0]]
+//
+vbool64_t test_vmfne_vf_bf16mf4_b64_m(vbool64_t mask, vbfloat16mf4_t op1, __bf16 op2, size_t vl) {
+ return __riscv_vmfne(mask, op1, op2, vl);
+}
+
+// CHECK-RV64-LABEL: define dso_local <vscale x 2 x i1> @test_vmfne_vv_bf16mf2_b32_m(
+// CHECK-RV64-SAME: <vscale x 2 x i1> [[MASK:%.*]], <vscale x 2 x bfloat> [[OP1:%.*]], <vscale x 2 x bfloat> [[OP2:%.*]], i64 noundef [[VL:%.*]]) #[[ATTR0]] {
+// CHECK-RV64-NEXT: [[ENTRY:.*:]]
+// CHECK-RV64-NEXT: [[TMP0:%.*]] = call <vscale x 2 x i1> @llvm.riscv.vmfne.mask.nxv2bf16.nxv2bf16.i64(<vscale x 2 x i1> poison, <vscale x 2 x bfloat> [[OP1]], <vscale x 2 x bfloat> [[OP2]], <vscale x 2 x i1> [[MASK]], i64 [[VL]])
+// CHECK-RV64-NEXT: ret <vscale x 2 x i1> [[TMP0]]
+//
+vbool32_t test_vmfne_vv_bf16mf2_b32_m(vbool32_t mask, vbfloat16mf2_t op1, vbfloat16mf2_t op2, size_t vl) {
+ return __riscv_vmfne(mask, op1, op2, vl);
+}
+
+// CHECK-RV64-LABEL: define dso_local <vscale x 2 x i1> @test_vmfne_vf_bf16mf2_b32_m(
+// CHECK-RV64-SAME: <vscale x 2 x i1> [[MASK:%.*]], <vscale x 2 x bfloat> [[OP1:%.*]], bfloat noundef [[OP2:%.*]], i64 noundef [[VL:%.*]]) #[[ATTR0]] {
+// CHECK-RV64-NEXT: [[ENTRY:.*:]]
+// CHECK-RV64-NEXT: [[TMP0:%.*]] = call <vscale x 2 x i1> @llvm.riscv.vmfne.mask.nxv2bf16.bf16.i64(<vscale x 2 x i1> poison, <vscale x 2 x bfloat> [[OP1]], bfloat [[OP2]], <vscale x 2 x i1> [[MASK]], i64 [[VL]])
+// CHECK-RV64-NEXT: ret <vscale x 2 x i1> [[TMP0]]
+//
+vbool32_t test_vmfne_vf_bf16mf2_b32_m(vbool32_t mask, vbfloat16mf2_t op1, __bf16 op2, size_t vl) {
+ return __riscv_vmfne(mask, op1, op2, vl);
+}
+
+// CHECK-RV64-LABEL: define dso_local <vscale x 4 x i1> @test_vmfne_vv_bf16m1_b16_m(
+// CHECK-RV64-SAME: <vscale x 4 x i1> [[MASK:%.*]], <vscale x 4 x bfloat> [[OP1:%.*]], <vscale x 4 x bfloat> [[OP2:%.*]], i64 noundef [[VL:%.*]]) #[[ATTR0]] {
+// CHECK-RV64-NEXT: [[ENTRY:.*:]]
+// CHECK-RV64-NEXT: [[TMP0:%.*]] = call <vscale x 4 x i1> @llvm.riscv.vmfne.mask.nxv4bf16.nxv4bf16.i64(<vscale x 4 x i1> poison, <vscale x 4 x bfloat> [[OP1]], <vscale x 4 x bfloat> [[OP2]], <vscale x 4 x i1> [[MASK]], i64 [[VL]])
+// CHECK-RV64-NEXT: ret <vscale x 4 x i1> [[TMP0]]
+//
+vbool16_t test_vmfne_vv_bf16m1_b16_m(vbool16_t mask, vbfloat16m1_t op1, vbfloat16m1_t op2, size_t vl) {
+ return __riscv_vmfne(mask, op1, op2, vl);
+}
+
+// CHECK-RV64-LABEL: define dso_local <vscale x 4 x i1> @test_vmfne_vf_bf16m1_b16_m(
+// CHECK-RV64-SAME: <vscale x 4 x i1> [[MASK:%.*]], <vscale x 4 x bfloat> [[OP1:%.*]], bfloat noundef [[OP2:%.*]], i64 noundef [[VL:%.*]]) #[[ATTR0]] {
+// CHECK-RV64-NEXT: [[ENTRY:.*:]]
+// CHECK-RV64-NEXT: [[TMP0:%.*]] = call <vscale x 4 x i1> @llvm.riscv.vmfne.mask.nxv4bf16.bf16.i64(<vscale x 4 x i1> poison, <vscale x 4 x bfloat> [[OP1]], bfloat [[OP2]], <vscale x 4 x i1> [[MASK]], i64 [[VL]])
+// CHECK-RV64-NEXT: ret <vscale x 4 x i1> [[TMP0]]
+//
+vbool16_t test_vmfne_vf_bf16m1_b16_m(vbool16_t mask, vbfloat16m1_t op1, __bf16 op2, size_t vl) {
+ return __riscv_vmfne(mask, op1, op2, vl);
+}
+
+// CHECK-RV64-LABEL: define dso_local <vscale x 8 x i1> @test_vmfne_vv_bf16m2_b8_m(
+// CHECK-RV64-SAME: <vscale x 8 x i1> [[MASK:%.*]], <vscale x 8 x bfloat> [[OP1:%.*]], <vscale x 8 x bfloat> [[OP2:%.*]], i64 noundef [[VL:%.*]]) #[[ATTR0]] {
+// CHECK-RV64-NEXT: [[ENTRY:.*:]]
+// CHECK-RV64-NEXT: [[TMP0:%.*]] = call <vscale x 8 x i1> @llvm.riscv.vmfne.mask.nxv8bf16.nxv8bf16.i64(<vscale x 8 x i1> poison, <vscale x 8 x bfloat> [[OP1]], <vscale x 8 x bfloat> [[OP2]], <vscale x 8 x i1> [[MASK]], i64 [[VL]])
+// CHECK-RV64-NEXT: ret <vscale x 8 x i1> [[TMP0]]
+//
+vbool8_t test_vmfne_vv_bf16m2_b8_m(vbool8_t mask, vbfloat16m2_t op1, vbfloat16m2_t op2, size_t vl) {
+ return __riscv_vmfne(mask, op1, op2, vl);
+}
+
+// CHECK-RV64-LABEL: define dso_local <vscale x 8 x i1> @test_vmfne_vf_bf16m2_b8_m(
+// CHECK-RV64-SAME: <vscale x 8 x i1> [[MASK:%.*]], <vscale x 8 x bfloat> [[OP1:%.*]], bfloat noundef [[OP2:%.*]], i64 noundef [[VL:%.*]]) #[[ATTR0]] {
+// CHECK-RV64-NEXT: [[ENTRY:.*:]]
+// CHECK-RV64-NEXT: [[TMP0:%.*]] = call <vscale x 8 x i1> @llvm.riscv.vmfne.mask.nxv8bf16.bf16.i64(<vscale x 8 x i1> poison, <vscale x 8 x bfloat> [[OP1]], bfloat [[OP2]], <vscale x 8 x i1> [[MASK]], i64 [[VL]])
+// CHECK-RV64-NEXT: ret <vscale x 8 x i1> [[TMP0]]
+//
+vbool8_t test_vmfne_vf_bf16m2_b8_m(vbool8_t mask, vbfloat16m2_t op1, __bf16 op2, size_t vl) {
+ return __riscv_vmfne(mask, op1, op2, vl);
+}
+
+// CHECK-RV64-LABEL: define dso_local <vscale x 16 x i1> @test_vmfne_vv_bf16m4_b4_m(
+// CHECK-RV64-SAME: <vscale x 16 x i1> [[MASK:%.*]], <vscale x 16 x bfloat> [[OP1:%.*]], <vscale x 16 x bfloat> [[OP2:%.*]], i64 noundef [[VL:%.*]]) #[[ATTR0]] {
+// CHECK-RV64-NEXT: [[ENTRY:.*:]]
+// CHECK-RV64-NEXT: [[TMP0:%.*]] = call <vscale x 16 x i1> @llvm.riscv.vmfne.mask.nxv16bf16.nxv16bf16.i64(<vscale x 16 x i1> poison, <vscale x 16 x bfloat> [[OP1]], <vscale x 16 x bfloat> [[OP2]], <vscale x 16 x i1> [[MASK]], i64 [[VL]])
+// CHECK-RV64-NEXT: ret <vscale x 16 x i1> [[TMP0]]
+//
+vbool4_t test_vmfne_vv_bf16m4_b4_m(vbool4_t mask, vbfloat16m4_t op1, vbfloat16m4_t op2, size_t vl) {
+ return __riscv_vmfne(mask, op1, op2, vl);
+}
+
+// CHECK-RV64-LABEL: define dso_local <vscale x 16 x i1> @test_vmfne_vf_bf16m4_b4_m(
+// CHECK-RV64-SAME: <vscale x 16 x i1> [[MASK:%.*]], <vscale x 16 x bfloat> [[OP1:%.*]], bfloat noundef [[OP2:%.*]], i64 noundef [[VL:%.*]]) #[[ATTR0]] {
+// CHECK-RV64-NEXT: [[ENTRY:.*:]]
+// CHECK-RV64-NEXT: [[TMP0:%.*]] = call <vscale x 16 x i1> @llvm.riscv.vmfne.mask.nxv16bf16.bf16.i64(<vscale x 16 x i1> poison, <vscale x 16 x bfloat> [[OP1]], bfloat [[OP2]], <vscale x 16 x i1> [[MASK]], i64 [[VL]])
+// CHECK-RV64-NEXT: ret <vscale x 16 x i1> [[TMP0]]
+//
+vbool4_t test_vmfne_vf_bf16m4_b4_m(vbool4_t mask, vbfloat16m4_t op1, __bf16 op2, size_t vl) {
+ return __riscv_vmfne(mask, op1, op2, vl);
+}
+
+// CHECK-RV64-LABEL: define dso_local <vscale x 32 x i1> @test_vmfne_vv_bf16m8_b2_m(
+// CHECK-RV64-SAME: <vscale x 32 x i1> [[MASK:%.*]], <vscale x 32 x bfloat> [[OP1:%.*]], <vscale x 32 x bfloat> [[OP2:%.*]], i64 noundef [[VL:%.*]]) #[[ATTR0]] {
+// CHECK-RV64-NEXT: [[ENTRY:.*:]]
+// CHECK-RV64-NEXT: [[TMP0:%.*]] = call <vscale x 32 x i1> @llvm.riscv.vmfne.mask.nxv32bf16.nxv32bf16.i64(<vscale x 32 x i1> poison, <vscale x 32 x bfloat> [[OP1]], <vscale x 32 x bfloat> [[OP2]], <vscale x 32 x i1> [[MASK]], i64 [[VL]])
+// CHECK-RV64-NEXT: ret <vscale x 32 x i1> [[TMP0]]
+//
+vbool2_t test_vmfne_vv_bf16m8_b2_m(vbool2_t mask, vbfloat16m8_t op1, vbfloat16m8_t op2, size_t vl) {
+ return __riscv_vmfne(mask, op1, op2, vl);
+}
+
+// CHECK-RV64-LABEL: define dso_local <vscale x 32 x i1> @test_vmfne_vf_bf16m8_b2_m(
+// CHECK-RV64-SAME: <vscale x 32 x i1> [[MASK:%.*]], <vscale x 32 x bfloat> [[OP1:%.*]], bfloat noundef [[OP2:%.*]], i64 noundef [[VL:%.*]]) #[[ATTR0]] {
+// CHECK-RV64-NEXT: [[ENTRY:.*:]]
+// CHECK-RV64-NEXT: [[TMP0:%.*]] = call <vscale x 32 x i1> @llvm.riscv.vmfne.mask.nxv32bf16.bf16.i64(<vscale x 32 x i1> poison, <vscale x 32 x bfloat> [[OP1]], bfloat [[OP2]], <vscale x 32 x i1> [[MASK]], i64 [[VL]])
+// CHECK-RV64-NEXT: ret <vscale x 32 x i1> [[TMP0]]
+//
+vbool2_t test_vmfne_vf_bf16m8_b2_m(vbool2_t mask, vbfloat16m8_t op1, __bf16 op2, size_t vl) {
+ return __riscv_vmfne(mask, op1, op2, vl);
+}
+
diff --git a/clang/test/CodeGen/RISCV/rvv-intrinsics-autogenerated/zvfbfa/policy/non-overloaded/vfadd.c b/clang/test/CodeGen/RISCV/rvv-intrinsics-autogenerated/zvfbfa/policy/non-overloaded/vfadd.c
new file mode 100644
index 0000000..6d55279
--- /dev/null
+++ b/clang/test/CodeGen/RISCV/rvv-intrinsics-autogenerated/zvfbfa/policy/non-overloaded/vfadd.c
@@ -0,0 +1,489 @@
+// NOTE: Assertions have been autogenerated by utils/update_cc_test_checks.py UTC_ARGS: --version 5
+// REQUIRES: riscv-registered-target
+// RUN: %clang_cc1 -triple riscv64 -target-feature +v \
+// RUN: -target-feature +experimental-zvfbfa -disable-O0-optnone \
+// RUN: -emit-llvm %s -o - | opt -S -passes=mem2reg | \
+// RUN: FileCheck --check-prefix=CHECK-RV64 %s
+
+#include <riscv_vector.h>
+
+// CHECK-RV64-LABEL: define dso_local <vscale x 1 x bfloat> @test_vfadd_vv_bf16mf4_tu(
+// CHECK-RV64-SAME: <vscale x 1 x bfloat> [[MASKEDOFF:%.*]], <vscale x 1 x bfloat> [[OP1:%.*]], <vscale x 1 x bfloat> [[OP2:%.*]], i64 noundef [[VL:%.*]]) #[[ATTR0:[0-9]+]] {
+// CHECK-RV64-NEXT: [[ENTRY:.*:]]
+// CHECK-RV64-NEXT: [[TMP0:%.*]] = call <vscale x 1 x bfloat> @llvm.riscv.vfadd.nxv1bf16.nxv1bf16.i64(<vscale x 1 x bfloat> [[MASKEDOFF]], <vscale x 1 x bfloat> [[OP1]], <vscale x 1 x bfloat> [[OP2]], i64 7, i64 [[VL]])
+// CHECK-RV64-NEXT: ret <vscale x 1 x bfloat> [[TMP0]]
+//
+vbfloat16mf4_t test_vfadd_vv_bf16mf4_tu(vbfloat16mf4_t maskedoff, vbfloat16mf4_t op1, vbfloat16mf4_t op2, size_t vl) {
+ return __riscv_vfadd_vv_bf16mf4_tu(maskedoff, op1, op2, vl);
+}
+
+// CHECK-RV64-LABEL: define dso_local <vscale x 1 x bfloat> @test_vfadd_vf_bf16mf4_tu(
+// CHECK-RV64-SAME: <vscale x 1 x bfloat> [[MASKEDOFF:%.*]], <vscale x 1 x bfloat> [[OP1:%.*]], bfloat noundef [[OP2:%.*]], i64 noundef [[VL:%.*]]) #[[ATTR0]] {
+// CHECK-RV64-NEXT: [[ENTRY:.*:]]
+// CHECK-RV64-NEXT: [[TMP0:%.*]] = call <vscale x 1 x bfloat> @llvm.riscv.vfadd.nxv1bf16.bf16.i64(<vscale x 1 x bfloat> [[MASKEDOFF]], <vscale x 1 x bfloat> [[OP1]], bfloat [[OP2]], i64 7, i64 [[VL]])
+// CHECK-RV64-NEXT: ret <vscale x 1 x bfloat> [[TMP0]]
+//
+vbfloat16mf4_t test_vfadd_vf_bf16mf4_tu(vbfloat16mf4_t maskedoff, vbfloat16mf4_t op1, __bf16 op2, size_t vl) {
+ return __riscv_vfadd_vf_bf16mf4_tu(maskedoff, op1, op2, vl);
+}
+
+// CHECK-RV64-LABEL: define dso_local <vscale x 2 x bfloat> @test_vfadd_vv_bf16mf2_tu(
+// CHECK-RV64-SAME: <vscale x 2 x bfloat> [[MASKEDOFF:%.*]], <vscale x 2 x bfloat> [[OP1:%.*]], <vscale x 2 x bfloat> [[OP2:%.*]], i64 noundef [[VL:%.*]]) #[[ATTR0]] {
+// CHECK-RV64-NEXT: [[ENTRY:.*:]]
+// CHECK-RV64-NEXT: [[TMP0:%.*]] = call <vscale x 2 x bfloat> @llvm.riscv.vfadd.nxv2bf16.nxv2bf16.i64(<vscale x 2 x bfloat> [[MASKEDOFF]], <vscale x 2 x bfloat> [[OP1]], <vscale x 2 x bfloat> [[OP2]], i64 7, i64 [[VL]])
+// CHECK-RV64-NEXT: ret <vscale x 2 x bfloat> [[TMP0]]
+//
+vbfloat16mf2_t test_vfadd_vv_bf16mf2_tu(vbfloat16mf2_t maskedoff, vbfloat16mf2_t op1, vbfloat16mf2_t op2, size_t vl) {
+ return __riscv_vfadd_vv_bf16mf2_tu(maskedoff, op1, op2, vl);
+}
+
+// CHECK-RV64-LABEL: define dso_local <vscale x 2 x bfloat> @test_vfadd_vf_bf16mf2_tu(
+// CHECK-RV64-SAME: <vscale x 2 x bfloat> [[MASKEDOFF:%.*]], <vscale x 2 x bfloat> [[OP1:%.*]], bfloat noundef [[OP2:%.*]], i64 noundef [[VL:%.*]]) #[[ATTR0]] {
+// CHECK-RV64-NEXT: [[ENTRY:.*:]]
+// CHECK-RV64-NEXT: [[TMP0:%.*]] = call <vscale x 2 x bfloat> @llvm.riscv.vfadd.nxv2bf16.bf16.i64(<vscale x 2 x bfloat> [[MASKEDOFF]], <vscale x 2 x bfloat> [[OP1]], bfloat [[OP2]], i64 7, i64 [[VL]])
+// CHECK-RV64-NEXT: ret <vscale x 2 x bfloat> [[TMP0]]
+//
+vbfloat16mf2_t test_vfadd_vf_bf16mf2_tu(vbfloat16mf2_t maskedoff, vbfloat16mf2_t op1, __bf16 op2, size_t vl) {
+ return __riscv_vfadd_vf_bf16mf2_tu(maskedoff, op1, op2, vl);
+}
+
+// CHECK-RV64-LABEL: define dso_local <vscale x 4 x bfloat> @test_vfadd_vv_bf16m1_tu(
+// CHECK-RV64-SAME: <vscale x 4 x bfloat> [[MASKEDOFF:%.*]], <vscale x 4 x bfloat> [[OP1:%.*]], <vscale x 4 x bfloat> [[OP2:%.*]], i64 noundef [[VL:%.*]]) #[[ATTR0]] {
+// CHECK-RV64-NEXT: [[ENTRY:.*:]]
+// CHECK-RV64-NEXT: [[TMP0:%.*]] = call <vscale x 4 x bfloat> @llvm.riscv.vfadd.nxv4bf16.nxv4bf16.i64(<vscale x 4 x bfloat> [[MASKEDOFF]], <vscale x 4 x bfloat> [[OP1]], <vscale x 4 x bfloat> [[OP2]], i64 7, i64 [[VL]])
+// CHECK-RV64-NEXT: ret <vscale x 4 x bfloat> [[TMP0]]
+//
+vbfloat16m1_t test_vfadd_vv_bf16m1_tu(vbfloat16m1_t maskedoff, vbfloat16m1_t op1, vbfloat16m1_t op2, size_t vl) {
+ return __riscv_vfadd_vv_bf16m1_tu(maskedoff, op1, op2, vl);
+}
+
+// CHECK-RV64-LABEL: define dso_local <vscale x 4 x bfloat> @test_vfadd_vf_bf16m1_tu(
+// CHECK-RV64-SAME: <vscale x 4 x bfloat> [[MASKEDOFF:%.*]], <vscale x 4 x bfloat> [[OP1:%.*]], bfloat noundef [[OP2:%.*]], i64 noundef [[VL:%.*]]) #[[ATTR0]] {
+// CHECK-RV64-NEXT: [[ENTRY:.*:]]
+// CHECK-RV64-NEXT: [[TMP0:%.*]] = call <vscale x 4 x bfloat> @llvm.riscv.vfadd.nxv4bf16.bf16.i64(<vscale x 4 x bfloat> [[MASKEDOFF]], <vscale x 4 x bfloat> [[OP1]], bfloat [[OP2]], i64 7, i64 [[VL]])
+// CHECK-RV64-NEXT: ret <vscale x 4 x bfloat> [[TMP0]]
+//
+vbfloat16m1_t test_vfadd_vf_bf16m1_tu(vbfloat16m1_t maskedoff, vbfloat16m1_t op1, __bf16 op2, size_t vl) {
+ return __riscv_vfadd_vf_bf16m1_tu(maskedoff, op1, op2, vl);
+}
+
+// CHECK-RV64-LABEL: define dso_local <vscale x 8 x bfloat> @test_vfadd_vv_bf16m2_tu(
+// CHECK-RV64-SAME: <vscale x 8 x bfloat> [[MASKEDOFF:%.*]], <vscale x 8 x bfloat> [[OP1:%.*]], <vscale x 8 x bfloat> [[OP2:%.*]], i64 noundef [[VL:%.*]]) #[[ATTR0]] {
+// CHECK-RV64-NEXT: [[ENTRY:.*:]]
+// CHECK-RV64-NEXT: [[TMP0:%.*]] = call <vscale x 8 x bfloat> @llvm.riscv.vfadd.nxv8bf16.nxv8bf16.i64(<vscale x 8 x bfloat> [[MASKEDOFF]], <vscale x 8 x bfloat> [[OP1]], <vscale x 8 x bfloat> [[OP2]], i64 7, i64 [[VL]])
+// CHECK-RV64-NEXT: ret <vscale x 8 x bfloat> [[TMP0]]
+//
+vbfloat16m2_t test_vfadd_vv_bf16m2_tu(vbfloat16m2_t maskedoff, vbfloat16m2_t op1, vbfloat16m2_t op2, size_t vl) {
+ return __riscv_vfadd_vv_bf16m2_tu(maskedoff, op1, op2, vl);
+}
+
+// CHECK-RV64-LABEL: define dso_local <vscale x 8 x bfloat> @test_vfadd_vf_bf16m2_tu(
+// CHECK-RV64-SAME: <vscale x 8 x bfloat> [[MASKEDOFF:%.*]], <vscale x 8 x bfloat> [[OP1:%.*]], bfloat noundef [[OP2:%.*]], i64 noundef [[VL:%.*]]) #[[ATTR0]] {
+// CHECK-RV64-NEXT: [[ENTRY:.*:]]
+// CHECK-RV64-NEXT: [[TMP0:%.*]] = call <vscale x 8 x bfloat> @llvm.riscv.vfadd.nxv8bf16.bf16.i64(<vscale x 8 x bfloat> [[MASKEDOFF]], <vscale x 8 x bfloat> [[OP1]], bfloat [[OP2]], i64 7, i64 [[VL]])
+// CHECK-RV64-NEXT: ret <vscale x 8 x bfloat> [[TMP0]]
+//
+vbfloat16m2_t test_vfadd_vf_bf16m2_tu(vbfloat16m2_t maskedoff, vbfloat16m2_t op1, __bf16 op2, size_t vl) {
+ return __riscv_vfadd_vf_bf16m2_tu(maskedoff, op1, op2, vl);
+}
+
+// CHECK-RV64-LABEL: define dso_local <vscale x 16 x bfloat> @test_vfadd_vv_bf16m4_tu(
+// CHECK-RV64-SAME: <vscale x 16 x bfloat> [[MASKEDOFF:%.*]], <vscale x 16 x bfloat> [[OP1:%.*]], <vscale x 16 x bfloat> [[OP2:%.*]], i64 noundef [[VL:%.*]]) #[[ATTR0]] {
+// CHECK-RV64-NEXT: [[ENTRY:.*:]]
+// CHECK-RV64-NEXT: [[TMP0:%.*]] = call <vscale x 16 x bfloat> @llvm.riscv.vfadd.nxv16bf16.nxv16bf16.i64(<vscale x 16 x bfloat> [[MASKEDOFF]], <vscale x 16 x bfloat> [[OP1]], <vscale x 16 x bfloat> [[OP2]], i64 7, i64 [[VL]])
+// CHECK-RV64-NEXT: ret <vscale x 16 x bfloat> [[TMP0]]
+//
+vbfloat16m4_t test_vfadd_vv_bf16m4_tu(vbfloat16m4_t maskedoff, vbfloat16m4_t op1, vbfloat16m4_t op2, size_t vl) {
+ return __riscv_vfadd_vv_bf16m4_tu(maskedoff, op1, op2, vl);
+}
+
+// CHECK-RV64-LABEL: define dso_local <vscale x 16 x bfloat> @test_vfadd_vf_bf16m4_tu(
+// CHECK-RV64-SAME: <vscale x 16 x bfloat> [[MASKEDOFF:%.*]], <vscale x 16 x bfloat> [[OP1:%.*]], bfloat noundef [[OP2:%.*]], i64 noundef [[VL:%.*]]) #[[ATTR0]] {
+// CHECK-RV64-NEXT: [[ENTRY:.*:]]
+// CHECK-RV64-NEXT: [[TMP0:%.*]] = call <vscale x 16 x bfloat> @llvm.riscv.vfadd.nxv16bf16.bf16.i64(<vscale x 16 x bfloat> [[MASKEDOFF]], <vscale x 16 x bfloat> [[OP1]], bfloat [[OP2]], i64 7, i64 [[VL]])
+// CHECK-RV64-NEXT: ret <vscale x 16 x bfloat> [[TMP0]]
+//
+vbfloat16m4_t test_vfadd_vf_bf16m4_tu(vbfloat16m4_t maskedoff, vbfloat16m4_t op1, __bf16 op2, size_t vl) {
+ return __riscv_vfadd_vf_bf16m4_tu(maskedoff, op1, op2, vl);
+}
+
+// CHECK-RV64-LABEL: define dso_local <vscale x 32 x bfloat> @test_vfadd_vv_bf16m8_tu(
+// CHECK-RV64-SAME: <vscale x 32 x bfloat> [[MASKEDOFF:%.*]], <vscale x 32 x bfloat> [[OP1:%.*]], <vscale x 32 x bfloat> [[OP2:%.*]], i64 noundef [[VL:%.*]]) #[[ATTR0]] {
+// CHECK-RV64-NEXT: [[ENTRY:.*:]]
+// CHECK-RV64-NEXT: [[TMP0:%.*]] = call <vscale x 32 x bfloat> @llvm.riscv.vfadd.nxv32bf16.nxv32bf16.i64(<vscale x 32 x bfloat> [[MASKEDOFF]], <vscale x 32 x bfloat> [[OP1]], <vscale x 32 x bfloat> [[OP2]], i64 7, i64 [[VL]])
+// CHECK-RV64-NEXT: ret <vscale x 32 x bfloat> [[TMP0]]
+//
+vbfloat16m8_t test_vfadd_vv_bf16m8_tu(vbfloat16m8_t maskedoff, vbfloat16m8_t op1, vbfloat16m8_t op2, size_t vl) {
+ return __riscv_vfadd_vv_bf16m8_tu(maskedoff, op1, op2, vl);
+}
+
+// CHECK-RV64-LABEL: define dso_local <vscale x 32 x bfloat> @test_vfadd_vf_bf16m8_tu(
+// CHECK-RV64-SAME: <vscale x 32 x bfloat> [[MASKEDOFF:%.*]], <vscale x 32 x bfloat> [[OP1:%.*]], bfloat noundef [[OP2:%.*]], i64 noundef [[VL:%.*]]) #[[ATTR0]] {
+// CHECK-RV64-NEXT: [[ENTRY:.*:]]
+// CHECK-RV64-NEXT: [[TMP0:%.*]] = call <vscale x 32 x bfloat> @llvm.riscv.vfadd.nxv32bf16.bf16.i64(<vscale x 32 x bfloat> [[MASKEDOFF]], <vscale x 32 x bfloat> [[OP1]], bfloat [[OP2]], i64 7, i64 [[VL]])
+// CHECK-RV64-NEXT: ret <vscale x 32 x bfloat> [[TMP0]]
+//
+vbfloat16m8_t test_vfadd_vf_bf16m8_tu(vbfloat16m8_t maskedoff, vbfloat16m8_t op1, __bf16 op2, size_t vl) {
+ return __riscv_vfadd_vf_bf16m8_tu(maskedoff, op1, op2, vl);
+}
+
+// CHECK-RV64-LABEL: define dso_local <vscale x 1 x bfloat> @test_vfadd_vv_bf16mf4_tum(
+// CHECK-RV64-SAME: <vscale x 1 x i1> [[MASK:%.*]], <vscale x 1 x bfloat> [[MASKEDOFF:%.*]], <vscale x 1 x bfloat> [[OP1:%.*]], <vscale x 1 x bfloat> [[OP2:%.*]], i64 noundef [[VL:%.*]]) #[[ATTR0]] {
+// CHECK-RV64-NEXT: [[ENTRY:.*:]]
+// CHECK-RV64-NEXT: [[TMP0:%.*]] = call <vscale x 1 x bfloat> @llvm.riscv.vfadd.mask.nxv1bf16.nxv1bf16.i64(<vscale x 1 x bfloat> [[MASKEDOFF]], <vscale x 1 x bfloat> [[OP1]], <vscale x 1 x bfloat> [[OP2]], <vscale x 1 x i1> [[MASK]], i64 7, i64 [[VL]], i64 2)
+// CHECK-RV64-NEXT: ret <vscale x 1 x bfloat> [[TMP0]]
+//
+vbfloat16mf4_t test_vfadd_vv_bf16mf4_tum(vbool64_t mask, vbfloat16mf4_t maskedoff, vbfloat16mf4_t op1, vbfloat16mf4_t op2, size_t vl) {
+ return __riscv_vfadd_vv_bf16mf4_tum(mask, maskedoff, op1, op2, vl);
+}
+
+// CHECK-RV64-LABEL: define dso_local <vscale x 1 x bfloat> @test_vfadd_vf_bf16mf4_tum(
+// CHECK-RV64-SAME: <vscale x 1 x i1> [[MASK:%.*]], <vscale x 1 x bfloat> [[MASKEDOFF:%.*]], <vscale x 1 x bfloat> [[OP1:%.*]], bfloat noundef [[OP2:%.*]], i64 noundef [[VL:%.*]]) #[[ATTR0]] {
+// CHECK-RV64-NEXT: [[ENTRY:.*:]]
+// CHECK-RV64-NEXT: [[TMP0:%.*]] = call <vscale x 1 x bfloat> @llvm.riscv.vfadd.mask.nxv1bf16.bf16.i64(<vscale x 1 x bfloat> [[MASKEDOFF]], <vscale x 1 x bfloat> [[OP1]], bfloat [[OP2]], <vscale x 1 x i1> [[MASK]], i64 7, i64 [[VL]], i64 2)
+// CHECK-RV64-NEXT: ret <vscale x 1 x bfloat> [[TMP0]]
+//
+vbfloat16mf4_t test_vfadd_vf_bf16mf4_tum(vbool64_t mask, vbfloat16mf4_t maskedoff, vbfloat16mf4_t op1, __bf16 op2, size_t vl) {
+ return __riscv_vfadd_vf_bf16mf4_tum(mask, maskedoff, op1, op2, vl);
+}
+
+// CHECK-RV64-LABEL: define dso_local <vscale x 2 x bfloat> @test_vfadd_vv_bf16mf2_tum(
+// CHECK-RV64-SAME: <vscale x 2 x i1> [[MASK:%.*]], <vscale x 2 x bfloat> [[MASKEDOFF:%.*]], <vscale x 2 x bfloat> [[OP1:%.*]], <vscale x 2 x bfloat> [[OP2:%.*]], i64 noundef [[VL:%.*]]) #[[ATTR0]] {
+// CHECK-RV64-NEXT: [[ENTRY:.*:]]
+// CHECK-RV64-NEXT: [[TMP0:%.*]] = call <vscale x 2 x bfloat> @llvm.riscv.vfadd.mask.nxv2bf16.nxv2bf16.i64(<vscale x 2 x bfloat> [[MASKEDOFF]], <vscale x 2 x bfloat> [[OP1]], <vscale x 2 x bfloat> [[OP2]], <vscale x 2 x i1> [[MASK]], i64 7, i64 [[VL]], i64 2)
+// CHECK-RV64-NEXT: ret <vscale x 2 x bfloat> [[TMP0]]
+//
+vbfloat16mf2_t test_vfadd_vv_bf16mf2_tum(vbool32_t mask, vbfloat16mf2_t maskedoff, vbfloat16mf2_t op1, vbfloat16mf2_t op2, size_t vl) {
+ return __riscv_vfadd_vv_bf16mf2_tum(mask, maskedoff, op1, op2, vl);
+}
+
+// CHECK-RV64-LABEL: define dso_local <vscale x 2 x bfloat> @test_vfadd_vf_bf16mf2_tum(
+// CHECK-RV64-SAME: <vscale x 2 x i1> [[MASK:%.*]], <vscale x 2 x bfloat> [[MASKEDOFF:%.*]], <vscale x 2 x bfloat> [[OP1:%.*]], bfloat noundef [[OP2:%.*]], i64 noundef [[VL:%.*]]) #[[ATTR0]] {
+// CHECK-RV64-NEXT: [[ENTRY:.*:]]
+// CHECK-RV64-NEXT: [[TMP0:%.*]] = call <vscale x 2 x bfloat> @llvm.riscv.vfadd.mask.nxv2bf16.bf16.i64(<vscale x 2 x bfloat> [[MASKEDOFF]], <vscale x 2 x bfloat> [[OP1]], bfloat [[OP2]], <vscale x 2 x i1> [[MASK]], i64 7, i64 [[VL]], i64 2)
+// CHECK-RV64-NEXT: ret <vscale x 2 x bfloat> [[TMP0]]
+//
+vbfloat16mf2_t test_vfadd_vf_bf16mf2_tum(vbool32_t mask, vbfloat16mf2_t maskedoff, vbfloat16mf2_t op1, __bf16 op2, size_t vl) {
+ return __riscv_vfadd_vf_bf16mf2_tum(mask, maskedoff, op1, op2, vl);
+}
+
+// CHECK-RV64-LABEL: define dso_local <vscale x 4 x bfloat> @test_vfadd_vv_bf16m1_tum(
+// CHECK-RV64-SAME: <vscale x 4 x i1> [[MASK:%.*]], <vscale x 4 x bfloat> [[MASKEDOFF:%.*]], <vscale x 4 x bfloat> [[OP1:%.*]], <vscale x 4 x bfloat> [[OP2:%.*]], i64 noundef [[VL:%.*]]) #[[ATTR0]] {
+// CHECK-RV64-NEXT: [[ENTRY:.*:]]
+// CHECK-RV64-NEXT: [[TMP0:%.*]] = call <vscale x 4 x bfloat> @llvm.riscv.vfadd.mask.nxv4bf16.nxv4bf16.i64(<vscale x 4 x bfloat> [[MASKEDOFF]], <vscale x 4 x bfloat> [[OP1]], <vscale x 4 x bfloat> [[OP2]], <vscale x 4 x i1> [[MASK]], i64 7, i64 [[VL]], i64 2)
+// CHECK-RV64-NEXT: ret <vscale x 4 x bfloat> [[TMP0]]
+//
+vbfloat16m1_t test_vfadd_vv_bf16m1_tum(vbool16_t mask, vbfloat16m1_t maskedoff, vbfloat16m1_t op1, vbfloat16m1_t op2, size_t vl) {
+ return __riscv_vfadd_vv_bf16m1_tum(mask, maskedoff, op1, op2, vl);
+}
+
+// CHECK-RV64-LABEL: define dso_local <vscale x 4 x bfloat> @test_vfadd_vf_bf16m1_tum(
+// CHECK-RV64-SAME: <vscale x 4 x i1> [[MASK:%.*]], <vscale x 4 x bfloat> [[MASKEDOFF:%.*]], <vscale x 4 x bfloat> [[OP1:%.*]], bfloat noundef [[OP2:%.*]], i64 noundef [[VL:%.*]]) #[[ATTR0]] {
+// CHECK-RV64-NEXT: [[ENTRY:.*:]]
+// CHECK-RV64-NEXT: [[TMP0:%.*]] = call <vscale x 4 x bfloat> @llvm.riscv.vfadd.mask.nxv4bf16.bf16.i64(<vscale x 4 x bfloat> [[MASKEDOFF]], <vscale x 4 x bfloat> [[OP1]], bfloat [[OP2]], <vscale x 4 x i1> [[MASK]], i64 7, i64 [[VL]], i64 2)
+// CHECK-RV64-NEXT: ret <vscale x 4 x bfloat> [[TMP0]]
+//
+vbfloat16m1_t test_vfadd_vf_bf16m1_tum(vbool16_t mask, vbfloat16m1_t maskedoff, vbfloat16m1_t op1, __bf16 op2, size_t vl) {
+ return __riscv_vfadd_vf_bf16m1_tum(mask, maskedoff, op1, op2, vl);
+}
+
+// CHECK-RV64-LABEL: define dso_local <vscale x 8 x bfloat> @test_vfadd_vv_bf16m2_tum(
+// CHECK-RV64-SAME: <vscale x 8 x i1> [[MASK:%.*]], <vscale x 8 x bfloat> [[MASKEDOFF:%.*]], <vscale x 8 x bfloat> [[OP1:%.*]], <vscale x 8 x bfloat> [[OP2:%.*]], i64 noundef [[VL:%.*]]) #[[ATTR0]] {
+// CHECK-RV64-NEXT: [[ENTRY:.*:]]
+// CHECK-RV64-NEXT: [[TMP0:%.*]] = call <vscale x 8 x bfloat> @llvm.riscv.vfadd.mask.nxv8bf16.nxv8bf16.i64(<vscale x 8 x bfloat> [[MASKEDOFF]], <vscale x 8 x bfloat> [[OP1]], <vscale x 8 x bfloat> [[OP2]], <vscale x 8 x i1> [[MASK]], i64 7, i64 [[VL]], i64 2)
+// CHECK-RV64-NEXT: ret <vscale x 8 x bfloat> [[TMP0]]
+//
+vbfloat16m2_t test_vfadd_vv_bf16m2_tum(vbool8_t mask, vbfloat16m2_t maskedoff, vbfloat16m2_t op1, vbfloat16m2_t op2, size_t vl) {
+ return __riscv_vfadd_vv_bf16m2_tum(mask, maskedoff, op1, op2, vl);
+}
+
+// CHECK-RV64-LABEL: define dso_local <vscale x 8 x bfloat> @test_vfadd_vf_bf16m2_tum(
+// CHECK-RV64-SAME: <vscale x 8 x i1> [[MASK:%.*]], <vscale x 8 x bfloat> [[MASKEDOFF:%.*]], <vscale x 8 x bfloat> [[OP1:%.*]], bfloat noundef [[OP2:%.*]], i64 noundef [[VL:%.*]]) #[[ATTR0]] {
+// CHECK-RV64-NEXT: [[ENTRY:.*:]]
+// CHECK-RV64-NEXT: [[TMP0:%.*]] = call <vscale x 8 x bfloat> @llvm.riscv.vfadd.mask.nxv8bf16.bf16.i64(<vscale x 8 x bfloat> [[MASKEDOFF]], <vscale x 8 x bfloat> [[OP1]], bfloat [[OP2]], <vscale x 8 x i1> [[MASK]], i64 7, i64 [[VL]], i64 2)
+// CHECK-RV64-NEXT: ret <vscale x 8 x bfloat> [[TMP0]]
+//
+vbfloat16m2_t test_vfadd_vf_bf16m2_tum(vbool8_t mask, vbfloat16m2_t maskedoff, vbfloat16m2_t op1, __bf16 op2, size_t vl) {
+ return __riscv_vfadd_vf_bf16m2_tum(mask, maskedoff, op1, op2, vl);
+}
+
+// CHECK-RV64-LABEL: define dso_local <vscale x 16 x bfloat> @test_vfadd_vv_bf16m4_tum(
+// CHECK-RV64-SAME: <vscale x 16 x i1> [[MASK:%.*]], <vscale x 16 x bfloat> [[MASKEDOFF:%.*]], <vscale x 16 x bfloat> [[OP1:%.*]], <vscale x 16 x bfloat> [[OP2:%.*]], i64 noundef [[VL:%.*]]) #[[ATTR0]] {
+// CHECK-RV64-NEXT: [[ENTRY:.*:]]
+// CHECK-RV64-NEXT: [[TMP0:%.*]] = call <vscale x 16 x bfloat> @llvm.riscv.vfadd.mask.nxv16bf16.nxv16bf16.i64(<vscale x 16 x bfloat> [[MASKEDOFF]], <vscale x 16 x bfloat> [[OP1]], <vscale x 16 x bfloat> [[OP2]], <vscale x 16 x i1> [[MASK]], i64 7, i64 [[VL]], i64 2)
+// CHECK-RV64-NEXT: ret <vscale x 16 x bfloat> [[TMP0]]
+//
+vbfloat16m4_t test_vfadd_vv_bf16m4_tum(vbool4_t mask, vbfloat16m4_t maskedoff, vbfloat16m4_t op1, vbfloat16m4_t op2, size_t vl) {
+ return __riscv_vfadd_vv_bf16m4_tum(mask, maskedoff, op1, op2, vl);
+}
+
+// CHECK-RV64-LABEL: define dso_local <vscale x 16 x bfloat> @test_vfadd_vf_bf16m4_tum(
+// CHECK-RV64-SAME: <vscale x 16 x i1> [[MASK:%.*]], <vscale x 16 x bfloat> [[MASKEDOFF:%.*]], <vscale x 16 x bfloat> [[OP1:%.*]], bfloat noundef [[OP2:%.*]], i64 noundef [[VL:%.*]]) #[[ATTR0]] {
+// CHECK-RV64-NEXT: [[ENTRY:.*:]]
+// CHECK-RV64-NEXT: [[TMP0:%.*]] = call <vscale x 16 x bfloat> @llvm.riscv.vfadd.mask.nxv16bf16.bf16.i64(<vscale x 16 x bfloat> [[MASKEDOFF]], <vscale x 16 x bfloat> [[OP1]], bfloat [[OP2]], <vscale x 16 x i1> [[MASK]], i64 7, i64 [[VL]], i64 2)
+// CHECK-RV64-NEXT: ret <vscale x 16 x bfloat> [[TMP0]]
+//
+vbfloat16m4_t test_vfadd_vf_bf16m4_tum(vbool4_t mask, vbfloat16m4_t maskedoff, vbfloat16m4_t op1, __bf16 op2, size_t vl) {
+ return __riscv_vfadd_vf_bf16m4_tum(mask, maskedoff, op1, op2, vl);
+}
+
+// CHECK-RV64-LABEL: define dso_local <vscale x 32 x bfloat> @test_vfadd_vv_bf16m8_tum(
+// CHECK-RV64-SAME: <vscale x 32 x i1> [[MASK:%.*]], <vscale x 32 x bfloat> [[MASKEDOFF:%.*]], <vscale x 32 x bfloat> [[OP1:%.*]], <vscale x 32 x bfloat> [[OP2:%.*]], i64 noundef [[VL:%.*]]) #[[ATTR0]] {
+// CHECK-RV64-NEXT: [[ENTRY:.*:]]
+// CHECK-RV64-NEXT: [[TMP0:%.*]] = call <vscale x 32 x bfloat> @llvm.riscv.vfadd.mask.nxv32bf16.nxv32bf16.i64(<vscale x 32 x bfloat> [[MASKEDOFF]], <vscale x 32 x bfloat> [[OP1]], <vscale x 32 x bfloat> [[OP2]], <vscale x 32 x i1> [[MASK]], i64 7, i64 [[VL]], i64 2)
+// CHECK-RV64-NEXT: ret <vscale x 32 x bfloat> [[TMP0]]
+//
+vbfloat16m8_t test_vfadd_vv_bf16m8_tum(vbool2_t mask, vbfloat16m8_t maskedoff, vbfloat16m8_t op1, vbfloat16m8_t op2, size_t vl) {
+ return __riscv_vfadd_vv_bf16m8_tum(mask, maskedoff, op1, op2, vl);
+}
+
+// CHECK-RV64-LABEL: define dso_local <vscale x 32 x bfloat> @test_vfadd_vf_bf16m8_tum(
+// CHECK-RV64-SAME: <vscale x 32 x i1> [[MASK:%.*]], <vscale x 32 x bfloat> [[MASKEDOFF:%.*]], <vscale x 32 x bfloat> [[OP1:%.*]], bfloat noundef [[OP2:%.*]], i64 noundef [[VL:%.*]]) #[[ATTR0]] {
+// CHECK-RV64-NEXT: [[ENTRY:.*:]]
+// CHECK-RV64-NEXT: [[TMP0:%.*]] = call <vscale x 32 x bfloat> @llvm.riscv.vfadd.mask.nxv32bf16.bf16.i64(<vscale x 32 x bfloat> [[MASKEDOFF]], <vscale x 32 x bfloat> [[OP1]], bfloat [[OP2]], <vscale x 32 x i1> [[MASK]], i64 7, i64 [[VL]], i64 2)
+// CHECK-RV64-NEXT: ret <vscale x 32 x bfloat> [[TMP0]]
+//
+vbfloat16m8_t test_vfadd_vf_bf16m8_tum(vbool2_t mask, vbfloat16m8_t maskedoff, vbfloat16m8_t op1, __bf16 op2, size_t vl) {
+ return __riscv_vfadd_vf_bf16m8_tum(mask, maskedoff, op1, op2, vl);
+}
+
+// CHECK-RV64-LABEL: define dso_local <vscale x 1 x bfloat> @test_vfadd_vv_bf16mf4_tumu(
+// CHECK-RV64-SAME: <vscale x 1 x i1> [[MASK:%.*]], <vscale x 1 x bfloat> [[MASKEDOFF:%.*]], <vscale x 1 x bfloat> [[OP1:%.*]], <vscale x 1 x bfloat> [[OP2:%.*]], i64 noundef [[VL:%.*]]) #[[ATTR0]] {
+// CHECK-RV64-NEXT: [[ENTRY:.*:]]
+// CHECK-RV64-NEXT: [[TMP0:%.*]] = call <vscale x 1 x bfloat> @llvm.riscv.vfadd.mask.nxv1bf16.nxv1bf16.i64(<vscale x 1 x bfloat> [[MASKEDOFF]], <vscale x 1 x bfloat> [[OP1]], <vscale x 1 x bfloat> [[OP2]], <vscale x 1 x i1> [[MASK]], i64 7, i64 [[VL]], i64 0)
+// CHECK-RV64-NEXT: ret <vscale x 1 x bfloat> [[TMP0]]
+//
+vbfloat16mf4_t test_vfadd_vv_bf16mf4_tumu(vbool64_t mask, vbfloat16mf4_t maskedoff, vbfloat16mf4_t op1, vbfloat16mf4_t op2, size_t vl) {
+ return __riscv_vfadd_vv_bf16mf4_tumu(mask, maskedoff, op1, op2, vl);
+}
+
+// CHECK-RV64-LABEL: define dso_local <vscale x 1 x bfloat> @test_vfadd_vf_bf16mf4_tumu(
+// CHECK-RV64-SAME: <vscale x 1 x i1> [[MASK:%.*]], <vscale x 1 x bfloat> [[MASKEDOFF:%.*]], <vscale x 1 x bfloat> [[OP1:%.*]], bfloat noundef [[OP2:%.*]], i64 noundef [[VL:%.*]]) #[[ATTR0]] {
+// CHECK-RV64-NEXT: [[ENTRY:.*:]]
+// CHECK-RV64-NEXT: [[TMP0:%.*]] = call <vscale x 1 x bfloat> @llvm.riscv.vfadd.mask.nxv1bf16.bf16.i64(<vscale x 1 x bfloat> [[MASKEDOFF]], <vscale x 1 x bfloat> [[OP1]], bfloat [[OP2]], <vscale x 1 x i1> [[MASK]], i64 7, i64 [[VL]], i64 0)
+// CHECK-RV64-NEXT: ret <vscale x 1 x bfloat> [[TMP0]]
+//
+vbfloat16mf4_t test_vfadd_vf_bf16mf4_tumu(vbool64_t mask, vbfloat16mf4_t maskedoff, vbfloat16mf4_t op1, __bf16 op2, size_t vl) {
+ return __riscv_vfadd_vf_bf16mf4_tumu(mask, maskedoff, op1, op2, vl);
+}
+
+// CHECK-RV64-LABEL: define dso_local <vscale x 2 x bfloat> @test_vfadd_vv_bf16mf2_tumu(
+// CHECK-RV64-SAME: <vscale x 2 x i1> [[MASK:%.*]], <vscale x 2 x bfloat> [[MASKEDOFF:%.*]], <vscale x 2 x bfloat> [[OP1:%.*]], <vscale x 2 x bfloat> [[OP2:%.*]], i64 noundef [[VL:%.*]]) #[[ATTR0]] {
+// CHECK-RV64-NEXT: [[ENTRY:.*:]]
+// CHECK-RV64-NEXT: [[TMP0:%.*]] = call <vscale x 2 x bfloat> @llvm.riscv.vfadd.mask.nxv2bf16.nxv2bf16.i64(<vscale x 2 x bfloat> [[MASKEDOFF]], <vscale x 2 x bfloat> [[OP1]], <vscale x 2 x bfloat> [[OP2]], <vscale x 2 x i1> [[MASK]], i64 7, i64 [[VL]], i64 0)
+// CHECK-RV64-NEXT: ret <vscale x 2 x bfloat> [[TMP0]]
+//
+vbfloat16mf2_t test_vfadd_vv_bf16mf2_tumu(vbool32_t mask, vbfloat16mf2_t maskedoff, vbfloat16mf2_t op1, vbfloat16mf2_t op2, size_t vl) {
+ return __riscv_vfadd_vv_bf16mf2_tumu(mask, maskedoff, op1, op2, vl);
+}
+
+// CHECK-RV64-LABEL: define dso_local <vscale x 2 x bfloat> @test_vfadd_vf_bf16mf2_tumu(
+// CHECK-RV64-SAME: <vscale x 2 x i1> [[MASK:%.*]], <vscale x 2 x bfloat> [[MASKEDOFF:%.*]], <vscale x 2 x bfloat> [[OP1:%.*]], bfloat noundef [[OP2:%.*]], i64 noundef [[VL:%.*]]) #[[ATTR0]] {
+// CHECK-RV64-NEXT: [[ENTRY:.*:]]
+// CHECK-RV64-NEXT: [[TMP0:%.*]] = call <vscale x 2 x bfloat> @llvm.riscv.vfadd.mask.nxv2bf16.bf16.i64(<vscale x 2 x bfloat> [[MASKEDOFF]], <vscale x 2 x bfloat> [[OP1]], bfloat [[OP2]], <vscale x 2 x i1> [[MASK]], i64 7, i64 [[VL]], i64 0)
+// CHECK-RV64-NEXT: ret <vscale x 2 x bfloat> [[TMP0]]
+//
+vbfloat16mf2_t test_vfadd_vf_bf16mf2_tumu(vbool32_t mask, vbfloat16mf2_t maskedoff, vbfloat16mf2_t op1, __bf16 op2, size_t vl) {
+ return __riscv_vfadd_vf_bf16mf2_tumu(mask, maskedoff, op1, op2, vl);
+}
+
+// CHECK-RV64-LABEL: define dso_local <vscale x 4 x bfloat> @test_vfadd_vv_bf16m1_tumu(
+// CHECK-RV64-SAME: <vscale x 4 x i1> [[MASK:%.*]], <vscale x 4 x bfloat> [[MASKEDOFF:%.*]], <vscale x 4 x bfloat> [[OP1:%.*]], <vscale x 4 x bfloat> [[OP2:%.*]], i64 noundef [[VL:%.*]]) #[[ATTR0]] {
+// CHECK-RV64-NEXT: [[ENTRY:.*:]]
+// CHECK-RV64-NEXT: [[TMP0:%.*]] = call <vscale x 4 x bfloat> @llvm.riscv.vfadd.mask.nxv4bf16.nxv4bf16.i64(<vscale x 4 x bfloat> [[MASKEDOFF]], <vscale x 4 x bfloat> [[OP1]], <vscale x 4 x bfloat> [[OP2]], <vscale x 4 x i1> [[MASK]], i64 7, i64 [[VL]], i64 0)
+// CHECK-RV64-NEXT: ret <vscale x 4 x bfloat> [[TMP0]]
+//
+vbfloat16m1_t test_vfadd_vv_bf16m1_tumu(vbool16_t mask, vbfloat16m1_t maskedoff, vbfloat16m1_t op1, vbfloat16m1_t op2, size_t vl) {
+ return __riscv_vfadd_vv_bf16m1_tumu(mask, maskedoff, op1, op2, vl);
+}
+
+// CHECK-RV64-LABEL: define dso_local <vscale x 4 x bfloat> @test_vfadd_vf_bf16m1_tumu(
+// CHECK-RV64-SAME: <vscale x 4 x i1> [[MASK:%.*]], <vscale x 4 x bfloat> [[MASKEDOFF:%.*]], <vscale x 4 x bfloat> [[OP1:%.*]], bfloat noundef [[OP2:%.*]], i64 noundef [[VL:%.*]]) #[[ATTR0]] {
+// CHECK-RV64-NEXT: [[ENTRY:.*:]]
+// CHECK-RV64-NEXT: [[TMP0:%.*]] = call <vscale x 4 x bfloat> @llvm.riscv.vfadd.mask.nxv4bf16.bf16.i64(<vscale x 4 x bfloat> [[MASKEDOFF]], <vscale x 4 x bfloat> [[OP1]], bfloat [[OP2]], <vscale x 4 x i1> [[MASK]], i64 7, i64 [[VL]], i64 0)
+// CHECK-RV64-NEXT: ret <vscale x 4 x bfloat> [[TMP0]]
+//
+vbfloat16m1_t test_vfadd_vf_bf16m1_tumu(vbool16_t mask, vbfloat16m1_t maskedoff, vbfloat16m1_t op1, __bf16 op2, size_t vl) {
+ return __riscv_vfadd_vf_bf16m1_tumu(mask, maskedoff, op1, op2, vl);
+}
+
+// CHECK-RV64-LABEL: define dso_local <vscale x 8 x bfloat> @test_vfadd_vv_bf16m2_tumu(
+// CHECK-RV64-SAME: <vscale x 8 x i1> [[MASK:%.*]], <vscale x 8 x bfloat> [[MASKEDOFF:%.*]], <vscale x 8 x bfloat> [[OP1:%.*]], <vscale x 8 x bfloat> [[OP2:%.*]], i64 noundef [[VL:%.*]]) #[[ATTR0]] {
+// CHECK-RV64-NEXT: [[ENTRY:.*:]]
+// CHECK-RV64-NEXT: [[TMP0:%.*]] = call <vscale x 8 x bfloat> @llvm.riscv.vfadd.mask.nxv8bf16.nxv8bf16.i64(<vscale x 8 x bfloat> [[MASKEDOFF]], <vscale x 8 x bfloat> [[OP1]], <vscale x 8 x bfloat> [[OP2]], <vscale x 8 x i1> [[MASK]], i64 7, i64 [[VL]], i64 0)
+// CHECK-RV64-NEXT: ret <vscale x 8 x bfloat> [[TMP0]]
+//
+vbfloat16m2_t test_vfadd_vv_bf16m2_tumu(vbool8_t mask, vbfloat16m2_t maskedoff, vbfloat16m2_t op1, vbfloat16m2_t op2, size_t vl) {
+ return __riscv_vfadd_vv_bf16m2_tumu(mask, maskedoff, op1, op2, vl);
+}
+
+// CHECK-RV64-LABEL: define dso_local <vscale x 8 x bfloat> @test_vfadd_vf_bf16m2_tumu(
+// CHECK-RV64-SAME: <vscale x 8 x i1> [[MASK:%.*]], <vscale x 8 x bfloat> [[MASKEDOFF:%.*]], <vscale x 8 x bfloat> [[OP1:%.*]], bfloat noundef [[OP2:%.*]], i64 noundef [[VL:%.*]]) #[[ATTR0]] {
+// CHECK-RV64-NEXT: [[ENTRY:.*:]]
+// CHECK-RV64-NEXT: [[TMP0:%.*]] = call <vscale x 8 x bfloat> @llvm.riscv.vfadd.mask.nxv8bf16.bf16.i64(<vscale x 8 x bfloat> [[MASKEDOFF]], <vscale x 8 x bfloat> [[OP1]], bfloat [[OP2]], <vscale x 8 x i1> [[MASK]], i64 7, i64 [[VL]], i64 0)
+// CHECK-RV64-NEXT: ret <vscale x 8 x bfloat> [[TMP0]]
+//
+vbfloat16m2_t test_vfadd_vf_bf16m2_tumu(vbool8_t mask, vbfloat16m2_t maskedoff, vbfloat16m2_t op1, __bf16 op2, size_t vl) {
+ return __riscv_vfadd_vf_bf16m2_tumu(mask, maskedoff, op1, op2, vl);
+}
+
+// CHECK-RV64-LABEL: define dso_local <vscale x 16 x bfloat> @test_vfadd_vv_bf16m4_tumu(
+// CHECK-RV64-SAME: <vscale x 16 x i1> [[MASK:%.*]], <vscale x 16 x bfloat> [[MASKEDOFF:%.*]], <vscale x 16 x bfloat> [[OP1:%.*]], <vscale x 16 x bfloat> [[OP2:%.*]], i64 noundef [[VL:%.*]]) #[[ATTR0]] {
+// CHECK-RV64-NEXT: [[ENTRY:.*:]]
+// CHECK-RV64-NEXT: [[TMP0:%.*]] = call <vscale x 16 x bfloat> @llvm.riscv.vfadd.mask.nxv16bf16.nxv16bf16.i64(<vscale x 16 x bfloat> [[MASKEDOFF]], <vscale x 16 x bfloat> [[OP1]], <vscale x 16 x bfloat> [[OP2]], <vscale x 16 x i1> [[MASK]], i64 7, i64 [[VL]], i64 0)
+// CHECK-RV64-NEXT: ret <vscale x 16 x bfloat> [[TMP0]]
+//
+vbfloat16m4_t test_vfadd_vv_bf16m4_tumu(vbool4_t mask, vbfloat16m4_t maskedoff, vbfloat16m4_t op1, vbfloat16m4_t op2, size_t vl) {
+ return __riscv_vfadd_vv_bf16m4_tumu(mask, maskedoff, op1, op2, vl);
+}
+
+// CHECK-RV64-LABEL: define dso_local <vscale x 16 x bfloat> @test_vfadd_vf_bf16m4_tumu(
+// CHECK-RV64-SAME: <vscale x 16 x i1> [[MASK:%.*]], <vscale x 16 x bfloat> [[MASKEDOFF:%.*]], <vscale x 16 x bfloat> [[OP1:%.*]], bfloat noundef [[OP2:%.*]], i64 noundef [[VL:%.*]]) #[[ATTR0]] {
+// CHECK-RV64-NEXT: [[ENTRY:.*:]]
+// CHECK-RV64-NEXT: [[TMP0:%.*]] = call <vscale x 16 x bfloat> @llvm.riscv.vfadd.mask.nxv16bf16.bf16.i64(<vscale x 16 x bfloat> [[MASKEDOFF]], <vscale x 16 x bfloat> [[OP1]], bfloat [[OP2]], <vscale x 16 x i1> [[MASK]], i64 7, i64 [[VL]], i64 0)
+// CHECK-RV64-NEXT: ret <vscale x 16 x bfloat> [[TMP0]]
+//
+vbfloat16m4_t test_vfadd_vf_bf16m4_tumu(vbool4_t mask, vbfloat16m4_t maskedoff, vbfloat16m4_t op1, __bf16 op2, size_t vl) {
+ return __riscv_vfadd_vf_bf16m4_tumu(mask, maskedoff, op1, op2, vl);
+}
+
+// CHECK-RV64-LABEL: define dso_local <vscale x 32 x bfloat> @test_vfadd_vv_bf16m8_tumu(
+// CHECK-RV64-SAME: <vscale x 32 x i1> [[MASK:%.*]], <vscale x 32 x bfloat> [[MASKEDOFF:%.*]], <vscale x 32 x bfloat> [[OP1:%.*]], <vscale x 32 x bfloat> [[OP2:%.*]], i64 noundef [[VL:%.*]]) #[[ATTR0]] {
+// CHECK-RV64-NEXT: [[ENTRY:.*:]]
+// CHECK-RV64-NEXT: [[TMP0:%.*]] = call <vscale x 32 x bfloat> @llvm.riscv.vfadd.mask.nxv32bf16.nxv32bf16.i64(<vscale x 32 x bfloat> [[MASKEDOFF]], <vscale x 32 x bfloat> [[OP1]], <vscale x 32 x bfloat> [[OP2]], <vscale x 32 x i1> [[MASK]], i64 7, i64 [[VL]], i64 0)
+// CHECK-RV64-NEXT: ret <vscale x 32 x bfloat> [[TMP0]]
+//
+vbfloat16m8_t test_vfadd_vv_bf16m8_tumu(vbool2_t mask, vbfloat16m8_t maskedoff, vbfloat16m8_t op1, vbfloat16m8_t op2, size_t vl) {
+ return __riscv_vfadd_vv_bf16m8_tumu(mask, maskedoff, op1, op2, vl);
+}
+
+// CHECK-RV64-LABEL: define dso_local <vscale x 32 x bfloat> @test_vfadd_vf_bf16m8_tumu(
+// CHECK-RV64-SAME: <vscale x 32 x i1> [[MASK:%.*]], <vscale x 32 x bfloat> [[MASKEDOFF:%.*]], <vscale x 32 x bfloat> [[OP1:%.*]], bfloat noundef [[OP2:%.*]], i64 noundef [[VL:%.*]]) #[[ATTR0]] {
+// CHECK-RV64-NEXT: [[ENTRY:.*:]]
+// CHECK-RV64-NEXT: [[TMP0:%.*]] = call <vscale x 32 x bfloat> @llvm.riscv.vfadd.mask.nxv32bf16.bf16.i64(<vscale x 32 x bfloat> [[MASKEDOFF]], <vscale x 32 x bfloat> [[OP1]], bfloat [[OP2]], <vscale x 32 x i1> [[MASK]], i64 7, i64 [[VL]], i64 0)
+// CHECK-RV64-NEXT: ret <vscale x 32 x bfloat> [[TMP0]]
+//
+vbfloat16m8_t test_vfadd_vf_bf16m8_tumu(vbool2_t mask, vbfloat16m8_t maskedoff, vbfloat16m8_t op1, __bf16 op2, size_t vl) {
+ return __riscv_vfadd_vf_bf16m8_tumu(mask, maskedoff, op1, op2, vl);
+}
+
+// CHECK-RV64-LABEL: define dso_local <vscale x 1 x bfloat> @test_vfadd_vv_bf16mf4_mu(
+// CHECK-RV64-SAME: <vscale x 1 x i1> [[MASK:%.*]], <vscale x 1 x bfloat> [[MASKEDOFF:%.*]], <vscale x 1 x bfloat> [[OP1:%.*]], <vscale x 1 x bfloat> [[OP2:%.*]], i64 noundef [[VL:%.*]]) #[[ATTR0]] {
+// CHECK-RV64-NEXT: [[ENTRY:.*:]]
+// CHECK-RV64-NEXT: [[TMP0:%.*]] = call <vscale x 1 x bfloat> @llvm.riscv.vfadd.mask.nxv1bf16.nxv1bf16.i64(<vscale x 1 x bfloat> [[MASKEDOFF]], <vscale x 1 x bfloat> [[OP1]], <vscale x 1 x bfloat> [[OP2]], <vscale x 1 x i1> [[MASK]], i64 7, i64 [[VL]], i64 1)
+// CHECK-RV64-NEXT: ret <vscale x 1 x bfloat> [[TMP0]]
+//
+vbfloat16mf4_t test_vfadd_vv_bf16mf4_mu(vbool64_t mask, vbfloat16mf4_t maskedoff, vbfloat16mf4_t op1, vbfloat16mf4_t op2, size_t vl) {
+ return __riscv_vfadd_vv_bf16mf4_mu(mask, maskedoff, op1, op2, vl);
+}
+
+// CHECK-RV64-LABEL: define dso_local <vscale x 1 x bfloat> @test_vfadd_vf_bf16mf4_mu(
+// CHECK-RV64-SAME: <vscale x 1 x i1> [[MASK:%.*]], <vscale x 1 x bfloat> [[MASKEDOFF:%.*]], <vscale x 1 x bfloat> [[OP1:%.*]], bfloat noundef [[OP2:%.*]], i64 noundef [[VL:%.*]]) #[[ATTR0]] {
+// CHECK-RV64-NEXT: [[ENTRY:.*:]]
+// CHECK-RV64-NEXT: [[TMP0:%.*]] = call <vscale x 1 x bfloat> @llvm.riscv.vfadd.mask.nxv1bf16.bf16.i64(<vscale x 1 x bfloat> [[MASKEDOFF]], <vscale x 1 x bfloat> [[OP1]], bfloat [[OP2]], <vscale x 1 x i1> [[MASK]], i64 7, i64 [[VL]], i64 1)
+// CHECK-RV64-NEXT: ret <vscale x 1 x bfloat> [[TMP0]]
+//
+vbfloat16mf4_t test_vfadd_vf_bf16mf4_mu(vbool64_t mask, vbfloat16mf4_t maskedoff, vbfloat16mf4_t op1, __bf16 op2, size_t vl) {
+ return __riscv_vfadd_vf_bf16mf4_mu(mask, maskedoff, op1, op2, vl);
+}
+
+// CHECK-RV64-LABEL: define dso_local <vscale x 2 x bfloat> @test_vfadd_vv_bf16mf2_mu(
+// CHECK-RV64-SAME: <vscale x 2 x i1> [[MASK:%.*]], <vscale x 2 x bfloat> [[MASKEDOFF:%.*]], <vscale x 2 x bfloat> [[OP1:%.*]], <vscale x 2 x bfloat> [[OP2:%.*]], i64 noundef [[VL:%.*]]) #[[ATTR0]] {
+// CHECK-RV64-NEXT: [[ENTRY:.*:]]
+// CHECK-RV64-NEXT: [[TMP0:%.*]] = call <vscale x 2 x bfloat> @llvm.riscv.vfadd.mask.nxv2bf16.nxv2bf16.i64(<vscale x 2 x bfloat> [[MASKEDOFF]], <vscale x 2 x bfloat> [[OP1]], <vscale x 2 x bfloat> [[OP2]], <vscale x 2 x i1> [[MASK]], i64 7, i64 [[VL]], i64 1)
+// CHECK-RV64-NEXT: ret <vscale x 2 x bfloat> [[TMP0]]
+//
+vbfloat16mf2_t test_vfadd_vv_bf16mf2_mu(vbool32_t mask, vbfloat16mf2_t maskedoff, vbfloat16mf2_t op1, vbfloat16mf2_t op2, size_t vl) {
+ return __riscv_vfadd_vv_bf16mf2_mu(mask, maskedoff, op1, op2, vl);
+}
+
+// CHECK-RV64-LABEL: define dso_local <vscale x 2 x bfloat> @test_vfadd_vf_bf16mf2_mu(
+// CHECK-RV64-SAME: <vscale x 2 x i1> [[MASK:%.*]], <vscale x 2 x bfloat> [[MASKEDOFF:%.*]], <vscale x 2 x bfloat> [[OP1:%.*]], bfloat noundef [[OP2:%.*]], i64 noundef [[VL:%.*]]) #[[ATTR0]] {
+// CHECK-RV64-NEXT: [[ENTRY:.*:]]
+// CHECK-RV64-NEXT: [[TMP0:%.*]] = call <vscale x 2 x bfloat> @llvm.riscv.vfadd.mask.nxv2bf16.bf16.i64(<vscale x 2 x bfloat> [[MASKEDOFF]], <vscale x 2 x bfloat> [[OP1]], bfloat [[OP2]], <vscale x 2 x i1> [[MASK]], i64 7, i64 [[VL]], i64 1)
+// CHECK-RV64-NEXT: ret <vscale x 2 x bfloat> [[TMP0]]
+//
+vbfloat16mf2_t test_vfadd_vf_bf16mf2_mu(vbool32_t mask, vbfloat16mf2_t maskedoff, vbfloat16mf2_t op1, __bf16 op2, size_t vl) {
+ return __riscv_vfadd_vf_bf16mf2_mu(mask, maskedoff, op1, op2, vl);
+}
+
+// CHECK-RV64-LABEL: define dso_local <vscale x 4 x bfloat> @test_vfadd_vv_bf16m1_mu(
+// CHECK-RV64-SAME: <vscale x 4 x i1> [[MASK:%.*]], <vscale x 4 x bfloat> [[MASKEDOFF:%.*]], <vscale x 4 x bfloat> [[OP1:%.*]], <vscale x 4 x bfloat> [[OP2:%.*]], i64 noundef [[VL:%.*]]) #[[ATTR0]] {
+// CHECK-RV64-NEXT: [[ENTRY:.*:]]
+// CHECK-RV64-NEXT: [[TMP0:%.*]] = call <vscale x 4 x bfloat> @llvm.riscv.vfadd.mask.nxv4bf16.nxv4bf16.i64(<vscale x 4 x bfloat> [[MASKEDOFF]], <vscale x 4 x bfloat> [[OP1]], <vscale x 4 x bfloat> [[OP2]], <vscale x 4 x i1> [[MASK]], i64 7, i64 [[VL]], i64 1)
+// CHECK-RV64-NEXT: ret <vscale x 4 x bfloat> [[TMP0]]
+//
+vbfloat16m1_t test_vfadd_vv_bf16m1_mu(vbool16_t mask, vbfloat16m1_t maskedoff, vbfloat16m1_t op1, vbfloat16m1_t op2, size_t vl) {
+ return __riscv_vfadd_vv_bf16m1_mu(mask, maskedoff, op1, op2, vl);
+}
+
+// CHECK-RV64-LABEL: define dso_local <vscale x 4 x bfloat> @test_vfadd_vf_bf16m1_mu(
+// CHECK-RV64-SAME: <vscale x 4 x i1> [[MASK:%.*]], <vscale x 4 x bfloat> [[MASKEDOFF:%.*]], <vscale x 4 x bfloat> [[OP1:%.*]], bfloat noundef [[OP2:%.*]], i64 noundef [[VL:%.*]]) #[[ATTR0]] {
+// CHECK-RV64-NEXT: [[ENTRY:.*:]]
+// CHECK-RV64-NEXT: [[TMP0:%.*]] = call <vscale x 4 x bfloat> @llvm.riscv.vfadd.mask.nxv4bf16.bf16.i64(<vscale x 4 x bfloat> [[MASKEDOFF]], <vscale x 4 x bfloat> [[OP1]], bfloat [[OP2]], <vscale x 4 x i1> [[MASK]], i64 7, i64 [[VL]], i64 1)
+// CHECK-RV64-NEXT: ret <vscale x 4 x bfloat> [[TMP0]]
+//
+vbfloat16m1_t test_vfadd_vf_bf16m1_mu(vbool16_t mask, vbfloat16m1_t maskedoff, vbfloat16m1_t op1, __bf16 op2, size_t vl) {
+ return __riscv_vfadd_vf_bf16m1_mu(mask, maskedoff, op1, op2, vl);
+}
+
+// CHECK-RV64-LABEL: define dso_local <vscale x 8 x bfloat> @test_vfadd_vv_bf16m2_mu(
+// CHECK-RV64-SAME: <vscale x 8 x i1> [[MASK:%.*]], <vscale x 8 x bfloat> [[MASKEDOFF:%.*]], <vscale x 8 x bfloat> [[OP1:%.*]], <vscale x 8 x bfloat> [[OP2:%.*]], i64 noundef [[VL:%.*]]) #[[ATTR0]] {
+// CHECK-RV64-NEXT: [[ENTRY:.*:]]
+// CHECK-RV64-NEXT: [[TMP0:%.*]] = call <vscale x 8 x bfloat> @llvm.riscv.vfadd.mask.nxv8bf16.nxv8bf16.i64(<vscale x 8 x bfloat> [[MASKEDOFF]], <vscale x 8 x bfloat> [[OP1]], <vscale x 8 x bfloat> [[OP2]], <vscale x 8 x i1> [[MASK]], i64 7, i64 [[VL]], i64 1)
+// CHECK-RV64-NEXT: ret <vscale x 8 x bfloat> [[TMP0]]
+//
+vbfloat16m2_t test_vfadd_vv_bf16m2_mu(vbool8_t mask, vbfloat16m2_t maskedoff, vbfloat16m2_t op1, vbfloat16m2_t op2, size_t vl) {
+ return __riscv_vfadd_vv_bf16m2_mu(mask, maskedoff, op1, op2, vl);
+}
+
+// CHECK-RV64-LABEL: define dso_local <vscale x 8 x bfloat> @test_vfadd_vf_bf16m2_mu(
+// CHECK-RV64-SAME: <vscale x 8 x i1> [[MASK:%.*]], <vscale x 8 x bfloat> [[MASKEDOFF:%.*]], <vscale x 8 x bfloat> [[OP1:%.*]], bfloat noundef [[OP2:%.*]], i64 noundef [[VL:%.*]]) #[[ATTR0]] {
+// CHECK-RV64-NEXT: [[ENTRY:.*:]]
+// CHECK-RV64-NEXT: [[TMP0:%.*]] = call <vscale x 8 x bfloat> @llvm.riscv.vfadd.mask.nxv8bf16.bf16.i64(<vscale x 8 x bfloat> [[MASKEDOFF]], <vscale x 8 x bfloat> [[OP1]], bfloat [[OP2]], <vscale x 8 x i1> [[MASK]], i64 7, i64 [[VL]], i64 1)
+// CHECK-RV64-NEXT: ret <vscale x 8 x bfloat> [[TMP0]]
+//
+vbfloat16m2_t test_vfadd_vf_bf16m2_mu(vbool8_t mask, vbfloat16m2_t maskedoff, vbfloat16m2_t op1, __bf16 op2, size_t vl) {
+ return __riscv_vfadd_vf_bf16m2_mu(mask, maskedoff, op1, op2, vl);
+}
+
+// CHECK-RV64-LABEL: define dso_local <vscale x 16 x bfloat> @test_vfadd_vv_bf16m4_mu(
+// CHECK-RV64-SAME: <vscale x 16 x i1> [[MASK:%.*]], <vscale x 16 x bfloat> [[MASKEDOFF:%.*]], <vscale x 16 x bfloat> [[OP1:%.*]], <vscale x 16 x bfloat> [[OP2:%.*]], i64 noundef [[VL:%.*]]) #[[ATTR0]] {
+// CHECK-RV64-NEXT: [[ENTRY:.*:]]
+// CHECK-RV64-NEXT: [[TMP0:%.*]] = call <vscale x 16 x bfloat> @llvm.riscv.vfadd.mask.nxv16bf16.nxv16bf16.i64(<vscale x 16 x bfloat> [[MASKEDOFF]], <vscale x 16 x bfloat> [[OP1]], <vscale x 16 x bfloat> [[OP2]], <vscale x 16 x i1> [[MASK]], i64 7, i64 [[VL]], i64 1)
+// CHECK-RV64-NEXT: ret <vscale x 16 x bfloat> [[TMP0]]
+//
+vbfloat16m4_t test_vfadd_vv_bf16m4_mu(vbool4_t mask, vbfloat16m4_t maskedoff, vbfloat16m4_t op1, vbfloat16m4_t op2, size_t vl) {
+ return __riscv_vfadd_vv_bf16m4_mu(mask, maskedoff, op1, op2, vl);
+}
+
+// CHECK-RV64-LABEL: define dso_local <vscale x 16 x bfloat> @test_vfadd_vf_bf16m4_mu(
+// CHECK-RV64-SAME: <vscale x 16 x i1> [[MASK:%.*]], <vscale x 16 x bfloat> [[MASKEDOFF:%.*]], <vscale x 16 x bfloat> [[OP1:%.*]], bfloat noundef [[OP2:%.*]], i64 noundef [[VL:%.*]]) #[[ATTR0]] {
+// CHECK-RV64-NEXT: [[ENTRY:.*:]]
+// CHECK-RV64-NEXT: [[TMP0:%.*]] = call <vscale x 16 x bfloat> @llvm.riscv.vfadd.mask.nxv16bf16.bf16.i64(<vscale x 16 x bfloat> [[MASKEDOFF]], <vscale x 16 x bfloat> [[OP1]], bfloat [[OP2]], <vscale x 16 x i1> [[MASK]], i64 7, i64 [[VL]], i64 1)
+// CHECK-RV64-NEXT: ret <vscale x 16 x bfloat> [[TMP0]]
+//
+vbfloat16m4_t test_vfadd_vf_bf16m4_mu(vbool4_t mask, vbfloat16m4_t maskedoff, vbfloat16m4_t op1, __bf16 op2, size_t vl) {
+ return __riscv_vfadd_vf_bf16m4_mu(mask, maskedoff, op1, op2, vl);
+}
+
+// CHECK-RV64-LABEL: define dso_local <vscale x 32 x bfloat> @test_vfadd_vv_bf16m8_mu(
+// CHECK-RV64-SAME: <vscale x 32 x i1> [[MASK:%.*]], <vscale x 32 x bfloat> [[MASKEDOFF:%.*]], <vscale x 32 x bfloat> [[OP1:%.*]], <vscale x 32 x bfloat> [[OP2:%.*]], i64 noundef [[VL:%.*]]) #[[ATTR0]] {
+// CHECK-RV64-NEXT: [[ENTRY:.*:]]
+// CHECK-RV64-NEXT: [[TMP0:%.*]] = call <vscale x 32 x bfloat> @llvm.riscv.vfadd.mask.nxv32bf16.nxv32bf16.i64(<vscale x 32 x bfloat> [[MASKEDOFF]], <vscale x 32 x bfloat> [[OP1]], <vscale x 32 x bfloat> [[OP2]], <vscale x 32 x i1> [[MASK]], i64 7, i64 [[VL]], i64 1)
+// CHECK-RV64-NEXT: ret <vscale x 32 x bfloat> [[TMP0]]
+//
+vbfloat16m8_t test_vfadd_vv_bf16m8_mu(vbool2_t mask, vbfloat16m8_t maskedoff, vbfloat16m8_t op1, vbfloat16m8_t op2, size_t vl) {
+ return __riscv_vfadd_vv_bf16m8_mu(mask, maskedoff, op1, op2, vl);
+}
+
+// CHECK-RV64-LABEL: define dso_local <vscale x 32 x bfloat> @test_vfadd_vf_bf16m8_mu(
+// CHECK-RV64-SAME: <vscale x 32 x i1> [[MASK:%.*]], <vscale x 32 x bfloat> [[MASKEDOFF:%.*]], <vscale x 32 x bfloat> [[OP1:%.*]], bfloat noundef [[OP2:%.*]], i64 noundef [[VL:%.*]]) #[[ATTR0]] {
+// CHECK-RV64-NEXT: [[ENTRY:.*:]]
+// CHECK-RV64-NEXT: [[TMP0:%.*]] = call <vscale x 32 x bfloat> @llvm.riscv.vfadd.mask.nxv32bf16.bf16.i64(<vscale x 32 x bfloat> [[MASKEDOFF]], <vscale x 32 x bfloat> [[OP1]], bfloat [[OP2]], <vscale x 32 x i1> [[MASK]], i64 7, i64 [[VL]], i64 1)
+// CHECK-RV64-NEXT: ret <vscale x 32 x bfloat> [[TMP0]]
+//
+vbfloat16m8_t test_vfadd_vf_bf16m8_mu(vbool2_t mask, vbfloat16m8_t maskedoff, vbfloat16m8_t op1, __bf16 op2, size_t vl) {
+ return __riscv_vfadd_vf_bf16m8_mu(mask, maskedoff, op1, op2, vl);
+}
+
diff --git a/clang/test/CodeGen/RISCV/rvv-intrinsics-autogenerated/zvfbfa/policy/non-overloaded/vfclass.c b/clang/test/CodeGen/RISCV/rvv-intrinsics-autogenerated/zvfbfa/policy/non-overloaded/vfclass.c
new file mode 100644
index 0000000..8e6946d
--- /dev/null
+++ b/clang/test/CodeGen/RISCV/rvv-intrinsics-autogenerated/zvfbfa/policy/non-overloaded/vfclass.c
@@ -0,0 +1,272 @@
+// NOTE: Assertions have been autogenerated by utils/update_cc_test_checks.py UTC_ARGS: --version 5
+// REQUIRES: riscv-registered-target
+// RUN: %clang_cc1 -triple riscv64 -target-feature +v \
+// RUN: -target-feature +experimental-zvfbfa -disable-O0-optnone \
+// RUN: -emit-llvm %s -o - | opt -S -passes=mem2reg | \
+// RUN: FileCheck --check-prefix=CHECK-RV64 %s
+
+#include <riscv_vector.h>
+
+// CHECK-RV64-LABEL: define dso_local <vscale x 1 x i16> @test_vfclass_v_bf16mf4_u16mf4_tu(
+// CHECK-RV64-SAME: <vscale x 1 x i16> [[VD:%.*]], <vscale x 1 x bfloat> [[VS2:%.*]], i64 noundef [[VL:%.*]]) #[[ATTR0:[0-9]+]] {
+// CHECK-RV64-NEXT: [[ENTRY:.*:]]
+// CHECK-RV64-NEXT: [[TMP0:%.*]] = call <vscale x 1 x i16> @llvm.riscv.vfclass.nxv1bf16.i64(<vscale x 1 x i16> [[VD]], <vscale x 1 x bfloat> [[VS2]], i64 [[VL]])
+// CHECK-RV64-NEXT: ret <vscale x 1 x i16> [[TMP0]]
+//
+vuint16mf4_t test_vfclass_v_bf16mf4_u16mf4_tu(vuint16mf4_t vd,
+ vbfloat16mf4_t vs2, size_t vl) {
+ return __riscv_vfclass_v_bf16mf4_u16mf4_tu(vd, vs2, vl);
+}
+
+// CHECK-RV64-LABEL: define dso_local <vscale x 2 x i16> @test_vfclass_v_bf16mf2_u16mf2_tu(
+// CHECK-RV64-SAME: <vscale x 2 x i16> [[VD:%.*]], <vscale x 2 x bfloat> [[VS2:%.*]], i64 noundef [[VL:%.*]]) #[[ATTR0]] {
+// CHECK-RV64-NEXT: [[ENTRY:.*:]]
+// CHECK-RV64-NEXT: [[TMP0:%.*]] = call <vscale x 2 x i16> @llvm.riscv.vfclass.nxv2bf16.i64(<vscale x 2 x i16> [[VD]], <vscale x 2 x bfloat> [[VS2]], i64 [[VL]])
+// CHECK-RV64-NEXT: ret <vscale x 2 x i16> [[TMP0]]
+//
+vuint16mf2_t test_vfclass_v_bf16mf2_u16mf2_tu(vuint16mf2_t vd,
+ vbfloat16mf2_t vs2, size_t vl) {
+ return __riscv_vfclass_v_bf16mf2_u16mf2_tu(vd, vs2, vl);
+}
+
+// CHECK-RV64-LABEL: define dso_local <vscale x 4 x i16> @test_vfclass_v_bf16m1_u16m1_tu(
+// CHECK-RV64-SAME: <vscale x 4 x i16> [[VD:%.*]], <vscale x 4 x bfloat> [[VS2:%.*]], i64 noundef [[VL:%.*]]) #[[ATTR0]] {
+// CHECK-RV64-NEXT: [[ENTRY:.*:]]
+// CHECK-RV64-NEXT: [[TMP0:%.*]] = call <vscale x 4 x i16> @llvm.riscv.vfclass.nxv4bf16.i64(<vscale x 4 x i16> [[VD]], <vscale x 4 x bfloat> [[VS2]], i64 [[VL]])
+// CHECK-RV64-NEXT: ret <vscale x 4 x i16> [[TMP0]]
+//
+vuint16m1_t test_vfclass_v_bf16m1_u16m1_tu(vuint16m1_t vd, vbfloat16m1_t vs2,
+ size_t vl) {
+ return __riscv_vfclass_v_bf16m1_u16m1_tu(vd, vs2, vl);
+}
+
+// CHECK-RV64-LABEL: define dso_local <vscale x 8 x i16> @test_vfclass_v_bf16m2_u16m2_tu(
+// CHECK-RV64-SAME: <vscale x 8 x i16> [[VD:%.*]], <vscale x 8 x bfloat> [[VS2:%.*]], i64 noundef [[VL:%.*]]) #[[ATTR0]] {
+// CHECK-RV64-NEXT: [[ENTRY:.*:]]
+// CHECK-RV64-NEXT: [[TMP0:%.*]] = call <vscale x 8 x i16> @llvm.riscv.vfclass.nxv8bf16.i64(<vscale x 8 x i16> [[VD]], <vscale x 8 x bfloat> [[VS2]], i64 [[VL]])
+// CHECK-RV64-NEXT: ret <vscale x 8 x i16> [[TMP0]]
+//
+vuint16m2_t test_vfclass_v_bf16m2_u16m2_tu(vuint16m2_t vd, vbfloat16m2_t vs2,
+ size_t vl) {
+ return __riscv_vfclass_v_bf16m2_u16m2_tu(vd, vs2, vl);
+}
+
+// CHECK-RV64-LABEL: define dso_local <vscale x 16 x i16> @test_vfclass_v_bf16m4_u16m4_tu(
+// CHECK-RV64-SAME: <vscale x 16 x i16> [[VD:%.*]], <vscale x 16 x bfloat> [[VS2:%.*]], i64 noundef [[VL:%.*]]) #[[ATTR0]] {
+// CHECK-RV64-NEXT: [[ENTRY:.*:]]
+// CHECK-RV64-NEXT: [[TMP0:%.*]] = call <vscale x 16 x i16> @llvm.riscv.vfclass.nxv16bf16.i64(<vscale x 16 x i16> [[VD]], <vscale x 16 x bfloat> [[VS2]], i64 [[VL]])
+// CHECK-RV64-NEXT: ret <vscale x 16 x i16> [[TMP0]]
+//
+vuint16m4_t test_vfclass_v_bf16m4_u16m4_tu(vuint16m4_t vd, vbfloat16m4_t vs2,
+ size_t vl) {
+ return __riscv_vfclass_v_bf16m4_u16m4_tu(vd, vs2, vl);
+}
+
+// CHECK-RV64-LABEL: define dso_local <vscale x 32 x i16> @test_vfclass_v_bf16m8_u16m8_tu(
+// CHECK-RV64-SAME: <vscale x 32 x i16> [[VD:%.*]], <vscale x 32 x bfloat> [[VS2:%.*]], i64 noundef [[VL:%.*]]) #[[ATTR0]] {
+// CHECK-RV64-NEXT: [[ENTRY:.*:]]
+// CHECK-RV64-NEXT: [[TMP0:%.*]] = call <vscale x 32 x i16> @llvm.riscv.vfclass.nxv32bf16.i64(<vscale x 32 x i16> [[VD]], <vscale x 32 x bfloat> [[VS2]], i64 [[VL]])
+// CHECK-RV64-NEXT: ret <vscale x 32 x i16> [[TMP0]]
+//
+vuint16m8_t test_vfclass_v_bf16m8_u16m8_tu(vuint16m8_t vd, vbfloat16m8_t vs2,
+ size_t vl) {
+ return __riscv_vfclass_v_bf16m8_u16m8_tu(vd, vs2, vl);
+}
+
+// CHECK-RV64-LABEL: define dso_local <vscale x 1 x i16> @test_vfclass_v_bf16mf4_u16mf4_tum(
+// CHECK-RV64-SAME: <vscale x 1 x i1> [[VM:%.*]], <vscale x 1 x i16> [[VD:%.*]], <vscale x 1 x bfloat> [[VS2:%.*]], i64 noundef [[VL:%.*]]) #[[ATTR0]] {
+// CHECK-RV64-NEXT: [[ENTRY:.*:]]
+// CHECK-RV64-NEXT: [[TMP0:%.*]] = call <vscale x 1 x i16> @llvm.riscv.vfclass.mask.nxv1bf16.i64(<vscale x 1 x i16> [[VD]], <vscale x 1 x bfloat> [[VS2]], <vscale x 1 x i1> [[VM]], i64 [[VL]], i64 2)
+// CHECK-RV64-NEXT: ret <vscale x 1 x i16> [[TMP0]]
+//
+vuint16mf4_t test_vfclass_v_bf16mf4_u16mf4_tum(vbool64_t vm, vuint16mf4_t vd,
+ vbfloat16mf4_t vs2, size_t vl) {
+ return __riscv_vfclass_v_bf16mf4_u16mf4_tum(vm, vd, vs2, vl);
+}
+
+// CHECK-RV64-LABEL: define dso_local <vscale x 2 x i16> @test_vfclass_v_bf16mf2_u16mf2_tum(
+// CHECK-RV64-SAME: <vscale x 2 x i1> [[VM:%.*]], <vscale x 2 x i16> [[VD:%.*]], <vscale x 2 x bfloat> [[VS2:%.*]], i64 noundef [[VL:%.*]]) #[[ATTR0]] {
+// CHECK-RV64-NEXT: [[ENTRY:.*:]]
+// CHECK-RV64-NEXT: [[TMP0:%.*]] = call <vscale x 2 x i16> @llvm.riscv.vfclass.mask.nxv2bf16.i64(<vscale x 2 x i16> [[VD]], <vscale x 2 x bfloat> [[VS2]], <vscale x 2 x i1> [[VM]], i64 [[VL]], i64 2)
+// CHECK-RV64-NEXT: ret <vscale x 2 x i16> [[TMP0]]
+//
+vuint16mf2_t test_vfclass_v_bf16mf2_u16mf2_tum(vbool32_t vm, vuint16mf2_t vd,
+ vbfloat16mf2_t vs2, size_t vl) {
+ return __riscv_vfclass_v_bf16mf2_u16mf2_tum(vm, vd, vs2, vl);
+}
+
+// CHECK-RV64-LABEL: define dso_local <vscale x 4 x i16> @test_vfclass_v_bf16m1_u16m1_tum(
+// CHECK-RV64-SAME: <vscale x 4 x i1> [[VM:%.*]], <vscale x 4 x i16> [[VD:%.*]], <vscale x 4 x bfloat> [[VS2:%.*]], i64 noundef [[VL:%.*]]) #[[ATTR0]] {
+// CHECK-RV64-NEXT: [[ENTRY:.*:]]
+// CHECK-RV64-NEXT: [[TMP0:%.*]] = call <vscale x 4 x i16> @llvm.riscv.vfclass.mask.nxv4bf16.i64(<vscale x 4 x i16> [[VD]], <vscale x 4 x bfloat> [[VS2]], <vscale x 4 x i1> [[VM]], i64 [[VL]], i64 2)
+// CHECK-RV64-NEXT: ret <vscale x 4 x i16> [[TMP0]]
+//
+vuint16m1_t test_vfclass_v_bf16m1_u16m1_tum(vbool16_t vm, vuint16m1_t vd,
+ vbfloat16m1_t vs2, size_t vl) {
+ return __riscv_vfclass_v_bf16m1_u16m1_tum(vm, vd, vs2, vl);
+}
+
+// CHECK-RV64-LABEL: define dso_local <vscale x 8 x i16> @test_vfclass_v_bf16m2_u16m2_tum(
+// CHECK-RV64-SAME: <vscale x 8 x i1> [[VM:%.*]], <vscale x 8 x i16> [[VD:%.*]], <vscale x 8 x bfloat> [[VS2:%.*]], i64 noundef [[VL:%.*]]) #[[ATTR0]] {
+// CHECK-RV64-NEXT: [[ENTRY:.*:]]
+// CHECK-RV64-NEXT: [[TMP0:%.*]] = call <vscale x 8 x i16> @llvm.riscv.vfclass.mask.nxv8bf16.i64(<vscale x 8 x i16> [[VD]], <vscale x 8 x bfloat> [[VS2]], <vscale x 8 x i1> [[VM]], i64 [[VL]], i64 2)
+// CHECK-RV64-NEXT: ret <vscale x 8 x i16> [[TMP0]]
+//
+vuint16m2_t test_vfclass_v_bf16m2_u16m2_tum(vbool8_t vm, vuint16m2_t vd,
+ vbfloat16m2_t vs2, size_t vl) {
+ return __riscv_vfclass_v_bf16m2_u16m2_tum(vm, vd, vs2, vl);
+}
+
+// CHECK-RV64-LABEL: define dso_local <vscale x 16 x i16> @test_vfclass_v_bf16m4_u16m4_tum(
+// CHECK-RV64-SAME: <vscale x 16 x i1> [[VM:%.*]], <vscale x 16 x i16> [[VD:%.*]], <vscale x 16 x bfloat> [[VS2:%.*]], i64 noundef [[VL:%.*]]) #[[ATTR0]] {
+// CHECK-RV64-NEXT: [[ENTRY:.*:]]
+// CHECK-RV64-NEXT: [[TMP0:%.*]] = call <vscale x 16 x i16> @llvm.riscv.vfclass.mask.nxv16bf16.i64(<vscale x 16 x i16> [[VD]], <vscale x 16 x bfloat> [[VS2]], <vscale x 16 x i1> [[VM]], i64 [[VL]], i64 2)
+// CHECK-RV64-NEXT: ret <vscale x 16 x i16> [[TMP0]]
+//
+vuint16m4_t test_vfclass_v_bf16m4_u16m4_tum(vbool4_t vm, vuint16m4_t vd,
+ vbfloat16m4_t vs2, size_t vl) {
+ return __riscv_vfclass_v_bf16m4_u16m4_tum(vm, vd, vs2, vl);
+}
+
+// CHECK-RV64-LABEL: define dso_local <vscale x 32 x i16> @test_vfclass_v_bf16m8_u16m8_tum(
+// CHECK-RV64-SAME: <vscale x 32 x i1> [[VM:%.*]], <vscale x 32 x i16> [[VD:%.*]], <vscale x 32 x bfloat> [[VS2:%.*]], i64 noundef [[VL:%.*]]) #[[ATTR0]] {
+// CHECK-RV64-NEXT: [[ENTRY:.*:]]
+// CHECK-RV64-NEXT: [[TMP0:%.*]] = call <vscale x 32 x i16> @llvm.riscv.vfclass.mask.nxv32bf16.i64(<vscale x 32 x i16> [[VD]], <vscale x 32 x bfloat> [[VS2]], <vscale x 32 x i1> [[VM]], i64 [[VL]], i64 2)
+// CHECK-RV64-NEXT: ret <vscale x 32 x i16> [[TMP0]]
+//
+vuint16m8_t test_vfclass_v_bf16m8_u16m8_tum(vbool2_t vm, vuint16m8_t vd,
+ vbfloat16m8_t vs2, size_t vl) {
+ return __riscv_vfclass_v_bf16m8_u16m8_tum(vm, vd, vs2, vl);
+}
+
+// CHECK-RV64-LABEL: define dso_local <vscale x 1 x i16> @test_vfclass_v_bf16mf4_u16mf4_tumu(
+// CHECK-RV64-SAME: <vscale x 1 x i1> [[VM:%.*]], <vscale x 1 x i16> [[VD:%.*]], <vscale x 1 x bfloat> [[VS2:%.*]], i64 noundef [[VL:%.*]]) #[[ATTR0]] {
+// CHECK-RV64-NEXT: [[ENTRY:.*:]]
+// CHECK-RV64-NEXT: [[TMP0:%.*]] = call <vscale x 1 x i16> @llvm.riscv.vfclass.mask.nxv1bf16.i64(<vscale x 1 x i16> [[VD]], <vscale x 1 x bfloat> [[VS2]], <vscale x 1 x i1> [[VM]], i64 [[VL]], i64 0)
+// CHECK-RV64-NEXT: ret <vscale x 1 x i16> [[TMP0]]
+//
+vuint16mf4_t test_vfclass_v_bf16mf4_u16mf4_tumu(vbool64_t vm, vuint16mf4_t vd,
+ vbfloat16mf4_t vs2, size_t vl) {
+ return __riscv_vfclass_v_bf16mf4_u16mf4_tumu(vm, vd, vs2, vl);
+}
+
+// CHECK-RV64-LABEL: define dso_local <vscale x 2 x i16> @test_vfclass_v_bf16mf2_u16mf2_tumu(
+// CHECK-RV64-SAME: <vscale x 2 x i1> [[VM:%.*]], <vscale x 2 x i16> [[VD:%.*]], <vscale x 2 x bfloat> [[VS2:%.*]], i64 noundef [[VL:%.*]]) #[[ATTR0]] {
+// CHECK-RV64-NEXT: [[ENTRY:.*:]]
+// CHECK-RV64-NEXT: [[TMP0:%.*]] = call <vscale x 2 x i16> @llvm.riscv.vfclass.mask.nxv2bf16.i64(<vscale x 2 x i16> [[VD]], <vscale x 2 x bfloat> [[VS2]], <vscale x 2 x i1> [[VM]], i64 [[VL]], i64 0)
+// CHECK-RV64-NEXT: ret <vscale x 2 x i16> [[TMP0]]
+//
+vuint16mf2_t test_vfclass_v_bf16mf2_u16mf2_tumu(vbool32_t vm, vuint16mf2_t vd,
+ vbfloat16mf2_t vs2, size_t vl) {
+ return __riscv_vfclass_v_bf16mf2_u16mf2_tumu(vm, vd, vs2, vl);
+}
+
+// CHECK-RV64-LABEL: define dso_local <vscale x 4 x i16> @test_vfclass_v_bf16m1_u16m1_tumu(
+// CHECK-RV64-SAME: <vscale x 4 x i1> [[VM:%.*]], <vscale x 4 x i16> [[VD:%.*]], <vscale x 4 x bfloat> [[VS2:%.*]], i64 noundef [[VL:%.*]]) #[[ATTR0]] {
+// CHECK-RV64-NEXT: [[ENTRY:.*:]]
+// CHECK-RV64-NEXT: [[TMP0:%.*]] = call <vscale x 4 x i16> @llvm.riscv.vfclass.mask.nxv4bf16.i64(<vscale x 4 x i16> [[VD]], <vscale x 4 x bfloat> [[VS2]], <vscale x 4 x i1> [[VM]], i64 [[VL]], i64 0)
+// CHECK-RV64-NEXT: ret <vscale x 4 x i16> [[TMP0]]
+//
+vuint16m1_t test_vfclass_v_bf16m1_u16m1_tumu(vbool16_t vm, vuint16m1_t vd,
+ vbfloat16m1_t vs2, size_t vl) {
+ return __riscv_vfclass_v_bf16m1_u16m1_tumu(vm, vd, vs2, vl);
+}
+
+// CHECK-RV64-LABEL: define dso_local <vscale x 8 x i16> @test_vfclass_v_bf16m2_u16m2_tumu(
+// CHECK-RV64-SAME: <vscale x 8 x i1> [[VM:%.*]], <vscale x 8 x i16> [[VD:%.*]], <vscale x 8 x bfloat> [[VS2:%.*]], i64 noundef [[VL:%.*]]) #[[ATTR0]] {
+// CHECK-RV64-NEXT: [[ENTRY:.*:]]
+// CHECK-RV64-NEXT: [[TMP0:%.*]] = call <vscale x 8 x i16> @llvm.riscv.vfclass.mask.nxv8bf16.i64(<vscale x 8 x i16> [[VD]], <vscale x 8 x bfloat> [[VS2]], <vscale x 8 x i1> [[VM]], i64 [[VL]], i64 0)
+// CHECK-RV64-NEXT: ret <vscale x 8 x i16> [[TMP0]]
+//
+vuint16m2_t test_vfclass_v_bf16m2_u16m2_tumu(vbool8_t vm, vuint16m2_t vd,
+ vbfloat16m2_t vs2, size_t vl) {
+ return __riscv_vfclass_v_bf16m2_u16m2_tumu(vm, vd, vs2, vl);
+}
+
+// CHECK-RV64-LABEL: define dso_local <vscale x 16 x i16> @test_vfclass_v_bf16m4_u16m4_tumu(
+// CHECK-RV64-SAME: <vscale x 16 x i1> [[VM:%.*]], <vscale x 16 x i16> [[VD:%.*]], <vscale x 16 x bfloat> [[VS2:%.*]], i64 noundef [[VL:%.*]]) #[[ATTR0]] {
+// CHECK-RV64-NEXT: [[ENTRY:.*:]]
+// CHECK-RV64-NEXT: [[TMP0:%.*]] = call <vscale x 16 x i16> @llvm.riscv.vfclass.mask.nxv16bf16.i64(<vscale x 16 x i16> [[VD]], <vscale x 16 x bfloat> [[VS2]], <vscale x 16 x i1> [[VM]], i64 [[VL]], i64 0)
+// CHECK-RV64-NEXT: ret <vscale x 16 x i16> [[TMP0]]
+//
+vuint16m4_t test_vfclass_v_bf16m4_u16m4_tumu(vbool4_t vm, vuint16m4_t vd,
+ vbfloat16m4_t vs2, size_t vl) {
+ return __riscv_vfclass_v_bf16m4_u16m4_tumu(vm, vd, vs2, vl);
+}
+
+// CHECK-RV64-LABEL: define dso_local <vscale x 32 x i16> @test_vfclass_v_bf16m8_u16m8_tumu(
+// CHECK-RV64-SAME: <vscale x 32 x i1> [[VM:%.*]], <vscale x 32 x i16> [[VD:%.*]], <vscale x 32 x bfloat> [[VS2:%.*]], i64 noundef [[VL:%.*]]) #[[ATTR0]] {
+// CHECK-RV64-NEXT: [[ENTRY:.*:]]
+// CHECK-RV64-NEXT: [[TMP0:%.*]] = call <vscale x 32 x i16> @llvm.riscv.vfclass.mask.nxv32bf16.i64(<vscale x 32 x i16> [[VD]], <vscale x 32 x bfloat> [[VS2]], <vscale x 32 x i1> [[VM]], i64 [[VL]], i64 0)
+// CHECK-RV64-NEXT: ret <vscale x 32 x i16> [[TMP0]]
+//
+vuint16m8_t test_vfclass_v_bf16m8_u16m8_tumu(vbool2_t vm, vuint16m8_t vd,
+ vbfloat16m8_t vs2, size_t vl) {
+ return __riscv_vfclass_v_bf16m8_u16m8_tumu(vm, vd, vs2, vl);
+}
+
+// CHECK-RV64-LABEL: define dso_local <vscale x 1 x i16> @test_vfclass_v_bf16mf4_u16mf4_mu(
+// CHECK-RV64-SAME: <vscale x 1 x i1> [[VM:%.*]], <vscale x 1 x i16> [[VD:%.*]], <vscale x 1 x bfloat> [[VS2:%.*]], i64 noundef [[VL:%.*]]) #[[ATTR0]] {
+// CHECK-RV64-NEXT: [[ENTRY:.*:]]
+// CHECK-RV64-NEXT: [[TMP0:%.*]] = call <vscale x 1 x i16> @llvm.riscv.vfclass.mask.nxv1bf16.i64(<vscale x 1 x i16> [[VD]], <vscale x 1 x bfloat> [[VS2]], <vscale x 1 x i1> [[VM]], i64 [[VL]], i64 1)
+// CHECK-RV64-NEXT: ret <vscale x 1 x i16> [[TMP0]]
+//
+vuint16mf4_t test_vfclass_v_bf16mf4_u16mf4_mu(vbool64_t vm, vuint16mf4_t vd,
+ vbfloat16mf4_t vs2, size_t vl) {
+ return __riscv_vfclass_v_bf16mf4_u16mf4_mu(vm, vd, vs2, vl);
+}
+
+// CHECK-RV64-LABEL: define dso_local <vscale x 2 x i16> @test_vfclass_v_bf16mf2_u16mf2_mu(
+// CHECK-RV64-SAME: <vscale x 2 x i1> [[VM:%.*]], <vscale x 2 x i16> [[VD:%.*]], <vscale x 2 x bfloat> [[VS2:%.*]], i64 noundef [[VL:%.*]]) #[[ATTR0]] {
+// CHECK-RV64-NEXT: [[ENTRY:.*:]]
+// CHECK-RV64-NEXT: [[TMP0:%.*]] = call <vscale x 2 x i16> @llvm.riscv.vfclass.mask.nxv2bf16.i64(<vscale x 2 x i16> [[VD]], <vscale x 2 x bfloat> [[VS2]], <vscale x 2 x i1> [[VM]], i64 [[VL]], i64 1)
+// CHECK-RV64-NEXT: ret <vscale x 2 x i16> [[TMP0]]
+//
+vuint16mf2_t test_vfclass_v_bf16mf2_u16mf2_mu(vbool32_t vm, vuint16mf2_t vd,
+ vbfloat16mf2_t vs2, size_t vl) {
+ return __riscv_vfclass_v_bf16mf2_u16mf2_mu(vm, vd, vs2, vl);
+}
+
+// CHECK-RV64-LABEL: define dso_local <vscale x 4 x i16> @test_vfclass_v_bf16m1_u16m1_mu(
+// CHECK-RV64-SAME: <vscale x 4 x i1> [[VM:%.*]], <vscale x 4 x i16> [[VD:%.*]], <vscale x 4 x bfloat> [[VS2:%.*]], i64 noundef [[VL:%.*]]) #[[ATTR0]] {
+// CHECK-RV64-NEXT: [[ENTRY:.*:]]
+// CHECK-RV64-NEXT: [[TMP0:%.*]] = call <vscale x 4 x i16> @llvm.riscv.vfclass.mask.nxv4bf16.i64(<vscale x 4 x i16> [[VD]], <vscale x 4 x bfloat> [[VS2]], <vscale x 4 x i1> [[VM]], i64 [[VL]], i64 1)
+// CHECK-RV64-NEXT: ret <vscale x 4 x i16> [[TMP0]]
+//
+vuint16m1_t test_vfclass_v_bf16m1_u16m1_mu(vbool16_t vm, vuint16m1_t vd,
+ vbfloat16m1_t vs2, size_t vl) {
+ return __riscv_vfclass_v_bf16m1_u16m1_mu(vm, vd, vs2, vl);
+}
+
+// CHECK-RV64-LABEL: define dso_local <vscale x 8 x i16> @test_vfclass_v_bf16m2_u16m2_mu(
+// CHECK-RV64-SAME: <vscale x 8 x i1> [[VM:%.*]], <vscale x 8 x i16> [[VD:%.*]], <vscale x 8 x bfloat> [[VS2:%.*]], i64 noundef [[VL:%.*]]) #[[ATTR0]] {
+// CHECK-RV64-NEXT: [[ENTRY:.*:]]
+// CHECK-RV64-NEXT: [[TMP0:%.*]] = call <vscale x 8 x i16> @llvm.riscv.vfclass.mask.nxv8bf16.i64(<vscale x 8 x i16> [[VD]], <vscale x 8 x bfloat> [[VS2]], <vscale x 8 x i1> [[VM]], i64 [[VL]], i64 1)
+// CHECK-RV64-NEXT: ret <vscale x 8 x i16> [[TMP0]]
+//
+vuint16m2_t test_vfclass_v_bf16m2_u16m2_mu(vbool8_t vm, vuint16m2_t vd,
+ vbfloat16m2_t vs2, size_t vl) {
+ return __riscv_vfclass_v_bf16m2_u16m2_mu(vm, vd, vs2, vl);
+}
+
+// CHECK-RV64-LABEL: define dso_local <vscale x 16 x i16> @test_vfclass_v_bf16m4_u16m4_mu(
+// CHECK-RV64-SAME: <vscale x 16 x i1> [[VM:%.*]], <vscale x 16 x i16> [[VD:%.*]], <vscale x 16 x bfloat> [[VS2:%.*]], i64 noundef [[VL:%.*]]) #[[ATTR0]] {
+// CHECK-RV64-NEXT: [[ENTRY:.*:]]
+// CHECK-RV64-NEXT: [[TMP0:%.*]] = call <vscale x 16 x i16> @llvm.riscv.vfclass.mask.nxv16bf16.i64(<vscale x 16 x i16> [[VD]], <vscale x 16 x bfloat> [[VS2]], <vscale x 16 x i1> [[VM]], i64 [[VL]], i64 1)
+// CHECK-RV64-NEXT: ret <vscale x 16 x i16> [[TMP0]]
+//
+vuint16m4_t test_vfclass_v_bf16m4_u16m4_mu(vbool4_t vm, vuint16m4_t vd,
+ vbfloat16m4_t vs2, size_t vl) {
+ return __riscv_vfclass_v_bf16m4_u16m4_mu(vm, vd, vs2, vl);
+}
+
+// CHECK-RV64-LABEL: define dso_local <vscale x 32 x i16> @test_vfclass_v_bf16m8_u16m8_mu(
+// CHECK-RV64-SAME: <vscale x 32 x i1> [[VM:%.*]], <vscale x 32 x i16> [[VD:%.*]], <vscale x 32 x bfloat> [[VS2:%.*]], i64 noundef [[VL:%.*]]) #[[ATTR0]] {
+// CHECK-RV64-NEXT: [[ENTRY:.*:]]
+// CHECK-RV64-NEXT: [[TMP0:%.*]] = call <vscale x 32 x i16> @llvm.riscv.vfclass.mask.nxv32bf16.i64(<vscale x 32 x i16> [[VD]], <vscale x 32 x bfloat> [[VS2]], <vscale x 32 x i1> [[VM]], i64 [[VL]], i64 1)
+// CHECK-RV64-NEXT: ret <vscale x 32 x i16> [[TMP0]]
+//
+vuint16m8_t test_vfclass_v_bf16m8_u16m8_mu(vbool2_t vm, vuint16m8_t vd,
+ vbfloat16m8_t vs2, size_t vl) {
+ return __riscv_vfclass_v_bf16m8_u16m8_mu(vm, vd, vs2, vl);
+}
diff --git a/clang/test/CodeGen/RISCV/rvv-intrinsics-autogenerated/zvfbfa/policy/non-overloaded/vfmacc.c b/clang/test/CodeGen/RISCV/rvv-intrinsics-autogenerated/zvfbfa/policy/non-overloaded/vfmacc.c
new file mode 100644
index 0000000..2d4e481
--- /dev/null
+++ b/clang/test/CodeGen/RISCV/rvv-intrinsics-autogenerated/zvfbfa/policy/non-overloaded/vfmacc.c
@@ -0,0 +1,489 @@
+// NOTE: Assertions have been autogenerated by utils/update_cc_test_checks.py UTC_ARGS: --version 5
+// REQUIRES: riscv-registered-target
+// RUN: %clang_cc1 -triple riscv64 -target-feature +v \
+// RUN: -target-feature +experimental-zvfbfa -disable-O0-optnone \
+// RUN: -emit-llvm %s -o - | opt -S -passes=mem2reg | \
+// RUN: FileCheck --check-prefix=CHECK-RV64 %s
+
+#include <riscv_vector.h>
+
+// CHECK-RV64-LABEL: define dso_local <vscale x 1 x bfloat> @test_vfmacc_vv_bf16mf4_tu(
+// CHECK-RV64-SAME: <vscale x 1 x bfloat> [[VD:%.*]], <vscale x 1 x bfloat> [[VS1:%.*]], <vscale x 1 x bfloat> [[VS2:%.*]], i64 noundef [[VL:%.*]]) #[[ATTR0:[0-9]+]] {
+// CHECK-RV64-NEXT: [[ENTRY:.*:]]
+// CHECK-RV64-NEXT: [[TMP0:%.*]] = call <vscale x 1 x bfloat> @llvm.riscv.vfmacc.nxv1bf16.nxv1bf16.i64(<vscale x 1 x bfloat> [[VD]], <vscale x 1 x bfloat> [[VS1]], <vscale x 1 x bfloat> [[VS2]], i64 7, i64 [[VL]], i64 2)
+// CHECK-RV64-NEXT: ret <vscale x 1 x bfloat> [[TMP0]]
+//
+vbfloat16mf4_t test_vfmacc_vv_bf16mf4_tu(vbfloat16mf4_t vd, vbfloat16mf4_t vs1, vbfloat16mf4_t vs2, size_t vl) {
+ return __riscv_vfmacc_vv_bf16mf4_tu(vd, vs1, vs2, vl);
+}
+
+// CHECK-RV64-LABEL: define dso_local <vscale x 1 x bfloat> @test_vfmacc_vf_bf16mf4_tu(
+// CHECK-RV64-SAME: <vscale x 1 x bfloat> [[VD:%.*]], bfloat noundef [[RS1:%.*]], <vscale x 1 x bfloat> [[VS2:%.*]], i64 noundef [[VL:%.*]]) #[[ATTR0]] {
+// CHECK-RV64-NEXT: [[ENTRY:.*:]]
+// CHECK-RV64-NEXT: [[TMP0:%.*]] = call <vscale x 1 x bfloat> @llvm.riscv.vfmacc.nxv1bf16.bf16.i64(<vscale x 1 x bfloat> [[VD]], bfloat [[RS1]], <vscale x 1 x bfloat> [[VS2]], i64 7, i64 [[VL]], i64 2)
+// CHECK-RV64-NEXT: ret <vscale x 1 x bfloat> [[TMP0]]
+//
+vbfloat16mf4_t test_vfmacc_vf_bf16mf4_tu(vbfloat16mf4_t vd, __bf16 rs1, vbfloat16mf4_t vs2, size_t vl) {
+ return __riscv_vfmacc_vf_bf16mf4_tu(vd, rs1, vs2, vl);
+}
+
+// CHECK-RV64-LABEL: define dso_local <vscale x 2 x bfloat> @test_vfmacc_vv_bf16mf2_tu(
+// CHECK-RV64-SAME: <vscale x 2 x bfloat> [[VD:%.*]], <vscale x 2 x bfloat> [[VS1:%.*]], <vscale x 2 x bfloat> [[VS2:%.*]], i64 noundef [[VL:%.*]]) #[[ATTR0]] {
+// CHECK-RV64-NEXT: [[ENTRY:.*:]]
+// CHECK-RV64-NEXT: [[TMP0:%.*]] = call <vscale x 2 x bfloat> @llvm.riscv.vfmacc.nxv2bf16.nxv2bf16.i64(<vscale x 2 x bfloat> [[VD]], <vscale x 2 x bfloat> [[VS1]], <vscale x 2 x bfloat> [[VS2]], i64 7, i64 [[VL]], i64 2)
+// CHECK-RV64-NEXT: ret <vscale x 2 x bfloat> [[TMP0]]
+//
+vbfloat16mf2_t test_vfmacc_vv_bf16mf2_tu(vbfloat16mf2_t vd, vbfloat16mf2_t vs1, vbfloat16mf2_t vs2, size_t vl) {
+ return __riscv_vfmacc_vv_bf16mf2_tu(vd, vs1, vs2, vl);
+}
+
+// CHECK-RV64-LABEL: define dso_local <vscale x 2 x bfloat> @test_vfmacc_vf_bf16mf2_tu(
+// CHECK-RV64-SAME: <vscale x 2 x bfloat> [[VD:%.*]], bfloat noundef [[RS1:%.*]], <vscale x 2 x bfloat> [[VS2:%.*]], i64 noundef [[VL:%.*]]) #[[ATTR0]] {
+// CHECK-RV64-NEXT: [[ENTRY:.*:]]
+// CHECK-RV64-NEXT: [[TMP0:%.*]] = call <vscale x 2 x bfloat> @llvm.riscv.vfmacc.nxv2bf16.bf16.i64(<vscale x 2 x bfloat> [[VD]], bfloat [[RS1]], <vscale x 2 x bfloat> [[VS2]], i64 7, i64 [[VL]], i64 2)
+// CHECK-RV64-NEXT: ret <vscale x 2 x bfloat> [[TMP0]]
+//
+vbfloat16mf2_t test_vfmacc_vf_bf16mf2_tu(vbfloat16mf2_t vd, __bf16 rs1, vbfloat16mf2_t vs2, size_t vl) {
+ return __riscv_vfmacc_vf_bf16mf2_tu(vd, rs1, vs2, vl);
+}
+
+// CHECK-RV64-LABEL: define dso_local <vscale x 4 x bfloat> @test_vfmacc_vv_bf16m1_tu(
+// CHECK-RV64-SAME: <vscale x 4 x bfloat> [[VD:%.*]], <vscale x 4 x bfloat> [[VS1:%.*]], <vscale x 4 x bfloat> [[VS2:%.*]], i64 noundef [[VL:%.*]]) #[[ATTR0]] {
+// CHECK-RV64-NEXT: [[ENTRY:.*:]]
+// CHECK-RV64-NEXT: [[TMP0:%.*]] = call <vscale x 4 x bfloat> @llvm.riscv.vfmacc.nxv4bf16.nxv4bf16.i64(<vscale x 4 x bfloat> [[VD]], <vscale x 4 x bfloat> [[VS1]], <vscale x 4 x bfloat> [[VS2]], i64 7, i64 [[VL]], i64 2)
+// CHECK-RV64-NEXT: ret <vscale x 4 x bfloat> [[TMP0]]
+//
+vbfloat16m1_t test_vfmacc_vv_bf16m1_tu(vbfloat16m1_t vd, vbfloat16m1_t vs1, vbfloat16m1_t vs2, size_t vl) {
+ return __riscv_vfmacc_vv_bf16m1_tu(vd, vs1, vs2, vl);
+}
+
+// CHECK-RV64-LABEL: define dso_local <vscale x 4 x bfloat> @test_vfmacc_vf_bf16m1_tu(
+// CHECK-RV64-SAME: <vscale x 4 x bfloat> [[VD:%.*]], bfloat noundef [[RS1:%.*]], <vscale x 4 x bfloat> [[VS2:%.*]], i64 noundef [[VL:%.*]]) #[[ATTR0]] {
+// CHECK-RV64-NEXT: [[ENTRY:.*:]]
+// CHECK-RV64-NEXT: [[TMP0:%.*]] = call <vscale x 4 x bfloat> @llvm.riscv.vfmacc.nxv4bf16.bf16.i64(<vscale x 4 x bfloat> [[VD]], bfloat [[RS1]], <vscale x 4 x bfloat> [[VS2]], i64 7, i64 [[VL]], i64 2)
+// CHECK-RV64-NEXT: ret <vscale x 4 x bfloat> [[TMP0]]
+//
+vbfloat16m1_t test_vfmacc_vf_bf16m1_tu(vbfloat16m1_t vd, __bf16 rs1, vbfloat16m1_t vs2, size_t vl) {
+ return __riscv_vfmacc_vf_bf16m1_tu(vd, rs1, vs2, vl);
+}
+
+// CHECK-RV64-LABEL: define dso_local <vscale x 8 x bfloat> @test_vfmacc_vv_bf16m2_tu(
+// CHECK-RV64-SAME: <vscale x 8 x bfloat> [[VD:%.*]], <vscale x 8 x bfloat> [[VS1:%.*]], <vscale x 8 x bfloat> [[VS2:%.*]], i64 noundef [[VL:%.*]]) #[[ATTR0]] {
+// CHECK-RV64-NEXT: [[ENTRY:.*:]]
+// CHECK-RV64-NEXT: [[TMP0:%.*]] = call <vscale x 8 x bfloat> @llvm.riscv.vfmacc.nxv8bf16.nxv8bf16.i64(<vscale x 8 x bfloat> [[VD]], <vscale x 8 x bfloat> [[VS1]], <vscale x 8 x bfloat> [[VS2]], i64 7, i64 [[VL]], i64 2)
+// CHECK-RV64-NEXT: ret <vscale x 8 x bfloat> [[TMP0]]
+//
+vbfloat16m2_t test_vfmacc_vv_bf16m2_tu(vbfloat16m2_t vd, vbfloat16m2_t vs1, vbfloat16m2_t vs2, size_t vl) {
+ return __riscv_vfmacc_vv_bf16m2_tu(vd, vs1, vs2, vl);
+}
+
+// CHECK-RV64-LABEL: define dso_local <vscale x 8 x bfloat> @test_vfmacc_vf_bf16m2_tu(
+// CHECK-RV64-SAME: <vscale x 8 x bfloat> [[VD:%.*]], bfloat noundef [[RS1:%.*]], <vscale x 8 x bfloat> [[VS2:%.*]], i64 noundef [[VL:%.*]]) #[[ATTR0]] {
+// CHECK-RV64-NEXT: [[ENTRY:.*:]]
+// CHECK-RV64-NEXT: [[TMP0:%.*]] = call <vscale x 8 x bfloat> @llvm.riscv.vfmacc.nxv8bf16.bf16.i64(<vscale x 8 x bfloat> [[VD]], bfloat [[RS1]], <vscale x 8 x bfloat> [[VS2]], i64 7, i64 [[VL]], i64 2)
+// CHECK-RV64-NEXT: ret <vscale x 8 x bfloat> [[TMP0]]
+//
+vbfloat16m2_t test_vfmacc_vf_bf16m2_tu(vbfloat16m2_t vd, __bf16 rs1, vbfloat16m2_t vs2, size_t vl) {
+ return __riscv_vfmacc_vf_bf16m2_tu(vd, rs1, vs2, vl);
+}
+
+// CHECK-RV64-LABEL: define dso_local <vscale x 16 x bfloat> @test_vfmacc_vv_bf16m4_tu(
+// CHECK-RV64-SAME: <vscale x 16 x bfloat> [[VD:%.*]], <vscale x 16 x bfloat> [[VS1:%.*]], <vscale x 16 x bfloat> [[VS2:%.*]], i64 noundef [[VL:%.*]]) #[[ATTR0]] {
+// CHECK-RV64-NEXT: [[ENTRY:.*:]]
+// CHECK-RV64-NEXT: [[TMP0:%.*]] = call <vscale x 16 x bfloat> @llvm.riscv.vfmacc.nxv16bf16.nxv16bf16.i64(<vscale x 16 x bfloat> [[VD]], <vscale x 16 x bfloat> [[VS1]], <vscale x 16 x bfloat> [[VS2]], i64 7, i64 [[VL]], i64 2)
+// CHECK-RV64-NEXT: ret <vscale x 16 x bfloat> [[TMP0]]
+//
+vbfloat16m4_t test_vfmacc_vv_bf16m4_tu(vbfloat16m4_t vd, vbfloat16m4_t vs1, vbfloat16m4_t vs2, size_t vl) {
+ return __riscv_vfmacc_vv_bf16m4_tu(vd, vs1, vs2, vl);
+}
+
+// CHECK-RV64-LABEL: define dso_local <vscale x 16 x bfloat> @test_vfmacc_vf_bf16m4_tu(
+// CHECK-RV64-SAME: <vscale x 16 x bfloat> [[VD:%.*]], bfloat noundef [[RS1:%.*]], <vscale x 16 x bfloat> [[VS2:%.*]], i64 noundef [[VL:%.*]]) #[[ATTR0]] {
+// CHECK-RV64-NEXT: [[ENTRY:.*:]]
+// CHECK-RV64-NEXT: [[TMP0:%.*]] = call <vscale x 16 x bfloat> @llvm.riscv.vfmacc.nxv16bf16.bf16.i64(<vscale x 16 x bfloat> [[VD]], bfloat [[RS1]], <vscale x 16 x bfloat> [[VS2]], i64 7, i64 [[VL]], i64 2)
+// CHECK-RV64-NEXT: ret <vscale x 16 x bfloat> [[TMP0]]
+//
+vbfloat16m4_t test_vfmacc_vf_bf16m4_tu(vbfloat16m4_t vd, __bf16 rs1, vbfloat16m4_t vs2, size_t vl) {
+ return __riscv_vfmacc_vf_bf16m4_tu(vd, rs1, vs2, vl);
+}
+
+// CHECK-RV64-LABEL: define dso_local <vscale x 32 x bfloat> @test_vfmacc_vv_bf16m8_tu(
+// CHECK-RV64-SAME: <vscale x 32 x bfloat> [[VD:%.*]], <vscale x 32 x bfloat> [[VS1:%.*]], <vscale x 32 x bfloat> [[VS2:%.*]], i64 noundef [[VL:%.*]]) #[[ATTR0]] {
+// CHECK-RV64-NEXT: [[ENTRY:.*:]]
+// CHECK-RV64-NEXT: [[TMP0:%.*]] = call <vscale x 32 x bfloat> @llvm.riscv.vfmacc.nxv32bf16.nxv32bf16.i64(<vscale x 32 x bfloat> [[VD]], <vscale x 32 x bfloat> [[VS1]], <vscale x 32 x bfloat> [[VS2]], i64 7, i64 [[VL]], i64 2)
+// CHECK-RV64-NEXT: ret <vscale x 32 x bfloat> [[TMP0]]
+//
+vbfloat16m8_t test_vfmacc_vv_bf16m8_tu(vbfloat16m8_t vd, vbfloat16m8_t vs1, vbfloat16m8_t vs2, size_t vl) {
+ return __riscv_vfmacc_vv_bf16m8_tu(vd, vs1, vs2, vl);
+}
+
+// CHECK-RV64-LABEL: define dso_local <vscale x 32 x bfloat> @test_vfmacc_vf_bf16m8_tu(
+// CHECK-RV64-SAME: <vscale x 32 x bfloat> [[VD:%.*]], bfloat noundef [[RS1:%.*]], <vscale x 32 x bfloat> [[VS2:%.*]], i64 noundef [[VL:%.*]]) #[[ATTR0]] {
+// CHECK-RV64-NEXT: [[ENTRY:.*:]]
+// CHECK-RV64-NEXT: [[TMP0:%.*]] = call <vscale x 32 x bfloat> @llvm.riscv.vfmacc.nxv32bf16.bf16.i64(<vscale x 32 x bfloat> [[VD]], bfloat [[RS1]], <vscale x 32 x bfloat> [[VS2]], i64 7, i64 [[VL]], i64 2)
+// CHECK-RV64-NEXT: ret <vscale x 32 x bfloat> [[TMP0]]
+//
+vbfloat16m8_t test_vfmacc_vf_bf16m8_tu(vbfloat16m8_t vd, __bf16 rs1, vbfloat16m8_t vs2, size_t vl) {
+ return __riscv_vfmacc_vf_bf16m8_tu(vd, rs1, vs2, vl);
+}
+
+// CHECK-RV64-LABEL: define dso_local <vscale x 1 x bfloat> @test_vfmacc_vv_bf16mf4_tum(
+// CHECK-RV64-SAME: <vscale x 1 x i1> [[MASK:%.*]], <vscale x 1 x bfloat> [[VD:%.*]], <vscale x 1 x bfloat> [[VS1:%.*]], <vscale x 1 x bfloat> [[VS2:%.*]], i64 noundef [[VL:%.*]]) #[[ATTR0]] {
+// CHECK-RV64-NEXT: [[ENTRY:.*:]]
+// CHECK-RV64-NEXT: [[TMP0:%.*]] = call <vscale x 1 x bfloat> @llvm.riscv.vfmacc.mask.nxv1bf16.nxv1bf16.i64(<vscale x 1 x bfloat> [[VD]], <vscale x 1 x bfloat> [[VS1]], <vscale x 1 x bfloat> [[VS2]], <vscale x 1 x i1> [[MASK]], i64 7, i64 [[VL]], i64 2)
+// CHECK-RV64-NEXT: ret <vscale x 1 x bfloat> [[TMP0]]
+//
+vbfloat16mf4_t test_vfmacc_vv_bf16mf4_tum(vbool64_t mask, vbfloat16mf4_t vd, vbfloat16mf4_t vs1, vbfloat16mf4_t vs2, size_t vl) {
+ return __riscv_vfmacc_vv_bf16mf4_tum(mask, vd, vs1, vs2, vl);
+}
+
+// CHECK-RV64-LABEL: define dso_local <vscale x 1 x bfloat> @test_vfmacc_vf_bf16mf4_tum(
+// CHECK-RV64-SAME: <vscale x 1 x i1> [[MASK:%.*]], <vscale x 1 x bfloat> [[VD:%.*]], bfloat noundef [[RS1:%.*]], <vscale x 1 x bfloat> [[VS2:%.*]], i64 noundef [[VL:%.*]]) #[[ATTR0]] {
+// CHECK-RV64-NEXT: [[ENTRY:.*:]]
+// CHECK-RV64-NEXT: [[TMP0:%.*]] = call <vscale x 1 x bfloat> @llvm.riscv.vfmacc.mask.nxv1bf16.bf16.i64(<vscale x 1 x bfloat> [[VD]], bfloat [[RS1]], <vscale x 1 x bfloat> [[VS2]], <vscale x 1 x i1> [[MASK]], i64 7, i64 [[VL]], i64 2)
+// CHECK-RV64-NEXT: ret <vscale x 1 x bfloat> [[TMP0]]
+//
+vbfloat16mf4_t test_vfmacc_vf_bf16mf4_tum(vbool64_t mask, vbfloat16mf4_t vd, __bf16 rs1, vbfloat16mf4_t vs2, size_t vl) {
+ return __riscv_vfmacc_vf_bf16mf4_tum(mask, vd, rs1, vs2, vl);
+}
+
+// CHECK-RV64-LABEL: define dso_local <vscale x 2 x bfloat> @test_vfmacc_vv_bf16mf2_tum(
+// CHECK-RV64-SAME: <vscale x 2 x i1> [[MASK:%.*]], <vscale x 2 x bfloat> [[VD:%.*]], <vscale x 2 x bfloat> [[VS1:%.*]], <vscale x 2 x bfloat> [[VS2:%.*]], i64 noundef [[VL:%.*]]) #[[ATTR0]] {
+// CHECK-RV64-NEXT: [[ENTRY:.*:]]
+// CHECK-RV64-NEXT: [[TMP0:%.*]] = call <vscale x 2 x bfloat> @llvm.riscv.vfmacc.mask.nxv2bf16.nxv2bf16.i64(<vscale x 2 x bfloat> [[VD]], <vscale x 2 x bfloat> [[VS1]], <vscale x 2 x bfloat> [[VS2]], <vscale x 2 x i1> [[MASK]], i64 7, i64 [[VL]], i64 2)
+// CHECK-RV64-NEXT: ret <vscale x 2 x bfloat> [[TMP0]]
+//
+vbfloat16mf2_t test_vfmacc_vv_bf16mf2_tum(vbool32_t mask, vbfloat16mf2_t vd, vbfloat16mf2_t vs1, vbfloat16mf2_t vs2, size_t vl) {
+ return __riscv_vfmacc_vv_bf16mf2_tum(mask, vd, vs1, vs2, vl);
+}
+
+// CHECK-RV64-LABEL: define dso_local <vscale x 2 x bfloat> @test_vfmacc_vf_bf16mf2_tum(
+// CHECK-RV64-SAME: <vscale x 2 x i1> [[MASK:%.*]], <vscale x 2 x bfloat> [[VD:%.*]], bfloat noundef [[RS1:%.*]], <vscale x 2 x bfloat> [[VS2:%.*]], i64 noundef [[VL:%.*]]) #[[ATTR0]] {
+// CHECK-RV64-NEXT: [[ENTRY:.*:]]
+// CHECK-RV64-NEXT: [[TMP0:%.*]] = call <vscale x 2 x bfloat> @llvm.riscv.vfmacc.mask.nxv2bf16.bf16.i64(<vscale x 2 x bfloat> [[VD]], bfloat [[RS1]], <vscale x 2 x bfloat> [[VS2]], <vscale x 2 x i1> [[MASK]], i64 7, i64 [[VL]], i64 2)
+// CHECK-RV64-NEXT: ret <vscale x 2 x bfloat> [[TMP0]]
+//
+vbfloat16mf2_t test_vfmacc_vf_bf16mf2_tum(vbool32_t mask, vbfloat16mf2_t vd, __bf16 rs1, vbfloat16mf2_t vs2, size_t vl) {
+ return __riscv_vfmacc_vf_bf16mf2_tum(mask, vd, rs1, vs2, vl);
+}
+
+// CHECK-RV64-LABEL: define dso_local <vscale x 4 x bfloat> @test_vfmacc_vv_bf16m1_tum(
+// CHECK-RV64-SAME: <vscale x 4 x i1> [[MASK:%.*]], <vscale x 4 x bfloat> [[VD:%.*]], <vscale x 4 x bfloat> [[VS1:%.*]], <vscale x 4 x bfloat> [[VS2:%.*]], i64 noundef [[VL:%.*]]) #[[ATTR0]] {
+// CHECK-RV64-NEXT: [[ENTRY:.*:]]
+// CHECK-RV64-NEXT: [[TMP0:%.*]] = call <vscale x 4 x bfloat> @llvm.riscv.vfmacc.mask.nxv4bf16.nxv4bf16.i64(<vscale x 4 x bfloat> [[VD]], <vscale x 4 x bfloat> [[VS1]], <vscale x 4 x bfloat> [[VS2]], <vscale x 4 x i1> [[MASK]], i64 7, i64 [[VL]], i64 2)
+// CHECK-RV64-NEXT: ret <vscale x 4 x bfloat> [[TMP0]]
+//
+vbfloat16m1_t test_vfmacc_vv_bf16m1_tum(vbool16_t mask, vbfloat16m1_t vd, vbfloat16m1_t vs1, vbfloat16m1_t vs2, size_t vl) {
+ return __riscv_vfmacc_vv_bf16m1_tum(mask, vd, vs1, vs2, vl);
+}
+
+// CHECK-RV64-LABEL: define dso_local <vscale x 4 x bfloat> @test_vfmacc_vf_bf16m1_tum(
+// CHECK-RV64-SAME: <vscale x 4 x i1> [[MASK:%.*]], <vscale x 4 x bfloat> [[VD:%.*]], bfloat noundef [[RS1:%.*]], <vscale x 4 x bfloat> [[VS2:%.*]], i64 noundef [[VL:%.*]]) #[[ATTR0]] {
+// CHECK-RV64-NEXT: [[ENTRY:.*:]]
+// CHECK-RV64-NEXT: [[TMP0:%.*]] = call <vscale x 4 x bfloat> @llvm.riscv.vfmacc.mask.nxv4bf16.bf16.i64(<vscale x 4 x bfloat> [[VD]], bfloat [[RS1]], <vscale x 4 x bfloat> [[VS2]], <vscale x 4 x i1> [[MASK]], i64 7, i64 [[VL]], i64 2)
+// CHECK-RV64-NEXT: ret <vscale x 4 x bfloat> [[TMP0]]
+//
+vbfloat16m1_t test_vfmacc_vf_bf16m1_tum(vbool16_t mask, vbfloat16m1_t vd, __bf16 rs1, vbfloat16m1_t vs2, size_t vl) {
+ return __riscv_vfmacc_vf_bf16m1_tum(mask, vd, rs1, vs2, vl);
+}
+
+// CHECK-RV64-LABEL: define dso_local <vscale x 8 x bfloat> @test_vfmacc_vv_bf16m2_tum(
+// CHECK-RV64-SAME: <vscale x 8 x i1> [[MASK:%.*]], <vscale x 8 x bfloat> [[VD:%.*]], <vscale x 8 x bfloat> [[VS1:%.*]], <vscale x 8 x bfloat> [[VS2:%.*]], i64 noundef [[VL:%.*]]) #[[ATTR0]] {
+// CHECK-RV64-NEXT: [[ENTRY:.*:]]
+// CHECK-RV64-NEXT: [[TMP0:%.*]] = call <vscale x 8 x bfloat> @llvm.riscv.vfmacc.mask.nxv8bf16.nxv8bf16.i64(<vscale x 8 x bfloat> [[VD]], <vscale x 8 x bfloat> [[VS1]], <vscale x 8 x bfloat> [[VS2]], <vscale x 8 x i1> [[MASK]], i64 7, i64 [[VL]], i64 2)
+// CHECK-RV64-NEXT: ret <vscale x 8 x bfloat> [[TMP0]]
+//
+vbfloat16m2_t test_vfmacc_vv_bf16m2_tum(vbool8_t mask, vbfloat16m2_t vd, vbfloat16m2_t vs1, vbfloat16m2_t vs2, size_t vl) {
+ return __riscv_vfmacc_vv_bf16m2_tum(mask, vd, vs1, vs2, vl);
+}
+
+// CHECK-RV64-LABEL: define dso_local <vscale x 8 x bfloat> @test_vfmacc_vf_bf16m2_tum(
+// CHECK-RV64-SAME: <vscale x 8 x i1> [[MASK:%.*]], <vscale x 8 x bfloat> [[VD:%.*]], bfloat noundef [[RS1:%.*]], <vscale x 8 x bfloat> [[VS2:%.*]], i64 noundef [[VL:%.*]]) #[[ATTR0]] {
+// CHECK-RV64-NEXT: [[ENTRY:.*:]]
+// CHECK-RV64-NEXT: [[TMP0:%.*]] = call <vscale x 8 x bfloat> @llvm.riscv.vfmacc.mask.nxv8bf16.bf16.i64(<vscale x 8 x bfloat> [[VD]], bfloat [[RS1]], <vscale x 8 x bfloat> [[VS2]], <vscale x 8 x i1> [[MASK]], i64 7, i64 [[VL]], i64 2)
+// CHECK-RV64-NEXT: ret <vscale x 8 x bfloat> [[TMP0]]
+//
+vbfloat16m2_t test_vfmacc_vf_bf16m2_tum(vbool8_t mask, vbfloat16m2_t vd, __bf16 rs1, vbfloat16m2_t vs2, size_t vl) {
+ return __riscv_vfmacc_vf_bf16m2_tum(mask, vd, rs1, vs2, vl);
+}
+
+// CHECK-RV64-LABEL: define dso_local <vscale x 16 x bfloat> @test_vfmacc_vv_bf16m4_tum(
+// CHECK-RV64-SAME: <vscale x 16 x i1> [[MASK:%.*]], <vscale x 16 x bfloat> [[VD:%.*]], <vscale x 16 x bfloat> [[VS1:%.*]], <vscale x 16 x bfloat> [[VS2:%.*]], i64 noundef [[VL:%.*]]) #[[ATTR0]] {
+// CHECK-RV64-NEXT: [[ENTRY:.*:]]
+// CHECK-RV64-NEXT: [[TMP0:%.*]] = call <vscale x 16 x bfloat> @llvm.riscv.vfmacc.mask.nxv16bf16.nxv16bf16.i64(<vscale x 16 x bfloat> [[VD]], <vscale x 16 x bfloat> [[VS1]], <vscale x 16 x bfloat> [[VS2]], <vscale x 16 x i1> [[MASK]], i64 7, i64 [[VL]], i64 2)
+// CHECK-RV64-NEXT: ret <vscale x 16 x bfloat> [[TMP0]]
+//
+vbfloat16m4_t test_vfmacc_vv_bf16m4_tum(vbool4_t mask, vbfloat16m4_t vd, vbfloat16m4_t vs1, vbfloat16m4_t vs2, size_t vl) {
+ return __riscv_vfmacc_vv_bf16m4_tum(mask, vd, vs1, vs2, vl);
+}
+
+// CHECK-RV64-LABEL: define dso_local <vscale x 16 x bfloat> @test_vfmacc_vf_bf16m4_tum(
+// CHECK-RV64-SAME: <vscale x 16 x i1> [[MASK:%.*]], <vscale x 16 x bfloat> [[VD:%.*]], bfloat noundef [[RS1:%.*]], <vscale x 16 x bfloat> [[VS2:%.*]], i64 noundef [[VL:%.*]]) #[[ATTR0]] {
+// CHECK-RV64-NEXT: [[ENTRY:.*:]]
+// CHECK-RV64-NEXT: [[TMP0:%.*]] = call <vscale x 16 x bfloat> @llvm.riscv.vfmacc.mask.nxv16bf16.bf16.i64(<vscale x 16 x bfloat> [[VD]], bfloat [[RS1]], <vscale x 16 x bfloat> [[VS2]], <vscale x 16 x i1> [[MASK]], i64 7, i64 [[VL]], i64 2)
+// CHECK-RV64-NEXT: ret <vscale x 16 x bfloat> [[TMP0]]
+//
+vbfloat16m4_t test_vfmacc_vf_bf16m4_tum(vbool4_t mask, vbfloat16m4_t vd, __bf16 rs1, vbfloat16m4_t vs2, size_t vl) {
+ return __riscv_vfmacc_vf_bf16m4_tum(mask, vd, rs1, vs2, vl);
+}
+
+// CHECK-RV64-LABEL: define dso_local <vscale x 32 x bfloat> @test_vfmacc_vv_bf16m8_tum(
+// CHECK-RV64-SAME: <vscale x 32 x i1> [[MASK:%.*]], <vscale x 32 x bfloat> [[VD:%.*]], <vscale x 32 x bfloat> [[VS1:%.*]], <vscale x 32 x bfloat> [[VS2:%.*]], i64 noundef [[VL:%.*]]) #[[ATTR0]] {
+// CHECK-RV64-NEXT: [[ENTRY:.*:]]
+// CHECK-RV64-NEXT: [[TMP0:%.*]] = call <vscale x 32 x bfloat> @llvm.riscv.vfmacc.mask.nxv32bf16.nxv32bf16.i64(<vscale x 32 x bfloat> [[VD]], <vscale x 32 x bfloat> [[VS1]], <vscale x 32 x bfloat> [[VS2]], <vscale x 32 x i1> [[MASK]], i64 7, i64 [[VL]], i64 2)
+// CHECK-RV64-NEXT: ret <vscale x 32 x bfloat> [[TMP0]]
+//
+vbfloat16m8_t test_vfmacc_vv_bf16m8_tum(vbool2_t mask, vbfloat16m8_t vd, vbfloat16m8_t vs1, vbfloat16m8_t vs2, size_t vl) {
+ return __riscv_vfmacc_vv_bf16m8_tum(mask, vd, vs1, vs2, vl);
+}
+
+// CHECK-RV64-LABEL: define dso_local <vscale x 32 x bfloat> @test_vfmacc_vf_bf16m8_tum(
+// CHECK-RV64-SAME: <vscale x 32 x i1> [[MASK:%.*]], <vscale x 32 x bfloat> [[VD:%.*]], bfloat noundef [[RS1:%.*]], <vscale x 32 x bfloat> [[VS2:%.*]], i64 noundef [[VL:%.*]]) #[[ATTR0]] {
+// CHECK-RV64-NEXT: [[ENTRY:.*:]]
+// CHECK-RV64-NEXT: [[TMP0:%.*]] = call <vscale x 32 x bfloat> @llvm.riscv.vfmacc.mask.nxv32bf16.bf16.i64(<vscale x 32 x bfloat> [[VD]], bfloat [[RS1]], <vscale x 32 x bfloat> [[VS2]], <vscale x 32 x i1> [[MASK]], i64 7, i64 [[VL]], i64 2)
+// CHECK-RV64-NEXT: ret <vscale x 32 x bfloat> [[TMP0]]
+//
+vbfloat16m8_t test_vfmacc_vf_bf16m8_tum(vbool2_t mask, vbfloat16m8_t vd, __bf16 rs1, vbfloat16m8_t vs2, size_t vl) {
+ return __riscv_vfmacc_vf_bf16m8_tum(mask, vd, rs1, vs2, vl);
+}
+
+// CHECK-RV64-LABEL: define dso_local <vscale x 1 x bfloat> @test_vfmacc_vv_bf16mf4_tumu(
+// CHECK-RV64-SAME: <vscale x 1 x i1> [[MASK:%.*]], <vscale x 1 x bfloat> [[VD:%.*]], <vscale x 1 x bfloat> [[VS1:%.*]], <vscale x 1 x bfloat> [[VS2:%.*]], i64 noundef [[VL:%.*]]) #[[ATTR0]] {
+// CHECK-RV64-NEXT: [[ENTRY:.*:]]
+// CHECK-RV64-NEXT: [[TMP0:%.*]] = call <vscale x 1 x bfloat> @llvm.riscv.vfmacc.mask.nxv1bf16.nxv1bf16.i64(<vscale x 1 x bfloat> [[VD]], <vscale x 1 x bfloat> [[VS1]], <vscale x 1 x bfloat> [[VS2]], <vscale x 1 x i1> [[MASK]], i64 7, i64 [[VL]], i64 0)
+// CHECK-RV64-NEXT: ret <vscale x 1 x bfloat> [[TMP0]]
+//
+vbfloat16mf4_t test_vfmacc_vv_bf16mf4_tumu(vbool64_t mask, vbfloat16mf4_t vd, vbfloat16mf4_t vs1, vbfloat16mf4_t vs2, size_t vl) {
+ return __riscv_vfmacc_vv_bf16mf4_tumu(mask, vd, vs1, vs2, vl);
+}
+
+// CHECK-RV64-LABEL: define dso_local <vscale x 1 x bfloat> @test_vfmacc_vf_bf16mf4_tumu(
+// CHECK-RV64-SAME: <vscale x 1 x i1> [[MASK:%.*]], <vscale x 1 x bfloat> [[VD:%.*]], bfloat noundef [[RS1:%.*]], <vscale x 1 x bfloat> [[VS2:%.*]], i64 noundef [[VL:%.*]]) #[[ATTR0]] {
+// CHECK-RV64-NEXT: [[ENTRY:.*:]]
+// CHECK-RV64-NEXT: [[TMP0:%.*]] = call <vscale x 1 x bfloat> @llvm.riscv.vfmacc.mask.nxv1bf16.bf16.i64(<vscale x 1 x bfloat> [[VD]], bfloat [[RS1]], <vscale x 1 x bfloat> [[VS2]], <vscale x 1 x i1> [[MASK]], i64 7, i64 [[VL]], i64 0)
+// CHECK-RV64-NEXT: ret <vscale x 1 x bfloat> [[TMP0]]
+//
+vbfloat16mf4_t test_vfmacc_vf_bf16mf4_tumu(vbool64_t mask, vbfloat16mf4_t vd, __bf16 rs1, vbfloat16mf4_t vs2, size_t vl) {
+ return __riscv_vfmacc_vf_bf16mf4_tumu(mask, vd, rs1, vs2, vl);
+}
+
+// CHECK-RV64-LABEL: define dso_local <vscale x 2 x bfloat> @test_vfmacc_vv_bf16mf2_tumu(
+// CHECK-RV64-SAME: <vscale x 2 x i1> [[MASK:%.*]], <vscale x 2 x bfloat> [[VD:%.*]], <vscale x 2 x bfloat> [[VS1:%.*]], <vscale x 2 x bfloat> [[VS2:%.*]], i64 noundef [[VL:%.*]]) #[[ATTR0]] {
+// CHECK-RV64-NEXT: [[ENTRY:.*:]]
+// CHECK-RV64-NEXT: [[TMP0:%.*]] = call <vscale x 2 x bfloat> @llvm.riscv.vfmacc.mask.nxv2bf16.nxv2bf16.i64(<vscale x 2 x bfloat> [[VD]], <vscale x 2 x bfloat> [[VS1]], <vscale x 2 x bfloat> [[VS2]], <vscale x 2 x i1> [[MASK]], i64 7, i64 [[VL]], i64 0)
+// CHECK-RV64-NEXT: ret <vscale x 2 x bfloat> [[TMP0]]
+//
+vbfloat16mf2_t test_vfmacc_vv_bf16mf2_tumu(vbool32_t mask, vbfloat16mf2_t vd, vbfloat16mf2_t vs1, vbfloat16mf2_t vs2, size_t vl) {
+ return __riscv_vfmacc_vv_bf16mf2_tumu(mask, vd, vs1, vs2, vl);
+}
+
+// CHECK-RV64-LABEL: define dso_local <vscale x 2 x bfloat> @test_vfmacc_vf_bf16mf2_tumu(
+// CHECK-RV64-SAME: <vscale x 2 x i1> [[MASK:%.*]], <vscale x 2 x bfloat> [[VD:%.*]], bfloat noundef [[RS1:%.*]], <vscale x 2 x bfloat> [[VS2:%.*]], i64 noundef [[VL:%.*]]) #[[ATTR0]] {
+// CHECK-RV64-NEXT: [[ENTRY:.*:]]
+// CHECK-RV64-NEXT: [[TMP0:%.*]] = call <vscale x 2 x bfloat> @llvm.riscv.vfmacc.mask.nxv2bf16.bf16.i64(<vscale x 2 x bfloat> [[VD]], bfloat [[RS1]], <vscale x 2 x bfloat> [[VS2]], <vscale x 2 x i1> [[MASK]], i64 7, i64 [[VL]], i64 0)
+// CHECK-RV64-NEXT: ret <vscale x 2 x bfloat> [[TMP0]]
+//
+vbfloat16mf2_t test_vfmacc_vf_bf16mf2_tumu(vbool32_t mask, vbfloat16mf2_t vd, __bf16 rs1, vbfloat16mf2_t vs2, size_t vl) {
+ return __riscv_vfmacc_vf_bf16mf2_tumu(mask, vd, rs1, vs2, vl);
+}
+
+// CHECK-RV64-LABEL: define dso_local <vscale x 4 x bfloat> @test_vfmacc_vv_bf16m1_tumu(
+// CHECK-RV64-SAME: <vscale x 4 x i1> [[MASK:%.*]], <vscale x 4 x bfloat> [[VD:%.*]], <vscale x 4 x bfloat> [[VS1:%.*]], <vscale x 4 x bfloat> [[VS2:%.*]], i64 noundef [[VL:%.*]]) #[[ATTR0]] {
+// CHECK-RV64-NEXT: [[ENTRY:.*:]]
+// CHECK-RV64-NEXT: [[TMP0:%.*]] = call <vscale x 4 x bfloat> @llvm.riscv.vfmacc.mask.nxv4bf16.nxv4bf16.i64(<vscale x 4 x bfloat> [[VD]], <vscale x 4 x bfloat> [[VS1]], <vscale x 4 x bfloat> [[VS2]], <vscale x 4 x i1> [[MASK]], i64 7, i64 [[VL]], i64 0)
+// CHECK-RV64-NEXT: ret <vscale x 4 x bfloat> [[TMP0]]
+//
+vbfloat16m1_t test_vfmacc_vv_bf16m1_tumu(vbool16_t mask, vbfloat16m1_t vd, vbfloat16m1_t vs1, vbfloat16m1_t vs2, size_t vl) {
+ return __riscv_vfmacc_vv_bf16m1_tumu(mask, vd, vs1, vs2, vl);
+}
+
+// CHECK-RV64-LABEL: define dso_local <vscale x 4 x bfloat> @test_vfmacc_vf_bf16m1_tumu(
+// CHECK-RV64-SAME: <vscale x 4 x i1> [[MASK:%.*]], <vscale x 4 x bfloat> [[VD:%.*]], bfloat noundef [[RS1:%.*]], <vscale x 4 x bfloat> [[VS2:%.*]], i64 noundef [[VL:%.*]]) #[[ATTR0]] {
+// CHECK-RV64-NEXT: [[ENTRY:.*:]]
+// CHECK-RV64-NEXT: [[TMP0:%.*]] = call <vscale x 4 x bfloat> @llvm.riscv.vfmacc.mask.nxv4bf16.bf16.i64(<vscale x 4 x bfloat> [[VD]], bfloat [[RS1]], <vscale x 4 x bfloat> [[VS2]], <vscale x 4 x i1> [[MASK]], i64 7, i64 [[VL]], i64 0)
+// CHECK-RV64-NEXT: ret <vscale x 4 x bfloat> [[TMP0]]
+//
+vbfloat16m1_t test_vfmacc_vf_bf16m1_tumu(vbool16_t mask, vbfloat16m1_t vd, __bf16 rs1, vbfloat16m1_t vs2, size_t vl) {
+ return __riscv_vfmacc_vf_bf16m1_tumu(mask, vd, rs1, vs2, vl);
+}
+
+// CHECK-RV64-LABEL: define dso_local <vscale x 8 x bfloat> @test_vfmacc_vv_bf16m2_tumu(
+// CHECK-RV64-SAME: <vscale x 8 x i1> [[MASK:%.*]], <vscale x 8 x bfloat> [[VD:%.*]], <vscale x 8 x bfloat> [[VS1:%.*]], <vscale x 8 x bfloat> [[VS2:%.*]], i64 noundef [[VL:%.*]]) #[[ATTR0]] {
+// CHECK-RV64-NEXT: [[ENTRY:.*:]]
+// CHECK-RV64-NEXT: [[TMP0:%.*]] = call <vscale x 8 x bfloat> @llvm.riscv.vfmacc.mask.nxv8bf16.nxv8bf16.i64(<vscale x 8 x bfloat> [[VD]], <vscale x 8 x bfloat> [[VS1]], <vscale x 8 x bfloat> [[VS2]], <vscale x 8 x i1> [[MASK]], i64 7, i64 [[VL]], i64 0)
+// CHECK-RV64-NEXT: ret <vscale x 8 x bfloat> [[TMP0]]
+//
+vbfloat16m2_t test_vfmacc_vv_bf16m2_tumu(vbool8_t mask, vbfloat16m2_t vd, vbfloat16m2_t vs1, vbfloat16m2_t vs2, size_t vl) {
+ return __riscv_vfmacc_vv_bf16m2_tumu(mask, vd, vs1, vs2, vl);
+}
+
+// CHECK-RV64-LABEL: define dso_local <vscale x 8 x bfloat> @test_vfmacc_vf_bf16m2_tumu(
+// CHECK-RV64-SAME: <vscale x 8 x i1> [[MASK:%.*]], <vscale x 8 x bfloat> [[VD:%.*]], bfloat noundef [[RS1:%.*]], <vscale x 8 x bfloat> [[VS2:%.*]], i64 noundef [[VL:%.*]]) #[[ATTR0]] {
+// CHECK-RV64-NEXT: [[ENTRY:.*:]]
+// CHECK-RV64-NEXT: [[TMP0:%.*]] = call <vscale x 8 x bfloat> @llvm.riscv.vfmacc.mask.nxv8bf16.bf16.i64(<vscale x 8 x bfloat> [[VD]], bfloat [[RS1]], <vscale x 8 x bfloat> [[VS2]], <vscale x 8 x i1> [[MASK]], i64 7, i64 [[VL]], i64 0)
+// CHECK-RV64-NEXT: ret <vscale x 8 x bfloat> [[TMP0]]
+//
+vbfloat16m2_t test_vfmacc_vf_bf16m2_tumu(vbool8_t mask, vbfloat16m2_t vd, __bf16 rs1, vbfloat16m2_t vs2, size_t vl) {
+ return __riscv_vfmacc_vf_bf16m2_tumu(mask, vd, rs1, vs2, vl);
+}
+
+// CHECK-RV64-LABEL: define dso_local <vscale x 16 x bfloat> @test_vfmacc_vv_bf16m4_tumu(
+// CHECK-RV64-SAME: <vscale x 16 x i1> [[MASK:%.*]], <vscale x 16 x bfloat> [[VD:%.*]], <vscale x 16 x bfloat> [[VS1:%.*]], <vscale x 16 x bfloat> [[VS2:%.*]], i64 noundef [[VL:%.*]]) #[[ATTR0]] {
+// CHECK-RV64-NEXT: [[ENTRY:.*:]]
+// CHECK-RV64-NEXT: [[TMP0:%.*]] = call <vscale x 16 x bfloat> @llvm.riscv.vfmacc.mask.nxv16bf16.nxv16bf16.i64(<vscale x 16 x bfloat> [[VD]], <vscale x 16 x bfloat> [[VS1]], <vscale x 16 x bfloat> [[VS2]], <vscale x 16 x i1> [[MASK]], i64 7, i64 [[VL]], i64 0)
+// CHECK-RV64-NEXT: ret <vscale x 16 x bfloat> [[TMP0]]
+//
+vbfloat16m4_t test_vfmacc_vv_bf16m4_tumu(vbool4_t mask, vbfloat16m4_t vd, vbfloat16m4_t vs1, vbfloat16m4_t vs2, size_t vl) {
+ return __riscv_vfmacc_vv_bf16m4_tumu(mask, vd, vs1, vs2, vl);
+}
+
+// CHECK-RV64-LABEL: define dso_local <vscale x 16 x bfloat> @test_vfmacc_vf_bf16m4_tumu(
+// CHECK-RV64-SAME: <vscale x 16 x i1> [[MASK:%.*]], <vscale x 16 x bfloat> [[VD:%.*]], bfloat noundef [[RS1:%.*]], <vscale x 16 x bfloat> [[VS2:%.*]], i64 noundef [[VL:%.*]]) #[[ATTR0]] {
+// CHECK-RV64-NEXT: [[ENTRY:.*:]]
+// CHECK-RV64-NEXT: [[TMP0:%.*]] = call <vscale x 16 x bfloat> @llvm.riscv.vfmacc.mask.nxv16bf16.bf16.i64(<vscale x 16 x bfloat> [[VD]], bfloat [[RS1]], <vscale x 16 x bfloat> [[VS2]], <vscale x 16 x i1> [[MASK]], i64 7, i64 [[VL]], i64 0)
+// CHECK-RV64-NEXT: ret <vscale x 16 x bfloat> [[TMP0]]
+//
+vbfloat16m4_t test_vfmacc_vf_bf16m4_tumu(vbool4_t mask, vbfloat16m4_t vd, __bf16 rs1, vbfloat16m4_t vs2, size_t vl) {
+ return __riscv_vfmacc_vf_bf16m4_tumu(mask, vd, rs1, vs2, vl);
+}
+
+// CHECK-RV64-LABEL: define dso_local <vscale x 32 x bfloat> @test_vfmacc_vv_bf16m8_tumu(
+// CHECK-RV64-SAME: <vscale x 32 x i1> [[MASK:%.*]], <vscale x 32 x bfloat> [[VD:%.*]], <vscale x 32 x bfloat> [[VS1:%.*]], <vscale x 32 x bfloat> [[VS2:%.*]], i64 noundef [[VL:%.*]]) #[[ATTR0]] {
+// CHECK-RV64-NEXT: [[ENTRY:.*:]]
+// CHECK-RV64-NEXT: [[TMP0:%.*]] = call <vscale x 32 x bfloat> @llvm.riscv.vfmacc.mask.nxv32bf16.nxv32bf16.i64(<vscale x 32 x bfloat> [[VD]], <vscale x 32 x bfloat> [[VS1]], <vscale x 32 x bfloat> [[VS2]], <vscale x 32 x i1> [[MASK]], i64 7, i64 [[VL]], i64 0)
+// CHECK-RV64-NEXT: ret <vscale x 32 x bfloat> [[TMP0]]
+//
+vbfloat16m8_t test_vfmacc_vv_bf16m8_tumu(vbool2_t mask, vbfloat16m8_t vd, vbfloat16m8_t vs1, vbfloat16m8_t vs2, size_t vl) {
+ return __riscv_vfmacc_vv_bf16m8_tumu(mask, vd, vs1, vs2, vl);
+}
+
+// CHECK-RV64-LABEL: define dso_local <vscale x 32 x bfloat> @test_vfmacc_vf_bf16m8_tumu(
+// CHECK-RV64-SAME: <vscale x 32 x i1> [[MASK:%.*]], <vscale x 32 x bfloat> [[VD:%.*]], bfloat noundef [[RS1:%.*]], <vscale x 32 x bfloat> [[VS2:%.*]], i64 noundef [[VL:%.*]]) #[[ATTR0]] {
+// CHECK-RV64-NEXT: [[ENTRY:.*:]]
+// CHECK-RV64-NEXT: [[TMP0:%.*]] = call <vscale x 32 x bfloat> @llvm.riscv.vfmacc.mask.nxv32bf16.bf16.i64(<vscale x 32 x bfloat> [[VD]], bfloat [[RS1]], <vscale x 32 x bfloat> [[VS2]], <vscale x 32 x i1> [[MASK]], i64 7, i64 [[VL]], i64 0)
+// CHECK-RV64-NEXT: ret <vscale x 32 x bfloat> [[TMP0]]
+//
+vbfloat16m8_t test_vfmacc_vf_bf16m8_tumu(vbool2_t mask, vbfloat16m8_t vd, __bf16 rs1, vbfloat16m8_t vs2, size_t vl) {
+ return __riscv_vfmacc_vf_bf16m8_tumu(mask, vd, rs1, vs2, vl);
+}
+
+// CHECK-RV64-LABEL: define dso_local <vscale x 1 x bfloat> @test_vfmacc_vv_bf16mf4_mu(
+// CHECK-RV64-SAME: <vscale x 1 x i1> [[MASK:%.*]], <vscale x 1 x bfloat> [[VD:%.*]], <vscale x 1 x bfloat> [[VS1:%.*]], <vscale x 1 x bfloat> [[VS2:%.*]], i64 noundef [[VL:%.*]]) #[[ATTR0]] {
+// CHECK-RV64-NEXT: [[ENTRY:.*:]]
+// CHECK-RV64-NEXT: [[TMP0:%.*]] = call <vscale x 1 x bfloat> @llvm.riscv.vfmacc.mask.nxv1bf16.nxv1bf16.i64(<vscale x 1 x bfloat> [[VD]], <vscale x 1 x bfloat> [[VS1]], <vscale x 1 x bfloat> [[VS2]], <vscale x 1 x i1> [[MASK]], i64 7, i64 [[VL]], i64 1)
+// CHECK-RV64-NEXT: ret <vscale x 1 x bfloat> [[TMP0]]
+//
+vbfloat16mf4_t test_vfmacc_vv_bf16mf4_mu(vbool64_t mask, vbfloat16mf4_t vd, vbfloat16mf4_t vs1, vbfloat16mf4_t vs2, size_t vl) {
+ return __riscv_vfmacc_vv_bf16mf4_mu(mask, vd, vs1, vs2, vl);
+}
+
+// CHECK-RV64-LABEL: define dso_local <vscale x 1 x bfloat> @test_vfmacc_vf_bf16mf4_mu(
+// CHECK-RV64-SAME: <vscale x 1 x i1> [[MASK:%.*]], <vscale x 1 x bfloat> [[VD:%.*]], bfloat noundef [[RS1:%.*]], <vscale x 1 x bfloat> [[VS2:%.*]], i64 noundef [[VL:%.*]]) #[[ATTR0]] {
+// CHECK-RV64-NEXT: [[ENTRY:.*:]]
+// CHECK-RV64-NEXT: [[TMP0:%.*]] = call <vscale x 1 x bfloat> @llvm.riscv.vfmacc.mask.nxv1bf16.bf16.i64(<vscale x 1 x bfloat> [[VD]], bfloat [[RS1]], <vscale x 1 x bfloat> [[VS2]], <vscale x 1 x i1> [[MASK]], i64 7, i64 [[VL]], i64 1)
+// CHECK-RV64-NEXT: ret <vscale x 1 x bfloat> [[TMP0]]
+//
+vbfloat16mf4_t test_vfmacc_vf_bf16mf4_mu(vbool64_t mask, vbfloat16mf4_t vd, __bf16 rs1, vbfloat16mf4_t vs2, size_t vl) {
+ return __riscv_vfmacc_vf_bf16mf4_mu(mask, vd, rs1, vs2, vl);
+}
+
+// CHECK-RV64-LABEL: define dso_local <vscale x 2 x bfloat> @test_vfmacc_vv_bf16mf2_mu(
+// CHECK-RV64-SAME: <vscale x 2 x i1> [[MASK:%.*]], <vscale x 2 x bfloat> [[VD:%.*]], <vscale x 2 x bfloat> [[VS1:%.*]], <vscale x 2 x bfloat> [[VS2:%.*]], i64 noundef [[VL:%.*]]) #[[ATTR0]] {
+// CHECK-RV64-NEXT: [[ENTRY:.*:]]
+// CHECK-RV64-NEXT: [[TMP0:%.*]] = call <vscale x 2 x bfloat> @llvm.riscv.vfmacc.mask.nxv2bf16.nxv2bf16.i64(<vscale x 2 x bfloat> [[VD]], <vscale x 2 x bfloat> [[VS1]], <vscale x 2 x bfloat> [[VS2]], <vscale x 2 x i1> [[MASK]], i64 7, i64 [[VL]], i64 1)
+// CHECK-RV64-NEXT: ret <vscale x 2 x bfloat> [[TMP0]]
+//
+vbfloat16mf2_t test_vfmacc_vv_bf16mf2_mu(vbool32_t mask, vbfloat16mf2_t vd, vbfloat16mf2_t vs1, vbfloat16mf2_t vs2, size_t vl) {
+ return __riscv_vfmacc_vv_bf16mf2_mu(mask, vd, vs1, vs2, vl);
+}
+
+// CHECK-RV64-LABEL: define dso_local <vscale x 2 x bfloat> @test_vfmacc_vf_bf16mf2_mu(
+// CHECK-RV64-SAME: <vscale x 2 x i1> [[MASK:%.*]], <vscale x 2 x bfloat> [[VD:%.*]], bfloat noundef [[RS1:%.*]], <vscale x 2 x bfloat> [[VS2:%.*]], i64 noundef [[VL:%.*]]) #[[ATTR0]] {
+// CHECK-RV64-NEXT: [[ENTRY:.*:]]
+// CHECK-RV64-NEXT: [[TMP0:%.*]] = call <vscale x 2 x bfloat> @llvm.riscv.vfmacc.mask.nxv2bf16.bf16.i64(<vscale x 2 x bfloat> [[VD]], bfloat [[RS1]], <vscale x 2 x bfloat> [[VS2]], <vscale x 2 x i1> [[MASK]], i64 7, i64 [[VL]], i64 1)
+// CHECK-RV64-NEXT: ret <vscale x 2 x bfloat> [[TMP0]]
+//
+vbfloat16mf2_t test_vfmacc_vf_bf16mf2_mu(vbool32_t mask, vbfloat16mf2_t vd, __bf16 rs1, vbfloat16mf2_t vs2, size_t vl) {
+ return __riscv_vfmacc_vf_bf16mf2_mu(mask, vd, rs1, vs2, vl);
+}
+
+// CHECK-RV64-LABEL: define dso_local <vscale x 4 x bfloat> @test_vfmacc_vv_bf16m1_mu(
+// CHECK-RV64-SAME: <vscale x 4 x i1> [[MASK:%.*]], <vscale x 4 x bfloat> [[VD:%.*]], <vscale x 4 x bfloat> [[VS1:%.*]], <vscale x 4 x bfloat> [[VS2:%.*]], i64 noundef [[VL:%.*]]) #[[ATTR0]] {
+// CHECK-RV64-NEXT: [[ENTRY:.*:]]
+// CHECK-RV64-NEXT: [[TMP0:%.*]] = call <vscale x 4 x bfloat> @llvm.riscv.vfmacc.mask.nxv4bf16.nxv4bf16.i64(<vscale x 4 x bfloat> [[VD]], <vscale x 4 x bfloat> [[VS1]], <vscale x 4 x bfloat> [[VS2]], <vscale x 4 x i1> [[MASK]], i64 7, i64 [[VL]], i64 1)
+// CHECK-RV64-NEXT: ret <vscale x 4 x bfloat> [[TMP0]]
+//
+vbfloat16m1_t test_vfmacc_vv_bf16m1_mu(vbool16_t mask, vbfloat16m1_t vd, vbfloat16m1_t vs1, vbfloat16m1_t vs2, size_t vl) {
+ return __riscv_vfmacc_vv_bf16m1_mu(mask, vd, vs1, vs2, vl);
+}
+
+// CHECK-RV64-LABEL: define dso_local <vscale x 4 x bfloat> @test_vfmacc_vf_bf16m1_mu(
+// CHECK-RV64-SAME: <vscale x 4 x i1> [[MASK:%.*]], <vscale x 4 x bfloat> [[VD:%.*]], bfloat noundef [[RS1:%.*]], <vscale x 4 x bfloat> [[VS2:%.*]], i64 noundef [[VL:%.*]]) #[[ATTR0]] {
+// CHECK-RV64-NEXT: [[ENTRY:.*:]]
+// CHECK-RV64-NEXT: [[TMP0:%.*]] = call <vscale x 4 x bfloat> @llvm.riscv.vfmacc.mask.nxv4bf16.bf16.i64(<vscale x 4 x bfloat> [[VD]], bfloat [[RS1]], <vscale x 4 x bfloat> [[VS2]], <vscale x 4 x i1> [[MASK]], i64 7, i64 [[VL]], i64 1)
+// CHECK-RV64-NEXT: ret <vscale x 4 x bfloat> [[TMP0]]
+//
+vbfloat16m1_t test_vfmacc_vf_bf16m1_mu(vbool16_t mask, vbfloat16m1_t vd, __bf16 rs1, vbfloat16m1_t vs2, size_t vl) {
+ return __riscv_vfmacc_vf_bf16m1_mu(mask, vd, rs1, vs2, vl);
+}
+
+// CHECK-RV64-LABEL: define dso_local <vscale x 8 x bfloat> @test_vfmacc_vv_bf16m2_mu(
+// CHECK-RV64-SAME: <vscale x 8 x i1> [[MASK:%.*]], <vscale x 8 x bfloat> [[VD:%.*]], <vscale x 8 x bfloat> [[VS1:%.*]], <vscale x 8 x bfloat> [[VS2:%.*]], i64 noundef [[VL:%.*]]) #[[ATTR0]] {
+// CHECK-RV64-NEXT: [[ENTRY:.*:]]
+// CHECK-RV64-NEXT: [[TMP0:%.*]] = call <vscale x 8 x bfloat> @llvm.riscv.vfmacc.mask.nxv8bf16.nxv8bf16.i64(<vscale x 8 x bfloat> [[VD]], <vscale x 8 x bfloat> [[VS1]], <vscale x 8 x bfloat> [[VS2]], <vscale x 8 x i1> [[MASK]], i64 7, i64 [[VL]], i64 1)
+// CHECK-RV64-NEXT: ret <vscale x 8 x bfloat> [[TMP0]]
+//
+vbfloat16m2_t test_vfmacc_vv_bf16m2_mu(vbool8_t mask, vbfloat16m2_t vd, vbfloat16m2_t vs1, vbfloat16m2_t vs2, size_t vl) {
+ return __riscv_vfmacc_vv_bf16m2_mu(mask, vd, vs1, vs2, vl);
+}
+
+// CHECK-RV64-LABEL: define dso_local <vscale x 8 x bfloat> @test_vfmacc_vf_bf16m2_mu(
+// CHECK-RV64-SAME: <vscale x 8 x i1> [[MASK:%.*]], <vscale x 8 x bfloat> [[VD:%.*]], bfloat noundef [[RS1:%.*]], <vscale x 8 x bfloat> [[VS2:%.*]], i64 noundef [[VL:%.*]]) #[[ATTR0]] {
+// CHECK-RV64-NEXT: [[ENTRY:.*:]]
+// CHECK-RV64-NEXT: [[TMP0:%.*]] = call <vscale x 8 x bfloat> @llvm.riscv.vfmacc.mask.nxv8bf16.bf16.i64(<vscale x 8 x bfloat> [[VD]], bfloat [[RS1]], <vscale x 8 x bfloat> [[VS2]], <vscale x 8 x i1> [[MASK]], i64 7, i64 [[VL]], i64 1)
+// CHECK-RV64-NEXT: ret <vscale x 8 x bfloat> [[TMP0]]
+//
+vbfloat16m2_t test_vfmacc_vf_bf16m2_mu(vbool8_t mask, vbfloat16m2_t vd, __bf16 rs1, vbfloat16m2_t vs2, size_t vl) {
+ return __riscv_vfmacc_vf_bf16m2_mu(mask, vd, rs1, vs2, vl);
+}
+
+// CHECK-RV64-LABEL: define dso_local <vscale x 16 x bfloat> @test_vfmacc_vv_bf16m4_mu(
+// CHECK-RV64-SAME: <vscale x 16 x i1> [[MASK:%.*]], <vscale x 16 x bfloat> [[VD:%.*]], <vscale x 16 x bfloat> [[VS1:%.*]], <vscale x 16 x bfloat> [[VS2:%.*]], i64 noundef [[VL:%.*]]) #[[ATTR0]] {
+// CHECK-RV64-NEXT: [[ENTRY:.*:]]
+// CHECK-RV64-NEXT: [[TMP0:%.*]] = call <vscale x 16 x bfloat> @llvm.riscv.vfmacc.mask.nxv16bf16.nxv16bf16.i64(<vscale x 16 x bfloat> [[VD]], <vscale x 16 x bfloat> [[VS1]], <vscale x 16 x bfloat> [[VS2]], <vscale x 16 x i1> [[MASK]], i64 7, i64 [[VL]], i64 1)
+// CHECK-RV64-NEXT: ret <vscale x 16 x bfloat> [[TMP0]]
+//
+vbfloat16m4_t test_vfmacc_vv_bf16m4_mu(vbool4_t mask, vbfloat16m4_t vd, vbfloat16m4_t vs1, vbfloat16m4_t vs2, size_t vl) {
+ return __riscv_vfmacc_vv_bf16m4_mu(mask, vd, vs1, vs2, vl);
+}
+
+// CHECK-RV64-LABEL: define dso_local <vscale x 16 x bfloat> @test_vfmacc_vf_bf16m4_mu(
+// CHECK-RV64-SAME: <vscale x 16 x i1> [[MASK:%.*]], <vscale x 16 x bfloat> [[VD:%.*]], bfloat noundef [[RS1:%.*]], <vscale x 16 x bfloat> [[VS2:%.*]], i64 noundef [[VL:%.*]]) #[[ATTR0]] {
+// CHECK-RV64-NEXT: [[ENTRY:.*:]]
+// CHECK-RV64-NEXT: [[TMP0:%.*]] = call <vscale x 16 x bfloat> @llvm.riscv.vfmacc.mask.nxv16bf16.bf16.i64(<vscale x 16 x bfloat> [[VD]], bfloat [[RS1]], <vscale x 16 x bfloat> [[VS2]], <vscale x 16 x i1> [[MASK]], i64 7, i64 [[VL]], i64 1)
+// CHECK-RV64-NEXT: ret <vscale x 16 x bfloat> [[TMP0]]
+//
+vbfloat16m4_t test_vfmacc_vf_bf16m4_mu(vbool4_t mask, vbfloat16m4_t vd, __bf16 rs1, vbfloat16m4_t vs2, size_t vl) {
+ return __riscv_vfmacc_vf_bf16m4_mu(mask, vd, rs1, vs2, vl);
+}
+
+// CHECK-RV64-LABEL: define dso_local <vscale x 32 x bfloat> @test_vfmacc_vv_bf16m8_mu(
+// CHECK-RV64-SAME: <vscale x 32 x i1> [[MASK:%.*]], <vscale x 32 x bfloat> [[VD:%.*]], <vscale x 32 x bfloat> [[VS1:%.*]], <vscale x 32 x bfloat> [[VS2:%.*]], i64 noundef [[VL:%.*]]) #[[ATTR0]] {
+// CHECK-RV64-NEXT: [[ENTRY:.*:]]
+// CHECK-RV64-NEXT: [[TMP0:%.*]] = call <vscale x 32 x bfloat> @llvm.riscv.vfmacc.mask.nxv32bf16.nxv32bf16.i64(<vscale x 32 x bfloat> [[VD]], <vscale x 32 x bfloat> [[VS1]], <vscale x 32 x bfloat> [[VS2]], <vscale x 32 x i1> [[MASK]], i64 7, i64 [[VL]], i64 1)
+// CHECK-RV64-NEXT: ret <vscale x 32 x bfloat> [[TMP0]]
+//
+vbfloat16m8_t test_vfmacc_vv_bf16m8_mu(vbool2_t mask, vbfloat16m8_t vd, vbfloat16m8_t vs1, vbfloat16m8_t vs2, size_t vl) {
+ return __riscv_vfmacc_vv_bf16m8_mu(mask, vd, vs1, vs2, vl);
+}
+
+// CHECK-RV64-LABEL: define dso_local <vscale x 32 x bfloat> @test_vfmacc_vf_bf16m8_mu(
+// CHECK-RV64-SAME: <vscale x 32 x i1> [[MASK:%.*]], <vscale x 32 x bfloat> [[VD:%.*]], bfloat noundef [[RS1:%.*]], <vscale x 32 x bfloat> [[VS2:%.*]], i64 noundef [[VL:%.*]]) #[[ATTR0]] {
+// CHECK-RV64-NEXT: [[ENTRY:.*:]]
+// CHECK-RV64-NEXT: [[TMP0:%.*]] = call <vscale x 32 x bfloat> @llvm.riscv.vfmacc.mask.nxv32bf16.bf16.i64(<vscale x 32 x bfloat> [[VD]], bfloat [[RS1]], <vscale x 32 x bfloat> [[VS2]], <vscale x 32 x i1> [[MASK]], i64 7, i64 [[VL]], i64 1)
+// CHECK-RV64-NEXT: ret <vscale x 32 x bfloat> [[TMP0]]
+//
+vbfloat16m8_t test_vfmacc_vf_bf16m8_mu(vbool2_t mask, vbfloat16m8_t vd, __bf16 rs1, vbfloat16m8_t vs2, size_t vl) {
+ return __riscv_vfmacc_vf_bf16m8_mu(mask, vd, rs1, vs2, vl);
+}
+
diff --git a/clang/test/CodeGen/RISCV/rvv-intrinsics-autogenerated/zvfbfa/policy/non-overloaded/vfmadd.c b/clang/test/CodeGen/RISCV/rvv-intrinsics-autogenerated/zvfbfa/policy/non-overloaded/vfmadd.c
new file mode 100644
index 0000000..511e073
--- /dev/null
+++ b/clang/test/CodeGen/RISCV/rvv-intrinsics-autogenerated/zvfbfa/policy/non-overloaded/vfmadd.c
@@ -0,0 +1,489 @@
+// NOTE: Assertions have been autogenerated by utils/update_cc_test_checks.py UTC_ARGS: --version 5
+// REQUIRES: riscv-registered-target
+// RUN: %clang_cc1 -triple riscv64 -target-feature +v \
+// RUN: -target-feature +experimental-zvfbfa -disable-O0-optnone \
+// RUN: -emit-llvm %s -o - | opt -S -passes=mem2reg | \
+// RUN: FileCheck --check-prefix=CHECK-RV64 %s
+
+#include <riscv_vector.h>
+
+// CHECK-RV64-LABEL: define dso_local <vscale x 1 x bfloat> @test_vfmadd_vv_bf16mf4_tu(
+// CHECK-RV64-SAME: <vscale x 1 x bfloat> [[VD:%.*]], <vscale x 1 x bfloat> [[VS1:%.*]], <vscale x 1 x bfloat> [[VS2:%.*]], i64 noundef [[VL:%.*]]) #[[ATTR0:[0-9]+]] {
+// CHECK-RV64-NEXT: [[ENTRY:.*:]]
+// CHECK-RV64-NEXT: [[TMP0:%.*]] = call <vscale x 1 x bfloat> @llvm.riscv.vfmadd.nxv1bf16.nxv1bf16.i64(<vscale x 1 x bfloat> [[VD]], <vscale x 1 x bfloat> [[VS1]], <vscale x 1 x bfloat> [[VS2]], i64 7, i64 [[VL]], i64 2)
+// CHECK-RV64-NEXT: ret <vscale x 1 x bfloat> [[TMP0]]
+//
+vbfloat16mf4_t test_vfmadd_vv_bf16mf4_tu(vbfloat16mf4_t vd, vbfloat16mf4_t vs1, vbfloat16mf4_t vs2, size_t vl) {
+ return __riscv_vfmadd_vv_bf16mf4_tu(vd, vs1, vs2, vl);
+}
+
+// CHECK-RV64-LABEL: define dso_local <vscale x 1 x bfloat> @test_vfmadd_vf_bf16mf4_tu(
+// CHECK-RV64-SAME: <vscale x 1 x bfloat> [[VD:%.*]], bfloat noundef [[RS1:%.*]], <vscale x 1 x bfloat> [[VS2:%.*]], i64 noundef [[VL:%.*]]) #[[ATTR0]] {
+// CHECK-RV64-NEXT: [[ENTRY:.*:]]
+// CHECK-RV64-NEXT: [[TMP0:%.*]] = call <vscale x 1 x bfloat> @llvm.riscv.vfmadd.nxv1bf16.bf16.i64(<vscale x 1 x bfloat> [[VD]], bfloat [[RS1]], <vscale x 1 x bfloat> [[VS2]], i64 7, i64 [[VL]], i64 2)
+// CHECK-RV64-NEXT: ret <vscale x 1 x bfloat> [[TMP0]]
+//
+vbfloat16mf4_t test_vfmadd_vf_bf16mf4_tu(vbfloat16mf4_t vd, __bf16 rs1, vbfloat16mf4_t vs2, size_t vl) {
+ return __riscv_vfmadd_vf_bf16mf4_tu(vd, rs1, vs2, vl);
+}
+
+// CHECK-RV64-LABEL: define dso_local <vscale x 2 x bfloat> @test_vfmadd_vv_bf16mf2_tu(
+// CHECK-RV64-SAME: <vscale x 2 x bfloat> [[VD:%.*]], <vscale x 2 x bfloat> [[VS1:%.*]], <vscale x 2 x bfloat> [[VS2:%.*]], i64 noundef [[VL:%.*]]) #[[ATTR0]] {
+// CHECK-RV64-NEXT: [[ENTRY:.*:]]
+// CHECK-RV64-NEXT: [[TMP0:%.*]] = call <vscale x 2 x bfloat> @llvm.riscv.vfmadd.nxv2bf16.nxv2bf16.i64(<vscale x 2 x bfloat> [[VD]], <vscale x 2 x bfloat> [[VS1]], <vscale x 2 x bfloat> [[VS2]], i64 7, i64 [[VL]], i64 2)
+// CHECK-RV64-NEXT: ret <vscale x 2 x bfloat> [[TMP0]]
+//
+vbfloat16mf2_t test_vfmadd_vv_bf16mf2_tu(vbfloat16mf2_t vd, vbfloat16mf2_t vs1, vbfloat16mf2_t vs2, size_t vl) {
+ return __riscv_vfmadd_vv_bf16mf2_tu(vd, vs1, vs2, vl);
+}
+
+// CHECK-RV64-LABEL: define dso_local <vscale x 2 x bfloat> @test_vfmadd_vf_bf16mf2_tu(
+// CHECK-RV64-SAME: <vscale x 2 x bfloat> [[VD:%.*]], bfloat noundef [[RS1:%.*]], <vscale x 2 x bfloat> [[VS2:%.*]], i64 noundef [[VL:%.*]]) #[[ATTR0]] {
+// CHECK-RV64-NEXT: [[ENTRY:.*:]]
+// CHECK-RV64-NEXT: [[TMP0:%.*]] = call <vscale x 2 x bfloat> @llvm.riscv.vfmadd.nxv2bf16.bf16.i64(<vscale x 2 x bfloat> [[VD]], bfloat [[RS1]], <vscale x 2 x bfloat> [[VS2]], i64 7, i64 [[VL]], i64 2)
+// CHECK-RV64-NEXT: ret <vscale x 2 x bfloat> [[TMP0]]
+//
+vbfloat16mf2_t test_vfmadd_vf_bf16mf2_tu(vbfloat16mf2_t vd, __bf16 rs1, vbfloat16mf2_t vs2, size_t vl) {
+ return __riscv_vfmadd_vf_bf16mf2_tu(vd, rs1, vs2, vl);
+}
+
+// CHECK-RV64-LABEL: define dso_local <vscale x 4 x bfloat> @test_vfmadd_vv_bf16m1_tu(
+// CHECK-RV64-SAME: <vscale x 4 x bfloat> [[VD:%.*]], <vscale x 4 x bfloat> [[VS1:%.*]], <vscale x 4 x bfloat> [[VS2:%.*]], i64 noundef [[VL:%.*]]) #[[ATTR0]] {
+// CHECK-RV64-NEXT: [[ENTRY:.*:]]
+// CHECK-RV64-NEXT: [[TMP0:%.*]] = call <vscale x 4 x bfloat> @llvm.riscv.vfmadd.nxv4bf16.nxv4bf16.i64(<vscale x 4 x bfloat> [[VD]], <vscale x 4 x bfloat> [[VS1]], <vscale x 4 x bfloat> [[VS2]], i64 7, i64 [[VL]], i64 2)
+// CHECK-RV64-NEXT: ret <vscale x 4 x bfloat> [[TMP0]]
+//
+vbfloat16m1_t test_vfmadd_vv_bf16m1_tu(vbfloat16m1_t vd, vbfloat16m1_t vs1, vbfloat16m1_t vs2, size_t vl) {
+ return __riscv_vfmadd_vv_bf16m1_tu(vd, vs1, vs2, vl);
+}
+
+// CHECK-RV64-LABEL: define dso_local <vscale x 4 x bfloat> @test_vfmadd_vf_bf16m1_tu(
+// CHECK-RV64-SAME: <vscale x 4 x bfloat> [[VD:%.*]], bfloat noundef [[RS1:%.*]], <vscale x 4 x bfloat> [[VS2:%.*]], i64 noundef [[VL:%.*]]) #[[ATTR0]] {
+// CHECK-RV64-NEXT: [[ENTRY:.*:]]
+// CHECK-RV64-NEXT: [[TMP0:%.*]] = call <vscale x 4 x bfloat> @llvm.riscv.vfmadd.nxv4bf16.bf16.i64(<vscale x 4 x bfloat> [[VD]], bfloat [[RS1]], <vscale x 4 x bfloat> [[VS2]], i64 7, i64 [[VL]], i64 2)
+// CHECK-RV64-NEXT: ret <vscale x 4 x bfloat> [[TMP0]]
+//
+vbfloat16m1_t test_vfmadd_vf_bf16m1_tu(vbfloat16m1_t vd, __bf16 rs1, vbfloat16m1_t vs2, size_t vl) {
+ return __riscv_vfmadd_vf_bf16m1_tu(vd, rs1, vs2, vl);
+}
+
+// CHECK-RV64-LABEL: define dso_local <vscale x 8 x bfloat> @test_vfmadd_vv_bf16m2_tu(
+// CHECK-RV64-SAME: <vscale x 8 x bfloat> [[VD:%.*]], <vscale x 8 x bfloat> [[VS1:%.*]], <vscale x 8 x bfloat> [[VS2:%.*]], i64 noundef [[VL:%.*]]) #[[ATTR0]] {
+// CHECK-RV64-NEXT: [[ENTRY:.*:]]
+// CHECK-RV64-NEXT: [[TMP0:%.*]] = call <vscale x 8 x bfloat> @llvm.riscv.vfmadd.nxv8bf16.nxv8bf16.i64(<vscale x 8 x bfloat> [[VD]], <vscale x 8 x bfloat> [[VS1]], <vscale x 8 x bfloat> [[VS2]], i64 7, i64 [[VL]], i64 2)
+// CHECK-RV64-NEXT: ret <vscale x 8 x bfloat> [[TMP0]]
+//
+vbfloat16m2_t test_vfmadd_vv_bf16m2_tu(vbfloat16m2_t vd, vbfloat16m2_t vs1, vbfloat16m2_t vs2, size_t vl) {
+ return __riscv_vfmadd_vv_bf16m2_tu(vd, vs1, vs2, vl);
+}
+
+// CHECK-RV64-LABEL: define dso_local <vscale x 8 x bfloat> @test_vfmadd_vf_bf16m2_tu(
+// CHECK-RV64-SAME: <vscale x 8 x bfloat> [[VD:%.*]], bfloat noundef [[RS1:%.*]], <vscale x 8 x bfloat> [[VS2:%.*]], i64 noundef [[VL:%.*]]) #[[ATTR0]] {
+// CHECK-RV64-NEXT: [[ENTRY:.*:]]
+// CHECK-RV64-NEXT: [[TMP0:%.*]] = call <vscale x 8 x bfloat> @llvm.riscv.vfmadd.nxv8bf16.bf16.i64(<vscale x 8 x bfloat> [[VD]], bfloat [[RS1]], <vscale x 8 x bfloat> [[VS2]], i64 7, i64 [[VL]], i64 2)
+// CHECK-RV64-NEXT: ret <vscale x 8 x bfloat> [[TMP0]]
+//
+vbfloat16m2_t test_vfmadd_vf_bf16m2_tu(vbfloat16m2_t vd, __bf16 rs1, vbfloat16m2_t vs2, size_t vl) {
+ return __riscv_vfmadd_vf_bf16m2_tu(vd, rs1, vs2, vl);
+}
+
+// CHECK-RV64-LABEL: define dso_local <vscale x 16 x bfloat> @test_vfmadd_vv_bf16m4_tu(
+// CHECK-RV64-SAME: <vscale x 16 x bfloat> [[VD:%.*]], <vscale x 16 x bfloat> [[VS1:%.*]], <vscale x 16 x bfloat> [[VS2:%.*]], i64 noundef [[VL:%.*]]) #[[ATTR0]] {
+// CHECK-RV64-NEXT: [[ENTRY:.*:]]
+// CHECK-RV64-NEXT: [[TMP0:%.*]] = call <vscale x 16 x bfloat> @llvm.riscv.vfmadd.nxv16bf16.nxv16bf16.i64(<vscale x 16 x bfloat> [[VD]], <vscale x 16 x bfloat> [[VS1]], <vscale x 16 x bfloat> [[VS2]], i64 7, i64 [[VL]], i64 2)
+// CHECK-RV64-NEXT: ret <vscale x 16 x bfloat> [[TMP0]]
+//
+vbfloat16m4_t test_vfmadd_vv_bf16m4_tu(vbfloat16m4_t vd, vbfloat16m4_t vs1, vbfloat16m4_t vs2, size_t vl) {
+ return __riscv_vfmadd_vv_bf16m4_tu(vd, vs1, vs2, vl);
+}
+
+// CHECK-RV64-LABEL: define dso_local <vscale x 16 x bfloat> @test_vfmadd_vf_bf16m4_tu(
+// CHECK-RV64-SAME: <vscale x 16 x bfloat> [[VD:%.*]], bfloat noundef [[RS1:%.*]], <vscale x 16 x bfloat> [[VS2:%.*]], i64 noundef [[VL:%.*]]) #[[ATTR0]] {
+// CHECK-RV64-NEXT: [[ENTRY:.*:]]
+// CHECK-RV64-NEXT: [[TMP0:%.*]] = call <vscale x 16 x bfloat> @llvm.riscv.vfmadd.nxv16bf16.bf16.i64(<vscale x 16 x bfloat> [[VD]], bfloat [[RS1]], <vscale x 16 x bfloat> [[VS2]], i64 7, i64 [[VL]], i64 2)
+// CHECK-RV64-NEXT: ret <vscale x 16 x bfloat> [[TMP0]]
+//
+vbfloat16m4_t test_vfmadd_vf_bf16m4_tu(vbfloat16m4_t vd, __bf16 rs1, vbfloat16m4_t vs2, size_t vl) {
+ return __riscv_vfmadd_vf_bf16m4_tu(vd, rs1, vs2, vl);
+}
+
+// CHECK-RV64-LABEL: define dso_local <vscale x 32 x bfloat> @test_vfmadd_vv_bf16m8_tu(
+// CHECK-RV64-SAME: <vscale x 32 x bfloat> [[VD:%.*]], <vscale x 32 x bfloat> [[VS1:%.*]], <vscale x 32 x bfloat> [[VS2:%.*]], i64 noundef [[VL:%.*]]) #[[ATTR0]] {
+// CHECK-RV64-NEXT: [[ENTRY:.*:]]
+// CHECK-RV64-NEXT: [[TMP0:%.*]] = call <vscale x 32 x bfloat> @llvm.riscv.vfmadd.nxv32bf16.nxv32bf16.i64(<vscale x 32 x bfloat> [[VD]], <vscale x 32 x bfloat> [[VS1]], <vscale x 32 x bfloat> [[VS2]], i64 7, i64 [[VL]], i64 2)
+// CHECK-RV64-NEXT: ret <vscale x 32 x bfloat> [[TMP0]]
+//
+vbfloat16m8_t test_vfmadd_vv_bf16m8_tu(vbfloat16m8_t vd, vbfloat16m8_t vs1, vbfloat16m8_t vs2, size_t vl) {
+ return __riscv_vfmadd_vv_bf16m8_tu(vd, vs1, vs2, vl);
+}
+
+// CHECK-RV64-LABEL: define dso_local <vscale x 32 x bfloat> @test_vfmadd_vf_bf16m8_tu(
+// CHECK-RV64-SAME: <vscale x 32 x bfloat> [[VD:%.*]], bfloat noundef [[RS1:%.*]], <vscale x 32 x bfloat> [[VS2:%.*]], i64 noundef [[VL:%.*]]) #[[ATTR0]] {
+// CHECK-RV64-NEXT: [[ENTRY:.*:]]
+// CHECK-RV64-NEXT: [[TMP0:%.*]] = call <vscale x 32 x bfloat> @llvm.riscv.vfmadd.nxv32bf16.bf16.i64(<vscale x 32 x bfloat> [[VD]], bfloat [[RS1]], <vscale x 32 x bfloat> [[VS2]], i64 7, i64 [[VL]], i64 2)
+// CHECK-RV64-NEXT: ret <vscale x 32 x bfloat> [[TMP0]]
+//
+vbfloat16m8_t test_vfmadd_vf_bf16m8_tu(vbfloat16m8_t vd, __bf16 rs1, vbfloat16m8_t vs2, size_t vl) {
+ return __riscv_vfmadd_vf_bf16m8_tu(vd, rs1, vs2, vl);
+}
+
+// CHECK-RV64-LABEL: define dso_local <vscale x 1 x bfloat> @test_vfmadd_vv_bf16mf4_tum(
+// CHECK-RV64-SAME: <vscale x 1 x i1> [[MASK:%.*]], <vscale x 1 x bfloat> [[VD:%.*]], <vscale x 1 x bfloat> [[VS1:%.*]], <vscale x 1 x bfloat> [[VS2:%.*]], i64 noundef [[VL:%.*]]) #[[ATTR0]] {
+// CHECK-RV64-NEXT: [[ENTRY:.*:]]
+// CHECK-RV64-NEXT: [[TMP0:%.*]] = call <vscale x 1 x bfloat> @llvm.riscv.vfmadd.mask.nxv1bf16.nxv1bf16.i64(<vscale x 1 x bfloat> [[VD]], <vscale x 1 x bfloat> [[VS1]], <vscale x 1 x bfloat> [[VS2]], <vscale x 1 x i1> [[MASK]], i64 7, i64 [[VL]], i64 2)
+// CHECK-RV64-NEXT: ret <vscale x 1 x bfloat> [[TMP0]]
+//
+vbfloat16mf4_t test_vfmadd_vv_bf16mf4_tum(vbool64_t mask, vbfloat16mf4_t vd, vbfloat16mf4_t vs1, vbfloat16mf4_t vs2, size_t vl) {
+ return __riscv_vfmadd_vv_bf16mf4_tum(mask, vd, vs1, vs2, vl);
+}
+
+// CHECK-RV64-LABEL: define dso_local <vscale x 1 x bfloat> @test_vfmadd_vf_bf16mf4_tum(
+// CHECK-RV64-SAME: <vscale x 1 x i1> [[MASK:%.*]], <vscale x 1 x bfloat> [[VD:%.*]], bfloat noundef [[RS1:%.*]], <vscale x 1 x bfloat> [[VS2:%.*]], i64 noundef [[VL:%.*]]) #[[ATTR0]] {
+// CHECK-RV64-NEXT: [[ENTRY:.*:]]
+// CHECK-RV64-NEXT: [[TMP0:%.*]] = call <vscale x 1 x bfloat> @llvm.riscv.vfmadd.mask.nxv1bf16.bf16.i64(<vscale x 1 x bfloat> [[VD]], bfloat [[RS1]], <vscale x 1 x bfloat> [[VS2]], <vscale x 1 x i1> [[MASK]], i64 7, i64 [[VL]], i64 2)
+// CHECK-RV64-NEXT: ret <vscale x 1 x bfloat> [[TMP0]]
+//
+vbfloat16mf4_t test_vfmadd_vf_bf16mf4_tum(vbool64_t mask, vbfloat16mf4_t vd, __bf16 rs1, vbfloat16mf4_t vs2, size_t vl) {
+ return __riscv_vfmadd_vf_bf16mf4_tum(mask, vd, rs1, vs2, vl);
+}
+
+// CHECK-RV64-LABEL: define dso_local <vscale x 2 x bfloat> @test_vfmadd_vv_bf16mf2_tum(
+// CHECK-RV64-SAME: <vscale x 2 x i1> [[MASK:%.*]], <vscale x 2 x bfloat> [[VD:%.*]], <vscale x 2 x bfloat> [[VS1:%.*]], <vscale x 2 x bfloat> [[VS2:%.*]], i64 noundef [[VL:%.*]]) #[[ATTR0]] {
+// CHECK-RV64-NEXT: [[ENTRY:.*:]]
+// CHECK-RV64-NEXT: [[TMP0:%.*]] = call <vscale x 2 x bfloat> @llvm.riscv.vfmadd.mask.nxv2bf16.nxv2bf16.i64(<vscale x 2 x bfloat> [[VD]], <vscale x 2 x bfloat> [[VS1]], <vscale x 2 x bfloat> [[VS2]], <vscale x 2 x i1> [[MASK]], i64 7, i64 [[VL]], i64 2)
+// CHECK-RV64-NEXT: ret <vscale x 2 x bfloat> [[TMP0]]
+//
+vbfloat16mf2_t test_vfmadd_vv_bf16mf2_tum(vbool32_t mask, vbfloat16mf2_t vd, vbfloat16mf2_t vs1, vbfloat16mf2_t vs2, size_t vl) {
+ return __riscv_vfmadd_vv_bf16mf2_tum(mask, vd, vs1, vs2, vl);
+}
+
+// CHECK-RV64-LABEL: define dso_local <vscale x 2 x bfloat> @test_vfmadd_vf_bf16mf2_tum(
+// CHECK-RV64-SAME: <vscale x 2 x i1> [[MASK:%.*]], <vscale x 2 x bfloat> [[VD:%.*]], bfloat noundef [[RS1:%.*]], <vscale x 2 x bfloat> [[VS2:%.*]], i64 noundef [[VL:%.*]]) #[[ATTR0]] {
+// CHECK-RV64-NEXT: [[ENTRY:.*:]]
+// CHECK-RV64-NEXT: [[TMP0:%.*]] = call <vscale x 2 x bfloat> @llvm.riscv.vfmadd.mask.nxv2bf16.bf16.i64(<vscale x 2 x bfloat> [[VD]], bfloat [[RS1]], <vscale x 2 x bfloat> [[VS2]], <vscale x 2 x i1> [[MASK]], i64 7, i64 [[VL]], i64 2)
+// CHECK-RV64-NEXT: ret <vscale x 2 x bfloat> [[TMP0]]
+//
+vbfloat16mf2_t test_vfmadd_vf_bf16mf2_tum(vbool32_t mask, vbfloat16mf2_t vd, __bf16 rs1, vbfloat16mf2_t vs2, size_t vl) {
+ return __riscv_vfmadd_vf_bf16mf2_tum(mask, vd, rs1, vs2, vl);
+}
+
+// CHECK-RV64-LABEL: define dso_local <vscale x 4 x bfloat> @test_vfmadd_vv_bf16m1_tum(
+// CHECK-RV64-SAME: <vscale x 4 x i1> [[MASK:%.*]], <vscale x 4 x bfloat> [[VD:%.*]], <vscale x 4 x bfloat> [[VS1:%.*]], <vscale x 4 x bfloat> [[VS2:%.*]], i64 noundef [[VL:%.*]]) #[[ATTR0]] {
+// CHECK-RV64-NEXT: [[ENTRY:.*:]]
+// CHECK-RV64-NEXT: [[TMP0:%.*]] = call <vscale x 4 x bfloat> @llvm.riscv.vfmadd.mask.nxv4bf16.nxv4bf16.i64(<vscale x 4 x bfloat> [[VD]], <vscale x 4 x bfloat> [[VS1]], <vscale x 4 x bfloat> [[VS2]], <vscale x 4 x i1> [[MASK]], i64 7, i64 [[VL]], i64 2)
+// CHECK-RV64-NEXT: ret <vscale x 4 x bfloat> [[TMP0]]
+//
+vbfloat16m1_t test_vfmadd_vv_bf16m1_tum(vbool16_t mask, vbfloat16m1_t vd, vbfloat16m1_t vs1, vbfloat16m1_t vs2, size_t vl) {
+ return __riscv_vfmadd_vv_bf16m1_tum(mask, vd, vs1, vs2, vl);
+}
+
+// CHECK-RV64-LABEL: define dso_local <vscale x 4 x bfloat> @test_vfmadd_vf_bf16m1_tum(
+// CHECK-RV64-SAME: <vscale x 4 x i1> [[MASK:%.*]], <vscale x 4 x bfloat> [[VD:%.*]], bfloat noundef [[RS1:%.*]], <vscale x 4 x bfloat> [[VS2:%.*]], i64 noundef [[VL:%.*]]) #[[ATTR0]] {
+// CHECK-RV64-NEXT: [[ENTRY:.*:]]
+// CHECK-RV64-NEXT: [[TMP0:%.*]] = call <vscale x 4 x bfloat> @llvm.riscv.vfmadd.mask.nxv4bf16.bf16.i64(<vscale x 4 x bfloat> [[VD]], bfloat [[RS1]], <vscale x 4 x bfloat> [[VS2]], <vscale x 4 x i1> [[MASK]], i64 7, i64 [[VL]], i64 2)
+// CHECK-RV64-NEXT: ret <vscale x 4 x bfloat> [[TMP0]]
+//
+vbfloat16m1_t test_vfmadd_vf_bf16m1_tum(vbool16_t mask, vbfloat16m1_t vd, __bf16 rs1, vbfloat16m1_t vs2, size_t vl) {
+ return __riscv_vfmadd_vf_bf16m1_tum(mask, vd, rs1, vs2, vl);
+}
+
+// CHECK-RV64-LABEL: define dso_local <vscale x 8 x bfloat> @test_vfmadd_vv_bf16m2_tum(
+// CHECK-RV64-SAME: <vscale x 8 x i1> [[MASK:%.*]], <vscale x 8 x bfloat> [[VD:%.*]], <vscale x 8 x bfloat> [[VS1:%.*]], <vscale x 8 x bfloat> [[VS2:%.*]], i64 noundef [[VL:%.*]]) #[[ATTR0]] {
+// CHECK-RV64-NEXT: [[ENTRY:.*:]]
+// CHECK-RV64-NEXT: [[TMP0:%.*]] = call <vscale x 8 x bfloat> @llvm.riscv.vfmadd.mask.nxv8bf16.nxv8bf16.i64(<vscale x 8 x bfloat> [[VD]], <vscale x 8 x bfloat> [[VS1]], <vscale x 8 x bfloat> [[VS2]], <vscale x 8 x i1> [[MASK]], i64 7, i64 [[VL]], i64 2)
+// CHECK-RV64-NEXT: ret <vscale x 8 x bfloat> [[TMP0]]
+//
+vbfloat16m2_t test_vfmadd_vv_bf16m2_tum(vbool8_t mask, vbfloat16m2_t vd, vbfloat16m2_t vs1, vbfloat16m2_t vs2, size_t vl) {
+ return __riscv_vfmadd_vv_bf16m2_tum(mask, vd, vs1, vs2, vl);
+}
+
+// CHECK-RV64-LABEL: define dso_local <vscale x 8 x bfloat> @test_vfmadd_vf_bf16m2_tum(
+// CHECK-RV64-SAME: <vscale x 8 x i1> [[MASK:%.*]], <vscale x 8 x bfloat> [[VD:%.*]], bfloat noundef [[RS1:%.*]], <vscale x 8 x bfloat> [[VS2:%.*]], i64 noundef [[VL:%.*]]) #[[ATTR0]] {
+// CHECK-RV64-NEXT: [[ENTRY:.*:]]
+// CHECK-RV64-NEXT: [[TMP0:%.*]] = call <vscale x 8 x bfloat> @llvm.riscv.vfmadd.mask.nxv8bf16.bf16.i64(<vscale x 8 x bfloat> [[VD]], bfloat [[RS1]], <vscale x 8 x bfloat> [[VS2]], <vscale x 8 x i1> [[MASK]], i64 7, i64 [[VL]], i64 2)
+// CHECK-RV64-NEXT: ret <vscale x 8 x bfloat> [[TMP0]]
+//
+vbfloat16m2_t test_vfmadd_vf_bf16m2_tum(vbool8_t mask, vbfloat16m2_t vd, __bf16 rs1, vbfloat16m2_t vs2, size_t vl) {
+ return __riscv_vfmadd_vf_bf16m2_tum(mask, vd, rs1, vs2, vl);
+}
+
+// CHECK-RV64-LABEL: define dso_local <vscale x 16 x bfloat> @test_vfmadd_vv_bf16m4_tum(
+// CHECK-RV64-SAME: <vscale x 16 x i1> [[MASK:%.*]], <vscale x 16 x bfloat> [[VD:%.*]], <vscale x 16 x bfloat> [[VS1:%.*]], <vscale x 16 x bfloat> [[VS2:%.*]], i64 noundef [[VL:%.*]]) #[[ATTR0]] {
+// CHECK-RV64-NEXT: [[ENTRY:.*:]]
+// CHECK-RV64-NEXT: [[TMP0:%.*]] = call <vscale x 16 x bfloat> @llvm.riscv.vfmadd.mask.nxv16bf16.nxv16bf16.i64(<vscale x 16 x bfloat> [[VD]], <vscale x 16 x bfloat> [[VS1]], <vscale x 16 x bfloat> [[VS2]], <vscale x 16 x i1> [[MASK]], i64 7, i64 [[VL]], i64 2)
+// CHECK-RV64-NEXT: ret <vscale x 16 x bfloat> [[TMP0]]
+//
+vbfloat16m4_t test_vfmadd_vv_bf16m4_tum(vbool4_t mask, vbfloat16m4_t vd, vbfloat16m4_t vs1, vbfloat16m4_t vs2, size_t vl) {
+ return __riscv_vfmadd_vv_bf16m4_tum(mask, vd, vs1, vs2, vl);
+}
+
+// CHECK-RV64-LABEL: define dso_local <vscale x 16 x bfloat> @test_vfmadd_vf_bf16m4_tum(
+// CHECK-RV64-SAME: <vscale x 16 x i1> [[MASK:%.*]], <vscale x 16 x bfloat> [[VD:%.*]], bfloat noundef [[RS1:%.*]], <vscale x 16 x bfloat> [[VS2:%.*]], i64 noundef [[VL:%.*]]) #[[ATTR0]] {
+// CHECK-RV64-NEXT: [[ENTRY:.*:]]
+// CHECK-RV64-NEXT: [[TMP0:%.*]] = call <vscale x 16 x bfloat> @llvm.riscv.vfmadd.mask.nxv16bf16.bf16.i64(<vscale x 16 x bfloat> [[VD]], bfloat [[RS1]], <vscale x 16 x bfloat> [[VS2]], <vscale x 16 x i1> [[MASK]], i64 7, i64 [[VL]], i64 2)
+// CHECK-RV64-NEXT: ret <vscale x 16 x bfloat> [[TMP0]]
+//
+vbfloat16m4_t test_vfmadd_vf_bf16m4_tum(vbool4_t mask, vbfloat16m4_t vd, __bf16 rs1, vbfloat16m4_t vs2, size_t vl) {
+ return __riscv_vfmadd_vf_bf16m4_tum(mask, vd, rs1, vs2, vl);
+}
+
+// CHECK-RV64-LABEL: define dso_local <vscale x 32 x bfloat> @test_vfmadd_vv_bf16m8_tum(
+// CHECK-RV64-SAME: <vscale x 32 x i1> [[MASK:%.*]], <vscale x 32 x bfloat> [[VD:%.*]], <vscale x 32 x bfloat> [[VS1:%.*]], <vscale x 32 x bfloat> [[VS2:%.*]], i64 noundef [[VL:%.*]]) #[[ATTR0]] {
+// CHECK-RV64-NEXT: [[ENTRY:.*:]]
+// CHECK-RV64-NEXT: [[TMP0:%.*]] = call <vscale x 32 x bfloat> @llvm.riscv.vfmadd.mask.nxv32bf16.nxv32bf16.i64(<vscale x 32 x bfloat> [[VD]], <vscale x 32 x bfloat> [[VS1]], <vscale x 32 x bfloat> [[VS2]], <vscale x 32 x i1> [[MASK]], i64 7, i64 [[VL]], i64 2)
+// CHECK-RV64-NEXT: ret <vscale x 32 x bfloat> [[TMP0]]
+//
+vbfloat16m8_t test_vfmadd_vv_bf16m8_tum(vbool2_t mask, vbfloat16m8_t vd, vbfloat16m8_t vs1, vbfloat16m8_t vs2, size_t vl) {
+ return __riscv_vfmadd_vv_bf16m8_tum(mask, vd, vs1, vs2, vl);
+}
+
+// CHECK-RV64-LABEL: define dso_local <vscale x 32 x bfloat> @test_vfmadd_vf_bf16m8_tum(
+// CHECK-RV64-SAME: <vscale x 32 x i1> [[MASK:%.*]], <vscale x 32 x bfloat> [[VD:%.*]], bfloat noundef [[RS1:%.*]], <vscale x 32 x bfloat> [[VS2:%.*]], i64 noundef [[VL:%.*]]) #[[ATTR0]] {
+// CHECK-RV64-NEXT: [[ENTRY:.*:]]
+// CHECK-RV64-NEXT: [[TMP0:%.*]] = call <vscale x 32 x bfloat> @llvm.riscv.vfmadd.mask.nxv32bf16.bf16.i64(<vscale x 32 x bfloat> [[VD]], bfloat [[RS1]], <vscale x 32 x bfloat> [[VS2]], <vscale x 32 x i1> [[MASK]], i64 7, i64 [[VL]], i64 2)
+// CHECK-RV64-NEXT: ret <vscale x 32 x bfloat> [[TMP0]]
+//
+vbfloat16m8_t test_vfmadd_vf_bf16m8_tum(vbool2_t mask, vbfloat16m8_t vd, __bf16 rs1, vbfloat16m8_t vs2, size_t vl) {
+ return __riscv_vfmadd_vf_bf16m8_tum(mask, vd, rs1, vs2, vl);
+}
+
+// CHECK-RV64-LABEL: define dso_local <vscale x 1 x bfloat> @test_vfmadd_vv_bf16mf4_tumu(
+// CHECK-RV64-SAME: <vscale x 1 x i1> [[MASK:%.*]], <vscale x 1 x bfloat> [[VD:%.*]], <vscale x 1 x bfloat> [[VS1:%.*]], <vscale x 1 x bfloat> [[VS2:%.*]], i64 noundef [[VL:%.*]]) #[[ATTR0]] {
+// CHECK-RV64-NEXT: [[ENTRY:.*:]]
+// CHECK-RV64-NEXT: [[TMP0:%.*]] = call <vscale x 1 x bfloat> @llvm.riscv.vfmadd.mask.nxv1bf16.nxv1bf16.i64(<vscale x 1 x bfloat> [[VD]], <vscale x 1 x bfloat> [[VS1]], <vscale x 1 x bfloat> [[VS2]], <vscale x 1 x i1> [[MASK]], i64 7, i64 [[VL]], i64 0)
+// CHECK-RV64-NEXT: ret <vscale x 1 x bfloat> [[TMP0]]
+//
+vbfloat16mf4_t test_vfmadd_vv_bf16mf4_tumu(vbool64_t mask, vbfloat16mf4_t vd, vbfloat16mf4_t vs1, vbfloat16mf4_t vs2, size_t vl) {
+ return __riscv_vfmadd_vv_bf16mf4_tumu(mask, vd, vs1, vs2, vl);
+}
+
+// CHECK-RV64-LABEL: define dso_local <vscale x 1 x bfloat> @test_vfmadd_vf_bf16mf4_tumu(
+// CHECK-RV64-SAME: <vscale x 1 x i1> [[MASK:%.*]], <vscale x 1 x bfloat> [[VD:%.*]], bfloat noundef [[RS1:%.*]], <vscale x 1 x bfloat> [[VS2:%.*]], i64 noundef [[VL:%.*]]) #[[ATTR0]] {
+// CHECK-RV64-NEXT: [[ENTRY:.*:]]
+// CHECK-RV64-NEXT: [[TMP0:%.*]] = call <vscale x 1 x bfloat> @llvm.riscv.vfmadd.mask.nxv1bf16.bf16.i64(<vscale x 1 x bfloat> [[VD]], bfloat [[RS1]], <vscale x 1 x bfloat> [[VS2]], <vscale x 1 x i1> [[MASK]], i64 7, i64 [[VL]], i64 0)
+// CHECK-RV64-NEXT: ret <vscale x 1 x bfloat> [[TMP0]]
+//
+vbfloat16mf4_t test_vfmadd_vf_bf16mf4_tumu(vbool64_t mask, vbfloat16mf4_t vd, __bf16 rs1, vbfloat16mf4_t vs2, size_t vl) {
+ return __riscv_vfmadd_vf_bf16mf4_tumu(mask, vd, rs1, vs2, vl);
+}
+
+// CHECK-RV64-LABEL: define dso_local <vscale x 2 x bfloat> @test_vfmadd_vv_bf16mf2_tumu(
+// CHECK-RV64-SAME: <vscale x 2 x i1> [[MASK:%.*]], <vscale x 2 x bfloat> [[VD:%.*]], <vscale x 2 x bfloat> [[VS1:%.*]], <vscale x 2 x bfloat> [[VS2:%.*]], i64 noundef [[VL:%.*]]) #[[ATTR0]] {
+// CHECK-RV64-NEXT: [[ENTRY:.*:]]
+// CHECK-RV64-NEXT: [[TMP0:%.*]] = call <vscale x 2 x bfloat> @llvm.riscv.vfmadd.mask.nxv2bf16.nxv2bf16.i64(<vscale x 2 x bfloat> [[VD]], <vscale x 2 x bfloat> [[VS1]], <vscale x 2 x bfloat> [[VS2]], <vscale x 2 x i1> [[MASK]], i64 7, i64 [[VL]], i64 0)
+// CHECK-RV64-NEXT: ret <vscale x 2 x bfloat> [[TMP0]]
+//
+vbfloat16mf2_t test_vfmadd_vv_bf16mf2_tumu(vbool32_t mask, vbfloat16mf2_t vd, vbfloat16mf2_t vs1, vbfloat16mf2_t vs2, size_t vl) {
+ return __riscv_vfmadd_vv_bf16mf2_tumu(mask, vd, vs1, vs2, vl);
+}
+
+// CHECK-RV64-LABEL: define dso_local <vscale x 2 x bfloat> @test_vfmadd_vf_bf16mf2_tumu(
+// CHECK-RV64-SAME: <vscale x 2 x i1> [[MASK:%.*]], <vscale x 2 x bfloat> [[VD:%.*]], bfloat noundef [[RS1:%.*]], <vscale x 2 x bfloat> [[VS2:%.*]], i64 noundef [[VL:%.*]]) #[[ATTR0]] {
+// CHECK-RV64-NEXT: [[ENTRY:.*:]]
+// CHECK-RV64-NEXT: [[TMP0:%.*]] = call <vscale x 2 x bfloat> @llvm.riscv.vfmadd.mask.nxv2bf16.bf16.i64(<vscale x 2 x bfloat> [[VD]], bfloat [[RS1]], <vscale x 2 x bfloat> [[VS2]], <vscale x 2 x i1> [[MASK]], i64 7, i64 [[VL]], i64 0)
+// CHECK-RV64-NEXT: ret <vscale x 2 x bfloat> [[TMP0]]
+//
+vbfloat16mf2_t test_vfmadd_vf_bf16mf2_tumu(vbool32_t mask, vbfloat16mf2_t vd, __bf16 rs1, vbfloat16mf2_t vs2, size_t vl) {
+ return __riscv_vfmadd_vf_bf16mf2_tumu(mask, vd, rs1, vs2, vl);
+}
+
+// CHECK-RV64-LABEL: define dso_local <vscale x 4 x bfloat> @test_vfmadd_vv_bf16m1_tumu(
+// CHECK-RV64-SAME: <vscale x 4 x i1> [[MASK:%.*]], <vscale x 4 x bfloat> [[VD:%.*]], <vscale x 4 x bfloat> [[VS1:%.*]], <vscale x 4 x bfloat> [[VS2:%.*]], i64 noundef [[VL:%.*]]) #[[ATTR0]] {
+// CHECK-RV64-NEXT: [[ENTRY:.*:]]
+// CHECK-RV64-NEXT: [[TMP0:%.*]] = call <vscale x 4 x bfloat> @llvm.riscv.vfmadd.mask.nxv4bf16.nxv4bf16.i64(<vscale x 4 x bfloat> [[VD]], <vscale x 4 x bfloat> [[VS1]], <vscale x 4 x bfloat> [[VS2]], <vscale x 4 x i1> [[MASK]], i64 7, i64 [[VL]], i64 0)
+// CHECK-RV64-NEXT: ret <vscale x 4 x bfloat> [[TMP0]]
+//
+vbfloat16m1_t test_vfmadd_vv_bf16m1_tumu(vbool16_t mask, vbfloat16m1_t vd, vbfloat16m1_t vs1, vbfloat16m1_t vs2, size_t vl) {
+ return __riscv_vfmadd_vv_bf16m1_tumu(mask, vd, vs1, vs2, vl);
+}
+
+// CHECK-RV64-LABEL: define dso_local <vscale x 4 x bfloat> @test_vfmadd_vf_bf16m1_tumu(
+// CHECK-RV64-SAME: <vscale x 4 x i1> [[MASK:%.*]], <vscale x 4 x bfloat> [[VD:%.*]], bfloat noundef [[RS1:%.*]], <vscale x 4 x bfloat> [[VS2:%.*]], i64 noundef [[VL:%.*]]) #[[ATTR0]] {
+// CHECK-RV64-NEXT: [[ENTRY:.*:]]
+// CHECK-RV64-NEXT: [[TMP0:%.*]] = call <vscale x 4 x bfloat> @llvm.riscv.vfmadd.mask.nxv4bf16.bf16.i64(<vscale x 4 x bfloat> [[VD]], bfloat [[RS1]], <vscale x 4 x bfloat> [[VS2]], <vscale x 4 x i1> [[MASK]], i64 7, i64 [[VL]], i64 0)
+// CHECK-RV64-NEXT: ret <vscale x 4 x bfloat> [[TMP0]]
+//
+vbfloat16m1_t test_vfmadd_vf_bf16m1_tumu(vbool16_t mask, vbfloat16m1_t vd, __bf16 rs1, vbfloat16m1_t vs2, size_t vl) {
+ return __riscv_vfmadd_vf_bf16m1_tumu(mask, vd, rs1, vs2, vl);
+}
+
+// CHECK-RV64-LABEL: define dso_local <vscale x 8 x bfloat> @test_vfmadd_vv_bf16m2_tumu(
+// CHECK-RV64-SAME: <vscale x 8 x i1> [[MASK:%.*]], <vscale x 8 x bfloat> [[VD:%.*]], <vscale x 8 x bfloat> [[VS1:%.*]], <vscale x 8 x bfloat> [[VS2:%.*]], i64 noundef [[VL:%.*]]) #[[ATTR0]] {
+// CHECK-RV64-NEXT: [[ENTRY:.*:]]
+// CHECK-RV64-NEXT: [[TMP0:%.*]] = call <vscale x 8 x bfloat> @llvm.riscv.vfmadd.mask.nxv8bf16.nxv8bf16.i64(<vscale x 8 x bfloat> [[VD]], <vscale x 8 x bfloat> [[VS1]], <vscale x 8 x bfloat> [[VS2]], <vscale x 8 x i1> [[MASK]], i64 7, i64 [[VL]], i64 0)
+// CHECK-RV64-NEXT: ret <vscale x 8 x bfloat> [[TMP0]]
+//
+vbfloat16m2_t test_vfmadd_vv_bf16m2_tumu(vbool8_t mask, vbfloat16m2_t vd, vbfloat16m2_t vs1, vbfloat16m2_t vs2, size_t vl) {
+ return __riscv_vfmadd_vv_bf16m2_tumu(mask, vd, vs1, vs2, vl);
+}
+
+// CHECK-RV64-LABEL: define dso_local <vscale x 8 x bfloat> @test_vfmadd_vf_bf16m2_tumu(
+// CHECK-RV64-SAME: <vscale x 8 x i1> [[MASK:%.*]], <vscale x 8 x bfloat> [[VD:%.*]], bfloat noundef [[RS1:%.*]], <vscale x 8 x bfloat> [[VS2:%.*]], i64 noundef [[VL:%.*]]) #[[ATTR0]] {
+// CHECK-RV64-NEXT: [[ENTRY:.*:]]
+// CHECK-RV64-NEXT: [[TMP0:%.*]] = call <vscale x 8 x bfloat> @llvm.riscv.vfmadd.mask.nxv8bf16.bf16.i64(<vscale x 8 x bfloat> [[VD]], bfloat [[RS1]], <vscale x 8 x bfloat> [[VS2]], <vscale x 8 x i1> [[MASK]], i64 7, i64 [[VL]], i64 0)
+// CHECK-RV64-NEXT: ret <vscale x 8 x bfloat> [[TMP0]]
+//
+vbfloat16m2_t test_vfmadd_vf_bf16m2_tumu(vbool8_t mask, vbfloat16m2_t vd, __bf16 rs1, vbfloat16m2_t vs2, size_t vl) {
+ return __riscv_vfmadd_vf_bf16m2_tumu(mask, vd, rs1, vs2, vl);
+}
+
+// CHECK-RV64-LABEL: define dso_local <vscale x 16 x bfloat> @test_vfmadd_vv_bf16m4_tumu(
+// CHECK-RV64-SAME: <vscale x 16 x i1> [[MASK:%.*]], <vscale x 16 x bfloat> [[VD:%.*]], <vscale x 16 x bfloat> [[VS1:%.*]], <vscale x 16 x bfloat> [[VS2:%.*]], i64 noundef [[VL:%.*]]) #[[ATTR0]] {
+// CHECK-RV64-NEXT: [[ENTRY:.*:]]
+// CHECK-RV64-NEXT: [[TMP0:%.*]] = call <vscale x 16 x bfloat> @llvm.riscv.vfmadd.mask.nxv16bf16.nxv16bf16.i64(<vscale x 16 x bfloat> [[VD]], <vscale x 16 x bfloat> [[VS1]], <vscale x 16 x bfloat> [[VS2]], <vscale x 16 x i1> [[MASK]], i64 7, i64 [[VL]], i64 0)
+// CHECK-RV64-NEXT: ret <vscale x 16 x bfloat> [[TMP0]]
+//
+vbfloat16m4_t test_vfmadd_vv_bf16m4_tumu(vbool4_t mask, vbfloat16m4_t vd, vbfloat16m4_t vs1, vbfloat16m4_t vs2, size_t vl) {
+ return __riscv_vfmadd_vv_bf16m4_tumu(mask, vd, vs1, vs2, vl);
+}
+
+// CHECK-RV64-LABEL: define dso_local <vscale x 16 x bfloat> @test_vfmadd_vf_bf16m4_tumu(
+// CHECK-RV64-SAME: <vscale x 16 x i1> [[MASK:%.*]], <vscale x 16 x bfloat> [[VD:%.*]], bfloat noundef [[RS1:%.*]], <vscale x 16 x bfloat> [[VS2:%.*]], i64 noundef [[VL:%.*]]) #[[ATTR0]] {
+// CHECK-RV64-NEXT: [[ENTRY:.*:]]
+// CHECK-RV64-NEXT: [[TMP0:%.*]] = call <vscale x 16 x bfloat> @llvm.riscv.vfmadd.mask.nxv16bf16.bf16.i64(<vscale x 16 x bfloat> [[VD]], bfloat [[RS1]], <vscale x 16 x bfloat> [[VS2]], <vscale x 16 x i1> [[MASK]], i64 7, i64 [[VL]], i64 0)
+// CHECK-RV64-NEXT: ret <vscale x 16 x bfloat> [[TMP0]]
+//
+vbfloat16m4_t test_vfmadd_vf_bf16m4_tumu(vbool4_t mask, vbfloat16m4_t vd, __bf16 rs1, vbfloat16m4_t vs2, size_t vl) {
+ return __riscv_vfmadd_vf_bf16m4_tumu(mask, vd, rs1, vs2, vl);
+}
+
+// CHECK-RV64-LABEL: define dso_local <vscale x 32 x bfloat> @test_vfmadd_vv_bf16m8_tumu(
+// CHECK-RV64-SAME: <vscale x 32 x i1> [[MASK:%.*]], <vscale x 32 x bfloat> [[VD:%.*]], <vscale x 32 x bfloat> [[VS1:%.*]], <vscale x 32 x bfloat> [[VS2:%.*]], i64 noundef [[VL:%.*]]) #[[ATTR0]] {
+// CHECK-RV64-NEXT: [[ENTRY:.*:]]
+// CHECK-RV64-NEXT: [[TMP0:%.*]] = call <vscale x 32 x bfloat> @llvm.riscv.vfmadd.mask.nxv32bf16.nxv32bf16.i64(<vscale x 32 x bfloat> [[VD]], <vscale x 32 x bfloat> [[VS1]], <vscale x 32 x bfloat> [[VS2]], <vscale x 32 x i1> [[MASK]], i64 7, i64 [[VL]], i64 0)
+// CHECK-RV64-NEXT: ret <vscale x 32 x bfloat> [[TMP0]]
+//
+vbfloat16m8_t test_vfmadd_vv_bf16m8_tumu(vbool2_t mask, vbfloat16m8_t vd, vbfloat16m8_t vs1, vbfloat16m8_t vs2, size_t vl) {
+ return __riscv_vfmadd_vv_bf16m8_tumu(mask, vd, vs1, vs2, vl);
+}
+
+// CHECK-RV64-LABEL: define dso_local <vscale x 32 x bfloat> @test_vfmadd_vf_bf16m8_tumu(
+// CHECK-RV64-SAME: <vscale x 32 x i1> [[MASK:%.*]], <vscale x 32 x bfloat> [[VD:%.*]], bfloat noundef [[RS1:%.*]], <vscale x 32 x bfloat> [[VS2:%.*]], i64 noundef [[VL:%.*]]) #[[ATTR0]] {
+// CHECK-RV64-NEXT: [[ENTRY:.*:]]
+// CHECK-RV64-NEXT: [[TMP0:%.*]] = call <vscale x 32 x bfloat> @llvm.riscv.vfmadd.mask.nxv32bf16.bf16.i64(<vscale x 32 x bfloat> [[VD]], bfloat [[RS1]], <vscale x 32 x bfloat> [[VS2]], <vscale x 32 x i1> [[MASK]], i64 7, i64 [[VL]], i64 0)
+// CHECK-RV64-NEXT: ret <vscale x 32 x bfloat> [[TMP0]]
+//
+vbfloat16m8_t test_vfmadd_vf_bf16m8_tumu(vbool2_t mask, vbfloat16m8_t vd, __bf16 rs1, vbfloat16m8_t vs2, size_t vl) {
+ return __riscv_vfmadd_vf_bf16m8_tumu(mask, vd, rs1, vs2, vl);
+}
+
+// CHECK-RV64-LABEL: define dso_local <vscale x 1 x bfloat> @test_vfmadd_vv_bf16mf4_mu(
+// CHECK-RV64-SAME: <vscale x 1 x i1> [[MASK:%.*]], <vscale x 1 x bfloat> [[VD:%.*]], <vscale x 1 x bfloat> [[VS1:%.*]], <vscale x 1 x bfloat> [[VS2:%.*]], i64 noundef [[VL:%.*]]) #[[ATTR0]] {
+// CHECK-RV64-NEXT: [[ENTRY:.*:]]
+// CHECK-RV64-NEXT: [[TMP0:%.*]] = call <vscale x 1 x bfloat> @llvm.riscv.vfmadd.mask.nxv1bf16.nxv1bf16.i64(<vscale x 1 x bfloat> [[VD]], <vscale x 1 x bfloat> [[VS1]], <vscale x 1 x bfloat> [[VS2]], <vscale x 1 x i1> [[MASK]], i64 7, i64 [[VL]], i64 1)
+// CHECK-RV64-NEXT: ret <vscale x 1 x bfloat> [[TMP0]]
+//
+vbfloat16mf4_t test_vfmadd_vv_bf16mf4_mu(vbool64_t mask, vbfloat16mf4_t vd, vbfloat16mf4_t vs1, vbfloat16mf4_t vs2, size_t vl) {
+ return __riscv_vfmadd_vv_bf16mf4_mu(mask, vd, vs1, vs2, vl);
+}
+
+// CHECK-RV64-LABEL: define dso_local <vscale x 1 x bfloat> @test_vfmadd_vf_bf16mf4_mu(
+// CHECK-RV64-SAME: <vscale x 1 x i1> [[MASK:%.*]], <vscale x 1 x bfloat> [[VD:%.*]], bfloat noundef [[RS1:%.*]], <vscale x 1 x bfloat> [[VS2:%.*]], i64 noundef [[VL:%.*]]) #[[ATTR0]] {
+// CHECK-RV64-NEXT: [[ENTRY:.*:]]
+// CHECK-RV64-NEXT: [[TMP0:%.*]] = call <vscale x 1 x bfloat> @llvm.riscv.vfmadd.mask.nxv1bf16.bf16.i64(<vscale x 1 x bfloat> [[VD]], bfloat [[RS1]], <vscale x 1 x bfloat> [[VS2]], <vscale x 1 x i1> [[MASK]], i64 7, i64 [[VL]], i64 1)
+// CHECK-RV64-NEXT: ret <vscale x 1 x bfloat> [[TMP0]]
+//
+vbfloat16mf4_t test_vfmadd_vf_bf16mf4_mu(vbool64_t mask, vbfloat16mf4_t vd, __bf16 rs1, vbfloat16mf4_t vs2, size_t vl) {
+ return __riscv_vfmadd_vf_bf16mf4_mu(mask, vd, rs1, vs2, vl);
+}
+
+// CHECK-RV64-LABEL: define dso_local <vscale x 2 x bfloat> @test_vfmadd_vv_bf16mf2_mu(
+// CHECK-RV64-SAME: <vscale x 2 x i1> [[MASK:%.*]], <vscale x 2 x bfloat> [[VD:%.*]], <vscale x 2 x bfloat> [[VS1:%.*]], <vscale x 2 x bfloat> [[VS2:%.*]], i64 noundef [[VL:%.*]]) #[[ATTR0]] {
+// CHECK-RV64-NEXT: [[ENTRY:.*:]]
+// CHECK-RV64-NEXT: [[TMP0:%.*]] = call <vscale x 2 x bfloat> @llvm.riscv.vfmadd.mask.nxv2bf16.nxv2bf16.i64(<vscale x 2 x bfloat> [[VD]], <vscale x 2 x bfloat> [[VS1]], <vscale x 2 x bfloat> [[VS2]], <vscale x 2 x i1> [[MASK]], i64 7, i64 [[VL]], i64 1)
+// CHECK-RV64-NEXT: ret <vscale x 2 x bfloat> [[TMP0]]
+//
+vbfloat16mf2_t test_vfmadd_vv_bf16mf2_mu(vbool32_t mask, vbfloat16mf2_t vd, vbfloat16mf2_t vs1, vbfloat16mf2_t vs2, size_t vl) {
+ return __riscv_vfmadd_vv_bf16mf2_mu(mask, vd, vs1, vs2, vl);
+}
+
+// CHECK-RV64-LABEL: define dso_local <vscale x 2 x bfloat> @test_vfmadd_vf_bf16mf2_mu(
+// CHECK-RV64-SAME: <vscale x 2 x i1> [[MASK:%.*]], <vscale x 2 x bfloat> [[VD:%.*]], bfloat noundef [[RS1:%.*]], <vscale x 2 x bfloat> [[VS2:%.*]], i64 noundef [[VL:%.*]]) #[[ATTR0]] {
+// CHECK-RV64-NEXT: [[ENTRY:.*:]]
+// CHECK-RV64-NEXT: [[TMP0:%.*]] = call <vscale x 2 x bfloat> @llvm.riscv.vfmadd.mask.nxv2bf16.bf16.i64(<vscale x 2 x bfloat> [[VD]], bfloat [[RS1]], <vscale x 2 x bfloat> [[VS2]], <vscale x 2 x i1> [[MASK]], i64 7, i64 [[VL]], i64 1)
+// CHECK-RV64-NEXT: ret <vscale x 2 x bfloat> [[TMP0]]
+//
+vbfloat16mf2_t test_vfmadd_vf_bf16mf2_mu(vbool32_t mask, vbfloat16mf2_t vd, __bf16 rs1, vbfloat16mf2_t vs2, size_t vl) {
+ return __riscv_vfmadd_vf_bf16mf2_mu(mask, vd, rs1, vs2, vl);
+}
+
+// CHECK-RV64-LABEL: define dso_local <vscale x 4 x bfloat> @test_vfmadd_vv_bf16m1_mu(
+// CHECK-RV64-SAME: <vscale x 4 x i1> [[MASK:%.*]], <vscale x 4 x bfloat> [[VD:%.*]], <vscale x 4 x bfloat> [[VS1:%.*]], <vscale x 4 x bfloat> [[VS2:%.*]], i64 noundef [[VL:%.*]]) #[[ATTR0]] {
+// CHECK-RV64-NEXT: [[ENTRY:.*:]]
+// CHECK-RV64-NEXT: [[TMP0:%.*]] = call <vscale x 4 x bfloat> @llvm.riscv.vfmadd.mask.nxv4bf16.nxv4bf16.i64(<vscale x 4 x bfloat> [[VD]], <vscale x 4 x bfloat> [[VS1]], <vscale x 4 x bfloat> [[VS2]], <vscale x 4 x i1> [[MASK]], i64 7, i64 [[VL]], i64 1)
+// CHECK-RV64-NEXT: ret <vscale x 4 x bfloat> [[TMP0]]
+//
+vbfloat16m1_t test_vfmadd_vv_bf16m1_mu(vbool16_t mask, vbfloat16m1_t vd, vbfloat16m1_t vs1, vbfloat16m1_t vs2, size_t vl) {
+ return __riscv_vfmadd_vv_bf16m1_mu(mask, vd, vs1, vs2, vl);
+}
+
+// CHECK-RV64-LABEL: define dso_local <vscale x 4 x bfloat> @test_vfmadd_vf_bf16m1_mu(
+// CHECK-RV64-SAME: <vscale x 4 x i1> [[MASK:%.*]], <vscale x 4 x bfloat> [[VD:%.*]], bfloat noundef [[RS1:%.*]], <vscale x 4 x bfloat> [[VS2:%.*]], i64 noundef [[VL:%.*]]) #[[ATTR0]] {
+// CHECK-RV64-NEXT: [[ENTRY:.*:]]
+// CHECK-RV64-NEXT: [[TMP0:%.*]] = call <vscale x 4 x bfloat> @llvm.riscv.vfmadd.mask.nxv4bf16.bf16.i64(<vscale x 4 x bfloat> [[VD]], bfloat [[RS1]], <vscale x 4 x bfloat> [[VS2]], <vscale x 4 x i1> [[MASK]], i64 7, i64 [[VL]], i64 1)
+// CHECK-RV64-NEXT: ret <vscale x 4 x bfloat> [[TMP0]]
+//
+vbfloat16m1_t test_vfmadd_vf_bf16m1_mu(vbool16_t mask, vbfloat16m1_t vd, __bf16 rs1, vbfloat16m1_t vs2, size_t vl) {
+ return __riscv_vfmadd_vf_bf16m1_mu(mask, vd, rs1, vs2, vl);
+}
+
+// CHECK-RV64-LABEL: define dso_local <vscale x 8 x bfloat> @test_vfmadd_vv_bf16m2_mu(
+// CHECK-RV64-SAME: <vscale x 8 x i1> [[MASK:%.*]], <vscale x 8 x bfloat> [[VD:%.*]], <vscale x 8 x bfloat> [[VS1:%.*]], <vscale x 8 x bfloat> [[VS2:%.*]], i64 noundef [[VL:%.*]]) #[[ATTR0]] {
+// CHECK-RV64-NEXT: [[ENTRY:.*:]]
+// CHECK-RV64-NEXT: [[TMP0:%.*]] = call <vscale x 8 x bfloat> @llvm.riscv.vfmadd.mask.nxv8bf16.nxv8bf16.i64(<vscale x 8 x bfloat> [[VD]], <vscale x 8 x bfloat> [[VS1]], <vscale x 8 x bfloat> [[VS2]], <vscale x 8 x i1> [[MASK]], i64 7, i64 [[VL]], i64 1)
+// CHECK-RV64-NEXT: ret <vscale x 8 x bfloat> [[TMP0]]
+//
+vbfloat16m2_t test_vfmadd_vv_bf16m2_mu(vbool8_t mask, vbfloat16m2_t vd, vbfloat16m2_t vs1, vbfloat16m2_t vs2, size_t vl) {
+ return __riscv_vfmadd_vv_bf16m2_mu(mask, vd, vs1, vs2, vl);
+}
+
+// CHECK-RV64-LABEL: define dso_local <vscale x 8 x bfloat> @test_vfmadd_vf_bf16m2_mu(
+// CHECK-RV64-SAME: <vscale x 8 x i1> [[MASK:%.*]], <vscale x 8 x bfloat> [[VD:%.*]], bfloat noundef [[RS1:%.*]], <vscale x 8 x bfloat> [[VS2:%.*]], i64 noundef [[VL:%.*]]) #[[ATTR0]] {
+// CHECK-RV64-NEXT: [[ENTRY:.*:]]
+// CHECK-RV64-NEXT: [[TMP0:%.*]] = call <vscale x 8 x bfloat> @llvm.riscv.vfmadd.mask.nxv8bf16.bf16.i64(<vscale x 8 x bfloat> [[VD]], bfloat [[RS1]], <vscale x 8 x bfloat> [[VS2]], <vscale x 8 x i1> [[MASK]], i64 7, i64 [[VL]], i64 1)
+// CHECK-RV64-NEXT: ret <vscale x 8 x bfloat> [[TMP0]]
+//
+vbfloat16m2_t test_vfmadd_vf_bf16m2_mu(vbool8_t mask, vbfloat16m2_t vd, __bf16 rs1, vbfloat16m2_t vs2, size_t vl) {
+ return __riscv_vfmadd_vf_bf16m2_mu(mask, vd, rs1, vs2, vl);
+}
+
+// CHECK-RV64-LABEL: define dso_local <vscale x 16 x bfloat> @test_vfmadd_vv_bf16m4_mu(
+// CHECK-RV64-SAME: <vscale x 16 x i1> [[MASK:%.*]], <vscale x 16 x bfloat> [[VD:%.*]], <vscale x 16 x bfloat> [[VS1:%.*]], <vscale x 16 x bfloat> [[VS2:%.*]], i64 noundef [[VL:%.*]]) #[[ATTR0]] {
+// CHECK-RV64-NEXT: [[ENTRY:.*:]]
+// CHECK-RV64-NEXT: [[TMP0:%.*]] = call <vscale x 16 x bfloat> @llvm.riscv.vfmadd.mask.nxv16bf16.nxv16bf16.i64(<vscale x 16 x bfloat> [[VD]], <vscale x 16 x bfloat> [[VS1]], <vscale x 16 x bfloat> [[VS2]], <vscale x 16 x i1> [[MASK]], i64 7, i64 [[VL]], i64 1)
+// CHECK-RV64-NEXT: ret <vscale x 16 x bfloat> [[TMP0]]
+//
+vbfloat16m4_t test_vfmadd_vv_bf16m4_mu(vbool4_t mask, vbfloat16m4_t vd, vbfloat16m4_t vs1, vbfloat16m4_t vs2, size_t vl) {
+ return __riscv_vfmadd_vv_bf16m4_mu(mask, vd, vs1, vs2, vl);
+}
+
+// CHECK-RV64-LABEL: define dso_local <vscale x 16 x bfloat> @test_vfmadd_vf_bf16m4_mu(
+// CHECK-RV64-SAME: <vscale x 16 x i1> [[MASK:%.*]], <vscale x 16 x bfloat> [[VD:%.*]], bfloat noundef [[RS1:%.*]], <vscale x 16 x bfloat> [[VS2:%.*]], i64 noundef [[VL:%.*]]) #[[ATTR0]] {
+// CHECK-RV64-NEXT: [[ENTRY:.*:]]
+// CHECK-RV64-NEXT: [[TMP0:%.*]] = call <vscale x 16 x bfloat> @llvm.riscv.vfmadd.mask.nxv16bf16.bf16.i64(<vscale x 16 x bfloat> [[VD]], bfloat [[RS1]], <vscale x 16 x bfloat> [[VS2]], <vscale x 16 x i1> [[MASK]], i64 7, i64 [[VL]], i64 1)
+// CHECK-RV64-NEXT: ret <vscale x 16 x bfloat> [[TMP0]]
+//
+vbfloat16m4_t test_vfmadd_vf_bf16m4_mu(vbool4_t mask, vbfloat16m4_t vd, __bf16 rs1, vbfloat16m4_t vs2, size_t vl) {
+ return __riscv_vfmadd_vf_bf16m4_mu(mask, vd, rs1, vs2, vl);
+}
+
+// CHECK-RV64-LABEL: define dso_local <vscale x 32 x bfloat> @test_vfmadd_vv_bf16m8_mu(
+// CHECK-RV64-SAME: <vscale x 32 x i1> [[MASK:%.*]], <vscale x 32 x bfloat> [[VD:%.*]], <vscale x 32 x bfloat> [[VS1:%.*]], <vscale x 32 x bfloat> [[VS2:%.*]], i64 noundef [[VL:%.*]]) #[[ATTR0]] {
+// CHECK-RV64-NEXT: [[ENTRY:.*:]]
+// CHECK-RV64-NEXT: [[TMP0:%.*]] = call <vscale x 32 x bfloat> @llvm.riscv.vfmadd.mask.nxv32bf16.nxv32bf16.i64(<vscale x 32 x bfloat> [[VD]], <vscale x 32 x bfloat> [[VS1]], <vscale x 32 x bfloat> [[VS2]], <vscale x 32 x i1> [[MASK]], i64 7, i64 [[VL]], i64 1)
+// CHECK-RV64-NEXT: ret <vscale x 32 x bfloat> [[TMP0]]
+//
+vbfloat16m8_t test_vfmadd_vv_bf16m8_mu(vbool2_t mask, vbfloat16m8_t vd, vbfloat16m8_t vs1, vbfloat16m8_t vs2, size_t vl) {
+ return __riscv_vfmadd_vv_bf16m8_mu(mask, vd, vs1, vs2, vl);
+}
+
+// CHECK-RV64-LABEL: define dso_local <vscale x 32 x bfloat> @test_vfmadd_vf_bf16m8_mu(
+// CHECK-RV64-SAME: <vscale x 32 x i1> [[MASK:%.*]], <vscale x 32 x bfloat> [[VD:%.*]], bfloat noundef [[RS1:%.*]], <vscale x 32 x bfloat> [[VS2:%.*]], i64 noundef [[VL:%.*]]) #[[ATTR0]] {
+// CHECK-RV64-NEXT: [[ENTRY:.*:]]
+// CHECK-RV64-NEXT: [[TMP0:%.*]] = call <vscale x 32 x bfloat> @llvm.riscv.vfmadd.mask.nxv32bf16.bf16.i64(<vscale x 32 x bfloat> [[VD]], bfloat [[RS1]], <vscale x 32 x bfloat> [[VS2]], <vscale x 32 x i1> [[MASK]], i64 7, i64 [[VL]], i64 1)
+// CHECK-RV64-NEXT: ret <vscale x 32 x bfloat> [[TMP0]]
+//
+vbfloat16m8_t test_vfmadd_vf_bf16m8_mu(vbool2_t mask, vbfloat16m8_t vd, __bf16 rs1, vbfloat16m8_t vs2, size_t vl) {
+ return __riscv_vfmadd_vf_bf16m8_mu(mask, vd, rs1, vs2, vl);
+}
+
diff --git a/clang/test/CodeGen/RISCV/rvv-intrinsics-autogenerated/zvfbfa/policy/non-overloaded/vfmax.c b/clang/test/CodeGen/RISCV/rvv-intrinsics-autogenerated/zvfbfa/policy/non-overloaded/vfmax.c
new file mode 100644
index 0000000..f3698d4
--- /dev/null
+++ b/clang/test/CodeGen/RISCV/rvv-intrinsics-autogenerated/zvfbfa/policy/non-overloaded/vfmax.c
@@ -0,0 +1,489 @@
+// NOTE: Assertions have been autogenerated by utils/update_cc_test_checks.py UTC_ARGS: --version 5
+// REQUIRES: riscv-registered-target
+// RUN: %clang_cc1 -triple riscv64 -target-feature +v \
+// RUN: -target-feature +experimental-zvfbfa -disable-O0-optnone \
+// RUN: -emit-llvm %s -o - | opt -S -passes=mem2reg | \
+// RUN: FileCheck --check-prefix=CHECK-RV64 %s
+
+#include <riscv_vector.h>
+
+// CHECK-RV64-LABEL: define dso_local <vscale x 1 x bfloat> @test_vfmax_vv_bf16mf4_tu(
+// CHECK-RV64-SAME: <vscale x 1 x bfloat> [[MASKEDOFF:%.*]], <vscale x 1 x bfloat> [[OP1:%.*]], <vscale x 1 x bfloat> [[OP2:%.*]], i64 noundef [[VL:%.*]]) #[[ATTR0:[0-9]+]] {
+// CHECK-RV64-NEXT: [[ENTRY:.*:]]
+// CHECK-RV64-NEXT: [[TMP0:%.*]] = call <vscale x 1 x bfloat> @llvm.riscv.vfmax.nxv1bf16.nxv1bf16.i64(<vscale x 1 x bfloat> [[MASKEDOFF]], <vscale x 1 x bfloat> [[OP1]], <vscale x 1 x bfloat> [[OP2]], i64 [[VL]])
+// CHECK-RV64-NEXT: ret <vscale x 1 x bfloat> [[TMP0]]
+//
+vbfloat16mf4_t test_vfmax_vv_bf16mf4_tu(vbfloat16mf4_t maskedoff, vbfloat16mf4_t op1, vbfloat16mf4_t op2, size_t vl) {
+ return __riscv_vfmax_vv_bf16mf4_tu(maskedoff, op1, op2, vl);
+}
+
+// CHECK-RV64-LABEL: define dso_local <vscale x 1 x bfloat> @test_vfmax_vf_bf16mf4_tu(
+// CHECK-RV64-SAME: <vscale x 1 x bfloat> [[MASKEDOFF:%.*]], <vscale x 1 x bfloat> [[OP1:%.*]], bfloat noundef [[OP2:%.*]], i64 noundef [[VL:%.*]]) #[[ATTR0]] {
+// CHECK-RV64-NEXT: [[ENTRY:.*:]]
+// CHECK-RV64-NEXT: [[TMP0:%.*]] = call <vscale x 1 x bfloat> @llvm.riscv.vfmax.nxv1bf16.bf16.i64(<vscale x 1 x bfloat> [[MASKEDOFF]], <vscale x 1 x bfloat> [[OP1]], bfloat [[OP2]], i64 [[VL]])
+// CHECK-RV64-NEXT: ret <vscale x 1 x bfloat> [[TMP0]]
+//
+vbfloat16mf4_t test_vfmax_vf_bf16mf4_tu(vbfloat16mf4_t maskedoff, vbfloat16mf4_t op1, __bf16 op2, size_t vl) {
+ return __riscv_vfmax_vf_bf16mf4_tu(maskedoff, op1, op2, vl);
+}
+
+// CHECK-RV64-LABEL: define dso_local <vscale x 2 x bfloat> @test_vfmax_vv_bf16mf2_tu(
+// CHECK-RV64-SAME: <vscale x 2 x bfloat> [[MASKEDOFF:%.*]], <vscale x 2 x bfloat> [[OP1:%.*]], <vscale x 2 x bfloat> [[OP2:%.*]], i64 noundef [[VL:%.*]]) #[[ATTR0]] {
+// CHECK-RV64-NEXT: [[ENTRY:.*:]]
+// CHECK-RV64-NEXT: [[TMP0:%.*]] = call <vscale x 2 x bfloat> @llvm.riscv.vfmax.nxv2bf16.nxv2bf16.i64(<vscale x 2 x bfloat> [[MASKEDOFF]], <vscale x 2 x bfloat> [[OP1]], <vscale x 2 x bfloat> [[OP2]], i64 [[VL]])
+// CHECK-RV64-NEXT: ret <vscale x 2 x bfloat> [[TMP0]]
+//
+vbfloat16mf2_t test_vfmax_vv_bf16mf2_tu(vbfloat16mf2_t maskedoff, vbfloat16mf2_t op1, vbfloat16mf2_t op2, size_t vl) {
+ return __riscv_vfmax_vv_bf16mf2_tu(maskedoff, op1, op2, vl);
+}
+
+// CHECK-RV64-LABEL: define dso_local <vscale x 2 x bfloat> @test_vfmax_vf_bf16mf2_tu(
+// CHECK-RV64-SAME: <vscale x 2 x bfloat> [[MASKEDOFF:%.*]], <vscale x 2 x bfloat> [[OP1:%.*]], bfloat noundef [[OP2:%.*]], i64 noundef [[VL:%.*]]) #[[ATTR0]] {
+// CHECK-RV64-NEXT: [[ENTRY:.*:]]
+// CHECK-RV64-NEXT: [[TMP0:%.*]] = call <vscale x 2 x bfloat> @llvm.riscv.vfmax.nxv2bf16.bf16.i64(<vscale x 2 x bfloat> [[MASKEDOFF]], <vscale x 2 x bfloat> [[OP1]], bfloat [[OP2]], i64 [[VL]])
+// CHECK-RV64-NEXT: ret <vscale x 2 x bfloat> [[TMP0]]
+//
+vbfloat16mf2_t test_vfmax_vf_bf16mf2_tu(vbfloat16mf2_t maskedoff, vbfloat16mf2_t op1, __bf16 op2, size_t vl) {
+ return __riscv_vfmax_vf_bf16mf2_tu(maskedoff, op1, op2, vl);
+}
+
+// CHECK-RV64-LABEL: define dso_local <vscale x 4 x bfloat> @test_vfmax_vv_bf16m1_tu(
+// CHECK-RV64-SAME: <vscale x 4 x bfloat> [[MASKEDOFF:%.*]], <vscale x 4 x bfloat> [[OP1:%.*]], <vscale x 4 x bfloat> [[OP2:%.*]], i64 noundef [[VL:%.*]]) #[[ATTR0]] {
+// CHECK-RV64-NEXT: [[ENTRY:.*:]]
+// CHECK-RV64-NEXT: [[TMP0:%.*]] = call <vscale x 4 x bfloat> @llvm.riscv.vfmax.nxv4bf16.nxv4bf16.i64(<vscale x 4 x bfloat> [[MASKEDOFF]], <vscale x 4 x bfloat> [[OP1]], <vscale x 4 x bfloat> [[OP2]], i64 [[VL]])
+// CHECK-RV64-NEXT: ret <vscale x 4 x bfloat> [[TMP0]]
+//
+vbfloat16m1_t test_vfmax_vv_bf16m1_tu(vbfloat16m1_t maskedoff, vbfloat16m1_t op1, vbfloat16m1_t op2, size_t vl) {
+ return __riscv_vfmax_vv_bf16m1_tu(maskedoff, op1, op2, vl);
+}
+
+// CHECK-RV64-LABEL: define dso_local <vscale x 4 x bfloat> @test_vfmax_vf_bf16m1_tu(
+// CHECK-RV64-SAME: <vscale x 4 x bfloat> [[MASKEDOFF:%.*]], <vscale x 4 x bfloat> [[OP1:%.*]], bfloat noundef [[OP2:%.*]], i64 noundef [[VL:%.*]]) #[[ATTR0]] {
+// CHECK-RV64-NEXT: [[ENTRY:.*:]]
+// CHECK-RV64-NEXT: [[TMP0:%.*]] = call <vscale x 4 x bfloat> @llvm.riscv.vfmax.nxv4bf16.bf16.i64(<vscale x 4 x bfloat> [[MASKEDOFF]], <vscale x 4 x bfloat> [[OP1]], bfloat [[OP2]], i64 [[VL]])
+// CHECK-RV64-NEXT: ret <vscale x 4 x bfloat> [[TMP0]]
+//
+vbfloat16m1_t test_vfmax_vf_bf16m1_tu(vbfloat16m1_t maskedoff, vbfloat16m1_t op1, __bf16 op2, size_t vl) {
+ return __riscv_vfmax_vf_bf16m1_tu(maskedoff, op1, op2, vl);
+}
+
+// CHECK-RV64-LABEL: define dso_local <vscale x 8 x bfloat> @test_vfmax_vv_bf16m2_tu(
+// CHECK-RV64-SAME: <vscale x 8 x bfloat> [[MASKEDOFF:%.*]], <vscale x 8 x bfloat> [[OP1:%.*]], <vscale x 8 x bfloat> [[OP2:%.*]], i64 noundef [[VL:%.*]]) #[[ATTR0]] {
+// CHECK-RV64-NEXT: [[ENTRY:.*:]]
+// CHECK-RV64-NEXT: [[TMP0:%.*]] = call <vscale x 8 x bfloat> @llvm.riscv.vfmax.nxv8bf16.nxv8bf16.i64(<vscale x 8 x bfloat> [[MASKEDOFF]], <vscale x 8 x bfloat> [[OP1]], <vscale x 8 x bfloat> [[OP2]], i64 [[VL]])
+// CHECK-RV64-NEXT: ret <vscale x 8 x bfloat> [[TMP0]]
+//
+vbfloat16m2_t test_vfmax_vv_bf16m2_tu(vbfloat16m2_t maskedoff, vbfloat16m2_t op1, vbfloat16m2_t op2, size_t vl) {
+ return __riscv_vfmax_vv_bf16m2_tu(maskedoff, op1, op2, vl);
+}
+
+// CHECK-RV64-LABEL: define dso_local <vscale x 8 x bfloat> @test_vfmax_vf_bf16m2_tu(
+// CHECK-RV64-SAME: <vscale x 8 x bfloat> [[MASKEDOFF:%.*]], <vscale x 8 x bfloat> [[OP1:%.*]], bfloat noundef [[OP2:%.*]], i64 noundef [[VL:%.*]]) #[[ATTR0]] {
+// CHECK-RV64-NEXT: [[ENTRY:.*:]]
+// CHECK-RV64-NEXT: [[TMP0:%.*]] = call <vscale x 8 x bfloat> @llvm.riscv.vfmax.nxv8bf16.bf16.i64(<vscale x 8 x bfloat> [[MASKEDOFF]], <vscale x 8 x bfloat> [[OP1]], bfloat [[OP2]], i64 [[VL]])
+// CHECK-RV64-NEXT: ret <vscale x 8 x bfloat> [[TMP0]]
+//
+vbfloat16m2_t test_vfmax_vf_bf16m2_tu(vbfloat16m2_t maskedoff, vbfloat16m2_t op1, __bf16 op2, size_t vl) {
+ return __riscv_vfmax_vf_bf16m2_tu(maskedoff, op1, op2, vl);
+}
+
+// CHECK-RV64-LABEL: define dso_local <vscale x 16 x bfloat> @test_vfmax_vv_bf16m4_tu(
+// CHECK-RV64-SAME: <vscale x 16 x bfloat> [[MASKEDOFF:%.*]], <vscale x 16 x bfloat> [[OP1:%.*]], <vscale x 16 x bfloat> [[OP2:%.*]], i64 noundef [[VL:%.*]]) #[[ATTR0]] {
+// CHECK-RV64-NEXT: [[ENTRY:.*:]]
+// CHECK-RV64-NEXT: [[TMP0:%.*]] = call <vscale x 16 x bfloat> @llvm.riscv.vfmax.nxv16bf16.nxv16bf16.i64(<vscale x 16 x bfloat> [[MASKEDOFF]], <vscale x 16 x bfloat> [[OP1]], <vscale x 16 x bfloat> [[OP2]], i64 [[VL]])
+// CHECK-RV64-NEXT: ret <vscale x 16 x bfloat> [[TMP0]]
+//
+vbfloat16m4_t test_vfmax_vv_bf16m4_tu(vbfloat16m4_t maskedoff, vbfloat16m4_t op1, vbfloat16m4_t op2, size_t vl) {
+ return __riscv_vfmax_vv_bf16m4_tu(maskedoff, op1, op2, vl);
+}
+
+// CHECK-RV64-LABEL: define dso_local <vscale x 16 x bfloat> @test_vfmax_vf_bf16m4_tu(
+// CHECK-RV64-SAME: <vscale x 16 x bfloat> [[MASKEDOFF:%.*]], <vscale x 16 x bfloat> [[OP1:%.*]], bfloat noundef [[OP2:%.*]], i64 noundef [[VL:%.*]]) #[[ATTR0]] {
+// CHECK-RV64-NEXT: [[ENTRY:.*:]]
+// CHECK-RV64-NEXT: [[TMP0:%.*]] = call <vscale x 16 x bfloat> @llvm.riscv.vfmax.nxv16bf16.bf16.i64(<vscale x 16 x bfloat> [[MASKEDOFF]], <vscale x 16 x bfloat> [[OP1]], bfloat [[OP2]], i64 [[VL]])
+// CHECK-RV64-NEXT: ret <vscale x 16 x bfloat> [[TMP0]]
+//
+vbfloat16m4_t test_vfmax_vf_bf16m4_tu(vbfloat16m4_t maskedoff, vbfloat16m4_t op1, __bf16 op2, size_t vl) {
+ return __riscv_vfmax_vf_bf16m4_tu(maskedoff, op1, op2, vl);
+}
+
+// CHECK-RV64-LABEL: define dso_local <vscale x 32 x bfloat> @test_vfmax_vv_bf16m8_tu(
+// CHECK-RV64-SAME: <vscale x 32 x bfloat> [[MASKEDOFF:%.*]], <vscale x 32 x bfloat> [[OP1:%.*]], <vscale x 32 x bfloat> [[OP2:%.*]], i64 noundef [[VL:%.*]]) #[[ATTR0]] {
+// CHECK-RV64-NEXT: [[ENTRY:.*:]]
+// CHECK-RV64-NEXT: [[TMP0:%.*]] = call <vscale x 32 x bfloat> @llvm.riscv.vfmax.nxv32bf16.nxv32bf16.i64(<vscale x 32 x bfloat> [[MASKEDOFF]], <vscale x 32 x bfloat> [[OP1]], <vscale x 32 x bfloat> [[OP2]], i64 [[VL]])
+// CHECK-RV64-NEXT: ret <vscale x 32 x bfloat> [[TMP0]]
+//
+vbfloat16m8_t test_vfmax_vv_bf16m8_tu(vbfloat16m8_t maskedoff, vbfloat16m8_t op1, vbfloat16m8_t op2, size_t vl) {
+ return __riscv_vfmax_vv_bf16m8_tu(maskedoff, op1, op2, vl);
+}
+
+// CHECK-RV64-LABEL: define dso_local <vscale x 32 x bfloat> @test_vfmax_vf_bf16m8_tu(
+// CHECK-RV64-SAME: <vscale x 32 x bfloat> [[MASKEDOFF:%.*]], <vscale x 32 x bfloat> [[OP1:%.*]], bfloat noundef [[OP2:%.*]], i64 noundef [[VL:%.*]]) #[[ATTR0]] {
+// CHECK-RV64-NEXT: [[ENTRY:.*:]]
+// CHECK-RV64-NEXT: [[TMP0:%.*]] = call <vscale x 32 x bfloat> @llvm.riscv.vfmax.nxv32bf16.bf16.i64(<vscale x 32 x bfloat> [[MASKEDOFF]], <vscale x 32 x bfloat> [[OP1]], bfloat [[OP2]], i64 [[VL]])
+// CHECK-RV64-NEXT: ret <vscale x 32 x bfloat> [[TMP0]]
+//
+vbfloat16m8_t test_vfmax_vf_bf16m8_tu(vbfloat16m8_t maskedoff, vbfloat16m8_t op1, __bf16 op2, size_t vl) {
+ return __riscv_vfmax_vf_bf16m8_tu(maskedoff, op1, op2, vl);
+}
+
+// CHECK-RV64-LABEL: define dso_local <vscale x 1 x bfloat> @test_vfmax_vv_bf16mf4_tum(
+// CHECK-RV64-SAME: <vscale x 1 x i1> [[MASK:%.*]], <vscale x 1 x bfloat> [[MASKEDOFF:%.*]], <vscale x 1 x bfloat> [[OP1:%.*]], <vscale x 1 x bfloat> [[OP2:%.*]], i64 noundef [[VL:%.*]]) #[[ATTR0]] {
+// CHECK-RV64-NEXT: [[ENTRY:.*:]]
+// CHECK-RV64-NEXT: [[TMP0:%.*]] = call <vscale x 1 x bfloat> @llvm.riscv.vfmax.mask.nxv1bf16.nxv1bf16.i64(<vscale x 1 x bfloat> [[MASKEDOFF]], <vscale x 1 x bfloat> [[OP1]], <vscale x 1 x bfloat> [[OP2]], <vscale x 1 x i1> [[MASK]], i64 [[VL]], i64 2)
+// CHECK-RV64-NEXT: ret <vscale x 1 x bfloat> [[TMP0]]
+//
+vbfloat16mf4_t test_vfmax_vv_bf16mf4_tum(vbool64_t mask, vbfloat16mf4_t maskedoff, vbfloat16mf4_t op1, vbfloat16mf4_t op2, size_t vl) {
+ return __riscv_vfmax_vv_bf16mf4_tum(mask, maskedoff, op1, op2, vl);
+}
+
+// CHECK-RV64-LABEL: define dso_local <vscale x 1 x bfloat> @test_vfmax_vf_bf16mf4_tum(
+// CHECK-RV64-SAME: <vscale x 1 x i1> [[MASK:%.*]], <vscale x 1 x bfloat> [[MASKEDOFF:%.*]], <vscale x 1 x bfloat> [[OP1:%.*]], bfloat noundef [[OP2:%.*]], i64 noundef [[VL:%.*]]) #[[ATTR0]] {
+// CHECK-RV64-NEXT: [[ENTRY:.*:]]
+// CHECK-RV64-NEXT: [[TMP0:%.*]] = call <vscale x 1 x bfloat> @llvm.riscv.vfmax.mask.nxv1bf16.bf16.i64(<vscale x 1 x bfloat> [[MASKEDOFF]], <vscale x 1 x bfloat> [[OP1]], bfloat [[OP2]], <vscale x 1 x i1> [[MASK]], i64 [[VL]], i64 2)
+// CHECK-RV64-NEXT: ret <vscale x 1 x bfloat> [[TMP0]]
+//
+vbfloat16mf4_t test_vfmax_vf_bf16mf4_tum(vbool64_t mask, vbfloat16mf4_t maskedoff, vbfloat16mf4_t op1, __bf16 op2, size_t vl) {
+ return __riscv_vfmax_vf_bf16mf4_tum(mask, maskedoff, op1, op2, vl);
+}
+
+// CHECK-RV64-LABEL: define dso_local <vscale x 2 x bfloat> @test_vfmax_vv_bf16mf2_tum(
+// CHECK-RV64-SAME: <vscale x 2 x i1> [[MASK:%.*]], <vscale x 2 x bfloat> [[MASKEDOFF:%.*]], <vscale x 2 x bfloat> [[OP1:%.*]], <vscale x 2 x bfloat> [[OP2:%.*]], i64 noundef [[VL:%.*]]) #[[ATTR0]] {
+// CHECK-RV64-NEXT: [[ENTRY:.*:]]
+// CHECK-RV64-NEXT: [[TMP0:%.*]] = call <vscale x 2 x bfloat> @llvm.riscv.vfmax.mask.nxv2bf16.nxv2bf16.i64(<vscale x 2 x bfloat> [[MASKEDOFF]], <vscale x 2 x bfloat> [[OP1]], <vscale x 2 x bfloat> [[OP2]], <vscale x 2 x i1> [[MASK]], i64 [[VL]], i64 2)
+// CHECK-RV64-NEXT: ret <vscale x 2 x bfloat> [[TMP0]]
+//
+vbfloat16mf2_t test_vfmax_vv_bf16mf2_tum(vbool32_t mask, vbfloat16mf2_t maskedoff, vbfloat16mf2_t op1, vbfloat16mf2_t op2, size_t vl) {
+ return __riscv_vfmax_vv_bf16mf2_tum(mask, maskedoff, op1, op2, vl);
+}
+
+// CHECK-RV64-LABEL: define dso_local <vscale x 2 x bfloat> @test_vfmax_vf_bf16mf2_tum(
+// CHECK-RV64-SAME: <vscale x 2 x i1> [[MASK:%.*]], <vscale x 2 x bfloat> [[MASKEDOFF:%.*]], <vscale x 2 x bfloat> [[OP1:%.*]], bfloat noundef [[OP2:%.*]], i64 noundef [[VL:%.*]]) #[[ATTR0]] {
+// CHECK-RV64-NEXT: [[ENTRY:.*:]]
+// CHECK-RV64-NEXT: [[TMP0:%.*]] = call <vscale x 2 x bfloat> @llvm.riscv.vfmax.mask.nxv2bf16.bf16.i64(<vscale x 2 x bfloat> [[MASKEDOFF]], <vscale x 2 x bfloat> [[OP1]], bfloat [[OP2]], <vscale x 2 x i1> [[MASK]], i64 [[VL]], i64 2)
+// CHECK-RV64-NEXT: ret <vscale x 2 x bfloat> [[TMP0]]
+//
+vbfloat16mf2_t test_vfmax_vf_bf16mf2_tum(vbool32_t mask, vbfloat16mf2_t maskedoff, vbfloat16mf2_t op1, __bf16 op2, size_t vl) {
+ return __riscv_vfmax_vf_bf16mf2_tum(mask, maskedoff, op1, op2, vl);
+}
+
+// CHECK-RV64-LABEL: define dso_local <vscale x 4 x bfloat> @test_vfmax_vv_bf16m1_tum(
+// CHECK-RV64-SAME: <vscale x 4 x i1> [[MASK:%.*]], <vscale x 4 x bfloat> [[MASKEDOFF:%.*]], <vscale x 4 x bfloat> [[OP1:%.*]], <vscale x 4 x bfloat> [[OP2:%.*]], i64 noundef [[VL:%.*]]) #[[ATTR0]] {
+// CHECK-RV64-NEXT: [[ENTRY:.*:]]
+// CHECK-RV64-NEXT: [[TMP0:%.*]] = call <vscale x 4 x bfloat> @llvm.riscv.vfmax.mask.nxv4bf16.nxv4bf16.i64(<vscale x 4 x bfloat> [[MASKEDOFF]], <vscale x 4 x bfloat> [[OP1]], <vscale x 4 x bfloat> [[OP2]], <vscale x 4 x i1> [[MASK]], i64 [[VL]], i64 2)
+// CHECK-RV64-NEXT: ret <vscale x 4 x bfloat> [[TMP0]]
+//
+vbfloat16m1_t test_vfmax_vv_bf16m1_tum(vbool16_t mask, vbfloat16m1_t maskedoff, vbfloat16m1_t op1, vbfloat16m1_t op2, size_t vl) {
+ return __riscv_vfmax_vv_bf16m1_tum(mask, maskedoff, op1, op2, vl);
+}
+
+// CHECK-RV64-LABEL: define dso_local <vscale x 4 x bfloat> @test_vfmax_vf_bf16m1_tum(
+// CHECK-RV64-SAME: <vscale x 4 x i1> [[MASK:%.*]], <vscale x 4 x bfloat> [[MASKEDOFF:%.*]], <vscale x 4 x bfloat> [[OP1:%.*]], bfloat noundef [[OP2:%.*]], i64 noundef [[VL:%.*]]) #[[ATTR0]] {
+// CHECK-RV64-NEXT: [[ENTRY:.*:]]
+// CHECK-RV64-NEXT: [[TMP0:%.*]] = call <vscale x 4 x bfloat> @llvm.riscv.vfmax.mask.nxv4bf16.bf16.i64(<vscale x 4 x bfloat> [[MASKEDOFF]], <vscale x 4 x bfloat> [[OP1]], bfloat [[OP2]], <vscale x 4 x i1> [[MASK]], i64 [[VL]], i64 2)
+// CHECK-RV64-NEXT: ret <vscale x 4 x bfloat> [[TMP0]]
+//
+vbfloat16m1_t test_vfmax_vf_bf16m1_tum(vbool16_t mask, vbfloat16m1_t maskedoff, vbfloat16m1_t op1, __bf16 op2, size_t vl) {
+ return __riscv_vfmax_vf_bf16m1_tum(mask, maskedoff, op1, op2, vl);
+}
+
+// CHECK-RV64-LABEL: define dso_local <vscale x 8 x bfloat> @test_vfmax_vv_bf16m2_tum(
+// CHECK-RV64-SAME: <vscale x 8 x i1> [[MASK:%.*]], <vscale x 8 x bfloat> [[MASKEDOFF:%.*]], <vscale x 8 x bfloat> [[OP1:%.*]], <vscale x 8 x bfloat> [[OP2:%.*]], i64 noundef [[VL:%.*]]) #[[ATTR0]] {
+// CHECK-RV64-NEXT: [[ENTRY:.*:]]
+// CHECK-RV64-NEXT: [[TMP0:%.*]] = call <vscale x 8 x bfloat> @llvm.riscv.vfmax.mask.nxv8bf16.nxv8bf16.i64(<vscale x 8 x bfloat> [[MASKEDOFF]], <vscale x 8 x bfloat> [[OP1]], <vscale x 8 x bfloat> [[OP2]], <vscale x 8 x i1> [[MASK]], i64 [[VL]], i64 2)
+// CHECK-RV64-NEXT: ret <vscale x 8 x bfloat> [[TMP0]]
+//
+vbfloat16m2_t test_vfmax_vv_bf16m2_tum(vbool8_t mask, vbfloat16m2_t maskedoff, vbfloat16m2_t op1, vbfloat16m2_t op2, size_t vl) {
+ return __riscv_vfmax_vv_bf16m2_tum(mask, maskedoff, op1, op2, vl);
+}
+
+// CHECK-RV64-LABEL: define dso_local <vscale x 8 x bfloat> @test_vfmax_vf_bf16m2_tum(
+// CHECK-RV64-SAME: <vscale x 8 x i1> [[MASK:%.*]], <vscale x 8 x bfloat> [[MASKEDOFF:%.*]], <vscale x 8 x bfloat> [[OP1:%.*]], bfloat noundef [[OP2:%.*]], i64 noundef [[VL:%.*]]) #[[ATTR0]] {
+// CHECK-RV64-NEXT: [[ENTRY:.*:]]
+// CHECK-RV64-NEXT: [[TMP0:%.*]] = call <vscale x 8 x bfloat> @llvm.riscv.vfmax.mask.nxv8bf16.bf16.i64(<vscale x 8 x bfloat> [[MASKEDOFF]], <vscale x 8 x bfloat> [[OP1]], bfloat [[OP2]], <vscale x 8 x i1> [[MASK]], i64 [[VL]], i64 2)
+// CHECK-RV64-NEXT: ret <vscale x 8 x bfloat> [[TMP0]]
+//
+vbfloat16m2_t test_vfmax_vf_bf16m2_tum(vbool8_t mask, vbfloat16m2_t maskedoff, vbfloat16m2_t op1, __bf16 op2, size_t vl) {
+ return __riscv_vfmax_vf_bf16m2_tum(mask, maskedoff, op1, op2, vl);
+}
+
+// CHECK-RV64-LABEL: define dso_local <vscale x 16 x bfloat> @test_vfmax_vv_bf16m4_tum(
+// CHECK-RV64-SAME: <vscale x 16 x i1> [[MASK:%.*]], <vscale x 16 x bfloat> [[MASKEDOFF:%.*]], <vscale x 16 x bfloat> [[OP1:%.*]], <vscale x 16 x bfloat> [[OP2:%.*]], i64 noundef [[VL:%.*]]) #[[ATTR0]] {
+// CHECK-RV64-NEXT: [[ENTRY:.*:]]
+// CHECK-RV64-NEXT: [[TMP0:%.*]] = call <vscale x 16 x bfloat> @llvm.riscv.vfmax.mask.nxv16bf16.nxv16bf16.i64(<vscale x 16 x bfloat> [[MASKEDOFF]], <vscale x 16 x bfloat> [[OP1]], <vscale x 16 x bfloat> [[OP2]], <vscale x 16 x i1> [[MASK]], i64 [[VL]], i64 2)
+// CHECK-RV64-NEXT: ret <vscale x 16 x bfloat> [[TMP0]]
+//
+vbfloat16m4_t test_vfmax_vv_bf16m4_tum(vbool4_t mask, vbfloat16m4_t maskedoff, vbfloat16m4_t op1, vbfloat16m4_t op2, size_t vl) {
+ return __riscv_vfmax_vv_bf16m4_tum(mask, maskedoff, op1, op2, vl);
+}
+
+// CHECK-RV64-LABEL: define dso_local <vscale x 16 x bfloat> @test_vfmax_vf_bf16m4_tum(
+// CHECK-RV64-SAME: <vscale x 16 x i1> [[MASK:%.*]], <vscale x 16 x bfloat> [[MASKEDOFF:%.*]], <vscale x 16 x bfloat> [[OP1:%.*]], bfloat noundef [[OP2:%.*]], i64 noundef [[VL:%.*]]) #[[ATTR0]] {
+// CHECK-RV64-NEXT: [[ENTRY:.*:]]
+// CHECK-RV64-NEXT: [[TMP0:%.*]] = call <vscale x 16 x bfloat> @llvm.riscv.vfmax.mask.nxv16bf16.bf16.i64(<vscale x 16 x bfloat> [[MASKEDOFF]], <vscale x 16 x bfloat> [[OP1]], bfloat [[OP2]], <vscale x 16 x i1> [[MASK]], i64 [[VL]], i64 2)
+// CHECK-RV64-NEXT: ret <vscale x 16 x bfloat> [[TMP0]]
+//
+vbfloat16m4_t test_vfmax_vf_bf16m4_tum(vbool4_t mask, vbfloat16m4_t maskedoff, vbfloat16m4_t op1, __bf16 op2, size_t vl) {
+ return __riscv_vfmax_vf_bf16m4_tum(mask, maskedoff, op1, op2, vl);
+}
+
+// CHECK-RV64-LABEL: define dso_local <vscale x 32 x bfloat> @test_vfmax_vv_bf16m8_tum(
+// CHECK-RV64-SAME: <vscale x 32 x i1> [[MASK:%.*]], <vscale x 32 x bfloat> [[MASKEDOFF:%.*]], <vscale x 32 x bfloat> [[OP1:%.*]], <vscale x 32 x bfloat> [[OP2:%.*]], i64 noundef [[VL:%.*]]) #[[ATTR0]] {
+// CHECK-RV64-NEXT: [[ENTRY:.*:]]
+// CHECK-RV64-NEXT: [[TMP0:%.*]] = call <vscale x 32 x bfloat> @llvm.riscv.vfmax.mask.nxv32bf16.nxv32bf16.i64(<vscale x 32 x bfloat> [[MASKEDOFF]], <vscale x 32 x bfloat> [[OP1]], <vscale x 32 x bfloat> [[OP2]], <vscale x 32 x i1> [[MASK]], i64 [[VL]], i64 2)
+// CHECK-RV64-NEXT: ret <vscale x 32 x bfloat> [[TMP0]]
+//
+vbfloat16m8_t test_vfmax_vv_bf16m8_tum(vbool2_t mask, vbfloat16m8_t maskedoff, vbfloat16m8_t op1, vbfloat16m8_t op2, size_t vl) {
+ return __riscv_vfmax_vv_bf16m8_tum(mask, maskedoff, op1, op2, vl);
+}
+
+// CHECK-RV64-LABEL: define dso_local <vscale x 32 x bfloat> @test_vfmax_vf_bf16m8_tum(
+// CHECK-RV64-SAME: <vscale x 32 x i1> [[MASK:%.*]], <vscale x 32 x bfloat> [[MASKEDOFF:%.*]], <vscale x 32 x bfloat> [[OP1:%.*]], bfloat noundef [[OP2:%.*]], i64 noundef [[VL:%.*]]) #[[ATTR0]] {
+// CHECK-RV64-NEXT: [[ENTRY:.*:]]
+// CHECK-RV64-NEXT: [[TMP0:%.*]] = call <vscale x 32 x bfloat> @llvm.riscv.vfmax.mask.nxv32bf16.bf16.i64(<vscale x 32 x bfloat> [[MASKEDOFF]], <vscale x 32 x bfloat> [[OP1]], bfloat [[OP2]], <vscale x 32 x i1> [[MASK]], i64 [[VL]], i64 2)
+// CHECK-RV64-NEXT: ret <vscale x 32 x bfloat> [[TMP0]]
+//
+vbfloat16m8_t test_vfmax_vf_bf16m8_tum(vbool2_t mask, vbfloat16m8_t maskedoff, vbfloat16m8_t op1, __bf16 op2, size_t vl) {
+ return __riscv_vfmax_vf_bf16m8_tum(mask, maskedoff, op1, op2, vl);
+}
+
+// CHECK-RV64-LABEL: define dso_local <vscale x 1 x bfloat> @test_vfmax_vv_bf16mf4_tumu(
+// CHECK-RV64-SAME: <vscale x 1 x i1> [[MASK:%.*]], <vscale x 1 x bfloat> [[MASKEDOFF:%.*]], <vscale x 1 x bfloat> [[OP1:%.*]], <vscale x 1 x bfloat> [[OP2:%.*]], i64 noundef [[VL:%.*]]) #[[ATTR0]] {
+// CHECK-RV64-NEXT: [[ENTRY:.*:]]
+// CHECK-RV64-NEXT: [[TMP0:%.*]] = call <vscale x 1 x bfloat> @llvm.riscv.vfmax.mask.nxv1bf16.nxv1bf16.i64(<vscale x 1 x bfloat> [[MASKEDOFF]], <vscale x 1 x bfloat> [[OP1]], <vscale x 1 x bfloat> [[OP2]], <vscale x 1 x i1> [[MASK]], i64 [[VL]], i64 0)
+// CHECK-RV64-NEXT: ret <vscale x 1 x bfloat> [[TMP0]]
+//
+vbfloat16mf4_t test_vfmax_vv_bf16mf4_tumu(vbool64_t mask, vbfloat16mf4_t maskedoff, vbfloat16mf4_t op1, vbfloat16mf4_t op2, size_t vl) {
+ return __riscv_vfmax_vv_bf16mf4_tumu(mask, maskedoff, op1, op2, vl);
+}
+
+// CHECK-RV64-LABEL: define dso_local <vscale x 1 x bfloat> @test_vfmax_vf_bf16mf4_tumu(
+// CHECK-RV64-SAME: <vscale x 1 x i1> [[MASK:%.*]], <vscale x 1 x bfloat> [[MASKEDOFF:%.*]], <vscale x 1 x bfloat> [[OP1:%.*]], bfloat noundef [[OP2:%.*]], i64 noundef [[VL:%.*]]) #[[ATTR0]] {
+// CHECK-RV64-NEXT: [[ENTRY:.*:]]
+// CHECK-RV64-NEXT: [[TMP0:%.*]] = call <vscale x 1 x bfloat> @llvm.riscv.vfmax.mask.nxv1bf16.bf16.i64(<vscale x 1 x bfloat> [[MASKEDOFF]], <vscale x 1 x bfloat> [[OP1]], bfloat [[OP2]], <vscale x 1 x i1> [[MASK]], i64 [[VL]], i64 0)
+// CHECK-RV64-NEXT: ret <vscale x 1 x bfloat> [[TMP0]]
+//
+vbfloat16mf4_t test_vfmax_vf_bf16mf4_tumu(vbool64_t mask, vbfloat16mf4_t maskedoff, vbfloat16mf4_t op1, __bf16 op2, size_t vl) {
+ return __riscv_vfmax_vf_bf16mf4_tumu(mask, maskedoff, op1, op2, vl);
+}
+
+// CHECK-RV64-LABEL: define dso_local <vscale x 2 x bfloat> @test_vfmax_vv_bf16mf2_tumu(
+// CHECK-RV64-SAME: <vscale x 2 x i1> [[MASK:%.*]], <vscale x 2 x bfloat> [[MASKEDOFF:%.*]], <vscale x 2 x bfloat> [[OP1:%.*]], <vscale x 2 x bfloat> [[OP2:%.*]], i64 noundef [[VL:%.*]]) #[[ATTR0]] {
+// CHECK-RV64-NEXT: [[ENTRY:.*:]]
+// CHECK-RV64-NEXT: [[TMP0:%.*]] = call <vscale x 2 x bfloat> @llvm.riscv.vfmax.mask.nxv2bf16.nxv2bf16.i64(<vscale x 2 x bfloat> [[MASKEDOFF]], <vscale x 2 x bfloat> [[OP1]], <vscale x 2 x bfloat> [[OP2]], <vscale x 2 x i1> [[MASK]], i64 [[VL]], i64 0)
+// CHECK-RV64-NEXT: ret <vscale x 2 x bfloat> [[TMP0]]
+//
+vbfloat16mf2_t test_vfmax_vv_bf16mf2_tumu(vbool32_t mask, vbfloat16mf2_t maskedoff, vbfloat16mf2_t op1, vbfloat16mf2_t op2, size_t vl) {
+ return __riscv_vfmax_vv_bf16mf2_tumu(mask, maskedoff, op1, op2, vl);
+}
+
+// CHECK-RV64-LABEL: define dso_local <vscale x 2 x bfloat> @test_vfmax_vf_bf16mf2_tumu(
+// CHECK-RV64-SAME: <vscale x 2 x i1> [[MASK:%.*]], <vscale x 2 x bfloat> [[MASKEDOFF:%.*]], <vscale x 2 x bfloat> [[OP1:%.*]], bfloat noundef [[OP2:%.*]], i64 noundef [[VL:%.*]]) #[[ATTR0]] {
+// CHECK-RV64-NEXT: [[ENTRY:.*:]]
+// CHECK-RV64-NEXT: [[TMP0:%.*]] = call <vscale x 2 x bfloat> @llvm.riscv.vfmax.mask.nxv2bf16.bf16.i64(<vscale x 2 x bfloat> [[MASKEDOFF]], <vscale x 2 x bfloat> [[OP1]], bfloat [[OP2]], <vscale x 2 x i1> [[MASK]], i64 [[VL]], i64 0)
+// CHECK-RV64-NEXT: ret <vscale x 2 x bfloat> [[TMP0]]
+//
+vbfloat16mf2_t test_vfmax_vf_bf16mf2_tumu(vbool32_t mask, vbfloat16mf2_t maskedoff, vbfloat16mf2_t op1, __bf16 op2, size_t vl) {
+ return __riscv_vfmax_vf_bf16mf2_tumu(mask, maskedoff, op1, op2, vl);
+}
+
+// CHECK-RV64-LABEL: define dso_local <vscale x 4 x bfloat> @test_vfmax_vv_bf16m1_tumu(
+// CHECK-RV64-SAME: <vscale x 4 x i1> [[MASK:%.*]], <vscale x 4 x bfloat> [[MASKEDOFF:%.*]], <vscale x 4 x bfloat> [[OP1:%.*]], <vscale x 4 x bfloat> [[OP2:%.*]], i64 noundef [[VL:%.*]]) #[[ATTR0]] {
+// CHECK-RV64-NEXT: [[ENTRY:.*:]]
+// CHECK-RV64-NEXT: [[TMP0:%.*]] = call <vscale x 4 x bfloat> @llvm.riscv.vfmax.mask.nxv4bf16.nxv4bf16.i64(<vscale x 4 x bfloat> [[MASKEDOFF]], <vscale x 4 x bfloat> [[OP1]], <vscale x 4 x bfloat> [[OP2]], <vscale x 4 x i1> [[MASK]], i64 [[VL]], i64 0)
+// CHECK-RV64-NEXT: ret <vscale x 4 x bfloat> [[TMP0]]
+//
+vbfloat16m1_t test_vfmax_vv_bf16m1_tumu(vbool16_t mask, vbfloat16m1_t maskedoff, vbfloat16m1_t op1, vbfloat16m1_t op2, size_t vl) {
+ return __riscv_vfmax_vv_bf16m1_tumu(mask, maskedoff, op1, op2, vl);
+}
+
+// CHECK-RV64-LABEL: define dso_local <vscale x 4 x bfloat> @test_vfmax_vf_bf16m1_tumu(
+// CHECK-RV64-SAME: <vscale x 4 x i1> [[MASK:%.*]], <vscale x 4 x bfloat> [[MASKEDOFF:%.*]], <vscale x 4 x bfloat> [[OP1:%.*]], bfloat noundef [[OP2:%.*]], i64 noundef [[VL:%.*]]) #[[ATTR0]] {
+// CHECK-RV64-NEXT: [[ENTRY:.*:]]
+// CHECK-RV64-NEXT: [[TMP0:%.*]] = call <vscale x 4 x bfloat> @llvm.riscv.vfmax.mask.nxv4bf16.bf16.i64(<vscale x 4 x bfloat> [[MASKEDOFF]], <vscale x 4 x bfloat> [[OP1]], bfloat [[OP2]], <vscale x 4 x i1> [[MASK]], i64 [[VL]], i64 0)
+// CHECK-RV64-NEXT: ret <vscale x 4 x bfloat> [[TMP0]]
+//
+vbfloat16m1_t test_vfmax_vf_bf16m1_tumu(vbool16_t mask, vbfloat16m1_t maskedoff, vbfloat16m1_t op1, __bf16 op2, size_t vl) {
+ return __riscv_vfmax_vf_bf16m1_tumu(mask, maskedoff, op1, op2, vl);
+}
+
+// CHECK-RV64-LABEL: define dso_local <vscale x 8 x bfloat> @test_vfmax_vv_bf16m2_tumu(
+// CHECK-RV64-SAME: <vscale x 8 x i1> [[MASK:%.*]], <vscale x 8 x bfloat> [[MASKEDOFF:%.*]], <vscale x 8 x bfloat> [[OP1:%.*]], <vscale x 8 x bfloat> [[OP2:%.*]], i64 noundef [[VL:%.*]]) #[[ATTR0]] {
+// CHECK-RV64-NEXT: [[ENTRY:.*:]]
+// CHECK-RV64-NEXT: [[TMP0:%.*]] = call <vscale x 8 x bfloat> @llvm.riscv.vfmax.mask.nxv8bf16.nxv8bf16.i64(<vscale x 8 x bfloat> [[MASKEDOFF]], <vscale x 8 x bfloat> [[OP1]], <vscale x 8 x bfloat> [[OP2]], <vscale x 8 x i1> [[MASK]], i64 [[VL]], i64 0)
+// CHECK-RV64-NEXT: ret <vscale x 8 x bfloat> [[TMP0]]
+//
+vbfloat16m2_t test_vfmax_vv_bf16m2_tumu(vbool8_t mask, vbfloat16m2_t maskedoff, vbfloat16m2_t op1, vbfloat16m2_t op2, size_t vl) {
+ return __riscv_vfmax_vv_bf16m2_tumu(mask, maskedoff, op1, op2, vl);
+}
+
+// CHECK-RV64-LABEL: define dso_local <vscale x 8 x bfloat> @test_vfmax_vf_bf16m2_tumu(
+// CHECK-RV64-SAME: <vscale x 8 x i1> [[MASK:%.*]], <vscale x 8 x bfloat> [[MASKEDOFF:%.*]], <vscale x 8 x bfloat> [[OP1:%.*]], bfloat noundef [[OP2:%.*]], i64 noundef [[VL:%.*]]) #[[ATTR0]] {
+// CHECK-RV64-NEXT: [[ENTRY:.*:]]
+// CHECK-RV64-NEXT: [[TMP0:%.*]] = call <vscale x 8 x bfloat> @llvm.riscv.vfmax.mask.nxv8bf16.bf16.i64(<vscale x 8 x bfloat> [[MASKEDOFF]], <vscale x 8 x bfloat> [[OP1]], bfloat [[OP2]], <vscale x 8 x i1> [[MASK]], i64 [[VL]], i64 0)
+// CHECK-RV64-NEXT: ret <vscale x 8 x bfloat> [[TMP0]]
+//
+vbfloat16m2_t test_vfmax_vf_bf16m2_tumu(vbool8_t mask, vbfloat16m2_t maskedoff, vbfloat16m2_t op1, __bf16 op2, size_t vl) {
+ return __riscv_vfmax_vf_bf16m2_tumu(mask, maskedoff, op1, op2, vl);
+}
+
+// CHECK-RV64-LABEL: define dso_local <vscale x 16 x bfloat> @test_vfmax_vv_bf16m4_tumu(
+// CHECK-RV64-SAME: <vscale x 16 x i1> [[MASK:%.*]], <vscale x 16 x bfloat> [[MASKEDOFF:%.*]], <vscale x 16 x bfloat> [[OP1:%.*]], <vscale x 16 x bfloat> [[OP2:%.*]], i64 noundef [[VL:%.*]]) #[[ATTR0]] {
+// CHECK-RV64-NEXT: [[ENTRY:.*:]]
+// CHECK-RV64-NEXT: [[TMP0:%.*]] = call <vscale x 16 x bfloat> @llvm.riscv.vfmax.mask.nxv16bf16.nxv16bf16.i64(<vscale x 16 x bfloat> [[MASKEDOFF]], <vscale x 16 x bfloat> [[OP1]], <vscale x 16 x bfloat> [[OP2]], <vscale x 16 x i1> [[MASK]], i64 [[VL]], i64 0)
+// CHECK-RV64-NEXT: ret <vscale x 16 x bfloat> [[TMP0]]
+//
+vbfloat16m4_t test_vfmax_vv_bf16m4_tumu(vbool4_t mask, vbfloat16m4_t maskedoff, vbfloat16m4_t op1, vbfloat16m4_t op2, size_t vl) {
+ return __riscv_vfmax_vv_bf16m4_tumu(mask, maskedoff, op1, op2, vl);
+}
+
+// CHECK-RV64-LABEL: define dso_local <vscale x 16 x bfloat> @test_vfmax_vf_bf16m4_tumu(
+// CHECK-RV64-SAME: <vscale x 16 x i1> [[MASK:%.*]], <vscale x 16 x bfloat> [[MASKEDOFF:%.*]], <vscale x 16 x bfloat> [[OP1:%.*]], bfloat noundef [[OP2:%.*]], i64 noundef [[VL:%.*]]) #[[ATTR0]] {
+// CHECK-RV64-NEXT: [[ENTRY:.*:]]
+// CHECK-RV64-NEXT: [[TMP0:%.*]] = call <vscale x 16 x bfloat> @llvm.riscv.vfmax.mask.nxv16bf16.bf16.i64(<vscale x 16 x bfloat> [[MASKEDOFF]], <vscale x 16 x bfloat> [[OP1]], bfloat [[OP2]], <vscale x 16 x i1> [[MASK]], i64 [[VL]], i64 0)
+// CHECK-RV64-NEXT: ret <vscale x 16 x bfloat> [[TMP0]]
+//
+vbfloat16m4_t test_vfmax_vf_bf16m4_tumu(vbool4_t mask, vbfloat16m4_t maskedoff, vbfloat16m4_t op1, __bf16 op2, size_t vl) {
+ return __riscv_vfmax_vf_bf16m4_tumu(mask, maskedoff, op1, op2, vl);
+}
+
+// CHECK-RV64-LABEL: define dso_local <vscale x 32 x bfloat> @test_vfmax_vv_bf16m8_tumu(
+// CHECK-RV64-SAME: <vscale x 32 x i1> [[MASK:%.*]], <vscale x 32 x bfloat> [[MASKEDOFF:%.*]], <vscale x 32 x bfloat> [[OP1:%.*]], <vscale x 32 x bfloat> [[OP2:%.*]], i64 noundef [[VL:%.*]]) #[[ATTR0]] {
+// CHECK-RV64-NEXT: [[ENTRY:.*:]]
+// CHECK-RV64-NEXT: [[TMP0:%.*]] = call <vscale x 32 x bfloat> @llvm.riscv.vfmax.mask.nxv32bf16.nxv32bf16.i64(<vscale x 32 x bfloat> [[MASKEDOFF]], <vscale x 32 x bfloat> [[OP1]], <vscale x 32 x bfloat> [[OP2]], <vscale x 32 x i1> [[MASK]], i64 [[VL]], i64 0)
+// CHECK-RV64-NEXT: ret <vscale x 32 x bfloat> [[TMP0]]
+//
+vbfloat16m8_t test_vfmax_vv_bf16m8_tumu(vbool2_t mask, vbfloat16m8_t maskedoff, vbfloat16m8_t op1, vbfloat16m8_t op2, size_t vl) {
+ return __riscv_vfmax_vv_bf16m8_tumu(mask, maskedoff, op1, op2, vl);
+}
+
+// CHECK-RV64-LABEL: define dso_local <vscale x 32 x bfloat> @test_vfmax_vf_bf16m8_tumu(
+// CHECK-RV64-SAME: <vscale x 32 x i1> [[MASK:%.*]], <vscale x 32 x bfloat> [[MASKEDOFF:%.*]], <vscale x 32 x bfloat> [[OP1:%.*]], bfloat noundef [[OP2:%.*]], i64 noundef [[VL:%.*]]) #[[ATTR0]] {
+// CHECK-RV64-NEXT: [[ENTRY:.*:]]
+// CHECK-RV64-NEXT: [[TMP0:%.*]] = call <vscale x 32 x bfloat> @llvm.riscv.vfmax.mask.nxv32bf16.bf16.i64(<vscale x 32 x bfloat> [[MASKEDOFF]], <vscale x 32 x bfloat> [[OP1]], bfloat [[OP2]], <vscale x 32 x i1> [[MASK]], i64 [[VL]], i64 0)
+// CHECK-RV64-NEXT: ret <vscale x 32 x bfloat> [[TMP0]]
+//
+vbfloat16m8_t test_vfmax_vf_bf16m8_tumu(vbool2_t mask, vbfloat16m8_t maskedoff, vbfloat16m8_t op1, __bf16 op2, size_t vl) {
+ return __riscv_vfmax_vf_bf16m8_tumu(mask, maskedoff, op1, op2, vl);
+}
+
+// CHECK-RV64-LABEL: define dso_local <vscale x 1 x bfloat> @test_vfmax_vv_bf16mf4_mu(
+// CHECK-RV64-SAME: <vscale x 1 x i1> [[MASK:%.*]], <vscale x 1 x bfloat> [[MASKEDOFF:%.*]], <vscale x 1 x bfloat> [[OP1:%.*]], <vscale x 1 x bfloat> [[OP2:%.*]], i64 noundef [[VL:%.*]]) #[[ATTR0]] {
+// CHECK-RV64-NEXT: [[ENTRY:.*:]]
+// CHECK-RV64-NEXT: [[TMP0:%.*]] = call <vscale x 1 x bfloat> @llvm.riscv.vfmax.mask.nxv1bf16.nxv1bf16.i64(<vscale x 1 x bfloat> [[MASKEDOFF]], <vscale x 1 x bfloat> [[OP1]], <vscale x 1 x bfloat> [[OP2]], <vscale x 1 x i1> [[MASK]], i64 [[VL]], i64 1)
+// CHECK-RV64-NEXT: ret <vscale x 1 x bfloat> [[TMP0]]
+//
+vbfloat16mf4_t test_vfmax_vv_bf16mf4_mu(vbool64_t mask, vbfloat16mf4_t maskedoff, vbfloat16mf4_t op1, vbfloat16mf4_t op2, size_t vl) {
+ return __riscv_vfmax_vv_bf16mf4_mu(mask, maskedoff, op1, op2, vl);
+}
+
+// CHECK-RV64-LABEL: define dso_local <vscale x 1 x bfloat> @test_vfmax_vf_bf16mf4_mu(
+// CHECK-RV64-SAME: <vscale x 1 x i1> [[MASK:%.*]], <vscale x 1 x bfloat> [[MASKEDOFF:%.*]], <vscale x 1 x bfloat> [[OP1:%.*]], bfloat noundef [[OP2:%.*]], i64 noundef [[VL:%.*]]) #[[ATTR0]] {
+// CHECK-RV64-NEXT: [[ENTRY:.*:]]
+// CHECK-RV64-NEXT: [[TMP0:%.*]] = call <vscale x 1 x bfloat> @llvm.riscv.vfmax.mask.nxv1bf16.bf16.i64(<vscale x 1 x bfloat> [[MASKEDOFF]], <vscale x 1 x bfloat> [[OP1]], bfloat [[OP2]], <vscale x 1 x i1> [[MASK]], i64 [[VL]], i64 1)
+// CHECK-RV64-NEXT: ret <vscale x 1 x bfloat> [[TMP0]]
+//
+vbfloat16mf4_t test_vfmax_vf_bf16mf4_mu(vbool64_t mask, vbfloat16mf4_t maskedoff, vbfloat16mf4_t op1, __bf16 op2, size_t vl) {
+ return __riscv_vfmax_vf_bf16mf4_mu(mask, maskedoff, op1, op2, vl);
+}
+
+// CHECK-RV64-LABEL: define dso_local <vscale x 2 x bfloat> @test_vfmax_vv_bf16mf2_mu(
+// CHECK-RV64-SAME: <vscale x 2 x i1> [[MASK:%.*]], <vscale x 2 x bfloat> [[MASKEDOFF:%.*]], <vscale x 2 x bfloat> [[OP1:%.*]], <vscale x 2 x bfloat> [[OP2:%.*]], i64 noundef [[VL:%.*]]) #[[ATTR0]] {
+// CHECK-RV64-NEXT: [[ENTRY:.*:]]
+// CHECK-RV64-NEXT: [[TMP0:%.*]] = call <vscale x 2 x bfloat> @llvm.riscv.vfmax.mask.nxv2bf16.nxv2bf16.i64(<vscale x 2 x bfloat> [[MASKEDOFF]], <vscale x 2 x bfloat> [[OP1]], <vscale x 2 x bfloat> [[OP2]], <vscale x 2 x i1> [[MASK]], i64 [[VL]], i64 1)
+// CHECK-RV64-NEXT: ret <vscale x 2 x bfloat> [[TMP0]]
+//
+vbfloat16mf2_t test_vfmax_vv_bf16mf2_mu(vbool32_t mask, vbfloat16mf2_t maskedoff, vbfloat16mf2_t op1, vbfloat16mf2_t op2, size_t vl) {
+ return __riscv_vfmax_vv_bf16mf2_mu(mask, maskedoff, op1, op2, vl);
+}
+
+// CHECK-RV64-LABEL: define dso_local <vscale x 2 x bfloat> @test_vfmax_vf_bf16mf2_mu(
+// CHECK-RV64-SAME: <vscale x 2 x i1> [[MASK:%.*]], <vscale x 2 x bfloat> [[MASKEDOFF:%.*]], <vscale x 2 x bfloat> [[OP1:%.*]], bfloat noundef [[OP2:%.*]], i64 noundef [[VL:%.*]]) #[[ATTR0]] {
+// CHECK-RV64-NEXT: [[ENTRY:.*:]]
+// CHECK-RV64-NEXT: [[TMP0:%.*]] = call <vscale x 2 x bfloat> @llvm.riscv.vfmax.mask.nxv2bf16.bf16.i64(<vscale x 2 x bfloat> [[MASKEDOFF]], <vscale x 2 x bfloat> [[OP1]], bfloat [[OP2]], <vscale x 2 x i1> [[MASK]], i64 [[VL]], i64 1)
+// CHECK-RV64-NEXT: ret <vscale x 2 x bfloat> [[TMP0]]
+//
+vbfloat16mf2_t test_vfmax_vf_bf16mf2_mu(vbool32_t mask, vbfloat16mf2_t maskedoff, vbfloat16mf2_t op1, __bf16 op2, size_t vl) {
+ return __riscv_vfmax_vf_bf16mf2_mu(mask, maskedoff, op1, op2, vl);
+}
+
+// CHECK-RV64-LABEL: define dso_local <vscale x 4 x bfloat> @test_vfmax_vv_bf16m1_mu(
+// CHECK-RV64-SAME: <vscale x 4 x i1> [[MASK:%.*]], <vscale x 4 x bfloat> [[MASKEDOFF:%.*]], <vscale x 4 x bfloat> [[OP1:%.*]], <vscale x 4 x bfloat> [[OP2:%.*]], i64 noundef [[VL:%.*]]) #[[ATTR0]] {
+// CHECK-RV64-NEXT: [[ENTRY:.*:]]
+// CHECK-RV64-NEXT: [[TMP0:%.*]] = call <vscale x 4 x bfloat> @llvm.riscv.vfmax.mask.nxv4bf16.nxv4bf16.i64(<vscale x 4 x bfloat> [[MASKEDOFF]], <vscale x 4 x bfloat> [[OP1]], <vscale x 4 x bfloat> [[OP2]], <vscale x 4 x i1> [[MASK]], i64 [[VL]], i64 1)
+// CHECK-RV64-NEXT: ret <vscale x 4 x bfloat> [[TMP0]]
+//
+vbfloat16m1_t test_vfmax_vv_bf16m1_mu(vbool16_t mask, vbfloat16m1_t maskedoff, vbfloat16m1_t op1, vbfloat16m1_t op2, size_t vl) {
+ return __riscv_vfmax_vv_bf16m1_mu(mask, maskedoff, op1, op2, vl);
+}
+
+// CHECK-RV64-LABEL: define dso_local <vscale x 4 x bfloat> @test_vfmax_vf_bf16m1_mu(
+// CHECK-RV64-SAME: <vscale x 4 x i1> [[MASK:%.*]], <vscale x 4 x bfloat> [[MASKEDOFF:%.*]], <vscale x 4 x bfloat> [[OP1:%.*]], bfloat noundef [[OP2:%.*]], i64 noundef [[VL:%.*]]) #[[ATTR0]] {
+// CHECK-RV64-NEXT: [[ENTRY:.*:]]
+// CHECK-RV64-NEXT: [[TMP0:%.*]] = call <vscale x 4 x bfloat> @llvm.riscv.vfmax.mask.nxv4bf16.bf16.i64(<vscale x 4 x bfloat> [[MASKEDOFF]], <vscale x 4 x bfloat> [[OP1]], bfloat [[OP2]], <vscale x 4 x i1> [[MASK]], i64 [[VL]], i64 1)
+// CHECK-RV64-NEXT: ret <vscale x 4 x bfloat> [[TMP0]]
+//
+vbfloat16m1_t test_vfmax_vf_bf16m1_mu(vbool16_t mask, vbfloat16m1_t maskedoff, vbfloat16m1_t op1, __bf16 op2, size_t vl) {
+ return __riscv_vfmax_vf_bf16m1_mu(mask, maskedoff, op1, op2, vl);
+}
+
+// CHECK-RV64-LABEL: define dso_local <vscale x 8 x bfloat> @test_vfmax_vv_bf16m2_mu(
+// CHECK-RV64-SAME: <vscale x 8 x i1> [[MASK:%.*]], <vscale x 8 x bfloat> [[MASKEDOFF:%.*]], <vscale x 8 x bfloat> [[OP1:%.*]], <vscale x 8 x bfloat> [[OP2:%.*]], i64 noundef [[VL:%.*]]) #[[ATTR0]] {
+// CHECK-RV64-NEXT: [[ENTRY:.*:]]
+// CHECK-RV64-NEXT: [[TMP0:%.*]] = call <vscale x 8 x bfloat> @llvm.riscv.vfmax.mask.nxv8bf16.nxv8bf16.i64(<vscale x 8 x bfloat> [[MASKEDOFF]], <vscale x 8 x bfloat> [[OP1]], <vscale x 8 x bfloat> [[OP2]], <vscale x 8 x i1> [[MASK]], i64 [[VL]], i64 1)
+// CHECK-RV64-NEXT: ret <vscale x 8 x bfloat> [[TMP0]]
+//
+vbfloat16m2_t test_vfmax_vv_bf16m2_mu(vbool8_t mask, vbfloat16m2_t maskedoff, vbfloat16m2_t op1, vbfloat16m2_t op2, size_t vl) {
+ return __riscv_vfmax_vv_bf16m2_mu(mask, maskedoff, op1, op2, vl);
+}
+
+// CHECK-RV64-LABEL: define dso_local <vscale x 8 x bfloat> @test_vfmax_vf_bf16m2_mu(
+// CHECK-RV64-SAME: <vscale x 8 x i1> [[MASK:%.*]], <vscale x 8 x bfloat> [[MASKEDOFF:%.*]], <vscale x 8 x bfloat> [[OP1:%.*]], bfloat noundef [[OP2:%.*]], i64 noundef [[VL:%.*]]) #[[ATTR0]] {
+// CHECK-RV64-NEXT: [[ENTRY:.*:]]
+// CHECK-RV64-NEXT: [[TMP0:%.*]] = call <vscale x 8 x bfloat> @llvm.riscv.vfmax.mask.nxv8bf16.bf16.i64(<vscale x 8 x bfloat> [[MASKEDOFF]], <vscale x 8 x bfloat> [[OP1]], bfloat [[OP2]], <vscale x 8 x i1> [[MASK]], i64 [[VL]], i64 1)
+// CHECK-RV64-NEXT: ret <vscale x 8 x bfloat> [[TMP0]]
+//
+vbfloat16m2_t test_vfmax_vf_bf16m2_mu(vbool8_t mask, vbfloat16m2_t maskedoff, vbfloat16m2_t op1, __bf16 op2, size_t vl) {
+ return __riscv_vfmax_vf_bf16m2_mu(mask, maskedoff, op1, op2, vl);
+}
+
+// CHECK-RV64-LABEL: define dso_local <vscale x 16 x bfloat> @test_vfmax_vv_bf16m4_mu(
+// CHECK-RV64-SAME: <vscale x 16 x i1> [[MASK:%.*]], <vscale x 16 x bfloat> [[MASKEDOFF:%.*]], <vscale x 16 x bfloat> [[OP1:%.*]], <vscale x 16 x bfloat> [[OP2:%.*]], i64 noundef [[VL:%.*]]) #[[ATTR0]] {
+// CHECK-RV64-NEXT: [[ENTRY:.*:]]
+// CHECK-RV64-NEXT: [[TMP0:%.*]] = call <vscale x 16 x bfloat> @llvm.riscv.vfmax.mask.nxv16bf16.nxv16bf16.i64(<vscale x 16 x bfloat> [[MASKEDOFF]], <vscale x 16 x bfloat> [[OP1]], <vscale x 16 x bfloat> [[OP2]], <vscale x 16 x i1> [[MASK]], i64 [[VL]], i64 1)
+// CHECK-RV64-NEXT: ret <vscale x 16 x bfloat> [[TMP0]]
+//
+vbfloat16m4_t test_vfmax_vv_bf16m4_mu(vbool4_t mask, vbfloat16m4_t maskedoff, vbfloat16m4_t op1, vbfloat16m4_t op2, size_t vl) {
+ return __riscv_vfmax_vv_bf16m4_mu(mask, maskedoff, op1, op2, vl);
+}
+
+// CHECK-RV64-LABEL: define dso_local <vscale x 16 x bfloat> @test_vfmax_vf_bf16m4_mu(
+// CHECK-RV64-SAME: <vscale x 16 x i1> [[MASK:%.*]], <vscale x 16 x bfloat> [[MASKEDOFF:%.*]], <vscale x 16 x bfloat> [[OP1:%.*]], bfloat noundef [[OP2:%.*]], i64 noundef [[VL:%.*]]) #[[ATTR0]] {
+// CHECK-RV64-NEXT: [[ENTRY:.*:]]
+// CHECK-RV64-NEXT: [[TMP0:%.*]] = call <vscale x 16 x bfloat> @llvm.riscv.vfmax.mask.nxv16bf16.bf16.i64(<vscale x 16 x bfloat> [[MASKEDOFF]], <vscale x 16 x bfloat> [[OP1]], bfloat [[OP2]], <vscale x 16 x i1> [[MASK]], i64 [[VL]], i64 1)
+// CHECK-RV64-NEXT: ret <vscale x 16 x bfloat> [[TMP0]]
+//
+vbfloat16m4_t test_vfmax_vf_bf16m4_mu(vbool4_t mask, vbfloat16m4_t maskedoff, vbfloat16m4_t op1, __bf16 op2, size_t vl) {
+ return __riscv_vfmax_vf_bf16m4_mu(mask, maskedoff, op1, op2, vl);
+}
+
+// CHECK-RV64-LABEL: define dso_local <vscale x 32 x bfloat> @test_vfmax_vv_bf16m8_mu(
+// CHECK-RV64-SAME: <vscale x 32 x i1> [[MASK:%.*]], <vscale x 32 x bfloat> [[MASKEDOFF:%.*]], <vscale x 32 x bfloat> [[OP1:%.*]], <vscale x 32 x bfloat> [[OP2:%.*]], i64 noundef [[VL:%.*]]) #[[ATTR0]] {
+// CHECK-RV64-NEXT: [[ENTRY:.*:]]
+// CHECK-RV64-NEXT: [[TMP0:%.*]] = call <vscale x 32 x bfloat> @llvm.riscv.vfmax.mask.nxv32bf16.nxv32bf16.i64(<vscale x 32 x bfloat> [[MASKEDOFF]], <vscale x 32 x bfloat> [[OP1]], <vscale x 32 x bfloat> [[OP2]], <vscale x 32 x i1> [[MASK]], i64 [[VL]], i64 1)
+// CHECK-RV64-NEXT: ret <vscale x 32 x bfloat> [[TMP0]]
+//
+vbfloat16m8_t test_vfmax_vv_bf16m8_mu(vbool2_t mask, vbfloat16m8_t maskedoff, vbfloat16m8_t op1, vbfloat16m8_t op2, size_t vl) {
+ return __riscv_vfmax_vv_bf16m8_mu(mask, maskedoff, op1, op2, vl);
+}
+
+// CHECK-RV64-LABEL: define dso_local <vscale x 32 x bfloat> @test_vfmax_vf_bf16m8_mu(
+// CHECK-RV64-SAME: <vscale x 32 x i1> [[MASK:%.*]], <vscale x 32 x bfloat> [[MASKEDOFF:%.*]], <vscale x 32 x bfloat> [[OP1:%.*]], bfloat noundef [[OP2:%.*]], i64 noundef [[VL:%.*]]) #[[ATTR0]] {
+// CHECK-RV64-NEXT: [[ENTRY:.*:]]
+// CHECK-RV64-NEXT: [[TMP0:%.*]] = call <vscale x 32 x bfloat> @llvm.riscv.vfmax.mask.nxv32bf16.bf16.i64(<vscale x 32 x bfloat> [[MASKEDOFF]], <vscale x 32 x bfloat> [[OP1]], bfloat [[OP2]], <vscale x 32 x i1> [[MASK]], i64 [[VL]], i64 1)
+// CHECK-RV64-NEXT: ret <vscale x 32 x bfloat> [[TMP0]]
+//
+vbfloat16m8_t test_vfmax_vf_bf16m8_mu(vbool2_t mask, vbfloat16m8_t maskedoff, vbfloat16m8_t op1, __bf16 op2, size_t vl) {
+ return __riscv_vfmax_vf_bf16m8_mu(mask, maskedoff, op1, op2, vl);
+}
+
diff --git a/clang/test/CodeGen/RISCV/rvv-intrinsics-autogenerated/zvfbfa/policy/non-overloaded/vfmerge.c b/clang/test/CodeGen/RISCV/rvv-intrinsics-autogenerated/zvfbfa/policy/non-overloaded/vfmerge.c
new file mode 100644
index 0000000..bcaf2cb
--- /dev/null
+++ b/clang/test/CodeGen/RISCV/rvv-intrinsics-autogenerated/zvfbfa/policy/non-overloaded/vfmerge.c
@@ -0,0 +1,69 @@
+// NOTE: Assertions have been autogenerated by utils/update_cc_test_checks.py UTC_ARGS: --version 5
+// REQUIRES: riscv-registered-target
+// RUN: %clang_cc1 -triple riscv64 -target-feature +v \
+// RUN: -target-feature +experimental-zvfbfa -disable-O0-optnone \
+// RUN: -emit-llvm %s -o - | opt -S -passes=mem2reg | \
+// RUN: FileCheck --check-prefix=CHECK-RV64 %s
+
+#include <riscv_vector.h>
+
+// CHECK-RV64-LABEL: define dso_local <vscale x 1 x bfloat> @test_vfmerge_vfm_bf16mf4_tu(
+// CHECK-RV64-SAME: <vscale x 1 x bfloat> [[MASKEDOFF:%.*]], <vscale x 1 x bfloat> [[OP1:%.*]], bfloat noundef [[OP2:%.*]], <vscale x 1 x i1> [[MASK:%.*]], i64 noundef [[VL:%.*]]) #[[ATTR0:[0-9]+]] {
+// CHECK-RV64-NEXT: [[ENTRY:.*:]]
+// CHECK-RV64-NEXT: [[TMP0:%.*]] = call <vscale x 1 x bfloat> @llvm.riscv.vfmerge.nxv1bf16.bf16.i64(<vscale x 1 x bfloat> [[MASKEDOFF]], <vscale x 1 x bfloat> [[OP1]], bfloat [[OP2]], <vscale x 1 x i1> [[MASK]], i64 [[VL]])
+// CHECK-RV64-NEXT: ret <vscale x 1 x bfloat> [[TMP0]]
+//
+vbfloat16mf4_t test_vfmerge_vfm_bf16mf4_tu(vbfloat16mf4_t maskedoff, vbfloat16mf4_t op1, __bf16 op2, vbool64_t mask, size_t vl) {
+ return __riscv_vfmerge_vfm_bf16mf4_tu(maskedoff, op1, op2, mask, vl);
+}
+
+// CHECK-RV64-LABEL: define dso_local <vscale x 2 x bfloat> @test_vfmerge_vfm_bf16mf2_tu(
+// CHECK-RV64-SAME: <vscale x 2 x bfloat> [[MASKEDOFF:%.*]], <vscale x 2 x bfloat> [[OP1:%.*]], bfloat noundef [[OP2:%.*]], <vscale x 2 x i1> [[MASK:%.*]], i64 noundef [[VL:%.*]]) #[[ATTR0]] {
+// CHECK-RV64-NEXT: [[ENTRY:.*:]]
+// CHECK-RV64-NEXT: [[TMP0:%.*]] = call <vscale x 2 x bfloat> @llvm.riscv.vfmerge.nxv2bf16.bf16.i64(<vscale x 2 x bfloat> [[MASKEDOFF]], <vscale x 2 x bfloat> [[OP1]], bfloat [[OP2]], <vscale x 2 x i1> [[MASK]], i64 [[VL]])
+// CHECK-RV64-NEXT: ret <vscale x 2 x bfloat> [[TMP0]]
+//
+vbfloat16mf2_t test_vfmerge_vfm_bf16mf2_tu(vbfloat16mf2_t maskedoff, vbfloat16mf2_t op1, __bf16 op2, vbool32_t mask, size_t vl) {
+ return __riscv_vfmerge_vfm_bf16mf2_tu(maskedoff, op1, op2, mask, vl);
+}
+
+// CHECK-RV64-LABEL: define dso_local <vscale x 4 x bfloat> @test_vfmerge_vfm_bf16m1_tu(
+// CHECK-RV64-SAME: <vscale x 4 x bfloat> [[MASKEDOFF:%.*]], <vscale x 4 x bfloat> [[OP1:%.*]], bfloat noundef [[OP2:%.*]], <vscale x 4 x i1> [[MASK:%.*]], i64 noundef [[VL:%.*]]) #[[ATTR0]] {
+// CHECK-RV64-NEXT: [[ENTRY:.*:]]
+// CHECK-RV64-NEXT: [[TMP0:%.*]] = call <vscale x 4 x bfloat> @llvm.riscv.vfmerge.nxv4bf16.bf16.i64(<vscale x 4 x bfloat> [[MASKEDOFF]], <vscale x 4 x bfloat> [[OP1]], bfloat [[OP2]], <vscale x 4 x i1> [[MASK]], i64 [[VL]])
+// CHECK-RV64-NEXT: ret <vscale x 4 x bfloat> [[TMP0]]
+//
+vbfloat16m1_t test_vfmerge_vfm_bf16m1_tu(vbfloat16m1_t maskedoff, vbfloat16m1_t op1, __bf16 op2, vbool16_t mask, size_t vl) {
+ return __riscv_vfmerge_vfm_bf16m1_tu(maskedoff, op1, op2, mask, vl);
+}
+
+// CHECK-RV64-LABEL: define dso_local <vscale x 8 x bfloat> @test_vfmerge_vfm_bf16m2_tu(
+// CHECK-RV64-SAME: <vscale x 8 x bfloat> [[MASKEDOFF:%.*]], <vscale x 8 x bfloat> [[OP1:%.*]], bfloat noundef [[OP2:%.*]], <vscale x 8 x i1> [[MASK:%.*]], i64 noundef [[VL:%.*]]) #[[ATTR0]] {
+// CHECK-RV64-NEXT: [[ENTRY:.*:]]
+// CHECK-RV64-NEXT: [[TMP0:%.*]] = call <vscale x 8 x bfloat> @llvm.riscv.vfmerge.nxv8bf16.bf16.i64(<vscale x 8 x bfloat> [[MASKEDOFF]], <vscale x 8 x bfloat> [[OP1]], bfloat [[OP2]], <vscale x 8 x i1> [[MASK]], i64 [[VL]])
+// CHECK-RV64-NEXT: ret <vscale x 8 x bfloat> [[TMP0]]
+//
+vbfloat16m2_t test_vfmerge_vfm_bf16m2_tu(vbfloat16m2_t maskedoff, vbfloat16m2_t op1, __bf16 op2, vbool8_t mask, size_t vl) {
+ return __riscv_vfmerge_vfm_bf16m2_tu(maskedoff, op1, op2, mask, vl);
+}
+
+// CHECK-RV64-LABEL: define dso_local <vscale x 16 x bfloat> @test_vfmerge_vfm_bf16m4_tu(
+// CHECK-RV64-SAME: <vscale x 16 x bfloat> [[MASKEDOFF:%.*]], <vscale x 16 x bfloat> [[OP1:%.*]], bfloat noundef [[OP2:%.*]], <vscale x 16 x i1> [[MASK:%.*]], i64 noundef [[VL:%.*]]) #[[ATTR0]] {
+// CHECK-RV64-NEXT: [[ENTRY:.*:]]
+// CHECK-RV64-NEXT: [[TMP0:%.*]] = call <vscale x 16 x bfloat> @llvm.riscv.vfmerge.nxv16bf16.bf16.i64(<vscale x 16 x bfloat> [[MASKEDOFF]], <vscale x 16 x bfloat> [[OP1]], bfloat [[OP2]], <vscale x 16 x i1> [[MASK]], i64 [[VL]])
+// CHECK-RV64-NEXT: ret <vscale x 16 x bfloat> [[TMP0]]
+//
+vbfloat16m4_t test_vfmerge_vfm_bf16m4_tu(vbfloat16m4_t maskedoff, vbfloat16m4_t op1, __bf16 op2, vbool4_t mask, size_t vl) {
+ return __riscv_vfmerge_vfm_bf16m4_tu(maskedoff, op1, op2, mask, vl);
+}
+
+// CHECK-RV64-LABEL: define dso_local <vscale x 32 x bfloat> @test_vfmerge_vfm_bf16m8_tu(
+// CHECK-RV64-SAME: <vscale x 32 x bfloat> [[MASKEDOFF:%.*]], <vscale x 32 x bfloat> [[OP1:%.*]], bfloat noundef [[OP2:%.*]], <vscale x 32 x i1> [[MASK:%.*]], i64 noundef [[VL:%.*]]) #[[ATTR0]] {
+// CHECK-RV64-NEXT: [[ENTRY:.*:]]
+// CHECK-RV64-NEXT: [[TMP0:%.*]] = call <vscale x 32 x bfloat> @llvm.riscv.vfmerge.nxv32bf16.bf16.i64(<vscale x 32 x bfloat> [[MASKEDOFF]], <vscale x 32 x bfloat> [[OP1]], bfloat [[OP2]], <vscale x 32 x i1> [[MASK]], i64 [[VL]])
+// CHECK-RV64-NEXT: ret <vscale x 32 x bfloat> [[TMP0]]
+//
+vbfloat16m8_t test_vfmerge_vfm_bf16m8_tu(vbfloat16m8_t maskedoff, vbfloat16m8_t op1, __bf16 op2, vbool2_t mask, size_t vl) {
+ return __riscv_vfmerge_vfm_bf16m8_tu(maskedoff, op1, op2, mask, vl);
+}
+
diff --git a/clang/test/CodeGen/RISCV/rvv-intrinsics-autogenerated/zvfbfa/policy/non-overloaded/vfmin.c b/clang/test/CodeGen/RISCV/rvv-intrinsics-autogenerated/zvfbfa/policy/non-overloaded/vfmin.c
new file mode 100644
index 0000000..911f879
--- /dev/null
+++ b/clang/test/CodeGen/RISCV/rvv-intrinsics-autogenerated/zvfbfa/policy/non-overloaded/vfmin.c
@@ -0,0 +1,489 @@
+// NOTE: Assertions have been autogenerated by utils/update_cc_test_checks.py UTC_ARGS: --version 5
+// REQUIRES: riscv-registered-target
+// RUN: %clang_cc1 -triple riscv64 -target-feature +v \
+// RUN: -target-feature +experimental-zvfbfa -disable-O0-optnone \
+// RUN: -emit-llvm %s -o - | opt -S -passes=mem2reg | \
+// RUN: FileCheck --check-prefix=CHECK-RV64 %s
+
+#include <riscv_vector.h>
+
+// CHECK-RV64-LABEL: define dso_local <vscale x 1 x bfloat> @test_vfmin_vv_bf16mf4_tu(
+// CHECK-RV64-SAME: <vscale x 1 x bfloat> [[MASKEDOFF:%.*]], <vscale x 1 x bfloat> [[OP1:%.*]], <vscale x 1 x bfloat> [[OP2:%.*]], i64 noundef [[VL:%.*]]) #[[ATTR0:[0-9]+]] {
+// CHECK-RV64-NEXT: [[ENTRY:.*:]]
+// CHECK-RV64-NEXT: [[TMP0:%.*]] = call <vscale x 1 x bfloat> @llvm.riscv.vfmin.nxv1bf16.nxv1bf16.i64(<vscale x 1 x bfloat> [[MASKEDOFF]], <vscale x 1 x bfloat> [[OP1]], <vscale x 1 x bfloat> [[OP2]], i64 [[VL]])
+// CHECK-RV64-NEXT: ret <vscale x 1 x bfloat> [[TMP0]]
+//
+vbfloat16mf4_t test_vfmin_vv_bf16mf4_tu(vbfloat16mf4_t maskedoff, vbfloat16mf4_t op1, vbfloat16mf4_t op2, size_t vl) {
+ return __riscv_vfmin_vv_bf16mf4_tu(maskedoff, op1, op2, vl);
+}
+
+// CHECK-RV64-LABEL: define dso_local <vscale x 1 x bfloat> @test_vfmin_vf_bf16mf4_tu(
+// CHECK-RV64-SAME: <vscale x 1 x bfloat> [[MASKEDOFF:%.*]], <vscale x 1 x bfloat> [[OP1:%.*]], bfloat noundef [[OP2:%.*]], i64 noundef [[VL:%.*]]) #[[ATTR0]] {
+// CHECK-RV64-NEXT: [[ENTRY:.*:]]
+// CHECK-RV64-NEXT: [[TMP0:%.*]] = call <vscale x 1 x bfloat> @llvm.riscv.vfmin.nxv1bf16.bf16.i64(<vscale x 1 x bfloat> [[MASKEDOFF]], <vscale x 1 x bfloat> [[OP1]], bfloat [[OP2]], i64 [[VL]])
+// CHECK-RV64-NEXT: ret <vscale x 1 x bfloat> [[TMP0]]
+//
+vbfloat16mf4_t test_vfmin_vf_bf16mf4_tu(vbfloat16mf4_t maskedoff, vbfloat16mf4_t op1, __bf16 op2, size_t vl) {
+ return __riscv_vfmin_vf_bf16mf4_tu(maskedoff, op1, op2, vl);
+}
+
+// CHECK-RV64-LABEL: define dso_local <vscale x 2 x bfloat> @test_vfmin_vv_bf16mf2_tu(
+// CHECK-RV64-SAME: <vscale x 2 x bfloat> [[MASKEDOFF:%.*]], <vscale x 2 x bfloat> [[OP1:%.*]], <vscale x 2 x bfloat> [[OP2:%.*]], i64 noundef [[VL:%.*]]) #[[ATTR0]] {
+// CHECK-RV64-NEXT: [[ENTRY:.*:]]
+// CHECK-RV64-NEXT: [[TMP0:%.*]] = call <vscale x 2 x bfloat> @llvm.riscv.vfmin.nxv2bf16.nxv2bf16.i64(<vscale x 2 x bfloat> [[MASKEDOFF]], <vscale x 2 x bfloat> [[OP1]], <vscale x 2 x bfloat> [[OP2]], i64 [[VL]])
+// CHECK-RV64-NEXT: ret <vscale x 2 x bfloat> [[TMP0]]
+//
+vbfloat16mf2_t test_vfmin_vv_bf16mf2_tu(vbfloat16mf2_t maskedoff, vbfloat16mf2_t op1, vbfloat16mf2_t op2, size_t vl) {
+ return __riscv_vfmin_vv_bf16mf2_tu(maskedoff, op1, op2, vl);
+}
+
+// CHECK-RV64-LABEL: define dso_local <vscale x 2 x bfloat> @test_vfmin_vf_bf16mf2_tu(
+// CHECK-RV64-SAME: <vscale x 2 x bfloat> [[MASKEDOFF:%.*]], <vscale x 2 x bfloat> [[OP1:%.*]], bfloat noundef [[OP2:%.*]], i64 noundef [[VL:%.*]]) #[[ATTR0]] {
+// CHECK-RV64-NEXT: [[ENTRY:.*:]]
+// CHECK-RV64-NEXT: [[TMP0:%.*]] = call <vscale x 2 x bfloat> @llvm.riscv.vfmin.nxv2bf16.bf16.i64(<vscale x 2 x bfloat> [[MASKEDOFF]], <vscale x 2 x bfloat> [[OP1]], bfloat [[OP2]], i64 [[VL]])
+// CHECK-RV64-NEXT: ret <vscale x 2 x bfloat> [[TMP0]]
+//
+vbfloat16mf2_t test_vfmin_vf_bf16mf2_tu(vbfloat16mf2_t maskedoff, vbfloat16mf2_t op1, __bf16 op2, size_t vl) {
+ return __riscv_vfmin_vf_bf16mf2_tu(maskedoff, op1, op2, vl);
+}
+
+// CHECK-RV64-LABEL: define dso_local <vscale x 4 x bfloat> @test_vfmin_vv_bf16m1_tu(
+// CHECK-RV64-SAME: <vscale x 4 x bfloat> [[MASKEDOFF:%.*]], <vscale x 4 x bfloat> [[OP1:%.*]], <vscale x 4 x bfloat> [[OP2:%.*]], i64 noundef [[VL:%.*]]) #[[ATTR0]] {
+// CHECK-RV64-NEXT: [[ENTRY:.*:]]
+// CHECK-RV64-NEXT: [[TMP0:%.*]] = call <vscale x 4 x bfloat> @llvm.riscv.vfmin.nxv4bf16.nxv4bf16.i64(<vscale x 4 x bfloat> [[MASKEDOFF]], <vscale x 4 x bfloat> [[OP1]], <vscale x 4 x bfloat> [[OP2]], i64 [[VL]])
+// CHECK-RV64-NEXT: ret <vscale x 4 x bfloat> [[TMP0]]
+//
+vbfloat16m1_t test_vfmin_vv_bf16m1_tu(vbfloat16m1_t maskedoff, vbfloat16m1_t op1, vbfloat16m1_t op2, size_t vl) {
+ return __riscv_vfmin_vv_bf16m1_tu(maskedoff, op1, op2, vl);
+}
+
+// CHECK-RV64-LABEL: define dso_local <vscale x 4 x bfloat> @test_vfmin_vf_bf16m1_tu(
+// CHECK-RV64-SAME: <vscale x 4 x bfloat> [[MASKEDOFF:%.*]], <vscale x 4 x bfloat> [[OP1:%.*]], bfloat noundef [[OP2:%.*]], i64 noundef [[VL:%.*]]) #[[ATTR0]] {
+// CHECK-RV64-NEXT: [[ENTRY:.*:]]
+// CHECK-RV64-NEXT: [[TMP0:%.*]] = call <vscale x 4 x bfloat> @llvm.riscv.vfmin.nxv4bf16.bf16.i64(<vscale x 4 x bfloat> [[MASKEDOFF]], <vscale x 4 x bfloat> [[OP1]], bfloat [[OP2]], i64 [[VL]])
+// CHECK-RV64-NEXT: ret <vscale x 4 x bfloat> [[TMP0]]
+//
+vbfloat16m1_t test_vfmin_vf_bf16m1_tu(vbfloat16m1_t maskedoff, vbfloat16m1_t op1, __bf16 op2, size_t vl) {
+ return __riscv_vfmin_vf_bf16m1_tu(maskedoff, op1, op2, vl);
+}
+
+// CHECK-RV64-LABEL: define dso_local <vscale x 8 x bfloat> @test_vfmin_vv_bf16m2_tu(
+// CHECK-RV64-SAME: <vscale x 8 x bfloat> [[MASKEDOFF:%.*]], <vscale x 8 x bfloat> [[OP1:%.*]], <vscale x 8 x bfloat> [[OP2:%.*]], i64 noundef [[VL:%.*]]) #[[ATTR0]] {
+// CHECK-RV64-NEXT: [[ENTRY:.*:]]
+// CHECK-RV64-NEXT: [[TMP0:%.*]] = call <vscale x 8 x bfloat> @llvm.riscv.vfmin.nxv8bf16.nxv8bf16.i64(<vscale x 8 x bfloat> [[MASKEDOFF]], <vscale x 8 x bfloat> [[OP1]], <vscale x 8 x bfloat> [[OP2]], i64 [[VL]])
+// CHECK-RV64-NEXT: ret <vscale x 8 x bfloat> [[TMP0]]
+//
+vbfloat16m2_t test_vfmin_vv_bf16m2_tu(vbfloat16m2_t maskedoff, vbfloat16m2_t op1, vbfloat16m2_t op2, size_t vl) {
+ return __riscv_vfmin_vv_bf16m2_tu(maskedoff, op1, op2, vl);
+}
+
+// CHECK-RV64-LABEL: define dso_local <vscale x 8 x bfloat> @test_vfmin_vf_bf16m2_tu(
+// CHECK-RV64-SAME: <vscale x 8 x bfloat> [[MASKEDOFF:%.*]], <vscale x 8 x bfloat> [[OP1:%.*]], bfloat noundef [[OP2:%.*]], i64 noundef [[VL:%.*]]) #[[ATTR0]] {
+// CHECK-RV64-NEXT: [[ENTRY:.*:]]
+// CHECK-RV64-NEXT: [[TMP0:%.*]] = call <vscale x 8 x bfloat> @llvm.riscv.vfmin.nxv8bf16.bf16.i64(<vscale x 8 x bfloat> [[MASKEDOFF]], <vscale x 8 x bfloat> [[OP1]], bfloat [[OP2]], i64 [[VL]])
+// CHECK-RV64-NEXT: ret <vscale x 8 x bfloat> [[TMP0]]
+//
+vbfloat16m2_t test_vfmin_vf_bf16m2_tu(vbfloat16m2_t maskedoff, vbfloat16m2_t op1, __bf16 op2, size_t vl) {
+ return __riscv_vfmin_vf_bf16m2_tu(maskedoff, op1, op2, vl);
+}
+
+// CHECK-RV64-LABEL: define dso_local <vscale x 16 x bfloat> @test_vfmin_vv_bf16m4_tu(
+// CHECK-RV64-SAME: <vscale x 16 x bfloat> [[MASKEDOFF:%.*]], <vscale x 16 x bfloat> [[OP1:%.*]], <vscale x 16 x bfloat> [[OP2:%.*]], i64 noundef [[VL:%.*]]) #[[ATTR0]] {
+// CHECK-RV64-NEXT: [[ENTRY:.*:]]
+// CHECK-RV64-NEXT: [[TMP0:%.*]] = call <vscale x 16 x bfloat> @llvm.riscv.vfmin.nxv16bf16.nxv16bf16.i64(<vscale x 16 x bfloat> [[MASKEDOFF]], <vscale x 16 x bfloat> [[OP1]], <vscale x 16 x bfloat> [[OP2]], i64 [[VL]])
+// CHECK-RV64-NEXT: ret <vscale x 16 x bfloat> [[TMP0]]
+//
+vbfloat16m4_t test_vfmin_vv_bf16m4_tu(vbfloat16m4_t maskedoff, vbfloat16m4_t op1, vbfloat16m4_t op2, size_t vl) {
+ return __riscv_vfmin_vv_bf16m4_tu(maskedoff, op1, op2, vl);
+}
+
+// CHECK-RV64-LABEL: define dso_local <vscale x 16 x bfloat> @test_vfmin_vf_bf16m4_tu(
+// CHECK-RV64-SAME: <vscale x 16 x bfloat> [[MASKEDOFF:%.*]], <vscale x 16 x bfloat> [[OP1:%.*]], bfloat noundef [[OP2:%.*]], i64 noundef [[VL:%.*]]) #[[ATTR0]] {
+// CHECK-RV64-NEXT: [[ENTRY:.*:]]
+// CHECK-RV64-NEXT: [[TMP0:%.*]] = call <vscale x 16 x bfloat> @llvm.riscv.vfmin.nxv16bf16.bf16.i64(<vscale x 16 x bfloat> [[MASKEDOFF]], <vscale x 16 x bfloat> [[OP1]], bfloat [[OP2]], i64 [[VL]])
+// CHECK-RV64-NEXT: ret <vscale x 16 x bfloat> [[TMP0]]
+//
+vbfloat16m4_t test_vfmin_vf_bf16m4_tu(vbfloat16m4_t maskedoff, vbfloat16m4_t op1, __bf16 op2, size_t vl) {
+ return __riscv_vfmin_vf_bf16m4_tu(maskedoff, op1, op2, vl);
+}
+
+// CHECK-RV64-LABEL: define dso_local <vscale x 32 x bfloat> @test_vfmin_vv_bf16m8_tu(
+// CHECK-RV64-SAME: <vscale x 32 x bfloat> [[MASKEDOFF:%.*]], <vscale x 32 x bfloat> [[OP1:%.*]], <vscale x 32 x bfloat> [[OP2:%.*]], i64 noundef [[VL:%.*]]) #[[ATTR0]] {
+// CHECK-RV64-NEXT: [[ENTRY:.*:]]
+// CHECK-RV64-NEXT: [[TMP0:%.*]] = call <vscale x 32 x bfloat> @llvm.riscv.vfmin.nxv32bf16.nxv32bf16.i64(<vscale x 32 x bfloat> [[MASKEDOFF]], <vscale x 32 x bfloat> [[OP1]], <vscale x 32 x bfloat> [[OP2]], i64 [[VL]])
+// CHECK-RV64-NEXT: ret <vscale x 32 x bfloat> [[TMP0]]
+//
+vbfloat16m8_t test_vfmin_vv_bf16m8_tu(vbfloat16m8_t maskedoff, vbfloat16m8_t op1, vbfloat16m8_t op2, size_t vl) {
+ return __riscv_vfmin_vv_bf16m8_tu(maskedoff, op1, op2, vl);
+}
+
+// CHECK-RV64-LABEL: define dso_local <vscale x 32 x bfloat> @test_vfmin_vf_bf16m8_tu(
+// CHECK-RV64-SAME: <vscale x 32 x bfloat> [[MASKEDOFF:%.*]], <vscale x 32 x bfloat> [[OP1:%.*]], bfloat noundef [[OP2:%.*]], i64 noundef [[VL:%.*]]) #[[ATTR0]] {
+// CHECK-RV64-NEXT: [[ENTRY:.*:]]
+// CHECK-RV64-NEXT: [[TMP0:%.*]] = call <vscale x 32 x bfloat> @llvm.riscv.vfmin.nxv32bf16.bf16.i64(<vscale x 32 x bfloat> [[MASKEDOFF]], <vscale x 32 x bfloat> [[OP1]], bfloat [[OP2]], i64 [[VL]])
+// CHECK-RV64-NEXT: ret <vscale x 32 x bfloat> [[TMP0]]
+//
+vbfloat16m8_t test_vfmin_vf_bf16m8_tu(vbfloat16m8_t maskedoff, vbfloat16m8_t op1, __bf16 op2, size_t vl) {
+ return __riscv_vfmin_vf_bf16m8_tu(maskedoff, op1, op2, vl);
+}
+
+// CHECK-RV64-LABEL: define dso_local <vscale x 1 x bfloat> @test_vfmin_vv_bf16mf4_tum(
+// CHECK-RV64-SAME: <vscale x 1 x i1> [[MASK:%.*]], <vscale x 1 x bfloat> [[MASKEDOFF:%.*]], <vscale x 1 x bfloat> [[OP1:%.*]], <vscale x 1 x bfloat> [[OP2:%.*]], i64 noundef [[VL:%.*]]) #[[ATTR0]] {
+// CHECK-RV64-NEXT: [[ENTRY:.*:]]
+// CHECK-RV64-NEXT: [[TMP0:%.*]] = call <vscale x 1 x bfloat> @llvm.riscv.vfmin.mask.nxv1bf16.nxv1bf16.i64(<vscale x 1 x bfloat> [[MASKEDOFF]], <vscale x 1 x bfloat> [[OP1]], <vscale x 1 x bfloat> [[OP2]], <vscale x 1 x i1> [[MASK]], i64 [[VL]], i64 2)
+// CHECK-RV64-NEXT: ret <vscale x 1 x bfloat> [[TMP0]]
+//
+vbfloat16mf4_t test_vfmin_vv_bf16mf4_tum(vbool64_t mask, vbfloat16mf4_t maskedoff, vbfloat16mf4_t op1, vbfloat16mf4_t op2, size_t vl) {
+ return __riscv_vfmin_vv_bf16mf4_tum(mask, maskedoff, op1, op2, vl);
+}
+
+// CHECK-RV64-LABEL: define dso_local <vscale x 1 x bfloat> @test_vfmin_vf_bf16mf4_tum(
+// CHECK-RV64-SAME: <vscale x 1 x i1> [[MASK:%.*]], <vscale x 1 x bfloat> [[MASKEDOFF:%.*]], <vscale x 1 x bfloat> [[OP1:%.*]], bfloat noundef [[OP2:%.*]], i64 noundef [[VL:%.*]]) #[[ATTR0]] {
+// CHECK-RV64-NEXT: [[ENTRY:.*:]]
+// CHECK-RV64-NEXT: [[TMP0:%.*]] = call <vscale x 1 x bfloat> @llvm.riscv.vfmin.mask.nxv1bf16.bf16.i64(<vscale x 1 x bfloat> [[MASKEDOFF]], <vscale x 1 x bfloat> [[OP1]], bfloat [[OP2]], <vscale x 1 x i1> [[MASK]], i64 [[VL]], i64 2)
+// CHECK-RV64-NEXT: ret <vscale x 1 x bfloat> [[TMP0]]
+//
+vbfloat16mf4_t test_vfmin_vf_bf16mf4_tum(vbool64_t mask, vbfloat16mf4_t maskedoff, vbfloat16mf4_t op1, __bf16 op2, size_t vl) {
+ return __riscv_vfmin_vf_bf16mf4_tum(mask, maskedoff, op1, op2, vl);
+}
+
+// CHECK-RV64-LABEL: define dso_local <vscale x 2 x bfloat> @test_vfmin_vv_bf16mf2_tum(
+// CHECK-RV64-SAME: <vscale x 2 x i1> [[MASK:%.*]], <vscale x 2 x bfloat> [[MASKEDOFF:%.*]], <vscale x 2 x bfloat> [[OP1:%.*]], <vscale x 2 x bfloat> [[OP2:%.*]], i64 noundef [[VL:%.*]]) #[[ATTR0]] {
+// CHECK-RV64-NEXT: [[ENTRY:.*:]]
+// CHECK-RV64-NEXT: [[TMP0:%.*]] = call <vscale x 2 x bfloat> @llvm.riscv.vfmin.mask.nxv2bf16.nxv2bf16.i64(<vscale x 2 x bfloat> [[MASKEDOFF]], <vscale x 2 x bfloat> [[OP1]], <vscale x 2 x bfloat> [[OP2]], <vscale x 2 x i1> [[MASK]], i64 [[VL]], i64 2)
+// CHECK-RV64-NEXT: ret <vscale x 2 x bfloat> [[TMP0]]
+//
+vbfloat16mf2_t test_vfmin_vv_bf16mf2_tum(vbool32_t mask, vbfloat16mf2_t maskedoff, vbfloat16mf2_t op1, vbfloat16mf2_t op2, size_t vl) {
+ return __riscv_vfmin_vv_bf16mf2_tum(mask, maskedoff, op1, op2, vl);
+}
+
+// CHECK-RV64-LABEL: define dso_local <vscale x 2 x bfloat> @test_vfmin_vf_bf16mf2_tum(
+// CHECK-RV64-SAME: <vscale x 2 x i1> [[MASK:%.*]], <vscale x 2 x bfloat> [[MASKEDOFF:%.*]], <vscale x 2 x bfloat> [[OP1:%.*]], bfloat noundef [[OP2:%.*]], i64 noundef [[VL:%.*]]) #[[ATTR0]] {
+// CHECK-RV64-NEXT: [[ENTRY:.*:]]
+// CHECK-RV64-NEXT: [[TMP0:%.*]] = call <vscale x 2 x bfloat> @llvm.riscv.vfmin.mask.nxv2bf16.bf16.i64(<vscale x 2 x bfloat> [[MASKEDOFF]], <vscale x 2 x bfloat> [[OP1]], bfloat [[OP2]], <vscale x 2 x i1> [[MASK]], i64 [[VL]], i64 2)
+// CHECK-RV64-NEXT: ret <vscale x 2 x bfloat> [[TMP0]]
+//
+vbfloat16mf2_t test_vfmin_vf_bf16mf2_tum(vbool32_t mask, vbfloat16mf2_t maskedoff, vbfloat16mf2_t op1, __bf16 op2, size_t vl) {
+ return __riscv_vfmin_vf_bf16mf2_tum(mask, maskedoff, op1, op2, vl);
+}
+
+// CHECK-RV64-LABEL: define dso_local <vscale x 4 x bfloat> @test_vfmin_vv_bf16m1_tum(
+// CHECK-RV64-SAME: <vscale x 4 x i1> [[MASK:%.*]], <vscale x 4 x bfloat> [[MASKEDOFF:%.*]], <vscale x 4 x bfloat> [[OP1:%.*]], <vscale x 4 x bfloat> [[OP2:%.*]], i64 noundef [[VL:%.*]]) #[[ATTR0]] {
+// CHECK-RV64-NEXT: [[ENTRY:.*:]]
+// CHECK-RV64-NEXT: [[TMP0:%.*]] = call <vscale x 4 x bfloat> @llvm.riscv.vfmin.mask.nxv4bf16.nxv4bf16.i64(<vscale x 4 x bfloat> [[MASKEDOFF]], <vscale x 4 x bfloat> [[OP1]], <vscale x 4 x bfloat> [[OP2]], <vscale x 4 x i1> [[MASK]], i64 [[VL]], i64 2)
+// CHECK-RV64-NEXT: ret <vscale x 4 x bfloat> [[TMP0]]
+//
+vbfloat16m1_t test_vfmin_vv_bf16m1_tum(vbool16_t mask, vbfloat16m1_t maskedoff, vbfloat16m1_t op1, vbfloat16m1_t op2, size_t vl) {
+ return __riscv_vfmin_vv_bf16m1_tum(mask, maskedoff, op1, op2, vl);
+}
+
+// CHECK-RV64-LABEL: define dso_local <vscale x 4 x bfloat> @test_vfmin_vf_bf16m1_tum(
+// CHECK-RV64-SAME: <vscale x 4 x i1> [[MASK:%.*]], <vscale x 4 x bfloat> [[MASKEDOFF:%.*]], <vscale x 4 x bfloat> [[OP1:%.*]], bfloat noundef [[OP2:%.*]], i64 noundef [[VL:%.*]]) #[[ATTR0]] {
+// CHECK-RV64-NEXT: [[ENTRY:.*:]]
+// CHECK-RV64-NEXT: [[TMP0:%.*]] = call <vscale x 4 x bfloat> @llvm.riscv.vfmin.mask.nxv4bf16.bf16.i64(<vscale x 4 x bfloat> [[MASKEDOFF]], <vscale x 4 x bfloat> [[OP1]], bfloat [[OP2]], <vscale x 4 x i1> [[MASK]], i64 [[VL]], i64 2)
+// CHECK-RV64-NEXT: ret <vscale x 4 x bfloat> [[TMP0]]
+//
+vbfloat16m1_t test_vfmin_vf_bf16m1_tum(vbool16_t mask, vbfloat16m1_t maskedoff, vbfloat16m1_t op1, __bf16 op2, size_t vl) {
+ return __riscv_vfmin_vf_bf16m1_tum(mask, maskedoff, op1, op2, vl);
+}
+
+// CHECK-RV64-LABEL: define dso_local <vscale x 8 x bfloat> @test_vfmin_vv_bf16m2_tum(
+// CHECK-RV64-SAME: <vscale x 8 x i1> [[MASK:%.*]], <vscale x 8 x bfloat> [[MASKEDOFF:%.*]], <vscale x 8 x bfloat> [[OP1:%.*]], <vscale x 8 x bfloat> [[OP2:%.*]], i64 noundef [[VL:%.*]]) #[[ATTR0]] {
+// CHECK-RV64-NEXT: [[ENTRY:.*:]]
+// CHECK-RV64-NEXT: [[TMP0:%.*]] = call <vscale x 8 x bfloat> @llvm.riscv.vfmin.mask.nxv8bf16.nxv8bf16.i64(<vscale x 8 x bfloat> [[MASKEDOFF]], <vscale x 8 x bfloat> [[OP1]], <vscale x 8 x bfloat> [[OP2]], <vscale x 8 x i1> [[MASK]], i64 [[VL]], i64 2)
+// CHECK-RV64-NEXT: ret <vscale x 8 x bfloat> [[TMP0]]
+//
+vbfloat16m2_t test_vfmin_vv_bf16m2_tum(vbool8_t mask, vbfloat16m2_t maskedoff, vbfloat16m2_t op1, vbfloat16m2_t op2, size_t vl) {
+ return __riscv_vfmin_vv_bf16m2_tum(mask, maskedoff, op1, op2, vl);
+}
+
+// CHECK-RV64-LABEL: define dso_local <vscale x 8 x bfloat> @test_vfmin_vf_bf16m2_tum(
+// CHECK-RV64-SAME: <vscale x 8 x i1> [[MASK:%.*]], <vscale x 8 x bfloat> [[MASKEDOFF:%.*]], <vscale x 8 x bfloat> [[OP1:%.*]], bfloat noundef [[OP2:%.*]], i64 noundef [[VL:%.*]]) #[[ATTR0]] {
+// CHECK-RV64-NEXT: [[ENTRY:.*:]]
+// CHECK-RV64-NEXT: [[TMP0:%.*]] = call <vscale x 8 x bfloat> @llvm.riscv.vfmin.mask.nxv8bf16.bf16.i64(<vscale x 8 x bfloat> [[MASKEDOFF]], <vscale x 8 x bfloat> [[OP1]], bfloat [[OP2]], <vscale x 8 x i1> [[MASK]], i64 [[VL]], i64 2)
+// CHECK-RV64-NEXT: ret <vscale x 8 x bfloat> [[TMP0]]
+//
+vbfloat16m2_t test_vfmin_vf_bf16m2_tum(vbool8_t mask, vbfloat16m2_t maskedoff, vbfloat16m2_t op1, __bf16 op2, size_t vl) {
+ return __riscv_vfmin_vf_bf16m2_tum(mask, maskedoff, op1, op2, vl);
+}
+
+// CHECK-RV64-LABEL: define dso_local <vscale x 16 x bfloat> @test_vfmin_vv_bf16m4_tum(
+// CHECK-RV64-SAME: <vscale x 16 x i1> [[MASK:%.*]], <vscale x 16 x bfloat> [[MASKEDOFF:%.*]], <vscale x 16 x bfloat> [[OP1:%.*]], <vscale x 16 x bfloat> [[OP2:%.*]], i64 noundef [[VL:%.*]]) #[[ATTR0]] {
+// CHECK-RV64-NEXT: [[ENTRY:.*:]]
+// CHECK-RV64-NEXT: [[TMP0:%.*]] = call <vscale x 16 x bfloat> @llvm.riscv.vfmin.mask.nxv16bf16.nxv16bf16.i64(<vscale x 16 x bfloat> [[MASKEDOFF]], <vscale x 16 x bfloat> [[OP1]], <vscale x 16 x bfloat> [[OP2]], <vscale x 16 x i1> [[MASK]], i64 [[VL]], i64 2)
+// CHECK-RV64-NEXT: ret <vscale x 16 x bfloat> [[TMP0]]
+//
+vbfloat16m4_t test_vfmin_vv_bf16m4_tum(vbool4_t mask, vbfloat16m4_t maskedoff, vbfloat16m4_t op1, vbfloat16m4_t op2, size_t vl) {
+ return __riscv_vfmin_vv_bf16m4_tum(mask, maskedoff, op1, op2, vl);
+}
+
+// CHECK-RV64-LABEL: define dso_local <vscale x 16 x bfloat> @test_vfmin_vf_bf16m4_tum(
+// CHECK-RV64-SAME: <vscale x 16 x i1> [[MASK:%.*]], <vscale x 16 x bfloat> [[MASKEDOFF:%.*]], <vscale x 16 x bfloat> [[OP1:%.*]], bfloat noundef [[OP2:%.*]], i64 noundef [[VL:%.*]]) #[[ATTR0]] {
+// CHECK-RV64-NEXT: [[ENTRY:.*:]]
+// CHECK-RV64-NEXT: [[TMP0:%.*]] = call <vscale x 16 x bfloat> @llvm.riscv.vfmin.mask.nxv16bf16.bf16.i64(<vscale x 16 x bfloat> [[MASKEDOFF]], <vscale x 16 x bfloat> [[OP1]], bfloat [[OP2]], <vscale x 16 x i1> [[MASK]], i64 [[VL]], i64 2)
+// CHECK-RV64-NEXT: ret <vscale x 16 x bfloat> [[TMP0]]
+//
+vbfloat16m4_t test_vfmin_vf_bf16m4_tum(vbool4_t mask, vbfloat16m4_t maskedoff, vbfloat16m4_t op1, __bf16 op2, size_t vl) {
+ return __riscv_vfmin_vf_bf16m4_tum(mask, maskedoff, op1, op2, vl);
+}
+
+// CHECK-RV64-LABEL: define dso_local <vscale x 32 x bfloat> @test_vfmin_vv_bf16m8_tum(
+// CHECK-RV64-SAME: <vscale x 32 x i1> [[MASK:%.*]], <vscale x 32 x bfloat> [[MASKEDOFF:%.*]], <vscale x 32 x bfloat> [[OP1:%.*]], <vscale x 32 x bfloat> [[OP2:%.*]], i64 noundef [[VL:%.*]]) #[[ATTR0]] {
+// CHECK-RV64-NEXT: [[ENTRY:.*:]]
+// CHECK-RV64-NEXT: [[TMP0:%.*]] = call <vscale x 32 x bfloat> @llvm.riscv.vfmin.mask.nxv32bf16.nxv32bf16.i64(<vscale x 32 x bfloat> [[MASKEDOFF]], <vscale x 32 x bfloat> [[OP1]], <vscale x 32 x bfloat> [[OP2]], <vscale x 32 x i1> [[MASK]], i64 [[VL]], i64 2)
+// CHECK-RV64-NEXT: ret <vscale x 32 x bfloat> [[TMP0]]
+//
+vbfloat16m8_t test_vfmin_vv_bf16m8_tum(vbool2_t mask, vbfloat16m8_t maskedoff, vbfloat16m8_t op1, vbfloat16m8_t op2, size_t vl) {
+ return __riscv_vfmin_vv_bf16m8_tum(mask, maskedoff, op1, op2, vl);
+}
+
+// CHECK-RV64-LABEL: define dso_local <vscale x 32 x bfloat> @test_vfmin_vf_bf16m8_tum(
+// CHECK-RV64-SAME: <vscale x 32 x i1> [[MASK:%.*]], <vscale x 32 x bfloat> [[MASKEDOFF:%.*]], <vscale x 32 x bfloat> [[OP1:%.*]], bfloat noundef [[OP2:%.*]], i64 noundef [[VL:%.*]]) #[[ATTR0]] {
+// CHECK-RV64-NEXT: [[ENTRY:.*:]]
+// CHECK-RV64-NEXT: [[TMP0:%.*]] = call <vscale x 32 x bfloat> @llvm.riscv.vfmin.mask.nxv32bf16.bf16.i64(<vscale x 32 x bfloat> [[MASKEDOFF]], <vscale x 32 x bfloat> [[OP1]], bfloat [[OP2]], <vscale x 32 x i1> [[MASK]], i64 [[VL]], i64 2)
+// CHECK-RV64-NEXT: ret <vscale x 32 x bfloat> [[TMP0]]
+//
+vbfloat16m8_t test_vfmin_vf_bf16m8_tum(vbool2_t mask, vbfloat16m8_t maskedoff, vbfloat16m8_t op1, __bf16 op2, size_t vl) {
+ return __riscv_vfmin_vf_bf16m8_tum(mask, maskedoff, op1, op2, vl);
+}
+
+// CHECK-RV64-LABEL: define dso_local <vscale x 1 x bfloat> @test_vfmin_vv_bf16mf4_tumu(
+// CHECK-RV64-SAME: <vscale x 1 x i1> [[MASK:%.*]], <vscale x 1 x bfloat> [[MASKEDOFF:%.*]], <vscale x 1 x bfloat> [[OP1:%.*]], <vscale x 1 x bfloat> [[OP2:%.*]], i64 noundef [[VL:%.*]]) #[[ATTR0]] {
+// CHECK-RV64-NEXT: [[ENTRY:.*:]]
+// CHECK-RV64-NEXT: [[TMP0:%.*]] = call <vscale x 1 x bfloat> @llvm.riscv.vfmin.mask.nxv1bf16.nxv1bf16.i64(<vscale x 1 x bfloat> [[MASKEDOFF]], <vscale x 1 x bfloat> [[OP1]], <vscale x 1 x bfloat> [[OP2]], <vscale x 1 x i1> [[MASK]], i64 [[VL]], i64 0)
+// CHECK-RV64-NEXT: ret <vscale x 1 x bfloat> [[TMP0]]
+//
+vbfloat16mf4_t test_vfmin_vv_bf16mf4_tumu(vbool64_t mask, vbfloat16mf4_t maskedoff, vbfloat16mf4_t op1, vbfloat16mf4_t op2, size_t vl) {
+ return __riscv_vfmin_vv_bf16mf4_tumu(mask, maskedoff, op1, op2, vl);
+}
+
+// CHECK-RV64-LABEL: define dso_local <vscale x 1 x bfloat> @test_vfmin_vf_bf16mf4_tumu(
+// CHECK-RV64-SAME: <vscale x 1 x i1> [[MASK:%.*]], <vscale x 1 x bfloat> [[MASKEDOFF:%.*]], <vscale x 1 x bfloat> [[OP1:%.*]], bfloat noundef [[OP2:%.*]], i64 noundef [[VL:%.*]]) #[[ATTR0]] {
+// CHECK-RV64-NEXT: [[ENTRY:.*:]]
+// CHECK-RV64-NEXT: [[TMP0:%.*]] = call <vscale x 1 x bfloat> @llvm.riscv.vfmin.mask.nxv1bf16.bf16.i64(<vscale x 1 x bfloat> [[MASKEDOFF]], <vscale x 1 x bfloat> [[OP1]], bfloat [[OP2]], <vscale x 1 x i1> [[MASK]], i64 [[VL]], i64 0)
+// CHECK-RV64-NEXT: ret <vscale x 1 x bfloat> [[TMP0]]
+//
+vbfloat16mf4_t test_vfmin_vf_bf16mf4_tumu(vbool64_t mask, vbfloat16mf4_t maskedoff, vbfloat16mf4_t op1, __bf16 op2, size_t vl) {
+ return __riscv_vfmin_vf_bf16mf4_tumu(mask, maskedoff, op1, op2, vl);
+}
+
+// CHECK-RV64-LABEL: define dso_local <vscale x 2 x bfloat> @test_vfmin_vv_bf16mf2_tumu(
+// CHECK-RV64-SAME: <vscale x 2 x i1> [[MASK:%.*]], <vscale x 2 x bfloat> [[MASKEDOFF:%.*]], <vscale x 2 x bfloat> [[OP1:%.*]], <vscale x 2 x bfloat> [[OP2:%.*]], i64 noundef [[VL:%.*]]) #[[ATTR0]] {
+// CHECK-RV64-NEXT: [[ENTRY:.*:]]
+// CHECK-RV64-NEXT: [[TMP0:%.*]] = call <vscale x 2 x bfloat> @llvm.riscv.vfmin.mask.nxv2bf16.nxv2bf16.i64(<vscale x 2 x bfloat> [[MASKEDOFF]], <vscale x 2 x bfloat> [[OP1]], <vscale x 2 x bfloat> [[OP2]], <vscale x 2 x i1> [[MASK]], i64 [[VL]], i64 0)
+// CHECK-RV64-NEXT: ret <vscale x 2 x bfloat> [[TMP0]]
+//
+vbfloat16mf2_t test_vfmin_vv_bf16mf2_tumu(vbool32_t mask, vbfloat16mf2_t maskedoff, vbfloat16mf2_t op1, vbfloat16mf2_t op2, size_t vl) {
+ return __riscv_vfmin_vv_bf16mf2_tumu(mask, maskedoff, op1, op2, vl);
+}
+
+// CHECK-RV64-LABEL: define dso_local <vscale x 2 x bfloat> @test_vfmin_vf_bf16mf2_tumu(
+// CHECK-RV64-SAME: <vscale x 2 x i1> [[MASK:%.*]], <vscale x 2 x bfloat> [[MASKEDOFF:%.*]], <vscale x 2 x bfloat> [[OP1:%.*]], bfloat noundef [[OP2:%.*]], i64 noundef [[VL:%.*]]) #[[ATTR0]] {
+// CHECK-RV64-NEXT: [[ENTRY:.*:]]
+// CHECK-RV64-NEXT: [[TMP0:%.*]] = call <vscale x 2 x bfloat> @llvm.riscv.vfmin.mask.nxv2bf16.bf16.i64(<vscale x 2 x bfloat> [[MASKEDOFF]], <vscale x 2 x bfloat> [[OP1]], bfloat [[OP2]], <vscale x 2 x i1> [[MASK]], i64 [[VL]], i64 0)
+// CHECK-RV64-NEXT: ret <vscale x 2 x bfloat> [[TMP0]]
+//
+vbfloat16mf2_t test_vfmin_vf_bf16mf2_tumu(vbool32_t mask, vbfloat16mf2_t maskedoff, vbfloat16mf2_t op1, __bf16 op2, size_t vl) {
+ return __riscv_vfmin_vf_bf16mf2_tumu(mask, maskedoff, op1, op2, vl);
+}
+
+// CHECK-RV64-LABEL: define dso_local <vscale x 4 x bfloat> @test_vfmin_vv_bf16m1_tumu(
+// CHECK-RV64-SAME: <vscale x 4 x i1> [[MASK:%.*]], <vscale x 4 x bfloat> [[MASKEDOFF:%.*]], <vscale x 4 x bfloat> [[OP1:%.*]], <vscale x 4 x bfloat> [[OP2:%.*]], i64 noundef [[VL:%.*]]) #[[ATTR0]] {
+// CHECK-RV64-NEXT: [[ENTRY:.*:]]
+// CHECK-RV64-NEXT: [[TMP0:%.*]] = call <vscale x 4 x bfloat> @llvm.riscv.vfmin.mask.nxv4bf16.nxv4bf16.i64(<vscale x 4 x bfloat> [[MASKEDOFF]], <vscale x 4 x bfloat> [[OP1]], <vscale x 4 x bfloat> [[OP2]], <vscale x 4 x i1> [[MASK]], i64 [[VL]], i64 0)
+// CHECK-RV64-NEXT: ret <vscale x 4 x bfloat> [[TMP0]]
+//
+vbfloat16m1_t test_vfmin_vv_bf16m1_tumu(vbool16_t mask, vbfloat16m1_t maskedoff, vbfloat16m1_t op1, vbfloat16m1_t op2, size_t vl) {
+ return __riscv_vfmin_vv_bf16m1_tumu(mask, maskedoff, op1, op2, vl);
+}
+
+// CHECK-RV64-LABEL: define dso_local <vscale x 4 x bfloat> @test_vfmin_vf_bf16m1_tumu(
+// CHECK-RV64-SAME: <vscale x 4 x i1> [[MASK:%.*]], <vscale x 4 x bfloat> [[MASKEDOFF:%.*]], <vscale x 4 x bfloat> [[OP1:%.*]], bfloat noundef [[OP2:%.*]], i64 noundef [[VL:%.*]]) #[[ATTR0]] {
+// CHECK-RV64-NEXT: [[ENTRY:.*:]]
+// CHECK-RV64-NEXT: [[TMP0:%.*]] = call <vscale x 4 x bfloat> @llvm.riscv.vfmin.mask.nxv4bf16.bf16.i64(<vscale x 4 x bfloat> [[MASKEDOFF]], <vscale x 4 x bfloat> [[OP1]], bfloat [[OP2]], <vscale x 4 x i1> [[MASK]], i64 [[VL]], i64 0)
+// CHECK-RV64-NEXT: ret <vscale x 4 x bfloat> [[TMP0]]
+//
+vbfloat16m1_t test_vfmin_vf_bf16m1_tumu(vbool16_t mask, vbfloat16m1_t maskedoff, vbfloat16m1_t op1, __bf16 op2, size_t vl) {
+ return __riscv_vfmin_vf_bf16m1_tumu(mask, maskedoff, op1, op2, vl);
+}
+
+// CHECK-RV64-LABEL: define dso_local <vscale x 8 x bfloat> @test_vfmin_vv_bf16m2_tumu(
+// CHECK-RV64-SAME: <vscale x 8 x i1> [[MASK:%.*]], <vscale x 8 x bfloat> [[MASKEDOFF:%.*]], <vscale x 8 x bfloat> [[OP1:%.*]], <vscale x 8 x bfloat> [[OP2:%.*]], i64 noundef [[VL:%.*]]) #[[ATTR0]] {
+// CHECK-RV64-NEXT: [[ENTRY:.*:]]
+// CHECK-RV64-NEXT: [[TMP0:%.*]] = call <vscale x 8 x bfloat> @llvm.riscv.vfmin.mask.nxv8bf16.nxv8bf16.i64(<vscale x 8 x bfloat> [[MASKEDOFF]], <vscale x 8 x bfloat> [[OP1]], <vscale x 8 x bfloat> [[OP2]], <vscale x 8 x i1> [[MASK]], i64 [[VL]], i64 0)
+// CHECK-RV64-NEXT: ret <vscale x 8 x bfloat> [[TMP0]]
+//
+vbfloat16m2_t test_vfmin_vv_bf16m2_tumu(vbool8_t mask, vbfloat16m2_t maskedoff, vbfloat16m2_t op1, vbfloat16m2_t op2, size_t vl) {
+ return __riscv_vfmin_vv_bf16m2_tumu(mask, maskedoff, op1, op2, vl);
+}
+
+// CHECK-RV64-LABEL: define dso_local <vscale x 8 x bfloat> @test_vfmin_vf_bf16m2_tumu(
+// CHECK-RV64-SAME: <vscale x 8 x i1> [[MASK:%.*]], <vscale x 8 x bfloat> [[MASKEDOFF:%.*]], <vscale x 8 x bfloat> [[OP1:%.*]], bfloat noundef [[OP2:%.*]], i64 noundef [[VL:%.*]]) #[[ATTR0]] {
+// CHECK-RV64-NEXT: [[ENTRY:.*:]]
+// CHECK-RV64-NEXT: [[TMP0:%.*]] = call <vscale x 8 x bfloat> @llvm.riscv.vfmin.mask.nxv8bf16.bf16.i64(<vscale x 8 x bfloat> [[MASKEDOFF]], <vscale x 8 x bfloat> [[OP1]], bfloat [[OP2]], <vscale x 8 x i1> [[MASK]], i64 [[VL]], i64 0)
+// CHECK-RV64-NEXT: ret <vscale x 8 x bfloat> [[TMP0]]
+//
+vbfloat16m2_t test_vfmin_vf_bf16m2_tumu(vbool8_t mask, vbfloat16m2_t maskedoff, vbfloat16m2_t op1, __bf16 op2, size_t vl) {
+ return __riscv_vfmin_vf_bf16m2_tumu(mask, maskedoff, op1, op2, vl);
+}
+
+// CHECK-RV64-LABEL: define dso_local <vscale x 16 x bfloat> @test_vfmin_vv_bf16m4_tumu(
+// CHECK-RV64-SAME: <vscale x 16 x i1> [[MASK:%.*]], <vscale x 16 x bfloat> [[MASKEDOFF:%.*]], <vscale x 16 x bfloat> [[OP1:%.*]], <vscale x 16 x bfloat> [[OP2:%.*]], i64 noundef [[VL:%.*]]) #[[ATTR0]] {
+// CHECK-RV64-NEXT: [[ENTRY:.*:]]
+// CHECK-RV64-NEXT: [[TMP0:%.*]] = call <vscale x 16 x bfloat> @llvm.riscv.vfmin.mask.nxv16bf16.nxv16bf16.i64(<vscale x 16 x bfloat> [[MASKEDOFF]], <vscale x 16 x bfloat> [[OP1]], <vscale x 16 x bfloat> [[OP2]], <vscale x 16 x i1> [[MASK]], i64 [[VL]], i64 0)
+// CHECK-RV64-NEXT: ret <vscale x 16 x bfloat> [[TMP0]]
+//
+vbfloat16m4_t test_vfmin_vv_bf16m4_tumu(vbool4_t mask, vbfloat16m4_t maskedoff, vbfloat16m4_t op1, vbfloat16m4_t op2, size_t vl) {
+ return __riscv_vfmin_vv_bf16m4_tumu(mask, maskedoff, op1, op2, vl);
+}
+
+// CHECK-RV64-LABEL: define dso_local <vscale x 16 x bfloat> @test_vfmin_vf_bf16m4_tumu(
+// CHECK-RV64-SAME: <vscale x 16 x i1> [[MASK:%.*]], <vscale x 16 x bfloat> [[MASKEDOFF:%.*]], <vscale x 16 x bfloat> [[OP1:%.*]], bfloat noundef [[OP2:%.*]], i64 noundef [[VL:%.*]]) #[[ATTR0]] {
+// CHECK-RV64-NEXT: [[ENTRY:.*:]]
+// CHECK-RV64-NEXT: [[TMP0:%.*]] = call <vscale x 16 x bfloat> @llvm.riscv.vfmin.mask.nxv16bf16.bf16.i64(<vscale x 16 x bfloat> [[MASKEDOFF]], <vscale x 16 x bfloat> [[OP1]], bfloat [[OP2]], <vscale x 16 x i1> [[MASK]], i64 [[VL]], i64 0)
+// CHECK-RV64-NEXT: ret <vscale x 16 x bfloat> [[TMP0]]
+//
+vbfloat16m4_t test_vfmin_vf_bf16m4_tumu(vbool4_t mask, vbfloat16m4_t maskedoff, vbfloat16m4_t op1, __bf16 op2, size_t vl) {
+ return __riscv_vfmin_vf_bf16m4_tumu(mask, maskedoff, op1, op2, vl);
+}
+
+// CHECK-RV64-LABEL: define dso_local <vscale x 32 x bfloat> @test_vfmin_vv_bf16m8_tumu(
+// CHECK-RV64-SAME: <vscale x 32 x i1> [[MASK:%.*]], <vscale x 32 x bfloat> [[MASKEDOFF:%.*]], <vscale x 32 x bfloat> [[OP1:%.*]], <vscale x 32 x bfloat> [[OP2:%.*]], i64 noundef [[VL:%.*]]) #[[ATTR0]] {
+// CHECK-RV64-NEXT: [[ENTRY:.*:]]
+// CHECK-RV64-NEXT: [[TMP0:%.*]] = call <vscale x 32 x bfloat> @llvm.riscv.vfmin.mask.nxv32bf16.nxv32bf16.i64(<vscale x 32 x bfloat> [[MASKEDOFF]], <vscale x 32 x bfloat> [[OP1]], <vscale x 32 x bfloat> [[OP2]], <vscale x 32 x i1> [[MASK]], i64 [[VL]], i64 0)
+// CHECK-RV64-NEXT: ret <vscale x 32 x bfloat> [[TMP0]]
+//
+vbfloat16m8_t test_vfmin_vv_bf16m8_tumu(vbool2_t mask, vbfloat16m8_t maskedoff, vbfloat16m8_t op1, vbfloat16m8_t op2, size_t vl) {
+ return __riscv_vfmin_vv_bf16m8_tumu(mask, maskedoff, op1, op2, vl);
+}
+
+// CHECK-RV64-LABEL: define dso_local <vscale x 32 x bfloat> @test_vfmin_vf_bf16m8_tumu(
+// CHECK-RV64-SAME: <vscale x 32 x i1> [[MASK:%.*]], <vscale x 32 x bfloat> [[MASKEDOFF:%.*]], <vscale x 32 x bfloat> [[OP1:%.*]], bfloat noundef [[OP2:%.*]], i64 noundef [[VL:%.*]]) #[[ATTR0]] {
+// CHECK-RV64-NEXT: [[ENTRY:.*:]]
+// CHECK-RV64-NEXT: [[TMP0:%.*]] = call <vscale x 32 x bfloat> @llvm.riscv.vfmin.mask.nxv32bf16.bf16.i64(<vscale x 32 x bfloat> [[MASKEDOFF]], <vscale x 32 x bfloat> [[OP1]], bfloat [[OP2]], <vscale x 32 x i1> [[MASK]], i64 [[VL]], i64 0)
+// CHECK-RV64-NEXT: ret <vscale x 32 x bfloat> [[TMP0]]
+//
+vbfloat16m8_t test_vfmin_vf_bf16m8_tumu(vbool2_t mask, vbfloat16m8_t maskedoff, vbfloat16m8_t op1, __bf16 op2, size_t vl) {
+ return __riscv_vfmin_vf_bf16m8_tumu(mask, maskedoff, op1, op2, vl);
+}
+
+// CHECK-RV64-LABEL: define dso_local <vscale x 1 x bfloat> @test_vfmin_vv_bf16mf4_mu(
+// CHECK-RV64-SAME: <vscale x 1 x i1> [[MASK:%.*]], <vscale x 1 x bfloat> [[MASKEDOFF:%.*]], <vscale x 1 x bfloat> [[OP1:%.*]], <vscale x 1 x bfloat> [[OP2:%.*]], i64 noundef [[VL:%.*]]) #[[ATTR0]] {
+// CHECK-RV64-NEXT: [[ENTRY:.*:]]
+// CHECK-RV64-NEXT: [[TMP0:%.*]] = call <vscale x 1 x bfloat> @llvm.riscv.vfmin.mask.nxv1bf16.nxv1bf16.i64(<vscale x 1 x bfloat> [[MASKEDOFF]], <vscale x 1 x bfloat> [[OP1]], <vscale x 1 x bfloat> [[OP2]], <vscale x 1 x i1> [[MASK]], i64 [[VL]], i64 1)
+// CHECK-RV64-NEXT: ret <vscale x 1 x bfloat> [[TMP0]]
+//
+vbfloat16mf4_t test_vfmin_vv_bf16mf4_mu(vbool64_t mask, vbfloat16mf4_t maskedoff, vbfloat16mf4_t op1, vbfloat16mf4_t op2, size_t vl) {
+ return __riscv_vfmin_vv_bf16mf4_mu(mask, maskedoff, op1, op2, vl);
+}
+
+// CHECK-RV64-LABEL: define dso_local <vscale x 1 x bfloat> @test_vfmin_vf_bf16mf4_mu(
+// CHECK-RV64-SAME: <vscale x 1 x i1> [[MASK:%.*]], <vscale x 1 x bfloat> [[MASKEDOFF:%.*]], <vscale x 1 x bfloat> [[OP1:%.*]], bfloat noundef [[OP2:%.*]], i64 noundef [[VL:%.*]]) #[[ATTR0]] {
+// CHECK-RV64-NEXT: [[ENTRY:.*:]]
+// CHECK-RV64-NEXT: [[TMP0:%.*]] = call <vscale x 1 x bfloat> @llvm.riscv.vfmin.mask.nxv1bf16.bf16.i64(<vscale x 1 x bfloat> [[MASKEDOFF]], <vscale x 1 x bfloat> [[OP1]], bfloat [[OP2]], <vscale x 1 x i1> [[MASK]], i64 [[VL]], i64 1)
+// CHECK-RV64-NEXT: ret <vscale x 1 x bfloat> [[TMP0]]
+//
+vbfloat16mf4_t test_vfmin_vf_bf16mf4_mu(vbool64_t mask, vbfloat16mf4_t maskedoff, vbfloat16mf4_t op1, __bf16 op2, size_t vl) {
+ return __riscv_vfmin_vf_bf16mf4_mu(mask, maskedoff, op1, op2, vl);
+}
+
+// CHECK-RV64-LABEL: define dso_local <vscale x 2 x bfloat> @test_vfmin_vv_bf16mf2_mu(
+// CHECK-RV64-SAME: <vscale x 2 x i1> [[MASK:%.*]], <vscale x 2 x bfloat> [[MASKEDOFF:%.*]], <vscale x 2 x bfloat> [[OP1:%.*]], <vscale x 2 x bfloat> [[OP2:%.*]], i64 noundef [[VL:%.*]]) #[[ATTR0]] {
+// CHECK-RV64-NEXT: [[ENTRY:.*:]]
+// CHECK-RV64-NEXT: [[TMP0:%.*]] = call <vscale x 2 x bfloat> @llvm.riscv.vfmin.mask.nxv2bf16.nxv2bf16.i64(<vscale x 2 x bfloat> [[MASKEDOFF]], <vscale x 2 x bfloat> [[OP1]], <vscale x 2 x bfloat> [[OP2]], <vscale x 2 x i1> [[MASK]], i64 [[VL]], i64 1)
+// CHECK-RV64-NEXT: ret <vscale x 2 x bfloat> [[TMP0]]
+//
+vbfloat16mf2_t test_vfmin_vv_bf16mf2_mu(vbool32_t mask, vbfloat16mf2_t maskedoff, vbfloat16mf2_t op1, vbfloat16mf2_t op2, size_t vl) {
+ return __riscv_vfmin_vv_bf16mf2_mu(mask, maskedoff, op1, op2, vl);
+}
+
+// CHECK-RV64-LABEL: define dso_local <vscale x 2 x bfloat> @test_vfmin_vf_bf16mf2_mu(
+// CHECK-RV64-SAME: <vscale x 2 x i1> [[MASK:%.*]], <vscale x 2 x bfloat> [[MASKEDOFF:%.*]], <vscale x 2 x bfloat> [[OP1:%.*]], bfloat noundef [[OP2:%.*]], i64 noundef [[VL:%.*]]) #[[ATTR0]] {
+// CHECK-RV64-NEXT: [[ENTRY:.*:]]
+// CHECK-RV64-NEXT: [[TMP0:%.*]] = call <vscale x 2 x bfloat> @llvm.riscv.vfmin.mask.nxv2bf16.bf16.i64(<vscale x 2 x bfloat> [[MASKEDOFF]], <vscale x 2 x bfloat> [[OP1]], bfloat [[OP2]], <vscale x 2 x i1> [[MASK]], i64 [[VL]], i64 1)
+// CHECK-RV64-NEXT: ret <vscale x 2 x bfloat> [[TMP0]]
+//
+vbfloat16mf2_t test_vfmin_vf_bf16mf2_mu(vbool32_t mask, vbfloat16mf2_t maskedoff, vbfloat16mf2_t op1, __bf16 op2, size_t vl) {
+ return __riscv_vfmin_vf_bf16mf2_mu(mask, maskedoff, op1, op2, vl);
+}
+
+// CHECK-RV64-LABEL: define dso_local <vscale x 4 x bfloat> @test_vfmin_vv_bf16m1_mu(
+// CHECK-RV64-SAME: <vscale x 4 x i1> [[MASK:%.*]], <vscale x 4 x bfloat> [[MASKEDOFF:%.*]], <vscale x 4 x bfloat> [[OP1:%.*]], <vscale x 4 x bfloat> [[OP2:%.*]], i64 noundef [[VL:%.*]]) #[[ATTR0]] {
+// CHECK-RV64-NEXT: [[ENTRY:.*:]]
+// CHECK-RV64-NEXT: [[TMP0:%.*]] = call <vscale x 4 x bfloat> @llvm.riscv.vfmin.mask.nxv4bf16.nxv4bf16.i64(<vscale x 4 x bfloat> [[MASKEDOFF]], <vscale x 4 x bfloat> [[OP1]], <vscale x 4 x bfloat> [[OP2]], <vscale x 4 x i1> [[MASK]], i64 [[VL]], i64 1)
+// CHECK-RV64-NEXT: ret <vscale x 4 x bfloat> [[TMP0]]
+//
+vbfloat16m1_t test_vfmin_vv_bf16m1_mu(vbool16_t mask, vbfloat16m1_t maskedoff, vbfloat16m1_t op1, vbfloat16m1_t op2, size_t vl) {
+ return __riscv_vfmin_vv_bf16m1_mu(mask, maskedoff, op1, op2, vl);
+}
+
+// CHECK-RV64-LABEL: define dso_local <vscale x 4 x bfloat> @test_vfmin_vf_bf16m1_mu(
+// CHECK-RV64-SAME: <vscale x 4 x i1> [[MASK:%.*]], <vscale x 4 x bfloat> [[MASKEDOFF:%.*]], <vscale x 4 x bfloat> [[OP1:%.*]], bfloat noundef [[OP2:%.*]], i64 noundef [[VL:%.*]]) #[[ATTR0]] {
+// CHECK-RV64-NEXT: [[ENTRY:.*:]]
+// CHECK-RV64-NEXT: [[TMP0:%.*]] = call <vscale x 4 x bfloat> @llvm.riscv.vfmin.mask.nxv4bf16.bf16.i64(<vscale x 4 x bfloat> [[MASKEDOFF]], <vscale x 4 x bfloat> [[OP1]], bfloat [[OP2]], <vscale x 4 x i1> [[MASK]], i64 [[VL]], i64 1)
+// CHECK-RV64-NEXT: ret <vscale x 4 x bfloat> [[TMP0]]
+//
+vbfloat16m1_t test_vfmin_vf_bf16m1_mu(vbool16_t mask, vbfloat16m1_t maskedoff, vbfloat16m1_t op1, __bf16 op2, size_t vl) {
+ return __riscv_vfmin_vf_bf16m1_mu(mask, maskedoff, op1, op2, vl);
+}
+
+// CHECK-RV64-LABEL: define dso_local <vscale x 8 x bfloat> @test_vfmin_vv_bf16m2_mu(
+// CHECK-RV64-SAME: <vscale x 8 x i1> [[MASK:%.*]], <vscale x 8 x bfloat> [[MASKEDOFF:%.*]], <vscale x 8 x bfloat> [[OP1:%.*]], <vscale x 8 x bfloat> [[OP2:%.*]], i64 noundef [[VL:%.*]]) #[[ATTR0]] {
+// CHECK-RV64-NEXT: [[ENTRY:.*:]]
+// CHECK-RV64-NEXT: [[TMP0:%.*]] = call <vscale x 8 x bfloat> @llvm.riscv.vfmin.mask.nxv8bf16.nxv8bf16.i64(<vscale x 8 x bfloat> [[MASKEDOFF]], <vscale x 8 x bfloat> [[OP1]], <vscale x 8 x bfloat> [[OP2]], <vscale x 8 x i1> [[MASK]], i64 [[VL]], i64 1)
+// CHECK-RV64-NEXT: ret <vscale x 8 x bfloat> [[TMP0]]
+//
+vbfloat16m2_t test_vfmin_vv_bf16m2_mu(vbool8_t mask, vbfloat16m2_t maskedoff, vbfloat16m2_t op1, vbfloat16m2_t op2, size_t vl) {
+ return __riscv_vfmin_vv_bf16m2_mu(mask, maskedoff, op1, op2, vl);
+}
+
+// CHECK-RV64-LABEL: define dso_local <vscale x 8 x bfloat> @test_vfmin_vf_bf16m2_mu(
+// CHECK-RV64-SAME: <vscale x 8 x i1> [[MASK:%.*]], <vscale x 8 x bfloat> [[MASKEDOFF:%.*]], <vscale x 8 x bfloat> [[OP1:%.*]], bfloat noundef [[OP2:%.*]], i64 noundef [[VL:%.*]]) #[[ATTR0]] {
+// CHECK-RV64-NEXT: [[ENTRY:.*:]]
+// CHECK-RV64-NEXT: [[TMP0:%.*]] = call <vscale x 8 x bfloat> @llvm.riscv.vfmin.mask.nxv8bf16.bf16.i64(<vscale x 8 x bfloat> [[MASKEDOFF]], <vscale x 8 x bfloat> [[OP1]], bfloat [[OP2]], <vscale x 8 x i1> [[MASK]], i64 [[VL]], i64 1)
+// CHECK-RV64-NEXT: ret <vscale x 8 x bfloat> [[TMP0]]
+//
+vbfloat16m2_t test_vfmin_vf_bf16m2_mu(vbool8_t mask, vbfloat16m2_t maskedoff, vbfloat16m2_t op1, __bf16 op2, size_t vl) {
+ return __riscv_vfmin_vf_bf16m2_mu(mask, maskedoff, op1, op2, vl);
+}
+
+// CHECK-RV64-LABEL: define dso_local <vscale x 16 x bfloat> @test_vfmin_vv_bf16m4_mu(
+// CHECK-RV64-SAME: <vscale x 16 x i1> [[MASK:%.*]], <vscale x 16 x bfloat> [[MASKEDOFF:%.*]], <vscale x 16 x bfloat> [[OP1:%.*]], <vscale x 16 x bfloat> [[OP2:%.*]], i64 noundef [[VL:%.*]]) #[[ATTR0]] {
+// CHECK-RV64-NEXT: [[ENTRY:.*:]]
+// CHECK-RV64-NEXT: [[TMP0:%.*]] = call <vscale x 16 x bfloat> @llvm.riscv.vfmin.mask.nxv16bf16.nxv16bf16.i64(<vscale x 16 x bfloat> [[MASKEDOFF]], <vscale x 16 x bfloat> [[OP1]], <vscale x 16 x bfloat> [[OP2]], <vscale x 16 x i1> [[MASK]], i64 [[VL]], i64 1)
+// CHECK-RV64-NEXT: ret <vscale x 16 x bfloat> [[TMP0]]
+//
+vbfloat16m4_t test_vfmin_vv_bf16m4_mu(vbool4_t mask, vbfloat16m4_t maskedoff, vbfloat16m4_t op1, vbfloat16m4_t op2, size_t vl) {
+ return __riscv_vfmin_vv_bf16m4_mu(mask, maskedoff, op1, op2, vl);
+}
+
+// CHECK-RV64-LABEL: define dso_local <vscale x 16 x bfloat> @test_vfmin_vf_bf16m4_mu(
+// CHECK-RV64-SAME: <vscale x 16 x i1> [[MASK:%.*]], <vscale x 16 x bfloat> [[MASKEDOFF:%.*]], <vscale x 16 x bfloat> [[OP1:%.*]], bfloat noundef [[OP2:%.*]], i64 noundef [[VL:%.*]]) #[[ATTR0]] {
+// CHECK-RV64-NEXT: [[ENTRY:.*:]]
+// CHECK-RV64-NEXT: [[TMP0:%.*]] = call <vscale x 16 x bfloat> @llvm.riscv.vfmin.mask.nxv16bf16.bf16.i64(<vscale x 16 x bfloat> [[MASKEDOFF]], <vscale x 16 x bfloat> [[OP1]], bfloat [[OP2]], <vscale x 16 x i1> [[MASK]], i64 [[VL]], i64 1)
+// CHECK-RV64-NEXT: ret <vscale x 16 x bfloat> [[TMP0]]
+//
+vbfloat16m4_t test_vfmin_vf_bf16m4_mu(vbool4_t mask, vbfloat16m4_t maskedoff, vbfloat16m4_t op1, __bf16 op2, size_t vl) {
+ return __riscv_vfmin_vf_bf16m4_mu(mask, maskedoff, op1, op2, vl);
+}
+
+// CHECK-RV64-LABEL: define dso_local <vscale x 32 x bfloat> @test_vfmin_vv_bf16m8_mu(
+// CHECK-RV64-SAME: <vscale x 32 x i1> [[MASK:%.*]], <vscale x 32 x bfloat> [[MASKEDOFF:%.*]], <vscale x 32 x bfloat> [[OP1:%.*]], <vscale x 32 x bfloat> [[OP2:%.*]], i64 noundef [[VL:%.*]]) #[[ATTR0]] {
+// CHECK-RV64-NEXT: [[ENTRY:.*:]]
+// CHECK-RV64-NEXT: [[TMP0:%.*]] = call <vscale x 32 x bfloat> @llvm.riscv.vfmin.mask.nxv32bf16.nxv32bf16.i64(<vscale x 32 x bfloat> [[MASKEDOFF]], <vscale x 32 x bfloat> [[OP1]], <vscale x 32 x bfloat> [[OP2]], <vscale x 32 x i1> [[MASK]], i64 [[VL]], i64 1)
+// CHECK-RV64-NEXT: ret <vscale x 32 x bfloat> [[TMP0]]
+//
+vbfloat16m8_t test_vfmin_vv_bf16m8_mu(vbool2_t mask, vbfloat16m8_t maskedoff, vbfloat16m8_t op1, vbfloat16m8_t op2, size_t vl) {
+ return __riscv_vfmin_vv_bf16m8_mu(mask, maskedoff, op1, op2, vl);
+}
+
+// CHECK-RV64-LABEL: define dso_local <vscale x 32 x bfloat> @test_vfmin_vf_bf16m8_mu(
+// CHECK-RV64-SAME: <vscale x 32 x i1> [[MASK:%.*]], <vscale x 32 x bfloat> [[MASKEDOFF:%.*]], <vscale x 32 x bfloat> [[OP1:%.*]], bfloat noundef [[OP2:%.*]], i64 noundef [[VL:%.*]]) #[[ATTR0]] {
+// CHECK-RV64-NEXT: [[ENTRY:.*:]]
+// CHECK-RV64-NEXT: [[TMP0:%.*]] = call <vscale x 32 x bfloat> @llvm.riscv.vfmin.mask.nxv32bf16.bf16.i64(<vscale x 32 x bfloat> [[MASKEDOFF]], <vscale x 32 x bfloat> [[OP1]], bfloat [[OP2]], <vscale x 32 x i1> [[MASK]], i64 [[VL]], i64 1)
+// CHECK-RV64-NEXT: ret <vscale x 32 x bfloat> [[TMP0]]
+//
+vbfloat16m8_t test_vfmin_vf_bf16m8_mu(vbool2_t mask, vbfloat16m8_t maskedoff, vbfloat16m8_t op1, __bf16 op2, size_t vl) {
+ return __riscv_vfmin_vf_bf16m8_mu(mask, maskedoff, op1, op2, vl);
+}
+
diff --git a/clang/test/CodeGen/RISCV/rvv-intrinsics-autogenerated/zvfbfa/policy/non-overloaded/vfmsac.c b/clang/test/CodeGen/RISCV/rvv-intrinsics-autogenerated/zvfbfa/policy/non-overloaded/vfmsac.c
new file mode 100644
index 0000000..9575ad3
--- /dev/null
+++ b/clang/test/CodeGen/RISCV/rvv-intrinsics-autogenerated/zvfbfa/policy/non-overloaded/vfmsac.c
@@ -0,0 +1,489 @@
+// NOTE: Assertions have been autogenerated by utils/update_cc_test_checks.py UTC_ARGS: --version 5
+// REQUIRES: riscv-registered-target
+// RUN: %clang_cc1 -triple riscv64 -target-feature +v \
+// RUN: -target-feature +experimental-zvfbfa -disable-O0-optnone \
+// RUN: -emit-llvm %s -o - | opt -S -passes=mem2reg | \
+// RUN: FileCheck --check-prefix=CHECK-RV64 %s
+
+#include <riscv_vector.h>
+
+// CHECK-RV64-LABEL: define dso_local <vscale x 1 x bfloat> @test_vfmsac_vv_bf16mf4_tu(
+// CHECK-RV64-SAME: <vscale x 1 x bfloat> [[VD:%.*]], <vscale x 1 x bfloat> [[VS1:%.*]], <vscale x 1 x bfloat> [[VS2:%.*]], i64 noundef [[VL:%.*]]) #[[ATTR0:[0-9]+]] {
+// CHECK-RV64-NEXT: [[ENTRY:.*:]]
+// CHECK-RV64-NEXT: [[TMP0:%.*]] = call <vscale x 1 x bfloat> @llvm.riscv.vfmsac.nxv1bf16.nxv1bf16.i64(<vscale x 1 x bfloat> [[VD]], <vscale x 1 x bfloat> [[VS1]], <vscale x 1 x bfloat> [[VS2]], i64 7, i64 [[VL]], i64 2)
+// CHECK-RV64-NEXT: ret <vscale x 1 x bfloat> [[TMP0]]
+//
+vbfloat16mf4_t test_vfmsac_vv_bf16mf4_tu(vbfloat16mf4_t vd, vbfloat16mf4_t vs1, vbfloat16mf4_t vs2, size_t vl) {
+ return __riscv_vfmsac_vv_bf16mf4_tu(vd, vs1, vs2, vl);
+}
+
+// CHECK-RV64-LABEL: define dso_local <vscale x 1 x bfloat> @test_vfmsac_vf_bf16mf4_tu(
+// CHECK-RV64-SAME: <vscale x 1 x bfloat> [[VD:%.*]], bfloat noundef [[RS1:%.*]], <vscale x 1 x bfloat> [[VS2:%.*]], i64 noundef [[VL:%.*]]) #[[ATTR0]] {
+// CHECK-RV64-NEXT: [[ENTRY:.*:]]
+// CHECK-RV64-NEXT: [[TMP0:%.*]] = call <vscale x 1 x bfloat> @llvm.riscv.vfmsac.nxv1bf16.bf16.i64(<vscale x 1 x bfloat> [[VD]], bfloat [[RS1]], <vscale x 1 x bfloat> [[VS2]], i64 7, i64 [[VL]], i64 2)
+// CHECK-RV64-NEXT: ret <vscale x 1 x bfloat> [[TMP0]]
+//
+vbfloat16mf4_t test_vfmsac_vf_bf16mf4_tu(vbfloat16mf4_t vd, __bf16 rs1, vbfloat16mf4_t vs2, size_t vl) {
+ return __riscv_vfmsac_vf_bf16mf4_tu(vd, rs1, vs2, vl);
+}
+
+// CHECK-RV64-LABEL: define dso_local <vscale x 2 x bfloat> @test_vfmsac_vv_bf16mf2_tu(
+// CHECK-RV64-SAME: <vscale x 2 x bfloat> [[VD:%.*]], <vscale x 2 x bfloat> [[VS1:%.*]], <vscale x 2 x bfloat> [[VS2:%.*]], i64 noundef [[VL:%.*]]) #[[ATTR0]] {
+// CHECK-RV64-NEXT: [[ENTRY:.*:]]
+// CHECK-RV64-NEXT: [[TMP0:%.*]] = call <vscale x 2 x bfloat> @llvm.riscv.vfmsac.nxv2bf16.nxv2bf16.i64(<vscale x 2 x bfloat> [[VD]], <vscale x 2 x bfloat> [[VS1]], <vscale x 2 x bfloat> [[VS2]], i64 7, i64 [[VL]], i64 2)
+// CHECK-RV64-NEXT: ret <vscale x 2 x bfloat> [[TMP0]]
+//
+vbfloat16mf2_t test_vfmsac_vv_bf16mf2_tu(vbfloat16mf2_t vd, vbfloat16mf2_t vs1, vbfloat16mf2_t vs2, size_t vl) {
+ return __riscv_vfmsac_vv_bf16mf2_tu(vd, vs1, vs2, vl);
+}
+
+// CHECK-RV64-LABEL: define dso_local <vscale x 2 x bfloat> @test_vfmsac_vf_bf16mf2_tu(
+// CHECK-RV64-SAME: <vscale x 2 x bfloat> [[VD:%.*]], bfloat noundef [[RS1:%.*]], <vscale x 2 x bfloat> [[VS2:%.*]], i64 noundef [[VL:%.*]]) #[[ATTR0]] {
+// CHECK-RV64-NEXT: [[ENTRY:.*:]]
+// CHECK-RV64-NEXT: [[TMP0:%.*]] = call <vscale x 2 x bfloat> @llvm.riscv.vfmsac.nxv2bf16.bf16.i64(<vscale x 2 x bfloat> [[VD]], bfloat [[RS1]], <vscale x 2 x bfloat> [[VS2]], i64 7, i64 [[VL]], i64 2)
+// CHECK-RV64-NEXT: ret <vscale x 2 x bfloat> [[TMP0]]
+//
+vbfloat16mf2_t test_vfmsac_vf_bf16mf2_tu(vbfloat16mf2_t vd, __bf16 rs1, vbfloat16mf2_t vs2, size_t vl) {
+ return __riscv_vfmsac_vf_bf16mf2_tu(vd, rs1, vs2, vl);
+}
+
+// CHECK-RV64-LABEL: define dso_local <vscale x 4 x bfloat> @test_vfmsac_vv_bf16m1_tu(
+// CHECK-RV64-SAME: <vscale x 4 x bfloat> [[VD:%.*]], <vscale x 4 x bfloat> [[VS1:%.*]], <vscale x 4 x bfloat> [[VS2:%.*]], i64 noundef [[VL:%.*]]) #[[ATTR0]] {
+// CHECK-RV64-NEXT: [[ENTRY:.*:]]
+// CHECK-RV64-NEXT: [[TMP0:%.*]] = call <vscale x 4 x bfloat> @llvm.riscv.vfmsac.nxv4bf16.nxv4bf16.i64(<vscale x 4 x bfloat> [[VD]], <vscale x 4 x bfloat> [[VS1]], <vscale x 4 x bfloat> [[VS2]], i64 7, i64 [[VL]], i64 2)
+// CHECK-RV64-NEXT: ret <vscale x 4 x bfloat> [[TMP0]]
+//
+vbfloat16m1_t test_vfmsac_vv_bf16m1_tu(vbfloat16m1_t vd, vbfloat16m1_t vs1, vbfloat16m1_t vs2, size_t vl) {
+ return __riscv_vfmsac_vv_bf16m1_tu(vd, vs1, vs2, vl);
+}
+
+// CHECK-RV64-LABEL: define dso_local <vscale x 4 x bfloat> @test_vfmsac_vf_bf16m1_tu(
+// CHECK-RV64-SAME: <vscale x 4 x bfloat> [[VD:%.*]], bfloat noundef [[RS1:%.*]], <vscale x 4 x bfloat> [[VS2:%.*]], i64 noundef [[VL:%.*]]) #[[ATTR0]] {
+// CHECK-RV64-NEXT: [[ENTRY:.*:]]
+// CHECK-RV64-NEXT: [[TMP0:%.*]] = call <vscale x 4 x bfloat> @llvm.riscv.vfmsac.nxv4bf16.bf16.i64(<vscale x 4 x bfloat> [[VD]], bfloat [[RS1]], <vscale x 4 x bfloat> [[VS2]], i64 7, i64 [[VL]], i64 2)
+// CHECK-RV64-NEXT: ret <vscale x 4 x bfloat> [[TMP0]]
+//
+vbfloat16m1_t test_vfmsac_vf_bf16m1_tu(vbfloat16m1_t vd, __bf16 rs1, vbfloat16m1_t vs2, size_t vl) {
+ return __riscv_vfmsac_vf_bf16m1_tu(vd, rs1, vs2, vl);
+}
+
+// CHECK-RV64-LABEL: define dso_local <vscale x 8 x bfloat> @test_vfmsac_vv_bf16m2_tu(
+// CHECK-RV64-SAME: <vscale x 8 x bfloat> [[VD:%.*]], <vscale x 8 x bfloat> [[VS1:%.*]], <vscale x 8 x bfloat> [[VS2:%.*]], i64 noundef [[VL:%.*]]) #[[ATTR0]] {
+// CHECK-RV64-NEXT: [[ENTRY:.*:]]
+// CHECK-RV64-NEXT: [[TMP0:%.*]] = call <vscale x 8 x bfloat> @llvm.riscv.vfmsac.nxv8bf16.nxv8bf16.i64(<vscale x 8 x bfloat> [[VD]], <vscale x 8 x bfloat> [[VS1]], <vscale x 8 x bfloat> [[VS2]], i64 7, i64 [[VL]], i64 2)
+// CHECK-RV64-NEXT: ret <vscale x 8 x bfloat> [[TMP0]]
+//
+vbfloat16m2_t test_vfmsac_vv_bf16m2_tu(vbfloat16m2_t vd, vbfloat16m2_t vs1, vbfloat16m2_t vs2, size_t vl) {
+ return __riscv_vfmsac_vv_bf16m2_tu(vd, vs1, vs2, vl);
+}
+
+// CHECK-RV64-LABEL: define dso_local <vscale x 8 x bfloat> @test_vfmsac_vf_bf16m2_tu(
+// CHECK-RV64-SAME: <vscale x 8 x bfloat> [[VD:%.*]], bfloat noundef [[RS1:%.*]], <vscale x 8 x bfloat> [[VS2:%.*]], i64 noundef [[VL:%.*]]) #[[ATTR0]] {
+// CHECK-RV64-NEXT: [[ENTRY:.*:]]
+// CHECK-RV64-NEXT: [[TMP0:%.*]] = call <vscale x 8 x bfloat> @llvm.riscv.vfmsac.nxv8bf16.bf16.i64(<vscale x 8 x bfloat> [[VD]], bfloat [[RS1]], <vscale x 8 x bfloat> [[VS2]], i64 7, i64 [[VL]], i64 2)
+// CHECK-RV64-NEXT: ret <vscale x 8 x bfloat> [[TMP0]]
+//
+vbfloat16m2_t test_vfmsac_vf_bf16m2_tu(vbfloat16m2_t vd, __bf16 rs1, vbfloat16m2_t vs2, size_t vl) {
+ return __riscv_vfmsac_vf_bf16m2_tu(vd, rs1, vs2, vl);
+}
+
+// CHECK-RV64-LABEL: define dso_local <vscale x 16 x bfloat> @test_vfmsac_vv_bf16m4_tu(
+// CHECK-RV64-SAME: <vscale x 16 x bfloat> [[VD:%.*]], <vscale x 16 x bfloat> [[VS1:%.*]], <vscale x 16 x bfloat> [[VS2:%.*]], i64 noundef [[VL:%.*]]) #[[ATTR0]] {
+// CHECK-RV64-NEXT: [[ENTRY:.*:]]
+// CHECK-RV64-NEXT: [[TMP0:%.*]] = call <vscale x 16 x bfloat> @llvm.riscv.vfmsac.nxv16bf16.nxv16bf16.i64(<vscale x 16 x bfloat> [[VD]], <vscale x 16 x bfloat> [[VS1]], <vscale x 16 x bfloat> [[VS2]], i64 7, i64 [[VL]], i64 2)
+// CHECK-RV64-NEXT: ret <vscale x 16 x bfloat> [[TMP0]]
+//
+vbfloat16m4_t test_vfmsac_vv_bf16m4_tu(vbfloat16m4_t vd, vbfloat16m4_t vs1, vbfloat16m4_t vs2, size_t vl) {
+ return __riscv_vfmsac_vv_bf16m4_tu(vd, vs1, vs2, vl);
+}
+
+// CHECK-RV64-LABEL: define dso_local <vscale x 16 x bfloat> @test_vfmsac_vf_bf16m4_tu(
+// CHECK-RV64-SAME: <vscale x 16 x bfloat> [[VD:%.*]], bfloat noundef [[RS1:%.*]], <vscale x 16 x bfloat> [[VS2:%.*]], i64 noundef [[VL:%.*]]) #[[ATTR0]] {
+// CHECK-RV64-NEXT: [[ENTRY:.*:]]
+// CHECK-RV64-NEXT: [[TMP0:%.*]] = call <vscale x 16 x bfloat> @llvm.riscv.vfmsac.nxv16bf16.bf16.i64(<vscale x 16 x bfloat> [[VD]], bfloat [[RS1]], <vscale x 16 x bfloat> [[VS2]], i64 7, i64 [[VL]], i64 2)
+// CHECK-RV64-NEXT: ret <vscale x 16 x bfloat> [[TMP0]]
+//
+vbfloat16m4_t test_vfmsac_vf_bf16m4_tu(vbfloat16m4_t vd, __bf16 rs1, vbfloat16m4_t vs2, size_t vl) {
+ return __riscv_vfmsac_vf_bf16m4_tu(vd, rs1, vs2, vl);
+}
+
+// CHECK-RV64-LABEL: define dso_local <vscale x 32 x bfloat> @test_vfmsac_vv_bf16m8_tu(
+// CHECK-RV64-SAME: <vscale x 32 x bfloat> [[VD:%.*]], <vscale x 32 x bfloat> [[VS1:%.*]], <vscale x 32 x bfloat> [[VS2:%.*]], i64 noundef [[VL:%.*]]) #[[ATTR0]] {
+// CHECK-RV64-NEXT: [[ENTRY:.*:]]
+// CHECK-RV64-NEXT: [[TMP0:%.*]] = call <vscale x 32 x bfloat> @llvm.riscv.vfmsac.nxv32bf16.nxv32bf16.i64(<vscale x 32 x bfloat> [[VD]], <vscale x 32 x bfloat> [[VS1]], <vscale x 32 x bfloat> [[VS2]], i64 7, i64 [[VL]], i64 2)
+// CHECK-RV64-NEXT: ret <vscale x 32 x bfloat> [[TMP0]]
+//
+vbfloat16m8_t test_vfmsac_vv_bf16m8_tu(vbfloat16m8_t vd, vbfloat16m8_t vs1, vbfloat16m8_t vs2, size_t vl) {
+ return __riscv_vfmsac_vv_bf16m8_tu(vd, vs1, vs2, vl);
+}
+
+// CHECK-RV64-LABEL: define dso_local <vscale x 32 x bfloat> @test_vfmsac_vf_bf16m8_tu(
+// CHECK-RV64-SAME: <vscale x 32 x bfloat> [[VD:%.*]], bfloat noundef [[RS1:%.*]], <vscale x 32 x bfloat> [[VS2:%.*]], i64 noundef [[VL:%.*]]) #[[ATTR0]] {
+// CHECK-RV64-NEXT: [[ENTRY:.*:]]
+// CHECK-RV64-NEXT: [[TMP0:%.*]] = call <vscale x 32 x bfloat> @llvm.riscv.vfmsac.nxv32bf16.bf16.i64(<vscale x 32 x bfloat> [[VD]], bfloat [[RS1]], <vscale x 32 x bfloat> [[VS2]], i64 7, i64 [[VL]], i64 2)
+// CHECK-RV64-NEXT: ret <vscale x 32 x bfloat> [[TMP0]]
+//
+vbfloat16m8_t test_vfmsac_vf_bf16m8_tu(vbfloat16m8_t vd, __bf16 rs1, vbfloat16m8_t vs2, size_t vl) {
+ return __riscv_vfmsac_vf_bf16m8_tu(vd, rs1, vs2, vl);
+}
+
+// CHECK-RV64-LABEL: define dso_local <vscale x 1 x bfloat> @test_vfmsac_vv_bf16mf4_tum(
+// CHECK-RV64-SAME: <vscale x 1 x i1> [[MASK:%.*]], <vscale x 1 x bfloat> [[VD:%.*]], <vscale x 1 x bfloat> [[VS1:%.*]], <vscale x 1 x bfloat> [[VS2:%.*]], i64 noundef [[VL:%.*]]) #[[ATTR0]] {
+// CHECK-RV64-NEXT: [[ENTRY:.*:]]
+// CHECK-RV64-NEXT: [[TMP0:%.*]] = call <vscale x 1 x bfloat> @llvm.riscv.vfmsac.mask.nxv1bf16.nxv1bf16.i64(<vscale x 1 x bfloat> [[VD]], <vscale x 1 x bfloat> [[VS1]], <vscale x 1 x bfloat> [[VS2]], <vscale x 1 x i1> [[MASK]], i64 7, i64 [[VL]], i64 2)
+// CHECK-RV64-NEXT: ret <vscale x 1 x bfloat> [[TMP0]]
+//
+vbfloat16mf4_t test_vfmsac_vv_bf16mf4_tum(vbool64_t mask, vbfloat16mf4_t vd, vbfloat16mf4_t vs1, vbfloat16mf4_t vs2, size_t vl) {
+ return __riscv_vfmsac_vv_bf16mf4_tum(mask, vd, vs1, vs2, vl);
+}
+
+// CHECK-RV64-LABEL: define dso_local <vscale x 1 x bfloat> @test_vfmsac_vf_bf16mf4_tum(
+// CHECK-RV64-SAME: <vscale x 1 x i1> [[MASK:%.*]], <vscale x 1 x bfloat> [[VD:%.*]], bfloat noundef [[RS1:%.*]], <vscale x 1 x bfloat> [[VS2:%.*]], i64 noundef [[VL:%.*]]) #[[ATTR0]] {
+// CHECK-RV64-NEXT: [[ENTRY:.*:]]
+// CHECK-RV64-NEXT: [[TMP0:%.*]] = call <vscale x 1 x bfloat> @llvm.riscv.vfmsac.mask.nxv1bf16.bf16.i64(<vscale x 1 x bfloat> [[VD]], bfloat [[RS1]], <vscale x 1 x bfloat> [[VS2]], <vscale x 1 x i1> [[MASK]], i64 7, i64 [[VL]], i64 2)
+// CHECK-RV64-NEXT: ret <vscale x 1 x bfloat> [[TMP0]]
+//
+vbfloat16mf4_t test_vfmsac_vf_bf16mf4_tum(vbool64_t mask, vbfloat16mf4_t vd, __bf16 rs1, vbfloat16mf4_t vs2, size_t vl) {
+ return __riscv_vfmsac_vf_bf16mf4_tum(mask, vd, rs1, vs2, vl);
+}
+
+// CHECK-RV64-LABEL: define dso_local <vscale x 2 x bfloat> @test_vfmsac_vv_bf16mf2_tum(
+// CHECK-RV64-SAME: <vscale x 2 x i1> [[MASK:%.*]], <vscale x 2 x bfloat> [[VD:%.*]], <vscale x 2 x bfloat> [[VS1:%.*]], <vscale x 2 x bfloat> [[VS2:%.*]], i64 noundef [[VL:%.*]]) #[[ATTR0]] {
+// CHECK-RV64-NEXT: [[ENTRY:.*:]]
+// CHECK-RV64-NEXT: [[TMP0:%.*]] = call <vscale x 2 x bfloat> @llvm.riscv.vfmsac.mask.nxv2bf16.nxv2bf16.i64(<vscale x 2 x bfloat> [[VD]], <vscale x 2 x bfloat> [[VS1]], <vscale x 2 x bfloat> [[VS2]], <vscale x 2 x i1> [[MASK]], i64 7, i64 [[VL]], i64 2)
+// CHECK-RV64-NEXT: ret <vscale x 2 x bfloat> [[TMP0]]
+//
+vbfloat16mf2_t test_vfmsac_vv_bf16mf2_tum(vbool32_t mask, vbfloat16mf2_t vd, vbfloat16mf2_t vs1, vbfloat16mf2_t vs2, size_t vl) {
+ return __riscv_vfmsac_vv_bf16mf2_tum(mask, vd, vs1, vs2, vl);
+}
+
+// CHECK-RV64-LABEL: define dso_local <vscale x 2 x bfloat> @test_vfmsac_vf_bf16mf2_tum(
+// CHECK-RV64-SAME: <vscale x 2 x i1> [[MASK:%.*]], <vscale x 2 x bfloat> [[VD:%.*]], bfloat noundef [[RS1:%.*]], <vscale x 2 x bfloat> [[VS2:%.*]], i64 noundef [[VL:%.*]]) #[[ATTR0]] {
+// CHECK-RV64-NEXT: [[ENTRY:.*:]]
+// CHECK-RV64-NEXT: [[TMP0:%.*]] = call <vscale x 2 x bfloat> @llvm.riscv.vfmsac.mask.nxv2bf16.bf16.i64(<vscale x 2 x bfloat> [[VD]], bfloat [[RS1]], <vscale x 2 x bfloat> [[VS2]], <vscale x 2 x i1> [[MASK]], i64 7, i64 [[VL]], i64 2)
+// CHECK-RV64-NEXT: ret <vscale x 2 x bfloat> [[TMP0]]
+//
+vbfloat16mf2_t test_vfmsac_vf_bf16mf2_tum(vbool32_t mask, vbfloat16mf2_t vd, __bf16 rs1, vbfloat16mf2_t vs2, size_t vl) {
+ return __riscv_vfmsac_vf_bf16mf2_tum(mask, vd, rs1, vs2, vl);
+}
+
+// CHECK-RV64-LABEL: define dso_local <vscale x 4 x bfloat> @test_vfmsac_vv_bf16m1_tum(
+// CHECK-RV64-SAME: <vscale x 4 x i1> [[MASK:%.*]], <vscale x 4 x bfloat> [[VD:%.*]], <vscale x 4 x bfloat> [[VS1:%.*]], <vscale x 4 x bfloat> [[VS2:%.*]], i64 noundef [[VL:%.*]]) #[[ATTR0]] {
+// CHECK-RV64-NEXT: [[ENTRY:.*:]]
+// CHECK-RV64-NEXT: [[TMP0:%.*]] = call <vscale x 4 x bfloat> @llvm.riscv.vfmsac.mask.nxv4bf16.nxv4bf16.i64(<vscale x 4 x bfloat> [[VD]], <vscale x 4 x bfloat> [[VS1]], <vscale x 4 x bfloat> [[VS2]], <vscale x 4 x i1> [[MASK]], i64 7, i64 [[VL]], i64 2)
+// CHECK-RV64-NEXT: ret <vscale x 4 x bfloat> [[TMP0]]
+//
+vbfloat16m1_t test_vfmsac_vv_bf16m1_tum(vbool16_t mask, vbfloat16m1_t vd, vbfloat16m1_t vs1, vbfloat16m1_t vs2, size_t vl) {
+ return __riscv_vfmsac_vv_bf16m1_tum(mask, vd, vs1, vs2, vl);
+}
+
+// CHECK-RV64-LABEL: define dso_local <vscale x 4 x bfloat> @test_vfmsac_vf_bf16m1_tum(
+// CHECK-RV64-SAME: <vscale x 4 x i1> [[MASK:%.*]], <vscale x 4 x bfloat> [[VD:%.*]], bfloat noundef [[RS1:%.*]], <vscale x 4 x bfloat> [[VS2:%.*]], i64 noundef [[VL:%.*]]) #[[ATTR0]] {
+// CHECK-RV64-NEXT: [[ENTRY:.*:]]
+// CHECK-RV64-NEXT: [[TMP0:%.*]] = call <vscale x 4 x bfloat> @llvm.riscv.vfmsac.mask.nxv4bf16.bf16.i64(<vscale x 4 x bfloat> [[VD]], bfloat [[RS1]], <vscale x 4 x bfloat> [[VS2]], <vscale x 4 x i1> [[MASK]], i64 7, i64 [[VL]], i64 2)
+// CHECK-RV64-NEXT: ret <vscale x 4 x bfloat> [[TMP0]]
+//
+vbfloat16m1_t test_vfmsac_vf_bf16m1_tum(vbool16_t mask, vbfloat16m1_t vd, __bf16 rs1, vbfloat16m1_t vs2, size_t vl) {
+ return __riscv_vfmsac_vf_bf16m1_tum(mask, vd, rs1, vs2, vl);
+}
+
+// CHECK-RV64-LABEL: define dso_local <vscale x 8 x bfloat> @test_vfmsac_vv_bf16m2_tum(
+// CHECK-RV64-SAME: <vscale x 8 x i1> [[MASK:%.*]], <vscale x 8 x bfloat> [[VD:%.*]], <vscale x 8 x bfloat> [[VS1:%.*]], <vscale x 8 x bfloat> [[VS2:%.*]], i64 noundef [[VL:%.*]]) #[[ATTR0]] {
+// CHECK-RV64-NEXT: [[ENTRY:.*:]]
+// CHECK-RV64-NEXT: [[TMP0:%.*]] = call <vscale x 8 x bfloat> @llvm.riscv.vfmsac.mask.nxv8bf16.nxv8bf16.i64(<vscale x 8 x bfloat> [[VD]], <vscale x 8 x bfloat> [[VS1]], <vscale x 8 x bfloat> [[VS2]], <vscale x 8 x i1> [[MASK]], i64 7, i64 [[VL]], i64 2)
+// CHECK-RV64-NEXT: ret <vscale x 8 x bfloat> [[TMP0]]
+//
+vbfloat16m2_t test_vfmsac_vv_bf16m2_tum(vbool8_t mask, vbfloat16m2_t vd, vbfloat16m2_t vs1, vbfloat16m2_t vs2, size_t vl) {
+ return __riscv_vfmsac_vv_bf16m2_tum(mask, vd, vs1, vs2, vl);
+}
+
+// CHECK-RV64-LABEL: define dso_local <vscale x 8 x bfloat> @test_vfmsac_vf_bf16m2_tum(
+// CHECK-RV64-SAME: <vscale x 8 x i1> [[MASK:%.*]], <vscale x 8 x bfloat> [[VD:%.*]], bfloat noundef [[RS1:%.*]], <vscale x 8 x bfloat> [[VS2:%.*]], i64 noundef [[VL:%.*]]) #[[ATTR0]] {
+// CHECK-RV64-NEXT: [[ENTRY:.*:]]
+// CHECK-RV64-NEXT: [[TMP0:%.*]] = call <vscale x 8 x bfloat> @llvm.riscv.vfmsac.mask.nxv8bf16.bf16.i64(<vscale x 8 x bfloat> [[VD]], bfloat [[RS1]], <vscale x 8 x bfloat> [[VS2]], <vscale x 8 x i1> [[MASK]], i64 7, i64 [[VL]], i64 2)
+// CHECK-RV64-NEXT: ret <vscale x 8 x bfloat> [[TMP0]]
+//
+vbfloat16m2_t test_vfmsac_vf_bf16m2_tum(vbool8_t mask, vbfloat16m2_t vd, __bf16 rs1, vbfloat16m2_t vs2, size_t vl) {
+ return __riscv_vfmsac_vf_bf16m2_tum(mask, vd, rs1, vs2, vl);
+}
+
+// CHECK-RV64-LABEL: define dso_local <vscale x 16 x bfloat> @test_vfmsac_vv_bf16m4_tum(
+// CHECK-RV64-SAME: <vscale x 16 x i1> [[MASK:%.*]], <vscale x 16 x bfloat> [[VD:%.*]], <vscale x 16 x bfloat> [[VS1:%.*]], <vscale x 16 x bfloat> [[VS2:%.*]], i64 noundef [[VL:%.*]]) #[[ATTR0]] {
+// CHECK-RV64-NEXT: [[ENTRY:.*:]]
+// CHECK-RV64-NEXT: [[TMP0:%.*]] = call <vscale x 16 x bfloat> @llvm.riscv.vfmsac.mask.nxv16bf16.nxv16bf16.i64(<vscale x 16 x bfloat> [[VD]], <vscale x 16 x bfloat> [[VS1]], <vscale x 16 x bfloat> [[VS2]], <vscale x 16 x i1> [[MASK]], i64 7, i64 [[VL]], i64 2)
+// CHECK-RV64-NEXT: ret <vscale x 16 x bfloat> [[TMP0]]
+//
+vbfloat16m4_t test_vfmsac_vv_bf16m4_tum(vbool4_t mask, vbfloat16m4_t vd, vbfloat16m4_t vs1, vbfloat16m4_t vs2, size_t vl) {
+ return __riscv_vfmsac_vv_bf16m4_tum(mask, vd, vs1, vs2, vl);
+}
+
+// CHECK-RV64-LABEL: define dso_local <vscale x 16 x bfloat> @test_vfmsac_vf_bf16m4_tum(
+// CHECK-RV64-SAME: <vscale x 16 x i1> [[MASK:%.*]], <vscale x 16 x bfloat> [[VD:%.*]], bfloat noundef [[RS1:%.*]], <vscale x 16 x bfloat> [[VS2:%.*]], i64 noundef [[VL:%.*]]) #[[ATTR0]] {
+// CHECK-RV64-NEXT: [[ENTRY:.*:]]
+// CHECK-RV64-NEXT: [[TMP0:%.*]] = call <vscale x 16 x bfloat> @llvm.riscv.vfmsac.mask.nxv16bf16.bf16.i64(<vscale x 16 x bfloat> [[VD]], bfloat [[RS1]], <vscale x 16 x bfloat> [[VS2]], <vscale x 16 x i1> [[MASK]], i64 7, i64 [[VL]], i64 2)
+// CHECK-RV64-NEXT: ret <vscale x 16 x bfloat> [[TMP0]]
+//
+vbfloat16m4_t test_vfmsac_vf_bf16m4_tum(vbool4_t mask, vbfloat16m4_t vd, __bf16 rs1, vbfloat16m4_t vs2, size_t vl) {
+ return __riscv_vfmsac_vf_bf16m4_tum(mask, vd, rs1, vs2, vl);
+}
+
+// CHECK-RV64-LABEL: define dso_local <vscale x 32 x bfloat> @test_vfmsac_vv_bf16m8_tum(
+// CHECK-RV64-SAME: <vscale x 32 x i1> [[MASK:%.*]], <vscale x 32 x bfloat> [[VD:%.*]], <vscale x 32 x bfloat> [[VS1:%.*]], <vscale x 32 x bfloat> [[VS2:%.*]], i64 noundef [[VL:%.*]]) #[[ATTR0]] {
+// CHECK-RV64-NEXT: [[ENTRY:.*:]]
+// CHECK-RV64-NEXT: [[TMP0:%.*]] = call <vscale x 32 x bfloat> @llvm.riscv.vfmsac.mask.nxv32bf16.nxv32bf16.i64(<vscale x 32 x bfloat> [[VD]], <vscale x 32 x bfloat> [[VS1]], <vscale x 32 x bfloat> [[VS2]], <vscale x 32 x i1> [[MASK]], i64 7, i64 [[VL]], i64 2)
+// CHECK-RV64-NEXT: ret <vscale x 32 x bfloat> [[TMP0]]
+//
+vbfloat16m8_t test_vfmsac_vv_bf16m8_tum(vbool2_t mask, vbfloat16m8_t vd, vbfloat16m8_t vs1, vbfloat16m8_t vs2, size_t vl) {
+ return __riscv_vfmsac_vv_bf16m8_tum(mask, vd, vs1, vs2, vl);
+}
+
+// CHECK-RV64-LABEL: define dso_local <vscale x 32 x bfloat> @test_vfmsac_vf_bf16m8_tum(
+// CHECK-RV64-SAME: <vscale x 32 x i1> [[MASK:%.*]], <vscale x 32 x bfloat> [[VD:%.*]], bfloat noundef [[RS1:%.*]], <vscale x 32 x bfloat> [[VS2:%.*]], i64 noundef [[VL:%.*]]) #[[ATTR0]] {
+// CHECK-RV64-NEXT: [[ENTRY:.*:]]
+// CHECK-RV64-NEXT: [[TMP0:%.*]] = call <vscale x 32 x bfloat> @llvm.riscv.vfmsac.mask.nxv32bf16.bf16.i64(<vscale x 32 x bfloat> [[VD]], bfloat [[RS1]], <vscale x 32 x bfloat> [[VS2]], <vscale x 32 x i1> [[MASK]], i64 7, i64 [[VL]], i64 2)
+// CHECK-RV64-NEXT: ret <vscale x 32 x bfloat> [[TMP0]]
+//
+vbfloat16m8_t test_vfmsac_vf_bf16m8_tum(vbool2_t mask, vbfloat16m8_t vd, __bf16 rs1, vbfloat16m8_t vs2, size_t vl) {
+ return __riscv_vfmsac_vf_bf16m8_tum(mask, vd, rs1, vs2, vl);
+}
+
+// CHECK-RV64-LABEL: define dso_local <vscale x 1 x bfloat> @test_vfmsac_vv_bf16mf4_tumu(
+// CHECK-RV64-SAME: <vscale x 1 x i1> [[MASK:%.*]], <vscale x 1 x bfloat> [[VD:%.*]], <vscale x 1 x bfloat> [[VS1:%.*]], <vscale x 1 x bfloat> [[VS2:%.*]], i64 noundef [[VL:%.*]]) #[[ATTR0]] {
+// CHECK-RV64-NEXT: [[ENTRY:.*:]]
+// CHECK-RV64-NEXT: [[TMP0:%.*]] = call <vscale x 1 x bfloat> @llvm.riscv.vfmsac.mask.nxv1bf16.nxv1bf16.i64(<vscale x 1 x bfloat> [[VD]], <vscale x 1 x bfloat> [[VS1]], <vscale x 1 x bfloat> [[VS2]], <vscale x 1 x i1> [[MASK]], i64 7, i64 [[VL]], i64 0)
+// CHECK-RV64-NEXT: ret <vscale x 1 x bfloat> [[TMP0]]
+//
+vbfloat16mf4_t test_vfmsac_vv_bf16mf4_tumu(vbool64_t mask, vbfloat16mf4_t vd, vbfloat16mf4_t vs1, vbfloat16mf4_t vs2, size_t vl) {
+ return __riscv_vfmsac_vv_bf16mf4_tumu(mask, vd, vs1, vs2, vl);
+}
+
+// CHECK-RV64-LABEL: define dso_local <vscale x 1 x bfloat> @test_vfmsac_vf_bf16mf4_tumu(
+// CHECK-RV64-SAME: <vscale x 1 x i1> [[MASK:%.*]], <vscale x 1 x bfloat> [[VD:%.*]], bfloat noundef [[RS1:%.*]], <vscale x 1 x bfloat> [[VS2:%.*]], i64 noundef [[VL:%.*]]) #[[ATTR0]] {
+// CHECK-RV64-NEXT: [[ENTRY:.*:]]
+// CHECK-RV64-NEXT: [[TMP0:%.*]] = call <vscale x 1 x bfloat> @llvm.riscv.vfmsac.mask.nxv1bf16.bf16.i64(<vscale x 1 x bfloat> [[VD]], bfloat [[RS1]], <vscale x 1 x bfloat> [[VS2]], <vscale x 1 x i1> [[MASK]], i64 7, i64 [[VL]], i64 0)
+// CHECK-RV64-NEXT: ret <vscale x 1 x bfloat> [[TMP0]]
+//
+vbfloat16mf4_t test_vfmsac_vf_bf16mf4_tumu(vbool64_t mask, vbfloat16mf4_t vd, __bf16 rs1, vbfloat16mf4_t vs2, size_t vl) {
+ return __riscv_vfmsac_vf_bf16mf4_tumu(mask, vd, rs1, vs2, vl);
+}
+
+// CHECK-RV64-LABEL: define dso_local <vscale x 2 x bfloat> @test_vfmsac_vv_bf16mf2_tumu(
+// CHECK-RV64-SAME: <vscale x 2 x i1> [[MASK:%.*]], <vscale x 2 x bfloat> [[VD:%.*]], <vscale x 2 x bfloat> [[VS1:%.*]], <vscale x 2 x bfloat> [[VS2:%.*]], i64 noundef [[VL:%.*]]) #[[ATTR0]] {
+// CHECK-RV64-NEXT: [[ENTRY:.*:]]
+// CHECK-RV64-NEXT: [[TMP0:%.*]] = call <vscale x 2 x bfloat> @llvm.riscv.vfmsac.mask.nxv2bf16.nxv2bf16.i64(<vscale x 2 x bfloat> [[VD]], <vscale x 2 x bfloat> [[VS1]], <vscale x 2 x bfloat> [[VS2]], <vscale x 2 x i1> [[MASK]], i64 7, i64 [[VL]], i64 0)
+// CHECK-RV64-NEXT: ret <vscale x 2 x bfloat> [[TMP0]]
+//
+vbfloat16mf2_t test_vfmsac_vv_bf16mf2_tumu(vbool32_t mask, vbfloat16mf2_t vd, vbfloat16mf2_t vs1, vbfloat16mf2_t vs2, size_t vl) {
+ return __riscv_vfmsac_vv_bf16mf2_tumu(mask, vd, vs1, vs2, vl);
+}
+
+// CHECK-RV64-LABEL: define dso_local <vscale x 2 x bfloat> @test_vfmsac_vf_bf16mf2_tumu(
+// CHECK-RV64-SAME: <vscale x 2 x i1> [[MASK:%.*]], <vscale x 2 x bfloat> [[VD:%.*]], bfloat noundef [[RS1:%.*]], <vscale x 2 x bfloat> [[VS2:%.*]], i64 noundef [[VL:%.*]]) #[[ATTR0]] {
+// CHECK-RV64-NEXT: [[ENTRY:.*:]]
+// CHECK-RV64-NEXT: [[TMP0:%.*]] = call <vscale x 2 x bfloat> @llvm.riscv.vfmsac.mask.nxv2bf16.bf16.i64(<vscale x 2 x bfloat> [[VD]], bfloat [[RS1]], <vscale x 2 x bfloat> [[VS2]], <vscale x 2 x i1> [[MASK]], i64 7, i64 [[VL]], i64 0)
+// CHECK-RV64-NEXT: ret <vscale x 2 x bfloat> [[TMP0]]
+//
+vbfloat16mf2_t test_vfmsac_vf_bf16mf2_tumu(vbool32_t mask, vbfloat16mf2_t vd, __bf16 rs1, vbfloat16mf2_t vs2, size_t vl) {
+ return __riscv_vfmsac_vf_bf16mf2_tumu(mask, vd, rs1, vs2, vl);
+}
+
+// CHECK-RV64-LABEL: define dso_local <vscale x 4 x bfloat> @test_vfmsac_vv_bf16m1_tumu(
+// CHECK-RV64-SAME: <vscale x 4 x i1> [[MASK:%.*]], <vscale x 4 x bfloat> [[VD:%.*]], <vscale x 4 x bfloat> [[VS1:%.*]], <vscale x 4 x bfloat> [[VS2:%.*]], i64 noundef [[VL:%.*]]) #[[ATTR0]] {
+// CHECK-RV64-NEXT: [[ENTRY:.*:]]
+// CHECK-RV64-NEXT: [[TMP0:%.*]] = call <vscale x 4 x bfloat> @llvm.riscv.vfmsac.mask.nxv4bf16.nxv4bf16.i64(<vscale x 4 x bfloat> [[VD]], <vscale x 4 x bfloat> [[VS1]], <vscale x 4 x bfloat> [[VS2]], <vscale x 4 x i1> [[MASK]], i64 7, i64 [[VL]], i64 0)
+// CHECK-RV64-NEXT: ret <vscale x 4 x bfloat> [[TMP0]]
+//
+vbfloat16m1_t test_vfmsac_vv_bf16m1_tumu(vbool16_t mask, vbfloat16m1_t vd, vbfloat16m1_t vs1, vbfloat16m1_t vs2, size_t vl) {
+ return __riscv_vfmsac_vv_bf16m1_tumu(mask, vd, vs1, vs2, vl);
+}
+
+// CHECK-RV64-LABEL: define dso_local <vscale x 4 x bfloat> @test_vfmsac_vf_bf16m1_tumu(
+// CHECK-RV64-SAME: <vscale x 4 x i1> [[MASK:%.*]], <vscale x 4 x bfloat> [[VD:%.*]], bfloat noundef [[RS1:%.*]], <vscale x 4 x bfloat> [[VS2:%.*]], i64 noundef [[VL:%.*]]) #[[ATTR0]] {
+// CHECK-RV64-NEXT: [[ENTRY:.*:]]
+// CHECK-RV64-NEXT: [[TMP0:%.*]] = call <vscale x 4 x bfloat> @llvm.riscv.vfmsac.mask.nxv4bf16.bf16.i64(<vscale x 4 x bfloat> [[VD]], bfloat [[RS1]], <vscale x 4 x bfloat> [[VS2]], <vscale x 4 x i1> [[MASK]], i64 7, i64 [[VL]], i64 0)
+// CHECK-RV64-NEXT: ret <vscale x 4 x bfloat> [[TMP0]]
+//
+vbfloat16m1_t test_vfmsac_vf_bf16m1_tumu(vbool16_t mask, vbfloat16m1_t vd, __bf16 rs1, vbfloat16m1_t vs2, size_t vl) {
+ return __riscv_vfmsac_vf_bf16m1_tumu(mask, vd, rs1, vs2, vl);
+}
+
+// CHECK-RV64-LABEL: define dso_local <vscale x 8 x bfloat> @test_vfmsac_vv_bf16m2_tumu(
+// CHECK-RV64-SAME: <vscale x 8 x i1> [[MASK:%.*]], <vscale x 8 x bfloat> [[VD:%.*]], <vscale x 8 x bfloat> [[VS1:%.*]], <vscale x 8 x bfloat> [[VS2:%.*]], i64 noundef [[VL:%.*]]) #[[ATTR0]] {
+// CHECK-RV64-NEXT: [[ENTRY:.*:]]
+// CHECK-RV64-NEXT: [[TMP0:%.*]] = call <vscale x 8 x bfloat> @llvm.riscv.vfmsac.mask.nxv8bf16.nxv8bf16.i64(<vscale x 8 x bfloat> [[VD]], <vscale x 8 x bfloat> [[VS1]], <vscale x 8 x bfloat> [[VS2]], <vscale x 8 x i1> [[MASK]], i64 7, i64 [[VL]], i64 0)
+// CHECK-RV64-NEXT: ret <vscale x 8 x bfloat> [[TMP0]]
+//
+vbfloat16m2_t test_vfmsac_vv_bf16m2_tumu(vbool8_t mask, vbfloat16m2_t vd, vbfloat16m2_t vs1, vbfloat16m2_t vs2, size_t vl) {
+ return __riscv_vfmsac_vv_bf16m2_tumu(mask, vd, vs1, vs2, vl);
+}
+
+// CHECK-RV64-LABEL: define dso_local <vscale x 8 x bfloat> @test_vfmsac_vf_bf16m2_tumu(
+// CHECK-RV64-SAME: <vscale x 8 x i1> [[MASK:%.*]], <vscale x 8 x bfloat> [[VD:%.*]], bfloat noundef [[RS1:%.*]], <vscale x 8 x bfloat> [[VS2:%.*]], i64 noundef [[VL:%.*]]) #[[ATTR0]] {
+// CHECK-RV64-NEXT: [[ENTRY:.*:]]
+// CHECK-RV64-NEXT: [[TMP0:%.*]] = call <vscale x 8 x bfloat> @llvm.riscv.vfmsac.mask.nxv8bf16.bf16.i64(<vscale x 8 x bfloat> [[VD]], bfloat [[RS1]], <vscale x 8 x bfloat> [[VS2]], <vscale x 8 x i1> [[MASK]], i64 7, i64 [[VL]], i64 0)
+// CHECK-RV64-NEXT: ret <vscale x 8 x bfloat> [[TMP0]]
+//
+vbfloat16m2_t test_vfmsac_vf_bf16m2_tumu(vbool8_t mask, vbfloat16m2_t vd, __bf16 rs1, vbfloat16m2_t vs2, size_t vl) {
+ return __riscv_vfmsac_vf_bf16m2_tumu(mask, vd, rs1, vs2, vl);
+}
+
+// CHECK-RV64-LABEL: define dso_local <vscale x 16 x bfloat> @test_vfmsac_vv_bf16m4_tumu(
+// CHECK-RV64-SAME: <vscale x 16 x i1> [[MASK:%.*]], <vscale x 16 x bfloat> [[VD:%.*]], <vscale x 16 x bfloat> [[VS1:%.*]], <vscale x 16 x bfloat> [[VS2:%.*]], i64 noundef [[VL:%.*]]) #[[ATTR0]] {
+// CHECK-RV64-NEXT: [[ENTRY:.*:]]
+// CHECK-RV64-NEXT: [[TMP0:%.*]] = call <vscale x 16 x bfloat> @llvm.riscv.vfmsac.mask.nxv16bf16.nxv16bf16.i64(<vscale x 16 x bfloat> [[VD]], <vscale x 16 x bfloat> [[VS1]], <vscale x 16 x bfloat> [[VS2]], <vscale x 16 x i1> [[MASK]], i64 7, i64 [[VL]], i64 0)
+// CHECK-RV64-NEXT: ret <vscale x 16 x bfloat> [[TMP0]]
+//
+vbfloat16m4_t test_vfmsac_vv_bf16m4_tumu(vbool4_t mask, vbfloat16m4_t vd, vbfloat16m4_t vs1, vbfloat16m4_t vs2, size_t vl) {
+ return __riscv_vfmsac_vv_bf16m4_tumu(mask, vd, vs1, vs2, vl);
+}
+
+// CHECK-RV64-LABEL: define dso_local <vscale x 16 x bfloat> @test_vfmsac_vf_bf16m4_tumu(
+// CHECK-RV64-SAME: <vscale x 16 x i1> [[MASK:%.*]], <vscale x 16 x bfloat> [[VD:%.*]], bfloat noundef [[RS1:%.*]], <vscale x 16 x bfloat> [[VS2:%.*]], i64 noundef [[VL:%.*]]) #[[ATTR0]] {
+// CHECK-RV64-NEXT: [[ENTRY:.*:]]
+// CHECK-RV64-NEXT: [[TMP0:%.*]] = call <vscale x 16 x bfloat> @llvm.riscv.vfmsac.mask.nxv16bf16.bf16.i64(<vscale x 16 x bfloat> [[VD]], bfloat [[RS1]], <vscale x 16 x bfloat> [[VS2]], <vscale x 16 x i1> [[MASK]], i64 7, i64 [[VL]], i64 0)
+// CHECK-RV64-NEXT: ret <vscale x 16 x bfloat> [[TMP0]]
+//
+vbfloat16m4_t test_vfmsac_vf_bf16m4_tumu(vbool4_t mask, vbfloat16m4_t vd, __bf16 rs1, vbfloat16m4_t vs2, size_t vl) {
+ return __riscv_vfmsac_vf_bf16m4_tumu(mask, vd, rs1, vs2, vl);
+}
+
+// CHECK-RV64-LABEL: define dso_local <vscale x 32 x bfloat> @test_vfmsac_vv_bf16m8_tumu(
+// CHECK-RV64-SAME: <vscale x 32 x i1> [[MASK:%.*]], <vscale x 32 x bfloat> [[VD:%.*]], <vscale x 32 x bfloat> [[VS1:%.*]], <vscale x 32 x bfloat> [[VS2:%.*]], i64 noundef [[VL:%.*]]) #[[ATTR0]] {
+// CHECK-RV64-NEXT: [[ENTRY:.*:]]
+// CHECK-RV64-NEXT: [[TMP0:%.*]] = call <vscale x 32 x bfloat> @llvm.riscv.vfmsac.mask.nxv32bf16.nxv32bf16.i64(<vscale x 32 x bfloat> [[VD]], <vscale x 32 x bfloat> [[VS1]], <vscale x 32 x bfloat> [[VS2]], <vscale x 32 x i1> [[MASK]], i64 7, i64 [[VL]], i64 0)
+// CHECK-RV64-NEXT: ret <vscale x 32 x bfloat> [[TMP0]]
+//
+vbfloat16m8_t test_vfmsac_vv_bf16m8_tumu(vbool2_t mask, vbfloat16m8_t vd, vbfloat16m8_t vs1, vbfloat16m8_t vs2, size_t vl) {
+ return __riscv_vfmsac_vv_bf16m8_tumu(mask, vd, vs1, vs2, vl);
+}
+
+// CHECK-RV64-LABEL: define dso_local <vscale x 32 x bfloat> @test_vfmsac_vf_bf16m8_tumu(
+// CHECK-RV64-SAME: <vscale x 32 x i1> [[MASK:%.*]], <vscale x 32 x bfloat> [[VD:%.*]], bfloat noundef [[RS1:%.*]], <vscale x 32 x bfloat> [[VS2:%.*]], i64 noundef [[VL:%.*]]) #[[ATTR0]] {
+// CHECK-RV64-NEXT: [[ENTRY:.*:]]
+// CHECK-RV64-NEXT: [[TMP0:%.*]] = call <vscale x 32 x bfloat> @llvm.riscv.vfmsac.mask.nxv32bf16.bf16.i64(<vscale x 32 x bfloat> [[VD]], bfloat [[RS1]], <vscale x 32 x bfloat> [[VS2]], <vscale x 32 x i1> [[MASK]], i64 7, i64 [[VL]], i64 0)
+// CHECK-RV64-NEXT: ret <vscale x 32 x bfloat> [[TMP0]]
+//
+vbfloat16m8_t test_vfmsac_vf_bf16m8_tumu(vbool2_t mask, vbfloat16m8_t vd, __bf16 rs1, vbfloat16m8_t vs2, size_t vl) {
+ return __riscv_vfmsac_vf_bf16m8_tumu(mask, vd, rs1, vs2, vl);
+}
+
+// CHECK-RV64-LABEL: define dso_local <vscale x 1 x bfloat> @test_vfmsac_vv_bf16mf4_mu(
+// CHECK-RV64-SAME: <vscale x 1 x i1> [[MASK:%.*]], <vscale x 1 x bfloat> [[VD:%.*]], <vscale x 1 x bfloat> [[VS1:%.*]], <vscale x 1 x bfloat> [[VS2:%.*]], i64 noundef [[VL:%.*]]) #[[ATTR0]] {
+// CHECK-RV64-NEXT: [[ENTRY:.*:]]
+// CHECK-RV64-NEXT: [[TMP0:%.*]] = call <vscale x 1 x bfloat> @llvm.riscv.vfmsac.mask.nxv1bf16.nxv1bf16.i64(<vscale x 1 x bfloat> [[VD]], <vscale x 1 x bfloat> [[VS1]], <vscale x 1 x bfloat> [[VS2]], <vscale x 1 x i1> [[MASK]], i64 7, i64 [[VL]], i64 1)
+// CHECK-RV64-NEXT: ret <vscale x 1 x bfloat> [[TMP0]]
+//
+vbfloat16mf4_t test_vfmsac_vv_bf16mf4_mu(vbool64_t mask, vbfloat16mf4_t vd, vbfloat16mf4_t vs1, vbfloat16mf4_t vs2, size_t vl) {
+ return __riscv_vfmsac_vv_bf16mf4_mu(mask, vd, vs1, vs2, vl);
+}
+
+// CHECK-RV64-LABEL: define dso_local <vscale x 1 x bfloat> @test_vfmsac_vf_bf16mf4_mu(
+// CHECK-RV64-SAME: <vscale x 1 x i1> [[MASK:%.*]], <vscale x 1 x bfloat> [[VD:%.*]], bfloat noundef [[RS1:%.*]], <vscale x 1 x bfloat> [[VS2:%.*]], i64 noundef [[VL:%.*]]) #[[ATTR0]] {
+// CHECK-RV64-NEXT: [[ENTRY:.*:]]
+// CHECK-RV64-NEXT: [[TMP0:%.*]] = call <vscale x 1 x bfloat> @llvm.riscv.vfmsac.mask.nxv1bf16.bf16.i64(<vscale x 1 x bfloat> [[VD]], bfloat [[RS1]], <vscale x 1 x bfloat> [[VS2]], <vscale x 1 x i1> [[MASK]], i64 7, i64 [[VL]], i64 1)
+// CHECK-RV64-NEXT: ret <vscale x 1 x bfloat> [[TMP0]]
+//
+vbfloat16mf4_t test_vfmsac_vf_bf16mf4_mu(vbool64_t mask, vbfloat16mf4_t vd, __bf16 rs1, vbfloat16mf4_t vs2, size_t vl) {
+ return __riscv_vfmsac_vf_bf16mf4_mu(mask, vd, rs1, vs2, vl);
+}
+
+// CHECK-RV64-LABEL: define dso_local <vscale x 2 x bfloat> @test_vfmsac_vv_bf16mf2_mu(
+// CHECK-RV64-SAME: <vscale x 2 x i1> [[MASK:%.*]], <vscale x 2 x bfloat> [[VD:%.*]], <vscale x 2 x bfloat> [[VS1:%.*]], <vscale x 2 x bfloat> [[VS2:%.*]], i64 noundef [[VL:%.*]]) #[[ATTR0]] {
+// CHECK-RV64-NEXT: [[ENTRY:.*:]]
+// CHECK-RV64-NEXT: [[TMP0:%.*]] = call <vscale x 2 x bfloat> @llvm.riscv.vfmsac.mask.nxv2bf16.nxv2bf16.i64(<vscale x 2 x bfloat> [[VD]], <vscale x 2 x bfloat> [[VS1]], <vscale x 2 x bfloat> [[VS2]], <vscale x 2 x i1> [[MASK]], i64 7, i64 [[VL]], i64 1)
+// CHECK-RV64-NEXT: ret <vscale x 2 x bfloat> [[TMP0]]
+//
+vbfloat16mf2_t test_vfmsac_vv_bf16mf2_mu(vbool32_t mask, vbfloat16mf2_t vd, vbfloat16mf2_t vs1, vbfloat16mf2_t vs2, size_t vl) {
+ return __riscv_vfmsac_vv_bf16mf2_mu(mask, vd, vs1, vs2, vl);
+}
+
+// CHECK-RV64-LABEL: define dso_local <vscale x 2 x bfloat> @test_vfmsac_vf_bf16mf2_mu(
+// CHECK-RV64-SAME: <vscale x 2 x i1> [[MASK:%.*]], <vscale x 2 x bfloat> [[VD:%.*]], bfloat noundef [[RS1:%.*]], <vscale x 2 x bfloat> [[VS2:%.*]], i64 noundef [[VL:%.*]]) #[[ATTR0]] {
+// CHECK-RV64-NEXT: [[ENTRY:.*:]]
+// CHECK-RV64-NEXT: [[TMP0:%.*]] = call <vscale x 2 x bfloat> @llvm.riscv.vfmsac.mask.nxv2bf16.bf16.i64(<vscale x 2 x bfloat> [[VD]], bfloat [[RS1]], <vscale x 2 x bfloat> [[VS2]], <vscale x 2 x i1> [[MASK]], i64 7, i64 [[VL]], i64 1)
+// CHECK-RV64-NEXT: ret <vscale x 2 x bfloat> [[TMP0]]
+//
+vbfloat16mf2_t test_vfmsac_vf_bf16mf2_mu(vbool32_t mask, vbfloat16mf2_t vd, __bf16 rs1, vbfloat16mf2_t vs2, size_t vl) {
+ return __riscv_vfmsac_vf_bf16mf2_mu(mask, vd, rs1, vs2, vl);
+}
+
+// CHECK-RV64-LABEL: define dso_local <vscale x 4 x bfloat> @test_vfmsac_vv_bf16m1_mu(
+// CHECK-RV64-SAME: <vscale x 4 x i1> [[MASK:%.*]], <vscale x 4 x bfloat> [[VD:%.*]], <vscale x 4 x bfloat> [[VS1:%.*]], <vscale x 4 x bfloat> [[VS2:%.*]], i64 noundef [[VL:%.*]]) #[[ATTR0]] {
+// CHECK-RV64-NEXT: [[ENTRY:.*:]]
+// CHECK-RV64-NEXT: [[TMP0:%.*]] = call <vscale x 4 x bfloat> @llvm.riscv.vfmsac.mask.nxv4bf16.nxv4bf16.i64(<vscale x 4 x bfloat> [[VD]], <vscale x 4 x bfloat> [[VS1]], <vscale x 4 x bfloat> [[VS2]], <vscale x 4 x i1> [[MASK]], i64 7, i64 [[VL]], i64 1)
+// CHECK-RV64-NEXT: ret <vscale x 4 x bfloat> [[TMP0]]
+//
+vbfloat16m1_t test_vfmsac_vv_bf16m1_mu(vbool16_t mask, vbfloat16m1_t vd, vbfloat16m1_t vs1, vbfloat16m1_t vs2, size_t vl) {
+ return __riscv_vfmsac_vv_bf16m1_mu(mask, vd, vs1, vs2, vl);
+}
+
+// CHECK-RV64-LABEL: define dso_local <vscale x 4 x bfloat> @test_vfmsac_vf_bf16m1_mu(
+// CHECK-RV64-SAME: <vscale x 4 x i1> [[MASK:%.*]], <vscale x 4 x bfloat> [[VD:%.*]], bfloat noundef [[RS1:%.*]], <vscale x 4 x bfloat> [[VS2:%.*]], i64 noundef [[VL:%.*]]) #[[ATTR0]] {
+// CHECK-RV64-NEXT: [[ENTRY:.*:]]
+// CHECK-RV64-NEXT: [[TMP0:%.*]] = call <vscale x 4 x bfloat> @llvm.riscv.vfmsac.mask.nxv4bf16.bf16.i64(<vscale x 4 x bfloat> [[VD]], bfloat [[RS1]], <vscale x 4 x bfloat> [[VS2]], <vscale x 4 x i1> [[MASK]], i64 7, i64 [[VL]], i64 1)
+// CHECK-RV64-NEXT: ret <vscale x 4 x bfloat> [[TMP0]]
+//
+vbfloat16m1_t test_vfmsac_vf_bf16m1_mu(vbool16_t mask, vbfloat16m1_t vd, __bf16 rs1, vbfloat16m1_t vs2, size_t vl) {
+ return __riscv_vfmsac_vf_bf16m1_mu(mask, vd, rs1, vs2, vl);
+}
+
+// CHECK-RV64-LABEL: define dso_local <vscale x 8 x bfloat> @test_vfmsac_vv_bf16m2_mu(
+// CHECK-RV64-SAME: <vscale x 8 x i1> [[MASK:%.*]], <vscale x 8 x bfloat> [[VD:%.*]], <vscale x 8 x bfloat> [[VS1:%.*]], <vscale x 8 x bfloat> [[VS2:%.*]], i64 noundef [[VL:%.*]]) #[[ATTR0]] {
+// CHECK-RV64-NEXT: [[ENTRY:.*:]]
+// CHECK-RV64-NEXT: [[TMP0:%.*]] = call <vscale x 8 x bfloat> @llvm.riscv.vfmsac.mask.nxv8bf16.nxv8bf16.i64(<vscale x 8 x bfloat> [[VD]], <vscale x 8 x bfloat> [[VS1]], <vscale x 8 x bfloat> [[VS2]], <vscale x 8 x i1> [[MASK]], i64 7, i64 [[VL]], i64 1)
+// CHECK-RV64-NEXT: ret <vscale x 8 x bfloat> [[TMP0]]
+//
+vbfloat16m2_t test_vfmsac_vv_bf16m2_mu(vbool8_t mask, vbfloat16m2_t vd, vbfloat16m2_t vs1, vbfloat16m2_t vs2, size_t vl) {
+ return __riscv_vfmsac_vv_bf16m2_mu(mask, vd, vs1, vs2, vl);
+}
+
+// CHECK-RV64-LABEL: define dso_local <vscale x 8 x bfloat> @test_vfmsac_vf_bf16m2_mu(
+// CHECK-RV64-SAME: <vscale x 8 x i1> [[MASK:%.*]], <vscale x 8 x bfloat> [[VD:%.*]], bfloat noundef [[RS1:%.*]], <vscale x 8 x bfloat> [[VS2:%.*]], i64 noundef [[VL:%.*]]) #[[ATTR0]] {
+// CHECK-RV64-NEXT: [[ENTRY:.*:]]
+// CHECK-RV64-NEXT: [[TMP0:%.*]] = call <vscale x 8 x bfloat> @llvm.riscv.vfmsac.mask.nxv8bf16.bf16.i64(<vscale x 8 x bfloat> [[VD]], bfloat [[RS1]], <vscale x 8 x bfloat> [[VS2]], <vscale x 8 x i1> [[MASK]], i64 7, i64 [[VL]], i64 1)
+// CHECK-RV64-NEXT: ret <vscale x 8 x bfloat> [[TMP0]]
+//
+vbfloat16m2_t test_vfmsac_vf_bf16m2_mu(vbool8_t mask, vbfloat16m2_t vd, __bf16 rs1, vbfloat16m2_t vs2, size_t vl) {
+ return __riscv_vfmsac_vf_bf16m2_mu(mask, vd, rs1, vs2, vl);
+}
+
+// CHECK-RV64-LABEL: define dso_local <vscale x 16 x bfloat> @test_vfmsac_vv_bf16m4_mu(
+// CHECK-RV64-SAME: <vscale x 16 x i1> [[MASK:%.*]], <vscale x 16 x bfloat> [[VD:%.*]], <vscale x 16 x bfloat> [[VS1:%.*]], <vscale x 16 x bfloat> [[VS2:%.*]], i64 noundef [[VL:%.*]]) #[[ATTR0]] {
+// CHECK-RV64-NEXT: [[ENTRY:.*:]]
+// CHECK-RV64-NEXT: [[TMP0:%.*]] = call <vscale x 16 x bfloat> @llvm.riscv.vfmsac.mask.nxv16bf16.nxv16bf16.i64(<vscale x 16 x bfloat> [[VD]], <vscale x 16 x bfloat> [[VS1]], <vscale x 16 x bfloat> [[VS2]], <vscale x 16 x i1> [[MASK]], i64 7, i64 [[VL]], i64 1)
+// CHECK-RV64-NEXT: ret <vscale x 16 x bfloat> [[TMP0]]
+//
+vbfloat16m4_t test_vfmsac_vv_bf16m4_mu(vbool4_t mask, vbfloat16m4_t vd, vbfloat16m4_t vs1, vbfloat16m4_t vs2, size_t vl) {
+ return __riscv_vfmsac_vv_bf16m4_mu(mask, vd, vs1, vs2, vl);
+}
+
+// CHECK-RV64-LABEL: define dso_local <vscale x 16 x bfloat> @test_vfmsac_vf_bf16m4_mu(
+// CHECK-RV64-SAME: <vscale x 16 x i1> [[MASK:%.*]], <vscale x 16 x bfloat> [[VD:%.*]], bfloat noundef [[RS1:%.*]], <vscale x 16 x bfloat> [[VS2:%.*]], i64 noundef [[VL:%.*]]) #[[ATTR0]] {
+// CHECK-RV64-NEXT: [[ENTRY:.*:]]
+// CHECK-RV64-NEXT: [[TMP0:%.*]] = call <vscale x 16 x bfloat> @llvm.riscv.vfmsac.mask.nxv16bf16.bf16.i64(<vscale x 16 x bfloat> [[VD]], bfloat [[RS1]], <vscale x 16 x bfloat> [[VS2]], <vscale x 16 x i1> [[MASK]], i64 7, i64 [[VL]], i64 1)
+// CHECK-RV64-NEXT: ret <vscale x 16 x bfloat> [[TMP0]]
+//
+vbfloat16m4_t test_vfmsac_vf_bf16m4_mu(vbool4_t mask, vbfloat16m4_t vd, __bf16 rs1, vbfloat16m4_t vs2, size_t vl) {
+ return __riscv_vfmsac_vf_bf16m4_mu(mask, vd, rs1, vs2, vl);
+}
+
+// CHECK-RV64-LABEL: define dso_local <vscale x 32 x bfloat> @test_vfmsac_vv_bf16m8_mu(
+// CHECK-RV64-SAME: <vscale x 32 x i1> [[MASK:%.*]], <vscale x 32 x bfloat> [[VD:%.*]], <vscale x 32 x bfloat> [[VS1:%.*]], <vscale x 32 x bfloat> [[VS2:%.*]], i64 noundef [[VL:%.*]]) #[[ATTR0]] {
+// CHECK-RV64-NEXT: [[ENTRY:.*:]]
+// CHECK-RV64-NEXT: [[TMP0:%.*]] = call <vscale x 32 x bfloat> @llvm.riscv.vfmsac.mask.nxv32bf16.nxv32bf16.i64(<vscale x 32 x bfloat> [[VD]], <vscale x 32 x bfloat> [[VS1]], <vscale x 32 x bfloat> [[VS2]], <vscale x 32 x i1> [[MASK]], i64 7, i64 [[VL]], i64 1)
+// CHECK-RV64-NEXT: ret <vscale x 32 x bfloat> [[TMP0]]
+//
+vbfloat16m8_t test_vfmsac_vv_bf16m8_mu(vbool2_t mask, vbfloat16m8_t vd, vbfloat16m8_t vs1, vbfloat16m8_t vs2, size_t vl) {
+ return __riscv_vfmsac_vv_bf16m8_mu(mask, vd, vs1, vs2, vl);
+}
+
+// CHECK-RV64-LABEL: define dso_local <vscale x 32 x bfloat> @test_vfmsac_vf_bf16m8_mu(
+// CHECK-RV64-SAME: <vscale x 32 x i1> [[MASK:%.*]], <vscale x 32 x bfloat> [[VD:%.*]], bfloat noundef [[RS1:%.*]], <vscale x 32 x bfloat> [[VS2:%.*]], i64 noundef [[VL:%.*]]) #[[ATTR0]] {
+// CHECK-RV64-NEXT: [[ENTRY:.*:]]
+// CHECK-RV64-NEXT: [[TMP0:%.*]] = call <vscale x 32 x bfloat> @llvm.riscv.vfmsac.mask.nxv32bf16.bf16.i64(<vscale x 32 x bfloat> [[VD]], bfloat [[RS1]], <vscale x 32 x bfloat> [[VS2]], <vscale x 32 x i1> [[MASK]], i64 7, i64 [[VL]], i64 1)
+// CHECK-RV64-NEXT: ret <vscale x 32 x bfloat> [[TMP0]]
+//
+vbfloat16m8_t test_vfmsac_vf_bf16m8_mu(vbool2_t mask, vbfloat16m8_t vd, __bf16 rs1, vbfloat16m8_t vs2, size_t vl) {
+ return __riscv_vfmsac_vf_bf16m8_mu(mask, vd, rs1, vs2, vl);
+}
+
diff --git a/clang/test/CodeGen/RISCV/rvv-intrinsics-autogenerated/zvfbfa/policy/non-overloaded/vfmsub.c b/clang/test/CodeGen/RISCV/rvv-intrinsics-autogenerated/zvfbfa/policy/non-overloaded/vfmsub.c
new file mode 100644
index 0000000..8e382f71
--- /dev/null
+++ b/clang/test/CodeGen/RISCV/rvv-intrinsics-autogenerated/zvfbfa/policy/non-overloaded/vfmsub.c
@@ -0,0 +1,489 @@
+// NOTE: Assertions have been autogenerated by utils/update_cc_test_checks.py UTC_ARGS: --version 5
+// REQUIRES: riscv-registered-target
+// RUN: %clang_cc1 -triple riscv64 -target-feature +v \
+// RUN: -target-feature +experimental-zvfbfa -disable-O0-optnone \
+// RUN: -emit-llvm %s -o - | opt -S -passes=mem2reg | \
+// RUN: FileCheck --check-prefix=CHECK-RV64 %s
+
+#include <riscv_vector.h>
+
+// CHECK-RV64-LABEL: define dso_local <vscale x 1 x bfloat> @test_vfmsub_vv_bf16mf4_tu(
+// CHECK-RV64-SAME: <vscale x 1 x bfloat> [[VD:%.*]], <vscale x 1 x bfloat> [[VS1:%.*]], <vscale x 1 x bfloat> [[VS2:%.*]], i64 noundef [[VL:%.*]]) #[[ATTR0:[0-9]+]] {
+// CHECK-RV64-NEXT: [[ENTRY:.*:]]
+// CHECK-RV64-NEXT: [[TMP0:%.*]] = call <vscale x 1 x bfloat> @llvm.riscv.vfmsub.nxv1bf16.nxv1bf16.i64(<vscale x 1 x bfloat> [[VD]], <vscale x 1 x bfloat> [[VS1]], <vscale x 1 x bfloat> [[VS2]], i64 7, i64 [[VL]], i64 2)
+// CHECK-RV64-NEXT: ret <vscale x 1 x bfloat> [[TMP0]]
+//
+vbfloat16mf4_t test_vfmsub_vv_bf16mf4_tu(vbfloat16mf4_t vd, vbfloat16mf4_t vs1, vbfloat16mf4_t vs2, size_t vl) {
+ return __riscv_vfmsub_vv_bf16mf4_tu(vd, vs1, vs2, vl);
+}
+
+// CHECK-RV64-LABEL: define dso_local <vscale x 1 x bfloat> @test_vfmsub_vf_bf16mf4_tu(
+// CHECK-RV64-SAME: <vscale x 1 x bfloat> [[VD:%.*]], bfloat noundef [[RS1:%.*]], <vscale x 1 x bfloat> [[VS2:%.*]], i64 noundef [[VL:%.*]]) #[[ATTR0]] {
+// CHECK-RV64-NEXT: [[ENTRY:.*:]]
+// CHECK-RV64-NEXT: [[TMP0:%.*]] = call <vscale x 1 x bfloat> @llvm.riscv.vfmsub.nxv1bf16.bf16.i64(<vscale x 1 x bfloat> [[VD]], bfloat [[RS1]], <vscale x 1 x bfloat> [[VS2]], i64 7, i64 [[VL]], i64 2)
+// CHECK-RV64-NEXT: ret <vscale x 1 x bfloat> [[TMP0]]
+//
+vbfloat16mf4_t test_vfmsub_vf_bf16mf4_tu(vbfloat16mf4_t vd, __bf16 rs1, vbfloat16mf4_t vs2, size_t vl) {
+ return __riscv_vfmsub_vf_bf16mf4_tu(vd, rs1, vs2, vl);
+}
+
+// CHECK-RV64-LABEL: define dso_local <vscale x 2 x bfloat> @test_vfmsub_vv_bf16mf2_tu(
+// CHECK-RV64-SAME: <vscale x 2 x bfloat> [[VD:%.*]], <vscale x 2 x bfloat> [[VS1:%.*]], <vscale x 2 x bfloat> [[VS2:%.*]], i64 noundef [[VL:%.*]]) #[[ATTR0]] {
+// CHECK-RV64-NEXT: [[ENTRY:.*:]]
+// CHECK-RV64-NEXT: [[TMP0:%.*]] = call <vscale x 2 x bfloat> @llvm.riscv.vfmsub.nxv2bf16.nxv2bf16.i64(<vscale x 2 x bfloat> [[VD]], <vscale x 2 x bfloat> [[VS1]], <vscale x 2 x bfloat> [[VS2]], i64 7, i64 [[VL]], i64 2)
+// CHECK-RV64-NEXT: ret <vscale x 2 x bfloat> [[TMP0]]
+//
+vbfloat16mf2_t test_vfmsub_vv_bf16mf2_tu(vbfloat16mf2_t vd, vbfloat16mf2_t vs1, vbfloat16mf2_t vs2, size_t vl) {
+ return __riscv_vfmsub_vv_bf16mf2_tu(vd, vs1, vs2, vl);
+}
+
+// CHECK-RV64-LABEL: define dso_local <vscale x 2 x bfloat> @test_vfmsub_vf_bf16mf2_tu(
+// CHECK-RV64-SAME: <vscale x 2 x bfloat> [[VD:%.*]], bfloat noundef [[RS1:%.*]], <vscale x 2 x bfloat> [[VS2:%.*]], i64 noundef [[VL:%.*]]) #[[ATTR0]] {
+// CHECK-RV64-NEXT: [[ENTRY:.*:]]
+// CHECK-RV64-NEXT: [[TMP0:%.*]] = call <vscale x 2 x bfloat> @llvm.riscv.vfmsub.nxv2bf16.bf16.i64(<vscale x 2 x bfloat> [[VD]], bfloat [[RS1]], <vscale x 2 x bfloat> [[VS2]], i64 7, i64 [[VL]], i64 2)
+// CHECK-RV64-NEXT: ret <vscale x 2 x bfloat> [[TMP0]]
+//
+vbfloat16mf2_t test_vfmsub_vf_bf16mf2_tu(vbfloat16mf2_t vd, __bf16 rs1, vbfloat16mf2_t vs2, size_t vl) {
+ return __riscv_vfmsub_vf_bf16mf2_tu(vd, rs1, vs2, vl);
+}
+
+// CHECK-RV64-LABEL: define dso_local <vscale x 4 x bfloat> @test_vfmsub_vv_bf16m1_tu(
+// CHECK-RV64-SAME: <vscale x 4 x bfloat> [[VD:%.*]], <vscale x 4 x bfloat> [[VS1:%.*]], <vscale x 4 x bfloat> [[VS2:%.*]], i64 noundef [[VL:%.*]]) #[[ATTR0]] {
+// CHECK-RV64-NEXT: [[ENTRY:.*:]]
+// CHECK-RV64-NEXT: [[TMP0:%.*]] = call <vscale x 4 x bfloat> @llvm.riscv.vfmsub.nxv4bf16.nxv4bf16.i64(<vscale x 4 x bfloat> [[VD]], <vscale x 4 x bfloat> [[VS1]], <vscale x 4 x bfloat> [[VS2]], i64 7, i64 [[VL]], i64 2)
+// CHECK-RV64-NEXT: ret <vscale x 4 x bfloat> [[TMP0]]
+//
+vbfloat16m1_t test_vfmsub_vv_bf16m1_tu(vbfloat16m1_t vd, vbfloat16m1_t vs1, vbfloat16m1_t vs2, size_t vl) {
+ return __riscv_vfmsub_vv_bf16m1_tu(vd, vs1, vs2, vl);
+}
+
+// CHECK-RV64-LABEL: define dso_local <vscale x 4 x bfloat> @test_vfmsub_vf_bf16m1_tu(
+// CHECK-RV64-SAME: <vscale x 4 x bfloat> [[VD:%.*]], bfloat noundef [[RS1:%.*]], <vscale x 4 x bfloat> [[VS2:%.*]], i64 noundef [[VL:%.*]]) #[[ATTR0]] {
+// CHECK-RV64-NEXT: [[ENTRY:.*:]]
+// CHECK-RV64-NEXT: [[TMP0:%.*]] = call <vscale x 4 x bfloat> @llvm.riscv.vfmsub.nxv4bf16.bf16.i64(<vscale x 4 x bfloat> [[VD]], bfloat [[RS1]], <vscale x 4 x bfloat> [[VS2]], i64 7, i64 [[VL]], i64 2)
+// CHECK-RV64-NEXT: ret <vscale x 4 x bfloat> [[TMP0]]
+//
+vbfloat16m1_t test_vfmsub_vf_bf16m1_tu(vbfloat16m1_t vd, __bf16 rs1, vbfloat16m1_t vs2, size_t vl) {
+ return __riscv_vfmsub_vf_bf16m1_tu(vd, rs1, vs2, vl);
+}
+
+// CHECK-RV64-LABEL: define dso_local <vscale x 8 x bfloat> @test_vfmsub_vv_bf16m2_tu(
+// CHECK-RV64-SAME: <vscale x 8 x bfloat> [[VD:%.*]], <vscale x 8 x bfloat> [[VS1:%.*]], <vscale x 8 x bfloat> [[VS2:%.*]], i64 noundef [[VL:%.*]]) #[[ATTR0]] {
+// CHECK-RV64-NEXT: [[ENTRY:.*:]]
+// CHECK-RV64-NEXT: [[TMP0:%.*]] = call <vscale x 8 x bfloat> @llvm.riscv.vfmsub.nxv8bf16.nxv8bf16.i64(<vscale x 8 x bfloat> [[VD]], <vscale x 8 x bfloat> [[VS1]], <vscale x 8 x bfloat> [[VS2]], i64 7, i64 [[VL]], i64 2)
+// CHECK-RV64-NEXT: ret <vscale x 8 x bfloat> [[TMP0]]
+//
+vbfloat16m2_t test_vfmsub_vv_bf16m2_tu(vbfloat16m2_t vd, vbfloat16m2_t vs1, vbfloat16m2_t vs2, size_t vl) {
+ return __riscv_vfmsub_vv_bf16m2_tu(vd, vs1, vs2, vl);
+}
+
+// CHECK-RV64-LABEL: define dso_local <vscale x 8 x bfloat> @test_vfmsub_vf_bf16m2_tu(
+// CHECK-RV64-SAME: <vscale x 8 x bfloat> [[VD:%.*]], bfloat noundef [[RS1:%.*]], <vscale x 8 x bfloat> [[VS2:%.*]], i64 noundef [[VL:%.*]]) #[[ATTR0]] {
+// CHECK-RV64-NEXT: [[ENTRY:.*:]]
+// CHECK-RV64-NEXT: [[TMP0:%.*]] = call <vscale x 8 x bfloat> @llvm.riscv.vfmsub.nxv8bf16.bf16.i64(<vscale x 8 x bfloat> [[VD]], bfloat [[RS1]], <vscale x 8 x bfloat> [[VS2]], i64 7, i64 [[VL]], i64 2)
+// CHECK-RV64-NEXT: ret <vscale x 8 x bfloat> [[TMP0]]
+//
+vbfloat16m2_t test_vfmsub_vf_bf16m2_tu(vbfloat16m2_t vd, __bf16 rs1, vbfloat16m2_t vs2, size_t vl) {
+ return __riscv_vfmsub_vf_bf16m2_tu(vd, rs1, vs2, vl);
+}
+
+// CHECK-RV64-LABEL: define dso_local <vscale x 16 x bfloat> @test_vfmsub_vv_bf16m4_tu(
+// CHECK-RV64-SAME: <vscale x 16 x bfloat> [[VD:%.*]], <vscale x 16 x bfloat> [[VS1:%.*]], <vscale x 16 x bfloat> [[VS2:%.*]], i64 noundef [[VL:%.*]]) #[[ATTR0]] {
+// CHECK-RV64-NEXT: [[ENTRY:.*:]]
+// CHECK-RV64-NEXT: [[TMP0:%.*]] = call <vscale x 16 x bfloat> @llvm.riscv.vfmsub.nxv16bf16.nxv16bf16.i64(<vscale x 16 x bfloat> [[VD]], <vscale x 16 x bfloat> [[VS1]], <vscale x 16 x bfloat> [[VS2]], i64 7, i64 [[VL]], i64 2)
+// CHECK-RV64-NEXT: ret <vscale x 16 x bfloat> [[TMP0]]
+//
+vbfloat16m4_t test_vfmsub_vv_bf16m4_tu(vbfloat16m4_t vd, vbfloat16m4_t vs1, vbfloat16m4_t vs2, size_t vl) {
+ return __riscv_vfmsub_vv_bf16m4_tu(vd, vs1, vs2, vl);
+}
+
+// CHECK-RV64-LABEL: define dso_local <vscale x 16 x bfloat> @test_vfmsub_vf_bf16m4_tu(
+// CHECK-RV64-SAME: <vscale x 16 x bfloat> [[VD:%.*]], bfloat noundef [[RS1:%.*]], <vscale x 16 x bfloat> [[VS2:%.*]], i64 noundef [[VL:%.*]]) #[[ATTR0]] {
+// CHECK-RV64-NEXT: [[ENTRY:.*:]]
+// CHECK-RV64-NEXT: [[TMP0:%.*]] = call <vscale x 16 x bfloat> @llvm.riscv.vfmsub.nxv16bf16.bf16.i64(<vscale x 16 x bfloat> [[VD]], bfloat [[RS1]], <vscale x 16 x bfloat> [[VS2]], i64 7, i64 [[VL]], i64 2)
+// CHECK-RV64-NEXT: ret <vscale x 16 x bfloat> [[TMP0]]
+//
+vbfloat16m4_t test_vfmsub_vf_bf16m4_tu(vbfloat16m4_t vd, __bf16 rs1, vbfloat16m4_t vs2, size_t vl) {
+ return __riscv_vfmsub_vf_bf16m4_tu(vd, rs1, vs2, vl);
+}
+
+// CHECK-RV64-LABEL: define dso_local <vscale x 32 x bfloat> @test_vfmsub_vv_bf16m8_tu(
+// CHECK-RV64-SAME: <vscale x 32 x bfloat> [[VD:%.*]], <vscale x 32 x bfloat> [[VS1:%.*]], <vscale x 32 x bfloat> [[VS2:%.*]], i64 noundef [[VL:%.*]]) #[[ATTR0]] {
+// CHECK-RV64-NEXT: [[ENTRY:.*:]]
+// CHECK-RV64-NEXT: [[TMP0:%.*]] = call <vscale x 32 x bfloat> @llvm.riscv.vfmsub.nxv32bf16.nxv32bf16.i64(<vscale x 32 x bfloat> [[VD]], <vscale x 32 x bfloat> [[VS1]], <vscale x 32 x bfloat> [[VS2]], i64 7, i64 [[VL]], i64 2)
+// CHECK-RV64-NEXT: ret <vscale x 32 x bfloat> [[TMP0]]
+//
+vbfloat16m8_t test_vfmsub_vv_bf16m8_tu(vbfloat16m8_t vd, vbfloat16m8_t vs1, vbfloat16m8_t vs2, size_t vl) {
+ return __riscv_vfmsub_vv_bf16m8_tu(vd, vs1, vs2, vl);
+}
+
+// CHECK-RV64-LABEL: define dso_local <vscale x 32 x bfloat> @test_vfmsub_vf_bf16m8_tu(
+// CHECK-RV64-SAME: <vscale x 32 x bfloat> [[VD:%.*]], bfloat noundef [[RS1:%.*]], <vscale x 32 x bfloat> [[VS2:%.*]], i64 noundef [[VL:%.*]]) #[[ATTR0]] {
+// CHECK-RV64-NEXT: [[ENTRY:.*:]]
+// CHECK-RV64-NEXT: [[TMP0:%.*]] = call <vscale x 32 x bfloat> @llvm.riscv.vfmsub.nxv32bf16.bf16.i64(<vscale x 32 x bfloat> [[VD]], bfloat [[RS1]], <vscale x 32 x bfloat> [[VS2]], i64 7, i64 [[VL]], i64 2)
+// CHECK-RV64-NEXT: ret <vscale x 32 x bfloat> [[TMP0]]
+//
+vbfloat16m8_t test_vfmsub_vf_bf16m8_tu(vbfloat16m8_t vd, __bf16 rs1, vbfloat16m8_t vs2, size_t vl) {
+ return __riscv_vfmsub_vf_bf16m8_tu(vd, rs1, vs2, vl);
+}
+
+// CHECK-RV64-LABEL: define dso_local <vscale x 1 x bfloat> @test_vfmsub_vv_bf16mf4_tum(
+// CHECK-RV64-SAME: <vscale x 1 x i1> [[MASK:%.*]], <vscale x 1 x bfloat> [[VD:%.*]], <vscale x 1 x bfloat> [[VS1:%.*]], <vscale x 1 x bfloat> [[VS2:%.*]], i64 noundef [[VL:%.*]]) #[[ATTR0]] {
+// CHECK-RV64-NEXT: [[ENTRY:.*:]]
+// CHECK-RV64-NEXT: [[TMP0:%.*]] = call <vscale x 1 x bfloat> @llvm.riscv.vfmsub.mask.nxv1bf16.nxv1bf16.i64(<vscale x 1 x bfloat> [[VD]], <vscale x 1 x bfloat> [[VS1]], <vscale x 1 x bfloat> [[VS2]], <vscale x 1 x i1> [[MASK]], i64 7, i64 [[VL]], i64 2)
+// CHECK-RV64-NEXT: ret <vscale x 1 x bfloat> [[TMP0]]
+//
+vbfloat16mf4_t test_vfmsub_vv_bf16mf4_tum(vbool64_t mask, vbfloat16mf4_t vd, vbfloat16mf4_t vs1, vbfloat16mf4_t vs2, size_t vl) {
+ return __riscv_vfmsub_vv_bf16mf4_tum(mask, vd, vs1, vs2, vl);
+}
+
+// CHECK-RV64-LABEL: define dso_local <vscale x 1 x bfloat> @test_vfmsub_vf_bf16mf4_tum(
+// CHECK-RV64-SAME: <vscale x 1 x i1> [[MASK:%.*]], <vscale x 1 x bfloat> [[VD:%.*]], bfloat noundef [[RS1:%.*]], <vscale x 1 x bfloat> [[VS2:%.*]], i64 noundef [[VL:%.*]]) #[[ATTR0]] {
+// CHECK-RV64-NEXT: [[ENTRY:.*:]]
+// CHECK-RV64-NEXT: [[TMP0:%.*]] = call <vscale x 1 x bfloat> @llvm.riscv.vfmsub.mask.nxv1bf16.bf16.i64(<vscale x 1 x bfloat> [[VD]], bfloat [[RS1]], <vscale x 1 x bfloat> [[VS2]], <vscale x 1 x i1> [[MASK]], i64 7, i64 [[VL]], i64 2)
+// CHECK-RV64-NEXT: ret <vscale x 1 x bfloat> [[TMP0]]
+//
+vbfloat16mf4_t test_vfmsub_vf_bf16mf4_tum(vbool64_t mask, vbfloat16mf4_t vd, __bf16 rs1, vbfloat16mf4_t vs2, size_t vl) {
+ return __riscv_vfmsub_vf_bf16mf4_tum(mask, vd, rs1, vs2, vl);
+}
+
+// CHECK-RV64-LABEL: define dso_local <vscale x 2 x bfloat> @test_vfmsub_vv_bf16mf2_tum(
+// CHECK-RV64-SAME: <vscale x 2 x i1> [[MASK:%.*]], <vscale x 2 x bfloat> [[VD:%.*]], <vscale x 2 x bfloat> [[VS1:%.*]], <vscale x 2 x bfloat> [[VS2:%.*]], i64 noundef [[VL:%.*]]) #[[ATTR0]] {
+// CHECK-RV64-NEXT: [[ENTRY:.*:]]
+// CHECK-RV64-NEXT: [[TMP0:%.*]] = call <vscale x 2 x bfloat> @llvm.riscv.vfmsub.mask.nxv2bf16.nxv2bf16.i64(<vscale x 2 x bfloat> [[VD]], <vscale x 2 x bfloat> [[VS1]], <vscale x 2 x bfloat> [[VS2]], <vscale x 2 x i1> [[MASK]], i64 7, i64 [[VL]], i64 2)
+// CHECK-RV64-NEXT: ret <vscale x 2 x bfloat> [[TMP0]]
+//
+vbfloat16mf2_t test_vfmsub_vv_bf16mf2_tum(vbool32_t mask, vbfloat16mf2_t vd, vbfloat16mf2_t vs1, vbfloat16mf2_t vs2, size_t vl) {
+ return __riscv_vfmsub_vv_bf16mf2_tum(mask, vd, vs1, vs2, vl);
+}
+
+// CHECK-RV64-LABEL: define dso_local <vscale x 2 x bfloat> @test_vfmsub_vf_bf16mf2_tum(
+// CHECK-RV64-SAME: <vscale x 2 x i1> [[MASK:%.*]], <vscale x 2 x bfloat> [[VD:%.*]], bfloat noundef [[RS1:%.*]], <vscale x 2 x bfloat> [[VS2:%.*]], i64 noundef [[VL:%.*]]) #[[ATTR0]] {
+// CHECK-RV64-NEXT: [[ENTRY:.*:]]
+// CHECK-RV64-NEXT: [[TMP0:%.*]] = call <vscale x 2 x bfloat> @llvm.riscv.vfmsub.mask.nxv2bf16.bf16.i64(<vscale x 2 x bfloat> [[VD]], bfloat [[RS1]], <vscale x 2 x bfloat> [[VS2]], <vscale x 2 x i1> [[MASK]], i64 7, i64 [[VL]], i64 2)
+// CHECK-RV64-NEXT: ret <vscale x 2 x bfloat> [[TMP0]]
+//
+vbfloat16mf2_t test_vfmsub_vf_bf16mf2_tum(vbool32_t mask, vbfloat16mf2_t vd, __bf16 rs1, vbfloat16mf2_t vs2, size_t vl) {
+ return __riscv_vfmsub_vf_bf16mf2_tum(mask, vd, rs1, vs2, vl);
+}
+
+// CHECK-RV64-LABEL: define dso_local <vscale x 4 x bfloat> @test_vfmsub_vv_bf16m1_tum(
+// CHECK-RV64-SAME: <vscale x 4 x i1> [[MASK:%.*]], <vscale x 4 x bfloat> [[VD:%.*]], <vscale x 4 x bfloat> [[VS1:%.*]], <vscale x 4 x bfloat> [[VS2:%.*]], i64 noundef [[VL:%.*]]) #[[ATTR0]] {
+// CHECK-RV64-NEXT: [[ENTRY:.*:]]
+// CHECK-RV64-NEXT: [[TMP0:%.*]] = call <vscale x 4 x bfloat> @llvm.riscv.vfmsub.mask.nxv4bf16.nxv4bf16.i64(<vscale x 4 x bfloat> [[VD]], <vscale x 4 x bfloat> [[VS1]], <vscale x 4 x bfloat> [[VS2]], <vscale x 4 x i1> [[MASK]], i64 7, i64 [[VL]], i64 2)
+// CHECK-RV64-NEXT: ret <vscale x 4 x bfloat> [[TMP0]]
+//
+vbfloat16m1_t test_vfmsub_vv_bf16m1_tum(vbool16_t mask, vbfloat16m1_t vd, vbfloat16m1_t vs1, vbfloat16m1_t vs2, size_t vl) {
+ return __riscv_vfmsub_vv_bf16m1_tum(mask, vd, vs1, vs2, vl);
+}
+
+// CHECK-RV64-LABEL: define dso_local <vscale x 4 x bfloat> @test_vfmsub_vf_bf16m1_tum(
+// CHECK-RV64-SAME: <vscale x 4 x i1> [[MASK:%.*]], <vscale x 4 x bfloat> [[VD:%.*]], bfloat noundef [[RS1:%.*]], <vscale x 4 x bfloat> [[VS2:%.*]], i64 noundef [[VL:%.*]]) #[[ATTR0]] {
+// CHECK-RV64-NEXT: [[ENTRY:.*:]]
+// CHECK-RV64-NEXT: [[TMP0:%.*]] = call <vscale x 4 x bfloat> @llvm.riscv.vfmsub.mask.nxv4bf16.bf16.i64(<vscale x 4 x bfloat> [[VD]], bfloat [[RS1]], <vscale x 4 x bfloat> [[VS2]], <vscale x 4 x i1> [[MASK]], i64 7, i64 [[VL]], i64 2)
+// CHECK-RV64-NEXT: ret <vscale x 4 x bfloat> [[TMP0]]
+//
+vbfloat16m1_t test_vfmsub_vf_bf16m1_tum(vbool16_t mask, vbfloat16m1_t vd, __bf16 rs1, vbfloat16m1_t vs2, size_t vl) {
+ return __riscv_vfmsub_vf_bf16m1_tum(mask, vd, rs1, vs2, vl);
+}
+
+// CHECK-RV64-LABEL: define dso_local <vscale x 8 x bfloat> @test_vfmsub_vv_bf16m2_tum(
+// CHECK-RV64-SAME: <vscale x 8 x i1> [[MASK:%.*]], <vscale x 8 x bfloat> [[VD:%.*]], <vscale x 8 x bfloat> [[VS1:%.*]], <vscale x 8 x bfloat> [[VS2:%.*]], i64 noundef [[VL:%.*]]) #[[ATTR0]] {
+// CHECK-RV64-NEXT: [[ENTRY:.*:]]
+// CHECK-RV64-NEXT: [[TMP0:%.*]] = call <vscale x 8 x bfloat> @llvm.riscv.vfmsub.mask.nxv8bf16.nxv8bf16.i64(<vscale x 8 x bfloat> [[VD]], <vscale x 8 x bfloat> [[VS1]], <vscale x 8 x bfloat> [[VS2]], <vscale x 8 x i1> [[MASK]], i64 7, i64 [[VL]], i64 2)
+// CHECK-RV64-NEXT: ret <vscale x 8 x bfloat> [[TMP0]]
+//
+vbfloat16m2_t test_vfmsub_vv_bf16m2_tum(vbool8_t mask, vbfloat16m2_t vd, vbfloat16m2_t vs1, vbfloat16m2_t vs2, size_t vl) {
+ return __riscv_vfmsub_vv_bf16m2_tum(mask, vd, vs1, vs2, vl);
+}
+
+// CHECK-RV64-LABEL: define dso_local <vscale x 8 x bfloat> @test_vfmsub_vf_bf16m2_tum(
+// CHECK-RV64-SAME: <vscale x 8 x i1> [[MASK:%.*]], <vscale x 8 x bfloat> [[VD:%.*]], bfloat noundef [[RS1:%.*]], <vscale x 8 x bfloat> [[VS2:%.*]], i64 noundef [[VL:%.*]]) #[[ATTR0]] {
+// CHECK-RV64-NEXT: [[ENTRY:.*:]]
+// CHECK-RV64-NEXT: [[TMP0:%.*]] = call <vscale x 8 x bfloat> @llvm.riscv.vfmsub.mask.nxv8bf16.bf16.i64(<vscale x 8 x bfloat> [[VD]], bfloat [[RS1]], <vscale x 8 x bfloat> [[VS2]], <vscale x 8 x i1> [[MASK]], i64 7, i64 [[VL]], i64 2)
+// CHECK-RV64-NEXT: ret <vscale x 8 x bfloat> [[TMP0]]
+//
+vbfloat16m2_t test_vfmsub_vf_bf16m2_tum(vbool8_t mask, vbfloat16m2_t vd, __bf16 rs1, vbfloat16m2_t vs2, size_t vl) {
+ return __riscv_vfmsub_vf_bf16m2_tum(mask, vd, rs1, vs2, vl);
+}
+
+// CHECK-RV64-LABEL: define dso_local <vscale x 16 x bfloat> @test_vfmsub_vv_bf16m4_tum(
+// CHECK-RV64-SAME: <vscale x 16 x i1> [[MASK:%.*]], <vscale x 16 x bfloat> [[VD:%.*]], <vscale x 16 x bfloat> [[VS1:%.*]], <vscale x 16 x bfloat> [[VS2:%.*]], i64 noundef [[VL:%.*]]) #[[ATTR0]] {
+// CHECK-RV64-NEXT: [[ENTRY:.*:]]
+// CHECK-RV64-NEXT: [[TMP0:%.*]] = call <vscale x 16 x bfloat> @llvm.riscv.vfmsub.mask.nxv16bf16.nxv16bf16.i64(<vscale x 16 x bfloat> [[VD]], <vscale x 16 x bfloat> [[VS1]], <vscale x 16 x bfloat> [[VS2]], <vscale x 16 x i1> [[MASK]], i64 7, i64 [[VL]], i64 2)
+// CHECK-RV64-NEXT: ret <vscale x 16 x bfloat> [[TMP0]]
+//
+vbfloat16m4_t test_vfmsub_vv_bf16m4_tum(vbool4_t mask, vbfloat16m4_t vd, vbfloat16m4_t vs1, vbfloat16m4_t vs2, size_t vl) {
+ return __riscv_vfmsub_vv_bf16m4_tum(mask, vd, vs1, vs2, vl);
+}
+
+// CHECK-RV64-LABEL: define dso_local <vscale x 16 x bfloat> @test_vfmsub_vf_bf16m4_tum(
+// CHECK-RV64-SAME: <vscale x 16 x i1> [[MASK:%.*]], <vscale x 16 x bfloat> [[VD:%.*]], bfloat noundef [[RS1:%.*]], <vscale x 16 x bfloat> [[VS2:%.*]], i64 noundef [[VL:%.*]]) #[[ATTR0]] {
+// CHECK-RV64-NEXT: [[ENTRY:.*:]]
+// CHECK-RV64-NEXT: [[TMP0:%.*]] = call <vscale x 16 x bfloat> @llvm.riscv.vfmsub.mask.nxv16bf16.bf16.i64(<vscale x 16 x bfloat> [[VD]], bfloat [[RS1]], <vscale x 16 x bfloat> [[VS2]], <vscale x 16 x i1> [[MASK]], i64 7, i64 [[VL]], i64 2)
+// CHECK-RV64-NEXT: ret <vscale x 16 x bfloat> [[TMP0]]
+//
+vbfloat16m4_t test_vfmsub_vf_bf16m4_tum(vbool4_t mask, vbfloat16m4_t vd, __bf16 rs1, vbfloat16m4_t vs2, size_t vl) {
+ return __riscv_vfmsub_vf_bf16m4_tum(mask, vd, rs1, vs2, vl);
+}
+
+// CHECK-RV64-LABEL: define dso_local <vscale x 32 x bfloat> @test_vfmsub_vv_bf16m8_tum(
+// CHECK-RV64-SAME: <vscale x 32 x i1> [[MASK:%.*]], <vscale x 32 x bfloat> [[VD:%.*]], <vscale x 32 x bfloat> [[VS1:%.*]], <vscale x 32 x bfloat> [[VS2:%.*]], i64 noundef [[VL:%.*]]) #[[ATTR0]] {
+// CHECK-RV64-NEXT: [[ENTRY:.*:]]
+// CHECK-RV64-NEXT: [[TMP0:%.*]] = call <vscale x 32 x bfloat> @llvm.riscv.vfmsub.mask.nxv32bf16.nxv32bf16.i64(<vscale x 32 x bfloat> [[VD]], <vscale x 32 x bfloat> [[VS1]], <vscale x 32 x bfloat> [[VS2]], <vscale x 32 x i1> [[MASK]], i64 7, i64 [[VL]], i64 2)
+// CHECK-RV64-NEXT: ret <vscale x 32 x bfloat> [[TMP0]]
+//
+vbfloat16m8_t test_vfmsub_vv_bf16m8_tum(vbool2_t mask, vbfloat16m8_t vd, vbfloat16m8_t vs1, vbfloat16m8_t vs2, size_t vl) {
+ return __riscv_vfmsub_vv_bf16m8_tum(mask, vd, vs1, vs2, vl);
+}
+
+// CHECK-RV64-LABEL: define dso_local <vscale x 32 x bfloat> @test_vfmsub_vf_bf16m8_tum(
+// CHECK-RV64-SAME: <vscale x 32 x i1> [[MASK:%.*]], <vscale x 32 x bfloat> [[VD:%.*]], bfloat noundef [[RS1:%.*]], <vscale x 32 x bfloat> [[VS2:%.*]], i64 noundef [[VL:%.*]]) #[[ATTR0]] {
+// CHECK-RV64-NEXT: [[ENTRY:.*:]]
+// CHECK-RV64-NEXT: [[TMP0:%.*]] = call <vscale x 32 x bfloat> @llvm.riscv.vfmsub.mask.nxv32bf16.bf16.i64(<vscale x 32 x bfloat> [[VD]], bfloat [[RS1]], <vscale x 32 x bfloat> [[VS2]], <vscale x 32 x i1> [[MASK]], i64 7, i64 [[VL]], i64 2)
+// CHECK-RV64-NEXT: ret <vscale x 32 x bfloat> [[TMP0]]
+//
+vbfloat16m8_t test_vfmsub_vf_bf16m8_tum(vbool2_t mask, vbfloat16m8_t vd, __bf16 rs1, vbfloat16m8_t vs2, size_t vl) {
+ return __riscv_vfmsub_vf_bf16m8_tum(mask, vd, rs1, vs2, vl);
+}
+
+// CHECK-RV64-LABEL: define dso_local <vscale x 1 x bfloat> @test_vfmsub_vv_bf16mf4_tumu(
+// CHECK-RV64-SAME: <vscale x 1 x i1> [[MASK:%.*]], <vscale x 1 x bfloat> [[VD:%.*]], <vscale x 1 x bfloat> [[VS1:%.*]], <vscale x 1 x bfloat> [[VS2:%.*]], i64 noundef [[VL:%.*]]) #[[ATTR0]] {
+// CHECK-RV64-NEXT: [[ENTRY:.*:]]
+// CHECK-RV64-NEXT: [[TMP0:%.*]] = call <vscale x 1 x bfloat> @llvm.riscv.vfmsub.mask.nxv1bf16.nxv1bf16.i64(<vscale x 1 x bfloat> [[VD]], <vscale x 1 x bfloat> [[VS1]], <vscale x 1 x bfloat> [[VS2]], <vscale x 1 x i1> [[MASK]], i64 7, i64 [[VL]], i64 0)
+// CHECK-RV64-NEXT: ret <vscale x 1 x bfloat> [[TMP0]]
+//
+vbfloat16mf4_t test_vfmsub_vv_bf16mf4_tumu(vbool64_t mask, vbfloat16mf4_t vd, vbfloat16mf4_t vs1, vbfloat16mf4_t vs2, size_t vl) {
+ return __riscv_vfmsub_vv_bf16mf4_tumu(mask, vd, vs1, vs2, vl);
+}
+
+// CHECK-RV64-LABEL: define dso_local <vscale x 1 x bfloat> @test_vfmsub_vf_bf16mf4_tumu(
+// CHECK-RV64-SAME: <vscale x 1 x i1> [[MASK:%.*]], <vscale x 1 x bfloat> [[VD:%.*]], bfloat noundef [[RS1:%.*]], <vscale x 1 x bfloat> [[VS2:%.*]], i64 noundef [[VL:%.*]]) #[[ATTR0]] {
+// CHECK-RV64-NEXT: [[ENTRY:.*:]]
+// CHECK-RV64-NEXT: [[TMP0:%.*]] = call <vscale x 1 x bfloat> @llvm.riscv.vfmsub.mask.nxv1bf16.bf16.i64(<vscale x 1 x bfloat> [[VD]], bfloat [[RS1]], <vscale x 1 x bfloat> [[VS2]], <vscale x 1 x i1> [[MASK]], i64 7, i64 [[VL]], i64 0)
+// CHECK-RV64-NEXT: ret <vscale x 1 x bfloat> [[TMP0]]
+//
+vbfloat16mf4_t test_vfmsub_vf_bf16mf4_tumu(vbool64_t mask, vbfloat16mf4_t vd, __bf16 rs1, vbfloat16mf4_t vs2, size_t vl) {
+ return __riscv_vfmsub_vf_bf16mf4_tumu(mask, vd, rs1, vs2, vl);
+}
+
+// CHECK-RV64-LABEL: define dso_local <vscale x 2 x bfloat> @test_vfmsub_vv_bf16mf2_tumu(
+// CHECK-RV64-SAME: <vscale x 2 x i1> [[MASK:%.*]], <vscale x 2 x bfloat> [[VD:%.*]], <vscale x 2 x bfloat> [[VS1:%.*]], <vscale x 2 x bfloat> [[VS2:%.*]], i64 noundef [[VL:%.*]]) #[[ATTR0]] {
+// CHECK-RV64-NEXT: [[ENTRY:.*:]]
+// CHECK-RV64-NEXT: [[TMP0:%.*]] = call <vscale x 2 x bfloat> @llvm.riscv.vfmsub.mask.nxv2bf16.nxv2bf16.i64(<vscale x 2 x bfloat> [[VD]], <vscale x 2 x bfloat> [[VS1]], <vscale x 2 x bfloat> [[VS2]], <vscale x 2 x i1> [[MASK]], i64 7, i64 [[VL]], i64 0)
+// CHECK-RV64-NEXT: ret <vscale x 2 x bfloat> [[TMP0]]
+//
+vbfloat16mf2_t test_vfmsub_vv_bf16mf2_tumu(vbool32_t mask, vbfloat16mf2_t vd, vbfloat16mf2_t vs1, vbfloat16mf2_t vs2, size_t vl) {
+ return __riscv_vfmsub_vv_bf16mf2_tumu(mask, vd, vs1, vs2, vl);
+}
+
+// CHECK-RV64-LABEL: define dso_local <vscale x 2 x bfloat> @test_vfmsub_vf_bf16mf2_tumu(
+// CHECK-RV64-SAME: <vscale x 2 x i1> [[MASK:%.*]], <vscale x 2 x bfloat> [[VD:%.*]], bfloat noundef [[RS1:%.*]], <vscale x 2 x bfloat> [[VS2:%.*]], i64 noundef [[VL:%.*]]) #[[ATTR0]] {
+// CHECK-RV64-NEXT: [[ENTRY:.*:]]
+// CHECK-RV64-NEXT: [[TMP0:%.*]] = call <vscale x 2 x bfloat> @llvm.riscv.vfmsub.mask.nxv2bf16.bf16.i64(<vscale x 2 x bfloat> [[VD]], bfloat [[RS1]], <vscale x 2 x bfloat> [[VS2]], <vscale x 2 x i1> [[MASK]], i64 7, i64 [[VL]], i64 0)
+// CHECK-RV64-NEXT: ret <vscale x 2 x bfloat> [[TMP0]]
+//
+vbfloat16mf2_t test_vfmsub_vf_bf16mf2_tumu(vbool32_t mask, vbfloat16mf2_t vd, __bf16 rs1, vbfloat16mf2_t vs2, size_t vl) {
+ return __riscv_vfmsub_vf_bf16mf2_tumu(mask, vd, rs1, vs2, vl);
+}
+
+// CHECK-RV64-LABEL: define dso_local <vscale x 4 x bfloat> @test_vfmsub_vv_bf16m1_tumu(
+// CHECK-RV64-SAME: <vscale x 4 x i1> [[MASK:%.*]], <vscale x 4 x bfloat> [[VD:%.*]], <vscale x 4 x bfloat> [[VS1:%.*]], <vscale x 4 x bfloat> [[VS2:%.*]], i64 noundef [[VL:%.*]]) #[[ATTR0]] {
+// CHECK-RV64-NEXT: [[ENTRY:.*:]]
+// CHECK-RV64-NEXT: [[TMP0:%.*]] = call <vscale x 4 x bfloat> @llvm.riscv.vfmsub.mask.nxv4bf16.nxv4bf16.i64(<vscale x 4 x bfloat> [[VD]], <vscale x 4 x bfloat> [[VS1]], <vscale x 4 x bfloat> [[VS2]], <vscale x 4 x i1> [[MASK]], i64 7, i64 [[VL]], i64 0)
+// CHECK-RV64-NEXT: ret <vscale x 4 x bfloat> [[TMP0]]
+//
+vbfloat16m1_t test_vfmsub_vv_bf16m1_tumu(vbool16_t mask, vbfloat16m1_t vd, vbfloat16m1_t vs1, vbfloat16m1_t vs2, size_t vl) {
+ return __riscv_vfmsub_vv_bf16m1_tumu(mask, vd, vs1, vs2, vl);
+}
+
+// CHECK-RV64-LABEL: define dso_local <vscale x 4 x bfloat> @test_vfmsub_vf_bf16m1_tumu(
+// CHECK-RV64-SAME: <vscale x 4 x i1> [[MASK:%.*]], <vscale x 4 x bfloat> [[VD:%.*]], bfloat noundef [[RS1:%.*]], <vscale x 4 x bfloat> [[VS2:%.*]], i64 noundef [[VL:%.*]]) #[[ATTR0]] {
+// CHECK-RV64-NEXT: [[ENTRY:.*:]]
+// CHECK-RV64-NEXT: [[TMP0:%.*]] = call <vscale x 4 x bfloat> @llvm.riscv.vfmsub.mask.nxv4bf16.bf16.i64(<vscale x 4 x bfloat> [[VD]], bfloat [[RS1]], <vscale x 4 x bfloat> [[VS2]], <vscale x 4 x i1> [[MASK]], i64 7, i64 [[VL]], i64 0)
+// CHECK-RV64-NEXT: ret <vscale x 4 x bfloat> [[TMP0]]
+//
+vbfloat16m1_t test_vfmsub_vf_bf16m1_tumu(vbool16_t mask, vbfloat16m1_t vd, __bf16 rs1, vbfloat16m1_t vs2, size_t vl) {
+ return __riscv_vfmsub_vf_bf16m1_tumu(mask, vd, rs1, vs2, vl);
+}
+
+// CHECK-RV64-LABEL: define dso_local <vscale x 8 x bfloat> @test_vfmsub_vv_bf16m2_tumu(
+// CHECK-RV64-SAME: <vscale x 8 x i1> [[MASK:%.*]], <vscale x 8 x bfloat> [[VD:%.*]], <vscale x 8 x bfloat> [[VS1:%.*]], <vscale x 8 x bfloat> [[VS2:%.*]], i64 noundef [[VL:%.*]]) #[[ATTR0]] {
+// CHECK-RV64-NEXT: [[ENTRY:.*:]]
+// CHECK-RV64-NEXT: [[TMP0:%.*]] = call <vscale x 8 x bfloat> @llvm.riscv.vfmsub.mask.nxv8bf16.nxv8bf16.i64(<vscale x 8 x bfloat> [[VD]], <vscale x 8 x bfloat> [[VS1]], <vscale x 8 x bfloat> [[VS2]], <vscale x 8 x i1> [[MASK]], i64 7, i64 [[VL]], i64 0)
+// CHECK-RV64-NEXT: ret <vscale x 8 x bfloat> [[TMP0]]
+//
+vbfloat16m2_t test_vfmsub_vv_bf16m2_tumu(vbool8_t mask, vbfloat16m2_t vd, vbfloat16m2_t vs1, vbfloat16m2_t vs2, size_t vl) {
+ return __riscv_vfmsub_vv_bf16m2_tumu(mask, vd, vs1, vs2, vl);
+}
+
+// CHECK-RV64-LABEL: define dso_local <vscale x 8 x bfloat> @test_vfmsub_vf_bf16m2_tumu(
+// CHECK-RV64-SAME: <vscale x 8 x i1> [[MASK:%.*]], <vscale x 8 x bfloat> [[VD:%.*]], bfloat noundef [[RS1:%.*]], <vscale x 8 x bfloat> [[VS2:%.*]], i64 noundef [[VL:%.*]]) #[[ATTR0]] {
+// CHECK-RV64-NEXT: [[ENTRY:.*:]]
+// CHECK-RV64-NEXT: [[TMP0:%.*]] = call <vscale x 8 x bfloat> @llvm.riscv.vfmsub.mask.nxv8bf16.bf16.i64(<vscale x 8 x bfloat> [[VD]], bfloat [[RS1]], <vscale x 8 x bfloat> [[VS2]], <vscale x 8 x i1> [[MASK]], i64 7, i64 [[VL]], i64 0)
+// CHECK-RV64-NEXT: ret <vscale x 8 x bfloat> [[TMP0]]
+//
+vbfloat16m2_t test_vfmsub_vf_bf16m2_tumu(vbool8_t mask, vbfloat16m2_t vd, __bf16 rs1, vbfloat16m2_t vs2, size_t vl) {
+ return __riscv_vfmsub_vf_bf16m2_tumu(mask, vd, rs1, vs2, vl);
+}
+
+// CHECK-RV64-LABEL: define dso_local <vscale x 16 x bfloat> @test_vfmsub_vv_bf16m4_tumu(
+// CHECK-RV64-SAME: <vscale x 16 x i1> [[MASK:%.*]], <vscale x 16 x bfloat> [[VD:%.*]], <vscale x 16 x bfloat> [[VS1:%.*]], <vscale x 16 x bfloat> [[VS2:%.*]], i64 noundef [[VL:%.*]]) #[[ATTR0]] {
+// CHECK-RV64-NEXT: [[ENTRY:.*:]]
+// CHECK-RV64-NEXT: [[TMP0:%.*]] = call <vscale x 16 x bfloat> @llvm.riscv.vfmsub.mask.nxv16bf16.nxv16bf16.i64(<vscale x 16 x bfloat> [[VD]], <vscale x 16 x bfloat> [[VS1]], <vscale x 16 x bfloat> [[VS2]], <vscale x 16 x i1> [[MASK]], i64 7, i64 [[VL]], i64 0)
+// CHECK-RV64-NEXT: ret <vscale x 16 x bfloat> [[TMP0]]
+//
+vbfloat16m4_t test_vfmsub_vv_bf16m4_tumu(vbool4_t mask, vbfloat16m4_t vd, vbfloat16m4_t vs1, vbfloat16m4_t vs2, size_t vl) {
+ return __riscv_vfmsub_vv_bf16m4_tumu(mask, vd, vs1, vs2, vl);
+}
+
+// CHECK-RV64-LABEL: define dso_local <vscale x 16 x bfloat> @test_vfmsub_vf_bf16m4_tumu(
+// CHECK-RV64-SAME: <vscale x 16 x i1> [[MASK:%.*]], <vscale x 16 x bfloat> [[VD:%.*]], bfloat noundef [[RS1:%.*]], <vscale x 16 x bfloat> [[VS2:%.*]], i64 noundef [[VL:%.*]]) #[[ATTR0]] {
+// CHECK-RV64-NEXT: [[ENTRY:.*:]]
+// CHECK-RV64-NEXT: [[TMP0:%.*]] = call <vscale x 16 x bfloat> @llvm.riscv.vfmsub.mask.nxv16bf16.bf16.i64(<vscale x 16 x bfloat> [[VD]], bfloat [[RS1]], <vscale x 16 x bfloat> [[VS2]], <vscale x 16 x i1> [[MASK]], i64 7, i64 [[VL]], i64 0)
+// CHECK-RV64-NEXT: ret <vscale x 16 x bfloat> [[TMP0]]
+//
+vbfloat16m4_t test_vfmsub_vf_bf16m4_tumu(vbool4_t mask, vbfloat16m4_t vd, __bf16 rs1, vbfloat16m4_t vs2, size_t vl) {
+ return __riscv_vfmsub_vf_bf16m4_tumu(mask, vd, rs1, vs2, vl);
+}
+
+// CHECK-RV64-LABEL: define dso_local <vscale x 32 x bfloat> @test_vfmsub_vv_bf16m8_tumu(
+// CHECK-RV64-SAME: <vscale x 32 x i1> [[MASK:%.*]], <vscale x 32 x bfloat> [[VD:%.*]], <vscale x 32 x bfloat> [[VS1:%.*]], <vscale x 32 x bfloat> [[VS2:%.*]], i64 noundef [[VL:%.*]]) #[[ATTR0]] {
+// CHECK-RV64-NEXT: [[ENTRY:.*:]]
+// CHECK-RV64-NEXT: [[TMP0:%.*]] = call <vscale x 32 x bfloat> @llvm.riscv.vfmsub.mask.nxv32bf16.nxv32bf16.i64(<vscale x 32 x bfloat> [[VD]], <vscale x 32 x bfloat> [[VS1]], <vscale x 32 x bfloat> [[VS2]], <vscale x 32 x i1> [[MASK]], i64 7, i64 [[VL]], i64 0)
+// CHECK-RV64-NEXT: ret <vscale x 32 x bfloat> [[TMP0]]
+//
+vbfloat16m8_t test_vfmsub_vv_bf16m8_tumu(vbool2_t mask, vbfloat16m8_t vd, vbfloat16m8_t vs1, vbfloat16m8_t vs2, size_t vl) {
+ return __riscv_vfmsub_vv_bf16m8_tumu(mask, vd, vs1, vs2, vl);
+}
+
+// CHECK-RV64-LABEL: define dso_local <vscale x 32 x bfloat> @test_vfmsub_vf_bf16m8_tumu(
+// CHECK-RV64-SAME: <vscale x 32 x i1> [[MASK:%.*]], <vscale x 32 x bfloat> [[VD:%.*]], bfloat noundef [[RS1:%.*]], <vscale x 32 x bfloat> [[VS2:%.*]], i64 noundef [[VL:%.*]]) #[[ATTR0]] {
+// CHECK-RV64-NEXT: [[ENTRY:.*:]]
+// CHECK-RV64-NEXT: [[TMP0:%.*]] = call <vscale x 32 x bfloat> @llvm.riscv.vfmsub.mask.nxv32bf16.bf16.i64(<vscale x 32 x bfloat> [[VD]], bfloat [[RS1]], <vscale x 32 x bfloat> [[VS2]], <vscale x 32 x i1> [[MASK]], i64 7, i64 [[VL]], i64 0)
+// CHECK-RV64-NEXT: ret <vscale x 32 x bfloat> [[TMP0]]
+//
+vbfloat16m8_t test_vfmsub_vf_bf16m8_tumu(vbool2_t mask, vbfloat16m8_t vd, __bf16 rs1, vbfloat16m8_t vs2, size_t vl) {
+ return __riscv_vfmsub_vf_bf16m8_tumu(mask, vd, rs1, vs2, vl);
+}
+
+// CHECK-RV64-LABEL: define dso_local <vscale x 1 x bfloat> @test_vfmsub_vv_bf16mf4_mu(
+// CHECK-RV64-SAME: <vscale x 1 x i1> [[MASK:%.*]], <vscale x 1 x bfloat> [[VD:%.*]], <vscale x 1 x bfloat> [[VS1:%.*]], <vscale x 1 x bfloat> [[VS2:%.*]], i64 noundef [[VL:%.*]]) #[[ATTR0]] {
+// CHECK-RV64-NEXT: [[ENTRY:.*:]]
+// CHECK-RV64-NEXT: [[TMP0:%.*]] = call <vscale x 1 x bfloat> @llvm.riscv.vfmsub.mask.nxv1bf16.nxv1bf16.i64(<vscale x 1 x bfloat> [[VD]], <vscale x 1 x bfloat> [[VS1]], <vscale x 1 x bfloat> [[VS2]], <vscale x 1 x i1> [[MASK]], i64 7, i64 [[VL]], i64 1)
+// CHECK-RV64-NEXT: ret <vscale x 1 x bfloat> [[TMP0]]
+//
+vbfloat16mf4_t test_vfmsub_vv_bf16mf4_mu(vbool64_t mask, vbfloat16mf4_t vd, vbfloat16mf4_t vs1, vbfloat16mf4_t vs2, size_t vl) {
+ return __riscv_vfmsub_vv_bf16mf4_mu(mask, vd, vs1, vs2, vl);
+}
+
+// CHECK-RV64-LABEL: define dso_local <vscale x 1 x bfloat> @test_vfmsub_vf_bf16mf4_mu(
+// CHECK-RV64-SAME: <vscale x 1 x i1> [[MASK:%.*]], <vscale x 1 x bfloat> [[VD:%.*]], bfloat noundef [[RS1:%.*]], <vscale x 1 x bfloat> [[VS2:%.*]], i64 noundef [[VL:%.*]]) #[[ATTR0]] {
+// CHECK-RV64-NEXT: [[ENTRY:.*:]]
+// CHECK-RV64-NEXT: [[TMP0:%.*]] = call <vscale x 1 x bfloat> @llvm.riscv.vfmsub.mask.nxv1bf16.bf16.i64(<vscale x 1 x bfloat> [[VD]], bfloat [[RS1]], <vscale x 1 x bfloat> [[VS2]], <vscale x 1 x i1> [[MASK]], i64 7, i64 [[VL]], i64 1)
+// CHECK-RV64-NEXT: ret <vscale x 1 x bfloat> [[TMP0]]
+//
+vbfloat16mf4_t test_vfmsub_vf_bf16mf4_mu(vbool64_t mask, vbfloat16mf4_t vd, __bf16 rs1, vbfloat16mf4_t vs2, size_t vl) {
+ return __riscv_vfmsub_vf_bf16mf4_mu(mask, vd, rs1, vs2, vl);
+}
+
+// CHECK-RV64-LABEL: define dso_local <vscale x 2 x bfloat> @test_vfmsub_vv_bf16mf2_mu(
+// CHECK-RV64-SAME: <vscale x 2 x i1> [[MASK:%.*]], <vscale x 2 x bfloat> [[VD:%.*]], <vscale x 2 x bfloat> [[VS1:%.*]], <vscale x 2 x bfloat> [[VS2:%.*]], i64 noundef [[VL:%.*]]) #[[ATTR0]] {
+// CHECK-RV64-NEXT: [[ENTRY:.*:]]
+// CHECK-RV64-NEXT: [[TMP0:%.*]] = call <vscale x 2 x bfloat> @llvm.riscv.vfmsub.mask.nxv2bf16.nxv2bf16.i64(<vscale x 2 x bfloat> [[VD]], <vscale x 2 x bfloat> [[VS1]], <vscale x 2 x bfloat> [[VS2]], <vscale x 2 x i1> [[MASK]], i64 7, i64 [[VL]], i64 1)
+// CHECK-RV64-NEXT: ret <vscale x 2 x bfloat> [[TMP0]]
+//
+vbfloat16mf2_t test_vfmsub_vv_bf16mf2_mu(vbool32_t mask, vbfloat16mf2_t vd, vbfloat16mf2_t vs1, vbfloat16mf2_t vs2, size_t vl) {
+ return __riscv_vfmsub_vv_bf16mf2_mu(mask, vd, vs1, vs2, vl);
+}
+
+// CHECK-RV64-LABEL: define dso_local <vscale x 2 x bfloat> @test_vfmsub_vf_bf16mf2_mu(
+// CHECK-RV64-SAME: <vscale x 2 x i1> [[MASK:%.*]], <vscale x 2 x bfloat> [[VD:%.*]], bfloat noundef [[RS1:%.*]], <vscale x 2 x bfloat> [[VS2:%.*]], i64 noundef [[VL:%.*]]) #[[ATTR0]] {
+// CHECK-RV64-NEXT: [[ENTRY:.*:]]
+// CHECK-RV64-NEXT: [[TMP0:%.*]] = call <vscale x 2 x bfloat> @llvm.riscv.vfmsub.mask.nxv2bf16.bf16.i64(<vscale x 2 x bfloat> [[VD]], bfloat [[RS1]], <vscale x 2 x bfloat> [[VS2]], <vscale x 2 x i1> [[MASK]], i64 7, i64 [[VL]], i64 1)
+// CHECK-RV64-NEXT: ret <vscale x 2 x bfloat> [[TMP0]]
+//
+vbfloat16mf2_t test_vfmsub_vf_bf16mf2_mu(vbool32_t mask, vbfloat16mf2_t vd, __bf16 rs1, vbfloat16mf2_t vs2, size_t vl) {
+ return __riscv_vfmsub_vf_bf16mf2_mu(mask, vd, rs1, vs2, vl);
+}
+
+// CHECK-RV64-LABEL: define dso_local <vscale x 4 x bfloat> @test_vfmsub_vv_bf16m1_mu(
+// CHECK-RV64-SAME: <vscale x 4 x i1> [[MASK:%.*]], <vscale x 4 x bfloat> [[VD:%.*]], <vscale x 4 x bfloat> [[VS1:%.*]], <vscale x 4 x bfloat> [[VS2:%.*]], i64 noundef [[VL:%.*]]) #[[ATTR0]] {
+// CHECK-RV64-NEXT: [[ENTRY:.*:]]
+// CHECK-RV64-NEXT: [[TMP0:%.*]] = call <vscale x 4 x bfloat> @llvm.riscv.vfmsub.mask.nxv4bf16.nxv4bf16.i64(<vscale x 4 x bfloat> [[VD]], <vscale x 4 x bfloat> [[VS1]], <vscale x 4 x bfloat> [[VS2]], <vscale x 4 x i1> [[MASK]], i64 7, i64 [[VL]], i64 1)
+// CHECK-RV64-NEXT: ret <vscale x 4 x bfloat> [[TMP0]]
+//
+vbfloat16m1_t test_vfmsub_vv_bf16m1_mu(vbool16_t mask, vbfloat16m1_t vd, vbfloat16m1_t vs1, vbfloat16m1_t vs2, size_t vl) {
+ return __riscv_vfmsub_vv_bf16m1_mu(mask, vd, vs1, vs2, vl);
+}
+
+// CHECK-RV64-LABEL: define dso_local <vscale x 4 x bfloat> @test_vfmsub_vf_bf16m1_mu(
+// CHECK-RV64-SAME: <vscale x 4 x i1> [[MASK:%.*]], <vscale x 4 x bfloat> [[VD:%.*]], bfloat noundef [[RS1:%.*]], <vscale x 4 x bfloat> [[VS2:%.*]], i64 noundef [[VL:%.*]]) #[[ATTR0]] {
+// CHECK-RV64-NEXT: [[ENTRY:.*:]]
+// CHECK-RV64-NEXT: [[TMP0:%.*]] = call <vscale x 4 x bfloat> @llvm.riscv.vfmsub.mask.nxv4bf16.bf16.i64(<vscale x 4 x bfloat> [[VD]], bfloat [[RS1]], <vscale x 4 x bfloat> [[VS2]], <vscale x 4 x i1> [[MASK]], i64 7, i64 [[VL]], i64 1)
+// CHECK-RV64-NEXT: ret <vscale x 4 x bfloat> [[TMP0]]
+//
+vbfloat16m1_t test_vfmsub_vf_bf16m1_mu(vbool16_t mask, vbfloat16m1_t vd, __bf16 rs1, vbfloat16m1_t vs2, size_t vl) {
+ return __riscv_vfmsub_vf_bf16m1_mu(mask, vd, rs1, vs2, vl);
+}
+
+// CHECK-RV64-LABEL: define dso_local <vscale x 8 x bfloat> @test_vfmsub_vv_bf16m2_mu(
+// CHECK-RV64-SAME: <vscale x 8 x i1> [[MASK:%.*]], <vscale x 8 x bfloat> [[VD:%.*]], <vscale x 8 x bfloat> [[VS1:%.*]], <vscale x 8 x bfloat> [[VS2:%.*]], i64 noundef [[VL:%.*]]) #[[ATTR0]] {
+// CHECK-RV64-NEXT: [[ENTRY:.*:]]
+// CHECK-RV64-NEXT: [[TMP0:%.*]] = call <vscale x 8 x bfloat> @llvm.riscv.vfmsub.mask.nxv8bf16.nxv8bf16.i64(<vscale x 8 x bfloat> [[VD]], <vscale x 8 x bfloat> [[VS1]], <vscale x 8 x bfloat> [[VS2]], <vscale x 8 x i1> [[MASK]], i64 7, i64 [[VL]], i64 1)
+// CHECK-RV64-NEXT: ret <vscale x 8 x bfloat> [[TMP0]]
+//
+vbfloat16m2_t test_vfmsub_vv_bf16m2_mu(vbool8_t mask, vbfloat16m2_t vd, vbfloat16m2_t vs1, vbfloat16m2_t vs2, size_t vl) {
+ return __riscv_vfmsub_vv_bf16m2_mu(mask, vd, vs1, vs2, vl);
+}
+
+// CHECK-RV64-LABEL: define dso_local <vscale x 8 x bfloat> @test_vfmsub_vf_bf16m2_mu(
+// CHECK-RV64-SAME: <vscale x 8 x i1> [[MASK:%.*]], <vscale x 8 x bfloat> [[VD:%.*]], bfloat noundef [[RS1:%.*]], <vscale x 8 x bfloat> [[VS2:%.*]], i64 noundef [[VL:%.*]]) #[[ATTR0]] {
+// CHECK-RV64-NEXT: [[ENTRY:.*:]]
+// CHECK-RV64-NEXT: [[TMP0:%.*]] = call <vscale x 8 x bfloat> @llvm.riscv.vfmsub.mask.nxv8bf16.bf16.i64(<vscale x 8 x bfloat> [[VD]], bfloat [[RS1]], <vscale x 8 x bfloat> [[VS2]], <vscale x 8 x i1> [[MASK]], i64 7, i64 [[VL]], i64 1)
+// CHECK-RV64-NEXT: ret <vscale x 8 x bfloat> [[TMP0]]
+//
+vbfloat16m2_t test_vfmsub_vf_bf16m2_mu(vbool8_t mask, vbfloat16m2_t vd, __bf16 rs1, vbfloat16m2_t vs2, size_t vl) {
+ return __riscv_vfmsub_vf_bf16m2_mu(mask, vd, rs1, vs2, vl);
+}
+
+// CHECK-RV64-LABEL: define dso_local <vscale x 16 x bfloat> @test_vfmsub_vv_bf16m4_mu(
+// CHECK-RV64-SAME: <vscale x 16 x i1> [[MASK:%.*]], <vscale x 16 x bfloat> [[VD:%.*]], <vscale x 16 x bfloat> [[VS1:%.*]], <vscale x 16 x bfloat> [[VS2:%.*]], i64 noundef [[VL:%.*]]) #[[ATTR0]] {
+// CHECK-RV64-NEXT: [[ENTRY:.*:]]
+// CHECK-RV64-NEXT: [[TMP0:%.*]] = call <vscale x 16 x bfloat> @llvm.riscv.vfmsub.mask.nxv16bf16.nxv16bf16.i64(<vscale x 16 x bfloat> [[VD]], <vscale x 16 x bfloat> [[VS1]], <vscale x 16 x bfloat> [[VS2]], <vscale x 16 x i1> [[MASK]], i64 7, i64 [[VL]], i64 1)
+// CHECK-RV64-NEXT: ret <vscale x 16 x bfloat> [[TMP0]]
+//
+vbfloat16m4_t test_vfmsub_vv_bf16m4_mu(vbool4_t mask, vbfloat16m4_t vd, vbfloat16m4_t vs1, vbfloat16m4_t vs2, size_t vl) {
+ return __riscv_vfmsub_vv_bf16m4_mu(mask, vd, vs1, vs2, vl);
+}
+
+// CHECK-RV64-LABEL: define dso_local <vscale x 16 x bfloat> @test_vfmsub_vf_bf16m4_mu(
+// CHECK-RV64-SAME: <vscale x 16 x i1> [[MASK:%.*]], <vscale x 16 x bfloat> [[VD:%.*]], bfloat noundef [[RS1:%.*]], <vscale x 16 x bfloat> [[VS2:%.*]], i64 noundef [[VL:%.*]]) #[[ATTR0]] {
+// CHECK-RV64-NEXT: [[ENTRY:.*:]]
+// CHECK-RV64-NEXT: [[TMP0:%.*]] = call <vscale x 16 x bfloat> @llvm.riscv.vfmsub.mask.nxv16bf16.bf16.i64(<vscale x 16 x bfloat> [[VD]], bfloat [[RS1]], <vscale x 16 x bfloat> [[VS2]], <vscale x 16 x i1> [[MASK]], i64 7, i64 [[VL]], i64 1)
+// CHECK-RV64-NEXT: ret <vscale x 16 x bfloat> [[TMP0]]
+//
+vbfloat16m4_t test_vfmsub_vf_bf16m4_mu(vbool4_t mask, vbfloat16m4_t vd, __bf16 rs1, vbfloat16m4_t vs2, size_t vl) {
+ return __riscv_vfmsub_vf_bf16m4_mu(mask, vd, rs1, vs2, vl);
+}
+
+// CHECK-RV64-LABEL: define dso_local <vscale x 32 x bfloat> @test_vfmsub_vv_bf16m8_mu(
+// CHECK-RV64-SAME: <vscale x 32 x i1> [[MASK:%.*]], <vscale x 32 x bfloat> [[VD:%.*]], <vscale x 32 x bfloat> [[VS1:%.*]], <vscale x 32 x bfloat> [[VS2:%.*]], i64 noundef [[VL:%.*]]) #[[ATTR0]] {
+// CHECK-RV64-NEXT: [[ENTRY:.*:]]
+// CHECK-RV64-NEXT: [[TMP0:%.*]] = call <vscale x 32 x bfloat> @llvm.riscv.vfmsub.mask.nxv32bf16.nxv32bf16.i64(<vscale x 32 x bfloat> [[VD]], <vscale x 32 x bfloat> [[VS1]], <vscale x 32 x bfloat> [[VS2]], <vscale x 32 x i1> [[MASK]], i64 7, i64 [[VL]], i64 1)
+// CHECK-RV64-NEXT: ret <vscale x 32 x bfloat> [[TMP0]]
+//
+vbfloat16m8_t test_vfmsub_vv_bf16m8_mu(vbool2_t mask, vbfloat16m8_t vd, vbfloat16m8_t vs1, vbfloat16m8_t vs2, size_t vl) {
+ return __riscv_vfmsub_vv_bf16m8_mu(mask, vd, vs1, vs2, vl);
+}
+
+// CHECK-RV64-LABEL: define dso_local <vscale x 32 x bfloat> @test_vfmsub_vf_bf16m8_mu(
+// CHECK-RV64-SAME: <vscale x 32 x i1> [[MASK:%.*]], <vscale x 32 x bfloat> [[VD:%.*]], bfloat noundef [[RS1:%.*]], <vscale x 32 x bfloat> [[VS2:%.*]], i64 noundef [[VL:%.*]]) #[[ATTR0]] {
+// CHECK-RV64-NEXT: [[ENTRY:.*:]]
+// CHECK-RV64-NEXT: [[TMP0:%.*]] = call <vscale x 32 x bfloat> @llvm.riscv.vfmsub.mask.nxv32bf16.bf16.i64(<vscale x 32 x bfloat> [[VD]], bfloat [[RS1]], <vscale x 32 x bfloat> [[VS2]], <vscale x 32 x i1> [[MASK]], i64 7, i64 [[VL]], i64 1)
+// CHECK-RV64-NEXT: ret <vscale x 32 x bfloat> [[TMP0]]
+//
+vbfloat16m8_t test_vfmsub_vf_bf16m8_mu(vbool2_t mask, vbfloat16m8_t vd, __bf16 rs1, vbfloat16m8_t vs2, size_t vl) {
+ return __riscv_vfmsub_vf_bf16m8_mu(mask, vd, rs1, vs2, vl);
+}
+
diff --git a/clang/test/CodeGen/RISCV/rvv-intrinsics-autogenerated/zvfbfa/policy/non-overloaded/vfmul.c b/clang/test/CodeGen/RISCV/rvv-intrinsics-autogenerated/zvfbfa/policy/non-overloaded/vfmul.c
new file mode 100644
index 0000000..716f056
--- /dev/null
+++ b/clang/test/CodeGen/RISCV/rvv-intrinsics-autogenerated/zvfbfa/policy/non-overloaded/vfmul.c
@@ -0,0 +1,489 @@
+// NOTE: Assertions have been autogenerated by utils/update_cc_test_checks.py UTC_ARGS: --version 5
+// REQUIRES: riscv-registered-target
+// RUN: %clang_cc1 -triple riscv64 -target-feature +v \
+// RUN: -target-feature +experimental-zvfbfa -disable-O0-optnone \
+// RUN: -emit-llvm %s -o - | opt -S -passes=mem2reg | \
+// RUN: FileCheck --check-prefix=CHECK-RV64 %s
+
+#include <riscv_vector.h>
+
+// CHECK-RV64-LABEL: define dso_local <vscale x 1 x bfloat> @test_vfmul_vv_bf16mf4_tu(
+// CHECK-RV64-SAME: <vscale x 1 x bfloat> [[MASKEDOFF:%.*]], <vscale x 1 x bfloat> [[OP1:%.*]], <vscale x 1 x bfloat> [[OP2:%.*]], i64 noundef [[VL:%.*]]) #[[ATTR0:[0-9]+]] {
+// CHECK-RV64-NEXT: [[ENTRY:.*:]]
+// CHECK-RV64-NEXT: [[TMP0:%.*]] = call <vscale x 1 x bfloat> @llvm.riscv.vfmul.nxv1bf16.nxv1bf16.i64(<vscale x 1 x bfloat> [[MASKEDOFF]], <vscale x 1 x bfloat> [[OP1]], <vscale x 1 x bfloat> [[OP2]], i64 7, i64 [[VL]])
+// CHECK-RV64-NEXT: ret <vscale x 1 x bfloat> [[TMP0]]
+//
+vbfloat16mf4_t test_vfmul_vv_bf16mf4_tu(vbfloat16mf4_t maskedoff, vbfloat16mf4_t op1, vbfloat16mf4_t op2, size_t vl) {
+ return __riscv_vfmul_vv_bf16mf4_tu(maskedoff, op1, op2, vl);
+}
+
+// CHECK-RV64-LABEL: define dso_local <vscale x 1 x bfloat> @test_vfmul_vf_bf16mf4_tu(
+// CHECK-RV64-SAME: <vscale x 1 x bfloat> [[MASKEDOFF:%.*]], <vscale x 1 x bfloat> [[OP1:%.*]], bfloat noundef [[OP2:%.*]], i64 noundef [[VL:%.*]]) #[[ATTR0]] {
+// CHECK-RV64-NEXT: [[ENTRY:.*:]]
+// CHECK-RV64-NEXT: [[TMP0:%.*]] = call <vscale x 1 x bfloat> @llvm.riscv.vfmul.nxv1bf16.bf16.i64(<vscale x 1 x bfloat> [[MASKEDOFF]], <vscale x 1 x bfloat> [[OP1]], bfloat [[OP2]], i64 7, i64 [[VL]])
+// CHECK-RV64-NEXT: ret <vscale x 1 x bfloat> [[TMP0]]
+//
+vbfloat16mf4_t test_vfmul_vf_bf16mf4_tu(vbfloat16mf4_t maskedoff, vbfloat16mf4_t op1, __bf16 op2, size_t vl) {
+ return __riscv_vfmul_vf_bf16mf4_tu(maskedoff, op1, op2, vl);
+}
+
+// CHECK-RV64-LABEL: define dso_local <vscale x 2 x bfloat> @test_vfmul_vv_bf16mf2_tu(
+// CHECK-RV64-SAME: <vscale x 2 x bfloat> [[MASKEDOFF:%.*]], <vscale x 2 x bfloat> [[OP1:%.*]], <vscale x 2 x bfloat> [[OP2:%.*]], i64 noundef [[VL:%.*]]) #[[ATTR0]] {
+// CHECK-RV64-NEXT: [[ENTRY:.*:]]
+// CHECK-RV64-NEXT: [[TMP0:%.*]] = call <vscale x 2 x bfloat> @llvm.riscv.vfmul.nxv2bf16.nxv2bf16.i64(<vscale x 2 x bfloat> [[MASKEDOFF]], <vscale x 2 x bfloat> [[OP1]], <vscale x 2 x bfloat> [[OP2]], i64 7, i64 [[VL]])
+// CHECK-RV64-NEXT: ret <vscale x 2 x bfloat> [[TMP0]]
+//
+vbfloat16mf2_t test_vfmul_vv_bf16mf2_tu(vbfloat16mf2_t maskedoff, vbfloat16mf2_t op1, vbfloat16mf2_t op2, size_t vl) {
+ return __riscv_vfmul_vv_bf16mf2_tu(maskedoff, op1, op2, vl);
+}
+
+// CHECK-RV64-LABEL: define dso_local <vscale x 2 x bfloat> @test_vfmul_vf_bf16mf2_tu(
+// CHECK-RV64-SAME: <vscale x 2 x bfloat> [[MASKEDOFF:%.*]], <vscale x 2 x bfloat> [[OP1:%.*]], bfloat noundef [[OP2:%.*]], i64 noundef [[VL:%.*]]) #[[ATTR0]] {
+// CHECK-RV64-NEXT: [[ENTRY:.*:]]
+// CHECK-RV64-NEXT: [[TMP0:%.*]] = call <vscale x 2 x bfloat> @llvm.riscv.vfmul.nxv2bf16.bf16.i64(<vscale x 2 x bfloat> [[MASKEDOFF]], <vscale x 2 x bfloat> [[OP1]], bfloat [[OP2]], i64 7, i64 [[VL]])
+// CHECK-RV64-NEXT: ret <vscale x 2 x bfloat> [[TMP0]]
+//
+vbfloat16mf2_t test_vfmul_vf_bf16mf2_tu(vbfloat16mf2_t maskedoff, vbfloat16mf2_t op1, __bf16 op2, size_t vl) {
+ return __riscv_vfmul_vf_bf16mf2_tu(maskedoff, op1, op2, vl);
+}
+
+// CHECK-RV64-LABEL: define dso_local <vscale x 4 x bfloat> @test_vfmul_vv_bf16m1_tu(
+// CHECK-RV64-SAME: <vscale x 4 x bfloat> [[MASKEDOFF:%.*]], <vscale x 4 x bfloat> [[OP1:%.*]], <vscale x 4 x bfloat> [[OP2:%.*]], i64 noundef [[VL:%.*]]) #[[ATTR0]] {
+// CHECK-RV64-NEXT: [[ENTRY:.*:]]
+// CHECK-RV64-NEXT: [[TMP0:%.*]] = call <vscale x 4 x bfloat> @llvm.riscv.vfmul.nxv4bf16.nxv4bf16.i64(<vscale x 4 x bfloat> [[MASKEDOFF]], <vscale x 4 x bfloat> [[OP1]], <vscale x 4 x bfloat> [[OP2]], i64 7, i64 [[VL]])
+// CHECK-RV64-NEXT: ret <vscale x 4 x bfloat> [[TMP0]]
+//
+vbfloat16m1_t test_vfmul_vv_bf16m1_tu(vbfloat16m1_t maskedoff, vbfloat16m1_t op1, vbfloat16m1_t op2, size_t vl) {
+ return __riscv_vfmul_vv_bf16m1_tu(maskedoff, op1, op2, vl);
+}
+
+// CHECK-RV64-LABEL: define dso_local <vscale x 4 x bfloat> @test_vfmul_vf_bf16m1_tu(
+// CHECK-RV64-SAME: <vscale x 4 x bfloat> [[MASKEDOFF:%.*]], <vscale x 4 x bfloat> [[OP1:%.*]], bfloat noundef [[OP2:%.*]], i64 noundef [[VL:%.*]]) #[[ATTR0]] {
+// CHECK-RV64-NEXT: [[ENTRY:.*:]]
+// CHECK-RV64-NEXT: [[TMP0:%.*]] = call <vscale x 4 x bfloat> @llvm.riscv.vfmul.nxv4bf16.bf16.i64(<vscale x 4 x bfloat> [[MASKEDOFF]], <vscale x 4 x bfloat> [[OP1]], bfloat [[OP2]], i64 7, i64 [[VL]])
+// CHECK-RV64-NEXT: ret <vscale x 4 x bfloat> [[TMP0]]
+//
+vbfloat16m1_t test_vfmul_vf_bf16m1_tu(vbfloat16m1_t maskedoff, vbfloat16m1_t op1, __bf16 op2, size_t vl) {
+ return __riscv_vfmul_vf_bf16m1_tu(maskedoff, op1, op2, vl);
+}
+
+// CHECK-RV64-LABEL: define dso_local <vscale x 8 x bfloat> @test_vfmul_vv_bf16m2_tu(
+// CHECK-RV64-SAME: <vscale x 8 x bfloat> [[MASKEDOFF:%.*]], <vscale x 8 x bfloat> [[OP1:%.*]], <vscale x 8 x bfloat> [[OP2:%.*]], i64 noundef [[VL:%.*]]) #[[ATTR0]] {
+// CHECK-RV64-NEXT: [[ENTRY:.*:]]
+// CHECK-RV64-NEXT: [[TMP0:%.*]] = call <vscale x 8 x bfloat> @llvm.riscv.vfmul.nxv8bf16.nxv8bf16.i64(<vscale x 8 x bfloat> [[MASKEDOFF]], <vscale x 8 x bfloat> [[OP1]], <vscale x 8 x bfloat> [[OP2]], i64 7, i64 [[VL]])
+// CHECK-RV64-NEXT: ret <vscale x 8 x bfloat> [[TMP0]]
+//
+vbfloat16m2_t test_vfmul_vv_bf16m2_tu(vbfloat16m2_t maskedoff, vbfloat16m2_t op1, vbfloat16m2_t op2, size_t vl) {
+ return __riscv_vfmul_vv_bf16m2_tu(maskedoff, op1, op2, vl);
+}
+
+// CHECK-RV64-LABEL: define dso_local <vscale x 8 x bfloat> @test_vfmul_vf_bf16m2_tu(
+// CHECK-RV64-SAME: <vscale x 8 x bfloat> [[MASKEDOFF:%.*]], <vscale x 8 x bfloat> [[OP1:%.*]], bfloat noundef [[OP2:%.*]], i64 noundef [[VL:%.*]]) #[[ATTR0]] {
+// CHECK-RV64-NEXT: [[ENTRY:.*:]]
+// CHECK-RV64-NEXT: [[TMP0:%.*]] = call <vscale x 8 x bfloat> @llvm.riscv.vfmul.nxv8bf16.bf16.i64(<vscale x 8 x bfloat> [[MASKEDOFF]], <vscale x 8 x bfloat> [[OP1]], bfloat [[OP2]], i64 7, i64 [[VL]])
+// CHECK-RV64-NEXT: ret <vscale x 8 x bfloat> [[TMP0]]
+//
+vbfloat16m2_t test_vfmul_vf_bf16m2_tu(vbfloat16m2_t maskedoff, vbfloat16m2_t op1, __bf16 op2, size_t vl) {
+ return __riscv_vfmul_vf_bf16m2_tu(maskedoff, op1, op2, vl);
+}
+
+// CHECK-RV64-LABEL: define dso_local <vscale x 16 x bfloat> @test_vfmul_vv_bf16m4_tu(
+// CHECK-RV64-SAME: <vscale x 16 x bfloat> [[MASKEDOFF:%.*]], <vscale x 16 x bfloat> [[OP1:%.*]], <vscale x 16 x bfloat> [[OP2:%.*]], i64 noundef [[VL:%.*]]) #[[ATTR0]] {
+// CHECK-RV64-NEXT: [[ENTRY:.*:]]
+// CHECK-RV64-NEXT: [[TMP0:%.*]] = call <vscale x 16 x bfloat> @llvm.riscv.vfmul.nxv16bf16.nxv16bf16.i64(<vscale x 16 x bfloat> [[MASKEDOFF]], <vscale x 16 x bfloat> [[OP1]], <vscale x 16 x bfloat> [[OP2]], i64 7, i64 [[VL]])
+// CHECK-RV64-NEXT: ret <vscale x 16 x bfloat> [[TMP0]]
+//
+vbfloat16m4_t test_vfmul_vv_bf16m4_tu(vbfloat16m4_t maskedoff, vbfloat16m4_t op1, vbfloat16m4_t op2, size_t vl) {
+ return __riscv_vfmul_vv_bf16m4_tu(maskedoff, op1, op2, vl);
+}
+
+// CHECK-RV64-LABEL: define dso_local <vscale x 16 x bfloat> @test_vfmul_vf_bf16m4_tu(
+// CHECK-RV64-SAME: <vscale x 16 x bfloat> [[MASKEDOFF:%.*]], <vscale x 16 x bfloat> [[OP1:%.*]], bfloat noundef [[OP2:%.*]], i64 noundef [[VL:%.*]]) #[[ATTR0]] {
+// CHECK-RV64-NEXT: [[ENTRY:.*:]]
+// CHECK-RV64-NEXT: [[TMP0:%.*]] = call <vscale x 16 x bfloat> @llvm.riscv.vfmul.nxv16bf16.bf16.i64(<vscale x 16 x bfloat> [[MASKEDOFF]], <vscale x 16 x bfloat> [[OP1]], bfloat [[OP2]], i64 7, i64 [[VL]])
+// CHECK-RV64-NEXT: ret <vscale x 16 x bfloat> [[TMP0]]
+//
+vbfloat16m4_t test_vfmul_vf_bf16m4_tu(vbfloat16m4_t maskedoff, vbfloat16m4_t op1, __bf16 op2, size_t vl) {
+ return __riscv_vfmul_vf_bf16m4_tu(maskedoff, op1, op2, vl);
+}
+
+// CHECK-RV64-LABEL: define dso_local <vscale x 32 x bfloat> @test_vfmul_vv_bf16m8_tu(
+// CHECK-RV64-SAME: <vscale x 32 x bfloat> [[MASKEDOFF:%.*]], <vscale x 32 x bfloat> [[OP1:%.*]], <vscale x 32 x bfloat> [[OP2:%.*]], i64 noundef [[VL:%.*]]) #[[ATTR0]] {
+// CHECK-RV64-NEXT: [[ENTRY:.*:]]
+// CHECK-RV64-NEXT: [[TMP0:%.*]] = call <vscale x 32 x bfloat> @llvm.riscv.vfmul.nxv32bf16.nxv32bf16.i64(<vscale x 32 x bfloat> [[MASKEDOFF]], <vscale x 32 x bfloat> [[OP1]], <vscale x 32 x bfloat> [[OP2]], i64 7, i64 [[VL]])
+// CHECK-RV64-NEXT: ret <vscale x 32 x bfloat> [[TMP0]]
+//
+vbfloat16m8_t test_vfmul_vv_bf16m8_tu(vbfloat16m8_t maskedoff, vbfloat16m8_t op1, vbfloat16m8_t op2, size_t vl) {
+ return __riscv_vfmul_vv_bf16m8_tu(maskedoff, op1, op2, vl);
+}
+
+// CHECK-RV64-LABEL: define dso_local <vscale x 32 x bfloat> @test_vfmul_vf_bf16m8_tu(
+// CHECK-RV64-SAME: <vscale x 32 x bfloat> [[MASKEDOFF:%.*]], <vscale x 32 x bfloat> [[OP1:%.*]], bfloat noundef [[OP2:%.*]], i64 noundef [[VL:%.*]]) #[[ATTR0]] {
+// CHECK-RV64-NEXT: [[ENTRY:.*:]]
+// CHECK-RV64-NEXT: [[TMP0:%.*]] = call <vscale x 32 x bfloat> @llvm.riscv.vfmul.nxv32bf16.bf16.i64(<vscale x 32 x bfloat> [[MASKEDOFF]], <vscale x 32 x bfloat> [[OP1]], bfloat [[OP2]], i64 7, i64 [[VL]])
+// CHECK-RV64-NEXT: ret <vscale x 32 x bfloat> [[TMP0]]
+//
+vbfloat16m8_t test_vfmul_vf_bf16m8_tu(vbfloat16m8_t maskedoff, vbfloat16m8_t op1, __bf16 op2, size_t vl) {
+ return __riscv_vfmul_vf_bf16m8_tu(maskedoff, op1, op2, vl);
+}
+
+// CHECK-RV64-LABEL: define dso_local <vscale x 1 x bfloat> @test_vfmul_vv_bf16mf4_tum(
+// CHECK-RV64-SAME: <vscale x 1 x i1> [[MASK:%.*]], <vscale x 1 x bfloat> [[MASKEDOFF:%.*]], <vscale x 1 x bfloat> [[OP1:%.*]], <vscale x 1 x bfloat> [[OP2:%.*]], i64 noundef [[VL:%.*]]) #[[ATTR0]] {
+// CHECK-RV64-NEXT: [[ENTRY:.*:]]
+// CHECK-RV64-NEXT: [[TMP0:%.*]] = call <vscale x 1 x bfloat> @llvm.riscv.vfmul.mask.nxv1bf16.nxv1bf16.i64(<vscale x 1 x bfloat> [[MASKEDOFF]], <vscale x 1 x bfloat> [[OP1]], <vscale x 1 x bfloat> [[OP2]], <vscale x 1 x i1> [[MASK]], i64 7, i64 [[VL]], i64 2)
+// CHECK-RV64-NEXT: ret <vscale x 1 x bfloat> [[TMP0]]
+//
+vbfloat16mf4_t test_vfmul_vv_bf16mf4_tum(vbool64_t mask, vbfloat16mf4_t maskedoff, vbfloat16mf4_t op1, vbfloat16mf4_t op2, size_t vl) {
+ return __riscv_vfmul_vv_bf16mf4_tum(mask, maskedoff, op1, op2, vl);
+}
+
+// CHECK-RV64-LABEL: define dso_local <vscale x 1 x bfloat> @test_vfmul_vf_bf16mf4_tum(
+// CHECK-RV64-SAME: <vscale x 1 x i1> [[MASK:%.*]], <vscale x 1 x bfloat> [[MASKEDOFF:%.*]], <vscale x 1 x bfloat> [[OP1:%.*]], bfloat noundef [[OP2:%.*]], i64 noundef [[VL:%.*]]) #[[ATTR0]] {
+// CHECK-RV64-NEXT: [[ENTRY:.*:]]
+// CHECK-RV64-NEXT: [[TMP0:%.*]] = call <vscale x 1 x bfloat> @llvm.riscv.vfmul.mask.nxv1bf16.bf16.i64(<vscale x 1 x bfloat> [[MASKEDOFF]], <vscale x 1 x bfloat> [[OP1]], bfloat [[OP2]], <vscale x 1 x i1> [[MASK]], i64 7, i64 [[VL]], i64 2)
+// CHECK-RV64-NEXT: ret <vscale x 1 x bfloat> [[TMP0]]
+//
+vbfloat16mf4_t test_vfmul_vf_bf16mf4_tum(vbool64_t mask, vbfloat16mf4_t maskedoff, vbfloat16mf4_t op1, __bf16 op2, size_t vl) {
+ return __riscv_vfmul_vf_bf16mf4_tum(mask, maskedoff, op1, op2, vl);
+}
+
+// CHECK-RV64-LABEL: define dso_local <vscale x 2 x bfloat> @test_vfmul_vv_bf16mf2_tum(
+// CHECK-RV64-SAME: <vscale x 2 x i1> [[MASK:%.*]], <vscale x 2 x bfloat> [[MASKEDOFF:%.*]], <vscale x 2 x bfloat> [[OP1:%.*]], <vscale x 2 x bfloat> [[OP2:%.*]], i64 noundef [[VL:%.*]]) #[[ATTR0]] {
+// CHECK-RV64-NEXT: [[ENTRY:.*:]]
+// CHECK-RV64-NEXT: [[TMP0:%.*]] = call <vscale x 2 x bfloat> @llvm.riscv.vfmul.mask.nxv2bf16.nxv2bf16.i64(<vscale x 2 x bfloat> [[MASKEDOFF]], <vscale x 2 x bfloat> [[OP1]], <vscale x 2 x bfloat> [[OP2]], <vscale x 2 x i1> [[MASK]], i64 7, i64 [[VL]], i64 2)
+// CHECK-RV64-NEXT: ret <vscale x 2 x bfloat> [[TMP0]]
+//
+vbfloat16mf2_t test_vfmul_vv_bf16mf2_tum(vbool32_t mask, vbfloat16mf2_t maskedoff, vbfloat16mf2_t op1, vbfloat16mf2_t op2, size_t vl) {
+ return __riscv_vfmul_vv_bf16mf2_tum(mask, maskedoff, op1, op2, vl);
+}
+
+// CHECK-RV64-LABEL: define dso_local <vscale x 2 x bfloat> @test_vfmul_vf_bf16mf2_tum(
+// CHECK-RV64-SAME: <vscale x 2 x i1> [[MASK:%.*]], <vscale x 2 x bfloat> [[MASKEDOFF:%.*]], <vscale x 2 x bfloat> [[OP1:%.*]], bfloat noundef [[OP2:%.*]], i64 noundef [[VL:%.*]]) #[[ATTR0]] {
+// CHECK-RV64-NEXT: [[ENTRY:.*:]]
+// CHECK-RV64-NEXT: [[TMP0:%.*]] = call <vscale x 2 x bfloat> @llvm.riscv.vfmul.mask.nxv2bf16.bf16.i64(<vscale x 2 x bfloat> [[MASKEDOFF]], <vscale x 2 x bfloat> [[OP1]], bfloat [[OP2]], <vscale x 2 x i1> [[MASK]], i64 7, i64 [[VL]], i64 2)
+// CHECK-RV64-NEXT: ret <vscale x 2 x bfloat> [[TMP0]]
+//
+vbfloat16mf2_t test_vfmul_vf_bf16mf2_tum(vbool32_t mask, vbfloat16mf2_t maskedoff, vbfloat16mf2_t op1, __bf16 op2, size_t vl) {
+ return __riscv_vfmul_vf_bf16mf2_tum(mask, maskedoff, op1, op2, vl);
+}
+
+// CHECK-RV64-LABEL: define dso_local <vscale x 4 x bfloat> @test_vfmul_vv_bf16m1_tum(
+// CHECK-RV64-SAME: <vscale x 4 x i1> [[MASK:%.*]], <vscale x 4 x bfloat> [[MASKEDOFF:%.*]], <vscale x 4 x bfloat> [[OP1:%.*]], <vscale x 4 x bfloat> [[OP2:%.*]], i64 noundef [[VL:%.*]]) #[[ATTR0]] {
+// CHECK-RV64-NEXT: [[ENTRY:.*:]]
+// CHECK-RV64-NEXT: [[TMP0:%.*]] = call <vscale x 4 x bfloat> @llvm.riscv.vfmul.mask.nxv4bf16.nxv4bf16.i64(<vscale x 4 x bfloat> [[MASKEDOFF]], <vscale x 4 x bfloat> [[OP1]], <vscale x 4 x bfloat> [[OP2]], <vscale x 4 x i1> [[MASK]], i64 7, i64 [[VL]], i64 2)
+// CHECK-RV64-NEXT: ret <vscale x 4 x bfloat> [[TMP0]]
+//
+vbfloat16m1_t test_vfmul_vv_bf16m1_tum(vbool16_t mask, vbfloat16m1_t maskedoff, vbfloat16m1_t op1, vbfloat16m1_t op2, size_t vl) {
+ return __riscv_vfmul_vv_bf16m1_tum(mask, maskedoff, op1, op2, vl);
+}
+
+// CHECK-RV64-LABEL: define dso_local <vscale x 4 x bfloat> @test_vfmul_vf_bf16m1_tum(
+// CHECK-RV64-SAME: <vscale x 4 x i1> [[MASK:%.*]], <vscale x 4 x bfloat> [[MASKEDOFF:%.*]], <vscale x 4 x bfloat> [[OP1:%.*]], bfloat noundef [[OP2:%.*]], i64 noundef [[VL:%.*]]) #[[ATTR0]] {
+// CHECK-RV64-NEXT: [[ENTRY:.*:]]
+// CHECK-RV64-NEXT: [[TMP0:%.*]] = call <vscale x 4 x bfloat> @llvm.riscv.vfmul.mask.nxv4bf16.bf16.i64(<vscale x 4 x bfloat> [[MASKEDOFF]], <vscale x 4 x bfloat> [[OP1]], bfloat [[OP2]], <vscale x 4 x i1> [[MASK]], i64 7, i64 [[VL]], i64 2)
+// CHECK-RV64-NEXT: ret <vscale x 4 x bfloat> [[TMP0]]
+//
+vbfloat16m1_t test_vfmul_vf_bf16m1_tum(vbool16_t mask, vbfloat16m1_t maskedoff, vbfloat16m1_t op1, __bf16 op2, size_t vl) {
+ return __riscv_vfmul_vf_bf16m1_tum(mask, maskedoff, op1, op2, vl);
+}
+
+// CHECK-RV64-LABEL: define dso_local <vscale x 8 x bfloat> @test_vfmul_vv_bf16m2_tum(
+// CHECK-RV64-SAME: <vscale x 8 x i1> [[MASK:%.*]], <vscale x 8 x bfloat> [[MASKEDOFF:%.*]], <vscale x 8 x bfloat> [[OP1:%.*]], <vscale x 8 x bfloat> [[OP2:%.*]], i64 noundef [[VL:%.*]]) #[[ATTR0]] {
+// CHECK-RV64-NEXT: [[ENTRY:.*:]]
+// CHECK-RV64-NEXT: [[TMP0:%.*]] = call <vscale x 8 x bfloat> @llvm.riscv.vfmul.mask.nxv8bf16.nxv8bf16.i64(<vscale x 8 x bfloat> [[MASKEDOFF]], <vscale x 8 x bfloat> [[OP1]], <vscale x 8 x bfloat> [[OP2]], <vscale x 8 x i1> [[MASK]], i64 7, i64 [[VL]], i64 2)
+// CHECK-RV64-NEXT: ret <vscale x 8 x bfloat> [[TMP0]]
+//
+vbfloat16m2_t test_vfmul_vv_bf16m2_tum(vbool8_t mask, vbfloat16m2_t maskedoff, vbfloat16m2_t op1, vbfloat16m2_t op2, size_t vl) {
+ return __riscv_vfmul_vv_bf16m2_tum(mask, maskedoff, op1, op2, vl);
+}
+
+// CHECK-RV64-LABEL: define dso_local <vscale x 8 x bfloat> @test_vfmul_vf_bf16m2_tum(
+// CHECK-RV64-SAME: <vscale x 8 x i1> [[MASK:%.*]], <vscale x 8 x bfloat> [[MASKEDOFF:%.*]], <vscale x 8 x bfloat> [[OP1:%.*]], bfloat noundef [[OP2:%.*]], i64 noundef [[VL:%.*]]) #[[ATTR0]] {
+// CHECK-RV64-NEXT: [[ENTRY:.*:]]
+// CHECK-RV64-NEXT: [[TMP0:%.*]] = call <vscale x 8 x bfloat> @llvm.riscv.vfmul.mask.nxv8bf16.bf16.i64(<vscale x 8 x bfloat> [[MASKEDOFF]], <vscale x 8 x bfloat> [[OP1]], bfloat [[OP2]], <vscale x 8 x i1> [[MASK]], i64 7, i64 [[VL]], i64 2)
+// CHECK-RV64-NEXT: ret <vscale x 8 x bfloat> [[TMP0]]
+//
+vbfloat16m2_t test_vfmul_vf_bf16m2_tum(vbool8_t mask, vbfloat16m2_t maskedoff, vbfloat16m2_t op1, __bf16 op2, size_t vl) {
+ return __riscv_vfmul_vf_bf16m2_tum(mask, maskedoff, op1, op2, vl);
+}
+
+// CHECK-RV64-LABEL: define dso_local <vscale x 16 x bfloat> @test_vfmul_vv_bf16m4_tum(
+// CHECK-RV64-SAME: <vscale x 16 x i1> [[MASK:%.*]], <vscale x 16 x bfloat> [[MASKEDOFF:%.*]], <vscale x 16 x bfloat> [[OP1:%.*]], <vscale x 16 x bfloat> [[OP2:%.*]], i64 noundef [[VL:%.*]]) #[[ATTR0]] {
+// CHECK-RV64-NEXT: [[ENTRY:.*:]]
+// CHECK-RV64-NEXT: [[TMP0:%.*]] = call <vscale x 16 x bfloat> @llvm.riscv.vfmul.mask.nxv16bf16.nxv16bf16.i64(<vscale x 16 x bfloat> [[MASKEDOFF]], <vscale x 16 x bfloat> [[OP1]], <vscale x 16 x bfloat> [[OP2]], <vscale x 16 x i1> [[MASK]], i64 7, i64 [[VL]], i64 2)
+// CHECK-RV64-NEXT: ret <vscale x 16 x bfloat> [[TMP0]]
+//
+vbfloat16m4_t test_vfmul_vv_bf16m4_tum(vbool4_t mask, vbfloat16m4_t maskedoff, vbfloat16m4_t op1, vbfloat16m4_t op2, size_t vl) {
+ return __riscv_vfmul_vv_bf16m4_tum(mask, maskedoff, op1, op2, vl);
+}
+
+// CHECK-RV64-LABEL: define dso_local <vscale x 16 x bfloat> @test_vfmul_vf_bf16m4_tum(
+// CHECK-RV64-SAME: <vscale x 16 x i1> [[MASK:%.*]], <vscale x 16 x bfloat> [[MASKEDOFF:%.*]], <vscale x 16 x bfloat> [[OP1:%.*]], bfloat noundef [[OP2:%.*]], i64 noundef [[VL:%.*]]) #[[ATTR0]] {
+// CHECK-RV64-NEXT: [[ENTRY:.*:]]
+// CHECK-RV64-NEXT: [[TMP0:%.*]] = call <vscale x 16 x bfloat> @llvm.riscv.vfmul.mask.nxv16bf16.bf16.i64(<vscale x 16 x bfloat> [[MASKEDOFF]], <vscale x 16 x bfloat> [[OP1]], bfloat [[OP2]], <vscale x 16 x i1> [[MASK]], i64 7, i64 [[VL]], i64 2)
+// CHECK-RV64-NEXT: ret <vscale x 16 x bfloat> [[TMP0]]
+//
+vbfloat16m4_t test_vfmul_vf_bf16m4_tum(vbool4_t mask, vbfloat16m4_t maskedoff, vbfloat16m4_t op1, __bf16 op2, size_t vl) {
+ return __riscv_vfmul_vf_bf16m4_tum(mask, maskedoff, op1, op2, vl);
+}
+
+// CHECK-RV64-LABEL: define dso_local <vscale x 32 x bfloat> @test_vfmul_vv_bf16m8_tum(
+// CHECK-RV64-SAME: <vscale x 32 x i1> [[MASK:%.*]], <vscale x 32 x bfloat> [[MASKEDOFF:%.*]], <vscale x 32 x bfloat> [[OP1:%.*]], <vscale x 32 x bfloat> [[OP2:%.*]], i64 noundef [[VL:%.*]]) #[[ATTR0]] {
+// CHECK-RV64-NEXT: [[ENTRY:.*:]]
+// CHECK-RV64-NEXT: [[TMP0:%.*]] = call <vscale x 32 x bfloat> @llvm.riscv.vfmul.mask.nxv32bf16.nxv32bf16.i64(<vscale x 32 x bfloat> [[MASKEDOFF]], <vscale x 32 x bfloat> [[OP1]], <vscale x 32 x bfloat> [[OP2]], <vscale x 32 x i1> [[MASK]], i64 7, i64 [[VL]], i64 2)
+// CHECK-RV64-NEXT: ret <vscale x 32 x bfloat> [[TMP0]]
+//
+vbfloat16m8_t test_vfmul_vv_bf16m8_tum(vbool2_t mask, vbfloat16m8_t maskedoff, vbfloat16m8_t op1, vbfloat16m8_t op2, size_t vl) {
+ return __riscv_vfmul_vv_bf16m8_tum(mask, maskedoff, op1, op2, vl);
+}
+
+// CHECK-RV64-LABEL: define dso_local <vscale x 32 x bfloat> @test_vfmul_vf_bf16m8_tum(
+// CHECK-RV64-SAME: <vscale x 32 x i1> [[MASK:%.*]], <vscale x 32 x bfloat> [[MASKEDOFF:%.*]], <vscale x 32 x bfloat> [[OP1:%.*]], bfloat noundef [[OP2:%.*]], i64 noundef [[VL:%.*]]) #[[ATTR0]] {
+// CHECK-RV64-NEXT: [[ENTRY:.*:]]
+// CHECK-RV64-NEXT: [[TMP0:%.*]] = call <vscale x 32 x bfloat> @llvm.riscv.vfmul.mask.nxv32bf16.bf16.i64(<vscale x 32 x bfloat> [[MASKEDOFF]], <vscale x 32 x bfloat> [[OP1]], bfloat [[OP2]], <vscale x 32 x i1> [[MASK]], i64 7, i64 [[VL]], i64 2)
+// CHECK-RV64-NEXT: ret <vscale x 32 x bfloat> [[TMP0]]
+//
+vbfloat16m8_t test_vfmul_vf_bf16m8_tum(vbool2_t mask, vbfloat16m8_t maskedoff, vbfloat16m8_t op1, __bf16 op2, size_t vl) {
+ return __riscv_vfmul_vf_bf16m8_tum(mask, maskedoff, op1, op2, vl);
+}
+
+// CHECK-RV64-LABEL: define dso_local <vscale x 1 x bfloat> @test_vfmul_vv_bf16mf4_tumu(
+// CHECK-RV64-SAME: <vscale x 1 x i1> [[MASK:%.*]], <vscale x 1 x bfloat> [[MASKEDOFF:%.*]], <vscale x 1 x bfloat> [[OP1:%.*]], <vscale x 1 x bfloat> [[OP2:%.*]], i64 noundef [[VL:%.*]]) #[[ATTR0]] {
+// CHECK-RV64-NEXT: [[ENTRY:.*:]]
+// CHECK-RV64-NEXT: [[TMP0:%.*]] = call <vscale x 1 x bfloat> @llvm.riscv.vfmul.mask.nxv1bf16.nxv1bf16.i64(<vscale x 1 x bfloat> [[MASKEDOFF]], <vscale x 1 x bfloat> [[OP1]], <vscale x 1 x bfloat> [[OP2]], <vscale x 1 x i1> [[MASK]], i64 7, i64 [[VL]], i64 0)
+// CHECK-RV64-NEXT: ret <vscale x 1 x bfloat> [[TMP0]]
+//
+vbfloat16mf4_t test_vfmul_vv_bf16mf4_tumu(vbool64_t mask, vbfloat16mf4_t maskedoff, vbfloat16mf4_t op1, vbfloat16mf4_t op2, size_t vl) {
+ return __riscv_vfmul_vv_bf16mf4_tumu(mask, maskedoff, op1, op2, vl);
+}
+
+// CHECK-RV64-LABEL: define dso_local <vscale x 1 x bfloat> @test_vfmul_vf_bf16mf4_tumu(
+// CHECK-RV64-SAME: <vscale x 1 x i1> [[MASK:%.*]], <vscale x 1 x bfloat> [[MASKEDOFF:%.*]], <vscale x 1 x bfloat> [[OP1:%.*]], bfloat noundef [[OP2:%.*]], i64 noundef [[VL:%.*]]) #[[ATTR0]] {
+// CHECK-RV64-NEXT: [[ENTRY:.*:]]
+// CHECK-RV64-NEXT: [[TMP0:%.*]] = call <vscale x 1 x bfloat> @llvm.riscv.vfmul.mask.nxv1bf16.bf16.i64(<vscale x 1 x bfloat> [[MASKEDOFF]], <vscale x 1 x bfloat> [[OP1]], bfloat [[OP2]], <vscale x 1 x i1> [[MASK]], i64 7, i64 [[VL]], i64 0)
+// CHECK-RV64-NEXT: ret <vscale x 1 x bfloat> [[TMP0]]
+//
+vbfloat16mf4_t test_vfmul_vf_bf16mf4_tumu(vbool64_t mask, vbfloat16mf4_t maskedoff, vbfloat16mf4_t op1, __bf16 op2, size_t vl) {
+ return __riscv_vfmul_vf_bf16mf4_tumu(mask, maskedoff, op1, op2, vl);
+}
+
+// CHECK-RV64-LABEL: define dso_local <vscale x 2 x bfloat> @test_vfmul_vv_bf16mf2_tumu(
+// CHECK-RV64-SAME: <vscale x 2 x i1> [[MASK:%.*]], <vscale x 2 x bfloat> [[MASKEDOFF:%.*]], <vscale x 2 x bfloat> [[OP1:%.*]], <vscale x 2 x bfloat> [[OP2:%.*]], i64 noundef [[VL:%.*]]) #[[ATTR0]] {
+// CHECK-RV64-NEXT: [[ENTRY:.*:]]
+// CHECK-RV64-NEXT: [[TMP0:%.*]] = call <vscale x 2 x bfloat> @llvm.riscv.vfmul.mask.nxv2bf16.nxv2bf16.i64(<vscale x 2 x bfloat> [[MASKEDOFF]], <vscale x 2 x bfloat> [[OP1]], <vscale x 2 x bfloat> [[OP2]], <vscale x 2 x i1> [[MASK]], i64 7, i64 [[VL]], i64 0)
+// CHECK-RV64-NEXT: ret <vscale x 2 x bfloat> [[TMP0]]
+//
+vbfloat16mf2_t test_vfmul_vv_bf16mf2_tumu(vbool32_t mask, vbfloat16mf2_t maskedoff, vbfloat16mf2_t op1, vbfloat16mf2_t op2, size_t vl) {
+ return __riscv_vfmul_vv_bf16mf2_tumu(mask, maskedoff, op1, op2, vl);
+}
+
+// CHECK-RV64-LABEL: define dso_local <vscale x 2 x bfloat> @test_vfmul_vf_bf16mf2_tumu(
+// CHECK-RV64-SAME: <vscale x 2 x i1> [[MASK:%.*]], <vscale x 2 x bfloat> [[MASKEDOFF:%.*]], <vscale x 2 x bfloat> [[OP1:%.*]], bfloat noundef [[OP2:%.*]], i64 noundef [[VL:%.*]]) #[[ATTR0]] {
+// CHECK-RV64-NEXT: [[ENTRY:.*:]]
+// CHECK-RV64-NEXT: [[TMP0:%.*]] = call <vscale x 2 x bfloat> @llvm.riscv.vfmul.mask.nxv2bf16.bf16.i64(<vscale x 2 x bfloat> [[MASKEDOFF]], <vscale x 2 x bfloat> [[OP1]], bfloat [[OP2]], <vscale x 2 x i1> [[MASK]], i64 7, i64 [[VL]], i64 0)
+// CHECK-RV64-NEXT: ret <vscale x 2 x bfloat> [[TMP0]]
+//
+vbfloat16mf2_t test_vfmul_vf_bf16mf2_tumu(vbool32_t mask, vbfloat16mf2_t maskedoff, vbfloat16mf2_t op1, __bf16 op2, size_t vl) {
+ return __riscv_vfmul_vf_bf16mf2_tumu(mask, maskedoff, op1, op2, vl);
+}
+
+// CHECK-RV64-LABEL: define dso_local <vscale x 4 x bfloat> @test_vfmul_vv_bf16m1_tumu(
+// CHECK-RV64-SAME: <vscale x 4 x i1> [[MASK:%.*]], <vscale x 4 x bfloat> [[MASKEDOFF:%.*]], <vscale x 4 x bfloat> [[OP1:%.*]], <vscale x 4 x bfloat> [[OP2:%.*]], i64 noundef [[VL:%.*]]) #[[ATTR0]] {
+// CHECK-RV64-NEXT: [[ENTRY:.*:]]
+// CHECK-RV64-NEXT: [[TMP0:%.*]] = call <vscale x 4 x bfloat> @llvm.riscv.vfmul.mask.nxv4bf16.nxv4bf16.i64(<vscale x 4 x bfloat> [[MASKEDOFF]], <vscale x 4 x bfloat> [[OP1]], <vscale x 4 x bfloat> [[OP2]], <vscale x 4 x i1> [[MASK]], i64 7, i64 [[VL]], i64 0)
+// CHECK-RV64-NEXT: ret <vscale x 4 x bfloat> [[TMP0]]
+//
+vbfloat16m1_t test_vfmul_vv_bf16m1_tumu(vbool16_t mask, vbfloat16m1_t maskedoff, vbfloat16m1_t op1, vbfloat16m1_t op2, size_t vl) {
+ return __riscv_vfmul_vv_bf16m1_tumu(mask, maskedoff, op1, op2, vl);
+}
+
+// CHECK-RV64-LABEL: define dso_local <vscale x 4 x bfloat> @test_vfmul_vf_bf16m1_tumu(
+// CHECK-RV64-SAME: <vscale x 4 x i1> [[MASK:%.*]], <vscale x 4 x bfloat> [[MASKEDOFF:%.*]], <vscale x 4 x bfloat> [[OP1:%.*]], bfloat noundef [[OP2:%.*]], i64 noundef [[VL:%.*]]) #[[ATTR0]] {
+// CHECK-RV64-NEXT: [[ENTRY:.*:]]
+// CHECK-RV64-NEXT: [[TMP0:%.*]] = call <vscale x 4 x bfloat> @llvm.riscv.vfmul.mask.nxv4bf16.bf16.i64(<vscale x 4 x bfloat> [[MASKEDOFF]], <vscale x 4 x bfloat> [[OP1]], bfloat [[OP2]], <vscale x 4 x i1> [[MASK]], i64 7, i64 [[VL]], i64 0)
+// CHECK-RV64-NEXT: ret <vscale x 4 x bfloat> [[TMP0]]
+//
+vbfloat16m1_t test_vfmul_vf_bf16m1_tumu(vbool16_t mask, vbfloat16m1_t maskedoff, vbfloat16m1_t op1, __bf16 op2, size_t vl) {
+ return __riscv_vfmul_vf_bf16m1_tumu(mask, maskedoff, op1, op2, vl);
+}
+
+// CHECK-RV64-LABEL: define dso_local <vscale x 8 x bfloat> @test_vfmul_vv_bf16m2_tumu(
+// CHECK-RV64-SAME: <vscale x 8 x i1> [[MASK:%.*]], <vscale x 8 x bfloat> [[MASKEDOFF:%.*]], <vscale x 8 x bfloat> [[OP1:%.*]], <vscale x 8 x bfloat> [[OP2:%.*]], i64 noundef [[VL:%.*]]) #[[ATTR0]] {
+// CHECK-RV64-NEXT: [[ENTRY:.*:]]
+// CHECK-RV64-NEXT: [[TMP0:%.*]] = call <vscale x 8 x bfloat> @llvm.riscv.vfmul.mask.nxv8bf16.nxv8bf16.i64(<vscale x 8 x bfloat> [[MASKEDOFF]], <vscale x 8 x bfloat> [[OP1]], <vscale x 8 x bfloat> [[OP2]], <vscale x 8 x i1> [[MASK]], i64 7, i64 [[VL]], i64 0)
+// CHECK-RV64-NEXT: ret <vscale x 8 x bfloat> [[TMP0]]
+//
+vbfloat16m2_t test_vfmul_vv_bf16m2_tumu(vbool8_t mask, vbfloat16m2_t maskedoff, vbfloat16m2_t op1, vbfloat16m2_t op2, size_t vl) {
+ return __riscv_vfmul_vv_bf16m2_tumu(mask, maskedoff, op1, op2, vl);
+}
+
+// CHECK-RV64-LABEL: define dso_local <vscale x 8 x bfloat> @test_vfmul_vf_bf16m2_tumu(
+// CHECK-RV64-SAME: <vscale x 8 x i1> [[MASK:%.*]], <vscale x 8 x bfloat> [[MASKEDOFF:%.*]], <vscale x 8 x bfloat> [[OP1:%.*]], bfloat noundef [[OP2:%.*]], i64 noundef [[VL:%.*]]) #[[ATTR0]] {
+// CHECK-RV64-NEXT: [[ENTRY:.*:]]
+// CHECK-RV64-NEXT: [[TMP0:%.*]] = call <vscale x 8 x bfloat> @llvm.riscv.vfmul.mask.nxv8bf16.bf16.i64(<vscale x 8 x bfloat> [[MASKEDOFF]], <vscale x 8 x bfloat> [[OP1]], bfloat [[OP2]], <vscale x 8 x i1> [[MASK]], i64 7, i64 [[VL]], i64 0)
+// CHECK-RV64-NEXT: ret <vscale x 8 x bfloat> [[TMP0]]
+//
+vbfloat16m2_t test_vfmul_vf_bf16m2_tumu(vbool8_t mask, vbfloat16m2_t maskedoff, vbfloat16m2_t op1, __bf16 op2, size_t vl) {
+ return __riscv_vfmul_vf_bf16m2_tumu(mask, maskedoff, op1, op2, vl);
+}
+
+// CHECK-RV64-LABEL: define dso_local <vscale x 16 x bfloat> @test_vfmul_vv_bf16m4_tumu(
+// CHECK-RV64-SAME: <vscale x 16 x i1> [[MASK:%.*]], <vscale x 16 x bfloat> [[MASKEDOFF:%.*]], <vscale x 16 x bfloat> [[OP1:%.*]], <vscale x 16 x bfloat> [[OP2:%.*]], i64 noundef [[VL:%.*]]) #[[ATTR0]] {
+// CHECK-RV64-NEXT: [[ENTRY:.*:]]
+// CHECK-RV64-NEXT: [[TMP0:%.*]] = call <vscale x 16 x bfloat> @llvm.riscv.vfmul.mask.nxv16bf16.nxv16bf16.i64(<vscale x 16 x bfloat> [[MASKEDOFF]], <vscale x 16 x bfloat> [[OP1]], <vscale x 16 x bfloat> [[OP2]], <vscale x 16 x i1> [[MASK]], i64 7, i64 [[VL]], i64 0)
+// CHECK-RV64-NEXT: ret <vscale x 16 x bfloat> [[TMP0]]
+//
+vbfloat16m4_t test_vfmul_vv_bf16m4_tumu(vbool4_t mask, vbfloat16m4_t maskedoff, vbfloat16m4_t op1, vbfloat16m4_t op2, size_t vl) {
+ return __riscv_vfmul_vv_bf16m4_tumu(mask, maskedoff, op1, op2, vl);
+}
+
+// CHECK-RV64-LABEL: define dso_local <vscale x 16 x bfloat> @test_vfmul_vf_bf16m4_tumu(
+// CHECK-RV64-SAME: <vscale x 16 x i1> [[MASK:%.*]], <vscale x 16 x bfloat> [[MASKEDOFF:%.*]], <vscale x 16 x bfloat> [[OP1:%.*]], bfloat noundef [[OP2:%.*]], i64 noundef [[VL:%.*]]) #[[ATTR0]] {
+// CHECK-RV64-NEXT: [[ENTRY:.*:]]
+// CHECK-RV64-NEXT: [[TMP0:%.*]] = call <vscale x 16 x bfloat> @llvm.riscv.vfmul.mask.nxv16bf16.bf16.i64(<vscale x 16 x bfloat> [[MASKEDOFF]], <vscale x 16 x bfloat> [[OP1]], bfloat [[OP2]], <vscale x 16 x i1> [[MASK]], i64 7, i64 [[VL]], i64 0)
+// CHECK-RV64-NEXT: ret <vscale x 16 x bfloat> [[TMP0]]
+//
+vbfloat16m4_t test_vfmul_vf_bf16m4_tumu(vbool4_t mask, vbfloat16m4_t maskedoff, vbfloat16m4_t op1, __bf16 op2, size_t vl) {
+ return __riscv_vfmul_vf_bf16m4_tumu(mask, maskedoff, op1, op2, vl);
+}
+
+// CHECK-RV64-LABEL: define dso_local <vscale x 32 x bfloat> @test_vfmul_vv_bf16m8_tumu(
+// CHECK-RV64-SAME: <vscale x 32 x i1> [[MASK:%.*]], <vscale x 32 x bfloat> [[MASKEDOFF:%.*]], <vscale x 32 x bfloat> [[OP1:%.*]], <vscale x 32 x bfloat> [[OP2:%.*]], i64 noundef [[VL:%.*]]) #[[ATTR0]] {
+// CHECK-RV64-NEXT: [[ENTRY:.*:]]
+// CHECK-RV64-NEXT: [[TMP0:%.*]] = call <vscale x 32 x bfloat> @llvm.riscv.vfmul.mask.nxv32bf16.nxv32bf16.i64(<vscale x 32 x bfloat> [[MASKEDOFF]], <vscale x 32 x bfloat> [[OP1]], <vscale x 32 x bfloat> [[OP2]], <vscale x 32 x i1> [[MASK]], i64 7, i64 [[VL]], i64 0)
+// CHECK-RV64-NEXT: ret <vscale x 32 x bfloat> [[TMP0]]
+//
+vbfloat16m8_t test_vfmul_vv_bf16m8_tumu(vbool2_t mask, vbfloat16m8_t maskedoff, vbfloat16m8_t op1, vbfloat16m8_t op2, size_t vl) {
+ return __riscv_vfmul_vv_bf16m8_tumu(mask, maskedoff, op1, op2, vl);
+}
+
+// CHECK-RV64-LABEL: define dso_local <vscale x 32 x bfloat> @test_vfmul_vf_bf16m8_tumu(
+// CHECK-RV64-SAME: <vscale x 32 x i1> [[MASK:%.*]], <vscale x 32 x bfloat> [[MASKEDOFF:%.*]], <vscale x 32 x bfloat> [[OP1:%.*]], bfloat noundef [[OP2:%.*]], i64 noundef [[VL:%.*]]) #[[ATTR0]] {
+// CHECK-RV64-NEXT: [[ENTRY:.*:]]
+// CHECK-RV64-NEXT: [[TMP0:%.*]] = call <vscale x 32 x bfloat> @llvm.riscv.vfmul.mask.nxv32bf16.bf16.i64(<vscale x 32 x bfloat> [[MASKEDOFF]], <vscale x 32 x bfloat> [[OP1]], bfloat [[OP2]], <vscale x 32 x i1> [[MASK]], i64 7, i64 [[VL]], i64 0)
+// CHECK-RV64-NEXT: ret <vscale x 32 x bfloat> [[TMP0]]
+//
+vbfloat16m8_t test_vfmul_vf_bf16m8_tumu(vbool2_t mask, vbfloat16m8_t maskedoff, vbfloat16m8_t op1, __bf16 op2, size_t vl) {
+ return __riscv_vfmul_vf_bf16m8_tumu(mask, maskedoff, op1, op2, vl);
+}
+
+// CHECK-RV64-LABEL: define dso_local <vscale x 1 x bfloat> @test_vfmul_vv_bf16mf4_mu(
+// CHECK-RV64-SAME: <vscale x 1 x i1> [[MASK:%.*]], <vscale x 1 x bfloat> [[MASKEDOFF:%.*]], <vscale x 1 x bfloat> [[OP1:%.*]], <vscale x 1 x bfloat> [[OP2:%.*]], i64 noundef [[VL:%.*]]) #[[ATTR0]] {
+// CHECK-RV64-NEXT: [[ENTRY:.*:]]
+// CHECK-RV64-NEXT: [[TMP0:%.*]] = call <vscale x 1 x bfloat> @llvm.riscv.vfmul.mask.nxv1bf16.nxv1bf16.i64(<vscale x 1 x bfloat> [[MASKEDOFF]], <vscale x 1 x bfloat> [[OP1]], <vscale x 1 x bfloat> [[OP2]], <vscale x 1 x i1> [[MASK]], i64 7, i64 [[VL]], i64 1)
+// CHECK-RV64-NEXT: ret <vscale x 1 x bfloat> [[TMP0]]
+//
+vbfloat16mf4_t test_vfmul_vv_bf16mf4_mu(vbool64_t mask, vbfloat16mf4_t maskedoff, vbfloat16mf4_t op1, vbfloat16mf4_t op2, size_t vl) {
+ return __riscv_vfmul_vv_bf16mf4_mu(mask, maskedoff, op1, op2, vl);
+}
+
+// CHECK-RV64-LABEL: define dso_local <vscale x 1 x bfloat> @test_vfmul_vf_bf16mf4_mu(
+// CHECK-RV64-SAME: <vscale x 1 x i1> [[MASK:%.*]], <vscale x 1 x bfloat> [[MASKEDOFF:%.*]], <vscale x 1 x bfloat> [[OP1:%.*]], bfloat noundef [[OP2:%.*]], i64 noundef [[VL:%.*]]) #[[ATTR0]] {
+// CHECK-RV64-NEXT: [[ENTRY:.*:]]
+// CHECK-RV64-NEXT: [[TMP0:%.*]] = call <vscale x 1 x bfloat> @llvm.riscv.vfmul.mask.nxv1bf16.bf16.i64(<vscale x 1 x bfloat> [[MASKEDOFF]], <vscale x 1 x bfloat> [[OP1]], bfloat [[OP2]], <vscale x 1 x i1> [[MASK]], i64 7, i64 [[VL]], i64 1)
+// CHECK-RV64-NEXT: ret <vscale x 1 x bfloat> [[TMP0]]
+//
+vbfloat16mf4_t test_vfmul_vf_bf16mf4_mu(vbool64_t mask, vbfloat16mf4_t maskedoff, vbfloat16mf4_t op1, __bf16 op2, size_t vl) {
+ return __riscv_vfmul_vf_bf16mf4_mu(mask, maskedoff, op1, op2, vl);
+}
+
+// CHECK-RV64-LABEL: define dso_local <vscale x 2 x bfloat> @test_vfmul_vv_bf16mf2_mu(
+// CHECK-RV64-SAME: <vscale x 2 x i1> [[MASK:%.*]], <vscale x 2 x bfloat> [[MASKEDOFF:%.*]], <vscale x 2 x bfloat> [[OP1:%.*]], <vscale x 2 x bfloat> [[OP2:%.*]], i64 noundef [[VL:%.*]]) #[[ATTR0]] {
+// CHECK-RV64-NEXT: [[ENTRY:.*:]]
+// CHECK-RV64-NEXT: [[TMP0:%.*]] = call <vscale x 2 x bfloat> @llvm.riscv.vfmul.mask.nxv2bf16.nxv2bf16.i64(<vscale x 2 x bfloat> [[MASKEDOFF]], <vscale x 2 x bfloat> [[OP1]], <vscale x 2 x bfloat> [[OP2]], <vscale x 2 x i1> [[MASK]], i64 7, i64 [[VL]], i64 1)
+// CHECK-RV64-NEXT: ret <vscale x 2 x bfloat> [[TMP0]]
+//
+vbfloat16mf2_t test_vfmul_vv_bf16mf2_mu(vbool32_t mask, vbfloat16mf2_t maskedoff, vbfloat16mf2_t op1, vbfloat16mf2_t op2, size_t vl) {
+ return __riscv_vfmul_vv_bf16mf2_mu(mask, maskedoff, op1, op2, vl);
+}
+
+// CHECK-RV64-LABEL: define dso_local <vscale x 2 x bfloat> @test_vfmul_vf_bf16mf2_mu(
+// CHECK-RV64-SAME: <vscale x 2 x i1> [[MASK:%.*]], <vscale x 2 x bfloat> [[MASKEDOFF:%.*]], <vscale x 2 x bfloat> [[OP1:%.*]], bfloat noundef [[OP2:%.*]], i64 noundef [[VL:%.*]]) #[[ATTR0]] {
+// CHECK-RV64-NEXT: [[ENTRY:.*:]]
+// CHECK-RV64-NEXT: [[TMP0:%.*]] = call <vscale x 2 x bfloat> @llvm.riscv.vfmul.mask.nxv2bf16.bf16.i64(<vscale x 2 x bfloat> [[MASKEDOFF]], <vscale x 2 x bfloat> [[OP1]], bfloat [[OP2]], <vscale x 2 x i1> [[MASK]], i64 7, i64 [[VL]], i64 1)
+// CHECK-RV64-NEXT: ret <vscale x 2 x bfloat> [[TMP0]]
+//
+vbfloat16mf2_t test_vfmul_vf_bf16mf2_mu(vbool32_t mask, vbfloat16mf2_t maskedoff, vbfloat16mf2_t op1, __bf16 op2, size_t vl) {
+ return __riscv_vfmul_vf_bf16mf2_mu(mask, maskedoff, op1, op2, vl);
+}
+
+// CHECK-RV64-LABEL: define dso_local <vscale x 4 x bfloat> @test_vfmul_vv_bf16m1_mu(
+// CHECK-RV64-SAME: <vscale x 4 x i1> [[MASK:%.*]], <vscale x 4 x bfloat> [[MASKEDOFF:%.*]], <vscale x 4 x bfloat> [[OP1:%.*]], <vscale x 4 x bfloat> [[OP2:%.*]], i64 noundef [[VL:%.*]]) #[[ATTR0]] {
+// CHECK-RV64-NEXT: [[ENTRY:.*:]]
+// CHECK-RV64-NEXT: [[TMP0:%.*]] = call <vscale x 4 x bfloat> @llvm.riscv.vfmul.mask.nxv4bf16.nxv4bf16.i64(<vscale x 4 x bfloat> [[MASKEDOFF]], <vscale x 4 x bfloat> [[OP1]], <vscale x 4 x bfloat> [[OP2]], <vscale x 4 x i1> [[MASK]], i64 7, i64 [[VL]], i64 1)
+// CHECK-RV64-NEXT: ret <vscale x 4 x bfloat> [[TMP0]]
+//
+vbfloat16m1_t test_vfmul_vv_bf16m1_mu(vbool16_t mask, vbfloat16m1_t maskedoff, vbfloat16m1_t op1, vbfloat16m1_t op2, size_t vl) {
+ return __riscv_vfmul_vv_bf16m1_mu(mask, maskedoff, op1, op2, vl);
+}
+
+// CHECK-RV64-LABEL: define dso_local <vscale x 4 x bfloat> @test_vfmul_vf_bf16m1_mu(
+// CHECK-RV64-SAME: <vscale x 4 x i1> [[MASK:%.*]], <vscale x 4 x bfloat> [[MASKEDOFF:%.*]], <vscale x 4 x bfloat> [[OP1:%.*]], bfloat noundef [[OP2:%.*]], i64 noundef [[VL:%.*]]) #[[ATTR0]] {
+// CHECK-RV64-NEXT: [[ENTRY:.*:]]
+// CHECK-RV64-NEXT: [[TMP0:%.*]] = call <vscale x 4 x bfloat> @llvm.riscv.vfmul.mask.nxv4bf16.bf16.i64(<vscale x 4 x bfloat> [[MASKEDOFF]], <vscale x 4 x bfloat> [[OP1]], bfloat [[OP2]], <vscale x 4 x i1> [[MASK]], i64 7, i64 [[VL]], i64 1)
+// CHECK-RV64-NEXT: ret <vscale x 4 x bfloat> [[TMP0]]
+//
+vbfloat16m1_t test_vfmul_vf_bf16m1_mu(vbool16_t mask, vbfloat16m1_t maskedoff, vbfloat16m1_t op1, __bf16 op2, size_t vl) {
+ return __riscv_vfmul_vf_bf16m1_mu(mask, maskedoff, op1, op2, vl);
+}
+
+// CHECK-RV64-LABEL: define dso_local <vscale x 8 x bfloat> @test_vfmul_vv_bf16m2_mu(
+// CHECK-RV64-SAME: <vscale x 8 x i1> [[MASK:%.*]], <vscale x 8 x bfloat> [[MASKEDOFF:%.*]], <vscale x 8 x bfloat> [[OP1:%.*]], <vscale x 8 x bfloat> [[OP2:%.*]], i64 noundef [[VL:%.*]]) #[[ATTR0]] {
+// CHECK-RV64-NEXT: [[ENTRY:.*:]]
+// CHECK-RV64-NEXT: [[TMP0:%.*]] = call <vscale x 8 x bfloat> @llvm.riscv.vfmul.mask.nxv8bf16.nxv8bf16.i64(<vscale x 8 x bfloat> [[MASKEDOFF]], <vscale x 8 x bfloat> [[OP1]], <vscale x 8 x bfloat> [[OP2]], <vscale x 8 x i1> [[MASK]], i64 7, i64 [[VL]], i64 1)
+// CHECK-RV64-NEXT: ret <vscale x 8 x bfloat> [[TMP0]]
+//
+vbfloat16m2_t test_vfmul_vv_bf16m2_mu(vbool8_t mask, vbfloat16m2_t maskedoff, vbfloat16m2_t op1, vbfloat16m2_t op2, size_t vl) {
+ return __riscv_vfmul_vv_bf16m2_mu(mask, maskedoff, op1, op2, vl);
+}
+
+// CHECK-RV64-LABEL: define dso_local <vscale x 8 x bfloat> @test_vfmul_vf_bf16m2_mu(
+// CHECK-RV64-SAME: <vscale x 8 x i1> [[MASK:%.*]], <vscale x 8 x bfloat> [[MASKEDOFF:%.*]], <vscale x 8 x bfloat> [[OP1:%.*]], bfloat noundef [[OP2:%.*]], i64 noundef [[VL:%.*]]) #[[ATTR0]] {
+// CHECK-RV64-NEXT: [[ENTRY:.*:]]
+// CHECK-RV64-NEXT: [[TMP0:%.*]] = call <vscale x 8 x bfloat> @llvm.riscv.vfmul.mask.nxv8bf16.bf16.i64(<vscale x 8 x bfloat> [[MASKEDOFF]], <vscale x 8 x bfloat> [[OP1]], bfloat [[OP2]], <vscale x 8 x i1> [[MASK]], i64 7, i64 [[VL]], i64 1)
+// CHECK-RV64-NEXT: ret <vscale x 8 x bfloat> [[TMP0]]
+//
+vbfloat16m2_t test_vfmul_vf_bf16m2_mu(vbool8_t mask, vbfloat16m2_t maskedoff, vbfloat16m2_t op1, __bf16 op2, size_t vl) {
+ return __riscv_vfmul_vf_bf16m2_mu(mask, maskedoff, op1, op2, vl);
+}
+
+// CHECK-RV64-LABEL: define dso_local <vscale x 16 x bfloat> @test_vfmul_vv_bf16m4_mu(
+// CHECK-RV64-SAME: <vscale x 16 x i1> [[MASK:%.*]], <vscale x 16 x bfloat> [[MASKEDOFF:%.*]], <vscale x 16 x bfloat> [[OP1:%.*]], <vscale x 16 x bfloat> [[OP2:%.*]], i64 noundef [[VL:%.*]]) #[[ATTR0]] {
+// CHECK-RV64-NEXT: [[ENTRY:.*:]]
+// CHECK-RV64-NEXT: [[TMP0:%.*]] = call <vscale x 16 x bfloat> @llvm.riscv.vfmul.mask.nxv16bf16.nxv16bf16.i64(<vscale x 16 x bfloat> [[MASKEDOFF]], <vscale x 16 x bfloat> [[OP1]], <vscale x 16 x bfloat> [[OP2]], <vscale x 16 x i1> [[MASK]], i64 7, i64 [[VL]], i64 1)
+// CHECK-RV64-NEXT: ret <vscale x 16 x bfloat> [[TMP0]]
+//
+vbfloat16m4_t test_vfmul_vv_bf16m4_mu(vbool4_t mask, vbfloat16m4_t maskedoff, vbfloat16m4_t op1, vbfloat16m4_t op2, size_t vl) {
+ return __riscv_vfmul_vv_bf16m4_mu(mask, maskedoff, op1, op2, vl);
+}
+
+// CHECK-RV64-LABEL: define dso_local <vscale x 16 x bfloat> @test_vfmul_vf_bf16m4_mu(
+// CHECK-RV64-SAME: <vscale x 16 x i1> [[MASK:%.*]], <vscale x 16 x bfloat> [[MASKEDOFF:%.*]], <vscale x 16 x bfloat> [[OP1:%.*]], bfloat noundef [[OP2:%.*]], i64 noundef [[VL:%.*]]) #[[ATTR0]] {
+// CHECK-RV64-NEXT: [[ENTRY:.*:]]
+// CHECK-RV64-NEXT: [[TMP0:%.*]] = call <vscale x 16 x bfloat> @llvm.riscv.vfmul.mask.nxv16bf16.bf16.i64(<vscale x 16 x bfloat> [[MASKEDOFF]], <vscale x 16 x bfloat> [[OP1]], bfloat [[OP2]], <vscale x 16 x i1> [[MASK]], i64 7, i64 [[VL]], i64 1)
+// CHECK-RV64-NEXT: ret <vscale x 16 x bfloat> [[TMP0]]
+//
+vbfloat16m4_t test_vfmul_vf_bf16m4_mu(vbool4_t mask, vbfloat16m4_t maskedoff, vbfloat16m4_t op1, __bf16 op2, size_t vl) {
+ return __riscv_vfmul_vf_bf16m4_mu(mask, maskedoff, op1, op2, vl);
+}
+
+// CHECK-RV64-LABEL: define dso_local <vscale x 32 x bfloat> @test_vfmul_vv_bf16m8_mu(
+// CHECK-RV64-SAME: <vscale x 32 x i1> [[MASK:%.*]], <vscale x 32 x bfloat> [[MASKEDOFF:%.*]], <vscale x 32 x bfloat> [[OP1:%.*]], <vscale x 32 x bfloat> [[OP2:%.*]], i64 noundef [[VL:%.*]]) #[[ATTR0]] {
+// CHECK-RV64-NEXT: [[ENTRY:.*:]]
+// CHECK-RV64-NEXT: [[TMP0:%.*]] = call <vscale x 32 x bfloat> @llvm.riscv.vfmul.mask.nxv32bf16.nxv32bf16.i64(<vscale x 32 x bfloat> [[MASKEDOFF]], <vscale x 32 x bfloat> [[OP1]], <vscale x 32 x bfloat> [[OP2]], <vscale x 32 x i1> [[MASK]], i64 7, i64 [[VL]], i64 1)
+// CHECK-RV64-NEXT: ret <vscale x 32 x bfloat> [[TMP0]]
+//
+vbfloat16m8_t test_vfmul_vv_bf16m8_mu(vbool2_t mask, vbfloat16m8_t maskedoff, vbfloat16m8_t op1, vbfloat16m8_t op2, size_t vl) {
+ return __riscv_vfmul_vv_bf16m8_mu(mask, maskedoff, op1, op2, vl);
+}
+
+// CHECK-RV64-LABEL: define dso_local <vscale x 32 x bfloat> @test_vfmul_vf_bf16m8_mu(
+// CHECK-RV64-SAME: <vscale x 32 x i1> [[MASK:%.*]], <vscale x 32 x bfloat> [[MASKEDOFF:%.*]], <vscale x 32 x bfloat> [[OP1:%.*]], bfloat noundef [[OP2:%.*]], i64 noundef [[VL:%.*]]) #[[ATTR0]] {
+// CHECK-RV64-NEXT: [[ENTRY:.*:]]
+// CHECK-RV64-NEXT: [[TMP0:%.*]] = call <vscale x 32 x bfloat> @llvm.riscv.vfmul.mask.nxv32bf16.bf16.i64(<vscale x 32 x bfloat> [[MASKEDOFF]], <vscale x 32 x bfloat> [[OP1]], bfloat [[OP2]], <vscale x 32 x i1> [[MASK]], i64 7, i64 [[VL]], i64 1)
+// CHECK-RV64-NEXT: ret <vscale x 32 x bfloat> [[TMP0]]
+//
+vbfloat16m8_t test_vfmul_vf_bf16m8_mu(vbool2_t mask, vbfloat16m8_t maskedoff, vbfloat16m8_t op1, __bf16 op2, size_t vl) {
+ return __riscv_vfmul_vf_bf16m8_mu(mask, maskedoff, op1, op2, vl);
+}
+
diff --git a/clang/test/CodeGen/RISCV/rvv-intrinsics-autogenerated/zvfbfa/policy/non-overloaded/vfmv.c b/clang/test/CodeGen/RISCV/rvv-intrinsics-autogenerated/zvfbfa/policy/non-overloaded/vfmv.c
new file mode 100644
index 0000000..069ee6a
--- /dev/null
+++ b/clang/test/CodeGen/RISCV/rvv-intrinsics-autogenerated/zvfbfa/policy/non-overloaded/vfmv.c
@@ -0,0 +1,129 @@
+// NOTE: Assertions have been autogenerated by utils/update_cc_test_checks.py UTC_ARGS: --version 5
+// REQUIRES: riscv-registered-target
+// RUN: %clang_cc1 -triple riscv64 -target-feature +v \
+// RUN: -target-feature +experimental-zvfbfa -disable-O0-optnone \
+// RUN: -emit-llvm %s -o - | opt -S -passes=mem2reg | \
+// RUN: FileCheck --check-prefix=CHECK-RV64 %s
+
+#include <riscv_vector.h>
+
+// CHECK-RV64-LABEL: define dso_local <vscale x 1 x bfloat> @test_vfmv_v_f_bf16mf4_tu(
+// CHECK-RV64-SAME: <vscale x 1 x bfloat> [[MASKEDOFF:%.*]], bfloat noundef [[SRC:%.*]], i64 noundef [[VL:%.*]]) #[[ATTR0:[0-9]+]] {
+// CHECK-RV64-NEXT: [[ENTRY:.*:]]
+// CHECK-RV64-NEXT: [[TMP0:%.*]] = call <vscale x 1 x bfloat> @llvm.riscv.vfmv.v.f.nxv1bf16.i64(<vscale x 1 x bfloat> [[MASKEDOFF]], bfloat [[SRC]], i64 [[VL]])
+// CHECK-RV64-NEXT: ret <vscale x 1 x bfloat> [[TMP0]]
+//
+vbfloat16mf4_t test_vfmv_v_f_bf16mf4_tu(vbfloat16mf4_t maskedoff, __bf16 src, size_t vl) {
+ return __riscv_vfmv_v_f_bf16mf4_tu(maskedoff, src, vl);
+}
+
+// CHECK-RV64-LABEL: define dso_local <vscale x 2 x bfloat> @test_vfmv_v_f_bf16mf2_tu(
+// CHECK-RV64-SAME: <vscale x 2 x bfloat> [[MASKEDOFF:%.*]], bfloat noundef [[SRC:%.*]], i64 noundef [[VL:%.*]]) #[[ATTR0]] {
+// CHECK-RV64-NEXT: [[ENTRY:.*:]]
+// CHECK-RV64-NEXT: [[TMP0:%.*]] = call <vscale x 2 x bfloat> @llvm.riscv.vfmv.v.f.nxv2bf16.i64(<vscale x 2 x bfloat> [[MASKEDOFF]], bfloat [[SRC]], i64 [[VL]])
+// CHECK-RV64-NEXT: ret <vscale x 2 x bfloat> [[TMP0]]
+//
+vbfloat16mf2_t test_vfmv_v_f_bf16mf2_tu(vbfloat16mf2_t maskedoff, __bf16 src, size_t vl) {
+ return __riscv_vfmv_v_f_bf16mf2_tu(maskedoff, src, vl);
+}
+
+// CHECK-RV64-LABEL: define dso_local <vscale x 4 x bfloat> @test_vfmv_v_f_bf16m1_tu(
+// CHECK-RV64-SAME: <vscale x 4 x bfloat> [[MASKEDOFF:%.*]], bfloat noundef [[SRC:%.*]], i64 noundef [[VL:%.*]]) #[[ATTR0]] {
+// CHECK-RV64-NEXT: [[ENTRY:.*:]]
+// CHECK-RV64-NEXT: [[TMP0:%.*]] = call <vscale x 4 x bfloat> @llvm.riscv.vfmv.v.f.nxv4bf16.i64(<vscale x 4 x bfloat> [[MASKEDOFF]], bfloat [[SRC]], i64 [[VL]])
+// CHECK-RV64-NEXT: ret <vscale x 4 x bfloat> [[TMP0]]
+//
+vbfloat16m1_t test_vfmv_v_f_bf16m1_tu(vbfloat16m1_t maskedoff, __bf16 src, size_t vl) {
+ return __riscv_vfmv_v_f_bf16m1_tu(maskedoff, src, vl);
+}
+
+// CHECK-RV64-LABEL: define dso_local <vscale x 8 x bfloat> @test_vfmv_v_f_bf16m2_tu(
+// CHECK-RV64-SAME: <vscale x 8 x bfloat> [[MASKEDOFF:%.*]], bfloat noundef [[SRC:%.*]], i64 noundef [[VL:%.*]]) #[[ATTR0]] {
+// CHECK-RV64-NEXT: [[ENTRY:.*:]]
+// CHECK-RV64-NEXT: [[TMP0:%.*]] = call <vscale x 8 x bfloat> @llvm.riscv.vfmv.v.f.nxv8bf16.i64(<vscale x 8 x bfloat> [[MASKEDOFF]], bfloat [[SRC]], i64 [[VL]])
+// CHECK-RV64-NEXT: ret <vscale x 8 x bfloat> [[TMP0]]
+//
+vbfloat16m2_t test_vfmv_v_f_bf16m2_tu(vbfloat16m2_t maskedoff, __bf16 src, size_t vl) {
+ return __riscv_vfmv_v_f_bf16m2_tu(maskedoff, src, vl);
+}
+
+// CHECK-RV64-LABEL: define dso_local <vscale x 16 x bfloat> @test_vfmv_v_f_bf16m4_tu(
+// CHECK-RV64-SAME: <vscale x 16 x bfloat> [[MASKEDOFF:%.*]], bfloat noundef [[SRC:%.*]], i64 noundef [[VL:%.*]]) #[[ATTR0]] {
+// CHECK-RV64-NEXT: [[ENTRY:.*:]]
+// CHECK-RV64-NEXT: [[TMP0:%.*]] = call <vscale x 16 x bfloat> @llvm.riscv.vfmv.v.f.nxv16bf16.i64(<vscale x 16 x bfloat> [[MASKEDOFF]], bfloat [[SRC]], i64 [[VL]])
+// CHECK-RV64-NEXT: ret <vscale x 16 x bfloat> [[TMP0]]
+//
+vbfloat16m4_t test_vfmv_v_f_bf16m4_tu(vbfloat16m4_t maskedoff, __bf16 src, size_t vl) {
+ return __riscv_vfmv_v_f_bf16m4_tu(maskedoff, src, vl);
+}
+
+// CHECK-RV64-LABEL: define dso_local <vscale x 32 x bfloat> @test_vfmv_v_f_bf16m8_tu(
+// CHECK-RV64-SAME: <vscale x 32 x bfloat> [[MASKEDOFF:%.*]], bfloat noundef [[SRC:%.*]], i64 noundef [[VL:%.*]]) #[[ATTR0]] {
+// CHECK-RV64-NEXT: [[ENTRY:.*:]]
+// CHECK-RV64-NEXT: [[TMP0:%.*]] = call <vscale x 32 x bfloat> @llvm.riscv.vfmv.v.f.nxv32bf16.i64(<vscale x 32 x bfloat> [[MASKEDOFF]], bfloat [[SRC]], i64 [[VL]])
+// CHECK-RV64-NEXT: ret <vscale x 32 x bfloat> [[TMP0]]
+//
+vbfloat16m8_t test_vfmv_v_f_bf16m8_tu(vbfloat16m8_t maskedoff, __bf16 src, size_t vl) {
+ return __riscv_vfmv_v_f_bf16m8_tu(maskedoff, src, vl);
+}
+
+// CHECK-RV64-LABEL: define dso_local <vscale x 1 x bfloat> @test_vfmv_s_f_bf16mf4_tu(
+// CHECK-RV64-SAME: <vscale x 1 x bfloat> [[MASKEDOFF:%.*]], bfloat noundef [[SRC:%.*]], i64 noundef [[VL:%.*]]) #[[ATTR0]] {
+// CHECK-RV64-NEXT: [[ENTRY:.*:]]
+// CHECK-RV64-NEXT: [[TMP0:%.*]] = call <vscale x 1 x bfloat> @llvm.riscv.vfmv.s.f.nxv1bf16.i64(<vscale x 1 x bfloat> [[MASKEDOFF]], bfloat [[SRC]], i64 [[VL]])
+// CHECK-RV64-NEXT: ret <vscale x 1 x bfloat> [[TMP0]]
+//
+vbfloat16mf4_t test_vfmv_s_f_bf16mf4_tu(vbfloat16mf4_t maskedoff, __bf16 src, size_t vl) {
+ return __riscv_vfmv_s_f_bf16mf4_tu(maskedoff, src, vl);
+}
+
+// CHECK-RV64-LABEL: define dso_local <vscale x 2 x bfloat> @test_vfmv_s_f_bf16mf2_tu(
+// CHECK-RV64-SAME: <vscale x 2 x bfloat> [[MASKEDOFF:%.*]], bfloat noundef [[SRC:%.*]], i64 noundef [[VL:%.*]]) #[[ATTR0]] {
+// CHECK-RV64-NEXT: [[ENTRY:.*:]]
+// CHECK-RV64-NEXT: [[TMP0:%.*]] = call <vscale x 2 x bfloat> @llvm.riscv.vfmv.s.f.nxv2bf16.i64(<vscale x 2 x bfloat> [[MASKEDOFF]], bfloat [[SRC]], i64 [[VL]])
+// CHECK-RV64-NEXT: ret <vscale x 2 x bfloat> [[TMP0]]
+//
+vbfloat16mf2_t test_vfmv_s_f_bf16mf2_tu(vbfloat16mf2_t maskedoff, __bf16 src, size_t vl) {
+ return __riscv_vfmv_s_f_bf16mf2_tu(maskedoff, src, vl);
+}
+
+// CHECK-RV64-LABEL: define dso_local <vscale x 4 x bfloat> @test_vfmv_s_f_bf16m1_tu(
+// CHECK-RV64-SAME: <vscale x 4 x bfloat> [[MASKEDOFF:%.*]], bfloat noundef [[SRC:%.*]], i64 noundef [[VL:%.*]]) #[[ATTR0]] {
+// CHECK-RV64-NEXT: [[ENTRY:.*:]]
+// CHECK-RV64-NEXT: [[TMP0:%.*]] = call <vscale x 4 x bfloat> @llvm.riscv.vfmv.s.f.nxv4bf16.i64(<vscale x 4 x bfloat> [[MASKEDOFF]], bfloat [[SRC]], i64 [[VL]])
+// CHECK-RV64-NEXT: ret <vscale x 4 x bfloat> [[TMP0]]
+//
+vbfloat16m1_t test_vfmv_s_f_bf16m1_tu(vbfloat16m1_t maskedoff, __bf16 src, size_t vl) {
+ return __riscv_vfmv_s_f_bf16m1_tu(maskedoff, src, vl);
+}
+
+// CHECK-RV64-LABEL: define dso_local <vscale x 8 x bfloat> @test_vfmv_s_f_bf16m2_tu(
+// CHECK-RV64-SAME: <vscale x 8 x bfloat> [[MASKEDOFF:%.*]], bfloat noundef [[SRC:%.*]], i64 noundef [[VL:%.*]]) #[[ATTR0]] {
+// CHECK-RV64-NEXT: [[ENTRY:.*:]]
+// CHECK-RV64-NEXT: [[TMP0:%.*]] = call <vscale x 8 x bfloat> @llvm.riscv.vfmv.s.f.nxv8bf16.i64(<vscale x 8 x bfloat> [[MASKEDOFF]], bfloat [[SRC]], i64 [[VL]])
+// CHECK-RV64-NEXT: ret <vscale x 8 x bfloat> [[TMP0]]
+//
+vbfloat16m2_t test_vfmv_s_f_bf16m2_tu(vbfloat16m2_t maskedoff, __bf16 src, size_t vl) {
+ return __riscv_vfmv_s_f_bf16m2_tu(maskedoff, src, vl);
+}
+
+// CHECK-RV64-LABEL: define dso_local <vscale x 16 x bfloat> @test_vfmv_s_f_bf16m4_tu(
+// CHECK-RV64-SAME: <vscale x 16 x bfloat> [[MASKEDOFF:%.*]], bfloat noundef [[SRC:%.*]], i64 noundef [[VL:%.*]]) #[[ATTR0]] {
+// CHECK-RV64-NEXT: [[ENTRY:.*:]]
+// CHECK-RV64-NEXT: [[TMP0:%.*]] = call <vscale x 16 x bfloat> @llvm.riscv.vfmv.s.f.nxv16bf16.i64(<vscale x 16 x bfloat> [[MASKEDOFF]], bfloat [[SRC]], i64 [[VL]])
+// CHECK-RV64-NEXT: ret <vscale x 16 x bfloat> [[TMP0]]
+//
+vbfloat16m4_t test_vfmv_s_f_bf16m4_tu(vbfloat16m4_t maskedoff, __bf16 src, size_t vl) {
+ return __riscv_vfmv_s_f_bf16m4_tu(maskedoff, src, vl);
+}
+
+// CHECK-RV64-LABEL: define dso_local <vscale x 32 x bfloat> @test_vfmv_s_f_bf16m8_tu(
+// CHECK-RV64-SAME: <vscale x 32 x bfloat> [[MASKEDOFF:%.*]], bfloat noundef [[SRC:%.*]], i64 noundef [[VL:%.*]]) #[[ATTR0]] {
+// CHECK-RV64-NEXT: [[ENTRY:.*:]]
+// CHECK-RV64-NEXT: [[TMP0:%.*]] = call <vscale x 32 x bfloat> @llvm.riscv.vfmv.s.f.nxv32bf16.i64(<vscale x 32 x bfloat> [[MASKEDOFF]], bfloat [[SRC]], i64 [[VL]])
+// CHECK-RV64-NEXT: ret <vscale x 32 x bfloat> [[TMP0]]
+//
+vbfloat16m8_t test_vfmv_s_f_bf16m8_tu(vbfloat16m8_t maskedoff, __bf16 src, size_t vl) {
+ return __riscv_vfmv_s_f_bf16m8_tu(maskedoff, src, vl);
+}
+
diff --git a/clang/test/CodeGen/RISCV/rvv-intrinsics-autogenerated/zvfbfa/policy/non-overloaded/vfncvt.c b/clang/test/CodeGen/RISCV/rvv-intrinsics-autogenerated/zvfbfa/policy/non-overloaded/vfncvt.c
new file mode 100644
index 0000000..36d4fc3
--- /dev/null
+++ b/clang/test/CodeGen/RISCV/rvv-intrinsics-autogenerated/zvfbfa/policy/non-overloaded/vfncvt.c
@@ -0,0 +1,1577 @@
+// NOTE: Assertions have been autogenerated by utils/update_cc_test_checks.py UTC_ARGS: --version 5
+// REQUIRES: riscv-registered-target
+// RUN: %clang_cc1 -triple riscv64 -target-feature +v \
+// RUN: -target-feature +experimental-zvfbfa -disable-O0-optnone \
+// RUN: -emit-llvm %s -o - | opt -S -passes=mem2reg | \
+// RUN: FileCheck --check-prefix=CHECK-RV64 %s
+
+#include <riscv_vector.h>
+
+// CHECK-RV64-LABEL: define dso_local <vscale x 1 x i8> @test_vfncvt_x_f_w_bf16mf4_i8mf8_tu(
+// CHECK-RV64-SAME: <vscale x 1 x i8> [[VD:%.*]], <vscale x 1 x bfloat> [[VS2:%.*]], i64 noundef [[VL:%.*]]) #[[ATTR0:[0-9]+]] {
+// CHECK-RV64-NEXT: [[ENTRY:.*:]]
+// CHECK-RV64-NEXT: [[TMP0:%.*]] = call <vscale x 1 x i8> @llvm.riscv.vfncvt.x.f.w.nxv1i8.nxv1bf16.i64(<vscale x 1 x i8> [[VD]], <vscale x 1 x bfloat> [[VS2]], i64 7, i64 [[VL]])
+// CHECK-RV64-NEXT: ret <vscale x 1 x i8> [[TMP0]]
+//
+vint8mf8_t test_vfncvt_x_f_w_bf16mf4_i8mf8_tu(vint8mf8_t vd, vbfloat16mf4_t vs2,
+ size_t vl) {
+ return __riscv_vfncvt_x_f_w_bf16mf4_i8mf8_tu(vd, vs2, vl);
+}
+
+// CHECK-RV64-LABEL: define dso_local <vscale x 2 x i8> @test_vfncvt_x_f_w_bf16mf2_i8mf4_tu(
+// CHECK-RV64-SAME: <vscale x 2 x i8> [[VD:%.*]], <vscale x 2 x bfloat> [[VS2:%.*]], i64 noundef [[VL:%.*]]) #[[ATTR0]] {
+// CHECK-RV64-NEXT: [[ENTRY:.*:]]
+// CHECK-RV64-NEXT: [[TMP0:%.*]] = call <vscale x 2 x i8> @llvm.riscv.vfncvt.x.f.w.nxv2i8.nxv2bf16.i64(<vscale x 2 x i8> [[VD]], <vscale x 2 x bfloat> [[VS2]], i64 7, i64 [[VL]])
+// CHECK-RV64-NEXT: ret <vscale x 2 x i8> [[TMP0]]
+//
+vint8mf4_t test_vfncvt_x_f_w_bf16mf2_i8mf4_tu(vint8mf4_t vd, vbfloat16mf2_t vs2,
+ size_t vl) {
+ return __riscv_vfncvt_x_f_w_bf16mf2_i8mf4_tu(vd, vs2, vl);
+}
+
+// CHECK-RV64-LABEL: define dso_local <vscale x 4 x i8> @test_vfncvt_x_f_w_bf16m1_i8mf2_tu(
+// CHECK-RV64-SAME: <vscale x 4 x i8> [[VD:%.*]], <vscale x 4 x bfloat> [[VS2:%.*]], i64 noundef [[VL:%.*]]) #[[ATTR0]] {
+// CHECK-RV64-NEXT: [[ENTRY:.*:]]
+// CHECK-RV64-NEXT: [[TMP0:%.*]] = call <vscale x 4 x i8> @llvm.riscv.vfncvt.x.f.w.nxv4i8.nxv4bf16.i64(<vscale x 4 x i8> [[VD]], <vscale x 4 x bfloat> [[VS2]], i64 7, i64 [[VL]])
+// CHECK-RV64-NEXT: ret <vscale x 4 x i8> [[TMP0]]
+//
+vint8mf2_t test_vfncvt_x_f_w_bf16m1_i8mf2_tu(vint8mf2_t vd, vbfloat16m1_t vs2,
+ size_t vl) {
+ return __riscv_vfncvt_x_f_w_bf16m1_i8mf2_tu(vd, vs2, vl);
+}
+
+// CHECK-RV64-LABEL: define dso_local <vscale x 8 x i8> @test_vfncvt_x_f_w_bf16m2_i8m1_tu(
+// CHECK-RV64-SAME: <vscale x 8 x i8> [[VD:%.*]], <vscale x 8 x bfloat> [[VS2:%.*]], i64 noundef [[VL:%.*]]) #[[ATTR0]] {
+// CHECK-RV64-NEXT: [[ENTRY:.*:]]
+// CHECK-RV64-NEXT: [[TMP0:%.*]] = call <vscale x 8 x i8> @llvm.riscv.vfncvt.x.f.w.nxv8i8.nxv8bf16.i64(<vscale x 8 x i8> [[VD]], <vscale x 8 x bfloat> [[VS2]], i64 7, i64 [[VL]])
+// CHECK-RV64-NEXT: ret <vscale x 8 x i8> [[TMP0]]
+//
+vint8m1_t test_vfncvt_x_f_w_bf16m2_i8m1_tu(vint8m1_t vd, vbfloat16m2_t vs2,
+ size_t vl) {
+ return __riscv_vfncvt_x_f_w_bf16m2_i8m1_tu(vd, vs2, vl);
+}
+
+// CHECK-RV64-LABEL: define dso_local <vscale x 16 x i8> @test_vfncvt_x_f_w_bf16m4_i8m2_tu(
+// CHECK-RV64-SAME: <vscale x 16 x i8> [[VD:%.*]], <vscale x 16 x bfloat> [[VS2:%.*]], i64 noundef [[VL:%.*]]) #[[ATTR0]] {
+// CHECK-RV64-NEXT: [[ENTRY:.*:]]
+// CHECK-RV64-NEXT: [[TMP0:%.*]] = call <vscale x 16 x i8> @llvm.riscv.vfncvt.x.f.w.nxv16i8.nxv16bf16.i64(<vscale x 16 x i8> [[VD]], <vscale x 16 x bfloat> [[VS2]], i64 7, i64 [[VL]])
+// CHECK-RV64-NEXT: ret <vscale x 16 x i8> [[TMP0]]
+//
+vint8m2_t test_vfncvt_x_f_w_bf16m4_i8m2_tu(vint8m2_t vd, vbfloat16m4_t vs2,
+ size_t vl) {
+ return __riscv_vfncvt_x_f_w_bf16m4_i8m2_tu(vd, vs2, vl);
+}
+
+// CHECK-RV64-LABEL: define dso_local <vscale x 32 x i8> @test_vfncvt_x_f_w_bf16m8_i8m4_tu(
+// CHECK-RV64-SAME: <vscale x 32 x i8> [[VD:%.*]], <vscale x 32 x bfloat> [[VS2:%.*]], i64 noundef [[VL:%.*]]) #[[ATTR0]] {
+// CHECK-RV64-NEXT: [[ENTRY:.*:]]
+// CHECK-RV64-NEXT: [[TMP0:%.*]] = call <vscale x 32 x i8> @llvm.riscv.vfncvt.x.f.w.nxv32i8.nxv32bf16.i64(<vscale x 32 x i8> [[VD]], <vscale x 32 x bfloat> [[VS2]], i64 7, i64 [[VL]])
+// CHECK-RV64-NEXT: ret <vscale x 32 x i8> [[TMP0]]
+//
+vint8m4_t test_vfncvt_x_f_w_bf16m8_i8m4_tu(vint8m4_t vd, vbfloat16m8_t vs2,
+ size_t vl) {
+ return __riscv_vfncvt_x_f_w_bf16m8_i8m4_tu(vd, vs2, vl);
+}
+
+// CHECK-RV64-LABEL: define dso_local <vscale x 1 x i8> @test_vfncvt_xu_f_w_bf16mf4_u8mf8_tu(
+// CHECK-RV64-SAME: <vscale x 1 x i8> [[VD:%.*]], <vscale x 1 x bfloat> [[VS2:%.*]], i64 noundef [[VL:%.*]]) #[[ATTR0]] {
+// CHECK-RV64-NEXT: [[ENTRY:.*:]]
+// CHECK-RV64-NEXT: [[TMP0:%.*]] = call <vscale x 1 x i8> @llvm.riscv.vfncvt.xu.f.w.nxv1i8.nxv1bf16.i64(<vscale x 1 x i8> [[VD]], <vscale x 1 x bfloat> [[VS2]], i64 7, i64 [[VL]])
+// CHECK-RV64-NEXT: ret <vscale x 1 x i8> [[TMP0]]
+//
+vuint8mf8_t test_vfncvt_xu_f_w_bf16mf4_u8mf8_tu(vuint8mf8_t vd,
+ vbfloat16mf4_t vs2, size_t vl) {
+ return __riscv_vfncvt_xu_f_w_bf16mf4_u8mf8_tu(vd, vs2, vl);
+}
+
+// CHECK-RV64-LABEL: define dso_local <vscale x 2 x i8> @test_vfncvt_xu_f_w_bf16mf2_u8mf4_tu(
+// CHECK-RV64-SAME: <vscale x 2 x i8> [[VD:%.*]], <vscale x 2 x bfloat> [[VS2:%.*]], i64 noundef [[VL:%.*]]) #[[ATTR0]] {
+// CHECK-RV64-NEXT: [[ENTRY:.*:]]
+// CHECK-RV64-NEXT: [[TMP0:%.*]] = call <vscale x 2 x i8> @llvm.riscv.vfncvt.xu.f.w.nxv2i8.nxv2bf16.i64(<vscale x 2 x i8> [[VD]], <vscale x 2 x bfloat> [[VS2]], i64 7, i64 [[VL]])
+// CHECK-RV64-NEXT: ret <vscale x 2 x i8> [[TMP0]]
+//
+vuint8mf4_t test_vfncvt_xu_f_w_bf16mf2_u8mf4_tu(vuint8mf4_t vd,
+ vbfloat16mf2_t vs2, size_t vl) {
+ return __riscv_vfncvt_xu_f_w_bf16mf2_u8mf4_tu(vd, vs2, vl);
+}
+
+// CHECK-RV64-LABEL: define dso_local <vscale x 4 x i8> @test_vfncvt_xu_f_w_bf16m1_u8mf2_tu(
+// CHECK-RV64-SAME: <vscale x 4 x i8> [[VD:%.*]], <vscale x 4 x bfloat> [[VS2:%.*]], i64 noundef [[VL:%.*]]) #[[ATTR0]] {
+// CHECK-RV64-NEXT: [[ENTRY:.*:]]
+// CHECK-RV64-NEXT: [[TMP0:%.*]] = call <vscale x 4 x i8> @llvm.riscv.vfncvt.xu.f.w.nxv4i8.nxv4bf16.i64(<vscale x 4 x i8> [[VD]], <vscale x 4 x bfloat> [[VS2]], i64 7, i64 [[VL]])
+// CHECK-RV64-NEXT: ret <vscale x 4 x i8> [[TMP0]]
+//
+vuint8mf2_t test_vfncvt_xu_f_w_bf16m1_u8mf2_tu(vuint8mf2_t vd,
+ vbfloat16m1_t vs2, size_t vl) {
+ return __riscv_vfncvt_xu_f_w_bf16m1_u8mf2_tu(vd, vs2, vl);
+}
+
+// CHECK-RV64-LABEL: define dso_local <vscale x 8 x i8> @test_vfncvt_xu_f_w_bf16m2_u8m1_tu(
+// CHECK-RV64-SAME: <vscale x 8 x i8> [[VD:%.*]], <vscale x 8 x bfloat> [[VS2:%.*]], i64 noundef [[VL:%.*]]) #[[ATTR0]] {
+// CHECK-RV64-NEXT: [[ENTRY:.*:]]
+// CHECK-RV64-NEXT: [[TMP0:%.*]] = call <vscale x 8 x i8> @llvm.riscv.vfncvt.xu.f.w.nxv8i8.nxv8bf16.i64(<vscale x 8 x i8> [[VD]], <vscale x 8 x bfloat> [[VS2]], i64 7, i64 [[VL]])
+// CHECK-RV64-NEXT: ret <vscale x 8 x i8> [[TMP0]]
+//
+vuint8m1_t test_vfncvt_xu_f_w_bf16m2_u8m1_tu(vuint8m1_t vd, vbfloat16m2_t vs2,
+ size_t vl) {
+ return __riscv_vfncvt_xu_f_w_bf16m2_u8m1_tu(vd, vs2, vl);
+}
+
+// CHECK-RV64-LABEL: define dso_local <vscale x 16 x i8> @test_vfncvt_xu_f_w_bf16m4_u8m2_tu(
+// CHECK-RV64-SAME: <vscale x 16 x i8> [[VD:%.*]], <vscale x 16 x bfloat> [[VS2:%.*]], i64 noundef [[VL:%.*]]) #[[ATTR0]] {
+// CHECK-RV64-NEXT: [[ENTRY:.*:]]
+// CHECK-RV64-NEXT: [[TMP0:%.*]] = call <vscale x 16 x i8> @llvm.riscv.vfncvt.xu.f.w.nxv16i8.nxv16bf16.i64(<vscale x 16 x i8> [[VD]], <vscale x 16 x bfloat> [[VS2]], i64 7, i64 [[VL]])
+// CHECK-RV64-NEXT: ret <vscale x 16 x i8> [[TMP0]]
+//
+vuint8m2_t test_vfncvt_xu_f_w_bf16m4_u8m2_tu(vuint8m2_t vd, vbfloat16m4_t vs2,
+ size_t vl) {
+ return __riscv_vfncvt_xu_f_w_bf16m4_u8m2_tu(vd, vs2, vl);
+}
+
+// CHECK-RV64-LABEL: define dso_local <vscale x 32 x i8> @test_vfncvt_xu_f_w_bf16m8_u8m4_tu(
+// CHECK-RV64-SAME: <vscale x 32 x i8> [[VD:%.*]], <vscale x 32 x bfloat> [[VS2:%.*]], i64 noundef [[VL:%.*]]) #[[ATTR0]] {
+// CHECK-RV64-NEXT: [[ENTRY:.*:]]
+// CHECK-RV64-NEXT: [[TMP0:%.*]] = call <vscale x 32 x i8> @llvm.riscv.vfncvt.xu.f.w.nxv32i8.nxv32bf16.i64(<vscale x 32 x i8> [[VD]], <vscale x 32 x bfloat> [[VS2]], i64 7, i64 [[VL]])
+// CHECK-RV64-NEXT: ret <vscale x 32 x i8> [[TMP0]]
+//
+vuint8m4_t test_vfncvt_xu_f_w_bf16m8_u8m4_tu(vuint8m4_t vd, vbfloat16m8_t vs2,
+ size_t vl) {
+ return __riscv_vfncvt_xu_f_w_bf16m8_u8m4_tu(vd, vs2, vl);
+}
+
+// CHECK-RV64-LABEL: define dso_local <vscale x 1 x bfloat> @test_vfncvt_f_f_w_bf16mf4_tu(
+// CHECK-RV64-SAME: <vscale x 1 x bfloat> [[VD:%.*]], <vscale x 1 x float> [[VS2:%.*]], i64 noundef [[VL:%.*]]) #[[ATTR0]] {
+// CHECK-RV64-NEXT: [[ENTRY:.*:]]
+// CHECK-RV64-NEXT: [[TMP0:%.*]] = call <vscale x 1 x bfloat> @llvm.riscv.vfncvt.f.f.w.nxv1bf16.nxv1f32.i64(<vscale x 1 x bfloat> [[VD]], <vscale x 1 x float> [[VS2]], i64 7, i64 [[VL]])
+// CHECK-RV64-NEXT: ret <vscale x 1 x bfloat> [[TMP0]]
+//
+vbfloat16mf4_t test_vfncvt_f_f_w_bf16mf4_tu(vbfloat16mf4_t vd,
+ vfloat32mf2_t vs2, size_t vl) {
+ return __riscv_vfncvt_f_f_w_bf16mf4_tu(vd, vs2, vl);
+}
+
+// CHECK-RV64-LABEL: define dso_local <vscale x 2 x bfloat> @test_vfncvt_f_f_w_bf16mf2_tu(
+// CHECK-RV64-SAME: <vscale x 2 x bfloat> [[VD:%.*]], <vscale x 2 x float> [[VS2:%.*]], i64 noundef [[VL:%.*]]) #[[ATTR0]] {
+// CHECK-RV64-NEXT: [[ENTRY:.*:]]
+// CHECK-RV64-NEXT: [[TMP0:%.*]] = call <vscale x 2 x bfloat> @llvm.riscv.vfncvt.f.f.w.nxv2bf16.nxv2f32.i64(<vscale x 2 x bfloat> [[VD]], <vscale x 2 x float> [[VS2]], i64 7, i64 [[VL]])
+// CHECK-RV64-NEXT: ret <vscale x 2 x bfloat> [[TMP0]]
+//
+vbfloat16mf2_t test_vfncvt_f_f_w_bf16mf2_tu(vbfloat16mf2_t vd, vfloat32m1_t vs2,
+ size_t vl) {
+ return __riscv_vfncvt_f_f_w_bf16mf2_tu(vd, vs2, vl);
+}
+
+// CHECK-RV64-LABEL: define dso_local <vscale x 4 x bfloat> @test_vfncvt_f_f_w_bf16m1_tu(
+// CHECK-RV64-SAME: <vscale x 4 x bfloat> [[VD:%.*]], <vscale x 4 x float> [[VS2:%.*]], i64 noundef [[VL:%.*]]) #[[ATTR0]] {
+// CHECK-RV64-NEXT: [[ENTRY:.*:]]
+// CHECK-RV64-NEXT: [[TMP0:%.*]] = call <vscale x 4 x bfloat> @llvm.riscv.vfncvt.f.f.w.nxv4bf16.nxv4f32.i64(<vscale x 4 x bfloat> [[VD]], <vscale x 4 x float> [[VS2]], i64 7, i64 [[VL]])
+// CHECK-RV64-NEXT: ret <vscale x 4 x bfloat> [[TMP0]]
+//
+vbfloat16m1_t test_vfncvt_f_f_w_bf16m1_tu(vbfloat16m1_t vd, vfloat32m2_t vs2,
+ size_t vl) {
+ return __riscv_vfncvt_f_f_w_bf16m1_tu(vd, vs2, vl);
+}
+
+// CHECK-RV64-LABEL: define dso_local <vscale x 8 x bfloat> @test_vfncvt_f_f_w_bf16m2_tu(
+// CHECK-RV64-SAME: <vscale x 8 x bfloat> [[VD:%.*]], <vscale x 8 x float> [[VS2:%.*]], i64 noundef [[VL:%.*]]) #[[ATTR0]] {
+// CHECK-RV64-NEXT: [[ENTRY:.*:]]
+// CHECK-RV64-NEXT: [[TMP0:%.*]] = call <vscale x 8 x bfloat> @llvm.riscv.vfncvt.f.f.w.nxv8bf16.nxv8f32.i64(<vscale x 8 x bfloat> [[VD]], <vscale x 8 x float> [[VS2]], i64 7, i64 [[VL]])
+// CHECK-RV64-NEXT: ret <vscale x 8 x bfloat> [[TMP0]]
+//
+vbfloat16m2_t test_vfncvt_f_f_w_bf16m2_tu(vbfloat16m2_t vd, vfloat32m4_t vs2,
+ size_t vl) {
+ return __riscv_vfncvt_f_f_w_bf16m2_tu(vd, vs2, vl);
+}
+
+// CHECK-RV64-LABEL: define dso_local <vscale x 16 x bfloat> @test_vfncvt_f_f_w_bf16m4_tu(
+// CHECK-RV64-SAME: <vscale x 16 x bfloat> [[VD:%.*]], <vscale x 16 x float> [[VS2:%.*]], i64 noundef [[VL:%.*]]) #[[ATTR0]] {
+// CHECK-RV64-NEXT: [[ENTRY:.*:]]
+// CHECK-RV64-NEXT: [[TMP0:%.*]] = call <vscale x 16 x bfloat> @llvm.riscv.vfncvt.f.f.w.nxv16bf16.nxv16f32.i64(<vscale x 16 x bfloat> [[VD]], <vscale x 16 x float> [[VS2]], i64 7, i64 [[VL]])
+// CHECK-RV64-NEXT: ret <vscale x 16 x bfloat> [[TMP0]]
+//
+vbfloat16m4_t test_vfncvt_f_f_w_bf16m4_tu(vbfloat16m4_t vd, vfloat32m8_t vs2,
+ size_t vl) {
+ return __riscv_vfncvt_f_f_w_bf16m4_tu(vd, vs2, vl);
+}
+
+// CHECK-RV64-LABEL: define dso_local <vscale x 1 x i8> @test_vfncvt_x_f_w_bf16mf4_i8mf8_tum(
+// CHECK-RV64-SAME: <vscale x 1 x i1> [[VM:%.*]], <vscale x 1 x i8> [[VD:%.*]], <vscale x 1 x bfloat> [[VS2:%.*]], i64 noundef [[VL:%.*]]) #[[ATTR0]] {
+// CHECK-RV64-NEXT: [[ENTRY:.*:]]
+// CHECK-RV64-NEXT: [[TMP0:%.*]] = call <vscale x 1 x i8> @llvm.riscv.vfncvt.x.f.w.mask.nxv1i8.nxv1bf16.i64(<vscale x 1 x i8> [[VD]], <vscale x 1 x bfloat> [[VS2]], <vscale x 1 x i1> [[VM]], i64 7, i64 [[VL]], i64 2)
+// CHECK-RV64-NEXT: ret <vscale x 1 x i8> [[TMP0]]
+//
+vint8mf8_t test_vfncvt_x_f_w_bf16mf4_i8mf8_tum(vbool64_t vm, vint8mf8_t vd,
+ vbfloat16mf4_t vs2, size_t vl) {
+ return __riscv_vfncvt_x_f_w_bf16mf4_i8mf8_tum(vm, vd, vs2, vl);
+}
+
+// CHECK-RV64-LABEL: define dso_local <vscale x 2 x i8> @test_vfncvt_x_f_w_bf16mf2_i8mf4_tum(
+// CHECK-RV64-SAME: <vscale x 2 x i1> [[VM:%.*]], <vscale x 2 x i8> [[VD:%.*]], <vscale x 2 x bfloat> [[VS2:%.*]], i64 noundef [[VL:%.*]]) #[[ATTR0]] {
+// CHECK-RV64-NEXT: [[ENTRY:.*:]]
+// CHECK-RV64-NEXT: [[TMP0:%.*]] = call <vscale x 2 x i8> @llvm.riscv.vfncvt.x.f.w.mask.nxv2i8.nxv2bf16.i64(<vscale x 2 x i8> [[VD]], <vscale x 2 x bfloat> [[VS2]], <vscale x 2 x i1> [[VM]], i64 7, i64 [[VL]], i64 2)
+// CHECK-RV64-NEXT: ret <vscale x 2 x i8> [[TMP0]]
+//
+vint8mf4_t test_vfncvt_x_f_w_bf16mf2_i8mf4_tum(vbool32_t vm, vint8mf4_t vd,
+ vbfloat16mf2_t vs2, size_t vl) {
+ return __riscv_vfncvt_x_f_w_bf16mf2_i8mf4_tum(vm, vd, vs2, vl);
+}
+
+// CHECK-RV64-LABEL: define dso_local <vscale x 4 x i8> @test_vfncvt_x_f_w_bf16m1_i8mf2_tum(
+// CHECK-RV64-SAME: <vscale x 4 x i1> [[VM:%.*]], <vscale x 4 x i8> [[VD:%.*]], <vscale x 4 x bfloat> [[VS2:%.*]], i64 noundef [[VL:%.*]]) #[[ATTR0]] {
+// CHECK-RV64-NEXT: [[ENTRY:.*:]]
+// CHECK-RV64-NEXT: [[TMP0:%.*]] = call <vscale x 4 x i8> @llvm.riscv.vfncvt.x.f.w.mask.nxv4i8.nxv4bf16.i64(<vscale x 4 x i8> [[VD]], <vscale x 4 x bfloat> [[VS2]], <vscale x 4 x i1> [[VM]], i64 7, i64 [[VL]], i64 2)
+// CHECK-RV64-NEXT: ret <vscale x 4 x i8> [[TMP0]]
+//
+vint8mf2_t test_vfncvt_x_f_w_bf16m1_i8mf2_tum(vbool16_t vm, vint8mf2_t vd,
+ vbfloat16m1_t vs2, size_t vl) {
+ return __riscv_vfncvt_x_f_w_bf16m1_i8mf2_tum(vm, vd, vs2, vl);
+}
+
+// CHECK-RV64-LABEL: define dso_local <vscale x 8 x i8> @test_vfncvt_x_f_w_bf16m2_i8m1_tum(
+// CHECK-RV64-SAME: <vscale x 8 x i1> [[VM:%.*]], <vscale x 8 x i8> [[VD:%.*]], <vscale x 8 x bfloat> [[VS2:%.*]], i64 noundef [[VL:%.*]]) #[[ATTR0]] {
+// CHECK-RV64-NEXT: [[ENTRY:.*:]]
+// CHECK-RV64-NEXT: [[TMP0:%.*]] = call <vscale x 8 x i8> @llvm.riscv.vfncvt.x.f.w.mask.nxv8i8.nxv8bf16.i64(<vscale x 8 x i8> [[VD]], <vscale x 8 x bfloat> [[VS2]], <vscale x 8 x i1> [[VM]], i64 7, i64 [[VL]], i64 2)
+// CHECK-RV64-NEXT: ret <vscale x 8 x i8> [[TMP0]]
+//
+vint8m1_t test_vfncvt_x_f_w_bf16m2_i8m1_tum(vbool8_t vm, vint8m1_t vd,
+ vbfloat16m2_t vs2, size_t vl) {
+ return __riscv_vfncvt_x_f_w_bf16m2_i8m1_tum(vm, vd, vs2, vl);
+}
+
+// CHECK-RV64-LABEL: define dso_local <vscale x 16 x i8> @test_vfncvt_x_f_w_bf16m4_i8m2_tum(
+// CHECK-RV64-SAME: <vscale x 16 x i1> [[VM:%.*]], <vscale x 16 x i8> [[VD:%.*]], <vscale x 16 x bfloat> [[VS2:%.*]], i64 noundef [[VL:%.*]]) #[[ATTR0]] {
+// CHECK-RV64-NEXT: [[ENTRY:.*:]]
+// CHECK-RV64-NEXT: [[TMP0:%.*]] = call <vscale x 16 x i8> @llvm.riscv.vfncvt.x.f.w.mask.nxv16i8.nxv16bf16.i64(<vscale x 16 x i8> [[VD]], <vscale x 16 x bfloat> [[VS2]], <vscale x 16 x i1> [[VM]], i64 7, i64 [[VL]], i64 2)
+// CHECK-RV64-NEXT: ret <vscale x 16 x i8> [[TMP0]]
+//
+vint8m2_t test_vfncvt_x_f_w_bf16m4_i8m2_tum(vbool4_t vm, vint8m2_t vd,
+ vbfloat16m4_t vs2, size_t vl) {
+ return __riscv_vfncvt_x_f_w_bf16m4_i8m2_tum(vm, vd, vs2, vl);
+}
+
+// CHECK-RV64-LABEL: define dso_local <vscale x 32 x i8> @test_vfncvt_x_f_w_bf16m8_i8m4_tum(
+// CHECK-RV64-SAME: <vscale x 32 x i1> [[VM:%.*]], <vscale x 32 x i8> [[VD:%.*]], <vscale x 32 x bfloat> [[VS2:%.*]], i64 noundef [[VL:%.*]]) #[[ATTR0]] {
+// CHECK-RV64-NEXT: [[ENTRY:.*:]]
+// CHECK-RV64-NEXT: [[TMP0:%.*]] = call <vscale x 32 x i8> @llvm.riscv.vfncvt.x.f.w.mask.nxv32i8.nxv32bf16.i64(<vscale x 32 x i8> [[VD]], <vscale x 32 x bfloat> [[VS2]], <vscale x 32 x i1> [[VM]], i64 7, i64 [[VL]], i64 2)
+// CHECK-RV64-NEXT: ret <vscale x 32 x i8> [[TMP0]]
+//
+vint8m4_t test_vfncvt_x_f_w_bf16m8_i8m4_tum(vbool2_t vm, vint8m4_t vd,
+ vbfloat16m8_t vs2, size_t vl) {
+ return __riscv_vfncvt_x_f_w_bf16m8_i8m4_tum(vm, vd, vs2, vl);
+}
+
+// CHECK-RV64-LABEL: define dso_local <vscale x 1 x i8> @test_vfncvt_xu_f_w_bf16mf4_u8mf8_tum(
+// CHECK-RV64-SAME: <vscale x 1 x i1> [[VM:%.*]], <vscale x 1 x i8> [[VD:%.*]], <vscale x 1 x bfloat> [[VS2:%.*]], i64 noundef [[VL:%.*]]) #[[ATTR0]] {
+// CHECK-RV64-NEXT: [[ENTRY:.*:]]
+// CHECK-RV64-NEXT: [[TMP0:%.*]] = call <vscale x 1 x i8> @llvm.riscv.vfncvt.xu.f.w.mask.nxv1i8.nxv1bf16.i64(<vscale x 1 x i8> [[VD]], <vscale x 1 x bfloat> [[VS2]], <vscale x 1 x i1> [[VM]], i64 7, i64 [[VL]], i64 2)
+// CHECK-RV64-NEXT: ret <vscale x 1 x i8> [[TMP0]]
+//
+vuint8mf8_t test_vfncvt_xu_f_w_bf16mf4_u8mf8_tum(vbool64_t vm, vuint8mf8_t vd,
+ vbfloat16mf4_t vs2,
+ size_t vl) {
+ return __riscv_vfncvt_xu_f_w_bf16mf4_u8mf8_tum(vm, vd, vs2, vl);
+}
+
+// CHECK-RV64-LABEL: define dso_local <vscale x 2 x i8> @test_vfncvt_xu_f_w_bf16mf2_u8mf4_tum(
+// CHECK-RV64-SAME: <vscale x 2 x i1> [[VM:%.*]], <vscale x 2 x i8> [[VD:%.*]], <vscale x 2 x bfloat> [[VS2:%.*]], i64 noundef [[VL:%.*]]) #[[ATTR0]] {
+// CHECK-RV64-NEXT: [[ENTRY:.*:]]
+// CHECK-RV64-NEXT: [[TMP0:%.*]] = call <vscale x 2 x i8> @llvm.riscv.vfncvt.xu.f.w.mask.nxv2i8.nxv2bf16.i64(<vscale x 2 x i8> [[VD]], <vscale x 2 x bfloat> [[VS2]], <vscale x 2 x i1> [[VM]], i64 7, i64 [[VL]], i64 2)
+// CHECK-RV64-NEXT: ret <vscale x 2 x i8> [[TMP0]]
+//
+vuint8mf4_t test_vfncvt_xu_f_w_bf16mf2_u8mf4_tum(vbool32_t vm, vuint8mf4_t vd,
+ vbfloat16mf2_t vs2,
+ size_t vl) {
+ return __riscv_vfncvt_xu_f_w_bf16mf2_u8mf4_tum(vm, vd, vs2, vl);
+}
+
+// CHECK-RV64-LABEL: define dso_local <vscale x 4 x i8> @test_vfncvt_xu_f_w_bf16m1_u8mf2_tum(
+// CHECK-RV64-SAME: <vscale x 4 x i1> [[VM:%.*]], <vscale x 4 x i8> [[VD:%.*]], <vscale x 4 x bfloat> [[VS2:%.*]], i64 noundef [[VL:%.*]]) #[[ATTR0]] {
+// CHECK-RV64-NEXT: [[ENTRY:.*:]]
+// CHECK-RV64-NEXT: [[TMP0:%.*]] = call <vscale x 4 x i8> @llvm.riscv.vfncvt.xu.f.w.mask.nxv4i8.nxv4bf16.i64(<vscale x 4 x i8> [[VD]], <vscale x 4 x bfloat> [[VS2]], <vscale x 4 x i1> [[VM]], i64 7, i64 [[VL]], i64 2)
+// CHECK-RV64-NEXT: ret <vscale x 4 x i8> [[TMP0]]
+//
+vuint8mf2_t test_vfncvt_xu_f_w_bf16m1_u8mf2_tum(vbool16_t vm, vuint8mf2_t vd,
+ vbfloat16m1_t vs2, size_t vl) {
+ return __riscv_vfncvt_xu_f_w_bf16m1_u8mf2_tum(vm, vd, vs2, vl);
+}
+
+// CHECK-RV64-LABEL: define dso_local <vscale x 8 x i8> @test_vfncvt_xu_f_w_bf16m2_u8m1_tum(
+// CHECK-RV64-SAME: <vscale x 8 x i1> [[VM:%.*]], <vscale x 8 x i8> [[VD:%.*]], <vscale x 8 x bfloat> [[VS2:%.*]], i64 noundef [[VL:%.*]]) #[[ATTR0]] {
+// CHECK-RV64-NEXT: [[ENTRY:.*:]]
+// CHECK-RV64-NEXT: [[TMP0:%.*]] = call <vscale x 8 x i8> @llvm.riscv.vfncvt.xu.f.w.mask.nxv8i8.nxv8bf16.i64(<vscale x 8 x i8> [[VD]], <vscale x 8 x bfloat> [[VS2]], <vscale x 8 x i1> [[VM]], i64 7, i64 [[VL]], i64 2)
+// CHECK-RV64-NEXT: ret <vscale x 8 x i8> [[TMP0]]
+//
+vuint8m1_t test_vfncvt_xu_f_w_bf16m2_u8m1_tum(vbool8_t vm, vuint8m1_t vd,
+ vbfloat16m2_t vs2, size_t vl) {
+ return __riscv_vfncvt_xu_f_w_bf16m2_u8m1_tum(vm, vd, vs2, vl);
+}
+
+// CHECK-RV64-LABEL: define dso_local <vscale x 16 x i8> @test_vfncvt_xu_f_w_bf16m4_u8m2_tum(
+// CHECK-RV64-SAME: <vscale x 16 x i1> [[VM:%.*]], <vscale x 16 x i8> [[VD:%.*]], <vscale x 16 x bfloat> [[VS2:%.*]], i64 noundef [[VL:%.*]]) #[[ATTR0]] {
+// CHECK-RV64-NEXT: [[ENTRY:.*:]]
+// CHECK-RV64-NEXT: [[TMP0:%.*]] = call <vscale x 16 x i8> @llvm.riscv.vfncvt.xu.f.w.mask.nxv16i8.nxv16bf16.i64(<vscale x 16 x i8> [[VD]], <vscale x 16 x bfloat> [[VS2]], <vscale x 16 x i1> [[VM]], i64 7, i64 [[VL]], i64 2)
+// CHECK-RV64-NEXT: ret <vscale x 16 x i8> [[TMP0]]
+//
+vuint8m2_t test_vfncvt_xu_f_w_bf16m4_u8m2_tum(vbool4_t vm, vuint8m2_t vd,
+ vbfloat16m4_t vs2, size_t vl) {
+ return __riscv_vfncvt_xu_f_w_bf16m4_u8m2_tum(vm, vd, vs2, vl);
+}
+
+// CHECK-RV64-LABEL: define dso_local <vscale x 32 x i8> @test_vfncvt_xu_f_w_bf16m8_u8m4_tum(
+// CHECK-RV64-SAME: <vscale x 32 x i1> [[VM:%.*]], <vscale x 32 x i8> [[VD:%.*]], <vscale x 32 x bfloat> [[VS2:%.*]], i64 noundef [[VL:%.*]]) #[[ATTR0]] {
+// CHECK-RV64-NEXT: [[ENTRY:.*:]]
+// CHECK-RV64-NEXT: [[TMP0:%.*]] = call <vscale x 32 x i8> @llvm.riscv.vfncvt.xu.f.w.mask.nxv32i8.nxv32bf16.i64(<vscale x 32 x i8> [[VD]], <vscale x 32 x bfloat> [[VS2]], <vscale x 32 x i1> [[VM]], i64 7, i64 [[VL]], i64 2)
+// CHECK-RV64-NEXT: ret <vscale x 32 x i8> [[TMP0]]
+//
+vuint8m4_t test_vfncvt_xu_f_w_bf16m8_u8m4_tum(vbool2_t vm, vuint8m4_t vd,
+ vbfloat16m8_t vs2, size_t vl) {
+ return __riscv_vfncvt_xu_f_w_bf16m8_u8m4_tum(vm, vd, vs2, vl);
+}
+
+// CHECK-RV64-LABEL: define dso_local <vscale x 1 x bfloat> @test_vfncvt_f_f_w_bf16mf4_tum(
+// CHECK-RV64-SAME: <vscale x 1 x i1> [[VM:%.*]], <vscale x 1 x bfloat> [[VD:%.*]], <vscale x 1 x float> [[VS2:%.*]], i64 noundef [[VL:%.*]]) #[[ATTR0]] {
+// CHECK-RV64-NEXT: [[ENTRY:.*:]]
+// CHECK-RV64-NEXT: [[TMP0:%.*]] = call <vscale x 1 x bfloat> @llvm.riscv.vfncvt.f.f.w.mask.nxv1bf16.nxv1f32.i64(<vscale x 1 x bfloat> [[VD]], <vscale x 1 x float> [[VS2]], <vscale x 1 x i1> [[VM]], i64 7, i64 [[VL]], i64 2)
+// CHECK-RV64-NEXT: ret <vscale x 1 x bfloat> [[TMP0]]
+//
+vbfloat16mf4_t test_vfncvt_f_f_w_bf16mf4_tum(vbool64_t vm, vbfloat16mf4_t vd,
+ vfloat32mf2_t vs2, size_t vl) {
+ return __riscv_vfncvt_f_f_w_bf16mf4_tum(vm, vd, vs2, vl);
+}
+
+// CHECK-RV64-LABEL: define dso_local <vscale x 2 x bfloat> @test_vfncvt_f_f_w_bf16mf2_tum(
+// CHECK-RV64-SAME: <vscale x 2 x i1> [[VM:%.*]], <vscale x 2 x bfloat> [[VD:%.*]], <vscale x 2 x float> [[VS2:%.*]], i64 noundef [[VL:%.*]]) #[[ATTR0]] {
+// CHECK-RV64-NEXT: [[ENTRY:.*:]]
+// CHECK-RV64-NEXT: [[TMP0:%.*]] = call <vscale x 2 x bfloat> @llvm.riscv.vfncvt.f.f.w.mask.nxv2bf16.nxv2f32.i64(<vscale x 2 x bfloat> [[VD]], <vscale x 2 x float> [[VS2]], <vscale x 2 x i1> [[VM]], i64 7, i64 [[VL]], i64 2)
+// CHECK-RV64-NEXT: ret <vscale x 2 x bfloat> [[TMP0]]
+//
+vbfloat16mf2_t test_vfncvt_f_f_w_bf16mf2_tum(vbool32_t vm, vbfloat16mf2_t vd,
+ vfloat32m1_t vs2, size_t vl) {
+ return __riscv_vfncvt_f_f_w_bf16mf2_tum(vm, vd, vs2, vl);
+}
+
+// CHECK-RV64-LABEL: define dso_local <vscale x 4 x bfloat> @test_vfncvt_f_f_w_bf16m1_tum(
+// CHECK-RV64-SAME: <vscale x 4 x i1> [[VM:%.*]], <vscale x 4 x bfloat> [[VD:%.*]], <vscale x 4 x float> [[VS2:%.*]], i64 noundef [[VL:%.*]]) #[[ATTR0]] {
+// CHECK-RV64-NEXT: [[ENTRY:.*:]]
+// CHECK-RV64-NEXT: [[TMP0:%.*]] = call <vscale x 4 x bfloat> @llvm.riscv.vfncvt.f.f.w.mask.nxv4bf16.nxv4f32.i64(<vscale x 4 x bfloat> [[VD]], <vscale x 4 x float> [[VS2]], <vscale x 4 x i1> [[VM]], i64 7, i64 [[VL]], i64 2)
+// CHECK-RV64-NEXT: ret <vscale x 4 x bfloat> [[TMP0]]
+//
+vbfloat16m1_t test_vfncvt_f_f_w_bf16m1_tum(vbool16_t vm, vbfloat16m1_t vd,
+ vfloat32m2_t vs2, size_t vl) {
+ return __riscv_vfncvt_f_f_w_bf16m1_tum(vm, vd, vs2, vl);
+}
+
+// CHECK-RV64-LABEL: define dso_local <vscale x 8 x bfloat> @test_vfncvt_f_f_w_bf16m2_tum(
+// CHECK-RV64-SAME: <vscale x 8 x i1> [[VM:%.*]], <vscale x 8 x bfloat> [[VD:%.*]], <vscale x 8 x float> [[VS2:%.*]], i64 noundef [[VL:%.*]]) #[[ATTR0]] {
+// CHECK-RV64-NEXT: [[ENTRY:.*:]]
+// CHECK-RV64-NEXT: [[TMP0:%.*]] = call <vscale x 8 x bfloat> @llvm.riscv.vfncvt.f.f.w.mask.nxv8bf16.nxv8f32.i64(<vscale x 8 x bfloat> [[VD]], <vscale x 8 x float> [[VS2]], <vscale x 8 x i1> [[VM]], i64 7, i64 [[VL]], i64 2)
+// CHECK-RV64-NEXT: ret <vscale x 8 x bfloat> [[TMP0]]
+//
+vbfloat16m2_t test_vfncvt_f_f_w_bf16m2_tum(vbool8_t vm, vbfloat16m2_t vd,
+ vfloat32m4_t vs2, size_t vl) {
+ return __riscv_vfncvt_f_f_w_bf16m2_tum(vm, vd, vs2, vl);
+}
+
+// CHECK-RV64-LABEL: define dso_local <vscale x 16 x bfloat> @test_vfncvt_f_f_w_bf16m4_tum(
+// CHECK-RV64-SAME: <vscale x 16 x i1> [[VM:%.*]], <vscale x 16 x bfloat> [[VD:%.*]], <vscale x 16 x float> [[VS2:%.*]], i64 noundef [[VL:%.*]]) #[[ATTR0]] {
+// CHECK-RV64-NEXT: [[ENTRY:.*:]]
+// CHECK-RV64-NEXT: [[TMP0:%.*]] = call <vscale x 16 x bfloat> @llvm.riscv.vfncvt.f.f.w.mask.nxv16bf16.nxv16f32.i64(<vscale x 16 x bfloat> [[VD]], <vscale x 16 x float> [[VS2]], <vscale x 16 x i1> [[VM]], i64 7, i64 [[VL]], i64 2)
+// CHECK-RV64-NEXT: ret <vscale x 16 x bfloat> [[TMP0]]
+//
+vbfloat16m4_t test_vfncvt_f_f_w_bf16m4_tum(vbool4_t vm, vbfloat16m4_t vd,
+ vfloat32m8_t vs2, size_t vl) {
+ return __riscv_vfncvt_f_f_w_bf16m4_tum(vm, vd, vs2, vl);
+}
+
+// CHECK-RV64-LABEL: define dso_local <vscale x 1 x i8> @test_vfncvt_x_f_w_bf16mf4_i8mf8_tumu(
+// CHECK-RV64-SAME: <vscale x 1 x i1> [[VM:%.*]], <vscale x 1 x i8> [[VD:%.*]], <vscale x 1 x bfloat> [[VS2:%.*]], i64 noundef [[VL:%.*]]) #[[ATTR0]] {
+// CHECK-RV64-NEXT: [[ENTRY:.*:]]
+// CHECK-RV64-NEXT: [[TMP0:%.*]] = call <vscale x 1 x i8> @llvm.riscv.vfncvt.x.f.w.mask.nxv1i8.nxv1bf16.i64(<vscale x 1 x i8> [[VD]], <vscale x 1 x bfloat> [[VS2]], <vscale x 1 x i1> [[VM]], i64 7, i64 [[VL]], i64 0)
+// CHECK-RV64-NEXT: ret <vscale x 1 x i8> [[TMP0]]
+//
+vint8mf8_t test_vfncvt_x_f_w_bf16mf4_i8mf8_tumu(vbool64_t vm, vint8mf8_t vd,
+ vbfloat16mf4_t vs2, size_t vl) {
+ return __riscv_vfncvt_x_f_w_bf16mf4_i8mf8_tumu(vm, vd, vs2, vl);
+}
+
+// CHECK-RV64-LABEL: define dso_local <vscale x 2 x i8> @test_vfncvt_x_f_w_bf16mf2_i8mf4_tumu(
+// CHECK-RV64-SAME: <vscale x 2 x i1> [[VM:%.*]], <vscale x 2 x i8> [[VD:%.*]], <vscale x 2 x bfloat> [[VS2:%.*]], i64 noundef [[VL:%.*]]) #[[ATTR0]] {
+// CHECK-RV64-NEXT: [[ENTRY:.*:]]
+// CHECK-RV64-NEXT: [[TMP0:%.*]] = call <vscale x 2 x i8> @llvm.riscv.vfncvt.x.f.w.mask.nxv2i8.nxv2bf16.i64(<vscale x 2 x i8> [[VD]], <vscale x 2 x bfloat> [[VS2]], <vscale x 2 x i1> [[VM]], i64 7, i64 [[VL]], i64 0)
+// CHECK-RV64-NEXT: ret <vscale x 2 x i8> [[TMP0]]
+//
+vint8mf4_t test_vfncvt_x_f_w_bf16mf2_i8mf4_tumu(vbool32_t vm, vint8mf4_t vd,
+ vbfloat16mf2_t vs2, size_t vl) {
+ return __riscv_vfncvt_x_f_w_bf16mf2_i8mf4_tumu(vm, vd, vs2, vl);
+}
+
+// CHECK-RV64-LABEL: define dso_local <vscale x 4 x i8> @test_vfncvt_x_f_w_bf16m1_i8mf2_tumu(
+// CHECK-RV64-SAME: <vscale x 4 x i1> [[VM:%.*]], <vscale x 4 x i8> [[VD:%.*]], <vscale x 4 x bfloat> [[VS2:%.*]], i64 noundef [[VL:%.*]]) #[[ATTR0]] {
+// CHECK-RV64-NEXT: [[ENTRY:.*:]]
+// CHECK-RV64-NEXT: [[TMP0:%.*]] = call <vscale x 4 x i8> @llvm.riscv.vfncvt.x.f.w.mask.nxv4i8.nxv4bf16.i64(<vscale x 4 x i8> [[VD]], <vscale x 4 x bfloat> [[VS2]], <vscale x 4 x i1> [[VM]], i64 7, i64 [[VL]], i64 0)
+// CHECK-RV64-NEXT: ret <vscale x 4 x i8> [[TMP0]]
+//
+vint8mf2_t test_vfncvt_x_f_w_bf16m1_i8mf2_tumu(vbool16_t vm, vint8mf2_t vd,
+ vbfloat16m1_t vs2, size_t vl) {
+ return __riscv_vfncvt_x_f_w_bf16m1_i8mf2_tumu(vm, vd, vs2, vl);
+}
+
+// CHECK-RV64-LABEL: define dso_local <vscale x 8 x i8> @test_vfncvt_x_f_w_bf16m2_i8m1_tumu(
+// CHECK-RV64-SAME: <vscale x 8 x i1> [[VM:%.*]], <vscale x 8 x i8> [[VD:%.*]], <vscale x 8 x bfloat> [[VS2:%.*]], i64 noundef [[VL:%.*]]) #[[ATTR0]] {
+// CHECK-RV64-NEXT: [[ENTRY:.*:]]
+// CHECK-RV64-NEXT: [[TMP0:%.*]] = call <vscale x 8 x i8> @llvm.riscv.vfncvt.x.f.w.mask.nxv8i8.nxv8bf16.i64(<vscale x 8 x i8> [[VD]], <vscale x 8 x bfloat> [[VS2]], <vscale x 8 x i1> [[VM]], i64 7, i64 [[VL]], i64 0)
+// CHECK-RV64-NEXT: ret <vscale x 8 x i8> [[TMP0]]
+//
+vint8m1_t test_vfncvt_x_f_w_bf16m2_i8m1_tumu(vbool8_t vm, vint8m1_t vd,
+ vbfloat16m2_t vs2, size_t vl) {
+ return __riscv_vfncvt_x_f_w_bf16m2_i8m1_tumu(vm, vd, vs2, vl);
+}
+
+// CHECK-RV64-LABEL: define dso_local <vscale x 16 x i8> @test_vfncvt_x_f_w_bf16m4_i8m2_tumu(
+// CHECK-RV64-SAME: <vscale x 16 x i1> [[VM:%.*]], <vscale x 16 x i8> [[VD:%.*]], <vscale x 16 x bfloat> [[VS2:%.*]], i64 noundef [[VL:%.*]]) #[[ATTR0]] {
+// CHECK-RV64-NEXT: [[ENTRY:.*:]]
+// CHECK-RV64-NEXT: [[TMP0:%.*]] = call <vscale x 16 x i8> @llvm.riscv.vfncvt.x.f.w.mask.nxv16i8.nxv16bf16.i64(<vscale x 16 x i8> [[VD]], <vscale x 16 x bfloat> [[VS2]], <vscale x 16 x i1> [[VM]], i64 7, i64 [[VL]], i64 0)
+// CHECK-RV64-NEXT: ret <vscale x 16 x i8> [[TMP0]]
+//
+vint8m2_t test_vfncvt_x_f_w_bf16m4_i8m2_tumu(vbool4_t vm, vint8m2_t vd,
+ vbfloat16m4_t vs2, size_t vl) {
+ return __riscv_vfncvt_x_f_w_bf16m4_i8m2_tumu(vm, vd, vs2, vl);
+}
+
+// CHECK-RV64-LABEL: define dso_local <vscale x 32 x i8> @test_vfncvt_x_f_w_bf16m8_i8m4_tumu(
+// CHECK-RV64-SAME: <vscale x 32 x i1> [[VM:%.*]], <vscale x 32 x i8> [[VD:%.*]], <vscale x 32 x bfloat> [[VS2:%.*]], i64 noundef [[VL:%.*]]) #[[ATTR0]] {
+// CHECK-RV64-NEXT: [[ENTRY:.*:]]
+// CHECK-RV64-NEXT: [[TMP0:%.*]] = call <vscale x 32 x i8> @llvm.riscv.vfncvt.x.f.w.mask.nxv32i8.nxv32bf16.i64(<vscale x 32 x i8> [[VD]], <vscale x 32 x bfloat> [[VS2]], <vscale x 32 x i1> [[VM]], i64 7, i64 [[VL]], i64 0)
+// CHECK-RV64-NEXT: ret <vscale x 32 x i8> [[TMP0]]
+//
+vint8m4_t test_vfncvt_x_f_w_bf16m8_i8m4_tumu(vbool2_t vm, vint8m4_t vd,
+ vbfloat16m8_t vs2, size_t vl) {
+ return __riscv_vfncvt_x_f_w_bf16m8_i8m4_tumu(vm, vd, vs2, vl);
+}
+
+// CHECK-RV64-LABEL: define dso_local <vscale x 1 x i8> @test_vfncvt_xu_f_w_bf16mf4_u8mf8_tumu(
+// CHECK-RV64-SAME: <vscale x 1 x i1> [[VM:%.*]], <vscale x 1 x i8> [[VD:%.*]], <vscale x 1 x bfloat> [[VS2:%.*]], i64 noundef [[VL:%.*]]) #[[ATTR0]] {
+// CHECK-RV64-NEXT: [[ENTRY:.*:]]
+// CHECK-RV64-NEXT: [[TMP0:%.*]] = call <vscale x 1 x i8> @llvm.riscv.vfncvt.xu.f.w.mask.nxv1i8.nxv1bf16.i64(<vscale x 1 x i8> [[VD]], <vscale x 1 x bfloat> [[VS2]], <vscale x 1 x i1> [[VM]], i64 7, i64 [[VL]], i64 0)
+// CHECK-RV64-NEXT: ret <vscale x 1 x i8> [[TMP0]]
+//
+vuint8mf8_t test_vfncvt_xu_f_w_bf16mf4_u8mf8_tumu(vbool64_t vm, vuint8mf8_t vd,
+ vbfloat16mf4_t vs2,
+ size_t vl) {
+ return __riscv_vfncvt_xu_f_w_bf16mf4_u8mf8_tumu(vm, vd, vs2, vl);
+}
+
+// CHECK-RV64-LABEL: define dso_local <vscale x 2 x i8> @test_vfncvt_xu_f_w_bf16mf2_u8mf4_tumu(
+// CHECK-RV64-SAME: <vscale x 2 x i1> [[VM:%.*]], <vscale x 2 x i8> [[VD:%.*]], <vscale x 2 x bfloat> [[VS2:%.*]], i64 noundef [[VL:%.*]]) #[[ATTR0]] {
+// CHECK-RV64-NEXT: [[ENTRY:.*:]]
+// CHECK-RV64-NEXT: [[TMP0:%.*]] = call <vscale x 2 x i8> @llvm.riscv.vfncvt.xu.f.w.mask.nxv2i8.nxv2bf16.i64(<vscale x 2 x i8> [[VD]], <vscale x 2 x bfloat> [[VS2]], <vscale x 2 x i1> [[VM]], i64 7, i64 [[VL]], i64 0)
+// CHECK-RV64-NEXT: ret <vscale x 2 x i8> [[TMP0]]
+//
+vuint8mf4_t test_vfncvt_xu_f_w_bf16mf2_u8mf4_tumu(vbool32_t vm, vuint8mf4_t vd,
+ vbfloat16mf2_t vs2,
+ size_t vl) {
+ return __riscv_vfncvt_xu_f_w_bf16mf2_u8mf4_tumu(vm, vd, vs2, vl);
+}
+
+// CHECK-RV64-LABEL: define dso_local <vscale x 4 x i8> @test_vfncvt_xu_f_w_bf16m1_u8mf2_tumu(
+// CHECK-RV64-SAME: <vscale x 4 x i1> [[VM:%.*]], <vscale x 4 x i8> [[VD:%.*]], <vscale x 4 x bfloat> [[VS2:%.*]], i64 noundef [[VL:%.*]]) #[[ATTR0]] {
+// CHECK-RV64-NEXT: [[ENTRY:.*:]]
+// CHECK-RV64-NEXT: [[TMP0:%.*]] = call <vscale x 4 x i8> @llvm.riscv.vfncvt.xu.f.w.mask.nxv4i8.nxv4bf16.i64(<vscale x 4 x i8> [[VD]], <vscale x 4 x bfloat> [[VS2]], <vscale x 4 x i1> [[VM]], i64 7, i64 [[VL]], i64 0)
+// CHECK-RV64-NEXT: ret <vscale x 4 x i8> [[TMP0]]
+//
+vuint8mf2_t test_vfncvt_xu_f_w_bf16m1_u8mf2_tumu(vbool16_t vm, vuint8mf2_t vd,
+ vbfloat16m1_t vs2, size_t vl) {
+ return __riscv_vfncvt_xu_f_w_bf16m1_u8mf2_tumu(vm, vd, vs2, vl);
+}
+
+// CHECK-RV64-LABEL: define dso_local <vscale x 8 x i8> @test_vfncvt_xu_f_w_bf16m2_u8m1_tumu(
+// CHECK-RV64-SAME: <vscale x 8 x i1> [[VM:%.*]], <vscale x 8 x i8> [[VD:%.*]], <vscale x 8 x bfloat> [[VS2:%.*]], i64 noundef [[VL:%.*]]) #[[ATTR0]] {
+// CHECK-RV64-NEXT: [[ENTRY:.*:]]
+// CHECK-RV64-NEXT: [[TMP0:%.*]] = call <vscale x 8 x i8> @llvm.riscv.vfncvt.xu.f.w.mask.nxv8i8.nxv8bf16.i64(<vscale x 8 x i8> [[VD]], <vscale x 8 x bfloat> [[VS2]], <vscale x 8 x i1> [[VM]], i64 7, i64 [[VL]], i64 0)
+// CHECK-RV64-NEXT: ret <vscale x 8 x i8> [[TMP0]]
+//
+vuint8m1_t test_vfncvt_xu_f_w_bf16m2_u8m1_tumu(vbool8_t vm, vuint8m1_t vd,
+ vbfloat16m2_t vs2, size_t vl) {
+ return __riscv_vfncvt_xu_f_w_bf16m2_u8m1_tumu(vm, vd, vs2, vl);
+}
+
+// CHECK-RV64-LABEL: define dso_local <vscale x 16 x i8> @test_vfncvt_xu_f_w_bf16m4_u8m2_tumu(
+// CHECK-RV64-SAME: <vscale x 16 x i1> [[VM:%.*]], <vscale x 16 x i8> [[VD:%.*]], <vscale x 16 x bfloat> [[VS2:%.*]], i64 noundef [[VL:%.*]]) #[[ATTR0]] {
+// CHECK-RV64-NEXT: [[ENTRY:.*:]]
+// CHECK-RV64-NEXT: [[TMP0:%.*]] = call <vscale x 16 x i8> @llvm.riscv.vfncvt.xu.f.w.mask.nxv16i8.nxv16bf16.i64(<vscale x 16 x i8> [[VD]], <vscale x 16 x bfloat> [[VS2]], <vscale x 16 x i1> [[VM]], i64 7, i64 [[VL]], i64 0)
+// CHECK-RV64-NEXT: ret <vscale x 16 x i8> [[TMP0]]
+//
+vuint8m2_t test_vfncvt_xu_f_w_bf16m4_u8m2_tumu(vbool4_t vm, vuint8m2_t vd,
+ vbfloat16m4_t vs2, size_t vl) {
+ return __riscv_vfncvt_xu_f_w_bf16m4_u8m2_tumu(vm, vd, vs2, vl);
+}
+
+// CHECK-RV64-LABEL: define dso_local <vscale x 32 x i8> @test_vfncvt_xu_f_w_bf16m8_u8m4_tumu(
+// CHECK-RV64-SAME: <vscale x 32 x i1> [[VM:%.*]], <vscale x 32 x i8> [[VD:%.*]], <vscale x 32 x bfloat> [[VS2:%.*]], i64 noundef [[VL:%.*]]) #[[ATTR0]] {
+// CHECK-RV64-NEXT: [[ENTRY:.*:]]
+// CHECK-RV64-NEXT: [[TMP0:%.*]] = call <vscale x 32 x i8> @llvm.riscv.vfncvt.xu.f.w.mask.nxv32i8.nxv32bf16.i64(<vscale x 32 x i8> [[VD]], <vscale x 32 x bfloat> [[VS2]], <vscale x 32 x i1> [[VM]], i64 7, i64 [[VL]], i64 0)
+// CHECK-RV64-NEXT: ret <vscale x 32 x i8> [[TMP0]]
+//
+vuint8m4_t test_vfncvt_xu_f_w_bf16m8_u8m4_tumu(vbool2_t vm, vuint8m4_t vd,
+ vbfloat16m8_t vs2, size_t vl) {
+ return __riscv_vfncvt_xu_f_w_bf16m8_u8m4_tumu(vm, vd, vs2, vl);
+}
+
+// CHECK-RV64-LABEL: define dso_local <vscale x 1 x bfloat> @test_vfncvt_f_f_w_bf16mf4_tumu(
+// CHECK-RV64-SAME: <vscale x 1 x i1> [[VM:%.*]], <vscale x 1 x bfloat> [[VD:%.*]], <vscale x 1 x float> [[VS2:%.*]], i64 noundef [[VL:%.*]]) #[[ATTR0]] {
+// CHECK-RV64-NEXT: [[ENTRY:.*:]]
+// CHECK-RV64-NEXT: [[TMP0:%.*]] = call <vscale x 1 x bfloat> @llvm.riscv.vfncvt.f.f.w.mask.nxv1bf16.nxv1f32.i64(<vscale x 1 x bfloat> [[VD]], <vscale x 1 x float> [[VS2]], <vscale x 1 x i1> [[VM]], i64 7, i64 [[VL]], i64 0)
+// CHECK-RV64-NEXT: ret <vscale x 1 x bfloat> [[TMP0]]
+//
+vbfloat16mf4_t test_vfncvt_f_f_w_bf16mf4_tumu(vbool64_t vm, vbfloat16mf4_t vd,
+ vfloat32mf2_t vs2, size_t vl) {
+ return __riscv_vfncvt_f_f_w_bf16mf4_tumu(vm, vd, vs2, vl);
+}
+
+// CHECK-RV64-LABEL: define dso_local <vscale x 2 x bfloat> @test_vfncvt_f_f_w_bf16mf2_tumu(
+// CHECK-RV64-SAME: <vscale x 2 x i1> [[VM:%.*]], <vscale x 2 x bfloat> [[VD:%.*]], <vscale x 2 x float> [[VS2:%.*]], i64 noundef [[VL:%.*]]) #[[ATTR0]] {
+// CHECK-RV64-NEXT: [[ENTRY:.*:]]
+// CHECK-RV64-NEXT: [[TMP0:%.*]] = call <vscale x 2 x bfloat> @llvm.riscv.vfncvt.f.f.w.mask.nxv2bf16.nxv2f32.i64(<vscale x 2 x bfloat> [[VD]], <vscale x 2 x float> [[VS2]], <vscale x 2 x i1> [[VM]], i64 7, i64 [[VL]], i64 0)
+// CHECK-RV64-NEXT: ret <vscale x 2 x bfloat> [[TMP0]]
+//
+vbfloat16mf2_t test_vfncvt_f_f_w_bf16mf2_tumu(vbool32_t vm, vbfloat16mf2_t vd,
+ vfloat32m1_t vs2, size_t vl) {
+ return __riscv_vfncvt_f_f_w_bf16mf2_tumu(vm, vd, vs2, vl);
+}
+
+// CHECK-RV64-LABEL: define dso_local <vscale x 4 x bfloat> @test_vfncvt_f_f_w_bf16m1_tumu(
+// CHECK-RV64-SAME: <vscale x 4 x i1> [[VM:%.*]], <vscale x 4 x bfloat> [[VD:%.*]], <vscale x 4 x float> [[VS2:%.*]], i64 noundef [[VL:%.*]]) #[[ATTR0]] {
+// CHECK-RV64-NEXT: [[ENTRY:.*:]]
+// CHECK-RV64-NEXT: [[TMP0:%.*]] = call <vscale x 4 x bfloat> @llvm.riscv.vfncvt.f.f.w.mask.nxv4bf16.nxv4f32.i64(<vscale x 4 x bfloat> [[VD]], <vscale x 4 x float> [[VS2]], <vscale x 4 x i1> [[VM]], i64 7, i64 [[VL]], i64 0)
+// CHECK-RV64-NEXT: ret <vscale x 4 x bfloat> [[TMP0]]
+//
+vbfloat16m1_t test_vfncvt_f_f_w_bf16m1_tumu(vbool16_t vm, vbfloat16m1_t vd,
+ vfloat32m2_t vs2, size_t vl) {
+ return __riscv_vfncvt_f_f_w_bf16m1_tumu(vm, vd, vs2, vl);
+}
+
+// CHECK-RV64-LABEL: define dso_local <vscale x 8 x bfloat> @test_vfncvt_f_f_w_bf16m2_tumu(
+// CHECK-RV64-SAME: <vscale x 8 x i1> [[VM:%.*]], <vscale x 8 x bfloat> [[VD:%.*]], <vscale x 8 x float> [[VS2:%.*]], i64 noundef [[VL:%.*]]) #[[ATTR0]] {
+// CHECK-RV64-NEXT: [[ENTRY:.*:]]
+// CHECK-RV64-NEXT: [[TMP0:%.*]] = call <vscale x 8 x bfloat> @llvm.riscv.vfncvt.f.f.w.mask.nxv8bf16.nxv8f32.i64(<vscale x 8 x bfloat> [[VD]], <vscale x 8 x float> [[VS2]], <vscale x 8 x i1> [[VM]], i64 7, i64 [[VL]], i64 0)
+// CHECK-RV64-NEXT: ret <vscale x 8 x bfloat> [[TMP0]]
+//
+vbfloat16m2_t test_vfncvt_f_f_w_bf16m2_tumu(vbool8_t vm, vbfloat16m2_t vd,
+ vfloat32m4_t vs2, size_t vl) {
+ return __riscv_vfncvt_f_f_w_bf16m2_tumu(vm, vd, vs2, vl);
+}
+
+// CHECK-RV64-LABEL: define dso_local <vscale x 16 x bfloat> @test_vfncvt_f_f_w_bf16m4_tumu(
+// CHECK-RV64-SAME: <vscale x 16 x i1> [[VM:%.*]], <vscale x 16 x bfloat> [[VD:%.*]], <vscale x 16 x float> [[VS2:%.*]], i64 noundef [[VL:%.*]]) #[[ATTR0]] {
+// CHECK-RV64-NEXT: [[ENTRY:.*:]]
+// CHECK-RV64-NEXT: [[TMP0:%.*]] = call <vscale x 16 x bfloat> @llvm.riscv.vfncvt.f.f.w.mask.nxv16bf16.nxv16f32.i64(<vscale x 16 x bfloat> [[VD]], <vscale x 16 x float> [[VS2]], <vscale x 16 x i1> [[VM]], i64 7, i64 [[VL]], i64 0)
+// CHECK-RV64-NEXT: ret <vscale x 16 x bfloat> [[TMP0]]
+//
+vbfloat16m4_t test_vfncvt_f_f_w_bf16m4_tumu(vbool4_t vm, vbfloat16m4_t vd,
+ vfloat32m8_t vs2, size_t vl) {
+ return __riscv_vfncvt_f_f_w_bf16m4_tumu(vm, vd, vs2, vl);
+}
+
+// CHECK-RV64-LABEL: define dso_local <vscale x 1 x i8> @test_vfncvt_x_f_w_bf16mf4_i8mf8_mu(
+// CHECK-RV64-SAME: <vscale x 1 x i1> [[VM:%.*]], <vscale x 1 x i8> [[VD:%.*]], <vscale x 1 x bfloat> [[VS2:%.*]], i64 noundef [[VL:%.*]]) #[[ATTR0]] {
+// CHECK-RV64-NEXT: [[ENTRY:.*:]]
+// CHECK-RV64-NEXT: [[TMP0:%.*]] = call <vscale x 1 x i8> @llvm.riscv.vfncvt.x.f.w.mask.nxv1i8.nxv1bf16.i64(<vscale x 1 x i8> [[VD]], <vscale x 1 x bfloat> [[VS2]], <vscale x 1 x i1> [[VM]], i64 7, i64 [[VL]], i64 1)
+// CHECK-RV64-NEXT: ret <vscale x 1 x i8> [[TMP0]]
+//
+vint8mf8_t test_vfncvt_x_f_w_bf16mf4_i8mf8_mu(vbool64_t vm, vint8mf8_t vd,
+ vbfloat16mf4_t vs2, size_t vl) {
+ return __riscv_vfncvt_x_f_w_bf16mf4_i8mf8_mu(vm, vd, vs2, vl);
+}
+
+// CHECK-RV64-LABEL: define dso_local <vscale x 2 x i8> @test_vfncvt_x_f_w_bf16mf2_i8mf4_mu(
+// CHECK-RV64-SAME: <vscale x 2 x i1> [[VM:%.*]], <vscale x 2 x i8> [[VD:%.*]], <vscale x 2 x bfloat> [[VS2:%.*]], i64 noundef [[VL:%.*]]) #[[ATTR0]] {
+// CHECK-RV64-NEXT: [[ENTRY:.*:]]
+// CHECK-RV64-NEXT: [[TMP0:%.*]] = call <vscale x 2 x i8> @llvm.riscv.vfncvt.x.f.w.mask.nxv2i8.nxv2bf16.i64(<vscale x 2 x i8> [[VD]], <vscale x 2 x bfloat> [[VS2]], <vscale x 2 x i1> [[VM]], i64 7, i64 [[VL]], i64 1)
+// CHECK-RV64-NEXT: ret <vscale x 2 x i8> [[TMP0]]
+//
+vint8mf4_t test_vfncvt_x_f_w_bf16mf2_i8mf4_mu(vbool32_t vm, vint8mf4_t vd,
+ vbfloat16mf2_t vs2, size_t vl) {
+ return __riscv_vfncvt_x_f_w_bf16mf2_i8mf4_mu(vm, vd, vs2, vl);
+}
+
+// CHECK-RV64-LABEL: define dso_local <vscale x 4 x i8> @test_vfncvt_x_f_w_bf16m1_i8mf2_mu(
+// CHECK-RV64-SAME: <vscale x 4 x i1> [[VM:%.*]], <vscale x 4 x i8> [[VD:%.*]], <vscale x 4 x bfloat> [[VS2:%.*]], i64 noundef [[VL:%.*]]) #[[ATTR0]] {
+// CHECK-RV64-NEXT: [[ENTRY:.*:]]
+// CHECK-RV64-NEXT: [[TMP0:%.*]] = call <vscale x 4 x i8> @llvm.riscv.vfncvt.x.f.w.mask.nxv4i8.nxv4bf16.i64(<vscale x 4 x i8> [[VD]], <vscale x 4 x bfloat> [[VS2]], <vscale x 4 x i1> [[VM]], i64 7, i64 [[VL]], i64 1)
+// CHECK-RV64-NEXT: ret <vscale x 4 x i8> [[TMP0]]
+//
+vint8mf2_t test_vfncvt_x_f_w_bf16m1_i8mf2_mu(vbool16_t vm, vint8mf2_t vd,
+ vbfloat16m1_t vs2, size_t vl) {
+ return __riscv_vfncvt_x_f_w_bf16m1_i8mf2_mu(vm, vd, vs2, vl);
+}
+
+// CHECK-RV64-LABEL: define dso_local <vscale x 8 x i8> @test_vfncvt_x_f_w_bf16m2_i8m1_mu(
+// CHECK-RV64-SAME: <vscale x 8 x i1> [[VM:%.*]], <vscale x 8 x i8> [[VD:%.*]], <vscale x 8 x bfloat> [[VS2:%.*]], i64 noundef [[VL:%.*]]) #[[ATTR0]] {
+// CHECK-RV64-NEXT: [[ENTRY:.*:]]
+// CHECK-RV64-NEXT: [[TMP0:%.*]] = call <vscale x 8 x i8> @llvm.riscv.vfncvt.x.f.w.mask.nxv8i8.nxv8bf16.i64(<vscale x 8 x i8> [[VD]], <vscale x 8 x bfloat> [[VS2]], <vscale x 8 x i1> [[VM]], i64 7, i64 [[VL]], i64 1)
+// CHECK-RV64-NEXT: ret <vscale x 8 x i8> [[TMP0]]
+//
+vint8m1_t test_vfncvt_x_f_w_bf16m2_i8m1_mu(vbool8_t vm, vint8m1_t vd,
+ vbfloat16m2_t vs2, size_t vl) {
+ return __riscv_vfncvt_x_f_w_bf16m2_i8m1_mu(vm, vd, vs2, vl);
+}
+
+// CHECK-RV64-LABEL: define dso_local <vscale x 16 x i8> @test_vfncvt_x_f_w_bf16m4_i8m2_mu(
+// CHECK-RV64-SAME: <vscale x 16 x i1> [[VM:%.*]], <vscale x 16 x i8> [[VD:%.*]], <vscale x 16 x bfloat> [[VS2:%.*]], i64 noundef [[VL:%.*]]) #[[ATTR0]] {
+// CHECK-RV64-NEXT: [[ENTRY:.*:]]
+// CHECK-RV64-NEXT: [[TMP0:%.*]] = call <vscale x 16 x i8> @llvm.riscv.vfncvt.x.f.w.mask.nxv16i8.nxv16bf16.i64(<vscale x 16 x i8> [[VD]], <vscale x 16 x bfloat> [[VS2]], <vscale x 16 x i1> [[VM]], i64 7, i64 [[VL]], i64 1)
+// CHECK-RV64-NEXT: ret <vscale x 16 x i8> [[TMP0]]
+//
+vint8m2_t test_vfncvt_x_f_w_bf16m4_i8m2_mu(vbool4_t vm, vint8m2_t vd,
+ vbfloat16m4_t vs2, size_t vl) {
+ return __riscv_vfncvt_x_f_w_bf16m4_i8m2_mu(vm, vd, vs2, vl);
+}
+
+// CHECK-RV64-LABEL: define dso_local <vscale x 32 x i8> @test_vfncvt_x_f_w_bf16m8_i8m4_mu(
+// CHECK-RV64-SAME: <vscale x 32 x i1> [[VM:%.*]], <vscale x 32 x i8> [[VD:%.*]], <vscale x 32 x bfloat> [[VS2:%.*]], i64 noundef [[VL:%.*]]) #[[ATTR0]] {
+// CHECK-RV64-NEXT: [[ENTRY:.*:]]
+// CHECK-RV64-NEXT: [[TMP0:%.*]] = call <vscale x 32 x i8> @llvm.riscv.vfncvt.x.f.w.mask.nxv32i8.nxv32bf16.i64(<vscale x 32 x i8> [[VD]], <vscale x 32 x bfloat> [[VS2]], <vscale x 32 x i1> [[VM]], i64 7, i64 [[VL]], i64 1)
+// CHECK-RV64-NEXT: ret <vscale x 32 x i8> [[TMP0]]
+//
+vint8m4_t test_vfncvt_x_f_w_bf16m8_i8m4_mu(vbool2_t vm, vint8m4_t vd,
+ vbfloat16m8_t vs2, size_t vl) {
+ return __riscv_vfncvt_x_f_w_bf16m8_i8m4_mu(vm, vd, vs2, vl);
+}
+
+// CHECK-RV64-LABEL: define dso_local <vscale x 1 x i8> @test_vfncvt_xu_f_w_bf16mf4_u8mf8_mu(
+// CHECK-RV64-SAME: <vscale x 1 x i1> [[VM:%.*]], <vscale x 1 x i8> [[VD:%.*]], <vscale x 1 x bfloat> [[VS2:%.*]], i64 noundef [[VL:%.*]]) #[[ATTR0]] {
+// CHECK-RV64-NEXT: [[ENTRY:.*:]]
+// CHECK-RV64-NEXT: [[TMP0:%.*]] = call <vscale x 1 x i8> @llvm.riscv.vfncvt.xu.f.w.mask.nxv1i8.nxv1bf16.i64(<vscale x 1 x i8> [[VD]], <vscale x 1 x bfloat> [[VS2]], <vscale x 1 x i1> [[VM]], i64 7, i64 [[VL]], i64 1)
+// CHECK-RV64-NEXT: ret <vscale x 1 x i8> [[TMP0]]
+//
+vuint8mf8_t test_vfncvt_xu_f_w_bf16mf4_u8mf8_mu(vbool64_t vm, vuint8mf8_t vd,
+ vbfloat16mf4_t vs2, size_t vl) {
+ return __riscv_vfncvt_xu_f_w_bf16mf4_u8mf8_mu(vm, vd, vs2, vl);
+}
+
+// CHECK-RV64-LABEL: define dso_local <vscale x 2 x i8> @test_vfncvt_xu_f_w_bf16mf2_u8mf4_mu(
+// CHECK-RV64-SAME: <vscale x 2 x i1> [[VM:%.*]], <vscale x 2 x i8> [[VD:%.*]], <vscale x 2 x bfloat> [[VS2:%.*]], i64 noundef [[VL:%.*]]) #[[ATTR0]] {
+// CHECK-RV64-NEXT: [[ENTRY:.*:]]
+// CHECK-RV64-NEXT: [[TMP0:%.*]] = call <vscale x 2 x i8> @llvm.riscv.vfncvt.xu.f.w.mask.nxv2i8.nxv2bf16.i64(<vscale x 2 x i8> [[VD]], <vscale x 2 x bfloat> [[VS2]], <vscale x 2 x i1> [[VM]], i64 7, i64 [[VL]], i64 1)
+// CHECK-RV64-NEXT: ret <vscale x 2 x i8> [[TMP0]]
+//
+vuint8mf4_t test_vfncvt_xu_f_w_bf16mf2_u8mf4_mu(vbool32_t vm, vuint8mf4_t vd,
+ vbfloat16mf2_t vs2, size_t vl) {
+ return __riscv_vfncvt_xu_f_w_bf16mf2_u8mf4_mu(vm, vd, vs2, vl);
+}
+
+// CHECK-RV64-LABEL: define dso_local <vscale x 4 x i8> @test_vfncvt_xu_f_w_bf16m1_u8mf2_mu(
+// CHECK-RV64-SAME: <vscale x 4 x i1> [[VM:%.*]], <vscale x 4 x i8> [[VD:%.*]], <vscale x 4 x bfloat> [[VS2:%.*]], i64 noundef [[VL:%.*]]) #[[ATTR0]] {
+// CHECK-RV64-NEXT: [[ENTRY:.*:]]
+// CHECK-RV64-NEXT: [[TMP0:%.*]] = call <vscale x 4 x i8> @llvm.riscv.vfncvt.xu.f.w.mask.nxv4i8.nxv4bf16.i64(<vscale x 4 x i8> [[VD]], <vscale x 4 x bfloat> [[VS2]], <vscale x 4 x i1> [[VM]], i64 7, i64 [[VL]], i64 1)
+// CHECK-RV64-NEXT: ret <vscale x 4 x i8> [[TMP0]]
+//
+vuint8mf2_t test_vfncvt_xu_f_w_bf16m1_u8mf2_mu(vbool16_t vm, vuint8mf2_t vd,
+ vbfloat16m1_t vs2, size_t vl) {
+ return __riscv_vfncvt_xu_f_w_bf16m1_u8mf2_mu(vm, vd, vs2, vl);
+}
+
+// CHECK-RV64-LABEL: define dso_local <vscale x 8 x i8> @test_vfncvt_xu_f_w_bf16m2_u8m1_mu(
+// CHECK-RV64-SAME: <vscale x 8 x i1> [[VM:%.*]], <vscale x 8 x i8> [[VD:%.*]], <vscale x 8 x bfloat> [[VS2:%.*]], i64 noundef [[VL:%.*]]) #[[ATTR0]] {
+// CHECK-RV64-NEXT: [[ENTRY:.*:]]
+// CHECK-RV64-NEXT: [[TMP0:%.*]] = call <vscale x 8 x i8> @llvm.riscv.vfncvt.xu.f.w.mask.nxv8i8.nxv8bf16.i64(<vscale x 8 x i8> [[VD]], <vscale x 8 x bfloat> [[VS2]], <vscale x 8 x i1> [[VM]], i64 7, i64 [[VL]], i64 1)
+// CHECK-RV64-NEXT: ret <vscale x 8 x i8> [[TMP0]]
+//
+vuint8m1_t test_vfncvt_xu_f_w_bf16m2_u8m1_mu(vbool8_t vm, vuint8m1_t vd,
+ vbfloat16m2_t vs2, size_t vl) {
+ return __riscv_vfncvt_xu_f_w_bf16m2_u8m1_mu(vm, vd, vs2, vl);
+}
+
+// CHECK-RV64-LABEL: define dso_local <vscale x 16 x i8> @test_vfncvt_xu_f_w_bf16m4_u8m2_mu(
+// CHECK-RV64-SAME: <vscale x 16 x i1> [[VM:%.*]], <vscale x 16 x i8> [[VD:%.*]], <vscale x 16 x bfloat> [[VS2:%.*]], i64 noundef [[VL:%.*]]) #[[ATTR0]] {
+// CHECK-RV64-NEXT: [[ENTRY:.*:]]
+// CHECK-RV64-NEXT: [[TMP0:%.*]] = call <vscale x 16 x i8> @llvm.riscv.vfncvt.xu.f.w.mask.nxv16i8.nxv16bf16.i64(<vscale x 16 x i8> [[VD]], <vscale x 16 x bfloat> [[VS2]], <vscale x 16 x i1> [[VM]], i64 7, i64 [[VL]], i64 1)
+// CHECK-RV64-NEXT: ret <vscale x 16 x i8> [[TMP0]]
+//
+vuint8m2_t test_vfncvt_xu_f_w_bf16m4_u8m2_mu(vbool4_t vm, vuint8m2_t vd,
+ vbfloat16m4_t vs2, size_t vl) {
+ return __riscv_vfncvt_xu_f_w_bf16m4_u8m2_mu(vm, vd, vs2, vl);
+}
+
+// CHECK-RV64-LABEL: define dso_local <vscale x 32 x i8> @test_vfncvt_xu_f_w_bf16m8_u8m4_mu(
+// CHECK-RV64-SAME: <vscale x 32 x i1> [[VM:%.*]], <vscale x 32 x i8> [[VD:%.*]], <vscale x 32 x bfloat> [[VS2:%.*]], i64 noundef [[VL:%.*]]) #[[ATTR0]] {
+// CHECK-RV64-NEXT: [[ENTRY:.*:]]
+// CHECK-RV64-NEXT: [[TMP0:%.*]] = call <vscale x 32 x i8> @llvm.riscv.vfncvt.xu.f.w.mask.nxv32i8.nxv32bf16.i64(<vscale x 32 x i8> [[VD]], <vscale x 32 x bfloat> [[VS2]], <vscale x 32 x i1> [[VM]], i64 7, i64 [[VL]], i64 1)
+// CHECK-RV64-NEXT: ret <vscale x 32 x i8> [[TMP0]]
+//
+vuint8m4_t test_vfncvt_xu_f_w_bf16m8_u8m4_mu(vbool2_t vm, vuint8m4_t vd,
+ vbfloat16m8_t vs2, size_t vl) {
+ return __riscv_vfncvt_xu_f_w_bf16m8_u8m4_mu(vm, vd, vs2, vl);
+}
+
+// CHECK-RV64-LABEL: define dso_local <vscale x 1 x bfloat> @test_vfncvt_f_f_w_bf16mf4_mu(
+// CHECK-RV64-SAME: <vscale x 1 x i1> [[VM:%.*]], <vscale x 1 x bfloat> [[VD:%.*]], <vscale x 1 x float> [[VS2:%.*]], i64 noundef [[VL:%.*]]) #[[ATTR0]] {
+// CHECK-RV64-NEXT: [[ENTRY:.*:]]
+// CHECK-RV64-NEXT: [[TMP0:%.*]] = call <vscale x 1 x bfloat> @llvm.riscv.vfncvt.f.f.w.mask.nxv1bf16.nxv1f32.i64(<vscale x 1 x bfloat> [[VD]], <vscale x 1 x float> [[VS2]], <vscale x 1 x i1> [[VM]], i64 7, i64 [[VL]], i64 1)
+// CHECK-RV64-NEXT: ret <vscale x 1 x bfloat> [[TMP0]]
+//
+vbfloat16mf4_t test_vfncvt_f_f_w_bf16mf4_mu(vbool64_t vm, vbfloat16mf4_t vd,
+ vfloat32mf2_t vs2, size_t vl) {
+ return __riscv_vfncvt_f_f_w_bf16mf4_mu(vm, vd, vs2, vl);
+}
+
+// CHECK-RV64-LABEL: define dso_local <vscale x 2 x bfloat> @test_vfncvt_f_f_w_bf16mf2_mu(
+// CHECK-RV64-SAME: <vscale x 2 x i1> [[VM:%.*]], <vscale x 2 x bfloat> [[VD:%.*]], <vscale x 2 x float> [[VS2:%.*]], i64 noundef [[VL:%.*]]) #[[ATTR0]] {
+// CHECK-RV64-NEXT: [[ENTRY:.*:]]
+// CHECK-RV64-NEXT: [[TMP0:%.*]] = call <vscale x 2 x bfloat> @llvm.riscv.vfncvt.f.f.w.mask.nxv2bf16.nxv2f32.i64(<vscale x 2 x bfloat> [[VD]], <vscale x 2 x float> [[VS2]], <vscale x 2 x i1> [[VM]], i64 7, i64 [[VL]], i64 1)
+// CHECK-RV64-NEXT: ret <vscale x 2 x bfloat> [[TMP0]]
+//
+vbfloat16mf2_t test_vfncvt_f_f_w_bf16mf2_mu(vbool32_t vm, vbfloat16mf2_t vd,
+ vfloat32m1_t vs2, size_t vl) {
+ return __riscv_vfncvt_f_f_w_bf16mf2_mu(vm, vd, vs2, vl);
+}
+
+// CHECK-RV64-LABEL: define dso_local <vscale x 4 x bfloat> @test_vfncvt_f_f_w_bf16m1_mu(
+// CHECK-RV64-SAME: <vscale x 4 x i1> [[VM:%.*]], <vscale x 4 x bfloat> [[VD:%.*]], <vscale x 4 x float> [[VS2:%.*]], i64 noundef [[VL:%.*]]) #[[ATTR0]] {
+// CHECK-RV64-NEXT: [[ENTRY:.*:]]
+// CHECK-RV64-NEXT: [[TMP0:%.*]] = call <vscale x 4 x bfloat> @llvm.riscv.vfncvt.f.f.w.mask.nxv4bf16.nxv4f32.i64(<vscale x 4 x bfloat> [[VD]], <vscale x 4 x float> [[VS2]], <vscale x 4 x i1> [[VM]], i64 7, i64 [[VL]], i64 1)
+// CHECK-RV64-NEXT: ret <vscale x 4 x bfloat> [[TMP0]]
+//
+vbfloat16m1_t test_vfncvt_f_f_w_bf16m1_mu(vbool16_t vm, vbfloat16m1_t vd,
+ vfloat32m2_t vs2, size_t vl) {
+ return __riscv_vfncvt_f_f_w_bf16m1_mu(vm, vd, vs2, vl);
+}
+
+// CHECK-RV64-LABEL: define dso_local <vscale x 8 x bfloat> @test_vfncvt_f_f_w_bf16m2_mu(
+// CHECK-RV64-SAME: <vscale x 8 x i1> [[VM:%.*]], <vscale x 8 x bfloat> [[VD:%.*]], <vscale x 8 x float> [[VS2:%.*]], i64 noundef [[VL:%.*]]) #[[ATTR0]] {
+// CHECK-RV64-NEXT: [[ENTRY:.*:]]
+// CHECK-RV64-NEXT: [[TMP0:%.*]] = call <vscale x 8 x bfloat> @llvm.riscv.vfncvt.f.f.w.mask.nxv8bf16.nxv8f32.i64(<vscale x 8 x bfloat> [[VD]], <vscale x 8 x float> [[VS2]], <vscale x 8 x i1> [[VM]], i64 7, i64 [[VL]], i64 1)
+// CHECK-RV64-NEXT: ret <vscale x 8 x bfloat> [[TMP0]]
+//
+vbfloat16m2_t test_vfncvt_f_f_w_bf16m2_mu(vbool8_t vm, vbfloat16m2_t vd,
+ vfloat32m4_t vs2, size_t vl) {
+ return __riscv_vfncvt_f_f_w_bf16m2_mu(vm, vd, vs2, vl);
+}
+
+// CHECK-RV64-LABEL: define dso_local <vscale x 16 x bfloat> @test_vfncvt_f_f_w_bf16m4_mu(
+// CHECK-RV64-SAME: <vscale x 16 x i1> [[VM:%.*]], <vscale x 16 x bfloat> [[VD:%.*]], <vscale x 16 x float> [[VS2:%.*]], i64 noundef [[VL:%.*]]) #[[ATTR0]] {
+// CHECK-RV64-NEXT: [[ENTRY:.*:]]
+// CHECK-RV64-NEXT: [[TMP0:%.*]] = call <vscale x 16 x bfloat> @llvm.riscv.vfncvt.f.f.w.mask.nxv16bf16.nxv16f32.i64(<vscale x 16 x bfloat> [[VD]], <vscale x 16 x float> [[VS2]], <vscale x 16 x i1> [[VM]], i64 7, i64 [[VL]], i64 1)
+// CHECK-RV64-NEXT: ret <vscale x 16 x bfloat> [[TMP0]]
+//
+vbfloat16m4_t test_vfncvt_f_f_w_bf16m4_mu(vbool4_t vm, vbfloat16m4_t vd,
+ vfloat32m8_t vs2, size_t vl) {
+ return __riscv_vfncvt_f_f_w_bf16m4_mu(vm, vd, vs2, vl);
+}
+
+// CHECK-RV64-LABEL: define dso_local <vscale x 1 x i8> @test_vfncvt_x_f_w_bf16mf4_i8mf8_rm_tu(
+// CHECK-RV64-SAME: <vscale x 1 x i8> [[VD:%.*]], <vscale x 1 x bfloat> [[VS2:%.*]], i64 noundef [[VL:%.*]]) #[[ATTR0]] {
+// CHECK-RV64-NEXT: [[ENTRY:.*:]]
+// CHECK-RV64-NEXT: [[TMP0:%.*]] = call <vscale x 1 x i8> @llvm.riscv.vfncvt.x.f.w.nxv1i8.nxv1bf16.i64(<vscale x 1 x i8> [[VD]], <vscale x 1 x bfloat> [[VS2]], i64 0, i64 [[VL]])
+// CHECK-RV64-NEXT: ret <vscale x 1 x i8> [[TMP0]]
+//
+vint8mf8_t test_vfncvt_x_f_w_bf16mf4_i8mf8_rm_tu(vint8mf8_t vd,
+ vbfloat16mf4_t vs2,
+ size_t vl) {
+ return __riscv_vfncvt_x_f_w_bf16mf4_i8mf8_rm_tu(vd, vs2, __RISCV_FRM_RNE, vl);
+}
+
+// CHECK-RV64-LABEL: define dso_local <vscale x 2 x i8> @test_vfncvt_x_f_w_bf16mf2_i8mf4_rm_tu(
+// CHECK-RV64-SAME: <vscale x 2 x i8> [[VD:%.*]], <vscale x 2 x bfloat> [[VS2:%.*]], i64 noundef [[VL:%.*]]) #[[ATTR0]] {
+// CHECK-RV64-NEXT: [[ENTRY:.*:]]
+// CHECK-RV64-NEXT: [[TMP0:%.*]] = call <vscale x 2 x i8> @llvm.riscv.vfncvt.x.f.w.nxv2i8.nxv2bf16.i64(<vscale x 2 x i8> [[VD]], <vscale x 2 x bfloat> [[VS2]], i64 0, i64 [[VL]])
+// CHECK-RV64-NEXT: ret <vscale x 2 x i8> [[TMP0]]
+//
+vint8mf4_t test_vfncvt_x_f_w_bf16mf2_i8mf4_rm_tu(vint8mf4_t vd,
+ vbfloat16mf2_t vs2,
+ size_t vl) {
+ return __riscv_vfncvt_x_f_w_bf16mf2_i8mf4_rm_tu(vd, vs2, __RISCV_FRM_RNE, vl);
+}
+
+// CHECK-RV64-LABEL: define dso_local <vscale x 4 x i8> @test_vfncvt_x_f_w_bf16m1_i8mf2_rm_tu(
+// CHECK-RV64-SAME: <vscale x 4 x i8> [[VD:%.*]], <vscale x 4 x bfloat> [[VS2:%.*]], i64 noundef [[VL:%.*]]) #[[ATTR0]] {
+// CHECK-RV64-NEXT: [[ENTRY:.*:]]
+// CHECK-RV64-NEXT: [[TMP0:%.*]] = call <vscale x 4 x i8> @llvm.riscv.vfncvt.x.f.w.nxv4i8.nxv4bf16.i64(<vscale x 4 x i8> [[VD]], <vscale x 4 x bfloat> [[VS2]], i64 0, i64 [[VL]])
+// CHECK-RV64-NEXT: ret <vscale x 4 x i8> [[TMP0]]
+//
+vint8mf2_t test_vfncvt_x_f_w_bf16m1_i8mf2_rm_tu(vint8mf2_t vd,
+ vbfloat16m1_t vs2, size_t vl) {
+ return __riscv_vfncvt_x_f_w_bf16m1_i8mf2_rm_tu(vd, vs2, __RISCV_FRM_RNE, vl);
+}
+
+// CHECK-RV64-LABEL: define dso_local <vscale x 8 x i8> @test_vfncvt_x_f_w_bf16m2_i8m1_rm_tu(
+// CHECK-RV64-SAME: <vscale x 8 x i8> [[VD:%.*]], <vscale x 8 x bfloat> [[VS2:%.*]], i64 noundef [[VL:%.*]]) #[[ATTR0]] {
+// CHECK-RV64-NEXT: [[ENTRY:.*:]]
+// CHECK-RV64-NEXT: [[TMP0:%.*]] = call <vscale x 8 x i8> @llvm.riscv.vfncvt.x.f.w.nxv8i8.nxv8bf16.i64(<vscale x 8 x i8> [[VD]], <vscale x 8 x bfloat> [[VS2]], i64 0, i64 [[VL]])
+// CHECK-RV64-NEXT: ret <vscale x 8 x i8> [[TMP0]]
+//
+vint8m1_t test_vfncvt_x_f_w_bf16m2_i8m1_rm_tu(vint8m1_t vd, vbfloat16m2_t vs2,
+ size_t vl) {
+ return __riscv_vfncvt_x_f_w_bf16m2_i8m1_rm_tu(vd, vs2, __RISCV_FRM_RNE, vl);
+}
+
+// CHECK-RV64-LABEL: define dso_local <vscale x 16 x i8> @test_vfncvt_x_f_w_bf16m4_i8m2_rm_tu(
+// CHECK-RV64-SAME: <vscale x 16 x i8> [[VD:%.*]], <vscale x 16 x bfloat> [[VS2:%.*]], i64 noundef [[VL:%.*]]) #[[ATTR0]] {
+// CHECK-RV64-NEXT: [[ENTRY:.*:]]
+// CHECK-RV64-NEXT: [[TMP0:%.*]] = call <vscale x 16 x i8> @llvm.riscv.vfncvt.x.f.w.nxv16i8.nxv16bf16.i64(<vscale x 16 x i8> [[VD]], <vscale x 16 x bfloat> [[VS2]], i64 0, i64 [[VL]])
+// CHECK-RV64-NEXT: ret <vscale x 16 x i8> [[TMP0]]
+//
+vint8m2_t test_vfncvt_x_f_w_bf16m4_i8m2_rm_tu(vint8m2_t vd, vbfloat16m4_t vs2,
+ size_t vl) {
+ return __riscv_vfncvt_x_f_w_bf16m4_i8m2_rm_tu(vd, vs2, __RISCV_FRM_RNE, vl);
+}
+
+// CHECK-RV64-LABEL: define dso_local <vscale x 32 x i8> @test_vfncvt_x_f_w_bf16m8_i8m4_rm_tu(
+// CHECK-RV64-SAME: <vscale x 32 x i8> [[VD:%.*]], <vscale x 32 x bfloat> [[VS2:%.*]], i64 noundef [[VL:%.*]]) #[[ATTR0]] {
+// CHECK-RV64-NEXT: [[ENTRY:.*:]]
+// CHECK-RV64-NEXT: [[TMP0:%.*]] = call <vscale x 32 x i8> @llvm.riscv.vfncvt.x.f.w.nxv32i8.nxv32bf16.i64(<vscale x 32 x i8> [[VD]], <vscale x 32 x bfloat> [[VS2]], i64 0, i64 [[VL]])
+// CHECK-RV64-NEXT: ret <vscale x 32 x i8> [[TMP0]]
+//
+vint8m4_t test_vfncvt_x_f_w_bf16m8_i8m4_rm_tu(vint8m4_t vd, vbfloat16m8_t vs2,
+ size_t vl) {
+ return __riscv_vfncvt_x_f_w_bf16m8_i8m4_rm_tu(vd, vs2, __RISCV_FRM_RNE, vl);
+}
+
+// CHECK-RV64-LABEL: define dso_local <vscale x 1 x i8> @test_vfncvt_xu_f_w_bf16mf4_u8mf8_rm_tu(
+// CHECK-RV64-SAME: <vscale x 1 x i8> [[VD:%.*]], <vscale x 1 x bfloat> [[VS2:%.*]], i64 noundef [[VL:%.*]]) #[[ATTR0]] {
+// CHECK-RV64-NEXT: [[ENTRY:.*:]]
+// CHECK-RV64-NEXT: [[TMP0:%.*]] = call <vscale x 1 x i8> @llvm.riscv.vfncvt.xu.f.w.nxv1i8.nxv1bf16.i64(<vscale x 1 x i8> [[VD]], <vscale x 1 x bfloat> [[VS2]], i64 0, i64 [[VL]])
+// CHECK-RV64-NEXT: ret <vscale x 1 x i8> [[TMP0]]
+//
+vuint8mf8_t test_vfncvt_xu_f_w_bf16mf4_u8mf8_rm_tu(vuint8mf8_t vd,
+ vbfloat16mf4_t vs2,
+ size_t vl) {
+ return __riscv_vfncvt_xu_f_w_bf16mf4_u8mf8_rm_tu(vd, vs2, __RISCV_FRM_RNE,
+ vl);
+}
+
+// CHECK-RV64-LABEL: define dso_local <vscale x 2 x i8> @test_vfncvt_xu_f_w_bf16mf2_u8mf4_rm_tu(
+// CHECK-RV64-SAME: <vscale x 2 x i8> [[VD:%.*]], <vscale x 2 x bfloat> [[VS2:%.*]], i64 noundef [[VL:%.*]]) #[[ATTR0]] {
+// CHECK-RV64-NEXT: [[ENTRY:.*:]]
+// CHECK-RV64-NEXT: [[TMP0:%.*]] = call <vscale x 2 x i8> @llvm.riscv.vfncvt.xu.f.w.nxv2i8.nxv2bf16.i64(<vscale x 2 x i8> [[VD]], <vscale x 2 x bfloat> [[VS2]], i64 0, i64 [[VL]])
+// CHECK-RV64-NEXT: ret <vscale x 2 x i8> [[TMP0]]
+//
+vuint8mf4_t test_vfncvt_xu_f_w_bf16mf2_u8mf4_rm_tu(vuint8mf4_t vd,
+ vbfloat16mf2_t vs2,
+ size_t vl) {
+ return __riscv_vfncvt_xu_f_w_bf16mf2_u8mf4_rm_tu(vd, vs2, __RISCV_FRM_RNE,
+ vl);
+}
+
+// CHECK-RV64-LABEL: define dso_local <vscale x 4 x i8> @test_vfncvt_xu_f_w_bf16m1_u8mf2_rm_tu(
+// CHECK-RV64-SAME: <vscale x 4 x i8> [[VD:%.*]], <vscale x 4 x bfloat> [[VS2:%.*]], i64 noundef [[VL:%.*]]) #[[ATTR0]] {
+// CHECK-RV64-NEXT: [[ENTRY:.*:]]
+// CHECK-RV64-NEXT: [[TMP0:%.*]] = call <vscale x 4 x i8> @llvm.riscv.vfncvt.xu.f.w.nxv4i8.nxv4bf16.i64(<vscale x 4 x i8> [[VD]], <vscale x 4 x bfloat> [[VS2]], i64 0, i64 [[VL]])
+// CHECK-RV64-NEXT: ret <vscale x 4 x i8> [[TMP0]]
+//
+vuint8mf2_t test_vfncvt_xu_f_w_bf16m1_u8mf2_rm_tu(vuint8mf2_t vd,
+ vbfloat16m1_t vs2,
+ size_t vl) {
+ return __riscv_vfncvt_xu_f_w_bf16m1_u8mf2_rm_tu(vd, vs2, __RISCV_FRM_RNE, vl);
+}
+
+// CHECK-RV64-LABEL: define dso_local <vscale x 8 x i8> @test_vfncvt_xu_f_w_bf16m2_u8m1_rm_tu(
+// CHECK-RV64-SAME: <vscale x 8 x i8> [[VD:%.*]], <vscale x 8 x bfloat> [[VS2:%.*]], i64 noundef [[VL:%.*]]) #[[ATTR0]] {
+// CHECK-RV64-NEXT: [[ENTRY:.*:]]
+// CHECK-RV64-NEXT: [[TMP0:%.*]] = call <vscale x 8 x i8> @llvm.riscv.vfncvt.xu.f.w.nxv8i8.nxv8bf16.i64(<vscale x 8 x i8> [[VD]], <vscale x 8 x bfloat> [[VS2]], i64 0, i64 [[VL]])
+// CHECK-RV64-NEXT: ret <vscale x 8 x i8> [[TMP0]]
+//
+vuint8m1_t test_vfncvt_xu_f_w_bf16m2_u8m1_rm_tu(vuint8m1_t vd,
+ vbfloat16m2_t vs2, size_t vl) {
+ return __riscv_vfncvt_xu_f_w_bf16m2_u8m1_rm_tu(vd, vs2, __RISCV_FRM_RNE, vl);
+}
+
+// CHECK-RV64-LABEL: define dso_local <vscale x 16 x i8> @test_vfncvt_xu_f_w_bf16m4_u8m2_rm_tu(
+// CHECK-RV64-SAME: <vscale x 16 x i8> [[VD:%.*]], <vscale x 16 x bfloat> [[VS2:%.*]], i64 noundef [[VL:%.*]]) #[[ATTR0]] {
+// CHECK-RV64-NEXT: [[ENTRY:.*:]]
+// CHECK-RV64-NEXT: [[TMP0:%.*]] = call <vscale x 16 x i8> @llvm.riscv.vfncvt.xu.f.w.nxv16i8.nxv16bf16.i64(<vscale x 16 x i8> [[VD]], <vscale x 16 x bfloat> [[VS2]], i64 0, i64 [[VL]])
+// CHECK-RV64-NEXT: ret <vscale x 16 x i8> [[TMP0]]
+//
+vuint8m2_t test_vfncvt_xu_f_w_bf16m4_u8m2_rm_tu(vuint8m2_t vd,
+ vbfloat16m4_t vs2, size_t vl) {
+ return __riscv_vfncvt_xu_f_w_bf16m4_u8m2_rm_tu(vd, vs2, __RISCV_FRM_RNE, vl);
+}
+
+// CHECK-RV64-LABEL: define dso_local <vscale x 32 x i8> @test_vfncvt_xu_f_w_bf16m8_u8m4_rm_tu(
+// CHECK-RV64-SAME: <vscale x 32 x i8> [[VD:%.*]], <vscale x 32 x bfloat> [[VS2:%.*]], i64 noundef [[VL:%.*]]) #[[ATTR0]] {
+// CHECK-RV64-NEXT: [[ENTRY:.*:]]
+// CHECK-RV64-NEXT: [[TMP0:%.*]] = call <vscale x 32 x i8> @llvm.riscv.vfncvt.xu.f.w.nxv32i8.nxv32bf16.i64(<vscale x 32 x i8> [[VD]], <vscale x 32 x bfloat> [[VS2]], i64 0, i64 [[VL]])
+// CHECK-RV64-NEXT: ret <vscale x 32 x i8> [[TMP0]]
+//
+vuint8m4_t test_vfncvt_xu_f_w_bf16m8_u8m4_rm_tu(vuint8m4_t vd,
+ vbfloat16m8_t vs2, size_t vl) {
+ return __riscv_vfncvt_xu_f_w_bf16m8_u8m4_rm_tu(vd, vs2, __RISCV_FRM_RNE, vl);
+}
+
+// CHECK-RV64-LABEL: define dso_local <vscale x 1 x bfloat> @test_vfncvt_f_f_w_bf16mf4_rm_tu(
+// CHECK-RV64-SAME: <vscale x 1 x bfloat> [[VD:%.*]], <vscale x 1 x float> [[VS2:%.*]], i64 noundef [[VL:%.*]]) #[[ATTR0]] {
+// CHECK-RV64-NEXT: [[ENTRY:.*:]]
+// CHECK-RV64-NEXT: [[TMP0:%.*]] = call <vscale x 1 x bfloat> @llvm.riscv.vfncvt.f.f.w.nxv1bf16.nxv1f32.i64(<vscale x 1 x bfloat> [[VD]], <vscale x 1 x float> [[VS2]], i64 0, i64 [[VL]])
+// CHECK-RV64-NEXT: ret <vscale x 1 x bfloat> [[TMP0]]
+//
+vbfloat16mf4_t test_vfncvt_f_f_w_bf16mf4_rm_tu(vbfloat16mf4_t vd,
+ vfloat32mf2_t vs2, size_t vl) {
+ return __riscv_vfncvt_f_f_w_bf16mf4_rm_tu(vd, vs2, __RISCV_FRM_RNE, vl);
+}
+
+// CHECK-RV64-LABEL: define dso_local <vscale x 2 x bfloat> @test_vfncvt_f_f_w_bf16mf2_rm_tu(
+// CHECK-RV64-SAME: <vscale x 2 x bfloat> [[VD:%.*]], <vscale x 2 x float> [[VS2:%.*]], i64 noundef [[VL:%.*]]) #[[ATTR0]] {
+// CHECK-RV64-NEXT: [[ENTRY:.*:]]
+// CHECK-RV64-NEXT: [[TMP0:%.*]] = call <vscale x 2 x bfloat> @llvm.riscv.vfncvt.f.f.w.nxv2bf16.nxv2f32.i64(<vscale x 2 x bfloat> [[VD]], <vscale x 2 x float> [[VS2]], i64 0, i64 [[VL]])
+// CHECK-RV64-NEXT: ret <vscale x 2 x bfloat> [[TMP0]]
+//
+vbfloat16mf2_t test_vfncvt_f_f_w_bf16mf2_rm_tu(vbfloat16mf2_t vd,
+ vfloat32m1_t vs2, size_t vl) {
+ return __riscv_vfncvt_f_f_w_bf16mf2_rm_tu(vd, vs2, __RISCV_FRM_RNE, vl);
+}
+
+// CHECK-RV64-LABEL: define dso_local <vscale x 4 x bfloat> @test_vfncvt_f_f_w_bf16m1_rm_tu(
+// CHECK-RV64-SAME: <vscale x 4 x bfloat> [[VD:%.*]], <vscale x 4 x float> [[VS2:%.*]], i64 noundef [[VL:%.*]]) #[[ATTR0]] {
+// CHECK-RV64-NEXT: [[ENTRY:.*:]]
+// CHECK-RV64-NEXT: [[TMP0:%.*]] = call <vscale x 4 x bfloat> @llvm.riscv.vfncvt.f.f.w.nxv4bf16.nxv4f32.i64(<vscale x 4 x bfloat> [[VD]], <vscale x 4 x float> [[VS2]], i64 0, i64 [[VL]])
+// CHECK-RV64-NEXT: ret <vscale x 4 x bfloat> [[TMP0]]
+//
+vbfloat16m1_t test_vfncvt_f_f_w_bf16m1_rm_tu(vbfloat16m1_t vd, vfloat32m2_t vs2,
+ size_t vl) {
+ return __riscv_vfncvt_f_f_w_bf16m1_rm_tu(vd, vs2, __RISCV_FRM_RNE, vl);
+}
+
+// CHECK-RV64-LABEL: define dso_local <vscale x 8 x bfloat> @test_vfncvt_f_f_w_bf16m2_rm_tu(
+// CHECK-RV64-SAME: <vscale x 8 x bfloat> [[VD:%.*]], <vscale x 8 x float> [[VS2:%.*]], i64 noundef [[VL:%.*]]) #[[ATTR0]] {
+// CHECK-RV64-NEXT: [[ENTRY:.*:]]
+// CHECK-RV64-NEXT: [[TMP0:%.*]] = call <vscale x 8 x bfloat> @llvm.riscv.vfncvt.f.f.w.nxv8bf16.nxv8f32.i64(<vscale x 8 x bfloat> [[VD]], <vscale x 8 x float> [[VS2]], i64 0, i64 [[VL]])
+// CHECK-RV64-NEXT: ret <vscale x 8 x bfloat> [[TMP0]]
+//
+vbfloat16m2_t test_vfncvt_f_f_w_bf16m2_rm_tu(vbfloat16m2_t vd, vfloat32m4_t vs2,
+ size_t vl) {
+ return __riscv_vfncvt_f_f_w_bf16m2_rm_tu(vd, vs2, __RISCV_FRM_RNE, vl);
+}
+
+// CHECK-RV64-LABEL: define dso_local <vscale x 16 x bfloat> @test_vfncvt_f_f_w_bf16m4_rm_tu(
+// CHECK-RV64-SAME: <vscale x 16 x bfloat> [[VD:%.*]], <vscale x 16 x float> [[VS2:%.*]], i64 noundef [[VL:%.*]]) #[[ATTR0]] {
+// CHECK-RV64-NEXT: [[ENTRY:.*:]]
+// CHECK-RV64-NEXT: [[TMP0:%.*]] = call <vscale x 16 x bfloat> @llvm.riscv.vfncvt.f.f.w.nxv16bf16.nxv16f32.i64(<vscale x 16 x bfloat> [[VD]], <vscale x 16 x float> [[VS2]], i64 0, i64 [[VL]])
+// CHECK-RV64-NEXT: ret <vscale x 16 x bfloat> [[TMP0]]
+//
+vbfloat16m4_t test_vfncvt_f_f_w_bf16m4_rm_tu(vbfloat16m4_t vd, vfloat32m8_t vs2,
+ size_t vl) {
+ return __riscv_vfncvt_f_f_w_bf16m4_rm_tu(vd, vs2, __RISCV_FRM_RNE, vl);
+}
+
+// CHECK-RV64-LABEL: define dso_local <vscale x 1 x i8> @test_vfncvt_x_f_w_bf16mf4_i8mf8_rm_tum(
+// CHECK-RV64-SAME: <vscale x 1 x i1> [[VM:%.*]], <vscale x 1 x i8> [[VD:%.*]], <vscale x 1 x bfloat> [[VS2:%.*]], i64 noundef [[VL:%.*]]) #[[ATTR0]] {
+// CHECK-RV64-NEXT: [[ENTRY:.*:]]
+// CHECK-RV64-NEXT: [[TMP0:%.*]] = call <vscale x 1 x i8> @llvm.riscv.vfncvt.x.f.w.mask.nxv1i8.nxv1bf16.i64(<vscale x 1 x i8> [[VD]], <vscale x 1 x bfloat> [[VS2]], <vscale x 1 x i1> [[VM]], i64 0, i64 [[VL]], i64 2)
+// CHECK-RV64-NEXT: ret <vscale x 1 x i8> [[TMP0]]
+//
+vint8mf8_t test_vfncvt_x_f_w_bf16mf4_i8mf8_rm_tum(vbool64_t vm, vint8mf8_t vd,
+ vbfloat16mf4_t vs2,
+ size_t vl) {
+ return __riscv_vfncvt_x_f_w_bf16mf4_i8mf8_rm_tum(vm, vd, vs2, __RISCV_FRM_RNE,
+ vl);
+}
+
+// CHECK-RV64-LABEL: define dso_local <vscale x 2 x i8> @test_vfncvt_x_f_w_bf16mf2_i8mf4_rm_tum(
+// CHECK-RV64-SAME: <vscale x 2 x i1> [[VM:%.*]], <vscale x 2 x i8> [[VD:%.*]], <vscale x 2 x bfloat> [[VS2:%.*]], i64 noundef [[VL:%.*]]) #[[ATTR0]] {
+// CHECK-RV64-NEXT: [[ENTRY:.*:]]
+// CHECK-RV64-NEXT: [[TMP0:%.*]] = call <vscale x 2 x i8> @llvm.riscv.vfncvt.x.f.w.mask.nxv2i8.nxv2bf16.i64(<vscale x 2 x i8> [[VD]], <vscale x 2 x bfloat> [[VS2]], <vscale x 2 x i1> [[VM]], i64 0, i64 [[VL]], i64 2)
+// CHECK-RV64-NEXT: ret <vscale x 2 x i8> [[TMP0]]
+//
+vint8mf4_t test_vfncvt_x_f_w_bf16mf2_i8mf4_rm_tum(vbool32_t vm, vint8mf4_t vd,
+ vbfloat16mf2_t vs2,
+ size_t vl) {
+ return __riscv_vfncvt_x_f_w_bf16mf2_i8mf4_rm_tum(vm, vd, vs2, __RISCV_FRM_RNE,
+ vl);
+}
+
+// CHECK-RV64-LABEL: define dso_local <vscale x 4 x i8> @test_vfncvt_x_f_w_bf16m1_i8mf2_rm_tum(
+// CHECK-RV64-SAME: <vscale x 4 x i1> [[VM:%.*]], <vscale x 4 x i8> [[VD:%.*]], <vscale x 4 x bfloat> [[VS2:%.*]], i64 noundef [[VL:%.*]]) #[[ATTR0]] {
+// CHECK-RV64-NEXT: [[ENTRY:.*:]]
+// CHECK-RV64-NEXT: [[TMP0:%.*]] = call <vscale x 4 x i8> @llvm.riscv.vfncvt.x.f.w.mask.nxv4i8.nxv4bf16.i64(<vscale x 4 x i8> [[VD]], <vscale x 4 x bfloat> [[VS2]], <vscale x 4 x i1> [[VM]], i64 0, i64 [[VL]], i64 2)
+// CHECK-RV64-NEXT: ret <vscale x 4 x i8> [[TMP0]]
+//
+vint8mf2_t test_vfncvt_x_f_w_bf16m1_i8mf2_rm_tum(vbool16_t vm, vint8mf2_t vd,
+ vbfloat16m1_t vs2, size_t vl) {
+ return __riscv_vfncvt_x_f_w_bf16m1_i8mf2_rm_tum(vm, vd, vs2, __RISCV_FRM_RNE,
+ vl);
+}
+
+// CHECK-RV64-LABEL: define dso_local <vscale x 8 x i8> @test_vfncvt_x_f_w_bf16m2_i8m1_rm_tum(
+// CHECK-RV64-SAME: <vscale x 8 x i1> [[VM:%.*]], <vscale x 8 x i8> [[VD:%.*]], <vscale x 8 x bfloat> [[VS2:%.*]], i64 noundef [[VL:%.*]]) #[[ATTR0]] {
+// CHECK-RV64-NEXT: [[ENTRY:.*:]]
+// CHECK-RV64-NEXT: [[TMP0:%.*]] = call <vscale x 8 x i8> @llvm.riscv.vfncvt.x.f.w.mask.nxv8i8.nxv8bf16.i64(<vscale x 8 x i8> [[VD]], <vscale x 8 x bfloat> [[VS2]], <vscale x 8 x i1> [[VM]], i64 0, i64 [[VL]], i64 2)
+// CHECK-RV64-NEXT: ret <vscale x 8 x i8> [[TMP0]]
+//
+vint8m1_t test_vfncvt_x_f_w_bf16m2_i8m1_rm_tum(vbool8_t vm, vint8m1_t vd,
+ vbfloat16m2_t vs2, size_t vl) {
+ return __riscv_vfncvt_x_f_w_bf16m2_i8m1_rm_tum(vm, vd, vs2, __RISCV_FRM_RNE,
+ vl);
+}
+
+// CHECK-RV64-LABEL: define dso_local <vscale x 16 x i8> @test_vfncvt_x_f_w_bf16m4_i8m2_rm_tum(
+// CHECK-RV64-SAME: <vscale x 16 x i1> [[VM:%.*]], <vscale x 16 x i8> [[VD:%.*]], <vscale x 16 x bfloat> [[VS2:%.*]], i64 noundef [[VL:%.*]]) #[[ATTR0]] {
+// CHECK-RV64-NEXT: [[ENTRY:.*:]]
+// CHECK-RV64-NEXT: [[TMP0:%.*]] = call <vscale x 16 x i8> @llvm.riscv.vfncvt.x.f.w.mask.nxv16i8.nxv16bf16.i64(<vscale x 16 x i8> [[VD]], <vscale x 16 x bfloat> [[VS2]], <vscale x 16 x i1> [[VM]], i64 0, i64 [[VL]], i64 2)
+// CHECK-RV64-NEXT: ret <vscale x 16 x i8> [[TMP0]]
+//
+vint8m2_t test_vfncvt_x_f_w_bf16m4_i8m2_rm_tum(vbool4_t vm, vint8m2_t vd,
+ vbfloat16m4_t vs2, size_t vl) {
+ return __riscv_vfncvt_x_f_w_bf16m4_i8m2_rm_tum(vm, vd, vs2, __RISCV_FRM_RNE,
+ vl);
+}
+
+// CHECK-RV64-LABEL: define dso_local <vscale x 32 x i8> @test_vfncvt_x_f_w_bf16m8_i8m4_rm_tum(
+// CHECK-RV64-SAME: <vscale x 32 x i1> [[VM:%.*]], <vscale x 32 x i8> [[VD:%.*]], <vscale x 32 x bfloat> [[VS2:%.*]], i64 noundef [[VL:%.*]]) #[[ATTR0]] {
+// CHECK-RV64-NEXT: [[ENTRY:.*:]]
+// CHECK-RV64-NEXT: [[TMP0:%.*]] = call <vscale x 32 x i8> @llvm.riscv.vfncvt.x.f.w.mask.nxv32i8.nxv32bf16.i64(<vscale x 32 x i8> [[VD]], <vscale x 32 x bfloat> [[VS2]], <vscale x 32 x i1> [[VM]], i64 0, i64 [[VL]], i64 2)
+// CHECK-RV64-NEXT: ret <vscale x 32 x i8> [[TMP0]]
+//
+vint8m4_t test_vfncvt_x_f_w_bf16m8_i8m4_rm_tum(vbool2_t vm, vint8m4_t vd,
+ vbfloat16m8_t vs2, size_t vl) {
+ return __riscv_vfncvt_x_f_w_bf16m8_i8m4_rm_tum(vm, vd, vs2, __RISCV_FRM_RNE,
+ vl);
+}
+
+// CHECK-RV64-LABEL: define dso_local <vscale x 1 x i8> @test_vfncvt_xu_f_w_bf16mf4_u8mf8_rm_tum(
+// CHECK-RV64-SAME: <vscale x 1 x i1> [[VM:%.*]], <vscale x 1 x i8> [[VD:%.*]], <vscale x 1 x bfloat> [[VS2:%.*]], i64 noundef [[VL:%.*]]) #[[ATTR0]] {
+// CHECK-RV64-NEXT: [[ENTRY:.*:]]
+// CHECK-RV64-NEXT: [[TMP0:%.*]] = call <vscale x 1 x i8> @llvm.riscv.vfncvt.xu.f.w.mask.nxv1i8.nxv1bf16.i64(<vscale x 1 x i8> [[VD]], <vscale x 1 x bfloat> [[VS2]], <vscale x 1 x i1> [[VM]], i64 0, i64 [[VL]], i64 2)
+// CHECK-RV64-NEXT: ret <vscale x 1 x i8> [[TMP0]]
+//
+vuint8mf8_t test_vfncvt_xu_f_w_bf16mf4_u8mf8_rm_tum(vbool64_t vm,
+ vuint8mf8_t vd,
+ vbfloat16mf4_t vs2,
+ size_t vl) {
+ return __riscv_vfncvt_xu_f_w_bf16mf4_u8mf8_rm_tum(vm, vd, vs2,
+ __RISCV_FRM_RNE, vl);
+}
+
+// CHECK-RV64-LABEL: define dso_local <vscale x 2 x i8> @test_vfncvt_xu_f_w_bf16mf2_u8mf4_rm_tum(
+// CHECK-RV64-SAME: <vscale x 2 x i1> [[VM:%.*]], <vscale x 2 x i8> [[VD:%.*]], <vscale x 2 x bfloat> [[VS2:%.*]], i64 noundef [[VL:%.*]]) #[[ATTR0]] {
+// CHECK-RV64-NEXT: [[ENTRY:.*:]]
+// CHECK-RV64-NEXT: [[TMP0:%.*]] = call <vscale x 2 x i8> @llvm.riscv.vfncvt.xu.f.w.mask.nxv2i8.nxv2bf16.i64(<vscale x 2 x i8> [[VD]], <vscale x 2 x bfloat> [[VS2]], <vscale x 2 x i1> [[VM]], i64 0, i64 [[VL]], i64 2)
+// CHECK-RV64-NEXT: ret <vscale x 2 x i8> [[TMP0]]
+//
+vuint8mf4_t test_vfncvt_xu_f_w_bf16mf2_u8mf4_rm_tum(vbool32_t vm,
+ vuint8mf4_t vd,
+ vbfloat16mf2_t vs2,
+ size_t vl) {
+ return __riscv_vfncvt_xu_f_w_bf16mf2_u8mf4_rm_tum(vm, vd, vs2,
+ __RISCV_FRM_RNE, vl);
+}
+
+// CHECK-RV64-LABEL: define dso_local <vscale x 4 x i8> @test_vfncvt_xu_f_w_bf16m1_u8mf2_rm_tum(
+// CHECK-RV64-SAME: <vscale x 4 x i1> [[VM:%.*]], <vscale x 4 x i8> [[VD:%.*]], <vscale x 4 x bfloat> [[VS2:%.*]], i64 noundef [[VL:%.*]]) #[[ATTR0]] {
+// CHECK-RV64-NEXT: [[ENTRY:.*:]]
+// CHECK-RV64-NEXT: [[TMP0:%.*]] = call <vscale x 4 x i8> @llvm.riscv.vfncvt.xu.f.w.mask.nxv4i8.nxv4bf16.i64(<vscale x 4 x i8> [[VD]], <vscale x 4 x bfloat> [[VS2]], <vscale x 4 x i1> [[VM]], i64 0, i64 [[VL]], i64 2)
+// CHECK-RV64-NEXT: ret <vscale x 4 x i8> [[TMP0]]
+//
+vuint8mf2_t test_vfncvt_xu_f_w_bf16m1_u8mf2_rm_tum(vbool16_t vm, vuint8mf2_t vd,
+ vbfloat16m1_t vs2,
+ size_t vl) {
+ return __riscv_vfncvt_xu_f_w_bf16m1_u8mf2_rm_tum(vm, vd, vs2, __RISCV_FRM_RNE,
+ vl);
+}
+
+// CHECK-RV64-LABEL: define dso_local <vscale x 8 x i8> @test_vfncvt_xu_f_w_bf16m2_u8m1_rm_tum(
+// CHECK-RV64-SAME: <vscale x 8 x i1> [[VM:%.*]], <vscale x 8 x i8> [[VD:%.*]], <vscale x 8 x bfloat> [[VS2:%.*]], i64 noundef [[VL:%.*]]) #[[ATTR0]] {
+// CHECK-RV64-NEXT: [[ENTRY:.*:]]
+// CHECK-RV64-NEXT: [[TMP0:%.*]] = call <vscale x 8 x i8> @llvm.riscv.vfncvt.xu.f.w.mask.nxv8i8.nxv8bf16.i64(<vscale x 8 x i8> [[VD]], <vscale x 8 x bfloat> [[VS2]], <vscale x 8 x i1> [[VM]], i64 0, i64 [[VL]], i64 2)
+// CHECK-RV64-NEXT: ret <vscale x 8 x i8> [[TMP0]]
+//
+vuint8m1_t test_vfncvt_xu_f_w_bf16m2_u8m1_rm_tum(vbool8_t vm, vuint8m1_t vd,
+ vbfloat16m2_t vs2, size_t vl) {
+ return __riscv_vfncvt_xu_f_w_bf16m2_u8m1_rm_tum(vm, vd, vs2, __RISCV_FRM_RNE,
+ vl);
+}
+
+// CHECK-RV64-LABEL: define dso_local <vscale x 16 x i8> @test_vfncvt_xu_f_w_bf16m4_u8m2_rm_tum(
+// CHECK-RV64-SAME: <vscale x 16 x i1> [[VM:%.*]], <vscale x 16 x i8> [[VD:%.*]], <vscale x 16 x bfloat> [[VS2:%.*]], i64 noundef [[VL:%.*]]) #[[ATTR0]] {
+// CHECK-RV64-NEXT: [[ENTRY:.*:]]
+// CHECK-RV64-NEXT: [[TMP0:%.*]] = call <vscale x 16 x i8> @llvm.riscv.vfncvt.xu.f.w.mask.nxv16i8.nxv16bf16.i64(<vscale x 16 x i8> [[VD]], <vscale x 16 x bfloat> [[VS2]], <vscale x 16 x i1> [[VM]], i64 0, i64 [[VL]], i64 2)
+// CHECK-RV64-NEXT: ret <vscale x 16 x i8> [[TMP0]]
+//
+vuint8m2_t test_vfncvt_xu_f_w_bf16m4_u8m2_rm_tum(vbool4_t vm, vuint8m2_t vd,
+ vbfloat16m4_t vs2, size_t vl) {
+ return __riscv_vfncvt_xu_f_w_bf16m4_u8m2_rm_tum(vm, vd, vs2, __RISCV_FRM_RNE,
+ vl);
+}
+
+// CHECK-RV64-LABEL: define dso_local <vscale x 32 x i8> @test_vfncvt_xu_f_w_bf16m8_u8m4_rm_tum(
+// CHECK-RV64-SAME: <vscale x 32 x i1> [[VM:%.*]], <vscale x 32 x i8> [[VD:%.*]], <vscale x 32 x bfloat> [[VS2:%.*]], i64 noundef [[VL:%.*]]) #[[ATTR0]] {
+// CHECK-RV64-NEXT: [[ENTRY:.*:]]
+// CHECK-RV64-NEXT: [[TMP0:%.*]] = call <vscale x 32 x i8> @llvm.riscv.vfncvt.xu.f.w.mask.nxv32i8.nxv32bf16.i64(<vscale x 32 x i8> [[VD]], <vscale x 32 x bfloat> [[VS2]], <vscale x 32 x i1> [[VM]], i64 0, i64 [[VL]], i64 2)
+// CHECK-RV64-NEXT: ret <vscale x 32 x i8> [[TMP0]]
+//
+vuint8m4_t test_vfncvt_xu_f_w_bf16m8_u8m4_rm_tum(vbool2_t vm, vuint8m4_t vd,
+ vbfloat16m8_t vs2, size_t vl) {
+ return __riscv_vfncvt_xu_f_w_bf16m8_u8m4_rm_tum(vm, vd, vs2, __RISCV_FRM_RNE,
+ vl);
+}
+
+// CHECK-RV64-LABEL: define dso_local <vscale x 1 x bfloat> @test_vfncvt_f_f_w_bf16mf4_rm_tum(
+// CHECK-RV64-SAME: <vscale x 1 x i1> [[VM:%.*]], <vscale x 1 x bfloat> [[VD:%.*]], <vscale x 1 x float> [[VS2:%.*]], i64 noundef [[VL:%.*]]) #[[ATTR0]] {
+// CHECK-RV64-NEXT: [[ENTRY:.*:]]
+// CHECK-RV64-NEXT: [[TMP0:%.*]] = call <vscale x 1 x bfloat> @llvm.riscv.vfncvt.f.f.w.mask.nxv1bf16.nxv1f32.i64(<vscale x 1 x bfloat> [[VD]], <vscale x 1 x float> [[VS2]], <vscale x 1 x i1> [[VM]], i64 0, i64 [[VL]], i64 2)
+// CHECK-RV64-NEXT: ret <vscale x 1 x bfloat> [[TMP0]]
+//
+vbfloat16mf4_t test_vfncvt_f_f_w_bf16mf4_rm_tum(vbool64_t vm, vbfloat16mf4_t vd,
+ vfloat32mf2_t vs2, size_t vl) {
+ return __riscv_vfncvt_f_f_w_bf16mf4_rm_tum(vm, vd, vs2, __RISCV_FRM_RNE, vl);
+}
+
+// CHECK-RV64-LABEL: define dso_local <vscale x 2 x bfloat> @test_vfncvt_f_f_w_bf16mf2_rm_tum(
+// CHECK-RV64-SAME: <vscale x 2 x i1> [[VM:%.*]], <vscale x 2 x bfloat> [[VD:%.*]], <vscale x 2 x float> [[VS2:%.*]], i64 noundef [[VL:%.*]]) #[[ATTR0]] {
+// CHECK-RV64-NEXT: [[ENTRY:.*:]]
+// CHECK-RV64-NEXT: [[TMP0:%.*]] = call <vscale x 2 x bfloat> @llvm.riscv.vfncvt.f.f.w.mask.nxv2bf16.nxv2f32.i64(<vscale x 2 x bfloat> [[VD]], <vscale x 2 x float> [[VS2]], <vscale x 2 x i1> [[VM]], i64 0, i64 [[VL]], i64 2)
+// CHECK-RV64-NEXT: ret <vscale x 2 x bfloat> [[TMP0]]
+//
+vbfloat16mf2_t test_vfncvt_f_f_w_bf16mf2_rm_tum(vbool32_t vm, vbfloat16mf2_t vd,
+ vfloat32m1_t vs2, size_t vl) {
+ return __riscv_vfncvt_f_f_w_bf16mf2_rm_tum(vm, vd, vs2, __RISCV_FRM_RNE, vl);
+}
+
+// CHECK-RV64-LABEL: define dso_local <vscale x 4 x bfloat> @test_vfncvt_f_f_w_bf16m1_rm_tum(
+// CHECK-RV64-SAME: <vscale x 4 x i1> [[VM:%.*]], <vscale x 4 x bfloat> [[VD:%.*]], <vscale x 4 x float> [[VS2:%.*]], i64 noundef [[VL:%.*]]) #[[ATTR0]] {
+// CHECK-RV64-NEXT: [[ENTRY:.*:]]
+// CHECK-RV64-NEXT: [[TMP0:%.*]] = call <vscale x 4 x bfloat> @llvm.riscv.vfncvt.f.f.w.mask.nxv4bf16.nxv4f32.i64(<vscale x 4 x bfloat> [[VD]], <vscale x 4 x float> [[VS2]], <vscale x 4 x i1> [[VM]], i64 0, i64 [[VL]], i64 2)
+// CHECK-RV64-NEXT: ret <vscale x 4 x bfloat> [[TMP0]]
+//
+vbfloat16m1_t test_vfncvt_f_f_w_bf16m1_rm_tum(vbool16_t vm, vbfloat16m1_t vd,
+ vfloat32m2_t vs2, size_t vl) {
+ return __riscv_vfncvt_f_f_w_bf16m1_rm_tum(vm, vd, vs2, __RISCV_FRM_RNE, vl);
+}
+
+// CHECK-RV64-LABEL: define dso_local <vscale x 8 x bfloat> @test_vfncvt_f_f_w_bf16m2_rm_tum(
+// CHECK-RV64-SAME: <vscale x 8 x i1> [[VM:%.*]], <vscale x 8 x bfloat> [[VD:%.*]], <vscale x 8 x float> [[VS2:%.*]], i64 noundef [[VL:%.*]]) #[[ATTR0]] {
+// CHECK-RV64-NEXT: [[ENTRY:.*:]]
+// CHECK-RV64-NEXT: [[TMP0:%.*]] = call <vscale x 8 x bfloat> @llvm.riscv.vfncvt.f.f.w.mask.nxv8bf16.nxv8f32.i64(<vscale x 8 x bfloat> [[VD]], <vscale x 8 x float> [[VS2]], <vscale x 8 x i1> [[VM]], i64 0, i64 [[VL]], i64 2)
+// CHECK-RV64-NEXT: ret <vscale x 8 x bfloat> [[TMP0]]
+//
+vbfloat16m2_t test_vfncvt_f_f_w_bf16m2_rm_tum(vbool8_t vm, vbfloat16m2_t vd,
+ vfloat32m4_t vs2, size_t vl) {
+ return __riscv_vfncvt_f_f_w_bf16m2_rm_tum(vm, vd, vs2, __RISCV_FRM_RNE, vl);
+}
+
+// CHECK-RV64-LABEL: define dso_local <vscale x 16 x bfloat> @test_vfncvt_f_f_w_bf16m4_rm_tum(
+// CHECK-RV64-SAME: <vscale x 16 x i1> [[VM:%.*]], <vscale x 16 x bfloat> [[VD:%.*]], <vscale x 16 x float> [[VS2:%.*]], i64 noundef [[VL:%.*]]) #[[ATTR0]] {
+// CHECK-RV64-NEXT: [[ENTRY:.*:]]
+// CHECK-RV64-NEXT: [[TMP0:%.*]] = call <vscale x 16 x bfloat> @llvm.riscv.vfncvt.f.f.w.mask.nxv16bf16.nxv16f32.i64(<vscale x 16 x bfloat> [[VD]], <vscale x 16 x float> [[VS2]], <vscale x 16 x i1> [[VM]], i64 0, i64 [[VL]], i64 2)
+// CHECK-RV64-NEXT: ret <vscale x 16 x bfloat> [[TMP0]]
+//
+vbfloat16m4_t test_vfncvt_f_f_w_bf16m4_rm_tum(vbool4_t vm, vbfloat16m4_t vd,
+ vfloat32m8_t vs2, size_t vl) {
+ return __riscv_vfncvt_f_f_w_bf16m4_rm_tum(vm, vd, vs2, __RISCV_FRM_RNE, vl);
+}
+
+// CHECK-RV64-LABEL: define dso_local <vscale x 1 x i8> @test_vfncvt_x_f_w_bf16mf4_i8mf8_rm_tumu(
+// CHECK-RV64-SAME: <vscale x 1 x i1> [[VM:%.*]], <vscale x 1 x i8> [[VD:%.*]], <vscale x 1 x bfloat> [[VS2:%.*]], i64 noundef [[VL:%.*]]) #[[ATTR0]] {
+// CHECK-RV64-NEXT: [[ENTRY:.*:]]
+// CHECK-RV64-NEXT: [[TMP0:%.*]] = call <vscale x 1 x i8> @llvm.riscv.vfncvt.x.f.w.mask.nxv1i8.nxv1bf16.i64(<vscale x 1 x i8> [[VD]], <vscale x 1 x bfloat> [[VS2]], <vscale x 1 x i1> [[VM]], i64 0, i64 [[VL]], i64 0)
+// CHECK-RV64-NEXT: ret <vscale x 1 x i8> [[TMP0]]
+//
+vint8mf8_t test_vfncvt_x_f_w_bf16mf4_i8mf8_rm_tumu(vbool64_t vm, vint8mf8_t vd,
+ vbfloat16mf4_t vs2,
+ size_t vl) {
+ return __riscv_vfncvt_x_f_w_bf16mf4_i8mf8_rm_tumu(vm, vd, vs2,
+ __RISCV_FRM_RNE, vl);
+}
+
+// CHECK-RV64-LABEL: define dso_local <vscale x 2 x i8> @test_vfncvt_x_f_w_bf16mf2_i8mf4_rm_tumu(
+// CHECK-RV64-SAME: <vscale x 2 x i1> [[VM:%.*]], <vscale x 2 x i8> [[VD:%.*]], <vscale x 2 x bfloat> [[VS2:%.*]], i64 noundef [[VL:%.*]]) #[[ATTR0]] {
+// CHECK-RV64-NEXT: [[ENTRY:.*:]]
+// CHECK-RV64-NEXT: [[TMP0:%.*]] = call <vscale x 2 x i8> @llvm.riscv.vfncvt.x.f.w.mask.nxv2i8.nxv2bf16.i64(<vscale x 2 x i8> [[VD]], <vscale x 2 x bfloat> [[VS2]], <vscale x 2 x i1> [[VM]], i64 0, i64 [[VL]], i64 0)
+// CHECK-RV64-NEXT: ret <vscale x 2 x i8> [[TMP0]]
+//
+vint8mf4_t test_vfncvt_x_f_w_bf16mf2_i8mf4_rm_tumu(vbool32_t vm, vint8mf4_t vd,
+ vbfloat16mf2_t vs2,
+ size_t vl) {
+ return __riscv_vfncvt_x_f_w_bf16mf2_i8mf4_rm_tumu(vm, vd, vs2,
+ __RISCV_FRM_RNE, vl);
+}
+
+// CHECK-RV64-LABEL: define dso_local <vscale x 4 x i8> @test_vfncvt_x_f_w_bf16m1_i8mf2_rm_tumu(
+// CHECK-RV64-SAME: <vscale x 4 x i1> [[VM:%.*]], <vscale x 4 x i8> [[VD:%.*]], <vscale x 4 x bfloat> [[VS2:%.*]], i64 noundef [[VL:%.*]]) #[[ATTR0]] {
+// CHECK-RV64-NEXT: [[ENTRY:.*:]]
+// CHECK-RV64-NEXT: [[TMP0:%.*]] = call <vscale x 4 x i8> @llvm.riscv.vfncvt.x.f.w.mask.nxv4i8.nxv4bf16.i64(<vscale x 4 x i8> [[VD]], <vscale x 4 x bfloat> [[VS2]], <vscale x 4 x i1> [[VM]], i64 0, i64 [[VL]], i64 0)
+// CHECK-RV64-NEXT: ret <vscale x 4 x i8> [[TMP0]]
+//
+vint8mf2_t test_vfncvt_x_f_w_bf16m1_i8mf2_rm_tumu(vbool16_t vm, vint8mf2_t vd,
+ vbfloat16m1_t vs2,
+ size_t vl) {
+ return __riscv_vfncvt_x_f_w_bf16m1_i8mf2_rm_tumu(vm, vd, vs2, __RISCV_FRM_RNE,
+ vl);
+}
+
+// CHECK-RV64-LABEL: define dso_local <vscale x 8 x i8> @test_vfncvt_x_f_w_bf16m2_i8m1_rm_tumu(
+// CHECK-RV64-SAME: <vscale x 8 x i1> [[VM:%.*]], <vscale x 8 x i8> [[VD:%.*]], <vscale x 8 x bfloat> [[VS2:%.*]], i64 noundef [[VL:%.*]]) #[[ATTR0]] {
+// CHECK-RV64-NEXT: [[ENTRY:.*:]]
+// CHECK-RV64-NEXT: [[TMP0:%.*]] = call <vscale x 8 x i8> @llvm.riscv.vfncvt.x.f.w.mask.nxv8i8.nxv8bf16.i64(<vscale x 8 x i8> [[VD]], <vscale x 8 x bfloat> [[VS2]], <vscale x 8 x i1> [[VM]], i64 0, i64 [[VL]], i64 0)
+// CHECK-RV64-NEXT: ret <vscale x 8 x i8> [[TMP0]]
+//
+vint8m1_t test_vfncvt_x_f_w_bf16m2_i8m1_rm_tumu(vbool8_t vm, vint8m1_t vd,
+ vbfloat16m2_t vs2, size_t vl) {
+ return __riscv_vfncvt_x_f_w_bf16m2_i8m1_rm_tumu(vm, vd, vs2, __RISCV_FRM_RNE,
+ vl);
+}
+
+// CHECK-RV64-LABEL: define dso_local <vscale x 16 x i8> @test_vfncvt_x_f_w_bf16m4_i8m2_rm_tumu(
+// CHECK-RV64-SAME: <vscale x 16 x i1> [[VM:%.*]], <vscale x 16 x i8> [[VD:%.*]], <vscale x 16 x bfloat> [[VS2:%.*]], i64 noundef [[VL:%.*]]) #[[ATTR0]] {
+// CHECK-RV64-NEXT: [[ENTRY:.*:]]
+// CHECK-RV64-NEXT: [[TMP0:%.*]] = call <vscale x 16 x i8> @llvm.riscv.vfncvt.x.f.w.mask.nxv16i8.nxv16bf16.i64(<vscale x 16 x i8> [[VD]], <vscale x 16 x bfloat> [[VS2]], <vscale x 16 x i1> [[VM]], i64 0, i64 [[VL]], i64 0)
+// CHECK-RV64-NEXT: ret <vscale x 16 x i8> [[TMP0]]
+//
+vint8m2_t test_vfncvt_x_f_w_bf16m4_i8m2_rm_tumu(vbool4_t vm, vint8m2_t vd,
+ vbfloat16m4_t vs2, size_t vl) {
+ return __riscv_vfncvt_x_f_w_bf16m4_i8m2_rm_tumu(vm, vd, vs2, __RISCV_FRM_RNE,
+ vl);
+}
+
+// CHECK-RV64-LABEL: define dso_local <vscale x 32 x i8> @test_vfncvt_x_f_w_bf16m8_i8m4_rm_tumu(
+// CHECK-RV64-SAME: <vscale x 32 x i1> [[VM:%.*]], <vscale x 32 x i8> [[VD:%.*]], <vscale x 32 x bfloat> [[VS2:%.*]], i64 noundef [[VL:%.*]]) #[[ATTR0]] {
+// CHECK-RV64-NEXT: [[ENTRY:.*:]]
+// CHECK-RV64-NEXT: [[TMP0:%.*]] = call <vscale x 32 x i8> @llvm.riscv.vfncvt.x.f.w.mask.nxv32i8.nxv32bf16.i64(<vscale x 32 x i8> [[VD]], <vscale x 32 x bfloat> [[VS2]], <vscale x 32 x i1> [[VM]], i64 0, i64 [[VL]], i64 0)
+// CHECK-RV64-NEXT: ret <vscale x 32 x i8> [[TMP0]]
+//
+vint8m4_t test_vfncvt_x_f_w_bf16m8_i8m4_rm_tumu(vbool2_t vm, vint8m4_t vd,
+ vbfloat16m8_t vs2, size_t vl) {
+ return __riscv_vfncvt_x_f_w_bf16m8_i8m4_rm_tumu(vm, vd, vs2, __RISCV_FRM_RNE,
+ vl);
+}
+
+// CHECK-RV64-LABEL: define dso_local <vscale x 1 x i8> @test_vfncvt_xu_f_w_bf16mf4_u8mf8_rm_tumu(
+// CHECK-RV64-SAME: <vscale x 1 x i1> [[VM:%.*]], <vscale x 1 x i8> [[VD:%.*]], <vscale x 1 x bfloat> [[VS2:%.*]], i64 noundef [[VL:%.*]]) #[[ATTR0]] {
+// CHECK-RV64-NEXT: [[ENTRY:.*:]]
+// CHECK-RV64-NEXT: [[TMP0:%.*]] = call <vscale x 1 x i8> @llvm.riscv.vfncvt.xu.f.w.mask.nxv1i8.nxv1bf16.i64(<vscale x 1 x i8> [[VD]], <vscale x 1 x bfloat> [[VS2]], <vscale x 1 x i1> [[VM]], i64 0, i64 [[VL]], i64 0)
+// CHECK-RV64-NEXT: ret <vscale x 1 x i8> [[TMP0]]
+//
+vuint8mf8_t test_vfncvt_xu_f_w_bf16mf4_u8mf8_rm_tumu(vbool64_t vm,
+ vuint8mf8_t vd,
+ vbfloat16mf4_t vs2,
+ size_t vl) {
+ return __riscv_vfncvt_xu_f_w_bf16mf4_u8mf8_rm_tumu(vm, vd, vs2,
+ __RISCV_FRM_RNE, vl);
+}
+
+// CHECK-RV64-LABEL: define dso_local <vscale x 2 x i8> @test_vfncvt_xu_f_w_bf16mf2_u8mf4_rm_tumu(
+// CHECK-RV64-SAME: <vscale x 2 x i1> [[VM:%.*]], <vscale x 2 x i8> [[VD:%.*]], <vscale x 2 x bfloat> [[VS2:%.*]], i64 noundef [[VL:%.*]]) #[[ATTR0]] {
+// CHECK-RV64-NEXT: [[ENTRY:.*:]]
+// CHECK-RV64-NEXT: [[TMP0:%.*]] = call <vscale x 2 x i8> @llvm.riscv.vfncvt.xu.f.w.mask.nxv2i8.nxv2bf16.i64(<vscale x 2 x i8> [[VD]], <vscale x 2 x bfloat> [[VS2]], <vscale x 2 x i1> [[VM]], i64 0, i64 [[VL]], i64 0)
+// CHECK-RV64-NEXT: ret <vscale x 2 x i8> [[TMP0]]
+//
+vuint8mf4_t test_vfncvt_xu_f_w_bf16mf2_u8mf4_rm_tumu(vbool32_t vm,
+ vuint8mf4_t vd,
+ vbfloat16mf2_t vs2,
+ size_t vl) {
+ return __riscv_vfncvt_xu_f_w_bf16mf2_u8mf4_rm_tumu(vm, vd, vs2,
+ __RISCV_FRM_RNE, vl);
+}
+
+// CHECK-RV64-LABEL: define dso_local <vscale x 4 x i8> @test_vfncvt_xu_f_w_bf16m1_u8mf2_rm_tumu(
+// CHECK-RV64-SAME: <vscale x 4 x i1> [[VM:%.*]], <vscale x 4 x i8> [[VD:%.*]], <vscale x 4 x bfloat> [[VS2:%.*]], i64 noundef [[VL:%.*]]) #[[ATTR0]] {
+// CHECK-RV64-NEXT: [[ENTRY:.*:]]
+// CHECK-RV64-NEXT: [[TMP0:%.*]] = call <vscale x 4 x i8> @llvm.riscv.vfncvt.xu.f.w.mask.nxv4i8.nxv4bf16.i64(<vscale x 4 x i8> [[VD]], <vscale x 4 x bfloat> [[VS2]], <vscale x 4 x i1> [[VM]], i64 0, i64 [[VL]], i64 0)
+// CHECK-RV64-NEXT: ret <vscale x 4 x i8> [[TMP0]]
+//
+vuint8mf2_t test_vfncvt_xu_f_w_bf16m1_u8mf2_rm_tumu(vbool16_t vm,
+ vuint8mf2_t vd,
+ vbfloat16m1_t vs2,
+ size_t vl) {
+ return __riscv_vfncvt_xu_f_w_bf16m1_u8mf2_rm_tumu(vm, vd, vs2,
+ __RISCV_FRM_RNE, vl);
+}
+
+// CHECK-RV64-LABEL: define dso_local <vscale x 8 x i8> @test_vfncvt_xu_f_w_bf16m2_u8m1_rm_tumu(
+// CHECK-RV64-SAME: <vscale x 8 x i1> [[VM:%.*]], <vscale x 8 x i8> [[VD:%.*]], <vscale x 8 x bfloat> [[VS2:%.*]], i64 noundef [[VL:%.*]]) #[[ATTR0]] {
+// CHECK-RV64-NEXT: [[ENTRY:.*:]]
+// CHECK-RV64-NEXT: [[TMP0:%.*]] = call <vscale x 8 x i8> @llvm.riscv.vfncvt.xu.f.w.mask.nxv8i8.nxv8bf16.i64(<vscale x 8 x i8> [[VD]], <vscale x 8 x bfloat> [[VS2]], <vscale x 8 x i1> [[VM]], i64 0, i64 [[VL]], i64 0)
+// CHECK-RV64-NEXT: ret <vscale x 8 x i8> [[TMP0]]
+//
+vuint8m1_t test_vfncvt_xu_f_w_bf16m2_u8m1_rm_tumu(vbool8_t vm, vuint8m1_t vd,
+ vbfloat16m2_t vs2,
+ size_t vl) {
+ return __riscv_vfncvt_xu_f_w_bf16m2_u8m1_rm_tumu(vm, vd, vs2, __RISCV_FRM_RNE,
+ vl);
+}
+
+// CHECK-RV64-LABEL: define dso_local <vscale x 16 x i8> @test_vfncvt_xu_f_w_bf16m4_u8m2_rm_tumu(
+// CHECK-RV64-SAME: <vscale x 16 x i1> [[VM:%.*]], <vscale x 16 x i8> [[VD:%.*]], <vscale x 16 x bfloat> [[VS2:%.*]], i64 noundef [[VL:%.*]]) #[[ATTR0]] {
+// CHECK-RV64-NEXT: [[ENTRY:.*:]]
+// CHECK-RV64-NEXT: [[TMP0:%.*]] = call <vscale x 16 x i8> @llvm.riscv.vfncvt.xu.f.w.mask.nxv16i8.nxv16bf16.i64(<vscale x 16 x i8> [[VD]], <vscale x 16 x bfloat> [[VS2]], <vscale x 16 x i1> [[VM]], i64 0, i64 [[VL]], i64 0)
+// CHECK-RV64-NEXT: ret <vscale x 16 x i8> [[TMP0]]
+//
+vuint8m2_t test_vfncvt_xu_f_w_bf16m4_u8m2_rm_tumu(vbool4_t vm, vuint8m2_t vd,
+ vbfloat16m4_t vs2,
+ size_t vl) {
+ return __riscv_vfncvt_xu_f_w_bf16m4_u8m2_rm_tumu(vm, vd, vs2, __RISCV_FRM_RNE,
+ vl);
+}
+
+// CHECK-RV64-LABEL: define dso_local <vscale x 32 x i8> @test_vfncvt_xu_f_w_bf16m8_u8m4_rm_tumu(
+// CHECK-RV64-SAME: <vscale x 32 x i1> [[VM:%.*]], <vscale x 32 x i8> [[VD:%.*]], <vscale x 32 x bfloat> [[VS2:%.*]], i64 noundef [[VL:%.*]]) #[[ATTR0]] {
+// CHECK-RV64-NEXT: [[ENTRY:.*:]]
+// CHECK-RV64-NEXT: [[TMP0:%.*]] = call <vscale x 32 x i8> @llvm.riscv.vfncvt.xu.f.w.mask.nxv32i8.nxv32bf16.i64(<vscale x 32 x i8> [[VD]], <vscale x 32 x bfloat> [[VS2]], <vscale x 32 x i1> [[VM]], i64 0, i64 [[VL]], i64 0)
+// CHECK-RV64-NEXT: ret <vscale x 32 x i8> [[TMP0]]
+//
+vuint8m4_t test_vfncvt_xu_f_w_bf16m8_u8m4_rm_tumu(vbool2_t vm, vuint8m4_t vd,
+ vbfloat16m8_t vs2,
+ size_t vl) {
+ return __riscv_vfncvt_xu_f_w_bf16m8_u8m4_rm_tumu(vm, vd, vs2, __RISCV_FRM_RNE,
+ vl);
+}
+
+// CHECK-RV64-LABEL: define dso_local <vscale x 1 x bfloat> @test_vfncvt_f_f_w_bf16mf4_rm_tumu(
+// CHECK-RV64-SAME: <vscale x 1 x i1> [[VM:%.*]], <vscale x 1 x bfloat> [[VD:%.*]], <vscale x 1 x float> [[VS2:%.*]], i64 noundef [[VL:%.*]]) #[[ATTR0]] {
+// CHECK-RV64-NEXT: [[ENTRY:.*:]]
+// CHECK-RV64-NEXT: [[TMP0:%.*]] = call <vscale x 1 x bfloat> @llvm.riscv.vfncvt.f.f.w.mask.nxv1bf16.nxv1f32.i64(<vscale x 1 x bfloat> [[VD]], <vscale x 1 x float> [[VS2]], <vscale x 1 x i1> [[VM]], i64 0, i64 [[VL]], i64 0)
+// CHECK-RV64-NEXT: ret <vscale x 1 x bfloat> [[TMP0]]
+//
+vbfloat16mf4_t test_vfncvt_f_f_w_bf16mf4_rm_tumu(vbool64_t vm,
+ vbfloat16mf4_t vd,
+ vfloat32mf2_t vs2, size_t vl) {
+ return __riscv_vfncvt_f_f_w_bf16mf4_rm_tumu(vm, vd, vs2, __RISCV_FRM_RNE, vl);
+}
+
+// CHECK-RV64-LABEL: define dso_local <vscale x 2 x bfloat> @test_vfncvt_f_f_w_bf16mf2_rm_tumu(
+// CHECK-RV64-SAME: <vscale x 2 x i1> [[VM:%.*]], <vscale x 2 x bfloat> [[VD:%.*]], <vscale x 2 x float> [[VS2:%.*]], i64 noundef [[VL:%.*]]) #[[ATTR0]] {
+// CHECK-RV64-NEXT: [[ENTRY:.*:]]
+// CHECK-RV64-NEXT: [[TMP0:%.*]] = call <vscale x 2 x bfloat> @llvm.riscv.vfncvt.f.f.w.mask.nxv2bf16.nxv2f32.i64(<vscale x 2 x bfloat> [[VD]], <vscale x 2 x float> [[VS2]], <vscale x 2 x i1> [[VM]], i64 0, i64 [[VL]], i64 0)
+// CHECK-RV64-NEXT: ret <vscale x 2 x bfloat> [[TMP0]]
+//
+vbfloat16mf2_t test_vfncvt_f_f_w_bf16mf2_rm_tumu(vbool32_t vm,
+ vbfloat16mf2_t vd,
+ vfloat32m1_t vs2, size_t vl) {
+ return __riscv_vfncvt_f_f_w_bf16mf2_rm_tumu(vm, vd, vs2, __RISCV_FRM_RNE, vl);
+}
+
+// CHECK-RV64-LABEL: define dso_local <vscale x 4 x bfloat> @test_vfncvt_f_f_w_bf16m1_rm_tumu(
+// CHECK-RV64-SAME: <vscale x 4 x i1> [[VM:%.*]], <vscale x 4 x bfloat> [[VD:%.*]], <vscale x 4 x float> [[VS2:%.*]], i64 noundef [[VL:%.*]]) #[[ATTR0]] {
+// CHECK-RV64-NEXT: [[ENTRY:.*:]]
+// CHECK-RV64-NEXT: [[TMP0:%.*]] = call <vscale x 4 x bfloat> @llvm.riscv.vfncvt.f.f.w.mask.nxv4bf16.nxv4f32.i64(<vscale x 4 x bfloat> [[VD]], <vscale x 4 x float> [[VS2]], <vscale x 4 x i1> [[VM]], i64 0, i64 [[VL]], i64 0)
+// CHECK-RV64-NEXT: ret <vscale x 4 x bfloat> [[TMP0]]
+//
+vbfloat16m1_t test_vfncvt_f_f_w_bf16m1_rm_tumu(vbool16_t vm, vbfloat16m1_t vd,
+ vfloat32m2_t vs2, size_t vl) {
+ return __riscv_vfncvt_f_f_w_bf16m1_rm_tumu(vm, vd, vs2, __RISCV_FRM_RNE, vl);
+}
+
+// CHECK-RV64-LABEL: define dso_local <vscale x 8 x bfloat> @test_vfncvt_f_f_w_bf16m2_rm_tumu(
+// CHECK-RV64-SAME: <vscale x 8 x i1> [[VM:%.*]], <vscale x 8 x bfloat> [[VD:%.*]], <vscale x 8 x float> [[VS2:%.*]], i64 noundef [[VL:%.*]]) #[[ATTR0]] {
+// CHECK-RV64-NEXT: [[ENTRY:.*:]]
+// CHECK-RV64-NEXT: [[TMP0:%.*]] = call <vscale x 8 x bfloat> @llvm.riscv.vfncvt.f.f.w.mask.nxv8bf16.nxv8f32.i64(<vscale x 8 x bfloat> [[VD]], <vscale x 8 x float> [[VS2]], <vscale x 8 x i1> [[VM]], i64 0, i64 [[VL]], i64 0)
+// CHECK-RV64-NEXT: ret <vscale x 8 x bfloat> [[TMP0]]
+//
+vbfloat16m2_t test_vfncvt_f_f_w_bf16m2_rm_tumu(vbool8_t vm, vbfloat16m2_t vd,
+ vfloat32m4_t vs2, size_t vl) {
+ return __riscv_vfncvt_f_f_w_bf16m2_rm_tumu(vm, vd, vs2, __RISCV_FRM_RNE, vl);
+}
+
+// CHECK-RV64-LABEL: define dso_local <vscale x 16 x bfloat> @test_vfncvt_f_f_w_bf16m4_rm_tumu(
+// CHECK-RV64-SAME: <vscale x 16 x i1> [[VM:%.*]], <vscale x 16 x bfloat> [[VD:%.*]], <vscale x 16 x float> [[VS2:%.*]], i64 noundef [[VL:%.*]]) #[[ATTR0]] {
+// CHECK-RV64-NEXT: [[ENTRY:.*:]]
+// CHECK-RV64-NEXT: [[TMP0:%.*]] = call <vscale x 16 x bfloat> @llvm.riscv.vfncvt.f.f.w.mask.nxv16bf16.nxv16f32.i64(<vscale x 16 x bfloat> [[VD]], <vscale x 16 x float> [[VS2]], <vscale x 16 x i1> [[VM]], i64 0, i64 [[VL]], i64 0)
+// CHECK-RV64-NEXT: ret <vscale x 16 x bfloat> [[TMP0]]
+//
+vbfloat16m4_t test_vfncvt_f_f_w_bf16m4_rm_tumu(vbool4_t vm, vbfloat16m4_t vd,
+ vfloat32m8_t vs2, size_t vl) {
+ return __riscv_vfncvt_f_f_w_bf16m4_rm_tumu(vm, vd, vs2, __RISCV_FRM_RNE, vl);
+}
+
+// CHECK-RV64-LABEL: define dso_local <vscale x 1 x i8> @test_vfncvt_x_f_w_bf16mf4_i8mf8_rm_mu(
+// CHECK-RV64-SAME: <vscale x 1 x i1> [[VM:%.*]], <vscale x 1 x i8> [[VD:%.*]], <vscale x 1 x bfloat> [[VS2:%.*]], i64 noundef [[VL:%.*]]) #[[ATTR0]] {
+// CHECK-RV64-NEXT: [[ENTRY:.*:]]
+// CHECK-RV64-NEXT: [[TMP0:%.*]] = call <vscale x 1 x i8> @llvm.riscv.vfncvt.x.f.w.mask.nxv1i8.nxv1bf16.i64(<vscale x 1 x i8> [[VD]], <vscale x 1 x bfloat> [[VS2]], <vscale x 1 x i1> [[VM]], i64 0, i64 [[VL]], i64 1)
+// CHECK-RV64-NEXT: ret <vscale x 1 x i8> [[TMP0]]
+//
+vint8mf8_t test_vfncvt_x_f_w_bf16mf4_i8mf8_rm_mu(vbool64_t vm, vint8mf8_t vd,
+ vbfloat16mf4_t vs2,
+ size_t vl) {
+ return __riscv_vfncvt_x_f_w_bf16mf4_i8mf8_rm_mu(vm, vd, vs2, __RISCV_FRM_RNE,
+ vl);
+}
+
+// CHECK-RV64-LABEL: define dso_local <vscale x 2 x i8> @test_vfncvt_x_f_w_bf16mf2_i8mf4_rm_mu(
+// CHECK-RV64-SAME: <vscale x 2 x i1> [[VM:%.*]], <vscale x 2 x i8> [[VD:%.*]], <vscale x 2 x bfloat> [[VS2:%.*]], i64 noundef [[VL:%.*]]) #[[ATTR0]] {
+// CHECK-RV64-NEXT: [[ENTRY:.*:]]
+// CHECK-RV64-NEXT: [[TMP0:%.*]] = call <vscale x 2 x i8> @llvm.riscv.vfncvt.x.f.w.mask.nxv2i8.nxv2bf16.i64(<vscale x 2 x i8> [[VD]], <vscale x 2 x bfloat> [[VS2]], <vscale x 2 x i1> [[VM]], i64 0, i64 [[VL]], i64 1)
+// CHECK-RV64-NEXT: ret <vscale x 2 x i8> [[TMP0]]
+//
+vint8mf4_t test_vfncvt_x_f_w_bf16mf2_i8mf4_rm_mu(vbool32_t vm, vint8mf4_t vd,
+ vbfloat16mf2_t vs2,
+ size_t vl) {
+ return __riscv_vfncvt_x_f_w_bf16mf2_i8mf4_rm_mu(vm, vd, vs2, __RISCV_FRM_RNE,
+ vl);
+}
+
+// CHECK-RV64-LABEL: define dso_local <vscale x 4 x i8> @test_vfncvt_x_f_w_bf16m1_i8mf2_rm_mu(
+// CHECK-RV64-SAME: <vscale x 4 x i1> [[VM:%.*]], <vscale x 4 x i8> [[VD:%.*]], <vscale x 4 x bfloat> [[VS2:%.*]], i64 noundef [[VL:%.*]]) #[[ATTR0]] {
+// CHECK-RV64-NEXT: [[ENTRY:.*:]]
+// CHECK-RV64-NEXT: [[TMP0:%.*]] = call <vscale x 4 x i8> @llvm.riscv.vfncvt.x.f.w.mask.nxv4i8.nxv4bf16.i64(<vscale x 4 x i8> [[VD]], <vscale x 4 x bfloat> [[VS2]], <vscale x 4 x i1> [[VM]], i64 0, i64 [[VL]], i64 1)
+// CHECK-RV64-NEXT: ret <vscale x 4 x i8> [[TMP0]]
+//
+vint8mf2_t test_vfncvt_x_f_w_bf16m1_i8mf2_rm_mu(vbool16_t vm, vint8mf2_t vd,
+ vbfloat16m1_t vs2, size_t vl) {
+ return __riscv_vfncvt_x_f_w_bf16m1_i8mf2_rm_mu(vm, vd, vs2, __RISCV_FRM_RNE,
+ vl);
+}
+
+// CHECK-RV64-LABEL: define dso_local <vscale x 8 x i8> @test_vfncvt_x_f_w_bf16m2_i8m1_rm_mu(
+// CHECK-RV64-SAME: <vscale x 8 x i1> [[VM:%.*]], <vscale x 8 x i8> [[VD:%.*]], <vscale x 8 x bfloat> [[VS2:%.*]], i64 noundef [[VL:%.*]]) #[[ATTR0]] {
+// CHECK-RV64-NEXT: [[ENTRY:.*:]]
+// CHECK-RV64-NEXT: [[TMP0:%.*]] = call <vscale x 8 x i8> @llvm.riscv.vfncvt.x.f.w.mask.nxv8i8.nxv8bf16.i64(<vscale x 8 x i8> [[VD]], <vscale x 8 x bfloat> [[VS2]], <vscale x 8 x i1> [[VM]], i64 0, i64 [[VL]], i64 1)
+// CHECK-RV64-NEXT: ret <vscale x 8 x i8> [[TMP0]]
+//
+vint8m1_t test_vfncvt_x_f_w_bf16m2_i8m1_rm_mu(vbool8_t vm, vint8m1_t vd,
+ vbfloat16m2_t vs2, size_t vl) {
+ return __riscv_vfncvt_x_f_w_bf16m2_i8m1_rm_mu(vm, vd, vs2, __RISCV_FRM_RNE,
+ vl);
+}
+
+// CHECK-RV64-LABEL: define dso_local <vscale x 16 x i8> @test_vfncvt_x_f_w_bf16m4_i8m2_rm_mu(
+// CHECK-RV64-SAME: <vscale x 16 x i1> [[VM:%.*]], <vscale x 16 x i8> [[VD:%.*]], <vscale x 16 x bfloat> [[VS2:%.*]], i64 noundef [[VL:%.*]]) #[[ATTR0]] {
+// CHECK-RV64-NEXT: [[ENTRY:.*:]]
+// CHECK-RV64-NEXT: [[TMP0:%.*]] = call <vscale x 16 x i8> @llvm.riscv.vfncvt.x.f.w.mask.nxv16i8.nxv16bf16.i64(<vscale x 16 x i8> [[VD]], <vscale x 16 x bfloat> [[VS2]], <vscale x 16 x i1> [[VM]], i64 0, i64 [[VL]], i64 1)
+// CHECK-RV64-NEXT: ret <vscale x 16 x i8> [[TMP0]]
+//
+vint8m2_t test_vfncvt_x_f_w_bf16m4_i8m2_rm_mu(vbool4_t vm, vint8m2_t vd,
+ vbfloat16m4_t vs2, size_t vl) {
+ return __riscv_vfncvt_x_f_w_bf16m4_i8m2_rm_mu(vm, vd, vs2, __RISCV_FRM_RNE,
+ vl);
+}
+
+// CHECK-RV64-LABEL: define dso_local <vscale x 32 x i8> @test_vfncvt_x_f_w_bf16m8_i8m4_rm_mu(
+// CHECK-RV64-SAME: <vscale x 32 x i1> [[VM:%.*]], <vscale x 32 x i8> [[VD:%.*]], <vscale x 32 x bfloat> [[VS2:%.*]], i64 noundef [[VL:%.*]]) #[[ATTR0]] {
+// CHECK-RV64-NEXT: [[ENTRY:.*:]]
+// CHECK-RV64-NEXT: [[TMP0:%.*]] = call <vscale x 32 x i8> @llvm.riscv.vfncvt.x.f.w.mask.nxv32i8.nxv32bf16.i64(<vscale x 32 x i8> [[VD]], <vscale x 32 x bfloat> [[VS2]], <vscale x 32 x i1> [[VM]], i64 0, i64 [[VL]], i64 1)
+// CHECK-RV64-NEXT: ret <vscale x 32 x i8> [[TMP0]]
+//
+vint8m4_t test_vfncvt_x_f_w_bf16m8_i8m4_rm_mu(vbool2_t vm, vint8m4_t vd,
+ vbfloat16m8_t vs2, size_t vl) {
+ return __riscv_vfncvt_x_f_w_bf16m8_i8m4_rm_mu(vm, vd, vs2, __RISCV_FRM_RNE,
+ vl);
+}
+
+// CHECK-RV64-LABEL: define dso_local <vscale x 1 x i8> @test_vfncvt_xu_f_w_bf16mf4_u8mf8_rm_mu(
+// CHECK-RV64-SAME: <vscale x 1 x i1> [[VM:%.*]], <vscale x 1 x i8> [[VD:%.*]], <vscale x 1 x bfloat> [[VS2:%.*]], i64 noundef [[VL:%.*]]) #[[ATTR0]] {
+// CHECK-RV64-NEXT: [[ENTRY:.*:]]
+// CHECK-RV64-NEXT: [[TMP0:%.*]] = call <vscale x 1 x i8> @llvm.riscv.vfncvt.xu.f.w.mask.nxv1i8.nxv1bf16.i64(<vscale x 1 x i8> [[VD]], <vscale x 1 x bfloat> [[VS2]], <vscale x 1 x i1> [[VM]], i64 0, i64 [[VL]], i64 1)
+// CHECK-RV64-NEXT: ret <vscale x 1 x i8> [[TMP0]]
+//
+vuint8mf8_t test_vfncvt_xu_f_w_bf16mf4_u8mf8_rm_mu(vbool64_t vm, vuint8mf8_t vd,
+ vbfloat16mf4_t vs2,
+ size_t vl) {
+ return __riscv_vfncvt_xu_f_w_bf16mf4_u8mf8_rm_mu(vm, vd, vs2, __RISCV_FRM_RNE,
+ vl);
+}
+
+// CHECK-RV64-LABEL: define dso_local <vscale x 2 x i8> @test_vfncvt_xu_f_w_bf16mf2_u8mf4_rm_mu(
+// CHECK-RV64-SAME: <vscale x 2 x i1> [[VM:%.*]], <vscale x 2 x i8> [[VD:%.*]], <vscale x 2 x bfloat> [[VS2:%.*]], i64 noundef [[VL:%.*]]) #[[ATTR0]] {
+// CHECK-RV64-NEXT: [[ENTRY:.*:]]
+// CHECK-RV64-NEXT: [[TMP0:%.*]] = call <vscale x 2 x i8> @llvm.riscv.vfncvt.xu.f.w.mask.nxv2i8.nxv2bf16.i64(<vscale x 2 x i8> [[VD]], <vscale x 2 x bfloat> [[VS2]], <vscale x 2 x i1> [[VM]], i64 0, i64 [[VL]], i64 1)
+// CHECK-RV64-NEXT: ret <vscale x 2 x i8> [[TMP0]]
+//
+vuint8mf4_t test_vfncvt_xu_f_w_bf16mf2_u8mf4_rm_mu(vbool32_t vm, vuint8mf4_t vd,
+ vbfloat16mf2_t vs2,
+ size_t vl) {
+ return __riscv_vfncvt_xu_f_w_bf16mf2_u8mf4_rm_mu(vm, vd, vs2, __RISCV_FRM_RNE,
+ vl);
+}
+
+// CHECK-RV64-LABEL: define dso_local <vscale x 4 x i8> @test_vfncvt_xu_f_w_bf16m1_u8mf2_rm_mu(
+// CHECK-RV64-SAME: <vscale x 4 x i1> [[VM:%.*]], <vscale x 4 x i8> [[VD:%.*]], <vscale x 4 x bfloat> [[VS2:%.*]], i64 noundef [[VL:%.*]]) #[[ATTR0]] {
+// CHECK-RV64-NEXT: [[ENTRY:.*:]]
+// CHECK-RV64-NEXT: [[TMP0:%.*]] = call <vscale x 4 x i8> @llvm.riscv.vfncvt.xu.f.w.mask.nxv4i8.nxv4bf16.i64(<vscale x 4 x i8> [[VD]], <vscale x 4 x bfloat> [[VS2]], <vscale x 4 x i1> [[VM]], i64 0, i64 [[VL]], i64 1)
+// CHECK-RV64-NEXT: ret <vscale x 4 x i8> [[TMP0]]
+//
+vuint8mf2_t test_vfncvt_xu_f_w_bf16m1_u8mf2_rm_mu(vbool16_t vm, vuint8mf2_t vd,
+ vbfloat16m1_t vs2,
+ size_t vl) {
+ return __riscv_vfncvt_xu_f_w_bf16m1_u8mf2_rm_mu(vm, vd, vs2, __RISCV_FRM_RNE,
+ vl);
+}
+
+// CHECK-RV64-LABEL: define dso_local <vscale x 8 x i8> @test_vfncvt_xu_f_w_bf16m2_u8m1_rm_mu(
+// CHECK-RV64-SAME: <vscale x 8 x i1> [[VM:%.*]], <vscale x 8 x i8> [[VD:%.*]], <vscale x 8 x bfloat> [[VS2:%.*]], i64 noundef [[VL:%.*]]) #[[ATTR0]] {
+// CHECK-RV64-NEXT: [[ENTRY:.*:]]
+// CHECK-RV64-NEXT: [[TMP0:%.*]] = call <vscale x 8 x i8> @llvm.riscv.vfncvt.xu.f.w.mask.nxv8i8.nxv8bf16.i64(<vscale x 8 x i8> [[VD]], <vscale x 8 x bfloat> [[VS2]], <vscale x 8 x i1> [[VM]], i64 0, i64 [[VL]], i64 1)
+// CHECK-RV64-NEXT: ret <vscale x 8 x i8> [[TMP0]]
+//
+vuint8m1_t test_vfncvt_xu_f_w_bf16m2_u8m1_rm_mu(vbool8_t vm, vuint8m1_t vd,
+ vbfloat16m2_t vs2, size_t vl) {
+ return __riscv_vfncvt_xu_f_w_bf16m2_u8m1_rm_mu(vm, vd, vs2, __RISCV_FRM_RNE,
+ vl);
+}
+
+// CHECK-RV64-LABEL: define dso_local <vscale x 16 x i8> @test_vfncvt_xu_f_w_bf16m4_u8m2_rm_mu(
+// CHECK-RV64-SAME: <vscale x 16 x i1> [[VM:%.*]], <vscale x 16 x i8> [[VD:%.*]], <vscale x 16 x bfloat> [[VS2:%.*]], i64 noundef [[VL:%.*]]) #[[ATTR0]] {
+// CHECK-RV64-NEXT: [[ENTRY:.*:]]
+// CHECK-RV64-NEXT: [[TMP0:%.*]] = call <vscale x 16 x i8> @llvm.riscv.vfncvt.xu.f.w.mask.nxv16i8.nxv16bf16.i64(<vscale x 16 x i8> [[VD]], <vscale x 16 x bfloat> [[VS2]], <vscale x 16 x i1> [[VM]], i64 0, i64 [[VL]], i64 1)
+// CHECK-RV64-NEXT: ret <vscale x 16 x i8> [[TMP0]]
+//
+vuint8m2_t test_vfncvt_xu_f_w_bf16m4_u8m2_rm_mu(vbool4_t vm, vuint8m2_t vd,
+ vbfloat16m4_t vs2, size_t vl) {
+ return __riscv_vfncvt_xu_f_w_bf16m4_u8m2_rm_mu(vm, vd, vs2, __RISCV_FRM_RNE,
+ vl);
+}
+
+// CHECK-RV64-LABEL: define dso_local <vscale x 32 x i8> @test_vfncvt_xu_f_w_bf16m8_u8m4_rm_mu(
+// CHECK-RV64-SAME: <vscale x 32 x i1> [[VM:%.*]], <vscale x 32 x i8> [[VD:%.*]], <vscale x 32 x bfloat> [[VS2:%.*]], i64 noundef [[VL:%.*]]) #[[ATTR0]] {
+// CHECK-RV64-NEXT: [[ENTRY:.*:]]
+// CHECK-RV64-NEXT: [[TMP0:%.*]] = call <vscale x 32 x i8> @llvm.riscv.vfncvt.xu.f.w.mask.nxv32i8.nxv32bf16.i64(<vscale x 32 x i8> [[VD]], <vscale x 32 x bfloat> [[VS2]], <vscale x 32 x i1> [[VM]], i64 0, i64 [[VL]], i64 1)
+// CHECK-RV64-NEXT: ret <vscale x 32 x i8> [[TMP0]]
+//
+vuint8m4_t test_vfncvt_xu_f_w_bf16m8_u8m4_rm_mu(vbool2_t vm, vuint8m4_t vd,
+ vbfloat16m8_t vs2, size_t vl) {
+ return __riscv_vfncvt_xu_f_w_bf16m8_u8m4_rm_mu(vm, vd, vs2, __RISCV_FRM_RNE,
+ vl);
+}
+
+// CHECK-RV64-LABEL: define dso_local <vscale x 1 x bfloat> @test_vfncvt_f_f_w_bf16mf4_rm_mu(
+// CHECK-RV64-SAME: <vscale x 1 x i1> [[VM:%.*]], <vscale x 1 x bfloat> [[VD:%.*]], <vscale x 1 x float> [[VS2:%.*]], i64 noundef [[VL:%.*]]) #[[ATTR0]] {
+// CHECK-RV64-NEXT: [[ENTRY:.*:]]
+// CHECK-RV64-NEXT: [[TMP0:%.*]] = call <vscale x 1 x bfloat> @llvm.riscv.vfncvt.f.f.w.mask.nxv1bf16.nxv1f32.i64(<vscale x 1 x bfloat> [[VD]], <vscale x 1 x float> [[VS2]], <vscale x 1 x i1> [[VM]], i64 0, i64 [[VL]], i64 1)
+// CHECK-RV64-NEXT: ret <vscale x 1 x bfloat> [[TMP0]]
+//
+vbfloat16mf4_t test_vfncvt_f_f_w_bf16mf4_rm_mu(vbool64_t vm, vbfloat16mf4_t vd,
+ vfloat32mf2_t vs2, size_t vl) {
+ return __riscv_vfncvt_f_f_w_bf16mf4_rm_mu(vm, vd, vs2, __RISCV_FRM_RNE, vl);
+}
+
+// CHECK-RV64-LABEL: define dso_local <vscale x 2 x bfloat> @test_vfncvt_f_f_w_bf16mf2_rm_mu(
+// CHECK-RV64-SAME: <vscale x 2 x i1> [[VM:%.*]], <vscale x 2 x bfloat> [[VD:%.*]], <vscale x 2 x float> [[VS2:%.*]], i64 noundef [[VL:%.*]]) #[[ATTR0]] {
+// CHECK-RV64-NEXT: [[ENTRY:.*:]]
+// CHECK-RV64-NEXT: [[TMP0:%.*]] = call <vscale x 2 x bfloat> @llvm.riscv.vfncvt.f.f.w.mask.nxv2bf16.nxv2f32.i64(<vscale x 2 x bfloat> [[VD]], <vscale x 2 x float> [[VS2]], <vscale x 2 x i1> [[VM]], i64 0, i64 [[VL]], i64 1)
+// CHECK-RV64-NEXT: ret <vscale x 2 x bfloat> [[TMP0]]
+//
+vbfloat16mf2_t test_vfncvt_f_f_w_bf16mf2_rm_mu(vbool32_t vm, vbfloat16mf2_t vd,
+ vfloat32m1_t vs2, size_t vl) {
+ return __riscv_vfncvt_f_f_w_bf16mf2_rm_mu(vm, vd, vs2, __RISCV_FRM_RNE, vl);
+}
+
+// CHECK-RV64-LABEL: define dso_local <vscale x 4 x bfloat> @test_vfncvt_f_f_w_bf16m1_rm_mu(
+// CHECK-RV64-SAME: <vscale x 4 x i1> [[VM:%.*]], <vscale x 4 x bfloat> [[VD:%.*]], <vscale x 4 x float> [[VS2:%.*]], i64 noundef [[VL:%.*]]) #[[ATTR0]] {
+// CHECK-RV64-NEXT: [[ENTRY:.*:]]
+// CHECK-RV64-NEXT: [[TMP0:%.*]] = call <vscale x 4 x bfloat> @llvm.riscv.vfncvt.f.f.w.mask.nxv4bf16.nxv4f32.i64(<vscale x 4 x bfloat> [[VD]], <vscale x 4 x float> [[VS2]], <vscale x 4 x i1> [[VM]], i64 0, i64 [[VL]], i64 1)
+// CHECK-RV64-NEXT: ret <vscale x 4 x bfloat> [[TMP0]]
+//
+vbfloat16m1_t test_vfncvt_f_f_w_bf16m1_rm_mu(vbool16_t vm, vbfloat16m1_t vd,
+ vfloat32m2_t vs2, size_t vl) {
+ return __riscv_vfncvt_f_f_w_bf16m1_rm_mu(vm, vd, vs2, __RISCV_FRM_RNE, vl);
+}
+
+// CHECK-RV64-LABEL: define dso_local <vscale x 8 x bfloat> @test_vfncvt_f_f_w_bf16m2_rm_mu(
+// CHECK-RV64-SAME: <vscale x 8 x i1> [[VM:%.*]], <vscale x 8 x bfloat> [[VD:%.*]], <vscale x 8 x float> [[VS2:%.*]], i64 noundef [[VL:%.*]]) #[[ATTR0]] {
+// CHECK-RV64-NEXT: [[ENTRY:.*:]]
+// CHECK-RV64-NEXT: [[TMP0:%.*]] = call <vscale x 8 x bfloat> @llvm.riscv.vfncvt.f.f.w.mask.nxv8bf16.nxv8f32.i64(<vscale x 8 x bfloat> [[VD]], <vscale x 8 x float> [[VS2]], <vscale x 8 x i1> [[VM]], i64 0, i64 [[VL]], i64 1)
+// CHECK-RV64-NEXT: ret <vscale x 8 x bfloat> [[TMP0]]
+//
+vbfloat16m2_t test_vfncvt_f_f_w_bf16m2_rm_mu(vbool8_t vm, vbfloat16m2_t vd,
+ vfloat32m4_t vs2, size_t vl) {
+ return __riscv_vfncvt_f_f_w_bf16m2_rm_mu(vm, vd, vs2, __RISCV_FRM_RNE, vl);
+}
+
+// CHECK-RV64-LABEL: define dso_local <vscale x 16 x bfloat> @test_vfncvt_f_f_w_bf16m4_rm_mu(
+// CHECK-RV64-SAME: <vscale x 16 x i1> [[VM:%.*]], <vscale x 16 x bfloat> [[VD:%.*]], <vscale x 16 x float> [[VS2:%.*]], i64 noundef [[VL:%.*]]) #[[ATTR0]] {
+// CHECK-RV64-NEXT: [[ENTRY:.*:]]
+// CHECK-RV64-NEXT: [[TMP0:%.*]] = call <vscale x 16 x bfloat> @llvm.riscv.vfncvt.f.f.w.mask.nxv16bf16.nxv16f32.i64(<vscale x 16 x bfloat> [[VD]], <vscale x 16 x float> [[VS2]], <vscale x 16 x i1> [[VM]], i64 0, i64 [[VL]], i64 1)
+// CHECK-RV64-NEXT: ret <vscale x 16 x bfloat> [[TMP0]]
+//
+vbfloat16m4_t test_vfncvt_f_f_w_bf16m4_rm_mu(vbool4_t vm, vbfloat16m4_t vd,
+ vfloat32m8_t vs2, size_t vl) {
+ return __riscv_vfncvt_f_f_w_bf16m4_rm_mu(vm, vd, vs2, __RISCV_FRM_RNE, vl);
+}
diff --git a/clang/test/CodeGen/RISCV/rvv-intrinsics-autogenerated/zvfbfa/policy/non-overloaded/vfncvt_rod.c b/clang/test/CodeGen/RISCV/rvv-intrinsics-autogenerated/zvfbfa/policy/non-overloaded/vfncvt_rod.c
new file mode 100644
index 0000000..8406684
--- /dev/null
+++ b/clang/test/CodeGen/RISCV/rvv-intrinsics-autogenerated/zvfbfa/policy/non-overloaded/vfncvt_rod.c
@@ -0,0 +1,233 @@
+// NOTE: Assertions have been autogenerated by utils/update_cc_test_checks.py UTC_ARGS: --version 5
+// REQUIRES: riscv-registered-target
+// RUN: %clang_cc1 -triple riscv64 -target-feature +v \
+// RUN: -target-feature +experimental-zvfbfa -disable-O0-optnone \
+// RUN: -emit-llvm %s -o - | opt -S -passes=mem2reg | \
+// RUN: FileCheck --check-prefix=CHECK-RV64 %s
+
+#include <riscv_vector.h>
+
+// CHECK-RV64-LABEL: define dso_local <vscale x 1 x bfloat> @test_vfncvt_rod_f_f_w_bf16mf4_tu(
+// CHECK-RV64-SAME: <vscale x 1 x bfloat> [[VD:%.*]], <vscale x 1 x float> [[VS2:%.*]], i64 noundef [[VL:%.*]]) #[[ATTR0:[0-9]+]] {
+// CHECK-RV64-NEXT: [[ENTRY:.*:]]
+// CHECK-RV64-NEXT: [[TMP0:%.*]] = call <vscale x 1 x bfloat> @llvm.riscv.vfncvt.rod.f.f.w.nxv1bf16.nxv1f32.i64(<vscale x 1 x bfloat> [[VD]], <vscale x 1 x float> [[VS2]], i64 [[VL]])
+// CHECK-RV64-NEXT: ret <vscale x 1 x bfloat> [[TMP0]]
+//
+vbfloat16mf4_t test_vfncvt_rod_f_f_w_bf16mf4_tu(vbfloat16mf4_t vd,
+ vfloat32mf2_t vs2, size_t vl) {
+ return __riscv_vfncvt_rod_f_f_w_bf16mf4_tu(vd, vs2, vl);
+}
+
+// CHECK-RV64-LABEL: define dso_local <vscale x 2 x bfloat> @test_vfncvt_rod_f_f_w_bf16mf2_tu(
+// CHECK-RV64-SAME: <vscale x 2 x bfloat> [[VD:%.*]], <vscale x 2 x float> [[VS2:%.*]], i64 noundef [[VL:%.*]]) #[[ATTR0]] {
+// CHECK-RV64-NEXT: [[ENTRY:.*:]]
+// CHECK-RV64-NEXT: [[TMP0:%.*]] = call <vscale x 2 x bfloat> @llvm.riscv.vfncvt.rod.f.f.w.nxv2bf16.nxv2f32.i64(<vscale x 2 x bfloat> [[VD]], <vscale x 2 x float> [[VS2]], i64 [[VL]])
+// CHECK-RV64-NEXT: ret <vscale x 2 x bfloat> [[TMP0]]
+//
+vbfloat16mf2_t test_vfncvt_rod_f_f_w_bf16mf2_tu(vbfloat16mf2_t vd,
+ vfloat32m1_t vs2, size_t vl) {
+ return __riscv_vfncvt_rod_f_f_w_bf16mf2_tu(vd, vs2, vl);
+}
+
+// CHECK-RV64-LABEL: define dso_local <vscale x 4 x bfloat> @test_vfncvt_rod_f_f_w_bf16m1_tu(
+// CHECK-RV64-SAME: <vscale x 4 x bfloat> [[VD:%.*]], <vscale x 4 x float> [[VS2:%.*]], i64 noundef [[VL:%.*]]) #[[ATTR0]] {
+// CHECK-RV64-NEXT: [[ENTRY:.*:]]
+// CHECK-RV64-NEXT: [[TMP0:%.*]] = call <vscale x 4 x bfloat> @llvm.riscv.vfncvt.rod.f.f.w.nxv4bf16.nxv4f32.i64(<vscale x 4 x bfloat> [[VD]], <vscale x 4 x float> [[VS2]], i64 [[VL]])
+// CHECK-RV64-NEXT: ret <vscale x 4 x bfloat> [[TMP0]]
+//
+vbfloat16m1_t test_vfncvt_rod_f_f_w_bf16m1_tu(vbfloat16m1_t vd,
+ vfloat32m2_t vs2, size_t vl) {
+ return __riscv_vfncvt_rod_f_f_w_bf16m1_tu(vd, vs2, vl);
+}
+
+// CHECK-RV64-LABEL: define dso_local <vscale x 8 x bfloat> @test_vfncvt_rod_f_f_w_bf16m2_tu(
+// CHECK-RV64-SAME: <vscale x 8 x bfloat> [[VD:%.*]], <vscale x 8 x float> [[VS2:%.*]], i64 noundef [[VL:%.*]]) #[[ATTR0]] {
+// CHECK-RV64-NEXT: [[ENTRY:.*:]]
+// CHECK-RV64-NEXT: [[TMP0:%.*]] = call <vscale x 8 x bfloat> @llvm.riscv.vfncvt.rod.f.f.w.nxv8bf16.nxv8f32.i64(<vscale x 8 x bfloat> [[VD]], <vscale x 8 x float> [[VS2]], i64 [[VL]])
+// CHECK-RV64-NEXT: ret <vscale x 8 x bfloat> [[TMP0]]
+//
+vbfloat16m2_t test_vfncvt_rod_f_f_w_bf16m2_tu(vbfloat16m2_t vd,
+ vfloat32m4_t vs2, size_t vl) {
+ return __riscv_vfncvt_rod_f_f_w_bf16m2_tu(vd, vs2, vl);
+}
+
+// CHECK-RV64-LABEL: define dso_local <vscale x 16 x bfloat> @test_vfncvt_rod_f_f_w_bf16m4_tu(
+// CHECK-RV64-SAME: <vscale x 16 x bfloat> [[VD:%.*]], <vscale x 16 x float> [[VS2:%.*]], i64 noundef [[VL:%.*]]) #[[ATTR0]] {
+// CHECK-RV64-NEXT: [[ENTRY:.*:]]
+// CHECK-RV64-NEXT: [[TMP0:%.*]] = call <vscale x 16 x bfloat> @llvm.riscv.vfncvt.rod.f.f.w.nxv16bf16.nxv16f32.i64(<vscale x 16 x bfloat> [[VD]], <vscale x 16 x float> [[VS2]], i64 [[VL]])
+// CHECK-RV64-NEXT: ret <vscale x 16 x bfloat> [[TMP0]]
+//
+vbfloat16m4_t test_vfncvt_rod_f_f_w_bf16m4_tu(vbfloat16m4_t vd,
+ vfloat32m8_t vs2, size_t vl) {
+ return __riscv_vfncvt_rod_f_f_w_bf16m4_tu(vd, vs2, vl);
+}
+
+// CHECK-RV64-LABEL: define dso_local <vscale x 1 x bfloat> @test_vfncvt_rod_f_f_w_bf16mf4_tum(
+// CHECK-RV64-SAME: <vscale x 1 x i1> [[VM:%.*]], <vscale x 1 x bfloat> [[VD:%.*]], <vscale x 1 x float> [[VS2:%.*]], i64 noundef [[VL:%.*]]) #[[ATTR0]] {
+// CHECK-RV64-NEXT: [[ENTRY:.*:]]
+// CHECK-RV64-NEXT: [[TMP0:%.*]] = call <vscale x 1 x bfloat> @llvm.riscv.vfncvt.rod.f.f.w.mask.nxv1bf16.nxv1f32.i64(<vscale x 1 x bfloat> [[VD]], <vscale x 1 x float> [[VS2]], <vscale x 1 x i1> [[VM]], i64 [[VL]], i64 2)
+// CHECK-RV64-NEXT: ret <vscale x 1 x bfloat> [[TMP0]]
+//
+vbfloat16mf4_t test_vfncvt_rod_f_f_w_bf16mf4_tum(vbool64_t vm,
+ vbfloat16mf4_t vd,
+ vfloat32mf2_t vs2, size_t vl) {
+ return __riscv_vfncvt_rod_f_f_w_bf16mf4_tum(vm, vd, vs2, vl);
+}
+
+// CHECK-RV64-LABEL: define dso_local <vscale x 2 x bfloat> @test_vfncvt_rod_f_f_w_bf16mf2_tum(
+// CHECK-RV64-SAME: <vscale x 2 x i1> [[VM:%.*]], <vscale x 2 x bfloat> [[VD:%.*]], <vscale x 2 x float> [[VS2:%.*]], i64 noundef [[VL:%.*]]) #[[ATTR0]] {
+// CHECK-RV64-NEXT: [[ENTRY:.*:]]
+// CHECK-RV64-NEXT: [[TMP0:%.*]] = call <vscale x 2 x bfloat> @llvm.riscv.vfncvt.rod.f.f.w.mask.nxv2bf16.nxv2f32.i64(<vscale x 2 x bfloat> [[VD]], <vscale x 2 x float> [[VS2]], <vscale x 2 x i1> [[VM]], i64 [[VL]], i64 2)
+// CHECK-RV64-NEXT: ret <vscale x 2 x bfloat> [[TMP0]]
+//
+vbfloat16mf2_t test_vfncvt_rod_f_f_w_bf16mf2_tum(vbool32_t vm,
+ vbfloat16mf2_t vd,
+ vfloat32m1_t vs2, size_t vl) {
+ return __riscv_vfncvt_rod_f_f_w_bf16mf2_tum(vm, vd, vs2, vl);
+}
+
+// CHECK-RV64-LABEL: define dso_local <vscale x 4 x bfloat> @test_vfncvt_rod_f_f_w_bf16m1_tum(
+// CHECK-RV64-SAME: <vscale x 4 x i1> [[VM:%.*]], <vscale x 4 x bfloat> [[VD:%.*]], <vscale x 4 x float> [[VS2:%.*]], i64 noundef [[VL:%.*]]) #[[ATTR0]] {
+// CHECK-RV64-NEXT: [[ENTRY:.*:]]
+// CHECK-RV64-NEXT: [[TMP0:%.*]] = call <vscale x 4 x bfloat> @llvm.riscv.vfncvt.rod.f.f.w.mask.nxv4bf16.nxv4f32.i64(<vscale x 4 x bfloat> [[VD]], <vscale x 4 x float> [[VS2]], <vscale x 4 x i1> [[VM]], i64 [[VL]], i64 2)
+// CHECK-RV64-NEXT: ret <vscale x 4 x bfloat> [[TMP0]]
+//
+vbfloat16m1_t test_vfncvt_rod_f_f_w_bf16m1_tum(vbool16_t vm, vbfloat16m1_t vd,
+ vfloat32m2_t vs2, size_t vl) {
+ return __riscv_vfncvt_rod_f_f_w_bf16m1_tum(vm, vd, vs2, vl);
+}
+
+// CHECK-RV64-LABEL: define dso_local <vscale x 8 x bfloat> @test_vfncvt_rod_f_f_w_bf16m2_tum(
+// CHECK-RV64-SAME: <vscale x 8 x i1> [[VM:%.*]], <vscale x 8 x bfloat> [[VD:%.*]], <vscale x 8 x float> [[VS2:%.*]], i64 noundef [[VL:%.*]]) #[[ATTR0]] {
+// CHECK-RV64-NEXT: [[ENTRY:.*:]]
+// CHECK-RV64-NEXT: [[TMP0:%.*]] = call <vscale x 8 x bfloat> @llvm.riscv.vfncvt.rod.f.f.w.mask.nxv8bf16.nxv8f32.i64(<vscale x 8 x bfloat> [[VD]], <vscale x 8 x float> [[VS2]], <vscale x 8 x i1> [[VM]], i64 [[VL]], i64 2)
+// CHECK-RV64-NEXT: ret <vscale x 8 x bfloat> [[TMP0]]
+//
+vbfloat16m2_t test_vfncvt_rod_f_f_w_bf16m2_tum(vbool8_t vm, vbfloat16m2_t vd,
+ vfloat32m4_t vs2, size_t vl) {
+ return __riscv_vfncvt_rod_f_f_w_bf16m2_tum(vm, vd, vs2, vl);
+}
+
+// CHECK-RV64-LABEL: define dso_local <vscale x 16 x bfloat> @test_vfncvt_rod_f_f_w_bf16m4_tum(
+// CHECK-RV64-SAME: <vscale x 16 x i1> [[VM:%.*]], <vscale x 16 x bfloat> [[VD:%.*]], <vscale x 16 x float> [[VS2:%.*]], i64 noundef [[VL:%.*]]) #[[ATTR0]] {
+// CHECK-RV64-NEXT: [[ENTRY:.*:]]
+// CHECK-RV64-NEXT: [[TMP0:%.*]] = call <vscale x 16 x bfloat> @llvm.riscv.vfncvt.rod.f.f.w.mask.nxv16bf16.nxv16f32.i64(<vscale x 16 x bfloat> [[VD]], <vscale x 16 x float> [[VS2]], <vscale x 16 x i1> [[VM]], i64 [[VL]], i64 2)
+// CHECK-RV64-NEXT: ret <vscale x 16 x bfloat> [[TMP0]]
+//
+vbfloat16m4_t test_vfncvt_rod_f_f_w_bf16m4_tum(vbool4_t vm, vbfloat16m4_t vd,
+ vfloat32m8_t vs2, size_t vl) {
+ return __riscv_vfncvt_rod_f_f_w_bf16m4_tum(vm, vd, vs2, vl);
+}
+
+// CHECK-RV64-LABEL: define dso_local <vscale x 1 x bfloat> @test_vfncvt_rod_f_f_w_bf16mf4_tumu(
+// CHECK-RV64-SAME: <vscale x 1 x i1> [[VM:%.*]], <vscale x 1 x bfloat> [[VD:%.*]], <vscale x 1 x float> [[VS2:%.*]], i64 noundef [[VL:%.*]]) #[[ATTR0]] {
+// CHECK-RV64-NEXT: [[ENTRY:.*:]]
+// CHECK-RV64-NEXT: [[TMP0:%.*]] = call <vscale x 1 x bfloat> @llvm.riscv.vfncvt.rod.f.f.w.mask.nxv1bf16.nxv1f32.i64(<vscale x 1 x bfloat> [[VD]], <vscale x 1 x float> [[VS2]], <vscale x 1 x i1> [[VM]], i64 [[VL]], i64 0)
+// CHECK-RV64-NEXT: ret <vscale x 1 x bfloat> [[TMP0]]
+//
+vbfloat16mf4_t test_vfncvt_rod_f_f_w_bf16mf4_tumu(vbool64_t vm,
+ vbfloat16mf4_t vd,
+ vfloat32mf2_t vs2,
+ size_t vl) {
+ return __riscv_vfncvt_rod_f_f_w_bf16mf4_tumu(vm, vd, vs2, vl);
+}
+
+// CHECK-RV64-LABEL: define dso_local <vscale x 2 x bfloat> @test_vfncvt_rod_f_f_w_bf16mf2_tumu(
+// CHECK-RV64-SAME: <vscale x 2 x i1> [[VM:%.*]], <vscale x 2 x bfloat> [[VD:%.*]], <vscale x 2 x float> [[VS2:%.*]], i64 noundef [[VL:%.*]]) #[[ATTR0]] {
+// CHECK-RV64-NEXT: [[ENTRY:.*:]]
+// CHECK-RV64-NEXT: [[TMP0:%.*]] = call <vscale x 2 x bfloat> @llvm.riscv.vfncvt.rod.f.f.w.mask.nxv2bf16.nxv2f32.i64(<vscale x 2 x bfloat> [[VD]], <vscale x 2 x float> [[VS2]], <vscale x 2 x i1> [[VM]], i64 [[VL]], i64 0)
+// CHECK-RV64-NEXT: ret <vscale x 2 x bfloat> [[TMP0]]
+//
+vbfloat16mf2_t test_vfncvt_rod_f_f_w_bf16mf2_tumu(vbool32_t vm,
+ vbfloat16mf2_t vd,
+ vfloat32m1_t vs2, size_t vl) {
+ return __riscv_vfncvt_rod_f_f_w_bf16mf2_tumu(vm, vd, vs2, vl);
+}
+
+// CHECK-RV64-LABEL: define dso_local <vscale x 4 x bfloat> @test_vfncvt_rod_f_f_w_bf16m1_tumu(
+// CHECK-RV64-SAME: <vscale x 4 x i1> [[VM:%.*]], <vscale x 4 x bfloat> [[VD:%.*]], <vscale x 4 x float> [[VS2:%.*]], i64 noundef [[VL:%.*]]) #[[ATTR0]] {
+// CHECK-RV64-NEXT: [[ENTRY:.*:]]
+// CHECK-RV64-NEXT: [[TMP0:%.*]] = call <vscale x 4 x bfloat> @llvm.riscv.vfncvt.rod.f.f.w.mask.nxv4bf16.nxv4f32.i64(<vscale x 4 x bfloat> [[VD]], <vscale x 4 x float> [[VS2]], <vscale x 4 x i1> [[VM]], i64 [[VL]], i64 0)
+// CHECK-RV64-NEXT: ret <vscale x 4 x bfloat> [[TMP0]]
+//
+vbfloat16m1_t test_vfncvt_rod_f_f_w_bf16m1_tumu(vbool16_t vm, vbfloat16m1_t vd,
+ vfloat32m2_t vs2, size_t vl) {
+ return __riscv_vfncvt_rod_f_f_w_bf16m1_tumu(vm, vd, vs2, vl);
+}
+
+// CHECK-RV64-LABEL: define dso_local <vscale x 8 x bfloat> @test_vfncvt_rod_f_f_w_bf16m2_tumu(
+// CHECK-RV64-SAME: <vscale x 8 x i1> [[VM:%.*]], <vscale x 8 x bfloat> [[VD:%.*]], <vscale x 8 x float> [[VS2:%.*]], i64 noundef [[VL:%.*]]) #[[ATTR0]] {
+// CHECK-RV64-NEXT: [[ENTRY:.*:]]
+// CHECK-RV64-NEXT: [[TMP0:%.*]] = call <vscale x 8 x bfloat> @llvm.riscv.vfncvt.rod.f.f.w.mask.nxv8bf16.nxv8f32.i64(<vscale x 8 x bfloat> [[VD]], <vscale x 8 x float> [[VS2]], <vscale x 8 x i1> [[VM]], i64 [[VL]], i64 0)
+// CHECK-RV64-NEXT: ret <vscale x 8 x bfloat> [[TMP0]]
+//
+vbfloat16m2_t test_vfncvt_rod_f_f_w_bf16m2_tumu(vbool8_t vm, vbfloat16m2_t vd,
+ vfloat32m4_t vs2, size_t vl) {
+ return __riscv_vfncvt_rod_f_f_w_bf16m2_tumu(vm, vd, vs2, vl);
+}
+
+// CHECK-RV64-LABEL: define dso_local <vscale x 16 x bfloat> @test_vfncvt_rod_f_f_w_bf16m4_tumu(
+// CHECK-RV64-SAME: <vscale x 16 x i1> [[VM:%.*]], <vscale x 16 x bfloat> [[VD:%.*]], <vscale x 16 x float> [[VS2:%.*]], i64 noundef [[VL:%.*]]) #[[ATTR0]] {
+// CHECK-RV64-NEXT: [[ENTRY:.*:]]
+// CHECK-RV64-NEXT: [[TMP0:%.*]] = call <vscale x 16 x bfloat> @llvm.riscv.vfncvt.rod.f.f.w.mask.nxv16bf16.nxv16f32.i64(<vscale x 16 x bfloat> [[VD]], <vscale x 16 x float> [[VS2]], <vscale x 16 x i1> [[VM]], i64 [[VL]], i64 0)
+// CHECK-RV64-NEXT: ret <vscale x 16 x bfloat> [[TMP0]]
+//
+vbfloat16m4_t test_vfncvt_rod_f_f_w_bf16m4_tumu(vbool4_t vm, vbfloat16m4_t vd,
+ vfloat32m8_t vs2, size_t vl) {
+ return __riscv_vfncvt_rod_f_f_w_bf16m4_tumu(vm, vd, vs2, vl);
+}
+
+// CHECK-RV64-LABEL: define dso_local <vscale x 1 x bfloat> @test_vfncvt_rod_f_f_w_bf16mf4_mu(
+// CHECK-RV64-SAME: <vscale x 1 x i1> [[VM:%.*]], <vscale x 1 x bfloat> [[VD:%.*]], <vscale x 1 x float> [[VS2:%.*]], i64 noundef [[VL:%.*]]) #[[ATTR0]] {
+// CHECK-RV64-NEXT: [[ENTRY:.*:]]
+// CHECK-RV64-NEXT: [[TMP0:%.*]] = call <vscale x 1 x bfloat> @llvm.riscv.vfncvt.rod.f.f.w.mask.nxv1bf16.nxv1f32.i64(<vscale x 1 x bfloat> [[VD]], <vscale x 1 x float> [[VS2]], <vscale x 1 x i1> [[VM]], i64 [[VL]], i64 1)
+// CHECK-RV64-NEXT: ret <vscale x 1 x bfloat> [[TMP0]]
+//
+vbfloat16mf4_t test_vfncvt_rod_f_f_w_bf16mf4_mu(vbool64_t vm, vbfloat16mf4_t vd,
+ vfloat32mf2_t vs2, size_t vl) {
+ return __riscv_vfncvt_rod_f_f_w_bf16mf4_mu(vm, vd, vs2, vl);
+}
+
+// CHECK-RV64-LABEL: define dso_local <vscale x 2 x bfloat> @test_vfncvt_rod_f_f_w_bf16mf2_mu(
+// CHECK-RV64-SAME: <vscale x 2 x i1> [[VM:%.*]], <vscale x 2 x bfloat> [[VD:%.*]], <vscale x 2 x float> [[VS2:%.*]], i64 noundef [[VL:%.*]]) #[[ATTR0]] {
+// CHECK-RV64-NEXT: [[ENTRY:.*:]]
+// CHECK-RV64-NEXT: [[TMP0:%.*]] = call <vscale x 2 x bfloat> @llvm.riscv.vfncvt.rod.f.f.w.mask.nxv2bf16.nxv2f32.i64(<vscale x 2 x bfloat> [[VD]], <vscale x 2 x float> [[VS2]], <vscale x 2 x i1> [[VM]], i64 [[VL]], i64 1)
+// CHECK-RV64-NEXT: ret <vscale x 2 x bfloat> [[TMP0]]
+//
+vbfloat16mf2_t test_vfncvt_rod_f_f_w_bf16mf2_mu(vbool32_t vm, vbfloat16mf2_t vd,
+ vfloat32m1_t vs2, size_t vl) {
+ return __riscv_vfncvt_rod_f_f_w_bf16mf2_mu(vm, vd, vs2, vl);
+}
+
+// CHECK-RV64-LABEL: define dso_local <vscale x 4 x bfloat> @test_vfncvt_rod_f_f_w_bf16m1_mu(
+// CHECK-RV64-SAME: <vscale x 4 x i1> [[VM:%.*]], <vscale x 4 x bfloat> [[VD:%.*]], <vscale x 4 x float> [[VS2:%.*]], i64 noundef [[VL:%.*]]) #[[ATTR0]] {
+// CHECK-RV64-NEXT: [[ENTRY:.*:]]
+// CHECK-RV64-NEXT: [[TMP0:%.*]] = call <vscale x 4 x bfloat> @llvm.riscv.vfncvt.rod.f.f.w.mask.nxv4bf16.nxv4f32.i64(<vscale x 4 x bfloat> [[VD]], <vscale x 4 x float> [[VS2]], <vscale x 4 x i1> [[VM]], i64 [[VL]], i64 1)
+// CHECK-RV64-NEXT: ret <vscale x 4 x bfloat> [[TMP0]]
+//
+vbfloat16m1_t test_vfncvt_rod_f_f_w_bf16m1_mu(vbool16_t vm, vbfloat16m1_t vd,
+ vfloat32m2_t vs2, size_t vl) {
+ return __riscv_vfncvt_rod_f_f_w_bf16m1_mu(vm, vd, vs2, vl);
+}
+
+// CHECK-RV64-LABEL: define dso_local <vscale x 8 x bfloat> @test_vfncvt_rod_f_f_w_bf16m2_mu(
+// CHECK-RV64-SAME: <vscale x 8 x i1> [[VM:%.*]], <vscale x 8 x bfloat> [[VD:%.*]], <vscale x 8 x float> [[VS2:%.*]], i64 noundef [[VL:%.*]]) #[[ATTR0]] {
+// CHECK-RV64-NEXT: [[ENTRY:.*:]]
+// CHECK-RV64-NEXT: [[TMP0:%.*]] = call <vscale x 8 x bfloat> @llvm.riscv.vfncvt.rod.f.f.w.mask.nxv8bf16.nxv8f32.i64(<vscale x 8 x bfloat> [[VD]], <vscale x 8 x float> [[VS2]], <vscale x 8 x i1> [[VM]], i64 [[VL]], i64 1)
+// CHECK-RV64-NEXT: ret <vscale x 8 x bfloat> [[TMP0]]
+//
+vbfloat16m2_t test_vfncvt_rod_f_f_w_bf16m2_mu(vbool8_t vm, vbfloat16m2_t vd,
+ vfloat32m4_t vs2, size_t vl) {
+ return __riscv_vfncvt_rod_f_f_w_bf16m2_mu(vm, vd, vs2, vl);
+}
+
+// CHECK-RV64-LABEL: define dso_local <vscale x 16 x bfloat> @test_vfncvt_rod_f_f_w_bf16m4_mu(
+// CHECK-RV64-SAME: <vscale x 16 x i1> [[VM:%.*]], <vscale x 16 x bfloat> [[VD:%.*]], <vscale x 16 x float> [[VS2:%.*]], i64 noundef [[VL:%.*]]) #[[ATTR0]] {
+// CHECK-RV64-NEXT: [[ENTRY:.*:]]
+// CHECK-RV64-NEXT: [[TMP0:%.*]] = call <vscale x 16 x bfloat> @llvm.riscv.vfncvt.rod.f.f.w.mask.nxv16bf16.nxv16f32.i64(<vscale x 16 x bfloat> [[VD]], <vscale x 16 x float> [[VS2]], <vscale x 16 x i1> [[VM]], i64 [[VL]], i64 1)
+// CHECK-RV64-NEXT: ret <vscale x 16 x bfloat> [[TMP0]]
+//
+vbfloat16m4_t test_vfncvt_rod_f_f_w_bf16m4_mu(vbool4_t vm, vbfloat16m4_t vd,
+ vfloat32m8_t vs2, size_t vl) {
+ return __riscv_vfncvt_rod_f_f_w_bf16m4_mu(vm, vd, vs2, vl);
+}
diff --git a/clang/test/CodeGen/RISCV/rvv-intrinsics-autogenerated/zvfbfa/policy/non-overloaded/vfncvt_rtz.c b/clang/test/CodeGen/RISCV/rvv-intrinsics-autogenerated/zvfbfa/policy/non-overloaded/vfncvt_rtz.c
new file mode 100644
index 0000000..4644eff
--- /dev/null
+++ b/clang/test/CodeGen/RISCV/rvv-intrinsics-autogenerated/zvfbfa/policy/non-overloaded/vfncvt_rtz.c
@@ -0,0 +1,572 @@
+// NOTE: Assertions have been autogenerated by utils/update_cc_test_checks.py UTC_ARGS: --version 5
+// REQUIRES: riscv-registered-target
+// RUN: %clang_cc1 -triple riscv64 -target-feature +v \
+// RUN: -target-feature +experimental-zvfbfa -disable-O0-optnone \
+// RUN: -emit-llvm %s -o - | opt -S -passes=mem2reg | \
+// RUN: FileCheck --check-prefix=CHECK-RV64 %s
+
+#include <riscv_vector.h>
+
+// CHECK-RV64-LABEL: define dso_local <vscale x 1 x i8> @test_vfncvt_rtz_x_f_w_bf16mf4_i8mf8_tu(
+// CHECK-RV64-SAME: <vscale x 1 x i8> [[VD:%.*]], <vscale x 1 x bfloat> [[VS2:%.*]], i64 noundef [[VL:%.*]]) #[[ATTR0:[0-9]+]] {
+// CHECK-RV64-NEXT: [[ENTRY:.*:]]
+// CHECK-RV64-NEXT: [[TMP0:%.*]] = call <vscale x 1 x i8> @llvm.riscv.vfncvt.rtz.x.f.w.nxv1i8.nxv1bf16.i64(<vscale x 1 x i8> [[VD]], <vscale x 1 x bfloat> [[VS2]], i64 [[VL]])
+// CHECK-RV64-NEXT: ret <vscale x 1 x i8> [[TMP0]]
+//
+vint8mf8_t test_vfncvt_rtz_x_f_w_bf16mf4_i8mf8_tu(vint8mf8_t vd,
+ vbfloat16mf4_t vs2,
+ size_t vl) {
+ return __riscv_vfncvt_rtz_x_f_w_bf16mf4_i8mf8_tu(vd, vs2, vl);
+}
+
+// CHECK-RV64-LABEL: define dso_local <vscale x 2 x i8> @test_vfncvt_rtz_x_f_w_bf16mf2_i8mf4_tu(
+// CHECK-RV64-SAME: <vscale x 2 x i8> [[VD:%.*]], <vscale x 2 x bfloat> [[VS2:%.*]], i64 noundef [[VL:%.*]]) #[[ATTR0]] {
+// CHECK-RV64-NEXT: [[ENTRY:.*:]]
+// CHECK-RV64-NEXT: [[TMP0:%.*]] = call <vscale x 2 x i8> @llvm.riscv.vfncvt.rtz.x.f.w.nxv2i8.nxv2bf16.i64(<vscale x 2 x i8> [[VD]], <vscale x 2 x bfloat> [[VS2]], i64 [[VL]])
+// CHECK-RV64-NEXT: ret <vscale x 2 x i8> [[TMP0]]
+//
+vint8mf4_t test_vfncvt_rtz_x_f_w_bf16mf2_i8mf4_tu(vint8mf4_t vd,
+ vbfloat16mf2_t vs2,
+ size_t vl) {
+ return __riscv_vfncvt_rtz_x_f_w_bf16mf2_i8mf4_tu(vd, vs2, vl);
+}
+
+// CHECK-RV64-LABEL: define dso_local <vscale x 4 x i8> @test_vfncvt_rtz_x_f_w_bf16m1_i8mf2_tu(
+// CHECK-RV64-SAME: <vscale x 4 x i8> [[VD:%.*]], <vscale x 4 x bfloat> [[VS2:%.*]], i64 noundef [[VL:%.*]]) #[[ATTR0]] {
+// CHECK-RV64-NEXT: [[ENTRY:.*:]]
+// CHECK-RV64-NEXT: [[TMP0:%.*]] = call <vscale x 4 x i8> @llvm.riscv.vfncvt.rtz.x.f.w.nxv4i8.nxv4bf16.i64(<vscale x 4 x i8> [[VD]], <vscale x 4 x bfloat> [[VS2]], i64 [[VL]])
+// CHECK-RV64-NEXT: ret <vscale x 4 x i8> [[TMP0]]
+//
+vint8mf2_t test_vfncvt_rtz_x_f_w_bf16m1_i8mf2_tu(vint8mf2_t vd,
+ vbfloat16m1_t vs2, size_t vl) {
+ return __riscv_vfncvt_rtz_x_f_w_bf16m1_i8mf2_tu(vd, vs2, vl);
+}
+
+// CHECK-RV64-LABEL: define dso_local <vscale x 8 x i8> @test_vfncvt_rtz_x_f_w_bf16m2_i8m1_tu(
+// CHECK-RV64-SAME: <vscale x 8 x i8> [[VD:%.*]], <vscale x 8 x bfloat> [[VS2:%.*]], i64 noundef [[VL:%.*]]) #[[ATTR0]] {
+// CHECK-RV64-NEXT: [[ENTRY:.*:]]
+// CHECK-RV64-NEXT: [[TMP0:%.*]] = call <vscale x 8 x i8> @llvm.riscv.vfncvt.rtz.x.f.w.nxv8i8.nxv8bf16.i64(<vscale x 8 x i8> [[VD]], <vscale x 8 x bfloat> [[VS2]], i64 [[VL]])
+// CHECK-RV64-NEXT: ret <vscale x 8 x i8> [[TMP0]]
+//
+vint8m1_t test_vfncvt_rtz_x_f_w_bf16m2_i8m1_tu(vint8m1_t vd, vbfloat16m2_t vs2,
+ size_t vl) {
+ return __riscv_vfncvt_rtz_x_f_w_bf16m2_i8m1_tu(vd, vs2, vl);
+}
+
+// CHECK-RV64-LABEL: define dso_local <vscale x 16 x i8> @test_vfncvt_rtz_x_f_w_bf16m4_i8m2_tu(
+// CHECK-RV64-SAME: <vscale x 16 x i8> [[VD:%.*]], <vscale x 16 x bfloat> [[VS2:%.*]], i64 noundef [[VL:%.*]]) #[[ATTR0]] {
+// CHECK-RV64-NEXT: [[ENTRY:.*:]]
+// CHECK-RV64-NEXT: [[TMP0:%.*]] = call <vscale x 16 x i8> @llvm.riscv.vfncvt.rtz.x.f.w.nxv16i8.nxv16bf16.i64(<vscale x 16 x i8> [[VD]], <vscale x 16 x bfloat> [[VS2]], i64 [[VL]])
+// CHECK-RV64-NEXT: ret <vscale x 16 x i8> [[TMP0]]
+//
+vint8m2_t test_vfncvt_rtz_x_f_w_bf16m4_i8m2_tu(vint8m2_t vd, vbfloat16m4_t vs2,
+ size_t vl) {
+ return __riscv_vfncvt_rtz_x_f_w_bf16m4_i8m2_tu(vd, vs2, vl);
+}
+
+// CHECK-RV64-LABEL: define dso_local <vscale x 32 x i8> @test_vfncvt_rtz_x_f_w_bf16m8_i8m4_tu(
+// CHECK-RV64-SAME: <vscale x 32 x i8> [[VD:%.*]], <vscale x 32 x bfloat> [[VS2:%.*]], i64 noundef [[VL:%.*]]) #[[ATTR0]] {
+// CHECK-RV64-NEXT: [[ENTRY:.*:]]
+// CHECK-RV64-NEXT: [[TMP0:%.*]] = call <vscale x 32 x i8> @llvm.riscv.vfncvt.rtz.x.f.w.nxv32i8.nxv32bf16.i64(<vscale x 32 x i8> [[VD]], <vscale x 32 x bfloat> [[VS2]], i64 [[VL]])
+// CHECK-RV64-NEXT: ret <vscale x 32 x i8> [[TMP0]]
+//
+vint8m4_t test_vfncvt_rtz_x_f_w_bf16m8_i8m4_tu(vint8m4_t vd, vbfloat16m8_t vs2,
+ size_t vl) {
+ return __riscv_vfncvt_rtz_x_f_w_bf16m8_i8m4_tu(vd, vs2, vl);
+}
+
+// CHECK-RV64-LABEL: define dso_local <vscale x 1 x i8> @test_vfncvt_rtz_xu_f_w_bf16mf4_u8mf8_tu(
+// CHECK-RV64-SAME: <vscale x 1 x i8> [[VD:%.*]], <vscale x 1 x bfloat> [[VS2:%.*]], i64 noundef [[VL:%.*]]) #[[ATTR0]] {
+// CHECK-RV64-NEXT: [[ENTRY:.*:]]
+// CHECK-RV64-NEXT: [[TMP0:%.*]] = call <vscale x 1 x i8> @llvm.riscv.vfncvt.rtz.xu.f.w.nxv1i8.nxv1bf16.i64(<vscale x 1 x i8> [[VD]], <vscale x 1 x bfloat> [[VS2]], i64 [[VL]])
+// CHECK-RV64-NEXT: ret <vscale x 1 x i8> [[TMP0]]
+//
+vuint8mf8_t test_vfncvt_rtz_xu_f_w_bf16mf4_u8mf8_tu(vuint8mf8_t vd,
+ vbfloat16mf4_t vs2,
+ size_t vl) {
+ return __riscv_vfncvt_rtz_xu_f_w_bf16mf4_u8mf8_tu(vd, vs2, vl);
+}
+
+// CHECK-RV64-LABEL: define dso_local <vscale x 2 x i8> @test_vfncvt_rtz_xu_f_w_bf16mf2_u8mf4_tu(
+// CHECK-RV64-SAME: <vscale x 2 x i8> [[VD:%.*]], <vscale x 2 x bfloat> [[VS2:%.*]], i64 noundef [[VL:%.*]]) #[[ATTR0]] {
+// CHECK-RV64-NEXT: [[ENTRY:.*:]]
+// CHECK-RV64-NEXT: [[TMP0:%.*]] = call <vscale x 2 x i8> @llvm.riscv.vfncvt.rtz.xu.f.w.nxv2i8.nxv2bf16.i64(<vscale x 2 x i8> [[VD]], <vscale x 2 x bfloat> [[VS2]], i64 [[VL]])
+// CHECK-RV64-NEXT: ret <vscale x 2 x i8> [[TMP0]]
+//
+vuint8mf4_t test_vfncvt_rtz_xu_f_w_bf16mf2_u8mf4_tu(vuint8mf4_t vd,
+ vbfloat16mf2_t vs2,
+ size_t vl) {
+ return __riscv_vfncvt_rtz_xu_f_w_bf16mf2_u8mf4_tu(vd, vs2, vl);
+}
+
+// CHECK-RV64-LABEL: define dso_local <vscale x 4 x i8> @test_vfncvt_rtz_xu_f_w_bf16m1_u8mf2_tu(
+// CHECK-RV64-SAME: <vscale x 4 x i8> [[VD:%.*]], <vscale x 4 x bfloat> [[VS2:%.*]], i64 noundef [[VL:%.*]]) #[[ATTR0]] {
+// CHECK-RV64-NEXT: [[ENTRY:.*:]]
+// CHECK-RV64-NEXT: [[TMP0:%.*]] = call <vscale x 4 x i8> @llvm.riscv.vfncvt.rtz.xu.f.w.nxv4i8.nxv4bf16.i64(<vscale x 4 x i8> [[VD]], <vscale x 4 x bfloat> [[VS2]], i64 [[VL]])
+// CHECK-RV64-NEXT: ret <vscale x 4 x i8> [[TMP0]]
+//
+vuint8mf2_t test_vfncvt_rtz_xu_f_w_bf16m1_u8mf2_tu(vuint8mf2_t vd,
+ vbfloat16m1_t vs2,
+ size_t vl) {
+ return __riscv_vfncvt_rtz_xu_f_w_bf16m1_u8mf2_tu(vd, vs2, vl);
+}
+
+// CHECK-RV64-LABEL: define dso_local <vscale x 8 x i8> @test_vfncvt_rtz_xu_f_w_bf16m2_u8m1_tu(
+// CHECK-RV64-SAME: <vscale x 8 x i8> [[VD:%.*]], <vscale x 8 x bfloat> [[VS2:%.*]], i64 noundef [[VL:%.*]]) #[[ATTR0]] {
+// CHECK-RV64-NEXT: [[ENTRY:.*:]]
+// CHECK-RV64-NEXT: [[TMP0:%.*]] = call <vscale x 8 x i8> @llvm.riscv.vfncvt.rtz.xu.f.w.nxv8i8.nxv8bf16.i64(<vscale x 8 x i8> [[VD]], <vscale x 8 x bfloat> [[VS2]], i64 [[VL]])
+// CHECK-RV64-NEXT: ret <vscale x 8 x i8> [[TMP0]]
+//
+vuint8m1_t test_vfncvt_rtz_xu_f_w_bf16m2_u8m1_tu(vuint8m1_t vd,
+ vbfloat16m2_t vs2, size_t vl) {
+ return __riscv_vfncvt_rtz_xu_f_w_bf16m2_u8m1_tu(vd, vs2, vl);
+}
+
+// CHECK-RV64-LABEL: define dso_local <vscale x 16 x i8> @test_vfncvt_rtz_xu_f_w_bf16m4_u8m2_tu(
+// CHECK-RV64-SAME: <vscale x 16 x i8> [[VD:%.*]], <vscale x 16 x bfloat> [[VS2:%.*]], i64 noundef [[VL:%.*]]) #[[ATTR0]] {
+// CHECK-RV64-NEXT: [[ENTRY:.*:]]
+// CHECK-RV64-NEXT: [[TMP0:%.*]] = call <vscale x 16 x i8> @llvm.riscv.vfncvt.rtz.xu.f.w.nxv16i8.nxv16bf16.i64(<vscale x 16 x i8> [[VD]], <vscale x 16 x bfloat> [[VS2]], i64 [[VL]])
+// CHECK-RV64-NEXT: ret <vscale x 16 x i8> [[TMP0]]
+//
+vuint8m2_t test_vfncvt_rtz_xu_f_w_bf16m4_u8m2_tu(vuint8m2_t vd,
+ vbfloat16m4_t vs2, size_t vl) {
+ return __riscv_vfncvt_rtz_xu_f_w_bf16m4_u8m2_tu(vd, vs2, vl);
+}
+
+// CHECK-RV64-LABEL: define dso_local <vscale x 32 x i8> @test_vfncvt_rtz_xu_f_w_bf16m8_u8m4_tu(
+// CHECK-RV64-SAME: <vscale x 32 x i8> [[VD:%.*]], <vscale x 32 x bfloat> [[VS2:%.*]], i64 noundef [[VL:%.*]]) #[[ATTR0]] {
+// CHECK-RV64-NEXT: [[ENTRY:.*:]]
+// CHECK-RV64-NEXT: [[TMP0:%.*]] = call <vscale x 32 x i8> @llvm.riscv.vfncvt.rtz.xu.f.w.nxv32i8.nxv32bf16.i64(<vscale x 32 x i8> [[VD]], <vscale x 32 x bfloat> [[VS2]], i64 [[VL]])
+// CHECK-RV64-NEXT: ret <vscale x 32 x i8> [[TMP0]]
+//
+vuint8m4_t test_vfncvt_rtz_xu_f_w_bf16m8_u8m4_tu(vuint8m4_t vd,
+ vbfloat16m8_t vs2, size_t vl) {
+ return __riscv_vfncvt_rtz_xu_f_w_bf16m8_u8m4_tu(vd, vs2, vl);
+}
+
+// CHECK-RV64-LABEL: define dso_local <vscale x 1 x i8> @test_vfncvt_rtz_x_f_w_bf16mf4_i8mf8_tum(
+// CHECK-RV64-SAME: <vscale x 1 x i1> [[VM:%.*]], <vscale x 1 x i8> [[VD:%.*]], <vscale x 1 x bfloat> [[VS2:%.*]], i64 noundef [[VL:%.*]]) #[[ATTR0]] {
+// CHECK-RV64-NEXT: [[ENTRY:.*:]]
+// CHECK-RV64-NEXT: [[TMP0:%.*]] = call <vscale x 1 x i8> @llvm.riscv.vfncvt.rtz.x.f.w.mask.nxv1i8.nxv1bf16.i64(<vscale x 1 x i8> [[VD]], <vscale x 1 x bfloat> [[VS2]], <vscale x 1 x i1> [[VM]], i64 [[VL]], i64 2)
+// CHECK-RV64-NEXT: ret <vscale x 1 x i8> [[TMP0]]
+//
+vint8mf8_t test_vfncvt_rtz_x_f_w_bf16mf4_i8mf8_tum(vbool64_t vm, vint8mf8_t vd,
+ vbfloat16mf4_t vs2,
+ size_t vl) {
+ return __riscv_vfncvt_rtz_x_f_w_bf16mf4_i8mf8_tum(vm, vd, vs2, vl);
+}
+
+// CHECK-RV64-LABEL: define dso_local <vscale x 2 x i8> @test_vfncvt_rtz_x_f_w_bf16mf2_i8mf4_tum(
+// CHECK-RV64-SAME: <vscale x 2 x i1> [[VM:%.*]], <vscale x 2 x i8> [[VD:%.*]], <vscale x 2 x bfloat> [[VS2:%.*]], i64 noundef [[VL:%.*]]) #[[ATTR0]] {
+// CHECK-RV64-NEXT: [[ENTRY:.*:]]
+// CHECK-RV64-NEXT: [[TMP0:%.*]] = call <vscale x 2 x i8> @llvm.riscv.vfncvt.rtz.x.f.w.mask.nxv2i8.nxv2bf16.i64(<vscale x 2 x i8> [[VD]], <vscale x 2 x bfloat> [[VS2]], <vscale x 2 x i1> [[VM]], i64 [[VL]], i64 2)
+// CHECK-RV64-NEXT: ret <vscale x 2 x i8> [[TMP0]]
+//
+vint8mf4_t test_vfncvt_rtz_x_f_w_bf16mf2_i8mf4_tum(vbool32_t vm, vint8mf4_t vd,
+ vbfloat16mf2_t vs2,
+ size_t vl) {
+ return __riscv_vfncvt_rtz_x_f_w_bf16mf2_i8mf4_tum(vm, vd, vs2, vl);
+}
+
+// CHECK-RV64-LABEL: define dso_local <vscale x 4 x i8> @test_vfncvt_rtz_x_f_w_bf16m1_i8mf2_tum(
+// CHECK-RV64-SAME: <vscale x 4 x i1> [[VM:%.*]], <vscale x 4 x i8> [[VD:%.*]], <vscale x 4 x bfloat> [[VS2:%.*]], i64 noundef [[VL:%.*]]) #[[ATTR0]] {
+// CHECK-RV64-NEXT: [[ENTRY:.*:]]
+// CHECK-RV64-NEXT: [[TMP0:%.*]] = call <vscale x 4 x i8> @llvm.riscv.vfncvt.rtz.x.f.w.mask.nxv4i8.nxv4bf16.i64(<vscale x 4 x i8> [[VD]], <vscale x 4 x bfloat> [[VS2]], <vscale x 4 x i1> [[VM]], i64 [[VL]], i64 2)
+// CHECK-RV64-NEXT: ret <vscale x 4 x i8> [[TMP0]]
+//
+vint8mf2_t test_vfncvt_rtz_x_f_w_bf16m1_i8mf2_tum(vbool16_t vm, vint8mf2_t vd,
+ vbfloat16m1_t vs2,
+ size_t vl) {
+ return __riscv_vfncvt_rtz_x_f_w_bf16m1_i8mf2_tum(vm, vd, vs2, vl);
+}
+
+// CHECK-RV64-LABEL: define dso_local <vscale x 8 x i8> @test_vfncvt_rtz_x_f_w_bf16m2_i8m1_tum(
+// CHECK-RV64-SAME: <vscale x 8 x i1> [[VM:%.*]], <vscale x 8 x i8> [[VD:%.*]], <vscale x 8 x bfloat> [[VS2:%.*]], i64 noundef [[VL:%.*]]) #[[ATTR0]] {
+// CHECK-RV64-NEXT: [[ENTRY:.*:]]
+// CHECK-RV64-NEXT: [[TMP0:%.*]] = call <vscale x 8 x i8> @llvm.riscv.vfncvt.rtz.x.f.w.mask.nxv8i8.nxv8bf16.i64(<vscale x 8 x i8> [[VD]], <vscale x 8 x bfloat> [[VS2]], <vscale x 8 x i1> [[VM]], i64 [[VL]], i64 2)
+// CHECK-RV64-NEXT: ret <vscale x 8 x i8> [[TMP0]]
+//
+vint8m1_t test_vfncvt_rtz_x_f_w_bf16m2_i8m1_tum(vbool8_t vm, vint8m1_t vd,
+ vbfloat16m2_t vs2, size_t vl) {
+ return __riscv_vfncvt_rtz_x_f_w_bf16m2_i8m1_tum(vm, vd, vs2, vl);
+}
+
+// CHECK-RV64-LABEL: define dso_local <vscale x 16 x i8> @test_vfncvt_rtz_x_f_w_bf16m4_i8m2_tum(
+// CHECK-RV64-SAME: <vscale x 16 x i1> [[VM:%.*]], <vscale x 16 x i8> [[VD:%.*]], <vscale x 16 x bfloat> [[VS2:%.*]], i64 noundef [[VL:%.*]]) #[[ATTR0]] {
+// CHECK-RV64-NEXT: [[ENTRY:.*:]]
+// CHECK-RV64-NEXT: [[TMP0:%.*]] = call <vscale x 16 x i8> @llvm.riscv.vfncvt.rtz.x.f.w.mask.nxv16i8.nxv16bf16.i64(<vscale x 16 x i8> [[VD]], <vscale x 16 x bfloat> [[VS2]], <vscale x 16 x i1> [[VM]], i64 [[VL]], i64 2)
+// CHECK-RV64-NEXT: ret <vscale x 16 x i8> [[TMP0]]
+//
+vint8m2_t test_vfncvt_rtz_x_f_w_bf16m4_i8m2_tum(vbool4_t vm, vint8m2_t vd,
+ vbfloat16m4_t vs2, size_t vl) {
+ return __riscv_vfncvt_rtz_x_f_w_bf16m4_i8m2_tum(vm, vd, vs2, vl);
+}
+
+// CHECK-RV64-LABEL: define dso_local <vscale x 32 x i8> @test_vfncvt_rtz_x_f_w_bf16m8_i8m4_tum(
+// CHECK-RV64-SAME: <vscale x 32 x i1> [[VM:%.*]], <vscale x 32 x i8> [[VD:%.*]], <vscale x 32 x bfloat> [[VS2:%.*]], i64 noundef [[VL:%.*]]) #[[ATTR0]] {
+// CHECK-RV64-NEXT: [[ENTRY:.*:]]
+// CHECK-RV64-NEXT: [[TMP0:%.*]] = call <vscale x 32 x i8> @llvm.riscv.vfncvt.rtz.x.f.w.mask.nxv32i8.nxv32bf16.i64(<vscale x 32 x i8> [[VD]], <vscale x 32 x bfloat> [[VS2]], <vscale x 32 x i1> [[VM]], i64 [[VL]], i64 2)
+// CHECK-RV64-NEXT: ret <vscale x 32 x i8> [[TMP0]]
+//
+vint8m4_t test_vfncvt_rtz_x_f_w_bf16m8_i8m4_tum(vbool2_t vm, vint8m4_t vd,
+ vbfloat16m8_t vs2, size_t vl) {
+ return __riscv_vfncvt_rtz_x_f_w_bf16m8_i8m4_tum(vm, vd, vs2, vl);
+}
+
+// CHECK-RV64-LABEL: define dso_local <vscale x 1 x i8> @test_vfncvt_rtz_xu_f_w_bf16mf4_u8mf8_tum(
+// CHECK-RV64-SAME: <vscale x 1 x i1> [[VM:%.*]], <vscale x 1 x i8> [[VD:%.*]], <vscale x 1 x bfloat> [[VS2:%.*]], i64 noundef [[VL:%.*]]) #[[ATTR0]] {
+// CHECK-RV64-NEXT: [[ENTRY:.*:]]
+// CHECK-RV64-NEXT: [[TMP0:%.*]] = call <vscale x 1 x i8> @llvm.riscv.vfncvt.rtz.xu.f.w.mask.nxv1i8.nxv1bf16.i64(<vscale x 1 x i8> [[VD]], <vscale x 1 x bfloat> [[VS2]], <vscale x 1 x i1> [[VM]], i64 [[VL]], i64 2)
+// CHECK-RV64-NEXT: ret <vscale x 1 x i8> [[TMP0]]
+//
+vuint8mf8_t test_vfncvt_rtz_xu_f_w_bf16mf4_u8mf8_tum(vbool64_t vm,
+ vuint8mf8_t vd,
+ vbfloat16mf4_t vs2,
+ size_t vl) {
+ return __riscv_vfncvt_rtz_xu_f_w_bf16mf4_u8mf8_tum(vm, vd, vs2, vl);
+}
+
+// CHECK-RV64-LABEL: define dso_local <vscale x 2 x i8> @test_vfncvt_rtz_xu_f_w_bf16mf2_u8mf4_tum(
+// CHECK-RV64-SAME: <vscale x 2 x i1> [[VM:%.*]], <vscale x 2 x i8> [[VD:%.*]], <vscale x 2 x bfloat> [[VS2:%.*]], i64 noundef [[VL:%.*]]) #[[ATTR0]] {
+// CHECK-RV64-NEXT: [[ENTRY:.*:]]
+// CHECK-RV64-NEXT: [[TMP0:%.*]] = call <vscale x 2 x i8> @llvm.riscv.vfncvt.rtz.xu.f.w.mask.nxv2i8.nxv2bf16.i64(<vscale x 2 x i8> [[VD]], <vscale x 2 x bfloat> [[VS2]], <vscale x 2 x i1> [[VM]], i64 [[VL]], i64 2)
+// CHECK-RV64-NEXT: ret <vscale x 2 x i8> [[TMP0]]
+//
+vuint8mf4_t test_vfncvt_rtz_xu_f_w_bf16mf2_u8mf4_tum(vbool32_t vm,
+ vuint8mf4_t vd,
+ vbfloat16mf2_t vs2,
+ size_t vl) {
+ return __riscv_vfncvt_rtz_xu_f_w_bf16mf2_u8mf4_tum(vm, vd, vs2, vl);
+}
+
+// CHECK-RV64-LABEL: define dso_local <vscale x 4 x i8> @test_vfncvt_rtz_xu_f_w_bf16m1_u8mf2_tum(
+// CHECK-RV64-SAME: <vscale x 4 x i1> [[VM:%.*]], <vscale x 4 x i8> [[VD:%.*]], <vscale x 4 x bfloat> [[VS2:%.*]], i64 noundef [[VL:%.*]]) #[[ATTR0]] {
+// CHECK-RV64-NEXT: [[ENTRY:.*:]]
+// CHECK-RV64-NEXT: [[TMP0:%.*]] = call <vscale x 4 x i8> @llvm.riscv.vfncvt.rtz.xu.f.w.mask.nxv4i8.nxv4bf16.i64(<vscale x 4 x i8> [[VD]], <vscale x 4 x bfloat> [[VS2]], <vscale x 4 x i1> [[VM]], i64 [[VL]], i64 2)
+// CHECK-RV64-NEXT: ret <vscale x 4 x i8> [[TMP0]]
+//
+vuint8mf2_t test_vfncvt_rtz_xu_f_w_bf16m1_u8mf2_tum(vbool16_t vm,
+ vuint8mf2_t vd,
+ vbfloat16m1_t vs2,
+ size_t vl) {
+ return __riscv_vfncvt_rtz_xu_f_w_bf16m1_u8mf2_tum(vm, vd, vs2, vl);
+}
+
+// CHECK-RV64-LABEL: define dso_local <vscale x 8 x i8> @test_vfncvt_rtz_xu_f_w_bf16m2_u8m1_tum(
+// CHECK-RV64-SAME: <vscale x 8 x i1> [[VM:%.*]], <vscale x 8 x i8> [[VD:%.*]], <vscale x 8 x bfloat> [[VS2:%.*]], i64 noundef [[VL:%.*]]) #[[ATTR0]] {
+// CHECK-RV64-NEXT: [[ENTRY:.*:]]
+// CHECK-RV64-NEXT: [[TMP0:%.*]] = call <vscale x 8 x i8> @llvm.riscv.vfncvt.rtz.xu.f.w.mask.nxv8i8.nxv8bf16.i64(<vscale x 8 x i8> [[VD]], <vscale x 8 x bfloat> [[VS2]], <vscale x 8 x i1> [[VM]], i64 [[VL]], i64 2)
+// CHECK-RV64-NEXT: ret <vscale x 8 x i8> [[TMP0]]
+//
+vuint8m1_t test_vfncvt_rtz_xu_f_w_bf16m2_u8m1_tum(vbool8_t vm, vuint8m1_t vd,
+ vbfloat16m2_t vs2,
+ size_t vl) {
+ return __riscv_vfncvt_rtz_xu_f_w_bf16m2_u8m1_tum(vm, vd, vs2, vl);
+}
+
+// CHECK-RV64-LABEL: define dso_local <vscale x 16 x i8> @test_vfncvt_rtz_xu_f_w_bf16m4_u8m2_tum(
+// CHECK-RV64-SAME: <vscale x 16 x i1> [[VM:%.*]], <vscale x 16 x i8> [[VD:%.*]], <vscale x 16 x bfloat> [[VS2:%.*]], i64 noundef [[VL:%.*]]) #[[ATTR0]] {
+// CHECK-RV64-NEXT: [[ENTRY:.*:]]
+// CHECK-RV64-NEXT: [[TMP0:%.*]] = call <vscale x 16 x i8> @llvm.riscv.vfncvt.rtz.xu.f.w.mask.nxv16i8.nxv16bf16.i64(<vscale x 16 x i8> [[VD]], <vscale x 16 x bfloat> [[VS2]], <vscale x 16 x i1> [[VM]], i64 [[VL]], i64 2)
+// CHECK-RV64-NEXT: ret <vscale x 16 x i8> [[TMP0]]
+//
+vuint8m2_t test_vfncvt_rtz_xu_f_w_bf16m4_u8m2_tum(vbool4_t vm, vuint8m2_t vd,
+ vbfloat16m4_t vs2,
+ size_t vl) {
+ return __riscv_vfncvt_rtz_xu_f_w_bf16m4_u8m2_tum(vm, vd, vs2, vl);
+}
+
+// CHECK-RV64-LABEL: define dso_local <vscale x 32 x i8> @test_vfncvt_rtz_xu_f_w_bf16m8_u8m4_tum(
+// CHECK-RV64-SAME: <vscale x 32 x i1> [[VM:%.*]], <vscale x 32 x i8> [[VD:%.*]], <vscale x 32 x bfloat> [[VS2:%.*]], i64 noundef [[VL:%.*]]) #[[ATTR0]] {
+// CHECK-RV64-NEXT: [[ENTRY:.*:]]
+// CHECK-RV64-NEXT: [[TMP0:%.*]] = call <vscale x 32 x i8> @llvm.riscv.vfncvt.rtz.xu.f.w.mask.nxv32i8.nxv32bf16.i64(<vscale x 32 x i8> [[VD]], <vscale x 32 x bfloat> [[VS2]], <vscale x 32 x i1> [[VM]], i64 [[VL]], i64 2)
+// CHECK-RV64-NEXT: ret <vscale x 32 x i8> [[TMP0]]
+//
+vuint8m4_t test_vfncvt_rtz_xu_f_w_bf16m8_u8m4_tum(vbool2_t vm, vuint8m4_t vd,
+ vbfloat16m8_t vs2,
+ size_t vl) {
+ return __riscv_vfncvt_rtz_xu_f_w_bf16m8_u8m4_tum(vm, vd, vs2, vl);
+}
+
+// CHECK-RV64-LABEL: define dso_local <vscale x 1 x i8> @test_vfncvt_rtz_x_f_w_bf16mf4_i8mf8_tumu(
+// CHECK-RV64-SAME: <vscale x 1 x i1> [[VM:%.*]], <vscale x 1 x i8> [[VD:%.*]], <vscale x 1 x bfloat> [[VS2:%.*]], i64 noundef [[VL:%.*]]) #[[ATTR0]] {
+// CHECK-RV64-NEXT: [[ENTRY:.*:]]
+// CHECK-RV64-NEXT: [[TMP0:%.*]] = call <vscale x 1 x i8> @llvm.riscv.vfncvt.rtz.x.f.w.mask.nxv1i8.nxv1bf16.i64(<vscale x 1 x i8> [[VD]], <vscale x 1 x bfloat> [[VS2]], <vscale x 1 x i1> [[VM]], i64 [[VL]], i64 0)
+// CHECK-RV64-NEXT: ret <vscale x 1 x i8> [[TMP0]]
+//
+vint8mf8_t test_vfncvt_rtz_x_f_w_bf16mf4_i8mf8_tumu(vbool64_t vm, vint8mf8_t vd,
+ vbfloat16mf4_t vs2,
+ size_t vl) {
+ return __riscv_vfncvt_rtz_x_f_w_bf16mf4_i8mf8_tumu(vm, vd, vs2, vl);
+}
+
+// CHECK-RV64-LABEL: define dso_local <vscale x 2 x i8> @test_vfncvt_rtz_x_f_w_bf16mf2_i8mf4_tumu(
+// CHECK-RV64-SAME: <vscale x 2 x i1> [[VM:%.*]], <vscale x 2 x i8> [[VD:%.*]], <vscale x 2 x bfloat> [[VS2:%.*]], i64 noundef [[VL:%.*]]) #[[ATTR0]] {
+// CHECK-RV64-NEXT: [[ENTRY:.*:]]
+// CHECK-RV64-NEXT: [[TMP0:%.*]] = call <vscale x 2 x i8> @llvm.riscv.vfncvt.rtz.x.f.w.mask.nxv2i8.nxv2bf16.i64(<vscale x 2 x i8> [[VD]], <vscale x 2 x bfloat> [[VS2]], <vscale x 2 x i1> [[VM]], i64 [[VL]], i64 0)
+// CHECK-RV64-NEXT: ret <vscale x 2 x i8> [[TMP0]]
+//
+vint8mf4_t test_vfncvt_rtz_x_f_w_bf16mf2_i8mf4_tumu(vbool32_t vm, vint8mf4_t vd,
+ vbfloat16mf2_t vs2,
+ size_t vl) {
+ return __riscv_vfncvt_rtz_x_f_w_bf16mf2_i8mf4_tumu(vm, vd, vs2, vl);
+}
+
+// CHECK-RV64-LABEL: define dso_local <vscale x 4 x i8> @test_vfncvt_rtz_x_f_w_bf16m1_i8mf2_tumu(
+// CHECK-RV64-SAME: <vscale x 4 x i1> [[VM:%.*]], <vscale x 4 x i8> [[VD:%.*]], <vscale x 4 x bfloat> [[VS2:%.*]], i64 noundef [[VL:%.*]]) #[[ATTR0]] {
+// CHECK-RV64-NEXT: [[ENTRY:.*:]]
+// CHECK-RV64-NEXT: [[TMP0:%.*]] = call <vscale x 4 x i8> @llvm.riscv.vfncvt.rtz.x.f.w.mask.nxv4i8.nxv4bf16.i64(<vscale x 4 x i8> [[VD]], <vscale x 4 x bfloat> [[VS2]], <vscale x 4 x i1> [[VM]], i64 [[VL]], i64 0)
+// CHECK-RV64-NEXT: ret <vscale x 4 x i8> [[TMP0]]
+//
+vint8mf2_t test_vfncvt_rtz_x_f_w_bf16m1_i8mf2_tumu(vbool16_t vm, vint8mf2_t vd,
+ vbfloat16m1_t vs2,
+ size_t vl) {
+ return __riscv_vfncvt_rtz_x_f_w_bf16m1_i8mf2_tumu(vm, vd, vs2, vl);
+}
+
+// CHECK-RV64-LABEL: define dso_local <vscale x 8 x i8> @test_vfncvt_rtz_x_f_w_bf16m2_i8m1_tumu(
+// CHECK-RV64-SAME: <vscale x 8 x i1> [[VM:%.*]], <vscale x 8 x i8> [[VD:%.*]], <vscale x 8 x bfloat> [[VS2:%.*]], i64 noundef [[VL:%.*]]) #[[ATTR0]] {
+// CHECK-RV64-NEXT: [[ENTRY:.*:]]
+// CHECK-RV64-NEXT: [[TMP0:%.*]] = call <vscale x 8 x i8> @llvm.riscv.vfncvt.rtz.x.f.w.mask.nxv8i8.nxv8bf16.i64(<vscale x 8 x i8> [[VD]], <vscale x 8 x bfloat> [[VS2]], <vscale x 8 x i1> [[VM]], i64 [[VL]], i64 0)
+// CHECK-RV64-NEXT: ret <vscale x 8 x i8> [[TMP0]]
+//
+vint8m1_t test_vfncvt_rtz_x_f_w_bf16m2_i8m1_tumu(vbool8_t vm, vint8m1_t vd,
+ vbfloat16m2_t vs2, size_t vl) {
+ return __riscv_vfncvt_rtz_x_f_w_bf16m2_i8m1_tumu(vm, vd, vs2, vl);
+}
+
+// CHECK-RV64-LABEL: define dso_local <vscale x 16 x i8> @test_vfncvt_rtz_x_f_w_bf16m4_i8m2_tumu(
+// CHECK-RV64-SAME: <vscale x 16 x i1> [[VM:%.*]], <vscale x 16 x i8> [[VD:%.*]], <vscale x 16 x bfloat> [[VS2:%.*]], i64 noundef [[VL:%.*]]) #[[ATTR0]] {
+// CHECK-RV64-NEXT: [[ENTRY:.*:]]
+// CHECK-RV64-NEXT: [[TMP0:%.*]] = call <vscale x 16 x i8> @llvm.riscv.vfncvt.rtz.x.f.w.mask.nxv16i8.nxv16bf16.i64(<vscale x 16 x i8> [[VD]], <vscale x 16 x bfloat> [[VS2]], <vscale x 16 x i1> [[VM]], i64 [[VL]], i64 0)
+// CHECK-RV64-NEXT: ret <vscale x 16 x i8> [[TMP0]]
+//
+vint8m2_t test_vfncvt_rtz_x_f_w_bf16m4_i8m2_tumu(vbool4_t vm, vint8m2_t vd,
+ vbfloat16m4_t vs2, size_t vl) {
+ return __riscv_vfncvt_rtz_x_f_w_bf16m4_i8m2_tumu(vm, vd, vs2, vl);
+}
+
+// CHECK-RV64-LABEL: define dso_local <vscale x 32 x i8> @test_vfncvt_rtz_x_f_w_bf16m8_i8m4_tumu(
+// CHECK-RV64-SAME: <vscale x 32 x i1> [[VM:%.*]], <vscale x 32 x i8> [[VD:%.*]], <vscale x 32 x bfloat> [[VS2:%.*]], i64 noundef [[VL:%.*]]) #[[ATTR0]] {
+// CHECK-RV64-NEXT: [[ENTRY:.*:]]
+// CHECK-RV64-NEXT: [[TMP0:%.*]] = call <vscale x 32 x i8> @llvm.riscv.vfncvt.rtz.x.f.w.mask.nxv32i8.nxv32bf16.i64(<vscale x 32 x i8> [[VD]], <vscale x 32 x bfloat> [[VS2]], <vscale x 32 x i1> [[VM]], i64 [[VL]], i64 0)
+// CHECK-RV64-NEXT: ret <vscale x 32 x i8> [[TMP0]]
+//
+vint8m4_t test_vfncvt_rtz_x_f_w_bf16m8_i8m4_tumu(vbool2_t vm, vint8m4_t vd,
+ vbfloat16m8_t vs2, size_t vl) {
+ return __riscv_vfncvt_rtz_x_f_w_bf16m8_i8m4_tumu(vm, vd, vs2, vl);
+}
+
+// CHECK-RV64-LABEL: define dso_local <vscale x 1 x i8> @test_vfncvt_rtz_xu_f_w_bf16mf4_u8mf8_tumu(
+// CHECK-RV64-SAME: <vscale x 1 x i1> [[VM:%.*]], <vscale x 1 x i8> [[VD:%.*]], <vscale x 1 x bfloat> [[VS2:%.*]], i64 noundef [[VL:%.*]]) #[[ATTR0]] {
+// CHECK-RV64-NEXT: [[ENTRY:.*:]]
+// CHECK-RV64-NEXT: [[TMP0:%.*]] = call <vscale x 1 x i8> @llvm.riscv.vfncvt.rtz.xu.f.w.mask.nxv1i8.nxv1bf16.i64(<vscale x 1 x i8> [[VD]], <vscale x 1 x bfloat> [[VS2]], <vscale x 1 x i1> [[VM]], i64 [[VL]], i64 0)
+// CHECK-RV64-NEXT: ret <vscale x 1 x i8> [[TMP0]]
+//
+vuint8mf8_t test_vfncvt_rtz_xu_f_w_bf16mf4_u8mf8_tumu(vbool64_t vm,
+ vuint8mf8_t vd,
+ vbfloat16mf4_t vs2,
+ size_t vl) {
+ return __riscv_vfncvt_rtz_xu_f_w_bf16mf4_u8mf8_tumu(vm, vd, vs2, vl);
+}
+
+// CHECK-RV64-LABEL: define dso_local <vscale x 2 x i8> @test_vfncvt_rtz_xu_f_w_bf16mf2_u8mf4_tumu(
+// CHECK-RV64-SAME: <vscale x 2 x i1> [[VM:%.*]], <vscale x 2 x i8> [[VD:%.*]], <vscale x 2 x bfloat> [[VS2:%.*]], i64 noundef [[VL:%.*]]) #[[ATTR0]] {
+// CHECK-RV64-NEXT: [[ENTRY:.*:]]
+// CHECK-RV64-NEXT: [[TMP0:%.*]] = call <vscale x 2 x i8> @llvm.riscv.vfncvt.rtz.xu.f.w.mask.nxv2i8.nxv2bf16.i64(<vscale x 2 x i8> [[VD]], <vscale x 2 x bfloat> [[VS2]], <vscale x 2 x i1> [[VM]], i64 [[VL]], i64 0)
+// CHECK-RV64-NEXT: ret <vscale x 2 x i8> [[TMP0]]
+//
+vuint8mf4_t test_vfncvt_rtz_xu_f_w_bf16mf2_u8mf4_tumu(vbool32_t vm,
+ vuint8mf4_t vd,
+ vbfloat16mf2_t vs2,
+ size_t vl) {
+ return __riscv_vfncvt_rtz_xu_f_w_bf16mf2_u8mf4_tumu(vm, vd, vs2, vl);
+}
+
+// CHECK-RV64-LABEL: define dso_local <vscale x 4 x i8> @test_vfncvt_rtz_xu_f_w_bf16m1_u8mf2_tumu(
+// CHECK-RV64-SAME: <vscale x 4 x i1> [[VM:%.*]], <vscale x 4 x i8> [[VD:%.*]], <vscale x 4 x bfloat> [[VS2:%.*]], i64 noundef [[VL:%.*]]) #[[ATTR0]] {
+// CHECK-RV64-NEXT: [[ENTRY:.*:]]
+// CHECK-RV64-NEXT: [[TMP0:%.*]] = call <vscale x 4 x i8> @llvm.riscv.vfncvt.rtz.xu.f.w.mask.nxv4i8.nxv4bf16.i64(<vscale x 4 x i8> [[VD]], <vscale x 4 x bfloat> [[VS2]], <vscale x 4 x i1> [[VM]], i64 [[VL]], i64 0)
+// CHECK-RV64-NEXT: ret <vscale x 4 x i8> [[TMP0]]
+//
+vuint8mf2_t test_vfncvt_rtz_xu_f_w_bf16m1_u8mf2_tumu(vbool16_t vm,
+ vuint8mf2_t vd,
+ vbfloat16m1_t vs2,
+ size_t vl) {
+ return __riscv_vfncvt_rtz_xu_f_w_bf16m1_u8mf2_tumu(vm, vd, vs2, vl);
+}
+
+// CHECK-RV64-LABEL: define dso_local <vscale x 8 x i8> @test_vfncvt_rtz_xu_f_w_bf16m2_u8m1_tumu(
+// CHECK-RV64-SAME: <vscale x 8 x i1> [[VM:%.*]], <vscale x 8 x i8> [[VD:%.*]], <vscale x 8 x bfloat> [[VS2:%.*]], i64 noundef [[VL:%.*]]) #[[ATTR0]] {
+// CHECK-RV64-NEXT: [[ENTRY:.*:]]
+// CHECK-RV64-NEXT: [[TMP0:%.*]] = call <vscale x 8 x i8> @llvm.riscv.vfncvt.rtz.xu.f.w.mask.nxv8i8.nxv8bf16.i64(<vscale x 8 x i8> [[VD]], <vscale x 8 x bfloat> [[VS2]], <vscale x 8 x i1> [[VM]], i64 [[VL]], i64 0)
+// CHECK-RV64-NEXT: ret <vscale x 8 x i8> [[TMP0]]
+//
+vuint8m1_t test_vfncvt_rtz_xu_f_w_bf16m2_u8m1_tumu(vbool8_t vm, vuint8m1_t vd,
+ vbfloat16m2_t vs2,
+ size_t vl) {
+ return __riscv_vfncvt_rtz_xu_f_w_bf16m2_u8m1_tumu(vm, vd, vs2, vl);
+}
+
+// CHECK-RV64-LABEL: define dso_local <vscale x 16 x i8> @test_vfncvt_rtz_xu_f_w_bf16m4_u8m2_tumu(
+// CHECK-RV64-SAME: <vscale x 16 x i1> [[VM:%.*]], <vscale x 16 x i8> [[VD:%.*]], <vscale x 16 x bfloat> [[VS2:%.*]], i64 noundef [[VL:%.*]]) #[[ATTR0]] {
+// CHECK-RV64-NEXT: [[ENTRY:.*:]]
+// CHECK-RV64-NEXT: [[TMP0:%.*]] = call <vscale x 16 x i8> @llvm.riscv.vfncvt.rtz.xu.f.w.mask.nxv16i8.nxv16bf16.i64(<vscale x 16 x i8> [[VD]], <vscale x 16 x bfloat> [[VS2]], <vscale x 16 x i1> [[VM]], i64 [[VL]], i64 0)
+// CHECK-RV64-NEXT: ret <vscale x 16 x i8> [[TMP0]]
+//
+vuint8m2_t test_vfncvt_rtz_xu_f_w_bf16m4_u8m2_tumu(vbool4_t vm, vuint8m2_t vd,
+ vbfloat16m4_t vs2,
+ size_t vl) {
+ return __riscv_vfncvt_rtz_xu_f_w_bf16m4_u8m2_tumu(vm, vd, vs2, vl);
+}
+
+// CHECK-RV64-LABEL: define dso_local <vscale x 32 x i8> @test_vfncvt_rtz_xu_f_w_bf16m8_u8m4_tumu(
+// CHECK-RV64-SAME: <vscale x 32 x i1> [[VM:%.*]], <vscale x 32 x i8> [[VD:%.*]], <vscale x 32 x bfloat> [[VS2:%.*]], i64 noundef [[VL:%.*]]) #[[ATTR0]] {
+// CHECK-RV64-NEXT: [[ENTRY:.*:]]
+// CHECK-RV64-NEXT: [[TMP0:%.*]] = call <vscale x 32 x i8> @llvm.riscv.vfncvt.rtz.xu.f.w.mask.nxv32i8.nxv32bf16.i64(<vscale x 32 x i8> [[VD]], <vscale x 32 x bfloat> [[VS2]], <vscale x 32 x i1> [[VM]], i64 [[VL]], i64 0)
+// CHECK-RV64-NEXT: ret <vscale x 32 x i8> [[TMP0]]
+//
+vuint8m4_t test_vfncvt_rtz_xu_f_w_bf16m8_u8m4_tumu(vbool2_t vm, vuint8m4_t vd,
+ vbfloat16m8_t vs2,
+ size_t vl) {
+ return __riscv_vfncvt_rtz_xu_f_w_bf16m8_u8m4_tumu(vm, vd, vs2, vl);
+}
+
+// CHECK-RV64-LABEL: define dso_local <vscale x 1 x i8> @test_vfncvt_rtz_x_f_w_bf16mf4_i8mf8_mu(
+// CHECK-RV64-SAME: <vscale x 1 x i1> [[VM:%.*]], <vscale x 1 x i8> [[VD:%.*]], <vscale x 1 x bfloat> [[VS2:%.*]], i64 noundef [[VL:%.*]]) #[[ATTR0]] {
+// CHECK-RV64-NEXT: [[ENTRY:.*:]]
+// CHECK-RV64-NEXT: [[TMP0:%.*]] = call <vscale x 1 x i8> @llvm.riscv.vfncvt.rtz.x.f.w.mask.nxv1i8.nxv1bf16.i64(<vscale x 1 x i8> [[VD]], <vscale x 1 x bfloat> [[VS2]], <vscale x 1 x i1> [[VM]], i64 [[VL]], i64 1)
+// CHECK-RV64-NEXT: ret <vscale x 1 x i8> [[TMP0]]
+//
+vint8mf8_t test_vfncvt_rtz_x_f_w_bf16mf4_i8mf8_mu(vbool64_t vm, vint8mf8_t vd,
+ vbfloat16mf4_t vs2,
+ size_t vl) {
+ return __riscv_vfncvt_rtz_x_f_w_bf16mf4_i8mf8_mu(vm, vd, vs2, vl);
+}
+
+// CHECK-RV64-LABEL: define dso_local <vscale x 2 x i8> @test_vfncvt_rtz_x_f_w_bf16mf2_i8mf4_mu(
+// CHECK-RV64-SAME: <vscale x 2 x i1> [[VM:%.*]], <vscale x 2 x i8> [[VD:%.*]], <vscale x 2 x bfloat> [[VS2:%.*]], i64 noundef [[VL:%.*]]) #[[ATTR0]] {
+// CHECK-RV64-NEXT: [[ENTRY:.*:]]
+// CHECK-RV64-NEXT: [[TMP0:%.*]] = call <vscale x 2 x i8> @llvm.riscv.vfncvt.rtz.x.f.w.mask.nxv2i8.nxv2bf16.i64(<vscale x 2 x i8> [[VD]], <vscale x 2 x bfloat> [[VS2]], <vscale x 2 x i1> [[VM]], i64 [[VL]], i64 1)
+// CHECK-RV64-NEXT: ret <vscale x 2 x i8> [[TMP0]]
+//
+vint8mf4_t test_vfncvt_rtz_x_f_w_bf16mf2_i8mf4_mu(vbool32_t vm, vint8mf4_t vd,
+ vbfloat16mf2_t vs2,
+ size_t vl) {
+ return __riscv_vfncvt_rtz_x_f_w_bf16mf2_i8mf4_mu(vm, vd, vs2, vl);
+}
+
+// CHECK-RV64-LABEL: define dso_local <vscale x 4 x i8> @test_vfncvt_rtz_x_f_w_bf16m1_i8mf2_mu(
+// CHECK-RV64-SAME: <vscale x 4 x i1> [[VM:%.*]], <vscale x 4 x i8> [[VD:%.*]], <vscale x 4 x bfloat> [[VS2:%.*]], i64 noundef [[VL:%.*]]) #[[ATTR0]] {
+// CHECK-RV64-NEXT: [[ENTRY:.*:]]
+// CHECK-RV64-NEXT: [[TMP0:%.*]] = call <vscale x 4 x i8> @llvm.riscv.vfncvt.rtz.x.f.w.mask.nxv4i8.nxv4bf16.i64(<vscale x 4 x i8> [[VD]], <vscale x 4 x bfloat> [[VS2]], <vscale x 4 x i1> [[VM]], i64 [[VL]], i64 1)
+// CHECK-RV64-NEXT: ret <vscale x 4 x i8> [[TMP0]]
+//
+vint8mf2_t test_vfncvt_rtz_x_f_w_bf16m1_i8mf2_mu(vbool16_t vm, vint8mf2_t vd,
+ vbfloat16m1_t vs2, size_t vl) {
+ return __riscv_vfncvt_rtz_x_f_w_bf16m1_i8mf2_mu(vm, vd, vs2, vl);
+}
+
+// CHECK-RV64-LABEL: define dso_local <vscale x 8 x i8> @test_vfncvt_rtz_x_f_w_bf16m2_i8m1_mu(
+// CHECK-RV64-SAME: <vscale x 8 x i1> [[VM:%.*]], <vscale x 8 x i8> [[VD:%.*]], <vscale x 8 x bfloat> [[VS2:%.*]], i64 noundef [[VL:%.*]]) #[[ATTR0]] {
+// CHECK-RV64-NEXT: [[ENTRY:.*:]]
+// CHECK-RV64-NEXT: [[TMP0:%.*]] = call <vscale x 8 x i8> @llvm.riscv.vfncvt.rtz.x.f.w.mask.nxv8i8.nxv8bf16.i64(<vscale x 8 x i8> [[VD]], <vscale x 8 x bfloat> [[VS2]], <vscale x 8 x i1> [[VM]], i64 [[VL]], i64 1)
+// CHECK-RV64-NEXT: ret <vscale x 8 x i8> [[TMP0]]
+//
+vint8m1_t test_vfncvt_rtz_x_f_w_bf16m2_i8m1_mu(vbool8_t vm, vint8m1_t vd,
+ vbfloat16m2_t vs2, size_t vl) {
+ return __riscv_vfncvt_rtz_x_f_w_bf16m2_i8m1_mu(vm, vd, vs2, vl);
+}
+
+// CHECK-RV64-LABEL: define dso_local <vscale x 16 x i8> @test_vfncvt_rtz_x_f_w_bf16m4_i8m2_mu(
+// CHECK-RV64-SAME: <vscale x 16 x i1> [[VM:%.*]], <vscale x 16 x i8> [[VD:%.*]], <vscale x 16 x bfloat> [[VS2:%.*]], i64 noundef [[VL:%.*]]) #[[ATTR0]] {
+// CHECK-RV64-NEXT: [[ENTRY:.*:]]
+// CHECK-RV64-NEXT: [[TMP0:%.*]] = call <vscale x 16 x i8> @llvm.riscv.vfncvt.rtz.x.f.w.mask.nxv16i8.nxv16bf16.i64(<vscale x 16 x i8> [[VD]], <vscale x 16 x bfloat> [[VS2]], <vscale x 16 x i1> [[VM]], i64 [[VL]], i64 1)
+// CHECK-RV64-NEXT: ret <vscale x 16 x i8> [[TMP0]]
+//
+vint8m2_t test_vfncvt_rtz_x_f_w_bf16m4_i8m2_mu(vbool4_t vm, vint8m2_t vd,
+ vbfloat16m4_t vs2, size_t vl) {
+ return __riscv_vfncvt_rtz_x_f_w_bf16m4_i8m2_mu(vm, vd, vs2, vl);
+}
+
+// CHECK-RV64-LABEL: define dso_local <vscale x 32 x i8> @test_vfncvt_rtz_x_f_w_bf16m8_i8m4_mu(
+// CHECK-RV64-SAME: <vscale x 32 x i1> [[VM:%.*]], <vscale x 32 x i8> [[VD:%.*]], <vscale x 32 x bfloat> [[VS2:%.*]], i64 noundef [[VL:%.*]]) #[[ATTR0]] {
+// CHECK-RV64-NEXT: [[ENTRY:.*:]]
+// CHECK-RV64-NEXT: [[TMP0:%.*]] = call <vscale x 32 x i8> @llvm.riscv.vfncvt.rtz.x.f.w.mask.nxv32i8.nxv32bf16.i64(<vscale x 32 x i8> [[VD]], <vscale x 32 x bfloat> [[VS2]], <vscale x 32 x i1> [[VM]], i64 [[VL]], i64 1)
+// CHECK-RV64-NEXT: ret <vscale x 32 x i8> [[TMP0]]
+//
+vint8m4_t test_vfncvt_rtz_x_f_w_bf16m8_i8m4_mu(vbool2_t vm, vint8m4_t vd,
+ vbfloat16m8_t vs2, size_t vl) {
+ return __riscv_vfncvt_rtz_x_f_w_bf16m8_i8m4_mu(vm, vd, vs2, vl);
+}
+
+// CHECK-RV64-LABEL: define dso_local <vscale x 1 x i8> @test_vfncvt_rtz_xu_f_w_bf16mf4_u8mf8_mu(
+// CHECK-RV64-SAME: <vscale x 1 x i1> [[VM:%.*]], <vscale x 1 x i8> [[VD:%.*]], <vscale x 1 x bfloat> [[VS2:%.*]], i64 noundef [[VL:%.*]]) #[[ATTR0]] {
+// CHECK-RV64-NEXT: [[ENTRY:.*:]]
+// CHECK-RV64-NEXT: [[TMP0:%.*]] = call <vscale x 1 x i8> @llvm.riscv.vfncvt.rtz.xu.f.w.mask.nxv1i8.nxv1bf16.i64(<vscale x 1 x i8> [[VD]], <vscale x 1 x bfloat> [[VS2]], <vscale x 1 x i1> [[VM]], i64 [[VL]], i64 1)
+// CHECK-RV64-NEXT: ret <vscale x 1 x i8> [[TMP0]]
+//
+vuint8mf8_t test_vfncvt_rtz_xu_f_w_bf16mf4_u8mf8_mu(vbool64_t vm,
+ vuint8mf8_t vd,
+ vbfloat16mf4_t vs2,
+ size_t vl) {
+ return __riscv_vfncvt_rtz_xu_f_w_bf16mf4_u8mf8_mu(vm, vd, vs2, vl);
+}
+
+// CHECK-RV64-LABEL: define dso_local <vscale x 2 x i8> @test_vfncvt_rtz_xu_f_w_bf16mf2_u8mf4_mu(
+// CHECK-RV64-SAME: <vscale x 2 x i1> [[VM:%.*]], <vscale x 2 x i8> [[VD:%.*]], <vscale x 2 x bfloat> [[VS2:%.*]], i64 noundef [[VL:%.*]]) #[[ATTR0]] {
+// CHECK-RV64-NEXT: [[ENTRY:.*:]]
+// CHECK-RV64-NEXT: [[TMP0:%.*]] = call <vscale x 2 x i8> @llvm.riscv.vfncvt.rtz.xu.f.w.mask.nxv2i8.nxv2bf16.i64(<vscale x 2 x i8> [[VD]], <vscale x 2 x bfloat> [[VS2]], <vscale x 2 x i1> [[VM]], i64 [[VL]], i64 1)
+// CHECK-RV64-NEXT: ret <vscale x 2 x i8> [[TMP0]]
+//
+vuint8mf4_t test_vfncvt_rtz_xu_f_w_bf16mf2_u8mf4_mu(vbool32_t vm,
+ vuint8mf4_t vd,
+ vbfloat16mf2_t vs2,
+ size_t vl) {
+ return __riscv_vfncvt_rtz_xu_f_w_bf16mf2_u8mf4_mu(vm, vd, vs2, vl);
+}
+
+// CHECK-RV64-LABEL: define dso_local <vscale x 4 x i8> @test_vfncvt_rtz_xu_f_w_bf16m1_u8mf2_mu(
+// CHECK-RV64-SAME: <vscale x 4 x i1> [[VM:%.*]], <vscale x 4 x i8> [[VD:%.*]], <vscale x 4 x bfloat> [[VS2:%.*]], i64 noundef [[VL:%.*]]) #[[ATTR0]] {
+// CHECK-RV64-NEXT: [[ENTRY:.*:]]
+// CHECK-RV64-NEXT: [[TMP0:%.*]] = call <vscale x 4 x i8> @llvm.riscv.vfncvt.rtz.xu.f.w.mask.nxv4i8.nxv4bf16.i64(<vscale x 4 x i8> [[VD]], <vscale x 4 x bfloat> [[VS2]], <vscale x 4 x i1> [[VM]], i64 [[VL]], i64 1)
+// CHECK-RV64-NEXT: ret <vscale x 4 x i8> [[TMP0]]
+//
+vuint8mf2_t test_vfncvt_rtz_xu_f_w_bf16m1_u8mf2_mu(vbool16_t vm, vuint8mf2_t vd,
+ vbfloat16m1_t vs2,
+ size_t vl) {
+ return __riscv_vfncvt_rtz_xu_f_w_bf16m1_u8mf2_mu(vm, vd, vs2, vl);
+}
+
+// CHECK-RV64-LABEL: define dso_local <vscale x 8 x i8> @test_vfncvt_rtz_xu_f_w_bf16m2_u8m1_mu(
+// CHECK-RV64-SAME: <vscale x 8 x i1> [[VM:%.*]], <vscale x 8 x i8> [[VD:%.*]], <vscale x 8 x bfloat> [[VS2:%.*]], i64 noundef [[VL:%.*]]) #[[ATTR0]] {
+// CHECK-RV64-NEXT: [[ENTRY:.*:]]
+// CHECK-RV64-NEXT: [[TMP0:%.*]] = call <vscale x 8 x i8> @llvm.riscv.vfncvt.rtz.xu.f.w.mask.nxv8i8.nxv8bf16.i64(<vscale x 8 x i8> [[VD]], <vscale x 8 x bfloat> [[VS2]], <vscale x 8 x i1> [[VM]], i64 [[VL]], i64 1)
+// CHECK-RV64-NEXT: ret <vscale x 8 x i8> [[TMP0]]
+//
+vuint8m1_t test_vfncvt_rtz_xu_f_w_bf16m2_u8m1_mu(vbool8_t vm, vuint8m1_t vd,
+ vbfloat16m2_t vs2, size_t vl) {
+ return __riscv_vfncvt_rtz_xu_f_w_bf16m2_u8m1_mu(vm, vd, vs2, vl);
+}
+
+// CHECK-RV64-LABEL: define dso_local <vscale x 16 x i8> @test_vfncvt_rtz_xu_f_w_bf16m4_u8m2_mu(
+// CHECK-RV64-SAME: <vscale x 16 x i1> [[VM:%.*]], <vscale x 16 x i8> [[VD:%.*]], <vscale x 16 x bfloat> [[VS2:%.*]], i64 noundef [[VL:%.*]]) #[[ATTR0]] {
+// CHECK-RV64-NEXT: [[ENTRY:.*:]]
+// CHECK-RV64-NEXT: [[TMP0:%.*]] = call <vscale x 16 x i8> @llvm.riscv.vfncvt.rtz.xu.f.w.mask.nxv16i8.nxv16bf16.i64(<vscale x 16 x i8> [[VD]], <vscale x 16 x bfloat> [[VS2]], <vscale x 16 x i1> [[VM]], i64 [[VL]], i64 1)
+// CHECK-RV64-NEXT: ret <vscale x 16 x i8> [[TMP0]]
+//
+vuint8m2_t test_vfncvt_rtz_xu_f_w_bf16m4_u8m2_mu(vbool4_t vm, vuint8m2_t vd,
+ vbfloat16m4_t vs2, size_t vl) {
+ return __riscv_vfncvt_rtz_xu_f_w_bf16m4_u8m2_mu(vm, vd, vs2, vl);
+}
+
+// CHECK-RV64-LABEL: define dso_local <vscale x 32 x i8> @test_vfncvt_rtz_xu_f_w_bf16m8_u8m4_mu(
+// CHECK-RV64-SAME: <vscale x 32 x i1> [[VM:%.*]], <vscale x 32 x i8> [[VD:%.*]], <vscale x 32 x bfloat> [[VS2:%.*]], i64 noundef [[VL:%.*]]) #[[ATTR0]] {
+// CHECK-RV64-NEXT: [[ENTRY:.*:]]
+// CHECK-RV64-NEXT: [[TMP0:%.*]] = call <vscale x 32 x i8> @llvm.riscv.vfncvt.rtz.xu.f.w.mask.nxv32i8.nxv32bf16.i64(<vscale x 32 x i8> [[VD]], <vscale x 32 x bfloat> [[VS2]], <vscale x 32 x i1> [[VM]], i64 [[VL]], i64 1)
+// CHECK-RV64-NEXT: ret <vscale x 32 x i8> [[TMP0]]
+//
+vuint8m4_t test_vfncvt_rtz_xu_f_w_bf16m8_u8m4_mu(vbool2_t vm, vuint8m4_t vd,
+ vbfloat16m8_t vs2, size_t vl) {
+ return __riscv_vfncvt_rtz_xu_f_w_bf16m8_u8m4_mu(vm, vd, vs2, vl);
+}
diff --git a/clang/test/CodeGen/RISCV/rvv-intrinsics-autogenerated/zvfbfa/policy/non-overloaded/vfnmacc.c b/clang/test/CodeGen/RISCV/rvv-intrinsics-autogenerated/zvfbfa/policy/non-overloaded/vfnmacc.c
new file mode 100644
index 0000000..93fd6ba
--- /dev/null
+++ b/clang/test/CodeGen/RISCV/rvv-intrinsics-autogenerated/zvfbfa/policy/non-overloaded/vfnmacc.c
@@ -0,0 +1,489 @@
+// NOTE: Assertions have been autogenerated by utils/update_cc_test_checks.py UTC_ARGS: --version 5
+// REQUIRES: riscv-registered-target
+// RUN: %clang_cc1 -triple riscv64 -target-feature +v \
+// RUN: -target-feature +experimental-zvfbfa -disable-O0-optnone \
+// RUN: -emit-llvm %s -o - | opt -S -passes=mem2reg | \
+// RUN: FileCheck --check-prefix=CHECK-RV64 %s
+
+#include <riscv_vector.h>
+
+// CHECK-RV64-LABEL: define dso_local <vscale x 1 x bfloat> @test_vfnmacc_vv_bf16mf4_tu(
+// CHECK-RV64-SAME: <vscale x 1 x bfloat> [[VD:%.*]], <vscale x 1 x bfloat> [[VS1:%.*]], <vscale x 1 x bfloat> [[VS2:%.*]], i64 noundef [[VL:%.*]]) #[[ATTR0:[0-9]+]] {
+// CHECK-RV64-NEXT: [[ENTRY:.*:]]
+// CHECK-RV64-NEXT: [[TMP0:%.*]] = call <vscale x 1 x bfloat> @llvm.riscv.vfnmacc.nxv1bf16.nxv1bf16.i64(<vscale x 1 x bfloat> [[VD]], <vscale x 1 x bfloat> [[VS1]], <vscale x 1 x bfloat> [[VS2]], i64 7, i64 [[VL]], i64 2)
+// CHECK-RV64-NEXT: ret <vscale x 1 x bfloat> [[TMP0]]
+//
+vbfloat16mf4_t test_vfnmacc_vv_bf16mf4_tu(vbfloat16mf4_t vd, vbfloat16mf4_t vs1, vbfloat16mf4_t vs2, size_t vl) {
+ return __riscv_vfnmacc_vv_bf16mf4_tu(vd, vs1, vs2, vl);
+}
+
+// CHECK-RV64-LABEL: define dso_local <vscale x 1 x bfloat> @test_vfnmacc_vf_bf16mf4_tu(
+// CHECK-RV64-SAME: <vscale x 1 x bfloat> [[VD:%.*]], bfloat noundef [[RS1:%.*]], <vscale x 1 x bfloat> [[VS2:%.*]], i64 noundef [[VL:%.*]]) #[[ATTR0]] {
+// CHECK-RV64-NEXT: [[ENTRY:.*:]]
+// CHECK-RV64-NEXT: [[TMP0:%.*]] = call <vscale x 1 x bfloat> @llvm.riscv.vfnmacc.nxv1bf16.bf16.i64(<vscale x 1 x bfloat> [[VD]], bfloat [[RS1]], <vscale x 1 x bfloat> [[VS2]], i64 7, i64 [[VL]], i64 2)
+// CHECK-RV64-NEXT: ret <vscale x 1 x bfloat> [[TMP0]]
+//
+vbfloat16mf4_t test_vfnmacc_vf_bf16mf4_tu(vbfloat16mf4_t vd, __bf16 rs1, vbfloat16mf4_t vs2, size_t vl) {
+ return __riscv_vfnmacc_vf_bf16mf4_tu(vd, rs1, vs2, vl);
+}
+
+// CHECK-RV64-LABEL: define dso_local <vscale x 2 x bfloat> @test_vfnmacc_vv_bf16mf2_tu(
+// CHECK-RV64-SAME: <vscale x 2 x bfloat> [[VD:%.*]], <vscale x 2 x bfloat> [[VS1:%.*]], <vscale x 2 x bfloat> [[VS2:%.*]], i64 noundef [[VL:%.*]]) #[[ATTR0]] {
+// CHECK-RV64-NEXT: [[ENTRY:.*:]]
+// CHECK-RV64-NEXT: [[TMP0:%.*]] = call <vscale x 2 x bfloat> @llvm.riscv.vfnmacc.nxv2bf16.nxv2bf16.i64(<vscale x 2 x bfloat> [[VD]], <vscale x 2 x bfloat> [[VS1]], <vscale x 2 x bfloat> [[VS2]], i64 7, i64 [[VL]], i64 2)
+// CHECK-RV64-NEXT: ret <vscale x 2 x bfloat> [[TMP0]]
+//
+vbfloat16mf2_t test_vfnmacc_vv_bf16mf2_tu(vbfloat16mf2_t vd, vbfloat16mf2_t vs1, vbfloat16mf2_t vs2, size_t vl) {
+ return __riscv_vfnmacc_vv_bf16mf2_tu(vd, vs1, vs2, vl);
+}
+
+// CHECK-RV64-LABEL: define dso_local <vscale x 2 x bfloat> @test_vfnmacc_vf_bf16mf2_tu(
+// CHECK-RV64-SAME: <vscale x 2 x bfloat> [[VD:%.*]], bfloat noundef [[RS1:%.*]], <vscale x 2 x bfloat> [[VS2:%.*]], i64 noundef [[VL:%.*]]) #[[ATTR0]] {
+// CHECK-RV64-NEXT: [[ENTRY:.*:]]
+// CHECK-RV64-NEXT: [[TMP0:%.*]] = call <vscale x 2 x bfloat> @llvm.riscv.vfnmacc.nxv2bf16.bf16.i64(<vscale x 2 x bfloat> [[VD]], bfloat [[RS1]], <vscale x 2 x bfloat> [[VS2]], i64 7, i64 [[VL]], i64 2)
+// CHECK-RV64-NEXT: ret <vscale x 2 x bfloat> [[TMP0]]
+//
+vbfloat16mf2_t test_vfnmacc_vf_bf16mf2_tu(vbfloat16mf2_t vd, __bf16 rs1, vbfloat16mf2_t vs2, size_t vl) {
+ return __riscv_vfnmacc_vf_bf16mf2_tu(vd, rs1, vs2, vl);
+}
+
+// CHECK-RV64-LABEL: define dso_local <vscale x 4 x bfloat> @test_vfnmacc_vv_bf16m1_tu(
+// CHECK-RV64-SAME: <vscale x 4 x bfloat> [[VD:%.*]], <vscale x 4 x bfloat> [[VS1:%.*]], <vscale x 4 x bfloat> [[VS2:%.*]], i64 noundef [[VL:%.*]]) #[[ATTR0]] {
+// CHECK-RV64-NEXT: [[ENTRY:.*:]]
+// CHECK-RV64-NEXT: [[TMP0:%.*]] = call <vscale x 4 x bfloat> @llvm.riscv.vfnmacc.nxv4bf16.nxv4bf16.i64(<vscale x 4 x bfloat> [[VD]], <vscale x 4 x bfloat> [[VS1]], <vscale x 4 x bfloat> [[VS2]], i64 7, i64 [[VL]], i64 2)
+// CHECK-RV64-NEXT: ret <vscale x 4 x bfloat> [[TMP0]]
+//
+vbfloat16m1_t test_vfnmacc_vv_bf16m1_tu(vbfloat16m1_t vd, vbfloat16m1_t vs1, vbfloat16m1_t vs2, size_t vl) {
+ return __riscv_vfnmacc_vv_bf16m1_tu(vd, vs1, vs2, vl);
+}
+
+// CHECK-RV64-LABEL: define dso_local <vscale x 4 x bfloat> @test_vfnmacc_vf_bf16m1_tu(
+// CHECK-RV64-SAME: <vscale x 4 x bfloat> [[VD:%.*]], bfloat noundef [[RS1:%.*]], <vscale x 4 x bfloat> [[VS2:%.*]], i64 noundef [[VL:%.*]]) #[[ATTR0]] {
+// CHECK-RV64-NEXT: [[ENTRY:.*:]]
+// CHECK-RV64-NEXT: [[TMP0:%.*]] = call <vscale x 4 x bfloat> @llvm.riscv.vfnmacc.nxv4bf16.bf16.i64(<vscale x 4 x bfloat> [[VD]], bfloat [[RS1]], <vscale x 4 x bfloat> [[VS2]], i64 7, i64 [[VL]], i64 2)
+// CHECK-RV64-NEXT: ret <vscale x 4 x bfloat> [[TMP0]]
+//
+vbfloat16m1_t test_vfnmacc_vf_bf16m1_tu(vbfloat16m1_t vd, __bf16 rs1, vbfloat16m1_t vs2, size_t vl) {
+ return __riscv_vfnmacc_vf_bf16m1_tu(vd, rs1, vs2, vl);
+}
+
+// CHECK-RV64-LABEL: define dso_local <vscale x 8 x bfloat> @test_vfnmacc_vv_bf16m2_tu(
+// CHECK-RV64-SAME: <vscale x 8 x bfloat> [[VD:%.*]], <vscale x 8 x bfloat> [[VS1:%.*]], <vscale x 8 x bfloat> [[VS2:%.*]], i64 noundef [[VL:%.*]]) #[[ATTR0]] {
+// CHECK-RV64-NEXT: [[ENTRY:.*:]]
+// CHECK-RV64-NEXT: [[TMP0:%.*]] = call <vscale x 8 x bfloat> @llvm.riscv.vfnmacc.nxv8bf16.nxv8bf16.i64(<vscale x 8 x bfloat> [[VD]], <vscale x 8 x bfloat> [[VS1]], <vscale x 8 x bfloat> [[VS2]], i64 7, i64 [[VL]], i64 2)
+// CHECK-RV64-NEXT: ret <vscale x 8 x bfloat> [[TMP0]]
+//
+vbfloat16m2_t test_vfnmacc_vv_bf16m2_tu(vbfloat16m2_t vd, vbfloat16m2_t vs1, vbfloat16m2_t vs2, size_t vl) {
+ return __riscv_vfnmacc_vv_bf16m2_tu(vd, vs1, vs2, vl);
+}
+
+// CHECK-RV64-LABEL: define dso_local <vscale x 8 x bfloat> @test_vfnmacc_vf_bf16m2_tu(
+// CHECK-RV64-SAME: <vscale x 8 x bfloat> [[VD:%.*]], bfloat noundef [[RS1:%.*]], <vscale x 8 x bfloat> [[VS2:%.*]], i64 noundef [[VL:%.*]]) #[[ATTR0]] {
+// CHECK-RV64-NEXT: [[ENTRY:.*:]]
+// CHECK-RV64-NEXT: [[TMP0:%.*]] = call <vscale x 8 x bfloat> @llvm.riscv.vfnmacc.nxv8bf16.bf16.i64(<vscale x 8 x bfloat> [[VD]], bfloat [[RS1]], <vscale x 8 x bfloat> [[VS2]], i64 7, i64 [[VL]], i64 2)
+// CHECK-RV64-NEXT: ret <vscale x 8 x bfloat> [[TMP0]]
+//
+vbfloat16m2_t test_vfnmacc_vf_bf16m2_tu(vbfloat16m2_t vd, __bf16 rs1, vbfloat16m2_t vs2, size_t vl) {
+ return __riscv_vfnmacc_vf_bf16m2_tu(vd, rs1, vs2, vl);
+}
+
+// CHECK-RV64-LABEL: define dso_local <vscale x 16 x bfloat> @test_vfnmacc_vv_bf16m4_tu(
+// CHECK-RV64-SAME: <vscale x 16 x bfloat> [[VD:%.*]], <vscale x 16 x bfloat> [[VS1:%.*]], <vscale x 16 x bfloat> [[VS2:%.*]], i64 noundef [[VL:%.*]]) #[[ATTR0]] {
+// CHECK-RV64-NEXT: [[ENTRY:.*:]]
+// CHECK-RV64-NEXT: [[TMP0:%.*]] = call <vscale x 16 x bfloat> @llvm.riscv.vfnmacc.nxv16bf16.nxv16bf16.i64(<vscale x 16 x bfloat> [[VD]], <vscale x 16 x bfloat> [[VS1]], <vscale x 16 x bfloat> [[VS2]], i64 7, i64 [[VL]], i64 2)
+// CHECK-RV64-NEXT: ret <vscale x 16 x bfloat> [[TMP0]]
+//
+vbfloat16m4_t test_vfnmacc_vv_bf16m4_tu(vbfloat16m4_t vd, vbfloat16m4_t vs1, vbfloat16m4_t vs2, size_t vl) {
+ return __riscv_vfnmacc_vv_bf16m4_tu(vd, vs1, vs2, vl);
+}
+
+// CHECK-RV64-LABEL: define dso_local <vscale x 16 x bfloat> @test_vfnmacc_vf_bf16m4_tu(
+// CHECK-RV64-SAME: <vscale x 16 x bfloat> [[VD:%.*]], bfloat noundef [[RS1:%.*]], <vscale x 16 x bfloat> [[VS2:%.*]], i64 noundef [[VL:%.*]]) #[[ATTR0]] {
+// CHECK-RV64-NEXT: [[ENTRY:.*:]]
+// CHECK-RV64-NEXT: [[TMP0:%.*]] = call <vscale x 16 x bfloat> @llvm.riscv.vfnmacc.nxv16bf16.bf16.i64(<vscale x 16 x bfloat> [[VD]], bfloat [[RS1]], <vscale x 16 x bfloat> [[VS2]], i64 7, i64 [[VL]], i64 2)
+// CHECK-RV64-NEXT: ret <vscale x 16 x bfloat> [[TMP0]]
+//
+vbfloat16m4_t test_vfnmacc_vf_bf16m4_tu(vbfloat16m4_t vd, __bf16 rs1, vbfloat16m4_t vs2, size_t vl) {
+ return __riscv_vfnmacc_vf_bf16m4_tu(vd, rs1, vs2, vl);
+}
+
+// CHECK-RV64-LABEL: define dso_local <vscale x 32 x bfloat> @test_vfnmacc_vv_bf16m8_tu(
+// CHECK-RV64-SAME: <vscale x 32 x bfloat> [[VD:%.*]], <vscale x 32 x bfloat> [[VS1:%.*]], <vscale x 32 x bfloat> [[VS2:%.*]], i64 noundef [[VL:%.*]]) #[[ATTR0]] {
+// CHECK-RV64-NEXT: [[ENTRY:.*:]]
+// CHECK-RV64-NEXT: [[TMP0:%.*]] = call <vscale x 32 x bfloat> @llvm.riscv.vfnmacc.nxv32bf16.nxv32bf16.i64(<vscale x 32 x bfloat> [[VD]], <vscale x 32 x bfloat> [[VS1]], <vscale x 32 x bfloat> [[VS2]], i64 7, i64 [[VL]], i64 2)
+// CHECK-RV64-NEXT: ret <vscale x 32 x bfloat> [[TMP0]]
+//
+vbfloat16m8_t test_vfnmacc_vv_bf16m8_tu(vbfloat16m8_t vd, vbfloat16m8_t vs1, vbfloat16m8_t vs2, size_t vl) {
+ return __riscv_vfnmacc_vv_bf16m8_tu(vd, vs1, vs2, vl);
+}
+
+// CHECK-RV64-LABEL: define dso_local <vscale x 32 x bfloat> @test_vfnmacc_vf_bf16m8_tu(
+// CHECK-RV64-SAME: <vscale x 32 x bfloat> [[VD:%.*]], bfloat noundef [[RS1:%.*]], <vscale x 32 x bfloat> [[VS2:%.*]], i64 noundef [[VL:%.*]]) #[[ATTR0]] {
+// CHECK-RV64-NEXT: [[ENTRY:.*:]]
+// CHECK-RV64-NEXT: [[TMP0:%.*]] = call <vscale x 32 x bfloat> @llvm.riscv.vfnmacc.nxv32bf16.bf16.i64(<vscale x 32 x bfloat> [[VD]], bfloat [[RS1]], <vscale x 32 x bfloat> [[VS2]], i64 7, i64 [[VL]], i64 2)
+// CHECK-RV64-NEXT: ret <vscale x 32 x bfloat> [[TMP0]]
+//
+vbfloat16m8_t test_vfnmacc_vf_bf16m8_tu(vbfloat16m8_t vd, __bf16 rs1, vbfloat16m8_t vs2, size_t vl) {
+ return __riscv_vfnmacc_vf_bf16m8_tu(vd, rs1, vs2, vl);
+}
+
+// CHECK-RV64-LABEL: define dso_local <vscale x 1 x bfloat> @test_vfnmacc_vv_bf16mf4_tum(
+// CHECK-RV64-SAME: <vscale x 1 x i1> [[MASK:%.*]], <vscale x 1 x bfloat> [[VD:%.*]], <vscale x 1 x bfloat> [[VS1:%.*]], <vscale x 1 x bfloat> [[VS2:%.*]], i64 noundef [[VL:%.*]]) #[[ATTR0]] {
+// CHECK-RV64-NEXT: [[ENTRY:.*:]]
+// CHECK-RV64-NEXT: [[TMP0:%.*]] = call <vscale x 1 x bfloat> @llvm.riscv.vfnmacc.mask.nxv1bf16.nxv1bf16.i64(<vscale x 1 x bfloat> [[VD]], <vscale x 1 x bfloat> [[VS1]], <vscale x 1 x bfloat> [[VS2]], <vscale x 1 x i1> [[MASK]], i64 7, i64 [[VL]], i64 2)
+// CHECK-RV64-NEXT: ret <vscale x 1 x bfloat> [[TMP0]]
+//
+vbfloat16mf4_t test_vfnmacc_vv_bf16mf4_tum(vbool64_t mask, vbfloat16mf4_t vd, vbfloat16mf4_t vs1, vbfloat16mf4_t vs2, size_t vl) {
+ return __riscv_vfnmacc_vv_bf16mf4_tum(mask, vd, vs1, vs2, vl);
+}
+
+// CHECK-RV64-LABEL: define dso_local <vscale x 1 x bfloat> @test_vfnmacc_vf_bf16mf4_tum(
+// CHECK-RV64-SAME: <vscale x 1 x i1> [[MASK:%.*]], <vscale x 1 x bfloat> [[VD:%.*]], bfloat noundef [[RS1:%.*]], <vscale x 1 x bfloat> [[VS2:%.*]], i64 noundef [[VL:%.*]]) #[[ATTR0]] {
+// CHECK-RV64-NEXT: [[ENTRY:.*:]]
+// CHECK-RV64-NEXT: [[TMP0:%.*]] = call <vscale x 1 x bfloat> @llvm.riscv.vfnmacc.mask.nxv1bf16.bf16.i64(<vscale x 1 x bfloat> [[VD]], bfloat [[RS1]], <vscale x 1 x bfloat> [[VS2]], <vscale x 1 x i1> [[MASK]], i64 7, i64 [[VL]], i64 2)
+// CHECK-RV64-NEXT: ret <vscale x 1 x bfloat> [[TMP0]]
+//
+vbfloat16mf4_t test_vfnmacc_vf_bf16mf4_tum(vbool64_t mask, vbfloat16mf4_t vd, __bf16 rs1, vbfloat16mf4_t vs2, size_t vl) {
+ return __riscv_vfnmacc_vf_bf16mf4_tum(mask, vd, rs1, vs2, vl);
+}
+
+// CHECK-RV64-LABEL: define dso_local <vscale x 2 x bfloat> @test_vfnmacc_vv_bf16mf2_tum(
+// CHECK-RV64-SAME: <vscale x 2 x i1> [[MASK:%.*]], <vscale x 2 x bfloat> [[VD:%.*]], <vscale x 2 x bfloat> [[VS1:%.*]], <vscale x 2 x bfloat> [[VS2:%.*]], i64 noundef [[VL:%.*]]) #[[ATTR0]] {
+// CHECK-RV64-NEXT: [[ENTRY:.*:]]
+// CHECK-RV64-NEXT: [[TMP0:%.*]] = call <vscale x 2 x bfloat> @llvm.riscv.vfnmacc.mask.nxv2bf16.nxv2bf16.i64(<vscale x 2 x bfloat> [[VD]], <vscale x 2 x bfloat> [[VS1]], <vscale x 2 x bfloat> [[VS2]], <vscale x 2 x i1> [[MASK]], i64 7, i64 [[VL]], i64 2)
+// CHECK-RV64-NEXT: ret <vscale x 2 x bfloat> [[TMP0]]
+//
+vbfloat16mf2_t test_vfnmacc_vv_bf16mf2_tum(vbool32_t mask, vbfloat16mf2_t vd, vbfloat16mf2_t vs1, vbfloat16mf2_t vs2, size_t vl) {
+ return __riscv_vfnmacc_vv_bf16mf2_tum(mask, vd, vs1, vs2, vl);
+}
+
+// CHECK-RV64-LABEL: define dso_local <vscale x 2 x bfloat> @test_vfnmacc_vf_bf16mf2_tum(
+// CHECK-RV64-SAME: <vscale x 2 x i1> [[MASK:%.*]], <vscale x 2 x bfloat> [[VD:%.*]], bfloat noundef [[RS1:%.*]], <vscale x 2 x bfloat> [[VS2:%.*]], i64 noundef [[VL:%.*]]) #[[ATTR0]] {
+// CHECK-RV64-NEXT: [[ENTRY:.*:]]
+// CHECK-RV64-NEXT: [[TMP0:%.*]] = call <vscale x 2 x bfloat> @llvm.riscv.vfnmacc.mask.nxv2bf16.bf16.i64(<vscale x 2 x bfloat> [[VD]], bfloat [[RS1]], <vscale x 2 x bfloat> [[VS2]], <vscale x 2 x i1> [[MASK]], i64 7, i64 [[VL]], i64 2)
+// CHECK-RV64-NEXT: ret <vscale x 2 x bfloat> [[TMP0]]
+//
+vbfloat16mf2_t test_vfnmacc_vf_bf16mf2_tum(vbool32_t mask, vbfloat16mf2_t vd, __bf16 rs1, vbfloat16mf2_t vs2, size_t vl) {
+ return __riscv_vfnmacc_vf_bf16mf2_tum(mask, vd, rs1, vs2, vl);
+}
+
+// CHECK-RV64-LABEL: define dso_local <vscale x 4 x bfloat> @test_vfnmacc_vv_bf16m1_tum(
+// CHECK-RV64-SAME: <vscale x 4 x i1> [[MASK:%.*]], <vscale x 4 x bfloat> [[VD:%.*]], <vscale x 4 x bfloat> [[VS1:%.*]], <vscale x 4 x bfloat> [[VS2:%.*]], i64 noundef [[VL:%.*]]) #[[ATTR0]] {
+// CHECK-RV64-NEXT: [[ENTRY:.*:]]
+// CHECK-RV64-NEXT: [[TMP0:%.*]] = call <vscale x 4 x bfloat> @llvm.riscv.vfnmacc.mask.nxv4bf16.nxv4bf16.i64(<vscale x 4 x bfloat> [[VD]], <vscale x 4 x bfloat> [[VS1]], <vscale x 4 x bfloat> [[VS2]], <vscale x 4 x i1> [[MASK]], i64 7, i64 [[VL]], i64 2)
+// CHECK-RV64-NEXT: ret <vscale x 4 x bfloat> [[TMP0]]
+//
+vbfloat16m1_t test_vfnmacc_vv_bf16m1_tum(vbool16_t mask, vbfloat16m1_t vd, vbfloat16m1_t vs1, vbfloat16m1_t vs2, size_t vl) {
+ return __riscv_vfnmacc_vv_bf16m1_tum(mask, vd, vs1, vs2, vl);
+}
+
+// CHECK-RV64-LABEL: define dso_local <vscale x 4 x bfloat> @test_vfnmacc_vf_bf16m1_tum(
+// CHECK-RV64-SAME: <vscale x 4 x i1> [[MASK:%.*]], <vscale x 4 x bfloat> [[VD:%.*]], bfloat noundef [[RS1:%.*]], <vscale x 4 x bfloat> [[VS2:%.*]], i64 noundef [[VL:%.*]]) #[[ATTR0]] {
+// CHECK-RV64-NEXT: [[ENTRY:.*:]]
+// CHECK-RV64-NEXT: [[TMP0:%.*]] = call <vscale x 4 x bfloat> @llvm.riscv.vfnmacc.mask.nxv4bf16.bf16.i64(<vscale x 4 x bfloat> [[VD]], bfloat [[RS1]], <vscale x 4 x bfloat> [[VS2]], <vscale x 4 x i1> [[MASK]], i64 7, i64 [[VL]], i64 2)
+// CHECK-RV64-NEXT: ret <vscale x 4 x bfloat> [[TMP0]]
+//
+vbfloat16m1_t test_vfnmacc_vf_bf16m1_tum(vbool16_t mask, vbfloat16m1_t vd, __bf16 rs1, vbfloat16m1_t vs2, size_t vl) {
+ return __riscv_vfnmacc_vf_bf16m1_tum(mask, vd, rs1, vs2, vl);
+}
+
+// CHECK-RV64-LABEL: define dso_local <vscale x 8 x bfloat> @test_vfnmacc_vv_bf16m2_tum(
+// CHECK-RV64-SAME: <vscale x 8 x i1> [[MASK:%.*]], <vscale x 8 x bfloat> [[VD:%.*]], <vscale x 8 x bfloat> [[VS1:%.*]], <vscale x 8 x bfloat> [[VS2:%.*]], i64 noundef [[VL:%.*]]) #[[ATTR0]] {
+// CHECK-RV64-NEXT: [[ENTRY:.*:]]
+// CHECK-RV64-NEXT: [[TMP0:%.*]] = call <vscale x 8 x bfloat> @llvm.riscv.vfnmacc.mask.nxv8bf16.nxv8bf16.i64(<vscale x 8 x bfloat> [[VD]], <vscale x 8 x bfloat> [[VS1]], <vscale x 8 x bfloat> [[VS2]], <vscale x 8 x i1> [[MASK]], i64 7, i64 [[VL]], i64 2)
+// CHECK-RV64-NEXT: ret <vscale x 8 x bfloat> [[TMP0]]
+//
+vbfloat16m2_t test_vfnmacc_vv_bf16m2_tum(vbool8_t mask, vbfloat16m2_t vd, vbfloat16m2_t vs1, vbfloat16m2_t vs2, size_t vl) {
+ return __riscv_vfnmacc_vv_bf16m2_tum(mask, vd, vs1, vs2, vl);
+}
+
+// CHECK-RV64-LABEL: define dso_local <vscale x 8 x bfloat> @test_vfnmacc_vf_bf16m2_tum(
+// CHECK-RV64-SAME: <vscale x 8 x i1> [[MASK:%.*]], <vscale x 8 x bfloat> [[VD:%.*]], bfloat noundef [[RS1:%.*]], <vscale x 8 x bfloat> [[VS2:%.*]], i64 noundef [[VL:%.*]]) #[[ATTR0]] {
+// CHECK-RV64-NEXT: [[ENTRY:.*:]]
+// CHECK-RV64-NEXT: [[TMP0:%.*]] = call <vscale x 8 x bfloat> @llvm.riscv.vfnmacc.mask.nxv8bf16.bf16.i64(<vscale x 8 x bfloat> [[VD]], bfloat [[RS1]], <vscale x 8 x bfloat> [[VS2]], <vscale x 8 x i1> [[MASK]], i64 7, i64 [[VL]], i64 2)
+// CHECK-RV64-NEXT: ret <vscale x 8 x bfloat> [[TMP0]]
+//
+vbfloat16m2_t test_vfnmacc_vf_bf16m2_tum(vbool8_t mask, vbfloat16m2_t vd, __bf16 rs1, vbfloat16m2_t vs2, size_t vl) {
+ return __riscv_vfnmacc_vf_bf16m2_tum(mask, vd, rs1, vs2, vl);
+}
+
+// CHECK-RV64-LABEL: define dso_local <vscale x 16 x bfloat> @test_vfnmacc_vv_bf16m4_tum(
+// CHECK-RV64-SAME: <vscale x 16 x i1> [[MASK:%.*]], <vscale x 16 x bfloat> [[VD:%.*]], <vscale x 16 x bfloat> [[VS1:%.*]], <vscale x 16 x bfloat> [[VS2:%.*]], i64 noundef [[VL:%.*]]) #[[ATTR0]] {
+// CHECK-RV64-NEXT: [[ENTRY:.*:]]
+// CHECK-RV64-NEXT: [[TMP0:%.*]] = call <vscale x 16 x bfloat> @llvm.riscv.vfnmacc.mask.nxv16bf16.nxv16bf16.i64(<vscale x 16 x bfloat> [[VD]], <vscale x 16 x bfloat> [[VS1]], <vscale x 16 x bfloat> [[VS2]], <vscale x 16 x i1> [[MASK]], i64 7, i64 [[VL]], i64 2)
+// CHECK-RV64-NEXT: ret <vscale x 16 x bfloat> [[TMP0]]
+//
+vbfloat16m4_t test_vfnmacc_vv_bf16m4_tum(vbool4_t mask, vbfloat16m4_t vd, vbfloat16m4_t vs1, vbfloat16m4_t vs2, size_t vl) {
+ return __riscv_vfnmacc_vv_bf16m4_tum(mask, vd, vs1, vs2, vl);
+}
+
+// CHECK-RV64-LABEL: define dso_local <vscale x 16 x bfloat> @test_vfnmacc_vf_bf16m4_tum(
+// CHECK-RV64-SAME: <vscale x 16 x i1> [[MASK:%.*]], <vscale x 16 x bfloat> [[VD:%.*]], bfloat noundef [[RS1:%.*]], <vscale x 16 x bfloat> [[VS2:%.*]], i64 noundef [[VL:%.*]]) #[[ATTR0]] {
+// CHECK-RV64-NEXT: [[ENTRY:.*:]]
+// CHECK-RV64-NEXT: [[TMP0:%.*]] = call <vscale x 16 x bfloat> @llvm.riscv.vfnmacc.mask.nxv16bf16.bf16.i64(<vscale x 16 x bfloat> [[VD]], bfloat [[RS1]], <vscale x 16 x bfloat> [[VS2]], <vscale x 16 x i1> [[MASK]], i64 7, i64 [[VL]], i64 2)
+// CHECK-RV64-NEXT: ret <vscale x 16 x bfloat> [[TMP0]]
+//
+vbfloat16m4_t test_vfnmacc_vf_bf16m4_tum(vbool4_t mask, vbfloat16m4_t vd, __bf16 rs1, vbfloat16m4_t vs2, size_t vl) {
+ return __riscv_vfnmacc_vf_bf16m4_tum(mask, vd, rs1, vs2, vl);
+}
+
+// CHECK-RV64-LABEL: define dso_local <vscale x 32 x bfloat> @test_vfnmacc_vv_bf16m8_tum(
+// CHECK-RV64-SAME: <vscale x 32 x i1> [[MASK:%.*]], <vscale x 32 x bfloat> [[VD:%.*]], <vscale x 32 x bfloat> [[VS1:%.*]], <vscale x 32 x bfloat> [[VS2:%.*]], i64 noundef [[VL:%.*]]) #[[ATTR0]] {
+// CHECK-RV64-NEXT: [[ENTRY:.*:]]
+// CHECK-RV64-NEXT: [[TMP0:%.*]] = call <vscale x 32 x bfloat> @llvm.riscv.vfnmacc.mask.nxv32bf16.nxv32bf16.i64(<vscale x 32 x bfloat> [[VD]], <vscale x 32 x bfloat> [[VS1]], <vscale x 32 x bfloat> [[VS2]], <vscale x 32 x i1> [[MASK]], i64 7, i64 [[VL]], i64 2)
+// CHECK-RV64-NEXT: ret <vscale x 32 x bfloat> [[TMP0]]
+//
+vbfloat16m8_t test_vfnmacc_vv_bf16m8_tum(vbool2_t mask, vbfloat16m8_t vd, vbfloat16m8_t vs1, vbfloat16m8_t vs2, size_t vl) {
+ return __riscv_vfnmacc_vv_bf16m8_tum(mask, vd, vs1, vs2, vl);
+}
+
+// CHECK-RV64-LABEL: define dso_local <vscale x 32 x bfloat> @test_vfnmacc_vf_bf16m8_tum(
+// CHECK-RV64-SAME: <vscale x 32 x i1> [[MASK:%.*]], <vscale x 32 x bfloat> [[VD:%.*]], bfloat noundef [[RS1:%.*]], <vscale x 32 x bfloat> [[VS2:%.*]], i64 noundef [[VL:%.*]]) #[[ATTR0]] {
+// CHECK-RV64-NEXT: [[ENTRY:.*:]]
+// CHECK-RV64-NEXT: [[TMP0:%.*]] = call <vscale x 32 x bfloat> @llvm.riscv.vfnmacc.mask.nxv32bf16.bf16.i64(<vscale x 32 x bfloat> [[VD]], bfloat [[RS1]], <vscale x 32 x bfloat> [[VS2]], <vscale x 32 x i1> [[MASK]], i64 7, i64 [[VL]], i64 2)
+// CHECK-RV64-NEXT: ret <vscale x 32 x bfloat> [[TMP0]]
+//
+vbfloat16m8_t test_vfnmacc_vf_bf16m8_tum(vbool2_t mask, vbfloat16m8_t vd, __bf16 rs1, vbfloat16m8_t vs2, size_t vl) {
+ return __riscv_vfnmacc_vf_bf16m8_tum(mask, vd, rs1, vs2, vl);
+}
+
+// CHECK-RV64-LABEL: define dso_local <vscale x 1 x bfloat> @test_vfnmacc_vv_bf16mf4_tumu(
+// CHECK-RV64-SAME: <vscale x 1 x i1> [[MASK:%.*]], <vscale x 1 x bfloat> [[VD:%.*]], <vscale x 1 x bfloat> [[VS1:%.*]], <vscale x 1 x bfloat> [[VS2:%.*]], i64 noundef [[VL:%.*]]) #[[ATTR0]] {
+// CHECK-RV64-NEXT: [[ENTRY:.*:]]
+// CHECK-RV64-NEXT: [[TMP0:%.*]] = call <vscale x 1 x bfloat> @llvm.riscv.vfnmacc.mask.nxv1bf16.nxv1bf16.i64(<vscale x 1 x bfloat> [[VD]], <vscale x 1 x bfloat> [[VS1]], <vscale x 1 x bfloat> [[VS2]], <vscale x 1 x i1> [[MASK]], i64 7, i64 [[VL]], i64 0)
+// CHECK-RV64-NEXT: ret <vscale x 1 x bfloat> [[TMP0]]
+//
+vbfloat16mf4_t test_vfnmacc_vv_bf16mf4_tumu(vbool64_t mask, vbfloat16mf4_t vd, vbfloat16mf4_t vs1, vbfloat16mf4_t vs2, size_t vl) {
+ return __riscv_vfnmacc_vv_bf16mf4_tumu(mask, vd, vs1, vs2, vl);
+}
+
+// CHECK-RV64-LABEL: define dso_local <vscale x 1 x bfloat> @test_vfnmacc_vf_bf16mf4_tumu(
+// CHECK-RV64-SAME: <vscale x 1 x i1> [[MASK:%.*]], <vscale x 1 x bfloat> [[VD:%.*]], bfloat noundef [[RS1:%.*]], <vscale x 1 x bfloat> [[VS2:%.*]], i64 noundef [[VL:%.*]]) #[[ATTR0]] {
+// CHECK-RV64-NEXT: [[ENTRY:.*:]]
+// CHECK-RV64-NEXT: [[TMP0:%.*]] = call <vscale x 1 x bfloat> @llvm.riscv.vfnmacc.mask.nxv1bf16.bf16.i64(<vscale x 1 x bfloat> [[VD]], bfloat [[RS1]], <vscale x 1 x bfloat> [[VS2]], <vscale x 1 x i1> [[MASK]], i64 7, i64 [[VL]], i64 0)
+// CHECK-RV64-NEXT: ret <vscale x 1 x bfloat> [[TMP0]]
+//
+vbfloat16mf4_t test_vfnmacc_vf_bf16mf4_tumu(vbool64_t mask, vbfloat16mf4_t vd, __bf16 rs1, vbfloat16mf4_t vs2, size_t vl) {
+ return __riscv_vfnmacc_vf_bf16mf4_tumu(mask, vd, rs1, vs2, vl);
+}
+
+// CHECK-RV64-LABEL: define dso_local <vscale x 2 x bfloat> @test_vfnmacc_vv_bf16mf2_tumu(
+// CHECK-RV64-SAME: <vscale x 2 x i1> [[MASK:%.*]], <vscale x 2 x bfloat> [[VD:%.*]], <vscale x 2 x bfloat> [[VS1:%.*]], <vscale x 2 x bfloat> [[VS2:%.*]], i64 noundef [[VL:%.*]]) #[[ATTR0]] {
+// CHECK-RV64-NEXT: [[ENTRY:.*:]]
+// CHECK-RV64-NEXT: [[TMP0:%.*]] = call <vscale x 2 x bfloat> @llvm.riscv.vfnmacc.mask.nxv2bf16.nxv2bf16.i64(<vscale x 2 x bfloat> [[VD]], <vscale x 2 x bfloat> [[VS1]], <vscale x 2 x bfloat> [[VS2]], <vscale x 2 x i1> [[MASK]], i64 7, i64 [[VL]], i64 0)
+// CHECK-RV64-NEXT: ret <vscale x 2 x bfloat> [[TMP0]]
+//
+vbfloat16mf2_t test_vfnmacc_vv_bf16mf2_tumu(vbool32_t mask, vbfloat16mf2_t vd, vbfloat16mf2_t vs1, vbfloat16mf2_t vs2, size_t vl) {
+ return __riscv_vfnmacc_vv_bf16mf2_tumu(mask, vd, vs1, vs2, vl);
+}
+
+// CHECK-RV64-LABEL: define dso_local <vscale x 2 x bfloat> @test_vfnmacc_vf_bf16mf2_tumu(
+// CHECK-RV64-SAME: <vscale x 2 x i1> [[MASK:%.*]], <vscale x 2 x bfloat> [[VD:%.*]], bfloat noundef [[RS1:%.*]], <vscale x 2 x bfloat> [[VS2:%.*]], i64 noundef [[VL:%.*]]) #[[ATTR0]] {
+// CHECK-RV64-NEXT: [[ENTRY:.*:]]
+// CHECK-RV64-NEXT: [[TMP0:%.*]] = call <vscale x 2 x bfloat> @llvm.riscv.vfnmacc.mask.nxv2bf16.bf16.i64(<vscale x 2 x bfloat> [[VD]], bfloat [[RS1]], <vscale x 2 x bfloat> [[VS2]], <vscale x 2 x i1> [[MASK]], i64 7, i64 [[VL]], i64 0)
+// CHECK-RV64-NEXT: ret <vscale x 2 x bfloat> [[TMP0]]
+//
+vbfloat16mf2_t test_vfnmacc_vf_bf16mf2_tumu(vbool32_t mask, vbfloat16mf2_t vd, __bf16 rs1, vbfloat16mf2_t vs2, size_t vl) {
+ return __riscv_vfnmacc_vf_bf16mf2_tumu(mask, vd, rs1, vs2, vl);
+}
+
+// CHECK-RV64-LABEL: define dso_local <vscale x 4 x bfloat> @test_vfnmacc_vv_bf16m1_tumu(
+// CHECK-RV64-SAME: <vscale x 4 x i1> [[MASK:%.*]], <vscale x 4 x bfloat> [[VD:%.*]], <vscale x 4 x bfloat> [[VS1:%.*]], <vscale x 4 x bfloat> [[VS2:%.*]], i64 noundef [[VL:%.*]]) #[[ATTR0]] {
+// CHECK-RV64-NEXT: [[ENTRY:.*:]]
+// CHECK-RV64-NEXT: [[TMP0:%.*]] = call <vscale x 4 x bfloat> @llvm.riscv.vfnmacc.mask.nxv4bf16.nxv4bf16.i64(<vscale x 4 x bfloat> [[VD]], <vscale x 4 x bfloat> [[VS1]], <vscale x 4 x bfloat> [[VS2]], <vscale x 4 x i1> [[MASK]], i64 7, i64 [[VL]], i64 0)
+// CHECK-RV64-NEXT: ret <vscale x 4 x bfloat> [[TMP0]]
+//
+vbfloat16m1_t test_vfnmacc_vv_bf16m1_tumu(vbool16_t mask, vbfloat16m1_t vd, vbfloat16m1_t vs1, vbfloat16m1_t vs2, size_t vl) {
+ return __riscv_vfnmacc_vv_bf16m1_tumu(mask, vd, vs1, vs2, vl);
+}
+
+// CHECK-RV64-LABEL: define dso_local <vscale x 4 x bfloat> @test_vfnmacc_vf_bf16m1_tumu(
+// CHECK-RV64-SAME: <vscale x 4 x i1> [[MASK:%.*]], <vscale x 4 x bfloat> [[VD:%.*]], bfloat noundef [[RS1:%.*]], <vscale x 4 x bfloat> [[VS2:%.*]], i64 noundef [[VL:%.*]]) #[[ATTR0]] {
+// CHECK-RV64-NEXT: [[ENTRY:.*:]]
+// CHECK-RV64-NEXT: [[TMP0:%.*]] = call <vscale x 4 x bfloat> @llvm.riscv.vfnmacc.mask.nxv4bf16.bf16.i64(<vscale x 4 x bfloat> [[VD]], bfloat [[RS1]], <vscale x 4 x bfloat> [[VS2]], <vscale x 4 x i1> [[MASK]], i64 7, i64 [[VL]], i64 0)
+// CHECK-RV64-NEXT: ret <vscale x 4 x bfloat> [[TMP0]]
+//
+vbfloat16m1_t test_vfnmacc_vf_bf16m1_tumu(vbool16_t mask, vbfloat16m1_t vd, __bf16 rs1, vbfloat16m1_t vs2, size_t vl) {
+ return __riscv_vfnmacc_vf_bf16m1_tumu(mask, vd, rs1, vs2, vl);
+}
+
+// CHECK-RV64-LABEL: define dso_local <vscale x 8 x bfloat> @test_vfnmacc_vv_bf16m2_tumu(
+// CHECK-RV64-SAME: <vscale x 8 x i1> [[MASK:%.*]], <vscale x 8 x bfloat> [[VD:%.*]], <vscale x 8 x bfloat> [[VS1:%.*]], <vscale x 8 x bfloat> [[VS2:%.*]], i64 noundef [[VL:%.*]]) #[[ATTR0]] {
+// CHECK-RV64-NEXT: [[ENTRY:.*:]]
+// CHECK-RV64-NEXT: [[TMP0:%.*]] = call <vscale x 8 x bfloat> @llvm.riscv.vfnmacc.mask.nxv8bf16.nxv8bf16.i64(<vscale x 8 x bfloat> [[VD]], <vscale x 8 x bfloat> [[VS1]], <vscale x 8 x bfloat> [[VS2]], <vscale x 8 x i1> [[MASK]], i64 7, i64 [[VL]], i64 0)
+// CHECK-RV64-NEXT: ret <vscale x 8 x bfloat> [[TMP0]]
+//
+vbfloat16m2_t test_vfnmacc_vv_bf16m2_tumu(vbool8_t mask, vbfloat16m2_t vd, vbfloat16m2_t vs1, vbfloat16m2_t vs2, size_t vl) {
+ return __riscv_vfnmacc_vv_bf16m2_tumu(mask, vd, vs1, vs2, vl);
+}
+
+// CHECK-RV64-LABEL: define dso_local <vscale x 8 x bfloat> @test_vfnmacc_vf_bf16m2_tumu(
+// CHECK-RV64-SAME: <vscale x 8 x i1> [[MASK:%.*]], <vscale x 8 x bfloat> [[VD:%.*]], bfloat noundef [[RS1:%.*]], <vscale x 8 x bfloat> [[VS2:%.*]], i64 noundef [[VL:%.*]]) #[[ATTR0]] {
+// CHECK-RV64-NEXT: [[ENTRY:.*:]]
+// CHECK-RV64-NEXT: [[TMP0:%.*]] = call <vscale x 8 x bfloat> @llvm.riscv.vfnmacc.mask.nxv8bf16.bf16.i64(<vscale x 8 x bfloat> [[VD]], bfloat [[RS1]], <vscale x 8 x bfloat> [[VS2]], <vscale x 8 x i1> [[MASK]], i64 7, i64 [[VL]], i64 0)
+// CHECK-RV64-NEXT: ret <vscale x 8 x bfloat> [[TMP0]]
+//
+vbfloat16m2_t test_vfnmacc_vf_bf16m2_tumu(vbool8_t mask, vbfloat16m2_t vd, __bf16 rs1, vbfloat16m2_t vs2, size_t vl) {
+ return __riscv_vfnmacc_vf_bf16m2_tumu(mask, vd, rs1, vs2, vl);
+}
+
+// CHECK-RV64-LABEL: define dso_local <vscale x 16 x bfloat> @test_vfnmacc_vv_bf16m4_tumu(
+// CHECK-RV64-SAME: <vscale x 16 x i1> [[MASK:%.*]], <vscale x 16 x bfloat> [[VD:%.*]], <vscale x 16 x bfloat> [[VS1:%.*]], <vscale x 16 x bfloat> [[VS2:%.*]], i64 noundef [[VL:%.*]]) #[[ATTR0]] {
+// CHECK-RV64-NEXT: [[ENTRY:.*:]]
+// CHECK-RV64-NEXT: [[TMP0:%.*]] = call <vscale x 16 x bfloat> @llvm.riscv.vfnmacc.mask.nxv16bf16.nxv16bf16.i64(<vscale x 16 x bfloat> [[VD]], <vscale x 16 x bfloat> [[VS1]], <vscale x 16 x bfloat> [[VS2]], <vscale x 16 x i1> [[MASK]], i64 7, i64 [[VL]], i64 0)
+// CHECK-RV64-NEXT: ret <vscale x 16 x bfloat> [[TMP0]]
+//
+vbfloat16m4_t test_vfnmacc_vv_bf16m4_tumu(vbool4_t mask, vbfloat16m4_t vd, vbfloat16m4_t vs1, vbfloat16m4_t vs2, size_t vl) {
+ return __riscv_vfnmacc_vv_bf16m4_tumu(mask, vd, vs1, vs2, vl);
+}
+
+// CHECK-RV64-LABEL: define dso_local <vscale x 16 x bfloat> @test_vfnmacc_vf_bf16m4_tumu(
+// CHECK-RV64-SAME: <vscale x 16 x i1> [[MASK:%.*]], <vscale x 16 x bfloat> [[VD:%.*]], bfloat noundef [[RS1:%.*]], <vscale x 16 x bfloat> [[VS2:%.*]], i64 noundef [[VL:%.*]]) #[[ATTR0]] {
+// CHECK-RV64-NEXT: [[ENTRY:.*:]]
+// CHECK-RV64-NEXT: [[TMP0:%.*]] = call <vscale x 16 x bfloat> @llvm.riscv.vfnmacc.mask.nxv16bf16.bf16.i64(<vscale x 16 x bfloat> [[VD]], bfloat [[RS1]], <vscale x 16 x bfloat> [[VS2]], <vscale x 16 x i1> [[MASK]], i64 7, i64 [[VL]], i64 0)
+// CHECK-RV64-NEXT: ret <vscale x 16 x bfloat> [[TMP0]]
+//
+vbfloat16m4_t test_vfnmacc_vf_bf16m4_tumu(vbool4_t mask, vbfloat16m4_t vd, __bf16 rs1, vbfloat16m4_t vs2, size_t vl) {
+ return __riscv_vfnmacc_vf_bf16m4_tumu(mask, vd, rs1, vs2, vl);
+}
+
+// CHECK-RV64-LABEL: define dso_local <vscale x 32 x bfloat> @test_vfnmacc_vv_bf16m8_tumu(
+// CHECK-RV64-SAME: <vscale x 32 x i1> [[MASK:%.*]], <vscale x 32 x bfloat> [[VD:%.*]], <vscale x 32 x bfloat> [[VS1:%.*]], <vscale x 32 x bfloat> [[VS2:%.*]], i64 noundef [[VL:%.*]]) #[[ATTR0]] {
+// CHECK-RV64-NEXT: [[ENTRY:.*:]]
+// CHECK-RV64-NEXT: [[TMP0:%.*]] = call <vscale x 32 x bfloat> @llvm.riscv.vfnmacc.mask.nxv32bf16.nxv32bf16.i64(<vscale x 32 x bfloat> [[VD]], <vscale x 32 x bfloat> [[VS1]], <vscale x 32 x bfloat> [[VS2]], <vscale x 32 x i1> [[MASK]], i64 7, i64 [[VL]], i64 0)
+// CHECK-RV64-NEXT: ret <vscale x 32 x bfloat> [[TMP0]]
+//
+vbfloat16m8_t test_vfnmacc_vv_bf16m8_tumu(vbool2_t mask, vbfloat16m8_t vd, vbfloat16m8_t vs1, vbfloat16m8_t vs2, size_t vl) {
+ return __riscv_vfnmacc_vv_bf16m8_tumu(mask, vd, vs1, vs2, vl);
+}
+
+// CHECK-RV64-LABEL: define dso_local <vscale x 32 x bfloat> @test_vfnmacc_vf_bf16m8_tumu(
+// CHECK-RV64-SAME: <vscale x 32 x i1> [[MASK:%.*]], <vscale x 32 x bfloat> [[VD:%.*]], bfloat noundef [[RS1:%.*]], <vscale x 32 x bfloat> [[VS2:%.*]], i64 noundef [[VL:%.*]]) #[[ATTR0]] {
+// CHECK-RV64-NEXT: [[ENTRY:.*:]]
+// CHECK-RV64-NEXT: [[TMP0:%.*]] = call <vscale x 32 x bfloat> @llvm.riscv.vfnmacc.mask.nxv32bf16.bf16.i64(<vscale x 32 x bfloat> [[VD]], bfloat [[RS1]], <vscale x 32 x bfloat> [[VS2]], <vscale x 32 x i1> [[MASK]], i64 7, i64 [[VL]], i64 0)
+// CHECK-RV64-NEXT: ret <vscale x 32 x bfloat> [[TMP0]]
+//
+vbfloat16m8_t test_vfnmacc_vf_bf16m8_tumu(vbool2_t mask, vbfloat16m8_t vd, __bf16 rs1, vbfloat16m8_t vs2, size_t vl) {
+ return __riscv_vfnmacc_vf_bf16m8_tumu(mask, vd, rs1, vs2, vl);
+}
+
+// CHECK-RV64-LABEL: define dso_local <vscale x 1 x bfloat> @test_vfnmacc_vv_bf16mf4_mu(
+// CHECK-RV64-SAME: <vscale x 1 x i1> [[MASK:%.*]], <vscale x 1 x bfloat> [[VD:%.*]], <vscale x 1 x bfloat> [[VS1:%.*]], <vscale x 1 x bfloat> [[VS2:%.*]], i64 noundef [[VL:%.*]]) #[[ATTR0]] {
+// CHECK-RV64-NEXT: [[ENTRY:.*:]]
+// CHECK-RV64-NEXT: [[TMP0:%.*]] = call <vscale x 1 x bfloat> @llvm.riscv.vfnmacc.mask.nxv1bf16.nxv1bf16.i64(<vscale x 1 x bfloat> [[VD]], <vscale x 1 x bfloat> [[VS1]], <vscale x 1 x bfloat> [[VS2]], <vscale x 1 x i1> [[MASK]], i64 7, i64 [[VL]], i64 1)
+// CHECK-RV64-NEXT: ret <vscale x 1 x bfloat> [[TMP0]]
+//
+vbfloat16mf4_t test_vfnmacc_vv_bf16mf4_mu(vbool64_t mask, vbfloat16mf4_t vd, vbfloat16mf4_t vs1, vbfloat16mf4_t vs2, size_t vl) {
+ return __riscv_vfnmacc_vv_bf16mf4_mu(mask, vd, vs1, vs2, vl);
+}
+
+// CHECK-RV64-LABEL: define dso_local <vscale x 1 x bfloat> @test_vfnmacc_vf_bf16mf4_mu(
+// CHECK-RV64-SAME: <vscale x 1 x i1> [[MASK:%.*]], <vscale x 1 x bfloat> [[VD:%.*]], bfloat noundef [[RS1:%.*]], <vscale x 1 x bfloat> [[VS2:%.*]], i64 noundef [[VL:%.*]]) #[[ATTR0]] {
+// CHECK-RV64-NEXT: [[ENTRY:.*:]]
+// CHECK-RV64-NEXT: [[TMP0:%.*]] = call <vscale x 1 x bfloat> @llvm.riscv.vfnmacc.mask.nxv1bf16.bf16.i64(<vscale x 1 x bfloat> [[VD]], bfloat [[RS1]], <vscale x 1 x bfloat> [[VS2]], <vscale x 1 x i1> [[MASK]], i64 7, i64 [[VL]], i64 1)
+// CHECK-RV64-NEXT: ret <vscale x 1 x bfloat> [[TMP0]]
+//
+vbfloat16mf4_t test_vfnmacc_vf_bf16mf4_mu(vbool64_t mask, vbfloat16mf4_t vd, __bf16 rs1, vbfloat16mf4_t vs2, size_t vl) {
+ return __riscv_vfnmacc_vf_bf16mf4_mu(mask, vd, rs1, vs2, vl);
+}
+
+// CHECK-RV64-LABEL: define dso_local <vscale x 2 x bfloat> @test_vfnmacc_vv_bf16mf2_mu(
+// CHECK-RV64-SAME: <vscale x 2 x i1> [[MASK:%.*]], <vscale x 2 x bfloat> [[VD:%.*]], <vscale x 2 x bfloat> [[VS1:%.*]], <vscale x 2 x bfloat> [[VS2:%.*]], i64 noundef [[VL:%.*]]) #[[ATTR0]] {
+// CHECK-RV64-NEXT: [[ENTRY:.*:]]
+// CHECK-RV64-NEXT: [[TMP0:%.*]] = call <vscale x 2 x bfloat> @llvm.riscv.vfnmacc.mask.nxv2bf16.nxv2bf16.i64(<vscale x 2 x bfloat> [[VD]], <vscale x 2 x bfloat> [[VS1]], <vscale x 2 x bfloat> [[VS2]], <vscale x 2 x i1> [[MASK]], i64 7, i64 [[VL]], i64 1)
+// CHECK-RV64-NEXT: ret <vscale x 2 x bfloat> [[TMP0]]
+//
+vbfloat16mf2_t test_vfnmacc_vv_bf16mf2_mu(vbool32_t mask, vbfloat16mf2_t vd, vbfloat16mf2_t vs1, vbfloat16mf2_t vs2, size_t vl) {
+ return __riscv_vfnmacc_vv_bf16mf2_mu(mask, vd, vs1, vs2, vl);
+}
+
+// CHECK-RV64-LABEL: define dso_local <vscale x 2 x bfloat> @test_vfnmacc_vf_bf16mf2_mu(
+// CHECK-RV64-SAME: <vscale x 2 x i1> [[MASK:%.*]], <vscale x 2 x bfloat> [[VD:%.*]], bfloat noundef [[RS1:%.*]], <vscale x 2 x bfloat> [[VS2:%.*]], i64 noundef [[VL:%.*]]) #[[ATTR0]] {
+// CHECK-RV64-NEXT: [[ENTRY:.*:]]
+// CHECK-RV64-NEXT: [[TMP0:%.*]] = call <vscale x 2 x bfloat> @llvm.riscv.vfnmacc.mask.nxv2bf16.bf16.i64(<vscale x 2 x bfloat> [[VD]], bfloat [[RS1]], <vscale x 2 x bfloat> [[VS2]], <vscale x 2 x i1> [[MASK]], i64 7, i64 [[VL]], i64 1)
+// CHECK-RV64-NEXT: ret <vscale x 2 x bfloat> [[TMP0]]
+//
+vbfloat16mf2_t test_vfnmacc_vf_bf16mf2_mu(vbool32_t mask, vbfloat16mf2_t vd, __bf16 rs1, vbfloat16mf2_t vs2, size_t vl) {
+ return __riscv_vfnmacc_vf_bf16mf2_mu(mask, vd, rs1, vs2, vl);
+}
+
+// CHECK-RV64-LABEL: define dso_local <vscale x 4 x bfloat> @test_vfnmacc_vv_bf16m1_mu(
+// CHECK-RV64-SAME: <vscale x 4 x i1> [[MASK:%.*]], <vscale x 4 x bfloat> [[VD:%.*]], <vscale x 4 x bfloat> [[VS1:%.*]], <vscale x 4 x bfloat> [[VS2:%.*]], i64 noundef [[VL:%.*]]) #[[ATTR0]] {
+// CHECK-RV64-NEXT: [[ENTRY:.*:]]
+// CHECK-RV64-NEXT: [[TMP0:%.*]] = call <vscale x 4 x bfloat> @llvm.riscv.vfnmacc.mask.nxv4bf16.nxv4bf16.i64(<vscale x 4 x bfloat> [[VD]], <vscale x 4 x bfloat> [[VS1]], <vscale x 4 x bfloat> [[VS2]], <vscale x 4 x i1> [[MASK]], i64 7, i64 [[VL]], i64 1)
+// CHECK-RV64-NEXT: ret <vscale x 4 x bfloat> [[TMP0]]
+//
+vbfloat16m1_t test_vfnmacc_vv_bf16m1_mu(vbool16_t mask, vbfloat16m1_t vd, vbfloat16m1_t vs1, vbfloat16m1_t vs2, size_t vl) {
+ return __riscv_vfnmacc_vv_bf16m1_mu(mask, vd, vs1, vs2, vl);
+}
+
+// CHECK-RV64-LABEL: define dso_local <vscale x 4 x bfloat> @test_vfnmacc_vf_bf16m1_mu(
+// CHECK-RV64-SAME: <vscale x 4 x i1> [[MASK:%.*]], <vscale x 4 x bfloat> [[VD:%.*]], bfloat noundef [[RS1:%.*]], <vscale x 4 x bfloat> [[VS2:%.*]], i64 noundef [[VL:%.*]]) #[[ATTR0]] {
+// CHECK-RV64-NEXT: [[ENTRY:.*:]]
+// CHECK-RV64-NEXT: [[TMP0:%.*]] = call <vscale x 4 x bfloat> @llvm.riscv.vfnmacc.mask.nxv4bf16.bf16.i64(<vscale x 4 x bfloat> [[VD]], bfloat [[RS1]], <vscale x 4 x bfloat> [[VS2]], <vscale x 4 x i1> [[MASK]], i64 7, i64 [[VL]], i64 1)
+// CHECK-RV64-NEXT: ret <vscale x 4 x bfloat> [[TMP0]]
+//
+vbfloat16m1_t test_vfnmacc_vf_bf16m1_mu(vbool16_t mask, vbfloat16m1_t vd, __bf16 rs1, vbfloat16m1_t vs2, size_t vl) {
+ return __riscv_vfnmacc_vf_bf16m1_mu(mask, vd, rs1, vs2, vl);
+}
+
+// CHECK-RV64-LABEL: define dso_local <vscale x 8 x bfloat> @test_vfnmacc_vv_bf16m2_mu(
+// CHECK-RV64-SAME: <vscale x 8 x i1> [[MASK:%.*]], <vscale x 8 x bfloat> [[VD:%.*]], <vscale x 8 x bfloat> [[VS1:%.*]], <vscale x 8 x bfloat> [[VS2:%.*]], i64 noundef [[VL:%.*]]) #[[ATTR0]] {
+// CHECK-RV64-NEXT: [[ENTRY:.*:]]
+// CHECK-RV64-NEXT: [[TMP0:%.*]] = call <vscale x 8 x bfloat> @llvm.riscv.vfnmacc.mask.nxv8bf16.nxv8bf16.i64(<vscale x 8 x bfloat> [[VD]], <vscale x 8 x bfloat> [[VS1]], <vscale x 8 x bfloat> [[VS2]], <vscale x 8 x i1> [[MASK]], i64 7, i64 [[VL]], i64 1)
+// CHECK-RV64-NEXT: ret <vscale x 8 x bfloat> [[TMP0]]
+//
+vbfloat16m2_t test_vfnmacc_vv_bf16m2_mu(vbool8_t mask, vbfloat16m2_t vd, vbfloat16m2_t vs1, vbfloat16m2_t vs2, size_t vl) {
+ return __riscv_vfnmacc_vv_bf16m2_mu(mask, vd, vs1, vs2, vl);
+}
+
+// CHECK-RV64-LABEL: define dso_local <vscale x 8 x bfloat> @test_vfnmacc_vf_bf16m2_mu(
+// CHECK-RV64-SAME: <vscale x 8 x i1> [[MASK:%.*]], <vscale x 8 x bfloat> [[VD:%.*]], bfloat noundef [[RS1:%.*]], <vscale x 8 x bfloat> [[VS2:%.*]], i64 noundef [[VL:%.*]]) #[[ATTR0]] {
+// CHECK-RV64-NEXT: [[ENTRY:.*:]]
+// CHECK-RV64-NEXT: [[TMP0:%.*]] = call <vscale x 8 x bfloat> @llvm.riscv.vfnmacc.mask.nxv8bf16.bf16.i64(<vscale x 8 x bfloat> [[VD]], bfloat [[RS1]], <vscale x 8 x bfloat> [[VS2]], <vscale x 8 x i1> [[MASK]], i64 7, i64 [[VL]], i64 1)
+// CHECK-RV64-NEXT: ret <vscale x 8 x bfloat> [[TMP0]]
+//
+vbfloat16m2_t test_vfnmacc_vf_bf16m2_mu(vbool8_t mask, vbfloat16m2_t vd, __bf16 rs1, vbfloat16m2_t vs2, size_t vl) {
+ return __riscv_vfnmacc_vf_bf16m2_mu(mask, vd, rs1, vs2, vl);
+}
+
+// CHECK-RV64-LABEL: define dso_local <vscale x 16 x bfloat> @test_vfnmacc_vv_bf16m4_mu(
+// CHECK-RV64-SAME: <vscale x 16 x i1> [[MASK:%.*]], <vscale x 16 x bfloat> [[VD:%.*]], <vscale x 16 x bfloat> [[VS1:%.*]], <vscale x 16 x bfloat> [[VS2:%.*]], i64 noundef [[VL:%.*]]) #[[ATTR0]] {
+// CHECK-RV64-NEXT: [[ENTRY:.*:]]
+// CHECK-RV64-NEXT: [[TMP0:%.*]] = call <vscale x 16 x bfloat> @llvm.riscv.vfnmacc.mask.nxv16bf16.nxv16bf16.i64(<vscale x 16 x bfloat> [[VD]], <vscale x 16 x bfloat> [[VS1]], <vscale x 16 x bfloat> [[VS2]], <vscale x 16 x i1> [[MASK]], i64 7, i64 [[VL]], i64 1)
+// CHECK-RV64-NEXT: ret <vscale x 16 x bfloat> [[TMP0]]
+//
+vbfloat16m4_t test_vfnmacc_vv_bf16m4_mu(vbool4_t mask, vbfloat16m4_t vd, vbfloat16m4_t vs1, vbfloat16m4_t vs2, size_t vl) {
+ return __riscv_vfnmacc_vv_bf16m4_mu(mask, vd, vs1, vs2, vl);
+}
+
+// CHECK-RV64-LABEL: define dso_local <vscale x 16 x bfloat> @test_vfnmacc_vf_bf16m4_mu(
+// CHECK-RV64-SAME: <vscale x 16 x i1> [[MASK:%.*]], <vscale x 16 x bfloat> [[VD:%.*]], bfloat noundef [[RS1:%.*]], <vscale x 16 x bfloat> [[VS2:%.*]], i64 noundef [[VL:%.*]]) #[[ATTR0]] {
+// CHECK-RV64-NEXT: [[ENTRY:.*:]]
+// CHECK-RV64-NEXT: [[TMP0:%.*]] = call <vscale x 16 x bfloat> @llvm.riscv.vfnmacc.mask.nxv16bf16.bf16.i64(<vscale x 16 x bfloat> [[VD]], bfloat [[RS1]], <vscale x 16 x bfloat> [[VS2]], <vscale x 16 x i1> [[MASK]], i64 7, i64 [[VL]], i64 1)
+// CHECK-RV64-NEXT: ret <vscale x 16 x bfloat> [[TMP0]]
+//
+vbfloat16m4_t test_vfnmacc_vf_bf16m4_mu(vbool4_t mask, vbfloat16m4_t vd, __bf16 rs1, vbfloat16m4_t vs2, size_t vl) {
+ return __riscv_vfnmacc_vf_bf16m4_mu(mask, vd, rs1, vs2, vl);
+}
+
+// CHECK-RV64-LABEL: define dso_local <vscale x 32 x bfloat> @test_vfnmacc_vv_bf16m8_mu(
+// CHECK-RV64-SAME: <vscale x 32 x i1> [[MASK:%.*]], <vscale x 32 x bfloat> [[VD:%.*]], <vscale x 32 x bfloat> [[VS1:%.*]], <vscale x 32 x bfloat> [[VS2:%.*]], i64 noundef [[VL:%.*]]) #[[ATTR0]] {
+// CHECK-RV64-NEXT: [[ENTRY:.*:]]
+// CHECK-RV64-NEXT: [[TMP0:%.*]] = call <vscale x 32 x bfloat> @llvm.riscv.vfnmacc.mask.nxv32bf16.nxv32bf16.i64(<vscale x 32 x bfloat> [[VD]], <vscale x 32 x bfloat> [[VS1]], <vscale x 32 x bfloat> [[VS2]], <vscale x 32 x i1> [[MASK]], i64 7, i64 [[VL]], i64 1)
+// CHECK-RV64-NEXT: ret <vscale x 32 x bfloat> [[TMP0]]
+//
+vbfloat16m8_t test_vfnmacc_vv_bf16m8_mu(vbool2_t mask, vbfloat16m8_t vd, vbfloat16m8_t vs1, vbfloat16m8_t vs2, size_t vl) {
+ return __riscv_vfnmacc_vv_bf16m8_mu(mask, vd, vs1, vs2, vl);
+}
+
+// CHECK-RV64-LABEL: define dso_local <vscale x 32 x bfloat> @test_vfnmacc_vf_bf16m8_mu(
+// CHECK-RV64-SAME: <vscale x 32 x i1> [[MASK:%.*]], <vscale x 32 x bfloat> [[VD:%.*]], bfloat noundef [[RS1:%.*]], <vscale x 32 x bfloat> [[VS2:%.*]], i64 noundef [[VL:%.*]]) #[[ATTR0]] {
+// CHECK-RV64-NEXT: [[ENTRY:.*:]]
+// CHECK-RV64-NEXT: [[TMP0:%.*]] = call <vscale x 32 x bfloat> @llvm.riscv.vfnmacc.mask.nxv32bf16.bf16.i64(<vscale x 32 x bfloat> [[VD]], bfloat [[RS1]], <vscale x 32 x bfloat> [[VS2]], <vscale x 32 x i1> [[MASK]], i64 7, i64 [[VL]], i64 1)
+// CHECK-RV64-NEXT: ret <vscale x 32 x bfloat> [[TMP0]]
+//
+vbfloat16m8_t test_vfnmacc_vf_bf16m8_mu(vbool2_t mask, vbfloat16m8_t vd, __bf16 rs1, vbfloat16m8_t vs2, size_t vl) {
+ return __riscv_vfnmacc_vf_bf16m8_mu(mask, vd, rs1, vs2, vl);
+}
+
diff --git a/clang/test/CodeGen/RISCV/rvv-intrinsics-autogenerated/zvfbfa/policy/non-overloaded/vfnmadd.c b/clang/test/CodeGen/RISCV/rvv-intrinsics-autogenerated/zvfbfa/policy/non-overloaded/vfnmadd.c
new file mode 100644
index 0000000..d7e6b82
--- /dev/null
+++ b/clang/test/CodeGen/RISCV/rvv-intrinsics-autogenerated/zvfbfa/policy/non-overloaded/vfnmadd.c
@@ -0,0 +1,489 @@
+// NOTE: Assertions have been autogenerated by utils/update_cc_test_checks.py UTC_ARGS: --version 5
+// REQUIRES: riscv-registered-target
+// RUN: %clang_cc1 -triple riscv64 -target-feature +v \
+// RUN: -target-feature +experimental-zvfbfa -disable-O0-optnone \
+// RUN: -emit-llvm %s -o - | opt -S -passes=mem2reg | \
+// RUN: FileCheck --check-prefix=CHECK-RV64 %s
+
+#include <riscv_vector.h>
+
+// CHECK-RV64-LABEL: define dso_local <vscale x 1 x bfloat> @test_vfnmadd_vv_bf16mf4_tu(
+// CHECK-RV64-SAME: <vscale x 1 x bfloat> [[VD:%.*]], <vscale x 1 x bfloat> [[VS1:%.*]], <vscale x 1 x bfloat> [[VS2:%.*]], i64 noundef [[VL:%.*]]) #[[ATTR0:[0-9]+]] {
+// CHECK-RV64-NEXT: [[ENTRY:.*:]]
+// CHECK-RV64-NEXT: [[TMP0:%.*]] = call <vscale x 1 x bfloat> @llvm.riscv.vfnmadd.nxv1bf16.nxv1bf16.i64(<vscale x 1 x bfloat> [[VD]], <vscale x 1 x bfloat> [[VS1]], <vscale x 1 x bfloat> [[VS2]], i64 7, i64 [[VL]], i64 2)
+// CHECK-RV64-NEXT: ret <vscale x 1 x bfloat> [[TMP0]]
+//
+vbfloat16mf4_t test_vfnmadd_vv_bf16mf4_tu(vbfloat16mf4_t vd, vbfloat16mf4_t vs1, vbfloat16mf4_t vs2, size_t vl) {
+ return __riscv_vfnmadd_vv_bf16mf4_tu(vd, vs1, vs2, vl);
+}
+
+// CHECK-RV64-LABEL: define dso_local <vscale x 1 x bfloat> @test_vfnmadd_vf_bf16mf4_tu(
+// CHECK-RV64-SAME: <vscale x 1 x bfloat> [[VD:%.*]], bfloat noundef [[RS1:%.*]], <vscale x 1 x bfloat> [[VS2:%.*]], i64 noundef [[VL:%.*]]) #[[ATTR0]] {
+// CHECK-RV64-NEXT: [[ENTRY:.*:]]
+// CHECK-RV64-NEXT: [[TMP0:%.*]] = call <vscale x 1 x bfloat> @llvm.riscv.vfnmadd.nxv1bf16.bf16.i64(<vscale x 1 x bfloat> [[VD]], bfloat [[RS1]], <vscale x 1 x bfloat> [[VS2]], i64 7, i64 [[VL]], i64 2)
+// CHECK-RV64-NEXT: ret <vscale x 1 x bfloat> [[TMP0]]
+//
+vbfloat16mf4_t test_vfnmadd_vf_bf16mf4_tu(vbfloat16mf4_t vd, __bf16 rs1, vbfloat16mf4_t vs2, size_t vl) {
+ return __riscv_vfnmadd_vf_bf16mf4_tu(vd, rs1, vs2, vl);
+}
+
+// CHECK-RV64-LABEL: define dso_local <vscale x 2 x bfloat> @test_vfnmadd_vv_bf16mf2_tu(
+// CHECK-RV64-SAME: <vscale x 2 x bfloat> [[VD:%.*]], <vscale x 2 x bfloat> [[VS1:%.*]], <vscale x 2 x bfloat> [[VS2:%.*]], i64 noundef [[VL:%.*]]) #[[ATTR0]] {
+// CHECK-RV64-NEXT: [[ENTRY:.*:]]
+// CHECK-RV64-NEXT: [[TMP0:%.*]] = call <vscale x 2 x bfloat> @llvm.riscv.vfnmadd.nxv2bf16.nxv2bf16.i64(<vscale x 2 x bfloat> [[VD]], <vscale x 2 x bfloat> [[VS1]], <vscale x 2 x bfloat> [[VS2]], i64 7, i64 [[VL]], i64 2)
+// CHECK-RV64-NEXT: ret <vscale x 2 x bfloat> [[TMP0]]
+//
+vbfloat16mf2_t test_vfnmadd_vv_bf16mf2_tu(vbfloat16mf2_t vd, vbfloat16mf2_t vs1, vbfloat16mf2_t vs2, size_t vl) {
+ return __riscv_vfnmadd_vv_bf16mf2_tu(vd, vs1, vs2, vl);
+}
+
+// CHECK-RV64-LABEL: define dso_local <vscale x 2 x bfloat> @test_vfnmadd_vf_bf16mf2_tu(
+// CHECK-RV64-SAME: <vscale x 2 x bfloat> [[VD:%.*]], bfloat noundef [[RS1:%.*]], <vscale x 2 x bfloat> [[VS2:%.*]], i64 noundef [[VL:%.*]]) #[[ATTR0]] {
+// CHECK-RV64-NEXT: [[ENTRY:.*:]]
+// CHECK-RV64-NEXT: [[TMP0:%.*]] = call <vscale x 2 x bfloat> @llvm.riscv.vfnmadd.nxv2bf16.bf16.i64(<vscale x 2 x bfloat> [[VD]], bfloat [[RS1]], <vscale x 2 x bfloat> [[VS2]], i64 7, i64 [[VL]], i64 2)
+// CHECK-RV64-NEXT: ret <vscale x 2 x bfloat> [[TMP0]]
+//
+vbfloat16mf2_t test_vfnmadd_vf_bf16mf2_tu(vbfloat16mf2_t vd, __bf16 rs1, vbfloat16mf2_t vs2, size_t vl) {
+ return __riscv_vfnmadd_vf_bf16mf2_tu(vd, rs1, vs2, vl);
+}
+
+// CHECK-RV64-LABEL: define dso_local <vscale x 4 x bfloat> @test_vfnmadd_vv_bf16m1_tu(
+// CHECK-RV64-SAME: <vscale x 4 x bfloat> [[VD:%.*]], <vscale x 4 x bfloat> [[VS1:%.*]], <vscale x 4 x bfloat> [[VS2:%.*]], i64 noundef [[VL:%.*]]) #[[ATTR0]] {
+// CHECK-RV64-NEXT: [[ENTRY:.*:]]
+// CHECK-RV64-NEXT: [[TMP0:%.*]] = call <vscale x 4 x bfloat> @llvm.riscv.vfnmadd.nxv4bf16.nxv4bf16.i64(<vscale x 4 x bfloat> [[VD]], <vscale x 4 x bfloat> [[VS1]], <vscale x 4 x bfloat> [[VS2]], i64 7, i64 [[VL]], i64 2)
+// CHECK-RV64-NEXT: ret <vscale x 4 x bfloat> [[TMP0]]
+//
+vbfloat16m1_t test_vfnmadd_vv_bf16m1_tu(vbfloat16m1_t vd, vbfloat16m1_t vs1, vbfloat16m1_t vs2, size_t vl) {
+ return __riscv_vfnmadd_vv_bf16m1_tu(vd, vs1, vs2, vl);
+}
+
+// CHECK-RV64-LABEL: define dso_local <vscale x 4 x bfloat> @test_vfnmadd_vf_bf16m1_tu(
+// CHECK-RV64-SAME: <vscale x 4 x bfloat> [[VD:%.*]], bfloat noundef [[RS1:%.*]], <vscale x 4 x bfloat> [[VS2:%.*]], i64 noundef [[VL:%.*]]) #[[ATTR0]] {
+// CHECK-RV64-NEXT: [[ENTRY:.*:]]
+// CHECK-RV64-NEXT: [[TMP0:%.*]] = call <vscale x 4 x bfloat> @llvm.riscv.vfnmadd.nxv4bf16.bf16.i64(<vscale x 4 x bfloat> [[VD]], bfloat [[RS1]], <vscale x 4 x bfloat> [[VS2]], i64 7, i64 [[VL]], i64 2)
+// CHECK-RV64-NEXT: ret <vscale x 4 x bfloat> [[TMP0]]
+//
+vbfloat16m1_t test_vfnmadd_vf_bf16m1_tu(vbfloat16m1_t vd, __bf16 rs1, vbfloat16m1_t vs2, size_t vl) {
+ return __riscv_vfnmadd_vf_bf16m1_tu(vd, rs1, vs2, vl);
+}
+
+// CHECK-RV64-LABEL: define dso_local <vscale x 8 x bfloat> @test_vfnmadd_vv_bf16m2_tu(
+// CHECK-RV64-SAME: <vscale x 8 x bfloat> [[VD:%.*]], <vscale x 8 x bfloat> [[VS1:%.*]], <vscale x 8 x bfloat> [[VS2:%.*]], i64 noundef [[VL:%.*]]) #[[ATTR0]] {
+// CHECK-RV64-NEXT: [[ENTRY:.*:]]
+// CHECK-RV64-NEXT: [[TMP0:%.*]] = call <vscale x 8 x bfloat> @llvm.riscv.vfnmadd.nxv8bf16.nxv8bf16.i64(<vscale x 8 x bfloat> [[VD]], <vscale x 8 x bfloat> [[VS1]], <vscale x 8 x bfloat> [[VS2]], i64 7, i64 [[VL]], i64 2)
+// CHECK-RV64-NEXT: ret <vscale x 8 x bfloat> [[TMP0]]
+//
+vbfloat16m2_t test_vfnmadd_vv_bf16m2_tu(vbfloat16m2_t vd, vbfloat16m2_t vs1, vbfloat16m2_t vs2, size_t vl) {
+ return __riscv_vfnmadd_vv_bf16m2_tu(vd, vs1, vs2, vl);
+}
+
+// CHECK-RV64-LABEL: define dso_local <vscale x 8 x bfloat> @test_vfnmadd_vf_bf16m2_tu(
+// CHECK-RV64-SAME: <vscale x 8 x bfloat> [[VD:%.*]], bfloat noundef [[RS1:%.*]], <vscale x 8 x bfloat> [[VS2:%.*]], i64 noundef [[VL:%.*]]) #[[ATTR0]] {
+// CHECK-RV64-NEXT: [[ENTRY:.*:]]
+// CHECK-RV64-NEXT: [[TMP0:%.*]] = call <vscale x 8 x bfloat> @llvm.riscv.vfnmadd.nxv8bf16.bf16.i64(<vscale x 8 x bfloat> [[VD]], bfloat [[RS1]], <vscale x 8 x bfloat> [[VS2]], i64 7, i64 [[VL]], i64 2)
+// CHECK-RV64-NEXT: ret <vscale x 8 x bfloat> [[TMP0]]
+//
+vbfloat16m2_t test_vfnmadd_vf_bf16m2_tu(vbfloat16m2_t vd, __bf16 rs1, vbfloat16m2_t vs2, size_t vl) {
+ return __riscv_vfnmadd_vf_bf16m2_tu(vd, rs1, vs2, vl);
+}
+
+// CHECK-RV64-LABEL: define dso_local <vscale x 16 x bfloat> @test_vfnmadd_vv_bf16m4_tu(
+// CHECK-RV64-SAME: <vscale x 16 x bfloat> [[VD:%.*]], <vscale x 16 x bfloat> [[VS1:%.*]], <vscale x 16 x bfloat> [[VS2:%.*]], i64 noundef [[VL:%.*]]) #[[ATTR0]] {
+// CHECK-RV64-NEXT: [[ENTRY:.*:]]
+// CHECK-RV64-NEXT: [[TMP0:%.*]] = call <vscale x 16 x bfloat> @llvm.riscv.vfnmadd.nxv16bf16.nxv16bf16.i64(<vscale x 16 x bfloat> [[VD]], <vscale x 16 x bfloat> [[VS1]], <vscale x 16 x bfloat> [[VS2]], i64 7, i64 [[VL]], i64 2)
+// CHECK-RV64-NEXT: ret <vscale x 16 x bfloat> [[TMP0]]
+//
+vbfloat16m4_t test_vfnmadd_vv_bf16m4_tu(vbfloat16m4_t vd, vbfloat16m4_t vs1, vbfloat16m4_t vs2, size_t vl) {
+ return __riscv_vfnmadd_vv_bf16m4_tu(vd, vs1, vs2, vl);
+}
+
+// CHECK-RV64-LABEL: define dso_local <vscale x 16 x bfloat> @test_vfnmadd_vf_bf16m4_tu(
+// CHECK-RV64-SAME: <vscale x 16 x bfloat> [[VD:%.*]], bfloat noundef [[RS1:%.*]], <vscale x 16 x bfloat> [[VS2:%.*]], i64 noundef [[VL:%.*]]) #[[ATTR0]] {
+// CHECK-RV64-NEXT: [[ENTRY:.*:]]
+// CHECK-RV64-NEXT: [[TMP0:%.*]] = call <vscale x 16 x bfloat> @llvm.riscv.vfnmadd.nxv16bf16.bf16.i64(<vscale x 16 x bfloat> [[VD]], bfloat [[RS1]], <vscale x 16 x bfloat> [[VS2]], i64 7, i64 [[VL]], i64 2)
+// CHECK-RV64-NEXT: ret <vscale x 16 x bfloat> [[TMP0]]
+//
+vbfloat16m4_t test_vfnmadd_vf_bf16m4_tu(vbfloat16m4_t vd, __bf16 rs1, vbfloat16m4_t vs2, size_t vl) {
+ return __riscv_vfnmadd_vf_bf16m4_tu(vd, rs1, vs2, vl);
+}
+
+// CHECK-RV64-LABEL: define dso_local <vscale x 32 x bfloat> @test_vfnmadd_vv_bf16m8_tu(
+// CHECK-RV64-SAME: <vscale x 32 x bfloat> [[VD:%.*]], <vscale x 32 x bfloat> [[VS1:%.*]], <vscale x 32 x bfloat> [[VS2:%.*]], i64 noundef [[VL:%.*]]) #[[ATTR0]] {
+// CHECK-RV64-NEXT: [[ENTRY:.*:]]
+// CHECK-RV64-NEXT: [[TMP0:%.*]] = call <vscale x 32 x bfloat> @llvm.riscv.vfnmadd.nxv32bf16.nxv32bf16.i64(<vscale x 32 x bfloat> [[VD]], <vscale x 32 x bfloat> [[VS1]], <vscale x 32 x bfloat> [[VS2]], i64 7, i64 [[VL]], i64 2)
+// CHECK-RV64-NEXT: ret <vscale x 32 x bfloat> [[TMP0]]
+//
+vbfloat16m8_t test_vfnmadd_vv_bf16m8_tu(vbfloat16m8_t vd, vbfloat16m8_t vs1, vbfloat16m8_t vs2, size_t vl) {
+ return __riscv_vfnmadd_vv_bf16m8_tu(vd, vs1, vs2, vl);
+}
+
+// CHECK-RV64-LABEL: define dso_local <vscale x 32 x bfloat> @test_vfnmadd_vf_bf16m8_tu(
+// CHECK-RV64-SAME: <vscale x 32 x bfloat> [[VD:%.*]], bfloat noundef [[RS1:%.*]], <vscale x 32 x bfloat> [[VS2:%.*]], i64 noundef [[VL:%.*]]) #[[ATTR0]] {
+// CHECK-RV64-NEXT: [[ENTRY:.*:]]
+// CHECK-RV64-NEXT: [[TMP0:%.*]] = call <vscale x 32 x bfloat> @llvm.riscv.vfnmadd.nxv32bf16.bf16.i64(<vscale x 32 x bfloat> [[VD]], bfloat [[RS1]], <vscale x 32 x bfloat> [[VS2]], i64 7, i64 [[VL]], i64 2)
+// CHECK-RV64-NEXT: ret <vscale x 32 x bfloat> [[TMP0]]
+//
+vbfloat16m8_t test_vfnmadd_vf_bf16m8_tu(vbfloat16m8_t vd, __bf16 rs1, vbfloat16m8_t vs2, size_t vl) {
+ return __riscv_vfnmadd_vf_bf16m8_tu(vd, rs1, vs2, vl);
+}
+
+// CHECK-RV64-LABEL: define dso_local <vscale x 1 x bfloat> @test_vfnmadd_vv_bf16mf4_tum(
+// CHECK-RV64-SAME: <vscale x 1 x i1> [[MASK:%.*]], <vscale x 1 x bfloat> [[VD:%.*]], <vscale x 1 x bfloat> [[VS1:%.*]], <vscale x 1 x bfloat> [[VS2:%.*]], i64 noundef [[VL:%.*]]) #[[ATTR0]] {
+// CHECK-RV64-NEXT: [[ENTRY:.*:]]
+// CHECK-RV64-NEXT: [[TMP0:%.*]] = call <vscale x 1 x bfloat> @llvm.riscv.vfnmadd.mask.nxv1bf16.nxv1bf16.i64(<vscale x 1 x bfloat> [[VD]], <vscale x 1 x bfloat> [[VS1]], <vscale x 1 x bfloat> [[VS2]], <vscale x 1 x i1> [[MASK]], i64 7, i64 [[VL]], i64 2)
+// CHECK-RV64-NEXT: ret <vscale x 1 x bfloat> [[TMP0]]
+//
+vbfloat16mf4_t test_vfnmadd_vv_bf16mf4_tum(vbool64_t mask, vbfloat16mf4_t vd, vbfloat16mf4_t vs1, vbfloat16mf4_t vs2, size_t vl) {
+ return __riscv_vfnmadd_vv_bf16mf4_tum(mask, vd, vs1, vs2, vl);
+}
+
+// CHECK-RV64-LABEL: define dso_local <vscale x 1 x bfloat> @test_vfnmadd_vf_bf16mf4_tum(
+// CHECK-RV64-SAME: <vscale x 1 x i1> [[MASK:%.*]], <vscale x 1 x bfloat> [[VD:%.*]], bfloat noundef [[RS1:%.*]], <vscale x 1 x bfloat> [[VS2:%.*]], i64 noundef [[VL:%.*]]) #[[ATTR0]] {
+// CHECK-RV64-NEXT: [[ENTRY:.*:]]
+// CHECK-RV64-NEXT: [[TMP0:%.*]] = call <vscale x 1 x bfloat> @llvm.riscv.vfnmadd.mask.nxv1bf16.bf16.i64(<vscale x 1 x bfloat> [[VD]], bfloat [[RS1]], <vscale x 1 x bfloat> [[VS2]], <vscale x 1 x i1> [[MASK]], i64 7, i64 [[VL]], i64 2)
+// CHECK-RV64-NEXT: ret <vscale x 1 x bfloat> [[TMP0]]
+//
+vbfloat16mf4_t test_vfnmadd_vf_bf16mf4_tum(vbool64_t mask, vbfloat16mf4_t vd, __bf16 rs1, vbfloat16mf4_t vs2, size_t vl) {
+ return __riscv_vfnmadd_vf_bf16mf4_tum(mask, vd, rs1, vs2, vl);
+}
+
+// CHECK-RV64-LABEL: define dso_local <vscale x 2 x bfloat> @test_vfnmadd_vv_bf16mf2_tum(
+// CHECK-RV64-SAME: <vscale x 2 x i1> [[MASK:%.*]], <vscale x 2 x bfloat> [[VD:%.*]], <vscale x 2 x bfloat> [[VS1:%.*]], <vscale x 2 x bfloat> [[VS2:%.*]], i64 noundef [[VL:%.*]]) #[[ATTR0]] {
+// CHECK-RV64-NEXT: [[ENTRY:.*:]]
+// CHECK-RV64-NEXT: [[TMP0:%.*]] = call <vscale x 2 x bfloat> @llvm.riscv.vfnmadd.mask.nxv2bf16.nxv2bf16.i64(<vscale x 2 x bfloat> [[VD]], <vscale x 2 x bfloat> [[VS1]], <vscale x 2 x bfloat> [[VS2]], <vscale x 2 x i1> [[MASK]], i64 7, i64 [[VL]], i64 2)
+// CHECK-RV64-NEXT: ret <vscale x 2 x bfloat> [[TMP0]]
+//
+vbfloat16mf2_t test_vfnmadd_vv_bf16mf2_tum(vbool32_t mask, vbfloat16mf2_t vd, vbfloat16mf2_t vs1, vbfloat16mf2_t vs2, size_t vl) {
+ return __riscv_vfnmadd_vv_bf16mf2_tum(mask, vd, vs1, vs2, vl);
+}
+
+// CHECK-RV64-LABEL: define dso_local <vscale x 2 x bfloat> @test_vfnmadd_vf_bf16mf2_tum(
+// CHECK-RV64-SAME: <vscale x 2 x i1> [[MASK:%.*]], <vscale x 2 x bfloat> [[VD:%.*]], bfloat noundef [[RS1:%.*]], <vscale x 2 x bfloat> [[VS2:%.*]], i64 noundef [[VL:%.*]]) #[[ATTR0]] {
+// CHECK-RV64-NEXT: [[ENTRY:.*:]]
+// CHECK-RV64-NEXT: [[TMP0:%.*]] = call <vscale x 2 x bfloat> @llvm.riscv.vfnmadd.mask.nxv2bf16.bf16.i64(<vscale x 2 x bfloat> [[VD]], bfloat [[RS1]], <vscale x 2 x bfloat> [[VS2]], <vscale x 2 x i1> [[MASK]], i64 7, i64 [[VL]], i64 2)
+// CHECK-RV64-NEXT: ret <vscale x 2 x bfloat> [[TMP0]]
+//
+vbfloat16mf2_t test_vfnmadd_vf_bf16mf2_tum(vbool32_t mask, vbfloat16mf2_t vd, __bf16 rs1, vbfloat16mf2_t vs2, size_t vl) {
+ return __riscv_vfnmadd_vf_bf16mf2_tum(mask, vd, rs1, vs2, vl);
+}
+
+// CHECK-RV64-LABEL: define dso_local <vscale x 4 x bfloat> @test_vfnmadd_vv_bf16m1_tum(
+// CHECK-RV64-SAME: <vscale x 4 x i1> [[MASK:%.*]], <vscale x 4 x bfloat> [[VD:%.*]], <vscale x 4 x bfloat> [[VS1:%.*]], <vscale x 4 x bfloat> [[VS2:%.*]], i64 noundef [[VL:%.*]]) #[[ATTR0]] {
+// CHECK-RV64-NEXT: [[ENTRY:.*:]]
+// CHECK-RV64-NEXT: [[TMP0:%.*]] = call <vscale x 4 x bfloat> @llvm.riscv.vfnmadd.mask.nxv4bf16.nxv4bf16.i64(<vscale x 4 x bfloat> [[VD]], <vscale x 4 x bfloat> [[VS1]], <vscale x 4 x bfloat> [[VS2]], <vscale x 4 x i1> [[MASK]], i64 7, i64 [[VL]], i64 2)
+// CHECK-RV64-NEXT: ret <vscale x 4 x bfloat> [[TMP0]]
+//
+vbfloat16m1_t test_vfnmadd_vv_bf16m1_tum(vbool16_t mask, vbfloat16m1_t vd, vbfloat16m1_t vs1, vbfloat16m1_t vs2, size_t vl) {
+ return __riscv_vfnmadd_vv_bf16m1_tum(mask, vd, vs1, vs2, vl);
+}
+
+// CHECK-RV64-LABEL: define dso_local <vscale x 4 x bfloat> @test_vfnmadd_vf_bf16m1_tum(
+// CHECK-RV64-SAME: <vscale x 4 x i1> [[MASK:%.*]], <vscale x 4 x bfloat> [[VD:%.*]], bfloat noundef [[RS1:%.*]], <vscale x 4 x bfloat> [[VS2:%.*]], i64 noundef [[VL:%.*]]) #[[ATTR0]] {
+// CHECK-RV64-NEXT: [[ENTRY:.*:]]
+// CHECK-RV64-NEXT: [[TMP0:%.*]] = call <vscale x 4 x bfloat> @llvm.riscv.vfnmadd.mask.nxv4bf16.bf16.i64(<vscale x 4 x bfloat> [[VD]], bfloat [[RS1]], <vscale x 4 x bfloat> [[VS2]], <vscale x 4 x i1> [[MASK]], i64 7, i64 [[VL]], i64 2)
+// CHECK-RV64-NEXT: ret <vscale x 4 x bfloat> [[TMP0]]
+//
+vbfloat16m1_t test_vfnmadd_vf_bf16m1_tum(vbool16_t mask, vbfloat16m1_t vd, __bf16 rs1, vbfloat16m1_t vs2, size_t vl) {
+ return __riscv_vfnmadd_vf_bf16m1_tum(mask, vd, rs1, vs2, vl);
+}
+
+// CHECK-RV64-LABEL: define dso_local <vscale x 8 x bfloat> @test_vfnmadd_vv_bf16m2_tum(
+// CHECK-RV64-SAME: <vscale x 8 x i1> [[MASK:%.*]], <vscale x 8 x bfloat> [[VD:%.*]], <vscale x 8 x bfloat> [[VS1:%.*]], <vscale x 8 x bfloat> [[VS2:%.*]], i64 noundef [[VL:%.*]]) #[[ATTR0]] {
+// CHECK-RV64-NEXT: [[ENTRY:.*:]]
+// CHECK-RV64-NEXT: [[TMP0:%.*]] = call <vscale x 8 x bfloat> @llvm.riscv.vfnmadd.mask.nxv8bf16.nxv8bf16.i64(<vscale x 8 x bfloat> [[VD]], <vscale x 8 x bfloat> [[VS1]], <vscale x 8 x bfloat> [[VS2]], <vscale x 8 x i1> [[MASK]], i64 7, i64 [[VL]], i64 2)
+// CHECK-RV64-NEXT: ret <vscale x 8 x bfloat> [[TMP0]]
+//
+vbfloat16m2_t test_vfnmadd_vv_bf16m2_tum(vbool8_t mask, vbfloat16m2_t vd, vbfloat16m2_t vs1, vbfloat16m2_t vs2, size_t vl) {
+ return __riscv_vfnmadd_vv_bf16m2_tum(mask, vd, vs1, vs2, vl);
+}
+
+// CHECK-RV64-LABEL: define dso_local <vscale x 8 x bfloat> @test_vfnmadd_vf_bf16m2_tum(
+// CHECK-RV64-SAME: <vscale x 8 x i1> [[MASK:%.*]], <vscale x 8 x bfloat> [[VD:%.*]], bfloat noundef [[RS1:%.*]], <vscale x 8 x bfloat> [[VS2:%.*]], i64 noundef [[VL:%.*]]) #[[ATTR0]] {
+// CHECK-RV64-NEXT: [[ENTRY:.*:]]
+// CHECK-RV64-NEXT: [[TMP0:%.*]] = call <vscale x 8 x bfloat> @llvm.riscv.vfnmadd.mask.nxv8bf16.bf16.i64(<vscale x 8 x bfloat> [[VD]], bfloat [[RS1]], <vscale x 8 x bfloat> [[VS2]], <vscale x 8 x i1> [[MASK]], i64 7, i64 [[VL]], i64 2)
+// CHECK-RV64-NEXT: ret <vscale x 8 x bfloat> [[TMP0]]
+//
+vbfloat16m2_t test_vfnmadd_vf_bf16m2_tum(vbool8_t mask, vbfloat16m2_t vd, __bf16 rs1, vbfloat16m2_t vs2, size_t vl) {
+ return __riscv_vfnmadd_vf_bf16m2_tum(mask, vd, rs1, vs2, vl);
+}
+
+// CHECK-RV64-LABEL: define dso_local <vscale x 16 x bfloat> @test_vfnmadd_vv_bf16m4_tum(
+// CHECK-RV64-SAME: <vscale x 16 x i1> [[MASK:%.*]], <vscale x 16 x bfloat> [[VD:%.*]], <vscale x 16 x bfloat> [[VS1:%.*]], <vscale x 16 x bfloat> [[VS2:%.*]], i64 noundef [[VL:%.*]]) #[[ATTR0]] {
+// CHECK-RV64-NEXT: [[ENTRY:.*:]]
+// CHECK-RV64-NEXT: [[TMP0:%.*]] = call <vscale x 16 x bfloat> @llvm.riscv.vfnmadd.mask.nxv16bf16.nxv16bf16.i64(<vscale x 16 x bfloat> [[VD]], <vscale x 16 x bfloat> [[VS1]], <vscale x 16 x bfloat> [[VS2]], <vscale x 16 x i1> [[MASK]], i64 7, i64 [[VL]], i64 2)
+// CHECK-RV64-NEXT: ret <vscale x 16 x bfloat> [[TMP0]]
+//
+vbfloat16m4_t test_vfnmadd_vv_bf16m4_tum(vbool4_t mask, vbfloat16m4_t vd, vbfloat16m4_t vs1, vbfloat16m4_t vs2, size_t vl) {
+ return __riscv_vfnmadd_vv_bf16m4_tum(mask, vd, vs1, vs2, vl);
+}
+
+// CHECK-RV64-LABEL: define dso_local <vscale x 16 x bfloat> @test_vfnmadd_vf_bf16m4_tum(
+// CHECK-RV64-SAME: <vscale x 16 x i1> [[MASK:%.*]], <vscale x 16 x bfloat> [[VD:%.*]], bfloat noundef [[RS1:%.*]], <vscale x 16 x bfloat> [[VS2:%.*]], i64 noundef [[VL:%.*]]) #[[ATTR0]] {
+// CHECK-RV64-NEXT: [[ENTRY:.*:]]
+// CHECK-RV64-NEXT: [[TMP0:%.*]] = call <vscale x 16 x bfloat> @llvm.riscv.vfnmadd.mask.nxv16bf16.bf16.i64(<vscale x 16 x bfloat> [[VD]], bfloat [[RS1]], <vscale x 16 x bfloat> [[VS2]], <vscale x 16 x i1> [[MASK]], i64 7, i64 [[VL]], i64 2)
+// CHECK-RV64-NEXT: ret <vscale x 16 x bfloat> [[TMP0]]
+//
+vbfloat16m4_t test_vfnmadd_vf_bf16m4_tum(vbool4_t mask, vbfloat16m4_t vd, __bf16 rs1, vbfloat16m4_t vs2, size_t vl) {
+ return __riscv_vfnmadd_vf_bf16m4_tum(mask, vd, rs1, vs2, vl);
+}
+
+// CHECK-RV64-LABEL: define dso_local <vscale x 32 x bfloat> @test_vfnmadd_vv_bf16m8_tum(
+// CHECK-RV64-SAME: <vscale x 32 x i1> [[MASK:%.*]], <vscale x 32 x bfloat> [[VD:%.*]], <vscale x 32 x bfloat> [[VS1:%.*]], <vscale x 32 x bfloat> [[VS2:%.*]], i64 noundef [[VL:%.*]]) #[[ATTR0]] {
+// CHECK-RV64-NEXT: [[ENTRY:.*:]]
+// CHECK-RV64-NEXT: [[TMP0:%.*]] = call <vscale x 32 x bfloat> @llvm.riscv.vfnmadd.mask.nxv32bf16.nxv32bf16.i64(<vscale x 32 x bfloat> [[VD]], <vscale x 32 x bfloat> [[VS1]], <vscale x 32 x bfloat> [[VS2]], <vscale x 32 x i1> [[MASK]], i64 7, i64 [[VL]], i64 2)
+// CHECK-RV64-NEXT: ret <vscale x 32 x bfloat> [[TMP0]]
+//
+vbfloat16m8_t test_vfnmadd_vv_bf16m8_tum(vbool2_t mask, vbfloat16m8_t vd, vbfloat16m8_t vs1, vbfloat16m8_t vs2, size_t vl) {
+ return __riscv_vfnmadd_vv_bf16m8_tum(mask, vd, vs1, vs2, vl);
+}
+
+// CHECK-RV64-LABEL: define dso_local <vscale x 32 x bfloat> @test_vfnmadd_vf_bf16m8_tum(
+// CHECK-RV64-SAME: <vscale x 32 x i1> [[MASK:%.*]], <vscale x 32 x bfloat> [[VD:%.*]], bfloat noundef [[RS1:%.*]], <vscale x 32 x bfloat> [[VS2:%.*]], i64 noundef [[VL:%.*]]) #[[ATTR0]] {
+// CHECK-RV64-NEXT: [[ENTRY:.*:]]
+// CHECK-RV64-NEXT: [[TMP0:%.*]] = call <vscale x 32 x bfloat> @llvm.riscv.vfnmadd.mask.nxv32bf16.bf16.i64(<vscale x 32 x bfloat> [[VD]], bfloat [[RS1]], <vscale x 32 x bfloat> [[VS2]], <vscale x 32 x i1> [[MASK]], i64 7, i64 [[VL]], i64 2)
+// CHECK-RV64-NEXT: ret <vscale x 32 x bfloat> [[TMP0]]
+//
+vbfloat16m8_t test_vfnmadd_vf_bf16m8_tum(vbool2_t mask, vbfloat16m8_t vd, __bf16 rs1, vbfloat16m8_t vs2, size_t vl) {
+ return __riscv_vfnmadd_vf_bf16m8_tum(mask, vd, rs1, vs2, vl);
+}
+
+// CHECK-RV64-LABEL: define dso_local <vscale x 1 x bfloat> @test_vfnmadd_vv_bf16mf4_tumu(
+// CHECK-RV64-SAME: <vscale x 1 x i1> [[MASK:%.*]], <vscale x 1 x bfloat> [[VD:%.*]], <vscale x 1 x bfloat> [[VS1:%.*]], <vscale x 1 x bfloat> [[VS2:%.*]], i64 noundef [[VL:%.*]]) #[[ATTR0]] {
+// CHECK-RV64-NEXT: [[ENTRY:.*:]]
+// CHECK-RV64-NEXT: [[TMP0:%.*]] = call <vscale x 1 x bfloat> @llvm.riscv.vfnmadd.mask.nxv1bf16.nxv1bf16.i64(<vscale x 1 x bfloat> [[VD]], <vscale x 1 x bfloat> [[VS1]], <vscale x 1 x bfloat> [[VS2]], <vscale x 1 x i1> [[MASK]], i64 7, i64 [[VL]], i64 0)
+// CHECK-RV64-NEXT: ret <vscale x 1 x bfloat> [[TMP0]]
+//
+vbfloat16mf4_t test_vfnmadd_vv_bf16mf4_tumu(vbool64_t mask, vbfloat16mf4_t vd, vbfloat16mf4_t vs1, vbfloat16mf4_t vs2, size_t vl) {
+ return __riscv_vfnmadd_vv_bf16mf4_tumu(mask, vd, vs1, vs2, vl);
+}
+
+// CHECK-RV64-LABEL: define dso_local <vscale x 1 x bfloat> @test_vfnmadd_vf_bf16mf4_tumu(
+// CHECK-RV64-SAME: <vscale x 1 x i1> [[MASK:%.*]], <vscale x 1 x bfloat> [[VD:%.*]], bfloat noundef [[RS1:%.*]], <vscale x 1 x bfloat> [[VS2:%.*]], i64 noundef [[VL:%.*]]) #[[ATTR0]] {
+// CHECK-RV64-NEXT: [[ENTRY:.*:]]
+// CHECK-RV64-NEXT: [[TMP0:%.*]] = call <vscale x 1 x bfloat> @llvm.riscv.vfnmadd.mask.nxv1bf16.bf16.i64(<vscale x 1 x bfloat> [[VD]], bfloat [[RS1]], <vscale x 1 x bfloat> [[VS2]], <vscale x 1 x i1> [[MASK]], i64 7, i64 [[VL]], i64 0)
+// CHECK-RV64-NEXT: ret <vscale x 1 x bfloat> [[TMP0]]
+//
+vbfloat16mf4_t test_vfnmadd_vf_bf16mf4_tumu(vbool64_t mask, vbfloat16mf4_t vd, __bf16 rs1, vbfloat16mf4_t vs2, size_t vl) {
+ return __riscv_vfnmadd_vf_bf16mf4_tumu(mask, vd, rs1, vs2, vl);
+}
+
+// CHECK-RV64-LABEL: define dso_local <vscale x 2 x bfloat> @test_vfnmadd_vv_bf16mf2_tumu(
+// CHECK-RV64-SAME: <vscale x 2 x i1> [[MASK:%.*]], <vscale x 2 x bfloat> [[VD:%.*]], <vscale x 2 x bfloat> [[VS1:%.*]], <vscale x 2 x bfloat> [[VS2:%.*]], i64 noundef [[VL:%.*]]) #[[ATTR0]] {
+// CHECK-RV64-NEXT: [[ENTRY:.*:]]
+// CHECK-RV64-NEXT: [[TMP0:%.*]] = call <vscale x 2 x bfloat> @llvm.riscv.vfnmadd.mask.nxv2bf16.nxv2bf16.i64(<vscale x 2 x bfloat> [[VD]], <vscale x 2 x bfloat> [[VS1]], <vscale x 2 x bfloat> [[VS2]], <vscale x 2 x i1> [[MASK]], i64 7, i64 [[VL]], i64 0)
+// CHECK-RV64-NEXT: ret <vscale x 2 x bfloat> [[TMP0]]
+//
+vbfloat16mf2_t test_vfnmadd_vv_bf16mf2_tumu(vbool32_t mask, vbfloat16mf2_t vd, vbfloat16mf2_t vs1, vbfloat16mf2_t vs2, size_t vl) {
+ return __riscv_vfnmadd_vv_bf16mf2_tumu(mask, vd, vs1, vs2, vl);
+}
+
+// CHECK-RV64-LABEL: define dso_local <vscale x 2 x bfloat> @test_vfnmadd_vf_bf16mf2_tumu(
+// CHECK-RV64-SAME: <vscale x 2 x i1> [[MASK:%.*]], <vscale x 2 x bfloat> [[VD:%.*]], bfloat noundef [[RS1:%.*]], <vscale x 2 x bfloat> [[VS2:%.*]], i64 noundef [[VL:%.*]]) #[[ATTR0]] {
+// CHECK-RV64-NEXT: [[ENTRY:.*:]]
+// CHECK-RV64-NEXT: [[TMP0:%.*]] = call <vscale x 2 x bfloat> @llvm.riscv.vfnmadd.mask.nxv2bf16.bf16.i64(<vscale x 2 x bfloat> [[VD]], bfloat [[RS1]], <vscale x 2 x bfloat> [[VS2]], <vscale x 2 x i1> [[MASK]], i64 7, i64 [[VL]], i64 0)
+// CHECK-RV64-NEXT: ret <vscale x 2 x bfloat> [[TMP0]]
+//
+vbfloat16mf2_t test_vfnmadd_vf_bf16mf2_tumu(vbool32_t mask, vbfloat16mf2_t vd, __bf16 rs1, vbfloat16mf2_t vs2, size_t vl) {
+ return __riscv_vfnmadd_vf_bf16mf2_tumu(mask, vd, rs1, vs2, vl);
+}
+
+// CHECK-RV64-LABEL: define dso_local <vscale x 4 x bfloat> @test_vfnmadd_vv_bf16m1_tumu(
+// CHECK-RV64-SAME: <vscale x 4 x i1> [[MASK:%.*]], <vscale x 4 x bfloat> [[VD:%.*]], <vscale x 4 x bfloat> [[VS1:%.*]], <vscale x 4 x bfloat> [[VS2:%.*]], i64 noundef [[VL:%.*]]) #[[ATTR0]] {
+// CHECK-RV64-NEXT: [[ENTRY:.*:]]
+// CHECK-RV64-NEXT: [[TMP0:%.*]] = call <vscale x 4 x bfloat> @llvm.riscv.vfnmadd.mask.nxv4bf16.nxv4bf16.i64(<vscale x 4 x bfloat> [[VD]], <vscale x 4 x bfloat> [[VS1]], <vscale x 4 x bfloat> [[VS2]], <vscale x 4 x i1> [[MASK]], i64 7, i64 [[VL]], i64 0)
+// CHECK-RV64-NEXT: ret <vscale x 4 x bfloat> [[TMP0]]
+//
+vbfloat16m1_t test_vfnmadd_vv_bf16m1_tumu(vbool16_t mask, vbfloat16m1_t vd, vbfloat16m1_t vs1, vbfloat16m1_t vs2, size_t vl) {
+ return __riscv_vfnmadd_vv_bf16m1_tumu(mask, vd, vs1, vs2, vl);
+}
+
+// CHECK-RV64-LABEL: define dso_local <vscale x 4 x bfloat> @test_vfnmadd_vf_bf16m1_tumu(
+// CHECK-RV64-SAME: <vscale x 4 x i1> [[MASK:%.*]], <vscale x 4 x bfloat> [[VD:%.*]], bfloat noundef [[RS1:%.*]], <vscale x 4 x bfloat> [[VS2:%.*]], i64 noundef [[VL:%.*]]) #[[ATTR0]] {
+// CHECK-RV64-NEXT: [[ENTRY:.*:]]
+// CHECK-RV64-NEXT: [[TMP0:%.*]] = call <vscale x 4 x bfloat> @llvm.riscv.vfnmadd.mask.nxv4bf16.bf16.i64(<vscale x 4 x bfloat> [[VD]], bfloat [[RS1]], <vscale x 4 x bfloat> [[VS2]], <vscale x 4 x i1> [[MASK]], i64 7, i64 [[VL]], i64 0)
+// CHECK-RV64-NEXT: ret <vscale x 4 x bfloat> [[TMP0]]
+//
+vbfloat16m1_t test_vfnmadd_vf_bf16m1_tumu(vbool16_t mask, vbfloat16m1_t vd, __bf16 rs1, vbfloat16m1_t vs2, size_t vl) {
+ return __riscv_vfnmadd_vf_bf16m1_tumu(mask, vd, rs1, vs2, vl);
+}
+
+// CHECK-RV64-LABEL: define dso_local <vscale x 8 x bfloat> @test_vfnmadd_vv_bf16m2_tumu(
+// CHECK-RV64-SAME: <vscale x 8 x i1> [[MASK:%.*]], <vscale x 8 x bfloat> [[VD:%.*]], <vscale x 8 x bfloat> [[VS1:%.*]], <vscale x 8 x bfloat> [[VS2:%.*]], i64 noundef [[VL:%.*]]) #[[ATTR0]] {
+// CHECK-RV64-NEXT: [[ENTRY:.*:]]
+// CHECK-RV64-NEXT: [[TMP0:%.*]] = call <vscale x 8 x bfloat> @llvm.riscv.vfnmadd.mask.nxv8bf16.nxv8bf16.i64(<vscale x 8 x bfloat> [[VD]], <vscale x 8 x bfloat> [[VS1]], <vscale x 8 x bfloat> [[VS2]], <vscale x 8 x i1> [[MASK]], i64 7, i64 [[VL]], i64 0)
+// CHECK-RV64-NEXT: ret <vscale x 8 x bfloat> [[TMP0]]
+//
+vbfloat16m2_t test_vfnmadd_vv_bf16m2_tumu(vbool8_t mask, vbfloat16m2_t vd, vbfloat16m2_t vs1, vbfloat16m2_t vs2, size_t vl) {
+ return __riscv_vfnmadd_vv_bf16m2_tumu(mask, vd, vs1, vs2, vl);
+}
+
+// CHECK-RV64-LABEL: define dso_local <vscale x 8 x bfloat> @test_vfnmadd_vf_bf16m2_tumu(
+// CHECK-RV64-SAME: <vscale x 8 x i1> [[MASK:%.*]], <vscale x 8 x bfloat> [[VD:%.*]], bfloat noundef [[RS1:%.*]], <vscale x 8 x bfloat> [[VS2:%.*]], i64 noundef [[VL:%.*]]) #[[ATTR0]] {
+// CHECK-RV64-NEXT: [[ENTRY:.*:]]
+// CHECK-RV64-NEXT: [[TMP0:%.*]] = call <vscale x 8 x bfloat> @llvm.riscv.vfnmadd.mask.nxv8bf16.bf16.i64(<vscale x 8 x bfloat> [[VD]], bfloat [[RS1]], <vscale x 8 x bfloat> [[VS2]], <vscale x 8 x i1> [[MASK]], i64 7, i64 [[VL]], i64 0)
+// CHECK-RV64-NEXT: ret <vscale x 8 x bfloat> [[TMP0]]
+//
+vbfloat16m2_t test_vfnmadd_vf_bf16m2_tumu(vbool8_t mask, vbfloat16m2_t vd, __bf16 rs1, vbfloat16m2_t vs2, size_t vl) {
+ return __riscv_vfnmadd_vf_bf16m2_tumu(mask, vd, rs1, vs2, vl);
+}
+
+// CHECK-RV64-LABEL: define dso_local <vscale x 16 x bfloat> @test_vfnmadd_vv_bf16m4_tumu(
+// CHECK-RV64-SAME: <vscale x 16 x i1> [[MASK:%.*]], <vscale x 16 x bfloat> [[VD:%.*]], <vscale x 16 x bfloat> [[VS1:%.*]], <vscale x 16 x bfloat> [[VS2:%.*]], i64 noundef [[VL:%.*]]) #[[ATTR0]] {
+// CHECK-RV64-NEXT: [[ENTRY:.*:]]
+// CHECK-RV64-NEXT: [[TMP0:%.*]] = call <vscale x 16 x bfloat> @llvm.riscv.vfnmadd.mask.nxv16bf16.nxv16bf16.i64(<vscale x 16 x bfloat> [[VD]], <vscale x 16 x bfloat> [[VS1]], <vscale x 16 x bfloat> [[VS2]], <vscale x 16 x i1> [[MASK]], i64 7, i64 [[VL]], i64 0)
+// CHECK-RV64-NEXT: ret <vscale x 16 x bfloat> [[TMP0]]
+//
+vbfloat16m4_t test_vfnmadd_vv_bf16m4_tumu(vbool4_t mask, vbfloat16m4_t vd, vbfloat16m4_t vs1, vbfloat16m4_t vs2, size_t vl) {
+ return __riscv_vfnmadd_vv_bf16m4_tumu(mask, vd, vs1, vs2, vl);
+}
+
+// CHECK-RV64-LABEL: define dso_local <vscale x 16 x bfloat> @test_vfnmadd_vf_bf16m4_tumu(
+// CHECK-RV64-SAME: <vscale x 16 x i1> [[MASK:%.*]], <vscale x 16 x bfloat> [[VD:%.*]], bfloat noundef [[RS1:%.*]], <vscale x 16 x bfloat> [[VS2:%.*]], i64 noundef [[VL:%.*]]) #[[ATTR0]] {
+// CHECK-RV64-NEXT: [[ENTRY:.*:]]
+// CHECK-RV64-NEXT: [[TMP0:%.*]] = call <vscale x 16 x bfloat> @llvm.riscv.vfnmadd.mask.nxv16bf16.bf16.i64(<vscale x 16 x bfloat> [[VD]], bfloat [[RS1]], <vscale x 16 x bfloat> [[VS2]], <vscale x 16 x i1> [[MASK]], i64 7, i64 [[VL]], i64 0)
+// CHECK-RV64-NEXT: ret <vscale x 16 x bfloat> [[TMP0]]
+//
+vbfloat16m4_t test_vfnmadd_vf_bf16m4_tumu(vbool4_t mask, vbfloat16m4_t vd, __bf16 rs1, vbfloat16m4_t vs2, size_t vl) {
+ return __riscv_vfnmadd_vf_bf16m4_tumu(mask, vd, rs1, vs2, vl);
+}
+
+// CHECK-RV64-LABEL: define dso_local <vscale x 32 x bfloat> @test_vfnmadd_vv_bf16m8_tumu(
+// CHECK-RV64-SAME: <vscale x 32 x i1> [[MASK:%.*]], <vscale x 32 x bfloat> [[VD:%.*]], <vscale x 32 x bfloat> [[VS1:%.*]], <vscale x 32 x bfloat> [[VS2:%.*]], i64 noundef [[VL:%.*]]) #[[ATTR0]] {
+// CHECK-RV64-NEXT: [[ENTRY:.*:]]
+// CHECK-RV64-NEXT: [[TMP0:%.*]] = call <vscale x 32 x bfloat> @llvm.riscv.vfnmadd.mask.nxv32bf16.nxv32bf16.i64(<vscale x 32 x bfloat> [[VD]], <vscale x 32 x bfloat> [[VS1]], <vscale x 32 x bfloat> [[VS2]], <vscale x 32 x i1> [[MASK]], i64 7, i64 [[VL]], i64 0)
+// CHECK-RV64-NEXT: ret <vscale x 32 x bfloat> [[TMP0]]
+//
+vbfloat16m8_t test_vfnmadd_vv_bf16m8_tumu(vbool2_t mask, vbfloat16m8_t vd, vbfloat16m8_t vs1, vbfloat16m8_t vs2, size_t vl) {
+ return __riscv_vfnmadd_vv_bf16m8_tumu(mask, vd, vs1, vs2, vl);
+}
+
+// CHECK-RV64-LABEL: define dso_local <vscale x 32 x bfloat> @test_vfnmadd_vf_bf16m8_tumu(
+// CHECK-RV64-SAME: <vscale x 32 x i1> [[MASK:%.*]], <vscale x 32 x bfloat> [[VD:%.*]], bfloat noundef [[RS1:%.*]], <vscale x 32 x bfloat> [[VS2:%.*]], i64 noundef [[VL:%.*]]) #[[ATTR0]] {
+// CHECK-RV64-NEXT: [[ENTRY:.*:]]
+// CHECK-RV64-NEXT: [[TMP0:%.*]] = call <vscale x 32 x bfloat> @llvm.riscv.vfnmadd.mask.nxv32bf16.bf16.i64(<vscale x 32 x bfloat> [[VD]], bfloat [[RS1]], <vscale x 32 x bfloat> [[VS2]], <vscale x 32 x i1> [[MASK]], i64 7, i64 [[VL]], i64 0)
+// CHECK-RV64-NEXT: ret <vscale x 32 x bfloat> [[TMP0]]
+//
+vbfloat16m8_t test_vfnmadd_vf_bf16m8_tumu(vbool2_t mask, vbfloat16m8_t vd, __bf16 rs1, vbfloat16m8_t vs2, size_t vl) {
+ return __riscv_vfnmadd_vf_bf16m8_tumu(mask, vd, rs1, vs2, vl);
+}
+
+// CHECK-RV64-LABEL: define dso_local <vscale x 1 x bfloat> @test_vfnmadd_vv_bf16mf4_mu(
+// CHECK-RV64-SAME: <vscale x 1 x i1> [[MASK:%.*]], <vscale x 1 x bfloat> [[VD:%.*]], <vscale x 1 x bfloat> [[VS1:%.*]], <vscale x 1 x bfloat> [[VS2:%.*]], i64 noundef [[VL:%.*]]) #[[ATTR0]] {
+// CHECK-RV64-NEXT: [[ENTRY:.*:]]
+// CHECK-RV64-NEXT: [[TMP0:%.*]] = call <vscale x 1 x bfloat> @llvm.riscv.vfnmadd.mask.nxv1bf16.nxv1bf16.i64(<vscale x 1 x bfloat> [[VD]], <vscale x 1 x bfloat> [[VS1]], <vscale x 1 x bfloat> [[VS2]], <vscale x 1 x i1> [[MASK]], i64 7, i64 [[VL]], i64 1)
+// CHECK-RV64-NEXT: ret <vscale x 1 x bfloat> [[TMP0]]
+//
+vbfloat16mf4_t test_vfnmadd_vv_bf16mf4_mu(vbool64_t mask, vbfloat16mf4_t vd, vbfloat16mf4_t vs1, vbfloat16mf4_t vs2, size_t vl) {
+ return __riscv_vfnmadd_vv_bf16mf4_mu(mask, vd, vs1, vs2, vl);
+}
+
+// CHECK-RV64-LABEL: define dso_local <vscale x 1 x bfloat> @test_vfnmadd_vf_bf16mf4_mu(
+// CHECK-RV64-SAME: <vscale x 1 x i1> [[MASK:%.*]], <vscale x 1 x bfloat> [[VD:%.*]], bfloat noundef [[RS1:%.*]], <vscale x 1 x bfloat> [[VS2:%.*]], i64 noundef [[VL:%.*]]) #[[ATTR0]] {
+// CHECK-RV64-NEXT: [[ENTRY:.*:]]
+// CHECK-RV64-NEXT: [[TMP0:%.*]] = call <vscale x 1 x bfloat> @llvm.riscv.vfnmadd.mask.nxv1bf16.bf16.i64(<vscale x 1 x bfloat> [[VD]], bfloat [[RS1]], <vscale x 1 x bfloat> [[VS2]], <vscale x 1 x i1> [[MASK]], i64 7, i64 [[VL]], i64 1)
+// CHECK-RV64-NEXT: ret <vscale x 1 x bfloat> [[TMP0]]
+//
+vbfloat16mf4_t test_vfnmadd_vf_bf16mf4_mu(vbool64_t mask, vbfloat16mf4_t vd, __bf16 rs1, vbfloat16mf4_t vs2, size_t vl) {
+ return __riscv_vfnmadd_vf_bf16mf4_mu(mask, vd, rs1, vs2, vl);
+}
+
+// CHECK-RV64-LABEL: define dso_local <vscale x 2 x bfloat> @test_vfnmadd_vv_bf16mf2_mu(
+// CHECK-RV64-SAME: <vscale x 2 x i1> [[MASK:%.*]], <vscale x 2 x bfloat> [[VD:%.*]], <vscale x 2 x bfloat> [[VS1:%.*]], <vscale x 2 x bfloat> [[VS2:%.*]], i64 noundef [[VL:%.*]]) #[[ATTR0]] {
+// CHECK-RV64-NEXT: [[ENTRY:.*:]]
+// CHECK-RV64-NEXT: [[TMP0:%.*]] = call <vscale x 2 x bfloat> @llvm.riscv.vfnmadd.mask.nxv2bf16.nxv2bf16.i64(<vscale x 2 x bfloat> [[VD]], <vscale x 2 x bfloat> [[VS1]], <vscale x 2 x bfloat> [[VS2]], <vscale x 2 x i1> [[MASK]], i64 7, i64 [[VL]], i64 1)
+// CHECK-RV64-NEXT: ret <vscale x 2 x bfloat> [[TMP0]]
+//
+vbfloat16mf2_t test_vfnmadd_vv_bf16mf2_mu(vbool32_t mask, vbfloat16mf2_t vd, vbfloat16mf2_t vs1, vbfloat16mf2_t vs2, size_t vl) {
+ return __riscv_vfnmadd_vv_bf16mf2_mu(mask, vd, vs1, vs2, vl);
+}
+
+// CHECK-RV64-LABEL: define dso_local <vscale x 2 x bfloat> @test_vfnmadd_vf_bf16mf2_mu(
+// CHECK-RV64-SAME: <vscale x 2 x i1> [[MASK:%.*]], <vscale x 2 x bfloat> [[VD:%.*]], bfloat noundef [[RS1:%.*]], <vscale x 2 x bfloat> [[VS2:%.*]], i64 noundef [[VL:%.*]]) #[[ATTR0]] {
+// CHECK-RV64-NEXT: [[ENTRY:.*:]]
+// CHECK-RV64-NEXT: [[TMP0:%.*]] = call <vscale x 2 x bfloat> @llvm.riscv.vfnmadd.mask.nxv2bf16.bf16.i64(<vscale x 2 x bfloat> [[VD]], bfloat [[RS1]], <vscale x 2 x bfloat> [[VS2]], <vscale x 2 x i1> [[MASK]], i64 7, i64 [[VL]], i64 1)
+// CHECK-RV64-NEXT: ret <vscale x 2 x bfloat> [[TMP0]]
+//
+vbfloat16mf2_t test_vfnmadd_vf_bf16mf2_mu(vbool32_t mask, vbfloat16mf2_t vd, __bf16 rs1, vbfloat16mf2_t vs2, size_t vl) {
+ return __riscv_vfnmadd_vf_bf16mf2_mu(mask, vd, rs1, vs2, vl);
+}
+
+// CHECK-RV64-LABEL: define dso_local <vscale x 4 x bfloat> @test_vfnmadd_vv_bf16m1_mu(
+// CHECK-RV64-SAME: <vscale x 4 x i1> [[MASK:%.*]], <vscale x 4 x bfloat> [[VD:%.*]], <vscale x 4 x bfloat> [[VS1:%.*]], <vscale x 4 x bfloat> [[VS2:%.*]], i64 noundef [[VL:%.*]]) #[[ATTR0]] {
+// CHECK-RV64-NEXT: [[ENTRY:.*:]]
+// CHECK-RV64-NEXT: [[TMP0:%.*]] = call <vscale x 4 x bfloat> @llvm.riscv.vfnmadd.mask.nxv4bf16.nxv4bf16.i64(<vscale x 4 x bfloat> [[VD]], <vscale x 4 x bfloat> [[VS1]], <vscale x 4 x bfloat> [[VS2]], <vscale x 4 x i1> [[MASK]], i64 7, i64 [[VL]], i64 1)
+// CHECK-RV64-NEXT: ret <vscale x 4 x bfloat> [[TMP0]]
+//
+vbfloat16m1_t test_vfnmadd_vv_bf16m1_mu(vbool16_t mask, vbfloat16m1_t vd, vbfloat16m1_t vs1, vbfloat16m1_t vs2, size_t vl) {
+ return __riscv_vfnmadd_vv_bf16m1_mu(mask, vd, vs1, vs2, vl);
+}
+
+// CHECK-RV64-LABEL: define dso_local <vscale x 4 x bfloat> @test_vfnmadd_vf_bf16m1_mu(
+// CHECK-RV64-SAME: <vscale x 4 x i1> [[MASK:%.*]], <vscale x 4 x bfloat> [[VD:%.*]], bfloat noundef [[RS1:%.*]], <vscale x 4 x bfloat> [[VS2:%.*]], i64 noundef [[VL:%.*]]) #[[ATTR0]] {
+// CHECK-RV64-NEXT: [[ENTRY:.*:]]
+// CHECK-RV64-NEXT: [[TMP0:%.*]] = call <vscale x 4 x bfloat> @llvm.riscv.vfnmadd.mask.nxv4bf16.bf16.i64(<vscale x 4 x bfloat> [[VD]], bfloat [[RS1]], <vscale x 4 x bfloat> [[VS2]], <vscale x 4 x i1> [[MASK]], i64 7, i64 [[VL]], i64 1)
+// CHECK-RV64-NEXT: ret <vscale x 4 x bfloat> [[TMP0]]
+//
+vbfloat16m1_t test_vfnmadd_vf_bf16m1_mu(vbool16_t mask, vbfloat16m1_t vd, __bf16 rs1, vbfloat16m1_t vs2, size_t vl) {
+ return __riscv_vfnmadd_vf_bf16m1_mu(mask, vd, rs1, vs2, vl);
+}
+
+// CHECK-RV64-LABEL: define dso_local <vscale x 8 x bfloat> @test_vfnmadd_vv_bf16m2_mu(
+// CHECK-RV64-SAME: <vscale x 8 x i1> [[MASK:%.*]], <vscale x 8 x bfloat> [[VD:%.*]], <vscale x 8 x bfloat> [[VS1:%.*]], <vscale x 8 x bfloat> [[VS2:%.*]], i64 noundef [[VL:%.*]]) #[[ATTR0]] {
+// CHECK-RV64-NEXT: [[ENTRY:.*:]]
+// CHECK-RV64-NEXT: [[TMP0:%.*]] = call <vscale x 8 x bfloat> @llvm.riscv.vfnmadd.mask.nxv8bf16.nxv8bf16.i64(<vscale x 8 x bfloat> [[VD]], <vscale x 8 x bfloat> [[VS1]], <vscale x 8 x bfloat> [[VS2]], <vscale x 8 x i1> [[MASK]], i64 7, i64 [[VL]], i64 1)
+// CHECK-RV64-NEXT: ret <vscale x 8 x bfloat> [[TMP0]]
+//
+vbfloat16m2_t test_vfnmadd_vv_bf16m2_mu(vbool8_t mask, vbfloat16m2_t vd, vbfloat16m2_t vs1, vbfloat16m2_t vs2, size_t vl) {
+ return __riscv_vfnmadd_vv_bf16m2_mu(mask, vd, vs1, vs2, vl);
+}
+
+// CHECK-RV64-LABEL: define dso_local <vscale x 8 x bfloat> @test_vfnmadd_vf_bf16m2_mu(
+// CHECK-RV64-SAME: <vscale x 8 x i1> [[MASK:%.*]], <vscale x 8 x bfloat> [[VD:%.*]], bfloat noundef [[RS1:%.*]], <vscale x 8 x bfloat> [[VS2:%.*]], i64 noundef [[VL:%.*]]) #[[ATTR0]] {
+// CHECK-RV64-NEXT: [[ENTRY:.*:]]
+// CHECK-RV64-NEXT: [[TMP0:%.*]] = call <vscale x 8 x bfloat> @llvm.riscv.vfnmadd.mask.nxv8bf16.bf16.i64(<vscale x 8 x bfloat> [[VD]], bfloat [[RS1]], <vscale x 8 x bfloat> [[VS2]], <vscale x 8 x i1> [[MASK]], i64 7, i64 [[VL]], i64 1)
+// CHECK-RV64-NEXT: ret <vscale x 8 x bfloat> [[TMP0]]
+//
+vbfloat16m2_t test_vfnmadd_vf_bf16m2_mu(vbool8_t mask, vbfloat16m2_t vd, __bf16 rs1, vbfloat16m2_t vs2, size_t vl) {
+ return __riscv_vfnmadd_vf_bf16m2_mu(mask, vd, rs1, vs2, vl);
+}
+
+// CHECK-RV64-LABEL: define dso_local <vscale x 16 x bfloat> @test_vfnmadd_vv_bf16m4_mu(
+// CHECK-RV64-SAME: <vscale x 16 x i1> [[MASK:%.*]], <vscale x 16 x bfloat> [[VD:%.*]], <vscale x 16 x bfloat> [[VS1:%.*]], <vscale x 16 x bfloat> [[VS2:%.*]], i64 noundef [[VL:%.*]]) #[[ATTR0]] {
+// CHECK-RV64-NEXT: [[ENTRY:.*:]]
+// CHECK-RV64-NEXT: [[TMP0:%.*]] = call <vscale x 16 x bfloat> @llvm.riscv.vfnmadd.mask.nxv16bf16.nxv16bf16.i64(<vscale x 16 x bfloat> [[VD]], <vscale x 16 x bfloat> [[VS1]], <vscale x 16 x bfloat> [[VS2]], <vscale x 16 x i1> [[MASK]], i64 7, i64 [[VL]], i64 1)
+// CHECK-RV64-NEXT: ret <vscale x 16 x bfloat> [[TMP0]]
+//
+vbfloat16m4_t test_vfnmadd_vv_bf16m4_mu(vbool4_t mask, vbfloat16m4_t vd, vbfloat16m4_t vs1, vbfloat16m4_t vs2, size_t vl) {
+ return __riscv_vfnmadd_vv_bf16m4_mu(mask, vd, vs1, vs2, vl);
+}
+
+// CHECK-RV64-LABEL: define dso_local <vscale x 16 x bfloat> @test_vfnmadd_vf_bf16m4_mu(
+// CHECK-RV64-SAME: <vscale x 16 x i1> [[MASK:%.*]], <vscale x 16 x bfloat> [[VD:%.*]], bfloat noundef [[RS1:%.*]], <vscale x 16 x bfloat> [[VS2:%.*]], i64 noundef [[VL:%.*]]) #[[ATTR0]] {
+// CHECK-RV64-NEXT: [[ENTRY:.*:]]
+// CHECK-RV64-NEXT: [[TMP0:%.*]] = call <vscale x 16 x bfloat> @llvm.riscv.vfnmadd.mask.nxv16bf16.bf16.i64(<vscale x 16 x bfloat> [[VD]], bfloat [[RS1]], <vscale x 16 x bfloat> [[VS2]], <vscale x 16 x i1> [[MASK]], i64 7, i64 [[VL]], i64 1)
+// CHECK-RV64-NEXT: ret <vscale x 16 x bfloat> [[TMP0]]
+//
+vbfloat16m4_t test_vfnmadd_vf_bf16m4_mu(vbool4_t mask, vbfloat16m4_t vd, __bf16 rs1, vbfloat16m4_t vs2, size_t vl) {
+ return __riscv_vfnmadd_vf_bf16m4_mu(mask, vd, rs1, vs2, vl);
+}
+
+// CHECK-RV64-LABEL: define dso_local <vscale x 32 x bfloat> @test_vfnmadd_vv_bf16m8_mu(
+// CHECK-RV64-SAME: <vscale x 32 x i1> [[MASK:%.*]], <vscale x 32 x bfloat> [[VD:%.*]], <vscale x 32 x bfloat> [[VS1:%.*]], <vscale x 32 x bfloat> [[VS2:%.*]], i64 noundef [[VL:%.*]]) #[[ATTR0]] {
+// CHECK-RV64-NEXT: [[ENTRY:.*:]]
+// CHECK-RV64-NEXT: [[TMP0:%.*]] = call <vscale x 32 x bfloat> @llvm.riscv.vfnmadd.mask.nxv32bf16.nxv32bf16.i64(<vscale x 32 x bfloat> [[VD]], <vscale x 32 x bfloat> [[VS1]], <vscale x 32 x bfloat> [[VS2]], <vscale x 32 x i1> [[MASK]], i64 7, i64 [[VL]], i64 1)
+// CHECK-RV64-NEXT: ret <vscale x 32 x bfloat> [[TMP0]]
+//
+vbfloat16m8_t test_vfnmadd_vv_bf16m8_mu(vbool2_t mask, vbfloat16m8_t vd, vbfloat16m8_t vs1, vbfloat16m8_t vs2, size_t vl) {
+ return __riscv_vfnmadd_vv_bf16m8_mu(mask, vd, vs1, vs2, vl);
+}
+
+// CHECK-RV64-LABEL: define dso_local <vscale x 32 x bfloat> @test_vfnmadd_vf_bf16m8_mu(
+// CHECK-RV64-SAME: <vscale x 32 x i1> [[MASK:%.*]], <vscale x 32 x bfloat> [[VD:%.*]], bfloat noundef [[RS1:%.*]], <vscale x 32 x bfloat> [[VS2:%.*]], i64 noundef [[VL:%.*]]) #[[ATTR0]] {
+// CHECK-RV64-NEXT: [[ENTRY:.*:]]
+// CHECK-RV64-NEXT: [[TMP0:%.*]] = call <vscale x 32 x bfloat> @llvm.riscv.vfnmadd.mask.nxv32bf16.bf16.i64(<vscale x 32 x bfloat> [[VD]], bfloat [[RS1]], <vscale x 32 x bfloat> [[VS2]], <vscale x 32 x i1> [[MASK]], i64 7, i64 [[VL]], i64 1)
+// CHECK-RV64-NEXT: ret <vscale x 32 x bfloat> [[TMP0]]
+//
+vbfloat16m8_t test_vfnmadd_vf_bf16m8_mu(vbool2_t mask, vbfloat16m8_t vd, __bf16 rs1, vbfloat16m8_t vs2, size_t vl) {
+ return __riscv_vfnmadd_vf_bf16m8_mu(mask, vd, rs1, vs2, vl);
+}
+
diff --git a/clang/test/CodeGen/RISCV/rvv-intrinsics-autogenerated/zvfbfa/policy/non-overloaded/vfnmsac.c b/clang/test/CodeGen/RISCV/rvv-intrinsics-autogenerated/zvfbfa/policy/non-overloaded/vfnmsac.c
new file mode 100644
index 0000000..e0c289d
--- /dev/null
+++ b/clang/test/CodeGen/RISCV/rvv-intrinsics-autogenerated/zvfbfa/policy/non-overloaded/vfnmsac.c
@@ -0,0 +1,489 @@
+// NOTE: Assertions have been autogenerated by utils/update_cc_test_checks.py UTC_ARGS: --version 5
+// REQUIRES: riscv-registered-target
+// RUN: %clang_cc1 -triple riscv64 -target-feature +v \
+// RUN: -target-feature +experimental-zvfbfa -disable-O0-optnone \
+// RUN: -emit-llvm %s -o - | opt -S -passes=mem2reg | \
+// RUN: FileCheck --check-prefix=CHECK-RV64 %s
+
+#include <riscv_vector.h>
+
+// CHECK-RV64-LABEL: define dso_local <vscale x 1 x bfloat> @test_vfnmsac_vv_bf16mf4_tu(
+// CHECK-RV64-SAME: <vscale x 1 x bfloat> [[VD:%.*]], <vscale x 1 x bfloat> [[VS1:%.*]], <vscale x 1 x bfloat> [[VS2:%.*]], i64 noundef [[VL:%.*]]) #[[ATTR0:[0-9]+]] {
+// CHECK-RV64-NEXT: [[ENTRY:.*:]]
+// CHECK-RV64-NEXT: [[TMP0:%.*]] = call <vscale x 1 x bfloat> @llvm.riscv.vfnmsac.nxv1bf16.nxv1bf16.i64(<vscale x 1 x bfloat> [[VD]], <vscale x 1 x bfloat> [[VS1]], <vscale x 1 x bfloat> [[VS2]], i64 7, i64 [[VL]], i64 2)
+// CHECK-RV64-NEXT: ret <vscale x 1 x bfloat> [[TMP0]]
+//
+vbfloat16mf4_t test_vfnmsac_vv_bf16mf4_tu(vbfloat16mf4_t vd, vbfloat16mf4_t vs1, vbfloat16mf4_t vs2, size_t vl) {
+ return __riscv_vfnmsac_vv_bf16mf4_tu(vd, vs1, vs2, vl);
+}
+
+// CHECK-RV64-LABEL: define dso_local <vscale x 1 x bfloat> @test_vfnmsac_vf_bf16mf4_tu(
+// CHECK-RV64-SAME: <vscale x 1 x bfloat> [[VD:%.*]], bfloat noundef [[RS1:%.*]], <vscale x 1 x bfloat> [[VS2:%.*]], i64 noundef [[VL:%.*]]) #[[ATTR0]] {
+// CHECK-RV64-NEXT: [[ENTRY:.*:]]
+// CHECK-RV64-NEXT: [[TMP0:%.*]] = call <vscale x 1 x bfloat> @llvm.riscv.vfnmsac.nxv1bf16.bf16.i64(<vscale x 1 x bfloat> [[VD]], bfloat [[RS1]], <vscale x 1 x bfloat> [[VS2]], i64 7, i64 [[VL]], i64 2)
+// CHECK-RV64-NEXT: ret <vscale x 1 x bfloat> [[TMP0]]
+//
+vbfloat16mf4_t test_vfnmsac_vf_bf16mf4_tu(vbfloat16mf4_t vd, __bf16 rs1, vbfloat16mf4_t vs2, size_t vl) {
+ return __riscv_vfnmsac_vf_bf16mf4_tu(vd, rs1, vs2, vl);
+}
+
+// CHECK-RV64-LABEL: define dso_local <vscale x 2 x bfloat> @test_vfnmsac_vv_bf16mf2_tu(
+// CHECK-RV64-SAME: <vscale x 2 x bfloat> [[VD:%.*]], <vscale x 2 x bfloat> [[VS1:%.*]], <vscale x 2 x bfloat> [[VS2:%.*]], i64 noundef [[VL:%.*]]) #[[ATTR0]] {
+// CHECK-RV64-NEXT: [[ENTRY:.*:]]
+// CHECK-RV64-NEXT: [[TMP0:%.*]] = call <vscale x 2 x bfloat> @llvm.riscv.vfnmsac.nxv2bf16.nxv2bf16.i64(<vscale x 2 x bfloat> [[VD]], <vscale x 2 x bfloat> [[VS1]], <vscale x 2 x bfloat> [[VS2]], i64 7, i64 [[VL]], i64 2)
+// CHECK-RV64-NEXT: ret <vscale x 2 x bfloat> [[TMP0]]
+//
+vbfloat16mf2_t test_vfnmsac_vv_bf16mf2_tu(vbfloat16mf2_t vd, vbfloat16mf2_t vs1, vbfloat16mf2_t vs2, size_t vl) {
+ return __riscv_vfnmsac_vv_bf16mf2_tu(vd, vs1, vs2, vl);
+}
+
+// CHECK-RV64-LABEL: define dso_local <vscale x 2 x bfloat> @test_vfnmsac_vf_bf16mf2_tu(
+// CHECK-RV64-SAME: <vscale x 2 x bfloat> [[VD:%.*]], bfloat noundef [[RS1:%.*]], <vscale x 2 x bfloat> [[VS2:%.*]], i64 noundef [[VL:%.*]]) #[[ATTR0]] {
+// CHECK-RV64-NEXT: [[ENTRY:.*:]]
+// CHECK-RV64-NEXT: [[TMP0:%.*]] = call <vscale x 2 x bfloat> @llvm.riscv.vfnmsac.nxv2bf16.bf16.i64(<vscale x 2 x bfloat> [[VD]], bfloat [[RS1]], <vscale x 2 x bfloat> [[VS2]], i64 7, i64 [[VL]], i64 2)
+// CHECK-RV64-NEXT: ret <vscale x 2 x bfloat> [[TMP0]]
+//
+vbfloat16mf2_t test_vfnmsac_vf_bf16mf2_tu(vbfloat16mf2_t vd, __bf16 rs1, vbfloat16mf2_t vs2, size_t vl) {
+ return __riscv_vfnmsac_vf_bf16mf2_tu(vd, rs1, vs2, vl);
+}
+
+// CHECK-RV64-LABEL: define dso_local <vscale x 4 x bfloat> @test_vfnmsac_vv_bf16m1_tu(
+// CHECK-RV64-SAME: <vscale x 4 x bfloat> [[VD:%.*]], <vscale x 4 x bfloat> [[VS1:%.*]], <vscale x 4 x bfloat> [[VS2:%.*]], i64 noundef [[VL:%.*]]) #[[ATTR0]] {
+// CHECK-RV64-NEXT: [[ENTRY:.*:]]
+// CHECK-RV64-NEXT: [[TMP0:%.*]] = call <vscale x 4 x bfloat> @llvm.riscv.vfnmsac.nxv4bf16.nxv4bf16.i64(<vscale x 4 x bfloat> [[VD]], <vscale x 4 x bfloat> [[VS1]], <vscale x 4 x bfloat> [[VS2]], i64 7, i64 [[VL]], i64 2)
+// CHECK-RV64-NEXT: ret <vscale x 4 x bfloat> [[TMP0]]
+//
+vbfloat16m1_t test_vfnmsac_vv_bf16m1_tu(vbfloat16m1_t vd, vbfloat16m1_t vs1, vbfloat16m1_t vs2, size_t vl) {
+ return __riscv_vfnmsac_vv_bf16m1_tu(vd, vs1, vs2, vl);
+}
+
+// CHECK-RV64-LABEL: define dso_local <vscale x 4 x bfloat> @test_vfnmsac_vf_bf16m1_tu(
+// CHECK-RV64-SAME: <vscale x 4 x bfloat> [[VD:%.*]], bfloat noundef [[RS1:%.*]], <vscale x 4 x bfloat> [[VS2:%.*]], i64 noundef [[VL:%.*]]) #[[ATTR0]] {
+// CHECK-RV64-NEXT: [[ENTRY:.*:]]
+// CHECK-RV64-NEXT: [[TMP0:%.*]] = call <vscale x 4 x bfloat> @llvm.riscv.vfnmsac.nxv4bf16.bf16.i64(<vscale x 4 x bfloat> [[VD]], bfloat [[RS1]], <vscale x 4 x bfloat> [[VS2]], i64 7, i64 [[VL]], i64 2)
+// CHECK-RV64-NEXT: ret <vscale x 4 x bfloat> [[TMP0]]
+//
+vbfloat16m1_t test_vfnmsac_vf_bf16m1_tu(vbfloat16m1_t vd, __bf16 rs1, vbfloat16m1_t vs2, size_t vl) {
+ return __riscv_vfnmsac_vf_bf16m1_tu(vd, rs1, vs2, vl);
+}
+
+// CHECK-RV64-LABEL: define dso_local <vscale x 8 x bfloat> @test_vfnmsac_vv_bf16m2_tu(
+// CHECK-RV64-SAME: <vscale x 8 x bfloat> [[VD:%.*]], <vscale x 8 x bfloat> [[VS1:%.*]], <vscale x 8 x bfloat> [[VS2:%.*]], i64 noundef [[VL:%.*]]) #[[ATTR0]] {
+// CHECK-RV64-NEXT: [[ENTRY:.*:]]
+// CHECK-RV64-NEXT: [[TMP0:%.*]] = call <vscale x 8 x bfloat> @llvm.riscv.vfnmsac.nxv8bf16.nxv8bf16.i64(<vscale x 8 x bfloat> [[VD]], <vscale x 8 x bfloat> [[VS1]], <vscale x 8 x bfloat> [[VS2]], i64 7, i64 [[VL]], i64 2)
+// CHECK-RV64-NEXT: ret <vscale x 8 x bfloat> [[TMP0]]
+//
+vbfloat16m2_t test_vfnmsac_vv_bf16m2_tu(vbfloat16m2_t vd, vbfloat16m2_t vs1, vbfloat16m2_t vs2, size_t vl) {
+ return __riscv_vfnmsac_vv_bf16m2_tu(vd, vs1, vs2, vl);
+}
+
+// CHECK-RV64-LABEL: define dso_local <vscale x 8 x bfloat> @test_vfnmsac_vf_bf16m2_tu(
+// CHECK-RV64-SAME: <vscale x 8 x bfloat> [[VD:%.*]], bfloat noundef [[RS1:%.*]], <vscale x 8 x bfloat> [[VS2:%.*]], i64 noundef [[VL:%.*]]) #[[ATTR0]] {
+// CHECK-RV64-NEXT: [[ENTRY:.*:]]
+// CHECK-RV64-NEXT: [[TMP0:%.*]] = call <vscale x 8 x bfloat> @llvm.riscv.vfnmsac.nxv8bf16.bf16.i64(<vscale x 8 x bfloat> [[VD]], bfloat [[RS1]], <vscale x 8 x bfloat> [[VS2]], i64 7, i64 [[VL]], i64 2)
+// CHECK-RV64-NEXT: ret <vscale x 8 x bfloat> [[TMP0]]
+//
+vbfloat16m2_t test_vfnmsac_vf_bf16m2_tu(vbfloat16m2_t vd, __bf16 rs1, vbfloat16m2_t vs2, size_t vl) {
+ return __riscv_vfnmsac_vf_bf16m2_tu(vd, rs1, vs2, vl);
+}
+
+// CHECK-RV64-LABEL: define dso_local <vscale x 16 x bfloat> @test_vfnmsac_vv_bf16m4_tu(
+// CHECK-RV64-SAME: <vscale x 16 x bfloat> [[VD:%.*]], <vscale x 16 x bfloat> [[VS1:%.*]], <vscale x 16 x bfloat> [[VS2:%.*]], i64 noundef [[VL:%.*]]) #[[ATTR0]] {
+// CHECK-RV64-NEXT: [[ENTRY:.*:]]
+// CHECK-RV64-NEXT: [[TMP0:%.*]] = call <vscale x 16 x bfloat> @llvm.riscv.vfnmsac.nxv16bf16.nxv16bf16.i64(<vscale x 16 x bfloat> [[VD]], <vscale x 16 x bfloat> [[VS1]], <vscale x 16 x bfloat> [[VS2]], i64 7, i64 [[VL]], i64 2)
+// CHECK-RV64-NEXT: ret <vscale x 16 x bfloat> [[TMP0]]
+//
+vbfloat16m4_t test_vfnmsac_vv_bf16m4_tu(vbfloat16m4_t vd, vbfloat16m4_t vs1, vbfloat16m4_t vs2, size_t vl) {
+ return __riscv_vfnmsac_vv_bf16m4_tu(vd, vs1, vs2, vl);
+}
+
+// CHECK-RV64-LABEL: define dso_local <vscale x 16 x bfloat> @test_vfnmsac_vf_bf16m4_tu(
+// CHECK-RV64-SAME: <vscale x 16 x bfloat> [[VD:%.*]], bfloat noundef [[RS1:%.*]], <vscale x 16 x bfloat> [[VS2:%.*]], i64 noundef [[VL:%.*]]) #[[ATTR0]] {
+// CHECK-RV64-NEXT: [[ENTRY:.*:]]
+// CHECK-RV64-NEXT: [[TMP0:%.*]] = call <vscale x 16 x bfloat> @llvm.riscv.vfnmsac.nxv16bf16.bf16.i64(<vscale x 16 x bfloat> [[VD]], bfloat [[RS1]], <vscale x 16 x bfloat> [[VS2]], i64 7, i64 [[VL]], i64 2)
+// CHECK-RV64-NEXT: ret <vscale x 16 x bfloat> [[TMP0]]
+//
+vbfloat16m4_t test_vfnmsac_vf_bf16m4_tu(vbfloat16m4_t vd, __bf16 rs1, vbfloat16m4_t vs2, size_t vl) {
+ return __riscv_vfnmsac_vf_bf16m4_tu(vd, rs1, vs2, vl);
+}
+
+// CHECK-RV64-LABEL: define dso_local <vscale x 32 x bfloat> @test_vfnmsac_vv_bf16m8_tu(
+// CHECK-RV64-SAME: <vscale x 32 x bfloat> [[VD:%.*]], <vscale x 32 x bfloat> [[VS1:%.*]], <vscale x 32 x bfloat> [[VS2:%.*]], i64 noundef [[VL:%.*]]) #[[ATTR0]] {
+// CHECK-RV64-NEXT: [[ENTRY:.*:]]
+// CHECK-RV64-NEXT: [[TMP0:%.*]] = call <vscale x 32 x bfloat> @llvm.riscv.vfnmsac.nxv32bf16.nxv32bf16.i64(<vscale x 32 x bfloat> [[VD]], <vscale x 32 x bfloat> [[VS1]], <vscale x 32 x bfloat> [[VS2]], i64 7, i64 [[VL]], i64 2)
+// CHECK-RV64-NEXT: ret <vscale x 32 x bfloat> [[TMP0]]
+//
+vbfloat16m8_t test_vfnmsac_vv_bf16m8_tu(vbfloat16m8_t vd, vbfloat16m8_t vs1, vbfloat16m8_t vs2, size_t vl) {
+ return __riscv_vfnmsac_vv_bf16m8_tu(vd, vs1, vs2, vl);
+}
+
+// CHECK-RV64-LABEL: define dso_local <vscale x 32 x bfloat> @test_vfnmsac_vf_bf16m8_tu(
+// CHECK-RV64-SAME: <vscale x 32 x bfloat> [[VD:%.*]], bfloat noundef [[RS1:%.*]], <vscale x 32 x bfloat> [[VS2:%.*]], i64 noundef [[VL:%.*]]) #[[ATTR0]] {
+// CHECK-RV64-NEXT: [[ENTRY:.*:]]
+// CHECK-RV64-NEXT: [[TMP0:%.*]] = call <vscale x 32 x bfloat> @llvm.riscv.vfnmsac.nxv32bf16.bf16.i64(<vscale x 32 x bfloat> [[VD]], bfloat [[RS1]], <vscale x 32 x bfloat> [[VS2]], i64 7, i64 [[VL]], i64 2)
+// CHECK-RV64-NEXT: ret <vscale x 32 x bfloat> [[TMP0]]
+//
+vbfloat16m8_t test_vfnmsac_vf_bf16m8_tu(vbfloat16m8_t vd, __bf16 rs1, vbfloat16m8_t vs2, size_t vl) {
+ return __riscv_vfnmsac_vf_bf16m8_tu(vd, rs1, vs2, vl);
+}
+
+// CHECK-RV64-LABEL: define dso_local <vscale x 1 x bfloat> @test_vfnmsac_vv_bf16mf4_tum(
+// CHECK-RV64-SAME: <vscale x 1 x i1> [[MASK:%.*]], <vscale x 1 x bfloat> [[VD:%.*]], <vscale x 1 x bfloat> [[VS1:%.*]], <vscale x 1 x bfloat> [[VS2:%.*]], i64 noundef [[VL:%.*]]) #[[ATTR0]] {
+// CHECK-RV64-NEXT: [[ENTRY:.*:]]
+// CHECK-RV64-NEXT: [[TMP0:%.*]] = call <vscale x 1 x bfloat> @llvm.riscv.vfnmsac.mask.nxv1bf16.nxv1bf16.i64(<vscale x 1 x bfloat> [[VD]], <vscale x 1 x bfloat> [[VS1]], <vscale x 1 x bfloat> [[VS2]], <vscale x 1 x i1> [[MASK]], i64 7, i64 [[VL]], i64 2)
+// CHECK-RV64-NEXT: ret <vscale x 1 x bfloat> [[TMP0]]
+//
+vbfloat16mf4_t test_vfnmsac_vv_bf16mf4_tum(vbool64_t mask, vbfloat16mf4_t vd, vbfloat16mf4_t vs1, vbfloat16mf4_t vs2, size_t vl) {
+ return __riscv_vfnmsac_vv_bf16mf4_tum(mask, vd, vs1, vs2, vl);
+}
+
+// CHECK-RV64-LABEL: define dso_local <vscale x 1 x bfloat> @test_vfnmsac_vf_bf16mf4_tum(
+// CHECK-RV64-SAME: <vscale x 1 x i1> [[MASK:%.*]], <vscale x 1 x bfloat> [[VD:%.*]], bfloat noundef [[RS1:%.*]], <vscale x 1 x bfloat> [[VS2:%.*]], i64 noundef [[VL:%.*]]) #[[ATTR0]] {
+// CHECK-RV64-NEXT: [[ENTRY:.*:]]
+// CHECK-RV64-NEXT: [[TMP0:%.*]] = call <vscale x 1 x bfloat> @llvm.riscv.vfnmsac.mask.nxv1bf16.bf16.i64(<vscale x 1 x bfloat> [[VD]], bfloat [[RS1]], <vscale x 1 x bfloat> [[VS2]], <vscale x 1 x i1> [[MASK]], i64 7, i64 [[VL]], i64 2)
+// CHECK-RV64-NEXT: ret <vscale x 1 x bfloat> [[TMP0]]
+//
+vbfloat16mf4_t test_vfnmsac_vf_bf16mf4_tum(vbool64_t mask, vbfloat16mf4_t vd, __bf16 rs1, vbfloat16mf4_t vs2, size_t vl) {
+ return __riscv_vfnmsac_vf_bf16mf4_tum(mask, vd, rs1, vs2, vl);
+}
+
+// CHECK-RV64-LABEL: define dso_local <vscale x 2 x bfloat> @test_vfnmsac_vv_bf16mf2_tum(
+// CHECK-RV64-SAME: <vscale x 2 x i1> [[MASK:%.*]], <vscale x 2 x bfloat> [[VD:%.*]], <vscale x 2 x bfloat> [[VS1:%.*]], <vscale x 2 x bfloat> [[VS2:%.*]], i64 noundef [[VL:%.*]]) #[[ATTR0]] {
+// CHECK-RV64-NEXT: [[ENTRY:.*:]]
+// CHECK-RV64-NEXT: [[TMP0:%.*]] = call <vscale x 2 x bfloat> @llvm.riscv.vfnmsac.mask.nxv2bf16.nxv2bf16.i64(<vscale x 2 x bfloat> [[VD]], <vscale x 2 x bfloat> [[VS1]], <vscale x 2 x bfloat> [[VS2]], <vscale x 2 x i1> [[MASK]], i64 7, i64 [[VL]], i64 2)
+// CHECK-RV64-NEXT: ret <vscale x 2 x bfloat> [[TMP0]]
+//
+vbfloat16mf2_t test_vfnmsac_vv_bf16mf2_tum(vbool32_t mask, vbfloat16mf2_t vd, vbfloat16mf2_t vs1, vbfloat16mf2_t vs2, size_t vl) {
+ return __riscv_vfnmsac_vv_bf16mf2_tum(mask, vd, vs1, vs2, vl);
+}
+
+// CHECK-RV64-LABEL: define dso_local <vscale x 2 x bfloat> @test_vfnmsac_vf_bf16mf2_tum(
+// CHECK-RV64-SAME: <vscale x 2 x i1> [[MASK:%.*]], <vscale x 2 x bfloat> [[VD:%.*]], bfloat noundef [[RS1:%.*]], <vscale x 2 x bfloat> [[VS2:%.*]], i64 noundef [[VL:%.*]]) #[[ATTR0]] {
+// CHECK-RV64-NEXT: [[ENTRY:.*:]]
+// CHECK-RV64-NEXT: [[TMP0:%.*]] = call <vscale x 2 x bfloat> @llvm.riscv.vfnmsac.mask.nxv2bf16.bf16.i64(<vscale x 2 x bfloat> [[VD]], bfloat [[RS1]], <vscale x 2 x bfloat> [[VS2]], <vscale x 2 x i1> [[MASK]], i64 7, i64 [[VL]], i64 2)
+// CHECK-RV64-NEXT: ret <vscale x 2 x bfloat> [[TMP0]]
+//
+vbfloat16mf2_t test_vfnmsac_vf_bf16mf2_tum(vbool32_t mask, vbfloat16mf2_t vd, __bf16 rs1, vbfloat16mf2_t vs2, size_t vl) {
+ return __riscv_vfnmsac_vf_bf16mf2_tum(mask, vd, rs1, vs2, vl);
+}
+
+// CHECK-RV64-LABEL: define dso_local <vscale x 4 x bfloat> @test_vfnmsac_vv_bf16m1_tum(
+// CHECK-RV64-SAME: <vscale x 4 x i1> [[MASK:%.*]], <vscale x 4 x bfloat> [[VD:%.*]], <vscale x 4 x bfloat> [[VS1:%.*]], <vscale x 4 x bfloat> [[VS2:%.*]], i64 noundef [[VL:%.*]]) #[[ATTR0]] {
+// CHECK-RV64-NEXT: [[ENTRY:.*:]]
+// CHECK-RV64-NEXT: [[TMP0:%.*]] = call <vscale x 4 x bfloat> @llvm.riscv.vfnmsac.mask.nxv4bf16.nxv4bf16.i64(<vscale x 4 x bfloat> [[VD]], <vscale x 4 x bfloat> [[VS1]], <vscale x 4 x bfloat> [[VS2]], <vscale x 4 x i1> [[MASK]], i64 7, i64 [[VL]], i64 2)
+// CHECK-RV64-NEXT: ret <vscale x 4 x bfloat> [[TMP0]]
+//
+vbfloat16m1_t test_vfnmsac_vv_bf16m1_tum(vbool16_t mask, vbfloat16m1_t vd, vbfloat16m1_t vs1, vbfloat16m1_t vs2, size_t vl) {
+ return __riscv_vfnmsac_vv_bf16m1_tum(mask, vd, vs1, vs2, vl);
+}
+
+// CHECK-RV64-LABEL: define dso_local <vscale x 4 x bfloat> @test_vfnmsac_vf_bf16m1_tum(
+// CHECK-RV64-SAME: <vscale x 4 x i1> [[MASK:%.*]], <vscale x 4 x bfloat> [[VD:%.*]], bfloat noundef [[RS1:%.*]], <vscale x 4 x bfloat> [[VS2:%.*]], i64 noundef [[VL:%.*]]) #[[ATTR0]] {
+// CHECK-RV64-NEXT: [[ENTRY:.*:]]
+// CHECK-RV64-NEXT: [[TMP0:%.*]] = call <vscale x 4 x bfloat> @llvm.riscv.vfnmsac.mask.nxv4bf16.bf16.i64(<vscale x 4 x bfloat> [[VD]], bfloat [[RS1]], <vscale x 4 x bfloat> [[VS2]], <vscale x 4 x i1> [[MASK]], i64 7, i64 [[VL]], i64 2)
+// CHECK-RV64-NEXT: ret <vscale x 4 x bfloat> [[TMP0]]
+//
+vbfloat16m1_t test_vfnmsac_vf_bf16m1_tum(vbool16_t mask, vbfloat16m1_t vd, __bf16 rs1, vbfloat16m1_t vs2, size_t vl) {
+ return __riscv_vfnmsac_vf_bf16m1_tum(mask, vd, rs1, vs2, vl);
+}
+
+// CHECK-RV64-LABEL: define dso_local <vscale x 8 x bfloat> @test_vfnmsac_vv_bf16m2_tum(
+// CHECK-RV64-SAME: <vscale x 8 x i1> [[MASK:%.*]], <vscale x 8 x bfloat> [[VD:%.*]], <vscale x 8 x bfloat> [[VS1:%.*]], <vscale x 8 x bfloat> [[VS2:%.*]], i64 noundef [[VL:%.*]]) #[[ATTR0]] {
+// CHECK-RV64-NEXT: [[ENTRY:.*:]]
+// CHECK-RV64-NEXT: [[TMP0:%.*]] = call <vscale x 8 x bfloat> @llvm.riscv.vfnmsac.mask.nxv8bf16.nxv8bf16.i64(<vscale x 8 x bfloat> [[VD]], <vscale x 8 x bfloat> [[VS1]], <vscale x 8 x bfloat> [[VS2]], <vscale x 8 x i1> [[MASK]], i64 7, i64 [[VL]], i64 2)
+// CHECK-RV64-NEXT: ret <vscale x 8 x bfloat> [[TMP0]]
+//
+vbfloat16m2_t test_vfnmsac_vv_bf16m2_tum(vbool8_t mask, vbfloat16m2_t vd, vbfloat16m2_t vs1, vbfloat16m2_t vs2, size_t vl) {
+ return __riscv_vfnmsac_vv_bf16m2_tum(mask, vd, vs1, vs2, vl);
+}
+
+// CHECK-RV64-LABEL: define dso_local <vscale x 8 x bfloat> @test_vfnmsac_vf_bf16m2_tum(
+// CHECK-RV64-SAME: <vscale x 8 x i1> [[MASK:%.*]], <vscale x 8 x bfloat> [[VD:%.*]], bfloat noundef [[RS1:%.*]], <vscale x 8 x bfloat> [[VS2:%.*]], i64 noundef [[VL:%.*]]) #[[ATTR0]] {
+// CHECK-RV64-NEXT: [[ENTRY:.*:]]
+// CHECK-RV64-NEXT: [[TMP0:%.*]] = call <vscale x 8 x bfloat> @llvm.riscv.vfnmsac.mask.nxv8bf16.bf16.i64(<vscale x 8 x bfloat> [[VD]], bfloat [[RS1]], <vscale x 8 x bfloat> [[VS2]], <vscale x 8 x i1> [[MASK]], i64 7, i64 [[VL]], i64 2)
+// CHECK-RV64-NEXT: ret <vscale x 8 x bfloat> [[TMP0]]
+//
+vbfloat16m2_t test_vfnmsac_vf_bf16m2_tum(vbool8_t mask, vbfloat16m2_t vd, __bf16 rs1, vbfloat16m2_t vs2, size_t vl) {
+ return __riscv_vfnmsac_vf_bf16m2_tum(mask, vd, rs1, vs2, vl);
+}
+
+// CHECK-RV64-LABEL: define dso_local <vscale x 16 x bfloat> @test_vfnmsac_vv_bf16m4_tum(
+// CHECK-RV64-SAME: <vscale x 16 x i1> [[MASK:%.*]], <vscale x 16 x bfloat> [[VD:%.*]], <vscale x 16 x bfloat> [[VS1:%.*]], <vscale x 16 x bfloat> [[VS2:%.*]], i64 noundef [[VL:%.*]]) #[[ATTR0]] {
+// CHECK-RV64-NEXT: [[ENTRY:.*:]]
+// CHECK-RV64-NEXT: [[TMP0:%.*]] = call <vscale x 16 x bfloat> @llvm.riscv.vfnmsac.mask.nxv16bf16.nxv16bf16.i64(<vscale x 16 x bfloat> [[VD]], <vscale x 16 x bfloat> [[VS1]], <vscale x 16 x bfloat> [[VS2]], <vscale x 16 x i1> [[MASK]], i64 7, i64 [[VL]], i64 2)
+// CHECK-RV64-NEXT: ret <vscale x 16 x bfloat> [[TMP0]]
+//
+vbfloat16m4_t test_vfnmsac_vv_bf16m4_tum(vbool4_t mask, vbfloat16m4_t vd, vbfloat16m4_t vs1, vbfloat16m4_t vs2, size_t vl) {
+ return __riscv_vfnmsac_vv_bf16m4_tum(mask, vd, vs1, vs2, vl);
+}
+
+// CHECK-RV64-LABEL: define dso_local <vscale x 16 x bfloat> @test_vfnmsac_vf_bf16m4_tum(
+// CHECK-RV64-SAME: <vscale x 16 x i1> [[MASK:%.*]], <vscale x 16 x bfloat> [[VD:%.*]], bfloat noundef [[RS1:%.*]], <vscale x 16 x bfloat> [[VS2:%.*]], i64 noundef [[VL:%.*]]) #[[ATTR0]] {
+// CHECK-RV64-NEXT: [[ENTRY:.*:]]
+// CHECK-RV64-NEXT: [[TMP0:%.*]] = call <vscale x 16 x bfloat> @llvm.riscv.vfnmsac.mask.nxv16bf16.bf16.i64(<vscale x 16 x bfloat> [[VD]], bfloat [[RS1]], <vscale x 16 x bfloat> [[VS2]], <vscale x 16 x i1> [[MASK]], i64 7, i64 [[VL]], i64 2)
+// CHECK-RV64-NEXT: ret <vscale x 16 x bfloat> [[TMP0]]
+//
+vbfloat16m4_t test_vfnmsac_vf_bf16m4_tum(vbool4_t mask, vbfloat16m4_t vd, __bf16 rs1, vbfloat16m4_t vs2, size_t vl) {
+ return __riscv_vfnmsac_vf_bf16m4_tum(mask, vd, rs1, vs2, vl);
+}
+
+// CHECK-RV64-LABEL: define dso_local <vscale x 32 x bfloat> @test_vfnmsac_vv_bf16m8_tum(
+// CHECK-RV64-SAME: <vscale x 32 x i1> [[MASK:%.*]], <vscale x 32 x bfloat> [[VD:%.*]], <vscale x 32 x bfloat> [[VS1:%.*]], <vscale x 32 x bfloat> [[VS2:%.*]], i64 noundef [[VL:%.*]]) #[[ATTR0]] {
+// CHECK-RV64-NEXT: [[ENTRY:.*:]]
+// CHECK-RV64-NEXT: [[TMP0:%.*]] = call <vscale x 32 x bfloat> @llvm.riscv.vfnmsac.mask.nxv32bf16.nxv32bf16.i64(<vscale x 32 x bfloat> [[VD]], <vscale x 32 x bfloat> [[VS1]], <vscale x 32 x bfloat> [[VS2]], <vscale x 32 x i1> [[MASK]], i64 7, i64 [[VL]], i64 2)
+// CHECK-RV64-NEXT: ret <vscale x 32 x bfloat> [[TMP0]]
+//
+vbfloat16m8_t test_vfnmsac_vv_bf16m8_tum(vbool2_t mask, vbfloat16m8_t vd, vbfloat16m8_t vs1, vbfloat16m8_t vs2, size_t vl) {
+ return __riscv_vfnmsac_vv_bf16m8_tum(mask, vd, vs1, vs2, vl);
+}
+
+// CHECK-RV64-LABEL: define dso_local <vscale x 32 x bfloat> @test_vfnmsac_vf_bf16m8_tum(
+// CHECK-RV64-SAME: <vscale x 32 x i1> [[MASK:%.*]], <vscale x 32 x bfloat> [[VD:%.*]], bfloat noundef [[RS1:%.*]], <vscale x 32 x bfloat> [[VS2:%.*]], i64 noundef [[VL:%.*]]) #[[ATTR0]] {
+// CHECK-RV64-NEXT: [[ENTRY:.*:]]
+// CHECK-RV64-NEXT: [[TMP0:%.*]] = call <vscale x 32 x bfloat> @llvm.riscv.vfnmsac.mask.nxv32bf16.bf16.i64(<vscale x 32 x bfloat> [[VD]], bfloat [[RS1]], <vscale x 32 x bfloat> [[VS2]], <vscale x 32 x i1> [[MASK]], i64 7, i64 [[VL]], i64 2)
+// CHECK-RV64-NEXT: ret <vscale x 32 x bfloat> [[TMP0]]
+//
+vbfloat16m8_t test_vfnmsac_vf_bf16m8_tum(vbool2_t mask, vbfloat16m8_t vd, __bf16 rs1, vbfloat16m8_t vs2, size_t vl) {
+ return __riscv_vfnmsac_vf_bf16m8_tum(mask, vd, rs1, vs2, vl);
+}
+
+// CHECK-RV64-LABEL: define dso_local <vscale x 1 x bfloat> @test_vfnmsac_vv_bf16mf4_tumu(
+// CHECK-RV64-SAME: <vscale x 1 x i1> [[MASK:%.*]], <vscale x 1 x bfloat> [[VD:%.*]], <vscale x 1 x bfloat> [[VS1:%.*]], <vscale x 1 x bfloat> [[VS2:%.*]], i64 noundef [[VL:%.*]]) #[[ATTR0]] {
+// CHECK-RV64-NEXT: [[ENTRY:.*:]]
+// CHECK-RV64-NEXT: [[TMP0:%.*]] = call <vscale x 1 x bfloat> @llvm.riscv.vfnmsac.mask.nxv1bf16.nxv1bf16.i64(<vscale x 1 x bfloat> [[VD]], <vscale x 1 x bfloat> [[VS1]], <vscale x 1 x bfloat> [[VS2]], <vscale x 1 x i1> [[MASK]], i64 7, i64 [[VL]], i64 0)
+// CHECK-RV64-NEXT: ret <vscale x 1 x bfloat> [[TMP0]]
+//
+vbfloat16mf4_t test_vfnmsac_vv_bf16mf4_tumu(vbool64_t mask, vbfloat16mf4_t vd, vbfloat16mf4_t vs1, vbfloat16mf4_t vs2, size_t vl) {
+ return __riscv_vfnmsac_vv_bf16mf4_tumu(mask, vd, vs1, vs2, vl);
+}
+
+// CHECK-RV64-LABEL: define dso_local <vscale x 1 x bfloat> @test_vfnmsac_vf_bf16mf4_tumu(
+// CHECK-RV64-SAME: <vscale x 1 x i1> [[MASK:%.*]], <vscale x 1 x bfloat> [[VD:%.*]], bfloat noundef [[RS1:%.*]], <vscale x 1 x bfloat> [[VS2:%.*]], i64 noundef [[VL:%.*]]) #[[ATTR0]] {
+// CHECK-RV64-NEXT: [[ENTRY:.*:]]
+// CHECK-RV64-NEXT: [[TMP0:%.*]] = call <vscale x 1 x bfloat> @llvm.riscv.vfnmsac.mask.nxv1bf16.bf16.i64(<vscale x 1 x bfloat> [[VD]], bfloat [[RS1]], <vscale x 1 x bfloat> [[VS2]], <vscale x 1 x i1> [[MASK]], i64 7, i64 [[VL]], i64 0)
+// CHECK-RV64-NEXT: ret <vscale x 1 x bfloat> [[TMP0]]
+//
+vbfloat16mf4_t test_vfnmsac_vf_bf16mf4_tumu(vbool64_t mask, vbfloat16mf4_t vd, __bf16 rs1, vbfloat16mf4_t vs2, size_t vl) {
+ return __riscv_vfnmsac_vf_bf16mf4_tumu(mask, vd, rs1, vs2, vl);
+}
+
+// CHECK-RV64-LABEL: define dso_local <vscale x 2 x bfloat> @test_vfnmsac_vv_bf16mf2_tumu(
+// CHECK-RV64-SAME: <vscale x 2 x i1> [[MASK:%.*]], <vscale x 2 x bfloat> [[VD:%.*]], <vscale x 2 x bfloat> [[VS1:%.*]], <vscale x 2 x bfloat> [[VS2:%.*]], i64 noundef [[VL:%.*]]) #[[ATTR0]] {
+// CHECK-RV64-NEXT: [[ENTRY:.*:]]
+// CHECK-RV64-NEXT: [[TMP0:%.*]] = call <vscale x 2 x bfloat> @llvm.riscv.vfnmsac.mask.nxv2bf16.nxv2bf16.i64(<vscale x 2 x bfloat> [[VD]], <vscale x 2 x bfloat> [[VS1]], <vscale x 2 x bfloat> [[VS2]], <vscale x 2 x i1> [[MASK]], i64 7, i64 [[VL]], i64 0)
+// CHECK-RV64-NEXT: ret <vscale x 2 x bfloat> [[TMP0]]
+//
+vbfloat16mf2_t test_vfnmsac_vv_bf16mf2_tumu(vbool32_t mask, vbfloat16mf2_t vd, vbfloat16mf2_t vs1, vbfloat16mf2_t vs2, size_t vl) {
+ return __riscv_vfnmsac_vv_bf16mf2_tumu(mask, vd, vs1, vs2, vl);
+}
+
+// CHECK-RV64-LABEL: define dso_local <vscale x 2 x bfloat> @test_vfnmsac_vf_bf16mf2_tumu(
+// CHECK-RV64-SAME: <vscale x 2 x i1> [[MASK:%.*]], <vscale x 2 x bfloat> [[VD:%.*]], bfloat noundef [[RS1:%.*]], <vscale x 2 x bfloat> [[VS2:%.*]], i64 noundef [[VL:%.*]]) #[[ATTR0]] {
+// CHECK-RV64-NEXT: [[ENTRY:.*:]]
+// CHECK-RV64-NEXT: [[TMP0:%.*]] = call <vscale x 2 x bfloat> @llvm.riscv.vfnmsac.mask.nxv2bf16.bf16.i64(<vscale x 2 x bfloat> [[VD]], bfloat [[RS1]], <vscale x 2 x bfloat> [[VS2]], <vscale x 2 x i1> [[MASK]], i64 7, i64 [[VL]], i64 0)
+// CHECK-RV64-NEXT: ret <vscale x 2 x bfloat> [[TMP0]]
+//
+vbfloat16mf2_t test_vfnmsac_vf_bf16mf2_tumu(vbool32_t mask, vbfloat16mf2_t vd, __bf16 rs1, vbfloat16mf2_t vs2, size_t vl) {
+ return __riscv_vfnmsac_vf_bf16mf2_tumu(mask, vd, rs1, vs2, vl);
+}
+
+// CHECK-RV64-LABEL: define dso_local <vscale x 4 x bfloat> @test_vfnmsac_vv_bf16m1_tumu(
+// CHECK-RV64-SAME: <vscale x 4 x i1> [[MASK:%.*]], <vscale x 4 x bfloat> [[VD:%.*]], <vscale x 4 x bfloat> [[VS1:%.*]], <vscale x 4 x bfloat> [[VS2:%.*]], i64 noundef [[VL:%.*]]) #[[ATTR0]] {
+// CHECK-RV64-NEXT: [[ENTRY:.*:]]
+// CHECK-RV64-NEXT: [[TMP0:%.*]] = call <vscale x 4 x bfloat> @llvm.riscv.vfnmsac.mask.nxv4bf16.nxv4bf16.i64(<vscale x 4 x bfloat> [[VD]], <vscale x 4 x bfloat> [[VS1]], <vscale x 4 x bfloat> [[VS2]], <vscale x 4 x i1> [[MASK]], i64 7, i64 [[VL]], i64 0)
+// CHECK-RV64-NEXT: ret <vscale x 4 x bfloat> [[TMP0]]
+//
+vbfloat16m1_t test_vfnmsac_vv_bf16m1_tumu(vbool16_t mask, vbfloat16m1_t vd, vbfloat16m1_t vs1, vbfloat16m1_t vs2, size_t vl) {
+ return __riscv_vfnmsac_vv_bf16m1_tumu(mask, vd, vs1, vs2, vl);
+}
+
+// CHECK-RV64-LABEL: define dso_local <vscale x 4 x bfloat> @test_vfnmsac_vf_bf16m1_tumu(
+// CHECK-RV64-SAME: <vscale x 4 x i1> [[MASK:%.*]], <vscale x 4 x bfloat> [[VD:%.*]], bfloat noundef [[RS1:%.*]], <vscale x 4 x bfloat> [[VS2:%.*]], i64 noundef [[VL:%.*]]) #[[ATTR0]] {
+// CHECK-RV64-NEXT: [[ENTRY:.*:]]
+// CHECK-RV64-NEXT: [[TMP0:%.*]] = call <vscale x 4 x bfloat> @llvm.riscv.vfnmsac.mask.nxv4bf16.bf16.i64(<vscale x 4 x bfloat> [[VD]], bfloat [[RS1]], <vscale x 4 x bfloat> [[VS2]], <vscale x 4 x i1> [[MASK]], i64 7, i64 [[VL]], i64 0)
+// CHECK-RV64-NEXT: ret <vscale x 4 x bfloat> [[TMP0]]
+//
+vbfloat16m1_t test_vfnmsac_vf_bf16m1_tumu(vbool16_t mask, vbfloat16m1_t vd, __bf16 rs1, vbfloat16m1_t vs2, size_t vl) {
+ return __riscv_vfnmsac_vf_bf16m1_tumu(mask, vd, rs1, vs2, vl);
+}
+
+// CHECK-RV64-LABEL: define dso_local <vscale x 8 x bfloat> @test_vfnmsac_vv_bf16m2_tumu(
+// CHECK-RV64-SAME: <vscale x 8 x i1> [[MASK:%.*]], <vscale x 8 x bfloat> [[VD:%.*]], <vscale x 8 x bfloat> [[VS1:%.*]], <vscale x 8 x bfloat> [[VS2:%.*]], i64 noundef [[VL:%.*]]) #[[ATTR0]] {
+// CHECK-RV64-NEXT: [[ENTRY:.*:]]
+// CHECK-RV64-NEXT: [[TMP0:%.*]] = call <vscale x 8 x bfloat> @llvm.riscv.vfnmsac.mask.nxv8bf16.nxv8bf16.i64(<vscale x 8 x bfloat> [[VD]], <vscale x 8 x bfloat> [[VS1]], <vscale x 8 x bfloat> [[VS2]], <vscale x 8 x i1> [[MASK]], i64 7, i64 [[VL]], i64 0)
+// CHECK-RV64-NEXT: ret <vscale x 8 x bfloat> [[TMP0]]
+//
+vbfloat16m2_t test_vfnmsac_vv_bf16m2_tumu(vbool8_t mask, vbfloat16m2_t vd, vbfloat16m2_t vs1, vbfloat16m2_t vs2, size_t vl) {
+ return __riscv_vfnmsac_vv_bf16m2_tumu(mask, vd, vs1, vs2, vl);
+}
+
+// CHECK-RV64-LABEL: define dso_local <vscale x 8 x bfloat> @test_vfnmsac_vf_bf16m2_tumu(
+// CHECK-RV64-SAME: <vscale x 8 x i1> [[MASK:%.*]], <vscale x 8 x bfloat> [[VD:%.*]], bfloat noundef [[RS1:%.*]], <vscale x 8 x bfloat> [[VS2:%.*]], i64 noundef [[VL:%.*]]) #[[ATTR0]] {
+// CHECK-RV64-NEXT: [[ENTRY:.*:]]
+// CHECK-RV64-NEXT: [[TMP0:%.*]] = call <vscale x 8 x bfloat> @llvm.riscv.vfnmsac.mask.nxv8bf16.bf16.i64(<vscale x 8 x bfloat> [[VD]], bfloat [[RS1]], <vscale x 8 x bfloat> [[VS2]], <vscale x 8 x i1> [[MASK]], i64 7, i64 [[VL]], i64 0)
+// CHECK-RV64-NEXT: ret <vscale x 8 x bfloat> [[TMP0]]
+//
+vbfloat16m2_t test_vfnmsac_vf_bf16m2_tumu(vbool8_t mask, vbfloat16m2_t vd, __bf16 rs1, vbfloat16m2_t vs2, size_t vl) {
+ return __riscv_vfnmsac_vf_bf16m2_tumu(mask, vd, rs1, vs2, vl);
+}
+
+// CHECK-RV64-LABEL: define dso_local <vscale x 16 x bfloat> @test_vfnmsac_vv_bf16m4_tumu(
+// CHECK-RV64-SAME: <vscale x 16 x i1> [[MASK:%.*]], <vscale x 16 x bfloat> [[VD:%.*]], <vscale x 16 x bfloat> [[VS1:%.*]], <vscale x 16 x bfloat> [[VS2:%.*]], i64 noundef [[VL:%.*]]) #[[ATTR0]] {
+// CHECK-RV64-NEXT: [[ENTRY:.*:]]
+// CHECK-RV64-NEXT: [[TMP0:%.*]] = call <vscale x 16 x bfloat> @llvm.riscv.vfnmsac.mask.nxv16bf16.nxv16bf16.i64(<vscale x 16 x bfloat> [[VD]], <vscale x 16 x bfloat> [[VS1]], <vscale x 16 x bfloat> [[VS2]], <vscale x 16 x i1> [[MASK]], i64 7, i64 [[VL]], i64 0)
+// CHECK-RV64-NEXT: ret <vscale x 16 x bfloat> [[TMP0]]
+//
+vbfloat16m4_t test_vfnmsac_vv_bf16m4_tumu(vbool4_t mask, vbfloat16m4_t vd, vbfloat16m4_t vs1, vbfloat16m4_t vs2, size_t vl) {
+ return __riscv_vfnmsac_vv_bf16m4_tumu(mask, vd, vs1, vs2, vl);
+}
+
+// CHECK-RV64-LABEL: define dso_local <vscale x 16 x bfloat> @test_vfnmsac_vf_bf16m4_tumu(
+// CHECK-RV64-SAME: <vscale x 16 x i1> [[MASK:%.*]], <vscale x 16 x bfloat> [[VD:%.*]], bfloat noundef [[RS1:%.*]], <vscale x 16 x bfloat> [[VS2:%.*]], i64 noundef [[VL:%.*]]) #[[ATTR0]] {
+// CHECK-RV64-NEXT: [[ENTRY:.*:]]
+// CHECK-RV64-NEXT: [[TMP0:%.*]] = call <vscale x 16 x bfloat> @llvm.riscv.vfnmsac.mask.nxv16bf16.bf16.i64(<vscale x 16 x bfloat> [[VD]], bfloat [[RS1]], <vscale x 16 x bfloat> [[VS2]], <vscale x 16 x i1> [[MASK]], i64 7, i64 [[VL]], i64 0)
+// CHECK-RV64-NEXT: ret <vscale x 16 x bfloat> [[TMP0]]
+//
+vbfloat16m4_t test_vfnmsac_vf_bf16m4_tumu(vbool4_t mask, vbfloat16m4_t vd, __bf16 rs1, vbfloat16m4_t vs2, size_t vl) {
+ return __riscv_vfnmsac_vf_bf16m4_tumu(mask, vd, rs1, vs2, vl);
+}
+
+// CHECK-RV64-LABEL: define dso_local <vscale x 32 x bfloat> @test_vfnmsac_vv_bf16m8_tumu(
+// CHECK-RV64-SAME: <vscale x 32 x i1> [[MASK:%.*]], <vscale x 32 x bfloat> [[VD:%.*]], <vscale x 32 x bfloat> [[VS1:%.*]], <vscale x 32 x bfloat> [[VS2:%.*]], i64 noundef [[VL:%.*]]) #[[ATTR0]] {
+// CHECK-RV64-NEXT: [[ENTRY:.*:]]
+// CHECK-RV64-NEXT: [[TMP0:%.*]] = call <vscale x 32 x bfloat> @llvm.riscv.vfnmsac.mask.nxv32bf16.nxv32bf16.i64(<vscale x 32 x bfloat> [[VD]], <vscale x 32 x bfloat> [[VS1]], <vscale x 32 x bfloat> [[VS2]], <vscale x 32 x i1> [[MASK]], i64 7, i64 [[VL]], i64 0)
+// CHECK-RV64-NEXT: ret <vscale x 32 x bfloat> [[TMP0]]
+//
+vbfloat16m8_t test_vfnmsac_vv_bf16m8_tumu(vbool2_t mask, vbfloat16m8_t vd, vbfloat16m8_t vs1, vbfloat16m8_t vs2, size_t vl) {
+ return __riscv_vfnmsac_vv_bf16m8_tumu(mask, vd, vs1, vs2, vl);
+}
+
+// CHECK-RV64-LABEL: define dso_local <vscale x 32 x bfloat> @test_vfnmsac_vf_bf16m8_tumu(
+// CHECK-RV64-SAME: <vscale x 32 x i1> [[MASK:%.*]], <vscale x 32 x bfloat> [[VD:%.*]], bfloat noundef [[RS1:%.*]], <vscale x 32 x bfloat> [[VS2:%.*]], i64 noundef [[VL:%.*]]) #[[ATTR0]] {
+// CHECK-RV64-NEXT: [[ENTRY:.*:]]
+// CHECK-RV64-NEXT: [[TMP0:%.*]] = call <vscale x 32 x bfloat> @llvm.riscv.vfnmsac.mask.nxv32bf16.bf16.i64(<vscale x 32 x bfloat> [[VD]], bfloat [[RS1]], <vscale x 32 x bfloat> [[VS2]], <vscale x 32 x i1> [[MASK]], i64 7, i64 [[VL]], i64 0)
+// CHECK-RV64-NEXT: ret <vscale x 32 x bfloat> [[TMP0]]
+//
+vbfloat16m8_t test_vfnmsac_vf_bf16m8_tumu(vbool2_t mask, vbfloat16m8_t vd, __bf16 rs1, vbfloat16m8_t vs2, size_t vl) {
+ return __riscv_vfnmsac_vf_bf16m8_tumu(mask, vd, rs1, vs2, vl);
+}
+
+// CHECK-RV64-LABEL: define dso_local <vscale x 1 x bfloat> @test_vfnmsac_vv_bf16mf4_mu(
+// CHECK-RV64-SAME: <vscale x 1 x i1> [[MASK:%.*]], <vscale x 1 x bfloat> [[VD:%.*]], <vscale x 1 x bfloat> [[VS1:%.*]], <vscale x 1 x bfloat> [[VS2:%.*]], i64 noundef [[VL:%.*]]) #[[ATTR0]] {
+// CHECK-RV64-NEXT: [[ENTRY:.*:]]
+// CHECK-RV64-NEXT: [[TMP0:%.*]] = call <vscale x 1 x bfloat> @llvm.riscv.vfnmsac.mask.nxv1bf16.nxv1bf16.i64(<vscale x 1 x bfloat> [[VD]], <vscale x 1 x bfloat> [[VS1]], <vscale x 1 x bfloat> [[VS2]], <vscale x 1 x i1> [[MASK]], i64 7, i64 [[VL]], i64 1)
+// CHECK-RV64-NEXT: ret <vscale x 1 x bfloat> [[TMP0]]
+//
+vbfloat16mf4_t test_vfnmsac_vv_bf16mf4_mu(vbool64_t mask, vbfloat16mf4_t vd, vbfloat16mf4_t vs1, vbfloat16mf4_t vs2, size_t vl) {
+ return __riscv_vfnmsac_vv_bf16mf4_mu(mask, vd, vs1, vs2, vl);
+}
+
+// CHECK-RV64-LABEL: define dso_local <vscale x 1 x bfloat> @test_vfnmsac_vf_bf16mf4_mu(
+// CHECK-RV64-SAME: <vscale x 1 x i1> [[MASK:%.*]], <vscale x 1 x bfloat> [[VD:%.*]], bfloat noundef [[RS1:%.*]], <vscale x 1 x bfloat> [[VS2:%.*]], i64 noundef [[VL:%.*]]) #[[ATTR0]] {
+// CHECK-RV64-NEXT: [[ENTRY:.*:]]
+// CHECK-RV64-NEXT: [[TMP0:%.*]] = call <vscale x 1 x bfloat> @llvm.riscv.vfnmsac.mask.nxv1bf16.bf16.i64(<vscale x 1 x bfloat> [[VD]], bfloat [[RS1]], <vscale x 1 x bfloat> [[VS2]], <vscale x 1 x i1> [[MASK]], i64 7, i64 [[VL]], i64 1)
+// CHECK-RV64-NEXT: ret <vscale x 1 x bfloat> [[TMP0]]
+//
+vbfloat16mf4_t test_vfnmsac_vf_bf16mf4_mu(vbool64_t mask, vbfloat16mf4_t vd, __bf16 rs1, vbfloat16mf4_t vs2, size_t vl) {
+ return __riscv_vfnmsac_vf_bf16mf4_mu(mask, vd, rs1, vs2, vl);
+}
+
+// CHECK-RV64-LABEL: define dso_local <vscale x 2 x bfloat> @test_vfnmsac_vv_bf16mf2_mu(
+// CHECK-RV64-SAME: <vscale x 2 x i1> [[MASK:%.*]], <vscale x 2 x bfloat> [[VD:%.*]], <vscale x 2 x bfloat> [[VS1:%.*]], <vscale x 2 x bfloat> [[VS2:%.*]], i64 noundef [[VL:%.*]]) #[[ATTR0]] {
+// CHECK-RV64-NEXT: [[ENTRY:.*:]]
+// CHECK-RV64-NEXT: [[TMP0:%.*]] = call <vscale x 2 x bfloat> @llvm.riscv.vfnmsac.mask.nxv2bf16.nxv2bf16.i64(<vscale x 2 x bfloat> [[VD]], <vscale x 2 x bfloat> [[VS1]], <vscale x 2 x bfloat> [[VS2]], <vscale x 2 x i1> [[MASK]], i64 7, i64 [[VL]], i64 1)
+// CHECK-RV64-NEXT: ret <vscale x 2 x bfloat> [[TMP0]]
+//
+vbfloat16mf2_t test_vfnmsac_vv_bf16mf2_mu(vbool32_t mask, vbfloat16mf2_t vd, vbfloat16mf2_t vs1, vbfloat16mf2_t vs2, size_t vl) {
+ return __riscv_vfnmsac_vv_bf16mf2_mu(mask, vd, vs1, vs2, vl);
+}
+
+// CHECK-RV64-LABEL: define dso_local <vscale x 2 x bfloat> @test_vfnmsac_vf_bf16mf2_mu(
+// CHECK-RV64-SAME: <vscale x 2 x i1> [[MASK:%.*]], <vscale x 2 x bfloat> [[VD:%.*]], bfloat noundef [[RS1:%.*]], <vscale x 2 x bfloat> [[VS2:%.*]], i64 noundef [[VL:%.*]]) #[[ATTR0]] {
+// CHECK-RV64-NEXT: [[ENTRY:.*:]]
+// CHECK-RV64-NEXT: [[TMP0:%.*]] = call <vscale x 2 x bfloat> @llvm.riscv.vfnmsac.mask.nxv2bf16.bf16.i64(<vscale x 2 x bfloat> [[VD]], bfloat [[RS1]], <vscale x 2 x bfloat> [[VS2]], <vscale x 2 x i1> [[MASK]], i64 7, i64 [[VL]], i64 1)
+// CHECK-RV64-NEXT: ret <vscale x 2 x bfloat> [[TMP0]]
+//
+vbfloat16mf2_t test_vfnmsac_vf_bf16mf2_mu(vbool32_t mask, vbfloat16mf2_t vd, __bf16 rs1, vbfloat16mf2_t vs2, size_t vl) {
+ return __riscv_vfnmsac_vf_bf16mf2_mu(mask, vd, rs1, vs2, vl);
+}
+
+// CHECK-RV64-LABEL: define dso_local <vscale x 4 x bfloat> @test_vfnmsac_vv_bf16m1_mu(
+// CHECK-RV64-SAME: <vscale x 4 x i1> [[MASK:%.*]], <vscale x 4 x bfloat> [[VD:%.*]], <vscale x 4 x bfloat> [[VS1:%.*]], <vscale x 4 x bfloat> [[VS2:%.*]], i64 noundef [[VL:%.*]]) #[[ATTR0]] {
+// CHECK-RV64-NEXT: [[ENTRY:.*:]]
+// CHECK-RV64-NEXT: [[TMP0:%.*]] = call <vscale x 4 x bfloat> @llvm.riscv.vfnmsac.mask.nxv4bf16.nxv4bf16.i64(<vscale x 4 x bfloat> [[VD]], <vscale x 4 x bfloat> [[VS1]], <vscale x 4 x bfloat> [[VS2]], <vscale x 4 x i1> [[MASK]], i64 7, i64 [[VL]], i64 1)
+// CHECK-RV64-NEXT: ret <vscale x 4 x bfloat> [[TMP0]]
+//
+vbfloat16m1_t test_vfnmsac_vv_bf16m1_mu(vbool16_t mask, vbfloat16m1_t vd, vbfloat16m1_t vs1, vbfloat16m1_t vs2, size_t vl) {
+ return __riscv_vfnmsac_vv_bf16m1_mu(mask, vd, vs1, vs2, vl);
+}
+
+// CHECK-RV64-LABEL: define dso_local <vscale x 4 x bfloat> @test_vfnmsac_vf_bf16m1_mu(
+// CHECK-RV64-SAME: <vscale x 4 x i1> [[MASK:%.*]], <vscale x 4 x bfloat> [[VD:%.*]], bfloat noundef [[RS1:%.*]], <vscale x 4 x bfloat> [[VS2:%.*]], i64 noundef [[VL:%.*]]) #[[ATTR0]] {
+// CHECK-RV64-NEXT: [[ENTRY:.*:]]
+// CHECK-RV64-NEXT: [[TMP0:%.*]] = call <vscale x 4 x bfloat> @llvm.riscv.vfnmsac.mask.nxv4bf16.bf16.i64(<vscale x 4 x bfloat> [[VD]], bfloat [[RS1]], <vscale x 4 x bfloat> [[VS2]], <vscale x 4 x i1> [[MASK]], i64 7, i64 [[VL]], i64 1)
+// CHECK-RV64-NEXT: ret <vscale x 4 x bfloat> [[TMP0]]
+//
+vbfloat16m1_t test_vfnmsac_vf_bf16m1_mu(vbool16_t mask, vbfloat16m1_t vd, __bf16 rs1, vbfloat16m1_t vs2, size_t vl) {
+ return __riscv_vfnmsac_vf_bf16m1_mu(mask, vd, rs1, vs2, vl);
+}
+
+// CHECK-RV64-LABEL: define dso_local <vscale x 8 x bfloat> @test_vfnmsac_vv_bf16m2_mu(
+// CHECK-RV64-SAME: <vscale x 8 x i1> [[MASK:%.*]], <vscale x 8 x bfloat> [[VD:%.*]], <vscale x 8 x bfloat> [[VS1:%.*]], <vscale x 8 x bfloat> [[VS2:%.*]], i64 noundef [[VL:%.*]]) #[[ATTR0]] {
+// CHECK-RV64-NEXT: [[ENTRY:.*:]]
+// CHECK-RV64-NEXT: [[TMP0:%.*]] = call <vscale x 8 x bfloat> @llvm.riscv.vfnmsac.mask.nxv8bf16.nxv8bf16.i64(<vscale x 8 x bfloat> [[VD]], <vscale x 8 x bfloat> [[VS1]], <vscale x 8 x bfloat> [[VS2]], <vscale x 8 x i1> [[MASK]], i64 7, i64 [[VL]], i64 1)
+// CHECK-RV64-NEXT: ret <vscale x 8 x bfloat> [[TMP0]]
+//
+vbfloat16m2_t test_vfnmsac_vv_bf16m2_mu(vbool8_t mask, vbfloat16m2_t vd, vbfloat16m2_t vs1, vbfloat16m2_t vs2, size_t vl) {
+ return __riscv_vfnmsac_vv_bf16m2_mu(mask, vd, vs1, vs2, vl);
+}
+
+// CHECK-RV64-LABEL: define dso_local <vscale x 8 x bfloat> @test_vfnmsac_vf_bf16m2_mu(
+// CHECK-RV64-SAME: <vscale x 8 x i1> [[MASK:%.*]], <vscale x 8 x bfloat> [[VD:%.*]], bfloat noundef [[RS1:%.*]], <vscale x 8 x bfloat> [[VS2:%.*]], i64 noundef [[VL:%.*]]) #[[ATTR0]] {
+// CHECK-RV64-NEXT: [[ENTRY:.*:]]
+// CHECK-RV64-NEXT: [[TMP0:%.*]] = call <vscale x 8 x bfloat> @llvm.riscv.vfnmsac.mask.nxv8bf16.bf16.i64(<vscale x 8 x bfloat> [[VD]], bfloat [[RS1]], <vscale x 8 x bfloat> [[VS2]], <vscale x 8 x i1> [[MASK]], i64 7, i64 [[VL]], i64 1)
+// CHECK-RV64-NEXT: ret <vscale x 8 x bfloat> [[TMP0]]
+//
+vbfloat16m2_t test_vfnmsac_vf_bf16m2_mu(vbool8_t mask, vbfloat16m2_t vd, __bf16 rs1, vbfloat16m2_t vs2, size_t vl) {
+ return __riscv_vfnmsac_vf_bf16m2_mu(mask, vd, rs1, vs2, vl);
+}
+
+// CHECK-RV64-LABEL: define dso_local <vscale x 16 x bfloat> @test_vfnmsac_vv_bf16m4_mu(
+// CHECK-RV64-SAME: <vscale x 16 x i1> [[MASK:%.*]], <vscale x 16 x bfloat> [[VD:%.*]], <vscale x 16 x bfloat> [[VS1:%.*]], <vscale x 16 x bfloat> [[VS2:%.*]], i64 noundef [[VL:%.*]]) #[[ATTR0]] {
+// CHECK-RV64-NEXT: [[ENTRY:.*:]]
+// CHECK-RV64-NEXT: [[TMP0:%.*]] = call <vscale x 16 x bfloat> @llvm.riscv.vfnmsac.mask.nxv16bf16.nxv16bf16.i64(<vscale x 16 x bfloat> [[VD]], <vscale x 16 x bfloat> [[VS1]], <vscale x 16 x bfloat> [[VS2]], <vscale x 16 x i1> [[MASK]], i64 7, i64 [[VL]], i64 1)
+// CHECK-RV64-NEXT: ret <vscale x 16 x bfloat> [[TMP0]]
+//
+vbfloat16m4_t test_vfnmsac_vv_bf16m4_mu(vbool4_t mask, vbfloat16m4_t vd, vbfloat16m4_t vs1, vbfloat16m4_t vs2, size_t vl) {
+ return __riscv_vfnmsac_vv_bf16m4_mu(mask, vd, vs1, vs2, vl);
+}
+
+// CHECK-RV64-LABEL: define dso_local <vscale x 16 x bfloat> @test_vfnmsac_vf_bf16m4_mu(
+// CHECK-RV64-SAME: <vscale x 16 x i1> [[MASK:%.*]], <vscale x 16 x bfloat> [[VD:%.*]], bfloat noundef [[RS1:%.*]], <vscale x 16 x bfloat> [[VS2:%.*]], i64 noundef [[VL:%.*]]) #[[ATTR0]] {
+// CHECK-RV64-NEXT: [[ENTRY:.*:]]
+// CHECK-RV64-NEXT: [[TMP0:%.*]] = call <vscale x 16 x bfloat> @llvm.riscv.vfnmsac.mask.nxv16bf16.bf16.i64(<vscale x 16 x bfloat> [[VD]], bfloat [[RS1]], <vscale x 16 x bfloat> [[VS2]], <vscale x 16 x i1> [[MASK]], i64 7, i64 [[VL]], i64 1)
+// CHECK-RV64-NEXT: ret <vscale x 16 x bfloat> [[TMP0]]
+//
+vbfloat16m4_t test_vfnmsac_vf_bf16m4_mu(vbool4_t mask, vbfloat16m4_t vd, __bf16 rs1, vbfloat16m4_t vs2, size_t vl) {
+ return __riscv_vfnmsac_vf_bf16m4_mu(mask, vd, rs1, vs2, vl);
+}
+
+// CHECK-RV64-LABEL: define dso_local <vscale x 32 x bfloat> @test_vfnmsac_vv_bf16m8_mu(
+// CHECK-RV64-SAME: <vscale x 32 x i1> [[MASK:%.*]], <vscale x 32 x bfloat> [[VD:%.*]], <vscale x 32 x bfloat> [[VS1:%.*]], <vscale x 32 x bfloat> [[VS2:%.*]], i64 noundef [[VL:%.*]]) #[[ATTR0]] {
+// CHECK-RV64-NEXT: [[ENTRY:.*:]]
+// CHECK-RV64-NEXT: [[TMP0:%.*]] = call <vscale x 32 x bfloat> @llvm.riscv.vfnmsac.mask.nxv32bf16.nxv32bf16.i64(<vscale x 32 x bfloat> [[VD]], <vscale x 32 x bfloat> [[VS1]], <vscale x 32 x bfloat> [[VS2]], <vscale x 32 x i1> [[MASK]], i64 7, i64 [[VL]], i64 1)
+// CHECK-RV64-NEXT: ret <vscale x 32 x bfloat> [[TMP0]]
+//
+vbfloat16m8_t test_vfnmsac_vv_bf16m8_mu(vbool2_t mask, vbfloat16m8_t vd, vbfloat16m8_t vs1, vbfloat16m8_t vs2, size_t vl) {
+ return __riscv_vfnmsac_vv_bf16m8_mu(mask, vd, vs1, vs2, vl);
+}
+
+// CHECK-RV64-LABEL: define dso_local <vscale x 32 x bfloat> @test_vfnmsac_vf_bf16m8_mu(
+// CHECK-RV64-SAME: <vscale x 32 x i1> [[MASK:%.*]], <vscale x 32 x bfloat> [[VD:%.*]], bfloat noundef [[RS1:%.*]], <vscale x 32 x bfloat> [[VS2:%.*]], i64 noundef [[VL:%.*]]) #[[ATTR0]] {
+// CHECK-RV64-NEXT: [[ENTRY:.*:]]
+// CHECK-RV64-NEXT: [[TMP0:%.*]] = call <vscale x 32 x bfloat> @llvm.riscv.vfnmsac.mask.nxv32bf16.bf16.i64(<vscale x 32 x bfloat> [[VD]], bfloat [[RS1]], <vscale x 32 x bfloat> [[VS2]], <vscale x 32 x i1> [[MASK]], i64 7, i64 [[VL]], i64 1)
+// CHECK-RV64-NEXT: ret <vscale x 32 x bfloat> [[TMP0]]
+//
+vbfloat16m8_t test_vfnmsac_vf_bf16m8_mu(vbool2_t mask, vbfloat16m8_t vd, __bf16 rs1, vbfloat16m8_t vs2, size_t vl) {
+ return __riscv_vfnmsac_vf_bf16m8_mu(mask, vd, rs1, vs2, vl);
+}
+
diff --git a/clang/test/CodeGen/RISCV/rvv-intrinsics-autogenerated/zvfbfa/policy/non-overloaded/vfnmsub.c b/clang/test/CodeGen/RISCV/rvv-intrinsics-autogenerated/zvfbfa/policy/non-overloaded/vfnmsub.c
new file mode 100644
index 0000000..05ccda3
--- /dev/null
+++ b/clang/test/CodeGen/RISCV/rvv-intrinsics-autogenerated/zvfbfa/policy/non-overloaded/vfnmsub.c
@@ -0,0 +1,489 @@
+// NOTE: Assertions have been autogenerated by utils/update_cc_test_checks.py UTC_ARGS: --version 5
+// REQUIRES: riscv-registered-target
+// RUN: %clang_cc1 -triple riscv64 -target-feature +v \
+// RUN: -target-feature +experimental-zvfbfa -disable-O0-optnone \
+// RUN: -emit-llvm %s -o - | opt -S -passes=mem2reg | \
+// RUN: FileCheck --check-prefix=CHECK-RV64 %s
+
+#include <riscv_vector.h>
+
+// CHECK-RV64-LABEL: define dso_local <vscale x 1 x bfloat> @test_vfnmsub_vv_bf16mf4_tu(
+// CHECK-RV64-SAME: <vscale x 1 x bfloat> [[VD:%.*]], <vscale x 1 x bfloat> [[VS1:%.*]], <vscale x 1 x bfloat> [[VS2:%.*]], i64 noundef [[VL:%.*]]) #[[ATTR0:[0-9]+]] {
+// CHECK-RV64-NEXT: [[ENTRY:.*:]]
+// CHECK-RV64-NEXT: [[TMP0:%.*]] = call <vscale x 1 x bfloat> @llvm.riscv.vfnmsub.nxv1bf16.nxv1bf16.i64(<vscale x 1 x bfloat> [[VD]], <vscale x 1 x bfloat> [[VS1]], <vscale x 1 x bfloat> [[VS2]], i64 7, i64 [[VL]], i64 2)
+// CHECK-RV64-NEXT: ret <vscale x 1 x bfloat> [[TMP0]]
+//
+vbfloat16mf4_t test_vfnmsub_vv_bf16mf4_tu(vbfloat16mf4_t vd, vbfloat16mf4_t vs1, vbfloat16mf4_t vs2, size_t vl) {
+ return __riscv_vfnmsub_vv_bf16mf4_tu(vd, vs1, vs2, vl);
+}
+
+// CHECK-RV64-LABEL: define dso_local <vscale x 1 x bfloat> @test_vfnmsub_vf_bf16mf4_tu(
+// CHECK-RV64-SAME: <vscale x 1 x bfloat> [[VD:%.*]], bfloat noundef [[RS1:%.*]], <vscale x 1 x bfloat> [[VS2:%.*]], i64 noundef [[VL:%.*]]) #[[ATTR0]] {
+// CHECK-RV64-NEXT: [[ENTRY:.*:]]
+// CHECK-RV64-NEXT: [[TMP0:%.*]] = call <vscale x 1 x bfloat> @llvm.riscv.vfnmsub.nxv1bf16.bf16.i64(<vscale x 1 x bfloat> [[VD]], bfloat [[RS1]], <vscale x 1 x bfloat> [[VS2]], i64 7, i64 [[VL]], i64 2)
+// CHECK-RV64-NEXT: ret <vscale x 1 x bfloat> [[TMP0]]
+//
+vbfloat16mf4_t test_vfnmsub_vf_bf16mf4_tu(vbfloat16mf4_t vd, __bf16 rs1, vbfloat16mf4_t vs2, size_t vl) {
+ return __riscv_vfnmsub_vf_bf16mf4_tu(vd, rs1, vs2, vl);
+}
+
+// CHECK-RV64-LABEL: define dso_local <vscale x 2 x bfloat> @test_vfnmsub_vv_bf16mf2_tu(
+// CHECK-RV64-SAME: <vscale x 2 x bfloat> [[VD:%.*]], <vscale x 2 x bfloat> [[VS1:%.*]], <vscale x 2 x bfloat> [[VS2:%.*]], i64 noundef [[VL:%.*]]) #[[ATTR0]] {
+// CHECK-RV64-NEXT: [[ENTRY:.*:]]
+// CHECK-RV64-NEXT: [[TMP0:%.*]] = call <vscale x 2 x bfloat> @llvm.riscv.vfnmsub.nxv2bf16.nxv2bf16.i64(<vscale x 2 x bfloat> [[VD]], <vscale x 2 x bfloat> [[VS1]], <vscale x 2 x bfloat> [[VS2]], i64 7, i64 [[VL]], i64 2)
+// CHECK-RV64-NEXT: ret <vscale x 2 x bfloat> [[TMP0]]
+//
+vbfloat16mf2_t test_vfnmsub_vv_bf16mf2_tu(vbfloat16mf2_t vd, vbfloat16mf2_t vs1, vbfloat16mf2_t vs2, size_t vl) {
+ return __riscv_vfnmsub_vv_bf16mf2_tu(vd, vs1, vs2, vl);
+}
+
+// CHECK-RV64-LABEL: define dso_local <vscale x 2 x bfloat> @test_vfnmsub_vf_bf16mf2_tu(
+// CHECK-RV64-SAME: <vscale x 2 x bfloat> [[VD:%.*]], bfloat noundef [[RS1:%.*]], <vscale x 2 x bfloat> [[VS2:%.*]], i64 noundef [[VL:%.*]]) #[[ATTR0]] {
+// CHECK-RV64-NEXT: [[ENTRY:.*:]]
+// CHECK-RV64-NEXT: [[TMP0:%.*]] = call <vscale x 2 x bfloat> @llvm.riscv.vfnmsub.nxv2bf16.bf16.i64(<vscale x 2 x bfloat> [[VD]], bfloat [[RS1]], <vscale x 2 x bfloat> [[VS2]], i64 7, i64 [[VL]], i64 2)
+// CHECK-RV64-NEXT: ret <vscale x 2 x bfloat> [[TMP0]]
+//
+vbfloat16mf2_t test_vfnmsub_vf_bf16mf2_tu(vbfloat16mf2_t vd, __bf16 rs1, vbfloat16mf2_t vs2, size_t vl) {
+ return __riscv_vfnmsub_vf_bf16mf2_tu(vd, rs1, vs2, vl);
+}
+
+// CHECK-RV64-LABEL: define dso_local <vscale x 4 x bfloat> @test_vfnmsub_vv_bf16m1_tu(
+// CHECK-RV64-SAME: <vscale x 4 x bfloat> [[VD:%.*]], <vscale x 4 x bfloat> [[VS1:%.*]], <vscale x 4 x bfloat> [[VS2:%.*]], i64 noundef [[VL:%.*]]) #[[ATTR0]] {
+// CHECK-RV64-NEXT: [[ENTRY:.*:]]
+// CHECK-RV64-NEXT: [[TMP0:%.*]] = call <vscale x 4 x bfloat> @llvm.riscv.vfnmsub.nxv4bf16.nxv4bf16.i64(<vscale x 4 x bfloat> [[VD]], <vscale x 4 x bfloat> [[VS1]], <vscale x 4 x bfloat> [[VS2]], i64 7, i64 [[VL]], i64 2)
+// CHECK-RV64-NEXT: ret <vscale x 4 x bfloat> [[TMP0]]
+//
+vbfloat16m1_t test_vfnmsub_vv_bf16m1_tu(vbfloat16m1_t vd, vbfloat16m1_t vs1, vbfloat16m1_t vs2, size_t vl) {
+ return __riscv_vfnmsub_vv_bf16m1_tu(vd, vs1, vs2, vl);
+}
+
+// CHECK-RV64-LABEL: define dso_local <vscale x 4 x bfloat> @test_vfnmsub_vf_bf16m1_tu(
+// CHECK-RV64-SAME: <vscale x 4 x bfloat> [[VD:%.*]], bfloat noundef [[RS1:%.*]], <vscale x 4 x bfloat> [[VS2:%.*]], i64 noundef [[VL:%.*]]) #[[ATTR0]] {
+// CHECK-RV64-NEXT: [[ENTRY:.*:]]
+// CHECK-RV64-NEXT: [[TMP0:%.*]] = call <vscale x 4 x bfloat> @llvm.riscv.vfnmsub.nxv4bf16.bf16.i64(<vscale x 4 x bfloat> [[VD]], bfloat [[RS1]], <vscale x 4 x bfloat> [[VS2]], i64 7, i64 [[VL]], i64 2)
+// CHECK-RV64-NEXT: ret <vscale x 4 x bfloat> [[TMP0]]
+//
+vbfloat16m1_t test_vfnmsub_vf_bf16m1_tu(vbfloat16m1_t vd, __bf16 rs1, vbfloat16m1_t vs2, size_t vl) {
+ return __riscv_vfnmsub_vf_bf16m1_tu(vd, rs1, vs2, vl);
+}
+
+// CHECK-RV64-LABEL: define dso_local <vscale x 8 x bfloat> @test_vfnmsub_vv_bf16m2_tu(
+// CHECK-RV64-SAME: <vscale x 8 x bfloat> [[VD:%.*]], <vscale x 8 x bfloat> [[VS1:%.*]], <vscale x 8 x bfloat> [[VS2:%.*]], i64 noundef [[VL:%.*]]) #[[ATTR0]] {
+// CHECK-RV64-NEXT: [[ENTRY:.*:]]
+// CHECK-RV64-NEXT: [[TMP0:%.*]] = call <vscale x 8 x bfloat> @llvm.riscv.vfnmsub.nxv8bf16.nxv8bf16.i64(<vscale x 8 x bfloat> [[VD]], <vscale x 8 x bfloat> [[VS1]], <vscale x 8 x bfloat> [[VS2]], i64 7, i64 [[VL]], i64 2)
+// CHECK-RV64-NEXT: ret <vscale x 8 x bfloat> [[TMP0]]
+//
+vbfloat16m2_t test_vfnmsub_vv_bf16m2_tu(vbfloat16m2_t vd, vbfloat16m2_t vs1, vbfloat16m2_t vs2, size_t vl) {
+ return __riscv_vfnmsub_vv_bf16m2_tu(vd, vs1, vs2, vl);
+}
+
+// CHECK-RV64-LABEL: define dso_local <vscale x 8 x bfloat> @test_vfnmsub_vf_bf16m2_tu(
+// CHECK-RV64-SAME: <vscale x 8 x bfloat> [[VD:%.*]], bfloat noundef [[RS1:%.*]], <vscale x 8 x bfloat> [[VS2:%.*]], i64 noundef [[VL:%.*]]) #[[ATTR0]] {
+// CHECK-RV64-NEXT: [[ENTRY:.*:]]
+// CHECK-RV64-NEXT: [[TMP0:%.*]] = call <vscale x 8 x bfloat> @llvm.riscv.vfnmsub.nxv8bf16.bf16.i64(<vscale x 8 x bfloat> [[VD]], bfloat [[RS1]], <vscale x 8 x bfloat> [[VS2]], i64 7, i64 [[VL]], i64 2)
+// CHECK-RV64-NEXT: ret <vscale x 8 x bfloat> [[TMP0]]
+//
+vbfloat16m2_t test_vfnmsub_vf_bf16m2_tu(vbfloat16m2_t vd, __bf16 rs1, vbfloat16m2_t vs2, size_t vl) {
+ return __riscv_vfnmsub_vf_bf16m2_tu(vd, rs1, vs2, vl);
+}
+
+// CHECK-RV64-LABEL: define dso_local <vscale x 16 x bfloat> @test_vfnmsub_vv_bf16m4_tu(
+// CHECK-RV64-SAME: <vscale x 16 x bfloat> [[VD:%.*]], <vscale x 16 x bfloat> [[VS1:%.*]], <vscale x 16 x bfloat> [[VS2:%.*]], i64 noundef [[VL:%.*]]) #[[ATTR0]] {
+// CHECK-RV64-NEXT: [[ENTRY:.*:]]
+// CHECK-RV64-NEXT: [[TMP0:%.*]] = call <vscale x 16 x bfloat> @llvm.riscv.vfnmsub.nxv16bf16.nxv16bf16.i64(<vscale x 16 x bfloat> [[VD]], <vscale x 16 x bfloat> [[VS1]], <vscale x 16 x bfloat> [[VS2]], i64 7, i64 [[VL]], i64 2)
+// CHECK-RV64-NEXT: ret <vscale x 16 x bfloat> [[TMP0]]
+//
+vbfloat16m4_t test_vfnmsub_vv_bf16m4_tu(vbfloat16m4_t vd, vbfloat16m4_t vs1, vbfloat16m4_t vs2, size_t vl) {
+ return __riscv_vfnmsub_vv_bf16m4_tu(vd, vs1, vs2, vl);
+}
+
+// CHECK-RV64-LABEL: define dso_local <vscale x 16 x bfloat> @test_vfnmsub_vf_bf16m4_tu(
+// CHECK-RV64-SAME: <vscale x 16 x bfloat> [[VD:%.*]], bfloat noundef [[RS1:%.*]], <vscale x 16 x bfloat> [[VS2:%.*]], i64 noundef [[VL:%.*]]) #[[ATTR0]] {
+// CHECK-RV64-NEXT: [[ENTRY:.*:]]
+// CHECK-RV64-NEXT: [[TMP0:%.*]] = call <vscale x 16 x bfloat> @llvm.riscv.vfnmsub.nxv16bf16.bf16.i64(<vscale x 16 x bfloat> [[VD]], bfloat [[RS1]], <vscale x 16 x bfloat> [[VS2]], i64 7, i64 [[VL]], i64 2)
+// CHECK-RV64-NEXT: ret <vscale x 16 x bfloat> [[TMP0]]
+//
+vbfloat16m4_t test_vfnmsub_vf_bf16m4_tu(vbfloat16m4_t vd, __bf16 rs1, vbfloat16m4_t vs2, size_t vl) {
+ return __riscv_vfnmsub_vf_bf16m4_tu(vd, rs1, vs2, vl);
+}
+
+// CHECK-RV64-LABEL: define dso_local <vscale x 32 x bfloat> @test_vfnmsub_vv_bf16m8_tu(
+// CHECK-RV64-SAME: <vscale x 32 x bfloat> [[VD:%.*]], <vscale x 32 x bfloat> [[VS1:%.*]], <vscale x 32 x bfloat> [[VS2:%.*]], i64 noundef [[VL:%.*]]) #[[ATTR0]] {
+// CHECK-RV64-NEXT: [[ENTRY:.*:]]
+// CHECK-RV64-NEXT: [[TMP0:%.*]] = call <vscale x 32 x bfloat> @llvm.riscv.vfnmsub.nxv32bf16.nxv32bf16.i64(<vscale x 32 x bfloat> [[VD]], <vscale x 32 x bfloat> [[VS1]], <vscale x 32 x bfloat> [[VS2]], i64 7, i64 [[VL]], i64 2)
+// CHECK-RV64-NEXT: ret <vscale x 32 x bfloat> [[TMP0]]
+//
+vbfloat16m8_t test_vfnmsub_vv_bf16m8_tu(vbfloat16m8_t vd, vbfloat16m8_t vs1, vbfloat16m8_t vs2, size_t vl) {
+ return __riscv_vfnmsub_vv_bf16m8_tu(vd, vs1, vs2, vl);
+}
+
+// CHECK-RV64-LABEL: define dso_local <vscale x 32 x bfloat> @test_vfnmsub_vf_bf16m8_tu(
+// CHECK-RV64-SAME: <vscale x 32 x bfloat> [[VD:%.*]], bfloat noundef [[RS1:%.*]], <vscale x 32 x bfloat> [[VS2:%.*]], i64 noundef [[VL:%.*]]) #[[ATTR0]] {
+// CHECK-RV64-NEXT: [[ENTRY:.*:]]
+// CHECK-RV64-NEXT: [[TMP0:%.*]] = call <vscale x 32 x bfloat> @llvm.riscv.vfnmsub.nxv32bf16.bf16.i64(<vscale x 32 x bfloat> [[VD]], bfloat [[RS1]], <vscale x 32 x bfloat> [[VS2]], i64 7, i64 [[VL]], i64 2)
+// CHECK-RV64-NEXT: ret <vscale x 32 x bfloat> [[TMP0]]
+//
+vbfloat16m8_t test_vfnmsub_vf_bf16m8_tu(vbfloat16m8_t vd, __bf16 rs1, vbfloat16m8_t vs2, size_t vl) {
+ return __riscv_vfnmsub_vf_bf16m8_tu(vd, rs1, vs2, vl);
+}
+
+// CHECK-RV64-LABEL: define dso_local <vscale x 1 x bfloat> @test_vfnmsub_vv_bf16mf4_tum(
+// CHECK-RV64-SAME: <vscale x 1 x i1> [[MASK:%.*]], <vscale x 1 x bfloat> [[VD:%.*]], <vscale x 1 x bfloat> [[VS1:%.*]], <vscale x 1 x bfloat> [[VS2:%.*]], i64 noundef [[VL:%.*]]) #[[ATTR0]] {
+// CHECK-RV64-NEXT: [[ENTRY:.*:]]
+// CHECK-RV64-NEXT: [[TMP0:%.*]] = call <vscale x 1 x bfloat> @llvm.riscv.vfnmsub.mask.nxv1bf16.nxv1bf16.i64(<vscale x 1 x bfloat> [[VD]], <vscale x 1 x bfloat> [[VS1]], <vscale x 1 x bfloat> [[VS2]], <vscale x 1 x i1> [[MASK]], i64 7, i64 [[VL]], i64 2)
+// CHECK-RV64-NEXT: ret <vscale x 1 x bfloat> [[TMP0]]
+//
+vbfloat16mf4_t test_vfnmsub_vv_bf16mf4_tum(vbool64_t mask, vbfloat16mf4_t vd, vbfloat16mf4_t vs1, vbfloat16mf4_t vs2, size_t vl) {
+ return __riscv_vfnmsub_vv_bf16mf4_tum(mask, vd, vs1, vs2, vl);
+}
+
+// CHECK-RV64-LABEL: define dso_local <vscale x 1 x bfloat> @test_vfnmsub_vf_bf16mf4_tum(
+// CHECK-RV64-SAME: <vscale x 1 x i1> [[MASK:%.*]], <vscale x 1 x bfloat> [[VD:%.*]], bfloat noundef [[RS1:%.*]], <vscale x 1 x bfloat> [[VS2:%.*]], i64 noundef [[VL:%.*]]) #[[ATTR0]] {
+// CHECK-RV64-NEXT: [[ENTRY:.*:]]
+// CHECK-RV64-NEXT: [[TMP0:%.*]] = call <vscale x 1 x bfloat> @llvm.riscv.vfnmsub.mask.nxv1bf16.bf16.i64(<vscale x 1 x bfloat> [[VD]], bfloat [[RS1]], <vscale x 1 x bfloat> [[VS2]], <vscale x 1 x i1> [[MASK]], i64 7, i64 [[VL]], i64 2)
+// CHECK-RV64-NEXT: ret <vscale x 1 x bfloat> [[TMP0]]
+//
+vbfloat16mf4_t test_vfnmsub_vf_bf16mf4_tum(vbool64_t mask, vbfloat16mf4_t vd, __bf16 rs1, vbfloat16mf4_t vs2, size_t vl) {
+ return __riscv_vfnmsub_vf_bf16mf4_tum(mask, vd, rs1, vs2, vl);
+}
+
+// CHECK-RV64-LABEL: define dso_local <vscale x 2 x bfloat> @test_vfnmsub_vv_bf16mf2_tum(
+// CHECK-RV64-SAME: <vscale x 2 x i1> [[MASK:%.*]], <vscale x 2 x bfloat> [[VD:%.*]], <vscale x 2 x bfloat> [[VS1:%.*]], <vscale x 2 x bfloat> [[VS2:%.*]], i64 noundef [[VL:%.*]]) #[[ATTR0]] {
+// CHECK-RV64-NEXT: [[ENTRY:.*:]]
+// CHECK-RV64-NEXT: [[TMP0:%.*]] = call <vscale x 2 x bfloat> @llvm.riscv.vfnmsub.mask.nxv2bf16.nxv2bf16.i64(<vscale x 2 x bfloat> [[VD]], <vscale x 2 x bfloat> [[VS1]], <vscale x 2 x bfloat> [[VS2]], <vscale x 2 x i1> [[MASK]], i64 7, i64 [[VL]], i64 2)
+// CHECK-RV64-NEXT: ret <vscale x 2 x bfloat> [[TMP0]]
+//
+vbfloat16mf2_t test_vfnmsub_vv_bf16mf2_tum(vbool32_t mask, vbfloat16mf2_t vd, vbfloat16mf2_t vs1, vbfloat16mf2_t vs2, size_t vl) {
+ return __riscv_vfnmsub_vv_bf16mf2_tum(mask, vd, vs1, vs2, vl);
+}
+
+// CHECK-RV64-LABEL: define dso_local <vscale x 2 x bfloat> @test_vfnmsub_vf_bf16mf2_tum(
+// CHECK-RV64-SAME: <vscale x 2 x i1> [[MASK:%.*]], <vscale x 2 x bfloat> [[VD:%.*]], bfloat noundef [[RS1:%.*]], <vscale x 2 x bfloat> [[VS2:%.*]], i64 noundef [[VL:%.*]]) #[[ATTR0]] {
+// CHECK-RV64-NEXT: [[ENTRY:.*:]]
+// CHECK-RV64-NEXT: [[TMP0:%.*]] = call <vscale x 2 x bfloat> @llvm.riscv.vfnmsub.mask.nxv2bf16.bf16.i64(<vscale x 2 x bfloat> [[VD]], bfloat [[RS1]], <vscale x 2 x bfloat> [[VS2]], <vscale x 2 x i1> [[MASK]], i64 7, i64 [[VL]], i64 2)
+// CHECK-RV64-NEXT: ret <vscale x 2 x bfloat> [[TMP0]]
+//
+vbfloat16mf2_t test_vfnmsub_vf_bf16mf2_tum(vbool32_t mask, vbfloat16mf2_t vd, __bf16 rs1, vbfloat16mf2_t vs2, size_t vl) {
+ return __riscv_vfnmsub_vf_bf16mf2_tum(mask, vd, rs1, vs2, vl);
+}
+
+// CHECK-RV64-LABEL: define dso_local <vscale x 4 x bfloat> @test_vfnmsub_vv_bf16m1_tum(
+// CHECK-RV64-SAME: <vscale x 4 x i1> [[MASK:%.*]], <vscale x 4 x bfloat> [[VD:%.*]], <vscale x 4 x bfloat> [[VS1:%.*]], <vscale x 4 x bfloat> [[VS2:%.*]], i64 noundef [[VL:%.*]]) #[[ATTR0]] {
+// CHECK-RV64-NEXT: [[ENTRY:.*:]]
+// CHECK-RV64-NEXT: [[TMP0:%.*]] = call <vscale x 4 x bfloat> @llvm.riscv.vfnmsub.mask.nxv4bf16.nxv4bf16.i64(<vscale x 4 x bfloat> [[VD]], <vscale x 4 x bfloat> [[VS1]], <vscale x 4 x bfloat> [[VS2]], <vscale x 4 x i1> [[MASK]], i64 7, i64 [[VL]], i64 2)
+// CHECK-RV64-NEXT: ret <vscale x 4 x bfloat> [[TMP0]]
+//
+vbfloat16m1_t test_vfnmsub_vv_bf16m1_tum(vbool16_t mask, vbfloat16m1_t vd, vbfloat16m1_t vs1, vbfloat16m1_t vs2, size_t vl) {
+ return __riscv_vfnmsub_vv_bf16m1_tum(mask, vd, vs1, vs2, vl);
+}
+
+// CHECK-RV64-LABEL: define dso_local <vscale x 4 x bfloat> @test_vfnmsub_vf_bf16m1_tum(
+// CHECK-RV64-SAME: <vscale x 4 x i1> [[MASK:%.*]], <vscale x 4 x bfloat> [[VD:%.*]], bfloat noundef [[RS1:%.*]], <vscale x 4 x bfloat> [[VS2:%.*]], i64 noundef [[VL:%.*]]) #[[ATTR0]] {
+// CHECK-RV64-NEXT: [[ENTRY:.*:]]
+// CHECK-RV64-NEXT: [[TMP0:%.*]] = call <vscale x 4 x bfloat> @llvm.riscv.vfnmsub.mask.nxv4bf16.bf16.i64(<vscale x 4 x bfloat> [[VD]], bfloat [[RS1]], <vscale x 4 x bfloat> [[VS2]], <vscale x 4 x i1> [[MASK]], i64 7, i64 [[VL]], i64 2)
+// CHECK-RV64-NEXT: ret <vscale x 4 x bfloat> [[TMP0]]
+//
+vbfloat16m1_t test_vfnmsub_vf_bf16m1_tum(vbool16_t mask, vbfloat16m1_t vd, __bf16 rs1, vbfloat16m1_t vs2, size_t vl) {
+ return __riscv_vfnmsub_vf_bf16m1_tum(mask, vd, rs1, vs2, vl);
+}
+
+// CHECK-RV64-LABEL: define dso_local <vscale x 8 x bfloat> @test_vfnmsub_vv_bf16m2_tum(
+// CHECK-RV64-SAME: <vscale x 8 x i1> [[MASK:%.*]], <vscale x 8 x bfloat> [[VD:%.*]], <vscale x 8 x bfloat> [[VS1:%.*]], <vscale x 8 x bfloat> [[VS2:%.*]], i64 noundef [[VL:%.*]]) #[[ATTR0]] {
+// CHECK-RV64-NEXT: [[ENTRY:.*:]]
+// CHECK-RV64-NEXT: [[TMP0:%.*]] = call <vscale x 8 x bfloat> @llvm.riscv.vfnmsub.mask.nxv8bf16.nxv8bf16.i64(<vscale x 8 x bfloat> [[VD]], <vscale x 8 x bfloat> [[VS1]], <vscale x 8 x bfloat> [[VS2]], <vscale x 8 x i1> [[MASK]], i64 7, i64 [[VL]], i64 2)
+// CHECK-RV64-NEXT: ret <vscale x 8 x bfloat> [[TMP0]]
+//
+vbfloat16m2_t test_vfnmsub_vv_bf16m2_tum(vbool8_t mask, vbfloat16m2_t vd, vbfloat16m2_t vs1, vbfloat16m2_t vs2, size_t vl) {
+ return __riscv_vfnmsub_vv_bf16m2_tum(mask, vd, vs1, vs2, vl);
+}
+
+// CHECK-RV64-LABEL: define dso_local <vscale x 8 x bfloat> @test_vfnmsub_vf_bf16m2_tum(
+// CHECK-RV64-SAME: <vscale x 8 x i1> [[MASK:%.*]], <vscale x 8 x bfloat> [[VD:%.*]], bfloat noundef [[RS1:%.*]], <vscale x 8 x bfloat> [[VS2:%.*]], i64 noundef [[VL:%.*]]) #[[ATTR0]] {
+// CHECK-RV64-NEXT: [[ENTRY:.*:]]
+// CHECK-RV64-NEXT: [[TMP0:%.*]] = call <vscale x 8 x bfloat> @llvm.riscv.vfnmsub.mask.nxv8bf16.bf16.i64(<vscale x 8 x bfloat> [[VD]], bfloat [[RS1]], <vscale x 8 x bfloat> [[VS2]], <vscale x 8 x i1> [[MASK]], i64 7, i64 [[VL]], i64 2)
+// CHECK-RV64-NEXT: ret <vscale x 8 x bfloat> [[TMP0]]
+//
+vbfloat16m2_t test_vfnmsub_vf_bf16m2_tum(vbool8_t mask, vbfloat16m2_t vd, __bf16 rs1, vbfloat16m2_t vs2, size_t vl) {
+ return __riscv_vfnmsub_vf_bf16m2_tum(mask, vd, rs1, vs2, vl);
+}
+
+// CHECK-RV64-LABEL: define dso_local <vscale x 16 x bfloat> @test_vfnmsub_vv_bf16m4_tum(
+// CHECK-RV64-SAME: <vscale x 16 x i1> [[MASK:%.*]], <vscale x 16 x bfloat> [[VD:%.*]], <vscale x 16 x bfloat> [[VS1:%.*]], <vscale x 16 x bfloat> [[VS2:%.*]], i64 noundef [[VL:%.*]]) #[[ATTR0]] {
+// CHECK-RV64-NEXT: [[ENTRY:.*:]]
+// CHECK-RV64-NEXT: [[TMP0:%.*]] = call <vscale x 16 x bfloat> @llvm.riscv.vfnmsub.mask.nxv16bf16.nxv16bf16.i64(<vscale x 16 x bfloat> [[VD]], <vscale x 16 x bfloat> [[VS1]], <vscale x 16 x bfloat> [[VS2]], <vscale x 16 x i1> [[MASK]], i64 7, i64 [[VL]], i64 2)
+// CHECK-RV64-NEXT: ret <vscale x 16 x bfloat> [[TMP0]]
+//
+vbfloat16m4_t test_vfnmsub_vv_bf16m4_tum(vbool4_t mask, vbfloat16m4_t vd, vbfloat16m4_t vs1, vbfloat16m4_t vs2, size_t vl) {
+ return __riscv_vfnmsub_vv_bf16m4_tum(mask, vd, vs1, vs2, vl);
+}
+
+// CHECK-RV64-LABEL: define dso_local <vscale x 16 x bfloat> @test_vfnmsub_vf_bf16m4_tum(
+// CHECK-RV64-SAME: <vscale x 16 x i1> [[MASK:%.*]], <vscale x 16 x bfloat> [[VD:%.*]], bfloat noundef [[RS1:%.*]], <vscale x 16 x bfloat> [[VS2:%.*]], i64 noundef [[VL:%.*]]) #[[ATTR0]] {
+// CHECK-RV64-NEXT: [[ENTRY:.*:]]
+// CHECK-RV64-NEXT: [[TMP0:%.*]] = call <vscale x 16 x bfloat> @llvm.riscv.vfnmsub.mask.nxv16bf16.bf16.i64(<vscale x 16 x bfloat> [[VD]], bfloat [[RS1]], <vscale x 16 x bfloat> [[VS2]], <vscale x 16 x i1> [[MASK]], i64 7, i64 [[VL]], i64 2)
+// CHECK-RV64-NEXT: ret <vscale x 16 x bfloat> [[TMP0]]
+//
+vbfloat16m4_t test_vfnmsub_vf_bf16m4_tum(vbool4_t mask, vbfloat16m4_t vd, __bf16 rs1, vbfloat16m4_t vs2, size_t vl) {
+ return __riscv_vfnmsub_vf_bf16m4_tum(mask, vd, rs1, vs2, vl);
+}
+
+// CHECK-RV64-LABEL: define dso_local <vscale x 32 x bfloat> @test_vfnmsub_vv_bf16m8_tum(
+// CHECK-RV64-SAME: <vscale x 32 x i1> [[MASK:%.*]], <vscale x 32 x bfloat> [[VD:%.*]], <vscale x 32 x bfloat> [[VS1:%.*]], <vscale x 32 x bfloat> [[VS2:%.*]], i64 noundef [[VL:%.*]]) #[[ATTR0]] {
+// CHECK-RV64-NEXT: [[ENTRY:.*:]]
+// CHECK-RV64-NEXT: [[TMP0:%.*]] = call <vscale x 32 x bfloat> @llvm.riscv.vfnmsub.mask.nxv32bf16.nxv32bf16.i64(<vscale x 32 x bfloat> [[VD]], <vscale x 32 x bfloat> [[VS1]], <vscale x 32 x bfloat> [[VS2]], <vscale x 32 x i1> [[MASK]], i64 7, i64 [[VL]], i64 2)
+// CHECK-RV64-NEXT: ret <vscale x 32 x bfloat> [[TMP0]]
+//
+vbfloat16m8_t test_vfnmsub_vv_bf16m8_tum(vbool2_t mask, vbfloat16m8_t vd, vbfloat16m8_t vs1, vbfloat16m8_t vs2, size_t vl) {
+ return __riscv_vfnmsub_vv_bf16m8_tum(mask, vd, vs1, vs2, vl);
+}
+
+// CHECK-RV64-LABEL: define dso_local <vscale x 32 x bfloat> @test_vfnmsub_vf_bf16m8_tum(
+// CHECK-RV64-SAME: <vscale x 32 x i1> [[MASK:%.*]], <vscale x 32 x bfloat> [[VD:%.*]], bfloat noundef [[RS1:%.*]], <vscale x 32 x bfloat> [[VS2:%.*]], i64 noundef [[VL:%.*]]) #[[ATTR0]] {
+// CHECK-RV64-NEXT: [[ENTRY:.*:]]
+// CHECK-RV64-NEXT: [[TMP0:%.*]] = call <vscale x 32 x bfloat> @llvm.riscv.vfnmsub.mask.nxv32bf16.bf16.i64(<vscale x 32 x bfloat> [[VD]], bfloat [[RS1]], <vscale x 32 x bfloat> [[VS2]], <vscale x 32 x i1> [[MASK]], i64 7, i64 [[VL]], i64 2)
+// CHECK-RV64-NEXT: ret <vscale x 32 x bfloat> [[TMP0]]
+//
+vbfloat16m8_t test_vfnmsub_vf_bf16m8_tum(vbool2_t mask, vbfloat16m8_t vd, __bf16 rs1, vbfloat16m8_t vs2, size_t vl) {
+ return __riscv_vfnmsub_vf_bf16m8_tum(mask, vd, rs1, vs2, vl);
+}
+
+// CHECK-RV64-LABEL: define dso_local <vscale x 1 x bfloat> @test_vfnmsub_vv_bf16mf4_tumu(
+// CHECK-RV64-SAME: <vscale x 1 x i1> [[MASK:%.*]], <vscale x 1 x bfloat> [[VD:%.*]], <vscale x 1 x bfloat> [[VS1:%.*]], <vscale x 1 x bfloat> [[VS2:%.*]], i64 noundef [[VL:%.*]]) #[[ATTR0]] {
+// CHECK-RV64-NEXT: [[ENTRY:.*:]]
+// CHECK-RV64-NEXT: [[TMP0:%.*]] = call <vscale x 1 x bfloat> @llvm.riscv.vfnmsub.mask.nxv1bf16.nxv1bf16.i64(<vscale x 1 x bfloat> [[VD]], <vscale x 1 x bfloat> [[VS1]], <vscale x 1 x bfloat> [[VS2]], <vscale x 1 x i1> [[MASK]], i64 7, i64 [[VL]], i64 0)
+// CHECK-RV64-NEXT: ret <vscale x 1 x bfloat> [[TMP0]]
+//
+vbfloat16mf4_t test_vfnmsub_vv_bf16mf4_tumu(vbool64_t mask, vbfloat16mf4_t vd, vbfloat16mf4_t vs1, vbfloat16mf4_t vs2, size_t vl) {
+ return __riscv_vfnmsub_vv_bf16mf4_tumu(mask, vd, vs1, vs2, vl);
+}
+
+// CHECK-RV64-LABEL: define dso_local <vscale x 1 x bfloat> @test_vfnmsub_vf_bf16mf4_tumu(
+// CHECK-RV64-SAME: <vscale x 1 x i1> [[MASK:%.*]], <vscale x 1 x bfloat> [[VD:%.*]], bfloat noundef [[RS1:%.*]], <vscale x 1 x bfloat> [[VS2:%.*]], i64 noundef [[VL:%.*]]) #[[ATTR0]] {
+// CHECK-RV64-NEXT: [[ENTRY:.*:]]
+// CHECK-RV64-NEXT: [[TMP0:%.*]] = call <vscale x 1 x bfloat> @llvm.riscv.vfnmsub.mask.nxv1bf16.bf16.i64(<vscale x 1 x bfloat> [[VD]], bfloat [[RS1]], <vscale x 1 x bfloat> [[VS2]], <vscale x 1 x i1> [[MASK]], i64 7, i64 [[VL]], i64 0)
+// CHECK-RV64-NEXT: ret <vscale x 1 x bfloat> [[TMP0]]
+//
+vbfloat16mf4_t test_vfnmsub_vf_bf16mf4_tumu(vbool64_t mask, vbfloat16mf4_t vd, __bf16 rs1, vbfloat16mf4_t vs2, size_t vl) {
+ return __riscv_vfnmsub_vf_bf16mf4_tumu(mask, vd, rs1, vs2, vl);
+}
+
+// CHECK-RV64-LABEL: define dso_local <vscale x 2 x bfloat> @test_vfnmsub_vv_bf16mf2_tumu(
+// CHECK-RV64-SAME: <vscale x 2 x i1> [[MASK:%.*]], <vscale x 2 x bfloat> [[VD:%.*]], <vscale x 2 x bfloat> [[VS1:%.*]], <vscale x 2 x bfloat> [[VS2:%.*]], i64 noundef [[VL:%.*]]) #[[ATTR0]] {
+// CHECK-RV64-NEXT: [[ENTRY:.*:]]
+// CHECK-RV64-NEXT: [[TMP0:%.*]] = call <vscale x 2 x bfloat> @llvm.riscv.vfnmsub.mask.nxv2bf16.nxv2bf16.i64(<vscale x 2 x bfloat> [[VD]], <vscale x 2 x bfloat> [[VS1]], <vscale x 2 x bfloat> [[VS2]], <vscale x 2 x i1> [[MASK]], i64 7, i64 [[VL]], i64 0)
+// CHECK-RV64-NEXT: ret <vscale x 2 x bfloat> [[TMP0]]
+//
+vbfloat16mf2_t test_vfnmsub_vv_bf16mf2_tumu(vbool32_t mask, vbfloat16mf2_t vd, vbfloat16mf2_t vs1, vbfloat16mf2_t vs2, size_t vl) {
+ return __riscv_vfnmsub_vv_bf16mf2_tumu(mask, vd, vs1, vs2, vl);
+}
+
+// CHECK-RV64-LABEL: define dso_local <vscale x 2 x bfloat> @test_vfnmsub_vf_bf16mf2_tumu(
+// CHECK-RV64-SAME: <vscale x 2 x i1> [[MASK:%.*]], <vscale x 2 x bfloat> [[VD:%.*]], bfloat noundef [[RS1:%.*]], <vscale x 2 x bfloat> [[VS2:%.*]], i64 noundef [[VL:%.*]]) #[[ATTR0]] {
+// CHECK-RV64-NEXT: [[ENTRY:.*:]]
+// CHECK-RV64-NEXT: [[TMP0:%.*]] = call <vscale x 2 x bfloat> @llvm.riscv.vfnmsub.mask.nxv2bf16.bf16.i64(<vscale x 2 x bfloat> [[VD]], bfloat [[RS1]], <vscale x 2 x bfloat> [[VS2]], <vscale x 2 x i1> [[MASK]], i64 7, i64 [[VL]], i64 0)
+// CHECK-RV64-NEXT: ret <vscale x 2 x bfloat> [[TMP0]]
+//
+vbfloat16mf2_t test_vfnmsub_vf_bf16mf2_tumu(vbool32_t mask, vbfloat16mf2_t vd, __bf16 rs1, vbfloat16mf2_t vs2, size_t vl) {
+ return __riscv_vfnmsub_vf_bf16mf2_tumu(mask, vd, rs1, vs2, vl);
+}
+
+// CHECK-RV64-LABEL: define dso_local <vscale x 4 x bfloat> @test_vfnmsub_vv_bf16m1_tumu(
+// CHECK-RV64-SAME: <vscale x 4 x i1> [[MASK:%.*]], <vscale x 4 x bfloat> [[VD:%.*]], <vscale x 4 x bfloat> [[VS1:%.*]], <vscale x 4 x bfloat> [[VS2:%.*]], i64 noundef [[VL:%.*]]) #[[ATTR0]] {
+// CHECK-RV64-NEXT: [[ENTRY:.*:]]
+// CHECK-RV64-NEXT: [[TMP0:%.*]] = call <vscale x 4 x bfloat> @llvm.riscv.vfnmsub.mask.nxv4bf16.nxv4bf16.i64(<vscale x 4 x bfloat> [[VD]], <vscale x 4 x bfloat> [[VS1]], <vscale x 4 x bfloat> [[VS2]], <vscale x 4 x i1> [[MASK]], i64 7, i64 [[VL]], i64 0)
+// CHECK-RV64-NEXT: ret <vscale x 4 x bfloat> [[TMP0]]
+//
+vbfloat16m1_t test_vfnmsub_vv_bf16m1_tumu(vbool16_t mask, vbfloat16m1_t vd, vbfloat16m1_t vs1, vbfloat16m1_t vs2, size_t vl) {
+ return __riscv_vfnmsub_vv_bf16m1_tumu(mask, vd, vs1, vs2, vl);
+}
+
+// CHECK-RV64-LABEL: define dso_local <vscale x 4 x bfloat> @test_vfnmsub_vf_bf16m1_tumu(
+// CHECK-RV64-SAME: <vscale x 4 x i1> [[MASK:%.*]], <vscale x 4 x bfloat> [[VD:%.*]], bfloat noundef [[RS1:%.*]], <vscale x 4 x bfloat> [[VS2:%.*]], i64 noundef [[VL:%.*]]) #[[ATTR0]] {
+// CHECK-RV64-NEXT: [[ENTRY:.*:]]
+// CHECK-RV64-NEXT: [[TMP0:%.*]] = call <vscale x 4 x bfloat> @llvm.riscv.vfnmsub.mask.nxv4bf16.bf16.i64(<vscale x 4 x bfloat> [[VD]], bfloat [[RS1]], <vscale x 4 x bfloat> [[VS2]], <vscale x 4 x i1> [[MASK]], i64 7, i64 [[VL]], i64 0)
+// CHECK-RV64-NEXT: ret <vscale x 4 x bfloat> [[TMP0]]
+//
+vbfloat16m1_t test_vfnmsub_vf_bf16m1_tumu(vbool16_t mask, vbfloat16m1_t vd, __bf16 rs1, vbfloat16m1_t vs2, size_t vl) {
+ return __riscv_vfnmsub_vf_bf16m1_tumu(mask, vd, rs1, vs2, vl);
+}
+
+// CHECK-RV64-LABEL: define dso_local <vscale x 8 x bfloat> @test_vfnmsub_vv_bf16m2_tumu(
+// CHECK-RV64-SAME: <vscale x 8 x i1> [[MASK:%.*]], <vscale x 8 x bfloat> [[VD:%.*]], <vscale x 8 x bfloat> [[VS1:%.*]], <vscale x 8 x bfloat> [[VS2:%.*]], i64 noundef [[VL:%.*]]) #[[ATTR0]] {
+// CHECK-RV64-NEXT: [[ENTRY:.*:]]
+// CHECK-RV64-NEXT: [[TMP0:%.*]] = call <vscale x 8 x bfloat> @llvm.riscv.vfnmsub.mask.nxv8bf16.nxv8bf16.i64(<vscale x 8 x bfloat> [[VD]], <vscale x 8 x bfloat> [[VS1]], <vscale x 8 x bfloat> [[VS2]], <vscale x 8 x i1> [[MASK]], i64 7, i64 [[VL]], i64 0)
+// CHECK-RV64-NEXT: ret <vscale x 8 x bfloat> [[TMP0]]
+//
+vbfloat16m2_t test_vfnmsub_vv_bf16m2_tumu(vbool8_t mask, vbfloat16m2_t vd, vbfloat16m2_t vs1, vbfloat16m2_t vs2, size_t vl) {
+ return __riscv_vfnmsub_vv_bf16m2_tumu(mask, vd, vs1, vs2, vl);
+}
+
+// CHECK-RV64-LABEL: define dso_local <vscale x 8 x bfloat> @test_vfnmsub_vf_bf16m2_tumu(
+// CHECK-RV64-SAME: <vscale x 8 x i1> [[MASK:%.*]], <vscale x 8 x bfloat> [[VD:%.*]], bfloat noundef [[RS1:%.*]], <vscale x 8 x bfloat> [[VS2:%.*]], i64 noundef [[VL:%.*]]) #[[ATTR0]] {
+// CHECK-RV64-NEXT: [[ENTRY:.*:]]
+// CHECK-RV64-NEXT: [[TMP0:%.*]] = call <vscale x 8 x bfloat> @llvm.riscv.vfnmsub.mask.nxv8bf16.bf16.i64(<vscale x 8 x bfloat> [[VD]], bfloat [[RS1]], <vscale x 8 x bfloat> [[VS2]], <vscale x 8 x i1> [[MASK]], i64 7, i64 [[VL]], i64 0)
+// CHECK-RV64-NEXT: ret <vscale x 8 x bfloat> [[TMP0]]
+//
+vbfloat16m2_t test_vfnmsub_vf_bf16m2_tumu(vbool8_t mask, vbfloat16m2_t vd, __bf16 rs1, vbfloat16m2_t vs2, size_t vl) {
+ return __riscv_vfnmsub_vf_bf16m2_tumu(mask, vd, rs1, vs2, vl);
+}
+
+// CHECK-RV64-LABEL: define dso_local <vscale x 16 x bfloat> @test_vfnmsub_vv_bf16m4_tumu(
+// CHECK-RV64-SAME: <vscale x 16 x i1> [[MASK:%.*]], <vscale x 16 x bfloat> [[VD:%.*]], <vscale x 16 x bfloat> [[VS1:%.*]], <vscale x 16 x bfloat> [[VS2:%.*]], i64 noundef [[VL:%.*]]) #[[ATTR0]] {
+// CHECK-RV64-NEXT: [[ENTRY:.*:]]
+// CHECK-RV64-NEXT: [[TMP0:%.*]] = call <vscale x 16 x bfloat> @llvm.riscv.vfnmsub.mask.nxv16bf16.nxv16bf16.i64(<vscale x 16 x bfloat> [[VD]], <vscale x 16 x bfloat> [[VS1]], <vscale x 16 x bfloat> [[VS2]], <vscale x 16 x i1> [[MASK]], i64 7, i64 [[VL]], i64 0)
+// CHECK-RV64-NEXT: ret <vscale x 16 x bfloat> [[TMP0]]
+//
+vbfloat16m4_t test_vfnmsub_vv_bf16m4_tumu(vbool4_t mask, vbfloat16m4_t vd, vbfloat16m4_t vs1, vbfloat16m4_t vs2, size_t vl) {
+ return __riscv_vfnmsub_vv_bf16m4_tumu(mask, vd, vs1, vs2, vl);
+}
+
+// CHECK-RV64-LABEL: define dso_local <vscale x 16 x bfloat> @test_vfnmsub_vf_bf16m4_tumu(
+// CHECK-RV64-SAME: <vscale x 16 x i1> [[MASK:%.*]], <vscale x 16 x bfloat> [[VD:%.*]], bfloat noundef [[RS1:%.*]], <vscale x 16 x bfloat> [[VS2:%.*]], i64 noundef [[VL:%.*]]) #[[ATTR0]] {
+// CHECK-RV64-NEXT: [[ENTRY:.*:]]
+// CHECK-RV64-NEXT: [[TMP0:%.*]] = call <vscale x 16 x bfloat> @llvm.riscv.vfnmsub.mask.nxv16bf16.bf16.i64(<vscale x 16 x bfloat> [[VD]], bfloat [[RS1]], <vscale x 16 x bfloat> [[VS2]], <vscale x 16 x i1> [[MASK]], i64 7, i64 [[VL]], i64 0)
+// CHECK-RV64-NEXT: ret <vscale x 16 x bfloat> [[TMP0]]
+//
+vbfloat16m4_t test_vfnmsub_vf_bf16m4_tumu(vbool4_t mask, vbfloat16m4_t vd, __bf16 rs1, vbfloat16m4_t vs2, size_t vl) {
+ return __riscv_vfnmsub_vf_bf16m4_tumu(mask, vd, rs1, vs2, vl);
+}
+
+// CHECK-RV64-LABEL: define dso_local <vscale x 32 x bfloat> @test_vfnmsub_vv_bf16m8_tumu(
+// CHECK-RV64-SAME: <vscale x 32 x i1> [[MASK:%.*]], <vscale x 32 x bfloat> [[VD:%.*]], <vscale x 32 x bfloat> [[VS1:%.*]], <vscale x 32 x bfloat> [[VS2:%.*]], i64 noundef [[VL:%.*]]) #[[ATTR0]] {
+// CHECK-RV64-NEXT: [[ENTRY:.*:]]
+// CHECK-RV64-NEXT: [[TMP0:%.*]] = call <vscale x 32 x bfloat> @llvm.riscv.vfnmsub.mask.nxv32bf16.nxv32bf16.i64(<vscale x 32 x bfloat> [[VD]], <vscale x 32 x bfloat> [[VS1]], <vscale x 32 x bfloat> [[VS2]], <vscale x 32 x i1> [[MASK]], i64 7, i64 [[VL]], i64 0)
+// CHECK-RV64-NEXT: ret <vscale x 32 x bfloat> [[TMP0]]
+//
+vbfloat16m8_t test_vfnmsub_vv_bf16m8_tumu(vbool2_t mask, vbfloat16m8_t vd, vbfloat16m8_t vs1, vbfloat16m8_t vs2, size_t vl) {
+ return __riscv_vfnmsub_vv_bf16m8_tumu(mask, vd, vs1, vs2, vl);
+}
+
+// CHECK-RV64-LABEL: define dso_local <vscale x 32 x bfloat> @test_vfnmsub_vf_bf16m8_tumu(
+// CHECK-RV64-SAME: <vscale x 32 x i1> [[MASK:%.*]], <vscale x 32 x bfloat> [[VD:%.*]], bfloat noundef [[RS1:%.*]], <vscale x 32 x bfloat> [[VS2:%.*]], i64 noundef [[VL:%.*]]) #[[ATTR0]] {
+// CHECK-RV64-NEXT: [[ENTRY:.*:]]
+// CHECK-RV64-NEXT: [[TMP0:%.*]] = call <vscale x 32 x bfloat> @llvm.riscv.vfnmsub.mask.nxv32bf16.bf16.i64(<vscale x 32 x bfloat> [[VD]], bfloat [[RS1]], <vscale x 32 x bfloat> [[VS2]], <vscale x 32 x i1> [[MASK]], i64 7, i64 [[VL]], i64 0)
+// CHECK-RV64-NEXT: ret <vscale x 32 x bfloat> [[TMP0]]
+//
+vbfloat16m8_t test_vfnmsub_vf_bf16m8_tumu(vbool2_t mask, vbfloat16m8_t vd, __bf16 rs1, vbfloat16m8_t vs2, size_t vl) {
+ return __riscv_vfnmsub_vf_bf16m8_tumu(mask, vd, rs1, vs2, vl);
+}
+
+// CHECK-RV64-LABEL: define dso_local <vscale x 1 x bfloat> @test_vfnmsub_vv_bf16mf4_mu(
+// CHECK-RV64-SAME: <vscale x 1 x i1> [[MASK:%.*]], <vscale x 1 x bfloat> [[VD:%.*]], <vscale x 1 x bfloat> [[VS1:%.*]], <vscale x 1 x bfloat> [[VS2:%.*]], i64 noundef [[VL:%.*]]) #[[ATTR0]] {
+// CHECK-RV64-NEXT: [[ENTRY:.*:]]
+// CHECK-RV64-NEXT: [[TMP0:%.*]] = call <vscale x 1 x bfloat> @llvm.riscv.vfnmsub.mask.nxv1bf16.nxv1bf16.i64(<vscale x 1 x bfloat> [[VD]], <vscale x 1 x bfloat> [[VS1]], <vscale x 1 x bfloat> [[VS2]], <vscale x 1 x i1> [[MASK]], i64 7, i64 [[VL]], i64 1)
+// CHECK-RV64-NEXT: ret <vscale x 1 x bfloat> [[TMP0]]
+//
+vbfloat16mf4_t test_vfnmsub_vv_bf16mf4_mu(vbool64_t mask, vbfloat16mf4_t vd, vbfloat16mf4_t vs1, vbfloat16mf4_t vs2, size_t vl) {
+ return __riscv_vfnmsub_vv_bf16mf4_mu(mask, vd, vs1, vs2, vl);
+}
+
+// CHECK-RV64-LABEL: define dso_local <vscale x 1 x bfloat> @test_vfnmsub_vf_bf16mf4_mu(
+// CHECK-RV64-SAME: <vscale x 1 x i1> [[MASK:%.*]], <vscale x 1 x bfloat> [[VD:%.*]], bfloat noundef [[RS1:%.*]], <vscale x 1 x bfloat> [[VS2:%.*]], i64 noundef [[VL:%.*]]) #[[ATTR0]] {
+// CHECK-RV64-NEXT: [[ENTRY:.*:]]
+// CHECK-RV64-NEXT: [[TMP0:%.*]] = call <vscale x 1 x bfloat> @llvm.riscv.vfnmsub.mask.nxv1bf16.bf16.i64(<vscale x 1 x bfloat> [[VD]], bfloat [[RS1]], <vscale x 1 x bfloat> [[VS2]], <vscale x 1 x i1> [[MASK]], i64 7, i64 [[VL]], i64 1)
+// CHECK-RV64-NEXT: ret <vscale x 1 x bfloat> [[TMP0]]
+//
+vbfloat16mf4_t test_vfnmsub_vf_bf16mf4_mu(vbool64_t mask, vbfloat16mf4_t vd, __bf16 rs1, vbfloat16mf4_t vs2, size_t vl) {
+ return __riscv_vfnmsub_vf_bf16mf4_mu(mask, vd, rs1, vs2, vl);
+}
+
+// CHECK-RV64-LABEL: define dso_local <vscale x 2 x bfloat> @test_vfnmsub_vv_bf16mf2_mu(
+// CHECK-RV64-SAME: <vscale x 2 x i1> [[MASK:%.*]], <vscale x 2 x bfloat> [[VD:%.*]], <vscale x 2 x bfloat> [[VS1:%.*]], <vscale x 2 x bfloat> [[VS2:%.*]], i64 noundef [[VL:%.*]]) #[[ATTR0]] {
+// CHECK-RV64-NEXT: [[ENTRY:.*:]]
+// CHECK-RV64-NEXT: [[TMP0:%.*]] = call <vscale x 2 x bfloat> @llvm.riscv.vfnmsub.mask.nxv2bf16.nxv2bf16.i64(<vscale x 2 x bfloat> [[VD]], <vscale x 2 x bfloat> [[VS1]], <vscale x 2 x bfloat> [[VS2]], <vscale x 2 x i1> [[MASK]], i64 7, i64 [[VL]], i64 1)
+// CHECK-RV64-NEXT: ret <vscale x 2 x bfloat> [[TMP0]]
+//
+vbfloat16mf2_t test_vfnmsub_vv_bf16mf2_mu(vbool32_t mask, vbfloat16mf2_t vd, vbfloat16mf2_t vs1, vbfloat16mf2_t vs2, size_t vl) {
+ return __riscv_vfnmsub_vv_bf16mf2_mu(mask, vd, vs1, vs2, vl);
+}
+
+// CHECK-RV64-LABEL: define dso_local <vscale x 2 x bfloat> @test_vfnmsub_vf_bf16mf2_mu(
+// CHECK-RV64-SAME: <vscale x 2 x i1> [[MASK:%.*]], <vscale x 2 x bfloat> [[VD:%.*]], bfloat noundef [[RS1:%.*]], <vscale x 2 x bfloat> [[VS2:%.*]], i64 noundef [[VL:%.*]]) #[[ATTR0]] {
+// CHECK-RV64-NEXT: [[ENTRY:.*:]]
+// CHECK-RV64-NEXT: [[TMP0:%.*]] = call <vscale x 2 x bfloat> @llvm.riscv.vfnmsub.mask.nxv2bf16.bf16.i64(<vscale x 2 x bfloat> [[VD]], bfloat [[RS1]], <vscale x 2 x bfloat> [[VS2]], <vscale x 2 x i1> [[MASK]], i64 7, i64 [[VL]], i64 1)
+// CHECK-RV64-NEXT: ret <vscale x 2 x bfloat> [[TMP0]]
+//
+vbfloat16mf2_t test_vfnmsub_vf_bf16mf2_mu(vbool32_t mask, vbfloat16mf2_t vd, __bf16 rs1, vbfloat16mf2_t vs2, size_t vl) {
+ return __riscv_vfnmsub_vf_bf16mf2_mu(mask, vd, rs1, vs2, vl);
+}
+
+// CHECK-RV64-LABEL: define dso_local <vscale x 4 x bfloat> @test_vfnmsub_vv_bf16m1_mu(
+// CHECK-RV64-SAME: <vscale x 4 x i1> [[MASK:%.*]], <vscale x 4 x bfloat> [[VD:%.*]], <vscale x 4 x bfloat> [[VS1:%.*]], <vscale x 4 x bfloat> [[VS2:%.*]], i64 noundef [[VL:%.*]]) #[[ATTR0]] {
+// CHECK-RV64-NEXT: [[ENTRY:.*:]]
+// CHECK-RV64-NEXT: [[TMP0:%.*]] = call <vscale x 4 x bfloat> @llvm.riscv.vfnmsub.mask.nxv4bf16.nxv4bf16.i64(<vscale x 4 x bfloat> [[VD]], <vscale x 4 x bfloat> [[VS1]], <vscale x 4 x bfloat> [[VS2]], <vscale x 4 x i1> [[MASK]], i64 7, i64 [[VL]], i64 1)
+// CHECK-RV64-NEXT: ret <vscale x 4 x bfloat> [[TMP0]]
+//
+vbfloat16m1_t test_vfnmsub_vv_bf16m1_mu(vbool16_t mask, vbfloat16m1_t vd, vbfloat16m1_t vs1, vbfloat16m1_t vs2, size_t vl) {
+ return __riscv_vfnmsub_vv_bf16m1_mu(mask, vd, vs1, vs2, vl);
+}
+
+// CHECK-RV64-LABEL: define dso_local <vscale x 4 x bfloat> @test_vfnmsub_vf_bf16m1_mu(
+// CHECK-RV64-SAME: <vscale x 4 x i1> [[MASK:%.*]], <vscale x 4 x bfloat> [[VD:%.*]], bfloat noundef [[RS1:%.*]], <vscale x 4 x bfloat> [[VS2:%.*]], i64 noundef [[VL:%.*]]) #[[ATTR0]] {
+// CHECK-RV64-NEXT: [[ENTRY:.*:]]
+// CHECK-RV64-NEXT: [[TMP0:%.*]] = call <vscale x 4 x bfloat> @llvm.riscv.vfnmsub.mask.nxv4bf16.bf16.i64(<vscale x 4 x bfloat> [[VD]], bfloat [[RS1]], <vscale x 4 x bfloat> [[VS2]], <vscale x 4 x i1> [[MASK]], i64 7, i64 [[VL]], i64 1)
+// CHECK-RV64-NEXT: ret <vscale x 4 x bfloat> [[TMP0]]
+//
+vbfloat16m1_t test_vfnmsub_vf_bf16m1_mu(vbool16_t mask, vbfloat16m1_t vd, __bf16 rs1, vbfloat16m1_t vs2, size_t vl) {
+ return __riscv_vfnmsub_vf_bf16m1_mu(mask, vd, rs1, vs2, vl);
+}
+
+// CHECK-RV64-LABEL: define dso_local <vscale x 8 x bfloat> @test_vfnmsub_vv_bf16m2_mu(
+// CHECK-RV64-SAME: <vscale x 8 x i1> [[MASK:%.*]], <vscale x 8 x bfloat> [[VD:%.*]], <vscale x 8 x bfloat> [[VS1:%.*]], <vscale x 8 x bfloat> [[VS2:%.*]], i64 noundef [[VL:%.*]]) #[[ATTR0]] {
+// CHECK-RV64-NEXT: [[ENTRY:.*:]]
+// CHECK-RV64-NEXT: [[TMP0:%.*]] = call <vscale x 8 x bfloat> @llvm.riscv.vfnmsub.mask.nxv8bf16.nxv8bf16.i64(<vscale x 8 x bfloat> [[VD]], <vscale x 8 x bfloat> [[VS1]], <vscale x 8 x bfloat> [[VS2]], <vscale x 8 x i1> [[MASK]], i64 7, i64 [[VL]], i64 1)
+// CHECK-RV64-NEXT: ret <vscale x 8 x bfloat> [[TMP0]]
+//
+vbfloat16m2_t test_vfnmsub_vv_bf16m2_mu(vbool8_t mask, vbfloat16m2_t vd, vbfloat16m2_t vs1, vbfloat16m2_t vs2, size_t vl) {
+ return __riscv_vfnmsub_vv_bf16m2_mu(mask, vd, vs1, vs2, vl);
+}
+
+// CHECK-RV64-LABEL: define dso_local <vscale x 8 x bfloat> @test_vfnmsub_vf_bf16m2_mu(
+// CHECK-RV64-SAME: <vscale x 8 x i1> [[MASK:%.*]], <vscale x 8 x bfloat> [[VD:%.*]], bfloat noundef [[RS1:%.*]], <vscale x 8 x bfloat> [[VS2:%.*]], i64 noundef [[VL:%.*]]) #[[ATTR0]] {
+// CHECK-RV64-NEXT: [[ENTRY:.*:]]
+// CHECK-RV64-NEXT: [[TMP0:%.*]] = call <vscale x 8 x bfloat> @llvm.riscv.vfnmsub.mask.nxv8bf16.bf16.i64(<vscale x 8 x bfloat> [[VD]], bfloat [[RS1]], <vscale x 8 x bfloat> [[VS2]], <vscale x 8 x i1> [[MASK]], i64 7, i64 [[VL]], i64 1)
+// CHECK-RV64-NEXT: ret <vscale x 8 x bfloat> [[TMP0]]
+//
+vbfloat16m2_t test_vfnmsub_vf_bf16m2_mu(vbool8_t mask, vbfloat16m2_t vd, __bf16 rs1, vbfloat16m2_t vs2, size_t vl) {
+ return __riscv_vfnmsub_vf_bf16m2_mu(mask, vd, rs1, vs2, vl);
+}
+
+// CHECK-RV64-LABEL: define dso_local <vscale x 16 x bfloat> @test_vfnmsub_vv_bf16m4_mu(
+// CHECK-RV64-SAME: <vscale x 16 x i1> [[MASK:%.*]], <vscale x 16 x bfloat> [[VD:%.*]], <vscale x 16 x bfloat> [[VS1:%.*]], <vscale x 16 x bfloat> [[VS2:%.*]], i64 noundef [[VL:%.*]]) #[[ATTR0]] {
+// CHECK-RV64-NEXT: [[ENTRY:.*:]]
+// CHECK-RV64-NEXT: [[TMP0:%.*]] = call <vscale x 16 x bfloat> @llvm.riscv.vfnmsub.mask.nxv16bf16.nxv16bf16.i64(<vscale x 16 x bfloat> [[VD]], <vscale x 16 x bfloat> [[VS1]], <vscale x 16 x bfloat> [[VS2]], <vscale x 16 x i1> [[MASK]], i64 7, i64 [[VL]], i64 1)
+// CHECK-RV64-NEXT: ret <vscale x 16 x bfloat> [[TMP0]]
+//
+vbfloat16m4_t test_vfnmsub_vv_bf16m4_mu(vbool4_t mask, vbfloat16m4_t vd, vbfloat16m4_t vs1, vbfloat16m4_t vs2, size_t vl) {
+ return __riscv_vfnmsub_vv_bf16m4_mu(mask, vd, vs1, vs2, vl);
+}
+
+// CHECK-RV64-LABEL: define dso_local <vscale x 16 x bfloat> @test_vfnmsub_vf_bf16m4_mu(
+// CHECK-RV64-SAME: <vscale x 16 x i1> [[MASK:%.*]], <vscale x 16 x bfloat> [[VD:%.*]], bfloat noundef [[RS1:%.*]], <vscale x 16 x bfloat> [[VS2:%.*]], i64 noundef [[VL:%.*]]) #[[ATTR0]] {
+// CHECK-RV64-NEXT: [[ENTRY:.*:]]
+// CHECK-RV64-NEXT: [[TMP0:%.*]] = call <vscale x 16 x bfloat> @llvm.riscv.vfnmsub.mask.nxv16bf16.bf16.i64(<vscale x 16 x bfloat> [[VD]], bfloat [[RS1]], <vscale x 16 x bfloat> [[VS2]], <vscale x 16 x i1> [[MASK]], i64 7, i64 [[VL]], i64 1)
+// CHECK-RV64-NEXT: ret <vscale x 16 x bfloat> [[TMP0]]
+//
+vbfloat16m4_t test_vfnmsub_vf_bf16m4_mu(vbool4_t mask, vbfloat16m4_t vd, __bf16 rs1, vbfloat16m4_t vs2, size_t vl) {
+ return __riscv_vfnmsub_vf_bf16m4_mu(mask, vd, rs1, vs2, vl);
+}
+
+// CHECK-RV64-LABEL: define dso_local <vscale x 32 x bfloat> @test_vfnmsub_vv_bf16m8_mu(
+// CHECK-RV64-SAME: <vscale x 32 x i1> [[MASK:%.*]], <vscale x 32 x bfloat> [[VD:%.*]], <vscale x 32 x bfloat> [[VS1:%.*]], <vscale x 32 x bfloat> [[VS2:%.*]], i64 noundef [[VL:%.*]]) #[[ATTR0]] {
+// CHECK-RV64-NEXT: [[ENTRY:.*:]]
+// CHECK-RV64-NEXT: [[TMP0:%.*]] = call <vscale x 32 x bfloat> @llvm.riscv.vfnmsub.mask.nxv32bf16.nxv32bf16.i64(<vscale x 32 x bfloat> [[VD]], <vscale x 32 x bfloat> [[VS1]], <vscale x 32 x bfloat> [[VS2]], <vscale x 32 x i1> [[MASK]], i64 7, i64 [[VL]], i64 1)
+// CHECK-RV64-NEXT: ret <vscale x 32 x bfloat> [[TMP0]]
+//
+vbfloat16m8_t test_vfnmsub_vv_bf16m8_mu(vbool2_t mask, vbfloat16m8_t vd, vbfloat16m8_t vs1, vbfloat16m8_t vs2, size_t vl) {
+ return __riscv_vfnmsub_vv_bf16m8_mu(mask, vd, vs1, vs2, vl);
+}
+
+// CHECK-RV64-LABEL: define dso_local <vscale x 32 x bfloat> @test_vfnmsub_vf_bf16m8_mu(
+// CHECK-RV64-SAME: <vscale x 32 x i1> [[MASK:%.*]], <vscale x 32 x bfloat> [[VD:%.*]], bfloat noundef [[RS1:%.*]], <vscale x 32 x bfloat> [[VS2:%.*]], i64 noundef [[VL:%.*]]) #[[ATTR0]] {
+// CHECK-RV64-NEXT: [[ENTRY:.*:]]
+// CHECK-RV64-NEXT: [[TMP0:%.*]] = call <vscale x 32 x bfloat> @llvm.riscv.vfnmsub.mask.nxv32bf16.bf16.i64(<vscale x 32 x bfloat> [[VD]], bfloat [[RS1]], <vscale x 32 x bfloat> [[VS2]], <vscale x 32 x i1> [[MASK]], i64 7, i64 [[VL]], i64 1)
+// CHECK-RV64-NEXT: ret <vscale x 32 x bfloat> [[TMP0]]
+//
+vbfloat16m8_t test_vfnmsub_vf_bf16m8_mu(vbool2_t mask, vbfloat16m8_t vd, __bf16 rs1, vbfloat16m8_t vs2, size_t vl) {
+ return __riscv_vfnmsub_vf_bf16m8_mu(mask, vd, rs1, vs2, vl);
+}
+
diff --git a/clang/test/CodeGen/RISCV/rvv-intrinsics-autogenerated/zvfbfa/policy/non-overloaded/vfrec7.c b/clang/test/CodeGen/RISCV/rvv-intrinsics-autogenerated/zvfbfa/policy/non-overloaded/vfrec7.c
new file mode 100644
index 0000000..3123692
--- /dev/null
+++ b/clang/test/CodeGen/RISCV/rvv-intrinsics-autogenerated/zvfbfa/policy/non-overloaded/vfrec7.c
@@ -0,0 +1,249 @@
+// NOTE: Assertions have been autogenerated by utils/update_cc_test_checks.py UTC_ARGS: --version 5
+// REQUIRES: riscv-registered-target
+// RUN: %clang_cc1 -triple riscv64 -target-feature +v \
+// RUN: -target-feature +experimental-zvfbfa -disable-O0-optnone \
+// RUN: -emit-llvm %s -o - | opt -S -passes=mem2reg | \
+// RUN: FileCheck --check-prefix=CHECK-RV64 %s
+
+#include <riscv_vector.h>
+
+// CHECK-RV64-LABEL: define dso_local <vscale x 1 x bfloat> @test_vfrec7_v_bf16mf4_tu(
+// CHECK-RV64-SAME: <vscale x 1 x bfloat> [[MASKEDOFF:%.*]], <vscale x 1 x bfloat> [[OP1:%.*]], i64 noundef [[VL:%.*]]) #[[ATTR0:[0-9]+]] {
+// CHECK-RV64-NEXT: [[ENTRY:.*:]]
+// CHECK-RV64-NEXT: [[TMP0:%.*]] = call <vscale x 1 x bfloat> @llvm.riscv.vfrec7.nxv1bf16.i64(<vscale x 1 x bfloat> [[MASKEDOFF]], <vscale x 1 x bfloat> [[OP1]], i64 7, i64 [[VL]])
+// CHECK-RV64-NEXT: ret <vscale x 1 x bfloat> [[TMP0]]
+//
+vbfloat16mf4_t test_vfrec7_v_bf16mf4_tu(vbfloat16mf4_t maskedoff, vbfloat16mf4_t op1, size_t vl) {
+ return __riscv_vfrec7_v_bf16mf4_tu(maskedoff, op1, vl);
+}
+
+// CHECK-RV64-LABEL: define dso_local <vscale x 2 x bfloat> @test_vfrec7_v_bf16mf2_tu(
+// CHECK-RV64-SAME: <vscale x 2 x bfloat> [[MASKEDOFF:%.*]], <vscale x 2 x bfloat> [[OP1:%.*]], i64 noundef [[VL:%.*]]) #[[ATTR0]] {
+// CHECK-RV64-NEXT: [[ENTRY:.*:]]
+// CHECK-RV64-NEXT: [[TMP0:%.*]] = call <vscale x 2 x bfloat> @llvm.riscv.vfrec7.nxv2bf16.i64(<vscale x 2 x bfloat> [[MASKEDOFF]], <vscale x 2 x bfloat> [[OP1]], i64 7, i64 [[VL]])
+// CHECK-RV64-NEXT: ret <vscale x 2 x bfloat> [[TMP0]]
+//
+vbfloat16mf2_t test_vfrec7_v_bf16mf2_tu(vbfloat16mf2_t maskedoff, vbfloat16mf2_t op1, size_t vl) {
+ return __riscv_vfrec7_v_bf16mf2_tu(maskedoff, op1, vl);
+}
+
+// CHECK-RV64-LABEL: define dso_local <vscale x 4 x bfloat> @test_vfrec7_v_bf16m1_tu(
+// CHECK-RV64-SAME: <vscale x 4 x bfloat> [[MASKEDOFF:%.*]], <vscale x 4 x bfloat> [[OP1:%.*]], i64 noundef [[VL:%.*]]) #[[ATTR0]] {
+// CHECK-RV64-NEXT: [[ENTRY:.*:]]
+// CHECK-RV64-NEXT: [[TMP0:%.*]] = call <vscale x 4 x bfloat> @llvm.riscv.vfrec7.nxv4bf16.i64(<vscale x 4 x bfloat> [[MASKEDOFF]], <vscale x 4 x bfloat> [[OP1]], i64 7, i64 [[VL]])
+// CHECK-RV64-NEXT: ret <vscale x 4 x bfloat> [[TMP0]]
+//
+vbfloat16m1_t test_vfrec7_v_bf16m1_tu(vbfloat16m1_t maskedoff, vbfloat16m1_t op1, size_t vl) {
+ return __riscv_vfrec7_v_bf16m1_tu(maskedoff, op1, vl);
+}
+
+// CHECK-RV64-LABEL: define dso_local <vscale x 8 x bfloat> @test_vfrec7_v_bf16m2_tu(
+// CHECK-RV64-SAME: <vscale x 8 x bfloat> [[MASKEDOFF:%.*]], <vscale x 8 x bfloat> [[OP1:%.*]], i64 noundef [[VL:%.*]]) #[[ATTR0]] {
+// CHECK-RV64-NEXT: [[ENTRY:.*:]]
+// CHECK-RV64-NEXT: [[TMP0:%.*]] = call <vscale x 8 x bfloat> @llvm.riscv.vfrec7.nxv8bf16.i64(<vscale x 8 x bfloat> [[MASKEDOFF]], <vscale x 8 x bfloat> [[OP1]], i64 7, i64 [[VL]])
+// CHECK-RV64-NEXT: ret <vscale x 8 x bfloat> [[TMP0]]
+//
+vbfloat16m2_t test_vfrec7_v_bf16m2_tu(vbfloat16m2_t maskedoff, vbfloat16m2_t op1, size_t vl) {
+ return __riscv_vfrec7_v_bf16m2_tu(maskedoff, op1, vl);
+}
+
+// CHECK-RV64-LABEL: define dso_local <vscale x 16 x bfloat> @test_vfrec7_v_bf16m4_tu(
+// CHECK-RV64-SAME: <vscale x 16 x bfloat> [[MASKEDOFF:%.*]], <vscale x 16 x bfloat> [[OP1:%.*]], i64 noundef [[VL:%.*]]) #[[ATTR0]] {
+// CHECK-RV64-NEXT: [[ENTRY:.*:]]
+// CHECK-RV64-NEXT: [[TMP0:%.*]] = call <vscale x 16 x bfloat> @llvm.riscv.vfrec7.nxv16bf16.i64(<vscale x 16 x bfloat> [[MASKEDOFF]], <vscale x 16 x bfloat> [[OP1]], i64 7, i64 [[VL]])
+// CHECK-RV64-NEXT: ret <vscale x 16 x bfloat> [[TMP0]]
+//
+vbfloat16m4_t test_vfrec7_v_bf16m4_tu(vbfloat16m4_t maskedoff, vbfloat16m4_t op1, size_t vl) {
+ return __riscv_vfrec7_v_bf16m4_tu(maskedoff, op1, vl);
+}
+
+// CHECK-RV64-LABEL: define dso_local <vscale x 32 x bfloat> @test_vfrec7_v_bf16m8_tu(
+// CHECK-RV64-SAME: <vscale x 32 x bfloat> [[MASKEDOFF:%.*]], <vscale x 32 x bfloat> [[OP1:%.*]], i64 noundef [[VL:%.*]]) #[[ATTR0]] {
+// CHECK-RV64-NEXT: [[ENTRY:.*:]]
+// CHECK-RV64-NEXT: [[TMP0:%.*]] = call <vscale x 32 x bfloat> @llvm.riscv.vfrec7.nxv32bf16.i64(<vscale x 32 x bfloat> [[MASKEDOFF]], <vscale x 32 x bfloat> [[OP1]], i64 7, i64 [[VL]])
+// CHECK-RV64-NEXT: ret <vscale x 32 x bfloat> [[TMP0]]
+//
+vbfloat16m8_t test_vfrec7_v_bf16m8_tu(vbfloat16m8_t maskedoff, vbfloat16m8_t op1, size_t vl) {
+ return __riscv_vfrec7_v_bf16m8_tu(maskedoff, op1, vl);
+}
+
+// CHECK-RV64-LABEL: define dso_local <vscale x 1 x bfloat> @test_vfrec7_v_bf16mf4_tum(
+// CHECK-RV64-SAME: <vscale x 1 x i1> [[MASK:%.*]], <vscale x 1 x bfloat> [[MASKEDOFF:%.*]], <vscale x 1 x bfloat> [[OP1:%.*]], i64 noundef [[VL:%.*]]) #[[ATTR0]] {
+// CHECK-RV64-NEXT: [[ENTRY:.*:]]
+// CHECK-RV64-NEXT: [[TMP0:%.*]] = call <vscale x 1 x bfloat> @llvm.riscv.vfrec7.mask.nxv1bf16.i64(<vscale x 1 x bfloat> [[MASKEDOFF]], <vscale x 1 x bfloat> [[OP1]], <vscale x 1 x i1> [[MASK]], i64 7, i64 [[VL]], i64 2)
+// CHECK-RV64-NEXT: ret <vscale x 1 x bfloat> [[TMP0]]
+//
+vbfloat16mf4_t test_vfrec7_v_bf16mf4_tum(vbool64_t mask, vbfloat16mf4_t maskedoff, vbfloat16mf4_t op1, size_t vl) {
+ return __riscv_vfrec7_v_bf16mf4_tum(mask, maskedoff, op1, vl);
+}
+
+// CHECK-RV64-LABEL: define dso_local <vscale x 2 x bfloat> @test_vfrec7_v_bf16mf2_tum(
+// CHECK-RV64-SAME: <vscale x 2 x i1> [[MASK:%.*]], <vscale x 2 x bfloat> [[MASKEDOFF:%.*]], <vscale x 2 x bfloat> [[OP1:%.*]], i64 noundef [[VL:%.*]]) #[[ATTR0]] {
+// CHECK-RV64-NEXT: [[ENTRY:.*:]]
+// CHECK-RV64-NEXT: [[TMP0:%.*]] = call <vscale x 2 x bfloat> @llvm.riscv.vfrec7.mask.nxv2bf16.i64(<vscale x 2 x bfloat> [[MASKEDOFF]], <vscale x 2 x bfloat> [[OP1]], <vscale x 2 x i1> [[MASK]], i64 7, i64 [[VL]], i64 2)
+// CHECK-RV64-NEXT: ret <vscale x 2 x bfloat> [[TMP0]]
+//
+vbfloat16mf2_t test_vfrec7_v_bf16mf2_tum(vbool32_t mask, vbfloat16mf2_t maskedoff, vbfloat16mf2_t op1, size_t vl) {
+ return __riscv_vfrec7_v_bf16mf2_tum(mask, maskedoff, op1, vl);
+}
+
+// CHECK-RV64-LABEL: define dso_local <vscale x 4 x bfloat> @test_vfrec7_v_bf16m1_tum(
+// CHECK-RV64-SAME: <vscale x 4 x i1> [[MASK:%.*]], <vscale x 4 x bfloat> [[MASKEDOFF:%.*]], <vscale x 4 x bfloat> [[OP1:%.*]], i64 noundef [[VL:%.*]]) #[[ATTR0]] {
+// CHECK-RV64-NEXT: [[ENTRY:.*:]]
+// CHECK-RV64-NEXT: [[TMP0:%.*]] = call <vscale x 4 x bfloat> @llvm.riscv.vfrec7.mask.nxv4bf16.i64(<vscale x 4 x bfloat> [[MASKEDOFF]], <vscale x 4 x bfloat> [[OP1]], <vscale x 4 x i1> [[MASK]], i64 7, i64 [[VL]], i64 2)
+// CHECK-RV64-NEXT: ret <vscale x 4 x bfloat> [[TMP0]]
+//
+vbfloat16m1_t test_vfrec7_v_bf16m1_tum(vbool16_t mask, vbfloat16m1_t maskedoff, vbfloat16m1_t op1, size_t vl) {
+ return __riscv_vfrec7_v_bf16m1_tum(mask, maskedoff, op1, vl);
+}
+
+// CHECK-RV64-LABEL: define dso_local <vscale x 8 x bfloat> @test_vfrec7_v_bf16m2_tum(
+// CHECK-RV64-SAME: <vscale x 8 x i1> [[MASK:%.*]], <vscale x 8 x bfloat> [[MASKEDOFF:%.*]], <vscale x 8 x bfloat> [[OP1:%.*]], i64 noundef [[VL:%.*]]) #[[ATTR0]] {
+// CHECK-RV64-NEXT: [[ENTRY:.*:]]
+// CHECK-RV64-NEXT: [[TMP0:%.*]] = call <vscale x 8 x bfloat> @llvm.riscv.vfrec7.mask.nxv8bf16.i64(<vscale x 8 x bfloat> [[MASKEDOFF]], <vscale x 8 x bfloat> [[OP1]], <vscale x 8 x i1> [[MASK]], i64 7, i64 [[VL]], i64 2)
+// CHECK-RV64-NEXT: ret <vscale x 8 x bfloat> [[TMP0]]
+//
+vbfloat16m2_t test_vfrec7_v_bf16m2_tum(vbool8_t mask, vbfloat16m2_t maskedoff, vbfloat16m2_t op1, size_t vl) {
+ return __riscv_vfrec7_v_bf16m2_tum(mask, maskedoff, op1, vl);
+}
+
+// CHECK-RV64-LABEL: define dso_local <vscale x 16 x bfloat> @test_vfrec7_v_bf16m4_tum(
+// CHECK-RV64-SAME: <vscale x 16 x i1> [[MASK:%.*]], <vscale x 16 x bfloat> [[MASKEDOFF:%.*]], <vscale x 16 x bfloat> [[OP1:%.*]], i64 noundef [[VL:%.*]]) #[[ATTR0]] {
+// CHECK-RV64-NEXT: [[ENTRY:.*:]]
+// CHECK-RV64-NEXT: [[TMP0:%.*]] = call <vscale x 16 x bfloat> @llvm.riscv.vfrec7.mask.nxv16bf16.i64(<vscale x 16 x bfloat> [[MASKEDOFF]], <vscale x 16 x bfloat> [[OP1]], <vscale x 16 x i1> [[MASK]], i64 7, i64 [[VL]], i64 2)
+// CHECK-RV64-NEXT: ret <vscale x 16 x bfloat> [[TMP0]]
+//
+vbfloat16m4_t test_vfrec7_v_bf16m4_tum(vbool4_t mask, vbfloat16m4_t maskedoff, vbfloat16m4_t op1, size_t vl) {
+ return __riscv_vfrec7_v_bf16m4_tum(mask, maskedoff, op1, vl);
+}
+
+// CHECK-RV64-LABEL: define dso_local <vscale x 32 x bfloat> @test_vfrec7_v_bf16m8_tum(
+// CHECK-RV64-SAME: <vscale x 32 x i1> [[MASK:%.*]], <vscale x 32 x bfloat> [[MASKEDOFF:%.*]], <vscale x 32 x bfloat> [[OP1:%.*]], i64 noundef [[VL:%.*]]) #[[ATTR0]] {
+// CHECK-RV64-NEXT: [[ENTRY:.*:]]
+// CHECK-RV64-NEXT: [[TMP0:%.*]] = call <vscale x 32 x bfloat> @llvm.riscv.vfrec7.mask.nxv32bf16.i64(<vscale x 32 x bfloat> [[MASKEDOFF]], <vscale x 32 x bfloat> [[OP1]], <vscale x 32 x i1> [[MASK]], i64 7, i64 [[VL]], i64 2)
+// CHECK-RV64-NEXT: ret <vscale x 32 x bfloat> [[TMP0]]
+//
+vbfloat16m8_t test_vfrec7_v_bf16m8_tum(vbool2_t mask, vbfloat16m8_t maskedoff, vbfloat16m8_t op1, size_t vl) {
+ return __riscv_vfrec7_v_bf16m8_tum(mask, maskedoff, op1, vl);
+}
+
+// CHECK-RV64-LABEL: define dso_local <vscale x 1 x bfloat> @test_vfrec7_v_bf16mf4_tumu(
+// CHECK-RV64-SAME: <vscale x 1 x i1> [[MASK:%.*]], <vscale x 1 x bfloat> [[MASKEDOFF:%.*]], <vscale x 1 x bfloat> [[OP1:%.*]], i64 noundef [[VL:%.*]]) #[[ATTR0]] {
+// CHECK-RV64-NEXT: [[ENTRY:.*:]]
+// CHECK-RV64-NEXT: [[TMP0:%.*]] = call <vscale x 1 x bfloat> @llvm.riscv.vfrec7.mask.nxv1bf16.i64(<vscale x 1 x bfloat> [[MASKEDOFF]], <vscale x 1 x bfloat> [[OP1]], <vscale x 1 x i1> [[MASK]], i64 7, i64 [[VL]], i64 0)
+// CHECK-RV64-NEXT: ret <vscale x 1 x bfloat> [[TMP0]]
+//
+vbfloat16mf4_t test_vfrec7_v_bf16mf4_tumu(vbool64_t mask, vbfloat16mf4_t maskedoff, vbfloat16mf4_t op1, size_t vl) {
+ return __riscv_vfrec7_v_bf16mf4_tumu(mask, maskedoff, op1, vl);
+}
+
+// CHECK-RV64-LABEL: define dso_local <vscale x 2 x bfloat> @test_vfrec7_v_bf16mf2_tumu(
+// CHECK-RV64-SAME: <vscale x 2 x i1> [[MASK:%.*]], <vscale x 2 x bfloat> [[MASKEDOFF:%.*]], <vscale x 2 x bfloat> [[OP1:%.*]], i64 noundef [[VL:%.*]]) #[[ATTR0]] {
+// CHECK-RV64-NEXT: [[ENTRY:.*:]]
+// CHECK-RV64-NEXT: [[TMP0:%.*]] = call <vscale x 2 x bfloat> @llvm.riscv.vfrec7.mask.nxv2bf16.i64(<vscale x 2 x bfloat> [[MASKEDOFF]], <vscale x 2 x bfloat> [[OP1]], <vscale x 2 x i1> [[MASK]], i64 7, i64 [[VL]], i64 0)
+// CHECK-RV64-NEXT: ret <vscale x 2 x bfloat> [[TMP0]]
+//
+vbfloat16mf2_t test_vfrec7_v_bf16mf2_tumu(vbool32_t mask, vbfloat16mf2_t maskedoff, vbfloat16mf2_t op1, size_t vl) {
+ return __riscv_vfrec7_v_bf16mf2_tumu(mask, maskedoff, op1, vl);
+}
+
+// CHECK-RV64-LABEL: define dso_local <vscale x 4 x bfloat> @test_vfrec7_v_bf16m1_tumu(
+// CHECK-RV64-SAME: <vscale x 4 x i1> [[MASK:%.*]], <vscale x 4 x bfloat> [[MASKEDOFF:%.*]], <vscale x 4 x bfloat> [[OP1:%.*]], i64 noundef [[VL:%.*]]) #[[ATTR0]] {
+// CHECK-RV64-NEXT: [[ENTRY:.*:]]
+// CHECK-RV64-NEXT: [[TMP0:%.*]] = call <vscale x 4 x bfloat> @llvm.riscv.vfrec7.mask.nxv4bf16.i64(<vscale x 4 x bfloat> [[MASKEDOFF]], <vscale x 4 x bfloat> [[OP1]], <vscale x 4 x i1> [[MASK]], i64 7, i64 [[VL]], i64 0)
+// CHECK-RV64-NEXT: ret <vscale x 4 x bfloat> [[TMP0]]
+//
+vbfloat16m1_t test_vfrec7_v_bf16m1_tumu(vbool16_t mask, vbfloat16m1_t maskedoff, vbfloat16m1_t op1, size_t vl) {
+ return __riscv_vfrec7_v_bf16m1_tumu(mask, maskedoff, op1, vl);
+}
+
+// CHECK-RV64-LABEL: define dso_local <vscale x 8 x bfloat> @test_vfrec7_v_bf16m2_tumu(
+// CHECK-RV64-SAME: <vscale x 8 x i1> [[MASK:%.*]], <vscale x 8 x bfloat> [[MASKEDOFF:%.*]], <vscale x 8 x bfloat> [[OP1:%.*]], i64 noundef [[VL:%.*]]) #[[ATTR0]] {
+// CHECK-RV64-NEXT: [[ENTRY:.*:]]
+// CHECK-RV64-NEXT: [[TMP0:%.*]] = call <vscale x 8 x bfloat> @llvm.riscv.vfrec7.mask.nxv8bf16.i64(<vscale x 8 x bfloat> [[MASKEDOFF]], <vscale x 8 x bfloat> [[OP1]], <vscale x 8 x i1> [[MASK]], i64 7, i64 [[VL]], i64 0)
+// CHECK-RV64-NEXT: ret <vscale x 8 x bfloat> [[TMP0]]
+//
+vbfloat16m2_t test_vfrec7_v_bf16m2_tumu(vbool8_t mask, vbfloat16m2_t maskedoff, vbfloat16m2_t op1, size_t vl) {
+ return __riscv_vfrec7_v_bf16m2_tumu(mask, maskedoff, op1, vl);
+}
+
+// CHECK-RV64-LABEL: define dso_local <vscale x 16 x bfloat> @test_vfrec7_v_bf16m4_tumu(
+// CHECK-RV64-SAME: <vscale x 16 x i1> [[MASK:%.*]], <vscale x 16 x bfloat> [[MASKEDOFF:%.*]], <vscale x 16 x bfloat> [[OP1:%.*]], i64 noundef [[VL:%.*]]) #[[ATTR0]] {
+// CHECK-RV64-NEXT: [[ENTRY:.*:]]
+// CHECK-RV64-NEXT: [[TMP0:%.*]] = call <vscale x 16 x bfloat> @llvm.riscv.vfrec7.mask.nxv16bf16.i64(<vscale x 16 x bfloat> [[MASKEDOFF]], <vscale x 16 x bfloat> [[OP1]], <vscale x 16 x i1> [[MASK]], i64 7, i64 [[VL]], i64 0)
+// CHECK-RV64-NEXT: ret <vscale x 16 x bfloat> [[TMP0]]
+//
+vbfloat16m4_t test_vfrec7_v_bf16m4_tumu(vbool4_t mask, vbfloat16m4_t maskedoff, vbfloat16m4_t op1, size_t vl) {
+ return __riscv_vfrec7_v_bf16m4_tumu(mask, maskedoff, op1, vl);
+}
+
+// CHECK-RV64-LABEL: define dso_local <vscale x 32 x bfloat> @test_vfrec7_v_bf16m8_tumu(
+// CHECK-RV64-SAME: <vscale x 32 x i1> [[MASK:%.*]], <vscale x 32 x bfloat> [[MASKEDOFF:%.*]], <vscale x 32 x bfloat> [[OP1:%.*]], i64 noundef [[VL:%.*]]) #[[ATTR0]] {
+// CHECK-RV64-NEXT: [[ENTRY:.*:]]
+// CHECK-RV64-NEXT: [[TMP0:%.*]] = call <vscale x 32 x bfloat> @llvm.riscv.vfrec7.mask.nxv32bf16.i64(<vscale x 32 x bfloat> [[MASKEDOFF]], <vscale x 32 x bfloat> [[OP1]], <vscale x 32 x i1> [[MASK]], i64 7, i64 [[VL]], i64 0)
+// CHECK-RV64-NEXT: ret <vscale x 32 x bfloat> [[TMP0]]
+//
+vbfloat16m8_t test_vfrec7_v_bf16m8_tumu(vbool2_t mask, vbfloat16m8_t maskedoff, vbfloat16m8_t op1, size_t vl) {
+ return __riscv_vfrec7_v_bf16m8_tumu(mask, maskedoff, op1, vl);
+}
+
+// CHECK-RV64-LABEL: define dso_local <vscale x 1 x bfloat> @test_vfrec7_v_bf16mf4_mu(
+// CHECK-RV64-SAME: <vscale x 1 x i1> [[MASK:%.*]], <vscale x 1 x bfloat> [[MASKEDOFF:%.*]], <vscale x 1 x bfloat> [[OP1:%.*]], i64 noundef [[VL:%.*]]) #[[ATTR0]] {
+// CHECK-RV64-NEXT: [[ENTRY:.*:]]
+// CHECK-RV64-NEXT: [[TMP0:%.*]] = call <vscale x 1 x bfloat> @llvm.riscv.vfrec7.mask.nxv1bf16.i64(<vscale x 1 x bfloat> [[MASKEDOFF]], <vscale x 1 x bfloat> [[OP1]], <vscale x 1 x i1> [[MASK]], i64 7, i64 [[VL]], i64 1)
+// CHECK-RV64-NEXT: ret <vscale x 1 x bfloat> [[TMP0]]
+//
+vbfloat16mf4_t test_vfrec7_v_bf16mf4_mu(vbool64_t mask, vbfloat16mf4_t maskedoff, vbfloat16mf4_t op1, size_t vl) {
+ return __riscv_vfrec7_v_bf16mf4_mu(mask, maskedoff, op1, vl);
+}
+
+// CHECK-RV64-LABEL: define dso_local <vscale x 2 x bfloat> @test_vfrec7_v_bf16mf2_mu(
+// CHECK-RV64-SAME: <vscale x 2 x i1> [[MASK:%.*]], <vscale x 2 x bfloat> [[MASKEDOFF:%.*]], <vscale x 2 x bfloat> [[OP1:%.*]], i64 noundef [[VL:%.*]]) #[[ATTR0]] {
+// CHECK-RV64-NEXT: [[ENTRY:.*:]]
+// CHECK-RV64-NEXT: [[TMP0:%.*]] = call <vscale x 2 x bfloat> @llvm.riscv.vfrec7.mask.nxv2bf16.i64(<vscale x 2 x bfloat> [[MASKEDOFF]], <vscale x 2 x bfloat> [[OP1]], <vscale x 2 x i1> [[MASK]], i64 7, i64 [[VL]], i64 1)
+// CHECK-RV64-NEXT: ret <vscale x 2 x bfloat> [[TMP0]]
+//
+vbfloat16mf2_t test_vfrec7_v_bf16mf2_mu(vbool32_t mask, vbfloat16mf2_t maskedoff, vbfloat16mf2_t op1, size_t vl) {
+ return __riscv_vfrec7_v_bf16mf2_mu(mask, maskedoff, op1, vl);
+}
+
+// CHECK-RV64-LABEL: define dso_local <vscale x 4 x bfloat> @test_vfrec7_v_bf16m1_mu(
+// CHECK-RV64-SAME: <vscale x 4 x i1> [[MASK:%.*]], <vscale x 4 x bfloat> [[MASKEDOFF:%.*]], <vscale x 4 x bfloat> [[OP1:%.*]], i64 noundef [[VL:%.*]]) #[[ATTR0]] {
+// CHECK-RV64-NEXT: [[ENTRY:.*:]]
+// CHECK-RV64-NEXT: [[TMP0:%.*]] = call <vscale x 4 x bfloat> @llvm.riscv.vfrec7.mask.nxv4bf16.i64(<vscale x 4 x bfloat> [[MASKEDOFF]], <vscale x 4 x bfloat> [[OP1]], <vscale x 4 x i1> [[MASK]], i64 7, i64 [[VL]], i64 1)
+// CHECK-RV64-NEXT: ret <vscale x 4 x bfloat> [[TMP0]]
+//
+vbfloat16m1_t test_vfrec7_v_bf16m1_mu(vbool16_t mask, vbfloat16m1_t maskedoff, vbfloat16m1_t op1, size_t vl) {
+ return __riscv_vfrec7_v_bf16m1_mu(mask, maskedoff, op1, vl);
+}
+
+// CHECK-RV64-LABEL: define dso_local <vscale x 8 x bfloat> @test_vfrec7_v_bf16m2_mu(
+// CHECK-RV64-SAME: <vscale x 8 x i1> [[MASK:%.*]], <vscale x 8 x bfloat> [[MASKEDOFF:%.*]], <vscale x 8 x bfloat> [[OP1:%.*]], i64 noundef [[VL:%.*]]) #[[ATTR0]] {
+// CHECK-RV64-NEXT: [[ENTRY:.*:]]
+// CHECK-RV64-NEXT: [[TMP0:%.*]] = call <vscale x 8 x bfloat> @llvm.riscv.vfrec7.mask.nxv8bf16.i64(<vscale x 8 x bfloat> [[MASKEDOFF]], <vscale x 8 x bfloat> [[OP1]], <vscale x 8 x i1> [[MASK]], i64 7, i64 [[VL]], i64 1)
+// CHECK-RV64-NEXT: ret <vscale x 8 x bfloat> [[TMP0]]
+//
+vbfloat16m2_t test_vfrec7_v_bf16m2_mu(vbool8_t mask, vbfloat16m2_t maskedoff, vbfloat16m2_t op1, size_t vl) {
+ return __riscv_vfrec7_v_bf16m2_mu(mask, maskedoff, op1, vl);
+}
+
+// CHECK-RV64-LABEL: define dso_local <vscale x 16 x bfloat> @test_vfrec7_v_bf16m4_mu(
+// CHECK-RV64-SAME: <vscale x 16 x i1> [[MASK:%.*]], <vscale x 16 x bfloat> [[MASKEDOFF:%.*]], <vscale x 16 x bfloat> [[OP1:%.*]], i64 noundef [[VL:%.*]]) #[[ATTR0]] {
+// CHECK-RV64-NEXT: [[ENTRY:.*:]]
+// CHECK-RV64-NEXT: [[TMP0:%.*]] = call <vscale x 16 x bfloat> @llvm.riscv.vfrec7.mask.nxv16bf16.i64(<vscale x 16 x bfloat> [[MASKEDOFF]], <vscale x 16 x bfloat> [[OP1]], <vscale x 16 x i1> [[MASK]], i64 7, i64 [[VL]], i64 1)
+// CHECK-RV64-NEXT: ret <vscale x 16 x bfloat> [[TMP0]]
+//
+vbfloat16m4_t test_vfrec7_v_bf16m4_mu(vbool4_t mask, vbfloat16m4_t maskedoff, vbfloat16m4_t op1, size_t vl) {
+ return __riscv_vfrec7_v_bf16m4_mu(mask, maskedoff, op1, vl);
+}
+
+// CHECK-RV64-LABEL: define dso_local <vscale x 32 x bfloat> @test_vfrec7_v_bf16m8_mu(
+// CHECK-RV64-SAME: <vscale x 32 x i1> [[MASK:%.*]], <vscale x 32 x bfloat> [[MASKEDOFF:%.*]], <vscale x 32 x bfloat> [[OP1:%.*]], i64 noundef [[VL:%.*]]) #[[ATTR0]] {
+// CHECK-RV64-NEXT: [[ENTRY:.*:]]
+// CHECK-RV64-NEXT: [[TMP0:%.*]] = call <vscale x 32 x bfloat> @llvm.riscv.vfrec7.mask.nxv32bf16.i64(<vscale x 32 x bfloat> [[MASKEDOFF]], <vscale x 32 x bfloat> [[OP1]], <vscale x 32 x i1> [[MASK]], i64 7, i64 [[VL]], i64 1)
+// CHECK-RV64-NEXT: ret <vscale x 32 x bfloat> [[TMP0]]
+//
+vbfloat16m8_t test_vfrec7_v_bf16m8_mu(vbool2_t mask, vbfloat16m8_t maskedoff, vbfloat16m8_t op1, size_t vl) {
+ return __riscv_vfrec7_v_bf16m8_mu(mask, maskedoff, op1, vl);
+}
+
diff --git a/clang/test/CodeGen/RISCV/rvv-intrinsics-autogenerated/zvfbfa/policy/non-overloaded/vfrsqrt7.c b/clang/test/CodeGen/RISCV/rvv-intrinsics-autogenerated/zvfbfa/policy/non-overloaded/vfrsqrt7.c
new file mode 100644
index 0000000..8436f0e
--- /dev/null
+++ b/clang/test/CodeGen/RISCV/rvv-intrinsics-autogenerated/zvfbfa/policy/non-overloaded/vfrsqrt7.c
@@ -0,0 +1,249 @@
+// NOTE: Assertions have been autogenerated by utils/update_cc_test_checks.py UTC_ARGS: --version 5
+// REQUIRES: riscv-registered-target
+// RUN: %clang_cc1 -triple riscv64 -target-feature +v \
+// RUN: -target-feature +experimental-zvfbfa -disable-O0-optnone \
+// RUN: -emit-llvm %s -o - | opt -S -passes=mem2reg | \
+// RUN: FileCheck --check-prefix=CHECK-RV64 %s
+
+#include <riscv_vector.h>
+
+// CHECK-RV64-LABEL: define dso_local <vscale x 1 x bfloat> @test_vfrsqrt7_v_bf16mf4_tu(
+// CHECK-RV64-SAME: <vscale x 1 x bfloat> [[MASKEDOFF:%.*]], <vscale x 1 x bfloat> [[OP1:%.*]], i64 noundef [[VL:%.*]]) #[[ATTR0:[0-9]+]] {
+// CHECK-RV64-NEXT: [[ENTRY:.*:]]
+// CHECK-RV64-NEXT: [[TMP0:%.*]] = call <vscale x 1 x bfloat> @llvm.riscv.vfrsqrt7.nxv1bf16.i64(<vscale x 1 x bfloat> [[MASKEDOFF]], <vscale x 1 x bfloat> [[OP1]], i64 [[VL]])
+// CHECK-RV64-NEXT: ret <vscale x 1 x bfloat> [[TMP0]]
+//
+vbfloat16mf4_t test_vfrsqrt7_v_bf16mf4_tu(vbfloat16mf4_t maskedoff, vbfloat16mf4_t op1, size_t vl) {
+ return __riscv_vfrsqrt7_v_bf16mf4_tu(maskedoff, op1, vl);
+}
+
+// CHECK-RV64-LABEL: define dso_local <vscale x 2 x bfloat> @test_vfrsqrt7_v_bf16mf2_tu(
+// CHECK-RV64-SAME: <vscale x 2 x bfloat> [[MASKEDOFF:%.*]], <vscale x 2 x bfloat> [[OP1:%.*]], i64 noundef [[VL:%.*]]) #[[ATTR0]] {
+// CHECK-RV64-NEXT: [[ENTRY:.*:]]
+// CHECK-RV64-NEXT: [[TMP0:%.*]] = call <vscale x 2 x bfloat> @llvm.riscv.vfrsqrt7.nxv2bf16.i64(<vscale x 2 x bfloat> [[MASKEDOFF]], <vscale x 2 x bfloat> [[OP1]], i64 [[VL]])
+// CHECK-RV64-NEXT: ret <vscale x 2 x bfloat> [[TMP0]]
+//
+vbfloat16mf2_t test_vfrsqrt7_v_bf16mf2_tu(vbfloat16mf2_t maskedoff, vbfloat16mf2_t op1, size_t vl) {
+ return __riscv_vfrsqrt7_v_bf16mf2_tu(maskedoff, op1, vl);
+}
+
+// CHECK-RV64-LABEL: define dso_local <vscale x 4 x bfloat> @test_vfrsqrt7_v_bf16m1_tu(
+// CHECK-RV64-SAME: <vscale x 4 x bfloat> [[MASKEDOFF:%.*]], <vscale x 4 x bfloat> [[OP1:%.*]], i64 noundef [[VL:%.*]]) #[[ATTR0]] {
+// CHECK-RV64-NEXT: [[ENTRY:.*:]]
+// CHECK-RV64-NEXT: [[TMP0:%.*]] = call <vscale x 4 x bfloat> @llvm.riscv.vfrsqrt7.nxv4bf16.i64(<vscale x 4 x bfloat> [[MASKEDOFF]], <vscale x 4 x bfloat> [[OP1]], i64 [[VL]])
+// CHECK-RV64-NEXT: ret <vscale x 4 x bfloat> [[TMP0]]
+//
+vbfloat16m1_t test_vfrsqrt7_v_bf16m1_tu(vbfloat16m1_t maskedoff, vbfloat16m1_t op1, size_t vl) {
+ return __riscv_vfrsqrt7_v_bf16m1_tu(maskedoff, op1, vl);
+}
+
+// CHECK-RV64-LABEL: define dso_local <vscale x 8 x bfloat> @test_vfrsqrt7_v_bf16m2_tu(
+// CHECK-RV64-SAME: <vscale x 8 x bfloat> [[MASKEDOFF:%.*]], <vscale x 8 x bfloat> [[OP1:%.*]], i64 noundef [[VL:%.*]]) #[[ATTR0]] {
+// CHECK-RV64-NEXT: [[ENTRY:.*:]]
+// CHECK-RV64-NEXT: [[TMP0:%.*]] = call <vscale x 8 x bfloat> @llvm.riscv.vfrsqrt7.nxv8bf16.i64(<vscale x 8 x bfloat> [[MASKEDOFF]], <vscale x 8 x bfloat> [[OP1]], i64 [[VL]])
+// CHECK-RV64-NEXT: ret <vscale x 8 x bfloat> [[TMP0]]
+//
+vbfloat16m2_t test_vfrsqrt7_v_bf16m2_tu(vbfloat16m2_t maskedoff, vbfloat16m2_t op1, size_t vl) {
+ return __riscv_vfrsqrt7_v_bf16m2_tu(maskedoff, op1, vl);
+}
+
+// CHECK-RV64-LABEL: define dso_local <vscale x 16 x bfloat> @test_vfrsqrt7_v_bf16m4_tu(
+// CHECK-RV64-SAME: <vscale x 16 x bfloat> [[MASKEDOFF:%.*]], <vscale x 16 x bfloat> [[OP1:%.*]], i64 noundef [[VL:%.*]]) #[[ATTR0]] {
+// CHECK-RV64-NEXT: [[ENTRY:.*:]]
+// CHECK-RV64-NEXT: [[TMP0:%.*]] = call <vscale x 16 x bfloat> @llvm.riscv.vfrsqrt7.nxv16bf16.i64(<vscale x 16 x bfloat> [[MASKEDOFF]], <vscale x 16 x bfloat> [[OP1]], i64 [[VL]])
+// CHECK-RV64-NEXT: ret <vscale x 16 x bfloat> [[TMP0]]
+//
+vbfloat16m4_t test_vfrsqrt7_v_bf16m4_tu(vbfloat16m4_t maskedoff, vbfloat16m4_t op1, size_t vl) {
+ return __riscv_vfrsqrt7_v_bf16m4_tu(maskedoff, op1, vl);
+}
+
+// CHECK-RV64-LABEL: define dso_local <vscale x 32 x bfloat> @test_vfrsqrt7_v_bf16m8_tu(
+// CHECK-RV64-SAME: <vscale x 32 x bfloat> [[MASKEDOFF:%.*]], <vscale x 32 x bfloat> [[OP1:%.*]], i64 noundef [[VL:%.*]]) #[[ATTR0]] {
+// CHECK-RV64-NEXT: [[ENTRY:.*:]]
+// CHECK-RV64-NEXT: [[TMP0:%.*]] = call <vscale x 32 x bfloat> @llvm.riscv.vfrsqrt7.nxv32bf16.i64(<vscale x 32 x bfloat> [[MASKEDOFF]], <vscale x 32 x bfloat> [[OP1]], i64 [[VL]])
+// CHECK-RV64-NEXT: ret <vscale x 32 x bfloat> [[TMP0]]
+//
+vbfloat16m8_t test_vfrsqrt7_v_bf16m8_tu(vbfloat16m8_t maskedoff, vbfloat16m8_t op1, size_t vl) {
+ return __riscv_vfrsqrt7_v_bf16m8_tu(maskedoff, op1, vl);
+}
+
+// CHECK-RV64-LABEL: define dso_local <vscale x 1 x bfloat> @test_vfrsqrt7_v_bf16mf4_tum(
+// CHECK-RV64-SAME: <vscale x 1 x i1> [[MASK:%.*]], <vscale x 1 x bfloat> [[MASKEDOFF:%.*]], <vscale x 1 x bfloat> [[OP1:%.*]], i64 noundef [[VL:%.*]]) #[[ATTR0]] {
+// CHECK-RV64-NEXT: [[ENTRY:.*:]]
+// CHECK-RV64-NEXT: [[TMP0:%.*]] = call <vscale x 1 x bfloat> @llvm.riscv.vfrsqrt7.mask.nxv1bf16.i64(<vscale x 1 x bfloat> [[MASKEDOFF]], <vscale x 1 x bfloat> [[OP1]], <vscale x 1 x i1> [[MASK]], i64 [[VL]], i64 2)
+// CHECK-RV64-NEXT: ret <vscale x 1 x bfloat> [[TMP0]]
+//
+vbfloat16mf4_t test_vfrsqrt7_v_bf16mf4_tum(vbool64_t mask, vbfloat16mf4_t maskedoff, vbfloat16mf4_t op1, size_t vl) {
+ return __riscv_vfrsqrt7_v_bf16mf4_tum(mask, maskedoff, op1, vl);
+}
+
+// CHECK-RV64-LABEL: define dso_local <vscale x 2 x bfloat> @test_vfrsqrt7_v_bf16mf2_tum(
+// CHECK-RV64-SAME: <vscale x 2 x i1> [[MASK:%.*]], <vscale x 2 x bfloat> [[MASKEDOFF:%.*]], <vscale x 2 x bfloat> [[OP1:%.*]], i64 noundef [[VL:%.*]]) #[[ATTR0]] {
+// CHECK-RV64-NEXT: [[ENTRY:.*:]]
+// CHECK-RV64-NEXT: [[TMP0:%.*]] = call <vscale x 2 x bfloat> @llvm.riscv.vfrsqrt7.mask.nxv2bf16.i64(<vscale x 2 x bfloat> [[MASKEDOFF]], <vscale x 2 x bfloat> [[OP1]], <vscale x 2 x i1> [[MASK]], i64 [[VL]], i64 2)
+// CHECK-RV64-NEXT: ret <vscale x 2 x bfloat> [[TMP0]]
+//
+vbfloat16mf2_t test_vfrsqrt7_v_bf16mf2_tum(vbool32_t mask, vbfloat16mf2_t maskedoff, vbfloat16mf2_t op1, size_t vl) {
+ return __riscv_vfrsqrt7_v_bf16mf2_tum(mask, maskedoff, op1, vl);
+}
+
+// CHECK-RV64-LABEL: define dso_local <vscale x 4 x bfloat> @test_vfrsqrt7_v_bf16m1_tum(
+// CHECK-RV64-SAME: <vscale x 4 x i1> [[MASK:%.*]], <vscale x 4 x bfloat> [[MASKEDOFF:%.*]], <vscale x 4 x bfloat> [[OP1:%.*]], i64 noundef [[VL:%.*]]) #[[ATTR0]] {
+// CHECK-RV64-NEXT: [[ENTRY:.*:]]
+// CHECK-RV64-NEXT: [[TMP0:%.*]] = call <vscale x 4 x bfloat> @llvm.riscv.vfrsqrt7.mask.nxv4bf16.i64(<vscale x 4 x bfloat> [[MASKEDOFF]], <vscale x 4 x bfloat> [[OP1]], <vscale x 4 x i1> [[MASK]], i64 [[VL]], i64 2)
+// CHECK-RV64-NEXT: ret <vscale x 4 x bfloat> [[TMP0]]
+//
+vbfloat16m1_t test_vfrsqrt7_v_bf16m1_tum(vbool16_t mask, vbfloat16m1_t maskedoff, vbfloat16m1_t op1, size_t vl) {
+ return __riscv_vfrsqrt7_v_bf16m1_tum(mask, maskedoff, op1, vl);
+}
+
+// CHECK-RV64-LABEL: define dso_local <vscale x 8 x bfloat> @test_vfrsqrt7_v_bf16m2_tum(
+// CHECK-RV64-SAME: <vscale x 8 x i1> [[MASK:%.*]], <vscale x 8 x bfloat> [[MASKEDOFF:%.*]], <vscale x 8 x bfloat> [[OP1:%.*]], i64 noundef [[VL:%.*]]) #[[ATTR0]] {
+// CHECK-RV64-NEXT: [[ENTRY:.*:]]
+// CHECK-RV64-NEXT: [[TMP0:%.*]] = call <vscale x 8 x bfloat> @llvm.riscv.vfrsqrt7.mask.nxv8bf16.i64(<vscale x 8 x bfloat> [[MASKEDOFF]], <vscale x 8 x bfloat> [[OP1]], <vscale x 8 x i1> [[MASK]], i64 [[VL]], i64 2)
+// CHECK-RV64-NEXT: ret <vscale x 8 x bfloat> [[TMP0]]
+//
+vbfloat16m2_t test_vfrsqrt7_v_bf16m2_tum(vbool8_t mask, vbfloat16m2_t maskedoff, vbfloat16m2_t op1, size_t vl) {
+ return __riscv_vfrsqrt7_v_bf16m2_tum(mask, maskedoff, op1, vl);
+}
+
+// CHECK-RV64-LABEL: define dso_local <vscale x 16 x bfloat> @test_vfrsqrt7_v_bf16m4_tum(
+// CHECK-RV64-SAME: <vscale x 16 x i1> [[MASK:%.*]], <vscale x 16 x bfloat> [[MASKEDOFF:%.*]], <vscale x 16 x bfloat> [[OP1:%.*]], i64 noundef [[VL:%.*]]) #[[ATTR0]] {
+// CHECK-RV64-NEXT: [[ENTRY:.*:]]
+// CHECK-RV64-NEXT: [[TMP0:%.*]] = call <vscale x 16 x bfloat> @llvm.riscv.vfrsqrt7.mask.nxv16bf16.i64(<vscale x 16 x bfloat> [[MASKEDOFF]], <vscale x 16 x bfloat> [[OP1]], <vscale x 16 x i1> [[MASK]], i64 [[VL]], i64 2)
+// CHECK-RV64-NEXT: ret <vscale x 16 x bfloat> [[TMP0]]
+//
+vbfloat16m4_t test_vfrsqrt7_v_bf16m4_tum(vbool4_t mask, vbfloat16m4_t maskedoff, vbfloat16m4_t op1, size_t vl) {
+ return __riscv_vfrsqrt7_v_bf16m4_tum(mask, maskedoff, op1, vl);
+}
+
+// CHECK-RV64-LABEL: define dso_local <vscale x 32 x bfloat> @test_vfrsqrt7_v_bf16m8_tum(
+// CHECK-RV64-SAME: <vscale x 32 x i1> [[MASK:%.*]], <vscale x 32 x bfloat> [[MASKEDOFF:%.*]], <vscale x 32 x bfloat> [[OP1:%.*]], i64 noundef [[VL:%.*]]) #[[ATTR0]] {
+// CHECK-RV64-NEXT: [[ENTRY:.*:]]
+// CHECK-RV64-NEXT: [[TMP0:%.*]] = call <vscale x 32 x bfloat> @llvm.riscv.vfrsqrt7.mask.nxv32bf16.i64(<vscale x 32 x bfloat> [[MASKEDOFF]], <vscale x 32 x bfloat> [[OP1]], <vscale x 32 x i1> [[MASK]], i64 [[VL]], i64 2)
+// CHECK-RV64-NEXT: ret <vscale x 32 x bfloat> [[TMP0]]
+//
+vbfloat16m8_t test_vfrsqrt7_v_bf16m8_tum(vbool2_t mask, vbfloat16m8_t maskedoff, vbfloat16m8_t op1, size_t vl) {
+ return __riscv_vfrsqrt7_v_bf16m8_tum(mask, maskedoff, op1, vl);
+}
+
+// CHECK-RV64-LABEL: define dso_local <vscale x 1 x bfloat> @test_vfrsqrt7_v_bf16mf4_tumu(
+// CHECK-RV64-SAME: <vscale x 1 x i1> [[MASK:%.*]], <vscale x 1 x bfloat> [[MASKEDOFF:%.*]], <vscale x 1 x bfloat> [[OP1:%.*]], i64 noundef [[VL:%.*]]) #[[ATTR0]] {
+// CHECK-RV64-NEXT: [[ENTRY:.*:]]
+// CHECK-RV64-NEXT: [[TMP0:%.*]] = call <vscale x 1 x bfloat> @llvm.riscv.vfrsqrt7.mask.nxv1bf16.i64(<vscale x 1 x bfloat> [[MASKEDOFF]], <vscale x 1 x bfloat> [[OP1]], <vscale x 1 x i1> [[MASK]], i64 [[VL]], i64 0)
+// CHECK-RV64-NEXT: ret <vscale x 1 x bfloat> [[TMP0]]
+//
+vbfloat16mf4_t test_vfrsqrt7_v_bf16mf4_tumu(vbool64_t mask, vbfloat16mf4_t maskedoff, vbfloat16mf4_t op1, size_t vl) {
+ return __riscv_vfrsqrt7_v_bf16mf4_tumu(mask, maskedoff, op1, vl);
+}
+
+// CHECK-RV64-LABEL: define dso_local <vscale x 2 x bfloat> @test_vfrsqrt7_v_bf16mf2_tumu(
+// CHECK-RV64-SAME: <vscale x 2 x i1> [[MASK:%.*]], <vscale x 2 x bfloat> [[MASKEDOFF:%.*]], <vscale x 2 x bfloat> [[OP1:%.*]], i64 noundef [[VL:%.*]]) #[[ATTR0]] {
+// CHECK-RV64-NEXT: [[ENTRY:.*:]]
+// CHECK-RV64-NEXT: [[TMP0:%.*]] = call <vscale x 2 x bfloat> @llvm.riscv.vfrsqrt7.mask.nxv2bf16.i64(<vscale x 2 x bfloat> [[MASKEDOFF]], <vscale x 2 x bfloat> [[OP1]], <vscale x 2 x i1> [[MASK]], i64 [[VL]], i64 0)
+// CHECK-RV64-NEXT: ret <vscale x 2 x bfloat> [[TMP0]]
+//
+vbfloat16mf2_t test_vfrsqrt7_v_bf16mf2_tumu(vbool32_t mask, vbfloat16mf2_t maskedoff, vbfloat16mf2_t op1, size_t vl) {
+ return __riscv_vfrsqrt7_v_bf16mf2_tumu(mask, maskedoff, op1, vl);
+}
+
+// CHECK-RV64-LABEL: define dso_local <vscale x 4 x bfloat> @test_vfrsqrt7_v_bf16m1_tumu(
+// CHECK-RV64-SAME: <vscale x 4 x i1> [[MASK:%.*]], <vscale x 4 x bfloat> [[MASKEDOFF:%.*]], <vscale x 4 x bfloat> [[OP1:%.*]], i64 noundef [[VL:%.*]]) #[[ATTR0]] {
+// CHECK-RV64-NEXT: [[ENTRY:.*:]]
+// CHECK-RV64-NEXT: [[TMP0:%.*]] = call <vscale x 4 x bfloat> @llvm.riscv.vfrsqrt7.mask.nxv4bf16.i64(<vscale x 4 x bfloat> [[MASKEDOFF]], <vscale x 4 x bfloat> [[OP1]], <vscale x 4 x i1> [[MASK]], i64 [[VL]], i64 0)
+// CHECK-RV64-NEXT: ret <vscale x 4 x bfloat> [[TMP0]]
+//
+vbfloat16m1_t test_vfrsqrt7_v_bf16m1_tumu(vbool16_t mask, vbfloat16m1_t maskedoff, vbfloat16m1_t op1, size_t vl) {
+ return __riscv_vfrsqrt7_v_bf16m1_tumu(mask, maskedoff, op1, vl);
+}
+
+// CHECK-RV64-LABEL: define dso_local <vscale x 8 x bfloat> @test_vfrsqrt7_v_bf16m2_tumu(
+// CHECK-RV64-SAME: <vscale x 8 x i1> [[MASK:%.*]], <vscale x 8 x bfloat> [[MASKEDOFF:%.*]], <vscale x 8 x bfloat> [[OP1:%.*]], i64 noundef [[VL:%.*]]) #[[ATTR0]] {
+// CHECK-RV64-NEXT: [[ENTRY:.*:]]
+// CHECK-RV64-NEXT: [[TMP0:%.*]] = call <vscale x 8 x bfloat> @llvm.riscv.vfrsqrt7.mask.nxv8bf16.i64(<vscale x 8 x bfloat> [[MASKEDOFF]], <vscale x 8 x bfloat> [[OP1]], <vscale x 8 x i1> [[MASK]], i64 [[VL]], i64 0)
+// CHECK-RV64-NEXT: ret <vscale x 8 x bfloat> [[TMP0]]
+//
+vbfloat16m2_t test_vfrsqrt7_v_bf16m2_tumu(vbool8_t mask, vbfloat16m2_t maskedoff, vbfloat16m2_t op1, size_t vl) {
+ return __riscv_vfrsqrt7_v_bf16m2_tumu(mask, maskedoff, op1, vl);
+}
+
+// CHECK-RV64-LABEL: define dso_local <vscale x 16 x bfloat> @test_vfrsqrt7_v_bf16m4_tumu(
+// CHECK-RV64-SAME: <vscale x 16 x i1> [[MASK:%.*]], <vscale x 16 x bfloat> [[MASKEDOFF:%.*]], <vscale x 16 x bfloat> [[OP1:%.*]], i64 noundef [[VL:%.*]]) #[[ATTR0]] {
+// CHECK-RV64-NEXT: [[ENTRY:.*:]]
+// CHECK-RV64-NEXT: [[TMP0:%.*]] = call <vscale x 16 x bfloat> @llvm.riscv.vfrsqrt7.mask.nxv16bf16.i64(<vscale x 16 x bfloat> [[MASKEDOFF]], <vscale x 16 x bfloat> [[OP1]], <vscale x 16 x i1> [[MASK]], i64 [[VL]], i64 0)
+// CHECK-RV64-NEXT: ret <vscale x 16 x bfloat> [[TMP0]]
+//
+vbfloat16m4_t test_vfrsqrt7_v_bf16m4_tumu(vbool4_t mask, vbfloat16m4_t maskedoff, vbfloat16m4_t op1, size_t vl) {
+ return __riscv_vfrsqrt7_v_bf16m4_tumu(mask, maskedoff, op1, vl);
+}
+
+// CHECK-RV64-LABEL: define dso_local <vscale x 32 x bfloat> @test_vfrsqrt7_v_bf16m8_tumu(
+// CHECK-RV64-SAME: <vscale x 32 x i1> [[MASK:%.*]], <vscale x 32 x bfloat> [[MASKEDOFF:%.*]], <vscale x 32 x bfloat> [[OP1:%.*]], i64 noundef [[VL:%.*]]) #[[ATTR0]] {
+// CHECK-RV64-NEXT: [[ENTRY:.*:]]
+// CHECK-RV64-NEXT: [[TMP0:%.*]] = call <vscale x 32 x bfloat> @llvm.riscv.vfrsqrt7.mask.nxv32bf16.i64(<vscale x 32 x bfloat> [[MASKEDOFF]], <vscale x 32 x bfloat> [[OP1]], <vscale x 32 x i1> [[MASK]], i64 [[VL]], i64 0)
+// CHECK-RV64-NEXT: ret <vscale x 32 x bfloat> [[TMP0]]
+//
+vbfloat16m8_t test_vfrsqrt7_v_bf16m8_tumu(vbool2_t mask, vbfloat16m8_t maskedoff, vbfloat16m8_t op1, size_t vl) {
+ return __riscv_vfrsqrt7_v_bf16m8_tumu(mask, maskedoff, op1, vl);
+}
+
+// CHECK-RV64-LABEL: define dso_local <vscale x 1 x bfloat> @test_vfrsqrt7_v_bf16mf4_mu(
+// CHECK-RV64-SAME: <vscale x 1 x i1> [[MASK:%.*]], <vscale x 1 x bfloat> [[MASKEDOFF:%.*]], <vscale x 1 x bfloat> [[OP1:%.*]], i64 noundef [[VL:%.*]]) #[[ATTR0]] {
+// CHECK-RV64-NEXT: [[ENTRY:.*:]]
+// CHECK-RV64-NEXT: [[TMP0:%.*]] = call <vscale x 1 x bfloat> @llvm.riscv.vfrsqrt7.mask.nxv1bf16.i64(<vscale x 1 x bfloat> [[MASKEDOFF]], <vscale x 1 x bfloat> [[OP1]], <vscale x 1 x i1> [[MASK]], i64 [[VL]], i64 1)
+// CHECK-RV64-NEXT: ret <vscale x 1 x bfloat> [[TMP0]]
+//
+vbfloat16mf4_t test_vfrsqrt7_v_bf16mf4_mu(vbool64_t mask, vbfloat16mf4_t maskedoff, vbfloat16mf4_t op1, size_t vl) {
+ return __riscv_vfrsqrt7_v_bf16mf4_mu(mask, maskedoff, op1, vl);
+}
+
+// CHECK-RV64-LABEL: define dso_local <vscale x 2 x bfloat> @test_vfrsqrt7_v_bf16mf2_mu(
+// CHECK-RV64-SAME: <vscale x 2 x i1> [[MASK:%.*]], <vscale x 2 x bfloat> [[MASKEDOFF:%.*]], <vscale x 2 x bfloat> [[OP1:%.*]], i64 noundef [[VL:%.*]]) #[[ATTR0]] {
+// CHECK-RV64-NEXT: [[ENTRY:.*:]]
+// CHECK-RV64-NEXT: [[TMP0:%.*]] = call <vscale x 2 x bfloat> @llvm.riscv.vfrsqrt7.mask.nxv2bf16.i64(<vscale x 2 x bfloat> [[MASKEDOFF]], <vscale x 2 x bfloat> [[OP1]], <vscale x 2 x i1> [[MASK]], i64 [[VL]], i64 1)
+// CHECK-RV64-NEXT: ret <vscale x 2 x bfloat> [[TMP0]]
+//
+vbfloat16mf2_t test_vfrsqrt7_v_bf16mf2_mu(vbool32_t mask, vbfloat16mf2_t maskedoff, vbfloat16mf2_t op1, size_t vl) {
+ return __riscv_vfrsqrt7_v_bf16mf2_mu(mask, maskedoff, op1, vl);
+}
+
+// CHECK-RV64-LABEL: define dso_local <vscale x 4 x bfloat> @test_vfrsqrt7_v_bf16m1_mu(
+// CHECK-RV64-SAME: <vscale x 4 x i1> [[MASK:%.*]], <vscale x 4 x bfloat> [[MASKEDOFF:%.*]], <vscale x 4 x bfloat> [[OP1:%.*]], i64 noundef [[VL:%.*]]) #[[ATTR0]] {
+// CHECK-RV64-NEXT: [[ENTRY:.*:]]
+// CHECK-RV64-NEXT: [[TMP0:%.*]] = call <vscale x 4 x bfloat> @llvm.riscv.vfrsqrt7.mask.nxv4bf16.i64(<vscale x 4 x bfloat> [[MASKEDOFF]], <vscale x 4 x bfloat> [[OP1]], <vscale x 4 x i1> [[MASK]], i64 [[VL]], i64 1)
+// CHECK-RV64-NEXT: ret <vscale x 4 x bfloat> [[TMP0]]
+//
+vbfloat16m1_t test_vfrsqrt7_v_bf16m1_mu(vbool16_t mask, vbfloat16m1_t maskedoff, vbfloat16m1_t op1, size_t vl) {
+ return __riscv_vfrsqrt7_v_bf16m1_mu(mask, maskedoff, op1, vl);
+}
+
+// CHECK-RV64-LABEL: define dso_local <vscale x 8 x bfloat> @test_vfrsqrt7_v_bf16m2_mu(
+// CHECK-RV64-SAME: <vscale x 8 x i1> [[MASK:%.*]], <vscale x 8 x bfloat> [[MASKEDOFF:%.*]], <vscale x 8 x bfloat> [[OP1:%.*]], i64 noundef [[VL:%.*]]) #[[ATTR0]] {
+// CHECK-RV64-NEXT: [[ENTRY:.*:]]
+// CHECK-RV64-NEXT: [[TMP0:%.*]] = call <vscale x 8 x bfloat> @llvm.riscv.vfrsqrt7.mask.nxv8bf16.i64(<vscale x 8 x bfloat> [[MASKEDOFF]], <vscale x 8 x bfloat> [[OP1]], <vscale x 8 x i1> [[MASK]], i64 [[VL]], i64 1)
+// CHECK-RV64-NEXT: ret <vscale x 8 x bfloat> [[TMP0]]
+//
+vbfloat16m2_t test_vfrsqrt7_v_bf16m2_mu(vbool8_t mask, vbfloat16m2_t maskedoff, vbfloat16m2_t op1, size_t vl) {
+ return __riscv_vfrsqrt7_v_bf16m2_mu(mask, maskedoff, op1, vl);
+}
+
+// CHECK-RV64-LABEL: define dso_local <vscale x 16 x bfloat> @test_vfrsqrt7_v_bf16m4_mu(
+// CHECK-RV64-SAME: <vscale x 16 x i1> [[MASK:%.*]], <vscale x 16 x bfloat> [[MASKEDOFF:%.*]], <vscale x 16 x bfloat> [[OP1:%.*]], i64 noundef [[VL:%.*]]) #[[ATTR0]] {
+// CHECK-RV64-NEXT: [[ENTRY:.*:]]
+// CHECK-RV64-NEXT: [[TMP0:%.*]] = call <vscale x 16 x bfloat> @llvm.riscv.vfrsqrt7.mask.nxv16bf16.i64(<vscale x 16 x bfloat> [[MASKEDOFF]], <vscale x 16 x bfloat> [[OP1]], <vscale x 16 x i1> [[MASK]], i64 [[VL]], i64 1)
+// CHECK-RV64-NEXT: ret <vscale x 16 x bfloat> [[TMP0]]
+//
+vbfloat16m4_t test_vfrsqrt7_v_bf16m4_mu(vbool4_t mask, vbfloat16m4_t maskedoff, vbfloat16m4_t op1, size_t vl) {
+ return __riscv_vfrsqrt7_v_bf16m4_mu(mask, maskedoff, op1, vl);
+}
+
+// CHECK-RV64-LABEL: define dso_local <vscale x 32 x bfloat> @test_vfrsqrt7_v_bf16m8_mu(
+// CHECK-RV64-SAME: <vscale x 32 x i1> [[MASK:%.*]], <vscale x 32 x bfloat> [[MASKEDOFF:%.*]], <vscale x 32 x bfloat> [[OP1:%.*]], i64 noundef [[VL:%.*]]) #[[ATTR0]] {
+// CHECK-RV64-NEXT: [[ENTRY:.*:]]
+// CHECK-RV64-NEXT: [[TMP0:%.*]] = call <vscale x 32 x bfloat> @llvm.riscv.vfrsqrt7.mask.nxv32bf16.i64(<vscale x 32 x bfloat> [[MASKEDOFF]], <vscale x 32 x bfloat> [[OP1]], <vscale x 32 x i1> [[MASK]], i64 [[VL]], i64 1)
+// CHECK-RV64-NEXT: ret <vscale x 32 x bfloat> [[TMP0]]
+//
+vbfloat16m8_t test_vfrsqrt7_v_bf16m8_mu(vbool2_t mask, vbfloat16m8_t maskedoff, vbfloat16m8_t op1, size_t vl) {
+ return __riscv_vfrsqrt7_v_bf16m8_mu(mask, maskedoff, op1, vl);
+}
+
diff --git a/clang/test/CodeGen/RISCV/rvv-intrinsics-autogenerated/zvfbfa/policy/non-overloaded/vfrsub.c b/clang/test/CodeGen/RISCV/rvv-intrinsics-autogenerated/zvfbfa/policy/non-overloaded/vfrsub.c
new file mode 100644
index 0000000..7dd2bb6
--- /dev/null
+++ b/clang/test/CodeGen/RISCV/rvv-intrinsics-autogenerated/zvfbfa/policy/non-overloaded/vfrsub.c
@@ -0,0 +1,249 @@
+// NOTE: Assertions have been autogenerated by utils/update_cc_test_checks.py UTC_ARGS: --version 5
+// REQUIRES: riscv-registered-target
+// RUN: %clang_cc1 -triple riscv64 -target-feature +v \
+// RUN: -target-feature +experimental-zvfbfa -disable-O0-optnone \
+// RUN: -emit-llvm %s -o - | opt -S -passes=mem2reg | \
+// RUN: FileCheck --check-prefix=CHECK-RV64 %s
+
+#include <riscv_vector.h>
+
+// CHECK-RV64-LABEL: define dso_local <vscale x 1 x bfloat> @test_vfrsub_vf_bf16mf4_tu(
+// CHECK-RV64-SAME: <vscale x 1 x bfloat> [[MASKEDOFF:%.*]], <vscale x 1 x bfloat> [[OP1:%.*]], bfloat noundef [[OP2:%.*]], i64 noundef [[VL:%.*]]) #[[ATTR0:[0-9]+]] {
+// CHECK-RV64-NEXT: [[ENTRY:.*:]]
+// CHECK-RV64-NEXT: [[TMP0:%.*]] = call <vscale x 1 x bfloat> @llvm.riscv.vfrsub.nxv1bf16.bf16.i64(<vscale x 1 x bfloat> [[MASKEDOFF]], <vscale x 1 x bfloat> [[OP1]], bfloat [[OP2]], i64 7, i64 [[VL]])
+// CHECK-RV64-NEXT: ret <vscale x 1 x bfloat> [[TMP0]]
+//
+vbfloat16mf4_t test_vfrsub_vf_bf16mf4_tu(vbfloat16mf4_t maskedoff, vbfloat16mf4_t op1, __bf16 op2, size_t vl) {
+ return __riscv_vfrsub_vf_bf16mf4_tu(maskedoff, op1, op2, vl);
+}
+
+// CHECK-RV64-LABEL: define dso_local <vscale x 2 x bfloat> @test_vfrsub_vf_bf16mf2_tu(
+// CHECK-RV64-SAME: <vscale x 2 x bfloat> [[MASKEDOFF:%.*]], <vscale x 2 x bfloat> [[OP1:%.*]], bfloat noundef [[OP2:%.*]], i64 noundef [[VL:%.*]]) #[[ATTR0]] {
+// CHECK-RV64-NEXT: [[ENTRY:.*:]]
+// CHECK-RV64-NEXT: [[TMP0:%.*]] = call <vscale x 2 x bfloat> @llvm.riscv.vfrsub.nxv2bf16.bf16.i64(<vscale x 2 x bfloat> [[MASKEDOFF]], <vscale x 2 x bfloat> [[OP1]], bfloat [[OP2]], i64 7, i64 [[VL]])
+// CHECK-RV64-NEXT: ret <vscale x 2 x bfloat> [[TMP0]]
+//
+vbfloat16mf2_t test_vfrsub_vf_bf16mf2_tu(vbfloat16mf2_t maskedoff, vbfloat16mf2_t op1, __bf16 op2, size_t vl) {
+ return __riscv_vfrsub_vf_bf16mf2_tu(maskedoff, op1, op2, vl);
+}
+
+// CHECK-RV64-LABEL: define dso_local <vscale x 4 x bfloat> @test_vfrsub_vf_bf16m1_tu(
+// CHECK-RV64-SAME: <vscale x 4 x bfloat> [[MASKEDOFF:%.*]], <vscale x 4 x bfloat> [[OP1:%.*]], bfloat noundef [[OP2:%.*]], i64 noundef [[VL:%.*]]) #[[ATTR0]] {
+// CHECK-RV64-NEXT: [[ENTRY:.*:]]
+// CHECK-RV64-NEXT: [[TMP0:%.*]] = call <vscale x 4 x bfloat> @llvm.riscv.vfrsub.nxv4bf16.bf16.i64(<vscale x 4 x bfloat> [[MASKEDOFF]], <vscale x 4 x bfloat> [[OP1]], bfloat [[OP2]], i64 7, i64 [[VL]])
+// CHECK-RV64-NEXT: ret <vscale x 4 x bfloat> [[TMP0]]
+//
+vbfloat16m1_t test_vfrsub_vf_bf16m1_tu(vbfloat16m1_t maskedoff, vbfloat16m1_t op1, __bf16 op2, size_t vl) {
+ return __riscv_vfrsub_vf_bf16m1_tu(maskedoff, op1, op2, vl);
+}
+
+// CHECK-RV64-LABEL: define dso_local <vscale x 8 x bfloat> @test_vfrsub_vf_bf16m2_tu(
+// CHECK-RV64-SAME: <vscale x 8 x bfloat> [[MASKEDOFF:%.*]], <vscale x 8 x bfloat> [[OP1:%.*]], bfloat noundef [[OP2:%.*]], i64 noundef [[VL:%.*]]) #[[ATTR0]] {
+// CHECK-RV64-NEXT: [[ENTRY:.*:]]
+// CHECK-RV64-NEXT: [[TMP0:%.*]] = call <vscale x 8 x bfloat> @llvm.riscv.vfrsub.nxv8bf16.bf16.i64(<vscale x 8 x bfloat> [[MASKEDOFF]], <vscale x 8 x bfloat> [[OP1]], bfloat [[OP2]], i64 7, i64 [[VL]])
+// CHECK-RV64-NEXT: ret <vscale x 8 x bfloat> [[TMP0]]
+//
+vbfloat16m2_t test_vfrsub_vf_bf16m2_tu(vbfloat16m2_t maskedoff, vbfloat16m2_t op1, __bf16 op2, size_t vl) {
+ return __riscv_vfrsub_vf_bf16m2_tu(maskedoff, op1, op2, vl);
+}
+
+// CHECK-RV64-LABEL: define dso_local <vscale x 16 x bfloat> @test_vfrsub_vf_bf16m4_tu(
+// CHECK-RV64-SAME: <vscale x 16 x bfloat> [[MASKEDOFF:%.*]], <vscale x 16 x bfloat> [[OP1:%.*]], bfloat noundef [[OP2:%.*]], i64 noundef [[VL:%.*]]) #[[ATTR0]] {
+// CHECK-RV64-NEXT: [[ENTRY:.*:]]
+// CHECK-RV64-NEXT: [[TMP0:%.*]] = call <vscale x 16 x bfloat> @llvm.riscv.vfrsub.nxv16bf16.bf16.i64(<vscale x 16 x bfloat> [[MASKEDOFF]], <vscale x 16 x bfloat> [[OP1]], bfloat [[OP2]], i64 7, i64 [[VL]])
+// CHECK-RV64-NEXT: ret <vscale x 16 x bfloat> [[TMP0]]
+//
+vbfloat16m4_t test_vfrsub_vf_bf16m4_tu(vbfloat16m4_t maskedoff, vbfloat16m4_t op1, __bf16 op2, size_t vl) {
+ return __riscv_vfrsub_vf_bf16m4_tu(maskedoff, op1, op2, vl);
+}
+
+// CHECK-RV64-LABEL: define dso_local <vscale x 32 x bfloat> @test_vfrsub_vf_bf16m8_tu(
+// CHECK-RV64-SAME: <vscale x 32 x bfloat> [[MASKEDOFF:%.*]], <vscale x 32 x bfloat> [[OP1:%.*]], bfloat noundef [[OP2:%.*]], i64 noundef [[VL:%.*]]) #[[ATTR0]] {
+// CHECK-RV64-NEXT: [[ENTRY:.*:]]
+// CHECK-RV64-NEXT: [[TMP0:%.*]] = call <vscale x 32 x bfloat> @llvm.riscv.vfrsub.nxv32bf16.bf16.i64(<vscale x 32 x bfloat> [[MASKEDOFF]], <vscale x 32 x bfloat> [[OP1]], bfloat [[OP2]], i64 7, i64 [[VL]])
+// CHECK-RV64-NEXT: ret <vscale x 32 x bfloat> [[TMP0]]
+//
+vbfloat16m8_t test_vfrsub_vf_bf16m8_tu(vbfloat16m8_t maskedoff, vbfloat16m8_t op1, __bf16 op2, size_t vl) {
+ return __riscv_vfrsub_vf_bf16m8_tu(maskedoff, op1, op2, vl);
+}
+
+// CHECK-RV64-LABEL: define dso_local <vscale x 1 x bfloat> @test_vfrsub_vf_bf16mf4_tum(
+// CHECK-RV64-SAME: <vscale x 1 x i1> [[MASK:%.*]], <vscale x 1 x bfloat> [[MASKEDOFF:%.*]], <vscale x 1 x bfloat> [[OP1:%.*]], bfloat noundef [[OP2:%.*]], i64 noundef [[VL:%.*]]) #[[ATTR0]] {
+// CHECK-RV64-NEXT: [[ENTRY:.*:]]
+// CHECK-RV64-NEXT: [[TMP0:%.*]] = call <vscale x 1 x bfloat> @llvm.riscv.vfrsub.mask.nxv1bf16.bf16.i64(<vscale x 1 x bfloat> [[MASKEDOFF]], <vscale x 1 x bfloat> [[OP1]], bfloat [[OP2]], <vscale x 1 x i1> [[MASK]], i64 7, i64 [[VL]], i64 2)
+// CHECK-RV64-NEXT: ret <vscale x 1 x bfloat> [[TMP0]]
+//
+vbfloat16mf4_t test_vfrsub_vf_bf16mf4_tum(vbool64_t mask, vbfloat16mf4_t maskedoff, vbfloat16mf4_t op1, __bf16 op2, size_t vl) {
+ return __riscv_vfrsub_vf_bf16mf4_tum(mask, maskedoff, op1, op2, vl);
+}
+
+// CHECK-RV64-LABEL: define dso_local <vscale x 2 x bfloat> @test_vfrsub_vf_bf16mf2_tum(
+// CHECK-RV64-SAME: <vscale x 2 x i1> [[MASK:%.*]], <vscale x 2 x bfloat> [[MASKEDOFF:%.*]], <vscale x 2 x bfloat> [[OP1:%.*]], bfloat noundef [[OP2:%.*]], i64 noundef [[VL:%.*]]) #[[ATTR0]] {
+// CHECK-RV64-NEXT: [[ENTRY:.*:]]
+// CHECK-RV64-NEXT: [[TMP0:%.*]] = call <vscale x 2 x bfloat> @llvm.riscv.vfrsub.mask.nxv2bf16.bf16.i64(<vscale x 2 x bfloat> [[MASKEDOFF]], <vscale x 2 x bfloat> [[OP1]], bfloat [[OP2]], <vscale x 2 x i1> [[MASK]], i64 7, i64 [[VL]], i64 2)
+// CHECK-RV64-NEXT: ret <vscale x 2 x bfloat> [[TMP0]]
+//
+vbfloat16mf2_t test_vfrsub_vf_bf16mf2_tum(vbool32_t mask, vbfloat16mf2_t maskedoff, vbfloat16mf2_t op1, __bf16 op2, size_t vl) {
+ return __riscv_vfrsub_vf_bf16mf2_tum(mask, maskedoff, op1, op2, vl);
+}
+
+// CHECK-RV64-LABEL: define dso_local <vscale x 4 x bfloat> @test_vfrsub_vf_bf16m1_tum(
+// CHECK-RV64-SAME: <vscale x 4 x i1> [[MASK:%.*]], <vscale x 4 x bfloat> [[MASKEDOFF:%.*]], <vscale x 4 x bfloat> [[OP1:%.*]], bfloat noundef [[OP2:%.*]], i64 noundef [[VL:%.*]]) #[[ATTR0]] {
+// CHECK-RV64-NEXT: [[ENTRY:.*:]]
+// CHECK-RV64-NEXT: [[TMP0:%.*]] = call <vscale x 4 x bfloat> @llvm.riscv.vfrsub.mask.nxv4bf16.bf16.i64(<vscale x 4 x bfloat> [[MASKEDOFF]], <vscale x 4 x bfloat> [[OP1]], bfloat [[OP2]], <vscale x 4 x i1> [[MASK]], i64 7, i64 [[VL]], i64 2)
+// CHECK-RV64-NEXT: ret <vscale x 4 x bfloat> [[TMP0]]
+//
+vbfloat16m1_t test_vfrsub_vf_bf16m1_tum(vbool16_t mask, vbfloat16m1_t maskedoff, vbfloat16m1_t op1, __bf16 op2, size_t vl) {
+ return __riscv_vfrsub_vf_bf16m1_tum(mask, maskedoff, op1, op2, vl);
+}
+
+// CHECK-RV64-LABEL: define dso_local <vscale x 8 x bfloat> @test_vfrsub_vf_bf16m2_tum(
+// CHECK-RV64-SAME: <vscale x 8 x i1> [[MASK:%.*]], <vscale x 8 x bfloat> [[MASKEDOFF:%.*]], <vscale x 8 x bfloat> [[OP1:%.*]], bfloat noundef [[OP2:%.*]], i64 noundef [[VL:%.*]]) #[[ATTR0]] {
+// CHECK-RV64-NEXT: [[ENTRY:.*:]]
+// CHECK-RV64-NEXT: [[TMP0:%.*]] = call <vscale x 8 x bfloat> @llvm.riscv.vfrsub.mask.nxv8bf16.bf16.i64(<vscale x 8 x bfloat> [[MASKEDOFF]], <vscale x 8 x bfloat> [[OP1]], bfloat [[OP2]], <vscale x 8 x i1> [[MASK]], i64 7, i64 [[VL]], i64 2)
+// CHECK-RV64-NEXT: ret <vscale x 8 x bfloat> [[TMP0]]
+//
+vbfloat16m2_t test_vfrsub_vf_bf16m2_tum(vbool8_t mask, vbfloat16m2_t maskedoff, vbfloat16m2_t op1, __bf16 op2, size_t vl) {
+ return __riscv_vfrsub_vf_bf16m2_tum(mask, maskedoff, op1, op2, vl);
+}
+
+// CHECK-RV64-LABEL: define dso_local <vscale x 16 x bfloat> @test_vfrsub_vf_bf16m4_tum(
+// CHECK-RV64-SAME: <vscale x 16 x i1> [[MASK:%.*]], <vscale x 16 x bfloat> [[MASKEDOFF:%.*]], <vscale x 16 x bfloat> [[OP1:%.*]], bfloat noundef [[OP2:%.*]], i64 noundef [[VL:%.*]]) #[[ATTR0]] {
+// CHECK-RV64-NEXT: [[ENTRY:.*:]]
+// CHECK-RV64-NEXT: [[TMP0:%.*]] = call <vscale x 16 x bfloat> @llvm.riscv.vfrsub.mask.nxv16bf16.bf16.i64(<vscale x 16 x bfloat> [[MASKEDOFF]], <vscale x 16 x bfloat> [[OP1]], bfloat [[OP2]], <vscale x 16 x i1> [[MASK]], i64 7, i64 [[VL]], i64 2)
+// CHECK-RV64-NEXT: ret <vscale x 16 x bfloat> [[TMP0]]
+//
+vbfloat16m4_t test_vfrsub_vf_bf16m4_tum(vbool4_t mask, vbfloat16m4_t maskedoff, vbfloat16m4_t op1, __bf16 op2, size_t vl) {
+ return __riscv_vfrsub_vf_bf16m4_tum(mask, maskedoff, op1, op2, vl);
+}
+
+// CHECK-RV64-LABEL: define dso_local <vscale x 32 x bfloat> @test_vfrsub_vf_bf16m8_tum(
+// CHECK-RV64-SAME: <vscale x 32 x i1> [[MASK:%.*]], <vscale x 32 x bfloat> [[MASKEDOFF:%.*]], <vscale x 32 x bfloat> [[OP1:%.*]], bfloat noundef [[OP2:%.*]], i64 noundef [[VL:%.*]]) #[[ATTR0]] {
+// CHECK-RV64-NEXT: [[ENTRY:.*:]]
+// CHECK-RV64-NEXT: [[TMP0:%.*]] = call <vscale x 32 x bfloat> @llvm.riscv.vfrsub.mask.nxv32bf16.bf16.i64(<vscale x 32 x bfloat> [[MASKEDOFF]], <vscale x 32 x bfloat> [[OP1]], bfloat [[OP2]], <vscale x 32 x i1> [[MASK]], i64 7, i64 [[VL]], i64 2)
+// CHECK-RV64-NEXT: ret <vscale x 32 x bfloat> [[TMP0]]
+//
+vbfloat16m8_t test_vfrsub_vf_bf16m8_tum(vbool2_t mask, vbfloat16m8_t maskedoff, vbfloat16m8_t op1, __bf16 op2, size_t vl) {
+ return __riscv_vfrsub_vf_bf16m8_tum(mask, maskedoff, op1, op2, vl);
+}
+
+// CHECK-RV64-LABEL: define dso_local <vscale x 1 x bfloat> @test_vfrsub_vf_bf16mf4_tumu(
+// CHECK-RV64-SAME: <vscale x 1 x i1> [[MASK:%.*]], <vscale x 1 x bfloat> [[MASKEDOFF:%.*]], <vscale x 1 x bfloat> [[OP1:%.*]], bfloat noundef [[OP2:%.*]], i64 noundef [[VL:%.*]]) #[[ATTR0]] {
+// CHECK-RV64-NEXT: [[ENTRY:.*:]]
+// CHECK-RV64-NEXT: [[TMP0:%.*]] = call <vscale x 1 x bfloat> @llvm.riscv.vfrsub.mask.nxv1bf16.bf16.i64(<vscale x 1 x bfloat> [[MASKEDOFF]], <vscale x 1 x bfloat> [[OP1]], bfloat [[OP2]], <vscale x 1 x i1> [[MASK]], i64 7, i64 [[VL]], i64 0)
+// CHECK-RV64-NEXT: ret <vscale x 1 x bfloat> [[TMP0]]
+//
+vbfloat16mf4_t test_vfrsub_vf_bf16mf4_tumu(vbool64_t mask, vbfloat16mf4_t maskedoff, vbfloat16mf4_t op1, __bf16 op2, size_t vl) {
+ return __riscv_vfrsub_vf_bf16mf4_tumu(mask, maskedoff, op1, op2, vl);
+}
+
+// CHECK-RV64-LABEL: define dso_local <vscale x 2 x bfloat> @test_vfrsub_vf_bf16mf2_tumu(
+// CHECK-RV64-SAME: <vscale x 2 x i1> [[MASK:%.*]], <vscale x 2 x bfloat> [[MASKEDOFF:%.*]], <vscale x 2 x bfloat> [[OP1:%.*]], bfloat noundef [[OP2:%.*]], i64 noundef [[VL:%.*]]) #[[ATTR0]] {
+// CHECK-RV64-NEXT: [[ENTRY:.*:]]
+// CHECK-RV64-NEXT: [[TMP0:%.*]] = call <vscale x 2 x bfloat> @llvm.riscv.vfrsub.mask.nxv2bf16.bf16.i64(<vscale x 2 x bfloat> [[MASKEDOFF]], <vscale x 2 x bfloat> [[OP1]], bfloat [[OP2]], <vscale x 2 x i1> [[MASK]], i64 7, i64 [[VL]], i64 0)
+// CHECK-RV64-NEXT: ret <vscale x 2 x bfloat> [[TMP0]]
+//
+vbfloat16mf2_t test_vfrsub_vf_bf16mf2_tumu(vbool32_t mask, vbfloat16mf2_t maskedoff, vbfloat16mf2_t op1, __bf16 op2, size_t vl) {
+ return __riscv_vfrsub_vf_bf16mf2_tumu(mask, maskedoff, op1, op2, vl);
+}
+
+// CHECK-RV64-LABEL: define dso_local <vscale x 4 x bfloat> @test_vfrsub_vf_bf16m1_tumu(
+// CHECK-RV64-SAME: <vscale x 4 x i1> [[MASK:%.*]], <vscale x 4 x bfloat> [[MASKEDOFF:%.*]], <vscale x 4 x bfloat> [[OP1:%.*]], bfloat noundef [[OP2:%.*]], i64 noundef [[VL:%.*]]) #[[ATTR0]] {
+// CHECK-RV64-NEXT: [[ENTRY:.*:]]
+// CHECK-RV64-NEXT: [[TMP0:%.*]] = call <vscale x 4 x bfloat> @llvm.riscv.vfrsub.mask.nxv4bf16.bf16.i64(<vscale x 4 x bfloat> [[MASKEDOFF]], <vscale x 4 x bfloat> [[OP1]], bfloat [[OP2]], <vscale x 4 x i1> [[MASK]], i64 7, i64 [[VL]], i64 0)
+// CHECK-RV64-NEXT: ret <vscale x 4 x bfloat> [[TMP0]]
+//
+vbfloat16m1_t test_vfrsub_vf_bf16m1_tumu(vbool16_t mask, vbfloat16m1_t maskedoff, vbfloat16m1_t op1, __bf16 op2, size_t vl) {
+ return __riscv_vfrsub_vf_bf16m1_tumu(mask, maskedoff, op1, op2, vl);
+}
+
+// CHECK-RV64-LABEL: define dso_local <vscale x 8 x bfloat> @test_vfrsub_vf_bf16m2_tumu(
+// CHECK-RV64-SAME: <vscale x 8 x i1> [[MASK:%.*]], <vscale x 8 x bfloat> [[MASKEDOFF:%.*]], <vscale x 8 x bfloat> [[OP1:%.*]], bfloat noundef [[OP2:%.*]], i64 noundef [[VL:%.*]]) #[[ATTR0]] {
+// CHECK-RV64-NEXT: [[ENTRY:.*:]]
+// CHECK-RV64-NEXT: [[TMP0:%.*]] = call <vscale x 8 x bfloat> @llvm.riscv.vfrsub.mask.nxv8bf16.bf16.i64(<vscale x 8 x bfloat> [[MASKEDOFF]], <vscale x 8 x bfloat> [[OP1]], bfloat [[OP2]], <vscale x 8 x i1> [[MASK]], i64 7, i64 [[VL]], i64 0)
+// CHECK-RV64-NEXT: ret <vscale x 8 x bfloat> [[TMP0]]
+//
+vbfloat16m2_t test_vfrsub_vf_bf16m2_tumu(vbool8_t mask, vbfloat16m2_t maskedoff, vbfloat16m2_t op1, __bf16 op2, size_t vl) {
+ return __riscv_vfrsub_vf_bf16m2_tumu(mask, maskedoff, op1, op2, vl);
+}
+
+// CHECK-RV64-LABEL: define dso_local <vscale x 16 x bfloat> @test_vfrsub_vf_bf16m4_tumu(
+// CHECK-RV64-SAME: <vscale x 16 x i1> [[MASK:%.*]], <vscale x 16 x bfloat> [[MASKEDOFF:%.*]], <vscale x 16 x bfloat> [[OP1:%.*]], bfloat noundef [[OP2:%.*]], i64 noundef [[VL:%.*]]) #[[ATTR0]] {
+// CHECK-RV64-NEXT: [[ENTRY:.*:]]
+// CHECK-RV64-NEXT: [[TMP0:%.*]] = call <vscale x 16 x bfloat> @llvm.riscv.vfrsub.mask.nxv16bf16.bf16.i64(<vscale x 16 x bfloat> [[MASKEDOFF]], <vscale x 16 x bfloat> [[OP1]], bfloat [[OP2]], <vscale x 16 x i1> [[MASK]], i64 7, i64 [[VL]], i64 0)
+// CHECK-RV64-NEXT: ret <vscale x 16 x bfloat> [[TMP0]]
+//
+vbfloat16m4_t test_vfrsub_vf_bf16m4_tumu(vbool4_t mask, vbfloat16m4_t maskedoff, vbfloat16m4_t op1, __bf16 op2, size_t vl) {
+ return __riscv_vfrsub_vf_bf16m4_tumu(mask, maskedoff, op1, op2, vl);
+}
+
+// CHECK-RV64-LABEL: define dso_local <vscale x 32 x bfloat> @test_vfrsub_vf_bf16m8_tumu(
+// CHECK-RV64-SAME: <vscale x 32 x i1> [[MASK:%.*]], <vscale x 32 x bfloat> [[MASKEDOFF:%.*]], <vscale x 32 x bfloat> [[OP1:%.*]], bfloat noundef [[OP2:%.*]], i64 noundef [[VL:%.*]]) #[[ATTR0]] {
+// CHECK-RV64-NEXT: [[ENTRY:.*:]]
+// CHECK-RV64-NEXT: [[TMP0:%.*]] = call <vscale x 32 x bfloat> @llvm.riscv.vfrsub.mask.nxv32bf16.bf16.i64(<vscale x 32 x bfloat> [[MASKEDOFF]], <vscale x 32 x bfloat> [[OP1]], bfloat [[OP2]], <vscale x 32 x i1> [[MASK]], i64 7, i64 [[VL]], i64 0)
+// CHECK-RV64-NEXT: ret <vscale x 32 x bfloat> [[TMP0]]
+//
+vbfloat16m8_t test_vfrsub_vf_bf16m8_tumu(vbool2_t mask, vbfloat16m8_t maskedoff, vbfloat16m8_t op1, __bf16 op2, size_t vl) {
+ return __riscv_vfrsub_vf_bf16m8_tumu(mask, maskedoff, op1, op2, vl);
+}
+
+// CHECK-RV64-LABEL: define dso_local <vscale x 1 x bfloat> @test_vfrsub_vf_bf16mf4_mu(
+// CHECK-RV64-SAME: <vscale x 1 x i1> [[MASK:%.*]], <vscale x 1 x bfloat> [[MASKEDOFF:%.*]], <vscale x 1 x bfloat> [[OP1:%.*]], bfloat noundef [[OP2:%.*]], i64 noundef [[VL:%.*]]) #[[ATTR0]] {
+// CHECK-RV64-NEXT: [[ENTRY:.*:]]
+// CHECK-RV64-NEXT: [[TMP0:%.*]] = call <vscale x 1 x bfloat> @llvm.riscv.vfrsub.mask.nxv1bf16.bf16.i64(<vscale x 1 x bfloat> [[MASKEDOFF]], <vscale x 1 x bfloat> [[OP1]], bfloat [[OP2]], <vscale x 1 x i1> [[MASK]], i64 7, i64 [[VL]], i64 1)
+// CHECK-RV64-NEXT: ret <vscale x 1 x bfloat> [[TMP0]]
+//
+vbfloat16mf4_t test_vfrsub_vf_bf16mf4_mu(vbool64_t mask, vbfloat16mf4_t maskedoff, vbfloat16mf4_t op1, __bf16 op2, size_t vl) {
+ return __riscv_vfrsub_vf_bf16mf4_mu(mask, maskedoff, op1, op2, vl);
+}
+
+// CHECK-RV64-LABEL: define dso_local <vscale x 2 x bfloat> @test_vfrsub_vf_bf16mf2_mu(
+// CHECK-RV64-SAME: <vscale x 2 x i1> [[MASK:%.*]], <vscale x 2 x bfloat> [[MASKEDOFF:%.*]], <vscale x 2 x bfloat> [[OP1:%.*]], bfloat noundef [[OP2:%.*]], i64 noundef [[VL:%.*]]) #[[ATTR0]] {
+// CHECK-RV64-NEXT: [[ENTRY:.*:]]
+// CHECK-RV64-NEXT: [[TMP0:%.*]] = call <vscale x 2 x bfloat> @llvm.riscv.vfrsub.mask.nxv2bf16.bf16.i64(<vscale x 2 x bfloat> [[MASKEDOFF]], <vscale x 2 x bfloat> [[OP1]], bfloat [[OP2]], <vscale x 2 x i1> [[MASK]], i64 7, i64 [[VL]], i64 1)
+// CHECK-RV64-NEXT: ret <vscale x 2 x bfloat> [[TMP0]]
+//
+vbfloat16mf2_t test_vfrsub_vf_bf16mf2_mu(vbool32_t mask, vbfloat16mf2_t maskedoff, vbfloat16mf2_t op1, __bf16 op2, size_t vl) {
+ return __riscv_vfrsub_vf_bf16mf2_mu(mask, maskedoff, op1, op2, vl);
+}
+
+// CHECK-RV64-LABEL: define dso_local <vscale x 4 x bfloat> @test_vfrsub_vf_bf16m1_mu(
+// CHECK-RV64-SAME: <vscale x 4 x i1> [[MASK:%.*]], <vscale x 4 x bfloat> [[MASKEDOFF:%.*]], <vscale x 4 x bfloat> [[OP1:%.*]], bfloat noundef [[OP2:%.*]], i64 noundef [[VL:%.*]]) #[[ATTR0]] {
+// CHECK-RV64-NEXT: [[ENTRY:.*:]]
+// CHECK-RV64-NEXT: [[TMP0:%.*]] = call <vscale x 4 x bfloat> @llvm.riscv.vfrsub.mask.nxv4bf16.bf16.i64(<vscale x 4 x bfloat> [[MASKEDOFF]], <vscale x 4 x bfloat> [[OP1]], bfloat [[OP2]], <vscale x 4 x i1> [[MASK]], i64 7, i64 [[VL]], i64 1)
+// CHECK-RV64-NEXT: ret <vscale x 4 x bfloat> [[TMP0]]
+//
+vbfloat16m1_t test_vfrsub_vf_bf16m1_mu(vbool16_t mask, vbfloat16m1_t maskedoff, vbfloat16m1_t op1, __bf16 op2, size_t vl) {
+ return __riscv_vfrsub_vf_bf16m1_mu(mask, maskedoff, op1, op2, vl);
+}
+
+// CHECK-RV64-LABEL: define dso_local <vscale x 8 x bfloat> @test_vfrsub_vf_bf16m2_mu(
+// CHECK-RV64-SAME: <vscale x 8 x i1> [[MASK:%.*]], <vscale x 8 x bfloat> [[MASKEDOFF:%.*]], <vscale x 8 x bfloat> [[OP1:%.*]], bfloat noundef [[OP2:%.*]], i64 noundef [[VL:%.*]]) #[[ATTR0]] {
+// CHECK-RV64-NEXT: [[ENTRY:.*:]]
+// CHECK-RV64-NEXT: [[TMP0:%.*]] = call <vscale x 8 x bfloat> @llvm.riscv.vfrsub.mask.nxv8bf16.bf16.i64(<vscale x 8 x bfloat> [[MASKEDOFF]], <vscale x 8 x bfloat> [[OP1]], bfloat [[OP2]], <vscale x 8 x i1> [[MASK]], i64 7, i64 [[VL]], i64 1)
+// CHECK-RV64-NEXT: ret <vscale x 8 x bfloat> [[TMP0]]
+//
+vbfloat16m2_t test_vfrsub_vf_bf16m2_mu(vbool8_t mask, vbfloat16m2_t maskedoff, vbfloat16m2_t op1, __bf16 op2, size_t vl) {
+ return __riscv_vfrsub_vf_bf16m2_mu(mask, maskedoff, op1, op2, vl);
+}
+
+// CHECK-RV64-LABEL: define dso_local <vscale x 16 x bfloat> @test_vfrsub_vf_bf16m4_mu(
+// CHECK-RV64-SAME: <vscale x 16 x i1> [[MASK:%.*]], <vscale x 16 x bfloat> [[MASKEDOFF:%.*]], <vscale x 16 x bfloat> [[OP1:%.*]], bfloat noundef [[OP2:%.*]], i64 noundef [[VL:%.*]]) #[[ATTR0]] {
+// CHECK-RV64-NEXT: [[ENTRY:.*:]]
+// CHECK-RV64-NEXT: [[TMP0:%.*]] = call <vscale x 16 x bfloat> @llvm.riscv.vfrsub.mask.nxv16bf16.bf16.i64(<vscale x 16 x bfloat> [[MASKEDOFF]], <vscale x 16 x bfloat> [[OP1]], bfloat [[OP2]], <vscale x 16 x i1> [[MASK]], i64 7, i64 [[VL]], i64 1)
+// CHECK-RV64-NEXT: ret <vscale x 16 x bfloat> [[TMP0]]
+//
+vbfloat16m4_t test_vfrsub_vf_bf16m4_mu(vbool4_t mask, vbfloat16m4_t maskedoff, vbfloat16m4_t op1, __bf16 op2, size_t vl) {
+ return __riscv_vfrsub_vf_bf16m4_mu(mask, maskedoff, op1, op2, vl);
+}
+
+// CHECK-RV64-LABEL: define dso_local <vscale x 32 x bfloat> @test_vfrsub_vf_bf16m8_mu(
+// CHECK-RV64-SAME: <vscale x 32 x i1> [[MASK:%.*]], <vscale x 32 x bfloat> [[MASKEDOFF:%.*]], <vscale x 32 x bfloat> [[OP1:%.*]], bfloat noundef [[OP2:%.*]], i64 noundef [[VL:%.*]]) #[[ATTR0]] {
+// CHECK-RV64-NEXT: [[ENTRY:.*:]]
+// CHECK-RV64-NEXT: [[TMP0:%.*]] = call <vscale x 32 x bfloat> @llvm.riscv.vfrsub.mask.nxv32bf16.bf16.i64(<vscale x 32 x bfloat> [[MASKEDOFF]], <vscale x 32 x bfloat> [[OP1]], bfloat [[OP2]], <vscale x 32 x i1> [[MASK]], i64 7, i64 [[VL]], i64 1)
+// CHECK-RV64-NEXT: ret <vscale x 32 x bfloat> [[TMP0]]
+//
+vbfloat16m8_t test_vfrsub_vf_bf16m8_mu(vbool2_t mask, vbfloat16m8_t maskedoff, vbfloat16m8_t op1, __bf16 op2, size_t vl) {
+ return __riscv_vfrsub_vf_bf16m8_mu(mask, maskedoff, op1, op2, vl);
+}
+
diff --git a/clang/test/CodeGen/RISCV/rvv-intrinsics-autogenerated/zvfbfa/policy/non-overloaded/vfsgnj.c b/clang/test/CodeGen/RISCV/rvv-intrinsics-autogenerated/zvfbfa/policy/non-overloaded/vfsgnj.c
new file mode 100644
index 0000000..b39a0be
--- /dev/null
+++ b/clang/test/CodeGen/RISCV/rvv-intrinsics-autogenerated/zvfbfa/policy/non-overloaded/vfsgnj.c
@@ -0,0 +1,489 @@
+// NOTE: Assertions have been autogenerated by utils/update_cc_test_checks.py UTC_ARGS: --version 5
+// REQUIRES: riscv-registered-target
+// RUN: %clang_cc1 -triple riscv64 -target-feature +v \
+// RUN: -target-feature +experimental-zvfbfa -disable-O0-optnone \
+// RUN: -emit-llvm %s -o - | opt -S -passes=mem2reg | \
+// RUN: FileCheck --check-prefix=CHECK-RV64 %s
+
+#include <riscv_vector.h>
+
+// CHECK-RV64-LABEL: define dso_local <vscale x 1 x bfloat> @test_vfsgnj_vv_bf16mf4_tu(
+// CHECK-RV64-SAME: <vscale x 1 x bfloat> [[MASKEDOFF:%.*]], <vscale x 1 x bfloat> [[OP1:%.*]], <vscale x 1 x bfloat> [[OP2:%.*]], i64 noundef [[VL:%.*]]) #[[ATTR0:[0-9]+]] {
+// CHECK-RV64-NEXT: [[ENTRY:.*:]]
+// CHECK-RV64-NEXT: [[TMP0:%.*]] = call <vscale x 1 x bfloat> @llvm.riscv.vfsgnj.nxv1bf16.nxv1bf16.i64(<vscale x 1 x bfloat> [[MASKEDOFF]], <vscale x 1 x bfloat> [[OP1]], <vscale x 1 x bfloat> [[OP2]], i64 [[VL]])
+// CHECK-RV64-NEXT: ret <vscale x 1 x bfloat> [[TMP0]]
+//
+vbfloat16mf4_t test_vfsgnj_vv_bf16mf4_tu(vbfloat16mf4_t maskedoff, vbfloat16mf4_t op1, vbfloat16mf4_t op2, size_t vl) {
+ return __riscv_vfsgnj_vv_bf16mf4_tu(maskedoff, op1, op2, vl);
+}
+
+// CHECK-RV64-LABEL: define dso_local <vscale x 1 x bfloat> @test_vfsgnj_vf_bf16mf4_tu(
+// CHECK-RV64-SAME: <vscale x 1 x bfloat> [[MASKEDOFF:%.*]], <vscale x 1 x bfloat> [[OP1:%.*]], bfloat noundef [[OP2:%.*]], i64 noundef [[VL:%.*]]) #[[ATTR0]] {
+// CHECK-RV64-NEXT: [[ENTRY:.*:]]
+// CHECK-RV64-NEXT: [[TMP0:%.*]] = call <vscale x 1 x bfloat> @llvm.riscv.vfsgnj.nxv1bf16.bf16.i64(<vscale x 1 x bfloat> [[MASKEDOFF]], <vscale x 1 x bfloat> [[OP1]], bfloat [[OP2]], i64 [[VL]])
+// CHECK-RV64-NEXT: ret <vscale x 1 x bfloat> [[TMP0]]
+//
+vbfloat16mf4_t test_vfsgnj_vf_bf16mf4_tu(vbfloat16mf4_t maskedoff, vbfloat16mf4_t op1, __bf16 op2, size_t vl) {
+ return __riscv_vfsgnj_vf_bf16mf4_tu(maskedoff, op1, op2, vl);
+}
+
+// CHECK-RV64-LABEL: define dso_local <vscale x 2 x bfloat> @test_vfsgnj_vv_bf16mf2_tu(
+// CHECK-RV64-SAME: <vscale x 2 x bfloat> [[MASKEDOFF:%.*]], <vscale x 2 x bfloat> [[OP1:%.*]], <vscale x 2 x bfloat> [[OP2:%.*]], i64 noundef [[VL:%.*]]) #[[ATTR0]] {
+// CHECK-RV64-NEXT: [[ENTRY:.*:]]
+// CHECK-RV64-NEXT: [[TMP0:%.*]] = call <vscale x 2 x bfloat> @llvm.riscv.vfsgnj.nxv2bf16.nxv2bf16.i64(<vscale x 2 x bfloat> [[MASKEDOFF]], <vscale x 2 x bfloat> [[OP1]], <vscale x 2 x bfloat> [[OP2]], i64 [[VL]])
+// CHECK-RV64-NEXT: ret <vscale x 2 x bfloat> [[TMP0]]
+//
+vbfloat16mf2_t test_vfsgnj_vv_bf16mf2_tu(vbfloat16mf2_t maskedoff, vbfloat16mf2_t op1, vbfloat16mf2_t op2, size_t vl) {
+ return __riscv_vfsgnj_vv_bf16mf2_tu(maskedoff, op1, op2, vl);
+}
+
+// CHECK-RV64-LABEL: define dso_local <vscale x 2 x bfloat> @test_vfsgnj_vf_bf16mf2_tu(
+// CHECK-RV64-SAME: <vscale x 2 x bfloat> [[MASKEDOFF:%.*]], <vscale x 2 x bfloat> [[OP1:%.*]], bfloat noundef [[OP2:%.*]], i64 noundef [[VL:%.*]]) #[[ATTR0]] {
+// CHECK-RV64-NEXT: [[ENTRY:.*:]]
+// CHECK-RV64-NEXT: [[TMP0:%.*]] = call <vscale x 2 x bfloat> @llvm.riscv.vfsgnj.nxv2bf16.bf16.i64(<vscale x 2 x bfloat> [[MASKEDOFF]], <vscale x 2 x bfloat> [[OP1]], bfloat [[OP2]], i64 [[VL]])
+// CHECK-RV64-NEXT: ret <vscale x 2 x bfloat> [[TMP0]]
+//
+vbfloat16mf2_t test_vfsgnj_vf_bf16mf2_tu(vbfloat16mf2_t maskedoff, vbfloat16mf2_t op1, __bf16 op2, size_t vl) {
+ return __riscv_vfsgnj_vf_bf16mf2_tu(maskedoff, op1, op2, vl);
+}
+
+// CHECK-RV64-LABEL: define dso_local <vscale x 4 x bfloat> @test_vfsgnj_vv_bf16m1_tu(
+// CHECK-RV64-SAME: <vscale x 4 x bfloat> [[MASKEDOFF:%.*]], <vscale x 4 x bfloat> [[OP1:%.*]], <vscale x 4 x bfloat> [[OP2:%.*]], i64 noundef [[VL:%.*]]) #[[ATTR0]] {
+// CHECK-RV64-NEXT: [[ENTRY:.*:]]
+// CHECK-RV64-NEXT: [[TMP0:%.*]] = call <vscale x 4 x bfloat> @llvm.riscv.vfsgnj.nxv4bf16.nxv4bf16.i64(<vscale x 4 x bfloat> [[MASKEDOFF]], <vscale x 4 x bfloat> [[OP1]], <vscale x 4 x bfloat> [[OP2]], i64 [[VL]])
+// CHECK-RV64-NEXT: ret <vscale x 4 x bfloat> [[TMP0]]
+//
+vbfloat16m1_t test_vfsgnj_vv_bf16m1_tu(vbfloat16m1_t maskedoff, vbfloat16m1_t op1, vbfloat16m1_t op2, size_t vl) {
+ return __riscv_vfsgnj_vv_bf16m1_tu(maskedoff, op1, op2, vl);
+}
+
+// CHECK-RV64-LABEL: define dso_local <vscale x 4 x bfloat> @test_vfsgnj_vf_bf16m1_tu(
+// CHECK-RV64-SAME: <vscale x 4 x bfloat> [[MASKEDOFF:%.*]], <vscale x 4 x bfloat> [[OP1:%.*]], bfloat noundef [[OP2:%.*]], i64 noundef [[VL:%.*]]) #[[ATTR0]] {
+// CHECK-RV64-NEXT: [[ENTRY:.*:]]
+// CHECK-RV64-NEXT: [[TMP0:%.*]] = call <vscale x 4 x bfloat> @llvm.riscv.vfsgnj.nxv4bf16.bf16.i64(<vscale x 4 x bfloat> [[MASKEDOFF]], <vscale x 4 x bfloat> [[OP1]], bfloat [[OP2]], i64 [[VL]])
+// CHECK-RV64-NEXT: ret <vscale x 4 x bfloat> [[TMP0]]
+//
+vbfloat16m1_t test_vfsgnj_vf_bf16m1_tu(vbfloat16m1_t maskedoff, vbfloat16m1_t op1, __bf16 op2, size_t vl) {
+ return __riscv_vfsgnj_vf_bf16m1_tu(maskedoff, op1, op2, vl);
+}
+
+// CHECK-RV64-LABEL: define dso_local <vscale x 8 x bfloat> @test_vfsgnj_vv_bf16m2_tu(
+// CHECK-RV64-SAME: <vscale x 8 x bfloat> [[MASKEDOFF:%.*]], <vscale x 8 x bfloat> [[OP1:%.*]], <vscale x 8 x bfloat> [[OP2:%.*]], i64 noundef [[VL:%.*]]) #[[ATTR0]] {
+// CHECK-RV64-NEXT: [[ENTRY:.*:]]
+// CHECK-RV64-NEXT: [[TMP0:%.*]] = call <vscale x 8 x bfloat> @llvm.riscv.vfsgnj.nxv8bf16.nxv8bf16.i64(<vscale x 8 x bfloat> [[MASKEDOFF]], <vscale x 8 x bfloat> [[OP1]], <vscale x 8 x bfloat> [[OP2]], i64 [[VL]])
+// CHECK-RV64-NEXT: ret <vscale x 8 x bfloat> [[TMP0]]
+//
+vbfloat16m2_t test_vfsgnj_vv_bf16m2_tu(vbfloat16m2_t maskedoff, vbfloat16m2_t op1, vbfloat16m2_t op2, size_t vl) {
+ return __riscv_vfsgnj_vv_bf16m2_tu(maskedoff, op1, op2, vl);
+}
+
+// CHECK-RV64-LABEL: define dso_local <vscale x 8 x bfloat> @test_vfsgnj_vf_bf16m2_tu(
+// CHECK-RV64-SAME: <vscale x 8 x bfloat> [[MASKEDOFF:%.*]], <vscale x 8 x bfloat> [[OP1:%.*]], bfloat noundef [[OP2:%.*]], i64 noundef [[VL:%.*]]) #[[ATTR0]] {
+// CHECK-RV64-NEXT: [[ENTRY:.*:]]
+// CHECK-RV64-NEXT: [[TMP0:%.*]] = call <vscale x 8 x bfloat> @llvm.riscv.vfsgnj.nxv8bf16.bf16.i64(<vscale x 8 x bfloat> [[MASKEDOFF]], <vscale x 8 x bfloat> [[OP1]], bfloat [[OP2]], i64 [[VL]])
+// CHECK-RV64-NEXT: ret <vscale x 8 x bfloat> [[TMP0]]
+//
+vbfloat16m2_t test_vfsgnj_vf_bf16m2_tu(vbfloat16m2_t maskedoff, vbfloat16m2_t op1, __bf16 op2, size_t vl) {
+ return __riscv_vfsgnj_vf_bf16m2_tu(maskedoff, op1, op2, vl);
+}
+
+// CHECK-RV64-LABEL: define dso_local <vscale x 16 x bfloat> @test_vfsgnj_vv_bf16m4_tu(
+// CHECK-RV64-SAME: <vscale x 16 x bfloat> [[MASKEDOFF:%.*]], <vscale x 16 x bfloat> [[OP1:%.*]], <vscale x 16 x bfloat> [[OP2:%.*]], i64 noundef [[VL:%.*]]) #[[ATTR0]] {
+// CHECK-RV64-NEXT: [[ENTRY:.*:]]
+// CHECK-RV64-NEXT: [[TMP0:%.*]] = call <vscale x 16 x bfloat> @llvm.riscv.vfsgnj.nxv16bf16.nxv16bf16.i64(<vscale x 16 x bfloat> [[MASKEDOFF]], <vscale x 16 x bfloat> [[OP1]], <vscale x 16 x bfloat> [[OP2]], i64 [[VL]])
+// CHECK-RV64-NEXT: ret <vscale x 16 x bfloat> [[TMP0]]
+//
+vbfloat16m4_t test_vfsgnj_vv_bf16m4_tu(vbfloat16m4_t maskedoff, vbfloat16m4_t op1, vbfloat16m4_t op2, size_t vl) {
+ return __riscv_vfsgnj_vv_bf16m4_tu(maskedoff, op1, op2, vl);
+}
+
+// CHECK-RV64-LABEL: define dso_local <vscale x 16 x bfloat> @test_vfsgnj_vf_bf16m4_tu(
+// CHECK-RV64-SAME: <vscale x 16 x bfloat> [[MASKEDOFF:%.*]], <vscale x 16 x bfloat> [[OP1:%.*]], bfloat noundef [[OP2:%.*]], i64 noundef [[VL:%.*]]) #[[ATTR0]] {
+// CHECK-RV64-NEXT: [[ENTRY:.*:]]
+// CHECK-RV64-NEXT: [[TMP0:%.*]] = call <vscale x 16 x bfloat> @llvm.riscv.vfsgnj.nxv16bf16.bf16.i64(<vscale x 16 x bfloat> [[MASKEDOFF]], <vscale x 16 x bfloat> [[OP1]], bfloat [[OP2]], i64 [[VL]])
+// CHECK-RV64-NEXT: ret <vscale x 16 x bfloat> [[TMP0]]
+//
+vbfloat16m4_t test_vfsgnj_vf_bf16m4_tu(vbfloat16m4_t maskedoff, vbfloat16m4_t op1, __bf16 op2, size_t vl) {
+ return __riscv_vfsgnj_vf_bf16m4_tu(maskedoff, op1, op2, vl);
+}
+
+// CHECK-RV64-LABEL: define dso_local <vscale x 32 x bfloat> @test_vfsgnj_vv_bf16m8_tu(
+// CHECK-RV64-SAME: <vscale x 32 x bfloat> [[MASKEDOFF:%.*]], <vscale x 32 x bfloat> [[OP1:%.*]], <vscale x 32 x bfloat> [[OP2:%.*]], i64 noundef [[VL:%.*]]) #[[ATTR0]] {
+// CHECK-RV64-NEXT: [[ENTRY:.*:]]
+// CHECK-RV64-NEXT: [[TMP0:%.*]] = call <vscale x 32 x bfloat> @llvm.riscv.vfsgnj.nxv32bf16.nxv32bf16.i64(<vscale x 32 x bfloat> [[MASKEDOFF]], <vscale x 32 x bfloat> [[OP1]], <vscale x 32 x bfloat> [[OP2]], i64 [[VL]])
+// CHECK-RV64-NEXT: ret <vscale x 32 x bfloat> [[TMP0]]
+//
+vbfloat16m8_t test_vfsgnj_vv_bf16m8_tu(vbfloat16m8_t maskedoff, vbfloat16m8_t op1, vbfloat16m8_t op2, size_t vl) {
+ return __riscv_vfsgnj_vv_bf16m8_tu(maskedoff, op1, op2, vl);
+}
+
+// CHECK-RV64-LABEL: define dso_local <vscale x 32 x bfloat> @test_vfsgnj_vf_bf16m8_tu(
+// CHECK-RV64-SAME: <vscale x 32 x bfloat> [[MASKEDOFF:%.*]], <vscale x 32 x bfloat> [[OP1:%.*]], bfloat noundef [[OP2:%.*]], i64 noundef [[VL:%.*]]) #[[ATTR0]] {
+// CHECK-RV64-NEXT: [[ENTRY:.*:]]
+// CHECK-RV64-NEXT: [[TMP0:%.*]] = call <vscale x 32 x bfloat> @llvm.riscv.vfsgnj.nxv32bf16.bf16.i64(<vscale x 32 x bfloat> [[MASKEDOFF]], <vscale x 32 x bfloat> [[OP1]], bfloat [[OP2]], i64 [[VL]])
+// CHECK-RV64-NEXT: ret <vscale x 32 x bfloat> [[TMP0]]
+//
+vbfloat16m8_t test_vfsgnj_vf_bf16m8_tu(vbfloat16m8_t maskedoff, vbfloat16m8_t op1, __bf16 op2, size_t vl) {
+ return __riscv_vfsgnj_vf_bf16m8_tu(maskedoff, op1, op2, vl);
+}
+
+// CHECK-RV64-LABEL: define dso_local <vscale x 1 x bfloat> @test_vfsgnj_vv_bf16mf4_tum(
+// CHECK-RV64-SAME: <vscale x 1 x i1> [[MASK:%.*]], <vscale x 1 x bfloat> [[MASKEDOFF:%.*]], <vscale x 1 x bfloat> [[OP1:%.*]], <vscale x 1 x bfloat> [[OP2:%.*]], i64 noundef [[VL:%.*]]) #[[ATTR0]] {
+// CHECK-RV64-NEXT: [[ENTRY:.*:]]
+// CHECK-RV64-NEXT: [[TMP0:%.*]] = call <vscale x 1 x bfloat> @llvm.riscv.vfsgnj.mask.nxv1bf16.nxv1bf16.i64(<vscale x 1 x bfloat> [[MASKEDOFF]], <vscale x 1 x bfloat> [[OP1]], <vscale x 1 x bfloat> [[OP2]], <vscale x 1 x i1> [[MASK]], i64 [[VL]], i64 2)
+// CHECK-RV64-NEXT: ret <vscale x 1 x bfloat> [[TMP0]]
+//
+vbfloat16mf4_t test_vfsgnj_vv_bf16mf4_tum(vbool64_t mask, vbfloat16mf4_t maskedoff, vbfloat16mf4_t op1, vbfloat16mf4_t op2, size_t vl) {
+ return __riscv_vfsgnj_vv_bf16mf4_tum(mask, maskedoff, op1, op2, vl);
+}
+
+// CHECK-RV64-LABEL: define dso_local <vscale x 1 x bfloat> @test_vfsgnj_vf_bf16mf4_tum(
+// CHECK-RV64-SAME: <vscale x 1 x i1> [[MASK:%.*]], <vscale x 1 x bfloat> [[MASKEDOFF:%.*]], <vscale x 1 x bfloat> [[OP1:%.*]], bfloat noundef [[OP2:%.*]], i64 noundef [[VL:%.*]]) #[[ATTR0]] {
+// CHECK-RV64-NEXT: [[ENTRY:.*:]]
+// CHECK-RV64-NEXT: [[TMP0:%.*]] = call <vscale x 1 x bfloat> @llvm.riscv.vfsgnj.mask.nxv1bf16.bf16.i64(<vscale x 1 x bfloat> [[MASKEDOFF]], <vscale x 1 x bfloat> [[OP1]], bfloat [[OP2]], <vscale x 1 x i1> [[MASK]], i64 [[VL]], i64 2)
+// CHECK-RV64-NEXT: ret <vscale x 1 x bfloat> [[TMP0]]
+//
+vbfloat16mf4_t test_vfsgnj_vf_bf16mf4_tum(vbool64_t mask, vbfloat16mf4_t maskedoff, vbfloat16mf4_t op1, __bf16 op2, size_t vl) {
+ return __riscv_vfsgnj_vf_bf16mf4_tum(mask, maskedoff, op1, op2, vl);
+}
+
+// CHECK-RV64-LABEL: define dso_local <vscale x 2 x bfloat> @test_vfsgnj_vv_bf16mf2_tum(
+// CHECK-RV64-SAME: <vscale x 2 x i1> [[MASK:%.*]], <vscale x 2 x bfloat> [[MASKEDOFF:%.*]], <vscale x 2 x bfloat> [[OP1:%.*]], <vscale x 2 x bfloat> [[OP2:%.*]], i64 noundef [[VL:%.*]]) #[[ATTR0]] {
+// CHECK-RV64-NEXT: [[ENTRY:.*:]]
+// CHECK-RV64-NEXT: [[TMP0:%.*]] = call <vscale x 2 x bfloat> @llvm.riscv.vfsgnj.mask.nxv2bf16.nxv2bf16.i64(<vscale x 2 x bfloat> [[MASKEDOFF]], <vscale x 2 x bfloat> [[OP1]], <vscale x 2 x bfloat> [[OP2]], <vscale x 2 x i1> [[MASK]], i64 [[VL]], i64 2)
+// CHECK-RV64-NEXT: ret <vscale x 2 x bfloat> [[TMP0]]
+//
+vbfloat16mf2_t test_vfsgnj_vv_bf16mf2_tum(vbool32_t mask, vbfloat16mf2_t maskedoff, vbfloat16mf2_t op1, vbfloat16mf2_t op2, size_t vl) {
+ return __riscv_vfsgnj_vv_bf16mf2_tum(mask, maskedoff, op1, op2, vl);
+}
+
+// CHECK-RV64-LABEL: define dso_local <vscale x 2 x bfloat> @test_vfsgnj_vf_bf16mf2_tum(
+// CHECK-RV64-SAME: <vscale x 2 x i1> [[MASK:%.*]], <vscale x 2 x bfloat> [[MASKEDOFF:%.*]], <vscale x 2 x bfloat> [[OP1:%.*]], bfloat noundef [[OP2:%.*]], i64 noundef [[VL:%.*]]) #[[ATTR0]] {
+// CHECK-RV64-NEXT: [[ENTRY:.*:]]
+// CHECK-RV64-NEXT: [[TMP0:%.*]] = call <vscale x 2 x bfloat> @llvm.riscv.vfsgnj.mask.nxv2bf16.bf16.i64(<vscale x 2 x bfloat> [[MASKEDOFF]], <vscale x 2 x bfloat> [[OP1]], bfloat [[OP2]], <vscale x 2 x i1> [[MASK]], i64 [[VL]], i64 2)
+// CHECK-RV64-NEXT: ret <vscale x 2 x bfloat> [[TMP0]]
+//
+vbfloat16mf2_t test_vfsgnj_vf_bf16mf2_tum(vbool32_t mask, vbfloat16mf2_t maskedoff, vbfloat16mf2_t op1, __bf16 op2, size_t vl) {
+ return __riscv_vfsgnj_vf_bf16mf2_tum(mask, maskedoff, op1, op2, vl);
+}
+
+// CHECK-RV64-LABEL: define dso_local <vscale x 4 x bfloat> @test_vfsgnj_vv_bf16m1_tum(
+// CHECK-RV64-SAME: <vscale x 4 x i1> [[MASK:%.*]], <vscale x 4 x bfloat> [[MASKEDOFF:%.*]], <vscale x 4 x bfloat> [[OP1:%.*]], <vscale x 4 x bfloat> [[OP2:%.*]], i64 noundef [[VL:%.*]]) #[[ATTR0]] {
+// CHECK-RV64-NEXT: [[ENTRY:.*:]]
+// CHECK-RV64-NEXT: [[TMP0:%.*]] = call <vscale x 4 x bfloat> @llvm.riscv.vfsgnj.mask.nxv4bf16.nxv4bf16.i64(<vscale x 4 x bfloat> [[MASKEDOFF]], <vscale x 4 x bfloat> [[OP1]], <vscale x 4 x bfloat> [[OP2]], <vscale x 4 x i1> [[MASK]], i64 [[VL]], i64 2)
+// CHECK-RV64-NEXT: ret <vscale x 4 x bfloat> [[TMP0]]
+//
+vbfloat16m1_t test_vfsgnj_vv_bf16m1_tum(vbool16_t mask, vbfloat16m1_t maskedoff, vbfloat16m1_t op1, vbfloat16m1_t op2, size_t vl) {
+ return __riscv_vfsgnj_vv_bf16m1_tum(mask, maskedoff, op1, op2, vl);
+}
+
+// CHECK-RV64-LABEL: define dso_local <vscale x 4 x bfloat> @test_vfsgnj_vf_bf16m1_tum(
+// CHECK-RV64-SAME: <vscale x 4 x i1> [[MASK:%.*]], <vscale x 4 x bfloat> [[MASKEDOFF:%.*]], <vscale x 4 x bfloat> [[OP1:%.*]], bfloat noundef [[OP2:%.*]], i64 noundef [[VL:%.*]]) #[[ATTR0]] {
+// CHECK-RV64-NEXT: [[ENTRY:.*:]]
+// CHECK-RV64-NEXT: [[TMP0:%.*]] = call <vscale x 4 x bfloat> @llvm.riscv.vfsgnj.mask.nxv4bf16.bf16.i64(<vscale x 4 x bfloat> [[MASKEDOFF]], <vscale x 4 x bfloat> [[OP1]], bfloat [[OP2]], <vscale x 4 x i1> [[MASK]], i64 [[VL]], i64 2)
+// CHECK-RV64-NEXT: ret <vscale x 4 x bfloat> [[TMP0]]
+//
+vbfloat16m1_t test_vfsgnj_vf_bf16m1_tum(vbool16_t mask, vbfloat16m1_t maskedoff, vbfloat16m1_t op1, __bf16 op2, size_t vl) {
+ return __riscv_vfsgnj_vf_bf16m1_tum(mask, maskedoff, op1, op2, vl);
+}
+
+// CHECK-RV64-LABEL: define dso_local <vscale x 8 x bfloat> @test_vfsgnj_vv_bf16m2_tum(
+// CHECK-RV64-SAME: <vscale x 8 x i1> [[MASK:%.*]], <vscale x 8 x bfloat> [[MASKEDOFF:%.*]], <vscale x 8 x bfloat> [[OP1:%.*]], <vscale x 8 x bfloat> [[OP2:%.*]], i64 noundef [[VL:%.*]]) #[[ATTR0]] {
+// CHECK-RV64-NEXT: [[ENTRY:.*:]]
+// CHECK-RV64-NEXT: [[TMP0:%.*]] = call <vscale x 8 x bfloat> @llvm.riscv.vfsgnj.mask.nxv8bf16.nxv8bf16.i64(<vscale x 8 x bfloat> [[MASKEDOFF]], <vscale x 8 x bfloat> [[OP1]], <vscale x 8 x bfloat> [[OP2]], <vscale x 8 x i1> [[MASK]], i64 [[VL]], i64 2)
+// CHECK-RV64-NEXT: ret <vscale x 8 x bfloat> [[TMP0]]
+//
+vbfloat16m2_t test_vfsgnj_vv_bf16m2_tum(vbool8_t mask, vbfloat16m2_t maskedoff, vbfloat16m2_t op1, vbfloat16m2_t op2, size_t vl) {
+ return __riscv_vfsgnj_vv_bf16m2_tum(mask, maskedoff, op1, op2, vl);
+}
+
+// CHECK-RV64-LABEL: define dso_local <vscale x 8 x bfloat> @test_vfsgnj_vf_bf16m2_tum(
+// CHECK-RV64-SAME: <vscale x 8 x i1> [[MASK:%.*]], <vscale x 8 x bfloat> [[MASKEDOFF:%.*]], <vscale x 8 x bfloat> [[OP1:%.*]], bfloat noundef [[OP2:%.*]], i64 noundef [[VL:%.*]]) #[[ATTR0]] {
+// CHECK-RV64-NEXT: [[ENTRY:.*:]]
+// CHECK-RV64-NEXT: [[TMP0:%.*]] = call <vscale x 8 x bfloat> @llvm.riscv.vfsgnj.mask.nxv8bf16.bf16.i64(<vscale x 8 x bfloat> [[MASKEDOFF]], <vscale x 8 x bfloat> [[OP1]], bfloat [[OP2]], <vscale x 8 x i1> [[MASK]], i64 [[VL]], i64 2)
+// CHECK-RV64-NEXT: ret <vscale x 8 x bfloat> [[TMP0]]
+//
+vbfloat16m2_t test_vfsgnj_vf_bf16m2_tum(vbool8_t mask, vbfloat16m2_t maskedoff, vbfloat16m2_t op1, __bf16 op2, size_t vl) {
+ return __riscv_vfsgnj_vf_bf16m2_tum(mask, maskedoff, op1, op2, vl);
+}
+
+// CHECK-RV64-LABEL: define dso_local <vscale x 16 x bfloat> @test_vfsgnj_vv_bf16m4_tum(
+// CHECK-RV64-SAME: <vscale x 16 x i1> [[MASK:%.*]], <vscale x 16 x bfloat> [[MASKEDOFF:%.*]], <vscale x 16 x bfloat> [[OP1:%.*]], <vscale x 16 x bfloat> [[OP2:%.*]], i64 noundef [[VL:%.*]]) #[[ATTR0]] {
+// CHECK-RV64-NEXT: [[ENTRY:.*:]]
+// CHECK-RV64-NEXT: [[TMP0:%.*]] = call <vscale x 16 x bfloat> @llvm.riscv.vfsgnj.mask.nxv16bf16.nxv16bf16.i64(<vscale x 16 x bfloat> [[MASKEDOFF]], <vscale x 16 x bfloat> [[OP1]], <vscale x 16 x bfloat> [[OP2]], <vscale x 16 x i1> [[MASK]], i64 [[VL]], i64 2)
+// CHECK-RV64-NEXT: ret <vscale x 16 x bfloat> [[TMP0]]
+//
+vbfloat16m4_t test_vfsgnj_vv_bf16m4_tum(vbool4_t mask, vbfloat16m4_t maskedoff, vbfloat16m4_t op1, vbfloat16m4_t op2, size_t vl) {
+ return __riscv_vfsgnj_vv_bf16m4_tum(mask, maskedoff, op1, op2, vl);
+}
+
+// CHECK-RV64-LABEL: define dso_local <vscale x 16 x bfloat> @test_vfsgnj_vf_bf16m4_tum(
+// CHECK-RV64-SAME: <vscale x 16 x i1> [[MASK:%.*]], <vscale x 16 x bfloat> [[MASKEDOFF:%.*]], <vscale x 16 x bfloat> [[OP1:%.*]], bfloat noundef [[OP2:%.*]], i64 noundef [[VL:%.*]]) #[[ATTR0]] {
+// CHECK-RV64-NEXT: [[ENTRY:.*:]]
+// CHECK-RV64-NEXT: [[TMP0:%.*]] = call <vscale x 16 x bfloat> @llvm.riscv.vfsgnj.mask.nxv16bf16.bf16.i64(<vscale x 16 x bfloat> [[MASKEDOFF]], <vscale x 16 x bfloat> [[OP1]], bfloat [[OP2]], <vscale x 16 x i1> [[MASK]], i64 [[VL]], i64 2)
+// CHECK-RV64-NEXT: ret <vscale x 16 x bfloat> [[TMP0]]
+//
+vbfloat16m4_t test_vfsgnj_vf_bf16m4_tum(vbool4_t mask, vbfloat16m4_t maskedoff, vbfloat16m4_t op1, __bf16 op2, size_t vl) {
+ return __riscv_vfsgnj_vf_bf16m4_tum(mask, maskedoff, op1, op2, vl);
+}
+
+// CHECK-RV64-LABEL: define dso_local <vscale x 32 x bfloat> @test_vfsgnj_vv_bf16m8_tum(
+// CHECK-RV64-SAME: <vscale x 32 x i1> [[MASK:%.*]], <vscale x 32 x bfloat> [[MASKEDOFF:%.*]], <vscale x 32 x bfloat> [[OP1:%.*]], <vscale x 32 x bfloat> [[OP2:%.*]], i64 noundef [[VL:%.*]]) #[[ATTR0]] {
+// CHECK-RV64-NEXT: [[ENTRY:.*:]]
+// CHECK-RV64-NEXT: [[TMP0:%.*]] = call <vscale x 32 x bfloat> @llvm.riscv.vfsgnj.mask.nxv32bf16.nxv32bf16.i64(<vscale x 32 x bfloat> [[MASKEDOFF]], <vscale x 32 x bfloat> [[OP1]], <vscale x 32 x bfloat> [[OP2]], <vscale x 32 x i1> [[MASK]], i64 [[VL]], i64 2)
+// CHECK-RV64-NEXT: ret <vscale x 32 x bfloat> [[TMP0]]
+//
+vbfloat16m8_t test_vfsgnj_vv_bf16m8_tum(vbool2_t mask, vbfloat16m8_t maskedoff, vbfloat16m8_t op1, vbfloat16m8_t op2, size_t vl) {
+ return __riscv_vfsgnj_vv_bf16m8_tum(mask, maskedoff, op1, op2, vl);
+}
+
+// CHECK-RV64-LABEL: define dso_local <vscale x 32 x bfloat> @test_vfsgnj_vf_bf16m8_tum(
+// CHECK-RV64-SAME: <vscale x 32 x i1> [[MASK:%.*]], <vscale x 32 x bfloat> [[MASKEDOFF:%.*]], <vscale x 32 x bfloat> [[OP1:%.*]], bfloat noundef [[OP2:%.*]], i64 noundef [[VL:%.*]]) #[[ATTR0]] {
+// CHECK-RV64-NEXT: [[ENTRY:.*:]]
+// CHECK-RV64-NEXT: [[TMP0:%.*]] = call <vscale x 32 x bfloat> @llvm.riscv.vfsgnj.mask.nxv32bf16.bf16.i64(<vscale x 32 x bfloat> [[MASKEDOFF]], <vscale x 32 x bfloat> [[OP1]], bfloat [[OP2]], <vscale x 32 x i1> [[MASK]], i64 [[VL]], i64 2)
+// CHECK-RV64-NEXT: ret <vscale x 32 x bfloat> [[TMP0]]
+//
+vbfloat16m8_t test_vfsgnj_vf_bf16m8_tum(vbool2_t mask, vbfloat16m8_t maskedoff, vbfloat16m8_t op1, __bf16 op2, size_t vl) {
+ return __riscv_vfsgnj_vf_bf16m8_tum(mask, maskedoff, op1, op2, vl);
+}
+
+// CHECK-RV64-LABEL: define dso_local <vscale x 1 x bfloat> @test_vfsgnj_vv_bf16mf4_tumu(
+// CHECK-RV64-SAME: <vscale x 1 x i1> [[MASK:%.*]], <vscale x 1 x bfloat> [[MASKEDOFF:%.*]], <vscale x 1 x bfloat> [[OP1:%.*]], <vscale x 1 x bfloat> [[OP2:%.*]], i64 noundef [[VL:%.*]]) #[[ATTR0]] {
+// CHECK-RV64-NEXT: [[ENTRY:.*:]]
+// CHECK-RV64-NEXT: [[TMP0:%.*]] = call <vscale x 1 x bfloat> @llvm.riscv.vfsgnj.mask.nxv1bf16.nxv1bf16.i64(<vscale x 1 x bfloat> [[MASKEDOFF]], <vscale x 1 x bfloat> [[OP1]], <vscale x 1 x bfloat> [[OP2]], <vscale x 1 x i1> [[MASK]], i64 [[VL]], i64 0)
+// CHECK-RV64-NEXT: ret <vscale x 1 x bfloat> [[TMP0]]
+//
+vbfloat16mf4_t test_vfsgnj_vv_bf16mf4_tumu(vbool64_t mask, vbfloat16mf4_t maskedoff, vbfloat16mf4_t op1, vbfloat16mf4_t op2, size_t vl) {
+ return __riscv_vfsgnj_vv_bf16mf4_tumu(mask, maskedoff, op1, op2, vl);
+}
+
+// CHECK-RV64-LABEL: define dso_local <vscale x 1 x bfloat> @test_vfsgnj_vf_bf16mf4_tumu(
+// CHECK-RV64-SAME: <vscale x 1 x i1> [[MASK:%.*]], <vscale x 1 x bfloat> [[MASKEDOFF:%.*]], <vscale x 1 x bfloat> [[OP1:%.*]], bfloat noundef [[OP2:%.*]], i64 noundef [[VL:%.*]]) #[[ATTR0]] {
+// CHECK-RV64-NEXT: [[ENTRY:.*:]]
+// CHECK-RV64-NEXT: [[TMP0:%.*]] = call <vscale x 1 x bfloat> @llvm.riscv.vfsgnj.mask.nxv1bf16.bf16.i64(<vscale x 1 x bfloat> [[MASKEDOFF]], <vscale x 1 x bfloat> [[OP1]], bfloat [[OP2]], <vscale x 1 x i1> [[MASK]], i64 [[VL]], i64 0)
+// CHECK-RV64-NEXT: ret <vscale x 1 x bfloat> [[TMP0]]
+//
+vbfloat16mf4_t test_vfsgnj_vf_bf16mf4_tumu(vbool64_t mask, vbfloat16mf4_t maskedoff, vbfloat16mf4_t op1, __bf16 op2, size_t vl) {
+ return __riscv_vfsgnj_vf_bf16mf4_tumu(mask, maskedoff, op1, op2, vl);
+}
+
+// CHECK-RV64-LABEL: define dso_local <vscale x 2 x bfloat> @test_vfsgnj_vv_bf16mf2_tumu(
+// CHECK-RV64-SAME: <vscale x 2 x i1> [[MASK:%.*]], <vscale x 2 x bfloat> [[MASKEDOFF:%.*]], <vscale x 2 x bfloat> [[OP1:%.*]], <vscale x 2 x bfloat> [[OP2:%.*]], i64 noundef [[VL:%.*]]) #[[ATTR0]] {
+// CHECK-RV64-NEXT: [[ENTRY:.*:]]
+// CHECK-RV64-NEXT: [[TMP0:%.*]] = call <vscale x 2 x bfloat> @llvm.riscv.vfsgnj.mask.nxv2bf16.nxv2bf16.i64(<vscale x 2 x bfloat> [[MASKEDOFF]], <vscale x 2 x bfloat> [[OP1]], <vscale x 2 x bfloat> [[OP2]], <vscale x 2 x i1> [[MASK]], i64 [[VL]], i64 0)
+// CHECK-RV64-NEXT: ret <vscale x 2 x bfloat> [[TMP0]]
+//
+vbfloat16mf2_t test_vfsgnj_vv_bf16mf2_tumu(vbool32_t mask, vbfloat16mf2_t maskedoff, vbfloat16mf2_t op1, vbfloat16mf2_t op2, size_t vl) {
+ return __riscv_vfsgnj_vv_bf16mf2_tumu(mask, maskedoff, op1, op2, vl);
+}
+
+// CHECK-RV64-LABEL: define dso_local <vscale x 2 x bfloat> @test_vfsgnj_vf_bf16mf2_tumu(
+// CHECK-RV64-SAME: <vscale x 2 x i1> [[MASK:%.*]], <vscale x 2 x bfloat> [[MASKEDOFF:%.*]], <vscale x 2 x bfloat> [[OP1:%.*]], bfloat noundef [[OP2:%.*]], i64 noundef [[VL:%.*]]) #[[ATTR0]] {
+// CHECK-RV64-NEXT: [[ENTRY:.*:]]
+// CHECK-RV64-NEXT: [[TMP0:%.*]] = call <vscale x 2 x bfloat> @llvm.riscv.vfsgnj.mask.nxv2bf16.bf16.i64(<vscale x 2 x bfloat> [[MASKEDOFF]], <vscale x 2 x bfloat> [[OP1]], bfloat [[OP2]], <vscale x 2 x i1> [[MASK]], i64 [[VL]], i64 0)
+// CHECK-RV64-NEXT: ret <vscale x 2 x bfloat> [[TMP0]]
+//
+vbfloat16mf2_t test_vfsgnj_vf_bf16mf2_tumu(vbool32_t mask, vbfloat16mf2_t maskedoff, vbfloat16mf2_t op1, __bf16 op2, size_t vl) {
+ return __riscv_vfsgnj_vf_bf16mf2_tumu(mask, maskedoff, op1, op2, vl);
+}
+
+// CHECK-RV64-LABEL: define dso_local <vscale x 4 x bfloat> @test_vfsgnj_vv_bf16m1_tumu(
+// CHECK-RV64-SAME: <vscale x 4 x i1> [[MASK:%.*]], <vscale x 4 x bfloat> [[MASKEDOFF:%.*]], <vscale x 4 x bfloat> [[OP1:%.*]], <vscale x 4 x bfloat> [[OP2:%.*]], i64 noundef [[VL:%.*]]) #[[ATTR0]] {
+// CHECK-RV64-NEXT: [[ENTRY:.*:]]
+// CHECK-RV64-NEXT: [[TMP0:%.*]] = call <vscale x 4 x bfloat> @llvm.riscv.vfsgnj.mask.nxv4bf16.nxv4bf16.i64(<vscale x 4 x bfloat> [[MASKEDOFF]], <vscale x 4 x bfloat> [[OP1]], <vscale x 4 x bfloat> [[OP2]], <vscale x 4 x i1> [[MASK]], i64 [[VL]], i64 0)
+// CHECK-RV64-NEXT: ret <vscale x 4 x bfloat> [[TMP0]]
+//
+vbfloat16m1_t test_vfsgnj_vv_bf16m1_tumu(vbool16_t mask, vbfloat16m1_t maskedoff, vbfloat16m1_t op1, vbfloat16m1_t op2, size_t vl) {
+ return __riscv_vfsgnj_vv_bf16m1_tumu(mask, maskedoff, op1, op2, vl);
+}
+
+// CHECK-RV64-LABEL: define dso_local <vscale x 4 x bfloat> @test_vfsgnj_vf_bf16m1_tumu(
+// CHECK-RV64-SAME: <vscale x 4 x i1> [[MASK:%.*]], <vscale x 4 x bfloat> [[MASKEDOFF:%.*]], <vscale x 4 x bfloat> [[OP1:%.*]], bfloat noundef [[OP2:%.*]], i64 noundef [[VL:%.*]]) #[[ATTR0]] {
+// CHECK-RV64-NEXT: [[ENTRY:.*:]]
+// CHECK-RV64-NEXT: [[TMP0:%.*]] = call <vscale x 4 x bfloat> @llvm.riscv.vfsgnj.mask.nxv4bf16.bf16.i64(<vscale x 4 x bfloat> [[MASKEDOFF]], <vscale x 4 x bfloat> [[OP1]], bfloat [[OP2]], <vscale x 4 x i1> [[MASK]], i64 [[VL]], i64 0)
+// CHECK-RV64-NEXT: ret <vscale x 4 x bfloat> [[TMP0]]
+//
+vbfloat16m1_t test_vfsgnj_vf_bf16m1_tumu(vbool16_t mask, vbfloat16m1_t maskedoff, vbfloat16m1_t op1, __bf16 op2, size_t vl) {
+ return __riscv_vfsgnj_vf_bf16m1_tumu(mask, maskedoff, op1, op2, vl);
+}
+
+// CHECK-RV64-LABEL: define dso_local <vscale x 8 x bfloat> @test_vfsgnj_vv_bf16m2_tumu(
+// CHECK-RV64-SAME: <vscale x 8 x i1> [[MASK:%.*]], <vscale x 8 x bfloat> [[MASKEDOFF:%.*]], <vscale x 8 x bfloat> [[OP1:%.*]], <vscale x 8 x bfloat> [[OP2:%.*]], i64 noundef [[VL:%.*]]) #[[ATTR0]] {
+// CHECK-RV64-NEXT: [[ENTRY:.*:]]
+// CHECK-RV64-NEXT: [[TMP0:%.*]] = call <vscale x 8 x bfloat> @llvm.riscv.vfsgnj.mask.nxv8bf16.nxv8bf16.i64(<vscale x 8 x bfloat> [[MASKEDOFF]], <vscale x 8 x bfloat> [[OP1]], <vscale x 8 x bfloat> [[OP2]], <vscale x 8 x i1> [[MASK]], i64 [[VL]], i64 0)
+// CHECK-RV64-NEXT: ret <vscale x 8 x bfloat> [[TMP0]]
+//
+vbfloat16m2_t test_vfsgnj_vv_bf16m2_tumu(vbool8_t mask, vbfloat16m2_t maskedoff, vbfloat16m2_t op1, vbfloat16m2_t op2, size_t vl) {
+ return __riscv_vfsgnj_vv_bf16m2_tumu(mask, maskedoff, op1, op2, vl);
+}
+
+// CHECK-RV64-LABEL: define dso_local <vscale x 8 x bfloat> @test_vfsgnj_vf_bf16m2_tumu(
+// CHECK-RV64-SAME: <vscale x 8 x i1> [[MASK:%.*]], <vscale x 8 x bfloat> [[MASKEDOFF:%.*]], <vscale x 8 x bfloat> [[OP1:%.*]], bfloat noundef [[OP2:%.*]], i64 noundef [[VL:%.*]]) #[[ATTR0]] {
+// CHECK-RV64-NEXT: [[ENTRY:.*:]]
+// CHECK-RV64-NEXT: [[TMP0:%.*]] = call <vscale x 8 x bfloat> @llvm.riscv.vfsgnj.mask.nxv8bf16.bf16.i64(<vscale x 8 x bfloat> [[MASKEDOFF]], <vscale x 8 x bfloat> [[OP1]], bfloat [[OP2]], <vscale x 8 x i1> [[MASK]], i64 [[VL]], i64 0)
+// CHECK-RV64-NEXT: ret <vscale x 8 x bfloat> [[TMP0]]
+//
+vbfloat16m2_t test_vfsgnj_vf_bf16m2_tumu(vbool8_t mask, vbfloat16m2_t maskedoff, vbfloat16m2_t op1, __bf16 op2, size_t vl) {
+ return __riscv_vfsgnj_vf_bf16m2_tumu(mask, maskedoff, op1, op2, vl);
+}
+
+// CHECK-RV64-LABEL: define dso_local <vscale x 16 x bfloat> @test_vfsgnj_vv_bf16m4_tumu(
+// CHECK-RV64-SAME: <vscale x 16 x i1> [[MASK:%.*]], <vscale x 16 x bfloat> [[MASKEDOFF:%.*]], <vscale x 16 x bfloat> [[OP1:%.*]], <vscale x 16 x bfloat> [[OP2:%.*]], i64 noundef [[VL:%.*]]) #[[ATTR0]] {
+// CHECK-RV64-NEXT: [[ENTRY:.*:]]
+// CHECK-RV64-NEXT: [[TMP0:%.*]] = call <vscale x 16 x bfloat> @llvm.riscv.vfsgnj.mask.nxv16bf16.nxv16bf16.i64(<vscale x 16 x bfloat> [[MASKEDOFF]], <vscale x 16 x bfloat> [[OP1]], <vscale x 16 x bfloat> [[OP2]], <vscale x 16 x i1> [[MASK]], i64 [[VL]], i64 0)
+// CHECK-RV64-NEXT: ret <vscale x 16 x bfloat> [[TMP0]]
+//
+vbfloat16m4_t test_vfsgnj_vv_bf16m4_tumu(vbool4_t mask, vbfloat16m4_t maskedoff, vbfloat16m4_t op1, vbfloat16m4_t op2, size_t vl) {
+ return __riscv_vfsgnj_vv_bf16m4_tumu(mask, maskedoff, op1, op2, vl);
+}
+
+// CHECK-RV64-LABEL: define dso_local <vscale x 16 x bfloat> @test_vfsgnj_vf_bf16m4_tumu(
+// CHECK-RV64-SAME: <vscale x 16 x i1> [[MASK:%.*]], <vscale x 16 x bfloat> [[MASKEDOFF:%.*]], <vscale x 16 x bfloat> [[OP1:%.*]], bfloat noundef [[OP2:%.*]], i64 noundef [[VL:%.*]]) #[[ATTR0]] {
+// CHECK-RV64-NEXT: [[ENTRY:.*:]]
+// CHECK-RV64-NEXT: [[TMP0:%.*]] = call <vscale x 16 x bfloat> @llvm.riscv.vfsgnj.mask.nxv16bf16.bf16.i64(<vscale x 16 x bfloat> [[MASKEDOFF]], <vscale x 16 x bfloat> [[OP1]], bfloat [[OP2]], <vscale x 16 x i1> [[MASK]], i64 [[VL]], i64 0)
+// CHECK-RV64-NEXT: ret <vscale x 16 x bfloat> [[TMP0]]
+//
+vbfloat16m4_t test_vfsgnj_vf_bf16m4_tumu(vbool4_t mask, vbfloat16m4_t maskedoff, vbfloat16m4_t op1, __bf16 op2, size_t vl) {
+ return __riscv_vfsgnj_vf_bf16m4_tumu(mask, maskedoff, op1, op2, vl);
+}
+
+// CHECK-RV64-LABEL: define dso_local <vscale x 32 x bfloat> @test_vfsgnj_vv_bf16m8_tumu(
+// CHECK-RV64-SAME: <vscale x 32 x i1> [[MASK:%.*]], <vscale x 32 x bfloat> [[MASKEDOFF:%.*]], <vscale x 32 x bfloat> [[OP1:%.*]], <vscale x 32 x bfloat> [[OP2:%.*]], i64 noundef [[VL:%.*]]) #[[ATTR0]] {
+// CHECK-RV64-NEXT: [[ENTRY:.*:]]
+// CHECK-RV64-NEXT: [[TMP0:%.*]] = call <vscale x 32 x bfloat> @llvm.riscv.vfsgnj.mask.nxv32bf16.nxv32bf16.i64(<vscale x 32 x bfloat> [[MASKEDOFF]], <vscale x 32 x bfloat> [[OP1]], <vscale x 32 x bfloat> [[OP2]], <vscale x 32 x i1> [[MASK]], i64 [[VL]], i64 0)
+// CHECK-RV64-NEXT: ret <vscale x 32 x bfloat> [[TMP0]]
+//
+vbfloat16m8_t test_vfsgnj_vv_bf16m8_tumu(vbool2_t mask, vbfloat16m8_t maskedoff, vbfloat16m8_t op1, vbfloat16m8_t op2, size_t vl) {
+ return __riscv_vfsgnj_vv_bf16m8_tumu(mask, maskedoff, op1, op2, vl);
+}
+
+// CHECK-RV64-LABEL: define dso_local <vscale x 32 x bfloat> @test_vfsgnj_vf_bf16m8_tumu(
+// CHECK-RV64-SAME: <vscale x 32 x i1> [[MASK:%.*]], <vscale x 32 x bfloat> [[MASKEDOFF:%.*]], <vscale x 32 x bfloat> [[OP1:%.*]], bfloat noundef [[OP2:%.*]], i64 noundef [[VL:%.*]]) #[[ATTR0]] {
+// CHECK-RV64-NEXT: [[ENTRY:.*:]]
+// CHECK-RV64-NEXT: [[TMP0:%.*]] = call <vscale x 32 x bfloat> @llvm.riscv.vfsgnj.mask.nxv32bf16.bf16.i64(<vscale x 32 x bfloat> [[MASKEDOFF]], <vscale x 32 x bfloat> [[OP1]], bfloat [[OP2]], <vscale x 32 x i1> [[MASK]], i64 [[VL]], i64 0)
+// CHECK-RV64-NEXT: ret <vscale x 32 x bfloat> [[TMP0]]
+//
+vbfloat16m8_t test_vfsgnj_vf_bf16m8_tumu(vbool2_t mask, vbfloat16m8_t maskedoff, vbfloat16m8_t op1, __bf16 op2, size_t vl) {
+ return __riscv_vfsgnj_vf_bf16m8_tumu(mask, maskedoff, op1, op2, vl);
+}
+
+// CHECK-RV64-LABEL: define dso_local <vscale x 1 x bfloat> @test_vfsgnj_vv_bf16mf4_mu(
+// CHECK-RV64-SAME: <vscale x 1 x i1> [[MASK:%.*]], <vscale x 1 x bfloat> [[MASKEDOFF:%.*]], <vscale x 1 x bfloat> [[OP1:%.*]], <vscale x 1 x bfloat> [[OP2:%.*]], i64 noundef [[VL:%.*]]) #[[ATTR0]] {
+// CHECK-RV64-NEXT: [[ENTRY:.*:]]
+// CHECK-RV64-NEXT: [[TMP0:%.*]] = call <vscale x 1 x bfloat> @llvm.riscv.vfsgnj.mask.nxv1bf16.nxv1bf16.i64(<vscale x 1 x bfloat> [[MASKEDOFF]], <vscale x 1 x bfloat> [[OP1]], <vscale x 1 x bfloat> [[OP2]], <vscale x 1 x i1> [[MASK]], i64 [[VL]], i64 1)
+// CHECK-RV64-NEXT: ret <vscale x 1 x bfloat> [[TMP0]]
+//
+vbfloat16mf4_t test_vfsgnj_vv_bf16mf4_mu(vbool64_t mask, vbfloat16mf4_t maskedoff, vbfloat16mf4_t op1, vbfloat16mf4_t op2, size_t vl) {
+ return __riscv_vfsgnj_vv_bf16mf4_mu(mask, maskedoff, op1, op2, vl);
+}
+
+// CHECK-RV64-LABEL: define dso_local <vscale x 1 x bfloat> @test_vfsgnj_vf_bf16mf4_mu(
+// CHECK-RV64-SAME: <vscale x 1 x i1> [[MASK:%.*]], <vscale x 1 x bfloat> [[MASKEDOFF:%.*]], <vscale x 1 x bfloat> [[OP1:%.*]], bfloat noundef [[OP2:%.*]], i64 noundef [[VL:%.*]]) #[[ATTR0]] {
+// CHECK-RV64-NEXT: [[ENTRY:.*:]]
+// CHECK-RV64-NEXT: [[TMP0:%.*]] = call <vscale x 1 x bfloat> @llvm.riscv.vfsgnj.mask.nxv1bf16.bf16.i64(<vscale x 1 x bfloat> [[MASKEDOFF]], <vscale x 1 x bfloat> [[OP1]], bfloat [[OP2]], <vscale x 1 x i1> [[MASK]], i64 [[VL]], i64 1)
+// CHECK-RV64-NEXT: ret <vscale x 1 x bfloat> [[TMP0]]
+//
+vbfloat16mf4_t test_vfsgnj_vf_bf16mf4_mu(vbool64_t mask, vbfloat16mf4_t maskedoff, vbfloat16mf4_t op1, __bf16 op2, size_t vl) {
+ return __riscv_vfsgnj_vf_bf16mf4_mu(mask, maskedoff, op1, op2, vl);
+}
+
+// CHECK-RV64-LABEL: define dso_local <vscale x 2 x bfloat> @test_vfsgnj_vv_bf16mf2_mu(
+// CHECK-RV64-SAME: <vscale x 2 x i1> [[MASK:%.*]], <vscale x 2 x bfloat> [[MASKEDOFF:%.*]], <vscale x 2 x bfloat> [[OP1:%.*]], <vscale x 2 x bfloat> [[OP2:%.*]], i64 noundef [[VL:%.*]]) #[[ATTR0]] {
+// CHECK-RV64-NEXT: [[ENTRY:.*:]]
+// CHECK-RV64-NEXT: [[TMP0:%.*]] = call <vscale x 2 x bfloat> @llvm.riscv.vfsgnj.mask.nxv2bf16.nxv2bf16.i64(<vscale x 2 x bfloat> [[MASKEDOFF]], <vscale x 2 x bfloat> [[OP1]], <vscale x 2 x bfloat> [[OP2]], <vscale x 2 x i1> [[MASK]], i64 [[VL]], i64 1)
+// CHECK-RV64-NEXT: ret <vscale x 2 x bfloat> [[TMP0]]
+//
+vbfloat16mf2_t test_vfsgnj_vv_bf16mf2_mu(vbool32_t mask, vbfloat16mf2_t maskedoff, vbfloat16mf2_t op1, vbfloat16mf2_t op2, size_t vl) {
+ return __riscv_vfsgnj_vv_bf16mf2_mu(mask, maskedoff, op1, op2, vl);
+}
+
+// CHECK-RV64-LABEL: define dso_local <vscale x 2 x bfloat> @test_vfsgnj_vf_bf16mf2_mu(
+// CHECK-RV64-SAME: <vscale x 2 x i1> [[MASK:%.*]], <vscale x 2 x bfloat> [[MASKEDOFF:%.*]], <vscale x 2 x bfloat> [[OP1:%.*]], bfloat noundef [[OP2:%.*]], i64 noundef [[VL:%.*]]) #[[ATTR0]] {
+// CHECK-RV64-NEXT: [[ENTRY:.*:]]
+// CHECK-RV64-NEXT: [[TMP0:%.*]] = call <vscale x 2 x bfloat> @llvm.riscv.vfsgnj.mask.nxv2bf16.bf16.i64(<vscale x 2 x bfloat> [[MASKEDOFF]], <vscale x 2 x bfloat> [[OP1]], bfloat [[OP2]], <vscale x 2 x i1> [[MASK]], i64 [[VL]], i64 1)
+// CHECK-RV64-NEXT: ret <vscale x 2 x bfloat> [[TMP0]]
+//
+vbfloat16mf2_t test_vfsgnj_vf_bf16mf2_mu(vbool32_t mask, vbfloat16mf2_t maskedoff, vbfloat16mf2_t op1, __bf16 op2, size_t vl) {
+ return __riscv_vfsgnj_vf_bf16mf2_mu(mask, maskedoff, op1, op2, vl);
+}
+
+// CHECK-RV64-LABEL: define dso_local <vscale x 4 x bfloat> @test_vfsgnj_vv_bf16m1_mu(
+// CHECK-RV64-SAME: <vscale x 4 x i1> [[MASK:%.*]], <vscale x 4 x bfloat> [[MASKEDOFF:%.*]], <vscale x 4 x bfloat> [[OP1:%.*]], <vscale x 4 x bfloat> [[OP2:%.*]], i64 noundef [[VL:%.*]]) #[[ATTR0]] {
+// CHECK-RV64-NEXT: [[ENTRY:.*:]]
+// CHECK-RV64-NEXT: [[TMP0:%.*]] = call <vscale x 4 x bfloat> @llvm.riscv.vfsgnj.mask.nxv4bf16.nxv4bf16.i64(<vscale x 4 x bfloat> [[MASKEDOFF]], <vscale x 4 x bfloat> [[OP1]], <vscale x 4 x bfloat> [[OP2]], <vscale x 4 x i1> [[MASK]], i64 [[VL]], i64 1)
+// CHECK-RV64-NEXT: ret <vscale x 4 x bfloat> [[TMP0]]
+//
+vbfloat16m1_t test_vfsgnj_vv_bf16m1_mu(vbool16_t mask, vbfloat16m1_t maskedoff, vbfloat16m1_t op1, vbfloat16m1_t op2, size_t vl) {
+ return __riscv_vfsgnj_vv_bf16m1_mu(mask, maskedoff, op1, op2, vl);
+}
+
+// CHECK-RV64-LABEL: define dso_local <vscale x 4 x bfloat> @test_vfsgnj_vf_bf16m1_mu(
+// CHECK-RV64-SAME: <vscale x 4 x i1> [[MASK:%.*]], <vscale x 4 x bfloat> [[MASKEDOFF:%.*]], <vscale x 4 x bfloat> [[OP1:%.*]], bfloat noundef [[OP2:%.*]], i64 noundef [[VL:%.*]]) #[[ATTR0]] {
+// CHECK-RV64-NEXT: [[ENTRY:.*:]]
+// CHECK-RV64-NEXT: [[TMP0:%.*]] = call <vscale x 4 x bfloat> @llvm.riscv.vfsgnj.mask.nxv4bf16.bf16.i64(<vscale x 4 x bfloat> [[MASKEDOFF]], <vscale x 4 x bfloat> [[OP1]], bfloat [[OP2]], <vscale x 4 x i1> [[MASK]], i64 [[VL]], i64 1)
+// CHECK-RV64-NEXT: ret <vscale x 4 x bfloat> [[TMP0]]
+//
+vbfloat16m1_t test_vfsgnj_vf_bf16m1_mu(vbool16_t mask, vbfloat16m1_t maskedoff, vbfloat16m1_t op1, __bf16 op2, size_t vl) {
+ return __riscv_vfsgnj_vf_bf16m1_mu(mask, maskedoff, op1, op2, vl);
+}
+
+// CHECK-RV64-LABEL: define dso_local <vscale x 8 x bfloat> @test_vfsgnj_vv_bf16m2_mu(
+// CHECK-RV64-SAME: <vscale x 8 x i1> [[MASK:%.*]], <vscale x 8 x bfloat> [[MASKEDOFF:%.*]], <vscale x 8 x bfloat> [[OP1:%.*]], <vscale x 8 x bfloat> [[OP2:%.*]], i64 noundef [[VL:%.*]]) #[[ATTR0]] {
+// CHECK-RV64-NEXT: [[ENTRY:.*:]]
+// CHECK-RV64-NEXT: [[TMP0:%.*]] = call <vscale x 8 x bfloat> @llvm.riscv.vfsgnj.mask.nxv8bf16.nxv8bf16.i64(<vscale x 8 x bfloat> [[MASKEDOFF]], <vscale x 8 x bfloat> [[OP1]], <vscale x 8 x bfloat> [[OP2]], <vscale x 8 x i1> [[MASK]], i64 [[VL]], i64 1)
+// CHECK-RV64-NEXT: ret <vscale x 8 x bfloat> [[TMP0]]
+//
+vbfloat16m2_t test_vfsgnj_vv_bf16m2_mu(vbool8_t mask, vbfloat16m2_t maskedoff, vbfloat16m2_t op1, vbfloat16m2_t op2, size_t vl) {
+ return __riscv_vfsgnj_vv_bf16m2_mu(mask, maskedoff, op1, op2, vl);
+}
+
+// CHECK-RV64-LABEL: define dso_local <vscale x 8 x bfloat> @test_vfsgnj_vf_bf16m2_mu(
+// CHECK-RV64-SAME: <vscale x 8 x i1> [[MASK:%.*]], <vscale x 8 x bfloat> [[MASKEDOFF:%.*]], <vscale x 8 x bfloat> [[OP1:%.*]], bfloat noundef [[OP2:%.*]], i64 noundef [[VL:%.*]]) #[[ATTR0]] {
+// CHECK-RV64-NEXT: [[ENTRY:.*:]]
+// CHECK-RV64-NEXT: [[TMP0:%.*]] = call <vscale x 8 x bfloat> @llvm.riscv.vfsgnj.mask.nxv8bf16.bf16.i64(<vscale x 8 x bfloat> [[MASKEDOFF]], <vscale x 8 x bfloat> [[OP1]], bfloat [[OP2]], <vscale x 8 x i1> [[MASK]], i64 [[VL]], i64 1)
+// CHECK-RV64-NEXT: ret <vscale x 8 x bfloat> [[TMP0]]
+//
+vbfloat16m2_t test_vfsgnj_vf_bf16m2_mu(vbool8_t mask, vbfloat16m2_t maskedoff, vbfloat16m2_t op1, __bf16 op2, size_t vl) {
+ return __riscv_vfsgnj_vf_bf16m2_mu(mask, maskedoff, op1, op2, vl);
+}
+
+// CHECK-RV64-LABEL: define dso_local <vscale x 16 x bfloat> @test_vfsgnj_vv_bf16m4_mu(
+// CHECK-RV64-SAME: <vscale x 16 x i1> [[MASK:%.*]], <vscale x 16 x bfloat> [[MASKEDOFF:%.*]], <vscale x 16 x bfloat> [[OP1:%.*]], <vscale x 16 x bfloat> [[OP2:%.*]], i64 noundef [[VL:%.*]]) #[[ATTR0]] {
+// CHECK-RV64-NEXT: [[ENTRY:.*:]]
+// CHECK-RV64-NEXT: [[TMP0:%.*]] = call <vscale x 16 x bfloat> @llvm.riscv.vfsgnj.mask.nxv16bf16.nxv16bf16.i64(<vscale x 16 x bfloat> [[MASKEDOFF]], <vscale x 16 x bfloat> [[OP1]], <vscale x 16 x bfloat> [[OP2]], <vscale x 16 x i1> [[MASK]], i64 [[VL]], i64 1)
+// CHECK-RV64-NEXT: ret <vscale x 16 x bfloat> [[TMP0]]
+//
+vbfloat16m4_t test_vfsgnj_vv_bf16m4_mu(vbool4_t mask, vbfloat16m4_t maskedoff, vbfloat16m4_t op1, vbfloat16m4_t op2, size_t vl) {
+ return __riscv_vfsgnj_vv_bf16m4_mu(mask, maskedoff, op1, op2, vl);
+}
+
+// CHECK-RV64-LABEL: define dso_local <vscale x 16 x bfloat> @test_vfsgnj_vf_bf16m4_mu(
+// CHECK-RV64-SAME: <vscale x 16 x i1> [[MASK:%.*]], <vscale x 16 x bfloat> [[MASKEDOFF:%.*]], <vscale x 16 x bfloat> [[OP1:%.*]], bfloat noundef [[OP2:%.*]], i64 noundef [[VL:%.*]]) #[[ATTR0]] {
+// CHECK-RV64-NEXT: [[ENTRY:.*:]]
+// CHECK-RV64-NEXT: [[TMP0:%.*]] = call <vscale x 16 x bfloat> @llvm.riscv.vfsgnj.mask.nxv16bf16.bf16.i64(<vscale x 16 x bfloat> [[MASKEDOFF]], <vscale x 16 x bfloat> [[OP1]], bfloat [[OP2]], <vscale x 16 x i1> [[MASK]], i64 [[VL]], i64 1)
+// CHECK-RV64-NEXT: ret <vscale x 16 x bfloat> [[TMP0]]
+//
+vbfloat16m4_t test_vfsgnj_vf_bf16m4_mu(vbool4_t mask, vbfloat16m4_t maskedoff, vbfloat16m4_t op1, __bf16 op2, size_t vl) {
+ return __riscv_vfsgnj_vf_bf16m4_mu(mask, maskedoff, op1, op2, vl);
+}
+
+// CHECK-RV64-LABEL: define dso_local <vscale x 32 x bfloat> @test_vfsgnj_vv_bf16m8_mu(
+// CHECK-RV64-SAME: <vscale x 32 x i1> [[MASK:%.*]], <vscale x 32 x bfloat> [[MASKEDOFF:%.*]], <vscale x 32 x bfloat> [[OP1:%.*]], <vscale x 32 x bfloat> [[OP2:%.*]], i64 noundef [[VL:%.*]]) #[[ATTR0]] {
+// CHECK-RV64-NEXT: [[ENTRY:.*:]]
+// CHECK-RV64-NEXT: [[TMP0:%.*]] = call <vscale x 32 x bfloat> @llvm.riscv.vfsgnj.mask.nxv32bf16.nxv32bf16.i64(<vscale x 32 x bfloat> [[MASKEDOFF]], <vscale x 32 x bfloat> [[OP1]], <vscale x 32 x bfloat> [[OP2]], <vscale x 32 x i1> [[MASK]], i64 [[VL]], i64 1)
+// CHECK-RV64-NEXT: ret <vscale x 32 x bfloat> [[TMP0]]
+//
+vbfloat16m8_t test_vfsgnj_vv_bf16m8_mu(vbool2_t mask, vbfloat16m8_t maskedoff, vbfloat16m8_t op1, vbfloat16m8_t op2, size_t vl) {
+ return __riscv_vfsgnj_vv_bf16m8_mu(mask, maskedoff, op1, op2, vl);
+}
+
+// CHECK-RV64-LABEL: define dso_local <vscale x 32 x bfloat> @test_vfsgnj_vf_bf16m8_mu(
+// CHECK-RV64-SAME: <vscale x 32 x i1> [[MASK:%.*]], <vscale x 32 x bfloat> [[MASKEDOFF:%.*]], <vscale x 32 x bfloat> [[OP1:%.*]], bfloat noundef [[OP2:%.*]], i64 noundef [[VL:%.*]]) #[[ATTR0]] {
+// CHECK-RV64-NEXT: [[ENTRY:.*:]]
+// CHECK-RV64-NEXT: [[TMP0:%.*]] = call <vscale x 32 x bfloat> @llvm.riscv.vfsgnj.mask.nxv32bf16.bf16.i64(<vscale x 32 x bfloat> [[MASKEDOFF]], <vscale x 32 x bfloat> [[OP1]], bfloat [[OP2]], <vscale x 32 x i1> [[MASK]], i64 [[VL]], i64 1)
+// CHECK-RV64-NEXT: ret <vscale x 32 x bfloat> [[TMP0]]
+//
+vbfloat16m8_t test_vfsgnj_vf_bf16m8_mu(vbool2_t mask, vbfloat16m8_t maskedoff, vbfloat16m8_t op1, __bf16 op2, size_t vl) {
+ return __riscv_vfsgnj_vf_bf16m8_mu(mask, maskedoff, op1, op2, vl);
+}
+
diff --git a/clang/test/CodeGen/RISCV/rvv-intrinsics-autogenerated/zvfbfa/policy/non-overloaded/vfsgnjn.c b/clang/test/CodeGen/RISCV/rvv-intrinsics-autogenerated/zvfbfa/policy/non-overloaded/vfsgnjn.c
new file mode 100644
index 0000000..7542e78
--- /dev/null
+++ b/clang/test/CodeGen/RISCV/rvv-intrinsics-autogenerated/zvfbfa/policy/non-overloaded/vfsgnjn.c
@@ -0,0 +1,489 @@
+// NOTE: Assertions have been autogenerated by utils/update_cc_test_checks.py UTC_ARGS: --version 5
+// REQUIRES: riscv-registered-target
+// RUN: %clang_cc1 -triple riscv64 -target-feature +v \
+// RUN: -target-feature +experimental-zvfbfa -disable-O0-optnone \
+// RUN: -emit-llvm %s -o - | opt -S -passes=mem2reg | \
+// RUN: FileCheck --check-prefix=CHECK-RV64 %s
+
+#include <riscv_vector.h>
+
+// CHECK-RV64-LABEL: define dso_local <vscale x 1 x bfloat> @test_vfsgnjn_vv_bf16mf4_tu(
+// CHECK-RV64-SAME: <vscale x 1 x bfloat> [[MASKEDOFF:%.*]], <vscale x 1 x bfloat> [[OP1:%.*]], <vscale x 1 x bfloat> [[OP2:%.*]], i64 noundef [[VL:%.*]]) #[[ATTR0:[0-9]+]] {
+// CHECK-RV64-NEXT: [[ENTRY:.*:]]
+// CHECK-RV64-NEXT: [[TMP0:%.*]] = call <vscale x 1 x bfloat> @llvm.riscv.vfsgnjn.nxv1bf16.nxv1bf16.i64(<vscale x 1 x bfloat> [[MASKEDOFF]], <vscale x 1 x bfloat> [[OP1]], <vscale x 1 x bfloat> [[OP2]], i64 [[VL]])
+// CHECK-RV64-NEXT: ret <vscale x 1 x bfloat> [[TMP0]]
+//
+vbfloat16mf4_t test_vfsgnjn_vv_bf16mf4_tu(vbfloat16mf4_t maskedoff, vbfloat16mf4_t op1, vbfloat16mf4_t op2, size_t vl) {
+ return __riscv_vfsgnjn_vv_bf16mf4_tu(maskedoff, op1, op2, vl);
+}
+
+// CHECK-RV64-LABEL: define dso_local <vscale x 1 x bfloat> @test_vfsgnjn_vf_bf16mf4_tu(
+// CHECK-RV64-SAME: <vscale x 1 x bfloat> [[MASKEDOFF:%.*]], <vscale x 1 x bfloat> [[OP1:%.*]], bfloat noundef [[OP2:%.*]], i64 noundef [[VL:%.*]]) #[[ATTR0]] {
+// CHECK-RV64-NEXT: [[ENTRY:.*:]]
+// CHECK-RV64-NEXT: [[TMP0:%.*]] = call <vscale x 1 x bfloat> @llvm.riscv.vfsgnjn.nxv1bf16.bf16.i64(<vscale x 1 x bfloat> [[MASKEDOFF]], <vscale x 1 x bfloat> [[OP1]], bfloat [[OP2]], i64 [[VL]])
+// CHECK-RV64-NEXT: ret <vscale x 1 x bfloat> [[TMP0]]
+//
+vbfloat16mf4_t test_vfsgnjn_vf_bf16mf4_tu(vbfloat16mf4_t maskedoff, vbfloat16mf4_t op1, __bf16 op2, size_t vl) {
+ return __riscv_vfsgnjn_vf_bf16mf4_tu(maskedoff, op1, op2, vl);
+}
+
+// CHECK-RV64-LABEL: define dso_local <vscale x 2 x bfloat> @test_vfsgnjn_vv_bf16mf2_tu(
+// CHECK-RV64-SAME: <vscale x 2 x bfloat> [[MASKEDOFF:%.*]], <vscale x 2 x bfloat> [[OP1:%.*]], <vscale x 2 x bfloat> [[OP2:%.*]], i64 noundef [[VL:%.*]]) #[[ATTR0]] {
+// CHECK-RV64-NEXT: [[ENTRY:.*:]]
+// CHECK-RV64-NEXT: [[TMP0:%.*]] = call <vscale x 2 x bfloat> @llvm.riscv.vfsgnjn.nxv2bf16.nxv2bf16.i64(<vscale x 2 x bfloat> [[MASKEDOFF]], <vscale x 2 x bfloat> [[OP1]], <vscale x 2 x bfloat> [[OP2]], i64 [[VL]])
+// CHECK-RV64-NEXT: ret <vscale x 2 x bfloat> [[TMP0]]
+//
+vbfloat16mf2_t test_vfsgnjn_vv_bf16mf2_tu(vbfloat16mf2_t maskedoff, vbfloat16mf2_t op1, vbfloat16mf2_t op2, size_t vl) {
+ return __riscv_vfsgnjn_vv_bf16mf2_tu(maskedoff, op1, op2, vl);
+}
+
+// CHECK-RV64-LABEL: define dso_local <vscale x 2 x bfloat> @test_vfsgnjn_vf_bf16mf2_tu(
+// CHECK-RV64-SAME: <vscale x 2 x bfloat> [[MASKEDOFF:%.*]], <vscale x 2 x bfloat> [[OP1:%.*]], bfloat noundef [[OP2:%.*]], i64 noundef [[VL:%.*]]) #[[ATTR0]] {
+// CHECK-RV64-NEXT: [[ENTRY:.*:]]
+// CHECK-RV64-NEXT: [[TMP0:%.*]] = call <vscale x 2 x bfloat> @llvm.riscv.vfsgnjn.nxv2bf16.bf16.i64(<vscale x 2 x bfloat> [[MASKEDOFF]], <vscale x 2 x bfloat> [[OP1]], bfloat [[OP2]], i64 [[VL]])
+// CHECK-RV64-NEXT: ret <vscale x 2 x bfloat> [[TMP0]]
+//
+vbfloat16mf2_t test_vfsgnjn_vf_bf16mf2_tu(vbfloat16mf2_t maskedoff, vbfloat16mf2_t op1, __bf16 op2, size_t vl) {
+ return __riscv_vfsgnjn_vf_bf16mf2_tu(maskedoff, op1, op2, vl);
+}
+
+// CHECK-RV64-LABEL: define dso_local <vscale x 4 x bfloat> @test_vfsgnjn_vv_bf16m1_tu(
+// CHECK-RV64-SAME: <vscale x 4 x bfloat> [[MASKEDOFF:%.*]], <vscale x 4 x bfloat> [[OP1:%.*]], <vscale x 4 x bfloat> [[OP2:%.*]], i64 noundef [[VL:%.*]]) #[[ATTR0]] {
+// CHECK-RV64-NEXT: [[ENTRY:.*:]]
+// CHECK-RV64-NEXT: [[TMP0:%.*]] = call <vscale x 4 x bfloat> @llvm.riscv.vfsgnjn.nxv4bf16.nxv4bf16.i64(<vscale x 4 x bfloat> [[MASKEDOFF]], <vscale x 4 x bfloat> [[OP1]], <vscale x 4 x bfloat> [[OP2]], i64 [[VL]])
+// CHECK-RV64-NEXT: ret <vscale x 4 x bfloat> [[TMP0]]
+//
+vbfloat16m1_t test_vfsgnjn_vv_bf16m1_tu(vbfloat16m1_t maskedoff, vbfloat16m1_t op1, vbfloat16m1_t op2, size_t vl) {
+ return __riscv_vfsgnjn_vv_bf16m1_tu(maskedoff, op1, op2, vl);
+}
+
+// CHECK-RV64-LABEL: define dso_local <vscale x 4 x bfloat> @test_vfsgnjn_vf_bf16m1_tu(
+// CHECK-RV64-SAME: <vscale x 4 x bfloat> [[MASKEDOFF:%.*]], <vscale x 4 x bfloat> [[OP1:%.*]], bfloat noundef [[OP2:%.*]], i64 noundef [[VL:%.*]]) #[[ATTR0]] {
+// CHECK-RV64-NEXT: [[ENTRY:.*:]]
+// CHECK-RV64-NEXT: [[TMP0:%.*]] = call <vscale x 4 x bfloat> @llvm.riscv.vfsgnjn.nxv4bf16.bf16.i64(<vscale x 4 x bfloat> [[MASKEDOFF]], <vscale x 4 x bfloat> [[OP1]], bfloat [[OP2]], i64 [[VL]])
+// CHECK-RV64-NEXT: ret <vscale x 4 x bfloat> [[TMP0]]
+//
+vbfloat16m1_t test_vfsgnjn_vf_bf16m1_tu(vbfloat16m1_t maskedoff, vbfloat16m1_t op1, __bf16 op2, size_t vl) {
+ return __riscv_vfsgnjn_vf_bf16m1_tu(maskedoff, op1, op2, vl);
+}
+
+// CHECK-RV64-LABEL: define dso_local <vscale x 8 x bfloat> @test_vfsgnjn_vv_bf16m2_tu(
+// CHECK-RV64-SAME: <vscale x 8 x bfloat> [[MASKEDOFF:%.*]], <vscale x 8 x bfloat> [[OP1:%.*]], <vscale x 8 x bfloat> [[OP2:%.*]], i64 noundef [[VL:%.*]]) #[[ATTR0]] {
+// CHECK-RV64-NEXT: [[ENTRY:.*:]]
+// CHECK-RV64-NEXT: [[TMP0:%.*]] = call <vscale x 8 x bfloat> @llvm.riscv.vfsgnjn.nxv8bf16.nxv8bf16.i64(<vscale x 8 x bfloat> [[MASKEDOFF]], <vscale x 8 x bfloat> [[OP1]], <vscale x 8 x bfloat> [[OP2]], i64 [[VL]])
+// CHECK-RV64-NEXT: ret <vscale x 8 x bfloat> [[TMP0]]
+//
+vbfloat16m2_t test_vfsgnjn_vv_bf16m2_tu(vbfloat16m2_t maskedoff, vbfloat16m2_t op1, vbfloat16m2_t op2, size_t vl) {
+ return __riscv_vfsgnjn_vv_bf16m2_tu(maskedoff, op1, op2, vl);
+}
+
+// CHECK-RV64-LABEL: define dso_local <vscale x 8 x bfloat> @test_vfsgnjn_vf_bf16m2_tu(
+// CHECK-RV64-SAME: <vscale x 8 x bfloat> [[MASKEDOFF:%.*]], <vscale x 8 x bfloat> [[OP1:%.*]], bfloat noundef [[OP2:%.*]], i64 noundef [[VL:%.*]]) #[[ATTR0]] {
+// CHECK-RV64-NEXT: [[ENTRY:.*:]]
+// CHECK-RV64-NEXT: [[TMP0:%.*]] = call <vscale x 8 x bfloat> @llvm.riscv.vfsgnjn.nxv8bf16.bf16.i64(<vscale x 8 x bfloat> [[MASKEDOFF]], <vscale x 8 x bfloat> [[OP1]], bfloat [[OP2]], i64 [[VL]])
+// CHECK-RV64-NEXT: ret <vscale x 8 x bfloat> [[TMP0]]
+//
+vbfloat16m2_t test_vfsgnjn_vf_bf16m2_tu(vbfloat16m2_t maskedoff, vbfloat16m2_t op1, __bf16 op2, size_t vl) {
+ return __riscv_vfsgnjn_vf_bf16m2_tu(maskedoff, op1, op2, vl);
+}
+
+// CHECK-RV64-LABEL: define dso_local <vscale x 16 x bfloat> @test_vfsgnjn_vv_bf16m4_tu(
+// CHECK-RV64-SAME: <vscale x 16 x bfloat> [[MASKEDOFF:%.*]], <vscale x 16 x bfloat> [[OP1:%.*]], <vscale x 16 x bfloat> [[OP2:%.*]], i64 noundef [[VL:%.*]]) #[[ATTR0]] {
+// CHECK-RV64-NEXT: [[ENTRY:.*:]]
+// CHECK-RV64-NEXT: [[TMP0:%.*]] = call <vscale x 16 x bfloat> @llvm.riscv.vfsgnjn.nxv16bf16.nxv16bf16.i64(<vscale x 16 x bfloat> [[MASKEDOFF]], <vscale x 16 x bfloat> [[OP1]], <vscale x 16 x bfloat> [[OP2]], i64 [[VL]])
+// CHECK-RV64-NEXT: ret <vscale x 16 x bfloat> [[TMP0]]
+//
+vbfloat16m4_t test_vfsgnjn_vv_bf16m4_tu(vbfloat16m4_t maskedoff, vbfloat16m4_t op1, vbfloat16m4_t op2, size_t vl) {
+ return __riscv_vfsgnjn_vv_bf16m4_tu(maskedoff, op1, op2, vl);
+}
+
+// CHECK-RV64-LABEL: define dso_local <vscale x 16 x bfloat> @test_vfsgnjn_vf_bf16m4_tu(
+// CHECK-RV64-SAME: <vscale x 16 x bfloat> [[MASKEDOFF:%.*]], <vscale x 16 x bfloat> [[OP1:%.*]], bfloat noundef [[OP2:%.*]], i64 noundef [[VL:%.*]]) #[[ATTR0]] {
+// CHECK-RV64-NEXT: [[ENTRY:.*:]]
+// CHECK-RV64-NEXT: [[TMP0:%.*]] = call <vscale x 16 x bfloat> @llvm.riscv.vfsgnjn.nxv16bf16.bf16.i64(<vscale x 16 x bfloat> [[MASKEDOFF]], <vscale x 16 x bfloat> [[OP1]], bfloat [[OP2]], i64 [[VL]])
+// CHECK-RV64-NEXT: ret <vscale x 16 x bfloat> [[TMP0]]
+//
+vbfloat16m4_t test_vfsgnjn_vf_bf16m4_tu(vbfloat16m4_t maskedoff, vbfloat16m4_t op1, __bf16 op2, size_t vl) {
+ return __riscv_vfsgnjn_vf_bf16m4_tu(maskedoff, op1, op2, vl);
+}
+
+// CHECK-RV64-LABEL: define dso_local <vscale x 32 x bfloat> @test_vfsgnjn_vv_bf16m8_tu(
+// CHECK-RV64-SAME: <vscale x 32 x bfloat> [[MASKEDOFF:%.*]], <vscale x 32 x bfloat> [[OP1:%.*]], <vscale x 32 x bfloat> [[OP2:%.*]], i64 noundef [[VL:%.*]]) #[[ATTR0]] {
+// CHECK-RV64-NEXT: [[ENTRY:.*:]]
+// CHECK-RV64-NEXT: [[TMP0:%.*]] = call <vscale x 32 x bfloat> @llvm.riscv.vfsgnjn.nxv32bf16.nxv32bf16.i64(<vscale x 32 x bfloat> [[MASKEDOFF]], <vscale x 32 x bfloat> [[OP1]], <vscale x 32 x bfloat> [[OP2]], i64 [[VL]])
+// CHECK-RV64-NEXT: ret <vscale x 32 x bfloat> [[TMP0]]
+//
+vbfloat16m8_t test_vfsgnjn_vv_bf16m8_tu(vbfloat16m8_t maskedoff, vbfloat16m8_t op1, vbfloat16m8_t op2, size_t vl) {
+ return __riscv_vfsgnjn_vv_bf16m8_tu(maskedoff, op1, op2, vl);
+}
+
+// CHECK-RV64-LABEL: define dso_local <vscale x 32 x bfloat> @test_vfsgnjn_vf_bf16m8_tu(
+// CHECK-RV64-SAME: <vscale x 32 x bfloat> [[MASKEDOFF:%.*]], <vscale x 32 x bfloat> [[OP1:%.*]], bfloat noundef [[OP2:%.*]], i64 noundef [[VL:%.*]]) #[[ATTR0]] {
+// CHECK-RV64-NEXT: [[ENTRY:.*:]]
+// CHECK-RV64-NEXT: [[TMP0:%.*]] = call <vscale x 32 x bfloat> @llvm.riscv.vfsgnjn.nxv32bf16.bf16.i64(<vscale x 32 x bfloat> [[MASKEDOFF]], <vscale x 32 x bfloat> [[OP1]], bfloat [[OP2]], i64 [[VL]])
+// CHECK-RV64-NEXT: ret <vscale x 32 x bfloat> [[TMP0]]
+//
+vbfloat16m8_t test_vfsgnjn_vf_bf16m8_tu(vbfloat16m8_t maskedoff, vbfloat16m8_t op1, __bf16 op2, size_t vl) {
+ return __riscv_vfsgnjn_vf_bf16m8_tu(maskedoff, op1, op2, vl);
+}
+
+// CHECK-RV64-LABEL: define dso_local <vscale x 1 x bfloat> @test_vfsgnjn_vv_bf16mf4_tum(
+// CHECK-RV64-SAME: <vscale x 1 x i1> [[MASK:%.*]], <vscale x 1 x bfloat> [[MASKEDOFF:%.*]], <vscale x 1 x bfloat> [[OP1:%.*]], <vscale x 1 x bfloat> [[OP2:%.*]], i64 noundef [[VL:%.*]]) #[[ATTR0]] {
+// CHECK-RV64-NEXT: [[ENTRY:.*:]]
+// CHECK-RV64-NEXT: [[TMP0:%.*]] = call <vscale x 1 x bfloat> @llvm.riscv.vfsgnjn.mask.nxv1bf16.nxv1bf16.i64(<vscale x 1 x bfloat> [[MASKEDOFF]], <vscale x 1 x bfloat> [[OP1]], <vscale x 1 x bfloat> [[OP2]], <vscale x 1 x i1> [[MASK]], i64 [[VL]], i64 2)
+// CHECK-RV64-NEXT: ret <vscale x 1 x bfloat> [[TMP0]]
+//
+vbfloat16mf4_t test_vfsgnjn_vv_bf16mf4_tum(vbool64_t mask, vbfloat16mf4_t maskedoff, vbfloat16mf4_t op1, vbfloat16mf4_t op2, size_t vl) {
+ return __riscv_vfsgnjn_vv_bf16mf4_tum(mask, maskedoff, op1, op2, vl);
+}
+
+// CHECK-RV64-LABEL: define dso_local <vscale x 1 x bfloat> @test_vfsgnjn_vf_bf16mf4_tum(
+// CHECK-RV64-SAME: <vscale x 1 x i1> [[MASK:%.*]], <vscale x 1 x bfloat> [[MASKEDOFF:%.*]], <vscale x 1 x bfloat> [[OP1:%.*]], bfloat noundef [[OP2:%.*]], i64 noundef [[VL:%.*]]) #[[ATTR0]] {
+// CHECK-RV64-NEXT: [[ENTRY:.*:]]
+// CHECK-RV64-NEXT: [[TMP0:%.*]] = call <vscale x 1 x bfloat> @llvm.riscv.vfsgnjn.mask.nxv1bf16.bf16.i64(<vscale x 1 x bfloat> [[MASKEDOFF]], <vscale x 1 x bfloat> [[OP1]], bfloat [[OP2]], <vscale x 1 x i1> [[MASK]], i64 [[VL]], i64 2)
+// CHECK-RV64-NEXT: ret <vscale x 1 x bfloat> [[TMP0]]
+//
+vbfloat16mf4_t test_vfsgnjn_vf_bf16mf4_tum(vbool64_t mask, vbfloat16mf4_t maskedoff, vbfloat16mf4_t op1, __bf16 op2, size_t vl) {
+ return __riscv_vfsgnjn_vf_bf16mf4_tum(mask, maskedoff, op1, op2, vl);
+}
+
+// CHECK-RV64-LABEL: define dso_local <vscale x 2 x bfloat> @test_vfsgnjn_vv_bf16mf2_tum(
+// CHECK-RV64-SAME: <vscale x 2 x i1> [[MASK:%.*]], <vscale x 2 x bfloat> [[MASKEDOFF:%.*]], <vscale x 2 x bfloat> [[OP1:%.*]], <vscale x 2 x bfloat> [[OP2:%.*]], i64 noundef [[VL:%.*]]) #[[ATTR0]] {
+// CHECK-RV64-NEXT: [[ENTRY:.*:]]
+// CHECK-RV64-NEXT: [[TMP0:%.*]] = call <vscale x 2 x bfloat> @llvm.riscv.vfsgnjn.mask.nxv2bf16.nxv2bf16.i64(<vscale x 2 x bfloat> [[MASKEDOFF]], <vscale x 2 x bfloat> [[OP1]], <vscale x 2 x bfloat> [[OP2]], <vscale x 2 x i1> [[MASK]], i64 [[VL]], i64 2)
+// CHECK-RV64-NEXT: ret <vscale x 2 x bfloat> [[TMP0]]
+//
+vbfloat16mf2_t test_vfsgnjn_vv_bf16mf2_tum(vbool32_t mask, vbfloat16mf2_t maskedoff, vbfloat16mf2_t op1, vbfloat16mf2_t op2, size_t vl) {
+ return __riscv_vfsgnjn_vv_bf16mf2_tum(mask, maskedoff, op1, op2, vl);
+}
+
+// CHECK-RV64-LABEL: define dso_local <vscale x 2 x bfloat> @test_vfsgnjn_vf_bf16mf2_tum(
+// CHECK-RV64-SAME: <vscale x 2 x i1> [[MASK:%.*]], <vscale x 2 x bfloat> [[MASKEDOFF:%.*]], <vscale x 2 x bfloat> [[OP1:%.*]], bfloat noundef [[OP2:%.*]], i64 noundef [[VL:%.*]]) #[[ATTR0]] {
+// CHECK-RV64-NEXT: [[ENTRY:.*:]]
+// CHECK-RV64-NEXT: [[TMP0:%.*]] = call <vscale x 2 x bfloat> @llvm.riscv.vfsgnjn.mask.nxv2bf16.bf16.i64(<vscale x 2 x bfloat> [[MASKEDOFF]], <vscale x 2 x bfloat> [[OP1]], bfloat [[OP2]], <vscale x 2 x i1> [[MASK]], i64 [[VL]], i64 2)
+// CHECK-RV64-NEXT: ret <vscale x 2 x bfloat> [[TMP0]]
+//
+vbfloat16mf2_t test_vfsgnjn_vf_bf16mf2_tum(vbool32_t mask, vbfloat16mf2_t maskedoff, vbfloat16mf2_t op1, __bf16 op2, size_t vl) {
+ return __riscv_vfsgnjn_vf_bf16mf2_tum(mask, maskedoff, op1, op2, vl);
+}
+
+// CHECK-RV64-LABEL: define dso_local <vscale x 4 x bfloat> @test_vfsgnjn_vv_bf16m1_tum(
+// CHECK-RV64-SAME: <vscale x 4 x i1> [[MASK:%.*]], <vscale x 4 x bfloat> [[MASKEDOFF:%.*]], <vscale x 4 x bfloat> [[OP1:%.*]], <vscale x 4 x bfloat> [[OP2:%.*]], i64 noundef [[VL:%.*]]) #[[ATTR0]] {
+// CHECK-RV64-NEXT: [[ENTRY:.*:]]
+// CHECK-RV64-NEXT: [[TMP0:%.*]] = call <vscale x 4 x bfloat> @llvm.riscv.vfsgnjn.mask.nxv4bf16.nxv4bf16.i64(<vscale x 4 x bfloat> [[MASKEDOFF]], <vscale x 4 x bfloat> [[OP1]], <vscale x 4 x bfloat> [[OP2]], <vscale x 4 x i1> [[MASK]], i64 [[VL]], i64 2)
+// CHECK-RV64-NEXT: ret <vscale x 4 x bfloat> [[TMP0]]
+//
+vbfloat16m1_t test_vfsgnjn_vv_bf16m1_tum(vbool16_t mask, vbfloat16m1_t maskedoff, vbfloat16m1_t op1, vbfloat16m1_t op2, size_t vl) {
+ return __riscv_vfsgnjn_vv_bf16m1_tum(mask, maskedoff, op1, op2, vl);
+}
+
+// CHECK-RV64-LABEL: define dso_local <vscale x 4 x bfloat> @test_vfsgnjn_vf_bf16m1_tum(
+// CHECK-RV64-SAME: <vscale x 4 x i1> [[MASK:%.*]], <vscale x 4 x bfloat> [[MASKEDOFF:%.*]], <vscale x 4 x bfloat> [[OP1:%.*]], bfloat noundef [[OP2:%.*]], i64 noundef [[VL:%.*]]) #[[ATTR0]] {
+// CHECK-RV64-NEXT: [[ENTRY:.*:]]
+// CHECK-RV64-NEXT: [[TMP0:%.*]] = call <vscale x 4 x bfloat> @llvm.riscv.vfsgnjn.mask.nxv4bf16.bf16.i64(<vscale x 4 x bfloat> [[MASKEDOFF]], <vscale x 4 x bfloat> [[OP1]], bfloat [[OP2]], <vscale x 4 x i1> [[MASK]], i64 [[VL]], i64 2)
+// CHECK-RV64-NEXT: ret <vscale x 4 x bfloat> [[TMP0]]
+//
+vbfloat16m1_t test_vfsgnjn_vf_bf16m1_tum(vbool16_t mask, vbfloat16m1_t maskedoff, vbfloat16m1_t op1, __bf16 op2, size_t vl) {
+ return __riscv_vfsgnjn_vf_bf16m1_tum(mask, maskedoff, op1, op2, vl);
+}
+
+// CHECK-RV64-LABEL: define dso_local <vscale x 8 x bfloat> @test_vfsgnjn_vv_bf16m2_tum(
+// CHECK-RV64-SAME: <vscale x 8 x i1> [[MASK:%.*]], <vscale x 8 x bfloat> [[MASKEDOFF:%.*]], <vscale x 8 x bfloat> [[OP1:%.*]], <vscale x 8 x bfloat> [[OP2:%.*]], i64 noundef [[VL:%.*]]) #[[ATTR0]] {
+// CHECK-RV64-NEXT: [[ENTRY:.*:]]
+// CHECK-RV64-NEXT: [[TMP0:%.*]] = call <vscale x 8 x bfloat> @llvm.riscv.vfsgnjn.mask.nxv8bf16.nxv8bf16.i64(<vscale x 8 x bfloat> [[MASKEDOFF]], <vscale x 8 x bfloat> [[OP1]], <vscale x 8 x bfloat> [[OP2]], <vscale x 8 x i1> [[MASK]], i64 [[VL]], i64 2)
+// CHECK-RV64-NEXT: ret <vscale x 8 x bfloat> [[TMP0]]
+//
+vbfloat16m2_t test_vfsgnjn_vv_bf16m2_tum(vbool8_t mask, vbfloat16m2_t maskedoff, vbfloat16m2_t op1, vbfloat16m2_t op2, size_t vl) {
+ return __riscv_vfsgnjn_vv_bf16m2_tum(mask, maskedoff, op1, op2, vl);
+}
+
+// CHECK-RV64-LABEL: define dso_local <vscale x 8 x bfloat> @test_vfsgnjn_vf_bf16m2_tum(
+// CHECK-RV64-SAME: <vscale x 8 x i1> [[MASK:%.*]], <vscale x 8 x bfloat> [[MASKEDOFF:%.*]], <vscale x 8 x bfloat> [[OP1:%.*]], bfloat noundef [[OP2:%.*]], i64 noundef [[VL:%.*]]) #[[ATTR0]] {
+// CHECK-RV64-NEXT: [[ENTRY:.*:]]
+// CHECK-RV64-NEXT: [[TMP0:%.*]] = call <vscale x 8 x bfloat> @llvm.riscv.vfsgnjn.mask.nxv8bf16.bf16.i64(<vscale x 8 x bfloat> [[MASKEDOFF]], <vscale x 8 x bfloat> [[OP1]], bfloat [[OP2]], <vscale x 8 x i1> [[MASK]], i64 [[VL]], i64 2)
+// CHECK-RV64-NEXT: ret <vscale x 8 x bfloat> [[TMP0]]
+//
+vbfloat16m2_t test_vfsgnjn_vf_bf16m2_tum(vbool8_t mask, vbfloat16m2_t maskedoff, vbfloat16m2_t op1, __bf16 op2, size_t vl) {
+ return __riscv_vfsgnjn_vf_bf16m2_tum(mask, maskedoff, op1, op2, vl);
+}
+
+// CHECK-RV64-LABEL: define dso_local <vscale x 16 x bfloat> @test_vfsgnjn_vv_bf16m4_tum(
+// CHECK-RV64-SAME: <vscale x 16 x i1> [[MASK:%.*]], <vscale x 16 x bfloat> [[MASKEDOFF:%.*]], <vscale x 16 x bfloat> [[OP1:%.*]], <vscale x 16 x bfloat> [[OP2:%.*]], i64 noundef [[VL:%.*]]) #[[ATTR0]] {
+// CHECK-RV64-NEXT: [[ENTRY:.*:]]
+// CHECK-RV64-NEXT: [[TMP0:%.*]] = call <vscale x 16 x bfloat> @llvm.riscv.vfsgnjn.mask.nxv16bf16.nxv16bf16.i64(<vscale x 16 x bfloat> [[MASKEDOFF]], <vscale x 16 x bfloat> [[OP1]], <vscale x 16 x bfloat> [[OP2]], <vscale x 16 x i1> [[MASK]], i64 [[VL]], i64 2)
+// CHECK-RV64-NEXT: ret <vscale x 16 x bfloat> [[TMP0]]
+//
+vbfloat16m4_t test_vfsgnjn_vv_bf16m4_tum(vbool4_t mask, vbfloat16m4_t maskedoff, vbfloat16m4_t op1, vbfloat16m4_t op2, size_t vl) {
+ return __riscv_vfsgnjn_vv_bf16m4_tum(mask, maskedoff, op1, op2, vl);
+}
+
+// CHECK-RV64-LABEL: define dso_local <vscale x 16 x bfloat> @test_vfsgnjn_vf_bf16m4_tum(
+// CHECK-RV64-SAME: <vscale x 16 x i1> [[MASK:%.*]], <vscale x 16 x bfloat> [[MASKEDOFF:%.*]], <vscale x 16 x bfloat> [[OP1:%.*]], bfloat noundef [[OP2:%.*]], i64 noundef [[VL:%.*]]) #[[ATTR0]] {
+// CHECK-RV64-NEXT: [[ENTRY:.*:]]
+// CHECK-RV64-NEXT: [[TMP0:%.*]] = call <vscale x 16 x bfloat> @llvm.riscv.vfsgnjn.mask.nxv16bf16.bf16.i64(<vscale x 16 x bfloat> [[MASKEDOFF]], <vscale x 16 x bfloat> [[OP1]], bfloat [[OP2]], <vscale x 16 x i1> [[MASK]], i64 [[VL]], i64 2)
+// CHECK-RV64-NEXT: ret <vscale x 16 x bfloat> [[TMP0]]
+//
+vbfloat16m4_t test_vfsgnjn_vf_bf16m4_tum(vbool4_t mask, vbfloat16m4_t maskedoff, vbfloat16m4_t op1, __bf16 op2, size_t vl) {
+ return __riscv_vfsgnjn_vf_bf16m4_tum(mask, maskedoff, op1, op2, vl);
+}
+
+// CHECK-RV64-LABEL: define dso_local <vscale x 32 x bfloat> @test_vfsgnjn_vv_bf16m8_tum(
+// CHECK-RV64-SAME: <vscale x 32 x i1> [[MASK:%.*]], <vscale x 32 x bfloat> [[MASKEDOFF:%.*]], <vscale x 32 x bfloat> [[OP1:%.*]], <vscale x 32 x bfloat> [[OP2:%.*]], i64 noundef [[VL:%.*]]) #[[ATTR0]] {
+// CHECK-RV64-NEXT: [[ENTRY:.*:]]
+// CHECK-RV64-NEXT: [[TMP0:%.*]] = call <vscale x 32 x bfloat> @llvm.riscv.vfsgnjn.mask.nxv32bf16.nxv32bf16.i64(<vscale x 32 x bfloat> [[MASKEDOFF]], <vscale x 32 x bfloat> [[OP1]], <vscale x 32 x bfloat> [[OP2]], <vscale x 32 x i1> [[MASK]], i64 [[VL]], i64 2)
+// CHECK-RV64-NEXT: ret <vscale x 32 x bfloat> [[TMP0]]
+//
+vbfloat16m8_t test_vfsgnjn_vv_bf16m8_tum(vbool2_t mask, vbfloat16m8_t maskedoff, vbfloat16m8_t op1, vbfloat16m8_t op2, size_t vl) {
+ return __riscv_vfsgnjn_vv_bf16m8_tum(mask, maskedoff, op1, op2, vl);
+}
+
+// CHECK-RV64-LABEL: define dso_local <vscale x 32 x bfloat> @test_vfsgnjn_vf_bf16m8_tum(
+// CHECK-RV64-SAME: <vscale x 32 x i1> [[MASK:%.*]], <vscale x 32 x bfloat> [[MASKEDOFF:%.*]], <vscale x 32 x bfloat> [[OP1:%.*]], bfloat noundef [[OP2:%.*]], i64 noundef [[VL:%.*]]) #[[ATTR0]] {
+// CHECK-RV64-NEXT: [[ENTRY:.*:]]
+// CHECK-RV64-NEXT: [[TMP0:%.*]] = call <vscale x 32 x bfloat> @llvm.riscv.vfsgnjn.mask.nxv32bf16.bf16.i64(<vscale x 32 x bfloat> [[MASKEDOFF]], <vscale x 32 x bfloat> [[OP1]], bfloat [[OP2]], <vscale x 32 x i1> [[MASK]], i64 [[VL]], i64 2)
+// CHECK-RV64-NEXT: ret <vscale x 32 x bfloat> [[TMP0]]
+//
+vbfloat16m8_t test_vfsgnjn_vf_bf16m8_tum(vbool2_t mask, vbfloat16m8_t maskedoff, vbfloat16m8_t op1, __bf16 op2, size_t vl) {
+ return __riscv_vfsgnjn_vf_bf16m8_tum(mask, maskedoff, op1, op2, vl);
+}
+
+// CHECK-RV64-LABEL: define dso_local <vscale x 1 x bfloat> @test_vfsgnjn_vv_bf16mf4_tumu(
+// CHECK-RV64-SAME: <vscale x 1 x i1> [[MASK:%.*]], <vscale x 1 x bfloat> [[MASKEDOFF:%.*]], <vscale x 1 x bfloat> [[OP1:%.*]], <vscale x 1 x bfloat> [[OP2:%.*]], i64 noundef [[VL:%.*]]) #[[ATTR0]] {
+// CHECK-RV64-NEXT: [[ENTRY:.*:]]
+// CHECK-RV64-NEXT: [[TMP0:%.*]] = call <vscale x 1 x bfloat> @llvm.riscv.vfsgnjn.mask.nxv1bf16.nxv1bf16.i64(<vscale x 1 x bfloat> [[MASKEDOFF]], <vscale x 1 x bfloat> [[OP1]], <vscale x 1 x bfloat> [[OP2]], <vscale x 1 x i1> [[MASK]], i64 [[VL]], i64 0)
+// CHECK-RV64-NEXT: ret <vscale x 1 x bfloat> [[TMP0]]
+//
+vbfloat16mf4_t test_vfsgnjn_vv_bf16mf4_tumu(vbool64_t mask, vbfloat16mf4_t maskedoff, vbfloat16mf4_t op1, vbfloat16mf4_t op2, size_t vl) {
+ return __riscv_vfsgnjn_vv_bf16mf4_tumu(mask, maskedoff, op1, op2, vl);
+}
+
+// CHECK-RV64-LABEL: define dso_local <vscale x 1 x bfloat> @test_vfsgnjn_vf_bf16mf4_tumu(
+// CHECK-RV64-SAME: <vscale x 1 x i1> [[MASK:%.*]], <vscale x 1 x bfloat> [[MASKEDOFF:%.*]], <vscale x 1 x bfloat> [[OP1:%.*]], bfloat noundef [[OP2:%.*]], i64 noundef [[VL:%.*]]) #[[ATTR0]] {
+// CHECK-RV64-NEXT: [[ENTRY:.*:]]
+// CHECK-RV64-NEXT: [[TMP0:%.*]] = call <vscale x 1 x bfloat> @llvm.riscv.vfsgnjn.mask.nxv1bf16.bf16.i64(<vscale x 1 x bfloat> [[MASKEDOFF]], <vscale x 1 x bfloat> [[OP1]], bfloat [[OP2]], <vscale x 1 x i1> [[MASK]], i64 [[VL]], i64 0)
+// CHECK-RV64-NEXT: ret <vscale x 1 x bfloat> [[TMP0]]
+//
+vbfloat16mf4_t test_vfsgnjn_vf_bf16mf4_tumu(vbool64_t mask, vbfloat16mf4_t maskedoff, vbfloat16mf4_t op1, __bf16 op2, size_t vl) {
+ return __riscv_vfsgnjn_vf_bf16mf4_tumu(mask, maskedoff, op1, op2, vl);
+}
+
+// CHECK-RV64-LABEL: define dso_local <vscale x 2 x bfloat> @test_vfsgnjn_vv_bf16mf2_tumu(
+// CHECK-RV64-SAME: <vscale x 2 x i1> [[MASK:%.*]], <vscale x 2 x bfloat> [[MASKEDOFF:%.*]], <vscale x 2 x bfloat> [[OP1:%.*]], <vscale x 2 x bfloat> [[OP2:%.*]], i64 noundef [[VL:%.*]]) #[[ATTR0]] {
+// CHECK-RV64-NEXT: [[ENTRY:.*:]]
+// CHECK-RV64-NEXT: [[TMP0:%.*]] = call <vscale x 2 x bfloat> @llvm.riscv.vfsgnjn.mask.nxv2bf16.nxv2bf16.i64(<vscale x 2 x bfloat> [[MASKEDOFF]], <vscale x 2 x bfloat> [[OP1]], <vscale x 2 x bfloat> [[OP2]], <vscale x 2 x i1> [[MASK]], i64 [[VL]], i64 0)
+// CHECK-RV64-NEXT: ret <vscale x 2 x bfloat> [[TMP0]]
+//
+vbfloat16mf2_t test_vfsgnjn_vv_bf16mf2_tumu(vbool32_t mask, vbfloat16mf2_t maskedoff, vbfloat16mf2_t op1, vbfloat16mf2_t op2, size_t vl) {
+ return __riscv_vfsgnjn_vv_bf16mf2_tumu(mask, maskedoff, op1, op2, vl);
+}
+
+// CHECK-RV64-LABEL: define dso_local <vscale x 2 x bfloat> @test_vfsgnjn_vf_bf16mf2_tumu(
+// CHECK-RV64-SAME: <vscale x 2 x i1> [[MASK:%.*]], <vscale x 2 x bfloat> [[MASKEDOFF:%.*]], <vscale x 2 x bfloat> [[OP1:%.*]], bfloat noundef [[OP2:%.*]], i64 noundef [[VL:%.*]]) #[[ATTR0]] {
+// CHECK-RV64-NEXT: [[ENTRY:.*:]]
+// CHECK-RV64-NEXT: [[TMP0:%.*]] = call <vscale x 2 x bfloat> @llvm.riscv.vfsgnjn.mask.nxv2bf16.bf16.i64(<vscale x 2 x bfloat> [[MASKEDOFF]], <vscale x 2 x bfloat> [[OP1]], bfloat [[OP2]], <vscale x 2 x i1> [[MASK]], i64 [[VL]], i64 0)
+// CHECK-RV64-NEXT: ret <vscale x 2 x bfloat> [[TMP0]]
+//
+vbfloat16mf2_t test_vfsgnjn_vf_bf16mf2_tumu(vbool32_t mask, vbfloat16mf2_t maskedoff, vbfloat16mf2_t op1, __bf16 op2, size_t vl) {
+ return __riscv_vfsgnjn_vf_bf16mf2_tumu(mask, maskedoff, op1, op2, vl);
+}
+
+// CHECK-RV64-LABEL: define dso_local <vscale x 4 x bfloat> @test_vfsgnjn_vv_bf16m1_tumu(
+// CHECK-RV64-SAME: <vscale x 4 x i1> [[MASK:%.*]], <vscale x 4 x bfloat> [[MASKEDOFF:%.*]], <vscale x 4 x bfloat> [[OP1:%.*]], <vscale x 4 x bfloat> [[OP2:%.*]], i64 noundef [[VL:%.*]]) #[[ATTR0]] {
+// CHECK-RV64-NEXT: [[ENTRY:.*:]]
+// CHECK-RV64-NEXT: [[TMP0:%.*]] = call <vscale x 4 x bfloat> @llvm.riscv.vfsgnjn.mask.nxv4bf16.nxv4bf16.i64(<vscale x 4 x bfloat> [[MASKEDOFF]], <vscale x 4 x bfloat> [[OP1]], <vscale x 4 x bfloat> [[OP2]], <vscale x 4 x i1> [[MASK]], i64 [[VL]], i64 0)
+// CHECK-RV64-NEXT: ret <vscale x 4 x bfloat> [[TMP0]]
+//
+vbfloat16m1_t test_vfsgnjn_vv_bf16m1_tumu(vbool16_t mask, vbfloat16m1_t maskedoff, vbfloat16m1_t op1, vbfloat16m1_t op2, size_t vl) {
+ return __riscv_vfsgnjn_vv_bf16m1_tumu(mask, maskedoff, op1, op2, vl);
+}
+
+// CHECK-RV64-LABEL: define dso_local <vscale x 4 x bfloat> @test_vfsgnjn_vf_bf16m1_tumu(
+// CHECK-RV64-SAME: <vscale x 4 x i1> [[MASK:%.*]], <vscale x 4 x bfloat> [[MASKEDOFF:%.*]], <vscale x 4 x bfloat> [[OP1:%.*]], bfloat noundef [[OP2:%.*]], i64 noundef [[VL:%.*]]) #[[ATTR0]] {
+// CHECK-RV64-NEXT: [[ENTRY:.*:]]
+// CHECK-RV64-NEXT: [[TMP0:%.*]] = call <vscale x 4 x bfloat> @llvm.riscv.vfsgnjn.mask.nxv4bf16.bf16.i64(<vscale x 4 x bfloat> [[MASKEDOFF]], <vscale x 4 x bfloat> [[OP1]], bfloat [[OP2]], <vscale x 4 x i1> [[MASK]], i64 [[VL]], i64 0)
+// CHECK-RV64-NEXT: ret <vscale x 4 x bfloat> [[TMP0]]
+//
+vbfloat16m1_t test_vfsgnjn_vf_bf16m1_tumu(vbool16_t mask, vbfloat16m1_t maskedoff, vbfloat16m1_t op1, __bf16 op2, size_t vl) {
+ return __riscv_vfsgnjn_vf_bf16m1_tumu(mask, maskedoff, op1, op2, vl);
+}
+
+// CHECK-RV64-LABEL: define dso_local <vscale x 8 x bfloat> @test_vfsgnjn_vv_bf16m2_tumu(
+// CHECK-RV64-SAME: <vscale x 8 x i1> [[MASK:%.*]], <vscale x 8 x bfloat> [[MASKEDOFF:%.*]], <vscale x 8 x bfloat> [[OP1:%.*]], <vscale x 8 x bfloat> [[OP2:%.*]], i64 noundef [[VL:%.*]]) #[[ATTR0]] {
+// CHECK-RV64-NEXT: [[ENTRY:.*:]]
+// CHECK-RV64-NEXT: [[TMP0:%.*]] = call <vscale x 8 x bfloat> @llvm.riscv.vfsgnjn.mask.nxv8bf16.nxv8bf16.i64(<vscale x 8 x bfloat> [[MASKEDOFF]], <vscale x 8 x bfloat> [[OP1]], <vscale x 8 x bfloat> [[OP2]], <vscale x 8 x i1> [[MASK]], i64 [[VL]], i64 0)
+// CHECK-RV64-NEXT: ret <vscale x 8 x bfloat> [[TMP0]]
+//
+vbfloat16m2_t test_vfsgnjn_vv_bf16m2_tumu(vbool8_t mask, vbfloat16m2_t maskedoff, vbfloat16m2_t op1, vbfloat16m2_t op2, size_t vl) {
+ return __riscv_vfsgnjn_vv_bf16m2_tumu(mask, maskedoff, op1, op2, vl);
+}
+
+// CHECK-RV64-LABEL: define dso_local <vscale x 8 x bfloat> @test_vfsgnjn_vf_bf16m2_tumu(
+// CHECK-RV64-SAME: <vscale x 8 x i1> [[MASK:%.*]], <vscale x 8 x bfloat> [[MASKEDOFF:%.*]], <vscale x 8 x bfloat> [[OP1:%.*]], bfloat noundef [[OP2:%.*]], i64 noundef [[VL:%.*]]) #[[ATTR0]] {
+// CHECK-RV64-NEXT: [[ENTRY:.*:]]
+// CHECK-RV64-NEXT: [[TMP0:%.*]] = call <vscale x 8 x bfloat> @llvm.riscv.vfsgnjn.mask.nxv8bf16.bf16.i64(<vscale x 8 x bfloat> [[MASKEDOFF]], <vscale x 8 x bfloat> [[OP1]], bfloat [[OP2]], <vscale x 8 x i1> [[MASK]], i64 [[VL]], i64 0)
+// CHECK-RV64-NEXT: ret <vscale x 8 x bfloat> [[TMP0]]
+//
+vbfloat16m2_t test_vfsgnjn_vf_bf16m2_tumu(vbool8_t mask, vbfloat16m2_t maskedoff, vbfloat16m2_t op1, __bf16 op2, size_t vl) {
+ return __riscv_vfsgnjn_vf_bf16m2_tumu(mask, maskedoff, op1, op2, vl);
+}
+
+// CHECK-RV64-LABEL: define dso_local <vscale x 16 x bfloat> @test_vfsgnjn_vv_bf16m4_tumu(
+// CHECK-RV64-SAME: <vscale x 16 x i1> [[MASK:%.*]], <vscale x 16 x bfloat> [[MASKEDOFF:%.*]], <vscale x 16 x bfloat> [[OP1:%.*]], <vscale x 16 x bfloat> [[OP2:%.*]], i64 noundef [[VL:%.*]]) #[[ATTR0]] {
+// CHECK-RV64-NEXT: [[ENTRY:.*:]]
+// CHECK-RV64-NEXT: [[TMP0:%.*]] = call <vscale x 16 x bfloat> @llvm.riscv.vfsgnjn.mask.nxv16bf16.nxv16bf16.i64(<vscale x 16 x bfloat> [[MASKEDOFF]], <vscale x 16 x bfloat> [[OP1]], <vscale x 16 x bfloat> [[OP2]], <vscale x 16 x i1> [[MASK]], i64 [[VL]], i64 0)
+// CHECK-RV64-NEXT: ret <vscale x 16 x bfloat> [[TMP0]]
+//
+vbfloat16m4_t test_vfsgnjn_vv_bf16m4_tumu(vbool4_t mask, vbfloat16m4_t maskedoff, vbfloat16m4_t op1, vbfloat16m4_t op2, size_t vl) {
+ return __riscv_vfsgnjn_vv_bf16m4_tumu(mask, maskedoff, op1, op2, vl);
+}
+
+// CHECK-RV64-LABEL: define dso_local <vscale x 16 x bfloat> @test_vfsgnjn_vf_bf16m4_tumu(
+// CHECK-RV64-SAME: <vscale x 16 x i1> [[MASK:%.*]], <vscale x 16 x bfloat> [[MASKEDOFF:%.*]], <vscale x 16 x bfloat> [[OP1:%.*]], bfloat noundef [[OP2:%.*]], i64 noundef [[VL:%.*]]) #[[ATTR0]] {
+// CHECK-RV64-NEXT: [[ENTRY:.*:]]
+// CHECK-RV64-NEXT: [[TMP0:%.*]] = call <vscale x 16 x bfloat> @llvm.riscv.vfsgnjn.mask.nxv16bf16.bf16.i64(<vscale x 16 x bfloat> [[MASKEDOFF]], <vscale x 16 x bfloat> [[OP1]], bfloat [[OP2]], <vscale x 16 x i1> [[MASK]], i64 [[VL]], i64 0)
+// CHECK-RV64-NEXT: ret <vscale x 16 x bfloat> [[TMP0]]
+//
+vbfloat16m4_t test_vfsgnjn_vf_bf16m4_tumu(vbool4_t mask, vbfloat16m4_t maskedoff, vbfloat16m4_t op1, __bf16 op2, size_t vl) {
+ return __riscv_vfsgnjn_vf_bf16m4_tumu(mask, maskedoff, op1, op2, vl);
+}
+
+// CHECK-RV64-LABEL: define dso_local <vscale x 32 x bfloat> @test_vfsgnjn_vv_bf16m8_tumu(
+// CHECK-RV64-SAME: <vscale x 32 x i1> [[MASK:%.*]], <vscale x 32 x bfloat> [[MASKEDOFF:%.*]], <vscale x 32 x bfloat> [[OP1:%.*]], <vscale x 32 x bfloat> [[OP2:%.*]], i64 noundef [[VL:%.*]]) #[[ATTR0]] {
+// CHECK-RV64-NEXT: [[ENTRY:.*:]]
+// CHECK-RV64-NEXT: [[TMP0:%.*]] = call <vscale x 32 x bfloat> @llvm.riscv.vfsgnjn.mask.nxv32bf16.nxv32bf16.i64(<vscale x 32 x bfloat> [[MASKEDOFF]], <vscale x 32 x bfloat> [[OP1]], <vscale x 32 x bfloat> [[OP2]], <vscale x 32 x i1> [[MASK]], i64 [[VL]], i64 0)
+// CHECK-RV64-NEXT: ret <vscale x 32 x bfloat> [[TMP0]]
+//
+vbfloat16m8_t test_vfsgnjn_vv_bf16m8_tumu(vbool2_t mask, vbfloat16m8_t maskedoff, vbfloat16m8_t op1, vbfloat16m8_t op2, size_t vl) {
+ return __riscv_vfsgnjn_vv_bf16m8_tumu(mask, maskedoff, op1, op2, vl);
+}
+
+// CHECK-RV64-LABEL: define dso_local <vscale x 32 x bfloat> @test_vfsgnjn_vf_bf16m8_tumu(
+// CHECK-RV64-SAME: <vscale x 32 x i1> [[MASK:%.*]], <vscale x 32 x bfloat> [[MASKEDOFF:%.*]], <vscale x 32 x bfloat> [[OP1:%.*]], bfloat noundef [[OP2:%.*]], i64 noundef [[VL:%.*]]) #[[ATTR0]] {
+// CHECK-RV64-NEXT: [[ENTRY:.*:]]
+// CHECK-RV64-NEXT: [[TMP0:%.*]] = call <vscale x 32 x bfloat> @llvm.riscv.vfsgnjn.mask.nxv32bf16.bf16.i64(<vscale x 32 x bfloat> [[MASKEDOFF]], <vscale x 32 x bfloat> [[OP1]], bfloat [[OP2]], <vscale x 32 x i1> [[MASK]], i64 [[VL]], i64 0)
+// CHECK-RV64-NEXT: ret <vscale x 32 x bfloat> [[TMP0]]
+//
+vbfloat16m8_t test_vfsgnjn_vf_bf16m8_tumu(vbool2_t mask, vbfloat16m8_t maskedoff, vbfloat16m8_t op1, __bf16 op2, size_t vl) {
+ return __riscv_vfsgnjn_vf_bf16m8_tumu(mask, maskedoff, op1, op2, vl);
+}
+
+// CHECK-RV64-LABEL: define dso_local <vscale x 1 x bfloat> @test_vfsgnjn_vv_bf16mf4_mu(
+// CHECK-RV64-SAME: <vscale x 1 x i1> [[MASK:%.*]], <vscale x 1 x bfloat> [[MASKEDOFF:%.*]], <vscale x 1 x bfloat> [[OP1:%.*]], <vscale x 1 x bfloat> [[OP2:%.*]], i64 noundef [[VL:%.*]]) #[[ATTR0]] {
+// CHECK-RV64-NEXT: [[ENTRY:.*:]]
+// CHECK-RV64-NEXT: [[TMP0:%.*]] = call <vscale x 1 x bfloat> @llvm.riscv.vfsgnjn.mask.nxv1bf16.nxv1bf16.i64(<vscale x 1 x bfloat> [[MASKEDOFF]], <vscale x 1 x bfloat> [[OP1]], <vscale x 1 x bfloat> [[OP2]], <vscale x 1 x i1> [[MASK]], i64 [[VL]], i64 1)
+// CHECK-RV64-NEXT: ret <vscale x 1 x bfloat> [[TMP0]]
+//
+vbfloat16mf4_t test_vfsgnjn_vv_bf16mf4_mu(vbool64_t mask, vbfloat16mf4_t maskedoff, vbfloat16mf4_t op1, vbfloat16mf4_t op2, size_t vl) {
+ return __riscv_vfsgnjn_vv_bf16mf4_mu(mask, maskedoff, op1, op2, vl);
+}
+
+// CHECK-RV64-LABEL: define dso_local <vscale x 1 x bfloat> @test_vfsgnjn_vf_bf16mf4_mu(
+// CHECK-RV64-SAME: <vscale x 1 x i1> [[MASK:%.*]], <vscale x 1 x bfloat> [[MASKEDOFF:%.*]], <vscale x 1 x bfloat> [[OP1:%.*]], bfloat noundef [[OP2:%.*]], i64 noundef [[VL:%.*]]) #[[ATTR0]] {
+// CHECK-RV64-NEXT: [[ENTRY:.*:]]
+// CHECK-RV64-NEXT: [[TMP0:%.*]] = call <vscale x 1 x bfloat> @llvm.riscv.vfsgnjn.mask.nxv1bf16.bf16.i64(<vscale x 1 x bfloat> [[MASKEDOFF]], <vscale x 1 x bfloat> [[OP1]], bfloat [[OP2]], <vscale x 1 x i1> [[MASK]], i64 [[VL]], i64 1)
+// CHECK-RV64-NEXT: ret <vscale x 1 x bfloat> [[TMP0]]
+//
+vbfloat16mf4_t test_vfsgnjn_vf_bf16mf4_mu(vbool64_t mask, vbfloat16mf4_t maskedoff, vbfloat16mf4_t op1, __bf16 op2, size_t vl) {
+ return __riscv_vfsgnjn_vf_bf16mf4_mu(mask, maskedoff, op1, op2, vl);
+}
+
+// CHECK-RV64-LABEL: define dso_local <vscale x 2 x bfloat> @test_vfsgnjn_vv_bf16mf2_mu(
+// CHECK-RV64-SAME: <vscale x 2 x i1> [[MASK:%.*]], <vscale x 2 x bfloat> [[MASKEDOFF:%.*]], <vscale x 2 x bfloat> [[OP1:%.*]], <vscale x 2 x bfloat> [[OP2:%.*]], i64 noundef [[VL:%.*]]) #[[ATTR0]] {
+// CHECK-RV64-NEXT: [[ENTRY:.*:]]
+// CHECK-RV64-NEXT: [[TMP0:%.*]] = call <vscale x 2 x bfloat> @llvm.riscv.vfsgnjn.mask.nxv2bf16.nxv2bf16.i64(<vscale x 2 x bfloat> [[MASKEDOFF]], <vscale x 2 x bfloat> [[OP1]], <vscale x 2 x bfloat> [[OP2]], <vscale x 2 x i1> [[MASK]], i64 [[VL]], i64 1)
+// CHECK-RV64-NEXT: ret <vscale x 2 x bfloat> [[TMP0]]
+//
+vbfloat16mf2_t test_vfsgnjn_vv_bf16mf2_mu(vbool32_t mask, vbfloat16mf2_t maskedoff, vbfloat16mf2_t op1, vbfloat16mf2_t op2, size_t vl) {
+ return __riscv_vfsgnjn_vv_bf16mf2_mu(mask, maskedoff, op1, op2, vl);
+}
+
+// CHECK-RV64-LABEL: define dso_local <vscale x 2 x bfloat> @test_vfsgnjn_vf_bf16mf2_mu(
+// CHECK-RV64-SAME: <vscale x 2 x i1> [[MASK:%.*]], <vscale x 2 x bfloat> [[MASKEDOFF:%.*]], <vscale x 2 x bfloat> [[OP1:%.*]], bfloat noundef [[OP2:%.*]], i64 noundef [[VL:%.*]]) #[[ATTR0]] {
+// CHECK-RV64-NEXT: [[ENTRY:.*:]]
+// CHECK-RV64-NEXT: [[TMP0:%.*]] = call <vscale x 2 x bfloat> @llvm.riscv.vfsgnjn.mask.nxv2bf16.bf16.i64(<vscale x 2 x bfloat> [[MASKEDOFF]], <vscale x 2 x bfloat> [[OP1]], bfloat [[OP2]], <vscale x 2 x i1> [[MASK]], i64 [[VL]], i64 1)
+// CHECK-RV64-NEXT: ret <vscale x 2 x bfloat> [[TMP0]]
+//
+vbfloat16mf2_t test_vfsgnjn_vf_bf16mf2_mu(vbool32_t mask, vbfloat16mf2_t maskedoff, vbfloat16mf2_t op1, __bf16 op2, size_t vl) {
+ return __riscv_vfsgnjn_vf_bf16mf2_mu(mask, maskedoff, op1, op2, vl);
+}
+
+// CHECK-RV64-LABEL: define dso_local <vscale x 4 x bfloat> @test_vfsgnjn_vv_bf16m1_mu(
+// CHECK-RV64-SAME: <vscale x 4 x i1> [[MASK:%.*]], <vscale x 4 x bfloat> [[MASKEDOFF:%.*]], <vscale x 4 x bfloat> [[OP1:%.*]], <vscale x 4 x bfloat> [[OP2:%.*]], i64 noundef [[VL:%.*]]) #[[ATTR0]] {
+// CHECK-RV64-NEXT: [[ENTRY:.*:]]
+// CHECK-RV64-NEXT: [[TMP0:%.*]] = call <vscale x 4 x bfloat> @llvm.riscv.vfsgnjn.mask.nxv4bf16.nxv4bf16.i64(<vscale x 4 x bfloat> [[MASKEDOFF]], <vscale x 4 x bfloat> [[OP1]], <vscale x 4 x bfloat> [[OP2]], <vscale x 4 x i1> [[MASK]], i64 [[VL]], i64 1)
+// CHECK-RV64-NEXT: ret <vscale x 4 x bfloat> [[TMP0]]
+//
+vbfloat16m1_t test_vfsgnjn_vv_bf16m1_mu(vbool16_t mask, vbfloat16m1_t maskedoff, vbfloat16m1_t op1, vbfloat16m1_t op2, size_t vl) {
+ return __riscv_vfsgnjn_vv_bf16m1_mu(mask, maskedoff, op1, op2, vl);
+}
+
+// CHECK-RV64-LABEL: define dso_local <vscale x 4 x bfloat> @test_vfsgnjn_vf_bf16m1_mu(
+// CHECK-RV64-SAME: <vscale x 4 x i1> [[MASK:%.*]], <vscale x 4 x bfloat> [[MASKEDOFF:%.*]], <vscale x 4 x bfloat> [[OP1:%.*]], bfloat noundef [[OP2:%.*]], i64 noundef [[VL:%.*]]) #[[ATTR0]] {
+// CHECK-RV64-NEXT: [[ENTRY:.*:]]
+// CHECK-RV64-NEXT: [[TMP0:%.*]] = call <vscale x 4 x bfloat> @llvm.riscv.vfsgnjn.mask.nxv4bf16.bf16.i64(<vscale x 4 x bfloat> [[MASKEDOFF]], <vscale x 4 x bfloat> [[OP1]], bfloat [[OP2]], <vscale x 4 x i1> [[MASK]], i64 [[VL]], i64 1)
+// CHECK-RV64-NEXT: ret <vscale x 4 x bfloat> [[TMP0]]
+//
+vbfloat16m1_t test_vfsgnjn_vf_bf16m1_mu(vbool16_t mask, vbfloat16m1_t maskedoff, vbfloat16m1_t op1, __bf16 op2, size_t vl) {
+ return __riscv_vfsgnjn_vf_bf16m1_mu(mask, maskedoff, op1, op2, vl);
+}
+
+// CHECK-RV64-LABEL: define dso_local <vscale x 8 x bfloat> @test_vfsgnjn_vv_bf16m2_mu(
+// CHECK-RV64-SAME: <vscale x 8 x i1> [[MASK:%.*]], <vscale x 8 x bfloat> [[MASKEDOFF:%.*]], <vscale x 8 x bfloat> [[OP1:%.*]], <vscale x 8 x bfloat> [[OP2:%.*]], i64 noundef [[VL:%.*]]) #[[ATTR0]] {
+// CHECK-RV64-NEXT: [[ENTRY:.*:]]
+// CHECK-RV64-NEXT: [[TMP0:%.*]] = call <vscale x 8 x bfloat> @llvm.riscv.vfsgnjn.mask.nxv8bf16.nxv8bf16.i64(<vscale x 8 x bfloat> [[MASKEDOFF]], <vscale x 8 x bfloat> [[OP1]], <vscale x 8 x bfloat> [[OP2]], <vscale x 8 x i1> [[MASK]], i64 [[VL]], i64 1)
+// CHECK-RV64-NEXT: ret <vscale x 8 x bfloat> [[TMP0]]
+//
+vbfloat16m2_t test_vfsgnjn_vv_bf16m2_mu(vbool8_t mask, vbfloat16m2_t maskedoff, vbfloat16m2_t op1, vbfloat16m2_t op2, size_t vl) {
+ return __riscv_vfsgnjn_vv_bf16m2_mu(mask, maskedoff, op1, op2, vl);
+}
+
+// CHECK-RV64-LABEL: define dso_local <vscale x 8 x bfloat> @test_vfsgnjn_vf_bf16m2_mu(
+// CHECK-RV64-SAME: <vscale x 8 x i1> [[MASK:%.*]], <vscale x 8 x bfloat> [[MASKEDOFF:%.*]], <vscale x 8 x bfloat> [[OP1:%.*]], bfloat noundef [[OP2:%.*]], i64 noundef [[VL:%.*]]) #[[ATTR0]] {
+// CHECK-RV64-NEXT: [[ENTRY:.*:]]
+// CHECK-RV64-NEXT: [[TMP0:%.*]] = call <vscale x 8 x bfloat> @llvm.riscv.vfsgnjn.mask.nxv8bf16.bf16.i64(<vscale x 8 x bfloat> [[MASKEDOFF]], <vscale x 8 x bfloat> [[OP1]], bfloat [[OP2]], <vscale x 8 x i1> [[MASK]], i64 [[VL]], i64 1)
+// CHECK-RV64-NEXT: ret <vscale x 8 x bfloat> [[TMP0]]
+//
+vbfloat16m2_t test_vfsgnjn_vf_bf16m2_mu(vbool8_t mask, vbfloat16m2_t maskedoff, vbfloat16m2_t op1, __bf16 op2, size_t vl) {
+ return __riscv_vfsgnjn_vf_bf16m2_mu(mask, maskedoff, op1, op2, vl);
+}
+
+// CHECK-RV64-LABEL: define dso_local <vscale x 16 x bfloat> @test_vfsgnjn_vv_bf16m4_mu(
+// CHECK-RV64-SAME: <vscale x 16 x i1> [[MASK:%.*]], <vscale x 16 x bfloat> [[MASKEDOFF:%.*]], <vscale x 16 x bfloat> [[OP1:%.*]], <vscale x 16 x bfloat> [[OP2:%.*]], i64 noundef [[VL:%.*]]) #[[ATTR0]] {
+// CHECK-RV64-NEXT: [[ENTRY:.*:]]
+// CHECK-RV64-NEXT: [[TMP0:%.*]] = call <vscale x 16 x bfloat> @llvm.riscv.vfsgnjn.mask.nxv16bf16.nxv16bf16.i64(<vscale x 16 x bfloat> [[MASKEDOFF]], <vscale x 16 x bfloat> [[OP1]], <vscale x 16 x bfloat> [[OP2]], <vscale x 16 x i1> [[MASK]], i64 [[VL]], i64 1)
+// CHECK-RV64-NEXT: ret <vscale x 16 x bfloat> [[TMP0]]
+//
+vbfloat16m4_t test_vfsgnjn_vv_bf16m4_mu(vbool4_t mask, vbfloat16m4_t maskedoff, vbfloat16m4_t op1, vbfloat16m4_t op2, size_t vl) {
+ return __riscv_vfsgnjn_vv_bf16m4_mu(mask, maskedoff, op1, op2, vl);
+}
+
+// CHECK-RV64-LABEL: define dso_local <vscale x 16 x bfloat> @test_vfsgnjn_vf_bf16m4_mu(
+// CHECK-RV64-SAME: <vscale x 16 x i1> [[MASK:%.*]], <vscale x 16 x bfloat> [[MASKEDOFF:%.*]], <vscale x 16 x bfloat> [[OP1:%.*]], bfloat noundef [[OP2:%.*]], i64 noundef [[VL:%.*]]) #[[ATTR0]] {
+// CHECK-RV64-NEXT: [[ENTRY:.*:]]
+// CHECK-RV64-NEXT: [[TMP0:%.*]] = call <vscale x 16 x bfloat> @llvm.riscv.vfsgnjn.mask.nxv16bf16.bf16.i64(<vscale x 16 x bfloat> [[MASKEDOFF]], <vscale x 16 x bfloat> [[OP1]], bfloat [[OP2]], <vscale x 16 x i1> [[MASK]], i64 [[VL]], i64 1)
+// CHECK-RV64-NEXT: ret <vscale x 16 x bfloat> [[TMP0]]
+//
+vbfloat16m4_t test_vfsgnjn_vf_bf16m4_mu(vbool4_t mask, vbfloat16m4_t maskedoff, vbfloat16m4_t op1, __bf16 op2, size_t vl) {
+ return __riscv_vfsgnjn_vf_bf16m4_mu(mask, maskedoff, op1, op2, vl);
+}
+
+// CHECK-RV64-LABEL: define dso_local <vscale x 32 x bfloat> @test_vfsgnjn_vv_bf16m8_mu(
+// CHECK-RV64-SAME: <vscale x 32 x i1> [[MASK:%.*]], <vscale x 32 x bfloat> [[MASKEDOFF:%.*]], <vscale x 32 x bfloat> [[OP1:%.*]], <vscale x 32 x bfloat> [[OP2:%.*]], i64 noundef [[VL:%.*]]) #[[ATTR0]] {
+// CHECK-RV64-NEXT: [[ENTRY:.*:]]
+// CHECK-RV64-NEXT: [[TMP0:%.*]] = call <vscale x 32 x bfloat> @llvm.riscv.vfsgnjn.mask.nxv32bf16.nxv32bf16.i64(<vscale x 32 x bfloat> [[MASKEDOFF]], <vscale x 32 x bfloat> [[OP1]], <vscale x 32 x bfloat> [[OP2]], <vscale x 32 x i1> [[MASK]], i64 [[VL]], i64 1)
+// CHECK-RV64-NEXT: ret <vscale x 32 x bfloat> [[TMP0]]
+//
+vbfloat16m8_t test_vfsgnjn_vv_bf16m8_mu(vbool2_t mask, vbfloat16m8_t maskedoff, vbfloat16m8_t op1, vbfloat16m8_t op2, size_t vl) {
+ return __riscv_vfsgnjn_vv_bf16m8_mu(mask, maskedoff, op1, op2, vl);
+}
+
+// CHECK-RV64-LABEL: define dso_local <vscale x 32 x bfloat> @test_vfsgnjn_vf_bf16m8_mu(
+// CHECK-RV64-SAME: <vscale x 32 x i1> [[MASK:%.*]], <vscale x 32 x bfloat> [[MASKEDOFF:%.*]], <vscale x 32 x bfloat> [[OP1:%.*]], bfloat noundef [[OP2:%.*]], i64 noundef [[VL:%.*]]) #[[ATTR0]] {
+// CHECK-RV64-NEXT: [[ENTRY:.*:]]
+// CHECK-RV64-NEXT: [[TMP0:%.*]] = call <vscale x 32 x bfloat> @llvm.riscv.vfsgnjn.mask.nxv32bf16.bf16.i64(<vscale x 32 x bfloat> [[MASKEDOFF]], <vscale x 32 x bfloat> [[OP1]], bfloat [[OP2]], <vscale x 32 x i1> [[MASK]], i64 [[VL]], i64 1)
+// CHECK-RV64-NEXT: ret <vscale x 32 x bfloat> [[TMP0]]
+//
+vbfloat16m8_t test_vfsgnjn_vf_bf16m8_mu(vbool2_t mask, vbfloat16m8_t maskedoff, vbfloat16m8_t op1, __bf16 op2, size_t vl) {
+ return __riscv_vfsgnjn_vf_bf16m8_mu(mask, maskedoff, op1, op2, vl);
+}
+
diff --git a/clang/test/CodeGen/RISCV/rvv-intrinsics-autogenerated/zvfbfa/policy/non-overloaded/vfsgnjx.c b/clang/test/CodeGen/RISCV/rvv-intrinsics-autogenerated/zvfbfa/policy/non-overloaded/vfsgnjx.c
new file mode 100644
index 0000000..104149e
--- /dev/null
+++ b/clang/test/CodeGen/RISCV/rvv-intrinsics-autogenerated/zvfbfa/policy/non-overloaded/vfsgnjx.c
@@ -0,0 +1,489 @@
+// NOTE: Assertions have been autogenerated by utils/update_cc_test_checks.py UTC_ARGS: --version 5
+// REQUIRES: riscv-registered-target
+// RUN: %clang_cc1 -triple riscv64 -target-feature +v \
+// RUN: -target-feature +experimental-zvfbfa -disable-O0-optnone \
+// RUN: -emit-llvm %s -o - | opt -S -passes=mem2reg | \
+// RUN: FileCheck --check-prefix=CHECK-RV64 %s
+
+#include <riscv_vector.h>
+
+// CHECK-RV64-LABEL: define dso_local <vscale x 1 x bfloat> @test_vfsgnjx_vv_bf16mf4_tu(
+// CHECK-RV64-SAME: <vscale x 1 x bfloat> [[MASKEDOFF:%.*]], <vscale x 1 x bfloat> [[OP1:%.*]], <vscale x 1 x bfloat> [[OP2:%.*]], i64 noundef [[VL:%.*]]) #[[ATTR0:[0-9]+]] {
+// CHECK-RV64-NEXT: [[ENTRY:.*:]]
+// CHECK-RV64-NEXT: [[TMP0:%.*]] = call <vscale x 1 x bfloat> @llvm.riscv.vfsgnjx.nxv1bf16.nxv1bf16.i64(<vscale x 1 x bfloat> [[MASKEDOFF]], <vscale x 1 x bfloat> [[OP1]], <vscale x 1 x bfloat> [[OP2]], i64 [[VL]])
+// CHECK-RV64-NEXT: ret <vscale x 1 x bfloat> [[TMP0]]
+//
+vbfloat16mf4_t test_vfsgnjx_vv_bf16mf4_tu(vbfloat16mf4_t maskedoff, vbfloat16mf4_t op1, vbfloat16mf4_t op2, size_t vl) {
+ return __riscv_vfsgnjx_vv_bf16mf4_tu(maskedoff, op1, op2, vl);
+}
+
+// CHECK-RV64-LABEL: define dso_local <vscale x 1 x bfloat> @test_vfsgnjx_vf_bf16mf4_tu(
+// CHECK-RV64-SAME: <vscale x 1 x bfloat> [[MASKEDOFF:%.*]], <vscale x 1 x bfloat> [[OP1:%.*]], bfloat noundef [[OP2:%.*]], i64 noundef [[VL:%.*]]) #[[ATTR0]] {
+// CHECK-RV64-NEXT: [[ENTRY:.*:]]
+// CHECK-RV64-NEXT: [[TMP0:%.*]] = call <vscale x 1 x bfloat> @llvm.riscv.vfsgnjx.nxv1bf16.bf16.i64(<vscale x 1 x bfloat> [[MASKEDOFF]], <vscale x 1 x bfloat> [[OP1]], bfloat [[OP2]], i64 [[VL]])
+// CHECK-RV64-NEXT: ret <vscale x 1 x bfloat> [[TMP0]]
+//
+vbfloat16mf4_t test_vfsgnjx_vf_bf16mf4_tu(vbfloat16mf4_t maskedoff, vbfloat16mf4_t op1, __bf16 op2, size_t vl) {
+ return __riscv_vfsgnjx_vf_bf16mf4_tu(maskedoff, op1, op2, vl);
+}
+
+// CHECK-RV64-LABEL: define dso_local <vscale x 2 x bfloat> @test_vfsgnjx_vv_bf16mf2_tu(
+// CHECK-RV64-SAME: <vscale x 2 x bfloat> [[MASKEDOFF:%.*]], <vscale x 2 x bfloat> [[OP1:%.*]], <vscale x 2 x bfloat> [[OP2:%.*]], i64 noundef [[VL:%.*]]) #[[ATTR0]] {
+// CHECK-RV64-NEXT: [[ENTRY:.*:]]
+// CHECK-RV64-NEXT: [[TMP0:%.*]] = call <vscale x 2 x bfloat> @llvm.riscv.vfsgnjx.nxv2bf16.nxv2bf16.i64(<vscale x 2 x bfloat> [[MASKEDOFF]], <vscale x 2 x bfloat> [[OP1]], <vscale x 2 x bfloat> [[OP2]], i64 [[VL]])
+// CHECK-RV64-NEXT: ret <vscale x 2 x bfloat> [[TMP0]]
+//
+vbfloat16mf2_t test_vfsgnjx_vv_bf16mf2_tu(vbfloat16mf2_t maskedoff, vbfloat16mf2_t op1, vbfloat16mf2_t op2, size_t vl) {
+ return __riscv_vfsgnjx_vv_bf16mf2_tu(maskedoff, op1, op2, vl);
+}
+
+// CHECK-RV64-LABEL: define dso_local <vscale x 2 x bfloat> @test_vfsgnjx_vf_bf16mf2_tu(
+// CHECK-RV64-SAME: <vscale x 2 x bfloat> [[MASKEDOFF:%.*]], <vscale x 2 x bfloat> [[OP1:%.*]], bfloat noundef [[OP2:%.*]], i64 noundef [[VL:%.*]]) #[[ATTR0]] {
+// CHECK-RV64-NEXT: [[ENTRY:.*:]]
+// CHECK-RV64-NEXT: [[TMP0:%.*]] = call <vscale x 2 x bfloat> @llvm.riscv.vfsgnjx.nxv2bf16.bf16.i64(<vscale x 2 x bfloat> [[MASKEDOFF]], <vscale x 2 x bfloat> [[OP1]], bfloat [[OP2]], i64 [[VL]])
+// CHECK-RV64-NEXT: ret <vscale x 2 x bfloat> [[TMP0]]
+//
+vbfloat16mf2_t test_vfsgnjx_vf_bf16mf2_tu(vbfloat16mf2_t maskedoff, vbfloat16mf2_t op1, __bf16 op2, size_t vl) {
+ return __riscv_vfsgnjx_vf_bf16mf2_tu(maskedoff, op1, op2, vl);
+}
+
+// CHECK-RV64-LABEL: define dso_local <vscale x 4 x bfloat> @test_vfsgnjx_vv_bf16m1_tu(
+// CHECK-RV64-SAME: <vscale x 4 x bfloat> [[MASKEDOFF:%.*]], <vscale x 4 x bfloat> [[OP1:%.*]], <vscale x 4 x bfloat> [[OP2:%.*]], i64 noundef [[VL:%.*]]) #[[ATTR0]] {
+// CHECK-RV64-NEXT: [[ENTRY:.*:]]
+// CHECK-RV64-NEXT: [[TMP0:%.*]] = call <vscale x 4 x bfloat> @llvm.riscv.vfsgnjx.nxv4bf16.nxv4bf16.i64(<vscale x 4 x bfloat> [[MASKEDOFF]], <vscale x 4 x bfloat> [[OP1]], <vscale x 4 x bfloat> [[OP2]], i64 [[VL]])
+// CHECK-RV64-NEXT: ret <vscale x 4 x bfloat> [[TMP0]]
+//
+vbfloat16m1_t test_vfsgnjx_vv_bf16m1_tu(vbfloat16m1_t maskedoff, vbfloat16m1_t op1, vbfloat16m1_t op2, size_t vl) {
+ return __riscv_vfsgnjx_vv_bf16m1_tu(maskedoff, op1, op2, vl);
+}
+
+// CHECK-RV64-LABEL: define dso_local <vscale x 4 x bfloat> @test_vfsgnjx_vf_bf16m1_tu(
+// CHECK-RV64-SAME: <vscale x 4 x bfloat> [[MASKEDOFF:%.*]], <vscale x 4 x bfloat> [[OP1:%.*]], bfloat noundef [[OP2:%.*]], i64 noundef [[VL:%.*]]) #[[ATTR0]] {
+// CHECK-RV64-NEXT: [[ENTRY:.*:]]
+// CHECK-RV64-NEXT: [[TMP0:%.*]] = call <vscale x 4 x bfloat> @llvm.riscv.vfsgnjx.nxv4bf16.bf16.i64(<vscale x 4 x bfloat> [[MASKEDOFF]], <vscale x 4 x bfloat> [[OP1]], bfloat [[OP2]], i64 [[VL]])
+// CHECK-RV64-NEXT: ret <vscale x 4 x bfloat> [[TMP0]]
+//
+vbfloat16m1_t test_vfsgnjx_vf_bf16m1_tu(vbfloat16m1_t maskedoff, vbfloat16m1_t op1, __bf16 op2, size_t vl) {
+ return __riscv_vfsgnjx_vf_bf16m1_tu(maskedoff, op1, op2, vl);
+}
+
+// CHECK-RV64-LABEL: define dso_local <vscale x 8 x bfloat> @test_vfsgnjx_vv_bf16m2_tu(
+// CHECK-RV64-SAME: <vscale x 8 x bfloat> [[MASKEDOFF:%.*]], <vscale x 8 x bfloat> [[OP1:%.*]], <vscale x 8 x bfloat> [[OP2:%.*]], i64 noundef [[VL:%.*]]) #[[ATTR0]] {
+// CHECK-RV64-NEXT: [[ENTRY:.*:]]
+// CHECK-RV64-NEXT: [[TMP0:%.*]] = call <vscale x 8 x bfloat> @llvm.riscv.vfsgnjx.nxv8bf16.nxv8bf16.i64(<vscale x 8 x bfloat> [[MASKEDOFF]], <vscale x 8 x bfloat> [[OP1]], <vscale x 8 x bfloat> [[OP2]], i64 [[VL]])
+// CHECK-RV64-NEXT: ret <vscale x 8 x bfloat> [[TMP0]]
+//
+vbfloat16m2_t test_vfsgnjx_vv_bf16m2_tu(vbfloat16m2_t maskedoff, vbfloat16m2_t op1, vbfloat16m2_t op2, size_t vl) {
+ return __riscv_vfsgnjx_vv_bf16m2_tu(maskedoff, op1, op2, vl);
+}
+
+// CHECK-RV64-LABEL: define dso_local <vscale x 8 x bfloat> @test_vfsgnjx_vf_bf16m2_tu(
+// CHECK-RV64-SAME: <vscale x 8 x bfloat> [[MASKEDOFF:%.*]], <vscale x 8 x bfloat> [[OP1:%.*]], bfloat noundef [[OP2:%.*]], i64 noundef [[VL:%.*]]) #[[ATTR0]] {
+// CHECK-RV64-NEXT: [[ENTRY:.*:]]
+// CHECK-RV64-NEXT: [[TMP0:%.*]] = call <vscale x 8 x bfloat> @llvm.riscv.vfsgnjx.nxv8bf16.bf16.i64(<vscale x 8 x bfloat> [[MASKEDOFF]], <vscale x 8 x bfloat> [[OP1]], bfloat [[OP2]], i64 [[VL]])
+// CHECK-RV64-NEXT: ret <vscale x 8 x bfloat> [[TMP0]]
+//
+vbfloat16m2_t test_vfsgnjx_vf_bf16m2_tu(vbfloat16m2_t maskedoff, vbfloat16m2_t op1, __bf16 op2, size_t vl) {
+ return __riscv_vfsgnjx_vf_bf16m2_tu(maskedoff, op1, op2, vl);
+}
+
+// CHECK-RV64-LABEL: define dso_local <vscale x 16 x bfloat> @test_vfsgnjx_vv_bf16m4_tu(
+// CHECK-RV64-SAME: <vscale x 16 x bfloat> [[MASKEDOFF:%.*]], <vscale x 16 x bfloat> [[OP1:%.*]], <vscale x 16 x bfloat> [[OP2:%.*]], i64 noundef [[VL:%.*]]) #[[ATTR0]] {
+// CHECK-RV64-NEXT: [[ENTRY:.*:]]
+// CHECK-RV64-NEXT: [[TMP0:%.*]] = call <vscale x 16 x bfloat> @llvm.riscv.vfsgnjx.nxv16bf16.nxv16bf16.i64(<vscale x 16 x bfloat> [[MASKEDOFF]], <vscale x 16 x bfloat> [[OP1]], <vscale x 16 x bfloat> [[OP2]], i64 [[VL]])
+// CHECK-RV64-NEXT: ret <vscale x 16 x bfloat> [[TMP0]]
+//
+vbfloat16m4_t test_vfsgnjx_vv_bf16m4_tu(vbfloat16m4_t maskedoff, vbfloat16m4_t op1, vbfloat16m4_t op2, size_t vl) {
+ return __riscv_vfsgnjx_vv_bf16m4_tu(maskedoff, op1, op2, vl);
+}
+
+// CHECK-RV64-LABEL: define dso_local <vscale x 16 x bfloat> @test_vfsgnjx_vf_bf16m4_tu(
+// CHECK-RV64-SAME: <vscale x 16 x bfloat> [[MASKEDOFF:%.*]], <vscale x 16 x bfloat> [[OP1:%.*]], bfloat noundef [[OP2:%.*]], i64 noundef [[VL:%.*]]) #[[ATTR0]] {
+// CHECK-RV64-NEXT: [[ENTRY:.*:]]
+// CHECK-RV64-NEXT: [[TMP0:%.*]] = call <vscale x 16 x bfloat> @llvm.riscv.vfsgnjx.nxv16bf16.bf16.i64(<vscale x 16 x bfloat> [[MASKEDOFF]], <vscale x 16 x bfloat> [[OP1]], bfloat [[OP2]], i64 [[VL]])
+// CHECK-RV64-NEXT: ret <vscale x 16 x bfloat> [[TMP0]]
+//
+vbfloat16m4_t test_vfsgnjx_vf_bf16m4_tu(vbfloat16m4_t maskedoff, vbfloat16m4_t op1, __bf16 op2, size_t vl) {
+ return __riscv_vfsgnjx_vf_bf16m4_tu(maskedoff, op1, op2, vl);
+}
+
+// CHECK-RV64-LABEL: define dso_local <vscale x 32 x bfloat> @test_vfsgnjx_vv_bf16m8_tu(
+// CHECK-RV64-SAME: <vscale x 32 x bfloat> [[MASKEDOFF:%.*]], <vscale x 32 x bfloat> [[OP1:%.*]], <vscale x 32 x bfloat> [[OP2:%.*]], i64 noundef [[VL:%.*]]) #[[ATTR0]] {
+// CHECK-RV64-NEXT: [[ENTRY:.*:]]
+// CHECK-RV64-NEXT: [[TMP0:%.*]] = call <vscale x 32 x bfloat> @llvm.riscv.vfsgnjx.nxv32bf16.nxv32bf16.i64(<vscale x 32 x bfloat> [[MASKEDOFF]], <vscale x 32 x bfloat> [[OP1]], <vscale x 32 x bfloat> [[OP2]], i64 [[VL]])
+// CHECK-RV64-NEXT: ret <vscale x 32 x bfloat> [[TMP0]]
+//
+vbfloat16m8_t test_vfsgnjx_vv_bf16m8_tu(vbfloat16m8_t maskedoff, vbfloat16m8_t op1, vbfloat16m8_t op2, size_t vl) {
+ return __riscv_vfsgnjx_vv_bf16m8_tu(maskedoff, op1, op2, vl);
+}
+
+// CHECK-RV64-LABEL: define dso_local <vscale x 32 x bfloat> @test_vfsgnjx_vf_bf16m8_tu(
+// CHECK-RV64-SAME: <vscale x 32 x bfloat> [[MASKEDOFF:%.*]], <vscale x 32 x bfloat> [[OP1:%.*]], bfloat noundef [[OP2:%.*]], i64 noundef [[VL:%.*]]) #[[ATTR0]] {
+// CHECK-RV64-NEXT: [[ENTRY:.*:]]
+// CHECK-RV64-NEXT: [[TMP0:%.*]] = call <vscale x 32 x bfloat> @llvm.riscv.vfsgnjx.nxv32bf16.bf16.i64(<vscale x 32 x bfloat> [[MASKEDOFF]], <vscale x 32 x bfloat> [[OP1]], bfloat [[OP2]], i64 [[VL]])
+// CHECK-RV64-NEXT: ret <vscale x 32 x bfloat> [[TMP0]]
+//
+vbfloat16m8_t test_vfsgnjx_vf_bf16m8_tu(vbfloat16m8_t maskedoff, vbfloat16m8_t op1, __bf16 op2, size_t vl) {
+ return __riscv_vfsgnjx_vf_bf16m8_tu(maskedoff, op1, op2, vl);
+}
+
+// CHECK-RV64-LABEL: define dso_local <vscale x 1 x bfloat> @test_vfsgnjx_vv_bf16mf4_tum(
+// CHECK-RV64-SAME: <vscale x 1 x i1> [[MASK:%.*]], <vscale x 1 x bfloat> [[MASKEDOFF:%.*]], <vscale x 1 x bfloat> [[OP1:%.*]], <vscale x 1 x bfloat> [[OP2:%.*]], i64 noundef [[VL:%.*]]) #[[ATTR0]] {
+// CHECK-RV64-NEXT: [[ENTRY:.*:]]
+// CHECK-RV64-NEXT: [[TMP0:%.*]] = call <vscale x 1 x bfloat> @llvm.riscv.vfsgnjx.mask.nxv1bf16.nxv1bf16.i64(<vscale x 1 x bfloat> [[MASKEDOFF]], <vscale x 1 x bfloat> [[OP1]], <vscale x 1 x bfloat> [[OP2]], <vscale x 1 x i1> [[MASK]], i64 [[VL]], i64 2)
+// CHECK-RV64-NEXT: ret <vscale x 1 x bfloat> [[TMP0]]
+//
+vbfloat16mf4_t test_vfsgnjx_vv_bf16mf4_tum(vbool64_t mask, vbfloat16mf4_t maskedoff, vbfloat16mf4_t op1, vbfloat16mf4_t op2, size_t vl) {
+ return __riscv_vfsgnjx_vv_bf16mf4_tum(mask, maskedoff, op1, op2, vl);
+}
+
+// CHECK-RV64-LABEL: define dso_local <vscale x 1 x bfloat> @test_vfsgnjx_vf_bf16mf4_tum(
+// CHECK-RV64-SAME: <vscale x 1 x i1> [[MASK:%.*]], <vscale x 1 x bfloat> [[MASKEDOFF:%.*]], <vscale x 1 x bfloat> [[OP1:%.*]], bfloat noundef [[OP2:%.*]], i64 noundef [[VL:%.*]]) #[[ATTR0]] {
+// CHECK-RV64-NEXT: [[ENTRY:.*:]]
+// CHECK-RV64-NEXT: [[TMP0:%.*]] = call <vscale x 1 x bfloat> @llvm.riscv.vfsgnjx.mask.nxv1bf16.bf16.i64(<vscale x 1 x bfloat> [[MASKEDOFF]], <vscale x 1 x bfloat> [[OP1]], bfloat [[OP2]], <vscale x 1 x i1> [[MASK]], i64 [[VL]], i64 2)
+// CHECK-RV64-NEXT: ret <vscale x 1 x bfloat> [[TMP0]]
+//
+vbfloat16mf4_t test_vfsgnjx_vf_bf16mf4_tum(vbool64_t mask, vbfloat16mf4_t maskedoff, vbfloat16mf4_t op1, __bf16 op2, size_t vl) {
+ return __riscv_vfsgnjx_vf_bf16mf4_tum(mask, maskedoff, op1, op2, vl);
+}
+
+// CHECK-RV64-LABEL: define dso_local <vscale x 2 x bfloat> @test_vfsgnjx_vv_bf16mf2_tum(
+// CHECK-RV64-SAME: <vscale x 2 x i1> [[MASK:%.*]], <vscale x 2 x bfloat> [[MASKEDOFF:%.*]], <vscale x 2 x bfloat> [[OP1:%.*]], <vscale x 2 x bfloat> [[OP2:%.*]], i64 noundef [[VL:%.*]]) #[[ATTR0]] {
+// CHECK-RV64-NEXT: [[ENTRY:.*:]]
+// CHECK-RV64-NEXT: [[TMP0:%.*]] = call <vscale x 2 x bfloat> @llvm.riscv.vfsgnjx.mask.nxv2bf16.nxv2bf16.i64(<vscale x 2 x bfloat> [[MASKEDOFF]], <vscale x 2 x bfloat> [[OP1]], <vscale x 2 x bfloat> [[OP2]], <vscale x 2 x i1> [[MASK]], i64 [[VL]], i64 2)
+// CHECK-RV64-NEXT: ret <vscale x 2 x bfloat> [[TMP0]]
+//
+vbfloat16mf2_t test_vfsgnjx_vv_bf16mf2_tum(vbool32_t mask, vbfloat16mf2_t maskedoff, vbfloat16mf2_t op1, vbfloat16mf2_t op2, size_t vl) {
+ return __riscv_vfsgnjx_vv_bf16mf2_tum(mask, maskedoff, op1, op2, vl);
+}
+
+// CHECK-RV64-LABEL: define dso_local <vscale x 2 x bfloat> @test_vfsgnjx_vf_bf16mf2_tum(
+// CHECK-RV64-SAME: <vscale x 2 x i1> [[MASK:%.*]], <vscale x 2 x bfloat> [[MASKEDOFF:%.*]], <vscale x 2 x bfloat> [[OP1:%.*]], bfloat noundef [[OP2:%.*]], i64 noundef [[VL:%.*]]) #[[ATTR0]] {
+// CHECK-RV64-NEXT: [[ENTRY:.*:]]
+// CHECK-RV64-NEXT: [[TMP0:%.*]] = call <vscale x 2 x bfloat> @llvm.riscv.vfsgnjx.mask.nxv2bf16.bf16.i64(<vscale x 2 x bfloat> [[MASKEDOFF]], <vscale x 2 x bfloat> [[OP1]], bfloat [[OP2]], <vscale x 2 x i1> [[MASK]], i64 [[VL]], i64 2)
+// CHECK-RV64-NEXT: ret <vscale x 2 x bfloat> [[TMP0]]
+//
+vbfloat16mf2_t test_vfsgnjx_vf_bf16mf2_tum(vbool32_t mask, vbfloat16mf2_t maskedoff, vbfloat16mf2_t op1, __bf16 op2, size_t vl) {
+ return __riscv_vfsgnjx_vf_bf16mf2_tum(mask, maskedoff, op1, op2, vl);
+}
+
+// CHECK-RV64-LABEL: define dso_local <vscale x 4 x bfloat> @test_vfsgnjx_vv_bf16m1_tum(
+// CHECK-RV64-SAME: <vscale x 4 x i1> [[MASK:%.*]], <vscale x 4 x bfloat> [[MASKEDOFF:%.*]], <vscale x 4 x bfloat> [[OP1:%.*]], <vscale x 4 x bfloat> [[OP2:%.*]], i64 noundef [[VL:%.*]]) #[[ATTR0]] {
+// CHECK-RV64-NEXT: [[ENTRY:.*:]]
+// CHECK-RV64-NEXT: [[TMP0:%.*]] = call <vscale x 4 x bfloat> @llvm.riscv.vfsgnjx.mask.nxv4bf16.nxv4bf16.i64(<vscale x 4 x bfloat> [[MASKEDOFF]], <vscale x 4 x bfloat> [[OP1]], <vscale x 4 x bfloat> [[OP2]], <vscale x 4 x i1> [[MASK]], i64 [[VL]], i64 2)
+// CHECK-RV64-NEXT: ret <vscale x 4 x bfloat> [[TMP0]]
+//
+vbfloat16m1_t test_vfsgnjx_vv_bf16m1_tum(vbool16_t mask, vbfloat16m1_t maskedoff, vbfloat16m1_t op1, vbfloat16m1_t op2, size_t vl) {
+ return __riscv_vfsgnjx_vv_bf16m1_tum(mask, maskedoff, op1, op2, vl);
+}
+
+// CHECK-RV64-LABEL: define dso_local <vscale x 4 x bfloat> @test_vfsgnjx_vf_bf16m1_tum(
+// CHECK-RV64-SAME: <vscale x 4 x i1> [[MASK:%.*]], <vscale x 4 x bfloat> [[MASKEDOFF:%.*]], <vscale x 4 x bfloat> [[OP1:%.*]], bfloat noundef [[OP2:%.*]], i64 noundef [[VL:%.*]]) #[[ATTR0]] {
+// CHECK-RV64-NEXT: [[ENTRY:.*:]]
+// CHECK-RV64-NEXT: [[TMP0:%.*]] = call <vscale x 4 x bfloat> @llvm.riscv.vfsgnjx.mask.nxv4bf16.bf16.i64(<vscale x 4 x bfloat> [[MASKEDOFF]], <vscale x 4 x bfloat> [[OP1]], bfloat [[OP2]], <vscale x 4 x i1> [[MASK]], i64 [[VL]], i64 2)
+// CHECK-RV64-NEXT: ret <vscale x 4 x bfloat> [[TMP0]]
+//
+vbfloat16m1_t test_vfsgnjx_vf_bf16m1_tum(vbool16_t mask, vbfloat16m1_t maskedoff, vbfloat16m1_t op1, __bf16 op2, size_t vl) {
+ return __riscv_vfsgnjx_vf_bf16m1_tum(mask, maskedoff, op1, op2, vl);
+}
+
+// CHECK-RV64-LABEL: define dso_local <vscale x 8 x bfloat> @test_vfsgnjx_vv_bf16m2_tum(
+// CHECK-RV64-SAME: <vscale x 8 x i1> [[MASK:%.*]], <vscale x 8 x bfloat> [[MASKEDOFF:%.*]], <vscale x 8 x bfloat> [[OP1:%.*]], <vscale x 8 x bfloat> [[OP2:%.*]], i64 noundef [[VL:%.*]]) #[[ATTR0]] {
+// CHECK-RV64-NEXT: [[ENTRY:.*:]]
+// CHECK-RV64-NEXT: [[TMP0:%.*]] = call <vscale x 8 x bfloat> @llvm.riscv.vfsgnjx.mask.nxv8bf16.nxv8bf16.i64(<vscale x 8 x bfloat> [[MASKEDOFF]], <vscale x 8 x bfloat> [[OP1]], <vscale x 8 x bfloat> [[OP2]], <vscale x 8 x i1> [[MASK]], i64 [[VL]], i64 2)
+// CHECK-RV64-NEXT: ret <vscale x 8 x bfloat> [[TMP0]]
+//
+vbfloat16m2_t test_vfsgnjx_vv_bf16m2_tum(vbool8_t mask, vbfloat16m2_t maskedoff, vbfloat16m2_t op1, vbfloat16m2_t op2, size_t vl) {
+ return __riscv_vfsgnjx_vv_bf16m2_tum(mask, maskedoff, op1, op2, vl);
+}
+
+// CHECK-RV64-LABEL: define dso_local <vscale x 8 x bfloat> @test_vfsgnjx_vf_bf16m2_tum(
+// CHECK-RV64-SAME: <vscale x 8 x i1> [[MASK:%.*]], <vscale x 8 x bfloat> [[MASKEDOFF:%.*]], <vscale x 8 x bfloat> [[OP1:%.*]], bfloat noundef [[OP2:%.*]], i64 noundef [[VL:%.*]]) #[[ATTR0]] {
+// CHECK-RV64-NEXT: [[ENTRY:.*:]]
+// CHECK-RV64-NEXT: [[TMP0:%.*]] = call <vscale x 8 x bfloat> @llvm.riscv.vfsgnjx.mask.nxv8bf16.bf16.i64(<vscale x 8 x bfloat> [[MASKEDOFF]], <vscale x 8 x bfloat> [[OP1]], bfloat [[OP2]], <vscale x 8 x i1> [[MASK]], i64 [[VL]], i64 2)
+// CHECK-RV64-NEXT: ret <vscale x 8 x bfloat> [[TMP0]]
+//
+vbfloat16m2_t test_vfsgnjx_vf_bf16m2_tum(vbool8_t mask, vbfloat16m2_t maskedoff, vbfloat16m2_t op1, __bf16 op2, size_t vl) {
+ return __riscv_vfsgnjx_vf_bf16m2_tum(mask, maskedoff, op1, op2, vl);
+}
+
+// CHECK-RV64-LABEL: define dso_local <vscale x 16 x bfloat> @test_vfsgnjx_vv_bf16m4_tum(
+// CHECK-RV64-SAME: <vscale x 16 x i1> [[MASK:%.*]], <vscale x 16 x bfloat> [[MASKEDOFF:%.*]], <vscale x 16 x bfloat> [[OP1:%.*]], <vscale x 16 x bfloat> [[OP2:%.*]], i64 noundef [[VL:%.*]]) #[[ATTR0]] {
+// CHECK-RV64-NEXT: [[ENTRY:.*:]]
+// CHECK-RV64-NEXT: [[TMP0:%.*]] = call <vscale x 16 x bfloat> @llvm.riscv.vfsgnjx.mask.nxv16bf16.nxv16bf16.i64(<vscale x 16 x bfloat> [[MASKEDOFF]], <vscale x 16 x bfloat> [[OP1]], <vscale x 16 x bfloat> [[OP2]], <vscale x 16 x i1> [[MASK]], i64 [[VL]], i64 2)
+// CHECK-RV64-NEXT: ret <vscale x 16 x bfloat> [[TMP0]]
+//
+vbfloat16m4_t test_vfsgnjx_vv_bf16m4_tum(vbool4_t mask, vbfloat16m4_t maskedoff, vbfloat16m4_t op1, vbfloat16m4_t op2, size_t vl) {
+ return __riscv_vfsgnjx_vv_bf16m4_tum(mask, maskedoff, op1, op2, vl);
+}
+
+// CHECK-RV64-LABEL: define dso_local <vscale x 16 x bfloat> @test_vfsgnjx_vf_bf16m4_tum(
+// CHECK-RV64-SAME: <vscale x 16 x i1> [[MASK:%.*]], <vscale x 16 x bfloat> [[MASKEDOFF:%.*]], <vscale x 16 x bfloat> [[OP1:%.*]], bfloat noundef [[OP2:%.*]], i64 noundef [[VL:%.*]]) #[[ATTR0]] {
+// CHECK-RV64-NEXT: [[ENTRY:.*:]]
+// CHECK-RV64-NEXT: [[TMP0:%.*]] = call <vscale x 16 x bfloat> @llvm.riscv.vfsgnjx.mask.nxv16bf16.bf16.i64(<vscale x 16 x bfloat> [[MASKEDOFF]], <vscale x 16 x bfloat> [[OP1]], bfloat [[OP2]], <vscale x 16 x i1> [[MASK]], i64 [[VL]], i64 2)
+// CHECK-RV64-NEXT: ret <vscale x 16 x bfloat> [[TMP0]]
+//
+vbfloat16m4_t test_vfsgnjx_vf_bf16m4_tum(vbool4_t mask, vbfloat16m4_t maskedoff, vbfloat16m4_t op1, __bf16 op2, size_t vl) {
+ return __riscv_vfsgnjx_vf_bf16m4_tum(mask, maskedoff, op1, op2, vl);
+}
+
+// CHECK-RV64-LABEL: define dso_local <vscale x 32 x bfloat> @test_vfsgnjx_vv_bf16m8_tum(
+// CHECK-RV64-SAME: <vscale x 32 x i1> [[MASK:%.*]], <vscale x 32 x bfloat> [[MASKEDOFF:%.*]], <vscale x 32 x bfloat> [[OP1:%.*]], <vscale x 32 x bfloat> [[OP2:%.*]], i64 noundef [[VL:%.*]]) #[[ATTR0]] {
+// CHECK-RV64-NEXT: [[ENTRY:.*:]]
+// CHECK-RV64-NEXT: [[TMP0:%.*]] = call <vscale x 32 x bfloat> @llvm.riscv.vfsgnjx.mask.nxv32bf16.nxv32bf16.i64(<vscale x 32 x bfloat> [[MASKEDOFF]], <vscale x 32 x bfloat> [[OP1]], <vscale x 32 x bfloat> [[OP2]], <vscale x 32 x i1> [[MASK]], i64 [[VL]], i64 2)
+// CHECK-RV64-NEXT: ret <vscale x 32 x bfloat> [[TMP0]]
+//
+vbfloat16m8_t test_vfsgnjx_vv_bf16m8_tum(vbool2_t mask, vbfloat16m8_t maskedoff, vbfloat16m8_t op1, vbfloat16m8_t op2, size_t vl) {
+ return __riscv_vfsgnjx_vv_bf16m8_tum(mask, maskedoff, op1, op2, vl);
+}
+
+// CHECK-RV64-LABEL: define dso_local <vscale x 32 x bfloat> @test_vfsgnjx_vf_bf16m8_tum(
+// CHECK-RV64-SAME: <vscale x 32 x i1> [[MASK:%.*]], <vscale x 32 x bfloat> [[MASKEDOFF:%.*]], <vscale x 32 x bfloat> [[OP1:%.*]], bfloat noundef [[OP2:%.*]], i64 noundef [[VL:%.*]]) #[[ATTR0]] {
+// CHECK-RV64-NEXT: [[ENTRY:.*:]]
+// CHECK-RV64-NEXT: [[TMP0:%.*]] = call <vscale x 32 x bfloat> @llvm.riscv.vfsgnjx.mask.nxv32bf16.bf16.i64(<vscale x 32 x bfloat> [[MASKEDOFF]], <vscale x 32 x bfloat> [[OP1]], bfloat [[OP2]], <vscale x 32 x i1> [[MASK]], i64 [[VL]], i64 2)
+// CHECK-RV64-NEXT: ret <vscale x 32 x bfloat> [[TMP0]]
+//
+vbfloat16m8_t test_vfsgnjx_vf_bf16m8_tum(vbool2_t mask, vbfloat16m8_t maskedoff, vbfloat16m8_t op1, __bf16 op2, size_t vl) {
+ return __riscv_vfsgnjx_vf_bf16m8_tum(mask, maskedoff, op1, op2, vl);
+}
+
+// CHECK-RV64-LABEL: define dso_local <vscale x 1 x bfloat> @test_vfsgnjx_vv_bf16mf4_tumu(
+// CHECK-RV64-SAME: <vscale x 1 x i1> [[MASK:%.*]], <vscale x 1 x bfloat> [[MASKEDOFF:%.*]], <vscale x 1 x bfloat> [[OP1:%.*]], <vscale x 1 x bfloat> [[OP2:%.*]], i64 noundef [[VL:%.*]]) #[[ATTR0]] {
+// CHECK-RV64-NEXT: [[ENTRY:.*:]]
+// CHECK-RV64-NEXT: [[TMP0:%.*]] = call <vscale x 1 x bfloat> @llvm.riscv.vfsgnjx.mask.nxv1bf16.nxv1bf16.i64(<vscale x 1 x bfloat> [[MASKEDOFF]], <vscale x 1 x bfloat> [[OP1]], <vscale x 1 x bfloat> [[OP2]], <vscale x 1 x i1> [[MASK]], i64 [[VL]], i64 0)
+// CHECK-RV64-NEXT: ret <vscale x 1 x bfloat> [[TMP0]]
+//
+vbfloat16mf4_t test_vfsgnjx_vv_bf16mf4_tumu(vbool64_t mask, vbfloat16mf4_t maskedoff, vbfloat16mf4_t op1, vbfloat16mf4_t op2, size_t vl) {
+ return __riscv_vfsgnjx_vv_bf16mf4_tumu(mask, maskedoff, op1, op2, vl);
+}
+
+// CHECK-RV64-LABEL: define dso_local <vscale x 1 x bfloat> @test_vfsgnjx_vf_bf16mf4_tumu(
+// CHECK-RV64-SAME: <vscale x 1 x i1> [[MASK:%.*]], <vscale x 1 x bfloat> [[MASKEDOFF:%.*]], <vscale x 1 x bfloat> [[OP1:%.*]], bfloat noundef [[OP2:%.*]], i64 noundef [[VL:%.*]]) #[[ATTR0]] {
+// CHECK-RV64-NEXT: [[ENTRY:.*:]]
+// CHECK-RV64-NEXT: [[TMP0:%.*]] = call <vscale x 1 x bfloat> @llvm.riscv.vfsgnjx.mask.nxv1bf16.bf16.i64(<vscale x 1 x bfloat> [[MASKEDOFF]], <vscale x 1 x bfloat> [[OP1]], bfloat [[OP2]], <vscale x 1 x i1> [[MASK]], i64 [[VL]], i64 0)
+// CHECK-RV64-NEXT: ret <vscale x 1 x bfloat> [[TMP0]]
+//
+vbfloat16mf4_t test_vfsgnjx_vf_bf16mf4_tumu(vbool64_t mask, vbfloat16mf4_t maskedoff, vbfloat16mf4_t op1, __bf16 op2, size_t vl) {
+ return __riscv_vfsgnjx_vf_bf16mf4_tumu(mask, maskedoff, op1, op2, vl);
+}
+
+// CHECK-RV64-LABEL: define dso_local <vscale x 2 x bfloat> @test_vfsgnjx_vv_bf16mf2_tumu(
+// CHECK-RV64-SAME: <vscale x 2 x i1> [[MASK:%.*]], <vscale x 2 x bfloat> [[MASKEDOFF:%.*]], <vscale x 2 x bfloat> [[OP1:%.*]], <vscale x 2 x bfloat> [[OP2:%.*]], i64 noundef [[VL:%.*]]) #[[ATTR0]] {
+// CHECK-RV64-NEXT: [[ENTRY:.*:]]
+// CHECK-RV64-NEXT: [[TMP0:%.*]] = call <vscale x 2 x bfloat> @llvm.riscv.vfsgnjx.mask.nxv2bf16.nxv2bf16.i64(<vscale x 2 x bfloat> [[MASKEDOFF]], <vscale x 2 x bfloat> [[OP1]], <vscale x 2 x bfloat> [[OP2]], <vscale x 2 x i1> [[MASK]], i64 [[VL]], i64 0)
+// CHECK-RV64-NEXT: ret <vscale x 2 x bfloat> [[TMP0]]
+//
+vbfloat16mf2_t test_vfsgnjx_vv_bf16mf2_tumu(vbool32_t mask, vbfloat16mf2_t maskedoff, vbfloat16mf2_t op1, vbfloat16mf2_t op2, size_t vl) {
+ return __riscv_vfsgnjx_vv_bf16mf2_tumu(mask, maskedoff, op1, op2, vl);
+}
+
+// CHECK-RV64-LABEL: define dso_local <vscale x 2 x bfloat> @test_vfsgnjx_vf_bf16mf2_tumu(
+// CHECK-RV64-SAME: <vscale x 2 x i1> [[MASK:%.*]], <vscale x 2 x bfloat> [[MASKEDOFF:%.*]], <vscale x 2 x bfloat> [[OP1:%.*]], bfloat noundef [[OP2:%.*]], i64 noundef [[VL:%.*]]) #[[ATTR0]] {
+// CHECK-RV64-NEXT: [[ENTRY:.*:]]
+// CHECK-RV64-NEXT: [[TMP0:%.*]] = call <vscale x 2 x bfloat> @llvm.riscv.vfsgnjx.mask.nxv2bf16.bf16.i64(<vscale x 2 x bfloat> [[MASKEDOFF]], <vscale x 2 x bfloat> [[OP1]], bfloat [[OP2]], <vscale x 2 x i1> [[MASK]], i64 [[VL]], i64 0)
+// CHECK-RV64-NEXT: ret <vscale x 2 x bfloat> [[TMP0]]
+//
+vbfloat16mf2_t test_vfsgnjx_vf_bf16mf2_tumu(vbool32_t mask, vbfloat16mf2_t maskedoff, vbfloat16mf2_t op1, __bf16 op2, size_t vl) {
+ return __riscv_vfsgnjx_vf_bf16mf2_tumu(mask, maskedoff, op1, op2, vl);
+}
+
+// CHECK-RV64-LABEL: define dso_local <vscale x 4 x bfloat> @test_vfsgnjx_vv_bf16m1_tumu(
+// CHECK-RV64-SAME: <vscale x 4 x i1> [[MASK:%.*]], <vscale x 4 x bfloat> [[MASKEDOFF:%.*]], <vscale x 4 x bfloat> [[OP1:%.*]], <vscale x 4 x bfloat> [[OP2:%.*]], i64 noundef [[VL:%.*]]) #[[ATTR0]] {
+// CHECK-RV64-NEXT: [[ENTRY:.*:]]
+// CHECK-RV64-NEXT: [[TMP0:%.*]] = call <vscale x 4 x bfloat> @llvm.riscv.vfsgnjx.mask.nxv4bf16.nxv4bf16.i64(<vscale x 4 x bfloat> [[MASKEDOFF]], <vscale x 4 x bfloat> [[OP1]], <vscale x 4 x bfloat> [[OP2]], <vscale x 4 x i1> [[MASK]], i64 [[VL]], i64 0)
+// CHECK-RV64-NEXT: ret <vscale x 4 x bfloat> [[TMP0]]
+//
+vbfloat16m1_t test_vfsgnjx_vv_bf16m1_tumu(vbool16_t mask, vbfloat16m1_t maskedoff, vbfloat16m1_t op1, vbfloat16m1_t op2, size_t vl) {
+ return __riscv_vfsgnjx_vv_bf16m1_tumu(mask, maskedoff, op1, op2, vl);
+}
+
+// CHECK-RV64-LABEL: define dso_local <vscale x 4 x bfloat> @test_vfsgnjx_vf_bf16m1_tumu(
+// CHECK-RV64-SAME: <vscale x 4 x i1> [[MASK:%.*]], <vscale x 4 x bfloat> [[MASKEDOFF:%.*]], <vscale x 4 x bfloat> [[OP1:%.*]], bfloat noundef [[OP2:%.*]], i64 noundef [[VL:%.*]]) #[[ATTR0]] {
+// CHECK-RV64-NEXT: [[ENTRY:.*:]]
+// CHECK-RV64-NEXT: [[TMP0:%.*]] = call <vscale x 4 x bfloat> @llvm.riscv.vfsgnjx.mask.nxv4bf16.bf16.i64(<vscale x 4 x bfloat> [[MASKEDOFF]], <vscale x 4 x bfloat> [[OP1]], bfloat [[OP2]], <vscale x 4 x i1> [[MASK]], i64 [[VL]], i64 0)
+// CHECK-RV64-NEXT: ret <vscale x 4 x bfloat> [[TMP0]]
+//
+vbfloat16m1_t test_vfsgnjx_vf_bf16m1_tumu(vbool16_t mask, vbfloat16m1_t maskedoff, vbfloat16m1_t op1, __bf16 op2, size_t vl) {
+ return __riscv_vfsgnjx_vf_bf16m1_tumu(mask, maskedoff, op1, op2, vl);
+}
+
+// CHECK-RV64-LABEL: define dso_local <vscale x 8 x bfloat> @test_vfsgnjx_vv_bf16m2_tumu(
+// CHECK-RV64-SAME: <vscale x 8 x i1> [[MASK:%.*]], <vscale x 8 x bfloat> [[MASKEDOFF:%.*]], <vscale x 8 x bfloat> [[OP1:%.*]], <vscale x 8 x bfloat> [[OP2:%.*]], i64 noundef [[VL:%.*]]) #[[ATTR0]] {
+// CHECK-RV64-NEXT: [[ENTRY:.*:]]
+// CHECK-RV64-NEXT: [[TMP0:%.*]] = call <vscale x 8 x bfloat> @llvm.riscv.vfsgnjx.mask.nxv8bf16.nxv8bf16.i64(<vscale x 8 x bfloat> [[MASKEDOFF]], <vscale x 8 x bfloat> [[OP1]], <vscale x 8 x bfloat> [[OP2]], <vscale x 8 x i1> [[MASK]], i64 [[VL]], i64 0)
+// CHECK-RV64-NEXT: ret <vscale x 8 x bfloat> [[TMP0]]
+//
+vbfloat16m2_t test_vfsgnjx_vv_bf16m2_tumu(vbool8_t mask, vbfloat16m2_t maskedoff, vbfloat16m2_t op1, vbfloat16m2_t op2, size_t vl) {
+ return __riscv_vfsgnjx_vv_bf16m2_tumu(mask, maskedoff, op1, op2, vl);
+}
+
+// CHECK-RV64-LABEL: define dso_local <vscale x 8 x bfloat> @test_vfsgnjx_vf_bf16m2_tumu(
+// CHECK-RV64-SAME: <vscale x 8 x i1> [[MASK:%.*]], <vscale x 8 x bfloat> [[MASKEDOFF:%.*]], <vscale x 8 x bfloat> [[OP1:%.*]], bfloat noundef [[OP2:%.*]], i64 noundef [[VL:%.*]]) #[[ATTR0]] {
+// CHECK-RV64-NEXT: [[ENTRY:.*:]]
+// CHECK-RV64-NEXT: [[TMP0:%.*]] = call <vscale x 8 x bfloat> @llvm.riscv.vfsgnjx.mask.nxv8bf16.bf16.i64(<vscale x 8 x bfloat> [[MASKEDOFF]], <vscale x 8 x bfloat> [[OP1]], bfloat [[OP2]], <vscale x 8 x i1> [[MASK]], i64 [[VL]], i64 0)
+// CHECK-RV64-NEXT: ret <vscale x 8 x bfloat> [[TMP0]]
+//
+vbfloat16m2_t test_vfsgnjx_vf_bf16m2_tumu(vbool8_t mask, vbfloat16m2_t maskedoff, vbfloat16m2_t op1, __bf16 op2, size_t vl) {
+ return __riscv_vfsgnjx_vf_bf16m2_tumu(mask, maskedoff, op1, op2, vl);
+}
+
+// CHECK-RV64-LABEL: define dso_local <vscale x 16 x bfloat> @test_vfsgnjx_vv_bf16m4_tumu(
+// CHECK-RV64-SAME: <vscale x 16 x i1> [[MASK:%.*]], <vscale x 16 x bfloat> [[MASKEDOFF:%.*]], <vscale x 16 x bfloat> [[OP1:%.*]], <vscale x 16 x bfloat> [[OP2:%.*]], i64 noundef [[VL:%.*]]) #[[ATTR0]] {
+// CHECK-RV64-NEXT: [[ENTRY:.*:]]
+// CHECK-RV64-NEXT: [[TMP0:%.*]] = call <vscale x 16 x bfloat> @llvm.riscv.vfsgnjx.mask.nxv16bf16.nxv16bf16.i64(<vscale x 16 x bfloat> [[MASKEDOFF]], <vscale x 16 x bfloat> [[OP1]], <vscale x 16 x bfloat> [[OP2]], <vscale x 16 x i1> [[MASK]], i64 [[VL]], i64 0)
+// CHECK-RV64-NEXT: ret <vscale x 16 x bfloat> [[TMP0]]
+//
+vbfloat16m4_t test_vfsgnjx_vv_bf16m4_tumu(vbool4_t mask, vbfloat16m4_t maskedoff, vbfloat16m4_t op1, vbfloat16m4_t op2, size_t vl) {
+ return __riscv_vfsgnjx_vv_bf16m4_tumu(mask, maskedoff, op1, op2, vl);
+}
+
+// CHECK-RV64-LABEL: define dso_local <vscale x 16 x bfloat> @test_vfsgnjx_vf_bf16m4_tumu(
+// CHECK-RV64-SAME: <vscale x 16 x i1> [[MASK:%.*]], <vscale x 16 x bfloat> [[MASKEDOFF:%.*]], <vscale x 16 x bfloat> [[OP1:%.*]], bfloat noundef [[OP2:%.*]], i64 noundef [[VL:%.*]]) #[[ATTR0]] {
+// CHECK-RV64-NEXT: [[ENTRY:.*:]]
+// CHECK-RV64-NEXT: [[TMP0:%.*]] = call <vscale x 16 x bfloat> @llvm.riscv.vfsgnjx.mask.nxv16bf16.bf16.i64(<vscale x 16 x bfloat> [[MASKEDOFF]], <vscale x 16 x bfloat> [[OP1]], bfloat [[OP2]], <vscale x 16 x i1> [[MASK]], i64 [[VL]], i64 0)
+// CHECK-RV64-NEXT: ret <vscale x 16 x bfloat> [[TMP0]]
+//
+vbfloat16m4_t test_vfsgnjx_vf_bf16m4_tumu(vbool4_t mask, vbfloat16m4_t maskedoff, vbfloat16m4_t op1, __bf16 op2, size_t vl) {
+ return __riscv_vfsgnjx_vf_bf16m4_tumu(mask, maskedoff, op1, op2, vl);
+}
+
+// CHECK-RV64-LABEL: define dso_local <vscale x 32 x bfloat> @test_vfsgnjx_vv_bf16m8_tumu(
+// CHECK-RV64-SAME: <vscale x 32 x i1> [[MASK:%.*]], <vscale x 32 x bfloat> [[MASKEDOFF:%.*]], <vscale x 32 x bfloat> [[OP1:%.*]], <vscale x 32 x bfloat> [[OP2:%.*]], i64 noundef [[VL:%.*]]) #[[ATTR0]] {
+// CHECK-RV64-NEXT: [[ENTRY:.*:]]
+// CHECK-RV64-NEXT: [[TMP0:%.*]] = call <vscale x 32 x bfloat> @llvm.riscv.vfsgnjx.mask.nxv32bf16.nxv32bf16.i64(<vscale x 32 x bfloat> [[MASKEDOFF]], <vscale x 32 x bfloat> [[OP1]], <vscale x 32 x bfloat> [[OP2]], <vscale x 32 x i1> [[MASK]], i64 [[VL]], i64 0)
+// CHECK-RV64-NEXT: ret <vscale x 32 x bfloat> [[TMP0]]
+//
+vbfloat16m8_t test_vfsgnjx_vv_bf16m8_tumu(vbool2_t mask, vbfloat16m8_t maskedoff, vbfloat16m8_t op1, vbfloat16m8_t op2, size_t vl) {
+ return __riscv_vfsgnjx_vv_bf16m8_tumu(mask, maskedoff, op1, op2, vl);
+}
+
+// CHECK-RV64-LABEL: define dso_local <vscale x 32 x bfloat> @test_vfsgnjx_vf_bf16m8_tumu(
+// CHECK-RV64-SAME: <vscale x 32 x i1> [[MASK:%.*]], <vscale x 32 x bfloat> [[MASKEDOFF:%.*]], <vscale x 32 x bfloat> [[OP1:%.*]], bfloat noundef [[OP2:%.*]], i64 noundef [[VL:%.*]]) #[[ATTR0]] {
+// CHECK-RV64-NEXT: [[ENTRY:.*:]]
+// CHECK-RV64-NEXT: [[TMP0:%.*]] = call <vscale x 32 x bfloat> @llvm.riscv.vfsgnjx.mask.nxv32bf16.bf16.i64(<vscale x 32 x bfloat> [[MASKEDOFF]], <vscale x 32 x bfloat> [[OP1]], bfloat [[OP2]], <vscale x 32 x i1> [[MASK]], i64 [[VL]], i64 0)
+// CHECK-RV64-NEXT: ret <vscale x 32 x bfloat> [[TMP0]]
+//
+vbfloat16m8_t test_vfsgnjx_vf_bf16m8_tumu(vbool2_t mask, vbfloat16m8_t maskedoff, vbfloat16m8_t op1, __bf16 op2, size_t vl) {
+ return __riscv_vfsgnjx_vf_bf16m8_tumu(mask, maskedoff, op1, op2, vl);
+}
+
+// CHECK-RV64-LABEL: define dso_local <vscale x 1 x bfloat> @test_vfsgnjx_vv_bf16mf4_mu(
+// CHECK-RV64-SAME: <vscale x 1 x i1> [[MASK:%.*]], <vscale x 1 x bfloat> [[MASKEDOFF:%.*]], <vscale x 1 x bfloat> [[OP1:%.*]], <vscale x 1 x bfloat> [[OP2:%.*]], i64 noundef [[VL:%.*]]) #[[ATTR0]] {
+// CHECK-RV64-NEXT: [[ENTRY:.*:]]
+// CHECK-RV64-NEXT: [[TMP0:%.*]] = call <vscale x 1 x bfloat> @llvm.riscv.vfsgnjx.mask.nxv1bf16.nxv1bf16.i64(<vscale x 1 x bfloat> [[MASKEDOFF]], <vscale x 1 x bfloat> [[OP1]], <vscale x 1 x bfloat> [[OP2]], <vscale x 1 x i1> [[MASK]], i64 [[VL]], i64 1)
+// CHECK-RV64-NEXT: ret <vscale x 1 x bfloat> [[TMP0]]
+//
+vbfloat16mf4_t test_vfsgnjx_vv_bf16mf4_mu(vbool64_t mask, vbfloat16mf4_t maskedoff, vbfloat16mf4_t op1, vbfloat16mf4_t op2, size_t vl) {
+ return __riscv_vfsgnjx_vv_bf16mf4_mu(mask, maskedoff, op1, op2, vl);
+}
+
+// CHECK-RV64-LABEL: define dso_local <vscale x 1 x bfloat> @test_vfsgnjx_vf_bf16mf4_mu(
+// CHECK-RV64-SAME: <vscale x 1 x i1> [[MASK:%.*]], <vscale x 1 x bfloat> [[MASKEDOFF:%.*]], <vscale x 1 x bfloat> [[OP1:%.*]], bfloat noundef [[OP2:%.*]], i64 noundef [[VL:%.*]]) #[[ATTR0]] {
+// CHECK-RV64-NEXT: [[ENTRY:.*:]]
+// CHECK-RV64-NEXT: [[TMP0:%.*]] = call <vscale x 1 x bfloat> @llvm.riscv.vfsgnjx.mask.nxv1bf16.bf16.i64(<vscale x 1 x bfloat> [[MASKEDOFF]], <vscale x 1 x bfloat> [[OP1]], bfloat [[OP2]], <vscale x 1 x i1> [[MASK]], i64 [[VL]], i64 1)
+// CHECK-RV64-NEXT: ret <vscale x 1 x bfloat> [[TMP0]]
+//
+vbfloat16mf4_t test_vfsgnjx_vf_bf16mf4_mu(vbool64_t mask, vbfloat16mf4_t maskedoff, vbfloat16mf4_t op1, __bf16 op2, size_t vl) {
+ return __riscv_vfsgnjx_vf_bf16mf4_mu(mask, maskedoff, op1, op2, vl);
+}
+
+// CHECK-RV64-LABEL: define dso_local <vscale x 2 x bfloat> @test_vfsgnjx_vv_bf16mf2_mu(
+// CHECK-RV64-SAME: <vscale x 2 x i1> [[MASK:%.*]], <vscale x 2 x bfloat> [[MASKEDOFF:%.*]], <vscale x 2 x bfloat> [[OP1:%.*]], <vscale x 2 x bfloat> [[OP2:%.*]], i64 noundef [[VL:%.*]]) #[[ATTR0]] {
+// CHECK-RV64-NEXT: [[ENTRY:.*:]]
+// CHECK-RV64-NEXT: [[TMP0:%.*]] = call <vscale x 2 x bfloat> @llvm.riscv.vfsgnjx.mask.nxv2bf16.nxv2bf16.i64(<vscale x 2 x bfloat> [[MASKEDOFF]], <vscale x 2 x bfloat> [[OP1]], <vscale x 2 x bfloat> [[OP2]], <vscale x 2 x i1> [[MASK]], i64 [[VL]], i64 1)
+// CHECK-RV64-NEXT: ret <vscale x 2 x bfloat> [[TMP0]]
+//
+vbfloat16mf2_t test_vfsgnjx_vv_bf16mf2_mu(vbool32_t mask, vbfloat16mf2_t maskedoff, vbfloat16mf2_t op1, vbfloat16mf2_t op2, size_t vl) {
+ return __riscv_vfsgnjx_vv_bf16mf2_mu(mask, maskedoff, op1, op2, vl);
+}
+
+// CHECK-RV64-LABEL: define dso_local <vscale x 2 x bfloat> @test_vfsgnjx_vf_bf16mf2_mu(
+// CHECK-RV64-SAME: <vscale x 2 x i1> [[MASK:%.*]], <vscale x 2 x bfloat> [[MASKEDOFF:%.*]], <vscale x 2 x bfloat> [[OP1:%.*]], bfloat noundef [[OP2:%.*]], i64 noundef [[VL:%.*]]) #[[ATTR0]] {
+// CHECK-RV64-NEXT: [[ENTRY:.*:]]
+// CHECK-RV64-NEXT: [[TMP0:%.*]] = call <vscale x 2 x bfloat> @llvm.riscv.vfsgnjx.mask.nxv2bf16.bf16.i64(<vscale x 2 x bfloat> [[MASKEDOFF]], <vscale x 2 x bfloat> [[OP1]], bfloat [[OP2]], <vscale x 2 x i1> [[MASK]], i64 [[VL]], i64 1)
+// CHECK-RV64-NEXT: ret <vscale x 2 x bfloat> [[TMP0]]
+//
+vbfloat16mf2_t test_vfsgnjx_vf_bf16mf2_mu(vbool32_t mask, vbfloat16mf2_t maskedoff, vbfloat16mf2_t op1, __bf16 op2, size_t vl) {
+ return __riscv_vfsgnjx_vf_bf16mf2_mu(mask, maskedoff, op1, op2, vl);
+}
+
+// CHECK-RV64-LABEL: define dso_local <vscale x 4 x bfloat> @test_vfsgnjx_vv_bf16m1_mu(
+// CHECK-RV64-SAME: <vscale x 4 x i1> [[MASK:%.*]], <vscale x 4 x bfloat> [[MASKEDOFF:%.*]], <vscale x 4 x bfloat> [[OP1:%.*]], <vscale x 4 x bfloat> [[OP2:%.*]], i64 noundef [[VL:%.*]]) #[[ATTR0]] {
+// CHECK-RV64-NEXT: [[ENTRY:.*:]]
+// CHECK-RV64-NEXT: [[TMP0:%.*]] = call <vscale x 4 x bfloat> @llvm.riscv.vfsgnjx.mask.nxv4bf16.nxv4bf16.i64(<vscale x 4 x bfloat> [[MASKEDOFF]], <vscale x 4 x bfloat> [[OP1]], <vscale x 4 x bfloat> [[OP2]], <vscale x 4 x i1> [[MASK]], i64 [[VL]], i64 1)
+// CHECK-RV64-NEXT: ret <vscale x 4 x bfloat> [[TMP0]]
+//
+vbfloat16m1_t test_vfsgnjx_vv_bf16m1_mu(vbool16_t mask, vbfloat16m1_t maskedoff, vbfloat16m1_t op1, vbfloat16m1_t op2, size_t vl) {
+ return __riscv_vfsgnjx_vv_bf16m1_mu(mask, maskedoff, op1, op2, vl);
+}
+
+// CHECK-RV64-LABEL: define dso_local <vscale x 4 x bfloat> @test_vfsgnjx_vf_bf16m1_mu(
+// CHECK-RV64-SAME: <vscale x 4 x i1> [[MASK:%.*]], <vscale x 4 x bfloat> [[MASKEDOFF:%.*]], <vscale x 4 x bfloat> [[OP1:%.*]], bfloat noundef [[OP2:%.*]], i64 noundef [[VL:%.*]]) #[[ATTR0]] {
+// CHECK-RV64-NEXT: [[ENTRY:.*:]]
+// CHECK-RV64-NEXT: [[TMP0:%.*]] = call <vscale x 4 x bfloat> @llvm.riscv.vfsgnjx.mask.nxv4bf16.bf16.i64(<vscale x 4 x bfloat> [[MASKEDOFF]], <vscale x 4 x bfloat> [[OP1]], bfloat [[OP2]], <vscale x 4 x i1> [[MASK]], i64 [[VL]], i64 1)
+// CHECK-RV64-NEXT: ret <vscale x 4 x bfloat> [[TMP0]]
+//
+vbfloat16m1_t test_vfsgnjx_vf_bf16m1_mu(vbool16_t mask, vbfloat16m1_t maskedoff, vbfloat16m1_t op1, __bf16 op2, size_t vl) {
+ return __riscv_vfsgnjx_vf_bf16m1_mu(mask, maskedoff, op1, op2, vl);
+}
+
+// CHECK-RV64-LABEL: define dso_local <vscale x 8 x bfloat> @test_vfsgnjx_vv_bf16m2_mu(
+// CHECK-RV64-SAME: <vscale x 8 x i1> [[MASK:%.*]], <vscale x 8 x bfloat> [[MASKEDOFF:%.*]], <vscale x 8 x bfloat> [[OP1:%.*]], <vscale x 8 x bfloat> [[OP2:%.*]], i64 noundef [[VL:%.*]]) #[[ATTR0]] {
+// CHECK-RV64-NEXT: [[ENTRY:.*:]]
+// CHECK-RV64-NEXT: [[TMP0:%.*]] = call <vscale x 8 x bfloat> @llvm.riscv.vfsgnjx.mask.nxv8bf16.nxv8bf16.i64(<vscale x 8 x bfloat> [[MASKEDOFF]], <vscale x 8 x bfloat> [[OP1]], <vscale x 8 x bfloat> [[OP2]], <vscale x 8 x i1> [[MASK]], i64 [[VL]], i64 1)
+// CHECK-RV64-NEXT: ret <vscale x 8 x bfloat> [[TMP0]]
+//
+vbfloat16m2_t test_vfsgnjx_vv_bf16m2_mu(vbool8_t mask, vbfloat16m2_t maskedoff, vbfloat16m2_t op1, vbfloat16m2_t op2, size_t vl) {
+ return __riscv_vfsgnjx_vv_bf16m2_mu(mask, maskedoff, op1, op2, vl);
+}
+
+// CHECK-RV64-LABEL: define dso_local <vscale x 8 x bfloat> @test_vfsgnjx_vf_bf16m2_mu(
+// CHECK-RV64-SAME: <vscale x 8 x i1> [[MASK:%.*]], <vscale x 8 x bfloat> [[MASKEDOFF:%.*]], <vscale x 8 x bfloat> [[OP1:%.*]], bfloat noundef [[OP2:%.*]], i64 noundef [[VL:%.*]]) #[[ATTR0]] {
+// CHECK-RV64-NEXT: [[ENTRY:.*:]]
+// CHECK-RV64-NEXT: [[TMP0:%.*]] = call <vscale x 8 x bfloat> @llvm.riscv.vfsgnjx.mask.nxv8bf16.bf16.i64(<vscale x 8 x bfloat> [[MASKEDOFF]], <vscale x 8 x bfloat> [[OP1]], bfloat [[OP2]], <vscale x 8 x i1> [[MASK]], i64 [[VL]], i64 1)
+// CHECK-RV64-NEXT: ret <vscale x 8 x bfloat> [[TMP0]]
+//
+vbfloat16m2_t test_vfsgnjx_vf_bf16m2_mu(vbool8_t mask, vbfloat16m2_t maskedoff, vbfloat16m2_t op1, __bf16 op2, size_t vl) {
+ return __riscv_vfsgnjx_vf_bf16m2_mu(mask, maskedoff, op1, op2, vl);
+}
+
+// CHECK-RV64-LABEL: define dso_local <vscale x 16 x bfloat> @test_vfsgnjx_vv_bf16m4_mu(
+// CHECK-RV64-SAME: <vscale x 16 x i1> [[MASK:%.*]], <vscale x 16 x bfloat> [[MASKEDOFF:%.*]], <vscale x 16 x bfloat> [[OP1:%.*]], <vscale x 16 x bfloat> [[OP2:%.*]], i64 noundef [[VL:%.*]]) #[[ATTR0]] {
+// CHECK-RV64-NEXT: [[ENTRY:.*:]]
+// CHECK-RV64-NEXT: [[TMP0:%.*]] = call <vscale x 16 x bfloat> @llvm.riscv.vfsgnjx.mask.nxv16bf16.nxv16bf16.i64(<vscale x 16 x bfloat> [[MASKEDOFF]], <vscale x 16 x bfloat> [[OP1]], <vscale x 16 x bfloat> [[OP2]], <vscale x 16 x i1> [[MASK]], i64 [[VL]], i64 1)
+// CHECK-RV64-NEXT: ret <vscale x 16 x bfloat> [[TMP0]]
+//
+vbfloat16m4_t test_vfsgnjx_vv_bf16m4_mu(vbool4_t mask, vbfloat16m4_t maskedoff, vbfloat16m4_t op1, vbfloat16m4_t op2, size_t vl) {
+ return __riscv_vfsgnjx_vv_bf16m4_mu(mask, maskedoff, op1, op2, vl);
+}
+
+// CHECK-RV64-LABEL: define dso_local <vscale x 16 x bfloat> @test_vfsgnjx_vf_bf16m4_mu(
+// CHECK-RV64-SAME: <vscale x 16 x i1> [[MASK:%.*]], <vscale x 16 x bfloat> [[MASKEDOFF:%.*]], <vscale x 16 x bfloat> [[OP1:%.*]], bfloat noundef [[OP2:%.*]], i64 noundef [[VL:%.*]]) #[[ATTR0]] {
+// CHECK-RV64-NEXT: [[ENTRY:.*:]]
+// CHECK-RV64-NEXT: [[TMP0:%.*]] = call <vscale x 16 x bfloat> @llvm.riscv.vfsgnjx.mask.nxv16bf16.bf16.i64(<vscale x 16 x bfloat> [[MASKEDOFF]], <vscale x 16 x bfloat> [[OP1]], bfloat [[OP2]], <vscale x 16 x i1> [[MASK]], i64 [[VL]], i64 1)
+// CHECK-RV64-NEXT: ret <vscale x 16 x bfloat> [[TMP0]]
+//
+vbfloat16m4_t test_vfsgnjx_vf_bf16m4_mu(vbool4_t mask, vbfloat16m4_t maskedoff, vbfloat16m4_t op1, __bf16 op2, size_t vl) {
+ return __riscv_vfsgnjx_vf_bf16m4_mu(mask, maskedoff, op1, op2, vl);
+}
+
+// CHECK-RV64-LABEL: define dso_local <vscale x 32 x bfloat> @test_vfsgnjx_vv_bf16m8_mu(
+// CHECK-RV64-SAME: <vscale x 32 x i1> [[MASK:%.*]], <vscale x 32 x bfloat> [[MASKEDOFF:%.*]], <vscale x 32 x bfloat> [[OP1:%.*]], <vscale x 32 x bfloat> [[OP2:%.*]], i64 noundef [[VL:%.*]]) #[[ATTR0]] {
+// CHECK-RV64-NEXT: [[ENTRY:.*:]]
+// CHECK-RV64-NEXT: [[TMP0:%.*]] = call <vscale x 32 x bfloat> @llvm.riscv.vfsgnjx.mask.nxv32bf16.nxv32bf16.i64(<vscale x 32 x bfloat> [[MASKEDOFF]], <vscale x 32 x bfloat> [[OP1]], <vscale x 32 x bfloat> [[OP2]], <vscale x 32 x i1> [[MASK]], i64 [[VL]], i64 1)
+// CHECK-RV64-NEXT: ret <vscale x 32 x bfloat> [[TMP0]]
+//
+vbfloat16m8_t test_vfsgnjx_vv_bf16m8_mu(vbool2_t mask, vbfloat16m8_t maskedoff, vbfloat16m8_t op1, vbfloat16m8_t op2, size_t vl) {
+ return __riscv_vfsgnjx_vv_bf16m8_mu(mask, maskedoff, op1, op2, vl);
+}
+
+// CHECK-RV64-LABEL: define dso_local <vscale x 32 x bfloat> @test_vfsgnjx_vf_bf16m8_mu(
+// CHECK-RV64-SAME: <vscale x 32 x i1> [[MASK:%.*]], <vscale x 32 x bfloat> [[MASKEDOFF:%.*]], <vscale x 32 x bfloat> [[OP1:%.*]], bfloat noundef [[OP2:%.*]], i64 noundef [[VL:%.*]]) #[[ATTR0]] {
+// CHECK-RV64-NEXT: [[ENTRY:.*:]]
+// CHECK-RV64-NEXT: [[TMP0:%.*]] = call <vscale x 32 x bfloat> @llvm.riscv.vfsgnjx.mask.nxv32bf16.bf16.i64(<vscale x 32 x bfloat> [[MASKEDOFF]], <vscale x 32 x bfloat> [[OP1]], bfloat [[OP2]], <vscale x 32 x i1> [[MASK]], i64 [[VL]], i64 1)
+// CHECK-RV64-NEXT: ret <vscale x 32 x bfloat> [[TMP0]]
+//
+vbfloat16m8_t test_vfsgnjx_vf_bf16m8_mu(vbool2_t mask, vbfloat16m8_t maskedoff, vbfloat16m8_t op1, __bf16 op2, size_t vl) {
+ return __riscv_vfsgnjx_vf_bf16m8_mu(mask, maskedoff, op1, op2, vl);
+}
+
diff --git a/clang/test/CodeGen/RISCV/rvv-intrinsics-autogenerated/zvfbfa/policy/non-overloaded/vfslide1down.c b/clang/test/CodeGen/RISCV/rvv-intrinsics-autogenerated/zvfbfa/policy/non-overloaded/vfslide1down.c
new file mode 100644
index 0000000..228dc1cd
--- /dev/null
+++ b/clang/test/CodeGen/RISCV/rvv-intrinsics-autogenerated/zvfbfa/policy/non-overloaded/vfslide1down.c
@@ -0,0 +1,249 @@
+// NOTE: Assertions have been autogenerated by utils/update_cc_test_checks.py UTC_ARGS: --version 5
+// REQUIRES: riscv-registered-target
+// RUN: %clang_cc1 -triple riscv64 -target-feature +v \
+// RUN: -target-feature +experimental-zvfbfa -disable-O0-optnone \
+// RUN: -emit-llvm %s -o - | opt -S -passes=mem2reg | \
+// RUN: FileCheck --check-prefix=CHECK-RV64 %s
+
+#include <riscv_vector.h>
+
+// CHECK-RV64-LABEL: define dso_local <vscale x 1 x bfloat> @test_vfslide1down_vf_bf16mf4_tu(
+// CHECK-RV64-SAME: <vscale x 1 x bfloat> [[MASKEDOFF:%.*]], <vscale x 1 x bfloat> [[SRC:%.*]], bfloat noundef [[VALUE:%.*]], i64 noundef [[VL:%.*]]) #[[ATTR0:[0-9]+]] {
+// CHECK-RV64-NEXT: [[ENTRY:.*:]]
+// CHECK-RV64-NEXT: [[TMP0:%.*]] = call <vscale x 1 x bfloat> @llvm.riscv.vfslide1down.nxv1bf16.bf16.i64(<vscale x 1 x bfloat> [[MASKEDOFF]], <vscale x 1 x bfloat> [[SRC]], bfloat [[VALUE]], i64 [[VL]])
+// CHECK-RV64-NEXT: ret <vscale x 1 x bfloat> [[TMP0]]
+//
+vbfloat16mf4_t test_vfslide1down_vf_bf16mf4_tu(vbfloat16mf4_t maskedoff, vbfloat16mf4_t src, __bf16 value, size_t vl) {
+ return __riscv_vfslide1down_vf_bf16mf4_tu(maskedoff, src, value, vl);
+}
+
+// CHECK-RV64-LABEL: define dso_local <vscale x 2 x bfloat> @test_vfslide1down_vf_bf16mf2_tu(
+// CHECK-RV64-SAME: <vscale x 2 x bfloat> [[MASKEDOFF:%.*]], <vscale x 2 x bfloat> [[SRC:%.*]], bfloat noundef [[VALUE:%.*]], i64 noundef [[VL:%.*]]) #[[ATTR0]] {
+// CHECK-RV64-NEXT: [[ENTRY:.*:]]
+// CHECK-RV64-NEXT: [[TMP0:%.*]] = call <vscale x 2 x bfloat> @llvm.riscv.vfslide1down.nxv2bf16.bf16.i64(<vscale x 2 x bfloat> [[MASKEDOFF]], <vscale x 2 x bfloat> [[SRC]], bfloat [[VALUE]], i64 [[VL]])
+// CHECK-RV64-NEXT: ret <vscale x 2 x bfloat> [[TMP0]]
+//
+vbfloat16mf2_t test_vfslide1down_vf_bf16mf2_tu(vbfloat16mf2_t maskedoff, vbfloat16mf2_t src, __bf16 value, size_t vl) {
+ return __riscv_vfslide1down_vf_bf16mf2_tu(maskedoff, src, value, vl);
+}
+
+// CHECK-RV64-LABEL: define dso_local <vscale x 4 x bfloat> @test_vfslide1down_vf_bf16m1_tu(
+// CHECK-RV64-SAME: <vscale x 4 x bfloat> [[MASKEDOFF:%.*]], <vscale x 4 x bfloat> [[SRC:%.*]], bfloat noundef [[VALUE:%.*]], i64 noundef [[VL:%.*]]) #[[ATTR0]] {
+// CHECK-RV64-NEXT: [[ENTRY:.*:]]
+// CHECK-RV64-NEXT: [[TMP0:%.*]] = call <vscale x 4 x bfloat> @llvm.riscv.vfslide1down.nxv4bf16.bf16.i64(<vscale x 4 x bfloat> [[MASKEDOFF]], <vscale x 4 x bfloat> [[SRC]], bfloat [[VALUE]], i64 [[VL]])
+// CHECK-RV64-NEXT: ret <vscale x 4 x bfloat> [[TMP0]]
+//
+vbfloat16m1_t test_vfslide1down_vf_bf16m1_tu(vbfloat16m1_t maskedoff, vbfloat16m1_t src, __bf16 value, size_t vl) {
+ return __riscv_vfslide1down_vf_bf16m1_tu(maskedoff, src, value, vl);
+}
+
+// CHECK-RV64-LABEL: define dso_local <vscale x 8 x bfloat> @test_vfslide1down_vf_bf16m2_tu(
+// CHECK-RV64-SAME: <vscale x 8 x bfloat> [[MASKEDOFF:%.*]], <vscale x 8 x bfloat> [[SRC:%.*]], bfloat noundef [[VALUE:%.*]], i64 noundef [[VL:%.*]]) #[[ATTR0]] {
+// CHECK-RV64-NEXT: [[ENTRY:.*:]]
+// CHECK-RV64-NEXT: [[TMP0:%.*]] = call <vscale x 8 x bfloat> @llvm.riscv.vfslide1down.nxv8bf16.bf16.i64(<vscale x 8 x bfloat> [[MASKEDOFF]], <vscale x 8 x bfloat> [[SRC]], bfloat [[VALUE]], i64 [[VL]])
+// CHECK-RV64-NEXT: ret <vscale x 8 x bfloat> [[TMP0]]
+//
+vbfloat16m2_t test_vfslide1down_vf_bf16m2_tu(vbfloat16m2_t maskedoff, vbfloat16m2_t src, __bf16 value, size_t vl) {
+ return __riscv_vfslide1down_vf_bf16m2_tu(maskedoff, src, value, vl);
+}
+
+// CHECK-RV64-LABEL: define dso_local <vscale x 16 x bfloat> @test_vfslide1down_vf_bf16m4_tu(
+// CHECK-RV64-SAME: <vscale x 16 x bfloat> [[MASKEDOFF:%.*]], <vscale x 16 x bfloat> [[SRC:%.*]], bfloat noundef [[VALUE:%.*]], i64 noundef [[VL:%.*]]) #[[ATTR0]] {
+// CHECK-RV64-NEXT: [[ENTRY:.*:]]
+// CHECK-RV64-NEXT: [[TMP0:%.*]] = call <vscale x 16 x bfloat> @llvm.riscv.vfslide1down.nxv16bf16.bf16.i64(<vscale x 16 x bfloat> [[MASKEDOFF]], <vscale x 16 x bfloat> [[SRC]], bfloat [[VALUE]], i64 [[VL]])
+// CHECK-RV64-NEXT: ret <vscale x 16 x bfloat> [[TMP0]]
+//
+vbfloat16m4_t test_vfslide1down_vf_bf16m4_tu(vbfloat16m4_t maskedoff, vbfloat16m4_t src, __bf16 value, size_t vl) {
+ return __riscv_vfslide1down_vf_bf16m4_tu(maskedoff, src, value, vl);
+}
+
+// CHECK-RV64-LABEL: define dso_local <vscale x 32 x bfloat> @test_vfslide1down_vf_bf16m8_tu(
+// CHECK-RV64-SAME: <vscale x 32 x bfloat> [[MASKEDOFF:%.*]], <vscale x 32 x bfloat> [[SRC:%.*]], bfloat noundef [[VALUE:%.*]], i64 noundef [[VL:%.*]]) #[[ATTR0]] {
+// CHECK-RV64-NEXT: [[ENTRY:.*:]]
+// CHECK-RV64-NEXT: [[TMP0:%.*]] = call <vscale x 32 x bfloat> @llvm.riscv.vfslide1down.nxv32bf16.bf16.i64(<vscale x 32 x bfloat> [[MASKEDOFF]], <vscale x 32 x bfloat> [[SRC]], bfloat [[VALUE]], i64 [[VL]])
+// CHECK-RV64-NEXT: ret <vscale x 32 x bfloat> [[TMP0]]
+//
+vbfloat16m8_t test_vfslide1down_vf_bf16m8_tu(vbfloat16m8_t maskedoff, vbfloat16m8_t src, __bf16 value, size_t vl) {
+ return __riscv_vfslide1down_vf_bf16m8_tu(maskedoff, src, value, vl);
+}
+
+// CHECK-RV64-LABEL: define dso_local <vscale x 1 x bfloat> @test_vfslide1down_vf_bf16mf4_tum(
+// CHECK-RV64-SAME: <vscale x 1 x i1> [[MASK:%.*]], <vscale x 1 x bfloat> [[MASKEDOFF:%.*]], <vscale x 1 x bfloat> [[SRC:%.*]], bfloat noundef [[VALUE:%.*]], i64 noundef [[VL:%.*]]) #[[ATTR0]] {
+// CHECK-RV64-NEXT: [[ENTRY:.*:]]
+// CHECK-RV64-NEXT: [[TMP0:%.*]] = call <vscale x 1 x bfloat> @llvm.riscv.vfslide1down.mask.nxv1bf16.bf16.i64(<vscale x 1 x bfloat> [[MASKEDOFF]], <vscale x 1 x bfloat> [[SRC]], bfloat [[VALUE]], <vscale x 1 x i1> [[MASK]], i64 [[VL]], i64 2)
+// CHECK-RV64-NEXT: ret <vscale x 1 x bfloat> [[TMP0]]
+//
+vbfloat16mf4_t test_vfslide1down_vf_bf16mf4_tum(vbool64_t mask, vbfloat16mf4_t maskedoff, vbfloat16mf4_t src, __bf16 value, size_t vl) {
+ return __riscv_vfslide1down_vf_bf16mf4_tum(mask, maskedoff, src, value, vl);
+}
+
+// CHECK-RV64-LABEL: define dso_local <vscale x 2 x bfloat> @test_vfslide1down_vf_bf16mf2_tum(
+// CHECK-RV64-SAME: <vscale x 2 x i1> [[MASK:%.*]], <vscale x 2 x bfloat> [[MASKEDOFF:%.*]], <vscale x 2 x bfloat> [[SRC:%.*]], bfloat noundef [[VALUE:%.*]], i64 noundef [[VL:%.*]]) #[[ATTR0]] {
+// CHECK-RV64-NEXT: [[ENTRY:.*:]]
+// CHECK-RV64-NEXT: [[TMP0:%.*]] = call <vscale x 2 x bfloat> @llvm.riscv.vfslide1down.mask.nxv2bf16.bf16.i64(<vscale x 2 x bfloat> [[MASKEDOFF]], <vscale x 2 x bfloat> [[SRC]], bfloat [[VALUE]], <vscale x 2 x i1> [[MASK]], i64 [[VL]], i64 2)
+// CHECK-RV64-NEXT: ret <vscale x 2 x bfloat> [[TMP0]]
+//
+vbfloat16mf2_t test_vfslide1down_vf_bf16mf2_tum(vbool32_t mask, vbfloat16mf2_t maskedoff, vbfloat16mf2_t src, __bf16 value, size_t vl) {
+ return __riscv_vfslide1down_vf_bf16mf2_tum(mask, maskedoff, src, value, vl);
+}
+
+// CHECK-RV64-LABEL: define dso_local <vscale x 4 x bfloat> @test_vfslide1down_vf_bf16m1_tum(
+// CHECK-RV64-SAME: <vscale x 4 x i1> [[MASK:%.*]], <vscale x 4 x bfloat> [[MASKEDOFF:%.*]], <vscale x 4 x bfloat> [[SRC:%.*]], bfloat noundef [[VALUE:%.*]], i64 noundef [[VL:%.*]]) #[[ATTR0]] {
+// CHECK-RV64-NEXT: [[ENTRY:.*:]]
+// CHECK-RV64-NEXT: [[TMP0:%.*]] = call <vscale x 4 x bfloat> @llvm.riscv.vfslide1down.mask.nxv4bf16.bf16.i64(<vscale x 4 x bfloat> [[MASKEDOFF]], <vscale x 4 x bfloat> [[SRC]], bfloat [[VALUE]], <vscale x 4 x i1> [[MASK]], i64 [[VL]], i64 2)
+// CHECK-RV64-NEXT: ret <vscale x 4 x bfloat> [[TMP0]]
+//
+vbfloat16m1_t test_vfslide1down_vf_bf16m1_tum(vbool16_t mask, vbfloat16m1_t maskedoff, vbfloat16m1_t src, __bf16 value, size_t vl) {
+ return __riscv_vfslide1down_vf_bf16m1_tum(mask, maskedoff, src, value, vl);
+}
+
+// CHECK-RV64-LABEL: define dso_local <vscale x 8 x bfloat> @test_vfslide1down_vf_bf16m2_tum(
+// CHECK-RV64-SAME: <vscale x 8 x i1> [[MASK:%.*]], <vscale x 8 x bfloat> [[MASKEDOFF:%.*]], <vscale x 8 x bfloat> [[SRC:%.*]], bfloat noundef [[VALUE:%.*]], i64 noundef [[VL:%.*]]) #[[ATTR0]] {
+// CHECK-RV64-NEXT: [[ENTRY:.*:]]
+// CHECK-RV64-NEXT: [[TMP0:%.*]] = call <vscale x 8 x bfloat> @llvm.riscv.vfslide1down.mask.nxv8bf16.bf16.i64(<vscale x 8 x bfloat> [[MASKEDOFF]], <vscale x 8 x bfloat> [[SRC]], bfloat [[VALUE]], <vscale x 8 x i1> [[MASK]], i64 [[VL]], i64 2)
+// CHECK-RV64-NEXT: ret <vscale x 8 x bfloat> [[TMP0]]
+//
+vbfloat16m2_t test_vfslide1down_vf_bf16m2_tum(vbool8_t mask, vbfloat16m2_t maskedoff, vbfloat16m2_t src, __bf16 value, size_t vl) {
+ return __riscv_vfslide1down_vf_bf16m2_tum(mask, maskedoff, src, value, vl);
+}
+
+// CHECK-RV64-LABEL: define dso_local <vscale x 16 x bfloat> @test_vfslide1down_vf_bf16m4_tum(
+// CHECK-RV64-SAME: <vscale x 16 x i1> [[MASK:%.*]], <vscale x 16 x bfloat> [[MASKEDOFF:%.*]], <vscale x 16 x bfloat> [[SRC:%.*]], bfloat noundef [[VALUE:%.*]], i64 noundef [[VL:%.*]]) #[[ATTR0]] {
+// CHECK-RV64-NEXT: [[ENTRY:.*:]]
+// CHECK-RV64-NEXT: [[TMP0:%.*]] = call <vscale x 16 x bfloat> @llvm.riscv.vfslide1down.mask.nxv16bf16.bf16.i64(<vscale x 16 x bfloat> [[MASKEDOFF]], <vscale x 16 x bfloat> [[SRC]], bfloat [[VALUE]], <vscale x 16 x i1> [[MASK]], i64 [[VL]], i64 2)
+// CHECK-RV64-NEXT: ret <vscale x 16 x bfloat> [[TMP0]]
+//
+vbfloat16m4_t test_vfslide1down_vf_bf16m4_tum(vbool4_t mask, vbfloat16m4_t maskedoff, vbfloat16m4_t src, __bf16 value, size_t vl) {
+ return __riscv_vfslide1down_vf_bf16m4_tum(mask, maskedoff, src, value, vl);
+}
+
+// CHECK-RV64-LABEL: define dso_local <vscale x 32 x bfloat> @test_vfslide1down_vf_bf16m8_tum(
+// CHECK-RV64-SAME: <vscale x 32 x i1> [[MASK:%.*]], <vscale x 32 x bfloat> [[MASKEDOFF:%.*]], <vscale x 32 x bfloat> [[SRC:%.*]], bfloat noundef [[VALUE:%.*]], i64 noundef [[VL:%.*]]) #[[ATTR0]] {
+// CHECK-RV64-NEXT: [[ENTRY:.*:]]
+// CHECK-RV64-NEXT: [[TMP0:%.*]] = call <vscale x 32 x bfloat> @llvm.riscv.vfslide1down.mask.nxv32bf16.bf16.i64(<vscale x 32 x bfloat> [[MASKEDOFF]], <vscale x 32 x bfloat> [[SRC]], bfloat [[VALUE]], <vscale x 32 x i1> [[MASK]], i64 [[VL]], i64 2)
+// CHECK-RV64-NEXT: ret <vscale x 32 x bfloat> [[TMP0]]
+//
+vbfloat16m8_t test_vfslide1down_vf_bf16m8_tum(vbool2_t mask, vbfloat16m8_t maskedoff, vbfloat16m8_t src, __bf16 value, size_t vl) {
+ return __riscv_vfslide1down_vf_bf16m8_tum(mask, maskedoff, src, value, vl);
+}
+
+// CHECK-RV64-LABEL: define dso_local <vscale x 1 x bfloat> @test_vfslide1down_vf_bf16mf4_tumu(
+// CHECK-RV64-SAME: <vscale x 1 x i1> [[MASK:%.*]], <vscale x 1 x bfloat> [[MASKEDOFF:%.*]], <vscale x 1 x bfloat> [[SRC:%.*]], bfloat noundef [[VALUE:%.*]], i64 noundef [[VL:%.*]]) #[[ATTR0]] {
+// CHECK-RV64-NEXT: [[ENTRY:.*:]]
+// CHECK-RV64-NEXT: [[TMP0:%.*]] = call <vscale x 1 x bfloat> @llvm.riscv.vfslide1down.mask.nxv1bf16.bf16.i64(<vscale x 1 x bfloat> [[MASKEDOFF]], <vscale x 1 x bfloat> [[SRC]], bfloat [[VALUE]], <vscale x 1 x i1> [[MASK]], i64 [[VL]], i64 0)
+// CHECK-RV64-NEXT: ret <vscale x 1 x bfloat> [[TMP0]]
+//
+vbfloat16mf4_t test_vfslide1down_vf_bf16mf4_tumu(vbool64_t mask, vbfloat16mf4_t maskedoff, vbfloat16mf4_t src, __bf16 value, size_t vl) {
+ return __riscv_vfslide1down_vf_bf16mf4_tumu(mask, maskedoff, src, value, vl);
+}
+
+// CHECK-RV64-LABEL: define dso_local <vscale x 2 x bfloat> @test_vfslide1down_vf_bf16mf2_tumu(
+// CHECK-RV64-SAME: <vscale x 2 x i1> [[MASK:%.*]], <vscale x 2 x bfloat> [[MASKEDOFF:%.*]], <vscale x 2 x bfloat> [[SRC:%.*]], bfloat noundef [[VALUE:%.*]], i64 noundef [[VL:%.*]]) #[[ATTR0]] {
+// CHECK-RV64-NEXT: [[ENTRY:.*:]]
+// CHECK-RV64-NEXT: [[TMP0:%.*]] = call <vscale x 2 x bfloat> @llvm.riscv.vfslide1down.mask.nxv2bf16.bf16.i64(<vscale x 2 x bfloat> [[MASKEDOFF]], <vscale x 2 x bfloat> [[SRC]], bfloat [[VALUE]], <vscale x 2 x i1> [[MASK]], i64 [[VL]], i64 0)
+// CHECK-RV64-NEXT: ret <vscale x 2 x bfloat> [[TMP0]]
+//
+vbfloat16mf2_t test_vfslide1down_vf_bf16mf2_tumu(vbool32_t mask, vbfloat16mf2_t maskedoff, vbfloat16mf2_t src, __bf16 value, size_t vl) {
+ return __riscv_vfslide1down_vf_bf16mf2_tumu(mask, maskedoff, src, value, vl);
+}
+
+// CHECK-RV64-LABEL: define dso_local <vscale x 4 x bfloat> @test_vfslide1down_vf_bf16m1_tumu(
+// CHECK-RV64-SAME: <vscale x 4 x i1> [[MASK:%.*]], <vscale x 4 x bfloat> [[MASKEDOFF:%.*]], <vscale x 4 x bfloat> [[SRC:%.*]], bfloat noundef [[VALUE:%.*]], i64 noundef [[VL:%.*]]) #[[ATTR0]] {
+// CHECK-RV64-NEXT: [[ENTRY:.*:]]
+// CHECK-RV64-NEXT: [[TMP0:%.*]] = call <vscale x 4 x bfloat> @llvm.riscv.vfslide1down.mask.nxv4bf16.bf16.i64(<vscale x 4 x bfloat> [[MASKEDOFF]], <vscale x 4 x bfloat> [[SRC]], bfloat [[VALUE]], <vscale x 4 x i1> [[MASK]], i64 [[VL]], i64 0)
+// CHECK-RV64-NEXT: ret <vscale x 4 x bfloat> [[TMP0]]
+//
+vbfloat16m1_t test_vfslide1down_vf_bf16m1_tumu(vbool16_t mask, vbfloat16m1_t maskedoff, vbfloat16m1_t src, __bf16 value, size_t vl) {
+ return __riscv_vfslide1down_vf_bf16m1_tumu(mask, maskedoff, src, value, vl);
+}
+
+// CHECK-RV64-LABEL: define dso_local <vscale x 8 x bfloat> @test_vfslide1down_vf_bf16m2_tumu(
+// CHECK-RV64-SAME: <vscale x 8 x i1> [[MASK:%.*]], <vscale x 8 x bfloat> [[MASKEDOFF:%.*]], <vscale x 8 x bfloat> [[SRC:%.*]], bfloat noundef [[VALUE:%.*]], i64 noundef [[VL:%.*]]) #[[ATTR0]] {
+// CHECK-RV64-NEXT: [[ENTRY:.*:]]
+// CHECK-RV64-NEXT: [[TMP0:%.*]] = call <vscale x 8 x bfloat> @llvm.riscv.vfslide1down.mask.nxv8bf16.bf16.i64(<vscale x 8 x bfloat> [[MASKEDOFF]], <vscale x 8 x bfloat> [[SRC]], bfloat [[VALUE]], <vscale x 8 x i1> [[MASK]], i64 [[VL]], i64 0)
+// CHECK-RV64-NEXT: ret <vscale x 8 x bfloat> [[TMP0]]
+//
+vbfloat16m2_t test_vfslide1down_vf_bf16m2_tumu(vbool8_t mask, vbfloat16m2_t maskedoff, vbfloat16m2_t src, __bf16 value, size_t vl) {
+ return __riscv_vfslide1down_vf_bf16m2_tumu(mask, maskedoff, src, value, vl);
+}
+
+// CHECK-RV64-LABEL: define dso_local <vscale x 16 x bfloat> @test_vfslide1down_vf_bf16m4_tumu(
+// CHECK-RV64-SAME: <vscale x 16 x i1> [[MASK:%.*]], <vscale x 16 x bfloat> [[MASKEDOFF:%.*]], <vscale x 16 x bfloat> [[SRC:%.*]], bfloat noundef [[VALUE:%.*]], i64 noundef [[VL:%.*]]) #[[ATTR0]] {
+// CHECK-RV64-NEXT: [[ENTRY:.*:]]
+// CHECK-RV64-NEXT: [[TMP0:%.*]] = call <vscale x 16 x bfloat> @llvm.riscv.vfslide1down.mask.nxv16bf16.bf16.i64(<vscale x 16 x bfloat> [[MASKEDOFF]], <vscale x 16 x bfloat> [[SRC]], bfloat [[VALUE]], <vscale x 16 x i1> [[MASK]], i64 [[VL]], i64 0)
+// CHECK-RV64-NEXT: ret <vscale x 16 x bfloat> [[TMP0]]
+//
+vbfloat16m4_t test_vfslide1down_vf_bf16m4_tumu(vbool4_t mask, vbfloat16m4_t maskedoff, vbfloat16m4_t src, __bf16 value, size_t vl) {
+ return __riscv_vfslide1down_vf_bf16m4_tumu(mask, maskedoff, src, value, vl);
+}
+
+// CHECK-RV64-LABEL: define dso_local <vscale x 32 x bfloat> @test_vfslide1down_vf_bf16m8_tumu(
+// CHECK-RV64-SAME: <vscale x 32 x i1> [[MASK:%.*]], <vscale x 32 x bfloat> [[MASKEDOFF:%.*]], <vscale x 32 x bfloat> [[SRC:%.*]], bfloat noundef [[VALUE:%.*]], i64 noundef [[VL:%.*]]) #[[ATTR0]] {
+// CHECK-RV64-NEXT: [[ENTRY:.*:]]
+// CHECK-RV64-NEXT: [[TMP0:%.*]] = call <vscale x 32 x bfloat> @llvm.riscv.vfslide1down.mask.nxv32bf16.bf16.i64(<vscale x 32 x bfloat> [[MASKEDOFF]], <vscale x 32 x bfloat> [[SRC]], bfloat [[VALUE]], <vscale x 32 x i1> [[MASK]], i64 [[VL]], i64 0)
+// CHECK-RV64-NEXT: ret <vscale x 32 x bfloat> [[TMP0]]
+//
+vbfloat16m8_t test_vfslide1down_vf_bf16m8_tumu(vbool2_t mask, vbfloat16m8_t maskedoff, vbfloat16m8_t src, __bf16 value, size_t vl) {
+ return __riscv_vfslide1down_vf_bf16m8_tumu(mask, maskedoff, src, value, vl);
+}
+
+// CHECK-RV64-LABEL: define dso_local <vscale x 1 x bfloat> @test_vfslide1down_vf_bf16mf4_mu(
+// CHECK-RV64-SAME: <vscale x 1 x i1> [[MASK:%.*]], <vscale x 1 x bfloat> [[MASKEDOFF:%.*]], <vscale x 1 x bfloat> [[SRC:%.*]], bfloat noundef [[VALUE:%.*]], i64 noundef [[VL:%.*]]) #[[ATTR0]] {
+// CHECK-RV64-NEXT: [[ENTRY:.*:]]
+// CHECK-RV64-NEXT: [[TMP0:%.*]] = call <vscale x 1 x bfloat> @llvm.riscv.vfslide1down.mask.nxv1bf16.bf16.i64(<vscale x 1 x bfloat> [[MASKEDOFF]], <vscale x 1 x bfloat> [[SRC]], bfloat [[VALUE]], <vscale x 1 x i1> [[MASK]], i64 [[VL]], i64 1)
+// CHECK-RV64-NEXT: ret <vscale x 1 x bfloat> [[TMP0]]
+//
+vbfloat16mf4_t test_vfslide1down_vf_bf16mf4_mu(vbool64_t mask, vbfloat16mf4_t maskedoff, vbfloat16mf4_t src, __bf16 value, size_t vl) {
+ return __riscv_vfslide1down_vf_bf16mf4_mu(mask, maskedoff, src, value, vl);
+}
+
+// CHECK-RV64-LABEL: define dso_local <vscale x 2 x bfloat> @test_vfslide1down_vf_bf16mf2_mu(
+// CHECK-RV64-SAME: <vscale x 2 x i1> [[MASK:%.*]], <vscale x 2 x bfloat> [[MASKEDOFF:%.*]], <vscale x 2 x bfloat> [[SRC:%.*]], bfloat noundef [[VALUE:%.*]], i64 noundef [[VL:%.*]]) #[[ATTR0]] {
+// CHECK-RV64-NEXT: [[ENTRY:.*:]]
+// CHECK-RV64-NEXT: [[TMP0:%.*]] = call <vscale x 2 x bfloat> @llvm.riscv.vfslide1down.mask.nxv2bf16.bf16.i64(<vscale x 2 x bfloat> [[MASKEDOFF]], <vscale x 2 x bfloat> [[SRC]], bfloat [[VALUE]], <vscale x 2 x i1> [[MASK]], i64 [[VL]], i64 1)
+// CHECK-RV64-NEXT: ret <vscale x 2 x bfloat> [[TMP0]]
+//
+vbfloat16mf2_t test_vfslide1down_vf_bf16mf2_mu(vbool32_t mask, vbfloat16mf2_t maskedoff, vbfloat16mf2_t src, __bf16 value, size_t vl) {
+ return __riscv_vfslide1down_vf_bf16mf2_mu(mask, maskedoff, src, value, vl);
+}
+
+// CHECK-RV64-LABEL: define dso_local <vscale x 4 x bfloat> @test_vfslide1down_vf_bf16m1_mu(
+// CHECK-RV64-SAME: <vscale x 4 x i1> [[MASK:%.*]], <vscale x 4 x bfloat> [[MASKEDOFF:%.*]], <vscale x 4 x bfloat> [[SRC:%.*]], bfloat noundef [[VALUE:%.*]], i64 noundef [[VL:%.*]]) #[[ATTR0]] {
+// CHECK-RV64-NEXT: [[ENTRY:.*:]]
+// CHECK-RV64-NEXT: [[TMP0:%.*]] = call <vscale x 4 x bfloat> @llvm.riscv.vfslide1down.mask.nxv4bf16.bf16.i64(<vscale x 4 x bfloat> [[MASKEDOFF]], <vscale x 4 x bfloat> [[SRC]], bfloat [[VALUE]], <vscale x 4 x i1> [[MASK]], i64 [[VL]], i64 1)
+// CHECK-RV64-NEXT: ret <vscale x 4 x bfloat> [[TMP0]]
+//
+vbfloat16m1_t test_vfslide1down_vf_bf16m1_mu(vbool16_t mask, vbfloat16m1_t maskedoff, vbfloat16m1_t src, __bf16 value, size_t vl) {
+ return __riscv_vfslide1down_vf_bf16m1_mu(mask, maskedoff, src, value, vl);
+}
+
+// CHECK-RV64-LABEL: define dso_local <vscale x 8 x bfloat> @test_vfslide1down_vf_bf16m2_mu(
+// CHECK-RV64-SAME: <vscale x 8 x i1> [[MASK:%.*]], <vscale x 8 x bfloat> [[MASKEDOFF:%.*]], <vscale x 8 x bfloat> [[SRC:%.*]], bfloat noundef [[VALUE:%.*]], i64 noundef [[VL:%.*]]) #[[ATTR0]] {
+// CHECK-RV64-NEXT: [[ENTRY:.*:]]
+// CHECK-RV64-NEXT: [[TMP0:%.*]] = call <vscale x 8 x bfloat> @llvm.riscv.vfslide1down.mask.nxv8bf16.bf16.i64(<vscale x 8 x bfloat> [[MASKEDOFF]], <vscale x 8 x bfloat> [[SRC]], bfloat [[VALUE]], <vscale x 8 x i1> [[MASK]], i64 [[VL]], i64 1)
+// CHECK-RV64-NEXT: ret <vscale x 8 x bfloat> [[TMP0]]
+//
+vbfloat16m2_t test_vfslide1down_vf_bf16m2_mu(vbool8_t mask, vbfloat16m2_t maskedoff, vbfloat16m2_t src, __bf16 value, size_t vl) {
+ return __riscv_vfslide1down_vf_bf16m2_mu(mask, maskedoff, src, value, vl);
+}
+
+// CHECK-RV64-LABEL: define dso_local <vscale x 16 x bfloat> @test_vfslide1down_vf_bf16m4_mu(
+// CHECK-RV64-SAME: <vscale x 16 x i1> [[MASK:%.*]], <vscale x 16 x bfloat> [[MASKEDOFF:%.*]], <vscale x 16 x bfloat> [[SRC:%.*]], bfloat noundef [[VALUE:%.*]], i64 noundef [[VL:%.*]]) #[[ATTR0]] {
+// CHECK-RV64-NEXT: [[ENTRY:.*:]]
+// CHECK-RV64-NEXT: [[TMP0:%.*]] = call <vscale x 16 x bfloat> @llvm.riscv.vfslide1down.mask.nxv16bf16.bf16.i64(<vscale x 16 x bfloat> [[MASKEDOFF]], <vscale x 16 x bfloat> [[SRC]], bfloat [[VALUE]], <vscale x 16 x i1> [[MASK]], i64 [[VL]], i64 1)
+// CHECK-RV64-NEXT: ret <vscale x 16 x bfloat> [[TMP0]]
+//
+vbfloat16m4_t test_vfslide1down_vf_bf16m4_mu(vbool4_t mask, vbfloat16m4_t maskedoff, vbfloat16m4_t src, __bf16 value, size_t vl) {
+ return __riscv_vfslide1down_vf_bf16m4_mu(mask, maskedoff, src, value, vl);
+}
+
+// CHECK-RV64-LABEL: define dso_local <vscale x 32 x bfloat> @test_vfslide1down_vf_bf16m8_mu(
+// CHECK-RV64-SAME: <vscale x 32 x i1> [[MASK:%.*]], <vscale x 32 x bfloat> [[MASKEDOFF:%.*]], <vscale x 32 x bfloat> [[SRC:%.*]], bfloat noundef [[VALUE:%.*]], i64 noundef [[VL:%.*]]) #[[ATTR0]] {
+// CHECK-RV64-NEXT: [[ENTRY:.*:]]
+// CHECK-RV64-NEXT: [[TMP0:%.*]] = call <vscale x 32 x bfloat> @llvm.riscv.vfslide1down.mask.nxv32bf16.bf16.i64(<vscale x 32 x bfloat> [[MASKEDOFF]], <vscale x 32 x bfloat> [[SRC]], bfloat [[VALUE]], <vscale x 32 x i1> [[MASK]], i64 [[VL]], i64 1)
+// CHECK-RV64-NEXT: ret <vscale x 32 x bfloat> [[TMP0]]
+//
+vbfloat16m8_t test_vfslide1down_vf_bf16m8_mu(vbool2_t mask, vbfloat16m8_t maskedoff, vbfloat16m8_t src, __bf16 value, size_t vl) {
+ return __riscv_vfslide1down_vf_bf16m8_mu(mask, maskedoff, src, value, vl);
+}
+
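(Usage sketch, not part of the generated test file: the masked `_tum` variant checked above composes as follows, assuming the caller already holds a mask, a passthrough vector, and a vector length; every name other than the intrinsic itself is hypothetical.)

#include <riscv_vector.h>

// Masked, tail-undisturbed (_tum) vfslide1down over bf16 vectors, the same
// call checked by test_vfslide1down_vf_bf16m1_tum above: the body of `src`
// shifts down by one element and `value` is inserted from the high end.
static vbfloat16m1_t slide_in_bf16(vbool16_t mask, vbfloat16m1_t passthru,
                                   vbfloat16m1_t src, __bf16 value, size_t vl) {
  return __riscv_vfslide1down_vf_bf16m1_tum(mask, passthru, src, value, vl);
}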
diff --git a/clang/test/CodeGen/RISCV/rvv-intrinsics-autogenerated/zvfbfa/policy/non-overloaded/vfslide1up.c b/clang/test/CodeGen/RISCV/rvv-intrinsics-autogenerated/zvfbfa/policy/non-overloaded/vfslide1up.c
new file mode 100644
index 0000000..9e6ff2b
--- /dev/null
+++ b/clang/test/CodeGen/RISCV/rvv-intrinsics-autogenerated/zvfbfa/policy/non-overloaded/vfslide1up.c
@@ -0,0 +1,249 @@
+// NOTE: Assertions have been autogenerated by utils/update_cc_test_checks.py UTC_ARGS: --version 5
+// REQUIRES: riscv-registered-target
+// RUN: %clang_cc1 -triple riscv64 -target-feature +v \
+// RUN: -target-feature +experimental-zvfbfa -disable-O0-optnone \
+// RUN: -emit-llvm %s -o - | opt -S -passes=mem2reg | \
+// RUN: FileCheck --check-prefix=CHECK-RV64 %s
+
+#include <riscv_vector.h>
+
+// CHECK-RV64-LABEL: define dso_local <vscale x 1 x bfloat> @test_vfslide1up_vf_bf16mf4_tu(
+// CHECK-RV64-SAME: <vscale x 1 x bfloat> [[MASKEDOFF:%.*]], <vscale x 1 x bfloat> [[SRC:%.*]], bfloat noundef [[VALUE:%.*]], i64 noundef [[VL:%.*]]) #[[ATTR0:[0-9]+]] {
+// CHECK-RV64-NEXT: [[ENTRY:.*:]]
+// CHECK-RV64-NEXT: [[TMP0:%.*]] = call <vscale x 1 x bfloat> @llvm.riscv.vfslide1up.nxv1bf16.bf16.i64(<vscale x 1 x bfloat> [[MASKEDOFF]], <vscale x 1 x bfloat> [[SRC]], bfloat [[VALUE]], i64 [[VL]])
+// CHECK-RV64-NEXT: ret <vscale x 1 x bfloat> [[TMP0]]
+//
+vbfloat16mf4_t test_vfslide1up_vf_bf16mf4_tu(vbfloat16mf4_t maskedoff, vbfloat16mf4_t src, __bf16 value, size_t vl) {
+ return __riscv_vfslide1up_vf_bf16mf4_tu(maskedoff, src, value, vl);
+}
+
+// CHECK-RV64-LABEL: define dso_local <vscale x 2 x bfloat> @test_vfslide1up_vf_bf16mf2_tu(
+// CHECK-RV64-SAME: <vscale x 2 x bfloat> [[MASKEDOFF:%.*]], <vscale x 2 x bfloat> [[SRC:%.*]], bfloat noundef [[VALUE:%.*]], i64 noundef [[VL:%.*]]) #[[ATTR0]] {
+// CHECK-RV64-NEXT: [[ENTRY:.*:]]
+// CHECK-RV64-NEXT: [[TMP0:%.*]] = call <vscale x 2 x bfloat> @llvm.riscv.vfslide1up.nxv2bf16.bf16.i64(<vscale x 2 x bfloat> [[MASKEDOFF]], <vscale x 2 x bfloat> [[SRC]], bfloat [[VALUE]], i64 [[VL]])
+// CHECK-RV64-NEXT: ret <vscale x 2 x bfloat> [[TMP0]]
+//
+vbfloat16mf2_t test_vfslide1up_vf_bf16mf2_tu(vbfloat16mf2_t maskedoff, vbfloat16mf2_t src, __bf16 value, size_t vl) {
+ return __riscv_vfslide1up_vf_bf16mf2_tu(maskedoff, src, value, vl);
+}
+
+// CHECK-RV64-LABEL: define dso_local <vscale x 4 x bfloat> @test_vfslide1up_vf_bf16m1_tu(
+// CHECK-RV64-SAME: <vscale x 4 x bfloat> [[MASKEDOFF:%.*]], <vscale x 4 x bfloat> [[SRC:%.*]], bfloat noundef [[VALUE:%.*]], i64 noundef [[VL:%.*]]) #[[ATTR0]] {
+// CHECK-RV64-NEXT: [[ENTRY:.*:]]
+// CHECK-RV64-NEXT: [[TMP0:%.*]] = call <vscale x 4 x bfloat> @llvm.riscv.vfslide1up.nxv4bf16.bf16.i64(<vscale x 4 x bfloat> [[MASKEDOFF]], <vscale x 4 x bfloat> [[SRC]], bfloat [[VALUE]], i64 [[VL]])
+// CHECK-RV64-NEXT: ret <vscale x 4 x bfloat> [[TMP0]]
+//
+vbfloat16m1_t test_vfslide1up_vf_bf16m1_tu(vbfloat16m1_t maskedoff, vbfloat16m1_t src, __bf16 value, size_t vl) {
+ return __riscv_vfslide1up_vf_bf16m1_tu(maskedoff, src, value, vl);
+}
+
+// CHECK-RV64-LABEL: define dso_local <vscale x 8 x bfloat> @test_vfslide1up_vf_bf16m2_tu(
+// CHECK-RV64-SAME: <vscale x 8 x bfloat> [[MASKEDOFF:%.*]], <vscale x 8 x bfloat> [[SRC:%.*]], bfloat noundef [[VALUE:%.*]], i64 noundef [[VL:%.*]]) #[[ATTR0]] {
+// CHECK-RV64-NEXT: [[ENTRY:.*:]]
+// CHECK-RV64-NEXT: [[TMP0:%.*]] = call <vscale x 8 x bfloat> @llvm.riscv.vfslide1up.nxv8bf16.bf16.i64(<vscale x 8 x bfloat> [[MASKEDOFF]], <vscale x 8 x bfloat> [[SRC]], bfloat [[VALUE]], i64 [[VL]])
+// CHECK-RV64-NEXT: ret <vscale x 8 x bfloat> [[TMP0]]
+//
+vbfloat16m2_t test_vfslide1up_vf_bf16m2_tu(vbfloat16m2_t maskedoff, vbfloat16m2_t src, __bf16 value, size_t vl) {
+ return __riscv_vfslide1up_vf_bf16m2_tu(maskedoff, src, value, vl);
+}
+
+// CHECK-RV64-LABEL: define dso_local <vscale x 16 x bfloat> @test_vfslide1up_vf_bf16m4_tu(
+// CHECK-RV64-SAME: <vscale x 16 x bfloat> [[MASKEDOFF:%.*]], <vscale x 16 x bfloat> [[SRC:%.*]], bfloat noundef [[VALUE:%.*]], i64 noundef [[VL:%.*]]) #[[ATTR0]] {
+// CHECK-RV64-NEXT: [[ENTRY:.*:]]
+// CHECK-RV64-NEXT: [[TMP0:%.*]] = call <vscale x 16 x bfloat> @llvm.riscv.vfslide1up.nxv16bf16.bf16.i64(<vscale x 16 x bfloat> [[MASKEDOFF]], <vscale x 16 x bfloat> [[SRC]], bfloat [[VALUE]], i64 [[VL]])
+// CHECK-RV64-NEXT: ret <vscale x 16 x bfloat> [[TMP0]]
+//
+vbfloat16m4_t test_vfslide1up_vf_bf16m4_tu(vbfloat16m4_t maskedoff, vbfloat16m4_t src, __bf16 value, size_t vl) {
+ return __riscv_vfslide1up_vf_bf16m4_tu(maskedoff, src, value, vl);
+}
+
+// CHECK-RV64-LABEL: define dso_local <vscale x 32 x bfloat> @test_vfslide1up_vf_bf16m8_tu(
+// CHECK-RV64-SAME: <vscale x 32 x bfloat> [[MASKEDOFF:%.*]], <vscale x 32 x bfloat> [[SRC:%.*]], bfloat noundef [[VALUE:%.*]], i64 noundef [[VL:%.*]]) #[[ATTR0]] {
+// CHECK-RV64-NEXT: [[ENTRY:.*:]]
+// CHECK-RV64-NEXT: [[TMP0:%.*]] = call <vscale x 32 x bfloat> @llvm.riscv.vfslide1up.nxv32bf16.bf16.i64(<vscale x 32 x bfloat> [[MASKEDOFF]], <vscale x 32 x bfloat> [[SRC]], bfloat [[VALUE]], i64 [[VL]])
+// CHECK-RV64-NEXT: ret <vscale x 32 x bfloat> [[TMP0]]
+//
+vbfloat16m8_t test_vfslide1up_vf_bf16m8_tu(vbfloat16m8_t maskedoff, vbfloat16m8_t src, __bf16 value, size_t vl) {
+ return __riscv_vfslide1up_vf_bf16m8_tu(maskedoff, src, value, vl);
+}
+
+// CHECK-RV64-LABEL: define dso_local <vscale x 1 x bfloat> @test_vfslide1up_vf_bf16mf4_tum(
+// CHECK-RV64-SAME: <vscale x 1 x i1> [[MASK:%.*]], <vscale x 1 x bfloat> [[MASKEDOFF:%.*]], <vscale x 1 x bfloat> [[SRC:%.*]], bfloat noundef [[VALUE:%.*]], i64 noundef [[VL:%.*]]) #[[ATTR0]] {
+// CHECK-RV64-NEXT: [[ENTRY:.*:]]
+// CHECK-RV64-NEXT: [[TMP0:%.*]] = call <vscale x 1 x bfloat> @llvm.riscv.vfslide1up.mask.nxv1bf16.bf16.i64(<vscale x 1 x bfloat> [[MASKEDOFF]], <vscale x 1 x bfloat> [[SRC]], bfloat [[VALUE]], <vscale x 1 x i1> [[MASK]], i64 [[VL]], i64 2)
+// CHECK-RV64-NEXT: ret <vscale x 1 x bfloat> [[TMP0]]
+//
+vbfloat16mf4_t test_vfslide1up_vf_bf16mf4_tum(vbool64_t mask, vbfloat16mf4_t maskedoff, vbfloat16mf4_t src, __bf16 value, size_t vl) {
+ return __riscv_vfslide1up_vf_bf16mf4_tum(mask, maskedoff, src, value, vl);
+}
+
+// CHECK-RV64-LABEL: define dso_local <vscale x 2 x bfloat> @test_vfslide1up_vf_bf16mf2_tum(
+// CHECK-RV64-SAME: <vscale x 2 x i1> [[MASK:%.*]], <vscale x 2 x bfloat> [[MASKEDOFF:%.*]], <vscale x 2 x bfloat> [[SRC:%.*]], bfloat noundef [[VALUE:%.*]], i64 noundef [[VL:%.*]]) #[[ATTR0]] {
+// CHECK-RV64-NEXT: [[ENTRY:.*:]]
+// CHECK-RV64-NEXT: [[TMP0:%.*]] = call <vscale x 2 x bfloat> @llvm.riscv.vfslide1up.mask.nxv2bf16.bf16.i64(<vscale x 2 x bfloat> [[MASKEDOFF]], <vscale x 2 x bfloat> [[SRC]], bfloat [[VALUE]], <vscale x 2 x i1> [[MASK]], i64 [[VL]], i64 2)
+// CHECK-RV64-NEXT: ret <vscale x 2 x bfloat> [[TMP0]]
+//
+vbfloat16mf2_t test_vfslide1up_vf_bf16mf2_tum(vbool32_t mask, vbfloat16mf2_t maskedoff, vbfloat16mf2_t src, __bf16 value, size_t vl) {
+ return __riscv_vfslide1up_vf_bf16mf2_tum(mask, maskedoff, src, value, vl);
+}
+
+// CHECK-RV64-LABEL: define dso_local <vscale x 4 x bfloat> @test_vfslide1up_vf_bf16m1_tum(
+// CHECK-RV64-SAME: <vscale x 4 x i1> [[MASK:%.*]], <vscale x 4 x bfloat> [[MASKEDOFF:%.*]], <vscale x 4 x bfloat> [[SRC:%.*]], bfloat noundef [[VALUE:%.*]], i64 noundef [[VL:%.*]]) #[[ATTR0]] {
+// CHECK-RV64-NEXT: [[ENTRY:.*:]]
+// CHECK-RV64-NEXT: [[TMP0:%.*]] = call <vscale x 4 x bfloat> @llvm.riscv.vfslide1up.mask.nxv4bf16.bf16.i64(<vscale x 4 x bfloat> [[MASKEDOFF]], <vscale x 4 x bfloat> [[SRC]], bfloat [[VALUE]], <vscale x 4 x i1> [[MASK]], i64 [[VL]], i64 2)
+// CHECK-RV64-NEXT: ret <vscale x 4 x bfloat> [[TMP0]]
+//
+vbfloat16m1_t test_vfslide1up_vf_bf16m1_tum(vbool16_t mask, vbfloat16m1_t maskedoff, vbfloat16m1_t src, __bf16 value, size_t vl) {
+ return __riscv_vfslide1up_vf_bf16m1_tum(mask, maskedoff, src, value, vl);
+}
+
+// CHECK-RV64-LABEL: define dso_local <vscale x 8 x bfloat> @test_vfslide1up_vf_bf16m2_tum(
+// CHECK-RV64-SAME: <vscale x 8 x i1> [[MASK:%.*]], <vscale x 8 x bfloat> [[MASKEDOFF:%.*]], <vscale x 8 x bfloat> [[SRC:%.*]], bfloat noundef [[VALUE:%.*]], i64 noundef [[VL:%.*]]) #[[ATTR0]] {
+// CHECK-RV64-NEXT: [[ENTRY:.*:]]
+// CHECK-RV64-NEXT: [[TMP0:%.*]] = call <vscale x 8 x bfloat> @llvm.riscv.vfslide1up.mask.nxv8bf16.bf16.i64(<vscale x 8 x bfloat> [[MASKEDOFF]], <vscale x 8 x bfloat> [[SRC]], bfloat [[VALUE]], <vscale x 8 x i1> [[MASK]], i64 [[VL]], i64 2)
+// CHECK-RV64-NEXT: ret <vscale x 8 x bfloat> [[TMP0]]
+//
+vbfloat16m2_t test_vfslide1up_vf_bf16m2_tum(vbool8_t mask, vbfloat16m2_t maskedoff, vbfloat16m2_t src, __bf16 value, size_t vl) {
+ return __riscv_vfslide1up_vf_bf16m2_tum(mask, maskedoff, src, value, vl);
+}
+
+// CHECK-RV64-LABEL: define dso_local <vscale x 16 x bfloat> @test_vfslide1up_vf_bf16m4_tum(
+// CHECK-RV64-SAME: <vscale x 16 x i1> [[MASK:%.*]], <vscale x 16 x bfloat> [[MASKEDOFF:%.*]], <vscale x 16 x bfloat> [[SRC:%.*]], bfloat noundef [[VALUE:%.*]], i64 noundef [[VL:%.*]]) #[[ATTR0]] {
+// CHECK-RV64-NEXT: [[ENTRY:.*:]]
+// CHECK-RV64-NEXT: [[TMP0:%.*]] = call <vscale x 16 x bfloat> @llvm.riscv.vfslide1up.mask.nxv16bf16.bf16.i64(<vscale x 16 x bfloat> [[MASKEDOFF]], <vscale x 16 x bfloat> [[SRC]], bfloat [[VALUE]], <vscale x 16 x i1> [[MASK]], i64 [[VL]], i64 2)
+// CHECK-RV64-NEXT: ret <vscale x 16 x bfloat> [[TMP0]]
+//
+vbfloat16m4_t test_vfslide1up_vf_bf16m4_tum(vbool4_t mask, vbfloat16m4_t maskedoff, vbfloat16m4_t src, __bf16 value, size_t vl) {
+ return __riscv_vfslide1up_vf_bf16m4_tum(mask, maskedoff, src, value, vl);
+}
+
+// CHECK-RV64-LABEL: define dso_local <vscale x 32 x bfloat> @test_vfslide1up_vf_bf16m8_tum(
+// CHECK-RV64-SAME: <vscale x 32 x i1> [[MASK:%.*]], <vscale x 32 x bfloat> [[MASKEDOFF:%.*]], <vscale x 32 x bfloat> [[SRC:%.*]], bfloat noundef [[VALUE:%.*]], i64 noundef [[VL:%.*]]) #[[ATTR0]] {
+// CHECK-RV64-NEXT: [[ENTRY:.*:]]
+// CHECK-RV64-NEXT: [[TMP0:%.*]] = call <vscale x 32 x bfloat> @llvm.riscv.vfslide1up.mask.nxv32bf16.bf16.i64(<vscale x 32 x bfloat> [[MASKEDOFF]], <vscale x 32 x bfloat> [[SRC]], bfloat [[VALUE]], <vscale x 32 x i1> [[MASK]], i64 [[VL]], i64 2)
+// CHECK-RV64-NEXT: ret <vscale x 32 x bfloat> [[TMP0]]
+//
+vbfloat16m8_t test_vfslide1up_vf_bf16m8_tum(vbool2_t mask, vbfloat16m8_t maskedoff, vbfloat16m8_t src, __bf16 value, size_t vl) {
+ return __riscv_vfslide1up_vf_bf16m8_tum(mask, maskedoff, src, value, vl);
+}
+
+// CHECK-RV64-LABEL: define dso_local <vscale x 1 x bfloat> @test_vfslide1up_vf_bf16mf4_tumu(
+// CHECK-RV64-SAME: <vscale x 1 x i1> [[MASK:%.*]], <vscale x 1 x bfloat> [[MASKEDOFF:%.*]], <vscale x 1 x bfloat> [[SRC:%.*]], bfloat noundef [[VALUE:%.*]], i64 noundef [[VL:%.*]]) #[[ATTR0]] {
+// CHECK-RV64-NEXT: [[ENTRY:.*:]]
+// CHECK-RV64-NEXT: [[TMP0:%.*]] = call <vscale x 1 x bfloat> @llvm.riscv.vfslide1up.mask.nxv1bf16.bf16.i64(<vscale x 1 x bfloat> [[MASKEDOFF]], <vscale x 1 x bfloat> [[SRC]], bfloat [[VALUE]], <vscale x 1 x i1> [[MASK]], i64 [[VL]], i64 0)
+// CHECK-RV64-NEXT: ret <vscale x 1 x bfloat> [[TMP0]]
+//
+vbfloat16mf4_t test_vfslide1up_vf_bf16mf4_tumu(vbool64_t mask, vbfloat16mf4_t maskedoff, vbfloat16mf4_t src, __bf16 value, size_t vl) {
+ return __riscv_vfslide1up_vf_bf16mf4_tumu(mask, maskedoff, src, value, vl);
+}
+
+// CHECK-RV64-LABEL: define dso_local <vscale x 2 x bfloat> @test_vfslide1up_vf_bf16mf2_tumu(
+// CHECK-RV64-SAME: <vscale x 2 x i1> [[MASK:%.*]], <vscale x 2 x bfloat> [[MASKEDOFF:%.*]], <vscale x 2 x bfloat> [[SRC:%.*]], bfloat noundef [[VALUE:%.*]], i64 noundef [[VL:%.*]]) #[[ATTR0]] {
+// CHECK-RV64-NEXT: [[ENTRY:.*:]]
+// CHECK-RV64-NEXT: [[TMP0:%.*]] = call <vscale x 2 x bfloat> @llvm.riscv.vfslide1up.mask.nxv2bf16.bf16.i64(<vscale x 2 x bfloat> [[MASKEDOFF]], <vscale x 2 x bfloat> [[SRC]], bfloat [[VALUE]], <vscale x 2 x i1> [[MASK]], i64 [[VL]], i64 0)
+// CHECK-RV64-NEXT: ret <vscale x 2 x bfloat> [[TMP0]]
+//
+vbfloat16mf2_t test_vfslide1up_vf_bf16mf2_tumu(vbool32_t mask, vbfloat16mf2_t maskedoff, vbfloat16mf2_t src, __bf16 value, size_t vl) {
+ return __riscv_vfslide1up_vf_bf16mf2_tumu(mask, maskedoff, src, value, vl);
+}
+
+// CHECK-RV64-LABEL: define dso_local <vscale x 4 x bfloat> @test_vfslide1up_vf_bf16m1_tumu(
+// CHECK-RV64-SAME: <vscale x 4 x i1> [[MASK:%.*]], <vscale x 4 x bfloat> [[MASKEDOFF:%.*]], <vscale x 4 x bfloat> [[SRC:%.*]], bfloat noundef [[VALUE:%.*]], i64 noundef [[VL:%.*]]) #[[ATTR0]] {
+// CHECK-RV64-NEXT: [[ENTRY:.*:]]
+// CHECK-RV64-NEXT: [[TMP0:%.*]] = call <vscale x 4 x bfloat> @llvm.riscv.vfslide1up.mask.nxv4bf16.bf16.i64(<vscale x 4 x bfloat> [[MASKEDOFF]], <vscale x 4 x bfloat> [[SRC]], bfloat [[VALUE]], <vscale x 4 x i1> [[MASK]], i64 [[VL]], i64 0)
+// CHECK-RV64-NEXT: ret <vscale x 4 x bfloat> [[TMP0]]
+//
+vbfloat16m1_t test_vfslide1up_vf_bf16m1_tumu(vbool16_t mask, vbfloat16m1_t maskedoff, vbfloat16m1_t src, __bf16 value, size_t vl) {
+ return __riscv_vfslide1up_vf_bf16m1_tumu(mask, maskedoff, src, value, vl);
+}
+
+// CHECK-RV64-LABEL: define dso_local <vscale x 8 x bfloat> @test_vfslide1up_vf_bf16m2_tumu(
+// CHECK-RV64-SAME: <vscale x 8 x i1> [[MASK:%.*]], <vscale x 8 x bfloat> [[MASKEDOFF:%.*]], <vscale x 8 x bfloat> [[SRC:%.*]], bfloat noundef [[VALUE:%.*]], i64 noundef [[VL:%.*]]) #[[ATTR0]] {
+// CHECK-RV64-NEXT: [[ENTRY:.*:]]
+// CHECK-RV64-NEXT: [[TMP0:%.*]] = call <vscale x 8 x bfloat> @llvm.riscv.vfslide1up.mask.nxv8bf16.bf16.i64(<vscale x 8 x bfloat> [[MASKEDOFF]], <vscale x 8 x bfloat> [[SRC]], bfloat [[VALUE]], <vscale x 8 x i1> [[MASK]], i64 [[VL]], i64 0)
+// CHECK-RV64-NEXT: ret <vscale x 8 x bfloat> [[TMP0]]
+//
+vbfloat16m2_t test_vfslide1up_vf_bf16m2_tumu(vbool8_t mask, vbfloat16m2_t maskedoff, vbfloat16m2_t src, __bf16 value, size_t vl) {
+ return __riscv_vfslide1up_vf_bf16m2_tumu(mask, maskedoff, src, value, vl);
+}
+
+// CHECK-RV64-LABEL: define dso_local <vscale x 16 x bfloat> @test_vfslide1up_vf_bf16m4_tumu(
+// CHECK-RV64-SAME: <vscale x 16 x i1> [[MASK:%.*]], <vscale x 16 x bfloat> [[MASKEDOFF:%.*]], <vscale x 16 x bfloat> [[SRC:%.*]], bfloat noundef [[VALUE:%.*]], i64 noundef [[VL:%.*]]) #[[ATTR0]] {
+// CHECK-RV64-NEXT: [[ENTRY:.*:]]
+// CHECK-RV64-NEXT: [[TMP0:%.*]] = call <vscale x 16 x bfloat> @llvm.riscv.vfslide1up.mask.nxv16bf16.bf16.i64(<vscale x 16 x bfloat> [[MASKEDOFF]], <vscale x 16 x bfloat> [[SRC]], bfloat [[VALUE]], <vscale x 16 x i1> [[MASK]], i64 [[VL]], i64 0)
+// CHECK-RV64-NEXT: ret <vscale x 16 x bfloat> [[TMP0]]
+//
+vbfloat16m4_t test_vfslide1up_vf_bf16m4_tumu(vbool4_t mask, vbfloat16m4_t maskedoff, vbfloat16m4_t src, __bf16 value, size_t vl) {
+ return __riscv_vfslide1up_vf_bf16m4_tumu(mask, maskedoff, src, value, vl);
+}
+
+// CHECK-RV64-LABEL: define dso_local <vscale x 32 x bfloat> @test_vfslide1up_vf_bf16m8_tumu(
+// CHECK-RV64-SAME: <vscale x 32 x i1> [[MASK:%.*]], <vscale x 32 x bfloat> [[MASKEDOFF:%.*]], <vscale x 32 x bfloat> [[SRC:%.*]], bfloat noundef [[VALUE:%.*]], i64 noundef [[VL:%.*]]) #[[ATTR0]] {
+// CHECK-RV64-NEXT: [[ENTRY:.*:]]
+// CHECK-RV64-NEXT: [[TMP0:%.*]] = call <vscale x 32 x bfloat> @llvm.riscv.vfslide1up.mask.nxv32bf16.bf16.i64(<vscale x 32 x bfloat> [[MASKEDOFF]], <vscale x 32 x bfloat> [[SRC]], bfloat [[VALUE]], <vscale x 32 x i1> [[MASK]], i64 [[VL]], i64 0)
+// CHECK-RV64-NEXT: ret <vscale x 32 x bfloat> [[TMP0]]
+//
+vbfloat16m8_t test_vfslide1up_vf_bf16m8_tumu(vbool2_t mask, vbfloat16m8_t maskedoff, vbfloat16m8_t src, __bf16 value, size_t vl) {
+ return __riscv_vfslide1up_vf_bf16m8_tumu(mask, maskedoff, src, value, vl);
+}
+
+// CHECK-RV64-LABEL: define dso_local <vscale x 1 x bfloat> @test_vfslide1up_vf_bf16mf4_mu(
+// CHECK-RV64-SAME: <vscale x 1 x i1> [[MASK:%.*]], <vscale x 1 x bfloat> [[MASKEDOFF:%.*]], <vscale x 1 x bfloat> [[SRC:%.*]], bfloat noundef [[VALUE:%.*]], i64 noundef [[VL:%.*]]) #[[ATTR0]] {
+// CHECK-RV64-NEXT: [[ENTRY:.*:]]
+// CHECK-RV64-NEXT: [[TMP0:%.*]] = call <vscale x 1 x bfloat> @llvm.riscv.vfslide1up.mask.nxv1bf16.bf16.i64(<vscale x 1 x bfloat> [[MASKEDOFF]], <vscale x 1 x bfloat> [[SRC]], bfloat [[VALUE]], <vscale x 1 x i1> [[MASK]], i64 [[VL]], i64 1)
+// CHECK-RV64-NEXT: ret <vscale x 1 x bfloat> [[TMP0]]
+//
+vbfloat16mf4_t test_vfslide1up_vf_bf16mf4_mu(vbool64_t mask, vbfloat16mf4_t maskedoff, vbfloat16mf4_t src, __bf16 value, size_t vl) {
+ return __riscv_vfslide1up_vf_bf16mf4_mu(mask, maskedoff, src, value, vl);
+}
+
+// CHECK-RV64-LABEL: define dso_local <vscale x 2 x bfloat> @test_vfslide1up_vf_bf16mf2_mu(
+// CHECK-RV64-SAME: <vscale x 2 x i1> [[MASK:%.*]], <vscale x 2 x bfloat> [[MASKEDOFF:%.*]], <vscale x 2 x bfloat> [[SRC:%.*]], bfloat noundef [[VALUE:%.*]], i64 noundef [[VL:%.*]]) #[[ATTR0]] {
+// CHECK-RV64-NEXT: [[ENTRY:.*:]]
+// CHECK-RV64-NEXT: [[TMP0:%.*]] = call <vscale x 2 x bfloat> @llvm.riscv.vfslide1up.mask.nxv2bf16.bf16.i64(<vscale x 2 x bfloat> [[MASKEDOFF]], <vscale x 2 x bfloat> [[SRC]], bfloat [[VALUE]], <vscale x 2 x i1> [[MASK]], i64 [[VL]], i64 1)
+// CHECK-RV64-NEXT: ret <vscale x 2 x bfloat> [[TMP0]]
+//
+vbfloat16mf2_t test_vfslide1up_vf_bf16mf2_mu(vbool32_t mask, vbfloat16mf2_t maskedoff, vbfloat16mf2_t src, __bf16 value, size_t vl) {
+ return __riscv_vfslide1up_vf_bf16mf2_mu(mask, maskedoff, src, value, vl);
+}
+
+// CHECK-RV64-LABEL: define dso_local <vscale x 4 x bfloat> @test_vfslide1up_vf_bf16m1_mu(
+// CHECK-RV64-SAME: <vscale x 4 x i1> [[MASK:%.*]], <vscale x 4 x bfloat> [[MASKEDOFF:%.*]], <vscale x 4 x bfloat> [[SRC:%.*]], bfloat noundef [[VALUE:%.*]], i64 noundef [[VL:%.*]]) #[[ATTR0]] {
+// CHECK-RV64-NEXT: [[ENTRY:.*:]]
+// CHECK-RV64-NEXT: [[TMP0:%.*]] = call <vscale x 4 x bfloat> @llvm.riscv.vfslide1up.mask.nxv4bf16.bf16.i64(<vscale x 4 x bfloat> [[MASKEDOFF]], <vscale x 4 x bfloat> [[SRC]], bfloat [[VALUE]], <vscale x 4 x i1> [[MASK]], i64 [[VL]], i64 1)
+// CHECK-RV64-NEXT: ret <vscale x 4 x bfloat> [[TMP0]]
+//
+vbfloat16m1_t test_vfslide1up_vf_bf16m1_mu(vbool16_t mask, vbfloat16m1_t maskedoff, vbfloat16m1_t src, __bf16 value, size_t vl) {
+ return __riscv_vfslide1up_vf_bf16m1_mu(mask, maskedoff, src, value, vl);
+}
+
+// CHECK-RV64-LABEL: define dso_local <vscale x 8 x bfloat> @test_vfslide1up_vf_bf16m2_mu(
+// CHECK-RV64-SAME: <vscale x 8 x i1> [[MASK:%.*]], <vscale x 8 x bfloat> [[MASKEDOFF:%.*]], <vscale x 8 x bfloat> [[SRC:%.*]], bfloat noundef [[VALUE:%.*]], i64 noundef [[VL:%.*]]) #[[ATTR0]] {
+// CHECK-RV64-NEXT: [[ENTRY:.*:]]
+// CHECK-RV64-NEXT: [[TMP0:%.*]] = call <vscale x 8 x bfloat> @llvm.riscv.vfslide1up.mask.nxv8bf16.bf16.i64(<vscale x 8 x bfloat> [[MASKEDOFF]], <vscale x 8 x bfloat> [[SRC]], bfloat [[VALUE]], <vscale x 8 x i1> [[MASK]], i64 [[VL]], i64 1)
+// CHECK-RV64-NEXT: ret <vscale x 8 x bfloat> [[TMP0]]
+//
+vbfloat16m2_t test_vfslide1up_vf_bf16m2_mu(vbool8_t mask, vbfloat16m2_t maskedoff, vbfloat16m2_t src, __bf16 value, size_t vl) {
+ return __riscv_vfslide1up_vf_bf16m2_mu(mask, maskedoff, src, value, vl);
+}
+
+// CHECK-RV64-LABEL: define dso_local <vscale x 16 x bfloat> @test_vfslide1up_vf_bf16m4_mu(
+// CHECK-RV64-SAME: <vscale x 16 x i1> [[MASK:%.*]], <vscale x 16 x bfloat> [[MASKEDOFF:%.*]], <vscale x 16 x bfloat> [[SRC:%.*]], bfloat noundef [[VALUE:%.*]], i64 noundef [[VL:%.*]]) #[[ATTR0]] {
+// CHECK-RV64-NEXT: [[ENTRY:.*:]]
+// CHECK-RV64-NEXT: [[TMP0:%.*]] = call <vscale x 16 x bfloat> @llvm.riscv.vfslide1up.mask.nxv16bf16.bf16.i64(<vscale x 16 x bfloat> [[MASKEDOFF]], <vscale x 16 x bfloat> [[SRC]], bfloat [[VALUE]], <vscale x 16 x i1> [[MASK]], i64 [[VL]], i64 1)
+// CHECK-RV64-NEXT: ret <vscale x 16 x bfloat> [[TMP0]]
+//
+vbfloat16m4_t test_vfslide1up_vf_bf16m4_mu(vbool4_t mask, vbfloat16m4_t maskedoff, vbfloat16m4_t src, __bf16 value, size_t vl) {
+ return __riscv_vfslide1up_vf_bf16m4_mu(mask, maskedoff, src, value, vl);
+}
+
+// CHECK-RV64-LABEL: define dso_local <vscale x 32 x bfloat> @test_vfslide1up_vf_bf16m8_mu(
+// CHECK-RV64-SAME: <vscale x 32 x i1> [[MASK:%.*]], <vscale x 32 x bfloat> [[MASKEDOFF:%.*]], <vscale x 32 x bfloat> [[SRC:%.*]], bfloat noundef [[VALUE:%.*]], i64 noundef [[VL:%.*]]) #[[ATTR0]] {
+// CHECK-RV64-NEXT: [[ENTRY:.*:]]
+// CHECK-RV64-NEXT: [[TMP0:%.*]] = call <vscale x 32 x bfloat> @llvm.riscv.vfslide1up.mask.nxv32bf16.bf16.i64(<vscale x 32 x bfloat> [[MASKEDOFF]], <vscale x 32 x bfloat> [[SRC]], bfloat [[VALUE]], <vscale x 32 x i1> [[MASK]], i64 [[VL]], i64 1)
+// CHECK-RV64-NEXT: ret <vscale x 32 x bfloat> [[TMP0]]
+//
+vbfloat16m8_t test_vfslide1up_vf_bf16m8_mu(vbool2_t mask, vbfloat16m8_t maskedoff, vbfloat16m8_t src, __bf16 value, size_t vl) {
+ return __riscv_vfslide1up_vf_bf16m8_mu(mask, maskedoff, src, value, vl);
+}
+
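(Likewise a usage sketch, not part of the test file: the unmasked `_tu` form of vfslide1up exercised above, with hypothetical caller-side names.)

#include <riscv_vector.h>

// Tail-undisturbed (_tu) vfslide1up over bf16 vectors, the same call checked
// by test_vfslide1up_vf_bf16m1_tu above: `value` becomes element 0 and the
// remaining body elements of `src` move up by one position.
static vbfloat16m1_t slide_up_bf16(vbfloat16m1_t passthru, vbfloat16m1_t src,
                                   __bf16 value, size_t vl) {
  return __riscv_vfslide1up_vf_bf16m1_tu(passthru, src, value, vl);
}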
diff --git a/clang/test/CodeGen/RISCV/rvv-intrinsics-autogenerated/zvfbfa/policy/non-overloaded/vfsub.c b/clang/test/CodeGen/RISCV/rvv-intrinsics-autogenerated/zvfbfa/policy/non-overloaded/vfsub.c
new file mode 100644
index 0000000..b6fd94e
--- /dev/null
+++ b/clang/test/CodeGen/RISCV/rvv-intrinsics-autogenerated/zvfbfa/policy/non-overloaded/vfsub.c
@@ -0,0 +1,489 @@
+// NOTE: Assertions have been autogenerated by utils/update_cc_test_checks.py UTC_ARGS: --version 5
+// REQUIRES: riscv-registered-target
+// RUN: %clang_cc1 -triple riscv64 -target-feature +v \
+// RUN: -target-feature +experimental-zvfbfa -disable-O0-optnone \
+// RUN: -emit-llvm %s -o - | opt -S -passes=mem2reg | \
+// RUN: FileCheck --check-prefix=CHECK-RV64 %s
+
+#include <riscv_vector.h>
+
+// CHECK-RV64-LABEL: define dso_local <vscale x 1 x bfloat> @test_vfsub_vv_bf16mf4_tu(
+// CHECK-RV64-SAME: <vscale x 1 x bfloat> [[MASKEDOFF:%.*]], <vscale x 1 x bfloat> [[OP1:%.*]], <vscale x 1 x bfloat> [[OP2:%.*]], i64 noundef [[VL:%.*]]) #[[ATTR0:[0-9]+]] {
+// CHECK-RV64-NEXT: [[ENTRY:.*:]]
+// CHECK-RV64-NEXT: [[TMP0:%.*]] = call <vscale x 1 x bfloat> @llvm.riscv.vfsub.nxv1bf16.nxv1bf16.i64(<vscale x 1 x bfloat> [[MASKEDOFF]], <vscale x 1 x bfloat> [[OP1]], <vscale x 1 x bfloat> [[OP2]], i64 7, i64 [[VL]])
+// CHECK-RV64-NEXT: ret <vscale x 1 x bfloat> [[TMP0]]
+//
+vbfloat16mf4_t test_vfsub_vv_bf16mf4_tu(vbfloat16mf4_t maskedoff, vbfloat16mf4_t op1, vbfloat16mf4_t op2, size_t vl) {
+ return __riscv_vfsub_vv_bf16mf4_tu(maskedoff, op1, op2, vl);
+}
+
+// CHECK-RV64-LABEL: define dso_local <vscale x 1 x bfloat> @test_vfsub_vf_bf16mf4_tu(
+// CHECK-RV64-SAME: <vscale x 1 x bfloat> [[MASKEDOFF:%.*]], <vscale x 1 x bfloat> [[OP1:%.*]], bfloat noundef [[OP2:%.*]], i64 noundef [[VL:%.*]]) #[[ATTR0]] {
+// CHECK-RV64-NEXT: [[ENTRY:.*:]]
+// CHECK-RV64-NEXT: [[TMP0:%.*]] = call <vscale x 1 x bfloat> @llvm.riscv.vfsub.nxv1bf16.bf16.i64(<vscale x 1 x bfloat> [[MASKEDOFF]], <vscale x 1 x bfloat> [[OP1]], bfloat [[OP2]], i64 7, i64 [[VL]])
+// CHECK-RV64-NEXT: ret <vscale x 1 x bfloat> [[TMP0]]
+//
+vbfloat16mf4_t test_vfsub_vf_bf16mf4_tu(vbfloat16mf4_t maskedoff, vbfloat16mf4_t op1, __bf16 op2, size_t vl) {
+ return __riscv_vfsub_vf_bf16mf4_tu(maskedoff, op1, op2, vl);
+}
+
+// CHECK-RV64-LABEL: define dso_local <vscale x 2 x bfloat> @test_vfsub_vv_bf16mf2_tu(
+// CHECK-RV64-SAME: <vscale x 2 x bfloat> [[MASKEDOFF:%.*]], <vscale x 2 x bfloat> [[OP1:%.*]], <vscale x 2 x bfloat> [[OP2:%.*]], i64 noundef [[VL:%.*]]) #[[ATTR0]] {
+// CHECK-RV64-NEXT: [[ENTRY:.*:]]
+// CHECK-RV64-NEXT: [[TMP0:%.*]] = call <vscale x 2 x bfloat> @llvm.riscv.vfsub.nxv2bf16.nxv2bf16.i64(<vscale x 2 x bfloat> [[MASKEDOFF]], <vscale x 2 x bfloat> [[OP1]], <vscale x 2 x bfloat> [[OP2]], i64 7, i64 [[VL]])
+// CHECK-RV64-NEXT: ret <vscale x 2 x bfloat> [[TMP0]]
+//
+vbfloat16mf2_t test_vfsub_vv_bf16mf2_tu(vbfloat16mf2_t maskedoff, vbfloat16mf2_t op1, vbfloat16mf2_t op2, size_t vl) {
+ return __riscv_vfsub_vv_bf16mf2_tu(maskedoff, op1, op2, vl);
+}
+
+// CHECK-RV64-LABEL: define dso_local <vscale x 2 x bfloat> @test_vfsub_vf_bf16mf2_tu(
+// CHECK-RV64-SAME: <vscale x 2 x bfloat> [[MASKEDOFF:%.*]], <vscale x 2 x bfloat> [[OP1:%.*]], bfloat noundef [[OP2:%.*]], i64 noundef [[VL:%.*]]) #[[ATTR0]] {
+// CHECK-RV64-NEXT: [[ENTRY:.*:]]
+// CHECK-RV64-NEXT: [[TMP0:%.*]] = call <vscale x 2 x bfloat> @llvm.riscv.vfsub.nxv2bf16.bf16.i64(<vscale x 2 x bfloat> [[MASKEDOFF]], <vscale x 2 x bfloat> [[OP1]], bfloat [[OP2]], i64 7, i64 [[VL]])
+// CHECK-RV64-NEXT: ret <vscale x 2 x bfloat> [[TMP0]]
+//
+vbfloat16mf2_t test_vfsub_vf_bf16mf2_tu(vbfloat16mf2_t maskedoff, vbfloat16mf2_t op1, __bf16 op2, size_t vl) {
+ return __riscv_vfsub_vf_bf16mf2_tu(maskedoff, op1, op2, vl);
+}
+
+// CHECK-RV64-LABEL: define dso_local <vscale x 4 x bfloat> @test_vfsub_vv_bf16m1_tu(
+// CHECK-RV64-SAME: <vscale x 4 x bfloat> [[MASKEDOFF:%.*]], <vscale x 4 x bfloat> [[OP1:%.*]], <vscale x 4 x bfloat> [[OP2:%.*]], i64 noundef [[VL:%.*]]) #[[ATTR0]] {
+// CHECK-RV64-NEXT: [[ENTRY:.*:]]
+// CHECK-RV64-NEXT: [[TMP0:%.*]] = call <vscale x 4 x bfloat> @llvm.riscv.vfsub.nxv4bf16.nxv4bf16.i64(<vscale x 4 x bfloat> [[MASKEDOFF]], <vscale x 4 x bfloat> [[OP1]], <vscale x 4 x bfloat> [[OP2]], i64 7, i64 [[VL]])
+// CHECK-RV64-NEXT: ret <vscale x 4 x bfloat> [[TMP0]]
+//
+vbfloat16m1_t test_vfsub_vv_bf16m1_tu(vbfloat16m1_t maskedoff, vbfloat16m1_t op1, vbfloat16m1_t op2, size_t vl) {
+ return __riscv_vfsub_vv_bf16m1_tu(maskedoff, op1, op2, vl);
+}
+
+// CHECK-RV64-LABEL: define dso_local <vscale x 4 x bfloat> @test_vfsub_vf_bf16m1_tu(
+// CHECK-RV64-SAME: <vscale x 4 x bfloat> [[MASKEDOFF:%.*]], <vscale x 4 x bfloat> [[OP1:%.*]], bfloat noundef [[OP2:%.*]], i64 noundef [[VL:%.*]]) #[[ATTR0]] {
+// CHECK-RV64-NEXT: [[ENTRY:.*:]]
+// CHECK-RV64-NEXT: [[TMP0:%.*]] = call <vscale x 4 x bfloat> @llvm.riscv.vfsub.nxv4bf16.bf16.i64(<vscale x 4 x bfloat> [[MASKEDOFF]], <vscale x 4 x bfloat> [[OP1]], bfloat [[OP2]], i64 7, i64 [[VL]])
+// CHECK-RV64-NEXT: ret <vscale x 4 x bfloat> [[TMP0]]
+//
+vbfloat16m1_t test_vfsub_vf_bf16m1_tu(vbfloat16m1_t maskedoff, vbfloat16m1_t op1, __bf16 op2, size_t vl) {
+ return __riscv_vfsub_vf_bf16m1_tu(maskedoff, op1, op2, vl);
+}
+
+// CHECK-RV64-LABEL: define dso_local <vscale x 8 x bfloat> @test_vfsub_vv_bf16m2_tu(
+// CHECK-RV64-SAME: <vscale x 8 x bfloat> [[MASKEDOFF:%.*]], <vscale x 8 x bfloat> [[OP1:%.*]], <vscale x 8 x bfloat> [[OP2:%.*]], i64 noundef [[VL:%.*]]) #[[ATTR0]] {
+// CHECK-RV64-NEXT: [[ENTRY:.*:]]
+// CHECK-RV64-NEXT: [[TMP0:%.*]] = call <vscale x 8 x bfloat> @llvm.riscv.vfsub.nxv8bf16.nxv8bf16.i64(<vscale x 8 x bfloat> [[MASKEDOFF]], <vscale x 8 x bfloat> [[OP1]], <vscale x 8 x bfloat> [[OP2]], i64 7, i64 [[VL]])
+// CHECK-RV64-NEXT: ret <vscale x 8 x bfloat> [[TMP0]]
+//
+vbfloat16m2_t test_vfsub_vv_bf16m2_tu(vbfloat16m2_t maskedoff, vbfloat16m2_t op1, vbfloat16m2_t op2, size_t vl) {
+ return __riscv_vfsub_vv_bf16m2_tu(maskedoff, op1, op2, vl);
+}
+
+// CHECK-RV64-LABEL: define dso_local <vscale x 8 x bfloat> @test_vfsub_vf_bf16m2_tu(
+// CHECK-RV64-SAME: <vscale x 8 x bfloat> [[MASKEDOFF:%.*]], <vscale x 8 x bfloat> [[OP1:%.*]], bfloat noundef [[OP2:%.*]], i64 noundef [[VL:%.*]]) #[[ATTR0]] {
+// CHECK-RV64-NEXT: [[ENTRY:.*:]]
+// CHECK-RV64-NEXT: [[TMP0:%.*]] = call <vscale x 8 x bfloat> @llvm.riscv.vfsub.nxv8bf16.bf16.i64(<vscale x 8 x bfloat> [[MASKEDOFF]], <vscale x 8 x bfloat> [[OP1]], bfloat [[OP2]], i64 7, i64 [[VL]])
+// CHECK-RV64-NEXT: ret <vscale x 8 x bfloat> [[TMP0]]
+//
+vbfloat16m2_t test_vfsub_vf_bf16m2_tu(vbfloat16m2_t maskedoff, vbfloat16m2_t op1, __bf16 op2, size_t vl) {
+ return __riscv_vfsub_vf_bf16m2_tu(maskedoff, op1, op2, vl);
+}
+
+// CHECK-RV64-LABEL: define dso_local <vscale x 16 x bfloat> @test_vfsub_vv_bf16m4_tu(
+// CHECK-RV64-SAME: <vscale x 16 x bfloat> [[MASKEDOFF:%.*]], <vscale x 16 x bfloat> [[OP1:%.*]], <vscale x 16 x bfloat> [[OP2:%.*]], i64 noundef [[VL:%.*]]) #[[ATTR0]] {
+// CHECK-RV64-NEXT: [[ENTRY:.*:]]
+// CHECK-RV64-NEXT: [[TMP0:%.*]] = call <vscale x 16 x bfloat> @llvm.riscv.vfsub.nxv16bf16.nxv16bf16.i64(<vscale x 16 x bfloat> [[MASKEDOFF]], <vscale x 16 x bfloat> [[OP1]], <vscale x 16 x bfloat> [[OP2]], i64 7, i64 [[VL]])
+// CHECK-RV64-NEXT: ret <vscale x 16 x bfloat> [[TMP0]]
+//
+vbfloat16m4_t test_vfsub_vv_bf16m4_tu(vbfloat16m4_t maskedoff, vbfloat16m4_t op1, vbfloat16m4_t op2, size_t vl) {
+ return __riscv_vfsub_vv_bf16m4_tu(maskedoff, op1, op2, vl);
+}
+
+// CHECK-RV64-LABEL: define dso_local <vscale x 16 x bfloat> @test_vfsub_vf_bf16m4_tu(
+// CHECK-RV64-SAME: <vscale x 16 x bfloat> [[MASKEDOFF:%.*]], <vscale x 16 x bfloat> [[OP1:%.*]], bfloat noundef [[OP2:%.*]], i64 noundef [[VL:%.*]]) #[[ATTR0]] {
+// CHECK-RV64-NEXT: [[ENTRY:.*:]]
+// CHECK-RV64-NEXT: [[TMP0:%.*]] = call <vscale x 16 x bfloat> @llvm.riscv.vfsub.nxv16bf16.bf16.i64(<vscale x 16 x bfloat> [[MASKEDOFF]], <vscale x 16 x bfloat> [[OP1]], bfloat [[OP2]], i64 7, i64 [[VL]])
+// CHECK-RV64-NEXT: ret <vscale x 16 x bfloat> [[TMP0]]
+//
+vbfloat16m4_t test_vfsub_vf_bf16m4_tu(vbfloat16m4_t maskedoff, vbfloat16m4_t op1, __bf16 op2, size_t vl) {
+ return __riscv_vfsub_vf_bf16m4_tu(maskedoff, op1, op2, vl);
+}
+
+// CHECK-RV64-LABEL: define dso_local <vscale x 32 x bfloat> @test_vfsub_vv_bf16m8_tu(
+// CHECK-RV64-SAME: <vscale x 32 x bfloat> [[MASKEDOFF:%.*]], <vscale x 32 x bfloat> [[OP1:%.*]], <vscale x 32 x bfloat> [[OP2:%.*]], i64 noundef [[VL:%.*]]) #[[ATTR0]] {
+// CHECK-RV64-NEXT: [[ENTRY:.*:]]
+// CHECK-RV64-NEXT: [[TMP0:%.*]] = call <vscale x 32 x bfloat> @llvm.riscv.vfsub.nxv32bf16.nxv32bf16.i64(<vscale x 32 x bfloat> [[MASKEDOFF]], <vscale x 32 x bfloat> [[OP1]], <vscale x 32 x bfloat> [[OP2]], i64 7, i64 [[VL]])
+// CHECK-RV64-NEXT: ret <vscale x 32 x bfloat> [[TMP0]]
+//
+vbfloat16m8_t test_vfsub_vv_bf16m8_tu(vbfloat16m8_t maskedoff, vbfloat16m8_t op1, vbfloat16m8_t op2, size_t vl) {
+ return __riscv_vfsub_vv_bf16m8_tu(maskedoff, op1, op2, vl);
+}
+
+// CHECK-RV64-LABEL: define dso_local <vscale x 32 x bfloat> @test_vfsub_vf_bf16m8_tu(
+// CHECK-RV64-SAME: <vscale x 32 x bfloat> [[MASKEDOFF:%.*]], <vscale x 32 x bfloat> [[OP1:%.*]], bfloat noundef [[OP2:%.*]], i64 noundef [[VL:%.*]]) #[[ATTR0]] {
+// CHECK-RV64-NEXT: [[ENTRY:.*:]]
+// CHECK-RV64-NEXT: [[TMP0:%.*]] = call <vscale x 32 x bfloat> @llvm.riscv.vfsub.nxv32bf16.bf16.i64(<vscale x 32 x bfloat> [[MASKEDOFF]], <vscale x 32 x bfloat> [[OP1]], bfloat [[OP2]], i64 7, i64 [[VL]])
+// CHECK-RV64-NEXT: ret <vscale x 32 x bfloat> [[TMP0]]
+//
+vbfloat16m8_t test_vfsub_vf_bf16m8_tu(vbfloat16m8_t maskedoff, vbfloat16m8_t op1, __bf16 op2, size_t vl) {
+ return __riscv_vfsub_vf_bf16m8_tu(maskedoff, op1, op2, vl);
+}
+
+// CHECK-RV64-LABEL: define dso_local <vscale x 1 x bfloat> @test_vfsub_vv_bf16mf4_tum(
+// CHECK-RV64-SAME: <vscale x 1 x i1> [[MASK:%.*]], <vscale x 1 x bfloat> [[MASKEDOFF:%.*]], <vscale x 1 x bfloat> [[OP1:%.*]], <vscale x 1 x bfloat> [[OP2:%.*]], i64 noundef [[VL:%.*]]) #[[ATTR0]] {
+// CHECK-RV64-NEXT: [[ENTRY:.*:]]
+// CHECK-RV64-NEXT: [[TMP0:%.*]] = call <vscale x 1 x bfloat> @llvm.riscv.vfsub.mask.nxv1bf16.nxv1bf16.i64(<vscale x 1 x bfloat> [[MASKEDOFF]], <vscale x 1 x bfloat> [[OP1]], <vscale x 1 x bfloat> [[OP2]], <vscale x 1 x i1> [[MASK]], i64 7, i64 [[VL]], i64 2)
+// CHECK-RV64-NEXT: ret <vscale x 1 x bfloat> [[TMP0]]
+//
+vbfloat16mf4_t test_vfsub_vv_bf16mf4_tum(vbool64_t mask, vbfloat16mf4_t maskedoff, vbfloat16mf4_t op1, vbfloat16mf4_t op2, size_t vl) {
+ return __riscv_vfsub_vv_bf16mf4_tum(mask, maskedoff, op1, op2, vl);
+}
+
+// CHECK-RV64-LABEL: define dso_local <vscale x 1 x bfloat> @test_vfsub_vf_bf16mf4_tum(
+// CHECK-RV64-SAME: <vscale x 1 x i1> [[MASK:%.*]], <vscale x 1 x bfloat> [[MASKEDOFF:%.*]], <vscale x 1 x bfloat> [[OP1:%.*]], bfloat noundef [[OP2:%.*]], i64 noundef [[VL:%.*]]) #[[ATTR0]] {
+// CHECK-RV64-NEXT: [[ENTRY:.*:]]
+// CHECK-RV64-NEXT: [[TMP0:%.*]] = call <vscale x 1 x bfloat> @llvm.riscv.vfsub.mask.nxv1bf16.bf16.i64(<vscale x 1 x bfloat> [[MASKEDOFF]], <vscale x 1 x bfloat> [[OP1]], bfloat [[OP2]], <vscale x 1 x i1> [[MASK]], i64 7, i64 [[VL]], i64 2)
+// CHECK-RV64-NEXT: ret <vscale x 1 x bfloat> [[TMP0]]
+//
+vbfloat16mf4_t test_vfsub_vf_bf16mf4_tum(vbool64_t mask, vbfloat16mf4_t maskedoff, vbfloat16mf4_t op1, __bf16 op2, size_t vl) {
+ return __riscv_vfsub_vf_bf16mf4_tum(mask, maskedoff, op1, op2, vl);
+}
+
+// CHECK-RV64-LABEL: define dso_local <vscale x 2 x bfloat> @test_vfsub_vv_bf16mf2_tum(
+// CHECK-RV64-SAME: <vscale x 2 x i1> [[MASK:%.*]], <vscale x 2 x bfloat> [[MASKEDOFF:%.*]], <vscale x 2 x bfloat> [[OP1:%.*]], <vscale x 2 x bfloat> [[OP2:%.*]], i64 noundef [[VL:%.*]]) #[[ATTR0]] {
+// CHECK-RV64-NEXT: [[ENTRY:.*:]]
+// CHECK-RV64-NEXT: [[TMP0:%.*]] = call <vscale x 2 x bfloat> @llvm.riscv.vfsub.mask.nxv2bf16.nxv2bf16.i64(<vscale x 2 x bfloat> [[MASKEDOFF]], <vscale x 2 x bfloat> [[OP1]], <vscale x 2 x bfloat> [[OP2]], <vscale x 2 x i1> [[MASK]], i64 7, i64 [[VL]], i64 2)
+// CHECK-RV64-NEXT: ret <vscale x 2 x bfloat> [[TMP0]]
+//
+vbfloat16mf2_t test_vfsub_vv_bf16mf2_tum(vbool32_t mask, vbfloat16mf2_t maskedoff, vbfloat16mf2_t op1, vbfloat16mf2_t op2, size_t vl) {
+ return __riscv_vfsub_vv_bf16mf2_tum(mask, maskedoff, op1, op2, vl);
+}
+
+// CHECK-RV64-LABEL: define dso_local <vscale x 2 x bfloat> @test_vfsub_vf_bf16mf2_tum(
+// CHECK-RV64-SAME: <vscale x 2 x i1> [[MASK:%.*]], <vscale x 2 x bfloat> [[MASKEDOFF:%.*]], <vscale x 2 x bfloat> [[OP1:%.*]], bfloat noundef [[OP2:%.*]], i64 noundef [[VL:%.*]]) #[[ATTR0]] {
+// CHECK-RV64-NEXT: [[ENTRY:.*:]]
+// CHECK-RV64-NEXT: [[TMP0:%.*]] = call <vscale x 2 x bfloat> @llvm.riscv.vfsub.mask.nxv2bf16.bf16.i64(<vscale x 2 x bfloat> [[MASKEDOFF]], <vscale x 2 x bfloat> [[OP1]], bfloat [[OP2]], <vscale x 2 x i1> [[MASK]], i64 7, i64 [[VL]], i64 2)
+// CHECK-RV64-NEXT: ret <vscale x 2 x bfloat> [[TMP0]]
+//
+vbfloat16mf2_t test_vfsub_vf_bf16mf2_tum(vbool32_t mask, vbfloat16mf2_t maskedoff, vbfloat16mf2_t op1, __bf16 op2, size_t vl) {
+ return __riscv_vfsub_vf_bf16mf2_tum(mask, maskedoff, op1, op2, vl);
+}
+
+// CHECK-RV64-LABEL: define dso_local <vscale x 4 x bfloat> @test_vfsub_vv_bf16m1_tum(
+// CHECK-RV64-SAME: <vscale x 4 x i1> [[MASK:%.*]], <vscale x 4 x bfloat> [[MASKEDOFF:%.*]], <vscale x 4 x bfloat> [[OP1:%.*]], <vscale x 4 x bfloat> [[OP2:%.*]], i64 noundef [[VL:%.*]]) #[[ATTR0]] {
+// CHECK-RV64-NEXT: [[ENTRY:.*:]]
+// CHECK-RV64-NEXT: [[TMP0:%.*]] = call <vscale x 4 x bfloat> @llvm.riscv.vfsub.mask.nxv4bf16.nxv4bf16.i64(<vscale x 4 x bfloat> [[MASKEDOFF]], <vscale x 4 x bfloat> [[OP1]], <vscale x 4 x bfloat> [[OP2]], <vscale x 4 x i1> [[MASK]], i64 7, i64 [[VL]], i64 2)
+// CHECK-RV64-NEXT: ret <vscale x 4 x bfloat> [[TMP0]]
+//
+vbfloat16m1_t test_vfsub_vv_bf16m1_tum(vbool16_t mask, vbfloat16m1_t maskedoff, vbfloat16m1_t op1, vbfloat16m1_t op2, size_t vl) {
+ return __riscv_vfsub_vv_bf16m1_tum(mask, maskedoff, op1, op2, vl);
+}
+
+// CHECK-RV64-LABEL: define dso_local <vscale x 4 x bfloat> @test_vfsub_vf_bf16m1_tum(
+// CHECK-RV64-SAME: <vscale x 4 x i1> [[MASK:%.*]], <vscale x 4 x bfloat> [[MASKEDOFF:%.*]], <vscale x 4 x bfloat> [[OP1:%.*]], bfloat noundef [[OP2:%.*]], i64 noundef [[VL:%.*]]) #[[ATTR0]] {
+// CHECK-RV64-NEXT: [[ENTRY:.*:]]
+// CHECK-RV64-NEXT: [[TMP0:%.*]] = call <vscale x 4 x bfloat> @llvm.riscv.vfsub.mask.nxv4bf16.bf16.i64(<vscale x 4 x bfloat> [[MASKEDOFF]], <vscale x 4 x bfloat> [[OP1]], bfloat [[OP2]], <vscale x 4 x i1> [[MASK]], i64 7, i64 [[VL]], i64 2)
+// CHECK-RV64-NEXT: ret <vscale x 4 x bfloat> [[TMP0]]
+//
+vbfloat16m1_t test_vfsub_vf_bf16m1_tum(vbool16_t mask, vbfloat16m1_t maskedoff, vbfloat16m1_t op1, __bf16 op2, size_t vl) {
+ return __riscv_vfsub_vf_bf16m1_tum(mask, maskedoff, op1, op2, vl);
+}
+
+// CHECK-RV64-LABEL: define dso_local <vscale x 8 x bfloat> @test_vfsub_vv_bf16m2_tum(
+// CHECK-RV64-SAME: <vscale x 8 x i1> [[MASK:%.*]], <vscale x 8 x bfloat> [[MASKEDOFF:%.*]], <vscale x 8 x bfloat> [[OP1:%.*]], <vscale x 8 x bfloat> [[OP2:%.*]], i64 noundef [[VL:%.*]]) #[[ATTR0]] {
+// CHECK-RV64-NEXT: [[ENTRY:.*:]]
+// CHECK-RV64-NEXT: [[TMP0:%.*]] = call <vscale x 8 x bfloat> @llvm.riscv.vfsub.mask.nxv8bf16.nxv8bf16.i64(<vscale x 8 x bfloat> [[MASKEDOFF]], <vscale x 8 x bfloat> [[OP1]], <vscale x 8 x bfloat> [[OP2]], <vscale x 8 x i1> [[MASK]], i64 7, i64 [[VL]], i64 2)
+// CHECK-RV64-NEXT: ret <vscale x 8 x bfloat> [[TMP0]]
+//
+vbfloat16m2_t test_vfsub_vv_bf16m2_tum(vbool8_t mask, vbfloat16m2_t maskedoff, vbfloat16m2_t op1, vbfloat16m2_t op2, size_t vl) {
+ return __riscv_vfsub_vv_bf16m2_tum(mask, maskedoff, op1, op2, vl);
+}
+
+// CHECK-RV64-LABEL: define dso_local <vscale x 8 x bfloat> @test_vfsub_vf_bf16m2_tum(
+// CHECK-RV64-SAME: <vscale x 8 x i1> [[MASK:%.*]], <vscale x 8 x bfloat> [[MASKEDOFF:%.*]], <vscale x 8 x bfloat> [[OP1:%.*]], bfloat noundef [[OP2:%.*]], i64 noundef [[VL:%.*]]) #[[ATTR0]] {
+// CHECK-RV64-NEXT: [[ENTRY:.*:]]
+// CHECK-RV64-NEXT: [[TMP0:%.*]] = call <vscale x 8 x bfloat> @llvm.riscv.vfsub.mask.nxv8bf16.bf16.i64(<vscale x 8 x bfloat> [[MASKEDOFF]], <vscale x 8 x bfloat> [[OP1]], bfloat [[OP2]], <vscale x 8 x i1> [[MASK]], i64 7, i64 [[VL]], i64 2)
+// CHECK-RV64-NEXT: ret <vscale x 8 x bfloat> [[TMP0]]
+//
+vbfloat16m2_t test_vfsub_vf_bf16m2_tum(vbool8_t mask, vbfloat16m2_t maskedoff, vbfloat16m2_t op1, __bf16 op2, size_t vl) {
+ return __riscv_vfsub_vf_bf16m2_tum(mask, maskedoff, op1, op2, vl);
+}
+
+// CHECK-RV64-LABEL: define dso_local <vscale x 16 x bfloat> @test_vfsub_vv_bf16m4_tum(
+// CHECK-RV64-SAME: <vscale x 16 x i1> [[MASK:%.*]], <vscale x 16 x bfloat> [[MASKEDOFF:%.*]], <vscale x 16 x bfloat> [[OP1:%.*]], <vscale x 16 x bfloat> [[OP2:%.*]], i64 noundef [[VL:%.*]]) #[[ATTR0]] {
+// CHECK-RV64-NEXT: [[ENTRY:.*:]]
+// CHECK-RV64-NEXT: [[TMP0:%.*]] = call <vscale x 16 x bfloat> @llvm.riscv.vfsub.mask.nxv16bf16.nxv16bf16.i64(<vscale x 16 x bfloat> [[MASKEDOFF]], <vscale x 16 x bfloat> [[OP1]], <vscale x 16 x bfloat> [[OP2]], <vscale x 16 x i1> [[MASK]], i64 7, i64 [[VL]], i64 2)
+// CHECK-RV64-NEXT: ret <vscale x 16 x bfloat> [[TMP0]]
+//
+vbfloat16m4_t test_vfsub_vv_bf16m4_tum(vbool4_t mask, vbfloat16m4_t maskedoff, vbfloat16m4_t op1, vbfloat16m4_t op2, size_t vl) {
+ return __riscv_vfsub_vv_bf16m4_tum(mask, maskedoff, op1, op2, vl);
+}
+
+// CHECK-RV64-LABEL: define dso_local <vscale x 16 x bfloat> @test_vfsub_vf_bf16m4_tum(
+// CHECK-RV64-SAME: <vscale x 16 x i1> [[MASK:%.*]], <vscale x 16 x bfloat> [[MASKEDOFF:%.*]], <vscale x 16 x bfloat> [[OP1:%.*]], bfloat noundef [[OP2:%.*]], i64 noundef [[VL:%.*]]) #[[ATTR0]] {
+// CHECK-RV64-NEXT: [[ENTRY:.*:]]
+// CHECK-RV64-NEXT: [[TMP0:%.*]] = call <vscale x 16 x bfloat> @llvm.riscv.vfsub.mask.nxv16bf16.bf16.i64(<vscale x 16 x bfloat> [[MASKEDOFF]], <vscale x 16 x bfloat> [[OP1]], bfloat [[OP2]], <vscale x 16 x i1> [[MASK]], i64 7, i64 [[VL]], i64 2)
+// CHECK-RV64-NEXT: ret <vscale x 16 x bfloat> [[TMP0]]
+//
+vbfloat16m4_t test_vfsub_vf_bf16m4_tum(vbool4_t mask, vbfloat16m4_t maskedoff, vbfloat16m4_t op1, __bf16 op2, size_t vl) {
+ return __riscv_vfsub_vf_bf16m4_tum(mask, maskedoff, op1, op2, vl);
+}
+
+// CHECK-RV64-LABEL: define dso_local <vscale x 32 x bfloat> @test_vfsub_vv_bf16m8_tum(
+// CHECK-RV64-SAME: <vscale x 32 x i1> [[MASK:%.*]], <vscale x 32 x bfloat> [[MASKEDOFF:%.*]], <vscale x 32 x bfloat> [[OP1:%.*]], <vscale x 32 x bfloat> [[OP2:%.*]], i64 noundef [[VL:%.*]]) #[[ATTR0]] {
+// CHECK-RV64-NEXT: [[ENTRY:.*:]]
+// CHECK-RV64-NEXT: [[TMP0:%.*]] = call <vscale x 32 x bfloat> @llvm.riscv.vfsub.mask.nxv32bf16.nxv32bf16.i64(<vscale x 32 x bfloat> [[MASKEDOFF]], <vscale x 32 x bfloat> [[OP1]], <vscale x 32 x bfloat> [[OP2]], <vscale x 32 x i1> [[MASK]], i64 7, i64 [[VL]], i64 2)
+// CHECK-RV64-NEXT: ret <vscale x 32 x bfloat> [[TMP0]]
+//
+vbfloat16m8_t test_vfsub_vv_bf16m8_tum(vbool2_t mask, vbfloat16m8_t maskedoff, vbfloat16m8_t op1, vbfloat16m8_t op2, size_t vl) {
+ return __riscv_vfsub_vv_bf16m8_tum(mask, maskedoff, op1, op2, vl);
+}
+
+// CHECK-RV64-LABEL: define dso_local <vscale x 32 x bfloat> @test_vfsub_vf_bf16m8_tum(
+// CHECK-RV64-SAME: <vscale x 32 x i1> [[MASK:%.*]], <vscale x 32 x bfloat> [[MASKEDOFF:%.*]], <vscale x 32 x bfloat> [[OP1:%.*]], bfloat noundef [[OP2:%.*]], i64 noundef [[VL:%.*]]) #[[ATTR0]] {
+// CHECK-RV64-NEXT: [[ENTRY:.*:]]
+// CHECK-RV64-NEXT: [[TMP0:%.*]] = call <vscale x 32 x bfloat> @llvm.riscv.vfsub.mask.nxv32bf16.bf16.i64(<vscale x 32 x bfloat> [[MASKEDOFF]], <vscale x 32 x bfloat> [[OP1]], bfloat [[OP2]], <vscale x 32 x i1> [[MASK]], i64 7, i64 [[VL]], i64 2)
+// CHECK-RV64-NEXT: ret <vscale x 32 x bfloat> [[TMP0]]
+//
+vbfloat16m8_t test_vfsub_vf_bf16m8_tum(vbool2_t mask, vbfloat16m8_t maskedoff, vbfloat16m8_t op1, __bf16 op2, size_t vl) {
+ return __riscv_vfsub_vf_bf16m8_tum(mask, maskedoff, op1, op2, vl);
+}
+
+// CHECK-RV64-LABEL: define dso_local <vscale x 1 x bfloat> @test_vfsub_vv_bf16mf4_tumu(
+// CHECK-RV64-SAME: <vscale x 1 x i1> [[MASK:%.*]], <vscale x 1 x bfloat> [[MASKEDOFF:%.*]], <vscale x 1 x bfloat> [[OP1:%.*]], <vscale x 1 x bfloat> [[OP2:%.*]], i64 noundef [[VL:%.*]]) #[[ATTR0]] {
+// CHECK-RV64-NEXT: [[ENTRY:.*:]]
+// CHECK-RV64-NEXT: [[TMP0:%.*]] = call <vscale x 1 x bfloat> @llvm.riscv.vfsub.mask.nxv1bf16.nxv1bf16.i64(<vscale x 1 x bfloat> [[MASKEDOFF]], <vscale x 1 x bfloat> [[OP1]], <vscale x 1 x bfloat> [[OP2]], <vscale x 1 x i1> [[MASK]], i64 7, i64 [[VL]], i64 0)
+// CHECK-RV64-NEXT: ret <vscale x 1 x bfloat> [[TMP0]]
+//
+vbfloat16mf4_t test_vfsub_vv_bf16mf4_tumu(vbool64_t mask, vbfloat16mf4_t maskedoff, vbfloat16mf4_t op1, vbfloat16mf4_t op2, size_t vl) {
+ return __riscv_vfsub_vv_bf16mf4_tumu(mask, maskedoff, op1, op2, vl);
+}
+
+// CHECK-RV64-LABEL: define dso_local <vscale x 1 x bfloat> @test_vfsub_vf_bf16mf4_tumu(
+// CHECK-RV64-SAME: <vscale x 1 x i1> [[MASK:%.*]], <vscale x 1 x bfloat> [[MASKEDOFF:%.*]], <vscale x 1 x bfloat> [[OP1:%.*]], bfloat noundef [[OP2:%.*]], i64 noundef [[VL:%.*]]) #[[ATTR0]] {
+// CHECK-RV64-NEXT: [[ENTRY:.*:]]
+// CHECK-RV64-NEXT: [[TMP0:%.*]] = call <vscale x 1 x bfloat> @llvm.riscv.vfsub.mask.nxv1bf16.bf16.i64(<vscale x 1 x bfloat> [[MASKEDOFF]], <vscale x 1 x bfloat> [[OP1]], bfloat [[OP2]], <vscale x 1 x i1> [[MASK]], i64 7, i64 [[VL]], i64 0)
+// CHECK-RV64-NEXT: ret <vscale x 1 x bfloat> [[TMP0]]
+//
+vbfloat16mf4_t test_vfsub_vf_bf16mf4_tumu(vbool64_t mask, vbfloat16mf4_t maskedoff, vbfloat16mf4_t op1, __bf16 op2, size_t vl) {
+ return __riscv_vfsub_vf_bf16mf4_tumu(mask, maskedoff, op1, op2, vl);
+}
+
+// CHECK-RV64-LABEL: define dso_local <vscale x 2 x bfloat> @test_vfsub_vv_bf16mf2_tumu(
+// CHECK-RV64-SAME: <vscale x 2 x i1> [[MASK:%.*]], <vscale x 2 x bfloat> [[MASKEDOFF:%.*]], <vscale x 2 x bfloat> [[OP1:%.*]], <vscale x 2 x bfloat> [[OP2:%.*]], i64 noundef [[VL:%.*]]) #[[ATTR0]] {
+// CHECK-RV64-NEXT: [[ENTRY:.*:]]
+// CHECK-RV64-NEXT: [[TMP0:%.*]] = call <vscale x 2 x bfloat> @llvm.riscv.vfsub.mask.nxv2bf16.nxv2bf16.i64(<vscale x 2 x bfloat> [[MASKEDOFF]], <vscale x 2 x bfloat> [[OP1]], <vscale x 2 x bfloat> [[OP2]], <vscale x 2 x i1> [[MASK]], i64 7, i64 [[VL]], i64 0)
+// CHECK-RV64-NEXT: ret <vscale x 2 x bfloat> [[TMP0]]
+//
+vbfloat16mf2_t test_vfsub_vv_bf16mf2_tumu(vbool32_t mask, vbfloat16mf2_t maskedoff, vbfloat16mf2_t op1, vbfloat16mf2_t op2, size_t vl) {
+ return __riscv_vfsub_vv_bf16mf2_tumu(mask, maskedoff, op1, op2, vl);
+}
+
+// CHECK-RV64-LABEL: define dso_local <vscale x 2 x bfloat> @test_vfsub_vf_bf16mf2_tumu(
+// CHECK-RV64-SAME: <vscale x 2 x i1> [[MASK:%.*]], <vscale x 2 x bfloat> [[MASKEDOFF:%.*]], <vscale x 2 x bfloat> [[OP1:%.*]], bfloat noundef [[OP2:%.*]], i64 noundef [[VL:%.*]]) #[[ATTR0]] {
+// CHECK-RV64-NEXT: [[ENTRY:.*:]]
+// CHECK-RV64-NEXT: [[TMP0:%.*]] = call <vscale x 2 x bfloat> @llvm.riscv.vfsub.mask.nxv2bf16.bf16.i64(<vscale x 2 x bfloat> [[MASKEDOFF]], <vscale x 2 x bfloat> [[OP1]], bfloat [[OP2]], <vscale x 2 x i1> [[MASK]], i64 7, i64 [[VL]], i64 0)
+// CHECK-RV64-NEXT: ret <vscale x 2 x bfloat> [[TMP0]]
+//
+vbfloat16mf2_t test_vfsub_vf_bf16mf2_tumu(vbool32_t mask, vbfloat16mf2_t maskedoff, vbfloat16mf2_t op1, __bf16 op2, size_t vl) {
+ return __riscv_vfsub_vf_bf16mf2_tumu(mask, maskedoff, op1, op2, vl);
+}
+
+// CHECK-RV64-LABEL: define dso_local <vscale x 4 x bfloat> @test_vfsub_vv_bf16m1_tumu(
+// CHECK-RV64-SAME: <vscale x 4 x i1> [[MASK:%.*]], <vscale x 4 x bfloat> [[MASKEDOFF:%.*]], <vscale x 4 x bfloat> [[OP1:%.*]], <vscale x 4 x bfloat> [[OP2:%.*]], i64 noundef [[VL:%.*]]) #[[ATTR0]] {
+// CHECK-RV64-NEXT: [[ENTRY:.*:]]
+// CHECK-RV64-NEXT: [[TMP0:%.*]] = call <vscale x 4 x bfloat> @llvm.riscv.vfsub.mask.nxv4bf16.nxv4bf16.i64(<vscale x 4 x bfloat> [[MASKEDOFF]], <vscale x 4 x bfloat> [[OP1]], <vscale x 4 x bfloat> [[OP2]], <vscale x 4 x i1> [[MASK]], i64 7, i64 [[VL]], i64 0)
+// CHECK-RV64-NEXT: ret <vscale x 4 x bfloat> [[TMP0]]
+//
+vbfloat16m1_t test_vfsub_vv_bf16m1_tumu(vbool16_t mask, vbfloat16m1_t maskedoff, vbfloat16m1_t op1, vbfloat16m1_t op2, size_t vl) {
+ return __riscv_vfsub_vv_bf16m1_tumu(mask, maskedoff, op1, op2, vl);
+}
+
+// CHECK-RV64-LABEL: define dso_local <vscale x 4 x bfloat> @test_vfsub_vf_bf16m1_tumu(
+// CHECK-RV64-SAME: <vscale x 4 x i1> [[MASK:%.*]], <vscale x 4 x bfloat> [[MASKEDOFF:%.*]], <vscale x 4 x bfloat> [[OP1:%.*]], bfloat noundef [[OP2:%.*]], i64 noundef [[VL:%.*]]) #[[ATTR0]] {
+// CHECK-RV64-NEXT: [[ENTRY:.*:]]
+// CHECK-RV64-NEXT: [[TMP0:%.*]] = call <vscale x 4 x bfloat> @llvm.riscv.vfsub.mask.nxv4bf16.bf16.i64(<vscale x 4 x bfloat> [[MASKEDOFF]], <vscale x 4 x bfloat> [[OP1]], bfloat [[OP2]], <vscale x 4 x i1> [[MASK]], i64 7, i64 [[VL]], i64 0)
+// CHECK-RV64-NEXT: ret <vscale x 4 x bfloat> [[TMP0]]
+//
+vbfloat16m1_t test_vfsub_vf_bf16m1_tumu(vbool16_t mask, vbfloat16m1_t maskedoff, vbfloat16m1_t op1, __bf16 op2, size_t vl) {
+ return __riscv_vfsub_vf_bf16m1_tumu(mask, maskedoff, op1, op2, vl);
+}
+
+// CHECK-RV64-LABEL: define dso_local <vscale x 8 x bfloat> @test_vfsub_vv_bf16m2_tumu(
+// CHECK-RV64-SAME: <vscale x 8 x i1> [[MASK:%.*]], <vscale x 8 x bfloat> [[MASKEDOFF:%.*]], <vscale x 8 x bfloat> [[OP1:%.*]], <vscale x 8 x bfloat> [[OP2:%.*]], i64 noundef [[VL:%.*]]) #[[ATTR0]] {
+// CHECK-RV64-NEXT: [[ENTRY:.*:]]
+// CHECK-RV64-NEXT: [[TMP0:%.*]] = call <vscale x 8 x bfloat> @llvm.riscv.vfsub.mask.nxv8bf16.nxv8bf16.i64(<vscale x 8 x bfloat> [[MASKEDOFF]], <vscale x 8 x bfloat> [[OP1]], <vscale x 8 x bfloat> [[OP2]], <vscale x 8 x i1> [[MASK]], i64 7, i64 [[VL]], i64 0)
+// CHECK-RV64-NEXT: ret <vscale x 8 x bfloat> [[TMP0]]
+//
+vbfloat16m2_t test_vfsub_vv_bf16m2_tumu(vbool8_t mask, vbfloat16m2_t maskedoff, vbfloat16m2_t op1, vbfloat16m2_t op2, size_t vl) {
+ return __riscv_vfsub_vv_bf16m2_tumu(mask, maskedoff, op1, op2, vl);
+}
+
+// CHECK-RV64-LABEL: define dso_local <vscale x 8 x bfloat> @test_vfsub_vf_bf16m2_tumu(
+// CHECK-RV64-SAME: <vscale x 8 x i1> [[MASK:%.*]], <vscale x 8 x bfloat> [[MASKEDOFF:%.*]], <vscale x 8 x bfloat> [[OP1:%.*]], bfloat noundef [[OP2:%.*]], i64 noundef [[VL:%.*]]) #[[ATTR0]] {
+// CHECK-RV64-NEXT: [[ENTRY:.*:]]
+// CHECK-RV64-NEXT: [[TMP0:%.*]] = call <vscale x 8 x bfloat> @llvm.riscv.vfsub.mask.nxv8bf16.bf16.i64(<vscale x 8 x bfloat> [[MASKEDOFF]], <vscale x 8 x bfloat> [[OP1]], bfloat [[OP2]], <vscale x 8 x i1> [[MASK]], i64 7, i64 [[VL]], i64 0)
+// CHECK-RV64-NEXT: ret <vscale x 8 x bfloat> [[TMP0]]
+//
+vbfloat16m2_t test_vfsub_vf_bf16m2_tumu(vbool8_t mask, vbfloat16m2_t maskedoff, vbfloat16m2_t op1, __bf16 op2, size_t vl) {
+ return __riscv_vfsub_vf_bf16m2_tumu(mask, maskedoff, op1, op2, vl);
+}
+
+// CHECK-RV64-LABEL: define dso_local <vscale x 16 x bfloat> @test_vfsub_vv_bf16m4_tumu(
+// CHECK-RV64-SAME: <vscale x 16 x i1> [[MASK:%.*]], <vscale x 16 x bfloat> [[MASKEDOFF:%.*]], <vscale x 16 x bfloat> [[OP1:%.*]], <vscale x 16 x bfloat> [[OP2:%.*]], i64 noundef [[VL:%.*]]) #[[ATTR0]] {
+// CHECK-RV64-NEXT: [[ENTRY:.*:]]
+// CHECK-RV64-NEXT: [[TMP0:%.*]] = call <vscale x 16 x bfloat> @llvm.riscv.vfsub.mask.nxv16bf16.nxv16bf16.i64(<vscale x 16 x bfloat> [[MASKEDOFF]], <vscale x 16 x bfloat> [[OP1]], <vscale x 16 x bfloat> [[OP2]], <vscale x 16 x i1> [[MASK]], i64 7, i64 [[VL]], i64 0)
+// CHECK-RV64-NEXT: ret <vscale x 16 x bfloat> [[TMP0]]
+//
+vbfloat16m4_t test_vfsub_vv_bf16m4_tumu(vbool4_t mask, vbfloat16m4_t maskedoff, vbfloat16m4_t op1, vbfloat16m4_t op2, size_t vl) {
+ return __riscv_vfsub_vv_bf16m4_tumu(mask, maskedoff, op1, op2, vl);
+}
+
+// CHECK-RV64-LABEL: define dso_local <vscale x 16 x bfloat> @test_vfsub_vf_bf16m4_tumu(
+// CHECK-RV64-SAME: <vscale x 16 x i1> [[MASK:%.*]], <vscale x 16 x bfloat> [[MASKEDOFF:%.*]], <vscale x 16 x bfloat> [[OP1:%.*]], bfloat noundef [[OP2:%.*]], i64 noundef [[VL:%.*]]) #[[ATTR0]] {
+// CHECK-RV64-NEXT: [[ENTRY:.*:]]
+// CHECK-RV64-NEXT: [[TMP0:%.*]] = call <vscale x 16 x bfloat> @llvm.riscv.vfsub.mask.nxv16bf16.bf16.i64(<vscale x 16 x bfloat> [[MASKEDOFF]], <vscale x 16 x bfloat> [[OP1]], bfloat [[OP2]], <vscale x 16 x i1> [[MASK]], i64 7, i64 [[VL]], i64 0)
+// CHECK-RV64-NEXT: ret <vscale x 16 x bfloat> [[TMP0]]
+//
+vbfloat16m4_t test_vfsub_vf_bf16m4_tumu(vbool4_t mask, vbfloat16m4_t maskedoff, vbfloat16m4_t op1, __bf16 op2, size_t vl) {
+ return __riscv_vfsub_vf_bf16m4_tumu(mask, maskedoff, op1, op2, vl);
+}
+
+// CHECK-RV64-LABEL: define dso_local <vscale x 32 x bfloat> @test_vfsub_vv_bf16m8_tumu(
+// CHECK-RV64-SAME: <vscale x 32 x i1> [[MASK:%.*]], <vscale x 32 x bfloat> [[MASKEDOFF:%.*]], <vscale x 32 x bfloat> [[OP1:%.*]], <vscale x 32 x bfloat> [[OP2:%.*]], i64 noundef [[VL:%.*]]) #[[ATTR0]] {
+// CHECK-RV64-NEXT: [[ENTRY:.*:]]
+// CHECK-RV64-NEXT: [[TMP0:%.*]] = call <vscale x 32 x bfloat> @llvm.riscv.vfsub.mask.nxv32bf16.nxv32bf16.i64(<vscale x 32 x bfloat> [[MASKEDOFF]], <vscale x 32 x bfloat> [[OP1]], <vscale x 32 x bfloat> [[OP2]], <vscale x 32 x i1> [[MASK]], i64 7, i64 [[VL]], i64 0)
+// CHECK-RV64-NEXT: ret <vscale x 32 x bfloat> [[TMP0]]
+//
+vbfloat16m8_t test_vfsub_vv_bf16m8_tumu(vbool2_t mask, vbfloat16m8_t maskedoff, vbfloat16m8_t op1, vbfloat16m8_t op2, size_t vl) {
+ return __riscv_vfsub_vv_bf16m8_tumu(mask, maskedoff, op1, op2, vl);
+}
+
+// CHECK-RV64-LABEL: define dso_local <vscale x 32 x bfloat> @test_vfsub_vf_bf16m8_tumu(
+// CHECK-RV64-SAME: <vscale x 32 x i1> [[MASK:%.*]], <vscale x 32 x bfloat> [[MASKEDOFF:%.*]], <vscale x 32 x bfloat> [[OP1:%.*]], bfloat noundef [[OP2:%.*]], i64 noundef [[VL:%.*]]) #[[ATTR0]] {
+// CHECK-RV64-NEXT: [[ENTRY:.*:]]
+// CHECK-RV64-NEXT: [[TMP0:%.*]] = call <vscale x 32 x bfloat> @llvm.riscv.vfsub.mask.nxv32bf16.bf16.i64(<vscale x 32 x bfloat> [[MASKEDOFF]], <vscale x 32 x bfloat> [[OP1]], bfloat [[OP2]], <vscale x 32 x i1> [[MASK]], i64 7, i64 [[VL]], i64 0)
+// CHECK-RV64-NEXT: ret <vscale x 32 x bfloat> [[TMP0]]
+//
+vbfloat16m8_t test_vfsub_vf_bf16m8_tumu(vbool2_t mask, vbfloat16m8_t maskedoff, vbfloat16m8_t op1, __bf16 op2, size_t vl) {
+ return __riscv_vfsub_vf_bf16m8_tumu(mask, maskedoff, op1, op2, vl);
+}
+
+// CHECK-RV64-LABEL: define dso_local <vscale x 1 x bfloat> @test_vfsub_vv_bf16mf4_mu(
+// CHECK-RV64-SAME: <vscale x 1 x i1> [[MASK:%.*]], <vscale x 1 x bfloat> [[MASKEDOFF:%.*]], <vscale x 1 x bfloat> [[OP1:%.*]], <vscale x 1 x bfloat> [[OP2:%.*]], i64 noundef [[VL:%.*]]) #[[ATTR0]] {
+// CHECK-RV64-NEXT: [[ENTRY:.*:]]
+// CHECK-RV64-NEXT: [[TMP0:%.*]] = call <vscale x 1 x bfloat> @llvm.riscv.vfsub.mask.nxv1bf16.nxv1bf16.i64(<vscale x 1 x bfloat> [[MASKEDOFF]], <vscale x 1 x bfloat> [[OP1]], <vscale x 1 x bfloat> [[OP2]], <vscale x 1 x i1> [[MASK]], i64 7, i64 [[VL]], i64 1)
+// CHECK-RV64-NEXT: ret <vscale x 1 x bfloat> [[TMP0]]
+//
+vbfloat16mf4_t test_vfsub_vv_bf16mf4_mu(vbool64_t mask, vbfloat16mf4_t maskedoff, vbfloat16mf4_t op1, vbfloat16mf4_t op2, size_t vl) {
+ return __riscv_vfsub_vv_bf16mf4_mu(mask, maskedoff, op1, op2, vl);
+}
+
+// CHECK-RV64-LABEL: define dso_local <vscale x 1 x bfloat> @test_vfsub_vf_bf16mf4_mu(
+// CHECK-RV64-SAME: <vscale x 1 x i1> [[MASK:%.*]], <vscale x 1 x bfloat> [[MASKEDOFF:%.*]], <vscale x 1 x bfloat> [[OP1:%.*]], bfloat noundef [[OP2:%.*]], i64 noundef [[VL:%.*]]) #[[ATTR0]] {
+// CHECK-RV64-NEXT: [[ENTRY:.*:]]
+// CHECK-RV64-NEXT: [[TMP0:%.*]] = call <vscale x 1 x bfloat> @llvm.riscv.vfsub.mask.nxv1bf16.bf16.i64(<vscale x 1 x bfloat> [[MASKEDOFF]], <vscale x 1 x bfloat> [[OP1]], bfloat [[OP2]], <vscale x 1 x i1> [[MASK]], i64 7, i64 [[VL]], i64 1)
+// CHECK-RV64-NEXT: ret <vscale x 1 x bfloat> [[TMP0]]
+//
+vbfloat16mf4_t test_vfsub_vf_bf16mf4_mu(vbool64_t mask, vbfloat16mf4_t maskedoff, vbfloat16mf4_t op1, __bf16 op2, size_t vl) {
+ return __riscv_vfsub_vf_bf16mf4_mu(mask, maskedoff, op1, op2, vl);
+}
+
+// CHECK-RV64-LABEL: define dso_local <vscale x 2 x bfloat> @test_vfsub_vv_bf16mf2_mu(
+// CHECK-RV64-SAME: <vscale x 2 x i1> [[MASK:%.*]], <vscale x 2 x bfloat> [[MASKEDOFF:%.*]], <vscale x 2 x bfloat> [[OP1:%.*]], <vscale x 2 x bfloat> [[OP2:%.*]], i64 noundef [[VL:%.*]]) #[[ATTR0]] {
+// CHECK-RV64-NEXT: [[ENTRY:.*:]]
+// CHECK-RV64-NEXT: [[TMP0:%.*]] = call <vscale x 2 x bfloat> @llvm.riscv.vfsub.mask.nxv2bf16.nxv2bf16.i64(<vscale x 2 x bfloat> [[MASKEDOFF]], <vscale x 2 x bfloat> [[OP1]], <vscale x 2 x bfloat> [[OP2]], <vscale x 2 x i1> [[MASK]], i64 7, i64 [[VL]], i64 1)
+// CHECK-RV64-NEXT: ret <vscale x 2 x bfloat> [[TMP0]]
+//
+vbfloat16mf2_t test_vfsub_vv_bf16mf2_mu(vbool32_t mask, vbfloat16mf2_t maskedoff, vbfloat16mf2_t op1, vbfloat16mf2_t op2, size_t vl) {
+ return __riscv_vfsub_vv_bf16mf2_mu(mask, maskedoff, op1, op2, vl);
+}
+
+// CHECK-RV64-LABEL: define dso_local <vscale x 2 x bfloat> @test_vfsub_vf_bf16mf2_mu(
+// CHECK-RV64-SAME: <vscale x 2 x i1> [[MASK:%.*]], <vscale x 2 x bfloat> [[MASKEDOFF:%.*]], <vscale x 2 x bfloat> [[OP1:%.*]], bfloat noundef [[OP2:%.*]], i64 noundef [[VL:%.*]]) #[[ATTR0]] {
+// CHECK-RV64-NEXT: [[ENTRY:.*:]]
+// CHECK-RV64-NEXT: [[TMP0:%.*]] = call <vscale x 2 x bfloat> @llvm.riscv.vfsub.mask.nxv2bf16.bf16.i64(<vscale x 2 x bfloat> [[MASKEDOFF]], <vscale x 2 x bfloat> [[OP1]], bfloat [[OP2]], <vscale x 2 x i1> [[MASK]], i64 7, i64 [[VL]], i64 1)
+// CHECK-RV64-NEXT: ret <vscale x 2 x bfloat> [[TMP0]]
+//
+vbfloat16mf2_t test_vfsub_vf_bf16mf2_mu(vbool32_t mask, vbfloat16mf2_t maskedoff, vbfloat16mf2_t op1, __bf16 op2, size_t vl) {
+ return __riscv_vfsub_vf_bf16mf2_mu(mask, maskedoff, op1, op2, vl);
+}
+
+// CHECK-RV64-LABEL: define dso_local <vscale x 4 x bfloat> @test_vfsub_vv_bf16m1_mu(
+// CHECK-RV64-SAME: <vscale x 4 x i1> [[MASK:%.*]], <vscale x 4 x bfloat> [[MASKEDOFF:%.*]], <vscale x 4 x bfloat> [[OP1:%.*]], <vscale x 4 x bfloat> [[OP2:%.*]], i64 noundef [[VL:%.*]]) #[[ATTR0]] {
+// CHECK-RV64-NEXT: [[ENTRY:.*:]]
+// CHECK-RV64-NEXT: [[TMP0:%.*]] = call <vscale x 4 x bfloat> @llvm.riscv.vfsub.mask.nxv4bf16.nxv4bf16.i64(<vscale x 4 x bfloat> [[MASKEDOFF]], <vscale x 4 x bfloat> [[OP1]], <vscale x 4 x bfloat> [[OP2]], <vscale x 4 x i1> [[MASK]], i64 7, i64 [[VL]], i64 1)
+// CHECK-RV64-NEXT: ret <vscale x 4 x bfloat> [[TMP0]]
+//
+vbfloat16m1_t test_vfsub_vv_bf16m1_mu(vbool16_t mask, vbfloat16m1_t maskedoff, vbfloat16m1_t op1, vbfloat16m1_t op2, size_t vl) {
+ return __riscv_vfsub_vv_bf16m1_mu(mask, maskedoff, op1, op2, vl);
+}
+
+// CHECK-RV64-LABEL: define dso_local <vscale x 4 x bfloat> @test_vfsub_vf_bf16m1_mu(
+// CHECK-RV64-SAME: <vscale x 4 x i1> [[MASK:%.*]], <vscale x 4 x bfloat> [[MASKEDOFF:%.*]], <vscale x 4 x bfloat> [[OP1:%.*]], bfloat noundef [[OP2:%.*]], i64 noundef [[VL:%.*]]) #[[ATTR0]] {
+// CHECK-RV64-NEXT: [[ENTRY:.*:]]
+// CHECK-RV64-NEXT: [[TMP0:%.*]] = call <vscale x 4 x bfloat> @llvm.riscv.vfsub.mask.nxv4bf16.bf16.i64(<vscale x 4 x bfloat> [[MASKEDOFF]], <vscale x 4 x bfloat> [[OP1]], bfloat [[OP2]], <vscale x 4 x i1> [[MASK]], i64 7, i64 [[VL]], i64 1)
+// CHECK-RV64-NEXT: ret <vscale x 4 x bfloat> [[TMP0]]
+//
+vbfloat16m1_t test_vfsub_vf_bf16m1_mu(vbool16_t mask, vbfloat16m1_t maskedoff, vbfloat16m1_t op1, __bf16 op2, size_t vl) {
+ return __riscv_vfsub_vf_bf16m1_mu(mask, maskedoff, op1, op2, vl);
+}
+
+// CHECK-RV64-LABEL: define dso_local <vscale x 8 x bfloat> @test_vfsub_vv_bf16m2_mu(
+// CHECK-RV64-SAME: <vscale x 8 x i1> [[MASK:%.*]], <vscale x 8 x bfloat> [[MASKEDOFF:%.*]], <vscale x 8 x bfloat> [[OP1:%.*]], <vscale x 8 x bfloat> [[OP2:%.*]], i64 noundef [[VL:%.*]]) #[[ATTR0]] {
+// CHECK-RV64-NEXT: [[ENTRY:.*:]]
+// CHECK-RV64-NEXT: [[TMP0:%.*]] = call <vscale x 8 x bfloat> @llvm.riscv.vfsub.mask.nxv8bf16.nxv8bf16.i64(<vscale x 8 x bfloat> [[MASKEDOFF]], <vscale x 8 x bfloat> [[OP1]], <vscale x 8 x bfloat> [[OP2]], <vscale x 8 x i1> [[MASK]], i64 7, i64 [[VL]], i64 1)
+// CHECK-RV64-NEXT: ret <vscale x 8 x bfloat> [[TMP0]]
+//
+vbfloat16m2_t test_vfsub_vv_bf16m2_mu(vbool8_t mask, vbfloat16m2_t maskedoff, vbfloat16m2_t op1, vbfloat16m2_t op2, size_t vl) {
+ return __riscv_vfsub_vv_bf16m2_mu(mask, maskedoff, op1, op2, vl);
+}
+
+// CHECK-RV64-LABEL: define dso_local <vscale x 8 x bfloat> @test_vfsub_vf_bf16m2_mu(
+// CHECK-RV64-SAME: <vscale x 8 x i1> [[MASK:%.*]], <vscale x 8 x bfloat> [[MASKEDOFF:%.*]], <vscale x 8 x bfloat> [[OP1:%.*]], bfloat noundef [[OP2:%.*]], i64 noundef [[VL:%.*]]) #[[ATTR0]] {
+// CHECK-RV64-NEXT: [[ENTRY:.*:]]
+// CHECK-RV64-NEXT: [[TMP0:%.*]] = call <vscale x 8 x bfloat> @llvm.riscv.vfsub.mask.nxv8bf16.bf16.i64(<vscale x 8 x bfloat> [[MASKEDOFF]], <vscale x 8 x bfloat> [[OP1]], bfloat [[OP2]], <vscale x 8 x i1> [[MASK]], i64 7, i64 [[VL]], i64 1)
+// CHECK-RV64-NEXT: ret <vscale x 8 x bfloat> [[TMP0]]
+//
+vbfloat16m2_t test_vfsub_vf_bf16m2_mu(vbool8_t mask, vbfloat16m2_t maskedoff, vbfloat16m2_t op1, __bf16 op2, size_t vl) {
+ return __riscv_vfsub_vf_bf16m2_mu(mask, maskedoff, op1, op2, vl);
+}
+
+// CHECK-RV64-LABEL: define dso_local <vscale x 16 x bfloat> @test_vfsub_vv_bf16m4_mu(
+// CHECK-RV64-SAME: <vscale x 16 x i1> [[MASK:%.*]], <vscale x 16 x bfloat> [[MASKEDOFF:%.*]], <vscale x 16 x bfloat> [[OP1:%.*]], <vscale x 16 x bfloat> [[OP2:%.*]], i64 noundef [[VL:%.*]]) #[[ATTR0]] {
+// CHECK-RV64-NEXT: [[ENTRY:.*:]]
+// CHECK-RV64-NEXT: [[TMP0:%.*]] = call <vscale x 16 x bfloat> @llvm.riscv.vfsub.mask.nxv16bf16.nxv16bf16.i64(<vscale x 16 x bfloat> [[MASKEDOFF]], <vscale x 16 x bfloat> [[OP1]], <vscale x 16 x bfloat> [[OP2]], <vscale x 16 x i1> [[MASK]], i64 7, i64 [[VL]], i64 1)
+// CHECK-RV64-NEXT: ret <vscale x 16 x bfloat> [[TMP0]]
+//
+vbfloat16m4_t test_vfsub_vv_bf16m4_mu(vbool4_t mask, vbfloat16m4_t maskedoff, vbfloat16m4_t op1, vbfloat16m4_t op2, size_t vl) {
+ return __riscv_vfsub_vv_bf16m4_mu(mask, maskedoff, op1, op2, vl);
+}
+
+// CHECK-RV64-LABEL: define dso_local <vscale x 16 x bfloat> @test_vfsub_vf_bf16m4_mu(
+// CHECK-RV64-SAME: <vscale x 16 x i1> [[MASK:%.*]], <vscale x 16 x bfloat> [[MASKEDOFF:%.*]], <vscale x 16 x bfloat> [[OP1:%.*]], bfloat noundef [[OP2:%.*]], i64 noundef [[VL:%.*]]) #[[ATTR0]] {
+// CHECK-RV64-NEXT: [[ENTRY:.*:]]
+// CHECK-RV64-NEXT: [[TMP0:%.*]] = call <vscale x 16 x bfloat> @llvm.riscv.vfsub.mask.nxv16bf16.bf16.i64(<vscale x 16 x bfloat> [[MASKEDOFF]], <vscale x 16 x bfloat> [[OP1]], bfloat [[OP2]], <vscale x 16 x i1> [[MASK]], i64 7, i64 [[VL]], i64 1)
+// CHECK-RV64-NEXT: ret <vscale x 16 x bfloat> [[TMP0]]
+//
+vbfloat16m4_t test_vfsub_vf_bf16m4_mu(vbool4_t mask, vbfloat16m4_t maskedoff, vbfloat16m4_t op1, __bf16 op2, size_t vl) {
+ return __riscv_vfsub_vf_bf16m4_mu(mask, maskedoff, op1, op2, vl);
+}
+
+// CHECK-RV64-LABEL: define dso_local <vscale x 32 x bfloat> @test_vfsub_vv_bf16m8_mu(
+// CHECK-RV64-SAME: <vscale x 32 x i1> [[MASK:%.*]], <vscale x 32 x bfloat> [[MASKEDOFF:%.*]], <vscale x 32 x bfloat> [[OP1:%.*]], <vscale x 32 x bfloat> [[OP2:%.*]], i64 noundef [[VL:%.*]]) #[[ATTR0]] {
+// CHECK-RV64-NEXT: [[ENTRY:.*:]]
+// CHECK-RV64-NEXT: [[TMP0:%.*]] = call <vscale x 32 x bfloat> @llvm.riscv.vfsub.mask.nxv32bf16.nxv32bf16.i64(<vscale x 32 x bfloat> [[MASKEDOFF]], <vscale x 32 x bfloat> [[OP1]], <vscale x 32 x bfloat> [[OP2]], <vscale x 32 x i1> [[MASK]], i64 7, i64 [[VL]], i64 1)
+// CHECK-RV64-NEXT: ret <vscale x 32 x bfloat> [[TMP0]]
+//
+vbfloat16m8_t test_vfsub_vv_bf16m8_mu(vbool2_t mask, vbfloat16m8_t maskedoff, vbfloat16m8_t op1, vbfloat16m8_t op2, size_t vl) {
+ return __riscv_vfsub_vv_bf16m8_mu(mask, maskedoff, op1, op2, vl);
+}
+
+// CHECK-RV64-LABEL: define dso_local <vscale x 32 x bfloat> @test_vfsub_vf_bf16m8_mu(
+// CHECK-RV64-SAME: <vscale x 32 x i1> [[MASK:%.*]], <vscale x 32 x bfloat> [[MASKEDOFF:%.*]], <vscale x 32 x bfloat> [[OP1:%.*]], bfloat noundef [[OP2:%.*]], i64 noundef [[VL:%.*]]) #[[ATTR0]] {
+// CHECK-RV64-NEXT: [[ENTRY:.*:]]
+// CHECK-RV64-NEXT: [[TMP0:%.*]] = call <vscale x 32 x bfloat> @llvm.riscv.vfsub.mask.nxv32bf16.bf16.i64(<vscale x 32 x bfloat> [[MASKEDOFF]], <vscale x 32 x bfloat> [[OP1]], bfloat [[OP2]], <vscale x 32 x i1> [[MASK]], i64 7, i64 [[VL]], i64 1)
+// CHECK-RV64-NEXT: ret <vscale x 32 x bfloat> [[TMP0]]
+//
+vbfloat16m8_t test_vfsub_vf_bf16m8_mu(vbool2_t mask, vbfloat16m8_t maskedoff, vbfloat16m8_t op1, __bf16 op2, size_t vl) {
+ return __riscv_vfsub_vf_bf16m8_mu(mask, maskedoff, op1, op2, vl);
+}
+
diff --git a/clang/test/CodeGen/RISCV/rvv-intrinsics-autogenerated/zvfbfa/policy/non-overloaded/vfwadd.c b/clang/test/CodeGen/RISCV/rvv-intrinsics-autogenerated/zvfbfa/policy/non-overloaded/vfwadd.c
new file mode 100644
index 0000000..4bee376
--- /dev/null
+++ b/clang/test/CodeGen/RISCV/rvv-intrinsics-autogenerated/zvfbfa/policy/non-overloaded/vfwadd.c
@@ -0,0 +1,2007 @@
+// NOTE: Assertions have been autogenerated by utils/update_cc_test_checks.py UTC_ARGS: --version 5
+// REQUIRES: riscv-registered-target
+// RUN: %clang_cc1 -triple riscv64 -target-feature +v \
+// RUN: -target-feature +experimental-zvfbfa -disable-O0-optnone \
+// RUN: -emit-llvm %s -o - | opt -S -passes=mem2reg | \
+// RUN: FileCheck --check-prefix=CHECK-RV64 %s
+
+#include <riscv_vector.h>
+
+// CHECK-RV64-LABEL: define dso_local <vscale x 1 x float> @test_vfwadd_vv_bf16mf4_f32mf2_tu(
+// CHECK-RV64-SAME: <vscale x 1 x float> [[VD:%.*]], <vscale x 1 x bfloat> [[VS2:%.*]], <vscale x 1 x bfloat> [[VS1:%.*]], i64 noundef [[VL:%.*]]) #[[ATTR0:[0-9]+]] {
+// CHECK-RV64-NEXT: [[ENTRY:.*:]]
+// CHECK-RV64-NEXT: [[TMP0:%.*]] = call <vscale x 1 x float> @llvm.riscv.vfwadd.nxv1f32.nxv1bf16.nxv1bf16.i64(<vscale x 1 x float> [[VD]], <vscale x 1 x bfloat> [[VS2]], <vscale x 1 x bfloat> [[VS1]], i64 7, i64 [[VL]])
+// CHECK-RV64-NEXT: ret <vscale x 1 x float> [[TMP0]]
+//
+vfloat32mf2_t test_vfwadd_vv_bf16mf4_f32mf2_tu(vfloat32mf2_t vd,
+ vbfloat16mf4_t vs2,
+ vbfloat16mf4_t vs1, size_t vl) {
+ return __riscv_vfwadd_vv_bf16mf4_f32mf2_tu(vd, vs2, vs1, vl);
+}
+
+// CHECK-RV64-LABEL: define dso_local <vscale x 1 x float> @test_vfwadd_vf_bf16mf4_f32mf2_tu(
+// CHECK-RV64-SAME: <vscale x 1 x float> [[VD:%.*]], <vscale x 1 x bfloat> [[VS2:%.*]], bfloat noundef [[RS1:%.*]], i64 noundef [[VL:%.*]]) #[[ATTR0]] {
+// CHECK-RV64-NEXT: [[ENTRY:.*:]]
+// CHECK-RV64-NEXT: [[TMP0:%.*]] = call <vscale x 1 x float> @llvm.riscv.vfwadd.nxv1f32.nxv1bf16.bf16.i64(<vscale x 1 x float> [[VD]], <vscale x 1 x bfloat> [[VS2]], bfloat [[RS1]], i64 7, i64 [[VL]])
+// CHECK-RV64-NEXT: ret <vscale x 1 x float> [[TMP0]]
+//
+vfloat32mf2_t test_vfwadd_vf_bf16mf4_f32mf2_tu(vfloat32mf2_t vd,
+ vbfloat16mf4_t vs2, __bf16 rs1,
+ size_t vl) {
+ return __riscv_vfwadd_vf_bf16mf4_f32mf2_tu(vd, vs2, rs1, vl);
+}
+
+// CHECK-RV64-LABEL: define dso_local <vscale x 1 x float> @test_vfwadd_wv_bf16mf4_f32mf2_tu(
+// CHECK-RV64-SAME: <vscale x 1 x float> [[VD:%.*]], <vscale x 1 x float> [[VS2:%.*]], <vscale x 1 x bfloat> [[VS1:%.*]], i64 noundef [[VL:%.*]]) #[[ATTR0]] {
+// CHECK-RV64-NEXT: [[ENTRY:.*:]]
+// CHECK-RV64-NEXT: [[TMP0:%.*]] = call <vscale x 1 x float> @llvm.riscv.vfwadd.w.nxv1f32.nxv1bf16.i64(<vscale x 1 x float> [[VD]], <vscale x 1 x float> [[VS2]], <vscale x 1 x bfloat> [[VS1]], i64 7, i64 [[VL]])
+// CHECK-RV64-NEXT: ret <vscale x 1 x float> [[TMP0]]
+//
+vfloat32mf2_t test_vfwadd_wv_bf16mf4_f32mf2_tu(vfloat32mf2_t vd,
+ vfloat32mf2_t vs2,
+ vbfloat16mf4_t vs1, size_t vl) {
+ return __riscv_vfwadd_wv_bf16mf4_f32mf2_tu(vd, vs2, vs1, vl);
+}
+
+// CHECK-RV64-LABEL: define dso_local <vscale x 1 x float> @test_vfwadd_wf_bf16_f32mf2_tu(
+// CHECK-RV64-SAME: <vscale x 1 x float> [[VD:%.*]], <vscale x 1 x float> [[VS2:%.*]], bfloat noundef [[RS1:%.*]], i64 noundef [[VL:%.*]]) #[[ATTR0]] {
+// CHECK-RV64-NEXT: [[ENTRY:.*:]]
+// CHECK-RV64-NEXT: [[TMP0:%.*]] = call <vscale x 1 x float> @llvm.riscv.vfwadd.w.nxv1f32.bf16.i64(<vscale x 1 x float> [[VD]], <vscale x 1 x float> [[VS2]], bfloat [[RS1]], i64 7, i64 [[VL]])
+// CHECK-RV64-NEXT: ret <vscale x 1 x float> [[TMP0]]
+//
+vfloat32mf2_t test_vfwadd_wf_bf16_f32mf2_tu(vfloat32mf2_t vd, vfloat32mf2_t vs2,
+ __bf16 rs1, size_t vl) {
+ return __riscv_vfwadd_wf_bf16_f32mf2_tu(vd, vs2, rs1, vl);
+}
+
+// CHECK-RV64-LABEL: define dso_local <vscale x 2 x float> @test_vfwadd_vv_bf16mf2_f32m1_tu(
+// CHECK-RV64-SAME: <vscale x 2 x float> [[VD:%.*]], <vscale x 2 x bfloat> [[VS2:%.*]], <vscale x 2 x bfloat> [[VS1:%.*]], i64 noundef [[VL:%.*]]) #[[ATTR0]] {
+// CHECK-RV64-NEXT: [[ENTRY:.*:]]
+// CHECK-RV64-NEXT: [[TMP0:%.*]] = call <vscale x 2 x float> @llvm.riscv.vfwadd.nxv2f32.nxv2bf16.nxv2bf16.i64(<vscale x 2 x float> [[VD]], <vscale x 2 x bfloat> [[VS2]], <vscale x 2 x bfloat> [[VS1]], i64 7, i64 [[VL]])
+// CHECK-RV64-NEXT: ret <vscale x 2 x float> [[TMP0]]
+//
+vfloat32m1_t test_vfwadd_vv_bf16mf2_f32m1_tu(vfloat32m1_t vd,
+ vbfloat16mf2_t vs2,
+ vbfloat16mf2_t vs1, size_t vl) {
+ return __riscv_vfwadd_vv_bf16mf2_f32m1_tu(vd, vs2, vs1, vl);
+}
+
+// CHECK-RV64-LABEL: define dso_local <vscale x 2 x float> @test_vfwadd_vf_bf16mf2_f32m1_tu(
+// CHECK-RV64-SAME: <vscale x 2 x float> [[VD:%.*]], <vscale x 2 x bfloat> [[VS2:%.*]], bfloat noundef [[RS1:%.*]], i64 noundef [[VL:%.*]]) #[[ATTR0]] {
+// CHECK-RV64-NEXT: [[ENTRY:.*:]]
+// CHECK-RV64-NEXT: [[TMP0:%.*]] = call <vscale x 2 x float> @llvm.riscv.vfwadd.nxv2f32.nxv2bf16.bf16.i64(<vscale x 2 x float> [[VD]], <vscale x 2 x bfloat> [[VS2]], bfloat [[RS1]], i64 7, i64 [[VL]])
+// CHECK-RV64-NEXT: ret <vscale x 2 x float> [[TMP0]]
+//
+vfloat32m1_t test_vfwadd_vf_bf16mf2_f32m1_tu(vfloat32m1_t vd,
+ vbfloat16mf2_t vs2, __bf16 rs1,
+ size_t vl) {
+ return __riscv_vfwadd_vf_bf16mf2_f32m1_tu(vd, vs2, rs1, vl);
+}
+
+// CHECK-RV64-LABEL: define dso_local <vscale x 2 x float> @test_vfwadd_wv_bf16mf2_f32m1_tu(
+// CHECK-RV64-SAME: <vscale x 2 x float> [[VD:%.*]], <vscale x 2 x float> [[VS2:%.*]], <vscale x 2 x bfloat> [[VS1:%.*]], i64 noundef [[VL:%.*]]) #[[ATTR0]] {
+// CHECK-RV64-NEXT: [[ENTRY:.*:]]
+// CHECK-RV64-NEXT: [[TMP0:%.*]] = call <vscale x 2 x float> @llvm.riscv.vfwadd.w.nxv2f32.nxv2bf16.i64(<vscale x 2 x float> [[VD]], <vscale x 2 x float> [[VS2]], <vscale x 2 x bfloat> [[VS1]], i64 7, i64 [[VL]])
+// CHECK-RV64-NEXT: ret <vscale x 2 x float> [[TMP0]]
+//
+vfloat32m1_t test_vfwadd_wv_bf16mf2_f32m1_tu(vfloat32m1_t vd, vfloat32m1_t vs2,
+ vbfloat16mf2_t vs1, size_t vl) {
+ return __riscv_vfwadd_wv_bf16mf2_f32m1_tu(vd, vs2, vs1, vl);
+}
+
+// CHECK-RV64-LABEL: define dso_local <vscale x 2 x float> @test_vfwadd_wf_bf16_f32m1_tu(
+// CHECK-RV64-SAME: <vscale x 2 x float> [[VD:%.*]], <vscale x 2 x float> [[VS2:%.*]], bfloat noundef [[RS1:%.*]], i64 noundef [[VL:%.*]]) #[[ATTR0]] {
+// CHECK-RV64-NEXT: [[ENTRY:.*:]]
+// CHECK-RV64-NEXT: [[TMP0:%.*]] = call <vscale x 2 x float> @llvm.riscv.vfwadd.w.nxv2f32.bf16.i64(<vscale x 2 x float> [[VD]], <vscale x 2 x float> [[VS2]], bfloat [[RS1]], i64 7, i64 [[VL]])
+// CHECK-RV64-NEXT: ret <vscale x 2 x float> [[TMP0]]
+//
+vfloat32m1_t test_vfwadd_wf_bf16_f32m1_tu(vfloat32m1_t vd, vfloat32m1_t vs2,
+ __bf16 rs1, size_t vl) {
+ return __riscv_vfwadd_wf_bf16_f32m1_tu(vd, vs2, rs1, vl);
+}
+
+// CHECK-RV64-LABEL: define dso_local <vscale x 4 x float> @test_vfwadd_vv_bf16m1_f32m2_tu(
+// CHECK-RV64-SAME: <vscale x 4 x float> [[VD:%.*]], <vscale x 4 x bfloat> [[VS2:%.*]], <vscale x 4 x bfloat> [[VS1:%.*]], i64 noundef [[VL:%.*]]) #[[ATTR0]] {
+// CHECK-RV64-NEXT: [[ENTRY:.*:]]
+// CHECK-RV64-NEXT: [[TMP0:%.*]] = call <vscale x 4 x float> @llvm.riscv.vfwadd.nxv4f32.nxv4bf16.nxv4bf16.i64(<vscale x 4 x float> [[VD]], <vscale x 4 x bfloat> [[VS2]], <vscale x 4 x bfloat> [[VS1]], i64 7, i64 [[VL]])
+// CHECK-RV64-NEXT: ret <vscale x 4 x float> [[TMP0]]
+//
+vfloat32m2_t test_vfwadd_vv_bf16m1_f32m2_tu(vfloat32m2_t vd, vbfloat16m1_t vs2,
+ vbfloat16m1_t vs1, size_t vl) {
+ return __riscv_vfwadd_vv_bf16m1_f32m2_tu(vd, vs2, vs1, vl);
+}
+
+// CHECK-RV64-LABEL: define dso_local <vscale x 4 x float> @test_vfwadd_vf_bf16m1_f32m2_tu(
+// CHECK-RV64-SAME: <vscale x 4 x float> [[VD:%.*]], <vscale x 4 x bfloat> [[VS2:%.*]], bfloat noundef [[RS1:%.*]], i64 noundef [[VL:%.*]]) #[[ATTR0]] {
+// CHECK-RV64-NEXT: [[ENTRY:.*:]]
+// CHECK-RV64-NEXT: [[TMP0:%.*]] = call <vscale x 4 x float> @llvm.riscv.vfwadd.nxv4f32.nxv4bf16.bf16.i64(<vscale x 4 x float> [[VD]], <vscale x 4 x bfloat> [[VS2]], bfloat [[RS1]], i64 7, i64 [[VL]])
+// CHECK-RV64-NEXT: ret <vscale x 4 x float> [[TMP0]]
+//
+vfloat32m2_t test_vfwadd_vf_bf16m1_f32m2_tu(vfloat32m2_t vd, vbfloat16m1_t vs2,
+ __bf16 rs1, size_t vl) {
+ return __riscv_vfwadd_vf_bf16m1_f32m2_tu(vd, vs2, rs1, vl);
+}
+
+// CHECK-RV64-LABEL: define dso_local <vscale x 4 x float> @test_vfwadd_wv_bf16m1_f32m2_tu(
+// CHECK-RV64-SAME: <vscale x 4 x float> [[VD:%.*]], <vscale x 4 x float> [[VS2:%.*]], <vscale x 4 x bfloat> [[VS1:%.*]], i64 noundef [[VL:%.*]]) #[[ATTR0]] {
+// CHECK-RV64-NEXT: [[ENTRY:.*:]]
+// CHECK-RV64-NEXT: [[TMP0:%.*]] = call <vscale x 4 x float> @llvm.riscv.vfwadd.w.nxv4f32.nxv4bf16.i64(<vscale x 4 x float> [[VD]], <vscale x 4 x float> [[VS2]], <vscale x 4 x bfloat> [[VS1]], i64 7, i64 [[VL]])
+// CHECK-RV64-NEXT: ret <vscale x 4 x float> [[TMP0]]
+//
+vfloat32m2_t test_vfwadd_wv_bf16m1_f32m2_tu(vfloat32m2_t vd, vfloat32m2_t vs2,
+ vbfloat16m1_t vs1, size_t vl) {
+ return __riscv_vfwadd_wv_bf16m1_f32m2_tu(vd, vs2, vs1, vl);
+}
+
+// CHECK-RV64-LABEL: define dso_local <vscale x 4 x float> @test_vfwadd_wf_bf16_f32m2_tu(
+// CHECK-RV64-SAME: <vscale x 4 x float> [[VD:%.*]], <vscale x 4 x float> [[VS2:%.*]], bfloat noundef [[RS1:%.*]], i64 noundef [[VL:%.*]]) #[[ATTR0]] {
+// CHECK-RV64-NEXT: [[ENTRY:.*:]]
+// CHECK-RV64-NEXT: [[TMP0:%.*]] = call <vscale x 4 x float> @llvm.riscv.vfwadd.w.nxv4f32.bf16.i64(<vscale x 4 x float> [[VD]], <vscale x 4 x float> [[VS2]], bfloat [[RS1]], i64 7, i64 [[VL]])
+// CHECK-RV64-NEXT: ret <vscale x 4 x float> [[TMP0]]
+//
+vfloat32m2_t test_vfwadd_wf_bf16_f32m2_tu(vfloat32m2_t vd, vfloat32m2_t vs2,
+ __bf16 rs1, size_t vl) {
+ return __riscv_vfwadd_wf_bf16_f32m2_tu(vd, vs2, rs1, vl);
+}
+
+// CHECK-RV64-LABEL: define dso_local <vscale x 8 x float> @test_vfwadd_vv_bf16m2_f32m4_tu(
+// CHECK-RV64-SAME: <vscale x 8 x float> [[VD:%.*]], <vscale x 8 x bfloat> [[VS2:%.*]], <vscale x 8 x bfloat> [[VS1:%.*]], i64 noundef [[VL:%.*]]) #[[ATTR0]] {
+// CHECK-RV64-NEXT: [[ENTRY:.*:]]
+// CHECK-RV64-NEXT: [[TMP0:%.*]] = call <vscale x 8 x float> @llvm.riscv.vfwadd.nxv8f32.nxv8bf16.nxv8bf16.i64(<vscale x 8 x float> [[VD]], <vscale x 8 x bfloat> [[VS2]], <vscale x 8 x bfloat> [[VS1]], i64 7, i64 [[VL]])
+// CHECK-RV64-NEXT: ret <vscale x 8 x float> [[TMP0]]
+//
+vfloat32m4_t test_vfwadd_vv_bf16m2_f32m4_tu(vfloat32m4_t vd, vbfloat16m2_t vs2,
+ vbfloat16m2_t vs1, size_t vl) {
+ return __riscv_vfwadd_vv_bf16m2_f32m4_tu(vd, vs2, vs1, vl);
+}
+
+// CHECK-RV64-LABEL: define dso_local <vscale x 8 x float> @test_vfwadd_vf_bf16m2_f32m4_tu(
+// CHECK-RV64-SAME: <vscale x 8 x float> [[VD:%.*]], <vscale x 8 x bfloat> [[VS2:%.*]], bfloat noundef [[RS1:%.*]], i64 noundef [[VL:%.*]]) #[[ATTR0]] {
+// CHECK-RV64-NEXT: [[ENTRY:.*:]]
+// CHECK-RV64-NEXT: [[TMP0:%.*]] = call <vscale x 8 x float> @llvm.riscv.vfwadd.nxv8f32.nxv8bf16.bf16.i64(<vscale x 8 x float> [[VD]], <vscale x 8 x bfloat> [[VS2]], bfloat [[RS1]], i64 7, i64 [[VL]])
+// CHECK-RV64-NEXT: ret <vscale x 8 x float> [[TMP0]]
+//
+vfloat32m4_t test_vfwadd_vf_bf16m2_f32m4_tu(vfloat32m4_t vd, vbfloat16m2_t vs2,
+ __bf16 rs1, size_t vl) {
+ return __riscv_vfwadd_vf_bf16m2_f32m4_tu(vd, vs2, rs1, vl);
+}
+
+// CHECK-RV64-LABEL: define dso_local <vscale x 8 x float> @test_vfwadd_wv_bf16m2_f32m4_tu(
+// CHECK-RV64-SAME: <vscale x 8 x float> [[VD:%.*]], <vscale x 8 x float> [[VS2:%.*]], <vscale x 8 x bfloat> [[VS1:%.*]], i64 noundef [[VL:%.*]]) #[[ATTR0]] {
+// CHECK-RV64-NEXT: [[ENTRY:.*:]]
+// CHECK-RV64-NEXT: [[TMP0:%.*]] = call <vscale x 8 x float> @llvm.riscv.vfwadd.w.nxv8f32.nxv8bf16.i64(<vscale x 8 x float> [[VD]], <vscale x 8 x float> [[VS2]], <vscale x 8 x bfloat> [[VS1]], i64 7, i64 [[VL]])
+// CHECK-RV64-NEXT: ret <vscale x 8 x float> [[TMP0]]
+//
+vfloat32m4_t test_vfwadd_wv_bf16m2_f32m4_tu(vfloat32m4_t vd, vfloat32m4_t vs2,
+ vbfloat16m2_t vs1, size_t vl) {
+ return __riscv_vfwadd_wv_bf16m2_f32m4_tu(vd, vs2, vs1, vl);
+}
+
+// CHECK-RV64-LABEL: define dso_local <vscale x 8 x float> @test_vfwadd_wf_bf16_f32m4_tu(
+// CHECK-RV64-SAME: <vscale x 8 x float> [[VD:%.*]], <vscale x 8 x float> [[VS2:%.*]], bfloat noundef [[RS1:%.*]], i64 noundef [[VL:%.*]]) #[[ATTR0]] {
+// CHECK-RV64-NEXT: [[ENTRY:.*:]]
+// CHECK-RV64-NEXT: [[TMP0:%.*]] = call <vscale x 8 x float> @llvm.riscv.vfwadd.w.nxv8f32.bf16.i64(<vscale x 8 x float> [[VD]], <vscale x 8 x float> [[VS2]], bfloat [[RS1]], i64 7, i64 [[VL]])
+// CHECK-RV64-NEXT: ret <vscale x 8 x float> [[TMP0]]
+//
+vfloat32m4_t test_vfwadd_wf_bf16_f32m4_tu(vfloat32m4_t vd, vfloat32m4_t vs2,
+ __bf16 rs1, size_t vl) {
+ return __riscv_vfwadd_wf_bf16_f32m4_tu(vd, vs2, rs1, vl);
+}
+
+// CHECK-RV64-LABEL: define dso_local <vscale x 16 x float> @test_vfwadd_vv_bf16m4_f32m8_tu(
+// CHECK-RV64-SAME: <vscale x 16 x float> [[VD:%.*]], <vscale x 16 x bfloat> [[VS2:%.*]], <vscale x 16 x bfloat> [[VS1:%.*]], i64 noundef [[VL:%.*]]) #[[ATTR0]] {
+// CHECK-RV64-NEXT: [[ENTRY:.*:]]
+// CHECK-RV64-NEXT: [[TMP0:%.*]] = call <vscale x 16 x float> @llvm.riscv.vfwadd.nxv16f32.nxv16bf16.nxv16bf16.i64(<vscale x 16 x float> [[VD]], <vscale x 16 x bfloat> [[VS2]], <vscale x 16 x bfloat> [[VS1]], i64 7, i64 [[VL]])
+// CHECK-RV64-NEXT: ret <vscale x 16 x float> [[TMP0]]
+//
+vfloat32m8_t test_vfwadd_vv_bf16m4_f32m8_tu(vfloat32m8_t vd, vbfloat16m4_t vs2,
+ vbfloat16m4_t vs1, size_t vl) {
+ return __riscv_vfwadd_vv_bf16m4_f32m8_tu(vd, vs2, vs1, vl);
+}
+
+// CHECK-RV64-LABEL: define dso_local <vscale x 16 x float> @test_vfwadd_vf_bf16m4_f32m8_tu(
+// CHECK-RV64-SAME: <vscale x 16 x float> [[VD:%.*]], <vscale x 16 x bfloat> [[VS2:%.*]], bfloat noundef [[RS1:%.*]], i64 noundef [[VL:%.*]]) #[[ATTR0]] {
+// CHECK-RV64-NEXT: [[ENTRY:.*:]]
+// CHECK-RV64-NEXT: [[TMP0:%.*]] = call <vscale x 16 x float> @llvm.riscv.vfwadd.nxv16f32.nxv16bf16.bf16.i64(<vscale x 16 x float> [[VD]], <vscale x 16 x bfloat> [[VS2]], bfloat [[RS1]], i64 7, i64 [[VL]])
+// CHECK-RV64-NEXT: ret <vscale x 16 x float> [[TMP0]]
+//
+vfloat32m8_t test_vfwadd_vf_bf16m4_f32m8_tu(vfloat32m8_t vd, vbfloat16m4_t vs2,
+ __bf16 rs1, size_t vl) {
+ return __riscv_vfwadd_vf_bf16m4_f32m8_tu(vd, vs2, rs1, vl);
+}
+
+// CHECK-RV64-LABEL: define dso_local <vscale x 16 x float> @test_vfwadd_wv_bf16m4_f32m8_tu(
+// CHECK-RV64-SAME: <vscale x 16 x float> [[VD:%.*]], <vscale x 16 x float> [[VS2:%.*]], <vscale x 16 x bfloat> [[VS1:%.*]], i64 noundef [[VL:%.*]]) #[[ATTR0]] {
+// CHECK-RV64-NEXT: [[ENTRY:.*:]]
+// CHECK-RV64-NEXT: [[TMP0:%.*]] = call <vscale x 16 x float> @llvm.riscv.vfwadd.w.nxv16f32.nxv16bf16.i64(<vscale x 16 x float> [[VD]], <vscale x 16 x float> [[VS2]], <vscale x 16 x bfloat> [[VS1]], i64 7, i64 [[VL]])
+// CHECK-RV64-NEXT: ret <vscale x 16 x float> [[TMP0]]
+//
+vfloat32m8_t test_vfwadd_wv_bf16m4_f32m8_tu(vfloat32m8_t vd, vfloat32m8_t vs2,
+ vbfloat16m4_t vs1, size_t vl) {
+ return __riscv_vfwadd_wv_bf16m4_f32m8_tu(vd, vs2, vs1, vl);
+}
+
+// CHECK-RV64-LABEL: define dso_local <vscale x 16 x float> @test_vfwadd_wf_bf16_f32m8_tu(
+// CHECK-RV64-SAME: <vscale x 16 x float> [[VD:%.*]], <vscale x 16 x float> [[VS2:%.*]], bfloat noundef [[RS1:%.*]], i64 noundef [[VL:%.*]]) #[[ATTR0]] {
+// CHECK-RV64-NEXT: [[ENTRY:.*:]]
+// CHECK-RV64-NEXT: [[TMP0:%.*]] = call <vscale x 16 x float> @llvm.riscv.vfwadd.w.nxv16f32.bf16.i64(<vscale x 16 x float> [[VD]], <vscale x 16 x float> [[VS2]], bfloat [[RS1]], i64 7, i64 [[VL]])
+// CHECK-RV64-NEXT: ret <vscale x 16 x float> [[TMP0]]
+//
+vfloat32m8_t test_vfwadd_wf_bf16_f32m8_tu(vfloat32m8_t vd, vfloat32m8_t vs2,
+ __bf16 rs1, size_t vl) {
+ return __riscv_vfwadd_wf_bf16_f32m8_tu(vd, vs2, rs1, vl);
+}
+
+// CHECK-RV64-LABEL: define dso_local <vscale x 1 x float> @test_vfwadd_vv_bf16mf4_f32mf2_tum(
+// CHECK-RV64-SAME: <vscale x 1 x i1> [[VM:%.*]], <vscale x 1 x float> [[VD:%.*]], <vscale x 1 x bfloat> [[VS2:%.*]], <vscale x 1 x bfloat> [[VS1:%.*]], i64 noundef [[VL:%.*]]) #[[ATTR0]] {
+// CHECK-RV64-NEXT: [[ENTRY:.*:]]
+// CHECK-RV64-NEXT: [[TMP0:%.*]] = call <vscale x 1 x float> @llvm.riscv.vfwadd.mask.nxv1f32.nxv1bf16.nxv1bf16.i64(<vscale x 1 x float> [[VD]], <vscale x 1 x bfloat> [[VS2]], <vscale x 1 x bfloat> [[VS1]], <vscale x 1 x i1> [[VM]], i64 7, i64 [[VL]], i64 2)
+// CHECK-RV64-NEXT: ret <vscale x 1 x float> [[TMP0]]
+//
+vfloat32mf2_t test_vfwadd_vv_bf16mf4_f32mf2_tum(vbool64_t vm, vfloat32mf2_t vd,
+ vbfloat16mf4_t vs2,
+ vbfloat16mf4_t vs1, size_t vl) {
+ return __riscv_vfwadd_vv_bf16mf4_f32mf2_tum(vm, vd, vs2, vs1, vl);
+}
+
+// CHECK-RV64-LABEL: define dso_local <vscale x 1 x float> @test_vfwadd_vf_bf16mf4_f32mf2_tum(
+// CHECK-RV64-SAME: <vscale x 1 x i1> [[VM:%.*]], <vscale x 1 x float> [[VD:%.*]], <vscale x 1 x bfloat> [[VS2:%.*]], bfloat noundef [[RS1:%.*]], i64 noundef [[VL:%.*]]) #[[ATTR0]] {
+// CHECK-RV64-NEXT: [[ENTRY:.*:]]
+// CHECK-RV64-NEXT: [[TMP0:%.*]] = call <vscale x 1 x float> @llvm.riscv.vfwadd.mask.nxv1f32.nxv1bf16.bf16.i64(<vscale x 1 x float> [[VD]], <vscale x 1 x bfloat> [[VS2]], bfloat [[RS1]], <vscale x 1 x i1> [[VM]], i64 7, i64 [[VL]], i64 2)
+// CHECK-RV64-NEXT: ret <vscale x 1 x float> [[TMP0]]
+//
+vfloat32mf2_t test_vfwadd_vf_bf16mf4_f32mf2_tum(vbool64_t vm, vfloat32mf2_t vd,
+ vbfloat16mf4_t vs2, __bf16 rs1,
+ size_t vl) {
+ return __riscv_vfwadd_vf_bf16mf4_f32mf2_tum(vm, vd, vs2, rs1, vl);
+}
+
+// CHECK-RV64-LABEL: define dso_local <vscale x 1 x float> @test_vfwadd_wv_bf16mf4_f32mf2_tum(
+// CHECK-RV64-SAME: <vscale x 1 x i1> [[VM:%.*]], <vscale x 1 x float> [[VD:%.*]], <vscale x 1 x float> [[VS2:%.*]], <vscale x 1 x bfloat> [[VS1:%.*]], i64 noundef [[VL:%.*]]) #[[ATTR0]] {
+// CHECK-RV64-NEXT: [[ENTRY:.*:]]
+// CHECK-RV64-NEXT: [[TMP0:%.*]] = call <vscale x 1 x float> @llvm.riscv.vfwadd.w.mask.nxv1f32.nxv1bf16.i64(<vscale x 1 x float> [[VD]], <vscale x 1 x float> [[VS2]], <vscale x 1 x bfloat> [[VS1]], <vscale x 1 x i1> [[VM]], i64 7, i64 [[VL]], i64 2)
+// CHECK-RV64-NEXT: ret <vscale x 1 x float> [[TMP0]]
+//
+vfloat32mf2_t test_vfwadd_wv_bf16mf4_f32mf2_tum(vbool64_t vm, vfloat32mf2_t vd,
+ vfloat32mf2_t vs2,
+ vbfloat16mf4_t vs1, size_t vl) {
+ return __riscv_vfwadd_wv_bf16mf4_f32mf2_tum(vm, vd, vs2, vs1, vl);
+}
+
+// CHECK-RV64-LABEL: define dso_local <vscale x 1 x float> @test_vfwadd_wf_bf16_f32mf2_tum(
+// CHECK-RV64-SAME: <vscale x 1 x i1> [[VM:%.*]], <vscale x 1 x float> [[VD:%.*]], <vscale x 1 x float> [[VS2:%.*]], bfloat noundef [[RS1:%.*]], i64 noundef [[VL:%.*]]) #[[ATTR0]] {
+// CHECK-RV64-NEXT: [[ENTRY:.*:]]
+// CHECK-RV64-NEXT: [[TMP0:%.*]] = call <vscale x 1 x float> @llvm.riscv.vfwadd.w.mask.nxv1f32.bf16.i64(<vscale x 1 x float> [[VD]], <vscale x 1 x float> [[VS2]], bfloat [[RS1]], <vscale x 1 x i1> [[VM]], i64 7, i64 [[VL]], i64 2)
+// CHECK-RV64-NEXT: ret <vscale x 1 x float> [[TMP0]]
+//
+vfloat32mf2_t test_vfwadd_wf_bf16_f32mf2_tum(vbool64_t vm, vfloat32mf2_t vd,
+ vfloat32mf2_t vs2, __bf16 rs1,
+ size_t vl) {
+ return __riscv_vfwadd_wf_bf16_f32mf2_tum(vm, vd, vs2, rs1, vl);
+}
+
+// CHECK-RV64-LABEL: define dso_local <vscale x 2 x float> @test_vfwadd_vv_bf16mf2_f32m1_tum(
+// CHECK-RV64-SAME: <vscale x 2 x i1> [[VM:%.*]], <vscale x 2 x float> [[VD:%.*]], <vscale x 2 x bfloat> [[VS2:%.*]], <vscale x 2 x bfloat> [[VS1:%.*]], i64 noundef [[VL:%.*]]) #[[ATTR0]] {
+// CHECK-RV64-NEXT: [[ENTRY:.*:]]
+// CHECK-RV64-NEXT: [[TMP0:%.*]] = call <vscale x 2 x float> @llvm.riscv.vfwadd.mask.nxv2f32.nxv2bf16.nxv2bf16.i64(<vscale x 2 x float> [[VD]], <vscale x 2 x bfloat> [[VS2]], <vscale x 2 x bfloat> [[VS1]], <vscale x 2 x i1> [[VM]], i64 7, i64 [[VL]], i64 2)
+// CHECK-RV64-NEXT: ret <vscale x 2 x float> [[TMP0]]
+//
+vfloat32m1_t test_vfwadd_vv_bf16mf2_f32m1_tum(vbool32_t vm, vfloat32m1_t vd,
+ vbfloat16mf2_t vs2,
+ vbfloat16mf2_t vs1, size_t vl) {
+ return __riscv_vfwadd_vv_bf16mf2_f32m1_tum(vm, vd, vs2, vs1, vl);
+}
+
+// CHECK-RV64-LABEL: define dso_local <vscale x 2 x float> @test_vfwadd_vf_bf16mf2_f32m1_tum(
+// CHECK-RV64-SAME: <vscale x 2 x i1> [[VM:%.*]], <vscale x 2 x float> [[VD:%.*]], <vscale x 2 x bfloat> [[VS2:%.*]], bfloat noundef [[RS1:%.*]], i64 noundef [[VL:%.*]]) #[[ATTR0]] {
+// CHECK-RV64-NEXT: [[ENTRY:.*:]]
+// CHECK-RV64-NEXT: [[TMP0:%.*]] = call <vscale x 2 x float> @llvm.riscv.vfwadd.mask.nxv2f32.nxv2bf16.bf16.i64(<vscale x 2 x float> [[VD]], <vscale x 2 x bfloat> [[VS2]], bfloat [[RS1]], <vscale x 2 x i1> [[VM]], i64 7, i64 [[VL]], i64 2)
+// CHECK-RV64-NEXT: ret <vscale x 2 x float> [[TMP0]]
+//
+vfloat32m1_t test_vfwadd_vf_bf16mf2_f32m1_tum(vbool32_t vm, vfloat32m1_t vd,
+ vbfloat16mf2_t vs2, __bf16 rs1,
+ size_t vl) {
+ return __riscv_vfwadd_vf_bf16mf2_f32m1_tum(vm, vd, vs2, rs1, vl);
+}
+
+// CHECK-RV64-LABEL: define dso_local <vscale x 2 x float> @test_vfwadd_wv_bf16mf2_f32m1_tum(
+// CHECK-RV64-SAME: <vscale x 2 x i1> [[VM:%.*]], <vscale x 2 x float> [[VD:%.*]], <vscale x 2 x float> [[VS2:%.*]], <vscale x 2 x bfloat> [[VS1:%.*]], i64 noundef [[VL:%.*]]) #[[ATTR0]] {
+// CHECK-RV64-NEXT: [[ENTRY:.*:]]
+// CHECK-RV64-NEXT: [[TMP0:%.*]] = call <vscale x 2 x float> @llvm.riscv.vfwadd.w.mask.nxv2f32.nxv2bf16.i64(<vscale x 2 x float> [[VD]], <vscale x 2 x float> [[VS2]], <vscale x 2 x bfloat> [[VS1]], <vscale x 2 x i1> [[VM]], i64 7, i64 [[VL]], i64 2)
+// CHECK-RV64-NEXT: ret <vscale x 2 x float> [[TMP0]]
+//
+vfloat32m1_t test_vfwadd_wv_bf16mf2_f32m1_tum(vbool32_t vm, vfloat32m1_t vd,
+ vfloat32m1_t vs2,
+ vbfloat16mf2_t vs1, size_t vl) {
+ return __riscv_vfwadd_wv_bf16mf2_f32m1_tum(vm, vd, vs2, vs1, vl);
+}
+
+// CHECK-RV64-LABEL: define dso_local <vscale x 2 x float> @test_vfwadd_wf_bf16_f32m1_tum(
+// CHECK-RV64-SAME: <vscale x 2 x i1> [[VM:%.*]], <vscale x 2 x float> [[VD:%.*]], <vscale x 2 x float> [[VS2:%.*]], bfloat noundef [[RS1:%.*]], i64 noundef [[VL:%.*]]) #[[ATTR0]] {
+// CHECK-RV64-NEXT: [[ENTRY:.*:]]
+// CHECK-RV64-NEXT: [[TMP0:%.*]] = call <vscale x 2 x float> @llvm.riscv.vfwadd.w.mask.nxv2f32.bf16.i64(<vscale x 2 x float> [[VD]], <vscale x 2 x float> [[VS2]], bfloat [[RS1]], <vscale x 2 x i1> [[VM]], i64 7, i64 [[VL]], i64 2)
+// CHECK-RV64-NEXT: ret <vscale x 2 x float> [[TMP0]]
+//
+vfloat32m1_t test_vfwadd_wf_bf16_f32m1_tum(vbool32_t vm, vfloat32m1_t vd,
+ vfloat32m1_t vs2, __bf16 rs1,
+ size_t vl) {
+ return __riscv_vfwadd_wf_bf16_f32m1_tum(vm, vd, vs2, rs1, vl);
+}
+
+// CHECK-RV64-LABEL: define dso_local <vscale x 4 x float> @test_vfwadd_vv_bf16m1_f32m2_tum(
+// CHECK-RV64-SAME: <vscale x 4 x i1> [[VM:%.*]], <vscale x 4 x float> [[VD:%.*]], <vscale x 4 x bfloat> [[VS2:%.*]], <vscale x 4 x bfloat> [[VS1:%.*]], i64 noundef [[VL:%.*]]) #[[ATTR0]] {
+// CHECK-RV64-NEXT: [[ENTRY:.*:]]
+// CHECK-RV64-NEXT: [[TMP0:%.*]] = call <vscale x 4 x float> @llvm.riscv.vfwadd.mask.nxv4f32.nxv4bf16.nxv4bf16.i64(<vscale x 4 x float> [[VD]], <vscale x 4 x bfloat> [[VS2]], <vscale x 4 x bfloat> [[VS1]], <vscale x 4 x i1> [[VM]], i64 7, i64 [[VL]], i64 2)
+// CHECK-RV64-NEXT: ret <vscale x 4 x float> [[TMP0]]
+//
+vfloat32m2_t test_vfwadd_vv_bf16m1_f32m2_tum(vbool16_t vm, vfloat32m2_t vd,
+ vbfloat16m1_t vs2,
+ vbfloat16m1_t vs1, size_t vl) {
+ return __riscv_vfwadd_vv_bf16m1_f32m2_tum(vm, vd, vs2, vs1, vl);
+}
+
+// CHECK-RV64-LABEL: define dso_local <vscale x 4 x float> @test_vfwadd_vf_bf16m1_f32m2_tum(
+// CHECK-RV64-SAME: <vscale x 4 x i1> [[VM:%.*]], <vscale x 4 x float> [[VD:%.*]], <vscale x 4 x bfloat> [[VS2:%.*]], bfloat noundef [[RS1:%.*]], i64 noundef [[VL:%.*]]) #[[ATTR0]] {
+// CHECK-RV64-NEXT: [[ENTRY:.*:]]
+// CHECK-RV64-NEXT: [[TMP0:%.*]] = call <vscale x 4 x float> @llvm.riscv.vfwadd.mask.nxv4f32.nxv4bf16.bf16.i64(<vscale x 4 x float> [[VD]], <vscale x 4 x bfloat> [[VS2]], bfloat [[RS1]], <vscale x 4 x i1> [[VM]], i64 7, i64 [[VL]], i64 2)
+// CHECK-RV64-NEXT: ret <vscale x 4 x float> [[TMP0]]
+//
+vfloat32m2_t test_vfwadd_vf_bf16m1_f32m2_tum(vbool16_t vm, vfloat32m2_t vd,
+ vbfloat16m1_t vs2, __bf16 rs1,
+ size_t vl) {
+ return __riscv_vfwadd_vf_bf16m1_f32m2_tum(vm, vd, vs2, rs1, vl);
+}
+
+// CHECK-RV64-LABEL: define dso_local <vscale x 4 x float> @test_vfwadd_wv_bf16m1_f32m2_tum(
+// CHECK-RV64-SAME: <vscale x 4 x i1> [[VM:%.*]], <vscale x 4 x float> [[VD:%.*]], <vscale x 4 x float> [[VS2:%.*]], <vscale x 4 x bfloat> [[VS1:%.*]], i64 noundef [[VL:%.*]]) #[[ATTR0]] {
+// CHECK-RV64-NEXT: [[ENTRY:.*:]]
+// CHECK-RV64-NEXT: [[TMP0:%.*]] = call <vscale x 4 x float> @llvm.riscv.vfwadd.w.mask.nxv4f32.nxv4bf16.i64(<vscale x 4 x float> [[VD]], <vscale x 4 x float> [[VS2]], <vscale x 4 x bfloat> [[VS1]], <vscale x 4 x i1> [[VM]], i64 7, i64 [[VL]], i64 2)
+// CHECK-RV64-NEXT: ret <vscale x 4 x float> [[TMP0]]
+//
+vfloat32m2_t test_vfwadd_wv_bf16m1_f32m2_tum(vbool16_t vm, vfloat32m2_t vd,
+ vfloat32m2_t vs2,
+ vbfloat16m1_t vs1, size_t vl) {
+ return __riscv_vfwadd_wv_bf16m1_f32m2_tum(vm, vd, vs2, vs1, vl);
+}
+
+// CHECK-RV64-LABEL: define dso_local <vscale x 4 x float> @test_vfwadd_wf_bf16_f32m2_tum(
+// CHECK-RV64-SAME: <vscale x 4 x i1> [[VM:%.*]], <vscale x 4 x float> [[VD:%.*]], <vscale x 4 x float> [[VS2:%.*]], bfloat noundef [[RS1:%.*]], i64 noundef [[VL:%.*]]) #[[ATTR0]] {
+// CHECK-RV64-NEXT: [[ENTRY:.*:]]
+// CHECK-RV64-NEXT: [[TMP0:%.*]] = call <vscale x 4 x float> @llvm.riscv.vfwadd.w.mask.nxv4f32.bf16.i64(<vscale x 4 x float> [[VD]], <vscale x 4 x float> [[VS2]], bfloat [[RS1]], <vscale x 4 x i1> [[VM]], i64 7, i64 [[VL]], i64 2)
+// CHECK-RV64-NEXT: ret <vscale x 4 x float> [[TMP0]]
+//
+vfloat32m2_t test_vfwadd_wf_bf16_f32m2_tum(vbool16_t vm, vfloat32m2_t vd,
+ vfloat32m2_t vs2, __bf16 rs1,
+ size_t vl) {
+ return __riscv_vfwadd_wf_bf16_f32m2_tum(vm, vd, vs2, rs1, vl);
+}
+
+// CHECK-RV64-LABEL: define dso_local <vscale x 8 x float> @test_vfwadd_vv_bf16m2_f32m4_tum(
+// CHECK-RV64-SAME: <vscale x 8 x i1> [[VM:%.*]], <vscale x 8 x float> [[VD:%.*]], <vscale x 8 x bfloat> [[VS2:%.*]], <vscale x 8 x bfloat> [[VS1:%.*]], i64 noundef [[VL:%.*]]) #[[ATTR0]] {
+// CHECK-RV64-NEXT: [[ENTRY:.*:]]
+// CHECK-RV64-NEXT: [[TMP0:%.*]] = call <vscale x 8 x float> @llvm.riscv.vfwadd.mask.nxv8f32.nxv8bf16.nxv8bf16.i64(<vscale x 8 x float> [[VD]], <vscale x 8 x bfloat> [[VS2]], <vscale x 8 x bfloat> [[VS1]], <vscale x 8 x i1> [[VM]], i64 7, i64 [[VL]], i64 2)
+// CHECK-RV64-NEXT: ret <vscale x 8 x float> [[TMP0]]
+//
+vfloat32m4_t test_vfwadd_vv_bf16m2_f32m4_tum(vbool8_t vm, vfloat32m4_t vd,
+ vbfloat16m2_t vs2,
+ vbfloat16m2_t vs1, size_t vl) {
+ return __riscv_vfwadd_vv_bf16m2_f32m4_tum(vm, vd, vs2, vs1, vl);
+}
+
+// CHECK-RV64-LABEL: define dso_local <vscale x 8 x float> @test_vfwadd_vf_bf16m2_f32m4_tum(
+// CHECK-RV64-SAME: <vscale x 8 x i1> [[VM:%.*]], <vscale x 8 x float> [[VD:%.*]], <vscale x 8 x bfloat> [[VS2:%.*]], bfloat noundef [[RS1:%.*]], i64 noundef [[VL:%.*]]) #[[ATTR0]] {
+// CHECK-RV64-NEXT: [[ENTRY:.*:]]
+// CHECK-RV64-NEXT: [[TMP0:%.*]] = call <vscale x 8 x float> @llvm.riscv.vfwadd.mask.nxv8f32.nxv8bf16.bf16.i64(<vscale x 8 x float> [[VD]], <vscale x 8 x bfloat> [[VS2]], bfloat [[RS1]], <vscale x 8 x i1> [[VM]], i64 7, i64 [[VL]], i64 2)
+// CHECK-RV64-NEXT: ret <vscale x 8 x float> [[TMP0]]
+//
+vfloat32m4_t test_vfwadd_vf_bf16m2_f32m4_tum(vbool8_t vm, vfloat32m4_t vd,
+ vbfloat16m2_t vs2, __bf16 rs1,
+ size_t vl) {
+ return __riscv_vfwadd_vf_bf16m2_f32m4_tum(vm, vd, vs2, rs1, vl);
+}
+
+// CHECK-RV64-LABEL: define dso_local <vscale x 8 x float> @test_vfwadd_wv_bf16m2_f32m4_tum(
+// CHECK-RV64-SAME: <vscale x 8 x i1> [[VM:%.*]], <vscale x 8 x float> [[VD:%.*]], <vscale x 8 x float> [[VS2:%.*]], <vscale x 8 x bfloat> [[VS1:%.*]], i64 noundef [[VL:%.*]]) #[[ATTR0]] {
+// CHECK-RV64-NEXT: [[ENTRY:.*:]]
+// CHECK-RV64-NEXT: [[TMP0:%.*]] = call <vscale x 8 x float> @llvm.riscv.vfwadd.w.mask.nxv8f32.nxv8bf16.i64(<vscale x 8 x float> [[VD]], <vscale x 8 x float> [[VS2]], <vscale x 8 x bfloat> [[VS1]], <vscale x 8 x i1> [[VM]], i64 7, i64 [[VL]], i64 2)
+// CHECK-RV64-NEXT: ret <vscale x 8 x float> [[TMP0]]
+//
+vfloat32m4_t test_vfwadd_wv_bf16m2_f32m4_tum(vbool8_t vm, vfloat32m4_t vd,
+ vfloat32m4_t vs2,
+ vbfloat16m2_t vs1, size_t vl) {
+ return __riscv_vfwadd_wv_bf16m2_f32m4_tum(vm, vd, vs2, vs1, vl);
+}
+
+// CHECK-RV64-LABEL: define dso_local <vscale x 8 x float> @test_vfwadd_wf_bf16_f32m4_tum(
+// CHECK-RV64-SAME: <vscale x 8 x i1> [[VM:%.*]], <vscale x 8 x float> [[VD:%.*]], <vscale x 8 x float> [[VS2:%.*]], bfloat noundef [[RS1:%.*]], i64 noundef [[VL:%.*]]) #[[ATTR0]] {
+// CHECK-RV64-NEXT: [[ENTRY:.*:]]
+// CHECK-RV64-NEXT: [[TMP0:%.*]] = call <vscale x 8 x float> @llvm.riscv.vfwadd.w.mask.nxv8f32.bf16.i64(<vscale x 8 x float> [[VD]], <vscale x 8 x float> [[VS2]], bfloat [[RS1]], <vscale x 8 x i1> [[VM]], i64 7, i64 [[VL]], i64 2)
+// CHECK-RV64-NEXT: ret <vscale x 8 x float> [[TMP0]]
+//
+vfloat32m4_t test_vfwadd_wf_bf16_f32m4_tum(vbool8_t vm, vfloat32m4_t vd,
+ vfloat32m4_t vs2, __bf16 rs1,
+ size_t vl) {
+ return __riscv_vfwadd_wf_bf16_f32m4_tum(vm, vd, vs2, rs1, vl);
+}
+
+// CHECK-RV64-LABEL: define dso_local <vscale x 16 x float> @test_vfwadd_vv_bf16m4_f32m8_tum(
+// CHECK-RV64-SAME: <vscale x 16 x i1> [[VM:%.*]], <vscale x 16 x float> [[VD:%.*]], <vscale x 16 x bfloat> [[VS2:%.*]], <vscale x 16 x bfloat> [[VS1:%.*]], i64 noundef [[VL:%.*]]) #[[ATTR0]] {
+// CHECK-RV64-NEXT: [[ENTRY:.*:]]
+// CHECK-RV64-NEXT: [[TMP0:%.*]] = call <vscale x 16 x float> @llvm.riscv.vfwadd.mask.nxv16f32.nxv16bf16.nxv16bf16.i64(<vscale x 16 x float> [[VD]], <vscale x 16 x bfloat> [[VS2]], <vscale x 16 x bfloat> [[VS1]], <vscale x 16 x i1> [[VM]], i64 7, i64 [[VL]], i64 2)
+// CHECK-RV64-NEXT: ret <vscale x 16 x float> [[TMP0]]
+//
+vfloat32m8_t test_vfwadd_vv_bf16m4_f32m8_tum(vbool4_t vm, vfloat32m8_t vd,
+ vbfloat16m4_t vs2,
+ vbfloat16m4_t vs1, size_t vl) {
+ return __riscv_vfwadd_vv_bf16m4_f32m8_tum(vm, vd, vs2, vs1, vl);
+}
+
+// CHECK-RV64-LABEL: define dso_local <vscale x 16 x float> @test_vfwadd_vf_bf16m4_f32m8_tum(
+// CHECK-RV64-SAME: <vscale x 16 x i1> [[VM:%.*]], <vscale x 16 x float> [[VD:%.*]], <vscale x 16 x bfloat> [[VS2:%.*]], bfloat noundef [[RS1:%.*]], i64 noundef [[VL:%.*]]) #[[ATTR0]] {
+// CHECK-RV64-NEXT: [[ENTRY:.*:]]
+// CHECK-RV64-NEXT: [[TMP0:%.*]] = call <vscale x 16 x float> @llvm.riscv.vfwadd.mask.nxv16f32.nxv16bf16.bf16.i64(<vscale x 16 x float> [[VD]], <vscale x 16 x bfloat> [[VS2]], bfloat [[RS1]], <vscale x 16 x i1> [[VM]], i64 7, i64 [[VL]], i64 2)
+// CHECK-RV64-NEXT: ret <vscale x 16 x float> [[TMP0]]
+//
+vfloat32m8_t test_vfwadd_vf_bf16m4_f32m8_tum(vbool4_t vm, vfloat32m8_t vd,
+ vbfloat16m4_t vs2, __bf16 rs1,
+ size_t vl) {
+ return __riscv_vfwadd_vf_bf16m4_f32m8_tum(vm, vd, vs2, rs1, vl);
+}
+
+// CHECK-RV64-LABEL: define dso_local <vscale x 16 x float> @test_vfwadd_wv_bf16m4_f32m8_tum(
+// CHECK-RV64-SAME: <vscale x 16 x i1> [[VM:%.*]], <vscale x 16 x float> [[VD:%.*]], <vscale x 16 x float> [[VS2:%.*]], <vscale x 16 x bfloat> [[VS1:%.*]], i64 noundef [[VL:%.*]]) #[[ATTR0]] {
+// CHECK-RV64-NEXT: [[ENTRY:.*:]]
+// CHECK-RV64-NEXT: [[TMP0:%.*]] = call <vscale x 16 x float> @llvm.riscv.vfwadd.w.mask.nxv16f32.nxv16bf16.i64(<vscale x 16 x float> [[VD]], <vscale x 16 x float> [[VS2]], <vscale x 16 x bfloat> [[VS1]], <vscale x 16 x i1> [[VM]], i64 7, i64 [[VL]], i64 2)
+// CHECK-RV64-NEXT: ret <vscale x 16 x float> [[TMP0]]
+//
+vfloat32m8_t test_vfwadd_wv_bf16m4_f32m8_tum(vbool4_t vm, vfloat32m8_t vd,
+ vfloat32m8_t vs2,
+ vbfloat16m4_t vs1, size_t vl) {
+ return __riscv_vfwadd_wv_bf16m4_f32m8_tum(vm, vd, vs2, vs1, vl);
+}
+
+// CHECK-RV64-LABEL: define dso_local <vscale x 16 x float> @test_vfwadd_wf_bf16_f32m8_tum(
+// CHECK-RV64-SAME: <vscale x 16 x i1> [[VM:%.*]], <vscale x 16 x float> [[VD:%.*]], <vscale x 16 x float> [[VS2:%.*]], bfloat noundef [[RS1:%.*]], i64 noundef [[VL:%.*]]) #[[ATTR0]] {
+// CHECK-RV64-NEXT: [[ENTRY:.*:]]
+// CHECK-RV64-NEXT: [[TMP0:%.*]] = call <vscale x 16 x float> @llvm.riscv.vfwadd.w.mask.nxv16f32.bf16.i64(<vscale x 16 x float> [[VD]], <vscale x 16 x float> [[VS2]], bfloat [[RS1]], <vscale x 16 x i1> [[VM]], i64 7, i64 [[VL]], i64 2)
+// CHECK-RV64-NEXT: ret <vscale x 16 x float> [[TMP0]]
+//
+vfloat32m8_t test_vfwadd_wf_bf16_f32m8_tum(vbool4_t vm, vfloat32m8_t vd,
+ vfloat32m8_t vs2, __bf16 rs1,
+ size_t vl) {
+ return __riscv_vfwadd_wf_bf16_f32m8_tum(vm, vd, vs2, rs1, vl);
+}
+
+// CHECK-RV64-LABEL: define dso_local <vscale x 1 x float> @test_vfwadd_vv_bf16mf4_f32mf2_tumu(
+// CHECK-RV64-SAME: <vscale x 1 x i1> [[VM:%.*]], <vscale x 1 x float> [[VD:%.*]], <vscale x 1 x bfloat> [[VS2:%.*]], <vscale x 1 x bfloat> [[VS1:%.*]], i64 noundef [[VL:%.*]]) #[[ATTR0]] {
+// CHECK-RV64-NEXT: [[ENTRY:.*:]]
+// CHECK-RV64-NEXT: [[TMP0:%.*]] = call <vscale x 1 x float> @llvm.riscv.vfwadd.mask.nxv1f32.nxv1bf16.nxv1bf16.i64(<vscale x 1 x float> [[VD]], <vscale x 1 x bfloat> [[VS2]], <vscale x 1 x bfloat> [[VS1]], <vscale x 1 x i1> [[VM]], i64 7, i64 [[VL]], i64 0)
+// CHECK-RV64-NEXT: ret <vscale x 1 x float> [[TMP0]]
+//
+vfloat32mf2_t test_vfwadd_vv_bf16mf4_f32mf2_tumu(vbool64_t vm, vfloat32mf2_t vd,
+ vbfloat16mf4_t vs2,
+ vbfloat16mf4_t vs1,
+ size_t vl) {
+ return __riscv_vfwadd_vv_bf16mf4_f32mf2_tumu(vm, vd, vs2, vs1, vl);
+}
+
+// CHECK-RV64-LABEL: define dso_local <vscale x 1 x float> @test_vfwadd_vf_bf16mf4_f32mf2_tumu(
+// CHECK-RV64-SAME: <vscale x 1 x i1> [[VM:%.*]], <vscale x 1 x float> [[VD:%.*]], <vscale x 1 x bfloat> [[VS2:%.*]], bfloat noundef [[RS1:%.*]], i64 noundef [[VL:%.*]]) #[[ATTR0]] {
+// CHECK-RV64-NEXT: [[ENTRY:.*:]]
+// CHECK-RV64-NEXT: [[TMP0:%.*]] = call <vscale x 1 x float> @llvm.riscv.vfwadd.mask.nxv1f32.nxv1bf16.bf16.i64(<vscale x 1 x float> [[VD]], <vscale x 1 x bfloat> [[VS2]], bfloat [[RS1]], <vscale x 1 x i1> [[VM]], i64 7, i64 [[VL]], i64 0)
+// CHECK-RV64-NEXT: ret <vscale x 1 x float> [[TMP0]]
+//
+vfloat32mf2_t test_vfwadd_vf_bf16mf4_f32mf2_tumu(vbool64_t vm, vfloat32mf2_t vd,
+ vbfloat16mf4_t vs2, __bf16 rs1,
+ size_t vl) {
+ return __riscv_vfwadd_vf_bf16mf4_f32mf2_tumu(vm, vd, vs2, rs1, vl);
+}
+
+// CHECK-RV64-LABEL: define dso_local <vscale x 1 x float> @test_vfwadd_wv_bf16mf4_f32mf2_tumu(
+// CHECK-RV64-SAME: <vscale x 1 x i1> [[VM:%.*]], <vscale x 1 x float> [[VD:%.*]], <vscale x 1 x float> [[VS2:%.*]], <vscale x 1 x bfloat> [[VS1:%.*]], i64 noundef [[VL:%.*]]) #[[ATTR0]] {
+// CHECK-RV64-NEXT: [[ENTRY:.*:]]
+// CHECK-RV64-NEXT: [[TMP0:%.*]] = call <vscale x 1 x float> @llvm.riscv.vfwadd.w.mask.nxv1f32.nxv1bf16.i64(<vscale x 1 x float> [[VD]], <vscale x 1 x float> [[VS2]], <vscale x 1 x bfloat> [[VS1]], <vscale x 1 x i1> [[VM]], i64 7, i64 [[VL]], i64 0)
+// CHECK-RV64-NEXT: ret <vscale x 1 x float> [[TMP0]]
+//
+vfloat32mf2_t test_vfwadd_wv_bf16mf4_f32mf2_tumu(vbool64_t vm, vfloat32mf2_t vd,
+ vfloat32mf2_t vs2,
+ vbfloat16mf4_t vs1,
+ size_t vl) {
+ return __riscv_vfwadd_wv_bf16mf4_f32mf2_tumu(vm, vd, vs2, vs1, vl);
+}
+
+// CHECK-RV64-LABEL: define dso_local <vscale x 1 x float> @test_vfwadd_wf_bf16_f32mf2_tumu(
+// CHECK-RV64-SAME: <vscale x 1 x i1> [[VM:%.*]], <vscale x 1 x float> [[VD:%.*]], <vscale x 1 x float> [[VS2:%.*]], bfloat noundef [[RS1:%.*]], i64 noundef [[VL:%.*]]) #[[ATTR0]] {
+// CHECK-RV64-NEXT: [[ENTRY:.*:]]
+// CHECK-RV64-NEXT: [[TMP0:%.*]] = call <vscale x 1 x float> @llvm.riscv.vfwadd.w.mask.nxv1f32.bf16.i64(<vscale x 1 x float> [[VD]], <vscale x 1 x float> [[VS2]], bfloat [[RS1]], <vscale x 1 x i1> [[VM]], i64 7, i64 [[VL]], i64 0)
+// CHECK-RV64-NEXT: ret <vscale x 1 x float> [[TMP0]]
+//
+vfloat32mf2_t test_vfwadd_wf_bf16_f32mf2_tumu(vbool64_t vm, vfloat32mf2_t vd,
+ vfloat32mf2_t vs2, __bf16 rs1,
+ size_t vl) {
+ return __riscv_vfwadd_wf_bf16_f32mf2_tumu(vm, vd, vs2, rs1, vl);
+}
+
+// CHECK-RV64-LABEL: define dso_local <vscale x 2 x float> @test_vfwadd_vv_bf16mf2_f32m1_tumu(
+// CHECK-RV64-SAME: <vscale x 2 x i1> [[VM:%.*]], <vscale x 2 x float> [[VD:%.*]], <vscale x 2 x bfloat> [[VS2:%.*]], <vscale x 2 x bfloat> [[VS1:%.*]], i64 noundef [[VL:%.*]]) #[[ATTR0]] {
+// CHECK-RV64-NEXT: [[ENTRY:.*:]]
+// CHECK-RV64-NEXT: [[TMP0:%.*]] = call <vscale x 2 x float> @llvm.riscv.vfwadd.mask.nxv2f32.nxv2bf16.nxv2bf16.i64(<vscale x 2 x float> [[VD]], <vscale x 2 x bfloat> [[VS2]], <vscale x 2 x bfloat> [[VS1]], <vscale x 2 x i1> [[VM]], i64 7, i64 [[VL]], i64 0)
+// CHECK-RV64-NEXT: ret <vscale x 2 x float> [[TMP0]]
+//
+vfloat32m1_t test_vfwadd_vv_bf16mf2_f32m1_tumu(vbool32_t vm, vfloat32m1_t vd,
+ vbfloat16mf2_t vs2,
+ vbfloat16mf2_t vs1, size_t vl) {
+ return __riscv_vfwadd_vv_bf16mf2_f32m1_tumu(vm, vd, vs2, vs1, vl);
+}
+
+// CHECK-RV64-LABEL: define dso_local <vscale x 2 x float> @test_vfwadd_vf_bf16mf2_f32m1_tumu(
+// CHECK-RV64-SAME: <vscale x 2 x i1> [[VM:%.*]], <vscale x 2 x float> [[VD:%.*]], <vscale x 2 x bfloat> [[VS2:%.*]], bfloat noundef [[RS1:%.*]], i64 noundef [[VL:%.*]]) #[[ATTR0]] {
+// CHECK-RV64-NEXT: [[ENTRY:.*:]]
+// CHECK-RV64-NEXT: [[TMP0:%.*]] = call <vscale x 2 x float> @llvm.riscv.vfwadd.mask.nxv2f32.nxv2bf16.bf16.i64(<vscale x 2 x float> [[VD]], <vscale x 2 x bfloat> [[VS2]], bfloat [[RS1]], <vscale x 2 x i1> [[VM]], i64 7, i64 [[VL]], i64 0)
+// CHECK-RV64-NEXT: ret <vscale x 2 x float> [[TMP0]]
+//
+vfloat32m1_t test_vfwadd_vf_bf16mf2_f32m1_tumu(vbool32_t vm, vfloat32m1_t vd,
+ vbfloat16mf2_t vs2, __bf16 rs1,
+ size_t vl) {
+ return __riscv_vfwadd_vf_bf16mf2_f32m1_tumu(vm, vd, vs2, rs1, vl);
+}
+
+// CHECK-RV64-LABEL: define dso_local <vscale x 2 x float> @test_vfwadd_wv_bf16mf2_f32m1_tumu(
+// CHECK-RV64-SAME: <vscale x 2 x i1> [[VM:%.*]], <vscale x 2 x float> [[VD:%.*]], <vscale x 2 x float> [[VS2:%.*]], <vscale x 2 x bfloat> [[VS1:%.*]], i64 noundef [[VL:%.*]]) #[[ATTR0]] {
+// CHECK-RV64-NEXT: [[ENTRY:.*:]]
+// CHECK-RV64-NEXT: [[TMP0:%.*]] = call <vscale x 2 x float> @llvm.riscv.vfwadd.w.mask.nxv2f32.nxv2bf16.i64(<vscale x 2 x float> [[VD]], <vscale x 2 x float> [[VS2]], <vscale x 2 x bfloat> [[VS1]], <vscale x 2 x i1> [[VM]], i64 7, i64 [[VL]], i64 0)
+// CHECK-RV64-NEXT: ret <vscale x 2 x float> [[TMP0]]
+//
+vfloat32m1_t test_vfwadd_wv_bf16mf2_f32m1_tumu(vbool32_t vm, vfloat32m1_t vd,
+ vfloat32m1_t vs2,
+ vbfloat16mf2_t vs1, size_t vl) {
+ return __riscv_vfwadd_wv_bf16mf2_f32m1_tumu(vm, vd, vs2, vs1, vl);
+}
+
+// CHECK-RV64-LABEL: define dso_local <vscale x 2 x float> @test_vfwadd_wf_bf16_f32m1_tumu(
+// CHECK-RV64-SAME: <vscale x 2 x i1> [[VM:%.*]], <vscale x 2 x float> [[VD:%.*]], <vscale x 2 x float> [[VS2:%.*]], bfloat noundef [[RS1:%.*]], i64 noundef [[VL:%.*]]) #[[ATTR0]] {
+// CHECK-RV64-NEXT: [[ENTRY:.*:]]
+// CHECK-RV64-NEXT: [[TMP0:%.*]] = call <vscale x 2 x float> @llvm.riscv.vfwadd.w.mask.nxv2f32.bf16.i64(<vscale x 2 x float> [[VD]], <vscale x 2 x float> [[VS2]], bfloat [[RS1]], <vscale x 2 x i1> [[VM]], i64 7, i64 [[VL]], i64 0)
+// CHECK-RV64-NEXT: ret <vscale x 2 x float> [[TMP0]]
+//
+vfloat32m1_t test_vfwadd_wf_bf16_f32m1_tumu(vbool32_t vm, vfloat32m1_t vd,
+ vfloat32m1_t vs2, __bf16 rs1,
+ size_t vl) {
+ return __riscv_vfwadd_wf_bf16_f32m1_tumu(vm, vd, vs2, rs1, vl);
+}
+
+// CHECK-RV64-LABEL: define dso_local <vscale x 4 x float> @test_vfwadd_vv_bf16m1_f32m2_tumu(
+// CHECK-RV64-SAME: <vscale x 4 x i1> [[VM:%.*]], <vscale x 4 x float> [[VD:%.*]], <vscale x 4 x bfloat> [[VS2:%.*]], <vscale x 4 x bfloat> [[VS1:%.*]], i64 noundef [[VL:%.*]]) #[[ATTR0]] {
+// CHECK-RV64-NEXT: [[ENTRY:.*:]]
+// CHECK-RV64-NEXT: [[TMP0:%.*]] = call <vscale x 4 x float> @llvm.riscv.vfwadd.mask.nxv4f32.nxv4bf16.nxv4bf16.i64(<vscale x 4 x float> [[VD]], <vscale x 4 x bfloat> [[VS2]], <vscale x 4 x bfloat> [[VS1]], <vscale x 4 x i1> [[VM]], i64 7, i64 [[VL]], i64 0)
+// CHECK-RV64-NEXT: ret <vscale x 4 x float> [[TMP0]]
+//
+vfloat32m2_t test_vfwadd_vv_bf16m1_f32m2_tumu(vbool16_t vm, vfloat32m2_t vd,
+ vbfloat16m1_t vs2,
+ vbfloat16m1_t vs1, size_t vl) {
+ return __riscv_vfwadd_vv_bf16m1_f32m2_tumu(vm, vd, vs2, vs1, vl);
+}
+
+// CHECK-RV64-LABEL: define dso_local <vscale x 4 x float> @test_vfwadd_vf_bf16m1_f32m2_tumu(
+// CHECK-RV64-SAME: <vscale x 4 x i1> [[VM:%.*]], <vscale x 4 x float> [[VD:%.*]], <vscale x 4 x bfloat> [[VS2:%.*]], bfloat noundef [[RS1:%.*]], i64 noundef [[VL:%.*]]) #[[ATTR0]] {
+// CHECK-RV64-NEXT: [[ENTRY:.*:]]
+// CHECK-RV64-NEXT: [[TMP0:%.*]] = call <vscale x 4 x float> @llvm.riscv.vfwadd.mask.nxv4f32.nxv4bf16.bf16.i64(<vscale x 4 x float> [[VD]], <vscale x 4 x bfloat> [[VS2]], bfloat [[RS1]], <vscale x 4 x i1> [[VM]], i64 7, i64 [[VL]], i64 0)
+// CHECK-RV64-NEXT: ret <vscale x 4 x float> [[TMP0]]
+//
+vfloat32m2_t test_vfwadd_vf_bf16m1_f32m2_tumu(vbool16_t vm, vfloat32m2_t vd,
+ vbfloat16m1_t vs2, __bf16 rs1,
+ size_t vl) {
+ return __riscv_vfwadd_vf_bf16m1_f32m2_tumu(vm, vd, vs2, rs1, vl);
+}
+
+// CHECK-RV64-LABEL: define dso_local <vscale x 4 x float> @test_vfwadd_wv_bf16m1_f32m2_tumu(
+// CHECK-RV64-SAME: <vscale x 4 x i1> [[VM:%.*]], <vscale x 4 x float> [[VD:%.*]], <vscale x 4 x float> [[VS2:%.*]], <vscale x 4 x bfloat> [[VS1:%.*]], i64 noundef [[VL:%.*]]) #[[ATTR0]] {
+// CHECK-RV64-NEXT: [[ENTRY:.*:]]
+// CHECK-RV64-NEXT: [[TMP0:%.*]] = call <vscale x 4 x float> @llvm.riscv.vfwadd.w.mask.nxv4f32.nxv4bf16.i64(<vscale x 4 x float> [[VD]], <vscale x 4 x float> [[VS2]], <vscale x 4 x bfloat> [[VS1]], <vscale x 4 x i1> [[VM]], i64 7, i64 [[VL]], i64 0)
+// CHECK-RV64-NEXT: ret <vscale x 4 x float> [[TMP0]]
+//
+vfloat32m2_t test_vfwadd_wv_bf16m1_f32m2_tumu(vbool16_t vm, vfloat32m2_t vd,
+ vfloat32m2_t vs2,
+ vbfloat16m1_t vs1, size_t vl) {
+ return __riscv_vfwadd_wv_bf16m1_f32m2_tumu(vm, vd, vs2, vs1, vl);
+}
+
+// CHECK-RV64-LABEL: define dso_local <vscale x 4 x float> @test_vfwadd_wf_bf16_f32m2_tumu(
+// CHECK-RV64-SAME: <vscale x 4 x i1> [[VM:%.*]], <vscale x 4 x float> [[VD:%.*]], <vscale x 4 x float> [[VS2:%.*]], bfloat noundef [[RS1:%.*]], i64 noundef [[VL:%.*]]) #[[ATTR0]] {
+// CHECK-RV64-NEXT: [[ENTRY:.*:]]
+// CHECK-RV64-NEXT: [[TMP0:%.*]] = call <vscale x 4 x float> @llvm.riscv.vfwadd.w.mask.nxv4f32.bf16.i64(<vscale x 4 x float> [[VD]], <vscale x 4 x float> [[VS2]], bfloat [[RS1]], <vscale x 4 x i1> [[VM]], i64 7, i64 [[VL]], i64 0)
+// CHECK-RV64-NEXT: ret <vscale x 4 x float> [[TMP0]]
+//
+vfloat32m2_t test_vfwadd_wf_bf16_f32m2_tumu(vbool16_t vm, vfloat32m2_t vd,
+ vfloat32m2_t vs2, __bf16 rs1,
+ size_t vl) {
+ return __riscv_vfwadd_wf_bf16_f32m2_tumu(vm, vd, vs2, rs1, vl);
+}
+
+// CHECK-RV64-LABEL: define dso_local <vscale x 8 x float> @test_vfwadd_vv_bf16m2_f32m4_tumu(
+// CHECK-RV64-SAME: <vscale x 8 x i1> [[VM:%.*]], <vscale x 8 x float> [[VD:%.*]], <vscale x 8 x bfloat> [[VS2:%.*]], <vscale x 8 x bfloat> [[VS1:%.*]], i64 noundef [[VL:%.*]]) #[[ATTR0]] {
+// CHECK-RV64-NEXT: [[ENTRY:.*:]]
+// CHECK-RV64-NEXT: [[TMP0:%.*]] = call <vscale x 8 x float> @llvm.riscv.vfwadd.mask.nxv8f32.nxv8bf16.nxv8bf16.i64(<vscale x 8 x float> [[VD]], <vscale x 8 x bfloat> [[VS2]], <vscale x 8 x bfloat> [[VS1]], <vscale x 8 x i1> [[VM]], i64 7, i64 [[VL]], i64 0)
+// CHECK-RV64-NEXT: ret <vscale x 8 x float> [[TMP0]]
+//
+vfloat32m4_t test_vfwadd_vv_bf16m2_f32m4_tumu(vbool8_t vm, vfloat32m4_t vd,
+ vbfloat16m2_t vs2,
+ vbfloat16m2_t vs1, size_t vl) {
+ return __riscv_vfwadd_vv_bf16m2_f32m4_tumu(vm, vd, vs2, vs1, vl);
+}
+
+// CHECK-RV64-LABEL: define dso_local <vscale x 8 x float> @test_vfwadd_vf_bf16m2_f32m4_tumu(
+// CHECK-RV64-SAME: <vscale x 8 x i1> [[VM:%.*]], <vscale x 8 x float> [[VD:%.*]], <vscale x 8 x bfloat> [[VS2:%.*]], bfloat noundef [[RS1:%.*]], i64 noundef [[VL:%.*]]) #[[ATTR0]] {
+// CHECK-RV64-NEXT: [[ENTRY:.*:]]
+// CHECK-RV64-NEXT: [[TMP0:%.*]] = call <vscale x 8 x float> @llvm.riscv.vfwadd.mask.nxv8f32.nxv8bf16.bf16.i64(<vscale x 8 x float> [[VD]], <vscale x 8 x bfloat> [[VS2]], bfloat [[RS1]], <vscale x 8 x i1> [[VM]], i64 7, i64 [[VL]], i64 0)
+// CHECK-RV64-NEXT: ret <vscale x 8 x float> [[TMP0]]
+//
+vfloat32m4_t test_vfwadd_vf_bf16m2_f32m4_tumu(vbool8_t vm, vfloat32m4_t vd,
+ vbfloat16m2_t vs2, __bf16 rs1,
+ size_t vl) {
+ return __riscv_vfwadd_vf_bf16m2_f32m4_tumu(vm, vd, vs2, rs1, vl);
+}
+
+// CHECK-RV64-LABEL: define dso_local <vscale x 8 x float> @test_vfwadd_wv_bf16m2_f32m4_tumu(
+// CHECK-RV64-SAME: <vscale x 8 x i1> [[VM:%.*]], <vscale x 8 x float> [[VD:%.*]], <vscale x 8 x float> [[VS2:%.*]], <vscale x 8 x bfloat> [[VS1:%.*]], i64 noundef [[VL:%.*]]) #[[ATTR0]] {
+// CHECK-RV64-NEXT: [[ENTRY:.*:]]
+// CHECK-RV64-NEXT: [[TMP0:%.*]] = call <vscale x 8 x float> @llvm.riscv.vfwadd.w.mask.nxv8f32.nxv8bf16.i64(<vscale x 8 x float> [[VD]], <vscale x 8 x float> [[VS2]], <vscale x 8 x bfloat> [[VS1]], <vscale x 8 x i1> [[VM]], i64 7, i64 [[VL]], i64 0)
+// CHECK-RV64-NEXT: ret <vscale x 8 x float> [[TMP0]]
+//
+vfloat32m4_t test_vfwadd_wv_bf16m2_f32m4_tumu(vbool8_t vm, vfloat32m4_t vd,
+ vfloat32m4_t vs2,
+ vbfloat16m2_t vs1, size_t vl) {
+ return __riscv_vfwadd_wv_bf16m2_f32m4_tumu(vm, vd, vs2, vs1, vl);
+}
+
+// CHECK-RV64-LABEL: define dso_local <vscale x 8 x float> @test_vfwadd_wf_bf16_f32m4_tumu(
+// CHECK-RV64-SAME: <vscale x 8 x i1> [[VM:%.*]], <vscale x 8 x float> [[VD:%.*]], <vscale x 8 x float> [[VS2:%.*]], bfloat noundef [[RS1:%.*]], i64 noundef [[VL:%.*]]) #[[ATTR0]] {
+// CHECK-RV64-NEXT: [[ENTRY:.*:]]
+// CHECK-RV64-NEXT: [[TMP0:%.*]] = call <vscale x 8 x float> @llvm.riscv.vfwadd.w.mask.nxv8f32.bf16.i64(<vscale x 8 x float> [[VD]], <vscale x 8 x float> [[VS2]], bfloat [[RS1]], <vscale x 8 x i1> [[VM]], i64 7, i64 [[VL]], i64 0)
+// CHECK-RV64-NEXT: ret <vscale x 8 x float> [[TMP0]]
+//
+vfloat32m4_t test_vfwadd_wf_bf16_f32m4_tumu(vbool8_t vm, vfloat32m4_t vd,
+ vfloat32m4_t vs2, __bf16 rs1,
+ size_t vl) {
+ return __riscv_vfwadd_wf_bf16_f32m4_tumu(vm, vd, vs2, rs1, vl);
+}
+
+// CHECK-RV64-LABEL: define dso_local <vscale x 16 x float> @test_vfwadd_vv_bf16m4_f32m8_tumu(
+// CHECK-RV64-SAME: <vscale x 16 x i1> [[VM:%.*]], <vscale x 16 x float> [[VD:%.*]], <vscale x 16 x bfloat> [[VS2:%.*]], <vscale x 16 x bfloat> [[VS1:%.*]], i64 noundef [[VL:%.*]]) #[[ATTR0]] {
+// CHECK-RV64-NEXT: [[ENTRY:.*:]]
+// CHECK-RV64-NEXT: [[TMP0:%.*]] = call <vscale x 16 x float> @llvm.riscv.vfwadd.mask.nxv16f32.nxv16bf16.nxv16bf16.i64(<vscale x 16 x float> [[VD]], <vscale x 16 x bfloat> [[VS2]], <vscale x 16 x bfloat> [[VS1]], <vscale x 16 x i1> [[VM]], i64 7, i64 [[VL]], i64 0)
+// CHECK-RV64-NEXT: ret <vscale x 16 x float> [[TMP0]]
+//
+vfloat32m8_t test_vfwadd_vv_bf16m4_f32m8_tumu(vbool4_t vm, vfloat32m8_t vd,
+ vbfloat16m4_t vs2,
+ vbfloat16m4_t vs1, size_t vl) {
+ return __riscv_vfwadd_vv_bf16m4_f32m8_tumu(vm, vd, vs2, vs1, vl);
+}
+
+// CHECK-RV64-LABEL: define dso_local <vscale x 16 x float> @test_vfwadd_vf_bf16m4_f32m8_tumu(
+// CHECK-RV64-SAME: <vscale x 16 x i1> [[VM:%.*]], <vscale x 16 x float> [[VD:%.*]], <vscale x 16 x bfloat> [[VS2:%.*]], bfloat noundef [[RS1:%.*]], i64 noundef [[VL:%.*]]) #[[ATTR0]] {
+// CHECK-RV64-NEXT: [[ENTRY:.*:]]
+// CHECK-RV64-NEXT: [[TMP0:%.*]] = call <vscale x 16 x float> @llvm.riscv.vfwadd.mask.nxv16f32.nxv16bf16.bf16.i64(<vscale x 16 x float> [[VD]], <vscale x 16 x bfloat> [[VS2]], bfloat [[RS1]], <vscale x 16 x i1> [[VM]], i64 7, i64 [[VL]], i64 0)
+// CHECK-RV64-NEXT: ret <vscale x 16 x float> [[TMP0]]
+//
+vfloat32m8_t test_vfwadd_vf_bf16m4_f32m8_tumu(vbool4_t vm, vfloat32m8_t vd,
+ vbfloat16m4_t vs2, __bf16 rs1,
+ size_t vl) {
+ return __riscv_vfwadd_vf_bf16m4_f32m8_tumu(vm, vd, vs2, rs1, vl);
+}
+
+// CHECK-RV64-LABEL: define dso_local <vscale x 16 x float> @test_vfwadd_wv_bf16m4_f32m8_tumu(
+// CHECK-RV64-SAME: <vscale x 16 x i1> [[VM:%.*]], <vscale x 16 x float> [[VD:%.*]], <vscale x 16 x float> [[VS2:%.*]], <vscale x 16 x bfloat> [[VS1:%.*]], i64 noundef [[VL:%.*]]) #[[ATTR0]] {
+// CHECK-RV64-NEXT: [[ENTRY:.*:]]
+// CHECK-RV64-NEXT: [[TMP0:%.*]] = call <vscale x 16 x float> @llvm.riscv.vfwadd.w.mask.nxv16f32.nxv16bf16.i64(<vscale x 16 x float> [[VD]], <vscale x 16 x float> [[VS2]], <vscale x 16 x bfloat> [[VS1]], <vscale x 16 x i1> [[VM]], i64 7, i64 [[VL]], i64 0)
+// CHECK-RV64-NEXT: ret <vscale x 16 x float> [[TMP0]]
+//
+vfloat32m8_t test_vfwadd_wv_bf16m4_f32m8_tumu(vbool4_t vm, vfloat32m8_t vd,
+ vfloat32m8_t vs2,
+ vbfloat16m4_t vs1, size_t vl) {
+ return __riscv_vfwadd_wv_bf16m4_f32m8_tumu(vm, vd, vs2, vs1, vl);
+}
+
+// CHECK-RV64-LABEL: define dso_local <vscale x 16 x float> @test_vfwadd_wf_bf16_f32m8_tumu(
+// CHECK-RV64-SAME: <vscale x 16 x i1> [[VM:%.*]], <vscale x 16 x float> [[VD:%.*]], <vscale x 16 x float> [[VS2:%.*]], bfloat noundef [[RS1:%.*]], i64 noundef [[VL:%.*]]) #[[ATTR0]] {
+// CHECK-RV64-NEXT: [[ENTRY:.*:]]
+// CHECK-RV64-NEXT: [[TMP0:%.*]] = call <vscale x 16 x float> @llvm.riscv.vfwadd.w.mask.nxv16f32.bf16.i64(<vscale x 16 x float> [[VD]], <vscale x 16 x float> [[VS2]], bfloat [[RS1]], <vscale x 16 x i1> [[VM]], i64 7, i64 [[VL]], i64 0)
+// CHECK-RV64-NEXT: ret <vscale x 16 x float> [[TMP0]]
+//
+vfloat32m8_t test_vfwadd_wf_bf16_f32m8_tumu(vbool4_t vm, vfloat32m8_t vd,
+ vfloat32m8_t vs2, __bf16 rs1,
+ size_t vl) {
+ return __riscv_vfwadd_wf_bf16_f32m8_tumu(vm, vd, vs2, rs1, vl);
+}
+
+// CHECK-RV64-LABEL: define dso_local <vscale x 1 x float> @test_vfwadd_vv_bf16mf4_f32mf2_mu(
+// CHECK-RV64-SAME: <vscale x 1 x i1> [[VM:%.*]], <vscale x 1 x float> [[VD:%.*]], <vscale x 1 x bfloat> [[VS2:%.*]], <vscale x 1 x bfloat> [[VS1:%.*]], i64 noundef [[VL:%.*]]) #[[ATTR0]] {
+// CHECK-RV64-NEXT: [[ENTRY:.*:]]
+// CHECK-RV64-NEXT: [[TMP0:%.*]] = call <vscale x 1 x float> @llvm.riscv.vfwadd.mask.nxv1f32.nxv1bf16.nxv1bf16.i64(<vscale x 1 x float> [[VD]], <vscale x 1 x bfloat> [[VS2]], <vscale x 1 x bfloat> [[VS1]], <vscale x 1 x i1> [[VM]], i64 7, i64 [[VL]], i64 1)
+// CHECK-RV64-NEXT: ret <vscale x 1 x float> [[TMP0]]
+//
+vfloat32mf2_t test_vfwadd_vv_bf16mf4_f32mf2_mu(vbool64_t vm, vfloat32mf2_t vd,
+ vbfloat16mf4_t vs2,
+ vbfloat16mf4_t vs1, size_t vl) {
+ return __riscv_vfwadd_vv_bf16mf4_f32mf2_mu(vm, vd, vs2, vs1, vl);
+}
+
+// CHECK-RV64-LABEL: define dso_local <vscale x 1 x float> @test_vfwadd_vf_bf16mf4_f32mf2_mu(
+// CHECK-RV64-SAME: <vscale x 1 x i1> [[VM:%.*]], <vscale x 1 x float> [[VD:%.*]], <vscale x 1 x bfloat> [[VS2:%.*]], bfloat noundef [[RS1:%.*]], i64 noundef [[VL:%.*]]) #[[ATTR0]] {
+// CHECK-RV64-NEXT: [[ENTRY:.*:]]
+// CHECK-RV64-NEXT: [[TMP0:%.*]] = call <vscale x 1 x float> @llvm.riscv.vfwadd.mask.nxv1f32.nxv1bf16.bf16.i64(<vscale x 1 x float> [[VD]], <vscale x 1 x bfloat> [[VS2]], bfloat [[RS1]], <vscale x 1 x i1> [[VM]], i64 7, i64 [[VL]], i64 1)
+// CHECK-RV64-NEXT: ret <vscale x 1 x float> [[TMP0]]
+//
+vfloat32mf2_t test_vfwadd_vf_bf16mf4_f32mf2_mu(vbool64_t vm, vfloat32mf2_t vd,
+ vbfloat16mf4_t vs2, __bf16 rs1,
+ size_t vl) {
+ return __riscv_vfwadd_vf_bf16mf4_f32mf2_mu(vm, vd, vs2, rs1, vl);
+}
+
+// CHECK-RV64-LABEL: define dso_local <vscale x 1 x float> @test_vfwadd_wv_bf16mf4_f32mf2_mu(
+// CHECK-RV64-SAME: <vscale x 1 x i1> [[VM:%.*]], <vscale x 1 x float> [[VD:%.*]], <vscale x 1 x float> [[VS2:%.*]], <vscale x 1 x bfloat> [[VS1:%.*]], i64 noundef [[VL:%.*]]) #[[ATTR0]] {
+// CHECK-RV64-NEXT: [[ENTRY:.*:]]
+// CHECK-RV64-NEXT: [[TMP0:%.*]] = call <vscale x 1 x float> @llvm.riscv.vfwadd.w.mask.nxv1f32.nxv1bf16.i64(<vscale x 1 x float> [[VD]], <vscale x 1 x float> [[VS2]], <vscale x 1 x bfloat> [[VS1]], <vscale x 1 x i1> [[VM]], i64 7, i64 [[VL]], i64 1)
+// CHECK-RV64-NEXT: ret <vscale x 1 x float> [[TMP0]]
+//
+vfloat32mf2_t test_vfwadd_wv_bf16mf4_f32mf2_mu(vbool64_t vm, vfloat32mf2_t vd,
+ vfloat32mf2_t vs2,
+ vbfloat16mf4_t vs1, size_t vl) {
+ return __riscv_vfwadd_wv_bf16mf4_f32mf2_mu(vm, vd, vs2, vs1, vl);
+}
+
+// CHECK-RV64-LABEL: define dso_local <vscale x 1 x float> @test_vfwadd_wf_bf16_f32mf2_mu(
+// CHECK-RV64-SAME: <vscale x 1 x i1> [[VM:%.*]], <vscale x 1 x float> [[VD:%.*]], <vscale x 1 x float> [[VS2:%.*]], bfloat noundef [[RS1:%.*]], i64 noundef [[VL:%.*]]) #[[ATTR0]] {
+// CHECK-RV64-NEXT: [[ENTRY:.*:]]
+// CHECK-RV64-NEXT: [[TMP0:%.*]] = call <vscale x 1 x float> @llvm.riscv.vfwadd.w.mask.nxv1f32.bf16.i64(<vscale x 1 x float> [[VD]], <vscale x 1 x float> [[VS2]], bfloat [[RS1]], <vscale x 1 x i1> [[VM]], i64 7, i64 [[VL]], i64 1)
+// CHECK-RV64-NEXT: ret <vscale x 1 x float> [[TMP0]]
+//
+vfloat32mf2_t test_vfwadd_wf_bf16_f32mf2_mu(vbool64_t vm, vfloat32mf2_t vd,
+ vfloat32mf2_t vs2, __bf16 rs1,
+ size_t vl) {
+ return __riscv_vfwadd_wf_bf16_f32mf2_mu(vm, vd, vs2, rs1, vl);
+}
+
+// CHECK-RV64-LABEL: define dso_local <vscale x 2 x float> @test_vfwadd_vv_bf16mf2_f32m1_mu(
+// CHECK-RV64-SAME: <vscale x 2 x i1> [[VM:%.*]], <vscale x 2 x float> [[VD:%.*]], <vscale x 2 x bfloat> [[VS2:%.*]], <vscale x 2 x bfloat> [[VS1:%.*]], i64 noundef [[VL:%.*]]) #[[ATTR0]] {
+// CHECK-RV64-NEXT: [[ENTRY:.*:]]
+// CHECK-RV64-NEXT: [[TMP0:%.*]] = call <vscale x 2 x float> @llvm.riscv.vfwadd.mask.nxv2f32.nxv2bf16.nxv2bf16.i64(<vscale x 2 x float> [[VD]], <vscale x 2 x bfloat> [[VS2]], <vscale x 2 x bfloat> [[VS1]], <vscale x 2 x i1> [[VM]], i64 7, i64 [[VL]], i64 1)
+// CHECK-RV64-NEXT: ret <vscale x 2 x float> [[TMP0]]
+//
+vfloat32m1_t test_vfwadd_vv_bf16mf2_f32m1_mu(vbool32_t vm, vfloat32m1_t vd,
+ vbfloat16mf2_t vs2,
+ vbfloat16mf2_t vs1, size_t vl) {
+ return __riscv_vfwadd_vv_bf16mf2_f32m1_mu(vm, vd, vs2, vs1, vl);
+}
+
+// CHECK-RV64-LABEL: define dso_local <vscale x 2 x float> @test_vfwadd_vf_bf16mf2_f32m1_mu(
+// CHECK-RV64-SAME: <vscale x 2 x i1> [[VM:%.*]], <vscale x 2 x float> [[VD:%.*]], <vscale x 2 x bfloat> [[VS2:%.*]], bfloat noundef [[RS1:%.*]], i64 noundef [[VL:%.*]]) #[[ATTR0]] {
+// CHECK-RV64-NEXT: [[ENTRY:.*:]]
+// CHECK-RV64-NEXT: [[TMP0:%.*]] = call <vscale x 2 x float> @llvm.riscv.vfwadd.mask.nxv2f32.nxv2bf16.bf16.i64(<vscale x 2 x float> [[VD]], <vscale x 2 x bfloat> [[VS2]], bfloat [[RS1]], <vscale x 2 x i1> [[VM]], i64 7, i64 [[VL]], i64 1)
+// CHECK-RV64-NEXT: ret <vscale x 2 x float> [[TMP0]]
+//
+vfloat32m1_t test_vfwadd_vf_bf16mf2_f32m1_mu(vbool32_t vm, vfloat32m1_t vd,
+ vbfloat16mf2_t vs2, __bf16 rs1,
+ size_t vl) {
+ return __riscv_vfwadd_vf_bf16mf2_f32m1_mu(vm, vd, vs2, rs1, vl);
+}
+
+// CHECK-RV64-LABEL: define dso_local <vscale x 2 x float> @test_vfwadd_wv_bf16mf2_f32m1_mu(
+// CHECK-RV64-SAME: <vscale x 2 x i1> [[VM:%.*]], <vscale x 2 x float> [[VD:%.*]], <vscale x 2 x float> [[VS2:%.*]], <vscale x 2 x bfloat> [[VS1:%.*]], i64 noundef [[VL:%.*]]) #[[ATTR0]] {
+// CHECK-RV64-NEXT: [[ENTRY:.*:]]
+// CHECK-RV64-NEXT: [[TMP0:%.*]] = call <vscale x 2 x float> @llvm.riscv.vfwadd.w.mask.nxv2f32.nxv2bf16.i64(<vscale x 2 x float> [[VD]], <vscale x 2 x float> [[VS2]], <vscale x 2 x bfloat> [[VS1]], <vscale x 2 x i1> [[VM]], i64 7, i64 [[VL]], i64 1)
+// CHECK-RV64-NEXT: ret <vscale x 2 x float> [[TMP0]]
+//
+vfloat32m1_t test_vfwadd_wv_bf16mf2_f32m1_mu(vbool32_t vm, vfloat32m1_t vd,
+ vfloat32m1_t vs2,
+ vbfloat16mf2_t vs1, size_t vl) {
+ return __riscv_vfwadd_wv_bf16mf2_f32m1_mu(vm, vd, vs2, vs1, vl);
+}
+
+// CHECK-RV64-LABEL: define dso_local <vscale x 2 x float> @test_vfwadd_wf_bf16_f32m1_mu(
+// CHECK-RV64-SAME: <vscale x 2 x i1> [[VM:%.*]], <vscale x 2 x float> [[VD:%.*]], <vscale x 2 x float> [[VS2:%.*]], bfloat noundef [[RS1:%.*]], i64 noundef [[VL:%.*]]) #[[ATTR0]] {
+// CHECK-RV64-NEXT: [[ENTRY:.*:]]
+// CHECK-RV64-NEXT: [[TMP0:%.*]] = call <vscale x 2 x float> @llvm.riscv.vfwadd.w.mask.nxv2f32.bf16.i64(<vscale x 2 x float> [[VD]], <vscale x 2 x float> [[VS2]], bfloat [[RS1]], <vscale x 2 x i1> [[VM]], i64 7, i64 [[VL]], i64 1)
+// CHECK-RV64-NEXT: ret <vscale x 2 x float> [[TMP0]]
+//
+vfloat32m1_t test_vfwadd_wf_bf16_f32m1_mu(vbool32_t vm, vfloat32m1_t vd,
+ vfloat32m1_t vs2, __bf16 rs1,
+ size_t vl) {
+ return __riscv_vfwadd_wf_bf16_f32m1_mu(vm, vd, vs2, rs1, vl);
+}
+
+// CHECK-RV64-LABEL: define dso_local <vscale x 4 x float> @test_vfwadd_vv_bf16m1_f32m2_mu(
+// CHECK-RV64-SAME: <vscale x 4 x i1> [[VM:%.*]], <vscale x 4 x float> [[VD:%.*]], <vscale x 4 x bfloat> [[VS2:%.*]], <vscale x 4 x bfloat> [[VS1:%.*]], i64 noundef [[VL:%.*]]) #[[ATTR0]] {
+// CHECK-RV64-NEXT: [[ENTRY:.*:]]
+// CHECK-RV64-NEXT: [[TMP0:%.*]] = call <vscale x 4 x float> @llvm.riscv.vfwadd.mask.nxv4f32.nxv4bf16.nxv4bf16.i64(<vscale x 4 x float> [[VD]], <vscale x 4 x bfloat> [[VS2]], <vscale x 4 x bfloat> [[VS1]], <vscale x 4 x i1> [[VM]], i64 7, i64 [[VL]], i64 1)
+// CHECK-RV64-NEXT: ret <vscale x 4 x float> [[TMP0]]
+//
+vfloat32m2_t test_vfwadd_vv_bf16m1_f32m2_mu(vbool16_t vm, vfloat32m2_t vd,
+ vbfloat16m1_t vs2,
+ vbfloat16m1_t vs1, size_t vl) {
+ return __riscv_vfwadd_vv_bf16m1_f32m2_mu(vm, vd, vs2, vs1, vl);
+}
+
+// CHECK-RV64-LABEL: define dso_local <vscale x 4 x float> @test_vfwadd_vf_bf16m1_f32m2_mu(
+// CHECK-RV64-SAME: <vscale x 4 x i1> [[VM:%.*]], <vscale x 4 x float> [[VD:%.*]], <vscale x 4 x bfloat> [[VS2:%.*]], bfloat noundef [[RS1:%.*]], i64 noundef [[VL:%.*]]) #[[ATTR0]] {
+// CHECK-RV64-NEXT: [[ENTRY:.*:]]
+// CHECK-RV64-NEXT: [[TMP0:%.*]] = call <vscale x 4 x float> @llvm.riscv.vfwadd.mask.nxv4f32.nxv4bf16.bf16.i64(<vscale x 4 x float> [[VD]], <vscale x 4 x bfloat> [[VS2]], bfloat [[RS1]], <vscale x 4 x i1> [[VM]], i64 7, i64 [[VL]], i64 1)
+// CHECK-RV64-NEXT: ret <vscale x 4 x float> [[TMP0]]
+//
+vfloat32m2_t test_vfwadd_vf_bf16m1_f32m2_mu(vbool16_t vm, vfloat32m2_t vd,
+ vbfloat16m1_t vs2, __bf16 rs1,
+ size_t vl) {
+ return __riscv_vfwadd_vf_bf16m1_f32m2_mu(vm, vd, vs2, rs1, vl);
+}
+
+// CHECK-RV64-LABEL: define dso_local <vscale x 4 x float> @test_vfwadd_wv_bf16m1_f32m2_mu(
+// CHECK-RV64-SAME: <vscale x 4 x i1> [[VM:%.*]], <vscale x 4 x float> [[VD:%.*]], <vscale x 4 x float> [[VS2:%.*]], <vscale x 4 x bfloat> [[VS1:%.*]], i64 noundef [[VL:%.*]]) #[[ATTR0]] {
+// CHECK-RV64-NEXT: [[ENTRY:.*:]]
+// CHECK-RV64-NEXT: [[TMP0:%.*]] = call <vscale x 4 x float> @llvm.riscv.vfwadd.w.mask.nxv4f32.nxv4bf16.i64(<vscale x 4 x float> [[VD]], <vscale x 4 x float> [[VS2]], <vscale x 4 x bfloat> [[VS1]], <vscale x 4 x i1> [[VM]], i64 7, i64 [[VL]], i64 1)
+// CHECK-RV64-NEXT: ret <vscale x 4 x float> [[TMP0]]
+//
+vfloat32m2_t test_vfwadd_wv_bf16m1_f32m2_mu(vbool16_t vm, vfloat32m2_t vd,
+ vfloat32m2_t vs2, vbfloat16m1_t vs1,
+ size_t vl) {
+ return __riscv_vfwadd_wv_bf16m1_f32m2_mu(vm, vd, vs2, vs1, vl);
+}
+
+// CHECK-RV64-LABEL: define dso_local <vscale x 4 x float> @test_vfwadd_wf_bf16_f32m2_mu(
+// CHECK-RV64-SAME: <vscale x 4 x i1> [[VM:%.*]], <vscale x 4 x float> [[VD:%.*]], <vscale x 4 x float> [[VS2:%.*]], bfloat noundef [[RS1:%.*]], i64 noundef [[VL:%.*]]) #[[ATTR0]] {
+// CHECK-RV64-NEXT: [[ENTRY:.*:]]
+// CHECK-RV64-NEXT: [[TMP0:%.*]] = call <vscale x 4 x float> @llvm.riscv.vfwadd.w.mask.nxv4f32.bf16.i64(<vscale x 4 x float> [[VD]], <vscale x 4 x float> [[VS2]], bfloat [[RS1]], <vscale x 4 x i1> [[VM]], i64 7, i64 [[VL]], i64 1)
+// CHECK-RV64-NEXT: ret <vscale x 4 x float> [[TMP0]]
+//
+vfloat32m2_t test_vfwadd_wf_bf16_f32m2_mu(vbool16_t vm, vfloat32m2_t vd,
+ vfloat32m2_t vs2, __bf16 rs1,
+ size_t vl) {
+ return __riscv_vfwadd_wf_bf16_f32m2_mu(vm, vd, vs2, rs1, vl);
+}
+
+// CHECK-RV64-LABEL: define dso_local <vscale x 8 x float> @test_vfwadd_vv_bf16m2_f32m4_mu(
+// CHECK-RV64-SAME: <vscale x 8 x i1> [[VM:%.*]], <vscale x 8 x float> [[VD:%.*]], <vscale x 8 x bfloat> [[VS2:%.*]], <vscale x 8 x bfloat> [[VS1:%.*]], i64 noundef [[VL:%.*]]) #[[ATTR0]] {
+// CHECK-RV64-NEXT: [[ENTRY:.*:]]
+// CHECK-RV64-NEXT: [[TMP0:%.*]] = call <vscale x 8 x float> @llvm.riscv.vfwadd.mask.nxv8f32.nxv8bf16.nxv8bf16.i64(<vscale x 8 x float> [[VD]], <vscale x 8 x bfloat> [[VS2]], <vscale x 8 x bfloat> [[VS1]], <vscale x 8 x i1> [[VM]], i64 7, i64 [[VL]], i64 1)
+// CHECK-RV64-NEXT: ret <vscale x 8 x float> [[TMP0]]
+//
+vfloat32m4_t test_vfwadd_vv_bf16m2_f32m4_mu(vbool8_t vm, vfloat32m4_t vd,
+ vbfloat16m2_t vs2,
+ vbfloat16m2_t vs1, size_t vl) {
+ return __riscv_vfwadd_vv_bf16m2_f32m4_mu(vm, vd, vs2, vs1, vl);
+}
+
+// CHECK-RV64-LABEL: define dso_local <vscale x 8 x float> @test_vfwadd_vf_bf16m2_f32m4_mu(
+// CHECK-RV64-SAME: <vscale x 8 x i1> [[VM:%.*]], <vscale x 8 x float> [[VD:%.*]], <vscale x 8 x bfloat> [[VS2:%.*]], bfloat noundef [[RS1:%.*]], i64 noundef [[VL:%.*]]) #[[ATTR0]] {
+// CHECK-RV64-NEXT: [[ENTRY:.*:]]
+// CHECK-RV64-NEXT: [[TMP0:%.*]] = call <vscale x 8 x float> @llvm.riscv.vfwadd.mask.nxv8f32.nxv8bf16.bf16.i64(<vscale x 8 x float> [[VD]], <vscale x 8 x bfloat> [[VS2]], bfloat [[RS1]], <vscale x 8 x i1> [[VM]], i64 7, i64 [[VL]], i64 1)
+// CHECK-RV64-NEXT: ret <vscale x 8 x float> [[TMP0]]
+//
+vfloat32m4_t test_vfwadd_vf_bf16m2_f32m4_mu(vbool8_t vm, vfloat32m4_t vd,
+ vbfloat16m2_t vs2, __bf16 rs1,
+ size_t vl) {
+ return __riscv_vfwadd_vf_bf16m2_f32m4_mu(vm, vd, vs2, rs1, vl);
+}
+
+// CHECK-RV64-LABEL: define dso_local <vscale x 8 x float> @test_vfwadd_wv_bf16m2_f32m4_mu(
+// CHECK-RV64-SAME: <vscale x 8 x i1> [[VM:%.*]], <vscale x 8 x float> [[VD:%.*]], <vscale x 8 x float> [[VS2:%.*]], <vscale x 8 x bfloat> [[VS1:%.*]], i64 noundef [[VL:%.*]]) #[[ATTR0]] {
+// CHECK-RV64-NEXT: [[ENTRY:.*:]]
+// CHECK-RV64-NEXT: [[TMP0:%.*]] = call <vscale x 8 x float> @llvm.riscv.vfwadd.w.mask.nxv8f32.nxv8bf16.i64(<vscale x 8 x float> [[VD]], <vscale x 8 x float> [[VS2]], <vscale x 8 x bfloat> [[VS1]], <vscale x 8 x i1> [[VM]], i64 7, i64 [[VL]], i64 1)
+// CHECK-RV64-NEXT: ret <vscale x 8 x float> [[TMP0]]
+//
+vfloat32m4_t test_vfwadd_wv_bf16m2_f32m4_mu(vbool8_t vm, vfloat32m4_t vd,
+ vfloat32m4_t vs2, vbfloat16m2_t vs1,
+ size_t vl) {
+ return __riscv_vfwadd_wv_bf16m2_f32m4_mu(vm, vd, vs2, vs1, vl);
+}
+
+// CHECK-RV64-LABEL: define dso_local <vscale x 8 x float> @test_vfwadd_wf_bf16_f32m4_mu(
+// CHECK-RV64-SAME: <vscale x 8 x i1> [[VM:%.*]], <vscale x 8 x float> [[VD:%.*]], <vscale x 8 x float> [[VS2:%.*]], bfloat noundef [[RS1:%.*]], i64 noundef [[VL:%.*]]) #[[ATTR0]] {
+// CHECK-RV64-NEXT: [[ENTRY:.*:]]
+// CHECK-RV64-NEXT: [[TMP0:%.*]] = call <vscale x 8 x float> @llvm.riscv.vfwadd.w.mask.nxv8f32.bf16.i64(<vscale x 8 x float> [[VD]], <vscale x 8 x float> [[VS2]], bfloat [[RS1]], <vscale x 8 x i1> [[VM]], i64 7, i64 [[VL]], i64 1)
+// CHECK-RV64-NEXT: ret <vscale x 8 x float> [[TMP0]]
+//
+vfloat32m4_t test_vfwadd_wf_bf16_f32m4_mu(vbool8_t vm, vfloat32m4_t vd,
+ vfloat32m4_t vs2, __bf16 rs1,
+ size_t vl) {
+ return __riscv_vfwadd_wf_bf16_f32m4_mu(vm, vd, vs2, rs1, vl);
+}
+
+// CHECK-RV64-LABEL: define dso_local <vscale x 16 x float> @test_vfwadd_vv_bf16m4_f32m8_mu(
+// CHECK-RV64-SAME: <vscale x 16 x i1> [[VM:%.*]], <vscale x 16 x float> [[VD:%.*]], <vscale x 16 x bfloat> [[VS2:%.*]], <vscale x 16 x bfloat> [[VS1:%.*]], i64 noundef [[VL:%.*]]) #[[ATTR0]] {
+// CHECK-RV64-NEXT: [[ENTRY:.*:]]
+// CHECK-RV64-NEXT: [[TMP0:%.*]] = call <vscale x 16 x float> @llvm.riscv.vfwadd.mask.nxv16f32.nxv16bf16.nxv16bf16.i64(<vscale x 16 x float> [[VD]], <vscale x 16 x bfloat> [[VS2]], <vscale x 16 x bfloat> [[VS1]], <vscale x 16 x i1> [[VM]], i64 7, i64 [[VL]], i64 1)
+// CHECK-RV64-NEXT: ret <vscale x 16 x float> [[TMP0]]
+//
+vfloat32m8_t test_vfwadd_vv_bf16m4_f32m8_mu(vbool4_t vm, vfloat32m8_t vd,
+ vbfloat16m4_t vs2,
+ vbfloat16m4_t vs1, size_t vl) {
+ return __riscv_vfwadd_vv_bf16m4_f32m8_mu(vm, vd, vs2, vs1, vl);
+}
+
+// CHECK-RV64-LABEL: define dso_local <vscale x 16 x float> @test_vfwadd_vf_bf16m4_f32m8_mu(
+// CHECK-RV64-SAME: <vscale x 16 x i1> [[VM:%.*]], <vscale x 16 x float> [[VD:%.*]], <vscale x 16 x bfloat> [[VS2:%.*]], bfloat noundef [[RS1:%.*]], i64 noundef [[VL:%.*]]) #[[ATTR0]] {
+// CHECK-RV64-NEXT: [[ENTRY:.*:]]
+// CHECK-RV64-NEXT: [[TMP0:%.*]] = call <vscale x 16 x float> @llvm.riscv.vfwadd.mask.nxv16f32.nxv16bf16.bf16.i64(<vscale x 16 x float> [[VD]], <vscale x 16 x bfloat> [[VS2]], bfloat [[RS1]], <vscale x 16 x i1> [[VM]], i64 7, i64 [[VL]], i64 1)
+// CHECK-RV64-NEXT: ret <vscale x 16 x float> [[TMP0]]
+//
+vfloat32m8_t test_vfwadd_vf_bf16m4_f32m8_mu(vbool4_t vm, vfloat32m8_t vd,
+ vbfloat16m4_t vs2, __bf16 rs1,
+ size_t vl) {
+ return __riscv_vfwadd_vf_bf16m4_f32m8_mu(vm, vd, vs2, rs1, vl);
+}
+
+// CHECK-RV64-LABEL: define dso_local <vscale x 16 x float> @test_vfwadd_wv_bf16m4_f32m8_mu(
+// CHECK-RV64-SAME: <vscale x 16 x i1> [[VM:%.*]], <vscale x 16 x float> [[VD:%.*]], <vscale x 16 x float> [[VS2:%.*]], <vscale x 16 x bfloat> [[VS1:%.*]], i64 noundef [[VL:%.*]]) #[[ATTR0]] {
+// CHECK-RV64-NEXT: [[ENTRY:.*:]]
+// CHECK-RV64-NEXT: [[TMP0:%.*]] = call <vscale x 16 x float> @llvm.riscv.vfwadd.w.mask.nxv16f32.nxv16bf16.i64(<vscale x 16 x float> [[VD]], <vscale x 16 x float> [[VS2]], <vscale x 16 x bfloat> [[VS1]], <vscale x 16 x i1> [[VM]], i64 7, i64 [[VL]], i64 1)
+// CHECK-RV64-NEXT: ret <vscale x 16 x float> [[TMP0]]
+//
+vfloat32m8_t test_vfwadd_wv_bf16m4_f32m8_mu(vbool4_t vm, vfloat32m8_t vd,
+ vfloat32m8_t vs2, vbfloat16m4_t vs1,
+ size_t vl) {
+ return __riscv_vfwadd_wv_bf16m4_f32m8_mu(vm, vd, vs2, vs1, vl);
+}
+
+// CHECK-RV64-LABEL: define dso_local <vscale x 16 x float> @test_vfwadd_wf_bf16_f32m8_mu(
+// CHECK-RV64-SAME: <vscale x 16 x i1> [[VM:%.*]], <vscale x 16 x float> [[VD:%.*]], <vscale x 16 x float> [[VS2:%.*]], bfloat noundef [[RS1:%.*]], i64 noundef [[VL:%.*]]) #[[ATTR0]] {
+// CHECK-RV64-NEXT: [[ENTRY:.*:]]
+// CHECK-RV64-NEXT: [[TMP0:%.*]] = call <vscale x 16 x float> @llvm.riscv.vfwadd.w.mask.nxv16f32.bf16.i64(<vscale x 16 x float> [[VD]], <vscale x 16 x float> [[VS2]], bfloat [[RS1]], <vscale x 16 x i1> [[VM]], i64 7, i64 [[VL]], i64 1)
+// CHECK-RV64-NEXT: ret <vscale x 16 x float> [[TMP0]]
+//
+vfloat32m8_t test_vfwadd_wf_bf16_f32m8_mu(vbool4_t vm, vfloat32m8_t vd,
+ vfloat32m8_t vs2, __bf16 rs1,
+ size_t vl) {
+ return __riscv_vfwadd_wf_bf16_f32m8_mu(vm, vd, vs2, rs1, vl);
+}
+
+// CHECK-RV64-LABEL: define dso_local <vscale x 1 x float> @test_vfwadd_vv_bf16mf4_f32mf2_rm_tu(
+// CHECK-RV64-SAME: <vscale x 1 x float> [[VD:%.*]], <vscale x 1 x bfloat> [[VS2:%.*]], <vscale x 1 x bfloat> [[VS1:%.*]], i64 noundef [[VL:%.*]]) #[[ATTR0]] {
+// CHECK-RV64-NEXT: [[ENTRY:.*:]]
+// CHECK-RV64-NEXT: [[TMP0:%.*]] = call <vscale x 1 x float> @llvm.riscv.vfwadd.nxv1f32.nxv1bf16.nxv1bf16.i64(<vscale x 1 x float> [[VD]], <vscale x 1 x bfloat> [[VS2]], <vscale x 1 x bfloat> [[VS1]], i64 0, i64 [[VL]])
+// CHECK-RV64-NEXT: ret <vscale x 1 x float> [[TMP0]]
+//
+vfloat32mf2_t test_vfwadd_vv_bf16mf4_f32mf2_rm_tu(vfloat32mf2_t vd,
+ vbfloat16mf4_t vs2,
+ vbfloat16mf4_t vs1,
+ size_t vl) {
+ return __riscv_vfwadd_vv_bf16mf4_f32mf2_rm_tu(vd, vs2, vs1, __RISCV_FRM_RNE,
+ vl);
+}
+
+// CHECK-RV64-LABEL: define dso_local <vscale x 1 x float> @test_vfwadd_vf_bf16mf4_f32mf2_rm_tu(
+// CHECK-RV64-SAME: <vscale x 1 x float> [[VD:%.*]], <vscale x 1 x bfloat> [[VS2:%.*]], bfloat noundef [[RS1:%.*]], i64 noundef [[VL:%.*]]) #[[ATTR0]] {
+// CHECK-RV64-NEXT: [[ENTRY:.*:]]
+// CHECK-RV64-NEXT: [[TMP0:%.*]] = call <vscale x 1 x float> @llvm.riscv.vfwadd.nxv1f32.nxv1bf16.bf16.i64(<vscale x 1 x float> [[VD]], <vscale x 1 x bfloat> [[VS2]], bfloat [[RS1]], i64 0, i64 [[VL]])
+// CHECK-RV64-NEXT: ret <vscale x 1 x float> [[TMP0]]
+//
+vfloat32mf2_t test_vfwadd_vf_bf16mf4_f32mf2_rm_tu(vfloat32mf2_t vd,
+ vbfloat16mf4_t vs2,
+ __bf16 rs1, size_t vl) {
+ return __riscv_vfwadd_vf_bf16mf4_f32mf2_rm_tu(vd, vs2, rs1, __RISCV_FRM_RNE,
+ vl);
+}
+
+// CHECK-RV64-LABEL: define dso_local <vscale x 1 x float> @test_vfwadd_wv_bf16mf4_f32mf2_rm_tu(
+// CHECK-RV64-SAME: <vscale x 1 x float> [[VD:%.*]], <vscale x 1 x float> [[VS2:%.*]], <vscale x 1 x bfloat> [[VS1:%.*]], i64 noundef [[VL:%.*]]) #[[ATTR0]] {
+// CHECK-RV64-NEXT: [[ENTRY:.*:]]
+// CHECK-RV64-NEXT: [[TMP0:%.*]] = call <vscale x 1 x float> @llvm.riscv.vfwadd.w.nxv1f32.nxv1bf16.i64(<vscale x 1 x float> [[VD]], <vscale x 1 x float> [[VS2]], <vscale x 1 x bfloat> [[VS1]], i64 0, i64 [[VL]])
+// CHECK-RV64-NEXT: ret <vscale x 1 x float> [[TMP0]]
+//
+vfloat32mf2_t test_vfwadd_wv_bf16mf4_f32mf2_rm_tu(vfloat32mf2_t vd,
+ vfloat32mf2_t vs2,
+ vbfloat16mf4_t vs1,
+ size_t vl) {
+ return __riscv_vfwadd_wv_bf16mf4_f32mf2_rm_tu(vd, vs2, vs1, __RISCV_FRM_RNE,
+ vl);
+}
+
+// CHECK-RV64-LABEL: define dso_local <vscale x 1 x float> @test_vfwadd_wf_bf16_f32mf2_rm_tu(
+// CHECK-RV64-SAME: <vscale x 1 x float> [[VD:%.*]], <vscale x 1 x float> [[VS2:%.*]], bfloat noundef [[RS1:%.*]], i64 noundef [[VL:%.*]]) #[[ATTR0]] {
+// CHECK-RV64-NEXT: [[ENTRY:.*:]]
+// CHECK-RV64-NEXT: [[TMP0:%.*]] = call <vscale x 1 x float> @llvm.riscv.vfwadd.w.nxv1f32.bf16.i64(<vscale x 1 x float> [[VD]], <vscale x 1 x float> [[VS2]], bfloat [[RS1]], i64 0, i64 [[VL]])
+// CHECK-RV64-NEXT: ret <vscale x 1 x float> [[TMP0]]
+//
+vfloat32mf2_t test_vfwadd_wf_bf16_f32mf2_rm_tu(vfloat32mf2_t vd,
+ vfloat32mf2_t vs2, __bf16 rs1,
+ size_t vl) {
+ return __riscv_vfwadd_wf_bf16_f32mf2_rm_tu(vd, vs2, rs1, __RISCV_FRM_RNE, vl);
+}
+
+// CHECK-RV64-LABEL: define dso_local <vscale x 2 x float> @test_vfwadd_vv_bf16mf2_f32m1_rm_tu(
+// CHECK-RV64-SAME: <vscale x 2 x float> [[VD:%.*]], <vscale x 2 x bfloat> [[VS2:%.*]], <vscale x 2 x bfloat> [[VS1:%.*]], i64 noundef [[VL:%.*]]) #[[ATTR0]] {
+// CHECK-RV64-NEXT: [[ENTRY:.*:]]
+// CHECK-RV64-NEXT: [[TMP0:%.*]] = call <vscale x 2 x float> @llvm.riscv.vfwadd.nxv2f32.nxv2bf16.nxv2bf16.i64(<vscale x 2 x float> [[VD]], <vscale x 2 x bfloat> [[VS2]], <vscale x 2 x bfloat> [[VS1]], i64 0, i64 [[VL]])
+// CHECK-RV64-NEXT: ret <vscale x 2 x float> [[TMP0]]
+//
+vfloat32m1_t test_vfwadd_vv_bf16mf2_f32m1_rm_tu(vfloat32m1_t vd,
+ vbfloat16mf2_t vs2,
+ vbfloat16mf2_t vs1, size_t vl) {
+ return __riscv_vfwadd_vv_bf16mf2_f32m1_rm_tu(vd, vs2, vs1, __RISCV_FRM_RNE,
+ vl);
+}
+
+// CHECK-RV64-LABEL: define dso_local <vscale x 2 x float> @test_vfwadd_vf_bf16mf2_f32m1_rm_tu(
+// CHECK-RV64-SAME: <vscale x 2 x float> [[VD:%.*]], <vscale x 2 x bfloat> [[VS2:%.*]], bfloat noundef [[RS1:%.*]], i64 noundef [[VL:%.*]]) #[[ATTR0]] {
+// CHECK-RV64-NEXT: [[ENTRY:.*:]]
+// CHECK-RV64-NEXT: [[TMP0:%.*]] = call <vscale x 2 x float> @llvm.riscv.vfwadd.nxv2f32.nxv2bf16.bf16.i64(<vscale x 2 x float> [[VD]], <vscale x 2 x bfloat> [[VS2]], bfloat [[RS1]], i64 0, i64 [[VL]])
+// CHECK-RV64-NEXT: ret <vscale x 2 x float> [[TMP0]]
+//
+vfloat32m1_t test_vfwadd_vf_bf16mf2_f32m1_rm_tu(vfloat32m1_t vd,
+ vbfloat16mf2_t vs2, __bf16 rs1,
+ size_t vl) {
+ return __riscv_vfwadd_vf_bf16mf2_f32m1_rm_tu(vd, vs2, rs1, __RISCV_FRM_RNE,
+ vl);
+}
+
+// CHECK-RV64-LABEL: define dso_local <vscale x 2 x float> @test_vfwadd_wv_bf16mf2_f32m1_rm_tu(
+// CHECK-RV64-SAME: <vscale x 2 x float> [[VD:%.*]], <vscale x 2 x float> [[VS2:%.*]], <vscale x 2 x bfloat> [[VS1:%.*]], i64 noundef [[VL:%.*]]) #[[ATTR0]] {
+// CHECK-RV64-NEXT: [[ENTRY:.*:]]
+// CHECK-RV64-NEXT: [[TMP0:%.*]] = call <vscale x 2 x float> @llvm.riscv.vfwadd.w.nxv2f32.nxv2bf16.i64(<vscale x 2 x float> [[VD]], <vscale x 2 x float> [[VS2]], <vscale x 2 x bfloat> [[VS1]], i64 0, i64 [[VL]])
+// CHECK-RV64-NEXT: ret <vscale x 2 x float> [[TMP0]]
+//
+vfloat32m1_t test_vfwadd_wv_bf16mf2_f32m1_rm_tu(vfloat32m1_t vd,
+ vfloat32m1_t vs2,
+ vbfloat16mf2_t vs1, size_t vl) {
+ return __riscv_vfwadd_wv_bf16mf2_f32m1_rm_tu(vd, vs2, vs1, __RISCV_FRM_RNE,
+ vl);
+}
+
+// CHECK-RV64-LABEL: define dso_local <vscale x 2 x float> @test_vfwadd_wf_bf16_f32m1_rm_tu(
+// CHECK-RV64-SAME: <vscale x 2 x float> [[VD:%.*]], <vscale x 2 x float> [[VS2:%.*]], bfloat noundef [[RS1:%.*]], i64 noundef [[VL:%.*]]) #[[ATTR0]] {
+// CHECK-RV64-NEXT: [[ENTRY:.*:]]
+// CHECK-RV64-NEXT: [[TMP0:%.*]] = call <vscale x 2 x float> @llvm.riscv.vfwadd.w.nxv2f32.bf16.i64(<vscale x 2 x float> [[VD]], <vscale x 2 x float> [[VS2]], bfloat [[RS1]], i64 0, i64 [[VL]])
+// CHECK-RV64-NEXT: ret <vscale x 2 x float> [[TMP0]]
+//
+vfloat32m1_t test_vfwadd_wf_bf16_f32m1_rm_tu(vfloat32m1_t vd, vfloat32m1_t vs2,
+ __bf16 rs1, size_t vl) {
+ return __riscv_vfwadd_wf_bf16_f32m1_rm_tu(vd, vs2, rs1, __RISCV_FRM_RNE, vl);
+}
+
+// CHECK-RV64-LABEL: define dso_local <vscale x 4 x float> @test_vfwadd_vv_bf16m1_f32m2_rm_tu(
+// CHECK-RV64-SAME: <vscale x 4 x float> [[VD:%.*]], <vscale x 4 x bfloat> [[VS2:%.*]], <vscale x 4 x bfloat> [[VS1:%.*]], i64 noundef [[VL:%.*]]) #[[ATTR0]] {
+// CHECK-RV64-NEXT: [[ENTRY:.*:]]
+// CHECK-RV64-NEXT: [[TMP0:%.*]] = call <vscale x 4 x float> @llvm.riscv.vfwadd.nxv4f32.nxv4bf16.nxv4bf16.i64(<vscale x 4 x float> [[VD]], <vscale x 4 x bfloat> [[VS2]], <vscale x 4 x bfloat> [[VS1]], i64 0, i64 [[VL]])
+// CHECK-RV64-NEXT: ret <vscale x 4 x float> [[TMP0]]
+//
+vfloat32m2_t test_vfwadd_vv_bf16m1_f32m2_rm_tu(vfloat32m2_t vd,
+ vbfloat16m1_t vs2,
+ vbfloat16m1_t vs1, size_t vl) {
+ return __riscv_vfwadd_vv_bf16m1_f32m2_rm_tu(vd, vs2, vs1, __RISCV_FRM_RNE,
+ vl);
+}
+
+// CHECK-RV64-LABEL: define dso_local <vscale x 4 x float> @test_vfwadd_vf_bf16m1_f32m2_rm_tu(
+// CHECK-RV64-SAME: <vscale x 4 x float> [[VD:%.*]], <vscale x 4 x bfloat> [[VS2:%.*]], bfloat noundef [[RS1:%.*]], i64 noundef [[VL:%.*]]) #[[ATTR0]] {
+// CHECK-RV64-NEXT: [[ENTRY:.*:]]
+// CHECK-RV64-NEXT: [[TMP0:%.*]] = call <vscale x 4 x float> @llvm.riscv.vfwadd.nxv4f32.nxv4bf16.bf16.i64(<vscale x 4 x float> [[VD]], <vscale x 4 x bfloat> [[VS2]], bfloat [[RS1]], i64 0, i64 [[VL]])
+// CHECK-RV64-NEXT: ret <vscale x 4 x float> [[TMP0]]
+//
+vfloat32m2_t test_vfwadd_vf_bf16m1_f32m2_rm_tu(vfloat32m2_t vd,
+ vbfloat16m1_t vs2, __bf16 rs1,
+ size_t vl) {
+ return __riscv_vfwadd_vf_bf16m1_f32m2_rm_tu(vd, vs2, rs1, __RISCV_FRM_RNE,
+ vl);
+}
+
+// CHECK-RV64-LABEL: define dso_local <vscale x 4 x float> @test_vfwadd_wv_bf16m1_f32m2_rm_tu(
+// CHECK-RV64-SAME: <vscale x 4 x float> [[VD:%.*]], <vscale x 4 x float> [[VS2:%.*]], <vscale x 4 x bfloat> [[VS1:%.*]], i64 noundef [[VL:%.*]]) #[[ATTR0]] {
+// CHECK-RV64-NEXT: [[ENTRY:.*:]]
+// CHECK-RV64-NEXT: [[TMP0:%.*]] = call <vscale x 4 x float> @llvm.riscv.vfwadd.w.nxv4f32.nxv4bf16.i64(<vscale x 4 x float> [[VD]], <vscale x 4 x float> [[VS2]], <vscale x 4 x bfloat> [[VS1]], i64 0, i64 [[VL]])
+// CHECK-RV64-NEXT: ret <vscale x 4 x float> [[TMP0]]
+//
+vfloat32m2_t test_vfwadd_wv_bf16m1_f32m2_rm_tu(vfloat32m2_t vd,
+ vfloat32m2_t vs2,
+ vbfloat16m1_t vs1, size_t vl) {
+ return __riscv_vfwadd_wv_bf16m1_f32m2_rm_tu(vd, vs2, vs1, __RISCV_FRM_RNE,
+ vl);
+}
+
+// CHECK-RV64-LABEL: define dso_local <vscale x 4 x float> @test_vfwadd_wf_bf16_f32m2_rm_tu(
+// CHECK-RV64-SAME: <vscale x 4 x float> [[VD:%.*]], <vscale x 4 x float> [[VS2:%.*]], bfloat noundef [[RS1:%.*]], i64 noundef [[VL:%.*]]) #[[ATTR0]] {
+// CHECK-RV64-NEXT: [[ENTRY:.*:]]
+// CHECK-RV64-NEXT: [[TMP0:%.*]] = call <vscale x 4 x float> @llvm.riscv.vfwadd.w.nxv4f32.bf16.i64(<vscale x 4 x float> [[VD]], <vscale x 4 x float> [[VS2]], bfloat [[RS1]], i64 0, i64 [[VL]])
+// CHECK-RV64-NEXT: ret <vscale x 4 x float> [[TMP0]]
+//
+vfloat32m2_t test_vfwadd_wf_bf16_f32m2_rm_tu(vfloat32m2_t vd, vfloat32m2_t vs2,
+ __bf16 rs1, size_t vl) {
+ return __riscv_vfwadd_wf_bf16_f32m2_rm_tu(vd, vs2, rs1, __RISCV_FRM_RNE, vl);
+}
+
+// CHECK-RV64-LABEL: define dso_local <vscale x 8 x float> @test_vfwadd_vv_bf16m2_f32m4_rm_tu(
+// CHECK-RV64-SAME: <vscale x 8 x float> [[VD:%.*]], <vscale x 8 x bfloat> [[VS2:%.*]], <vscale x 8 x bfloat> [[VS1:%.*]], i64 noundef [[VL:%.*]]) #[[ATTR0]] {
+// CHECK-RV64-NEXT: [[ENTRY:.*:]]
+// CHECK-RV64-NEXT: [[TMP0:%.*]] = call <vscale x 8 x float> @llvm.riscv.vfwadd.nxv8f32.nxv8bf16.nxv8bf16.i64(<vscale x 8 x float> [[VD]], <vscale x 8 x bfloat> [[VS2]], <vscale x 8 x bfloat> [[VS1]], i64 0, i64 [[VL]])
+// CHECK-RV64-NEXT: ret <vscale x 8 x float> [[TMP0]]
+//
+vfloat32m4_t test_vfwadd_vv_bf16m2_f32m4_rm_tu(vfloat32m4_t vd,
+ vbfloat16m2_t vs2,
+ vbfloat16m2_t vs1, size_t vl) {
+ return __riscv_vfwadd_vv_bf16m2_f32m4_rm_tu(vd, vs2, vs1, __RISCV_FRM_RNE,
+ vl);
+}
+
+// CHECK-RV64-LABEL: define dso_local <vscale x 8 x float> @test_vfwadd_vf_bf16m2_f32m4_rm_tu(
+// CHECK-RV64-SAME: <vscale x 8 x float> [[VD:%.*]], <vscale x 8 x bfloat> [[VS2:%.*]], bfloat noundef [[RS1:%.*]], i64 noundef [[VL:%.*]]) #[[ATTR0]] {
+// CHECK-RV64-NEXT: [[ENTRY:.*:]]
+// CHECK-RV64-NEXT: [[TMP0:%.*]] = call <vscale x 8 x float> @llvm.riscv.vfwadd.nxv8f32.nxv8bf16.bf16.i64(<vscale x 8 x float> [[VD]], <vscale x 8 x bfloat> [[VS2]], bfloat [[RS1]], i64 0, i64 [[VL]])
+// CHECK-RV64-NEXT: ret <vscale x 8 x float> [[TMP0]]
+//
+vfloat32m4_t test_vfwadd_vf_bf16m2_f32m4_rm_tu(vfloat32m4_t vd,
+ vbfloat16m2_t vs2, __bf16 rs1,
+ size_t vl) {
+ return __riscv_vfwadd_vf_bf16m2_f32m4_rm_tu(vd, vs2, rs1, __RISCV_FRM_RNE,
+ vl);
+}
+
+// CHECK-RV64-LABEL: define dso_local <vscale x 8 x float> @test_vfwadd_wv_bf16m2_f32m4_rm_tu(
+// CHECK-RV64-SAME: <vscale x 8 x float> [[VD:%.*]], <vscale x 8 x float> [[VS2:%.*]], <vscale x 8 x bfloat> [[VS1:%.*]], i64 noundef [[VL:%.*]]) #[[ATTR0]] {
+// CHECK-RV64-NEXT: [[ENTRY:.*:]]
+// CHECK-RV64-NEXT: [[TMP0:%.*]] = call <vscale x 8 x float> @llvm.riscv.vfwadd.w.nxv8f32.nxv8bf16.i64(<vscale x 8 x float> [[VD]], <vscale x 8 x float> [[VS2]], <vscale x 8 x bfloat> [[VS1]], i64 0, i64 [[VL]])
+// CHECK-RV64-NEXT: ret <vscale x 8 x float> [[TMP0]]
+//
+vfloat32m4_t test_vfwadd_wv_bf16m2_f32m4_rm_tu(vfloat32m4_t vd,
+ vfloat32m4_t vs2,
+ vbfloat16m2_t vs1, size_t vl) {
+ return __riscv_vfwadd_wv_bf16m2_f32m4_rm_tu(vd, vs2, vs1, __RISCV_FRM_RNE,
+ vl);
+}
+
+// CHECK-RV64-LABEL: define dso_local <vscale x 8 x float> @test_vfwadd_wf_bf16_f32m4_rm_tu(
+// CHECK-RV64-SAME: <vscale x 8 x float> [[VD:%.*]], <vscale x 8 x float> [[VS2:%.*]], bfloat noundef [[RS1:%.*]], i64 noundef [[VL:%.*]]) #[[ATTR0]] {
+// CHECK-RV64-NEXT: [[ENTRY:.*:]]
+// CHECK-RV64-NEXT: [[TMP0:%.*]] = call <vscale x 8 x float> @llvm.riscv.vfwadd.w.nxv8f32.bf16.i64(<vscale x 8 x float> [[VD]], <vscale x 8 x float> [[VS2]], bfloat [[RS1]], i64 0, i64 [[VL]])
+// CHECK-RV64-NEXT: ret <vscale x 8 x float> [[TMP0]]
+//
+vfloat32m4_t test_vfwadd_wf_bf16_f32m4_rm_tu(vfloat32m4_t vd, vfloat32m4_t vs2,
+ __bf16 rs1, size_t vl) {
+ return __riscv_vfwadd_wf_bf16_f32m4_rm_tu(vd, vs2, rs1, __RISCV_FRM_RNE, vl);
+}
+
+// CHECK-RV64-LABEL: define dso_local <vscale x 16 x float> @test_vfwadd_vv_bf16m4_f32m8_rm_tu(
+// CHECK-RV64-SAME: <vscale x 16 x float> [[VD:%.*]], <vscale x 16 x bfloat> [[VS2:%.*]], <vscale x 16 x bfloat> [[VS1:%.*]], i64 noundef [[VL:%.*]]) #[[ATTR0]] {
+// CHECK-RV64-NEXT: [[ENTRY:.*:]]
+// CHECK-RV64-NEXT: [[TMP0:%.*]] = call <vscale x 16 x float> @llvm.riscv.vfwadd.nxv16f32.nxv16bf16.nxv16bf16.i64(<vscale x 16 x float> [[VD]], <vscale x 16 x bfloat> [[VS2]], <vscale x 16 x bfloat> [[VS1]], i64 0, i64 [[VL]])
+// CHECK-RV64-NEXT: ret <vscale x 16 x float> [[TMP0]]
+//
+vfloat32m8_t test_vfwadd_vv_bf16m4_f32m8_rm_tu(vfloat32m8_t vd,
+ vbfloat16m4_t vs2,
+ vbfloat16m4_t vs1, size_t vl) {
+ return __riscv_vfwadd_vv_bf16m4_f32m8_rm_tu(vd, vs2, vs1, __RISCV_FRM_RNE,
+ vl);
+}
+
+// CHECK-RV64-LABEL: define dso_local <vscale x 16 x float> @test_vfwadd_vf_bf16m4_f32m8_rm_tu(
+// CHECK-RV64-SAME: <vscale x 16 x float> [[VD:%.*]], <vscale x 16 x bfloat> [[VS2:%.*]], bfloat noundef [[RS1:%.*]], i64 noundef [[VL:%.*]]) #[[ATTR0]] {
+// CHECK-RV64-NEXT: [[ENTRY:.*:]]
+// CHECK-RV64-NEXT: [[TMP0:%.*]] = call <vscale x 16 x float> @llvm.riscv.vfwadd.nxv16f32.nxv16bf16.bf16.i64(<vscale x 16 x float> [[VD]], <vscale x 16 x bfloat> [[VS2]], bfloat [[RS1]], i64 0, i64 [[VL]])
+// CHECK-RV64-NEXT: ret <vscale x 16 x float> [[TMP0]]
+//
+vfloat32m8_t test_vfwadd_vf_bf16m4_f32m8_rm_tu(vfloat32m8_t vd,
+ vbfloat16m4_t vs2, __bf16 rs1,
+ size_t vl) {
+ return __riscv_vfwadd_vf_bf16m4_f32m8_rm_tu(vd, vs2, rs1, __RISCV_FRM_RNE,
+ vl);
+}
+
+// CHECK-RV64-LABEL: define dso_local <vscale x 16 x float> @test_vfwadd_wv_bf16m4_f32m8_rm_tu(
+// CHECK-RV64-SAME: <vscale x 16 x float> [[VD:%.*]], <vscale x 16 x float> [[VS2:%.*]], <vscale x 16 x bfloat> [[VS1:%.*]], i64 noundef [[VL:%.*]]) #[[ATTR0]] {
+// CHECK-RV64-NEXT: [[ENTRY:.*:]]
+// CHECK-RV64-NEXT: [[TMP0:%.*]] = call <vscale x 16 x float> @llvm.riscv.vfwadd.w.nxv16f32.nxv16bf16.i64(<vscale x 16 x float> [[VD]], <vscale x 16 x float> [[VS2]], <vscale x 16 x bfloat> [[VS1]], i64 0, i64 [[VL]])
+// CHECK-RV64-NEXT: ret <vscale x 16 x float> [[TMP0]]
+//
+vfloat32m8_t test_vfwadd_wv_bf16m4_f32m8_rm_tu(vfloat32m8_t vd,
+ vfloat32m8_t vs2,
+ vbfloat16m4_t vs1, size_t vl) {
+ return __riscv_vfwadd_wv_bf16m4_f32m8_rm_tu(vd, vs2, vs1, __RISCV_FRM_RNE,
+ vl);
+}
+
+// CHECK-RV64-LABEL: define dso_local <vscale x 16 x float> @test_vfwadd_wf_bf16_f32m8_rm_tu(
+// CHECK-RV64-SAME: <vscale x 16 x float> [[VD:%.*]], <vscale x 16 x float> [[VS2:%.*]], bfloat noundef [[RS1:%.*]], i64 noundef [[VL:%.*]]) #[[ATTR0]] {
+// CHECK-RV64-NEXT: [[ENTRY:.*:]]
+// CHECK-RV64-NEXT: [[TMP0:%.*]] = call <vscale x 16 x float> @llvm.riscv.vfwadd.w.nxv16f32.bf16.i64(<vscale x 16 x float> [[VD]], <vscale x 16 x float> [[VS2]], bfloat [[RS1]], i64 0, i64 [[VL]])
+// CHECK-RV64-NEXT: ret <vscale x 16 x float> [[TMP0]]
+//
+vfloat32m8_t test_vfwadd_wf_bf16_f32m8_rm_tu(vfloat32m8_t vd, vfloat32m8_t vs2,
+ __bf16 rs1, size_t vl) {
+ return __riscv_vfwadd_wf_bf16_f32m8_rm_tu(vd, vs2, rs1, __RISCV_FRM_RNE, vl);
+}
+
+// CHECK-RV64-LABEL: define dso_local <vscale x 1 x float> @test_vfwadd_vv_bf16mf4_f32mf2_rm_tum(
+// CHECK-RV64-SAME: <vscale x 1 x i1> [[VM:%.*]], <vscale x 1 x float> [[VD:%.*]], <vscale x 1 x bfloat> [[VS2:%.*]], <vscale x 1 x bfloat> [[VS1:%.*]], i64 noundef [[VL:%.*]]) #[[ATTR0]] {
+// CHECK-RV64-NEXT: [[ENTRY:.*:]]
+// CHECK-RV64-NEXT: [[TMP0:%.*]] = call <vscale x 1 x float> @llvm.riscv.vfwadd.mask.nxv1f32.nxv1bf16.nxv1bf16.i64(<vscale x 1 x float> [[VD]], <vscale x 1 x bfloat> [[VS2]], <vscale x 1 x bfloat> [[VS1]], <vscale x 1 x i1> [[VM]], i64 0, i64 [[VL]], i64 2)
+// CHECK-RV64-NEXT: ret <vscale x 1 x float> [[TMP0]]
+//
+vfloat32mf2_t test_vfwadd_vv_bf16mf4_f32mf2_rm_tum(vbool64_t vm,
+ vfloat32mf2_t vd,
+ vbfloat16mf4_t vs2,
+ vbfloat16mf4_t vs1,
+ size_t vl) {
+ return __riscv_vfwadd_vv_bf16mf4_f32mf2_rm_tum(vm, vd, vs2, vs1,
+ __RISCV_FRM_RNE, vl);
+}
+
+// CHECK-RV64-LABEL: define dso_local <vscale x 1 x float> @test_vfwadd_vf_bf16mf4_f32mf2_rm_tum(
+// CHECK-RV64-SAME: <vscale x 1 x i1> [[VM:%.*]], <vscale x 1 x float> [[VD:%.*]], <vscale x 1 x bfloat> [[VS2:%.*]], bfloat noundef [[RS1:%.*]], i64 noundef [[VL:%.*]]) #[[ATTR0]] {
+// CHECK-RV64-NEXT: [[ENTRY:.*:]]
+// CHECK-RV64-NEXT: [[TMP0:%.*]] = call <vscale x 1 x float> @llvm.riscv.vfwadd.mask.nxv1f32.nxv1bf16.bf16.i64(<vscale x 1 x float> [[VD]], <vscale x 1 x bfloat> [[VS2]], bfloat [[RS1]], <vscale x 1 x i1> [[VM]], i64 0, i64 [[VL]], i64 2)
+// CHECK-RV64-NEXT: ret <vscale x 1 x float> [[TMP0]]
+//
+vfloat32mf2_t test_vfwadd_vf_bf16mf4_f32mf2_rm_tum(vbool64_t vm,
+ vfloat32mf2_t vd,
+ vbfloat16mf4_t vs2,
+ __bf16 rs1, size_t vl) {
+ return __riscv_vfwadd_vf_bf16mf4_f32mf2_rm_tum(vm, vd, vs2, rs1,
+ __RISCV_FRM_RNE, vl);
+}
+
+// CHECK-RV64-LABEL: define dso_local <vscale x 1 x float> @test_vfwadd_wv_bf16mf4_f32mf2_rm_tum(
+// CHECK-RV64-SAME: <vscale x 1 x i1> [[VM:%.*]], <vscale x 1 x float> [[VD:%.*]], <vscale x 1 x float> [[VS2:%.*]], <vscale x 1 x bfloat> [[VS1:%.*]], i64 noundef [[VL:%.*]]) #[[ATTR0]] {
+// CHECK-RV64-NEXT: [[ENTRY:.*:]]
+// CHECK-RV64-NEXT: [[TMP0:%.*]] = call <vscale x 1 x float> @llvm.riscv.vfwadd.w.mask.nxv1f32.nxv1bf16.i64(<vscale x 1 x float> [[VD]], <vscale x 1 x float> [[VS2]], <vscale x 1 x bfloat> [[VS1]], <vscale x 1 x i1> [[VM]], i64 0, i64 [[VL]], i64 2)
+// CHECK-RV64-NEXT: ret <vscale x 1 x float> [[TMP0]]
+//
+vfloat32mf2_t test_vfwadd_wv_bf16mf4_f32mf2_rm_tum(vbool64_t vm,
+ vfloat32mf2_t vd,
+ vfloat32mf2_t vs2,
+ vbfloat16mf4_t vs1,
+ size_t vl) {
+ return __riscv_vfwadd_wv_bf16mf4_f32mf2_rm_tum(vm, vd, vs2, vs1,
+ __RISCV_FRM_RNE, vl);
+}
+
+// CHECK-RV64-LABEL: define dso_local <vscale x 1 x float> @test_vfwadd_wf_bf16_f32mf2_rm_tum(
+// CHECK-RV64-SAME: <vscale x 1 x i1> [[VM:%.*]], <vscale x 1 x float> [[VD:%.*]], <vscale x 1 x float> [[VS2:%.*]], bfloat noundef [[RS1:%.*]], i64 noundef [[VL:%.*]]) #[[ATTR0]] {
+// CHECK-RV64-NEXT: [[ENTRY:.*:]]
+// CHECK-RV64-NEXT: [[TMP0:%.*]] = call <vscale x 1 x float> @llvm.riscv.vfwadd.w.mask.nxv1f32.bf16.i64(<vscale x 1 x float> [[VD]], <vscale x 1 x float> [[VS2]], bfloat [[RS1]], <vscale x 1 x i1> [[VM]], i64 0, i64 [[VL]], i64 2)
+// CHECK-RV64-NEXT: ret <vscale x 1 x float> [[TMP0]]
+//
+vfloat32mf2_t test_vfwadd_wf_bf16_f32mf2_rm_tum(vbool64_t vm, vfloat32mf2_t vd,
+ vfloat32mf2_t vs2, __bf16 rs1,
+ size_t vl) {
+ return __riscv_vfwadd_wf_bf16_f32mf2_rm_tum(vm, vd, vs2, rs1, __RISCV_FRM_RNE,
+ vl);
+}
+
+// CHECK-RV64-LABEL: define dso_local <vscale x 2 x float> @test_vfwadd_vv_bf16mf2_f32m1_rm_tum(
+// CHECK-RV64-SAME: <vscale x 2 x i1> [[VM:%.*]], <vscale x 2 x float> [[VD:%.*]], <vscale x 2 x bfloat> [[VS2:%.*]], <vscale x 2 x bfloat> [[VS1:%.*]], i64 noundef [[VL:%.*]]) #[[ATTR0]] {
+// CHECK-RV64-NEXT: [[ENTRY:.*:]]
+// CHECK-RV64-NEXT: [[TMP0:%.*]] = call <vscale x 2 x float> @llvm.riscv.vfwadd.mask.nxv2f32.nxv2bf16.nxv2bf16.i64(<vscale x 2 x float> [[VD]], <vscale x 2 x bfloat> [[VS2]], <vscale x 2 x bfloat> [[VS1]], <vscale x 2 x i1> [[VM]], i64 0, i64 [[VL]], i64 2)
+// CHECK-RV64-NEXT: ret <vscale x 2 x float> [[TMP0]]
+//
+vfloat32m1_t test_vfwadd_vv_bf16mf2_f32m1_rm_tum(vbool32_t vm, vfloat32m1_t vd,
+ vbfloat16mf2_t vs2,
+ vbfloat16mf2_t vs1,
+ size_t vl) {
+ return __riscv_vfwadd_vv_bf16mf2_f32m1_rm_tum(vm, vd, vs2, vs1,
+ __RISCV_FRM_RNE, vl);
+}
+
+// CHECK-RV64-LABEL: define dso_local <vscale x 2 x float> @test_vfwadd_vf_bf16mf2_f32m1_rm_tum(
+// CHECK-RV64-SAME: <vscale x 2 x i1> [[VM:%.*]], <vscale x 2 x float> [[VD:%.*]], <vscale x 2 x bfloat> [[VS2:%.*]], bfloat noundef [[RS1:%.*]], i64 noundef [[VL:%.*]]) #[[ATTR0]] {
+// CHECK-RV64-NEXT: [[ENTRY:.*:]]
+// CHECK-RV64-NEXT: [[TMP0:%.*]] = call <vscale x 2 x float> @llvm.riscv.vfwadd.mask.nxv2f32.nxv2bf16.bf16.i64(<vscale x 2 x float> [[VD]], <vscale x 2 x bfloat> [[VS2]], bfloat [[RS1]], <vscale x 2 x i1> [[VM]], i64 0, i64 [[VL]], i64 2)
+// CHECK-RV64-NEXT: ret <vscale x 2 x float> [[TMP0]]
+//
+vfloat32m1_t test_vfwadd_vf_bf16mf2_f32m1_rm_tum(vbool32_t vm, vfloat32m1_t vd,
+ vbfloat16mf2_t vs2, __bf16 rs1,
+ size_t vl) {
+ return __riscv_vfwadd_vf_bf16mf2_f32m1_rm_tum(vm, vd, vs2, rs1,
+ __RISCV_FRM_RNE, vl);
+}
+
+// CHECK-RV64-LABEL: define dso_local <vscale x 2 x float> @test_vfwadd_wv_bf16mf2_f32m1_rm_tum(
+// CHECK-RV64-SAME: <vscale x 2 x i1> [[VM:%.*]], <vscale x 2 x float> [[VD:%.*]], <vscale x 2 x float> [[VS2:%.*]], <vscale x 2 x bfloat> [[VS1:%.*]], i64 noundef [[VL:%.*]]) #[[ATTR0]] {
+// CHECK-RV64-NEXT: [[ENTRY:.*:]]
+// CHECK-RV64-NEXT: [[TMP0:%.*]] = call <vscale x 2 x float> @llvm.riscv.vfwadd.w.mask.nxv2f32.nxv2bf16.i64(<vscale x 2 x float> [[VD]], <vscale x 2 x float> [[VS2]], <vscale x 2 x bfloat> [[VS1]], <vscale x 2 x i1> [[VM]], i64 0, i64 [[VL]], i64 2)
+// CHECK-RV64-NEXT: ret <vscale x 2 x float> [[TMP0]]
+//
+vfloat32m1_t test_vfwadd_wv_bf16mf2_f32m1_rm_tum(vbool32_t vm, vfloat32m1_t vd,
+ vfloat32m1_t vs2,
+ vbfloat16mf2_t vs1,
+ size_t vl) {
+ return __riscv_vfwadd_wv_bf16mf2_f32m1_rm_tum(vm, vd, vs2, vs1,
+ __RISCV_FRM_RNE, vl);
+}
+
+// CHECK-RV64-LABEL: define dso_local <vscale x 2 x float> @test_vfwadd_wf_bf16_f32m1_rm_tum(
+// CHECK-RV64-SAME: <vscale x 2 x i1> [[VM:%.*]], <vscale x 2 x float> [[VD:%.*]], <vscale x 2 x float> [[VS2:%.*]], bfloat noundef [[RS1:%.*]], i64 noundef [[VL:%.*]]) #[[ATTR0]] {
+// CHECK-RV64-NEXT: [[ENTRY:.*:]]
+// CHECK-RV64-NEXT: [[TMP0:%.*]] = call <vscale x 2 x float> @llvm.riscv.vfwadd.w.mask.nxv2f32.bf16.i64(<vscale x 2 x float> [[VD]], <vscale x 2 x float> [[VS2]], bfloat [[RS1]], <vscale x 2 x i1> [[VM]], i64 0, i64 [[VL]], i64 2)
+// CHECK-RV64-NEXT: ret <vscale x 2 x float> [[TMP0]]
+//
+vfloat32m1_t test_vfwadd_wf_bf16_f32m1_rm_tum(vbool32_t vm, vfloat32m1_t vd,
+ vfloat32m1_t vs2, __bf16 rs1,
+ size_t vl) {
+ return __riscv_vfwadd_wf_bf16_f32m1_rm_tum(vm, vd, vs2, rs1, __RISCV_FRM_RNE,
+ vl);
+}
+
+// CHECK-RV64-LABEL: define dso_local <vscale x 4 x float> @test_vfwadd_vv_bf16m1_f32m2_rm_tum(
+// CHECK-RV64-SAME: <vscale x 4 x i1> [[VM:%.*]], <vscale x 4 x float> [[VD:%.*]], <vscale x 4 x bfloat> [[VS2:%.*]], <vscale x 4 x bfloat> [[VS1:%.*]], i64 noundef [[VL:%.*]]) #[[ATTR0]] {
+// CHECK-RV64-NEXT: [[ENTRY:.*:]]
+// CHECK-RV64-NEXT: [[TMP0:%.*]] = call <vscale x 4 x float> @llvm.riscv.vfwadd.mask.nxv4f32.nxv4bf16.nxv4bf16.i64(<vscale x 4 x float> [[VD]], <vscale x 4 x bfloat> [[VS2]], <vscale x 4 x bfloat> [[VS1]], <vscale x 4 x i1> [[VM]], i64 0, i64 [[VL]], i64 2)
+// CHECK-RV64-NEXT: ret <vscale x 4 x float> [[TMP0]]
+//
+vfloat32m2_t test_vfwadd_vv_bf16m1_f32m2_rm_tum(vbool16_t vm, vfloat32m2_t vd,
+ vbfloat16m1_t vs2,
+ vbfloat16m1_t vs1, size_t vl) {
+ return __riscv_vfwadd_vv_bf16m1_f32m2_rm_tum(vm, vd, vs2, vs1,
+ __RISCV_FRM_RNE, vl);
+}
+
+// CHECK-RV64-LABEL: define dso_local <vscale x 4 x float> @test_vfwadd_vf_bf16m1_f32m2_rm_tum(
+// CHECK-RV64-SAME: <vscale x 4 x i1> [[VM:%.*]], <vscale x 4 x float> [[VD:%.*]], <vscale x 4 x bfloat> [[VS2:%.*]], bfloat noundef [[RS1:%.*]], i64 noundef [[VL:%.*]]) #[[ATTR0]] {
+// CHECK-RV64-NEXT: [[ENTRY:.*:]]
+// CHECK-RV64-NEXT: [[TMP0:%.*]] = call <vscale x 4 x float> @llvm.riscv.vfwadd.mask.nxv4f32.nxv4bf16.bf16.i64(<vscale x 4 x float> [[VD]], <vscale x 4 x bfloat> [[VS2]], bfloat [[RS1]], <vscale x 4 x i1> [[VM]], i64 0, i64 [[VL]], i64 2)
+// CHECK-RV64-NEXT: ret <vscale x 4 x float> [[TMP0]]
+//
+vfloat32m2_t test_vfwadd_vf_bf16m1_f32m2_rm_tum(vbool16_t vm, vfloat32m2_t vd,
+ vbfloat16m1_t vs2, __bf16 rs1,
+ size_t vl) {
+ return __riscv_vfwadd_vf_bf16m1_f32m2_rm_tum(vm, vd, vs2, rs1,
+ __RISCV_FRM_RNE, vl);
+}
+
+// CHECK-RV64-LABEL: define dso_local <vscale x 4 x float> @test_vfwadd_wv_bf16m1_f32m2_rm_tum(
+// CHECK-RV64-SAME: <vscale x 4 x i1> [[VM:%.*]], <vscale x 4 x float> [[VD:%.*]], <vscale x 4 x float> [[VS2:%.*]], <vscale x 4 x bfloat> [[VS1:%.*]], i64 noundef [[VL:%.*]]) #[[ATTR0]] {
+// CHECK-RV64-NEXT: [[ENTRY:.*:]]
+// CHECK-RV64-NEXT: [[TMP0:%.*]] = call <vscale x 4 x float> @llvm.riscv.vfwadd.w.mask.nxv4f32.nxv4bf16.i64(<vscale x 4 x float> [[VD]], <vscale x 4 x float> [[VS2]], <vscale x 4 x bfloat> [[VS1]], <vscale x 4 x i1> [[VM]], i64 0, i64 [[VL]], i64 2)
+// CHECK-RV64-NEXT: ret <vscale x 4 x float> [[TMP0]]
+//
+vfloat32m2_t test_vfwadd_wv_bf16m1_f32m2_rm_tum(vbool16_t vm, vfloat32m2_t vd,
+ vfloat32m2_t vs2,
+ vbfloat16m1_t vs1, size_t vl) {
+ return __riscv_vfwadd_wv_bf16m1_f32m2_rm_tum(vm, vd, vs2, vs1,
+ __RISCV_FRM_RNE, vl);
+}
+
+// CHECK-RV64-LABEL: define dso_local <vscale x 4 x float> @test_vfwadd_wf_bf16_f32m2_rm_tum(
+// CHECK-RV64-SAME: <vscale x 4 x i1> [[VM:%.*]], <vscale x 4 x float> [[VD:%.*]], <vscale x 4 x float> [[VS2:%.*]], bfloat noundef [[RS1:%.*]], i64 noundef [[VL:%.*]]) #[[ATTR0]] {
+// CHECK-RV64-NEXT: [[ENTRY:.*:]]
+// CHECK-RV64-NEXT: [[TMP0:%.*]] = call <vscale x 4 x float> @llvm.riscv.vfwadd.w.mask.nxv4f32.bf16.i64(<vscale x 4 x float> [[VD]], <vscale x 4 x float> [[VS2]], bfloat [[RS1]], <vscale x 4 x i1> [[VM]], i64 0, i64 [[VL]], i64 2)
+// CHECK-RV64-NEXT: ret <vscale x 4 x float> [[TMP0]]
+//
+vfloat32m2_t test_vfwadd_wf_bf16_f32m2_rm_tum(vbool16_t vm, vfloat32m2_t vd,
+ vfloat32m2_t vs2, __bf16 rs1,
+ size_t vl) {
+ return __riscv_vfwadd_wf_bf16_f32m2_rm_tum(vm, vd, vs2, rs1, __RISCV_FRM_RNE,
+ vl);
+}
+
+// CHECK-RV64-LABEL: define dso_local <vscale x 8 x float> @test_vfwadd_vv_bf16m2_f32m4_rm_tum(
+// CHECK-RV64-SAME: <vscale x 8 x i1> [[VM:%.*]], <vscale x 8 x float> [[VD:%.*]], <vscale x 8 x bfloat> [[VS2:%.*]], <vscale x 8 x bfloat> [[VS1:%.*]], i64 noundef [[VL:%.*]]) #[[ATTR0]] {
+// CHECK-RV64-NEXT: [[ENTRY:.*:]]
+// CHECK-RV64-NEXT: [[TMP0:%.*]] = call <vscale x 8 x float> @llvm.riscv.vfwadd.mask.nxv8f32.nxv8bf16.nxv8bf16.i64(<vscale x 8 x float> [[VD]], <vscale x 8 x bfloat> [[VS2]], <vscale x 8 x bfloat> [[VS1]], <vscale x 8 x i1> [[VM]], i64 0, i64 [[VL]], i64 2)
+// CHECK-RV64-NEXT: ret <vscale x 8 x float> [[TMP0]]
+//
+vfloat32m4_t test_vfwadd_vv_bf16m2_f32m4_rm_tum(vbool8_t vm, vfloat32m4_t vd,
+ vbfloat16m2_t vs2,
+ vbfloat16m2_t vs1, size_t vl) {
+ return __riscv_vfwadd_vv_bf16m2_f32m4_rm_tum(vm, vd, vs2, vs1,
+ __RISCV_FRM_RNE, vl);
+}
+
+// CHECK-RV64-LABEL: define dso_local <vscale x 8 x float> @test_vfwadd_vf_bf16m2_f32m4_rm_tum(
+// CHECK-RV64-SAME: <vscale x 8 x i1> [[VM:%.*]], <vscale x 8 x float> [[VD:%.*]], <vscale x 8 x bfloat> [[VS2:%.*]], bfloat noundef [[RS1:%.*]], i64 noundef [[VL:%.*]]) #[[ATTR0]] {
+// CHECK-RV64-NEXT: [[ENTRY:.*:]]
+// CHECK-RV64-NEXT: [[TMP0:%.*]] = call <vscale x 8 x float> @llvm.riscv.vfwadd.mask.nxv8f32.nxv8bf16.bf16.i64(<vscale x 8 x float> [[VD]], <vscale x 8 x bfloat> [[VS2]], bfloat [[RS1]], <vscale x 8 x i1> [[VM]], i64 0, i64 [[VL]], i64 2)
+// CHECK-RV64-NEXT: ret <vscale x 8 x float> [[TMP0]]
+//
+vfloat32m4_t test_vfwadd_vf_bf16m2_f32m4_rm_tum(vbool8_t vm, vfloat32m4_t vd,
+ vbfloat16m2_t vs2, __bf16 rs1,
+ size_t vl) {
+ return __riscv_vfwadd_vf_bf16m2_f32m4_rm_tum(vm, vd, vs2, rs1,
+ __RISCV_FRM_RNE, vl);
+}
+
+// CHECK-RV64-LABEL: define dso_local <vscale x 8 x float> @test_vfwadd_wv_bf16m2_f32m4_rm_tum(
+// CHECK-RV64-SAME: <vscale x 8 x i1> [[VM:%.*]], <vscale x 8 x float> [[VD:%.*]], <vscale x 8 x float> [[VS2:%.*]], <vscale x 8 x bfloat> [[VS1:%.*]], i64 noundef [[VL:%.*]]) #[[ATTR0]] {
+// CHECK-RV64-NEXT: [[ENTRY:.*:]]
+// CHECK-RV64-NEXT: [[TMP0:%.*]] = call <vscale x 8 x float> @llvm.riscv.vfwadd.w.mask.nxv8f32.nxv8bf16.i64(<vscale x 8 x float> [[VD]], <vscale x 8 x float> [[VS2]], <vscale x 8 x bfloat> [[VS1]], <vscale x 8 x i1> [[VM]], i64 0, i64 [[VL]], i64 2)
+// CHECK-RV64-NEXT: ret <vscale x 8 x float> [[TMP0]]
+//
+vfloat32m4_t test_vfwadd_wv_bf16m2_f32m4_rm_tum(vbool8_t vm, vfloat32m4_t vd,
+ vfloat32m4_t vs2,
+ vbfloat16m2_t vs1, size_t vl) {
+ return __riscv_vfwadd_wv_bf16m2_f32m4_rm_tum(vm, vd, vs2, vs1,
+ __RISCV_FRM_RNE, vl);
+}
+
+// CHECK-RV64-LABEL: define dso_local <vscale x 8 x float> @test_vfwadd_wf_bf16_f32m4_rm_tum(
+// CHECK-RV64-SAME: <vscale x 8 x i1> [[VM:%.*]], <vscale x 8 x float> [[VD:%.*]], <vscale x 8 x float> [[VS2:%.*]], bfloat noundef [[RS1:%.*]], i64 noundef [[VL:%.*]]) #[[ATTR0]] {
+// CHECK-RV64-NEXT: [[ENTRY:.*:]]
+// CHECK-RV64-NEXT: [[TMP0:%.*]] = call <vscale x 8 x float> @llvm.riscv.vfwadd.w.mask.nxv8f32.bf16.i64(<vscale x 8 x float> [[VD]], <vscale x 8 x float> [[VS2]], bfloat [[RS1]], <vscale x 8 x i1> [[VM]], i64 0, i64 [[VL]], i64 2)
+// CHECK-RV64-NEXT: ret <vscale x 8 x float> [[TMP0]]
+//
+vfloat32m4_t test_vfwadd_wf_bf16_f32m4_rm_tum(vbool8_t vm, vfloat32m4_t vd,
+ vfloat32m4_t vs2, __bf16 rs1,
+ size_t vl) {
+ return __riscv_vfwadd_wf_bf16_f32m4_rm_tum(vm, vd, vs2, rs1, __RISCV_FRM_RNE,
+ vl);
+}
+
+// CHECK-RV64-LABEL: define dso_local <vscale x 16 x float> @test_vfwadd_vv_bf16m4_f32m8_rm_tum(
+// CHECK-RV64-SAME: <vscale x 16 x i1> [[VM:%.*]], <vscale x 16 x float> [[VD:%.*]], <vscale x 16 x bfloat> [[VS2:%.*]], <vscale x 16 x bfloat> [[VS1:%.*]], i64 noundef [[VL:%.*]]) #[[ATTR0]] {
+// CHECK-RV64-NEXT: [[ENTRY:.*:]]
+// CHECK-RV64-NEXT: [[TMP0:%.*]] = call <vscale x 16 x float> @llvm.riscv.vfwadd.mask.nxv16f32.nxv16bf16.nxv16bf16.i64(<vscale x 16 x float> [[VD]], <vscale x 16 x bfloat> [[VS2]], <vscale x 16 x bfloat> [[VS1]], <vscale x 16 x i1> [[VM]], i64 0, i64 [[VL]], i64 2)
+// CHECK-RV64-NEXT: ret <vscale x 16 x float> [[TMP0]]
+//
+vfloat32m8_t test_vfwadd_vv_bf16m4_f32m8_rm_tum(vbool4_t vm, vfloat32m8_t vd,
+ vbfloat16m4_t vs2,
+ vbfloat16m4_t vs1, size_t vl) {
+ return __riscv_vfwadd_vv_bf16m4_f32m8_rm_tum(vm, vd, vs2, vs1,
+ __RISCV_FRM_RNE, vl);
+}
+
+// CHECK-RV64-LABEL: define dso_local <vscale x 16 x float> @test_vfwadd_vf_bf16m4_f32m8_rm_tum(
+// CHECK-RV64-SAME: <vscale x 16 x i1> [[VM:%.*]], <vscale x 16 x float> [[VD:%.*]], <vscale x 16 x bfloat> [[VS2:%.*]], bfloat noundef [[RS1:%.*]], i64 noundef [[VL:%.*]]) #[[ATTR0]] {
+// CHECK-RV64-NEXT: [[ENTRY:.*:]]
+// CHECK-RV64-NEXT: [[TMP0:%.*]] = call <vscale x 16 x float> @llvm.riscv.vfwadd.mask.nxv16f32.nxv16bf16.bf16.i64(<vscale x 16 x float> [[VD]], <vscale x 16 x bfloat> [[VS2]], bfloat [[RS1]], <vscale x 16 x i1> [[VM]], i64 0, i64 [[VL]], i64 2)
+// CHECK-RV64-NEXT: ret <vscale x 16 x float> [[TMP0]]
+//
+vfloat32m8_t test_vfwadd_vf_bf16m4_f32m8_rm_tum(vbool4_t vm, vfloat32m8_t vd,
+ vbfloat16m4_t vs2, __bf16 rs1,
+ size_t vl) {
+ return __riscv_vfwadd_vf_bf16m4_f32m8_rm_tum(vm, vd, vs2, rs1,
+ __RISCV_FRM_RNE, vl);
+}
+
+// CHECK-RV64-LABEL: define dso_local <vscale x 16 x float> @test_vfwadd_wv_bf16m4_f32m8_rm_tum(
+// CHECK-RV64-SAME: <vscale x 16 x i1> [[VM:%.*]], <vscale x 16 x float> [[VD:%.*]], <vscale x 16 x float> [[VS2:%.*]], <vscale x 16 x bfloat> [[VS1:%.*]], i64 noundef [[VL:%.*]]) #[[ATTR0]] {
+// CHECK-RV64-NEXT: [[ENTRY:.*:]]
+// CHECK-RV64-NEXT: [[TMP0:%.*]] = call <vscale x 16 x float> @llvm.riscv.vfwadd.w.mask.nxv16f32.nxv16bf16.i64(<vscale x 16 x float> [[VD]], <vscale x 16 x float> [[VS2]], <vscale x 16 x bfloat> [[VS1]], <vscale x 16 x i1> [[VM]], i64 0, i64 [[VL]], i64 2)
+// CHECK-RV64-NEXT: ret <vscale x 16 x float> [[TMP0]]
+//
+vfloat32m8_t test_vfwadd_wv_bf16m4_f32m8_rm_tum(vbool4_t vm, vfloat32m8_t vd,
+ vfloat32m8_t vs2,
+ vbfloat16m4_t vs1, size_t vl) {
+ return __riscv_vfwadd_wv_bf16m4_f32m8_rm_tum(vm, vd, vs2, vs1,
+ __RISCV_FRM_RNE, vl);
+}
+
+// CHECK-RV64-LABEL: define dso_local <vscale x 16 x float> @test_vfwadd_wf_bf16_f32m8_rm_tum(
+// CHECK-RV64-SAME: <vscale x 16 x i1> [[VM:%.*]], <vscale x 16 x float> [[VD:%.*]], <vscale x 16 x float> [[VS2:%.*]], bfloat noundef [[RS1:%.*]], i64 noundef [[VL:%.*]]) #[[ATTR0]] {
+// CHECK-RV64-NEXT: [[ENTRY:.*:]]
+// CHECK-RV64-NEXT: [[TMP0:%.*]] = call <vscale x 16 x float> @llvm.riscv.vfwadd.w.mask.nxv16f32.bf16.i64(<vscale x 16 x float> [[VD]], <vscale x 16 x float> [[VS2]], bfloat [[RS1]], <vscale x 16 x i1> [[VM]], i64 0, i64 [[VL]], i64 2)
+// CHECK-RV64-NEXT: ret <vscale x 16 x float> [[TMP0]]
+//
+vfloat32m8_t test_vfwadd_wf_bf16_f32m8_rm_tum(vbool4_t vm, vfloat32m8_t vd,
+ vfloat32m8_t vs2, __bf16 rs1,
+ size_t vl) {
+ return __riscv_vfwadd_wf_bf16_f32m8_rm_tum(vm, vd, vs2, rs1, __RISCV_FRM_RNE,
+ vl);
+}
+
+// CHECK-RV64-LABEL: define dso_local <vscale x 1 x float> @test_vfwadd_vv_bf16mf4_f32mf2_rm_tumu(
+// CHECK-RV64-SAME: <vscale x 1 x i1> [[VM:%.*]], <vscale x 1 x float> [[VD:%.*]], <vscale x 1 x bfloat> [[VS2:%.*]], <vscale x 1 x bfloat> [[VS1:%.*]], i64 noundef [[VL:%.*]]) #[[ATTR0]] {
+// CHECK-RV64-NEXT: [[ENTRY:.*:]]
+// CHECK-RV64-NEXT: [[TMP0:%.*]] = call <vscale x 1 x float> @llvm.riscv.vfwadd.mask.nxv1f32.nxv1bf16.nxv1bf16.i64(<vscale x 1 x float> [[VD]], <vscale x 1 x bfloat> [[VS2]], <vscale x 1 x bfloat> [[VS1]], <vscale x 1 x i1> [[VM]], i64 0, i64 [[VL]], i64 0)
+// CHECK-RV64-NEXT: ret <vscale x 1 x float> [[TMP0]]
+//
+vfloat32mf2_t test_vfwadd_vv_bf16mf4_f32mf2_rm_tumu(vbool64_t vm,
+ vfloat32mf2_t vd,
+ vbfloat16mf4_t vs2,
+ vbfloat16mf4_t vs1,
+ size_t vl) {
+ return __riscv_vfwadd_vv_bf16mf4_f32mf2_rm_tumu(vm, vd, vs2, vs1,
+ __RISCV_FRM_RNE, vl);
+}
+
+// CHECK-RV64-LABEL: define dso_local <vscale x 1 x float> @test_vfwadd_vf_bf16mf4_f32mf2_rm_tumu(
+// CHECK-RV64-SAME: <vscale x 1 x i1> [[VM:%.*]], <vscale x 1 x float> [[VD:%.*]], <vscale x 1 x bfloat> [[VS2:%.*]], bfloat noundef [[RS1:%.*]], i64 noundef [[VL:%.*]]) #[[ATTR0]] {
+// CHECK-RV64-NEXT: [[ENTRY:.*:]]
+// CHECK-RV64-NEXT: [[TMP0:%.*]] = call <vscale x 1 x float> @llvm.riscv.vfwadd.mask.nxv1f32.nxv1bf16.bf16.i64(<vscale x 1 x float> [[VD]], <vscale x 1 x bfloat> [[VS2]], bfloat [[RS1]], <vscale x 1 x i1> [[VM]], i64 0, i64 [[VL]], i64 0)
+// CHECK-RV64-NEXT: ret <vscale x 1 x float> [[TMP0]]
+//
+vfloat32mf2_t test_vfwadd_vf_bf16mf4_f32mf2_rm_tumu(vbool64_t vm,
+ vfloat32mf2_t vd,
+ vbfloat16mf4_t vs2,
+ __bf16 rs1, size_t vl) {
+ return __riscv_vfwadd_vf_bf16mf4_f32mf2_rm_tumu(vm, vd, vs2, rs1,
+ __RISCV_FRM_RNE, vl);
+}
+
+// CHECK-RV64-LABEL: define dso_local <vscale x 1 x float> @test_vfwadd_wv_bf16mf4_f32mf2_rm_tumu(
+// CHECK-RV64-SAME: <vscale x 1 x i1> [[VM:%.*]], <vscale x 1 x float> [[VD:%.*]], <vscale x 1 x float> [[VS2:%.*]], <vscale x 1 x bfloat> [[VS1:%.*]], i64 noundef [[VL:%.*]]) #[[ATTR0]] {
+// CHECK-RV64-NEXT: [[ENTRY:.*:]]
+// CHECK-RV64-NEXT: [[TMP0:%.*]] = call <vscale x 1 x float> @llvm.riscv.vfwadd.w.mask.nxv1f32.nxv1bf16.i64(<vscale x 1 x float> [[VD]], <vscale x 1 x float> [[VS2]], <vscale x 1 x bfloat> [[VS1]], <vscale x 1 x i1> [[VM]], i64 0, i64 [[VL]], i64 0)
+// CHECK-RV64-NEXT: ret <vscale x 1 x float> [[TMP0]]
+//
+vfloat32mf2_t test_vfwadd_wv_bf16mf4_f32mf2_rm_tumu(vbool64_t vm,
+ vfloat32mf2_t vd,
+ vfloat32mf2_t vs2,
+ vbfloat16mf4_t vs1,
+ size_t vl) {
+ return __riscv_vfwadd_wv_bf16mf4_f32mf2_rm_tumu(vm, vd, vs2, vs1,
+ __RISCV_FRM_RNE, vl);
+}
+
+// CHECK-RV64-LABEL: define dso_local <vscale x 1 x float> @test_vfwadd_wf_bf16_f32mf2_rm_tumu(
+// CHECK-RV64-SAME: <vscale x 1 x i1> [[VM:%.*]], <vscale x 1 x float> [[VD:%.*]], <vscale x 1 x float> [[VS2:%.*]], bfloat noundef [[RS1:%.*]], i64 noundef [[VL:%.*]]) #[[ATTR0]] {
+// CHECK-RV64-NEXT: [[ENTRY:.*:]]
+// CHECK-RV64-NEXT: [[TMP0:%.*]] = call <vscale x 1 x float> @llvm.riscv.vfwadd.w.mask.nxv1f32.bf16.i64(<vscale x 1 x float> [[VD]], <vscale x 1 x float> [[VS2]], bfloat [[RS1]], <vscale x 1 x i1> [[VM]], i64 0, i64 [[VL]], i64 0)
+// CHECK-RV64-NEXT: ret <vscale x 1 x float> [[TMP0]]
+//
+vfloat32mf2_t test_vfwadd_wf_bf16_f32mf2_rm_tumu(vbool64_t vm, vfloat32mf2_t vd,
+ vfloat32mf2_t vs2, __bf16 rs1,
+ size_t vl) {
+ return __riscv_vfwadd_wf_bf16_f32mf2_rm_tumu(vm, vd, vs2, rs1,
+ __RISCV_FRM_RNE, vl);
+}
+
+// CHECK-RV64-LABEL: define dso_local <vscale x 2 x float> @test_vfwadd_vv_bf16mf2_f32m1_rm_tumu(
+// CHECK-RV64-SAME: <vscale x 2 x i1> [[VM:%.*]], <vscale x 2 x float> [[VD:%.*]], <vscale x 2 x bfloat> [[VS2:%.*]], <vscale x 2 x bfloat> [[VS1:%.*]], i64 noundef [[VL:%.*]]) #[[ATTR0]] {
+// CHECK-RV64-NEXT: [[ENTRY:.*:]]
+// CHECK-RV64-NEXT: [[TMP0:%.*]] = call <vscale x 2 x float> @llvm.riscv.vfwadd.mask.nxv2f32.nxv2bf16.nxv2bf16.i64(<vscale x 2 x float> [[VD]], <vscale x 2 x bfloat> [[VS2]], <vscale x 2 x bfloat> [[VS1]], <vscale x 2 x i1> [[VM]], i64 0, i64 [[VL]], i64 0)
+// CHECK-RV64-NEXT: ret <vscale x 2 x float> [[TMP0]]
+//
+vfloat32m1_t test_vfwadd_vv_bf16mf2_f32m1_rm_tumu(vbool32_t vm, vfloat32m1_t vd,
+ vbfloat16mf2_t vs2,
+ vbfloat16mf2_t vs1,
+ size_t vl) {
+ return __riscv_vfwadd_vv_bf16mf2_f32m1_rm_tumu(vm, vd, vs2, vs1,
+ __RISCV_FRM_RNE, vl);
+}
+
+// CHECK-RV64-LABEL: define dso_local <vscale x 2 x float> @test_vfwadd_vf_bf16mf2_f32m1_rm_tumu(
+// CHECK-RV64-SAME: <vscale x 2 x i1> [[VM:%.*]], <vscale x 2 x float> [[VD:%.*]], <vscale x 2 x bfloat> [[VS2:%.*]], bfloat noundef [[RS1:%.*]], i64 noundef [[VL:%.*]]) #[[ATTR0]] {
+// CHECK-RV64-NEXT: [[ENTRY:.*:]]
+// CHECK-RV64-NEXT: [[TMP0:%.*]] = call <vscale x 2 x float> @llvm.riscv.vfwadd.mask.nxv2f32.nxv2bf16.bf16.i64(<vscale x 2 x float> [[VD]], <vscale x 2 x bfloat> [[VS2]], bfloat [[RS1]], <vscale x 2 x i1> [[VM]], i64 0, i64 [[VL]], i64 0)
+// CHECK-RV64-NEXT: ret <vscale x 2 x float> [[TMP0]]
+//
+vfloat32m1_t test_vfwadd_vf_bf16mf2_f32m1_rm_tumu(vbool32_t vm, vfloat32m1_t vd,
+ vbfloat16mf2_t vs2,
+ __bf16 rs1, size_t vl) {
+ return __riscv_vfwadd_vf_bf16mf2_f32m1_rm_tumu(vm, vd, vs2, rs1,
+ __RISCV_FRM_RNE, vl);
+}
+
+// CHECK-RV64-LABEL: define dso_local <vscale x 2 x float> @test_vfwadd_wv_bf16mf2_f32m1_rm_tumu(
+// CHECK-RV64-SAME: <vscale x 2 x i1> [[VM:%.*]], <vscale x 2 x float> [[VD:%.*]], <vscale x 2 x float> [[VS2:%.*]], <vscale x 2 x bfloat> [[VS1:%.*]], i64 noundef [[VL:%.*]]) #[[ATTR0]] {
+// CHECK-RV64-NEXT: [[ENTRY:.*:]]
+// CHECK-RV64-NEXT: [[TMP0:%.*]] = call <vscale x 2 x float> @llvm.riscv.vfwadd.w.mask.nxv2f32.nxv2bf16.i64(<vscale x 2 x float> [[VD]], <vscale x 2 x float> [[VS2]], <vscale x 2 x bfloat> [[VS1]], <vscale x 2 x i1> [[VM]], i64 0, i64 [[VL]], i64 0)
+// CHECK-RV64-NEXT: ret <vscale x 2 x float> [[TMP0]]
+//
+vfloat32m1_t test_vfwadd_wv_bf16mf2_f32m1_rm_tumu(vbool32_t vm, vfloat32m1_t vd,
+ vfloat32m1_t vs2,
+ vbfloat16mf2_t vs1,
+ size_t vl) {
+ return __riscv_vfwadd_wv_bf16mf2_f32m1_rm_tumu(vm, vd, vs2, vs1,
+ __RISCV_FRM_RNE, vl);
+}
+
+// CHECK-RV64-LABEL: define dso_local <vscale x 2 x float> @test_vfwadd_wf_bf16_f32m1_rm_tumu(
+// CHECK-RV64-SAME: <vscale x 2 x i1> [[VM:%.*]], <vscale x 2 x float> [[VD:%.*]], <vscale x 2 x float> [[VS2:%.*]], bfloat noundef [[RS1:%.*]], i64 noundef [[VL:%.*]]) #[[ATTR0]] {
+// CHECK-RV64-NEXT: [[ENTRY:.*:]]
+// CHECK-RV64-NEXT: [[TMP0:%.*]] = call <vscale x 2 x float> @llvm.riscv.vfwadd.w.mask.nxv2f32.bf16.i64(<vscale x 2 x float> [[VD]], <vscale x 2 x float> [[VS2]], bfloat [[RS1]], <vscale x 2 x i1> [[VM]], i64 0, i64 [[VL]], i64 0)
+// CHECK-RV64-NEXT: ret <vscale x 2 x float> [[TMP0]]
+//
+vfloat32m1_t test_vfwadd_wf_bf16_f32m1_rm_tumu(vbool32_t vm, vfloat32m1_t vd,
+ vfloat32m1_t vs2, __bf16 rs1,
+ size_t vl) {
+ return __riscv_vfwadd_wf_bf16_f32m1_rm_tumu(vm, vd, vs2, rs1, __RISCV_FRM_RNE,
+ vl);
+}
+
+// CHECK-RV64-LABEL: define dso_local <vscale x 4 x float> @test_vfwadd_vv_bf16m1_f32m2_rm_tumu(
+// CHECK-RV64-SAME: <vscale x 4 x i1> [[VM:%.*]], <vscale x 4 x float> [[VD:%.*]], <vscale x 4 x bfloat> [[VS2:%.*]], <vscale x 4 x bfloat> [[VS1:%.*]], i64 noundef [[VL:%.*]]) #[[ATTR0]] {
+// CHECK-RV64-NEXT: [[ENTRY:.*:]]
+// CHECK-RV64-NEXT: [[TMP0:%.*]] = call <vscale x 4 x float> @llvm.riscv.vfwadd.mask.nxv4f32.nxv4bf16.nxv4bf16.i64(<vscale x 4 x float> [[VD]], <vscale x 4 x bfloat> [[VS2]], <vscale x 4 x bfloat> [[VS1]], <vscale x 4 x i1> [[VM]], i64 0, i64 [[VL]], i64 0)
+// CHECK-RV64-NEXT: ret <vscale x 4 x float> [[TMP0]]
+//
+vfloat32m2_t test_vfwadd_vv_bf16m1_f32m2_rm_tumu(vbool16_t vm, vfloat32m2_t vd,
+ vbfloat16m1_t vs2,
+ vbfloat16m1_t vs1, size_t vl) {
+ return __riscv_vfwadd_vv_bf16m1_f32m2_rm_tumu(vm, vd, vs2, vs1,
+ __RISCV_FRM_RNE, vl);
+}
+
+// CHECK-RV64-LABEL: define dso_local <vscale x 4 x float> @test_vfwadd_vf_bf16m1_f32m2_rm_tumu(
+// CHECK-RV64-SAME: <vscale x 4 x i1> [[VM:%.*]], <vscale x 4 x float> [[VD:%.*]], <vscale x 4 x bfloat> [[VS2:%.*]], bfloat noundef [[RS1:%.*]], i64 noundef [[VL:%.*]]) #[[ATTR0]] {
+// CHECK-RV64-NEXT: [[ENTRY:.*:]]
+// CHECK-RV64-NEXT: [[TMP0:%.*]] = call <vscale x 4 x float> @llvm.riscv.vfwadd.mask.nxv4f32.nxv4bf16.bf16.i64(<vscale x 4 x float> [[VD]], <vscale x 4 x bfloat> [[VS2]], bfloat [[RS1]], <vscale x 4 x i1> [[VM]], i64 0, i64 [[VL]], i64 0)
+// CHECK-RV64-NEXT: ret <vscale x 4 x float> [[TMP0]]
+//
+vfloat32m2_t test_vfwadd_vf_bf16m1_f32m2_rm_tumu(vbool16_t vm, vfloat32m2_t vd,
+ vbfloat16m1_t vs2, __bf16 rs1,
+ size_t vl) {
+ return __riscv_vfwadd_vf_bf16m1_f32m2_rm_tumu(vm, vd, vs2, rs1,
+ __RISCV_FRM_RNE, vl);
+}
+
+// CHECK-RV64-LABEL: define dso_local <vscale x 4 x float> @test_vfwadd_wv_bf16m1_f32m2_rm_tumu(
+// CHECK-RV64-SAME: <vscale x 4 x i1> [[VM:%.*]], <vscale x 4 x float> [[VD:%.*]], <vscale x 4 x float> [[VS2:%.*]], <vscale x 4 x bfloat> [[VS1:%.*]], i64 noundef [[VL:%.*]]) #[[ATTR0]] {
+// CHECK-RV64-NEXT: [[ENTRY:.*:]]
+// CHECK-RV64-NEXT: [[TMP0:%.*]] = call <vscale x 4 x float> @llvm.riscv.vfwadd.w.mask.nxv4f32.nxv4bf16.i64(<vscale x 4 x float> [[VD]], <vscale x 4 x float> [[VS2]], <vscale x 4 x bfloat> [[VS1]], <vscale x 4 x i1> [[VM]], i64 0, i64 [[VL]], i64 0)
+// CHECK-RV64-NEXT: ret <vscale x 4 x float> [[TMP0]]
+//
+vfloat32m2_t test_vfwadd_wv_bf16m1_f32m2_rm_tumu(vbool16_t vm, vfloat32m2_t vd,
+ vfloat32m2_t vs2,
+ vbfloat16m1_t vs1, size_t vl) {
+ return __riscv_vfwadd_wv_bf16m1_f32m2_rm_tumu(vm, vd, vs2, vs1,
+ __RISCV_FRM_RNE, vl);
+}
+
+// CHECK-RV64-LABEL: define dso_local <vscale x 4 x float> @test_vfwadd_wf_bf16_f32m2_rm_tumu(
+// CHECK-RV64-SAME: <vscale x 4 x i1> [[VM:%.*]], <vscale x 4 x float> [[VD:%.*]], <vscale x 4 x float> [[VS2:%.*]], bfloat noundef [[RS1:%.*]], i64 noundef [[VL:%.*]]) #[[ATTR0]] {
+// CHECK-RV64-NEXT: [[ENTRY:.*:]]
+// CHECK-RV64-NEXT: [[TMP0:%.*]] = call <vscale x 4 x float> @llvm.riscv.vfwadd.w.mask.nxv4f32.bf16.i64(<vscale x 4 x float> [[VD]], <vscale x 4 x float> [[VS2]], bfloat [[RS1]], <vscale x 4 x i1> [[VM]], i64 0, i64 [[VL]], i64 0)
+// CHECK-RV64-NEXT: ret <vscale x 4 x float> [[TMP0]]
+//
+vfloat32m2_t test_vfwadd_wf_bf16_f32m2_rm_tumu(vbool16_t vm, vfloat32m2_t vd,
+ vfloat32m2_t vs2, __bf16 rs1,
+ size_t vl) {
+ return __riscv_vfwadd_wf_bf16_f32m2_rm_tumu(vm, vd, vs2, rs1, __RISCV_FRM_RNE,
+ vl);
+}
+
+// CHECK-RV64-LABEL: define dso_local <vscale x 8 x float> @test_vfwadd_vv_bf16m2_f32m4_rm_tumu(
+// CHECK-RV64-SAME: <vscale x 8 x i1> [[VM:%.*]], <vscale x 8 x float> [[VD:%.*]], <vscale x 8 x bfloat> [[VS2:%.*]], <vscale x 8 x bfloat> [[VS1:%.*]], i64 noundef [[VL:%.*]]) #[[ATTR0]] {
+// CHECK-RV64-NEXT: [[ENTRY:.*:]]
+// CHECK-RV64-NEXT: [[TMP0:%.*]] = call <vscale x 8 x float> @llvm.riscv.vfwadd.mask.nxv8f32.nxv8bf16.nxv8bf16.i64(<vscale x 8 x float> [[VD]], <vscale x 8 x bfloat> [[VS2]], <vscale x 8 x bfloat> [[VS1]], <vscale x 8 x i1> [[VM]], i64 0, i64 [[VL]], i64 0)
+// CHECK-RV64-NEXT: ret <vscale x 8 x float> [[TMP0]]
+//
+vfloat32m4_t test_vfwadd_vv_bf16m2_f32m4_rm_tumu(vbool8_t vm, vfloat32m4_t vd,
+ vbfloat16m2_t vs2,
+ vbfloat16m2_t vs1, size_t vl) {
+ return __riscv_vfwadd_vv_bf16m2_f32m4_rm_tumu(vm, vd, vs2, vs1,
+ __RISCV_FRM_RNE, vl);
+}
+
+// CHECK-RV64-LABEL: define dso_local <vscale x 8 x float> @test_vfwadd_vf_bf16m2_f32m4_rm_tumu(
+// CHECK-RV64-SAME: <vscale x 8 x i1> [[VM:%.*]], <vscale x 8 x float> [[VD:%.*]], <vscale x 8 x bfloat> [[VS2:%.*]], bfloat noundef [[RS1:%.*]], i64 noundef [[VL:%.*]]) #[[ATTR0]] {
+// CHECK-RV64-NEXT: [[ENTRY:.*:]]
+// CHECK-RV64-NEXT: [[TMP0:%.*]] = call <vscale x 8 x float> @llvm.riscv.vfwadd.mask.nxv8f32.nxv8bf16.bf16.i64(<vscale x 8 x float> [[VD]], <vscale x 8 x bfloat> [[VS2]], bfloat [[RS1]], <vscale x 8 x i1> [[VM]], i64 0, i64 [[VL]], i64 0)
+// CHECK-RV64-NEXT: ret <vscale x 8 x float> [[TMP0]]
+//
+vfloat32m4_t test_vfwadd_vf_bf16m2_f32m4_rm_tumu(vbool8_t vm, vfloat32m4_t vd,
+ vbfloat16m2_t vs2, __bf16 rs1,
+ size_t vl) {
+ return __riscv_vfwadd_vf_bf16m2_f32m4_rm_tumu(vm, vd, vs2, rs1,
+ __RISCV_FRM_RNE, vl);
+}
+
+// CHECK-RV64-LABEL: define dso_local <vscale x 8 x float> @test_vfwadd_wv_bf16m2_f32m4_rm_tumu(
+// CHECK-RV64-SAME: <vscale x 8 x i1> [[VM:%.*]], <vscale x 8 x float> [[VD:%.*]], <vscale x 8 x float> [[VS2:%.*]], <vscale x 8 x bfloat> [[VS1:%.*]], i64 noundef [[VL:%.*]]) #[[ATTR0]] {
+// CHECK-RV64-NEXT: [[ENTRY:.*:]]
+// CHECK-RV64-NEXT: [[TMP0:%.*]] = call <vscale x 8 x float> @llvm.riscv.vfwadd.w.mask.nxv8f32.nxv8bf16.i64(<vscale x 8 x float> [[VD]], <vscale x 8 x float> [[VS2]], <vscale x 8 x bfloat> [[VS1]], <vscale x 8 x i1> [[VM]], i64 0, i64 [[VL]], i64 0)
+// CHECK-RV64-NEXT: ret <vscale x 8 x float> [[TMP0]]
+//
+vfloat32m4_t test_vfwadd_wv_bf16m2_f32m4_rm_tumu(vbool8_t vm, vfloat32m4_t vd,
+ vfloat32m4_t vs2,
+ vbfloat16m2_t vs1, size_t vl) {
+ return __riscv_vfwadd_wv_bf16m2_f32m4_rm_tumu(vm, vd, vs2, vs1,
+ __RISCV_FRM_RNE, vl);
+}
+
+// CHECK-RV64-LABEL: define dso_local <vscale x 8 x float> @test_vfwadd_wf_bf16_f32m4_rm_tumu(
+// CHECK-RV64-SAME: <vscale x 8 x i1> [[VM:%.*]], <vscale x 8 x float> [[VD:%.*]], <vscale x 8 x float> [[VS2:%.*]], bfloat noundef [[RS1:%.*]], i64 noundef [[VL:%.*]]) #[[ATTR0]] {
+// CHECK-RV64-NEXT: [[ENTRY:.*:]]
+// CHECK-RV64-NEXT: [[TMP0:%.*]] = call <vscale x 8 x float> @llvm.riscv.vfwadd.w.mask.nxv8f32.bf16.i64(<vscale x 8 x float> [[VD]], <vscale x 8 x float> [[VS2]], bfloat [[RS1]], <vscale x 8 x i1> [[VM]], i64 0, i64 [[VL]], i64 0)
+// CHECK-RV64-NEXT: ret <vscale x 8 x float> [[TMP0]]
+//
+vfloat32m4_t test_vfwadd_wf_bf16_f32m4_rm_tumu(vbool8_t vm, vfloat32m4_t vd,
+ vfloat32m4_t vs2, __bf16 rs1,
+ size_t vl) {
+ return __riscv_vfwadd_wf_bf16_f32m4_rm_tumu(vm, vd, vs2, rs1, __RISCV_FRM_RNE,
+ vl);
+}
+
+// CHECK-RV64-LABEL: define dso_local <vscale x 16 x float> @test_vfwadd_vv_bf16m4_f32m8_rm_tumu(
+// CHECK-RV64-SAME: <vscale x 16 x i1> [[VM:%.*]], <vscale x 16 x float> [[VD:%.*]], <vscale x 16 x bfloat> [[VS2:%.*]], <vscale x 16 x bfloat> [[VS1:%.*]], i64 noundef [[VL:%.*]]) #[[ATTR0]] {
+// CHECK-RV64-NEXT: [[ENTRY:.*:]]
+// CHECK-RV64-NEXT: [[TMP0:%.*]] = call <vscale x 16 x float> @llvm.riscv.vfwadd.mask.nxv16f32.nxv16bf16.nxv16bf16.i64(<vscale x 16 x float> [[VD]], <vscale x 16 x bfloat> [[VS2]], <vscale x 16 x bfloat> [[VS1]], <vscale x 16 x i1> [[VM]], i64 0, i64 [[VL]], i64 0)
+// CHECK-RV64-NEXT: ret <vscale x 16 x float> [[TMP0]]
+//
+vfloat32m8_t test_vfwadd_vv_bf16m4_f32m8_rm_tumu(vbool4_t vm, vfloat32m8_t vd,
+ vbfloat16m4_t vs2,
+ vbfloat16m4_t vs1, size_t vl) {
+ return __riscv_vfwadd_vv_bf16m4_f32m8_rm_tumu(vm, vd, vs2, vs1,
+ __RISCV_FRM_RNE, vl);
+}
+
+// CHECK-RV64-LABEL: define dso_local <vscale x 16 x float> @test_vfwadd_vf_bf16m4_f32m8_rm_tumu(
+// CHECK-RV64-SAME: <vscale x 16 x i1> [[VM:%.*]], <vscale x 16 x float> [[VD:%.*]], <vscale x 16 x bfloat> [[VS2:%.*]], bfloat noundef [[RS1:%.*]], i64 noundef [[VL:%.*]]) #[[ATTR0]] {
+// CHECK-RV64-NEXT: [[ENTRY:.*:]]
+// CHECK-RV64-NEXT: [[TMP0:%.*]] = call <vscale x 16 x float> @llvm.riscv.vfwadd.mask.nxv16f32.nxv16bf16.bf16.i64(<vscale x 16 x float> [[VD]], <vscale x 16 x bfloat> [[VS2]], bfloat [[RS1]], <vscale x 16 x i1> [[VM]], i64 0, i64 [[VL]], i64 0)
+// CHECK-RV64-NEXT: ret <vscale x 16 x float> [[TMP0]]
+//
+vfloat32m8_t test_vfwadd_vf_bf16m4_f32m8_rm_tumu(vbool4_t vm, vfloat32m8_t vd,
+ vbfloat16m4_t vs2, __bf16 rs1,
+ size_t vl) {
+ return __riscv_vfwadd_vf_bf16m4_f32m8_rm_tumu(vm, vd, vs2, rs1,
+ __RISCV_FRM_RNE, vl);
+}
+
+// CHECK-RV64-LABEL: define dso_local <vscale x 16 x float> @test_vfwadd_wv_bf16m4_f32m8_rm_tumu(
+// CHECK-RV64-SAME: <vscale x 16 x i1> [[VM:%.*]], <vscale x 16 x float> [[VD:%.*]], <vscale x 16 x float> [[VS2:%.*]], <vscale x 16 x bfloat> [[VS1:%.*]], i64 noundef [[VL:%.*]]) #[[ATTR0]] {
+// CHECK-RV64-NEXT: [[ENTRY:.*:]]
+// CHECK-RV64-NEXT: [[TMP0:%.*]] = call <vscale x 16 x float> @llvm.riscv.vfwadd.w.mask.nxv16f32.nxv16bf16.i64(<vscale x 16 x float> [[VD]], <vscale x 16 x float> [[VS2]], <vscale x 16 x bfloat> [[VS1]], <vscale x 16 x i1> [[VM]], i64 0, i64 [[VL]], i64 0)
+// CHECK-RV64-NEXT: ret <vscale x 16 x float> [[TMP0]]
+//
+vfloat32m8_t test_vfwadd_wv_bf16m4_f32m8_rm_tumu(vbool4_t vm, vfloat32m8_t vd,
+ vfloat32m8_t vs2,
+ vbfloat16m4_t vs1, size_t vl) {
+ return __riscv_vfwadd_wv_bf16m4_f32m8_rm_tumu(vm, vd, vs2, vs1,
+ __RISCV_FRM_RNE, vl);
+}
+
+// CHECK-RV64-LABEL: define dso_local <vscale x 16 x float> @test_vfwadd_wf_bf16_f32m8_rm_tumu(
+// CHECK-RV64-SAME: <vscale x 16 x i1> [[VM:%.*]], <vscale x 16 x float> [[VD:%.*]], <vscale x 16 x float> [[VS2:%.*]], bfloat noundef [[RS1:%.*]], i64 noundef [[VL:%.*]]) #[[ATTR0]] {
+// CHECK-RV64-NEXT: [[ENTRY:.*:]]
+// CHECK-RV64-NEXT: [[TMP0:%.*]] = call <vscale x 16 x float> @llvm.riscv.vfwadd.w.mask.nxv16f32.bf16.i64(<vscale x 16 x float> [[VD]], <vscale x 16 x float> [[VS2]], bfloat [[RS1]], <vscale x 16 x i1> [[VM]], i64 0, i64 [[VL]], i64 0)
+// CHECK-RV64-NEXT: ret <vscale x 16 x float> [[TMP0]]
+//
+vfloat32m8_t test_vfwadd_wf_bf16_f32m8_rm_tumu(vbool4_t vm, vfloat32m8_t vd,
+ vfloat32m8_t vs2, __bf16 rs1,
+ size_t vl) {
+ return __riscv_vfwadd_wf_bf16_f32m8_rm_tumu(vm, vd, vs2, rs1, __RISCV_FRM_RNE,
+ vl);
+}
+
+// CHECK-RV64-LABEL: define dso_local <vscale x 1 x float> @test_vfwadd_vv_bf16mf4_f32mf2_rm_mu(
+// CHECK-RV64-SAME: <vscale x 1 x i1> [[VM:%.*]], <vscale x 1 x float> [[VD:%.*]], <vscale x 1 x bfloat> [[VS2:%.*]], <vscale x 1 x bfloat> [[VS1:%.*]], i64 noundef [[VL:%.*]]) #[[ATTR0]] {
+// CHECK-RV64-NEXT: [[ENTRY:.*:]]
+// CHECK-RV64-NEXT: [[TMP0:%.*]] = call <vscale x 1 x float> @llvm.riscv.vfwadd.mask.nxv1f32.nxv1bf16.nxv1bf16.i64(<vscale x 1 x float> [[VD]], <vscale x 1 x bfloat> [[VS2]], <vscale x 1 x bfloat> [[VS1]], <vscale x 1 x i1> [[VM]], i64 0, i64 [[VL]], i64 1)
+// CHECK-RV64-NEXT: ret <vscale x 1 x float> [[TMP0]]
+//
+vfloat32mf2_t test_vfwadd_vv_bf16mf4_f32mf2_rm_mu(vbool64_t vm,
+ vfloat32mf2_t vd,
+ vbfloat16mf4_t vs2,
+ vbfloat16mf4_t vs1,
+ size_t vl) {
+ return __riscv_vfwadd_vv_bf16mf4_f32mf2_rm_mu(vm, vd, vs2, vs1,
+ __RISCV_FRM_RNE, vl);
+}
+
+// CHECK-RV64-LABEL: define dso_local <vscale x 1 x float> @test_vfwadd_vf_bf16mf4_f32mf2_rm_mu(
+// CHECK-RV64-SAME: <vscale x 1 x i1> [[VM:%.*]], <vscale x 1 x float> [[VD:%.*]], <vscale x 1 x bfloat> [[VS2:%.*]], bfloat noundef [[RS1:%.*]], i64 noundef [[VL:%.*]]) #[[ATTR0]] {
+// CHECK-RV64-NEXT: [[ENTRY:.*:]]
+// CHECK-RV64-NEXT: [[TMP0:%.*]] = call <vscale x 1 x float> @llvm.riscv.vfwadd.mask.nxv1f32.nxv1bf16.bf16.i64(<vscale x 1 x float> [[VD]], <vscale x 1 x bfloat> [[VS2]], bfloat [[RS1]], <vscale x 1 x i1> [[VM]], i64 0, i64 [[VL]], i64 1)
+// CHECK-RV64-NEXT: ret <vscale x 1 x float> [[TMP0]]
+//
+vfloat32mf2_t test_vfwadd_vf_bf16mf4_f32mf2_rm_mu(vbool64_t vm,
+ vfloat32mf2_t vd,
+ vbfloat16mf4_t vs2,
+ __bf16 rs1, size_t vl) {
+ return __riscv_vfwadd_vf_bf16mf4_f32mf2_rm_mu(vm, vd, vs2, rs1,
+ __RISCV_FRM_RNE, vl);
+}
+
+// CHECK-RV64-LABEL: define dso_local <vscale x 1 x float> @test_vfwadd_wv_bf16mf4_f32mf2_rm_mu(
+// CHECK-RV64-SAME: <vscale x 1 x i1> [[VM:%.*]], <vscale x 1 x float> [[VD:%.*]], <vscale x 1 x float> [[VS2:%.*]], <vscale x 1 x bfloat> [[VS1:%.*]], i64 noundef [[VL:%.*]]) #[[ATTR0]] {
+// CHECK-RV64-NEXT: [[ENTRY:.*:]]
+// CHECK-RV64-NEXT: [[TMP0:%.*]] = call <vscale x 1 x float> @llvm.riscv.vfwadd.w.mask.nxv1f32.nxv1bf16.i64(<vscale x 1 x float> [[VD]], <vscale x 1 x float> [[VS2]], <vscale x 1 x bfloat> [[VS1]], <vscale x 1 x i1> [[VM]], i64 0, i64 [[VL]], i64 1)
+// CHECK-RV64-NEXT: ret <vscale x 1 x float> [[TMP0]]
+//
+vfloat32mf2_t test_vfwadd_wv_bf16mf4_f32mf2_rm_mu(vbool64_t vm,
+ vfloat32mf2_t vd,
+ vfloat32mf2_t vs2,
+ vbfloat16mf4_t vs1,
+ size_t vl) {
+ return __riscv_vfwadd_wv_bf16mf4_f32mf2_rm_mu(vm, vd, vs2, vs1,
+ __RISCV_FRM_RNE, vl);
+}
+
+// CHECK-RV64-LABEL: define dso_local <vscale x 1 x float> @test_vfwadd_wf_bf16_f32mf2_rm_mu(
+// CHECK-RV64-SAME: <vscale x 1 x i1> [[VM:%.*]], <vscale x 1 x float> [[VD:%.*]], <vscale x 1 x float> [[VS2:%.*]], bfloat noundef [[RS1:%.*]], i64 noundef [[VL:%.*]]) #[[ATTR0]] {
+// CHECK-RV64-NEXT: [[ENTRY:.*:]]
+// CHECK-RV64-NEXT: [[TMP0:%.*]] = call <vscale x 1 x float> @llvm.riscv.vfwadd.w.mask.nxv1f32.bf16.i64(<vscale x 1 x float> [[VD]], <vscale x 1 x float> [[VS2]], bfloat [[RS1]], <vscale x 1 x i1> [[VM]], i64 0, i64 [[VL]], i64 1)
+// CHECK-RV64-NEXT: ret <vscale x 1 x float> [[TMP0]]
+//
+vfloat32mf2_t test_vfwadd_wf_bf16_f32mf2_rm_mu(vbool64_t vm, vfloat32mf2_t vd,
+ vfloat32mf2_t vs2, __bf16 rs1,
+ size_t vl) {
+ return __riscv_vfwadd_wf_bf16_f32mf2_rm_mu(vm, vd, vs2, rs1, __RISCV_FRM_RNE,
+ vl);
+}
+
+// CHECK-RV64-LABEL: define dso_local <vscale x 2 x float> @test_vfwadd_vv_bf16mf2_f32m1_rm_mu(
+// CHECK-RV64-SAME: <vscale x 2 x i1> [[VM:%.*]], <vscale x 2 x float> [[VD:%.*]], <vscale x 2 x bfloat> [[VS2:%.*]], <vscale x 2 x bfloat> [[VS1:%.*]], i64 noundef [[VL:%.*]]) #[[ATTR0]] {
+// CHECK-RV64-NEXT: [[ENTRY:.*:]]
+// CHECK-RV64-NEXT: [[TMP0:%.*]] = call <vscale x 2 x float> @llvm.riscv.vfwadd.mask.nxv2f32.nxv2bf16.nxv2bf16.i64(<vscale x 2 x float> [[VD]], <vscale x 2 x bfloat> [[VS2]], <vscale x 2 x bfloat> [[VS1]], <vscale x 2 x i1> [[VM]], i64 0, i64 [[VL]], i64 1)
+// CHECK-RV64-NEXT: ret <vscale x 2 x float> [[TMP0]]
+//
+vfloat32m1_t test_vfwadd_vv_bf16mf2_f32m1_rm_mu(vbool32_t vm, vfloat32m1_t vd,
+ vbfloat16mf2_t vs2,
+ vbfloat16mf2_t vs1, size_t vl) {
+ return __riscv_vfwadd_vv_bf16mf2_f32m1_rm_mu(vm, vd, vs2, vs1,
+ __RISCV_FRM_RNE, vl);
+}
+
+// CHECK-RV64-LABEL: define dso_local <vscale x 2 x float> @test_vfwadd_vf_bf16mf2_f32m1_rm_mu(
+// CHECK-RV64-SAME: <vscale x 2 x i1> [[VM:%.*]], <vscale x 2 x float> [[VD:%.*]], <vscale x 2 x bfloat> [[VS2:%.*]], bfloat noundef [[RS1:%.*]], i64 noundef [[VL:%.*]]) #[[ATTR0]] {
+// CHECK-RV64-NEXT: [[ENTRY:.*:]]
+// CHECK-RV64-NEXT: [[TMP0:%.*]] = call <vscale x 2 x float> @llvm.riscv.vfwadd.mask.nxv2f32.nxv2bf16.bf16.i64(<vscale x 2 x float> [[VD]], <vscale x 2 x bfloat> [[VS2]], bfloat [[RS1]], <vscale x 2 x i1> [[VM]], i64 0, i64 [[VL]], i64 1)
+// CHECK-RV64-NEXT: ret <vscale x 2 x float> [[TMP0]]
+//
+vfloat32m1_t test_vfwadd_vf_bf16mf2_f32m1_rm_mu(vbool32_t vm, vfloat32m1_t vd,
+ vbfloat16mf2_t vs2, __bf16 rs1,
+ size_t vl) {
+ return __riscv_vfwadd_vf_bf16mf2_f32m1_rm_mu(vm, vd, vs2, rs1,
+ __RISCV_FRM_RNE, vl);
+}
+
+// CHECK-RV64-LABEL: define dso_local <vscale x 2 x float> @test_vfwadd_wv_bf16mf2_f32m1_rm_mu(
+// CHECK-RV64-SAME: <vscale x 2 x i1> [[VM:%.*]], <vscale x 2 x float> [[VD:%.*]], <vscale x 2 x float> [[VS2:%.*]], <vscale x 2 x bfloat> [[VS1:%.*]], i64 noundef [[VL:%.*]]) #[[ATTR0]] {
+// CHECK-RV64-NEXT: [[ENTRY:.*:]]
+// CHECK-RV64-NEXT: [[TMP0:%.*]] = call <vscale x 2 x float> @llvm.riscv.vfwadd.w.mask.nxv2f32.nxv2bf16.i64(<vscale x 2 x float> [[VD]], <vscale x 2 x float> [[VS2]], <vscale x 2 x bfloat> [[VS1]], <vscale x 2 x i1> [[VM]], i64 0, i64 [[VL]], i64 1)
+// CHECK-RV64-NEXT: ret <vscale x 2 x float> [[TMP0]]
+//
+vfloat32m1_t test_vfwadd_wv_bf16mf2_f32m1_rm_mu(vbool32_t vm, vfloat32m1_t vd,
+ vfloat32m1_t vs2,
+ vbfloat16mf2_t vs1, size_t vl) {
+ return __riscv_vfwadd_wv_bf16mf2_f32m1_rm_mu(vm, vd, vs2, vs1,
+ __RISCV_FRM_RNE, vl);
+}
+
+// CHECK-RV64-LABEL: define dso_local <vscale x 2 x float> @test_vfwadd_wf_bf16_f32m1_rm_mu(
+// CHECK-RV64-SAME: <vscale x 2 x i1> [[VM:%.*]], <vscale x 2 x float> [[VD:%.*]], <vscale x 2 x float> [[VS2:%.*]], bfloat noundef [[RS1:%.*]], i64 noundef [[VL:%.*]]) #[[ATTR0]] {
+// CHECK-RV64-NEXT: [[ENTRY:.*:]]
+// CHECK-RV64-NEXT: [[TMP0:%.*]] = call <vscale x 2 x float> @llvm.riscv.vfwadd.w.mask.nxv2f32.bf16.i64(<vscale x 2 x float> [[VD]], <vscale x 2 x float> [[VS2]], bfloat [[RS1]], <vscale x 2 x i1> [[VM]], i64 0, i64 [[VL]], i64 1)
+// CHECK-RV64-NEXT: ret <vscale x 2 x float> [[TMP0]]
+//
+vfloat32m1_t test_vfwadd_wf_bf16_f32m1_rm_mu(vbool32_t vm, vfloat32m1_t vd,
+ vfloat32m1_t vs2, __bf16 rs1,
+ size_t vl) {
+ return __riscv_vfwadd_wf_bf16_f32m1_rm_mu(vm, vd, vs2, rs1, __RISCV_FRM_RNE,
+ vl);
+}
+
+// CHECK-RV64-LABEL: define dso_local <vscale x 4 x float> @test_vfwadd_vv_bf16m1_f32m2_rm_mu(
+// CHECK-RV64-SAME: <vscale x 4 x i1> [[VM:%.*]], <vscale x 4 x float> [[VD:%.*]], <vscale x 4 x bfloat> [[VS2:%.*]], <vscale x 4 x bfloat> [[VS1:%.*]], i64 noundef [[VL:%.*]]) #[[ATTR0]] {
+// CHECK-RV64-NEXT: [[ENTRY:.*:]]
+// CHECK-RV64-NEXT: [[TMP0:%.*]] = call <vscale x 4 x float> @llvm.riscv.vfwadd.mask.nxv4f32.nxv4bf16.nxv4bf16.i64(<vscale x 4 x float> [[VD]], <vscale x 4 x bfloat> [[VS2]], <vscale x 4 x bfloat> [[VS1]], <vscale x 4 x i1> [[VM]], i64 0, i64 [[VL]], i64 1)
+// CHECK-RV64-NEXT: ret <vscale x 4 x float> [[TMP0]]
+//
+vfloat32m2_t test_vfwadd_vv_bf16m1_f32m2_rm_mu(vbool16_t vm, vfloat32m2_t vd,
+ vbfloat16m1_t vs2,
+ vbfloat16m1_t vs1, size_t vl) {
+ return __riscv_vfwadd_vv_bf16m1_f32m2_rm_mu(vm, vd, vs2, vs1, __RISCV_FRM_RNE,
+ vl);
+}
+
+// CHECK-RV64-LABEL: define dso_local <vscale x 4 x float> @test_vfwadd_vf_bf16m1_f32m2_rm_mu(
+// CHECK-RV64-SAME: <vscale x 4 x i1> [[VM:%.*]], <vscale x 4 x float> [[VD:%.*]], <vscale x 4 x bfloat> [[VS2:%.*]], bfloat noundef [[RS1:%.*]], i64 noundef [[VL:%.*]]) #[[ATTR0]] {
+// CHECK-RV64-NEXT: [[ENTRY:.*:]]
+// CHECK-RV64-NEXT: [[TMP0:%.*]] = call <vscale x 4 x float> @llvm.riscv.vfwadd.mask.nxv4f32.nxv4bf16.bf16.i64(<vscale x 4 x float> [[VD]], <vscale x 4 x bfloat> [[VS2]], bfloat [[RS1]], <vscale x 4 x i1> [[VM]], i64 0, i64 [[VL]], i64 1)
+// CHECK-RV64-NEXT: ret <vscale x 4 x float> [[TMP0]]
+//
+vfloat32m2_t test_vfwadd_vf_bf16m1_f32m2_rm_mu(vbool16_t vm, vfloat32m2_t vd,
+ vbfloat16m1_t vs2, __bf16 rs1,
+ size_t vl) {
+ return __riscv_vfwadd_vf_bf16m1_f32m2_rm_mu(vm, vd, vs2, rs1, __RISCV_FRM_RNE,
+ vl);
+}
+
+// CHECK-RV64-LABEL: define dso_local <vscale x 4 x float> @test_vfwadd_wv_bf16m1_f32m2_rm_mu(
+// CHECK-RV64-SAME: <vscale x 4 x i1> [[VM:%.*]], <vscale x 4 x float> [[VD:%.*]], <vscale x 4 x float> [[VS2:%.*]], <vscale x 4 x bfloat> [[VS1:%.*]], i64 noundef [[VL:%.*]]) #[[ATTR0]] {
+// CHECK-RV64-NEXT: [[ENTRY:.*:]]
+// CHECK-RV64-NEXT: [[TMP0:%.*]] = call <vscale x 4 x float> @llvm.riscv.vfwadd.w.mask.nxv4f32.nxv4bf16.i64(<vscale x 4 x float> [[VD]], <vscale x 4 x float> [[VS2]], <vscale x 4 x bfloat> [[VS1]], <vscale x 4 x i1> [[VM]], i64 0, i64 [[VL]], i64 1)
+// CHECK-RV64-NEXT: ret <vscale x 4 x float> [[TMP0]]
+//
+vfloat32m2_t test_vfwadd_wv_bf16m1_f32m2_rm_mu(vbool16_t vm, vfloat32m2_t vd,
+ vfloat32m2_t vs2,
+ vbfloat16m1_t vs1, size_t vl) {
+ return __riscv_vfwadd_wv_bf16m1_f32m2_rm_mu(vm, vd, vs2, vs1, __RISCV_FRM_RNE,
+ vl);
+}
+
+// CHECK-RV64-LABEL: define dso_local <vscale x 4 x float> @test_vfwadd_wf_bf16_f32m2_rm_mu(
+// CHECK-RV64-SAME: <vscale x 4 x i1> [[VM:%.*]], <vscale x 4 x float> [[VD:%.*]], <vscale x 4 x float> [[VS2:%.*]], bfloat noundef [[RS1:%.*]], i64 noundef [[VL:%.*]]) #[[ATTR0]] {
+// CHECK-RV64-NEXT: [[ENTRY:.*:]]
+// CHECK-RV64-NEXT: [[TMP0:%.*]] = call <vscale x 4 x float> @llvm.riscv.vfwadd.w.mask.nxv4f32.bf16.i64(<vscale x 4 x float> [[VD]], <vscale x 4 x float> [[VS2]], bfloat [[RS1]], <vscale x 4 x i1> [[VM]], i64 0, i64 [[VL]], i64 1)
+// CHECK-RV64-NEXT: ret <vscale x 4 x float> [[TMP0]]
+//
+vfloat32m2_t test_vfwadd_wf_bf16_f32m2_rm_mu(vbool16_t vm, vfloat32m2_t vd,
+ vfloat32m2_t vs2, __bf16 rs1,
+ size_t vl) {
+ return __riscv_vfwadd_wf_bf16_f32m2_rm_mu(vm, vd, vs2, rs1, __RISCV_FRM_RNE,
+ vl);
+}
+
+// CHECK-RV64-LABEL: define dso_local <vscale x 8 x float> @test_vfwadd_vv_bf16m2_f32m4_rm_mu(
+// CHECK-RV64-SAME: <vscale x 8 x i1> [[VM:%.*]], <vscale x 8 x float> [[VD:%.*]], <vscale x 8 x bfloat> [[VS2:%.*]], <vscale x 8 x bfloat> [[VS1:%.*]], i64 noundef [[VL:%.*]]) #[[ATTR0]] {
+// CHECK-RV64-NEXT: [[ENTRY:.*:]]
+// CHECK-RV64-NEXT: [[TMP0:%.*]] = call <vscale x 8 x float> @llvm.riscv.vfwadd.mask.nxv8f32.nxv8bf16.nxv8bf16.i64(<vscale x 8 x float> [[VD]], <vscale x 8 x bfloat> [[VS2]], <vscale x 8 x bfloat> [[VS1]], <vscale x 8 x i1> [[VM]], i64 0, i64 [[VL]], i64 1)
+// CHECK-RV64-NEXT: ret <vscale x 8 x float> [[TMP0]]
+//
+vfloat32m4_t test_vfwadd_vv_bf16m2_f32m4_rm_mu(vbool8_t vm, vfloat32m4_t vd,
+ vbfloat16m2_t vs2,
+ vbfloat16m2_t vs1, size_t vl) {
+ return __riscv_vfwadd_vv_bf16m2_f32m4_rm_mu(vm, vd, vs2, vs1, __RISCV_FRM_RNE,
+ vl);
+}
+
+// CHECK-RV64-LABEL: define dso_local <vscale x 8 x float> @test_vfwadd_vf_bf16m2_f32m4_rm_mu(
+// CHECK-RV64-SAME: <vscale x 8 x i1> [[VM:%.*]], <vscale x 8 x float> [[VD:%.*]], <vscale x 8 x bfloat> [[VS2:%.*]], bfloat noundef [[RS1:%.*]], i64 noundef [[VL:%.*]]) #[[ATTR0]] {
+// CHECK-RV64-NEXT: [[ENTRY:.*:]]
+// CHECK-RV64-NEXT: [[TMP0:%.*]] = call <vscale x 8 x float> @llvm.riscv.vfwadd.mask.nxv8f32.nxv8bf16.bf16.i64(<vscale x 8 x float> [[VD]], <vscale x 8 x bfloat> [[VS2]], bfloat [[RS1]], <vscale x 8 x i1> [[VM]], i64 0, i64 [[VL]], i64 1)
+// CHECK-RV64-NEXT: ret <vscale x 8 x float> [[TMP0]]
+//
+vfloat32m4_t test_vfwadd_vf_bf16m2_f32m4_rm_mu(vbool8_t vm, vfloat32m4_t vd,
+ vbfloat16m2_t vs2, __bf16 rs1,
+ size_t vl) {
+ return __riscv_vfwadd_vf_bf16m2_f32m4_rm_mu(vm, vd, vs2, rs1, __RISCV_FRM_RNE,
+ vl);
+}
+
+// CHECK-RV64-LABEL: define dso_local <vscale x 8 x float> @test_vfwadd_wv_bf16m2_f32m4_rm_mu(
+// CHECK-RV64-SAME: <vscale x 8 x i1> [[VM:%.*]], <vscale x 8 x float> [[VD:%.*]], <vscale x 8 x float> [[VS2:%.*]], <vscale x 8 x bfloat> [[VS1:%.*]], i64 noundef [[VL:%.*]]) #[[ATTR0]] {
+// CHECK-RV64-NEXT: [[ENTRY:.*:]]
+// CHECK-RV64-NEXT: [[TMP0:%.*]] = call <vscale x 8 x float> @llvm.riscv.vfwadd.w.mask.nxv8f32.nxv8bf16.i64(<vscale x 8 x float> [[VD]], <vscale x 8 x float> [[VS2]], <vscale x 8 x bfloat> [[VS1]], <vscale x 8 x i1> [[VM]], i64 0, i64 [[VL]], i64 1)
+// CHECK-RV64-NEXT: ret <vscale x 8 x float> [[TMP0]]
+//
+vfloat32m4_t test_vfwadd_wv_bf16m2_f32m4_rm_mu(vbool8_t vm, vfloat32m4_t vd,
+ vfloat32m4_t vs2,
+ vbfloat16m2_t vs1, size_t vl) {
+ return __riscv_vfwadd_wv_bf16m2_f32m4_rm_mu(vm, vd, vs2, vs1, __RISCV_FRM_RNE,
+ vl);
+}
+
+// CHECK-RV64-LABEL: define dso_local <vscale x 8 x float> @test_vfwadd_wf_bf16_f32m4_rm_mu(
+// CHECK-RV64-SAME: <vscale x 8 x i1> [[VM:%.*]], <vscale x 8 x float> [[VD:%.*]], <vscale x 8 x float> [[VS2:%.*]], bfloat noundef [[RS1:%.*]], i64 noundef [[VL:%.*]]) #[[ATTR0]] {
+// CHECK-RV64-NEXT: [[ENTRY:.*:]]
+// CHECK-RV64-NEXT: [[TMP0:%.*]] = call <vscale x 8 x float> @llvm.riscv.vfwadd.w.mask.nxv8f32.bf16.i64(<vscale x 8 x float> [[VD]], <vscale x 8 x float> [[VS2]], bfloat [[RS1]], <vscale x 8 x i1> [[VM]], i64 0, i64 [[VL]], i64 1)
+// CHECK-RV64-NEXT: ret <vscale x 8 x float> [[TMP0]]
+//
+vfloat32m4_t test_vfwadd_wf_bf16_f32m4_rm_mu(vbool8_t vm, vfloat32m4_t vd,
+ vfloat32m4_t vs2, __bf16 rs1,
+ size_t vl) {
+ return __riscv_vfwadd_wf_bf16_f32m4_rm_mu(vm, vd, vs2, rs1, __RISCV_FRM_RNE,
+ vl);
+}
+
+// CHECK-RV64-LABEL: define dso_local <vscale x 16 x float> @test_vfwadd_vv_bf16m4_f32m8_rm_mu(
+// CHECK-RV64-SAME: <vscale x 16 x i1> [[VM:%.*]], <vscale x 16 x float> [[VD:%.*]], <vscale x 16 x bfloat> [[VS2:%.*]], <vscale x 16 x bfloat> [[VS1:%.*]], i64 noundef [[VL:%.*]]) #[[ATTR0]] {
+// CHECK-RV64-NEXT: [[ENTRY:.*:]]
+// CHECK-RV64-NEXT: [[TMP0:%.*]] = call <vscale x 16 x float> @llvm.riscv.vfwadd.mask.nxv16f32.nxv16bf16.nxv16bf16.i64(<vscale x 16 x float> [[VD]], <vscale x 16 x bfloat> [[VS2]], <vscale x 16 x bfloat> [[VS1]], <vscale x 16 x i1> [[VM]], i64 0, i64 [[VL]], i64 1)
+// CHECK-RV64-NEXT: ret <vscale x 16 x float> [[TMP0]]
+//
+vfloat32m8_t test_vfwadd_vv_bf16m4_f32m8_rm_mu(vbool4_t vm, vfloat32m8_t vd,
+ vbfloat16m4_t vs2,
+ vbfloat16m4_t vs1, size_t vl) {
+ return __riscv_vfwadd_vv_bf16m4_f32m8_rm_mu(vm, vd, vs2, vs1, __RISCV_FRM_RNE,
+ vl);
+}
+
+// CHECK-RV64-LABEL: define dso_local <vscale x 16 x float> @test_vfwadd_vf_bf16m4_f32m8_rm_mu(
+// CHECK-RV64-SAME: <vscale x 16 x i1> [[VM:%.*]], <vscale x 16 x float> [[VD:%.*]], <vscale x 16 x bfloat> [[VS2:%.*]], bfloat noundef [[RS1:%.*]], i64 noundef [[VL:%.*]]) #[[ATTR0]] {
+// CHECK-RV64-NEXT: [[ENTRY:.*:]]
+// CHECK-RV64-NEXT: [[TMP0:%.*]] = call <vscale x 16 x float> @llvm.riscv.vfwadd.mask.nxv16f32.nxv16bf16.bf16.i64(<vscale x 16 x float> [[VD]], <vscale x 16 x bfloat> [[VS2]], bfloat [[RS1]], <vscale x 16 x i1> [[VM]], i64 0, i64 [[VL]], i64 1)
+// CHECK-RV64-NEXT: ret <vscale x 16 x float> [[TMP0]]
+//
+vfloat32m8_t test_vfwadd_vf_bf16m4_f32m8_rm_mu(vbool4_t vm, vfloat32m8_t vd,
+ vbfloat16m4_t vs2, __bf16 rs1,
+ size_t vl) {
+ return __riscv_vfwadd_vf_bf16m4_f32m8_rm_mu(vm, vd, vs2, rs1, __RISCV_FRM_RNE,
+ vl);
+}
+
+// CHECK-RV64-LABEL: define dso_local <vscale x 16 x float> @test_vfwadd_wv_bf16m4_f32m8_rm_mu(
+// CHECK-RV64-SAME: <vscale x 16 x i1> [[VM:%.*]], <vscale x 16 x float> [[VD:%.*]], <vscale x 16 x float> [[VS2:%.*]], <vscale x 16 x bfloat> [[VS1:%.*]], i64 noundef [[VL:%.*]]) #[[ATTR0]] {
+// CHECK-RV64-NEXT: [[ENTRY:.*:]]
+// CHECK-RV64-NEXT: [[TMP0:%.*]] = call <vscale x 16 x float> @llvm.riscv.vfwadd.w.mask.nxv16f32.nxv16bf16.i64(<vscale x 16 x float> [[VD]], <vscale x 16 x float> [[VS2]], <vscale x 16 x bfloat> [[VS1]], <vscale x 16 x i1> [[VM]], i64 0, i64 [[VL]], i64 1)
+// CHECK-RV64-NEXT: ret <vscale x 16 x float> [[TMP0]]
+//
+vfloat32m8_t test_vfwadd_wv_bf16m4_f32m8_rm_mu(vbool4_t vm, vfloat32m8_t vd,
+ vfloat32m8_t vs2,
+ vbfloat16m4_t vs1, size_t vl) {
+ return __riscv_vfwadd_wv_bf16m4_f32m8_rm_mu(vm, vd, vs2, vs1, __RISCV_FRM_RNE,
+ vl);
+}
+
+// CHECK-RV64-LABEL: define dso_local <vscale x 16 x float> @test_vfwadd_wf_bf16_f32m8_rm_mu(
+// CHECK-RV64-SAME: <vscale x 16 x i1> [[VM:%.*]], <vscale x 16 x float> [[VD:%.*]], <vscale x 16 x float> [[VS2:%.*]], bfloat noundef [[RS1:%.*]], i64 noundef [[VL:%.*]]) #[[ATTR0]] {
+// CHECK-RV64-NEXT: [[ENTRY:.*:]]
+// CHECK-RV64-NEXT: [[TMP0:%.*]] = call <vscale x 16 x float> @llvm.riscv.vfwadd.w.mask.nxv16f32.bf16.i64(<vscale x 16 x float> [[VD]], <vscale x 16 x float> [[VS2]], bfloat [[RS1]], <vscale x 16 x i1> [[VM]], i64 0, i64 [[VL]], i64 1)
+// CHECK-RV64-NEXT: ret <vscale x 16 x float> [[TMP0]]
+//
+vfloat32m8_t test_vfwadd_wf_bf16_f32m8_rm_mu(vbool4_t vm, vfloat32m8_t vd,
+ vfloat32m8_t vs2, __bf16 rs1,
+ size_t vl) {
+ return __riscv_vfwadd_wf_bf16_f32m8_rm_mu(vm, vd, vs2, rs1, __RISCV_FRM_RNE,
+ vl);
+}
diff --git a/clang/test/CodeGen/RISCV/rvv-intrinsics-autogenerated/zvfbfa/policy/non-overloaded/vfwcvt.c b/clang/test/CodeGen/RISCV/rvv-intrinsics-autogenerated/zvfbfa/policy/non-overloaded/vfwcvt.c
new file mode 100644
index 0000000..9151319
--- /dev/null
+++ b/clang/test/CodeGen/RISCV/rvv-intrinsics-autogenerated/zvfbfa/policy/non-overloaded/vfwcvt.c
@@ -0,0 +1,765 @@
+// NOTE: Assertions have been autogenerated by utils/update_cc_test_checks.py UTC_ARGS: --version 5
+// REQUIRES: riscv-registered-target
+// RUN: %clang_cc1 -triple riscv64 -target-feature +v \
+// RUN: -target-feature +experimental-zvfbfa -disable-O0-optnone \
+// RUN: -emit-llvm %s -o - | opt -S -passes=mem2reg | \
+// RUN: FileCheck --check-prefix=CHECK-RV64 %s
+
+#include <riscv_vector.h>
+
+// CHECK-RV64-LABEL: define dso_local <vscale x 1 x bfloat> @test_vfwcvt_f_x_v_bf16mf4_tu(
+// CHECK-RV64-SAME: <vscale x 1 x bfloat> [[VD:%.*]], <vscale x 1 x i8> [[VS2:%.*]], i64 noundef [[VL:%.*]]) #[[ATTR0:[0-9]+]] {
+// CHECK-RV64-NEXT: [[ENTRY:.*:]]
+// CHECK-RV64-NEXT: [[TMP0:%.*]] = call <vscale x 1 x bfloat> @llvm.riscv.vfwcvt.f.x.v.nxv1bf16.nxv1i8.i64(<vscale x 1 x bfloat> [[VD]], <vscale x 1 x i8> [[VS2]], i64 [[VL]])
+// CHECK-RV64-NEXT: ret <vscale x 1 x bfloat> [[TMP0]]
+//
+vbfloat16mf4_t test_vfwcvt_f_x_v_bf16mf4_tu(vbfloat16mf4_t vd, vint8mf8_t vs2,
+ size_t vl) {
+ return __riscv_vfwcvt_f_x_v_bf16mf4_tu(vd, vs2, vl);
+}
+
+// CHECK-RV64-LABEL: define dso_local <vscale x 2 x bfloat> @test_vfwcvt_f_x_v_bf16mf2_tu(
+// CHECK-RV64-SAME: <vscale x 2 x bfloat> [[VD:%.*]], <vscale x 2 x i8> [[VS2:%.*]], i64 noundef [[VL:%.*]]) #[[ATTR0]] {
+// CHECK-RV64-NEXT: [[ENTRY:.*:]]
+// CHECK-RV64-NEXT: [[TMP0:%.*]] = call <vscale x 2 x bfloat> @llvm.riscv.vfwcvt.f.x.v.nxv2bf16.nxv2i8.i64(<vscale x 2 x bfloat> [[VD]], <vscale x 2 x i8> [[VS2]], i64 [[VL]])
+// CHECK-RV64-NEXT: ret <vscale x 2 x bfloat> [[TMP0]]
+//
+vbfloat16mf2_t test_vfwcvt_f_x_v_bf16mf2_tu(vbfloat16mf2_t vd, vint8mf4_t vs2,
+ size_t vl) {
+ return __riscv_vfwcvt_f_x_v_bf16mf2_tu(vd, vs2, vl);
+}
+
+// CHECK-RV64-LABEL: define dso_local <vscale x 4 x bfloat> @test_vfwcvt_f_x_v_bf16m1_tu(
+// CHECK-RV64-SAME: <vscale x 4 x bfloat> [[VD:%.*]], <vscale x 4 x i8> [[VS2:%.*]], i64 noundef [[VL:%.*]]) #[[ATTR0]] {
+// CHECK-RV64-NEXT: [[ENTRY:.*:]]
+// CHECK-RV64-NEXT: [[TMP0:%.*]] = call <vscale x 4 x bfloat> @llvm.riscv.vfwcvt.f.x.v.nxv4bf16.nxv4i8.i64(<vscale x 4 x bfloat> [[VD]], <vscale x 4 x i8> [[VS2]], i64 [[VL]])
+// CHECK-RV64-NEXT: ret <vscale x 4 x bfloat> [[TMP0]]
+//
+vbfloat16m1_t test_vfwcvt_f_x_v_bf16m1_tu(vbfloat16m1_t vd, vint8mf2_t vs2,
+ size_t vl) {
+ return __riscv_vfwcvt_f_x_v_bf16m1_tu(vd, vs2, vl);
+}
+
+// CHECK-RV64-LABEL: define dso_local <vscale x 8 x bfloat> @test_vfwcvt_f_x_v_bf16m2_tu(
+// CHECK-RV64-SAME: <vscale x 8 x bfloat> [[VD:%.*]], <vscale x 8 x i8> [[VS2:%.*]], i64 noundef [[VL:%.*]]) #[[ATTR0]] {
+// CHECK-RV64-NEXT: [[ENTRY:.*:]]
+// CHECK-RV64-NEXT: [[TMP0:%.*]] = call <vscale x 8 x bfloat> @llvm.riscv.vfwcvt.f.x.v.nxv8bf16.nxv8i8.i64(<vscale x 8 x bfloat> [[VD]], <vscale x 8 x i8> [[VS2]], i64 [[VL]])
+// CHECK-RV64-NEXT: ret <vscale x 8 x bfloat> [[TMP0]]
+//
+vbfloat16m2_t test_vfwcvt_f_x_v_bf16m2_tu(vbfloat16m2_t vd, vint8m1_t vs2,
+ size_t vl) {
+ return __riscv_vfwcvt_f_x_v_bf16m2_tu(vd, vs2, vl);
+}
+
+// CHECK-RV64-LABEL: define dso_local <vscale x 16 x bfloat> @test_vfwcvt_f_x_v_bf16m4_tu(
+// CHECK-RV64-SAME: <vscale x 16 x bfloat> [[VD:%.*]], <vscale x 16 x i8> [[VS2:%.*]], i64 noundef [[VL:%.*]]) #[[ATTR0]] {
+// CHECK-RV64-NEXT: [[ENTRY:.*:]]
+// CHECK-RV64-NEXT: [[TMP0:%.*]] = call <vscale x 16 x bfloat> @llvm.riscv.vfwcvt.f.x.v.nxv16bf16.nxv16i8.i64(<vscale x 16 x bfloat> [[VD]], <vscale x 16 x i8> [[VS2]], i64 [[VL]])
+// CHECK-RV64-NEXT: ret <vscale x 16 x bfloat> [[TMP0]]
+//
+vbfloat16m4_t test_vfwcvt_f_x_v_bf16m4_tu(vbfloat16m4_t vd, vint8m2_t vs2,
+ size_t vl) {
+ return __riscv_vfwcvt_f_x_v_bf16m4_tu(vd, vs2, vl);
+}
+
+// CHECK-RV64-LABEL: define dso_local <vscale x 32 x bfloat> @test_vfwcvt_f_x_v_bf16m8_tu(
+// CHECK-RV64-SAME: <vscale x 32 x bfloat> [[VD:%.*]], <vscale x 32 x i8> [[VS2:%.*]], i64 noundef [[VL:%.*]]) #[[ATTR0]] {
+// CHECK-RV64-NEXT: [[ENTRY:.*:]]
+// CHECK-RV64-NEXT: [[TMP0:%.*]] = call <vscale x 32 x bfloat> @llvm.riscv.vfwcvt.f.x.v.nxv32bf16.nxv32i8.i64(<vscale x 32 x bfloat> [[VD]], <vscale x 32 x i8> [[VS2]], i64 [[VL]])
+// CHECK-RV64-NEXT: ret <vscale x 32 x bfloat> [[TMP0]]
+//
+vbfloat16m8_t test_vfwcvt_f_x_v_bf16m8_tu(vbfloat16m8_t vd, vint8m4_t vs2,
+ size_t vl) {
+ return __riscv_vfwcvt_f_x_v_bf16m8_tu(vd, vs2, vl);
+}
+
+// CHECK-RV64-LABEL: define dso_local <vscale x 1 x bfloat> @test_vfwcvt_f_xu_v_bf16mf4_tu(
+// CHECK-RV64-SAME: <vscale x 1 x bfloat> [[VD:%.*]], <vscale x 1 x i8> [[VS2:%.*]], i64 noundef [[VL:%.*]]) #[[ATTR0]] {
+// CHECK-RV64-NEXT: [[ENTRY:.*:]]
+// CHECK-RV64-NEXT: [[TMP0:%.*]] = call <vscale x 1 x bfloat> @llvm.riscv.vfwcvt.f.xu.v.nxv1bf16.nxv1i8.i64(<vscale x 1 x bfloat> [[VD]], <vscale x 1 x i8> [[VS2]], i64 [[VL]])
+// CHECK-RV64-NEXT: ret <vscale x 1 x bfloat> [[TMP0]]
+//
+vbfloat16mf4_t test_vfwcvt_f_xu_v_bf16mf4_tu(vbfloat16mf4_t vd, vuint8mf8_t vs2,
+ size_t vl) {
+ return __riscv_vfwcvt_f_xu_v_bf16mf4_tu(vd, vs2, vl);
+}
+
+// CHECK-RV64-LABEL: define dso_local <vscale x 2 x bfloat> @test_vfwcvt_f_xu_v_bf16mf2_tu(
+// CHECK-RV64-SAME: <vscale x 2 x bfloat> [[VD:%.*]], <vscale x 2 x i8> [[VS2:%.*]], i64 noundef [[VL:%.*]]) #[[ATTR0]] {
+// CHECK-RV64-NEXT: [[ENTRY:.*:]]
+// CHECK-RV64-NEXT: [[TMP0:%.*]] = call <vscale x 2 x bfloat> @llvm.riscv.vfwcvt.f.xu.v.nxv2bf16.nxv2i8.i64(<vscale x 2 x bfloat> [[VD]], <vscale x 2 x i8> [[VS2]], i64 [[VL]])
+// CHECK-RV64-NEXT: ret <vscale x 2 x bfloat> [[TMP0]]
+//
+vbfloat16mf2_t test_vfwcvt_f_xu_v_bf16mf2_tu(vbfloat16mf2_t vd, vuint8mf4_t vs2,
+ size_t vl) {
+ return __riscv_vfwcvt_f_xu_v_bf16mf2_tu(vd, vs2, vl);
+}
+
+// CHECK-RV64-LABEL: define dso_local <vscale x 4 x bfloat> @test_vfwcvt_f_xu_v_bf16m1_tu(
+// CHECK-RV64-SAME: <vscale x 4 x bfloat> [[VD:%.*]], <vscale x 4 x i8> [[VS2:%.*]], i64 noundef [[VL:%.*]]) #[[ATTR0]] {
+// CHECK-RV64-NEXT: [[ENTRY:.*:]]
+// CHECK-RV64-NEXT: [[TMP0:%.*]] = call <vscale x 4 x bfloat> @llvm.riscv.vfwcvt.f.xu.v.nxv4bf16.nxv4i8.i64(<vscale x 4 x bfloat> [[VD]], <vscale x 4 x i8> [[VS2]], i64 [[VL]])
+// CHECK-RV64-NEXT: ret <vscale x 4 x bfloat> [[TMP0]]
+//
+vbfloat16m1_t test_vfwcvt_f_xu_v_bf16m1_tu(vbfloat16m1_t vd, vuint8mf2_t vs2,
+ size_t vl) {
+ return __riscv_vfwcvt_f_xu_v_bf16m1_tu(vd, vs2, vl);
+}
+
+// CHECK-RV64-LABEL: define dso_local <vscale x 8 x bfloat> @test_vfwcvt_f_xu_v_bf16m2_tu(
+// CHECK-RV64-SAME: <vscale x 8 x bfloat> [[VD:%.*]], <vscale x 8 x i8> [[VS2:%.*]], i64 noundef [[VL:%.*]]) #[[ATTR0]] {
+// CHECK-RV64-NEXT: [[ENTRY:.*:]]
+// CHECK-RV64-NEXT: [[TMP0:%.*]] = call <vscale x 8 x bfloat> @llvm.riscv.vfwcvt.f.xu.v.nxv8bf16.nxv8i8.i64(<vscale x 8 x bfloat> [[VD]], <vscale x 8 x i8> [[VS2]], i64 [[VL]])
+// CHECK-RV64-NEXT: ret <vscale x 8 x bfloat> [[TMP0]]
+//
+vbfloat16m2_t test_vfwcvt_f_xu_v_bf16m2_tu(vbfloat16m2_t vd, vuint8m1_t vs2,
+ size_t vl) {
+ return __riscv_vfwcvt_f_xu_v_bf16m2_tu(vd, vs2, vl);
+}
+
+// CHECK-RV64-LABEL: define dso_local <vscale x 16 x bfloat> @test_vfwcvt_f_xu_v_bf16m4_tu(
+// CHECK-RV64-SAME: <vscale x 16 x bfloat> [[VD:%.*]], <vscale x 16 x i8> [[VS2:%.*]], i64 noundef [[VL:%.*]]) #[[ATTR0]] {
+// CHECK-RV64-NEXT: [[ENTRY:.*:]]
+// CHECK-RV64-NEXT: [[TMP0:%.*]] = call <vscale x 16 x bfloat> @llvm.riscv.vfwcvt.f.xu.v.nxv16bf16.nxv16i8.i64(<vscale x 16 x bfloat> [[VD]], <vscale x 16 x i8> [[VS2]], i64 [[VL]])
+// CHECK-RV64-NEXT: ret <vscale x 16 x bfloat> [[TMP0]]
+//
+vbfloat16m4_t test_vfwcvt_f_xu_v_bf16m4_tu(vbfloat16m4_t vd, vuint8m2_t vs2,
+ size_t vl) {
+ return __riscv_vfwcvt_f_xu_v_bf16m4_tu(vd, vs2, vl);
+}
+
+// CHECK-RV64-LABEL: define dso_local <vscale x 32 x bfloat> @test_vfwcvt_f_xu_v_bf16m8_tu(
+// CHECK-RV64-SAME: <vscale x 32 x bfloat> [[VD:%.*]], <vscale x 32 x i8> [[VS2:%.*]], i64 noundef [[VL:%.*]]) #[[ATTR0]] {
+// CHECK-RV64-NEXT: [[ENTRY:.*:]]
+// CHECK-RV64-NEXT: [[TMP0:%.*]] = call <vscale x 32 x bfloat> @llvm.riscv.vfwcvt.f.xu.v.nxv32bf16.nxv32i8.i64(<vscale x 32 x bfloat> [[VD]], <vscale x 32 x i8> [[VS2]], i64 [[VL]])
+// CHECK-RV64-NEXT: ret <vscale x 32 x bfloat> [[TMP0]]
+//
+vbfloat16m8_t test_vfwcvt_f_xu_v_bf16m8_tu(vbfloat16m8_t vd, vuint8m4_t vs2,
+ size_t vl) {
+ return __riscv_vfwcvt_f_xu_v_bf16m8_tu(vd, vs2, vl);
+}
+
+// CHECK-RV64-LABEL: define dso_local <vscale x 1 x float> @test_vfwcvt_f_f_v_bf16mf4_f32mf2_tu(
+// CHECK-RV64-SAME: <vscale x 1 x float> [[VD:%.*]], <vscale x 1 x bfloat> [[VS2:%.*]], i64 noundef [[VL:%.*]]) #[[ATTR0]] {
+// CHECK-RV64-NEXT: [[ENTRY:.*:]]
+// CHECK-RV64-NEXT: [[TMP0:%.*]] = call <vscale x 1 x float> @llvm.riscv.vfwcvt.f.f.v.nxv1f32.nxv1bf16.i64(<vscale x 1 x float> [[VD]], <vscale x 1 x bfloat> [[VS2]], i64 [[VL]])
+// CHECK-RV64-NEXT: ret <vscale x 1 x float> [[TMP0]]
+//
+vfloat32mf2_t test_vfwcvt_f_f_v_bf16mf4_f32mf2_tu(vfloat32mf2_t vd,
+ vbfloat16mf4_t vs2,
+ size_t vl) {
+ return __riscv_vfwcvt_f_f_v_bf16mf4_f32mf2_tu(vd, vs2, vl);
+}
+
+// CHECK-RV64-LABEL: define dso_local <vscale x 2 x float> @test_vfwcvt_f_f_v_bf16mf2_f32m1_tu(
+// CHECK-RV64-SAME: <vscale x 2 x float> [[VD:%.*]], <vscale x 2 x bfloat> [[VS2:%.*]], i64 noundef [[VL:%.*]]) #[[ATTR0]] {
+// CHECK-RV64-NEXT: [[ENTRY:.*:]]
+// CHECK-RV64-NEXT: [[TMP0:%.*]] = call <vscale x 2 x float> @llvm.riscv.vfwcvt.f.f.v.nxv2f32.nxv2bf16.i64(<vscale x 2 x float> [[VD]], <vscale x 2 x bfloat> [[VS2]], i64 [[VL]])
+// CHECK-RV64-NEXT: ret <vscale x 2 x float> [[TMP0]]
+//
+vfloat32m1_t test_vfwcvt_f_f_v_bf16mf2_f32m1_tu(vfloat32m1_t vd,
+ vbfloat16mf2_t vs2, size_t vl) {
+ return __riscv_vfwcvt_f_f_v_bf16mf2_f32m1_tu(vd, vs2, vl);
+}
+
+// CHECK-RV64-LABEL: define dso_local <vscale x 4 x float> @test_vfwcvt_f_f_v_bf16m1_f32m2_tu(
+// CHECK-RV64-SAME: <vscale x 4 x float> [[VD:%.*]], <vscale x 4 x bfloat> [[VS2:%.*]], i64 noundef [[VL:%.*]]) #[[ATTR0]] {
+// CHECK-RV64-NEXT: [[ENTRY:.*:]]
+// CHECK-RV64-NEXT: [[TMP0:%.*]] = call <vscale x 4 x float> @llvm.riscv.vfwcvt.f.f.v.nxv4f32.nxv4bf16.i64(<vscale x 4 x float> [[VD]], <vscale x 4 x bfloat> [[VS2]], i64 [[VL]])
+// CHECK-RV64-NEXT: ret <vscale x 4 x float> [[TMP0]]
+//
+vfloat32m2_t test_vfwcvt_f_f_v_bf16m1_f32m2_tu(vfloat32m2_t vd,
+ vbfloat16m1_t vs2, size_t vl) {
+ return __riscv_vfwcvt_f_f_v_bf16m1_f32m2_tu(vd, vs2, vl);
+}
+
+// CHECK-RV64-LABEL: define dso_local <vscale x 8 x float> @test_vfwcvt_f_f_v_bf16m2_f32m4_tu(
+// CHECK-RV64-SAME: <vscale x 8 x float> [[VD:%.*]], <vscale x 8 x bfloat> [[VS2:%.*]], i64 noundef [[VL:%.*]]) #[[ATTR0]] {
+// CHECK-RV64-NEXT: [[ENTRY:.*:]]
+// CHECK-RV64-NEXT: [[TMP0:%.*]] = call <vscale x 8 x float> @llvm.riscv.vfwcvt.f.f.v.nxv8f32.nxv8bf16.i64(<vscale x 8 x float> [[VD]], <vscale x 8 x bfloat> [[VS2]], i64 [[VL]])
+// CHECK-RV64-NEXT: ret <vscale x 8 x float> [[TMP0]]
+//
+vfloat32m4_t test_vfwcvt_f_f_v_bf16m2_f32m4_tu(vfloat32m4_t vd,
+ vbfloat16m2_t vs2, size_t vl) {
+ return __riscv_vfwcvt_f_f_v_bf16m2_f32m4_tu(vd, vs2, vl);
+}
+
+// CHECK-RV64-LABEL: define dso_local <vscale x 16 x float> @test_vfwcvt_f_f_v_bf16m4_f32m8_tu(
+// CHECK-RV64-SAME: <vscale x 16 x float> [[VD:%.*]], <vscale x 16 x bfloat> [[VS2:%.*]], i64 noundef [[VL:%.*]]) #[[ATTR0]] {
+// CHECK-RV64-NEXT: [[ENTRY:.*:]]
+// CHECK-RV64-NEXT: [[TMP0:%.*]] = call <vscale x 16 x float> @llvm.riscv.vfwcvt.f.f.v.nxv16f32.nxv16bf16.i64(<vscale x 16 x float> [[VD]], <vscale x 16 x bfloat> [[VS2]], i64 [[VL]])
+// CHECK-RV64-NEXT: ret <vscale x 16 x float> [[TMP0]]
+//
+vfloat32m8_t test_vfwcvt_f_f_v_bf16m4_f32m8_tu(vfloat32m8_t vd,
+ vbfloat16m4_t vs2, size_t vl) {
+ return __riscv_vfwcvt_f_f_v_bf16m4_f32m8_tu(vd, vs2, vl);
+}
+
+// CHECK-RV64-LABEL: define dso_local <vscale x 1 x bfloat> @test_vfwcvt_f_x_v_bf16mf4_tum(
+// CHECK-RV64-SAME: <vscale x 1 x i1> [[VM:%.*]], <vscale x 1 x bfloat> [[VD:%.*]], <vscale x 1 x i8> [[VS2:%.*]], i64 noundef [[VL:%.*]]) #[[ATTR0]] {
+// CHECK-RV64-NEXT: [[ENTRY:.*:]]
+// CHECK-RV64-NEXT: [[TMP0:%.*]] = call <vscale x 1 x bfloat> @llvm.riscv.vfwcvt.f.x.v.mask.nxv1bf16.nxv1i8.i64(<vscale x 1 x bfloat> [[VD]], <vscale x 1 x i8> [[VS2]], <vscale x 1 x i1> [[VM]], i64 [[VL]], i64 2)
+// CHECK-RV64-NEXT: ret <vscale x 1 x bfloat> [[TMP0]]
+//
+vbfloat16mf4_t test_vfwcvt_f_x_v_bf16mf4_tum(vbool64_t vm, vbfloat16mf4_t vd,
+ vint8mf8_t vs2, size_t vl) {
+ return __riscv_vfwcvt_f_x_v_bf16mf4_tum(vm, vd, vs2, vl);
+}
+
+// CHECK-RV64-LABEL: define dso_local <vscale x 2 x bfloat> @test_vfwcvt_f_x_v_bf16mf2_tum(
+// CHECK-RV64-SAME: <vscale x 2 x i1> [[VM:%.*]], <vscale x 2 x bfloat> [[VD:%.*]], <vscale x 2 x i8> [[VS2:%.*]], i64 noundef [[VL:%.*]]) #[[ATTR0]] {
+// CHECK-RV64-NEXT: [[ENTRY:.*:]]
+// CHECK-RV64-NEXT: [[TMP0:%.*]] = call <vscale x 2 x bfloat> @llvm.riscv.vfwcvt.f.x.v.mask.nxv2bf16.nxv2i8.i64(<vscale x 2 x bfloat> [[VD]], <vscale x 2 x i8> [[VS2]], <vscale x 2 x i1> [[VM]], i64 [[VL]], i64 2)
+// CHECK-RV64-NEXT: ret <vscale x 2 x bfloat> [[TMP0]]
+//
+vbfloat16mf2_t test_vfwcvt_f_x_v_bf16mf2_tum(vbool32_t vm, vbfloat16mf2_t vd,
+ vint8mf4_t vs2, size_t vl) {
+ return __riscv_vfwcvt_f_x_v_bf16mf2_tum(vm, vd, vs2, vl);
+}
+
+// CHECK-RV64-LABEL: define dso_local <vscale x 4 x bfloat> @test_vfwcvt_f_x_v_bf16m1_tum(
+// CHECK-RV64-SAME: <vscale x 4 x i1> [[VM:%.*]], <vscale x 4 x bfloat> [[VD:%.*]], <vscale x 4 x i8> [[VS2:%.*]], i64 noundef [[VL:%.*]]) #[[ATTR0]] {
+// CHECK-RV64-NEXT: [[ENTRY:.*:]]
+// CHECK-RV64-NEXT: [[TMP0:%.*]] = call <vscale x 4 x bfloat> @llvm.riscv.vfwcvt.f.x.v.mask.nxv4bf16.nxv4i8.i64(<vscale x 4 x bfloat> [[VD]], <vscale x 4 x i8> [[VS2]], <vscale x 4 x i1> [[VM]], i64 [[VL]], i64 2)
+// CHECK-RV64-NEXT: ret <vscale x 4 x bfloat> [[TMP0]]
+//
+vbfloat16m1_t test_vfwcvt_f_x_v_bf16m1_tum(vbool16_t vm, vbfloat16m1_t vd,
+ vint8mf2_t vs2, size_t vl) {
+ return __riscv_vfwcvt_f_x_v_bf16m1_tum(vm, vd, vs2, vl);
+}
+
+// CHECK-RV64-LABEL: define dso_local <vscale x 8 x bfloat> @test_vfwcvt_f_x_v_bf16m2_tum(
+// CHECK-RV64-SAME: <vscale x 8 x i1> [[VM:%.*]], <vscale x 8 x bfloat> [[VD:%.*]], <vscale x 8 x i8> [[VS2:%.*]], i64 noundef [[VL:%.*]]) #[[ATTR0]] {
+// CHECK-RV64-NEXT: [[ENTRY:.*:]]
+// CHECK-RV64-NEXT: [[TMP0:%.*]] = call <vscale x 8 x bfloat> @llvm.riscv.vfwcvt.f.x.v.mask.nxv8bf16.nxv8i8.i64(<vscale x 8 x bfloat> [[VD]], <vscale x 8 x i8> [[VS2]], <vscale x 8 x i1> [[VM]], i64 [[VL]], i64 2)
+// CHECK-RV64-NEXT: ret <vscale x 8 x bfloat> [[TMP0]]
+//
+vbfloat16m2_t test_vfwcvt_f_x_v_bf16m2_tum(vbool8_t vm, vbfloat16m2_t vd,
+ vint8m1_t vs2, size_t vl) {
+ return __riscv_vfwcvt_f_x_v_bf16m2_tum(vm, vd, vs2, vl);
+}
+
+// CHECK-RV64-LABEL: define dso_local <vscale x 16 x bfloat> @test_vfwcvt_f_x_v_bf16m4_tum(
+// CHECK-RV64-SAME: <vscale x 16 x i1> [[VM:%.*]], <vscale x 16 x bfloat> [[VD:%.*]], <vscale x 16 x i8> [[VS2:%.*]], i64 noundef [[VL:%.*]]) #[[ATTR0]] {
+// CHECK-RV64-NEXT: [[ENTRY:.*:]]
+// CHECK-RV64-NEXT: [[TMP0:%.*]] = call <vscale x 16 x bfloat> @llvm.riscv.vfwcvt.f.x.v.mask.nxv16bf16.nxv16i8.i64(<vscale x 16 x bfloat> [[VD]], <vscale x 16 x i8> [[VS2]], <vscale x 16 x i1> [[VM]], i64 [[VL]], i64 2)
+// CHECK-RV64-NEXT: ret <vscale x 16 x bfloat> [[TMP0]]
+//
+vbfloat16m4_t test_vfwcvt_f_x_v_bf16m4_tum(vbool4_t vm, vbfloat16m4_t vd,
+ vint8m2_t vs2, size_t vl) {
+ return __riscv_vfwcvt_f_x_v_bf16m4_tum(vm, vd, vs2, vl);
+}
+
+// CHECK-RV64-LABEL: define dso_local <vscale x 32 x bfloat> @test_vfwcvt_f_x_v_bf16m8_tum(
+// CHECK-RV64-SAME: <vscale x 32 x i1> [[VM:%.*]], <vscale x 32 x bfloat> [[VD:%.*]], <vscale x 32 x i8> [[VS2:%.*]], i64 noundef [[VL:%.*]]) #[[ATTR0]] {
+// CHECK-RV64-NEXT: [[ENTRY:.*:]]
+// CHECK-RV64-NEXT: [[TMP0:%.*]] = call <vscale x 32 x bfloat> @llvm.riscv.vfwcvt.f.x.v.mask.nxv32bf16.nxv32i8.i64(<vscale x 32 x bfloat> [[VD]], <vscale x 32 x i8> [[VS2]], <vscale x 32 x i1> [[VM]], i64 [[VL]], i64 2)
+// CHECK-RV64-NEXT: ret <vscale x 32 x bfloat> [[TMP0]]
+//
+vbfloat16m8_t test_vfwcvt_f_x_v_bf16m8_tum(vbool2_t vm, vbfloat16m8_t vd,
+ vint8m4_t vs2, size_t vl) {
+ return __riscv_vfwcvt_f_x_v_bf16m8_tum(vm, vd, vs2, vl);
+}
+
+// CHECK-RV64-LABEL: define dso_local <vscale x 1 x bfloat> @test_vfwcvt_f_xu_v_bf16mf4_tum(
+// CHECK-RV64-SAME: <vscale x 1 x i1> [[VM:%.*]], <vscale x 1 x bfloat> [[VD:%.*]], <vscale x 1 x i8> [[VS2:%.*]], i64 noundef [[VL:%.*]]) #[[ATTR0]] {
+// CHECK-RV64-NEXT: [[ENTRY:.*:]]
+// CHECK-RV64-NEXT: [[TMP0:%.*]] = call <vscale x 1 x bfloat> @llvm.riscv.vfwcvt.f.xu.v.mask.nxv1bf16.nxv1i8.i64(<vscale x 1 x bfloat> [[VD]], <vscale x 1 x i8> [[VS2]], <vscale x 1 x i1> [[VM]], i64 [[VL]], i64 2)
+// CHECK-RV64-NEXT: ret <vscale x 1 x bfloat> [[TMP0]]
+//
+vbfloat16mf4_t test_vfwcvt_f_xu_v_bf16mf4_tum(vbool64_t vm, vbfloat16mf4_t vd,
+ vuint8mf8_t vs2, size_t vl) {
+ return __riscv_vfwcvt_f_xu_v_bf16mf4_tum(vm, vd, vs2, vl);
+}
+
+// CHECK-RV64-LABEL: define dso_local <vscale x 2 x bfloat> @test_vfwcvt_f_xu_v_bf16mf2_tum(
+// CHECK-RV64-SAME: <vscale x 2 x i1> [[VM:%.*]], <vscale x 2 x bfloat> [[VD:%.*]], <vscale x 2 x i8> [[VS2:%.*]], i64 noundef [[VL:%.*]]) #[[ATTR0]] {
+// CHECK-RV64-NEXT: [[ENTRY:.*:]]
+// CHECK-RV64-NEXT: [[TMP0:%.*]] = call <vscale x 2 x bfloat> @llvm.riscv.vfwcvt.f.xu.v.mask.nxv2bf16.nxv2i8.i64(<vscale x 2 x bfloat> [[VD]], <vscale x 2 x i8> [[VS2]], <vscale x 2 x i1> [[VM]], i64 [[VL]], i64 2)
+// CHECK-RV64-NEXT: ret <vscale x 2 x bfloat> [[TMP0]]
+//
+vbfloat16mf2_t test_vfwcvt_f_xu_v_bf16mf2_tum(vbool32_t vm, vbfloat16mf2_t vd,
+ vuint8mf4_t vs2, size_t vl) {
+ return __riscv_vfwcvt_f_xu_v_bf16mf2_tum(vm, vd, vs2, vl);
+}
+
+// CHECK-RV64-LABEL: define dso_local <vscale x 4 x bfloat> @test_vfwcvt_f_xu_v_bf16m1_tum(
+// CHECK-RV64-SAME: <vscale x 4 x i1> [[VM:%.*]], <vscale x 4 x bfloat> [[VD:%.*]], <vscale x 4 x i8> [[VS2:%.*]], i64 noundef [[VL:%.*]]) #[[ATTR0]] {
+// CHECK-RV64-NEXT: [[ENTRY:.*:]]
+// CHECK-RV64-NEXT: [[TMP0:%.*]] = call <vscale x 4 x bfloat> @llvm.riscv.vfwcvt.f.xu.v.mask.nxv4bf16.nxv4i8.i64(<vscale x 4 x bfloat> [[VD]], <vscale x 4 x i8> [[VS2]], <vscale x 4 x i1> [[VM]], i64 [[VL]], i64 2)
+// CHECK-RV64-NEXT: ret <vscale x 4 x bfloat> [[TMP0]]
+//
+vbfloat16m1_t test_vfwcvt_f_xu_v_bf16m1_tum(vbool16_t vm, vbfloat16m1_t vd,
+ vuint8mf2_t vs2, size_t vl) {
+ return __riscv_vfwcvt_f_xu_v_bf16m1_tum(vm, vd, vs2, vl);
+}
+
+// CHECK-RV64-LABEL: define dso_local <vscale x 8 x bfloat> @test_vfwcvt_f_xu_v_bf16m2_tum(
+// CHECK-RV64-SAME: <vscale x 8 x i1> [[VM:%.*]], <vscale x 8 x bfloat> [[VD:%.*]], <vscale x 8 x i8> [[VS2:%.*]], i64 noundef [[VL:%.*]]) #[[ATTR0]] {
+// CHECK-RV64-NEXT: [[ENTRY:.*:]]
+// CHECK-RV64-NEXT: [[TMP0:%.*]] = call <vscale x 8 x bfloat> @llvm.riscv.vfwcvt.f.xu.v.mask.nxv8bf16.nxv8i8.i64(<vscale x 8 x bfloat> [[VD]], <vscale x 8 x i8> [[VS2]], <vscale x 8 x i1> [[VM]], i64 [[VL]], i64 2)
+// CHECK-RV64-NEXT: ret <vscale x 8 x bfloat> [[TMP0]]
+//
+vbfloat16m2_t test_vfwcvt_f_xu_v_bf16m2_tum(vbool8_t vm, vbfloat16m2_t vd,
+ vuint8m1_t vs2, size_t vl) {
+ return __riscv_vfwcvt_f_xu_v_bf16m2_tum(vm, vd, vs2, vl);
+}
+
+// CHECK-RV64-LABEL: define dso_local <vscale x 16 x bfloat> @test_vfwcvt_f_xu_v_bf16m4_tum(
+// CHECK-RV64-SAME: <vscale x 16 x i1> [[VM:%.*]], <vscale x 16 x bfloat> [[VD:%.*]], <vscale x 16 x i8> [[VS2:%.*]], i64 noundef [[VL:%.*]]) #[[ATTR0]] {
+// CHECK-RV64-NEXT: [[ENTRY:.*:]]
+// CHECK-RV64-NEXT: [[TMP0:%.*]] = call <vscale x 16 x bfloat> @llvm.riscv.vfwcvt.f.xu.v.mask.nxv16bf16.nxv16i8.i64(<vscale x 16 x bfloat> [[VD]], <vscale x 16 x i8> [[VS2]], <vscale x 16 x i1> [[VM]], i64 [[VL]], i64 2)
+// CHECK-RV64-NEXT: ret <vscale x 16 x bfloat> [[TMP0]]
+//
+vbfloat16m4_t test_vfwcvt_f_xu_v_bf16m4_tum(vbool4_t vm, vbfloat16m4_t vd,
+ vuint8m2_t vs2, size_t vl) {
+ return __riscv_vfwcvt_f_xu_v_bf16m4_tum(vm, vd, vs2, vl);
+}
+
+// CHECK-RV64-LABEL: define dso_local <vscale x 32 x bfloat> @test_vfwcvt_f_xu_v_bf16m8_tum(
+// CHECK-RV64-SAME: <vscale x 32 x i1> [[VM:%.*]], <vscale x 32 x bfloat> [[VD:%.*]], <vscale x 32 x i8> [[VS2:%.*]], i64 noundef [[VL:%.*]]) #[[ATTR0]] {
+// CHECK-RV64-NEXT: [[ENTRY:.*:]]
+// CHECK-RV64-NEXT: [[TMP0:%.*]] = call <vscale x 32 x bfloat> @llvm.riscv.vfwcvt.f.xu.v.mask.nxv32bf16.nxv32i8.i64(<vscale x 32 x bfloat> [[VD]], <vscale x 32 x i8> [[VS2]], <vscale x 32 x i1> [[VM]], i64 [[VL]], i64 2)
+// CHECK-RV64-NEXT: ret <vscale x 32 x bfloat> [[TMP0]]
+//
+vbfloat16m8_t test_vfwcvt_f_xu_v_bf16m8_tum(vbool2_t vm, vbfloat16m8_t vd,
+ vuint8m4_t vs2, size_t vl) {
+ return __riscv_vfwcvt_f_xu_v_bf16m8_tum(vm, vd, vs2, vl);
+}
+
+// CHECK-RV64-LABEL: define dso_local <vscale x 1 x float> @test_vfwcvt_f_f_v_bf16mf4_f32mf2_tum(
+// CHECK-RV64-SAME: <vscale x 1 x i1> [[VM:%.*]], <vscale x 1 x float> [[VD:%.*]], <vscale x 1 x bfloat> [[VS2:%.*]], i64 noundef [[VL:%.*]]) #[[ATTR0]] {
+// CHECK-RV64-NEXT: [[ENTRY:.*:]]
+// CHECK-RV64-NEXT: [[TMP0:%.*]] = call <vscale x 1 x float> @llvm.riscv.vfwcvt.f.f.v.mask.nxv1f32.nxv1bf16.i64(<vscale x 1 x float> [[VD]], <vscale x 1 x bfloat> [[VS2]], <vscale x 1 x i1> [[VM]], i64 [[VL]], i64 2)
+// CHECK-RV64-NEXT: ret <vscale x 1 x float> [[TMP0]]
+//
+vfloat32mf2_t test_vfwcvt_f_f_v_bf16mf4_f32mf2_tum(vbool64_t vm,
+ vfloat32mf2_t vd,
+ vbfloat16mf4_t vs2,
+ size_t vl) {
+ return __riscv_vfwcvt_f_f_v_bf16mf4_f32mf2_tum(vm, vd, vs2, vl);
+}
+
+// CHECK-RV64-LABEL: define dso_local <vscale x 2 x float> @test_vfwcvt_f_f_v_bf16mf2_f32m1_tum(
+// CHECK-RV64-SAME: <vscale x 2 x i1> [[VM:%.*]], <vscale x 2 x float> [[VD:%.*]], <vscale x 2 x bfloat> [[VS2:%.*]], i64 noundef [[VL:%.*]]) #[[ATTR0]] {
+// CHECK-RV64-NEXT: [[ENTRY:.*:]]
+// CHECK-RV64-NEXT: [[TMP0:%.*]] = call <vscale x 2 x float> @llvm.riscv.vfwcvt.f.f.v.mask.nxv2f32.nxv2bf16.i64(<vscale x 2 x float> [[VD]], <vscale x 2 x bfloat> [[VS2]], <vscale x 2 x i1> [[VM]], i64 [[VL]], i64 2)
+// CHECK-RV64-NEXT: ret <vscale x 2 x float> [[TMP0]]
+//
+vfloat32m1_t test_vfwcvt_f_f_v_bf16mf2_f32m1_tum(vbool32_t vm, vfloat32m1_t vd,
+ vbfloat16mf2_t vs2,
+ size_t vl) {
+ return __riscv_vfwcvt_f_f_v_bf16mf2_f32m1_tum(vm, vd, vs2, vl);
+}
+
+// CHECK-RV64-LABEL: define dso_local <vscale x 4 x float> @test_vfwcvt_f_f_v_bf16m1_f32m2_tum(
+// CHECK-RV64-SAME: <vscale x 4 x i1> [[VM:%.*]], <vscale x 4 x float> [[VD:%.*]], <vscale x 4 x bfloat> [[VS2:%.*]], i64 noundef [[VL:%.*]]) #[[ATTR0]] {
+// CHECK-RV64-NEXT: [[ENTRY:.*:]]
+// CHECK-RV64-NEXT: [[TMP0:%.*]] = call <vscale x 4 x float> @llvm.riscv.vfwcvt.f.f.v.mask.nxv4f32.nxv4bf16.i64(<vscale x 4 x float> [[VD]], <vscale x 4 x bfloat> [[VS2]], <vscale x 4 x i1> [[VM]], i64 [[VL]], i64 2)
+// CHECK-RV64-NEXT: ret <vscale x 4 x float> [[TMP0]]
+//
+vfloat32m2_t test_vfwcvt_f_f_v_bf16m1_f32m2_tum(vbool16_t vm, vfloat32m2_t vd,
+ vbfloat16m1_t vs2, size_t vl) {
+ return __riscv_vfwcvt_f_f_v_bf16m1_f32m2_tum(vm, vd, vs2, vl);
+}
+
+// CHECK-RV64-LABEL: define dso_local <vscale x 8 x float> @test_vfwcvt_f_f_v_bf16m2_f32m4_tum(
+// CHECK-RV64-SAME: <vscale x 8 x i1> [[VM:%.*]], <vscale x 8 x float> [[VD:%.*]], <vscale x 8 x bfloat> [[VS2:%.*]], i64 noundef [[VL:%.*]]) #[[ATTR0]] {
+// CHECK-RV64-NEXT: [[ENTRY:.*:]]
+// CHECK-RV64-NEXT: [[TMP0:%.*]] = call <vscale x 8 x float> @llvm.riscv.vfwcvt.f.f.v.mask.nxv8f32.nxv8bf16.i64(<vscale x 8 x float> [[VD]], <vscale x 8 x bfloat> [[VS2]], <vscale x 8 x i1> [[VM]], i64 [[VL]], i64 2)
+// CHECK-RV64-NEXT: ret <vscale x 8 x float> [[TMP0]]
+//
+vfloat32m4_t test_vfwcvt_f_f_v_bf16m2_f32m4_tum(vbool8_t vm, vfloat32m4_t vd,
+ vbfloat16m2_t vs2, size_t vl) {
+ return __riscv_vfwcvt_f_f_v_bf16m2_f32m4_tum(vm, vd, vs2, vl);
+}
+
+// CHECK-RV64-LABEL: define dso_local <vscale x 16 x float> @test_vfwcvt_f_f_v_bf16m4_f32m8_tum(
+// CHECK-RV64-SAME: <vscale x 16 x i1> [[VM:%.*]], <vscale x 16 x float> [[VD:%.*]], <vscale x 16 x bfloat> [[VS2:%.*]], i64 noundef [[VL:%.*]]) #[[ATTR0]] {
+// CHECK-RV64-NEXT: [[ENTRY:.*:]]
+// CHECK-RV64-NEXT: [[TMP0:%.*]] = call <vscale x 16 x float> @llvm.riscv.vfwcvt.f.f.v.mask.nxv16f32.nxv16bf16.i64(<vscale x 16 x float> [[VD]], <vscale x 16 x bfloat> [[VS2]], <vscale x 16 x i1> [[VM]], i64 [[VL]], i64 2)
+// CHECK-RV64-NEXT: ret <vscale x 16 x float> [[TMP0]]
+//
+vfloat32m8_t test_vfwcvt_f_f_v_bf16m4_f32m8_tum(vbool4_t vm, vfloat32m8_t vd,
+ vbfloat16m4_t vs2, size_t vl) {
+ return __riscv_vfwcvt_f_f_v_bf16m4_f32m8_tum(vm, vd, vs2, vl);
+}
+
+// CHECK-RV64-LABEL: define dso_local <vscale x 1 x bfloat> @test_vfwcvt_f_x_v_bf16mf4_tumu(
+// CHECK-RV64-SAME: <vscale x 1 x i1> [[VM:%.*]], <vscale x 1 x bfloat> [[VD:%.*]], <vscale x 1 x i8> [[VS2:%.*]], i64 noundef [[VL:%.*]]) #[[ATTR0]] {
+// CHECK-RV64-NEXT: [[ENTRY:.*:]]
+// CHECK-RV64-NEXT: [[TMP0:%.*]] = call <vscale x 1 x bfloat> @llvm.riscv.vfwcvt.f.x.v.mask.nxv1bf16.nxv1i8.i64(<vscale x 1 x bfloat> [[VD]], <vscale x 1 x i8> [[VS2]], <vscale x 1 x i1> [[VM]], i64 [[VL]], i64 0)
+// CHECK-RV64-NEXT: ret <vscale x 1 x bfloat> [[TMP0]]
+//
+vbfloat16mf4_t test_vfwcvt_f_x_v_bf16mf4_tumu(vbool64_t vm, vbfloat16mf4_t vd,
+ vint8mf8_t vs2, size_t vl) {
+ return __riscv_vfwcvt_f_x_v_bf16mf4_tumu(vm, vd, vs2, vl);
+}
+
+// CHECK-RV64-LABEL: define dso_local <vscale x 2 x bfloat> @test_vfwcvt_f_x_v_bf16mf2_tumu(
+// CHECK-RV64-SAME: <vscale x 2 x i1> [[VM:%.*]], <vscale x 2 x bfloat> [[VD:%.*]], <vscale x 2 x i8> [[VS2:%.*]], i64 noundef [[VL:%.*]]) #[[ATTR0]] {
+// CHECK-RV64-NEXT: [[ENTRY:.*:]]
+// CHECK-RV64-NEXT: [[TMP0:%.*]] = call <vscale x 2 x bfloat> @llvm.riscv.vfwcvt.f.x.v.mask.nxv2bf16.nxv2i8.i64(<vscale x 2 x bfloat> [[VD]], <vscale x 2 x i8> [[VS2]], <vscale x 2 x i1> [[VM]], i64 [[VL]], i64 0)
+// CHECK-RV64-NEXT: ret <vscale x 2 x bfloat> [[TMP0]]
+//
+vbfloat16mf2_t test_vfwcvt_f_x_v_bf16mf2_tumu(vbool32_t vm, vbfloat16mf2_t vd,
+ vint8mf4_t vs2, size_t vl) {
+ return __riscv_vfwcvt_f_x_v_bf16mf2_tumu(vm, vd, vs2, vl);
+}
+
+// CHECK-RV64-LABEL: define dso_local <vscale x 4 x bfloat> @test_vfwcvt_f_x_v_bf16m1_tumu(
+// CHECK-RV64-SAME: <vscale x 4 x i1> [[VM:%.*]], <vscale x 4 x bfloat> [[VD:%.*]], <vscale x 4 x i8> [[VS2:%.*]], i64 noundef [[VL:%.*]]) #[[ATTR0]] {
+// CHECK-RV64-NEXT: [[ENTRY:.*:]]
+// CHECK-RV64-NEXT: [[TMP0:%.*]] = call <vscale x 4 x bfloat> @llvm.riscv.vfwcvt.f.x.v.mask.nxv4bf16.nxv4i8.i64(<vscale x 4 x bfloat> [[VD]], <vscale x 4 x i8> [[VS2]], <vscale x 4 x i1> [[VM]], i64 [[VL]], i64 0)
+// CHECK-RV64-NEXT: ret <vscale x 4 x bfloat> [[TMP0]]
+//
+vbfloat16m1_t test_vfwcvt_f_x_v_bf16m1_tumu(vbool16_t vm, vbfloat16m1_t vd,
+ vint8mf2_t vs2, size_t vl) {
+ return __riscv_vfwcvt_f_x_v_bf16m1_tumu(vm, vd, vs2, vl);
+}
+
+// CHECK-RV64-LABEL: define dso_local <vscale x 8 x bfloat> @test_vfwcvt_f_x_v_bf16m2_tumu(
+// CHECK-RV64-SAME: <vscale x 8 x i1> [[VM:%.*]], <vscale x 8 x bfloat> [[VD:%.*]], <vscale x 8 x i8> [[VS2:%.*]], i64 noundef [[VL:%.*]]) #[[ATTR0]] {
+// CHECK-RV64-NEXT: [[ENTRY:.*:]]
+// CHECK-RV64-NEXT: [[TMP0:%.*]] = call <vscale x 8 x bfloat> @llvm.riscv.vfwcvt.f.x.v.mask.nxv8bf16.nxv8i8.i64(<vscale x 8 x bfloat> [[VD]], <vscale x 8 x i8> [[VS2]], <vscale x 8 x i1> [[VM]], i64 [[VL]], i64 0)
+// CHECK-RV64-NEXT: ret <vscale x 8 x bfloat> [[TMP0]]
+//
+vbfloat16m2_t test_vfwcvt_f_x_v_bf16m2_tumu(vbool8_t vm, vbfloat16m2_t vd,
+ vint8m1_t vs2, size_t vl) {
+ return __riscv_vfwcvt_f_x_v_bf16m2_tumu(vm, vd, vs2, vl);
+}
+
+// CHECK-RV64-LABEL: define dso_local <vscale x 16 x bfloat> @test_vfwcvt_f_x_v_bf16m4_tumu(
+// CHECK-RV64-SAME: <vscale x 16 x i1> [[VM:%.*]], <vscale x 16 x bfloat> [[VD:%.*]], <vscale x 16 x i8> [[VS2:%.*]], i64 noundef [[VL:%.*]]) #[[ATTR0]] {
+// CHECK-RV64-NEXT: [[ENTRY:.*:]]
+// CHECK-RV64-NEXT: [[TMP0:%.*]] = call <vscale x 16 x bfloat> @llvm.riscv.vfwcvt.f.x.v.mask.nxv16bf16.nxv16i8.i64(<vscale x 16 x bfloat> [[VD]], <vscale x 16 x i8> [[VS2]], <vscale x 16 x i1> [[VM]], i64 [[VL]], i64 0)
+// CHECK-RV64-NEXT: ret <vscale x 16 x bfloat> [[TMP0]]
+//
+vbfloat16m4_t test_vfwcvt_f_x_v_bf16m4_tumu(vbool4_t vm, vbfloat16m4_t vd,
+ vint8m2_t vs2, size_t vl) {
+ return __riscv_vfwcvt_f_x_v_bf16m4_tumu(vm, vd, vs2, vl);
+}
+
+// CHECK-RV64-LABEL: define dso_local <vscale x 32 x bfloat> @test_vfwcvt_f_x_v_bf16m8_tumu(
+// CHECK-RV64-SAME: <vscale x 32 x i1> [[VM:%.*]], <vscale x 32 x bfloat> [[VD:%.*]], <vscale x 32 x i8> [[VS2:%.*]], i64 noundef [[VL:%.*]]) #[[ATTR0]] {
+// CHECK-RV64-NEXT: [[ENTRY:.*:]]
+// CHECK-RV64-NEXT: [[TMP0:%.*]] = call <vscale x 32 x bfloat> @llvm.riscv.vfwcvt.f.x.v.mask.nxv32bf16.nxv32i8.i64(<vscale x 32 x bfloat> [[VD]], <vscale x 32 x i8> [[VS2]], <vscale x 32 x i1> [[VM]], i64 [[VL]], i64 0)
+// CHECK-RV64-NEXT: ret <vscale x 32 x bfloat> [[TMP0]]
+//
+vbfloat16m8_t test_vfwcvt_f_x_v_bf16m8_tumu(vbool2_t vm, vbfloat16m8_t vd,
+ vint8m4_t vs2, size_t vl) {
+ return __riscv_vfwcvt_f_x_v_bf16m8_tumu(vm, vd, vs2, vl);
+}
+
+// CHECK-RV64-LABEL: define dso_local <vscale x 1 x bfloat> @test_vfwcvt_f_xu_v_bf16mf4_tumu(
+// CHECK-RV64-SAME: <vscale x 1 x i1> [[VM:%.*]], <vscale x 1 x bfloat> [[VD:%.*]], <vscale x 1 x i8> [[VS2:%.*]], i64 noundef [[VL:%.*]]) #[[ATTR0]] {
+// CHECK-RV64-NEXT: [[ENTRY:.*:]]
+// CHECK-RV64-NEXT: [[TMP0:%.*]] = call <vscale x 1 x bfloat> @llvm.riscv.vfwcvt.f.xu.v.mask.nxv1bf16.nxv1i8.i64(<vscale x 1 x bfloat> [[VD]], <vscale x 1 x i8> [[VS2]], <vscale x 1 x i1> [[VM]], i64 [[VL]], i64 0)
+// CHECK-RV64-NEXT: ret <vscale x 1 x bfloat> [[TMP0]]
+//
+vbfloat16mf4_t test_vfwcvt_f_xu_v_bf16mf4_tumu(vbool64_t vm, vbfloat16mf4_t vd,
+ vuint8mf8_t vs2, size_t vl) {
+ return __riscv_vfwcvt_f_xu_v_bf16mf4_tumu(vm, vd, vs2, vl);
+}
+
+// CHECK-RV64-LABEL: define dso_local <vscale x 2 x bfloat> @test_vfwcvt_f_xu_v_bf16mf2_tumu(
+// CHECK-RV64-SAME: <vscale x 2 x i1> [[VM:%.*]], <vscale x 2 x bfloat> [[VD:%.*]], <vscale x 2 x i8> [[VS2:%.*]], i64 noundef [[VL:%.*]]) #[[ATTR0]] {
+// CHECK-RV64-NEXT: [[ENTRY:.*:]]
+// CHECK-RV64-NEXT: [[TMP0:%.*]] = call <vscale x 2 x bfloat> @llvm.riscv.vfwcvt.f.xu.v.mask.nxv2bf16.nxv2i8.i64(<vscale x 2 x bfloat> [[VD]], <vscale x 2 x i8> [[VS2]], <vscale x 2 x i1> [[VM]], i64 [[VL]], i64 0)
+// CHECK-RV64-NEXT: ret <vscale x 2 x bfloat> [[TMP0]]
+//
+vbfloat16mf2_t test_vfwcvt_f_xu_v_bf16mf2_tumu(vbool32_t vm, vbfloat16mf2_t vd,
+ vuint8mf4_t vs2, size_t vl) {
+ return __riscv_vfwcvt_f_xu_v_bf16mf2_tumu(vm, vd, vs2, vl);
+}
+
+// CHECK-RV64-LABEL: define dso_local <vscale x 4 x bfloat> @test_vfwcvt_f_xu_v_bf16m1_tumu(
+// CHECK-RV64-SAME: <vscale x 4 x i1> [[VM:%.*]], <vscale x 4 x bfloat> [[VD:%.*]], <vscale x 4 x i8> [[VS2:%.*]], i64 noundef [[VL:%.*]]) #[[ATTR0]] {
+// CHECK-RV64-NEXT: [[ENTRY:.*:]]
+// CHECK-RV64-NEXT: [[TMP0:%.*]] = call <vscale x 4 x bfloat> @llvm.riscv.vfwcvt.f.xu.v.mask.nxv4bf16.nxv4i8.i64(<vscale x 4 x bfloat> [[VD]], <vscale x 4 x i8> [[VS2]], <vscale x 4 x i1> [[VM]], i64 [[VL]], i64 0)
+// CHECK-RV64-NEXT: ret <vscale x 4 x bfloat> [[TMP0]]
+//
+vbfloat16m1_t test_vfwcvt_f_xu_v_bf16m1_tumu(vbool16_t vm, vbfloat16m1_t vd,
+ vuint8mf2_t vs2, size_t vl) {
+ return __riscv_vfwcvt_f_xu_v_bf16m1_tumu(vm, vd, vs2, vl);
+}
+
+// CHECK-RV64-LABEL: define dso_local <vscale x 8 x bfloat> @test_vfwcvt_f_xu_v_bf16m2_tumu(
+// CHECK-RV64-SAME: <vscale x 8 x i1> [[VM:%.*]], <vscale x 8 x bfloat> [[VD:%.*]], <vscale x 8 x i8> [[VS2:%.*]], i64 noundef [[VL:%.*]]) #[[ATTR0]] {
+// CHECK-RV64-NEXT: [[ENTRY:.*:]]
+// CHECK-RV64-NEXT: [[TMP0:%.*]] = call <vscale x 8 x bfloat> @llvm.riscv.vfwcvt.f.xu.v.mask.nxv8bf16.nxv8i8.i64(<vscale x 8 x bfloat> [[VD]], <vscale x 8 x i8> [[VS2]], <vscale x 8 x i1> [[VM]], i64 [[VL]], i64 0)
+// CHECK-RV64-NEXT: ret <vscale x 8 x bfloat> [[TMP0]]
+//
+vbfloat16m2_t test_vfwcvt_f_xu_v_bf16m2_tumu(vbool8_t vm, vbfloat16m2_t vd,
+ vuint8m1_t vs2, size_t vl) {
+ return __riscv_vfwcvt_f_xu_v_bf16m2_tumu(vm, vd, vs2, vl);
+}
+
+// CHECK-RV64-LABEL: define dso_local <vscale x 16 x bfloat> @test_vfwcvt_f_xu_v_bf16m4_tumu(
+// CHECK-RV64-SAME: <vscale x 16 x i1> [[VM:%.*]], <vscale x 16 x bfloat> [[VD:%.*]], <vscale x 16 x i8> [[VS2:%.*]], i64 noundef [[VL:%.*]]) #[[ATTR0]] {
+// CHECK-RV64-NEXT: [[ENTRY:.*:]]
+// CHECK-RV64-NEXT: [[TMP0:%.*]] = call <vscale x 16 x bfloat> @llvm.riscv.vfwcvt.f.xu.v.mask.nxv16bf16.nxv16i8.i64(<vscale x 16 x bfloat> [[VD]], <vscale x 16 x i8> [[VS2]], <vscale x 16 x i1> [[VM]], i64 [[VL]], i64 0)
+// CHECK-RV64-NEXT: ret <vscale x 16 x bfloat> [[TMP0]]
+//
+vbfloat16m4_t test_vfwcvt_f_xu_v_bf16m4_tumu(vbool4_t vm, vbfloat16m4_t vd,
+ vuint8m2_t vs2, size_t vl) {
+ return __riscv_vfwcvt_f_xu_v_bf16m4_tumu(vm, vd, vs2, vl);
+}
+
+// CHECK-RV64-LABEL: define dso_local <vscale x 32 x bfloat> @test_vfwcvt_f_xu_v_bf16m8_tumu(
+// CHECK-RV64-SAME: <vscale x 32 x i1> [[VM:%.*]], <vscale x 32 x bfloat> [[VD:%.*]], <vscale x 32 x i8> [[VS2:%.*]], i64 noundef [[VL:%.*]]) #[[ATTR0]] {
+// CHECK-RV64-NEXT: [[ENTRY:.*:]]
+// CHECK-RV64-NEXT: [[TMP0:%.*]] = call <vscale x 32 x bfloat> @llvm.riscv.vfwcvt.f.xu.v.mask.nxv32bf16.nxv32i8.i64(<vscale x 32 x bfloat> [[VD]], <vscale x 32 x i8> [[VS2]], <vscale x 32 x i1> [[VM]], i64 [[VL]], i64 0)
+// CHECK-RV64-NEXT: ret <vscale x 32 x bfloat> [[TMP0]]
+//
+vbfloat16m8_t test_vfwcvt_f_xu_v_bf16m8_tumu(vbool2_t vm, vbfloat16m8_t vd,
+ vuint8m4_t vs2, size_t vl) {
+ return __riscv_vfwcvt_f_xu_v_bf16m8_tumu(vm, vd, vs2, vl);
+}
+
+// CHECK-RV64-LABEL: define dso_local <vscale x 1 x float> @test_vfwcvt_f_f_v_bf16mf4_f32mf2_tumu(
+// CHECK-RV64-SAME: <vscale x 1 x i1> [[VM:%.*]], <vscale x 1 x float> [[VD:%.*]], <vscale x 1 x bfloat> [[VS2:%.*]], i64 noundef [[VL:%.*]]) #[[ATTR0]] {
+// CHECK-RV64-NEXT: [[ENTRY:.*:]]
+// CHECK-RV64-NEXT: [[TMP0:%.*]] = call <vscale x 1 x float> @llvm.riscv.vfwcvt.f.f.v.mask.nxv1f32.nxv1bf16.i64(<vscale x 1 x float> [[VD]], <vscale x 1 x bfloat> [[VS2]], <vscale x 1 x i1> [[VM]], i64 [[VL]], i64 0)
+// CHECK-RV64-NEXT: ret <vscale x 1 x float> [[TMP0]]
+//
+vfloat32mf2_t test_vfwcvt_f_f_v_bf16mf4_f32mf2_tumu(vbool64_t vm,
+ vfloat32mf2_t vd,
+ vbfloat16mf4_t vs2,
+ size_t vl) {
+ return __riscv_vfwcvt_f_f_v_bf16mf4_f32mf2_tumu(vm, vd, vs2, vl);
+}
+
+// CHECK-RV64-LABEL: define dso_local <vscale x 2 x float> @test_vfwcvt_f_f_v_bf16mf2_f32m1_tumu(
+// CHECK-RV64-SAME: <vscale x 2 x i1> [[VM:%.*]], <vscale x 2 x float> [[VD:%.*]], <vscale x 2 x bfloat> [[VS2:%.*]], i64 noundef [[VL:%.*]]) #[[ATTR0]] {
+// CHECK-RV64-NEXT: [[ENTRY:.*:]]
+// CHECK-RV64-NEXT: [[TMP0:%.*]] = call <vscale x 2 x float> @llvm.riscv.vfwcvt.f.f.v.mask.nxv2f32.nxv2bf16.i64(<vscale x 2 x float> [[VD]], <vscale x 2 x bfloat> [[VS2]], <vscale x 2 x i1> [[VM]], i64 [[VL]], i64 0)
+// CHECK-RV64-NEXT: ret <vscale x 2 x float> [[TMP0]]
+//
+vfloat32m1_t test_vfwcvt_f_f_v_bf16mf2_f32m1_tumu(vbool32_t vm, vfloat32m1_t vd,
+ vbfloat16mf2_t vs2,
+ size_t vl) {
+ return __riscv_vfwcvt_f_f_v_bf16mf2_f32m1_tumu(vm, vd, vs2, vl);
+}
+
+// CHECK-RV64-LABEL: define dso_local <vscale x 4 x float> @test_vfwcvt_f_f_v_bf16m1_f32m2_tumu(
+// CHECK-RV64-SAME: <vscale x 4 x i1> [[VM:%.*]], <vscale x 4 x float> [[VD:%.*]], <vscale x 4 x bfloat> [[VS2:%.*]], i64 noundef [[VL:%.*]]) #[[ATTR0]] {
+// CHECK-RV64-NEXT: [[ENTRY:.*:]]
+// CHECK-RV64-NEXT: [[TMP0:%.*]] = call <vscale x 4 x float> @llvm.riscv.vfwcvt.f.f.v.mask.nxv4f32.nxv4bf16.i64(<vscale x 4 x float> [[VD]], <vscale x 4 x bfloat> [[VS2]], <vscale x 4 x i1> [[VM]], i64 [[VL]], i64 0)
+// CHECK-RV64-NEXT: ret <vscale x 4 x float> [[TMP0]]
+//
+vfloat32m2_t test_vfwcvt_f_f_v_bf16m1_f32m2_tumu(vbool16_t vm, vfloat32m2_t vd,
+ vbfloat16m1_t vs2, size_t vl) {
+ return __riscv_vfwcvt_f_f_v_bf16m1_f32m2_tumu(vm, vd, vs2, vl);
+}
+
+// CHECK-RV64-LABEL: define dso_local <vscale x 8 x float> @test_vfwcvt_f_f_v_bf16m2_f32m4_tumu(
+// CHECK-RV64-SAME: <vscale x 8 x i1> [[VM:%.*]], <vscale x 8 x float> [[VD:%.*]], <vscale x 8 x bfloat> [[VS2:%.*]], i64 noundef [[VL:%.*]]) #[[ATTR0]] {
+// CHECK-RV64-NEXT: [[ENTRY:.*:]]
+// CHECK-RV64-NEXT: [[TMP0:%.*]] = call <vscale x 8 x float> @llvm.riscv.vfwcvt.f.f.v.mask.nxv8f32.nxv8bf16.i64(<vscale x 8 x float> [[VD]], <vscale x 8 x bfloat> [[VS2]], <vscale x 8 x i1> [[VM]], i64 [[VL]], i64 0)
+// CHECK-RV64-NEXT: ret <vscale x 8 x float> [[TMP0]]
+//
+vfloat32m4_t test_vfwcvt_f_f_v_bf16m2_f32m4_tumu(vbool8_t vm, vfloat32m4_t vd,
+ vbfloat16m2_t vs2, size_t vl) {
+ return __riscv_vfwcvt_f_f_v_bf16m2_f32m4_tumu(vm, vd, vs2, vl);
+}
+
+// CHECK-RV64-LABEL: define dso_local <vscale x 16 x float> @test_vfwcvt_f_f_v_bf16m4_f32m8_tumu(
+// CHECK-RV64-SAME: <vscale x 16 x i1> [[VM:%.*]], <vscale x 16 x float> [[VD:%.*]], <vscale x 16 x bfloat> [[VS2:%.*]], i64 noundef [[VL:%.*]]) #[[ATTR0]] {
+// CHECK-RV64-NEXT: [[ENTRY:.*:]]
+// CHECK-RV64-NEXT: [[TMP0:%.*]] = call <vscale x 16 x float> @llvm.riscv.vfwcvt.f.f.v.mask.nxv16f32.nxv16bf16.i64(<vscale x 16 x float> [[VD]], <vscale x 16 x bfloat> [[VS2]], <vscale x 16 x i1> [[VM]], i64 [[VL]], i64 0)
+// CHECK-RV64-NEXT: ret <vscale x 16 x float> [[TMP0]]
+//
+vfloat32m8_t test_vfwcvt_f_f_v_bf16m4_f32m8_tumu(vbool4_t vm, vfloat32m8_t vd,
+ vbfloat16m4_t vs2, size_t vl) {
+ return __riscv_vfwcvt_f_f_v_bf16m4_f32m8_tumu(vm, vd, vs2, vl);
+}
+
+// CHECK-RV64-LABEL: define dso_local <vscale x 1 x bfloat> @test_vfwcvt_f_x_v_bf16mf4_mu(
+// CHECK-RV64-SAME: <vscale x 1 x i1> [[VM:%.*]], <vscale x 1 x bfloat> [[VD:%.*]], <vscale x 1 x i8> [[VS2:%.*]], i64 noundef [[VL:%.*]]) #[[ATTR0]] {
+// CHECK-RV64-NEXT: [[ENTRY:.*:]]
+// CHECK-RV64-NEXT: [[TMP0:%.*]] = call <vscale x 1 x bfloat> @llvm.riscv.vfwcvt.f.x.v.mask.nxv1bf16.nxv1i8.i64(<vscale x 1 x bfloat> [[VD]], <vscale x 1 x i8> [[VS2]], <vscale x 1 x i1> [[VM]], i64 [[VL]], i64 1)
+// CHECK-RV64-NEXT: ret <vscale x 1 x bfloat> [[TMP0]]
+//
+vbfloat16mf4_t test_vfwcvt_f_x_v_bf16mf4_mu(vbool64_t vm, vbfloat16mf4_t vd,
+ vint8mf8_t vs2, size_t vl) {
+ return __riscv_vfwcvt_f_x_v_bf16mf4_mu(vm, vd, vs2, vl);
+}
+
+// CHECK-RV64-LABEL: define dso_local <vscale x 2 x bfloat> @test_vfwcvt_f_x_v_bf16mf2_mu(
+// CHECK-RV64-SAME: <vscale x 2 x i1> [[VM:%.*]], <vscale x 2 x bfloat> [[VD:%.*]], <vscale x 2 x i8> [[VS2:%.*]], i64 noundef [[VL:%.*]]) #[[ATTR0]] {
+// CHECK-RV64-NEXT: [[ENTRY:.*:]]
+// CHECK-RV64-NEXT: [[TMP0:%.*]] = call <vscale x 2 x bfloat> @llvm.riscv.vfwcvt.f.x.v.mask.nxv2bf16.nxv2i8.i64(<vscale x 2 x bfloat> [[VD]], <vscale x 2 x i8> [[VS2]], <vscale x 2 x i1> [[VM]], i64 [[VL]], i64 1)
+// CHECK-RV64-NEXT: ret <vscale x 2 x bfloat> [[TMP0]]
+//
+vbfloat16mf2_t test_vfwcvt_f_x_v_bf16mf2_mu(vbool32_t vm, vbfloat16mf2_t vd,
+ vint8mf4_t vs2, size_t vl) {
+ return __riscv_vfwcvt_f_x_v_bf16mf2_mu(vm, vd, vs2, vl);
+}
+
+// CHECK-RV64-LABEL: define dso_local <vscale x 4 x bfloat> @test_vfwcvt_f_x_v_bf16m1_mu(
+// CHECK-RV64-SAME: <vscale x 4 x i1> [[VM:%.*]], <vscale x 4 x bfloat> [[VD:%.*]], <vscale x 4 x i8> [[VS2:%.*]], i64 noundef [[VL:%.*]]) #[[ATTR0]] {
+// CHECK-RV64-NEXT: [[ENTRY:.*:]]
+// CHECK-RV64-NEXT: [[TMP0:%.*]] = call <vscale x 4 x bfloat> @llvm.riscv.vfwcvt.f.x.v.mask.nxv4bf16.nxv4i8.i64(<vscale x 4 x bfloat> [[VD]], <vscale x 4 x i8> [[VS2]], <vscale x 4 x i1> [[VM]], i64 [[VL]], i64 1)
+// CHECK-RV64-NEXT: ret <vscale x 4 x bfloat> [[TMP0]]
+//
+vbfloat16m1_t test_vfwcvt_f_x_v_bf16m1_mu(vbool16_t vm, vbfloat16m1_t vd,
+ vint8mf2_t vs2, size_t vl) {
+ return __riscv_vfwcvt_f_x_v_bf16m1_mu(vm, vd, vs2, vl);
+}
+
+// CHECK-RV64-LABEL: define dso_local <vscale x 8 x bfloat> @test_vfwcvt_f_x_v_bf16m2_mu(
+// CHECK-RV64-SAME: <vscale x 8 x i1> [[VM:%.*]], <vscale x 8 x bfloat> [[VD:%.*]], <vscale x 8 x i8> [[VS2:%.*]], i64 noundef [[VL:%.*]]) #[[ATTR0]] {
+// CHECK-RV64-NEXT: [[ENTRY:.*:]]
+// CHECK-RV64-NEXT: [[TMP0:%.*]] = call <vscale x 8 x bfloat> @llvm.riscv.vfwcvt.f.x.v.mask.nxv8bf16.nxv8i8.i64(<vscale x 8 x bfloat> [[VD]], <vscale x 8 x i8> [[VS2]], <vscale x 8 x i1> [[VM]], i64 [[VL]], i64 1)
+// CHECK-RV64-NEXT: ret <vscale x 8 x bfloat> [[TMP0]]
+//
+vbfloat16m2_t test_vfwcvt_f_x_v_bf16m2_mu(vbool8_t vm, vbfloat16m2_t vd,
+ vint8m1_t vs2, size_t vl) {
+ return __riscv_vfwcvt_f_x_v_bf16m2_mu(vm, vd, vs2, vl);
+}
+
+// CHECK-RV64-LABEL: define dso_local <vscale x 16 x bfloat> @test_vfwcvt_f_x_v_bf16m4_mu(
+// CHECK-RV64-SAME: <vscale x 16 x i1> [[VM:%.*]], <vscale x 16 x bfloat> [[VD:%.*]], <vscale x 16 x i8> [[VS2:%.*]], i64 noundef [[VL:%.*]]) #[[ATTR0]] {
+// CHECK-RV64-NEXT: [[ENTRY:.*:]]
+// CHECK-RV64-NEXT: [[TMP0:%.*]] = call <vscale x 16 x bfloat> @llvm.riscv.vfwcvt.f.x.v.mask.nxv16bf16.nxv16i8.i64(<vscale x 16 x bfloat> [[VD]], <vscale x 16 x i8> [[VS2]], <vscale x 16 x i1> [[VM]], i64 [[VL]], i64 1)
+// CHECK-RV64-NEXT: ret <vscale x 16 x bfloat> [[TMP0]]
+//
+vbfloat16m4_t test_vfwcvt_f_x_v_bf16m4_mu(vbool4_t vm, vbfloat16m4_t vd,
+ vint8m2_t vs2, size_t vl) {
+ return __riscv_vfwcvt_f_x_v_bf16m4_mu(vm, vd, vs2, vl);
+}
+
+// CHECK-RV64-LABEL: define dso_local <vscale x 32 x bfloat> @test_vfwcvt_f_x_v_bf16m8_mu(
+// CHECK-RV64-SAME: <vscale x 32 x i1> [[VM:%.*]], <vscale x 32 x bfloat> [[VD:%.*]], <vscale x 32 x i8> [[VS2:%.*]], i64 noundef [[VL:%.*]]) #[[ATTR0]] {
+// CHECK-RV64-NEXT: [[ENTRY:.*:]]
+// CHECK-RV64-NEXT: [[TMP0:%.*]] = call <vscale x 32 x bfloat> @llvm.riscv.vfwcvt.f.x.v.mask.nxv32bf16.nxv32i8.i64(<vscale x 32 x bfloat> [[VD]], <vscale x 32 x i8> [[VS2]], <vscale x 32 x i1> [[VM]], i64 [[VL]], i64 1)
+// CHECK-RV64-NEXT: ret <vscale x 32 x bfloat> [[TMP0]]
+//
+vbfloat16m8_t test_vfwcvt_f_x_v_bf16m8_mu(vbool2_t vm, vbfloat16m8_t vd,
+ vint8m4_t vs2, size_t vl) {
+ return __riscv_vfwcvt_f_x_v_bf16m8_mu(vm, vd, vs2, vl);
+}
+
+// CHECK-RV64-LABEL: define dso_local <vscale x 1 x bfloat> @test_vfwcvt_f_xu_v_bf16mf4_mu(
+// CHECK-RV64-SAME: <vscale x 1 x i1> [[VM:%.*]], <vscale x 1 x bfloat> [[VD:%.*]], <vscale x 1 x i8> [[VS2:%.*]], i64 noundef [[VL:%.*]]) #[[ATTR0]] {
+// CHECK-RV64-NEXT: [[ENTRY:.*:]]
+// CHECK-RV64-NEXT: [[TMP0:%.*]] = call <vscale x 1 x bfloat> @llvm.riscv.vfwcvt.f.xu.v.mask.nxv1bf16.nxv1i8.i64(<vscale x 1 x bfloat> [[VD]], <vscale x 1 x i8> [[VS2]], <vscale x 1 x i1> [[VM]], i64 [[VL]], i64 1)
+// CHECK-RV64-NEXT: ret <vscale x 1 x bfloat> [[TMP0]]
+//
+vbfloat16mf4_t test_vfwcvt_f_xu_v_bf16mf4_mu(vbool64_t vm, vbfloat16mf4_t vd,
+ vuint8mf8_t vs2, size_t vl) {
+ return __riscv_vfwcvt_f_xu_v_bf16mf4_mu(vm, vd, vs2, vl);
+}
+
+// CHECK-RV64-LABEL: define dso_local <vscale x 2 x bfloat> @test_vfwcvt_f_xu_v_bf16mf2_mu(
+// CHECK-RV64-SAME: <vscale x 2 x i1> [[VM:%.*]], <vscale x 2 x bfloat> [[VD:%.*]], <vscale x 2 x i8> [[VS2:%.*]], i64 noundef [[VL:%.*]]) #[[ATTR0]] {
+// CHECK-RV64-NEXT: [[ENTRY:.*:]]
+// CHECK-RV64-NEXT: [[TMP0:%.*]] = call <vscale x 2 x bfloat> @llvm.riscv.vfwcvt.f.xu.v.mask.nxv2bf16.nxv2i8.i64(<vscale x 2 x bfloat> [[VD]], <vscale x 2 x i8> [[VS2]], <vscale x 2 x i1> [[VM]], i64 [[VL]], i64 1)
+// CHECK-RV64-NEXT: ret <vscale x 2 x bfloat> [[TMP0]]
+//
+vbfloat16mf2_t test_vfwcvt_f_xu_v_bf16mf2_mu(vbool32_t vm, vbfloat16mf2_t vd,
+ vuint8mf4_t vs2, size_t vl) {
+ return __riscv_vfwcvt_f_xu_v_bf16mf2_mu(vm, vd, vs2, vl);
+}
+
+// CHECK-RV64-LABEL: define dso_local <vscale x 4 x bfloat> @test_vfwcvt_f_xu_v_bf16m1_mu(
+// CHECK-RV64-SAME: <vscale x 4 x i1> [[VM:%.*]], <vscale x 4 x bfloat> [[VD:%.*]], <vscale x 4 x i8> [[VS2:%.*]], i64 noundef [[VL:%.*]]) #[[ATTR0]] {
+// CHECK-RV64-NEXT: [[ENTRY:.*:]]
+// CHECK-RV64-NEXT: [[TMP0:%.*]] = call <vscale x 4 x bfloat> @llvm.riscv.vfwcvt.f.xu.v.mask.nxv4bf16.nxv4i8.i64(<vscale x 4 x bfloat> [[VD]], <vscale x 4 x i8> [[VS2]], <vscale x 4 x i1> [[VM]], i64 [[VL]], i64 1)
+// CHECK-RV64-NEXT: ret <vscale x 4 x bfloat> [[TMP0]]
+//
+vbfloat16m1_t test_vfwcvt_f_xu_v_bf16m1_mu(vbool16_t vm, vbfloat16m1_t vd,
+ vuint8mf2_t vs2, size_t vl) {
+ return __riscv_vfwcvt_f_xu_v_bf16m1_mu(vm, vd, vs2, vl);
+}
+
+// CHECK-RV64-LABEL: define dso_local <vscale x 8 x bfloat> @test_vfwcvt_f_xu_v_bf16m2_mu(
+// CHECK-RV64-SAME: <vscale x 8 x i1> [[VM:%.*]], <vscale x 8 x bfloat> [[VD:%.*]], <vscale x 8 x i8> [[VS2:%.*]], i64 noundef [[VL:%.*]]) #[[ATTR0]] {
+// CHECK-RV64-NEXT: [[ENTRY:.*:]]
+// CHECK-RV64-NEXT: [[TMP0:%.*]] = call <vscale x 8 x bfloat> @llvm.riscv.vfwcvt.f.xu.v.mask.nxv8bf16.nxv8i8.i64(<vscale x 8 x bfloat> [[VD]], <vscale x 8 x i8> [[VS2]], <vscale x 8 x i1> [[VM]], i64 [[VL]], i64 1)
+// CHECK-RV64-NEXT: ret <vscale x 8 x bfloat> [[TMP0]]
+//
+vbfloat16m2_t test_vfwcvt_f_xu_v_bf16m2_mu(vbool8_t vm, vbfloat16m2_t vd,
+ vuint8m1_t vs2, size_t vl) {
+ return __riscv_vfwcvt_f_xu_v_bf16m2_mu(vm, vd, vs2, vl);
+}
+
+// CHECK-RV64-LABEL: define dso_local <vscale x 16 x bfloat> @test_vfwcvt_f_xu_v_bf16m4_mu(
+// CHECK-RV64-SAME: <vscale x 16 x i1> [[VM:%.*]], <vscale x 16 x bfloat> [[VD:%.*]], <vscale x 16 x i8> [[VS2:%.*]], i64 noundef [[VL:%.*]]) #[[ATTR0]] {
+// CHECK-RV64-NEXT: [[ENTRY:.*:]]
+// CHECK-RV64-NEXT: [[TMP0:%.*]] = call <vscale x 16 x bfloat> @llvm.riscv.vfwcvt.f.xu.v.mask.nxv16bf16.nxv16i8.i64(<vscale x 16 x bfloat> [[VD]], <vscale x 16 x i8> [[VS2]], <vscale x 16 x i1> [[VM]], i64 [[VL]], i64 1)
+// CHECK-RV64-NEXT: ret <vscale x 16 x bfloat> [[TMP0]]
+//
+vbfloat16m4_t test_vfwcvt_f_xu_v_bf16m4_mu(vbool4_t vm, vbfloat16m4_t vd,
+ vuint8m2_t vs2, size_t vl) {
+ return __riscv_vfwcvt_f_xu_v_bf16m4_mu(vm, vd, vs2, vl);
+}
+
+// CHECK-RV64-LABEL: define dso_local <vscale x 32 x bfloat> @test_vfwcvt_f_xu_v_bf16m8_mu(
+// CHECK-RV64-SAME: <vscale x 32 x i1> [[VM:%.*]], <vscale x 32 x bfloat> [[VD:%.*]], <vscale x 32 x i8> [[VS2:%.*]], i64 noundef [[VL:%.*]]) #[[ATTR0]] {
+// CHECK-RV64-NEXT: [[ENTRY:.*:]]
+// CHECK-RV64-NEXT: [[TMP0:%.*]] = call <vscale x 32 x bfloat> @llvm.riscv.vfwcvt.f.xu.v.mask.nxv32bf16.nxv32i8.i64(<vscale x 32 x bfloat> [[VD]], <vscale x 32 x i8> [[VS2]], <vscale x 32 x i1> [[VM]], i64 [[VL]], i64 1)
+// CHECK-RV64-NEXT: ret <vscale x 32 x bfloat> [[TMP0]]
+//
+vbfloat16m8_t test_vfwcvt_f_xu_v_bf16m8_mu(vbool2_t vm, vbfloat16m8_t vd,
+ vuint8m4_t vs2, size_t vl) {
+ return __riscv_vfwcvt_f_xu_v_bf16m8_mu(vm, vd, vs2, vl);
+}
+
+// CHECK-RV64-LABEL: define dso_local <vscale x 1 x float> @test_vfwcvt_f_f_v_bf16mf4_f32mf2_mu(
+// CHECK-RV64-SAME: <vscale x 1 x i1> [[VM:%.*]], <vscale x 1 x float> [[VD:%.*]], <vscale x 1 x bfloat> [[VS2:%.*]], i64 noundef [[VL:%.*]]) #[[ATTR0]] {
+// CHECK-RV64-NEXT: [[ENTRY:.*:]]
+// CHECK-RV64-NEXT: [[TMP0:%.*]] = call <vscale x 1 x float> @llvm.riscv.vfwcvt.f.f.v.mask.nxv1f32.nxv1bf16.i64(<vscale x 1 x float> [[VD]], <vscale x 1 x bfloat> [[VS2]], <vscale x 1 x i1> [[VM]], i64 [[VL]], i64 1)
+// CHECK-RV64-NEXT: ret <vscale x 1 x float> [[TMP0]]
+//
+vfloat32mf2_t test_vfwcvt_f_f_v_bf16mf4_f32mf2_mu(vbool64_t vm,
+ vfloat32mf2_t vd,
+ vbfloat16mf4_t vs2,
+ size_t vl) {
+ return __riscv_vfwcvt_f_f_v_bf16mf4_f32mf2_mu(vm, vd, vs2, vl);
+}
+
+// CHECK-RV64-LABEL: define dso_local <vscale x 2 x float> @test_vfwcvt_f_f_v_bf16mf2_f32m1_mu(
+// CHECK-RV64-SAME: <vscale x 2 x i1> [[VM:%.*]], <vscale x 2 x float> [[VD:%.*]], <vscale x 2 x bfloat> [[VS2:%.*]], i64 noundef [[VL:%.*]]) #[[ATTR0]] {
+// CHECK-RV64-NEXT: [[ENTRY:.*:]]
+// CHECK-RV64-NEXT: [[TMP0:%.*]] = call <vscale x 2 x float> @llvm.riscv.vfwcvt.f.f.v.mask.nxv2f32.nxv2bf16.i64(<vscale x 2 x float> [[VD]], <vscale x 2 x bfloat> [[VS2]], <vscale x 2 x i1> [[VM]], i64 [[VL]], i64 1)
+// CHECK-RV64-NEXT: ret <vscale x 2 x float> [[TMP0]]
+//
+vfloat32m1_t test_vfwcvt_f_f_v_bf16mf2_f32m1_mu(vbool32_t vm, vfloat32m1_t vd,
+ vbfloat16mf2_t vs2, size_t vl) {
+ return __riscv_vfwcvt_f_f_v_bf16mf2_f32m1_mu(vm, vd, vs2, vl);
+}
+
+// CHECK-RV64-LABEL: define dso_local <vscale x 4 x float> @test_vfwcvt_f_f_v_bf16m1_f32m2_mu(
+// CHECK-RV64-SAME: <vscale x 4 x i1> [[VM:%.*]], <vscale x 4 x float> [[VD:%.*]], <vscale x 4 x bfloat> [[VS2:%.*]], i64 noundef [[VL:%.*]]) #[[ATTR0]] {
+// CHECK-RV64-NEXT: [[ENTRY:.*:]]
+// CHECK-RV64-NEXT: [[TMP0:%.*]] = call <vscale x 4 x float> @llvm.riscv.vfwcvt.f.f.v.mask.nxv4f32.nxv4bf16.i64(<vscale x 4 x float> [[VD]], <vscale x 4 x bfloat> [[VS2]], <vscale x 4 x i1> [[VM]], i64 [[VL]], i64 1)
+// CHECK-RV64-NEXT: ret <vscale x 4 x float> [[TMP0]]
+//
+vfloat32m2_t test_vfwcvt_f_f_v_bf16m1_f32m2_mu(vbool16_t vm, vfloat32m2_t vd,
+ vbfloat16m1_t vs2, size_t vl) {
+ return __riscv_vfwcvt_f_f_v_bf16m1_f32m2_mu(vm, vd, vs2, vl);
+}
+
+// CHECK-RV64-LABEL: define dso_local <vscale x 8 x float> @test_vfwcvt_f_f_v_bf16m2_f32m4_mu(
+// CHECK-RV64-SAME: <vscale x 8 x i1> [[VM:%.*]], <vscale x 8 x float> [[VD:%.*]], <vscale x 8 x bfloat> [[VS2:%.*]], i64 noundef [[VL:%.*]]) #[[ATTR0]] {
+// CHECK-RV64-NEXT: [[ENTRY:.*:]]
+// CHECK-RV64-NEXT: [[TMP0:%.*]] = call <vscale x 8 x float> @llvm.riscv.vfwcvt.f.f.v.mask.nxv8f32.nxv8bf16.i64(<vscale x 8 x float> [[VD]], <vscale x 8 x bfloat> [[VS2]], <vscale x 8 x i1> [[VM]], i64 [[VL]], i64 1)
+// CHECK-RV64-NEXT: ret <vscale x 8 x float> [[TMP0]]
+//
+vfloat32m4_t test_vfwcvt_f_f_v_bf16m2_f32m4_mu(vbool8_t vm, vfloat32m4_t vd,
+ vbfloat16m2_t vs2, size_t vl) {
+ return __riscv_vfwcvt_f_f_v_bf16m2_f32m4_mu(vm, vd, vs2, vl);
+}
+
+// CHECK-RV64-LABEL: define dso_local <vscale x 16 x float> @test_vfwcvt_f_f_v_bf16m4_f32m8_mu(
+// CHECK-RV64-SAME: <vscale x 16 x i1> [[VM:%.*]], <vscale x 16 x float> [[VD:%.*]], <vscale x 16 x bfloat> [[VS2:%.*]], i64 noundef [[VL:%.*]]) #[[ATTR0]] {
+// CHECK-RV64-NEXT: [[ENTRY:.*:]]
+// CHECK-RV64-NEXT: [[TMP0:%.*]] = call <vscale x 16 x float> @llvm.riscv.vfwcvt.f.f.v.mask.nxv16f32.nxv16bf16.i64(<vscale x 16 x float> [[VD]], <vscale x 16 x bfloat> [[VS2]], <vscale x 16 x i1> [[VM]], i64 [[VL]], i64 1)
+// CHECK-RV64-NEXT: ret <vscale x 16 x float> [[TMP0]]
+//
+vfloat32m8_t test_vfwcvt_f_f_v_bf16m4_f32m8_mu(vbool4_t vm, vfloat32m8_t vd,
+ vbfloat16m4_t vs2, size_t vl) {
+ return __riscv_vfwcvt_f_f_v_bf16m4_f32m8_mu(vm, vd, vs2, vl);
+}
diff --git a/clang/test/CodeGen/RISCV/rvv-intrinsics-autogenerated/zvfbfa/policy/non-overloaded/vfwmacc.c b/clang/test/CodeGen/RISCV/rvv-intrinsics-autogenerated/zvfbfa/policy/non-overloaded/vfwmacc.c
new file mode 100644
index 0000000..f67b100
--- /dev/null
+++ b/clang/test/CodeGen/RISCV/rvv-intrinsics-autogenerated/zvfbfa/policy/non-overloaded/vfwmacc.c
@@ -0,0 +1,1017 @@
+// NOTE: Assertions have been autogenerated by utils/update_cc_test_checks.py UTC_ARGS: --version 5
+// REQUIRES: riscv-registered-target
+// RUN: %clang_cc1 -triple riscv64 -target-feature +v \
+// RUN: -target-feature +experimental-zvfbfa -disable-O0-optnone \
+// RUN: -emit-llvm %s -o - | opt -S -passes=mem2reg | \
+// RUN: FileCheck --check-prefix=CHECK-RV64 %s
+
+#include <riscv_vector.h>
+
+// CHECK-RV64-LABEL: define dso_local <vscale x 1 x float> @test_vfwmacc_vv_bf16mf4_f32mf2_tu(
+// CHECK-RV64-SAME: <vscale x 1 x float> [[VD:%.*]], <vscale x 1 x bfloat> [[VS1:%.*]], <vscale x 1 x bfloat> [[VS2:%.*]], i64 noundef [[VL:%.*]]) #[[ATTR0:[0-9]+]] {
+// CHECK-RV64-NEXT: [[ENTRY:.*:]]
+// CHECK-RV64-NEXT: [[TMP0:%.*]] = call <vscale x 1 x float> @llvm.riscv.vfwmacc.nxv1f32.nxv1bf16.nxv1bf16.i64(<vscale x 1 x float> [[VD]], <vscale x 1 x bfloat> [[VS1]], <vscale x 1 x bfloat> [[VS2]], i64 7, i64 [[VL]], i64 2)
+// CHECK-RV64-NEXT: ret <vscale x 1 x float> [[TMP0]]
+//
+vfloat32mf2_t test_vfwmacc_vv_bf16mf4_f32mf2_tu(vfloat32mf2_t vd,
+ vbfloat16mf4_t vs1,
+ vbfloat16mf4_t vs2, size_t vl) {
+ return __riscv_vfwmacc_vv_bf16mf4_f32mf2_tu(vd, vs1, vs2, vl);
+}
+
+// CHECK-RV64-LABEL: define dso_local <vscale x 1 x float> @test_vfwmacc_vf_bf16mf4_f32mf2_tu(
+// CHECK-RV64-SAME: <vscale x 1 x float> [[VD:%.*]], bfloat noundef [[VS1:%.*]], <vscale x 1 x bfloat> [[VS2:%.*]], i64 noundef [[VL:%.*]]) #[[ATTR0]] {
+// CHECK-RV64-NEXT: [[ENTRY:.*:]]
+// CHECK-RV64-NEXT: [[TMP0:%.*]] = call <vscale x 1 x float> @llvm.riscv.vfwmacc.nxv1f32.bf16.nxv1bf16.i64(<vscale x 1 x float> [[VD]], bfloat [[VS1]], <vscale x 1 x bfloat> [[VS2]], i64 7, i64 [[VL]], i64 2)
+// CHECK-RV64-NEXT: ret <vscale x 1 x float> [[TMP0]]
+//
+vfloat32mf2_t test_vfwmacc_vf_bf16mf4_f32mf2_tu(vfloat32mf2_t vd, __bf16 vs1,
+ vbfloat16mf4_t vs2, size_t vl) {
+ return __riscv_vfwmacc_vf_bf16mf4_f32mf2_tu(vd, vs1, vs2, vl);
+}
+
+// CHECK-RV64-LABEL: define dso_local <vscale x 2 x float> @test_vfwmacc_vv_bf16mf2_f32m1_tu(
+// CHECK-RV64-SAME: <vscale x 2 x float> [[VD:%.*]], <vscale x 2 x bfloat> [[VS1:%.*]], <vscale x 2 x bfloat> [[VS2:%.*]], i64 noundef [[VL:%.*]]) #[[ATTR0]] {
+// CHECK-RV64-NEXT: [[ENTRY:.*:]]
+// CHECK-RV64-NEXT: [[TMP0:%.*]] = call <vscale x 2 x float> @llvm.riscv.vfwmacc.nxv2f32.nxv2bf16.nxv2bf16.i64(<vscale x 2 x float> [[VD]], <vscale x 2 x bfloat> [[VS1]], <vscale x 2 x bfloat> [[VS2]], i64 7, i64 [[VL]], i64 2)
+// CHECK-RV64-NEXT: ret <vscale x 2 x float> [[TMP0]]
+//
+vfloat32m1_t test_vfwmacc_vv_bf16mf2_f32m1_tu(vfloat32m1_t vd,
+ vbfloat16mf2_t vs1,
+ vbfloat16mf2_t vs2, size_t vl) {
+ return __riscv_vfwmacc_vv_bf16mf2_f32m1_tu(vd, vs1, vs2, vl);
+}
+
+// CHECK-RV64-LABEL: define dso_local <vscale x 2 x float> @test_vfwmacc_vf_bf16mf2_f32m1_tu(
+// CHECK-RV64-SAME: <vscale x 2 x float> [[VD:%.*]], bfloat noundef [[VS1:%.*]], <vscale x 2 x bfloat> [[VS2:%.*]], i64 noundef [[VL:%.*]]) #[[ATTR0]] {
+// CHECK-RV64-NEXT: [[ENTRY:.*:]]
+// CHECK-RV64-NEXT: [[TMP0:%.*]] = call <vscale x 2 x float> @llvm.riscv.vfwmacc.nxv2f32.bf16.nxv2bf16.i64(<vscale x 2 x float> [[VD]], bfloat [[VS1]], <vscale x 2 x bfloat> [[VS2]], i64 7, i64 [[VL]], i64 2)
+// CHECK-RV64-NEXT: ret <vscale x 2 x float> [[TMP0]]
+//
+vfloat32m1_t test_vfwmacc_vf_bf16mf2_f32m1_tu(vfloat32m1_t vd, __bf16 vs1,
+ vbfloat16mf2_t vs2, size_t vl) {
+ return __riscv_vfwmacc_vf_bf16mf2_f32m1_tu(vd, vs1, vs2, vl);
+}
+
+// CHECK-RV64-LABEL: define dso_local <vscale x 4 x float> @test_vfwmacc_vv_bf16m1_f32m2_tu(
+// CHECK-RV64-SAME: <vscale x 4 x float> [[VD:%.*]], <vscale x 4 x bfloat> [[VS1:%.*]], <vscale x 4 x bfloat> [[VS2:%.*]], i64 noundef [[VL:%.*]]) #[[ATTR0]] {
+// CHECK-RV64-NEXT: [[ENTRY:.*:]]
+// CHECK-RV64-NEXT: [[TMP0:%.*]] = call <vscale x 4 x float> @llvm.riscv.vfwmacc.nxv4f32.nxv4bf16.nxv4bf16.i64(<vscale x 4 x float> [[VD]], <vscale x 4 x bfloat> [[VS1]], <vscale x 4 x bfloat> [[VS2]], i64 7, i64 [[VL]], i64 2)
+// CHECK-RV64-NEXT: ret <vscale x 4 x float> [[TMP0]]
+//
+vfloat32m2_t test_vfwmacc_vv_bf16m1_f32m2_tu(vfloat32m2_t vd, vbfloat16m1_t vs1,
+ vbfloat16m1_t vs2, size_t vl) {
+ return __riscv_vfwmacc_vv_bf16m1_f32m2_tu(vd, vs1, vs2, vl);
+}
+
+// CHECK-RV64-LABEL: define dso_local <vscale x 4 x float> @test_vfwmacc_vf_bf16m1_f32m2_tu(
+// CHECK-RV64-SAME: <vscale x 4 x float> [[VD:%.*]], bfloat noundef [[VS1:%.*]], <vscale x 4 x bfloat> [[VS2:%.*]], i64 noundef [[VL:%.*]]) #[[ATTR0]] {
+// CHECK-RV64-NEXT: [[ENTRY:.*:]]
+// CHECK-RV64-NEXT: [[TMP0:%.*]] = call <vscale x 4 x float> @llvm.riscv.vfwmacc.nxv4f32.bf16.nxv4bf16.i64(<vscale x 4 x float> [[VD]], bfloat [[VS1]], <vscale x 4 x bfloat> [[VS2]], i64 7, i64 [[VL]], i64 2)
+// CHECK-RV64-NEXT: ret <vscale x 4 x float> [[TMP0]]
+//
+vfloat32m2_t test_vfwmacc_vf_bf16m1_f32m2_tu(vfloat32m2_t vd, __bf16 vs1,
+ vbfloat16m1_t vs2, size_t vl) {
+ return __riscv_vfwmacc_vf_bf16m1_f32m2_tu(vd, vs1, vs2, vl);
+}
+
+// CHECK-RV64-LABEL: define dso_local <vscale x 8 x float> @test_vfwmacc_vv_bf16m2_f32m4_tu(
+// CHECK-RV64-SAME: <vscale x 8 x float> [[VD:%.*]], <vscale x 8 x bfloat> [[VS1:%.*]], <vscale x 8 x bfloat> [[VS2:%.*]], i64 noundef [[VL:%.*]]) #[[ATTR0]] {
+// CHECK-RV64-NEXT: [[ENTRY:.*:]]
+// CHECK-RV64-NEXT: [[TMP0:%.*]] = call <vscale x 8 x float> @llvm.riscv.vfwmacc.nxv8f32.nxv8bf16.nxv8bf16.i64(<vscale x 8 x float> [[VD]], <vscale x 8 x bfloat> [[VS1]], <vscale x 8 x bfloat> [[VS2]], i64 7, i64 [[VL]], i64 2)
+// CHECK-RV64-NEXT: ret <vscale x 8 x float> [[TMP0]]
+//
+vfloat32m4_t test_vfwmacc_vv_bf16m2_f32m4_tu(vfloat32m4_t vd, vbfloat16m2_t vs1,
+ vbfloat16m2_t vs2, size_t vl) {
+ return __riscv_vfwmacc_vv_bf16m2_f32m4_tu(vd, vs1, vs2, vl);
+}
+
+// CHECK-RV64-LABEL: define dso_local <vscale x 8 x float> @test_vfwmacc_vf_bf16m2_f32m4_tu(
+// CHECK-RV64-SAME: <vscale x 8 x float> [[VD:%.*]], bfloat noundef [[VS1:%.*]], <vscale x 8 x bfloat> [[VS2:%.*]], i64 noundef [[VL:%.*]]) #[[ATTR0]] {
+// CHECK-RV64-NEXT: [[ENTRY:.*:]]
+// CHECK-RV64-NEXT: [[TMP0:%.*]] = call <vscale x 8 x float> @llvm.riscv.vfwmacc.nxv8f32.bf16.nxv8bf16.i64(<vscale x 8 x float> [[VD]], bfloat [[VS1]], <vscale x 8 x bfloat> [[VS2]], i64 7, i64 [[VL]], i64 2)
+// CHECK-RV64-NEXT: ret <vscale x 8 x float> [[TMP0]]
+//
+vfloat32m4_t test_vfwmacc_vf_bf16m2_f32m4_tu(vfloat32m4_t vd, __bf16 vs1,
+ vbfloat16m2_t vs2, size_t vl) {
+ return __riscv_vfwmacc_vf_bf16m2_f32m4_tu(vd, vs1, vs2, vl);
+}
+
+// CHECK-RV64-LABEL: define dso_local <vscale x 16 x float> @test_vfwmacc_vv_bf16m4_f32m8_tu(
+// CHECK-RV64-SAME: <vscale x 16 x float> [[VD:%.*]], <vscale x 16 x bfloat> [[VS1:%.*]], <vscale x 16 x bfloat> [[VS2:%.*]], i64 noundef [[VL:%.*]]) #[[ATTR0]] {
+// CHECK-RV64-NEXT: [[ENTRY:.*:]]
+// CHECK-RV64-NEXT: [[TMP0:%.*]] = call <vscale x 16 x float> @llvm.riscv.vfwmacc.nxv16f32.nxv16bf16.nxv16bf16.i64(<vscale x 16 x float> [[VD]], <vscale x 16 x bfloat> [[VS1]], <vscale x 16 x bfloat> [[VS2]], i64 7, i64 [[VL]], i64 2)
+// CHECK-RV64-NEXT: ret <vscale x 16 x float> [[TMP0]]
+//
+vfloat32m8_t test_vfwmacc_vv_bf16m4_f32m8_tu(vfloat32m8_t vd, vbfloat16m4_t vs1,
+ vbfloat16m4_t vs2, size_t vl) {
+ return __riscv_vfwmacc_vv_bf16m4_f32m8_tu(vd, vs1, vs2, vl);
+}
+
+// CHECK-RV64-LABEL: define dso_local <vscale x 16 x float> @test_vfwmacc_vf_bf16m4_f32m8_tu(
+// CHECK-RV64-SAME: <vscale x 16 x float> [[VD:%.*]], bfloat noundef [[VS1:%.*]], <vscale x 16 x bfloat> [[VS2:%.*]], i64 noundef [[VL:%.*]]) #[[ATTR0]] {
+// CHECK-RV64-NEXT: [[ENTRY:.*:]]
+// CHECK-RV64-NEXT: [[TMP0:%.*]] = call <vscale x 16 x float> @llvm.riscv.vfwmacc.nxv16f32.bf16.nxv16bf16.i64(<vscale x 16 x float> [[VD]], bfloat [[VS1]], <vscale x 16 x bfloat> [[VS2]], i64 7, i64 [[VL]], i64 2)
+// CHECK-RV64-NEXT: ret <vscale x 16 x float> [[TMP0]]
+//
+vfloat32m8_t test_vfwmacc_vf_bf16m4_f32m8_tu(vfloat32m8_t vd, __bf16 vs1,
+ vbfloat16m4_t vs2, size_t vl) {
+ return __riscv_vfwmacc_vf_bf16m4_f32m8_tu(vd, vs1, vs2, vl);
+}
+
+// CHECK-RV64-LABEL: define dso_local <vscale x 1 x float> @test_vfwmacc_vv_bf16mf4_f32mf2_tum(
+// CHECK-RV64-SAME: <vscale x 1 x i1> [[VM:%.*]], <vscale x 1 x float> [[VD:%.*]], <vscale x 1 x bfloat> [[VS1:%.*]], <vscale x 1 x bfloat> [[VS2:%.*]], i64 noundef [[VL:%.*]]) #[[ATTR0]] {
+// CHECK-RV64-NEXT: [[ENTRY:.*:]]
+// CHECK-RV64-NEXT: [[TMP0:%.*]] = call <vscale x 1 x float> @llvm.riscv.vfwmacc.mask.nxv1f32.nxv1bf16.nxv1bf16.i64(<vscale x 1 x float> [[VD]], <vscale x 1 x bfloat> [[VS1]], <vscale x 1 x bfloat> [[VS2]], <vscale x 1 x i1> [[VM]], i64 7, i64 [[VL]], i64 2)
+// CHECK-RV64-NEXT: ret <vscale x 1 x float> [[TMP0]]
+//
+vfloat32mf2_t test_vfwmacc_vv_bf16mf4_f32mf2_tum(vbool64_t vm, vfloat32mf2_t vd,
+ vbfloat16mf4_t vs1,
+ vbfloat16mf4_t vs2,
+ size_t vl) {
+ return __riscv_vfwmacc_vv_bf16mf4_f32mf2_tum(vm, vd, vs1, vs2, vl);
+}
+
+// CHECK-RV64-LABEL: define dso_local <vscale x 1 x float> @test_vfwmacc_vf_bf16mf4_f32mf2_tum(
+// CHECK-RV64-SAME: <vscale x 1 x i1> [[VM:%.*]], <vscale x 1 x float> [[VD:%.*]], bfloat noundef [[VS1:%.*]], <vscale x 1 x bfloat> [[VS2:%.*]], i64 noundef [[VL:%.*]]) #[[ATTR0]] {
+// CHECK-RV64-NEXT: [[ENTRY:.*:]]
+// CHECK-RV64-NEXT: [[TMP0:%.*]] = call <vscale x 1 x float> @llvm.riscv.vfwmacc.mask.nxv1f32.bf16.nxv1bf16.i64(<vscale x 1 x float> [[VD]], bfloat [[VS1]], <vscale x 1 x bfloat> [[VS2]], <vscale x 1 x i1> [[VM]], i64 7, i64 [[VL]], i64 2)
+// CHECK-RV64-NEXT: ret <vscale x 1 x float> [[TMP0]]
+//
+vfloat32mf2_t test_vfwmacc_vf_bf16mf4_f32mf2_tum(vbool64_t vm, vfloat32mf2_t vd,
+ __bf16 vs1, vbfloat16mf4_t vs2,
+ size_t vl) {
+ return __riscv_vfwmacc_vf_bf16mf4_f32mf2_tum(vm, vd, vs1, vs2, vl);
+}
+
+// CHECK-RV64-LABEL: define dso_local <vscale x 2 x float> @test_vfwmacc_vv_bf16mf2_f32m1_tum(
+// CHECK-RV64-SAME: <vscale x 2 x i1> [[VM:%.*]], <vscale x 2 x float> [[VD:%.*]], <vscale x 2 x bfloat> [[VS1:%.*]], <vscale x 2 x bfloat> [[VS2:%.*]], i64 noundef [[VL:%.*]]) #[[ATTR0]] {
+// CHECK-RV64-NEXT: [[ENTRY:.*:]]
+// CHECK-RV64-NEXT: [[TMP0:%.*]] = call <vscale x 2 x float> @llvm.riscv.vfwmacc.mask.nxv2f32.nxv2bf16.nxv2bf16.i64(<vscale x 2 x float> [[VD]], <vscale x 2 x bfloat> [[VS1]], <vscale x 2 x bfloat> [[VS2]], <vscale x 2 x i1> [[VM]], i64 7, i64 [[VL]], i64 2)
+// CHECK-RV64-NEXT: ret <vscale x 2 x float> [[TMP0]]
+//
+vfloat32m1_t test_vfwmacc_vv_bf16mf2_f32m1_tum(vbool32_t vm, vfloat32m1_t vd,
+ vbfloat16mf2_t vs1,
+ vbfloat16mf2_t vs2, size_t vl) {
+ return __riscv_vfwmacc_vv_bf16mf2_f32m1_tum(vm, vd, vs1, vs2, vl);
+}
+
+// CHECK-RV64-LABEL: define dso_local <vscale x 2 x float> @test_vfwmacc_vf_bf16mf2_f32m1_tum(
+// CHECK-RV64-SAME: <vscale x 2 x i1> [[VM:%.*]], <vscale x 2 x float> [[VD:%.*]], bfloat noundef [[VS1:%.*]], <vscale x 2 x bfloat> [[VS2:%.*]], i64 noundef [[VL:%.*]]) #[[ATTR0]] {
+// CHECK-RV64-NEXT: [[ENTRY:.*:]]
+// CHECK-RV64-NEXT: [[TMP0:%.*]] = call <vscale x 2 x float> @llvm.riscv.vfwmacc.mask.nxv2f32.bf16.nxv2bf16.i64(<vscale x 2 x float> [[VD]], bfloat [[VS1]], <vscale x 2 x bfloat> [[VS2]], <vscale x 2 x i1> [[VM]], i64 7, i64 [[VL]], i64 2)
+// CHECK-RV64-NEXT: ret <vscale x 2 x float> [[TMP0]]
+//
+vfloat32m1_t test_vfwmacc_vf_bf16mf2_f32m1_tum(vbool32_t vm, vfloat32m1_t vd,
+ __bf16 vs1, vbfloat16mf2_t vs2,
+ size_t vl) {
+ return __riscv_vfwmacc_vf_bf16mf2_f32m1_tum(vm, vd, vs1, vs2, vl);
+}
+
+// CHECK-RV64-LABEL: define dso_local <vscale x 4 x float> @test_vfwmacc_vv_bf16m1_f32m2_tum(
+// CHECK-RV64-SAME: <vscale x 4 x i1> [[VM:%.*]], <vscale x 4 x float> [[VD:%.*]], <vscale x 4 x bfloat> [[VS1:%.*]], <vscale x 4 x bfloat> [[VS2:%.*]], i64 noundef [[VL:%.*]]) #[[ATTR0]] {
+// CHECK-RV64-NEXT: [[ENTRY:.*:]]
+// CHECK-RV64-NEXT: [[TMP0:%.*]] = call <vscale x 4 x float> @llvm.riscv.vfwmacc.mask.nxv4f32.nxv4bf16.nxv4bf16.i64(<vscale x 4 x float> [[VD]], <vscale x 4 x bfloat> [[VS1]], <vscale x 4 x bfloat> [[VS2]], <vscale x 4 x i1> [[VM]], i64 7, i64 [[VL]], i64 2)
+// CHECK-RV64-NEXT: ret <vscale x 4 x float> [[TMP0]]
+//
+vfloat32m2_t test_vfwmacc_vv_bf16m1_f32m2_tum(vbool16_t vm, vfloat32m2_t vd,
+ vbfloat16m1_t vs1,
+ vbfloat16m1_t vs2, size_t vl) {
+ return __riscv_vfwmacc_vv_bf16m1_f32m2_tum(vm, vd, vs1, vs2, vl);
+}
+
+// CHECK-RV64-LABEL: define dso_local <vscale x 4 x float> @test_vfwmacc_vf_bf16m1_f32m2_tum(
+// CHECK-RV64-SAME: <vscale x 4 x i1> [[VM:%.*]], <vscale x 4 x float> [[VD:%.*]], bfloat noundef [[VS1:%.*]], <vscale x 4 x bfloat> [[VS2:%.*]], i64 noundef [[VL:%.*]]) #[[ATTR0]] {
+// CHECK-RV64-NEXT: [[ENTRY:.*:]]
+// CHECK-RV64-NEXT: [[TMP0:%.*]] = call <vscale x 4 x float> @llvm.riscv.vfwmacc.mask.nxv4f32.bf16.nxv4bf16.i64(<vscale x 4 x float> [[VD]], bfloat [[VS1]], <vscale x 4 x bfloat> [[VS2]], <vscale x 4 x i1> [[VM]], i64 7, i64 [[VL]], i64 2)
+// CHECK-RV64-NEXT: ret <vscale x 4 x float> [[TMP0]]
+//
+vfloat32m2_t test_vfwmacc_vf_bf16m1_f32m2_tum(vbool16_t vm, vfloat32m2_t vd,
+ __bf16 vs1, vbfloat16m1_t vs2,
+ size_t vl) {
+ return __riscv_vfwmacc_vf_bf16m1_f32m2_tum(vm, vd, vs1, vs2, vl);
+}
+
+// CHECK-RV64-LABEL: define dso_local <vscale x 8 x float> @test_vfwmacc_vv_bf16m2_f32m4_tum(
+// CHECK-RV64-SAME: <vscale x 8 x i1> [[VM:%.*]], <vscale x 8 x float> [[VD:%.*]], <vscale x 8 x bfloat> [[VS1:%.*]], <vscale x 8 x bfloat> [[VS2:%.*]], i64 noundef [[VL:%.*]]) #[[ATTR0]] {
+// CHECK-RV64-NEXT: [[ENTRY:.*:]]
+// CHECK-RV64-NEXT: [[TMP0:%.*]] = call <vscale x 8 x float> @llvm.riscv.vfwmacc.mask.nxv8f32.nxv8bf16.nxv8bf16.i64(<vscale x 8 x float> [[VD]], <vscale x 8 x bfloat> [[VS1]], <vscale x 8 x bfloat> [[VS2]], <vscale x 8 x i1> [[VM]], i64 7, i64 [[VL]], i64 2)
+// CHECK-RV64-NEXT: ret <vscale x 8 x float> [[TMP0]]
+//
+vfloat32m4_t test_vfwmacc_vv_bf16m2_f32m4_tum(vbool8_t vm, vfloat32m4_t vd,
+ vbfloat16m2_t vs1,
+ vbfloat16m2_t vs2, size_t vl) {
+ return __riscv_vfwmacc_vv_bf16m2_f32m4_tum(vm, vd, vs1, vs2, vl);
+}
+
+// CHECK-RV64-LABEL: define dso_local <vscale x 8 x float> @test_vfwmacc_vf_bf16m2_f32m4_tum(
+// CHECK-RV64-SAME: <vscale x 8 x i1> [[VM:%.*]], <vscale x 8 x float> [[VD:%.*]], bfloat noundef [[VS1:%.*]], <vscale x 8 x bfloat> [[VS2:%.*]], i64 noundef [[VL:%.*]]) #[[ATTR0]] {
+// CHECK-RV64-NEXT: [[ENTRY:.*:]]
+// CHECK-RV64-NEXT: [[TMP0:%.*]] = call <vscale x 8 x float> @llvm.riscv.vfwmacc.mask.nxv8f32.bf16.nxv8bf16.i64(<vscale x 8 x float> [[VD]], bfloat [[VS1]], <vscale x 8 x bfloat> [[VS2]], <vscale x 8 x i1> [[VM]], i64 7, i64 [[VL]], i64 2)
+// CHECK-RV64-NEXT: ret <vscale x 8 x float> [[TMP0]]
+//
+vfloat32m4_t test_vfwmacc_vf_bf16m2_f32m4_tum(vbool8_t vm, vfloat32m4_t vd,
+ __bf16 vs1, vbfloat16m2_t vs2,
+ size_t vl) {
+ return __riscv_vfwmacc_vf_bf16m2_f32m4_tum(vm, vd, vs1, vs2, vl);
+}
+
+// CHECK-RV64-LABEL: define dso_local <vscale x 16 x float> @test_vfwmacc_vv_bf16m4_f32m8_tum(
+// CHECK-RV64-SAME: <vscale x 16 x i1> [[VM:%.*]], <vscale x 16 x float> [[VD:%.*]], <vscale x 16 x bfloat> [[VS1:%.*]], <vscale x 16 x bfloat> [[VS2:%.*]], i64 noundef [[VL:%.*]]) #[[ATTR0]] {
+// CHECK-RV64-NEXT: [[ENTRY:.*:]]
+// CHECK-RV64-NEXT: [[TMP0:%.*]] = call <vscale x 16 x float> @llvm.riscv.vfwmacc.mask.nxv16f32.nxv16bf16.nxv16bf16.i64(<vscale x 16 x float> [[VD]], <vscale x 16 x bfloat> [[VS1]], <vscale x 16 x bfloat> [[VS2]], <vscale x 16 x i1> [[VM]], i64 7, i64 [[VL]], i64 2)
+// CHECK-RV64-NEXT: ret <vscale x 16 x float> [[TMP0]]
+//
+vfloat32m8_t test_vfwmacc_vv_bf16m4_f32m8_tum(vbool4_t vm, vfloat32m8_t vd,
+ vbfloat16m4_t vs1,
+ vbfloat16m4_t vs2, size_t vl) {
+ return __riscv_vfwmacc_vv_bf16m4_f32m8_tum(vm, vd, vs1, vs2, vl);
+}
+
+// CHECK-RV64-LABEL: define dso_local <vscale x 16 x float> @test_vfwmacc_vf_bf16m4_f32m8_tum(
+// CHECK-RV64-SAME: <vscale x 16 x i1> [[VM:%.*]], <vscale x 16 x float> [[VD:%.*]], bfloat noundef [[VS1:%.*]], <vscale x 16 x bfloat> [[VS2:%.*]], i64 noundef [[VL:%.*]]) #[[ATTR0]] {
+// CHECK-RV64-NEXT: [[ENTRY:.*:]]
+// CHECK-RV64-NEXT: [[TMP0:%.*]] = call <vscale x 16 x float> @llvm.riscv.vfwmacc.mask.nxv16f32.bf16.nxv16bf16.i64(<vscale x 16 x float> [[VD]], bfloat [[VS1]], <vscale x 16 x bfloat> [[VS2]], <vscale x 16 x i1> [[VM]], i64 7, i64 [[VL]], i64 2)
+// CHECK-RV64-NEXT: ret <vscale x 16 x float> [[TMP0]]
+//
+vfloat32m8_t test_vfwmacc_vf_bf16m4_f32m8_tum(vbool4_t vm, vfloat32m8_t vd,
+ __bf16 vs1, vbfloat16m4_t vs2,
+ size_t vl) {
+ return __riscv_vfwmacc_vf_bf16m4_f32m8_tum(vm, vd, vs1, vs2, vl);
+}
+
+// CHECK-RV64-LABEL: define dso_local <vscale x 1 x float> @test_vfwmacc_vv_bf16mf4_f32mf2_tumu(
+// CHECK-RV64-SAME: <vscale x 1 x i1> [[VM:%.*]], <vscale x 1 x float> [[VD:%.*]], <vscale x 1 x bfloat> [[VS1:%.*]], <vscale x 1 x bfloat> [[VS2:%.*]], i64 noundef [[VL:%.*]]) #[[ATTR0]] {
+// CHECK-RV64-NEXT: [[ENTRY:.*:]]
+// CHECK-RV64-NEXT: [[TMP0:%.*]] = call <vscale x 1 x float> @llvm.riscv.vfwmacc.mask.nxv1f32.nxv1bf16.nxv1bf16.i64(<vscale x 1 x float> [[VD]], <vscale x 1 x bfloat> [[VS1]], <vscale x 1 x bfloat> [[VS2]], <vscale x 1 x i1> [[VM]], i64 7, i64 [[VL]], i64 0)
+// CHECK-RV64-NEXT: ret <vscale x 1 x float> [[TMP0]]
+//
+vfloat32mf2_t test_vfwmacc_vv_bf16mf4_f32mf2_tumu(vbool64_t vm,
+ vfloat32mf2_t vd,
+ vbfloat16mf4_t vs1,
+ vbfloat16mf4_t vs2,
+ size_t vl) {
+ return __riscv_vfwmacc_vv_bf16mf4_f32mf2_tumu(vm, vd, vs1, vs2, vl);
+}
+
+// CHECK-RV64-LABEL: define dso_local <vscale x 1 x float> @test_vfwmacc_vf_bf16mf4_f32mf2_tumu(
+// CHECK-RV64-SAME: <vscale x 1 x i1> [[VM:%.*]], <vscale x 1 x float> [[VD:%.*]], bfloat noundef [[VS1:%.*]], <vscale x 1 x bfloat> [[VS2:%.*]], i64 noundef [[VL:%.*]]) #[[ATTR0]] {
+// CHECK-RV64-NEXT: [[ENTRY:.*:]]
+// CHECK-RV64-NEXT: [[TMP0:%.*]] = call <vscale x 1 x float> @llvm.riscv.vfwmacc.mask.nxv1f32.bf16.nxv1bf16.i64(<vscale x 1 x float> [[VD]], bfloat [[VS1]], <vscale x 1 x bfloat> [[VS2]], <vscale x 1 x i1> [[VM]], i64 7, i64 [[VL]], i64 0)
+// CHECK-RV64-NEXT: ret <vscale x 1 x float> [[TMP0]]
+//
+vfloat32mf2_t test_vfwmacc_vf_bf16mf4_f32mf2_tumu(vbool64_t vm,
+ vfloat32mf2_t vd, __bf16 vs1,
+ vbfloat16mf4_t vs2,
+ size_t vl) {
+ return __riscv_vfwmacc_vf_bf16mf4_f32mf2_tumu(vm, vd, vs1, vs2, vl);
+}
+
+// CHECK-RV64-LABEL: define dso_local <vscale x 2 x float> @test_vfwmacc_vv_bf16mf2_f32m1_tumu(
+// CHECK-RV64-SAME: <vscale x 2 x i1> [[VM:%.*]], <vscale x 2 x float> [[VD:%.*]], <vscale x 2 x bfloat> [[VS1:%.*]], <vscale x 2 x bfloat> [[VS2:%.*]], i64 noundef [[VL:%.*]]) #[[ATTR0]] {
+// CHECK-RV64-NEXT: [[ENTRY:.*:]]
+// CHECK-RV64-NEXT: [[TMP0:%.*]] = call <vscale x 2 x float> @llvm.riscv.vfwmacc.mask.nxv2f32.nxv2bf16.nxv2bf16.i64(<vscale x 2 x float> [[VD]], <vscale x 2 x bfloat> [[VS1]], <vscale x 2 x bfloat> [[VS2]], <vscale x 2 x i1> [[VM]], i64 7, i64 [[VL]], i64 0)
+// CHECK-RV64-NEXT: ret <vscale x 2 x float> [[TMP0]]
+//
+vfloat32m1_t test_vfwmacc_vv_bf16mf2_f32m1_tumu(vbool32_t vm, vfloat32m1_t vd,
+ vbfloat16mf2_t vs1,
+ vbfloat16mf2_t vs2, size_t vl) {
+ return __riscv_vfwmacc_vv_bf16mf2_f32m1_tumu(vm, vd, vs1, vs2, vl);
+}
+
+// CHECK-RV64-LABEL: define dso_local <vscale x 2 x float> @test_vfwmacc_vf_bf16mf2_f32m1_tumu(
+// CHECK-RV64-SAME: <vscale x 2 x i1> [[VM:%.*]], <vscale x 2 x float> [[VD:%.*]], bfloat noundef [[VS1:%.*]], <vscale x 2 x bfloat> [[VS2:%.*]], i64 noundef [[VL:%.*]]) #[[ATTR0]] {
+// CHECK-RV64-NEXT: [[ENTRY:.*:]]
+// CHECK-RV64-NEXT: [[TMP0:%.*]] = call <vscale x 2 x float> @llvm.riscv.vfwmacc.mask.nxv2f32.bf16.nxv2bf16.i64(<vscale x 2 x float> [[VD]], bfloat [[VS1]], <vscale x 2 x bfloat> [[VS2]], <vscale x 2 x i1> [[VM]], i64 7, i64 [[VL]], i64 0)
+// CHECK-RV64-NEXT: ret <vscale x 2 x float> [[TMP0]]
+//
+vfloat32m1_t test_vfwmacc_vf_bf16mf2_f32m1_tumu(vbool32_t vm, vfloat32m1_t vd,
+ __bf16 vs1, vbfloat16mf2_t vs2,
+ size_t vl) {
+ return __riscv_vfwmacc_vf_bf16mf2_f32m1_tumu(vm, vd, vs1, vs2, vl);
+}
+
+// CHECK-RV64-LABEL: define dso_local <vscale x 4 x float> @test_vfwmacc_vv_bf16m1_f32m2_tumu(
+// CHECK-RV64-SAME: <vscale x 4 x i1> [[VM:%.*]], <vscale x 4 x float> [[VD:%.*]], <vscale x 4 x bfloat> [[VS1:%.*]], <vscale x 4 x bfloat> [[VS2:%.*]], i64 noundef [[VL:%.*]]) #[[ATTR0]] {
+// CHECK-RV64-NEXT: [[ENTRY:.*:]]
+// CHECK-RV64-NEXT: [[TMP0:%.*]] = call <vscale x 4 x float> @llvm.riscv.vfwmacc.mask.nxv4f32.nxv4bf16.nxv4bf16.i64(<vscale x 4 x float> [[VD]], <vscale x 4 x bfloat> [[VS1]], <vscale x 4 x bfloat> [[VS2]], <vscale x 4 x i1> [[VM]], i64 7, i64 [[VL]], i64 0)
+// CHECK-RV64-NEXT: ret <vscale x 4 x float> [[TMP0]]
+//
+vfloat32m2_t test_vfwmacc_vv_bf16m1_f32m2_tumu(vbool16_t vm, vfloat32m2_t vd,
+ vbfloat16m1_t vs1,
+ vbfloat16m1_t vs2, size_t vl) {
+ return __riscv_vfwmacc_vv_bf16m1_f32m2_tumu(vm, vd, vs1, vs2, vl);
+}
+
+// CHECK-RV64-LABEL: define dso_local <vscale x 4 x float> @test_vfwmacc_vf_bf16m1_f32m2_tumu(
+// CHECK-RV64-SAME: <vscale x 4 x i1> [[VM:%.*]], <vscale x 4 x float> [[VD:%.*]], bfloat noundef [[VS1:%.*]], <vscale x 4 x bfloat> [[VS2:%.*]], i64 noundef [[VL:%.*]]) #[[ATTR0]] {
+// CHECK-RV64-NEXT: [[ENTRY:.*:]]
+// CHECK-RV64-NEXT: [[TMP0:%.*]] = call <vscale x 4 x float> @llvm.riscv.vfwmacc.mask.nxv4f32.bf16.nxv4bf16.i64(<vscale x 4 x float> [[VD]], bfloat [[VS1]], <vscale x 4 x bfloat> [[VS2]], <vscale x 4 x i1> [[VM]], i64 7, i64 [[VL]], i64 0)
+// CHECK-RV64-NEXT: ret <vscale x 4 x float> [[TMP0]]
+//
+vfloat32m2_t test_vfwmacc_vf_bf16m1_f32m2_tumu(vbool16_t vm, vfloat32m2_t vd,
+ __bf16 vs1, vbfloat16m1_t vs2,
+ size_t vl) {
+ return __riscv_vfwmacc_vf_bf16m1_f32m2_tumu(vm, vd, vs1, vs2, vl);
+}
+
+// CHECK-RV64-LABEL: define dso_local <vscale x 8 x float> @test_vfwmacc_vv_bf16m2_f32m4_tumu(
+// CHECK-RV64-SAME: <vscale x 8 x i1> [[VM:%.*]], <vscale x 8 x float> [[VD:%.*]], <vscale x 8 x bfloat> [[VS1:%.*]], <vscale x 8 x bfloat> [[VS2:%.*]], i64 noundef [[VL:%.*]]) #[[ATTR0]] {
+// CHECK-RV64-NEXT: [[ENTRY:.*:]]
+// CHECK-RV64-NEXT: [[TMP0:%.*]] = call <vscale x 8 x float> @llvm.riscv.vfwmacc.mask.nxv8f32.nxv8bf16.nxv8bf16.i64(<vscale x 8 x float> [[VD]], <vscale x 8 x bfloat> [[VS1]], <vscale x 8 x bfloat> [[VS2]], <vscale x 8 x i1> [[VM]], i64 7, i64 [[VL]], i64 0)
+// CHECK-RV64-NEXT: ret <vscale x 8 x float> [[TMP0]]
+//
+vfloat32m4_t test_vfwmacc_vv_bf16m2_f32m4_tumu(vbool8_t vm, vfloat32m4_t vd,
+ vbfloat16m2_t vs1,
+ vbfloat16m2_t vs2, size_t vl) {
+ return __riscv_vfwmacc_vv_bf16m2_f32m4_tumu(vm, vd, vs1, vs2, vl);
+}
+
+// CHECK-RV64-LABEL: define dso_local <vscale x 8 x float> @test_vfwmacc_vf_bf16m2_f32m4_tumu(
+// CHECK-RV64-SAME: <vscale x 8 x i1> [[VM:%.*]], <vscale x 8 x float> [[VD:%.*]], bfloat noundef [[VS1:%.*]], <vscale x 8 x bfloat> [[VS2:%.*]], i64 noundef [[VL:%.*]]) #[[ATTR0]] {
+// CHECK-RV64-NEXT: [[ENTRY:.*:]]
+// CHECK-RV64-NEXT: [[TMP0:%.*]] = call <vscale x 8 x float> @llvm.riscv.vfwmacc.mask.nxv8f32.bf16.nxv8bf16.i64(<vscale x 8 x float> [[VD]], bfloat [[VS1]], <vscale x 8 x bfloat> [[VS2]], <vscale x 8 x i1> [[VM]], i64 7, i64 [[VL]], i64 0)
+// CHECK-RV64-NEXT: ret <vscale x 8 x float> [[TMP0]]
+//
+vfloat32m4_t test_vfwmacc_vf_bf16m2_f32m4_tumu(vbool8_t vm, vfloat32m4_t vd,
+ __bf16 vs1, vbfloat16m2_t vs2,
+ size_t vl) {
+ return __riscv_vfwmacc_vf_bf16m2_f32m4_tumu(vm, vd, vs1, vs2, vl);
+}
+
+// CHECK-RV64-LABEL: define dso_local <vscale x 16 x float> @test_vfwmacc_vv_bf16m4_f32m8_tumu(
+// CHECK-RV64-SAME: <vscale x 16 x i1> [[VM:%.*]], <vscale x 16 x float> [[VD:%.*]], <vscale x 16 x bfloat> [[VS1:%.*]], <vscale x 16 x bfloat> [[VS2:%.*]], i64 noundef [[VL:%.*]]) #[[ATTR0]] {
+// CHECK-RV64-NEXT: [[ENTRY:.*:]]
+// CHECK-RV64-NEXT: [[TMP0:%.*]] = call <vscale x 16 x float> @llvm.riscv.vfwmacc.mask.nxv16f32.nxv16bf16.nxv16bf16.i64(<vscale x 16 x float> [[VD]], <vscale x 16 x bfloat> [[VS1]], <vscale x 16 x bfloat> [[VS2]], <vscale x 16 x i1> [[VM]], i64 7, i64 [[VL]], i64 0)
+// CHECK-RV64-NEXT: ret <vscale x 16 x float> [[TMP0]]
+//
+vfloat32m8_t test_vfwmacc_vv_bf16m4_f32m8_tumu(vbool4_t vm, vfloat32m8_t vd,
+ vbfloat16m4_t vs1,
+ vbfloat16m4_t vs2, size_t vl) {
+ return __riscv_vfwmacc_vv_bf16m4_f32m8_tumu(vm, vd, vs1, vs2, vl);
+}
+
+// CHECK-RV64-LABEL: define dso_local <vscale x 16 x float> @test_vfwmacc_vf_bf16m4_f32m8_tumu(
+// CHECK-RV64-SAME: <vscale x 16 x i1> [[VM:%.*]], <vscale x 16 x float> [[VD:%.*]], bfloat noundef [[VS1:%.*]], <vscale x 16 x bfloat> [[VS2:%.*]], i64 noundef [[VL:%.*]]) #[[ATTR0]] {
+// CHECK-RV64-NEXT: [[ENTRY:.*:]]
+// CHECK-RV64-NEXT: [[TMP0:%.*]] = call <vscale x 16 x float> @llvm.riscv.vfwmacc.mask.nxv16f32.bf16.nxv16bf16.i64(<vscale x 16 x float> [[VD]], bfloat [[VS1]], <vscale x 16 x bfloat> [[VS2]], <vscale x 16 x i1> [[VM]], i64 7, i64 [[VL]], i64 0)
+// CHECK-RV64-NEXT: ret <vscale x 16 x float> [[TMP0]]
+//
+vfloat32m8_t test_vfwmacc_vf_bf16m4_f32m8_tumu(vbool4_t vm, vfloat32m8_t vd,
+ __bf16 vs1, vbfloat16m4_t vs2,
+ size_t vl) {
+ return __riscv_vfwmacc_vf_bf16m4_f32m8_tumu(vm, vd, vs1, vs2, vl);
+}
+
+// CHECK-RV64-LABEL: define dso_local <vscale x 1 x float> @test_vfwmacc_vv_bf16mf4_f32mf2_mu(
+// CHECK-RV64-SAME: <vscale x 1 x i1> [[VM:%.*]], <vscale x 1 x float> [[VD:%.*]], <vscale x 1 x bfloat> [[VS1:%.*]], <vscale x 1 x bfloat> [[VS2:%.*]], i64 noundef [[VL:%.*]]) #[[ATTR0]] {
+// CHECK-RV64-NEXT: [[ENTRY:.*:]]
+// CHECK-RV64-NEXT: [[TMP0:%.*]] = call <vscale x 1 x float> @llvm.riscv.vfwmacc.mask.nxv1f32.nxv1bf16.nxv1bf16.i64(<vscale x 1 x float> [[VD]], <vscale x 1 x bfloat> [[VS1]], <vscale x 1 x bfloat> [[VS2]], <vscale x 1 x i1> [[VM]], i64 7, i64 [[VL]], i64 1)
+// CHECK-RV64-NEXT: ret <vscale x 1 x float> [[TMP0]]
+//
+vfloat32mf2_t test_vfwmacc_vv_bf16mf4_f32mf2_mu(vbool64_t vm, vfloat32mf2_t vd,
+ vbfloat16mf4_t vs1,
+ vbfloat16mf4_t vs2, size_t vl) {
+ return __riscv_vfwmacc_vv_bf16mf4_f32mf2_mu(vm, vd, vs1, vs2, vl);
+}
+
+// CHECK-RV64-LABEL: define dso_local <vscale x 1 x float> @test_vfwmacc_vf_bf16mf4_f32mf2_mu(
+// CHECK-RV64-SAME: <vscale x 1 x i1> [[VM:%.*]], <vscale x 1 x float> [[VD:%.*]], bfloat noundef [[VS1:%.*]], <vscale x 1 x bfloat> [[VS2:%.*]], i64 noundef [[VL:%.*]]) #[[ATTR0]] {
+// CHECK-RV64-NEXT: [[ENTRY:.*:]]
+// CHECK-RV64-NEXT: [[TMP0:%.*]] = call <vscale x 1 x float> @llvm.riscv.vfwmacc.mask.nxv1f32.bf16.nxv1bf16.i64(<vscale x 1 x float> [[VD]], bfloat [[VS1]], <vscale x 1 x bfloat> [[VS2]], <vscale x 1 x i1> [[VM]], i64 7, i64 [[VL]], i64 1)
+// CHECK-RV64-NEXT: ret <vscale x 1 x float> [[TMP0]]
+//
+vfloat32mf2_t test_vfwmacc_vf_bf16mf4_f32mf2_mu(vbool64_t vm, vfloat32mf2_t vd,
+ __bf16 vs1, vbfloat16mf4_t vs2,
+ size_t vl) {
+ return __riscv_vfwmacc_vf_bf16mf4_f32mf2_mu(vm, vd, vs1, vs2, vl);
+}
+
+// CHECK-RV64-LABEL: define dso_local <vscale x 2 x float> @test_vfwmacc_vv_bf16mf2_f32m1_mu(
+// CHECK-RV64-SAME: <vscale x 2 x i1> [[VM:%.*]], <vscale x 2 x float> [[VD:%.*]], <vscale x 2 x bfloat> [[VS1:%.*]], <vscale x 2 x bfloat> [[VS2:%.*]], i64 noundef [[VL:%.*]]) #[[ATTR0]] {
+// CHECK-RV64-NEXT: [[ENTRY:.*:]]
+// CHECK-RV64-NEXT: [[TMP0:%.*]] = call <vscale x 2 x float> @llvm.riscv.vfwmacc.mask.nxv2f32.nxv2bf16.nxv2bf16.i64(<vscale x 2 x float> [[VD]], <vscale x 2 x bfloat> [[VS1]], <vscale x 2 x bfloat> [[VS2]], <vscale x 2 x i1> [[VM]], i64 7, i64 [[VL]], i64 1)
+// CHECK-RV64-NEXT: ret <vscale x 2 x float> [[TMP0]]
+//
+vfloat32m1_t test_vfwmacc_vv_bf16mf2_f32m1_mu(vbool32_t vm, vfloat32m1_t vd,
+ vbfloat16mf2_t vs1,
+ vbfloat16mf2_t vs2, size_t vl) {
+ return __riscv_vfwmacc_vv_bf16mf2_f32m1_mu(vm, vd, vs1, vs2, vl);
+}
+
+// CHECK-RV64-LABEL: define dso_local <vscale x 2 x float> @test_vfwmacc_vf_bf16mf2_f32m1_mu(
+// CHECK-RV64-SAME: <vscale x 2 x i1> [[VM:%.*]], <vscale x 2 x float> [[VD:%.*]], bfloat noundef [[VS1:%.*]], <vscale x 2 x bfloat> [[VS2:%.*]], i64 noundef [[VL:%.*]]) #[[ATTR0]] {
+// CHECK-RV64-NEXT: [[ENTRY:.*:]]
+// CHECK-RV64-NEXT: [[TMP0:%.*]] = call <vscale x 2 x float> @llvm.riscv.vfwmacc.mask.nxv2f32.bf16.nxv2bf16.i64(<vscale x 2 x float> [[VD]], bfloat [[VS1]], <vscale x 2 x bfloat> [[VS2]], <vscale x 2 x i1> [[VM]], i64 7, i64 [[VL]], i64 1)
+// CHECK-RV64-NEXT: ret <vscale x 2 x float> [[TMP0]]
+//
+vfloat32m1_t test_vfwmacc_vf_bf16mf2_f32m1_mu(vbool32_t vm, vfloat32m1_t vd,
+ __bf16 vs1, vbfloat16mf2_t vs2,
+ size_t vl) {
+ return __riscv_vfwmacc_vf_bf16mf2_f32m1_mu(vm, vd, vs1, vs2, vl);
+}
+
+// CHECK-RV64-LABEL: define dso_local <vscale x 4 x float> @test_vfwmacc_vv_bf16m1_f32m2_mu(
+// CHECK-RV64-SAME: <vscale x 4 x i1> [[VM:%.*]], <vscale x 4 x float> [[VD:%.*]], <vscale x 4 x bfloat> [[VS1:%.*]], <vscale x 4 x bfloat> [[VS2:%.*]], i64 noundef [[VL:%.*]]) #[[ATTR0]] {
+// CHECK-RV64-NEXT: [[ENTRY:.*:]]
+// CHECK-RV64-NEXT: [[TMP0:%.*]] = call <vscale x 4 x float> @llvm.riscv.vfwmacc.mask.nxv4f32.nxv4bf16.nxv4bf16.i64(<vscale x 4 x float> [[VD]], <vscale x 4 x bfloat> [[VS1]], <vscale x 4 x bfloat> [[VS2]], <vscale x 4 x i1> [[VM]], i64 7, i64 [[VL]], i64 1)
+// CHECK-RV64-NEXT: ret <vscale x 4 x float> [[TMP0]]
+//
+vfloat32m2_t test_vfwmacc_vv_bf16m1_f32m2_mu(vbool16_t vm, vfloat32m2_t vd,
+ vbfloat16m1_t vs1,
+ vbfloat16m1_t vs2, size_t vl) {
+ return __riscv_vfwmacc_vv_bf16m1_f32m2_mu(vm, vd, vs1, vs2, vl);
+}
+
+// CHECK-RV64-LABEL: define dso_local <vscale x 4 x float> @test_vfwmacc_vf_bf16m1_f32m2_mu(
+// CHECK-RV64-SAME: <vscale x 4 x i1> [[VM:%.*]], <vscale x 4 x float> [[VD:%.*]], bfloat noundef [[VS1:%.*]], <vscale x 4 x bfloat> [[VS2:%.*]], i64 noundef [[VL:%.*]]) #[[ATTR0]] {
+// CHECK-RV64-NEXT: [[ENTRY:.*:]]
+// CHECK-RV64-NEXT: [[TMP0:%.*]] = call <vscale x 4 x float> @llvm.riscv.vfwmacc.mask.nxv4f32.bf16.nxv4bf16.i64(<vscale x 4 x float> [[VD]], bfloat [[VS1]], <vscale x 4 x bfloat> [[VS2]], <vscale x 4 x i1> [[VM]], i64 7, i64 [[VL]], i64 1)
+// CHECK-RV64-NEXT: ret <vscale x 4 x float> [[TMP0]]
+//
+vfloat32m2_t test_vfwmacc_vf_bf16m1_f32m2_mu(vbool16_t vm, vfloat32m2_t vd,
+ __bf16 vs1, vbfloat16m1_t vs2,
+ size_t vl) {
+ return __riscv_vfwmacc_vf_bf16m1_f32m2_mu(vm, vd, vs1, vs2, vl);
+}
+
+// CHECK-RV64-LABEL: define dso_local <vscale x 8 x float> @test_vfwmacc_vv_bf16m2_f32m4_mu(
+// CHECK-RV64-SAME: <vscale x 8 x i1> [[VM:%.*]], <vscale x 8 x float> [[VD:%.*]], <vscale x 8 x bfloat> [[VS1:%.*]], <vscale x 8 x bfloat> [[VS2:%.*]], i64 noundef [[VL:%.*]]) #[[ATTR0]] {
+// CHECK-RV64-NEXT: [[ENTRY:.*:]]
+// CHECK-RV64-NEXT: [[TMP0:%.*]] = call <vscale x 8 x float> @llvm.riscv.vfwmacc.mask.nxv8f32.nxv8bf16.nxv8bf16.i64(<vscale x 8 x float> [[VD]], <vscale x 8 x bfloat> [[VS1]], <vscale x 8 x bfloat> [[VS2]], <vscale x 8 x i1> [[VM]], i64 7, i64 [[VL]], i64 1)
+// CHECK-RV64-NEXT: ret <vscale x 8 x float> [[TMP0]]
+//
+vfloat32m4_t test_vfwmacc_vv_bf16m2_f32m4_mu(vbool8_t vm, vfloat32m4_t vd,
+ vbfloat16m2_t vs1,
+ vbfloat16m2_t vs2, size_t vl) {
+ return __riscv_vfwmacc_vv_bf16m2_f32m4_mu(vm, vd, vs1, vs2, vl);
+}
+
+// CHECK-RV64-LABEL: define dso_local <vscale x 8 x float> @test_vfwmacc_vf_bf16m2_f32m4_mu(
+// CHECK-RV64-SAME: <vscale x 8 x i1> [[VM:%.*]], <vscale x 8 x float> [[VD:%.*]], bfloat noundef [[VS1:%.*]], <vscale x 8 x bfloat> [[VS2:%.*]], i64 noundef [[VL:%.*]]) #[[ATTR0]] {
+// CHECK-RV64-NEXT: [[ENTRY:.*:]]
+// CHECK-RV64-NEXT: [[TMP0:%.*]] = call <vscale x 8 x float> @llvm.riscv.vfwmacc.mask.nxv8f32.bf16.nxv8bf16.i64(<vscale x 8 x float> [[VD]], bfloat [[VS1]], <vscale x 8 x bfloat> [[VS2]], <vscale x 8 x i1> [[VM]], i64 7, i64 [[VL]], i64 1)
+// CHECK-RV64-NEXT: ret <vscale x 8 x float> [[TMP0]]
+//
+vfloat32m4_t test_vfwmacc_vf_bf16m2_f32m4_mu(vbool8_t vm, vfloat32m4_t vd,
+ __bf16 vs1, vbfloat16m2_t vs2,
+ size_t vl) {
+ return __riscv_vfwmacc_vf_bf16m2_f32m4_mu(vm, vd, vs1, vs2, vl);
+}
+
+// CHECK-RV64-LABEL: define dso_local <vscale x 16 x float> @test_vfwmacc_vv_bf16m4_f32m8_mu(
+// CHECK-RV64-SAME: <vscale x 16 x i1> [[VM:%.*]], <vscale x 16 x float> [[VD:%.*]], <vscale x 16 x bfloat> [[VS1:%.*]], <vscale x 16 x bfloat> [[VS2:%.*]], i64 noundef [[VL:%.*]]) #[[ATTR0]] {
+// CHECK-RV64-NEXT: [[ENTRY:.*:]]
+// CHECK-RV64-NEXT: [[TMP0:%.*]] = call <vscale x 16 x float> @llvm.riscv.vfwmacc.mask.nxv16f32.nxv16bf16.nxv16bf16.i64(<vscale x 16 x float> [[VD]], <vscale x 16 x bfloat> [[VS1]], <vscale x 16 x bfloat> [[VS2]], <vscale x 16 x i1> [[VM]], i64 7, i64 [[VL]], i64 1)
+// CHECK-RV64-NEXT: ret <vscale x 16 x float> [[TMP0]]
+//
+vfloat32m8_t test_vfwmacc_vv_bf16m4_f32m8_mu(vbool4_t vm, vfloat32m8_t vd,
+ vbfloat16m4_t vs1,
+ vbfloat16m4_t vs2, size_t vl) {
+ return __riscv_vfwmacc_vv_bf16m4_f32m8_mu(vm, vd, vs1, vs2, vl);
+}
+
+// CHECK-RV64-LABEL: define dso_local <vscale x 16 x float> @test_vfwmacc_vf_bf16m4_f32m8_mu(
+// CHECK-RV64-SAME: <vscale x 16 x i1> [[VM:%.*]], <vscale x 16 x float> [[VD:%.*]], bfloat noundef [[VS1:%.*]], <vscale x 16 x bfloat> [[VS2:%.*]], i64 noundef [[VL:%.*]]) #[[ATTR0]] {
+// CHECK-RV64-NEXT: [[ENTRY:.*:]]
+// CHECK-RV64-NEXT: [[TMP0:%.*]] = call <vscale x 16 x float> @llvm.riscv.vfwmacc.mask.nxv16f32.bf16.nxv16bf16.i64(<vscale x 16 x float> [[VD]], bfloat [[VS1]], <vscale x 16 x bfloat> [[VS2]], <vscale x 16 x i1> [[VM]], i64 7, i64 [[VL]], i64 1)
+// CHECK-RV64-NEXT: ret <vscale x 16 x float> [[TMP0]]
+//
+vfloat32m8_t test_vfwmacc_vf_bf16m4_f32m8_mu(vbool4_t vm, vfloat32m8_t vd,
+ __bf16 vs1, vbfloat16m4_t vs2,
+ size_t vl) {
+ return __riscv_vfwmacc_vf_bf16m4_f32m8_mu(vm, vd, vs1, vs2, vl);
+}
+
+// CHECK-RV64-LABEL: define dso_local <vscale x 1 x float> @test_vfwmacc_vv_bf16mf4_f32mf2_rm_tu(
+// CHECK-RV64-SAME: <vscale x 1 x float> [[VD:%.*]], <vscale x 1 x bfloat> [[VS1:%.*]], <vscale x 1 x bfloat> [[VS2:%.*]], i64 noundef [[VL:%.*]]) #[[ATTR0]] {
+// CHECK-RV64-NEXT: [[ENTRY:.*:]]
+// CHECK-RV64-NEXT: [[TMP0:%.*]] = call <vscale x 1 x float> @llvm.riscv.vfwmacc.nxv1f32.nxv1bf16.nxv1bf16.i64(<vscale x 1 x float> [[VD]], <vscale x 1 x bfloat> [[VS1]], <vscale x 1 x bfloat> [[VS2]], i64 0, i64 [[VL]], i64 2)
+// CHECK-RV64-NEXT: ret <vscale x 1 x float> [[TMP0]]
+//
+vfloat32mf2_t test_vfwmacc_vv_bf16mf4_f32mf2_rm_tu(vfloat32mf2_t vd,
+ vbfloat16mf4_t vs1,
+ vbfloat16mf4_t vs2,
+ size_t vl) {
+ return __riscv_vfwmacc_vv_bf16mf4_f32mf2_rm_tu(vd, vs1, vs2, __RISCV_FRM_RNE,
+ vl);
+}
+
+// CHECK-RV64-LABEL: define dso_local <vscale x 1 x float> @test_vfwmacc_vf_bf16mf4_f32mf2_rm_tu(
+// CHECK-RV64-SAME: <vscale x 1 x float> [[VD:%.*]], bfloat noundef [[VS1:%.*]], <vscale x 1 x bfloat> [[VS2:%.*]], i64 noundef [[VL:%.*]]) #[[ATTR0]] {
+// CHECK-RV64-NEXT: [[ENTRY:.*:]]
+// CHECK-RV64-NEXT: [[TMP0:%.*]] = call <vscale x 1 x float> @llvm.riscv.vfwmacc.nxv1f32.bf16.nxv1bf16.i64(<vscale x 1 x float> [[VD]], bfloat [[VS1]], <vscale x 1 x bfloat> [[VS2]], i64 0, i64 [[VL]], i64 2)
+// CHECK-RV64-NEXT: ret <vscale x 1 x float> [[TMP0]]
+//
+vfloat32mf2_t test_vfwmacc_vf_bf16mf4_f32mf2_rm_tu(vfloat32mf2_t vd, __bf16 vs1,
+ vbfloat16mf4_t vs2,
+ size_t vl) {
+ return __riscv_vfwmacc_vf_bf16mf4_f32mf2_rm_tu(vd, vs1, vs2, __RISCV_FRM_RNE,
+ vl);
+}
+
+// CHECK-RV64-LABEL: define dso_local <vscale x 2 x float> @test_vfwmacc_vv_bf16mf2_f32m1_rm_tu(
+// CHECK-RV64-SAME: <vscale x 2 x float> [[VD:%.*]], <vscale x 2 x bfloat> [[VS1:%.*]], <vscale x 2 x bfloat> [[VS2:%.*]], i64 noundef [[VL:%.*]]) #[[ATTR0]] {
+// CHECK-RV64-NEXT: [[ENTRY:.*:]]
+// CHECK-RV64-NEXT: [[TMP0:%.*]] = call <vscale x 2 x float> @llvm.riscv.vfwmacc.nxv2f32.nxv2bf16.nxv2bf16.i64(<vscale x 2 x float> [[VD]], <vscale x 2 x bfloat> [[VS1]], <vscale x 2 x bfloat> [[VS2]], i64 0, i64 [[VL]], i64 2)
+// CHECK-RV64-NEXT: ret <vscale x 2 x float> [[TMP0]]
+//
+vfloat32m1_t test_vfwmacc_vv_bf16mf2_f32m1_rm_tu(vfloat32m1_t vd,
+ vbfloat16mf2_t vs1,
+ vbfloat16mf2_t vs2,
+ size_t vl) {
+ return __riscv_vfwmacc_vv_bf16mf2_f32m1_rm_tu(vd, vs1, vs2, __RISCV_FRM_RNE,
+ vl);
+}
+
+// CHECK-RV64-LABEL: define dso_local <vscale x 2 x float> @test_vfwmacc_vf_bf16mf2_f32m1_rm_tu(
+// CHECK-RV64-SAME: <vscale x 2 x float> [[VD:%.*]], bfloat noundef [[VS1:%.*]], <vscale x 2 x bfloat> [[VS2:%.*]], i64 noundef [[VL:%.*]]) #[[ATTR0]] {
+// CHECK-RV64-NEXT: [[ENTRY:.*:]]
+// CHECK-RV64-NEXT: [[TMP0:%.*]] = call <vscale x 2 x float> @llvm.riscv.vfwmacc.nxv2f32.bf16.nxv2bf16.i64(<vscale x 2 x float> [[VD]], bfloat [[VS1]], <vscale x 2 x bfloat> [[VS2]], i64 0, i64 [[VL]], i64 2)
+// CHECK-RV64-NEXT: ret <vscale x 2 x float> [[TMP0]]
+//
+vfloat32m1_t test_vfwmacc_vf_bf16mf2_f32m1_rm_tu(vfloat32m1_t vd, __bf16 vs1,
+ vbfloat16mf2_t vs2,
+ size_t vl) {
+ return __riscv_vfwmacc_vf_bf16mf2_f32m1_rm_tu(vd, vs1, vs2, __RISCV_FRM_RNE,
+ vl);
+}
+
+// CHECK-RV64-LABEL: define dso_local <vscale x 4 x float> @test_vfwmacc_vv_bf16m1_f32m2_rm_tu(
+// CHECK-RV64-SAME: <vscale x 4 x float> [[VD:%.*]], <vscale x 4 x bfloat> [[VS1:%.*]], <vscale x 4 x bfloat> [[VS2:%.*]], i64 noundef [[VL:%.*]]) #[[ATTR0]] {
+// CHECK-RV64-NEXT: [[ENTRY:.*:]]
+// CHECK-RV64-NEXT: [[TMP0:%.*]] = call <vscale x 4 x float> @llvm.riscv.vfwmacc.nxv4f32.nxv4bf16.nxv4bf16.i64(<vscale x 4 x float> [[VD]], <vscale x 4 x bfloat> [[VS1]], <vscale x 4 x bfloat> [[VS2]], i64 0, i64 [[VL]], i64 2)
+// CHECK-RV64-NEXT: ret <vscale x 4 x float> [[TMP0]]
+//
+vfloat32m2_t test_vfwmacc_vv_bf16m1_f32m2_rm_tu(vfloat32m2_t vd,
+ vbfloat16m1_t vs1,
+ vbfloat16m1_t vs2, size_t vl) {
+ return __riscv_vfwmacc_vv_bf16m1_f32m2_rm_tu(vd, vs1, vs2, __RISCV_FRM_RNE,
+ vl);
+}
+
+// CHECK-RV64-LABEL: define dso_local <vscale x 4 x float> @test_vfwmacc_vf_bf16m1_f32m2_rm_tu(
+// CHECK-RV64-SAME: <vscale x 4 x float> [[VD:%.*]], bfloat noundef [[VS1:%.*]], <vscale x 4 x bfloat> [[VS2:%.*]], i64 noundef [[VL:%.*]]) #[[ATTR0]] {
+// CHECK-RV64-NEXT: [[ENTRY:.*:]]
+// CHECK-RV64-NEXT: [[TMP0:%.*]] = call <vscale x 4 x float> @llvm.riscv.vfwmacc.nxv4f32.bf16.nxv4bf16.i64(<vscale x 4 x float> [[VD]], bfloat [[VS1]], <vscale x 4 x bfloat> [[VS2]], i64 0, i64 [[VL]], i64 2)
+// CHECK-RV64-NEXT: ret <vscale x 4 x float> [[TMP0]]
+//
+vfloat32m2_t test_vfwmacc_vf_bf16m1_f32m2_rm_tu(vfloat32m2_t vd, __bf16 vs1,
+ vbfloat16m1_t vs2, size_t vl) {
+ return __riscv_vfwmacc_vf_bf16m1_f32m2_rm_tu(vd, vs1, vs2, __RISCV_FRM_RNE,
+ vl);
+}
+
+// CHECK-RV64-LABEL: define dso_local <vscale x 8 x float> @test_vfwmacc_vv_bf16m2_f32m4_rm_tu(
+// CHECK-RV64-SAME: <vscale x 8 x float> [[VD:%.*]], <vscale x 8 x bfloat> [[VS1:%.*]], <vscale x 8 x bfloat> [[VS2:%.*]], i64 noundef [[VL:%.*]]) #[[ATTR0]] {
+// CHECK-RV64-NEXT: [[ENTRY:.*:]]
+// CHECK-RV64-NEXT: [[TMP0:%.*]] = call <vscale x 8 x float> @llvm.riscv.vfwmacc.nxv8f32.nxv8bf16.nxv8bf16.i64(<vscale x 8 x float> [[VD]], <vscale x 8 x bfloat> [[VS1]], <vscale x 8 x bfloat> [[VS2]], i64 0, i64 [[VL]], i64 2)
+// CHECK-RV64-NEXT: ret <vscale x 8 x float> [[TMP0]]
+//
+vfloat32m4_t test_vfwmacc_vv_bf16m2_f32m4_rm_tu(vfloat32m4_t vd,
+ vbfloat16m2_t vs1,
+ vbfloat16m2_t vs2, size_t vl) {
+ return __riscv_vfwmacc_vv_bf16m2_f32m4_rm_tu(vd, vs1, vs2, __RISCV_FRM_RNE,
+ vl);
+}
+
+// CHECK-RV64-LABEL: define dso_local <vscale x 8 x float> @test_vfwmacc_vf_bf16m2_f32m4_rm_tu(
+// CHECK-RV64-SAME: <vscale x 8 x float> [[VD:%.*]], bfloat noundef [[VS1:%.*]], <vscale x 8 x bfloat> [[VS2:%.*]], i64 noundef [[VL:%.*]]) #[[ATTR0]] {
+// CHECK-RV64-NEXT: [[ENTRY:.*:]]
+// CHECK-RV64-NEXT: [[TMP0:%.*]] = call <vscale x 8 x float> @llvm.riscv.vfwmacc.nxv8f32.bf16.nxv8bf16.i64(<vscale x 8 x float> [[VD]], bfloat [[VS1]], <vscale x 8 x bfloat> [[VS2]], i64 0, i64 [[VL]], i64 2)
+// CHECK-RV64-NEXT: ret <vscale x 8 x float> [[TMP0]]
+//
+vfloat32m4_t test_vfwmacc_vf_bf16m2_f32m4_rm_tu(vfloat32m4_t vd, __bf16 vs1,
+ vbfloat16m2_t vs2, size_t vl) {
+ return __riscv_vfwmacc_vf_bf16m2_f32m4_rm_tu(vd, vs1, vs2, __RISCV_FRM_RNE,
+ vl);
+}
+
+// CHECK-RV64-LABEL: define dso_local <vscale x 16 x float> @test_vfwmacc_vv_bf16m4_f32m8_rm_tu(
+// CHECK-RV64-SAME: <vscale x 16 x float> [[VD:%.*]], <vscale x 16 x bfloat> [[VS1:%.*]], <vscale x 16 x bfloat> [[VS2:%.*]], i64 noundef [[VL:%.*]]) #[[ATTR0]] {
+// CHECK-RV64-NEXT: [[ENTRY:.*:]]
+// CHECK-RV64-NEXT: [[TMP0:%.*]] = call <vscale x 16 x float> @llvm.riscv.vfwmacc.nxv16f32.nxv16bf16.nxv16bf16.i64(<vscale x 16 x float> [[VD]], <vscale x 16 x bfloat> [[VS1]], <vscale x 16 x bfloat> [[VS2]], i64 0, i64 [[VL]], i64 2)
+// CHECK-RV64-NEXT: ret <vscale x 16 x float> [[TMP0]]
+//
+vfloat32m8_t test_vfwmacc_vv_bf16m4_f32m8_rm_tu(vfloat32m8_t vd,
+ vbfloat16m4_t vs1,
+ vbfloat16m4_t vs2, size_t vl) {
+ return __riscv_vfwmacc_vv_bf16m4_f32m8_rm_tu(vd, vs1, vs2, __RISCV_FRM_RNE,
+ vl);
+}
+
+// CHECK-RV64-LABEL: define dso_local <vscale x 16 x float> @test_vfwmacc_vf_bf16m4_f32m8_rm_tu(
+// CHECK-RV64-SAME: <vscale x 16 x float> [[VD:%.*]], bfloat noundef [[VS1:%.*]], <vscale x 16 x bfloat> [[VS2:%.*]], i64 noundef [[VL:%.*]]) #[[ATTR0]] {
+// CHECK-RV64-NEXT: [[ENTRY:.*:]]
+// CHECK-RV64-NEXT: [[TMP0:%.*]] = call <vscale x 16 x float> @llvm.riscv.vfwmacc.nxv16f32.bf16.nxv16bf16.i64(<vscale x 16 x float> [[VD]], bfloat [[VS1]], <vscale x 16 x bfloat> [[VS2]], i64 0, i64 [[VL]], i64 2)
+// CHECK-RV64-NEXT: ret <vscale x 16 x float> [[TMP0]]
+//
+vfloat32m8_t test_vfwmacc_vf_bf16m4_f32m8_rm_tu(vfloat32m8_t vd, __bf16 vs1,
+ vbfloat16m4_t vs2, size_t vl) {
+ return __riscv_vfwmacc_vf_bf16m4_f32m8_rm_tu(vd, vs1, vs2, __RISCV_FRM_RNE,
+ vl);
+}
+
+// CHECK-RV64-LABEL: define dso_local <vscale x 1 x float> @test_vfwmacc_vv_bf16mf4_f32mf2_rm_tum(
+// CHECK-RV64-SAME: <vscale x 1 x i1> [[VM:%.*]], <vscale x 1 x float> [[VD:%.*]], <vscale x 1 x bfloat> [[VS1:%.*]], <vscale x 1 x bfloat> [[VS2:%.*]], i64 noundef [[VL:%.*]]) #[[ATTR0]] {
+// CHECK-RV64-NEXT: [[ENTRY:.*:]]
+// CHECK-RV64-NEXT: [[TMP0:%.*]] = call <vscale x 1 x float> @llvm.riscv.vfwmacc.mask.nxv1f32.nxv1bf16.nxv1bf16.i64(<vscale x 1 x float> [[VD]], <vscale x 1 x bfloat> [[VS1]], <vscale x 1 x bfloat> [[VS2]], <vscale x 1 x i1> [[VM]], i64 0, i64 [[VL]], i64 2)
+// CHECK-RV64-NEXT: ret <vscale x 1 x float> [[TMP0]]
+//
+vfloat32mf2_t test_vfwmacc_vv_bf16mf4_f32mf2_rm_tum(vbool64_t vm,
+ vfloat32mf2_t vd,
+ vbfloat16mf4_t vs1,
+ vbfloat16mf4_t vs2,
+ size_t vl) {
+ return __riscv_vfwmacc_vv_bf16mf4_f32mf2_rm_tum(vm, vd, vs1, vs2,
+ __RISCV_FRM_RNE, vl);
+}
+
+// CHECK-RV64-LABEL: define dso_local <vscale x 1 x float> @test_vfwmacc_vf_bf16mf4_f32mf2_rm_tum(
+// CHECK-RV64-SAME: <vscale x 1 x i1> [[VM:%.*]], <vscale x 1 x float> [[VD:%.*]], bfloat noundef [[VS1:%.*]], <vscale x 1 x bfloat> [[VS2:%.*]], i64 noundef [[VL:%.*]]) #[[ATTR0]] {
+// CHECK-RV64-NEXT: [[ENTRY:.*:]]
+// CHECK-RV64-NEXT: [[TMP0:%.*]] = call <vscale x 1 x float> @llvm.riscv.vfwmacc.mask.nxv1f32.bf16.nxv1bf16.i64(<vscale x 1 x float> [[VD]], bfloat [[VS1]], <vscale x 1 x bfloat> [[VS2]], <vscale x 1 x i1> [[VM]], i64 0, i64 [[VL]], i64 2)
+// CHECK-RV64-NEXT: ret <vscale x 1 x float> [[TMP0]]
+//
+vfloat32mf2_t test_vfwmacc_vf_bf16mf4_f32mf2_rm_tum(
+ vbool64_t vm, vfloat32mf2_t vd, __bf16 vs1, vbfloat16mf4_t vs2, size_t vl) {
+ return __riscv_vfwmacc_vf_bf16mf4_f32mf2_rm_tum(vm, vd, vs1, vs2,
+ __RISCV_FRM_RNE, vl);
+}
+
+// CHECK-RV64-LABEL: define dso_local <vscale x 2 x float> @test_vfwmacc_vv_bf16mf2_f32m1_rm_tum(
+// CHECK-RV64-SAME: <vscale x 2 x i1> [[VM:%.*]], <vscale x 2 x float> [[VD:%.*]], <vscale x 2 x bfloat> [[VS1:%.*]], <vscale x 2 x bfloat> [[VS2:%.*]], i64 noundef [[VL:%.*]]) #[[ATTR0]] {
+// CHECK-RV64-NEXT: [[ENTRY:.*:]]
+// CHECK-RV64-NEXT: [[TMP0:%.*]] = call <vscale x 2 x float> @llvm.riscv.vfwmacc.mask.nxv2f32.nxv2bf16.nxv2bf16.i64(<vscale x 2 x float> [[VD]], <vscale x 2 x bfloat> [[VS1]], <vscale x 2 x bfloat> [[VS2]], <vscale x 2 x i1> [[VM]], i64 0, i64 [[VL]], i64 2)
+// CHECK-RV64-NEXT: ret <vscale x 2 x float> [[TMP0]]
+//
+vfloat32m1_t test_vfwmacc_vv_bf16mf2_f32m1_rm_tum(vbool32_t vm, vfloat32m1_t vd,
+ vbfloat16mf2_t vs1,
+ vbfloat16mf2_t vs2,
+ size_t vl) {
+ return __riscv_vfwmacc_vv_bf16mf2_f32m1_rm_tum(vm, vd, vs1, vs2,
+ __RISCV_FRM_RNE, vl);
+}
+
+// CHECK-RV64-LABEL: define dso_local <vscale x 2 x float> @test_vfwmacc_vf_bf16mf2_f32m1_rm_tum(
+// CHECK-RV64-SAME: <vscale x 2 x i1> [[VM:%.*]], <vscale x 2 x float> [[VD:%.*]], bfloat noundef [[VS1:%.*]], <vscale x 2 x bfloat> [[VS2:%.*]], i64 noundef [[VL:%.*]]) #[[ATTR0]] {
+// CHECK-RV64-NEXT: [[ENTRY:.*:]]
+// CHECK-RV64-NEXT: [[TMP0:%.*]] = call <vscale x 2 x float> @llvm.riscv.vfwmacc.mask.nxv2f32.bf16.nxv2bf16.i64(<vscale x 2 x float> [[VD]], bfloat [[VS1]], <vscale x 2 x bfloat> [[VS2]], <vscale x 2 x i1> [[VM]], i64 0, i64 [[VL]], i64 2)
+// CHECK-RV64-NEXT: ret <vscale x 2 x float> [[TMP0]]
+//
+vfloat32m1_t test_vfwmacc_vf_bf16mf2_f32m1_rm_tum(vbool32_t vm, vfloat32m1_t vd,
+ __bf16 vs1,
+ vbfloat16mf2_t vs2,
+ size_t vl) {
+ return __riscv_vfwmacc_vf_bf16mf2_f32m1_rm_tum(vm, vd, vs1, vs2,
+ __RISCV_FRM_RNE, vl);
+}
+
+// CHECK-RV64-LABEL: define dso_local <vscale x 4 x float> @test_vfwmacc_vv_bf16m1_f32m2_rm_tum(
+// CHECK-RV64-SAME: <vscale x 4 x i1> [[VM:%.*]], <vscale x 4 x float> [[VD:%.*]], <vscale x 4 x bfloat> [[VS1:%.*]], <vscale x 4 x bfloat> [[VS2:%.*]], i64 noundef [[VL:%.*]]) #[[ATTR0]] {
+// CHECK-RV64-NEXT: [[ENTRY:.*:]]
+// CHECK-RV64-NEXT: [[TMP0:%.*]] = call <vscale x 4 x float> @llvm.riscv.vfwmacc.mask.nxv4f32.nxv4bf16.nxv4bf16.i64(<vscale x 4 x float> [[VD]], <vscale x 4 x bfloat> [[VS1]], <vscale x 4 x bfloat> [[VS2]], <vscale x 4 x i1> [[VM]], i64 0, i64 [[VL]], i64 2)
+// CHECK-RV64-NEXT: ret <vscale x 4 x float> [[TMP0]]
+//
+vfloat32m2_t test_vfwmacc_vv_bf16m1_f32m2_rm_tum(vbool16_t vm, vfloat32m2_t vd,
+ vbfloat16m1_t vs1,
+ vbfloat16m1_t vs2, size_t vl) {
+ return __riscv_vfwmacc_vv_bf16m1_f32m2_rm_tum(vm, vd, vs1, vs2,
+ __RISCV_FRM_RNE, vl);
+}
+
+// CHECK-RV64-LABEL: define dso_local <vscale x 4 x float> @test_vfwmacc_vf_bf16m1_f32m2_rm_tum(
+// CHECK-RV64-SAME: <vscale x 4 x i1> [[VM:%.*]], <vscale x 4 x float> [[VD:%.*]], bfloat noundef [[VS1:%.*]], <vscale x 4 x bfloat> [[VS2:%.*]], i64 noundef [[VL:%.*]]) #[[ATTR0]] {
+// CHECK-RV64-NEXT: [[ENTRY:.*:]]
+// CHECK-RV64-NEXT: [[TMP0:%.*]] = call <vscale x 4 x float> @llvm.riscv.vfwmacc.mask.nxv4f32.bf16.nxv4bf16.i64(<vscale x 4 x float> [[VD]], bfloat [[VS1]], <vscale x 4 x bfloat> [[VS2]], <vscale x 4 x i1> [[VM]], i64 0, i64 [[VL]], i64 2)
+// CHECK-RV64-NEXT: ret <vscale x 4 x float> [[TMP0]]
+//
+vfloat32m2_t test_vfwmacc_vf_bf16m1_f32m2_rm_tum(vbool16_t vm, vfloat32m2_t vd,
+ __bf16 vs1, vbfloat16m1_t vs2,
+ size_t vl) {
+ return __riscv_vfwmacc_vf_bf16m1_f32m2_rm_tum(vm, vd, vs1, vs2,
+ __RISCV_FRM_RNE, vl);
+}
+
+// CHECK-RV64-LABEL: define dso_local <vscale x 8 x float> @test_vfwmacc_vv_bf16m2_f32m4_rm_tum(
+// CHECK-RV64-SAME: <vscale x 8 x i1> [[VM:%.*]], <vscale x 8 x float> [[VD:%.*]], <vscale x 8 x bfloat> [[VS1:%.*]], <vscale x 8 x bfloat> [[VS2:%.*]], i64 noundef [[VL:%.*]]) #[[ATTR0]] {
+// CHECK-RV64-NEXT: [[ENTRY:.*:]]
+// CHECK-RV64-NEXT: [[TMP0:%.*]] = call <vscale x 8 x float> @llvm.riscv.vfwmacc.mask.nxv8f32.nxv8bf16.nxv8bf16.i64(<vscale x 8 x float> [[VD]], <vscale x 8 x bfloat> [[VS1]], <vscale x 8 x bfloat> [[VS2]], <vscale x 8 x i1> [[VM]], i64 0, i64 [[VL]], i64 2)
+// CHECK-RV64-NEXT: ret <vscale x 8 x float> [[TMP0]]
+//
+vfloat32m4_t test_vfwmacc_vv_bf16m2_f32m4_rm_tum(vbool8_t vm, vfloat32m4_t vd,
+ vbfloat16m2_t vs1,
+ vbfloat16m2_t vs2, size_t vl) {
+ return __riscv_vfwmacc_vv_bf16m2_f32m4_rm_tum(vm, vd, vs1, vs2,
+ __RISCV_FRM_RNE, vl);
+}
+
+// CHECK-RV64-LABEL: define dso_local <vscale x 8 x float> @test_vfwmacc_vf_bf16m2_f32m4_rm_tum(
+// CHECK-RV64-SAME: <vscale x 8 x i1> [[VM:%.*]], <vscale x 8 x float> [[VD:%.*]], bfloat noundef [[VS1:%.*]], <vscale x 8 x bfloat> [[VS2:%.*]], i64 noundef [[VL:%.*]]) #[[ATTR0]] {
+// CHECK-RV64-NEXT: [[ENTRY:.*:]]
+// CHECK-RV64-NEXT: [[TMP0:%.*]] = call <vscale x 8 x float> @llvm.riscv.vfwmacc.mask.nxv8f32.bf16.nxv8bf16.i64(<vscale x 8 x float> [[VD]], bfloat [[VS1]], <vscale x 8 x bfloat> [[VS2]], <vscale x 8 x i1> [[VM]], i64 0, i64 [[VL]], i64 2)
+// CHECK-RV64-NEXT: ret <vscale x 8 x float> [[TMP0]]
+//
+vfloat32m4_t test_vfwmacc_vf_bf16m2_f32m4_rm_tum(vbool8_t vm, vfloat32m4_t vd,
+ __bf16 vs1, vbfloat16m2_t vs2,
+ size_t vl) {
+ return __riscv_vfwmacc_vf_bf16m2_f32m4_rm_tum(vm, vd, vs1, vs2,
+ __RISCV_FRM_RNE, vl);
+}
+
+// CHECK-RV64-LABEL: define dso_local <vscale x 16 x float> @test_vfwmacc_vv_bf16m4_f32m8_rm_tum(
+// CHECK-RV64-SAME: <vscale x 16 x i1> [[VM:%.*]], <vscale x 16 x float> [[VD:%.*]], <vscale x 16 x bfloat> [[VS1:%.*]], <vscale x 16 x bfloat> [[VS2:%.*]], i64 noundef [[VL:%.*]]) #[[ATTR0]] {
+// CHECK-RV64-NEXT: [[ENTRY:.*:]]
+// CHECK-RV64-NEXT: [[TMP0:%.*]] = call <vscale x 16 x float> @llvm.riscv.vfwmacc.mask.nxv16f32.nxv16bf16.nxv16bf16.i64(<vscale x 16 x float> [[VD]], <vscale x 16 x bfloat> [[VS1]], <vscale x 16 x bfloat> [[VS2]], <vscale x 16 x i1> [[VM]], i64 0, i64 [[VL]], i64 2)
+// CHECK-RV64-NEXT: ret <vscale x 16 x float> [[TMP0]]
+//
+vfloat32m8_t test_vfwmacc_vv_bf16m4_f32m8_rm_tum(vbool4_t vm, vfloat32m8_t vd,
+ vbfloat16m4_t vs1,
+ vbfloat16m4_t vs2, size_t vl) {
+ return __riscv_vfwmacc_vv_bf16m4_f32m8_rm_tum(vm, vd, vs1, vs2,
+ __RISCV_FRM_RNE, vl);
+}
+
+// CHECK-RV64-LABEL: define dso_local <vscale x 16 x float> @test_vfwmacc_vf_bf16m4_f32m8_rm_tum(
+// CHECK-RV64-SAME: <vscale x 16 x i1> [[VM:%.*]], <vscale x 16 x float> [[VD:%.*]], bfloat noundef [[VS1:%.*]], <vscale x 16 x bfloat> [[VS2:%.*]], i64 noundef [[VL:%.*]]) #[[ATTR0]] {
+// CHECK-RV64-NEXT: [[ENTRY:.*:]]
+// CHECK-RV64-NEXT: [[TMP0:%.*]] = call <vscale x 16 x float> @llvm.riscv.vfwmacc.mask.nxv16f32.bf16.nxv16bf16.i64(<vscale x 16 x float> [[VD]], bfloat [[VS1]], <vscale x 16 x bfloat> [[VS2]], <vscale x 16 x i1> [[VM]], i64 0, i64 [[VL]], i64 2)
+// CHECK-RV64-NEXT: ret <vscale x 16 x float> [[TMP0]]
+//
+vfloat32m8_t test_vfwmacc_vf_bf16m4_f32m8_rm_tum(vbool4_t vm, vfloat32m8_t vd,
+ __bf16 vs1, vbfloat16m4_t vs2,
+ size_t vl) {
+ return __riscv_vfwmacc_vf_bf16m4_f32m8_rm_tum(vm, vd, vs1, vs2,
+ __RISCV_FRM_RNE, vl);
+}
+
+// CHECK-RV64-LABEL: define dso_local <vscale x 1 x float> @test_vfwmacc_vv_bf16mf4_f32mf2_rm_tumu(
+// CHECK-RV64-SAME: <vscale x 1 x i1> [[VM:%.*]], <vscale x 1 x float> [[VD:%.*]], <vscale x 1 x bfloat> [[VS1:%.*]], <vscale x 1 x bfloat> [[VS2:%.*]], i64 noundef [[VL:%.*]]) #[[ATTR0]] {
+// CHECK-RV64-NEXT: [[ENTRY:.*:]]
+// CHECK-RV64-NEXT: [[TMP0:%.*]] = call <vscale x 1 x float> @llvm.riscv.vfwmacc.mask.nxv1f32.nxv1bf16.nxv1bf16.i64(<vscale x 1 x float> [[VD]], <vscale x 1 x bfloat> [[VS1]], <vscale x 1 x bfloat> [[VS2]], <vscale x 1 x i1> [[VM]], i64 0, i64 [[VL]], i64 0)
+// CHECK-RV64-NEXT: ret <vscale x 1 x float> [[TMP0]]
+//
+vfloat32mf2_t test_vfwmacc_vv_bf16mf4_f32mf2_rm_tumu(vbool64_t vm,
+ vfloat32mf2_t vd,
+ vbfloat16mf4_t vs1,
+ vbfloat16mf4_t vs2,
+ size_t vl) {
+ return __riscv_vfwmacc_vv_bf16mf4_f32mf2_rm_tumu(vm, vd, vs1, vs2,
+ __RISCV_FRM_RNE, vl);
+}
+
+// CHECK-RV64-LABEL: define dso_local <vscale x 1 x float> @test_vfwmacc_vf_bf16mf4_f32mf2_rm_tumu(
+// CHECK-RV64-SAME: <vscale x 1 x i1> [[VM:%.*]], <vscale x 1 x float> [[VD:%.*]], bfloat noundef [[VS1:%.*]], <vscale x 1 x bfloat> [[VS2:%.*]], i64 noundef [[VL:%.*]]) #[[ATTR0]] {
+// CHECK-RV64-NEXT: [[ENTRY:.*:]]
+// CHECK-RV64-NEXT: [[TMP0:%.*]] = call <vscale x 1 x float> @llvm.riscv.vfwmacc.mask.nxv1f32.bf16.nxv1bf16.i64(<vscale x 1 x float> [[VD]], bfloat [[VS1]], <vscale x 1 x bfloat> [[VS2]], <vscale x 1 x i1> [[VM]], i64 0, i64 [[VL]], i64 0)
+// CHECK-RV64-NEXT: ret <vscale x 1 x float> [[TMP0]]
+//
+vfloat32mf2_t test_vfwmacc_vf_bf16mf4_f32mf2_rm_tumu(
+ vbool64_t vm, vfloat32mf2_t vd, __bf16 vs1, vbfloat16mf4_t vs2, size_t vl) {
+ return __riscv_vfwmacc_vf_bf16mf4_f32mf2_rm_tumu(vm, vd, vs1, vs2,
+ __RISCV_FRM_RNE, vl);
+}
+
+// CHECK-RV64-LABEL: define dso_local <vscale x 2 x float> @test_vfwmacc_vv_bf16mf2_f32m1_rm_tumu(
+// CHECK-RV64-SAME: <vscale x 2 x i1> [[VM:%.*]], <vscale x 2 x float> [[VD:%.*]], <vscale x 2 x bfloat> [[VS1:%.*]], <vscale x 2 x bfloat> [[VS2:%.*]], i64 noundef [[VL:%.*]]) #[[ATTR0]] {
+// CHECK-RV64-NEXT: [[ENTRY:.*:]]
+// CHECK-RV64-NEXT: [[TMP0:%.*]] = call <vscale x 2 x float> @llvm.riscv.vfwmacc.mask.nxv2f32.nxv2bf16.nxv2bf16.i64(<vscale x 2 x float> [[VD]], <vscale x 2 x bfloat> [[VS1]], <vscale x 2 x bfloat> [[VS2]], <vscale x 2 x i1> [[VM]], i64 0, i64 [[VL]], i64 0)
+// CHECK-RV64-NEXT: ret <vscale x 2 x float> [[TMP0]]
+//
+vfloat32m1_t test_vfwmacc_vv_bf16mf2_f32m1_rm_tumu(vbool32_t vm,
+ vfloat32m1_t vd,
+ vbfloat16mf2_t vs1,
+ vbfloat16mf2_t vs2,
+ size_t vl) {
+ return __riscv_vfwmacc_vv_bf16mf2_f32m1_rm_tumu(vm, vd, vs1, vs2,
+ __RISCV_FRM_RNE, vl);
+}
+
+// CHECK-RV64-LABEL: define dso_local <vscale x 2 x float> @test_vfwmacc_vf_bf16mf2_f32m1_rm_tumu(
+// CHECK-RV64-SAME: <vscale x 2 x i1> [[VM:%.*]], <vscale x 2 x float> [[VD:%.*]], bfloat noundef [[VS1:%.*]], <vscale x 2 x bfloat> [[VS2:%.*]], i64 noundef [[VL:%.*]]) #[[ATTR0]] {
+// CHECK-RV64-NEXT: [[ENTRY:.*:]]
+// CHECK-RV64-NEXT: [[TMP0:%.*]] = call <vscale x 2 x float> @llvm.riscv.vfwmacc.mask.nxv2f32.bf16.nxv2bf16.i64(<vscale x 2 x float> [[VD]], bfloat [[VS1]], <vscale x 2 x bfloat> [[VS2]], <vscale x 2 x i1> [[VM]], i64 0, i64 [[VL]], i64 0)
+// CHECK-RV64-NEXT: ret <vscale x 2 x float> [[TMP0]]
+//
+vfloat32m1_t test_vfwmacc_vf_bf16mf2_f32m1_rm_tumu(vbool32_t vm,
+ vfloat32m1_t vd, __bf16 vs1,
+ vbfloat16mf2_t vs2,
+ size_t vl) {
+ return __riscv_vfwmacc_vf_bf16mf2_f32m1_rm_tumu(vm, vd, vs1, vs2,
+ __RISCV_FRM_RNE, vl);
+}
+
+// CHECK-RV64-LABEL: define dso_local <vscale x 4 x float> @test_vfwmacc_vv_bf16m1_f32m2_rm_tumu(
+// CHECK-RV64-SAME: <vscale x 4 x i1> [[VM:%.*]], <vscale x 4 x float> [[VD:%.*]], <vscale x 4 x bfloat> [[VS1:%.*]], <vscale x 4 x bfloat> [[VS2:%.*]], i64 noundef [[VL:%.*]]) #[[ATTR0]] {
+// CHECK-RV64-NEXT: [[ENTRY:.*:]]
+// CHECK-RV64-NEXT: [[TMP0:%.*]] = call <vscale x 4 x float> @llvm.riscv.vfwmacc.mask.nxv4f32.nxv4bf16.nxv4bf16.i64(<vscale x 4 x float> [[VD]], <vscale x 4 x bfloat> [[VS1]], <vscale x 4 x bfloat> [[VS2]], <vscale x 4 x i1> [[VM]], i64 0, i64 [[VL]], i64 0)
+// CHECK-RV64-NEXT: ret <vscale x 4 x float> [[TMP0]]
+//
+vfloat32m2_t test_vfwmacc_vv_bf16m1_f32m2_rm_tumu(vbool16_t vm, vfloat32m2_t vd,
+ vbfloat16m1_t vs1,
+ vbfloat16m1_t vs2,
+ size_t vl) {
+ return __riscv_vfwmacc_vv_bf16m1_f32m2_rm_tumu(vm, vd, vs1, vs2,
+ __RISCV_FRM_RNE, vl);
+}
+
+// CHECK-RV64-LABEL: define dso_local <vscale x 4 x float> @test_vfwmacc_vf_bf16m1_f32m2_rm_tumu(
+// CHECK-RV64-SAME: <vscale x 4 x i1> [[VM:%.*]], <vscale x 4 x float> [[VD:%.*]], bfloat noundef [[VS1:%.*]], <vscale x 4 x bfloat> [[VS2:%.*]], i64 noundef [[VL:%.*]]) #[[ATTR0]] {
+// CHECK-RV64-NEXT: [[ENTRY:.*:]]
+// CHECK-RV64-NEXT: [[TMP0:%.*]] = call <vscale x 4 x float> @llvm.riscv.vfwmacc.mask.nxv4f32.bf16.nxv4bf16.i64(<vscale x 4 x float> [[VD]], bfloat [[VS1]], <vscale x 4 x bfloat> [[VS2]], <vscale x 4 x i1> [[VM]], i64 0, i64 [[VL]], i64 0)
+// CHECK-RV64-NEXT: ret <vscale x 4 x float> [[TMP0]]
+//
+vfloat32m2_t test_vfwmacc_vf_bf16m1_f32m2_rm_tumu(vbool16_t vm, vfloat32m2_t vd,
+ __bf16 vs1, vbfloat16m1_t vs2,
+ size_t vl) {
+ return __riscv_vfwmacc_vf_bf16m1_f32m2_rm_tumu(vm, vd, vs1, vs2,
+ __RISCV_FRM_RNE, vl);
+}
+
+// CHECK-RV64-LABEL: define dso_local <vscale x 8 x float> @test_vfwmacc_vv_bf16m2_f32m4_rm_tumu(
+// CHECK-RV64-SAME: <vscale x 8 x i1> [[VM:%.*]], <vscale x 8 x float> [[VD:%.*]], <vscale x 8 x bfloat> [[VS1:%.*]], <vscale x 8 x bfloat> [[VS2:%.*]], i64 noundef [[VL:%.*]]) #[[ATTR0]] {
+// CHECK-RV64-NEXT: [[ENTRY:.*:]]
+// CHECK-RV64-NEXT: [[TMP0:%.*]] = call <vscale x 8 x float> @llvm.riscv.vfwmacc.mask.nxv8f32.nxv8bf16.nxv8bf16.i64(<vscale x 8 x float> [[VD]], <vscale x 8 x bfloat> [[VS1]], <vscale x 8 x bfloat> [[VS2]], <vscale x 8 x i1> [[VM]], i64 0, i64 [[VL]], i64 0)
+// CHECK-RV64-NEXT: ret <vscale x 8 x float> [[TMP0]]
+//
+vfloat32m4_t test_vfwmacc_vv_bf16m2_f32m4_rm_tumu(vbool8_t vm, vfloat32m4_t vd,
+ vbfloat16m2_t vs1,
+ vbfloat16m2_t vs2,
+ size_t vl) {
+ return __riscv_vfwmacc_vv_bf16m2_f32m4_rm_tumu(vm, vd, vs1, vs2,
+ __RISCV_FRM_RNE, vl);
+}
+
+// CHECK-RV64-LABEL: define dso_local <vscale x 8 x float> @test_vfwmacc_vf_bf16m2_f32m4_rm_tumu(
+// CHECK-RV64-SAME: <vscale x 8 x i1> [[VM:%.*]], <vscale x 8 x float> [[VD:%.*]], bfloat noundef [[VS1:%.*]], <vscale x 8 x bfloat> [[VS2:%.*]], i64 noundef [[VL:%.*]]) #[[ATTR0]] {
+// CHECK-RV64-NEXT: [[ENTRY:.*:]]
+// CHECK-RV64-NEXT: [[TMP0:%.*]] = call <vscale x 8 x float> @llvm.riscv.vfwmacc.mask.nxv8f32.bf16.nxv8bf16.i64(<vscale x 8 x float> [[VD]], bfloat [[VS1]], <vscale x 8 x bfloat> [[VS2]], <vscale x 8 x i1> [[VM]], i64 0, i64 [[VL]], i64 0)
+// CHECK-RV64-NEXT: ret <vscale x 8 x float> [[TMP0]]
+//
+vfloat32m4_t test_vfwmacc_vf_bf16m2_f32m4_rm_tumu(vbool8_t vm, vfloat32m4_t vd,
+ __bf16 vs1, vbfloat16m2_t vs2,
+ size_t vl) {
+ return __riscv_vfwmacc_vf_bf16m2_f32m4_rm_tumu(vm, vd, vs1, vs2,
+ __RISCV_FRM_RNE, vl);
+}
+
+// CHECK-RV64-LABEL: define dso_local <vscale x 16 x float> @test_vfwmacc_vv_bf16m4_f32m8_rm_tumu(
+// CHECK-RV64-SAME: <vscale x 16 x i1> [[VM:%.*]], <vscale x 16 x float> [[VD:%.*]], <vscale x 16 x bfloat> [[VS1:%.*]], <vscale x 16 x bfloat> [[VS2:%.*]], i64 noundef [[VL:%.*]]) #[[ATTR0]] {
+// CHECK-RV64-NEXT: [[ENTRY:.*:]]
+// CHECK-RV64-NEXT: [[TMP0:%.*]] = call <vscale x 16 x float> @llvm.riscv.vfwmacc.mask.nxv16f32.nxv16bf16.nxv16bf16.i64(<vscale x 16 x float> [[VD]], <vscale x 16 x bfloat> [[VS1]], <vscale x 16 x bfloat> [[VS2]], <vscale x 16 x i1> [[VM]], i64 0, i64 [[VL]], i64 0)
+// CHECK-RV64-NEXT: ret <vscale x 16 x float> [[TMP0]]
+//
+vfloat32m8_t test_vfwmacc_vv_bf16m4_f32m8_rm_tumu(vbool4_t vm, vfloat32m8_t vd,
+ vbfloat16m4_t vs1,
+ vbfloat16m4_t vs2,
+ size_t vl) {
+ return __riscv_vfwmacc_vv_bf16m4_f32m8_rm_tumu(vm, vd, vs1, vs2,
+ __RISCV_FRM_RNE, vl);
+}
+
+// CHECK-RV64-LABEL: define dso_local <vscale x 16 x float> @test_vfwmacc_vf_bf16m4_f32m8_rm_tumu(
+// CHECK-RV64-SAME: <vscale x 16 x i1> [[VM:%.*]], <vscale x 16 x float> [[VD:%.*]], bfloat noundef [[VS1:%.*]], <vscale x 16 x bfloat> [[VS2:%.*]], i64 noundef [[VL:%.*]]) #[[ATTR0]] {
+// CHECK-RV64-NEXT: [[ENTRY:.*:]]
+// CHECK-RV64-NEXT: [[TMP0:%.*]] = call <vscale x 16 x float> @llvm.riscv.vfwmacc.mask.nxv16f32.bf16.nxv16bf16.i64(<vscale x 16 x float> [[VD]], bfloat [[VS1]], <vscale x 16 x bfloat> [[VS2]], <vscale x 16 x i1> [[VM]], i64 0, i64 [[VL]], i64 0)
+// CHECK-RV64-NEXT: ret <vscale x 16 x float> [[TMP0]]
+//
+vfloat32m8_t test_vfwmacc_vf_bf16m4_f32m8_rm_tumu(vbool4_t vm, vfloat32m8_t vd,
+ __bf16 vs1, vbfloat16m4_t vs2,
+ size_t vl) {
+ return __riscv_vfwmacc_vf_bf16m4_f32m8_rm_tumu(vm, vd, vs1, vs2,
+ __RISCV_FRM_RNE, vl);
+}
+
+// CHECK-RV64-LABEL: define dso_local <vscale x 1 x float> @test_vfwmacc_vv_bf16mf4_f32mf2_rm_mu(
+// CHECK-RV64-SAME: <vscale x 1 x i1> [[VM:%.*]], <vscale x 1 x float> [[VD:%.*]], <vscale x 1 x bfloat> [[VS1:%.*]], <vscale x 1 x bfloat> [[VS2:%.*]], i64 noundef [[VL:%.*]]) #[[ATTR0]] {
+// CHECK-RV64-NEXT: [[ENTRY:.*:]]
+// CHECK-RV64-NEXT: [[TMP0:%.*]] = call <vscale x 1 x float> @llvm.riscv.vfwmacc.mask.nxv1f32.nxv1bf16.nxv1bf16.i64(<vscale x 1 x float> [[VD]], <vscale x 1 x bfloat> [[VS1]], <vscale x 1 x bfloat> [[VS2]], <vscale x 1 x i1> [[VM]], i64 0, i64 [[VL]], i64 1)
+// CHECK-RV64-NEXT: ret <vscale x 1 x float> [[TMP0]]
+//
+vfloat32mf2_t test_vfwmacc_vv_bf16mf4_f32mf2_rm_mu(vbool64_t vm,
+ vfloat32mf2_t vd,
+ vbfloat16mf4_t vs1,
+ vbfloat16mf4_t vs2,
+ size_t vl) {
+ return __riscv_vfwmacc_vv_bf16mf4_f32mf2_rm_mu(vm, vd, vs1, vs2,
+ __RISCV_FRM_RNE, vl);
+}
+
+// CHECK-RV64-LABEL: define dso_local <vscale x 1 x float> @test_vfwmacc_vf_bf16mf4_f32mf2_rm_mu(
+// CHECK-RV64-SAME: <vscale x 1 x i1> [[VM:%.*]], <vscale x 1 x float> [[VD:%.*]], bfloat noundef [[VS1:%.*]], <vscale x 1 x bfloat> [[VS2:%.*]], i64 noundef [[VL:%.*]]) #[[ATTR0]] {
+// CHECK-RV64-NEXT: [[ENTRY:.*:]]
+// CHECK-RV64-NEXT: [[TMP0:%.*]] = call <vscale x 1 x float> @llvm.riscv.vfwmacc.mask.nxv1f32.bf16.nxv1bf16.i64(<vscale x 1 x float> [[VD]], bfloat [[VS1]], <vscale x 1 x bfloat> [[VS2]], <vscale x 1 x i1> [[VM]], i64 0, i64 [[VL]], i64 1)
+// CHECK-RV64-NEXT: ret <vscale x 1 x float> [[TMP0]]
+//
+vfloat32mf2_t test_vfwmacc_vf_bf16mf4_f32mf2_rm_mu(vbool64_t vm,
+ vfloat32mf2_t vd, __bf16 vs1,
+ vbfloat16mf4_t vs2,
+ size_t vl) {
+ return __riscv_vfwmacc_vf_bf16mf4_f32mf2_rm_mu(vm, vd, vs1, vs2,
+ __RISCV_FRM_RNE, vl);
+}
+
+// CHECK-RV64-LABEL: define dso_local <vscale x 2 x float> @test_vfwmacc_vv_bf16mf2_f32m1_rm_mu(
+// CHECK-RV64-SAME: <vscale x 2 x i1> [[VM:%.*]], <vscale x 2 x float> [[VD:%.*]], <vscale x 2 x bfloat> [[VS1:%.*]], <vscale x 2 x bfloat> [[VS2:%.*]], i64 noundef [[VL:%.*]]) #[[ATTR0]] {
+// CHECK-RV64-NEXT: [[ENTRY:.*:]]
+// CHECK-RV64-NEXT: [[TMP0:%.*]] = call <vscale x 2 x float> @llvm.riscv.vfwmacc.mask.nxv2f32.nxv2bf16.nxv2bf16.i64(<vscale x 2 x float> [[VD]], <vscale x 2 x bfloat> [[VS1]], <vscale x 2 x bfloat> [[VS2]], <vscale x 2 x i1> [[VM]], i64 0, i64 [[VL]], i64 1)
+// CHECK-RV64-NEXT: ret <vscale x 2 x float> [[TMP0]]
+//
+vfloat32m1_t test_vfwmacc_vv_bf16mf2_f32m1_rm_mu(vbool32_t vm, vfloat32m1_t vd,
+ vbfloat16mf2_t vs1,
+ vbfloat16mf2_t vs2,
+ size_t vl) {
+ return __riscv_vfwmacc_vv_bf16mf2_f32m1_rm_mu(vm, vd, vs1, vs2,
+ __RISCV_FRM_RNE, vl);
+}
+
+// CHECK-RV64-LABEL: define dso_local <vscale x 2 x float> @test_vfwmacc_vf_bf16mf2_f32m1_rm_mu(
+// CHECK-RV64-SAME: <vscale x 2 x i1> [[VM:%.*]], <vscale x 2 x float> [[VD:%.*]], bfloat noundef [[VS1:%.*]], <vscale x 2 x bfloat> [[VS2:%.*]], i64 noundef [[VL:%.*]]) #[[ATTR0]] {
+// CHECK-RV64-NEXT: [[ENTRY:.*:]]
+// CHECK-RV64-NEXT: [[TMP0:%.*]] = call <vscale x 2 x float> @llvm.riscv.vfwmacc.mask.nxv2f32.bf16.nxv2bf16.i64(<vscale x 2 x float> [[VD]], bfloat [[VS1]], <vscale x 2 x bfloat> [[VS2]], <vscale x 2 x i1> [[VM]], i64 0, i64 [[VL]], i64 1)
+// CHECK-RV64-NEXT: ret <vscale x 2 x float> [[TMP0]]
+//
+vfloat32m1_t test_vfwmacc_vf_bf16mf2_f32m1_rm_mu(vbool32_t vm, vfloat32m1_t vd,
+ __bf16 vs1, vbfloat16mf2_t vs2,
+ size_t vl) {
+ return __riscv_vfwmacc_vf_bf16mf2_f32m1_rm_mu(vm, vd, vs1, vs2,
+ __RISCV_FRM_RNE, vl);
+}
+
+// CHECK-RV64-LABEL: define dso_local <vscale x 4 x float> @test_vfwmacc_vv_bf16m1_f32m2_rm_mu(
+// CHECK-RV64-SAME: <vscale x 4 x i1> [[VM:%.*]], <vscale x 4 x float> [[VD:%.*]], <vscale x 4 x bfloat> [[VS1:%.*]], <vscale x 4 x bfloat> [[VS2:%.*]], i64 noundef [[VL:%.*]]) #[[ATTR0]] {
+// CHECK-RV64-NEXT: [[ENTRY:.*:]]
+// CHECK-RV64-NEXT: [[TMP0:%.*]] = call <vscale x 4 x float> @llvm.riscv.vfwmacc.mask.nxv4f32.nxv4bf16.nxv4bf16.i64(<vscale x 4 x float> [[VD]], <vscale x 4 x bfloat> [[VS1]], <vscale x 4 x bfloat> [[VS2]], <vscale x 4 x i1> [[VM]], i64 0, i64 [[VL]], i64 1)
+// CHECK-RV64-NEXT: ret <vscale x 4 x float> [[TMP0]]
+//
+vfloat32m2_t test_vfwmacc_vv_bf16m1_f32m2_rm_mu(vbool16_t vm, vfloat32m2_t vd,
+ vbfloat16m1_t vs1,
+ vbfloat16m1_t vs2, size_t vl) {
+ return __riscv_vfwmacc_vv_bf16m1_f32m2_rm_mu(vm, vd, vs1, vs2,
+ __RISCV_FRM_RNE, vl);
+}
+
+// CHECK-RV64-LABEL: define dso_local <vscale x 4 x float> @test_vfwmacc_vf_bf16m1_f32m2_rm_mu(
+// CHECK-RV64-SAME: <vscale x 4 x i1> [[VM:%.*]], <vscale x 4 x float> [[VD:%.*]], bfloat noundef [[VS1:%.*]], <vscale x 4 x bfloat> [[VS2:%.*]], i64 noundef [[VL:%.*]]) #[[ATTR0]] {
+// CHECK-RV64-NEXT: [[ENTRY:.*:]]
+// CHECK-RV64-NEXT: [[TMP0:%.*]] = call <vscale x 4 x float> @llvm.riscv.vfwmacc.mask.nxv4f32.bf16.nxv4bf16.i64(<vscale x 4 x float> [[VD]], bfloat [[VS1]], <vscale x 4 x bfloat> [[VS2]], <vscale x 4 x i1> [[VM]], i64 0, i64 [[VL]], i64 1)
+// CHECK-RV64-NEXT: ret <vscale x 4 x float> [[TMP0]]
+//
+vfloat32m2_t test_vfwmacc_vf_bf16m1_f32m2_rm_mu(vbool16_t vm, vfloat32m2_t vd,
+ __bf16 vs1, vbfloat16m1_t vs2,
+ size_t vl) {
+ return __riscv_vfwmacc_vf_bf16m1_f32m2_rm_mu(vm, vd, vs1, vs2,
+ __RISCV_FRM_RNE, vl);
+}
+
+// CHECK-RV64-LABEL: define dso_local <vscale x 8 x float> @test_vfwmacc_vv_bf16m2_f32m4_rm_mu(
+// CHECK-RV64-SAME: <vscale x 8 x i1> [[VM:%.*]], <vscale x 8 x float> [[VD:%.*]], <vscale x 8 x bfloat> [[VS1:%.*]], <vscale x 8 x bfloat> [[VS2:%.*]], i64 noundef [[VL:%.*]]) #[[ATTR0]] {
+// CHECK-RV64-NEXT: [[ENTRY:.*:]]
+// CHECK-RV64-NEXT: [[TMP0:%.*]] = call <vscale x 8 x float> @llvm.riscv.vfwmacc.mask.nxv8f32.nxv8bf16.nxv8bf16.i64(<vscale x 8 x float> [[VD]], <vscale x 8 x bfloat> [[VS1]], <vscale x 8 x bfloat> [[VS2]], <vscale x 8 x i1> [[VM]], i64 0, i64 [[VL]], i64 1)
+// CHECK-RV64-NEXT: ret <vscale x 8 x float> [[TMP0]]
+//
+vfloat32m4_t test_vfwmacc_vv_bf16m2_f32m4_rm_mu(vbool8_t vm, vfloat32m4_t vd,
+ vbfloat16m2_t vs1,
+ vbfloat16m2_t vs2, size_t vl) {
+ return __riscv_vfwmacc_vv_bf16m2_f32m4_rm_mu(vm, vd, vs1, vs2,
+ __RISCV_FRM_RNE, vl);
+}
+
+// CHECK-RV64-LABEL: define dso_local <vscale x 8 x float> @test_vfwmacc_vf_bf16m2_f32m4_rm_mu(
+// CHECK-RV64-SAME: <vscale x 8 x i1> [[VM:%.*]], <vscale x 8 x float> [[VD:%.*]], bfloat noundef [[VS1:%.*]], <vscale x 8 x bfloat> [[VS2:%.*]], i64 noundef [[VL:%.*]]) #[[ATTR0]] {
+// CHECK-RV64-NEXT: [[ENTRY:.*:]]
+// CHECK-RV64-NEXT: [[TMP0:%.*]] = call <vscale x 8 x float> @llvm.riscv.vfwmacc.mask.nxv8f32.bf16.nxv8bf16.i64(<vscale x 8 x float> [[VD]], bfloat [[VS1]], <vscale x 8 x bfloat> [[VS2]], <vscale x 8 x i1> [[VM]], i64 0, i64 [[VL]], i64 1)
+// CHECK-RV64-NEXT: ret <vscale x 8 x float> [[TMP0]]
+//
+vfloat32m4_t test_vfwmacc_vf_bf16m2_f32m4_rm_mu(vbool8_t vm, vfloat32m4_t vd,
+ __bf16 vs1, vbfloat16m2_t vs2,
+ size_t vl) {
+ return __riscv_vfwmacc_vf_bf16m2_f32m4_rm_mu(vm, vd, vs1, vs2,
+ __RISCV_FRM_RNE, vl);
+}
+
+// CHECK-RV64-LABEL: define dso_local <vscale x 16 x float> @test_vfwmacc_vv_bf16m4_f32m8_rm_mu(
+// CHECK-RV64-SAME: <vscale x 16 x i1> [[VM:%.*]], <vscale x 16 x float> [[VD:%.*]], <vscale x 16 x bfloat> [[VS1:%.*]], <vscale x 16 x bfloat> [[VS2:%.*]], i64 noundef [[VL:%.*]]) #[[ATTR0]] {
+// CHECK-RV64-NEXT: [[ENTRY:.*:]]
+// CHECK-RV64-NEXT: [[TMP0:%.*]] = call <vscale x 16 x float> @llvm.riscv.vfwmacc.mask.nxv16f32.nxv16bf16.nxv16bf16.i64(<vscale x 16 x float> [[VD]], <vscale x 16 x bfloat> [[VS1]], <vscale x 16 x bfloat> [[VS2]], <vscale x 16 x i1> [[VM]], i64 0, i64 [[VL]], i64 1)
+// CHECK-RV64-NEXT: ret <vscale x 16 x float> [[TMP0]]
+//
+vfloat32m8_t test_vfwmacc_vv_bf16m4_f32m8_rm_mu(vbool4_t vm, vfloat32m8_t vd,
+ vbfloat16m4_t vs1,
+ vbfloat16m4_t vs2, size_t vl) {
+ return __riscv_vfwmacc_vv_bf16m4_f32m8_rm_mu(vm, vd, vs1, vs2,
+ __RISCV_FRM_RNE, vl);
+}
+
+// CHECK-RV64-LABEL: define dso_local <vscale x 16 x float> @test_vfwmacc_vf_bf16m4_f32m8_rm_mu(
+// CHECK-RV64-SAME: <vscale x 16 x i1> [[VM:%.*]], <vscale x 16 x float> [[VD:%.*]], bfloat noundef [[VS1:%.*]], <vscale x 16 x bfloat> [[VS2:%.*]], i64 noundef [[VL:%.*]]) #[[ATTR0]] {
+// CHECK-RV64-NEXT: [[ENTRY:.*:]]
+// CHECK-RV64-NEXT: [[TMP0:%.*]] = call <vscale x 16 x float> @llvm.riscv.vfwmacc.mask.nxv16f32.bf16.nxv16bf16.i64(<vscale x 16 x float> [[VD]], bfloat [[VS1]], <vscale x 16 x bfloat> [[VS2]], <vscale x 16 x i1> [[VM]], i64 0, i64 [[VL]], i64 1)
+// CHECK-RV64-NEXT: ret <vscale x 16 x float> [[TMP0]]
+//
+vfloat32m8_t test_vfwmacc_vf_bf16m4_f32m8_rm_mu(vbool4_t vm, vfloat32m8_t vd,
+ __bf16 vs1, vbfloat16m4_t vs2,
+ size_t vl) {
+ return __riscv_vfwmacc_vf_bf16m4_f32m8_rm_mu(vm, vd, vs1, vs2,
+ __RISCV_FRM_RNE, vl);
+}
diff --git a/clang/test/CodeGen/RISCV/rvv-intrinsics-autogenerated/zvfbfa/policy/non-overloaded/vfwmsac.c b/clang/test/CodeGen/RISCV/rvv-intrinsics-autogenerated/zvfbfa/policy/non-overloaded/vfwmsac.c
new file mode 100644
index 0000000..6d78c74
--- /dev/null
+++ b/clang/test/CodeGen/RISCV/rvv-intrinsics-autogenerated/zvfbfa/policy/non-overloaded/vfwmsac.c
@@ -0,0 +1,1017 @@
+// NOTE: Assertions have been autogenerated by utils/update_cc_test_checks.py UTC_ARGS: --version 5
+// REQUIRES: riscv-registered-target
+// RUN: %clang_cc1 -triple riscv64 -target-feature +v \
+// RUN: -target-feature +experimental-zvfbfa -disable-O0-optnone \
+// RUN: -emit-llvm %s -o - | opt -S -passes=mem2reg | \
+// RUN: FileCheck --check-prefix=CHECK-RV64 %s
+
+#include <riscv_vector.h>
+
+// CHECK-RV64-LABEL: define dso_local <vscale x 1 x float> @test_vfwmsac_vv_bf16mf4_f32mf2_tu(
+// CHECK-RV64-SAME: <vscale x 1 x float> [[VD:%.*]], <vscale x 1 x bfloat> [[VS1:%.*]], <vscale x 1 x bfloat> [[VS2:%.*]], i64 noundef [[VL:%.*]]) #[[ATTR0:[0-9]+]] {
+// CHECK-RV64-NEXT: [[ENTRY:.*:]]
+// CHECK-RV64-NEXT: [[TMP0:%.*]] = call <vscale x 1 x float> @llvm.riscv.vfwmsac.nxv1f32.nxv1bf16.nxv1bf16.i64(<vscale x 1 x float> [[VD]], <vscale x 1 x bfloat> [[VS1]], <vscale x 1 x bfloat> [[VS2]], i64 7, i64 [[VL]], i64 2)
+// CHECK-RV64-NEXT: ret <vscale x 1 x float> [[TMP0]]
+//
+vfloat32mf2_t test_vfwmsac_vv_bf16mf4_f32mf2_tu(vfloat32mf2_t vd,
+ vbfloat16mf4_t vs1,
+ vbfloat16mf4_t vs2, size_t vl) {
+ return __riscv_vfwmsac_vv_bf16mf4_f32mf2_tu(vd, vs1, vs2, vl);
+}
+
+// CHECK-RV64-LABEL: define dso_local <vscale x 1 x float> @test_vfwmsac_vf_bf16mf4_f32mf2_tu(
+// CHECK-RV64-SAME: <vscale x 1 x float> [[VD:%.*]], bfloat noundef [[VS1:%.*]], <vscale x 1 x bfloat> [[VS2:%.*]], i64 noundef [[VL:%.*]]) #[[ATTR0]] {
+// CHECK-RV64-NEXT: [[ENTRY:.*:]]
+// CHECK-RV64-NEXT: [[TMP0:%.*]] = call <vscale x 1 x float> @llvm.riscv.vfwmsac.nxv1f32.bf16.nxv1bf16.i64(<vscale x 1 x float> [[VD]], bfloat [[VS1]], <vscale x 1 x bfloat> [[VS2]], i64 7, i64 [[VL]], i64 2)
+// CHECK-RV64-NEXT: ret <vscale x 1 x float> [[TMP0]]
+//
+vfloat32mf2_t test_vfwmsac_vf_bf16mf4_f32mf2_tu(vfloat32mf2_t vd, __bf16 vs1,
+ vbfloat16mf4_t vs2, size_t vl) {
+ return __riscv_vfwmsac_vf_bf16mf4_f32mf2_tu(vd, vs1, vs2, vl);
+}
+
+// CHECK-RV64-LABEL: define dso_local <vscale x 2 x float> @test_vfwmsac_vv_bf16mf2_f32m1_tu(
+// CHECK-RV64-SAME: <vscale x 2 x float> [[VD:%.*]], <vscale x 2 x bfloat> [[VS1:%.*]], <vscale x 2 x bfloat> [[VS2:%.*]], i64 noundef [[VL:%.*]]) #[[ATTR0]] {
+// CHECK-RV64-NEXT: [[ENTRY:.*:]]
+// CHECK-RV64-NEXT: [[TMP0:%.*]] = call <vscale x 2 x float> @llvm.riscv.vfwmsac.nxv2f32.nxv2bf16.nxv2bf16.i64(<vscale x 2 x float> [[VD]], <vscale x 2 x bfloat> [[VS1]], <vscale x 2 x bfloat> [[VS2]], i64 7, i64 [[VL]], i64 2)
+// CHECK-RV64-NEXT: ret <vscale x 2 x float> [[TMP0]]
+//
+vfloat32m1_t test_vfwmsac_vv_bf16mf2_f32m1_tu(vfloat32m1_t vd,
+ vbfloat16mf2_t vs1,
+ vbfloat16mf2_t vs2, size_t vl) {
+ return __riscv_vfwmsac_vv_bf16mf2_f32m1_tu(vd, vs1, vs2, vl);
+}
+
+// CHECK-RV64-LABEL: define dso_local <vscale x 2 x float> @test_vfwmsac_vf_bf16mf2_f32m1_tu(
+// CHECK-RV64-SAME: <vscale x 2 x float> [[VD:%.*]], bfloat noundef [[VS1:%.*]], <vscale x 2 x bfloat> [[VS2:%.*]], i64 noundef [[VL:%.*]]) #[[ATTR0]] {
+// CHECK-RV64-NEXT: [[ENTRY:.*:]]
+// CHECK-RV64-NEXT: [[TMP0:%.*]] = call <vscale x 2 x float> @llvm.riscv.vfwmsac.nxv2f32.bf16.nxv2bf16.i64(<vscale x 2 x float> [[VD]], bfloat [[VS1]], <vscale x 2 x bfloat> [[VS2]], i64 7, i64 [[VL]], i64 2)
+// CHECK-RV64-NEXT: ret <vscale x 2 x float> [[TMP0]]
+//
+vfloat32m1_t test_vfwmsac_vf_bf16mf2_f32m1_tu(vfloat32m1_t vd, __bf16 vs1,
+ vbfloat16mf2_t vs2, size_t vl) {
+ return __riscv_vfwmsac_vf_bf16mf2_f32m1_tu(vd, vs1, vs2, vl);
+}
+
+// CHECK-RV64-LABEL: define dso_local <vscale x 4 x float> @test_vfwmsac_vv_bf16m1_f32m2_tu(
+// CHECK-RV64-SAME: <vscale x 4 x float> [[VD:%.*]], <vscale x 4 x bfloat> [[VS1:%.*]], <vscale x 4 x bfloat> [[VS2:%.*]], i64 noundef [[VL:%.*]]) #[[ATTR0]] {
+// CHECK-RV64-NEXT: [[ENTRY:.*:]]
+// CHECK-RV64-NEXT: [[TMP0:%.*]] = call <vscale x 4 x float> @llvm.riscv.vfwmsac.nxv4f32.nxv4bf16.nxv4bf16.i64(<vscale x 4 x float> [[VD]], <vscale x 4 x bfloat> [[VS1]], <vscale x 4 x bfloat> [[VS2]], i64 7, i64 [[VL]], i64 2)
+// CHECK-RV64-NEXT: ret <vscale x 4 x float> [[TMP0]]
+//
+vfloat32m2_t test_vfwmsac_vv_bf16m1_f32m2_tu(vfloat32m2_t vd, vbfloat16m1_t vs1,
+ vbfloat16m1_t vs2, size_t vl) {
+ return __riscv_vfwmsac_vv_bf16m1_f32m2_tu(vd, vs1, vs2, vl);
+}
+
+// CHECK-RV64-LABEL: define dso_local <vscale x 4 x float> @test_vfwmsac_vf_bf16m1_f32m2_tu(
+// CHECK-RV64-SAME: <vscale x 4 x float> [[VD:%.*]], bfloat noundef [[VS1:%.*]], <vscale x 4 x bfloat> [[VS2:%.*]], i64 noundef [[VL:%.*]]) #[[ATTR0]] {
+// CHECK-RV64-NEXT: [[ENTRY:.*:]]
+// CHECK-RV64-NEXT: [[TMP0:%.*]] = call <vscale x 4 x float> @llvm.riscv.vfwmsac.nxv4f32.bf16.nxv4bf16.i64(<vscale x 4 x float> [[VD]], bfloat [[VS1]], <vscale x 4 x bfloat> [[VS2]], i64 7, i64 [[VL]], i64 2)
+// CHECK-RV64-NEXT: ret <vscale x 4 x float> [[TMP0]]
+//
+vfloat32m2_t test_vfwmsac_vf_bf16m1_f32m2_tu(vfloat32m2_t vd, __bf16 vs1,
+ vbfloat16m1_t vs2, size_t vl) {
+ return __riscv_vfwmsac_vf_bf16m1_f32m2_tu(vd, vs1, vs2, vl);
+}
+
+// CHECK-RV64-LABEL: define dso_local <vscale x 8 x float> @test_vfwmsac_vv_bf16m2_f32m4_tu(
+// CHECK-RV64-SAME: <vscale x 8 x float> [[VD:%.*]], <vscale x 8 x bfloat> [[VS1:%.*]], <vscale x 8 x bfloat> [[VS2:%.*]], i64 noundef [[VL:%.*]]) #[[ATTR0]] {
+// CHECK-RV64-NEXT: [[ENTRY:.*:]]
+// CHECK-RV64-NEXT: [[TMP0:%.*]] = call <vscale x 8 x float> @llvm.riscv.vfwmsac.nxv8f32.nxv8bf16.nxv8bf16.i64(<vscale x 8 x float> [[VD]], <vscale x 8 x bfloat> [[VS1]], <vscale x 8 x bfloat> [[VS2]], i64 7, i64 [[VL]], i64 2)
+// CHECK-RV64-NEXT: ret <vscale x 8 x float> [[TMP0]]
+//
+vfloat32m4_t test_vfwmsac_vv_bf16m2_f32m4_tu(vfloat32m4_t vd, vbfloat16m2_t vs1,
+ vbfloat16m2_t vs2, size_t vl) {
+ return __riscv_vfwmsac_vv_bf16m2_f32m4_tu(vd, vs1, vs2, vl);
+}
+
+// CHECK-RV64-LABEL: define dso_local <vscale x 8 x float> @test_vfwmsac_vf_bf16m2_f32m4_tu(
+// CHECK-RV64-SAME: <vscale x 8 x float> [[VD:%.*]], bfloat noundef [[VS1:%.*]], <vscale x 8 x bfloat> [[VS2:%.*]], i64 noundef [[VL:%.*]]) #[[ATTR0]] {
+// CHECK-RV64-NEXT: [[ENTRY:.*:]]
+// CHECK-RV64-NEXT: [[TMP0:%.*]] = call <vscale x 8 x float> @llvm.riscv.vfwmsac.nxv8f32.bf16.nxv8bf16.i64(<vscale x 8 x float> [[VD]], bfloat [[VS1]], <vscale x 8 x bfloat> [[VS2]], i64 7, i64 [[VL]], i64 2)
+// CHECK-RV64-NEXT: ret <vscale x 8 x float> [[TMP0]]
+//
+vfloat32m4_t test_vfwmsac_vf_bf16m2_f32m4_tu(vfloat32m4_t vd, __bf16 vs1,
+ vbfloat16m2_t vs2, size_t vl) {
+ return __riscv_vfwmsac_vf_bf16m2_f32m4_tu(vd, vs1, vs2, vl);
+}
+
+// CHECK-RV64-LABEL: define dso_local <vscale x 16 x float> @test_vfwmsac_vv_bf16m4_f32m8_tu(
+// CHECK-RV64-SAME: <vscale x 16 x float> [[VD:%.*]], <vscale x 16 x bfloat> [[VS1:%.*]], <vscale x 16 x bfloat> [[VS2:%.*]], i64 noundef [[VL:%.*]]) #[[ATTR0]] {
+// CHECK-RV64-NEXT: [[ENTRY:.*:]]
+// CHECK-RV64-NEXT: [[TMP0:%.*]] = call <vscale x 16 x float> @llvm.riscv.vfwmsac.nxv16f32.nxv16bf16.nxv16bf16.i64(<vscale x 16 x float> [[VD]], <vscale x 16 x bfloat> [[VS1]], <vscale x 16 x bfloat> [[VS2]], i64 7, i64 [[VL]], i64 2)
+// CHECK-RV64-NEXT: ret <vscale x 16 x float> [[TMP0]]
+//
+vfloat32m8_t test_vfwmsac_vv_bf16m4_f32m8_tu(vfloat32m8_t vd, vbfloat16m4_t vs1,
+ vbfloat16m4_t vs2, size_t vl) {
+ return __riscv_vfwmsac_vv_bf16m4_f32m8_tu(vd, vs1, vs2, vl);
+}
+
+// CHECK-RV64-LABEL: define dso_local <vscale x 16 x float> @test_vfwmsac_vf_bf16m4_f32m8_tu(
+// CHECK-RV64-SAME: <vscale x 16 x float> [[VD:%.*]], bfloat noundef [[VS1:%.*]], <vscale x 16 x bfloat> [[VS2:%.*]], i64 noundef [[VL:%.*]]) #[[ATTR0]] {
+// CHECK-RV64-NEXT: [[ENTRY:.*:]]
+// CHECK-RV64-NEXT: [[TMP0:%.*]] = call <vscale x 16 x float> @llvm.riscv.vfwmsac.nxv16f32.bf16.nxv16bf16.i64(<vscale x 16 x float> [[VD]], bfloat [[VS1]], <vscale x 16 x bfloat> [[VS2]], i64 7, i64 [[VL]], i64 2)
+// CHECK-RV64-NEXT: ret <vscale x 16 x float> [[TMP0]]
+//
+vfloat32m8_t test_vfwmsac_vf_bf16m4_f32m8_tu(vfloat32m8_t vd, __bf16 vs1,
+ vbfloat16m4_t vs2, size_t vl) {
+ return __riscv_vfwmsac_vf_bf16m4_f32m8_tu(vd, vs1, vs2, vl);
+}
+
+// CHECK-RV64-LABEL: define dso_local <vscale x 1 x float> @test_vfwmsac_vv_bf16mf4_f32mf2_tum(
+// CHECK-RV64-SAME: <vscale x 1 x i1> [[VM:%.*]], <vscale x 1 x float> [[VD:%.*]], <vscale x 1 x bfloat> [[VS1:%.*]], <vscale x 1 x bfloat> [[VS2:%.*]], i64 noundef [[VL:%.*]]) #[[ATTR0]] {
+// CHECK-RV64-NEXT: [[ENTRY:.*:]]
+// CHECK-RV64-NEXT: [[TMP0:%.*]] = call <vscale x 1 x float> @llvm.riscv.vfwmsac.mask.nxv1f32.nxv1bf16.nxv1bf16.i64(<vscale x 1 x float> [[VD]], <vscale x 1 x bfloat> [[VS1]], <vscale x 1 x bfloat> [[VS2]], <vscale x 1 x i1> [[VM]], i64 7, i64 [[VL]], i64 2)
+// CHECK-RV64-NEXT: ret <vscale x 1 x float> [[TMP0]]
+//
+vfloat32mf2_t test_vfwmsac_vv_bf16mf4_f32mf2_tum(vbool64_t vm, vfloat32mf2_t vd,
+ vbfloat16mf4_t vs1,
+ vbfloat16mf4_t vs2,
+ size_t vl) {
+ return __riscv_vfwmsac_vv_bf16mf4_f32mf2_tum(vm, vd, vs1, vs2, vl);
+}
+
+// CHECK-RV64-LABEL: define dso_local <vscale x 1 x float> @test_vfwmsac_vf_bf16mf4_f32mf2_tum(
+// CHECK-RV64-SAME: <vscale x 1 x i1> [[VM:%.*]], <vscale x 1 x float> [[VD:%.*]], bfloat noundef [[VS1:%.*]], <vscale x 1 x bfloat> [[VS2:%.*]], i64 noundef [[VL:%.*]]) #[[ATTR0]] {
+// CHECK-RV64-NEXT: [[ENTRY:.*:]]
+// CHECK-RV64-NEXT: [[TMP0:%.*]] = call <vscale x 1 x float> @llvm.riscv.vfwmsac.mask.nxv1f32.bf16.nxv1bf16.i64(<vscale x 1 x float> [[VD]], bfloat [[VS1]], <vscale x 1 x bfloat> [[VS2]], <vscale x 1 x i1> [[VM]], i64 7, i64 [[VL]], i64 2)
+// CHECK-RV64-NEXT: ret <vscale x 1 x float> [[TMP0]]
+//
+vfloat32mf2_t test_vfwmsac_vf_bf16mf4_f32mf2_tum(vbool64_t vm, vfloat32mf2_t vd,
+ __bf16 vs1, vbfloat16mf4_t vs2,
+ size_t vl) {
+ return __riscv_vfwmsac_vf_bf16mf4_f32mf2_tum(vm, vd, vs1, vs2, vl);
+}
+
+// CHECK-RV64-LABEL: define dso_local <vscale x 2 x float> @test_vfwmsac_vv_bf16mf2_f32m1_tum(
+// CHECK-RV64-SAME: <vscale x 2 x i1> [[VM:%.*]], <vscale x 2 x float> [[VD:%.*]], <vscale x 2 x bfloat> [[VS1:%.*]], <vscale x 2 x bfloat> [[VS2:%.*]], i64 noundef [[VL:%.*]]) #[[ATTR0]] {
+// CHECK-RV64-NEXT: [[ENTRY:.*:]]
+// CHECK-RV64-NEXT: [[TMP0:%.*]] = call <vscale x 2 x float> @llvm.riscv.vfwmsac.mask.nxv2f32.nxv2bf16.nxv2bf16.i64(<vscale x 2 x float> [[VD]], <vscale x 2 x bfloat> [[VS1]], <vscale x 2 x bfloat> [[VS2]], <vscale x 2 x i1> [[VM]], i64 7, i64 [[VL]], i64 2)
+// CHECK-RV64-NEXT: ret <vscale x 2 x float> [[TMP0]]
+//
+vfloat32m1_t test_vfwmsac_vv_bf16mf2_f32m1_tum(vbool32_t vm, vfloat32m1_t vd,
+ vbfloat16mf2_t vs1,
+ vbfloat16mf2_t vs2, size_t vl) {
+ return __riscv_vfwmsac_vv_bf16mf2_f32m1_tum(vm, vd, vs1, vs2, vl);
+}
+
+// CHECK-RV64-LABEL: define dso_local <vscale x 2 x float> @test_vfwmsac_vf_bf16mf2_f32m1_tum(
+// CHECK-RV64-SAME: <vscale x 2 x i1> [[VM:%.*]], <vscale x 2 x float> [[VD:%.*]], bfloat noundef [[VS1:%.*]], <vscale x 2 x bfloat> [[VS2:%.*]], i64 noundef [[VL:%.*]]) #[[ATTR0]] {
+// CHECK-RV64-NEXT: [[ENTRY:.*:]]
+// CHECK-RV64-NEXT: [[TMP0:%.*]] = call <vscale x 2 x float> @llvm.riscv.vfwmsac.mask.nxv2f32.bf16.nxv2bf16.i64(<vscale x 2 x float> [[VD]], bfloat [[VS1]], <vscale x 2 x bfloat> [[VS2]], <vscale x 2 x i1> [[VM]], i64 7, i64 [[VL]], i64 2)
+// CHECK-RV64-NEXT: ret <vscale x 2 x float> [[TMP0]]
+//
+vfloat32m1_t test_vfwmsac_vf_bf16mf2_f32m1_tum(vbool32_t vm, vfloat32m1_t vd,
+ __bf16 vs1, vbfloat16mf2_t vs2,
+ size_t vl) {
+ return __riscv_vfwmsac_vf_bf16mf2_f32m1_tum(vm, vd, vs1, vs2, vl);
+}
+
+// CHECK-RV64-LABEL: define dso_local <vscale x 4 x float> @test_vfwmsac_vv_bf16m1_f32m2_tum(
+// CHECK-RV64-SAME: <vscale x 4 x i1> [[VM:%.*]], <vscale x 4 x float> [[VD:%.*]], <vscale x 4 x bfloat> [[VS1:%.*]], <vscale x 4 x bfloat> [[VS2:%.*]], i64 noundef [[VL:%.*]]) #[[ATTR0]] {
+// CHECK-RV64-NEXT: [[ENTRY:.*:]]
+// CHECK-RV64-NEXT: [[TMP0:%.*]] = call <vscale x 4 x float> @llvm.riscv.vfwmsac.mask.nxv4f32.nxv4bf16.nxv4bf16.i64(<vscale x 4 x float> [[VD]], <vscale x 4 x bfloat> [[VS1]], <vscale x 4 x bfloat> [[VS2]], <vscale x 4 x i1> [[VM]], i64 7, i64 [[VL]], i64 2)
+// CHECK-RV64-NEXT: ret <vscale x 4 x float> [[TMP0]]
+//
+vfloat32m2_t test_vfwmsac_vv_bf16m1_f32m2_tum(vbool16_t vm, vfloat32m2_t vd,
+ vbfloat16m1_t vs1,
+ vbfloat16m1_t vs2, size_t vl) {
+ return __riscv_vfwmsac_vv_bf16m1_f32m2_tum(vm, vd, vs1, vs2, vl);
+}
+
+// CHECK-RV64-LABEL: define dso_local <vscale x 4 x float> @test_vfwmsac_vf_bf16m1_f32m2_tum(
+// CHECK-RV64-SAME: <vscale x 4 x i1> [[VM:%.*]], <vscale x 4 x float> [[VD:%.*]], bfloat noundef [[VS1:%.*]], <vscale x 4 x bfloat> [[VS2:%.*]], i64 noundef [[VL:%.*]]) #[[ATTR0]] {
+// CHECK-RV64-NEXT: [[ENTRY:.*:]]
+// CHECK-RV64-NEXT: [[TMP0:%.*]] = call <vscale x 4 x float> @llvm.riscv.vfwmsac.mask.nxv4f32.bf16.nxv4bf16.i64(<vscale x 4 x float> [[VD]], bfloat [[VS1]], <vscale x 4 x bfloat> [[VS2]], <vscale x 4 x i1> [[VM]], i64 7, i64 [[VL]], i64 2)
+// CHECK-RV64-NEXT: ret <vscale x 4 x float> [[TMP0]]
+//
+vfloat32m2_t test_vfwmsac_vf_bf16m1_f32m2_tum(vbool16_t vm, vfloat32m2_t vd,
+ __bf16 vs1, vbfloat16m1_t vs2,
+ size_t vl) {
+ return __riscv_vfwmsac_vf_bf16m1_f32m2_tum(vm, vd, vs1, vs2, vl);
+}
+
+// CHECK-RV64-LABEL: define dso_local <vscale x 8 x float> @test_vfwmsac_vv_bf16m2_f32m4_tum(
+// CHECK-RV64-SAME: <vscale x 8 x i1> [[VM:%.*]], <vscale x 8 x float> [[VD:%.*]], <vscale x 8 x bfloat> [[VS1:%.*]], <vscale x 8 x bfloat> [[VS2:%.*]], i64 noundef [[VL:%.*]]) #[[ATTR0]] {
+// CHECK-RV64-NEXT: [[ENTRY:.*:]]
+// CHECK-RV64-NEXT: [[TMP0:%.*]] = call <vscale x 8 x float> @llvm.riscv.vfwmsac.mask.nxv8f32.nxv8bf16.nxv8bf16.i64(<vscale x 8 x float> [[VD]], <vscale x 8 x bfloat> [[VS1]], <vscale x 8 x bfloat> [[VS2]], <vscale x 8 x i1> [[VM]], i64 7, i64 [[VL]], i64 2)
+// CHECK-RV64-NEXT: ret <vscale x 8 x float> [[TMP0]]
+//
+vfloat32m4_t test_vfwmsac_vv_bf16m2_f32m4_tum(vbool8_t vm, vfloat32m4_t vd,
+ vbfloat16m2_t vs1,
+ vbfloat16m2_t vs2, size_t vl) {
+ return __riscv_vfwmsac_vv_bf16m2_f32m4_tum(vm, vd, vs1, vs2, vl);
+}
+
+// CHECK-RV64-LABEL: define dso_local <vscale x 8 x float> @test_vfwmsac_vf_bf16m2_f32m4_tum(
+// CHECK-RV64-SAME: <vscale x 8 x i1> [[VM:%.*]], <vscale x 8 x float> [[VD:%.*]], bfloat noundef [[VS1:%.*]], <vscale x 8 x bfloat> [[VS2:%.*]], i64 noundef [[VL:%.*]]) #[[ATTR0]] {
+// CHECK-RV64-NEXT: [[ENTRY:.*:]]
+// CHECK-RV64-NEXT: [[TMP0:%.*]] = call <vscale x 8 x float> @llvm.riscv.vfwmsac.mask.nxv8f32.bf16.nxv8bf16.i64(<vscale x 8 x float> [[VD]], bfloat [[VS1]], <vscale x 8 x bfloat> [[VS2]], <vscale x 8 x i1> [[VM]], i64 7, i64 [[VL]], i64 2)
+// CHECK-RV64-NEXT: ret <vscale x 8 x float> [[TMP0]]
+//
+vfloat32m4_t test_vfwmsac_vf_bf16m2_f32m4_tum(vbool8_t vm, vfloat32m4_t vd,
+ __bf16 vs1, vbfloat16m2_t vs2,
+ size_t vl) {
+ return __riscv_vfwmsac_vf_bf16m2_f32m4_tum(vm, vd, vs1, vs2, vl);
+}
+
+// CHECK-RV64-LABEL: define dso_local <vscale x 16 x float> @test_vfwmsac_vv_bf16m4_f32m8_tum(
+// CHECK-RV64-SAME: <vscale x 16 x i1> [[VM:%.*]], <vscale x 16 x float> [[VD:%.*]], <vscale x 16 x bfloat> [[VS1:%.*]], <vscale x 16 x bfloat> [[VS2:%.*]], i64 noundef [[VL:%.*]]) #[[ATTR0]] {
+// CHECK-RV64-NEXT: [[ENTRY:.*:]]
+// CHECK-RV64-NEXT: [[TMP0:%.*]] = call <vscale x 16 x float> @llvm.riscv.vfwmsac.mask.nxv16f32.nxv16bf16.nxv16bf16.i64(<vscale x 16 x float> [[VD]], <vscale x 16 x bfloat> [[VS1]], <vscale x 16 x bfloat> [[VS2]], <vscale x 16 x i1> [[VM]], i64 7, i64 [[VL]], i64 2)
+// CHECK-RV64-NEXT: ret <vscale x 16 x float> [[TMP0]]
+//
+vfloat32m8_t test_vfwmsac_vv_bf16m4_f32m8_tum(vbool4_t vm, vfloat32m8_t vd,
+ vbfloat16m4_t vs1,
+ vbfloat16m4_t vs2, size_t vl) {
+ return __riscv_vfwmsac_vv_bf16m4_f32m8_tum(vm, vd, vs1, vs2, vl);
+}
+
+// CHECK-RV64-LABEL: define dso_local <vscale x 16 x float> @test_vfwmsac_vf_bf16m4_f32m8_tum(
+// CHECK-RV64-SAME: <vscale x 16 x i1> [[VM:%.*]], <vscale x 16 x float> [[VD:%.*]], bfloat noundef [[VS1:%.*]], <vscale x 16 x bfloat> [[VS2:%.*]], i64 noundef [[VL:%.*]]) #[[ATTR0]] {
+// CHECK-RV64-NEXT: [[ENTRY:.*:]]
+// CHECK-RV64-NEXT: [[TMP0:%.*]] = call <vscale x 16 x float> @llvm.riscv.vfwmsac.mask.nxv16f32.bf16.nxv16bf16.i64(<vscale x 16 x float> [[VD]], bfloat [[VS1]], <vscale x 16 x bfloat> [[VS2]], <vscale x 16 x i1> [[VM]], i64 7, i64 [[VL]], i64 2)
+// CHECK-RV64-NEXT: ret <vscale x 16 x float> [[TMP0]]
+//
+vfloat32m8_t test_vfwmsac_vf_bf16m4_f32m8_tum(vbool4_t vm, vfloat32m8_t vd,
+ __bf16 vs1, vbfloat16m4_t vs2,
+ size_t vl) {
+ return __riscv_vfwmsac_vf_bf16m4_f32m8_tum(vm, vd, vs1, vs2, vl);
+}
+
+// CHECK-RV64-LABEL: define dso_local <vscale x 1 x float> @test_vfwmsac_vv_bf16mf4_f32mf2_tumu(
+// CHECK-RV64-SAME: <vscale x 1 x i1> [[VM:%.*]], <vscale x 1 x float> [[VD:%.*]], <vscale x 1 x bfloat> [[VS1:%.*]], <vscale x 1 x bfloat> [[VS2:%.*]], i64 noundef [[VL:%.*]]) #[[ATTR0]] {
+// CHECK-RV64-NEXT: [[ENTRY:.*:]]
+// CHECK-RV64-NEXT: [[TMP0:%.*]] = call <vscale x 1 x float> @llvm.riscv.vfwmsac.mask.nxv1f32.nxv1bf16.nxv1bf16.i64(<vscale x 1 x float> [[VD]], <vscale x 1 x bfloat> [[VS1]], <vscale x 1 x bfloat> [[VS2]], <vscale x 1 x i1> [[VM]], i64 7, i64 [[VL]], i64 0)
+// CHECK-RV64-NEXT: ret <vscale x 1 x float> [[TMP0]]
+//
+vfloat32mf2_t test_vfwmsac_vv_bf16mf4_f32mf2_tumu(vbool64_t vm,
+ vfloat32mf2_t vd,
+ vbfloat16mf4_t vs1,
+ vbfloat16mf4_t vs2,
+ size_t vl) {
+ return __riscv_vfwmsac_vv_bf16mf4_f32mf2_tumu(vm, vd, vs1, vs2, vl);
+}
+
+// CHECK-RV64-LABEL: define dso_local <vscale x 1 x float> @test_vfwmsac_vf_bf16mf4_f32mf2_tumu(
+// CHECK-RV64-SAME: <vscale x 1 x i1> [[VM:%.*]], <vscale x 1 x float> [[VD:%.*]], bfloat noundef [[VS1:%.*]], <vscale x 1 x bfloat> [[VS2:%.*]], i64 noundef [[VL:%.*]]) #[[ATTR0]] {
+// CHECK-RV64-NEXT: [[ENTRY:.*:]]
+// CHECK-RV64-NEXT: [[TMP0:%.*]] = call <vscale x 1 x float> @llvm.riscv.vfwmsac.mask.nxv1f32.bf16.nxv1bf16.i64(<vscale x 1 x float> [[VD]], bfloat [[VS1]], <vscale x 1 x bfloat> [[VS2]], <vscale x 1 x i1> [[VM]], i64 7, i64 [[VL]], i64 0)
+// CHECK-RV64-NEXT: ret <vscale x 1 x float> [[TMP0]]
+//
+vfloat32mf2_t test_vfwmsac_vf_bf16mf4_f32mf2_tumu(vbool64_t vm,
+ vfloat32mf2_t vd, __bf16 vs1,
+ vbfloat16mf4_t vs2,
+ size_t vl) {
+ return __riscv_vfwmsac_vf_bf16mf4_f32mf2_tumu(vm, vd, vs1, vs2, vl);
+}
+
+// CHECK-RV64-LABEL: define dso_local <vscale x 2 x float> @test_vfwmsac_vv_bf16mf2_f32m1_tumu(
+// CHECK-RV64-SAME: <vscale x 2 x i1> [[VM:%.*]], <vscale x 2 x float> [[VD:%.*]], <vscale x 2 x bfloat> [[VS1:%.*]], <vscale x 2 x bfloat> [[VS2:%.*]], i64 noundef [[VL:%.*]]) #[[ATTR0]] {
+// CHECK-RV64-NEXT: [[ENTRY:.*:]]
+// CHECK-RV64-NEXT: [[TMP0:%.*]] = call <vscale x 2 x float> @llvm.riscv.vfwmsac.mask.nxv2f32.nxv2bf16.nxv2bf16.i64(<vscale x 2 x float> [[VD]], <vscale x 2 x bfloat> [[VS1]], <vscale x 2 x bfloat> [[VS2]], <vscale x 2 x i1> [[VM]], i64 7, i64 [[VL]], i64 0)
+// CHECK-RV64-NEXT: ret <vscale x 2 x float> [[TMP0]]
+//
+vfloat32m1_t test_vfwmsac_vv_bf16mf2_f32m1_tumu(vbool32_t vm, vfloat32m1_t vd,
+ vbfloat16mf2_t vs1,
+ vbfloat16mf2_t vs2, size_t vl) {
+ return __riscv_vfwmsac_vv_bf16mf2_f32m1_tumu(vm, vd, vs1, vs2, vl);
+}
+
+// CHECK-RV64-LABEL: define dso_local <vscale x 2 x float> @test_vfwmsac_vf_bf16mf2_f32m1_tumu(
+// CHECK-RV64-SAME: <vscale x 2 x i1> [[VM:%.*]], <vscale x 2 x float> [[VD:%.*]], bfloat noundef [[VS1:%.*]], <vscale x 2 x bfloat> [[VS2:%.*]], i64 noundef [[VL:%.*]]) #[[ATTR0]] {
+// CHECK-RV64-NEXT: [[ENTRY:.*:]]
+// CHECK-RV64-NEXT: [[TMP0:%.*]] = call <vscale x 2 x float> @llvm.riscv.vfwmsac.mask.nxv2f32.bf16.nxv2bf16.i64(<vscale x 2 x float> [[VD]], bfloat [[VS1]], <vscale x 2 x bfloat> [[VS2]], <vscale x 2 x i1> [[VM]], i64 7, i64 [[VL]], i64 0)
+// CHECK-RV64-NEXT: ret <vscale x 2 x float> [[TMP0]]
+//
+vfloat32m1_t test_vfwmsac_vf_bf16mf2_f32m1_tumu(vbool32_t vm, vfloat32m1_t vd,
+ __bf16 vs1, vbfloat16mf2_t vs2,
+ size_t vl) {
+ return __riscv_vfwmsac_vf_bf16mf2_f32m1_tumu(vm, vd, vs1, vs2, vl);
+}
+
+// CHECK-RV64-LABEL: define dso_local <vscale x 4 x float> @test_vfwmsac_vv_bf16m1_f32m2_tumu(
+// CHECK-RV64-SAME: <vscale x 4 x i1> [[VM:%.*]], <vscale x 4 x float> [[VD:%.*]], <vscale x 4 x bfloat> [[VS1:%.*]], <vscale x 4 x bfloat> [[VS2:%.*]], i64 noundef [[VL:%.*]]) #[[ATTR0]] {
+// CHECK-RV64-NEXT: [[ENTRY:.*:]]
+// CHECK-RV64-NEXT: [[TMP0:%.*]] = call <vscale x 4 x float> @llvm.riscv.vfwmsac.mask.nxv4f32.nxv4bf16.nxv4bf16.i64(<vscale x 4 x float> [[VD]], <vscale x 4 x bfloat> [[VS1]], <vscale x 4 x bfloat> [[VS2]], <vscale x 4 x i1> [[VM]], i64 7, i64 [[VL]], i64 0)
+// CHECK-RV64-NEXT: ret <vscale x 4 x float> [[TMP0]]
+//
+vfloat32m2_t test_vfwmsac_vv_bf16m1_f32m2_tumu(vbool16_t vm, vfloat32m2_t vd,
+ vbfloat16m1_t vs1,
+ vbfloat16m1_t vs2, size_t vl) {
+ return __riscv_vfwmsac_vv_bf16m1_f32m2_tumu(vm, vd, vs1, vs2, vl);
+}
+
+// CHECK-RV64-LABEL: define dso_local <vscale x 4 x float> @test_vfwmsac_vf_bf16m1_f32m2_tumu(
+// CHECK-RV64-SAME: <vscale x 4 x i1> [[VM:%.*]], <vscale x 4 x float> [[VD:%.*]], bfloat noundef [[VS1:%.*]], <vscale x 4 x bfloat> [[VS2:%.*]], i64 noundef [[VL:%.*]]) #[[ATTR0]] {
+// CHECK-RV64-NEXT: [[ENTRY:.*:]]
+// CHECK-RV64-NEXT: [[TMP0:%.*]] = call <vscale x 4 x float> @llvm.riscv.vfwmsac.mask.nxv4f32.bf16.nxv4bf16.i64(<vscale x 4 x float> [[VD]], bfloat [[VS1]], <vscale x 4 x bfloat> [[VS2]], <vscale x 4 x i1> [[VM]], i64 7, i64 [[VL]], i64 0)
+// CHECK-RV64-NEXT: ret <vscale x 4 x float> [[TMP0]]
+//
+vfloat32m2_t test_vfwmsac_vf_bf16m1_f32m2_tumu(vbool16_t vm, vfloat32m2_t vd,
+ __bf16 vs1, vbfloat16m1_t vs2,
+ size_t vl) {
+ return __riscv_vfwmsac_vf_bf16m1_f32m2_tumu(vm, vd, vs1, vs2, vl);
+}
+
+// CHECK-RV64-LABEL: define dso_local <vscale x 8 x float> @test_vfwmsac_vv_bf16m2_f32m4_tumu(
+// CHECK-RV64-SAME: <vscale x 8 x i1> [[VM:%.*]], <vscale x 8 x float> [[VD:%.*]], <vscale x 8 x bfloat> [[VS1:%.*]], <vscale x 8 x bfloat> [[VS2:%.*]], i64 noundef [[VL:%.*]]) #[[ATTR0]] {
+// CHECK-RV64-NEXT: [[ENTRY:.*:]]
+// CHECK-RV64-NEXT: [[TMP0:%.*]] = call <vscale x 8 x float> @llvm.riscv.vfwmsac.mask.nxv8f32.nxv8bf16.nxv8bf16.i64(<vscale x 8 x float> [[VD]], <vscale x 8 x bfloat> [[VS1]], <vscale x 8 x bfloat> [[VS2]], <vscale x 8 x i1> [[VM]], i64 7, i64 [[VL]], i64 0)
+// CHECK-RV64-NEXT: ret <vscale x 8 x float> [[TMP0]]
+//
+vfloat32m4_t test_vfwmsac_vv_bf16m2_f32m4_tumu(vbool8_t vm, vfloat32m4_t vd,
+ vbfloat16m2_t vs1,
+ vbfloat16m2_t vs2, size_t vl) {
+ return __riscv_vfwmsac_vv_bf16m2_f32m4_tumu(vm, vd, vs1, vs2, vl);
+}
+
+// CHECK-RV64-LABEL: define dso_local <vscale x 8 x float> @test_vfwmsac_vf_bf16m2_f32m4_tumu(
+// CHECK-RV64-SAME: <vscale x 8 x i1> [[VM:%.*]], <vscale x 8 x float> [[VD:%.*]], bfloat noundef [[VS1:%.*]], <vscale x 8 x bfloat> [[VS2:%.*]], i64 noundef [[VL:%.*]]) #[[ATTR0]] {
+// CHECK-RV64-NEXT: [[ENTRY:.*:]]
+// CHECK-RV64-NEXT: [[TMP0:%.*]] = call <vscale x 8 x float> @llvm.riscv.vfwmsac.mask.nxv8f32.bf16.nxv8bf16.i64(<vscale x 8 x float> [[VD]], bfloat [[VS1]], <vscale x 8 x bfloat> [[VS2]], <vscale x 8 x i1> [[VM]], i64 7, i64 [[VL]], i64 0)
+// CHECK-RV64-NEXT: ret <vscale x 8 x float> [[TMP0]]
+//
+vfloat32m4_t test_vfwmsac_vf_bf16m2_f32m4_tumu(vbool8_t vm, vfloat32m4_t vd,
+ __bf16 vs1, vbfloat16m2_t vs2,
+ size_t vl) {
+ return __riscv_vfwmsac_vf_bf16m2_f32m4_tumu(vm, vd, vs1, vs2, vl);
+}
+
+// CHECK-RV64-LABEL: define dso_local <vscale x 16 x float> @test_vfwmsac_vv_bf16m4_f32m8_tumu(
+// CHECK-RV64-SAME: <vscale x 16 x i1> [[VM:%.*]], <vscale x 16 x float> [[VD:%.*]], <vscale x 16 x bfloat> [[VS1:%.*]], <vscale x 16 x bfloat> [[VS2:%.*]], i64 noundef [[VL:%.*]]) #[[ATTR0]] {
+// CHECK-RV64-NEXT: [[ENTRY:.*:]]
+// CHECK-RV64-NEXT: [[TMP0:%.*]] = call <vscale x 16 x float> @llvm.riscv.vfwmsac.mask.nxv16f32.nxv16bf16.nxv16bf16.i64(<vscale x 16 x float> [[VD]], <vscale x 16 x bfloat> [[VS1]], <vscale x 16 x bfloat> [[VS2]], <vscale x 16 x i1> [[VM]], i64 7, i64 [[VL]], i64 0)
+// CHECK-RV64-NEXT: ret <vscale x 16 x float> [[TMP0]]
+//
+vfloat32m8_t test_vfwmsac_vv_bf16m4_f32m8_tumu(vbool4_t vm, vfloat32m8_t vd,
+ vbfloat16m4_t vs1,
+ vbfloat16m4_t vs2, size_t vl) {
+ return __riscv_vfwmsac_vv_bf16m4_f32m8_tumu(vm, vd, vs1, vs2, vl);
+}
+
+// CHECK-RV64-LABEL: define dso_local <vscale x 16 x float> @test_vfwmsac_vf_bf16m4_f32m8_tumu(
+// CHECK-RV64-SAME: <vscale x 16 x i1> [[VM:%.*]], <vscale x 16 x float> [[VD:%.*]], bfloat noundef [[VS1:%.*]], <vscale x 16 x bfloat> [[VS2:%.*]], i64 noundef [[VL:%.*]]) #[[ATTR0]] {
+// CHECK-RV64-NEXT: [[ENTRY:.*:]]
+// CHECK-RV64-NEXT: [[TMP0:%.*]] = call <vscale x 16 x float> @llvm.riscv.vfwmsac.mask.nxv16f32.bf16.nxv16bf16.i64(<vscale x 16 x float> [[VD]], bfloat [[VS1]], <vscale x 16 x bfloat> [[VS2]], <vscale x 16 x i1> [[VM]], i64 7, i64 [[VL]], i64 0)
+// CHECK-RV64-NEXT: ret <vscale x 16 x float> [[TMP0]]
+//
+vfloat32m8_t test_vfwmsac_vf_bf16m4_f32m8_tumu(vbool4_t vm, vfloat32m8_t vd,
+ __bf16 vs1, vbfloat16m4_t vs2,
+ size_t vl) {
+ return __riscv_vfwmsac_vf_bf16m4_f32m8_tumu(vm, vd, vs1, vs2, vl);
+}
+
+// CHECK-RV64-LABEL: define dso_local <vscale x 1 x float> @test_vfwmsac_vv_bf16mf4_f32mf2_mu(
+// CHECK-RV64-SAME: <vscale x 1 x i1> [[VM:%.*]], <vscale x 1 x float> [[VD:%.*]], <vscale x 1 x bfloat> [[VS1:%.*]], <vscale x 1 x bfloat> [[VS2:%.*]], i64 noundef [[VL:%.*]]) #[[ATTR0]] {
+// CHECK-RV64-NEXT: [[ENTRY:.*:]]
+// CHECK-RV64-NEXT: [[TMP0:%.*]] = call <vscale x 1 x float> @llvm.riscv.vfwmsac.mask.nxv1f32.nxv1bf16.nxv1bf16.i64(<vscale x 1 x float> [[VD]], <vscale x 1 x bfloat> [[VS1]], <vscale x 1 x bfloat> [[VS2]], <vscale x 1 x i1> [[VM]], i64 7, i64 [[VL]], i64 1)
+// CHECK-RV64-NEXT: ret <vscale x 1 x float> [[TMP0]]
+//
+vfloat32mf2_t test_vfwmsac_vv_bf16mf4_f32mf2_mu(vbool64_t vm, vfloat32mf2_t vd,
+ vbfloat16mf4_t vs1,
+ vbfloat16mf4_t vs2, size_t vl) {
+ return __riscv_vfwmsac_vv_bf16mf4_f32mf2_mu(vm, vd, vs1, vs2, vl);
+}
+
+// CHECK-RV64-LABEL: define dso_local <vscale x 1 x float> @test_vfwmsac_vf_bf16mf4_f32mf2_mu(
+// CHECK-RV64-SAME: <vscale x 1 x i1> [[VM:%.*]], <vscale x 1 x float> [[VD:%.*]], bfloat noundef [[VS1:%.*]], <vscale x 1 x bfloat> [[VS2:%.*]], i64 noundef [[VL:%.*]]) #[[ATTR0]] {
+// CHECK-RV64-NEXT: [[ENTRY:.*:]]
+// CHECK-RV64-NEXT: [[TMP0:%.*]] = call <vscale x 1 x float> @llvm.riscv.vfwmsac.mask.nxv1f32.bf16.nxv1bf16.i64(<vscale x 1 x float> [[VD]], bfloat [[VS1]], <vscale x 1 x bfloat> [[VS2]], <vscale x 1 x i1> [[VM]], i64 7, i64 [[VL]], i64 1)
+// CHECK-RV64-NEXT: ret <vscale x 1 x float> [[TMP0]]
+//
+vfloat32mf2_t test_vfwmsac_vf_bf16mf4_f32mf2_mu(vbool64_t vm, vfloat32mf2_t vd,
+ __bf16 vs1, vbfloat16mf4_t vs2,
+ size_t vl) {
+ return __riscv_vfwmsac_vf_bf16mf4_f32mf2_mu(vm, vd, vs1, vs2, vl);
+}
+
+// CHECK-RV64-LABEL: define dso_local <vscale x 2 x float> @test_vfwmsac_vv_bf16mf2_f32m1_mu(
+// CHECK-RV64-SAME: <vscale x 2 x i1> [[VM:%.*]], <vscale x 2 x float> [[VD:%.*]], <vscale x 2 x bfloat> [[VS1:%.*]], <vscale x 2 x bfloat> [[VS2:%.*]], i64 noundef [[VL:%.*]]) #[[ATTR0]] {
+// CHECK-RV64-NEXT: [[ENTRY:.*:]]
+// CHECK-RV64-NEXT: [[TMP0:%.*]] = call <vscale x 2 x float> @llvm.riscv.vfwmsac.mask.nxv2f32.nxv2bf16.nxv2bf16.i64(<vscale x 2 x float> [[VD]], <vscale x 2 x bfloat> [[VS1]], <vscale x 2 x bfloat> [[VS2]], <vscale x 2 x i1> [[VM]], i64 7, i64 [[VL]], i64 1)
+// CHECK-RV64-NEXT: ret <vscale x 2 x float> [[TMP0]]
+//
+vfloat32m1_t test_vfwmsac_vv_bf16mf2_f32m1_mu(vbool32_t vm, vfloat32m1_t vd,
+ vbfloat16mf2_t vs1,
+ vbfloat16mf2_t vs2, size_t vl) {
+ return __riscv_vfwmsac_vv_bf16mf2_f32m1_mu(vm, vd, vs1, vs2, vl);
+}
+
+// CHECK-RV64-LABEL: define dso_local <vscale x 2 x float> @test_vfwmsac_vf_bf16mf2_f32m1_mu(
+// CHECK-RV64-SAME: <vscale x 2 x i1> [[VM:%.*]], <vscale x 2 x float> [[VD:%.*]], bfloat noundef [[VS1:%.*]], <vscale x 2 x bfloat> [[VS2:%.*]], i64 noundef [[VL:%.*]]) #[[ATTR0]] {
+// CHECK-RV64-NEXT: [[ENTRY:.*:]]
+// CHECK-RV64-NEXT: [[TMP0:%.*]] = call <vscale x 2 x float> @llvm.riscv.vfwmsac.mask.nxv2f32.bf16.nxv2bf16.i64(<vscale x 2 x float> [[VD]], bfloat [[VS1]], <vscale x 2 x bfloat> [[VS2]], <vscale x 2 x i1> [[VM]], i64 7, i64 [[VL]], i64 1)
+// CHECK-RV64-NEXT: ret <vscale x 2 x float> [[TMP0]]
+//
+vfloat32m1_t test_vfwmsac_vf_bf16mf2_f32m1_mu(vbool32_t vm, vfloat32m1_t vd,
+ __bf16 vs1, vbfloat16mf2_t vs2,
+ size_t vl) {
+ return __riscv_vfwmsac_vf_bf16mf2_f32m1_mu(vm, vd, vs1, vs2, vl);
+}
+
+// CHECK-RV64-LABEL: define dso_local <vscale x 4 x float> @test_vfwmsac_vv_bf16m1_f32m2_mu(
+// CHECK-RV64-SAME: <vscale x 4 x i1> [[VM:%.*]], <vscale x 4 x float> [[VD:%.*]], <vscale x 4 x bfloat> [[VS1:%.*]], <vscale x 4 x bfloat> [[VS2:%.*]], i64 noundef [[VL:%.*]]) #[[ATTR0]] {
+// CHECK-RV64-NEXT: [[ENTRY:.*:]]
+// CHECK-RV64-NEXT: [[TMP0:%.*]] = call <vscale x 4 x float> @llvm.riscv.vfwmsac.mask.nxv4f32.nxv4bf16.nxv4bf16.i64(<vscale x 4 x float> [[VD]], <vscale x 4 x bfloat> [[VS1]], <vscale x 4 x bfloat> [[VS2]], <vscale x 4 x i1> [[VM]], i64 7, i64 [[VL]], i64 1)
+// CHECK-RV64-NEXT: ret <vscale x 4 x float> [[TMP0]]
+//
+vfloat32m2_t test_vfwmsac_vv_bf16m1_f32m2_mu(vbool16_t vm, vfloat32m2_t vd,
+ vbfloat16m1_t vs1,
+ vbfloat16m1_t vs2, size_t vl) {
+ return __riscv_vfwmsac_vv_bf16m1_f32m2_mu(vm, vd, vs1, vs2, vl);
+}
+
+// CHECK-RV64-LABEL: define dso_local <vscale x 4 x float> @test_vfwmsac_vf_bf16m1_f32m2_mu(
+// CHECK-RV64-SAME: <vscale x 4 x i1> [[VM:%.*]], <vscale x 4 x float> [[VD:%.*]], bfloat noundef [[VS1:%.*]], <vscale x 4 x bfloat> [[VS2:%.*]], i64 noundef [[VL:%.*]]) #[[ATTR0]] {
+// CHECK-RV64-NEXT: [[ENTRY:.*:]]
+// CHECK-RV64-NEXT: [[TMP0:%.*]] = call <vscale x 4 x float> @llvm.riscv.vfwmsac.mask.nxv4f32.bf16.nxv4bf16.i64(<vscale x 4 x float> [[VD]], bfloat [[VS1]], <vscale x 4 x bfloat> [[VS2]], <vscale x 4 x i1> [[VM]], i64 7, i64 [[VL]], i64 1)
+// CHECK-RV64-NEXT: ret <vscale x 4 x float> [[TMP0]]
+//
+vfloat32m2_t test_vfwmsac_vf_bf16m1_f32m2_mu(vbool16_t vm, vfloat32m2_t vd,
+ __bf16 vs1, vbfloat16m1_t vs2,
+ size_t vl) {
+ return __riscv_vfwmsac_vf_bf16m1_f32m2_mu(vm, vd, vs1, vs2, vl);
+}
+
+// CHECK-RV64-LABEL: define dso_local <vscale x 8 x float> @test_vfwmsac_vv_bf16m2_f32m4_mu(
+// CHECK-RV64-SAME: <vscale x 8 x i1> [[VM:%.*]], <vscale x 8 x float> [[VD:%.*]], <vscale x 8 x bfloat> [[VS1:%.*]], <vscale x 8 x bfloat> [[VS2:%.*]], i64 noundef [[VL:%.*]]) #[[ATTR0]] {
+// CHECK-RV64-NEXT: [[ENTRY:.*:]]
+// CHECK-RV64-NEXT: [[TMP0:%.*]] = call <vscale x 8 x float> @llvm.riscv.vfwmsac.mask.nxv8f32.nxv8bf16.nxv8bf16.i64(<vscale x 8 x float> [[VD]], <vscale x 8 x bfloat> [[VS1]], <vscale x 8 x bfloat> [[VS2]], <vscale x 8 x i1> [[VM]], i64 7, i64 [[VL]], i64 1)
+// CHECK-RV64-NEXT: ret <vscale x 8 x float> [[TMP0]]
+//
+vfloat32m4_t test_vfwmsac_vv_bf16m2_f32m4_mu(vbool8_t vm, vfloat32m4_t vd,
+ vbfloat16m2_t vs1,
+ vbfloat16m2_t vs2, size_t vl) {
+ return __riscv_vfwmsac_vv_bf16m2_f32m4_mu(vm, vd, vs1, vs2, vl);
+}
+
+// CHECK-RV64-LABEL: define dso_local <vscale x 8 x float> @test_vfwmsac_vf_bf16m2_f32m4_mu(
+// CHECK-RV64-SAME: <vscale x 8 x i1> [[VM:%.*]], <vscale x 8 x float> [[VD:%.*]], bfloat noundef [[VS1:%.*]], <vscale x 8 x bfloat> [[VS2:%.*]], i64 noundef [[VL:%.*]]) #[[ATTR0]] {
+// CHECK-RV64-NEXT: [[ENTRY:.*:]]
+// CHECK-RV64-NEXT: [[TMP0:%.*]] = call <vscale x 8 x float> @llvm.riscv.vfwmsac.mask.nxv8f32.bf16.nxv8bf16.i64(<vscale x 8 x float> [[VD]], bfloat [[VS1]], <vscale x 8 x bfloat> [[VS2]], <vscale x 8 x i1> [[VM]], i64 7, i64 [[VL]], i64 1)
+// CHECK-RV64-NEXT: ret <vscale x 8 x float> [[TMP0]]
+//
+vfloat32m4_t test_vfwmsac_vf_bf16m2_f32m4_mu(vbool8_t vm, vfloat32m4_t vd,
+ __bf16 vs1, vbfloat16m2_t vs2,
+ size_t vl) {
+ return __riscv_vfwmsac_vf_bf16m2_f32m4_mu(vm, vd, vs1, vs2, vl);
+}
+
+// CHECK-RV64-LABEL: define dso_local <vscale x 16 x float> @test_vfwmsac_vv_bf16m4_f32m8_mu(
+// CHECK-RV64-SAME: <vscale x 16 x i1> [[VM:%.*]], <vscale x 16 x float> [[VD:%.*]], <vscale x 16 x bfloat> [[VS1:%.*]], <vscale x 16 x bfloat> [[VS2:%.*]], i64 noundef [[VL:%.*]]) #[[ATTR0]] {
+// CHECK-RV64-NEXT: [[ENTRY:.*:]]
+// CHECK-RV64-NEXT: [[TMP0:%.*]] = call <vscale x 16 x float> @llvm.riscv.vfwmsac.mask.nxv16f32.nxv16bf16.nxv16bf16.i64(<vscale x 16 x float> [[VD]], <vscale x 16 x bfloat> [[VS1]], <vscale x 16 x bfloat> [[VS2]], <vscale x 16 x i1> [[VM]], i64 7, i64 [[VL]], i64 1)
+// CHECK-RV64-NEXT: ret <vscale x 16 x float> [[TMP0]]
+//
+vfloat32m8_t test_vfwmsac_vv_bf16m4_f32m8_mu(vbool4_t vm, vfloat32m8_t vd,
+ vbfloat16m4_t vs1,
+ vbfloat16m4_t vs2, size_t vl) {
+ return __riscv_vfwmsac_vv_bf16m4_f32m8_mu(vm, vd, vs1, vs2, vl);
+}
+
+// CHECK-RV64-LABEL: define dso_local <vscale x 16 x float> @test_vfwmsac_vf_bf16m4_f32m8_mu(
+// CHECK-RV64-SAME: <vscale x 16 x i1> [[VM:%.*]], <vscale x 16 x float> [[VD:%.*]], bfloat noundef [[VS1:%.*]], <vscale x 16 x bfloat> [[VS2:%.*]], i64 noundef [[VL:%.*]]) #[[ATTR0]] {
+// CHECK-RV64-NEXT: [[ENTRY:.*:]]
+// CHECK-RV64-NEXT: [[TMP0:%.*]] = call <vscale x 16 x float> @llvm.riscv.vfwmsac.mask.nxv16f32.bf16.nxv16bf16.i64(<vscale x 16 x float> [[VD]], bfloat [[VS1]], <vscale x 16 x bfloat> [[VS2]], <vscale x 16 x i1> [[VM]], i64 7, i64 [[VL]], i64 1)
+// CHECK-RV64-NEXT: ret <vscale x 16 x float> [[TMP0]]
+//
+vfloat32m8_t test_vfwmsac_vf_bf16m4_f32m8_mu(vbool4_t vm, vfloat32m8_t vd,
+ __bf16 vs1, vbfloat16m4_t vs2,
+ size_t vl) {
+ return __riscv_vfwmsac_vf_bf16m4_f32m8_mu(vm, vd, vs1, vs2, vl);
+}
+
+// CHECK-RV64-LABEL: define dso_local <vscale x 1 x float> @test_vfwmsac_vv_bf16mf4_f32mf2_rm_tu(
+// CHECK-RV64-SAME: <vscale x 1 x float> [[VD:%.*]], <vscale x 1 x bfloat> [[VS1:%.*]], <vscale x 1 x bfloat> [[VS2:%.*]], i64 noundef [[VL:%.*]]) #[[ATTR0]] {
+// CHECK-RV64-NEXT: [[ENTRY:.*:]]
+// CHECK-RV64-NEXT: [[TMP0:%.*]] = call <vscale x 1 x float> @llvm.riscv.vfwmsac.nxv1f32.nxv1bf16.nxv1bf16.i64(<vscale x 1 x float> [[VD]], <vscale x 1 x bfloat> [[VS1]], <vscale x 1 x bfloat> [[VS2]], i64 0, i64 [[VL]], i64 2)
+// CHECK-RV64-NEXT: ret <vscale x 1 x float> [[TMP0]]
+//
+vfloat32mf2_t test_vfwmsac_vv_bf16mf4_f32mf2_rm_tu(vfloat32mf2_t vd,
+ vbfloat16mf4_t vs1,
+ vbfloat16mf4_t vs2,
+ size_t vl) {
+ return __riscv_vfwmsac_vv_bf16mf4_f32mf2_rm_tu(vd, vs1, vs2, __RISCV_FRM_RNE,
+ vl);
+}
+
+// CHECK-RV64-LABEL: define dso_local <vscale x 1 x float> @test_vfwmsac_vf_bf16mf4_f32mf2_rm_tu(
+// CHECK-RV64-SAME: <vscale x 1 x float> [[VD:%.*]], bfloat noundef [[VS1:%.*]], <vscale x 1 x bfloat> [[VS2:%.*]], i64 noundef [[VL:%.*]]) #[[ATTR0]] {
+// CHECK-RV64-NEXT: [[ENTRY:.*:]]
+// CHECK-RV64-NEXT: [[TMP0:%.*]] = call <vscale x 1 x float> @llvm.riscv.vfwmsac.nxv1f32.bf16.nxv1bf16.i64(<vscale x 1 x float> [[VD]], bfloat [[VS1]], <vscale x 1 x bfloat> [[VS2]], i64 0, i64 [[VL]], i64 2)
+// CHECK-RV64-NEXT: ret <vscale x 1 x float> [[TMP0]]
+//
+vfloat32mf2_t test_vfwmsac_vf_bf16mf4_f32mf2_rm_tu(vfloat32mf2_t vd, __bf16 vs1,
+ vbfloat16mf4_t vs2,
+ size_t vl) {
+ return __riscv_vfwmsac_vf_bf16mf4_f32mf2_rm_tu(vd, vs1, vs2, __RISCV_FRM_RNE,
+ vl);
+}
+
+// CHECK-RV64-LABEL: define dso_local <vscale x 2 x float> @test_vfwmsac_vv_bf16mf2_f32m1_rm_tu(
+// CHECK-RV64-SAME: <vscale x 2 x float> [[VD:%.*]], <vscale x 2 x bfloat> [[VS1:%.*]], <vscale x 2 x bfloat> [[VS2:%.*]], i64 noundef [[VL:%.*]]) #[[ATTR0]] {
+// CHECK-RV64-NEXT: [[ENTRY:.*:]]
+// CHECK-RV64-NEXT: [[TMP0:%.*]] = call <vscale x 2 x float> @llvm.riscv.vfwmsac.nxv2f32.nxv2bf16.nxv2bf16.i64(<vscale x 2 x float> [[VD]], <vscale x 2 x bfloat> [[VS1]], <vscale x 2 x bfloat> [[VS2]], i64 0, i64 [[VL]], i64 2)
+// CHECK-RV64-NEXT: ret <vscale x 2 x float> [[TMP0]]
+//
+vfloat32m1_t test_vfwmsac_vv_bf16mf2_f32m1_rm_tu(vfloat32m1_t vd,
+ vbfloat16mf2_t vs1,
+ vbfloat16mf2_t vs2,
+ size_t vl) {
+ return __riscv_vfwmsac_vv_bf16mf2_f32m1_rm_tu(vd, vs1, vs2, __RISCV_FRM_RNE,
+ vl);
+}
+
+// CHECK-RV64-LABEL: define dso_local <vscale x 2 x float> @test_vfwmsac_vf_bf16mf2_f32m1_rm_tu(
+// CHECK-RV64-SAME: <vscale x 2 x float> [[VD:%.*]], bfloat noundef [[VS1:%.*]], <vscale x 2 x bfloat> [[VS2:%.*]], i64 noundef [[VL:%.*]]) #[[ATTR0]] {
+// CHECK-RV64-NEXT: [[ENTRY:.*:]]
+// CHECK-RV64-NEXT: [[TMP0:%.*]] = call <vscale x 2 x float> @llvm.riscv.vfwmsac.nxv2f32.bf16.nxv2bf16.i64(<vscale x 2 x float> [[VD]], bfloat [[VS1]], <vscale x 2 x bfloat> [[VS2]], i64 0, i64 [[VL]], i64 2)
+// CHECK-RV64-NEXT: ret <vscale x 2 x float> [[TMP0]]
+//
+vfloat32m1_t test_vfwmsac_vf_bf16mf2_f32m1_rm_tu(vfloat32m1_t vd, __bf16 vs1,
+ vbfloat16mf2_t vs2,
+ size_t vl) {
+ return __riscv_vfwmsac_vf_bf16mf2_f32m1_rm_tu(vd, vs1, vs2, __RISCV_FRM_RNE,
+ vl);
+}
+
+// CHECK-RV64-LABEL: define dso_local <vscale x 4 x float> @test_vfwmsac_vv_bf16m1_f32m2_rm_tu(
+// CHECK-RV64-SAME: <vscale x 4 x float> [[VD:%.*]], <vscale x 4 x bfloat> [[VS1:%.*]], <vscale x 4 x bfloat> [[VS2:%.*]], i64 noundef [[VL:%.*]]) #[[ATTR0]] {
+// CHECK-RV64-NEXT: [[ENTRY:.*:]]
+// CHECK-RV64-NEXT: [[TMP0:%.*]] = call <vscale x 4 x float> @llvm.riscv.vfwmsac.nxv4f32.nxv4bf16.nxv4bf16.i64(<vscale x 4 x float> [[VD]], <vscale x 4 x bfloat> [[VS1]], <vscale x 4 x bfloat> [[VS2]], i64 0, i64 [[VL]], i64 2)
+// CHECK-RV64-NEXT: ret <vscale x 4 x float> [[TMP0]]
+//
+vfloat32m2_t test_vfwmsac_vv_bf16m1_f32m2_rm_tu(vfloat32m2_t vd,
+ vbfloat16m1_t vs1,
+ vbfloat16m1_t vs2, size_t vl) {
+ return __riscv_vfwmsac_vv_bf16m1_f32m2_rm_tu(vd, vs1, vs2, __RISCV_FRM_RNE,
+ vl);
+}
+
+// CHECK-RV64-LABEL: define dso_local <vscale x 4 x float> @test_vfwmsac_vf_bf16m1_f32m2_rm_tu(
+// CHECK-RV64-SAME: <vscale x 4 x float> [[VD:%.*]], bfloat noundef [[VS1:%.*]], <vscale x 4 x bfloat> [[VS2:%.*]], i64 noundef [[VL:%.*]]) #[[ATTR0]] {
+// CHECK-RV64-NEXT: [[ENTRY:.*:]]
+// CHECK-RV64-NEXT: [[TMP0:%.*]] = call <vscale x 4 x float> @llvm.riscv.vfwmsac.nxv4f32.bf16.nxv4bf16.i64(<vscale x 4 x float> [[VD]], bfloat [[VS1]], <vscale x 4 x bfloat> [[VS2]], i64 0, i64 [[VL]], i64 2)
+// CHECK-RV64-NEXT: ret <vscale x 4 x float> [[TMP0]]
+//
+vfloat32m2_t test_vfwmsac_vf_bf16m1_f32m2_rm_tu(vfloat32m2_t vd, __bf16 vs1,
+ vbfloat16m1_t vs2, size_t vl) {
+ return __riscv_vfwmsac_vf_bf16m1_f32m2_rm_tu(vd, vs1, vs2, __RISCV_FRM_RNE,
+ vl);
+}
+
+// CHECK-RV64-LABEL: define dso_local <vscale x 8 x float> @test_vfwmsac_vv_bf16m2_f32m4_rm_tu(
+// CHECK-RV64-SAME: <vscale x 8 x float> [[VD:%.*]], <vscale x 8 x bfloat> [[VS1:%.*]], <vscale x 8 x bfloat> [[VS2:%.*]], i64 noundef [[VL:%.*]]) #[[ATTR0]] {
+// CHECK-RV64-NEXT: [[ENTRY:.*:]]
+// CHECK-RV64-NEXT: [[TMP0:%.*]] = call <vscale x 8 x float> @llvm.riscv.vfwmsac.nxv8f32.nxv8bf16.nxv8bf16.i64(<vscale x 8 x float> [[VD]], <vscale x 8 x bfloat> [[VS1]], <vscale x 8 x bfloat> [[VS2]], i64 0, i64 [[VL]], i64 2)
+// CHECK-RV64-NEXT: ret <vscale x 8 x float> [[TMP0]]
+//
+vfloat32m4_t test_vfwmsac_vv_bf16m2_f32m4_rm_tu(vfloat32m4_t vd,
+ vbfloat16m2_t vs1,
+ vbfloat16m2_t vs2, size_t vl) {
+ return __riscv_vfwmsac_vv_bf16m2_f32m4_rm_tu(vd, vs1, vs2, __RISCV_FRM_RNE,
+ vl);
+}
+
+// CHECK-RV64-LABEL: define dso_local <vscale x 8 x float> @test_vfwmsac_vf_bf16m2_f32m4_rm_tu(
+// CHECK-RV64-SAME: <vscale x 8 x float> [[VD:%.*]], bfloat noundef [[VS1:%.*]], <vscale x 8 x bfloat> [[VS2:%.*]], i64 noundef [[VL:%.*]]) #[[ATTR0]] {
+// CHECK-RV64-NEXT: [[ENTRY:.*:]]
+// CHECK-RV64-NEXT: [[TMP0:%.*]] = call <vscale x 8 x float> @llvm.riscv.vfwmsac.nxv8f32.bf16.nxv8bf16.i64(<vscale x 8 x float> [[VD]], bfloat [[VS1]], <vscale x 8 x bfloat> [[VS2]], i64 0, i64 [[VL]], i64 2)
+// CHECK-RV64-NEXT: ret <vscale x 8 x float> [[TMP0]]
+//
+vfloat32m4_t test_vfwmsac_vf_bf16m2_f32m4_rm_tu(vfloat32m4_t vd, __bf16 vs1,
+ vbfloat16m2_t vs2, size_t vl) {
+ return __riscv_vfwmsac_vf_bf16m2_f32m4_rm_tu(vd, vs1, vs2, __RISCV_FRM_RNE,
+ vl);
+}
+
+// CHECK-RV64-LABEL: define dso_local <vscale x 16 x float> @test_vfwmsac_vv_bf16m4_f32m8_rm_tu(
+// CHECK-RV64-SAME: <vscale x 16 x float> [[VD:%.*]], <vscale x 16 x bfloat> [[VS1:%.*]], <vscale x 16 x bfloat> [[VS2:%.*]], i64 noundef [[VL:%.*]]) #[[ATTR0]] {
+// CHECK-RV64-NEXT: [[ENTRY:.*:]]
+// CHECK-RV64-NEXT: [[TMP0:%.*]] = call <vscale x 16 x float> @llvm.riscv.vfwmsac.nxv16f32.nxv16bf16.nxv16bf16.i64(<vscale x 16 x float> [[VD]], <vscale x 16 x bfloat> [[VS1]], <vscale x 16 x bfloat> [[VS2]], i64 0, i64 [[VL]], i64 2)
+// CHECK-RV64-NEXT: ret <vscale x 16 x float> [[TMP0]]
+//
+vfloat32m8_t test_vfwmsac_vv_bf16m4_f32m8_rm_tu(vfloat32m8_t vd,
+ vbfloat16m4_t vs1,
+ vbfloat16m4_t vs2, size_t vl) {
+ return __riscv_vfwmsac_vv_bf16m4_f32m8_rm_tu(vd, vs1, vs2, __RISCV_FRM_RNE,
+ vl);
+}
+
+// CHECK-RV64-LABEL: define dso_local <vscale x 16 x float> @test_vfwmsac_vf_bf16m4_f32m8_rm_tu(
+// CHECK-RV64-SAME: <vscale x 16 x float> [[VD:%.*]], bfloat noundef [[VS1:%.*]], <vscale x 16 x bfloat> [[VS2:%.*]], i64 noundef [[VL:%.*]]) #[[ATTR0]] {
+// CHECK-RV64-NEXT: [[ENTRY:.*:]]
+// CHECK-RV64-NEXT: [[TMP0:%.*]] = call <vscale x 16 x float> @llvm.riscv.vfwmsac.nxv16f32.bf16.nxv16bf16.i64(<vscale x 16 x float> [[VD]], bfloat [[VS1]], <vscale x 16 x bfloat> [[VS2]], i64 0, i64 [[VL]], i64 2)
+// CHECK-RV64-NEXT: ret <vscale x 16 x float> [[TMP0]]
+//
+vfloat32m8_t test_vfwmsac_vf_bf16m4_f32m8_rm_tu(vfloat32m8_t vd, __bf16 vs1,
+ vbfloat16m4_t vs2, size_t vl) {
+ return __riscv_vfwmsac_vf_bf16m4_f32m8_rm_tu(vd, vs1, vs2, __RISCV_FRM_RNE,
+ vl);
+}
+
+// CHECK-RV64-LABEL: define dso_local <vscale x 1 x float> @test_vfwmsac_vv_bf16mf4_f32mf2_rm_tum(
+// CHECK-RV64-SAME: <vscale x 1 x i1> [[VM:%.*]], <vscale x 1 x float> [[VD:%.*]], <vscale x 1 x bfloat> [[VS1:%.*]], <vscale x 1 x bfloat> [[VS2:%.*]], i64 noundef [[VL:%.*]]) #[[ATTR0]] {
+// CHECK-RV64-NEXT: [[ENTRY:.*:]]
+// CHECK-RV64-NEXT: [[TMP0:%.*]] = call <vscale x 1 x float> @llvm.riscv.vfwmsac.mask.nxv1f32.nxv1bf16.nxv1bf16.i64(<vscale x 1 x float> [[VD]], <vscale x 1 x bfloat> [[VS1]], <vscale x 1 x bfloat> [[VS2]], <vscale x 1 x i1> [[VM]], i64 0, i64 [[VL]], i64 2)
+// CHECK-RV64-NEXT: ret <vscale x 1 x float> [[TMP0]]
+//
+vfloat32mf2_t test_vfwmsac_vv_bf16mf4_f32mf2_rm_tum(vbool64_t vm,
+ vfloat32mf2_t vd,
+ vbfloat16mf4_t vs1,
+ vbfloat16mf4_t vs2,
+ size_t vl) {
+ return __riscv_vfwmsac_vv_bf16mf4_f32mf2_rm_tum(vm, vd, vs1, vs2,
+ __RISCV_FRM_RNE, vl);
+}
+
+// CHECK-RV64-LABEL: define dso_local <vscale x 1 x float> @test_vfwmsac_vf_bf16mf4_f32mf2_rm_tum(
+// CHECK-RV64-SAME: <vscale x 1 x i1> [[VM:%.*]], <vscale x 1 x float> [[VD:%.*]], bfloat noundef [[VS1:%.*]], <vscale x 1 x bfloat> [[VS2:%.*]], i64 noundef [[VL:%.*]]) #[[ATTR0]] {
+// CHECK-RV64-NEXT: [[ENTRY:.*:]]
+// CHECK-RV64-NEXT: [[TMP0:%.*]] = call <vscale x 1 x float> @llvm.riscv.vfwmsac.mask.nxv1f32.bf16.nxv1bf16.i64(<vscale x 1 x float> [[VD]], bfloat [[VS1]], <vscale x 1 x bfloat> [[VS2]], <vscale x 1 x i1> [[VM]], i64 0, i64 [[VL]], i64 2)
+// CHECK-RV64-NEXT: ret <vscale x 1 x float> [[TMP0]]
+//
+vfloat32mf2_t test_vfwmsac_vf_bf16mf4_f32mf2_rm_tum(
+ vbool64_t vm, vfloat32mf2_t vd, __bf16 vs1, vbfloat16mf4_t vs2, size_t vl) {
+ return __riscv_vfwmsac_vf_bf16mf4_f32mf2_rm_tum(vm, vd, vs1, vs2,
+ __RISCV_FRM_RNE, vl);
+}
+
+// CHECK-RV64-LABEL: define dso_local <vscale x 2 x float> @test_vfwmsac_vv_bf16mf2_f32m1_rm_tum(
+// CHECK-RV64-SAME: <vscale x 2 x i1> [[VM:%.*]], <vscale x 2 x float> [[VD:%.*]], <vscale x 2 x bfloat> [[VS1:%.*]], <vscale x 2 x bfloat> [[VS2:%.*]], i64 noundef [[VL:%.*]]) #[[ATTR0]] {
+// CHECK-RV64-NEXT: [[ENTRY:.*:]]
+// CHECK-RV64-NEXT: [[TMP0:%.*]] = call <vscale x 2 x float> @llvm.riscv.vfwmsac.mask.nxv2f32.nxv2bf16.nxv2bf16.i64(<vscale x 2 x float> [[VD]], <vscale x 2 x bfloat> [[VS1]], <vscale x 2 x bfloat> [[VS2]], <vscale x 2 x i1> [[VM]], i64 0, i64 [[VL]], i64 2)
+// CHECK-RV64-NEXT: ret <vscale x 2 x float> [[TMP0]]
+//
+vfloat32m1_t test_vfwmsac_vv_bf16mf2_f32m1_rm_tum(vbool32_t vm, vfloat32m1_t vd,
+ vbfloat16mf2_t vs1,
+ vbfloat16mf2_t vs2,
+ size_t vl) {
+ return __riscv_vfwmsac_vv_bf16mf2_f32m1_rm_tum(vm, vd, vs1, vs2,
+ __RISCV_FRM_RNE, vl);
+}
+
+// CHECK-RV64-LABEL: define dso_local <vscale x 2 x float> @test_vfwmsac_vf_bf16mf2_f32m1_rm_tum(
+// CHECK-RV64-SAME: <vscale x 2 x i1> [[VM:%.*]], <vscale x 2 x float> [[VD:%.*]], bfloat noundef [[VS1:%.*]], <vscale x 2 x bfloat> [[VS2:%.*]], i64 noundef [[VL:%.*]]) #[[ATTR0]] {
+// CHECK-RV64-NEXT: [[ENTRY:.*:]]
+// CHECK-RV64-NEXT: [[TMP0:%.*]] = call <vscale x 2 x float> @llvm.riscv.vfwmsac.mask.nxv2f32.bf16.nxv2bf16.i64(<vscale x 2 x float> [[VD]], bfloat [[VS1]], <vscale x 2 x bfloat> [[VS2]], <vscale x 2 x i1> [[VM]], i64 0, i64 [[VL]], i64 2)
+// CHECK-RV64-NEXT: ret <vscale x 2 x float> [[TMP0]]
+//
+vfloat32m1_t test_vfwmsac_vf_bf16mf2_f32m1_rm_tum(vbool32_t vm, vfloat32m1_t vd,
+ __bf16 vs1,
+ vbfloat16mf2_t vs2,
+ size_t vl) {
+ return __riscv_vfwmsac_vf_bf16mf2_f32m1_rm_tum(vm, vd, vs1, vs2,
+ __RISCV_FRM_RNE, vl);
+}
+
+// CHECK-RV64-LABEL: define dso_local <vscale x 4 x float> @test_vfwmsac_vv_bf16m1_f32m2_rm_tum(
+// CHECK-RV64-SAME: <vscale x 4 x i1> [[VM:%.*]], <vscale x 4 x float> [[VD:%.*]], <vscale x 4 x bfloat> [[VS1:%.*]], <vscale x 4 x bfloat> [[VS2:%.*]], i64 noundef [[VL:%.*]]) #[[ATTR0]] {
+// CHECK-RV64-NEXT: [[ENTRY:.*:]]
+// CHECK-RV64-NEXT: [[TMP0:%.*]] = call <vscale x 4 x float> @llvm.riscv.vfwmsac.mask.nxv4f32.nxv4bf16.nxv4bf16.i64(<vscale x 4 x float> [[VD]], <vscale x 4 x bfloat> [[VS1]], <vscale x 4 x bfloat> [[VS2]], <vscale x 4 x i1> [[VM]], i64 0, i64 [[VL]], i64 2)
+// CHECK-RV64-NEXT: ret <vscale x 4 x float> [[TMP0]]
+//
+vfloat32m2_t test_vfwmsac_vv_bf16m1_f32m2_rm_tum(vbool16_t vm, vfloat32m2_t vd,
+ vbfloat16m1_t vs1,
+ vbfloat16m1_t vs2, size_t vl) {
+ return __riscv_vfwmsac_vv_bf16m1_f32m2_rm_tum(vm, vd, vs1, vs2,
+ __RISCV_FRM_RNE, vl);
+}
+
+// CHECK-RV64-LABEL: define dso_local <vscale x 4 x float> @test_vfwmsac_vf_bf16m1_f32m2_rm_tum(
+// CHECK-RV64-SAME: <vscale x 4 x i1> [[VM:%.*]], <vscale x 4 x float> [[VD:%.*]], bfloat noundef [[VS1:%.*]], <vscale x 4 x bfloat> [[VS2:%.*]], i64 noundef [[VL:%.*]]) #[[ATTR0]] {
+// CHECK-RV64-NEXT: [[ENTRY:.*:]]
+// CHECK-RV64-NEXT: [[TMP0:%.*]] = call <vscale x 4 x float> @llvm.riscv.vfwmsac.mask.nxv4f32.bf16.nxv4bf16.i64(<vscale x 4 x float> [[VD]], bfloat [[VS1]], <vscale x 4 x bfloat> [[VS2]], <vscale x 4 x i1> [[VM]], i64 0, i64 [[VL]], i64 2)
+// CHECK-RV64-NEXT: ret <vscale x 4 x float> [[TMP0]]
+//
+vfloat32m2_t test_vfwmsac_vf_bf16m1_f32m2_rm_tum(vbool16_t vm, vfloat32m2_t vd,
+ __bf16 vs1, vbfloat16m1_t vs2,
+ size_t vl) {
+ return __riscv_vfwmsac_vf_bf16m1_f32m2_rm_tum(vm, vd, vs1, vs2,
+ __RISCV_FRM_RNE, vl);
+}
+
+// CHECK-RV64-LABEL: define dso_local <vscale x 8 x float> @test_vfwmsac_vv_bf16m2_f32m4_rm_tum(
+// CHECK-RV64-SAME: <vscale x 8 x i1> [[VM:%.*]], <vscale x 8 x float> [[VD:%.*]], <vscale x 8 x bfloat> [[VS1:%.*]], <vscale x 8 x bfloat> [[VS2:%.*]], i64 noundef [[VL:%.*]]) #[[ATTR0]] {
+// CHECK-RV64-NEXT: [[ENTRY:.*:]]
+// CHECK-RV64-NEXT: [[TMP0:%.*]] = call <vscale x 8 x float> @llvm.riscv.vfwmsac.mask.nxv8f32.nxv8bf16.nxv8bf16.i64(<vscale x 8 x float> [[VD]], <vscale x 8 x bfloat> [[VS1]], <vscale x 8 x bfloat> [[VS2]], <vscale x 8 x i1> [[VM]], i64 0, i64 [[VL]], i64 2)
+// CHECK-RV64-NEXT: ret <vscale x 8 x float> [[TMP0]]
+//
+vfloat32m4_t test_vfwmsac_vv_bf16m2_f32m4_rm_tum(vbool8_t vm, vfloat32m4_t vd,
+ vbfloat16m2_t vs1,
+ vbfloat16m2_t vs2, size_t vl) {
+ return __riscv_vfwmsac_vv_bf16m2_f32m4_rm_tum(vm, vd, vs1, vs2,
+ __RISCV_FRM_RNE, vl);
+}
+
+// CHECK-RV64-LABEL: define dso_local <vscale x 8 x float> @test_vfwmsac_vf_bf16m2_f32m4_rm_tum(
+// CHECK-RV64-SAME: <vscale x 8 x i1> [[VM:%.*]], <vscale x 8 x float> [[VD:%.*]], bfloat noundef [[VS1:%.*]], <vscale x 8 x bfloat> [[VS2:%.*]], i64 noundef [[VL:%.*]]) #[[ATTR0]] {
+// CHECK-RV64-NEXT: [[ENTRY:.*:]]
+// CHECK-RV64-NEXT: [[TMP0:%.*]] = call <vscale x 8 x float> @llvm.riscv.vfwmsac.mask.nxv8f32.bf16.nxv8bf16.i64(<vscale x 8 x float> [[VD]], bfloat [[VS1]], <vscale x 8 x bfloat> [[VS2]], <vscale x 8 x i1> [[VM]], i64 0, i64 [[VL]], i64 2)
+// CHECK-RV64-NEXT: ret <vscale x 8 x float> [[TMP0]]
+//
+vfloat32m4_t test_vfwmsac_vf_bf16m2_f32m4_rm_tum(vbool8_t vm, vfloat32m4_t vd,
+ __bf16 vs1, vbfloat16m2_t vs2,
+ size_t vl) {
+ return __riscv_vfwmsac_vf_bf16m2_f32m4_rm_tum(vm, vd, vs1, vs2,
+ __RISCV_FRM_RNE, vl);
+}
+
+// CHECK-RV64-LABEL: define dso_local <vscale x 16 x float> @test_vfwmsac_vv_bf16m4_f32m8_rm_tum(
+// CHECK-RV64-SAME: <vscale x 16 x i1> [[VM:%.*]], <vscale x 16 x float> [[VD:%.*]], <vscale x 16 x bfloat> [[VS1:%.*]], <vscale x 16 x bfloat> [[VS2:%.*]], i64 noundef [[VL:%.*]]) #[[ATTR0]] {
+// CHECK-RV64-NEXT: [[ENTRY:.*:]]
+// CHECK-RV64-NEXT: [[TMP0:%.*]] = call <vscale x 16 x float> @llvm.riscv.vfwmsac.mask.nxv16f32.nxv16bf16.nxv16bf16.i64(<vscale x 16 x float> [[VD]], <vscale x 16 x bfloat> [[VS1]], <vscale x 16 x bfloat> [[VS2]], <vscale x 16 x i1> [[VM]], i64 0, i64 [[VL]], i64 2)
+// CHECK-RV64-NEXT: ret <vscale x 16 x float> [[TMP0]]
+//
+vfloat32m8_t test_vfwmsac_vv_bf16m4_f32m8_rm_tum(vbool4_t vm, vfloat32m8_t vd,
+ vbfloat16m4_t vs1,
+ vbfloat16m4_t vs2, size_t vl) {
+ return __riscv_vfwmsac_vv_bf16m4_f32m8_rm_tum(vm, vd, vs1, vs2,
+ __RISCV_FRM_RNE, vl);
+}
+
+// CHECK-RV64-LABEL: define dso_local <vscale x 16 x float> @test_vfwmsac_vf_bf16m4_f32m8_rm_tum(
+// CHECK-RV64-SAME: <vscale x 16 x i1> [[VM:%.*]], <vscale x 16 x float> [[VD:%.*]], bfloat noundef [[VS1:%.*]], <vscale x 16 x bfloat> [[VS2:%.*]], i64 noundef [[VL:%.*]]) #[[ATTR0]] {
+// CHECK-RV64-NEXT: [[ENTRY:.*:]]
+// CHECK-RV64-NEXT: [[TMP0:%.*]] = call <vscale x 16 x float> @llvm.riscv.vfwmsac.mask.nxv16f32.bf16.nxv16bf16.i64(<vscale x 16 x float> [[VD]], bfloat [[VS1]], <vscale x 16 x bfloat> [[VS2]], <vscale x 16 x i1> [[VM]], i64 0, i64 [[VL]], i64 2)
+// CHECK-RV64-NEXT: ret <vscale x 16 x float> [[TMP0]]
+//
+vfloat32m8_t test_vfwmsac_vf_bf16m4_f32m8_rm_tum(vbool4_t vm, vfloat32m8_t vd,
+ __bf16 vs1, vbfloat16m4_t vs2,
+ size_t vl) {
+ return __riscv_vfwmsac_vf_bf16m4_f32m8_rm_tum(vm, vd, vs1, vs2,
+ __RISCV_FRM_RNE, vl);
+}
+
+// CHECK-RV64-LABEL: define dso_local <vscale x 1 x float> @test_vfwmsac_vv_bf16mf4_f32mf2_rm_tumu(
+// CHECK-RV64-SAME: <vscale x 1 x i1> [[VM:%.*]], <vscale x 1 x float> [[VD:%.*]], <vscale x 1 x bfloat> [[VS1:%.*]], <vscale x 1 x bfloat> [[VS2:%.*]], i64 noundef [[VL:%.*]]) #[[ATTR0]] {
+// CHECK-RV64-NEXT: [[ENTRY:.*:]]
+// CHECK-RV64-NEXT: [[TMP0:%.*]] = call <vscale x 1 x float> @llvm.riscv.vfwmsac.mask.nxv1f32.nxv1bf16.nxv1bf16.i64(<vscale x 1 x float> [[VD]], <vscale x 1 x bfloat> [[VS1]], <vscale x 1 x bfloat> [[VS2]], <vscale x 1 x i1> [[VM]], i64 0, i64 [[VL]], i64 0)
+// CHECK-RV64-NEXT: ret <vscale x 1 x float> [[TMP0]]
+//
+vfloat32mf2_t test_vfwmsac_vv_bf16mf4_f32mf2_rm_tumu(vbool64_t vm,
+ vfloat32mf2_t vd,
+ vbfloat16mf4_t vs1,
+ vbfloat16mf4_t vs2,
+ size_t vl) {
+ return __riscv_vfwmsac_vv_bf16mf4_f32mf2_rm_tumu(vm, vd, vs1, vs2,
+ __RISCV_FRM_RNE, vl);
+}
+
+// CHECK-RV64-LABEL: define dso_local <vscale x 1 x float> @test_vfwmsac_vf_bf16mf4_f32mf2_rm_tumu(
+// CHECK-RV64-SAME: <vscale x 1 x i1> [[VM:%.*]], <vscale x 1 x float> [[VD:%.*]], bfloat noundef [[VS1:%.*]], <vscale x 1 x bfloat> [[VS2:%.*]], i64 noundef [[VL:%.*]]) #[[ATTR0]] {
+// CHECK-RV64-NEXT: [[ENTRY:.*:]]
+// CHECK-RV64-NEXT: [[TMP0:%.*]] = call <vscale x 1 x float> @llvm.riscv.vfwmsac.mask.nxv1f32.bf16.nxv1bf16.i64(<vscale x 1 x float> [[VD]], bfloat [[VS1]], <vscale x 1 x bfloat> [[VS2]], <vscale x 1 x i1> [[VM]], i64 0, i64 [[VL]], i64 0)
+// CHECK-RV64-NEXT: ret <vscale x 1 x float> [[TMP0]]
+//
+vfloat32mf2_t test_vfwmsac_vf_bf16mf4_f32mf2_rm_tumu(
+ vbool64_t vm, vfloat32mf2_t vd, __bf16 vs1, vbfloat16mf4_t vs2, size_t vl) {
+ return __riscv_vfwmsac_vf_bf16mf4_f32mf2_rm_tumu(vm, vd, vs1, vs2,
+ __RISCV_FRM_RNE, vl);
+}
+
+// CHECK-RV64-LABEL: define dso_local <vscale x 2 x float> @test_vfwmsac_vv_bf16mf2_f32m1_rm_tumu(
+// CHECK-RV64-SAME: <vscale x 2 x i1> [[VM:%.*]], <vscale x 2 x float> [[VD:%.*]], <vscale x 2 x bfloat> [[VS1:%.*]], <vscale x 2 x bfloat> [[VS2:%.*]], i64 noundef [[VL:%.*]]) #[[ATTR0]] {
+// CHECK-RV64-NEXT: [[ENTRY:.*:]]
+// CHECK-RV64-NEXT: [[TMP0:%.*]] = call <vscale x 2 x float> @llvm.riscv.vfwmsac.mask.nxv2f32.nxv2bf16.nxv2bf16.i64(<vscale x 2 x float> [[VD]], <vscale x 2 x bfloat> [[VS1]], <vscale x 2 x bfloat> [[VS2]], <vscale x 2 x i1> [[VM]], i64 0, i64 [[VL]], i64 0)
+// CHECK-RV64-NEXT: ret <vscale x 2 x float> [[TMP0]]
+//
+vfloat32m1_t test_vfwmsac_vv_bf16mf2_f32m1_rm_tumu(vbool32_t vm,
+ vfloat32m1_t vd,
+ vbfloat16mf2_t vs1,
+ vbfloat16mf2_t vs2,
+ size_t vl) {
+ return __riscv_vfwmsac_vv_bf16mf2_f32m1_rm_tumu(vm, vd, vs1, vs2,
+ __RISCV_FRM_RNE, vl);
+}
+
+// CHECK-RV64-LABEL: define dso_local <vscale x 2 x float> @test_vfwmsac_vf_bf16mf2_f32m1_rm_tumu(
+// CHECK-RV64-SAME: <vscale x 2 x i1> [[VM:%.*]], <vscale x 2 x float> [[VD:%.*]], bfloat noundef [[VS1:%.*]], <vscale x 2 x bfloat> [[VS2:%.*]], i64 noundef [[VL:%.*]]) #[[ATTR0]] {
+// CHECK-RV64-NEXT: [[ENTRY:.*:]]
+// CHECK-RV64-NEXT: [[TMP0:%.*]] = call <vscale x 2 x float> @llvm.riscv.vfwmsac.mask.nxv2f32.bf16.nxv2bf16.i64(<vscale x 2 x float> [[VD]], bfloat [[VS1]], <vscale x 2 x bfloat> [[VS2]], <vscale x 2 x i1> [[VM]], i64 0, i64 [[VL]], i64 0)
+// CHECK-RV64-NEXT: ret <vscale x 2 x float> [[TMP0]]
+//
+vfloat32m1_t test_vfwmsac_vf_bf16mf2_f32m1_rm_tumu(vbool32_t vm,
+ vfloat32m1_t vd, __bf16 vs1,
+ vbfloat16mf2_t vs2,
+ size_t vl) {
+ return __riscv_vfwmsac_vf_bf16mf2_f32m1_rm_tumu(vm, vd, vs1, vs2,
+ __RISCV_FRM_RNE, vl);
+}
+
+// CHECK-RV64-LABEL: define dso_local <vscale x 4 x float> @test_vfwmsac_vv_bf16m1_f32m2_rm_tumu(
+// CHECK-RV64-SAME: <vscale x 4 x i1> [[VM:%.*]], <vscale x 4 x float> [[VD:%.*]], <vscale x 4 x bfloat> [[VS1:%.*]], <vscale x 4 x bfloat> [[VS2:%.*]], i64 noundef [[VL:%.*]]) #[[ATTR0]] {
+// CHECK-RV64-NEXT: [[ENTRY:.*:]]
+// CHECK-RV64-NEXT: [[TMP0:%.*]] = call <vscale x 4 x float> @llvm.riscv.vfwmsac.mask.nxv4f32.nxv4bf16.nxv4bf16.i64(<vscale x 4 x float> [[VD]], <vscale x 4 x bfloat> [[VS1]], <vscale x 4 x bfloat> [[VS2]], <vscale x 4 x i1> [[VM]], i64 0, i64 [[VL]], i64 0)
+// CHECK-RV64-NEXT: ret <vscale x 4 x float> [[TMP0]]
+//
+vfloat32m2_t test_vfwmsac_vv_bf16m1_f32m2_rm_tumu(vbool16_t vm, vfloat32m2_t vd,
+ vbfloat16m1_t vs1,
+ vbfloat16m1_t vs2,
+ size_t vl) {
+ return __riscv_vfwmsac_vv_bf16m1_f32m2_rm_tumu(vm, vd, vs1, vs2,
+ __RISCV_FRM_RNE, vl);
+}
+
+// CHECK-RV64-LABEL: define dso_local <vscale x 4 x float> @test_vfwmsac_vf_bf16m1_f32m2_rm_tumu(
+// CHECK-RV64-SAME: <vscale x 4 x i1> [[VM:%.*]], <vscale x 4 x float> [[VD:%.*]], bfloat noundef [[VS1:%.*]], <vscale x 4 x bfloat> [[VS2:%.*]], i64 noundef [[VL:%.*]]) #[[ATTR0]] {
+// CHECK-RV64-NEXT: [[ENTRY:.*:]]
+// CHECK-RV64-NEXT: [[TMP0:%.*]] = call <vscale x 4 x float> @llvm.riscv.vfwmsac.mask.nxv4f32.bf16.nxv4bf16.i64(<vscale x 4 x float> [[VD]], bfloat [[VS1]], <vscale x 4 x bfloat> [[VS2]], <vscale x 4 x i1> [[VM]], i64 0, i64 [[VL]], i64 0)
+// CHECK-RV64-NEXT: ret <vscale x 4 x float> [[TMP0]]
+//
+vfloat32m2_t test_vfwmsac_vf_bf16m1_f32m2_rm_tumu(vbool16_t vm, vfloat32m2_t vd,
+ __bf16 vs1, vbfloat16m1_t vs2,
+ size_t vl) {
+ return __riscv_vfwmsac_vf_bf16m1_f32m2_rm_tumu(vm, vd, vs1, vs2,
+ __RISCV_FRM_RNE, vl);
+}
+
+// CHECK-RV64-LABEL: define dso_local <vscale x 8 x float> @test_vfwmsac_vv_bf16m2_f32m4_rm_tumu(
+// CHECK-RV64-SAME: <vscale x 8 x i1> [[VM:%.*]], <vscale x 8 x float> [[VD:%.*]], <vscale x 8 x bfloat> [[VS1:%.*]], <vscale x 8 x bfloat> [[VS2:%.*]], i64 noundef [[VL:%.*]]) #[[ATTR0]] {
+// CHECK-RV64-NEXT: [[ENTRY:.*:]]
+// CHECK-RV64-NEXT: [[TMP0:%.*]] = call <vscale x 8 x float> @llvm.riscv.vfwmsac.mask.nxv8f32.nxv8bf16.nxv8bf16.i64(<vscale x 8 x float> [[VD]], <vscale x 8 x bfloat> [[VS1]], <vscale x 8 x bfloat> [[VS2]], <vscale x 8 x i1> [[VM]], i64 0, i64 [[VL]], i64 0)
+// CHECK-RV64-NEXT: ret <vscale x 8 x float> [[TMP0]]
+//
+vfloat32m4_t test_vfwmsac_vv_bf16m2_f32m4_rm_tumu(vbool8_t vm, vfloat32m4_t vd,
+ vbfloat16m2_t vs1,
+ vbfloat16m2_t vs2,
+ size_t vl) {
+ return __riscv_vfwmsac_vv_bf16m2_f32m4_rm_tumu(vm, vd, vs1, vs2,
+ __RISCV_FRM_RNE, vl);
+}
+
+// CHECK-RV64-LABEL: define dso_local <vscale x 8 x float> @test_vfwmsac_vf_bf16m2_f32m4_rm_tumu(
+// CHECK-RV64-SAME: <vscale x 8 x i1> [[VM:%.*]], <vscale x 8 x float> [[VD:%.*]], bfloat noundef [[VS1:%.*]], <vscale x 8 x bfloat> [[VS2:%.*]], i64 noundef [[VL:%.*]]) #[[ATTR0]] {
+// CHECK-RV64-NEXT: [[ENTRY:.*:]]
+// CHECK-RV64-NEXT: [[TMP0:%.*]] = call <vscale x 8 x float> @llvm.riscv.vfwmsac.mask.nxv8f32.bf16.nxv8bf16.i64(<vscale x 8 x float> [[VD]], bfloat [[VS1]], <vscale x 8 x bfloat> [[VS2]], <vscale x 8 x i1> [[VM]], i64 0, i64 [[VL]], i64 0)
+// CHECK-RV64-NEXT: ret <vscale x 8 x float> [[TMP0]]
+//
+vfloat32m4_t test_vfwmsac_vf_bf16m2_f32m4_rm_tumu(vbool8_t vm, vfloat32m4_t vd,
+ __bf16 vs1, vbfloat16m2_t vs2,
+ size_t vl) {
+ return __riscv_vfwmsac_vf_bf16m2_f32m4_rm_tumu(vm, vd, vs1, vs2,
+ __RISCV_FRM_RNE, vl);
+}
+
+// CHECK-RV64-LABEL: define dso_local <vscale x 16 x float> @test_vfwmsac_vv_bf16m4_f32m8_rm_tumu(
+// CHECK-RV64-SAME: <vscale x 16 x i1> [[VM:%.*]], <vscale x 16 x float> [[VD:%.*]], <vscale x 16 x bfloat> [[VS1:%.*]], <vscale x 16 x bfloat> [[VS2:%.*]], i64 noundef [[VL:%.*]]) #[[ATTR0]] {
+// CHECK-RV64-NEXT: [[ENTRY:.*:]]
+// CHECK-RV64-NEXT: [[TMP0:%.*]] = call <vscale x 16 x float> @llvm.riscv.vfwmsac.mask.nxv16f32.nxv16bf16.nxv16bf16.i64(<vscale x 16 x float> [[VD]], <vscale x 16 x bfloat> [[VS1]], <vscale x 16 x bfloat> [[VS2]], <vscale x 16 x i1> [[VM]], i64 0, i64 [[VL]], i64 0)
+// CHECK-RV64-NEXT: ret <vscale x 16 x float> [[TMP0]]
+//
+vfloat32m8_t test_vfwmsac_vv_bf16m4_f32m8_rm_tumu(vbool4_t vm, vfloat32m8_t vd,
+ vbfloat16m4_t vs1,
+ vbfloat16m4_t vs2,
+ size_t vl) {
+ return __riscv_vfwmsac_vv_bf16m4_f32m8_rm_tumu(vm, vd, vs1, vs2,
+ __RISCV_FRM_RNE, vl);
+}
+
+// CHECK-RV64-LABEL: define dso_local <vscale x 16 x float> @test_vfwmsac_vf_bf16m4_f32m8_rm_tumu(
+// CHECK-RV64-SAME: <vscale x 16 x i1> [[VM:%.*]], <vscale x 16 x float> [[VD:%.*]], bfloat noundef [[VS1:%.*]], <vscale x 16 x bfloat> [[VS2:%.*]], i64 noundef [[VL:%.*]]) #[[ATTR0]] {
+// CHECK-RV64-NEXT: [[ENTRY:.*:]]
+// CHECK-RV64-NEXT: [[TMP0:%.*]] = call <vscale x 16 x float> @llvm.riscv.vfwmsac.mask.nxv16f32.bf16.nxv16bf16.i64(<vscale x 16 x float> [[VD]], bfloat [[VS1]], <vscale x 16 x bfloat> [[VS2]], <vscale x 16 x i1> [[VM]], i64 0, i64 [[VL]], i64 0)
+// CHECK-RV64-NEXT: ret <vscale x 16 x float> [[TMP0]]
+//
+vfloat32m8_t test_vfwmsac_vf_bf16m4_f32m8_rm_tumu(vbool4_t vm, vfloat32m8_t vd,
+ __bf16 vs1, vbfloat16m4_t vs2,
+ size_t vl) {
+ return __riscv_vfwmsac_vf_bf16m4_f32m8_rm_tumu(vm, vd, vs1, vs2,
+ __RISCV_FRM_RNE, vl);
+}
+
+// CHECK-RV64-LABEL: define dso_local <vscale x 1 x float> @test_vfwmsac_vv_bf16mf4_f32mf2_rm_mu(
+// CHECK-RV64-SAME: <vscale x 1 x i1> [[VM:%.*]], <vscale x 1 x float> [[VD:%.*]], <vscale x 1 x bfloat> [[VS1:%.*]], <vscale x 1 x bfloat> [[VS2:%.*]], i64 noundef [[VL:%.*]]) #[[ATTR0]] {
+// CHECK-RV64-NEXT: [[ENTRY:.*:]]
+// CHECK-RV64-NEXT: [[TMP0:%.*]] = call <vscale x 1 x float> @llvm.riscv.vfwmsac.mask.nxv1f32.nxv1bf16.nxv1bf16.i64(<vscale x 1 x float> [[VD]], <vscale x 1 x bfloat> [[VS1]], <vscale x 1 x bfloat> [[VS2]], <vscale x 1 x i1> [[VM]], i64 0, i64 [[VL]], i64 1)
+// CHECK-RV64-NEXT: ret <vscale x 1 x float> [[TMP0]]
+//
+vfloat32mf2_t test_vfwmsac_vv_bf16mf4_f32mf2_rm_mu(vbool64_t vm,
+ vfloat32mf2_t vd,
+ vbfloat16mf4_t vs1,
+ vbfloat16mf4_t vs2,
+ size_t vl) {
+ return __riscv_vfwmsac_vv_bf16mf4_f32mf2_rm_mu(vm, vd, vs1, vs2,
+ __RISCV_FRM_RNE, vl);
+}
+
+// CHECK-RV64-LABEL: define dso_local <vscale x 1 x float> @test_vfwmsac_vf_bf16mf4_f32mf2_rm_mu(
+// CHECK-RV64-SAME: <vscale x 1 x i1> [[VM:%.*]], <vscale x 1 x float> [[VD:%.*]], bfloat noundef [[VS1:%.*]], <vscale x 1 x bfloat> [[VS2:%.*]], i64 noundef [[VL:%.*]]) #[[ATTR0]] {
+// CHECK-RV64-NEXT: [[ENTRY:.*:]]
+// CHECK-RV64-NEXT: [[TMP0:%.*]] = call <vscale x 1 x float> @llvm.riscv.vfwmsac.mask.nxv1f32.bf16.nxv1bf16.i64(<vscale x 1 x float> [[VD]], bfloat [[VS1]], <vscale x 1 x bfloat> [[VS2]], <vscale x 1 x i1> [[VM]], i64 0, i64 [[VL]], i64 1)
+// CHECK-RV64-NEXT: ret <vscale x 1 x float> [[TMP0]]
+//
+vfloat32mf2_t test_vfwmsac_vf_bf16mf4_f32mf2_rm_mu(vbool64_t vm,
+ vfloat32mf2_t vd, __bf16 vs1,
+ vbfloat16mf4_t vs2,
+ size_t vl) {
+ return __riscv_vfwmsac_vf_bf16mf4_f32mf2_rm_mu(vm, vd, vs1, vs2,
+ __RISCV_FRM_RNE, vl);
+}
+
+// CHECK-RV64-LABEL: define dso_local <vscale x 2 x float> @test_vfwmsac_vv_bf16mf2_f32m1_rm_mu(
+// CHECK-RV64-SAME: <vscale x 2 x i1> [[VM:%.*]], <vscale x 2 x float> [[VD:%.*]], <vscale x 2 x bfloat> [[VS1:%.*]], <vscale x 2 x bfloat> [[VS2:%.*]], i64 noundef [[VL:%.*]]) #[[ATTR0]] {
+// CHECK-RV64-NEXT: [[ENTRY:.*:]]
+// CHECK-RV64-NEXT: [[TMP0:%.*]] = call <vscale x 2 x float> @llvm.riscv.vfwmsac.mask.nxv2f32.nxv2bf16.nxv2bf16.i64(<vscale x 2 x float> [[VD]], <vscale x 2 x bfloat> [[VS1]], <vscale x 2 x bfloat> [[VS2]], <vscale x 2 x i1> [[VM]], i64 0, i64 [[VL]], i64 1)
+// CHECK-RV64-NEXT: ret <vscale x 2 x float> [[TMP0]]
+//
+vfloat32m1_t test_vfwmsac_vv_bf16mf2_f32m1_rm_mu(vbool32_t vm, vfloat32m1_t vd,
+ vbfloat16mf2_t vs1,
+ vbfloat16mf2_t vs2,
+ size_t vl) {
+ return __riscv_vfwmsac_vv_bf16mf2_f32m1_rm_mu(vm, vd, vs1, vs2,
+ __RISCV_FRM_RNE, vl);
+}
+
+// CHECK-RV64-LABEL: define dso_local <vscale x 2 x float> @test_vfwmsac_vf_bf16mf2_f32m1_rm_mu(
+// CHECK-RV64-SAME: <vscale x 2 x i1> [[VM:%.*]], <vscale x 2 x float> [[VD:%.*]], bfloat noundef [[VS1:%.*]], <vscale x 2 x bfloat> [[VS2:%.*]], i64 noundef [[VL:%.*]]) #[[ATTR0]] {
+// CHECK-RV64-NEXT: [[ENTRY:.*:]]
+// CHECK-RV64-NEXT: [[TMP0:%.*]] = call <vscale x 2 x float> @llvm.riscv.vfwmsac.mask.nxv2f32.bf16.nxv2bf16.i64(<vscale x 2 x float> [[VD]], bfloat [[VS1]], <vscale x 2 x bfloat> [[VS2]], <vscale x 2 x i1> [[VM]], i64 0, i64 [[VL]], i64 1)
+// CHECK-RV64-NEXT: ret <vscale x 2 x float> [[TMP0]]
+//
+vfloat32m1_t test_vfwmsac_vf_bf16mf2_f32m1_rm_mu(vbool32_t vm, vfloat32m1_t vd,
+ __bf16 vs1, vbfloat16mf2_t vs2,
+ size_t vl) {
+ return __riscv_vfwmsac_vf_bf16mf2_f32m1_rm_mu(vm, vd, vs1, vs2,
+ __RISCV_FRM_RNE, vl);
+}
+
+// CHECK-RV64-LABEL: define dso_local <vscale x 4 x float> @test_vfwmsac_vv_bf16m1_f32m2_rm_mu(
+// CHECK-RV64-SAME: <vscale x 4 x i1> [[VM:%.*]], <vscale x 4 x float> [[VD:%.*]], <vscale x 4 x bfloat> [[VS1:%.*]], <vscale x 4 x bfloat> [[VS2:%.*]], i64 noundef [[VL:%.*]]) #[[ATTR0]] {
+// CHECK-RV64-NEXT: [[ENTRY:.*:]]
+// CHECK-RV64-NEXT: [[TMP0:%.*]] = call <vscale x 4 x float> @llvm.riscv.vfwmsac.mask.nxv4f32.nxv4bf16.nxv4bf16.i64(<vscale x 4 x float> [[VD]], <vscale x 4 x bfloat> [[VS1]], <vscale x 4 x bfloat> [[VS2]], <vscale x 4 x i1> [[VM]], i64 0, i64 [[VL]], i64 1)
+// CHECK-RV64-NEXT: ret <vscale x 4 x float> [[TMP0]]
+//
+vfloat32m2_t test_vfwmsac_vv_bf16m1_f32m2_rm_mu(vbool16_t vm, vfloat32m2_t vd,
+ vbfloat16m1_t vs1,
+ vbfloat16m1_t vs2, size_t vl) {
+ return __riscv_vfwmsac_vv_bf16m1_f32m2_rm_mu(vm, vd, vs1, vs2,
+ __RISCV_FRM_RNE, vl);
+}
+
+// CHECK-RV64-LABEL: define dso_local <vscale x 4 x float> @test_vfwmsac_vf_bf16m1_f32m2_rm_mu(
+// CHECK-RV64-SAME: <vscale x 4 x i1> [[VM:%.*]], <vscale x 4 x float> [[VD:%.*]], bfloat noundef [[VS1:%.*]], <vscale x 4 x bfloat> [[VS2:%.*]], i64 noundef [[VL:%.*]]) #[[ATTR0]] {
+// CHECK-RV64-NEXT: [[ENTRY:.*:]]
+// CHECK-RV64-NEXT: [[TMP0:%.*]] = call <vscale x 4 x float> @llvm.riscv.vfwmsac.mask.nxv4f32.bf16.nxv4bf16.i64(<vscale x 4 x float> [[VD]], bfloat [[VS1]], <vscale x 4 x bfloat> [[VS2]], <vscale x 4 x i1> [[VM]], i64 0, i64 [[VL]], i64 1)
+// CHECK-RV64-NEXT: ret <vscale x 4 x float> [[TMP0]]
+//
+vfloat32m2_t test_vfwmsac_vf_bf16m1_f32m2_rm_mu(vbool16_t vm, vfloat32m2_t vd,
+ __bf16 vs1, vbfloat16m1_t vs2,
+ size_t vl) {
+ return __riscv_vfwmsac_vf_bf16m1_f32m2_rm_mu(vm, vd, vs1, vs2,
+ __RISCV_FRM_RNE, vl);
+}
+
+// CHECK-RV64-LABEL: define dso_local <vscale x 8 x float> @test_vfwmsac_vv_bf16m2_f32m4_rm_mu(
+// CHECK-RV64-SAME: <vscale x 8 x i1> [[VM:%.*]], <vscale x 8 x float> [[VD:%.*]], <vscale x 8 x bfloat> [[VS1:%.*]], <vscale x 8 x bfloat> [[VS2:%.*]], i64 noundef [[VL:%.*]]) #[[ATTR0]] {
+// CHECK-RV64-NEXT: [[ENTRY:.*:]]
+// CHECK-RV64-NEXT: [[TMP0:%.*]] = call <vscale x 8 x float> @llvm.riscv.vfwmsac.mask.nxv8f32.nxv8bf16.nxv8bf16.i64(<vscale x 8 x float> [[VD]], <vscale x 8 x bfloat> [[VS1]], <vscale x 8 x bfloat> [[VS2]], <vscale x 8 x i1> [[VM]], i64 0, i64 [[VL]], i64 1)
+// CHECK-RV64-NEXT: ret <vscale x 8 x float> [[TMP0]]
+//
+vfloat32m4_t test_vfwmsac_vv_bf16m2_f32m4_rm_mu(vbool8_t vm, vfloat32m4_t vd,
+ vbfloat16m2_t vs1,
+ vbfloat16m2_t vs2, size_t vl) {
+ return __riscv_vfwmsac_vv_bf16m2_f32m4_rm_mu(vm, vd, vs1, vs2,
+ __RISCV_FRM_RNE, vl);
+}
+
+// CHECK-RV64-LABEL: define dso_local <vscale x 8 x float> @test_vfwmsac_vf_bf16m2_f32m4_rm_mu(
+// CHECK-RV64-SAME: <vscale x 8 x i1> [[VM:%.*]], <vscale x 8 x float> [[VD:%.*]], bfloat noundef [[VS1:%.*]], <vscale x 8 x bfloat> [[VS2:%.*]], i64 noundef [[VL:%.*]]) #[[ATTR0]] {
+// CHECK-RV64-NEXT: [[ENTRY:.*:]]
+// CHECK-RV64-NEXT: [[TMP0:%.*]] = call <vscale x 8 x float> @llvm.riscv.vfwmsac.mask.nxv8f32.bf16.nxv8bf16.i64(<vscale x 8 x float> [[VD]], bfloat [[VS1]], <vscale x 8 x bfloat> [[VS2]], <vscale x 8 x i1> [[VM]], i64 0, i64 [[VL]], i64 1)
+// CHECK-RV64-NEXT: ret <vscale x 8 x float> [[TMP0]]
+//
+vfloat32m4_t test_vfwmsac_vf_bf16m2_f32m4_rm_mu(vbool8_t vm, vfloat32m4_t vd,
+ __bf16 vs1, vbfloat16m2_t vs2,
+ size_t vl) {
+ return __riscv_vfwmsac_vf_bf16m2_f32m4_rm_mu(vm, vd, vs1, vs2,
+ __RISCV_FRM_RNE, vl);
+}
+
+// CHECK-RV64-LABEL: define dso_local <vscale x 16 x float> @test_vfwmsac_vv_bf16m4_f32m8_rm_mu(
+// CHECK-RV64-SAME: <vscale x 16 x i1> [[VM:%.*]], <vscale x 16 x float> [[VD:%.*]], <vscale x 16 x bfloat> [[VS1:%.*]], <vscale x 16 x bfloat> [[VS2:%.*]], i64 noundef [[VL:%.*]]) #[[ATTR0]] {
+// CHECK-RV64-NEXT: [[ENTRY:.*:]]
+// CHECK-RV64-NEXT: [[TMP0:%.*]] = call <vscale x 16 x float> @llvm.riscv.vfwmsac.mask.nxv16f32.nxv16bf16.nxv16bf16.i64(<vscale x 16 x float> [[VD]], <vscale x 16 x bfloat> [[VS1]], <vscale x 16 x bfloat> [[VS2]], <vscale x 16 x i1> [[VM]], i64 0, i64 [[VL]], i64 1)
+// CHECK-RV64-NEXT: ret <vscale x 16 x float> [[TMP0]]
+//
+vfloat32m8_t test_vfwmsac_vv_bf16m4_f32m8_rm_mu(vbool4_t vm, vfloat32m8_t vd,
+ vbfloat16m4_t vs1,
+ vbfloat16m4_t vs2, size_t vl) {
+ return __riscv_vfwmsac_vv_bf16m4_f32m8_rm_mu(vm, vd, vs1, vs2,
+ __RISCV_FRM_RNE, vl);
+}
+
+// CHECK-RV64-LABEL: define dso_local <vscale x 16 x float> @test_vfwmsac_vf_bf16m4_f32m8_rm_mu(
+// CHECK-RV64-SAME: <vscale x 16 x i1> [[VM:%.*]], <vscale x 16 x float> [[VD:%.*]], bfloat noundef [[VS1:%.*]], <vscale x 16 x bfloat> [[VS2:%.*]], i64 noundef [[VL:%.*]]) #[[ATTR0]] {
+// CHECK-RV64-NEXT: [[ENTRY:.*:]]
+// CHECK-RV64-NEXT: [[TMP0:%.*]] = call <vscale x 16 x float> @llvm.riscv.vfwmsac.mask.nxv16f32.bf16.nxv16bf16.i64(<vscale x 16 x float> [[VD]], bfloat [[VS1]], <vscale x 16 x bfloat> [[VS2]], <vscale x 16 x i1> [[VM]], i64 0, i64 [[VL]], i64 1)
+// CHECK-RV64-NEXT: ret <vscale x 16 x float> [[TMP0]]
+//
+vfloat32m8_t test_vfwmsac_vf_bf16m4_f32m8_rm_mu(vbool4_t vm, vfloat32m8_t vd,
+ __bf16 vs1, vbfloat16m4_t vs2,
+ size_t vl) {
+ return __riscv_vfwmsac_vf_bf16m4_f32m8_rm_mu(vm, vd, vs1, vs2,
+ __RISCV_FRM_RNE, vl);
+}
diff --git a/clang/test/CodeGen/RISCV/rvv-intrinsics-autogenerated/zvfbfa/policy/non-overloaded/vfwmul.c b/clang/test/CodeGen/RISCV/rvv-intrinsics-autogenerated/zvfbfa/policy/non-overloaded/vfwmul.c
new file mode 100644
index 0000000..9fcfe81
--- /dev/null
+++ b/clang/test/CodeGen/RISCV/rvv-intrinsics-autogenerated/zvfbfa/policy/non-overloaded/vfwmul.c
@@ -0,0 +1,1015 @@
+// NOTE: Assertions have been autogenerated by utils/update_cc_test_checks.py UTC_ARGS: --version 5
+// REQUIRES: riscv-registered-target
+// RUN: %clang_cc1 -triple riscv64 -target-feature +v \
+// RUN: -target-feature +experimental-zvfbfa -disable-O0-optnone \
+// RUN: -emit-llvm %s -o - | opt -S -passes=mem2reg | \
+// RUN: FileCheck --check-prefix=CHECK-RV64 %s
+
+#include <riscv_vector.h>
+
+// CHECK-RV64-LABEL: define dso_local <vscale x 1 x float> @test_vfwmul_vv_bf16mf4_f32mf2_tu(
+// CHECK-RV64-SAME: <vscale x 1 x float> [[VD:%.*]], <vscale x 1 x bfloat> [[VS2:%.*]], <vscale x 1 x bfloat> [[VS1:%.*]], i64 noundef [[VL:%.*]]) #[[ATTR0:[0-9]+]] {
+// CHECK-RV64-NEXT: [[ENTRY:.*:]]
+// CHECK-RV64-NEXT: [[TMP0:%.*]] = call <vscale x 1 x float> @llvm.riscv.vfwmul.nxv1f32.nxv1bf16.nxv1bf16.i64(<vscale x 1 x float> [[VD]], <vscale x 1 x bfloat> [[VS2]], <vscale x 1 x bfloat> [[VS1]], i64 7, i64 [[VL]])
+// CHECK-RV64-NEXT: ret <vscale x 1 x float> [[TMP0]]
+//
+vfloat32mf2_t test_vfwmul_vv_bf16mf4_f32mf2_tu(vfloat32mf2_t vd,
+ vbfloat16mf4_t vs2,
+ vbfloat16mf4_t vs1, size_t vl) {
+ return __riscv_vfwmul_vv_bf16mf4_f32mf2_tu(vd, vs2, vs1, vl);
+}
+
+// CHECK-RV64-LABEL: define dso_local <vscale x 1 x float> @test_vfwmul_vf_bf16mf4_f32mf2_tu(
+// CHECK-RV64-SAME: <vscale x 1 x float> [[VD:%.*]], <vscale x 1 x bfloat> [[VS2:%.*]], bfloat noundef [[RS1:%.*]], i64 noundef [[VL:%.*]]) #[[ATTR0]] {
+// CHECK-RV64-NEXT: [[ENTRY:.*:]]
+// CHECK-RV64-NEXT: [[TMP0:%.*]] = call <vscale x 1 x float> @llvm.riscv.vfwmul.nxv1f32.nxv1bf16.bf16.i64(<vscale x 1 x float> [[VD]], <vscale x 1 x bfloat> [[VS2]], bfloat [[RS1]], i64 7, i64 [[VL]])
+// CHECK-RV64-NEXT: ret <vscale x 1 x float> [[TMP0]]
+//
+vfloat32mf2_t test_vfwmul_vf_bf16mf4_f32mf2_tu(vfloat32mf2_t vd,
+ vbfloat16mf4_t vs2, __bf16 rs1,
+ size_t vl) {
+ return __riscv_vfwmul_vf_bf16mf4_f32mf2_tu(vd, vs2, rs1, vl);
+}
+
+// CHECK-RV64-LABEL: define dso_local <vscale x 2 x float> @test_vfwmul_vv_bf16mf2_f32m1_tu(
+// CHECK-RV64-SAME: <vscale x 2 x float> [[VD:%.*]], <vscale x 2 x bfloat> [[VS2:%.*]], <vscale x 2 x bfloat> [[VS1:%.*]], i64 noundef [[VL:%.*]]) #[[ATTR0]] {
+// CHECK-RV64-NEXT: [[ENTRY:.*:]]
+// CHECK-RV64-NEXT: [[TMP0:%.*]] = call <vscale x 2 x float> @llvm.riscv.vfwmul.nxv2f32.nxv2bf16.nxv2bf16.i64(<vscale x 2 x float> [[VD]], <vscale x 2 x bfloat> [[VS2]], <vscale x 2 x bfloat> [[VS1]], i64 7, i64 [[VL]])
+// CHECK-RV64-NEXT: ret <vscale x 2 x float> [[TMP0]]
+//
+vfloat32m1_t test_vfwmul_vv_bf16mf2_f32m1_tu(vfloat32m1_t vd,
+ vbfloat16mf2_t vs2,
+ vbfloat16mf2_t vs1, size_t vl) {
+ return __riscv_vfwmul_vv_bf16mf2_f32m1_tu(vd, vs2, vs1, vl);
+}
+
+// CHECK-RV64-LABEL: define dso_local <vscale x 2 x float> @test_vfwmul_vf_bf16mf2_f32m1_tu(
+// CHECK-RV64-SAME: <vscale x 2 x float> [[VD:%.*]], <vscale x 2 x bfloat> [[VS2:%.*]], bfloat noundef [[RS1:%.*]], i64 noundef [[VL:%.*]]) #[[ATTR0]] {
+// CHECK-RV64-NEXT: [[ENTRY:.*:]]
+// CHECK-RV64-NEXT: [[TMP0:%.*]] = call <vscale x 2 x float> @llvm.riscv.vfwmul.nxv2f32.nxv2bf16.bf16.i64(<vscale x 2 x float> [[VD]], <vscale x 2 x bfloat> [[VS2]], bfloat [[RS1]], i64 7, i64 [[VL]])
+// CHECK-RV64-NEXT: ret <vscale x 2 x float> [[TMP0]]
+//
+vfloat32m1_t test_vfwmul_vf_bf16mf2_f32m1_tu(vfloat32m1_t vd,
+ vbfloat16mf2_t vs2, __bf16 rs1,
+ size_t vl) {
+ return __riscv_vfwmul_vf_bf16mf2_f32m1_tu(vd, vs2, rs1, vl);
+}
+
+// CHECK-RV64-LABEL: define dso_local <vscale x 4 x float> @test_vfwmul_vv_bf16m1_f32m2_tu(
+// CHECK-RV64-SAME: <vscale x 4 x float> [[VD:%.*]], <vscale x 4 x bfloat> [[VS2:%.*]], <vscale x 4 x bfloat> [[VS1:%.*]], i64 noundef [[VL:%.*]]) #[[ATTR0]] {
+// CHECK-RV64-NEXT: [[ENTRY:.*:]]
+// CHECK-RV64-NEXT: [[TMP0:%.*]] = call <vscale x 4 x float> @llvm.riscv.vfwmul.nxv4f32.nxv4bf16.nxv4bf16.i64(<vscale x 4 x float> [[VD]], <vscale x 4 x bfloat> [[VS2]], <vscale x 4 x bfloat> [[VS1]], i64 7, i64 [[VL]])
+// CHECK-RV64-NEXT: ret <vscale x 4 x float> [[TMP0]]
+//
+vfloat32m2_t test_vfwmul_vv_bf16m1_f32m2_tu(vfloat32m2_t vd, vbfloat16m1_t vs2,
+ vbfloat16m1_t vs1, size_t vl) {
+ return __riscv_vfwmul_vv_bf16m1_f32m2_tu(vd, vs2, vs1, vl);
+}
+
+// CHECK-RV64-LABEL: define dso_local <vscale x 4 x float> @test_vfwmul_vf_bf16m1_f32m2_tu(
+// CHECK-RV64-SAME: <vscale x 4 x float> [[VD:%.*]], <vscale x 4 x bfloat> [[VS2:%.*]], bfloat noundef [[RS1:%.*]], i64 noundef [[VL:%.*]]) #[[ATTR0]] {
+// CHECK-RV64-NEXT: [[ENTRY:.*:]]
+// CHECK-RV64-NEXT: [[TMP0:%.*]] = call <vscale x 4 x float> @llvm.riscv.vfwmul.nxv4f32.nxv4bf16.bf16.i64(<vscale x 4 x float> [[VD]], <vscale x 4 x bfloat> [[VS2]], bfloat [[RS1]], i64 7, i64 [[VL]])
+// CHECK-RV64-NEXT: ret <vscale x 4 x float> [[TMP0]]
+//
+vfloat32m2_t test_vfwmul_vf_bf16m1_f32m2_tu(vfloat32m2_t vd, vbfloat16m1_t vs2,
+ __bf16 rs1, size_t vl) {
+ return __riscv_vfwmul_vf_bf16m1_f32m2_tu(vd, vs2, rs1, vl);
+}
+
+// CHECK-RV64-LABEL: define dso_local <vscale x 8 x float> @test_vfwmul_vv_bf16m2_f32m4_tu(
+// CHECK-RV64-SAME: <vscale x 8 x float> [[VD:%.*]], <vscale x 8 x bfloat> [[VS2:%.*]], <vscale x 8 x bfloat> [[VS1:%.*]], i64 noundef [[VL:%.*]]) #[[ATTR0]] {
+// CHECK-RV64-NEXT: [[ENTRY:.*:]]
+// CHECK-RV64-NEXT: [[TMP0:%.*]] = call <vscale x 8 x float> @llvm.riscv.vfwmul.nxv8f32.nxv8bf16.nxv8bf16.i64(<vscale x 8 x float> [[VD]], <vscale x 8 x bfloat> [[VS2]], <vscale x 8 x bfloat> [[VS1]], i64 7, i64 [[VL]])
+// CHECK-RV64-NEXT: ret <vscale x 8 x float> [[TMP0]]
+//
+vfloat32m4_t test_vfwmul_vv_bf16m2_f32m4_tu(vfloat32m4_t vd, vbfloat16m2_t vs2,
+ vbfloat16m2_t vs1, size_t vl) {
+ return __riscv_vfwmul_vv_bf16m2_f32m4_tu(vd, vs2, vs1, vl);
+}
+
+// CHECK-RV64-LABEL: define dso_local <vscale x 8 x float> @test_vfwmul_vf_bf16m2_f32m4_tu(
+// CHECK-RV64-SAME: <vscale x 8 x float> [[VD:%.*]], <vscale x 8 x bfloat> [[VS2:%.*]], bfloat noundef [[RS1:%.*]], i64 noundef [[VL:%.*]]) #[[ATTR0]] {
+// CHECK-RV64-NEXT: [[ENTRY:.*:]]
+// CHECK-RV64-NEXT: [[TMP0:%.*]] = call <vscale x 8 x float> @llvm.riscv.vfwmul.nxv8f32.nxv8bf16.bf16.i64(<vscale x 8 x float> [[VD]], <vscale x 8 x bfloat> [[VS2]], bfloat [[RS1]], i64 7, i64 [[VL]])
+// CHECK-RV64-NEXT: ret <vscale x 8 x float> [[TMP0]]
+//
+vfloat32m4_t test_vfwmul_vf_bf16m2_f32m4_tu(vfloat32m4_t vd, vbfloat16m2_t vs2,
+ __bf16 rs1, size_t vl) {
+ return __riscv_vfwmul_vf_bf16m2_f32m4_tu(vd, vs2, rs1, vl);
+}
+
+// CHECK-RV64-LABEL: define dso_local <vscale x 16 x float> @test_vfwmul_vv_bf16m4_f32m8_tu(
+// CHECK-RV64-SAME: <vscale x 16 x float> [[VD:%.*]], <vscale x 16 x bfloat> [[VS2:%.*]], <vscale x 16 x bfloat> [[VS1:%.*]], i64 noundef [[VL:%.*]]) #[[ATTR0]] {
+// CHECK-RV64-NEXT: [[ENTRY:.*:]]
+// CHECK-RV64-NEXT: [[TMP0:%.*]] = call <vscale x 16 x float> @llvm.riscv.vfwmul.nxv16f32.nxv16bf16.nxv16bf16.i64(<vscale x 16 x float> [[VD]], <vscale x 16 x bfloat> [[VS2]], <vscale x 16 x bfloat> [[VS1]], i64 7, i64 [[VL]])
+// CHECK-RV64-NEXT: ret <vscale x 16 x float> [[TMP0]]
+//
+vfloat32m8_t test_vfwmul_vv_bf16m4_f32m8_tu(vfloat32m8_t vd, vbfloat16m4_t vs2,
+ vbfloat16m4_t vs1, size_t vl) {
+ return __riscv_vfwmul_vv_bf16m4_f32m8_tu(vd, vs2, vs1, vl);
+}
+
+// CHECK-RV64-LABEL: define dso_local <vscale x 16 x float> @test_vfwmul_vf_bf16m4_f32m8_tu(
+// CHECK-RV64-SAME: <vscale x 16 x float> [[VD:%.*]], <vscale x 16 x bfloat> [[VS2:%.*]], bfloat noundef [[RS1:%.*]], i64 noundef [[VL:%.*]]) #[[ATTR0]] {
+// CHECK-RV64-NEXT: [[ENTRY:.*:]]
+// CHECK-RV64-NEXT: [[TMP0:%.*]] = call <vscale x 16 x float> @llvm.riscv.vfwmul.nxv16f32.nxv16bf16.bf16.i64(<vscale x 16 x float> [[VD]], <vscale x 16 x bfloat> [[VS2]], bfloat [[RS1]], i64 7, i64 [[VL]])
+// CHECK-RV64-NEXT: ret <vscale x 16 x float> [[TMP0]]
+//
+vfloat32m8_t test_vfwmul_vf_bf16m4_f32m8_tu(vfloat32m8_t vd, vbfloat16m4_t vs2,
+ __bf16 rs1, size_t vl) {
+ return __riscv_vfwmul_vf_bf16m4_f32m8_tu(vd, vs2, rs1, vl);
+}
+
+// CHECK-RV64-LABEL: define dso_local <vscale x 1 x float> @test_vfwmul_vv_bf16mf4_f32mf2_tum(
+// CHECK-RV64-SAME: <vscale x 1 x i1> [[VM:%.*]], <vscale x 1 x float> [[VD:%.*]], <vscale x 1 x bfloat> [[VS2:%.*]], <vscale x 1 x bfloat> [[VS1:%.*]], i64 noundef [[VL:%.*]]) #[[ATTR0]] {
+// CHECK-RV64-NEXT: [[ENTRY:.*:]]
+// CHECK-RV64-NEXT: [[TMP0:%.*]] = call <vscale x 1 x float> @llvm.riscv.vfwmul.mask.nxv1f32.nxv1bf16.nxv1bf16.i64(<vscale x 1 x float> [[VD]], <vscale x 1 x bfloat> [[VS2]], <vscale x 1 x bfloat> [[VS1]], <vscale x 1 x i1> [[VM]], i64 7, i64 [[VL]], i64 2)
+// CHECK-RV64-NEXT: ret <vscale x 1 x float> [[TMP0]]
+//
+vfloat32mf2_t test_vfwmul_vv_bf16mf4_f32mf2_tum(vbool64_t vm, vfloat32mf2_t vd,
+ vbfloat16mf4_t vs2,
+ vbfloat16mf4_t vs1, size_t vl) {
+ return __riscv_vfwmul_vv_bf16mf4_f32mf2_tum(vm, vd, vs2, vs1, vl);
+}
+
+// CHECK-RV64-LABEL: define dso_local <vscale x 1 x float> @test_vfwmul_vf_bf16mf4_f32mf2_tum(
+// CHECK-RV64-SAME: <vscale x 1 x i1> [[VM:%.*]], <vscale x 1 x float> [[VD:%.*]], <vscale x 1 x bfloat> [[VS2:%.*]], bfloat noundef [[RS1:%.*]], i64 noundef [[VL:%.*]]) #[[ATTR0]] {
+// CHECK-RV64-NEXT: [[ENTRY:.*:]]
+// CHECK-RV64-NEXT: [[TMP0:%.*]] = call <vscale x 1 x float> @llvm.riscv.vfwmul.mask.nxv1f32.nxv1bf16.bf16.i64(<vscale x 1 x float> [[VD]], <vscale x 1 x bfloat> [[VS2]], bfloat [[RS1]], <vscale x 1 x i1> [[VM]], i64 7, i64 [[VL]], i64 2)
+// CHECK-RV64-NEXT: ret <vscale x 1 x float> [[TMP0]]
+//
+vfloat32mf2_t test_vfwmul_vf_bf16mf4_f32mf2_tum(vbool64_t vm, vfloat32mf2_t vd,
+ vbfloat16mf4_t vs2, __bf16 rs1,
+ size_t vl) {
+ return __riscv_vfwmul_vf_bf16mf4_f32mf2_tum(vm, vd, vs2, rs1, vl);
+}
+
+// CHECK-RV64-LABEL: define dso_local <vscale x 2 x float> @test_vfwmul_vv_bf16mf2_f32m1_tum(
+// CHECK-RV64-SAME: <vscale x 2 x i1> [[VM:%.*]], <vscale x 2 x float> [[VD:%.*]], <vscale x 2 x bfloat> [[VS2:%.*]], <vscale x 2 x bfloat> [[VS1:%.*]], i64 noundef [[VL:%.*]]) #[[ATTR0]] {
+// CHECK-RV64-NEXT: [[ENTRY:.*:]]
+// CHECK-RV64-NEXT: [[TMP0:%.*]] = call <vscale x 2 x float> @llvm.riscv.vfwmul.mask.nxv2f32.nxv2bf16.nxv2bf16.i64(<vscale x 2 x float> [[VD]], <vscale x 2 x bfloat> [[VS2]], <vscale x 2 x bfloat> [[VS1]], <vscale x 2 x i1> [[VM]], i64 7, i64 [[VL]], i64 2)
+// CHECK-RV64-NEXT: ret <vscale x 2 x float> [[TMP0]]
+//
+vfloat32m1_t test_vfwmul_vv_bf16mf2_f32m1_tum(vbool32_t vm, vfloat32m1_t vd,
+ vbfloat16mf2_t vs2,
+ vbfloat16mf2_t vs1, size_t vl) {
+ return __riscv_vfwmul_vv_bf16mf2_f32m1_tum(vm, vd, vs2, vs1, vl);
+}
+
+// CHECK-RV64-LABEL: define dso_local <vscale x 2 x float> @test_vfwmul_vf_bf16mf2_f32m1_tum(
+// CHECK-RV64-SAME: <vscale x 2 x i1> [[VM:%.*]], <vscale x 2 x float> [[VD:%.*]], <vscale x 2 x bfloat> [[VS2:%.*]], bfloat noundef [[RS1:%.*]], i64 noundef [[VL:%.*]]) #[[ATTR0]] {
+// CHECK-RV64-NEXT: [[ENTRY:.*:]]
+// CHECK-RV64-NEXT: [[TMP0:%.*]] = call <vscale x 2 x float> @llvm.riscv.vfwmul.mask.nxv2f32.nxv2bf16.bf16.i64(<vscale x 2 x float> [[VD]], <vscale x 2 x bfloat> [[VS2]], bfloat [[RS1]], <vscale x 2 x i1> [[VM]], i64 7, i64 [[VL]], i64 2)
+// CHECK-RV64-NEXT: ret <vscale x 2 x float> [[TMP0]]
+//
+vfloat32m1_t test_vfwmul_vf_bf16mf2_f32m1_tum(vbool32_t vm, vfloat32m1_t vd,
+ vbfloat16mf2_t vs2, __bf16 rs1,
+ size_t vl) {
+ return __riscv_vfwmul_vf_bf16mf2_f32m1_tum(vm, vd, vs2, rs1, vl);
+}
+
+// CHECK-RV64-LABEL: define dso_local <vscale x 4 x float> @test_vfwmul_vv_bf16m1_f32m2_tum(
+// CHECK-RV64-SAME: <vscale x 4 x i1> [[VM:%.*]], <vscale x 4 x float> [[VD:%.*]], <vscale x 4 x bfloat> [[VS2:%.*]], <vscale x 4 x bfloat> [[VS1:%.*]], i64 noundef [[VL:%.*]]) #[[ATTR0]] {
+// CHECK-RV64-NEXT: [[ENTRY:.*:]]
+// CHECK-RV64-NEXT: [[TMP0:%.*]] = call <vscale x 4 x float> @llvm.riscv.vfwmul.mask.nxv4f32.nxv4bf16.nxv4bf16.i64(<vscale x 4 x float> [[VD]], <vscale x 4 x bfloat> [[VS2]], <vscale x 4 x bfloat> [[VS1]], <vscale x 4 x i1> [[VM]], i64 7, i64 [[VL]], i64 2)
+// CHECK-RV64-NEXT: ret <vscale x 4 x float> [[TMP0]]
+//
+vfloat32m2_t test_vfwmul_vv_bf16m1_f32m2_tum(vbool16_t vm, vfloat32m2_t vd,
+ vbfloat16m1_t vs2,
+ vbfloat16m1_t vs1, size_t vl) {
+ return __riscv_vfwmul_vv_bf16m1_f32m2_tum(vm, vd, vs2, vs1, vl);
+}
+
+// CHECK-RV64-LABEL: define dso_local <vscale x 4 x float> @test_vfwmul_vf_bf16m1_f32m2_tum(
+// CHECK-RV64-SAME: <vscale x 4 x i1> [[VM:%.*]], <vscale x 4 x float> [[VD:%.*]], <vscale x 4 x bfloat> [[VS2:%.*]], bfloat noundef [[RS1:%.*]], i64 noundef [[VL:%.*]]) #[[ATTR0]] {
+// CHECK-RV64-NEXT: [[ENTRY:.*:]]
+// CHECK-RV64-NEXT: [[TMP0:%.*]] = call <vscale x 4 x float> @llvm.riscv.vfwmul.mask.nxv4f32.nxv4bf16.bf16.i64(<vscale x 4 x float> [[VD]], <vscale x 4 x bfloat> [[VS2]], bfloat [[RS1]], <vscale x 4 x i1> [[VM]], i64 7, i64 [[VL]], i64 2)
+// CHECK-RV64-NEXT: ret <vscale x 4 x float> [[TMP0]]
+//
+vfloat32m2_t test_vfwmul_vf_bf16m1_f32m2_tum(vbool16_t vm, vfloat32m2_t vd,
+ vbfloat16m1_t vs2, __bf16 rs1,
+ size_t vl) {
+ return __riscv_vfwmul_vf_bf16m1_f32m2_tum(vm, vd, vs2, rs1, vl);
+}
+
+// CHECK-RV64-LABEL: define dso_local <vscale x 8 x float> @test_vfwmul_vv_bf16m2_f32m4_tum(
+// CHECK-RV64-SAME: <vscale x 8 x i1> [[VM:%.*]], <vscale x 8 x float> [[VD:%.*]], <vscale x 8 x bfloat> [[VS2:%.*]], <vscale x 8 x bfloat> [[VS1:%.*]], i64 noundef [[VL:%.*]]) #[[ATTR0]] {
+// CHECK-RV64-NEXT: [[ENTRY:.*:]]
+// CHECK-RV64-NEXT: [[TMP0:%.*]] = call <vscale x 8 x float> @llvm.riscv.vfwmul.mask.nxv8f32.nxv8bf16.nxv8bf16.i64(<vscale x 8 x float> [[VD]], <vscale x 8 x bfloat> [[VS2]], <vscale x 8 x bfloat> [[VS1]], <vscale x 8 x i1> [[VM]], i64 7, i64 [[VL]], i64 2)
+// CHECK-RV64-NEXT: ret <vscale x 8 x float> [[TMP0]]
+//
+vfloat32m4_t test_vfwmul_vv_bf16m2_f32m4_tum(vbool8_t vm, vfloat32m4_t vd,
+ vbfloat16m2_t vs2,
+ vbfloat16m2_t vs1, size_t vl) {
+ return __riscv_vfwmul_vv_bf16m2_f32m4_tum(vm, vd, vs2, vs1, vl);
+}
+
+// CHECK-RV64-LABEL: define dso_local <vscale x 8 x float> @test_vfwmul_vf_bf16m2_f32m4_tum(
+// CHECK-RV64-SAME: <vscale x 8 x i1> [[VM:%.*]], <vscale x 8 x float> [[VD:%.*]], <vscale x 8 x bfloat> [[VS2:%.*]], bfloat noundef [[RS1:%.*]], i64 noundef [[VL:%.*]]) #[[ATTR0]] {
+// CHECK-RV64-NEXT: [[ENTRY:.*:]]
+// CHECK-RV64-NEXT: [[TMP0:%.*]] = call <vscale x 8 x float> @llvm.riscv.vfwmul.mask.nxv8f32.nxv8bf16.bf16.i64(<vscale x 8 x float> [[VD]], <vscale x 8 x bfloat> [[VS2]], bfloat [[RS1]], <vscale x 8 x i1> [[VM]], i64 7, i64 [[VL]], i64 2)
+// CHECK-RV64-NEXT: ret <vscale x 8 x float> [[TMP0]]
+//
+vfloat32m4_t test_vfwmul_vf_bf16m2_f32m4_tum(vbool8_t vm, vfloat32m4_t vd,
+ vbfloat16m2_t vs2, __bf16 rs1,
+ size_t vl) {
+ return __riscv_vfwmul_vf_bf16m2_f32m4_tum(vm, vd, vs2, rs1, vl);
+}
+
+// CHECK-RV64-LABEL: define dso_local <vscale x 16 x float> @test_vfwmul_vv_bf16m4_f32m8_tum(
+// CHECK-RV64-SAME: <vscale x 16 x i1> [[VM:%.*]], <vscale x 16 x float> [[VD:%.*]], <vscale x 16 x bfloat> [[VS2:%.*]], <vscale x 16 x bfloat> [[VS1:%.*]], i64 noundef [[VL:%.*]]) #[[ATTR0]] {
+// CHECK-RV64-NEXT: [[ENTRY:.*:]]
+// CHECK-RV64-NEXT: [[TMP0:%.*]] = call <vscale x 16 x float> @llvm.riscv.vfwmul.mask.nxv16f32.nxv16bf16.nxv16bf16.i64(<vscale x 16 x float> [[VD]], <vscale x 16 x bfloat> [[VS2]], <vscale x 16 x bfloat> [[VS1]], <vscale x 16 x i1> [[VM]], i64 7, i64 [[VL]], i64 2)
+// CHECK-RV64-NEXT: ret <vscale x 16 x float> [[TMP0]]
+//
+vfloat32m8_t test_vfwmul_vv_bf16m4_f32m8_tum(vbool4_t vm, vfloat32m8_t vd,
+ vbfloat16m4_t vs2,
+ vbfloat16m4_t vs1, size_t vl) {
+ return __riscv_vfwmul_vv_bf16m4_f32m8_tum(vm, vd, vs2, vs1, vl);
+}
+
+// CHECK-RV64-LABEL: define dso_local <vscale x 16 x float> @test_vfwmul_vf_bf16m4_f32m8_tum(
+// CHECK-RV64-SAME: <vscale x 16 x i1> [[VM:%.*]], <vscale x 16 x float> [[VD:%.*]], <vscale x 16 x bfloat> [[VS2:%.*]], bfloat noundef [[RS1:%.*]], i64 noundef [[VL:%.*]]) #[[ATTR0]] {
+// CHECK-RV64-NEXT: [[ENTRY:.*:]]
+// CHECK-RV64-NEXT: [[TMP0:%.*]] = call <vscale x 16 x float> @llvm.riscv.vfwmul.mask.nxv16f32.nxv16bf16.bf16.i64(<vscale x 16 x float> [[VD]], <vscale x 16 x bfloat> [[VS2]], bfloat [[RS1]], <vscale x 16 x i1> [[VM]], i64 7, i64 [[VL]], i64 2)
+// CHECK-RV64-NEXT: ret <vscale x 16 x float> [[TMP0]]
+//
+vfloat32m8_t test_vfwmul_vf_bf16m4_f32m8_tum(vbool4_t vm, vfloat32m8_t vd,
+ vbfloat16m4_t vs2, __bf16 rs1,
+ size_t vl) {
+ return __riscv_vfwmul_vf_bf16m4_f32m8_tum(vm, vd, vs2, rs1, vl);
+}
+
+// CHECK-RV64-LABEL: define dso_local <vscale x 1 x float> @test_vfwmul_vv_bf16mf4_f32mf2_tumu(
+// CHECK-RV64-SAME: <vscale x 1 x i1> [[VM:%.*]], <vscale x 1 x float> [[VD:%.*]], <vscale x 1 x bfloat> [[VS2:%.*]], <vscale x 1 x bfloat> [[VS1:%.*]], i64 noundef [[VL:%.*]]) #[[ATTR0]] {
+// CHECK-RV64-NEXT: [[ENTRY:.*:]]
+// CHECK-RV64-NEXT: [[TMP0:%.*]] = call <vscale x 1 x float> @llvm.riscv.vfwmul.mask.nxv1f32.nxv1bf16.nxv1bf16.i64(<vscale x 1 x float> [[VD]], <vscale x 1 x bfloat> [[VS2]], <vscale x 1 x bfloat> [[VS1]], <vscale x 1 x i1> [[VM]], i64 7, i64 [[VL]], i64 0)
+// CHECK-RV64-NEXT: ret <vscale x 1 x float> [[TMP0]]
+//
+vfloat32mf2_t test_vfwmul_vv_bf16mf4_f32mf2_tumu(vbool64_t vm, vfloat32mf2_t vd,
+ vbfloat16mf4_t vs2,
+ vbfloat16mf4_t vs1,
+ size_t vl) {
+ return __riscv_vfwmul_vv_bf16mf4_f32mf2_tumu(vm, vd, vs2, vs1, vl);
+}
+
+// CHECK-RV64-LABEL: define dso_local <vscale x 1 x float> @test_vfwmul_vf_bf16mf4_f32mf2_tumu(
+// CHECK-RV64-SAME: <vscale x 1 x i1> [[VM:%.*]], <vscale x 1 x float> [[VD:%.*]], <vscale x 1 x bfloat> [[VS2:%.*]], bfloat noundef [[RS1:%.*]], i64 noundef [[VL:%.*]]) #[[ATTR0]] {
+// CHECK-RV64-NEXT: [[ENTRY:.*:]]
+// CHECK-RV64-NEXT: [[TMP0:%.*]] = call <vscale x 1 x float> @llvm.riscv.vfwmul.mask.nxv1f32.nxv1bf16.bf16.i64(<vscale x 1 x float> [[VD]], <vscale x 1 x bfloat> [[VS2]], bfloat [[RS1]], <vscale x 1 x i1> [[VM]], i64 7, i64 [[VL]], i64 0)
+// CHECK-RV64-NEXT: ret <vscale x 1 x float> [[TMP0]]
+//
+vfloat32mf2_t test_vfwmul_vf_bf16mf4_f32mf2_tumu(vbool64_t vm, vfloat32mf2_t vd,
+ vbfloat16mf4_t vs2, __bf16 rs1,
+ size_t vl) {
+ return __riscv_vfwmul_vf_bf16mf4_f32mf2_tumu(vm, vd, vs2, rs1, vl);
+}
+
+// CHECK-RV64-LABEL: define dso_local <vscale x 2 x float> @test_vfwmul_vv_bf16mf2_f32m1_tumu(
+// CHECK-RV64-SAME: <vscale x 2 x i1> [[VM:%.*]], <vscale x 2 x float> [[VD:%.*]], <vscale x 2 x bfloat> [[VS2:%.*]], <vscale x 2 x bfloat> [[VS1:%.*]], i64 noundef [[VL:%.*]]) #[[ATTR0]] {
+// CHECK-RV64-NEXT: [[ENTRY:.*:]]
+// CHECK-RV64-NEXT: [[TMP0:%.*]] = call <vscale x 2 x float> @llvm.riscv.vfwmul.mask.nxv2f32.nxv2bf16.nxv2bf16.i64(<vscale x 2 x float> [[VD]], <vscale x 2 x bfloat> [[VS2]], <vscale x 2 x bfloat> [[VS1]], <vscale x 2 x i1> [[VM]], i64 7, i64 [[VL]], i64 0)
+// CHECK-RV64-NEXT: ret <vscale x 2 x float> [[TMP0]]
+//
+vfloat32m1_t test_vfwmul_vv_bf16mf2_f32m1_tumu(vbool32_t vm, vfloat32m1_t vd,
+ vbfloat16mf2_t vs2,
+ vbfloat16mf2_t vs1, size_t vl) {
+ return __riscv_vfwmul_vv_bf16mf2_f32m1_tumu(vm, vd, vs2, vs1, vl);
+}
+
+// CHECK-RV64-LABEL: define dso_local <vscale x 2 x float> @test_vfwmul_vf_bf16mf2_f32m1_tumu(
+// CHECK-RV64-SAME: <vscale x 2 x i1> [[VM:%.*]], <vscale x 2 x float> [[VD:%.*]], <vscale x 2 x bfloat> [[VS2:%.*]], bfloat noundef [[RS1:%.*]], i64 noundef [[VL:%.*]]) #[[ATTR0]] {
+// CHECK-RV64-NEXT: [[ENTRY:.*:]]
+// CHECK-RV64-NEXT: [[TMP0:%.*]] = call <vscale x 2 x float> @llvm.riscv.vfwmul.mask.nxv2f32.nxv2bf16.bf16.i64(<vscale x 2 x float> [[VD]], <vscale x 2 x bfloat> [[VS2]], bfloat [[RS1]], <vscale x 2 x i1> [[VM]], i64 7, i64 [[VL]], i64 0)
+// CHECK-RV64-NEXT: ret <vscale x 2 x float> [[TMP0]]
+//
+vfloat32m1_t test_vfwmul_vf_bf16mf2_f32m1_tumu(vbool32_t vm, vfloat32m1_t vd,
+ vbfloat16mf2_t vs2, __bf16 rs1,
+ size_t vl) {
+ return __riscv_vfwmul_vf_bf16mf2_f32m1_tumu(vm, vd, vs2, rs1, vl);
+}
+
+// CHECK-RV64-LABEL: define dso_local <vscale x 4 x float> @test_vfwmul_vv_bf16m1_f32m2_tumu(
+// CHECK-RV64-SAME: <vscale x 4 x i1> [[VM:%.*]], <vscale x 4 x float> [[VD:%.*]], <vscale x 4 x bfloat> [[VS2:%.*]], <vscale x 4 x bfloat> [[VS1:%.*]], i64 noundef [[VL:%.*]]) #[[ATTR0]] {
+// CHECK-RV64-NEXT: [[ENTRY:.*:]]
+// CHECK-RV64-NEXT: [[TMP0:%.*]] = call <vscale x 4 x float> @llvm.riscv.vfwmul.mask.nxv4f32.nxv4bf16.nxv4bf16.i64(<vscale x 4 x float> [[VD]], <vscale x 4 x bfloat> [[VS2]], <vscale x 4 x bfloat> [[VS1]], <vscale x 4 x i1> [[VM]], i64 7, i64 [[VL]], i64 0)
+// CHECK-RV64-NEXT: ret <vscale x 4 x float> [[TMP0]]
+//
+vfloat32m2_t test_vfwmul_vv_bf16m1_f32m2_tumu(vbool16_t vm, vfloat32m2_t vd,
+ vbfloat16m1_t vs2,
+ vbfloat16m1_t vs1, size_t vl) {
+ return __riscv_vfwmul_vv_bf16m1_f32m2_tumu(vm, vd, vs2, vs1, vl);
+}
+
+// CHECK-RV64-LABEL: define dso_local <vscale x 4 x float> @test_vfwmul_vf_bf16m1_f32m2_tumu(
+// CHECK-RV64-SAME: <vscale x 4 x i1> [[VM:%.*]], <vscale x 4 x float> [[VD:%.*]], <vscale x 4 x bfloat> [[VS2:%.*]], bfloat noundef [[RS1:%.*]], i64 noundef [[VL:%.*]]) #[[ATTR0]] {
+// CHECK-RV64-NEXT: [[ENTRY:.*:]]
+// CHECK-RV64-NEXT: [[TMP0:%.*]] = call <vscale x 4 x float> @llvm.riscv.vfwmul.mask.nxv4f32.nxv4bf16.bf16.i64(<vscale x 4 x float> [[VD]], <vscale x 4 x bfloat> [[VS2]], bfloat [[RS1]], <vscale x 4 x i1> [[VM]], i64 7, i64 [[VL]], i64 0)
+// CHECK-RV64-NEXT: ret <vscale x 4 x float> [[TMP0]]
+//
+vfloat32m2_t test_vfwmul_vf_bf16m1_f32m2_tumu(vbool16_t vm, vfloat32m2_t vd,
+ vbfloat16m1_t vs2, __bf16 rs1,
+ size_t vl) {
+ return __riscv_vfwmul_vf_bf16m1_f32m2_tumu(vm, vd, vs2, rs1, vl);
+}
+
+// CHECK-RV64-LABEL: define dso_local <vscale x 8 x float> @test_vfwmul_vv_bf16m2_f32m4_tumu(
+// CHECK-RV64-SAME: <vscale x 8 x i1> [[VM:%.*]], <vscale x 8 x float> [[VD:%.*]], <vscale x 8 x bfloat> [[VS2:%.*]], <vscale x 8 x bfloat> [[VS1:%.*]], i64 noundef [[VL:%.*]]) #[[ATTR0]] {
+// CHECK-RV64-NEXT: [[ENTRY:.*:]]
+// CHECK-RV64-NEXT: [[TMP0:%.*]] = call <vscale x 8 x float> @llvm.riscv.vfwmul.mask.nxv8f32.nxv8bf16.nxv8bf16.i64(<vscale x 8 x float> [[VD]], <vscale x 8 x bfloat> [[VS2]], <vscale x 8 x bfloat> [[VS1]], <vscale x 8 x i1> [[VM]], i64 7, i64 [[VL]], i64 0)
+// CHECK-RV64-NEXT: ret <vscale x 8 x float> [[TMP0]]
+//
+vfloat32m4_t test_vfwmul_vv_bf16m2_f32m4_tumu(vbool8_t vm, vfloat32m4_t vd,
+ vbfloat16m2_t vs2,
+ vbfloat16m2_t vs1, size_t vl) {
+ return __riscv_vfwmul_vv_bf16m2_f32m4_tumu(vm, vd, vs2, vs1, vl);
+}
+
+// CHECK-RV64-LABEL: define dso_local <vscale x 8 x float> @test_vfwmul_vf_bf16m2_f32m4_tumu(
+// CHECK-RV64-SAME: <vscale x 8 x i1> [[VM:%.*]], <vscale x 8 x float> [[VD:%.*]], <vscale x 8 x bfloat> [[VS2:%.*]], bfloat noundef [[RS1:%.*]], i64 noundef [[VL:%.*]]) #[[ATTR0]] {
+// CHECK-RV64-NEXT: [[ENTRY:.*:]]
+// CHECK-RV64-NEXT: [[TMP0:%.*]] = call <vscale x 8 x float> @llvm.riscv.vfwmul.mask.nxv8f32.nxv8bf16.bf16.i64(<vscale x 8 x float> [[VD]], <vscale x 8 x bfloat> [[VS2]], bfloat [[RS1]], <vscale x 8 x i1> [[VM]], i64 7, i64 [[VL]], i64 0)
+// CHECK-RV64-NEXT: ret <vscale x 8 x float> [[TMP0]]
+//
+vfloat32m4_t test_vfwmul_vf_bf16m2_f32m4_tumu(vbool8_t vm, vfloat32m4_t vd,
+ vbfloat16m2_t vs2, __bf16 rs1,
+ size_t vl) {
+ return __riscv_vfwmul_vf_bf16m2_f32m4_tumu(vm, vd, vs2, rs1, vl);
+}
+
+// CHECK-RV64-LABEL: define dso_local <vscale x 16 x float> @test_vfwmul_vv_bf16m4_f32m8_tumu(
+// CHECK-RV64-SAME: <vscale x 16 x i1> [[VM:%.*]], <vscale x 16 x float> [[VD:%.*]], <vscale x 16 x bfloat> [[VS2:%.*]], <vscale x 16 x bfloat> [[VS1:%.*]], i64 noundef [[VL:%.*]]) #[[ATTR0]] {
+// CHECK-RV64-NEXT: [[ENTRY:.*:]]
+// CHECK-RV64-NEXT: [[TMP0:%.*]] = call <vscale x 16 x float> @llvm.riscv.vfwmul.mask.nxv16f32.nxv16bf16.nxv16bf16.i64(<vscale x 16 x float> [[VD]], <vscale x 16 x bfloat> [[VS2]], <vscale x 16 x bfloat> [[VS1]], <vscale x 16 x i1> [[VM]], i64 7, i64 [[VL]], i64 0)
+// CHECK-RV64-NEXT: ret <vscale x 16 x float> [[TMP0]]
+//
+vfloat32m8_t test_vfwmul_vv_bf16m4_f32m8_tumu(vbool4_t vm, vfloat32m8_t vd,
+ vbfloat16m4_t vs2,
+ vbfloat16m4_t vs1, size_t vl) {
+ return __riscv_vfwmul_vv_bf16m4_f32m8_tumu(vm, vd, vs2, vs1, vl);
+}
+
+// CHECK-RV64-LABEL: define dso_local <vscale x 16 x float> @test_vfwmul_vf_bf16m4_f32m8_tumu(
+// CHECK-RV64-SAME: <vscale x 16 x i1> [[VM:%.*]], <vscale x 16 x float> [[VD:%.*]], <vscale x 16 x bfloat> [[VS2:%.*]], bfloat noundef [[RS1:%.*]], i64 noundef [[VL:%.*]]) #[[ATTR0]] {
+// CHECK-RV64-NEXT: [[ENTRY:.*:]]
+// CHECK-RV64-NEXT: [[TMP0:%.*]] = call <vscale x 16 x float> @llvm.riscv.vfwmul.mask.nxv16f32.nxv16bf16.bf16.i64(<vscale x 16 x float> [[VD]], <vscale x 16 x bfloat> [[VS2]], bfloat [[RS1]], <vscale x 16 x i1> [[VM]], i64 7, i64 [[VL]], i64 0)
+// CHECK-RV64-NEXT: ret <vscale x 16 x float> [[TMP0]]
+//
+vfloat32m8_t test_vfwmul_vf_bf16m4_f32m8_tumu(vbool4_t vm, vfloat32m8_t vd,
+ vbfloat16m4_t vs2, __bf16 rs1,
+ size_t vl) {
+ return __riscv_vfwmul_vf_bf16m4_f32m8_tumu(vm, vd, vs2, rs1, vl);
+}
+
+// CHECK-RV64-LABEL: define dso_local <vscale x 1 x float> @test_vfwmul_vv_bf16mf4_f32mf2_mu(
+// CHECK-RV64-SAME: <vscale x 1 x i1> [[VM:%.*]], <vscale x 1 x float> [[VD:%.*]], <vscale x 1 x bfloat> [[VS2:%.*]], <vscale x 1 x bfloat> [[VS1:%.*]], i64 noundef [[VL:%.*]]) #[[ATTR0]] {
+// CHECK-RV64-NEXT: [[ENTRY:.*:]]
+// CHECK-RV64-NEXT: [[TMP0:%.*]] = call <vscale x 1 x float> @llvm.riscv.vfwmul.mask.nxv1f32.nxv1bf16.nxv1bf16.i64(<vscale x 1 x float> [[VD]], <vscale x 1 x bfloat> [[VS2]], <vscale x 1 x bfloat> [[VS1]], <vscale x 1 x i1> [[VM]], i64 7, i64 [[VL]], i64 1)
+// CHECK-RV64-NEXT: ret <vscale x 1 x float> [[TMP0]]
+//
+vfloat32mf2_t test_vfwmul_vv_bf16mf4_f32mf2_mu(vbool64_t vm, vfloat32mf2_t vd,
+ vbfloat16mf4_t vs2,
+ vbfloat16mf4_t vs1, size_t vl) {
+ return __riscv_vfwmul_vv_bf16mf4_f32mf2_mu(vm, vd, vs2, vs1, vl);
+}
+
+// CHECK-RV64-LABEL: define dso_local <vscale x 1 x float> @test_vfwmul_vf_bf16mf4_f32mf2_mu(
+// CHECK-RV64-SAME: <vscale x 1 x i1> [[VM:%.*]], <vscale x 1 x float> [[VD:%.*]], <vscale x 1 x bfloat> [[VS2:%.*]], bfloat noundef [[RS1:%.*]], i64 noundef [[VL:%.*]]) #[[ATTR0]] {
+// CHECK-RV64-NEXT: [[ENTRY:.*:]]
+// CHECK-RV64-NEXT: [[TMP0:%.*]] = call <vscale x 1 x float> @llvm.riscv.vfwmul.mask.nxv1f32.nxv1bf16.bf16.i64(<vscale x 1 x float> [[VD]], <vscale x 1 x bfloat> [[VS2]], bfloat [[RS1]], <vscale x 1 x i1> [[VM]], i64 7, i64 [[VL]], i64 1)
+// CHECK-RV64-NEXT: ret <vscale x 1 x float> [[TMP0]]
+//
+vfloat32mf2_t test_vfwmul_vf_bf16mf4_f32mf2_mu(vbool64_t vm, vfloat32mf2_t vd,
+ vbfloat16mf4_t vs2, __bf16 rs1,
+ size_t vl) {
+ return __riscv_vfwmul_vf_bf16mf4_f32mf2_mu(vm, vd, vs2, rs1, vl);
+}
+
+// CHECK-RV64-LABEL: define dso_local <vscale x 2 x float> @test_vfwmul_vv_bf16mf2_f32m1_mu(
+// CHECK-RV64-SAME: <vscale x 2 x i1> [[VM:%.*]], <vscale x 2 x float> [[VD:%.*]], <vscale x 2 x bfloat> [[VS2:%.*]], <vscale x 2 x bfloat> [[VS1:%.*]], i64 noundef [[VL:%.*]]) #[[ATTR0]] {
+// CHECK-RV64-NEXT: [[ENTRY:.*:]]
+// CHECK-RV64-NEXT: [[TMP0:%.*]] = call <vscale x 2 x float> @llvm.riscv.vfwmul.mask.nxv2f32.nxv2bf16.nxv2bf16.i64(<vscale x 2 x float> [[VD]], <vscale x 2 x bfloat> [[VS2]], <vscale x 2 x bfloat> [[VS1]], <vscale x 2 x i1> [[VM]], i64 7, i64 [[VL]], i64 1)
+// CHECK-RV64-NEXT: ret <vscale x 2 x float> [[TMP0]]
+//
+vfloat32m1_t test_vfwmul_vv_bf16mf2_f32m1_mu(vbool32_t vm, vfloat32m1_t vd,
+ vbfloat16mf2_t vs2,
+ vbfloat16mf2_t vs1, size_t vl) {
+ return __riscv_vfwmul_vv_bf16mf2_f32m1_mu(vm, vd, vs2, vs1, vl);
+}
+
+// CHECK-RV64-LABEL: define dso_local <vscale x 2 x float> @test_vfwmul_vf_bf16mf2_f32m1_mu(
+// CHECK-RV64-SAME: <vscale x 2 x i1> [[VM:%.*]], <vscale x 2 x float> [[VD:%.*]], <vscale x 2 x bfloat> [[VS2:%.*]], bfloat noundef [[RS1:%.*]], i64 noundef [[VL:%.*]]) #[[ATTR0]] {
+// CHECK-RV64-NEXT: [[ENTRY:.*:]]
+// CHECK-RV64-NEXT: [[TMP0:%.*]] = call <vscale x 2 x float> @llvm.riscv.vfwmul.mask.nxv2f32.nxv2bf16.bf16.i64(<vscale x 2 x float> [[VD]], <vscale x 2 x bfloat> [[VS2]], bfloat [[RS1]], <vscale x 2 x i1> [[VM]], i64 7, i64 [[VL]], i64 1)
+// CHECK-RV64-NEXT: ret <vscale x 2 x float> [[TMP0]]
+//
+vfloat32m1_t test_vfwmul_vf_bf16mf2_f32m1_mu(vbool32_t vm, vfloat32m1_t vd,
+ vbfloat16mf2_t vs2, __bf16 rs1,
+ size_t vl) {
+ return __riscv_vfwmul_vf_bf16mf2_f32m1_mu(vm, vd, vs2, rs1, vl);
+}
+
+// CHECK-RV64-LABEL: define dso_local <vscale x 4 x float> @test_vfwmul_vv_bf16m1_f32m2_mu(
+// CHECK-RV64-SAME: <vscale x 4 x i1> [[VM:%.*]], <vscale x 4 x float> [[VD:%.*]], <vscale x 4 x bfloat> [[VS2:%.*]], <vscale x 4 x bfloat> [[VS1:%.*]], i64 noundef [[VL:%.*]]) #[[ATTR0]] {
+// CHECK-RV64-NEXT: [[ENTRY:.*:]]
+// CHECK-RV64-NEXT: [[TMP0:%.*]] = call <vscale x 4 x float> @llvm.riscv.vfwmul.mask.nxv4f32.nxv4bf16.nxv4bf16.i64(<vscale x 4 x float> [[VD]], <vscale x 4 x bfloat> [[VS2]], <vscale x 4 x bfloat> [[VS1]], <vscale x 4 x i1> [[VM]], i64 7, i64 [[VL]], i64 1)
+// CHECK-RV64-NEXT: ret <vscale x 4 x float> [[TMP0]]
+//
+vfloat32m2_t test_vfwmul_vv_bf16m1_f32m2_mu(vbool16_t vm, vfloat32m2_t vd,
+ vbfloat16m1_t vs2,
+ vbfloat16m1_t vs1, size_t vl) {
+ return __riscv_vfwmul_vv_bf16m1_f32m2_mu(vm, vd, vs2, vs1, vl);
+}
+
+// CHECK-RV64-LABEL: define dso_local <vscale x 4 x float> @test_vfwmul_vf_bf16m1_f32m2_mu(
+// CHECK-RV64-SAME: <vscale x 4 x i1> [[VM:%.*]], <vscale x 4 x float> [[VD:%.*]], <vscale x 4 x bfloat> [[VS2:%.*]], bfloat noundef [[RS1:%.*]], i64 noundef [[VL:%.*]]) #[[ATTR0]] {
+// CHECK-RV64-NEXT: [[ENTRY:.*:]]
+// CHECK-RV64-NEXT: [[TMP0:%.*]] = call <vscale x 4 x float> @llvm.riscv.vfwmul.mask.nxv4f32.nxv4bf16.bf16.i64(<vscale x 4 x float> [[VD]], <vscale x 4 x bfloat> [[VS2]], bfloat [[RS1]], <vscale x 4 x i1> [[VM]], i64 7, i64 [[VL]], i64 1)
+// CHECK-RV64-NEXT: ret <vscale x 4 x float> [[TMP0]]
+//
+vfloat32m2_t test_vfwmul_vf_bf16m1_f32m2_mu(vbool16_t vm, vfloat32m2_t vd,
+ vbfloat16m1_t vs2, __bf16 rs1,
+ size_t vl) {
+ return __riscv_vfwmul_vf_bf16m1_f32m2_mu(vm, vd, vs2, rs1, vl);
+}
+
+// CHECK-RV64-LABEL: define dso_local <vscale x 8 x float> @test_vfwmul_vv_bf16m2_f32m4_mu(
+// CHECK-RV64-SAME: <vscale x 8 x i1> [[VM:%.*]], <vscale x 8 x float> [[VD:%.*]], <vscale x 8 x bfloat> [[VS2:%.*]], <vscale x 8 x bfloat> [[VS1:%.*]], i64 noundef [[VL:%.*]]) #[[ATTR0]] {
+// CHECK-RV64-NEXT: [[ENTRY:.*:]]
+// CHECK-RV64-NEXT: [[TMP0:%.*]] = call <vscale x 8 x float> @llvm.riscv.vfwmul.mask.nxv8f32.nxv8bf16.nxv8bf16.i64(<vscale x 8 x float> [[VD]], <vscale x 8 x bfloat> [[VS2]], <vscale x 8 x bfloat> [[VS1]], <vscale x 8 x i1> [[VM]], i64 7, i64 [[VL]], i64 1)
+// CHECK-RV64-NEXT: ret <vscale x 8 x float> [[TMP0]]
+//
+vfloat32m4_t test_vfwmul_vv_bf16m2_f32m4_mu(vbool8_t vm, vfloat32m4_t vd,
+ vbfloat16m2_t vs2,
+ vbfloat16m2_t vs1, size_t vl) {
+ return __riscv_vfwmul_vv_bf16m2_f32m4_mu(vm, vd, vs2, vs1, vl);
+}
+
+// CHECK-RV64-LABEL: define dso_local <vscale x 8 x float> @test_vfwmul_vf_bf16m2_f32m4_mu(
+// CHECK-RV64-SAME: <vscale x 8 x i1> [[VM:%.*]], <vscale x 8 x float> [[VD:%.*]], <vscale x 8 x bfloat> [[VS2:%.*]], bfloat noundef [[RS1:%.*]], i64 noundef [[VL:%.*]]) #[[ATTR0]] {
+// CHECK-RV64-NEXT: [[ENTRY:.*:]]
+// CHECK-RV64-NEXT: [[TMP0:%.*]] = call <vscale x 8 x float> @llvm.riscv.vfwmul.mask.nxv8f32.nxv8bf16.bf16.i64(<vscale x 8 x float> [[VD]], <vscale x 8 x bfloat> [[VS2]], bfloat [[RS1]], <vscale x 8 x i1> [[VM]], i64 7, i64 [[VL]], i64 1)
+// CHECK-RV64-NEXT: ret <vscale x 8 x float> [[TMP0]]
+//
+vfloat32m4_t test_vfwmul_vf_bf16m2_f32m4_mu(vbool8_t vm, vfloat32m4_t vd,
+ vbfloat16m2_t vs2, __bf16 rs1,
+ size_t vl) {
+ return __riscv_vfwmul_vf_bf16m2_f32m4_mu(vm, vd, vs2, rs1, vl);
+}
+
+// CHECK-RV64-LABEL: define dso_local <vscale x 16 x float> @test_vfwmul_vv_bf16m4_f32m8_mu(
+// CHECK-RV64-SAME: <vscale x 16 x i1> [[VM:%.*]], <vscale x 16 x float> [[VD:%.*]], <vscale x 16 x bfloat> [[VS2:%.*]], <vscale x 16 x bfloat> [[VS1:%.*]], i64 noundef [[VL:%.*]]) #[[ATTR0]] {
+// CHECK-RV64-NEXT: [[ENTRY:.*:]]
+// CHECK-RV64-NEXT: [[TMP0:%.*]] = call <vscale x 16 x float> @llvm.riscv.vfwmul.mask.nxv16f32.nxv16bf16.nxv16bf16.i64(<vscale x 16 x float> [[VD]], <vscale x 16 x bfloat> [[VS2]], <vscale x 16 x bfloat> [[VS1]], <vscale x 16 x i1> [[VM]], i64 7, i64 [[VL]], i64 1)
+// CHECK-RV64-NEXT: ret <vscale x 16 x float> [[TMP0]]
+//
+vfloat32m8_t test_vfwmul_vv_bf16m4_f32m8_mu(vbool4_t vm, vfloat32m8_t vd,
+ vbfloat16m4_t vs2,
+ vbfloat16m4_t vs1, size_t vl) {
+ return __riscv_vfwmul_vv_bf16m4_f32m8_mu(vm, vd, vs2, vs1, vl);
+}
+
+// CHECK-RV64-LABEL: define dso_local <vscale x 16 x float> @test_vfwmul_vf_bf16m4_f32m8_mu(
+// CHECK-RV64-SAME: <vscale x 16 x i1> [[VM:%.*]], <vscale x 16 x float> [[VD:%.*]], <vscale x 16 x bfloat> [[VS2:%.*]], bfloat noundef [[RS1:%.*]], i64 noundef [[VL:%.*]]) #[[ATTR0]] {
+// CHECK-RV64-NEXT: [[ENTRY:.*:]]
+// CHECK-RV64-NEXT: [[TMP0:%.*]] = call <vscale x 16 x float> @llvm.riscv.vfwmul.mask.nxv16f32.nxv16bf16.bf16.i64(<vscale x 16 x float> [[VD]], <vscale x 16 x bfloat> [[VS2]], bfloat [[RS1]], <vscale x 16 x i1> [[VM]], i64 7, i64 [[VL]], i64 1)
+// CHECK-RV64-NEXT: ret <vscale x 16 x float> [[TMP0]]
+//
+vfloat32m8_t test_vfwmul_vf_bf16m4_f32m8_mu(vbool4_t vm, vfloat32m8_t vd,
+ vbfloat16m4_t vs2, __bf16 rs1,
+ size_t vl) {
+ return __riscv_vfwmul_vf_bf16m4_f32m8_mu(vm, vd, vs2, rs1, vl);
+}
+
+// CHECK-RV64-LABEL: define dso_local <vscale x 1 x float> @test_vfwmul_vv_bf16mf4_f32mf2_rm_tu(
+// CHECK-RV64-SAME: <vscale x 1 x float> [[VD:%.*]], <vscale x 1 x bfloat> [[VS2:%.*]], <vscale x 1 x bfloat> [[VS1:%.*]], i64 noundef [[VL:%.*]]) #[[ATTR0]] {
+// CHECK-RV64-NEXT: [[ENTRY:.*:]]
+// CHECK-RV64-NEXT: [[TMP0:%.*]] = call <vscale x 1 x float> @llvm.riscv.vfwmul.nxv1f32.nxv1bf16.nxv1bf16.i64(<vscale x 1 x float> [[VD]], <vscale x 1 x bfloat> [[VS2]], <vscale x 1 x bfloat> [[VS1]], i64 0, i64 [[VL]])
+// CHECK-RV64-NEXT: ret <vscale x 1 x float> [[TMP0]]
+//
+vfloat32mf2_t test_vfwmul_vv_bf16mf4_f32mf2_rm_tu(vfloat32mf2_t vd,
+ vbfloat16mf4_t vs2,
+ vbfloat16mf4_t vs1,
+ size_t vl) {
+ return __riscv_vfwmul_vv_bf16mf4_f32mf2_rm_tu(vd, vs2, vs1, __RISCV_FRM_RNE,
+ vl);
+}
+
+// CHECK-RV64-LABEL: define dso_local <vscale x 1 x float> @test_vfwmul_vf_bf16mf4_f32mf2_rm_tu(
+// CHECK-RV64-SAME: <vscale x 1 x float> [[VD:%.*]], <vscale x 1 x bfloat> [[VS2:%.*]], bfloat noundef [[RS1:%.*]], i64 noundef [[VL:%.*]]) #[[ATTR0]] {
+// CHECK-RV64-NEXT: [[ENTRY:.*:]]
+// CHECK-RV64-NEXT: [[TMP0:%.*]] = call <vscale x 1 x float> @llvm.riscv.vfwmul.nxv1f32.nxv1bf16.bf16.i64(<vscale x 1 x float> [[VD]], <vscale x 1 x bfloat> [[VS2]], bfloat [[RS1]], i64 0, i64 [[VL]])
+// CHECK-RV64-NEXT: ret <vscale x 1 x float> [[TMP0]]
+//
+vfloat32mf2_t test_vfwmul_vf_bf16mf4_f32mf2_rm_tu(vfloat32mf2_t vd,
+ vbfloat16mf4_t vs2,
+ __bf16 rs1, size_t vl) {
+ return __riscv_vfwmul_vf_bf16mf4_f32mf2_rm_tu(vd, vs2, rs1, __RISCV_FRM_RNE,
+ vl);
+}
+
+// CHECK-RV64-LABEL: define dso_local <vscale x 2 x float> @test_vfwmul_vv_bf16mf2_f32m1_rm_tu(
+// CHECK-RV64-SAME: <vscale x 2 x float> [[VD:%.*]], <vscale x 2 x bfloat> [[VS2:%.*]], <vscale x 2 x bfloat> [[VS1:%.*]], i64 noundef [[VL:%.*]]) #[[ATTR0]] {
+// CHECK-RV64-NEXT: [[ENTRY:.*:]]
+// CHECK-RV64-NEXT: [[TMP0:%.*]] = call <vscale x 2 x float> @llvm.riscv.vfwmul.nxv2f32.nxv2bf16.nxv2bf16.i64(<vscale x 2 x float> [[VD]], <vscale x 2 x bfloat> [[VS2]], <vscale x 2 x bfloat> [[VS1]], i64 0, i64 [[VL]])
+// CHECK-RV64-NEXT: ret <vscale x 2 x float> [[TMP0]]
+//
+vfloat32m1_t test_vfwmul_vv_bf16mf2_f32m1_rm_tu(vfloat32m1_t vd,
+ vbfloat16mf2_t vs2,
+ vbfloat16mf2_t vs1, size_t vl) {
+ return __riscv_vfwmul_vv_bf16mf2_f32m1_rm_tu(vd, vs2, vs1, __RISCV_FRM_RNE,
+ vl);
+}
+
+// CHECK-RV64-LABEL: define dso_local <vscale x 2 x float> @test_vfwmul_vf_bf16mf2_f32m1_rm_tu(
+// CHECK-RV64-SAME: <vscale x 2 x float> [[VD:%.*]], <vscale x 2 x bfloat> [[VS2:%.*]], bfloat noundef [[RS1:%.*]], i64 noundef [[VL:%.*]]) #[[ATTR0]] {
+// CHECK-RV64-NEXT: [[ENTRY:.*:]]
+// CHECK-RV64-NEXT: [[TMP0:%.*]] = call <vscale x 2 x float> @llvm.riscv.vfwmul.nxv2f32.nxv2bf16.bf16.i64(<vscale x 2 x float> [[VD]], <vscale x 2 x bfloat> [[VS2]], bfloat [[RS1]], i64 0, i64 [[VL]])
+// CHECK-RV64-NEXT: ret <vscale x 2 x float> [[TMP0]]
+//
+vfloat32m1_t test_vfwmul_vf_bf16mf2_f32m1_rm_tu(vfloat32m1_t vd,
+ vbfloat16mf2_t vs2, __bf16 rs1,
+ size_t vl) {
+ return __riscv_vfwmul_vf_bf16mf2_f32m1_rm_tu(vd, vs2, rs1, __RISCV_FRM_RNE,
+ vl);
+}
+
+// CHECK-RV64-LABEL: define dso_local <vscale x 4 x float> @test_vfwmul_vv_bf16m1_f32m2_rm_tu(
+// CHECK-RV64-SAME: <vscale x 4 x float> [[VD:%.*]], <vscale x 4 x bfloat> [[VS2:%.*]], <vscale x 4 x bfloat> [[VS1:%.*]], i64 noundef [[VL:%.*]]) #[[ATTR0]] {
+// CHECK-RV64-NEXT: [[ENTRY:.*:]]
+// CHECK-RV64-NEXT: [[TMP0:%.*]] = call <vscale x 4 x float> @llvm.riscv.vfwmul.nxv4f32.nxv4bf16.nxv4bf16.i64(<vscale x 4 x float> [[VD]], <vscale x 4 x bfloat> [[VS2]], <vscale x 4 x bfloat> [[VS1]], i64 0, i64 [[VL]])
+// CHECK-RV64-NEXT: ret <vscale x 4 x float> [[TMP0]]
+//
+vfloat32m2_t test_vfwmul_vv_bf16m1_f32m2_rm_tu(vfloat32m2_t vd,
+ vbfloat16m1_t vs2,
+ vbfloat16m1_t vs1, size_t vl) {
+ return __riscv_vfwmul_vv_bf16m1_f32m2_rm_tu(vd, vs2, vs1, __RISCV_FRM_RNE,
+ vl);
+}
+
+// CHECK-RV64-LABEL: define dso_local <vscale x 4 x float> @test_vfwmul_vf_bf16m1_f32m2_rm_tu(
+// CHECK-RV64-SAME: <vscale x 4 x float> [[VD:%.*]], <vscale x 4 x bfloat> [[VS2:%.*]], bfloat noundef [[RS1:%.*]], i64 noundef [[VL:%.*]]) #[[ATTR0]] {
+// CHECK-RV64-NEXT: [[ENTRY:.*:]]
+// CHECK-RV64-NEXT: [[TMP0:%.*]] = call <vscale x 4 x float> @llvm.riscv.vfwmul.nxv4f32.nxv4bf16.bf16.i64(<vscale x 4 x float> [[VD]], <vscale x 4 x bfloat> [[VS2]], bfloat [[RS1]], i64 0, i64 [[VL]])
+// CHECK-RV64-NEXT: ret <vscale x 4 x float> [[TMP0]]
+//
+vfloat32m2_t test_vfwmul_vf_bf16m1_f32m2_rm_tu(vfloat32m2_t vd,
+ vbfloat16m1_t vs2, __bf16 rs1,
+ size_t vl) {
+ return __riscv_vfwmul_vf_bf16m1_f32m2_rm_tu(vd, vs2, rs1, __RISCV_FRM_RNE,
+ vl);
+}
+
+// CHECK-RV64-LABEL: define dso_local <vscale x 8 x float> @test_vfwmul_vv_bf16m2_f32m4_rm_tu(
+// CHECK-RV64-SAME: <vscale x 8 x float> [[VD:%.*]], <vscale x 8 x bfloat> [[VS2:%.*]], <vscale x 8 x bfloat> [[VS1:%.*]], i64 noundef [[VL:%.*]]) #[[ATTR0]] {
+// CHECK-RV64-NEXT: [[ENTRY:.*:]]
+// CHECK-RV64-NEXT: [[TMP0:%.*]] = call <vscale x 8 x float> @llvm.riscv.vfwmul.nxv8f32.nxv8bf16.nxv8bf16.i64(<vscale x 8 x float> [[VD]], <vscale x 8 x bfloat> [[VS2]], <vscale x 8 x bfloat> [[VS1]], i64 0, i64 [[VL]])
+// CHECK-RV64-NEXT: ret <vscale x 8 x float> [[TMP0]]
+//
+vfloat32m4_t test_vfwmul_vv_bf16m2_f32m4_rm_tu(vfloat32m4_t vd,
+ vbfloat16m2_t vs2,
+ vbfloat16m2_t vs1, size_t vl) {
+ return __riscv_vfwmul_vv_bf16m2_f32m4_rm_tu(vd, vs2, vs1, __RISCV_FRM_RNE,
+ vl);
+}
+
+// CHECK-RV64-LABEL: define dso_local <vscale x 8 x float> @test_vfwmul_vf_bf16m2_f32m4_rm_tu(
+// CHECK-RV64-SAME: <vscale x 8 x float> [[VD:%.*]], <vscale x 8 x bfloat> [[VS2:%.*]], bfloat noundef [[RS1:%.*]], i64 noundef [[VL:%.*]]) #[[ATTR0]] {
+// CHECK-RV64-NEXT: [[ENTRY:.*:]]
+// CHECK-RV64-NEXT: [[TMP0:%.*]] = call <vscale x 8 x float> @llvm.riscv.vfwmul.nxv8f32.nxv8bf16.bf16.i64(<vscale x 8 x float> [[VD]], <vscale x 8 x bfloat> [[VS2]], bfloat [[RS1]], i64 0, i64 [[VL]])
+// CHECK-RV64-NEXT: ret <vscale x 8 x float> [[TMP0]]
+//
+vfloat32m4_t test_vfwmul_vf_bf16m2_f32m4_rm_tu(vfloat32m4_t vd,
+ vbfloat16m2_t vs2, __bf16 rs1,
+ size_t vl) {
+ return __riscv_vfwmul_vf_bf16m2_f32m4_rm_tu(vd, vs2, rs1, __RISCV_FRM_RNE,
+ vl);
+}
+
+// CHECK-RV64-LABEL: define dso_local <vscale x 16 x float> @test_vfwmul_vv_bf16m4_f32m8_rm_tu(
+// CHECK-RV64-SAME: <vscale x 16 x float> [[VD:%.*]], <vscale x 16 x bfloat> [[VS2:%.*]], <vscale x 16 x bfloat> [[VS1:%.*]], i64 noundef [[VL:%.*]]) #[[ATTR0]] {
+// CHECK-RV64-NEXT: [[ENTRY:.*:]]
+// CHECK-RV64-NEXT: [[TMP0:%.*]] = call <vscale x 16 x float> @llvm.riscv.vfwmul.nxv16f32.nxv16bf16.nxv16bf16.i64(<vscale x 16 x float> [[VD]], <vscale x 16 x bfloat> [[VS2]], <vscale x 16 x bfloat> [[VS1]], i64 0, i64 [[VL]])
+// CHECK-RV64-NEXT: ret <vscale x 16 x float> [[TMP0]]
+//
+vfloat32m8_t test_vfwmul_vv_bf16m4_f32m8_rm_tu(vfloat32m8_t vd,
+ vbfloat16m4_t vs2,
+ vbfloat16m4_t vs1, size_t vl) {
+ return __riscv_vfwmul_vv_bf16m4_f32m8_rm_tu(vd, vs2, vs1, __RISCV_FRM_RNE,
+ vl);
+}
+
+// CHECK-RV64-LABEL: define dso_local <vscale x 16 x float> @test_vfwmul_vf_bf16m4_f32m8_rm_tu(
+// CHECK-RV64-SAME: <vscale x 16 x float> [[VD:%.*]], <vscale x 16 x bfloat> [[VS2:%.*]], bfloat noundef [[RS1:%.*]], i64 noundef [[VL:%.*]]) #[[ATTR0]] {
+// CHECK-RV64-NEXT: [[ENTRY:.*:]]
+// CHECK-RV64-NEXT: [[TMP0:%.*]] = call <vscale x 16 x float> @llvm.riscv.vfwmul.nxv16f32.nxv16bf16.bf16.i64(<vscale x 16 x float> [[VD]], <vscale x 16 x bfloat> [[VS2]], bfloat [[RS1]], i64 0, i64 [[VL]])
+// CHECK-RV64-NEXT: ret <vscale x 16 x float> [[TMP0]]
+//
+vfloat32m8_t test_vfwmul_vf_bf16m4_f32m8_rm_tu(vfloat32m8_t vd,
+ vbfloat16m4_t vs2, __bf16 rs1,
+ size_t vl) {
+ return __riscv_vfwmul_vf_bf16m4_f32m8_rm_tu(vd, vs2, rs1, __RISCV_FRM_RNE,
+ vl);
+}
+
+// CHECK-RV64-LABEL: define dso_local <vscale x 1 x float> @test_vfwmul_vv_bf16mf4_f32mf2_rm_tum(
+// CHECK-RV64-SAME: <vscale x 1 x i1> [[VM:%.*]], <vscale x 1 x float> [[VD:%.*]], <vscale x 1 x bfloat> [[VS2:%.*]], <vscale x 1 x bfloat> [[VS1:%.*]], i64 noundef [[VL:%.*]]) #[[ATTR0]] {
+// CHECK-RV64-NEXT: [[ENTRY:.*:]]
+// CHECK-RV64-NEXT: [[TMP0:%.*]] = call <vscale x 1 x float> @llvm.riscv.vfwmul.mask.nxv1f32.nxv1bf16.nxv1bf16.i64(<vscale x 1 x float> [[VD]], <vscale x 1 x bfloat> [[VS2]], <vscale x 1 x bfloat> [[VS1]], <vscale x 1 x i1> [[VM]], i64 0, i64 [[VL]], i64 2)
+// CHECK-RV64-NEXT: ret <vscale x 1 x float> [[TMP0]]
+//
+vfloat32mf2_t test_vfwmul_vv_bf16mf4_f32mf2_rm_tum(vbool64_t vm,
+ vfloat32mf2_t vd,
+ vbfloat16mf4_t vs2,
+ vbfloat16mf4_t vs1,
+ size_t vl) {
+ return __riscv_vfwmul_vv_bf16mf4_f32mf2_rm_tum(vm, vd, vs2, vs1,
+ __RISCV_FRM_RNE, vl);
+}
+
+// CHECK-RV64-LABEL: define dso_local <vscale x 1 x float> @test_vfwmul_vf_bf16mf4_f32mf2_rm_tum(
+// CHECK-RV64-SAME: <vscale x 1 x i1> [[VM:%.*]], <vscale x 1 x float> [[VD:%.*]], <vscale x 1 x bfloat> [[VS2:%.*]], bfloat noundef [[RS1:%.*]], i64 noundef [[VL:%.*]]) #[[ATTR0]] {
+// CHECK-RV64-NEXT: [[ENTRY:.*:]]
+// CHECK-RV64-NEXT: [[TMP0:%.*]] = call <vscale x 1 x float> @llvm.riscv.vfwmul.mask.nxv1f32.nxv1bf16.bf16.i64(<vscale x 1 x float> [[VD]], <vscale x 1 x bfloat> [[VS2]], bfloat [[RS1]], <vscale x 1 x i1> [[VM]], i64 0, i64 [[VL]], i64 2)
+// CHECK-RV64-NEXT: ret <vscale x 1 x float> [[TMP0]]
+//
+vfloat32mf2_t test_vfwmul_vf_bf16mf4_f32mf2_rm_tum(vbool64_t vm,
+ vfloat32mf2_t vd,
+ vbfloat16mf4_t vs2,
+ __bf16 rs1, size_t vl) {
+ return __riscv_vfwmul_vf_bf16mf4_f32mf2_rm_tum(vm, vd, vs2, rs1,
+ __RISCV_FRM_RNE, vl);
+}
+
+// CHECK-RV64-LABEL: define dso_local <vscale x 2 x float> @test_vfwmul_vv_bf16mf2_f32m1_rm_tum(
+// CHECK-RV64-SAME: <vscale x 2 x i1> [[VM:%.*]], <vscale x 2 x float> [[VD:%.*]], <vscale x 2 x bfloat> [[VS2:%.*]], <vscale x 2 x bfloat> [[VS1:%.*]], i64 noundef [[VL:%.*]]) #[[ATTR0]] {
+// CHECK-RV64-NEXT: [[ENTRY:.*:]]
+// CHECK-RV64-NEXT: [[TMP0:%.*]] = call <vscale x 2 x float> @llvm.riscv.vfwmul.mask.nxv2f32.nxv2bf16.nxv2bf16.i64(<vscale x 2 x float> [[VD]], <vscale x 2 x bfloat> [[VS2]], <vscale x 2 x bfloat> [[VS1]], <vscale x 2 x i1> [[VM]], i64 0, i64 [[VL]], i64 2)
+// CHECK-RV64-NEXT: ret <vscale x 2 x float> [[TMP0]]
+//
+vfloat32m1_t test_vfwmul_vv_bf16mf2_f32m1_rm_tum(vbool32_t vm, vfloat32m1_t vd,
+ vbfloat16mf2_t vs2,
+ vbfloat16mf2_t vs1,
+ size_t vl) {
+ return __riscv_vfwmul_vv_bf16mf2_f32m1_rm_tum(vm, vd, vs2, vs1,
+ __RISCV_FRM_RNE, vl);
+}
+
+// CHECK-RV64-LABEL: define dso_local <vscale x 2 x float> @test_vfwmul_vf_bf16mf2_f32m1_rm_tum(
+// CHECK-RV64-SAME: <vscale x 2 x i1> [[VM:%.*]], <vscale x 2 x float> [[VD:%.*]], <vscale x 2 x bfloat> [[VS2:%.*]], bfloat noundef [[RS1:%.*]], i64 noundef [[VL:%.*]]) #[[ATTR0]] {
+// CHECK-RV64-NEXT: [[ENTRY:.*:]]
+// CHECK-RV64-NEXT: [[TMP0:%.*]] = call <vscale x 2 x float> @llvm.riscv.vfwmul.mask.nxv2f32.nxv2bf16.bf16.i64(<vscale x 2 x float> [[VD]], <vscale x 2 x bfloat> [[VS2]], bfloat [[RS1]], <vscale x 2 x i1> [[VM]], i64 0, i64 [[VL]], i64 2)
+// CHECK-RV64-NEXT: ret <vscale x 2 x float> [[TMP0]]
+//
+vfloat32m1_t test_vfwmul_vf_bf16mf2_f32m1_rm_tum(vbool32_t vm, vfloat32m1_t vd,
+ vbfloat16mf2_t vs2, __bf16 rs1,
+ size_t vl) {
+ return __riscv_vfwmul_vf_bf16mf2_f32m1_rm_tum(vm, vd, vs2, rs1,
+ __RISCV_FRM_RNE, vl);
+}
+
+// CHECK-RV64-LABEL: define dso_local <vscale x 4 x float> @test_vfwmul_vv_bf16m1_f32m2_rm_tum(
+// CHECK-RV64-SAME: <vscale x 4 x i1> [[VM:%.*]], <vscale x 4 x float> [[VD:%.*]], <vscale x 4 x bfloat> [[VS2:%.*]], <vscale x 4 x bfloat> [[VS1:%.*]], i64 noundef [[VL:%.*]]) #[[ATTR0]] {
+// CHECK-RV64-NEXT: [[ENTRY:.*:]]
+// CHECK-RV64-NEXT: [[TMP0:%.*]] = call <vscale x 4 x float> @llvm.riscv.vfwmul.mask.nxv4f32.nxv4bf16.nxv4bf16.i64(<vscale x 4 x float> [[VD]], <vscale x 4 x bfloat> [[VS2]], <vscale x 4 x bfloat> [[VS1]], <vscale x 4 x i1> [[VM]], i64 0, i64 [[VL]], i64 2)
+// CHECK-RV64-NEXT: ret <vscale x 4 x float> [[TMP0]]
+//
+vfloat32m2_t test_vfwmul_vv_bf16m1_f32m2_rm_tum(vbool16_t vm, vfloat32m2_t vd,
+ vbfloat16m1_t vs2,
+ vbfloat16m1_t vs1, size_t vl) {
+ return __riscv_vfwmul_vv_bf16m1_f32m2_rm_tum(vm, vd, vs2, vs1,
+ __RISCV_FRM_RNE, vl);
+}
+
+// CHECK-RV64-LABEL: define dso_local <vscale x 4 x float> @test_vfwmul_vf_bf16m1_f32m2_rm_tum(
+// CHECK-RV64-SAME: <vscale x 4 x i1> [[VM:%.*]], <vscale x 4 x float> [[VD:%.*]], <vscale x 4 x bfloat> [[VS2:%.*]], bfloat noundef [[RS1:%.*]], i64 noundef [[VL:%.*]]) #[[ATTR0]] {
+// CHECK-RV64-NEXT: [[ENTRY:.*:]]
+// CHECK-RV64-NEXT: [[TMP0:%.*]] = call <vscale x 4 x float> @llvm.riscv.vfwmul.mask.nxv4f32.nxv4bf16.bf16.i64(<vscale x 4 x float> [[VD]], <vscale x 4 x bfloat> [[VS2]], bfloat [[RS1]], <vscale x 4 x i1> [[VM]], i64 0, i64 [[VL]], i64 2)
+// CHECK-RV64-NEXT: ret <vscale x 4 x float> [[TMP0]]
+//
+vfloat32m2_t test_vfwmul_vf_bf16m1_f32m2_rm_tum(vbool16_t vm, vfloat32m2_t vd,
+ vbfloat16m1_t vs2, __bf16 rs1,
+ size_t vl) {
+ return __riscv_vfwmul_vf_bf16m1_f32m2_rm_tum(vm, vd, vs2, rs1,
+ __RISCV_FRM_RNE, vl);
+}
+
+// CHECK-RV64-LABEL: define dso_local <vscale x 8 x float> @test_vfwmul_vv_bf16m2_f32m4_rm_tum(
+// CHECK-RV64-SAME: <vscale x 8 x i1> [[VM:%.*]], <vscale x 8 x float> [[VD:%.*]], <vscale x 8 x bfloat> [[VS2:%.*]], <vscale x 8 x bfloat> [[VS1:%.*]], i64 noundef [[VL:%.*]]) #[[ATTR0]] {
+// CHECK-RV64-NEXT: [[ENTRY:.*:]]
+// CHECK-RV64-NEXT: [[TMP0:%.*]] = call <vscale x 8 x float> @llvm.riscv.vfwmul.mask.nxv8f32.nxv8bf16.nxv8bf16.i64(<vscale x 8 x float> [[VD]], <vscale x 8 x bfloat> [[VS2]], <vscale x 8 x bfloat> [[VS1]], <vscale x 8 x i1> [[VM]], i64 0, i64 [[VL]], i64 2)
+// CHECK-RV64-NEXT: ret <vscale x 8 x float> [[TMP0]]
+//
+vfloat32m4_t test_vfwmul_vv_bf16m2_f32m4_rm_tum(vbool8_t vm, vfloat32m4_t vd,
+ vbfloat16m2_t vs2,
+ vbfloat16m2_t vs1, size_t vl) {
+ return __riscv_vfwmul_vv_bf16m2_f32m4_rm_tum(vm, vd, vs2, vs1,
+ __RISCV_FRM_RNE, vl);
+}
+
+// CHECK-RV64-LABEL: define dso_local <vscale x 8 x float> @test_vfwmul_vf_bf16m2_f32m4_rm_tum(
+// CHECK-RV64-SAME: <vscale x 8 x i1> [[VM:%.*]], <vscale x 8 x float> [[VD:%.*]], <vscale x 8 x bfloat> [[VS2:%.*]], bfloat noundef [[RS1:%.*]], i64 noundef [[VL:%.*]]) #[[ATTR0]] {
+// CHECK-RV64-NEXT: [[ENTRY:.*:]]
+// CHECK-RV64-NEXT: [[TMP0:%.*]] = call <vscale x 8 x float> @llvm.riscv.vfwmul.mask.nxv8f32.nxv8bf16.bf16.i64(<vscale x 8 x float> [[VD]], <vscale x 8 x bfloat> [[VS2]], bfloat [[RS1]], <vscale x 8 x i1> [[VM]], i64 0, i64 [[VL]], i64 2)
+// CHECK-RV64-NEXT: ret <vscale x 8 x float> [[TMP0]]
+//
+vfloat32m4_t test_vfwmul_vf_bf16m2_f32m4_rm_tum(vbool8_t vm, vfloat32m4_t vd,
+ vbfloat16m2_t vs2, __bf16 rs1,
+ size_t vl) {
+ return __riscv_vfwmul_vf_bf16m2_f32m4_rm_tum(vm, vd, vs2, rs1,
+ __RISCV_FRM_RNE, vl);
+}
+
+// CHECK-RV64-LABEL: define dso_local <vscale x 16 x float> @test_vfwmul_vv_bf16m4_f32m8_rm_tum(
+// CHECK-RV64-SAME: <vscale x 16 x i1> [[VM:%.*]], <vscale x 16 x float> [[VD:%.*]], <vscale x 16 x bfloat> [[VS2:%.*]], <vscale x 16 x bfloat> [[VS1:%.*]], i64 noundef [[VL:%.*]]) #[[ATTR0]] {
+// CHECK-RV64-NEXT: [[ENTRY:.*:]]
+// CHECK-RV64-NEXT: [[TMP0:%.*]] = call <vscale x 16 x float> @llvm.riscv.vfwmul.mask.nxv16f32.nxv16bf16.nxv16bf16.i64(<vscale x 16 x float> [[VD]], <vscale x 16 x bfloat> [[VS2]], <vscale x 16 x bfloat> [[VS1]], <vscale x 16 x i1> [[VM]], i64 0, i64 [[VL]], i64 2)
+// CHECK-RV64-NEXT: ret <vscale x 16 x float> [[TMP0]]
+//
+vfloat32m8_t test_vfwmul_vv_bf16m4_f32m8_rm_tum(vbool4_t vm, vfloat32m8_t vd,
+ vbfloat16m4_t vs2,
+ vbfloat16m4_t vs1, size_t vl) {
+ return __riscv_vfwmul_vv_bf16m4_f32m8_rm_tum(vm, vd, vs2, vs1,
+ __RISCV_FRM_RNE, vl);
+}
+
+// CHECK-RV64-LABEL: define dso_local <vscale x 16 x float> @test_vfwmul_vf_bf16m4_f32m8_rm_tum(
+// CHECK-RV64-SAME: <vscale x 16 x i1> [[VM:%.*]], <vscale x 16 x float> [[VD:%.*]], <vscale x 16 x bfloat> [[VS2:%.*]], bfloat noundef [[RS1:%.*]], i64 noundef [[VL:%.*]]) #[[ATTR0]] {
+// CHECK-RV64-NEXT: [[ENTRY:.*:]]
+// CHECK-RV64-NEXT: [[TMP0:%.*]] = call <vscale x 16 x float> @llvm.riscv.vfwmul.mask.nxv16f32.nxv16bf16.bf16.i64(<vscale x 16 x float> [[VD]], <vscale x 16 x bfloat> [[VS2]], bfloat [[RS1]], <vscale x 16 x i1> [[VM]], i64 0, i64 [[VL]], i64 2)
+// CHECK-RV64-NEXT: ret <vscale x 16 x float> [[TMP0]]
+//
+vfloat32m8_t test_vfwmul_vf_bf16m4_f32m8_rm_tum(vbool4_t vm, vfloat32m8_t vd,
+ vbfloat16m4_t vs2, __bf16 rs1,
+ size_t vl) {
+ return __riscv_vfwmul_vf_bf16m4_f32m8_rm_tum(vm, vd, vs2, rs1,
+ __RISCV_FRM_RNE, vl);
+}
+
+// CHECK-RV64-LABEL: define dso_local <vscale x 1 x float> @test_vfwmul_vv_bf16mf4_f32mf2_rm_tumu(
+// CHECK-RV64-SAME: <vscale x 1 x i1> [[VM:%.*]], <vscale x 1 x float> [[VD:%.*]], <vscale x 1 x bfloat> [[VS2:%.*]], <vscale x 1 x bfloat> [[VS1:%.*]], i64 noundef [[VL:%.*]]) #[[ATTR0]] {
+// CHECK-RV64-NEXT: [[ENTRY:.*:]]
+// CHECK-RV64-NEXT: [[TMP0:%.*]] = call <vscale x 1 x float> @llvm.riscv.vfwmul.mask.nxv1f32.nxv1bf16.nxv1bf16.i64(<vscale x 1 x float> [[VD]], <vscale x 1 x bfloat> [[VS2]], <vscale x 1 x bfloat> [[VS1]], <vscale x 1 x i1> [[VM]], i64 0, i64 [[VL]], i64 0)
+// CHECK-RV64-NEXT: ret <vscale x 1 x float> [[TMP0]]
+//
+vfloat32mf2_t test_vfwmul_vv_bf16mf4_f32mf2_rm_tumu(vbool64_t vm,
+ vfloat32mf2_t vd,
+ vbfloat16mf4_t vs2,
+ vbfloat16mf4_t vs1,
+ size_t vl) {
+ return __riscv_vfwmul_vv_bf16mf4_f32mf2_rm_tumu(vm, vd, vs2, vs1,
+ __RISCV_FRM_RNE, vl);
+}
+
+// CHECK-RV64-LABEL: define dso_local <vscale x 1 x float> @test_vfwmul_vf_bf16mf4_f32mf2_rm_tumu(
+// CHECK-RV64-SAME: <vscale x 1 x i1> [[VM:%.*]], <vscale x 1 x float> [[VD:%.*]], <vscale x 1 x bfloat> [[VS2:%.*]], bfloat noundef [[RS1:%.*]], i64 noundef [[VL:%.*]]) #[[ATTR0]] {
+// CHECK-RV64-NEXT: [[ENTRY:.*:]]
+// CHECK-RV64-NEXT: [[TMP0:%.*]] = call <vscale x 1 x float> @llvm.riscv.vfwmul.mask.nxv1f32.nxv1bf16.bf16.i64(<vscale x 1 x float> [[VD]], <vscale x 1 x bfloat> [[VS2]], bfloat [[RS1]], <vscale x 1 x i1> [[VM]], i64 0, i64 [[VL]], i64 0)
+// CHECK-RV64-NEXT: ret <vscale x 1 x float> [[TMP0]]
+//
+vfloat32mf2_t test_vfwmul_vf_bf16mf4_f32mf2_rm_tumu(vbool64_t vm,
+ vfloat32mf2_t vd,
+ vbfloat16mf4_t vs2,
+ __bf16 rs1, size_t vl) {
+ return __riscv_vfwmul_vf_bf16mf4_f32mf2_rm_tumu(vm, vd, vs2, rs1,
+ __RISCV_FRM_RNE, vl);
+}
+
+// CHECK-RV64-LABEL: define dso_local <vscale x 2 x float> @test_vfwmul_vv_bf16mf2_f32m1_rm_tumu(
+// CHECK-RV64-SAME: <vscale x 2 x i1> [[VM:%.*]], <vscale x 2 x float> [[VD:%.*]], <vscale x 2 x bfloat> [[VS2:%.*]], <vscale x 2 x bfloat> [[VS1:%.*]], i64 noundef [[VL:%.*]]) #[[ATTR0]] {
+// CHECK-RV64-NEXT: [[ENTRY:.*:]]
+// CHECK-RV64-NEXT: [[TMP0:%.*]] = call <vscale x 2 x float> @llvm.riscv.vfwmul.mask.nxv2f32.nxv2bf16.nxv2bf16.i64(<vscale x 2 x float> [[VD]], <vscale x 2 x bfloat> [[VS2]], <vscale x 2 x bfloat> [[VS1]], <vscale x 2 x i1> [[VM]], i64 0, i64 [[VL]], i64 0)
+// CHECK-RV64-NEXT: ret <vscale x 2 x float> [[TMP0]]
+//
+vfloat32m1_t test_vfwmul_vv_bf16mf2_f32m1_rm_tumu(vbool32_t vm, vfloat32m1_t vd,
+ vbfloat16mf2_t vs2,
+ vbfloat16mf2_t vs1,
+ size_t vl) {
+ return __riscv_vfwmul_vv_bf16mf2_f32m1_rm_tumu(vm, vd, vs2, vs1,
+ __RISCV_FRM_RNE, vl);
+}
+
+// CHECK-RV64-LABEL: define dso_local <vscale x 2 x float> @test_vfwmul_vf_bf16mf2_f32m1_rm_tumu(
+// CHECK-RV64-SAME: <vscale x 2 x i1> [[VM:%.*]], <vscale x 2 x float> [[VD:%.*]], <vscale x 2 x bfloat> [[VS2:%.*]], bfloat noundef [[RS1:%.*]], i64 noundef [[VL:%.*]]) #[[ATTR0]] {
+// CHECK-RV64-NEXT: [[ENTRY:.*:]]
+// CHECK-RV64-NEXT: [[TMP0:%.*]] = call <vscale x 2 x float> @llvm.riscv.vfwmul.mask.nxv2f32.nxv2bf16.bf16.i64(<vscale x 2 x float> [[VD]], <vscale x 2 x bfloat> [[VS2]], bfloat [[RS1]], <vscale x 2 x i1> [[VM]], i64 0, i64 [[VL]], i64 0)
+// CHECK-RV64-NEXT: ret <vscale x 2 x float> [[TMP0]]
+//
+vfloat32m1_t test_vfwmul_vf_bf16mf2_f32m1_rm_tumu(vbool32_t vm, vfloat32m1_t vd,
+ vbfloat16mf2_t vs2,
+ __bf16 rs1, size_t vl) {
+ return __riscv_vfwmul_vf_bf16mf2_f32m1_rm_tumu(vm, vd, vs2, rs1,
+ __RISCV_FRM_RNE, vl);
+}
+
+// CHECK-RV64-LABEL: define dso_local <vscale x 4 x float> @test_vfwmul_vv_bf16m1_f32m2_rm_tumu(
+// CHECK-RV64-SAME: <vscale x 4 x i1> [[VM:%.*]], <vscale x 4 x float> [[VD:%.*]], <vscale x 4 x bfloat> [[VS2:%.*]], <vscale x 4 x bfloat> [[VS1:%.*]], i64 noundef [[VL:%.*]]) #[[ATTR0]] {
+// CHECK-RV64-NEXT: [[ENTRY:.*:]]
+// CHECK-RV64-NEXT: [[TMP0:%.*]] = call <vscale x 4 x float> @llvm.riscv.vfwmul.mask.nxv4f32.nxv4bf16.nxv4bf16.i64(<vscale x 4 x float> [[VD]], <vscale x 4 x bfloat> [[VS2]], <vscale x 4 x bfloat> [[VS1]], <vscale x 4 x i1> [[VM]], i64 0, i64 [[VL]], i64 0)
+// CHECK-RV64-NEXT: ret <vscale x 4 x float> [[TMP0]]
+//
+vfloat32m2_t test_vfwmul_vv_bf16m1_f32m2_rm_tumu(vbool16_t vm, vfloat32m2_t vd,
+ vbfloat16m1_t vs2,
+ vbfloat16m1_t vs1, size_t vl) {
+ return __riscv_vfwmul_vv_bf16m1_f32m2_rm_tumu(vm, vd, vs2, vs1,
+ __RISCV_FRM_RNE, vl);
+}
+
+// CHECK-RV64-LABEL: define dso_local <vscale x 4 x float> @test_vfwmul_vf_bf16m1_f32m2_rm_tumu(
+// CHECK-RV64-SAME: <vscale x 4 x i1> [[VM:%.*]], <vscale x 4 x float> [[VD:%.*]], <vscale x 4 x bfloat> [[VS2:%.*]], bfloat noundef [[RS1:%.*]], i64 noundef [[VL:%.*]]) #[[ATTR0]] {
+// CHECK-RV64-NEXT: [[ENTRY:.*:]]
+// CHECK-RV64-NEXT: [[TMP0:%.*]] = call <vscale x 4 x float> @llvm.riscv.vfwmul.mask.nxv4f32.nxv4bf16.bf16.i64(<vscale x 4 x float> [[VD]], <vscale x 4 x bfloat> [[VS2]], bfloat [[RS1]], <vscale x 4 x i1> [[VM]], i64 0, i64 [[VL]], i64 0)
+// CHECK-RV64-NEXT: ret <vscale x 4 x float> [[TMP0]]
+//
+vfloat32m2_t test_vfwmul_vf_bf16m1_f32m2_rm_tumu(vbool16_t vm, vfloat32m2_t vd,
+ vbfloat16m1_t vs2, __bf16 rs1,
+ size_t vl) {
+ return __riscv_vfwmul_vf_bf16m1_f32m2_rm_tumu(vm, vd, vs2, rs1,
+ __RISCV_FRM_RNE, vl);
+}
+
+// CHECK-RV64-LABEL: define dso_local <vscale x 8 x float> @test_vfwmul_vv_bf16m2_f32m4_rm_tumu(
+// CHECK-RV64-SAME: <vscale x 8 x i1> [[VM:%.*]], <vscale x 8 x float> [[VD:%.*]], <vscale x 8 x bfloat> [[VS2:%.*]], <vscale x 8 x bfloat> [[VS1:%.*]], i64 noundef [[VL:%.*]]) #[[ATTR0]] {
+// CHECK-RV64-NEXT: [[ENTRY:.*:]]
+// CHECK-RV64-NEXT: [[TMP0:%.*]] = call <vscale x 8 x float> @llvm.riscv.vfwmul.mask.nxv8f32.nxv8bf16.nxv8bf16.i64(<vscale x 8 x float> [[VD]], <vscale x 8 x bfloat> [[VS2]], <vscale x 8 x bfloat> [[VS1]], <vscale x 8 x i1> [[VM]], i64 0, i64 [[VL]], i64 0)
+// CHECK-RV64-NEXT: ret <vscale x 8 x float> [[TMP0]]
+//
+vfloat32m4_t test_vfwmul_vv_bf16m2_f32m4_rm_tumu(vbool8_t vm, vfloat32m4_t vd,
+ vbfloat16m2_t vs2,
+ vbfloat16m2_t vs1, size_t vl) {
+ return __riscv_vfwmul_vv_bf16m2_f32m4_rm_tumu(vm, vd, vs2, vs1,
+ __RISCV_FRM_RNE, vl);
+}
+
+// CHECK-RV64-LABEL: define dso_local <vscale x 8 x float> @test_vfwmul_vf_bf16m2_f32m4_rm_tumu(
+// CHECK-RV64-SAME: <vscale x 8 x i1> [[VM:%.*]], <vscale x 8 x float> [[VD:%.*]], <vscale x 8 x bfloat> [[VS2:%.*]], bfloat noundef [[RS1:%.*]], i64 noundef [[VL:%.*]]) #[[ATTR0]] {
+// CHECK-RV64-NEXT: [[ENTRY:.*:]]
+// CHECK-RV64-NEXT: [[TMP0:%.*]] = call <vscale x 8 x float> @llvm.riscv.vfwmul.mask.nxv8f32.nxv8bf16.bf16.i64(<vscale x 8 x float> [[VD]], <vscale x 8 x bfloat> [[VS2]], bfloat [[RS1]], <vscale x 8 x i1> [[VM]], i64 0, i64 [[VL]], i64 0)
+// CHECK-RV64-NEXT: ret <vscale x 8 x float> [[TMP0]]
+//
+vfloat32m4_t test_vfwmul_vf_bf16m2_f32m4_rm_tumu(vbool8_t vm, vfloat32m4_t vd,
+ vbfloat16m2_t vs2, __bf16 rs1,
+ size_t vl) {
+ return __riscv_vfwmul_vf_bf16m2_f32m4_rm_tumu(vm, vd, vs2, rs1,
+ __RISCV_FRM_RNE, vl);
+}
+
+// CHECK-RV64-LABEL: define dso_local <vscale x 16 x float> @test_vfwmul_vv_bf16m4_f32m8_rm_tumu(
+// CHECK-RV64-SAME: <vscale x 16 x i1> [[VM:%.*]], <vscale x 16 x float> [[VD:%.*]], <vscale x 16 x bfloat> [[VS2:%.*]], <vscale x 16 x bfloat> [[VS1:%.*]], i64 noundef [[VL:%.*]]) #[[ATTR0]] {
+// CHECK-RV64-NEXT: [[ENTRY:.*:]]
+// CHECK-RV64-NEXT: [[TMP0:%.*]] = call <vscale x 16 x float> @llvm.riscv.vfwmul.mask.nxv16f32.nxv16bf16.nxv16bf16.i64(<vscale x 16 x float> [[VD]], <vscale x 16 x bfloat> [[VS2]], <vscale x 16 x bfloat> [[VS1]], <vscale x 16 x i1> [[VM]], i64 0, i64 [[VL]], i64 0)
+// CHECK-RV64-NEXT: ret <vscale x 16 x float> [[TMP0]]
+//
+vfloat32m8_t test_vfwmul_vv_bf16m4_f32m8_rm_tumu(vbool4_t vm, vfloat32m8_t vd,
+ vbfloat16m4_t vs2,
+ vbfloat16m4_t vs1, size_t vl) {
+ return __riscv_vfwmul_vv_bf16m4_f32m8_rm_tumu(vm, vd, vs2, vs1,
+ __RISCV_FRM_RNE, vl);
+}
+
+// CHECK-RV64-LABEL: define dso_local <vscale x 16 x float> @test_vfwmul_vf_bf16m4_f32m8_rm_tumu(
+// CHECK-RV64-SAME: <vscale x 16 x i1> [[VM:%.*]], <vscale x 16 x float> [[VD:%.*]], <vscale x 16 x bfloat> [[VS2:%.*]], bfloat noundef [[RS1:%.*]], i64 noundef [[VL:%.*]]) #[[ATTR0]] {
+// CHECK-RV64-NEXT: [[ENTRY:.*:]]
+// CHECK-RV64-NEXT: [[TMP0:%.*]] = call <vscale x 16 x float> @llvm.riscv.vfwmul.mask.nxv16f32.nxv16bf16.bf16.i64(<vscale x 16 x float> [[VD]], <vscale x 16 x bfloat> [[VS2]], bfloat [[RS1]], <vscale x 16 x i1> [[VM]], i64 0, i64 [[VL]], i64 0)
+// CHECK-RV64-NEXT: ret <vscale x 16 x float> [[TMP0]]
+//
+vfloat32m8_t test_vfwmul_vf_bf16m4_f32m8_rm_tumu(vbool4_t vm, vfloat32m8_t vd,
+ vbfloat16m4_t vs2, __bf16 rs1,
+ size_t vl) {
+ return __riscv_vfwmul_vf_bf16m4_f32m8_rm_tumu(vm, vd, vs2, rs1,
+ __RISCV_FRM_RNE, vl);
+}
+
+// CHECK-RV64-LABEL: define dso_local <vscale x 1 x float> @test_vfwmul_vv_bf16mf4_f32mf2_rm_mu(
+// CHECK-RV64-SAME: <vscale x 1 x i1> [[VM:%.*]], <vscale x 1 x float> [[VD:%.*]], <vscale x 1 x bfloat> [[VS2:%.*]], <vscale x 1 x bfloat> [[VS1:%.*]], i64 noundef [[VL:%.*]]) #[[ATTR0]] {
+// CHECK-RV64-NEXT: [[ENTRY:.*:]]
+// CHECK-RV64-NEXT: [[TMP0:%.*]] = call <vscale x 1 x float> @llvm.riscv.vfwmul.mask.nxv1f32.nxv1bf16.nxv1bf16.i64(<vscale x 1 x float> [[VD]], <vscale x 1 x bfloat> [[VS2]], <vscale x 1 x bfloat> [[VS1]], <vscale x 1 x i1> [[VM]], i64 0, i64 [[VL]], i64 1)
+// CHECK-RV64-NEXT: ret <vscale x 1 x float> [[TMP0]]
+//
+vfloat32mf2_t test_vfwmul_vv_bf16mf4_f32mf2_rm_mu(vbool64_t vm,
+ vfloat32mf2_t vd,
+ vbfloat16mf4_t vs2,
+ vbfloat16mf4_t vs1,
+ size_t vl) {
+ return __riscv_vfwmul_vv_bf16mf4_f32mf2_rm_mu(vm, vd, vs2, vs1,
+ __RISCV_FRM_RNE, vl);
+}
+
+// CHECK-RV64-LABEL: define dso_local <vscale x 1 x float> @test_vfwmul_vf_bf16mf4_f32mf2_rm_mu(
+// CHECK-RV64-SAME: <vscale x 1 x i1> [[VM:%.*]], <vscale x 1 x float> [[VD:%.*]], <vscale x 1 x bfloat> [[VS2:%.*]], bfloat noundef [[RS1:%.*]], i64 noundef [[VL:%.*]]) #[[ATTR0]] {
+// CHECK-RV64-NEXT: [[ENTRY:.*:]]
+// CHECK-RV64-NEXT: [[TMP0:%.*]] = call <vscale x 1 x float> @llvm.riscv.vfwmul.mask.nxv1f32.nxv1bf16.bf16.i64(<vscale x 1 x float> [[VD]], <vscale x 1 x bfloat> [[VS2]], bfloat [[RS1]], <vscale x 1 x i1> [[VM]], i64 0, i64 [[VL]], i64 1)
+// CHECK-RV64-NEXT: ret <vscale x 1 x float> [[TMP0]]
+//
+vfloat32mf2_t test_vfwmul_vf_bf16mf4_f32mf2_rm_mu(vbool64_t vm,
+ vfloat32mf2_t vd,
+ vbfloat16mf4_t vs2,
+ __bf16 rs1, size_t vl) {
+ return __riscv_vfwmul_vf_bf16mf4_f32mf2_rm_mu(vm, vd, vs2, rs1,
+ __RISCV_FRM_RNE, vl);
+}
+
+// CHECK-RV64-LABEL: define dso_local <vscale x 2 x float> @test_vfwmul_vv_bf16mf2_f32m1_rm_mu(
+// CHECK-RV64-SAME: <vscale x 2 x i1> [[VM:%.*]], <vscale x 2 x float> [[VD:%.*]], <vscale x 2 x bfloat> [[VS2:%.*]], <vscale x 2 x bfloat> [[VS1:%.*]], i64 noundef [[VL:%.*]]) #[[ATTR0]] {
+// CHECK-RV64-NEXT: [[ENTRY:.*:]]
+// CHECK-RV64-NEXT: [[TMP0:%.*]] = call <vscale x 2 x float> @llvm.riscv.vfwmul.mask.nxv2f32.nxv2bf16.nxv2bf16.i64(<vscale x 2 x float> [[VD]], <vscale x 2 x bfloat> [[VS2]], <vscale x 2 x bfloat> [[VS1]], <vscale x 2 x i1> [[VM]], i64 0, i64 [[VL]], i64 1)
+// CHECK-RV64-NEXT: ret <vscale x 2 x float> [[TMP0]]
+//
+vfloat32m1_t test_vfwmul_vv_bf16mf2_f32m1_rm_mu(vbool32_t vm, vfloat32m1_t vd,
+ vbfloat16mf2_t vs2,
+ vbfloat16mf2_t vs1, size_t vl) {
+ return __riscv_vfwmul_vv_bf16mf2_f32m1_rm_mu(vm, vd, vs2, vs1,
+ __RISCV_FRM_RNE, vl);
+}
+
+// CHECK-RV64-LABEL: define dso_local <vscale x 2 x float> @test_vfwmul_vf_bf16mf2_f32m1_rm_mu(
+// CHECK-RV64-SAME: <vscale x 2 x i1> [[VM:%.*]], <vscale x 2 x float> [[VD:%.*]], <vscale x 2 x bfloat> [[VS2:%.*]], bfloat noundef [[RS1:%.*]], i64 noundef [[VL:%.*]]) #[[ATTR0]] {
+// CHECK-RV64-NEXT: [[ENTRY:.*:]]
+// CHECK-RV64-NEXT: [[TMP0:%.*]] = call <vscale x 2 x float> @llvm.riscv.vfwmul.mask.nxv2f32.nxv2bf16.bf16.i64(<vscale x 2 x float> [[VD]], <vscale x 2 x bfloat> [[VS2]], bfloat [[RS1]], <vscale x 2 x i1> [[VM]], i64 0, i64 [[VL]], i64 1)
+// CHECK-RV64-NEXT: ret <vscale x 2 x float> [[TMP0]]
+//
+vfloat32m1_t test_vfwmul_vf_bf16mf2_f32m1_rm_mu(vbool32_t vm, vfloat32m1_t vd,
+ vbfloat16mf2_t vs2, __bf16 rs1,
+ size_t vl) {
+ return __riscv_vfwmul_vf_bf16mf2_f32m1_rm_mu(vm, vd, vs2, rs1,
+ __RISCV_FRM_RNE, vl);
+}
+
+// CHECK-RV64-LABEL: define dso_local <vscale x 4 x float> @test_vfwmul_vv_bf16m1_f32m2_rm_mu(
+// CHECK-RV64-SAME: <vscale x 4 x i1> [[VM:%.*]], <vscale x 4 x float> [[VD:%.*]], <vscale x 4 x bfloat> [[VS2:%.*]], <vscale x 4 x bfloat> [[VS1:%.*]], i64 noundef [[VL:%.*]]) #[[ATTR0]] {
+// CHECK-RV64-NEXT: [[ENTRY:.*:]]
+// CHECK-RV64-NEXT: [[TMP0:%.*]] = call <vscale x 4 x float> @llvm.riscv.vfwmul.mask.nxv4f32.nxv4bf16.nxv4bf16.i64(<vscale x 4 x float> [[VD]], <vscale x 4 x bfloat> [[VS2]], <vscale x 4 x bfloat> [[VS1]], <vscale x 4 x i1> [[VM]], i64 0, i64 [[VL]], i64 1)
+// CHECK-RV64-NEXT: ret <vscale x 4 x float> [[TMP0]]
+//
+vfloat32m2_t test_vfwmul_vv_bf16m1_f32m2_rm_mu(vbool16_t vm, vfloat32m2_t vd,
+ vbfloat16m1_t vs2,
+ vbfloat16m1_t vs1, size_t vl) {
+ return __riscv_vfwmul_vv_bf16m1_f32m2_rm_mu(vm, vd, vs2, vs1, __RISCV_FRM_RNE,
+ vl);
+}
+
+// CHECK-RV64-LABEL: define dso_local <vscale x 4 x float> @test_vfwmul_vf_bf16m1_f32m2_rm_mu(
+// CHECK-RV64-SAME: <vscale x 4 x i1> [[VM:%.*]], <vscale x 4 x float> [[VD:%.*]], <vscale x 4 x bfloat> [[VS2:%.*]], bfloat noundef [[RS1:%.*]], i64 noundef [[VL:%.*]]) #[[ATTR0]] {
+// CHECK-RV64-NEXT: [[ENTRY:.*:]]
+// CHECK-RV64-NEXT: [[TMP0:%.*]] = call <vscale x 4 x float> @llvm.riscv.vfwmul.mask.nxv4f32.nxv4bf16.bf16.i64(<vscale x 4 x float> [[VD]], <vscale x 4 x bfloat> [[VS2]], bfloat [[RS1]], <vscale x 4 x i1> [[VM]], i64 0, i64 [[VL]], i64 1)
+// CHECK-RV64-NEXT: ret <vscale x 4 x float> [[TMP0]]
+//
+vfloat32m2_t test_vfwmul_vf_bf16m1_f32m2_rm_mu(vbool16_t vm, vfloat32m2_t vd,
+ vbfloat16m1_t vs2, __bf16 rs1,
+ size_t vl) {
+ return __riscv_vfwmul_vf_bf16m1_f32m2_rm_mu(vm, vd, vs2, rs1, __RISCV_FRM_RNE,
+ vl);
+}
+
+// CHECK-RV64-LABEL: define dso_local <vscale x 8 x float> @test_vfwmul_vv_bf16m2_f32m4_rm_mu(
+// CHECK-RV64-SAME: <vscale x 8 x i1> [[VM:%.*]], <vscale x 8 x float> [[VD:%.*]], <vscale x 8 x bfloat> [[VS2:%.*]], <vscale x 8 x bfloat> [[VS1:%.*]], i64 noundef [[VL:%.*]]) #[[ATTR0]] {
+// CHECK-RV64-NEXT: [[ENTRY:.*:]]
+// CHECK-RV64-NEXT: [[TMP0:%.*]] = call <vscale x 8 x float> @llvm.riscv.vfwmul.mask.nxv8f32.nxv8bf16.nxv8bf16.i64(<vscale x 8 x float> [[VD]], <vscale x 8 x bfloat> [[VS2]], <vscale x 8 x bfloat> [[VS1]], <vscale x 8 x i1> [[VM]], i64 0, i64 [[VL]], i64 1)
+// CHECK-RV64-NEXT: ret <vscale x 8 x float> [[TMP0]]
+//
+vfloat32m4_t test_vfwmul_vv_bf16m2_f32m4_rm_mu(vbool8_t vm, vfloat32m4_t vd,
+ vbfloat16m2_t vs2,
+ vbfloat16m2_t vs1, size_t vl) {
+ return __riscv_vfwmul_vv_bf16m2_f32m4_rm_mu(vm, vd, vs2, vs1, __RISCV_FRM_RNE,
+ vl);
+}
+
+// CHECK-RV64-LABEL: define dso_local <vscale x 8 x float> @test_vfwmul_vf_bf16m2_f32m4_rm_mu(
+// CHECK-RV64-SAME: <vscale x 8 x i1> [[VM:%.*]], <vscale x 8 x float> [[VD:%.*]], <vscale x 8 x bfloat> [[VS2:%.*]], bfloat noundef [[RS1:%.*]], i64 noundef [[VL:%.*]]) #[[ATTR0]] {
+// CHECK-RV64-NEXT: [[ENTRY:.*:]]
+// CHECK-RV64-NEXT: [[TMP0:%.*]] = call <vscale x 8 x float> @llvm.riscv.vfwmul.mask.nxv8f32.nxv8bf16.bf16.i64(<vscale x 8 x float> [[VD]], <vscale x 8 x bfloat> [[VS2]], bfloat [[RS1]], <vscale x 8 x i1> [[VM]], i64 0, i64 [[VL]], i64 1)
+// CHECK-RV64-NEXT: ret <vscale x 8 x float> [[TMP0]]
+//
+vfloat32m4_t test_vfwmul_vf_bf16m2_f32m4_rm_mu(vbool8_t vm, vfloat32m4_t vd,
+ vbfloat16m2_t vs2, __bf16 rs1,
+ size_t vl) {
+ return __riscv_vfwmul_vf_bf16m2_f32m4_rm_mu(vm, vd, vs2, rs1, __RISCV_FRM_RNE,
+ vl);
+}
+
+// CHECK-RV64-LABEL: define dso_local <vscale x 16 x float> @test_vfwmul_vv_bf16m4_f32m8_rm_mu(
+// CHECK-RV64-SAME: <vscale x 16 x i1> [[VM:%.*]], <vscale x 16 x float> [[VD:%.*]], <vscale x 16 x bfloat> [[VS2:%.*]], <vscale x 16 x bfloat> [[VS1:%.*]], i64 noundef [[VL:%.*]]) #[[ATTR0]] {
+// CHECK-RV64-NEXT: [[ENTRY:.*:]]
+// CHECK-RV64-NEXT: [[TMP0:%.*]] = call <vscale x 16 x float> @llvm.riscv.vfwmul.mask.nxv16f32.nxv16bf16.nxv16bf16.i64(<vscale x 16 x float> [[VD]], <vscale x 16 x bfloat> [[VS2]], <vscale x 16 x bfloat> [[VS1]], <vscale x 16 x i1> [[VM]], i64 0, i64 [[VL]], i64 1)
+// CHECK-RV64-NEXT: ret <vscale x 16 x float> [[TMP0]]
+//
+vfloat32m8_t test_vfwmul_vv_bf16m4_f32m8_rm_mu(vbool4_t vm, vfloat32m8_t vd,
+ vbfloat16m4_t vs2,
+ vbfloat16m4_t vs1, size_t vl) {
+ return __riscv_vfwmul_vv_bf16m4_f32m8_rm_mu(vm, vd, vs2, vs1, __RISCV_FRM_RNE,
+ vl);
+}
+
+// CHECK-RV64-LABEL: define dso_local <vscale x 16 x float> @test_vfwmul_vf_bf16m4_f32m8_rm_mu(
+// CHECK-RV64-SAME: <vscale x 16 x i1> [[VM:%.*]], <vscale x 16 x float> [[VD:%.*]], <vscale x 16 x bfloat> [[VS2:%.*]], bfloat noundef [[RS1:%.*]], i64 noundef [[VL:%.*]]) #[[ATTR0]] {
+// CHECK-RV64-NEXT: [[ENTRY:.*:]]
+// CHECK-RV64-NEXT: [[TMP0:%.*]] = call <vscale x 16 x float> @llvm.riscv.vfwmul.mask.nxv16f32.nxv16bf16.bf16.i64(<vscale x 16 x float> [[VD]], <vscale x 16 x bfloat> [[VS2]], bfloat [[RS1]], <vscale x 16 x i1> [[VM]], i64 0, i64 [[VL]], i64 1)
+// CHECK-RV64-NEXT: ret <vscale x 16 x float> [[TMP0]]
+//
+vfloat32m8_t test_vfwmul_vf_bf16m4_f32m8_rm_mu(vbool4_t vm, vfloat32m8_t vd,
+ vbfloat16m4_t vs2, __bf16 rs1,
+ size_t vl) {
+ return __riscv_vfwmul_vf_bf16m4_f32m8_rm_mu(vm, vd, vs2, rs1, __RISCV_FRM_RNE,
+ vl);
+}
diff --git a/clang/test/CodeGen/RISCV/rvv-intrinsics-autogenerated/zvfbfa/policy/non-overloaded/vfwnmacc.c b/clang/test/CodeGen/RISCV/rvv-intrinsics-autogenerated/zvfbfa/policy/non-overloaded/vfwnmacc.c
new file mode 100644
index 0000000..73cc822
--- /dev/null
+++ b/clang/test/CodeGen/RISCV/rvv-intrinsics-autogenerated/zvfbfa/policy/non-overloaded/vfwnmacc.c
@@ -0,0 +1,1034 @@
+// NOTE: Assertions have been autogenerated by utils/update_cc_test_checks.py UTC_ARGS: --version 5
+// REQUIRES: riscv-registered-target
+// RUN: %clang_cc1 -triple riscv64 -target-feature +v \
+// RUN: -target-feature +experimental-zvfbfa -disable-O0-optnone \
+// RUN: -emit-llvm %s -o - | opt -S -passes=mem2reg | \
+// RUN: FileCheck --check-prefix=CHECK-RV64 %s
+
+#include <riscv_vector.h>
+
+// CHECK-RV64-LABEL: define dso_local <vscale x 1 x float> @test_vfwnmacc_vv_bf16mf4_f32mf2_tu(
+// CHECK-RV64-SAME: <vscale x 1 x float> [[VD:%.*]], <vscale x 1 x bfloat> [[VS1:%.*]], <vscale x 1 x bfloat> [[VS2:%.*]], i64 noundef [[VL:%.*]]) #[[ATTR0:[0-9]+]] {
+// CHECK-RV64-NEXT: [[ENTRY:.*:]]
+// CHECK-RV64-NEXT: [[TMP0:%.*]] = call <vscale x 1 x float> @llvm.riscv.vfwnmacc.nxv1f32.nxv1bf16.nxv1bf16.i64(<vscale x 1 x float> [[VD]], <vscale x 1 x bfloat> [[VS1]], <vscale x 1 x bfloat> [[VS2]], i64 7, i64 [[VL]], i64 2)
+// CHECK-RV64-NEXT: ret <vscale x 1 x float> [[TMP0]]
+//
+vfloat32mf2_t test_vfwnmacc_vv_bf16mf4_f32mf2_tu(vfloat32mf2_t vd,
+ vbfloat16mf4_t vs1,
+ vbfloat16mf4_t vs2,
+ size_t vl) {
+ return __riscv_vfwnmacc_vv_bf16mf4_f32mf2_tu(vd, vs1, vs2, vl);
+}
+
+// CHECK-RV64-LABEL: define dso_local <vscale x 1 x float> @test_vfwnmacc_vf_bf16mf4_f32mf2_tu(
+// CHECK-RV64-SAME: <vscale x 1 x float> [[VD:%.*]], bfloat noundef [[VS1:%.*]], <vscale x 1 x bfloat> [[VS2:%.*]], i64 noundef [[VL:%.*]]) #[[ATTR0]] {
+// CHECK-RV64-NEXT: [[ENTRY:.*:]]
+// CHECK-RV64-NEXT: [[TMP0:%.*]] = call <vscale x 1 x float> @llvm.riscv.vfwnmacc.nxv1f32.bf16.nxv1bf16.i64(<vscale x 1 x float> [[VD]], bfloat [[VS1]], <vscale x 1 x bfloat> [[VS2]], i64 7, i64 [[VL]], i64 2)
+// CHECK-RV64-NEXT: ret <vscale x 1 x float> [[TMP0]]
+//
+vfloat32mf2_t test_vfwnmacc_vf_bf16mf4_f32mf2_tu(vfloat32mf2_t vd, __bf16 vs1,
+ vbfloat16mf4_t vs2,
+ size_t vl) {
+ return __riscv_vfwnmacc_vf_bf16mf4_f32mf2_tu(vd, vs1, vs2, vl);
+}
+
+// CHECK-RV64-LABEL: define dso_local <vscale x 2 x float> @test_vfwnmacc_vv_bf16mf2_f32m1_tu(
+// CHECK-RV64-SAME: <vscale x 2 x float> [[VD:%.*]], <vscale x 2 x bfloat> [[VS1:%.*]], <vscale x 2 x bfloat> [[VS2:%.*]], i64 noundef [[VL:%.*]]) #[[ATTR0]] {
+// CHECK-RV64-NEXT: [[ENTRY:.*:]]
+// CHECK-RV64-NEXT: [[TMP0:%.*]] = call <vscale x 2 x float> @llvm.riscv.vfwnmacc.nxv2f32.nxv2bf16.nxv2bf16.i64(<vscale x 2 x float> [[VD]], <vscale x 2 x bfloat> [[VS1]], <vscale x 2 x bfloat> [[VS2]], i64 7, i64 [[VL]], i64 2)
+// CHECK-RV64-NEXT: ret <vscale x 2 x float> [[TMP0]]
+//
+vfloat32m1_t test_vfwnmacc_vv_bf16mf2_f32m1_tu(vfloat32m1_t vd,
+ vbfloat16mf2_t vs1,
+ vbfloat16mf2_t vs2, size_t vl) {
+ return __riscv_vfwnmacc_vv_bf16mf2_f32m1_tu(vd, vs1, vs2, vl);
+}
+
+// CHECK-RV64-LABEL: define dso_local <vscale x 2 x float> @test_vfwnmacc_vf_bf16mf2_f32m1_tu(
+// CHECK-RV64-SAME: <vscale x 2 x float> [[VD:%.*]], bfloat noundef [[VS1:%.*]], <vscale x 2 x bfloat> [[VS2:%.*]], i64 noundef [[VL:%.*]]) #[[ATTR0]] {
+// CHECK-RV64-NEXT: [[ENTRY:.*:]]
+// CHECK-RV64-NEXT: [[TMP0:%.*]] = call <vscale x 2 x float> @llvm.riscv.vfwnmacc.nxv2f32.bf16.nxv2bf16.i64(<vscale x 2 x float> [[VD]], bfloat [[VS1]], <vscale x 2 x bfloat> [[VS2]], i64 7, i64 [[VL]], i64 2)
+// CHECK-RV64-NEXT: ret <vscale x 2 x float> [[TMP0]]
+//
+vfloat32m1_t test_vfwnmacc_vf_bf16mf2_f32m1_tu(vfloat32m1_t vd, __bf16 vs1,
+ vbfloat16mf2_t vs2, size_t vl) {
+ return __riscv_vfwnmacc_vf_bf16mf2_f32m1_tu(vd, vs1, vs2, vl);
+}
+
+// CHECK-RV64-LABEL: define dso_local <vscale x 4 x float> @test_vfwnmacc_vv_bf16m1_f32m2_tu(
+// CHECK-RV64-SAME: <vscale x 4 x float> [[VD:%.*]], <vscale x 4 x bfloat> [[VS1:%.*]], <vscale x 4 x bfloat> [[VS2:%.*]], i64 noundef [[VL:%.*]]) #[[ATTR0]] {
+// CHECK-RV64-NEXT: [[ENTRY:.*:]]
+// CHECK-RV64-NEXT: [[TMP0:%.*]] = call <vscale x 4 x float> @llvm.riscv.vfwnmacc.nxv4f32.nxv4bf16.nxv4bf16.i64(<vscale x 4 x float> [[VD]], <vscale x 4 x bfloat> [[VS1]], <vscale x 4 x bfloat> [[VS2]], i64 7, i64 [[VL]], i64 2)
+// CHECK-RV64-NEXT: ret <vscale x 4 x float> [[TMP0]]
+//
+vfloat32m2_t test_vfwnmacc_vv_bf16m1_f32m2_tu(vfloat32m2_t vd,
+ vbfloat16m1_t vs1,
+ vbfloat16m1_t vs2, size_t vl) {
+ return __riscv_vfwnmacc_vv_bf16m1_f32m2_tu(vd, vs1, vs2, vl);
+}
+
+// CHECK-RV64-LABEL: define dso_local <vscale x 4 x float> @test_vfwnmacc_vf_bf16m1_f32m2_tu(
+// CHECK-RV64-SAME: <vscale x 4 x float> [[VD:%.*]], bfloat noundef [[VS1:%.*]], <vscale x 4 x bfloat> [[VS2:%.*]], i64 noundef [[VL:%.*]]) #[[ATTR0]] {
+// CHECK-RV64-NEXT: [[ENTRY:.*:]]
+// CHECK-RV64-NEXT: [[TMP0:%.*]] = call <vscale x 4 x float> @llvm.riscv.vfwnmacc.nxv4f32.bf16.nxv4bf16.i64(<vscale x 4 x float> [[VD]], bfloat [[VS1]], <vscale x 4 x bfloat> [[VS2]], i64 7, i64 [[VL]], i64 2)
+// CHECK-RV64-NEXT: ret <vscale x 4 x float> [[TMP0]]
+//
+vfloat32m2_t test_vfwnmacc_vf_bf16m1_f32m2_tu(vfloat32m2_t vd, __bf16 vs1,
+ vbfloat16m1_t vs2, size_t vl) {
+ return __riscv_vfwnmacc_vf_bf16m1_f32m2_tu(vd, vs1, vs2, vl);
+}
+
+// CHECK-RV64-LABEL: define dso_local <vscale x 8 x float> @test_vfwnmacc_vv_bf16m2_f32m4_tu(
+// CHECK-RV64-SAME: <vscale x 8 x float> [[VD:%.*]], <vscale x 8 x bfloat> [[VS1:%.*]], <vscale x 8 x bfloat> [[VS2:%.*]], i64 noundef [[VL:%.*]]) #[[ATTR0]] {
+// CHECK-RV64-NEXT: [[ENTRY:.*:]]
+// CHECK-RV64-NEXT: [[TMP0:%.*]] = call <vscale x 8 x float> @llvm.riscv.vfwnmacc.nxv8f32.nxv8bf16.nxv8bf16.i64(<vscale x 8 x float> [[VD]], <vscale x 8 x bfloat> [[VS1]], <vscale x 8 x bfloat> [[VS2]], i64 7, i64 [[VL]], i64 2)
+// CHECK-RV64-NEXT: ret <vscale x 8 x float> [[TMP0]]
+//
+vfloat32m4_t test_vfwnmacc_vv_bf16m2_f32m4_tu(vfloat32m4_t vd,
+ vbfloat16m2_t vs1,
+ vbfloat16m2_t vs2, size_t vl) {
+ return __riscv_vfwnmacc_vv_bf16m2_f32m4_tu(vd, vs1, vs2, vl);
+}
+
+// CHECK-RV64-LABEL: define dso_local <vscale x 8 x float> @test_vfwnmacc_vf_bf16m2_f32m4_tu(
+// CHECK-RV64-SAME: <vscale x 8 x float> [[VD:%.*]], bfloat noundef [[VS1:%.*]], <vscale x 8 x bfloat> [[VS2:%.*]], i64 noundef [[VL:%.*]]) #[[ATTR0]] {
+// CHECK-RV64-NEXT: [[ENTRY:.*:]]
+// CHECK-RV64-NEXT: [[TMP0:%.*]] = call <vscale x 8 x float> @llvm.riscv.vfwnmacc.nxv8f32.bf16.nxv8bf16.i64(<vscale x 8 x float> [[VD]], bfloat [[VS1]], <vscale x 8 x bfloat> [[VS2]], i64 7, i64 [[VL]], i64 2)
+// CHECK-RV64-NEXT: ret <vscale x 8 x float> [[TMP0]]
+//
+vfloat32m4_t test_vfwnmacc_vf_bf16m2_f32m4_tu(vfloat32m4_t vd, __bf16 vs1,
+ vbfloat16m2_t vs2, size_t vl) {
+ return __riscv_vfwnmacc_vf_bf16m2_f32m4_tu(vd, vs1, vs2, vl);
+}
+
+// CHECK-RV64-LABEL: define dso_local <vscale x 16 x float> @test_vfwnmacc_vv_bf16m4_f32m8_tu(
+// CHECK-RV64-SAME: <vscale x 16 x float> [[VD:%.*]], <vscale x 16 x bfloat> [[VS1:%.*]], <vscale x 16 x bfloat> [[VS2:%.*]], i64 noundef [[VL:%.*]]) #[[ATTR0]] {
+// CHECK-RV64-NEXT: [[ENTRY:.*:]]
+// CHECK-RV64-NEXT: [[TMP0:%.*]] = call <vscale x 16 x float> @llvm.riscv.vfwnmacc.nxv16f32.nxv16bf16.nxv16bf16.i64(<vscale x 16 x float> [[VD]], <vscale x 16 x bfloat> [[VS1]], <vscale x 16 x bfloat> [[VS2]], i64 7, i64 [[VL]], i64 2)
+// CHECK-RV64-NEXT: ret <vscale x 16 x float> [[TMP0]]
+//
+vfloat32m8_t test_vfwnmacc_vv_bf16m4_f32m8_tu(vfloat32m8_t vd,
+ vbfloat16m4_t vs1,
+ vbfloat16m4_t vs2, size_t vl) {
+ return __riscv_vfwnmacc_vv_bf16m4_f32m8_tu(vd, vs1, vs2, vl);
+}
+
+// CHECK-RV64-LABEL: define dso_local <vscale x 16 x float> @test_vfwnmacc_vf_bf16m4_f32m8_tu(
+// CHECK-RV64-SAME: <vscale x 16 x float> [[VD:%.*]], bfloat noundef [[VS1:%.*]], <vscale x 16 x bfloat> [[VS2:%.*]], i64 noundef [[VL:%.*]]) #[[ATTR0]] {
+// CHECK-RV64-NEXT: [[ENTRY:.*:]]
+// CHECK-RV64-NEXT: [[TMP0:%.*]] = call <vscale x 16 x float> @llvm.riscv.vfwnmacc.nxv16f32.bf16.nxv16bf16.i64(<vscale x 16 x float> [[VD]], bfloat [[VS1]], <vscale x 16 x bfloat> [[VS2]], i64 7, i64 [[VL]], i64 2)
+// CHECK-RV64-NEXT: ret <vscale x 16 x float> [[TMP0]]
+//
+vfloat32m8_t test_vfwnmacc_vf_bf16m4_f32m8_tu(vfloat32m8_t vd, __bf16 vs1,
+ vbfloat16m4_t vs2, size_t vl) {
+ return __riscv_vfwnmacc_vf_bf16m4_f32m8_tu(vd, vs1, vs2, vl);
+}
+
+// CHECK-RV64-LABEL: define dso_local <vscale x 1 x float> @test_vfwnmacc_vv_bf16mf4_f32mf2_tum(
+// CHECK-RV64-SAME: <vscale x 1 x i1> [[VM:%.*]], <vscale x 1 x float> [[VD:%.*]], <vscale x 1 x bfloat> [[VS1:%.*]], <vscale x 1 x bfloat> [[VS2:%.*]], i64 noundef [[VL:%.*]]) #[[ATTR0]] {
+// CHECK-RV64-NEXT: [[ENTRY:.*:]]
+// CHECK-RV64-NEXT: [[TMP0:%.*]] = call <vscale x 1 x float> @llvm.riscv.vfwnmacc.mask.nxv1f32.nxv1bf16.nxv1bf16.i64(<vscale x 1 x float> [[VD]], <vscale x 1 x bfloat> [[VS1]], <vscale x 1 x bfloat> [[VS2]], <vscale x 1 x i1> [[VM]], i64 7, i64 [[VL]], i64 2)
+// CHECK-RV64-NEXT: ret <vscale x 1 x float> [[TMP0]]
+//
+vfloat32mf2_t test_vfwnmacc_vv_bf16mf4_f32mf2_tum(vbool64_t vm,
+ vfloat32mf2_t vd,
+ vbfloat16mf4_t vs1,
+ vbfloat16mf4_t vs2,
+ size_t vl) {
+ return __riscv_vfwnmacc_vv_bf16mf4_f32mf2_tum(vm, vd, vs1, vs2, vl);
+}
+
+// CHECK-RV64-LABEL: define dso_local <vscale x 1 x float> @test_vfwnmacc_vf_bf16mf4_f32mf2_tum(
+// CHECK-RV64-SAME: <vscale x 1 x i1> [[VM:%.*]], <vscale x 1 x float> [[VD:%.*]], bfloat noundef [[VS1:%.*]], <vscale x 1 x bfloat> [[VS2:%.*]], i64 noundef [[VL:%.*]]) #[[ATTR0]] {
+// CHECK-RV64-NEXT: [[ENTRY:.*:]]
+// CHECK-RV64-NEXT: [[TMP0:%.*]] = call <vscale x 1 x float> @llvm.riscv.vfwnmacc.mask.nxv1f32.bf16.nxv1bf16.i64(<vscale x 1 x float> [[VD]], bfloat [[VS1]], <vscale x 1 x bfloat> [[VS2]], <vscale x 1 x i1> [[VM]], i64 7, i64 [[VL]], i64 2)
+// CHECK-RV64-NEXT: ret <vscale x 1 x float> [[TMP0]]
+//
+vfloat32mf2_t test_vfwnmacc_vf_bf16mf4_f32mf2_tum(vbool64_t vm,
+ vfloat32mf2_t vd, __bf16 vs1,
+ vbfloat16mf4_t vs2,
+ size_t vl) {
+ return __riscv_vfwnmacc_vf_bf16mf4_f32mf2_tum(vm, vd, vs1, vs2, vl);
+}
+
+// CHECK-RV64-LABEL: define dso_local <vscale x 2 x float> @test_vfwnmacc_vv_bf16mf2_f32m1_tum(
+// CHECK-RV64-SAME: <vscale x 2 x i1> [[VM:%.*]], <vscale x 2 x float> [[VD:%.*]], <vscale x 2 x bfloat> [[VS1:%.*]], <vscale x 2 x bfloat> [[VS2:%.*]], i64 noundef [[VL:%.*]]) #[[ATTR0]] {
+// CHECK-RV64-NEXT: [[ENTRY:.*:]]
+// CHECK-RV64-NEXT: [[TMP0:%.*]] = call <vscale x 2 x float> @llvm.riscv.vfwnmacc.mask.nxv2f32.nxv2bf16.nxv2bf16.i64(<vscale x 2 x float> [[VD]], <vscale x 2 x bfloat> [[VS1]], <vscale x 2 x bfloat> [[VS2]], <vscale x 2 x i1> [[VM]], i64 7, i64 [[VL]], i64 2)
+// CHECK-RV64-NEXT: ret <vscale x 2 x float> [[TMP0]]
+//
+vfloat32m1_t test_vfwnmacc_vv_bf16mf2_f32m1_tum(vbool32_t vm, vfloat32m1_t vd,
+ vbfloat16mf2_t vs1,
+ vbfloat16mf2_t vs2, size_t vl) {
+ return __riscv_vfwnmacc_vv_bf16mf2_f32m1_tum(vm, vd, vs1, vs2, vl);
+}
+
+// CHECK-RV64-LABEL: define dso_local <vscale x 2 x float> @test_vfwnmacc_vf_bf16mf2_f32m1_tum(
+// CHECK-RV64-SAME: <vscale x 2 x i1> [[VM:%.*]], <vscale x 2 x float> [[VD:%.*]], bfloat noundef [[VS1:%.*]], <vscale x 2 x bfloat> [[VS2:%.*]], i64 noundef [[VL:%.*]]) #[[ATTR0]] {
+// CHECK-RV64-NEXT: [[ENTRY:.*:]]
+// CHECK-RV64-NEXT: [[TMP0:%.*]] = call <vscale x 2 x float> @llvm.riscv.vfwnmacc.mask.nxv2f32.bf16.nxv2bf16.i64(<vscale x 2 x float> [[VD]], bfloat [[VS1]], <vscale x 2 x bfloat> [[VS2]], <vscale x 2 x i1> [[VM]], i64 7, i64 [[VL]], i64 2)
+// CHECK-RV64-NEXT: ret <vscale x 2 x float> [[TMP0]]
+//
+vfloat32m1_t test_vfwnmacc_vf_bf16mf2_f32m1_tum(vbool32_t vm, vfloat32m1_t vd,
+ __bf16 vs1, vbfloat16mf2_t vs2,
+ size_t vl) {
+ return __riscv_vfwnmacc_vf_bf16mf2_f32m1_tum(vm, vd, vs1, vs2, vl);
+}
+
+// CHECK-RV64-LABEL: define dso_local <vscale x 4 x float> @test_vfwnmacc_vv_bf16m1_f32m2_tum(
+// CHECK-RV64-SAME: <vscale x 4 x i1> [[VM:%.*]], <vscale x 4 x float> [[VD:%.*]], <vscale x 4 x bfloat> [[VS1:%.*]], <vscale x 4 x bfloat> [[VS2:%.*]], i64 noundef [[VL:%.*]]) #[[ATTR0]] {
+// CHECK-RV64-NEXT: [[ENTRY:.*:]]
+// CHECK-RV64-NEXT: [[TMP0:%.*]] = call <vscale x 4 x float> @llvm.riscv.vfwnmacc.mask.nxv4f32.nxv4bf16.nxv4bf16.i64(<vscale x 4 x float> [[VD]], <vscale x 4 x bfloat> [[VS1]], <vscale x 4 x bfloat> [[VS2]], <vscale x 4 x i1> [[VM]], i64 7, i64 [[VL]], i64 2)
+// CHECK-RV64-NEXT: ret <vscale x 4 x float> [[TMP0]]
+//
+vfloat32m2_t test_vfwnmacc_vv_bf16m1_f32m2_tum(vbool16_t vm, vfloat32m2_t vd,
+ vbfloat16m1_t vs1,
+ vbfloat16m1_t vs2, size_t vl) {
+ return __riscv_vfwnmacc_vv_bf16m1_f32m2_tum(vm, vd, vs1, vs2, vl);
+}
+
+// CHECK-RV64-LABEL: define dso_local <vscale x 4 x float> @test_vfwnmacc_vf_bf16m1_f32m2_tum(
+// CHECK-RV64-SAME: <vscale x 4 x i1> [[VM:%.*]], <vscale x 4 x float> [[VD:%.*]], bfloat noundef [[VS1:%.*]], <vscale x 4 x bfloat> [[VS2:%.*]], i64 noundef [[VL:%.*]]) #[[ATTR0]] {
+// CHECK-RV64-NEXT: [[ENTRY:.*:]]
+// CHECK-RV64-NEXT: [[TMP0:%.*]] = call <vscale x 4 x float> @llvm.riscv.vfwnmacc.mask.nxv4f32.bf16.nxv4bf16.i64(<vscale x 4 x float> [[VD]], bfloat [[VS1]], <vscale x 4 x bfloat> [[VS2]], <vscale x 4 x i1> [[VM]], i64 7, i64 [[VL]], i64 2)
+// CHECK-RV64-NEXT: ret <vscale x 4 x float> [[TMP0]]
+//
+vfloat32m2_t test_vfwnmacc_vf_bf16m1_f32m2_tum(vbool16_t vm, vfloat32m2_t vd,
+ __bf16 vs1, vbfloat16m1_t vs2,
+ size_t vl) {
+ return __riscv_vfwnmacc_vf_bf16m1_f32m2_tum(vm, vd, vs1, vs2, vl);
+}
+
+// CHECK-RV64-LABEL: define dso_local <vscale x 8 x float> @test_vfwnmacc_vv_bf16m2_f32m4_tum(
+// CHECK-RV64-SAME: <vscale x 8 x i1> [[VM:%.*]], <vscale x 8 x float> [[VD:%.*]], <vscale x 8 x bfloat> [[VS1:%.*]], <vscale x 8 x bfloat> [[VS2:%.*]], i64 noundef [[VL:%.*]]) #[[ATTR0]] {
+// CHECK-RV64-NEXT: [[ENTRY:.*:]]
+// CHECK-RV64-NEXT: [[TMP0:%.*]] = call <vscale x 8 x float> @llvm.riscv.vfwnmacc.mask.nxv8f32.nxv8bf16.nxv8bf16.i64(<vscale x 8 x float> [[VD]], <vscale x 8 x bfloat> [[VS1]], <vscale x 8 x bfloat> [[VS2]], <vscale x 8 x i1> [[VM]], i64 7, i64 [[VL]], i64 2)
+// CHECK-RV64-NEXT: ret <vscale x 8 x float> [[TMP0]]
+//
+vfloat32m4_t test_vfwnmacc_vv_bf16m2_f32m4_tum(vbool8_t vm, vfloat32m4_t vd,
+ vbfloat16m2_t vs1,
+ vbfloat16m2_t vs2, size_t vl) {
+ return __riscv_vfwnmacc_vv_bf16m2_f32m4_tum(vm, vd, vs1, vs2, vl);
+}
+
+// CHECK-RV64-LABEL: define dso_local <vscale x 8 x float> @test_vfwnmacc_vf_bf16m2_f32m4_tum(
+// CHECK-RV64-SAME: <vscale x 8 x i1> [[VM:%.*]], <vscale x 8 x float> [[VD:%.*]], bfloat noundef [[VS1:%.*]], <vscale x 8 x bfloat> [[VS2:%.*]], i64 noundef [[VL:%.*]]) #[[ATTR0]] {
+// CHECK-RV64-NEXT: [[ENTRY:.*:]]
+// CHECK-RV64-NEXT: [[TMP0:%.*]] = call <vscale x 8 x float> @llvm.riscv.vfwnmacc.mask.nxv8f32.bf16.nxv8bf16.i64(<vscale x 8 x float> [[VD]], bfloat [[VS1]], <vscale x 8 x bfloat> [[VS2]], <vscale x 8 x i1> [[VM]], i64 7, i64 [[VL]], i64 2)
+// CHECK-RV64-NEXT: ret <vscale x 8 x float> [[TMP0]]
+//
+vfloat32m4_t test_vfwnmacc_vf_bf16m2_f32m4_tum(vbool8_t vm, vfloat32m4_t vd,
+ __bf16 vs1, vbfloat16m2_t vs2,
+ size_t vl) {
+ return __riscv_vfwnmacc_vf_bf16m2_f32m4_tum(vm, vd, vs1, vs2, vl);
+}
+
+// CHECK-RV64-LABEL: define dso_local <vscale x 16 x float> @test_vfwnmacc_vv_bf16m4_f32m8_tum(
+// CHECK-RV64-SAME: <vscale x 16 x i1> [[VM:%.*]], <vscale x 16 x float> [[VD:%.*]], <vscale x 16 x bfloat> [[VS1:%.*]], <vscale x 16 x bfloat> [[VS2:%.*]], i64 noundef [[VL:%.*]]) #[[ATTR0]] {
+// CHECK-RV64-NEXT: [[ENTRY:.*:]]
+// CHECK-RV64-NEXT: [[TMP0:%.*]] = call <vscale x 16 x float> @llvm.riscv.vfwnmacc.mask.nxv16f32.nxv16bf16.nxv16bf16.i64(<vscale x 16 x float> [[VD]], <vscale x 16 x bfloat> [[VS1]], <vscale x 16 x bfloat> [[VS2]], <vscale x 16 x i1> [[VM]], i64 7, i64 [[VL]], i64 2)
+// CHECK-RV64-NEXT: ret <vscale x 16 x float> [[TMP0]]
+//
+vfloat32m8_t test_vfwnmacc_vv_bf16m4_f32m8_tum(vbool4_t vm, vfloat32m8_t vd,
+ vbfloat16m4_t vs1,
+ vbfloat16m4_t vs2, size_t vl) {
+ return __riscv_vfwnmacc_vv_bf16m4_f32m8_tum(vm, vd, vs1, vs2, vl);
+}
+
+// CHECK-RV64-LABEL: define dso_local <vscale x 16 x float> @test_vfwnmacc_vf_bf16m4_f32m8_tum(
+// CHECK-RV64-SAME: <vscale x 16 x i1> [[VM:%.*]], <vscale x 16 x float> [[VD:%.*]], bfloat noundef [[VS1:%.*]], <vscale x 16 x bfloat> [[VS2:%.*]], i64 noundef [[VL:%.*]]) #[[ATTR0]] {
+// CHECK-RV64-NEXT: [[ENTRY:.*:]]
+// CHECK-RV64-NEXT: [[TMP0:%.*]] = call <vscale x 16 x float> @llvm.riscv.vfwnmacc.mask.nxv16f32.bf16.nxv16bf16.i64(<vscale x 16 x float> [[VD]], bfloat [[VS1]], <vscale x 16 x bfloat> [[VS2]], <vscale x 16 x i1> [[VM]], i64 7, i64 [[VL]], i64 2)
+// CHECK-RV64-NEXT: ret <vscale x 16 x float> [[TMP0]]
+//
+vfloat32m8_t test_vfwnmacc_vf_bf16m4_f32m8_tum(vbool4_t vm, vfloat32m8_t vd,
+ __bf16 vs1, vbfloat16m4_t vs2,
+ size_t vl) {
+ return __riscv_vfwnmacc_vf_bf16m4_f32m8_tum(vm, vd, vs1, vs2, vl);
+}
+
+// CHECK-RV64-LABEL: define dso_local <vscale x 1 x float> @test_vfwnmacc_vv_bf16mf4_f32mf2_tumu(
+// CHECK-RV64-SAME: <vscale x 1 x i1> [[VM:%.*]], <vscale x 1 x float> [[VD:%.*]], <vscale x 1 x bfloat> [[VS1:%.*]], <vscale x 1 x bfloat> [[VS2:%.*]], i64 noundef [[VL:%.*]]) #[[ATTR0]] {
+// CHECK-RV64-NEXT: [[ENTRY:.*:]]
+// CHECK-RV64-NEXT: [[TMP0:%.*]] = call <vscale x 1 x float> @llvm.riscv.vfwnmacc.mask.nxv1f32.nxv1bf16.nxv1bf16.i64(<vscale x 1 x float> [[VD]], <vscale x 1 x bfloat> [[VS1]], <vscale x 1 x bfloat> [[VS2]], <vscale x 1 x i1> [[VM]], i64 7, i64 [[VL]], i64 0)
+// CHECK-RV64-NEXT: ret <vscale x 1 x float> [[TMP0]]
+//
+vfloat32mf2_t test_vfwnmacc_vv_bf16mf4_f32mf2_tumu(vbool64_t vm,
+ vfloat32mf2_t vd,
+ vbfloat16mf4_t vs1,
+ vbfloat16mf4_t vs2,
+ size_t vl) {
+ return __riscv_vfwnmacc_vv_bf16mf4_f32mf2_tumu(vm, vd, vs1, vs2, vl);
+}
+
+// CHECK-RV64-LABEL: define dso_local <vscale x 1 x float> @test_vfwnmacc_vf_bf16mf4_f32mf2_tumu(
+// CHECK-RV64-SAME: <vscale x 1 x i1> [[VM:%.*]], <vscale x 1 x float> [[VD:%.*]], bfloat noundef [[VS1:%.*]], <vscale x 1 x bfloat> [[VS2:%.*]], i64 noundef [[VL:%.*]]) #[[ATTR0]] {
+// CHECK-RV64-NEXT: [[ENTRY:.*:]]
+// CHECK-RV64-NEXT: [[TMP0:%.*]] = call <vscale x 1 x float> @llvm.riscv.vfwnmacc.mask.nxv1f32.bf16.nxv1bf16.i64(<vscale x 1 x float> [[VD]], bfloat [[VS1]], <vscale x 1 x bfloat> [[VS2]], <vscale x 1 x i1> [[VM]], i64 7, i64 [[VL]], i64 0)
+// CHECK-RV64-NEXT: ret <vscale x 1 x float> [[TMP0]]
+//
+vfloat32mf2_t test_vfwnmacc_vf_bf16mf4_f32mf2_tumu(vbool64_t vm,
+ vfloat32mf2_t vd, __bf16 vs1,
+ vbfloat16mf4_t vs2,
+ size_t vl) {
+ return __riscv_vfwnmacc_vf_bf16mf4_f32mf2_tumu(vm, vd, vs1, vs2, vl);
+}
+
+// CHECK-RV64-LABEL: define dso_local <vscale x 2 x float> @test_vfwnmacc_vv_bf16mf2_f32m1_tumu(
+// CHECK-RV64-SAME: <vscale x 2 x i1> [[VM:%.*]], <vscale x 2 x float> [[VD:%.*]], <vscale x 2 x bfloat> [[VS1:%.*]], <vscale x 2 x bfloat> [[VS2:%.*]], i64 noundef [[VL:%.*]]) #[[ATTR0]] {
+// CHECK-RV64-NEXT: [[ENTRY:.*:]]
+// CHECK-RV64-NEXT: [[TMP0:%.*]] = call <vscale x 2 x float> @llvm.riscv.vfwnmacc.mask.nxv2f32.nxv2bf16.nxv2bf16.i64(<vscale x 2 x float> [[VD]], <vscale x 2 x bfloat> [[VS1]], <vscale x 2 x bfloat> [[VS2]], <vscale x 2 x i1> [[VM]], i64 7, i64 [[VL]], i64 0)
+// CHECK-RV64-NEXT: ret <vscale x 2 x float> [[TMP0]]
+//
+vfloat32m1_t test_vfwnmacc_vv_bf16mf2_f32m1_tumu(vbool32_t vm, vfloat32m1_t vd,
+ vbfloat16mf2_t vs1,
+ vbfloat16mf2_t vs2,
+ size_t vl) {
+ return __riscv_vfwnmacc_vv_bf16mf2_f32m1_tumu(vm, vd, vs1, vs2, vl);
+}
+
+// CHECK-RV64-LABEL: define dso_local <vscale x 2 x float> @test_vfwnmacc_vf_bf16mf2_f32m1_tumu(
+// CHECK-RV64-SAME: <vscale x 2 x i1> [[VM:%.*]], <vscale x 2 x float> [[VD:%.*]], bfloat noundef [[VS1:%.*]], <vscale x 2 x bfloat> [[VS2:%.*]], i64 noundef [[VL:%.*]]) #[[ATTR0]] {
+// CHECK-RV64-NEXT: [[ENTRY:.*:]]
+// CHECK-RV64-NEXT: [[TMP0:%.*]] = call <vscale x 2 x float> @llvm.riscv.vfwnmacc.mask.nxv2f32.bf16.nxv2bf16.i64(<vscale x 2 x float> [[VD]], bfloat [[VS1]], <vscale x 2 x bfloat> [[VS2]], <vscale x 2 x i1> [[VM]], i64 7, i64 [[VL]], i64 0)
+// CHECK-RV64-NEXT: ret <vscale x 2 x float> [[TMP0]]
+//
+vfloat32m1_t test_vfwnmacc_vf_bf16mf2_f32m1_tumu(vbool32_t vm, vfloat32m1_t vd,
+ __bf16 vs1, vbfloat16mf2_t vs2,
+ size_t vl) {
+ return __riscv_vfwnmacc_vf_bf16mf2_f32m1_tumu(vm, vd, vs1, vs2, vl);
+}
+
+// CHECK-RV64-LABEL: define dso_local <vscale x 4 x float> @test_vfwnmacc_vv_bf16m1_f32m2_tumu(
+// CHECK-RV64-SAME: <vscale x 4 x i1> [[VM:%.*]], <vscale x 4 x float> [[VD:%.*]], <vscale x 4 x bfloat> [[VS1:%.*]], <vscale x 4 x bfloat> [[VS2:%.*]], i64 noundef [[VL:%.*]]) #[[ATTR0]] {
+// CHECK-RV64-NEXT: [[ENTRY:.*:]]
+// CHECK-RV64-NEXT: [[TMP0:%.*]] = call <vscale x 4 x float> @llvm.riscv.vfwnmacc.mask.nxv4f32.nxv4bf16.nxv4bf16.i64(<vscale x 4 x float> [[VD]], <vscale x 4 x bfloat> [[VS1]], <vscale x 4 x bfloat> [[VS2]], <vscale x 4 x i1> [[VM]], i64 7, i64 [[VL]], i64 0)
+// CHECK-RV64-NEXT: ret <vscale x 4 x float> [[TMP0]]
+//
+vfloat32m2_t test_vfwnmacc_vv_bf16m1_f32m2_tumu(vbool16_t vm, vfloat32m2_t vd,
+ vbfloat16m1_t vs1,
+ vbfloat16m1_t vs2, size_t vl) {
+ return __riscv_vfwnmacc_vv_bf16m1_f32m2_tumu(vm, vd, vs1, vs2, vl);
+}
+
+// CHECK-RV64-LABEL: define dso_local <vscale x 4 x float> @test_vfwnmacc_vf_bf16m1_f32m2_tumu(
+// CHECK-RV64-SAME: <vscale x 4 x i1> [[VM:%.*]], <vscale x 4 x float> [[VD:%.*]], bfloat noundef [[VS1:%.*]], <vscale x 4 x bfloat> [[VS2:%.*]], i64 noundef [[VL:%.*]]) #[[ATTR0]] {
+// CHECK-RV64-NEXT: [[ENTRY:.*:]]
+// CHECK-RV64-NEXT: [[TMP0:%.*]] = call <vscale x 4 x float> @llvm.riscv.vfwnmacc.mask.nxv4f32.bf16.nxv4bf16.i64(<vscale x 4 x float> [[VD]], bfloat [[VS1]], <vscale x 4 x bfloat> [[VS2]], <vscale x 4 x i1> [[VM]], i64 7, i64 [[VL]], i64 0)
+// CHECK-RV64-NEXT: ret <vscale x 4 x float> [[TMP0]]
+//
+vfloat32m2_t test_vfwnmacc_vf_bf16m1_f32m2_tumu(vbool16_t vm, vfloat32m2_t vd,
+ __bf16 vs1, vbfloat16m1_t vs2,
+ size_t vl) {
+ return __riscv_vfwnmacc_vf_bf16m1_f32m2_tumu(vm, vd, vs1, vs2, vl);
+}
+
+// CHECK-RV64-LABEL: define dso_local <vscale x 8 x float> @test_vfwnmacc_vv_bf16m2_f32m4_tumu(
+// CHECK-RV64-SAME: <vscale x 8 x i1> [[VM:%.*]], <vscale x 8 x float> [[VD:%.*]], <vscale x 8 x bfloat> [[VS1:%.*]], <vscale x 8 x bfloat> [[VS2:%.*]], i64 noundef [[VL:%.*]]) #[[ATTR0]] {
+// CHECK-RV64-NEXT: [[ENTRY:.*:]]
+// CHECK-RV64-NEXT: [[TMP0:%.*]] = call <vscale x 8 x float> @llvm.riscv.vfwnmacc.mask.nxv8f32.nxv8bf16.nxv8bf16.i64(<vscale x 8 x float> [[VD]], <vscale x 8 x bfloat> [[VS1]], <vscale x 8 x bfloat> [[VS2]], <vscale x 8 x i1> [[VM]], i64 7, i64 [[VL]], i64 0)
+// CHECK-RV64-NEXT: ret <vscale x 8 x float> [[TMP0]]
+//
+vfloat32m4_t test_vfwnmacc_vv_bf16m2_f32m4_tumu(vbool8_t vm, vfloat32m4_t vd,
+ vbfloat16m2_t vs1,
+ vbfloat16m2_t vs2, size_t vl) {
+ return __riscv_vfwnmacc_vv_bf16m2_f32m4_tumu(vm, vd, vs1, vs2, vl);
+}
+
+// CHECK-RV64-LABEL: define dso_local <vscale x 8 x float> @test_vfwnmacc_vf_bf16m2_f32m4_tumu(
+// CHECK-RV64-SAME: <vscale x 8 x i1> [[VM:%.*]], <vscale x 8 x float> [[VD:%.*]], bfloat noundef [[VS1:%.*]], <vscale x 8 x bfloat> [[VS2:%.*]], i64 noundef [[VL:%.*]]) #[[ATTR0]] {
+// CHECK-RV64-NEXT: [[ENTRY:.*:]]
+// CHECK-RV64-NEXT: [[TMP0:%.*]] = call <vscale x 8 x float> @llvm.riscv.vfwnmacc.mask.nxv8f32.bf16.nxv8bf16.i64(<vscale x 8 x float> [[VD]], bfloat [[VS1]], <vscale x 8 x bfloat> [[VS2]], <vscale x 8 x i1> [[VM]], i64 7, i64 [[VL]], i64 0)
+// CHECK-RV64-NEXT: ret <vscale x 8 x float> [[TMP0]]
+//
+vfloat32m4_t test_vfwnmacc_vf_bf16m2_f32m4_tumu(vbool8_t vm, vfloat32m4_t vd,
+ __bf16 vs1, vbfloat16m2_t vs2,
+ size_t vl) {
+ return __riscv_vfwnmacc_vf_bf16m2_f32m4_tumu(vm, vd, vs1, vs2, vl);
+}
+
+// CHECK-RV64-LABEL: define dso_local <vscale x 16 x float> @test_vfwnmacc_vv_bf16m4_f32m8_tumu(
+// CHECK-RV64-SAME: <vscale x 16 x i1> [[VM:%.*]], <vscale x 16 x float> [[VD:%.*]], <vscale x 16 x bfloat> [[VS1:%.*]], <vscale x 16 x bfloat> [[VS2:%.*]], i64 noundef [[VL:%.*]]) #[[ATTR0]] {
+// CHECK-RV64-NEXT: [[ENTRY:.*:]]
+// CHECK-RV64-NEXT: [[TMP0:%.*]] = call <vscale x 16 x float> @llvm.riscv.vfwnmacc.mask.nxv16f32.nxv16bf16.nxv16bf16.i64(<vscale x 16 x float> [[VD]], <vscale x 16 x bfloat> [[VS1]], <vscale x 16 x bfloat> [[VS2]], <vscale x 16 x i1> [[VM]], i64 7, i64 [[VL]], i64 0)
+// CHECK-RV64-NEXT: ret <vscale x 16 x float> [[TMP0]]
+//
+vfloat32m8_t test_vfwnmacc_vv_bf16m4_f32m8_tumu(vbool4_t vm, vfloat32m8_t vd,
+ vbfloat16m4_t vs1,
+ vbfloat16m4_t vs2, size_t vl) {
+ return __riscv_vfwnmacc_vv_bf16m4_f32m8_tumu(vm, vd, vs1, vs2, vl);
+}
+
+// CHECK-RV64-LABEL: define dso_local <vscale x 16 x float> @test_vfwnmacc_vf_bf16m4_f32m8_tumu(
+// CHECK-RV64-SAME: <vscale x 16 x i1> [[VM:%.*]], <vscale x 16 x float> [[VD:%.*]], bfloat noundef [[VS1:%.*]], <vscale x 16 x bfloat> [[VS2:%.*]], i64 noundef [[VL:%.*]]) #[[ATTR0]] {
+// CHECK-RV64-NEXT: [[ENTRY:.*:]]
+// CHECK-RV64-NEXT: [[TMP0:%.*]] = call <vscale x 16 x float> @llvm.riscv.vfwnmacc.mask.nxv16f32.bf16.nxv16bf16.i64(<vscale x 16 x float> [[VD]], bfloat [[VS1]], <vscale x 16 x bfloat> [[VS2]], <vscale x 16 x i1> [[VM]], i64 7, i64 [[VL]], i64 0)
+// CHECK-RV64-NEXT: ret <vscale x 16 x float> [[TMP0]]
+//
+vfloat32m8_t test_vfwnmacc_vf_bf16m4_f32m8_tumu(vbool4_t vm, vfloat32m8_t vd,
+ __bf16 vs1, vbfloat16m4_t vs2,
+ size_t vl) {
+ return __riscv_vfwnmacc_vf_bf16m4_f32m8_tumu(vm, vd, vs1, vs2, vl);
+}
+
+// CHECK-RV64-LABEL: define dso_local <vscale x 1 x float> @test_vfwnmacc_vv_bf16mf4_f32mf2_mu(
+// CHECK-RV64-SAME: <vscale x 1 x i1> [[VM:%.*]], <vscale x 1 x float> [[VD:%.*]], <vscale x 1 x bfloat> [[VS1:%.*]], <vscale x 1 x bfloat> [[VS2:%.*]], i64 noundef [[VL:%.*]]) #[[ATTR0]] {
+// CHECK-RV64-NEXT: [[ENTRY:.*:]]
+// CHECK-RV64-NEXT: [[TMP0:%.*]] = call <vscale x 1 x float> @llvm.riscv.vfwnmacc.mask.nxv1f32.nxv1bf16.nxv1bf16.i64(<vscale x 1 x float> [[VD]], <vscale x 1 x bfloat> [[VS1]], <vscale x 1 x bfloat> [[VS2]], <vscale x 1 x i1> [[VM]], i64 7, i64 [[VL]], i64 1)
+// CHECK-RV64-NEXT: ret <vscale x 1 x float> [[TMP0]]
+//
+vfloat32mf2_t test_vfwnmacc_vv_bf16mf4_f32mf2_mu(vbool64_t vm, vfloat32mf2_t vd,
+ vbfloat16mf4_t vs1,
+ vbfloat16mf4_t vs2,
+ size_t vl) {
+ return __riscv_vfwnmacc_vv_bf16mf4_f32mf2_mu(vm, vd, vs1, vs2, vl);
+}
+
+// CHECK-RV64-LABEL: define dso_local <vscale x 1 x float> @test_vfwnmacc_vf_bf16mf4_f32mf2_mu(
+// CHECK-RV64-SAME: <vscale x 1 x i1> [[VM:%.*]], <vscale x 1 x float> [[VD:%.*]], bfloat noundef [[VS1:%.*]], <vscale x 1 x bfloat> [[VS2:%.*]], i64 noundef [[VL:%.*]]) #[[ATTR0]] {
+// CHECK-RV64-NEXT: [[ENTRY:.*:]]
+// CHECK-RV64-NEXT: [[TMP0:%.*]] = call <vscale x 1 x float> @llvm.riscv.vfwnmacc.mask.nxv1f32.bf16.nxv1bf16.i64(<vscale x 1 x float> [[VD]], bfloat [[VS1]], <vscale x 1 x bfloat> [[VS2]], <vscale x 1 x i1> [[VM]], i64 7, i64 [[VL]], i64 1)
+// CHECK-RV64-NEXT: ret <vscale x 1 x float> [[TMP0]]
+//
+vfloat32mf2_t test_vfwnmacc_vf_bf16mf4_f32mf2_mu(vbool64_t vm, vfloat32mf2_t vd,
+ __bf16 vs1, vbfloat16mf4_t vs2,
+ size_t vl) {
+ return __riscv_vfwnmacc_vf_bf16mf4_f32mf2_mu(vm, vd, vs1, vs2, vl);
+}
+
+// CHECK-RV64-LABEL: define dso_local <vscale x 2 x float> @test_vfwnmacc_vv_bf16mf2_f32m1_mu(
+// CHECK-RV64-SAME: <vscale x 2 x i1> [[VM:%.*]], <vscale x 2 x float> [[VD:%.*]], <vscale x 2 x bfloat> [[VS1:%.*]], <vscale x 2 x bfloat> [[VS2:%.*]], i64 noundef [[VL:%.*]]) #[[ATTR0]] {
+// CHECK-RV64-NEXT: [[ENTRY:.*:]]
+// CHECK-RV64-NEXT: [[TMP0:%.*]] = call <vscale x 2 x float> @llvm.riscv.vfwnmacc.mask.nxv2f32.nxv2bf16.nxv2bf16.i64(<vscale x 2 x float> [[VD]], <vscale x 2 x bfloat> [[VS1]], <vscale x 2 x bfloat> [[VS2]], <vscale x 2 x i1> [[VM]], i64 7, i64 [[VL]], i64 1)
+// CHECK-RV64-NEXT: ret <vscale x 2 x float> [[TMP0]]
+//
+vfloat32m1_t test_vfwnmacc_vv_bf16mf2_f32m1_mu(vbool32_t vm, vfloat32m1_t vd,
+ vbfloat16mf2_t vs1,
+ vbfloat16mf2_t vs2, size_t vl) {
+ return __riscv_vfwnmacc_vv_bf16mf2_f32m1_mu(vm, vd, vs1, vs2, vl);
+}
+
+// CHECK-RV64-LABEL: define dso_local <vscale x 2 x float> @test_vfwnmacc_vf_bf16mf2_f32m1_mu(
+// CHECK-RV64-SAME: <vscale x 2 x i1> [[VM:%.*]], <vscale x 2 x float> [[VD:%.*]], bfloat noundef [[VS1:%.*]], <vscale x 2 x bfloat> [[VS2:%.*]], i64 noundef [[VL:%.*]]) #[[ATTR0]] {
+// CHECK-RV64-NEXT: [[ENTRY:.*:]]
+// CHECK-RV64-NEXT: [[TMP0:%.*]] = call <vscale x 2 x float> @llvm.riscv.vfwnmacc.mask.nxv2f32.bf16.nxv2bf16.i64(<vscale x 2 x float> [[VD]], bfloat [[VS1]], <vscale x 2 x bfloat> [[VS2]], <vscale x 2 x i1> [[VM]], i64 7, i64 [[VL]], i64 1)
+// CHECK-RV64-NEXT: ret <vscale x 2 x float> [[TMP0]]
+//
+vfloat32m1_t test_vfwnmacc_vf_bf16mf2_f32m1_mu(vbool32_t vm, vfloat32m1_t vd,
+ __bf16 vs1, vbfloat16mf2_t vs2,
+ size_t vl) {
+ return __riscv_vfwnmacc_vf_bf16mf2_f32m1_mu(vm, vd, vs1, vs2, vl);
+}
+
+// CHECK-RV64-LABEL: define dso_local <vscale x 4 x float> @test_vfwnmacc_vv_bf16m1_f32m2_mu(
+// CHECK-RV64-SAME: <vscale x 4 x i1> [[VM:%.*]], <vscale x 4 x float> [[VD:%.*]], <vscale x 4 x bfloat> [[VS1:%.*]], <vscale x 4 x bfloat> [[VS2:%.*]], i64 noundef [[VL:%.*]]) #[[ATTR0]] {
+// CHECK-RV64-NEXT: [[ENTRY:.*:]]
+// CHECK-RV64-NEXT: [[TMP0:%.*]] = call <vscale x 4 x float> @llvm.riscv.vfwnmacc.mask.nxv4f32.nxv4bf16.nxv4bf16.i64(<vscale x 4 x float> [[VD]], <vscale x 4 x bfloat> [[VS1]], <vscale x 4 x bfloat> [[VS2]], <vscale x 4 x i1> [[VM]], i64 7, i64 [[VL]], i64 1)
+// CHECK-RV64-NEXT: ret <vscale x 4 x float> [[TMP0]]
+//
+vfloat32m2_t test_vfwnmacc_vv_bf16m1_f32m2_mu(vbool16_t vm, vfloat32m2_t vd,
+ vbfloat16m1_t vs1,
+ vbfloat16m1_t vs2, size_t vl) {
+ return __riscv_vfwnmacc_vv_bf16m1_f32m2_mu(vm, vd, vs1, vs2, vl);
+}
+
+// CHECK-RV64-LABEL: define dso_local <vscale x 4 x float> @test_vfwnmacc_vf_bf16m1_f32m2_mu(
+// CHECK-RV64-SAME: <vscale x 4 x i1> [[VM:%.*]], <vscale x 4 x float> [[VD:%.*]], bfloat noundef [[VS1:%.*]], <vscale x 4 x bfloat> [[VS2:%.*]], i64 noundef [[VL:%.*]]) #[[ATTR0]] {
+// CHECK-RV64-NEXT: [[ENTRY:.*:]]
+// CHECK-RV64-NEXT: [[TMP0:%.*]] = call <vscale x 4 x float> @llvm.riscv.vfwnmacc.mask.nxv4f32.bf16.nxv4bf16.i64(<vscale x 4 x float> [[VD]], bfloat [[VS1]], <vscale x 4 x bfloat> [[VS2]], <vscale x 4 x i1> [[VM]], i64 7, i64 [[VL]], i64 1)
+// CHECK-RV64-NEXT: ret <vscale x 4 x float> [[TMP0]]
+//
+vfloat32m2_t test_vfwnmacc_vf_bf16m1_f32m2_mu(vbool16_t vm, vfloat32m2_t vd,
+ __bf16 vs1, vbfloat16m1_t vs2,
+ size_t vl) {
+ return __riscv_vfwnmacc_vf_bf16m1_f32m2_mu(vm, vd, vs1, vs2, vl);
+}
+
+// CHECK-RV64-LABEL: define dso_local <vscale x 8 x float> @test_vfwnmacc_vv_bf16m2_f32m4_mu(
+// CHECK-RV64-SAME: <vscale x 8 x i1> [[VM:%.*]], <vscale x 8 x float> [[VD:%.*]], <vscale x 8 x bfloat> [[VS1:%.*]], <vscale x 8 x bfloat> [[VS2:%.*]], i64 noundef [[VL:%.*]]) #[[ATTR0]] {
+// CHECK-RV64-NEXT: [[ENTRY:.*:]]
+// CHECK-RV64-NEXT: [[TMP0:%.*]] = call <vscale x 8 x float> @llvm.riscv.vfwnmacc.mask.nxv8f32.nxv8bf16.nxv8bf16.i64(<vscale x 8 x float> [[VD]], <vscale x 8 x bfloat> [[VS1]], <vscale x 8 x bfloat> [[VS2]], <vscale x 8 x i1> [[VM]], i64 7, i64 [[VL]], i64 1)
+// CHECK-RV64-NEXT: ret <vscale x 8 x float> [[TMP0]]
+//
+vfloat32m4_t test_vfwnmacc_vv_bf16m2_f32m4_mu(vbool8_t vm, vfloat32m4_t vd,
+ vbfloat16m2_t vs1,
+ vbfloat16m2_t vs2, size_t vl) {
+ return __riscv_vfwnmacc_vv_bf16m2_f32m4_mu(vm, vd, vs1, vs2, vl);
+}
+
+// CHECK-RV64-LABEL: define dso_local <vscale x 8 x float> @test_vfwnmacc_vf_bf16m2_f32m4_mu(
+// CHECK-RV64-SAME: <vscale x 8 x i1> [[VM:%.*]], <vscale x 8 x float> [[VD:%.*]], bfloat noundef [[VS1:%.*]], <vscale x 8 x bfloat> [[VS2:%.*]], i64 noundef [[VL:%.*]]) #[[ATTR0]] {
+// CHECK-RV64-NEXT: [[ENTRY:.*:]]
+// CHECK-RV64-NEXT: [[TMP0:%.*]] = call <vscale x 8 x float> @llvm.riscv.vfwnmacc.mask.nxv8f32.bf16.nxv8bf16.i64(<vscale x 8 x float> [[VD]], bfloat [[VS1]], <vscale x 8 x bfloat> [[VS2]], <vscale x 8 x i1> [[VM]], i64 7, i64 [[VL]], i64 1)
+// CHECK-RV64-NEXT: ret <vscale x 8 x float> [[TMP0]]
+//
+vfloat32m4_t test_vfwnmacc_vf_bf16m2_f32m4_mu(vbool8_t vm, vfloat32m4_t vd,
+ __bf16 vs1, vbfloat16m2_t vs2,
+ size_t vl) {
+ return __riscv_vfwnmacc_vf_bf16m2_f32m4_mu(vm, vd, vs1, vs2, vl);
+}
+
+// CHECK-RV64-LABEL: define dso_local <vscale x 16 x float> @test_vfwnmacc_vv_bf16m4_f32m8_mu(
+// CHECK-RV64-SAME: <vscale x 16 x i1> [[VM:%.*]], <vscale x 16 x float> [[VD:%.*]], <vscale x 16 x bfloat> [[VS1:%.*]], <vscale x 16 x bfloat> [[VS2:%.*]], i64 noundef [[VL:%.*]]) #[[ATTR0]] {
+// CHECK-RV64-NEXT: [[ENTRY:.*:]]
+// CHECK-RV64-NEXT: [[TMP0:%.*]] = call <vscale x 16 x float> @llvm.riscv.vfwnmacc.mask.nxv16f32.nxv16bf16.nxv16bf16.i64(<vscale x 16 x float> [[VD]], <vscale x 16 x bfloat> [[VS1]], <vscale x 16 x bfloat> [[VS2]], <vscale x 16 x i1> [[VM]], i64 7, i64 [[VL]], i64 1)
+// CHECK-RV64-NEXT: ret <vscale x 16 x float> [[TMP0]]
+//
+vfloat32m8_t test_vfwnmacc_vv_bf16m4_f32m8_mu(vbool4_t vm, vfloat32m8_t vd,
+ vbfloat16m4_t vs1,
+ vbfloat16m4_t vs2, size_t vl) {
+ return __riscv_vfwnmacc_vv_bf16m4_f32m8_mu(vm, vd, vs1, vs2, vl);
+}
+
+// CHECK-RV64-LABEL: define dso_local <vscale x 16 x float> @test_vfwnmacc_vf_bf16m4_f32m8_mu(
+// CHECK-RV64-SAME: <vscale x 16 x i1> [[VM:%.*]], <vscale x 16 x float> [[VD:%.*]], bfloat noundef [[VS1:%.*]], <vscale x 16 x bfloat> [[VS2:%.*]], i64 noundef [[VL:%.*]]) #[[ATTR0]] {
+// CHECK-RV64-NEXT: [[ENTRY:.*:]]
+// CHECK-RV64-NEXT: [[TMP0:%.*]] = call <vscale x 16 x float> @llvm.riscv.vfwnmacc.mask.nxv16f32.bf16.nxv16bf16.i64(<vscale x 16 x float> [[VD]], bfloat [[VS1]], <vscale x 16 x bfloat> [[VS2]], <vscale x 16 x i1> [[VM]], i64 7, i64 [[VL]], i64 1)
+// CHECK-RV64-NEXT: ret <vscale x 16 x float> [[TMP0]]
+//
+vfloat32m8_t test_vfwnmacc_vf_bf16m4_f32m8_mu(vbool4_t vm, vfloat32m8_t vd,
+ __bf16 vs1, vbfloat16m4_t vs2,
+ size_t vl) {
+ return __riscv_vfwnmacc_vf_bf16m4_f32m8_mu(vm, vd, vs1, vs2, vl);
+}
+
+// CHECK-RV64-LABEL: define dso_local <vscale x 1 x float> @test_vfwnmacc_vv_bf16mf4_f32mf2_rm_tu(
+// CHECK-RV64-SAME: <vscale x 1 x float> [[VD:%.*]], <vscale x 1 x bfloat> [[VS1:%.*]], <vscale x 1 x bfloat> [[VS2:%.*]], i64 noundef [[VL:%.*]]) #[[ATTR0]] {
+// CHECK-RV64-NEXT: [[ENTRY:.*:]]
+// CHECK-RV64-NEXT: [[TMP0:%.*]] = call <vscale x 1 x float> @llvm.riscv.vfwnmacc.nxv1f32.nxv1bf16.nxv1bf16.i64(<vscale x 1 x float> [[VD]], <vscale x 1 x bfloat> [[VS1]], <vscale x 1 x bfloat> [[VS2]], i64 0, i64 [[VL]], i64 2)
+// CHECK-RV64-NEXT: ret <vscale x 1 x float> [[TMP0]]
+//
+vfloat32mf2_t test_vfwnmacc_vv_bf16mf4_f32mf2_rm_tu(vfloat32mf2_t vd,
+ vbfloat16mf4_t vs1,
+ vbfloat16mf4_t vs2,
+ size_t vl) {
+ return __riscv_vfwnmacc_vv_bf16mf4_f32mf2_rm_tu(vd, vs1, vs2, __RISCV_FRM_RNE,
+ vl);
+}
+
+// CHECK-RV64-LABEL: define dso_local <vscale x 1 x float> @test_vfwnmacc_vf_bf16mf4_f32mf2_rm_tu(
+// CHECK-RV64-SAME: <vscale x 1 x float> [[VD:%.*]], bfloat noundef [[VS1:%.*]], <vscale x 1 x bfloat> [[VS2:%.*]], i64 noundef [[VL:%.*]]) #[[ATTR0]] {
+// CHECK-RV64-NEXT: [[ENTRY:.*:]]
+// CHECK-RV64-NEXT: [[TMP0:%.*]] = call <vscale x 1 x float> @llvm.riscv.vfwnmacc.nxv1f32.bf16.nxv1bf16.i64(<vscale x 1 x float> [[VD]], bfloat [[VS1]], <vscale x 1 x bfloat> [[VS2]], i64 0, i64 [[VL]], i64 2)
+// CHECK-RV64-NEXT: ret <vscale x 1 x float> [[TMP0]]
+//
+vfloat32mf2_t test_vfwnmacc_vf_bf16mf4_f32mf2_rm_tu(vfloat32mf2_t vd,
+ __bf16 vs1,
+ vbfloat16mf4_t vs2,
+ size_t vl) {
+ return __riscv_vfwnmacc_vf_bf16mf4_f32mf2_rm_tu(vd, vs1, vs2, __RISCV_FRM_RNE,
+ vl);
+}
+
+// CHECK-RV64-LABEL: define dso_local <vscale x 2 x float> @test_vfwnmacc_vv_bf16mf2_f32m1_rm_tu(
+// CHECK-RV64-SAME: <vscale x 2 x float> [[VD:%.*]], <vscale x 2 x bfloat> [[VS1:%.*]], <vscale x 2 x bfloat> [[VS2:%.*]], i64 noundef [[VL:%.*]]) #[[ATTR0]] {
+// CHECK-RV64-NEXT: [[ENTRY:.*:]]
+// CHECK-RV64-NEXT: [[TMP0:%.*]] = call <vscale x 2 x float> @llvm.riscv.vfwnmacc.nxv2f32.nxv2bf16.nxv2bf16.i64(<vscale x 2 x float> [[VD]], <vscale x 2 x bfloat> [[VS1]], <vscale x 2 x bfloat> [[VS2]], i64 0, i64 [[VL]], i64 2)
+// CHECK-RV64-NEXT: ret <vscale x 2 x float> [[TMP0]]
+//
+vfloat32m1_t test_vfwnmacc_vv_bf16mf2_f32m1_rm_tu(vfloat32m1_t vd,
+ vbfloat16mf2_t vs1,
+ vbfloat16mf2_t vs2,
+ size_t vl) {
+ return __riscv_vfwnmacc_vv_bf16mf2_f32m1_rm_tu(vd, vs1, vs2, __RISCV_FRM_RNE,
+ vl);
+}
+
+// CHECK-RV64-LABEL: define dso_local <vscale x 2 x float> @test_vfwnmacc_vf_bf16mf2_f32m1_rm_tu(
+// CHECK-RV64-SAME: <vscale x 2 x float> [[VD:%.*]], bfloat noundef [[VS1:%.*]], <vscale x 2 x bfloat> [[VS2:%.*]], i64 noundef [[VL:%.*]]) #[[ATTR0]] {
+// CHECK-RV64-NEXT: [[ENTRY:.*:]]
+// CHECK-RV64-NEXT: [[TMP0:%.*]] = call <vscale x 2 x float> @llvm.riscv.vfwnmacc.nxv2f32.bf16.nxv2bf16.i64(<vscale x 2 x float> [[VD]], bfloat [[VS1]], <vscale x 2 x bfloat> [[VS2]], i64 0, i64 [[VL]], i64 2)
+// CHECK-RV64-NEXT: ret <vscale x 2 x float> [[TMP0]]
+//
+vfloat32m1_t test_vfwnmacc_vf_bf16mf2_f32m1_rm_tu(vfloat32m1_t vd, __bf16 vs1,
+ vbfloat16mf2_t vs2,
+ size_t vl) {
+ return __riscv_vfwnmacc_vf_bf16mf2_f32m1_rm_tu(vd, vs1, vs2, __RISCV_FRM_RNE,
+ vl);
+}
+
+// CHECK-RV64-LABEL: define dso_local <vscale x 4 x float> @test_vfwnmacc_vv_bf16m1_f32m2_rm_tu(
+// CHECK-RV64-SAME: <vscale x 4 x float> [[VD:%.*]], <vscale x 4 x bfloat> [[VS1:%.*]], <vscale x 4 x bfloat> [[VS2:%.*]], i64 noundef [[VL:%.*]]) #[[ATTR0]] {
+// CHECK-RV64-NEXT: [[ENTRY:.*:]]
+// CHECK-RV64-NEXT: [[TMP0:%.*]] = call <vscale x 4 x float> @llvm.riscv.vfwnmacc.nxv4f32.nxv4bf16.nxv4bf16.i64(<vscale x 4 x float> [[VD]], <vscale x 4 x bfloat> [[VS1]], <vscale x 4 x bfloat> [[VS2]], i64 0, i64 [[VL]], i64 2)
+// CHECK-RV64-NEXT: ret <vscale x 4 x float> [[TMP0]]
+//
+vfloat32m2_t test_vfwnmacc_vv_bf16m1_f32m2_rm_tu(vfloat32m2_t vd,
+ vbfloat16m1_t vs1,
+ vbfloat16m1_t vs2, size_t vl) {
+ return __riscv_vfwnmacc_vv_bf16m1_f32m2_rm_tu(vd, vs1, vs2, __RISCV_FRM_RNE,
+ vl);
+}
+
+// CHECK-RV64-LABEL: define dso_local <vscale x 4 x float> @test_vfwnmacc_vf_bf16m1_f32m2_rm_tu(
+// CHECK-RV64-SAME: <vscale x 4 x float> [[VD:%.*]], bfloat noundef [[VS1:%.*]], <vscale x 4 x bfloat> [[VS2:%.*]], i64 noundef [[VL:%.*]]) #[[ATTR0]] {
+// CHECK-RV64-NEXT: [[ENTRY:.*:]]
+// CHECK-RV64-NEXT: [[TMP0:%.*]] = call <vscale x 4 x float> @llvm.riscv.vfwnmacc.nxv4f32.bf16.nxv4bf16.i64(<vscale x 4 x float> [[VD]], bfloat [[VS1]], <vscale x 4 x bfloat> [[VS2]], i64 0, i64 [[VL]], i64 2)
+// CHECK-RV64-NEXT: ret <vscale x 4 x float> [[TMP0]]
+//
+vfloat32m2_t test_vfwnmacc_vf_bf16m1_f32m2_rm_tu(vfloat32m2_t vd, __bf16 vs1,
+ vbfloat16m1_t vs2, size_t vl) {
+ return __riscv_vfwnmacc_vf_bf16m1_f32m2_rm_tu(vd, vs1, vs2, __RISCV_FRM_RNE,
+ vl);
+}
+
+// CHECK-RV64-LABEL: define dso_local <vscale x 8 x float> @test_vfwnmacc_vv_bf16m2_f32m4_rm_tu(
+// CHECK-RV64-SAME: <vscale x 8 x float> [[VD:%.*]], <vscale x 8 x bfloat> [[VS1:%.*]], <vscale x 8 x bfloat> [[VS2:%.*]], i64 noundef [[VL:%.*]]) #[[ATTR0]] {
+// CHECK-RV64-NEXT: [[ENTRY:.*:]]
+// CHECK-RV64-NEXT: [[TMP0:%.*]] = call <vscale x 8 x float> @llvm.riscv.vfwnmacc.nxv8f32.nxv8bf16.nxv8bf16.i64(<vscale x 8 x float> [[VD]], <vscale x 8 x bfloat> [[VS1]], <vscale x 8 x bfloat> [[VS2]], i64 0, i64 [[VL]], i64 2)
+// CHECK-RV64-NEXT: ret <vscale x 8 x float> [[TMP0]]
+//
+vfloat32m4_t test_vfwnmacc_vv_bf16m2_f32m4_rm_tu(vfloat32m4_t vd,
+ vbfloat16m2_t vs1,
+ vbfloat16m2_t vs2, size_t vl) {
+ return __riscv_vfwnmacc_vv_bf16m2_f32m4_rm_tu(vd, vs1, vs2, __RISCV_FRM_RNE,
+ vl);
+}
+
+// CHECK-RV64-LABEL: define dso_local <vscale x 8 x float> @test_vfwnmacc_vf_bf16m2_f32m4_rm_tu(
+// CHECK-RV64-SAME: <vscale x 8 x float> [[VD:%.*]], bfloat noundef [[VS1:%.*]], <vscale x 8 x bfloat> [[VS2:%.*]], i64 noundef [[VL:%.*]]) #[[ATTR0]] {
+// CHECK-RV64-NEXT: [[ENTRY:.*:]]
+// CHECK-RV64-NEXT: [[TMP0:%.*]] = call <vscale x 8 x float> @llvm.riscv.vfwnmacc.nxv8f32.bf16.nxv8bf16.i64(<vscale x 8 x float> [[VD]], bfloat [[VS1]], <vscale x 8 x bfloat> [[VS2]], i64 0, i64 [[VL]], i64 2)
+// CHECK-RV64-NEXT: ret <vscale x 8 x float> [[TMP0]]
+//
+vfloat32m4_t test_vfwnmacc_vf_bf16m2_f32m4_rm_tu(vfloat32m4_t vd, __bf16 vs1,
+ vbfloat16m2_t vs2, size_t vl) {
+ return __riscv_vfwnmacc_vf_bf16m2_f32m4_rm_tu(vd, vs1, vs2, __RISCV_FRM_RNE,
+ vl);
+}
+
+// CHECK-RV64-LABEL: define dso_local <vscale x 16 x float> @test_vfwnmacc_vv_bf16m4_f32m8_rm_tu(
+// CHECK-RV64-SAME: <vscale x 16 x float> [[VD:%.*]], <vscale x 16 x bfloat> [[VS1:%.*]], <vscale x 16 x bfloat> [[VS2:%.*]], i64 noundef [[VL:%.*]]) #[[ATTR0]] {
+// CHECK-RV64-NEXT: [[ENTRY:.*:]]
+// CHECK-RV64-NEXT: [[TMP0:%.*]] = call <vscale x 16 x float> @llvm.riscv.vfwnmacc.nxv16f32.nxv16bf16.nxv16bf16.i64(<vscale x 16 x float> [[VD]], <vscale x 16 x bfloat> [[VS1]], <vscale x 16 x bfloat> [[VS2]], i64 0, i64 [[VL]], i64 2)
+// CHECK-RV64-NEXT: ret <vscale x 16 x float> [[TMP0]]
+//
+vfloat32m8_t test_vfwnmacc_vv_bf16m4_f32m8_rm_tu(vfloat32m8_t vd,
+ vbfloat16m4_t vs1,
+ vbfloat16m4_t vs2, size_t vl) {
+ return __riscv_vfwnmacc_vv_bf16m4_f32m8_rm_tu(vd, vs1, vs2, __RISCV_FRM_RNE,
+ vl);
+}
+
+// CHECK-RV64-LABEL: define dso_local <vscale x 16 x float> @test_vfwnmacc_vf_bf16m4_f32m8_rm_tu(
+// CHECK-RV64-SAME: <vscale x 16 x float> [[VD:%.*]], bfloat noundef [[VS1:%.*]], <vscale x 16 x bfloat> [[VS2:%.*]], i64 noundef [[VL:%.*]]) #[[ATTR0]] {
+// CHECK-RV64-NEXT: [[ENTRY:.*:]]
+// CHECK-RV64-NEXT: [[TMP0:%.*]] = call <vscale x 16 x float> @llvm.riscv.vfwnmacc.nxv16f32.bf16.nxv16bf16.i64(<vscale x 16 x float> [[VD]], bfloat [[VS1]], <vscale x 16 x bfloat> [[VS2]], i64 0, i64 [[VL]], i64 2)
+// CHECK-RV64-NEXT: ret <vscale x 16 x float> [[TMP0]]
+//
+vfloat32m8_t test_vfwnmacc_vf_bf16m4_f32m8_rm_tu(vfloat32m8_t vd, __bf16 vs1,
+ vbfloat16m4_t vs2, size_t vl) {
+ return __riscv_vfwnmacc_vf_bf16m4_f32m8_rm_tu(vd, vs1, vs2, __RISCV_FRM_RNE,
+ vl);
+}
+
+// CHECK-RV64-LABEL: define dso_local <vscale x 1 x float> @test_vfwnmacc_vv_bf16mf4_f32mf2_rm_tum(
+// CHECK-RV64-SAME: <vscale x 1 x i1> [[VM:%.*]], <vscale x 1 x float> [[VD:%.*]], <vscale x 1 x bfloat> [[VS1:%.*]], <vscale x 1 x bfloat> [[VS2:%.*]], i64 noundef [[VL:%.*]]) #[[ATTR0]] {
+// CHECK-RV64-NEXT: [[ENTRY:.*:]]
+// CHECK-RV64-NEXT: [[TMP0:%.*]] = call <vscale x 1 x float> @llvm.riscv.vfwnmacc.mask.nxv1f32.nxv1bf16.nxv1bf16.i64(<vscale x 1 x float> [[VD]], <vscale x 1 x bfloat> [[VS1]], <vscale x 1 x bfloat> [[VS2]], <vscale x 1 x i1> [[VM]], i64 0, i64 [[VL]], i64 2)
+// CHECK-RV64-NEXT: ret <vscale x 1 x float> [[TMP0]]
+//
+vfloat32mf2_t test_vfwnmacc_vv_bf16mf4_f32mf2_rm_tum(vbool64_t vm,
+ vfloat32mf2_t vd,
+ vbfloat16mf4_t vs1,
+ vbfloat16mf4_t vs2,
+ size_t vl) {
+ return __riscv_vfwnmacc_vv_bf16mf4_f32mf2_rm_tum(vm, vd, vs1, vs2,
+ __RISCV_FRM_RNE, vl);
+}
+
+// CHECK-RV64-LABEL: define dso_local <vscale x 1 x float> @test_vfwnmacc_vf_bf16mf4_f32mf2_rm_tum(
+// CHECK-RV64-SAME: <vscale x 1 x i1> [[VM:%.*]], <vscale x 1 x float> [[VD:%.*]], bfloat noundef [[VS1:%.*]], <vscale x 1 x bfloat> [[VS2:%.*]], i64 noundef [[VL:%.*]]) #[[ATTR0]] {
+// CHECK-RV64-NEXT: [[ENTRY:.*:]]
+// CHECK-RV64-NEXT: [[TMP0:%.*]] = call <vscale x 1 x float> @llvm.riscv.vfwnmacc.mask.nxv1f32.bf16.nxv1bf16.i64(<vscale x 1 x float> [[VD]], bfloat [[VS1]], <vscale x 1 x bfloat> [[VS2]], <vscale x 1 x i1> [[VM]], i64 0, i64 [[VL]], i64 2)
+// CHECK-RV64-NEXT: ret <vscale x 1 x float> [[TMP0]]
+//
+vfloat32mf2_t test_vfwnmacc_vf_bf16mf4_f32mf2_rm_tum(
+ vbool64_t vm, vfloat32mf2_t vd, __bf16 vs1, vbfloat16mf4_t vs2, size_t vl) {
+ return __riscv_vfwnmacc_vf_bf16mf4_f32mf2_rm_tum(vm, vd, vs1, vs2,
+ __RISCV_FRM_RNE, vl);
+}
+
+// CHECK-RV64-LABEL: define dso_local <vscale x 2 x float> @test_vfwnmacc_vv_bf16mf2_f32m1_rm_tum(
+// CHECK-RV64-SAME: <vscale x 2 x i1> [[VM:%.*]], <vscale x 2 x float> [[VD:%.*]], <vscale x 2 x bfloat> [[VS1:%.*]], <vscale x 2 x bfloat> [[VS2:%.*]], i64 noundef [[VL:%.*]]) #[[ATTR0]] {
+// CHECK-RV64-NEXT: [[ENTRY:.*:]]
+// CHECK-RV64-NEXT: [[TMP0:%.*]] = call <vscale x 2 x float> @llvm.riscv.vfwnmacc.mask.nxv2f32.nxv2bf16.nxv2bf16.i64(<vscale x 2 x float> [[VD]], <vscale x 2 x bfloat> [[VS1]], <vscale x 2 x bfloat> [[VS2]], <vscale x 2 x i1> [[VM]], i64 0, i64 [[VL]], i64 2)
+// CHECK-RV64-NEXT: ret <vscale x 2 x float> [[TMP0]]
+//
+vfloat32m1_t test_vfwnmacc_vv_bf16mf2_f32m1_rm_tum(vbool32_t vm,
+ vfloat32m1_t vd,
+ vbfloat16mf2_t vs1,
+ vbfloat16mf2_t vs2,
+ size_t vl) {
+ return __riscv_vfwnmacc_vv_bf16mf2_f32m1_rm_tum(vm, vd, vs1, vs2,
+ __RISCV_FRM_RNE, vl);
+}
+
+// CHECK-RV64-LABEL: define dso_local <vscale x 2 x float> @test_vfwnmacc_vf_bf16mf2_f32m1_rm_tum(
+// CHECK-RV64-SAME: <vscale x 2 x i1> [[VM:%.*]], <vscale x 2 x float> [[VD:%.*]], bfloat noundef [[VS1:%.*]], <vscale x 2 x bfloat> [[VS2:%.*]], i64 noundef [[VL:%.*]]) #[[ATTR0]] {
+// CHECK-RV64-NEXT: [[ENTRY:.*:]]
+// CHECK-RV64-NEXT: [[TMP0:%.*]] = call <vscale x 2 x float> @llvm.riscv.vfwnmacc.mask.nxv2f32.bf16.nxv2bf16.i64(<vscale x 2 x float> [[VD]], bfloat [[VS1]], <vscale x 2 x bfloat> [[VS2]], <vscale x 2 x i1> [[VM]], i64 0, i64 [[VL]], i64 2)
+// CHECK-RV64-NEXT: ret <vscale x 2 x float> [[TMP0]]
+//
+vfloat32m1_t test_vfwnmacc_vf_bf16mf2_f32m1_rm_tum(vbool32_t vm,
+ vfloat32m1_t vd, __bf16 vs1,
+ vbfloat16mf2_t vs2,
+ size_t vl) {
+ return __riscv_vfwnmacc_vf_bf16mf2_f32m1_rm_tum(vm, vd, vs1, vs2,
+ __RISCV_FRM_RNE, vl);
+}
+
+// CHECK-RV64-LABEL: define dso_local <vscale x 4 x float> @test_vfwnmacc_vv_bf16m1_f32m2_rm_tum(
+// CHECK-RV64-SAME: <vscale x 4 x i1> [[VM:%.*]], <vscale x 4 x float> [[VD:%.*]], <vscale x 4 x bfloat> [[VS1:%.*]], <vscale x 4 x bfloat> [[VS2:%.*]], i64 noundef [[VL:%.*]]) #[[ATTR0]] {
+// CHECK-RV64-NEXT: [[ENTRY:.*:]]
+// CHECK-RV64-NEXT: [[TMP0:%.*]] = call <vscale x 4 x float> @llvm.riscv.vfwnmacc.mask.nxv4f32.nxv4bf16.nxv4bf16.i64(<vscale x 4 x float> [[VD]], <vscale x 4 x bfloat> [[VS1]], <vscale x 4 x bfloat> [[VS2]], <vscale x 4 x i1> [[VM]], i64 0, i64 [[VL]], i64 2)
+// CHECK-RV64-NEXT: ret <vscale x 4 x float> [[TMP0]]
+//
+vfloat32m2_t test_vfwnmacc_vv_bf16m1_f32m2_rm_tum(vbool16_t vm, vfloat32m2_t vd,
+ vbfloat16m1_t vs1,
+ vbfloat16m1_t vs2,
+ size_t vl) {
+ return __riscv_vfwnmacc_vv_bf16m1_f32m2_rm_tum(vm, vd, vs1, vs2,
+ __RISCV_FRM_RNE, vl);
+}
+
+// CHECK-RV64-LABEL: define dso_local <vscale x 4 x float> @test_vfwnmacc_vf_bf16m1_f32m2_rm_tum(
+// CHECK-RV64-SAME: <vscale x 4 x i1> [[VM:%.*]], <vscale x 4 x float> [[VD:%.*]], bfloat noundef [[VS1:%.*]], <vscale x 4 x bfloat> [[VS2:%.*]], i64 noundef [[VL:%.*]]) #[[ATTR0]] {
+// CHECK-RV64-NEXT: [[ENTRY:.*:]]
+// CHECK-RV64-NEXT: [[TMP0:%.*]] = call <vscale x 4 x float> @llvm.riscv.vfwnmacc.mask.nxv4f32.bf16.nxv4bf16.i64(<vscale x 4 x float> [[VD]], bfloat [[VS1]], <vscale x 4 x bfloat> [[VS2]], <vscale x 4 x i1> [[VM]], i64 0, i64 [[VL]], i64 2)
+// CHECK-RV64-NEXT: ret <vscale x 4 x float> [[TMP0]]
+//
+vfloat32m2_t test_vfwnmacc_vf_bf16m1_f32m2_rm_tum(vbool16_t vm, vfloat32m2_t vd,
+ __bf16 vs1, vbfloat16m1_t vs2,
+ size_t vl) {
+ return __riscv_vfwnmacc_vf_bf16m1_f32m2_rm_tum(vm, vd, vs1, vs2,
+ __RISCV_FRM_RNE, vl);
+}
+
+// CHECK-RV64-LABEL: define dso_local <vscale x 8 x float> @test_vfwnmacc_vv_bf16m2_f32m4_rm_tum(
+// CHECK-RV64-SAME: <vscale x 8 x i1> [[VM:%.*]], <vscale x 8 x float> [[VD:%.*]], <vscale x 8 x bfloat> [[VS1:%.*]], <vscale x 8 x bfloat> [[VS2:%.*]], i64 noundef [[VL:%.*]]) #[[ATTR0]] {
+// CHECK-RV64-NEXT: [[ENTRY:.*:]]
+// CHECK-RV64-NEXT: [[TMP0:%.*]] = call <vscale x 8 x float> @llvm.riscv.vfwnmacc.mask.nxv8f32.nxv8bf16.nxv8bf16.i64(<vscale x 8 x float> [[VD]], <vscale x 8 x bfloat> [[VS1]], <vscale x 8 x bfloat> [[VS2]], <vscale x 8 x i1> [[VM]], i64 0, i64 [[VL]], i64 2)
+// CHECK-RV64-NEXT: ret <vscale x 8 x float> [[TMP0]]
+//
+vfloat32m4_t test_vfwnmacc_vv_bf16m2_f32m4_rm_tum(vbool8_t vm, vfloat32m4_t vd,
+ vbfloat16m2_t vs1,
+ vbfloat16m2_t vs2,
+ size_t vl) {
+ return __riscv_vfwnmacc_vv_bf16m2_f32m4_rm_tum(vm, vd, vs1, vs2,
+ __RISCV_FRM_RNE, vl);
+}
+
+// CHECK-RV64-LABEL: define dso_local <vscale x 8 x float> @test_vfwnmacc_vf_bf16m2_f32m4_rm_tum(
+// CHECK-RV64-SAME: <vscale x 8 x i1> [[VM:%.*]], <vscale x 8 x float> [[VD:%.*]], bfloat noundef [[VS1:%.*]], <vscale x 8 x bfloat> [[VS2:%.*]], i64 noundef [[VL:%.*]]) #[[ATTR0]] {
+// CHECK-RV64-NEXT: [[ENTRY:.*:]]
+// CHECK-RV64-NEXT: [[TMP0:%.*]] = call <vscale x 8 x float> @llvm.riscv.vfwnmacc.mask.nxv8f32.bf16.nxv8bf16.i64(<vscale x 8 x float> [[VD]], bfloat [[VS1]], <vscale x 8 x bfloat> [[VS2]], <vscale x 8 x i1> [[VM]], i64 0, i64 [[VL]], i64 2)
+// CHECK-RV64-NEXT: ret <vscale x 8 x float> [[TMP0]]
+//
+vfloat32m4_t test_vfwnmacc_vf_bf16m2_f32m4_rm_tum(vbool8_t vm, vfloat32m4_t vd,
+ __bf16 vs1, vbfloat16m2_t vs2,
+ size_t vl) {
+ return __riscv_vfwnmacc_vf_bf16m2_f32m4_rm_tum(vm, vd, vs1, vs2,
+ __RISCV_FRM_RNE, vl);
+}
+
+// CHECK-RV64-LABEL: define dso_local <vscale x 16 x float> @test_vfwnmacc_vv_bf16m4_f32m8_rm_tum(
+// CHECK-RV64-SAME: <vscale x 16 x i1> [[VM:%.*]], <vscale x 16 x float> [[VD:%.*]], <vscale x 16 x bfloat> [[VS1:%.*]], <vscale x 16 x bfloat> [[VS2:%.*]], i64 noundef [[VL:%.*]]) #[[ATTR0]] {
+// CHECK-RV64-NEXT: [[ENTRY:.*:]]
+// CHECK-RV64-NEXT: [[TMP0:%.*]] = call <vscale x 16 x float> @llvm.riscv.vfwnmacc.mask.nxv16f32.nxv16bf16.nxv16bf16.i64(<vscale x 16 x float> [[VD]], <vscale x 16 x bfloat> [[VS1]], <vscale x 16 x bfloat> [[VS2]], <vscale x 16 x i1> [[VM]], i64 0, i64 [[VL]], i64 2)
+// CHECK-RV64-NEXT: ret <vscale x 16 x float> [[TMP0]]
+//
+vfloat32m8_t test_vfwnmacc_vv_bf16m4_f32m8_rm_tum(vbool4_t vm, vfloat32m8_t vd,
+ vbfloat16m4_t vs1,
+ vbfloat16m4_t vs2,
+ size_t vl) {
+ return __riscv_vfwnmacc_vv_bf16m4_f32m8_rm_tum(vm, vd, vs1, vs2,
+ __RISCV_FRM_RNE, vl);
+}
+
+// CHECK-RV64-LABEL: define dso_local <vscale x 16 x float> @test_vfwnmacc_vf_bf16m4_f32m8_rm_tum(
+// CHECK-RV64-SAME: <vscale x 16 x i1> [[VM:%.*]], <vscale x 16 x float> [[VD:%.*]], bfloat noundef [[VS1:%.*]], <vscale x 16 x bfloat> [[VS2:%.*]], i64 noundef [[VL:%.*]]) #[[ATTR0]] {
+// CHECK-RV64-NEXT: [[ENTRY:.*:]]
+// CHECK-RV64-NEXT: [[TMP0:%.*]] = call <vscale x 16 x float> @llvm.riscv.vfwnmacc.mask.nxv16f32.bf16.nxv16bf16.i64(<vscale x 16 x float> [[VD]], bfloat [[VS1]], <vscale x 16 x bfloat> [[VS2]], <vscale x 16 x i1> [[VM]], i64 0, i64 [[VL]], i64 2)
+// CHECK-RV64-NEXT: ret <vscale x 16 x float> [[TMP0]]
+//
+vfloat32m8_t test_vfwnmacc_vf_bf16m4_f32m8_rm_tum(vbool4_t vm, vfloat32m8_t vd,
+ __bf16 vs1, vbfloat16m4_t vs2,
+ size_t vl) {
+ return __riscv_vfwnmacc_vf_bf16m4_f32m8_rm_tum(vm, vd, vs1, vs2,
+ __RISCV_FRM_RNE, vl);
+}
+
+// CHECK-RV64-LABEL: define dso_local <vscale x 1 x float> @test_vfwnmacc_vv_bf16mf4_f32mf2_rm_tumu(
+// CHECK-RV64-SAME: <vscale x 1 x i1> [[VM:%.*]], <vscale x 1 x float> [[VD:%.*]], <vscale x 1 x bfloat> [[VS1:%.*]], <vscale x 1 x bfloat> [[VS2:%.*]], i64 noundef [[VL:%.*]]) #[[ATTR0]] {
+// CHECK-RV64-NEXT: [[ENTRY:.*:]]
+// CHECK-RV64-NEXT: [[TMP0:%.*]] = call <vscale x 1 x float> @llvm.riscv.vfwnmacc.mask.nxv1f32.nxv1bf16.nxv1bf16.i64(<vscale x 1 x float> [[VD]], <vscale x 1 x bfloat> [[VS1]], <vscale x 1 x bfloat> [[VS2]], <vscale x 1 x i1> [[VM]], i64 0, i64 [[VL]], i64 0)
+// CHECK-RV64-NEXT: ret <vscale x 1 x float> [[TMP0]]
+//
+vfloat32mf2_t test_vfwnmacc_vv_bf16mf4_f32mf2_rm_tumu(vbool64_t vm,
+ vfloat32mf2_t vd,
+ vbfloat16mf4_t vs1,
+ vbfloat16mf4_t vs2,
+ size_t vl) {
+ return __riscv_vfwnmacc_vv_bf16mf4_f32mf2_rm_tumu(vm, vd, vs1, vs2,
+ __RISCV_FRM_RNE, vl);
+}
+
+// CHECK-RV64-LABEL: define dso_local <vscale x 1 x float> @test_vfwnmacc_vf_bf16mf4_f32mf2_rm_tumu(
+// CHECK-RV64-SAME: <vscale x 1 x i1> [[VM:%.*]], <vscale x 1 x float> [[VD:%.*]], bfloat noundef [[VS1:%.*]], <vscale x 1 x bfloat> [[VS2:%.*]], i64 noundef [[VL:%.*]]) #[[ATTR0]] {
+// CHECK-RV64-NEXT: [[ENTRY:.*:]]
+// CHECK-RV64-NEXT: [[TMP0:%.*]] = call <vscale x 1 x float> @llvm.riscv.vfwnmacc.mask.nxv1f32.bf16.nxv1bf16.i64(<vscale x 1 x float> [[VD]], bfloat [[VS1]], <vscale x 1 x bfloat> [[VS2]], <vscale x 1 x i1> [[VM]], i64 0, i64 [[VL]], i64 0)
+// CHECK-RV64-NEXT: ret <vscale x 1 x float> [[TMP0]]
+//
+vfloat32mf2_t test_vfwnmacc_vf_bf16mf4_f32mf2_rm_tumu(
+ vbool64_t vm, vfloat32mf2_t vd, __bf16 vs1, vbfloat16mf4_t vs2, size_t vl) {
+ return __riscv_vfwnmacc_vf_bf16mf4_f32mf2_rm_tumu(vm, vd, vs1, vs2,
+ __RISCV_FRM_RNE, vl);
+}
+
+// CHECK-RV64-LABEL: define dso_local <vscale x 2 x float> @test_vfwnmacc_vv_bf16mf2_f32m1_rm_tumu(
+// CHECK-RV64-SAME: <vscale x 2 x i1> [[VM:%.*]], <vscale x 2 x float> [[VD:%.*]], <vscale x 2 x bfloat> [[VS1:%.*]], <vscale x 2 x bfloat> [[VS2:%.*]], i64 noundef [[VL:%.*]]) #[[ATTR0]] {
+// CHECK-RV64-NEXT: [[ENTRY:.*:]]
+// CHECK-RV64-NEXT: [[TMP0:%.*]] = call <vscale x 2 x float> @llvm.riscv.vfwnmacc.mask.nxv2f32.nxv2bf16.nxv2bf16.i64(<vscale x 2 x float> [[VD]], <vscale x 2 x bfloat> [[VS1]], <vscale x 2 x bfloat> [[VS2]], <vscale x 2 x i1> [[VM]], i64 0, i64 [[VL]], i64 0)
+// CHECK-RV64-NEXT: ret <vscale x 2 x float> [[TMP0]]
+//
+vfloat32m1_t test_vfwnmacc_vv_bf16mf2_f32m1_rm_tumu(vbool32_t vm,
+ vfloat32m1_t vd,
+ vbfloat16mf2_t vs1,
+ vbfloat16mf2_t vs2,
+ size_t vl) {
+ return __riscv_vfwnmacc_vv_bf16mf2_f32m1_rm_tumu(vm, vd, vs1, vs2,
+ __RISCV_FRM_RNE, vl);
+}
+
+// CHECK-RV64-LABEL: define dso_local <vscale x 2 x float> @test_vfwnmacc_vf_bf16mf2_f32m1_rm_tumu(
+// CHECK-RV64-SAME: <vscale x 2 x i1> [[VM:%.*]], <vscale x 2 x float> [[VD:%.*]], bfloat noundef [[VS1:%.*]], <vscale x 2 x bfloat> [[VS2:%.*]], i64 noundef [[VL:%.*]]) #[[ATTR0]] {
+// CHECK-RV64-NEXT: [[ENTRY:.*:]]
+// CHECK-RV64-NEXT: [[TMP0:%.*]] = call <vscale x 2 x float> @llvm.riscv.vfwnmacc.mask.nxv2f32.bf16.nxv2bf16.i64(<vscale x 2 x float> [[VD]], bfloat [[VS1]], <vscale x 2 x bfloat> [[VS2]], <vscale x 2 x i1> [[VM]], i64 0, i64 [[VL]], i64 0)
+// CHECK-RV64-NEXT: ret <vscale x 2 x float> [[TMP0]]
+//
+vfloat32m1_t test_vfwnmacc_vf_bf16mf2_f32m1_rm_tumu(vbool32_t vm,
+ vfloat32m1_t vd, __bf16 vs1,
+ vbfloat16mf2_t vs2,
+ size_t vl) {
+ return __riscv_vfwnmacc_vf_bf16mf2_f32m1_rm_tumu(vm, vd, vs1, vs2,
+ __RISCV_FRM_RNE, vl);
+}
+
+// CHECK-RV64-LABEL: define dso_local <vscale x 4 x float> @test_vfwnmacc_vv_bf16m1_f32m2_rm_tumu(
+// CHECK-RV64-SAME: <vscale x 4 x i1> [[VM:%.*]], <vscale x 4 x float> [[VD:%.*]], <vscale x 4 x bfloat> [[VS1:%.*]], <vscale x 4 x bfloat> [[VS2:%.*]], i64 noundef [[VL:%.*]]) #[[ATTR0]] {
+// CHECK-RV64-NEXT: [[ENTRY:.*:]]
+// CHECK-RV64-NEXT: [[TMP0:%.*]] = call <vscale x 4 x float> @llvm.riscv.vfwnmacc.mask.nxv4f32.nxv4bf16.nxv4bf16.i64(<vscale x 4 x float> [[VD]], <vscale x 4 x bfloat> [[VS1]], <vscale x 4 x bfloat> [[VS2]], <vscale x 4 x i1> [[VM]], i64 0, i64 [[VL]], i64 0)
+// CHECK-RV64-NEXT: ret <vscale x 4 x float> [[TMP0]]
+//
+vfloat32m2_t test_vfwnmacc_vv_bf16m1_f32m2_rm_tumu(vbool16_t vm,
+ vfloat32m2_t vd,
+ vbfloat16m1_t vs1,
+ vbfloat16m1_t vs2,
+ size_t vl) {
+ return __riscv_vfwnmacc_vv_bf16m1_f32m2_rm_tumu(vm, vd, vs1, vs2,
+ __RISCV_FRM_RNE, vl);
+}
+
+// CHECK-RV64-LABEL: define dso_local <vscale x 4 x float> @test_vfwnmacc_vf_bf16m1_f32m2_rm_tumu(
+// CHECK-RV64-SAME: <vscale x 4 x i1> [[VM:%.*]], <vscale x 4 x float> [[VD:%.*]], bfloat noundef [[VS1:%.*]], <vscale x 4 x bfloat> [[VS2:%.*]], i64 noundef [[VL:%.*]]) #[[ATTR0]] {
+// CHECK-RV64-NEXT: [[ENTRY:.*:]]
+// CHECK-RV64-NEXT: [[TMP0:%.*]] = call <vscale x 4 x float> @llvm.riscv.vfwnmacc.mask.nxv4f32.bf16.nxv4bf16.i64(<vscale x 4 x float> [[VD]], bfloat [[VS1]], <vscale x 4 x bfloat> [[VS2]], <vscale x 4 x i1> [[VM]], i64 0, i64 [[VL]], i64 0)
+// CHECK-RV64-NEXT: ret <vscale x 4 x float> [[TMP0]]
+//
+vfloat32m2_t test_vfwnmacc_vf_bf16m1_f32m2_rm_tumu(vbool16_t vm,
+ vfloat32m2_t vd, __bf16 vs1,
+ vbfloat16m1_t vs2,
+ size_t vl) {
+ return __riscv_vfwnmacc_vf_bf16m1_f32m2_rm_tumu(vm, vd, vs1, vs2,
+ __RISCV_FRM_RNE, vl);
+}
+
+// CHECK-RV64-LABEL: define dso_local <vscale x 8 x float> @test_vfwnmacc_vv_bf16m2_f32m4_rm_tumu(
+// CHECK-RV64-SAME: <vscale x 8 x i1> [[VM:%.*]], <vscale x 8 x float> [[VD:%.*]], <vscale x 8 x bfloat> [[VS1:%.*]], <vscale x 8 x bfloat> [[VS2:%.*]], i64 noundef [[VL:%.*]]) #[[ATTR0]] {
+// CHECK-RV64-NEXT: [[ENTRY:.*:]]
+// CHECK-RV64-NEXT: [[TMP0:%.*]] = call <vscale x 8 x float> @llvm.riscv.vfwnmacc.mask.nxv8f32.nxv8bf16.nxv8bf16.i64(<vscale x 8 x float> [[VD]], <vscale x 8 x bfloat> [[VS1]], <vscale x 8 x bfloat> [[VS2]], <vscale x 8 x i1> [[VM]], i64 0, i64 [[VL]], i64 0)
+// CHECK-RV64-NEXT: ret <vscale x 8 x float> [[TMP0]]
+//
+vfloat32m4_t test_vfwnmacc_vv_bf16m2_f32m4_rm_tumu(vbool8_t vm, vfloat32m4_t vd,
+ vbfloat16m2_t vs1,
+ vbfloat16m2_t vs2,
+ size_t vl) {
+ return __riscv_vfwnmacc_vv_bf16m2_f32m4_rm_tumu(vm, vd, vs1, vs2,
+ __RISCV_FRM_RNE, vl);
+}
+
+// CHECK-RV64-LABEL: define dso_local <vscale x 8 x float> @test_vfwnmacc_vf_bf16m2_f32m4_rm_tumu(
+// CHECK-RV64-SAME: <vscale x 8 x i1> [[VM:%.*]], <vscale x 8 x float> [[VD:%.*]], bfloat noundef [[VS1:%.*]], <vscale x 8 x bfloat> [[VS2:%.*]], i64 noundef [[VL:%.*]]) #[[ATTR0]] {
+// CHECK-RV64-NEXT: [[ENTRY:.*:]]
+// CHECK-RV64-NEXT: [[TMP0:%.*]] = call <vscale x 8 x float> @llvm.riscv.vfwnmacc.mask.nxv8f32.bf16.nxv8bf16.i64(<vscale x 8 x float> [[VD]], bfloat [[VS1]], <vscale x 8 x bfloat> [[VS2]], <vscale x 8 x i1> [[VM]], i64 0, i64 [[VL]], i64 0)
+// CHECK-RV64-NEXT: ret <vscale x 8 x float> [[TMP0]]
+//
+vfloat32m4_t test_vfwnmacc_vf_bf16m2_f32m4_rm_tumu(vbool8_t vm, vfloat32m4_t vd,
+ __bf16 vs1,
+ vbfloat16m2_t vs2,
+ size_t vl) {
+ return __riscv_vfwnmacc_vf_bf16m2_f32m4_rm_tumu(vm, vd, vs1, vs2,
+ __RISCV_FRM_RNE, vl);
+}
+
+// CHECK-RV64-LABEL: define dso_local <vscale x 16 x float> @test_vfwnmacc_vv_bf16m4_f32m8_rm_tumu(
+// CHECK-RV64-SAME: <vscale x 16 x i1> [[VM:%.*]], <vscale x 16 x float> [[VD:%.*]], <vscale x 16 x bfloat> [[VS1:%.*]], <vscale x 16 x bfloat> [[VS2:%.*]], i64 noundef [[VL:%.*]]) #[[ATTR0]] {
+// CHECK-RV64-NEXT: [[ENTRY:.*:]]
+// CHECK-RV64-NEXT: [[TMP0:%.*]] = call <vscale x 16 x float> @llvm.riscv.vfwnmacc.mask.nxv16f32.nxv16bf16.nxv16bf16.i64(<vscale x 16 x float> [[VD]], <vscale x 16 x bfloat> [[VS1]], <vscale x 16 x bfloat> [[VS2]], <vscale x 16 x i1> [[VM]], i64 0, i64 [[VL]], i64 0)
+// CHECK-RV64-NEXT: ret <vscale x 16 x float> [[TMP0]]
+//
+vfloat32m8_t test_vfwnmacc_vv_bf16m4_f32m8_rm_tumu(vbool4_t vm, vfloat32m8_t vd,
+ vbfloat16m4_t vs1,
+ vbfloat16m4_t vs2,
+ size_t vl) {
+ return __riscv_vfwnmacc_vv_bf16m4_f32m8_rm_tumu(vm, vd, vs1, vs2,
+ __RISCV_FRM_RNE, vl);
+}
+
+// CHECK-RV64-LABEL: define dso_local <vscale x 16 x float> @test_vfwnmacc_vf_bf16m4_f32m8_rm_tumu(
+// CHECK-RV64-SAME: <vscale x 16 x i1> [[VM:%.*]], <vscale x 16 x float> [[VD:%.*]], bfloat noundef [[VS1:%.*]], <vscale x 16 x bfloat> [[VS2:%.*]], i64 noundef [[VL:%.*]]) #[[ATTR0]] {
+// CHECK-RV64-NEXT: [[ENTRY:.*:]]
+// CHECK-RV64-NEXT: [[TMP0:%.*]] = call <vscale x 16 x float> @llvm.riscv.vfwnmacc.mask.nxv16f32.bf16.nxv16bf16.i64(<vscale x 16 x float> [[VD]], bfloat [[VS1]], <vscale x 16 x bfloat> [[VS2]], <vscale x 16 x i1> [[VM]], i64 0, i64 [[VL]], i64 0)
+// CHECK-RV64-NEXT: ret <vscale x 16 x float> [[TMP0]]
+//
+vfloat32m8_t test_vfwnmacc_vf_bf16m4_f32m8_rm_tumu(vbool4_t vm, vfloat32m8_t vd,
+ __bf16 vs1,
+ vbfloat16m4_t vs2,
+ size_t vl) {
+ return __riscv_vfwnmacc_vf_bf16m4_f32m8_rm_tumu(vm, vd, vs1, vs2,
+ __RISCV_FRM_RNE, vl);
+}
+
+// CHECK-RV64-LABEL: define dso_local <vscale x 1 x float> @test_vfwnmacc_vv_bf16mf4_f32mf2_rm_mu(
+// CHECK-RV64-SAME: <vscale x 1 x i1> [[VM:%.*]], <vscale x 1 x float> [[VD:%.*]], <vscale x 1 x bfloat> [[VS1:%.*]], <vscale x 1 x bfloat> [[VS2:%.*]], i64 noundef [[VL:%.*]]) #[[ATTR0]] {
+// CHECK-RV64-NEXT: [[ENTRY:.*:]]
+// CHECK-RV64-NEXT: [[TMP0:%.*]] = call <vscale x 1 x float> @llvm.riscv.vfwnmacc.mask.nxv1f32.nxv1bf16.nxv1bf16.i64(<vscale x 1 x float> [[VD]], <vscale x 1 x bfloat> [[VS1]], <vscale x 1 x bfloat> [[VS2]], <vscale x 1 x i1> [[VM]], i64 0, i64 [[VL]], i64 1)
+// CHECK-RV64-NEXT: ret <vscale x 1 x float> [[TMP0]]
+//
+vfloat32mf2_t test_vfwnmacc_vv_bf16mf4_f32mf2_rm_mu(vbool64_t vm,
+ vfloat32mf2_t vd,
+ vbfloat16mf4_t vs1,
+ vbfloat16mf4_t vs2,
+ size_t vl) {
+ return __riscv_vfwnmacc_vv_bf16mf4_f32mf2_rm_mu(vm, vd, vs1, vs2,
+ __RISCV_FRM_RNE, vl);
+}
+
+// CHECK-RV64-LABEL: define dso_local <vscale x 1 x float> @test_vfwnmacc_vf_bf16mf4_f32mf2_rm_mu(
+// CHECK-RV64-SAME: <vscale x 1 x i1> [[VM:%.*]], <vscale x 1 x float> [[VD:%.*]], bfloat noundef [[VS1:%.*]], <vscale x 1 x bfloat> [[VS2:%.*]], i64 noundef [[VL:%.*]]) #[[ATTR0]] {
+// CHECK-RV64-NEXT: [[ENTRY:.*:]]
+// CHECK-RV64-NEXT: [[TMP0:%.*]] = call <vscale x 1 x float> @llvm.riscv.vfwnmacc.mask.nxv1f32.bf16.nxv1bf16.i64(<vscale x 1 x float> [[VD]], bfloat [[VS1]], <vscale x 1 x bfloat> [[VS2]], <vscale x 1 x i1> [[VM]], i64 0, i64 [[VL]], i64 1)
+// CHECK-RV64-NEXT: ret <vscale x 1 x float> [[TMP0]]
+//
+vfloat32mf2_t test_vfwnmacc_vf_bf16mf4_f32mf2_rm_mu(
+ vbool64_t vm, vfloat32mf2_t vd, __bf16 vs1, vbfloat16mf4_t vs2, size_t vl) {
+ return __riscv_vfwnmacc_vf_bf16mf4_f32mf2_rm_mu(vm, vd, vs1, vs2,
+ __RISCV_FRM_RNE, vl);
+}
+
+// CHECK-RV64-LABEL: define dso_local <vscale x 2 x float> @test_vfwnmacc_vv_bf16mf2_f32m1_rm_mu(
+// CHECK-RV64-SAME: <vscale x 2 x i1> [[VM:%.*]], <vscale x 2 x float> [[VD:%.*]], <vscale x 2 x bfloat> [[VS1:%.*]], <vscale x 2 x bfloat> [[VS2:%.*]], i64 noundef [[VL:%.*]]) #[[ATTR0]] {
+// CHECK-RV64-NEXT: [[ENTRY:.*:]]
+// CHECK-RV64-NEXT: [[TMP0:%.*]] = call <vscale x 2 x float> @llvm.riscv.vfwnmacc.mask.nxv2f32.nxv2bf16.nxv2bf16.i64(<vscale x 2 x float> [[VD]], <vscale x 2 x bfloat> [[VS1]], <vscale x 2 x bfloat> [[VS2]], <vscale x 2 x i1> [[VM]], i64 0, i64 [[VL]], i64 1)
+// CHECK-RV64-NEXT: ret <vscale x 2 x float> [[TMP0]]
+//
+vfloat32m1_t test_vfwnmacc_vv_bf16mf2_f32m1_rm_mu(vbool32_t vm, vfloat32m1_t vd,
+ vbfloat16mf2_t vs1,
+ vbfloat16mf2_t vs2,
+ size_t vl) {
+ return __riscv_vfwnmacc_vv_bf16mf2_f32m1_rm_mu(vm, vd, vs1, vs2,
+ __RISCV_FRM_RNE, vl);
+}
+
+// CHECK-RV64-LABEL: define dso_local <vscale x 2 x float> @test_vfwnmacc_vf_bf16mf2_f32m1_rm_mu(
+// CHECK-RV64-SAME: <vscale x 2 x i1> [[VM:%.*]], <vscale x 2 x float> [[VD:%.*]], bfloat noundef [[VS1:%.*]], <vscale x 2 x bfloat> [[VS2:%.*]], i64 noundef [[VL:%.*]]) #[[ATTR0]] {
+// CHECK-RV64-NEXT: [[ENTRY:.*:]]
+// CHECK-RV64-NEXT: [[TMP0:%.*]] = call <vscale x 2 x float> @llvm.riscv.vfwnmacc.mask.nxv2f32.bf16.nxv2bf16.i64(<vscale x 2 x float> [[VD]], bfloat [[VS1]], <vscale x 2 x bfloat> [[VS2]], <vscale x 2 x i1> [[VM]], i64 0, i64 [[VL]], i64 1)
+// CHECK-RV64-NEXT: ret <vscale x 2 x float> [[TMP0]]
+//
+vfloat32m1_t test_vfwnmacc_vf_bf16mf2_f32m1_rm_mu(vbool32_t vm, vfloat32m1_t vd,
+ __bf16 vs1,
+ vbfloat16mf2_t vs2,
+ size_t vl) {
+ return __riscv_vfwnmacc_vf_bf16mf2_f32m1_rm_mu(vm, vd, vs1, vs2,
+ __RISCV_FRM_RNE, vl);
+}
+
+// CHECK-RV64-LABEL: define dso_local <vscale x 4 x float> @test_vfwnmacc_vv_bf16m1_f32m2_rm_mu(
+// CHECK-RV64-SAME: <vscale x 4 x i1> [[VM:%.*]], <vscale x 4 x float> [[VD:%.*]], <vscale x 4 x bfloat> [[VS1:%.*]], <vscale x 4 x bfloat> [[VS2:%.*]], i64 noundef [[VL:%.*]]) #[[ATTR0]] {
+// CHECK-RV64-NEXT: [[ENTRY:.*:]]
+// CHECK-RV64-NEXT: [[TMP0:%.*]] = call <vscale x 4 x float> @llvm.riscv.vfwnmacc.mask.nxv4f32.nxv4bf16.nxv4bf16.i64(<vscale x 4 x float> [[VD]], <vscale x 4 x bfloat> [[VS1]], <vscale x 4 x bfloat> [[VS2]], <vscale x 4 x i1> [[VM]], i64 0, i64 [[VL]], i64 1)
+// CHECK-RV64-NEXT: ret <vscale x 4 x float> [[TMP0]]
+//
+vfloat32m2_t test_vfwnmacc_vv_bf16m1_f32m2_rm_mu(vbool16_t vm, vfloat32m2_t vd,
+ vbfloat16m1_t vs1,
+ vbfloat16m1_t vs2, size_t vl) {
+ return __riscv_vfwnmacc_vv_bf16m1_f32m2_rm_mu(vm, vd, vs1, vs2,
+ __RISCV_FRM_RNE, vl);
+}
+
+// CHECK-RV64-LABEL: define dso_local <vscale x 4 x float> @test_vfwnmacc_vf_bf16m1_f32m2_rm_mu(
+// CHECK-RV64-SAME: <vscale x 4 x i1> [[VM:%.*]], <vscale x 4 x float> [[VD:%.*]], bfloat noundef [[VS1:%.*]], <vscale x 4 x bfloat> [[VS2:%.*]], i64 noundef [[VL:%.*]]) #[[ATTR0]] {
+// CHECK-RV64-NEXT: [[ENTRY:.*:]]
+// CHECK-RV64-NEXT: [[TMP0:%.*]] = call <vscale x 4 x float> @llvm.riscv.vfwnmacc.mask.nxv4f32.bf16.nxv4bf16.i64(<vscale x 4 x float> [[VD]], bfloat [[VS1]], <vscale x 4 x bfloat> [[VS2]], <vscale x 4 x i1> [[VM]], i64 0, i64 [[VL]], i64 1)
+// CHECK-RV64-NEXT: ret <vscale x 4 x float> [[TMP0]]
+//
+vfloat32m2_t test_vfwnmacc_vf_bf16m1_f32m2_rm_mu(vbool16_t vm, vfloat32m2_t vd,
+ __bf16 vs1, vbfloat16m1_t vs2,
+ size_t vl) {
+ return __riscv_vfwnmacc_vf_bf16m1_f32m2_rm_mu(vm, vd, vs1, vs2,
+ __RISCV_FRM_RNE, vl);
+}
+
+// CHECK-RV64-LABEL: define dso_local <vscale x 8 x float> @test_vfwnmacc_vv_bf16m2_f32m4_rm_mu(
+// CHECK-RV64-SAME: <vscale x 8 x i1> [[VM:%.*]], <vscale x 8 x float> [[VD:%.*]], <vscale x 8 x bfloat> [[VS1:%.*]], <vscale x 8 x bfloat> [[VS2:%.*]], i64 noundef [[VL:%.*]]) #[[ATTR0]] {
+// CHECK-RV64-NEXT: [[ENTRY:.*:]]
+// CHECK-RV64-NEXT: [[TMP0:%.*]] = call <vscale x 8 x float> @llvm.riscv.vfwnmacc.mask.nxv8f32.nxv8bf16.nxv8bf16.i64(<vscale x 8 x float> [[VD]], <vscale x 8 x bfloat> [[VS1]], <vscale x 8 x bfloat> [[VS2]], <vscale x 8 x i1> [[VM]], i64 0, i64 [[VL]], i64 1)
+// CHECK-RV64-NEXT: ret <vscale x 8 x float> [[TMP0]]
+//
+vfloat32m4_t test_vfwnmacc_vv_bf16m2_f32m4_rm_mu(vbool8_t vm, vfloat32m4_t vd,
+ vbfloat16m2_t vs1,
+ vbfloat16m2_t vs2, size_t vl) {
+ return __riscv_vfwnmacc_vv_bf16m2_f32m4_rm_mu(vm, vd, vs1, vs2,
+ __RISCV_FRM_RNE, vl);
+}
+
+// CHECK-RV64-LABEL: define dso_local <vscale x 8 x float> @test_vfwnmacc_vf_bf16m2_f32m4_rm_mu(
+// CHECK-RV64-SAME: <vscale x 8 x i1> [[VM:%.*]], <vscale x 8 x float> [[VD:%.*]], bfloat noundef [[VS1:%.*]], <vscale x 8 x bfloat> [[VS2:%.*]], i64 noundef [[VL:%.*]]) #[[ATTR0]] {
+// CHECK-RV64-NEXT: [[ENTRY:.*:]]
+// CHECK-RV64-NEXT: [[TMP0:%.*]] = call <vscale x 8 x float> @llvm.riscv.vfwnmacc.mask.nxv8f32.bf16.nxv8bf16.i64(<vscale x 8 x float> [[VD]], bfloat [[VS1]], <vscale x 8 x bfloat> [[VS2]], <vscale x 8 x i1> [[VM]], i64 0, i64 [[VL]], i64 1)
+// CHECK-RV64-NEXT: ret <vscale x 8 x float> [[TMP0]]
+//
+vfloat32m4_t test_vfwnmacc_vf_bf16m2_f32m4_rm_mu(vbool8_t vm, vfloat32m4_t vd,
+ __bf16 vs1, vbfloat16m2_t vs2,
+ size_t vl) {
+ return __riscv_vfwnmacc_vf_bf16m2_f32m4_rm_mu(vm, vd, vs1, vs2,
+ __RISCV_FRM_RNE, vl);
+}
+
+// CHECK-RV64-LABEL: define dso_local <vscale x 16 x float> @test_vfwnmacc_vv_bf16m4_f32m8_rm_mu(
+// CHECK-RV64-SAME: <vscale x 16 x i1> [[VM:%.*]], <vscale x 16 x float> [[VD:%.*]], <vscale x 16 x bfloat> [[VS1:%.*]], <vscale x 16 x bfloat> [[VS2:%.*]], i64 noundef [[VL:%.*]]) #[[ATTR0]] {
+// CHECK-RV64-NEXT: [[ENTRY:.*:]]
+// CHECK-RV64-NEXT: [[TMP0:%.*]] = call <vscale x 16 x float> @llvm.riscv.vfwnmacc.mask.nxv16f32.nxv16bf16.nxv16bf16.i64(<vscale x 16 x float> [[VD]], <vscale x 16 x bfloat> [[VS1]], <vscale x 16 x bfloat> [[VS2]], <vscale x 16 x i1> [[VM]], i64 0, i64 [[VL]], i64 1)
+// CHECK-RV64-NEXT: ret <vscale x 16 x float> [[TMP0]]
+//
+vfloat32m8_t test_vfwnmacc_vv_bf16m4_f32m8_rm_mu(vbool4_t vm, vfloat32m8_t vd,
+ vbfloat16m4_t vs1,
+ vbfloat16m4_t vs2, size_t vl) {
+ return __riscv_vfwnmacc_vv_bf16m4_f32m8_rm_mu(vm, vd, vs1, vs2,
+ __RISCV_FRM_RNE, vl);
+}
+
+// CHECK-RV64-LABEL: define dso_local <vscale x 16 x float> @test_vfwnmacc_vf_bf16m4_f32m8_rm_mu(
+// CHECK-RV64-SAME: <vscale x 16 x i1> [[VM:%.*]], <vscale x 16 x float> [[VD:%.*]], bfloat noundef [[VS1:%.*]], <vscale x 16 x bfloat> [[VS2:%.*]], i64 noundef [[VL:%.*]]) #[[ATTR0]] {
+// CHECK-RV64-NEXT: [[ENTRY:.*:]]
+// CHECK-RV64-NEXT: [[TMP0:%.*]] = call <vscale x 16 x float> @llvm.riscv.vfwnmacc.mask.nxv16f32.bf16.nxv16bf16.i64(<vscale x 16 x float> [[VD]], bfloat [[VS1]], <vscale x 16 x bfloat> [[VS2]], <vscale x 16 x i1> [[VM]], i64 0, i64 [[VL]], i64 1)
+// CHECK-RV64-NEXT: ret <vscale x 16 x float> [[TMP0]]
+//
+vfloat32m8_t test_vfwnmacc_vf_bf16m4_f32m8_rm_mu(vbool4_t vm, vfloat32m8_t vd,
+ __bf16 vs1, vbfloat16m4_t vs2,
+ size_t vl) {
+ return __riscv_vfwnmacc_vf_bf16m4_f32m8_rm_mu(vm, vd, vs1, vs2,
+ __RISCV_FRM_RNE, vl);
+}
diff --git a/clang/test/CodeGen/RISCV/rvv-intrinsics-autogenerated/zvfbfa/policy/non-overloaded/vfwnmsac.c b/clang/test/CodeGen/RISCV/rvv-intrinsics-autogenerated/zvfbfa/policy/non-overloaded/vfwnmsac.c
new file mode 100644
index 0000000..6133230
--- /dev/null
+++ b/clang/test/CodeGen/RISCV/rvv-intrinsics-autogenerated/zvfbfa/policy/non-overloaded/vfwnmsac.c
@@ -0,0 +1,1034 @@
+// NOTE: Assertions have been autogenerated by utils/update_cc_test_checks.py UTC_ARGS: --version 5
+// REQUIRES: riscv-registered-target
+// RUN: %clang_cc1 -triple riscv64 -target-feature +v \
+// RUN: -target-feature +experimental-zvfbfa -disable-O0-optnone \
+// RUN: -emit-llvm %s -o - | opt -S -passes=mem2reg | \
+// RUN: FileCheck --check-prefix=CHECK-RV64 %s
+
+#include <riscv_vector.h>
+
+// CHECK-RV64-LABEL: define dso_local <vscale x 1 x float> @test_vfwnmsac_vv_bf16mf4_f32mf2_tu(
+// CHECK-RV64-SAME: <vscale x 1 x float> [[VD:%.*]], <vscale x 1 x bfloat> [[VS1:%.*]], <vscale x 1 x bfloat> [[VS2:%.*]], i64 noundef [[VL:%.*]]) #[[ATTR0:[0-9]+]] {
+// CHECK-RV64-NEXT: [[ENTRY:.*:]]
+// CHECK-RV64-NEXT: [[TMP0:%.*]] = call <vscale x 1 x float> @llvm.riscv.vfwnmsac.nxv1f32.nxv1bf16.nxv1bf16.i64(<vscale x 1 x float> [[VD]], <vscale x 1 x bfloat> [[VS1]], <vscale x 1 x bfloat> [[VS2]], i64 7, i64 [[VL]], i64 2)
+// CHECK-RV64-NEXT: ret <vscale x 1 x float> [[TMP0]]
+//
+vfloat32mf2_t test_vfwnmsac_vv_bf16mf4_f32mf2_tu(vfloat32mf2_t vd,
+ vbfloat16mf4_t vs1,
+ vbfloat16mf4_t vs2,
+ size_t vl) {
+ return __riscv_vfwnmsac_vv_bf16mf4_f32mf2_tu(vd, vs1, vs2, vl);
+}
+
+// CHECK-RV64-LABEL: define dso_local <vscale x 1 x float> @test_vfwnmsac_vf_bf16mf4_f32mf2_tu(
+// CHECK-RV64-SAME: <vscale x 1 x float> [[VD:%.*]], bfloat noundef [[VS1:%.*]], <vscale x 1 x bfloat> [[VS2:%.*]], i64 noundef [[VL:%.*]]) #[[ATTR0]] {
+// CHECK-RV64-NEXT: [[ENTRY:.*:]]
+// CHECK-RV64-NEXT: [[TMP0:%.*]] = call <vscale x 1 x float> @llvm.riscv.vfwnmsac.nxv1f32.bf16.nxv1bf16.i64(<vscale x 1 x float> [[VD]], bfloat [[VS1]], <vscale x 1 x bfloat> [[VS2]], i64 7, i64 [[VL]], i64 2)
+// CHECK-RV64-NEXT: ret <vscale x 1 x float> [[TMP0]]
+//
+vfloat32mf2_t test_vfwnmsac_vf_bf16mf4_f32mf2_tu(vfloat32mf2_t vd, __bf16 vs1,
+ vbfloat16mf4_t vs2,
+ size_t vl) {
+ return __riscv_vfwnmsac_vf_bf16mf4_f32mf2_tu(vd, vs1, vs2, vl);
+}
+
+// CHECK-RV64-LABEL: define dso_local <vscale x 2 x float> @test_vfwnmsac_vv_bf16mf2_f32m1_tu(
+// CHECK-RV64-SAME: <vscale x 2 x float> [[VD:%.*]], <vscale x 2 x bfloat> [[VS1:%.*]], <vscale x 2 x bfloat> [[VS2:%.*]], i64 noundef [[VL:%.*]]) #[[ATTR0]] {
+// CHECK-RV64-NEXT: [[ENTRY:.*:]]
+// CHECK-RV64-NEXT: [[TMP0:%.*]] = call <vscale x 2 x float> @llvm.riscv.vfwnmsac.nxv2f32.nxv2bf16.nxv2bf16.i64(<vscale x 2 x float> [[VD]], <vscale x 2 x bfloat> [[VS1]], <vscale x 2 x bfloat> [[VS2]], i64 7, i64 [[VL]], i64 2)
+// CHECK-RV64-NEXT: ret <vscale x 2 x float> [[TMP0]]
+//
+vfloat32m1_t test_vfwnmsac_vv_bf16mf2_f32m1_tu(vfloat32m1_t vd,
+ vbfloat16mf2_t vs1,
+ vbfloat16mf2_t vs2, size_t vl) {
+ return __riscv_vfwnmsac_vv_bf16mf2_f32m1_tu(vd, vs1, vs2, vl);
+}
+
+// CHECK-RV64-LABEL: define dso_local <vscale x 2 x float> @test_vfwnmsac_vf_bf16mf2_f32m1_tu(
+// CHECK-RV64-SAME: <vscale x 2 x float> [[VD:%.*]], bfloat noundef [[VS1:%.*]], <vscale x 2 x bfloat> [[VS2:%.*]], i64 noundef [[VL:%.*]]) #[[ATTR0]] {
+// CHECK-RV64-NEXT: [[ENTRY:.*:]]
+// CHECK-RV64-NEXT: [[TMP0:%.*]] = call <vscale x 2 x float> @llvm.riscv.vfwnmsac.nxv2f32.bf16.nxv2bf16.i64(<vscale x 2 x float> [[VD]], bfloat [[VS1]], <vscale x 2 x bfloat> [[VS2]], i64 7, i64 [[VL]], i64 2)
+// CHECK-RV64-NEXT: ret <vscale x 2 x float> [[TMP0]]
+//
+vfloat32m1_t test_vfwnmsac_vf_bf16mf2_f32m1_tu(vfloat32m1_t vd, __bf16 vs1,
+ vbfloat16mf2_t vs2, size_t vl) {
+ return __riscv_vfwnmsac_vf_bf16mf2_f32m1_tu(vd, vs1, vs2, vl);
+}
+
+// CHECK-RV64-LABEL: define dso_local <vscale x 4 x float> @test_vfwnmsac_vv_bf16m1_f32m2_tu(
+// CHECK-RV64-SAME: <vscale x 4 x float> [[VD:%.*]], <vscale x 4 x bfloat> [[VS1:%.*]], <vscale x 4 x bfloat> [[VS2:%.*]], i64 noundef [[VL:%.*]]) #[[ATTR0]] {
+// CHECK-RV64-NEXT: [[ENTRY:.*:]]
+// CHECK-RV64-NEXT: [[TMP0:%.*]] = call <vscale x 4 x float> @llvm.riscv.vfwnmsac.nxv4f32.nxv4bf16.nxv4bf16.i64(<vscale x 4 x float> [[VD]], <vscale x 4 x bfloat> [[VS1]], <vscale x 4 x bfloat> [[VS2]], i64 7, i64 [[VL]], i64 2)
+// CHECK-RV64-NEXT: ret <vscale x 4 x float> [[TMP0]]
+//
+vfloat32m2_t test_vfwnmsac_vv_bf16m1_f32m2_tu(vfloat32m2_t vd,
+ vbfloat16m1_t vs1,
+ vbfloat16m1_t vs2, size_t vl) {
+ return __riscv_vfwnmsac_vv_bf16m1_f32m2_tu(vd, vs1, vs2, vl);
+}
+
+// CHECK-RV64-LABEL: define dso_local <vscale x 4 x float> @test_vfwnmsac_vf_bf16m1_f32m2_tu(
+// CHECK-RV64-SAME: <vscale x 4 x float> [[VD:%.*]], bfloat noundef [[VS1:%.*]], <vscale x 4 x bfloat> [[VS2:%.*]], i64 noundef [[VL:%.*]]) #[[ATTR0]] {
+// CHECK-RV64-NEXT: [[ENTRY:.*:]]
+// CHECK-RV64-NEXT: [[TMP0:%.*]] = call <vscale x 4 x float> @llvm.riscv.vfwnmsac.nxv4f32.bf16.nxv4bf16.i64(<vscale x 4 x float> [[VD]], bfloat [[VS1]], <vscale x 4 x bfloat> [[VS2]], i64 7, i64 [[VL]], i64 2)
+// CHECK-RV64-NEXT: ret <vscale x 4 x float> [[TMP0]]
+//
+vfloat32m2_t test_vfwnmsac_vf_bf16m1_f32m2_tu(vfloat32m2_t vd, __bf16 vs1,
+ vbfloat16m1_t vs2, size_t vl) {
+ return __riscv_vfwnmsac_vf_bf16m1_f32m2_tu(vd, vs1, vs2, vl);
+}
+
+// CHECK-RV64-LABEL: define dso_local <vscale x 8 x float> @test_vfwnmsac_vv_bf16m2_f32m4_tu(
+// CHECK-RV64-SAME: <vscale x 8 x float> [[VD:%.*]], <vscale x 8 x bfloat> [[VS1:%.*]], <vscale x 8 x bfloat> [[VS2:%.*]], i64 noundef [[VL:%.*]]) #[[ATTR0]] {
+// CHECK-RV64-NEXT: [[ENTRY:.*:]]
+// CHECK-RV64-NEXT: [[TMP0:%.*]] = call <vscale x 8 x float> @llvm.riscv.vfwnmsac.nxv8f32.nxv8bf16.nxv8bf16.i64(<vscale x 8 x float> [[VD]], <vscale x 8 x bfloat> [[VS1]], <vscale x 8 x bfloat> [[VS2]], i64 7, i64 [[VL]], i64 2)
+// CHECK-RV64-NEXT: ret <vscale x 8 x float> [[TMP0]]
+//
+vfloat32m4_t test_vfwnmsac_vv_bf16m2_f32m4_tu(vfloat32m4_t vd,
+ vbfloat16m2_t vs1,
+ vbfloat16m2_t vs2, size_t vl) {
+ return __riscv_vfwnmsac_vv_bf16m2_f32m4_tu(vd, vs1, vs2, vl);
+}
+
+// CHECK-RV64-LABEL: define dso_local <vscale x 8 x float> @test_vfwnmsac_vf_bf16m2_f32m4_tu(
+// CHECK-RV64-SAME: <vscale x 8 x float> [[VD:%.*]], bfloat noundef [[VS1:%.*]], <vscale x 8 x bfloat> [[VS2:%.*]], i64 noundef [[VL:%.*]]) #[[ATTR0]] {
+// CHECK-RV64-NEXT: [[ENTRY:.*:]]
+// CHECK-RV64-NEXT: [[TMP0:%.*]] = call <vscale x 8 x float> @llvm.riscv.vfwnmsac.nxv8f32.bf16.nxv8bf16.i64(<vscale x 8 x float> [[VD]], bfloat [[VS1]], <vscale x 8 x bfloat> [[VS2]], i64 7, i64 [[VL]], i64 2)
+// CHECK-RV64-NEXT: ret <vscale x 8 x float> [[TMP0]]
+//
+vfloat32m4_t test_vfwnmsac_vf_bf16m2_f32m4_tu(vfloat32m4_t vd, __bf16 vs1,
+ vbfloat16m2_t vs2, size_t vl) {
+ return __riscv_vfwnmsac_vf_bf16m2_f32m4_tu(vd, vs1, vs2, vl);
+}
+
+// CHECK-RV64-LABEL: define dso_local <vscale x 16 x float> @test_vfwnmsac_vv_bf16m4_f32m8_tu(
+// CHECK-RV64-SAME: <vscale x 16 x float> [[VD:%.*]], <vscale x 16 x bfloat> [[VS1:%.*]], <vscale x 16 x bfloat> [[VS2:%.*]], i64 noundef [[VL:%.*]]) #[[ATTR0]] {
+// CHECK-RV64-NEXT: [[ENTRY:.*:]]
+// CHECK-RV64-NEXT: [[TMP0:%.*]] = call <vscale x 16 x float> @llvm.riscv.vfwnmsac.nxv16f32.nxv16bf16.nxv16bf16.i64(<vscale x 16 x float> [[VD]], <vscale x 16 x bfloat> [[VS1]], <vscale x 16 x bfloat> [[VS2]], i64 7, i64 [[VL]], i64 2)
+// CHECK-RV64-NEXT: ret <vscale x 16 x float> [[TMP0]]
+//
+vfloat32m8_t test_vfwnmsac_vv_bf16m4_f32m8_tu(vfloat32m8_t vd,
+ vbfloat16m4_t vs1,
+ vbfloat16m4_t vs2, size_t vl) {
+ return __riscv_vfwnmsac_vv_bf16m4_f32m8_tu(vd, vs1, vs2, vl);
+}
+
+// CHECK-RV64-LABEL: define dso_local <vscale x 16 x float> @test_vfwnmsac_vf_bf16m4_f32m8_tu(
+// CHECK-RV64-SAME: <vscale x 16 x float> [[VD:%.*]], bfloat noundef [[VS1:%.*]], <vscale x 16 x bfloat> [[VS2:%.*]], i64 noundef [[VL:%.*]]) #[[ATTR0]] {
+// CHECK-RV64-NEXT: [[ENTRY:.*:]]
+// CHECK-RV64-NEXT: [[TMP0:%.*]] = call <vscale x 16 x float> @llvm.riscv.vfwnmsac.nxv16f32.bf16.nxv16bf16.i64(<vscale x 16 x float> [[VD]], bfloat [[VS1]], <vscale x 16 x bfloat> [[VS2]], i64 7, i64 [[VL]], i64 2)
+// CHECK-RV64-NEXT: ret <vscale x 16 x float> [[TMP0]]
+//
+vfloat32m8_t test_vfwnmsac_vf_bf16m4_f32m8_tu(vfloat32m8_t vd, __bf16 vs1,
+ vbfloat16m4_t vs2, size_t vl) {
+ return __riscv_vfwnmsac_vf_bf16m4_f32m8_tu(vd, vs1, vs2, vl);
+}
+
+// CHECK-RV64-LABEL: define dso_local <vscale x 1 x float> @test_vfwnmsac_vv_bf16mf4_f32mf2_tum(
+// CHECK-RV64-SAME: <vscale x 1 x i1> [[VM:%.*]], <vscale x 1 x float> [[VD:%.*]], <vscale x 1 x bfloat> [[VS1:%.*]], <vscale x 1 x bfloat> [[VS2:%.*]], i64 noundef [[VL:%.*]]) #[[ATTR0]] {
+// CHECK-RV64-NEXT: [[ENTRY:.*:]]
+// CHECK-RV64-NEXT: [[TMP0:%.*]] = call <vscale x 1 x float> @llvm.riscv.vfwnmsac.mask.nxv1f32.nxv1bf16.nxv1bf16.i64(<vscale x 1 x float> [[VD]], <vscale x 1 x bfloat> [[VS1]], <vscale x 1 x bfloat> [[VS2]], <vscale x 1 x i1> [[VM]], i64 7, i64 [[VL]], i64 2)
+// CHECK-RV64-NEXT: ret <vscale x 1 x float> [[TMP0]]
+//
+vfloat32mf2_t test_vfwnmsac_vv_bf16mf4_f32mf2_tum(vbool64_t vm,
+ vfloat32mf2_t vd,
+ vbfloat16mf4_t vs1,
+ vbfloat16mf4_t vs2,
+ size_t vl) {
+ return __riscv_vfwnmsac_vv_bf16mf4_f32mf2_tum(vm, vd, vs1, vs2, vl);
+}
+
+// CHECK-RV64-LABEL: define dso_local <vscale x 1 x float> @test_vfwnmsac_vf_bf16mf4_f32mf2_tum(
+// CHECK-RV64-SAME: <vscale x 1 x i1> [[VM:%.*]], <vscale x 1 x float> [[VD:%.*]], bfloat noundef [[VS1:%.*]], <vscale x 1 x bfloat> [[VS2:%.*]], i64 noundef [[VL:%.*]]) #[[ATTR0]] {
+// CHECK-RV64-NEXT: [[ENTRY:.*:]]
+// CHECK-RV64-NEXT: [[TMP0:%.*]] = call <vscale x 1 x float> @llvm.riscv.vfwnmsac.mask.nxv1f32.bf16.nxv1bf16.i64(<vscale x 1 x float> [[VD]], bfloat [[VS1]], <vscale x 1 x bfloat> [[VS2]], <vscale x 1 x i1> [[VM]], i64 7, i64 [[VL]], i64 2)
+// CHECK-RV64-NEXT: ret <vscale x 1 x float> [[TMP0]]
+//
+vfloat32mf2_t test_vfwnmsac_vf_bf16mf4_f32mf2_tum(vbool64_t vm,
+ vfloat32mf2_t vd, __bf16 vs1,
+ vbfloat16mf4_t vs2,
+ size_t vl) {
+ return __riscv_vfwnmsac_vf_bf16mf4_f32mf2_tum(vm, vd, vs1, vs2, vl);
+}
+
+// CHECK-RV64-LABEL: define dso_local <vscale x 2 x float> @test_vfwnmsac_vv_bf16mf2_f32m1_tum(
+// CHECK-RV64-SAME: <vscale x 2 x i1> [[VM:%.*]], <vscale x 2 x float> [[VD:%.*]], <vscale x 2 x bfloat> [[VS1:%.*]], <vscale x 2 x bfloat> [[VS2:%.*]], i64 noundef [[VL:%.*]]) #[[ATTR0]] {
+// CHECK-RV64-NEXT: [[ENTRY:.*:]]
+// CHECK-RV64-NEXT: [[TMP0:%.*]] = call <vscale x 2 x float> @llvm.riscv.vfwnmsac.mask.nxv2f32.nxv2bf16.nxv2bf16.i64(<vscale x 2 x float> [[VD]], <vscale x 2 x bfloat> [[VS1]], <vscale x 2 x bfloat> [[VS2]], <vscale x 2 x i1> [[VM]], i64 7, i64 [[VL]], i64 2)
+// CHECK-RV64-NEXT: ret <vscale x 2 x float> [[TMP0]]
+//
+vfloat32m1_t test_vfwnmsac_vv_bf16mf2_f32m1_tum(vbool32_t vm, vfloat32m1_t vd,
+ vbfloat16mf2_t vs1,
+ vbfloat16mf2_t vs2, size_t vl) {
+ return __riscv_vfwnmsac_vv_bf16mf2_f32m1_tum(vm, vd, vs1, vs2, vl);
+}
+
+// CHECK-RV64-LABEL: define dso_local <vscale x 2 x float> @test_vfwnmsac_vf_bf16mf2_f32m1_tum(
+// CHECK-RV64-SAME: <vscale x 2 x i1> [[VM:%.*]], <vscale x 2 x float> [[VD:%.*]], bfloat noundef [[VS1:%.*]], <vscale x 2 x bfloat> [[VS2:%.*]], i64 noundef [[VL:%.*]]) #[[ATTR0]] {
+// CHECK-RV64-NEXT: [[ENTRY:.*:]]
+// CHECK-RV64-NEXT: [[TMP0:%.*]] = call <vscale x 2 x float> @llvm.riscv.vfwnmsac.mask.nxv2f32.bf16.nxv2bf16.i64(<vscale x 2 x float> [[VD]], bfloat [[VS1]], <vscale x 2 x bfloat> [[VS2]], <vscale x 2 x i1> [[VM]], i64 7, i64 [[VL]], i64 2)
+// CHECK-RV64-NEXT: ret <vscale x 2 x float> [[TMP0]]
+//
+vfloat32m1_t test_vfwnmsac_vf_bf16mf2_f32m1_tum(vbool32_t vm, vfloat32m1_t vd,
+ __bf16 vs1, vbfloat16mf2_t vs2,
+ size_t vl) {
+ return __riscv_vfwnmsac_vf_bf16mf2_f32m1_tum(vm, vd, vs1, vs2, vl);
+}
+
+// CHECK-RV64-LABEL: define dso_local <vscale x 4 x float> @test_vfwnmsac_vv_bf16m1_f32m2_tum(
+// CHECK-RV64-SAME: <vscale x 4 x i1> [[VM:%.*]], <vscale x 4 x float> [[VD:%.*]], <vscale x 4 x bfloat> [[VS1:%.*]], <vscale x 4 x bfloat> [[VS2:%.*]], i64 noundef [[VL:%.*]]) #[[ATTR0]] {
+// CHECK-RV64-NEXT: [[ENTRY:.*:]]
+// CHECK-RV64-NEXT: [[TMP0:%.*]] = call <vscale x 4 x float> @llvm.riscv.vfwnmsac.mask.nxv4f32.nxv4bf16.nxv4bf16.i64(<vscale x 4 x float> [[VD]], <vscale x 4 x bfloat> [[VS1]], <vscale x 4 x bfloat> [[VS2]], <vscale x 4 x i1> [[VM]], i64 7, i64 [[VL]], i64 2)
+// CHECK-RV64-NEXT: ret <vscale x 4 x float> [[TMP0]]
+//
+vfloat32m2_t test_vfwnmsac_vv_bf16m1_f32m2_tum(vbool16_t vm, vfloat32m2_t vd,
+ vbfloat16m1_t vs1,
+ vbfloat16m1_t vs2, size_t vl) {
+ return __riscv_vfwnmsac_vv_bf16m1_f32m2_tum(vm, vd, vs1, vs2, vl);
+}
+
+// CHECK-RV64-LABEL: define dso_local <vscale x 4 x float> @test_vfwnmsac_vf_bf16m1_f32m2_tum(
+// CHECK-RV64-SAME: <vscale x 4 x i1> [[VM:%.*]], <vscale x 4 x float> [[VD:%.*]], bfloat noundef [[VS1:%.*]], <vscale x 4 x bfloat> [[VS2:%.*]], i64 noundef [[VL:%.*]]) #[[ATTR0]] {
+// CHECK-RV64-NEXT: [[ENTRY:.*:]]
+// CHECK-RV64-NEXT: [[TMP0:%.*]] = call <vscale x 4 x float> @llvm.riscv.vfwnmsac.mask.nxv4f32.bf16.nxv4bf16.i64(<vscale x 4 x float> [[VD]], bfloat [[VS1]], <vscale x 4 x bfloat> [[VS2]], <vscale x 4 x i1> [[VM]], i64 7, i64 [[VL]], i64 2)
+// CHECK-RV64-NEXT: ret <vscale x 4 x float> [[TMP0]]
+//
+vfloat32m2_t test_vfwnmsac_vf_bf16m1_f32m2_tum(vbool16_t vm, vfloat32m2_t vd,
+ __bf16 vs1, vbfloat16m1_t vs2,
+ size_t vl) {
+ return __riscv_vfwnmsac_vf_bf16m1_f32m2_tum(vm, vd, vs1, vs2, vl);
+}
+
+// CHECK-RV64-LABEL: define dso_local <vscale x 8 x float> @test_vfwnmsac_vv_bf16m2_f32m4_tum(
+// CHECK-RV64-SAME: <vscale x 8 x i1> [[VM:%.*]], <vscale x 8 x float> [[VD:%.*]], <vscale x 8 x bfloat> [[VS1:%.*]], <vscale x 8 x bfloat> [[VS2:%.*]], i64 noundef [[VL:%.*]]) #[[ATTR0]] {
+// CHECK-RV64-NEXT: [[ENTRY:.*:]]
+// CHECK-RV64-NEXT: [[TMP0:%.*]] = call <vscale x 8 x float> @llvm.riscv.vfwnmsac.mask.nxv8f32.nxv8bf16.nxv8bf16.i64(<vscale x 8 x float> [[VD]], <vscale x 8 x bfloat> [[VS1]], <vscale x 8 x bfloat> [[VS2]], <vscale x 8 x i1> [[VM]], i64 7, i64 [[VL]], i64 2)
+// CHECK-RV64-NEXT: ret <vscale x 8 x float> [[TMP0]]
+//
+vfloat32m4_t test_vfwnmsac_vv_bf16m2_f32m4_tum(vbool8_t vm, vfloat32m4_t vd,
+ vbfloat16m2_t vs1,
+ vbfloat16m2_t vs2, size_t vl) {
+ return __riscv_vfwnmsac_vv_bf16m2_f32m4_tum(vm, vd, vs1, vs2, vl);
+}
+
+// CHECK-RV64-LABEL: define dso_local <vscale x 8 x float> @test_vfwnmsac_vf_bf16m2_f32m4_tum(
+// CHECK-RV64-SAME: <vscale x 8 x i1> [[VM:%.*]], <vscale x 8 x float> [[VD:%.*]], bfloat noundef [[VS1:%.*]], <vscale x 8 x bfloat> [[VS2:%.*]], i64 noundef [[VL:%.*]]) #[[ATTR0]] {
+// CHECK-RV64-NEXT: [[ENTRY:.*:]]
+// CHECK-RV64-NEXT: [[TMP0:%.*]] = call <vscale x 8 x float> @llvm.riscv.vfwnmsac.mask.nxv8f32.bf16.nxv8bf16.i64(<vscale x 8 x float> [[VD]], bfloat [[VS1]], <vscale x 8 x bfloat> [[VS2]], <vscale x 8 x i1> [[VM]], i64 7, i64 [[VL]], i64 2)
+// CHECK-RV64-NEXT: ret <vscale x 8 x float> [[TMP0]]
+//
+vfloat32m4_t test_vfwnmsac_vf_bf16m2_f32m4_tum(vbool8_t vm, vfloat32m4_t vd,
+ __bf16 vs1, vbfloat16m2_t vs2,
+ size_t vl) {
+ return __riscv_vfwnmsac_vf_bf16m2_f32m4_tum(vm, vd, vs1, vs2, vl);
+}
+
+// CHECK-RV64-LABEL: define dso_local <vscale x 16 x float> @test_vfwnmsac_vv_bf16m4_f32m8_tum(
+// CHECK-RV64-SAME: <vscale x 16 x i1> [[VM:%.*]], <vscale x 16 x float> [[VD:%.*]], <vscale x 16 x bfloat> [[VS1:%.*]], <vscale x 16 x bfloat> [[VS2:%.*]], i64 noundef [[VL:%.*]]) #[[ATTR0]] {
+// CHECK-RV64-NEXT: [[ENTRY:.*:]]
+// CHECK-RV64-NEXT: [[TMP0:%.*]] = call <vscale x 16 x float> @llvm.riscv.vfwnmsac.mask.nxv16f32.nxv16bf16.nxv16bf16.i64(<vscale x 16 x float> [[VD]], <vscale x 16 x bfloat> [[VS1]], <vscale x 16 x bfloat> [[VS2]], <vscale x 16 x i1> [[VM]], i64 7, i64 [[VL]], i64 2)
+// CHECK-RV64-NEXT: ret <vscale x 16 x float> [[TMP0]]
+//
+vfloat32m8_t test_vfwnmsac_vv_bf16m4_f32m8_tum(vbool4_t vm, vfloat32m8_t vd,
+ vbfloat16m4_t vs1,
+ vbfloat16m4_t vs2, size_t vl) {
+ return __riscv_vfwnmsac_vv_bf16m4_f32m8_tum(vm, vd, vs1, vs2, vl);
+}
+
+// CHECK-RV64-LABEL: define dso_local <vscale x 16 x float> @test_vfwnmsac_vf_bf16m4_f32m8_tum(
+// CHECK-RV64-SAME: <vscale x 16 x i1> [[VM:%.*]], <vscale x 16 x float> [[VD:%.*]], bfloat noundef [[VS1:%.*]], <vscale x 16 x bfloat> [[VS2:%.*]], i64 noundef [[VL:%.*]]) #[[ATTR0]] {
+// CHECK-RV64-NEXT: [[ENTRY:.*:]]
+// CHECK-RV64-NEXT: [[TMP0:%.*]] = call <vscale x 16 x float> @llvm.riscv.vfwnmsac.mask.nxv16f32.bf16.nxv16bf16.i64(<vscale x 16 x float> [[VD]], bfloat [[VS1]], <vscale x 16 x bfloat> [[VS2]], <vscale x 16 x i1> [[VM]], i64 7, i64 [[VL]], i64 2)
+// CHECK-RV64-NEXT: ret <vscale x 16 x float> [[TMP0]]
+//
+vfloat32m8_t test_vfwnmsac_vf_bf16m4_f32m8_tum(vbool4_t vm, vfloat32m8_t vd,
+ __bf16 vs1, vbfloat16m4_t vs2,
+ size_t vl) {
+ return __riscv_vfwnmsac_vf_bf16m4_f32m8_tum(vm, vd, vs1, vs2, vl);
+}
+
+// CHECK-RV64-LABEL: define dso_local <vscale x 1 x float> @test_vfwnmsac_vv_bf16mf4_f32mf2_tumu(
+// CHECK-RV64-SAME: <vscale x 1 x i1> [[VM:%.*]], <vscale x 1 x float> [[VD:%.*]], <vscale x 1 x bfloat> [[VS1:%.*]], <vscale x 1 x bfloat> [[VS2:%.*]], i64 noundef [[VL:%.*]]) #[[ATTR0]] {
+// CHECK-RV64-NEXT: [[ENTRY:.*:]]
+// CHECK-RV64-NEXT: [[TMP0:%.*]] = call <vscale x 1 x float> @llvm.riscv.vfwnmsac.mask.nxv1f32.nxv1bf16.nxv1bf16.i64(<vscale x 1 x float> [[VD]], <vscale x 1 x bfloat> [[VS1]], <vscale x 1 x bfloat> [[VS2]], <vscale x 1 x i1> [[VM]], i64 7, i64 [[VL]], i64 0)
+// CHECK-RV64-NEXT: ret <vscale x 1 x float> [[TMP0]]
+//
+vfloat32mf2_t test_vfwnmsac_vv_bf16mf4_f32mf2_tumu(vbool64_t vm,
+ vfloat32mf2_t vd,
+ vbfloat16mf4_t vs1,
+ vbfloat16mf4_t vs2,
+ size_t vl) {
+ return __riscv_vfwnmsac_vv_bf16mf4_f32mf2_tumu(vm, vd, vs1, vs2, vl);
+}
+
+// CHECK-RV64-LABEL: define dso_local <vscale x 1 x float> @test_vfwnmsac_vf_bf16mf4_f32mf2_tumu(
+// CHECK-RV64-SAME: <vscale x 1 x i1> [[VM:%.*]], <vscale x 1 x float> [[VD:%.*]], bfloat noundef [[VS1:%.*]], <vscale x 1 x bfloat> [[VS2:%.*]], i64 noundef [[VL:%.*]]) #[[ATTR0]] {
+// CHECK-RV64-NEXT: [[ENTRY:.*:]]
+// CHECK-RV64-NEXT: [[TMP0:%.*]] = call <vscale x 1 x float> @llvm.riscv.vfwnmsac.mask.nxv1f32.bf16.nxv1bf16.i64(<vscale x 1 x float> [[VD]], bfloat [[VS1]], <vscale x 1 x bfloat> [[VS2]], <vscale x 1 x i1> [[VM]], i64 7, i64 [[VL]], i64 0)
+// CHECK-RV64-NEXT: ret <vscale x 1 x float> [[TMP0]]
+//
+vfloat32mf2_t test_vfwnmsac_vf_bf16mf4_f32mf2_tumu(vbool64_t vm,
+ vfloat32mf2_t vd, __bf16 vs1,
+ vbfloat16mf4_t vs2,
+ size_t vl) {
+ return __riscv_vfwnmsac_vf_bf16mf4_f32mf2_tumu(vm, vd, vs1, vs2, vl);
+}
+
+// CHECK-RV64-LABEL: define dso_local <vscale x 2 x float> @test_vfwnmsac_vv_bf16mf2_f32m1_tumu(
+// CHECK-RV64-SAME: <vscale x 2 x i1> [[VM:%.*]], <vscale x 2 x float> [[VD:%.*]], <vscale x 2 x bfloat> [[VS1:%.*]], <vscale x 2 x bfloat> [[VS2:%.*]], i64 noundef [[VL:%.*]]) #[[ATTR0]] {
+// CHECK-RV64-NEXT: [[ENTRY:.*:]]
+// CHECK-RV64-NEXT: [[TMP0:%.*]] = call <vscale x 2 x float> @llvm.riscv.vfwnmsac.mask.nxv2f32.nxv2bf16.nxv2bf16.i64(<vscale x 2 x float> [[VD]], <vscale x 2 x bfloat> [[VS1]], <vscale x 2 x bfloat> [[VS2]], <vscale x 2 x i1> [[VM]], i64 7, i64 [[VL]], i64 0)
+// CHECK-RV64-NEXT: ret <vscale x 2 x float> [[TMP0]]
+//
+vfloat32m1_t test_vfwnmsac_vv_bf16mf2_f32m1_tumu(vbool32_t vm, vfloat32m1_t vd,
+ vbfloat16mf2_t vs1,
+ vbfloat16mf2_t vs2,
+ size_t vl) {
+ return __riscv_vfwnmsac_vv_bf16mf2_f32m1_tumu(vm, vd, vs1, vs2, vl);
+}
+
+// CHECK-RV64-LABEL: define dso_local <vscale x 2 x float> @test_vfwnmsac_vf_bf16mf2_f32m1_tumu(
+// CHECK-RV64-SAME: <vscale x 2 x i1> [[VM:%.*]], <vscale x 2 x float> [[VD:%.*]], bfloat noundef [[VS1:%.*]], <vscale x 2 x bfloat> [[VS2:%.*]], i64 noundef [[VL:%.*]]) #[[ATTR0]] {
+// CHECK-RV64-NEXT: [[ENTRY:.*:]]
+// CHECK-RV64-NEXT: [[TMP0:%.*]] = call <vscale x 2 x float> @llvm.riscv.vfwnmsac.mask.nxv2f32.bf16.nxv2bf16.i64(<vscale x 2 x float> [[VD]], bfloat [[VS1]], <vscale x 2 x bfloat> [[VS2]], <vscale x 2 x i1> [[VM]], i64 7, i64 [[VL]], i64 0)
+// CHECK-RV64-NEXT: ret <vscale x 2 x float> [[TMP0]]
+//
+vfloat32m1_t test_vfwnmsac_vf_bf16mf2_f32m1_tumu(vbool32_t vm, vfloat32m1_t vd,
+ __bf16 vs1, vbfloat16mf2_t vs2,
+ size_t vl) {
+ return __riscv_vfwnmsac_vf_bf16mf2_f32m1_tumu(vm, vd, vs1, vs2, vl);
+}
+
+// CHECK-RV64-LABEL: define dso_local <vscale x 4 x float> @test_vfwnmsac_vv_bf16m1_f32m2_tumu(
+// CHECK-RV64-SAME: <vscale x 4 x i1> [[VM:%.*]], <vscale x 4 x float> [[VD:%.*]], <vscale x 4 x bfloat> [[VS1:%.*]], <vscale x 4 x bfloat> [[VS2:%.*]], i64 noundef [[VL:%.*]]) #[[ATTR0]] {
+// CHECK-RV64-NEXT: [[ENTRY:.*:]]
+// CHECK-RV64-NEXT: [[TMP0:%.*]] = call <vscale x 4 x float> @llvm.riscv.vfwnmsac.mask.nxv4f32.nxv4bf16.nxv4bf16.i64(<vscale x 4 x float> [[VD]], <vscale x 4 x bfloat> [[VS1]], <vscale x 4 x bfloat> [[VS2]], <vscale x 4 x i1> [[VM]], i64 7, i64 [[VL]], i64 0)
+// CHECK-RV64-NEXT: ret <vscale x 4 x float> [[TMP0]]
+//
+vfloat32m2_t test_vfwnmsac_vv_bf16m1_f32m2_tumu(vbool16_t vm, vfloat32m2_t vd,
+ vbfloat16m1_t vs1,
+ vbfloat16m1_t vs2, size_t vl) {
+ return __riscv_vfwnmsac_vv_bf16m1_f32m2_tumu(vm, vd, vs1, vs2, vl);
+}
+
+// CHECK-RV64-LABEL: define dso_local <vscale x 4 x float> @test_vfwnmsac_vf_bf16m1_f32m2_tumu(
+// CHECK-RV64-SAME: <vscale x 4 x i1> [[VM:%.*]], <vscale x 4 x float> [[VD:%.*]], bfloat noundef [[VS1:%.*]], <vscale x 4 x bfloat> [[VS2:%.*]], i64 noundef [[VL:%.*]]) #[[ATTR0]] {
+// CHECK-RV64-NEXT: [[ENTRY:.*:]]
+// CHECK-RV64-NEXT: [[TMP0:%.*]] = call <vscale x 4 x float> @llvm.riscv.vfwnmsac.mask.nxv4f32.bf16.nxv4bf16.i64(<vscale x 4 x float> [[VD]], bfloat [[VS1]], <vscale x 4 x bfloat> [[VS2]], <vscale x 4 x i1> [[VM]], i64 7, i64 [[VL]], i64 0)
+// CHECK-RV64-NEXT: ret <vscale x 4 x float> [[TMP0]]
+//
+vfloat32m2_t test_vfwnmsac_vf_bf16m1_f32m2_tumu(vbool16_t vm, vfloat32m2_t vd,
+ __bf16 vs1, vbfloat16m1_t vs2,
+ size_t vl) {
+ return __riscv_vfwnmsac_vf_bf16m1_f32m2_tumu(vm, vd, vs1, vs2, vl);
+}
+
+// CHECK-RV64-LABEL: define dso_local <vscale x 8 x float> @test_vfwnmsac_vv_bf16m2_f32m4_tumu(
+// CHECK-RV64-SAME: <vscale x 8 x i1> [[VM:%.*]], <vscale x 8 x float> [[VD:%.*]], <vscale x 8 x bfloat> [[VS1:%.*]], <vscale x 8 x bfloat> [[VS2:%.*]], i64 noundef [[VL:%.*]]) #[[ATTR0]] {
+// CHECK-RV64-NEXT: [[ENTRY:.*:]]
+// CHECK-RV64-NEXT: [[TMP0:%.*]] = call <vscale x 8 x float> @llvm.riscv.vfwnmsac.mask.nxv8f32.nxv8bf16.nxv8bf16.i64(<vscale x 8 x float> [[VD]], <vscale x 8 x bfloat> [[VS1]], <vscale x 8 x bfloat> [[VS2]], <vscale x 8 x i1> [[VM]], i64 7, i64 [[VL]], i64 0)
+// CHECK-RV64-NEXT: ret <vscale x 8 x float> [[TMP0]]
+//
+vfloat32m4_t test_vfwnmsac_vv_bf16m2_f32m4_tumu(vbool8_t vm, vfloat32m4_t vd,
+ vbfloat16m2_t vs1,
+ vbfloat16m2_t vs2, size_t vl) {
+ return __riscv_vfwnmsac_vv_bf16m2_f32m4_tumu(vm, vd, vs1, vs2, vl);
+}
+
+// CHECK-RV64-LABEL: define dso_local <vscale x 8 x float> @test_vfwnmsac_vf_bf16m2_f32m4_tumu(
+// CHECK-RV64-SAME: <vscale x 8 x i1> [[VM:%.*]], <vscale x 8 x float> [[VD:%.*]], bfloat noundef [[VS1:%.*]], <vscale x 8 x bfloat> [[VS2:%.*]], i64 noundef [[VL:%.*]]) #[[ATTR0]] {
+// CHECK-RV64-NEXT: [[ENTRY:.*:]]
+// CHECK-RV64-NEXT: [[TMP0:%.*]] = call <vscale x 8 x float> @llvm.riscv.vfwnmsac.mask.nxv8f32.bf16.nxv8bf16.i64(<vscale x 8 x float> [[VD]], bfloat [[VS1]], <vscale x 8 x bfloat> [[VS2]], <vscale x 8 x i1> [[VM]], i64 7, i64 [[VL]], i64 0)
+// CHECK-RV64-NEXT: ret <vscale x 8 x float> [[TMP0]]
+//
+vfloat32m4_t test_vfwnmsac_vf_bf16m2_f32m4_tumu(vbool8_t vm, vfloat32m4_t vd,
+ __bf16 vs1, vbfloat16m2_t vs2,
+ size_t vl) {
+ return __riscv_vfwnmsac_vf_bf16m2_f32m4_tumu(vm, vd, vs1, vs2, vl);
+}
+
+// CHECK-RV64-LABEL: define dso_local <vscale x 16 x float> @test_vfwnmsac_vv_bf16m4_f32m8_tumu(
+// CHECK-RV64-SAME: <vscale x 16 x i1> [[VM:%.*]], <vscale x 16 x float> [[VD:%.*]], <vscale x 16 x bfloat> [[VS1:%.*]], <vscale x 16 x bfloat> [[VS2:%.*]], i64 noundef [[VL:%.*]]) #[[ATTR0]] {
+// CHECK-RV64-NEXT: [[ENTRY:.*:]]
+// CHECK-RV64-NEXT: [[TMP0:%.*]] = call <vscale x 16 x float> @llvm.riscv.vfwnmsac.mask.nxv16f32.nxv16bf16.nxv16bf16.i64(<vscale x 16 x float> [[VD]], <vscale x 16 x bfloat> [[VS1]], <vscale x 16 x bfloat> [[VS2]], <vscale x 16 x i1> [[VM]], i64 7, i64 [[VL]], i64 0)
+// CHECK-RV64-NEXT: ret <vscale x 16 x float> [[TMP0]]
+//
+vfloat32m8_t test_vfwnmsac_vv_bf16m4_f32m8_tumu(vbool4_t vm, vfloat32m8_t vd,
+ vbfloat16m4_t vs1,
+ vbfloat16m4_t vs2, size_t vl) {
+ return __riscv_vfwnmsac_vv_bf16m4_f32m8_tumu(vm, vd, vs1, vs2, vl);
+}
+
+// CHECK-RV64-LABEL: define dso_local <vscale x 16 x float> @test_vfwnmsac_vf_bf16m4_f32m8_tumu(
+// CHECK-RV64-SAME: <vscale x 16 x i1> [[VM:%.*]], <vscale x 16 x float> [[VD:%.*]], bfloat noundef [[VS1:%.*]], <vscale x 16 x bfloat> [[VS2:%.*]], i64 noundef [[VL:%.*]]) #[[ATTR0]] {
+// CHECK-RV64-NEXT: [[ENTRY:.*:]]
+// CHECK-RV64-NEXT: [[TMP0:%.*]] = call <vscale x 16 x float> @llvm.riscv.vfwnmsac.mask.nxv16f32.bf16.nxv16bf16.i64(<vscale x 16 x float> [[VD]], bfloat [[VS1]], <vscale x 16 x bfloat> [[VS2]], <vscale x 16 x i1> [[VM]], i64 7, i64 [[VL]], i64 0)
+// CHECK-RV64-NEXT: ret <vscale x 16 x float> [[TMP0]]
+//
+vfloat32m8_t test_vfwnmsac_vf_bf16m4_f32m8_tumu(vbool4_t vm, vfloat32m8_t vd,
+ __bf16 vs1, vbfloat16m4_t vs2,
+ size_t vl) {
+ return __riscv_vfwnmsac_vf_bf16m4_f32m8_tumu(vm, vd, vs1, vs2, vl);
+}
+
+// CHECK-RV64-LABEL: define dso_local <vscale x 1 x float> @test_vfwnmsac_vv_bf16mf4_f32mf2_mu(
+// CHECK-RV64-SAME: <vscale x 1 x i1> [[VM:%.*]], <vscale x 1 x float> [[VD:%.*]], <vscale x 1 x bfloat> [[VS1:%.*]], <vscale x 1 x bfloat> [[VS2:%.*]], i64 noundef [[VL:%.*]]) #[[ATTR0]] {
+// CHECK-RV64-NEXT: [[ENTRY:.*:]]
+// CHECK-RV64-NEXT: [[TMP0:%.*]] = call <vscale x 1 x float> @llvm.riscv.vfwnmsac.mask.nxv1f32.nxv1bf16.nxv1bf16.i64(<vscale x 1 x float> [[VD]], <vscale x 1 x bfloat> [[VS1]], <vscale x 1 x bfloat> [[VS2]], <vscale x 1 x i1> [[VM]], i64 7, i64 [[VL]], i64 1)
+// CHECK-RV64-NEXT: ret <vscale x 1 x float> [[TMP0]]
+//
+vfloat32mf2_t test_vfwnmsac_vv_bf16mf4_f32mf2_mu(vbool64_t vm, vfloat32mf2_t vd,
+ vbfloat16mf4_t vs1,
+ vbfloat16mf4_t vs2,
+ size_t vl) {
+ return __riscv_vfwnmsac_vv_bf16mf4_f32mf2_mu(vm, vd, vs1, vs2, vl);
+}
+
+// CHECK-RV64-LABEL: define dso_local <vscale x 1 x float> @test_vfwnmsac_vf_bf16mf4_f32mf2_mu(
+// CHECK-RV64-SAME: <vscale x 1 x i1> [[VM:%.*]], <vscale x 1 x float> [[VD:%.*]], bfloat noundef [[VS1:%.*]], <vscale x 1 x bfloat> [[VS2:%.*]], i64 noundef [[VL:%.*]]) #[[ATTR0]] {
+// CHECK-RV64-NEXT: [[ENTRY:.*:]]
+// CHECK-RV64-NEXT: [[TMP0:%.*]] = call <vscale x 1 x float> @llvm.riscv.vfwnmsac.mask.nxv1f32.bf16.nxv1bf16.i64(<vscale x 1 x float> [[VD]], bfloat [[VS1]], <vscale x 1 x bfloat> [[VS2]], <vscale x 1 x i1> [[VM]], i64 7, i64 [[VL]], i64 1)
+// CHECK-RV64-NEXT: ret <vscale x 1 x float> [[TMP0]]
+//
+vfloat32mf2_t test_vfwnmsac_vf_bf16mf4_f32mf2_mu(vbool64_t vm, vfloat32mf2_t vd,
+ __bf16 vs1, vbfloat16mf4_t vs2,
+ size_t vl) {
+ return __riscv_vfwnmsac_vf_bf16mf4_f32mf2_mu(vm, vd, vs1, vs2, vl);
+}
+
+// CHECK-RV64-LABEL: define dso_local <vscale x 2 x float> @test_vfwnmsac_vv_bf16mf2_f32m1_mu(
+// CHECK-RV64-SAME: <vscale x 2 x i1> [[VM:%.*]], <vscale x 2 x float> [[VD:%.*]], <vscale x 2 x bfloat> [[VS1:%.*]], <vscale x 2 x bfloat> [[VS2:%.*]], i64 noundef [[VL:%.*]]) #[[ATTR0]] {
+// CHECK-RV64-NEXT: [[ENTRY:.*:]]
+// CHECK-RV64-NEXT: [[TMP0:%.*]] = call <vscale x 2 x float> @llvm.riscv.vfwnmsac.mask.nxv2f32.nxv2bf16.nxv2bf16.i64(<vscale x 2 x float> [[VD]], <vscale x 2 x bfloat> [[VS1]], <vscale x 2 x bfloat> [[VS2]], <vscale x 2 x i1> [[VM]], i64 7, i64 [[VL]], i64 1)
+// CHECK-RV64-NEXT: ret <vscale x 2 x float> [[TMP0]]
+//
+vfloat32m1_t test_vfwnmsac_vv_bf16mf2_f32m1_mu(vbool32_t vm, vfloat32m1_t vd,
+ vbfloat16mf2_t vs1,
+ vbfloat16mf2_t vs2, size_t vl) {
+ return __riscv_vfwnmsac_vv_bf16mf2_f32m1_mu(vm, vd, vs1, vs2, vl);
+}
+
+// CHECK-RV64-LABEL: define dso_local <vscale x 2 x float> @test_vfwnmsac_vf_bf16mf2_f32m1_mu(
+// CHECK-RV64-SAME: <vscale x 2 x i1> [[VM:%.*]], <vscale x 2 x float> [[VD:%.*]], bfloat noundef [[VS1:%.*]], <vscale x 2 x bfloat> [[VS2:%.*]], i64 noundef [[VL:%.*]]) #[[ATTR0]] {
+// CHECK-RV64-NEXT: [[ENTRY:.*:]]
+// CHECK-RV64-NEXT: [[TMP0:%.*]] = call <vscale x 2 x float> @llvm.riscv.vfwnmsac.mask.nxv2f32.bf16.nxv2bf16.i64(<vscale x 2 x float> [[VD]], bfloat [[VS1]], <vscale x 2 x bfloat> [[VS2]], <vscale x 2 x i1> [[VM]], i64 7, i64 [[VL]], i64 1)
+// CHECK-RV64-NEXT: ret <vscale x 2 x float> [[TMP0]]
+//
+vfloat32m1_t test_vfwnmsac_vf_bf16mf2_f32m1_mu(vbool32_t vm, vfloat32m1_t vd,
+ __bf16 vs1, vbfloat16mf2_t vs2,
+ size_t vl) {
+ return __riscv_vfwnmsac_vf_bf16mf2_f32m1_mu(vm, vd, vs1, vs2, vl);
+}
+
+// CHECK-RV64-LABEL: define dso_local <vscale x 4 x float> @test_vfwnmsac_vv_bf16m1_f32m2_mu(
+// CHECK-RV64-SAME: <vscale x 4 x i1> [[VM:%.*]], <vscale x 4 x float> [[VD:%.*]], <vscale x 4 x bfloat> [[VS1:%.*]], <vscale x 4 x bfloat> [[VS2:%.*]], i64 noundef [[VL:%.*]]) #[[ATTR0]] {
+// CHECK-RV64-NEXT: [[ENTRY:.*:]]
+// CHECK-RV64-NEXT: [[TMP0:%.*]] = call <vscale x 4 x float> @llvm.riscv.vfwnmsac.mask.nxv4f32.nxv4bf16.nxv4bf16.i64(<vscale x 4 x float> [[VD]], <vscale x 4 x bfloat> [[VS1]], <vscale x 4 x bfloat> [[VS2]], <vscale x 4 x i1> [[VM]], i64 7, i64 [[VL]], i64 1)
+// CHECK-RV64-NEXT: ret <vscale x 4 x float> [[TMP0]]
+//
+vfloat32m2_t test_vfwnmsac_vv_bf16m1_f32m2_mu(vbool16_t vm, vfloat32m2_t vd,
+ vbfloat16m1_t vs1,
+ vbfloat16m1_t vs2, size_t vl) {
+ return __riscv_vfwnmsac_vv_bf16m1_f32m2_mu(vm, vd, vs1, vs2, vl);
+}
+
+// CHECK-RV64-LABEL: define dso_local <vscale x 4 x float> @test_vfwnmsac_vf_bf16m1_f32m2_mu(
+// CHECK-RV64-SAME: <vscale x 4 x i1> [[VM:%.*]], <vscale x 4 x float> [[VD:%.*]], bfloat noundef [[VS1:%.*]], <vscale x 4 x bfloat> [[VS2:%.*]], i64 noundef [[VL:%.*]]) #[[ATTR0]] {
+// CHECK-RV64-NEXT: [[ENTRY:.*:]]
+// CHECK-RV64-NEXT: [[TMP0:%.*]] = call <vscale x 4 x float> @llvm.riscv.vfwnmsac.mask.nxv4f32.bf16.nxv4bf16.i64(<vscale x 4 x float> [[VD]], bfloat [[VS1]], <vscale x 4 x bfloat> [[VS2]], <vscale x 4 x i1> [[VM]], i64 7, i64 [[VL]], i64 1)
+// CHECK-RV64-NEXT: ret <vscale x 4 x float> [[TMP0]]
+//
+vfloat32m2_t test_vfwnmsac_vf_bf16m1_f32m2_mu(vbool16_t vm, vfloat32m2_t vd,
+ __bf16 vs1, vbfloat16m1_t vs2,
+ size_t vl) {
+ return __riscv_vfwnmsac_vf_bf16m1_f32m2_mu(vm, vd, vs1, vs2, vl);
+}
+
+// CHECK-RV64-LABEL: define dso_local <vscale x 8 x float> @test_vfwnmsac_vv_bf16m2_f32m4_mu(
+// CHECK-RV64-SAME: <vscale x 8 x i1> [[VM:%.*]], <vscale x 8 x float> [[VD:%.*]], <vscale x 8 x bfloat> [[VS1:%.*]], <vscale x 8 x bfloat> [[VS2:%.*]], i64 noundef [[VL:%.*]]) #[[ATTR0]] {
+// CHECK-RV64-NEXT: [[ENTRY:.*:]]
+// CHECK-RV64-NEXT: [[TMP0:%.*]] = call <vscale x 8 x float> @llvm.riscv.vfwnmsac.mask.nxv8f32.nxv8bf16.nxv8bf16.i64(<vscale x 8 x float> [[VD]], <vscale x 8 x bfloat> [[VS1]], <vscale x 8 x bfloat> [[VS2]], <vscale x 8 x i1> [[VM]], i64 7, i64 [[VL]], i64 1)
+// CHECK-RV64-NEXT: ret <vscale x 8 x float> [[TMP0]]
+//
+vfloat32m4_t test_vfwnmsac_vv_bf16m2_f32m4_mu(vbool8_t vm, vfloat32m4_t vd,
+ vbfloat16m2_t vs1,
+ vbfloat16m2_t vs2, size_t vl) {
+ return __riscv_vfwnmsac_vv_bf16m2_f32m4_mu(vm, vd, vs1, vs2, vl);
+}
+
+// CHECK-RV64-LABEL: define dso_local <vscale x 8 x float> @test_vfwnmsac_vf_bf16m2_f32m4_mu(
+// CHECK-RV64-SAME: <vscale x 8 x i1> [[VM:%.*]], <vscale x 8 x float> [[VD:%.*]], bfloat noundef [[VS1:%.*]], <vscale x 8 x bfloat> [[VS2:%.*]], i64 noundef [[VL:%.*]]) #[[ATTR0]] {
+// CHECK-RV64-NEXT: [[ENTRY:.*:]]
+// CHECK-RV64-NEXT: [[TMP0:%.*]] = call <vscale x 8 x float> @llvm.riscv.vfwnmsac.mask.nxv8f32.bf16.nxv8bf16.i64(<vscale x 8 x float> [[VD]], bfloat [[VS1]], <vscale x 8 x bfloat> [[VS2]], <vscale x 8 x i1> [[VM]], i64 7, i64 [[VL]], i64 1)
+// CHECK-RV64-NEXT: ret <vscale x 8 x float> [[TMP0]]
+//
+vfloat32m4_t test_vfwnmsac_vf_bf16m2_f32m4_mu(vbool8_t vm, vfloat32m4_t vd,
+ __bf16 vs1, vbfloat16m2_t vs2,
+ size_t vl) {
+ return __riscv_vfwnmsac_vf_bf16m2_f32m4_mu(vm, vd, vs1, vs2, vl);
+}
+
+// CHECK-RV64-LABEL: define dso_local <vscale x 16 x float> @test_vfwnmsac_vv_bf16m4_f32m8_mu(
+// CHECK-RV64-SAME: <vscale x 16 x i1> [[VM:%.*]], <vscale x 16 x float> [[VD:%.*]], <vscale x 16 x bfloat> [[VS1:%.*]], <vscale x 16 x bfloat> [[VS2:%.*]], i64 noundef [[VL:%.*]]) #[[ATTR0]] {
+// CHECK-RV64-NEXT: [[ENTRY:.*:]]
+// CHECK-RV64-NEXT: [[TMP0:%.*]] = call <vscale x 16 x float> @llvm.riscv.vfwnmsac.mask.nxv16f32.nxv16bf16.nxv16bf16.i64(<vscale x 16 x float> [[VD]], <vscale x 16 x bfloat> [[VS1]], <vscale x 16 x bfloat> [[VS2]], <vscale x 16 x i1> [[VM]], i64 7, i64 [[VL]], i64 1)
+// CHECK-RV64-NEXT: ret <vscale x 16 x float> [[TMP0]]
+//
+vfloat32m8_t test_vfwnmsac_vv_bf16m4_f32m8_mu(vbool4_t vm, vfloat32m8_t vd,
+ vbfloat16m4_t vs1,
+ vbfloat16m4_t vs2, size_t vl) {
+ return __riscv_vfwnmsac_vv_bf16m4_f32m8_mu(vm, vd, vs1, vs2, vl);
+}
+
+// CHECK-RV64-LABEL: define dso_local <vscale x 16 x float> @test_vfwnmsac_vf_bf16m4_f32m8_mu(
+// CHECK-RV64-SAME: <vscale x 16 x i1> [[VM:%.*]], <vscale x 16 x float> [[VD:%.*]], bfloat noundef [[VS1:%.*]], <vscale x 16 x bfloat> [[VS2:%.*]], i64 noundef [[VL:%.*]]) #[[ATTR0]] {
+// CHECK-RV64-NEXT: [[ENTRY:.*:]]
+// CHECK-RV64-NEXT: [[TMP0:%.*]] = call <vscale x 16 x float> @llvm.riscv.vfwnmsac.mask.nxv16f32.bf16.nxv16bf16.i64(<vscale x 16 x float> [[VD]], bfloat [[VS1]], <vscale x 16 x bfloat> [[VS2]], <vscale x 16 x i1> [[VM]], i64 7, i64 [[VL]], i64 1)
+// CHECK-RV64-NEXT: ret <vscale x 16 x float> [[TMP0]]
+//
+vfloat32m8_t test_vfwnmsac_vf_bf16m4_f32m8_mu(vbool4_t vm, vfloat32m8_t vd,
+ __bf16 vs1, vbfloat16m4_t vs2,
+ size_t vl) {
+ return __riscv_vfwnmsac_vf_bf16m4_f32m8_mu(vm, vd, vs1, vs2, vl);
+}
+
+// CHECK-RV64-LABEL: define dso_local <vscale x 1 x float> @test_vfwnmsac_vv_bf16mf4_f32mf2_rm_tu(
+// CHECK-RV64-SAME: <vscale x 1 x float> [[VD:%.*]], <vscale x 1 x bfloat> [[VS1:%.*]], <vscale x 1 x bfloat> [[VS2:%.*]], i64 noundef [[VL:%.*]]) #[[ATTR0]] {
+// CHECK-RV64-NEXT: [[ENTRY:.*:]]
+// CHECK-RV64-NEXT: [[TMP0:%.*]] = call <vscale x 1 x float> @llvm.riscv.vfwnmsac.nxv1f32.nxv1bf16.nxv1bf16.i64(<vscale x 1 x float> [[VD]], <vscale x 1 x bfloat> [[VS1]], <vscale x 1 x bfloat> [[VS2]], i64 0, i64 [[VL]], i64 2)
+// CHECK-RV64-NEXT: ret <vscale x 1 x float> [[TMP0]]
+//
+vfloat32mf2_t test_vfwnmsac_vv_bf16mf4_f32mf2_rm_tu(vfloat32mf2_t vd,
+ vbfloat16mf4_t vs1,
+ vbfloat16mf4_t vs2,
+ size_t vl) {
+ return __riscv_vfwnmsac_vv_bf16mf4_f32mf2_rm_tu(vd, vs1, vs2, __RISCV_FRM_RNE,
+ vl);
+}
+
+// CHECK-RV64-LABEL: define dso_local <vscale x 1 x float> @test_vfwnmsac_vf_bf16mf4_f32mf2_rm_tu(
+// CHECK-RV64-SAME: <vscale x 1 x float> [[VD:%.*]], bfloat noundef [[VS1:%.*]], <vscale x 1 x bfloat> [[VS2:%.*]], i64 noundef [[VL:%.*]]) #[[ATTR0]] {
+// CHECK-RV64-NEXT: [[ENTRY:.*:]]
+// CHECK-RV64-NEXT: [[TMP0:%.*]] = call <vscale x 1 x float> @llvm.riscv.vfwnmsac.nxv1f32.bf16.nxv1bf16.i64(<vscale x 1 x float> [[VD]], bfloat [[VS1]], <vscale x 1 x bfloat> [[VS2]], i64 0, i64 [[VL]], i64 2)
+// CHECK-RV64-NEXT: ret <vscale x 1 x float> [[TMP0]]
+//
+vfloat32mf2_t test_vfwnmsac_vf_bf16mf4_f32mf2_rm_tu(vfloat32mf2_t vd,
+ __bf16 vs1,
+ vbfloat16mf4_t vs2,
+ size_t vl) {
+ return __riscv_vfwnmsac_vf_bf16mf4_f32mf2_rm_tu(vd, vs1, vs2, __RISCV_FRM_RNE,
+ vl);
+}
+
+// CHECK-RV64-LABEL: define dso_local <vscale x 2 x float> @test_vfwnmsac_vv_bf16mf2_f32m1_rm_tu(
+// CHECK-RV64-SAME: <vscale x 2 x float> [[VD:%.*]], <vscale x 2 x bfloat> [[VS1:%.*]], <vscale x 2 x bfloat> [[VS2:%.*]], i64 noundef [[VL:%.*]]) #[[ATTR0]] {
+// CHECK-RV64-NEXT: [[ENTRY:.*:]]
+// CHECK-RV64-NEXT: [[TMP0:%.*]] = call <vscale x 2 x float> @llvm.riscv.vfwnmsac.nxv2f32.nxv2bf16.nxv2bf16.i64(<vscale x 2 x float> [[VD]], <vscale x 2 x bfloat> [[VS1]], <vscale x 2 x bfloat> [[VS2]], i64 0, i64 [[VL]], i64 2)
+// CHECK-RV64-NEXT: ret <vscale x 2 x float> [[TMP0]]
+//
+vfloat32m1_t test_vfwnmsac_vv_bf16mf2_f32m1_rm_tu(vfloat32m1_t vd,
+ vbfloat16mf2_t vs1,
+ vbfloat16mf2_t vs2,
+ size_t vl) {
+ return __riscv_vfwnmsac_vv_bf16mf2_f32m1_rm_tu(vd, vs1, vs2, __RISCV_FRM_RNE,
+ vl);
+}
+
+// CHECK-RV64-LABEL: define dso_local <vscale x 2 x float> @test_vfwnmsac_vf_bf16mf2_f32m1_rm_tu(
+// CHECK-RV64-SAME: <vscale x 2 x float> [[VD:%.*]], bfloat noundef [[VS1:%.*]], <vscale x 2 x bfloat> [[VS2:%.*]], i64 noundef [[VL:%.*]]) #[[ATTR0]] {
+// CHECK-RV64-NEXT: [[ENTRY:.*:]]
+// CHECK-RV64-NEXT: [[TMP0:%.*]] = call <vscale x 2 x float> @llvm.riscv.vfwnmsac.nxv2f32.bf16.nxv2bf16.i64(<vscale x 2 x float> [[VD]], bfloat [[VS1]], <vscale x 2 x bfloat> [[VS2]], i64 0, i64 [[VL]], i64 2)
+// CHECK-RV64-NEXT: ret <vscale x 2 x float> [[TMP0]]
+//
+vfloat32m1_t test_vfwnmsac_vf_bf16mf2_f32m1_rm_tu(vfloat32m1_t vd, __bf16 vs1,
+ vbfloat16mf2_t vs2,
+ size_t vl) {
+ return __riscv_vfwnmsac_vf_bf16mf2_f32m1_rm_tu(vd, vs1, vs2, __RISCV_FRM_RNE,
+ vl);
+}
+
+// CHECK-RV64-LABEL: define dso_local <vscale x 4 x float> @test_vfwnmsac_vv_bf16m1_f32m2_rm_tu(
+// CHECK-RV64-SAME: <vscale x 4 x float> [[VD:%.*]], <vscale x 4 x bfloat> [[VS1:%.*]], <vscale x 4 x bfloat> [[VS2:%.*]], i64 noundef [[VL:%.*]]) #[[ATTR0]] {
+// CHECK-RV64-NEXT: [[ENTRY:.*:]]
+// CHECK-RV64-NEXT: [[TMP0:%.*]] = call <vscale x 4 x float> @llvm.riscv.vfwnmsac.nxv4f32.nxv4bf16.nxv4bf16.i64(<vscale x 4 x float> [[VD]], <vscale x 4 x bfloat> [[VS1]], <vscale x 4 x bfloat> [[VS2]], i64 0, i64 [[VL]], i64 2)
+// CHECK-RV64-NEXT: ret <vscale x 4 x float> [[TMP0]]
+//
+vfloat32m2_t test_vfwnmsac_vv_bf16m1_f32m2_rm_tu(vfloat32m2_t vd,
+ vbfloat16m1_t vs1,
+ vbfloat16m1_t vs2, size_t vl) {
+ return __riscv_vfwnmsac_vv_bf16m1_f32m2_rm_tu(vd, vs1, vs2, __RISCV_FRM_RNE,
+ vl);
+}
+
+// CHECK-RV64-LABEL: define dso_local <vscale x 4 x float> @test_vfwnmsac_vf_bf16m1_f32m2_rm_tu(
+// CHECK-RV64-SAME: <vscale x 4 x float> [[VD:%.*]], bfloat noundef [[VS1:%.*]], <vscale x 4 x bfloat> [[VS2:%.*]], i64 noundef [[VL:%.*]]) #[[ATTR0]] {
+// CHECK-RV64-NEXT: [[ENTRY:.*:]]
+// CHECK-RV64-NEXT: [[TMP0:%.*]] = call <vscale x 4 x float> @llvm.riscv.vfwnmsac.nxv4f32.bf16.nxv4bf16.i64(<vscale x 4 x float> [[VD]], bfloat [[VS1]], <vscale x 4 x bfloat> [[VS2]], i64 0, i64 [[VL]], i64 2)
+// CHECK-RV64-NEXT: ret <vscale x 4 x float> [[TMP0]]
+//
+vfloat32m2_t test_vfwnmsac_vf_bf16m1_f32m2_rm_tu(vfloat32m2_t vd, __bf16 vs1,
+ vbfloat16m1_t vs2, size_t vl) {
+ return __riscv_vfwnmsac_vf_bf16m1_f32m2_rm_tu(vd, vs1, vs2, __RISCV_FRM_RNE,
+ vl);
+}
+
+// CHECK-RV64-LABEL: define dso_local <vscale x 8 x float> @test_vfwnmsac_vv_bf16m2_f32m4_rm_tu(
+// CHECK-RV64-SAME: <vscale x 8 x float> [[VD:%.*]], <vscale x 8 x bfloat> [[VS1:%.*]], <vscale x 8 x bfloat> [[VS2:%.*]], i64 noundef [[VL:%.*]]) #[[ATTR0]] {
+// CHECK-RV64-NEXT: [[ENTRY:.*:]]
+// CHECK-RV64-NEXT: [[TMP0:%.*]] = call <vscale x 8 x float> @llvm.riscv.vfwnmsac.nxv8f32.nxv8bf16.nxv8bf16.i64(<vscale x 8 x float> [[VD]], <vscale x 8 x bfloat> [[VS1]], <vscale x 8 x bfloat> [[VS2]], i64 0, i64 [[VL]], i64 2)
+// CHECK-RV64-NEXT: ret <vscale x 8 x float> [[TMP0]]
+//
+vfloat32m4_t test_vfwnmsac_vv_bf16m2_f32m4_rm_tu(vfloat32m4_t vd,
+ vbfloat16m2_t vs1,
+ vbfloat16m2_t vs2, size_t vl) {
+ return __riscv_vfwnmsac_vv_bf16m2_f32m4_rm_tu(vd, vs1, vs2, __RISCV_FRM_RNE,
+ vl);
+}
+
+// CHECK-RV64-LABEL: define dso_local <vscale x 8 x float> @test_vfwnmsac_vf_bf16m2_f32m4_rm_tu(
+// CHECK-RV64-SAME: <vscale x 8 x float> [[VD:%.*]], bfloat noundef [[VS1:%.*]], <vscale x 8 x bfloat> [[VS2:%.*]], i64 noundef [[VL:%.*]]) #[[ATTR0]] {
+// CHECK-RV64-NEXT: [[ENTRY:.*:]]
+// CHECK-RV64-NEXT: [[TMP0:%.*]] = call <vscale x 8 x float> @llvm.riscv.vfwnmsac.nxv8f32.bf16.nxv8bf16.i64(<vscale x 8 x float> [[VD]], bfloat [[VS1]], <vscale x 8 x bfloat> [[VS2]], i64 0, i64 [[VL]], i64 2)
+// CHECK-RV64-NEXT: ret <vscale x 8 x float> [[TMP0]]
+//
+vfloat32m4_t test_vfwnmsac_vf_bf16m2_f32m4_rm_tu(vfloat32m4_t vd, __bf16 vs1,
+ vbfloat16m2_t vs2, size_t vl) {
+ return __riscv_vfwnmsac_vf_bf16m2_f32m4_rm_tu(vd, vs1, vs2, __RISCV_FRM_RNE,
+ vl);
+}
+
+// CHECK-RV64-LABEL: define dso_local <vscale x 16 x float> @test_vfwnmsac_vv_bf16m4_f32m8_rm_tu(
+// CHECK-RV64-SAME: <vscale x 16 x float> [[VD:%.*]], <vscale x 16 x bfloat> [[VS1:%.*]], <vscale x 16 x bfloat> [[VS2:%.*]], i64 noundef [[VL:%.*]]) #[[ATTR0]] {
+// CHECK-RV64-NEXT: [[ENTRY:.*:]]
+// CHECK-RV64-NEXT: [[TMP0:%.*]] = call <vscale x 16 x float> @llvm.riscv.vfwnmsac.nxv16f32.nxv16bf16.nxv16bf16.i64(<vscale x 16 x float> [[VD]], <vscale x 16 x bfloat> [[VS1]], <vscale x 16 x bfloat> [[VS2]], i64 0, i64 [[VL]], i64 2)
+// CHECK-RV64-NEXT: ret <vscale x 16 x float> [[TMP0]]
+//
+vfloat32m8_t test_vfwnmsac_vv_bf16m4_f32m8_rm_tu(vfloat32m8_t vd,
+ vbfloat16m4_t vs1,
+ vbfloat16m4_t vs2, size_t vl) {
+ return __riscv_vfwnmsac_vv_bf16m4_f32m8_rm_tu(vd, vs1, vs2, __RISCV_FRM_RNE,
+ vl);
+}
+
+// CHECK-RV64-LABEL: define dso_local <vscale x 16 x float> @test_vfwnmsac_vf_bf16m4_f32m8_rm_tu(
+// CHECK-RV64-SAME: <vscale x 16 x float> [[VD:%.*]], bfloat noundef [[VS1:%.*]], <vscale x 16 x bfloat> [[VS2:%.*]], i64 noundef [[VL:%.*]]) #[[ATTR0]] {
+// CHECK-RV64-NEXT: [[ENTRY:.*:]]
+// CHECK-RV64-NEXT: [[TMP0:%.*]] = call <vscale x 16 x float> @llvm.riscv.vfwnmsac.nxv16f32.bf16.nxv16bf16.i64(<vscale x 16 x float> [[VD]], bfloat [[VS1]], <vscale x 16 x bfloat> [[VS2]], i64 0, i64 [[VL]], i64 2)
+// CHECK-RV64-NEXT: ret <vscale x 16 x float> [[TMP0]]
+//
+vfloat32m8_t test_vfwnmsac_vf_bf16m4_f32m8_rm_tu(vfloat32m8_t vd, __bf16 vs1,
+ vbfloat16m4_t vs2, size_t vl) {
+ return __riscv_vfwnmsac_vf_bf16m4_f32m8_rm_tu(vd, vs1, vs2, __RISCV_FRM_RNE,
+ vl);
+}
+
+// CHECK-RV64-LABEL: define dso_local <vscale x 1 x float> @test_vfwnmsac_vv_bf16mf4_f32mf2_rm_tum(
+// CHECK-RV64-SAME: <vscale x 1 x i1> [[VM:%.*]], <vscale x 1 x float> [[VD:%.*]], <vscale x 1 x bfloat> [[VS1:%.*]], <vscale x 1 x bfloat> [[VS2:%.*]], i64 noundef [[VL:%.*]]) #[[ATTR0]] {
+// CHECK-RV64-NEXT: [[ENTRY:.*:]]
+// CHECK-RV64-NEXT: [[TMP0:%.*]] = call <vscale x 1 x float> @llvm.riscv.vfwnmsac.mask.nxv1f32.nxv1bf16.nxv1bf16.i64(<vscale x 1 x float> [[VD]], <vscale x 1 x bfloat> [[VS1]], <vscale x 1 x bfloat> [[VS2]], <vscale x 1 x i1> [[VM]], i64 0, i64 [[VL]], i64 2)
+// CHECK-RV64-NEXT: ret <vscale x 1 x float> [[TMP0]]
+//
+vfloat32mf2_t test_vfwnmsac_vv_bf16mf4_f32mf2_rm_tum(vbool64_t vm,
+ vfloat32mf2_t vd,
+ vbfloat16mf4_t vs1,
+ vbfloat16mf4_t vs2,
+ size_t vl) {
+ return __riscv_vfwnmsac_vv_bf16mf4_f32mf2_rm_tum(vm, vd, vs1, vs2,
+ __RISCV_FRM_RNE, vl);
+}
+
+// CHECK-RV64-LABEL: define dso_local <vscale x 1 x float> @test_vfwnmsac_vf_bf16mf4_f32mf2_rm_tum(
+// CHECK-RV64-SAME: <vscale x 1 x i1> [[VM:%.*]], <vscale x 1 x float> [[VD:%.*]], bfloat noundef [[VS1:%.*]], <vscale x 1 x bfloat> [[VS2:%.*]], i64 noundef [[VL:%.*]]) #[[ATTR0]] {
+// CHECK-RV64-NEXT: [[ENTRY:.*:]]
+// CHECK-RV64-NEXT: [[TMP0:%.*]] = call <vscale x 1 x float> @llvm.riscv.vfwnmsac.mask.nxv1f32.bf16.nxv1bf16.i64(<vscale x 1 x float> [[VD]], bfloat [[VS1]], <vscale x 1 x bfloat> [[VS2]], <vscale x 1 x i1> [[VM]], i64 0, i64 [[VL]], i64 2)
+// CHECK-RV64-NEXT: ret <vscale x 1 x float> [[TMP0]]
+//
+vfloat32mf2_t test_vfwnmsac_vf_bf16mf4_f32mf2_rm_tum(
+ vbool64_t vm, vfloat32mf2_t vd, __bf16 vs1, vbfloat16mf4_t vs2, size_t vl) {
+ return __riscv_vfwnmsac_vf_bf16mf4_f32mf2_rm_tum(vm, vd, vs1, vs2,
+ __RISCV_FRM_RNE, vl);
+}
+
+// CHECK-RV64-LABEL: define dso_local <vscale x 2 x float> @test_vfwnmsac_vv_bf16mf2_f32m1_rm_tum(
+// CHECK-RV64-SAME: <vscale x 2 x i1> [[VM:%.*]], <vscale x 2 x float> [[VD:%.*]], <vscale x 2 x bfloat> [[VS1:%.*]], <vscale x 2 x bfloat> [[VS2:%.*]], i64 noundef [[VL:%.*]]) #[[ATTR0]] {
+// CHECK-RV64-NEXT: [[ENTRY:.*:]]
+// CHECK-RV64-NEXT: [[TMP0:%.*]] = call <vscale x 2 x float> @llvm.riscv.vfwnmsac.mask.nxv2f32.nxv2bf16.nxv2bf16.i64(<vscale x 2 x float> [[VD]], <vscale x 2 x bfloat> [[VS1]], <vscale x 2 x bfloat> [[VS2]], <vscale x 2 x i1> [[VM]], i64 0, i64 [[VL]], i64 2)
+// CHECK-RV64-NEXT: ret <vscale x 2 x float> [[TMP0]]
+//
+vfloat32m1_t test_vfwnmsac_vv_bf16mf2_f32m1_rm_tum(vbool32_t vm,
+ vfloat32m1_t vd,
+ vbfloat16mf2_t vs1,
+ vbfloat16mf2_t vs2,
+ size_t vl) {
+ return __riscv_vfwnmsac_vv_bf16mf2_f32m1_rm_tum(vm, vd, vs1, vs2,
+ __RISCV_FRM_RNE, vl);
+}
+
+// CHECK-RV64-LABEL: define dso_local <vscale x 2 x float> @test_vfwnmsac_vf_bf16mf2_f32m1_rm_tum(
+// CHECK-RV64-SAME: <vscale x 2 x i1> [[VM:%.*]], <vscale x 2 x float> [[VD:%.*]], bfloat noundef [[VS1:%.*]], <vscale x 2 x bfloat> [[VS2:%.*]], i64 noundef [[VL:%.*]]) #[[ATTR0]] {
+// CHECK-RV64-NEXT: [[ENTRY:.*:]]
+// CHECK-RV64-NEXT: [[TMP0:%.*]] = call <vscale x 2 x float> @llvm.riscv.vfwnmsac.mask.nxv2f32.bf16.nxv2bf16.i64(<vscale x 2 x float> [[VD]], bfloat [[VS1]], <vscale x 2 x bfloat> [[VS2]], <vscale x 2 x i1> [[VM]], i64 0, i64 [[VL]], i64 2)
+// CHECK-RV64-NEXT: ret <vscale x 2 x float> [[TMP0]]
+//
+vfloat32m1_t test_vfwnmsac_vf_bf16mf2_f32m1_rm_tum(vbool32_t vm,
+ vfloat32m1_t vd, __bf16 vs1,
+ vbfloat16mf2_t vs2,
+ size_t vl) {
+ return __riscv_vfwnmsac_vf_bf16mf2_f32m1_rm_tum(vm, vd, vs1, vs2,
+ __RISCV_FRM_RNE, vl);
+}
+
+// CHECK-RV64-LABEL: define dso_local <vscale x 4 x float> @test_vfwnmsac_vv_bf16m1_f32m2_rm_tum(
+// CHECK-RV64-SAME: <vscale x 4 x i1> [[VM:%.*]], <vscale x 4 x float> [[VD:%.*]], <vscale x 4 x bfloat> [[VS1:%.*]], <vscale x 4 x bfloat> [[VS2:%.*]], i64 noundef [[VL:%.*]]) #[[ATTR0]] {
+// CHECK-RV64-NEXT: [[ENTRY:.*:]]
+// CHECK-RV64-NEXT: [[TMP0:%.*]] = call <vscale x 4 x float> @llvm.riscv.vfwnmsac.mask.nxv4f32.nxv4bf16.nxv4bf16.i64(<vscale x 4 x float> [[VD]], <vscale x 4 x bfloat> [[VS1]], <vscale x 4 x bfloat> [[VS2]], <vscale x 4 x i1> [[VM]], i64 0, i64 [[VL]], i64 2)
+// CHECK-RV64-NEXT: ret <vscale x 4 x float> [[TMP0]]
+//
+vfloat32m2_t test_vfwnmsac_vv_bf16m1_f32m2_rm_tum(vbool16_t vm, vfloat32m2_t vd,
+ vbfloat16m1_t vs1,
+ vbfloat16m1_t vs2,
+ size_t vl) {
+ return __riscv_vfwnmsac_vv_bf16m1_f32m2_rm_tum(vm, vd, vs1, vs2,
+ __RISCV_FRM_RNE, vl);
+}
+
+// CHECK-RV64-LABEL: define dso_local <vscale x 4 x float> @test_vfwnmsac_vf_bf16m1_f32m2_rm_tum(
+// CHECK-RV64-SAME: <vscale x 4 x i1> [[VM:%.*]], <vscale x 4 x float> [[VD:%.*]], bfloat noundef [[VS1:%.*]], <vscale x 4 x bfloat> [[VS2:%.*]], i64 noundef [[VL:%.*]]) #[[ATTR0]] {
+// CHECK-RV64-NEXT: [[ENTRY:.*:]]
+// CHECK-RV64-NEXT: [[TMP0:%.*]] = call <vscale x 4 x float> @llvm.riscv.vfwnmsac.mask.nxv4f32.bf16.nxv4bf16.i64(<vscale x 4 x float> [[VD]], bfloat [[VS1]], <vscale x 4 x bfloat> [[VS2]], <vscale x 4 x i1> [[VM]], i64 0, i64 [[VL]], i64 2)
+// CHECK-RV64-NEXT: ret <vscale x 4 x float> [[TMP0]]
+//
+vfloat32m2_t test_vfwnmsac_vf_bf16m1_f32m2_rm_tum(vbool16_t vm, vfloat32m2_t vd,
+ __bf16 vs1, vbfloat16m1_t vs2,
+ size_t vl) {
+ return __riscv_vfwnmsac_vf_bf16m1_f32m2_rm_tum(vm, vd, vs1, vs2,
+ __RISCV_FRM_RNE, vl);
+}
+
+// CHECK-RV64-LABEL: define dso_local <vscale x 8 x float> @test_vfwnmsac_vv_bf16m2_f32m4_rm_tum(
+// CHECK-RV64-SAME: <vscale x 8 x i1> [[VM:%.*]], <vscale x 8 x float> [[VD:%.*]], <vscale x 8 x bfloat> [[VS1:%.*]], <vscale x 8 x bfloat> [[VS2:%.*]], i64 noundef [[VL:%.*]]) #[[ATTR0]] {
+// CHECK-RV64-NEXT: [[ENTRY:.*:]]
+// CHECK-RV64-NEXT: [[TMP0:%.*]] = call <vscale x 8 x float> @llvm.riscv.vfwnmsac.mask.nxv8f32.nxv8bf16.nxv8bf16.i64(<vscale x 8 x float> [[VD]], <vscale x 8 x bfloat> [[VS1]], <vscale x 8 x bfloat> [[VS2]], <vscale x 8 x i1> [[VM]], i64 0, i64 [[VL]], i64 2)
+// CHECK-RV64-NEXT: ret <vscale x 8 x float> [[TMP0]]
+//
+vfloat32m4_t test_vfwnmsac_vv_bf16m2_f32m4_rm_tum(vbool8_t vm, vfloat32m4_t vd,
+ vbfloat16m2_t vs1,
+ vbfloat16m2_t vs2,
+ size_t vl) {
+ return __riscv_vfwnmsac_vv_bf16m2_f32m4_rm_tum(vm, vd, vs1, vs2,
+ __RISCV_FRM_RNE, vl);
+}
+
+// CHECK-RV64-LABEL: define dso_local <vscale x 8 x float> @test_vfwnmsac_vf_bf16m2_f32m4_rm_tum(
+// CHECK-RV64-SAME: <vscale x 8 x i1> [[VM:%.*]], <vscale x 8 x float> [[VD:%.*]], bfloat noundef [[VS1:%.*]], <vscale x 8 x bfloat> [[VS2:%.*]], i64 noundef [[VL:%.*]]) #[[ATTR0]] {
+// CHECK-RV64-NEXT: [[ENTRY:.*:]]
+// CHECK-RV64-NEXT: [[TMP0:%.*]] = call <vscale x 8 x float> @llvm.riscv.vfwnmsac.mask.nxv8f32.bf16.nxv8bf16.i64(<vscale x 8 x float> [[VD]], bfloat [[VS1]], <vscale x 8 x bfloat> [[VS2]], <vscale x 8 x i1> [[VM]], i64 0, i64 [[VL]], i64 2)
+// CHECK-RV64-NEXT: ret <vscale x 8 x float> [[TMP0]]
+//
+vfloat32m4_t test_vfwnmsac_vf_bf16m2_f32m4_rm_tum(vbool8_t vm, vfloat32m4_t vd,
+ __bf16 vs1, vbfloat16m2_t vs2,
+ size_t vl) {
+ return __riscv_vfwnmsac_vf_bf16m2_f32m4_rm_tum(vm, vd, vs1, vs2,
+ __RISCV_FRM_RNE, vl);
+}
+
+// CHECK-RV64-LABEL: define dso_local <vscale x 16 x float> @test_vfwnmsac_vv_bf16m4_f32m8_rm_tum(
+// CHECK-RV64-SAME: <vscale x 16 x i1> [[VM:%.*]], <vscale x 16 x float> [[VD:%.*]], <vscale x 16 x bfloat> [[VS1:%.*]], <vscale x 16 x bfloat> [[VS2:%.*]], i64 noundef [[VL:%.*]]) #[[ATTR0]] {
+// CHECK-RV64-NEXT: [[ENTRY:.*:]]
+// CHECK-RV64-NEXT: [[TMP0:%.*]] = call <vscale x 16 x float> @llvm.riscv.vfwnmsac.mask.nxv16f32.nxv16bf16.nxv16bf16.i64(<vscale x 16 x float> [[VD]], <vscale x 16 x bfloat> [[VS1]], <vscale x 16 x bfloat> [[VS2]], <vscale x 16 x i1> [[VM]], i64 0, i64 [[VL]], i64 2)
+// CHECK-RV64-NEXT: ret <vscale x 16 x float> [[TMP0]]
+//
+vfloat32m8_t test_vfwnmsac_vv_bf16m4_f32m8_rm_tum(vbool4_t vm, vfloat32m8_t vd,
+ vbfloat16m4_t vs1,
+ vbfloat16m4_t vs2,
+ size_t vl) {
+ return __riscv_vfwnmsac_vv_bf16m4_f32m8_rm_tum(vm, vd, vs1, vs2,
+ __RISCV_FRM_RNE, vl);
+}
+
+// CHECK-RV64-LABEL: define dso_local <vscale x 16 x float> @test_vfwnmsac_vf_bf16m4_f32m8_rm_tum(
+// CHECK-RV64-SAME: <vscale x 16 x i1> [[VM:%.*]], <vscale x 16 x float> [[VD:%.*]], bfloat noundef [[VS1:%.*]], <vscale x 16 x bfloat> [[VS2:%.*]], i64 noundef [[VL:%.*]]) #[[ATTR0]] {
+// CHECK-RV64-NEXT: [[ENTRY:.*:]]
+// CHECK-RV64-NEXT: [[TMP0:%.*]] = call <vscale x 16 x float> @llvm.riscv.vfwnmsac.mask.nxv16f32.bf16.nxv16bf16.i64(<vscale x 16 x float> [[VD]], bfloat [[VS1]], <vscale x 16 x bfloat> [[VS2]], <vscale x 16 x i1> [[VM]], i64 0, i64 [[VL]], i64 2)
+// CHECK-RV64-NEXT: ret <vscale x 16 x float> [[TMP0]]
+//
+vfloat32m8_t test_vfwnmsac_vf_bf16m4_f32m8_rm_tum(vbool4_t vm, vfloat32m8_t vd,
+ __bf16 vs1, vbfloat16m4_t vs2,
+ size_t vl) {
+ return __riscv_vfwnmsac_vf_bf16m4_f32m8_rm_tum(vm, vd, vs1, vs2,
+ __RISCV_FRM_RNE, vl);
+}
+
+// CHECK-RV64-LABEL: define dso_local <vscale x 1 x float> @test_vfwnmsac_vv_bf16mf4_f32mf2_rm_tumu(
+// CHECK-RV64-SAME: <vscale x 1 x i1> [[VM:%.*]], <vscale x 1 x float> [[VD:%.*]], <vscale x 1 x bfloat> [[VS1:%.*]], <vscale x 1 x bfloat> [[VS2:%.*]], i64 noundef [[VL:%.*]]) #[[ATTR0]] {
+// CHECK-RV64-NEXT: [[ENTRY:.*:]]
+// CHECK-RV64-NEXT: [[TMP0:%.*]] = call <vscale x 1 x float> @llvm.riscv.vfwnmsac.mask.nxv1f32.nxv1bf16.nxv1bf16.i64(<vscale x 1 x float> [[VD]], <vscale x 1 x bfloat> [[VS1]], <vscale x 1 x bfloat> [[VS2]], <vscale x 1 x i1> [[VM]], i64 0, i64 [[VL]], i64 0)
+// CHECK-RV64-NEXT: ret <vscale x 1 x float> [[TMP0]]
+//
+vfloat32mf2_t test_vfwnmsac_vv_bf16mf4_f32mf2_rm_tumu(vbool64_t vm,
+ vfloat32mf2_t vd,
+ vbfloat16mf4_t vs1,
+ vbfloat16mf4_t vs2,
+ size_t vl) {
+ return __riscv_vfwnmsac_vv_bf16mf4_f32mf2_rm_tumu(vm, vd, vs1, vs2,
+ __RISCV_FRM_RNE, vl);
+}
+
+// CHECK-RV64-LABEL: define dso_local <vscale x 1 x float> @test_vfwnmsac_vf_bf16mf4_f32mf2_rm_tumu(
+// CHECK-RV64-SAME: <vscale x 1 x i1> [[VM:%.*]], <vscale x 1 x float> [[VD:%.*]], bfloat noundef [[VS1:%.*]], <vscale x 1 x bfloat> [[VS2:%.*]], i64 noundef [[VL:%.*]]) #[[ATTR0]] {
+// CHECK-RV64-NEXT: [[ENTRY:.*:]]
+// CHECK-RV64-NEXT: [[TMP0:%.*]] = call <vscale x 1 x float> @llvm.riscv.vfwnmsac.mask.nxv1f32.bf16.nxv1bf16.i64(<vscale x 1 x float> [[VD]], bfloat [[VS1]], <vscale x 1 x bfloat> [[VS2]], <vscale x 1 x i1> [[VM]], i64 0, i64 [[VL]], i64 0)
+// CHECK-RV64-NEXT: ret <vscale x 1 x float> [[TMP0]]
+//
+vfloat32mf2_t test_vfwnmsac_vf_bf16mf4_f32mf2_rm_tumu(
+ vbool64_t vm, vfloat32mf2_t vd, __bf16 vs1, vbfloat16mf4_t vs2, size_t vl) {
+ return __riscv_vfwnmsac_vf_bf16mf4_f32mf2_rm_tumu(vm, vd, vs1, vs2,
+ __RISCV_FRM_RNE, vl);
+}
+
+// CHECK-RV64-LABEL: define dso_local <vscale x 2 x float> @test_vfwnmsac_vv_bf16mf2_f32m1_rm_tumu(
+// CHECK-RV64-SAME: <vscale x 2 x i1> [[VM:%.*]], <vscale x 2 x float> [[VD:%.*]], <vscale x 2 x bfloat> [[VS1:%.*]], <vscale x 2 x bfloat> [[VS2:%.*]], i64 noundef [[VL:%.*]]) #[[ATTR0]] {
+// CHECK-RV64-NEXT: [[ENTRY:.*:]]
+// CHECK-RV64-NEXT: [[TMP0:%.*]] = call <vscale x 2 x float> @llvm.riscv.vfwnmsac.mask.nxv2f32.nxv2bf16.nxv2bf16.i64(<vscale x 2 x float> [[VD]], <vscale x 2 x bfloat> [[VS1]], <vscale x 2 x bfloat> [[VS2]], <vscale x 2 x i1> [[VM]], i64 0, i64 [[VL]], i64 0)
+// CHECK-RV64-NEXT: ret <vscale x 2 x float> [[TMP0]]
+//
+vfloat32m1_t test_vfwnmsac_vv_bf16mf2_f32m1_rm_tumu(vbool32_t vm,
+ vfloat32m1_t vd,
+ vbfloat16mf2_t vs1,
+ vbfloat16mf2_t vs2,
+ size_t vl) {
+ return __riscv_vfwnmsac_vv_bf16mf2_f32m1_rm_tumu(vm, vd, vs1, vs2,
+ __RISCV_FRM_RNE, vl);
+}
+
+// CHECK-RV64-LABEL: define dso_local <vscale x 2 x float> @test_vfwnmsac_vf_bf16mf2_f32m1_rm_tumu(
+// CHECK-RV64-SAME: <vscale x 2 x i1> [[VM:%.*]], <vscale x 2 x float> [[VD:%.*]], bfloat noundef [[VS1:%.*]], <vscale x 2 x bfloat> [[VS2:%.*]], i64 noundef [[VL:%.*]]) #[[ATTR0]] {
+// CHECK-RV64-NEXT: [[ENTRY:.*:]]
+// CHECK-RV64-NEXT: [[TMP0:%.*]] = call <vscale x 2 x float> @llvm.riscv.vfwnmsac.mask.nxv2f32.bf16.nxv2bf16.i64(<vscale x 2 x float> [[VD]], bfloat [[VS1]], <vscale x 2 x bfloat> [[VS2]], <vscale x 2 x i1> [[VM]], i64 0, i64 [[VL]], i64 0)
+// CHECK-RV64-NEXT: ret <vscale x 2 x float> [[TMP0]]
+//
+vfloat32m1_t test_vfwnmsac_vf_bf16mf2_f32m1_rm_tumu(vbool32_t vm,
+ vfloat32m1_t vd, __bf16 vs1,
+ vbfloat16mf2_t vs2,
+ size_t vl) {
+ return __riscv_vfwnmsac_vf_bf16mf2_f32m1_rm_tumu(vm, vd, vs1, vs2,
+ __RISCV_FRM_RNE, vl);
+}
+
+// CHECK-RV64-LABEL: define dso_local <vscale x 4 x float> @test_vfwnmsac_vv_bf16m1_f32m2_rm_tumu(
+// CHECK-RV64-SAME: <vscale x 4 x i1> [[VM:%.*]], <vscale x 4 x float> [[VD:%.*]], <vscale x 4 x bfloat> [[VS1:%.*]], <vscale x 4 x bfloat> [[VS2:%.*]], i64 noundef [[VL:%.*]]) #[[ATTR0]] {
+// CHECK-RV64-NEXT: [[ENTRY:.*:]]
+// CHECK-RV64-NEXT: [[TMP0:%.*]] = call <vscale x 4 x float> @llvm.riscv.vfwnmsac.mask.nxv4f32.nxv4bf16.nxv4bf16.i64(<vscale x 4 x float> [[VD]], <vscale x 4 x bfloat> [[VS1]], <vscale x 4 x bfloat> [[VS2]], <vscale x 4 x i1> [[VM]], i64 0, i64 [[VL]], i64 0)
+// CHECK-RV64-NEXT: ret <vscale x 4 x float> [[TMP0]]
+//
+vfloat32m2_t test_vfwnmsac_vv_bf16m1_f32m2_rm_tumu(vbool16_t vm,
+ vfloat32m2_t vd,
+ vbfloat16m1_t vs1,
+ vbfloat16m1_t vs2,
+ size_t vl) {
+ return __riscv_vfwnmsac_vv_bf16m1_f32m2_rm_tumu(vm, vd, vs1, vs2,
+ __RISCV_FRM_RNE, vl);
+}
+
+// CHECK-RV64-LABEL: define dso_local <vscale x 4 x float> @test_vfwnmsac_vf_bf16m1_f32m2_rm_tumu(
+// CHECK-RV64-SAME: <vscale x 4 x i1> [[VM:%.*]], <vscale x 4 x float> [[VD:%.*]], bfloat noundef [[VS1:%.*]], <vscale x 4 x bfloat> [[VS2:%.*]], i64 noundef [[VL:%.*]]) #[[ATTR0]] {
+// CHECK-RV64-NEXT: [[ENTRY:.*:]]
+// CHECK-RV64-NEXT: [[TMP0:%.*]] = call <vscale x 4 x float> @llvm.riscv.vfwnmsac.mask.nxv4f32.bf16.nxv4bf16.i64(<vscale x 4 x float> [[VD]], bfloat [[VS1]], <vscale x 4 x bfloat> [[VS2]], <vscale x 4 x i1> [[VM]], i64 0, i64 [[VL]], i64 0)
+// CHECK-RV64-NEXT: ret <vscale x 4 x float> [[TMP0]]
+//
+vfloat32m2_t test_vfwnmsac_vf_bf16m1_f32m2_rm_tumu(vbool16_t vm,
+ vfloat32m2_t vd, __bf16 vs1,
+ vbfloat16m1_t vs2,
+ size_t vl) {
+ return __riscv_vfwnmsac_vf_bf16m1_f32m2_rm_tumu(vm, vd, vs1, vs2,
+ __RISCV_FRM_RNE, vl);
+}
+
+// CHECK-RV64-LABEL: define dso_local <vscale x 8 x float> @test_vfwnmsac_vv_bf16m2_f32m4_rm_tumu(
+// CHECK-RV64-SAME: <vscale x 8 x i1> [[VM:%.*]], <vscale x 8 x float> [[VD:%.*]], <vscale x 8 x bfloat> [[VS1:%.*]], <vscale x 8 x bfloat> [[VS2:%.*]], i64 noundef [[VL:%.*]]) #[[ATTR0]] {
+// CHECK-RV64-NEXT: [[ENTRY:.*:]]
+// CHECK-RV64-NEXT: [[TMP0:%.*]] = call <vscale x 8 x float> @llvm.riscv.vfwnmsac.mask.nxv8f32.nxv8bf16.nxv8bf16.i64(<vscale x 8 x float> [[VD]], <vscale x 8 x bfloat> [[VS1]], <vscale x 8 x bfloat> [[VS2]], <vscale x 8 x i1> [[VM]], i64 0, i64 [[VL]], i64 0)
+// CHECK-RV64-NEXT: ret <vscale x 8 x float> [[TMP0]]
+//
+vfloat32m4_t test_vfwnmsac_vv_bf16m2_f32m4_rm_tumu(vbool8_t vm, vfloat32m4_t vd,
+ vbfloat16m2_t vs1,
+ vbfloat16m2_t vs2,
+ size_t vl) {
+ return __riscv_vfwnmsac_vv_bf16m2_f32m4_rm_tumu(vm, vd, vs1, vs2,
+ __RISCV_FRM_RNE, vl);
+}
+
+// CHECK-RV64-LABEL: define dso_local <vscale x 8 x float> @test_vfwnmsac_vf_bf16m2_f32m4_rm_tumu(
+// CHECK-RV64-SAME: <vscale x 8 x i1> [[VM:%.*]], <vscale x 8 x float> [[VD:%.*]], bfloat noundef [[VS1:%.*]], <vscale x 8 x bfloat> [[VS2:%.*]], i64 noundef [[VL:%.*]]) #[[ATTR0]] {
+// CHECK-RV64-NEXT: [[ENTRY:.*:]]
+// CHECK-RV64-NEXT: [[TMP0:%.*]] = call <vscale x 8 x float> @llvm.riscv.vfwnmsac.mask.nxv8f32.bf16.nxv8bf16.i64(<vscale x 8 x float> [[VD]], bfloat [[VS1]], <vscale x 8 x bfloat> [[VS2]], <vscale x 8 x i1> [[VM]], i64 0, i64 [[VL]], i64 0)
+// CHECK-RV64-NEXT: ret <vscale x 8 x float> [[TMP0]]
+//
+vfloat32m4_t test_vfwnmsac_vf_bf16m2_f32m4_rm_tumu(vbool8_t vm, vfloat32m4_t vd,
+ __bf16 vs1,
+ vbfloat16m2_t vs2,
+ size_t vl) {
+ return __riscv_vfwnmsac_vf_bf16m2_f32m4_rm_tumu(vm, vd, vs1, vs2,
+ __RISCV_FRM_RNE, vl);
+}
+
+// CHECK-RV64-LABEL: define dso_local <vscale x 16 x float> @test_vfwnmsac_vv_bf16m4_f32m8_rm_tumu(
+// CHECK-RV64-SAME: <vscale x 16 x i1> [[VM:%.*]], <vscale x 16 x float> [[VD:%.*]], <vscale x 16 x bfloat> [[VS1:%.*]], <vscale x 16 x bfloat> [[VS2:%.*]], i64 noundef [[VL:%.*]]) #[[ATTR0]] {
+// CHECK-RV64-NEXT: [[ENTRY:.*:]]
+// CHECK-RV64-NEXT: [[TMP0:%.*]] = call <vscale x 16 x float> @llvm.riscv.vfwnmsac.mask.nxv16f32.nxv16bf16.nxv16bf16.i64(<vscale x 16 x float> [[VD]], <vscale x 16 x bfloat> [[VS1]], <vscale x 16 x bfloat> [[VS2]], <vscale x 16 x i1> [[VM]], i64 0, i64 [[VL]], i64 0)
+// CHECK-RV64-NEXT: ret <vscale x 16 x float> [[TMP0]]
+//
+vfloat32m8_t test_vfwnmsac_vv_bf16m4_f32m8_rm_tumu(vbool4_t vm, vfloat32m8_t vd,
+ vbfloat16m4_t vs1,
+ vbfloat16m4_t vs2,
+ size_t vl) {
+ return __riscv_vfwnmsac_vv_bf16m4_f32m8_rm_tumu(vm, vd, vs1, vs2,
+ __RISCV_FRM_RNE, vl);
+}
+
+// CHECK-RV64-LABEL: define dso_local <vscale x 16 x float> @test_vfwnmsac_vf_bf16m4_f32m8_rm_tumu(
+// CHECK-RV64-SAME: <vscale x 16 x i1> [[VM:%.*]], <vscale x 16 x float> [[VD:%.*]], bfloat noundef [[VS1:%.*]], <vscale x 16 x bfloat> [[VS2:%.*]], i64 noundef [[VL:%.*]]) #[[ATTR0]] {
+// CHECK-RV64-NEXT: [[ENTRY:.*:]]
+// CHECK-RV64-NEXT: [[TMP0:%.*]] = call <vscale x 16 x float> @llvm.riscv.vfwnmsac.mask.nxv16f32.bf16.nxv16bf16.i64(<vscale x 16 x float> [[VD]], bfloat [[VS1]], <vscale x 16 x bfloat> [[VS2]], <vscale x 16 x i1> [[VM]], i64 0, i64 [[VL]], i64 0)
+// CHECK-RV64-NEXT: ret <vscale x 16 x float> [[TMP0]]
+//
+vfloat32m8_t test_vfwnmsac_vf_bf16m4_f32m8_rm_tumu(vbool4_t vm, vfloat32m8_t vd,
+ __bf16 vs1,
+ vbfloat16m4_t vs2,
+ size_t vl) {
+ return __riscv_vfwnmsac_vf_bf16m4_f32m8_rm_tumu(vm, vd, vs1, vs2,
+ __RISCV_FRM_RNE, vl);
+}
+
+// CHECK-RV64-LABEL: define dso_local <vscale x 1 x float> @test_vfwnmsac_vv_bf16mf4_f32mf2_rm_mu(
+// CHECK-RV64-SAME: <vscale x 1 x i1> [[VM:%.*]], <vscale x 1 x float> [[VD:%.*]], <vscale x 1 x bfloat> [[VS1:%.*]], <vscale x 1 x bfloat> [[VS2:%.*]], i64 noundef [[VL:%.*]]) #[[ATTR0]] {
+// CHECK-RV64-NEXT: [[ENTRY:.*:]]
+// CHECK-RV64-NEXT: [[TMP0:%.*]] = call <vscale x 1 x float> @llvm.riscv.vfwnmsac.mask.nxv1f32.nxv1bf16.nxv1bf16.i64(<vscale x 1 x float> [[VD]], <vscale x 1 x bfloat> [[VS1]], <vscale x 1 x bfloat> [[VS2]], <vscale x 1 x i1> [[VM]], i64 0, i64 [[VL]], i64 1)
+// CHECK-RV64-NEXT: ret <vscale x 1 x float> [[TMP0]]
+//
+vfloat32mf2_t test_vfwnmsac_vv_bf16mf4_f32mf2_rm_mu(vbool64_t vm,
+ vfloat32mf2_t vd,
+ vbfloat16mf4_t vs1,
+ vbfloat16mf4_t vs2,
+ size_t vl) {
+ return __riscv_vfwnmsac_vv_bf16mf4_f32mf2_rm_mu(vm, vd, vs1, vs2,
+ __RISCV_FRM_RNE, vl);
+}
+
+// CHECK-RV64-LABEL: define dso_local <vscale x 1 x float> @test_vfwnmsac_vf_bf16mf4_f32mf2_rm_mu(
+// CHECK-RV64-SAME: <vscale x 1 x i1> [[VM:%.*]], <vscale x 1 x float> [[VD:%.*]], bfloat noundef [[VS1:%.*]], <vscale x 1 x bfloat> [[VS2:%.*]], i64 noundef [[VL:%.*]]) #[[ATTR0]] {
+// CHECK-RV64-NEXT: [[ENTRY:.*:]]
+// CHECK-RV64-NEXT: [[TMP0:%.*]] = call <vscale x 1 x float> @llvm.riscv.vfwnmsac.mask.nxv1f32.bf16.nxv1bf16.i64(<vscale x 1 x float> [[VD]], bfloat [[VS1]], <vscale x 1 x bfloat> [[VS2]], <vscale x 1 x i1> [[VM]], i64 0, i64 [[VL]], i64 1)
+// CHECK-RV64-NEXT: ret <vscale x 1 x float> [[TMP0]]
+//
+vfloat32mf2_t test_vfwnmsac_vf_bf16mf4_f32mf2_rm_mu(
+ vbool64_t vm, vfloat32mf2_t vd, __bf16 vs1, vbfloat16mf4_t vs2, size_t vl) {
+ return __riscv_vfwnmsac_vf_bf16mf4_f32mf2_rm_mu(vm, vd, vs1, vs2,
+ __RISCV_FRM_RNE, vl);
+}
+
+// CHECK-RV64-LABEL: define dso_local <vscale x 2 x float> @test_vfwnmsac_vv_bf16mf2_f32m1_rm_mu(
+// CHECK-RV64-SAME: <vscale x 2 x i1> [[VM:%.*]], <vscale x 2 x float> [[VD:%.*]], <vscale x 2 x bfloat> [[VS1:%.*]], <vscale x 2 x bfloat> [[VS2:%.*]], i64 noundef [[VL:%.*]]) #[[ATTR0]] {
+// CHECK-RV64-NEXT: [[ENTRY:.*:]]
+// CHECK-RV64-NEXT: [[TMP0:%.*]] = call <vscale x 2 x float> @llvm.riscv.vfwnmsac.mask.nxv2f32.nxv2bf16.nxv2bf16.i64(<vscale x 2 x float> [[VD]], <vscale x 2 x bfloat> [[VS1]], <vscale x 2 x bfloat> [[VS2]], <vscale x 2 x i1> [[VM]], i64 0, i64 [[VL]], i64 1)
+// CHECK-RV64-NEXT: ret <vscale x 2 x float> [[TMP0]]
+//
+vfloat32m1_t test_vfwnmsac_vv_bf16mf2_f32m1_rm_mu(vbool32_t vm, vfloat32m1_t vd,
+ vbfloat16mf2_t vs1,
+ vbfloat16mf2_t vs2,
+ size_t vl) {
+ return __riscv_vfwnmsac_vv_bf16mf2_f32m1_rm_mu(vm, vd, vs1, vs2,
+ __RISCV_FRM_RNE, vl);
+}
+
+// CHECK-RV64-LABEL: define dso_local <vscale x 2 x float> @test_vfwnmsac_vf_bf16mf2_f32m1_rm_mu(
+// CHECK-RV64-SAME: <vscale x 2 x i1> [[VM:%.*]], <vscale x 2 x float> [[VD:%.*]], bfloat noundef [[VS1:%.*]], <vscale x 2 x bfloat> [[VS2:%.*]], i64 noundef [[VL:%.*]]) #[[ATTR0]] {
+// CHECK-RV64-NEXT: [[ENTRY:.*:]]
+// CHECK-RV64-NEXT: [[TMP0:%.*]] = call <vscale x 2 x float> @llvm.riscv.vfwnmsac.mask.nxv2f32.bf16.nxv2bf16.i64(<vscale x 2 x float> [[VD]], bfloat [[VS1]], <vscale x 2 x bfloat> [[VS2]], <vscale x 2 x i1> [[VM]], i64 0, i64 [[VL]], i64 1)
+// CHECK-RV64-NEXT: ret <vscale x 2 x float> [[TMP0]]
+//
+vfloat32m1_t test_vfwnmsac_vf_bf16mf2_f32m1_rm_mu(vbool32_t vm, vfloat32m1_t vd,
+ __bf16 vs1,
+ vbfloat16mf2_t vs2,
+ size_t vl) {
+ return __riscv_vfwnmsac_vf_bf16mf2_f32m1_rm_mu(vm, vd, vs1, vs2,
+ __RISCV_FRM_RNE, vl);
+}
+
+// CHECK-RV64-LABEL: define dso_local <vscale x 4 x float> @test_vfwnmsac_vv_bf16m1_f32m2_rm_mu(
+// CHECK-RV64-SAME: <vscale x 4 x i1> [[VM:%.*]], <vscale x 4 x float> [[VD:%.*]], <vscale x 4 x bfloat> [[VS1:%.*]], <vscale x 4 x bfloat> [[VS2:%.*]], i64 noundef [[VL:%.*]]) #[[ATTR0]] {
+// CHECK-RV64-NEXT: [[ENTRY:.*:]]
+// CHECK-RV64-NEXT: [[TMP0:%.*]] = call <vscale x 4 x float> @llvm.riscv.vfwnmsac.mask.nxv4f32.nxv4bf16.nxv4bf16.i64(<vscale x 4 x float> [[VD]], <vscale x 4 x bfloat> [[VS1]], <vscale x 4 x bfloat> [[VS2]], <vscale x 4 x i1> [[VM]], i64 0, i64 [[VL]], i64 1)
+// CHECK-RV64-NEXT: ret <vscale x 4 x float> [[TMP0]]
+//
+vfloat32m2_t test_vfwnmsac_vv_bf16m1_f32m2_rm_mu(vbool16_t vm, vfloat32m2_t vd,
+ vbfloat16m1_t vs1,
+ vbfloat16m1_t vs2, size_t vl) {
+ return __riscv_vfwnmsac_vv_bf16m1_f32m2_rm_mu(vm, vd, vs1, vs2,
+ __RISCV_FRM_RNE, vl);
+}
+
+// CHECK-RV64-LABEL: define dso_local <vscale x 4 x float> @test_vfwnmsac_vf_bf16m1_f32m2_rm_mu(
+// CHECK-RV64-SAME: <vscale x 4 x i1> [[VM:%.*]], <vscale x 4 x float> [[VD:%.*]], bfloat noundef [[VS1:%.*]], <vscale x 4 x bfloat> [[VS2:%.*]], i64 noundef [[VL:%.*]]) #[[ATTR0]] {
+// CHECK-RV64-NEXT: [[ENTRY:.*:]]
+// CHECK-RV64-NEXT: [[TMP0:%.*]] = call <vscale x 4 x float> @llvm.riscv.vfwnmsac.mask.nxv4f32.bf16.nxv4bf16.i64(<vscale x 4 x float> [[VD]], bfloat [[VS1]], <vscale x 4 x bfloat> [[VS2]], <vscale x 4 x i1> [[VM]], i64 0, i64 [[VL]], i64 1)
+// CHECK-RV64-NEXT: ret <vscale x 4 x float> [[TMP0]]
+//
+vfloat32m2_t test_vfwnmsac_vf_bf16m1_f32m2_rm_mu(vbool16_t vm, vfloat32m2_t vd,
+ __bf16 vs1, vbfloat16m1_t vs2,
+ size_t vl) {
+ return __riscv_vfwnmsac_vf_bf16m1_f32m2_rm_mu(vm, vd, vs1, vs2,
+ __RISCV_FRM_RNE, vl);
+}
+
+// CHECK-RV64-LABEL: define dso_local <vscale x 8 x float> @test_vfwnmsac_vv_bf16m2_f32m4_rm_mu(
+// CHECK-RV64-SAME: <vscale x 8 x i1> [[VM:%.*]], <vscale x 8 x float> [[VD:%.*]], <vscale x 8 x bfloat> [[VS1:%.*]], <vscale x 8 x bfloat> [[VS2:%.*]], i64 noundef [[VL:%.*]]) #[[ATTR0]] {
+// CHECK-RV64-NEXT: [[ENTRY:.*:]]
+// CHECK-RV64-NEXT: [[TMP0:%.*]] = call <vscale x 8 x float> @llvm.riscv.vfwnmsac.mask.nxv8f32.nxv8bf16.nxv8bf16.i64(<vscale x 8 x float> [[VD]], <vscale x 8 x bfloat> [[VS1]], <vscale x 8 x bfloat> [[VS2]], <vscale x 8 x i1> [[VM]], i64 0, i64 [[VL]], i64 1)
+// CHECK-RV64-NEXT: ret <vscale x 8 x float> [[TMP0]]
+//
+vfloat32m4_t test_vfwnmsac_vv_bf16m2_f32m4_rm_mu(vbool8_t vm, vfloat32m4_t vd,
+ vbfloat16m2_t vs1,
+ vbfloat16m2_t vs2, size_t vl) {
+ return __riscv_vfwnmsac_vv_bf16m2_f32m4_rm_mu(vm, vd, vs1, vs2,
+ __RISCV_FRM_RNE, vl);
+}
+
+// CHECK-RV64-LABEL: define dso_local <vscale x 8 x float> @test_vfwnmsac_vf_bf16m2_f32m4_rm_mu(
+// CHECK-RV64-SAME: <vscale x 8 x i1> [[VM:%.*]], <vscale x 8 x float> [[VD:%.*]], bfloat noundef [[VS1:%.*]], <vscale x 8 x bfloat> [[VS2:%.*]], i64 noundef [[VL:%.*]]) #[[ATTR0]] {
+// CHECK-RV64-NEXT: [[ENTRY:.*:]]
+// CHECK-RV64-NEXT: [[TMP0:%.*]] = call <vscale x 8 x float> @llvm.riscv.vfwnmsac.mask.nxv8f32.bf16.nxv8bf16.i64(<vscale x 8 x float> [[VD]], bfloat [[VS1]], <vscale x 8 x bfloat> [[VS2]], <vscale x 8 x i1> [[VM]], i64 0, i64 [[VL]], i64 1)
+// CHECK-RV64-NEXT: ret <vscale x 8 x float> [[TMP0]]
+//
+vfloat32m4_t test_vfwnmsac_vf_bf16m2_f32m4_rm_mu(vbool8_t vm, vfloat32m4_t vd,
+ __bf16 vs1, vbfloat16m2_t vs2,
+ size_t vl) {
+ return __riscv_vfwnmsac_vf_bf16m2_f32m4_rm_mu(vm, vd, vs1, vs2,
+ __RISCV_FRM_RNE, vl);
+}
+
+// CHECK-RV64-LABEL: define dso_local <vscale x 16 x float> @test_vfwnmsac_vv_bf16m4_f32m8_rm_mu(
+// CHECK-RV64-SAME: <vscale x 16 x i1> [[VM:%.*]], <vscale x 16 x float> [[VD:%.*]], <vscale x 16 x bfloat> [[VS1:%.*]], <vscale x 16 x bfloat> [[VS2:%.*]], i64 noundef [[VL:%.*]]) #[[ATTR0]] {
+// CHECK-RV64-NEXT: [[ENTRY:.*:]]
+// CHECK-RV64-NEXT: [[TMP0:%.*]] = call <vscale x 16 x float> @llvm.riscv.vfwnmsac.mask.nxv16f32.nxv16bf16.nxv16bf16.i64(<vscale x 16 x float> [[VD]], <vscale x 16 x bfloat> [[VS1]], <vscale x 16 x bfloat> [[VS2]], <vscale x 16 x i1> [[VM]], i64 0, i64 [[VL]], i64 1)
+// CHECK-RV64-NEXT: ret <vscale x 16 x float> [[TMP0]]
+//
+vfloat32m8_t test_vfwnmsac_vv_bf16m4_f32m8_rm_mu(vbool4_t vm, vfloat32m8_t vd,
+ vbfloat16m4_t vs1,
+ vbfloat16m4_t vs2, size_t vl) {
+ return __riscv_vfwnmsac_vv_bf16m4_f32m8_rm_mu(vm, vd, vs1, vs2,
+ __RISCV_FRM_RNE, vl);
+}
+
+// CHECK-RV64-LABEL: define dso_local <vscale x 16 x float> @test_vfwnmsac_vf_bf16m4_f32m8_rm_mu(
+// CHECK-RV64-SAME: <vscale x 16 x i1> [[VM:%.*]], <vscale x 16 x float> [[VD:%.*]], bfloat noundef [[VS1:%.*]], <vscale x 16 x bfloat> [[VS2:%.*]], i64 noundef [[VL:%.*]]) #[[ATTR0]] {
+// CHECK-RV64-NEXT: [[ENTRY:.*:]]
+// CHECK-RV64-NEXT: [[TMP0:%.*]] = call <vscale x 16 x float> @llvm.riscv.vfwnmsac.mask.nxv16f32.bf16.nxv16bf16.i64(<vscale x 16 x float> [[VD]], bfloat [[VS1]], <vscale x 16 x bfloat> [[VS2]], <vscale x 16 x i1> [[VM]], i64 0, i64 [[VL]], i64 1)
+// CHECK-RV64-NEXT: ret <vscale x 16 x float> [[TMP0]]
+//
+vfloat32m8_t test_vfwnmsac_vf_bf16m4_f32m8_rm_mu(vbool4_t vm, vfloat32m8_t vd,
+ __bf16 vs1, vbfloat16m4_t vs2,
+ size_t vl) {
+ return __riscv_vfwnmsac_vf_bf16m4_f32m8_rm_mu(vm, vd, vs1, vs2,
+ __RISCV_FRM_RNE, vl);
+}
diff --git a/clang/test/CodeGen/RISCV/rvv-intrinsics-autogenerated/zvfbfa/policy/non-overloaded/vfwsub.c b/clang/test/CodeGen/RISCV/rvv-intrinsics-autogenerated/zvfbfa/policy/non-overloaded/vfwsub.c
new file mode 100644
index 0000000..9d9b0b0
--- /dev/null
+++ b/clang/test/CodeGen/RISCV/rvv-intrinsics-autogenerated/zvfbfa/policy/non-overloaded/vfwsub.c
@@ -0,0 +1,2007 @@
+// NOTE: Assertions have been autogenerated by utils/update_cc_test_checks.py UTC_ARGS: --version 5
+// REQUIRES: riscv-registered-target
+// RUN: %clang_cc1 -triple riscv64 -target-feature +v \
+// RUN: -target-feature +experimental-zvfbfa -disable-O0-optnone \
+// RUN: -emit-llvm %s -o - | opt -S -passes=mem2reg | \
+// RUN: FileCheck --check-prefix=CHECK-RV64 %s
+
+#include <riscv_vector.h>
+
+// CHECK-RV64-LABEL: define dso_local <vscale x 1 x float> @test_vfwsub_vv_bf16mf4_f32mf2_tu(
+// CHECK-RV64-SAME: <vscale x 1 x float> [[VD:%.*]], <vscale x 1 x bfloat> [[VS2:%.*]], <vscale x 1 x bfloat> [[VS1:%.*]], i64 noundef [[VL:%.*]]) #[[ATTR0:[0-9]+]] {
+// CHECK-RV64-NEXT: [[ENTRY:.*:]]
+// CHECK-RV64-NEXT: [[TMP0:%.*]] = call <vscale x 1 x float> @llvm.riscv.vfwsub.nxv1f32.nxv1bf16.nxv1bf16.i64(<vscale x 1 x float> [[VD]], <vscale x 1 x bfloat> [[VS2]], <vscale x 1 x bfloat> [[VS1]], i64 7, i64 [[VL]])
+// CHECK-RV64-NEXT: ret <vscale x 1 x float> [[TMP0]]
+//
+vfloat32mf2_t test_vfwsub_vv_bf16mf4_f32mf2_tu(vfloat32mf2_t vd,
+ vbfloat16mf4_t vs2,
+ vbfloat16mf4_t vs1, size_t vl) {
+ return __riscv_vfwsub_vv_bf16mf4_f32mf2_tu(vd, vs2, vs1, vl);
+}
+
+// CHECK-RV64-LABEL: define dso_local <vscale x 1 x float> @test_vfwsub_vf_bf16mf4_f32mf2_tu(
+// CHECK-RV64-SAME: <vscale x 1 x float> [[VD:%.*]], <vscale x 1 x bfloat> [[VS2:%.*]], bfloat noundef [[RS1:%.*]], i64 noundef [[VL:%.*]]) #[[ATTR0]] {
+// CHECK-RV64-NEXT: [[ENTRY:.*:]]
+// CHECK-RV64-NEXT: [[TMP0:%.*]] = call <vscale x 1 x float> @llvm.riscv.vfwsub.nxv1f32.nxv1bf16.bf16.i64(<vscale x 1 x float> [[VD]], <vscale x 1 x bfloat> [[VS2]], bfloat [[RS1]], i64 7, i64 [[VL]])
+// CHECK-RV64-NEXT: ret <vscale x 1 x float> [[TMP0]]
+//
+vfloat32mf2_t test_vfwsub_vf_bf16mf4_f32mf2_tu(vfloat32mf2_t vd,
+ vbfloat16mf4_t vs2, __bf16 rs1,
+ size_t vl) {
+ return __riscv_vfwsub_vf_bf16mf4_f32mf2_tu(vd, vs2, rs1, vl);
+}
+
+// CHECK-RV64-LABEL: define dso_local <vscale x 1 x float> @test_vfwsub_wv_bf16mf4_f32mf2_tu(
+// CHECK-RV64-SAME: <vscale x 1 x float> [[VD:%.*]], <vscale x 1 x float> [[VS2:%.*]], <vscale x 1 x bfloat> [[VS1:%.*]], i64 noundef [[VL:%.*]]) #[[ATTR0]] {
+// CHECK-RV64-NEXT: [[ENTRY:.*:]]
+// CHECK-RV64-NEXT: [[TMP0:%.*]] = call <vscale x 1 x float> @llvm.riscv.vfwsub.w.nxv1f32.nxv1bf16.i64(<vscale x 1 x float> [[VD]], <vscale x 1 x float> [[VS2]], <vscale x 1 x bfloat> [[VS1]], i64 7, i64 [[VL]])
+// CHECK-RV64-NEXT: ret <vscale x 1 x float> [[TMP0]]
+//
+vfloat32mf2_t test_vfwsub_wv_bf16mf4_f32mf2_tu(vfloat32mf2_t vd,
+ vfloat32mf2_t vs2,
+ vbfloat16mf4_t vs1, size_t vl) {
+ return __riscv_vfwsub_wv_bf16mf4_f32mf2_tu(vd, vs2, vs1, vl);
+}
+
+// CHECK-RV64-LABEL: define dso_local <vscale x 1 x float> @test_vfwsub_wf_bf16_f32mf2_tu(
+// CHECK-RV64-SAME: <vscale x 1 x float> [[VD:%.*]], <vscale x 1 x float> [[VS2:%.*]], bfloat noundef [[RS1:%.*]], i64 noundef [[VL:%.*]]) #[[ATTR0]] {
+// CHECK-RV64-NEXT: [[ENTRY:.*:]]
+// CHECK-RV64-NEXT: [[TMP0:%.*]] = call <vscale x 1 x float> @llvm.riscv.vfwsub.w.nxv1f32.bf16.i64(<vscale x 1 x float> [[VD]], <vscale x 1 x float> [[VS2]], bfloat [[RS1]], i64 7, i64 [[VL]])
+// CHECK-RV64-NEXT: ret <vscale x 1 x float> [[TMP0]]
+//
+vfloat32mf2_t test_vfwsub_wf_bf16_f32mf2_tu(vfloat32mf2_t vd, vfloat32mf2_t vs2,
+ __bf16 rs1, size_t vl) {
+ return __riscv_vfwsub_wf_bf16_f32mf2_tu(vd, vs2, rs1, vl);
+}
+
+// CHECK-RV64-LABEL: define dso_local <vscale x 2 x float> @test_vfwsub_vv_bf16mf2_f32m1_tu(
+// CHECK-RV64-SAME: <vscale x 2 x float> [[VD:%.*]], <vscale x 2 x bfloat> [[VS2:%.*]], <vscale x 2 x bfloat> [[VS1:%.*]], i64 noundef [[VL:%.*]]) #[[ATTR0]] {
+// CHECK-RV64-NEXT: [[ENTRY:.*:]]
+// CHECK-RV64-NEXT: [[TMP0:%.*]] = call <vscale x 2 x float> @llvm.riscv.vfwsub.nxv2f32.nxv2bf16.nxv2bf16.i64(<vscale x 2 x float> [[VD]], <vscale x 2 x bfloat> [[VS2]], <vscale x 2 x bfloat> [[VS1]], i64 7, i64 [[VL]])
+// CHECK-RV64-NEXT: ret <vscale x 2 x float> [[TMP0]]
+//
+vfloat32m1_t test_vfwsub_vv_bf16mf2_f32m1_tu(vfloat32m1_t vd,
+ vbfloat16mf2_t vs2,
+ vbfloat16mf2_t vs1, size_t vl) {
+ return __riscv_vfwsub_vv_bf16mf2_f32m1_tu(vd, vs2, vs1, vl);
+}
+
+// CHECK-RV64-LABEL: define dso_local <vscale x 2 x float> @test_vfwsub_vf_bf16mf2_f32m1_tu(
+// CHECK-RV64-SAME: <vscale x 2 x float> [[VD:%.*]], <vscale x 2 x bfloat> [[VS2:%.*]], bfloat noundef [[RS1:%.*]], i64 noundef [[VL:%.*]]) #[[ATTR0]] {
+// CHECK-RV64-NEXT: [[ENTRY:.*:]]
+// CHECK-RV64-NEXT: [[TMP0:%.*]] = call <vscale x 2 x float> @llvm.riscv.vfwsub.nxv2f32.nxv2bf16.bf16.i64(<vscale x 2 x float> [[VD]], <vscale x 2 x bfloat> [[VS2]], bfloat [[RS1]], i64 7, i64 [[VL]])
+// CHECK-RV64-NEXT: ret <vscale x 2 x float> [[TMP0]]
+//
+vfloat32m1_t test_vfwsub_vf_bf16mf2_f32m1_tu(vfloat32m1_t vd,
+ vbfloat16mf2_t vs2, __bf16 rs1,
+ size_t vl) {
+ return __riscv_vfwsub_vf_bf16mf2_f32m1_tu(vd, vs2, rs1, vl);
+}
+
+// CHECK-RV64-LABEL: define dso_local <vscale x 2 x float> @test_vfwsub_wv_bf16mf2_f32m1_tu(
+// CHECK-RV64-SAME: <vscale x 2 x float> [[VD:%.*]], <vscale x 2 x float> [[VS2:%.*]], <vscale x 2 x bfloat> [[VS1:%.*]], i64 noundef [[VL:%.*]]) #[[ATTR0]] {
+// CHECK-RV64-NEXT: [[ENTRY:.*:]]
+// CHECK-RV64-NEXT: [[TMP0:%.*]] = call <vscale x 2 x float> @llvm.riscv.vfwsub.w.nxv2f32.nxv2bf16.i64(<vscale x 2 x float> [[VD]], <vscale x 2 x float> [[VS2]], <vscale x 2 x bfloat> [[VS1]], i64 7, i64 [[VL]])
+// CHECK-RV64-NEXT: ret <vscale x 2 x float> [[TMP0]]
+//
+vfloat32m1_t test_vfwsub_wv_bf16mf2_f32m1_tu(vfloat32m1_t vd, vfloat32m1_t vs2,
+ vbfloat16mf2_t vs1, size_t vl) {
+ return __riscv_vfwsub_wv_bf16mf2_f32m1_tu(vd, vs2, vs1, vl);
+}
+
+// CHECK-RV64-LABEL: define dso_local <vscale x 2 x float> @test_vfwsub_wf_bf16_f32m1_tu(
+// CHECK-RV64-SAME: <vscale x 2 x float> [[VD:%.*]], <vscale x 2 x float> [[VS2:%.*]], bfloat noundef [[RS1:%.*]], i64 noundef [[VL:%.*]]) #[[ATTR0]] {
+// CHECK-RV64-NEXT: [[ENTRY:.*:]]
+// CHECK-RV64-NEXT: [[TMP0:%.*]] = call <vscale x 2 x float> @llvm.riscv.vfwsub.w.nxv2f32.bf16.i64(<vscale x 2 x float> [[VD]], <vscale x 2 x float> [[VS2]], bfloat [[RS1]], i64 7, i64 [[VL]])
+// CHECK-RV64-NEXT: ret <vscale x 2 x float> [[TMP0]]
+//
+vfloat32m1_t test_vfwsub_wf_bf16_f32m1_tu(vfloat32m1_t vd, vfloat32m1_t vs2,
+ __bf16 rs1, size_t vl) {
+ return __riscv_vfwsub_wf_bf16_f32m1_tu(vd, vs2, rs1, vl);
+}
+
+// CHECK-RV64-LABEL: define dso_local <vscale x 4 x float> @test_vfwsub_vv_bf16m1_f32m2_tu(
+// CHECK-RV64-SAME: <vscale x 4 x float> [[VD:%.*]], <vscale x 4 x bfloat> [[VS2:%.*]], <vscale x 4 x bfloat> [[VS1:%.*]], i64 noundef [[VL:%.*]]) #[[ATTR0]] {
+// CHECK-RV64-NEXT: [[ENTRY:.*:]]
+// CHECK-RV64-NEXT: [[TMP0:%.*]] = call <vscale x 4 x float> @llvm.riscv.vfwsub.nxv4f32.nxv4bf16.nxv4bf16.i64(<vscale x 4 x float> [[VD]], <vscale x 4 x bfloat> [[VS2]], <vscale x 4 x bfloat> [[VS1]], i64 7, i64 [[VL]])
+// CHECK-RV64-NEXT: ret <vscale x 4 x float> [[TMP0]]
+//
+vfloat32m2_t test_vfwsub_vv_bf16m1_f32m2_tu(vfloat32m2_t vd, vbfloat16m1_t vs2,
+ vbfloat16m1_t vs1, size_t vl) {
+ return __riscv_vfwsub_vv_bf16m1_f32m2_tu(vd, vs2, vs1, vl);
+}
+
+// CHECK-RV64-LABEL: define dso_local <vscale x 4 x float> @test_vfwsub_vf_bf16m1_f32m2_tu(
+// CHECK-RV64-SAME: <vscale x 4 x float> [[VD:%.*]], <vscale x 4 x bfloat> [[VS2:%.*]], bfloat noundef [[RS1:%.*]], i64 noundef [[VL:%.*]]) #[[ATTR0]] {
+// CHECK-RV64-NEXT: [[ENTRY:.*:]]
+// CHECK-RV64-NEXT: [[TMP0:%.*]] = call <vscale x 4 x float> @llvm.riscv.vfwsub.nxv4f32.nxv4bf16.bf16.i64(<vscale x 4 x float> [[VD]], <vscale x 4 x bfloat> [[VS2]], bfloat [[RS1]], i64 7, i64 [[VL]])
+// CHECK-RV64-NEXT: ret <vscale x 4 x float> [[TMP0]]
+//
+vfloat32m2_t test_vfwsub_vf_bf16m1_f32m2_tu(vfloat32m2_t vd, vbfloat16m1_t vs2,
+ __bf16 rs1, size_t vl) {
+ return __riscv_vfwsub_vf_bf16m1_f32m2_tu(vd, vs2, rs1, vl);
+}
+
+// CHECK-RV64-LABEL: define dso_local <vscale x 4 x float> @test_vfwsub_wv_bf16m1_f32m2_tu(
+// CHECK-RV64-SAME: <vscale x 4 x float> [[VD:%.*]], <vscale x 4 x float> [[VS2:%.*]], <vscale x 4 x bfloat> [[VS1:%.*]], i64 noundef [[VL:%.*]]) #[[ATTR0]] {
+// CHECK-RV64-NEXT: [[ENTRY:.*:]]
+// CHECK-RV64-NEXT: [[TMP0:%.*]] = call <vscale x 4 x float> @llvm.riscv.vfwsub.w.nxv4f32.nxv4bf16.i64(<vscale x 4 x float> [[VD]], <vscale x 4 x float> [[VS2]], <vscale x 4 x bfloat> [[VS1]], i64 7, i64 [[VL]])
+// CHECK-RV64-NEXT: ret <vscale x 4 x float> [[TMP0]]
+//
+vfloat32m2_t test_vfwsub_wv_bf16m1_f32m2_tu(vfloat32m2_t vd, vfloat32m2_t vs2,
+ vbfloat16m1_t vs1, size_t vl) {
+ return __riscv_vfwsub_wv_bf16m1_f32m2_tu(vd, vs2, vs1, vl);
+}
+
+// CHECK-RV64-LABEL: define dso_local <vscale x 4 x float> @test_vfwsub_wf_bf16_f32m2_tu(
+// CHECK-RV64-SAME: <vscale x 4 x float> [[VD:%.*]], <vscale x 4 x float> [[VS2:%.*]], bfloat noundef [[RS1:%.*]], i64 noundef [[VL:%.*]]) #[[ATTR0]] {
+// CHECK-RV64-NEXT: [[ENTRY:.*:]]
+// CHECK-RV64-NEXT: [[TMP0:%.*]] = call <vscale x 4 x float> @llvm.riscv.vfwsub.w.nxv4f32.bf16.i64(<vscale x 4 x float> [[VD]], <vscale x 4 x float> [[VS2]], bfloat [[RS1]], i64 7, i64 [[VL]])
+// CHECK-RV64-NEXT: ret <vscale x 4 x float> [[TMP0]]
+//
+vfloat32m2_t test_vfwsub_wf_bf16_f32m2_tu(vfloat32m2_t vd, vfloat32m2_t vs2,
+ __bf16 rs1, size_t vl) {
+ return __riscv_vfwsub_wf_bf16_f32m2_tu(vd, vs2, rs1, vl);
+}
+
+// CHECK-RV64-LABEL: define dso_local <vscale x 8 x float> @test_vfwsub_vv_bf16m2_f32m4_tu(
+// CHECK-RV64-SAME: <vscale x 8 x float> [[VD:%.*]], <vscale x 8 x bfloat> [[VS2:%.*]], <vscale x 8 x bfloat> [[VS1:%.*]], i64 noundef [[VL:%.*]]) #[[ATTR0]] {
+// CHECK-RV64-NEXT: [[ENTRY:.*:]]
+// CHECK-RV64-NEXT: [[TMP0:%.*]] = call <vscale x 8 x float> @llvm.riscv.vfwsub.nxv8f32.nxv8bf16.nxv8bf16.i64(<vscale x 8 x float> [[VD]], <vscale x 8 x bfloat> [[VS2]], <vscale x 8 x bfloat> [[VS1]], i64 7, i64 [[VL]])
+// CHECK-RV64-NEXT: ret <vscale x 8 x float> [[TMP0]]
+//
+vfloat32m4_t test_vfwsub_vv_bf16m2_f32m4_tu(vfloat32m4_t vd, vbfloat16m2_t vs2,
+ vbfloat16m2_t vs1, size_t vl) {
+ return __riscv_vfwsub_vv_bf16m2_f32m4_tu(vd, vs2, vs1, vl);
+}
+
+// CHECK-RV64-LABEL: define dso_local <vscale x 8 x float> @test_vfwsub_vf_bf16m2_f32m4_tu(
+// CHECK-RV64-SAME: <vscale x 8 x float> [[VD:%.*]], <vscale x 8 x bfloat> [[VS2:%.*]], bfloat noundef [[RS1:%.*]], i64 noundef [[VL:%.*]]) #[[ATTR0]] {
+// CHECK-RV64-NEXT: [[ENTRY:.*:]]
+// CHECK-RV64-NEXT: [[TMP0:%.*]] = call <vscale x 8 x float> @llvm.riscv.vfwsub.nxv8f32.nxv8bf16.bf16.i64(<vscale x 8 x float> [[VD]], <vscale x 8 x bfloat> [[VS2]], bfloat [[RS1]], i64 7, i64 [[VL]])
+// CHECK-RV64-NEXT: ret <vscale x 8 x float> [[TMP0]]
+//
+vfloat32m4_t test_vfwsub_vf_bf16m2_f32m4_tu(vfloat32m4_t vd, vbfloat16m2_t vs2,
+ __bf16 rs1, size_t vl) {
+ return __riscv_vfwsub_vf_bf16m2_f32m4_tu(vd, vs2, rs1, vl);
+}
+
+// CHECK-RV64-LABEL: define dso_local <vscale x 8 x float> @test_vfwsub_wv_bf16m2_f32m4_tu(
+// CHECK-RV64-SAME: <vscale x 8 x float> [[VD:%.*]], <vscale x 8 x float> [[VS2:%.*]], <vscale x 8 x bfloat> [[VS1:%.*]], i64 noundef [[VL:%.*]]) #[[ATTR0]] {
+// CHECK-RV64-NEXT: [[ENTRY:.*:]]
+// CHECK-RV64-NEXT: [[TMP0:%.*]] = call <vscale x 8 x float> @llvm.riscv.vfwsub.w.nxv8f32.nxv8bf16.i64(<vscale x 8 x float> [[VD]], <vscale x 8 x float> [[VS2]], <vscale x 8 x bfloat> [[VS1]], i64 7, i64 [[VL]])
+// CHECK-RV64-NEXT: ret <vscale x 8 x float> [[TMP0]]
+//
+vfloat32m4_t test_vfwsub_wv_bf16m2_f32m4_tu(vfloat32m4_t vd, vfloat32m4_t vs2,
+ vbfloat16m2_t vs1, size_t vl) {
+ return __riscv_vfwsub_wv_bf16m2_f32m4_tu(vd, vs2, vs1, vl);
+}
+
+// CHECK-RV64-LABEL: define dso_local <vscale x 8 x float> @test_vfwsub_wf_bf16_f32m4_tu(
+// CHECK-RV64-SAME: <vscale x 8 x float> [[VD:%.*]], <vscale x 8 x float> [[VS2:%.*]], bfloat noundef [[RS1:%.*]], i64 noundef [[VL:%.*]]) #[[ATTR0]] {
+// CHECK-RV64-NEXT: [[ENTRY:.*:]]
+// CHECK-RV64-NEXT: [[TMP0:%.*]] = call <vscale x 8 x float> @llvm.riscv.vfwsub.w.nxv8f32.bf16.i64(<vscale x 8 x float> [[VD]], <vscale x 8 x float> [[VS2]], bfloat [[RS1]], i64 7, i64 [[VL]])
+// CHECK-RV64-NEXT: ret <vscale x 8 x float> [[TMP0]]
+//
+vfloat32m4_t test_vfwsub_wf_bf16_f32m4_tu(vfloat32m4_t vd, vfloat32m4_t vs2,
+ __bf16 rs1, size_t vl) {
+ return __riscv_vfwsub_wf_bf16_f32m4_tu(vd, vs2, rs1, vl);
+}
+
+// CHECK-RV64-LABEL: define dso_local <vscale x 16 x float> @test_vfwsub_vv_bf16m4_f32m8_tu(
+// CHECK-RV64-SAME: <vscale x 16 x float> [[VD:%.*]], <vscale x 16 x bfloat> [[VS2:%.*]], <vscale x 16 x bfloat> [[VS1:%.*]], i64 noundef [[VL:%.*]]) #[[ATTR0]] {
+// CHECK-RV64-NEXT: [[ENTRY:.*:]]
+// CHECK-RV64-NEXT: [[TMP0:%.*]] = call <vscale x 16 x float> @llvm.riscv.vfwsub.nxv16f32.nxv16bf16.nxv16bf16.i64(<vscale x 16 x float> [[VD]], <vscale x 16 x bfloat> [[VS2]], <vscale x 16 x bfloat> [[VS1]], i64 7, i64 [[VL]])
+// CHECK-RV64-NEXT: ret <vscale x 16 x float> [[TMP0]]
+//
+vfloat32m8_t test_vfwsub_vv_bf16m4_f32m8_tu(vfloat32m8_t vd, vbfloat16m4_t vs2,
+ vbfloat16m4_t vs1, size_t vl) {
+ return __riscv_vfwsub_vv_bf16m4_f32m8_tu(vd, vs2, vs1, vl);
+}
+
+// CHECK-RV64-LABEL: define dso_local <vscale x 16 x float> @test_vfwsub_vf_bf16m4_f32m8_tu(
+// CHECK-RV64-SAME: <vscale x 16 x float> [[VD:%.*]], <vscale x 16 x bfloat> [[VS2:%.*]], bfloat noundef [[RS1:%.*]], i64 noundef [[VL:%.*]]) #[[ATTR0]] {
+// CHECK-RV64-NEXT: [[ENTRY:.*:]]
+// CHECK-RV64-NEXT: [[TMP0:%.*]] = call <vscale x 16 x float> @llvm.riscv.vfwsub.nxv16f32.nxv16bf16.bf16.i64(<vscale x 16 x float> [[VD]], <vscale x 16 x bfloat> [[VS2]], bfloat [[RS1]], i64 7, i64 [[VL]])
+// CHECK-RV64-NEXT: ret <vscale x 16 x float> [[TMP0]]
+//
+vfloat32m8_t test_vfwsub_vf_bf16m4_f32m8_tu(vfloat32m8_t vd, vbfloat16m4_t vs2,
+ __bf16 rs1, size_t vl) {
+ return __riscv_vfwsub_vf_bf16m4_f32m8_tu(vd, vs2, rs1, vl);
+}
+
+// CHECK-RV64-LABEL: define dso_local <vscale x 16 x float> @test_vfwsub_wv_bf16m4_f32m8_tu(
+// CHECK-RV64-SAME: <vscale x 16 x float> [[VD:%.*]], <vscale x 16 x float> [[VS2:%.*]], <vscale x 16 x bfloat> [[VS1:%.*]], i64 noundef [[VL:%.*]]) #[[ATTR0]] {
+// CHECK-RV64-NEXT: [[ENTRY:.*:]]
+// CHECK-RV64-NEXT: [[TMP0:%.*]] = call <vscale x 16 x float> @llvm.riscv.vfwsub.w.nxv16f32.nxv16bf16.i64(<vscale x 16 x float> [[VD]], <vscale x 16 x float> [[VS2]], <vscale x 16 x bfloat> [[VS1]], i64 7, i64 [[VL]])
+// CHECK-RV64-NEXT: ret <vscale x 16 x float> [[TMP0]]
+//
+vfloat32m8_t test_vfwsub_wv_bf16m4_f32m8_tu(vfloat32m8_t vd, vfloat32m8_t vs2,
+ vbfloat16m4_t vs1, size_t vl) {
+ return __riscv_vfwsub_wv_bf16m4_f32m8_tu(vd, vs2, vs1, vl);
+}
+
+// CHECK-RV64-LABEL: define dso_local <vscale x 16 x float> @test_vfwsub_wf_bf16_f32m8_tu(
+// CHECK-RV64-SAME: <vscale x 16 x float> [[VD:%.*]], <vscale x 16 x float> [[VS2:%.*]], bfloat noundef [[RS1:%.*]], i64 noundef [[VL:%.*]]) #[[ATTR0]] {
+// CHECK-RV64-NEXT: [[ENTRY:.*:]]
+// CHECK-RV64-NEXT: [[TMP0:%.*]] = call <vscale x 16 x float> @llvm.riscv.vfwsub.w.nxv16f32.bf16.i64(<vscale x 16 x float> [[VD]], <vscale x 16 x float> [[VS2]], bfloat [[RS1]], i64 7, i64 [[VL]])
+// CHECK-RV64-NEXT: ret <vscale x 16 x float> [[TMP0]]
+//
+vfloat32m8_t test_vfwsub_wf_bf16_f32m8_tu(vfloat32m8_t vd, vfloat32m8_t vs2,
+ __bf16 rs1, size_t vl) {
+ return __riscv_vfwsub_wf_bf16_f32m8_tu(vd, vs2, rs1, vl);
+}
+
+// CHECK-RV64-LABEL: define dso_local <vscale x 1 x float> @test_vfwsub_vv_bf16mf4_f32mf2_tum(
+// CHECK-RV64-SAME: <vscale x 1 x i1> [[VM:%.*]], <vscale x 1 x float> [[VD:%.*]], <vscale x 1 x bfloat> [[VS2:%.*]], <vscale x 1 x bfloat> [[VS1:%.*]], i64 noundef [[VL:%.*]]) #[[ATTR0]] {
+// CHECK-RV64-NEXT: [[ENTRY:.*:]]
+// CHECK-RV64-NEXT: [[TMP0:%.*]] = call <vscale x 1 x float> @llvm.riscv.vfwsub.mask.nxv1f32.nxv1bf16.nxv1bf16.i64(<vscale x 1 x float> [[VD]], <vscale x 1 x bfloat> [[VS2]], <vscale x 1 x bfloat> [[VS1]], <vscale x 1 x i1> [[VM]], i64 7, i64 [[VL]], i64 2)
+// CHECK-RV64-NEXT: ret <vscale x 1 x float> [[TMP0]]
+//
+vfloat32mf2_t test_vfwsub_vv_bf16mf4_f32mf2_tum(vbool64_t vm, vfloat32mf2_t vd,
+ vbfloat16mf4_t vs2,
+ vbfloat16mf4_t vs1, size_t vl) {
+ return __riscv_vfwsub_vv_bf16mf4_f32mf2_tum(vm, vd, vs2, vs1, vl);
+}
+
+// CHECK-RV64-LABEL: define dso_local <vscale x 1 x float> @test_vfwsub_vf_bf16mf4_f32mf2_tum(
+// CHECK-RV64-SAME: <vscale x 1 x i1> [[VM:%.*]], <vscale x 1 x float> [[VD:%.*]], <vscale x 1 x bfloat> [[VS2:%.*]], bfloat noundef [[RS1:%.*]], i64 noundef [[VL:%.*]]) #[[ATTR0]] {
+// CHECK-RV64-NEXT: [[ENTRY:.*:]]
+// CHECK-RV64-NEXT: [[TMP0:%.*]] = call <vscale x 1 x float> @llvm.riscv.vfwsub.mask.nxv1f32.nxv1bf16.bf16.i64(<vscale x 1 x float> [[VD]], <vscale x 1 x bfloat> [[VS2]], bfloat [[RS1]], <vscale x 1 x i1> [[VM]], i64 7, i64 [[VL]], i64 2)
+// CHECK-RV64-NEXT: ret <vscale x 1 x float> [[TMP0]]
+//
+vfloat32mf2_t test_vfwsub_vf_bf16mf4_f32mf2_tum(vbool64_t vm, vfloat32mf2_t vd,
+ vbfloat16mf4_t vs2, __bf16 rs1,
+ size_t vl) {
+ return __riscv_vfwsub_vf_bf16mf4_f32mf2_tum(vm, vd, vs2, rs1, vl);
+}
+
+// CHECK-RV64-LABEL: define dso_local <vscale x 1 x float> @test_vfwsub_wv_bf16mf4_f32mf2_tum(
+// CHECK-RV64-SAME: <vscale x 1 x i1> [[VM:%.*]], <vscale x 1 x float> [[VD:%.*]], <vscale x 1 x float> [[VS2:%.*]], <vscale x 1 x bfloat> [[VS1:%.*]], i64 noundef [[VL:%.*]]) #[[ATTR0]] {
+// CHECK-RV64-NEXT: [[ENTRY:.*:]]
+// CHECK-RV64-NEXT: [[TMP0:%.*]] = call <vscale x 1 x float> @llvm.riscv.vfwsub.w.mask.nxv1f32.nxv1bf16.i64(<vscale x 1 x float> [[VD]], <vscale x 1 x float> [[VS2]], <vscale x 1 x bfloat> [[VS1]], <vscale x 1 x i1> [[VM]], i64 7, i64 [[VL]], i64 2)
+// CHECK-RV64-NEXT: ret <vscale x 1 x float> [[TMP0]]
+//
+vfloat32mf2_t test_vfwsub_wv_bf16mf4_f32mf2_tum(vbool64_t vm, vfloat32mf2_t vd,
+ vfloat32mf2_t vs2,
+ vbfloat16mf4_t vs1, size_t vl) {
+ return __riscv_vfwsub_wv_bf16mf4_f32mf2_tum(vm, vd, vs2, vs1, vl);
+}
+
+// CHECK-RV64-LABEL: define dso_local <vscale x 1 x float> @test_vfwsub_wf_bf16_f32mf2_tum(
+// CHECK-RV64-SAME: <vscale x 1 x i1> [[VM:%.*]], <vscale x 1 x float> [[VD:%.*]], <vscale x 1 x float> [[VS2:%.*]], bfloat noundef [[RS1:%.*]], i64 noundef [[VL:%.*]]) #[[ATTR0]] {
+// CHECK-RV64-NEXT: [[ENTRY:.*:]]
+// CHECK-RV64-NEXT: [[TMP0:%.*]] = call <vscale x 1 x float> @llvm.riscv.vfwsub.w.mask.nxv1f32.bf16.i64(<vscale x 1 x float> [[VD]], <vscale x 1 x float> [[VS2]], bfloat [[RS1]], <vscale x 1 x i1> [[VM]], i64 7, i64 [[VL]], i64 2)
+// CHECK-RV64-NEXT: ret <vscale x 1 x float> [[TMP0]]
+//
+vfloat32mf2_t test_vfwsub_wf_bf16_f32mf2_tum(vbool64_t vm, vfloat32mf2_t vd,
+ vfloat32mf2_t vs2, __bf16 rs1,
+ size_t vl) {
+ return __riscv_vfwsub_wf_bf16_f32mf2_tum(vm, vd, vs2, rs1, vl);
+}
+
+// CHECK-RV64-LABEL: define dso_local <vscale x 2 x float> @test_vfwsub_vv_bf16mf2_f32m1_tum(
+// CHECK-RV64-SAME: <vscale x 2 x i1> [[VM:%.*]], <vscale x 2 x float> [[VD:%.*]], <vscale x 2 x bfloat> [[VS2:%.*]], <vscale x 2 x bfloat> [[VS1:%.*]], i64 noundef [[VL:%.*]]) #[[ATTR0]] {
+// CHECK-RV64-NEXT: [[ENTRY:.*:]]
+// CHECK-RV64-NEXT: [[TMP0:%.*]] = call <vscale x 2 x float> @llvm.riscv.vfwsub.mask.nxv2f32.nxv2bf16.nxv2bf16.i64(<vscale x 2 x float> [[VD]], <vscale x 2 x bfloat> [[VS2]], <vscale x 2 x bfloat> [[VS1]], <vscale x 2 x i1> [[VM]], i64 7, i64 [[VL]], i64 2)
+// CHECK-RV64-NEXT: ret <vscale x 2 x float> [[TMP0]]
+//
+vfloat32m1_t test_vfwsub_vv_bf16mf2_f32m1_tum(vbool32_t vm, vfloat32m1_t vd,
+ vbfloat16mf2_t vs2,
+ vbfloat16mf2_t vs1, size_t vl) {
+ return __riscv_vfwsub_vv_bf16mf2_f32m1_tum(vm, vd, vs2, vs1, vl);
+}
+
+// CHECK-RV64-LABEL: define dso_local <vscale x 2 x float> @test_vfwsub_vf_bf16mf2_f32m1_tum(
+// CHECK-RV64-SAME: <vscale x 2 x i1> [[VM:%.*]], <vscale x 2 x float> [[VD:%.*]], <vscale x 2 x bfloat> [[VS2:%.*]], bfloat noundef [[RS1:%.*]], i64 noundef [[VL:%.*]]) #[[ATTR0]] {
+// CHECK-RV64-NEXT: [[ENTRY:.*:]]
+// CHECK-RV64-NEXT: [[TMP0:%.*]] = call <vscale x 2 x float> @llvm.riscv.vfwsub.mask.nxv2f32.nxv2bf16.bf16.i64(<vscale x 2 x float> [[VD]], <vscale x 2 x bfloat> [[VS2]], bfloat [[RS1]], <vscale x 2 x i1> [[VM]], i64 7, i64 [[VL]], i64 2)
+// CHECK-RV64-NEXT: ret <vscale x 2 x float> [[TMP0]]
+//
+vfloat32m1_t test_vfwsub_vf_bf16mf2_f32m1_tum(vbool32_t vm, vfloat32m1_t vd,
+ vbfloat16mf2_t vs2, __bf16 rs1,
+ size_t vl) {
+ return __riscv_vfwsub_vf_bf16mf2_f32m1_tum(vm, vd, vs2, rs1, vl);
+}
+
+// CHECK-RV64-LABEL: define dso_local <vscale x 2 x float> @test_vfwsub_wv_bf16mf2_f32m1_tum(
+// CHECK-RV64-SAME: <vscale x 2 x i1> [[VM:%.*]], <vscale x 2 x float> [[VD:%.*]], <vscale x 2 x float> [[VS2:%.*]], <vscale x 2 x bfloat> [[VS1:%.*]], i64 noundef [[VL:%.*]]) #[[ATTR0]] {
+// CHECK-RV64-NEXT: [[ENTRY:.*:]]
+// CHECK-RV64-NEXT: [[TMP0:%.*]] = call <vscale x 2 x float> @llvm.riscv.vfwsub.w.mask.nxv2f32.nxv2bf16.i64(<vscale x 2 x float> [[VD]], <vscale x 2 x float> [[VS2]], <vscale x 2 x bfloat> [[VS1]], <vscale x 2 x i1> [[VM]], i64 7, i64 [[VL]], i64 2)
+// CHECK-RV64-NEXT: ret <vscale x 2 x float> [[TMP0]]
+//
+vfloat32m1_t test_vfwsub_wv_bf16mf2_f32m1_tum(vbool32_t vm, vfloat32m1_t vd,
+ vfloat32m1_t vs2,
+ vbfloat16mf2_t vs1, size_t vl) {
+ return __riscv_vfwsub_wv_bf16mf2_f32m1_tum(vm, vd, vs2, vs1, vl);
+}
+
+// CHECK-RV64-LABEL: define dso_local <vscale x 2 x float> @test_vfwsub_wf_bf16_f32m1_tum(
+// CHECK-RV64-SAME: <vscale x 2 x i1> [[VM:%.*]], <vscale x 2 x float> [[VD:%.*]], <vscale x 2 x float> [[VS2:%.*]], bfloat noundef [[RS1:%.*]], i64 noundef [[VL:%.*]]) #[[ATTR0]] {
+// CHECK-RV64-NEXT: [[ENTRY:.*:]]
+// CHECK-RV64-NEXT: [[TMP0:%.*]] = call <vscale x 2 x float> @llvm.riscv.vfwsub.w.mask.nxv2f32.bf16.i64(<vscale x 2 x float> [[VD]], <vscale x 2 x float> [[VS2]], bfloat [[RS1]], <vscale x 2 x i1> [[VM]], i64 7, i64 [[VL]], i64 2)
+// CHECK-RV64-NEXT: ret <vscale x 2 x float> [[TMP0]]
+//
+vfloat32m1_t test_vfwsub_wf_bf16_f32m1_tum(vbool32_t vm, vfloat32m1_t vd,
+ vfloat32m1_t vs2, __bf16 rs1,
+ size_t vl) {
+ return __riscv_vfwsub_wf_bf16_f32m1_tum(vm, vd, vs2, rs1, vl);
+}
+
+// CHECK-RV64-LABEL: define dso_local <vscale x 4 x float> @test_vfwsub_vv_bf16m1_f32m2_tum(
+// CHECK-RV64-SAME: <vscale x 4 x i1> [[VM:%.*]], <vscale x 4 x float> [[VD:%.*]], <vscale x 4 x bfloat> [[VS2:%.*]], <vscale x 4 x bfloat> [[VS1:%.*]], i64 noundef [[VL:%.*]]) #[[ATTR0]] {
+// CHECK-RV64-NEXT: [[ENTRY:.*:]]
+// CHECK-RV64-NEXT: [[TMP0:%.*]] = call <vscale x 4 x float> @llvm.riscv.vfwsub.mask.nxv4f32.nxv4bf16.nxv4bf16.i64(<vscale x 4 x float> [[VD]], <vscale x 4 x bfloat> [[VS2]], <vscale x 4 x bfloat> [[VS1]], <vscale x 4 x i1> [[VM]], i64 7, i64 [[VL]], i64 2)
+// CHECK-RV64-NEXT: ret <vscale x 4 x float> [[TMP0]]
+//
+vfloat32m2_t test_vfwsub_vv_bf16m1_f32m2_tum(vbool16_t vm, vfloat32m2_t vd,
+ vbfloat16m1_t vs2,
+ vbfloat16m1_t vs1, size_t vl) {
+ return __riscv_vfwsub_vv_bf16m1_f32m2_tum(vm, vd, vs2, vs1, vl);
+}
+
+// CHECK-RV64-LABEL: define dso_local <vscale x 4 x float> @test_vfwsub_vf_bf16m1_f32m2_tum(
+// CHECK-RV64-SAME: <vscale x 4 x i1> [[VM:%.*]], <vscale x 4 x float> [[VD:%.*]], <vscale x 4 x bfloat> [[VS2:%.*]], bfloat noundef [[RS1:%.*]], i64 noundef [[VL:%.*]]) #[[ATTR0]] {
+// CHECK-RV64-NEXT: [[ENTRY:.*:]]
+// CHECK-RV64-NEXT: [[TMP0:%.*]] = call <vscale x 4 x float> @llvm.riscv.vfwsub.mask.nxv4f32.nxv4bf16.bf16.i64(<vscale x 4 x float> [[VD]], <vscale x 4 x bfloat> [[VS2]], bfloat [[RS1]], <vscale x 4 x i1> [[VM]], i64 7, i64 [[VL]], i64 2)
+// CHECK-RV64-NEXT: ret <vscale x 4 x float> [[TMP0]]
+//
+vfloat32m2_t test_vfwsub_vf_bf16m1_f32m2_tum(vbool16_t vm, vfloat32m2_t vd,
+ vbfloat16m1_t vs2, __bf16 rs1,
+ size_t vl) {
+ return __riscv_vfwsub_vf_bf16m1_f32m2_tum(vm, vd, vs2, rs1, vl);
+}
+
+// CHECK-RV64-LABEL: define dso_local <vscale x 4 x float> @test_vfwsub_wv_bf16m1_f32m2_tum(
+// CHECK-RV64-SAME: <vscale x 4 x i1> [[VM:%.*]], <vscale x 4 x float> [[VD:%.*]], <vscale x 4 x float> [[VS2:%.*]], <vscale x 4 x bfloat> [[VS1:%.*]], i64 noundef [[VL:%.*]]) #[[ATTR0]] {
+// CHECK-RV64-NEXT: [[ENTRY:.*:]]
+// CHECK-RV64-NEXT: [[TMP0:%.*]] = call <vscale x 4 x float> @llvm.riscv.vfwsub.w.mask.nxv4f32.nxv4bf16.i64(<vscale x 4 x float> [[VD]], <vscale x 4 x float> [[VS2]], <vscale x 4 x bfloat> [[VS1]], <vscale x 4 x i1> [[VM]], i64 7, i64 [[VL]], i64 2)
+// CHECK-RV64-NEXT: ret <vscale x 4 x float> [[TMP0]]
+//
+vfloat32m2_t test_vfwsub_wv_bf16m1_f32m2_tum(vbool16_t vm, vfloat32m2_t vd,
+ vfloat32m2_t vs2,
+ vbfloat16m1_t vs1, size_t vl) {
+ return __riscv_vfwsub_wv_bf16m1_f32m2_tum(vm, vd, vs2, vs1, vl);
+}
+
+// CHECK-RV64-LABEL: define dso_local <vscale x 4 x float> @test_vfwsub_wf_bf16_f32m2_tum(
+// CHECK-RV64-SAME: <vscale x 4 x i1> [[VM:%.*]], <vscale x 4 x float> [[VD:%.*]], <vscale x 4 x float> [[VS2:%.*]], bfloat noundef [[RS1:%.*]], i64 noundef [[VL:%.*]]) #[[ATTR0]] {
+// CHECK-RV64-NEXT: [[ENTRY:.*:]]
+// CHECK-RV64-NEXT: [[TMP0:%.*]] = call <vscale x 4 x float> @llvm.riscv.vfwsub.w.mask.nxv4f32.bf16.i64(<vscale x 4 x float> [[VD]], <vscale x 4 x float> [[VS2]], bfloat [[RS1]], <vscale x 4 x i1> [[VM]], i64 7, i64 [[VL]], i64 2)
+// CHECK-RV64-NEXT: ret <vscale x 4 x float> [[TMP0]]
+//
+vfloat32m2_t test_vfwsub_wf_bf16_f32m2_tum(vbool16_t vm, vfloat32m2_t vd,
+ vfloat32m2_t vs2, __bf16 rs1,
+ size_t vl) {
+ return __riscv_vfwsub_wf_bf16_f32m2_tum(vm, vd, vs2, rs1, vl);
+}
+
+// CHECK-RV64-LABEL: define dso_local <vscale x 8 x float> @test_vfwsub_vv_bf16m2_f32m4_tum(
+// CHECK-RV64-SAME: <vscale x 8 x i1> [[VM:%.*]], <vscale x 8 x float> [[VD:%.*]], <vscale x 8 x bfloat> [[VS2:%.*]], <vscale x 8 x bfloat> [[VS1:%.*]], i64 noundef [[VL:%.*]]) #[[ATTR0]] {
+// CHECK-RV64-NEXT: [[ENTRY:.*:]]
+// CHECK-RV64-NEXT: [[TMP0:%.*]] = call <vscale x 8 x float> @llvm.riscv.vfwsub.mask.nxv8f32.nxv8bf16.nxv8bf16.i64(<vscale x 8 x float> [[VD]], <vscale x 8 x bfloat> [[VS2]], <vscale x 8 x bfloat> [[VS1]], <vscale x 8 x i1> [[VM]], i64 7, i64 [[VL]], i64 2)
+// CHECK-RV64-NEXT: ret <vscale x 8 x float> [[TMP0]]
+//
+vfloat32m4_t test_vfwsub_vv_bf16m2_f32m4_tum(vbool8_t vm, vfloat32m4_t vd,
+ vbfloat16m2_t vs2,
+ vbfloat16m2_t vs1, size_t vl) {
+ return __riscv_vfwsub_vv_bf16m2_f32m4_tum(vm, vd, vs2, vs1, vl);
+}
+
+// CHECK-RV64-LABEL: define dso_local <vscale x 8 x float> @test_vfwsub_vf_bf16m2_f32m4_tum(
+// CHECK-RV64-SAME: <vscale x 8 x i1> [[VM:%.*]], <vscale x 8 x float> [[VD:%.*]], <vscale x 8 x bfloat> [[VS2:%.*]], bfloat noundef [[RS1:%.*]], i64 noundef [[VL:%.*]]) #[[ATTR0]] {
+// CHECK-RV64-NEXT: [[ENTRY:.*:]]
+// CHECK-RV64-NEXT: [[TMP0:%.*]] = call <vscale x 8 x float> @llvm.riscv.vfwsub.mask.nxv8f32.nxv8bf16.bf16.i64(<vscale x 8 x float> [[VD]], <vscale x 8 x bfloat> [[VS2]], bfloat [[RS1]], <vscale x 8 x i1> [[VM]], i64 7, i64 [[VL]], i64 2)
+// CHECK-RV64-NEXT: ret <vscale x 8 x float> [[TMP0]]
+//
+vfloat32m4_t test_vfwsub_vf_bf16m2_f32m4_tum(vbool8_t vm, vfloat32m4_t vd,
+ vbfloat16m2_t vs2, __bf16 rs1,
+ size_t vl) {
+ return __riscv_vfwsub_vf_bf16m2_f32m4_tum(vm, vd, vs2, rs1, vl);
+}
+
+// CHECK-RV64-LABEL: define dso_local <vscale x 8 x float> @test_vfwsub_wv_bf16m2_f32m4_tum(
+// CHECK-RV64-SAME: <vscale x 8 x i1> [[VM:%.*]], <vscale x 8 x float> [[VD:%.*]], <vscale x 8 x float> [[VS2:%.*]], <vscale x 8 x bfloat> [[VS1:%.*]], i64 noundef [[VL:%.*]]) #[[ATTR0]] {
+// CHECK-RV64-NEXT: [[ENTRY:.*:]]
+// CHECK-RV64-NEXT: [[TMP0:%.*]] = call <vscale x 8 x float> @llvm.riscv.vfwsub.w.mask.nxv8f32.nxv8bf16.i64(<vscale x 8 x float> [[VD]], <vscale x 8 x float> [[VS2]], <vscale x 8 x bfloat> [[VS1]], <vscale x 8 x i1> [[VM]], i64 7, i64 [[VL]], i64 2)
+// CHECK-RV64-NEXT: ret <vscale x 8 x float> [[TMP0]]
+//
+vfloat32m4_t test_vfwsub_wv_bf16m2_f32m4_tum(vbool8_t vm, vfloat32m4_t vd,
+ vfloat32m4_t vs2,
+ vbfloat16m2_t vs1, size_t vl) {
+ return __riscv_vfwsub_wv_bf16m2_f32m4_tum(vm, vd, vs2, vs1, vl);
+}
+
+// CHECK-RV64-LABEL: define dso_local <vscale x 8 x float> @test_vfwsub_wf_bf16_f32m4_tum(
+// CHECK-RV64-SAME: <vscale x 8 x i1> [[VM:%.*]], <vscale x 8 x float> [[VD:%.*]], <vscale x 8 x float> [[VS2:%.*]], bfloat noundef [[RS1:%.*]], i64 noundef [[VL:%.*]]) #[[ATTR0]] {
+// CHECK-RV64-NEXT: [[ENTRY:.*:]]
+// CHECK-RV64-NEXT: [[TMP0:%.*]] = call <vscale x 8 x float> @llvm.riscv.vfwsub.w.mask.nxv8f32.bf16.i64(<vscale x 8 x float> [[VD]], <vscale x 8 x float> [[VS2]], bfloat [[RS1]], <vscale x 8 x i1> [[VM]], i64 7, i64 [[VL]], i64 2)
+// CHECK-RV64-NEXT: ret <vscale x 8 x float> [[TMP0]]
+//
+vfloat32m4_t test_vfwsub_wf_bf16_f32m4_tum(vbool8_t vm, vfloat32m4_t vd,
+ vfloat32m4_t vs2, __bf16 rs1,
+ size_t vl) {
+ return __riscv_vfwsub_wf_bf16_f32m4_tum(vm, vd, vs2, rs1, vl);
+}
+
+// CHECK-RV64-LABEL: define dso_local <vscale x 16 x float> @test_vfwsub_vv_bf16m4_f32m8_tum(
+// CHECK-RV64-SAME: <vscale x 16 x i1> [[VM:%.*]], <vscale x 16 x float> [[VD:%.*]], <vscale x 16 x bfloat> [[VS2:%.*]], <vscale x 16 x bfloat> [[VS1:%.*]], i64 noundef [[VL:%.*]]) #[[ATTR0]] {
+// CHECK-RV64-NEXT: [[ENTRY:.*:]]
+// CHECK-RV64-NEXT: [[TMP0:%.*]] = call <vscale x 16 x float> @llvm.riscv.vfwsub.mask.nxv16f32.nxv16bf16.nxv16bf16.i64(<vscale x 16 x float> [[VD]], <vscale x 16 x bfloat> [[VS2]], <vscale x 16 x bfloat> [[VS1]], <vscale x 16 x i1> [[VM]], i64 7, i64 [[VL]], i64 2)
+// CHECK-RV64-NEXT: ret <vscale x 16 x float> [[TMP0]]
+//
+vfloat32m8_t test_vfwsub_vv_bf16m4_f32m8_tum(vbool4_t vm, vfloat32m8_t vd,
+ vbfloat16m4_t vs2,
+ vbfloat16m4_t vs1, size_t vl) {
+ return __riscv_vfwsub_vv_bf16m4_f32m8_tum(vm, vd, vs2, vs1, vl);
+}
+
+// CHECK-RV64-LABEL: define dso_local <vscale x 16 x float> @test_vfwsub_vf_bf16m4_f32m8_tum(
+// CHECK-RV64-SAME: <vscale x 16 x i1> [[VM:%.*]], <vscale x 16 x float> [[VD:%.*]], <vscale x 16 x bfloat> [[VS2:%.*]], bfloat noundef [[RS1:%.*]], i64 noundef [[VL:%.*]]) #[[ATTR0]] {
+// CHECK-RV64-NEXT: [[ENTRY:.*:]]
+// CHECK-RV64-NEXT: [[TMP0:%.*]] = call <vscale x 16 x float> @llvm.riscv.vfwsub.mask.nxv16f32.nxv16bf16.bf16.i64(<vscale x 16 x float> [[VD]], <vscale x 16 x bfloat> [[VS2]], bfloat [[RS1]], <vscale x 16 x i1> [[VM]], i64 7, i64 [[VL]], i64 2)
+// CHECK-RV64-NEXT: ret <vscale x 16 x float> [[TMP0]]
+//
+vfloat32m8_t test_vfwsub_vf_bf16m4_f32m8_tum(vbool4_t vm, vfloat32m8_t vd,
+ vbfloat16m4_t vs2, __bf16 rs1,
+ size_t vl) {
+ return __riscv_vfwsub_vf_bf16m4_f32m8_tum(vm, vd, vs2, rs1, vl);
+}
+
+// CHECK-RV64-LABEL: define dso_local <vscale x 16 x float> @test_vfwsub_wv_bf16m4_f32m8_tum(
+// CHECK-RV64-SAME: <vscale x 16 x i1> [[VM:%.*]], <vscale x 16 x float> [[VD:%.*]], <vscale x 16 x float> [[VS2:%.*]], <vscale x 16 x bfloat> [[VS1:%.*]], i64 noundef [[VL:%.*]]) #[[ATTR0]] {
+// CHECK-RV64-NEXT: [[ENTRY:.*:]]
+// CHECK-RV64-NEXT: [[TMP0:%.*]] = call <vscale x 16 x float> @llvm.riscv.vfwsub.w.mask.nxv16f32.nxv16bf16.i64(<vscale x 16 x float> [[VD]], <vscale x 16 x float> [[VS2]], <vscale x 16 x bfloat> [[VS1]], <vscale x 16 x i1> [[VM]], i64 7, i64 [[VL]], i64 2)
+// CHECK-RV64-NEXT: ret <vscale x 16 x float> [[TMP0]]
+//
+vfloat32m8_t test_vfwsub_wv_bf16m4_f32m8_tum(vbool4_t vm, vfloat32m8_t vd,
+ vfloat32m8_t vs2,
+ vbfloat16m4_t vs1, size_t vl) {
+ return __riscv_vfwsub_wv_bf16m4_f32m8_tum(vm, vd, vs2, vs1, vl);
+}
+
+// CHECK-RV64-LABEL: define dso_local <vscale x 16 x float> @test_vfwsub_wf_bf16_f32m8_tum(
+// CHECK-RV64-SAME: <vscale x 16 x i1> [[VM:%.*]], <vscale x 16 x float> [[VD:%.*]], <vscale x 16 x float> [[VS2:%.*]], bfloat noundef [[RS1:%.*]], i64 noundef [[VL:%.*]]) #[[ATTR0]] {
+// CHECK-RV64-NEXT: [[ENTRY:.*:]]
+// CHECK-RV64-NEXT: [[TMP0:%.*]] = call <vscale x 16 x float> @llvm.riscv.vfwsub.w.mask.nxv16f32.bf16.i64(<vscale x 16 x float> [[VD]], <vscale x 16 x float> [[VS2]], bfloat [[RS1]], <vscale x 16 x i1> [[VM]], i64 7, i64 [[VL]], i64 2)
+// CHECK-RV64-NEXT: ret <vscale x 16 x float> [[TMP0]]
+//
+vfloat32m8_t test_vfwsub_wf_bf16_f32m8_tum(vbool4_t vm, vfloat32m8_t vd,
+ vfloat32m8_t vs2, __bf16 rs1,
+ size_t vl) {
+ return __riscv_vfwsub_wf_bf16_f32m8_tum(vm, vd, vs2, rs1, vl);
+}
+
+// CHECK-RV64-LABEL: define dso_local <vscale x 1 x float> @test_vfwsub_vv_bf16mf4_f32mf2_tumu(
+// CHECK-RV64-SAME: <vscale x 1 x i1> [[VM:%.*]], <vscale x 1 x float> [[VD:%.*]], <vscale x 1 x bfloat> [[VS2:%.*]], <vscale x 1 x bfloat> [[VS1:%.*]], i64 noundef [[VL:%.*]]) #[[ATTR0]] {
+// CHECK-RV64-NEXT: [[ENTRY:.*:]]
+// CHECK-RV64-NEXT: [[TMP0:%.*]] = call <vscale x 1 x float> @llvm.riscv.vfwsub.mask.nxv1f32.nxv1bf16.nxv1bf16.i64(<vscale x 1 x float> [[VD]], <vscale x 1 x bfloat> [[VS2]], <vscale x 1 x bfloat> [[VS1]], <vscale x 1 x i1> [[VM]], i64 7, i64 [[VL]], i64 0)
+// CHECK-RV64-NEXT: ret <vscale x 1 x float> [[TMP0]]
+//
+vfloat32mf2_t test_vfwsub_vv_bf16mf4_f32mf2_tumu(vbool64_t vm, vfloat32mf2_t vd,
+ vbfloat16mf4_t vs2,
+ vbfloat16mf4_t vs1,
+ size_t vl) {
+ return __riscv_vfwsub_vv_bf16mf4_f32mf2_tumu(vm, vd, vs2, vs1, vl);
+}
+
+// CHECK-RV64-LABEL: define dso_local <vscale x 1 x float> @test_vfwsub_vf_bf16mf4_f32mf2_tumu(
+// CHECK-RV64-SAME: <vscale x 1 x i1> [[VM:%.*]], <vscale x 1 x float> [[VD:%.*]], <vscale x 1 x bfloat> [[VS2:%.*]], bfloat noundef [[RS1:%.*]], i64 noundef [[VL:%.*]]) #[[ATTR0]] {
+// CHECK-RV64-NEXT: [[ENTRY:.*:]]
+// CHECK-RV64-NEXT: [[TMP0:%.*]] = call <vscale x 1 x float> @llvm.riscv.vfwsub.mask.nxv1f32.nxv1bf16.bf16.i64(<vscale x 1 x float> [[VD]], <vscale x 1 x bfloat> [[VS2]], bfloat [[RS1]], <vscale x 1 x i1> [[VM]], i64 7, i64 [[VL]], i64 0)
+// CHECK-RV64-NEXT: ret <vscale x 1 x float> [[TMP0]]
+//
+vfloat32mf2_t test_vfwsub_vf_bf16mf4_f32mf2_tumu(vbool64_t vm, vfloat32mf2_t vd,
+ vbfloat16mf4_t vs2, __bf16 rs1,
+ size_t vl) {
+ return __riscv_vfwsub_vf_bf16mf4_f32mf2_tumu(vm, vd, vs2, rs1, vl);
+}
+
+// CHECK-RV64-LABEL: define dso_local <vscale x 1 x float> @test_vfwsub_wv_bf16mf4_f32mf2_tumu(
+// CHECK-RV64-SAME: <vscale x 1 x i1> [[VM:%.*]], <vscale x 1 x float> [[VD:%.*]], <vscale x 1 x float> [[VS2:%.*]], <vscale x 1 x bfloat> [[VS1:%.*]], i64 noundef [[VL:%.*]]) #[[ATTR0]] {
+// CHECK-RV64-NEXT: [[ENTRY:.*:]]
+// CHECK-RV64-NEXT: [[TMP0:%.*]] = call <vscale x 1 x float> @llvm.riscv.vfwsub.w.mask.nxv1f32.nxv1bf16.i64(<vscale x 1 x float> [[VD]], <vscale x 1 x float> [[VS2]], <vscale x 1 x bfloat> [[VS1]], <vscale x 1 x i1> [[VM]], i64 7, i64 [[VL]], i64 0)
+// CHECK-RV64-NEXT: ret <vscale x 1 x float> [[TMP0]]
+//
+vfloat32mf2_t test_vfwsub_wv_bf16mf4_f32mf2_tumu(vbool64_t vm, vfloat32mf2_t vd,
+ vfloat32mf2_t vs2,
+ vbfloat16mf4_t vs1,
+ size_t vl) {
+ return __riscv_vfwsub_wv_bf16mf4_f32mf2_tumu(vm, vd, vs2, vs1, vl);
+}
+
+// CHECK-RV64-LABEL: define dso_local <vscale x 1 x float> @test_vfwsub_wf_bf16_f32mf2_tumu(
+// CHECK-RV64-SAME: <vscale x 1 x i1> [[VM:%.*]], <vscale x 1 x float> [[VD:%.*]], <vscale x 1 x float> [[VS2:%.*]], bfloat noundef [[RS1:%.*]], i64 noundef [[VL:%.*]]) #[[ATTR0]] {
+// CHECK-RV64-NEXT: [[ENTRY:.*:]]
+// CHECK-RV64-NEXT: [[TMP0:%.*]] = call <vscale x 1 x float> @llvm.riscv.vfwsub.w.mask.nxv1f32.bf16.i64(<vscale x 1 x float> [[VD]], <vscale x 1 x float> [[VS2]], bfloat [[RS1]], <vscale x 1 x i1> [[VM]], i64 7, i64 [[VL]], i64 0)
+// CHECK-RV64-NEXT: ret <vscale x 1 x float> [[TMP0]]
+//
+vfloat32mf2_t test_vfwsub_wf_bf16_f32mf2_tumu(vbool64_t vm, vfloat32mf2_t vd,
+ vfloat32mf2_t vs2, __bf16 rs1,
+ size_t vl) {
+ return __riscv_vfwsub_wf_bf16_f32mf2_tumu(vm, vd, vs2, rs1, vl);
+}
+
+// CHECK-RV64-LABEL: define dso_local <vscale x 2 x float> @test_vfwsub_vv_bf16mf2_f32m1_tumu(
+// CHECK-RV64-SAME: <vscale x 2 x i1> [[VM:%.*]], <vscale x 2 x float> [[VD:%.*]], <vscale x 2 x bfloat> [[VS2:%.*]], <vscale x 2 x bfloat> [[VS1:%.*]], i64 noundef [[VL:%.*]]) #[[ATTR0]] {
+// CHECK-RV64-NEXT: [[ENTRY:.*:]]
+// CHECK-RV64-NEXT: [[TMP0:%.*]] = call <vscale x 2 x float> @llvm.riscv.vfwsub.mask.nxv2f32.nxv2bf16.nxv2bf16.i64(<vscale x 2 x float> [[VD]], <vscale x 2 x bfloat> [[VS2]], <vscale x 2 x bfloat> [[VS1]], <vscale x 2 x i1> [[VM]], i64 7, i64 [[VL]], i64 0)
+// CHECK-RV64-NEXT: ret <vscale x 2 x float> [[TMP0]]
+//
+vfloat32m1_t test_vfwsub_vv_bf16mf2_f32m1_tumu(vbool32_t vm, vfloat32m1_t vd,
+ vbfloat16mf2_t vs2,
+ vbfloat16mf2_t vs1, size_t vl) {
+ return __riscv_vfwsub_vv_bf16mf2_f32m1_tumu(vm, vd, vs2, vs1, vl);
+}
+
+// CHECK-RV64-LABEL: define dso_local <vscale x 2 x float> @test_vfwsub_vf_bf16mf2_f32m1_tumu(
+// CHECK-RV64-SAME: <vscale x 2 x i1> [[VM:%.*]], <vscale x 2 x float> [[VD:%.*]], <vscale x 2 x bfloat> [[VS2:%.*]], bfloat noundef [[RS1:%.*]], i64 noundef [[VL:%.*]]) #[[ATTR0]] {
+// CHECK-RV64-NEXT: [[ENTRY:.*:]]
+// CHECK-RV64-NEXT: [[TMP0:%.*]] = call <vscale x 2 x float> @llvm.riscv.vfwsub.mask.nxv2f32.nxv2bf16.bf16.i64(<vscale x 2 x float> [[VD]], <vscale x 2 x bfloat> [[VS2]], bfloat [[RS1]], <vscale x 2 x i1> [[VM]], i64 7, i64 [[VL]], i64 0)
+// CHECK-RV64-NEXT: ret <vscale x 2 x float> [[TMP0]]
+//
+vfloat32m1_t test_vfwsub_vf_bf16mf2_f32m1_tumu(vbool32_t vm, vfloat32m1_t vd,
+ vbfloat16mf2_t vs2, __bf16 rs1,
+ size_t vl) {
+ return __riscv_vfwsub_vf_bf16mf2_f32m1_tumu(vm, vd, vs2, rs1, vl);
+}
+
+// CHECK-RV64-LABEL: define dso_local <vscale x 2 x float> @test_vfwsub_wv_bf16mf2_f32m1_tumu(
+// CHECK-RV64-SAME: <vscale x 2 x i1> [[VM:%.*]], <vscale x 2 x float> [[VD:%.*]], <vscale x 2 x float> [[VS2:%.*]], <vscale x 2 x bfloat> [[VS1:%.*]], i64 noundef [[VL:%.*]]) #[[ATTR0]] {
+// CHECK-RV64-NEXT: [[ENTRY:.*:]]
+// CHECK-RV64-NEXT: [[TMP0:%.*]] = call <vscale x 2 x float> @llvm.riscv.vfwsub.w.mask.nxv2f32.nxv2bf16.i64(<vscale x 2 x float> [[VD]], <vscale x 2 x float> [[VS2]], <vscale x 2 x bfloat> [[VS1]], <vscale x 2 x i1> [[VM]], i64 7, i64 [[VL]], i64 0)
+// CHECK-RV64-NEXT: ret <vscale x 2 x float> [[TMP0]]
+//
+vfloat32m1_t test_vfwsub_wv_bf16mf2_f32m1_tumu(vbool32_t vm, vfloat32m1_t vd,
+ vfloat32m1_t vs2,
+ vbfloat16mf2_t vs1, size_t vl) {
+ return __riscv_vfwsub_wv_bf16mf2_f32m1_tumu(vm, vd, vs2, vs1, vl);
+}
+
+// CHECK-RV64-LABEL: define dso_local <vscale x 2 x float> @test_vfwsub_wf_bf16_f32m1_tumu(
+// CHECK-RV64-SAME: <vscale x 2 x i1> [[VM:%.*]], <vscale x 2 x float> [[VD:%.*]], <vscale x 2 x float> [[VS2:%.*]], bfloat noundef [[RS1:%.*]], i64 noundef [[VL:%.*]]) #[[ATTR0]] {
+// CHECK-RV64-NEXT: [[ENTRY:.*:]]
+// CHECK-RV64-NEXT: [[TMP0:%.*]] = call <vscale x 2 x float> @llvm.riscv.vfwsub.w.mask.nxv2f32.bf16.i64(<vscale x 2 x float> [[VD]], <vscale x 2 x float> [[VS2]], bfloat [[RS1]], <vscale x 2 x i1> [[VM]], i64 7, i64 [[VL]], i64 0)
+// CHECK-RV64-NEXT: ret <vscale x 2 x float> [[TMP0]]
+//
+vfloat32m1_t test_vfwsub_wf_bf16_f32m1_tumu(vbool32_t vm, vfloat32m1_t vd,
+ vfloat32m1_t vs2, __bf16 rs1,
+ size_t vl) {
+ return __riscv_vfwsub_wf_bf16_f32m1_tumu(vm, vd, vs2, rs1, vl);
+}
+
+// CHECK-RV64-LABEL: define dso_local <vscale x 4 x float> @test_vfwsub_vv_bf16m1_f32m2_tumu(
+// CHECK-RV64-SAME: <vscale x 4 x i1> [[VM:%.*]], <vscale x 4 x float> [[VD:%.*]], <vscale x 4 x bfloat> [[VS2:%.*]], <vscale x 4 x bfloat> [[VS1:%.*]], i64 noundef [[VL:%.*]]) #[[ATTR0]] {
+// CHECK-RV64-NEXT: [[ENTRY:.*:]]
+// CHECK-RV64-NEXT: [[TMP0:%.*]] = call <vscale x 4 x float> @llvm.riscv.vfwsub.mask.nxv4f32.nxv4bf16.nxv4bf16.i64(<vscale x 4 x float> [[VD]], <vscale x 4 x bfloat> [[VS2]], <vscale x 4 x bfloat> [[VS1]], <vscale x 4 x i1> [[VM]], i64 7, i64 [[VL]], i64 0)
+// CHECK-RV64-NEXT: ret <vscale x 4 x float> [[TMP0]]
+//
+vfloat32m2_t test_vfwsub_vv_bf16m1_f32m2_tumu(vbool16_t vm, vfloat32m2_t vd,
+ vbfloat16m1_t vs2,
+ vbfloat16m1_t vs1, size_t vl) {
+ return __riscv_vfwsub_vv_bf16m1_f32m2_tumu(vm, vd, vs2, vs1, vl);
+}
+
+// CHECK-RV64-LABEL: define dso_local <vscale x 4 x float> @test_vfwsub_vf_bf16m1_f32m2_tumu(
+// CHECK-RV64-SAME: <vscale x 4 x i1> [[VM:%.*]], <vscale x 4 x float> [[VD:%.*]], <vscale x 4 x bfloat> [[VS2:%.*]], bfloat noundef [[RS1:%.*]], i64 noundef [[VL:%.*]]) #[[ATTR0]] {
+// CHECK-RV64-NEXT: [[ENTRY:.*:]]
+// CHECK-RV64-NEXT: [[TMP0:%.*]] = call <vscale x 4 x float> @llvm.riscv.vfwsub.mask.nxv4f32.nxv4bf16.bf16.i64(<vscale x 4 x float> [[VD]], <vscale x 4 x bfloat> [[VS2]], bfloat [[RS1]], <vscale x 4 x i1> [[VM]], i64 7, i64 [[VL]], i64 0)
+// CHECK-RV64-NEXT: ret <vscale x 4 x float> [[TMP0]]
+//
+vfloat32m2_t test_vfwsub_vf_bf16m1_f32m2_tumu(vbool16_t vm, vfloat32m2_t vd,
+ vbfloat16m1_t vs2, __bf16 rs1,
+ size_t vl) {
+ return __riscv_vfwsub_vf_bf16m1_f32m2_tumu(vm, vd, vs2, rs1, vl);
+}
+
+// CHECK-RV64-LABEL: define dso_local <vscale x 4 x float> @test_vfwsub_wv_bf16m1_f32m2_tumu(
+// CHECK-RV64-SAME: <vscale x 4 x i1> [[VM:%.*]], <vscale x 4 x float> [[VD:%.*]], <vscale x 4 x float> [[VS2:%.*]], <vscale x 4 x bfloat> [[VS1:%.*]], i64 noundef [[VL:%.*]]) #[[ATTR0]] {
+// CHECK-RV64-NEXT: [[ENTRY:.*:]]
+// CHECK-RV64-NEXT: [[TMP0:%.*]] = call <vscale x 4 x float> @llvm.riscv.vfwsub.w.mask.nxv4f32.nxv4bf16.i64(<vscale x 4 x float> [[VD]], <vscale x 4 x float> [[VS2]], <vscale x 4 x bfloat> [[VS1]], <vscale x 4 x i1> [[VM]], i64 7, i64 [[VL]], i64 0)
+// CHECK-RV64-NEXT: ret <vscale x 4 x float> [[TMP0]]
+//
+vfloat32m2_t test_vfwsub_wv_bf16m1_f32m2_tumu(vbool16_t vm, vfloat32m2_t vd,
+ vfloat32m2_t vs2,
+ vbfloat16m1_t vs1, size_t vl) {
+ return __riscv_vfwsub_wv_bf16m1_f32m2_tumu(vm, vd, vs2, vs1, vl);
+}
+
+// CHECK-RV64-LABEL: define dso_local <vscale x 4 x float> @test_vfwsub_wf_bf16_f32m2_tumu(
+// CHECK-RV64-SAME: <vscale x 4 x i1> [[VM:%.*]], <vscale x 4 x float> [[VD:%.*]], <vscale x 4 x float> [[VS2:%.*]], bfloat noundef [[RS1:%.*]], i64 noundef [[VL:%.*]]) #[[ATTR0]] {
+// CHECK-RV64-NEXT: [[ENTRY:.*:]]
+// CHECK-RV64-NEXT: [[TMP0:%.*]] = call <vscale x 4 x float> @llvm.riscv.vfwsub.w.mask.nxv4f32.bf16.i64(<vscale x 4 x float> [[VD]], <vscale x 4 x float> [[VS2]], bfloat [[RS1]], <vscale x 4 x i1> [[VM]], i64 7, i64 [[VL]], i64 0)
+// CHECK-RV64-NEXT: ret <vscale x 4 x float> [[TMP0]]
+//
+vfloat32m2_t test_vfwsub_wf_bf16_f32m2_tumu(vbool16_t vm, vfloat32m2_t vd,
+ vfloat32m2_t vs2, __bf16 rs1,
+ size_t vl) {
+ return __riscv_vfwsub_wf_bf16_f32m2_tumu(vm, vd, vs2, rs1, vl);
+}
+
+// CHECK-RV64-LABEL: define dso_local <vscale x 8 x float> @test_vfwsub_vv_bf16m2_f32m4_tumu(
+// CHECK-RV64-SAME: <vscale x 8 x i1> [[VM:%.*]], <vscale x 8 x float> [[VD:%.*]], <vscale x 8 x bfloat> [[VS2:%.*]], <vscale x 8 x bfloat> [[VS1:%.*]], i64 noundef [[VL:%.*]]) #[[ATTR0]] {
+// CHECK-RV64-NEXT: [[ENTRY:.*:]]
+// CHECK-RV64-NEXT: [[TMP0:%.*]] = call <vscale x 8 x float> @llvm.riscv.vfwsub.mask.nxv8f32.nxv8bf16.nxv8bf16.i64(<vscale x 8 x float> [[VD]], <vscale x 8 x bfloat> [[VS2]], <vscale x 8 x bfloat> [[VS1]], <vscale x 8 x i1> [[VM]], i64 7, i64 [[VL]], i64 0)
+// CHECK-RV64-NEXT: ret <vscale x 8 x float> [[TMP0]]
+//
+vfloat32m4_t test_vfwsub_vv_bf16m2_f32m4_tumu(vbool8_t vm, vfloat32m4_t vd,
+ vbfloat16m2_t vs2,
+ vbfloat16m2_t vs1, size_t vl) {
+ return __riscv_vfwsub_vv_bf16m2_f32m4_tumu(vm, vd, vs2, vs1, vl);
+}
+
+// CHECK-RV64-LABEL: define dso_local <vscale x 8 x float> @test_vfwsub_vf_bf16m2_f32m4_tumu(
+// CHECK-RV64-SAME: <vscale x 8 x i1> [[VM:%.*]], <vscale x 8 x float> [[VD:%.*]], <vscale x 8 x bfloat> [[VS2:%.*]], bfloat noundef [[RS1:%.*]], i64 noundef [[VL:%.*]]) #[[ATTR0]] {
+// CHECK-RV64-NEXT: [[ENTRY:.*:]]
+// CHECK-RV64-NEXT: [[TMP0:%.*]] = call <vscale x 8 x float> @llvm.riscv.vfwsub.mask.nxv8f32.nxv8bf16.bf16.i64(<vscale x 8 x float> [[VD]], <vscale x 8 x bfloat> [[VS2]], bfloat [[RS1]], <vscale x 8 x i1> [[VM]], i64 7, i64 [[VL]], i64 0)
+// CHECK-RV64-NEXT: ret <vscale x 8 x float> [[TMP0]]
+//
+vfloat32m4_t test_vfwsub_vf_bf16m2_f32m4_tumu(vbool8_t vm, vfloat32m4_t vd,
+ vbfloat16m2_t vs2, __bf16 rs1,
+ size_t vl) {
+ return __riscv_vfwsub_vf_bf16m2_f32m4_tumu(vm, vd, vs2, rs1, vl);
+}
+
+// CHECK-RV64-LABEL: define dso_local <vscale x 8 x float> @test_vfwsub_wv_bf16m2_f32m4_tumu(
+// CHECK-RV64-SAME: <vscale x 8 x i1> [[VM:%.*]], <vscale x 8 x float> [[VD:%.*]], <vscale x 8 x float> [[VS2:%.*]], <vscale x 8 x bfloat> [[VS1:%.*]], i64 noundef [[VL:%.*]]) #[[ATTR0]] {
+// CHECK-RV64-NEXT: [[ENTRY:.*:]]
+// CHECK-RV64-NEXT: [[TMP0:%.*]] = call <vscale x 8 x float> @llvm.riscv.vfwsub.w.mask.nxv8f32.nxv8bf16.i64(<vscale x 8 x float> [[VD]], <vscale x 8 x float> [[VS2]], <vscale x 8 x bfloat> [[VS1]], <vscale x 8 x i1> [[VM]], i64 7, i64 [[VL]], i64 0)
+// CHECK-RV64-NEXT: ret <vscale x 8 x float> [[TMP0]]
+//
+vfloat32m4_t test_vfwsub_wv_bf16m2_f32m4_tumu(vbool8_t vm, vfloat32m4_t vd,
+ vfloat32m4_t vs2,
+ vbfloat16m2_t vs1, size_t vl) {
+ return __riscv_vfwsub_wv_bf16m2_f32m4_tumu(vm, vd, vs2, vs1, vl);
+}
+
+// CHECK-RV64-LABEL: define dso_local <vscale x 8 x float> @test_vfwsub_wf_bf16_f32m4_tumu(
+// CHECK-RV64-SAME: <vscale x 8 x i1> [[VM:%.*]], <vscale x 8 x float> [[VD:%.*]], <vscale x 8 x float> [[VS2:%.*]], bfloat noundef [[RS1:%.*]], i64 noundef [[VL:%.*]]) #[[ATTR0]] {
+// CHECK-RV64-NEXT: [[ENTRY:.*:]]
+// CHECK-RV64-NEXT: [[TMP0:%.*]] = call <vscale x 8 x float> @llvm.riscv.vfwsub.w.mask.nxv8f32.bf16.i64(<vscale x 8 x float> [[VD]], <vscale x 8 x float> [[VS2]], bfloat [[RS1]], <vscale x 8 x i1> [[VM]], i64 7, i64 [[VL]], i64 0)
+// CHECK-RV64-NEXT: ret <vscale x 8 x float> [[TMP0]]
+//
+vfloat32m4_t test_vfwsub_wf_bf16_f32m4_tumu(vbool8_t vm, vfloat32m4_t vd,
+ vfloat32m4_t vs2, __bf16 rs1,
+ size_t vl) {
+ return __riscv_vfwsub_wf_bf16_f32m4_tumu(vm, vd, vs2, rs1, vl);
+}
+
+// CHECK-RV64-LABEL: define dso_local <vscale x 16 x float> @test_vfwsub_vv_bf16m4_f32m8_tumu(
+// CHECK-RV64-SAME: <vscale x 16 x i1> [[VM:%.*]], <vscale x 16 x float> [[VD:%.*]], <vscale x 16 x bfloat> [[VS2:%.*]], <vscale x 16 x bfloat> [[VS1:%.*]], i64 noundef [[VL:%.*]]) #[[ATTR0]] {
+// CHECK-RV64-NEXT: [[ENTRY:.*:]]
+// CHECK-RV64-NEXT: [[TMP0:%.*]] = call <vscale x 16 x float> @llvm.riscv.vfwsub.mask.nxv16f32.nxv16bf16.nxv16bf16.i64(<vscale x 16 x float> [[VD]], <vscale x 16 x bfloat> [[VS2]], <vscale x 16 x bfloat> [[VS1]], <vscale x 16 x i1> [[VM]], i64 7, i64 [[VL]], i64 0)
+// CHECK-RV64-NEXT: ret <vscale x 16 x float> [[TMP0]]
+//
+vfloat32m8_t test_vfwsub_vv_bf16m4_f32m8_tumu(vbool4_t vm, vfloat32m8_t vd,
+ vbfloat16m4_t vs2,
+ vbfloat16m4_t vs1, size_t vl) {
+ return __riscv_vfwsub_vv_bf16m4_f32m8_tumu(vm, vd, vs2, vs1, vl);
+}
+
+// CHECK-RV64-LABEL: define dso_local <vscale x 16 x float> @test_vfwsub_vf_bf16m4_f32m8_tumu(
+// CHECK-RV64-SAME: <vscale x 16 x i1> [[VM:%.*]], <vscale x 16 x float> [[VD:%.*]], <vscale x 16 x bfloat> [[VS2:%.*]], bfloat noundef [[RS1:%.*]], i64 noundef [[VL:%.*]]) #[[ATTR0]] {
+// CHECK-RV64-NEXT: [[ENTRY:.*:]]
+// CHECK-RV64-NEXT: [[TMP0:%.*]] = call <vscale x 16 x float> @llvm.riscv.vfwsub.mask.nxv16f32.nxv16bf16.bf16.i64(<vscale x 16 x float> [[VD]], <vscale x 16 x bfloat> [[VS2]], bfloat [[RS1]], <vscale x 16 x i1> [[VM]], i64 7, i64 [[VL]], i64 0)
+// CHECK-RV64-NEXT: ret <vscale x 16 x float> [[TMP0]]
+//
+vfloat32m8_t test_vfwsub_vf_bf16m4_f32m8_tumu(vbool4_t vm, vfloat32m8_t vd,
+ vbfloat16m4_t vs2, __bf16 rs1,
+ size_t vl) {
+ return __riscv_vfwsub_vf_bf16m4_f32m8_tumu(vm, vd, vs2, rs1, vl);
+}
+
+// CHECK-RV64-LABEL: define dso_local <vscale x 16 x float> @test_vfwsub_wv_bf16m4_f32m8_tumu(
+// CHECK-RV64-SAME: <vscale x 16 x i1> [[VM:%.*]], <vscale x 16 x float> [[VD:%.*]], <vscale x 16 x float> [[VS2:%.*]], <vscale x 16 x bfloat> [[VS1:%.*]], i64 noundef [[VL:%.*]]) #[[ATTR0]] {
+// CHECK-RV64-NEXT: [[ENTRY:.*:]]
+// CHECK-RV64-NEXT: [[TMP0:%.*]] = call <vscale x 16 x float> @llvm.riscv.vfwsub.w.mask.nxv16f32.nxv16bf16.i64(<vscale x 16 x float> [[VD]], <vscale x 16 x float> [[VS2]], <vscale x 16 x bfloat> [[VS1]], <vscale x 16 x i1> [[VM]], i64 7, i64 [[VL]], i64 0)
+// CHECK-RV64-NEXT: ret <vscale x 16 x float> [[TMP0]]
+//
+vfloat32m8_t test_vfwsub_wv_bf16m4_f32m8_tumu(vbool4_t vm, vfloat32m8_t vd,
+ vfloat32m8_t vs2,
+ vbfloat16m4_t vs1, size_t vl) {
+ return __riscv_vfwsub_wv_bf16m4_f32m8_tumu(vm, vd, vs2, vs1, vl);
+}
+
+// CHECK-RV64-LABEL: define dso_local <vscale x 16 x float> @test_vfwsub_wf_bf16_f32m8_tumu(
+// CHECK-RV64-SAME: <vscale x 16 x i1> [[VM:%.*]], <vscale x 16 x float> [[VD:%.*]], <vscale x 16 x float> [[VS2:%.*]], bfloat noundef [[RS1:%.*]], i64 noundef [[VL:%.*]]) #[[ATTR0]] {
+// CHECK-RV64-NEXT: [[ENTRY:.*:]]
+// CHECK-RV64-NEXT: [[TMP0:%.*]] = call <vscale x 16 x float> @llvm.riscv.vfwsub.w.mask.nxv16f32.bf16.i64(<vscale x 16 x float> [[VD]], <vscale x 16 x float> [[VS2]], bfloat [[RS1]], <vscale x 16 x i1> [[VM]], i64 7, i64 [[VL]], i64 0)
+// CHECK-RV64-NEXT: ret <vscale x 16 x float> [[TMP0]]
+//
+vfloat32m8_t test_vfwsub_wf_bf16_f32m8_tumu(vbool4_t vm, vfloat32m8_t vd,
+ vfloat32m8_t vs2, __bf16 rs1,
+ size_t vl) {
+ return __riscv_vfwsub_wf_bf16_f32m8_tumu(vm, vd, vs2, rs1, vl);
+}
+
+// CHECK-RV64-LABEL: define dso_local <vscale x 1 x float> @test_vfwsub_vv_bf16mf4_f32mf2_mu(
+// CHECK-RV64-SAME: <vscale x 1 x i1> [[VM:%.*]], <vscale x 1 x float> [[VD:%.*]], <vscale x 1 x bfloat> [[VS2:%.*]], <vscale x 1 x bfloat> [[VS1:%.*]], i64 noundef [[VL:%.*]]) #[[ATTR0]] {
+// CHECK-RV64-NEXT: [[ENTRY:.*:]]
+// CHECK-RV64-NEXT: [[TMP0:%.*]] = call <vscale x 1 x float> @llvm.riscv.vfwsub.mask.nxv1f32.nxv1bf16.nxv1bf16.i64(<vscale x 1 x float> [[VD]], <vscale x 1 x bfloat> [[VS2]], <vscale x 1 x bfloat> [[VS1]], <vscale x 1 x i1> [[VM]], i64 7, i64 [[VL]], i64 1)
+// CHECK-RV64-NEXT: ret <vscale x 1 x float> [[TMP0]]
+//
+vfloat32mf2_t test_vfwsub_vv_bf16mf4_f32mf2_mu(vbool64_t vm, vfloat32mf2_t vd,
+ vbfloat16mf4_t vs2,
+ vbfloat16mf4_t vs1, size_t vl) {
+ return __riscv_vfwsub_vv_bf16mf4_f32mf2_mu(vm, vd, vs2, vs1, vl);
+}
+
+// CHECK-RV64-LABEL: define dso_local <vscale x 1 x float> @test_vfwsub_vf_bf16mf4_f32mf2_mu(
+// CHECK-RV64-SAME: <vscale x 1 x i1> [[VM:%.*]], <vscale x 1 x float> [[VD:%.*]], <vscale x 1 x bfloat> [[VS2:%.*]], bfloat noundef [[RS1:%.*]], i64 noundef [[VL:%.*]]) #[[ATTR0]] {
+// CHECK-RV64-NEXT: [[ENTRY:.*:]]
+// CHECK-RV64-NEXT: [[TMP0:%.*]] = call <vscale x 1 x float> @llvm.riscv.vfwsub.mask.nxv1f32.nxv1bf16.bf16.i64(<vscale x 1 x float> [[VD]], <vscale x 1 x bfloat> [[VS2]], bfloat [[RS1]], <vscale x 1 x i1> [[VM]], i64 7, i64 [[VL]], i64 1)
+// CHECK-RV64-NEXT: ret <vscale x 1 x float> [[TMP0]]
+//
+vfloat32mf2_t test_vfwsub_vf_bf16mf4_f32mf2_mu(vbool64_t vm, vfloat32mf2_t vd,
+ vbfloat16mf4_t vs2, __bf16 rs1,
+ size_t vl) {
+ return __riscv_vfwsub_vf_bf16mf4_f32mf2_mu(vm, vd, vs2, rs1, vl);
+}
+
+// CHECK-RV64-LABEL: define dso_local <vscale x 1 x float> @test_vfwsub_wv_bf16mf4_f32mf2_mu(
+// CHECK-RV64-SAME: <vscale x 1 x i1> [[VM:%.*]], <vscale x 1 x float> [[VD:%.*]], <vscale x 1 x float> [[VS2:%.*]], <vscale x 1 x bfloat> [[VS1:%.*]], i64 noundef [[VL:%.*]]) #[[ATTR0]] {
+// CHECK-RV64-NEXT: [[ENTRY:.*:]]
+// CHECK-RV64-NEXT: [[TMP0:%.*]] = call <vscale x 1 x float> @llvm.riscv.vfwsub.w.mask.nxv1f32.nxv1bf16.i64(<vscale x 1 x float> [[VD]], <vscale x 1 x float> [[VS2]], <vscale x 1 x bfloat> [[VS1]], <vscale x 1 x i1> [[VM]], i64 7, i64 [[VL]], i64 1)
+// CHECK-RV64-NEXT: ret <vscale x 1 x float> [[TMP0]]
+//
+vfloat32mf2_t test_vfwsub_wv_bf16mf4_f32mf2_mu(vbool64_t vm, vfloat32mf2_t vd,
+ vfloat32mf2_t vs2,
+ vbfloat16mf4_t vs1, size_t vl) {
+ return __riscv_vfwsub_wv_bf16mf4_f32mf2_mu(vm, vd, vs2, vs1, vl);
+}
+
+// CHECK-RV64-LABEL: define dso_local <vscale x 1 x float> @test_vfwsub_wf_bf16_f32mf2_mu(
+// CHECK-RV64-SAME: <vscale x 1 x i1> [[VM:%.*]], <vscale x 1 x float> [[VD:%.*]], <vscale x 1 x float> [[VS2:%.*]], bfloat noundef [[RS1:%.*]], i64 noundef [[VL:%.*]]) #[[ATTR0]] {
+// CHECK-RV64-NEXT: [[ENTRY:.*:]]
+// CHECK-RV64-NEXT: [[TMP0:%.*]] = call <vscale x 1 x float> @llvm.riscv.vfwsub.w.mask.nxv1f32.bf16.i64(<vscale x 1 x float> [[VD]], <vscale x 1 x float> [[VS2]], bfloat [[RS1]], <vscale x 1 x i1> [[VM]], i64 7, i64 [[VL]], i64 1)
+// CHECK-RV64-NEXT: ret <vscale x 1 x float> [[TMP0]]
+//
+vfloat32mf2_t test_vfwsub_wf_bf16_f32mf2_mu(vbool64_t vm, vfloat32mf2_t vd,
+ vfloat32mf2_t vs2, __bf16 rs1,
+ size_t vl) {
+ return __riscv_vfwsub_wf_bf16_f32mf2_mu(vm, vd, vs2, rs1, vl);
+}
+
+// CHECK-RV64-LABEL: define dso_local <vscale x 2 x float> @test_vfwsub_vv_bf16mf2_f32m1_mu(
+// CHECK-RV64-SAME: <vscale x 2 x i1> [[VM:%.*]], <vscale x 2 x float> [[VD:%.*]], <vscale x 2 x bfloat> [[VS2:%.*]], <vscale x 2 x bfloat> [[VS1:%.*]], i64 noundef [[VL:%.*]]) #[[ATTR0]] {
+// CHECK-RV64-NEXT: [[ENTRY:.*:]]
+// CHECK-RV64-NEXT: [[TMP0:%.*]] = call <vscale x 2 x float> @llvm.riscv.vfwsub.mask.nxv2f32.nxv2bf16.nxv2bf16.i64(<vscale x 2 x float> [[VD]], <vscale x 2 x bfloat> [[VS2]], <vscale x 2 x bfloat> [[VS1]], <vscale x 2 x i1> [[VM]], i64 7, i64 [[VL]], i64 1)
+// CHECK-RV64-NEXT: ret <vscale x 2 x float> [[TMP0]]
+//
+vfloat32m1_t test_vfwsub_vv_bf16mf2_f32m1_mu(vbool32_t vm, vfloat32m1_t vd,
+ vbfloat16mf2_t vs2,
+ vbfloat16mf2_t vs1, size_t vl) {
+ return __riscv_vfwsub_vv_bf16mf2_f32m1_mu(vm, vd, vs2, vs1, vl);
+}
+
+// CHECK-RV64-LABEL: define dso_local <vscale x 2 x float> @test_vfwsub_vf_bf16mf2_f32m1_mu(
+// CHECK-RV64-SAME: <vscale x 2 x i1> [[VM:%.*]], <vscale x 2 x float> [[VD:%.*]], <vscale x 2 x bfloat> [[VS2:%.*]], bfloat noundef [[RS1:%.*]], i64 noundef [[VL:%.*]]) #[[ATTR0]] {
+// CHECK-RV64-NEXT: [[ENTRY:.*:]]
+// CHECK-RV64-NEXT: [[TMP0:%.*]] = call <vscale x 2 x float> @llvm.riscv.vfwsub.mask.nxv2f32.nxv2bf16.bf16.i64(<vscale x 2 x float> [[VD]], <vscale x 2 x bfloat> [[VS2]], bfloat [[RS1]], <vscale x 2 x i1> [[VM]], i64 7, i64 [[VL]], i64 1)
+// CHECK-RV64-NEXT: ret <vscale x 2 x float> [[TMP0]]
+//
+vfloat32m1_t test_vfwsub_vf_bf16mf2_f32m1_mu(vbool32_t vm, vfloat32m1_t vd,
+ vbfloat16mf2_t vs2, __bf16 rs1,
+ size_t vl) {
+ return __riscv_vfwsub_vf_bf16mf2_f32m1_mu(vm, vd, vs2, rs1, vl);
+}
+
+// CHECK-RV64-LABEL: define dso_local <vscale x 2 x float> @test_vfwsub_wv_bf16mf2_f32m1_mu(
+// CHECK-RV64-SAME: <vscale x 2 x i1> [[VM:%.*]], <vscale x 2 x float> [[VD:%.*]], <vscale x 2 x float> [[VS2:%.*]], <vscale x 2 x bfloat> [[VS1:%.*]], i64 noundef [[VL:%.*]]) #[[ATTR0]] {
+// CHECK-RV64-NEXT: [[ENTRY:.*:]]
+// CHECK-RV64-NEXT: [[TMP0:%.*]] = call <vscale x 2 x float> @llvm.riscv.vfwsub.w.mask.nxv2f32.nxv2bf16.i64(<vscale x 2 x float> [[VD]], <vscale x 2 x float> [[VS2]], <vscale x 2 x bfloat> [[VS1]], <vscale x 2 x i1> [[VM]], i64 7, i64 [[VL]], i64 1)
+// CHECK-RV64-NEXT: ret <vscale x 2 x float> [[TMP0]]
+//
+vfloat32m1_t test_vfwsub_wv_bf16mf2_f32m1_mu(vbool32_t vm, vfloat32m1_t vd,
+ vfloat32m1_t vs2,
+ vbfloat16mf2_t vs1, size_t vl) {
+ return __riscv_vfwsub_wv_bf16mf2_f32m1_mu(vm, vd, vs2, vs1, vl);
+}
+
+// CHECK-RV64-LABEL: define dso_local <vscale x 2 x float> @test_vfwsub_wf_bf16_f32m1_mu(
+// CHECK-RV64-SAME: <vscale x 2 x i1> [[VM:%.*]], <vscale x 2 x float> [[VD:%.*]], <vscale x 2 x float> [[VS2:%.*]], bfloat noundef [[RS1:%.*]], i64 noundef [[VL:%.*]]) #[[ATTR0]] {
+// CHECK-RV64-NEXT: [[ENTRY:.*:]]
+// CHECK-RV64-NEXT: [[TMP0:%.*]] = call <vscale x 2 x float> @llvm.riscv.vfwsub.w.mask.nxv2f32.bf16.i64(<vscale x 2 x float> [[VD]], <vscale x 2 x float> [[VS2]], bfloat [[RS1]], <vscale x 2 x i1> [[VM]], i64 7, i64 [[VL]], i64 1)
+// CHECK-RV64-NEXT: ret <vscale x 2 x float> [[TMP0]]
+//
+vfloat32m1_t test_vfwsub_wf_bf16_f32m1_mu(vbool32_t vm, vfloat32m1_t vd,
+ vfloat32m1_t vs2, __bf16 rs1,
+ size_t vl) {
+ return __riscv_vfwsub_wf_bf16_f32m1_mu(vm, vd, vs2, rs1, vl);
+}
+
+// CHECK-RV64-LABEL: define dso_local <vscale x 4 x float> @test_vfwsub_vv_bf16m1_f32m2_mu(
+// CHECK-RV64-SAME: <vscale x 4 x i1> [[VM:%.*]], <vscale x 4 x float> [[VD:%.*]], <vscale x 4 x bfloat> [[VS2:%.*]], <vscale x 4 x bfloat> [[VS1:%.*]], i64 noundef [[VL:%.*]]) #[[ATTR0]] {
+// CHECK-RV64-NEXT: [[ENTRY:.*:]]
+// CHECK-RV64-NEXT: [[TMP0:%.*]] = call <vscale x 4 x float> @llvm.riscv.vfwsub.mask.nxv4f32.nxv4bf16.nxv4bf16.i64(<vscale x 4 x float> [[VD]], <vscale x 4 x bfloat> [[VS2]], <vscale x 4 x bfloat> [[VS1]], <vscale x 4 x i1> [[VM]], i64 7, i64 [[VL]], i64 1)
+// CHECK-RV64-NEXT: ret <vscale x 4 x float> [[TMP0]]
+//
+vfloat32m2_t test_vfwsub_vv_bf16m1_f32m2_mu(vbool16_t vm, vfloat32m2_t vd,
+ vbfloat16m1_t vs2,
+ vbfloat16m1_t vs1, size_t vl) {
+ return __riscv_vfwsub_vv_bf16m1_f32m2_mu(vm, vd, vs2, vs1, vl);
+}
+
+// CHECK-RV64-LABEL: define dso_local <vscale x 4 x float> @test_vfwsub_vf_bf16m1_f32m2_mu(
+// CHECK-RV64-SAME: <vscale x 4 x i1> [[VM:%.*]], <vscale x 4 x float> [[VD:%.*]], <vscale x 4 x bfloat> [[VS2:%.*]], bfloat noundef [[RS1:%.*]], i64 noundef [[VL:%.*]]) #[[ATTR0]] {
+// CHECK-RV64-NEXT: [[ENTRY:.*:]]
+// CHECK-RV64-NEXT: [[TMP0:%.*]] = call <vscale x 4 x float> @llvm.riscv.vfwsub.mask.nxv4f32.nxv4bf16.bf16.i64(<vscale x 4 x float> [[VD]], <vscale x 4 x bfloat> [[VS2]], bfloat [[RS1]], <vscale x 4 x i1> [[VM]], i64 7, i64 [[VL]], i64 1)
+// CHECK-RV64-NEXT: ret <vscale x 4 x float> [[TMP0]]
+//
+vfloat32m2_t test_vfwsub_vf_bf16m1_f32m2_mu(vbool16_t vm, vfloat32m2_t vd,
+ vbfloat16m1_t vs2, __bf16 rs1,
+ size_t vl) {
+ return __riscv_vfwsub_vf_bf16m1_f32m2_mu(vm, vd, vs2, rs1, vl);
+}
+
+// CHECK-RV64-LABEL: define dso_local <vscale x 4 x float> @test_vfwsub_wv_bf16m1_f32m2_mu(
+// CHECK-RV64-SAME: <vscale x 4 x i1> [[VM:%.*]], <vscale x 4 x float> [[VD:%.*]], <vscale x 4 x float> [[VS2:%.*]], <vscale x 4 x bfloat> [[VS1:%.*]], i64 noundef [[VL:%.*]]) #[[ATTR0]] {
+// CHECK-RV64-NEXT: [[ENTRY:.*:]]
+// CHECK-RV64-NEXT: [[TMP0:%.*]] = call <vscale x 4 x float> @llvm.riscv.vfwsub.w.mask.nxv4f32.nxv4bf16.i64(<vscale x 4 x float> [[VD]], <vscale x 4 x float> [[VS2]], <vscale x 4 x bfloat> [[VS1]], <vscale x 4 x i1> [[VM]], i64 7, i64 [[VL]], i64 1)
+// CHECK-RV64-NEXT: ret <vscale x 4 x float> [[TMP0]]
+//
+vfloat32m2_t test_vfwsub_wv_bf16m1_f32m2_mu(vbool16_t vm, vfloat32m2_t vd,
+ vfloat32m2_t vs2, vbfloat16m1_t vs1,
+ size_t vl) {
+ return __riscv_vfwsub_wv_bf16m1_f32m2_mu(vm, vd, vs2, vs1, vl);
+}
+
+// CHECK-RV64-LABEL: define dso_local <vscale x 4 x float> @test_vfwsub_wf_bf16_f32m2_mu(
+// CHECK-RV64-SAME: <vscale x 4 x i1> [[VM:%.*]], <vscale x 4 x float> [[VD:%.*]], <vscale x 4 x float> [[VS2:%.*]], bfloat noundef [[RS1:%.*]], i64 noundef [[VL:%.*]]) #[[ATTR0]] {
+// CHECK-RV64-NEXT: [[ENTRY:.*:]]
+// CHECK-RV64-NEXT: [[TMP0:%.*]] = call <vscale x 4 x float> @llvm.riscv.vfwsub.w.mask.nxv4f32.bf16.i64(<vscale x 4 x float> [[VD]], <vscale x 4 x float> [[VS2]], bfloat [[RS1]], <vscale x 4 x i1> [[VM]], i64 7, i64 [[VL]], i64 1)
+// CHECK-RV64-NEXT: ret <vscale x 4 x float> [[TMP0]]
+//
+vfloat32m2_t test_vfwsub_wf_bf16_f32m2_mu(vbool16_t vm, vfloat32m2_t vd,
+ vfloat32m2_t vs2, __bf16 rs1,
+ size_t vl) {
+ return __riscv_vfwsub_wf_bf16_f32m2_mu(vm, vd, vs2, rs1, vl);
+}
+
+// CHECK-RV64-LABEL: define dso_local <vscale x 8 x float> @test_vfwsub_vv_bf16m2_f32m4_mu(
+// CHECK-RV64-SAME: <vscale x 8 x i1> [[VM:%.*]], <vscale x 8 x float> [[VD:%.*]], <vscale x 8 x bfloat> [[VS2:%.*]], <vscale x 8 x bfloat> [[VS1:%.*]], i64 noundef [[VL:%.*]]) #[[ATTR0]] {
+// CHECK-RV64-NEXT: [[ENTRY:.*:]]
+// CHECK-RV64-NEXT: [[TMP0:%.*]] = call <vscale x 8 x float> @llvm.riscv.vfwsub.mask.nxv8f32.nxv8bf16.nxv8bf16.i64(<vscale x 8 x float> [[VD]], <vscale x 8 x bfloat> [[VS2]], <vscale x 8 x bfloat> [[VS1]], <vscale x 8 x i1> [[VM]], i64 7, i64 [[VL]], i64 1)
+// CHECK-RV64-NEXT: ret <vscale x 8 x float> [[TMP0]]
+//
+vfloat32m4_t test_vfwsub_vv_bf16m2_f32m4_mu(vbool8_t vm, vfloat32m4_t vd,
+ vbfloat16m2_t vs2,
+ vbfloat16m2_t vs1, size_t vl) {
+ return __riscv_vfwsub_vv_bf16m2_f32m4_mu(vm, vd, vs2, vs1, vl);
+}
+
+// CHECK-RV64-LABEL: define dso_local <vscale x 8 x float> @test_vfwsub_vf_bf16m2_f32m4_mu(
+// CHECK-RV64-SAME: <vscale x 8 x i1> [[VM:%.*]], <vscale x 8 x float> [[VD:%.*]], <vscale x 8 x bfloat> [[VS2:%.*]], bfloat noundef [[RS1:%.*]], i64 noundef [[VL:%.*]]) #[[ATTR0]] {
+// CHECK-RV64-NEXT: [[ENTRY:.*:]]
+// CHECK-RV64-NEXT: [[TMP0:%.*]] = call <vscale x 8 x float> @llvm.riscv.vfwsub.mask.nxv8f32.nxv8bf16.bf16.i64(<vscale x 8 x float> [[VD]], <vscale x 8 x bfloat> [[VS2]], bfloat [[RS1]], <vscale x 8 x i1> [[VM]], i64 7, i64 [[VL]], i64 1)
+// CHECK-RV64-NEXT: ret <vscale x 8 x float> [[TMP0]]
+//
+vfloat32m4_t test_vfwsub_vf_bf16m2_f32m4_mu(vbool8_t vm, vfloat32m4_t vd,
+ vbfloat16m2_t vs2, __bf16 rs1,
+ size_t vl) {
+ return __riscv_vfwsub_vf_bf16m2_f32m4_mu(vm, vd, vs2, rs1, vl);
+}
+
+// CHECK-RV64-LABEL: define dso_local <vscale x 8 x float> @test_vfwsub_wv_bf16m2_f32m4_mu(
+// CHECK-RV64-SAME: <vscale x 8 x i1> [[VM:%.*]], <vscale x 8 x float> [[VD:%.*]], <vscale x 8 x float> [[VS2:%.*]], <vscale x 8 x bfloat> [[VS1:%.*]], i64 noundef [[VL:%.*]]) #[[ATTR0]] {
+// CHECK-RV64-NEXT: [[ENTRY:.*:]]
+// CHECK-RV64-NEXT: [[TMP0:%.*]] = call <vscale x 8 x float> @llvm.riscv.vfwsub.w.mask.nxv8f32.nxv8bf16.i64(<vscale x 8 x float> [[VD]], <vscale x 8 x float> [[VS2]], <vscale x 8 x bfloat> [[VS1]], <vscale x 8 x i1> [[VM]], i64 7, i64 [[VL]], i64 1)
+// CHECK-RV64-NEXT: ret <vscale x 8 x float> [[TMP0]]
+//
+vfloat32m4_t test_vfwsub_wv_bf16m2_f32m4_mu(vbool8_t vm, vfloat32m4_t vd,
+ vfloat32m4_t vs2, vbfloat16m2_t vs1,
+ size_t vl) {
+ return __riscv_vfwsub_wv_bf16m2_f32m4_mu(vm, vd, vs2, vs1, vl);
+}
+
+// CHECK-RV64-LABEL: define dso_local <vscale x 8 x float> @test_vfwsub_wf_bf16_f32m4_mu(
+// CHECK-RV64-SAME: <vscale x 8 x i1> [[VM:%.*]], <vscale x 8 x float> [[VD:%.*]], <vscale x 8 x float> [[VS2:%.*]], bfloat noundef [[RS1:%.*]], i64 noundef [[VL:%.*]]) #[[ATTR0]] {
+// CHECK-RV64-NEXT: [[ENTRY:.*:]]
+// CHECK-RV64-NEXT: [[TMP0:%.*]] = call <vscale x 8 x float> @llvm.riscv.vfwsub.w.mask.nxv8f32.bf16.i64(<vscale x 8 x float> [[VD]], <vscale x 8 x float> [[VS2]], bfloat [[RS1]], <vscale x 8 x i1> [[VM]], i64 7, i64 [[VL]], i64 1)
+// CHECK-RV64-NEXT: ret <vscale x 8 x float> [[TMP0]]
+//
+vfloat32m4_t test_vfwsub_wf_bf16_f32m4_mu(vbool8_t vm, vfloat32m4_t vd,
+ vfloat32m4_t vs2, __bf16 rs1,
+ size_t vl) {
+ return __riscv_vfwsub_wf_bf16_f32m4_mu(vm, vd, vs2, rs1, vl);
+}
+
+// CHECK-RV64-LABEL: define dso_local <vscale x 16 x float> @test_vfwsub_vv_bf16m4_f32m8_mu(
+// CHECK-RV64-SAME: <vscale x 16 x i1> [[VM:%.*]], <vscale x 16 x float> [[VD:%.*]], <vscale x 16 x bfloat> [[VS2:%.*]], <vscale x 16 x bfloat> [[VS1:%.*]], i64 noundef [[VL:%.*]]) #[[ATTR0]] {
+// CHECK-RV64-NEXT: [[ENTRY:.*:]]
+// CHECK-RV64-NEXT: [[TMP0:%.*]] = call <vscale x 16 x float> @llvm.riscv.vfwsub.mask.nxv16f32.nxv16bf16.nxv16bf16.i64(<vscale x 16 x float> [[VD]], <vscale x 16 x bfloat> [[VS2]], <vscale x 16 x bfloat> [[VS1]], <vscale x 16 x i1> [[VM]], i64 7, i64 [[VL]], i64 1)
+// CHECK-RV64-NEXT: ret <vscale x 16 x float> [[TMP0]]
+//
+vfloat32m8_t test_vfwsub_vv_bf16m4_f32m8_mu(vbool4_t vm, vfloat32m8_t vd,
+ vbfloat16m4_t vs2,
+ vbfloat16m4_t vs1, size_t vl) {
+ return __riscv_vfwsub_vv_bf16m4_f32m8_mu(vm, vd, vs2, vs1, vl);
+}
+
+// CHECK-RV64-LABEL: define dso_local <vscale x 16 x float> @test_vfwsub_vf_bf16m4_f32m8_mu(
+// CHECK-RV64-SAME: <vscale x 16 x i1> [[VM:%.*]], <vscale x 16 x float> [[VD:%.*]], <vscale x 16 x bfloat> [[VS2:%.*]], bfloat noundef [[RS1:%.*]], i64 noundef [[VL:%.*]]) #[[ATTR0]] {
+// CHECK-RV64-NEXT: [[ENTRY:.*:]]
+// CHECK-RV64-NEXT: [[TMP0:%.*]] = call <vscale x 16 x float> @llvm.riscv.vfwsub.mask.nxv16f32.nxv16bf16.bf16.i64(<vscale x 16 x float> [[VD]], <vscale x 16 x bfloat> [[VS2]], bfloat [[RS1]], <vscale x 16 x i1> [[VM]], i64 7, i64 [[VL]], i64 1)
+// CHECK-RV64-NEXT: ret <vscale x 16 x float> [[TMP0]]
+//
+vfloat32m8_t test_vfwsub_vf_bf16m4_f32m8_mu(vbool4_t vm, vfloat32m8_t vd,
+ vbfloat16m4_t vs2, __bf16 rs1,
+ size_t vl) {
+ return __riscv_vfwsub_vf_bf16m4_f32m8_mu(vm, vd, vs2, rs1, vl);
+}
+
+// CHECK-RV64-LABEL: define dso_local <vscale x 16 x float> @test_vfwsub_wv_bf16m4_f32m8_mu(
+// CHECK-RV64-SAME: <vscale x 16 x i1> [[VM:%.*]], <vscale x 16 x float> [[VD:%.*]], <vscale x 16 x float> [[VS2:%.*]], <vscale x 16 x bfloat> [[VS1:%.*]], i64 noundef [[VL:%.*]]) #[[ATTR0]] {
+// CHECK-RV64-NEXT: [[ENTRY:.*:]]
+// CHECK-RV64-NEXT: [[TMP0:%.*]] = call <vscale x 16 x float> @llvm.riscv.vfwsub.w.mask.nxv16f32.nxv16bf16.i64(<vscale x 16 x float> [[VD]], <vscale x 16 x float> [[VS2]], <vscale x 16 x bfloat> [[VS1]], <vscale x 16 x i1> [[VM]], i64 7, i64 [[VL]], i64 1)
+// CHECK-RV64-NEXT: ret <vscale x 16 x float> [[TMP0]]
+//
+vfloat32m8_t test_vfwsub_wv_bf16m4_f32m8_mu(vbool4_t vm, vfloat32m8_t vd,
+ vfloat32m8_t vs2, vbfloat16m4_t vs1,
+ size_t vl) {
+ return __riscv_vfwsub_wv_bf16m4_f32m8_mu(vm, vd, vs2, vs1, vl);
+}
+
+// CHECK-RV64-LABEL: define dso_local <vscale x 16 x float> @test_vfwsub_wf_bf16_f32m8_mu(
+// CHECK-RV64-SAME: <vscale x 16 x i1> [[VM:%.*]], <vscale x 16 x float> [[VD:%.*]], <vscale x 16 x float> [[VS2:%.*]], bfloat noundef [[RS1:%.*]], i64 noundef [[VL:%.*]]) #[[ATTR0]] {
+// CHECK-RV64-NEXT: [[ENTRY:.*:]]
+// CHECK-RV64-NEXT: [[TMP0:%.*]] = call <vscale x 16 x float> @llvm.riscv.vfwsub.w.mask.nxv16f32.bf16.i64(<vscale x 16 x float> [[VD]], <vscale x 16 x float> [[VS2]], bfloat [[RS1]], <vscale x 16 x i1> [[VM]], i64 7, i64 [[VL]], i64 1)
+// CHECK-RV64-NEXT: ret <vscale x 16 x float> [[TMP0]]
+//
+vfloat32m8_t test_vfwsub_wf_bf16_f32m8_mu(vbool4_t vm, vfloat32m8_t vd,
+ vfloat32m8_t vs2, __bf16 rs1,
+ size_t vl) {
+ return __riscv_vfwsub_wf_bf16_f32m8_mu(vm, vd, vs2, rs1, vl);
+}
+
+// CHECK-RV64-LABEL: define dso_local <vscale x 1 x float> @test_vfwsub_vv_bf16mf4_f32mf2_rm_tu(
+// CHECK-RV64-SAME: <vscale x 1 x float> [[VD:%.*]], <vscale x 1 x bfloat> [[VS2:%.*]], <vscale x 1 x bfloat> [[VS1:%.*]], i64 noundef [[VL:%.*]]) #[[ATTR0]] {
+// CHECK-RV64-NEXT: [[ENTRY:.*:]]
+// CHECK-RV64-NEXT: [[TMP0:%.*]] = call <vscale x 1 x float> @llvm.riscv.vfwsub.nxv1f32.nxv1bf16.nxv1bf16.i64(<vscale x 1 x float> [[VD]], <vscale x 1 x bfloat> [[VS2]], <vscale x 1 x bfloat> [[VS1]], i64 0, i64 [[VL]])
+// CHECK-RV64-NEXT: ret <vscale x 1 x float> [[TMP0]]
+//
+vfloat32mf2_t test_vfwsub_vv_bf16mf4_f32mf2_rm_tu(vfloat32mf2_t vd,
+ vbfloat16mf4_t vs2,
+ vbfloat16mf4_t vs1,
+ size_t vl) {
+ return __riscv_vfwsub_vv_bf16mf4_f32mf2_rm_tu(vd, vs2, vs1, __RISCV_FRM_RNE,
+ vl);
+}
+
+// CHECK-RV64-LABEL: define dso_local <vscale x 1 x float> @test_vfwsub_vf_bf16mf4_f32mf2_rm_tu(
+// CHECK-RV64-SAME: <vscale x 1 x float> [[VD:%.*]], <vscale x 1 x bfloat> [[VS2:%.*]], bfloat noundef [[RS1:%.*]], i64 noundef [[VL:%.*]]) #[[ATTR0]] {
+// CHECK-RV64-NEXT: [[ENTRY:.*:]]
+// CHECK-RV64-NEXT: [[TMP0:%.*]] = call <vscale x 1 x float> @llvm.riscv.vfwsub.nxv1f32.nxv1bf16.bf16.i64(<vscale x 1 x float> [[VD]], <vscale x 1 x bfloat> [[VS2]], bfloat [[RS1]], i64 0, i64 [[VL]])
+// CHECK-RV64-NEXT: ret <vscale x 1 x float> [[TMP0]]
+//
+vfloat32mf2_t test_vfwsub_vf_bf16mf4_f32mf2_rm_tu(vfloat32mf2_t vd,
+ vbfloat16mf4_t vs2,
+ __bf16 rs1, size_t vl) {
+ return __riscv_vfwsub_vf_bf16mf4_f32mf2_rm_tu(vd, vs2, rs1, __RISCV_FRM_RNE,
+ vl);
+}
+
+// CHECK-RV64-LABEL: define dso_local <vscale x 1 x float> @test_vfwsub_wv_bf16mf4_f32mf2_rm_tu(
+// CHECK-RV64-SAME: <vscale x 1 x float> [[VD:%.*]], <vscale x 1 x float> [[VS2:%.*]], <vscale x 1 x bfloat> [[VS1:%.*]], i64 noundef [[VL:%.*]]) #[[ATTR0]] {
+// CHECK-RV64-NEXT: [[ENTRY:.*:]]
+// CHECK-RV64-NEXT: [[TMP0:%.*]] = call <vscale x 1 x float> @llvm.riscv.vfwsub.w.nxv1f32.nxv1bf16.i64(<vscale x 1 x float> [[VD]], <vscale x 1 x float> [[VS2]], <vscale x 1 x bfloat> [[VS1]], i64 0, i64 [[VL]])
+// CHECK-RV64-NEXT: ret <vscale x 1 x float> [[TMP0]]
+//
+vfloat32mf2_t test_vfwsub_wv_bf16mf4_f32mf2_rm_tu(vfloat32mf2_t vd,
+ vfloat32mf2_t vs2,
+ vbfloat16mf4_t vs1,
+ size_t vl) {
+ return __riscv_vfwsub_wv_bf16mf4_f32mf2_rm_tu(vd, vs2, vs1, __RISCV_FRM_RNE,
+ vl);
+}
+
+// CHECK-RV64-LABEL: define dso_local <vscale x 1 x float> @test_vfwsub_wf_bf16_f32mf2_rm_tu(
+// CHECK-RV64-SAME: <vscale x 1 x float> [[VD:%.*]], <vscale x 1 x float> [[VS2:%.*]], bfloat noundef [[RS1:%.*]], i64 noundef [[VL:%.*]]) #[[ATTR0]] {
+// CHECK-RV64-NEXT: [[ENTRY:.*:]]
+// CHECK-RV64-NEXT: [[TMP0:%.*]] = call <vscale x 1 x float> @llvm.riscv.vfwsub.w.nxv1f32.bf16.i64(<vscale x 1 x float> [[VD]], <vscale x 1 x float> [[VS2]], bfloat [[RS1]], i64 0, i64 [[VL]])
+// CHECK-RV64-NEXT: ret <vscale x 1 x float> [[TMP0]]
+//
+vfloat32mf2_t test_vfwsub_wf_bf16_f32mf2_rm_tu(vfloat32mf2_t vd,
+ vfloat32mf2_t vs2, __bf16 rs1,
+ size_t vl) {
+ return __riscv_vfwsub_wf_bf16_f32mf2_rm_tu(vd, vs2, rs1, __RISCV_FRM_RNE, vl);
+}
+
+// CHECK-RV64-LABEL: define dso_local <vscale x 2 x float> @test_vfwsub_vv_bf16mf2_f32m1_rm_tu(
+// CHECK-RV64-SAME: <vscale x 2 x float> [[VD:%.*]], <vscale x 2 x bfloat> [[VS2:%.*]], <vscale x 2 x bfloat> [[VS1:%.*]], i64 noundef [[VL:%.*]]) #[[ATTR0]] {
+// CHECK-RV64-NEXT: [[ENTRY:.*:]]
+// CHECK-RV64-NEXT: [[TMP0:%.*]] = call <vscale x 2 x float> @llvm.riscv.vfwsub.nxv2f32.nxv2bf16.nxv2bf16.i64(<vscale x 2 x float> [[VD]], <vscale x 2 x bfloat> [[VS2]], <vscale x 2 x bfloat> [[VS1]], i64 0, i64 [[VL]])
+// CHECK-RV64-NEXT: ret <vscale x 2 x float> [[TMP0]]
+//
+vfloat32m1_t test_vfwsub_vv_bf16mf2_f32m1_rm_tu(vfloat32m1_t vd,
+ vbfloat16mf2_t vs2,
+ vbfloat16mf2_t vs1, size_t vl) {
+ return __riscv_vfwsub_vv_bf16mf2_f32m1_rm_tu(vd, vs2, vs1, __RISCV_FRM_RNE,
+ vl);
+}
+
+// CHECK-RV64-LABEL: define dso_local <vscale x 2 x float> @test_vfwsub_vf_bf16mf2_f32m1_rm_tu(
+// CHECK-RV64-SAME: <vscale x 2 x float> [[VD:%.*]], <vscale x 2 x bfloat> [[VS2:%.*]], bfloat noundef [[RS1:%.*]], i64 noundef [[VL:%.*]]) #[[ATTR0]] {
+// CHECK-RV64-NEXT: [[ENTRY:.*:]]
+// CHECK-RV64-NEXT: [[TMP0:%.*]] = call <vscale x 2 x float> @llvm.riscv.vfwsub.nxv2f32.nxv2bf16.bf16.i64(<vscale x 2 x float> [[VD]], <vscale x 2 x bfloat> [[VS2]], bfloat [[RS1]], i64 0, i64 [[VL]])
+// CHECK-RV64-NEXT: ret <vscale x 2 x float> [[TMP0]]
+//
+vfloat32m1_t test_vfwsub_vf_bf16mf2_f32m1_rm_tu(vfloat32m1_t vd,
+ vbfloat16mf2_t vs2, __bf16 rs1,
+ size_t vl) {
+ return __riscv_vfwsub_vf_bf16mf2_f32m1_rm_tu(vd, vs2, rs1, __RISCV_FRM_RNE,
+ vl);
+}
+
+// CHECK-RV64-LABEL: define dso_local <vscale x 2 x float> @test_vfwsub_wv_bf16mf2_f32m1_rm_tu(
+// CHECK-RV64-SAME: <vscale x 2 x float> [[VD:%.*]], <vscale x 2 x float> [[VS2:%.*]], <vscale x 2 x bfloat> [[VS1:%.*]], i64 noundef [[VL:%.*]]) #[[ATTR0]] {
+// CHECK-RV64-NEXT: [[ENTRY:.*:]]
+// CHECK-RV64-NEXT: [[TMP0:%.*]] = call <vscale x 2 x float> @llvm.riscv.vfwsub.w.nxv2f32.nxv2bf16.i64(<vscale x 2 x float> [[VD]], <vscale x 2 x float> [[VS2]], <vscale x 2 x bfloat> [[VS1]], i64 0, i64 [[VL]])
+// CHECK-RV64-NEXT: ret <vscale x 2 x float> [[TMP0]]
+//
+vfloat32m1_t test_vfwsub_wv_bf16mf2_f32m1_rm_tu(vfloat32m1_t vd,
+ vfloat32m1_t vs2,
+ vbfloat16mf2_t vs1, size_t vl) {
+ return __riscv_vfwsub_wv_bf16mf2_f32m1_rm_tu(vd, vs2, vs1, __RISCV_FRM_RNE,
+ vl);
+}
+
+// CHECK-RV64-LABEL: define dso_local <vscale x 2 x float> @test_vfwsub_wf_bf16_f32m1_rm_tu(
+// CHECK-RV64-SAME: <vscale x 2 x float> [[VD:%.*]], <vscale x 2 x float> [[VS2:%.*]], bfloat noundef [[RS1:%.*]], i64 noundef [[VL:%.*]]) #[[ATTR0]] {
+// CHECK-RV64-NEXT: [[ENTRY:.*:]]
+// CHECK-RV64-NEXT: [[TMP0:%.*]] = call <vscale x 2 x float> @llvm.riscv.vfwsub.w.nxv2f32.bf16.i64(<vscale x 2 x float> [[VD]], <vscale x 2 x float> [[VS2]], bfloat [[RS1]], i64 0, i64 [[VL]])
+// CHECK-RV64-NEXT: ret <vscale x 2 x float> [[TMP0]]
+//
+vfloat32m1_t test_vfwsub_wf_bf16_f32m1_rm_tu(vfloat32m1_t vd, vfloat32m1_t vs2,
+ __bf16 rs1, size_t vl) {
+ return __riscv_vfwsub_wf_bf16_f32m1_rm_tu(vd, vs2, rs1, __RISCV_FRM_RNE, vl);
+}
+
+// CHECK-RV64-LABEL: define dso_local <vscale x 4 x float> @test_vfwsub_vv_bf16m1_f32m2_rm_tu(
+// CHECK-RV64-SAME: <vscale x 4 x float> [[VD:%.*]], <vscale x 4 x bfloat> [[VS2:%.*]], <vscale x 4 x bfloat> [[VS1:%.*]], i64 noundef [[VL:%.*]]) #[[ATTR0]] {
+// CHECK-RV64-NEXT: [[ENTRY:.*:]]
+// CHECK-RV64-NEXT: [[TMP0:%.*]] = call <vscale x 4 x float> @llvm.riscv.vfwsub.nxv4f32.nxv4bf16.nxv4bf16.i64(<vscale x 4 x float> [[VD]], <vscale x 4 x bfloat> [[VS2]], <vscale x 4 x bfloat> [[VS1]], i64 0, i64 [[VL]])
+// CHECK-RV64-NEXT: ret <vscale x 4 x float> [[TMP0]]
+//
+vfloat32m2_t test_vfwsub_vv_bf16m1_f32m2_rm_tu(vfloat32m2_t vd,
+ vbfloat16m1_t vs2,
+ vbfloat16m1_t vs1, size_t vl) {
+ return __riscv_vfwsub_vv_bf16m1_f32m2_rm_tu(vd, vs2, vs1, __RISCV_FRM_RNE,
+ vl);
+}
+
+// CHECK-RV64-LABEL: define dso_local <vscale x 4 x float> @test_vfwsub_vf_bf16m1_f32m2_rm_tu(
+// CHECK-RV64-SAME: <vscale x 4 x float> [[VD:%.*]], <vscale x 4 x bfloat> [[VS2:%.*]], bfloat noundef [[RS1:%.*]], i64 noundef [[VL:%.*]]) #[[ATTR0]] {
+// CHECK-RV64-NEXT: [[ENTRY:.*:]]
+// CHECK-RV64-NEXT: [[TMP0:%.*]] = call <vscale x 4 x float> @llvm.riscv.vfwsub.nxv4f32.nxv4bf16.bf16.i64(<vscale x 4 x float> [[VD]], <vscale x 4 x bfloat> [[VS2]], bfloat [[RS1]], i64 0, i64 [[VL]])
+// CHECK-RV64-NEXT: ret <vscale x 4 x float> [[TMP0]]
+//
+vfloat32m2_t test_vfwsub_vf_bf16m1_f32m2_rm_tu(vfloat32m2_t vd,
+ vbfloat16m1_t vs2, __bf16 rs1,
+ size_t vl) {
+ return __riscv_vfwsub_vf_bf16m1_f32m2_rm_tu(vd, vs2, rs1, __RISCV_FRM_RNE,
+ vl);
+}
+
+// CHECK-RV64-LABEL: define dso_local <vscale x 4 x float> @test_vfwsub_wv_bf16m1_f32m2_rm_tu(
+// CHECK-RV64-SAME: <vscale x 4 x float> [[VD:%.*]], <vscale x 4 x float> [[VS2:%.*]], <vscale x 4 x bfloat> [[VS1:%.*]], i64 noundef [[VL:%.*]]) #[[ATTR0]] {
+// CHECK-RV64-NEXT: [[ENTRY:.*:]]
+// CHECK-RV64-NEXT: [[TMP0:%.*]] = call <vscale x 4 x float> @llvm.riscv.vfwsub.w.nxv4f32.nxv4bf16.i64(<vscale x 4 x float> [[VD]], <vscale x 4 x float> [[VS2]], <vscale x 4 x bfloat> [[VS1]], i64 0, i64 [[VL]])
+// CHECK-RV64-NEXT: ret <vscale x 4 x float> [[TMP0]]
+//
+vfloat32m2_t test_vfwsub_wv_bf16m1_f32m2_rm_tu(vfloat32m2_t vd,
+ vfloat32m2_t vs2,
+ vbfloat16m1_t vs1, size_t vl) {
+ return __riscv_vfwsub_wv_bf16m1_f32m2_rm_tu(vd, vs2, vs1, __RISCV_FRM_RNE,
+ vl);
+}
+
+// CHECK-RV64-LABEL: define dso_local <vscale x 4 x float> @test_vfwsub_wf_bf16_f32m2_rm_tu(
+// CHECK-RV64-SAME: <vscale x 4 x float> [[VD:%.*]], <vscale x 4 x float> [[VS2:%.*]], bfloat noundef [[RS1:%.*]], i64 noundef [[VL:%.*]]) #[[ATTR0]] {
+// CHECK-RV64-NEXT: [[ENTRY:.*:]]
+// CHECK-RV64-NEXT: [[TMP0:%.*]] = call <vscale x 4 x float> @llvm.riscv.vfwsub.w.nxv4f32.bf16.i64(<vscale x 4 x float> [[VD]], <vscale x 4 x float> [[VS2]], bfloat [[RS1]], i64 0, i64 [[VL]])
+// CHECK-RV64-NEXT: ret <vscale x 4 x float> [[TMP0]]
+//
+vfloat32m2_t test_vfwsub_wf_bf16_f32m2_rm_tu(vfloat32m2_t vd, vfloat32m2_t vs2,
+ __bf16 rs1, size_t vl) {
+ return __riscv_vfwsub_wf_bf16_f32m2_rm_tu(vd, vs2, rs1, __RISCV_FRM_RNE, vl);
+}
+
+// CHECK-RV64-LABEL: define dso_local <vscale x 8 x float> @test_vfwsub_vv_bf16m2_f32m4_rm_tu(
+// CHECK-RV64-SAME: <vscale x 8 x float> [[VD:%.*]], <vscale x 8 x bfloat> [[VS2:%.*]], <vscale x 8 x bfloat> [[VS1:%.*]], i64 noundef [[VL:%.*]]) #[[ATTR0]] {
+// CHECK-RV64-NEXT: [[ENTRY:.*:]]
+// CHECK-RV64-NEXT: [[TMP0:%.*]] = call <vscale x 8 x float> @llvm.riscv.vfwsub.nxv8f32.nxv8bf16.nxv8bf16.i64(<vscale x 8 x float> [[VD]], <vscale x 8 x bfloat> [[VS2]], <vscale x 8 x bfloat> [[VS1]], i64 0, i64 [[VL]])
+// CHECK-RV64-NEXT: ret <vscale x 8 x float> [[TMP0]]
+//
+vfloat32m4_t test_vfwsub_vv_bf16m2_f32m4_rm_tu(vfloat32m4_t vd,
+ vbfloat16m2_t vs2,
+ vbfloat16m2_t vs1, size_t vl) {
+ return __riscv_vfwsub_vv_bf16m2_f32m4_rm_tu(vd, vs2, vs1, __RISCV_FRM_RNE,
+ vl);
+}
+
+// CHECK-RV64-LABEL: define dso_local <vscale x 8 x float> @test_vfwsub_vf_bf16m2_f32m4_rm_tu(
+// CHECK-RV64-SAME: <vscale x 8 x float> [[VD:%.*]], <vscale x 8 x bfloat> [[VS2:%.*]], bfloat noundef [[RS1:%.*]], i64 noundef [[VL:%.*]]) #[[ATTR0]] {
+// CHECK-RV64-NEXT: [[ENTRY:.*:]]
+// CHECK-RV64-NEXT: [[TMP0:%.*]] = call <vscale x 8 x float> @llvm.riscv.vfwsub.nxv8f32.nxv8bf16.bf16.i64(<vscale x 8 x float> [[VD]], <vscale x 8 x bfloat> [[VS2]], bfloat [[RS1]], i64 0, i64 [[VL]])
+// CHECK-RV64-NEXT: ret <vscale x 8 x float> [[TMP0]]
+//
+vfloat32m4_t test_vfwsub_vf_bf16m2_f32m4_rm_tu(vfloat32m4_t vd,
+ vbfloat16m2_t vs2, __bf16 rs1,
+ size_t vl) {
+ return __riscv_vfwsub_vf_bf16m2_f32m4_rm_tu(vd, vs2, rs1, __RISCV_FRM_RNE,
+ vl);
+}
+
+// CHECK-RV64-LABEL: define dso_local <vscale x 8 x float> @test_vfwsub_wv_bf16m2_f32m4_rm_tu(
+// CHECK-RV64-SAME: <vscale x 8 x float> [[VD:%.*]], <vscale x 8 x float> [[VS2:%.*]], <vscale x 8 x bfloat> [[VS1:%.*]], i64 noundef [[VL:%.*]]) #[[ATTR0]] {
+// CHECK-RV64-NEXT: [[ENTRY:.*:]]
+// CHECK-RV64-NEXT: [[TMP0:%.*]] = call <vscale x 8 x float> @llvm.riscv.vfwsub.w.nxv8f32.nxv8bf16.i64(<vscale x 8 x float> [[VD]], <vscale x 8 x float> [[VS2]], <vscale x 8 x bfloat> [[VS1]], i64 0, i64 [[VL]])
+// CHECK-RV64-NEXT: ret <vscale x 8 x float> [[TMP0]]
+//
+vfloat32m4_t test_vfwsub_wv_bf16m2_f32m4_rm_tu(vfloat32m4_t vd,
+ vfloat32m4_t vs2,
+ vbfloat16m2_t vs1, size_t vl) {
+ return __riscv_vfwsub_wv_bf16m2_f32m4_rm_tu(vd, vs2, vs1, __RISCV_FRM_RNE,
+ vl);
+}
+
+// CHECK-RV64-LABEL: define dso_local <vscale x 8 x float> @test_vfwsub_wf_bf16_f32m4_rm_tu(
+// CHECK-RV64-SAME: <vscale x 8 x float> [[VD:%.*]], <vscale x 8 x float> [[VS2:%.*]], bfloat noundef [[RS1:%.*]], i64 noundef [[VL:%.*]]) #[[ATTR0]] {
+// CHECK-RV64-NEXT: [[ENTRY:.*:]]
+// CHECK-RV64-NEXT: [[TMP0:%.*]] = call <vscale x 8 x float> @llvm.riscv.vfwsub.w.nxv8f32.bf16.i64(<vscale x 8 x float> [[VD]], <vscale x 8 x float> [[VS2]], bfloat [[RS1]], i64 0, i64 [[VL]])
+// CHECK-RV64-NEXT: ret <vscale x 8 x float> [[TMP0]]
+//
+vfloat32m4_t test_vfwsub_wf_bf16_f32m4_rm_tu(vfloat32m4_t vd, vfloat32m4_t vs2,
+ __bf16 rs1, size_t vl) {
+ return __riscv_vfwsub_wf_bf16_f32m4_rm_tu(vd, vs2, rs1, __RISCV_FRM_RNE, vl);
+}
+
+// CHECK-RV64-LABEL: define dso_local <vscale x 16 x float> @test_vfwsub_vv_bf16m4_f32m8_rm_tu(
+// CHECK-RV64-SAME: <vscale x 16 x float> [[VD:%.*]], <vscale x 16 x bfloat> [[VS2:%.*]], <vscale x 16 x bfloat> [[VS1:%.*]], i64 noundef [[VL:%.*]]) #[[ATTR0]] {
+// CHECK-RV64-NEXT: [[ENTRY:.*:]]
+// CHECK-RV64-NEXT: [[TMP0:%.*]] = call <vscale x 16 x float> @llvm.riscv.vfwsub.nxv16f32.nxv16bf16.nxv16bf16.i64(<vscale x 16 x float> [[VD]], <vscale x 16 x bfloat> [[VS2]], <vscale x 16 x bfloat> [[VS1]], i64 0, i64 [[VL]])
+// CHECK-RV64-NEXT: ret <vscale x 16 x float> [[TMP0]]
+//
+vfloat32m8_t test_vfwsub_vv_bf16m4_f32m8_rm_tu(vfloat32m8_t vd,
+ vbfloat16m4_t vs2,
+ vbfloat16m4_t vs1, size_t vl) {
+ return __riscv_vfwsub_vv_bf16m4_f32m8_rm_tu(vd, vs2, vs1, __RISCV_FRM_RNE,
+ vl);
+}
+
+// CHECK-RV64-LABEL: define dso_local <vscale x 16 x float> @test_vfwsub_vf_bf16m4_f32m8_rm_tu(
+// CHECK-RV64-SAME: <vscale x 16 x float> [[VD:%.*]], <vscale x 16 x bfloat> [[VS2:%.*]], bfloat noundef [[RS1:%.*]], i64 noundef [[VL:%.*]]) #[[ATTR0]] {
+// CHECK-RV64-NEXT: [[ENTRY:.*:]]
+// CHECK-RV64-NEXT: [[TMP0:%.*]] = call <vscale x 16 x float> @llvm.riscv.vfwsub.nxv16f32.nxv16bf16.bf16.i64(<vscale x 16 x float> [[VD]], <vscale x 16 x bfloat> [[VS2]], bfloat [[RS1]], i64 0, i64 [[VL]])
+// CHECK-RV64-NEXT: ret <vscale x 16 x float> [[TMP0]]
+//
+vfloat32m8_t test_vfwsub_vf_bf16m4_f32m8_rm_tu(vfloat32m8_t vd,
+ vbfloat16m4_t vs2, __bf16 rs1,
+ size_t vl) {
+ return __riscv_vfwsub_vf_bf16m4_f32m8_rm_tu(vd, vs2, rs1, __RISCV_FRM_RNE,
+ vl);
+}
+
+// CHECK-RV64-LABEL: define dso_local <vscale x 16 x float> @test_vfwsub_wv_bf16m4_f32m8_rm_tu(
+// CHECK-RV64-SAME: <vscale x 16 x float> [[VD:%.*]], <vscale x 16 x float> [[VS2:%.*]], <vscale x 16 x bfloat> [[VS1:%.*]], i64 noundef [[VL:%.*]]) #[[ATTR0]] {
+// CHECK-RV64-NEXT: [[ENTRY:.*:]]
+// CHECK-RV64-NEXT: [[TMP0:%.*]] = call <vscale x 16 x float> @llvm.riscv.vfwsub.w.nxv16f32.nxv16bf16.i64(<vscale x 16 x float> [[VD]], <vscale x 16 x float> [[VS2]], <vscale x 16 x bfloat> [[VS1]], i64 0, i64 [[VL]])
+// CHECK-RV64-NEXT: ret <vscale x 16 x float> [[TMP0]]
+//
+vfloat32m8_t test_vfwsub_wv_bf16m4_f32m8_rm_tu(vfloat32m8_t vd,
+ vfloat32m8_t vs2,
+ vbfloat16m4_t vs1, size_t vl) {
+ return __riscv_vfwsub_wv_bf16m4_f32m8_rm_tu(vd, vs2, vs1, __RISCV_FRM_RNE,
+ vl);
+}
+
+// CHECK-RV64-LABEL: define dso_local <vscale x 16 x float> @test_vfwsub_wf_bf16_f32m8_rm_tu(
+// CHECK-RV64-SAME: <vscale x 16 x float> [[VD:%.*]], <vscale x 16 x float> [[VS2:%.*]], bfloat noundef [[RS1:%.*]], i64 noundef [[VL:%.*]]) #[[ATTR0]] {
+// CHECK-RV64-NEXT: [[ENTRY:.*:]]
+// CHECK-RV64-NEXT: [[TMP0:%.*]] = call <vscale x 16 x float> @llvm.riscv.vfwsub.w.nxv16f32.bf16.i64(<vscale x 16 x float> [[VD]], <vscale x 16 x float> [[VS2]], bfloat [[RS1]], i64 0, i64 [[VL]])
+// CHECK-RV64-NEXT: ret <vscale x 16 x float> [[TMP0]]
+//
+vfloat32m8_t test_vfwsub_wf_bf16_f32m8_rm_tu(vfloat32m8_t vd, vfloat32m8_t vs2,
+ __bf16 rs1, size_t vl) {
+ return __riscv_vfwsub_wf_bf16_f32m8_rm_tu(vd, vs2, rs1, __RISCV_FRM_RNE, vl);
+}
+
+// CHECK-RV64-LABEL: define dso_local <vscale x 1 x float> @test_vfwsub_vv_bf16mf4_f32mf2_rm_tum(
+// CHECK-RV64-SAME: <vscale x 1 x i1> [[VM:%.*]], <vscale x 1 x float> [[VD:%.*]], <vscale x 1 x bfloat> [[VS2:%.*]], <vscale x 1 x bfloat> [[VS1:%.*]], i64 noundef [[VL:%.*]]) #[[ATTR0]] {
+// CHECK-RV64-NEXT: [[ENTRY:.*:]]
+// CHECK-RV64-NEXT: [[TMP0:%.*]] = call <vscale x 1 x float> @llvm.riscv.vfwsub.mask.nxv1f32.nxv1bf16.nxv1bf16.i64(<vscale x 1 x float> [[VD]], <vscale x 1 x bfloat> [[VS2]], <vscale x 1 x bfloat> [[VS1]], <vscale x 1 x i1> [[VM]], i64 0, i64 [[VL]], i64 2)
+// CHECK-RV64-NEXT: ret <vscale x 1 x float> [[TMP0]]
+//
+vfloat32mf2_t test_vfwsub_vv_bf16mf4_f32mf2_rm_tum(vbool64_t vm,
+ vfloat32mf2_t vd,
+ vbfloat16mf4_t vs2,
+ vbfloat16mf4_t vs1,
+ size_t vl) {
+ return __riscv_vfwsub_vv_bf16mf4_f32mf2_rm_tum(vm, vd, vs2, vs1,
+ __RISCV_FRM_RNE, vl);
+}
+
+// CHECK-RV64-LABEL: define dso_local <vscale x 1 x float> @test_vfwsub_vf_bf16mf4_f32mf2_rm_tum(
+// CHECK-RV64-SAME: <vscale x 1 x i1> [[VM:%.*]], <vscale x 1 x float> [[VD:%.*]], <vscale x 1 x bfloat> [[VS2:%.*]], bfloat noundef [[RS1:%.*]], i64 noundef [[VL:%.*]]) #[[ATTR0]] {
+// CHECK-RV64-NEXT: [[ENTRY:.*:]]
+// CHECK-RV64-NEXT: [[TMP0:%.*]] = call <vscale x 1 x float> @llvm.riscv.vfwsub.mask.nxv1f32.nxv1bf16.bf16.i64(<vscale x 1 x float> [[VD]], <vscale x 1 x bfloat> [[VS2]], bfloat [[RS1]], <vscale x 1 x i1> [[VM]], i64 0, i64 [[VL]], i64 2)
+// CHECK-RV64-NEXT: ret <vscale x 1 x float> [[TMP0]]
+//
+vfloat32mf2_t test_vfwsub_vf_bf16mf4_f32mf2_rm_tum(vbool64_t vm,
+ vfloat32mf2_t vd,
+ vbfloat16mf4_t vs2,
+ __bf16 rs1, size_t vl) {
+ return __riscv_vfwsub_vf_bf16mf4_f32mf2_rm_tum(vm, vd, vs2, rs1,
+ __RISCV_FRM_RNE, vl);
+}
+
+// CHECK-RV64-LABEL: define dso_local <vscale x 1 x float> @test_vfwsub_wv_bf16mf4_f32mf2_rm_tum(
+// CHECK-RV64-SAME: <vscale x 1 x i1> [[VM:%.*]], <vscale x 1 x float> [[VD:%.*]], <vscale x 1 x float> [[VS2:%.*]], <vscale x 1 x bfloat> [[VS1:%.*]], i64 noundef [[VL:%.*]]) #[[ATTR0]] {
+// CHECK-RV64-NEXT: [[ENTRY:.*:]]
+// CHECK-RV64-NEXT: [[TMP0:%.*]] = call <vscale x 1 x float> @llvm.riscv.vfwsub.w.mask.nxv1f32.nxv1bf16.i64(<vscale x 1 x float> [[VD]], <vscale x 1 x float> [[VS2]], <vscale x 1 x bfloat> [[VS1]], <vscale x 1 x i1> [[VM]], i64 0, i64 [[VL]], i64 2)
+// CHECK-RV64-NEXT: ret <vscale x 1 x float> [[TMP0]]
+//
+vfloat32mf2_t test_vfwsub_wv_bf16mf4_f32mf2_rm_tum(vbool64_t vm,
+ vfloat32mf2_t vd,
+ vfloat32mf2_t vs2,
+ vbfloat16mf4_t vs1,
+ size_t vl) {
+ return __riscv_vfwsub_wv_bf16mf4_f32mf2_rm_tum(vm, vd, vs2, vs1,
+ __RISCV_FRM_RNE, vl);
+}
+
+// CHECK-RV64-LABEL: define dso_local <vscale x 1 x float> @test_vfwsub_wf_bf16_f32mf2_rm_tum(
+// CHECK-RV64-SAME: <vscale x 1 x i1> [[VM:%.*]], <vscale x 1 x float> [[VD:%.*]], <vscale x 1 x float> [[VS2:%.*]], bfloat noundef [[RS1:%.*]], i64 noundef [[VL:%.*]]) #[[ATTR0]] {
+// CHECK-RV64-NEXT: [[ENTRY:.*:]]
+// CHECK-RV64-NEXT: [[TMP0:%.*]] = call <vscale x 1 x float> @llvm.riscv.vfwsub.w.mask.nxv1f32.bf16.i64(<vscale x 1 x float> [[VD]], <vscale x 1 x float> [[VS2]], bfloat [[RS1]], <vscale x 1 x i1> [[VM]], i64 0, i64 [[VL]], i64 2)
+// CHECK-RV64-NEXT: ret <vscale x 1 x float> [[TMP0]]
+//
+vfloat32mf2_t test_vfwsub_wf_bf16_f32mf2_rm_tum(vbool64_t vm, vfloat32mf2_t vd,
+ vfloat32mf2_t vs2, __bf16 rs1,
+ size_t vl) {
+ return __riscv_vfwsub_wf_bf16_f32mf2_rm_tum(vm, vd, vs2, rs1, __RISCV_FRM_RNE,
+ vl);
+}
+
+// CHECK-RV64-LABEL: define dso_local <vscale x 2 x float> @test_vfwsub_vv_bf16mf2_f32m1_rm_tum(
+// CHECK-RV64-SAME: <vscale x 2 x i1> [[VM:%.*]], <vscale x 2 x float> [[VD:%.*]], <vscale x 2 x bfloat> [[VS2:%.*]], <vscale x 2 x bfloat> [[VS1:%.*]], i64 noundef [[VL:%.*]]) #[[ATTR0]] {
+// CHECK-RV64-NEXT: [[ENTRY:.*:]]
+// CHECK-RV64-NEXT: [[TMP0:%.*]] = call <vscale x 2 x float> @llvm.riscv.vfwsub.mask.nxv2f32.nxv2bf16.nxv2bf16.i64(<vscale x 2 x float> [[VD]], <vscale x 2 x bfloat> [[VS2]], <vscale x 2 x bfloat> [[VS1]], <vscale x 2 x i1> [[VM]], i64 0, i64 [[VL]], i64 2)
+// CHECK-RV64-NEXT: ret <vscale x 2 x float> [[TMP0]]
+//
+vfloat32m1_t test_vfwsub_vv_bf16mf2_f32m1_rm_tum(vbool32_t vm, vfloat32m1_t vd,
+ vbfloat16mf2_t vs2,
+ vbfloat16mf2_t vs1,
+ size_t vl) {
+ return __riscv_vfwsub_vv_bf16mf2_f32m1_rm_tum(vm, vd, vs2, vs1,
+ __RISCV_FRM_RNE, vl);
+}
+
+// CHECK-RV64-LABEL: define dso_local <vscale x 2 x float> @test_vfwsub_vf_bf16mf2_f32m1_rm_tum(
+// CHECK-RV64-SAME: <vscale x 2 x i1> [[VM:%.*]], <vscale x 2 x float> [[VD:%.*]], <vscale x 2 x bfloat> [[VS2:%.*]], bfloat noundef [[RS1:%.*]], i64 noundef [[VL:%.*]]) #[[ATTR0]] {
+// CHECK-RV64-NEXT: [[ENTRY:.*:]]
+// CHECK-RV64-NEXT: [[TMP0:%.*]] = call <vscale x 2 x float> @llvm.riscv.vfwsub.mask.nxv2f32.nxv2bf16.bf16.i64(<vscale x 2 x float> [[VD]], <vscale x 2 x bfloat> [[VS2]], bfloat [[RS1]], <vscale x 2 x i1> [[VM]], i64 0, i64 [[VL]], i64 2)
+// CHECK-RV64-NEXT: ret <vscale x 2 x float> [[TMP0]]
+//
+vfloat32m1_t test_vfwsub_vf_bf16mf2_f32m1_rm_tum(vbool32_t vm, vfloat32m1_t vd,
+ vbfloat16mf2_t vs2, __bf16 rs1,
+ size_t vl) {
+ return __riscv_vfwsub_vf_bf16mf2_f32m1_rm_tum(vm, vd, vs2, rs1,
+ __RISCV_FRM_RNE, vl);
+}
+
+// CHECK-RV64-LABEL: define dso_local <vscale x 2 x float> @test_vfwsub_wv_bf16mf2_f32m1_rm_tum(
+// CHECK-RV64-SAME: <vscale x 2 x i1> [[VM:%.*]], <vscale x 2 x float> [[VD:%.*]], <vscale x 2 x float> [[VS2:%.*]], <vscale x 2 x bfloat> [[VS1:%.*]], i64 noundef [[VL:%.*]]) #[[ATTR0]] {
+// CHECK-RV64-NEXT: [[ENTRY:.*:]]
+// CHECK-RV64-NEXT: [[TMP0:%.*]] = call <vscale x 2 x float> @llvm.riscv.vfwsub.w.mask.nxv2f32.nxv2bf16.i64(<vscale x 2 x float> [[VD]], <vscale x 2 x float> [[VS2]], <vscale x 2 x bfloat> [[VS1]], <vscale x 2 x i1> [[VM]], i64 0, i64 [[VL]], i64 2)
+// CHECK-RV64-NEXT: ret <vscale x 2 x float> [[TMP0]]
+//
+vfloat32m1_t test_vfwsub_wv_bf16mf2_f32m1_rm_tum(vbool32_t vm, vfloat32m1_t vd,
+ vfloat32m1_t vs2,
+ vbfloat16mf2_t vs1,
+ size_t vl) {
+ return __riscv_vfwsub_wv_bf16mf2_f32m1_rm_tum(vm, vd, vs2, vs1,
+ __RISCV_FRM_RNE, vl);
+}
+
+// CHECK-RV64-LABEL: define dso_local <vscale x 2 x float> @test_vfwsub_wf_bf16_f32m1_rm_tum(
+// CHECK-RV64-SAME: <vscale x 2 x i1> [[VM:%.*]], <vscale x 2 x float> [[VD:%.*]], <vscale x 2 x float> [[VS2:%.*]], bfloat noundef [[RS1:%.*]], i64 noundef [[VL:%.*]]) #[[ATTR0]] {
+// CHECK-RV64-NEXT: [[ENTRY:.*:]]
+// CHECK-RV64-NEXT: [[TMP0:%.*]] = call <vscale x 2 x float> @llvm.riscv.vfwsub.w.mask.nxv2f32.bf16.i64(<vscale x 2 x float> [[VD]], <vscale x 2 x float> [[VS2]], bfloat [[RS1]], <vscale x 2 x i1> [[VM]], i64 0, i64 [[VL]], i64 2)
+// CHECK-RV64-NEXT: ret <vscale x 2 x float> [[TMP0]]
+//
+vfloat32m1_t test_vfwsub_wf_bf16_f32m1_rm_tum(vbool32_t vm, vfloat32m1_t vd,
+ vfloat32m1_t vs2, __bf16 rs1,
+ size_t vl) {
+ return __riscv_vfwsub_wf_bf16_f32m1_rm_tum(vm, vd, vs2, rs1, __RISCV_FRM_RNE,
+ vl);
+}
+
+// CHECK-RV64-LABEL: define dso_local <vscale x 4 x float> @test_vfwsub_vv_bf16m1_f32m2_rm_tum(
+// CHECK-RV64-SAME: <vscale x 4 x i1> [[VM:%.*]], <vscale x 4 x float> [[VD:%.*]], <vscale x 4 x bfloat> [[VS2:%.*]], <vscale x 4 x bfloat> [[VS1:%.*]], i64 noundef [[VL:%.*]]) #[[ATTR0]] {
+// CHECK-RV64-NEXT: [[ENTRY:.*:]]
+// CHECK-RV64-NEXT: [[TMP0:%.*]] = call <vscale x 4 x float> @llvm.riscv.vfwsub.mask.nxv4f32.nxv4bf16.nxv4bf16.i64(<vscale x 4 x float> [[VD]], <vscale x 4 x bfloat> [[VS2]], <vscale x 4 x bfloat> [[VS1]], <vscale x 4 x i1> [[VM]], i64 0, i64 [[VL]], i64 2)
+// CHECK-RV64-NEXT: ret <vscale x 4 x float> [[TMP0]]
+//
+vfloat32m2_t test_vfwsub_vv_bf16m1_f32m2_rm_tum(vbool16_t vm, vfloat32m2_t vd,
+ vbfloat16m1_t vs2,
+ vbfloat16m1_t vs1, size_t vl) {
+ return __riscv_vfwsub_vv_bf16m1_f32m2_rm_tum(vm, vd, vs2, vs1,
+ __RISCV_FRM_RNE, vl);
+}
+
+// CHECK-RV64-LABEL: define dso_local <vscale x 4 x float> @test_vfwsub_vf_bf16m1_f32m2_rm_tum(
+// CHECK-RV64-SAME: <vscale x 4 x i1> [[VM:%.*]], <vscale x 4 x float> [[VD:%.*]], <vscale x 4 x bfloat> [[VS2:%.*]], bfloat noundef [[RS1:%.*]], i64 noundef [[VL:%.*]]) #[[ATTR0]] {
+// CHECK-RV64-NEXT: [[ENTRY:.*:]]
+// CHECK-RV64-NEXT: [[TMP0:%.*]] = call <vscale x 4 x float> @llvm.riscv.vfwsub.mask.nxv4f32.nxv4bf16.bf16.i64(<vscale x 4 x float> [[VD]], <vscale x 4 x bfloat> [[VS2]], bfloat [[RS1]], <vscale x 4 x i1> [[VM]], i64 0, i64 [[VL]], i64 2)
+// CHECK-RV64-NEXT: ret <vscale x 4 x float> [[TMP0]]
+//
+vfloat32m2_t test_vfwsub_vf_bf16m1_f32m2_rm_tum(vbool16_t vm, vfloat32m2_t vd,
+ vbfloat16m1_t vs2, __bf16 rs1,
+ size_t vl) {
+ return __riscv_vfwsub_vf_bf16m1_f32m2_rm_tum(vm, vd, vs2, rs1,
+ __RISCV_FRM_RNE, vl);
+}
+
+// CHECK-RV64-LABEL: define dso_local <vscale x 4 x float> @test_vfwsub_wv_bf16m1_f32m2_rm_tum(
+// CHECK-RV64-SAME: <vscale x 4 x i1> [[VM:%.*]], <vscale x 4 x float> [[VD:%.*]], <vscale x 4 x float> [[VS2:%.*]], <vscale x 4 x bfloat> [[VS1:%.*]], i64 noundef [[VL:%.*]]) #[[ATTR0]] {
+// CHECK-RV64-NEXT: [[ENTRY:.*:]]
+// CHECK-RV64-NEXT: [[TMP0:%.*]] = call <vscale x 4 x float> @llvm.riscv.vfwsub.w.mask.nxv4f32.nxv4bf16.i64(<vscale x 4 x float> [[VD]], <vscale x 4 x float> [[VS2]], <vscale x 4 x bfloat> [[VS1]], <vscale x 4 x i1> [[VM]], i64 0, i64 [[VL]], i64 2)
+// CHECK-RV64-NEXT: ret <vscale x 4 x float> [[TMP0]]
+//
+vfloat32m2_t test_vfwsub_wv_bf16m1_f32m2_rm_tum(vbool16_t vm, vfloat32m2_t vd,
+ vfloat32m2_t vs2,
+ vbfloat16m1_t vs1, size_t vl) {
+ return __riscv_vfwsub_wv_bf16m1_f32m2_rm_tum(vm, vd, vs2, vs1,
+ __RISCV_FRM_RNE, vl);
+}
+
+// CHECK-RV64-LABEL: define dso_local <vscale x 4 x float> @test_vfwsub_wf_bf16_f32m2_rm_tum(
+// CHECK-RV64-SAME: <vscale x 4 x i1> [[VM:%.*]], <vscale x 4 x float> [[VD:%.*]], <vscale x 4 x float> [[VS2:%.*]], bfloat noundef [[RS1:%.*]], i64 noundef [[VL:%.*]]) #[[ATTR0]] {
+// CHECK-RV64-NEXT: [[ENTRY:.*:]]
+// CHECK-RV64-NEXT: [[TMP0:%.*]] = call <vscale x 4 x float> @llvm.riscv.vfwsub.w.mask.nxv4f32.bf16.i64(<vscale x 4 x float> [[VD]], <vscale x 4 x float> [[VS2]], bfloat [[RS1]], <vscale x 4 x i1> [[VM]], i64 0, i64 [[VL]], i64 2)
+// CHECK-RV64-NEXT: ret <vscale x 4 x float> [[TMP0]]
+//
+vfloat32m2_t test_vfwsub_wf_bf16_f32m2_rm_tum(vbool16_t vm, vfloat32m2_t vd,
+ vfloat32m2_t vs2, __bf16 rs1,
+ size_t vl) {
+ return __riscv_vfwsub_wf_bf16_f32m2_rm_tum(vm, vd, vs2, rs1, __RISCV_FRM_RNE,
+ vl);
+}
+
+// CHECK-RV64-LABEL: define dso_local <vscale x 8 x float> @test_vfwsub_vv_bf16m2_f32m4_rm_tum(
+// CHECK-RV64-SAME: <vscale x 8 x i1> [[VM:%.*]], <vscale x 8 x float> [[VD:%.*]], <vscale x 8 x bfloat> [[VS2:%.*]], <vscale x 8 x bfloat> [[VS1:%.*]], i64 noundef [[VL:%.*]]) #[[ATTR0]] {
+// CHECK-RV64-NEXT: [[ENTRY:.*:]]
+// CHECK-RV64-NEXT: [[TMP0:%.*]] = call <vscale x 8 x float> @llvm.riscv.vfwsub.mask.nxv8f32.nxv8bf16.nxv8bf16.i64(<vscale x 8 x float> [[VD]], <vscale x 8 x bfloat> [[VS2]], <vscale x 8 x bfloat> [[VS1]], <vscale x 8 x i1> [[VM]], i64 0, i64 [[VL]], i64 2)
+// CHECK-RV64-NEXT: ret <vscale x 8 x float> [[TMP0]]
+//
+vfloat32m4_t test_vfwsub_vv_bf16m2_f32m4_rm_tum(vbool8_t vm, vfloat32m4_t vd,
+ vbfloat16m2_t vs2,
+ vbfloat16m2_t vs1, size_t vl) {
+ return __riscv_vfwsub_vv_bf16m2_f32m4_rm_tum(vm, vd, vs2, vs1,
+ __RISCV_FRM_RNE, vl);
+}
+
+// CHECK-RV64-LABEL: define dso_local <vscale x 8 x float> @test_vfwsub_vf_bf16m2_f32m4_rm_tum(
+// CHECK-RV64-SAME: <vscale x 8 x i1> [[VM:%.*]], <vscale x 8 x float> [[VD:%.*]], <vscale x 8 x bfloat> [[VS2:%.*]], bfloat noundef [[RS1:%.*]], i64 noundef [[VL:%.*]]) #[[ATTR0]] {
+// CHECK-RV64-NEXT: [[ENTRY:.*:]]
+// CHECK-RV64-NEXT: [[TMP0:%.*]] = call <vscale x 8 x float> @llvm.riscv.vfwsub.mask.nxv8f32.nxv8bf16.bf16.i64(<vscale x 8 x float> [[VD]], <vscale x 8 x bfloat> [[VS2]], bfloat [[RS1]], <vscale x 8 x i1> [[VM]], i64 0, i64 [[VL]], i64 2)
+// CHECK-RV64-NEXT: ret <vscale x 8 x float> [[TMP0]]
+//
+vfloat32m4_t test_vfwsub_vf_bf16m2_f32m4_rm_tum(vbool8_t vm, vfloat32m4_t vd,
+ vbfloat16m2_t vs2, __bf16 rs1,
+ size_t vl) {
+ return __riscv_vfwsub_vf_bf16m2_f32m4_rm_tum(vm, vd, vs2, rs1,
+ __RISCV_FRM_RNE, vl);
+}
+
+// CHECK-RV64-LABEL: define dso_local <vscale x 8 x float> @test_vfwsub_wv_bf16m2_f32m4_rm_tum(
+// CHECK-RV64-SAME: <vscale x 8 x i1> [[VM:%.*]], <vscale x 8 x float> [[VD:%.*]], <vscale x 8 x float> [[VS2:%.*]], <vscale x 8 x bfloat> [[VS1:%.*]], i64 noundef [[VL:%.*]]) #[[ATTR0]] {
+// CHECK-RV64-NEXT: [[ENTRY:.*:]]
+// CHECK-RV64-NEXT: [[TMP0:%.*]] = call <vscale x 8 x float> @llvm.riscv.vfwsub.w.mask.nxv8f32.nxv8bf16.i64(<vscale x 8 x float> [[VD]], <vscale x 8 x float> [[VS2]], <vscale x 8 x bfloat> [[VS1]], <vscale x 8 x i1> [[VM]], i64 0, i64 [[VL]], i64 2)
+// CHECK-RV64-NEXT: ret <vscale x 8 x float> [[TMP0]]
+//
+vfloat32m4_t test_vfwsub_wv_bf16m2_f32m4_rm_tum(vbool8_t vm, vfloat32m4_t vd,
+ vfloat32m4_t vs2,
+ vbfloat16m2_t vs1, size_t vl) {
+ return __riscv_vfwsub_wv_bf16m2_f32m4_rm_tum(vm, vd, vs2, vs1,
+ __RISCV_FRM_RNE, vl);
+}
+
+// CHECK-RV64-LABEL: define dso_local <vscale x 8 x float> @test_vfwsub_wf_bf16_f32m4_rm_tum(
+// CHECK-RV64-SAME: <vscale x 8 x i1> [[VM:%.*]], <vscale x 8 x float> [[VD:%.*]], <vscale x 8 x float> [[VS2:%.*]], bfloat noundef [[RS1:%.*]], i64 noundef [[VL:%.*]]) #[[ATTR0]] {
+// CHECK-RV64-NEXT: [[ENTRY:.*:]]
+// CHECK-RV64-NEXT: [[TMP0:%.*]] = call <vscale x 8 x float> @llvm.riscv.vfwsub.w.mask.nxv8f32.bf16.i64(<vscale x 8 x float> [[VD]], <vscale x 8 x float> [[VS2]], bfloat [[RS1]], <vscale x 8 x i1> [[VM]], i64 0, i64 [[VL]], i64 2)
+// CHECK-RV64-NEXT: ret <vscale x 8 x float> [[TMP0]]
+//
+vfloat32m4_t test_vfwsub_wf_bf16_f32m4_rm_tum(vbool8_t vm, vfloat32m4_t vd,
+ vfloat32m4_t vs2, __bf16 rs1,
+ size_t vl) {
+ return __riscv_vfwsub_wf_bf16_f32m4_rm_tum(vm, vd, vs2, rs1, __RISCV_FRM_RNE,
+ vl);
+}
+
+// CHECK-RV64-LABEL: define dso_local <vscale x 16 x float> @test_vfwsub_vv_bf16m4_f32m8_rm_tum(
+// CHECK-RV64-SAME: <vscale x 16 x i1> [[VM:%.*]], <vscale x 16 x float> [[VD:%.*]], <vscale x 16 x bfloat> [[VS2:%.*]], <vscale x 16 x bfloat> [[VS1:%.*]], i64 noundef [[VL:%.*]]) #[[ATTR0]] {
+// CHECK-RV64-NEXT: [[ENTRY:.*:]]
+// CHECK-RV64-NEXT: [[TMP0:%.*]] = call <vscale x 16 x float> @llvm.riscv.vfwsub.mask.nxv16f32.nxv16bf16.nxv16bf16.i64(<vscale x 16 x float> [[VD]], <vscale x 16 x bfloat> [[VS2]], <vscale x 16 x bfloat> [[VS1]], <vscale x 16 x i1> [[VM]], i64 0, i64 [[VL]], i64 2)
+// CHECK-RV64-NEXT: ret <vscale x 16 x float> [[TMP0]]
+//
+vfloat32m8_t test_vfwsub_vv_bf16m4_f32m8_rm_tum(vbool4_t vm, vfloat32m8_t vd,
+ vbfloat16m4_t vs2,
+ vbfloat16m4_t vs1, size_t vl) {
+ return __riscv_vfwsub_vv_bf16m4_f32m8_rm_tum(vm, vd, vs2, vs1,
+ __RISCV_FRM_RNE, vl);
+}
+
+// CHECK-RV64-LABEL: define dso_local <vscale x 16 x float> @test_vfwsub_vf_bf16m4_f32m8_rm_tum(
+// CHECK-RV64-SAME: <vscale x 16 x i1> [[VM:%.*]], <vscale x 16 x float> [[VD:%.*]], <vscale x 16 x bfloat> [[VS2:%.*]], bfloat noundef [[RS1:%.*]], i64 noundef [[VL:%.*]]) #[[ATTR0]] {
+// CHECK-RV64-NEXT: [[ENTRY:.*:]]
+// CHECK-RV64-NEXT: [[TMP0:%.*]] = call <vscale x 16 x float> @llvm.riscv.vfwsub.mask.nxv16f32.nxv16bf16.bf16.i64(<vscale x 16 x float> [[VD]], <vscale x 16 x bfloat> [[VS2]], bfloat [[RS1]], <vscale x 16 x i1> [[VM]], i64 0, i64 [[VL]], i64 2)
+// CHECK-RV64-NEXT: ret <vscale x 16 x float> [[TMP0]]
+//
+vfloat32m8_t test_vfwsub_vf_bf16m4_f32m8_rm_tum(vbool4_t vm, vfloat32m8_t vd,
+ vbfloat16m4_t vs2, __bf16 rs1,
+ size_t vl) {
+ return __riscv_vfwsub_vf_bf16m4_f32m8_rm_tum(vm, vd, vs2, rs1,
+ __RISCV_FRM_RNE, vl);
+}
+
+// CHECK-RV64-LABEL: define dso_local <vscale x 16 x float> @test_vfwsub_wv_bf16m4_f32m8_rm_tum(
+// CHECK-RV64-SAME: <vscale x 16 x i1> [[VM:%.*]], <vscale x 16 x float> [[VD:%.*]], <vscale x 16 x float> [[VS2:%.*]], <vscale x 16 x bfloat> [[VS1:%.*]], i64 noundef [[VL:%.*]]) #[[ATTR0]] {
+// CHECK-RV64-NEXT: [[ENTRY:.*:]]
+// CHECK-RV64-NEXT: [[TMP0:%.*]] = call <vscale x 16 x float> @llvm.riscv.vfwsub.w.mask.nxv16f32.nxv16bf16.i64(<vscale x 16 x float> [[VD]], <vscale x 16 x float> [[VS2]], <vscale x 16 x bfloat> [[VS1]], <vscale x 16 x i1> [[VM]], i64 0, i64 [[VL]], i64 2)
+// CHECK-RV64-NEXT: ret <vscale x 16 x float> [[TMP0]]
+//
+vfloat32m8_t test_vfwsub_wv_bf16m4_f32m8_rm_tum(vbool4_t vm, vfloat32m8_t vd,
+ vfloat32m8_t vs2,
+ vbfloat16m4_t vs1, size_t vl) {
+ return __riscv_vfwsub_wv_bf16m4_f32m8_rm_tum(vm, vd, vs2, vs1,
+ __RISCV_FRM_RNE, vl);
+}
+
+// CHECK-RV64-LABEL: define dso_local <vscale x 16 x float> @test_vfwsub_wf_bf16_f32m8_rm_tum(
+// CHECK-RV64-SAME: <vscale x 16 x i1> [[VM:%.*]], <vscale x 16 x float> [[VD:%.*]], <vscale x 16 x float> [[VS2:%.*]], bfloat noundef [[RS1:%.*]], i64 noundef [[VL:%.*]]) #[[ATTR0]] {
+// CHECK-RV64-NEXT: [[ENTRY:.*:]]
+// CHECK-RV64-NEXT: [[TMP0:%.*]] = call <vscale x 16 x float> @llvm.riscv.vfwsub.w.mask.nxv16f32.bf16.i64(<vscale x 16 x float> [[VD]], <vscale x 16 x float> [[VS2]], bfloat [[RS1]], <vscale x 16 x i1> [[VM]], i64 0, i64 [[VL]], i64 2)
+// CHECK-RV64-NEXT: ret <vscale x 16 x float> [[TMP0]]
+//
+vfloat32m8_t test_vfwsub_wf_bf16_f32m8_rm_tum(vbool4_t vm, vfloat32m8_t vd,
+ vfloat32m8_t vs2, __bf16 rs1,
+ size_t vl) {
+ return __riscv_vfwsub_wf_bf16_f32m8_rm_tum(vm, vd, vs2, rs1, __RISCV_FRM_RNE,
+ vl);
+}
+
+// CHECK-RV64-LABEL: define dso_local <vscale x 1 x float> @test_vfwsub_vv_bf16mf4_f32mf2_rm_tumu(
+// CHECK-RV64-SAME: <vscale x 1 x i1> [[VM:%.*]], <vscale x 1 x float> [[VD:%.*]], <vscale x 1 x bfloat> [[VS2:%.*]], <vscale x 1 x bfloat> [[VS1:%.*]], i64 noundef [[VL:%.*]]) #[[ATTR0]] {
+// CHECK-RV64-NEXT: [[ENTRY:.*:]]
+// CHECK-RV64-NEXT: [[TMP0:%.*]] = call <vscale x 1 x float> @llvm.riscv.vfwsub.mask.nxv1f32.nxv1bf16.nxv1bf16.i64(<vscale x 1 x float> [[VD]], <vscale x 1 x bfloat> [[VS2]], <vscale x 1 x bfloat> [[VS1]], <vscale x 1 x i1> [[VM]], i64 0, i64 [[VL]], i64 0)
+// CHECK-RV64-NEXT: ret <vscale x 1 x float> [[TMP0]]
+//
+vfloat32mf2_t test_vfwsub_vv_bf16mf4_f32mf2_rm_tumu(vbool64_t vm,
+ vfloat32mf2_t vd,
+ vbfloat16mf4_t vs2,
+ vbfloat16mf4_t vs1,
+ size_t vl) {
+ return __riscv_vfwsub_vv_bf16mf4_f32mf2_rm_tumu(vm, vd, vs2, vs1,
+ __RISCV_FRM_RNE, vl);
+}
+
+// CHECK-RV64-LABEL: define dso_local <vscale x 1 x float> @test_vfwsub_vf_bf16mf4_f32mf2_rm_tumu(
+// CHECK-RV64-SAME: <vscale x 1 x i1> [[VM:%.*]], <vscale x 1 x float> [[VD:%.*]], <vscale x 1 x bfloat> [[VS2:%.*]], bfloat noundef [[RS1:%.*]], i64 noundef [[VL:%.*]]) #[[ATTR0]] {
+// CHECK-RV64-NEXT: [[ENTRY:.*:]]
+// CHECK-RV64-NEXT: [[TMP0:%.*]] = call <vscale x 1 x float> @llvm.riscv.vfwsub.mask.nxv1f32.nxv1bf16.bf16.i64(<vscale x 1 x float> [[VD]], <vscale x 1 x bfloat> [[VS2]], bfloat [[RS1]], <vscale x 1 x i1> [[VM]], i64 0, i64 [[VL]], i64 0)
+// CHECK-RV64-NEXT: ret <vscale x 1 x float> [[TMP0]]
+//
+vfloat32mf2_t test_vfwsub_vf_bf16mf4_f32mf2_rm_tumu(vbool64_t vm,
+ vfloat32mf2_t vd,
+ vbfloat16mf4_t vs2,
+ __bf16 rs1, size_t vl) {
+ return __riscv_vfwsub_vf_bf16mf4_f32mf2_rm_tumu(vm, vd, vs2, rs1,
+ __RISCV_FRM_RNE, vl);
+}
+
+// CHECK-RV64-LABEL: define dso_local <vscale x 1 x float> @test_vfwsub_wv_bf16mf4_f32mf2_rm_tumu(
+// CHECK-RV64-SAME: <vscale x 1 x i1> [[VM:%.*]], <vscale x 1 x float> [[VD:%.*]], <vscale x 1 x float> [[VS2:%.*]], <vscale x 1 x bfloat> [[VS1:%.*]], i64 noundef [[VL:%.*]]) #[[ATTR0]] {
+// CHECK-RV64-NEXT: [[ENTRY:.*:]]
+// CHECK-RV64-NEXT: [[TMP0:%.*]] = call <vscale x 1 x float> @llvm.riscv.vfwsub.w.mask.nxv1f32.nxv1bf16.i64(<vscale x 1 x float> [[VD]], <vscale x 1 x float> [[VS2]], <vscale x 1 x bfloat> [[VS1]], <vscale x 1 x i1> [[VM]], i64 0, i64 [[VL]], i64 0)
+// CHECK-RV64-NEXT: ret <vscale x 1 x float> [[TMP0]]
+//
+vfloat32mf2_t test_vfwsub_wv_bf16mf4_f32mf2_rm_tumu(vbool64_t vm,
+ vfloat32mf2_t vd,
+ vfloat32mf2_t vs2,
+ vbfloat16mf4_t vs1,
+ size_t vl) {
+ return __riscv_vfwsub_wv_bf16mf4_f32mf2_rm_tumu(vm, vd, vs2, vs1,
+ __RISCV_FRM_RNE, vl);
+}
+
+// CHECK-RV64-LABEL: define dso_local <vscale x 1 x float> @test_vfwsub_wf_bf16_f32mf2_rm_tumu(
+// CHECK-RV64-SAME: <vscale x 1 x i1> [[VM:%.*]], <vscale x 1 x float> [[VD:%.*]], <vscale x 1 x float> [[VS2:%.*]], bfloat noundef [[RS1:%.*]], i64 noundef [[VL:%.*]]) #[[ATTR0]] {
+// CHECK-RV64-NEXT: [[ENTRY:.*:]]
+// CHECK-RV64-NEXT: [[TMP0:%.*]] = call <vscale x 1 x float> @llvm.riscv.vfwsub.w.mask.nxv1f32.bf16.i64(<vscale x 1 x float> [[VD]], <vscale x 1 x float> [[VS2]], bfloat [[RS1]], <vscale x 1 x i1> [[VM]], i64 0, i64 [[VL]], i64 0)
+// CHECK-RV64-NEXT: ret <vscale x 1 x float> [[TMP0]]
+//
+vfloat32mf2_t test_vfwsub_wf_bf16_f32mf2_rm_tumu(vbool64_t vm, vfloat32mf2_t vd,
+ vfloat32mf2_t vs2, __bf16 rs1,
+ size_t vl) {
+ return __riscv_vfwsub_wf_bf16_f32mf2_rm_tumu(vm, vd, vs2, rs1,
+ __RISCV_FRM_RNE, vl);
+}
+
+// CHECK-RV64-LABEL: define dso_local <vscale x 2 x float> @test_vfwsub_vv_bf16mf2_f32m1_rm_tumu(
+// CHECK-RV64-SAME: <vscale x 2 x i1> [[VM:%.*]], <vscale x 2 x float> [[VD:%.*]], <vscale x 2 x bfloat> [[VS2:%.*]], <vscale x 2 x bfloat> [[VS1:%.*]], i64 noundef [[VL:%.*]]) #[[ATTR0]] {
+// CHECK-RV64-NEXT: [[ENTRY:.*:]]
+// CHECK-RV64-NEXT: [[TMP0:%.*]] = call <vscale x 2 x float> @llvm.riscv.vfwsub.mask.nxv2f32.nxv2bf16.nxv2bf16.i64(<vscale x 2 x float> [[VD]], <vscale x 2 x bfloat> [[VS2]], <vscale x 2 x bfloat> [[VS1]], <vscale x 2 x i1> [[VM]], i64 0, i64 [[VL]], i64 0)
+// CHECK-RV64-NEXT: ret <vscale x 2 x float> [[TMP0]]
+//
+vfloat32m1_t test_vfwsub_vv_bf16mf2_f32m1_rm_tumu(vbool32_t vm, vfloat32m1_t vd,
+ vbfloat16mf2_t vs2,
+ vbfloat16mf2_t vs1,
+ size_t vl) {
+ return __riscv_vfwsub_vv_bf16mf2_f32m1_rm_tumu(vm, vd, vs2, vs1,
+ __RISCV_FRM_RNE, vl);
+}
+
+// CHECK-RV64-LABEL: define dso_local <vscale x 2 x float> @test_vfwsub_vf_bf16mf2_f32m1_rm_tumu(
+// CHECK-RV64-SAME: <vscale x 2 x i1> [[VM:%.*]], <vscale x 2 x float> [[VD:%.*]], <vscale x 2 x bfloat> [[VS2:%.*]], bfloat noundef [[RS1:%.*]], i64 noundef [[VL:%.*]]) #[[ATTR0]] {
+// CHECK-RV64-NEXT: [[ENTRY:.*:]]
+// CHECK-RV64-NEXT: [[TMP0:%.*]] = call <vscale x 2 x float> @llvm.riscv.vfwsub.mask.nxv2f32.nxv2bf16.bf16.i64(<vscale x 2 x float> [[VD]], <vscale x 2 x bfloat> [[VS2]], bfloat [[RS1]], <vscale x 2 x i1> [[VM]], i64 0, i64 [[VL]], i64 0)
+// CHECK-RV64-NEXT: ret <vscale x 2 x float> [[TMP0]]
+//
+vfloat32m1_t test_vfwsub_vf_bf16mf2_f32m1_rm_tumu(vbool32_t vm, vfloat32m1_t vd,
+ vbfloat16mf2_t vs2,
+ __bf16 rs1, size_t vl) {
+ return __riscv_vfwsub_vf_bf16mf2_f32m1_rm_tumu(vm, vd, vs2, rs1,
+ __RISCV_FRM_RNE, vl);
+}
+
+// CHECK-RV64-LABEL: define dso_local <vscale x 2 x float> @test_vfwsub_wv_bf16mf2_f32m1_rm_tumu(
+// CHECK-RV64-SAME: <vscale x 2 x i1> [[VM:%.*]], <vscale x 2 x float> [[VD:%.*]], <vscale x 2 x float> [[VS2:%.*]], <vscale x 2 x bfloat> [[VS1:%.*]], i64 noundef [[VL:%.*]]) #[[ATTR0]] {
+// CHECK-RV64-NEXT: [[ENTRY:.*:]]
+// CHECK-RV64-NEXT: [[TMP0:%.*]] = call <vscale x 2 x float> @llvm.riscv.vfwsub.w.mask.nxv2f32.nxv2bf16.i64(<vscale x 2 x float> [[VD]], <vscale x 2 x float> [[VS2]], <vscale x 2 x bfloat> [[VS1]], <vscale x 2 x i1> [[VM]], i64 0, i64 [[VL]], i64 0)
+// CHECK-RV64-NEXT: ret <vscale x 2 x float> [[TMP0]]
+//
+vfloat32m1_t test_vfwsub_wv_bf16mf2_f32m1_rm_tumu(vbool32_t vm, vfloat32m1_t vd,
+ vfloat32m1_t vs2,
+ vbfloat16mf2_t vs1,
+ size_t vl) {
+ return __riscv_vfwsub_wv_bf16mf2_f32m1_rm_tumu(vm, vd, vs2, vs1,
+ __RISCV_FRM_RNE, vl);
+}
+
+// CHECK-RV64-LABEL: define dso_local <vscale x 2 x float> @test_vfwsub_wf_bf16_f32m1_rm_tumu(
+// CHECK-RV64-SAME: <vscale x 2 x i1> [[VM:%.*]], <vscale x 2 x float> [[VD:%.*]], <vscale x 2 x float> [[VS2:%.*]], bfloat noundef [[RS1:%.*]], i64 noundef [[VL:%.*]]) #[[ATTR0]] {
+// CHECK-RV64-NEXT: [[ENTRY:.*:]]
+// CHECK-RV64-NEXT: [[TMP0:%.*]] = call <vscale x 2 x float> @llvm.riscv.vfwsub.w.mask.nxv2f32.bf16.i64(<vscale x 2 x float> [[VD]], <vscale x 2 x float> [[VS2]], bfloat [[RS1]], <vscale x 2 x i1> [[VM]], i64 0, i64 [[VL]], i64 0)
+// CHECK-RV64-NEXT: ret <vscale x 2 x float> [[TMP0]]
+//
+vfloat32m1_t test_vfwsub_wf_bf16_f32m1_rm_tumu(vbool32_t vm, vfloat32m1_t vd,
+ vfloat32m1_t vs2, __bf16 rs1,
+ size_t vl) {
+ return __riscv_vfwsub_wf_bf16_f32m1_rm_tumu(vm, vd, vs2, rs1, __RISCV_FRM_RNE,
+ vl);
+}
+
+// CHECK-RV64-LABEL: define dso_local <vscale x 4 x float> @test_vfwsub_vv_bf16m1_f32m2_rm_tumu(
+// CHECK-RV64-SAME: <vscale x 4 x i1> [[VM:%.*]], <vscale x 4 x float> [[VD:%.*]], <vscale x 4 x bfloat> [[VS2:%.*]], <vscale x 4 x bfloat> [[VS1:%.*]], i64 noundef [[VL:%.*]]) #[[ATTR0]] {
+// CHECK-RV64-NEXT: [[ENTRY:.*:]]
+// CHECK-RV64-NEXT: [[TMP0:%.*]] = call <vscale x 4 x float> @llvm.riscv.vfwsub.mask.nxv4f32.nxv4bf16.nxv4bf16.i64(<vscale x 4 x float> [[VD]], <vscale x 4 x bfloat> [[VS2]], <vscale x 4 x bfloat> [[VS1]], <vscale x 4 x i1> [[VM]], i64 0, i64 [[VL]], i64 0)
+// CHECK-RV64-NEXT: ret <vscale x 4 x float> [[TMP0]]
+//
+vfloat32m2_t test_vfwsub_vv_bf16m1_f32m2_rm_tumu(vbool16_t vm, vfloat32m2_t vd,
+ vbfloat16m1_t vs2,
+ vbfloat16m1_t vs1, size_t vl) {
+ return __riscv_vfwsub_vv_bf16m1_f32m2_rm_tumu(vm, vd, vs2, vs1,
+ __RISCV_FRM_RNE, vl);
+}
+
+// CHECK-RV64-LABEL: define dso_local <vscale x 4 x float> @test_vfwsub_vf_bf16m1_f32m2_rm_tumu(
+// CHECK-RV64-SAME: <vscale x 4 x i1> [[VM:%.*]], <vscale x 4 x float> [[VD:%.*]], <vscale x 4 x bfloat> [[VS2:%.*]], bfloat noundef [[RS1:%.*]], i64 noundef [[VL:%.*]]) #[[ATTR0]] {
+// CHECK-RV64-NEXT: [[ENTRY:.*:]]
+// CHECK-RV64-NEXT: [[TMP0:%.*]] = call <vscale x 4 x float> @llvm.riscv.vfwsub.mask.nxv4f32.nxv4bf16.bf16.i64(<vscale x 4 x float> [[VD]], <vscale x 4 x bfloat> [[VS2]], bfloat [[RS1]], <vscale x 4 x i1> [[VM]], i64 0, i64 [[VL]], i64 0)
+// CHECK-RV64-NEXT: ret <vscale x 4 x float> [[TMP0]]
+//
+vfloat32m2_t test_vfwsub_vf_bf16m1_f32m2_rm_tumu(vbool16_t vm, vfloat32m2_t vd,
+ vbfloat16m1_t vs2, __bf16 rs1,
+ size_t vl) {
+ return __riscv_vfwsub_vf_bf16m1_f32m2_rm_tumu(vm, vd, vs2, rs1,
+ __RISCV_FRM_RNE, vl);
+}
+
+// CHECK-RV64-LABEL: define dso_local <vscale x 4 x float> @test_vfwsub_wv_bf16m1_f32m2_rm_tumu(
+// CHECK-RV64-SAME: <vscale x 4 x i1> [[VM:%.*]], <vscale x 4 x float> [[VD:%.*]], <vscale x 4 x float> [[VS2:%.*]], <vscale x 4 x bfloat> [[VS1:%.*]], i64 noundef [[VL:%.*]]) #[[ATTR0]] {
+// CHECK-RV64-NEXT: [[ENTRY:.*:]]
+// CHECK-RV64-NEXT: [[TMP0:%.*]] = call <vscale x 4 x float> @llvm.riscv.vfwsub.w.mask.nxv4f32.nxv4bf16.i64(<vscale x 4 x float> [[VD]], <vscale x 4 x float> [[VS2]], <vscale x 4 x bfloat> [[VS1]], <vscale x 4 x i1> [[VM]], i64 0, i64 [[VL]], i64 0)
+// CHECK-RV64-NEXT: ret <vscale x 4 x float> [[TMP0]]
+//
+vfloat32m2_t test_vfwsub_wv_bf16m1_f32m2_rm_tumu(vbool16_t vm, vfloat32m2_t vd,
+ vfloat32m2_t vs2,
+ vbfloat16m1_t vs1, size_t vl) {
+ return __riscv_vfwsub_wv_bf16m1_f32m2_rm_tumu(vm, vd, vs2, vs1,
+ __RISCV_FRM_RNE, vl);
+}
+
+// CHECK-RV64-LABEL: define dso_local <vscale x 4 x float> @test_vfwsub_wf_bf16_f32m2_rm_tumu(
+// CHECK-RV64-SAME: <vscale x 4 x i1> [[VM:%.*]], <vscale x 4 x float> [[VD:%.*]], <vscale x 4 x float> [[VS2:%.*]], bfloat noundef [[RS1:%.*]], i64 noundef [[VL:%.*]]) #[[ATTR0]] {
+// CHECK-RV64-NEXT: [[ENTRY:.*:]]
+// CHECK-RV64-NEXT: [[TMP0:%.*]] = call <vscale x 4 x float> @llvm.riscv.vfwsub.w.mask.nxv4f32.bf16.i64(<vscale x 4 x float> [[VD]], <vscale x 4 x float> [[VS2]], bfloat [[RS1]], <vscale x 4 x i1> [[VM]], i64 0, i64 [[VL]], i64 0)
+// CHECK-RV64-NEXT: ret <vscale x 4 x float> [[TMP0]]
+//
+vfloat32m2_t test_vfwsub_wf_bf16_f32m2_rm_tumu(vbool16_t vm, vfloat32m2_t vd,
+ vfloat32m2_t vs2, __bf16 rs1,
+ size_t vl) {
+ return __riscv_vfwsub_wf_bf16_f32m2_rm_tumu(vm, vd, vs2, rs1, __RISCV_FRM_RNE,
+ vl);
+}
+
+// CHECK-RV64-LABEL: define dso_local <vscale x 8 x float> @test_vfwsub_vv_bf16m2_f32m4_rm_tumu(
+// CHECK-RV64-SAME: <vscale x 8 x i1> [[VM:%.*]], <vscale x 8 x float> [[VD:%.*]], <vscale x 8 x bfloat> [[VS2:%.*]], <vscale x 8 x bfloat> [[VS1:%.*]], i64 noundef [[VL:%.*]]) #[[ATTR0]] {
+// CHECK-RV64-NEXT: [[ENTRY:.*:]]
+// CHECK-RV64-NEXT: [[TMP0:%.*]] = call <vscale x 8 x float> @llvm.riscv.vfwsub.mask.nxv8f32.nxv8bf16.nxv8bf16.i64(<vscale x 8 x float> [[VD]], <vscale x 8 x bfloat> [[VS2]], <vscale x 8 x bfloat> [[VS1]], <vscale x 8 x i1> [[VM]], i64 0, i64 [[VL]], i64 0)
+// CHECK-RV64-NEXT: ret <vscale x 8 x float> [[TMP0]]
+//
+vfloat32m4_t test_vfwsub_vv_bf16m2_f32m4_rm_tumu(vbool8_t vm, vfloat32m4_t vd,
+ vbfloat16m2_t vs2,
+ vbfloat16m2_t vs1, size_t vl) {
+ return __riscv_vfwsub_vv_bf16m2_f32m4_rm_tumu(vm, vd, vs2, vs1,
+ __RISCV_FRM_RNE, vl);
+}
+
+// CHECK-RV64-LABEL: define dso_local <vscale x 8 x float> @test_vfwsub_vf_bf16m2_f32m4_rm_tumu(
+// CHECK-RV64-SAME: <vscale x 8 x i1> [[VM:%.*]], <vscale x 8 x float> [[VD:%.*]], <vscale x 8 x bfloat> [[VS2:%.*]], bfloat noundef [[RS1:%.*]], i64 noundef [[VL:%.*]]) #[[ATTR0]] {
+// CHECK-RV64-NEXT: [[ENTRY:.*:]]
+// CHECK-RV64-NEXT: [[TMP0:%.*]] = call <vscale x 8 x float> @llvm.riscv.vfwsub.mask.nxv8f32.nxv8bf16.bf16.i64(<vscale x 8 x float> [[VD]], <vscale x 8 x bfloat> [[VS2]], bfloat [[RS1]], <vscale x 8 x i1> [[VM]], i64 0, i64 [[VL]], i64 0)
+// CHECK-RV64-NEXT: ret <vscale x 8 x float> [[TMP0]]
+//
+vfloat32m4_t test_vfwsub_vf_bf16m2_f32m4_rm_tumu(vbool8_t vm, vfloat32m4_t vd,
+ vbfloat16m2_t vs2, __bf16 rs1,
+ size_t vl) {
+ return __riscv_vfwsub_vf_bf16m2_f32m4_rm_tumu(vm, vd, vs2, rs1,
+ __RISCV_FRM_RNE, vl);
+}
+
+// CHECK-RV64-LABEL: define dso_local <vscale x 8 x float> @test_vfwsub_wv_bf16m2_f32m4_rm_tumu(
+// CHECK-RV64-SAME: <vscale x 8 x i1> [[VM:%.*]], <vscale x 8 x float> [[VD:%.*]], <vscale x 8 x float> [[VS2:%.*]], <vscale x 8 x bfloat> [[VS1:%.*]], i64 noundef [[VL:%.*]]) #[[ATTR0]] {
+// CHECK-RV64-NEXT: [[ENTRY:.*:]]
+// CHECK-RV64-NEXT: [[TMP0:%.*]] = call <vscale x 8 x float> @llvm.riscv.vfwsub.w.mask.nxv8f32.nxv8bf16.i64(<vscale x 8 x float> [[VD]], <vscale x 8 x float> [[VS2]], <vscale x 8 x bfloat> [[VS1]], <vscale x 8 x i1> [[VM]], i64 0, i64 [[VL]], i64 0)
+// CHECK-RV64-NEXT: ret <vscale x 8 x float> [[TMP0]]
+//
+vfloat32m4_t test_vfwsub_wv_bf16m2_f32m4_rm_tumu(vbool8_t vm, vfloat32m4_t vd,
+ vfloat32m4_t vs2,
+ vbfloat16m2_t vs1, size_t vl) {
+ return __riscv_vfwsub_wv_bf16m2_f32m4_rm_tumu(vm, vd, vs2, vs1,
+ __RISCV_FRM_RNE, vl);
+}
+
+// CHECK-RV64-LABEL: define dso_local <vscale x 8 x float> @test_vfwsub_wf_bf16_f32m4_rm_tumu(
+// CHECK-RV64-SAME: <vscale x 8 x i1> [[VM:%.*]], <vscale x 8 x float> [[VD:%.*]], <vscale x 8 x float> [[VS2:%.*]], bfloat noundef [[RS1:%.*]], i64 noundef [[VL:%.*]]) #[[ATTR0]] {
+// CHECK-RV64-NEXT: [[ENTRY:.*:]]
+// CHECK-RV64-NEXT: [[TMP0:%.*]] = call <vscale x 8 x float> @llvm.riscv.vfwsub.w.mask.nxv8f32.bf16.i64(<vscale x 8 x float> [[VD]], <vscale x 8 x float> [[VS2]], bfloat [[RS1]], <vscale x 8 x i1> [[VM]], i64 0, i64 [[VL]], i64 0)
+// CHECK-RV64-NEXT: ret <vscale x 8 x float> [[TMP0]]
+//
+vfloat32m4_t test_vfwsub_wf_bf16_f32m4_rm_tumu(vbool8_t vm, vfloat32m4_t vd,
+ vfloat32m4_t vs2, __bf16 rs1,
+ size_t vl) {
+ return __riscv_vfwsub_wf_bf16_f32m4_rm_tumu(vm, vd, vs2, rs1, __RISCV_FRM_RNE,
+ vl);
+}
+
+// CHECK-RV64-LABEL: define dso_local <vscale x 16 x float> @test_vfwsub_vv_bf16m4_f32m8_rm_tumu(
+// CHECK-RV64-SAME: <vscale x 16 x i1> [[VM:%.*]], <vscale x 16 x float> [[VD:%.*]], <vscale x 16 x bfloat> [[VS2:%.*]], <vscale x 16 x bfloat> [[VS1:%.*]], i64 noundef [[VL:%.*]]) #[[ATTR0]] {
+// CHECK-RV64-NEXT: [[ENTRY:.*:]]
+// CHECK-RV64-NEXT: [[TMP0:%.*]] = call <vscale x 16 x float> @llvm.riscv.vfwsub.mask.nxv16f32.nxv16bf16.nxv16bf16.i64(<vscale x 16 x float> [[VD]], <vscale x 16 x bfloat> [[VS2]], <vscale x 16 x bfloat> [[VS1]], <vscale x 16 x i1> [[VM]], i64 0, i64 [[VL]], i64 0)
+// CHECK-RV64-NEXT: ret <vscale x 16 x float> [[TMP0]]
+//
+vfloat32m8_t test_vfwsub_vv_bf16m4_f32m8_rm_tumu(vbool4_t vm, vfloat32m8_t vd,
+ vbfloat16m4_t vs2,
+ vbfloat16m4_t vs1, size_t vl) {
+ return __riscv_vfwsub_vv_bf16m4_f32m8_rm_tumu(vm, vd, vs2, vs1,
+ __RISCV_FRM_RNE, vl);
+}
+
+// CHECK-RV64-LABEL: define dso_local <vscale x 16 x float> @test_vfwsub_vf_bf16m4_f32m8_rm_tumu(
+// CHECK-RV64-SAME: <vscale x 16 x i1> [[VM:%.*]], <vscale x 16 x float> [[VD:%.*]], <vscale x 16 x bfloat> [[VS2:%.*]], bfloat noundef [[RS1:%.*]], i64 noundef [[VL:%.*]]) #[[ATTR0]] {
+// CHECK-RV64-NEXT: [[ENTRY:.*:]]
+// CHECK-RV64-NEXT: [[TMP0:%.*]] = call <vscale x 16 x float> @llvm.riscv.vfwsub.mask.nxv16f32.nxv16bf16.bf16.i64(<vscale x 16 x float> [[VD]], <vscale x 16 x bfloat> [[VS2]], bfloat [[RS1]], <vscale x 16 x i1> [[VM]], i64 0, i64 [[VL]], i64 0)
+// CHECK-RV64-NEXT: ret <vscale x 16 x float> [[TMP0]]
+//
+vfloat32m8_t test_vfwsub_vf_bf16m4_f32m8_rm_tumu(vbool4_t vm, vfloat32m8_t vd,
+ vbfloat16m4_t vs2, __bf16 rs1,
+ size_t vl) {
+ return __riscv_vfwsub_vf_bf16m4_f32m8_rm_tumu(vm, vd, vs2, rs1,
+ __RISCV_FRM_RNE, vl);
+}
+
+// CHECK-RV64-LABEL: define dso_local <vscale x 16 x float> @test_vfwsub_wv_bf16m4_f32m8_rm_tumu(
+// CHECK-RV64-SAME: <vscale x 16 x i1> [[VM:%.*]], <vscale x 16 x float> [[VD:%.*]], <vscale x 16 x float> [[VS2:%.*]], <vscale x 16 x bfloat> [[VS1:%.*]], i64 noundef [[VL:%.*]]) #[[ATTR0]] {
+// CHECK-RV64-NEXT: [[ENTRY:.*:]]
+// CHECK-RV64-NEXT: [[TMP0:%.*]] = call <vscale x 16 x float> @llvm.riscv.vfwsub.w.mask.nxv16f32.nxv16bf16.i64(<vscale x 16 x float> [[VD]], <vscale x 16 x float> [[VS2]], <vscale x 16 x bfloat> [[VS1]], <vscale x 16 x i1> [[VM]], i64 0, i64 [[VL]], i64 0)
+// CHECK-RV64-NEXT: ret <vscale x 16 x float> [[TMP0]]
+//
+vfloat32m8_t test_vfwsub_wv_bf16m4_f32m8_rm_tumu(vbool4_t vm, vfloat32m8_t vd,
+ vfloat32m8_t vs2,
+ vbfloat16m4_t vs1, size_t vl) {
+ return __riscv_vfwsub_wv_bf16m4_f32m8_rm_tumu(vm, vd, vs2, vs1,
+ __RISCV_FRM_RNE, vl);
+}
+
+// CHECK-RV64-LABEL: define dso_local <vscale x 16 x float> @test_vfwsub_wf_bf16_f32m8_rm_tumu(
+// CHECK-RV64-SAME: <vscale x 16 x i1> [[VM:%.*]], <vscale x 16 x float> [[VD:%.*]], <vscale x 16 x float> [[VS2:%.*]], bfloat noundef [[RS1:%.*]], i64 noundef [[VL:%.*]]) #[[ATTR0]] {
+// CHECK-RV64-NEXT: [[ENTRY:.*:]]
+// CHECK-RV64-NEXT: [[TMP0:%.*]] = call <vscale x 16 x float> @llvm.riscv.vfwsub.w.mask.nxv16f32.bf16.i64(<vscale x 16 x float> [[VD]], <vscale x 16 x float> [[VS2]], bfloat [[RS1]], <vscale x 16 x i1> [[VM]], i64 0, i64 [[VL]], i64 0)
+// CHECK-RV64-NEXT: ret <vscale x 16 x float> [[TMP0]]
+//
+vfloat32m8_t test_vfwsub_wf_bf16_f32m8_rm_tumu(vbool4_t vm, vfloat32m8_t vd,
+ vfloat32m8_t vs2, __bf16 rs1,
+ size_t vl) {
+ return __riscv_vfwsub_wf_bf16_f32m8_rm_tumu(vm, vd, vs2, rs1, __RISCV_FRM_RNE,
+ vl);
+}
+
+// CHECK-RV64-LABEL: define dso_local <vscale x 1 x float> @test_vfwsub_vv_bf16mf4_f32mf2_rm_mu(
+// CHECK-RV64-SAME: <vscale x 1 x i1> [[VM:%.*]], <vscale x 1 x float> [[VD:%.*]], <vscale x 1 x bfloat> [[VS2:%.*]], <vscale x 1 x bfloat> [[VS1:%.*]], i64 noundef [[VL:%.*]]) #[[ATTR0]] {
+// CHECK-RV64-NEXT: [[ENTRY:.*:]]
+// CHECK-RV64-NEXT: [[TMP0:%.*]] = call <vscale x 1 x float> @llvm.riscv.vfwsub.mask.nxv1f32.nxv1bf16.nxv1bf16.i64(<vscale x 1 x float> [[VD]], <vscale x 1 x bfloat> [[VS2]], <vscale x 1 x bfloat> [[VS1]], <vscale x 1 x i1> [[VM]], i64 0, i64 [[VL]], i64 1)
+// CHECK-RV64-NEXT: ret <vscale x 1 x float> [[TMP0]]
+//
+vfloat32mf2_t test_vfwsub_vv_bf16mf4_f32mf2_rm_mu(vbool64_t vm,
+ vfloat32mf2_t vd,
+ vbfloat16mf4_t vs2,
+ vbfloat16mf4_t vs1,
+ size_t vl) {
+ return __riscv_vfwsub_vv_bf16mf4_f32mf2_rm_mu(vm, vd, vs2, vs1,
+ __RISCV_FRM_RNE, vl);
+}
+
+// CHECK-RV64-LABEL: define dso_local <vscale x 1 x float> @test_vfwsub_vf_bf16mf4_f32mf2_rm_mu(
+// CHECK-RV64-SAME: <vscale x 1 x i1> [[VM:%.*]], <vscale x 1 x float> [[VD:%.*]], <vscale x 1 x bfloat> [[VS2:%.*]], bfloat noundef [[RS1:%.*]], i64 noundef [[VL:%.*]]) #[[ATTR0]] {
+// CHECK-RV64-NEXT: [[ENTRY:.*:]]
+// CHECK-RV64-NEXT: [[TMP0:%.*]] = call <vscale x 1 x float> @llvm.riscv.vfwsub.mask.nxv1f32.nxv1bf16.bf16.i64(<vscale x 1 x float> [[VD]], <vscale x 1 x bfloat> [[VS2]], bfloat [[RS1]], <vscale x 1 x i1> [[VM]], i64 0, i64 [[VL]], i64 1)
+// CHECK-RV64-NEXT: ret <vscale x 1 x float> [[TMP0]]
+//
+vfloat32mf2_t test_vfwsub_vf_bf16mf4_f32mf2_rm_mu(vbool64_t vm,
+ vfloat32mf2_t vd,
+ vbfloat16mf4_t vs2,
+ __bf16 rs1, size_t vl) {
+ return __riscv_vfwsub_vf_bf16mf4_f32mf2_rm_mu(vm, vd, vs2, rs1,
+ __RISCV_FRM_RNE, vl);
+}
+
+// CHECK-RV64-LABEL: define dso_local <vscale x 1 x float> @test_vfwsub_wv_bf16mf4_f32mf2_rm_mu(
+// CHECK-RV64-SAME: <vscale x 1 x i1> [[VM:%.*]], <vscale x 1 x float> [[VD:%.*]], <vscale x 1 x float> [[VS2:%.*]], <vscale x 1 x bfloat> [[VS1:%.*]], i64 noundef [[VL:%.*]]) #[[ATTR0]] {
+// CHECK-RV64-NEXT: [[ENTRY:.*:]]
+// CHECK-RV64-NEXT: [[TMP0:%.*]] = call <vscale x 1 x float> @llvm.riscv.vfwsub.w.mask.nxv1f32.nxv1bf16.i64(<vscale x 1 x float> [[VD]], <vscale x 1 x float> [[VS2]], <vscale x 1 x bfloat> [[VS1]], <vscale x 1 x i1> [[VM]], i64 0, i64 [[VL]], i64 1)
+// CHECK-RV64-NEXT: ret <vscale x 1 x float> [[TMP0]]
+//
+vfloat32mf2_t test_vfwsub_wv_bf16mf4_f32mf2_rm_mu(vbool64_t vm,
+ vfloat32mf2_t vd,
+ vfloat32mf2_t vs2,
+ vbfloat16mf4_t vs1,
+ size_t vl) {
+ return __riscv_vfwsub_wv_bf16mf4_f32mf2_rm_mu(vm, vd, vs2, vs1,
+ __RISCV_FRM_RNE, vl);
+}
+
+// CHECK-RV64-LABEL: define dso_local <vscale x 1 x float> @test_vfwsub_wf_bf16_f32mf2_rm_mu(
+// CHECK-RV64-SAME: <vscale x 1 x i1> [[VM:%.*]], <vscale x 1 x float> [[VD:%.*]], <vscale x 1 x float> [[VS2:%.*]], bfloat noundef [[RS1:%.*]], i64 noundef [[VL:%.*]]) #[[ATTR0]] {
+// CHECK-RV64-NEXT: [[ENTRY:.*:]]
+// CHECK-RV64-NEXT: [[TMP0:%.*]] = call <vscale x 1 x float> @llvm.riscv.vfwsub.w.mask.nxv1f32.bf16.i64(<vscale x 1 x float> [[VD]], <vscale x 1 x float> [[VS2]], bfloat [[RS1]], <vscale x 1 x i1> [[VM]], i64 0, i64 [[VL]], i64 1)
+// CHECK-RV64-NEXT: ret <vscale x 1 x float> [[TMP0]]
+//
+vfloat32mf2_t test_vfwsub_wf_bf16_f32mf2_rm_mu(vbool64_t vm, vfloat32mf2_t vd,
+ vfloat32mf2_t vs2, __bf16 rs1,
+ size_t vl) {
+ return __riscv_vfwsub_wf_bf16_f32mf2_rm_mu(vm, vd, vs2, rs1, __RISCV_FRM_RNE,
+ vl);
+}
+
+// CHECK-RV64-LABEL: define dso_local <vscale x 2 x float> @test_vfwsub_vv_bf16mf2_f32m1_rm_mu(
+// CHECK-RV64-SAME: <vscale x 2 x i1> [[VM:%.*]], <vscale x 2 x float> [[VD:%.*]], <vscale x 2 x bfloat> [[VS2:%.*]], <vscale x 2 x bfloat> [[VS1:%.*]], i64 noundef [[VL:%.*]]) #[[ATTR0]] {
+// CHECK-RV64-NEXT: [[ENTRY:.*:]]
+// CHECK-RV64-NEXT: [[TMP0:%.*]] = call <vscale x 2 x float> @llvm.riscv.vfwsub.mask.nxv2f32.nxv2bf16.nxv2bf16.i64(<vscale x 2 x float> [[VD]], <vscale x 2 x bfloat> [[VS2]], <vscale x 2 x bfloat> [[VS1]], <vscale x 2 x i1> [[VM]], i64 0, i64 [[VL]], i64 1)
+// CHECK-RV64-NEXT: ret <vscale x 2 x float> [[TMP0]]
+//
+vfloat32m1_t test_vfwsub_vv_bf16mf2_f32m1_rm_mu(vbool32_t vm, vfloat32m1_t vd,
+ vbfloat16mf2_t vs2,
+ vbfloat16mf2_t vs1, size_t vl) {
+ return __riscv_vfwsub_vv_bf16mf2_f32m1_rm_mu(vm, vd, vs2, vs1,
+ __RISCV_FRM_RNE, vl);
+}
+
+// CHECK-RV64-LABEL: define dso_local <vscale x 2 x float> @test_vfwsub_vf_bf16mf2_f32m1_rm_mu(
+// CHECK-RV64-SAME: <vscale x 2 x i1> [[VM:%.*]], <vscale x 2 x float> [[VD:%.*]], <vscale x 2 x bfloat> [[VS2:%.*]], bfloat noundef [[RS1:%.*]], i64 noundef [[VL:%.*]]) #[[ATTR0]] {
+// CHECK-RV64-NEXT: [[ENTRY:.*:]]
+// CHECK-RV64-NEXT: [[TMP0:%.*]] = call <vscale x 2 x float> @llvm.riscv.vfwsub.mask.nxv2f32.nxv2bf16.bf16.i64(<vscale x 2 x float> [[VD]], <vscale x 2 x bfloat> [[VS2]], bfloat [[RS1]], <vscale x 2 x i1> [[VM]], i64 0, i64 [[VL]], i64 1)
+// CHECK-RV64-NEXT: ret <vscale x 2 x float> [[TMP0]]
+//
+vfloat32m1_t test_vfwsub_vf_bf16mf2_f32m1_rm_mu(vbool32_t vm, vfloat32m1_t vd,
+ vbfloat16mf2_t vs2, __bf16 rs1,
+ size_t vl) {
+ return __riscv_vfwsub_vf_bf16mf2_f32m1_rm_mu(vm, vd, vs2, rs1,
+ __RISCV_FRM_RNE, vl);
+}
+
+// CHECK-RV64-LABEL: define dso_local <vscale x 2 x float> @test_vfwsub_wv_bf16mf2_f32m1_rm_mu(
+// CHECK-RV64-SAME: <vscale x 2 x i1> [[VM:%.*]], <vscale x 2 x float> [[VD:%.*]], <vscale x 2 x float> [[VS2:%.*]], <vscale x 2 x bfloat> [[VS1:%.*]], i64 noundef [[VL:%.*]]) #[[ATTR0]] {
+// CHECK-RV64-NEXT: [[ENTRY:.*:]]
+// CHECK-RV64-NEXT: [[TMP0:%.*]] = call <vscale x 2 x float> @llvm.riscv.vfwsub.w.mask.nxv2f32.nxv2bf16.i64(<vscale x 2 x float> [[VD]], <vscale x 2 x float> [[VS2]], <vscale x 2 x bfloat> [[VS1]], <vscale x 2 x i1> [[VM]], i64 0, i64 [[VL]], i64 1)
+// CHECK-RV64-NEXT: ret <vscale x 2 x float> [[TMP0]]
+//
+vfloat32m1_t test_vfwsub_wv_bf16mf2_f32m1_rm_mu(vbool32_t vm, vfloat32m1_t vd,
+ vfloat32m1_t vs2,
+ vbfloat16mf2_t vs1, size_t vl) {
+ return __riscv_vfwsub_wv_bf16mf2_f32m1_rm_mu(vm, vd, vs2, vs1,
+ __RISCV_FRM_RNE, vl);
+}
+
+// CHECK-RV64-LABEL: define dso_local <vscale x 2 x float> @test_vfwsub_wf_bf16_f32m1_rm_mu(
+// CHECK-RV64-SAME: <vscale x 2 x i1> [[VM:%.*]], <vscale x 2 x float> [[VD:%.*]], <vscale x 2 x float> [[VS2:%.*]], bfloat noundef [[RS1:%.*]], i64 noundef [[VL:%.*]]) #[[ATTR0]] {
+// CHECK-RV64-NEXT: [[ENTRY:.*:]]
+// CHECK-RV64-NEXT: [[TMP0:%.*]] = call <vscale x 2 x float> @llvm.riscv.vfwsub.w.mask.nxv2f32.bf16.i64(<vscale x 2 x float> [[VD]], <vscale x 2 x float> [[VS2]], bfloat [[RS1]], <vscale x 2 x i1> [[VM]], i64 0, i64 [[VL]], i64 1)
+// CHECK-RV64-NEXT: ret <vscale x 2 x float> [[TMP0]]
+//
+vfloat32m1_t test_vfwsub_wf_bf16_f32m1_rm_mu(vbool32_t vm, vfloat32m1_t vd,
+ vfloat32m1_t vs2, __bf16 rs1,
+ size_t vl) {
+ return __riscv_vfwsub_wf_bf16_f32m1_rm_mu(vm, vd, vs2, rs1, __RISCV_FRM_RNE,
+ vl);
+}
+
+// CHECK-RV64-LABEL: define dso_local <vscale x 4 x float> @test_vfwsub_vv_bf16m1_f32m2_rm_mu(
+// CHECK-RV64-SAME: <vscale x 4 x i1> [[VM:%.*]], <vscale x 4 x float> [[VD:%.*]], <vscale x 4 x bfloat> [[VS2:%.*]], <vscale x 4 x bfloat> [[VS1:%.*]], i64 noundef [[VL:%.*]]) #[[ATTR0]] {
+// CHECK-RV64-NEXT: [[ENTRY:.*:]]
+// CHECK-RV64-NEXT: [[TMP0:%.*]] = call <vscale x 4 x float> @llvm.riscv.vfwsub.mask.nxv4f32.nxv4bf16.nxv4bf16.i64(<vscale x 4 x float> [[VD]], <vscale x 4 x bfloat> [[VS2]], <vscale x 4 x bfloat> [[VS1]], <vscale x 4 x i1> [[VM]], i64 0, i64 [[VL]], i64 1)
+// CHECK-RV64-NEXT: ret <vscale x 4 x float> [[TMP0]]
+//
+vfloat32m2_t test_vfwsub_vv_bf16m1_f32m2_rm_mu(vbool16_t vm, vfloat32m2_t vd,
+ vbfloat16m1_t vs2,
+ vbfloat16m1_t vs1, size_t vl) {
+ return __riscv_vfwsub_vv_bf16m1_f32m2_rm_mu(vm, vd, vs2, vs1, __RISCV_FRM_RNE,
+ vl);
+}
+
+// CHECK-RV64-LABEL: define dso_local <vscale x 4 x float> @test_vfwsub_vf_bf16m1_f32m2_rm_mu(
+// CHECK-RV64-SAME: <vscale x 4 x i1> [[VM:%.*]], <vscale x 4 x float> [[VD:%.*]], <vscale x 4 x bfloat> [[VS2:%.*]], bfloat noundef [[RS1:%.*]], i64 noundef [[VL:%.*]]) #[[ATTR0]] {
+// CHECK-RV64-NEXT: [[ENTRY:.*:]]
+// CHECK-RV64-NEXT: [[TMP0:%.*]] = call <vscale x 4 x float> @llvm.riscv.vfwsub.mask.nxv4f32.nxv4bf16.bf16.i64(<vscale x 4 x float> [[VD]], <vscale x 4 x bfloat> [[VS2]], bfloat [[RS1]], <vscale x 4 x i1> [[VM]], i64 0, i64 [[VL]], i64 1)
+// CHECK-RV64-NEXT: ret <vscale x 4 x float> [[TMP0]]
+//
+vfloat32m2_t test_vfwsub_vf_bf16m1_f32m2_rm_mu(vbool16_t vm, vfloat32m2_t vd,
+ vbfloat16m1_t vs2, __bf16 rs1,
+ size_t vl) {
+ return __riscv_vfwsub_vf_bf16m1_f32m2_rm_mu(vm, vd, vs2, rs1, __RISCV_FRM_RNE,
+ vl);
+}
+
+// CHECK-RV64-LABEL: define dso_local <vscale x 4 x float> @test_vfwsub_wv_bf16m1_f32m2_rm_mu(
+// CHECK-RV64-SAME: <vscale x 4 x i1> [[VM:%.*]], <vscale x 4 x float> [[VD:%.*]], <vscale x 4 x float> [[VS2:%.*]], <vscale x 4 x bfloat> [[VS1:%.*]], i64 noundef [[VL:%.*]]) #[[ATTR0]] {
+// CHECK-RV64-NEXT: [[ENTRY:.*:]]
+// CHECK-RV64-NEXT: [[TMP0:%.*]] = call <vscale x 4 x float> @llvm.riscv.vfwsub.w.mask.nxv4f32.nxv4bf16.i64(<vscale x 4 x float> [[VD]], <vscale x 4 x float> [[VS2]], <vscale x 4 x bfloat> [[VS1]], <vscale x 4 x i1> [[VM]], i64 0, i64 [[VL]], i64 1)
+// CHECK-RV64-NEXT: ret <vscale x 4 x float> [[TMP0]]
+//
+vfloat32m2_t test_vfwsub_wv_bf16m1_f32m2_rm_mu(vbool16_t vm, vfloat32m2_t vd,
+ vfloat32m2_t vs2,
+ vbfloat16m1_t vs1, size_t vl) {
+ return __riscv_vfwsub_wv_bf16m1_f32m2_rm_mu(vm, vd, vs2, vs1, __RISCV_FRM_RNE,
+ vl);
+}
+
+// CHECK-RV64-LABEL: define dso_local <vscale x 4 x float> @test_vfwsub_wf_bf16_f32m2_rm_mu(
+// CHECK-RV64-SAME: <vscale x 4 x i1> [[VM:%.*]], <vscale x 4 x float> [[VD:%.*]], <vscale x 4 x float> [[VS2:%.*]], bfloat noundef [[RS1:%.*]], i64 noundef [[VL:%.*]]) #[[ATTR0]] {
+// CHECK-RV64-NEXT: [[ENTRY:.*:]]
+// CHECK-RV64-NEXT: [[TMP0:%.*]] = call <vscale x 4 x float> @llvm.riscv.vfwsub.w.mask.nxv4f32.bf16.i64(<vscale x 4 x float> [[VD]], <vscale x 4 x float> [[VS2]], bfloat [[RS1]], <vscale x 4 x i1> [[VM]], i64 0, i64 [[VL]], i64 1)
+// CHECK-RV64-NEXT: ret <vscale x 4 x float> [[TMP0]]
+//
+vfloat32m2_t test_vfwsub_wf_bf16_f32m2_rm_mu(vbool16_t vm, vfloat32m2_t vd,
+ vfloat32m2_t vs2, __bf16 rs1,
+ size_t vl) {
+ return __riscv_vfwsub_wf_bf16_f32m2_rm_mu(vm, vd, vs2, rs1, __RISCV_FRM_RNE,
+ vl);
+}
+
+// CHECK-RV64-LABEL: define dso_local <vscale x 8 x float> @test_vfwsub_vv_bf16m2_f32m4_rm_mu(
+// CHECK-RV64-SAME: <vscale x 8 x i1> [[VM:%.*]], <vscale x 8 x float> [[VD:%.*]], <vscale x 8 x bfloat> [[VS2:%.*]], <vscale x 8 x bfloat> [[VS1:%.*]], i64 noundef [[VL:%.*]]) #[[ATTR0]] {
+// CHECK-RV64-NEXT: [[ENTRY:.*:]]
+// CHECK-RV64-NEXT: [[TMP0:%.*]] = call <vscale x 8 x float> @llvm.riscv.vfwsub.mask.nxv8f32.nxv8bf16.nxv8bf16.i64(<vscale x 8 x float> [[VD]], <vscale x 8 x bfloat> [[VS2]], <vscale x 8 x bfloat> [[VS1]], <vscale x 8 x i1> [[VM]], i64 0, i64 [[VL]], i64 1)
+// CHECK-RV64-NEXT: ret <vscale x 8 x float> [[TMP0]]
+//
+vfloat32m4_t test_vfwsub_vv_bf16m2_f32m4_rm_mu(vbool8_t vm, vfloat32m4_t vd,
+ vbfloat16m2_t vs2,
+ vbfloat16m2_t vs1, size_t vl) {
+ return __riscv_vfwsub_vv_bf16m2_f32m4_rm_mu(vm, vd, vs2, vs1, __RISCV_FRM_RNE,
+ vl);
+}
+
+// CHECK-RV64-LABEL: define dso_local <vscale x 8 x float> @test_vfwsub_vf_bf16m2_f32m4_rm_mu(
+// CHECK-RV64-SAME: <vscale x 8 x i1> [[VM:%.*]], <vscale x 8 x float> [[VD:%.*]], <vscale x 8 x bfloat> [[VS2:%.*]], bfloat noundef [[RS1:%.*]], i64 noundef [[VL:%.*]]) #[[ATTR0]] {
+// CHECK-RV64-NEXT: [[ENTRY:.*:]]
+// CHECK-RV64-NEXT: [[TMP0:%.*]] = call <vscale x 8 x float> @llvm.riscv.vfwsub.mask.nxv8f32.nxv8bf16.bf16.i64(<vscale x 8 x float> [[VD]], <vscale x 8 x bfloat> [[VS2]], bfloat [[RS1]], <vscale x 8 x i1> [[VM]], i64 0, i64 [[VL]], i64 1)
+// CHECK-RV64-NEXT: ret <vscale x 8 x float> [[TMP0]]
+//
+vfloat32m4_t test_vfwsub_vf_bf16m2_f32m4_rm_mu(vbool8_t vm, vfloat32m4_t vd,
+ vbfloat16m2_t vs2, __bf16 rs1,
+ size_t vl) {
+ return __riscv_vfwsub_vf_bf16m2_f32m4_rm_mu(vm, vd, vs2, rs1, __RISCV_FRM_RNE,
+ vl);
+}
+
+// CHECK-RV64-LABEL: define dso_local <vscale x 8 x float> @test_vfwsub_wv_bf16m2_f32m4_rm_mu(
+// CHECK-RV64-SAME: <vscale x 8 x i1> [[VM:%.*]], <vscale x 8 x float> [[VD:%.*]], <vscale x 8 x float> [[VS2:%.*]], <vscale x 8 x bfloat> [[VS1:%.*]], i64 noundef [[VL:%.*]]) #[[ATTR0]] {
+// CHECK-RV64-NEXT: [[ENTRY:.*:]]
+// CHECK-RV64-NEXT: [[TMP0:%.*]] = call <vscale x 8 x float> @llvm.riscv.vfwsub.w.mask.nxv8f32.nxv8bf16.i64(<vscale x 8 x float> [[VD]], <vscale x 8 x float> [[VS2]], <vscale x 8 x bfloat> [[VS1]], <vscale x 8 x i1> [[VM]], i64 0, i64 [[VL]], i64 1)
+// CHECK-RV64-NEXT: ret <vscale x 8 x float> [[TMP0]]
+//
+vfloat32m4_t test_vfwsub_wv_bf16m2_f32m4_rm_mu(vbool8_t vm, vfloat32m4_t vd,
+ vfloat32m4_t vs2,
+ vbfloat16m2_t vs1, size_t vl) {
+ return __riscv_vfwsub_wv_bf16m2_f32m4_rm_mu(vm, vd, vs2, vs1, __RISCV_FRM_RNE,
+ vl);
+}
+
+// CHECK-RV64-LABEL: define dso_local <vscale x 8 x float> @test_vfwsub_wf_bf16_f32m4_rm_mu(
+// CHECK-RV64-SAME: <vscale x 8 x i1> [[VM:%.*]], <vscale x 8 x float> [[VD:%.*]], <vscale x 8 x float> [[VS2:%.*]], bfloat noundef [[RS1:%.*]], i64 noundef [[VL:%.*]]) #[[ATTR0]] {
+// CHECK-RV64-NEXT: [[ENTRY:.*:]]
+// CHECK-RV64-NEXT: [[TMP0:%.*]] = call <vscale x 8 x float> @llvm.riscv.vfwsub.w.mask.nxv8f32.bf16.i64(<vscale x 8 x float> [[VD]], <vscale x 8 x float> [[VS2]], bfloat [[RS1]], <vscale x 8 x i1> [[VM]], i64 0, i64 [[VL]], i64 1)
+// CHECK-RV64-NEXT: ret <vscale x 8 x float> [[TMP0]]
+//
+vfloat32m4_t test_vfwsub_wf_bf16_f32m4_rm_mu(vbool8_t vm, vfloat32m4_t vd,
+ vfloat32m4_t vs2, __bf16 rs1,
+ size_t vl) {
+ return __riscv_vfwsub_wf_bf16_f32m4_rm_mu(vm, vd, vs2, rs1, __RISCV_FRM_RNE,
+ vl);
+}
+
+// CHECK-RV64-LABEL: define dso_local <vscale x 16 x float> @test_vfwsub_vv_bf16m4_f32m8_rm_mu(
+// CHECK-RV64-SAME: <vscale x 16 x i1> [[VM:%.*]], <vscale x 16 x float> [[VD:%.*]], <vscale x 16 x bfloat> [[VS2:%.*]], <vscale x 16 x bfloat> [[VS1:%.*]], i64 noundef [[VL:%.*]]) #[[ATTR0]] {
+// CHECK-RV64-NEXT: [[ENTRY:.*:]]
+// CHECK-RV64-NEXT: [[TMP0:%.*]] = call <vscale x 16 x float> @llvm.riscv.vfwsub.mask.nxv16f32.nxv16bf16.nxv16bf16.i64(<vscale x 16 x float> [[VD]], <vscale x 16 x bfloat> [[VS2]], <vscale x 16 x bfloat> [[VS1]], <vscale x 16 x i1> [[VM]], i64 0, i64 [[VL]], i64 1)
+// CHECK-RV64-NEXT: ret <vscale x 16 x float> [[TMP0]]
+//
+vfloat32m8_t test_vfwsub_vv_bf16m4_f32m8_rm_mu(vbool4_t vm, vfloat32m8_t vd,
+ vbfloat16m4_t vs2,
+ vbfloat16m4_t vs1, size_t vl) {
+ return __riscv_vfwsub_vv_bf16m4_f32m8_rm_mu(vm, vd, vs2, vs1, __RISCV_FRM_RNE,
+ vl);
+}
+
+// CHECK-RV64-LABEL: define dso_local <vscale x 16 x float> @test_vfwsub_vf_bf16m4_f32m8_rm_mu(
+// CHECK-RV64-SAME: <vscale x 16 x i1> [[VM:%.*]], <vscale x 16 x float> [[VD:%.*]], <vscale x 16 x bfloat> [[VS2:%.*]], bfloat noundef [[RS1:%.*]], i64 noundef [[VL:%.*]]) #[[ATTR0]] {
+// CHECK-RV64-NEXT: [[ENTRY:.*:]]
+// CHECK-RV64-NEXT: [[TMP0:%.*]] = call <vscale x 16 x float> @llvm.riscv.vfwsub.mask.nxv16f32.nxv16bf16.bf16.i64(<vscale x 16 x float> [[VD]], <vscale x 16 x bfloat> [[VS2]], bfloat [[RS1]], <vscale x 16 x i1> [[VM]], i64 0, i64 [[VL]], i64 1)
+// CHECK-RV64-NEXT: ret <vscale x 16 x float> [[TMP0]]
+//
+vfloat32m8_t test_vfwsub_vf_bf16m4_f32m8_rm_mu(vbool4_t vm, vfloat32m8_t vd,
+ vbfloat16m4_t vs2, __bf16 rs1,
+ size_t vl) {
+ return __riscv_vfwsub_vf_bf16m4_f32m8_rm_mu(vm, vd, vs2, rs1, __RISCV_FRM_RNE,
+ vl);
+}
+
+// CHECK-RV64-LABEL: define dso_local <vscale x 16 x float> @test_vfwsub_wv_bf16m4_f32m8_rm_mu(
+// CHECK-RV64-SAME: <vscale x 16 x i1> [[VM:%.*]], <vscale x 16 x float> [[VD:%.*]], <vscale x 16 x float> [[VS2:%.*]], <vscale x 16 x bfloat> [[VS1:%.*]], i64 noundef [[VL:%.*]]) #[[ATTR0]] {
+// CHECK-RV64-NEXT: [[ENTRY:.*:]]
+// CHECK-RV64-NEXT: [[TMP0:%.*]] = call <vscale x 16 x float> @llvm.riscv.vfwsub.w.mask.nxv16f32.nxv16bf16.i64(<vscale x 16 x float> [[VD]], <vscale x 16 x float> [[VS2]], <vscale x 16 x bfloat> [[VS1]], <vscale x 16 x i1> [[VM]], i64 0, i64 [[VL]], i64 1)
+// CHECK-RV64-NEXT: ret <vscale x 16 x float> [[TMP0]]
+//
+vfloat32m8_t test_vfwsub_wv_bf16m4_f32m8_rm_mu(vbool4_t vm, vfloat32m8_t vd,
+ vfloat32m8_t vs2,
+ vbfloat16m4_t vs1, size_t vl) {
+ return __riscv_vfwsub_wv_bf16m4_f32m8_rm_mu(vm, vd, vs2, vs1, __RISCV_FRM_RNE,
+ vl);
+}
+
+// CHECK-RV64-LABEL: define dso_local <vscale x 16 x float> @test_vfwsub_wf_bf16_f32m8_rm_mu(
+// CHECK-RV64-SAME: <vscale x 16 x i1> [[VM:%.*]], <vscale x 16 x float> [[VD:%.*]], <vscale x 16 x float> [[VS2:%.*]], bfloat noundef [[RS1:%.*]], i64 noundef [[VL:%.*]]) #[[ATTR0]] {
+// CHECK-RV64-NEXT: [[ENTRY:.*:]]
+// CHECK-RV64-NEXT: [[TMP0:%.*]] = call <vscale x 16 x float> @llvm.riscv.vfwsub.w.mask.nxv16f32.bf16.i64(<vscale x 16 x float> [[VD]], <vscale x 16 x float> [[VS2]], bfloat [[RS1]], <vscale x 16 x i1> [[VM]], i64 0, i64 [[VL]], i64 1)
+// CHECK-RV64-NEXT: ret <vscale x 16 x float> [[TMP0]]
+//
+vfloat32m8_t test_vfwsub_wf_bf16_f32m8_rm_mu(vbool4_t vm, vfloat32m8_t vd,
+ vfloat32m8_t vs2, __bf16 rs1,
+ size_t vl) {
+ return __riscv_vfwsub_wf_bf16_f32m8_rm_mu(vm, vd, vs2, rs1, __RISCV_FRM_RNE,
+ vl);
+}
diff --git a/clang/test/CodeGen/RISCV/rvv-intrinsics-autogenerated/zvfbfa/policy/non-overloaded/vmfeq.c b/clang/test/CodeGen/RISCV/rvv-intrinsics-autogenerated/zvfbfa/policy/non-overloaded/vmfeq.c
new file mode 100644
index 0000000..b96aae5
--- /dev/null
+++ b/clang/test/CodeGen/RISCV/rvv-intrinsics-autogenerated/zvfbfa/policy/non-overloaded/vmfeq.c
@@ -0,0 +1,129 @@
+// NOTE: Assertions have been autogenerated by utils/update_cc_test_checks.py UTC_ARGS: --version 5
+// REQUIRES: riscv-registered-target
+// RUN: %clang_cc1 -triple riscv64 -target-feature +v \
+// RUN: -target-feature +experimental-zvfbfa -disable-O0-optnone \
+// RUN: -emit-llvm %s -o - | opt -S -passes=mem2reg | \
+// RUN: FileCheck --check-prefix=CHECK-RV64 %s
+
+#include <riscv_vector.h>
+
+// CHECK-RV64-LABEL: define dso_local <vscale x 1 x i1> @test_vmfeq_vv_bf16mf4_b64_mu(
+// CHECK-RV64-SAME: <vscale x 1 x i1> [[MASK:%.*]], <vscale x 1 x i1> [[MASKEDOFF:%.*]], <vscale x 1 x bfloat> [[OP1:%.*]], <vscale x 1 x bfloat> [[OP2:%.*]], i64 noundef [[VL:%.*]]) #[[ATTR0:[0-9]+]] {
+// CHECK-RV64-NEXT: [[ENTRY:.*:]]
+// CHECK-RV64-NEXT: [[TMP0:%.*]] = call <vscale x 1 x i1> @llvm.riscv.vmfeq.mask.nxv1bf16.nxv1bf16.i64(<vscale x 1 x i1> [[MASKEDOFF]], <vscale x 1 x bfloat> [[OP1]], <vscale x 1 x bfloat> [[OP2]], <vscale x 1 x i1> [[MASK]], i64 [[VL]])
+// CHECK-RV64-NEXT: ret <vscale x 1 x i1> [[TMP0]]
+//
+vbool64_t test_vmfeq_vv_bf16mf4_b64_mu(vbool64_t mask, vbool64_t maskedoff, vbfloat16mf4_t op1, vbfloat16mf4_t op2, size_t vl) {
+ return __riscv_vmfeq_vv_bf16mf4_b64_mu(mask, maskedoff, op1, op2, vl);
+}
+
+// CHECK-RV64-LABEL: define dso_local <vscale x 1 x i1> @test_vmfeq_vf_bf16mf4_b64_mu(
+// CHECK-RV64-SAME: <vscale x 1 x i1> [[MASK:%.*]], <vscale x 1 x i1> [[MASKEDOFF:%.*]], <vscale x 1 x bfloat> [[OP1:%.*]], bfloat noundef [[OP2:%.*]], i64 noundef [[VL:%.*]]) #[[ATTR0]] {
+// CHECK-RV64-NEXT: [[ENTRY:.*:]]
+// CHECK-RV64-NEXT: [[TMP0:%.*]] = call <vscale x 1 x i1> @llvm.riscv.vmfeq.mask.nxv1bf16.bf16.i64(<vscale x 1 x i1> [[MASKEDOFF]], <vscale x 1 x bfloat> [[OP1]], bfloat [[OP2]], <vscale x 1 x i1> [[MASK]], i64 [[VL]])
+// CHECK-RV64-NEXT: ret <vscale x 1 x i1> [[TMP0]]
+//
+vbool64_t test_vmfeq_vf_bf16mf4_b64_mu(vbool64_t mask, vbool64_t maskedoff, vbfloat16mf4_t op1, __bf16 op2, size_t vl) {
+ return __riscv_vmfeq_vf_bf16mf4_b64_mu(mask, maskedoff, op1, op2, vl);
+}
+
+// CHECK-RV64-LABEL: define dso_local <vscale x 2 x i1> @test_vmfeq_vv_bf16mf2_b32_mu(
+// CHECK-RV64-SAME: <vscale x 2 x i1> [[MASK:%.*]], <vscale x 2 x i1> [[MASKEDOFF:%.*]], <vscale x 2 x bfloat> [[OP1:%.*]], <vscale x 2 x bfloat> [[OP2:%.*]], i64 noundef [[VL:%.*]]) #[[ATTR0]] {
+// CHECK-RV64-NEXT: [[ENTRY:.*:]]
+// CHECK-RV64-NEXT: [[TMP0:%.*]] = call <vscale x 2 x i1> @llvm.riscv.vmfeq.mask.nxv2bf16.nxv2bf16.i64(<vscale x 2 x i1> [[MASKEDOFF]], <vscale x 2 x bfloat> [[OP1]], <vscale x 2 x bfloat> [[OP2]], <vscale x 2 x i1> [[MASK]], i64 [[VL]])
+// CHECK-RV64-NEXT: ret <vscale x 2 x i1> [[TMP0]]
+//
+vbool32_t test_vmfeq_vv_bf16mf2_b32_mu(vbool32_t mask, vbool32_t maskedoff, vbfloat16mf2_t op1, vbfloat16mf2_t op2, size_t vl) {
+ return __riscv_vmfeq_vv_bf16mf2_b32_mu(mask, maskedoff, op1, op2, vl);
+}
+
+// CHECK-RV64-LABEL: define dso_local <vscale x 2 x i1> @test_vmfeq_vf_bf16mf2_b32_mu(
+// CHECK-RV64-SAME: <vscale x 2 x i1> [[MASK:%.*]], <vscale x 2 x i1> [[MASKEDOFF:%.*]], <vscale x 2 x bfloat> [[OP1:%.*]], bfloat noundef [[OP2:%.*]], i64 noundef [[VL:%.*]]) #[[ATTR0]] {
+// CHECK-RV64-NEXT: [[ENTRY:.*:]]
+// CHECK-RV64-NEXT: [[TMP0:%.*]] = call <vscale x 2 x i1> @llvm.riscv.vmfeq.mask.nxv2bf16.bf16.i64(<vscale x 2 x i1> [[MASKEDOFF]], <vscale x 2 x bfloat> [[OP1]], bfloat [[OP2]], <vscale x 2 x i1> [[MASK]], i64 [[VL]])
+// CHECK-RV64-NEXT: ret <vscale x 2 x i1> [[TMP0]]
+//
+vbool32_t test_vmfeq_vf_bf16mf2_b32_mu(vbool32_t mask, vbool32_t maskedoff, vbfloat16mf2_t op1, __bf16 op2, size_t vl) {
+ return __riscv_vmfeq_vf_bf16mf2_b32_mu(mask, maskedoff, op1, op2, vl);
+}
+
+// CHECK-RV64-LABEL: define dso_local <vscale x 4 x i1> @test_vmfeq_vv_bf16m1_b16_mu(
+// CHECK-RV64-SAME: <vscale x 4 x i1> [[MASK:%.*]], <vscale x 4 x i1> [[MASKEDOFF:%.*]], <vscale x 4 x bfloat> [[OP1:%.*]], <vscale x 4 x bfloat> [[OP2:%.*]], i64 noundef [[VL:%.*]]) #[[ATTR0]] {
+// CHECK-RV64-NEXT: [[ENTRY:.*:]]
+// CHECK-RV64-NEXT: [[TMP0:%.*]] = call <vscale x 4 x i1> @llvm.riscv.vmfeq.mask.nxv4bf16.nxv4bf16.i64(<vscale x 4 x i1> [[MASKEDOFF]], <vscale x 4 x bfloat> [[OP1]], <vscale x 4 x bfloat> [[OP2]], <vscale x 4 x i1> [[MASK]], i64 [[VL]])
+// CHECK-RV64-NEXT: ret <vscale x 4 x i1> [[TMP0]]
+//
+vbool16_t test_vmfeq_vv_bf16m1_b16_mu(vbool16_t mask, vbool16_t maskedoff, vbfloat16m1_t op1, vbfloat16m1_t op2, size_t vl) {
+ return __riscv_vmfeq_vv_bf16m1_b16_mu(mask, maskedoff, op1, op2, vl);
+}
+
+// CHECK-RV64-LABEL: define dso_local <vscale x 4 x i1> @test_vmfeq_vf_bf16m1_b16_mu(
+// CHECK-RV64-SAME: <vscale x 4 x i1> [[MASK:%.*]], <vscale x 4 x i1> [[MASKEDOFF:%.*]], <vscale x 4 x bfloat> [[OP1:%.*]], bfloat noundef [[OP2:%.*]], i64 noundef [[VL:%.*]]) #[[ATTR0]] {
+// CHECK-RV64-NEXT: [[ENTRY:.*:]]
+// CHECK-RV64-NEXT: [[TMP0:%.*]] = call <vscale x 4 x i1> @llvm.riscv.vmfeq.mask.nxv4bf16.bf16.i64(<vscale x 4 x i1> [[MASKEDOFF]], <vscale x 4 x bfloat> [[OP1]], bfloat [[OP2]], <vscale x 4 x i1> [[MASK]], i64 [[VL]])
+// CHECK-RV64-NEXT: ret <vscale x 4 x i1> [[TMP0]]
+//
+vbool16_t test_vmfeq_vf_bf16m1_b16_mu(vbool16_t mask, vbool16_t maskedoff, vbfloat16m1_t op1, __bf16 op2, size_t vl) {
+ return __riscv_vmfeq_vf_bf16m1_b16_mu(mask, maskedoff, op1, op2, vl);
+}
+
+// CHECK-RV64-LABEL: define dso_local <vscale x 8 x i1> @test_vmfeq_vv_bf16m2_b8_mu(
+// CHECK-RV64-SAME: <vscale x 8 x i1> [[MASK:%.*]], <vscale x 8 x i1> [[MASKEDOFF:%.*]], <vscale x 8 x bfloat> [[OP1:%.*]], <vscale x 8 x bfloat> [[OP2:%.*]], i64 noundef [[VL:%.*]]) #[[ATTR0]] {
+// CHECK-RV64-NEXT: [[ENTRY:.*:]]
+// CHECK-RV64-NEXT: [[TMP0:%.*]] = call <vscale x 8 x i1> @llvm.riscv.vmfeq.mask.nxv8bf16.nxv8bf16.i64(<vscale x 8 x i1> [[MASKEDOFF]], <vscale x 8 x bfloat> [[OP1]], <vscale x 8 x bfloat> [[OP2]], <vscale x 8 x i1> [[MASK]], i64 [[VL]])
+// CHECK-RV64-NEXT: ret <vscale x 8 x i1> [[TMP0]]
+//
+vbool8_t test_vmfeq_vv_bf16m2_b8_mu(vbool8_t mask, vbool8_t maskedoff, vbfloat16m2_t op1, vbfloat16m2_t op2, size_t vl) {
+ return __riscv_vmfeq_vv_bf16m2_b8_mu(mask, maskedoff, op1, op2, vl);
+}
+
+// CHECK-RV64-LABEL: define dso_local <vscale x 8 x i1> @test_vmfeq_vf_bf16m2_b8_mu(
+// CHECK-RV64-SAME: <vscale x 8 x i1> [[MASK:%.*]], <vscale x 8 x i1> [[MASKEDOFF:%.*]], <vscale x 8 x bfloat> [[OP1:%.*]], bfloat noundef [[OP2:%.*]], i64 noundef [[VL:%.*]]) #[[ATTR0]] {
+// CHECK-RV64-NEXT: [[ENTRY:.*:]]
+// CHECK-RV64-NEXT: [[TMP0:%.*]] = call <vscale x 8 x i1> @llvm.riscv.vmfeq.mask.nxv8bf16.bf16.i64(<vscale x 8 x i1> [[MASKEDOFF]], <vscale x 8 x bfloat> [[OP1]], bfloat [[OP2]], <vscale x 8 x i1> [[MASK]], i64 [[VL]])
+// CHECK-RV64-NEXT: ret <vscale x 8 x i1> [[TMP0]]
+//
+vbool8_t test_vmfeq_vf_bf16m2_b8_mu(vbool8_t mask, vbool8_t maskedoff, vbfloat16m2_t op1, __bf16 op2, size_t vl) {
+ return __riscv_vmfeq_vf_bf16m2_b8_mu(mask, maskedoff, op1, op2, vl);
+}
+
+// CHECK-RV64-LABEL: define dso_local <vscale x 16 x i1> @test_vmfeq_vv_bf16m4_b4_mu(
+// CHECK-RV64-SAME: <vscale x 16 x i1> [[MASK:%.*]], <vscale x 16 x i1> [[MASKEDOFF:%.*]], <vscale x 16 x bfloat> [[OP1:%.*]], <vscale x 16 x bfloat> [[OP2:%.*]], i64 noundef [[VL:%.*]]) #[[ATTR0]] {
+// CHECK-RV64-NEXT: [[ENTRY:.*:]]
+// CHECK-RV64-NEXT: [[TMP0:%.*]] = call <vscale x 16 x i1> @llvm.riscv.vmfeq.mask.nxv16bf16.nxv16bf16.i64(<vscale x 16 x i1> [[MASKEDOFF]], <vscale x 16 x bfloat> [[OP1]], <vscale x 16 x bfloat> [[OP2]], <vscale x 16 x i1> [[MASK]], i64 [[VL]])
+// CHECK-RV64-NEXT: ret <vscale x 16 x i1> [[TMP0]]
+//
+vbool4_t test_vmfeq_vv_bf16m4_b4_mu(vbool4_t mask, vbool4_t maskedoff, vbfloat16m4_t op1, vbfloat16m4_t op2, size_t vl) {
+ return __riscv_vmfeq_vv_bf16m4_b4_mu(mask, maskedoff, op1, op2, vl);
+}
+
+// CHECK-RV64-LABEL: define dso_local <vscale x 16 x i1> @test_vmfeq_vf_bf16m4_b4_mu(
+// CHECK-RV64-SAME: <vscale x 16 x i1> [[MASK:%.*]], <vscale x 16 x i1> [[MASKEDOFF:%.*]], <vscale x 16 x bfloat> [[OP1:%.*]], bfloat noundef [[OP2:%.*]], i64 noundef [[VL:%.*]]) #[[ATTR0]] {
+// CHECK-RV64-NEXT: [[ENTRY:.*:]]
+// CHECK-RV64-NEXT: [[TMP0:%.*]] = call <vscale x 16 x i1> @llvm.riscv.vmfeq.mask.nxv16bf16.bf16.i64(<vscale x 16 x i1> [[MASKEDOFF]], <vscale x 16 x bfloat> [[OP1]], bfloat [[OP2]], <vscale x 16 x i1> [[MASK]], i64 [[VL]])
+// CHECK-RV64-NEXT: ret <vscale x 16 x i1> [[TMP0]]
+//
+vbool4_t test_vmfeq_vf_bf16m4_b4_mu(vbool4_t mask, vbool4_t maskedoff, vbfloat16m4_t op1, __bf16 op2, size_t vl) {
+ return __riscv_vmfeq_vf_bf16m4_b4_mu(mask, maskedoff, op1, op2, vl);
+}
+
+// CHECK-RV64-LABEL: define dso_local <vscale x 32 x i1> @test_vmfeq_vv_bf16m8_b2_mu(
+// CHECK-RV64-SAME: <vscale x 32 x i1> [[MASK:%.*]], <vscale x 32 x i1> [[MASKEDOFF:%.*]], <vscale x 32 x bfloat> [[OP1:%.*]], <vscale x 32 x bfloat> [[OP2:%.*]], i64 noundef [[VL:%.*]]) #[[ATTR0]] {
+// CHECK-RV64-NEXT: [[ENTRY:.*:]]
+// CHECK-RV64-NEXT: [[TMP0:%.*]] = call <vscale x 32 x i1> @llvm.riscv.vmfeq.mask.nxv32bf16.nxv32bf16.i64(<vscale x 32 x i1> [[MASKEDOFF]], <vscale x 32 x bfloat> [[OP1]], <vscale x 32 x bfloat> [[OP2]], <vscale x 32 x i1> [[MASK]], i64 [[VL]])
+// CHECK-RV64-NEXT: ret <vscale x 32 x i1> [[TMP0]]
+//
+vbool2_t test_vmfeq_vv_bf16m8_b2_mu(vbool2_t mask, vbool2_t maskedoff, vbfloat16m8_t op1, vbfloat16m8_t op2, size_t vl) {
+ return __riscv_vmfeq_vv_bf16m8_b2_mu(mask, maskedoff, op1, op2, vl);
+}
+
+// CHECK-RV64-LABEL: define dso_local <vscale x 32 x i1> @test_vmfeq_vf_bf16m8_b2_mu(
+// CHECK-RV64-SAME: <vscale x 32 x i1> [[MASK:%.*]], <vscale x 32 x i1> [[MASKEDOFF:%.*]], <vscale x 32 x bfloat> [[OP1:%.*]], bfloat noundef [[OP2:%.*]], i64 noundef [[VL:%.*]]) #[[ATTR0]] {
+// CHECK-RV64-NEXT: [[ENTRY:.*:]]
+// CHECK-RV64-NEXT: [[TMP0:%.*]] = call <vscale x 32 x i1> @llvm.riscv.vmfeq.mask.nxv32bf16.bf16.i64(<vscale x 32 x i1> [[MASKEDOFF]], <vscale x 32 x bfloat> [[OP1]], bfloat [[OP2]], <vscale x 32 x i1> [[MASK]], i64 [[VL]])
+// CHECK-RV64-NEXT: ret <vscale x 32 x i1> [[TMP0]]
+//
+vbool2_t test_vmfeq_vf_bf16m8_b2_mu(vbool2_t mask, vbool2_t maskedoff, vbfloat16m8_t op1, __bf16 op2, size_t vl) {
+ return __riscv_vmfeq_vf_bf16m8_b2_mu(mask, maskedoff, op1, op2, vl);
+}
+
diff --git a/clang/test/CodeGen/RISCV/rvv-intrinsics-autogenerated/zvfbfa/policy/non-overloaded/vmfge.c b/clang/test/CodeGen/RISCV/rvv-intrinsics-autogenerated/zvfbfa/policy/non-overloaded/vmfge.c
new file mode 100644
index 0000000..47d0427
--- /dev/null
+++ b/clang/test/CodeGen/RISCV/rvv-intrinsics-autogenerated/zvfbfa/policy/non-overloaded/vmfge.c
@@ -0,0 +1,129 @@
+// NOTE: Assertions have been autogenerated by utils/update_cc_test_checks.py UTC_ARGS: --version 5
+// REQUIRES: riscv-registered-target
+// RUN: %clang_cc1 -triple riscv64 -target-feature +v \
+// RUN: -target-feature +experimental-zvfbfa -disable-O0-optnone \
+// RUN: -emit-llvm %s -o - | opt -S -passes=mem2reg | \
+// RUN: FileCheck --check-prefix=CHECK-RV64 %s
+
+#include <riscv_vector.h>
+
+// CHECK-RV64-LABEL: define dso_local <vscale x 1 x i1> @test_vmfge_vv_bf16mf4_b64_mu(
+// CHECK-RV64-SAME: <vscale x 1 x i1> [[MASK:%.*]], <vscale x 1 x i1> [[MASKEDOFF:%.*]], <vscale x 1 x bfloat> [[OP1:%.*]], <vscale x 1 x bfloat> [[OP2:%.*]], i64 noundef [[VL:%.*]]) #[[ATTR0:[0-9]+]] {
+// CHECK-RV64-NEXT: [[ENTRY:.*:]]
+// CHECK-RV64-NEXT: [[TMP0:%.*]] = call <vscale x 1 x i1> @llvm.riscv.vmfge.mask.nxv1bf16.nxv1bf16.i64(<vscale x 1 x i1> [[MASKEDOFF]], <vscale x 1 x bfloat> [[OP1]], <vscale x 1 x bfloat> [[OP2]], <vscale x 1 x i1> [[MASK]], i64 [[VL]])
+// CHECK-RV64-NEXT: ret <vscale x 1 x i1> [[TMP0]]
+//
+vbool64_t test_vmfge_vv_bf16mf4_b64_mu(vbool64_t mask, vbool64_t maskedoff, vbfloat16mf4_t op1, vbfloat16mf4_t op2, size_t vl) {
+ return __riscv_vmfge_vv_bf16mf4_b64_mu(mask, maskedoff, op1, op2, vl);
+}
+
+// CHECK-RV64-LABEL: define dso_local <vscale x 1 x i1> @test_vmfge_vf_bf16mf4_b64_mu(
+// CHECK-RV64-SAME: <vscale x 1 x i1> [[MASK:%.*]], <vscale x 1 x i1> [[MASKEDOFF:%.*]], <vscale x 1 x bfloat> [[OP1:%.*]], bfloat noundef [[OP2:%.*]], i64 noundef [[VL:%.*]]) #[[ATTR0]] {
+// CHECK-RV64-NEXT: [[ENTRY:.*:]]
+// CHECK-RV64-NEXT: [[TMP0:%.*]] = call <vscale x 1 x i1> @llvm.riscv.vmfge.mask.nxv1bf16.bf16.i64(<vscale x 1 x i1> [[MASKEDOFF]], <vscale x 1 x bfloat> [[OP1]], bfloat [[OP2]], <vscale x 1 x i1> [[MASK]], i64 [[VL]])
+// CHECK-RV64-NEXT: ret <vscale x 1 x i1> [[TMP0]]
+//
+vbool64_t test_vmfge_vf_bf16mf4_b64_mu(vbool64_t mask, vbool64_t maskedoff, vbfloat16mf4_t op1, __bf16 op2, size_t vl) {
+ return __riscv_vmfge_vf_bf16mf4_b64_mu(mask, maskedoff, op1, op2, vl);
+}
+
+// CHECK-RV64-LABEL: define dso_local <vscale x 2 x i1> @test_vmfge_vv_bf16mf2_b32_mu(
+// CHECK-RV64-SAME: <vscale x 2 x i1> [[MASK:%.*]], <vscale x 2 x i1> [[MASKEDOFF:%.*]], <vscale x 2 x bfloat> [[OP1:%.*]], <vscale x 2 x bfloat> [[OP2:%.*]], i64 noundef [[VL:%.*]]) #[[ATTR0]] {
+// CHECK-RV64-NEXT: [[ENTRY:.*:]]
+// CHECK-RV64-NEXT: [[TMP0:%.*]] = call <vscale x 2 x i1> @llvm.riscv.vmfge.mask.nxv2bf16.nxv2bf16.i64(<vscale x 2 x i1> [[MASKEDOFF]], <vscale x 2 x bfloat> [[OP1]], <vscale x 2 x bfloat> [[OP2]], <vscale x 2 x i1> [[MASK]], i64 [[VL]])
+// CHECK-RV64-NEXT: ret <vscale x 2 x i1> [[TMP0]]
+//
+vbool32_t test_vmfge_vv_bf16mf2_b32_mu(vbool32_t mask, vbool32_t maskedoff, vbfloat16mf2_t op1, vbfloat16mf2_t op2, size_t vl) {
+ return __riscv_vmfge_vv_bf16mf2_b32_mu(mask, maskedoff, op1, op2, vl);
+}
+
+// CHECK-RV64-LABEL: define dso_local <vscale x 2 x i1> @test_vmfge_vf_bf16mf2_b32_mu(
+// CHECK-RV64-SAME: <vscale x 2 x i1> [[MASK:%.*]], <vscale x 2 x i1> [[MASKEDOFF:%.*]], <vscale x 2 x bfloat> [[OP1:%.*]], bfloat noundef [[OP2:%.*]], i64 noundef [[VL:%.*]]) #[[ATTR0]] {
+// CHECK-RV64-NEXT: [[ENTRY:.*:]]
+// CHECK-RV64-NEXT: [[TMP0:%.*]] = call <vscale x 2 x i1> @llvm.riscv.vmfge.mask.nxv2bf16.bf16.i64(<vscale x 2 x i1> [[MASKEDOFF]], <vscale x 2 x bfloat> [[OP1]], bfloat [[OP2]], <vscale x 2 x i1> [[MASK]], i64 [[VL]])
+// CHECK-RV64-NEXT: ret <vscale x 2 x i1> [[TMP0]]
+//
+vbool32_t test_vmfge_vf_bf16mf2_b32_mu(vbool32_t mask, vbool32_t maskedoff, vbfloat16mf2_t op1, __bf16 op2, size_t vl) {
+ return __riscv_vmfge_vf_bf16mf2_b32_mu(mask, maskedoff, op1, op2, vl);
+}
+
+// CHECK-RV64-LABEL: define dso_local <vscale x 4 x i1> @test_vmfge_vv_bf16m1_b16_mu(
+// CHECK-RV64-SAME: <vscale x 4 x i1> [[MASK:%.*]], <vscale x 4 x i1> [[MASKEDOFF:%.*]], <vscale x 4 x bfloat> [[OP1:%.*]], <vscale x 4 x bfloat> [[OP2:%.*]], i64 noundef [[VL:%.*]]) #[[ATTR0]] {
+// CHECK-RV64-NEXT: [[ENTRY:.*:]]
+// CHECK-RV64-NEXT: [[TMP0:%.*]] = call <vscale x 4 x i1> @llvm.riscv.vmfge.mask.nxv4bf16.nxv4bf16.i64(<vscale x 4 x i1> [[MASKEDOFF]], <vscale x 4 x bfloat> [[OP1]], <vscale x 4 x bfloat> [[OP2]], <vscale x 4 x i1> [[MASK]], i64 [[VL]])
+// CHECK-RV64-NEXT: ret <vscale x 4 x i1> [[TMP0]]
+//
+vbool16_t test_vmfge_vv_bf16m1_b16_mu(vbool16_t mask, vbool16_t maskedoff, vbfloat16m1_t op1, vbfloat16m1_t op2, size_t vl) {
+ return __riscv_vmfge_vv_bf16m1_b16_mu(mask, maskedoff, op1, op2, vl);
+}
+
+// CHECK-RV64-LABEL: define dso_local <vscale x 4 x i1> @test_vmfge_vf_bf16m1_b16_mu(
+// CHECK-RV64-SAME: <vscale x 4 x i1> [[MASK:%.*]], <vscale x 4 x i1> [[MASKEDOFF:%.*]], <vscale x 4 x bfloat> [[OP1:%.*]], bfloat noundef [[OP2:%.*]], i64 noundef [[VL:%.*]]) #[[ATTR0]] {
+// CHECK-RV64-NEXT: [[ENTRY:.*:]]
+// CHECK-RV64-NEXT: [[TMP0:%.*]] = call <vscale x 4 x i1> @llvm.riscv.vmfge.mask.nxv4bf16.bf16.i64(<vscale x 4 x i1> [[MASKEDOFF]], <vscale x 4 x bfloat> [[OP1]], bfloat [[OP2]], <vscale x 4 x i1> [[MASK]], i64 [[VL]])
+// CHECK-RV64-NEXT: ret <vscale x 4 x i1> [[TMP0]]
+//
+vbool16_t test_vmfge_vf_bf16m1_b16_mu(vbool16_t mask, vbool16_t maskedoff, vbfloat16m1_t op1, __bf16 op2, size_t vl) {
+ return __riscv_vmfge_vf_bf16m1_b16_mu(mask, maskedoff, op1, op2, vl);
+}
+
+// CHECK-RV64-LABEL: define dso_local <vscale x 8 x i1> @test_vmfge_vv_bf16m2_b8_mu(
+// CHECK-RV64-SAME: <vscale x 8 x i1> [[MASK:%.*]], <vscale x 8 x i1> [[MASKEDOFF:%.*]], <vscale x 8 x bfloat> [[OP1:%.*]], <vscale x 8 x bfloat> [[OP2:%.*]], i64 noundef [[VL:%.*]]) #[[ATTR0]] {
+// CHECK-RV64-NEXT: [[ENTRY:.*:]]
+// CHECK-RV64-NEXT: [[TMP0:%.*]] = call <vscale x 8 x i1> @llvm.riscv.vmfge.mask.nxv8bf16.nxv8bf16.i64(<vscale x 8 x i1> [[MASKEDOFF]], <vscale x 8 x bfloat> [[OP1]], <vscale x 8 x bfloat> [[OP2]], <vscale x 8 x i1> [[MASK]], i64 [[VL]])
+// CHECK-RV64-NEXT: ret <vscale x 8 x i1> [[TMP0]]
+//
+vbool8_t test_vmfge_vv_bf16m2_b8_mu(vbool8_t mask, vbool8_t maskedoff, vbfloat16m2_t op1, vbfloat16m2_t op2, size_t vl) {
+ return __riscv_vmfge_vv_bf16m2_b8_mu(mask, maskedoff, op1, op2, vl);
+}
+
+// CHECK-RV64-LABEL: define dso_local <vscale x 8 x i1> @test_vmfge_vf_bf16m2_b8_mu(
+// CHECK-RV64-SAME: <vscale x 8 x i1> [[MASK:%.*]], <vscale x 8 x i1> [[MASKEDOFF:%.*]], <vscale x 8 x bfloat> [[OP1:%.*]], bfloat noundef [[OP2:%.*]], i64 noundef [[VL:%.*]]) #[[ATTR0]] {
+// CHECK-RV64-NEXT: [[ENTRY:.*:]]
+// CHECK-RV64-NEXT: [[TMP0:%.*]] = call <vscale x 8 x i1> @llvm.riscv.vmfge.mask.nxv8bf16.bf16.i64(<vscale x 8 x i1> [[MASKEDOFF]], <vscale x 8 x bfloat> [[OP1]], bfloat [[OP2]], <vscale x 8 x i1> [[MASK]], i64 [[VL]])
+// CHECK-RV64-NEXT: ret <vscale x 8 x i1> [[TMP0]]
+//
+vbool8_t test_vmfge_vf_bf16m2_b8_mu(vbool8_t mask, vbool8_t maskedoff, vbfloat16m2_t op1, __bf16 op2, size_t vl) {
+ return __riscv_vmfge_vf_bf16m2_b8_mu(mask, maskedoff, op1, op2, vl);
+}
+
+// CHECK-RV64-LABEL: define dso_local <vscale x 16 x i1> @test_vmfge_vv_bf16m4_b4_mu(
+// CHECK-RV64-SAME: <vscale x 16 x i1> [[MASK:%.*]], <vscale x 16 x i1> [[MASKEDOFF:%.*]], <vscale x 16 x bfloat> [[OP1:%.*]], <vscale x 16 x bfloat> [[OP2:%.*]], i64 noundef [[VL:%.*]]) #[[ATTR0]] {
+// CHECK-RV64-NEXT: [[ENTRY:.*:]]
+// CHECK-RV64-NEXT: [[TMP0:%.*]] = call <vscale x 16 x i1> @llvm.riscv.vmfge.mask.nxv16bf16.nxv16bf16.i64(<vscale x 16 x i1> [[MASKEDOFF]], <vscale x 16 x bfloat> [[OP1]], <vscale x 16 x bfloat> [[OP2]], <vscale x 16 x i1> [[MASK]], i64 [[VL]])
+// CHECK-RV64-NEXT: ret <vscale x 16 x i1> [[TMP0]]
+//
+vbool4_t test_vmfge_vv_bf16m4_b4_mu(vbool4_t mask, vbool4_t maskedoff, vbfloat16m4_t op1, vbfloat16m4_t op2, size_t vl) {
+ return __riscv_vmfge_vv_bf16m4_b4_mu(mask, maskedoff, op1, op2, vl);
+}
+
+// CHECK-RV64-LABEL: define dso_local <vscale x 16 x i1> @test_vmfge_vf_bf16m4_b4_mu(
+// CHECK-RV64-SAME: <vscale x 16 x i1> [[MASK:%.*]], <vscale x 16 x i1> [[MASKEDOFF:%.*]], <vscale x 16 x bfloat> [[OP1:%.*]], bfloat noundef [[OP2:%.*]], i64 noundef [[VL:%.*]]) #[[ATTR0]] {
+// CHECK-RV64-NEXT: [[ENTRY:.*:]]
+// CHECK-RV64-NEXT: [[TMP0:%.*]] = call <vscale x 16 x i1> @llvm.riscv.vmfge.mask.nxv16bf16.bf16.i64(<vscale x 16 x i1> [[MASKEDOFF]], <vscale x 16 x bfloat> [[OP1]], bfloat [[OP2]], <vscale x 16 x i1> [[MASK]], i64 [[VL]])
+// CHECK-RV64-NEXT: ret <vscale x 16 x i1> [[TMP0]]
+//
+vbool4_t test_vmfge_vf_bf16m4_b4_mu(vbool4_t mask, vbool4_t maskedoff, vbfloat16m4_t op1, __bf16 op2, size_t vl) {
+ return __riscv_vmfge_vf_bf16m4_b4_mu(mask, maskedoff, op1, op2, vl);
+}
+
+// CHECK-RV64-LABEL: define dso_local <vscale x 32 x i1> @test_vmfge_vv_bf16m8_b2_mu(
+// CHECK-RV64-SAME: <vscale x 32 x i1> [[MASK:%.*]], <vscale x 32 x i1> [[MASKEDOFF:%.*]], <vscale x 32 x bfloat> [[OP1:%.*]], <vscale x 32 x bfloat> [[OP2:%.*]], i64 noundef [[VL:%.*]]) #[[ATTR0]] {
+// CHECK-RV64-NEXT: [[ENTRY:.*:]]
+// CHECK-RV64-NEXT: [[TMP0:%.*]] = call <vscale x 32 x i1> @llvm.riscv.vmfge.mask.nxv32bf16.nxv32bf16.i64(<vscale x 32 x i1> [[MASKEDOFF]], <vscale x 32 x bfloat> [[OP1]], <vscale x 32 x bfloat> [[OP2]], <vscale x 32 x i1> [[MASK]], i64 [[VL]])
+// CHECK-RV64-NEXT: ret <vscale x 32 x i1> [[TMP0]]
+//
+vbool2_t test_vmfge_vv_bf16m8_b2_mu(vbool2_t mask, vbool2_t maskedoff, vbfloat16m8_t op1, vbfloat16m8_t op2, size_t vl) {
+ return __riscv_vmfge_vv_bf16m8_b2_mu(mask, maskedoff, op1, op2, vl);
+}
+
+// CHECK-RV64-LABEL: define dso_local <vscale x 32 x i1> @test_vmfge_vf_bf16m8_b2_mu(
+// CHECK-RV64-SAME: <vscale x 32 x i1> [[MASK:%.*]], <vscale x 32 x i1> [[MASKEDOFF:%.*]], <vscale x 32 x bfloat> [[OP1:%.*]], bfloat noundef [[OP2:%.*]], i64 noundef [[VL:%.*]]) #[[ATTR0]] {
+// CHECK-RV64-NEXT: [[ENTRY:.*:]]
+// CHECK-RV64-NEXT: [[TMP0:%.*]] = call <vscale x 32 x i1> @llvm.riscv.vmfge.mask.nxv32bf16.bf16.i64(<vscale x 32 x i1> [[MASKEDOFF]], <vscale x 32 x bfloat> [[OP1]], bfloat [[OP2]], <vscale x 32 x i1> [[MASK]], i64 [[VL]])
+// CHECK-RV64-NEXT: ret <vscale x 32 x i1> [[TMP0]]
+//
+vbool2_t test_vmfge_vf_bf16m8_b2_mu(vbool2_t mask, vbool2_t maskedoff, vbfloat16m8_t op1, __bf16 op2, size_t vl) {
+ return __riscv_vmfge_vf_bf16m8_b2_mu(mask, maskedoff, op1, op2, vl);
+}
+
diff --git a/clang/test/CodeGen/RISCV/rvv-intrinsics-autogenerated/zvfbfa/policy/non-overloaded/vmfgt.c b/clang/test/CodeGen/RISCV/rvv-intrinsics-autogenerated/zvfbfa/policy/non-overloaded/vmfgt.c
new file mode 100644
index 0000000..0a0ead2
--- /dev/null
+++ b/clang/test/CodeGen/RISCV/rvv-intrinsics-autogenerated/zvfbfa/policy/non-overloaded/vmfgt.c
@@ -0,0 +1,129 @@
+// NOTE: Assertions have been autogenerated by utils/update_cc_test_checks.py UTC_ARGS: --version 5
+// REQUIRES: riscv-registered-target
+// RUN: %clang_cc1 -triple riscv64 -target-feature +v \
+// RUN: -target-feature +experimental-zvfbfa -disable-O0-optnone \
+// RUN: -emit-llvm %s -o - | opt -S -passes=mem2reg | \
+// RUN: FileCheck --check-prefix=CHECK-RV64 %s
+
+#include <riscv_vector.h>
+
+// CHECK-RV64-LABEL: define dso_local <vscale x 1 x i1> @test_vmfgt_vv_bf16mf4_b64_mu(
+// CHECK-RV64-SAME: <vscale x 1 x i1> [[MASK:%.*]], <vscale x 1 x i1> [[MASKEDOFF:%.*]], <vscale x 1 x bfloat> [[OP1:%.*]], <vscale x 1 x bfloat> [[OP2:%.*]], i64 noundef [[VL:%.*]]) #[[ATTR0:[0-9]+]] {
+// CHECK-RV64-NEXT: [[ENTRY:.*:]]
+// CHECK-RV64-NEXT: [[TMP0:%.*]] = call <vscale x 1 x i1> @llvm.riscv.vmfgt.mask.nxv1bf16.nxv1bf16.i64(<vscale x 1 x i1> [[MASKEDOFF]], <vscale x 1 x bfloat> [[OP1]], <vscale x 1 x bfloat> [[OP2]], <vscale x 1 x i1> [[MASK]], i64 [[VL]])
+// CHECK-RV64-NEXT: ret <vscale x 1 x i1> [[TMP0]]
+//
+vbool64_t test_vmfgt_vv_bf16mf4_b64_mu(vbool64_t mask, vbool64_t maskedoff, vbfloat16mf4_t op1, vbfloat16mf4_t op2, size_t vl) {
+ return __riscv_vmfgt_vv_bf16mf4_b64_mu(mask, maskedoff, op1, op2, vl);
+}
+
+// CHECK-RV64-LABEL: define dso_local <vscale x 1 x i1> @test_vmfgt_vf_bf16mf4_b64_mu(
+// CHECK-RV64-SAME: <vscale x 1 x i1> [[MASK:%.*]], <vscale x 1 x i1> [[MASKEDOFF:%.*]], <vscale x 1 x bfloat> [[OP1:%.*]], bfloat noundef [[OP2:%.*]], i64 noundef [[VL:%.*]]) #[[ATTR0]] {
+// CHECK-RV64-NEXT: [[ENTRY:.*:]]
+// CHECK-RV64-NEXT: [[TMP0:%.*]] = call <vscale x 1 x i1> @llvm.riscv.vmfgt.mask.nxv1bf16.bf16.i64(<vscale x 1 x i1> [[MASKEDOFF]], <vscale x 1 x bfloat> [[OP1]], bfloat [[OP2]], <vscale x 1 x i1> [[MASK]], i64 [[VL]])
+// CHECK-RV64-NEXT: ret <vscale x 1 x i1> [[TMP0]]
+//
+vbool64_t test_vmfgt_vf_bf16mf4_b64_mu(vbool64_t mask, vbool64_t maskedoff, vbfloat16mf4_t op1, __bf16 op2, size_t vl) {
+ return __riscv_vmfgt_vf_bf16mf4_b64_mu(mask, maskedoff, op1, op2, vl);
+}
+
+// CHECK-RV64-LABEL: define dso_local <vscale x 2 x i1> @test_vmfgt_vv_bf16mf2_b32_mu(
+// CHECK-RV64-SAME: <vscale x 2 x i1> [[MASK:%.*]], <vscale x 2 x i1> [[MASKEDOFF:%.*]], <vscale x 2 x bfloat> [[OP1:%.*]], <vscale x 2 x bfloat> [[OP2:%.*]], i64 noundef [[VL:%.*]]) #[[ATTR0]] {
+// CHECK-RV64-NEXT: [[ENTRY:.*:]]
+// CHECK-RV64-NEXT: [[TMP0:%.*]] = call <vscale x 2 x i1> @llvm.riscv.vmfgt.mask.nxv2bf16.nxv2bf16.i64(<vscale x 2 x i1> [[MASKEDOFF]], <vscale x 2 x bfloat> [[OP1]], <vscale x 2 x bfloat> [[OP2]], <vscale x 2 x i1> [[MASK]], i64 [[VL]])
+// CHECK-RV64-NEXT: ret <vscale x 2 x i1> [[TMP0]]
+//
+vbool32_t test_vmfgt_vv_bf16mf2_b32_mu(vbool32_t mask, vbool32_t maskedoff, vbfloat16mf2_t op1, vbfloat16mf2_t op2, size_t vl) {
+ return __riscv_vmfgt_vv_bf16mf2_b32_mu(mask, maskedoff, op1, op2, vl);
+}
+
+// CHECK-RV64-LABEL: define dso_local <vscale x 2 x i1> @test_vmfgt_vf_bf16mf2_b32_mu(
+// CHECK-RV64-SAME: <vscale x 2 x i1> [[MASK:%.*]], <vscale x 2 x i1> [[MASKEDOFF:%.*]], <vscale x 2 x bfloat> [[OP1:%.*]], bfloat noundef [[OP2:%.*]], i64 noundef [[VL:%.*]]) #[[ATTR0]] {
+// CHECK-RV64-NEXT: [[ENTRY:.*:]]
+// CHECK-RV64-NEXT: [[TMP0:%.*]] = call <vscale x 2 x i1> @llvm.riscv.vmfgt.mask.nxv2bf16.bf16.i64(<vscale x 2 x i1> [[MASKEDOFF]], <vscale x 2 x bfloat> [[OP1]], bfloat [[OP2]], <vscale x 2 x i1> [[MASK]], i64 [[VL]])
+// CHECK-RV64-NEXT: ret <vscale x 2 x i1> [[TMP0]]
+//
+vbool32_t test_vmfgt_vf_bf16mf2_b32_mu(vbool32_t mask, vbool32_t maskedoff, vbfloat16mf2_t op1, __bf16 op2, size_t vl) {
+ return __riscv_vmfgt_vf_bf16mf2_b32_mu(mask, maskedoff, op1, op2, vl);
+}
+
+// CHECK-RV64-LABEL: define dso_local <vscale x 4 x i1> @test_vmfgt_vv_bf16m1_b16_mu(
+// CHECK-RV64-SAME: <vscale x 4 x i1> [[MASK:%.*]], <vscale x 4 x i1> [[MASKEDOFF:%.*]], <vscale x 4 x bfloat> [[OP1:%.*]], <vscale x 4 x bfloat> [[OP2:%.*]], i64 noundef [[VL:%.*]]) #[[ATTR0]] {
+// CHECK-RV64-NEXT: [[ENTRY:.*:]]
+// CHECK-RV64-NEXT: [[TMP0:%.*]] = call <vscale x 4 x i1> @llvm.riscv.vmfgt.mask.nxv4bf16.nxv4bf16.i64(<vscale x 4 x i1> [[MASKEDOFF]], <vscale x 4 x bfloat> [[OP1]], <vscale x 4 x bfloat> [[OP2]], <vscale x 4 x i1> [[MASK]], i64 [[VL]])
+// CHECK-RV64-NEXT: ret <vscale x 4 x i1> [[TMP0]]
+//
+vbool16_t test_vmfgt_vv_bf16m1_b16_mu(vbool16_t mask, vbool16_t maskedoff, vbfloat16m1_t op1, vbfloat16m1_t op2, size_t vl) {
+ return __riscv_vmfgt_vv_bf16m1_b16_mu(mask, maskedoff, op1, op2, vl);
+}
+
+// CHECK-RV64-LABEL: define dso_local <vscale x 4 x i1> @test_vmfgt_vf_bf16m1_b16_mu(
+// CHECK-RV64-SAME: <vscale x 4 x i1> [[MASK:%.*]], <vscale x 4 x i1> [[MASKEDOFF:%.*]], <vscale x 4 x bfloat> [[OP1:%.*]], bfloat noundef [[OP2:%.*]], i64 noundef [[VL:%.*]]) #[[ATTR0]] {
+// CHECK-RV64-NEXT: [[ENTRY:.*:]]
+// CHECK-RV64-NEXT: [[TMP0:%.*]] = call <vscale x 4 x i1> @llvm.riscv.vmfgt.mask.nxv4bf16.bf16.i64(<vscale x 4 x i1> [[MASKEDOFF]], <vscale x 4 x bfloat> [[OP1]], bfloat [[OP2]], <vscale x 4 x i1> [[MASK]], i64 [[VL]])
+// CHECK-RV64-NEXT: ret <vscale x 4 x i1> [[TMP0]]
+//
+vbool16_t test_vmfgt_vf_bf16m1_b16_mu(vbool16_t mask, vbool16_t maskedoff, vbfloat16m1_t op1, __bf16 op2, size_t vl) {
+ return __riscv_vmfgt_vf_bf16m1_b16_mu(mask, maskedoff, op1, op2, vl);
+}
+
+// CHECK-RV64-LABEL: define dso_local <vscale x 8 x i1> @test_vmfgt_vv_bf16m2_b8_mu(
+// CHECK-RV64-SAME: <vscale x 8 x i1> [[MASK:%.*]], <vscale x 8 x i1> [[MASKEDOFF:%.*]], <vscale x 8 x bfloat> [[OP1:%.*]], <vscale x 8 x bfloat> [[OP2:%.*]], i64 noundef [[VL:%.*]]) #[[ATTR0]] {
+// CHECK-RV64-NEXT: [[ENTRY:.*:]]
+// CHECK-RV64-NEXT: [[TMP0:%.*]] = call <vscale x 8 x i1> @llvm.riscv.vmfgt.mask.nxv8bf16.nxv8bf16.i64(<vscale x 8 x i1> [[MASKEDOFF]], <vscale x 8 x bfloat> [[OP1]], <vscale x 8 x bfloat> [[OP2]], <vscale x 8 x i1> [[MASK]], i64 [[VL]])
+// CHECK-RV64-NEXT: ret <vscale x 8 x i1> [[TMP0]]
+//
+vbool8_t test_vmfgt_vv_bf16m2_b8_mu(vbool8_t mask, vbool8_t maskedoff, vbfloat16m2_t op1, vbfloat16m2_t op2, size_t vl) {
+ return __riscv_vmfgt_vv_bf16m2_b8_mu(mask, maskedoff, op1, op2, vl);
+}
+
+// CHECK-RV64-LABEL: define dso_local <vscale x 8 x i1> @test_vmfgt_vf_bf16m2_b8_mu(
+// CHECK-RV64-SAME: <vscale x 8 x i1> [[MASK:%.*]], <vscale x 8 x i1> [[MASKEDOFF:%.*]], <vscale x 8 x bfloat> [[OP1:%.*]], bfloat noundef [[OP2:%.*]], i64 noundef [[VL:%.*]]) #[[ATTR0]] {
+// CHECK-RV64-NEXT: [[ENTRY:.*:]]
+// CHECK-RV64-NEXT: [[TMP0:%.*]] = call <vscale x 8 x i1> @llvm.riscv.vmfgt.mask.nxv8bf16.bf16.i64(<vscale x 8 x i1> [[MASKEDOFF]], <vscale x 8 x bfloat> [[OP1]], bfloat [[OP2]], <vscale x 8 x i1> [[MASK]], i64 [[VL]])
+// CHECK-RV64-NEXT: ret <vscale x 8 x i1> [[TMP0]]
+//
+vbool8_t test_vmfgt_vf_bf16m2_b8_mu(vbool8_t mask, vbool8_t maskedoff, vbfloat16m2_t op1, __bf16 op2, size_t vl) {
+ return __riscv_vmfgt_vf_bf16m2_b8_mu(mask, maskedoff, op1, op2, vl);
+}
+
+// CHECK-RV64-LABEL: define dso_local <vscale x 16 x i1> @test_vmfgt_vv_bf16m4_b4_mu(
+// CHECK-RV64-SAME: <vscale x 16 x i1> [[MASK:%.*]], <vscale x 16 x i1> [[MASKEDOFF:%.*]], <vscale x 16 x bfloat> [[OP1:%.*]], <vscale x 16 x bfloat> [[OP2:%.*]], i64 noundef [[VL:%.*]]) #[[ATTR0]] {
+// CHECK-RV64-NEXT: [[ENTRY:.*:]]
+// CHECK-RV64-NEXT: [[TMP0:%.*]] = call <vscale x 16 x i1> @llvm.riscv.vmfgt.mask.nxv16bf16.nxv16bf16.i64(<vscale x 16 x i1> [[MASKEDOFF]], <vscale x 16 x bfloat> [[OP1]], <vscale x 16 x bfloat> [[OP2]], <vscale x 16 x i1> [[MASK]], i64 [[VL]])
+// CHECK-RV64-NEXT: ret <vscale x 16 x i1> [[TMP0]]
+//
+vbool4_t test_vmfgt_vv_bf16m4_b4_mu(vbool4_t mask, vbool4_t maskedoff, vbfloat16m4_t op1, vbfloat16m4_t op2, size_t vl) {
+ return __riscv_vmfgt_vv_bf16m4_b4_mu(mask, maskedoff, op1, op2, vl);
+}
+
+// CHECK-RV64-LABEL: define dso_local <vscale x 16 x i1> @test_vmfgt_vf_bf16m4_b4_mu(
+// CHECK-RV64-SAME: <vscale x 16 x i1> [[MASK:%.*]], <vscale x 16 x i1> [[MASKEDOFF:%.*]], <vscale x 16 x bfloat> [[OP1:%.*]], bfloat noundef [[OP2:%.*]], i64 noundef [[VL:%.*]]) #[[ATTR0]] {
+// CHECK-RV64-NEXT: [[ENTRY:.*:]]
+// CHECK-RV64-NEXT: [[TMP0:%.*]] = call <vscale x 16 x i1> @llvm.riscv.vmfgt.mask.nxv16bf16.bf16.i64(<vscale x 16 x i1> [[MASKEDOFF]], <vscale x 16 x bfloat> [[OP1]], bfloat [[OP2]], <vscale x 16 x i1> [[MASK]], i64 [[VL]])
+// CHECK-RV64-NEXT: ret <vscale x 16 x i1> [[TMP0]]
+//
+vbool4_t test_vmfgt_vf_bf16m4_b4_mu(vbool4_t mask, vbool4_t maskedoff, vbfloat16m4_t op1, __bf16 op2, size_t vl) {
+ return __riscv_vmfgt_vf_bf16m4_b4_mu(mask, maskedoff, op1, op2, vl);
+}
+
+// CHECK-RV64-LABEL: define dso_local <vscale x 32 x i1> @test_vmfgt_vv_bf16m8_b2_mu(
+// CHECK-RV64-SAME: <vscale x 32 x i1> [[MASK:%.*]], <vscale x 32 x i1> [[MASKEDOFF:%.*]], <vscale x 32 x bfloat> [[OP1:%.*]], <vscale x 32 x bfloat> [[OP2:%.*]], i64 noundef [[VL:%.*]]) #[[ATTR0]] {
+// CHECK-RV64-NEXT: [[ENTRY:.*:]]
+// CHECK-RV64-NEXT: [[TMP0:%.*]] = call <vscale x 32 x i1> @llvm.riscv.vmfgt.mask.nxv32bf16.nxv32bf16.i64(<vscale x 32 x i1> [[MASKEDOFF]], <vscale x 32 x bfloat> [[OP1]], <vscale x 32 x bfloat> [[OP2]], <vscale x 32 x i1> [[MASK]], i64 [[VL]])
+// CHECK-RV64-NEXT: ret <vscale x 32 x i1> [[TMP0]]
+//
+vbool2_t test_vmfgt_vv_bf16m8_b2_mu(vbool2_t mask, vbool2_t maskedoff, vbfloat16m8_t op1, vbfloat16m8_t op2, size_t vl) {
+ return __riscv_vmfgt_vv_bf16m8_b2_mu(mask, maskedoff, op1, op2, vl);
+}
+
+// CHECK-RV64-LABEL: define dso_local <vscale x 32 x i1> @test_vmfgt_vf_bf16m8_b2_mu(
+// CHECK-RV64-SAME: <vscale x 32 x i1> [[MASK:%.*]], <vscale x 32 x i1> [[MASKEDOFF:%.*]], <vscale x 32 x bfloat> [[OP1:%.*]], bfloat noundef [[OP2:%.*]], i64 noundef [[VL:%.*]]) #[[ATTR0]] {
+// CHECK-RV64-NEXT: [[ENTRY:.*:]]
+// CHECK-RV64-NEXT: [[TMP0:%.*]] = call <vscale x 32 x i1> @llvm.riscv.vmfgt.mask.nxv32bf16.bf16.i64(<vscale x 32 x i1> [[MASKEDOFF]], <vscale x 32 x bfloat> [[OP1]], bfloat [[OP2]], <vscale x 32 x i1> [[MASK]], i64 [[VL]])
+// CHECK-RV64-NEXT: ret <vscale x 32 x i1> [[TMP0]]
+//
+vbool2_t test_vmfgt_vf_bf16m8_b2_mu(vbool2_t mask, vbool2_t maskedoff, vbfloat16m8_t op1, __bf16 op2, size_t vl) {
+ return __riscv_vmfgt_vf_bf16m8_b2_mu(mask, maskedoff, op1, op2, vl);
+}
+
diff --git a/clang/test/CodeGen/RISCV/rvv-intrinsics-autogenerated/zvfbfa/policy/non-overloaded/vmfle.c b/clang/test/CodeGen/RISCV/rvv-intrinsics-autogenerated/zvfbfa/policy/non-overloaded/vmfle.c
new file mode 100644
index 0000000..27ddefe
--- /dev/null
+++ b/clang/test/CodeGen/RISCV/rvv-intrinsics-autogenerated/zvfbfa/policy/non-overloaded/vmfle.c
@@ -0,0 +1,129 @@
+// NOTE: Assertions have been autogenerated by utils/update_cc_test_checks.py UTC_ARGS: --version 5
+// REQUIRES: riscv-registered-target
+// RUN: %clang_cc1 -triple riscv64 -target-feature +v \
+// RUN: -target-feature +experimental-zvfbfa -disable-O0-optnone \
+// RUN: -emit-llvm %s -o - | opt -S -passes=mem2reg | \
+// RUN: FileCheck --check-prefix=CHECK-RV64 %s
+
+#include <riscv_vector.h>
+
+// CHECK-RV64-LABEL: define dso_local <vscale x 1 x i1> @test_vmfle_vv_bf16mf4_b64_mu(
+// CHECK-RV64-SAME: <vscale x 1 x i1> [[MASK:%.*]], <vscale x 1 x i1> [[MASKEDOFF:%.*]], <vscale x 1 x bfloat> [[OP1:%.*]], <vscale x 1 x bfloat> [[OP2:%.*]], i64 noundef [[VL:%.*]]) #[[ATTR0:[0-9]+]] {
+// CHECK-RV64-NEXT: [[ENTRY:.*:]]
+// CHECK-RV64-NEXT: [[TMP0:%.*]] = call <vscale x 1 x i1> @llvm.riscv.vmfle.mask.nxv1bf16.nxv1bf16.i64(<vscale x 1 x i1> [[MASKEDOFF]], <vscale x 1 x bfloat> [[OP1]], <vscale x 1 x bfloat> [[OP2]], <vscale x 1 x i1> [[MASK]], i64 [[VL]])
+// CHECK-RV64-NEXT: ret <vscale x 1 x i1> [[TMP0]]
+//
+vbool64_t test_vmfle_vv_bf16mf4_b64_mu(vbool64_t mask, vbool64_t maskedoff, vbfloat16mf4_t op1, vbfloat16mf4_t op2, size_t vl) {
+ return __riscv_vmfle_vv_bf16mf4_b64_mu(mask, maskedoff, op1, op2, vl);
+}
+
+// CHECK-RV64-LABEL: define dso_local <vscale x 1 x i1> @test_vmfle_vf_bf16mf4_b64_mu(
+// CHECK-RV64-SAME: <vscale x 1 x i1> [[MASK:%.*]], <vscale x 1 x i1> [[MASKEDOFF:%.*]], <vscale x 1 x bfloat> [[OP1:%.*]], bfloat noundef [[OP2:%.*]], i64 noundef [[VL:%.*]]) #[[ATTR0]] {
+// CHECK-RV64-NEXT: [[ENTRY:.*:]]
+// CHECK-RV64-NEXT: [[TMP0:%.*]] = call <vscale x 1 x i1> @llvm.riscv.vmfle.mask.nxv1bf16.bf16.i64(<vscale x 1 x i1> [[MASKEDOFF]], <vscale x 1 x bfloat> [[OP1]], bfloat [[OP2]], <vscale x 1 x i1> [[MASK]], i64 [[VL]])
+// CHECK-RV64-NEXT: ret <vscale x 1 x i1> [[TMP0]]
+//
+vbool64_t test_vmfle_vf_bf16mf4_b64_mu(vbool64_t mask, vbool64_t maskedoff, vbfloat16mf4_t op1, __bf16 op2, size_t vl) {
+ return __riscv_vmfle_vf_bf16mf4_b64_mu(mask, maskedoff, op1, op2, vl);
+}
+
+// CHECK-RV64-LABEL: define dso_local <vscale x 2 x i1> @test_vmfle_vv_bf16mf2_b32_mu(
+// CHECK-RV64-SAME: <vscale x 2 x i1> [[MASK:%.*]], <vscale x 2 x i1> [[MASKEDOFF:%.*]], <vscale x 2 x bfloat> [[OP1:%.*]], <vscale x 2 x bfloat> [[OP2:%.*]], i64 noundef [[VL:%.*]]) #[[ATTR0]] {
+// CHECK-RV64-NEXT: [[ENTRY:.*:]]
+// CHECK-RV64-NEXT: [[TMP0:%.*]] = call <vscale x 2 x i1> @llvm.riscv.vmfle.mask.nxv2bf16.nxv2bf16.i64(<vscale x 2 x i1> [[MASKEDOFF]], <vscale x 2 x bfloat> [[OP1]], <vscale x 2 x bfloat> [[OP2]], <vscale x 2 x i1> [[MASK]], i64 [[VL]])
+// CHECK-RV64-NEXT: ret <vscale x 2 x i1> [[TMP0]]
+//
+vbool32_t test_vmfle_vv_bf16mf2_b32_mu(vbool32_t mask, vbool32_t maskedoff, vbfloat16mf2_t op1, vbfloat16mf2_t op2, size_t vl) {
+ return __riscv_vmfle_vv_bf16mf2_b32_mu(mask, maskedoff, op1, op2, vl);
+}
+
+// CHECK-RV64-LABEL: define dso_local <vscale x 2 x i1> @test_vmfle_vf_bf16mf2_b32_mu(
+// CHECK-RV64-SAME: <vscale x 2 x i1> [[MASK:%.*]], <vscale x 2 x i1> [[MASKEDOFF:%.*]], <vscale x 2 x bfloat> [[OP1:%.*]], bfloat noundef [[OP2:%.*]], i64 noundef [[VL:%.*]]) #[[ATTR0]] {
+// CHECK-RV64-NEXT: [[ENTRY:.*:]]
+// CHECK-RV64-NEXT: [[TMP0:%.*]] = call <vscale x 2 x i1> @llvm.riscv.vmfle.mask.nxv2bf16.bf16.i64(<vscale x 2 x i1> [[MASKEDOFF]], <vscale x 2 x bfloat> [[OP1]], bfloat [[OP2]], <vscale x 2 x i1> [[MASK]], i64 [[VL]])
+// CHECK-RV64-NEXT: ret <vscale x 2 x i1> [[TMP0]]
+//
+vbool32_t test_vmfle_vf_bf16mf2_b32_mu(vbool32_t mask, vbool32_t maskedoff, vbfloat16mf2_t op1, __bf16 op2, size_t vl) {
+ return __riscv_vmfle_vf_bf16mf2_b32_mu(mask, maskedoff, op1, op2, vl);
+}
+
+// CHECK-RV64-LABEL: define dso_local <vscale x 4 x i1> @test_vmfle_vv_bf16m1_b16_mu(
+// CHECK-RV64-SAME: <vscale x 4 x i1> [[MASK:%.*]], <vscale x 4 x i1> [[MASKEDOFF:%.*]], <vscale x 4 x bfloat> [[OP1:%.*]], <vscale x 4 x bfloat> [[OP2:%.*]], i64 noundef [[VL:%.*]]) #[[ATTR0]] {
+// CHECK-RV64-NEXT: [[ENTRY:.*:]]
+// CHECK-RV64-NEXT: [[TMP0:%.*]] = call <vscale x 4 x i1> @llvm.riscv.vmfle.mask.nxv4bf16.nxv4bf16.i64(<vscale x 4 x i1> [[MASKEDOFF]], <vscale x 4 x bfloat> [[OP1]], <vscale x 4 x bfloat> [[OP2]], <vscale x 4 x i1> [[MASK]], i64 [[VL]])
+// CHECK-RV64-NEXT: ret <vscale x 4 x i1> [[TMP0]]
+//
+vbool16_t test_vmfle_vv_bf16m1_b16_mu(vbool16_t mask, vbool16_t maskedoff, vbfloat16m1_t op1, vbfloat16m1_t op2, size_t vl) {
+ return __riscv_vmfle_vv_bf16m1_b16_mu(mask, maskedoff, op1, op2, vl);
+}
+
+// CHECK-RV64-LABEL: define dso_local <vscale x 4 x i1> @test_vmfle_vf_bf16m1_b16_mu(
+// CHECK-RV64-SAME: <vscale x 4 x i1> [[MASK:%.*]], <vscale x 4 x i1> [[MASKEDOFF:%.*]], <vscale x 4 x bfloat> [[OP1:%.*]], bfloat noundef [[OP2:%.*]], i64 noundef [[VL:%.*]]) #[[ATTR0]] {
+// CHECK-RV64-NEXT: [[ENTRY:.*:]]
+// CHECK-RV64-NEXT: [[TMP0:%.*]] = call <vscale x 4 x i1> @llvm.riscv.vmfle.mask.nxv4bf16.bf16.i64(<vscale x 4 x i1> [[MASKEDOFF]], <vscale x 4 x bfloat> [[OP1]], bfloat [[OP2]], <vscale x 4 x i1> [[MASK]], i64 [[VL]])
+// CHECK-RV64-NEXT: ret <vscale x 4 x i1> [[TMP0]]
+//
+vbool16_t test_vmfle_vf_bf16m1_b16_mu(vbool16_t mask, vbool16_t maskedoff, vbfloat16m1_t op1, __bf16 op2, size_t vl) {
+ return __riscv_vmfle_vf_bf16m1_b16_mu(mask, maskedoff, op1, op2, vl);
+}
+
+// CHECK-RV64-LABEL: define dso_local <vscale x 8 x i1> @test_vmfle_vv_bf16m2_b8_mu(
+// CHECK-RV64-SAME: <vscale x 8 x i1> [[MASK:%.*]], <vscale x 8 x i1> [[MASKEDOFF:%.*]], <vscale x 8 x bfloat> [[OP1:%.*]], <vscale x 8 x bfloat> [[OP2:%.*]], i64 noundef [[VL:%.*]]) #[[ATTR0]] {
+// CHECK-RV64-NEXT: [[ENTRY:.*:]]
+// CHECK-RV64-NEXT: [[TMP0:%.*]] = call <vscale x 8 x i1> @llvm.riscv.vmfle.mask.nxv8bf16.nxv8bf16.i64(<vscale x 8 x i1> [[MASKEDOFF]], <vscale x 8 x bfloat> [[OP1]], <vscale x 8 x bfloat> [[OP2]], <vscale x 8 x i1> [[MASK]], i64 [[VL]])
+// CHECK-RV64-NEXT: ret <vscale x 8 x i1> [[TMP0]]
+//
+vbool8_t test_vmfle_vv_bf16m2_b8_mu(vbool8_t mask, vbool8_t maskedoff, vbfloat16m2_t op1, vbfloat16m2_t op2, size_t vl) {
+ return __riscv_vmfle_vv_bf16m2_b8_mu(mask, maskedoff, op1, op2, vl);
+}
+
+// CHECK-RV64-LABEL: define dso_local <vscale x 8 x i1> @test_vmfle_vf_bf16m2_b8_mu(
+// CHECK-RV64-SAME: <vscale x 8 x i1> [[MASK:%.*]], <vscale x 8 x i1> [[MASKEDOFF:%.*]], <vscale x 8 x bfloat> [[OP1:%.*]], bfloat noundef [[OP2:%.*]], i64 noundef [[VL:%.*]]) #[[ATTR0]] {
+// CHECK-RV64-NEXT: [[ENTRY:.*:]]
+// CHECK-RV64-NEXT: [[TMP0:%.*]] = call <vscale x 8 x i1> @llvm.riscv.vmfle.mask.nxv8bf16.bf16.i64(<vscale x 8 x i1> [[MASKEDOFF]], <vscale x 8 x bfloat> [[OP1]], bfloat [[OP2]], <vscale x 8 x i1> [[MASK]], i64 [[VL]])
+// CHECK-RV64-NEXT: ret <vscale x 8 x i1> [[TMP0]]
+//
+vbool8_t test_vmfle_vf_bf16m2_b8_mu(vbool8_t mask, vbool8_t maskedoff, vbfloat16m2_t op1, __bf16 op2, size_t vl) {
+ return __riscv_vmfle_vf_bf16m2_b8_mu(mask, maskedoff, op1, op2, vl);
+}
+
+// CHECK-RV64-LABEL: define dso_local <vscale x 16 x i1> @test_vmfle_vv_bf16m4_b4_mu(
+// CHECK-RV64-SAME: <vscale x 16 x i1> [[MASK:%.*]], <vscale x 16 x i1> [[MASKEDOFF:%.*]], <vscale x 16 x bfloat> [[OP1:%.*]], <vscale x 16 x bfloat> [[OP2:%.*]], i64 noundef [[VL:%.*]]) #[[ATTR0]] {
+// CHECK-RV64-NEXT: [[ENTRY:.*:]]
+// CHECK-RV64-NEXT: [[TMP0:%.*]] = call <vscale x 16 x i1> @llvm.riscv.vmfle.mask.nxv16bf16.nxv16bf16.i64(<vscale x 16 x i1> [[MASKEDOFF]], <vscale x 16 x bfloat> [[OP1]], <vscale x 16 x bfloat> [[OP2]], <vscale x 16 x i1> [[MASK]], i64 [[VL]])
+// CHECK-RV64-NEXT: ret <vscale x 16 x i1> [[TMP0]]
+//
+vbool4_t test_vmfle_vv_bf16m4_b4_mu(vbool4_t mask, vbool4_t maskedoff, vbfloat16m4_t op1, vbfloat16m4_t op2, size_t vl) {
+ return __riscv_vmfle_vv_bf16m4_b4_mu(mask, maskedoff, op1, op2, vl);
+}
+
+// CHECK-RV64-LABEL: define dso_local <vscale x 16 x i1> @test_vmfle_vf_bf16m4_b4_mu(
+// CHECK-RV64-SAME: <vscale x 16 x i1> [[MASK:%.*]], <vscale x 16 x i1> [[MASKEDOFF:%.*]], <vscale x 16 x bfloat> [[OP1:%.*]], bfloat noundef [[OP2:%.*]], i64 noundef [[VL:%.*]]) #[[ATTR0]] {
+// CHECK-RV64-NEXT: [[ENTRY:.*:]]
+// CHECK-RV64-NEXT: [[TMP0:%.*]] = call <vscale x 16 x i1> @llvm.riscv.vmfle.mask.nxv16bf16.bf16.i64(<vscale x 16 x i1> [[MASKEDOFF]], <vscale x 16 x bfloat> [[OP1]], bfloat [[OP2]], <vscale x 16 x i1> [[MASK]], i64 [[VL]])
+// CHECK-RV64-NEXT: ret <vscale x 16 x i1> [[TMP0]]
+//
+vbool4_t test_vmfle_vf_bf16m4_b4_mu(vbool4_t mask, vbool4_t maskedoff, vbfloat16m4_t op1, __bf16 op2, size_t vl) {
+ return __riscv_vmfle_vf_bf16m4_b4_mu(mask, maskedoff, op1, op2, vl);
+}
+
+// CHECK-RV64-LABEL: define dso_local <vscale x 32 x i1> @test_vmfle_vv_bf16m8_b2_mu(
+// CHECK-RV64-SAME: <vscale x 32 x i1> [[MASK:%.*]], <vscale x 32 x i1> [[MASKEDOFF:%.*]], <vscale x 32 x bfloat> [[OP1:%.*]], <vscale x 32 x bfloat> [[OP2:%.*]], i64 noundef [[VL:%.*]]) #[[ATTR0]] {
+// CHECK-RV64-NEXT: [[ENTRY:.*:]]
+// CHECK-RV64-NEXT: [[TMP0:%.*]] = call <vscale x 32 x i1> @llvm.riscv.vmfle.mask.nxv32bf16.nxv32bf16.i64(<vscale x 32 x i1> [[MASKEDOFF]], <vscale x 32 x bfloat> [[OP1]], <vscale x 32 x bfloat> [[OP2]], <vscale x 32 x i1> [[MASK]], i64 [[VL]])
+// CHECK-RV64-NEXT: ret <vscale x 32 x i1> [[TMP0]]
+//
+vbool2_t test_vmfle_vv_bf16m8_b2_mu(vbool2_t mask, vbool2_t maskedoff, vbfloat16m8_t op1, vbfloat16m8_t op2, size_t vl) {
+ return __riscv_vmfle_vv_bf16m8_b2_mu(mask, maskedoff, op1, op2, vl);
+}
+
+// CHECK-RV64-LABEL: define dso_local <vscale x 32 x i1> @test_vmfle_vf_bf16m8_b2_mu(
+// CHECK-RV64-SAME: <vscale x 32 x i1> [[MASK:%.*]], <vscale x 32 x i1> [[MASKEDOFF:%.*]], <vscale x 32 x bfloat> [[OP1:%.*]], bfloat noundef [[OP2:%.*]], i64 noundef [[VL:%.*]]) #[[ATTR0]] {
+// CHECK-RV64-NEXT: [[ENTRY:.*:]]
+// CHECK-RV64-NEXT: [[TMP0:%.*]] = call <vscale x 32 x i1> @llvm.riscv.vmfle.mask.nxv32bf16.bf16.i64(<vscale x 32 x i1> [[MASKEDOFF]], <vscale x 32 x bfloat> [[OP1]], bfloat [[OP2]], <vscale x 32 x i1> [[MASK]], i64 [[VL]])
+// CHECK-RV64-NEXT: ret <vscale x 32 x i1> [[TMP0]]
+//
+vbool2_t test_vmfle_vf_bf16m8_b2_mu(vbool2_t mask, vbool2_t maskedoff, vbfloat16m8_t op1, __bf16 op2, size_t vl) {
+ return __riscv_vmfle_vf_bf16m8_b2_mu(mask, maskedoff, op1, op2, vl);
+}
+
diff --git a/clang/test/CodeGen/RISCV/rvv-intrinsics-autogenerated/zvfbfa/policy/non-overloaded/vmflt.c b/clang/test/CodeGen/RISCV/rvv-intrinsics-autogenerated/zvfbfa/policy/non-overloaded/vmflt.c
new file mode 100644
index 0000000..d5f4f77
--- /dev/null
+++ b/clang/test/CodeGen/RISCV/rvv-intrinsics-autogenerated/zvfbfa/policy/non-overloaded/vmflt.c
@@ -0,0 +1,129 @@
+// NOTE: Assertions have been autogenerated by utils/update_cc_test_checks.py UTC_ARGS: --version 5
+// REQUIRES: riscv-registered-target
+// RUN: %clang_cc1 -triple riscv64 -target-feature +v \
+// RUN: -target-feature +experimental-zvfbfa -disable-O0-optnone \
+// RUN: -emit-llvm %s -o - | opt -S -passes=mem2reg | \
+// RUN: FileCheck --check-prefix=CHECK-RV64 %s
+
+#include <riscv_vector.h>
+
+// CHECK-RV64-LABEL: define dso_local <vscale x 1 x i1> @test_vmflt_vv_bf16mf4_b64_mu(
+// CHECK-RV64-SAME: <vscale x 1 x i1> [[MASK:%.*]], <vscale x 1 x i1> [[MASKEDOFF:%.*]], <vscale x 1 x bfloat> [[OP1:%.*]], <vscale x 1 x bfloat> [[OP2:%.*]], i64 noundef [[VL:%.*]]) #[[ATTR0:[0-9]+]] {
+// CHECK-RV64-NEXT: [[ENTRY:.*:]]
+// CHECK-RV64-NEXT: [[TMP0:%.*]] = call <vscale x 1 x i1> @llvm.riscv.vmflt.mask.nxv1bf16.nxv1bf16.i64(<vscale x 1 x i1> [[MASKEDOFF]], <vscale x 1 x bfloat> [[OP1]], <vscale x 1 x bfloat> [[OP2]], <vscale x 1 x i1> [[MASK]], i64 [[VL]])
+// CHECK-RV64-NEXT: ret <vscale x 1 x i1> [[TMP0]]
+//
+vbool64_t test_vmflt_vv_bf16mf4_b64_mu(vbool64_t mask, vbool64_t maskedoff, vbfloat16mf4_t op1, vbfloat16mf4_t op2, size_t vl) {
+ return __riscv_vmflt_vv_bf16mf4_b64_mu(mask, maskedoff, op1, op2, vl);
+}
+
+// CHECK-RV64-LABEL: define dso_local <vscale x 1 x i1> @test_vmflt_vf_bf16mf4_b64_mu(
+// CHECK-RV64-SAME: <vscale x 1 x i1> [[MASK:%.*]], <vscale x 1 x i1> [[MASKEDOFF:%.*]], <vscale x 1 x bfloat> [[OP1:%.*]], bfloat noundef [[OP2:%.*]], i64 noundef [[VL:%.*]]) #[[ATTR0]] {
+// CHECK-RV64-NEXT: [[ENTRY:.*:]]
+// CHECK-RV64-NEXT: [[TMP0:%.*]] = call <vscale x 1 x i1> @llvm.riscv.vmflt.mask.nxv1bf16.bf16.i64(<vscale x 1 x i1> [[MASKEDOFF]], <vscale x 1 x bfloat> [[OP1]], bfloat [[OP2]], <vscale x 1 x i1> [[MASK]], i64 [[VL]])
+// CHECK-RV64-NEXT: ret <vscale x 1 x i1> [[TMP0]]
+//
+vbool64_t test_vmflt_vf_bf16mf4_b64_mu(vbool64_t mask, vbool64_t maskedoff, vbfloat16mf4_t op1, __bf16 op2, size_t vl) {
+ return __riscv_vmflt_vf_bf16mf4_b64_mu(mask, maskedoff, op1, op2, vl);
+}
+
+// CHECK-RV64-LABEL: define dso_local <vscale x 2 x i1> @test_vmflt_vv_bf16mf2_b32_mu(
+// CHECK-RV64-SAME: <vscale x 2 x i1> [[MASK:%.*]], <vscale x 2 x i1> [[MASKEDOFF:%.*]], <vscale x 2 x bfloat> [[OP1:%.*]], <vscale x 2 x bfloat> [[OP2:%.*]], i64 noundef [[VL:%.*]]) #[[ATTR0]] {
+// CHECK-RV64-NEXT: [[ENTRY:.*:]]
+// CHECK-RV64-NEXT: [[TMP0:%.*]] = call <vscale x 2 x i1> @llvm.riscv.vmflt.mask.nxv2bf16.nxv2bf16.i64(<vscale x 2 x i1> [[MASKEDOFF]], <vscale x 2 x bfloat> [[OP1]], <vscale x 2 x bfloat> [[OP2]], <vscale x 2 x i1> [[MASK]], i64 [[VL]])
+// CHECK-RV64-NEXT: ret <vscale x 2 x i1> [[TMP0]]
+//
+vbool32_t test_vmflt_vv_bf16mf2_b32_mu(vbool32_t mask, vbool32_t maskedoff, vbfloat16mf2_t op1, vbfloat16mf2_t op2, size_t vl) {
+ return __riscv_vmflt_vv_bf16mf2_b32_mu(mask, maskedoff, op1, op2, vl);
+}
+
+// CHECK-RV64-LABEL: define dso_local <vscale x 2 x i1> @test_vmflt_vf_bf16mf2_b32_mu(
+// CHECK-RV64-SAME: <vscale x 2 x i1> [[MASK:%.*]], <vscale x 2 x i1> [[MASKEDOFF:%.*]], <vscale x 2 x bfloat> [[OP1:%.*]], bfloat noundef [[OP2:%.*]], i64 noundef [[VL:%.*]]) #[[ATTR0]] {
+// CHECK-RV64-NEXT: [[ENTRY:.*:]]
+// CHECK-RV64-NEXT: [[TMP0:%.*]] = call <vscale x 2 x i1> @llvm.riscv.vmflt.mask.nxv2bf16.bf16.i64(<vscale x 2 x i1> [[MASKEDOFF]], <vscale x 2 x bfloat> [[OP1]], bfloat [[OP2]], <vscale x 2 x i1> [[MASK]], i64 [[VL]])
+// CHECK-RV64-NEXT: ret <vscale x 2 x i1> [[TMP0]]
+//
+vbool32_t test_vmflt_vf_bf16mf2_b32_mu(vbool32_t mask, vbool32_t maskedoff, vbfloat16mf2_t op1, __bf16 op2, size_t vl) {
+ return __riscv_vmflt_vf_bf16mf2_b32_mu(mask, maskedoff, op1, op2, vl);
+}
+
+// CHECK-RV64-LABEL: define dso_local <vscale x 4 x i1> @test_vmflt_vv_bf16m1_b16_mu(
+// CHECK-RV64-SAME: <vscale x 4 x i1> [[MASK:%.*]], <vscale x 4 x i1> [[MASKEDOFF:%.*]], <vscale x 4 x bfloat> [[OP1:%.*]], <vscale x 4 x bfloat> [[OP2:%.*]], i64 noundef [[VL:%.*]]) #[[ATTR0]] {
+// CHECK-RV64-NEXT: [[ENTRY:.*:]]
+// CHECK-RV64-NEXT: [[TMP0:%.*]] = call <vscale x 4 x i1> @llvm.riscv.vmflt.mask.nxv4bf16.nxv4bf16.i64(<vscale x 4 x i1> [[MASKEDOFF]], <vscale x 4 x bfloat> [[OP1]], <vscale x 4 x bfloat> [[OP2]], <vscale x 4 x i1> [[MASK]], i64 [[VL]])
+// CHECK-RV64-NEXT: ret <vscale x 4 x i1> [[TMP0]]
+//
+vbool16_t test_vmflt_vv_bf16m1_b16_mu(vbool16_t mask, vbool16_t maskedoff, vbfloat16m1_t op1, vbfloat16m1_t op2, size_t vl) {
+ return __riscv_vmflt_vv_bf16m1_b16_mu(mask, maskedoff, op1, op2, vl);
+}
+
+// CHECK-RV64-LABEL: define dso_local <vscale x 4 x i1> @test_vmflt_vf_bf16m1_b16_mu(
+// CHECK-RV64-SAME: <vscale x 4 x i1> [[MASK:%.*]], <vscale x 4 x i1> [[MASKEDOFF:%.*]], <vscale x 4 x bfloat> [[OP1:%.*]], bfloat noundef [[OP2:%.*]], i64 noundef [[VL:%.*]]) #[[ATTR0]] {
+// CHECK-RV64-NEXT: [[ENTRY:.*:]]
+// CHECK-RV64-NEXT: [[TMP0:%.*]] = call <vscale x 4 x i1> @llvm.riscv.vmflt.mask.nxv4bf16.bf16.i64(<vscale x 4 x i1> [[MASKEDOFF]], <vscale x 4 x bfloat> [[OP1]], bfloat [[OP2]], <vscale x 4 x i1> [[MASK]], i64 [[VL]])
+// CHECK-RV64-NEXT: ret <vscale x 4 x i1> [[TMP0]]
+//
+vbool16_t test_vmflt_vf_bf16m1_b16_mu(vbool16_t mask, vbool16_t maskedoff, vbfloat16m1_t op1, __bf16 op2, size_t vl) {
+ return __riscv_vmflt_vf_bf16m1_b16_mu(mask, maskedoff, op1, op2, vl);
+}
+
+// CHECK-RV64-LABEL: define dso_local <vscale x 8 x i1> @test_vmflt_vv_bf16m2_b8_mu(
+// CHECK-RV64-SAME: <vscale x 8 x i1> [[MASK:%.*]], <vscale x 8 x i1> [[MASKEDOFF:%.*]], <vscale x 8 x bfloat> [[OP1:%.*]], <vscale x 8 x bfloat> [[OP2:%.*]], i64 noundef [[VL:%.*]]) #[[ATTR0]] {
+// CHECK-RV64-NEXT: [[ENTRY:.*:]]
+// CHECK-RV64-NEXT: [[TMP0:%.*]] = call <vscale x 8 x i1> @llvm.riscv.vmflt.mask.nxv8bf16.nxv8bf16.i64(<vscale x 8 x i1> [[MASKEDOFF]], <vscale x 8 x bfloat> [[OP1]], <vscale x 8 x bfloat> [[OP2]], <vscale x 8 x i1> [[MASK]], i64 [[VL]])
+// CHECK-RV64-NEXT: ret <vscale x 8 x i1> [[TMP0]]
+//
+vbool8_t test_vmflt_vv_bf16m2_b8_mu(vbool8_t mask, vbool8_t maskedoff, vbfloat16m2_t op1, vbfloat16m2_t op2, size_t vl) {
+ return __riscv_vmflt_vv_bf16m2_b8_mu(mask, maskedoff, op1, op2, vl);
+}
+
+// CHECK-RV64-LABEL: define dso_local <vscale x 8 x i1> @test_vmflt_vf_bf16m2_b8_mu(
+// CHECK-RV64-SAME: <vscale x 8 x i1> [[MASK:%.*]], <vscale x 8 x i1> [[MASKEDOFF:%.*]], <vscale x 8 x bfloat> [[OP1:%.*]], bfloat noundef [[OP2:%.*]], i64 noundef [[VL:%.*]]) #[[ATTR0]] {
+// CHECK-RV64-NEXT: [[ENTRY:.*:]]
+// CHECK-RV64-NEXT: [[TMP0:%.*]] = call <vscale x 8 x i1> @llvm.riscv.vmflt.mask.nxv8bf16.bf16.i64(<vscale x 8 x i1> [[MASKEDOFF]], <vscale x 8 x bfloat> [[OP1]], bfloat [[OP2]], <vscale x 8 x i1> [[MASK]], i64 [[VL]])
+// CHECK-RV64-NEXT: ret <vscale x 8 x i1> [[TMP0]]
+//
+vbool8_t test_vmflt_vf_bf16m2_b8_mu(vbool8_t mask, vbool8_t maskedoff, vbfloat16m2_t op1, __bf16 op2, size_t vl) {
+ return __riscv_vmflt_vf_bf16m2_b8_mu(mask, maskedoff, op1, op2, vl);
+}
+
+// CHECK-RV64-LABEL: define dso_local <vscale x 16 x i1> @test_vmflt_vv_bf16m4_b4_mu(
+// CHECK-RV64-SAME: <vscale x 16 x i1> [[MASK:%.*]], <vscale x 16 x i1> [[MASKEDOFF:%.*]], <vscale x 16 x bfloat> [[OP1:%.*]], <vscale x 16 x bfloat> [[OP2:%.*]], i64 noundef [[VL:%.*]]) #[[ATTR0]] {
+// CHECK-RV64-NEXT: [[ENTRY:.*:]]
+// CHECK-RV64-NEXT: [[TMP0:%.*]] = call <vscale x 16 x i1> @llvm.riscv.vmflt.mask.nxv16bf16.nxv16bf16.i64(<vscale x 16 x i1> [[MASKEDOFF]], <vscale x 16 x bfloat> [[OP1]], <vscale x 16 x bfloat> [[OP2]], <vscale x 16 x i1> [[MASK]], i64 [[VL]])
+// CHECK-RV64-NEXT: ret <vscale x 16 x i1> [[TMP0]]
+//
+vbool4_t test_vmflt_vv_bf16m4_b4_mu(vbool4_t mask, vbool4_t maskedoff, vbfloat16m4_t op1, vbfloat16m4_t op2, size_t vl) {
+ return __riscv_vmflt_vv_bf16m4_b4_mu(mask, maskedoff, op1, op2, vl);
+}
+
+// CHECK-RV64-LABEL: define dso_local <vscale x 16 x i1> @test_vmflt_vf_bf16m4_b4_mu(
+// CHECK-RV64-SAME: <vscale x 16 x i1> [[MASK:%.*]], <vscale x 16 x i1> [[MASKEDOFF:%.*]], <vscale x 16 x bfloat> [[OP1:%.*]], bfloat noundef [[OP2:%.*]], i64 noundef [[VL:%.*]]) #[[ATTR0]] {
+// CHECK-RV64-NEXT: [[ENTRY:.*:]]
+// CHECK-RV64-NEXT: [[TMP0:%.*]] = call <vscale x 16 x i1> @llvm.riscv.vmflt.mask.nxv16bf16.bf16.i64(<vscale x 16 x i1> [[MASKEDOFF]], <vscale x 16 x bfloat> [[OP1]], bfloat [[OP2]], <vscale x 16 x i1> [[MASK]], i64 [[VL]])
+// CHECK-RV64-NEXT: ret <vscale x 16 x i1> [[TMP0]]
+//
+vbool4_t test_vmflt_vf_bf16m4_b4_mu(vbool4_t mask, vbool4_t maskedoff, vbfloat16m4_t op1, __bf16 op2, size_t vl) {
+ return __riscv_vmflt_vf_bf16m4_b4_mu(mask, maskedoff, op1, op2, vl);
+}
+
+// CHECK-RV64-LABEL: define dso_local <vscale x 32 x i1> @test_vmflt_vv_bf16m8_b2_mu(
+// CHECK-RV64-SAME: <vscale x 32 x i1> [[MASK:%.*]], <vscale x 32 x i1> [[MASKEDOFF:%.*]], <vscale x 32 x bfloat> [[OP1:%.*]], <vscale x 32 x bfloat> [[OP2:%.*]], i64 noundef [[VL:%.*]]) #[[ATTR0]] {
+// CHECK-RV64-NEXT: [[ENTRY:.*:]]
+// CHECK-RV64-NEXT: [[TMP0:%.*]] = call <vscale x 32 x i1> @llvm.riscv.vmflt.mask.nxv32bf16.nxv32bf16.i64(<vscale x 32 x i1> [[MASKEDOFF]], <vscale x 32 x bfloat> [[OP1]], <vscale x 32 x bfloat> [[OP2]], <vscale x 32 x i1> [[MASK]], i64 [[VL]])
+// CHECK-RV64-NEXT: ret <vscale x 32 x i1> [[TMP0]]
+//
+vbool2_t test_vmflt_vv_bf16m8_b2_mu(vbool2_t mask, vbool2_t maskedoff, vbfloat16m8_t op1, vbfloat16m8_t op2, size_t vl) {
+ return __riscv_vmflt_vv_bf16m8_b2_mu(mask, maskedoff, op1, op2, vl);
+}
+
+// CHECK-RV64-LABEL: define dso_local <vscale x 32 x i1> @test_vmflt_vf_bf16m8_b2_mu(
+// CHECK-RV64-SAME: <vscale x 32 x i1> [[MASK:%.*]], <vscale x 32 x i1> [[MASKEDOFF:%.*]], <vscale x 32 x bfloat> [[OP1:%.*]], bfloat noundef [[OP2:%.*]], i64 noundef [[VL:%.*]]) #[[ATTR0]] {
+// CHECK-RV64-NEXT: [[ENTRY:.*:]]
+// CHECK-RV64-NEXT: [[TMP0:%.*]] = call <vscale x 32 x i1> @llvm.riscv.vmflt.mask.nxv32bf16.bf16.i64(<vscale x 32 x i1> [[MASKEDOFF]], <vscale x 32 x bfloat> [[OP1]], bfloat [[OP2]], <vscale x 32 x i1> [[MASK]], i64 [[VL]])
+// CHECK-RV64-NEXT: ret <vscale x 32 x i1> [[TMP0]]
+//
+vbool2_t test_vmflt_vf_bf16m8_b2_mu(vbool2_t mask, vbool2_t maskedoff, vbfloat16m8_t op1, __bf16 op2, size_t vl) {
+ return __riscv_vmflt_vf_bf16m8_b2_mu(mask, maskedoff, op1, op2, vl);
+}
+
diff --git a/clang/test/CodeGen/RISCV/rvv-intrinsics-autogenerated/zvfbfa/policy/non-overloaded/vmfne.c b/clang/test/CodeGen/RISCV/rvv-intrinsics-autogenerated/zvfbfa/policy/non-overloaded/vmfne.c
new file mode 100644
index 0000000..c2df947
--- /dev/null
+++ b/clang/test/CodeGen/RISCV/rvv-intrinsics-autogenerated/zvfbfa/policy/non-overloaded/vmfne.c
@@ -0,0 +1,129 @@
+// NOTE: Assertions have been autogenerated by utils/update_cc_test_checks.py UTC_ARGS: --version 5
+// REQUIRES: riscv-registered-target
+// RUN: %clang_cc1 -triple riscv64 -target-feature +v \
+// RUN: -target-feature +experimental-zvfbfa -disable-O0-optnone \
+// RUN: -emit-llvm %s -o - | opt -S -passes=mem2reg | \
+// RUN: FileCheck --check-prefix=CHECK-RV64 %s
+
+#include <riscv_vector.h>
+
+// CHECK-RV64-LABEL: define dso_local <vscale x 1 x i1> @test_vmfne_vv_bf16mf4_b64_mu(
+// CHECK-RV64-SAME: <vscale x 1 x i1> [[MASK:%.*]], <vscale x 1 x i1> [[MASKEDOFF:%.*]], <vscale x 1 x bfloat> [[OP1:%.*]], <vscale x 1 x bfloat> [[OP2:%.*]], i64 noundef [[VL:%.*]]) #[[ATTR0:[0-9]+]] {
+// CHECK-RV64-NEXT: [[ENTRY:.*:]]
+// CHECK-RV64-NEXT: [[TMP0:%.*]] = call <vscale x 1 x i1> @llvm.riscv.vmfne.mask.nxv1bf16.nxv1bf16.i64(<vscale x 1 x i1> [[MASKEDOFF]], <vscale x 1 x bfloat> [[OP1]], <vscale x 1 x bfloat> [[OP2]], <vscale x 1 x i1> [[MASK]], i64 [[VL]])
+// CHECK-RV64-NEXT: ret <vscale x 1 x i1> [[TMP0]]
+//
+vbool64_t test_vmfne_vv_bf16mf4_b64_mu(vbool64_t mask, vbool64_t maskedoff, vbfloat16mf4_t op1, vbfloat16mf4_t op2, size_t vl) {
+ return __riscv_vmfne_vv_bf16mf4_b64_mu(mask, maskedoff, op1, op2, vl);
+}
+
+// CHECK-RV64-LABEL: define dso_local <vscale x 1 x i1> @test_vmfne_vf_bf16mf4_b64_mu(
+// CHECK-RV64-SAME: <vscale x 1 x i1> [[MASK:%.*]], <vscale x 1 x i1> [[MASKEDOFF:%.*]], <vscale x 1 x bfloat> [[OP1:%.*]], bfloat noundef [[OP2:%.*]], i64 noundef [[VL:%.*]]) #[[ATTR0]] {
+// CHECK-RV64-NEXT: [[ENTRY:.*:]]
+// CHECK-RV64-NEXT: [[TMP0:%.*]] = call <vscale x 1 x i1> @llvm.riscv.vmfne.mask.nxv1bf16.bf16.i64(<vscale x 1 x i1> [[MASKEDOFF]], <vscale x 1 x bfloat> [[OP1]], bfloat [[OP2]], <vscale x 1 x i1> [[MASK]], i64 [[VL]])
+// CHECK-RV64-NEXT: ret <vscale x 1 x i1> [[TMP0]]
+//
+vbool64_t test_vmfne_vf_bf16mf4_b64_mu(vbool64_t mask, vbool64_t maskedoff, vbfloat16mf4_t op1, __bf16 op2, size_t vl) {
+ return __riscv_vmfne_vf_bf16mf4_b64_mu(mask, maskedoff, op1, op2, vl);
+}
+
+// CHECK-RV64-LABEL: define dso_local <vscale x 2 x i1> @test_vmfne_vv_bf16mf2_b32_mu(
+// CHECK-RV64-SAME: <vscale x 2 x i1> [[MASK:%.*]], <vscale x 2 x i1> [[MASKEDOFF:%.*]], <vscale x 2 x bfloat> [[OP1:%.*]], <vscale x 2 x bfloat> [[OP2:%.*]], i64 noundef [[VL:%.*]]) #[[ATTR0]] {
+// CHECK-RV64-NEXT: [[ENTRY:.*:]]
+// CHECK-RV64-NEXT: [[TMP0:%.*]] = call <vscale x 2 x i1> @llvm.riscv.vmfne.mask.nxv2bf16.nxv2bf16.i64(<vscale x 2 x i1> [[MASKEDOFF]], <vscale x 2 x bfloat> [[OP1]], <vscale x 2 x bfloat> [[OP2]], <vscale x 2 x i1> [[MASK]], i64 [[VL]])
+// CHECK-RV64-NEXT: ret <vscale x 2 x i1> [[TMP0]]
+//
+vbool32_t test_vmfne_vv_bf16mf2_b32_mu(vbool32_t mask, vbool32_t maskedoff, vbfloat16mf2_t op1, vbfloat16mf2_t op2, size_t vl) {
+ return __riscv_vmfne_vv_bf16mf2_b32_mu(mask, maskedoff, op1, op2, vl);
+}
+
+// CHECK-RV64-LABEL: define dso_local <vscale x 2 x i1> @test_vmfne_vf_bf16mf2_b32_mu(
+// CHECK-RV64-SAME: <vscale x 2 x i1> [[MASK:%.*]], <vscale x 2 x i1> [[MASKEDOFF:%.*]], <vscale x 2 x bfloat> [[OP1:%.*]], bfloat noundef [[OP2:%.*]], i64 noundef [[VL:%.*]]) #[[ATTR0]] {
+// CHECK-RV64-NEXT: [[ENTRY:.*:]]
+// CHECK-RV64-NEXT: [[TMP0:%.*]] = call <vscale x 2 x i1> @llvm.riscv.vmfne.mask.nxv2bf16.bf16.i64(<vscale x 2 x i1> [[MASKEDOFF]], <vscale x 2 x bfloat> [[OP1]], bfloat [[OP2]], <vscale x 2 x i1> [[MASK]], i64 [[VL]])
+// CHECK-RV64-NEXT: ret <vscale x 2 x i1> [[TMP0]]
+//
+vbool32_t test_vmfne_vf_bf16mf2_b32_mu(vbool32_t mask, vbool32_t maskedoff, vbfloat16mf2_t op1, __bf16 op2, size_t vl) {
+ return __riscv_vmfne_vf_bf16mf2_b32_mu(mask, maskedoff, op1, op2, vl);
+}
+
+// CHECK-RV64-LABEL: define dso_local <vscale x 4 x i1> @test_vmfne_vv_bf16m1_b16_mu(
+// CHECK-RV64-SAME: <vscale x 4 x i1> [[MASK:%.*]], <vscale x 4 x i1> [[MASKEDOFF:%.*]], <vscale x 4 x bfloat> [[OP1:%.*]], <vscale x 4 x bfloat> [[OP2:%.*]], i64 noundef [[VL:%.*]]) #[[ATTR0]] {
+// CHECK-RV64-NEXT: [[ENTRY:.*:]]
+// CHECK-RV64-NEXT: [[TMP0:%.*]] = call <vscale x 4 x i1> @llvm.riscv.vmfne.mask.nxv4bf16.nxv4bf16.i64(<vscale x 4 x i1> [[MASKEDOFF]], <vscale x 4 x bfloat> [[OP1]], <vscale x 4 x bfloat> [[OP2]], <vscale x 4 x i1> [[MASK]], i64 [[VL]])
+// CHECK-RV64-NEXT: ret <vscale x 4 x i1> [[TMP0]]
+//
+vbool16_t test_vmfne_vv_bf16m1_b16_mu(vbool16_t mask, vbool16_t maskedoff, vbfloat16m1_t op1, vbfloat16m1_t op2, size_t vl) {
+ return __riscv_vmfne_vv_bf16m1_b16_mu(mask, maskedoff, op1, op2, vl);
+}
+
+// CHECK-RV64-LABEL: define dso_local <vscale x 4 x i1> @test_vmfne_vf_bf16m1_b16_mu(
+// CHECK-RV64-SAME: <vscale x 4 x i1> [[MASK:%.*]], <vscale x 4 x i1> [[MASKEDOFF:%.*]], <vscale x 4 x bfloat> [[OP1:%.*]], bfloat noundef [[OP2:%.*]], i64 noundef [[VL:%.*]]) #[[ATTR0]] {
+// CHECK-RV64-NEXT: [[ENTRY:.*:]]
+// CHECK-RV64-NEXT: [[TMP0:%.*]] = call <vscale x 4 x i1> @llvm.riscv.vmfne.mask.nxv4bf16.bf16.i64(<vscale x 4 x i1> [[MASKEDOFF]], <vscale x 4 x bfloat> [[OP1]], bfloat [[OP2]], <vscale x 4 x i1> [[MASK]], i64 [[VL]])
+// CHECK-RV64-NEXT: ret <vscale x 4 x i1> [[TMP0]]
+//
+vbool16_t test_vmfne_vf_bf16m1_b16_mu(vbool16_t mask, vbool16_t maskedoff, vbfloat16m1_t op1, __bf16 op2, size_t vl) {
+ return __riscv_vmfne_vf_bf16m1_b16_mu(mask, maskedoff, op1, op2, vl);
+}
+
+// CHECK-RV64-LABEL: define dso_local <vscale x 8 x i1> @test_vmfne_vv_bf16m2_b8_mu(
+// CHECK-RV64-SAME: <vscale x 8 x i1> [[MASK:%.*]], <vscale x 8 x i1> [[MASKEDOFF:%.*]], <vscale x 8 x bfloat> [[OP1:%.*]], <vscale x 8 x bfloat> [[OP2:%.*]], i64 noundef [[VL:%.*]]) #[[ATTR0]] {
+// CHECK-RV64-NEXT: [[ENTRY:.*:]]
+// CHECK-RV64-NEXT: [[TMP0:%.*]] = call <vscale x 8 x i1> @llvm.riscv.vmfne.mask.nxv8bf16.nxv8bf16.i64(<vscale x 8 x i1> [[MASKEDOFF]], <vscale x 8 x bfloat> [[OP1]], <vscale x 8 x bfloat> [[OP2]], <vscale x 8 x i1> [[MASK]], i64 [[VL]])
+// CHECK-RV64-NEXT: ret <vscale x 8 x i1> [[TMP0]]
+//
+vbool8_t test_vmfne_vv_bf16m2_b8_mu(vbool8_t mask, vbool8_t maskedoff, vbfloat16m2_t op1, vbfloat16m2_t op2, size_t vl) {
+ return __riscv_vmfne_vv_bf16m2_b8_mu(mask, maskedoff, op1, op2, vl);
+}
+
+// CHECK-RV64-LABEL: define dso_local <vscale x 8 x i1> @test_vmfne_vf_bf16m2_b8_mu(
+// CHECK-RV64-SAME: <vscale x 8 x i1> [[MASK:%.*]], <vscale x 8 x i1> [[MASKEDOFF:%.*]], <vscale x 8 x bfloat> [[OP1:%.*]], bfloat noundef [[OP2:%.*]], i64 noundef [[VL:%.*]]) #[[ATTR0]] {
+// CHECK-RV64-NEXT: [[ENTRY:.*:]]
+// CHECK-RV64-NEXT: [[TMP0:%.*]] = call <vscale x 8 x i1> @llvm.riscv.vmfne.mask.nxv8bf16.bf16.i64(<vscale x 8 x i1> [[MASKEDOFF]], <vscale x 8 x bfloat> [[OP1]], bfloat [[OP2]], <vscale x 8 x i1> [[MASK]], i64 [[VL]])
+// CHECK-RV64-NEXT: ret <vscale x 8 x i1> [[TMP0]]
+//
+vbool8_t test_vmfne_vf_bf16m2_b8_mu(vbool8_t mask, vbool8_t maskedoff, vbfloat16m2_t op1, __bf16 op2, size_t vl) {
+ return __riscv_vmfne_vf_bf16m2_b8_mu(mask, maskedoff, op1, op2, vl);
+}
+
+// CHECK-RV64-LABEL: define dso_local <vscale x 16 x i1> @test_vmfne_vv_bf16m4_b4_mu(
+// CHECK-RV64-SAME: <vscale x 16 x i1> [[MASK:%.*]], <vscale x 16 x i1> [[MASKEDOFF:%.*]], <vscale x 16 x bfloat> [[OP1:%.*]], <vscale x 16 x bfloat> [[OP2:%.*]], i64 noundef [[VL:%.*]]) #[[ATTR0]] {
+// CHECK-RV64-NEXT: [[ENTRY:.*:]]
+// CHECK-RV64-NEXT: [[TMP0:%.*]] = call <vscale x 16 x i1> @llvm.riscv.vmfne.mask.nxv16bf16.nxv16bf16.i64(<vscale x 16 x i1> [[MASKEDOFF]], <vscale x 16 x bfloat> [[OP1]], <vscale x 16 x bfloat> [[OP2]], <vscale x 16 x i1> [[MASK]], i64 [[VL]])
+// CHECK-RV64-NEXT: ret <vscale x 16 x i1> [[TMP0]]
+//
+vbool4_t test_vmfne_vv_bf16m4_b4_mu(vbool4_t mask, vbool4_t maskedoff, vbfloat16m4_t op1, vbfloat16m4_t op2, size_t vl) {
+ return __riscv_vmfne_vv_bf16m4_b4_mu(mask, maskedoff, op1, op2, vl);
+}
+
+// CHECK-RV64-LABEL: define dso_local <vscale x 16 x i1> @test_vmfne_vf_bf16m4_b4_mu(
+// CHECK-RV64-SAME: <vscale x 16 x i1> [[MASK:%.*]], <vscale x 16 x i1> [[MASKEDOFF:%.*]], <vscale x 16 x bfloat> [[OP1:%.*]], bfloat noundef [[OP2:%.*]], i64 noundef [[VL:%.*]]) #[[ATTR0]] {
+// CHECK-RV64-NEXT: [[ENTRY:.*:]]
+// CHECK-RV64-NEXT: [[TMP0:%.*]] = call <vscale x 16 x i1> @llvm.riscv.vmfne.mask.nxv16bf16.bf16.i64(<vscale x 16 x i1> [[MASKEDOFF]], <vscale x 16 x bfloat> [[OP1]], bfloat [[OP2]], <vscale x 16 x i1> [[MASK]], i64 [[VL]])
+// CHECK-RV64-NEXT: ret <vscale x 16 x i1> [[TMP0]]
+//
+vbool4_t test_vmfne_vf_bf16m4_b4_mu(vbool4_t mask, vbool4_t maskedoff, vbfloat16m4_t op1, __bf16 op2, size_t vl) {
+ return __riscv_vmfne_vf_bf16m4_b4_mu(mask, maskedoff, op1, op2, vl);
+}
+
+// CHECK-RV64-LABEL: define dso_local <vscale x 32 x i1> @test_vmfne_vv_bf16m8_b2_mu(
+// CHECK-RV64-SAME: <vscale x 32 x i1> [[MASK:%.*]], <vscale x 32 x i1> [[MASKEDOFF:%.*]], <vscale x 32 x bfloat> [[OP1:%.*]], <vscale x 32 x bfloat> [[OP2:%.*]], i64 noundef [[VL:%.*]]) #[[ATTR0]] {
+// CHECK-RV64-NEXT: [[ENTRY:.*:]]
+// CHECK-RV64-NEXT: [[TMP0:%.*]] = call <vscale x 32 x i1> @llvm.riscv.vmfne.mask.nxv32bf16.nxv32bf16.i64(<vscale x 32 x i1> [[MASKEDOFF]], <vscale x 32 x bfloat> [[OP1]], <vscale x 32 x bfloat> [[OP2]], <vscale x 32 x i1> [[MASK]], i64 [[VL]])
+// CHECK-RV64-NEXT: ret <vscale x 32 x i1> [[TMP0]]
+//
+vbool2_t test_vmfne_vv_bf16m8_b2_mu(vbool2_t mask, vbool2_t maskedoff, vbfloat16m8_t op1, vbfloat16m8_t op2, size_t vl) {
+ return __riscv_vmfne_vv_bf16m8_b2_mu(mask, maskedoff, op1, op2, vl);
+}
+
+// CHECK-RV64-LABEL: define dso_local <vscale x 32 x i1> @test_vmfne_vf_bf16m8_b2_mu(
+// CHECK-RV64-SAME: <vscale x 32 x i1> [[MASK:%.*]], <vscale x 32 x i1> [[MASKEDOFF:%.*]], <vscale x 32 x bfloat> [[OP1:%.*]], bfloat noundef [[OP2:%.*]], i64 noundef [[VL:%.*]]) #[[ATTR0]] {
+// CHECK-RV64-NEXT: [[ENTRY:.*:]]
+// CHECK-RV64-NEXT: [[TMP0:%.*]] = call <vscale x 32 x i1> @llvm.riscv.vmfne.mask.nxv32bf16.bf16.i64(<vscale x 32 x i1> [[MASKEDOFF]], <vscale x 32 x bfloat> [[OP1]], bfloat [[OP2]], <vscale x 32 x i1> [[MASK]], i64 [[VL]])
+// CHECK-RV64-NEXT: ret <vscale x 32 x i1> [[TMP0]]
+//
+vbool2_t test_vmfne_vf_bf16m8_b2_mu(vbool2_t mask, vbool2_t maskedoff, vbfloat16m8_t op1, __bf16 op2, size_t vl) {
+ return __riscv_vmfne_vf_bf16m8_b2_mu(mask, maskedoff, op1, op2, vl);
+}
+
diff --git a/clang/test/CodeGen/RISCV/rvv-intrinsics-autogenerated/zvfbfa/policy/overloaded/vfadd.c b/clang/test/CodeGen/RISCV/rvv-intrinsics-autogenerated/zvfbfa/policy/overloaded/vfadd.c
new file mode 100644
index 0000000..2bd3b39
--- /dev/null
+++ b/clang/test/CodeGen/RISCV/rvv-intrinsics-autogenerated/zvfbfa/policy/overloaded/vfadd.c
@@ -0,0 +1,489 @@
+// NOTE: Assertions have been autogenerated by utils/update_cc_test_checks.py UTC_ARGS: --version 5
+// REQUIRES: riscv-registered-target
+// RUN: %clang_cc1 -triple riscv64 -target-feature +v \
+// RUN: -target-feature +experimental-zvfbfa -disable-O0-optnone \
+// RUN: -emit-llvm %s -o - | opt -S -passes=mem2reg | \
+// RUN: FileCheck --check-prefix=CHECK-RV64 %s
+
+#include <riscv_vector.h>
+
+// CHECK-RV64-LABEL: define dso_local <vscale x 1 x bfloat> @test_vfadd_vv_bf16mf4_tu(
+// CHECK-RV64-SAME: <vscale x 1 x bfloat> [[MASKEDOFF:%.*]], <vscale x 1 x bfloat> [[OP1:%.*]], <vscale x 1 x bfloat> [[OP2:%.*]], i64 noundef [[VL:%.*]]) #[[ATTR0:[0-9]+]] {
+// CHECK-RV64-NEXT: [[ENTRY:.*:]]
+// CHECK-RV64-NEXT: [[TMP0:%.*]] = call <vscale x 1 x bfloat> @llvm.riscv.vfadd.nxv1bf16.nxv1bf16.i64(<vscale x 1 x bfloat> [[MASKEDOFF]], <vscale x 1 x bfloat> [[OP1]], <vscale x 1 x bfloat> [[OP2]], i64 7, i64 [[VL]])
+// CHECK-RV64-NEXT: ret <vscale x 1 x bfloat> [[TMP0]]
+//
+vbfloat16mf4_t test_vfadd_vv_bf16mf4_tu(vbfloat16mf4_t maskedoff, vbfloat16mf4_t op1, vbfloat16mf4_t op2, size_t vl) {
+ return __riscv_vfadd_tu(maskedoff, op1, op2, vl);
+}
+
+// CHECK-RV64-LABEL: define dso_local <vscale x 1 x bfloat> @test_vfadd_vf_bf16mf4_tu(
+// CHECK-RV64-SAME: <vscale x 1 x bfloat> [[MASKEDOFF:%.*]], <vscale x 1 x bfloat> [[OP1:%.*]], bfloat noundef [[OP2:%.*]], i64 noundef [[VL:%.*]]) #[[ATTR0]] {
+// CHECK-RV64-NEXT: [[ENTRY:.*:]]
+// CHECK-RV64-NEXT: [[TMP0:%.*]] = call <vscale x 1 x bfloat> @llvm.riscv.vfadd.nxv1bf16.bf16.i64(<vscale x 1 x bfloat> [[MASKEDOFF]], <vscale x 1 x bfloat> [[OP1]], bfloat [[OP2]], i64 7, i64 [[VL]])
+// CHECK-RV64-NEXT: ret <vscale x 1 x bfloat> [[TMP0]]
+//
+vbfloat16mf4_t test_vfadd_vf_bf16mf4_tu(vbfloat16mf4_t maskedoff, vbfloat16mf4_t op1, __bf16 op2, size_t vl) {
+ return __riscv_vfadd_tu(maskedoff, op1, op2, vl);
+}
+
+// CHECK-RV64-LABEL: define dso_local <vscale x 2 x bfloat> @test_vfadd_vv_bf16mf2_tu(
+// CHECK-RV64-SAME: <vscale x 2 x bfloat> [[MASKEDOFF:%.*]], <vscale x 2 x bfloat> [[OP1:%.*]], <vscale x 2 x bfloat> [[OP2:%.*]], i64 noundef [[VL:%.*]]) #[[ATTR0]] {
+// CHECK-RV64-NEXT: [[ENTRY:.*:]]
+// CHECK-RV64-NEXT: [[TMP0:%.*]] = call <vscale x 2 x bfloat> @llvm.riscv.vfadd.nxv2bf16.nxv2bf16.i64(<vscale x 2 x bfloat> [[MASKEDOFF]], <vscale x 2 x bfloat> [[OP1]], <vscale x 2 x bfloat> [[OP2]], i64 7, i64 [[VL]])
+// CHECK-RV64-NEXT: ret <vscale x 2 x bfloat> [[TMP0]]
+//
+vbfloat16mf2_t test_vfadd_vv_bf16mf2_tu(vbfloat16mf2_t maskedoff, vbfloat16mf2_t op1, vbfloat16mf2_t op2, size_t vl) {
+ return __riscv_vfadd_tu(maskedoff, op1, op2, vl);
+}
+
+// CHECK-RV64-LABEL: define dso_local <vscale x 2 x bfloat> @test_vfadd_vf_bf16mf2_tu(
+// CHECK-RV64-SAME: <vscale x 2 x bfloat> [[MASKEDOFF:%.*]], <vscale x 2 x bfloat> [[OP1:%.*]], bfloat noundef [[OP2:%.*]], i64 noundef [[VL:%.*]]) #[[ATTR0]] {
+// CHECK-RV64-NEXT: [[ENTRY:.*:]]
+// CHECK-RV64-NEXT: [[TMP0:%.*]] = call <vscale x 2 x bfloat> @llvm.riscv.vfadd.nxv2bf16.bf16.i64(<vscale x 2 x bfloat> [[MASKEDOFF]], <vscale x 2 x bfloat> [[OP1]], bfloat [[OP2]], i64 7, i64 [[VL]])
+// CHECK-RV64-NEXT: ret <vscale x 2 x bfloat> [[TMP0]]
+//
+vbfloat16mf2_t test_vfadd_vf_bf16mf2_tu(vbfloat16mf2_t maskedoff, vbfloat16mf2_t op1, __bf16 op2, size_t vl) {
+ return __riscv_vfadd_tu(maskedoff, op1, op2, vl);
+}
+
+// CHECK-RV64-LABEL: define dso_local <vscale x 4 x bfloat> @test_vfadd_vv_bf16m1_tu(
+// CHECK-RV64-SAME: <vscale x 4 x bfloat> [[MASKEDOFF:%.*]], <vscale x 4 x bfloat> [[OP1:%.*]], <vscale x 4 x bfloat> [[OP2:%.*]], i64 noundef [[VL:%.*]]) #[[ATTR0]] {
+// CHECK-RV64-NEXT: [[ENTRY:.*:]]
+// CHECK-RV64-NEXT: [[TMP0:%.*]] = call <vscale x 4 x bfloat> @llvm.riscv.vfadd.nxv4bf16.nxv4bf16.i64(<vscale x 4 x bfloat> [[MASKEDOFF]], <vscale x 4 x bfloat> [[OP1]], <vscale x 4 x bfloat> [[OP2]], i64 7, i64 [[VL]])
+// CHECK-RV64-NEXT: ret <vscale x 4 x bfloat> [[TMP0]]
+//
+vbfloat16m1_t test_vfadd_vv_bf16m1_tu(vbfloat16m1_t maskedoff, vbfloat16m1_t op1, vbfloat16m1_t op2, size_t vl) {
+ return __riscv_vfadd_tu(maskedoff, op1, op2, vl);
+}
+
+// CHECK-RV64-LABEL: define dso_local <vscale x 4 x bfloat> @test_vfadd_vf_bf16m1_tu(
+// CHECK-RV64-SAME: <vscale x 4 x bfloat> [[MASKEDOFF:%.*]], <vscale x 4 x bfloat> [[OP1:%.*]], bfloat noundef [[OP2:%.*]], i64 noundef [[VL:%.*]]) #[[ATTR0]] {
+// CHECK-RV64-NEXT: [[ENTRY:.*:]]
+// CHECK-RV64-NEXT: [[TMP0:%.*]] = call <vscale x 4 x bfloat> @llvm.riscv.vfadd.nxv4bf16.bf16.i64(<vscale x 4 x bfloat> [[MASKEDOFF]], <vscale x 4 x bfloat> [[OP1]], bfloat [[OP2]], i64 7, i64 [[VL]])
+// CHECK-RV64-NEXT: ret <vscale x 4 x bfloat> [[TMP0]]
+//
+vbfloat16m1_t test_vfadd_vf_bf16m1_tu(vbfloat16m1_t maskedoff, vbfloat16m1_t op1, __bf16 op2, size_t vl) {
+ return __riscv_vfadd_tu(maskedoff, op1, op2, vl);
+}
+
+// CHECK-RV64-LABEL: define dso_local <vscale x 8 x bfloat> @test_vfadd_vv_bf16m2_tu(
+// CHECK-RV64-SAME: <vscale x 8 x bfloat> [[MASKEDOFF:%.*]], <vscale x 8 x bfloat> [[OP1:%.*]], <vscale x 8 x bfloat> [[OP2:%.*]], i64 noundef [[VL:%.*]]) #[[ATTR0]] {
+// CHECK-RV64-NEXT: [[ENTRY:.*:]]
+// CHECK-RV64-NEXT: [[TMP0:%.*]] = call <vscale x 8 x bfloat> @llvm.riscv.vfadd.nxv8bf16.nxv8bf16.i64(<vscale x 8 x bfloat> [[MASKEDOFF]], <vscale x 8 x bfloat> [[OP1]], <vscale x 8 x bfloat> [[OP2]], i64 7, i64 [[VL]])
+// CHECK-RV64-NEXT: ret <vscale x 8 x bfloat> [[TMP0]]
+//
+vbfloat16m2_t test_vfadd_vv_bf16m2_tu(vbfloat16m2_t maskedoff, vbfloat16m2_t op1, vbfloat16m2_t op2, size_t vl) {
+ return __riscv_vfadd_tu(maskedoff, op1, op2, vl);
+}
+
+// CHECK-RV64-LABEL: define dso_local <vscale x 8 x bfloat> @test_vfadd_vf_bf16m2_tu(
+// CHECK-RV64-SAME: <vscale x 8 x bfloat> [[MASKEDOFF:%.*]], <vscale x 8 x bfloat> [[OP1:%.*]], bfloat noundef [[OP2:%.*]], i64 noundef [[VL:%.*]]) #[[ATTR0]] {
+// CHECK-RV64-NEXT: [[ENTRY:.*:]]
+// CHECK-RV64-NEXT: [[TMP0:%.*]] = call <vscale x 8 x bfloat> @llvm.riscv.vfadd.nxv8bf16.bf16.i64(<vscale x 8 x bfloat> [[MASKEDOFF]], <vscale x 8 x bfloat> [[OP1]], bfloat [[OP2]], i64 7, i64 [[VL]])
+// CHECK-RV64-NEXT: ret <vscale x 8 x bfloat> [[TMP0]]
+//
+vbfloat16m2_t test_vfadd_vf_bf16m2_tu(vbfloat16m2_t maskedoff, vbfloat16m2_t op1, __bf16 op2, size_t vl) {
+ return __riscv_vfadd_tu(maskedoff, op1, op2, vl);
+}
+
+// CHECK-RV64-LABEL: define dso_local <vscale x 16 x bfloat> @test_vfadd_vv_bf16m4_tu(
+// CHECK-RV64-SAME: <vscale x 16 x bfloat> [[MASKEDOFF:%.*]], <vscale x 16 x bfloat> [[OP1:%.*]], <vscale x 16 x bfloat> [[OP2:%.*]], i64 noundef [[VL:%.*]]) #[[ATTR0]] {
+// CHECK-RV64-NEXT: [[ENTRY:.*:]]
+// CHECK-RV64-NEXT: [[TMP0:%.*]] = call <vscale x 16 x bfloat> @llvm.riscv.vfadd.nxv16bf16.nxv16bf16.i64(<vscale x 16 x bfloat> [[MASKEDOFF]], <vscale x 16 x bfloat> [[OP1]], <vscale x 16 x bfloat> [[OP2]], i64 7, i64 [[VL]])
+// CHECK-RV64-NEXT: ret <vscale x 16 x bfloat> [[TMP0]]
+//
+vbfloat16m4_t test_vfadd_vv_bf16m4_tu(vbfloat16m4_t maskedoff, vbfloat16m4_t op1, vbfloat16m4_t op2, size_t vl) {
+ return __riscv_vfadd_tu(maskedoff, op1, op2, vl);
+}
+
+// CHECK-RV64-LABEL: define dso_local <vscale x 16 x bfloat> @test_vfadd_vf_bf16m4_tu(
+// CHECK-RV64-SAME: <vscale x 16 x bfloat> [[MASKEDOFF:%.*]], <vscale x 16 x bfloat> [[OP1:%.*]], bfloat noundef [[OP2:%.*]], i64 noundef [[VL:%.*]]) #[[ATTR0]] {
+// CHECK-RV64-NEXT: [[ENTRY:.*:]]
+// CHECK-RV64-NEXT: [[TMP0:%.*]] = call <vscale x 16 x bfloat> @llvm.riscv.vfadd.nxv16bf16.bf16.i64(<vscale x 16 x bfloat> [[MASKEDOFF]], <vscale x 16 x bfloat> [[OP1]], bfloat [[OP2]], i64 7, i64 [[VL]])
+// CHECK-RV64-NEXT: ret <vscale x 16 x bfloat> [[TMP0]]
+//
+vbfloat16m4_t test_vfadd_vf_bf16m4_tu(vbfloat16m4_t maskedoff, vbfloat16m4_t op1, __bf16 op2, size_t vl) {
+ return __riscv_vfadd_tu(maskedoff, op1, op2, vl);
+}
+
+// CHECK-RV64-LABEL: define dso_local <vscale x 32 x bfloat> @test_vfadd_vv_bf16m8_tu(
+// CHECK-RV64-SAME: <vscale x 32 x bfloat> [[MASKEDOFF:%.*]], <vscale x 32 x bfloat> [[OP1:%.*]], <vscale x 32 x bfloat> [[OP2:%.*]], i64 noundef [[VL:%.*]]) #[[ATTR0]] {
+// CHECK-RV64-NEXT: [[ENTRY:.*:]]
+// CHECK-RV64-NEXT: [[TMP0:%.*]] = call <vscale x 32 x bfloat> @llvm.riscv.vfadd.nxv32bf16.nxv32bf16.i64(<vscale x 32 x bfloat> [[MASKEDOFF]], <vscale x 32 x bfloat> [[OP1]], <vscale x 32 x bfloat> [[OP2]], i64 7, i64 [[VL]])
+// CHECK-RV64-NEXT: ret <vscale x 32 x bfloat> [[TMP0]]
+//
+vbfloat16m8_t test_vfadd_vv_bf16m8_tu(vbfloat16m8_t maskedoff, vbfloat16m8_t op1, vbfloat16m8_t op2, size_t vl) {
+ return __riscv_vfadd_tu(maskedoff, op1, op2, vl);
+}
+
+// CHECK-RV64-LABEL: define dso_local <vscale x 32 x bfloat> @test_vfadd_vf_bf16m8_tu(
+// CHECK-RV64-SAME: <vscale x 32 x bfloat> [[MASKEDOFF:%.*]], <vscale x 32 x bfloat> [[OP1:%.*]], bfloat noundef [[OP2:%.*]], i64 noundef [[VL:%.*]]) #[[ATTR0]] {
+// CHECK-RV64-NEXT: [[ENTRY:.*:]]
+// CHECK-RV64-NEXT: [[TMP0:%.*]] = call <vscale x 32 x bfloat> @llvm.riscv.vfadd.nxv32bf16.bf16.i64(<vscale x 32 x bfloat> [[MASKEDOFF]], <vscale x 32 x bfloat> [[OP1]], bfloat [[OP2]], i64 7, i64 [[VL]])
+// CHECK-RV64-NEXT: ret <vscale x 32 x bfloat> [[TMP0]]
+//
+vbfloat16m8_t test_vfadd_vf_bf16m8_tu(vbfloat16m8_t maskedoff, vbfloat16m8_t op1, __bf16 op2, size_t vl) {
+ return __riscv_vfadd_tu(maskedoff, op1, op2, vl);
+}
+
+// CHECK-RV64-LABEL: define dso_local <vscale x 1 x bfloat> @test_vfadd_vv_bf16mf4_tum(
+// CHECK-RV64-SAME: <vscale x 1 x i1> [[MASK:%.*]], <vscale x 1 x bfloat> [[MASKEDOFF:%.*]], <vscale x 1 x bfloat> [[OP1:%.*]], <vscale x 1 x bfloat> [[OP2:%.*]], i64 noundef [[VL:%.*]]) #[[ATTR0]] {
+// CHECK-RV64-NEXT: [[ENTRY:.*:]]
+// CHECK-RV64-NEXT: [[TMP0:%.*]] = call <vscale x 1 x bfloat> @llvm.riscv.vfadd.mask.nxv1bf16.nxv1bf16.i64(<vscale x 1 x bfloat> [[MASKEDOFF]], <vscale x 1 x bfloat> [[OP1]], <vscale x 1 x bfloat> [[OP2]], <vscale x 1 x i1> [[MASK]], i64 7, i64 [[VL]], i64 2)
+// CHECK-RV64-NEXT: ret <vscale x 1 x bfloat> [[TMP0]]
+//
+vbfloat16mf4_t test_vfadd_vv_bf16mf4_tum(vbool64_t mask, vbfloat16mf4_t maskedoff, vbfloat16mf4_t op1, vbfloat16mf4_t op2, size_t vl) {
+ return __riscv_vfadd_tum(mask, maskedoff, op1, op2, vl);
+}
+
+// CHECK-RV64-LABEL: define dso_local <vscale x 1 x bfloat> @test_vfadd_vf_bf16mf4_tum(
+// CHECK-RV64-SAME: <vscale x 1 x i1> [[MASK:%.*]], <vscale x 1 x bfloat> [[MASKEDOFF:%.*]], <vscale x 1 x bfloat> [[OP1:%.*]], bfloat noundef [[OP2:%.*]], i64 noundef [[VL:%.*]]) #[[ATTR0]] {
+// CHECK-RV64-NEXT: [[ENTRY:.*:]]
+// CHECK-RV64-NEXT: [[TMP0:%.*]] = call <vscale x 1 x bfloat> @llvm.riscv.vfadd.mask.nxv1bf16.bf16.i64(<vscale x 1 x bfloat> [[MASKEDOFF]], <vscale x 1 x bfloat> [[OP1]], bfloat [[OP2]], <vscale x 1 x i1> [[MASK]], i64 7, i64 [[VL]], i64 2)
+// CHECK-RV64-NEXT: ret <vscale x 1 x bfloat> [[TMP0]]
+//
+vbfloat16mf4_t test_vfadd_vf_bf16mf4_tum(vbool64_t mask, vbfloat16mf4_t maskedoff, vbfloat16mf4_t op1, __bf16 op2, size_t vl) {
+ return __riscv_vfadd_tum(mask, maskedoff, op1, op2, vl);
+}
+
+// CHECK-RV64-LABEL: define dso_local <vscale x 2 x bfloat> @test_vfadd_vv_bf16mf2_tum(
+// CHECK-RV64-SAME: <vscale x 2 x i1> [[MASK:%.*]], <vscale x 2 x bfloat> [[MASKEDOFF:%.*]], <vscale x 2 x bfloat> [[OP1:%.*]], <vscale x 2 x bfloat> [[OP2:%.*]], i64 noundef [[VL:%.*]]) #[[ATTR0]] {
+// CHECK-RV64-NEXT: [[ENTRY:.*:]]
+// CHECK-RV64-NEXT: [[TMP0:%.*]] = call <vscale x 2 x bfloat> @llvm.riscv.vfadd.mask.nxv2bf16.nxv2bf16.i64(<vscale x 2 x bfloat> [[MASKEDOFF]], <vscale x 2 x bfloat> [[OP1]], <vscale x 2 x bfloat> [[OP2]], <vscale x 2 x i1> [[MASK]], i64 7, i64 [[VL]], i64 2)
+// CHECK-RV64-NEXT: ret <vscale x 2 x bfloat> [[TMP0]]
+//
+vbfloat16mf2_t test_vfadd_vv_bf16mf2_tum(vbool32_t mask, vbfloat16mf2_t maskedoff, vbfloat16mf2_t op1, vbfloat16mf2_t op2, size_t vl) {
+ return __riscv_vfadd_tum(mask, maskedoff, op1, op2, vl);
+}
+
+// CHECK-RV64-LABEL: define dso_local <vscale x 2 x bfloat> @test_vfadd_vf_bf16mf2_tum(
+// CHECK-RV64-SAME: <vscale x 2 x i1> [[MASK:%.*]], <vscale x 2 x bfloat> [[MASKEDOFF:%.*]], <vscale x 2 x bfloat> [[OP1:%.*]], bfloat noundef [[OP2:%.*]], i64 noundef [[VL:%.*]]) #[[ATTR0]] {
+// CHECK-RV64-NEXT: [[ENTRY:.*:]]
+// CHECK-RV64-NEXT: [[TMP0:%.*]] = call <vscale x 2 x bfloat> @llvm.riscv.vfadd.mask.nxv2bf16.bf16.i64(<vscale x 2 x bfloat> [[MASKEDOFF]], <vscale x 2 x bfloat> [[OP1]], bfloat [[OP2]], <vscale x 2 x i1> [[MASK]], i64 7, i64 [[VL]], i64 2)
+// CHECK-RV64-NEXT: ret <vscale x 2 x bfloat> [[TMP0]]
+//
+vbfloat16mf2_t test_vfadd_vf_bf16mf2_tum(vbool32_t mask, vbfloat16mf2_t maskedoff, vbfloat16mf2_t op1, __bf16 op2, size_t vl) {
+ return __riscv_vfadd_tum(mask, maskedoff, op1, op2, vl);
+}
+
+// CHECK-RV64-LABEL: define dso_local <vscale x 4 x bfloat> @test_vfadd_vv_bf16m1_tum(
+// CHECK-RV64-SAME: <vscale x 4 x i1> [[MASK:%.*]], <vscale x 4 x bfloat> [[MASKEDOFF:%.*]], <vscale x 4 x bfloat> [[OP1:%.*]], <vscale x 4 x bfloat> [[OP2:%.*]], i64 noundef [[VL:%.*]]) #[[ATTR0]] {
+// CHECK-RV64-NEXT: [[ENTRY:.*:]]
+// CHECK-RV64-NEXT: [[TMP0:%.*]] = call <vscale x 4 x bfloat> @llvm.riscv.vfadd.mask.nxv4bf16.nxv4bf16.i64(<vscale x 4 x bfloat> [[MASKEDOFF]], <vscale x 4 x bfloat> [[OP1]], <vscale x 4 x bfloat> [[OP2]], <vscale x 4 x i1> [[MASK]], i64 7, i64 [[VL]], i64 2)
+// CHECK-RV64-NEXT: ret <vscale x 4 x bfloat> [[TMP0]]
+//
+vbfloat16m1_t test_vfadd_vv_bf16m1_tum(vbool16_t mask, vbfloat16m1_t maskedoff, vbfloat16m1_t op1, vbfloat16m1_t op2, size_t vl) {
+ return __riscv_vfadd_tum(mask, maskedoff, op1, op2, vl);
+}
+
+// CHECK-RV64-LABEL: define dso_local <vscale x 4 x bfloat> @test_vfadd_vf_bf16m1_tum(
+// CHECK-RV64-SAME: <vscale x 4 x i1> [[MASK:%.*]], <vscale x 4 x bfloat> [[MASKEDOFF:%.*]], <vscale x 4 x bfloat> [[OP1:%.*]], bfloat noundef [[OP2:%.*]], i64 noundef [[VL:%.*]]) #[[ATTR0]] {
+// CHECK-RV64-NEXT: [[ENTRY:.*:]]
+// CHECK-RV64-NEXT: [[TMP0:%.*]] = call <vscale x 4 x bfloat> @llvm.riscv.vfadd.mask.nxv4bf16.bf16.i64(<vscale x 4 x bfloat> [[MASKEDOFF]], <vscale x 4 x bfloat> [[OP1]], bfloat [[OP2]], <vscale x 4 x i1> [[MASK]], i64 7, i64 [[VL]], i64 2)
+// CHECK-RV64-NEXT: ret <vscale x 4 x bfloat> [[TMP0]]
+//
+vbfloat16m1_t test_vfadd_vf_bf16m1_tum(vbool16_t mask, vbfloat16m1_t maskedoff, vbfloat16m1_t op1, __bf16 op2, size_t vl) {
+ return __riscv_vfadd_tum(mask, maskedoff, op1, op2, vl);
+}
+
+// CHECK-RV64-LABEL: define dso_local <vscale x 8 x bfloat> @test_vfadd_vv_bf16m2_tum(
+// CHECK-RV64-SAME: <vscale x 8 x i1> [[MASK:%.*]], <vscale x 8 x bfloat> [[MASKEDOFF:%.*]], <vscale x 8 x bfloat> [[OP1:%.*]], <vscale x 8 x bfloat> [[OP2:%.*]], i64 noundef [[VL:%.*]]) #[[ATTR0]] {
+// CHECK-RV64-NEXT: [[ENTRY:.*:]]
+// CHECK-RV64-NEXT: [[TMP0:%.*]] = call <vscale x 8 x bfloat> @llvm.riscv.vfadd.mask.nxv8bf16.nxv8bf16.i64(<vscale x 8 x bfloat> [[MASKEDOFF]], <vscale x 8 x bfloat> [[OP1]], <vscale x 8 x bfloat> [[OP2]], <vscale x 8 x i1> [[MASK]], i64 7, i64 [[VL]], i64 2)
+// CHECK-RV64-NEXT: ret <vscale x 8 x bfloat> [[TMP0]]
+//
+vbfloat16m2_t test_vfadd_vv_bf16m2_tum(vbool8_t mask, vbfloat16m2_t maskedoff, vbfloat16m2_t op1, vbfloat16m2_t op2, size_t vl) {
+ return __riscv_vfadd_tum(mask, maskedoff, op1, op2, vl);
+}
+
+// CHECK-RV64-LABEL: define dso_local <vscale x 8 x bfloat> @test_vfadd_vf_bf16m2_tum(
+// CHECK-RV64-SAME: <vscale x 8 x i1> [[MASK:%.*]], <vscale x 8 x bfloat> [[MASKEDOFF:%.*]], <vscale x 8 x bfloat> [[OP1:%.*]], bfloat noundef [[OP2:%.*]], i64 noundef [[VL:%.*]]) #[[ATTR0]] {
+// CHECK-RV64-NEXT: [[ENTRY:.*:]]
+// CHECK-RV64-NEXT: [[TMP0:%.*]] = call <vscale x 8 x bfloat> @llvm.riscv.vfadd.mask.nxv8bf16.bf16.i64(<vscale x 8 x bfloat> [[MASKEDOFF]], <vscale x 8 x bfloat> [[OP1]], bfloat [[OP2]], <vscale x 8 x i1> [[MASK]], i64 7, i64 [[VL]], i64 2)
+// CHECK-RV64-NEXT: ret <vscale x 8 x bfloat> [[TMP0]]
+//
+vbfloat16m2_t test_vfadd_vf_bf16m2_tum(vbool8_t mask, vbfloat16m2_t maskedoff, vbfloat16m2_t op1, __bf16 op2, size_t vl) {
+ return __riscv_vfadd_tum(mask, maskedoff, op1, op2, vl);
+}
+
+// CHECK-RV64-LABEL: define dso_local <vscale x 16 x bfloat> @test_vfadd_vv_bf16m4_tum(
+// CHECK-RV64-SAME: <vscale x 16 x i1> [[MASK:%.*]], <vscale x 16 x bfloat> [[MASKEDOFF:%.*]], <vscale x 16 x bfloat> [[OP1:%.*]], <vscale x 16 x bfloat> [[OP2:%.*]], i64 noundef [[VL:%.*]]) #[[ATTR0]] {
+// CHECK-RV64-NEXT: [[ENTRY:.*:]]
+// CHECK-RV64-NEXT: [[TMP0:%.*]] = call <vscale x 16 x bfloat> @llvm.riscv.vfadd.mask.nxv16bf16.nxv16bf16.i64(<vscale x 16 x bfloat> [[MASKEDOFF]], <vscale x 16 x bfloat> [[OP1]], <vscale x 16 x bfloat> [[OP2]], <vscale x 16 x i1> [[MASK]], i64 7, i64 [[VL]], i64 2)
+// CHECK-RV64-NEXT: ret <vscale x 16 x bfloat> [[TMP0]]
+//
+vbfloat16m4_t test_vfadd_vv_bf16m4_tum(vbool4_t mask, vbfloat16m4_t maskedoff, vbfloat16m4_t op1, vbfloat16m4_t op2, size_t vl) {
+ return __riscv_vfadd_tum(mask, maskedoff, op1, op2, vl);
+}
+
+// CHECK-RV64-LABEL: define dso_local <vscale x 16 x bfloat> @test_vfadd_vf_bf16m4_tum(
+// CHECK-RV64-SAME: <vscale x 16 x i1> [[MASK:%.*]], <vscale x 16 x bfloat> [[MASKEDOFF:%.*]], <vscale x 16 x bfloat> [[OP1:%.*]], bfloat noundef [[OP2:%.*]], i64 noundef [[VL:%.*]]) #[[ATTR0]] {
+// CHECK-RV64-NEXT: [[ENTRY:.*:]]
+// CHECK-RV64-NEXT: [[TMP0:%.*]] = call <vscale x 16 x bfloat> @llvm.riscv.vfadd.mask.nxv16bf16.bf16.i64(<vscale x 16 x bfloat> [[MASKEDOFF]], <vscale x 16 x bfloat> [[OP1]], bfloat [[OP2]], <vscale x 16 x i1> [[MASK]], i64 7, i64 [[VL]], i64 2)
+// CHECK-RV64-NEXT: ret <vscale x 16 x bfloat> [[TMP0]]
+//
+vbfloat16m4_t test_vfadd_vf_bf16m4_tum(vbool4_t mask, vbfloat16m4_t maskedoff, vbfloat16m4_t op1, __bf16 op2, size_t vl) {
+ return __riscv_vfadd_tum(mask, maskedoff, op1, op2, vl);
+}
+
+// CHECK-RV64-LABEL: define dso_local <vscale x 32 x bfloat> @test_vfadd_vv_bf16m8_tum(
+// CHECK-RV64-SAME: <vscale x 32 x i1> [[MASK:%.*]], <vscale x 32 x bfloat> [[MASKEDOFF:%.*]], <vscale x 32 x bfloat> [[OP1:%.*]], <vscale x 32 x bfloat> [[OP2:%.*]], i64 noundef [[VL:%.*]]) #[[ATTR0]] {
+// CHECK-RV64-NEXT: [[ENTRY:.*:]]
+// CHECK-RV64-NEXT: [[TMP0:%.*]] = call <vscale x 32 x bfloat> @llvm.riscv.vfadd.mask.nxv32bf16.nxv32bf16.i64(<vscale x 32 x bfloat> [[MASKEDOFF]], <vscale x 32 x bfloat> [[OP1]], <vscale x 32 x bfloat> [[OP2]], <vscale x 32 x i1> [[MASK]], i64 7, i64 [[VL]], i64 2)
+// CHECK-RV64-NEXT: ret <vscale x 32 x bfloat> [[TMP0]]
+//
+vbfloat16m8_t test_vfadd_vv_bf16m8_tum(vbool2_t mask, vbfloat16m8_t maskedoff, vbfloat16m8_t op1, vbfloat16m8_t op2, size_t vl) {
+ return __riscv_vfadd_tum(mask, maskedoff, op1, op2, vl);
+}
+
+// CHECK-RV64-LABEL: define dso_local <vscale x 32 x bfloat> @test_vfadd_vf_bf16m8_tum(
+// CHECK-RV64-SAME: <vscale x 32 x i1> [[MASK:%.*]], <vscale x 32 x bfloat> [[MASKEDOFF:%.*]], <vscale x 32 x bfloat> [[OP1:%.*]], bfloat noundef [[OP2:%.*]], i64 noundef [[VL:%.*]]) #[[ATTR0]] {
+// CHECK-RV64-NEXT: [[ENTRY:.*:]]
+// CHECK-RV64-NEXT: [[TMP0:%.*]] = call <vscale x 32 x bfloat> @llvm.riscv.vfadd.mask.nxv32bf16.bf16.i64(<vscale x 32 x bfloat> [[MASKEDOFF]], <vscale x 32 x bfloat> [[OP1]], bfloat [[OP2]], <vscale x 32 x i1> [[MASK]], i64 7, i64 [[VL]], i64 2)
+// CHECK-RV64-NEXT: ret <vscale x 32 x bfloat> [[TMP0]]
+//
+vbfloat16m8_t test_vfadd_vf_bf16m8_tum(vbool2_t mask, vbfloat16m8_t maskedoff, vbfloat16m8_t op1, __bf16 op2, size_t vl) {
+ return __riscv_vfadd_tum(mask, maskedoff, op1, op2, vl);
+}
+
+// CHECK-RV64-LABEL: define dso_local <vscale x 1 x bfloat> @test_vfadd_vv_bf16mf4_tumu(
+// CHECK-RV64-SAME: <vscale x 1 x i1> [[MASK:%.*]], <vscale x 1 x bfloat> [[MASKEDOFF:%.*]], <vscale x 1 x bfloat> [[OP1:%.*]], <vscale x 1 x bfloat> [[OP2:%.*]], i64 noundef [[VL:%.*]]) #[[ATTR0]] {
+// CHECK-RV64-NEXT: [[ENTRY:.*:]]
+// CHECK-RV64-NEXT: [[TMP0:%.*]] = call <vscale x 1 x bfloat> @llvm.riscv.vfadd.mask.nxv1bf16.nxv1bf16.i64(<vscale x 1 x bfloat> [[MASKEDOFF]], <vscale x 1 x bfloat> [[OP1]], <vscale x 1 x bfloat> [[OP2]], <vscale x 1 x i1> [[MASK]], i64 7, i64 [[VL]], i64 0)
+// CHECK-RV64-NEXT: ret <vscale x 1 x bfloat> [[TMP0]]
+//
+vbfloat16mf4_t test_vfadd_vv_bf16mf4_tumu(vbool64_t mask, vbfloat16mf4_t maskedoff, vbfloat16mf4_t op1, vbfloat16mf4_t op2, size_t vl) {
+ return __riscv_vfadd_tumu(mask, maskedoff, op1, op2, vl);
+}
+
+// CHECK-RV64-LABEL: define dso_local <vscale x 1 x bfloat> @test_vfadd_vf_bf16mf4_tumu(
+// CHECK-RV64-SAME: <vscale x 1 x i1> [[MASK:%.*]], <vscale x 1 x bfloat> [[MASKEDOFF:%.*]], <vscale x 1 x bfloat> [[OP1:%.*]], bfloat noundef [[OP2:%.*]], i64 noundef [[VL:%.*]]) #[[ATTR0]] {
+// CHECK-RV64-NEXT: [[ENTRY:.*:]]
+// CHECK-RV64-NEXT: [[TMP0:%.*]] = call <vscale x 1 x bfloat> @llvm.riscv.vfadd.mask.nxv1bf16.bf16.i64(<vscale x 1 x bfloat> [[MASKEDOFF]], <vscale x 1 x bfloat> [[OP1]], bfloat [[OP2]], <vscale x 1 x i1> [[MASK]], i64 7, i64 [[VL]], i64 0)
+// CHECK-RV64-NEXT: ret <vscale x 1 x bfloat> [[TMP0]]
+//
+vbfloat16mf4_t test_vfadd_vf_bf16mf4_tumu(vbool64_t mask, vbfloat16mf4_t maskedoff, vbfloat16mf4_t op1, __bf16 op2, size_t vl) {
+ return __riscv_vfadd_tumu(mask, maskedoff, op1, op2, vl);
+}
+
+// CHECK-RV64-LABEL: define dso_local <vscale x 2 x bfloat> @test_vfadd_vv_bf16mf2_tumu(
+// CHECK-RV64-SAME: <vscale x 2 x i1> [[MASK:%.*]], <vscale x 2 x bfloat> [[MASKEDOFF:%.*]], <vscale x 2 x bfloat> [[OP1:%.*]], <vscale x 2 x bfloat> [[OP2:%.*]], i64 noundef [[VL:%.*]]) #[[ATTR0]] {
+// CHECK-RV64-NEXT: [[ENTRY:.*:]]
+// CHECK-RV64-NEXT: [[TMP0:%.*]] = call <vscale x 2 x bfloat> @llvm.riscv.vfadd.mask.nxv2bf16.nxv2bf16.i64(<vscale x 2 x bfloat> [[MASKEDOFF]], <vscale x 2 x bfloat> [[OP1]], <vscale x 2 x bfloat> [[OP2]], <vscale x 2 x i1> [[MASK]], i64 7, i64 [[VL]], i64 0)
+// CHECK-RV64-NEXT: ret <vscale x 2 x bfloat> [[TMP0]]
+//
+vbfloat16mf2_t test_vfadd_vv_bf16mf2_tumu(vbool32_t mask, vbfloat16mf2_t maskedoff, vbfloat16mf2_t op1, vbfloat16mf2_t op2, size_t vl) {
+ return __riscv_vfadd_tumu(mask, maskedoff, op1, op2, vl);
+}
+
+// CHECK-RV64-LABEL: define dso_local <vscale x 2 x bfloat> @test_vfadd_vf_bf16mf2_tumu(
+// CHECK-RV64-SAME: <vscale x 2 x i1> [[MASK:%.*]], <vscale x 2 x bfloat> [[MASKEDOFF:%.*]], <vscale x 2 x bfloat> [[OP1:%.*]], bfloat noundef [[OP2:%.*]], i64 noundef [[VL:%.*]]) #[[ATTR0]] {
+// CHECK-RV64-NEXT: [[ENTRY:.*:]]
+// CHECK-RV64-NEXT: [[TMP0:%.*]] = call <vscale x 2 x bfloat> @llvm.riscv.vfadd.mask.nxv2bf16.bf16.i64(<vscale x 2 x bfloat> [[MASKEDOFF]], <vscale x 2 x bfloat> [[OP1]], bfloat [[OP2]], <vscale x 2 x i1> [[MASK]], i64 7, i64 [[VL]], i64 0)
+// CHECK-RV64-NEXT: ret <vscale x 2 x bfloat> [[TMP0]]
+//
+vbfloat16mf2_t test_vfadd_vf_bf16mf2_tumu(vbool32_t mask, vbfloat16mf2_t maskedoff, vbfloat16mf2_t op1, __bf16 op2, size_t vl) {
+ return __riscv_vfadd_tumu(mask, maskedoff, op1, op2, vl);
+}
+
+// CHECK-RV64-LABEL: define dso_local <vscale x 4 x bfloat> @test_vfadd_vv_bf16m1_tumu(
+// CHECK-RV64-SAME: <vscale x 4 x i1> [[MASK:%.*]], <vscale x 4 x bfloat> [[MASKEDOFF:%.*]], <vscale x 4 x bfloat> [[OP1:%.*]], <vscale x 4 x bfloat> [[OP2:%.*]], i64 noundef [[VL:%.*]]) #[[ATTR0]] {
+// CHECK-RV64-NEXT: [[ENTRY:.*:]]
+// CHECK-RV64-NEXT: [[TMP0:%.*]] = call <vscale x 4 x bfloat> @llvm.riscv.vfadd.mask.nxv4bf16.nxv4bf16.i64(<vscale x 4 x bfloat> [[MASKEDOFF]], <vscale x 4 x bfloat> [[OP1]], <vscale x 4 x bfloat> [[OP2]], <vscale x 4 x i1> [[MASK]], i64 7, i64 [[VL]], i64 0)
+// CHECK-RV64-NEXT: ret <vscale x 4 x bfloat> [[TMP0]]
+//
+vbfloat16m1_t test_vfadd_vv_bf16m1_tumu(vbool16_t mask, vbfloat16m1_t maskedoff, vbfloat16m1_t op1, vbfloat16m1_t op2, size_t vl) {
+ return __riscv_vfadd_tumu(mask, maskedoff, op1, op2, vl);
+}
+
+// CHECK-RV64-LABEL: define dso_local <vscale x 4 x bfloat> @test_vfadd_vf_bf16m1_tumu(
+// CHECK-RV64-SAME: <vscale x 4 x i1> [[MASK:%.*]], <vscale x 4 x bfloat> [[MASKEDOFF:%.*]], <vscale x 4 x bfloat> [[OP1:%.*]], bfloat noundef [[OP2:%.*]], i64 noundef [[VL:%.*]]) #[[ATTR0]] {
+// CHECK-RV64-NEXT: [[ENTRY:.*:]]
+// CHECK-RV64-NEXT: [[TMP0:%.*]] = call <vscale x 4 x bfloat> @llvm.riscv.vfadd.mask.nxv4bf16.bf16.i64(<vscale x 4 x bfloat> [[MASKEDOFF]], <vscale x 4 x bfloat> [[OP1]], bfloat [[OP2]], <vscale x 4 x i1> [[MASK]], i64 7, i64 [[VL]], i64 0)
+// CHECK-RV64-NEXT: ret <vscale x 4 x bfloat> [[TMP0]]
+//
+vbfloat16m1_t test_vfadd_vf_bf16m1_tumu(vbool16_t mask, vbfloat16m1_t maskedoff, vbfloat16m1_t op1, __bf16 op2, size_t vl) {
+ return __riscv_vfadd_tumu(mask, maskedoff, op1, op2, vl);
+}
+
+// CHECK-RV64-LABEL: define dso_local <vscale x 8 x bfloat> @test_vfadd_vv_bf16m2_tumu(
+// CHECK-RV64-SAME: <vscale x 8 x i1> [[MASK:%.*]], <vscale x 8 x bfloat> [[MASKEDOFF:%.*]], <vscale x 8 x bfloat> [[OP1:%.*]], <vscale x 8 x bfloat> [[OP2:%.*]], i64 noundef [[VL:%.*]]) #[[ATTR0]] {
+// CHECK-RV64-NEXT: [[ENTRY:.*:]]
+// CHECK-RV64-NEXT: [[TMP0:%.*]] = call <vscale x 8 x bfloat> @llvm.riscv.vfadd.mask.nxv8bf16.nxv8bf16.i64(<vscale x 8 x bfloat> [[MASKEDOFF]], <vscale x 8 x bfloat> [[OP1]], <vscale x 8 x bfloat> [[OP2]], <vscale x 8 x i1> [[MASK]], i64 7, i64 [[VL]], i64 0)
+// CHECK-RV64-NEXT: ret <vscale x 8 x bfloat> [[TMP0]]
+//
+vbfloat16m2_t test_vfadd_vv_bf16m2_tumu(vbool8_t mask, vbfloat16m2_t maskedoff, vbfloat16m2_t op1, vbfloat16m2_t op2, size_t vl) {
+ return __riscv_vfadd_tumu(mask, maskedoff, op1, op2, vl);
+}
+
+// CHECK-RV64-LABEL: define dso_local <vscale x 8 x bfloat> @test_vfadd_vf_bf16m2_tumu(
+// CHECK-RV64-SAME: <vscale x 8 x i1> [[MASK:%.*]], <vscale x 8 x bfloat> [[MASKEDOFF:%.*]], <vscale x 8 x bfloat> [[OP1:%.*]], bfloat noundef [[OP2:%.*]], i64 noundef [[VL:%.*]]) #[[ATTR0]] {
+// CHECK-RV64-NEXT: [[ENTRY:.*:]]
+// CHECK-RV64-NEXT: [[TMP0:%.*]] = call <vscale x 8 x bfloat> @llvm.riscv.vfadd.mask.nxv8bf16.bf16.i64(<vscale x 8 x bfloat> [[MASKEDOFF]], <vscale x 8 x bfloat> [[OP1]], bfloat [[OP2]], <vscale x 8 x i1> [[MASK]], i64 7, i64 [[VL]], i64 0)
+// CHECK-RV64-NEXT: ret <vscale x 8 x bfloat> [[TMP0]]
+//
+vbfloat16m2_t test_vfadd_vf_bf16m2_tumu(vbool8_t mask, vbfloat16m2_t maskedoff, vbfloat16m2_t op1, __bf16 op2, size_t vl) {
+ return __riscv_vfadd_tumu(mask, maskedoff, op1, op2, vl);
+}
+
+// CHECK-RV64-LABEL: define dso_local <vscale x 16 x bfloat> @test_vfadd_vv_bf16m4_tumu(
+// CHECK-RV64-SAME: <vscale x 16 x i1> [[MASK:%.*]], <vscale x 16 x bfloat> [[MASKEDOFF:%.*]], <vscale x 16 x bfloat> [[OP1:%.*]], <vscale x 16 x bfloat> [[OP2:%.*]], i64 noundef [[VL:%.*]]) #[[ATTR0]] {
+// CHECK-RV64-NEXT: [[ENTRY:.*:]]
+// CHECK-RV64-NEXT: [[TMP0:%.*]] = call <vscale x 16 x bfloat> @llvm.riscv.vfadd.mask.nxv16bf16.nxv16bf16.i64(<vscale x 16 x bfloat> [[MASKEDOFF]], <vscale x 16 x bfloat> [[OP1]], <vscale x 16 x bfloat> [[OP2]], <vscale x 16 x i1> [[MASK]], i64 7, i64 [[VL]], i64 0)
+// CHECK-RV64-NEXT: ret <vscale x 16 x bfloat> [[TMP0]]
+//
+vbfloat16m4_t test_vfadd_vv_bf16m4_tumu(vbool4_t mask, vbfloat16m4_t maskedoff, vbfloat16m4_t op1, vbfloat16m4_t op2, size_t vl) {
+ return __riscv_vfadd_tumu(mask, maskedoff, op1, op2, vl);
+}
+
+// CHECK-RV64-LABEL: define dso_local <vscale x 16 x bfloat> @test_vfadd_vf_bf16m4_tumu(
+// CHECK-RV64-SAME: <vscale x 16 x i1> [[MASK:%.*]], <vscale x 16 x bfloat> [[MASKEDOFF:%.*]], <vscale x 16 x bfloat> [[OP1:%.*]], bfloat noundef [[OP2:%.*]], i64 noundef [[VL:%.*]]) #[[ATTR0]] {
+// CHECK-RV64-NEXT: [[ENTRY:.*:]]
+// CHECK-RV64-NEXT: [[TMP0:%.*]] = call <vscale x 16 x bfloat> @llvm.riscv.vfadd.mask.nxv16bf16.bf16.i64(<vscale x 16 x bfloat> [[MASKEDOFF]], <vscale x 16 x bfloat> [[OP1]], bfloat [[OP2]], <vscale x 16 x i1> [[MASK]], i64 7, i64 [[VL]], i64 0)
+// CHECK-RV64-NEXT: ret <vscale x 16 x bfloat> [[TMP0]]
+//
+vbfloat16m4_t test_vfadd_vf_bf16m4_tumu(vbool4_t mask, vbfloat16m4_t maskedoff, vbfloat16m4_t op1, __bf16 op2, size_t vl) {
+ return __riscv_vfadd_tumu(mask, maskedoff, op1, op2, vl);
+}
+
+// CHECK-RV64-LABEL: define dso_local <vscale x 32 x bfloat> @test_vfadd_vv_bf16m8_tumu(
+// CHECK-RV64-SAME: <vscale x 32 x i1> [[MASK:%.*]], <vscale x 32 x bfloat> [[MASKEDOFF:%.*]], <vscale x 32 x bfloat> [[OP1:%.*]], <vscale x 32 x bfloat> [[OP2:%.*]], i64 noundef [[VL:%.*]]) #[[ATTR0]] {
+// CHECK-RV64-NEXT: [[ENTRY:.*:]]
+// CHECK-RV64-NEXT: [[TMP0:%.*]] = call <vscale x 32 x bfloat> @llvm.riscv.vfadd.mask.nxv32bf16.nxv32bf16.i64(<vscale x 32 x bfloat> [[MASKEDOFF]], <vscale x 32 x bfloat> [[OP1]], <vscale x 32 x bfloat> [[OP2]], <vscale x 32 x i1> [[MASK]], i64 7, i64 [[VL]], i64 0)
+// CHECK-RV64-NEXT: ret <vscale x 32 x bfloat> [[TMP0]]
+//
+vbfloat16m8_t test_vfadd_vv_bf16m8_tumu(vbool2_t mask, vbfloat16m8_t maskedoff, vbfloat16m8_t op1, vbfloat16m8_t op2, size_t vl) {
+ return __riscv_vfadd_tumu(mask, maskedoff, op1, op2, vl);
+}
+
+// CHECK-RV64-LABEL: define dso_local <vscale x 32 x bfloat> @test_vfadd_vf_bf16m8_tumu(
+// CHECK-RV64-SAME: <vscale x 32 x i1> [[MASK:%.*]], <vscale x 32 x bfloat> [[MASKEDOFF:%.*]], <vscale x 32 x bfloat> [[OP1:%.*]], bfloat noundef [[OP2:%.*]], i64 noundef [[VL:%.*]]) #[[ATTR0]] {
+// CHECK-RV64-NEXT: [[ENTRY:.*:]]
+// CHECK-RV64-NEXT: [[TMP0:%.*]] = call <vscale x 32 x bfloat> @llvm.riscv.vfadd.mask.nxv32bf16.bf16.i64(<vscale x 32 x bfloat> [[MASKEDOFF]], <vscale x 32 x bfloat> [[OP1]], bfloat [[OP2]], <vscale x 32 x i1> [[MASK]], i64 7, i64 [[VL]], i64 0)
+// CHECK-RV64-NEXT: ret <vscale x 32 x bfloat> [[TMP0]]
+//
+vbfloat16m8_t test_vfadd_vf_bf16m8_tumu(vbool2_t mask, vbfloat16m8_t maskedoff, vbfloat16m8_t op1, __bf16 op2, size_t vl) {
+ return __riscv_vfadd_tumu(mask, maskedoff, op1, op2, vl);
+}
+
+// CHECK-RV64-LABEL: define dso_local <vscale x 1 x bfloat> @test_vfadd_vv_bf16mf4_mu(
+// CHECK-RV64-SAME: <vscale x 1 x i1> [[MASK:%.*]], <vscale x 1 x bfloat> [[MASKEDOFF:%.*]], <vscale x 1 x bfloat> [[OP1:%.*]], <vscale x 1 x bfloat> [[OP2:%.*]], i64 noundef [[VL:%.*]]) #[[ATTR0]] {
+// CHECK-RV64-NEXT: [[ENTRY:.*:]]
+// CHECK-RV64-NEXT: [[TMP0:%.*]] = call <vscale x 1 x bfloat> @llvm.riscv.vfadd.mask.nxv1bf16.nxv1bf16.i64(<vscale x 1 x bfloat> [[MASKEDOFF]], <vscale x 1 x bfloat> [[OP1]], <vscale x 1 x bfloat> [[OP2]], <vscale x 1 x i1> [[MASK]], i64 7, i64 [[VL]], i64 1)
+// CHECK-RV64-NEXT: ret <vscale x 1 x bfloat> [[TMP0]]
+//
+vbfloat16mf4_t test_vfadd_vv_bf16mf4_mu(vbool64_t mask, vbfloat16mf4_t maskedoff, vbfloat16mf4_t op1, vbfloat16mf4_t op2, size_t vl) {
+ return __riscv_vfadd_mu(mask, maskedoff, op1, op2, vl);
+}
+
+// CHECK-RV64-LABEL: define dso_local <vscale x 1 x bfloat> @test_vfadd_vf_bf16mf4_mu(
+// CHECK-RV64-SAME: <vscale x 1 x i1> [[MASK:%.*]], <vscale x 1 x bfloat> [[MASKEDOFF:%.*]], <vscale x 1 x bfloat> [[OP1:%.*]], bfloat noundef [[OP2:%.*]], i64 noundef [[VL:%.*]]) #[[ATTR0]] {
+// CHECK-RV64-NEXT: [[ENTRY:.*:]]
+// CHECK-RV64-NEXT: [[TMP0:%.*]] = call <vscale x 1 x bfloat> @llvm.riscv.vfadd.mask.nxv1bf16.bf16.i64(<vscale x 1 x bfloat> [[MASKEDOFF]], <vscale x 1 x bfloat> [[OP1]], bfloat [[OP2]], <vscale x 1 x i1> [[MASK]], i64 7, i64 [[VL]], i64 1)
+// CHECK-RV64-NEXT: ret <vscale x 1 x bfloat> [[TMP0]]
+//
+vbfloat16mf4_t test_vfadd_vf_bf16mf4_mu(vbool64_t mask, vbfloat16mf4_t maskedoff, vbfloat16mf4_t op1, __bf16 op2, size_t vl) {
+ return __riscv_vfadd_mu(mask, maskedoff, op1, op2, vl);
+}
+
+// CHECK-RV64-LABEL: define dso_local <vscale x 2 x bfloat> @test_vfadd_vv_bf16mf2_mu(
+// CHECK-RV64-SAME: <vscale x 2 x i1> [[MASK:%.*]], <vscale x 2 x bfloat> [[MASKEDOFF:%.*]], <vscale x 2 x bfloat> [[OP1:%.*]], <vscale x 2 x bfloat> [[OP2:%.*]], i64 noundef [[VL:%.*]]) #[[ATTR0]] {
+// CHECK-RV64-NEXT: [[ENTRY:.*:]]
+// CHECK-RV64-NEXT: [[TMP0:%.*]] = call <vscale x 2 x bfloat> @llvm.riscv.vfadd.mask.nxv2bf16.nxv2bf16.i64(<vscale x 2 x bfloat> [[MASKEDOFF]], <vscale x 2 x bfloat> [[OP1]], <vscale x 2 x bfloat> [[OP2]], <vscale x 2 x i1> [[MASK]], i64 7, i64 [[VL]], i64 1)
+// CHECK-RV64-NEXT: ret <vscale x 2 x bfloat> [[TMP0]]
+//
+vbfloat16mf2_t test_vfadd_vv_bf16mf2_mu(vbool32_t mask, vbfloat16mf2_t maskedoff, vbfloat16mf2_t op1, vbfloat16mf2_t op2, size_t vl) {
+ return __riscv_vfadd_mu(mask, maskedoff, op1, op2, vl);
+}
+
+// CHECK-RV64-LABEL: define dso_local <vscale x 2 x bfloat> @test_vfadd_vf_bf16mf2_mu(
+// CHECK-RV64-SAME: <vscale x 2 x i1> [[MASK:%.*]], <vscale x 2 x bfloat> [[MASKEDOFF:%.*]], <vscale x 2 x bfloat> [[OP1:%.*]], bfloat noundef [[OP2:%.*]], i64 noundef [[VL:%.*]]) #[[ATTR0]] {
+// CHECK-RV64-NEXT: [[ENTRY:.*:]]
+// CHECK-RV64-NEXT: [[TMP0:%.*]] = call <vscale x 2 x bfloat> @llvm.riscv.vfadd.mask.nxv2bf16.bf16.i64(<vscale x 2 x bfloat> [[MASKEDOFF]], <vscale x 2 x bfloat> [[OP1]], bfloat [[OP2]], <vscale x 2 x i1> [[MASK]], i64 7, i64 [[VL]], i64 1)
+// CHECK-RV64-NEXT: ret <vscale x 2 x bfloat> [[TMP0]]
+//
+vbfloat16mf2_t test_vfadd_vf_bf16mf2_mu(vbool32_t mask, vbfloat16mf2_t maskedoff, vbfloat16mf2_t op1, __bf16 op2, size_t vl) {
+ return __riscv_vfadd_mu(mask, maskedoff, op1, op2, vl);
+}
+
+// CHECK-RV64-LABEL: define dso_local <vscale x 4 x bfloat> @test_vfadd_vv_bf16m1_mu(
+// CHECK-RV64-SAME: <vscale x 4 x i1> [[MASK:%.*]], <vscale x 4 x bfloat> [[MASKEDOFF:%.*]], <vscale x 4 x bfloat> [[OP1:%.*]], <vscale x 4 x bfloat> [[OP2:%.*]], i64 noundef [[VL:%.*]]) #[[ATTR0]] {
+// CHECK-RV64-NEXT: [[ENTRY:.*:]]
+// CHECK-RV64-NEXT: [[TMP0:%.*]] = call <vscale x 4 x bfloat> @llvm.riscv.vfadd.mask.nxv4bf16.nxv4bf16.i64(<vscale x 4 x bfloat> [[MASKEDOFF]], <vscale x 4 x bfloat> [[OP1]], <vscale x 4 x bfloat> [[OP2]], <vscale x 4 x i1> [[MASK]], i64 7, i64 [[VL]], i64 1)
+// CHECK-RV64-NEXT: ret <vscale x 4 x bfloat> [[TMP0]]
+//
+vbfloat16m1_t test_vfadd_vv_bf16m1_mu(vbool16_t mask, vbfloat16m1_t maskedoff, vbfloat16m1_t op1, vbfloat16m1_t op2, size_t vl) {
+ return __riscv_vfadd_mu(mask, maskedoff, op1, op2, vl);
+}
+
+// CHECK-RV64-LABEL: define dso_local <vscale x 4 x bfloat> @test_vfadd_vf_bf16m1_mu(
+// CHECK-RV64-SAME: <vscale x 4 x i1> [[MASK:%.*]], <vscale x 4 x bfloat> [[MASKEDOFF:%.*]], <vscale x 4 x bfloat> [[OP1:%.*]], bfloat noundef [[OP2:%.*]], i64 noundef [[VL:%.*]]) #[[ATTR0]] {
+// CHECK-RV64-NEXT: [[ENTRY:.*:]]
+// CHECK-RV64-NEXT: [[TMP0:%.*]] = call <vscale x 4 x bfloat> @llvm.riscv.vfadd.mask.nxv4bf16.bf16.i64(<vscale x 4 x bfloat> [[MASKEDOFF]], <vscale x 4 x bfloat> [[OP1]], bfloat [[OP2]], <vscale x 4 x i1> [[MASK]], i64 7, i64 [[VL]], i64 1)
+// CHECK-RV64-NEXT: ret <vscale x 4 x bfloat> [[TMP0]]
+//
+vbfloat16m1_t test_vfadd_vf_bf16m1_mu(vbool16_t mask, vbfloat16m1_t maskedoff, vbfloat16m1_t op1, __bf16 op2, size_t vl) {
+ return __riscv_vfadd_mu(mask, maskedoff, op1, op2, vl);
+}
+
+// CHECK-RV64-LABEL: define dso_local <vscale x 8 x bfloat> @test_vfadd_vv_bf16m2_mu(
+// CHECK-RV64-SAME: <vscale x 8 x i1> [[MASK:%.*]], <vscale x 8 x bfloat> [[MASKEDOFF:%.*]], <vscale x 8 x bfloat> [[OP1:%.*]], <vscale x 8 x bfloat> [[OP2:%.*]], i64 noundef [[VL:%.*]]) #[[ATTR0]] {
+// CHECK-RV64-NEXT: [[ENTRY:.*:]]
+// CHECK-RV64-NEXT: [[TMP0:%.*]] = call <vscale x 8 x bfloat> @llvm.riscv.vfadd.mask.nxv8bf16.nxv8bf16.i64(<vscale x 8 x bfloat> [[MASKEDOFF]], <vscale x 8 x bfloat> [[OP1]], <vscale x 8 x bfloat> [[OP2]], <vscale x 8 x i1> [[MASK]], i64 7, i64 [[VL]], i64 1)
+// CHECK-RV64-NEXT: ret <vscale x 8 x bfloat> [[TMP0]]
+//
+vbfloat16m2_t test_vfadd_vv_bf16m2_mu(vbool8_t mask, vbfloat16m2_t maskedoff, vbfloat16m2_t op1, vbfloat16m2_t op2, size_t vl) {
+ return __riscv_vfadd_mu(mask, maskedoff, op1, op2, vl);
+}
+
+// CHECK-RV64-LABEL: define dso_local <vscale x 8 x bfloat> @test_vfadd_vf_bf16m2_mu(
+// CHECK-RV64-SAME: <vscale x 8 x i1> [[MASK:%.*]], <vscale x 8 x bfloat> [[MASKEDOFF:%.*]], <vscale x 8 x bfloat> [[OP1:%.*]], bfloat noundef [[OP2:%.*]], i64 noundef [[VL:%.*]]) #[[ATTR0]] {
+// CHECK-RV64-NEXT: [[ENTRY:.*:]]
+// CHECK-RV64-NEXT: [[TMP0:%.*]] = call <vscale x 8 x bfloat> @llvm.riscv.vfadd.mask.nxv8bf16.bf16.i64(<vscale x 8 x bfloat> [[MASKEDOFF]], <vscale x 8 x bfloat> [[OP1]], bfloat [[OP2]], <vscale x 8 x i1> [[MASK]], i64 7, i64 [[VL]], i64 1)
+// CHECK-RV64-NEXT: ret <vscale x 8 x bfloat> [[TMP0]]
+//
+vbfloat16m2_t test_vfadd_vf_bf16m2_mu(vbool8_t mask, vbfloat16m2_t maskedoff, vbfloat16m2_t op1, __bf16 op2, size_t vl) {
+ return __riscv_vfadd_mu(mask, maskedoff, op1, op2, vl);
+}
+
+// CHECK-RV64-LABEL: define dso_local <vscale x 16 x bfloat> @test_vfadd_vv_bf16m4_mu(
+// CHECK-RV64-SAME: <vscale x 16 x i1> [[MASK:%.*]], <vscale x 16 x bfloat> [[MASKEDOFF:%.*]], <vscale x 16 x bfloat> [[OP1:%.*]], <vscale x 16 x bfloat> [[OP2:%.*]], i64 noundef [[VL:%.*]]) #[[ATTR0]] {
+// CHECK-RV64-NEXT: [[ENTRY:.*:]]
+// CHECK-RV64-NEXT: [[TMP0:%.*]] = call <vscale x 16 x bfloat> @llvm.riscv.vfadd.mask.nxv16bf16.nxv16bf16.i64(<vscale x 16 x bfloat> [[MASKEDOFF]], <vscale x 16 x bfloat> [[OP1]], <vscale x 16 x bfloat> [[OP2]], <vscale x 16 x i1> [[MASK]], i64 7, i64 [[VL]], i64 1)
+// CHECK-RV64-NEXT: ret <vscale x 16 x bfloat> [[TMP0]]
+//
+vbfloat16m4_t test_vfadd_vv_bf16m4_mu(vbool4_t mask, vbfloat16m4_t maskedoff, vbfloat16m4_t op1, vbfloat16m4_t op2, size_t vl) {
+ return __riscv_vfadd_mu(mask, maskedoff, op1, op2, vl);
+}
+
+// CHECK-RV64-LABEL: define dso_local <vscale x 16 x bfloat> @test_vfadd_vf_bf16m4_mu(
+// CHECK-RV64-SAME: <vscale x 16 x i1> [[MASK:%.*]], <vscale x 16 x bfloat> [[MASKEDOFF:%.*]], <vscale x 16 x bfloat> [[OP1:%.*]], bfloat noundef [[OP2:%.*]], i64 noundef [[VL:%.*]]) #[[ATTR0]] {
+// CHECK-RV64-NEXT: [[ENTRY:.*:]]
+// CHECK-RV64-NEXT: [[TMP0:%.*]] = call <vscale x 16 x bfloat> @llvm.riscv.vfadd.mask.nxv16bf16.bf16.i64(<vscale x 16 x bfloat> [[MASKEDOFF]], <vscale x 16 x bfloat> [[OP1]], bfloat [[OP2]], <vscale x 16 x i1> [[MASK]], i64 7, i64 [[VL]], i64 1)
+// CHECK-RV64-NEXT: ret <vscale x 16 x bfloat> [[TMP0]]
+//
+vbfloat16m4_t test_vfadd_vf_bf16m4_mu(vbool4_t mask, vbfloat16m4_t maskedoff, vbfloat16m4_t op1, __bf16 op2, size_t vl) {
+ return __riscv_vfadd_mu(mask, maskedoff, op1, op2, vl);
+}
+
+// CHECK-RV64-LABEL: define dso_local <vscale x 32 x bfloat> @test_vfadd_vv_bf16m8_mu(
+// CHECK-RV64-SAME: <vscale x 32 x i1> [[MASK:%.*]], <vscale x 32 x bfloat> [[MASKEDOFF:%.*]], <vscale x 32 x bfloat> [[OP1:%.*]], <vscale x 32 x bfloat> [[OP2:%.*]], i64 noundef [[VL:%.*]]) #[[ATTR0]] {
+// CHECK-RV64-NEXT: [[ENTRY:.*:]]
+// CHECK-RV64-NEXT: [[TMP0:%.*]] = call <vscale x 32 x bfloat> @llvm.riscv.vfadd.mask.nxv32bf16.nxv32bf16.i64(<vscale x 32 x bfloat> [[MASKEDOFF]], <vscale x 32 x bfloat> [[OP1]], <vscale x 32 x bfloat> [[OP2]], <vscale x 32 x i1> [[MASK]], i64 7, i64 [[VL]], i64 1)
+// CHECK-RV64-NEXT: ret <vscale x 32 x bfloat> [[TMP0]]
+//
+vbfloat16m8_t test_vfadd_vv_bf16m8_mu(vbool2_t mask, vbfloat16m8_t maskedoff, vbfloat16m8_t op1, vbfloat16m8_t op2, size_t vl) {
+ return __riscv_vfadd_mu(mask, maskedoff, op1, op2, vl);
+}
+
+// CHECK-RV64-LABEL: define dso_local <vscale x 32 x bfloat> @test_vfadd_vf_bf16m8_mu(
+// CHECK-RV64-SAME: <vscale x 32 x i1> [[MASK:%.*]], <vscale x 32 x bfloat> [[MASKEDOFF:%.*]], <vscale x 32 x bfloat> [[OP1:%.*]], bfloat noundef [[OP2:%.*]], i64 noundef [[VL:%.*]]) #[[ATTR0]] {
+// CHECK-RV64-NEXT: [[ENTRY:.*:]]
+// CHECK-RV64-NEXT: [[TMP0:%.*]] = call <vscale x 32 x bfloat> @llvm.riscv.vfadd.mask.nxv32bf16.bf16.i64(<vscale x 32 x bfloat> [[MASKEDOFF]], <vscale x 32 x bfloat> [[OP1]], bfloat [[OP2]], <vscale x 32 x i1> [[MASK]], i64 7, i64 [[VL]], i64 1)
+// CHECK-RV64-NEXT: ret <vscale x 32 x bfloat> [[TMP0]]
+//
+vbfloat16m8_t test_vfadd_vf_bf16m8_mu(vbool2_t mask, vbfloat16m8_t maskedoff, vbfloat16m8_t op1, __bf16 op2, size_t vl) {
+ return __riscv_vfadd_mu(mask, maskedoff, op1, op2, vl);
+}
+
diff --git a/clang/test/CodeGen/RISCV/rvv-intrinsics-autogenerated/zvfbfa/policy/overloaded/vfclass.c b/clang/test/CodeGen/RISCV/rvv-intrinsics-autogenerated/zvfbfa/policy/overloaded/vfclass.c
new file mode 100644
index 0000000..e2a993a
--- /dev/null
+++ b/clang/test/CodeGen/RISCV/rvv-intrinsics-autogenerated/zvfbfa/policy/overloaded/vfclass.c
@@ -0,0 +1,272 @@
+// NOTE: Assertions have been autogenerated by utils/update_cc_test_checks.py UTC_ARGS: --version 5
+// REQUIRES: riscv-registered-target
+// RUN: %clang_cc1 -triple riscv64 -target-feature +v \
+// RUN: -target-feature +experimental-zvfbfa -disable-O0-optnone \
+// RUN: -emit-llvm %s -o - | opt -S -passes=mem2reg | \
+// RUN: FileCheck --check-prefix=CHECK-RV64 %s
+
+#include <riscv_vector.h>
+
+// CHECK-RV64-LABEL: define dso_local <vscale x 1 x i16> @test_vfclass_v_bf16mf4_u16mf4_tu(
+// CHECK-RV64-SAME: <vscale x 1 x i16> [[VD:%.*]], <vscale x 1 x bfloat> [[VS2:%.*]], i64 noundef [[VL:%.*]]) #[[ATTR0:[0-9]+]] {
+// CHECK-RV64-NEXT: [[ENTRY:.*:]]
+// CHECK-RV64-NEXT: [[TMP0:%.*]] = call <vscale x 1 x i16> @llvm.riscv.vfclass.nxv1bf16.i64(<vscale x 1 x i16> [[VD]], <vscale x 1 x bfloat> [[VS2]], i64 [[VL]])
+// CHECK-RV64-NEXT: ret <vscale x 1 x i16> [[TMP0]]
+//
+vuint16mf4_t test_vfclass_v_bf16mf4_u16mf4_tu(vuint16mf4_t vd,
+ vbfloat16mf4_t vs2, size_t vl) {
+ return __riscv_vfclass_tu(vd, vs2, vl);
+}
+
+// CHECK-RV64-LABEL: define dso_local <vscale x 2 x i16> @test_vfclass_v_bf16mf2_u16mf2_tu(
+// CHECK-RV64-SAME: <vscale x 2 x i16> [[VD:%.*]], <vscale x 2 x bfloat> [[VS2:%.*]], i64 noundef [[VL:%.*]]) #[[ATTR0]] {
+// CHECK-RV64-NEXT: [[ENTRY:.*:]]
+// CHECK-RV64-NEXT: [[TMP0:%.*]] = call <vscale x 2 x i16> @llvm.riscv.vfclass.nxv2bf16.i64(<vscale x 2 x i16> [[VD]], <vscale x 2 x bfloat> [[VS2]], i64 [[VL]])
+// CHECK-RV64-NEXT: ret <vscale x 2 x i16> [[TMP0]]
+//
+vuint16mf2_t test_vfclass_v_bf16mf2_u16mf2_tu(vuint16mf2_t vd,
+ vbfloat16mf2_t vs2, size_t vl) {
+ return __riscv_vfclass_tu(vd, vs2, vl);
+}
+
+// CHECK-RV64-LABEL: define dso_local <vscale x 4 x i16> @test_vfclass_v_bf16m1_u16m1_tu(
+// CHECK-RV64-SAME: <vscale x 4 x i16> [[VD:%.*]], <vscale x 4 x bfloat> [[VS2:%.*]], i64 noundef [[VL:%.*]]) #[[ATTR0]] {
+// CHECK-RV64-NEXT: [[ENTRY:.*:]]
+// CHECK-RV64-NEXT: [[TMP0:%.*]] = call <vscale x 4 x i16> @llvm.riscv.vfclass.nxv4bf16.i64(<vscale x 4 x i16> [[VD]], <vscale x 4 x bfloat> [[VS2]], i64 [[VL]])
+// CHECK-RV64-NEXT: ret <vscale x 4 x i16> [[TMP0]]
+//
+vuint16m1_t test_vfclass_v_bf16m1_u16m1_tu(vuint16m1_t vd, vbfloat16m1_t vs2,
+ size_t vl) {
+ return __riscv_vfclass_tu(vd, vs2, vl);
+}
+
+// CHECK-RV64-LABEL: define dso_local <vscale x 8 x i16> @test_vfclass_v_bf16m2_u16m2_tu(
+// CHECK-RV64-SAME: <vscale x 8 x i16> [[VD:%.*]], <vscale x 8 x bfloat> [[VS2:%.*]], i64 noundef [[VL:%.*]]) #[[ATTR0]] {
+// CHECK-RV64-NEXT: [[ENTRY:.*:]]
+// CHECK-RV64-NEXT: [[TMP0:%.*]] = call <vscale x 8 x i16> @llvm.riscv.vfclass.nxv8bf16.i64(<vscale x 8 x i16> [[VD]], <vscale x 8 x bfloat> [[VS2]], i64 [[VL]])
+// CHECK-RV64-NEXT: ret <vscale x 8 x i16> [[TMP0]]
+//
+vuint16m2_t test_vfclass_v_bf16m2_u16m2_tu(vuint16m2_t vd, vbfloat16m2_t vs2,
+ size_t vl) {
+ return __riscv_vfclass_tu(vd, vs2, vl);
+}
+
+// CHECK-RV64-LABEL: define dso_local <vscale x 16 x i16> @test_vfclass_v_bf16m4_u16m4_tu(
+// CHECK-RV64-SAME: <vscale x 16 x i16> [[VD:%.*]], <vscale x 16 x bfloat> [[VS2:%.*]], i64 noundef [[VL:%.*]]) #[[ATTR0]] {
+// CHECK-RV64-NEXT: [[ENTRY:.*:]]
+// CHECK-RV64-NEXT: [[TMP0:%.*]] = call <vscale x 16 x i16> @llvm.riscv.vfclass.nxv16bf16.i64(<vscale x 16 x i16> [[VD]], <vscale x 16 x bfloat> [[VS2]], i64 [[VL]])
+// CHECK-RV64-NEXT: ret <vscale x 16 x i16> [[TMP0]]
+//
+vuint16m4_t test_vfclass_v_bf16m4_u16m4_tu(vuint16m4_t vd, vbfloat16m4_t vs2,
+ size_t vl) {
+ return __riscv_vfclass_tu(vd, vs2, vl);
+}
+
+// CHECK-RV64-LABEL: define dso_local <vscale x 32 x i16> @test_vfclass_v_bf16m8_u16m8_tu(
+// CHECK-RV64-SAME: <vscale x 32 x i16> [[VD:%.*]], <vscale x 32 x bfloat> [[VS2:%.*]], i64 noundef [[VL:%.*]]) #[[ATTR0]] {
+// CHECK-RV64-NEXT: [[ENTRY:.*:]]
+// CHECK-RV64-NEXT: [[TMP0:%.*]] = call <vscale x 32 x i16> @llvm.riscv.vfclass.nxv32bf16.i64(<vscale x 32 x i16> [[VD]], <vscale x 32 x bfloat> [[VS2]], i64 [[VL]])
+// CHECK-RV64-NEXT: ret <vscale x 32 x i16> [[TMP0]]
+//
+vuint16m8_t test_vfclass_v_bf16m8_u16m8_tu(vuint16m8_t vd, vbfloat16m8_t vs2,
+ size_t vl) {
+ return __riscv_vfclass_tu(vd, vs2, vl);
+}
+
+// CHECK-RV64-LABEL: define dso_local <vscale x 1 x i16> @test_vfclass_v_bf16mf4_u16mf4_tum(
+// CHECK-RV64-SAME: <vscale x 1 x i1> [[VM:%.*]], <vscale x 1 x i16> [[VD:%.*]], <vscale x 1 x bfloat> [[VS2:%.*]], i64 noundef [[VL:%.*]]) #[[ATTR0]] {
+// CHECK-RV64-NEXT: [[ENTRY:.*:]]
+// CHECK-RV64-NEXT: [[TMP0:%.*]] = call <vscale x 1 x i16> @llvm.riscv.vfclass.mask.nxv1bf16.i64(<vscale x 1 x i16> [[VD]], <vscale x 1 x bfloat> [[VS2]], <vscale x 1 x i1> [[VM]], i64 [[VL]], i64 2)
+// CHECK-RV64-NEXT: ret <vscale x 1 x i16> [[TMP0]]
+//
+vuint16mf4_t test_vfclass_v_bf16mf4_u16mf4_tum(vbool64_t vm, vuint16mf4_t vd,
+ vbfloat16mf4_t vs2, size_t vl) {
+ return __riscv_vfclass_tum(vm, vd, vs2, vl);
+}
+
+// CHECK-RV64-LABEL: define dso_local <vscale x 2 x i16> @test_vfclass_v_bf16mf2_u16mf2_tum(
+// CHECK-RV64-SAME: <vscale x 2 x i1> [[VM:%.*]], <vscale x 2 x i16> [[VD:%.*]], <vscale x 2 x bfloat> [[VS2:%.*]], i64 noundef [[VL:%.*]]) #[[ATTR0]] {
+// CHECK-RV64-NEXT: [[ENTRY:.*:]]
+// CHECK-RV64-NEXT: [[TMP0:%.*]] = call <vscale x 2 x i16> @llvm.riscv.vfclass.mask.nxv2bf16.i64(<vscale x 2 x i16> [[VD]], <vscale x 2 x bfloat> [[VS2]], <vscale x 2 x i1> [[VM]], i64 [[VL]], i64 2)
+// CHECK-RV64-NEXT: ret <vscale x 2 x i16> [[TMP0]]
+//
+vuint16mf2_t test_vfclass_v_bf16mf2_u16mf2_tum(vbool32_t vm, vuint16mf2_t vd,
+ vbfloat16mf2_t vs2, size_t vl) {
+ return __riscv_vfclass_tum(vm, vd, vs2, vl);
+}
+
+// CHECK-RV64-LABEL: define dso_local <vscale x 4 x i16> @test_vfclass_v_bf16m1_u16m1_tum(
+// CHECK-RV64-SAME: <vscale x 4 x i1> [[VM:%.*]], <vscale x 4 x i16> [[VD:%.*]], <vscale x 4 x bfloat> [[VS2:%.*]], i64 noundef [[VL:%.*]]) #[[ATTR0]] {
+// CHECK-RV64-NEXT: [[ENTRY:.*:]]
+// CHECK-RV64-NEXT: [[TMP0:%.*]] = call <vscale x 4 x i16> @llvm.riscv.vfclass.mask.nxv4bf16.i64(<vscale x 4 x i16> [[VD]], <vscale x 4 x bfloat> [[VS2]], <vscale x 4 x i1> [[VM]], i64 [[VL]], i64 2)
+// CHECK-RV64-NEXT: ret <vscale x 4 x i16> [[TMP0]]
+//
+vuint16m1_t test_vfclass_v_bf16m1_u16m1_tum(vbool16_t vm, vuint16m1_t vd,
+ vbfloat16m1_t vs2, size_t vl) {
+ return __riscv_vfclass_tum(vm, vd, vs2, vl);
+}
+
+// CHECK-RV64-LABEL: define dso_local <vscale x 8 x i16> @test_vfclass_v_bf16m2_u16m2_tum(
+// CHECK-RV64-SAME: <vscale x 8 x i1> [[VM:%.*]], <vscale x 8 x i16> [[VD:%.*]], <vscale x 8 x bfloat> [[VS2:%.*]], i64 noundef [[VL:%.*]]) #[[ATTR0]] {
+// CHECK-RV64-NEXT: [[ENTRY:.*:]]
+// CHECK-RV64-NEXT: [[TMP0:%.*]] = call <vscale x 8 x i16> @llvm.riscv.vfclass.mask.nxv8bf16.i64(<vscale x 8 x i16> [[VD]], <vscale x 8 x bfloat> [[VS2]], <vscale x 8 x i1> [[VM]], i64 [[VL]], i64 2)
+// CHECK-RV64-NEXT: ret <vscale x 8 x i16> [[TMP0]]
+//
+vuint16m2_t test_vfclass_v_bf16m2_u16m2_tum(vbool8_t vm, vuint16m2_t vd,
+ vbfloat16m2_t vs2, size_t vl) {
+ return __riscv_vfclass_tum(vm, vd, vs2, vl);
+}
+
+// CHECK-RV64-LABEL: define dso_local <vscale x 16 x i16> @test_vfclass_v_bf16m4_u16m4_tum(
+// CHECK-RV64-SAME: <vscale x 16 x i1> [[VM:%.*]], <vscale x 16 x i16> [[VD:%.*]], <vscale x 16 x bfloat> [[VS2:%.*]], i64 noundef [[VL:%.*]]) #[[ATTR0]] {
+// CHECK-RV64-NEXT: [[ENTRY:.*:]]
+// CHECK-RV64-NEXT: [[TMP0:%.*]] = call <vscale x 16 x i16> @llvm.riscv.vfclass.mask.nxv16bf16.i64(<vscale x 16 x i16> [[VD]], <vscale x 16 x bfloat> [[VS2]], <vscale x 16 x i1> [[VM]], i64 [[VL]], i64 2)
+// CHECK-RV64-NEXT: ret <vscale x 16 x i16> [[TMP0]]
+//
+vuint16m4_t test_vfclass_v_bf16m4_u16m4_tum(vbool4_t vm, vuint16m4_t vd,
+ vbfloat16m4_t vs2, size_t vl) {
+ return __riscv_vfclass_tum(vm, vd, vs2, vl);
+}
+
+// CHECK-RV64-LABEL: define dso_local <vscale x 32 x i16> @test_vfclass_v_bf16m8_u16m8_tum(
+// CHECK-RV64-SAME: <vscale x 32 x i1> [[VM:%.*]], <vscale x 32 x i16> [[VD:%.*]], <vscale x 32 x bfloat> [[VS2:%.*]], i64 noundef [[VL:%.*]]) #[[ATTR0]] {
+// CHECK-RV64-NEXT: [[ENTRY:.*:]]
+// CHECK-RV64-NEXT: [[TMP0:%.*]] = call <vscale x 32 x i16> @llvm.riscv.vfclass.mask.nxv32bf16.i64(<vscale x 32 x i16> [[VD]], <vscale x 32 x bfloat> [[VS2]], <vscale x 32 x i1> [[VM]], i64 [[VL]], i64 2)
+// CHECK-RV64-NEXT: ret <vscale x 32 x i16> [[TMP0]]
+//
+vuint16m8_t test_vfclass_v_bf16m8_u16m8_tum(vbool2_t vm, vuint16m8_t vd,
+ vbfloat16m8_t vs2, size_t vl) {
+ return __riscv_vfclass_tum(vm, vd, vs2, vl);
+}
+
+// CHECK-RV64-LABEL: define dso_local <vscale x 1 x i16> @test_vfclass_v_bf16mf4_u16mf4_tumu(
+// CHECK-RV64-SAME: <vscale x 1 x i1> [[VM:%.*]], <vscale x 1 x i16> [[VD:%.*]], <vscale x 1 x bfloat> [[VS2:%.*]], i64 noundef [[VL:%.*]]) #[[ATTR0]] {
+// CHECK-RV64-NEXT: [[ENTRY:.*:]]
+// CHECK-RV64-NEXT: [[TMP0:%.*]] = call <vscale x 1 x i16> @llvm.riscv.vfclass.mask.nxv1bf16.i64(<vscale x 1 x i16> [[VD]], <vscale x 1 x bfloat> [[VS2]], <vscale x 1 x i1> [[VM]], i64 [[VL]], i64 0)
+// CHECK-RV64-NEXT: ret <vscale x 1 x i16> [[TMP0]]
+//
+vuint16mf4_t test_vfclass_v_bf16mf4_u16mf4_tumu(vbool64_t vm, vuint16mf4_t vd,
+ vbfloat16mf4_t vs2, size_t vl) {
+ return __riscv_vfclass_tumu(vm, vd, vs2, vl);
+}
+
+// CHECK-RV64-LABEL: define dso_local <vscale x 2 x i16> @test_vfclass_v_bf16mf2_u16mf2_tumu(
+// CHECK-RV64-SAME: <vscale x 2 x i1> [[VM:%.*]], <vscale x 2 x i16> [[VD:%.*]], <vscale x 2 x bfloat> [[VS2:%.*]], i64 noundef [[VL:%.*]]) #[[ATTR0]] {
+// CHECK-RV64-NEXT: [[ENTRY:.*:]]
+// CHECK-RV64-NEXT: [[TMP0:%.*]] = call <vscale x 2 x i16> @llvm.riscv.vfclass.mask.nxv2bf16.i64(<vscale x 2 x i16> [[VD]], <vscale x 2 x bfloat> [[VS2]], <vscale x 2 x i1> [[VM]], i64 [[VL]], i64 0)
+// CHECK-RV64-NEXT: ret <vscale x 2 x i16> [[TMP0]]
+//
+vuint16mf2_t test_vfclass_v_bf16mf2_u16mf2_tumu(vbool32_t vm, vuint16mf2_t vd,
+ vbfloat16mf2_t vs2, size_t vl) {
+ return __riscv_vfclass_tumu(vm, vd, vs2, vl);
+}
+
+// CHECK-RV64-LABEL: define dso_local <vscale x 4 x i16> @test_vfclass_v_bf16m1_u16m1_tumu(
+// CHECK-RV64-SAME: <vscale x 4 x i1> [[VM:%.*]], <vscale x 4 x i16> [[VD:%.*]], <vscale x 4 x bfloat> [[VS2:%.*]], i64 noundef [[VL:%.*]]) #[[ATTR0]] {
+// CHECK-RV64-NEXT: [[ENTRY:.*:]]
+// CHECK-RV64-NEXT: [[TMP0:%.*]] = call <vscale x 4 x i16> @llvm.riscv.vfclass.mask.nxv4bf16.i64(<vscale x 4 x i16> [[VD]], <vscale x 4 x bfloat> [[VS2]], <vscale x 4 x i1> [[VM]], i64 [[VL]], i64 0)
+// CHECK-RV64-NEXT: ret <vscale x 4 x i16> [[TMP0]]
+//
+vuint16m1_t test_vfclass_v_bf16m1_u16m1_tumu(vbool16_t vm, vuint16m1_t vd,
+ vbfloat16m1_t vs2, size_t vl) {
+ return __riscv_vfclass_tumu(vm, vd, vs2, vl);
+}
+
+// CHECK-RV64-LABEL: define dso_local <vscale x 8 x i16> @test_vfclass_v_bf16m2_u16m2_tumu(
+// CHECK-RV64-SAME: <vscale x 8 x i1> [[VM:%.*]], <vscale x 8 x i16> [[VD:%.*]], <vscale x 8 x bfloat> [[VS2:%.*]], i64 noundef [[VL:%.*]]) #[[ATTR0]] {
+// CHECK-RV64-NEXT: [[ENTRY:.*:]]
+// CHECK-RV64-NEXT: [[TMP0:%.*]] = call <vscale x 8 x i16> @llvm.riscv.vfclass.mask.nxv8bf16.i64(<vscale x 8 x i16> [[VD]], <vscale x 8 x bfloat> [[VS2]], <vscale x 8 x i1> [[VM]], i64 [[VL]], i64 0)
+// CHECK-RV64-NEXT: ret <vscale x 8 x i16> [[TMP0]]
+//
+vuint16m2_t test_vfclass_v_bf16m2_u16m2_tumu(vbool8_t vm, vuint16m2_t vd,
+ vbfloat16m2_t vs2, size_t vl) {
+ return __riscv_vfclass_tumu(vm, vd, vs2, vl);
+}
+
+// CHECK-RV64-LABEL: define dso_local <vscale x 16 x i16> @test_vfclass_v_bf16m4_u16m4_tumu(
+// CHECK-RV64-SAME: <vscale x 16 x i1> [[VM:%.*]], <vscale x 16 x i16> [[VD:%.*]], <vscale x 16 x bfloat> [[VS2:%.*]], i64 noundef [[VL:%.*]]) #[[ATTR0]] {
+// CHECK-RV64-NEXT: [[ENTRY:.*:]]
+// CHECK-RV64-NEXT: [[TMP0:%.*]] = call <vscale x 16 x i16> @llvm.riscv.vfclass.mask.nxv16bf16.i64(<vscale x 16 x i16> [[VD]], <vscale x 16 x bfloat> [[VS2]], <vscale x 16 x i1> [[VM]], i64 [[VL]], i64 0)
+// CHECK-RV64-NEXT: ret <vscale x 16 x i16> [[TMP0]]
+//
+vuint16m4_t test_vfclass_v_bf16m4_u16m4_tumu(vbool4_t vm, vuint16m4_t vd,
+ vbfloat16m4_t vs2, size_t vl) {
+ return __riscv_vfclass_tumu(vm, vd, vs2, vl);
+}
+
+// CHECK-RV64-LABEL: define dso_local <vscale x 32 x i16> @test_vfclass_v_bf16m8_u16m8_tumu(
+// CHECK-RV64-SAME: <vscale x 32 x i1> [[VM:%.*]], <vscale x 32 x i16> [[VD:%.*]], <vscale x 32 x bfloat> [[VS2:%.*]], i64 noundef [[VL:%.*]]) #[[ATTR0]] {
+// CHECK-RV64-NEXT: [[ENTRY:.*:]]
+// CHECK-RV64-NEXT: [[TMP0:%.*]] = call <vscale x 32 x i16> @llvm.riscv.vfclass.mask.nxv32bf16.i64(<vscale x 32 x i16> [[VD]], <vscale x 32 x bfloat> [[VS2]], <vscale x 32 x i1> [[VM]], i64 [[VL]], i64 0)
+// CHECK-RV64-NEXT: ret <vscale x 32 x i16> [[TMP0]]
+//
+vuint16m8_t test_vfclass_v_bf16m8_u16m8_tumu(vbool2_t vm, vuint16m8_t vd,
+ vbfloat16m8_t vs2, size_t vl) {
+ return __riscv_vfclass_tumu(vm, vd, vs2, vl);
+}
+
+// CHECK-RV64-LABEL: define dso_local <vscale x 1 x i16> @test_vfclass_v_bf16mf4_u16mf4_mu(
+// CHECK-RV64-SAME: <vscale x 1 x i1> [[VM:%.*]], <vscale x 1 x i16> [[VD:%.*]], <vscale x 1 x bfloat> [[VS2:%.*]], i64 noundef [[VL:%.*]]) #[[ATTR0]] {
+// CHECK-RV64-NEXT: [[ENTRY:.*:]]
+// CHECK-RV64-NEXT: [[TMP0:%.*]] = call <vscale x 1 x i16> @llvm.riscv.vfclass.mask.nxv1bf16.i64(<vscale x 1 x i16> [[VD]], <vscale x 1 x bfloat> [[VS2]], <vscale x 1 x i1> [[VM]], i64 [[VL]], i64 1)
+// CHECK-RV64-NEXT: ret <vscale x 1 x i16> [[TMP0]]
+//
+vuint16mf4_t test_vfclass_v_bf16mf4_u16mf4_mu(vbool64_t vm, vuint16mf4_t vd,
+ vbfloat16mf4_t vs2, size_t vl) {
+ return __riscv_vfclass_mu(vm, vd, vs2, vl);
+}
+
+// CHECK-RV64-LABEL: define dso_local <vscale x 2 x i16> @test_vfclass_v_bf16mf2_u16mf2_mu(
+// CHECK-RV64-SAME: <vscale x 2 x i1> [[VM:%.*]], <vscale x 2 x i16> [[VD:%.*]], <vscale x 2 x bfloat> [[VS2:%.*]], i64 noundef [[VL:%.*]]) #[[ATTR0]] {
+// CHECK-RV64-NEXT: [[ENTRY:.*:]]
+// CHECK-RV64-NEXT: [[TMP0:%.*]] = call <vscale x 2 x i16> @llvm.riscv.vfclass.mask.nxv2bf16.i64(<vscale x 2 x i16> [[VD]], <vscale x 2 x bfloat> [[VS2]], <vscale x 2 x i1> [[VM]], i64 [[VL]], i64 1)
+// CHECK-RV64-NEXT: ret <vscale x 2 x i16> [[TMP0]]
+//
+vuint16mf2_t test_vfclass_v_bf16mf2_u16mf2_mu(vbool32_t vm, vuint16mf2_t vd,
+ vbfloat16mf2_t vs2, size_t vl) {
+ return __riscv_vfclass_mu(vm, vd, vs2, vl);
+}
+
+// CHECK-RV64-LABEL: define dso_local <vscale x 4 x i16> @test_vfclass_v_bf16m1_u16m1_mu(
+// CHECK-RV64-SAME: <vscale x 4 x i1> [[VM:%.*]], <vscale x 4 x i16> [[VD:%.*]], <vscale x 4 x bfloat> [[VS2:%.*]], i64 noundef [[VL:%.*]]) #[[ATTR0]] {
+// CHECK-RV64-NEXT: [[ENTRY:.*:]]
+// CHECK-RV64-NEXT: [[TMP0:%.*]] = call <vscale x 4 x i16> @llvm.riscv.vfclass.mask.nxv4bf16.i64(<vscale x 4 x i16> [[VD]], <vscale x 4 x bfloat> [[VS2]], <vscale x 4 x i1> [[VM]], i64 [[VL]], i64 1)
+// CHECK-RV64-NEXT: ret <vscale x 4 x i16> [[TMP0]]
+//
+vuint16m1_t test_vfclass_v_bf16m1_u16m1_mu(vbool16_t vm, vuint16m1_t vd,
+ vbfloat16m1_t vs2, size_t vl) {
+ return __riscv_vfclass_mu(vm, vd, vs2, vl);
+}
+
+// CHECK-RV64-LABEL: define dso_local <vscale x 8 x i16> @test_vfclass_v_bf16m2_u16m2_mu(
+// CHECK-RV64-SAME: <vscale x 8 x i1> [[VM:%.*]], <vscale x 8 x i16> [[VD:%.*]], <vscale x 8 x bfloat> [[VS2:%.*]], i64 noundef [[VL:%.*]]) #[[ATTR0]] {
+// CHECK-RV64-NEXT: [[ENTRY:.*:]]
+// CHECK-RV64-NEXT: [[TMP0:%.*]] = call <vscale x 8 x i16> @llvm.riscv.vfclass.mask.nxv8bf16.i64(<vscale x 8 x i16> [[VD]], <vscale x 8 x bfloat> [[VS2]], <vscale x 8 x i1> [[VM]], i64 [[VL]], i64 1)
+// CHECK-RV64-NEXT: ret <vscale x 8 x i16> [[TMP0]]
+//
+vuint16m2_t test_vfclass_v_bf16m2_u16m2_mu(vbool8_t vm, vuint16m2_t vd,
+ vbfloat16m2_t vs2, size_t vl) {
+ return __riscv_vfclass_mu(vm, vd, vs2, vl);
+}
+
+// CHECK-RV64-LABEL: define dso_local <vscale x 16 x i16> @test_vfclass_v_bf16m4_u16m4_mu(
+// CHECK-RV64-SAME: <vscale x 16 x i1> [[VM:%.*]], <vscale x 16 x i16> [[VD:%.*]], <vscale x 16 x bfloat> [[VS2:%.*]], i64 noundef [[VL:%.*]]) #[[ATTR0]] {
+// CHECK-RV64-NEXT: [[ENTRY:.*:]]
+// CHECK-RV64-NEXT: [[TMP0:%.*]] = call <vscale x 16 x i16> @llvm.riscv.vfclass.mask.nxv16bf16.i64(<vscale x 16 x i16> [[VD]], <vscale x 16 x bfloat> [[VS2]], <vscale x 16 x i1> [[VM]], i64 [[VL]], i64 1)
+// CHECK-RV64-NEXT: ret <vscale x 16 x i16> [[TMP0]]
+//
+vuint16m4_t test_vfclass_v_bf16m4_u16m4_mu(vbool4_t vm, vuint16m4_t vd,
+ vbfloat16m4_t vs2, size_t vl) {
+ return __riscv_vfclass_mu(vm, vd, vs2, vl);
+}
+
+// CHECK-RV64-LABEL: define dso_local <vscale x 32 x i16> @test_vfclass_v_bf16m8_u16m8_mu(
+// CHECK-RV64-SAME: <vscale x 32 x i1> [[VM:%.*]], <vscale x 32 x i16> [[VD:%.*]], <vscale x 32 x bfloat> [[VS2:%.*]], i64 noundef [[VL:%.*]]) #[[ATTR0]] {
+// CHECK-RV64-NEXT: [[ENTRY:.*:]]
+// CHECK-RV64-NEXT: [[TMP0:%.*]] = call <vscale x 32 x i16> @llvm.riscv.vfclass.mask.nxv32bf16.i64(<vscale x 32 x i16> [[VD]], <vscale x 32 x bfloat> [[VS2]], <vscale x 32 x i1> [[VM]], i64 [[VL]], i64 1)
+// CHECK-RV64-NEXT: ret <vscale x 32 x i16> [[TMP0]]
+//
+vuint16m8_t test_vfclass_v_bf16m8_u16m8_mu(vbool2_t vm, vuint16m8_t vd,
+ vbfloat16m8_t vs2, size_t vl) {
+ return __riscv_vfclass_mu(vm, vd, vs2, vl);
+}
diff --git a/clang/test/CodeGen/RISCV/rvv-intrinsics-autogenerated/zvfbfa/policy/overloaded/vfmacc.c b/clang/test/CodeGen/RISCV/rvv-intrinsics-autogenerated/zvfbfa/policy/overloaded/vfmacc.c
new file mode 100644
index 0000000..eb74271
--- /dev/null
+++ b/clang/test/CodeGen/RISCV/rvv-intrinsics-autogenerated/zvfbfa/policy/overloaded/vfmacc.c
@@ -0,0 +1,489 @@
+// NOTE: Assertions have been autogenerated by utils/update_cc_test_checks.py UTC_ARGS: --version 5
+// REQUIRES: riscv-registered-target
+// RUN: %clang_cc1 -triple riscv64 -target-feature +v \
+// RUN: -target-feature +experimental-zvfbfa -disable-O0-optnone \
+// RUN: -emit-llvm %s -o - | opt -S -passes=mem2reg | \
+// RUN: FileCheck --check-prefix=CHECK-RV64 %s
+
+#include <riscv_vector.h>
+
+// CHECK-RV64-LABEL: define dso_local <vscale x 1 x bfloat> @test_vfmacc_vv_bf16mf4_tu(
+// CHECK-RV64-SAME: <vscale x 1 x bfloat> [[VD:%.*]], <vscale x 1 x bfloat> [[VS1:%.*]], <vscale x 1 x bfloat> [[VS2:%.*]], i64 noundef [[VL:%.*]]) #[[ATTR0:[0-9]+]] {
+// CHECK-RV64-NEXT: [[ENTRY:.*:]]
+// CHECK-RV64-NEXT: [[TMP0:%.*]] = call <vscale x 1 x bfloat> @llvm.riscv.vfmacc.nxv1bf16.nxv1bf16.i64(<vscale x 1 x bfloat> [[VD]], <vscale x 1 x bfloat> [[VS1]], <vscale x 1 x bfloat> [[VS2]], i64 7, i64 [[VL]], i64 2)
+// CHECK-RV64-NEXT: ret <vscale x 1 x bfloat> [[TMP0]]
+//
+vbfloat16mf4_t test_vfmacc_vv_bf16mf4_tu(vbfloat16mf4_t vd, vbfloat16mf4_t vs1, vbfloat16mf4_t vs2, size_t vl) {
+ return __riscv_vfmacc_tu(vd, vs1, vs2, vl);
+}
+
+// CHECK-RV64-LABEL: define dso_local <vscale x 1 x bfloat> @test_vfmacc_vf_bf16mf4_tu(
+// CHECK-RV64-SAME: <vscale x 1 x bfloat> [[VD:%.*]], bfloat noundef [[RS1:%.*]], <vscale x 1 x bfloat> [[VS2:%.*]], i64 noundef [[VL:%.*]]) #[[ATTR0]] {
+// CHECK-RV64-NEXT: [[ENTRY:.*:]]
+// CHECK-RV64-NEXT: [[TMP0:%.*]] = call <vscale x 1 x bfloat> @llvm.riscv.vfmacc.nxv1bf16.bf16.i64(<vscale x 1 x bfloat> [[VD]], bfloat [[RS1]], <vscale x 1 x bfloat> [[VS2]], i64 7, i64 [[VL]], i64 2)
+// CHECK-RV64-NEXT: ret <vscale x 1 x bfloat> [[TMP0]]
+//
+vbfloat16mf4_t test_vfmacc_vf_bf16mf4_tu(vbfloat16mf4_t vd, __bf16 rs1, vbfloat16mf4_t vs2, size_t vl) {
+ return __riscv_vfmacc_tu(vd, rs1, vs2, vl);
+}
+
+// CHECK-RV64-LABEL: define dso_local <vscale x 2 x bfloat> @test_vfmacc_vv_bf16mf2_tu(
+// CHECK-RV64-SAME: <vscale x 2 x bfloat> [[VD:%.*]], <vscale x 2 x bfloat> [[VS1:%.*]], <vscale x 2 x bfloat> [[VS2:%.*]], i64 noundef [[VL:%.*]]) #[[ATTR0]] {
+// CHECK-RV64-NEXT: [[ENTRY:.*:]]
+// CHECK-RV64-NEXT: [[TMP0:%.*]] = call <vscale x 2 x bfloat> @llvm.riscv.vfmacc.nxv2bf16.nxv2bf16.i64(<vscale x 2 x bfloat> [[VD]], <vscale x 2 x bfloat> [[VS1]], <vscale x 2 x bfloat> [[VS2]], i64 7, i64 [[VL]], i64 2)
+// CHECK-RV64-NEXT: ret <vscale x 2 x bfloat> [[TMP0]]
+//
+vbfloat16mf2_t test_vfmacc_vv_bf16mf2_tu(vbfloat16mf2_t vd, vbfloat16mf2_t vs1, vbfloat16mf2_t vs2, size_t vl) {
+ return __riscv_vfmacc_tu(vd, vs1, vs2, vl);
+}
+
+// CHECK-RV64-LABEL: define dso_local <vscale x 2 x bfloat> @test_vfmacc_vf_bf16mf2_tu(
+// CHECK-RV64-SAME: <vscale x 2 x bfloat> [[VD:%.*]], bfloat noundef [[RS1:%.*]], <vscale x 2 x bfloat> [[VS2:%.*]], i64 noundef [[VL:%.*]]) #[[ATTR0]] {
+// CHECK-RV64-NEXT: [[ENTRY:.*:]]
+// CHECK-RV64-NEXT: [[TMP0:%.*]] = call <vscale x 2 x bfloat> @llvm.riscv.vfmacc.nxv2bf16.bf16.i64(<vscale x 2 x bfloat> [[VD]], bfloat [[RS1]], <vscale x 2 x bfloat> [[VS2]], i64 7, i64 [[VL]], i64 2)
+// CHECK-RV64-NEXT: ret <vscale x 2 x bfloat> [[TMP0]]
+//
+vbfloat16mf2_t test_vfmacc_vf_bf16mf2_tu(vbfloat16mf2_t vd, __bf16 rs1, vbfloat16mf2_t vs2, size_t vl) {
+ return __riscv_vfmacc_tu(vd, rs1, vs2, vl);
+}
+
+// CHECK-RV64-LABEL: define dso_local <vscale x 4 x bfloat> @test_vfmacc_vv_bf16m1_tu(
+// CHECK-RV64-SAME: <vscale x 4 x bfloat> [[VD:%.*]], <vscale x 4 x bfloat> [[VS1:%.*]], <vscale x 4 x bfloat> [[VS2:%.*]], i64 noundef [[VL:%.*]]) #[[ATTR0]] {
+// CHECK-RV64-NEXT: [[ENTRY:.*:]]
+// CHECK-RV64-NEXT: [[TMP0:%.*]] = call <vscale x 4 x bfloat> @llvm.riscv.vfmacc.nxv4bf16.nxv4bf16.i64(<vscale x 4 x bfloat> [[VD]], <vscale x 4 x bfloat> [[VS1]], <vscale x 4 x bfloat> [[VS2]], i64 7, i64 [[VL]], i64 2)
+// CHECK-RV64-NEXT: ret <vscale x 4 x bfloat> [[TMP0]]
+//
+vbfloat16m1_t test_vfmacc_vv_bf16m1_tu(vbfloat16m1_t vd, vbfloat16m1_t vs1, vbfloat16m1_t vs2, size_t vl) {
+ return __riscv_vfmacc_tu(vd, vs1, vs2, vl);
+}
+
+// CHECK-RV64-LABEL: define dso_local <vscale x 4 x bfloat> @test_vfmacc_vf_bf16m1_tu(
+// CHECK-RV64-SAME: <vscale x 4 x bfloat> [[VD:%.*]], bfloat noundef [[RS1:%.*]], <vscale x 4 x bfloat> [[VS2:%.*]], i64 noundef [[VL:%.*]]) #[[ATTR0]] {
+// CHECK-RV64-NEXT: [[ENTRY:.*:]]
+// CHECK-RV64-NEXT: [[TMP0:%.*]] = call <vscale x 4 x bfloat> @llvm.riscv.vfmacc.nxv4bf16.bf16.i64(<vscale x 4 x bfloat> [[VD]], bfloat [[RS1]], <vscale x 4 x bfloat> [[VS2]], i64 7, i64 [[VL]], i64 2)
+// CHECK-RV64-NEXT: ret <vscale x 4 x bfloat> [[TMP0]]
+//
+vbfloat16m1_t test_vfmacc_vf_bf16m1_tu(vbfloat16m1_t vd, __bf16 rs1, vbfloat16m1_t vs2, size_t vl) {
+ return __riscv_vfmacc_tu(vd, rs1, vs2, vl);
+}
+
+// CHECK-RV64-LABEL: define dso_local <vscale x 8 x bfloat> @test_vfmacc_vv_bf16m2_tu(
+// CHECK-RV64-SAME: <vscale x 8 x bfloat> [[VD:%.*]], <vscale x 8 x bfloat> [[VS1:%.*]], <vscale x 8 x bfloat> [[VS2:%.*]], i64 noundef [[VL:%.*]]) #[[ATTR0]] {
+// CHECK-RV64-NEXT: [[ENTRY:.*:]]
+// CHECK-RV64-NEXT: [[TMP0:%.*]] = call <vscale x 8 x bfloat> @llvm.riscv.vfmacc.nxv8bf16.nxv8bf16.i64(<vscale x 8 x bfloat> [[VD]], <vscale x 8 x bfloat> [[VS1]], <vscale x 8 x bfloat> [[VS2]], i64 7, i64 [[VL]], i64 2)
+// CHECK-RV64-NEXT: ret <vscale x 8 x bfloat> [[TMP0]]
+//
+vbfloat16m2_t test_vfmacc_vv_bf16m2_tu(vbfloat16m2_t vd, vbfloat16m2_t vs1, vbfloat16m2_t vs2, size_t vl) {
+ return __riscv_vfmacc_tu(vd, vs1, vs2, vl);
+}
+
+// CHECK-RV64-LABEL: define dso_local <vscale x 8 x bfloat> @test_vfmacc_vf_bf16m2_tu(
+// CHECK-RV64-SAME: <vscale x 8 x bfloat> [[VD:%.*]], bfloat noundef [[RS1:%.*]], <vscale x 8 x bfloat> [[VS2:%.*]], i64 noundef [[VL:%.*]]) #[[ATTR0]] {
+// CHECK-RV64-NEXT: [[ENTRY:.*:]]
+// CHECK-RV64-NEXT: [[TMP0:%.*]] = call <vscale x 8 x bfloat> @llvm.riscv.vfmacc.nxv8bf16.bf16.i64(<vscale x 8 x bfloat> [[VD]], bfloat [[RS1]], <vscale x 8 x bfloat> [[VS2]], i64 7, i64 [[VL]], i64 2)
+// CHECK-RV64-NEXT: ret <vscale x 8 x bfloat> [[TMP0]]
+//
+vbfloat16m2_t test_vfmacc_vf_bf16m2_tu(vbfloat16m2_t vd, __bf16 rs1, vbfloat16m2_t vs2, size_t vl) {
+ return __riscv_vfmacc_tu(vd, rs1, vs2, vl);
+}
+
+// CHECK-RV64-LABEL: define dso_local <vscale x 16 x bfloat> @test_vfmacc_vv_bf16m4_tu(
+// CHECK-RV64-SAME: <vscale x 16 x bfloat> [[VD:%.*]], <vscale x 16 x bfloat> [[VS1:%.*]], <vscale x 16 x bfloat> [[VS2:%.*]], i64 noundef [[VL:%.*]]) #[[ATTR0]] {
+// CHECK-RV64-NEXT: [[ENTRY:.*:]]
+// CHECK-RV64-NEXT: [[TMP0:%.*]] = call <vscale x 16 x bfloat> @llvm.riscv.vfmacc.nxv16bf16.nxv16bf16.i64(<vscale x 16 x bfloat> [[VD]], <vscale x 16 x bfloat> [[VS1]], <vscale x 16 x bfloat> [[VS2]], i64 7, i64 [[VL]], i64 2)
+// CHECK-RV64-NEXT: ret <vscale x 16 x bfloat> [[TMP0]]
+//
+vbfloat16m4_t test_vfmacc_vv_bf16m4_tu(vbfloat16m4_t vd, vbfloat16m4_t vs1, vbfloat16m4_t vs2, size_t vl) {
+ return __riscv_vfmacc_tu(vd, vs1, vs2, vl);
+}
+
+// CHECK-RV64-LABEL: define dso_local <vscale x 16 x bfloat> @test_vfmacc_vf_bf16m4_tu(
+// CHECK-RV64-SAME: <vscale x 16 x bfloat> [[VD:%.*]], bfloat noundef [[RS1:%.*]], <vscale x 16 x bfloat> [[VS2:%.*]], i64 noundef [[VL:%.*]]) #[[ATTR0]] {
+// CHECK-RV64-NEXT: [[ENTRY:.*:]]
+// CHECK-RV64-NEXT: [[TMP0:%.*]] = call <vscale x 16 x bfloat> @llvm.riscv.vfmacc.nxv16bf16.bf16.i64(<vscale x 16 x bfloat> [[VD]], bfloat [[RS1]], <vscale x 16 x bfloat> [[VS2]], i64 7, i64 [[VL]], i64 2)
+// CHECK-RV64-NEXT: ret <vscale x 16 x bfloat> [[TMP0]]
+//
+vbfloat16m4_t test_vfmacc_vf_bf16m4_tu(vbfloat16m4_t vd, __bf16 rs1, vbfloat16m4_t vs2, size_t vl) {
+ return __riscv_vfmacc_tu(vd, rs1, vs2, vl);
+}
+
+// CHECK-RV64-LABEL: define dso_local <vscale x 32 x bfloat> @test_vfmacc_vv_bf16m8_tu(
+// CHECK-RV64-SAME: <vscale x 32 x bfloat> [[VD:%.*]], <vscale x 32 x bfloat> [[VS1:%.*]], <vscale x 32 x bfloat> [[VS2:%.*]], i64 noundef [[VL:%.*]]) #[[ATTR0]] {
+// CHECK-RV64-NEXT: [[ENTRY:.*:]]
+// CHECK-RV64-NEXT: [[TMP0:%.*]] = call <vscale x 32 x bfloat> @llvm.riscv.vfmacc.nxv32bf16.nxv32bf16.i64(<vscale x 32 x bfloat> [[VD]], <vscale x 32 x bfloat> [[VS1]], <vscale x 32 x bfloat> [[VS2]], i64 7, i64 [[VL]], i64 2)
+// CHECK-RV64-NEXT: ret <vscale x 32 x bfloat> [[TMP0]]
+//
+vbfloat16m8_t test_vfmacc_vv_bf16m8_tu(vbfloat16m8_t vd, vbfloat16m8_t vs1, vbfloat16m8_t vs2, size_t vl) {
+ return __riscv_vfmacc_tu(vd, vs1, vs2, vl);
+}
+
+// CHECK-RV64-LABEL: define dso_local <vscale x 32 x bfloat> @test_vfmacc_vf_bf16m8_tu(
+// CHECK-RV64-SAME: <vscale x 32 x bfloat> [[VD:%.*]], bfloat noundef [[RS1:%.*]], <vscale x 32 x bfloat> [[VS2:%.*]], i64 noundef [[VL:%.*]]) #[[ATTR0]] {
+// CHECK-RV64-NEXT: [[ENTRY:.*:]]
+// CHECK-RV64-NEXT: [[TMP0:%.*]] = call <vscale x 32 x bfloat> @llvm.riscv.vfmacc.nxv32bf16.bf16.i64(<vscale x 32 x bfloat> [[VD]], bfloat [[RS1]], <vscale x 32 x bfloat> [[VS2]], i64 7, i64 [[VL]], i64 2)
+// CHECK-RV64-NEXT: ret <vscale x 32 x bfloat> [[TMP0]]
+//
+vbfloat16m8_t test_vfmacc_vf_bf16m8_tu(vbfloat16m8_t vd, __bf16 rs1, vbfloat16m8_t vs2, size_t vl) {
+ return __riscv_vfmacc_tu(vd, rs1, vs2, vl);
+}
+
+// CHECK-RV64-LABEL: define dso_local <vscale x 1 x bfloat> @test_vfmacc_vv_bf16mf4_tum(
+// CHECK-RV64-SAME: <vscale x 1 x i1> [[MASK:%.*]], <vscale x 1 x bfloat> [[VD:%.*]], <vscale x 1 x bfloat> [[VS1:%.*]], <vscale x 1 x bfloat> [[VS2:%.*]], i64 noundef [[VL:%.*]]) #[[ATTR0]] {
+// CHECK-RV64-NEXT: [[ENTRY:.*:]]
+// CHECK-RV64-NEXT: [[TMP0:%.*]] = call <vscale x 1 x bfloat> @llvm.riscv.vfmacc.mask.nxv1bf16.nxv1bf16.i64(<vscale x 1 x bfloat> [[VD]], <vscale x 1 x bfloat> [[VS1]], <vscale x 1 x bfloat> [[VS2]], <vscale x 1 x i1> [[MASK]], i64 7, i64 [[VL]], i64 2)
+// CHECK-RV64-NEXT: ret <vscale x 1 x bfloat> [[TMP0]]
+//
+vbfloat16mf4_t test_vfmacc_vv_bf16mf4_tum(vbool64_t mask, vbfloat16mf4_t vd, vbfloat16mf4_t vs1, vbfloat16mf4_t vs2, size_t vl) {
+ return __riscv_vfmacc_tum(mask, vd, vs1, vs2, vl);
+}
+
+// CHECK-RV64-LABEL: define dso_local <vscale x 1 x bfloat> @test_vfmacc_vf_bf16mf4_tum(
+// CHECK-RV64-SAME: <vscale x 1 x i1> [[MASK:%.*]], <vscale x 1 x bfloat> [[VD:%.*]], bfloat noundef [[RS1:%.*]], <vscale x 1 x bfloat> [[VS2:%.*]], i64 noundef [[VL:%.*]]) #[[ATTR0]] {
+// CHECK-RV64-NEXT: [[ENTRY:.*:]]
+// CHECK-RV64-NEXT: [[TMP0:%.*]] = call <vscale x 1 x bfloat> @llvm.riscv.vfmacc.mask.nxv1bf16.bf16.i64(<vscale x 1 x bfloat> [[VD]], bfloat [[RS1]], <vscale x 1 x bfloat> [[VS2]], <vscale x 1 x i1> [[MASK]], i64 7, i64 [[VL]], i64 2)
+// CHECK-RV64-NEXT: ret <vscale x 1 x bfloat> [[TMP0]]
+//
+vbfloat16mf4_t test_vfmacc_vf_bf16mf4_tum(vbool64_t mask, vbfloat16mf4_t vd, __bf16 rs1, vbfloat16mf4_t vs2, size_t vl) {
+ return __riscv_vfmacc_tum(mask, vd, rs1, vs2, vl);
+}
+
+// CHECK-RV64-LABEL: define dso_local <vscale x 2 x bfloat> @test_vfmacc_vv_bf16mf2_tum(
+// CHECK-RV64-SAME: <vscale x 2 x i1> [[MASK:%.*]], <vscale x 2 x bfloat> [[VD:%.*]], <vscale x 2 x bfloat> [[VS1:%.*]], <vscale x 2 x bfloat> [[VS2:%.*]], i64 noundef [[VL:%.*]]) #[[ATTR0]] {
+// CHECK-RV64-NEXT: [[ENTRY:.*:]]
+// CHECK-RV64-NEXT: [[TMP0:%.*]] = call <vscale x 2 x bfloat> @llvm.riscv.vfmacc.mask.nxv2bf16.nxv2bf16.i64(<vscale x 2 x bfloat> [[VD]], <vscale x 2 x bfloat> [[VS1]], <vscale x 2 x bfloat> [[VS2]], <vscale x 2 x i1> [[MASK]], i64 7, i64 [[VL]], i64 2)
+// CHECK-RV64-NEXT: ret <vscale x 2 x bfloat> [[TMP0]]
+//
+vbfloat16mf2_t test_vfmacc_vv_bf16mf2_tum(vbool32_t mask, vbfloat16mf2_t vd, vbfloat16mf2_t vs1, vbfloat16mf2_t vs2, size_t vl) {
+ return __riscv_vfmacc_tum(mask, vd, vs1, vs2, vl);
+}
+
+// CHECK-RV64-LABEL: define dso_local <vscale x 2 x bfloat> @test_vfmacc_vf_bf16mf2_tum(
+// CHECK-RV64-SAME: <vscale x 2 x i1> [[MASK:%.*]], <vscale x 2 x bfloat> [[VD:%.*]], bfloat noundef [[RS1:%.*]], <vscale x 2 x bfloat> [[VS2:%.*]], i64 noundef [[VL:%.*]]) #[[ATTR0]] {
+// CHECK-RV64-NEXT: [[ENTRY:.*:]]
+// CHECK-RV64-NEXT: [[TMP0:%.*]] = call <vscale x 2 x bfloat> @llvm.riscv.vfmacc.mask.nxv2bf16.bf16.i64(<vscale x 2 x bfloat> [[VD]], bfloat [[RS1]], <vscale x 2 x bfloat> [[VS2]], <vscale x 2 x i1> [[MASK]], i64 7, i64 [[VL]], i64 2)
+// CHECK-RV64-NEXT: ret <vscale x 2 x bfloat> [[TMP0]]
+//
+vbfloat16mf2_t test_vfmacc_vf_bf16mf2_tum(vbool32_t mask, vbfloat16mf2_t vd, __bf16 rs1, vbfloat16mf2_t vs2, size_t vl) {
+ return __riscv_vfmacc_tum(mask, vd, rs1, vs2, vl);
+}
+
+// CHECK-RV64-LABEL: define dso_local <vscale x 4 x bfloat> @test_vfmacc_vv_bf16m1_tum(
+// CHECK-RV64-SAME: <vscale x 4 x i1> [[MASK:%.*]], <vscale x 4 x bfloat> [[VD:%.*]], <vscale x 4 x bfloat> [[VS1:%.*]], <vscale x 4 x bfloat> [[VS2:%.*]], i64 noundef [[VL:%.*]]) #[[ATTR0]] {
+// CHECK-RV64-NEXT: [[ENTRY:.*:]]
+// CHECK-RV64-NEXT: [[TMP0:%.*]] = call <vscale x 4 x bfloat> @llvm.riscv.vfmacc.mask.nxv4bf16.nxv4bf16.i64(<vscale x 4 x bfloat> [[VD]], <vscale x 4 x bfloat> [[VS1]], <vscale x 4 x bfloat> [[VS2]], <vscale x 4 x i1> [[MASK]], i64 7, i64 [[VL]], i64 2)
+// CHECK-RV64-NEXT: ret <vscale x 4 x bfloat> [[TMP0]]
+//
+vbfloat16m1_t test_vfmacc_vv_bf16m1_tum(vbool16_t mask, vbfloat16m1_t vd, vbfloat16m1_t vs1, vbfloat16m1_t vs2, size_t vl) {
+ return __riscv_vfmacc_tum(mask, vd, vs1, vs2, vl);
+}
+
+// CHECK-RV64-LABEL: define dso_local <vscale x 4 x bfloat> @test_vfmacc_vf_bf16m1_tum(
+// CHECK-RV64-SAME: <vscale x 4 x i1> [[MASK:%.*]], <vscale x 4 x bfloat> [[VD:%.*]], bfloat noundef [[RS1:%.*]], <vscale x 4 x bfloat> [[VS2:%.*]], i64 noundef [[VL:%.*]]) #[[ATTR0]] {
+// CHECK-RV64-NEXT: [[ENTRY:.*:]]
+// CHECK-RV64-NEXT: [[TMP0:%.*]] = call <vscale x 4 x bfloat> @llvm.riscv.vfmacc.mask.nxv4bf16.bf16.i64(<vscale x 4 x bfloat> [[VD]], bfloat [[RS1]], <vscale x 4 x bfloat> [[VS2]], <vscale x 4 x i1> [[MASK]], i64 7, i64 [[VL]], i64 2)
+// CHECK-RV64-NEXT: ret <vscale x 4 x bfloat> [[TMP0]]
+//
+vbfloat16m1_t test_vfmacc_vf_bf16m1_tum(vbool16_t mask, vbfloat16m1_t vd, __bf16 rs1, vbfloat16m1_t vs2, size_t vl) {
+ return __riscv_vfmacc_tum(mask, vd, rs1, vs2, vl);
+}
+
+// CHECK-RV64-LABEL: define dso_local <vscale x 8 x bfloat> @test_vfmacc_vv_bf16m2_tum(
+// CHECK-RV64-SAME: <vscale x 8 x i1> [[MASK:%.*]], <vscale x 8 x bfloat> [[VD:%.*]], <vscale x 8 x bfloat> [[VS1:%.*]], <vscale x 8 x bfloat> [[VS2:%.*]], i64 noundef [[VL:%.*]]) #[[ATTR0]] {
+// CHECK-RV64-NEXT: [[ENTRY:.*:]]
+// CHECK-RV64-NEXT: [[TMP0:%.*]] = call <vscale x 8 x bfloat> @llvm.riscv.vfmacc.mask.nxv8bf16.nxv8bf16.i64(<vscale x 8 x bfloat> [[VD]], <vscale x 8 x bfloat> [[VS1]], <vscale x 8 x bfloat> [[VS2]], <vscale x 8 x i1> [[MASK]], i64 7, i64 [[VL]], i64 2)
+// CHECK-RV64-NEXT: ret <vscale x 8 x bfloat> [[TMP0]]
+//
+vbfloat16m2_t test_vfmacc_vv_bf16m2_tum(vbool8_t mask, vbfloat16m2_t vd, vbfloat16m2_t vs1, vbfloat16m2_t vs2, size_t vl) {
+ return __riscv_vfmacc_tum(mask, vd, vs1, vs2, vl);
+}
+
+// CHECK-RV64-LABEL: define dso_local <vscale x 8 x bfloat> @test_vfmacc_vf_bf16m2_tum(
+// CHECK-RV64-SAME: <vscale x 8 x i1> [[MASK:%.*]], <vscale x 8 x bfloat> [[VD:%.*]], bfloat noundef [[RS1:%.*]], <vscale x 8 x bfloat> [[VS2:%.*]], i64 noundef [[VL:%.*]]) #[[ATTR0]] {
+// CHECK-RV64-NEXT: [[ENTRY:.*:]]
+// CHECK-RV64-NEXT: [[TMP0:%.*]] = call <vscale x 8 x bfloat> @llvm.riscv.vfmacc.mask.nxv8bf16.bf16.i64(<vscale x 8 x bfloat> [[VD]], bfloat [[RS1]], <vscale x 8 x bfloat> [[VS2]], <vscale x 8 x i1> [[MASK]], i64 7, i64 [[VL]], i64 2)
+// CHECK-RV64-NEXT: ret <vscale x 8 x bfloat> [[TMP0]]
+//
+vbfloat16m2_t test_vfmacc_vf_bf16m2_tum(vbool8_t mask, vbfloat16m2_t vd, __bf16 rs1, vbfloat16m2_t vs2, size_t vl) {
+ return __riscv_vfmacc_tum(mask, vd, rs1, vs2, vl);
+}
+
+// CHECK-RV64-LABEL: define dso_local <vscale x 16 x bfloat> @test_vfmacc_vv_bf16m4_tum(
+// CHECK-RV64-SAME: <vscale x 16 x i1> [[MASK:%.*]], <vscale x 16 x bfloat> [[VD:%.*]], <vscale x 16 x bfloat> [[VS1:%.*]], <vscale x 16 x bfloat> [[VS2:%.*]], i64 noundef [[VL:%.*]]) #[[ATTR0]] {
+// CHECK-RV64-NEXT: [[ENTRY:.*:]]
+// CHECK-RV64-NEXT: [[TMP0:%.*]] = call <vscale x 16 x bfloat> @llvm.riscv.vfmacc.mask.nxv16bf16.nxv16bf16.i64(<vscale x 16 x bfloat> [[VD]], <vscale x 16 x bfloat> [[VS1]], <vscale x 16 x bfloat> [[VS2]], <vscale x 16 x i1> [[MASK]], i64 7, i64 [[VL]], i64 2)
+// CHECK-RV64-NEXT: ret <vscale x 16 x bfloat> [[TMP0]]
+//
+vbfloat16m4_t test_vfmacc_vv_bf16m4_tum(vbool4_t mask, vbfloat16m4_t vd, vbfloat16m4_t vs1, vbfloat16m4_t vs2, size_t vl) {
+ return __riscv_vfmacc_tum(mask, vd, vs1, vs2, vl);
+}
+
+// CHECK-RV64-LABEL: define dso_local <vscale x 16 x bfloat> @test_vfmacc_vf_bf16m4_tum(
+// CHECK-RV64-SAME: <vscale x 16 x i1> [[MASK:%.*]], <vscale x 16 x bfloat> [[VD:%.*]], bfloat noundef [[RS1:%.*]], <vscale x 16 x bfloat> [[VS2:%.*]], i64 noundef [[VL:%.*]]) #[[ATTR0]] {
+// CHECK-RV64-NEXT: [[ENTRY:.*:]]
+// CHECK-RV64-NEXT: [[TMP0:%.*]] = call <vscale x 16 x bfloat> @llvm.riscv.vfmacc.mask.nxv16bf16.bf16.i64(<vscale x 16 x bfloat> [[VD]], bfloat [[RS1]], <vscale x 16 x bfloat> [[VS2]], <vscale x 16 x i1> [[MASK]], i64 7, i64 [[VL]], i64 2)
+// CHECK-RV64-NEXT: ret <vscale x 16 x bfloat> [[TMP0]]
+//
+vbfloat16m4_t test_vfmacc_vf_bf16m4_tum(vbool4_t mask, vbfloat16m4_t vd, __bf16 rs1, vbfloat16m4_t vs2, size_t vl) {
+ return __riscv_vfmacc_tum(mask, vd, rs1, vs2, vl);
+}
+
+// CHECK-RV64-LABEL: define dso_local <vscale x 32 x bfloat> @test_vfmacc_vv_bf16m8_tum(
+// CHECK-RV64-SAME: <vscale x 32 x i1> [[MASK:%.*]], <vscale x 32 x bfloat> [[VD:%.*]], <vscale x 32 x bfloat> [[VS1:%.*]], <vscale x 32 x bfloat> [[VS2:%.*]], i64 noundef [[VL:%.*]]) #[[ATTR0]] {
+// CHECK-RV64-NEXT: [[ENTRY:.*:]]
+// CHECK-RV64-NEXT: [[TMP0:%.*]] = call <vscale x 32 x bfloat> @llvm.riscv.vfmacc.mask.nxv32bf16.nxv32bf16.i64(<vscale x 32 x bfloat> [[VD]], <vscale x 32 x bfloat> [[VS1]], <vscale x 32 x bfloat> [[VS2]], <vscale x 32 x i1> [[MASK]], i64 7, i64 [[VL]], i64 2)
+// CHECK-RV64-NEXT: ret <vscale x 32 x bfloat> [[TMP0]]
+//
+vbfloat16m8_t test_vfmacc_vv_bf16m8_tum(vbool2_t mask, vbfloat16m8_t vd, vbfloat16m8_t vs1, vbfloat16m8_t vs2, size_t vl) {
+ return __riscv_vfmacc_tum(mask, vd, vs1, vs2, vl);
+}
+
+// CHECK-RV64-LABEL: define dso_local <vscale x 32 x bfloat> @test_vfmacc_vf_bf16m8_tum(
+// CHECK-RV64-SAME: <vscale x 32 x i1> [[MASK:%.*]], <vscale x 32 x bfloat> [[VD:%.*]], bfloat noundef [[RS1:%.*]], <vscale x 32 x bfloat> [[VS2:%.*]], i64 noundef [[VL:%.*]]) #[[ATTR0]] {
+// CHECK-RV64-NEXT: [[ENTRY:.*:]]
+// CHECK-RV64-NEXT: [[TMP0:%.*]] = call <vscale x 32 x bfloat> @llvm.riscv.vfmacc.mask.nxv32bf16.bf16.i64(<vscale x 32 x bfloat> [[VD]], bfloat [[RS1]], <vscale x 32 x bfloat> [[VS2]], <vscale x 32 x i1> [[MASK]], i64 7, i64 [[VL]], i64 2)
+// CHECK-RV64-NEXT: ret <vscale x 32 x bfloat> [[TMP0]]
+//
+vbfloat16m8_t test_vfmacc_vf_bf16m8_tum(vbool2_t mask, vbfloat16m8_t vd, __bf16 rs1, vbfloat16m8_t vs2, size_t vl) {
+ return __riscv_vfmacc_tum(mask, vd, rs1, vs2, vl);
+}
+
+// CHECK-RV64-LABEL: define dso_local <vscale x 1 x bfloat> @test_vfmacc_vv_bf16mf4_tumu(
+// CHECK-RV64-SAME: <vscale x 1 x i1> [[MASK:%.*]], <vscale x 1 x bfloat> [[VD:%.*]], <vscale x 1 x bfloat> [[VS1:%.*]], <vscale x 1 x bfloat> [[VS2:%.*]], i64 noundef [[VL:%.*]]) #[[ATTR0]] {
+// CHECK-RV64-NEXT: [[ENTRY:.*:]]
+// CHECK-RV64-NEXT: [[TMP0:%.*]] = call <vscale x 1 x bfloat> @llvm.riscv.vfmacc.mask.nxv1bf16.nxv1bf16.i64(<vscale x 1 x bfloat> [[VD]], <vscale x 1 x bfloat> [[VS1]], <vscale x 1 x bfloat> [[VS2]], <vscale x 1 x i1> [[MASK]], i64 7, i64 [[VL]], i64 0)
+// CHECK-RV64-NEXT: ret <vscale x 1 x bfloat> [[TMP0]]
+//
+vbfloat16mf4_t test_vfmacc_vv_bf16mf4_tumu(vbool64_t mask, vbfloat16mf4_t vd, vbfloat16mf4_t vs1, vbfloat16mf4_t vs2, size_t vl) {
+ return __riscv_vfmacc_tumu(mask, vd, vs1, vs2, vl);
+}
+
+// CHECK-RV64-LABEL: define dso_local <vscale x 1 x bfloat> @test_vfmacc_vf_bf16mf4_tumu(
+// CHECK-RV64-SAME: <vscale x 1 x i1> [[MASK:%.*]], <vscale x 1 x bfloat> [[VD:%.*]], bfloat noundef [[RS1:%.*]], <vscale x 1 x bfloat> [[VS2:%.*]], i64 noundef [[VL:%.*]]) #[[ATTR0]] {
+// CHECK-RV64-NEXT: [[ENTRY:.*:]]
+// CHECK-RV64-NEXT: [[TMP0:%.*]] = call <vscale x 1 x bfloat> @llvm.riscv.vfmacc.mask.nxv1bf16.bf16.i64(<vscale x 1 x bfloat> [[VD]], bfloat [[RS1]], <vscale x 1 x bfloat> [[VS2]], <vscale x 1 x i1> [[MASK]], i64 7, i64 [[VL]], i64 0)
+// CHECK-RV64-NEXT: ret <vscale x 1 x bfloat> [[TMP0]]
+//
+vbfloat16mf4_t test_vfmacc_vf_bf16mf4_tumu(vbool64_t mask, vbfloat16mf4_t vd, __bf16 rs1, vbfloat16mf4_t vs2, size_t vl) {
+ return __riscv_vfmacc_tumu(mask, vd, rs1, vs2, vl);
+}
+
+// CHECK-RV64-LABEL: define dso_local <vscale x 2 x bfloat> @test_vfmacc_vv_bf16mf2_tumu(
+// CHECK-RV64-SAME: <vscale x 2 x i1> [[MASK:%.*]], <vscale x 2 x bfloat> [[VD:%.*]], <vscale x 2 x bfloat> [[VS1:%.*]], <vscale x 2 x bfloat> [[VS2:%.*]], i64 noundef [[VL:%.*]]) #[[ATTR0]] {
+// CHECK-RV64-NEXT: [[ENTRY:.*:]]
+// CHECK-RV64-NEXT: [[TMP0:%.*]] = call <vscale x 2 x bfloat> @llvm.riscv.vfmacc.mask.nxv2bf16.nxv2bf16.i64(<vscale x 2 x bfloat> [[VD]], <vscale x 2 x bfloat> [[VS1]], <vscale x 2 x bfloat> [[VS2]], <vscale x 2 x i1> [[MASK]], i64 7, i64 [[VL]], i64 0)
+// CHECK-RV64-NEXT: ret <vscale x 2 x bfloat> [[TMP0]]
+//
+vbfloat16mf2_t test_vfmacc_vv_bf16mf2_tumu(vbool32_t mask, vbfloat16mf2_t vd, vbfloat16mf2_t vs1, vbfloat16mf2_t vs2, size_t vl) {
+ return __riscv_vfmacc_tumu(mask, vd, vs1, vs2, vl);
+}
+
+// CHECK-RV64-LABEL: define dso_local <vscale x 2 x bfloat> @test_vfmacc_vf_bf16mf2_tumu(
+// CHECK-RV64-SAME: <vscale x 2 x i1> [[MASK:%.*]], <vscale x 2 x bfloat> [[VD:%.*]], bfloat noundef [[RS1:%.*]], <vscale x 2 x bfloat> [[VS2:%.*]], i64 noundef [[VL:%.*]]) #[[ATTR0]] {
+// CHECK-RV64-NEXT: [[ENTRY:.*:]]
+// CHECK-RV64-NEXT: [[TMP0:%.*]] = call <vscale x 2 x bfloat> @llvm.riscv.vfmacc.mask.nxv2bf16.bf16.i64(<vscale x 2 x bfloat> [[VD]], bfloat [[RS1]], <vscale x 2 x bfloat> [[VS2]], <vscale x 2 x i1> [[MASK]], i64 7, i64 [[VL]], i64 0)
+// CHECK-RV64-NEXT: ret <vscale x 2 x bfloat> [[TMP0]]
+//
+vbfloat16mf2_t test_vfmacc_vf_bf16mf2_tumu(vbool32_t mask, vbfloat16mf2_t vd, __bf16 rs1, vbfloat16mf2_t vs2, size_t vl) {
+ return __riscv_vfmacc_tumu(mask, vd, rs1, vs2, vl);
+}
+
+// CHECK-RV64-LABEL: define dso_local <vscale x 4 x bfloat> @test_vfmacc_vv_bf16m1_tumu(
+// CHECK-RV64-SAME: <vscale x 4 x i1> [[MASK:%.*]], <vscale x 4 x bfloat> [[VD:%.*]], <vscale x 4 x bfloat> [[VS1:%.*]], <vscale x 4 x bfloat> [[VS2:%.*]], i64 noundef [[VL:%.*]]) #[[ATTR0]] {
+// CHECK-RV64-NEXT: [[ENTRY:.*:]]
+// CHECK-RV64-NEXT: [[TMP0:%.*]] = call <vscale x 4 x bfloat> @llvm.riscv.vfmacc.mask.nxv4bf16.nxv4bf16.i64(<vscale x 4 x bfloat> [[VD]], <vscale x 4 x bfloat> [[VS1]], <vscale x 4 x bfloat> [[VS2]], <vscale x 4 x i1> [[MASK]], i64 7, i64 [[VL]], i64 0)
+// CHECK-RV64-NEXT: ret <vscale x 4 x bfloat> [[TMP0]]
+//
+vbfloat16m1_t test_vfmacc_vv_bf16m1_tumu(vbool16_t mask, vbfloat16m1_t vd, vbfloat16m1_t vs1, vbfloat16m1_t vs2, size_t vl) {
+ return __riscv_vfmacc_tumu(mask, vd, vs1, vs2, vl);
+}
+
+// CHECK-RV64-LABEL: define dso_local <vscale x 4 x bfloat> @test_vfmacc_vf_bf16m1_tumu(
+// CHECK-RV64-SAME: <vscale x 4 x i1> [[MASK:%.*]], <vscale x 4 x bfloat> [[VD:%.*]], bfloat noundef [[RS1:%.*]], <vscale x 4 x bfloat> [[VS2:%.*]], i64 noundef [[VL:%.*]]) #[[ATTR0]] {
+// CHECK-RV64-NEXT: [[ENTRY:.*:]]
+// CHECK-RV64-NEXT: [[TMP0:%.*]] = call <vscale x 4 x bfloat> @llvm.riscv.vfmacc.mask.nxv4bf16.bf16.i64(<vscale x 4 x bfloat> [[VD]], bfloat [[RS1]], <vscale x 4 x bfloat> [[VS2]], <vscale x 4 x i1> [[MASK]], i64 7, i64 [[VL]], i64 0)
+// CHECK-RV64-NEXT: ret <vscale x 4 x bfloat> [[TMP0]]
+//
+vbfloat16m1_t test_vfmacc_vf_bf16m1_tumu(vbool16_t mask, vbfloat16m1_t vd, __bf16 rs1, vbfloat16m1_t vs2, size_t vl) {
+ return __riscv_vfmacc_tumu(mask, vd, rs1, vs2, vl);
+}
+
+// CHECK-RV64-LABEL: define dso_local <vscale x 8 x bfloat> @test_vfmacc_vv_bf16m2_tumu(
+// CHECK-RV64-SAME: <vscale x 8 x i1> [[MASK:%.*]], <vscale x 8 x bfloat> [[VD:%.*]], <vscale x 8 x bfloat> [[VS1:%.*]], <vscale x 8 x bfloat> [[VS2:%.*]], i64 noundef [[VL:%.*]]) #[[ATTR0]] {
+// CHECK-RV64-NEXT: [[ENTRY:.*:]]
+// CHECK-RV64-NEXT: [[TMP0:%.*]] = call <vscale x 8 x bfloat> @llvm.riscv.vfmacc.mask.nxv8bf16.nxv8bf16.i64(<vscale x 8 x bfloat> [[VD]], <vscale x 8 x bfloat> [[VS1]], <vscale x 8 x bfloat> [[VS2]], <vscale x 8 x i1> [[MASK]], i64 7, i64 [[VL]], i64 0)
+// CHECK-RV64-NEXT: ret <vscale x 8 x bfloat> [[TMP0]]
+//
+vbfloat16m2_t test_vfmacc_vv_bf16m2_tumu(vbool8_t mask, vbfloat16m2_t vd, vbfloat16m2_t vs1, vbfloat16m2_t vs2, size_t vl) {
+ return __riscv_vfmacc_tumu(mask, vd, vs1, vs2, vl);
+}
+
+// CHECK-RV64-LABEL: define dso_local <vscale x 8 x bfloat> @test_vfmacc_vf_bf16m2_tumu(
+// CHECK-RV64-SAME: <vscale x 8 x i1> [[MASK:%.*]], <vscale x 8 x bfloat> [[VD:%.*]], bfloat noundef [[RS1:%.*]], <vscale x 8 x bfloat> [[VS2:%.*]], i64 noundef [[VL:%.*]]) #[[ATTR0]] {
+// CHECK-RV64-NEXT: [[ENTRY:.*:]]
+// CHECK-RV64-NEXT: [[TMP0:%.*]] = call <vscale x 8 x bfloat> @llvm.riscv.vfmacc.mask.nxv8bf16.bf16.i64(<vscale x 8 x bfloat> [[VD]], bfloat [[RS1]], <vscale x 8 x bfloat> [[VS2]], <vscale x 8 x i1> [[MASK]], i64 7, i64 [[VL]], i64 0)
+// CHECK-RV64-NEXT: ret <vscale x 8 x bfloat> [[TMP0]]
+//
+vbfloat16m2_t test_vfmacc_vf_bf16m2_tumu(vbool8_t mask, vbfloat16m2_t vd, __bf16 rs1, vbfloat16m2_t vs2, size_t vl) {
+ return __riscv_vfmacc_tumu(mask, vd, rs1, vs2, vl);
+}
+
+// CHECK-RV64-LABEL: define dso_local <vscale x 16 x bfloat> @test_vfmacc_vv_bf16m4_tumu(
+// CHECK-RV64-SAME: <vscale x 16 x i1> [[MASK:%.*]], <vscale x 16 x bfloat> [[VD:%.*]], <vscale x 16 x bfloat> [[VS1:%.*]], <vscale x 16 x bfloat> [[VS2:%.*]], i64 noundef [[VL:%.*]]) #[[ATTR0]] {
+// CHECK-RV64-NEXT: [[ENTRY:.*:]]
+// CHECK-RV64-NEXT: [[TMP0:%.*]] = call <vscale x 16 x bfloat> @llvm.riscv.vfmacc.mask.nxv16bf16.nxv16bf16.i64(<vscale x 16 x bfloat> [[VD]], <vscale x 16 x bfloat> [[VS1]], <vscale x 16 x bfloat> [[VS2]], <vscale x 16 x i1> [[MASK]], i64 7, i64 [[VL]], i64 0)
+// CHECK-RV64-NEXT: ret <vscale x 16 x bfloat> [[TMP0]]
+//
+vbfloat16m4_t test_vfmacc_vv_bf16m4_tumu(vbool4_t mask, vbfloat16m4_t vd, vbfloat16m4_t vs1, vbfloat16m4_t vs2, size_t vl) {
+ return __riscv_vfmacc_tumu(mask, vd, vs1, vs2, vl);
+}
+
+// CHECK-RV64-LABEL: define dso_local <vscale x 16 x bfloat> @test_vfmacc_vf_bf16m4_tumu(
+// CHECK-RV64-SAME: <vscale x 16 x i1> [[MASK:%.*]], <vscale x 16 x bfloat> [[VD:%.*]], bfloat noundef [[RS1:%.*]], <vscale x 16 x bfloat> [[VS2:%.*]], i64 noundef [[VL:%.*]]) #[[ATTR0]] {
+// CHECK-RV64-NEXT: [[ENTRY:.*:]]
+// CHECK-RV64-NEXT: [[TMP0:%.*]] = call <vscale x 16 x bfloat> @llvm.riscv.vfmacc.mask.nxv16bf16.bf16.i64(<vscale x 16 x bfloat> [[VD]], bfloat [[RS1]], <vscale x 16 x bfloat> [[VS2]], <vscale x 16 x i1> [[MASK]], i64 7, i64 [[VL]], i64 0)
+// CHECK-RV64-NEXT: ret <vscale x 16 x bfloat> [[TMP0]]
+//
+vbfloat16m4_t test_vfmacc_vf_bf16m4_tumu(vbool4_t mask, vbfloat16m4_t vd, __bf16 rs1, vbfloat16m4_t vs2, size_t vl) {
+ return __riscv_vfmacc_tumu(mask, vd, rs1, vs2, vl);
+}
+
+// CHECK-RV64-LABEL: define dso_local <vscale x 32 x bfloat> @test_vfmacc_vv_bf16m8_tumu(
+// CHECK-RV64-SAME: <vscale x 32 x i1> [[MASK:%.*]], <vscale x 32 x bfloat> [[VD:%.*]], <vscale x 32 x bfloat> [[VS1:%.*]], <vscale x 32 x bfloat> [[VS2:%.*]], i64 noundef [[VL:%.*]]) #[[ATTR0]] {
+// CHECK-RV64-NEXT: [[ENTRY:.*:]]
+// CHECK-RV64-NEXT: [[TMP0:%.*]] = call <vscale x 32 x bfloat> @llvm.riscv.vfmacc.mask.nxv32bf16.nxv32bf16.i64(<vscale x 32 x bfloat> [[VD]], <vscale x 32 x bfloat> [[VS1]], <vscale x 32 x bfloat> [[VS2]], <vscale x 32 x i1> [[MASK]], i64 7, i64 [[VL]], i64 0)
+// CHECK-RV64-NEXT: ret <vscale x 32 x bfloat> [[TMP0]]
+//
+vbfloat16m8_t test_vfmacc_vv_bf16m8_tumu(vbool2_t mask, vbfloat16m8_t vd, vbfloat16m8_t vs1, vbfloat16m8_t vs2, size_t vl) {
+ return __riscv_vfmacc_tumu(mask, vd, vs1, vs2, vl);
+}
+
+// CHECK-RV64-LABEL: define dso_local <vscale x 32 x bfloat> @test_vfmacc_vf_bf16m8_tumu(
+// CHECK-RV64-SAME: <vscale x 32 x i1> [[MASK:%.*]], <vscale x 32 x bfloat> [[VD:%.*]], bfloat noundef [[RS1:%.*]], <vscale x 32 x bfloat> [[VS2:%.*]], i64 noundef [[VL:%.*]]) #[[ATTR0]] {
+// CHECK-RV64-NEXT: [[ENTRY:.*:]]
+// CHECK-RV64-NEXT: [[TMP0:%.*]] = call <vscale x 32 x bfloat> @llvm.riscv.vfmacc.mask.nxv32bf16.bf16.i64(<vscale x 32 x bfloat> [[VD]], bfloat [[RS1]], <vscale x 32 x bfloat> [[VS2]], <vscale x 32 x i1> [[MASK]], i64 7, i64 [[VL]], i64 0)
+// CHECK-RV64-NEXT: ret <vscale x 32 x bfloat> [[TMP0]]
+//
+vbfloat16m8_t test_vfmacc_vf_bf16m8_tumu(vbool2_t mask, vbfloat16m8_t vd, __bf16 rs1, vbfloat16m8_t vs2, size_t vl) {
+ return __riscv_vfmacc_tumu(mask, vd, rs1, vs2, vl);
+}
+
+// CHECK-RV64-LABEL: define dso_local <vscale x 1 x bfloat> @test_vfmacc_vv_bf16mf4_mu(
+// CHECK-RV64-SAME: <vscale x 1 x i1> [[MASK:%.*]], <vscale x 1 x bfloat> [[VD:%.*]], <vscale x 1 x bfloat> [[VS1:%.*]], <vscale x 1 x bfloat> [[VS2:%.*]], i64 noundef [[VL:%.*]]) #[[ATTR0]] {
+// CHECK-RV64-NEXT: [[ENTRY:.*:]]
+// CHECK-RV64-NEXT: [[TMP0:%.*]] = call <vscale x 1 x bfloat> @llvm.riscv.vfmacc.mask.nxv1bf16.nxv1bf16.i64(<vscale x 1 x bfloat> [[VD]], <vscale x 1 x bfloat> [[VS1]], <vscale x 1 x bfloat> [[VS2]], <vscale x 1 x i1> [[MASK]], i64 7, i64 [[VL]], i64 1)
+// CHECK-RV64-NEXT: ret <vscale x 1 x bfloat> [[TMP0]]
+//
+vbfloat16mf4_t test_vfmacc_vv_bf16mf4_mu(vbool64_t mask, vbfloat16mf4_t vd, vbfloat16mf4_t vs1, vbfloat16mf4_t vs2, size_t vl) {
+ return __riscv_vfmacc_mu(mask, vd, vs1, vs2, vl);
+}
+
+// CHECK-RV64-LABEL: define dso_local <vscale x 1 x bfloat> @test_vfmacc_vf_bf16mf4_mu(
+// CHECK-RV64-SAME: <vscale x 1 x i1> [[MASK:%.*]], <vscale x 1 x bfloat> [[VD:%.*]], bfloat noundef [[RS1:%.*]], <vscale x 1 x bfloat> [[VS2:%.*]], i64 noundef [[VL:%.*]]) #[[ATTR0]] {
+// CHECK-RV64-NEXT: [[ENTRY:.*:]]
+// CHECK-RV64-NEXT: [[TMP0:%.*]] = call <vscale x 1 x bfloat> @llvm.riscv.vfmacc.mask.nxv1bf16.bf16.i64(<vscale x 1 x bfloat> [[VD]], bfloat [[RS1]], <vscale x 1 x bfloat> [[VS2]], <vscale x 1 x i1> [[MASK]], i64 7, i64 [[VL]], i64 1)
+// CHECK-RV64-NEXT: ret <vscale x 1 x bfloat> [[TMP0]]
+//
+vbfloat16mf4_t test_vfmacc_vf_bf16mf4_mu(vbool64_t mask, vbfloat16mf4_t vd, __bf16 rs1, vbfloat16mf4_t vs2, size_t vl) {
+ return __riscv_vfmacc_mu(mask, vd, rs1, vs2, vl);
+}
+
+// CHECK-RV64-LABEL: define dso_local <vscale x 2 x bfloat> @test_vfmacc_vv_bf16mf2_mu(
+// CHECK-RV64-SAME: <vscale x 2 x i1> [[MASK:%.*]], <vscale x 2 x bfloat> [[VD:%.*]], <vscale x 2 x bfloat> [[VS1:%.*]], <vscale x 2 x bfloat> [[VS2:%.*]], i64 noundef [[VL:%.*]]) #[[ATTR0]] {
+// CHECK-RV64-NEXT: [[ENTRY:.*:]]
+// CHECK-RV64-NEXT: [[TMP0:%.*]] = call <vscale x 2 x bfloat> @llvm.riscv.vfmacc.mask.nxv2bf16.nxv2bf16.i64(<vscale x 2 x bfloat> [[VD]], <vscale x 2 x bfloat> [[VS1]], <vscale x 2 x bfloat> [[VS2]], <vscale x 2 x i1> [[MASK]], i64 7, i64 [[VL]], i64 1)
+// CHECK-RV64-NEXT: ret <vscale x 2 x bfloat> [[TMP0]]
+//
+vbfloat16mf2_t test_vfmacc_vv_bf16mf2_mu(vbool32_t mask, vbfloat16mf2_t vd, vbfloat16mf2_t vs1, vbfloat16mf2_t vs2, size_t vl) {
+ return __riscv_vfmacc_mu(mask, vd, vs1, vs2, vl);
+}
+
+// CHECK-RV64-LABEL: define dso_local <vscale x 2 x bfloat> @test_vfmacc_vf_bf16mf2_mu(
+// CHECK-RV64-SAME: <vscale x 2 x i1> [[MASK:%.*]], <vscale x 2 x bfloat> [[VD:%.*]], bfloat noundef [[RS1:%.*]], <vscale x 2 x bfloat> [[VS2:%.*]], i64 noundef [[VL:%.*]]) #[[ATTR0]] {
+// CHECK-RV64-NEXT: [[ENTRY:.*:]]
+// CHECK-RV64-NEXT: [[TMP0:%.*]] = call <vscale x 2 x bfloat> @llvm.riscv.vfmacc.mask.nxv2bf16.bf16.i64(<vscale x 2 x bfloat> [[VD]], bfloat [[RS1]], <vscale x 2 x bfloat> [[VS2]], <vscale x 2 x i1> [[MASK]], i64 7, i64 [[VL]], i64 1)
+// CHECK-RV64-NEXT: ret <vscale x 2 x bfloat> [[TMP0]]
+//
+vbfloat16mf2_t test_vfmacc_vf_bf16mf2_mu(vbool32_t mask, vbfloat16mf2_t vd, __bf16 rs1, vbfloat16mf2_t vs2, size_t vl) {
+ return __riscv_vfmacc_mu(mask, vd, rs1, vs2, vl);
+}
+
+// CHECK-RV64-LABEL: define dso_local <vscale x 4 x bfloat> @test_vfmacc_vv_bf16m1_mu(
+// CHECK-RV64-SAME: <vscale x 4 x i1> [[MASK:%.*]], <vscale x 4 x bfloat> [[VD:%.*]], <vscale x 4 x bfloat> [[VS1:%.*]], <vscale x 4 x bfloat> [[VS2:%.*]], i64 noundef [[VL:%.*]]) #[[ATTR0]] {
+// CHECK-RV64-NEXT: [[ENTRY:.*:]]
+// CHECK-RV64-NEXT: [[TMP0:%.*]] = call <vscale x 4 x bfloat> @llvm.riscv.vfmacc.mask.nxv4bf16.nxv4bf16.i64(<vscale x 4 x bfloat> [[VD]], <vscale x 4 x bfloat> [[VS1]], <vscale x 4 x bfloat> [[VS2]], <vscale x 4 x i1> [[MASK]], i64 7, i64 [[VL]], i64 1)
+// CHECK-RV64-NEXT: ret <vscale x 4 x bfloat> [[TMP0]]
+//
+vbfloat16m1_t test_vfmacc_vv_bf16m1_mu(vbool16_t mask, vbfloat16m1_t vd, vbfloat16m1_t vs1, vbfloat16m1_t vs2, size_t vl) {
+ return __riscv_vfmacc_mu(mask, vd, vs1, vs2, vl);
+}
+
+// CHECK-RV64-LABEL: define dso_local <vscale x 4 x bfloat> @test_vfmacc_vf_bf16m1_mu(
+// CHECK-RV64-SAME: <vscale x 4 x i1> [[MASK:%.*]], <vscale x 4 x bfloat> [[VD:%.*]], bfloat noundef [[RS1:%.*]], <vscale x 4 x bfloat> [[VS2:%.*]], i64 noundef [[VL:%.*]]) #[[ATTR0]] {
+// CHECK-RV64-NEXT: [[ENTRY:.*:]]
+// CHECK-RV64-NEXT: [[TMP0:%.*]] = call <vscale x 4 x bfloat> @llvm.riscv.vfmacc.mask.nxv4bf16.bf16.i64(<vscale x 4 x bfloat> [[VD]], bfloat [[RS1]], <vscale x 4 x bfloat> [[VS2]], <vscale x 4 x i1> [[MASK]], i64 7, i64 [[VL]], i64 1)
+// CHECK-RV64-NEXT: ret <vscale x 4 x bfloat> [[TMP0]]
+//
+vbfloat16m1_t test_vfmacc_vf_bf16m1_mu(vbool16_t mask, vbfloat16m1_t vd, __bf16 rs1, vbfloat16m1_t vs2, size_t vl) {
+ return __riscv_vfmacc_mu(mask, vd, rs1, vs2, vl);
+}
+
+// CHECK-RV64-LABEL: define dso_local <vscale x 8 x bfloat> @test_vfmacc_vv_bf16m2_mu(
+// CHECK-RV64-SAME: <vscale x 8 x i1> [[MASK:%.*]], <vscale x 8 x bfloat> [[VD:%.*]], <vscale x 8 x bfloat> [[VS1:%.*]], <vscale x 8 x bfloat> [[VS2:%.*]], i64 noundef [[VL:%.*]]) #[[ATTR0]] {
+// CHECK-RV64-NEXT: [[ENTRY:.*:]]
+// CHECK-RV64-NEXT: [[TMP0:%.*]] = call <vscale x 8 x bfloat> @llvm.riscv.vfmacc.mask.nxv8bf16.nxv8bf16.i64(<vscale x 8 x bfloat> [[VD]], <vscale x 8 x bfloat> [[VS1]], <vscale x 8 x bfloat> [[VS2]], <vscale x 8 x i1> [[MASK]], i64 7, i64 [[VL]], i64 1)
+// CHECK-RV64-NEXT: ret <vscale x 8 x bfloat> [[TMP0]]
+//
+vbfloat16m2_t test_vfmacc_vv_bf16m2_mu(vbool8_t mask, vbfloat16m2_t vd, vbfloat16m2_t vs1, vbfloat16m2_t vs2, size_t vl) {
+ return __riscv_vfmacc_mu(mask, vd, vs1, vs2, vl);
+}
+
+// CHECK-RV64-LABEL: define dso_local <vscale x 8 x bfloat> @test_vfmacc_vf_bf16m2_mu(
+// CHECK-RV64-SAME: <vscale x 8 x i1> [[MASK:%.*]], <vscale x 8 x bfloat> [[VD:%.*]], bfloat noundef [[RS1:%.*]], <vscale x 8 x bfloat> [[VS2:%.*]], i64 noundef [[VL:%.*]]) #[[ATTR0]] {
+// CHECK-RV64-NEXT: [[ENTRY:.*:]]
+// CHECK-RV64-NEXT: [[TMP0:%.*]] = call <vscale x 8 x bfloat> @llvm.riscv.vfmacc.mask.nxv8bf16.bf16.i64(<vscale x 8 x bfloat> [[VD]], bfloat [[RS1]], <vscale x 8 x bfloat> [[VS2]], <vscale x 8 x i1> [[MASK]], i64 7, i64 [[VL]], i64 1)
+// CHECK-RV64-NEXT: ret <vscale x 8 x bfloat> [[TMP0]]
+//
+vbfloat16m2_t test_vfmacc_vf_bf16m2_mu(vbool8_t mask, vbfloat16m2_t vd, __bf16 rs1, vbfloat16m2_t vs2, size_t vl) {
+ return __riscv_vfmacc_mu(mask, vd, rs1, vs2, vl);
+}
+
+// CHECK-RV64-LABEL: define dso_local <vscale x 16 x bfloat> @test_vfmacc_vv_bf16m4_mu(
+// CHECK-RV64-SAME: <vscale x 16 x i1> [[MASK:%.*]], <vscale x 16 x bfloat> [[VD:%.*]], <vscale x 16 x bfloat> [[VS1:%.*]], <vscale x 16 x bfloat> [[VS2:%.*]], i64 noundef [[VL:%.*]]) #[[ATTR0]] {
+// CHECK-RV64-NEXT: [[ENTRY:.*:]]
+// CHECK-RV64-NEXT: [[TMP0:%.*]] = call <vscale x 16 x bfloat> @llvm.riscv.vfmacc.mask.nxv16bf16.nxv16bf16.i64(<vscale x 16 x bfloat> [[VD]], <vscale x 16 x bfloat> [[VS1]], <vscale x 16 x bfloat> [[VS2]], <vscale x 16 x i1> [[MASK]], i64 7, i64 [[VL]], i64 1)
+// CHECK-RV64-NEXT: ret <vscale x 16 x bfloat> [[TMP0]]
+//
+vbfloat16m4_t test_vfmacc_vv_bf16m4_mu(vbool4_t mask, vbfloat16m4_t vd, vbfloat16m4_t vs1, vbfloat16m4_t vs2, size_t vl) {
+ return __riscv_vfmacc_mu(mask, vd, vs1, vs2, vl);
+}
+
+// CHECK-RV64-LABEL: define dso_local <vscale x 16 x bfloat> @test_vfmacc_vf_bf16m4_mu(
+// CHECK-RV64-SAME: <vscale x 16 x i1> [[MASK:%.*]], <vscale x 16 x bfloat> [[VD:%.*]], bfloat noundef [[RS1:%.*]], <vscale x 16 x bfloat> [[VS2:%.*]], i64 noundef [[VL:%.*]]) #[[ATTR0]] {
+// CHECK-RV64-NEXT: [[ENTRY:.*:]]
+// CHECK-RV64-NEXT: [[TMP0:%.*]] = call <vscale x 16 x bfloat> @llvm.riscv.vfmacc.mask.nxv16bf16.bf16.i64(<vscale x 16 x bfloat> [[VD]], bfloat [[RS1]], <vscale x 16 x bfloat> [[VS2]], <vscale x 16 x i1> [[MASK]], i64 7, i64 [[VL]], i64 1)
+// CHECK-RV64-NEXT: ret <vscale x 16 x bfloat> [[TMP0]]
+//
+vbfloat16m4_t test_vfmacc_vf_bf16m4_mu(vbool4_t mask, vbfloat16m4_t vd, __bf16 rs1, vbfloat16m4_t vs2, size_t vl) {
+ return __riscv_vfmacc_mu(mask, vd, rs1, vs2, vl);
+}
+
+// CHECK-RV64-LABEL: define dso_local <vscale x 32 x bfloat> @test_vfmacc_vv_bf16m8_mu(
+// CHECK-RV64-SAME: <vscale x 32 x i1> [[MASK:%.*]], <vscale x 32 x bfloat> [[VD:%.*]], <vscale x 32 x bfloat> [[VS1:%.*]], <vscale x 32 x bfloat> [[VS2:%.*]], i64 noundef [[VL:%.*]]) #[[ATTR0]] {
+// CHECK-RV64-NEXT: [[ENTRY:.*:]]
+// CHECK-RV64-NEXT: [[TMP0:%.*]] = call <vscale x 32 x bfloat> @llvm.riscv.vfmacc.mask.nxv32bf16.nxv32bf16.i64(<vscale x 32 x bfloat> [[VD]], <vscale x 32 x bfloat> [[VS1]], <vscale x 32 x bfloat> [[VS2]], <vscale x 32 x i1> [[MASK]], i64 7, i64 [[VL]], i64 1)
+// CHECK-RV64-NEXT: ret <vscale x 32 x bfloat> [[TMP0]]
+//
+vbfloat16m8_t test_vfmacc_vv_bf16m8_mu(vbool2_t mask, vbfloat16m8_t vd, vbfloat16m8_t vs1, vbfloat16m8_t vs2, size_t vl) {
+ return __riscv_vfmacc_mu(mask, vd, vs1, vs2, vl);
+}
+
+// CHECK-RV64-LABEL: define dso_local <vscale x 32 x bfloat> @test_vfmacc_vf_bf16m8_mu(
+// CHECK-RV64-SAME: <vscale x 32 x i1> [[MASK:%.*]], <vscale x 32 x bfloat> [[VD:%.*]], bfloat noundef [[RS1:%.*]], <vscale x 32 x bfloat> [[VS2:%.*]], i64 noundef [[VL:%.*]]) #[[ATTR0]] {
+// CHECK-RV64-NEXT: [[ENTRY:.*:]]
+// CHECK-RV64-NEXT: [[TMP0:%.*]] = call <vscale x 32 x bfloat> @llvm.riscv.vfmacc.mask.nxv32bf16.bf16.i64(<vscale x 32 x bfloat> [[VD]], bfloat [[RS1]], <vscale x 32 x bfloat> [[VS2]], <vscale x 32 x i1> [[MASK]], i64 7, i64 [[VL]], i64 1)
+// CHECK-RV64-NEXT: ret <vscale x 32 x bfloat> [[TMP0]]
+//
+vbfloat16m8_t test_vfmacc_vf_bf16m8_mu(vbool2_t mask, vbfloat16m8_t vd, __bf16 rs1, vbfloat16m8_t vs2, size_t vl) {
+ return __riscv_vfmacc_mu(mask, vd, rs1, vs2, vl);
+}
+
diff --git a/clang/test/CodeGen/RISCV/rvv-intrinsics-autogenerated/zvfbfa/policy/overloaded/vfmadd.c b/clang/test/CodeGen/RISCV/rvv-intrinsics-autogenerated/zvfbfa/policy/overloaded/vfmadd.c
new file mode 100644
index 0000000..68d490d
--- /dev/null
+++ b/clang/test/CodeGen/RISCV/rvv-intrinsics-autogenerated/zvfbfa/policy/overloaded/vfmadd.c
@@ -0,0 +1,489 @@
+// NOTE: Assertions have been autogenerated by utils/update_cc_test_checks.py UTC_ARGS: --version 5
+// REQUIRES: riscv-registered-target
+// RUN: %clang_cc1 -triple riscv64 -target-feature +v \
+// RUN: -target-feature +experimental-zvfbfa -disable-O0-optnone \
+// RUN: -emit-llvm %s -o - | opt -S -passes=mem2reg | \
+// RUN: FileCheck --check-prefix=CHECK-RV64 %s
+
+#include <riscv_vector.h>
+
+// CHECK-RV64-LABEL: define dso_local <vscale x 1 x bfloat> @test_vfmadd_vv_bf16mf4_tu(
+// CHECK-RV64-SAME: <vscale x 1 x bfloat> [[VD:%.*]], <vscale x 1 x bfloat> [[VS1:%.*]], <vscale x 1 x bfloat> [[VS2:%.*]], i64 noundef [[VL:%.*]]) #[[ATTR0:[0-9]+]] {
+// CHECK-RV64-NEXT: [[ENTRY:.*:]]
+// CHECK-RV64-NEXT: [[TMP0:%.*]] = call <vscale x 1 x bfloat> @llvm.riscv.vfmadd.nxv1bf16.nxv1bf16.i64(<vscale x 1 x bfloat> [[VD]], <vscale x 1 x bfloat> [[VS1]], <vscale x 1 x bfloat> [[VS2]], i64 7, i64 [[VL]], i64 2)
+// CHECK-RV64-NEXT: ret <vscale x 1 x bfloat> [[TMP0]]
+//
+vbfloat16mf4_t test_vfmadd_vv_bf16mf4_tu(vbfloat16mf4_t vd, vbfloat16mf4_t vs1, vbfloat16mf4_t vs2, size_t vl) {
+ return __riscv_vfmadd_tu(vd, vs1, vs2, vl);
+}
+
+// CHECK-RV64-LABEL: define dso_local <vscale x 1 x bfloat> @test_vfmadd_vf_bf16mf4_tu(
+// CHECK-RV64-SAME: <vscale x 1 x bfloat> [[VD:%.*]], bfloat noundef [[RS1:%.*]], <vscale x 1 x bfloat> [[VS2:%.*]], i64 noundef [[VL:%.*]]) #[[ATTR0]] {
+// CHECK-RV64-NEXT: [[ENTRY:.*:]]
+// CHECK-RV64-NEXT: [[TMP0:%.*]] = call <vscale x 1 x bfloat> @llvm.riscv.vfmadd.nxv1bf16.bf16.i64(<vscale x 1 x bfloat> [[VD]], bfloat [[RS1]], <vscale x 1 x bfloat> [[VS2]], i64 7, i64 [[VL]], i64 2)
+// CHECK-RV64-NEXT: ret <vscale x 1 x bfloat> [[TMP0]]
+//
+vbfloat16mf4_t test_vfmadd_vf_bf16mf4_tu(vbfloat16mf4_t vd, __bf16 rs1, vbfloat16mf4_t vs2, size_t vl) {
+ return __riscv_vfmadd_tu(vd, rs1, vs2, vl);
+}
+
+// CHECK-RV64-LABEL: define dso_local <vscale x 2 x bfloat> @test_vfmadd_vv_bf16mf2_tu(
+// CHECK-RV64-SAME: <vscale x 2 x bfloat> [[VD:%.*]], <vscale x 2 x bfloat> [[VS1:%.*]], <vscale x 2 x bfloat> [[VS2:%.*]], i64 noundef [[VL:%.*]]) #[[ATTR0]] {
+// CHECK-RV64-NEXT: [[ENTRY:.*:]]
+// CHECK-RV64-NEXT: [[TMP0:%.*]] = call <vscale x 2 x bfloat> @llvm.riscv.vfmadd.nxv2bf16.nxv2bf16.i64(<vscale x 2 x bfloat> [[VD]], <vscale x 2 x bfloat> [[VS1]], <vscale x 2 x bfloat> [[VS2]], i64 7, i64 [[VL]], i64 2)
+// CHECK-RV64-NEXT: ret <vscale x 2 x bfloat> [[TMP0]]
+//
+vbfloat16mf2_t test_vfmadd_vv_bf16mf2_tu(vbfloat16mf2_t vd, vbfloat16mf2_t vs1, vbfloat16mf2_t vs2, size_t vl) {
+ return __riscv_vfmadd_tu(vd, vs1, vs2, vl);
+}
+
+// CHECK-RV64-LABEL: define dso_local <vscale x 2 x bfloat> @test_vfmadd_vf_bf16mf2_tu(
+// CHECK-RV64-SAME: <vscale x 2 x bfloat> [[VD:%.*]], bfloat noundef [[RS1:%.*]], <vscale x 2 x bfloat> [[VS2:%.*]], i64 noundef [[VL:%.*]]) #[[ATTR0]] {
+// CHECK-RV64-NEXT: [[ENTRY:.*:]]
+// CHECK-RV64-NEXT: [[TMP0:%.*]] = call <vscale x 2 x bfloat> @llvm.riscv.vfmadd.nxv2bf16.bf16.i64(<vscale x 2 x bfloat> [[VD]], bfloat [[RS1]], <vscale x 2 x bfloat> [[VS2]], i64 7, i64 [[VL]], i64 2)
+// CHECK-RV64-NEXT: ret <vscale x 2 x bfloat> [[TMP0]]
+//
+vbfloat16mf2_t test_vfmadd_vf_bf16mf2_tu(vbfloat16mf2_t vd, __bf16 rs1, vbfloat16mf2_t vs2, size_t vl) {
+ return __riscv_vfmadd_tu(vd, rs1, vs2, vl);
+}
+
+// CHECK-RV64-LABEL: define dso_local <vscale x 4 x bfloat> @test_vfmadd_vv_bf16m1_tu(
+// CHECK-RV64-SAME: <vscale x 4 x bfloat> [[VD:%.*]], <vscale x 4 x bfloat> [[VS1:%.*]], <vscale x 4 x bfloat> [[VS2:%.*]], i64 noundef [[VL:%.*]]) #[[ATTR0]] {
+// CHECK-RV64-NEXT: [[ENTRY:.*:]]
+// CHECK-RV64-NEXT: [[TMP0:%.*]] = call <vscale x 4 x bfloat> @llvm.riscv.vfmadd.nxv4bf16.nxv4bf16.i64(<vscale x 4 x bfloat> [[VD]], <vscale x 4 x bfloat> [[VS1]], <vscale x 4 x bfloat> [[VS2]], i64 7, i64 [[VL]], i64 2)
+// CHECK-RV64-NEXT: ret <vscale x 4 x bfloat> [[TMP0]]
+//
+vbfloat16m1_t test_vfmadd_vv_bf16m1_tu(vbfloat16m1_t vd, vbfloat16m1_t vs1, vbfloat16m1_t vs2, size_t vl) {
+ return __riscv_vfmadd_tu(vd, vs1, vs2, vl);
+}
+
+// CHECK-RV64-LABEL: define dso_local <vscale x 4 x bfloat> @test_vfmadd_vf_bf16m1_tu(
+// CHECK-RV64-SAME: <vscale x 4 x bfloat> [[VD:%.*]], bfloat noundef [[RS1:%.*]], <vscale x 4 x bfloat> [[VS2:%.*]], i64 noundef [[VL:%.*]]) #[[ATTR0]] {
+// CHECK-RV64-NEXT: [[ENTRY:.*:]]
+// CHECK-RV64-NEXT: [[TMP0:%.*]] = call <vscale x 4 x bfloat> @llvm.riscv.vfmadd.nxv4bf16.bf16.i64(<vscale x 4 x bfloat> [[VD]], bfloat [[RS1]], <vscale x 4 x bfloat> [[VS2]], i64 7, i64 [[VL]], i64 2)
+// CHECK-RV64-NEXT: ret <vscale x 4 x bfloat> [[TMP0]]
+//
+vbfloat16m1_t test_vfmadd_vf_bf16m1_tu(vbfloat16m1_t vd, __bf16 rs1, vbfloat16m1_t vs2, size_t vl) {
+ return __riscv_vfmadd_tu(vd, rs1, vs2, vl);
+}
+
+// CHECK-RV64-LABEL: define dso_local <vscale x 8 x bfloat> @test_vfmadd_vv_bf16m2_tu(
+// CHECK-RV64-SAME: <vscale x 8 x bfloat> [[VD:%.*]], <vscale x 8 x bfloat> [[VS1:%.*]], <vscale x 8 x bfloat> [[VS2:%.*]], i64 noundef [[VL:%.*]]) #[[ATTR0]] {
+// CHECK-RV64-NEXT: [[ENTRY:.*:]]
+// CHECK-RV64-NEXT: [[TMP0:%.*]] = call <vscale x 8 x bfloat> @llvm.riscv.vfmadd.nxv8bf16.nxv8bf16.i64(<vscale x 8 x bfloat> [[VD]], <vscale x 8 x bfloat> [[VS1]], <vscale x 8 x bfloat> [[VS2]], i64 7, i64 [[VL]], i64 2)
+// CHECK-RV64-NEXT: ret <vscale x 8 x bfloat> [[TMP0]]
+//
+vbfloat16m2_t test_vfmadd_vv_bf16m2_tu(vbfloat16m2_t vd, vbfloat16m2_t vs1, vbfloat16m2_t vs2, size_t vl) {
+ return __riscv_vfmadd_tu(vd, vs1, vs2, vl);
+}
+
+// CHECK-RV64-LABEL: define dso_local <vscale x 8 x bfloat> @test_vfmadd_vf_bf16m2_tu(
+// CHECK-RV64-SAME: <vscale x 8 x bfloat> [[VD:%.*]], bfloat noundef [[RS1:%.*]], <vscale x 8 x bfloat> [[VS2:%.*]], i64 noundef [[VL:%.*]]) #[[ATTR0]] {
+// CHECK-RV64-NEXT: [[ENTRY:.*:]]
+// CHECK-RV64-NEXT: [[TMP0:%.*]] = call <vscale x 8 x bfloat> @llvm.riscv.vfmadd.nxv8bf16.bf16.i64(<vscale x 8 x bfloat> [[VD]], bfloat [[RS1]], <vscale x 8 x bfloat> [[VS2]], i64 7, i64 [[VL]], i64 2)
+// CHECK-RV64-NEXT: ret <vscale x 8 x bfloat> [[TMP0]]
+//
+vbfloat16m2_t test_vfmadd_vf_bf16m2_tu(vbfloat16m2_t vd, __bf16 rs1, vbfloat16m2_t vs2, size_t vl) {
+ return __riscv_vfmadd_tu(vd, rs1, vs2, vl);
+}
+
+// CHECK-RV64-LABEL: define dso_local <vscale x 16 x bfloat> @test_vfmadd_vv_bf16m4_tu(
+// CHECK-RV64-SAME: <vscale x 16 x bfloat> [[VD:%.*]], <vscale x 16 x bfloat> [[VS1:%.*]], <vscale x 16 x bfloat> [[VS2:%.*]], i64 noundef [[VL:%.*]]) #[[ATTR0]] {
+// CHECK-RV64-NEXT: [[ENTRY:.*:]]
+// CHECK-RV64-NEXT: [[TMP0:%.*]] = call <vscale x 16 x bfloat> @llvm.riscv.vfmadd.nxv16bf16.nxv16bf16.i64(<vscale x 16 x bfloat> [[VD]], <vscale x 16 x bfloat> [[VS1]], <vscale x 16 x bfloat> [[VS2]], i64 7, i64 [[VL]], i64 2)
+// CHECK-RV64-NEXT: ret <vscale x 16 x bfloat> [[TMP0]]
+//
+vbfloat16m4_t test_vfmadd_vv_bf16m4_tu(vbfloat16m4_t vd, vbfloat16m4_t vs1, vbfloat16m4_t vs2, size_t vl) {
+ return __riscv_vfmadd_tu(vd, vs1, vs2, vl);
+}
+
+// CHECK-RV64-LABEL: define dso_local <vscale x 16 x bfloat> @test_vfmadd_vf_bf16m4_tu(
+// CHECK-RV64-SAME: <vscale x 16 x bfloat> [[VD:%.*]], bfloat noundef [[RS1:%.*]], <vscale x 16 x bfloat> [[VS2:%.*]], i64 noundef [[VL:%.*]]) #[[ATTR0]] {
+// CHECK-RV64-NEXT: [[ENTRY:.*:]]
+// CHECK-RV64-NEXT: [[TMP0:%.*]] = call <vscale x 16 x bfloat> @llvm.riscv.vfmadd.nxv16bf16.bf16.i64(<vscale x 16 x bfloat> [[VD]], bfloat [[RS1]], <vscale x 16 x bfloat> [[VS2]], i64 7, i64 [[VL]], i64 2)
+// CHECK-RV64-NEXT: ret <vscale x 16 x bfloat> [[TMP0]]
+//
+vbfloat16m4_t test_vfmadd_vf_bf16m4_tu(vbfloat16m4_t vd, __bf16 rs1, vbfloat16m4_t vs2, size_t vl) {
+ return __riscv_vfmadd_tu(vd, rs1, vs2, vl);
+}
+
+// CHECK-RV64-LABEL: define dso_local <vscale x 32 x bfloat> @test_vfmadd_vv_bf16m8_tu(
+// CHECK-RV64-SAME: <vscale x 32 x bfloat> [[VD:%.*]], <vscale x 32 x bfloat> [[VS1:%.*]], <vscale x 32 x bfloat> [[VS2:%.*]], i64 noundef [[VL:%.*]]) #[[ATTR0]] {
+// CHECK-RV64-NEXT: [[ENTRY:.*:]]
+// CHECK-RV64-NEXT: [[TMP0:%.*]] = call <vscale x 32 x bfloat> @llvm.riscv.vfmadd.nxv32bf16.nxv32bf16.i64(<vscale x 32 x bfloat> [[VD]], <vscale x 32 x bfloat> [[VS1]], <vscale x 32 x bfloat> [[VS2]], i64 7, i64 [[VL]], i64 2)
+// CHECK-RV64-NEXT: ret <vscale x 32 x bfloat> [[TMP0]]
+//
+vbfloat16m8_t test_vfmadd_vv_bf16m8_tu(vbfloat16m8_t vd, vbfloat16m8_t vs1, vbfloat16m8_t vs2, size_t vl) {
+ return __riscv_vfmadd_tu(vd, vs1, vs2, vl);
+}
+
+// CHECK-RV64-LABEL: define dso_local <vscale x 32 x bfloat> @test_vfmadd_vf_bf16m8_tu(
+// CHECK-RV64-SAME: <vscale x 32 x bfloat> [[VD:%.*]], bfloat noundef [[RS1:%.*]], <vscale x 32 x bfloat> [[VS2:%.*]], i64 noundef [[VL:%.*]]) #[[ATTR0]] {
+// CHECK-RV64-NEXT: [[ENTRY:.*:]]
+// CHECK-RV64-NEXT: [[TMP0:%.*]] = call <vscale x 32 x bfloat> @llvm.riscv.vfmadd.nxv32bf16.bf16.i64(<vscale x 32 x bfloat> [[VD]], bfloat [[RS1]], <vscale x 32 x bfloat> [[VS2]], i64 7, i64 [[VL]], i64 2)
+// CHECK-RV64-NEXT: ret <vscale x 32 x bfloat> [[TMP0]]
+//
+vbfloat16m8_t test_vfmadd_vf_bf16m8_tu(vbfloat16m8_t vd, __bf16 rs1, vbfloat16m8_t vs2, size_t vl) {
+ return __riscv_vfmadd_tu(vd, rs1, vs2, vl);
+}
+
+// CHECK-RV64-LABEL: define dso_local <vscale x 1 x bfloat> @test_vfmadd_vv_bf16mf4_tum(
+// CHECK-RV64-SAME: <vscale x 1 x i1> [[MASK:%.*]], <vscale x 1 x bfloat> [[VD:%.*]], <vscale x 1 x bfloat> [[VS1:%.*]], <vscale x 1 x bfloat> [[VS2:%.*]], i64 noundef [[VL:%.*]]) #[[ATTR0]] {
+// CHECK-RV64-NEXT: [[ENTRY:.*:]]
+// CHECK-RV64-NEXT: [[TMP0:%.*]] = call <vscale x 1 x bfloat> @llvm.riscv.vfmadd.mask.nxv1bf16.nxv1bf16.i64(<vscale x 1 x bfloat> [[VD]], <vscale x 1 x bfloat> [[VS1]], <vscale x 1 x bfloat> [[VS2]], <vscale x 1 x i1> [[MASK]], i64 7, i64 [[VL]], i64 2)
+// CHECK-RV64-NEXT: ret <vscale x 1 x bfloat> [[TMP0]]
+//
+vbfloat16mf4_t test_vfmadd_vv_bf16mf4_tum(vbool64_t mask, vbfloat16mf4_t vd, vbfloat16mf4_t vs1, vbfloat16mf4_t vs2, size_t vl) {
+ return __riscv_vfmadd_tum(mask, vd, vs1, vs2, vl);
+}
+
+// CHECK-RV64-LABEL: define dso_local <vscale x 1 x bfloat> @test_vfmadd_vf_bf16mf4_tum(
+// CHECK-RV64-SAME: <vscale x 1 x i1> [[MASK:%.*]], <vscale x 1 x bfloat> [[VD:%.*]], bfloat noundef [[RS1:%.*]], <vscale x 1 x bfloat> [[VS2:%.*]], i64 noundef [[VL:%.*]]) #[[ATTR0]] {
+// CHECK-RV64-NEXT: [[ENTRY:.*:]]
+// CHECK-RV64-NEXT: [[TMP0:%.*]] = call <vscale x 1 x bfloat> @llvm.riscv.vfmadd.mask.nxv1bf16.bf16.i64(<vscale x 1 x bfloat> [[VD]], bfloat [[RS1]], <vscale x 1 x bfloat> [[VS2]], <vscale x 1 x i1> [[MASK]], i64 7, i64 [[VL]], i64 2)
+// CHECK-RV64-NEXT: ret <vscale x 1 x bfloat> [[TMP0]]
+//
+vbfloat16mf4_t test_vfmadd_vf_bf16mf4_tum(vbool64_t mask, vbfloat16mf4_t vd, __bf16 rs1, vbfloat16mf4_t vs2, size_t vl) {
+ return __riscv_vfmadd_tum(mask, vd, rs1, vs2, vl);
+}
+
+// CHECK-RV64-LABEL: define dso_local <vscale x 2 x bfloat> @test_vfmadd_vv_bf16mf2_tum(
+// CHECK-RV64-SAME: <vscale x 2 x i1> [[MASK:%.*]], <vscale x 2 x bfloat> [[VD:%.*]], <vscale x 2 x bfloat> [[VS1:%.*]], <vscale x 2 x bfloat> [[VS2:%.*]], i64 noundef [[VL:%.*]]) #[[ATTR0]] {
+// CHECK-RV64-NEXT: [[ENTRY:.*:]]
+// CHECK-RV64-NEXT: [[TMP0:%.*]] = call <vscale x 2 x bfloat> @llvm.riscv.vfmadd.mask.nxv2bf16.nxv2bf16.i64(<vscale x 2 x bfloat> [[VD]], <vscale x 2 x bfloat> [[VS1]], <vscale x 2 x bfloat> [[VS2]], <vscale x 2 x i1> [[MASK]], i64 7, i64 [[VL]], i64 2)
+// CHECK-RV64-NEXT: ret <vscale x 2 x bfloat> [[TMP0]]
+//
+vbfloat16mf2_t test_vfmadd_vv_bf16mf2_tum(vbool32_t mask, vbfloat16mf2_t vd, vbfloat16mf2_t vs1, vbfloat16mf2_t vs2, size_t vl) {
+ return __riscv_vfmadd_tum(mask, vd, vs1, vs2, vl);
+}
+
+// CHECK-RV64-LABEL: define dso_local <vscale x 2 x bfloat> @test_vfmadd_vf_bf16mf2_tum(
+// CHECK-RV64-SAME: <vscale x 2 x i1> [[MASK:%.*]], <vscale x 2 x bfloat> [[VD:%.*]], bfloat noundef [[RS1:%.*]], <vscale x 2 x bfloat> [[VS2:%.*]], i64 noundef [[VL:%.*]]) #[[ATTR0]] {
+// CHECK-RV64-NEXT: [[ENTRY:.*:]]
+// CHECK-RV64-NEXT: [[TMP0:%.*]] = call <vscale x 2 x bfloat> @llvm.riscv.vfmadd.mask.nxv2bf16.bf16.i64(<vscale x 2 x bfloat> [[VD]], bfloat [[RS1]], <vscale x 2 x bfloat> [[VS2]], <vscale x 2 x i1> [[MASK]], i64 7, i64 [[VL]], i64 2)
+// CHECK-RV64-NEXT: ret <vscale x 2 x bfloat> [[TMP0]]
+//
+vbfloat16mf2_t test_vfmadd_vf_bf16mf2_tum(vbool32_t mask, vbfloat16mf2_t vd, __bf16 rs1, vbfloat16mf2_t vs2, size_t vl) {
+ return __riscv_vfmadd_tum(mask, vd, rs1, vs2, vl);
+}
+
+// CHECK-RV64-LABEL: define dso_local <vscale x 4 x bfloat> @test_vfmadd_vv_bf16m1_tum(
+// CHECK-RV64-SAME: <vscale x 4 x i1> [[MASK:%.*]], <vscale x 4 x bfloat> [[VD:%.*]], <vscale x 4 x bfloat> [[VS1:%.*]], <vscale x 4 x bfloat> [[VS2:%.*]], i64 noundef [[VL:%.*]]) #[[ATTR0]] {
+// CHECK-RV64-NEXT: [[ENTRY:.*:]]
+// CHECK-RV64-NEXT: [[TMP0:%.*]] = call <vscale x 4 x bfloat> @llvm.riscv.vfmadd.mask.nxv4bf16.nxv4bf16.i64(<vscale x 4 x bfloat> [[VD]], <vscale x 4 x bfloat> [[VS1]], <vscale x 4 x bfloat> [[VS2]], <vscale x 4 x i1> [[MASK]], i64 7, i64 [[VL]], i64 2)
+// CHECK-RV64-NEXT: ret <vscale x 4 x bfloat> [[TMP0]]
+//
+vbfloat16m1_t test_vfmadd_vv_bf16m1_tum(vbool16_t mask, vbfloat16m1_t vd, vbfloat16m1_t vs1, vbfloat16m1_t vs2, size_t vl) {
+ return __riscv_vfmadd_tum(mask, vd, vs1, vs2, vl);
+}
+
+// CHECK-RV64-LABEL: define dso_local <vscale x 4 x bfloat> @test_vfmadd_vf_bf16m1_tum(
+// CHECK-RV64-SAME: <vscale x 4 x i1> [[MASK:%.*]], <vscale x 4 x bfloat> [[VD:%.*]], bfloat noundef [[RS1:%.*]], <vscale x 4 x bfloat> [[VS2:%.*]], i64 noundef [[VL:%.*]]) #[[ATTR0]] {
+// CHECK-RV64-NEXT: [[ENTRY:.*:]]
+// CHECK-RV64-NEXT: [[TMP0:%.*]] = call <vscale x 4 x bfloat> @llvm.riscv.vfmadd.mask.nxv4bf16.bf16.i64(<vscale x 4 x bfloat> [[VD]], bfloat [[RS1]], <vscale x 4 x bfloat> [[VS2]], <vscale x 4 x i1> [[MASK]], i64 7, i64 [[VL]], i64 2)
+// CHECK-RV64-NEXT: ret <vscale x 4 x bfloat> [[TMP0]]
+//
+vbfloat16m1_t test_vfmadd_vf_bf16m1_tum(vbool16_t mask, vbfloat16m1_t vd, __bf16 rs1, vbfloat16m1_t vs2, size_t vl) {
+ return __riscv_vfmadd_tum(mask, vd, rs1, vs2, vl);
+}
+
+// CHECK-RV64-LABEL: define dso_local <vscale x 8 x bfloat> @test_vfmadd_vv_bf16m2_tum(
+// CHECK-RV64-SAME: <vscale x 8 x i1> [[MASK:%.*]], <vscale x 8 x bfloat> [[VD:%.*]], <vscale x 8 x bfloat> [[VS1:%.*]], <vscale x 8 x bfloat> [[VS2:%.*]], i64 noundef [[VL:%.*]]) #[[ATTR0]] {
+// CHECK-RV64-NEXT: [[ENTRY:.*:]]
+// CHECK-RV64-NEXT: [[TMP0:%.*]] = call <vscale x 8 x bfloat> @llvm.riscv.vfmadd.mask.nxv8bf16.nxv8bf16.i64(<vscale x 8 x bfloat> [[VD]], <vscale x 8 x bfloat> [[VS1]], <vscale x 8 x bfloat> [[VS2]], <vscale x 8 x i1> [[MASK]], i64 7, i64 [[VL]], i64 2)
+// CHECK-RV64-NEXT: ret <vscale x 8 x bfloat> [[TMP0]]
+//
+vbfloat16m2_t test_vfmadd_vv_bf16m2_tum(vbool8_t mask, vbfloat16m2_t vd, vbfloat16m2_t vs1, vbfloat16m2_t vs2, size_t vl) {
+ return __riscv_vfmadd_tum(mask, vd, vs1, vs2, vl);
+}
+
+// CHECK-RV64-LABEL: define dso_local <vscale x 8 x bfloat> @test_vfmadd_vf_bf16m2_tum(
+// CHECK-RV64-SAME: <vscale x 8 x i1> [[MASK:%.*]], <vscale x 8 x bfloat> [[VD:%.*]], bfloat noundef [[RS1:%.*]], <vscale x 8 x bfloat> [[VS2:%.*]], i64 noundef [[VL:%.*]]) #[[ATTR0]] {
+// CHECK-RV64-NEXT: [[ENTRY:.*:]]
+// CHECK-RV64-NEXT: [[TMP0:%.*]] = call <vscale x 8 x bfloat> @llvm.riscv.vfmadd.mask.nxv8bf16.bf16.i64(<vscale x 8 x bfloat> [[VD]], bfloat [[RS1]], <vscale x 8 x bfloat> [[VS2]], <vscale x 8 x i1> [[MASK]], i64 7, i64 [[VL]], i64 2)
+// CHECK-RV64-NEXT: ret <vscale x 8 x bfloat> [[TMP0]]
+//
+vbfloat16m2_t test_vfmadd_vf_bf16m2_tum(vbool8_t mask, vbfloat16m2_t vd, __bf16 rs1, vbfloat16m2_t vs2, size_t vl) {
+ return __riscv_vfmadd_tum(mask, vd, rs1, vs2, vl);
+}
+
+// CHECK-RV64-LABEL: define dso_local <vscale x 16 x bfloat> @test_vfmadd_vv_bf16m4_tum(
+// CHECK-RV64-SAME: <vscale x 16 x i1> [[MASK:%.*]], <vscale x 16 x bfloat> [[VD:%.*]], <vscale x 16 x bfloat> [[VS1:%.*]], <vscale x 16 x bfloat> [[VS2:%.*]], i64 noundef [[VL:%.*]]) #[[ATTR0]] {
+// CHECK-RV64-NEXT: [[ENTRY:.*:]]
+// CHECK-RV64-NEXT: [[TMP0:%.*]] = call <vscale x 16 x bfloat> @llvm.riscv.vfmadd.mask.nxv16bf16.nxv16bf16.i64(<vscale x 16 x bfloat> [[VD]], <vscale x 16 x bfloat> [[VS1]], <vscale x 16 x bfloat> [[VS2]], <vscale x 16 x i1> [[MASK]], i64 7, i64 [[VL]], i64 2)
+// CHECK-RV64-NEXT: ret <vscale x 16 x bfloat> [[TMP0]]
+//
+vbfloat16m4_t test_vfmadd_vv_bf16m4_tum(vbool4_t mask, vbfloat16m4_t vd, vbfloat16m4_t vs1, vbfloat16m4_t vs2, size_t vl) {
+ return __riscv_vfmadd_tum(mask, vd, vs1, vs2, vl);
+}
+
+// CHECK-RV64-LABEL: define dso_local <vscale x 16 x bfloat> @test_vfmadd_vf_bf16m4_tum(
+// CHECK-RV64-SAME: <vscale x 16 x i1> [[MASK:%.*]], <vscale x 16 x bfloat> [[VD:%.*]], bfloat noundef [[RS1:%.*]], <vscale x 16 x bfloat> [[VS2:%.*]], i64 noundef [[VL:%.*]]) #[[ATTR0]] {
+// CHECK-RV64-NEXT: [[ENTRY:.*:]]
+// CHECK-RV64-NEXT: [[TMP0:%.*]] = call <vscale x 16 x bfloat> @llvm.riscv.vfmadd.mask.nxv16bf16.bf16.i64(<vscale x 16 x bfloat> [[VD]], bfloat [[RS1]], <vscale x 16 x bfloat> [[VS2]], <vscale x 16 x i1> [[MASK]], i64 7, i64 [[VL]], i64 2)
+// CHECK-RV64-NEXT: ret <vscale x 16 x bfloat> [[TMP0]]
+//
+vbfloat16m4_t test_vfmadd_vf_bf16m4_tum(vbool4_t mask, vbfloat16m4_t vd, __bf16 rs1, vbfloat16m4_t vs2, size_t vl) {
+ return __riscv_vfmadd_tum(mask, vd, rs1, vs2, vl);
+}
+
+// CHECK-RV64-LABEL: define dso_local <vscale x 32 x bfloat> @test_vfmadd_vv_bf16m8_tum(
+// CHECK-RV64-SAME: <vscale x 32 x i1> [[MASK:%.*]], <vscale x 32 x bfloat> [[VD:%.*]], <vscale x 32 x bfloat> [[VS1:%.*]], <vscale x 32 x bfloat> [[VS2:%.*]], i64 noundef [[VL:%.*]]) #[[ATTR0]] {
+// CHECK-RV64-NEXT: [[ENTRY:.*:]]
+// CHECK-RV64-NEXT: [[TMP0:%.*]] = call <vscale x 32 x bfloat> @llvm.riscv.vfmadd.mask.nxv32bf16.nxv32bf16.i64(<vscale x 32 x bfloat> [[VD]], <vscale x 32 x bfloat> [[VS1]], <vscale x 32 x bfloat> [[VS2]], <vscale x 32 x i1> [[MASK]], i64 7, i64 [[VL]], i64 2)
+// CHECK-RV64-NEXT: ret <vscale x 32 x bfloat> [[TMP0]]
+//
+vbfloat16m8_t test_vfmadd_vv_bf16m8_tum(vbool2_t mask, vbfloat16m8_t vd, vbfloat16m8_t vs1, vbfloat16m8_t vs2, size_t vl) {
+ return __riscv_vfmadd_tum(mask, vd, vs1, vs2, vl);
+}
+
+// CHECK-RV64-LABEL: define dso_local <vscale x 32 x bfloat> @test_vfmadd_vf_bf16m8_tum(
+// CHECK-RV64-SAME: <vscale x 32 x i1> [[MASK:%.*]], <vscale x 32 x bfloat> [[VD:%.*]], bfloat noundef [[RS1:%.*]], <vscale x 32 x bfloat> [[VS2:%.*]], i64 noundef [[VL:%.*]]) #[[ATTR0]] {
+// CHECK-RV64-NEXT: [[ENTRY:.*:]]
+// CHECK-RV64-NEXT: [[TMP0:%.*]] = call <vscale x 32 x bfloat> @llvm.riscv.vfmadd.mask.nxv32bf16.bf16.i64(<vscale x 32 x bfloat> [[VD]], bfloat [[RS1]], <vscale x 32 x bfloat> [[VS2]], <vscale x 32 x i1> [[MASK]], i64 7, i64 [[VL]], i64 2)
+// CHECK-RV64-NEXT: ret <vscale x 32 x bfloat> [[TMP0]]
+//
+vbfloat16m8_t test_vfmadd_vf_bf16m8_tum(vbool2_t mask, vbfloat16m8_t vd, __bf16 rs1, vbfloat16m8_t vs2, size_t vl) {
+ return __riscv_vfmadd_tum(mask, vd, rs1, vs2, vl);
+}
+
+// CHECK-RV64-LABEL: define dso_local <vscale x 1 x bfloat> @test_vfmadd_vv_bf16mf4_tumu(
+// CHECK-RV64-SAME: <vscale x 1 x i1> [[MASK:%.*]], <vscale x 1 x bfloat> [[VD:%.*]], <vscale x 1 x bfloat> [[VS1:%.*]], <vscale x 1 x bfloat> [[VS2:%.*]], i64 noundef [[VL:%.*]]) #[[ATTR0]] {
+// CHECK-RV64-NEXT: [[ENTRY:.*:]]
+// CHECK-RV64-NEXT: [[TMP0:%.*]] = call <vscale x 1 x bfloat> @llvm.riscv.vfmadd.mask.nxv1bf16.nxv1bf16.i64(<vscale x 1 x bfloat> [[VD]], <vscale x 1 x bfloat> [[VS1]], <vscale x 1 x bfloat> [[VS2]], <vscale x 1 x i1> [[MASK]], i64 7, i64 [[VL]], i64 0)
+// CHECK-RV64-NEXT: ret <vscale x 1 x bfloat> [[TMP0]]
+//
+vbfloat16mf4_t test_vfmadd_vv_bf16mf4_tumu(vbool64_t mask, vbfloat16mf4_t vd, vbfloat16mf4_t vs1, vbfloat16mf4_t vs2, size_t vl) {
+ return __riscv_vfmadd_tumu(mask, vd, vs1, vs2, vl);
+}
+
+// CHECK-RV64-LABEL: define dso_local <vscale x 1 x bfloat> @test_vfmadd_vf_bf16mf4_tumu(
+// CHECK-RV64-SAME: <vscale x 1 x i1> [[MASK:%.*]], <vscale x 1 x bfloat> [[VD:%.*]], bfloat noundef [[RS1:%.*]], <vscale x 1 x bfloat> [[VS2:%.*]], i64 noundef [[VL:%.*]]) #[[ATTR0]] {
+// CHECK-RV64-NEXT: [[ENTRY:.*:]]
+// CHECK-RV64-NEXT: [[TMP0:%.*]] = call <vscale x 1 x bfloat> @llvm.riscv.vfmadd.mask.nxv1bf16.bf16.i64(<vscale x 1 x bfloat> [[VD]], bfloat [[RS1]], <vscale x 1 x bfloat> [[VS2]], <vscale x 1 x i1> [[MASK]], i64 7, i64 [[VL]], i64 0)
+// CHECK-RV64-NEXT: ret <vscale x 1 x bfloat> [[TMP0]]
+//
+vbfloat16mf4_t test_vfmadd_vf_bf16mf4_tumu(vbool64_t mask, vbfloat16mf4_t vd, __bf16 rs1, vbfloat16mf4_t vs2, size_t vl) {
+ return __riscv_vfmadd_tumu(mask, vd, rs1, vs2, vl);
+}
+
+// CHECK-RV64-LABEL: define dso_local <vscale x 2 x bfloat> @test_vfmadd_vv_bf16mf2_tumu(
+// CHECK-RV64-SAME: <vscale x 2 x i1> [[MASK:%.*]], <vscale x 2 x bfloat> [[VD:%.*]], <vscale x 2 x bfloat> [[VS1:%.*]], <vscale x 2 x bfloat> [[VS2:%.*]], i64 noundef [[VL:%.*]]) #[[ATTR0]] {
+// CHECK-RV64-NEXT: [[ENTRY:.*:]]
+// CHECK-RV64-NEXT: [[TMP0:%.*]] = call <vscale x 2 x bfloat> @llvm.riscv.vfmadd.mask.nxv2bf16.nxv2bf16.i64(<vscale x 2 x bfloat> [[VD]], <vscale x 2 x bfloat> [[VS1]], <vscale x 2 x bfloat> [[VS2]], <vscale x 2 x i1> [[MASK]], i64 7, i64 [[VL]], i64 0)
+// CHECK-RV64-NEXT: ret <vscale x 2 x bfloat> [[TMP0]]
+//
+vbfloat16mf2_t test_vfmadd_vv_bf16mf2_tumu(vbool32_t mask, vbfloat16mf2_t vd, vbfloat16mf2_t vs1, vbfloat16mf2_t vs2, size_t vl) {
+ return __riscv_vfmadd_tumu(mask, vd, vs1, vs2, vl);
+}
+
+// CHECK-RV64-LABEL: define dso_local <vscale x 2 x bfloat> @test_vfmadd_vf_bf16mf2_tumu(
+// CHECK-RV64-SAME: <vscale x 2 x i1> [[MASK:%.*]], <vscale x 2 x bfloat> [[VD:%.*]], bfloat noundef [[RS1:%.*]], <vscale x 2 x bfloat> [[VS2:%.*]], i64 noundef [[VL:%.*]]) #[[ATTR0]] {
+// CHECK-RV64-NEXT: [[ENTRY:.*:]]
+// CHECK-RV64-NEXT: [[TMP0:%.*]] = call <vscale x 2 x bfloat> @llvm.riscv.vfmadd.mask.nxv2bf16.bf16.i64(<vscale x 2 x bfloat> [[VD]], bfloat [[RS1]], <vscale x 2 x bfloat> [[VS2]], <vscale x 2 x i1> [[MASK]], i64 7, i64 [[VL]], i64 0)
+// CHECK-RV64-NEXT: ret <vscale x 2 x bfloat> [[TMP0]]
+//
+vbfloat16mf2_t test_vfmadd_vf_bf16mf2_tumu(vbool32_t mask, vbfloat16mf2_t vd, __bf16 rs1, vbfloat16mf2_t vs2, size_t vl) {
+ return __riscv_vfmadd_tumu(mask, vd, rs1, vs2, vl);
+}
+
+// CHECK-RV64-LABEL: define dso_local <vscale x 4 x bfloat> @test_vfmadd_vv_bf16m1_tumu(
+// CHECK-RV64-SAME: <vscale x 4 x i1> [[MASK:%.*]], <vscale x 4 x bfloat> [[VD:%.*]], <vscale x 4 x bfloat> [[VS1:%.*]], <vscale x 4 x bfloat> [[VS2:%.*]], i64 noundef [[VL:%.*]]) #[[ATTR0]] {
+// CHECK-RV64-NEXT: [[ENTRY:.*:]]
+// CHECK-RV64-NEXT: [[TMP0:%.*]] = call <vscale x 4 x bfloat> @llvm.riscv.vfmadd.mask.nxv4bf16.nxv4bf16.i64(<vscale x 4 x bfloat> [[VD]], <vscale x 4 x bfloat> [[VS1]], <vscale x 4 x bfloat> [[VS2]], <vscale x 4 x i1> [[MASK]], i64 7, i64 [[VL]], i64 0)
+// CHECK-RV64-NEXT: ret <vscale x 4 x bfloat> [[TMP0]]
+//
+vbfloat16m1_t test_vfmadd_vv_bf16m1_tumu(vbool16_t mask, vbfloat16m1_t vd, vbfloat16m1_t vs1, vbfloat16m1_t vs2, size_t vl) {
+ return __riscv_vfmadd_tumu(mask, vd, vs1, vs2, vl);
+}
+
+// CHECK-RV64-LABEL: define dso_local <vscale x 4 x bfloat> @test_vfmadd_vf_bf16m1_tumu(
+// CHECK-RV64-SAME: <vscale x 4 x i1> [[MASK:%.*]], <vscale x 4 x bfloat> [[VD:%.*]], bfloat noundef [[RS1:%.*]], <vscale x 4 x bfloat> [[VS2:%.*]], i64 noundef [[VL:%.*]]) #[[ATTR0]] {
+// CHECK-RV64-NEXT: [[ENTRY:.*:]]
+// CHECK-RV64-NEXT: [[TMP0:%.*]] = call <vscale x 4 x bfloat> @llvm.riscv.vfmadd.mask.nxv4bf16.bf16.i64(<vscale x 4 x bfloat> [[VD]], bfloat [[RS1]], <vscale x 4 x bfloat> [[VS2]], <vscale x 4 x i1> [[MASK]], i64 7, i64 [[VL]], i64 0)
+// CHECK-RV64-NEXT: ret <vscale x 4 x bfloat> [[TMP0]]
+//
+vbfloat16m1_t test_vfmadd_vf_bf16m1_tumu(vbool16_t mask, vbfloat16m1_t vd, __bf16 rs1, vbfloat16m1_t vs2, size_t vl) {
+ return __riscv_vfmadd_tumu(mask, vd, rs1, vs2, vl);
+}
+
+// CHECK-RV64-LABEL: define dso_local <vscale x 8 x bfloat> @test_vfmadd_vv_bf16m2_tumu(
+// CHECK-RV64-SAME: <vscale x 8 x i1> [[MASK:%.*]], <vscale x 8 x bfloat> [[VD:%.*]], <vscale x 8 x bfloat> [[VS1:%.*]], <vscale x 8 x bfloat> [[VS2:%.*]], i64 noundef [[VL:%.*]]) #[[ATTR0]] {
+// CHECK-RV64-NEXT: [[ENTRY:.*:]]
+// CHECK-RV64-NEXT: [[TMP0:%.*]] = call <vscale x 8 x bfloat> @llvm.riscv.vfmadd.mask.nxv8bf16.nxv8bf16.i64(<vscale x 8 x bfloat> [[VD]], <vscale x 8 x bfloat> [[VS1]], <vscale x 8 x bfloat> [[VS2]], <vscale x 8 x i1> [[MASK]], i64 7, i64 [[VL]], i64 0)
+// CHECK-RV64-NEXT: ret <vscale x 8 x bfloat> [[TMP0]]
+//
+vbfloat16m2_t test_vfmadd_vv_bf16m2_tumu(vbool8_t mask, vbfloat16m2_t vd, vbfloat16m2_t vs1, vbfloat16m2_t vs2, size_t vl) {
+ return __riscv_vfmadd_tumu(mask, vd, vs1, vs2, vl);
+}
+
+// CHECK-RV64-LABEL: define dso_local <vscale x 8 x bfloat> @test_vfmadd_vf_bf16m2_tumu(
+// CHECK-RV64-SAME: <vscale x 8 x i1> [[MASK:%.*]], <vscale x 8 x bfloat> [[VD:%.*]], bfloat noundef [[RS1:%.*]], <vscale x 8 x bfloat> [[VS2:%.*]], i64 noundef [[VL:%.*]]) #[[ATTR0]] {
+// CHECK-RV64-NEXT: [[ENTRY:.*:]]
+// CHECK-RV64-NEXT: [[TMP0:%.*]] = call <vscale x 8 x bfloat> @llvm.riscv.vfmadd.mask.nxv8bf16.bf16.i64(<vscale x 8 x bfloat> [[VD]], bfloat [[RS1]], <vscale x 8 x bfloat> [[VS2]], <vscale x 8 x i1> [[MASK]], i64 7, i64 [[VL]], i64 0)
+// CHECK-RV64-NEXT: ret <vscale x 8 x bfloat> [[TMP0]]
+//
+vbfloat16m2_t test_vfmadd_vf_bf16m2_tumu(vbool8_t mask, vbfloat16m2_t vd, __bf16 rs1, vbfloat16m2_t vs2, size_t vl) {
+ return __riscv_vfmadd_tumu(mask, vd, rs1, vs2, vl);
+}
+
+// CHECK-RV64-LABEL: define dso_local <vscale x 16 x bfloat> @test_vfmadd_vv_bf16m4_tumu(
+// CHECK-RV64-SAME: <vscale x 16 x i1> [[MASK:%.*]], <vscale x 16 x bfloat> [[VD:%.*]], <vscale x 16 x bfloat> [[VS1:%.*]], <vscale x 16 x bfloat> [[VS2:%.*]], i64 noundef [[VL:%.*]]) #[[ATTR0]] {
+// CHECK-RV64-NEXT: [[ENTRY:.*:]]
+// CHECK-RV64-NEXT: [[TMP0:%.*]] = call <vscale x 16 x bfloat> @llvm.riscv.vfmadd.mask.nxv16bf16.nxv16bf16.i64(<vscale x 16 x bfloat> [[VD]], <vscale x 16 x bfloat> [[VS1]], <vscale x 16 x bfloat> [[VS2]], <vscale x 16 x i1> [[MASK]], i64 7, i64 [[VL]], i64 0)
+// CHECK-RV64-NEXT: ret <vscale x 16 x bfloat> [[TMP0]]
+//
+vbfloat16m4_t test_vfmadd_vv_bf16m4_tumu(vbool4_t mask, vbfloat16m4_t vd, vbfloat16m4_t vs1, vbfloat16m4_t vs2, size_t vl) {
+ return __riscv_vfmadd_tumu(mask, vd, vs1, vs2, vl);
+}
+
+// CHECK-RV64-LABEL: define dso_local <vscale x 16 x bfloat> @test_vfmadd_vf_bf16m4_tumu(
+// CHECK-RV64-SAME: <vscale x 16 x i1> [[MASK:%.*]], <vscale x 16 x bfloat> [[VD:%.*]], bfloat noundef [[RS1:%.*]], <vscale x 16 x bfloat> [[VS2:%.*]], i64 noundef [[VL:%.*]]) #[[ATTR0]] {
+// CHECK-RV64-NEXT: [[ENTRY:.*:]]
+// CHECK-RV64-NEXT: [[TMP0:%.*]] = call <vscale x 16 x bfloat> @llvm.riscv.vfmadd.mask.nxv16bf16.bf16.i64(<vscale x 16 x bfloat> [[VD]], bfloat [[RS1]], <vscale x 16 x bfloat> [[VS2]], <vscale x 16 x i1> [[MASK]], i64 7, i64 [[VL]], i64 0)
+// CHECK-RV64-NEXT: ret <vscale x 16 x bfloat> [[TMP0]]
+//
+vbfloat16m4_t test_vfmadd_vf_bf16m4_tumu(vbool4_t mask, vbfloat16m4_t vd, __bf16 rs1, vbfloat16m4_t vs2, size_t vl) {
+ return __riscv_vfmadd_tumu(mask, vd, rs1, vs2, vl);
+}
+
+// CHECK-RV64-LABEL: define dso_local <vscale x 32 x bfloat> @test_vfmadd_vv_bf16m8_tumu(
+// CHECK-RV64-SAME: <vscale x 32 x i1> [[MASK:%.*]], <vscale x 32 x bfloat> [[VD:%.*]], <vscale x 32 x bfloat> [[VS1:%.*]], <vscale x 32 x bfloat> [[VS2:%.*]], i64 noundef [[VL:%.*]]) #[[ATTR0]] {
+// CHECK-RV64-NEXT: [[ENTRY:.*:]]
+// CHECK-RV64-NEXT: [[TMP0:%.*]] = call <vscale x 32 x bfloat> @llvm.riscv.vfmadd.mask.nxv32bf16.nxv32bf16.i64(<vscale x 32 x bfloat> [[VD]], <vscale x 32 x bfloat> [[VS1]], <vscale x 32 x bfloat> [[VS2]], <vscale x 32 x i1> [[MASK]], i64 7, i64 [[VL]], i64 0)
+// CHECK-RV64-NEXT: ret <vscale x 32 x bfloat> [[TMP0]]
+//
+vbfloat16m8_t test_vfmadd_vv_bf16m8_tumu(vbool2_t mask, vbfloat16m8_t vd, vbfloat16m8_t vs1, vbfloat16m8_t vs2, size_t vl) {
+ return __riscv_vfmadd_tumu(mask, vd, vs1, vs2, vl);
+}
+
+// CHECK-RV64-LABEL: define dso_local <vscale x 32 x bfloat> @test_vfmadd_vf_bf16m8_tumu(
+// CHECK-RV64-SAME: <vscale x 32 x i1> [[MASK:%.*]], <vscale x 32 x bfloat> [[VD:%.*]], bfloat noundef [[RS1:%.*]], <vscale x 32 x bfloat> [[VS2:%.*]], i64 noundef [[VL:%.*]]) #[[ATTR0]] {
+// CHECK-RV64-NEXT: [[ENTRY:.*:]]
+// CHECK-RV64-NEXT: [[TMP0:%.*]] = call <vscale x 32 x bfloat> @llvm.riscv.vfmadd.mask.nxv32bf16.bf16.i64(<vscale x 32 x bfloat> [[VD]], bfloat [[RS1]], <vscale x 32 x bfloat> [[VS2]], <vscale x 32 x i1> [[MASK]], i64 7, i64 [[VL]], i64 0)
+// CHECK-RV64-NEXT: ret <vscale x 32 x bfloat> [[TMP0]]
+//
+vbfloat16m8_t test_vfmadd_vf_bf16m8_tumu(vbool2_t mask, vbfloat16m8_t vd, __bf16 rs1, vbfloat16m8_t vs2, size_t vl) {
+ return __riscv_vfmadd_tumu(mask, vd, rs1, vs2, vl);
+}
+
+// CHECK-RV64-LABEL: define dso_local <vscale x 1 x bfloat> @test_vfmadd_vv_bf16mf4_mu(
+// CHECK-RV64-SAME: <vscale x 1 x i1> [[MASK:%.*]], <vscale x 1 x bfloat> [[VD:%.*]], <vscale x 1 x bfloat> [[VS1:%.*]], <vscale x 1 x bfloat> [[VS2:%.*]], i64 noundef [[VL:%.*]]) #[[ATTR0]] {
+// CHECK-RV64-NEXT: [[ENTRY:.*:]]
+// CHECK-RV64-NEXT: [[TMP0:%.*]] = call <vscale x 1 x bfloat> @llvm.riscv.vfmadd.mask.nxv1bf16.nxv1bf16.i64(<vscale x 1 x bfloat> [[VD]], <vscale x 1 x bfloat> [[VS1]], <vscale x 1 x bfloat> [[VS2]], <vscale x 1 x i1> [[MASK]], i64 7, i64 [[VL]], i64 1)
+// CHECK-RV64-NEXT: ret <vscale x 1 x bfloat> [[TMP0]]
+//
+vbfloat16mf4_t test_vfmadd_vv_bf16mf4_mu(vbool64_t mask, vbfloat16mf4_t vd, vbfloat16mf4_t vs1, vbfloat16mf4_t vs2, size_t vl) {
+ return __riscv_vfmadd_mu(mask, vd, vs1, vs2, vl);
+}
+
+// CHECK-RV64-LABEL: define dso_local <vscale x 1 x bfloat> @test_vfmadd_vf_bf16mf4_mu(
+// CHECK-RV64-SAME: <vscale x 1 x i1> [[MASK:%.*]], <vscale x 1 x bfloat> [[VD:%.*]], bfloat noundef [[RS1:%.*]], <vscale x 1 x bfloat> [[VS2:%.*]], i64 noundef [[VL:%.*]]) #[[ATTR0]] {
+// CHECK-RV64-NEXT: [[ENTRY:.*:]]
+// CHECK-RV64-NEXT: [[TMP0:%.*]] = call <vscale x 1 x bfloat> @llvm.riscv.vfmadd.mask.nxv1bf16.bf16.i64(<vscale x 1 x bfloat> [[VD]], bfloat [[RS1]], <vscale x 1 x bfloat> [[VS2]], <vscale x 1 x i1> [[MASK]], i64 7, i64 [[VL]], i64 1)
+// CHECK-RV64-NEXT: ret <vscale x 1 x bfloat> [[TMP0]]
+//
+vbfloat16mf4_t test_vfmadd_vf_bf16mf4_mu(vbool64_t mask, vbfloat16mf4_t vd, __bf16 rs1, vbfloat16mf4_t vs2, size_t vl) {
+ return __riscv_vfmadd_mu(mask, vd, rs1, vs2, vl);
+}
+
+// CHECK-RV64-LABEL: define dso_local <vscale x 2 x bfloat> @test_vfmadd_vv_bf16mf2_mu(
+// CHECK-RV64-SAME: <vscale x 2 x i1> [[MASK:%.*]], <vscale x 2 x bfloat> [[VD:%.*]], <vscale x 2 x bfloat> [[VS1:%.*]], <vscale x 2 x bfloat> [[VS2:%.*]], i64 noundef [[VL:%.*]]) #[[ATTR0]] {
+// CHECK-RV64-NEXT: [[ENTRY:.*:]]
+// CHECK-RV64-NEXT: [[TMP0:%.*]] = call <vscale x 2 x bfloat> @llvm.riscv.vfmadd.mask.nxv2bf16.nxv2bf16.i64(<vscale x 2 x bfloat> [[VD]], <vscale x 2 x bfloat> [[VS1]], <vscale x 2 x bfloat> [[VS2]], <vscale x 2 x i1> [[MASK]], i64 7, i64 [[VL]], i64 1)
+// CHECK-RV64-NEXT: ret <vscale x 2 x bfloat> [[TMP0]]
+//
+vbfloat16mf2_t test_vfmadd_vv_bf16mf2_mu(vbool32_t mask, vbfloat16mf2_t vd, vbfloat16mf2_t vs1, vbfloat16mf2_t vs2, size_t vl) {
+ return __riscv_vfmadd_mu(mask, vd, vs1, vs2, vl);
+}
+
+// CHECK-RV64-LABEL: define dso_local <vscale x 2 x bfloat> @test_vfmadd_vf_bf16mf2_mu(
+// CHECK-RV64-SAME: <vscale x 2 x i1> [[MASK:%.*]], <vscale x 2 x bfloat> [[VD:%.*]], bfloat noundef [[RS1:%.*]], <vscale x 2 x bfloat> [[VS2:%.*]], i64 noundef [[VL:%.*]]) #[[ATTR0]] {
+// CHECK-RV64-NEXT: [[ENTRY:.*:]]
+// CHECK-RV64-NEXT: [[TMP0:%.*]] = call <vscale x 2 x bfloat> @llvm.riscv.vfmadd.mask.nxv2bf16.bf16.i64(<vscale x 2 x bfloat> [[VD]], bfloat [[RS1]], <vscale x 2 x bfloat> [[VS2]], <vscale x 2 x i1> [[MASK]], i64 7, i64 [[VL]], i64 1)
+// CHECK-RV64-NEXT: ret <vscale x 2 x bfloat> [[TMP0]]
+//
+vbfloat16mf2_t test_vfmadd_vf_bf16mf2_mu(vbool32_t mask, vbfloat16mf2_t vd, __bf16 rs1, vbfloat16mf2_t vs2, size_t vl) {
+ return __riscv_vfmadd_mu(mask, vd, rs1, vs2, vl);
+}
+
+// CHECK-RV64-LABEL: define dso_local <vscale x 4 x bfloat> @test_vfmadd_vv_bf16m1_mu(
+// CHECK-RV64-SAME: <vscale x 4 x i1> [[MASK:%.*]], <vscale x 4 x bfloat> [[VD:%.*]], <vscale x 4 x bfloat> [[VS1:%.*]], <vscale x 4 x bfloat> [[VS2:%.*]], i64 noundef [[VL:%.*]]) #[[ATTR0]] {
+// CHECK-RV64-NEXT: [[ENTRY:.*:]]
+// CHECK-RV64-NEXT: [[TMP0:%.*]] = call <vscale x 4 x bfloat> @llvm.riscv.vfmadd.mask.nxv4bf16.nxv4bf16.i64(<vscale x 4 x bfloat> [[VD]], <vscale x 4 x bfloat> [[VS1]], <vscale x 4 x bfloat> [[VS2]], <vscale x 4 x i1> [[MASK]], i64 7, i64 [[VL]], i64 1)
+// CHECK-RV64-NEXT: ret <vscale x 4 x bfloat> [[TMP0]]
+//
+vbfloat16m1_t test_vfmadd_vv_bf16m1_mu(vbool16_t mask, vbfloat16m1_t vd, vbfloat16m1_t vs1, vbfloat16m1_t vs2, size_t vl) {
+ return __riscv_vfmadd_mu(mask, vd, vs1, vs2, vl);
+}
+
+// CHECK-RV64-LABEL: define dso_local <vscale x 4 x bfloat> @test_vfmadd_vf_bf16m1_mu(
+// CHECK-RV64-SAME: <vscale x 4 x i1> [[MASK:%.*]], <vscale x 4 x bfloat> [[VD:%.*]], bfloat noundef [[RS1:%.*]], <vscale x 4 x bfloat> [[VS2:%.*]], i64 noundef [[VL:%.*]]) #[[ATTR0]] {
+// CHECK-RV64-NEXT: [[ENTRY:.*:]]
+// CHECK-RV64-NEXT: [[TMP0:%.*]] = call <vscale x 4 x bfloat> @llvm.riscv.vfmadd.mask.nxv4bf16.bf16.i64(<vscale x 4 x bfloat> [[VD]], bfloat [[RS1]], <vscale x 4 x bfloat> [[VS2]], <vscale x 4 x i1> [[MASK]], i64 7, i64 [[VL]], i64 1)
+// CHECK-RV64-NEXT: ret <vscale x 4 x bfloat> [[TMP0]]
+//
+vbfloat16m1_t test_vfmadd_vf_bf16m1_mu(vbool16_t mask, vbfloat16m1_t vd, __bf16 rs1, vbfloat16m1_t vs2, size_t vl) {
+ return __riscv_vfmadd_mu(mask, vd, rs1, vs2, vl);
+}
+
+// CHECK-RV64-LABEL: define dso_local <vscale x 8 x bfloat> @test_vfmadd_vv_bf16m2_mu(
+// CHECK-RV64-SAME: <vscale x 8 x i1> [[MASK:%.*]], <vscale x 8 x bfloat> [[VD:%.*]], <vscale x 8 x bfloat> [[VS1:%.*]], <vscale x 8 x bfloat> [[VS2:%.*]], i64 noundef [[VL:%.*]]) #[[ATTR0]] {
+// CHECK-RV64-NEXT: [[ENTRY:.*:]]
+// CHECK-RV64-NEXT: [[TMP0:%.*]] = call <vscale x 8 x bfloat> @llvm.riscv.vfmadd.mask.nxv8bf16.nxv8bf16.i64(<vscale x 8 x bfloat> [[VD]], <vscale x 8 x bfloat> [[VS1]], <vscale x 8 x bfloat> [[VS2]], <vscale x 8 x i1> [[MASK]], i64 7, i64 [[VL]], i64 1)
+// CHECK-RV64-NEXT: ret <vscale x 8 x bfloat> [[TMP0]]
+//
+vbfloat16m2_t test_vfmadd_vv_bf16m2_mu(vbool8_t mask, vbfloat16m2_t vd, vbfloat16m2_t vs1, vbfloat16m2_t vs2, size_t vl) {
+ return __riscv_vfmadd_mu(mask, vd, vs1, vs2, vl);
+}
+
+// CHECK-RV64-LABEL: define dso_local <vscale x 8 x bfloat> @test_vfmadd_vf_bf16m2_mu(
+// CHECK-RV64-SAME: <vscale x 8 x i1> [[MASK:%.*]], <vscale x 8 x bfloat> [[VD:%.*]], bfloat noundef [[RS1:%.*]], <vscale x 8 x bfloat> [[VS2:%.*]], i64 noundef [[VL:%.*]]) #[[ATTR0]] {
+// CHECK-RV64-NEXT: [[ENTRY:.*:]]
+// CHECK-RV64-NEXT: [[TMP0:%.*]] = call <vscale x 8 x bfloat> @llvm.riscv.vfmadd.mask.nxv8bf16.bf16.i64(<vscale x 8 x bfloat> [[VD]], bfloat [[RS1]], <vscale x 8 x bfloat> [[VS2]], <vscale x 8 x i1> [[MASK]], i64 7, i64 [[VL]], i64 1)
+// CHECK-RV64-NEXT: ret <vscale x 8 x bfloat> [[TMP0]]
+//
+vbfloat16m2_t test_vfmadd_vf_bf16m2_mu(vbool8_t mask, vbfloat16m2_t vd, __bf16 rs1, vbfloat16m2_t vs2, size_t vl) {
+ return __riscv_vfmadd_mu(mask, vd, rs1, vs2, vl);
+}
+
+// CHECK-RV64-LABEL: define dso_local <vscale x 16 x bfloat> @test_vfmadd_vv_bf16m4_mu(
+// CHECK-RV64-SAME: <vscale x 16 x i1> [[MASK:%.*]], <vscale x 16 x bfloat> [[VD:%.*]], <vscale x 16 x bfloat> [[VS1:%.*]], <vscale x 16 x bfloat> [[VS2:%.*]], i64 noundef [[VL:%.*]]) #[[ATTR0]] {
+// CHECK-RV64-NEXT: [[ENTRY:.*:]]
+// CHECK-RV64-NEXT: [[TMP0:%.*]] = call <vscale x 16 x bfloat> @llvm.riscv.vfmadd.mask.nxv16bf16.nxv16bf16.i64(<vscale x 16 x bfloat> [[VD]], <vscale x 16 x bfloat> [[VS1]], <vscale x 16 x bfloat> [[VS2]], <vscale x 16 x i1> [[MASK]], i64 7, i64 [[VL]], i64 1)
+// CHECK-RV64-NEXT: ret <vscale x 16 x bfloat> [[TMP0]]
+//
+vbfloat16m4_t test_vfmadd_vv_bf16m4_mu(vbool4_t mask, vbfloat16m4_t vd, vbfloat16m4_t vs1, vbfloat16m4_t vs2, size_t vl) {
+ return __riscv_vfmadd_mu(mask, vd, vs1, vs2, vl);
+}
+
+// CHECK-RV64-LABEL: define dso_local <vscale x 16 x bfloat> @test_vfmadd_vf_bf16m4_mu(
+// CHECK-RV64-SAME: <vscale x 16 x i1> [[MASK:%.*]], <vscale x 16 x bfloat> [[VD:%.*]], bfloat noundef [[RS1:%.*]], <vscale x 16 x bfloat> [[VS2:%.*]], i64 noundef [[VL:%.*]]) #[[ATTR0]] {
+// CHECK-RV64-NEXT: [[ENTRY:.*:]]
+// CHECK-RV64-NEXT: [[TMP0:%.*]] = call <vscale x 16 x bfloat> @llvm.riscv.vfmadd.mask.nxv16bf16.bf16.i64(<vscale x 16 x bfloat> [[VD]], bfloat [[RS1]], <vscale x 16 x bfloat> [[VS2]], <vscale x 16 x i1> [[MASK]], i64 7, i64 [[VL]], i64 1)
+// CHECK-RV64-NEXT: ret <vscale x 16 x bfloat> [[TMP0]]
+//
+vbfloat16m4_t test_vfmadd_vf_bf16m4_mu(vbool4_t mask, vbfloat16m4_t vd, __bf16 rs1, vbfloat16m4_t vs2, size_t vl) {
+ return __riscv_vfmadd_mu(mask, vd, rs1, vs2, vl);
+}
+
+// CHECK-RV64-LABEL: define dso_local <vscale x 32 x bfloat> @test_vfmadd_vv_bf16m8_mu(
+// CHECK-RV64-SAME: <vscale x 32 x i1> [[MASK:%.*]], <vscale x 32 x bfloat> [[VD:%.*]], <vscale x 32 x bfloat> [[VS1:%.*]], <vscale x 32 x bfloat> [[VS2:%.*]], i64 noundef [[VL:%.*]]) #[[ATTR0]] {
+// CHECK-RV64-NEXT: [[ENTRY:.*:]]
+// CHECK-RV64-NEXT: [[TMP0:%.*]] = call <vscale x 32 x bfloat> @llvm.riscv.vfmadd.mask.nxv32bf16.nxv32bf16.i64(<vscale x 32 x bfloat> [[VD]], <vscale x 32 x bfloat> [[VS1]], <vscale x 32 x bfloat> [[VS2]], <vscale x 32 x i1> [[MASK]], i64 7, i64 [[VL]], i64 1)
+// CHECK-RV64-NEXT: ret <vscale x 32 x bfloat> [[TMP0]]
+//
+vbfloat16m8_t test_vfmadd_vv_bf16m8_mu(vbool2_t mask, vbfloat16m8_t vd, vbfloat16m8_t vs1, vbfloat16m8_t vs2, size_t vl) {
+ return __riscv_vfmadd_mu(mask, vd, vs1, vs2, vl);
+}
+
+// CHECK-RV64-LABEL: define dso_local <vscale x 32 x bfloat> @test_vfmadd_vf_bf16m8_mu(
+// CHECK-RV64-SAME: <vscale x 32 x i1> [[MASK:%.*]], <vscale x 32 x bfloat> [[VD:%.*]], bfloat noundef [[RS1:%.*]], <vscale x 32 x bfloat> [[VS2:%.*]], i64 noundef [[VL:%.*]]) #[[ATTR0]] {
+// CHECK-RV64-NEXT: [[ENTRY:.*:]]
+// CHECK-RV64-NEXT: [[TMP0:%.*]] = call <vscale x 32 x bfloat> @llvm.riscv.vfmadd.mask.nxv32bf16.bf16.i64(<vscale x 32 x bfloat> [[VD]], bfloat [[RS1]], <vscale x 32 x bfloat> [[VS2]], <vscale x 32 x i1> [[MASK]], i64 7, i64 [[VL]], i64 1)
+// CHECK-RV64-NEXT: ret <vscale x 32 x bfloat> [[TMP0]]
+//
+vbfloat16m8_t test_vfmadd_vf_bf16m8_mu(vbool2_t mask, vbfloat16m8_t vd, __bf16 rs1, vbfloat16m8_t vs2, size_t vl) {
+ return __riscv_vfmadd_mu(mask, vd, rs1, vs2, vl);
+}
+
diff --git a/clang/test/CodeGen/RISCV/rvv-intrinsics-autogenerated/zvfbfa/policy/overloaded/vfmax.c b/clang/test/CodeGen/RISCV/rvv-intrinsics-autogenerated/zvfbfa/policy/overloaded/vfmax.c
new file mode 100644
index 0000000..5f682e8
--- /dev/null
+++ b/clang/test/CodeGen/RISCV/rvv-intrinsics-autogenerated/zvfbfa/policy/overloaded/vfmax.c
@@ -0,0 +1,489 @@
+// NOTE: Assertions have been autogenerated by utils/update_cc_test_checks.py UTC_ARGS: --version 5
+// REQUIRES: riscv-registered-target
+// RUN: %clang_cc1 -triple riscv64 -target-feature +v \
+// RUN: -target-feature +experimental-zvfbfa -disable-O0-optnone \
+// RUN: -emit-llvm %s -o - | opt -S -passes=mem2reg | \
+// RUN: FileCheck --check-prefix=CHECK-RV64 %s
+
+#include <riscv_vector.h>
+
+// CHECK-RV64-LABEL: define dso_local <vscale x 1 x bfloat> @test_vfmax_vv_bf16mf4_tu(
+// CHECK-RV64-SAME: <vscale x 1 x bfloat> [[MASKEDOFF:%.*]], <vscale x 1 x bfloat> [[OP1:%.*]], <vscale x 1 x bfloat> [[OP2:%.*]], i64 noundef [[VL:%.*]]) #[[ATTR0:[0-9]+]] {
+// CHECK-RV64-NEXT: [[ENTRY:.*:]]
+// CHECK-RV64-NEXT: [[TMP0:%.*]] = call <vscale x 1 x bfloat> @llvm.riscv.vfmax.nxv1bf16.nxv1bf16.i64(<vscale x 1 x bfloat> [[MASKEDOFF]], <vscale x 1 x bfloat> [[OP1]], <vscale x 1 x bfloat> [[OP2]], i64 [[VL]])
+// CHECK-RV64-NEXT: ret <vscale x 1 x bfloat> [[TMP0]]
+//
+vbfloat16mf4_t test_vfmax_vv_bf16mf4_tu(vbfloat16mf4_t maskedoff, vbfloat16mf4_t op1, vbfloat16mf4_t op2, size_t vl) {
+ return __riscv_vfmax_tu(maskedoff, op1, op2, vl);
+}
+
+// CHECK-RV64-LABEL: define dso_local <vscale x 1 x bfloat> @test_vfmax_vf_bf16mf4_tu(
+// CHECK-RV64-SAME: <vscale x 1 x bfloat> [[MASKEDOFF:%.*]], <vscale x 1 x bfloat> [[OP1:%.*]], bfloat noundef [[OP2:%.*]], i64 noundef [[VL:%.*]]) #[[ATTR0]] {
+// CHECK-RV64-NEXT: [[ENTRY:.*:]]
+// CHECK-RV64-NEXT: [[TMP0:%.*]] = call <vscale x 1 x bfloat> @llvm.riscv.vfmax.nxv1bf16.bf16.i64(<vscale x 1 x bfloat> [[MASKEDOFF]], <vscale x 1 x bfloat> [[OP1]], bfloat [[OP2]], i64 [[VL]])
+// CHECK-RV64-NEXT: ret <vscale x 1 x bfloat> [[TMP0]]
+//
+vbfloat16mf4_t test_vfmax_vf_bf16mf4_tu(vbfloat16mf4_t maskedoff, vbfloat16mf4_t op1, __bf16 op2, size_t vl) {
+ return __riscv_vfmax_tu(maskedoff, op1, op2, vl);
+}
+
+// CHECK-RV64-LABEL: define dso_local <vscale x 2 x bfloat> @test_vfmax_vv_bf16mf2_tu(
+// CHECK-RV64-SAME: <vscale x 2 x bfloat> [[MASKEDOFF:%.*]], <vscale x 2 x bfloat> [[OP1:%.*]], <vscale x 2 x bfloat> [[OP2:%.*]], i64 noundef [[VL:%.*]]) #[[ATTR0]] {
+// CHECK-RV64-NEXT: [[ENTRY:.*:]]
+// CHECK-RV64-NEXT: [[TMP0:%.*]] = call <vscale x 2 x bfloat> @llvm.riscv.vfmax.nxv2bf16.nxv2bf16.i64(<vscale x 2 x bfloat> [[MASKEDOFF]], <vscale x 2 x bfloat> [[OP1]], <vscale x 2 x bfloat> [[OP2]], i64 [[VL]])
+// CHECK-RV64-NEXT: ret <vscale x 2 x bfloat> [[TMP0]]
+//
+vbfloat16mf2_t test_vfmax_vv_bf16mf2_tu(vbfloat16mf2_t maskedoff, vbfloat16mf2_t op1, vbfloat16mf2_t op2, size_t vl) {
+ return __riscv_vfmax_tu(maskedoff, op1, op2, vl);
+}
+
+// CHECK-RV64-LABEL: define dso_local <vscale x 2 x bfloat> @test_vfmax_vf_bf16mf2_tu(
+// CHECK-RV64-SAME: <vscale x 2 x bfloat> [[MASKEDOFF:%.*]], <vscale x 2 x bfloat> [[OP1:%.*]], bfloat noundef [[OP2:%.*]], i64 noundef [[VL:%.*]]) #[[ATTR0]] {
+// CHECK-RV64-NEXT: [[ENTRY:.*:]]
+// CHECK-RV64-NEXT: [[TMP0:%.*]] = call <vscale x 2 x bfloat> @llvm.riscv.vfmax.nxv2bf16.bf16.i64(<vscale x 2 x bfloat> [[MASKEDOFF]], <vscale x 2 x bfloat> [[OP1]], bfloat [[OP2]], i64 [[VL]])
+// CHECK-RV64-NEXT: ret <vscale x 2 x bfloat> [[TMP0]]
+//
+vbfloat16mf2_t test_vfmax_vf_bf16mf2_tu(vbfloat16mf2_t maskedoff, vbfloat16mf2_t op1, __bf16 op2, size_t vl) {
+ return __riscv_vfmax_tu(maskedoff, op1, op2, vl);
+}
+
+// CHECK-RV64-LABEL: define dso_local <vscale x 4 x bfloat> @test_vfmax_vv_bf16m1_tu(
+// CHECK-RV64-SAME: <vscale x 4 x bfloat> [[MASKEDOFF:%.*]], <vscale x 4 x bfloat> [[OP1:%.*]], <vscale x 4 x bfloat> [[OP2:%.*]], i64 noundef [[VL:%.*]]) #[[ATTR0]] {
+// CHECK-RV64-NEXT: [[ENTRY:.*:]]
+// CHECK-RV64-NEXT: [[TMP0:%.*]] = call <vscale x 4 x bfloat> @llvm.riscv.vfmax.nxv4bf16.nxv4bf16.i64(<vscale x 4 x bfloat> [[MASKEDOFF]], <vscale x 4 x bfloat> [[OP1]], <vscale x 4 x bfloat> [[OP2]], i64 [[VL]])
+// CHECK-RV64-NEXT: ret <vscale x 4 x bfloat> [[TMP0]]
+//
+vbfloat16m1_t test_vfmax_vv_bf16m1_tu(vbfloat16m1_t maskedoff, vbfloat16m1_t op1, vbfloat16m1_t op2, size_t vl) {
+ return __riscv_vfmax_tu(maskedoff, op1, op2, vl);
+}
+
+// CHECK-RV64-LABEL: define dso_local <vscale x 4 x bfloat> @test_vfmax_vf_bf16m1_tu(
+// CHECK-RV64-SAME: <vscale x 4 x bfloat> [[MASKEDOFF:%.*]], <vscale x 4 x bfloat> [[OP1:%.*]], bfloat noundef [[OP2:%.*]], i64 noundef [[VL:%.*]]) #[[ATTR0]] {
+// CHECK-RV64-NEXT: [[ENTRY:.*:]]
+// CHECK-RV64-NEXT: [[TMP0:%.*]] = call <vscale x 4 x bfloat> @llvm.riscv.vfmax.nxv4bf16.bf16.i64(<vscale x 4 x bfloat> [[MASKEDOFF]], <vscale x 4 x bfloat> [[OP1]], bfloat [[OP2]], i64 [[VL]])
+// CHECK-RV64-NEXT: ret <vscale x 4 x bfloat> [[TMP0]]
+//
+vbfloat16m1_t test_vfmax_vf_bf16m1_tu(vbfloat16m1_t maskedoff, vbfloat16m1_t op1, __bf16 op2, size_t vl) {
+ return __riscv_vfmax_tu(maskedoff, op1, op2, vl);
+}
+
+// CHECK-RV64-LABEL: define dso_local <vscale x 8 x bfloat> @test_vfmax_vv_bf16m2_tu(
+// CHECK-RV64-SAME: <vscale x 8 x bfloat> [[MASKEDOFF:%.*]], <vscale x 8 x bfloat> [[OP1:%.*]], <vscale x 8 x bfloat> [[OP2:%.*]], i64 noundef [[VL:%.*]]) #[[ATTR0]] {
+// CHECK-RV64-NEXT: [[ENTRY:.*:]]
+// CHECK-RV64-NEXT: [[TMP0:%.*]] = call <vscale x 8 x bfloat> @llvm.riscv.vfmax.nxv8bf16.nxv8bf16.i64(<vscale x 8 x bfloat> [[MASKEDOFF]], <vscale x 8 x bfloat> [[OP1]], <vscale x 8 x bfloat> [[OP2]], i64 [[VL]])
+// CHECK-RV64-NEXT: ret <vscale x 8 x bfloat> [[TMP0]]
+//
+vbfloat16m2_t test_vfmax_vv_bf16m2_tu(vbfloat16m2_t maskedoff, vbfloat16m2_t op1, vbfloat16m2_t op2, size_t vl) {
+ return __riscv_vfmax_tu(maskedoff, op1, op2, vl);
+}
+
+// CHECK-RV64-LABEL: define dso_local <vscale x 8 x bfloat> @test_vfmax_vf_bf16m2_tu(
+// CHECK-RV64-SAME: <vscale x 8 x bfloat> [[MASKEDOFF:%.*]], <vscale x 8 x bfloat> [[OP1:%.*]], bfloat noundef [[OP2:%.*]], i64 noundef [[VL:%.*]]) #[[ATTR0]] {
+// CHECK-RV64-NEXT: [[ENTRY:.*:]]
+// CHECK-RV64-NEXT: [[TMP0:%.*]] = call <vscale x 8 x bfloat> @llvm.riscv.vfmax.nxv8bf16.bf16.i64(<vscale x 8 x bfloat> [[MASKEDOFF]], <vscale x 8 x bfloat> [[OP1]], bfloat [[OP2]], i64 [[VL]])
+// CHECK-RV64-NEXT: ret <vscale x 8 x bfloat> [[TMP0]]
+//
+vbfloat16m2_t test_vfmax_vf_bf16m2_tu(vbfloat16m2_t maskedoff, vbfloat16m2_t op1, __bf16 op2, size_t vl) {
+ return __riscv_vfmax_tu(maskedoff, op1, op2, vl);
+}
+
+// CHECK-RV64-LABEL: define dso_local <vscale x 16 x bfloat> @test_vfmax_vv_bf16m4_tu(
+// CHECK-RV64-SAME: <vscale x 16 x bfloat> [[MASKEDOFF:%.*]], <vscale x 16 x bfloat> [[OP1:%.*]], <vscale x 16 x bfloat> [[OP2:%.*]], i64 noundef [[VL:%.*]]) #[[ATTR0]] {
+// CHECK-RV64-NEXT: [[ENTRY:.*:]]
+// CHECK-RV64-NEXT: [[TMP0:%.*]] = call <vscale x 16 x bfloat> @llvm.riscv.vfmax.nxv16bf16.nxv16bf16.i64(<vscale x 16 x bfloat> [[MASKEDOFF]], <vscale x 16 x bfloat> [[OP1]], <vscale x 16 x bfloat> [[OP2]], i64 [[VL]])
+// CHECK-RV64-NEXT: ret <vscale x 16 x bfloat> [[TMP0]]
+//
+vbfloat16m4_t test_vfmax_vv_bf16m4_tu(vbfloat16m4_t maskedoff, vbfloat16m4_t op1, vbfloat16m4_t op2, size_t vl) {
+ return __riscv_vfmax_tu(maskedoff, op1, op2, vl);
+}
+
+// CHECK-RV64-LABEL: define dso_local <vscale x 16 x bfloat> @test_vfmax_vf_bf16m4_tu(
+// CHECK-RV64-SAME: <vscale x 16 x bfloat> [[MASKEDOFF:%.*]], <vscale x 16 x bfloat> [[OP1:%.*]], bfloat noundef [[OP2:%.*]], i64 noundef [[VL:%.*]]) #[[ATTR0]] {
+// CHECK-RV64-NEXT: [[ENTRY:.*:]]
+// CHECK-RV64-NEXT: [[TMP0:%.*]] = call <vscale x 16 x bfloat> @llvm.riscv.vfmax.nxv16bf16.bf16.i64(<vscale x 16 x bfloat> [[MASKEDOFF]], <vscale x 16 x bfloat> [[OP1]], bfloat [[OP2]], i64 [[VL]])
+// CHECK-RV64-NEXT: ret <vscale x 16 x bfloat> [[TMP0]]
+//
+vbfloat16m4_t test_vfmax_vf_bf16m4_tu(vbfloat16m4_t maskedoff, vbfloat16m4_t op1, __bf16 op2, size_t vl) {
+ return __riscv_vfmax_tu(maskedoff, op1, op2, vl);
+}
+
+// CHECK-RV64-LABEL: define dso_local <vscale x 32 x bfloat> @test_vfmax_vv_bf16m8_tu(
+// CHECK-RV64-SAME: <vscale x 32 x bfloat> [[MASKEDOFF:%.*]], <vscale x 32 x bfloat> [[OP1:%.*]], <vscale x 32 x bfloat> [[OP2:%.*]], i64 noundef [[VL:%.*]]) #[[ATTR0]] {
+// CHECK-RV64-NEXT: [[ENTRY:.*:]]
+// CHECK-RV64-NEXT: [[TMP0:%.*]] = call <vscale x 32 x bfloat> @llvm.riscv.vfmax.nxv32bf16.nxv32bf16.i64(<vscale x 32 x bfloat> [[MASKEDOFF]], <vscale x 32 x bfloat> [[OP1]], <vscale x 32 x bfloat> [[OP2]], i64 [[VL]])
+// CHECK-RV64-NEXT: ret <vscale x 32 x bfloat> [[TMP0]]
+//
+vbfloat16m8_t test_vfmax_vv_bf16m8_tu(vbfloat16m8_t maskedoff, vbfloat16m8_t op1, vbfloat16m8_t op2, size_t vl) {
+ return __riscv_vfmax_tu(maskedoff, op1, op2, vl);
+}
+
+// CHECK-RV64-LABEL: define dso_local <vscale x 32 x bfloat> @test_vfmax_vf_bf16m8_tu(
+// CHECK-RV64-SAME: <vscale x 32 x bfloat> [[MASKEDOFF:%.*]], <vscale x 32 x bfloat> [[OP1:%.*]], bfloat noundef [[OP2:%.*]], i64 noundef [[VL:%.*]]) #[[ATTR0]] {
+// CHECK-RV64-NEXT: [[ENTRY:.*:]]
+// CHECK-RV64-NEXT: [[TMP0:%.*]] = call <vscale x 32 x bfloat> @llvm.riscv.vfmax.nxv32bf16.bf16.i64(<vscale x 32 x bfloat> [[MASKEDOFF]], <vscale x 32 x bfloat> [[OP1]], bfloat [[OP2]], i64 [[VL]])
+// CHECK-RV64-NEXT: ret <vscale x 32 x bfloat> [[TMP0]]
+//
+vbfloat16m8_t test_vfmax_vf_bf16m8_tu(vbfloat16m8_t maskedoff, vbfloat16m8_t op1, __bf16 op2, size_t vl) {
+ return __riscv_vfmax_tu(maskedoff, op1, op2, vl);
+}
+
+// CHECK-RV64-LABEL: define dso_local <vscale x 1 x bfloat> @test_vfmax_vv_bf16mf4_tum(
+// CHECK-RV64-SAME: <vscale x 1 x i1> [[MASK:%.*]], <vscale x 1 x bfloat> [[MASKEDOFF:%.*]], <vscale x 1 x bfloat> [[OP1:%.*]], <vscale x 1 x bfloat> [[OP2:%.*]], i64 noundef [[VL:%.*]]) #[[ATTR0]] {
+// CHECK-RV64-NEXT: [[ENTRY:.*:]]
+// CHECK-RV64-NEXT: [[TMP0:%.*]] = call <vscale x 1 x bfloat> @llvm.riscv.vfmax.mask.nxv1bf16.nxv1bf16.i64(<vscale x 1 x bfloat> [[MASKEDOFF]], <vscale x 1 x bfloat> [[OP1]], <vscale x 1 x bfloat> [[OP2]], <vscale x 1 x i1> [[MASK]], i64 [[VL]], i64 2)
+// CHECK-RV64-NEXT: ret <vscale x 1 x bfloat> [[TMP0]]
+//
+vbfloat16mf4_t test_vfmax_vv_bf16mf4_tum(vbool64_t mask, vbfloat16mf4_t maskedoff, vbfloat16mf4_t op1, vbfloat16mf4_t op2, size_t vl) {
+ return __riscv_vfmax_tum(mask, maskedoff, op1, op2, vl);
+}
+
+// CHECK-RV64-LABEL: define dso_local <vscale x 1 x bfloat> @test_vfmax_vf_bf16mf4_tum(
+// CHECK-RV64-SAME: <vscale x 1 x i1> [[MASK:%.*]], <vscale x 1 x bfloat> [[MASKEDOFF:%.*]], <vscale x 1 x bfloat> [[OP1:%.*]], bfloat noundef [[OP2:%.*]], i64 noundef [[VL:%.*]]) #[[ATTR0]] {
+// CHECK-RV64-NEXT: [[ENTRY:.*:]]
+// CHECK-RV64-NEXT: [[TMP0:%.*]] = call <vscale x 1 x bfloat> @llvm.riscv.vfmax.mask.nxv1bf16.bf16.i64(<vscale x 1 x bfloat> [[MASKEDOFF]], <vscale x 1 x bfloat> [[OP1]], bfloat [[OP2]], <vscale x 1 x i1> [[MASK]], i64 [[VL]], i64 2)
+// CHECK-RV64-NEXT: ret <vscale x 1 x bfloat> [[TMP0]]
+//
+vbfloat16mf4_t test_vfmax_vf_bf16mf4_tum(vbool64_t mask, vbfloat16mf4_t maskedoff, vbfloat16mf4_t op1, __bf16 op2, size_t vl) {
+ return __riscv_vfmax_tum(mask, maskedoff, op1, op2, vl);
+}
+
+// CHECK-RV64-LABEL: define dso_local <vscale x 2 x bfloat> @test_vfmax_vv_bf16mf2_tum(
+// CHECK-RV64-SAME: <vscale x 2 x i1> [[MASK:%.*]], <vscale x 2 x bfloat> [[MASKEDOFF:%.*]], <vscale x 2 x bfloat> [[OP1:%.*]], <vscale x 2 x bfloat> [[OP2:%.*]], i64 noundef [[VL:%.*]]) #[[ATTR0]] {
+// CHECK-RV64-NEXT: [[ENTRY:.*:]]
+// CHECK-RV64-NEXT: [[TMP0:%.*]] = call <vscale x 2 x bfloat> @llvm.riscv.vfmax.mask.nxv2bf16.nxv2bf16.i64(<vscale x 2 x bfloat> [[MASKEDOFF]], <vscale x 2 x bfloat> [[OP1]], <vscale x 2 x bfloat> [[OP2]], <vscale x 2 x i1> [[MASK]], i64 [[VL]], i64 2)
+// CHECK-RV64-NEXT: ret <vscale x 2 x bfloat> [[TMP0]]
+//
+vbfloat16mf2_t test_vfmax_vv_bf16mf2_tum(vbool32_t mask, vbfloat16mf2_t maskedoff, vbfloat16mf2_t op1, vbfloat16mf2_t op2, size_t vl) {
+ return __riscv_vfmax_tum(mask, maskedoff, op1, op2, vl);
+}
+
+// CHECK-RV64-LABEL: define dso_local <vscale x 2 x bfloat> @test_vfmax_vf_bf16mf2_tum(
+// CHECK-RV64-SAME: <vscale x 2 x i1> [[MASK:%.*]], <vscale x 2 x bfloat> [[MASKEDOFF:%.*]], <vscale x 2 x bfloat> [[OP1:%.*]], bfloat noundef [[OP2:%.*]], i64 noundef [[VL:%.*]]) #[[ATTR0]] {
+// CHECK-RV64-NEXT: [[ENTRY:.*:]]
+// CHECK-RV64-NEXT: [[TMP0:%.*]] = call <vscale x 2 x bfloat> @llvm.riscv.vfmax.mask.nxv2bf16.bf16.i64(<vscale x 2 x bfloat> [[MASKEDOFF]], <vscale x 2 x bfloat> [[OP1]], bfloat [[OP2]], <vscale x 2 x i1> [[MASK]], i64 [[VL]], i64 2)
+// CHECK-RV64-NEXT: ret <vscale x 2 x bfloat> [[TMP0]]
+//
+vbfloat16mf2_t test_vfmax_vf_bf16mf2_tum(vbool32_t mask, vbfloat16mf2_t maskedoff, vbfloat16mf2_t op1, __bf16 op2, size_t vl) {
+ return __riscv_vfmax_tum(mask, maskedoff, op1, op2, vl);
+}
+
+// CHECK-RV64-LABEL: define dso_local <vscale x 4 x bfloat> @test_vfmax_vv_bf16m1_tum(
+// CHECK-RV64-SAME: <vscale x 4 x i1> [[MASK:%.*]], <vscale x 4 x bfloat> [[MASKEDOFF:%.*]], <vscale x 4 x bfloat> [[OP1:%.*]], <vscale x 4 x bfloat> [[OP2:%.*]], i64 noundef [[VL:%.*]]) #[[ATTR0]] {
+// CHECK-RV64-NEXT: [[ENTRY:.*:]]
+// CHECK-RV64-NEXT: [[TMP0:%.*]] = call <vscale x 4 x bfloat> @llvm.riscv.vfmax.mask.nxv4bf16.nxv4bf16.i64(<vscale x 4 x bfloat> [[MASKEDOFF]], <vscale x 4 x bfloat> [[OP1]], <vscale x 4 x bfloat> [[OP2]], <vscale x 4 x i1> [[MASK]], i64 [[VL]], i64 2)
+// CHECK-RV64-NEXT: ret <vscale x 4 x bfloat> [[TMP0]]
+//
+vbfloat16m1_t test_vfmax_vv_bf16m1_tum(vbool16_t mask, vbfloat16m1_t maskedoff, vbfloat16m1_t op1, vbfloat16m1_t op2, size_t vl) {
+ return __riscv_vfmax_tum(mask, maskedoff, op1, op2, vl);
+}
+
+// CHECK-RV64-LABEL: define dso_local <vscale x 4 x bfloat> @test_vfmax_vf_bf16m1_tum(
+// CHECK-RV64-SAME: <vscale x 4 x i1> [[MASK:%.*]], <vscale x 4 x bfloat> [[MASKEDOFF:%.*]], <vscale x 4 x bfloat> [[OP1:%.*]], bfloat noundef [[OP2:%.*]], i64 noundef [[VL:%.*]]) #[[ATTR0]] {
+// CHECK-RV64-NEXT: [[ENTRY:.*:]]
+// CHECK-RV64-NEXT: [[TMP0:%.*]] = call <vscale x 4 x bfloat> @llvm.riscv.vfmax.mask.nxv4bf16.bf16.i64(<vscale x 4 x bfloat> [[MASKEDOFF]], <vscale x 4 x bfloat> [[OP1]], bfloat [[OP2]], <vscale x 4 x i1> [[MASK]], i64 [[VL]], i64 2)
+// CHECK-RV64-NEXT: ret <vscale x 4 x bfloat> [[TMP0]]
+//
+vbfloat16m1_t test_vfmax_vf_bf16m1_tum(vbool16_t mask, vbfloat16m1_t maskedoff, vbfloat16m1_t op1, __bf16 op2, size_t vl) {
+ return __riscv_vfmax_tum(mask, maskedoff, op1, op2, vl);
+}
+
+// CHECK-RV64-LABEL: define dso_local <vscale x 8 x bfloat> @test_vfmax_vv_bf16m2_tum(
+// CHECK-RV64-SAME: <vscale x 8 x i1> [[MASK:%.*]], <vscale x 8 x bfloat> [[MASKEDOFF:%.*]], <vscale x 8 x bfloat> [[OP1:%.*]], <vscale x 8 x bfloat> [[OP2:%.*]], i64 noundef [[VL:%.*]]) #[[ATTR0]] {
+// CHECK-RV64-NEXT: [[ENTRY:.*:]]
+// CHECK-RV64-NEXT: [[TMP0:%.*]] = call <vscale x 8 x bfloat> @llvm.riscv.vfmax.mask.nxv8bf16.nxv8bf16.i64(<vscale x 8 x bfloat> [[MASKEDOFF]], <vscale x 8 x bfloat> [[OP1]], <vscale x 8 x bfloat> [[OP2]], <vscale x 8 x i1> [[MASK]], i64 [[VL]], i64 2)
+// CHECK-RV64-NEXT: ret <vscale x 8 x bfloat> [[TMP0]]
+//
+vbfloat16m2_t test_vfmax_vv_bf16m2_tum(vbool8_t mask, vbfloat16m2_t maskedoff, vbfloat16m2_t op1, vbfloat16m2_t op2, size_t vl) {
+ return __riscv_vfmax_tum(mask, maskedoff, op1, op2, vl);
+}
+
+// CHECK-RV64-LABEL: define dso_local <vscale x 8 x bfloat> @test_vfmax_vf_bf16m2_tum(
+// CHECK-RV64-SAME: <vscale x 8 x i1> [[MASK:%.*]], <vscale x 8 x bfloat> [[MASKEDOFF:%.*]], <vscale x 8 x bfloat> [[OP1:%.*]], bfloat noundef [[OP2:%.*]], i64 noundef [[VL:%.*]]) #[[ATTR0]] {
+// CHECK-RV64-NEXT: [[ENTRY:.*:]]
+// CHECK-RV64-NEXT: [[TMP0:%.*]] = call <vscale x 8 x bfloat> @llvm.riscv.vfmax.mask.nxv8bf16.bf16.i64(<vscale x 8 x bfloat> [[MASKEDOFF]], <vscale x 8 x bfloat> [[OP1]], bfloat [[OP2]], <vscale x 8 x i1> [[MASK]], i64 [[VL]], i64 2)
+// CHECK-RV64-NEXT: ret <vscale x 8 x bfloat> [[TMP0]]
+//
+vbfloat16m2_t test_vfmax_vf_bf16m2_tum(vbool8_t mask, vbfloat16m2_t maskedoff, vbfloat16m2_t op1, __bf16 op2, size_t vl) {
+ return __riscv_vfmax_tum(mask, maskedoff, op1, op2, vl);
+}
+
+// CHECK-RV64-LABEL: define dso_local <vscale x 16 x bfloat> @test_vfmax_vv_bf16m4_tum(
+// CHECK-RV64-SAME: <vscale x 16 x i1> [[MASK:%.*]], <vscale x 16 x bfloat> [[MASKEDOFF:%.*]], <vscale x 16 x bfloat> [[OP1:%.*]], <vscale x 16 x bfloat> [[OP2:%.*]], i64 noundef [[VL:%.*]]) #[[ATTR0]] {
+// CHECK-RV64-NEXT: [[ENTRY:.*:]]
+// CHECK-RV64-NEXT: [[TMP0:%.*]] = call <vscale x 16 x bfloat> @llvm.riscv.vfmax.mask.nxv16bf16.nxv16bf16.i64(<vscale x 16 x bfloat> [[MASKEDOFF]], <vscale x 16 x bfloat> [[OP1]], <vscale x 16 x bfloat> [[OP2]], <vscale x 16 x i1> [[MASK]], i64 [[VL]], i64 2)
+// CHECK-RV64-NEXT: ret <vscale x 16 x bfloat> [[TMP0]]
+//
+vbfloat16m4_t test_vfmax_vv_bf16m4_tum(vbool4_t mask, vbfloat16m4_t maskedoff, vbfloat16m4_t op1, vbfloat16m4_t op2, size_t vl) {
+ return __riscv_vfmax_tum(mask, maskedoff, op1, op2, vl);
+}
+
+// CHECK-RV64-LABEL: define dso_local <vscale x 16 x bfloat> @test_vfmax_vf_bf16m4_tum(
+// CHECK-RV64-SAME: <vscale x 16 x i1> [[MASK:%.*]], <vscale x 16 x bfloat> [[MASKEDOFF:%.*]], <vscale x 16 x bfloat> [[OP1:%.*]], bfloat noundef [[OP2:%.*]], i64 noundef [[VL:%.*]]) #[[ATTR0]] {
+// CHECK-RV64-NEXT: [[ENTRY:.*:]]
+// CHECK-RV64-NEXT: [[TMP0:%.*]] = call <vscale x 16 x bfloat> @llvm.riscv.vfmax.mask.nxv16bf16.bf16.i64(<vscale x 16 x bfloat> [[MASKEDOFF]], <vscale x 16 x bfloat> [[OP1]], bfloat [[OP2]], <vscale x 16 x i1> [[MASK]], i64 [[VL]], i64 2)
+// CHECK-RV64-NEXT: ret <vscale x 16 x bfloat> [[TMP0]]
+//
+vbfloat16m4_t test_vfmax_vf_bf16m4_tum(vbool4_t mask, vbfloat16m4_t maskedoff, vbfloat16m4_t op1, __bf16 op2, size_t vl) {
+ return __riscv_vfmax_tum(mask, maskedoff, op1, op2, vl);
+}
+
+// CHECK-RV64-LABEL: define dso_local <vscale x 32 x bfloat> @test_vfmax_vv_bf16m8_tum(
+// CHECK-RV64-SAME: <vscale x 32 x i1> [[MASK:%.*]], <vscale x 32 x bfloat> [[MASKEDOFF:%.*]], <vscale x 32 x bfloat> [[OP1:%.*]], <vscale x 32 x bfloat> [[OP2:%.*]], i64 noundef [[VL:%.*]]) #[[ATTR0]] {
+// CHECK-RV64-NEXT: [[ENTRY:.*:]]
+// CHECK-RV64-NEXT: [[TMP0:%.*]] = call <vscale x 32 x bfloat> @llvm.riscv.vfmax.mask.nxv32bf16.nxv32bf16.i64(<vscale x 32 x bfloat> [[MASKEDOFF]], <vscale x 32 x bfloat> [[OP1]], <vscale x 32 x bfloat> [[OP2]], <vscale x 32 x i1> [[MASK]], i64 [[VL]], i64 2)
+// CHECK-RV64-NEXT: ret <vscale x 32 x bfloat> [[TMP0]]
+//
+vbfloat16m8_t test_vfmax_vv_bf16m8_tum(vbool2_t mask, vbfloat16m8_t maskedoff, vbfloat16m8_t op1, vbfloat16m8_t op2, size_t vl) {
+ return __riscv_vfmax_tum(mask, maskedoff, op1, op2, vl);
+}
+
+// CHECK-RV64-LABEL: define dso_local <vscale x 32 x bfloat> @test_vfmax_vf_bf16m8_tum(
+// CHECK-RV64-SAME: <vscale x 32 x i1> [[MASK:%.*]], <vscale x 32 x bfloat> [[MASKEDOFF:%.*]], <vscale x 32 x bfloat> [[OP1:%.*]], bfloat noundef [[OP2:%.*]], i64 noundef [[VL:%.*]]) #[[ATTR0]] {
+// CHECK-RV64-NEXT: [[ENTRY:.*:]]
+// CHECK-RV64-NEXT: [[TMP0:%.*]] = call <vscale x 32 x bfloat> @llvm.riscv.vfmax.mask.nxv32bf16.bf16.i64(<vscale x 32 x bfloat> [[MASKEDOFF]], <vscale x 32 x bfloat> [[OP1]], bfloat [[OP2]], <vscale x 32 x i1> [[MASK]], i64 [[VL]], i64 2)
+// CHECK-RV64-NEXT: ret <vscale x 32 x bfloat> [[TMP0]]
+//
+vbfloat16m8_t test_vfmax_vf_bf16m8_tum(vbool2_t mask, vbfloat16m8_t maskedoff, vbfloat16m8_t op1, __bf16 op2, size_t vl) {
+ return __riscv_vfmax_tum(mask, maskedoff, op1, op2, vl);
+}
+
+// CHECK-RV64-LABEL: define dso_local <vscale x 1 x bfloat> @test_vfmax_vv_bf16mf4_tumu(
+// CHECK-RV64-SAME: <vscale x 1 x i1> [[MASK:%.*]], <vscale x 1 x bfloat> [[MASKEDOFF:%.*]], <vscale x 1 x bfloat> [[OP1:%.*]], <vscale x 1 x bfloat> [[OP2:%.*]], i64 noundef [[VL:%.*]]) #[[ATTR0]] {
+// CHECK-RV64-NEXT: [[ENTRY:.*:]]
+// CHECK-RV64-NEXT: [[TMP0:%.*]] = call <vscale x 1 x bfloat> @llvm.riscv.vfmax.mask.nxv1bf16.nxv1bf16.i64(<vscale x 1 x bfloat> [[MASKEDOFF]], <vscale x 1 x bfloat> [[OP1]], <vscale x 1 x bfloat> [[OP2]], <vscale x 1 x i1> [[MASK]], i64 [[VL]], i64 0)
+// CHECK-RV64-NEXT: ret <vscale x 1 x bfloat> [[TMP0]]
+//
+vbfloat16mf4_t test_vfmax_vv_bf16mf4_tumu(vbool64_t mask, vbfloat16mf4_t maskedoff, vbfloat16mf4_t op1, vbfloat16mf4_t op2, size_t vl) {
+ return __riscv_vfmax_tumu(mask, maskedoff, op1, op2, vl);
+}
+
+// CHECK-RV64-LABEL: define dso_local <vscale x 1 x bfloat> @test_vfmax_vf_bf16mf4_tumu(
+// CHECK-RV64-SAME: <vscale x 1 x i1> [[MASK:%.*]], <vscale x 1 x bfloat> [[MASKEDOFF:%.*]], <vscale x 1 x bfloat> [[OP1:%.*]], bfloat noundef [[OP2:%.*]], i64 noundef [[VL:%.*]]) #[[ATTR0]] {
+// CHECK-RV64-NEXT: [[ENTRY:.*:]]
+// CHECK-RV64-NEXT: [[TMP0:%.*]] = call <vscale x 1 x bfloat> @llvm.riscv.vfmax.mask.nxv1bf16.bf16.i64(<vscale x 1 x bfloat> [[MASKEDOFF]], <vscale x 1 x bfloat> [[OP1]], bfloat [[OP2]], <vscale x 1 x i1> [[MASK]], i64 [[VL]], i64 0)
+// CHECK-RV64-NEXT: ret <vscale x 1 x bfloat> [[TMP0]]
+//
+vbfloat16mf4_t test_vfmax_vf_bf16mf4_tumu(vbool64_t mask, vbfloat16mf4_t maskedoff, vbfloat16mf4_t op1, __bf16 op2, size_t vl) {
+ return __riscv_vfmax_tumu(mask, maskedoff, op1, op2, vl);
+}
+
+// CHECK-RV64-LABEL: define dso_local <vscale x 2 x bfloat> @test_vfmax_vv_bf16mf2_tumu(
+// CHECK-RV64-SAME: <vscale x 2 x i1> [[MASK:%.*]], <vscale x 2 x bfloat> [[MASKEDOFF:%.*]], <vscale x 2 x bfloat> [[OP1:%.*]], <vscale x 2 x bfloat> [[OP2:%.*]], i64 noundef [[VL:%.*]]) #[[ATTR0]] {
+// CHECK-RV64-NEXT: [[ENTRY:.*:]]
+// CHECK-RV64-NEXT: [[TMP0:%.*]] = call <vscale x 2 x bfloat> @llvm.riscv.vfmax.mask.nxv2bf16.nxv2bf16.i64(<vscale x 2 x bfloat> [[MASKEDOFF]], <vscale x 2 x bfloat> [[OP1]], <vscale x 2 x bfloat> [[OP2]], <vscale x 2 x i1> [[MASK]], i64 [[VL]], i64 0)
+// CHECK-RV64-NEXT: ret <vscale x 2 x bfloat> [[TMP0]]
+//
+vbfloat16mf2_t test_vfmax_vv_bf16mf2_tumu(vbool32_t mask, vbfloat16mf2_t maskedoff, vbfloat16mf2_t op1, vbfloat16mf2_t op2, size_t vl) {
+ return __riscv_vfmax_tumu(mask, maskedoff, op1, op2, vl);
+}
+
+// CHECK-RV64-LABEL: define dso_local <vscale x 2 x bfloat> @test_vfmax_vf_bf16mf2_tumu(
+// CHECK-RV64-SAME: <vscale x 2 x i1> [[MASK:%.*]], <vscale x 2 x bfloat> [[MASKEDOFF:%.*]], <vscale x 2 x bfloat> [[OP1:%.*]], bfloat noundef [[OP2:%.*]], i64 noundef [[VL:%.*]]) #[[ATTR0]] {
+// CHECK-RV64-NEXT: [[ENTRY:.*:]]
+// CHECK-RV64-NEXT: [[TMP0:%.*]] = call <vscale x 2 x bfloat> @llvm.riscv.vfmax.mask.nxv2bf16.bf16.i64(<vscale x 2 x bfloat> [[MASKEDOFF]], <vscale x 2 x bfloat> [[OP1]], bfloat [[OP2]], <vscale x 2 x i1> [[MASK]], i64 [[VL]], i64 0)
+// CHECK-RV64-NEXT: ret <vscale x 2 x bfloat> [[TMP0]]
+//
+vbfloat16mf2_t test_vfmax_vf_bf16mf2_tumu(vbool32_t mask, vbfloat16mf2_t maskedoff, vbfloat16mf2_t op1, __bf16 op2, size_t vl) {
+ return __riscv_vfmax_tumu(mask, maskedoff, op1, op2, vl);
+}
+
+// CHECK-RV64-LABEL: define dso_local <vscale x 4 x bfloat> @test_vfmax_vv_bf16m1_tumu(
+// CHECK-RV64-SAME: <vscale x 4 x i1> [[MASK:%.*]], <vscale x 4 x bfloat> [[MASKEDOFF:%.*]], <vscale x 4 x bfloat> [[OP1:%.*]], <vscale x 4 x bfloat> [[OP2:%.*]], i64 noundef [[VL:%.*]]) #[[ATTR0]] {
+// CHECK-RV64-NEXT: [[ENTRY:.*:]]
+// CHECK-RV64-NEXT: [[TMP0:%.*]] = call <vscale x 4 x bfloat> @llvm.riscv.vfmax.mask.nxv4bf16.nxv4bf16.i64(<vscale x 4 x bfloat> [[MASKEDOFF]], <vscale x 4 x bfloat> [[OP1]], <vscale x 4 x bfloat> [[OP2]], <vscale x 4 x i1> [[MASK]], i64 [[VL]], i64 0)
+// CHECK-RV64-NEXT: ret <vscale x 4 x bfloat> [[TMP0]]
+//
+vbfloat16m1_t test_vfmax_vv_bf16m1_tumu(vbool16_t mask, vbfloat16m1_t maskedoff, vbfloat16m1_t op1, vbfloat16m1_t op2, size_t vl) {
+ return __riscv_vfmax_tumu(mask, maskedoff, op1, op2, vl);
+}
+
+// CHECK-RV64-LABEL: define dso_local <vscale x 4 x bfloat> @test_vfmax_vf_bf16m1_tumu(
+// CHECK-RV64-SAME: <vscale x 4 x i1> [[MASK:%.*]], <vscale x 4 x bfloat> [[MASKEDOFF:%.*]], <vscale x 4 x bfloat> [[OP1:%.*]], bfloat noundef [[OP2:%.*]], i64 noundef [[VL:%.*]]) #[[ATTR0]] {
+// CHECK-RV64-NEXT: [[ENTRY:.*:]]
+// CHECK-RV64-NEXT: [[TMP0:%.*]] = call <vscale x 4 x bfloat> @llvm.riscv.vfmax.mask.nxv4bf16.bf16.i64(<vscale x 4 x bfloat> [[MASKEDOFF]], <vscale x 4 x bfloat> [[OP1]], bfloat [[OP2]], <vscale x 4 x i1> [[MASK]], i64 [[VL]], i64 0)
+// CHECK-RV64-NEXT: ret <vscale x 4 x bfloat> [[TMP0]]
+//
+vbfloat16m1_t test_vfmax_vf_bf16m1_tumu(vbool16_t mask, vbfloat16m1_t maskedoff, vbfloat16m1_t op1, __bf16 op2, size_t vl) {
+ return __riscv_vfmax_tumu(mask, maskedoff, op1, op2, vl);
+}
+
+// CHECK-RV64-LABEL: define dso_local <vscale x 8 x bfloat> @test_vfmax_vv_bf16m2_tumu(
+// CHECK-RV64-SAME: <vscale x 8 x i1> [[MASK:%.*]], <vscale x 8 x bfloat> [[MASKEDOFF:%.*]], <vscale x 8 x bfloat> [[OP1:%.*]], <vscale x 8 x bfloat> [[OP2:%.*]], i64 noundef [[VL:%.*]]) #[[ATTR0]] {
+// CHECK-RV64-NEXT: [[ENTRY:.*:]]
+// CHECK-RV64-NEXT: [[TMP0:%.*]] = call <vscale x 8 x bfloat> @llvm.riscv.vfmax.mask.nxv8bf16.nxv8bf16.i64(<vscale x 8 x bfloat> [[MASKEDOFF]], <vscale x 8 x bfloat> [[OP1]], <vscale x 8 x bfloat> [[OP2]], <vscale x 8 x i1> [[MASK]], i64 [[VL]], i64 0)
+// CHECK-RV64-NEXT: ret <vscale x 8 x bfloat> [[TMP0]]
+//
+vbfloat16m2_t test_vfmax_vv_bf16m2_tumu(vbool8_t mask, vbfloat16m2_t maskedoff, vbfloat16m2_t op1, vbfloat16m2_t op2, size_t vl) {
+ return __riscv_vfmax_tumu(mask, maskedoff, op1, op2, vl);
+}
+
+// CHECK-RV64-LABEL: define dso_local <vscale x 8 x bfloat> @test_vfmax_vf_bf16m2_tumu(
+// CHECK-RV64-SAME: <vscale x 8 x i1> [[MASK:%.*]], <vscale x 8 x bfloat> [[MASKEDOFF:%.*]], <vscale x 8 x bfloat> [[OP1:%.*]], bfloat noundef [[OP2:%.*]], i64 noundef [[VL:%.*]]) #[[ATTR0]] {
+// CHECK-RV64-NEXT: [[ENTRY:.*:]]
+// CHECK-RV64-NEXT: [[TMP0:%.*]] = call <vscale x 8 x bfloat> @llvm.riscv.vfmax.mask.nxv8bf16.bf16.i64(<vscale x 8 x bfloat> [[MASKEDOFF]], <vscale x 8 x bfloat> [[OP1]], bfloat [[OP2]], <vscale x 8 x i1> [[MASK]], i64 [[VL]], i64 0)
+// CHECK-RV64-NEXT: ret <vscale x 8 x bfloat> [[TMP0]]
+//
+vbfloat16m2_t test_vfmax_vf_bf16m2_tumu(vbool8_t mask, vbfloat16m2_t maskedoff, vbfloat16m2_t op1, __bf16 op2, size_t vl) {
+ return __riscv_vfmax_tumu(mask, maskedoff, op1, op2, vl);
+}
+
+// CHECK-RV64-LABEL: define dso_local <vscale x 16 x bfloat> @test_vfmax_vv_bf16m4_tumu(
+// CHECK-RV64-SAME: <vscale x 16 x i1> [[MASK:%.*]], <vscale x 16 x bfloat> [[MASKEDOFF:%.*]], <vscale x 16 x bfloat> [[OP1:%.*]], <vscale x 16 x bfloat> [[OP2:%.*]], i64 noundef [[VL:%.*]]) #[[ATTR0]] {
+// CHECK-RV64-NEXT: [[ENTRY:.*:]]
+// CHECK-RV64-NEXT: [[TMP0:%.*]] = call <vscale x 16 x bfloat> @llvm.riscv.vfmax.mask.nxv16bf16.nxv16bf16.i64(<vscale x 16 x bfloat> [[MASKEDOFF]], <vscale x 16 x bfloat> [[OP1]], <vscale x 16 x bfloat> [[OP2]], <vscale x 16 x i1> [[MASK]], i64 [[VL]], i64 0)
+// CHECK-RV64-NEXT: ret <vscale x 16 x bfloat> [[TMP0]]
+//
+vbfloat16m4_t test_vfmax_vv_bf16m4_tumu(vbool4_t mask, vbfloat16m4_t maskedoff, vbfloat16m4_t op1, vbfloat16m4_t op2, size_t vl) {
+ return __riscv_vfmax_tumu(mask, maskedoff, op1, op2, vl);
+}
+
+// CHECK-RV64-LABEL: define dso_local <vscale x 16 x bfloat> @test_vfmax_vf_bf16m4_tumu(
+// CHECK-RV64-SAME: <vscale x 16 x i1> [[MASK:%.*]], <vscale x 16 x bfloat> [[MASKEDOFF:%.*]], <vscale x 16 x bfloat> [[OP1:%.*]], bfloat noundef [[OP2:%.*]], i64 noundef [[VL:%.*]]) #[[ATTR0]] {
+// CHECK-RV64-NEXT: [[ENTRY:.*:]]
+// CHECK-RV64-NEXT: [[TMP0:%.*]] = call <vscale x 16 x bfloat> @llvm.riscv.vfmax.mask.nxv16bf16.bf16.i64(<vscale x 16 x bfloat> [[MASKEDOFF]], <vscale x 16 x bfloat> [[OP1]], bfloat [[OP2]], <vscale x 16 x i1> [[MASK]], i64 [[VL]], i64 0)
+// CHECK-RV64-NEXT: ret <vscale x 16 x bfloat> [[TMP0]]
+//
+vbfloat16m4_t test_vfmax_vf_bf16m4_tumu(vbool4_t mask, vbfloat16m4_t maskedoff, vbfloat16m4_t op1, __bf16 op2, size_t vl) {
+ return __riscv_vfmax_tumu(mask, maskedoff, op1, op2, vl);
+}
+
+// CHECK-RV64-LABEL: define dso_local <vscale x 32 x bfloat> @test_vfmax_vv_bf16m8_tumu(
+// CHECK-RV64-SAME: <vscale x 32 x i1> [[MASK:%.*]], <vscale x 32 x bfloat> [[MASKEDOFF:%.*]], <vscale x 32 x bfloat> [[OP1:%.*]], <vscale x 32 x bfloat> [[OP2:%.*]], i64 noundef [[VL:%.*]]) #[[ATTR0]] {
+// CHECK-RV64-NEXT: [[ENTRY:.*:]]
+// CHECK-RV64-NEXT: [[TMP0:%.*]] = call <vscale x 32 x bfloat> @llvm.riscv.vfmax.mask.nxv32bf16.nxv32bf16.i64(<vscale x 32 x bfloat> [[MASKEDOFF]], <vscale x 32 x bfloat> [[OP1]], <vscale x 32 x bfloat> [[OP2]], <vscale x 32 x i1> [[MASK]], i64 [[VL]], i64 0)
+// CHECK-RV64-NEXT: ret <vscale x 32 x bfloat> [[TMP0]]
+//
+vbfloat16m8_t test_vfmax_vv_bf16m8_tumu(vbool2_t mask, vbfloat16m8_t maskedoff, vbfloat16m8_t op1, vbfloat16m8_t op2, size_t vl) {
+ return __riscv_vfmax_tumu(mask, maskedoff, op1, op2, vl);
+}
+
+// CHECK-RV64-LABEL: define dso_local <vscale x 32 x bfloat> @test_vfmax_vf_bf16m8_tumu(
+// CHECK-RV64-SAME: <vscale x 32 x i1> [[MASK:%.*]], <vscale x 32 x bfloat> [[MASKEDOFF:%.*]], <vscale x 32 x bfloat> [[OP1:%.*]], bfloat noundef [[OP2:%.*]], i64 noundef [[VL:%.*]]) #[[ATTR0]] {
+// CHECK-RV64-NEXT: [[ENTRY:.*:]]
+// CHECK-RV64-NEXT: [[TMP0:%.*]] = call <vscale x 32 x bfloat> @llvm.riscv.vfmax.mask.nxv32bf16.bf16.i64(<vscale x 32 x bfloat> [[MASKEDOFF]], <vscale x 32 x bfloat> [[OP1]], bfloat [[OP2]], <vscale x 32 x i1> [[MASK]], i64 [[VL]], i64 0)
+// CHECK-RV64-NEXT: ret <vscale x 32 x bfloat> [[TMP0]]
+//
+vbfloat16m8_t test_vfmax_vf_bf16m8_tumu(vbool2_t mask, vbfloat16m8_t maskedoff, vbfloat16m8_t op1, __bf16 op2, size_t vl) {
+ return __riscv_vfmax_tumu(mask, maskedoff, op1, op2, vl);
+}
+
+// CHECK-RV64-LABEL: define dso_local <vscale x 1 x bfloat> @test_vfmax_vv_bf16mf4_mu(
+// CHECK-RV64-SAME: <vscale x 1 x i1> [[MASK:%.*]], <vscale x 1 x bfloat> [[MASKEDOFF:%.*]], <vscale x 1 x bfloat> [[OP1:%.*]], <vscale x 1 x bfloat> [[OP2:%.*]], i64 noundef [[VL:%.*]]) #[[ATTR0]] {
+// CHECK-RV64-NEXT: [[ENTRY:.*:]]
+// CHECK-RV64-NEXT: [[TMP0:%.*]] = call <vscale x 1 x bfloat> @llvm.riscv.vfmax.mask.nxv1bf16.nxv1bf16.i64(<vscale x 1 x bfloat> [[MASKEDOFF]], <vscale x 1 x bfloat> [[OP1]], <vscale x 1 x bfloat> [[OP2]], <vscale x 1 x i1> [[MASK]], i64 [[VL]], i64 1)
+// CHECK-RV64-NEXT: ret <vscale x 1 x bfloat> [[TMP0]]
+//
+vbfloat16mf4_t test_vfmax_vv_bf16mf4_mu(vbool64_t mask, vbfloat16mf4_t maskedoff, vbfloat16mf4_t op1, vbfloat16mf4_t op2, size_t vl) {
+ return __riscv_vfmax_mu(mask, maskedoff, op1, op2, vl);
+}
+
+// CHECK-RV64-LABEL: define dso_local <vscale x 1 x bfloat> @test_vfmax_vf_bf16mf4_mu(
+// CHECK-RV64-SAME: <vscale x 1 x i1> [[MASK:%.*]], <vscale x 1 x bfloat> [[MASKEDOFF:%.*]], <vscale x 1 x bfloat> [[OP1:%.*]], bfloat noundef [[OP2:%.*]], i64 noundef [[VL:%.*]]) #[[ATTR0]] {
+// CHECK-RV64-NEXT: [[ENTRY:.*:]]
+// CHECK-RV64-NEXT: [[TMP0:%.*]] = call <vscale x 1 x bfloat> @llvm.riscv.vfmax.mask.nxv1bf16.bf16.i64(<vscale x 1 x bfloat> [[MASKEDOFF]], <vscale x 1 x bfloat> [[OP1]], bfloat [[OP2]], <vscale x 1 x i1> [[MASK]], i64 [[VL]], i64 1)
+// CHECK-RV64-NEXT: ret <vscale x 1 x bfloat> [[TMP0]]
+//
+vbfloat16mf4_t test_vfmax_vf_bf16mf4_mu(vbool64_t mask, vbfloat16mf4_t maskedoff, vbfloat16mf4_t op1, __bf16 op2, size_t vl) {
+ return __riscv_vfmax_mu(mask, maskedoff, op1, op2, vl);
+}
+
+// CHECK-RV64-LABEL: define dso_local <vscale x 2 x bfloat> @test_vfmax_vv_bf16mf2_mu(
+// CHECK-RV64-SAME: <vscale x 2 x i1> [[MASK:%.*]], <vscale x 2 x bfloat> [[MASKEDOFF:%.*]], <vscale x 2 x bfloat> [[OP1:%.*]], <vscale x 2 x bfloat> [[OP2:%.*]], i64 noundef [[VL:%.*]]) #[[ATTR0]] {
+// CHECK-RV64-NEXT: [[ENTRY:.*:]]
+// CHECK-RV64-NEXT: [[TMP0:%.*]] = call <vscale x 2 x bfloat> @llvm.riscv.vfmax.mask.nxv2bf16.nxv2bf16.i64(<vscale x 2 x bfloat> [[MASKEDOFF]], <vscale x 2 x bfloat> [[OP1]], <vscale x 2 x bfloat> [[OP2]], <vscale x 2 x i1> [[MASK]], i64 [[VL]], i64 1)
+// CHECK-RV64-NEXT: ret <vscale x 2 x bfloat> [[TMP0]]
+//
+vbfloat16mf2_t test_vfmax_vv_bf16mf2_mu(vbool32_t mask, vbfloat16mf2_t maskedoff, vbfloat16mf2_t op1, vbfloat16mf2_t op2, size_t vl) {
+ return __riscv_vfmax_mu(mask, maskedoff, op1, op2, vl);
+}
+
+// CHECK-RV64-LABEL: define dso_local <vscale x 2 x bfloat> @test_vfmax_vf_bf16mf2_mu(
+// CHECK-RV64-SAME: <vscale x 2 x i1> [[MASK:%.*]], <vscale x 2 x bfloat> [[MASKEDOFF:%.*]], <vscale x 2 x bfloat> [[OP1:%.*]], bfloat noundef [[OP2:%.*]], i64 noundef [[VL:%.*]]) #[[ATTR0]] {
+// CHECK-RV64-NEXT: [[ENTRY:.*:]]
+// CHECK-RV64-NEXT: [[TMP0:%.*]] = call <vscale x 2 x bfloat> @llvm.riscv.vfmax.mask.nxv2bf16.bf16.i64(<vscale x 2 x bfloat> [[MASKEDOFF]], <vscale x 2 x bfloat> [[OP1]], bfloat [[OP2]], <vscale x 2 x i1> [[MASK]], i64 [[VL]], i64 1)
+// CHECK-RV64-NEXT: ret <vscale x 2 x bfloat> [[TMP0]]
+//
+vbfloat16mf2_t test_vfmax_vf_bf16mf2_mu(vbool32_t mask, vbfloat16mf2_t maskedoff, vbfloat16mf2_t op1, __bf16 op2, size_t vl) {
+ return __riscv_vfmax_mu(mask, maskedoff, op1, op2, vl);
+}
+
+// CHECK-RV64-LABEL: define dso_local <vscale x 4 x bfloat> @test_vfmax_vv_bf16m1_mu(
+// CHECK-RV64-SAME: <vscale x 4 x i1> [[MASK:%.*]], <vscale x 4 x bfloat> [[MASKEDOFF:%.*]], <vscale x 4 x bfloat> [[OP1:%.*]], <vscale x 4 x bfloat> [[OP2:%.*]], i64 noundef [[VL:%.*]]) #[[ATTR0]] {
+// CHECK-RV64-NEXT: [[ENTRY:.*:]]
+// CHECK-RV64-NEXT: [[TMP0:%.*]] = call <vscale x 4 x bfloat> @llvm.riscv.vfmax.mask.nxv4bf16.nxv4bf16.i64(<vscale x 4 x bfloat> [[MASKEDOFF]], <vscale x 4 x bfloat> [[OP1]], <vscale x 4 x bfloat> [[OP2]], <vscale x 4 x i1> [[MASK]], i64 [[VL]], i64 1)
+// CHECK-RV64-NEXT: ret <vscale x 4 x bfloat> [[TMP0]]
+//
+vbfloat16m1_t test_vfmax_vv_bf16m1_mu(vbool16_t mask, vbfloat16m1_t maskedoff, vbfloat16m1_t op1, vbfloat16m1_t op2, size_t vl) {
+ return __riscv_vfmax_mu(mask, maskedoff, op1, op2, vl);
+}
+
+// CHECK-RV64-LABEL: define dso_local <vscale x 4 x bfloat> @test_vfmax_vf_bf16m1_mu(
+// CHECK-RV64-SAME: <vscale x 4 x i1> [[MASK:%.*]], <vscale x 4 x bfloat> [[MASKEDOFF:%.*]], <vscale x 4 x bfloat> [[OP1:%.*]], bfloat noundef [[OP2:%.*]], i64 noundef [[VL:%.*]]) #[[ATTR0]] {
+// CHECK-RV64-NEXT: [[ENTRY:.*:]]
+// CHECK-RV64-NEXT: [[TMP0:%.*]] = call <vscale x 4 x bfloat> @llvm.riscv.vfmax.mask.nxv4bf16.bf16.i64(<vscale x 4 x bfloat> [[MASKEDOFF]], <vscale x 4 x bfloat> [[OP1]], bfloat [[OP2]], <vscale x 4 x i1> [[MASK]], i64 [[VL]], i64 1)
+// CHECK-RV64-NEXT: ret <vscale x 4 x bfloat> [[TMP0]]
+//
+vbfloat16m1_t test_vfmax_vf_bf16m1_mu(vbool16_t mask, vbfloat16m1_t maskedoff, vbfloat16m1_t op1, __bf16 op2, size_t vl) {
+ return __riscv_vfmax_mu(mask, maskedoff, op1, op2, vl);
+}
+
+// CHECK-RV64-LABEL: define dso_local <vscale x 8 x bfloat> @test_vfmax_vv_bf16m2_mu(
+// CHECK-RV64-SAME: <vscale x 8 x i1> [[MASK:%.*]], <vscale x 8 x bfloat> [[MASKEDOFF:%.*]], <vscale x 8 x bfloat> [[OP1:%.*]], <vscale x 8 x bfloat> [[OP2:%.*]], i64 noundef [[VL:%.*]]) #[[ATTR0]] {
+// CHECK-RV64-NEXT: [[ENTRY:.*:]]
+// CHECK-RV64-NEXT: [[TMP0:%.*]] = call <vscale x 8 x bfloat> @llvm.riscv.vfmax.mask.nxv8bf16.nxv8bf16.i64(<vscale x 8 x bfloat> [[MASKEDOFF]], <vscale x 8 x bfloat> [[OP1]], <vscale x 8 x bfloat> [[OP2]], <vscale x 8 x i1> [[MASK]], i64 [[VL]], i64 1)
+// CHECK-RV64-NEXT: ret <vscale x 8 x bfloat> [[TMP0]]
+//
+vbfloat16m2_t test_vfmax_vv_bf16m2_mu(vbool8_t mask, vbfloat16m2_t maskedoff, vbfloat16m2_t op1, vbfloat16m2_t op2, size_t vl) {
+ return __riscv_vfmax_mu(mask, maskedoff, op1, op2, vl);
+}
+
+// CHECK-RV64-LABEL: define dso_local <vscale x 8 x bfloat> @test_vfmax_vf_bf16m2_mu(
+// CHECK-RV64-SAME: <vscale x 8 x i1> [[MASK:%.*]], <vscale x 8 x bfloat> [[MASKEDOFF:%.*]], <vscale x 8 x bfloat> [[OP1:%.*]], bfloat noundef [[OP2:%.*]], i64 noundef [[VL:%.*]]) #[[ATTR0]] {
+// CHECK-RV64-NEXT: [[ENTRY:.*:]]
+// CHECK-RV64-NEXT: [[TMP0:%.*]] = call <vscale x 8 x bfloat> @llvm.riscv.vfmax.mask.nxv8bf16.bf16.i64(<vscale x 8 x bfloat> [[MASKEDOFF]], <vscale x 8 x bfloat> [[OP1]], bfloat [[OP2]], <vscale x 8 x i1> [[MASK]], i64 [[VL]], i64 1)
+// CHECK-RV64-NEXT: ret <vscale x 8 x bfloat> [[TMP0]]
+//
+vbfloat16m2_t test_vfmax_vf_bf16m2_mu(vbool8_t mask, vbfloat16m2_t maskedoff, vbfloat16m2_t op1, __bf16 op2, size_t vl) {
+ return __riscv_vfmax_mu(mask, maskedoff, op1, op2, vl);
+}
+
+// CHECK-RV64-LABEL: define dso_local <vscale x 16 x bfloat> @test_vfmax_vv_bf16m4_mu(
+// CHECK-RV64-SAME: <vscale x 16 x i1> [[MASK:%.*]], <vscale x 16 x bfloat> [[MASKEDOFF:%.*]], <vscale x 16 x bfloat> [[OP1:%.*]], <vscale x 16 x bfloat> [[OP2:%.*]], i64 noundef [[VL:%.*]]) #[[ATTR0]] {
+// CHECK-RV64-NEXT: [[ENTRY:.*:]]
+// CHECK-RV64-NEXT: [[TMP0:%.*]] = call <vscale x 16 x bfloat> @llvm.riscv.vfmax.mask.nxv16bf16.nxv16bf16.i64(<vscale x 16 x bfloat> [[MASKEDOFF]], <vscale x 16 x bfloat> [[OP1]], <vscale x 16 x bfloat> [[OP2]], <vscale x 16 x i1> [[MASK]], i64 [[VL]], i64 1)
+// CHECK-RV64-NEXT: ret <vscale x 16 x bfloat> [[TMP0]]
+//
+vbfloat16m4_t test_vfmax_vv_bf16m4_mu(vbool4_t mask, vbfloat16m4_t maskedoff, vbfloat16m4_t op1, vbfloat16m4_t op2, size_t vl) {
+ return __riscv_vfmax_mu(mask, maskedoff, op1, op2, vl);
+}
+
+// CHECK-RV64-LABEL: define dso_local <vscale x 16 x bfloat> @test_vfmax_vf_bf16m4_mu(
+// CHECK-RV64-SAME: <vscale x 16 x i1> [[MASK:%.*]], <vscale x 16 x bfloat> [[MASKEDOFF:%.*]], <vscale x 16 x bfloat> [[OP1:%.*]], bfloat noundef [[OP2:%.*]], i64 noundef [[VL:%.*]]) #[[ATTR0]] {
+// CHECK-RV64-NEXT: [[ENTRY:.*:]]
+// CHECK-RV64-NEXT: [[TMP0:%.*]] = call <vscale x 16 x bfloat> @llvm.riscv.vfmax.mask.nxv16bf16.bf16.i64(<vscale x 16 x bfloat> [[MASKEDOFF]], <vscale x 16 x bfloat> [[OP1]], bfloat [[OP2]], <vscale x 16 x i1> [[MASK]], i64 [[VL]], i64 1)
+// CHECK-RV64-NEXT: ret <vscale x 16 x bfloat> [[TMP0]]
+//
+vbfloat16m4_t test_vfmax_vf_bf16m4_mu(vbool4_t mask, vbfloat16m4_t maskedoff, vbfloat16m4_t op1, __bf16 op2, size_t vl) {
+ return __riscv_vfmax_mu(mask, maskedoff, op1, op2, vl);
+}
+
+// CHECK-RV64-LABEL: define dso_local <vscale x 32 x bfloat> @test_vfmax_vv_bf16m8_mu(
+// CHECK-RV64-SAME: <vscale x 32 x i1> [[MASK:%.*]], <vscale x 32 x bfloat> [[MASKEDOFF:%.*]], <vscale x 32 x bfloat> [[OP1:%.*]], <vscale x 32 x bfloat> [[OP2:%.*]], i64 noundef [[VL:%.*]]) #[[ATTR0]] {
+// CHECK-RV64-NEXT: [[ENTRY:.*:]]
+// CHECK-RV64-NEXT: [[TMP0:%.*]] = call <vscale x 32 x bfloat> @llvm.riscv.vfmax.mask.nxv32bf16.nxv32bf16.i64(<vscale x 32 x bfloat> [[MASKEDOFF]], <vscale x 32 x bfloat> [[OP1]], <vscale x 32 x bfloat> [[OP2]], <vscale x 32 x i1> [[MASK]], i64 [[VL]], i64 1)
+// CHECK-RV64-NEXT: ret <vscale x 32 x bfloat> [[TMP0]]
+//
+vbfloat16m8_t test_vfmax_vv_bf16m8_mu(vbool2_t mask, vbfloat16m8_t maskedoff, vbfloat16m8_t op1, vbfloat16m8_t op2, size_t vl) {
+ return __riscv_vfmax_mu(mask, maskedoff, op1, op2, vl);
+}
+
+// CHECK-RV64-LABEL: define dso_local <vscale x 32 x bfloat> @test_vfmax_vf_bf16m8_mu(
+// CHECK-RV64-SAME: <vscale x 32 x i1> [[MASK:%.*]], <vscale x 32 x bfloat> [[MASKEDOFF:%.*]], <vscale x 32 x bfloat> [[OP1:%.*]], bfloat noundef [[OP2:%.*]], i64 noundef [[VL:%.*]]) #[[ATTR0]] {
+// CHECK-RV64-NEXT: [[ENTRY:.*:]]
+// CHECK-RV64-NEXT: [[TMP0:%.*]] = call <vscale x 32 x bfloat> @llvm.riscv.vfmax.mask.nxv32bf16.bf16.i64(<vscale x 32 x bfloat> [[MASKEDOFF]], <vscale x 32 x bfloat> [[OP1]], bfloat [[OP2]], <vscale x 32 x i1> [[MASK]], i64 [[VL]], i64 1)
+// CHECK-RV64-NEXT: ret <vscale x 32 x bfloat> [[TMP0]]
+//
+vbfloat16m8_t test_vfmax_vf_bf16m8_mu(vbool2_t mask, vbfloat16m8_t maskedoff, vbfloat16m8_t op1, __bf16 op2, size_t vl) {
+ return __riscv_vfmax_mu(mask, maskedoff, op1, op2, vl);
+}
+
diff --git a/clang/test/CodeGen/RISCV/rvv-intrinsics-autogenerated/zvfbfa/policy/overloaded/vfmerge.c b/clang/test/CodeGen/RISCV/rvv-intrinsics-autogenerated/zvfbfa/policy/overloaded/vfmerge.c
new file mode 100644
index 0000000..9593ad5
--- /dev/null
+++ b/clang/test/CodeGen/RISCV/rvv-intrinsics-autogenerated/zvfbfa/policy/overloaded/vfmerge.c
@@ -0,0 +1,69 @@
+// NOTE: Assertions have been autogenerated by utils/update_cc_test_checks.py UTC_ARGS: --version 5
+// REQUIRES: riscv-registered-target
+// RUN: %clang_cc1 -triple riscv64 -target-feature +v \
+// RUN: -target-feature +experimental-zvfbfa -disable-O0-optnone \
+// RUN: -emit-llvm %s -o - | opt -S -passes=mem2reg | \
+// RUN: FileCheck --check-prefix=CHECK-RV64 %s
+
+#include <riscv_vector.h>
+
+// CHECK-RV64-LABEL: define dso_local <vscale x 1 x bfloat> @test_vfmerge_vfm_bf16mf4_tu(
+// CHECK-RV64-SAME: <vscale x 1 x bfloat> [[MASKEDOFF:%.*]], <vscale x 1 x bfloat> [[OP1:%.*]], bfloat noundef [[OP2:%.*]], <vscale x 1 x i1> [[MASK:%.*]], i64 noundef [[VL:%.*]]) #[[ATTR0:[0-9]+]] {
+// CHECK-RV64-NEXT: [[ENTRY:.*:]]
+// CHECK-RV64-NEXT: [[TMP0:%.*]] = call <vscale x 1 x bfloat> @llvm.riscv.vfmerge.nxv1bf16.bf16.i64(<vscale x 1 x bfloat> [[MASKEDOFF]], <vscale x 1 x bfloat> [[OP1]], bfloat [[OP2]], <vscale x 1 x i1> [[MASK]], i64 [[VL]])
+// CHECK-RV64-NEXT: ret <vscale x 1 x bfloat> [[TMP0]]
+//
+vbfloat16mf4_t test_vfmerge_vfm_bf16mf4_tu(vbfloat16mf4_t maskedoff, vbfloat16mf4_t op1, __bf16 op2, vbool64_t mask, size_t vl) {
+ return __riscv_vfmerge_tu(maskedoff, op1, op2, mask, vl);
+}
+
+// CHECK-RV64-LABEL: define dso_local <vscale x 2 x bfloat> @test_vfmerge_vfm_bf16mf2_tu(
+// CHECK-RV64-SAME: <vscale x 2 x bfloat> [[MASKEDOFF:%.*]], <vscale x 2 x bfloat> [[OP1:%.*]], bfloat noundef [[OP2:%.*]], <vscale x 2 x i1> [[MASK:%.*]], i64 noundef [[VL:%.*]]) #[[ATTR0]] {
+// CHECK-RV64-NEXT: [[ENTRY:.*:]]
+// CHECK-RV64-NEXT: [[TMP0:%.*]] = call <vscale x 2 x bfloat> @llvm.riscv.vfmerge.nxv2bf16.bf16.i64(<vscale x 2 x bfloat> [[MASKEDOFF]], <vscale x 2 x bfloat> [[OP1]], bfloat [[OP2]], <vscale x 2 x i1> [[MASK]], i64 [[VL]])
+// CHECK-RV64-NEXT: ret <vscale x 2 x bfloat> [[TMP0]]
+//
+vbfloat16mf2_t test_vfmerge_vfm_bf16mf2_tu(vbfloat16mf2_t maskedoff, vbfloat16mf2_t op1, __bf16 op2, vbool32_t mask, size_t vl) {
+ return __riscv_vfmerge_tu(maskedoff, op1, op2, mask, vl);
+}
+
+// CHECK-RV64-LABEL: define dso_local <vscale x 4 x bfloat> @test_vfmerge_vfm_bf16m1_tu(
+// CHECK-RV64-SAME: <vscale x 4 x bfloat> [[MASKEDOFF:%.*]], <vscale x 4 x bfloat> [[OP1:%.*]], bfloat noundef [[OP2:%.*]], <vscale x 4 x i1> [[MASK:%.*]], i64 noundef [[VL:%.*]]) #[[ATTR0]] {
+// CHECK-RV64-NEXT: [[ENTRY:.*:]]
+// CHECK-RV64-NEXT: [[TMP0:%.*]] = call <vscale x 4 x bfloat> @llvm.riscv.vfmerge.nxv4bf16.bf16.i64(<vscale x 4 x bfloat> [[MASKEDOFF]], <vscale x 4 x bfloat> [[OP1]], bfloat [[OP2]], <vscale x 4 x i1> [[MASK]], i64 [[VL]])
+// CHECK-RV64-NEXT: ret <vscale x 4 x bfloat> [[TMP0]]
+//
+vbfloat16m1_t test_vfmerge_vfm_bf16m1_tu(vbfloat16m1_t maskedoff, vbfloat16m1_t op1, __bf16 op2, vbool16_t mask, size_t vl) {
+ return __riscv_vfmerge_tu(maskedoff, op1, op2, mask, vl);
+}
+
+// CHECK-RV64-LABEL: define dso_local <vscale x 8 x bfloat> @test_vfmerge_vfm_bf16m2_tu(
+// CHECK-RV64-SAME: <vscale x 8 x bfloat> [[MASKEDOFF:%.*]], <vscale x 8 x bfloat> [[OP1:%.*]], bfloat noundef [[OP2:%.*]], <vscale x 8 x i1> [[MASK:%.*]], i64 noundef [[VL:%.*]]) #[[ATTR0]] {
+// CHECK-RV64-NEXT: [[ENTRY:.*:]]
+// CHECK-RV64-NEXT: [[TMP0:%.*]] = call <vscale x 8 x bfloat> @llvm.riscv.vfmerge.nxv8bf16.bf16.i64(<vscale x 8 x bfloat> [[MASKEDOFF]], <vscale x 8 x bfloat> [[OP1]], bfloat [[OP2]], <vscale x 8 x i1> [[MASK]], i64 [[VL]])
+// CHECK-RV64-NEXT: ret <vscale x 8 x bfloat> [[TMP0]]
+//
+vbfloat16m2_t test_vfmerge_vfm_bf16m2_tu(vbfloat16m2_t maskedoff, vbfloat16m2_t op1, __bf16 op2, vbool8_t mask, size_t vl) {
+ return __riscv_vfmerge_tu(maskedoff, op1, op2, mask, vl);
+}
+
+// CHECK-RV64-LABEL: define dso_local <vscale x 16 x bfloat> @test_vfmerge_vfm_bf16m4_tu(
+// CHECK-RV64-SAME: <vscale x 16 x bfloat> [[MASKEDOFF:%.*]], <vscale x 16 x bfloat> [[OP1:%.*]], bfloat noundef [[OP2:%.*]], <vscale x 16 x i1> [[MASK:%.*]], i64 noundef [[VL:%.*]]) #[[ATTR0]] {
+// CHECK-RV64-NEXT: [[ENTRY:.*:]]
+// CHECK-RV64-NEXT: [[TMP0:%.*]] = call <vscale x 16 x bfloat> @llvm.riscv.vfmerge.nxv16bf16.bf16.i64(<vscale x 16 x bfloat> [[MASKEDOFF]], <vscale x 16 x bfloat> [[OP1]], bfloat [[OP2]], <vscale x 16 x i1> [[MASK]], i64 [[VL]])
+// CHECK-RV64-NEXT: ret <vscale x 16 x bfloat> [[TMP0]]
+//
+vbfloat16m4_t test_vfmerge_vfm_bf16m4_tu(vbfloat16m4_t maskedoff, vbfloat16m4_t op1, __bf16 op2, vbool4_t mask, size_t vl) {
+ return __riscv_vfmerge_tu(maskedoff, op1, op2, mask, vl);
+}
+
+// CHECK-RV64-LABEL: define dso_local <vscale x 32 x bfloat> @test_vfmerge_vfm_bf16m8_tu(
+// CHECK-RV64-SAME: <vscale x 32 x bfloat> [[MASKEDOFF:%.*]], <vscale x 32 x bfloat> [[OP1:%.*]], bfloat noundef [[OP2:%.*]], <vscale x 32 x i1> [[MASK:%.*]], i64 noundef [[VL:%.*]]) #[[ATTR0]] {
+// CHECK-RV64-NEXT: [[ENTRY:.*:]]
+// CHECK-RV64-NEXT: [[TMP0:%.*]] = call <vscale x 32 x bfloat> @llvm.riscv.vfmerge.nxv32bf16.bf16.i64(<vscale x 32 x bfloat> [[MASKEDOFF]], <vscale x 32 x bfloat> [[OP1]], bfloat [[OP2]], <vscale x 32 x i1> [[MASK]], i64 [[VL]])
+// CHECK-RV64-NEXT: ret <vscale x 32 x bfloat> [[TMP0]]
+//
+vbfloat16m8_t test_vfmerge_vfm_bf16m8_tu(vbfloat16m8_t maskedoff, vbfloat16m8_t op1, __bf16 op2, vbool2_t mask, size_t vl) {
+ return __riscv_vfmerge_tu(maskedoff, op1, op2, mask, vl);
+}
+
diff --git a/clang/test/CodeGen/RISCV/rvv-intrinsics-autogenerated/zvfbfa/policy/overloaded/vfmin.c b/clang/test/CodeGen/RISCV/rvv-intrinsics-autogenerated/zvfbfa/policy/overloaded/vfmin.c
new file mode 100644
index 0000000..f3ef3c3
--- /dev/null
+++ b/clang/test/CodeGen/RISCV/rvv-intrinsics-autogenerated/zvfbfa/policy/overloaded/vfmin.c
@@ -0,0 +1,489 @@
+// NOTE: Assertions have been autogenerated by utils/update_cc_test_checks.py UTC_ARGS: --version 5
+// REQUIRES: riscv-registered-target
+// RUN: %clang_cc1 -triple riscv64 -target-feature +v \
+// RUN: -target-feature +experimental-zvfbfa -disable-O0-optnone \
+// RUN: -emit-llvm %s -o - | opt -S -passes=mem2reg | \
+// RUN: FileCheck --check-prefix=CHECK-RV64 %s
+
+#include <riscv_vector.h>
+
+// CHECK-RV64-LABEL: define dso_local <vscale x 1 x bfloat> @test_vfmin_vv_bf16mf4_tu(
+// CHECK-RV64-SAME: <vscale x 1 x bfloat> [[MASKEDOFF:%.*]], <vscale x 1 x bfloat> [[OP1:%.*]], <vscale x 1 x bfloat> [[OP2:%.*]], i64 noundef [[VL:%.*]]) #[[ATTR0:[0-9]+]] {
+// CHECK-RV64-NEXT: [[ENTRY:.*:]]
+// CHECK-RV64-NEXT: [[TMP0:%.*]] = call <vscale x 1 x bfloat> @llvm.riscv.vfmin.nxv1bf16.nxv1bf16.i64(<vscale x 1 x bfloat> [[MASKEDOFF]], <vscale x 1 x bfloat> [[OP1]], <vscale x 1 x bfloat> [[OP2]], i64 [[VL]])
+// CHECK-RV64-NEXT: ret <vscale x 1 x bfloat> [[TMP0]]
+//
+vbfloat16mf4_t test_vfmin_vv_bf16mf4_tu(vbfloat16mf4_t maskedoff, vbfloat16mf4_t op1, vbfloat16mf4_t op2, size_t vl) {
+ return __riscv_vfmin_tu(maskedoff, op1, op2, vl);
+}
+
+// CHECK-RV64-LABEL: define dso_local <vscale x 1 x bfloat> @test_vfmin_vf_bf16mf4_tu(
+// CHECK-RV64-SAME: <vscale x 1 x bfloat> [[MASKEDOFF:%.*]], <vscale x 1 x bfloat> [[OP1:%.*]], bfloat noundef [[OP2:%.*]], i64 noundef [[VL:%.*]]) #[[ATTR0]] {
+// CHECK-RV64-NEXT: [[ENTRY:.*:]]
+// CHECK-RV64-NEXT: [[TMP0:%.*]] = call <vscale x 1 x bfloat> @llvm.riscv.vfmin.nxv1bf16.bf16.i64(<vscale x 1 x bfloat> [[MASKEDOFF]], <vscale x 1 x bfloat> [[OP1]], bfloat [[OP2]], i64 [[VL]])
+// CHECK-RV64-NEXT: ret <vscale x 1 x bfloat> [[TMP0]]
+//
+vbfloat16mf4_t test_vfmin_vf_bf16mf4_tu(vbfloat16mf4_t maskedoff, vbfloat16mf4_t op1, __bf16 op2, size_t vl) {
+ return __riscv_vfmin_tu(maskedoff, op1, op2, vl);
+}
+
+// CHECK-RV64-LABEL: define dso_local <vscale x 2 x bfloat> @test_vfmin_vv_bf16mf2_tu(
+// CHECK-RV64-SAME: <vscale x 2 x bfloat> [[MASKEDOFF:%.*]], <vscale x 2 x bfloat> [[OP1:%.*]], <vscale x 2 x bfloat> [[OP2:%.*]], i64 noundef [[VL:%.*]]) #[[ATTR0]] {
+// CHECK-RV64-NEXT: [[ENTRY:.*:]]
+// CHECK-RV64-NEXT: [[TMP0:%.*]] = call <vscale x 2 x bfloat> @llvm.riscv.vfmin.nxv2bf16.nxv2bf16.i64(<vscale x 2 x bfloat> [[MASKEDOFF]], <vscale x 2 x bfloat> [[OP1]], <vscale x 2 x bfloat> [[OP2]], i64 [[VL]])
+// CHECK-RV64-NEXT: ret <vscale x 2 x bfloat> [[TMP0]]
+//
+vbfloat16mf2_t test_vfmin_vv_bf16mf2_tu(vbfloat16mf2_t maskedoff, vbfloat16mf2_t op1, vbfloat16mf2_t op2, size_t vl) {
+ return __riscv_vfmin_tu(maskedoff, op1, op2, vl);
+}
+
+// CHECK-RV64-LABEL: define dso_local <vscale x 2 x bfloat> @test_vfmin_vf_bf16mf2_tu(
+// CHECK-RV64-SAME: <vscale x 2 x bfloat> [[MASKEDOFF:%.*]], <vscale x 2 x bfloat> [[OP1:%.*]], bfloat noundef [[OP2:%.*]], i64 noundef [[VL:%.*]]) #[[ATTR0]] {
+// CHECK-RV64-NEXT: [[ENTRY:.*:]]
+// CHECK-RV64-NEXT: [[TMP0:%.*]] = call <vscale x 2 x bfloat> @llvm.riscv.vfmin.nxv2bf16.bf16.i64(<vscale x 2 x bfloat> [[MASKEDOFF]], <vscale x 2 x bfloat> [[OP1]], bfloat [[OP2]], i64 [[VL]])
+// CHECK-RV64-NEXT: ret <vscale x 2 x bfloat> [[TMP0]]
+//
+vbfloat16mf2_t test_vfmin_vf_bf16mf2_tu(vbfloat16mf2_t maskedoff, vbfloat16mf2_t op1, __bf16 op2, size_t vl) {
+ return __riscv_vfmin_tu(maskedoff, op1, op2, vl);
+}
+
+// CHECK-RV64-LABEL: define dso_local <vscale x 4 x bfloat> @test_vfmin_vv_bf16m1_tu(
+// CHECK-RV64-SAME: <vscale x 4 x bfloat> [[MASKEDOFF:%.*]], <vscale x 4 x bfloat> [[OP1:%.*]], <vscale x 4 x bfloat> [[OP2:%.*]], i64 noundef [[VL:%.*]]) #[[ATTR0]] {
+// CHECK-RV64-NEXT: [[ENTRY:.*:]]
+// CHECK-RV64-NEXT: [[TMP0:%.*]] = call <vscale x 4 x bfloat> @llvm.riscv.vfmin.nxv4bf16.nxv4bf16.i64(<vscale x 4 x bfloat> [[MASKEDOFF]], <vscale x 4 x bfloat> [[OP1]], <vscale x 4 x bfloat> [[OP2]], i64 [[VL]])
+// CHECK-RV64-NEXT: ret <vscale x 4 x bfloat> [[TMP0]]
+//
+vbfloat16m1_t test_vfmin_vv_bf16m1_tu(vbfloat16m1_t maskedoff, vbfloat16m1_t op1, vbfloat16m1_t op2, size_t vl) {
+ return __riscv_vfmin_tu(maskedoff, op1, op2, vl);
+}
+
+// CHECK-RV64-LABEL: define dso_local <vscale x 4 x bfloat> @test_vfmin_vf_bf16m1_tu(
+// CHECK-RV64-SAME: <vscale x 4 x bfloat> [[MASKEDOFF:%.*]], <vscale x 4 x bfloat> [[OP1:%.*]], bfloat noundef [[OP2:%.*]], i64 noundef [[VL:%.*]]) #[[ATTR0]] {
+// CHECK-RV64-NEXT: [[ENTRY:.*:]]
+// CHECK-RV64-NEXT: [[TMP0:%.*]] = call <vscale x 4 x bfloat> @llvm.riscv.vfmin.nxv4bf16.bf16.i64(<vscale x 4 x bfloat> [[MASKEDOFF]], <vscale x 4 x bfloat> [[OP1]], bfloat [[OP2]], i64 [[VL]])
+// CHECK-RV64-NEXT: ret <vscale x 4 x bfloat> [[TMP0]]
+//
+vbfloat16m1_t test_vfmin_vf_bf16m1_tu(vbfloat16m1_t maskedoff, vbfloat16m1_t op1, __bf16 op2, size_t vl) {
+ return __riscv_vfmin_tu(maskedoff, op1, op2, vl);
+}
+
+// CHECK-RV64-LABEL: define dso_local <vscale x 8 x bfloat> @test_vfmin_vv_bf16m2_tu(
+// CHECK-RV64-SAME: <vscale x 8 x bfloat> [[MASKEDOFF:%.*]], <vscale x 8 x bfloat> [[OP1:%.*]], <vscale x 8 x bfloat> [[OP2:%.*]], i64 noundef [[VL:%.*]]) #[[ATTR0]] {
+// CHECK-RV64-NEXT: [[ENTRY:.*:]]
+// CHECK-RV64-NEXT: [[TMP0:%.*]] = call <vscale x 8 x bfloat> @llvm.riscv.vfmin.nxv8bf16.nxv8bf16.i64(<vscale x 8 x bfloat> [[MASKEDOFF]], <vscale x 8 x bfloat> [[OP1]], <vscale x 8 x bfloat> [[OP2]], i64 [[VL]])
+// CHECK-RV64-NEXT: ret <vscale x 8 x bfloat> [[TMP0]]
+//
+vbfloat16m2_t test_vfmin_vv_bf16m2_tu(vbfloat16m2_t maskedoff, vbfloat16m2_t op1, vbfloat16m2_t op2, size_t vl) {
+ return __riscv_vfmin_tu(maskedoff, op1, op2, vl);
+}
+
+// CHECK-RV64-LABEL: define dso_local <vscale x 8 x bfloat> @test_vfmin_vf_bf16m2_tu(
+// CHECK-RV64-SAME: <vscale x 8 x bfloat> [[MASKEDOFF:%.*]], <vscale x 8 x bfloat> [[OP1:%.*]], bfloat noundef [[OP2:%.*]], i64 noundef [[VL:%.*]]) #[[ATTR0]] {
+// CHECK-RV64-NEXT: [[ENTRY:.*:]]
+// CHECK-RV64-NEXT: [[TMP0:%.*]] = call <vscale x 8 x bfloat> @llvm.riscv.vfmin.nxv8bf16.bf16.i64(<vscale x 8 x bfloat> [[MASKEDOFF]], <vscale x 8 x bfloat> [[OP1]], bfloat [[OP2]], i64 [[VL]])
+// CHECK-RV64-NEXT: ret <vscale x 8 x bfloat> [[TMP0]]
+//
+vbfloat16m2_t test_vfmin_vf_bf16m2_tu(vbfloat16m2_t maskedoff, vbfloat16m2_t op1, __bf16 op2, size_t vl) {
+ return __riscv_vfmin_tu(maskedoff, op1, op2, vl);
+}
+
+// CHECK-RV64-LABEL: define dso_local <vscale x 16 x bfloat> @test_vfmin_vv_bf16m4_tu(
+// CHECK-RV64-SAME: <vscale x 16 x bfloat> [[MASKEDOFF:%.*]], <vscale x 16 x bfloat> [[OP1:%.*]], <vscale x 16 x bfloat> [[OP2:%.*]], i64 noundef [[VL:%.*]]) #[[ATTR0]] {
+// CHECK-RV64-NEXT: [[ENTRY:.*:]]
+// CHECK-RV64-NEXT: [[TMP0:%.*]] = call <vscale x 16 x bfloat> @llvm.riscv.vfmin.nxv16bf16.nxv16bf16.i64(<vscale x 16 x bfloat> [[MASKEDOFF]], <vscale x 16 x bfloat> [[OP1]], <vscale x 16 x bfloat> [[OP2]], i64 [[VL]])
+// CHECK-RV64-NEXT: ret <vscale x 16 x bfloat> [[TMP0]]
+//
+vbfloat16m4_t test_vfmin_vv_bf16m4_tu(vbfloat16m4_t maskedoff, vbfloat16m4_t op1, vbfloat16m4_t op2, size_t vl) {
+ return __riscv_vfmin_tu(maskedoff, op1, op2, vl);
+}
+
+// CHECK-RV64-LABEL: define dso_local <vscale x 16 x bfloat> @test_vfmin_vf_bf16m4_tu(
+// CHECK-RV64-SAME: <vscale x 16 x bfloat> [[MASKEDOFF:%.*]], <vscale x 16 x bfloat> [[OP1:%.*]], bfloat noundef [[OP2:%.*]], i64 noundef [[VL:%.*]]) #[[ATTR0]] {
+// CHECK-RV64-NEXT: [[ENTRY:.*:]]
+// CHECK-RV64-NEXT: [[TMP0:%.*]] = call <vscale x 16 x bfloat> @llvm.riscv.vfmin.nxv16bf16.bf16.i64(<vscale x 16 x bfloat> [[MASKEDOFF]], <vscale x 16 x bfloat> [[OP1]], bfloat [[OP2]], i64 [[VL]])
+// CHECK-RV64-NEXT: ret <vscale x 16 x bfloat> [[TMP0]]
+//
+vbfloat16m4_t test_vfmin_vf_bf16m4_tu(vbfloat16m4_t maskedoff, vbfloat16m4_t op1, __bf16 op2, size_t vl) {
+ return __riscv_vfmin_tu(maskedoff, op1, op2, vl);
+}
+
+// CHECK-RV64-LABEL: define dso_local <vscale x 32 x bfloat> @test_vfmin_vv_bf16m8_tu(
+// CHECK-RV64-SAME: <vscale x 32 x bfloat> [[MASKEDOFF:%.*]], <vscale x 32 x bfloat> [[OP1:%.*]], <vscale x 32 x bfloat> [[OP2:%.*]], i64 noundef [[VL:%.*]]) #[[ATTR0]] {
+// CHECK-RV64-NEXT: [[ENTRY:.*:]]
+// CHECK-RV64-NEXT: [[TMP0:%.*]] = call <vscale x 32 x bfloat> @llvm.riscv.vfmin.nxv32bf16.nxv32bf16.i64(<vscale x 32 x bfloat> [[MASKEDOFF]], <vscale x 32 x bfloat> [[OP1]], <vscale x 32 x bfloat> [[OP2]], i64 [[VL]])
+// CHECK-RV64-NEXT: ret <vscale x 32 x bfloat> [[TMP0]]
+//
+vbfloat16m8_t test_vfmin_vv_bf16m8_tu(vbfloat16m8_t maskedoff, vbfloat16m8_t op1, vbfloat16m8_t op2, size_t vl) {
+ return __riscv_vfmin_tu(maskedoff, op1, op2, vl);
+}
+
+// CHECK-RV64-LABEL: define dso_local <vscale x 32 x bfloat> @test_vfmin_vf_bf16m8_tu(
+// CHECK-RV64-SAME: <vscale x 32 x bfloat> [[MASKEDOFF:%.*]], <vscale x 32 x bfloat> [[OP1:%.*]], bfloat noundef [[OP2:%.*]], i64 noundef [[VL:%.*]]) #[[ATTR0]] {
+// CHECK-RV64-NEXT: [[ENTRY:.*:]]
+// CHECK-RV64-NEXT: [[TMP0:%.*]] = call <vscale x 32 x bfloat> @llvm.riscv.vfmin.nxv32bf16.bf16.i64(<vscale x 32 x bfloat> [[MASKEDOFF]], <vscale x 32 x bfloat> [[OP1]], bfloat [[OP2]], i64 [[VL]])
+// CHECK-RV64-NEXT: ret <vscale x 32 x bfloat> [[TMP0]]
+//
+vbfloat16m8_t test_vfmin_vf_bf16m8_tu(vbfloat16m8_t maskedoff, vbfloat16m8_t op1, __bf16 op2, size_t vl) {
+ return __riscv_vfmin_tu(maskedoff, op1, op2, vl);
+}
+
+// CHECK-RV64-LABEL: define dso_local <vscale x 1 x bfloat> @test_vfmin_vv_bf16mf4_tum(
+// CHECK-RV64-SAME: <vscale x 1 x i1> [[MASK:%.*]], <vscale x 1 x bfloat> [[MASKEDOFF:%.*]], <vscale x 1 x bfloat> [[OP1:%.*]], <vscale x 1 x bfloat> [[OP2:%.*]], i64 noundef [[VL:%.*]]) #[[ATTR0]] {
+// CHECK-RV64-NEXT: [[ENTRY:.*:]]
+// CHECK-RV64-NEXT: [[TMP0:%.*]] = call <vscale x 1 x bfloat> @llvm.riscv.vfmin.mask.nxv1bf16.nxv1bf16.i64(<vscale x 1 x bfloat> [[MASKEDOFF]], <vscale x 1 x bfloat> [[OP1]], <vscale x 1 x bfloat> [[OP2]], <vscale x 1 x i1> [[MASK]], i64 [[VL]], i64 2)
+// CHECK-RV64-NEXT: ret <vscale x 1 x bfloat> [[TMP0]]
+//
+vbfloat16mf4_t test_vfmin_vv_bf16mf4_tum(vbool64_t mask, vbfloat16mf4_t maskedoff, vbfloat16mf4_t op1, vbfloat16mf4_t op2, size_t vl) {
+ return __riscv_vfmin_tum(mask, maskedoff, op1, op2, vl);
+}
+
+// CHECK-RV64-LABEL: define dso_local <vscale x 1 x bfloat> @test_vfmin_vf_bf16mf4_tum(
+// CHECK-RV64-SAME: <vscale x 1 x i1> [[MASK:%.*]], <vscale x 1 x bfloat> [[MASKEDOFF:%.*]], <vscale x 1 x bfloat> [[OP1:%.*]], bfloat noundef [[OP2:%.*]], i64 noundef [[VL:%.*]]) #[[ATTR0]] {
+// CHECK-RV64-NEXT: [[ENTRY:.*:]]
+// CHECK-RV64-NEXT: [[TMP0:%.*]] = call <vscale x 1 x bfloat> @llvm.riscv.vfmin.mask.nxv1bf16.bf16.i64(<vscale x 1 x bfloat> [[MASKEDOFF]], <vscale x 1 x bfloat> [[OP1]], bfloat [[OP2]], <vscale x 1 x i1> [[MASK]], i64 [[VL]], i64 2)
+// CHECK-RV64-NEXT: ret <vscale x 1 x bfloat> [[TMP0]]
+//
+vbfloat16mf4_t test_vfmin_vf_bf16mf4_tum(vbool64_t mask, vbfloat16mf4_t maskedoff, vbfloat16mf4_t op1, __bf16 op2, size_t vl) {
+ return __riscv_vfmin_tum(mask, maskedoff, op1, op2, vl);
+}
+
+// CHECK-RV64-LABEL: define dso_local <vscale x 2 x bfloat> @test_vfmin_vv_bf16mf2_tum(
+// CHECK-RV64-SAME: <vscale x 2 x i1> [[MASK:%.*]], <vscale x 2 x bfloat> [[MASKEDOFF:%.*]], <vscale x 2 x bfloat> [[OP1:%.*]], <vscale x 2 x bfloat> [[OP2:%.*]], i64 noundef [[VL:%.*]]) #[[ATTR0]] {
+// CHECK-RV64-NEXT: [[ENTRY:.*:]]
+// CHECK-RV64-NEXT: [[TMP0:%.*]] = call <vscale x 2 x bfloat> @llvm.riscv.vfmin.mask.nxv2bf16.nxv2bf16.i64(<vscale x 2 x bfloat> [[MASKEDOFF]], <vscale x 2 x bfloat> [[OP1]], <vscale x 2 x bfloat> [[OP2]], <vscale x 2 x i1> [[MASK]], i64 [[VL]], i64 2)
+// CHECK-RV64-NEXT: ret <vscale x 2 x bfloat> [[TMP0]]
+//
+vbfloat16mf2_t test_vfmin_vv_bf16mf2_tum(vbool32_t mask, vbfloat16mf2_t maskedoff, vbfloat16mf2_t op1, vbfloat16mf2_t op2, size_t vl) {
+ return __riscv_vfmin_tum(mask, maskedoff, op1, op2, vl);
+}
+
+// CHECK-RV64-LABEL: define dso_local <vscale x 2 x bfloat> @test_vfmin_vf_bf16mf2_tum(
+// CHECK-RV64-SAME: <vscale x 2 x i1> [[MASK:%.*]], <vscale x 2 x bfloat> [[MASKEDOFF:%.*]], <vscale x 2 x bfloat> [[OP1:%.*]], bfloat noundef [[OP2:%.*]], i64 noundef [[VL:%.*]]) #[[ATTR0]] {
+// CHECK-RV64-NEXT: [[ENTRY:.*:]]
+// CHECK-RV64-NEXT: [[TMP0:%.*]] = call <vscale x 2 x bfloat> @llvm.riscv.vfmin.mask.nxv2bf16.bf16.i64(<vscale x 2 x bfloat> [[MASKEDOFF]], <vscale x 2 x bfloat> [[OP1]], bfloat [[OP2]], <vscale x 2 x i1> [[MASK]], i64 [[VL]], i64 2)
+// CHECK-RV64-NEXT: ret <vscale x 2 x bfloat> [[TMP0]]
+//
+vbfloat16mf2_t test_vfmin_vf_bf16mf2_tum(vbool32_t mask, vbfloat16mf2_t maskedoff, vbfloat16mf2_t op1, __bf16 op2, size_t vl) {
+ return __riscv_vfmin_tum(mask, maskedoff, op1, op2, vl);
+}
+
+// CHECK-RV64-LABEL: define dso_local <vscale x 4 x bfloat> @test_vfmin_vv_bf16m1_tum(
+// CHECK-RV64-SAME: <vscale x 4 x i1> [[MASK:%.*]], <vscale x 4 x bfloat> [[MASKEDOFF:%.*]], <vscale x 4 x bfloat> [[OP1:%.*]], <vscale x 4 x bfloat> [[OP2:%.*]], i64 noundef [[VL:%.*]]) #[[ATTR0]] {
+// CHECK-RV64-NEXT: [[ENTRY:.*:]]
+// CHECK-RV64-NEXT: [[TMP0:%.*]] = call <vscale x 4 x bfloat> @llvm.riscv.vfmin.mask.nxv4bf16.nxv4bf16.i64(<vscale x 4 x bfloat> [[MASKEDOFF]], <vscale x 4 x bfloat> [[OP1]], <vscale x 4 x bfloat> [[OP2]], <vscale x 4 x i1> [[MASK]], i64 [[VL]], i64 2)
+// CHECK-RV64-NEXT: ret <vscale x 4 x bfloat> [[TMP0]]
+//
+vbfloat16m1_t test_vfmin_vv_bf16m1_tum(vbool16_t mask, vbfloat16m1_t maskedoff, vbfloat16m1_t op1, vbfloat16m1_t op2, size_t vl) {
+ return __riscv_vfmin_tum(mask, maskedoff, op1, op2, vl);
+}
+
+// CHECK-RV64-LABEL: define dso_local <vscale x 4 x bfloat> @test_vfmin_vf_bf16m1_tum(
+// CHECK-RV64-SAME: <vscale x 4 x i1> [[MASK:%.*]], <vscale x 4 x bfloat> [[MASKEDOFF:%.*]], <vscale x 4 x bfloat> [[OP1:%.*]], bfloat noundef [[OP2:%.*]], i64 noundef [[VL:%.*]]) #[[ATTR0]] {
+// CHECK-RV64-NEXT: [[ENTRY:.*:]]
+// CHECK-RV64-NEXT: [[TMP0:%.*]] = call <vscale x 4 x bfloat> @llvm.riscv.vfmin.mask.nxv4bf16.bf16.i64(<vscale x 4 x bfloat> [[MASKEDOFF]], <vscale x 4 x bfloat> [[OP1]], bfloat [[OP2]], <vscale x 4 x i1> [[MASK]], i64 [[VL]], i64 2)
+// CHECK-RV64-NEXT: ret <vscale x 4 x bfloat> [[TMP0]]
+//
+vbfloat16m1_t test_vfmin_vf_bf16m1_tum(vbool16_t mask, vbfloat16m1_t maskedoff, vbfloat16m1_t op1, __bf16 op2, size_t vl) {
+ return __riscv_vfmin_tum(mask, maskedoff, op1, op2, vl);
+}
+
+// CHECK-RV64-LABEL: define dso_local <vscale x 8 x bfloat> @test_vfmin_vv_bf16m2_tum(
+// CHECK-RV64-SAME: <vscale x 8 x i1> [[MASK:%.*]], <vscale x 8 x bfloat> [[MASKEDOFF:%.*]], <vscale x 8 x bfloat> [[OP1:%.*]], <vscale x 8 x bfloat> [[OP2:%.*]], i64 noundef [[VL:%.*]]) #[[ATTR0]] {
+// CHECK-RV64-NEXT: [[ENTRY:.*:]]
+// CHECK-RV64-NEXT: [[TMP0:%.*]] = call <vscale x 8 x bfloat> @llvm.riscv.vfmin.mask.nxv8bf16.nxv8bf16.i64(<vscale x 8 x bfloat> [[MASKEDOFF]], <vscale x 8 x bfloat> [[OP1]], <vscale x 8 x bfloat> [[OP2]], <vscale x 8 x i1> [[MASK]], i64 [[VL]], i64 2)
+// CHECK-RV64-NEXT: ret <vscale x 8 x bfloat> [[TMP0]]
+//
+vbfloat16m2_t test_vfmin_vv_bf16m2_tum(vbool8_t mask, vbfloat16m2_t maskedoff, vbfloat16m2_t op1, vbfloat16m2_t op2, size_t vl) {
+ return __riscv_vfmin_tum(mask, maskedoff, op1, op2, vl);
+}
+
+// CHECK-RV64-LABEL: define dso_local <vscale x 8 x bfloat> @test_vfmin_vf_bf16m2_tum(
+// CHECK-RV64-SAME: <vscale x 8 x i1> [[MASK:%.*]], <vscale x 8 x bfloat> [[MASKEDOFF:%.*]], <vscale x 8 x bfloat> [[OP1:%.*]], bfloat noundef [[OP2:%.*]], i64 noundef [[VL:%.*]]) #[[ATTR0]] {
+// CHECK-RV64-NEXT: [[ENTRY:.*:]]
+// CHECK-RV64-NEXT: [[TMP0:%.*]] = call <vscale x 8 x bfloat> @llvm.riscv.vfmin.mask.nxv8bf16.bf16.i64(<vscale x 8 x bfloat> [[MASKEDOFF]], <vscale x 8 x bfloat> [[OP1]], bfloat [[OP2]], <vscale x 8 x i1> [[MASK]], i64 [[VL]], i64 2)
+// CHECK-RV64-NEXT: ret <vscale x 8 x bfloat> [[TMP0]]
+//
+vbfloat16m2_t test_vfmin_vf_bf16m2_tum(vbool8_t mask, vbfloat16m2_t maskedoff, vbfloat16m2_t op1, __bf16 op2, size_t vl) {
+ return __riscv_vfmin_tum(mask, maskedoff, op1, op2, vl);
+}
+
+// CHECK-RV64-LABEL: define dso_local <vscale x 16 x bfloat> @test_vfmin_vv_bf16m4_tum(
+// CHECK-RV64-SAME: <vscale x 16 x i1> [[MASK:%.*]], <vscale x 16 x bfloat> [[MASKEDOFF:%.*]], <vscale x 16 x bfloat> [[OP1:%.*]], <vscale x 16 x bfloat> [[OP2:%.*]], i64 noundef [[VL:%.*]]) #[[ATTR0]] {
+// CHECK-RV64-NEXT: [[ENTRY:.*:]]
+// CHECK-RV64-NEXT: [[TMP0:%.*]] = call <vscale x 16 x bfloat> @llvm.riscv.vfmin.mask.nxv16bf16.nxv16bf16.i64(<vscale x 16 x bfloat> [[MASKEDOFF]], <vscale x 16 x bfloat> [[OP1]], <vscale x 16 x bfloat> [[OP2]], <vscale x 16 x i1> [[MASK]], i64 [[VL]], i64 2)
+// CHECK-RV64-NEXT: ret <vscale x 16 x bfloat> [[TMP0]]
+//
+vbfloat16m4_t test_vfmin_vv_bf16m4_tum(vbool4_t mask, vbfloat16m4_t maskedoff, vbfloat16m4_t op1, vbfloat16m4_t op2, size_t vl) {
+ return __riscv_vfmin_tum(mask, maskedoff, op1, op2, vl);
+}
+
+// CHECK-RV64-LABEL: define dso_local <vscale x 16 x bfloat> @test_vfmin_vf_bf16m4_tum(
+// CHECK-RV64-SAME: <vscale x 16 x i1> [[MASK:%.*]], <vscale x 16 x bfloat> [[MASKEDOFF:%.*]], <vscale x 16 x bfloat> [[OP1:%.*]], bfloat noundef [[OP2:%.*]], i64 noundef [[VL:%.*]]) #[[ATTR0]] {
+// CHECK-RV64-NEXT: [[ENTRY:.*:]]
+// CHECK-RV64-NEXT: [[TMP0:%.*]] = call <vscale x 16 x bfloat> @llvm.riscv.vfmin.mask.nxv16bf16.bf16.i64(<vscale x 16 x bfloat> [[MASKEDOFF]], <vscale x 16 x bfloat> [[OP1]], bfloat [[OP2]], <vscale x 16 x i1> [[MASK]], i64 [[VL]], i64 2)
+// CHECK-RV64-NEXT: ret <vscale x 16 x bfloat> [[TMP0]]
+//
+vbfloat16m4_t test_vfmin_vf_bf16m4_tum(vbool4_t mask, vbfloat16m4_t maskedoff, vbfloat16m4_t op1, __bf16 op2, size_t vl) {
+ return __riscv_vfmin_tum(mask, maskedoff, op1, op2, vl);
+}
+
+// CHECK-RV64-LABEL: define dso_local <vscale x 32 x bfloat> @test_vfmin_vv_bf16m8_tum(
+// CHECK-RV64-SAME: <vscale x 32 x i1> [[MASK:%.*]], <vscale x 32 x bfloat> [[MASKEDOFF:%.*]], <vscale x 32 x bfloat> [[OP1:%.*]], <vscale x 32 x bfloat> [[OP2:%.*]], i64 noundef [[VL:%.*]]) #[[ATTR0]] {
+// CHECK-RV64-NEXT: [[ENTRY:.*:]]
+// CHECK-RV64-NEXT: [[TMP0:%.*]] = call <vscale x 32 x bfloat> @llvm.riscv.vfmin.mask.nxv32bf16.nxv32bf16.i64(<vscale x 32 x bfloat> [[MASKEDOFF]], <vscale x 32 x bfloat> [[OP1]], <vscale x 32 x bfloat> [[OP2]], <vscale x 32 x i1> [[MASK]], i64 [[VL]], i64 2)
+// CHECK-RV64-NEXT: ret <vscale x 32 x bfloat> [[TMP0]]
+//
+vbfloat16m8_t test_vfmin_vv_bf16m8_tum(vbool2_t mask, vbfloat16m8_t maskedoff, vbfloat16m8_t op1, vbfloat16m8_t op2, size_t vl) {
+ return __riscv_vfmin_tum(mask, maskedoff, op1, op2, vl);
+}
+
+// CHECK-RV64-LABEL: define dso_local <vscale x 32 x bfloat> @test_vfmin_vf_bf16m8_tum(
+// CHECK-RV64-SAME: <vscale x 32 x i1> [[MASK:%.*]], <vscale x 32 x bfloat> [[MASKEDOFF:%.*]], <vscale x 32 x bfloat> [[OP1:%.*]], bfloat noundef [[OP2:%.*]], i64 noundef [[VL:%.*]]) #[[ATTR0]] {
+// CHECK-RV64-NEXT: [[ENTRY:.*:]]
+// CHECK-RV64-NEXT: [[TMP0:%.*]] = call <vscale x 32 x bfloat> @llvm.riscv.vfmin.mask.nxv32bf16.bf16.i64(<vscale x 32 x bfloat> [[MASKEDOFF]], <vscale x 32 x bfloat> [[OP1]], bfloat [[OP2]], <vscale x 32 x i1> [[MASK]], i64 [[VL]], i64 2)
+// CHECK-RV64-NEXT: ret <vscale x 32 x bfloat> [[TMP0]]
+//
+vbfloat16m8_t test_vfmin_vf_bf16m8_tum(vbool2_t mask, vbfloat16m8_t maskedoff, vbfloat16m8_t op1, __bf16 op2, size_t vl) {
+ return __riscv_vfmin_tum(mask, maskedoff, op1, op2, vl);
+}
+
+// CHECK-RV64-LABEL: define dso_local <vscale x 1 x bfloat> @test_vfmin_vv_bf16mf4_tumu(
+// CHECK-RV64-SAME: <vscale x 1 x i1> [[MASK:%.*]], <vscale x 1 x bfloat> [[MASKEDOFF:%.*]], <vscale x 1 x bfloat> [[OP1:%.*]], <vscale x 1 x bfloat> [[OP2:%.*]], i64 noundef [[VL:%.*]]) #[[ATTR0]] {
+// CHECK-RV64-NEXT: [[ENTRY:.*:]]
+// CHECK-RV64-NEXT: [[TMP0:%.*]] = call <vscale x 1 x bfloat> @llvm.riscv.vfmin.mask.nxv1bf16.nxv1bf16.i64(<vscale x 1 x bfloat> [[MASKEDOFF]], <vscale x 1 x bfloat> [[OP1]], <vscale x 1 x bfloat> [[OP2]], <vscale x 1 x i1> [[MASK]], i64 [[VL]], i64 0)
+// CHECK-RV64-NEXT: ret <vscale x 1 x bfloat> [[TMP0]]
+//
+vbfloat16mf4_t test_vfmin_vv_bf16mf4_tumu(vbool64_t mask, vbfloat16mf4_t maskedoff, vbfloat16mf4_t op1, vbfloat16mf4_t op2, size_t vl) {
+ return __riscv_vfmin_tumu(mask, maskedoff, op1, op2, vl);
+}
+
+// CHECK-RV64-LABEL: define dso_local <vscale x 1 x bfloat> @test_vfmin_vf_bf16mf4_tumu(
+// CHECK-RV64-SAME: <vscale x 1 x i1> [[MASK:%.*]], <vscale x 1 x bfloat> [[MASKEDOFF:%.*]], <vscale x 1 x bfloat> [[OP1:%.*]], bfloat noundef [[OP2:%.*]], i64 noundef [[VL:%.*]]) #[[ATTR0]] {
+// CHECK-RV64-NEXT: [[ENTRY:.*:]]
+// CHECK-RV64-NEXT: [[TMP0:%.*]] = call <vscale x 1 x bfloat> @llvm.riscv.vfmin.mask.nxv1bf16.bf16.i64(<vscale x 1 x bfloat> [[MASKEDOFF]], <vscale x 1 x bfloat> [[OP1]], bfloat [[OP2]], <vscale x 1 x i1> [[MASK]], i64 [[VL]], i64 0)
+// CHECK-RV64-NEXT: ret <vscale x 1 x bfloat> [[TMP0]]
+//
+vbfloat16mf4_t test_vfmin_vf_bf16mf4_tumu(vbool64_t mask, vbfloat16mf4_t maskedoff, vbfloat16mf4_t op1, __bf16 op2, size_t vl) {
+ return __riscv_vfmin_tumu(mask, maskedoff, op1, op2, vl);
+}
+
+// CHECK-RV64-LABEL: define dso_local <vscale x 2 x bfloat> @test_vfmin_vv_bf16mf2_tumu(
+// CHECK-RV64-SAME: <vscale x 2 x i1> [[MASK:%.*]], <vscale x 2 x bfloat> [[MASKEDOFF:%.*]], <vscale x 2 x bfloat> [[OP1:%.*]], <vscale x 2 x bfloat> [[OP2:%.*]], i64 noundef [[VL:%.*]]) #[[ATTR0]] {
+// CHECK-RV64-NEXT: [[ENTRY:.*:]]
+// CHECK-RV64-NEXT: [[TMP0:%.*]] = call <vscale x 2 x bfloat> @llvm.riscv.vfmin.mask.nxv2bf16.nxv2bf16.i64(<vscale x 2 x bfloat> [[MASKEDOFF]], <vscale x 2 x bfloat> [[OP1]], <vscale x 2 x bfloat> [[OP2]], <vscale x 2 x i1> [[MASK]], i64 [[VL]], i64 0)
+// CHECK-RV64-NEXT: ret <vscale x 2 x bfloat> [[TMP0]]
+//
+vbfloat16mf2_t test_vfmin_vv_bf16mf2_tumu(vbool32_t mask, vbfloat16mf2_t maskedoff, vbfloat16mf2_t op1, vbfloat16mf2_t op2, size_t vl) {
+ return __riscv_vfmin_tumu(mask, maskedoff, op1, op2, vl);
+}
+
+// CHECK-RV64-LABEL: define dso_local <vscale x 2 x bfloat> @test_vfmin_vf_bf16mf2_tumu(
+// CHECK-RV64-SAME: <vscale x 2 x i1> [[MASK:%.*]], <vscale x 2 x bfloat> [[MASKEDOFF:%.*]], <vscale x 2 x bfloat> [[OP1:%.*]], bfloat noundef [[OP2:%.*]], i64 noundef [[VL:%.*]]) #[[ATTR0]] {
+// CHECK-RV64-NEXT: [[ENTRY:.*:]]
+// CHECK-RV64-NEXT: [[TMP0:%.*]] = call <vscale x 2 x bfloat> @llvm.riscv.vfmin.mask.nxv2bf16.bf16.i64(<vscale x 2 x bfloat> [[MASKEDOFF]], <vscale x 2 x bfloat> [[OP1]], bfloat [[OP2]], <vscale x 2 x i1> [[MASK]], i64 [[VL]], i64 0)
+// CHECK-RV64-NEXT: ret <vscale x 2 x bfloat> [[TMP0]]
+//
+vbfloat16mf2_t test_vfmin_vf_bf16mf2_tumu(vbool32_t mask, vbfloat16mf2_t maskedoff, vbfloat16mf2_t op1, __bf16 op2, size_t vl) {
+ return __riscv_vfmin_tumu(mask, maskedoff, op1, op2, vl);
+}
+
+// CHECK-RV64-LABEL: define dso_local <vscale x 4 x bfloat> @test_vfmin_vv_bf16m1_tumu(
+// CHECK-RV64-SAME: <vscale x 4 x i1> [[MASK:%.*]], <vscale x 4 x bfloat> [[MASKEDOFF:%.*]], <vscale x 4 x bfloat> [[OP1:%.*]], <vscale x 4 x bfloat> [[OP2:%.*]], i64 noundef [[VL:%.*]]) #[[ATTR0]] {
+// CHECK-RV64-NEXT: [[ENTRY:.*:]]
+// CHECK-RV64-NEXT: [[TMP0:%.*]] = call <vscale x 4 x bfloat> @llvm.riscv.vfmin.mask.nxv4bf16.nxv4bf16.i64(<vscale x 4 x bfloat> [[MASKEDOFF]], <vscale x 4 x bfloat> [[OP1]], <vscale x 4 x bfloat> [[OP2]], <vscale x 4 x i1> [[MASK]], i64 [[VL]], i64 0)
+// CHECK-RV64-NEXT: ret <vscale x 4 x bfloat> [[TMP0]]
+//
+vbfloat16m1_t test_vfmin_vv_bf16m1_tumu(vbool16_t mask, vbfloat16m1_t maskedoff, vbfloat16m1_t op1, vbfloat16m1_t op2, size_t vl) {
+ return __riscv_vfmin_tumu(mask, maskedoff, op1, op2, vl);
+}
+
+// CHECK-RV64-LABEL: define dso_local <vscale x 4 x bfloat> @test_vfmin_vf_bf16m1_tumu(
+// CHECK-RV64-SAME: <vscale x 4 x i1> [[MASK:%.*]], <vscale x 4 x bfloat> [[MASKEDOFF:%.*]], <vscale x 4 x bfloat> [[OP1:%.*]], bfloat noundef [[OP2:%.*]], i64 noundef [[VL:%.*]]) #[[ATTR0]] {
+// CHECK-RV64-NEXT: [[ENTRY:.*:]]
+// CHECK-RV64-NEXT: [[TMP0:%.*]] = call <vscale x 4 x bfloat> @llvm.riscv.vfmin.mask.nxv4bf16.bf16.i64(<vscale x 4 x bfloat> [[MASKEDOFF]], <vscale x 4 x bfloat> [[OP1]], bfloat [[OP2]], <vscale x 4 x i1> [[MASK]], i64 [[VL]], i64 0)
+// CHECK-RV64-NEXT: ret <vscale x 4 x bfloat> [[TMP0]]
+//
+vbfloat16m1_t test_vfmin_vf_bf16m1_tumu(vbool16_t mask, vbfloat16m1_t maskedoff, vbfloat16m1_t op1, __bf16 op2, size_t vl) {
+ return __riscv_vfmin_tumu(mask, maskedoff, op1, op2, vl);
+}
+
+// CHECK-RV64-LABEL: define dso_local <vscale x 8 x bfloat> @test_vfmin_vv_bf16m2_tumu(
+// CHECK-RV64-SAME: <vscale x 8 x i1> [[MASK:%.*]], <vscale x 8 x bfloat> [[MASKEDOFF:%.*]], <vscale x 8 x bfloat> [[OP1:%.*]], <vscale x 8 x bfloat> [[OP2:%.*]], i64 noundef [[VL:%.*]]) #[[ATTR0]] {
+// CHECK-RV64-NEXT: [[ENTRY:.*:]]
+// CHECK-RV64-NEXT: [[TMP0:%.*]] = call <vscale x 8 x bfloat> @llvm.riscv.vfmin.mask.nxv8bf16.nxv8bf16.i64(<vscale x 8 x bfloat> [[MASKEDOFF]], <vscale x 8 x bfloat> [[OP1]], <vscale x 8 x bfloat> [[OP2]], <vscale x 8 x i1> [[MASK]], i64 [[VL]], i64 0)
+// CHECK-RV64-NEXT: ret <vscale x 8 x bfloat> [[TMP0]]
+//
+vbfloat16m2_t test_vfmin_vv_bf16m2_tumu(vbool8_t mask, vbfloat16m2_t maskedoff, vbfloat16m2_t op1, vbfloat16m2_t op2, size_t vl) {
+ return __riscv_vfmin_tumu(mask, maskedoff, op1, op2, vl);
+}
+
+// CHECK-RV64-LABEL: define dso_local <vscale x 8 x bfloat> @test_vfmin_vf_bf16m2_tumu(
+// CHECK-RV64-SAME: <vscale x 8 x i1> [[MASK:%.*]], <vscale x 8 x bfloat> [[MASKEDOFF:%.*]], <vscale x 8 x bfloat> [[OP1:%.*]], bfloat noundef [[OP2:%.*]], i64 noundef [[VL:%.*]]) #[[ATTR0]] {
+// CHECK-RV64-NEXT: [[ENTRY:.*:]]
+// CHECK-RV64-NEXT: [[TMP0:%.*]] = call <vscale x 8 x bfloat> @llvm.riscv.vfmin.mask.nxv8bf16.bf16.i64(<vscale x 8 x bfloat> [[MASKEDOFF]], <vscale x 8 x bfloat> [[OP1]], bfloat [[OP2]], <vscale x 8 x i1> [[MASK]], i64 [[VL]], i64 0)
+// CHECK-RV64-NEXT: ret <vscale x 8 x bfloat> [[TMP0]]
+//
+vbfloat16m2_t test_vfmin_vf_bf16m2_tumu(vbool8_t mask, vbfloat16m2_t maskedoff, vbfloat16m2_t op1, __bf16 op2, size_t vl) {
+ return __riscv_vfmin_tumu(mask, maskedoff, op1, op2, vl);
+}
+
+// CHECK-RV64-LABEL: define dso_local <vscale x 16 x bfloat> @test_vfmin_vv_bf16m4_tumu(
+// CHECK-RV64-SAME: <vscale x 16 x i1> [[MASK:%.*]], <vscale x 16 x bfloat> [[MASKEDOFF:%.*]], <vscale x 16 x bfloat> [[OP1:%.*]], <vscale x 16 x bfloat> [[OP2:%.*]], i64 noundef [[VL:%.*]]) #[[ATTR0]] {
+// CHECK-RV64-NEXT: [[ENTRY:.*:]]
+// CHECK-RV64-NEXT: [[TMP0:%.*]] = call <vscale x 16 x bfloat> @llvm.riscv.vfmin.mask.nxv16bf16.nxv16bf16.i64(<vscale x 16 x bfloat> [[MASKEDOFF]], <vscale x 16 x bfloat> [[OP1]], <vscale x 16 x bfloat> [[OP2]], <vscale x 16 x i1> [[MASK]], i64 [[VL]], i64 0)
+// CHECK-RV64-NEXT: ret <vscale x 16 x bfloat> [[TMP0]]
+//
+vbfloat16m4_t test_vfmin_vv_bf16m4_tumu(vbool4_t mask, vbfloat16m4_t maskedoff, vbfloat16m4_t op1, vbfloat16m4_t op2, size_t vl) {
+ return __riscv_vfmin_tumu(mask, maskedoff, op1, op2, vl);
+}
+
+// CHECK-RV64-LABEL: define dso_local <vscale x 16 x bfloat> @test_vfmin_vf_bf16m4_tumu(
+// CHECK-RV64-SAME: <vscale x 16 x i1> [[MASK:%.*]], <vscale x 16 x bfloat> [[MASKEDOFF:%.*]], <vscale x 16 x bfloat> [[OP1:%.*]], bfloat noundef [[OP2:%.*]], i64 noundef [[VL:%.*]]) #[[ATTR0]] {
+// CHECK-RV64-NEXT: [[ENTRY:.*:]]
+// CHECK-RV64-NEXT: [[TMP0:%.*]] = call <vscale x 16 x bfloat> @llvm.riscv.vfmin.mask.nxv16bf16.bf16.i64(<vscale x 16 x bfloat> [[MASKEDOFF]], <vscale x 16 x bfloat> [[OP1]], bfloat [[OP2]], <vscale x 16 x i1> [[MASK]], i64 [[VL]], i64 0)
+// CHECK-RV64-NEXT: ret <vscale x 16 x bfloat> [[TMP0]]
+//
+vbfloat16m4_t test_vfmin_vf_bf16m4_tumu(vbool4_t mask, vbfloat16m4_t maskedoff, vbfloat16m4_t op1, __bf16 op2, size_t vl) {
+ return __riscv_vfmin_tumu(mask, maskedoff, op1, op2, vl);
+}
+
+// CHECK-RV64-LABEL: define dso_local <vscale x 32 x bfloat> @test_vfmin_vv_bf16m8_tumu(
+// CHECK-RV64-SAME: <vscale x 32 x i1> [[MASK:%.*]], <vscale x 32 x bfloat> [[MASKEDOFF:%.*]], <vscale x 32 x bfloat> [[OP1:%.*]], <vscale x 32 x bfloat> [[OP2:%.*]], i64 noundef [[VL:%.*]]) #[[ATTR0]] {
+// CHECK-RV64-NEXT: [[ENTRY:.*:]]
+// CHECK-RV64-NEXT: [[TMP0:%.*]] = call <vscale x 32 x bfloat> @llvm.riscv.vfmin.mask.nxv32bf16.nxv32bf16.i64(<vscale x 32 x bfloat> [[MASKEDOFF]], <vscale x 32 x bfloat> [[OP1]], <vscale x 32 x bfloat> [[OP2]], <vscale x 32 x i1> [[MASK]], i64 [[VL]], i64 0)
+// CHECK-RV64-NEXT: ret <vscale x 32 x bfloat> [[TMP0]]
+//
+vbfloat16m8_t test_vfmin_vv_bf16m8_tumu(vbool2_t mask, vbfloat16m8_t maskedoff, vbfloat16m8_t op1, vbfloat16m8_t op2, size_t vl) {
+ return __riscv_vfmin_tumu(mask, maskedoff, op1, op2, vl);
+}
+
+// CHECK-RV64-LABEL: define dso_local <vscale x 32 x bfloat> @test_vfmin_vf_bf16m8_tumu(
+// CHECK-RV64-SAME: <vscale x 32 x i1> [[MASK:%.*]], <vscale x 32 x bfloat> [[MASKEDOFF:%.*]], <vscale x 32 x bfloat> [[OP1:%.*]], bfloat noundef [[OP2:%.*]], i64 noundef [[VL:%.*]]) #[[ATTR0]] {
+// CHECK-RV64-NEXT: [[ENTRY:.*:]]
+// CHECK-RV64-NEXT: [[TMP0:%.*]] = call <vscale x 32 x bfloat> @llvm.riscv.vfmin.mask.nxv32bf16.bf16.i64(<vscale x 32 x bfloat> [[MASKEDOFF]], <vscale x 32 x bfloat> [[OP1]], bfloat [[OP2]], <vscale x 32 x i1> [[MASK]], i64 [[VL]], i64 0)
+// CHECK-RV64-NEXT: ret <vscale x 32 x bfloat> [[TMP0]]
+//
+vbfloat16m8_t test_vfmin_vf_bf16m8_tumu(vbool2_t mask, vbfloat16m8_t maskedoff, vbfloat16m8_t op1, __bf16 op2, size_t vl) {
+ return __riscv_vfmin_tumu(mask, maskedoff, op1, op2, vl);
+}
+
+// CHECK-RV64-LABEL: define dso_local <vscale x 1 x bfloat> @test_vfmin_vv_bf16mf4_mu(
+// CHECK-RV64-SAME: <vscale x 1 x i1> [[MASK:%.*]], <vscale x 1 x bfloat> [[MASKEDOFF:%.*]], <vscale x 1 x bfloat> [[OP1:%.*]], <vscale x 1 x bfloat> [[OP2:%.*]], i64 noundef [[VL:%.*]]) #[[ATTR0]] {
+// CHECK-RV64-NEXT: [[ENTRY:.*:]]
+// CHECK-RV64-NEXT: [[TMP0:%.*]] = call <vscale x 1 x bfloat> @llvm.riscv.vfmin.mask.nxv1bf16.nxv1bf16.i64(<vscale x 1 x bfloat> [[MASKEDOFF]], <vscale x 1 x bfloat> [[OP1]], <vscale x 1 x bfloat> [[OP2]], <vscale x 1 x i1> [[MASK]], i64 [[VL]], i64 1)
+// CHECK-RV64-NEXT: ret <vscale x 1 x bfloat> [[TMP0]]
+//
+vbfloat16mf4_t test_vfmin_vv_bf16mf4_mu(vbool64_t mask, vbfloat16mf4_t maskedoff, vbfloat16mf4_t op1, vbfloat16mf4_t op2, size_t vl) {
+ return __riscv_vfmin_mu(mask, maskedoff, op1, op2, vl);
+}
+
+// CHECK-RV64-LABEL: define dso_local <vscale x 1 x bfloat> @test_vfmin_vf_bf16mf4_mu(
+// CHECK-RV64-SAME: <vscale x 1 x i1> [[MASK:%.*]], <vscale x 1 x bfloat> [[MASKEDOFF:%.*]], <vscale x 1 x bfloat> [[OP1:%.*]], bfloat noundef [[OP2:%.*]], i64 noundef [[VL:%.*]]) #[[ATTR0]] {
+// CHECK-RV64-NEXT: [[ENTRY:.*:]]
+// CHECK-RV64-NEXT: [[TMP0:%.*]] = call <vscale x 1 x bfloat> @llvm.riscv.vfmin.mask.nxv1bf16.bf16.i64(<vscale x 1 x bfloat> [[MASKEDOFF]], <vscale x 1 x bfloat> [[OP1]], bfloat [[OP2]], <vscale x 1 x i1> [[MASK]], i64 [[VL]], i64 1)
+// CHECK-RV64-NEXT: ret <vscale x 1 x bfloat> [[TMP0]]
+//
+vbfloat16mf4_t test_vfmin_vf_bf16mf4_mu(vbool64_t mask, vbfloat16mf4_t maskedoff, vbfloat16mf4_t op1, __bf16 op2, size_t vl) {
+ return __riscv_vfmin_mu(mask, maskedoff, op1, op2, vl);
+}
+
+// CHECK-RV64-LABEL: define dso_local <vscale x 2 x bfloat> @test_vfmin_vv_bf16mf2_mu(
+// CHECK-RV64-SAME: <vscale x 2 x i1> [[MASK:%.*]], <vscale x 2 x bfloat> [[MASKEDOFF:%.*]], <vscale x 2 x bfloat> [[OP1:%.*]], <vscale x 2 x bfloat> [[OP2:%.*]], i64 noundef [[VL:%.*]]) #[[ATTR0]] {
+// CHECK-RV64-NEXT: [[ENTRY:.*:]]
+// CHECK-RV64-NEXT: [[TMP0:%.*]] = call <vscale x 2 x bfloat> @llvm.riscv.vfmin.mask.nxv2bf16.nxv2bf16.i64(<vscale x 2 x bfloat> [[MASKEDOFF]], <vscale x 2 x bfloat> [[OP1]], <vscale x 2 x bfloat> [[OP2]], <vscale x 2 x i1> [[MASK]], i64 [[VL]], i64 1)
+// CHECK-RV64-NEXT: ret <vscale x 2 x bfloat> [[TMP0]]
+//
+vbfloat16mf2_t test_vfmin_vv_bf16mf2_mu(vbool32_t mask, vbfloat16mf2_t maskedoff, vbfloat16mf2_t op1, vbfloat16mf2_t op2, size_t vl) {
+ return __riscv_vfmin_mu(mask, maskedoff, op1, op2, vl);
+}
+
+// CHECK-RV64-LABEL: define dso_local <vscale x 2 x bfloat> @test_vfmin_vf_bf16mf2_mu(
+// CHECK-RV64-SAME: <vscale x 2 x i1> [[MASK:%.*]], <vscale x 2 x bfloat> [[MASKEDOFF:%.*]], <vscale x 2 x bfloat> [[OP1:%.*]], bfloat noundef [[OP2:%.*]], i64 noundef [[VL:%.*]]) #[[ATTR0]] {
+// CHECK-RV64-NEXT: [[ENTRY:.*:]]
+// CHECK-RV64-NEXT: [[TMP0:%.*]] = call <vscale x 2 x bfloat> @llvm.riscv.vfmin.mask.nxv2bf16.bf16.i64(<vscale x 2 x bfloat> [[MASKEDOFF]], <vscale x 2 x bfloat> [[OP1]], bfloat [[OP2]], <vscale x 2 x i1> [[MASK]], i64 [[VL]], i64 1)
+// CHECK-RV64-NEXT: ret <vscale x 2 x bfloat> [[TMP0]]
+//
+vbfloat16mf2_t test_vfmin_vf_bf16mf2_mu(vbool32_t mask, vbfloat16mf2_t maskedoff, vbfloat16mf2_t op1, __bf16 op2, size_t vl) {
+ return __riscv_vfmin_mu(mask, maskedoff, op1, op2, vl);
+}
+
+// CHECK-RV64-LABEL: define dso_local <vscale x 4 x bfloat> @test_vfmin_vv_bf16m1_mu(
+// CHECK-RV64-SAME: <vscale x 4 x i1> [[MASK:%.*]], <vscale x 4 x bfloat> [[MASKEDOFF:%.*]], <vscale x 4 x bfloat> [[OP1:%.*]], <vscale x 4 x bfloat> [[OP2:%.*]], i64 noundef [[VL:%.*]]) #[[ATTR0]] {
+// CHECK-RV64-NEXT: [[ENTRY:.*:]]
+// CHECK-RV64-NEXT: [[TMP0:%.*]] = call <vscale x 4 x bfloat> @llvm.riscv.vfmin.mask.nxv4bf16.nxv4bf16.i64(<vscale x 4 x bfloat> [[MASKEDOFF]], <vscale x 4 x bfloat> [[OP1]], <vscale x 4 x bfloat> [[OP2]], <vscale x 4 x i1> [[MASK]], i64 [[VL]], i64 1)
+// CHECK-RV64-NEXT: ret <vscale x 4 x bfloat> [[TMP0]]
+//
+vbfloat16m1_t test_vfmin_vv_bf16m1_mu(vbool16_t mask, vbfloat16m1_t maskedoff, vbfloat16m1_t op1, vbfloat16m1_t op2, size_t vl) {
+ return __riscv_vfmin_mu(mask, maskedoff, op1, op2, vl);
+}
+
+// CHECK-RV64-LABEL: define dso_local <vscale x 4 x bfloat> @test_vfmin_vf_bf16m1_mu(
+// CHECK-RV64-SAME: <vscale x 4 x i1> [[MASK:%.*]], <vscale x 4 x bfloat> [[MASKEDOFF:%.*]], <vscale x 4 x bfloat> [[OP1:%.*]], bfloat noundef [[OP2:%.*]], i64 noundef [[VL:%.*]]) #[[ATTR0]] {
+// CHECK-RV64-NEXT: [[ENTRY:.*:]]
+// CHECK-RV64-NEXT: [[TMP0:%.*]] = call <vscale x 4 x bfloat> @llvm.riscv.vfmin.mask.nxv4bf16.bf16.i64(<vscale x 4 x bfloat> [[MASKEDOFF]], <vscale x 4 x bfloat> [[OP1]], bfloat [[OP2]], <vscale x 4 x i1> [[MASK]], i64 [[VL]], i64 1)
+// CHECK-RV64-NEXT: ret <vscale x 4 x bfloat> [[TMP0]]
+//
+vbfloat16m1_t test_vfmin_vf_bf16m1_mu(vbool16_t mask, vbfloat16m1_t maskedoff, vbfloat16m1_t op1, __bf16 op2, size_t vl) {
+ return __riscv_vfmin_mu(mask, maskedoff, op1, op2, vl);
+}
+
+// CHECK-RV64-LABEL: define dso_local <vscale x 8 x bfloat> @test_vfmin_vv_bf16m2_mu(
+// CHECK-RV64-SAME: <vscale x 8 x i1> [[MASK:%.*]], <vscale x 8 x bfloat> [[MASKEDOFF:%.*]], <vscale x 8 x bfloat> [[OP1:%.*]], <vscale x 8 x bfloat> [[OP2:%.*]], i64 noundef [[VL:%.*]]) #[[ATTR0]] {
+// CHECK-RV64-NEXT: [[ENTRY:.*:]]
+// CHECK-RV64-NEXT: [[TMP0:%.*]] = call <vscale x 8 x bfloat> @llvm.riscv.vfmin.mask.nxv8bf16.nxv8bf16.i64(<vscale x 8 x bfloat> [[MASKEDOFF]], <vscale x 8 x bfloat> [[OP1]], <vscale x 8 x bfloat> [[OP2]], <vscale x 8 x i1> [[MASK]], i64 [[VL]], i64 1)
+// CHECK-RV64-NEXT: ret <vscale x 8 x bfloat> [[TMP0]]
+//
+vbfloat16m2_t test_vfmin_vv_bf16m2_mu(vbool8_t mask, vbfloat16m2_t maskedoff, vbfloat16m2_t op1, vbfloat16m2_t op2, size_t vl) {
+ return __riscv_vfmin_mu(mask, maskedoff, op1, op2, vl);
+}
+
+// CHECK-RV64-LABEL: define dso_local <vscale x 8 x bfloat> @test_vfmin_vf_bf16m2_mu(
+// CHECK-RV64-SAME: <vscale x 8 x i1> [[MASK:%.*]], <vscale x 8 x bfloat> [[MASKEDOFF:%.*]], <vscale x 8 x bfloat> [[OP1:%.*]], bfloat noundef [[OP2:%.*]], i64 noundef [[VL:%.*]]) #[[ATTR0]] {
+// CHECK-RV64-NEXT: [[ENTRY:.*:]]
+// CHECK-RV64-NEXT: [[TMP0:%.*]] = call <vscale x 8 x bfloat> @llvm.riscv.vfmin.mask.nxv8bf16.bf16.i64(<vscale x 8 x bfloat> [[MASKEDOFF]], <vscale x 8 x bfloat> [[OP1]], bfloat [[OP2]], <vscale x 8 x i1> [[MASK]], i64 [[VL]], i64 1)
+// CHECK-RV64-NEXT: ret <vscale x 8 x bfloat> [[TMP0]]
+//
+vbfloat16m2_t test_vfmin_vf_bf16m2_mu(vbool8_t mask, vbfloat16m2_t maskedoff, vbfloat16m2_t op1, __bf16 op2, size_t vl) {
+ return __riscv_vfmin_mu(mask, maskedoff, op1, op2, vl);
+}
+
+// CHECK-RV64-LABEL: define dso_local <vscale x 16 x bfloat> @test_vfmin_vv_bf16m4_mu(
+// CHECK-RV64-SAME: <vscale x 16 x i1> [[MASK:%.*]], <vscale x 16 x bfloat> [[MASKEDOFF:%.*]], <vscale x 16 x bfloat> [[OP1:%.*]], <vscale x 16 x bfloat> [[OP2:%.*]], i64 noundef [[VL:%.*]]) #[[ATTR0]] {
+// CHECK-RV64-NEXT: [[ENTRY:.*:]]
+// CHECK-RV64-NEXT: [[TMP0:%.*]] = call <vscale x 16 x bfloat> @llvm.riscv.vfmin.mask.nxv16bf16.nxv16bf16.i64(<vscale x 16 x bfloat> [[MASKEDOFF]], <vscale x 16 x bfloat> [[OP1]], <vscale x 16 x bfloat> [[OP2]], <vscale x 16 x i1> [[MASK]], i64 [[VL]], i64 1)
+// CHECK-RV64-NEXT: ret <vscale x 16 x bfloat> [[TMP0]]
+//
+vbfloat16m4_t test_vfmin_vv_bf16m4_mu(vbool4_t mask, vbfloat16m4_t maskedoff, vbfloat16m4_t op1, vbfloat16m4_t op2, size_t vl) {
+ return __riscv_vfmin_mu(mask, maskedoff, op1, op2, vl);
+}
+
+// CHECK-RV64-LABEL: define dso_local <vscale x 16 x bfloat> @test_vfmin_vf_bf16m4_mu(
+// CHECK-RV64-SAME: <vscale x 16 x i1> [[MASK:%.*]], <vscale x 16 x bfloat> [[MASKEDOFF:%.*]], <vscale x 16 x bfloat> [[OP1:%.*]], bfloat noundef [[OP2:%.*]], i64 noundef [[VL:%.*]]) #[[ATTR0]] {
+// CHECK-RV64-NEXT: [[ENTRY:.*:]]
+// CHECK-RV64-NEXT: [[TMP0:%.*]] = call <vscale x 16 x bfloat> @llvm.riscv.vfmin.mask.nxv16bf16.bf16.i64(<vscale x 16 x bfloat> [[MASKEDOFF]], <vscale x 16 x bfloat> [[OP1]], bfloat [[OP2]], <vscale x 16 x i1> [[MASK]], i64 [[VL]], i64 1)
+// CHECK-RV64-NEXT: ret <vscale x 16 x bfloat> [[TMP0]]
+//
+vbfloat16m4_t test_vfmin_vf_bf16m4_mu(vbool4_t mask, vbfloat16m4_t maskedoff, vbfloat16m4_t op1, __bf16 op2, size_t vl) {
+ return __riscv_vfmin_mu(mask, maskedoff, op1, op2, vl);
+}
+
+// CHECK-RV64-LABEL: define dso_local <vscale x 32 x bfloat> @test_vfmin_vv_bf16m8_mu(
+// CHECK-RV64-SAME: <vscale x 32 x i1> [[MASK:%.*]], <vscale x 32 x bfloat> [[MASKEDOFF:%.*]], <vscale x 32 x bfloat> [[OP1:%.*]], <vscale x 32 x bfloat> [[OP2:%.*]], i64 noundef [[VL:%.*]]) #[[ATTR0]] {
+// CHECK-RV64-NEXT: [[ENTRY:.*:]]
+// CHECK-RV64-NEXT: [[TMP0:%.*]] = call <vscale x 32 x bfloat> @llvm.riscv.vfmin.mask.nxv32bf16.nxv32bf16.i64(<vscale x 32 x bfloat> [[MASKEDOFF]], <vscale x 32 x bfloat> [[OP1]], <vscale x 32 x bfloat> [[OP2]], <vscale x 32 x i1> [[MASK]], i64 [[VL]], i64 1)
+// CHECK-RV64-NEXT: ret <vscale x 32 x bfloat> [[TMP0]]
+//
+vbfloat16m8_t test_vfmin_vv_bf16m8_mu(vbool2_t mask, vbfloat16m8_t maskedoff, vbfloat16m8_t op1, vbfloat16m8_t op2, size_t vl) {
+ return __riscv_vfmin_mu(mask, maskedoff, op1, op2, vl);
+}
+
+// CHECK-RV64-LABEL: define dso_local <vscale x 32 x bfloat> @test_vfmin_vf_bf16m8_mu(
+// CHECK-RV64-SAME: <vscale x 32 x i1> [[MASK:%.*]], <vscale x 32 x bfloat> [[MASKEDOFF:%.*]], <vscale x 32 x bfloat> [[OP1:%.*]], bfloat noundef [[OP2:%.*]], i64 noundef [[VL:%.*]]) #[[ATTR0]] {
+// CHECK-RV64-NEXT: [[ENTRY:.*:]]
+// CHECK-RV64-NEXT: [[TMP0:%.*]] = call <vscale x 32 x bfloat> @llvm.riscv.vfmin.mask.nxv32bf16.bf16.i64(<vscale x 32 x bfloat> [[MASKEDOFF]], <vscale x 32 x bfloat> [[OP1]], bfloat [[OP2]], <vscale x 32 x i1> [[MASK]], i64 [[VL]], i64 1)
+// CHECK-RV64-NEXT: ret <vscale x 32 x bfloat> [[TMP0]]
+//
+vbfloat16m8_t test_vfmin_vf_bf16m8_mu(vbool2_t mask, vbfloat16m8_t maskedoff, vbfloat16m8_t op1, __bf16 op2, size_t vl) {
+ return __riscv_vfmin_mu(mask, maskedoff, op1, op2, vl);
+}
+
diff --git a/clang/test/CodeGen/RISCV/rvv-intrinsics-autogenerated/zvfbfa/policy/overloaded/vfmsac.c b/clang/test/CodeGen/RISCV/rvv-intrinsics-autogenerated/zvfbfa/policy/overloaded/vfmsac.c
new file mode 100644
index 0000000..0587c57
--- /dev/null
+++ b/clang/test/CodeGen/RISCV/rvv-intrinsics-autogenerated/zvfbfa/policy/overloaded/vfmsac.c
@@ -0,0 +1,489 @@
+// NOTE: Assertions have been autogenerated by utils/update_cc_test_checks.py UTC_ARGS: --version 5
+// REQUIRES: riscv-registered-target
+// RUN: %clang_cc1 -triple riscv64 -target-feature +v \
+// RUN: -target-feature +experimental-zvfbfa -disable-O0-optnone \
+// RUN: -emit-llvm %s -o - | opt -S -passes=mem2reg | \
+// RUN: FileCheck --check-prefix=CHECK-RV64 %s
+
+#include <riscv_vector.h>
+
+// CHECK-RV64-LABEL: define dso_local <vscale x 1 x bfloat> @test_vfmsac_vv_bf16mf4_tu(
+// CHECK-RV64-SAME: <vscale x 1 x bfloat> [[VD:%.*]], <vscale x 1 x bfloat> [[VS1:%.*]], <vscale x 1 x bfloat> [[VS2:%.*]], i64 noundef [[VL:%.*]]) #[[ATTR0:[0-9]+]] {
+// CHECK-RV64-NEXT: [[ENTRY:.*:]]
+// CHECK-RV64-NEXT: [[TMP0:%.*]] = call <vscale x 1 x bfloat> @llvm.riscv.vfmsac.nxv1bf16.nxv1bf16.i64(<vscale x 1 x bfloat> [[VD]], <vscale x 1 x bfloat> [[VS1]], <vscale x 1 x bfloat> [[VS2]], i64 7, i64 [[VL]], i64 2)
+// CHECK-RV64-NEXT: ret <vscale x 1 x bfloat> [[TMP0]]
+//
+vbfloat16mf4_t test_vfmsac_vv_bf16mf4_tu(vbfloat16mf4_t vd, vbfloat16mf4_t vs1, vbfloat16mf4_t vs2, size_t vl) {
+ return __riscv_vfmsac_tu(vd, vs1, vs2, vl);
+}
+
+// CHECK-RV64-LABEL: define dso_local <vscale x 1 x bfloat> @test_vfmsac_vf_bf16mf4_tu(
+// CHECK-RV64-SAME: <vscale x 1 x bfloat> [[VD:%.*]], bfloat noundef [[RS1:%.*]], <vscale x 1 x bfloat> [[VS2:%.*]], i64 noundef [[VL:%.*]]) #[[ATTR0]] {
+// CHECK-RV64-NEXT: [[ENTRY:.*:]]
+// CHECK-RV64-NEXT: [[TMP0:%.*]] = call <vscale x 1 x bfloat> @llvm.riscv.vfmsac.nxv1bf16.bf16.i64(<vscale x 1 x bfloat> [[VD]], bfloat [[RS1]], <vscale x 1 x bfloat> [[VS2]], i64 7, i64 [[VL]], i64 2)
+// CHECK-RV64-NEXT: ret <vscale x 1 x bfloat> [[TMP0]]
+//
+vbfloat16mf4_t test_vfmsac_vf_bf16mf4_tu(vbfloat16mf4_t vd, __bf16 rs1, vbfloat16mf4_t vs2, size_t vl) {
+ return __riscv_vfmsac_tu(vd, rs1, vs2, vl);
+}
+
+// CHECK-RV64-LABEL: define dso_local <vscale x 2 x bfloat> @test_vfmsac_vv_bf16mf2_tu(
+// CHECK-RV64-SAME: <vscale x 2 x bfloat> [[VD:%.*]], <vscale x 2 x bfloat> [[VS1:%.*]], <vscale x 2 x bfloat> [[VS2:%.*]], i64 noundef [[VL:%.*]]) #[[ATTR0]] {
+// CHECK-RV64-NEXT: [[ENTRY:.*:]]
+// CHECK-RV64-NEXT: [[TMP0:%.*]] = call <vscale x 2 x bfloat> @llvm.riscv.vfmsac.nxv2bf16.nxv2bf16.i64(<vscale x 2 x bfloat> [[VD]], <vscale x 2 x bfloat> [[VS1]], <vscale x 2 x bfloat> [[VS2]], i64 7, i64 [[VL]], i64 2)
+// CHECK-RV64-NEXT: ret <vscale x 2 x bfloat> [[TMP0]]
+//
+vbfloat16mf2_t test_vfmsac_vv_bf16mf2_tu(vbfloat16mf2_t vd, vbfloat16mf2_t vs1, vbfloat16mf2_t vs2, size_t vl) {
+ return __riscv_vfmsac_tu(vd, vs1, vs2, vl);
+}
+
+// CHECK-RV64-LABEL: define dso_local <vscale x 2 x bfloat> @test_vfmsac_vf_bf16mf2_tu(
+// CHECK-RV64-SAME: <vscale x 2 x bfloat> [[VD:%.*]], bfloat noundef [[RS1:%.*]], <vscale x 2 x bfloat> [[VS2:%.*]], i64 noundef [[VL:%.*]]) #[[ATTR0]] {
+// CHECK-RV64-NEXT: [[ENTRY:.*:]]
+// CHECK-RV64-NEXT: [[TMP0:%.*]] = call <vscale x 2 x bfloat> @llvm.riscv.vfmsac.nxv2bf16.bf16.i64(<vscale x 2 x bfloat> [[VD]], bfloat [[RS1]], <vscale x 2 x bfloat> [[VS2]], i64 7, i64 [[VL]], i64 2)
+// CHECK-RV64-NEXT: ret <vscale x 2 x bfloat> [[TMP0]]
+//
+vbfloat16mf2_t test_vfmsac_vf_bf16mf2_tu(vbfloat16mf2_t vd, __bf16 rs1, vbfloat16mf2_t vs2, size_t vl) {
+ return __riscv_vfmsac_tu(vd, rs1, vs2, vl);
+}
+
+// CHECK-RV64-LABEL: define dso_local <vscale x 4 x bfloat> @test_vfmsac_vv_bf16m1_tu(
+// CHECK-RV64-SAME: <vscale x 4 x bfloat> [[VD:%.*]], <vscale x 4 x bfloat> [[VS1:%.*]], <vscale x 4 x bfloat> [[VS2:%.*]], i64 noundef [[VL:%.*]]) #[[ATTR0]] {
+// CHECK-RV64-NEXT: [[ENTRY:.*:]]
+// CHECK-RV64-NEXT: [[TMP0:%.*]] = call <vscale x 4 x bfloat> @llvm.riscv.vfmsac.nxv4bf16.nxv4bf16.i64(<vscale x 4 x bfloat> [[VD]], <vscale x 4 x bfloat> [[VS1]], <vscale x 4 x bfloat> [[VS2]], i64 7, i64 [[VL]], i64 2)
+// CHECK-RV64-NEXT: ret <vscale x 4 x bfloat> [[TMP0]]
+//
+vbfloat16m1_t test_vfmsac_vv_bf16m1_tu(vbfloat16m1_t vd, vbfloat16m1_t vs1, vbfloat16m1_t vs2, size_t vl) {
+ return __riscv_vfmsac_tu(vd, vs1, vs2, vl);
+}
+
+// CHECK-RV64-LABEL: define dso_local <vscale x 4 x bfloat> @test_vfmsac_vf_bf16m1_tu(
+// CHECK-RV64-SAME: <vscale x 4 x bfloat> [[VD:%.*]], bfloat noundef [[RS1:%.*]], <vscale x 4 x bfloat> [[VS2:%.*]], i64 noundef [[VL:%.*]]) #[[ATTR0]] {
+// CHECK-RV64-NEXT: [[ENTRY:.*:]]
+// CHECK-RV64-NEXT: [[TMP0:%.*]] = call <vscale x 4 x bfloat> @llvm.riscv.vfmsac.nxv4bf16.bf16.i64(<vscale x 4 x bfloat> [[VD]], bfloat [[RS1]], <vscale x 4 x bfloat> [[VS2]], i64 7, i64 [[VL]], i64 2)
+// CHECK-RV64-NEXT: ret <vscale x 4 x bfloat> [[TMP0]]
+//
+vbfloat16m1_t test_vfmsac_vf_bf16m1_tu(vbfloat16m1_t vd, __bf16 rs1, vbfloat16m1_t vs2, size_t vl) {
+ return __riscv_vfmsac_tu(vd, rs1, vs2, vl);
+}
+
+// CHECK-RV64-LABEL: define dso_local <vscale x 8 x bfloat> @test_vfmsac_vv_bf16m2_tu(
+// CHECK-RV64-SAME: <vscale x 8 x bfloat> [[VD:%.*]], <vscale x 8 x bfloat> [[VS1:%.*]], <vscale x 8 x bfloat> [[VS2:%.*]], i64 noundef [[VL:%.*]]) #[[ATTR0]] {
+// CHECK-RV64-NEXT: [[ENTRY:.*:]]
+// CHECK-RV64-NEXT: [[TMP0:%.*]] = call <vscale x 8 x bfloat> @llvm.riscv.vfmsac.nxv8bf16.nxv8bf16.i64(<vscale x 8 x bfloat> [[VD]], <vscale x 8 x bfloat> [[VS1]], <vscale x 8 x bfloat> [[VS2]], i64 7, i64 [[VL]], i64 2)
+// CHECK-RV64-NEXT: ret <vscale x 8 x bfloat> [[TMP0]]
+//
+vbfloat16m2_t test_vfmsac_vv_bf16m2_tu(vbfloat16m2_t vd, vbfloat16m2_t vs1, vbfloat16m2_t vs2, size_t vl) {
+ return __riscv_vfmsac_tu(vd, vs1, vs2, vl);
+}
+
+// CHECK-RV64-LABEL: define dso_local <vscale x 8 x bfloat> @test_vfmsac_vf_bf16m2_tu(
+// CHECK-RV64-SAME: <vscale x 8 x bfloat> [[VD:%.*]], bfloat noundef [[RS1:%.*]], <vscale x 8 x bfloat> [[VS2:%.*]], i64 noundef [[VL:%.*]]) #[[ATTR0]] {
+// CHECK-RV64-NEXT: [[ENTRY:.*:]]
+// CHECK-RV64-NEXT: [[TMP0:%.*]] = call <vscale x 8 x bfloat> @llvm.riscv.vfmsac.nxv8bf16.bf16.i64(<vscale x 8 x bfloat> [[VD]], bfloat [[RS1]], <vscale x 8 x bfloat> [[VS2]], i64 7, i64 [[VL]], i64 2)
+// CHECK-RV64-NEXT: ret <vscale x 8 x bfloat> [[TMP0]]
+//
+vbfloat16m2_t test_vfmsac_vf_bf16m2_tu(vbfloat16m2_t vd, __bf16 rs1, vbfloat16m2_t vs2, size_t vl) {
+ return __riscv_vfmsac_tu(vd, rs1, vs2, vl);
+}
+
+// CHECK-RV64-LABEL: define dso_local <vscale x 16 x bfloat> @test_vfmsac_vv_bf16m4_tu(
+// CHECK-RV64-SAME: <vscale x 16 x bfloat> [[VD:%.*]], <vscale x 16 x bfloat> [[VS1:%.*]], <vscale x 16 x bfloat> [[VS2:%.*]], i64 noundef [[VL:%.*]]) #[[ATTR0]] {
+// CHECK-RV64-NEXT: [[ENTRY:.*:]]
+// CHECK-RV64-NEXT: [[TMP0:%.*]] = call <vscale x 16 x bfloat> @llvm.riscv.vfmsac.nxv16bf16.nxv16bf16.i64(<vscale x 16 x bfloat> [[VD]], <vscale x 16 x bfloat> [[VS1]], <vscale x 16 x bfloat> [[VS2]], i64 7, i64 [[VL]], i64 2)
+// CHECK-RV64-NEXT: ret <vscale x 16 x bfloat> [[TMP0]]
+//
+vbfloat16m4_t test_vfmsac_vv_bf16m4_tu(vbfloat16m4_t vd, vbfloat16m4_t vs1, vbfloat16m4_t vs2, size_t vl) {
+ return __riscv_vfmsac_tu(vd, vs1, vs2, vl);
+}
+
+// CHECK-RV64-LABEL: define dso_local <vscale x 16 x bfloat> @test_vfmsac_vf_bf16m4_tu(
+// CHECK-RV64-SAME: <vscale x 16 x bfloat> [[VD:%.*]], bfloat noundef [[RS1:%.*]], <vscale x 16 x bfloat> [[VS2:%.*]], i64 noundef [[VL:%.*]]) #[[ATTR0]] {
+// CHECK-RV64-NEXT: [[ENTRY:.*:]]
+// CHECK-RV64-NEXT: [[TMP0:%.*]] = call <vscale x 16 x bfloat> @llvm.riscv.vfmsac.nxv16bf16.bf16.i64(<vscale x 16 x bfloat> [[VD]], bfloat [[RS1]], <vscale x 16 x bfloat> [[VS2]], i64 7, i64 [[VL]], i64 2)
+// CHECK-RV64-NEXT: ret <vscale x 16 x bfloat> [[TMP0]]
+//
+vbfloat16m4_t test_vfmsac_vf_bf16m4_tu(vbfloat16m4_t vd, __bf16 rs1, vbfloat16m4_t vs2, size_t vl) {
+ return __riscv_vfmsac_tu(vd, rs1, vs2, vl);
+}
+
+// CHECK-RV64-LABEL: define dso_local <vscale x 32 x bfloat> @test_vfmsac_vv_bf16m8_tu(
+// CHECK-RV64-SAME: <vscale x 32 x bfloat> [[VD:%.*]], <vscale x 32 x bfloat> [[VS1:%.*]], <vscale x 32 x bfloat> [[VS2:%.*]], i64 noundef [[VL:%.*]]) #[[ATTR0]] {
+// CHECK-RV64-NEXT: [[ENTRY:.*:]]
+// CHECK-RV64-NEXT: [[TMP0:%.*]] = call <vscale x 32 x bfloat> @llvm.riscv.vfmsac.nxv32bf16.nxv32bf16.i64(<vscale x 32 x bfloat> [[VD]], <vscale x 32 x bfloat> [[VS1]], <vscale x 32 x bfloat> [[VS2]], i64 7, i64 [[VL]], i64 2)
+// CHECK-RV64-NEXT: ret <vscale x 32 x bfloat> [[TMP0]]
+//
+vbfloat16m8_t test_vfmsac_vv_bf16m8_tu(vbfloat16m8_t vd, vbfloat16m8_t vs1, vbfloat16m8_t vs2, size_t vl) {
+ return __riscv_vfmsac_tu(vd, vs1, vs2, vl);
+}
+
+// CHECK-RV64-LABEL: define dso_local <vscale x 32 x bfloat> @test_vfmsac_vf_bf16m8_tu(
+// CHECK-RV64-SAME: <vscale x 32 x bfloat> [[VD:%.*]], bfloat noundef [[RS1:%.*]], <vscale x 32 x bfloat> [[VS2:%.*]], i64 noundef [[VL:%.*]]) #[[ATTR0]] {
+// CHECK-RV64-NEXT: [[ENTRY:.*:]]
+// CHECK-RV64-NEXT: [[TMP0:%.*]] = call <vscale x 32 x bfloat> @llvm.riscv.vfmsac.nxv32bf16.bf16.i64(<vscale x 32 x bfloat> [[VD]], bfloat [[RS1]], <vscale x 32 x bfloat> [[VS2]], i64 7, i64 [[VL]], i64 2)
+// CHECK-RV64-NEXT: ret <vscale x 32 x bfloat> [[TMP0]]
+//
+vbfloat16m8_t test_vfmsac_vf_bf16m8_tu(vbfloat16m8_t vd, __bf16 rs1, vbfloat16m8_t vs2, size_t vl) {
+ return __riscv_vfmsac_tu(vd, rs1, vs2, vl);
+}
+
+// CHECK-RV64-LABEL: define dso_local <vscale x 1 x bfloat> @test_vfmsac_vv_bf16mf4_tum(
+// CHECK-RV64-SAME: <vscale x 1 x i1> [[MASK:%.*]], <vscale x 1 x bfloat> [[VD:%.*]], <vscale x 1 x bfloat> [[VS1:%.*]], <vscale x 1 x bfloat> [[VS2:%.*]], i64 noundef [[VL:%.*]]) #[[ATTR0]] {
+// CHECK-RV64-NEXT: [[ENTRY:.*:]]
+// CHECK-RV64-NEXT: [[TMP0:%.*]] = call <vscale x 1 x bfloat> @llvm.riscv.vfmsac.mask.nxv1bf16.nxv1bf16.i64(<vscale x 1 x bfloat> [[VD]], <vscale x 1 x bfloat> [[VS1]], <vscale x 1 x bfloat> [[VS2]], <vscale x 1 x i1> [[MASK]], i64 7, i64 [[VL]], i64 2)
+// CHECK-RV64-NEXT: ret <vscale x 1 x bfloat> [[TMP0]]
+//
+vbfloat16mf4_t test_vfmsac_vv_bf16mf4_tum(vbool64_t mask, vbfloat16mf4_t vd, vbfloat16mf4_t vs1, vbfloat16mf4_t vs2, size_t vl) {
+ return __riscv_vfmsac_tum(mask, vd, vs1, vs2, vl);
+}
+
+// CHECK-RV64-LABEL: define dso_local <vscale x 1 x bfloat> @test_vfmsac_vf_bf16mf4_tum(
+// CHECK-RV64-SAME: <vscale x 1 x i1> [[MASK:%.*]], <vscale x 1 x bfloat> [[VD:%.*]], bfloat noundef [[RS1:%.*]], <vscale x 1 x bfloat> [[VS2:%.*]], i64 noundef [[VL:%.*]]) #[[ATTR0]] {
+// CHECK-RV64-NEXT: [[ENTRY:.*:]]
+// CHECK-RV64-NEXT: [[TMP0:%.*]] = call <vscale x 1 x bfloat> @llvm.riscv.vfmsac.mask.nxv1bf16.bf16.i64(<vscale x 1 x bfloat> [[VD]], bfloat [[RS1]], <vscale x 1 x bfloat> [[VS2]], <vscale x 1 x i1> [[MASK]], i64 7, i64 [[VL]], i64 2)
+// CHECK-RV64-NEXT: ret <vscale x 1 x bfloat> [[TMP0]]
+//
+vbfloat16mf4_t test_vfmsac_vf_bf16mf4_tum(vbool64_t mask, vbfloat16mf4_t vd, __bf16 rs1, vbfloat16mf4_t vs2, size_t vl) {
+ return __riscv_vfmsac_tum(mask, vd, rs1, vs2, vl);
+}
+
+// CHECK-RV64-LABEL: define dso_local <vscale x 2 x bfloat> @test_vfmsac_vv_bf16mf2_tum(
+// CHECK-RV64-SAME: <vscale x 2 x i1> [[MASK:%.*]], <vscale x 2 x bfloat> [[VD:%.*]], <vscale x 2 x bfloat> [[VS1:%.*]], <vscale x 2 x bfloat> [[VS2:%.*]], i64 noundef [[VL:%.*]]) #[[ATTR0]] {
+// CHECK-RV64-NEXT: [[ENTRY:.*:]]
+// CHECK-RV64-NEXT: [[TMP0:%.*]] = call <vscale x 2 x bfloat> @llvm.riscv.vfmsac.mask.nxv2bf16.nxv2bf16.i64(<vscale x 2 x bfloat> [[VD]], <vscale x 2 x bfloat> [[VS1]], <vscale x 2 x bfloat> [[VS2]], <vscale x 2 x i1> [[MASK]], i64 7, i64 [[VL]], i64 2)
+// CHECK-RV64-NEXT: ret <vscale x 2 x bfloat> [[TMP0]]
+//
+vbfloat16mf2_t test_vfmsac_vv_bf16mf2_tum(vbool32_t mask, vbfloat16mf2_t vd, vbfloat16mf2_t vs1, vbfloat16mf2_t vs2, size_t vl) {
+ return __riscv_vfmsac_tum(mask, vd, vs1, vs2, vl);
+}
+
+// CHECK-RV64-LABEL: define dso_local <vscale x 2 x bfloat> @test_vfmsac_vf_bf16mf2_tum(
+// CHECK-RV64-SAME: <vscale x 2 x i1> [[MASK:%.*]], <vscale x 2 x bfloat> [[VD:%.*]], bfloat noundef [[RS1:%.*]], <vscale x 2 x bfloat> [[VS2:%.*]], i64 noundef [[VL:%.*]]) #[[ATTR0]] {
+// CHECK-RV64-NEXT: [[ENTRY:.*:]]
+// CHECK-RV64-NEXT: [[TMP0:%.*]] = call <vscale x 2 x bfloat> @llvm.riscv.vfmsac.mask.nxv2bf16.bf16.i64(<vscale x 2 x bfloat> [[VD]], bfloat [[RS1]], <vscale x 2 x bfloat> [[VS2]], <vscale x 2 x i1> [[MASK]], i64 7, i64 [[VL]], i64 2)
+// CHECK-RV64-NEXT: ret <vscale x 2 x bfloat> [[TMP0]]
+//
+vbfloat16mf2_t test_vfmsac_vf_bf16mf2_tum(vbool32_t mask, vbfloat16mf2_t vd, __bf16 rs1, vbfloat16mf2_t vs2, size_t vl) {
+ return __riscv_vfmsac_tum(mask, vd, rs1, vs2, vl);
+}
+
+// CHECK-RV64-LABEL: define dso_local <vscale x 4 x bfloat> @test_vfmsac_vv_bf16m1_tum(
+// CHECK-RV64-SAME: <vscale x 4 x i1> [[MASK:%.*]], <vscale x 4 x bfloat> [[VD:%.*]], <vscale x 4 x bfloat> [[VS1:%.*]], <vscale x 4 x bfloat> [[VS2:%.*]], i64 noundef [[VL:%.*]]) #[[ATTR0]] {
+// CHECK-RV64-NEXT: [[ENTRY:.*:]]
+// CHECK-RV64-NEXT: [[TMP0:%.*]] = call <vscale x 4 x bfloat> @llvm.riscv.vfmsac.mask.nxv4bf16.nxv4bf16.i64(<vscale x 4 x bfloat> [[VD]], <vscale x 4 x bfloat> [[VS1]], <vscale x 4 x bfloat> [[VS2]], <vscale x 4 x i1> [[MASK]], i64 7, i64 [[VL]], i64 2)
+// CHECK-RV64-NEXT: ret <vscale x 4 x bfloat> [[TMP0]]
+//
+vbfloat16m1_t test_vfmsac_vv_bf16m1_tum(vbool16_t mask, vbfloat16m1_t vd, vbfloat16m1_t vs1, vbfloat16m1_t vs2, size_t vl) {
+ return __riscv_vfmsac_tum(mask, vd, vs1, vs2, vl);
+}
+
+// CHECK-RV64-LABEL: define dso_local <vscale x 4 x bfloat> @test_vfmsac_vf_bf16m1_tum(
+// CHECK-RV64-SAME: <vscale x 4 x i1> [[MASK:%.*]], <vscale x 4 x bfloat> [[VD:%.*]], bfloat noundef [[RS1:%.*]], <vscale x 4 x bfloat> [[VS2:%.*]], i64 noundef [[VL:%.*]]) #[[ATTR0]] {
+// CHECK-RV64-NEXT: [[ENTRY:.*:]]
+// CHECK-RV64-NEXT: [[TMP0:%.*]] = call <vscale x 4 x bfloat> @llvm.riscv.vfmsac.mask.nxv4bf16.bf16.i64(<vscale x 4 x bfloat> [[VD]], bfloat [[RS1]], <vscale x 4 x bfloat> [[VS2]], <vscale x 4 x i1> [[MASK]], i64 7, i64 [[VL]], i64 2)
+// CHECK-RV64-NEXT: ret <vscale x 4 x bfloat> [[TMP0]]
+//
+vbfloat16m1_t test_vfmsac_vf_bf16m1_tum(vbool16_t mask, vbfloat16m1_t vd, __bf16 rs1, vbfloat16m1_t vs2, size_t vl) {
+ return __riscv_vfmsac_tum(mask, vd, rs1, vs2, vl);
+}
+
+// CHECK-RV64-LABEL: define dso_local <vscale x 8 x bfloat> @test_vfmsac_vv_bf16m2_tum(
+// CHECK-RV64-SAME: <vscale x 8 x i1> [[MASK:%.*]], <vscale x 8 x bfloat> [[VD:%.*]], <vscale x 8 x bfloat> [[VS1:%.*]], <vscale x 8 x bfloat> [[VS2:%.*]], i64 noundef [[VL:%.*]]) #[[ATTR0]] {
+// CHECK-RV64-NEXT: [[ENTRY:.*:]]
+// CHECK-RV64-NEXT: [[TMP0:%.*]] = call <vscale x 8 x bfloat> @llvm.riscv.vfmsac.mask.nxv8bf16.nxv8bf16.i64(<vscale x 8 x bfloat> [[VD]], <vscale x 8 x bfloat> [[VS1]], <vscale x 8 x bfloat> [[VS2]], <vscale x 8 x i1> [[MASK]], i64 7, i64 [[VL]], i64 2)
+// CHECK-RV64-NEXT: ret <vscale x 8 x bfloat> [[TMP0]]
+//
+vbfloat16m2_t test_vfmsac_vv_bf16m2_tum(vbool8_t mask, vbfloat16m2_t vd, vbfloat16m2_t vs1, vbfloat16m2_t vs2, size_t vl) {
+ return __riscv_vfmsac_tum(mask, vd, vs1, vs2, vl);
+}
+
+// CHECK-RV64-LABEL: define dso_local <vscale x 8 x bfloat> @test_vfmsac_vf_bf16m2_tum(
+// CHECK-RV64-SAME: <vscale x 8 x i1> [[MASK:%.*]], <vscale x 8 x bfloat> [[VD:%.*]], bfloat noundef [[RS1:%.*]], <vscale x 8 x bfloat> [[VS2:%.*]], i64 noundef [[VL:%.*]]) #[[ATTR0]] {
+// CHECK-RV64-NEXT: [[ENTRY:.*:]]
+// CHECK-RV64-NEXT: [[TMP0:%.*]] = call <vscale x 8 x bfloat> @llvm.riscv.vfmsac.mask.nxv8bf16.bf16.i64(<vscale x 8 x bfloat> [[VD]], bfloat [[RS1]], <vscale x 8 x bfloat> [[VS2]], <vscale x 8 x i1> [[MASK]], i64 7, i64 [[VL]], i64 2)
+// CHECK-RV64-NEXT: ret <vscale x 8 x bfloat> [[TMP0]]
+//
+vbfloat16m2_t test_vfmsac_vf_bf16m2_tum(vbool8_t mask, vbfloat16m2_t vd, __bf16 rs1, vbfloat16m2_t vs2, size_t vl) {
+ return __riscv_vfmsac_tum(mask, vd, rs1, vs2, vl);
+}
+
+// CHECK-RV64-LABEL: define dso_local <vscale x 16 x bfloat> @test_vfmsac_vv_bf16m4_tum(
+// CHECK-RV64-SAME: <vscale x 16 x i1> [[MASK:%.*]], <vscale x 16 x bfloat> [[VD:%.*]], <vscale x 16 x bfloat> [[VS1:%.*]], <vscale x 16 x bfloat> [[VS2:%.*]], i64 noundef [[VL:%.*]]) #[[ATTR0]] {
+// CHECK-RV64-NEXT: [[ENTRY:.*:]]
+// CHECK-RV64-NEXT: [[TMP0:%.*]] = call <vscale x 16 x bfloat> @llvm.riscv.vfmsac.mask.nxv16bf16.nxv16bf16.i64(<vscale x 16 x bfloat> [[VD]], <vscale x 16 x bfloat> [[VS1]], <vscale x 16 x bfloat> [[VS2]], <vscale x 16 x i1> [[MASK]], i64 7, i64 [[VL]], i64 2)
+// CHECK-RV64-NEXT: ret <vscale x 16 x bfloat> [[TMP0]]
+//
+vbfloat16m4_t test_vfmsac_vv_bf16m4_tum(vbool4_t mask, vbfloat16m4_t vd, vbfloat16m4_t vs1, vbfloat16m4_t vs2, size_t vl) {
+ return __riscv_vfmsac_tum(mask, vd, vs1, vs2, vl);
+}
+
+// CHECK-RV64-LABEL: define dso_local <vscale x 16 x bfloat> @test_vfmsac_vf_bf16m4_tum(
+// CHECK-RV64-SAME: <vscale x 16 x i1> [[MASK:%.*]], <vscale x 16 x bfloat> [[VD:%.*]], bfloat noundef [[RS1:%.*]], <vscale x 16 x bfloat> [[VS2:%.*]], i64 noundef [[VL:%.*]]) #[[ATTR0]] {
+// CHECK-RV64-NEXT: [[ENTRY:.*:]]
+// CHECK-RV64-NEXT: [[TMP0:%.*]] = call <vscale x 16 x bfloat> @llvm.riscv.vfmsac.mask.nxv16bf16.bf16.i64(<vscale x 16 x bfloat> [[VD]], bfloat [[RS1]], <vscale x 16 x bfloat> [[VS2]], <vscale x 16 x i1> [[MASK]], i64 7, i64 [[VL]], i64 2)
+// CHECK-RV64-NEXT: ret <vscale x 16 x bfloat> [[TMP0]]
+//
+vbfloat16m4_t test_vfmsac_vf_bf16m4_tum(vbool4_t mask, vbfloat16m4_t vd, __bf16 rs1, vbfloat16m4_t vs2, size_t vl) {
+ return __riscv_vfmsac_tum(mask, vd, rs1, vs2, vl);
+}
+
+// CHECK-RV64-LABEL: define dso_local <vscale x 32 x bfloat> @test_vfmsac_vv_bf16m8_tum(
+// CHECK-RV64-SAME: <vscale x 32 x i1> [[MASK:%.*]], <vscale x 32 x bfloat> [[VD:%.*]], <vscale x 32 x bfloat> [[VS1:%.*]], <vscale x 32 x bfloat> [[VS2:%.*]], i64 noundef [[VL:%.*]]) #[[ATTR0]] {
+// CHECK-RV64-NEXT: [[ENTRY:.*:]]
+// CHECK-RV64-NEXT: [[TMP0:%.*]] = call <vscale x 32 x bfloat> @llvm.riscv.vfmsac.mask.nxv32bf16.nxv32bf16.i64(<vscale x 32 x bfloat> [[VD]], <vscale x 32 x bfloat> [[VS1]], <vscale x 32 x bfloat> [[VS2]], <vscale x 32 x i1> [[MASK]], i64 7, i64 [[VL]], i64 2)
+// CHECK-RV64-NEXT: ret <vscale x 32 x bfloat> [[TMP0]]
+//
+vbfloat16m8_t test_vfmsac_vv_bf16m8_tum(vbool2_t mask, vbfloat16m8_t vd, vbfloat16m8_t vs1, vbfloat16m8_t vs2, size_t vl) {
+ return __riscv_vfmsac_tum(mask, vd, vs1, vs2, vl);
+}
+
+// CHECK-RV64-LABEL: define dso_local <vscale x 32 x bfloat> @test_vfmsac_vf_bf16m8_tum(
+// CHECK-RV64-SAME: <vscale x 32 x i1> [[MASK:%.*]], <vscale x 32 x bfloat> [[VD:%.*]], bfloat noundef [[RS1:%.*]], <vscale x 32 x bfloat> [[VS2:%.*]], i64 noundef [[VL:%.*]]) #[[ATTR0]] {
+// CHECK-RV64-NEXT: [[ENTRY:.*:]]
+// CHECK-RV64-NEXT: [[TMP0:%.*]] = call <vscale x 32 x bfloat> @llvm.riscv.vfmsac.mask.nxv32bf16.bf16.i64(<vscale x 32 x bfloat> [[VD]], bfloat [[RS1]], <vscale x 32 x bfloat> [[VS2]], <vscale x 32 x i1> [[MASK]], i64 7, i64 [[VL]], i64 2)
+// CHECK-RV64-NEXT: ret <vscale x 32 x bfloat> [[TMP0]]
+//
+vbfloat16m8_t test_vfmsac_vf_bf16m8_tum(vbool2_t mask, vbfloat16m8_t vd, __bf16 rs1, vbfloat16m8_t vs2, size_t vl) {
+ return __riscv_vfmsac_tum(mask, vd, rs1, vs2, vl);
+}
+
+// CHECK-RV64-LABEL: define dso_local <vscale x 1 x bfloat> @test_vfmsac_vv_bf16mf4_tumu(
+// CHECK-RV64-SAME: <vscale x 1 x i1> [[MASK:%.*]], <vscale x 1 x bfloat> [[VD:%.*]], <vscale x 1 x bfloat> [[VS1:%.*]], <vscale x 1 x bfloat> [[VS2:%.*]], i64 noundef [[VL:%.*]]) #[[ATTR0]] {
+// CHECK-RV64-NEXT: [[ENTRY:.*:]]
+// CHECK-RV64-NEXT: [[TMP0:%.*]] = call <vscale x 1 x bfloat> @llvm.riscv.vfmsac.mask.nxv1bf16.nxv1bf16.i64(<vscale x 1 x bfloat> [[VD]], <vscale x 1 x bfloat> [[VS1]], <vscale x 1 x bfloat> [[VS2]], <vscale x 1 x i1> [[MASK]], i64 7, i64 [[VL]], i64 0)
+// CHECK-RV64-NEXT: ret <vscale x 1 x bfloat> [[TMP0]]
+//
+vbfloat16mf4_t test_vfmsac_vv_bf16mf4_tumu(vbool64_t mask, vbfloat16mf4_t vd, vbfloat16mf4_t vs1, vbfloat16mf4_t vs2, size_t vl) {
+ return __riscv_vfmsac_tumu(mask, vd, vs1, vs2, vl);
+}
+
+// CHECK-RV64-LABEL: define dso_local <vscale x 1 x bfloat> @test_vfmsac_vf_bf16mf4_tumu(
+// CHECK-RV64-SAME: <vscale x 1 x i1> [[MASK:%.*]], <vscale x 1 x bfloat> [[VD:%.*]], bfloat noundef [[RS1:%.*]], <vscale x 1 x bfloat> [[VS2:%.*]], i64 noundef [[VL:%.*]]) #[[ATTR0]] {
+// CHECK-RV64-NEXT: [[ENTRY:.*:]]
+// CHECK-RV64-NEXT: [[TMP0:%.*]] = call <vscale x 1 x bfloat> @llvm.riscv.vfmsac.mask.nxv1bf16.bf16.i64(<vscale x 1 x bfloat> [[VD]], bfloat [[RS1]], <vscale x 1 x bfloat> [[VS2]], <vscale x 1 x i1> [[MASK]], i64 7, i64 [[VL]], i64 0)
+// CHECK-RV64-NEXT: ret <vscale x 1 x bfloat> [[TMP0]]
+//
+vbfloat16mf4_t test_vfmsac_vf_bf16mf4_tumu(vbool64_t mask, vbfloat16mf4_t vd, __bf16 rs1, vbfloat16mf4_t vs2, size_t vl) {
+ return __riscv_vfmsac_tumu(mask, vd, rs1, vs2, vl);
+}
+
+// CHECK-RV64-LABEL: define dso_local <vscale x 2 x bfloat> @test_vfmsac_vv_bf16mf2_tumu(
+// CHECK-RV64-SAME: <vscale x 2 x i1> [[MASK:%.*]], <vscale x 2 x bfloat> [[VD:%.*]], <vscale x 2 x bfloat> [[VS1:%.*]], <vscale x 2 x bfloat> [[VS2:%.*]], i64 noundef [[VL:%.*]]) #[[ATTR0]] {
+// CHECK-RV64-NEXT: [[ENTRY:.*:]]
+// CHECK-RV64-NEXT: [[TMP0:%.*]] = call <vscale x 2 x bfloat> @llvm.riscv.vfmsac.mask.nxv2bf16.nxv2bf16.i64(<vscale x 2 x bfloat> [[VD]], <vscale x 2 x bfloat> [[VS1]], <vscale x 2 x bfloat> [[VS2]], <vscale x 2 x i1> [[MASK]], i64 7, i64 [[VL]], i64 0)
+// CHECK-RV64-NEXT: ret <vscale x 2 x bfloat> [[TMP0]]
+//
+vbfloat16mf2_t test_vfmsac_vv_bf16mf2_tumu(vbool32_t mask, vbfloat16mf2_t vd, vbfloat16mf2_t vs1, vbfloat16mf2_t vs2, size_t vl) {
+ return __riscv_vfmsac_tumu(mask, vd, vs1, vs2, vl);
+}
+
+// CHECK-RV64-LABEL: define dso_local <vscale x 2 x bfloat> @test_vfmsac_vf_bf16mf2_tumu(
+// CHECK-RV64-SAME: <vscale x 2 x i1> [[MASK:%.*]], <vscale x 2 x bfloat> [[VD:%.*]], bfloat noundef [[RS1:%.*]], <vscale x 2 x bfloat> [[VS2:%.*]], i64 noundef [[VL:%.*]]) #[[ATTR0]] {
+// CHECK-RV64-NEXT: [[ENTRY:.*:]]
+// CHECK-RV64-NEXT: [[TMP0:%.*]] = call <vscale x 2 x bfloat> @llvm.riscv.vfmsac.mask.nxv2bf16.bf16.i64(<vscale x 2 x bfloat> [[VD]], bfloat [[RS1]], <vscale x 2 x bfloat> [[VS2]], <vscale x 2 x i1> [[MASK]], i64 7, i64 [[VL]], i64 0)
+// CHECK-RV64-NEXT: ret <vscale x 2 x bfloat> [[TMP0]]
+//
+vbfloat16mf2_t test_vfmsac_vf_bf16mf2_tumu(vbool32_t mask, vbfloat16mf2_t vd, __bf16 rs1, vbfloat16mf2_t vs2, size_t vl) {
+ return __riscv_vfmsac_tumu(mask, vd, rs1, vs2, vl);
+}
+
+// CHECK-RV64-LABEL: define dso_local <vscale x 4 x bfloat> @test_vfmsac_vv_bf16m1_tumu(
+// CHECK-RV64-SAME: <vscale x 4 x i1> [[MASK:%.*]], <vscale x 4 x bfloat> [[VD:%.*]], <vscale x 4 x bfloat> [[VS1:%.*]], <vscale x 4 x bfloat> [[VS2:%.*]], i64 noundef [[VL:%.*]]) #[[ATTR0]] {
+// CHECK-RV64-NEXT: [[ENTRY:.*:]]
+// CHECK-RV64-NEXT: [[TMP0:%.*]] = call <vscale x 4 x bfloat> @llvm.riscv.vfmsac.mask.nxv4bf16.nxv4bf16.i64(<vscale x 4 x bfloat> [[VD]], <vscale x 4 x bfloat> [[VS1]], <vscale x 4 x bfloat> [[VS2]], <vscale x 4 x i1> [[MASK]], i64 7, i64 [[VL]], i64 0)
+// CHECK-RV64-NEXT: ret <vscale x 4 x bfloat> [[TMP0]]
+//
+vbfloat16m1_t test_vfmsac_vv_bf16m1_tumu(vbool16_t mask, vbfloat16m1_t vd, vbfloat16m1_t vs1, vbfloat16m1_t vs2, size_t vl) {
+ return __riscv_vfmsac_tumu(mask, vd, vs1, vs2, vl);
+}
+
+// CHECK-RV64-LABEL: define dso_local <vscale x 4 x bfloat> @test_vfmsac_vf_bf16m1_tumu(
+// CHECK-RV64-SAME: <vscale x 4 x i1> [[MASK:%.*]], <vscale x 4 x bfloat> [[VD:%.*]], bfloat noundef [[RS1:%.*]], <vscale x 4 x bfloat> [[VS2:%.*]], i64 noundef [[VL:%.*]]) #[[ATTR0]] {
+// CHECK-RV64-NEXT: [[ENTRY:.*:]]
+// CHECK-RV64-NEXT: [[TMP0:%.*]] = call <vscale x 4 x bfloat> @llvm.riscv.vfmsac.mask.nxv4bf16.bf16.i64(<vscale x 4 x bfloat> [[VD]], bfloat [[RS1]], <vscale x 4 x bfloat> [[VS2]], <vscale x 4 x i1> [[MASK]], i64 7, i64 [[VL]], i64 0)
+// CHECK-RV64-NEXT: ret <vscale x 4 x bfloat> [[TMP0]]
+//
+vbfloat16m1_t test_vfmsac_vf_bf16m1_tumu(vbool16_t mask, vbfloat16m1_t vd, __bf16 rs1, vbfloat16m1_t vs2, size_t vl) {
+ return __riscv_vfmsac_tumu(mask, vd, rs1, vs2, vl);
+}
+
+// CHECK-RV64-LABEL: define dso_local <vscale x 8 x bfloat> @test_vfmsac_vv_bf16m2_tumu(
+// CHECK-RV64-SAME: <vscale x 8 x i1> [[MASK:%.*]], <vscale x 8 x bfloat> [[VD:%.*]], <vscale x 8 x bfloat> [[VS1:%.*]], <vscale x 8 x bfloat> [[VS2:%.*]], i64 noundef [[VL:%.*]]) #[[ATTR0]] {
+// CHECK-RV64-NEXT: [[ENTRY:.*:]]
+// CHECK-RV64-NEXT: [[TMP0:%.*]] = call <vscale x 8 x bfloat> @llvm.riscv.vfmsac.mask.nxv8bf16.nxv8bf16.i64(<vscale x 8 x bfloat> [[VD]], <vscale x 8 x bfloat> [[VS1]], <vscale x 8 x bfloat> [[VS2]], <vscale x 8 x i1> [[MASK]], i64 7, i64 [[VL]], i64 0)
+// CHECK-RV64-NEXT: ret <vscale x 8 x bfloat> [[TMP0]]
+//
+vbfloat16m2_t test_vfmsac_vv_bf16m2_tumu(vbool8_t mask, vbfloat16m2_t vd, vbfloat16m2_t vs1, vbfloat16m2_t vs2, size_t vl) {
+ return __riscv_vfmsac_tumu(mask, vd, vs1, vs2, vl);
+}
+
+// CHECK-RV64-LABEL: define dso_local <vscale x 8 x bfloat> @test_vfmsac_vf_bf16m2_tumu(
+// CHECK-RV64-SAME: <vscale x 8 x i1> [[MASK:%.*]], <vscale x 8 x bfloat> [[VD:%.*]], bfloat noundef [[RS1:%.*]], <vscale x 8 x bfloat> [[VS2:%.*]], i64 noundef [[VL:%.*]]) #[[ATTR0]] {
+// CHECK-RV64-NEXT: [[ENTRY:.*:]]
+// CHECK-RV64-NEXT: [[TMP0:%.*]] = call <vscale x 8 x bfloat> @llvm.riscv.vfmsac.mask.nxv8bf16.bf16.i64(<vscale x 8 x bfloat> [[VD]], bfloat [[RS1]], <vscale x 8 x bfloat> [[VS2]], <vscale x 8 x i1> [[MASK]], i64 7, i64 [[VL]], i64 0)
+// CHECK-RV64-NEXT: ret <vscale x 8 x bfloat> [[TMP0]]
+//
+vbfloat16m2_t test_vfmsac_vf_bf16m2_tumu(vbool8_t mask, vbfloat16m2_t vd, __bf16 rs1, vbfloat16m2_t vs2, size_t vl) {
+ return __riscv_vfmsac_tumu(mask, vd, rs1, vs2, vl);
+}
+
+// CHECK-RV64-LABEL: define dso_local <vscale x 16 x bfloat> @test_vfmsac_vv_bf16m4_tumu(
+// CHECK-RV64-SAME: <vscale x 16 x i1> [[MASK:%.*]], <vscale x 16 x bfloat> [[VD:%.*]], <vscale x 16 x bfloat> [[VS1:%.*]], <vscale x 16 x bfloat> [[VS2:%.*]], i64 noundef [[VL:%.*]]) #[[ATTR0]] {
+// CHECK-RV64-NEXT: [[ENTRY:.*:]]
+// CHECK-RV64-NEXT: [[TMP0:%.*]] = call <vscale x 16 x bfloat> @llvm.riscv.vfmsac.mask.nxv16bf16.nxv16bf16.i64(<vscale x 16 x bfloat> [[VD]], <vscale x 16 x bfloat> [[VS1]], <vscale x 16 x bfloat> [[VS2]], <vscale x 16 x i1> [[MASK]], i64 7, i64 [[VL]], i64 0)
+// CHECK-RV64-NEXT: ret <vscale x 16 x bfloat> [[TMP0]]
+//
+vbfloat16m4_t test_vfmsac_vv_bf16m4_tumu(vbool4_t mask, vbfloat16m4_t vd, vbfloat16m4_t vs1, vbfloat16m4_t vs2, size_t vl) {
+ return __riscv_vfmsac_tumu(mask, vd, vs1, vs2, vl);
+}
+
+// CHECK-RV64-LABEL: define dso_local <vscale x 16 x bfloat> @test_vfmsac_vf_bf16m4_tumu(
+// CHECK-RV64-SAME: <vscale x 16 x i1> [[MASK:%.*]], <vscale x 16 x bfloat> [[VD:%.*]], bfloat noundef [[RS1:%.*]], <vscale x 16 x bfloat> [[VS2:%.*]], i64 noundef [[VL:%.*]]) #[[ATTR0]] {
+// CHECK-RV64-NEXT: [[ENTRY:.*:]]
+// CHECK-RV64-NEXT: [[TMP0:%.*]] = call <vscale x 16 x bfloat> @llvm.riscv.vfmsac.mask.nxv16bf16.bf16.i64(<vscale x 16 x bfloat> [[VD]], bfloat [[RS1]], <vscale x 16 x bfloat> [[VS2]], <vscale x 16 x i1> [[MASK]], i64 7, i64 [[VL]], i64 0)
+// CHECK-RV64-NEXT: ret <vscale x 16 x bfloat> [[TMP0]]
+//
+vbfloat16m4_t test_vfmsac_vf_bf16m4_tumu(vbool4_t mask, vbfloat16m4_t vd, __bf16 rs1, vbfloat16m4_t vs2, size_t vl) {
+ return __riscv_vfmsac_tumu(mask, vd, rs1, vs2, vl);
+}
+
+// CHECK-RV64-LABEL: define dso_local <vscale x 32 x bfloat> @test_vfmsac_vv_bf16m8_tumu(
+// CHECK-RV64-SAME: <vscale x 32 x i1> [[MASK:%.*]], <vscale x 32 x bfloat> [[VD:%.*]], <vscale x 32 x bfloat> [[VS1:%.*]], <vscale x 32 x bfloat> [[VS2:%.*]], i64 noundef [[VL:%.*]]) #[[ATTR0]] {
+// CHECK-RV64-NEXT: [[ENTRY:.*:]]
+// CHECK-RV64-NEXT: [[TMP0:%.*]] = call <vscale x 32 x bfloat> @llvm.riscv.vfmsac.mask.nxv32bf16.nxv32bf16.i64(<vscale x 32 x bfloat> [[VD]], <vscale x 32 x bfloat> [[VS1]], <vscale x 32 x bfloat> [[VS2]], <vscale x 32 x i1> [[MASK]], i64 7, i64 [[VL]], i64 0)
+// CHECK-RV64-NEXT: ret <vscale x 32 x bfloat> [[TMP0]]
+//
+vbfloat16m8_t test_vfmsac_vv_bf16m8_tumu(vbool2_t mask, vbfloat16m8_t vd, vbfloat16m8_t vs1, vbfloat16m8_t vs2, size_t vl) {
+ return __riscv_vfmsac_tumu(mask, vd, vs1, vs2, vl);
+}
+
+// CHECK-RV64-LABEL: define dso_local <vscale x 32 x bfloat> @test_vfmsac_vf_bf16m8_tumu(
+// CHECK-RV64-SAME: <vscale x 32 x i1> [[MASK:%.*]], <vscale x 32 x bfloat> [[VD:%.*]], bfloat noundef [[RS1:%.*]], <vscale x 32 x bfloat> [[VS2:%.*]], i64 noundef [[VL:%.*]]) #[[ATTR0]] {
+// CHECK-RV64-NEXT: [[ENTRY:.*:]]
+// CHECK-RV64-NEXT: [[TMP0:%.*]] = call <vscale x 32 x bfloat> @llvm.riscv.vfmsac.mask.nxv32bf16.bf16.i64(<vscale x 32 x bfloat> [[VD]], bfloat [[RS1]], <vscale x 32 x bfloat> [[VS2]], <vscale x 32 x i1> [[MASK]], i64 7, i64 [[VL]], i64 0)
+// CHECK-RV64-NEXT: ret <vscale x 32 x bfloat> [[TMP0]]
+//
+vbfloat16m8_t test_vfmsac_vf_bf16m8_tumu(vbool2_t mask, vbfloat16m8_t vd, __bf16 rs1, vbfloat16m8_t vs2, size_t vl) {
+ return __riscv_vfmsac_tumu(mask, vd, rs1, vs2, vl);
+}
+
+// CHECK-RV64-LABEL: define dso_local <vscale x 1 x bfloat> @test_vfmsac_vv_bf16mf4_mu(
+// CHECK-RV64-SAME: <vscale x 1 x i1> [[MASK:%.*]], <vscale x 1 x bfloat> [[VD:%.*]], <vscale x 1 x bfloat> [[VS1:%.*]], <vscale x 1 x bfloat> [[VS2:%.*]], i64 noundef [[VL:%.*]]) #[[ATTR0]] {
+// CHECK-RV64-NEXT: [[ENTRY:.*:]]
+// CHECK-RV64-NEXT: [[TMP0:%.*]] = call <vscale x 1 x bfloat> @llvm.riscv.vfmsac.mask.nxv1bf16.nxv1bf16.i64(<vscale x 1 x bfloat> [[VD]], <vscale x 1 x bfloat> [[VS1]], <vscale x 1 x bfloat> [[VS2]], <vscale x 1 x i1> [[MASK]], i64 7, i64 [[VL]], i64 1)
+// CHECK-RV64-NEXT: ret <vscale x 1 x bfloat> [[TMP0]]
+//
+vbfloat16mf4_t test_vfmsac_vv_bf16mf4_mu(vbool64_t mask, vbfloat16mf4_t vd, vbfloat16mf4_t vs1, vbfloat16mf4_t vs2, size_t vl) {
+ return __riscv_vfmsac_mu(mask, vd, vs1, vs2, vl);
+}
+
+// CHECK-RV64-LABEL: define dso_local <vscale x 1 x bfloat> @test_vfmsac_vf_bf16mf4_mu(
+// CHECK-RV64-SAME: <vscale x 1 x i1> [[MASK:%.*]], <vscale x 1 x bfloat> [[VD:%.*]], bfloat noundef [[RS1:%.*]], <vscale x 1 x bfloat> [[VS2:%.*]], i64 noundef [[VL:%.*]]) #[[ATTR0]] {
+// CHECK-RV64-NEXT: [[ENTRY:.*:]]
+// CHECK-RV64-NEXT: [[TMP0:%.*]] = call <vscale x 1 x bfloat> @llvm.riscv.vfmsac.mask.nxv1bf16.bf16.i64(<vscale x 1 x bfloat> [[VD]], bfloat [[RS1]], <vscale x 1 x bfloat> [[VS2]], <vscale x 1 x i1> [[MASK]], i64 7, i64 [[VL]], i64 1)
+// CHECK-RV64-NEXT: ret <vscale x 1 x bfloat> [[TMP0]]
+//
+vbfloat16mf4_t test_vfmsac_vf_bf16mf4_mu(vbool64_t mask, vbfloat16mf4_t vd, __bf16 rs1, vbfloat16mf4_t vs2, size_t vl) {
+ return __riscv_vfmsac_mu(mask, vd, rs1, vs2, vl);
+}
+
+// CHECK-RV64-LABEL: define dso_local <vscale x 2 x bfloat> @test_vfmsac_vv_bf16mf2_mu(
+// CHECK-RV64-SAME: <vscale x 2 x i1> [[MASK:%.*]], <vscale x 2 x bfloat> [[VD:%.*]], <vscale x 2 x bfloat> [[VS1:%.*]], <vscale x 2 x bfloat> [[VS2:%.*]], i64 noundef [[VL:%.*]]) #[[ATTR0]] {
+// CHECK-RV64-NEXT: [[ENTRY:.*:]]
+// CHECK-RV64-NEXT: [[TMP0:%.*]] = call <vscale x 2 x bfloat> @llvm.riscv.vfmsac.mask.nxv2bf16.nxv2bf16.i64(<vscale x 2 x bfloat> [[VD]], <vscale x 2 x bfloat> [[VS1]], <vscale x 2 x bfloat> [[VS2]], <vscale x 2 x i1> [[MASK]], i64 7, i64 [[VL]], i64 1)
+// CHECK-RV64-NEXT: ret <vscale x 2 x bfloat> [[TMP0]]
+//
+vbfloat16mf2_t test_vfmsac_vv_bf16mf2_mu(vbool32_t mask, vbfloat16mf2_t vd, vbfloat16mf2_t vs1, vbfloat16mf2_t vs2, size_t vl) {
+ return __riscv_vfmsac_mu(mask, vd, vs1, vs2, vl);
+}
+
+// CHECK-RV64-LABEL: define dso_local <vscale x 2 x bfloat> @test_vfmsac_vf_bf16mf2_mu(
+// CHECK-RV64-SAME: <vscale x 2 x i1> [[MASK:%.*]], <vscale x 2 x bfloat> [[VD:%.*]], bfloat noundef [[RS1:%.*]], <vscale x 2 x bfloat> [[VS2:%.*]], i64 noundef [[VL:%.*]]) #[[ATTR0]] {
+// CHECK-RV64-NEXT: [[ENTRY:.*:]]
+// CHECK-RV64-NEXT: [[TMP0:%.*]] = call <vscale x 2 x bfloat> @llvm.riscv.vfmsac.mask.nxv2bf16.bf16.i64(<vscale x 2 x bfloat> [[VD]], bfloat [[RS1]], <vscale x 2 x bfloat> [[VS2]], <vscale x 2 x i1> [[MASK]], i64 7, i64 [[VL]], i64 1)
+// CHECK-RV64-NEXT: ret <vscale x 2 x bfloat> [[TMP0]]
+//
+vbfloat16mf2_t test_vfmsac_vf_bf16mf2_mu(vbool32_t mask, vbfloat16mf2_t vd, __bf16 rs1, vbfloat16mf2_t vs2, size_t vl) {
+ return __riscv_vfmsac_mu(mask, vd, rs1, vs2, vl);
+}
+
+// CHECK-RV64-LABEL: define dso_local <vscale x 4 x bfloat> @test_vfmsac_vv_bf16m1_mu(
+// CHECK-RV64-SAME: <vscale x 4 x i1> [[MASK:%.*]], <vscale x 4 x bfloat> [[VD:%.*]], <vscale x 4 x bfloat> [[VS1:%.*]], <vscale x 4 x bfloat> [[VS2:%.*]], i64 noundef [[VL:%.*]]) #[[ATTR0]] {
+// CHECK-RV64-NEXT: [[ENTRY:.*:]]
+// CHECK-RV64-NEXT: [[TMP0:%.*]] = call <vscale x 4 x bfloat> @llvm.riscv.vfmsac.mask.nxv4bf16.nxv4bf16.i64(<vscale x 4 x bfloat> [[VD]], <vscale x 4 x bfloat> [[VS1]], <vscale x 4 x bfloat> [[VS2]], <vscale x 4 x i1> [[MASK]], i64 7, i64 [[VL]], i64 1)
+// CHECK-RV64-NEXT: ret <vscale x 4 x bfloat> [[TMP0]]
+//
+vbfloat16m1_t test_vfmsac_vv_bf16m1_mu(vbool16_t mask, vbfloat16m1_t vd, vbfloat16m1_t vs1, vbfloat16m1_t vs2, size_t vl) {
+ return __riscv_vfmsac_mu(mask, vd, vs1, vs2, vl);
+}
+
+// CHECK-RV64-LABEL: define dso_local <vscale x 4 x bfloat> @test_vfmsac_vf_bf16m1_mu(
+// CHECK-RV64-SAME: <vscale x 4 x i1> [[MASK:%.*]], <vscale x 4 x bfloat> [[VD:%.*]], bfloat noundef [[RS1:%.*]], <vscale x 4 x bfloat> [[VS2:%.*]], i64 noundef [[VL:%.*]]) #[[ATTR0]] {
+// CHECK-RV64-NEXT: [[ENTRY:.*:]]
+// CHECK-RV64-NEXT: [[TMP0:%.*]] = call <vscale x 4 x bfloat> @llvm.riscv.vfmsac.mask.nxv4bf16.bf16.i64(<vscale x 4 x bfloat> [[VD]], bfloat [[RS1]], <vscale x 4 x bfloat> [[VS2]], <vscale x 4 x i1> [[MASK]], i64 7, i64 [[VL]], i64 1)
+// CHECK-RV64-NEXT: ret <vscale x 4 x bfloat> [[TMP0]]
+//
+vbfloat16m1_t test_vfmsac_vf_bf16m1_mu(vbool16_t mask, vbfloat16m1_t vd, __bf16 rs1, vbfloat16m1_t vs2, size_t vl) {
+ return __riscv_vfmsac_mu(mask, vd, rs1, vs2, vl);
+}
+
+// CHECK-RV64-LABEL: define dso_local <vscale x 8 x bfloat> @test_vfmsac_vv_bf16m2_mu(
+// CHECK-RV64-SAME: <vscale x 8 x i1> [[MASK:%.*]], <vscale x 8 x bfloat> [[VD:%.*]], <vscale x 8 x bfloat> [[VS1:%.*]], <vscale x 8 x bfloat> [[VS2:%.*]], i64 noundef [[VL:%.*]]) #[[ATTR0]] {
+// CHECK-RV64-NEXT: [[ENTRY:.*:]]
+// CHECK-RV64-NEXT: [[TMP0:%.*]] = call <vscale x 8 x bfloat> @llvm.riscv.vfmsac.mask.nxv8bf16.nxv8bf16.i64(<vscale x 8 x bfloat> [[VD]], <vscale x 8 x bfloat> [[VS1]], <vscale x 8 x bfloat> [[VS2]], <vscale x 8 x i1> [[MASK]], i64 7, i64 [[VL]], i64 1)
+// CHECK-RV64-NEXT: ret <vscale x 8 x bfloat> [[TMP0]]
+//
+vbfloat16m2_t test_vfmsac_vv_bf16m2_mu(vbool8_t mask, vbfloat16m2_t vd, vbfloat16m2_t vs1, vbfloat16m2_t vs2, size_t vl) {
+ return __riscv_vfmsac_mu(mask, vd, vs1, vs2, vl);
+}
+
+// CHECK-RV64-LABEL: define dso_local <vscale x 8 x bfloat> @test_vfmsac_vf_bf16m2_mu(
+// CHECK-RV64-SAME: <vscale x 8 x i1> [[MASK:%.*]], <vscale x 8 x bfloat> [[VD:%.*]], bfloat noundef [[RS1:%.*]], <vscale x 8 x bfloat> [[VS2:%.*]], i64 noundef [[VL:%.*]]) #[[ATTR0]] {
+// CHECK-RV64-NEXT: [[ENTRY:.*:]]
+// CHECK-RV64-NEXT: [[TMP0:%.*]] = call <vscale x 8 x bfloat> @llvm.riscv.vfmsac.mask.nxv8bf16.bf16.i64(<vscale x 8 x bfloat> [[VD]], bfloat [[RS1]], <vscale x 8 x bfloat> [[VS2]], <vscale x 8 x i1> [[MASK]], i64 7, i64 [[VL]], i64 1)
+// CHECK-RV64-NEXT: ret <vscale x 8 x bfloat> [[TMP0]]
+//
+vbfloat16m2_t test_vfmsac_vf_bf16m2_mu(vbool8_t mask, vbfloat16m2_t vd, __bf16 rs1, vbfloat16m2_t vs2, size_t vl) {
+ return __riscv_vfmsac_mu(mask, vd, rs1, vs2, vl);
+}
+
+// CHECK-RV64-LABEL: define dso_local <vscale x 16 x bfloat> @test_vfmsac_vv_bf16m4_mu(
+// CHECK-RV64-SAME: <vscale x 16 x i1> [[MASK:%.*]], <vscale x 16 x bfloat> [[VD:%.*]], <vscale x 16 x bfloat> [[VS1:%.*]], <vscale x 16 x bfloat> [[VS2:%.*]], i64 noundef [[VL:%.*]]) #[[ATTR0]] {
+// CHECK-RV64-NEXT: [[ENTRY:.*:]]
+// CHECK-RV64-NEXT: [[TMP0:%.*]] = call <vscale x 16 x bfloat> @llvm.riscv.vfmsac.mask.nxv16bf16.nxv16bf16.i64(<vscale x 16 x bfloat> [[VD]], <vscale x 16 x bfloat> [[VS1]], <vscale x 16 x bfloat> [[VS2]], <vscale x 16 x i1> [[MASK]], i64 7, i64 [[VL]], i64 1)
+// CHECK-RV64-NEXT: ret <vscale x 16 x bfloat> [[TMP0]]
+//
+vbfloat16m4_t test_vfmsac_vv_bf16m4_mu(vbool4_t mask, vbfloat16m4_t vd, vbfloat16m4_t vs1, vbfloat16m4_t vs2, size_t vl) {
+ return __riscv_vfmsac_mu(mask, vd, vs1, vs2, vl);
+}
+
+// CHECK-RV64-LABEL: define dso_local <vscale x 16 x bfloat> @test_vfmsac_vf_bf16m4_mu(
+// CHECK-RV64-SAME: <vscale x 16 x i1> [[MASK:%.*]], <vscale x 16 x bfloat> [[VD:%.*]], bfloat noundef [[RS1:%.*]], <vscale x 16 x bfloat> [[VS2:%.*]], i64 noundef [[VL:%.*]]) #[[ATTR0]] {
+// CHECK-RV64-NEXT: [[ENTRY:.*:]]
+// CHECK-RV64-NEXT: [[TMP0:%.*]] = call <vscale x 16 x bfloat> @llvm.riscv.vfmsac.mask.nxv16bf16.bf16.i64(<vscale x 16 x bfloat> [[VD]], bfloat [[RS1]], <vscale x 16 x bfloat> [[VS2]], <vscale x 16 x i1> [[MASK]], i64 7, i64 [[VL]], i64 1)
+// CHECK-RV64-NEXT: ret <vscale x 16 x bfloat> [[TMP0]]
+//
+vbfloat16m4_t test_vfmsac_vf_bf16m4_mu(vbool4_t mask, vbfloat16m4_t vd, __bf16 rs1, vbfloat16m4_t vs2, size_t vl) {
+ return __riscv_vfmsac_mu(mask, vd, rs1, vs2, vl);
+}
+
+// CHECK-RV64-LABEL: define dso_local <vscale x 32 x bfloat> @test_vfmsac_vv_bf16m8_mu(
+// CHECK-RV64-SAME: <vscale x 32 x i1> [[MASK:%.*]], <vscale x 32 x bfloat> [[VD:%.*]], <vscale x 32 x bfloat> [[VS1:%.*]], <vscale x 32 x bfloat> [[VS2:%.*]], i64 noundef [[VL:%.*]]) #[[ATTR0]] {
+// CHECK-RV64-NEXT: [[ENTRY:.*:]]
+// CHECK-RV64-NEXT: [[TMP0:%.*]] = call <vscale x 32 x bfloat> @llvm.riscv.vfmsac.mask.nxv32bf16.nxv32bf16.i64(<vscale x 32 x bfloat> [[VD]], <vscale x 32 x bfloat> [[VS1]], <vscale x 32 x bfloat> [[VS2]], <vscale x 32 x i1> [[MASK]], i64 7, i64 [[VL]], i64 1)
+// CHECK-RV64-NEXT: ret <vscale x 32 x bfloat> [[TMP0]]
+//
+vbfloat16m8_t test_vfmsac_vv_bf16m8_mu(vbool2_t mask, vbfloat16m8_t vd, vbfloat16m8_t vs1, vbfloat16m8_t vs2, size_t vl) {
+ return __riscv_vfmsac_mu(mask, vd, vs1, vs2, vl);
+}
+
+// CHECK-RV64-LABEL: define dso_local <vscale x 32 x bfloat> @test_vfmsac_vf_bf16m8_mu(
+// CHECK-RV64-SAME: <vscale x 32 x i1> [[MASK:%.*]], <vscale x 32 x bfloat> [[VD:%.*]], bfloat noundef [[RS1:%.*]], <vscale x 32 x bfloat> [[VS2:%.*]], i64 noundef [[VL:%.*]]) #[[ATTR0]] {
+// CHECK-RV64-NEXT: [[ENTRY:.*:]]
+// CHECK-RV64-NEXT: [[TMP0:%.*]] = call <vscale x 32 x bfloat> @llvm.riscv.vfmsac.mask.nxv32bf16.bf16.i64(<vscale x 32 x bfloat> [[VD]], bfloat [[RS1]], <vscale x 32 x bfloat> [[VS2]], <vscale x 32 x i1> [[MASK]], i64 7, i64 [[VL]], i64 1)
+// CHECK-RV64-NEXT: ret <vscale x 32 x bfloat> [[TMP0]]
+//
+vbfloat16m8_t test_vfmsac_vf_bf16m8_mu(vbool2_t mask, vbfloat16m8_t vd, __bf16 rs1, vbfloat16m8_t vs2, size_t vl) {
+ return __riscv_vfmsac_mu(mask, vd, rs1, vs2, vl);
+}
+
diff --git a/clang/test/CodeGen/RISCV/rvv-intrinsics-autogenerated/zvfbfa/policy/overloaded/vfmsub.c b/clang/test/CodeGen/RISCV/rvv-intrinsics-autogenerated/zvfbfa/policy/overloaded/vfmsub.c
new file mode 100644
index 0000000..2ad26f8
--- /dev/null
+++ b/clang/test/CodeGen/RISCV/rvv-intrinsics-autogenerated/zvfbfa/policy/overloaded/vfmsub.c
@@ -0,0 +1,489 @@
+// NOTE: Assertions have been autogenerated by utils/update_cc_test_checks.py UTC_ARGS: --version 5
+// REQUIRES: riscv-registered-target
+// RUN: %clang_cc1 -triple riscv64 -target-feature +v \
+// RUN: -target-feature +experimental-zvfbfa -disable-O0-optnone \
+// RUN: -emit-llvm %s -o - | opt -S -passes=mem2reg | \
+// RUN: FileCheck --check-prefix=CHECK-RV64 %s
+
+#include <riscv_vector.h>
+
+// CHECK-RV64-LABEL: define dso_local <vscale x 1 x bfloat> @test_vfmsub_vv_bf16mf4_tu(
+// CHECK-RV64-SAME: <vscale x 1 x bfloat> [[VD:%.*]], <vscale x 1 x bfloat> [[VS1:%.*]], <vscale x 1 x bfloat> [[VS2:%.*]], i64 noundef [[VL:%.*]]) #[[ATTR0:[0-9]+]] {
+// CHECK-RV64-NEXT: [[ENTRY:.*:]]
+// CHECK-RV64-NEXT: [[TMP0:%.*]] = call <vscale x 1 x bfloat> @llvm.riscv.vfmsub.nxv1bf16.nxv1bf16.i64(<vscale x 1 x bfloat> [[VD]], <vscale x 1 x bfloat> [[VS1]], <vscale x 1 x bfloat> [[VS2]], i64 7, i64 [[VL]], i64 2)
+// CHECK-RV64-NEXT: ret <vscale x 1 x bfloat> [[TMP0]]
+//
+vbfloat16mf4_t test_vfmsub_vv_bf16mf4_tu(vbfloat16mf4_t vd, vbfloat16mf4_t vs1, vbfloat16mf4_t vs2, size_t vl) {
+ return __riscv_vfmsub_tu(vd, vs1, vs2, vl);
+}
+
+// CHECK-RV64-LABEL: define dso_local <vscale x 1 x bfloat> @test_vfmsub_vf_bf16mf4_tu(
+// CHECK-RV64-SAME: <vscale x 1 x bfloat> [[VD:%.*]], bfloat noundef [[RS1:%.*]], <vscale x 1 x bfloat> [[VS2:%.*]], i64 noundef [[VL:%.*]]) #[[ATTR0]] {
+// CHECK-RV64-NEXT: [[ENTRY:.*:]]
+// CHECK-RV64-NEXT: [[TMP0:%.*]] = call <vscale x 1 x bfloat> @llvm.riscv.vfmsub.nxv1bf16.bf16.i64(<vscale x 1 x bfloat> [[VD]], bfloat [[RS1]], <vscale x 1 x bfloat> [[VS2]], i64 7, i64 [[VL]], i64 2)
+// CHECK-RV64-NEXT: ret <vscale x 1 x bfloat> [[TMP0]]
+//
+vbfloat16mf4_t test_vfmsub_vf_bf16mf4_tu(vbfloat16mf4_t vd, __bf16 rs1, vbfloat16mf4_t vs2, size_t vl) {
+ return __riscv_vfmsub_tu(vd, rs1, vs2, vl);
+}
+
+// CHECK-RV64-LABEL: define dso_local <vscale x 2 x bfloat> @test_vfmsub_vv_bf16mf2_tu(
+// CHECK-RV64-SAME: <vscale x 2 x bfloat> [[VD:%.*]], <vscale x 2 x bfloat> [[VS1:%.*]], <vscale x 2 x bfloat> [[VS2:%.*]], i64 noundef [[VL:%.*]]) #[[ATTR0]] {
+// CHECK-RV64-NEXT: [[ENTRY:.*:]]
+// CHECK-RV64-NEXT: [[TMP0:%.*]] = call <vscale x 2 x bfloat> @llvm.riscv.vfmsub.nxv2bf16.nxv2bf16.i64(<vscale x 2 x bfloat> [[VD]], <vscale x 2 x bfloat> [[VS1]], <vscale x 2 x bfloat> [[VS2]], i64 7, i64 [[VL]], i64 2)
+// CHECK-RV64-NEXT: ret <vscale x 2 x bfloat> [[TMP0]]
+//
+vbfloat16mf2_t test_vfmsub_vv_bf16mf2_tu(vbfloat16mf2_t vd, vbfloat16mf2_t vs1, vbfloat16mf2_t vs2, size_t vl) {
+ return __riscv_vfmsub_tu(vd, vs1, vs2, vl);
+}
+
+// CHECK-RV64-LABEL: define dso_local <vscale x 2 x bfloat> @test_vfmsub_vf_bf16mf2_tu(
+// CHECK-RV64-SAME: <vscale x 2 x bfloat> [[VD:%.*]], bfloat noundef [[RS1:%.*]], <vscale x 2 x bfloat> [[VS2:%.*]], i64 noundef [[VL:%.*]]) #[[ATTR0]] {
+// CHECK-RV64-NEXT: [[ENTRY:.*:]]
+// CHECK-RV64-NEXT: [[TMP0:%.*]] = call <vscale x 2 x bfloat> @llvm.riscv.vfmsub.nxv2bf16.bf16.i64(<vscale x 2 x bfloat> [[VD]], bfloat [[RS1]], <vscale x 2 x bfloat> [[VS2]], i64 7, i64 [[VL]], i64 2)
+// CHECK-RV64-NEXT: ret <vscale x 2 x bfloat> [[TMP0]]
+//
+vbfloat16mf2_t test_vfmsub_vf_bf16mf2_tu(vbfloat16mf2_t vd, __bf16 rs1, vbfloat16mf2_t vs2, size_t vl) {
+ return __riscv_vfmsub_tu(vd, rs1, vs2, vl);
+}
+
+// CHECK-RV64-LABEL: define dso_local <vscale x 4 x bfloat> @test_vfmsub_vv_bf16m1_tu(
+// CHECK-RV64-SAME: <vscale x 4 x bfloat> [[VD:%.*]], <vscale x 4 x bfloat> [[VS1:%.*]], <vscale x 4 x bfloat> [[VS2:%.*]], i64 noundef [[VL:%.*]]) #[[ATTR0]] {
+// CHECK-RV64-NEXT: [[ENTRY:.*:]]
+// CHECK-RV64-NEXT: [[TMP0:%.*]] = call <vscale x 4 x bfloat> @llvm.riscv.vfmsub.nxv4bf16.nxv4bf16.i64(<vscale x 4 x bfloat> [[VD]], <vscale x 4 x bfloat> [[VS1]], <vscale x 4 x bfloat> [[VS2]], i64 7, i64 [[VL]], i64 2)
+// CHECK-RV64-NEXT: ret <vscale x 4 x bfloat> [[TMP0]]
+//
+vbfloat16m1_t test_vfmsub_vv_bf16m1_tu(vbfloat16m1_t vd, vbfloat16m1_t vs1, vbfloat16m1_t vs2, size_t vl) {
+ return __riscv_vfmsub_tu(vd, vs1, vs2, vl);
+}
+
+// CHECK-RV64-LABEL: define dso_local <vscale x 4 x bfloat> @test_vfmsub_vf_bf16m1_tu(
+// CHECK-RV64-SAME: <vscale x 4 x bfloat> [[VD:%.*]], bfloat noundef [[RS1:%.*]], <vscale x 4 x bfloat> [[VS2:%.*]], i64 noundef [[VL:%.*]]) #[[ATTR0]] {
+// CHECK-RV64-NEXT: [[ENTRY:.*:]]
+// CHECK-RV64-NEXT: [[TMP0:%.*]] = call <vscale x 4 x bfloat> @llvm.riscv.vfmsub.nxv4bf16.bf16.i64(<vscale x 4 x bfloat> [[VD]], bfloat [[RS1]], <vscale x 4 x bfloat> [[VS2]], i64 7, i64 [[VL]], i64 2)
+// CHECK-RV64-NEXT: ret <vscale x 4 x bfloat> [[TMP0]]
+//
+vbfloat16m1_t test_vfmsub_vf_bf16m1_tu(vbfloat16m1_t vd, __bf16 rs1, vbfloat16m1_t vs2, size_t vl) {
+ return __riscv_vfmsub_tu(vd, rs1, vs2, vl);
+}
+
+// CHECK-RV64-LABEL: define dso_local <vscale x 8 x bfloat> @test_vfmsub_vv_bf16m2_tu(
+// CHECK-RV64-SAME: <vscale x 8 x bfloat> [[VD:%.*]], <vscale x 8 x bfloat> [[VS1:%.*]], <vscale x 8 x bfloat> [[VS2:%.*]], i64 noundef [[VL:%.*]]) #[[ATTR0]] {
+// CHECK-RV64-NEXT: [[ENTRY:.*:]]
+// CHECK-RV64-NEXT: [[TMP0:%.*]] = call <vscale x 8 x bfloat> @llvm.riscv.vfmsub.nxv8bf16.nxv8bf16.i64(<vscale x 8 x bfloat> [[VD]], <vscale x 8 x bfloat> [[VS1]], <vscale x 8 x bfloat> [[VS2]], i64 7, i64 [[VL]], i64 2)
+// CHECK-RV64-NEXT: ret <vscale x 8 x bfloat> [[TMP0]]
+//
+vbfloat16m2_t test_vfmsub_vv_bf16m2_tu(vbfloat16m2_t vd, vbfloat16m2_t vs1, vbfloat16m2_t vs2, size_t vl) {
+ return __riscv_vfmsub_tu(vd, vs1, vs2, vl);
+}
+
+// CHECK-RV64-LABEL: define dso_local <vscale x 8 x bfloat> @test_vfmsub_vf_bf16m2_tu(
+// CHECK-RV64-SAME: <vscale x 8 x bfloat> [[VD:%.*]], bfloat noundef [[RS1:%.*]], <vscale x 8 x bfloat> [[VS2:%.*]], i64 noundef [[VL:%.*]]) #[[ATTR0]] {
+// CHECK-RV64-NEXT: [[ENTRY:.*:]]
+// CHECK-RV64-NEXT: [[TMP0:%.*]] = call <vscale x 8 x bfloat> @llvm.riscv.vfmsub.nxv8bf16.bf16.i64(<vscale x 8 x bfloat> [[VD]], bfloat [[RS1]], <vscale x 8 x bfloat> [[VS2]], i64 7, i64 [[VL]], i64 2)
+// CHECK-RV64-NEXT: ret <vscale x 8 x bfloat> [[TMP0]]
+//
+vbfloat16m2_t test_vfmsub_vf_bf16m2_tu(vbfloat16m2_t vd, __bf16 rs1, vbfloat16m2_t vs2, size_t vl) {
+ return __riscv_vfmsub_tu(vd, rs1, vs2, vl);
+}
+
+// CHECK-RV64-LABEL: define dso_local <vscale x 16 x bfloat> @test_vfmsub_vv_bf16m4_tu(
+// CHECK-RV64-SAME: <vscale x 16 x bfloat> [[VD:%.*]], <vscale x 16 x bfloat> [[VS1:%.*]], <vscale x 16 x bfloat> [[VS2:%.*]], i64 noundef [[VL:%.*]]) #[[ATTR0]] {
+// CHECK-RV64-NEXT: [[ENTRY:.*:]]
+// CHECK-RV64-NEXT: [[TMP0:%.*]] = call <vscale x 16 x bfloat> @llvm.riscv.vfmsub.nxv16bf16.nxv16bf16.i64(<vscale x 16 x bfloat> [[VD]], <vscale x 16 x bfloat> [[VS1]], <vscale x 16 x bfloat> [[VS2]], i64 7, i64 [[VL]], i64 2)
+// CHECK-RV64-NEXT: ret <vscale x 16 x bfloat> [[TMP0]]
+//
+vbfloat16m4_t test_vfmsub_vv_bf16m4_tu(vbfloat16m4_t vd, vbfloat16m4_t vs1, vbfloat16m4_t vs2, size_t vl) {
+ return __riscv_vfmsub_tu(vd, vs1, vs2, vl);
+}
+
+// CHECK-RV64-LABEL: define dso_local <vscale x 16 x bfloat> @test_vfmsub_vf_bf16m4_tu(
+// CHECK-RV64-SAME: <vscale x 16 x bfloat> [[VD:%.*]], bfloat noundef [[RS1:%.*]], <vscale x 16 x bfloat> [[VS2:%.*]], i64 noundef [[VL:%.*]]) #[[ATTR0]] {
+// CHECK-RV64-NEXT: [[ENTRY:.*:]]
+// CHECK-RV64-NEXT: [[TMP0:%.*]] = call <vscale x 16 x bfloat> @llvm.riscv.vfmsub.nxv16bf16.bf16.i64(<vscale x 16 x bfloat> [[VD]], bfloat [[RS1]], <vscale x 16 x bfloat> [[VS2]], i64 7, i64 [[VL]], i64 2)
+// CHECK-RV64-NEXT: ret <vscale x 16 x bfloat> [[TMP0]]
+//
+vbfloat16m4_t test_vfmsub_vf_bf16m4_tu(vbfloat16m4_t vd, __bf16 rs1, vbfloat16m4_t vs2, size_t vl) {
+ return __riscv_vfmsub_tu(vd, rs1, vs2, vl);
+}
+
+// CHECK-RV64-LABEL: define dso_local <vscale x 32 x bfloat> @test_vfmsub_vv_bf16m8_tu(
+// CHECK-RV64-SAME: <vscale x 32 x bfloat> [[VD:%.*]], <vscale x 32 x bfloat> [[VS1:%.*]], <vscale x 32 x bfloat> [[VS2:%.*]], i64 noundef [[VL:%.*]]) #[[ATTR0]] {
+// CHECK-RV64-NEXT: [[ENTRY:.*:]]
+// CHECK-RV64-NEXT: [[TMP0:%.*]] = call <vscale x 32 x bfloat> @llvm.riscv.vfmsub.nxv32bf16.nxv32bf16.i64(<vscale x 32 x bfloat> [[VD]], <vscale x 32 x bfloat> [[VS1]], <vscale x 32 x bfloat> [[VS2]], i64 7, i64 [[VL]], i64 2)
+// CHECK-RV64-NEXT: ret <vscale x 32 x bfloat> [[TMP0]]
+//
+vbfloat16m8_t test_vfmsub_vv_bf16m8_tu(vbfloat16m8_t vd, vbfloat16m8_t vs1, vbfloat16m8_t vs2, size_t vl) {
+ return __riscv_vfmsub_tu(vd, vs1, vs2, vl);
+}
+
+// CHECK-RV64-LABEL: define dso_local <vscale x 32 x bfloat> @test_vfmsub_vf_bf16m8_tu(
+// CHECK-RV64-SAME: <vscale x 32 x bfloat> [[VD:%.*]], bfloat noundef [[RS1:%.*]], <vscale x 32 x bfloat> [[VS2:%.*]], i64 noundef [[VL:%.*]]) #[[ATTR0]] {
+// CHECK-RV64-NEXT: [[ENTRY:.*:]]
+// CHECK-RV64-NEXT: [[TMP0:%.*]] = call <vscale x 32 x bfloat> @llvm.riscv.vfmsub.nxv32bf16.bf16.i64(<vscale x 32 x bfloat> [[VD]], bfloat [[RS1]], <vscale x 32 x bfloat> [[VS2]], i64 7, i64 [[VL]], i64 2)
+// CHECK-RV64-NEXT: ret <vscale x 32 x bfloat> [[TMP0]]
+//
+vbfloat16m8_t test_vfmsub_vf_bf16m8_tu(vbfloat16m8_t vd, __bf16 rs1, vbfloat16m8_t vs2, size_t vl) {
+ return __riscv_vfmsub_tu(vd, rs1, vs2, vl);
+}
+
+// CHECK-RV64-LABEL: define dso_local <vscale x 1 x bfloat> @test_vfmsub_vv_bf16mf4_tum(
+// CHECK-RV64-SAME: <vscale x 1 x i1> [[MASK:%.*]], <vscale x 1 x bfloat> [[VD:%.*]], <vscale x 1 x bfloat> [[VS1:%.*]], <vscale x 1 x bfloat> [[VS2:%.*]], i64 noundef [[VL:%.*]]) #[[ATTR0]] {
+// CHECK-RV64-NEXT: [[ENTRY:.*:]]
+// CHECK-RV64-NEXT: [[TMP0:%.*]] = call <vscale x 1 x bfloat> @llvm.riscv.vfmsub.mask.nxv1bf16.nxv1bf16.i64(<vscale x 1 x bfloat> [[VD]], <vscale x 1 x bfloat> [[VS1]], <vscale x 1 x bfloat> [[VS2]], <vscale x 1 x i1> [[MASK]], i64 7, i64 [[VL]], i64 2)
+// CHECK-RV64-NEXT: ret <vscale x 1 x bfloat> [[TMP0]]
+//
+vbfloat16mf4_t test_vfmsub_vv_bf16mf4_tum(vbool64_t mask, vbfloat16mf4_t vd, vbfloat16mf4_t vs1, vbfloat16mf4_t vs2, size_t vl) {
+ return __riscv_vfmsub_tum(mask, vd, vs1, vs2, vl);
+}
+
+// CHECK-RV64-LABEL: define dso_local <vscale x 1 x bfloat> @test_vfmsub_vf_bf16mf4_tum(
+// CHECK-RV64-SAME: <vscale x 1 x i1> [[MASK:%.*]], <vscale x 1 x bfloat> [[VD:%.*]], bfloat noundef [[RS1:%.*]], <vscale x 1 x bfloat> [[VS2:%.*]], i64 noundef [[VL:%.*]]) #[[ATTR0]] {
+// CHECK-RV64-NEXT: [[ENTRY:.*:]]
+// CHECK-RV64-NEXT: [[TMP0:%.*]] = call <vscale x 1 x bfloat> @llvm.riscv.vfmsub.mask.nxv1bf16.bf16.i64(<vscale x 1 x bfloat> [[VD]], bfloat [[RS1]], <vscale x 1 x bfloat> [[VS2]], <vscale x 1 x i1> [[MASK]], i64 7, i64 [[VL]], i64 2)
+// CHECK-RV64-NEXT: ret <vscale x 1 x bfloat> [[TMP0]]
+//
+vbfloat16mf4_t test_vfmsub_vf_bf16mf4_tum(vbool64_t mask, vbfloat16mf4_t vd, __bf16 rs1, vbfloat16mf4_t vs2, size_t vl) {
+ return __riscv_vfmsub_tum(mask, vd, rs1, vs2, vl);
+}
+
+// CHECK-RV64-LABEL: define dso_local <vscale x 2 x bfloat> @test_vfmsub_vv_bf16mf2_tum(
+// CHECK-RV64-SAME: <vscale x 2 x i1> [[MASK:%.*]], <vscale x 2 x bfloat> [[VD:%.*]], <vscale x 2 x bfloat> [[VS1:%.*]], <vscale x 2 x bfloat> [[VS2:%.*]], i64 noundef [[VL:%.*]]) #[[ATTR0]] {
+// CHECK-RV64-NEXT: [[ENTRY:.*:]]
+// CHECK-RV64-NEXT: [[TMP0:%.*]] = call <vscale x 2 x bfloat> @llvm.riscv.vfmsub.mask.nxv2bf16.nxv2bf16.i64(<vscale x 2 x bfloat> [[VD]], <vscale x 2 x bfloat> [[VS1]], <vscale x 2 x bfloat> [[VS2]], <vscale x 2 x i1> [[MASK]], i64 7, i64 [[VL]], i64 2)
+// CHECK-RV64-NEXT: ret <vscale x 2 x bfloat> [[TMP0]]
+//
+vbfloat16mf2_t test_vfmsub_vv_bf16mf2_tum(vbool32_t mask, vbfloat16mf2_t vd, vbfloat16mf2_t vs1, vbfloat16mf2_t vs2, size_t vl) {
+ return __riscv_vfmsub_tum(mask, vd, vs1, vs2, vl);
+}
+
+// CHECK-RV64-LABEL: define dso_local <vscale x 2 x bfloat> @test_vfmsub_vf_bf16mf2_tum(
+// CHECK-RV64-SAME: <vscale x 2 x i1> [[MASK:%.*]], <vscale x 2 x bfloat> [[VD:%.*]], bfloat noundef [[RS1:%.*]], <vscale x 2 x bfloat> [[VS2:%.*]], i64 noundef [[VL:%.*]]) #[[ATTR0]] {
+// CHECK-RV64-NEXT: [[ENTRY:.*:]]
+// CHECK-RV64-NEXT: [[TMP0:%.*]] = call <vscale x 2 x bfloat> @llvm.riscv.vfmsub.mask.nxv2bf16.bf16.i64(<vscale x 2 x bfloat> [[VD]], bfloat [[RS1]], <vscale x 2 x bfloat> [[VS2]], <vscale x 2 x i1> [[MASK]], i64 7, i64 [[VL]], i64 2)
+// CHECK-RV64-NEXT: ret <vscale x 2 x bfloat> [[TMP0]]
+//
+vbfloat16mf2_t test_vfmsub_vf_bf16mf2_tum(vbool32_t mask, vbfloat16mf2_t vd, __bf16 rs1, vbfloat16mf2_t vs2, size_t vl) {
+ return __riscv_vfmsub_tum(mask, vd, rs1, vs2, vl);
+}
+
+// CHECK-RV64-LABEL: define dso_local <vscale x 4 x bfloat> @test_vfmsub_vv_bf16m1_tum(
+// CHECK-RV64-SAME: <vscale x 4 x i1> [[MASK:%.*]], <vscale x 4 x bfloat> [[VD:%.*]], <vscale x 4 x bfloat> [[VS1:%.*]], <vscale x 4 x bfloat> [[VS2:%.*]], i64 noundef [[VL:%.*]]) #[[ATTR0]] {
+// CHECK-RV64-NEXT: [[ENTRY:.*:]]
+// CHECK-RV64-NEXT: [[TMP0:%.*]] = call <vscale x 4 x bfloat> @llvm.riscv.vfmsub.mask.nxv4bf16.nxv4bf16.i64(<vscale x 4 x bfloat> [[VD]], <vscale x 4 x bfloat> [[VS1]], <vscale x 4 x bfloat> [[VS2]], <vscale x 4 x i1> [[MASK]], i64 7, i64 [[VL]], i64 2)
+// CHECK-RV64-NEXT: ret <vscale x 4 x bfloat> [[TMP0]]
+//
+vbfloat16m1_t test_vfmsub_vv_bf16m1_tum(vbool16_t mask, vbfloat16m1_t vd, vbfloat16m1_t vs1, vbfloat16m1_t vs2, size_t vl) {
+ return __riscv_vfmsub_tum(mask, vd, vs1, vs2, vl);
+}
+
+// CHECK-RV64-LABEL: define dso_local <vscale x 4 x bfloat> @test_vfmsub_vf_bf16m1_tum(
+// CHECK-RV64-SAME: <vscale x 4 x i1> [[MASK:%.*]], <vscale x 4 x bfloat> [[VD:%.*]], bfloat noundef [[RS1:%.*]], <vscale x 4 x bfloat> [[VS2:%.*]], i64 noundef [[VL:%.*]]) #[[ATTR0]] {
+// CHECK-RV64-NEXT: [[ENTRY:.*:]]
+// CHECK-RV64-NEXT: [[TMP0:%.*]] = call <vscale x 4 x bfloat> @llvm.riscv.vfmsub.mask.nxv4bf16.bf16.i64(<vscale x 4 x bfloat> [[VD]], bfloat [[RS1]], <vscale x 4 x bfloat> [[VS2]], <vscale x 4 x i1> [[MASK]], i64 7, i64 [[VL]], i64 2)
+// CHECK-RV64-NEXT: ret <vscale x 4 x bfloat> [[TMP0]]
+//
+vbfloat16m1_t test_vfmsub_vf_bf16m1_tum(vbool16_t mask, vbfloat16m1_t vd, __bf16 rs1, vbfloat16m1_t vs2, size_t vl) {
+ return __riscv_vfmsub_tum(mask, vd, rs1, vs2, vl);
+}
+
+// CHECK-RV64-LABEL: define dso_local <vscale x 8 x bfloat> @test_vfmsub_vv_bf16m2_tum(
+// CHECK-RV64-SAME: <vscale x 8 x i1> [[MASK:%.*]], <vscale x 8 x bfloat> [[VD:%.*]], <vscale x 8 x bfloat> [[VS1:%.*]], <vscale x 8 x bfloat> [[VS2:%.*]], i64 noundef [[VL:%.*]]) #[[ATTR0]] {
+// CHECK-RV64-NEXT: [[ENTRY:.*:]]
+// CHECK-RV64-NEXT: [[TMP0:%.*]] = call <vscale x 8 x bfloat> @llvm.riscv.vfmsub.mask.nxv8bf16.nxv8bf16.i64(<vscale x 8 x bfloat> [[VD]], <vscale x 8 x bfloat> [[VS1]], <vscale x 8 x bfloat> [[VS2]], <vscale x 8 x i1> [[MASK]], i64 7, i64 [[VL]], i64 2)
+// CHECK-RV64-NEXT: ret <vscale x 8 x bfloat> [[TMP0]]
+//
+vbfloat16m2_t test_vfmsub_vv_bf16m2_tum(vbool8_t mask, vbfloat16m2_t vd, vbfloat16m2_t vs1, vbfloat16m2_t vs2, size_t vl) {
+ return __riscv_vfmsub_tum(mask, vd, vs1, vs2, vl);
+}
+
+// CHECK-RV64-LABEL: define dso_local <vscale x 8 x bfloat> @test_vfmsub_vf_bf16m2_tum(
+// CHECK-RV64-SAME: <vscale x 8 x i1> [[MASK:%.*]], <vscale x 8 x bfloat> [[VD:%.*]], bfloat noundef [[RS1:%.*]], <vscale x 8 x bfloat> [[VS2:%.*]], i64 noundef [[VL:%.*]]) #[[ATTR0]] {
+// CHECK-RV64-NEXT: [[ENTRY:.*:]]
+// CHECK-RV64-NEXT: [[TMP0:%.*]] = call <vscale x 8 x bfloat> @llvm.riscv.vfmsub.mask.nxv8bf16.bf16.i64(<vscale x 8 x bfloat> [[VD]], bfloat [[RS1]], <vscale x 8 x bfloat> [[VS2]], <vscale x 8 x i1> [[MASK]], i64 7, i64 [[VL]], i64 2)
+// CHECK-RV64-NEXT: ret <vscale x 8 x bfloat> [[TMP0]]
+//
+vbfloat16m2_t test_vfmsub_vf_bf16m2_tum(vbool8_t mask, vbfloat16m2_t vd, __bf16 rs1, vbfloat16m2_t vs2, size_t vl) {
+ return __riscv_vfmsub_tum(mask, vd, rs1, vs2, vl);
+}
+
+// CHECK-RV64-LABEL: define dso_local <vscale x 16 x bfloat> @test_vfmsub_vv_bf16m4_tum(
+// CHECK-RV64-SAME: <vscale x 16 x i1> [[MASK:%.*]], <vscale x 16 x bfloat> [[VD:%.*]], <vscale x 16 x bfloat> [[VS1:%.*]], <vscale x 16 x bfloat> [[VS2:%.*]], i64 noundef [[VL:%.*]]) #[[ATTR0]] {
+// CHECK-RV64-NEXT: [[ENTRY:.*:]]
+// CHECK-RV64-NEXT: [[TMP0:%.*]] = call <vscale x 16 x bfloat> @llvm.riscv.vfmsub.mask.nxv16bf16.nxv16bf16.i64(<vscale x 16 x bfloat> [[VD]], <vscale x 16 x bfloat> [[VS1]], <vscale x 16 x bfloat> [[VS2]], <vscale x 16 x i1> [[MASK]], i64 7, i64 [[VL]], i64 2)
+// CHECK-RV64-NEXT: ret <vscale x 16 x bfloat> [[TMP0]]
+//
+vbfloat16m4_t test_vfmsub_vv_bf16m4_tum(vbool4_t mask, vbfloat16m4_t vd, vbfloat16m4_t vs1, vbfloat16m4_t vs2, size_t vl) {
+ return __riscv_vfmsub_tum(mask, vd, vs1, vs2, vl);
+}
+
+// CHECK-RV64-LABEL: define dso_local <vscale x 16 x bfloat> @test_vfmsub_vf_bf16m4_tum(
+// CHECK-RV64-SAME: <vscale x 16 x i1> [[MASK:%.*]], <vscale x 16 x bfloat> [[VD:%.*]], bfloat noundef [[RS1:%.*]], <vscale x 16 x bfloat> [[VS2:%.*]], i64 noundef [[VL:%.*]]) #[[ATTR0]] {
+// CHECK-RV64-NEXT: [[ENTRY:.*:]]
+// CHECK-RV64-NEXT: [[TMP0:%.*]] = call <vscale x 16 x bfloat> @llvm.riscv.vfmsub.mask.nxv16bf16.bf16.i64(<vscale x 16 x bfloat> [[VD]], bfloat [[RS1]], <vscale x 16 x bfloat> [[VS2]], <vscale x 16 x i1> [[MASK]], i64 7, i64 [[VL]], i64 2)
+// CHECK-RV64-NEXT: ret <vscale x 16 x bfloat> [[TMP0]]
+//
+vbfloat16m4_t test_vfmsub_vf_bf16m4_tum(vbool4_t mask, vbfloat16m4_t vd, __bf16 rs1, vbfloat16m4_t vs2, size_t vl) {
+ return __riscv_vfmsub_tum(mask, vd, rs1, vs2, vl);
+}
+
+// CHECK-RV64-LABEL: define dso_local <vscale x 32 x bfloat> @test_vfmsub_vv_bf16m8_tum(
+// CHECK-RV64-SAME: <vscale x 32 x i1> [[MASK:%.*]], <vscale x 32 x bfloat> [[VD:%.*]], <vscale x 32 x bfloat> [[VS1:%.*]], <vscale x 32 x bfloat> [[VS2:%.*]], i64 noundef [[VL:%.*]]) #[[ATTR0]] {
+// CHECK-RV64-NEXT: [[ENTRY:.*:]]
+// CHECK-RV64-NEXT: [[TMP0:%.*]] = call <vscale x 32 x bfloat> @llvm.riscv.vfmsub.mask.nxv32bf16.nxv32bf16.i64(<vscale x 32 x bfloat> [[VD]], <vscale x 32 x bfloat> [[VS1]], <vscale x 32 x bfloat> [[VS2]], <vscale x 32 x i1> [[MASK]], i64 7, i64 [[VL]], i64 2)
+// CHECK-RV64-NEXT: ret <vscale x 32 x bfloat> [[TMP0]]
+//
+vbfloat16m8_t test_vfmsub_vv_bf16m8_tum(vbool2_t mask, vbfloat16m8_t vd, vbfloat16m8_t vs1, vbfloat16m8_t vs2, size_t vl) {
+ return __riscv_vfmsub_tum(mask, vd, vs1, vs2, vl);
+}
+
+// CHECK-RV64-LABEL: define dso_local <vscale x 32 x bfloat> @test_vfmsub_vf_bf16m8_tum(
+// CHECK-RV64-SAME: <vscale x 32 x i1> [[MASK:%.*]], <vscale x 32 x bfloat> [[VD:%.*]], bfloat noundef [[RS1:%.*]], <vscale x 32 x bfloat> [[VS2:%.*]], i64 noundef [[VL:%.*]]) #[[ATTR0]] {
+// CHECK-RV64-NEXT: [[ENTRY:.*:]]
+// CHECK-RV64-NEXT: [[TMP0:%.*]] = call <vscale x 32 x bfloat> @llvm.riscv.vfmsub.mask.nxv32bf16.bf16.i64(<vscale x 32 x bfloat> [[VD]], bfloat [[RS1]], <vscale x 32 x bfloat> [[VS2]], <vscale x 32 x i1> [[MASK]], i64 7, i64 [[VL]], i64 2)
+// CHECK-RV64-NEXT: ret <vscale x 32 x bfloat> [[TMP0]]
+//
+vbfloat16m8_t test_vfmsub_vf_bf16m8_tum(vbool2_t mask, vbfloat16m8_t vd, __bf16 rs1, vbfloat16m8_t vs2, size_t vl) {
+ return __riscv_vfmsub_tum(mask, vd, rs1, vs2, vl);
+}
+
+// CHECK-RV64-LABEL: define dso_local <vscale x 1 x bfloat> @test_vfmsub_vv_bf16mf4_tumu(
+// CHECK-RV64-SAME: <vscale x 1 x i1> [[MASK:%.*]], <vscale x 1 x bfloat> [[VD:%.*]], <vscale x 1 x bfloat> [[VS1:%.*]], <vscale x 1 x bfloat> [[VS2:%.*]], i64 noundef [[VL:%.*]]) #[[ATTR0]] {
+// CHECK-RV64-NEXT: [[ENTRY:.*:]]
+// CHECK-RV64-NEXT: [[TMP0:%.*]] = call <vscale x 1 x bfloat> @llvm.riscv.vfmsub.mask.nxv1bf16.nxv1bf16.i64(<vscale x 1 x bfloat> [[VD]], <vscale x 1 x bfloat> [[VS1]], <vscale x 1 x bfloat> [[VS2]], <vscale x 1 x i1> [[MASK]], i64 7, i64 [[VL]], i64 0)
+// CHECK-RV64-NEXT: ret <vscale x 1 x bfloat> [[TMP0]]
+//
+vbfloat16mf4_t test_vfmsub_vv_bf16mf4_tumu(vbool64_t mask, vbfloat16mf4_t vd, vbfloat16mf4_t vs1, vbfloat16mf4_t vs2, size_t vl) {
+ return __riscv_vfmsub_tumu(mask, vd, vs1, vs2, vl);
+}
+
+// CHECK-RV64-LABEL: define dso_local <vscale x 1 x bfloat> @test_vfmsub_vf_bf16mf4_tumu(
+// CHECK-RV64-SAME: <vscale x 1 x i1> [[MASK:%.*]], <vscale x 1 x bfloat> [[VD:%.*]], bfloat noundef [[RS1:%.*]], <vscale x 1 x bfloat> [[VS2:%.*]], i64 noundef [[VL:%.*]]) #[[ATTR0]] {
+// CHECK-RV64-NEXT: [[ENTRY:.*:]]
+// CHECK-RV64-NEXT: [[TMP0:%.*]] = call <vscale x 1 x bfloat> @llvm.riscv.vfmsub.mask.nxv1bf16.bf16.i64(<vscale x 1 x bfloat> [[VD]], bfloat [[RS1]], <vscale x 1 x bfloat> [[VS2]], <vscale x 1 x i1> [[MASK]], i64 7, i64 [[VL]], i64 0)
+// CHECK-RV64-NEXT: ret <vscale x 1 x bfloat> [[TMP0]]
+//
+vbfloat16mf4_t test_vfmsub_vf_bf16mf4_tumu(vbool64_t mask, vbfloat16mf4_t vd, __bf16 rs1, vbfloat16mf4_t vs2, size_t vl) {
+ return __riscv_vfmsub_tumu(mask, vd, rs1, vs2, vl);
+}
+
+// CHECK-RV64-LABEL: define dso_local <vscale x 2 x bfloat> @test_vfmsub_vv_bf16mf2_tumu(
+// CHECK-RV64-SAME: <vscale x 2 x i1> [[MASK:%.*]], <vscale x 2 x bfloat> [[VD:%.*]], <vscale x 2 x bfloat> [[VS1:%.*]], <vscale x 2 x bfloat> [[VS2:%.*]], i64 noundef [[VL:%.*]]) #[[ATTR0]] {
+// CHECK-RV64-NEXT: [[ENTRY:.*:]]
+// CHECK-RV64-NEXT: [[TMP0:%.*]] = call <vscale x 2 x bfloat> @llvm.riscv.vfmsub.mask.nxv2bf16.nxv2bf16.i64(<vscale x 2 x bfloat> [[VD]], <vscale x 2 x bfloat> [[VS1]], <vscale x 2 x bfloat> [[VS2]], <vscale x 2 x i1> [[MASK]], i64 7, i64 [[VL]], i64 0)
+// CHECK-RV64-NEXT: ret <vscale x 2 x bfloat> [[TMP0]]
+//
+vbfloat16mf2_t test_vfmsub_vv_bf16mf2_tumu(vbool32_t mask, vbfloat16mf2_t vd, vbfloat16mf2_t vs1, vbfloat16mf2_t vs2, size_t vl) {
+ return __riscv_vfmsub_tumu(mask, vd, vs1, vs2, vl);
+}
+
+// CHECK-RV64-LABEL: define dso_local <vscale x 2 x bfloat> @test_vfmsub_vf_bf16mf2_tumu(
+// CHECK-RV64-SAME: <vscale x 2 x i1> [[MASK:%.*]], <vscale x 2 x bfloat> [[VD:%.*]], bfloat noundef [[RS1:%.*]], <vscale x 2 x bfloat> [[VS2:%.*]], i64 noundef [[VL:%.*]]) #[[ATTR0]] {
+// CHECK-RV64-NEXT: [[ENTRY:.*:]]
+// CHECK-RV64-NEXT: [[TMP0:%.*]] = call <vscale x 2 x bfloat> @llvm.riscv.vfmsub.mask.nxv2bf16.bf16.i64(<vscale x 2 x bfloat> [[VD]], bfloat [[RS1]], <vscale x 2 x bfloat> [[VS2]], <vscale x 2 x i1> [[MASK]], i64 7, i64 [[VL]], i64 0)
+// CHECK-RV64-NEXT: ret <vscale x 2 x bfloat> [[TMP0]]
+//
+vbfloat16mf2_t test_vfmsub_vf_bf16mf2_tumu(vbool32_t mask, vbfloat16mf2_t vd, __bf16 rs1, vbfloat16mf2_t vs2, size_t vl) {
+ return __riscv_vfmsub_tumu(mask, vd, rs1, vs2, vl);
+}
+
+// CHECK-RV64-LABEL: define dso_local <vscale x 4 x bfloat> @test_vfmsub_vv_bf16m1_tumu(
+// CHECK-RV64-SAME: <vscale x 4 x i1> [[MASK:%.*]], <vscale x 4 x bfloat> [[VD:%.*]], <vscale x 4 x bfloat> [[VS1:%.*]], <vscale x 4 x bfloat> [[VS2:%.*]], i64 noundef [[VL:%.*]]) #[[ATTR0]] {
+// CHECK-RV64-NEXT: [[ENTRY:.*:]]
+// CHECK-RV64-NEXT: [[TMP0:%.*]] = call <vscale x 4 x bfloat> @llvm.riscv.vfmsub.mask.nxv4bf16.nxv4bf16.i64(<vscale x 4 x bfloat> [[VD]], <vscale x 4 x bfloat> [[VS1]], <vscale x 4 x bfloat> [[VS2]], <vscale x 4 x i1> [[MASK]], i64 7, i64 [[VL]], i64 0)
+// CHECK-RV64-NEXT: ret <vscale x 4 x bfloat> [[TMP0]]
+//
+vbfloat16m1_t test_vfmsub_vv_bf16m1_tumu(vbool16_t mask, vbfloat16m1_t vd, vbfloat16m1_t vs1, vbfloat16m1_t vs2, size_t vl) {
+ return __riscv_vfmsub_tumu(mask, vd, vs1, vs2, vl);
+}
+
+// CHECK-RV64-LABEL: define dso_local <vscale x 4 x bfloat> @test_vfmsub_vf_bf16m1_tumu(
+// CHECK-RV64-SAME: <vscale x 4 x i1> [[MASK:%.*]], <vscale x 4 x bfloat> [[VD:%.*]], bfloat noundef [[RS1:%.*]], <vscale x 4 x bfloat> [[VS2:%.*]], i64 noundef [[VL:%.*]]) #[[ATTR0]] {
+// CHECK-RV64-NEXT: [[ENTRY:.*:]]
+// CHECK-RV64-NEXT: [[TMP0:%.*]] = call <vscale x 4 x bfloat> @llvm.riscv.vfmsub.mask.nxv4bf16.bf16.i64(<vscale x 4 x bfloat> [[VD]], bfloat [[RS1]], <vscale x 4 x bfloat> [[VS2]], <vscale x 4 x i1> [[MASK]], i64 7, i64 [[VL]], i64 0)
+// CHECK-RV64-NEXT: ret <vscale x 4 x bfloat> [[TMP0]]
+//
+vbfloat16m1_t test_vfmsub_vf_bf16m1_tumu(vbool16_t mask, vbfloat16m1_t vd, __bf16 rs1, vbfloat16m1_t vs2, size_t vl) {
+ return __riscv_vfmsub_tumu(mask, vd, rs1, vs2, vl);
+}
+
+// CHECK-RV64-LABEL: define dso_local <vscale x 8 x bfloat> @test_vfmsub_vv_bf16m2_tumu(
+// CHECK-RV64-SAME: <vscale x 8 x i1> [[MASK:%.*]], <vscale x 8 x bfloat> [[VD:%.*]], <vscale x 8 x bfloat> [[VS1:%.*]], <vscale x 8 x bfloat> [[VS2:%.*]], i64 noundef [[VL:%.*]]) #[[ATTR0]] {
+// CHECK-RV64-NEXT: [[ENTRY:.*:]]
+// CHECK-RV64-NEXT: [[TMP0:%.*]] = call <vscale x 8 x bfloat> @llvm.riscv.vfmsub.mask.nxv8bf16.nxv8bf16.i64(<vscale x 8 x bfloat> [[VD]], <vscale x 8 x bfloat> [[VS1]], <vscale x 8 x bfloat> [[VS2]], <vscale x 8 x i1> [[MASK]], i64 7, i64 [[VL]], i64 0)
+// CHECK-RV64-NEXT: ret <vscale x 8 x bfloat> [[TMP0]]
+//
+vbfloat16m2_t test_vfmsub_vv_bf16m2_tumu(vbool8_t mask, vbfloat16m2_t vd, vbfloat16m2_t vs1, vbfloat16m2_t vs2, size_t vl) {
+ return __riscv_vfmsub_tumu(mask, vd, vs1, vs2, vl);
+}
+
+// CHECK-RV64-LABEL: define dso_local <vscale x 8 x bfloat> @test_vfmsub_vf_bf16m2_tumu(
+// CHECK-RV64-SAME: <vscale x 8 x i1> [[MASK:%.*]], <vscale x 8 x bfloat> [[VD:%.*]], bfloat noundef [[RS1:%.*]], <vscale x 8 x bfloat> [[VS2:%.*]], i64 noundef [[VL:%.*]]) #[[ATTR0]] {
+// CHECK-RV64-NEXT: [[ENTRY:.*:]]
+// CHECK-RV64-NEXT: [[TMP0:%.*]] = call <vscale x 8 x bfloat> @llvm.riscv.vfmsub.mask.nxv8bf16.bf16.i64(<vscale x 8 x bfloat> [[VD]], bfloat [[RS1]], <vscale x 8 x bfloat> [[VS2]], <vscale x 8 x i1> [[MASK]], i64 7, i64 [[VL]], i64 0)
+// CHECK-RV64-NEXT: ret <vscale x 8 x bfloat> [[TMP0]]
+//
+vbfloat16m2_t test_vfmsub_vf_bf16m2_tumu(vbool8_t mask, vbfloat16m2_t vd, __bf16 rs1, vbfloat16m2_t vs2, size_t vl) {
+ return __riscv_vfmsub_tumu(mask, vd, rs1, vs2, vl);
+}
+
+// CHECK-RV64-LABEL: define dso_local <vscale x 16 x bfloat> @test_vfmsub_vv_bf16m4_tumu(
+// CHECK-RV64-SAME: <vscale x 16 x i1> [[MASK:%.*]], <vscale x 16 x bfloat> [[VD:%.*]], <vscale x 16 x bfloat> [[VS1:%.*]], <vscale x 16 x bfloat> [[VS2:%.*]], i64 noundef [[VL:%.*]]) #[[ATTR0]] {
+// CHECK-RV64-NEXT: [[ENTRY:.*:]]
+// CHECK-RV64-NEXT: [[TMP0:%.*]] = call <vscale x 16 x bfloat> @llvm.riscv.vfmsub.mask.nxv16bf16.nxv16bf16.i64(<vscale x 16 x bfloat> [[VD]], <vscale x 16 x bfloat> [[VS1]], <vscale x 16 x bfloat> [[VS2]], <vscale x 16 x i1> [[MASK]], i64 7, i64 [[VL]], i64 0)
+// CHECK-RV64-NEXT: ret <vscale x 16 x bfloat> [[TMP0]]
+//
+vbfloat16m4_t test_vfmsub_vv_bf16m4_tumu(vbool4_t mask, vbfloat16m4_t vd, vbfloat16m4_t vs1, vbfloat16m4_t vs2, size_t vl) {
+ return __riscv_vfmsub_tumu(mask, vd, vs1, vs2, vl);
+}
+
+// CHECK-RV64-LABEL: define dso_local <vscale x 16 x bfloat> @test_vfmsub_vf_bf16m4_tumu(
+// CHECK-RV64-SAME: <vscale x 16 x i1> [[MASK:%.*]], <vscale x 16 x bfloat> [[VD:%.*]], bfloat noundef [[RS1:%.*]], <vscale x 16 x bfloat> [[VS2:%.*]], i64 noundef [[VL:%.*]]) #[[ATTR0]] {
+// CHECK-RV64-NEXT: [[ENTRY:.*:]]
+// CHECK-RV64-NEXT: [[TMP0:%.*]] = call <vscale x 16 x bfloat> @llvm.riscv.vfmsub.mask.nxv16bf16.bf16.i64(<vscale x 16 x bfloat> [[VD]], bfloat [[RS1]], <vscale x 16 x bfloat> [[VS2]], <vscale x 16 x i1> [[MASK]], i64 7, i64 [[VL]], i64 0)
+// CHECK-RV64-NEXT: ret <vscale x 16 x bfloat> [[TMP0]]
+//
+vbfloat16m4_t test_vfmsub_vf_bf16m4_tumu(vbool4_t mask, vbfloat16m4_t vd, __bf16 rs1, vbfloat16m4_t vs2, size_t vl) {
+ return __riscv_vfmsub_tumu(mask, vd, rs1, vs2, vl);
+}
+
+// CHECK-RV64-LABEL: define dso_local <vscale x 32 x bfloat> @test_vfmsub_vv_bf16m8_tumu(
+// CHECK-RV64-SAME: <vscale x 32 x i1> [[MASK:%.*]], <vscale x 32 x bfloat> [[VD:%.*]], <vscale x 32 x bfloat> [[VS1:%.*]], <vscale x 32 x bfloat> [[VS2:%.*]], i64 noundef [[VL:%.*]]) #[[ATTR0]] {
+// CHECK-RV64-NEXT: [[ENTRY:.*:]]
+// CHECK-RV64-NEXT: [[TMP0:%.*]] = call <vscale x 32 x bfloat> @llvm.riscv.vfmsub.mask.nxv32bf16.nxv32bf16.i64(<vscale x 32 x bfloat> [[VD]], <vscale x 32 x bfloat> [[VS1]], <vscale x 32 x bfloat> [[VS2]], <vscale x 32 x i1> [[MASK]], i64 7, i64 [[VL]], i64 0)
+// CHECK-RV64-NEXT: ret <vscale x 32 x bfloat> [[TMP0]]
+//
+vbfloat16m8_t test_vfmsub_vv_bf16m8_tumu(vbool2_t mask, vbfloat16m8_t vd, vbfloat16m8_t vs1, vbfloat16m8_t vs2, size_t vl) {
+ return __riscv_vfmsub_tumu(mask, vd, vs1, vs2, vl);
+}
+
+// CHECK-RV64-LABEL: define dso_local <vscale x 32 x bfloat> @test_vfmsub_vf_bf16m8_tumu(
+// CHECK-RV64-SAME: <vscale x 32 x i1> [[MASK:%.*]], <vscale x 32 x bfloat> [[VD:%.*]], bfloat noundef [[RS1:%.*]], <vscale x 32 x bfloat> [[VS2:%.*]], i64 noundef [[VL:%.*]]) #[[ATTR0]] {
+// CHECK-RV64-NEXT: [[ENTRY:.*:]]
+// CHECK-RV64-NEXT: [[TMP0:%.*]] = call <vscale x 32 x bfloat> @llvm.riscv.vfmsub.mask.nxv32bf16.bf16.i64(<vscale x 32 x bfloat> [[VD]], bfloat [[RS1]], <vscale x 32 x bfloat> [[VS2]], <vscale x 32 x i1> [[MASK]], i64 7, i64 [[VL]], i64 0)
+// CHECK-RV64-NEXT: ret <vscale x 32 x bfloat> [[TMP0]]
+//
+vbfloat16m8_t test_vfmsub_vf_bf16m8_tumu(vbool2_t mask, vbfloat16m8_t vd, __bf16 rs1, vbfloat16m8_t vs2, size_t vl) {
+ return __riscv_vfmsub_tumu(mask, vd, rs1, vs2, vl);
+}
+
+// CHECK-RV64-LABEL: define dso_local <vscale x 1 x bfloat> @test_vfmsub_vv_bf16mf4_mu(
+// CHECK-RV64-SAME: <vscale x 1 x i1> [[MASK:%.*]], <vscale x 1 x bfloat> [[VD:%.*]], <vscale x 1 x bfloat> [[VS1:%.*]], <vscale x 1 x bfloat> [[VS2:%.*]], i64 noundef [[VL:%.*]]) #[[ATTR0]] {
+// CHECK-RV64-NEXT: [[ENTRY:.*:]]
+// CHECK-RV64-NEXT: [[TMP0:%.*]] = call <vscale x 1 x bfloat> @llvm.riscv.vfmsub.mask.nxv1bf16.nxv1bf16.i64(<vscale x 1 x bfloat> [[VD]], <vscale x 1 x bfloat> [[VS1]], <vscale x 1 x bfloat> [[VS2]], <vscale x 1 x i1> [[MASK]], i64 7, i64 [[VL]], i64 1)
+// CHECK-RV64-NEXT: ret <vscale x 1 x bfloat> [[TMP0]]
+//
+vbfloat16mf4_t test_vfmsub_vv_bf16mf4_mu(vbool64_t mask, vbfloat16mf4_t vd, vbfloat16mf4_t vs1, vbfloat16mf4_t vs2, size_t vl) {
+ return __riscv_vfmsub_mu(mask, vd, vs1, vs2, vl);
+}
+
+// CHECK-RV64-LABEL: define dso_local <vscale x 1 x bfloat> @test_vfmsub_vf_bf16mf4_mu(
+// CHECK-RV64-SAME: <vscale x 1 x i1> [[MASK:%.*]], <vscale x 1 x bfloat> [[VD:%.*]], bfloat noundef [[RS1:%.*]], <vscale x 1 x bfloat> [[VS2:%.*]], i64 noundef [[VL:%.*]]) #[[ATTR0]] {
+// CHECK-RV64-NEXT: [[ENTRY:.*:]]
+// CHECK-RV64-NEXT: [[TMP0:%.*]] = call <vscale x 1 x bfloat> @llvm.riscv.vfmsub.mask.nxv1bf16.bf16.i64(<vscale x 1 x bfloat> [[VD]], bfloat [[RS1]], <vscale x 1 x bfloat> [[VS2]], <vscale x 1 x i1> [[MASK]], i64 7, i64 [[VL]], i64 1)
+// CHECK-RV64-NEXT: ret <vscale x 1 x bfloat> [[TMP0]]
+//
+vbfloat16mf4_t test_vfmsub_vf_bf16mf4_mu(vbool64_t mask, vbfloat16mf4_t vd, __bf16 rs1, vbfloat16mf4_t vs2, size_t vl) {
+ return __riscv_vfmsub_mu(mask, vd, rs1, vs2, vl);
+}
+
+// CHECK-RV64-LABEL: define dso_local <vscale x 2 x bfloat> @test_vfmsub_vv_bf16mf2_mu(
+// CHECK-RV64-SAME: <vscale x 2 x i1> [[MASK:%.*]], <vscale x 2 x bfloat> [[VD:%.*]], <vscale x 2 x bfloat> [[VS1:%.*]], <vscale x 2 x bfloat> [[VS2:%.*]], i64 noundef [[VL:%.*]]) #[[ATTR0]] {
+// CHECK-RV64-NEXT: [[ENTRY:.*:]]
+// CHECK-RV64-NEXT: [[TMP0:%.*]] = call <vscale x 2 x bfloat> @llvm.riscv.vfmsub.mask.nxv2bf16.nxv2bf16.i64(<vscale x 2 x bfloat> [[VD]], <vscale x 2 x bfloat> [[VS1]], <vscale x 2 x bfloat> [[VS2]], <vscale x 2 x i1> [[MASK]], i64 7, i64 [[VL]], i64 1)
+// CHECK-RV64-NEXT: ret <vscale x 2 x bfloat> [[TMP0]]
+//
+vbfloat16mf2_t test_vfmsub_vv_bf16mf2_mu(vbool32_t mask, vbfloat16mf2_t vd, vbfloat16mf2_t vs1, vbfloat16mf2_t vs2, size_t vl) {
+ return __riscv_vfmsub_mu(mask, vd, vs1, vs2, vl);
+}
+
+// CHECK-RV64-LABEL: define dso_local <vscale x 2 x bfloat> @test_vfmsub_vf_bf16mf2_mu(
+// CHECK-RV64-SAME: <vscale x 2 x i1> [[MASK:%.*]], <vscale x 2 x bfloat> [[VD:%.*]], bfloat noundef [[RS1:%.*]], <vscale x 2 x bfloat> [[VS2:%.*]], i64 noundef [[VL:%.*]]) #[[ATTR0]] {
+// CHECK-RV64-NEXT: [[ENTRY:.*:]]
+// CHECK-RV64-NEXT: [[TMP0:%.*]] = call <vscale x 2 x bfloat> @llvm.riscv.vfmsub.mask.nxv2bf16.bf16.i64(<vscale x 2 x bfloat> [[VD]], bfloat [[RS1]], <vscale x 2 x bfloat> [[VS2]], <vscale x 2 x i1> [[MASK]], i64 7, i64 [[VL]], i64 1)
+// CHECK-RV64-NEXT: ret <vscale x 2 x bfloat> [[TMP0]]
+//
+vbfloat16mf2_t test_vfmsub_vf_bf16mf2_mu(vbool32_t mask, vbfloat16mf2_t vd, __bf16 rs1, vbfloat16mf2_t vs2, size_t vl) {
+ return __riscv_vfmsub_mu(mask, vd, rs1, vs2, vl);
+}
+
+// CHECK-RV64-LABEL: define dso_local <vscale x 4 x bfloat> @test_vfmsub_vv_bf16m1_mu(
+// CHECK-RV64-SAME: <vscale x 4 x i1> [[MASK:%.*]], <vscale x 4 x bfloat> [[VD:%.*]], <vscale x 4 x bfloat> [[VS1:%.*]], <vscale x 4 x bfloat> [[VS2:%.*]], i64 noundef [[VL:%.*]]) #[[ATTR0]] {
+// CHECK-RV64-NEXT: [[ENTRY:.*:]]
+// CHECK-RV64-NEXT: [[TMP0:%.*]] = call <vscale x 4 x bfloat> @llvm.riscv.vfmsub.mask.nxv4bf16.nxv4bf16.i64(<vscale x 4 x bfloat> [[VD]], <vscale x 4 x bfloat> [[VS1]], <vscale x 4 x bfloat> [[VS2]], <vscale x 4 x i1> [[MASK]], i64 7, i64 [[VL]], i64 1)
+// CHECK-RV64-NEXT: ret <vscale x 4 x bfloat> [[TMP0]]
+//
+vbfloat16m1_t test_vfmsub_vv_bf16m1_mu(vbool16_t mask, vbfloat16m1_t vd, vbfloat16m1_t vs1, vbfloat16m1_t vs2, size_t vl) {
+ return __riscv_vfmsub_mu(mask, vd, vs1, vs2, vl);
+}
+
+// CHECK-RV64-LABEL: define dso_local <vscale x 4 x bfloat> @test_vfmsub_vf_bf16m1_mu(
+// CHECK-RV64-SAME: <vscale x 4 x i1> [[MASK:%.*]], <vscale x 4 x bfloat> [[VD:%.*]], bfloat noundef [[RS1:%.*]], <vscale x 4 x bfloat> [[VS2:%.*]], i64 noundef [[VL:%.*]]) #[[ATTR0]] {
+// CHECK-RV64-NEXT: [[ENTRY:.*:]]
+// CHECK-RV64-NEXT: [[TMP0:%.*]] = call <vscale x 4 x bfloat> @llvm.riscv.vfmsub.mask.nxv4bf16.bf16.i64(<vscale x 4 x bfloat> [[VD]], bfloat [[RS1]], <vscale x 4 x bfloat> [[VS2]], <vscale x 4 x i1> [[MASK]], i64 7, i64 [[VL]], i64 1)
+// CHECK-RV64-NEXT: ret <vscale x 4 x bfloat> [[TMP0]]
+//
+vbfloat16m1_t test_vfmsub_vf_bf16m1_mu(vbool16_t mask, vbfloat16m1_t vd, __bf16 rs1, vbfloat16m1_t vs2, size_t vl) {
+ return __riscv_vfmsub_mu(mask, vd, rs1, vs2, vl);
+}
+
+// CHECK-RV64-LABEL: define dso_local <vscale x 8 x bfloat> @test_vfmsub_vv_bf16m2_mu(
+// CHECK-RV64-SAME: <vscale x 8 x i1> [[MASK:%.*]], <vscale x 8 x bfloat> [[VD:%.*]], <vscale x 8 x bfloat> [[VS1:%.*]], <vscale x 8 x bfloat> [[VS2:%.*]], i64 noundef [[VL:%.*]]) #[[ATTR0]] {
+// CHECK-RV64-NEXT: [[ENTRY:.*:]]
+// CHECK-RV64-NEXT: [[TMP0:%.*]] = call <vscale x 8 x bfloat> @llvm.riscv.vfmsub.mask.nxv8bf16.nxv8bf16.i64(<vscale x 8 x bfloat> [[VD]], <vscale x 8 x bfloat> [[VS1]], <vscale x 8 x bfloat> [[VS2]], <vscale x 8 x i1> [[MASK]], i64 7, i64 [[VL]], i64 1)
+// CHECK-RV64-NEXT: ret <vscale x 8 x bfloat> [[TMP0]]
+//
+vbfloat16m2_t test_vfmsub_vv_bf16m2_mu(vbool8_t mask, vbfloat16m2_t vd, vbfloat16m2_t vs1, vbfloat16m2_t vs2, size_t vl) {
+ return __riscv_vfmsub_mu(mask, vd, vs1, vs2, vl);
+}
+
+// CHECK-RV64-LABEL: define dso_local <vscale x 8 x bfloat> @test_vfmsub_vf_bf16m2_mu(
+// CHECK-RV64-SAME: <vscale x 8 x i1> [[MASK:%.*]], <vscale x 8 x bfloat> [[VD:%.*]], bfloat noundef [[RS1:%.*]], <vscale x 8 x bfloat> [[VS2:%.*]], i64 noundef [[VL:%.*]]) #[[ATTR0]] {
+// CHECK-RV64-NEXT: [[ENTRY:.*:]]
+// CHECK-RV64-NEXT: [[TMP0:%.*]] = call <vscale x 8 x bfloat> @llvm.riscv.vfmsub.mask.nxv8bf16.bf16.i64(<vscale x 8 x bfloat> [[VD]], bfloat [[RS1]], <vscale x 8 x bfloat> [[VS2]], <vscale x 8 x i1> [[MASK]], i64 7, i64 [[VL]], i64 1)
+// CHECK-RV64-NEXT: ret <vscale x 8 x bfloat> [[TMP0]]
+//
+vbfloat16m2_t test_vfmsub_vf_bf16m2_mu(vbool8_t mask, vbfloat16m2_t vd, __bf16 rs1, vbfloat16m2_t vs2, size_t vl) {
+ return __riscv_vfmsub_mu(mask, vd, rs1, vs2, vl);
+}
+
+// CHECK-RV64-LABEL: define dso_local <vscale x 16 x bfloat> @test_vfmsub_vv_bf16m4_mu(
+// CHECK-RV64-SAME: <vscale x 16 x i1> [[MASK:%.*]], <vscale x 16 x bfloat> [[VD:%.*]], <vscale x 16 x bfloat> [[VS1:%.*]], <vscale x 16 x bfloat> [[VS2:%.*]], i64 noundef [[VL:%.*]]) #[[ATTR0]] {
+// CHECK-RV64-NEXT: [[ENTRY:.*:]]
+// CHECK-RV64-NEXT: [[TMP0:%.*]] = call <vscale x 16 x bfloat> @llvm.riscv.vfmsub.mask.nxv16bf16.nxv16bf16.i64(<vscale x 16 x bfloat> [[VD]], <vscale x 16 x bfloat> [[VS1]], <vscale x 16 x bfloat> [[VS2]], <vscale x 16 x i1> [[MASK]], i64 7, i64 [[VL]], i64 1)
+// CHECK-RV64-NEXT: ret <vscale x 16 x bfloat> [[TMP0]]
+//
+vbfloat16m4_t test_vfmsub_vv_bf16m4_mu(vbool4_t mask, vbfloat16m4_t vd, vbfloat16m4_t vs1, vbfloat16m4_t vs2, size_t vl) {
+ return __riscv_vfmsub_mu(mask, vd, vs1, vs2, vl);
+}
+
+// CHECK-RV64-LABEL: define dso_local <vscale x 16 x bfloat> @test_vfmsub_vf_bf16m4_mu(
+// CHECK-RV64-SAME: <vscale x 16 x i1> [[MASK:%.*]], <vscale x 16 x bfloat> [[VD:%.*]], bfloat noundef [[RS1:%.*]], <vscale x 16 x bfloat> [[VS2:%.*]], i64 noundef [[VL:%.*]]) #[[ATTR0]] {
+// CHECK-RV64-NEXT: [[ENTRY:.*:]]
+// CHECK-RV64-NEXT: [[TMP0:%.*]] = call <vscale x 16 x bfloat> @llvm.riscv.vfmsub.mask.nxv16bf16.bf16.i64(<vscale x 16 x bfloat> [[VD]], bfloat [[RS1]], <vscale x 16 x bfloat> [[VS2]], <vscale x 16 x i1> [[MASK]], i64 7, i64 [[VL]], i64 1)
+// CHECK-RV64-NEXT: ret <vscale x 16 x bfloat> [[TMP0]]
+//
+vbfloat16m4_t test_vfmsub_vf_bf16m4_mu(vbool4_t mask, vbfloat16m4_t vd, __bf16 rs1, vbfloat16m4_t vs2, size_t vl) {
+ return __riscv_vfmsub_mu(mask, vd, rs1, vs2, vl);
+}
+
+// CHECK-RV64-LABEL: define dso_local <vscale x 32 x bfloat> @test_vfmsub_vv_bf16m8_mu(
+// CHECK-RV64-SAME: <vscale x 32 x i1> [[MASK:%.*]], <vscale x 32 x bfloat> [[VD:%.*]], <vscale x 32 x bfloat> [[VS1:%.*]], <vscale x 32 x bfloat> [[VS2:%.*]], i64 noundef [[VL:%.*]]) #[[ATTR0]] {
+// CHECK-RV64-NEXT: [[ENTRY:.*:]]
+// CHECK-RV64-NEXT: [[TMP0:%.*]] = call <vscale x 32 x bfloat> @llvm.riscv.vfmsub.mask.nxv32bf16.nxv32bf16.i64(<vscale x 32 x bfloat> [[VD]], <vscale x 32 x bfloat> [[VS1]], <vscale x 32 x bfloat> [[VS2]], <vscale x 32 x i1> [[MASK]], i64 7, i64 [[VL]], i64 1)
+// CHECK-RV64-NEXT: ret <vscale x 32 x bfloat> [[TMP0]]
+//
+vbfloat16m8_t test_vfmsub_vv_bf16m8_mu(vbool2_t mask, vbfloat16m8_t vd, vbfloat16m8_t vs1, vbfloat16m8_t vs2, size_t vl) {
+ return __riscv_vfmsub_mu(mask, vd, vs1, vs2, vl);
+}
+
+// CHECK-RV64-LABEL: define dso_local <vscale x 32 x bfloat> @test_vfmsub_vf_bf16m8_mu(
+// CHECK-RV64-SAME: <vscale x 32 x i1> [[MASK:%.*]], <vscale x 32 x bfloat> [[VD:%.*]], bfloat noundef [[RS1:%.*]], <vscale x 32 x bfloat> [[VS2:%.*]], i64 noundef [[VL:%.*]]) #[[ATTR0]] {
+// CHECK-RV64-NEXT: [[ENTRY:.*:]]
+// CHECK-RV64-NEXT: [[TMP0:%.*]] = call <vscale x 32 x bfloat> @llvm.riscv.vfmsub.mask.nxv32bf16.bf16.i64(<vscale x 32 x bfloat> [[VD]], bfloat [[RS1]], <vscale x 32 x bfloat> [[VS2]], <vscale x 32 x i1> [[MASK]], i64 7, i64 [[VL]], i64 1)
+// CHECK-RV64-NEXT: ret <vscale x 32 x bfloat> [[TMP0]]
+//
+vbfloat16m8_t test_vfmsub_vf_bf16m8_mu(vbool2_t mask, vbfloat16m8_t vd, __bf16 rs1, vbfloat16m8_t vs2, size_t vl) {
+ return __riscv_vfmsub_mu(mask, vd, rs1, vs2, vl);
+}
+
diff --git a/clang/test/CodeGen/RISCV/rvv-intrinsics-autogenerated/zvfbfa/policy/overloaded/vfmul.c b/clang/test/CodeGen/RISCV/rvv-intrinsics-autogenerated/zvfbfa/policy/overloaded/vfmul.c
new file mode 100644
index 0000000..d1e726a9
--- /dev/null
+++ b/clang/test/CodeGen/RISCV/rvv-intrinsics-autogenerated/zvfbfa/policy/overloaded/vfmul.c
@@ -0,0 +1,489 @@
+// NOTE: Assertions have been autogenerated by utils/update_cc_test_checks.py UTC_ARGS: --version 5
+// REQUIRES: riscv-registered-target
+// RUN: %clang_cc1 -triple riscv64 -target-feature +v \
+// RUN: -target-feature +experimental-zvfbfa -disable-O0-optnone \
+// RUN: -emit-llvm %s -o - | opt -S -passes=mem2reg | \
+// RUN: FileCheck --check-prefix=CHECK-RV64 %s
+
+#include <riscv_vector.h>
+
+// CHECK-RV64-LABEL: define dso_local <vscale x 1 x bfloat> @test_vfmul_vv_bf16mf4_tu(
+// CHECK-RV64-SAME: <vscale x 1 x bfloat> [[MASKEDOFF:%.*]], <vscale x 1 x bfloat> [[OP1:%.*]], <vscale x 1 x bfloat> [[OP2:%.*]], i64 noundef [[VL:%.*]]) #[[ATTR0:[0-9]+]] {
+// CHECK-RV64-NEXT: [[ENTRY:.*:]]
+// CHECK-RV64-NEXT: [[TMP0:%.*]] = call <vscale x 1 x bfloat> @llvm.riscv.vfmul.nxv1bf16.nxv1bf16.i64(<vscale x 1 x bfloat> [[MASKEDOFF]], <vscale x 1 x bfloat> [[OP1]], <vscale x 1 x bfloat> [[OP2]], i64 7, i64 [[VL]])
+// CHECK-RV64-NEXT: ret <vscale x 1 x bfloat> [[TMP0]]
+//
+vbfloat16mf4_t test_vfmul_vv_bf16mf4_tu(vbfloat16mf4_t maskedoff, vbfloat16mf4_t op1, vbfloat16mf4_t op2, size_t vl) {
+ return __riscv_vfmul_tu(maskedoff, op1, op2, vl);
+}
+
+// CHECK-RV64-LABEL: define dso_local <vscale x 1 x bfloat> @test_vfmul_vf_bf16mf4_tu(
+// CHECK-RV64-SAME: <vscale x 1 x bfloat> [[MASKEDOFF:%.*]], <vscale x 1 x bfloat> [[OP1:%.*]], bfloat noundef [[OP2:%.*]], i64 noundef [[VL:%.*]]) #[[ATTR0]] {
+// CHECK-RV64-NEXT: [[ENTRY:.*:]]
+// CHECK-RV64-NEXT: [[TMP0:%.*]] = call <vscale x 1 x bfloat> @llvm.riscv.vfmul.nxv1bf16.bf16.i64(<vscale x 1 x bfloat> [[MASKEDOFF]], <vscale x 1 x bfloat> [[OP1]], bfloat [[OP2]], i64 7, i64 [[VL]])
+// CHECK-RV64-NEXT: ret <vscale x 1 x bfloat> [[TMP0]]
+//
+vbfloat16mf4_t test_vfmul_vf_bf16mf4_tu(vbfloat16mf4_t maskedoff, vbfloat16mf4_t op1, __bf16 op2, size_t vl) {
+ return __riscv_vfmul_tu(maskedoff, op1, op2, vl);
+}
+
+// CHECK-RV64-LABEL: define dso_local <vscale x 2 x bfloat> @test_vfmul_vv_bf16mf2_tu(
+// CHECK-RV64-SAME: <vscale x 2 x bfloat> [[MASKEDOFF:%.*]], <vscale x 2 x bfloat> [[OP1:%.*]], <vscale x 2 x bfloat> [[OP2:%.*]], i64 noundef [[VL:%.*]]) #[[ATTR0]] {
+// CHECK-RV64-NEXT: [[ENTRY:.*:]]
+// CHECK-RV64-NEXT: [[TMP0:%.*]] = call <vscale x 2 x bfloat> @llvm.riscv.vfmul.nxv2bf16.nxv2bf16.i64(<vscale x 2 x bfloat> [[MASKEDOFF]], <vscale x 2 x bfloat> [[OP1]], <vscale x 2 x bfloat> [[OP2]], i64 7, i64 [[VL]])
+// CHECK-RV64-NEXT: ret <vscale x 2 x bfloat> [[TMP0]]
+//
+vbfloat16mf2_t test_vfmul_vv_bf16mf2_tu(vbfloat16mf2_t maskedoff, vbfloat16mf2_t op1, vbfloat16mf2_t op2, size_t vl) {
+ return __riscv_vfmul_tu(maskedoff, op1, op2, vl);
+}
+
+// CHECK-RV64-LABEL: define dso_local <vscale x 2 x bfloat> @test_vfmul_vf_bf16mf2_tu(
+// CHECK-RV64-SAME: <vscale x 2 x bfloat> [[MASKEDOFF:%.*]], <vscale x 2 x bfloat> [[OP1:%.*]], bfloat noundef [[OP2:%.*]], i64 noundef [[VL:%.*]]) #[[ATTR0]] {
+// CHECK-RV64-NEXT: [[ENTRY:.*:]]
+// CHECK-RV64-NEXT: [[TMP0:%.*]] = call <vscale x 2 x bfloat> @llvm.riscv.vfmul.nxv2bf16.bf16.i64(<vscale x 2 x bfloat> [[MASKEDOFF]], <vscale x 2 x bfloat> [[OP1]], bfloat [[OP2]], i64 7, i64 [[VL]])
+// CHECK-RV64-NEXT: ret <vscale x 2 x bfloat> [[TMP0]]
+//
+vbfloat16mf2_t test_vfmul_vf_bf16mf2_tu(vbfloat16mf2_t maskedoff, vbfloat16mf2_t op1, __bf16 op2, size_t vl) {
+ return __riscv_vfmul_tu(maskedoff, op1, op2, vl);
+}
+
+// CHECK-RV64-LABEL: define dso_local <vscale x 4 x bfloat> @test_vfmul_vv_bf16m1_tu(
+// CHECK-RV64-SAME: <vscale x 4 x bfloat> [[MASKEDOFF:%.*]], <vscale x 4 x bfloat> [[OP1:%.*]], <vscale x 4 x bfloat> [[OP2:%.*]], i64 noundef [[VL:%.*]]) #[[ATTR0]] {
+// CHECK-RV64-NEXT: [[ENTRY:.*:]]
+// CHECK-RV64-NEXT: [[TMP0:%.*]] = call <vscale x 4 x bfloat> @llvm.riscv.vfmul.nxv4bf16.nxv4bf16.i64(<vscale x 4 x bfloat> [[MASKEDOFF]], <vscale x 4 x bfloat> [[OP1]], <vscale x 4 x bfloat> [[OP2]], i64 7, i64 [[VL]])
+// CHECK-RV64-NEXT: ret <vscale x 4 x bfloat> [[TMP0]]
+//
+vbfloat16m1_t test_vfmul_vv_bf16m1_tu(vbfloat16m1_t maskedoff, vbfloat16m1_t op1, vbfloat16m1_t op2, size_t vl) {
+ return __riscv_vfmul_tu(maskedoff, op1, op2, vl);
+}
+
+// CHECK-RV64-LABEL: define dso_local <vscale x 4 x bfloat> @test_vfmul_vf_bf16m1_tu(
+// CHECK-RV64-SAME: <vscale x 4 x bfloat> [[MASKEDOFF:%.*]], <vscale x 4 x bfloat> [[OP1:%.*]], bfloat noundef [[OP2:%.*]], i64 noundef [[VL:%.*]]) #[[ATTR0]] {
+// CHECK-RV64-NEXT: [[ENTRY:.*:]]
+// CHECK-RV64-NEXT: [[TMP0:%.*]] = call <vscale x 4 x bfloat> @llvm.riscv.vfmul.nxv4bf16.bf16.i64(<vscale x 4 x bfloat> [[MASKEDOFF]], <vscale x 4 x bfloat> [[OP1]], bfloat [[OP2]], i64 7, i64 [[VL]])
+// CHECK-RV64-NEXT: ret <vscale x 4 x bfloat> [[TMP0]]
+//
+vbfloat16m1_t test_vfmul_vf_bf16m1_tu(vbfloat16m1_t maskedoff, vbfloat16m1_t op1, __bf16 op2, size_t vl) {
+ return __riscv_vfmul_tu(maskedoff, op1, op2, vl);
+}
+
+// CHECK-RV64-LABEL: define dso_local <vscale x 8 x bfloat> @test_vfmul_vv_bf16m2_tu(
+// CHECK-RV64-SAME: <vscale x 8 x bfloat> [[MASKEDOFF:%.*]], <vscale x 8 x bfloat> [[OP1:%.*]], <vscale x 8 x bfloat> [[OP2:%.*]], i64 noundef [[VL:%.*]]) #[[ATTR0]] {
+// CHECK-RV64-NEXT: [[ENTRY:.*:]]
+// CHECK-RV64-NEXT: [[TMP0:%.*]] = call <vscale x 8 x bfloat> @llvm.riscv.vfmul.nxv8bf16.nxv8bf16.i64(<vscale x 8 x bfloat> [[MASKEDOFF]], <vscale x 8 x bfloat> [[OP1]], <vscale x 8 x bfloat> [[OP2]], i64 7, i64 [[VL]])
+// CHECK-RV64-NEXT: ret <vscale x 8 x bfloat> [[TMP0]]
+//
+vbfloat16m2_t test_vfmul_vv_bf16m2_tu(vbfloat16m2_t maskedoff, vbfloat16m2_t op1, vbfloat16m2_t op2, size_t vl) {
+ return __riscv_vfmul_tu(maskedoff, op1, op2, vl);
+}
+
+// CHECK-RV64-LABEL: define dso_local <vscale x 8 x bfloat> @test_vfmul_vf_bf16m2_tu(
+// CHECK-RV64-SAME: <vscale x 8 x bfloat> [[MASKEDOFF:%.*]], <vscale x 8 x bfloat> [[OP1:%.*]], bfloat noundef [[OP2:%.*]], i64 noundef [[VL:%.*]]) #[[ATTR0]] {
+// CHECK-RV64-NEXT: [[ENTRY:.*:]]
+// CHECK-RV64-NEXT: [[TMP0:%.*]] = call <vscale x 8 x bfloat> @llvm.riscv.vfmul.nxv8bf16.bf16.i64(<vscale x 8 x bfloat> [[MASKEDOFF]], <vscale x 8 x bfloat> [[OP1]], bfloat [[OP2]], i64 7, i64 [[VL]])
+// CHECK-RV64-NEXT: ret <vscale x 8 x bfloat> [[TMP0]]
+//
+vbfloat16m2_t test_vfmul_vf_bf16m2_tu(vbfloat16m2_t maskedoff, vbfloat16m2_t op1, __bf16 op2, size_t vl) {
+ return __riscv_vfmul_tu(maskedoff, op1, op2, vl);
+}
+
+// CHECK-RV64-LABEL: define dso_local <vscale x 16 x bfloat> @test_vfmul_vv_bf16m4_tu(
+// CHECK-RV64-SAME: <vscale x 16 x bfloat> [[MASKEDOFF:%.*]], <vscale x 16 x bfloat> [[OP1:%.*]], <vscale x 16 x bfloat> [[OP2:%.*]], i64 noundef [[VL:%.*]]) #[[ATTR0]] {
+// CHECK-RV64-NEXT: [[ENTRY:.*:]]
+// CHECK-RV64-NEXT: [[TMP0:%.*]] = call <vscale x 16 x bfloat> @llvm.riscv.vfmul.nxv16bf16.nxv16bf16.i64(<vscale x 16 x bfloat> [[MASKEDOFF]], <vscale x 16 x bfloat> [[OP1]], <vscale x 16 x bfloat> [[OP2]], i64 7, i64 [[VL]])
+// CHECK-RV64-NEXT: ret <vscale x 16 x bfloat> [[TMP0]]
+//
+vbfloat16m4_t test_vfmul_vv_bf16m4_tu(vbfloat16m4_t maskedoff, vbfloat16m4_t op1, vbfloat16m4_t op2, size_t vl) {
+ return __riscv_vfmul_tu(maskedoff, op1, op2, vl);
+}
+
+// CHECK-RV64-LABEL: define dso_local <vscale x 16 x bfloat> @test_vfmul_vf_bf16m4_tu(
+// CHECK-RV64-SAME: <vscale x 16 x bfloat> [[MASKEDOFF:%.*]], <vscale x 16 x bfloat> [[OP1:%.*]], bfloat noundef [[OP2:%.*]], i64 noundef [[VL:%.*]]) #[[ATTR0]] {
+// CHECK-RV64-NEXT: [[ENTRY:.*:]]
+// CHECK-RV64-NEXT: [[TMP0:%.*]] = call <vscale x 16 x bfloat> @llvm.riscv.vfmul.nxv16bf16.bf16.i64(<vscale x 16 x bfloat> [[MASKEDOFF]], <vscale x 16 x bfloat> [[OP1]], bfloat [[OP2]], i64 7, i64 [[VL]])
+// CHECK-RV64-NEXT: ret <vscale x 16 x bfloat> [[TMP0]]
+//
+vbfloat16m4_t test_vfmul_vf_bf16m4_tu(vbfloat16m4_t maskedoff, vbfloat16m4_t op1, __bf16 op2, size_t vl) {
+ return __riscv_vfmul_tu(maskedoff, op1, op2, vl);
+}
+
+// CHECK-RV64-LABEL: define dso_local <vscale x 32 x bfloat> @test_vfmul_vv_bf16m8_tu(
+// CHECK-RV64-SAME: <vscale x 32 x bfloat> [[MASKEDOFF:%.*]], <vscale x 32 x bfloat> [[OP1:%.*]], <vscale x 32 x bfloat> [[OP2:%.*]], i64 noundef [[VL:%.*]]) #[[ATTR0]] {
+// CHECK-RV64-NEXT: [[ENTRY:.*:]]
+// CHECK-RV64-NEXT: [[TMP0:%.*]] = call <vscale x 32 x bfloat> @llvm.riscv.vfmul.nxv32bf16.nxv32bf16.i64(<vscale x 32 x bfloat> [[MASKEDOFF]], <vscale x 32 x bfloat> [[OP1]], <vscale x 32 x bfloat> [[OP2]], i64 7, i64 [[VL]])
+// CHECK-RV64-NEXT: ret <vscale x 32 x bfloat> [[TMP0]]
+//
+vbfloat16m8_t test_vfmul_vv_bf16m8_tu(vbfloat16m8_t maskedoff, vbfloat16m8_t op1, vbfloat16m8_t op2, size_t vl) {
+ return __riscv_vfmul_tu(maskedoff, op1, op2, vl);
+}
+
+// CHECK-RV64-LABEL: define dso_local <vscale x 32 x bfloat> @test_vfmul_vf_bf16m8_tu(
+// CHECK-RV64-SAME: <vscale x 32 x bfloat> [[MASKEDOFF:%.*]], <vscale x 32 x bfloat> [[OP1:%.*]], bfloat noundef [[OP2:%.*]], i64 noundef [[VL:%.*]]) #[[ATTR0]] {
+// CHECK-RV64-NEXT: [[ENTRY:.*:]]
+// CHECK-RV64-NEXT: [[TMP0:%.*]] = call <vscale x 32 x bfloat> @llvm.riscv.vfmul.nxv32bf16.bf16.i64(<vscale x 32 x bfloat> [[MASKEDOFF]], <vscale x 32 x bfloat> [[OP1]], bfloat [[OP2]], i64 7, i64 [[VL]])
+// CHECK-RV64-NEXT: ret <vscale x 32 x bfloat> [[TMP0]]
+//
+vbfloat16m8_t test_vfmul_vf_bf16m8_tu(vbfloat16m8_t maskedoff, vbfloat16m8_t op1, __bf16 op2, size_t vl) {
+ return __riscv_vfmul_tu(maskedoff, op1, op2, vl);
+}
+
+// CHECK-RV64-LABEL: define dso_local <vscale x 1 x bfloat> @test_vfmul_vv_bf16mf4_tum(
+// CHECK-RV64-SAME: <vscale x 1 x i1> [[MASK:%.*]], <vscale x 1 x bfloat> [[MASKEDOFF:%.*]], <vscale x 1 x bfloat> [[OP1:%.*]], <vscale x 1 x bfloat> [[OP2:%.*]], i64 noundef [[VL:%.*]]) #[[ATTR0]] {
+// CHECK-RV64-NEXT: [[ENTRY:.*:]]
+// CHECK-RV64-NEXT: [[TMP0:%.*]] = call <vscale x 1 x bfloat> @llvm.riscv.vfmul.mask.nxv1bf16.nxv1bf16.i64(<vscale x 1 x bfloat> [[MASKEDOFF]], <vscale x 1 x bfloat> [[OP1]], <vscale x 1 x bfloat> [[OP2]], <vscale x 1 x i1> [[MASK]], i64 7, i64 [[VL]], i64 2)
+// CHECK-RV64-NEXT: ret <vscale x 1 x bfloat> [[TMP0]]
+//
+vbfloat16mf4_t test_vfmul_vv_bf16mf4_tum(vbool64_t mask, vbfloat16mf4_t maskedoff, vbfloat16mf4_t op1, vbfloat16mf4_t op2, size_t vl) {
+ return __riscv_vfmul_tum(mask, maskedoff, op1, op2, vl);
+}
+
+// CHECK-RV64-LABEL: define dso_local <vscale x 1 x bfloat> @test_vfmul_vf_bf16mf4_tum(
+// CHECK-RV64-SAME: <vscale x 1 x i1> [[MASK:%.*]], <vscale x 1 x bfloat> [[MASKEDOFF:%.*]], <vscale x 1 x bfloat> [[OP1:%.*]], bfloat noundef [[OP2:%.*]], i64 noundef [[VL:%.*]]) #[[ATTR0]] {
+// CHECK-RV64-NEXT: [[ENTRY:.*:]]
+// CHECK-RV64-NEXT: [[TMP0:%.*]] = call <vscale x 1 x bfloat> @llvm.riscv.vfmul.mask.nxv1bf16.bf16.i64(<vscale x 1 x bfloat> [[MASKEDOFF]], <vscale x 1 x bfloat> [[OP1]], bfloat [[OP2]], <vscale x 1 x i1> [[MASK]], i64 7, i64 [[VL]], i64 2)
+// CHECK-RV64-NEXT: ret <vscale x 1 x bfloat> [[TMP0]]
+//
+vbfloat16mf4_t test_vfmul_vf_bf16mf4_tum(vbool64_t mask, vbfloat16mf4_t maskedoff, vbfloat16mf4_t op1, __bf16 op2, size_t vl) {
+ return __riscv_vfmul_tum(mask, maskedoff, op1, op2, vl);
+}
+
+// CHECK-RV64-LABEL: define dso_local <vscale x 2 x bfloat> @test_vfmul_vv_bf16mf2_tum(
+// CHECK-RV64-SAME: <vscale x 2 x i1> [[MASK:%.*]], <vscale x 2 x bfloat> [[MASKEDOFF:%.*]], <vscale x 2 x bfloat> [[OP1:%.*]], <vscale x 2 x bfloat> [[OP2:%.*]], i64 noundef [[VL:%.*]]) #[[ATTR0]] {
+// CHECK-RV64-NEXT: [[ENTRY:.*:]]
+// CHECK-RV64-NEXT: [[TMP0:%.*]] = call <vscale x 2 x bfloat> @llvm.riscv.vfmul.mask.nxv2bf16.nxv2bf16.i64(<vscale x 2 x bfloat> [[MASKEDOFF]], <vscale x 2 x bfloat> [[OP1]], <vscale x 2 x bfloat> [[OP2]], <vscale x 2 x i1> [[MASK]], i64 7, i64 [[VL]], i64 2)
+// CHECK-RV64-NEXT: ret <vscale x 2 x bfloat> [[TMP0]]
+//
+vbfloat16mf2_t test_vfmul_vv_bf16mf2_tum(vbool32_t mask, vbfloat16mf2_t maskedoff, vbfloat16mf2_t op1, vbfloat16mf2_t op2, size_t vl) {
+ return __riscv_vfmul_tum(mask, maskedoff, op1, op2, vl);
+}
+
+// CHECK-RV64-LABEL: define dso_local <vscale x 2 x bfloat> @test_vfmul_vf_bf16mf2_tum(
+// CHECK-RV64-SAME: <vscale x 2 x i1> [[MASK:%.*]], <vscale x 2 x bfloat> [[MASKEDOFF:%.*]], <vscale x 2 x bfloat> [[OP1:%.*]], bfloat noundef [[OP2:%.*]], i64 noundef [[VL:%.*]]) #[[ATTR0]] {
+// CHECK-RV64-NEXT: [[ENTRY:.*:]]
+// CHECK-RV64-NEXT: [[TMP0:%.*]] = call <vscale x 2 x bfloat> @llvm.riscv.vfmul.mask.nxv2bf16.bf16.i64(<vscale x 2 x bfloat> [[MASKEDOFF]], <vscale x 2 x bfloat> [[OP1]], bfloat [[OP2]], <vscale x 2 x i1> [[MASK]], i64 7, i64 [[VL]], i64 2)
+// CHECK-RV64-NEXT: ret <vscale x 2 x bfloat> [[TMP0]]
+//
+vbfloat16mf2_t test_vfmul_vf_bf16mf2_tum(vbool32_t mask, vbfloat16mf2_t maskedoff, vbfloat16mf2_t op1, __bf16 op2, size_t vl) {
+ return __riscv_vfmul_tum(mask, maskedoff, op1, op2, vl);
+}
+
+// CHECK-RV64-LABEL: define dso_local <vscale x 4 x bfloat> @test_vfmul_vv_bf16m1_tum(
+// CHECK-RV64-SAME: <vscale x 4 x i1> [[MASK:%.*]], <vscale x 4 x bfloat> [[MASKEDOFF:%.*]], <vscale x 4 x bfloat> [[OP1:%.*]], <vscale x 4 x bfloat> [[OP2:%.*]], i64 noundef [[VL:%.*]]) #[[ATTR0]] {
+// CHECK-RV64-NEXT: [[ENTRY:.*:]]
+// CHECK-RV64-NEXT: [[TMP0:%.*]] = call <vscale x 4 x bfloat> @llvm.riscv.vfmul.mask.nxv4bf16.nxv4bf16.i64(<vscale x 4 x bfloat> [[MASKEDOFF]], <vscale x 4 x bfloat> [[OP1]], <vscale x 4 x bfloat> [[OP2]], <vscale x 4 x i1> [[MASK]], i64 7, i64 [[VL]], i64 2)
+// CHECK-RV64-NEXT: ret <vscale x 4 x bfloat> [[TMP0]]
+//
+vbfloat16m1_t test_vfmul_vv_bf16m1_tum(vbool16_t mask, vbfloat16m1_t maskedoff, vbfloat16m1_t op1, vbfloat16m1_t op2, size_t vl) {
+ return __riscv_vfmul_tum(mask, maskedoff, op1, op2, vl);
+}
+
+// CHECK-RV64-LABEL: define dso_local <vscale x 4 x bfloat> @test_vfmul_vf_bf16m1_tum(
+// CHECK-RV64-SAME: <vscale x 4 x i1> [[MASK:%.*]], <vscale x 4 x bfloat> [[MASKEDOFF:%.*]], <vscale x 4 x bfloat> [[OP1:%.*]], bfloat noundef [[OP2:%.*]], i64 noundef [[VL:%.*]]) #[[ATTR0]] {
+// CHECK-RV64-NEXT: [[ENTRY:.*:]]
+// CHECK-RV64-NEXT: [[TMP0:%.*]] = call <vscale x 4 x bfloat> @llvm.riscv.vfmul.mask.nxv4bf16.bf16.i64(<vscale x 4 x bfloat> [[MASKEDOFF]], <vscale x 4 x bfloat> [[OP1]], bfloat [[OP2]], <vscale x 4 x i1> [[MASK]], i64 7, i64 [[VL]], i64 2)
+// CHECK-RV64-NEXT: ret <vscale x 4 x bfloat> [[TMP0]]
+//
+vbfloat16m1_t test_vfmul_vf_bf16m1_tum(vbool16_t mask, vbfloat16m1_t maskedoff, vbfloat16m1_t op1, __bf16 op2, size_t vl) {
+ return __riscv_vfmul_tum(mask, maskedoff, op1, op2, vl);
+}
+
+// CHECK-RV64-LABEL: define dso_local <vscale x 8 x bfloat> @test_vfmul_vv_bf16m2_tum(
+// CHECK-RV64-SAME: <vscale x 8 x i1> [[MASK:%.*]], <vscale x 8 x bfloat> [[MASKEDOFF:%.*]], <vscale x 8 x bfloat> [[OP1:%.*]], <vscale x 8 x bfloat> [[OP2:%.*]], i64 noundef [[VL:%.*]]) #[[ATTR0]] {
+// CHECK-RV64-NEXT: [[ENTRY:.*:]]
+// CHECK-RV64-NEXT: [[TMP0:%.*]] = call <vscale x 8 x bfloat> @llvm.riscv.vfmul.mask.nxv8bf16.nxv8bf16.i64(<vscale x 8 x bfloat> [[MASKEDOFF]], <vscale x 8 x bfloat> [[OP1]], <vscale x 8 x bfloat> [[OP2]], <vscale x 8 x i1> [[MASK]], i64 7, i64 [[VL]], i64 2)
+// CHECK-RV64-NEXT: ret <vscale x 8 x bfloat> [[TMP0]]
+//
+vbfloat16m2_t test_vfmul_vv_bf16m2_tum(vbool8_t mask, vbfloat16m2_t maskedoff, vbfloat16m2_t op1, vbfloat16m2_t op2, size_t vl) {
+ return __riscv_vfmul_tum(mask, maskedoff, op1, op2, vl);
+}
+
+// CHECK-RV64-LABEL: define dso_local <vscale x 8 x bfloat> @test_vfmul_vf_bf16m2_tum(
+// CHECK-RV64-SAME: <vscale x 8 x i1> [[MASK:%.*]], <vscale x 8 x bfloat> [[MASKEDOFF:%.*]], <vscale x 8 x bfloat> [[OP1:%.*]], bfloat noundef [[OP2:%.*]], i64 noundef [[VL:%.*]]) #[[ATTR0]] {
+// CHECK-RV64-NEXT: [[ENTRY:.*:]]
+// CHECK-RV64-NEXT: [[TMP0:%.*]] = call <vscale x 8 x bfloat> @llvm.riscv.vfmul.mask.nxv8bf16.bf16.i64(<vscale x 8 x bfloat> [[MASKEDOFF]], <vscale x 8 x bfloat> [[OP1]], bfloat [[OP2]], <vscale x 8 x i1> [[MASK]], i64 7, i64 [[VL]], i64 2)
+// CHECK-RV64-NEXT: ret <vscale x 8 x bfloat> [[TMP0]]
+//
+vbfloat16m2_t test_vfmul_vf_bf16m2_tum(vbool8_t mask, vbfloat16m2_t maskedoff, vbfloat16m2_t op1, __bf16 op2, size_t vl) {
+ return __riscv_vfmul_tum(mask, maskedoff, op1, op2, vl);
+}
+
+// CHECK-RV64-LABEL: define dso_local <vscale x 16 x bfloat> @test_vfmul_vv_bf16m4_tum(
+// CHECK-RV64-SAME: <vscale x 16 x i1> [[MASK:%.*]], <vscale x 16 x bfloat> [[MASKEDOFF:%.*]], <vscale x 16 x bfloat> [[OP1:%.*]], <vscale x 16 x bfloat> [[OP2:%.*]], i64 noundef [[VL:%.*]]) #[[ATTR0]] {
+// CHECK-RV64-NEXT: [[ENTRY:.*:]]
+// CHECK-RV64-NEXT: [[TMP0:%.*]] = call <vscale x 16 x bfloat> @llvm.riscv.vfmul.mask.nxv16bf16.nxv16bf16.i64(<vscale x 16 x bfloat> [[MASKEDOFF]], <vscale x 16 x bfloat> [[OP1]], <vscale x 16 x bfloat> [[OP2]], <vscale x 16 x i1> [[MASK]], i64 7, i64 [[VL]], i64 2)
+// CHECK-RV64-NEXT: ret <vscale x 16 x bfloat> [[TMP0]]
+//
+vbfloat16m4_t test_vfmul_vv_bf16m4_tum(vbool4_t mask, vbfloat16m4_t maskedoff, vbfloat16m4_t op1, vbfloat16m4_t op2, size_t vl) {
+ return __riscv_vfmul_tum(mask, maskedoff, op1, op2, vl);
+}
+
+// CHECK-RV64-LABEL: define dso_local <vscale x 16 x bfloat> @test_vfmul_vf_bf16m4_tum(
+// CHECK-RV64-SAME: <vscale x 16 x i1> [[MASK:%.*]], <vscale x 16 x bfloat> [[MASKEDOFF:%.*]], <vscale x 16 x bfloat> [[OP1:%.*]], bfloat noundef [[OP2:%.*]], i64 noundef [[VL:%.*]]) #[[ATTR0]] {
+// CHECK-RV64-NEXT: [[ENTRY:.*:]]
+// CHECK-RV64-NEXT: [[TMP0:%.*]] = call <vscale x 16 x bfloat> @llvm.riscv.vfmul.mask.nxv16bf16.bf16.i64(<vscale x 16 x bfloat> [[MASKEDOFF]], <vscale x 16 x bfloat> [[OP1]], bfloat [[OP2]], <vscale x 16 x i1> [[MASK]], i64 7, i64 [[VL]], i64 2)
+// CHECK-RV64-NEXT: ret <vscale x 16 x bfloat> [[TMP0]]
+//
+vbfloat16m4_t test_vfmul_vf_bf16m4_tum(vbool4_t mask, vbfloat16m4_t maskedoff, vbfloat16m4_t op1, __bf16 op2, size_t vl) {
+ return __riscv_vfmul_tum(mask, maskedoff, op1, op2, vl);
+}
+
+// CHECK-RV64-LABEL: define dso_local <vscale x 32 x bfloat> @test_vfmul_vv_bf16m8_tum(
+// CHECK-RV64-SAME: <vscale x 32 x i1> [[MASK:%.*]], <vscale x 32 x bfloat> [[MASKEDOFF:%.*]], <vscale x 32 x bfloat> [[OP1:%.*]], <vscale x 32 x bfloat> [[OP2:%.*]], i64 noundef [[VL:%.*]]) #[[ATTR0]] {
+// CHECK-RV64-NEXT: [[ENTRY:.*:]]
+// CHECK-RV64-NEXT: [[TMP0:%.*]] = call <vscale x 32 x bfloat> @llvm.riscv.vfmul.mask.nxv32bf16.nxv32bf16.i64(<vscale x 32 x bfloat> [[MASKEDOFF]], <vscale x 32 x bfloat> [[OP1]], <vscale x 32 x bfloat> [[OP2]], <vscale x 32 x i1> [[MASK]], i64 7, i64 [[VL]], i64 2)
+// CHECK-RV64-NEXT: ret <vscale x 32 x bfloat> [[TMP0]]
+//
+vbfloat16m8_t test_vfmul_vv_bf16m8_tum(vbool2_t mask, vbfloat16m8_t maskedoff, vbfloat16m8_t op1, vbfloat16m8_t op2, size_t vl) {
+ return __riscv_vfmul_tum(mask, maskedoff, op1, op2, vl);
+}
+
+// CHECK-RV64-LABEL: define dso_local <vscale x 32 x bfloat> @test_vfmul_vf_bf16m8_tum(
+// CHECK-RV64-SAME: <vscale x 32 x i1> [[MASK:%.*]], <vscale x 32 x bfloat> [[MASKEDOFF:%.*]], <vscale x 32 x bfloat> [[OP1:%.*]], bfloat noundef [[OP2:%.*]], i64 noundef [[VL:%.*]]) #[[ATTR0]] {
+// CHECK-RV64-NEXT: [[ENTRY:.*:]]
+// CHECK-RV64-NEXT: [[TMP0:%.*]] = call <vscale x 32 x bfloat> @llvm.riscv.vfmul.mask.nxv32bf16.bf16.i64(<vscale x 32 x bfloat> [[MASKEDOFF]], <vscale x 32 x bfloat> [[OP1]], bfloat [[OP2]], <vscale x 32 x i1> [[MASK]], i64 7, i64 [[VL]], i64 2)
+// CHECK-RV64-NEXT: ret <vscale x 32 x bfloat> [[TMP0]]
+//
+vbfloat16m8_t test_vfmul_vf_bf16m8_tum(vbool2_t mask, vbfloat16m8_t maskedoff, vbfloat16m8_t op1, __bf16 op2, size_t vl) {
+ return __riscv_vfmul_tum(mask, maskedoff, op1, op2, vl);
+}
+
+// CHECK-RV64-LABEL: define dso_local <vscale x 1 x bfloat> @test_vfmul_vv_bf16mf4_tumu(
+// CHECK-RV64-SAME: <vscale x 1 x i1> [[MASK:%.*]], <vscale x 1 x bfloat> [[MASKEDOFF:%.*]], <vscale x 1 x bfloat> [[OP1:%.*]], <vscale x 1 x bfloat> [[OP2:%.*]], i64 noundef [[VL:%.*]]) #[[ATTR0]] {
+// CHECK-RV64-NEXT: [[ENTRY:.*:]]
+// CHECK-RV64-NEXT: [[TMP0:%.*]] = call <vscale x 1 x bfloat> @llvm.riscv.vfmul.mask.nxv1bf16.nxv1bf16.i64(<vscale x 1 x bfloat> [[MASKEDOFF]], <vscale x 1 x bfloat> [[OP1]], <vscale x 1 x bfloat> [[OP2]], <vscale x 1 x i1> [[MASK]], i64 7, i64 [[VL]], i64 0)
+// CHECK-RV64-NEXT: ret <vscale x 1 x bfloat> [[TMP0]]
+//
+vbfloat16mf4_t test_vfmul_vv_bf16mf4_tumu(vbool64_t mask, vbfloat16mf4_t maskedoff, vbfloat16mf4_t op1, vbfloat16mf4_t op2, size_t vl) {
+ return __riscv_vfmul_tumu(mask, maskedoff, op1, op2, vl);
+}
+
+// CHECK-RV64-LABEL: define dso_local <vscale x 1 x bfloat> @test_vfmul_vf_bf16mf4_tumu(
+// CHECK-RV64-SAME: <vscale x 1 x i1> [[MASK:%.*]], <vscale x 1 x bfloat> [[MASKEDOFF:%.*]], <vscale x 1 x bfloat> [[OP1:%.*]], bfloat noundef [[OP2:%.*]], i64 noundef [[VL:%.*]]) #[[ATTR0]] {
+// CHECK-RV64-NEXT: [[ENTRY:.*:]]
+// CHECK-RV64-NEXT: [[TMP0:%.*]] = call <vscale x 1 x bfloat> @llvm.riscv.vfmul.mask.nxv1bf16.bf16.i64(<vscale x 1 x bfloat> [[MASKEDOFF]], <vscale x 1 x bfloat> [[OP1]], bfloat [[OP2]], <vscale x 1 x i1> [[MASK]], i64 7, i64 [[VL]], i64 0)
+// CHECK-RV64-NEXT: ret <vscale x 1 x bfloat> [[TMP0]]
+//
+vbfloat16mf4_t test_vfmul_vf_bf16mf4_tumu(vbool64_t mask, vbfloat16mf4_t maskedoff, vbfloat16mf4_t op1, __bf16 op2, size_t vl) {
+ return __riscv_vfmul_tumu(mask, maskedoff, op1, op2, vl);
+}
+
+// CHECK-RV64-LABEL: define dso_local <vscale x 2 x bfloat> @test_vfmul_vv_bf16mf2_tumu(
+// CHECK-RV64-SAME: <vscale x 2 x i1> [[MASK:%.*]], <vscale x 2 x bfloat> [[MASKEDOFF:%.*]], <vscale x 2 x bfloat> [[OP1:%.*]], <vscale x 2 x bfloat> [[OP2:%.*]], i64 noundef [[VL:%.*]]) #[[ATTR0]] {
+// CHECK-RV64-NEXT: [[ENTRY:.*:]]
+// CHECK-RV64-NEXT: [[TMP0:%.*]] = call <vscale x 2 x bfloat> @llvm.riscv.vfmul.mask.nxv2bf16.nxv2bf16.i64(<vscale x 2 x bfloat> [[MASKEDOFF]], <vscale x 2 x bfloat> [[OP1]], <vscale x 2 x bfloat> [[OP2]], <vscale x 2 x i1> [[MASK]], i64 7, i64 [[VL]], i64 0)
+// CHECK-RV64-NEXT: ret <vscale x 2 x bfloat> [[TMP0]]
+//
+vbfloat16mf2_t test_vfmul_vv_bf16mf2_tumu(vbool32_t mask, vbfloat16mf2_t maskedoff, vbfloat16mf2_t op1, vbfloat16mf2_t op2, size_t vl) {
+ return __riscv_vfmul_tumu(mask, maskedoff, op1, op2, vl);
+}
+
+// CHECK-RV64-LABEL: define dso_local <vscale x 2 x bfloat> @test_vfmul_vf_bf16mf2_tumu(
+// CHECK-RV64-SAME: <vscale x 2 x i1> [[MASK:%.*]], <vscale x 2 x bfloat> [[MASKEDOFF:%.*]], <vscale x 2 x bfloat> [[OP1:%.*]], bfloat noundef [[OP2:%.*]], i64 noundef [[VL:%.*]]) #[[ATTR0]] {
+// CHECK-RV64-NEXT: [[ENTRY:.*:]]
+// CHECK-RV64-NEXT: [[TMP0:%.*]] = call <vscale x 2 x bfloat> @llvm.riscv.vfmul.mask.nxv2bf16.bf16.i64(<vscale x 2 x bfloat> [[MASKEDOFF]], <vscale x 2 x bfloat> [[OP1]], bfloat [[OP2]], <vscale x 2 x i1> [[MASK]], i64 7, i64 [[VL]], i64 0)
+// CHECK-RV64-NEXT: ret <vscale x 2 x bfloat> [[TMP0]]
+//
+vbfloat16mf2_t test_vfmul_vf_bf16mf2_tumu(vbool32_t mask, vbfloat16mf2_t maskedoff, vbfloat16mf2_t op1, __bf16 op2, size_t vl) {
+ return __riscv_vfmul_tumu(mask, maskedoff, op1, op2, vl);
+}
+
+// CHECK-RV64-LABEL: define dso_local <vscale x 4 x bfloat> @test_vfmul_vv_bf16m1_tumu(
+// CHECK-RV64-SAME: <vscale x 4 x i1> [[MASK:%.*]], <vscale x 4 x bfloat> [[MASKEDOFF:%.*]], <vscale x 4 x bfloat> [[OP1:%.*]], <vscale x 4 x bfloat> [[OP2:%.*]], i64 noundef [[VL:%.*]]) #[[ATTR0]] {
+// CHECK-RV64-NEXT: [[ENTRY:.*:]]
+// CHECK-RV64-NEXT: [[TMP0:%.*]] = call <vscale x 4 x bfloat> @llvm.riscv.vfmul.mask.nxv4bf16.nxv4bf16.i64(<vscale x 4 x bfloat> [[MASKEDOFF]], <vscale x 4 x bfloat> [[OP1]], <vscale x 4 x bfloat> [[OP2]], <vscale x 4 x i1> [[MASK]], i64 7, i64 [[VL]], i64 0)
+// CHECK-RV64-NEXT: ret <vscale x 4 x bfloat> [[TMP0]]
+//
+vbfloat16m1_t test_vfmul_vv_bf16m1_tumu(vbool16_t mask, vbfloat16m1_t maskedoff, vbfloat16m1_t op1, vbfloat16m1_t op2, size_t vl) {
+ return __riscv_vfmul_tumu(mask, maskedoff, op1, op2, vl);
+}
+
+// CHECK-RV64-LABEL: define dso_local <vscale x 4 x bfloat> @test_vfmul_vf_bf16m1_tumu(
+// CHECK-RV64-SAME: <vscale x 4 x i1> [[MASK:%.*]], <vscale x 4 x bfloat> [[MASKEDOFF:%.*]], <vscale x 4 x bfloat> [[OP1:%.*]], bfloat noundef [[OP2:%.*]], i64 noundef [[VL:%.*]]) #[[ATTR0]] {
+// CHECK-RV64-NEXT: [[ENTRY:.*:]]
+// CHECK-RV64-NEXT: [[TMP0:%.*]] = call <vscale x 4 x bfloat> @llvm.riscv.vfmul.mask.nxv4bf16.bf16.i64(<vscale x 4 x bfloat> [[MASKEDOFF]], <vscale x 4 x bfloat> [[OP1]], bfloat [[OP2]], <vscale x 4 x i1> [[MASK]], i64 7, i64 [[VL]], i64 0)
+// CHECK-RV64-NEXT: ret <vscale x 4 x bfloat> [[TMP0]]
+//
+vbfloat16m1_t test_vfmul_vf_bf16m1_tumu(vbool16_t mask, vbfloat16m1_t maskedoff, vbfloat16m1_t op1, __bf16 op2, size_t vl) {
+ return __riscv_vfmul_tumu(mask, maskedoff, op1, op2, vl);
+}
+
+// CHECK-RV64-LABEL: define dso_local <vscale x 8 x bfloat> @test_vfmul_vv_bf16m2_tumu(
+// CHECK-RV64-SAME: <vscale x 8 x i1> [[MASK:%.*]], <vscale x 8 x bfloat> [[MASKEDOFF:%.*]], <vscale x 8 x bfloat> [[OP1:%.*]], <vscale x 8 x bfloat> [[OP2:%.*]], i64 noundef [[VL:%.*]]) #[[ATTR0]] {
+// CHECK-RV64-NEXT: [[ENTRY:.*:]]
+// CHECK-RV64-NEXT: [[TMP0:%.*]] = call <vscale x 8 x bfloat> @llvm.riscv.vfmul.mask.nxv8bf16.nxv8bf16.i64(<vscale x 8 x bfloat> [[MASKEDOFF]], <vscale x 8 x bfloat> [[OP1]], <vscale x 8 x bfloat> [[OP2]], <vscale x 8 x i1> [[MASK]], i64 7, i64 [[VL]], i64 0)
+// CHECK-RV64-NEXT: ret <vscale x 8 x bfloat> [[TMP0]]
+//
+vbfloat16m2_t test_vfmul_vv_bf16m2_tumu(vbool8_t mask, vbfloat16m2_t maskedoff, vbfloat16m2_t op1, vbfloat16m2_t op2, size_t vl) {
+ return __riscv_vfmul_tumu(mask, maskedoff, op1, op2, vl);
+}
+
+// CHECK-RV64-LABEL: define dso_local <vscale x 8 x bfloat> @test_vfmul_vf_bf16m2_tumu(
+// CHECK-RV64-SAME: <vscale x 8 x i1> [[MASK:%.*]], <vscale x 8 x bfloat> [[MASKEDOFF:%.*]], <vscale x 8 x bfloat> [[OP1:%.*]], bfloat noundef [[OP2:%.*]], i64 noundef [[VL:%.*]]) #[[ATTR0]] {
+// CHECK-RV64-NEXT: [[ENTRY:.*:]]
+// CHECK-RV64-NEXT: [[TMP0:%.*]] = call <vscale x 8 x bfloat> @llvm.riscv.vfmul.mask.nxv8bf16.bf16.i64(<vscale x 8 x bfloat> [[MASKEDOFF]], <vscale x 8 x bfloat> [[OP1]], bfloat [[OP2]], <vscale x 8 x i1> [[MASK]], i64 7, i64 [[VL]], i64 0)
+// CHECK-RV64-NEXT: ret <vscale x 8 x bfloat> [[TMP0]]
+//
+vbfloat16m2_t test_vfmul_vf_bf16m2_tumu(vbool8_t mask, vbfloat16m2_t maskedoff, vbfloat16m2_t op1, __bf16 op2, size_t vl) {
+ return __riscv_vfmul_tumu(mask, maskedoff, op1, op2, vl);
+}
+
+// CHECK-RV64-LABEL: define dso_local <vscale x 16 x bfloat> @test_vfmul_vv_bf16m4_tumu(
+// CHECK-RV64-SAME: <vscale x 16 x i1> [[MASK:%.*]], <vscale x 16 x bfloat> [[MASKEDOFF:%.*]], <vscale x 16 x bfloat> [[OP1:%.*]], <vscale x 16 x bfloat> [[OP2:%.*]], i64 noundef [[VL:%.*]]) #[[ATTR0]] {
+// CHECK-RV64-NEXT: [[ENTRY:.*:]]
+// CHECK-RV64-NEXT: [[TMP0:%.*]] = call <vscale x 16 x bfloat> @llvm.riscv.vfmul.mask.nxv16bf16.nxv16bf16.i64(<vscale x 16 x bfloat> [[MASKEDOFF]], <vscale x 16 x bfloat> [[OP1]], <vscale x 16 x bfloat> [[OP2]], <vscale x 16 x i1> [[MASK]], i64 7, i64 [[VL]], i64 0)
+// CHECK-RV64-NEXT: ret <vscale x 16 x bfloat> [[TMP0]]
+//
+vbfloat16m4_t test_vfmul_vv_bf16m4_tumu(vbool4_t mask, vbfloat16m4_t maskedoff, vbfloat16m4_t op1, vbfloat16m4_t op2, size_t vl) {
+ return __riscv_vfmul_tumu(mask, maskedoff, op1, op2, vl);
+}
+
+// CHECK-RV64-LABEL: define dso_local <vscale x 16 x bfloat> @test_vfmul_vf_bf16m4_tumu(
+// CHECK-RV64-SAME: <vscale x 16 x i1> [[MASK:%.*]], <vscale x 16 x bfloat> [[MASKEDOFF:%.*]], <vscale x 16 x bfloat> [[OP1:%.*]], bfloat noundef [[OP2:%.*]], i64 noundef [[VL:%.*]]) #[[ATTR0]] {
+// CHECK-RV64-NEXT: [[ENTRY:.*:]]
+// CHECK-RV64-NEXT: [[TMP0:%.*]] = call <vscale x 16 x bfloat> @llvm.riscv.vfmul.mask.nxv16bf16.bf16.i64(<vscale x 16 x bfloat> [[MASKEDOFF]], <vscale x 16 x bfloat> [[OP1]], bfloat [[OP2]], <vscale x 16 x i1> [[MASK]], i64 7, i64 [[VL]], i64 0)
+// CHECK-RV64-NEXT: ret <vscale x 16 x bfloat> [[TMP0]]
+//
+vbfloat16m4_t test_vfmul_vf_bf16m4_tumu(vbool4_t mask, vbfloat16m4_t maskedoff, vbfloat16m4_t op1, __bf16 op2, size_t vl) {
+ return __riscv_vfmul_tumu(mask, maskedoff, op1, op2, vl);
+}
+
+// CHECK-RV64-LABEL: define dso_local <vscale x 32 x bfloat> @test_vfmul_vv_bf16m8_tumu(
+// CHECK-RV64-SAME: <vscale x 32 x i1> [[MASK:%.*]], <vscale x 32 x bfloat> [[MASKEDOFF:%.*]], <vscale x 32 x bfloat> [[OP1:%.*]], <vscale x 32 x bfloat> [[OP2:%.*]], i64 noundef [[VL:%.*]]) #[[ATTR0]] {
+// CHECK-RV64-NEXT: [[ENTRY:.*:]]
+// CHECK-RV64-NEXT: [[TMP0:%.*]] = call <vscale x 32 x bfloat> @llvm.riscv.vfmul.mask.nxv32bf16.nxv32bf16.i64(<vscale x 32 x bfloat> [[MASKEDOFF]], <vscale x 32 x bfloat> [[OP1]], <vscale x 32 x bfloat> [[OP2]], <vscale x 32 x i1> [[MASK]], i64 7, i64 [[VL]], i64 0)
+// CHECK-RV64-NEXT: ret <vscale x 32 x bfloat> [[TMP0]]
+//
+vbfloat16m8_t test_vfmul_vv_bf16m8_tumu(vbool2_t mask, vbfloat16m8_t maskedoff, vbfloat16m8_t op1, vbfloat16m8_t op2, size_t vl) {
+ return __riscv_vfmul_tumu(mask, maskedoff, op1, op2, vl);
+}
+
+// CHECK-RV64-LABEL: define dso_local <vscale x 32 x bfloat> @test_vfmul_vf_bf16m8_tumu(
+// CHECK-RV64-SAME: <vscale x 32 x i1> [[MASK:%.*]], <vscale x 32 x bfloat> [[MASKEDOFF:%.*]], <vscale x 32 x bfloat> [[OP1:%.*]], bfloat noundef [[OP2:%.*]], i64 noundef [[VL:%.*]]) #[[ATTR0]] {
+// CHECK-RV64-NEXT: [[ENTRY:.*:]]
+// CHECK-RV64-NEXT: [[TMP0:%.*]] = call <vscale x 32 x bfloat> @llvm.riscv.vfmul.mask.nxv32bf16.bf16.i64(<vscale x 32 x bfloat> [[MASKEDOFF]], <vscale x 32 x bfloat> [[OP1]], bfloat [[OP2]], <vscale x 32 x i1> [[MASK]], i64 7, i64 [[VL]], i64 0)
+// CHECK-RV64-NEXT: ret <vscale x 32 x bfloat> [[TMP0]]
+//
+vbfloat16m8_t test_vfmul_vf_bf16m8_tumu(vbool2_t mask, vbfloat16m8_t maskedoff, vbfloat16m8_t op1, __bf16 op2, size_t vl) {
+ return __riscv_vfmul_tumu(mask, maskedoff, op1, op2, vl);
+}
+
+// CHECK-RV64-LABEL: define dso_local <vscale x 1 x bfloat> @test_vfmul_vv_bf16mf4_mu(
+// CHECK-RV64-SAME: <vscale x 1 x i1> [[MASK:%.*]], <vscale x 1 x bfloat> [[MASKEDOFF:%.*]], <vscale x 1 x bfloat> [[OP1:%.*]], <vscale x 1 x bfloat> [[OP2:%.*]], i64 noundef [[VL:%.*]]) #[[ATTR0]] {
+// CHECK-RV64-NEXT: [[ENTRY:.*:]]
+// CHECK-RV64-NEXT: [[TMP0:%.*]] = call <vscale x 1 x bfloat> @llvm.riscv.vfmul.mask.nxv1bf16.nxv1bf16.i64(<vscale x 1 x bfloat> [[MASKEDOFF]], <vscale x 1 x bfloat> [[OP1]], <vscale x 1 x bfloat> [[OP2]], <vscale x 1 x i1> [[MASK]], i64 7, i64 [[VL]], i64 1)
+// CHECK-RV64-NEXT: ret <vscale x 1 x bfloat> [[TMP0]]
+//
+vbfloat16mf4_t test_vfmul_vv_bf16mf4_mu(vbool64_t mask, vbfloat16mf4_t maskedoff, vbfloat16mf4_t op1, vbfloat16mf4_t op2, size_t vl) {
+ return __riscv_vfmul_mu(mask, maskedoff, op1, op2, vl);
+}
+
+// CHECK-RV64-LABEL: define dso_local <vscale x 1 x bfloat> @test_vfmul_vf_bf16mf4_mu(
+// CHECK-RV64-SAME: <vscale x 1 x i1> [[MASK:%.*]], <vscale x 1 x bfloat> [[MASKEDOFF:%.*]], <vscale x 1 x bfloat> [[OP1:%.*]], bfloat noundef [[OP2:%.*]], i64 noundef [[VL:%.*]]) #[[ATTR0]] {
+// CHECK-RV64-NEXT: [[ENTRY:.*:]]
+// CHECK-RV64-NEXT: [[TMP0:%.*]] = call <vscale x 1 x bfloat> @llvm.riscv.vfmul.mask.nxv1bf16.bf16.i64(<vscale x 1 x bfloat> [[MASKEDOFF]], <vscale x 1 x bfloat> [[OP1]], bfloat [[OP2]], <vscale x 1 x i1> [[MASK]], i64 7, i64 [[VL]], i64 1)
+// CHECK-RV64-NEXT: ret <vscale x 1 x bfloat> [[TMP0]]
+//
+vbfloat16mf4_t test_vfmul_vf_bf16mf4_mu(vbool64_t mask, vbfloat16mf4_t maskedoff, vbfloat16mf4_t op1, __bf16 op2, size_t vl) {
+ return __riscv_vfmul_mu(mask, maskedoff, op1, op2, vl);
+}
+
+// CHECK-RV64-LABEL: define dso_local <vscale x 2 x bfloat> @test_vfmul_vv_bf16mf2_mu(
+// CHECK-RV64-SAME: <vscale x 2 x i1> [[MASK:%.*]], <vscale x 2 x bfloat> [[MASKEDOFF:%.*]], <vscale x 2 x bfloat> [[OP1:%.*]], <vscale x 2 x bfloat> [[OP2:%.*]], i64 noundef [[VL:%.*]]) #[[ATTR0]] {
+// CHECK-RV64-NEXT: [[ENTRY:.*:]]
+// CHECK-RV64-NEXT: [[TMP0:%.*]] = call <vscale x 2 x bfloat> @llvm.riscv.vfmul.mask.nxv2bf16.nxv2bf16.i64(<vscale x 2 x bfloat> [[MASKEDOFF]], <vscale x 2 x bfloat> [[OP1]], <vscale x 2 x bfloat> [[OP2]], <vscale x 2 x i1> [[MASK]], i64 7, i64 [[VL]], i64 1)
+// CHECK-RV64-NEXT: ret <vscale x 2 x bfloat> [[TMP0]]
+//
+vbfloat16mf2_t test_vfmul_vv_bf16mf2_mu(vbool32_t mask, vbfloat16mf2_t maskedoff, vbfloat16mf2_t op1, vbfloat16mf2_t op2, size_t vl) {
+ return __riscv_vfmul_mu(mask, maskedoff, op1, op2, vl);
+}
+
+// CHECK-RV64-LABEL: define dso_local <vscale x 2 x bfloat> @test_vfmul_vf_bf16mf2_mu(
+// CHECK-RV64-SAME: <vscale x 2 x i1> [[MASK:%.*]], <vscale x 2 x bfloat> [[MASKEDOFF:%.*]], <vscale x 2 x bfloat> [[OP1:%.*]], bfloat noundef [[OP2:%.*]], i64 noundef [[VL:%.*]]) #[[ATTR0]] {
+// CHECK-RV64-NEXT: [[ENTRY:.*:]]
+// CHECK-RV64-NEXT: [[TMP0:%.*]] = call <vscale x 2 x bfloat> @llvm.riscv.vfmul.mask.nxv2bf16.bf16.i64(<vscale x 2 x bfloat> [[MASKEDOFF]], <vscale x 2 x bfloat> [[OP1]], bfloat [[OP2]], <vscale x 2 x i1> [[MASK]], i64 7, i64 [[VL]], i64 1)
+// CHECK-RV64-NEXT: ret <vscale x 2 x bfloat> [[TMP0]]
+//
+vbfloat16mf2_t test_vfmul_vf_bf16mf2_mu(vbool32_t mask, vbfloat16mf2_t maskedoff, vbfloat16mf2_t op1, __bf16 op2, size_t vl) {
+ return __riscv_vfmul_mu(mask, maskedoff, op1, op2, vl);
+}
+
+// CHECK-RV64-LABEL: define dso_local <vscale x 4 x bfloat> @test_vfmul_vv_bf16m1_mu(
+// CHECK-RV64-SAME: <vscale x 4 x i1> [[MASK:%.*]], <vscale x 4 x bfloat> [[MASKEDOFF:%.*]], <vscale x 4 x bfloat> [[OP1:%.*]], <vscale x 4 x bfloat> [[OP2:%.*]], i64 noundef [[VL:%.*]]) #[[ATTR0]] {
+// CHECK-RV64-NEXT: [[ENTRY:.*:]]
+// CHECK-RV64-NEXT: [[TMP0:%.*]] = call <vscale x 4 x bfloat> @llvm.riscv.vfmul.mask.nxv4bf16.nxv4bf16.i64(<vscale x 4 x bfloat> [[MASKEDOFF]], <vscale x 4 x bfloat> [[OP1]], <vscale x 4 x bfloat> [[OP2]], <vscale x 4 x i1> [[MASK]], i64 7, i64 [[VL]], i64 1)
+// CHECK-RV64-NEXT: ret <vscale x 4 x bfloat> [[TMP0]]
+//
+vbfloat16m1_t test_vfmul_vv_bf16m1_mu(vbool16_t mask, vbfloat16m1_t maskedoff, vbfloat16m1_t op1, vbfloat16m1_t op2, size_t vl) {
+ return __riscv_vfmul_mu(mask, maskedoff, op1, op2, vl);
+}
+
+// CHECK-RV64-LABEL: define dso_local <vscale x 4 x bfloat> @test_vfmul_vf_bf16m1_mu(
+// CHECK-RV64-SAME: <vscale x 4 x i1> [[MASK:%.*]], <vscale x 4 x bfloat> [[MASKEDOFF:%.*]], <vscale x 4 x bfloat> [[OP1:%.*]], bfloat noundef [[OP2:%.*]], i64 noundef [[VL:%.*]]) #[[ATTR0]] {
+// CHECK-RV64-NEXT: [[ENTRY:.*:]]
+// CHECK-RV64-NEXT: [[TMP0:%.*]] = call <vscale x 4 x bfloat> @llvm.riscv.vfmul.mask.nxv4bf16.bf16.i64(<vscale x 4 x bfloat> [[MASKEDOFF]], <vscale x 4 x bfloat> [[OP1]], bfloat [[OP2]], <vscale x 4 x i1> [[MASK]], i64 7, i64 [[VL]], i64 1)
+// CHECK-RV64-NEXT: ret <vscale x 4 x bfloat> [[TMP0]]
+//
+vbfloat16m1_t test_vfmul_vf_bf16m1_mu(vbool16_t mask, vbfloat16m1_t maskedoff, vbfloat16m1_t op1, __bf16 op2, size_t vl) {
+ return __riscv_vfmul_mu(mask, maskedoff, op1, op2, vl);
+}
+
+// CHECK-RV64-LABEL: define dso_local <vscale x 8 x bfloat> @test_vfmul_vv_bf16m2_mu(
+// CHECK-RV64-SAME: <vscale x 8 x i1> [[MASK:%.*]], <vscale x 8 x bfloat> [[MASKEDOFF:%.*]], <vscale x 8 x bfloat> [[OP1:%.*]], <vscale x 8 x bfloat> [[OP2:%.*]], i64 noundef [[VL:%.*]]) #[[ATTR0]] {
+// CHECK-RV64-NEXT: [[ENTRY:.*:]]
+// CHECK-RV64-NEXT: [[TMP0:%.*]] = call <vscale x 8 x bfloat> @llvm.riscv.vfmul.mask.nxv8bf16.nxv8bf16.i64(<vscale x 8 x bfloat> [[MASKEDOFF]], <vscale x 8 x bfloat> [[OP1]], <vscale x 8 x bfloat> [[OP2]], <vscale x 8 x i1> [[MASK]], i64 7, i64 [[VL]], i64 1)
+// CHECK-RV64-NEXT: ret <vscale x 8 x bfloat> [[TMP0]]
+//
+vbfloat16m2_t test_vfmul_vv_bf16m2_mu(vbool8_t mask, vbfloat16m2_t maskedoff, vbfloat16m2_t op1, vbfloat16m2_t op2, size_t vl) {
+ return __riscv_vfmul_mu(mask, maskedoff, op1, op2, vl);
+}
+
+// CHECK-RV64-LABEL: define dso_local <vscale x 8 x bfloat> @test_vfmul_vf_bf16m2_mu(
+// CHECK-RV64-SAME: <vscale x 8 x i1> [[MASK:%.*]], <vscale x 8 x bfloat> [[MASKEDOFF:%.*]], <vscale x 8 x bfloat> [[OP1:%.*]], bfloat noundef [[OP2:%.*]], i64 noundef [[VL:%.*]]) #[[ATTR0]] {
+// CHECK-RV64-NEXT: [[ENTRY:.*:]]
+// CHECK-RV64-NEXT: [[TMP0:%.*]] = call <vscale x 8 x bfloat> @llvm.riscv.vfmul.mask.nxv8bf16.bf16.i64(<vscale x 8 x bfloat> [[MASKEDOFF]], <vscale x 8 x bfloat> [[OP1]], bfloat [[OP2]], <vscale x 8 x i1> [[MASK]], i64 7, i64 [[VL]], i64 1)
+// CHECK-RV64-NEXT: ret <vscale x 8 x bfloat> [[TMP0]]
+//
+vbfloat16m2_t test_vfmul_vf_bf16m2_mu(vbool8_t mask, vbfloat16m2_t maskedoff, vbfloat16m2_t op1, __bf16 op2, size_t vl) {
+ return __riscv_vfmul_mu(mask, maskedoff, op1, op2, vl);
+}
+
+// CHECK-RV64-LABEL: define dso_local <vscale x 16 x bfloat> @test_vfmul_vv_bf16m4_mu(
+// CHECK-RV64-SAME: <vscale x 16 x i1> [[MASK:%.*]], <vscale x 16 x bfloat> [[MASKEDOFF:%.*]], <vscale x 16 x bfloat> [[OP1:%.*]], <vscale x 16 x bfloat> [[OP2:%.*]], i64 noundef [[VL:%.*]]) #[[ATTR0]] {
+// CHECK-RV64-NEXT: [[ENTRY:.*:]]
+// CHECK-RV64-NEXT: [[TMP0:%.*]] = call <vscale x 16 x bfloat> @llvm.riscv.vfmul.mask.nxv16bf16.nxv16bf16.i64(<vscale x 16 x bfloat> [[MASKEDOFF]], <vscale x 16 x bfloat> [[OP1]], <vscale x 16 x bfloat> [[OP2]], <vscale x 16 x i1> [[MASK]], i64 7, i64 [[VL]], i64 1)
+// CHECK-RV64-NEXT: ret <vscale x 16 x bfloat> [[TMP0]]
+//
+vbfloat16m4_t test_vfmul_vv_bf16m4_mu(vbool4_t mask, vbfloat16m4_t maskedoff, vbfloat16m4_t op1, vbfloat16m4_t op2, size_t vl) {
+ return __riscv_vfmul_mu(mask, maskedoff, op1, op2, vl);
+}
+
+// CHECK-RV64-LABEL: define dso_local <vscale x 16 x bfloat> @test_vfmul_vf_bf16m4_mu(
+// CHECK-RV64-SAME: <vscale x 16 x i1> [[MASK:%.*]], <vscale x 16 x bfloat> [[MASKEDOFF:%.*]], <vscale x 16 x bfloat> [[OP1:%.*]], bfloat noundef [[OP2:%.*]], i64 noundef [[VL:%.*]]) #[[ATTR0]] {
+// CHECK-RV64-NEXT: [[ENTRY:.*:]]
+// CHECK-RV64-NEXT: [[TMP0:%.*]] = call <vscale x 16 x bfloat> @llvm.riscv.vfmul.mask.nxv16bf16.bf16.i64(<vscale x 16 x bfloat> [[MASKEDOFF]], <vscale x 16 x bfloat> [[OP1]], bfloat [[OP2]], <vscale x 16 x i1> [[MASK]], i64 7, i64 [[VL]], i64 1)
+// CHECK-RV64-NEXT: ret <vscale x 16 x bfloat> [[TMP0]]
+//
+vbfloat16m4_t test_vfmul_vf_bf16m4_mu(vbool4_t mask, vbfloat16m4_t maskedoff, vbfloat16m4_t op1, __bf16 op2, size_t vl) {
+ return __riscv_vfmul_mu(mask, maskedoff, op1, op2, vl);
+}
+
+// CHECK-RV64-LABEL: define dso_local <vscale x 32 x bfloat> @test_vfmul_vv_bf16m8_mu(
+// CHECK-RV64-SAME: <vscale x 32 x i1> [[MASK:%.*]], <vscale x 32 x bfloat> [[MASKEDOFF:%.*]], <vscale x 32 x bfloat> [[OP1:%.*]], <vscale x 32 x bfloat> [[OP2:%.*]], i64 noundef [[VL:%.*]]) #[[ATTR0]] {
+// CHECK-RV64-NEXT: [[ENTRY:.*:]]
+// CHECK-RV64-NEXT: [[TMP0:%.*]] = call <vscale x 32 x bfloat> @llvm.riscv.vfmul.mask.nxv32bf16.nxv32bf16.i64(<vscale x 32 x bfloat> [[MASKEDOFF]], <vscale x 32 x bfloat> [[OP1]], <vscale x 32 x bfloat> [[OP2]], <vscale x 32 x i1> [[MASK]], i64 7, i64 [[VL]], i64 1)
+// CHECK-RV64-NEXT: ret <vscale x 32 x bfloat> [[TMP0]]
+//
+vbfloat16m8_t test_vfmul_vv_bf16m8_mu(vbool2_t mask, vbfloat16m8_t maskedoff, vbfloat16m8_t op1, vbfloat16m8_t op2, size_t vl) {
+ return __riscv_vfmul_mu(mask, maskedoff, op1, op2, vl);
+}
+
+// CHECK-RV64-LABEL: define dso_local <vscale x 32 x bfloat> @test_vfmul_vf_bf16m8_mu(
+// CHECK-RV64-SAME: <vscale x 32 x i1> [[MASK:%.*]], <vscale x 32 x bfloat> [[MASKEDOFF:%.*]], <vscale x 32 x bfloat> [[OP1:%.*]], bfloat noundef [[OP2:%.*]], i64 noundef [[VL:%.*]]) #[[ATTR0]] {
+// CHECK-RV64-NEXT: [[ENTRY:.*:]]
+// CHECK-RV64-NEXT: [[TMP0:%.*]] = call <vscale x 32 x bfloat> @llvm.riscv.vfmul.mask.nxv32bf16.bf16.i64(<vscale x 32 x bfloat> [[MASKEDOFF]], <vscale x 32 x bfloat> [[OP1]], bfloat [[OP2]], <vscale x 32 x i1> [[MASK]], i64 7, i64 [[VL]], i64 1)
+// CHECK-RV64-NEXT: ret <vscale x 32 x bfloat> [[TMP0]]
+//
+vbfloat16m8_t test_vfmul_vf_bf16m8_mu(vbool2_t mask, vbfloat16m8_t maskedoff, vbfloat16m8_t op1, __bf16 op2, size_t vl) {
+ return __riscv_vfmul_mu(mask, maskedoff, op1, op2, vl);
+}
+
diff --git a/clang/test/CodeGen/RISCV/rvv-intrinsics-autogenerated/zvfbfa/policy/overloaded/vfmv.c b/clang/test/CodeGen/RISCV/rvv-intrinsics-autogenerated/zvfbfa/policy/overloaded/vfmv.c
new file mode 100644
index 0000000..9fd1ffc
--- /dev/null
+++ b/clang/test/CodeGen/RISCV/rvv-intrinsics-autogenerated/zvfbfa/policy/overloaded/vfmv.c
@@ -0,0 +1,129 @@
+// NOTE: Assertions have been autogenerated by utils/update_cc_test_checks.py UTC_ARGS: --version 5
+// REQUIRES: riscv-registered-target
+// RUN: %clang_cc1 -triple riscv64 -target-feature +v \
+// RUN: -target-feature +experimental-zvfbfa -disable-O0-optnone \
+// RUN: -emit-llvm %s -o - | opt -S -passes=mem2reg | \
+// RUN: FileCheck --check-prefix=CHECK-RV64 %s
+
+#include <riscv_vector.h>
+
+// CHECK-RV64-LABEL: define dso_local <vscale x 1 x bfloat> @test_vfmv_v_f_bf16mf4_tu(
+// CHECK-RV64-SAME: <vscale x 1 x bfloat> [[MASKEDOFF:%.*]], bfloat noundef [[SRC:%.*]], i64 noundef [[VL:%.*]]) #[[ATTR0:[0-9]+]] {
+// CHECK-RV64-NEXT: [[ENTRY:.*:]]
+// CHECK-RV64-NEXT: [[TMP0:%.*]] = call <vscale x 1 x bfloat> @llvm.riscv.vfmv.v.f.nxv1bf16.i64(<vscale x 1 x bfloat> [[MASKEDOFF]], bfloat [[SRC]], i64 [[VL]])
+// CHECK-RV64-NEXT: ret <vscale x 1 x bfloat> [[TMP0]]
+//
+vbfloat16mf4_t test_vfmv_v_f_bf16mf4_tu(vbfloat16mf4_t maskedoff, __bf16 src, size_t vl) {
+ return __riscv_vfmv_v_tu(maskedoff, src, vl);
+}
+
+// CHECK-RV64-LABEL: define dso_local <vscale x 2 x bfloat> @test_vfmv_v_f_bf16mf2_tu(
+// CHECK-RV64-SAME: <vscale x 2 x bfloat> [[MASKEDOFF:%.*]], bfloat noundef [[SRC:%.*]], i64 noundef [[VL:%.*]]) #[[ATTR0]] {
+// CHECK-RV64-NEXT: [[ENTRY:.*:]]
+// CHECK-RV64-NEXT: [[TMP0:%.*]] = call <vscale x 2 x bfloat> @llvm.riscv.vfmv.v.f.nxv2bf16.i64(<vscale x 2 x bfloat> [[MASKEDOFF]], bfloat [[SRC]], i64 [[VL]])
+// CHECK-RV64-NEXT: ret <vscale x 2 x bfloat> [[TMP0]]
+//
+vbfloat16mf2_t test_vfmv_v_f_bf16mf2_tu(vbfloat16mf2_t maskedoff, __bf16 src, size_t vl) {
+ return __riscv_vfmv_v_tu(maskedoff, src, vl);
+}
+
+// CHECK-RV64-LABEL: define dso_local <vscale x 4 x bfloat> @test_vfmv_v_f_bf16m1_tu(
+// CHECK-RV64-SAME: <vscale x 4 x bfloat> [[MASKEDOFF:%.*]], bfloat noundef [[SRC:%.*]], i64 noundef [[VL:%.*]]) #[[ATTR0]] {
+// CHECK-RV64-NEXT: [[ENTRY:.*:]]
+// CHECK-RV64-NEXT: [[TMP0:%.*]] = call <vscale x 4 x bfloat> @llvm.riscv.vfmv.v.f.nxv4bf16.i64(<vscale x 4 x bfloat> [[MASKEDOFF]], bfloat [[SRC]], i64 [[VL]])
+// CHECK-RV64-NEXT: ret <vscale x 4 x bfloat> [[TMP0]]
+//
+vbfloat16m1_t test_vfmv_v_f_bf16m1_tu(vbfloat16m1_t maskedoff, __bf16 src, size_t vl) {
+ return __riscv_vfmv_v_tu(maskedoff, src, vl);
+}
+
+// CHECK-RV64-LABEL: define dso_local <vscale x 8 x bfloat> @test_vfmv_v_f_bf16m2_tu(
+// CHECK-RV64-SAME: <vscale x 8 x bfloat> [[MASKEDOFF:%.*]], bfloat noundef [[SRC:%.*]], i64 noundef [[VL:%.*]]) #[[ATTR0]] {
+// CHECK-RV64-NEXT: [[ENTRY:.*:]]
+// CHECK-RV64-NEXT: [[TMP0:%.*]] = call <vscale x 8 x bfloat> @llvm.riscv.vfmv.v.f.nxv8bf16.i64(<vscale x 8 x bfloat> [[MASKEDOFF]], bfloat [[SRC]], i64 [[VL]])
+// CHECK-RV64-NEXT: ret <vscale x 8 x bfloat> [[TMP0]]
+//
+vbfloat16m2_t test_vfmv_v_f_bf16m2_tu(vbfloat16m2_t maskedoff, __bf16 src, size_t vl) {
+ return __riscv_vfmv_v_tu(maskedoff, src, vl);
+}
+
+// CHECK-RV64-LABEL: define dso_local <vscale x 16 x bfloat> @test_vfmv_v_f_bf16m4_tu(
+// CHECK-RV64-SAME: <vscale x 16 x bfloat> [[MASKEDOFF:%.*]], bfloat noundef [[SRC:%.*]], i64 noundef [[VL:%.*]]) #[[ATTR0]] {
+// CHECK-RV64-NEXT: [[ENTRY:.*:]]
+// CHECK-RV64-NEXT: [[TMP0:%.*]] = call <vscale x 16 x bfloat> @llvm.riscv.vfmv.v.f.nxv16bf16.i64(<vscale x 16 x bfloat> [[MASKEDOFF]], bfloat [[SRC]], i64 [[VL]])
+// CHECK-RV64-NEXT: ret <vscale x 16 x bfloat> [[TMP0]]
+//
+vbfloat16m4_t test_vfmv_v_f_bf16m4_tu(vbfloat16m4_t maskedoff, __bf16 src, size_t vl) {
+ return __riscv_vfmv_v_tu(maskedoff, src, vl);
+}
+
+// CHECK-RV64-LABEL: define dso_local <vscale x 32 x bfloat> @test_vfmv_v_f_bf16m8_tu(
+// CHECK-RV64-SAME: <vscale x 32 x bfloat> [[MASKEDOFF:%.*]], bfloat noundef [[SRC:%.*]], i64 noundef [[VL:%.*]]) #[[ATTR0]] {
+// CHECK-RV64-NEXT: [[ENTRY:.*:]]
+// CHECK-RV64-NEXT: [[TMP0:%.*]] = call <vscale x 32 x bfloat> @llvm.riscv.vfmv.v.f.nxv32bf16.i64(<vscale x 32 x bfloat> [[MASKEDOFF]], bfloat [[SRC]], i64 [[VL]])
+// CHECK-RV64-NEXT: ret <vscale x 32 x bfloat> [[TMP0]]
+//
+vbfloat16m8_t test_vfmv_v_f_bf16m8_tu(vbfloat16m8_t maskedoff, __bf16 src, size_t vl) {
+ return __riscv_vfmv_v_tu(maskedoff, src, vl);
+}
+
+// CHECK-RV64-LABEL: define dso_local <vscale x 1 x bfloat> @test_vfmv_s_f_bf16mf4_tu(
+// CHECK-RV64-SAME: <vscale x 1 x bfloat> [[MASKEDOFF:%.*]], bfloat noundef [[SRC:%.*]], i64 noundef [[VL:%.*]]) #[[ATTR0]] {
+// CHECK-RV64-NEXT: [[ENTRY:.*:]]
+// CHECK-RV64-NEXT: [[TMP0:%.*]] = call <vscale x 1 x bfloat> @llvm.riscv.vfmv.s.f.nxv1bf16.i64(<vscale x 1 x bfloat> [[MASKEDOFF]], bfloat [[SRC]], i64 [[VL]])
+// CHECK-RV64-NEXT: ret <vscale x 1 x bfloat> [[TMP0]]
+//
+vbfloat16mf4_t test_vfmv_s_f_bf16mf4_tu(vbfloat16mf4_t maskedoff, __bf16 src, size_t vl) {
+ return __riscv_vfmv_s_tu(maskedoff, src, vl);
+}
+
+// CHECK-RV64-LABEL: define dso_local <vscale x 2 x bfloat> @test_vfmv_s_f_bf16mf2_tu(
+// CHECK-RV64-SAME: <vscale x 2 x bfloat> [[MASKEDOFF:%.*]], bfloat noundef [[SRC:%.*]], i64 noundef [[VL:%.*]]) #[[ATTR0]] {
+// CHECK-RV64-NEXT: [[ENTRY:.*:]]
+// CHECK-RV64-NEXT: [[TMP0:%.*]] = call <vscale x 2 x bfloat> @llvm.riscv.vfmv.s.f.nxv2bf16.i64(<vscale x 2 x bfloat> [[MASKEDOFF]], bfloat [[SRC]], i64 [[VL]])
+// CHECK-RV64-NEXT: ret <vscale x 2 x bfloat> [[TMP0]]
+//
+vbfloat16mf2_t test_vfmv_s_f_bf16mf2_tu(vbfloat16mf2_t maskedoff, __bf16 src, size_t vl) {
+ return __riscv_vfmv_s_tu(maskedoff, src, vl);
+}
+
+// CHECK-RV64-LABEL: define dso_local <vscale x 4 x bfloat> @test_vfmv_s_f_bf16m1_tu(
+// CHECK-RV64-SAME: <vscale x 4 x bfloat> [[MASKEDOFF:%.*]], bfloat noundef [[SRC:%.*]], i64 noundef [[VL:%.*]]) #[[ATTR0]] {
+// CHECK-RV64-NEXT: [[ENTRY:.*:]]
+// CHECK-RV64-NEXT: [[TMP0:%.*]] = call <vscale x 4 x bfloat> @llvm.riscv.vfmv.s.f.nxv4bf16.i64(<vscale x 4 x bfloat> [[MASKEDOFF]], bfloat [[SRC]], i64 [[VL]])
+// CHECK-RV64-NEXT: ret <vscale x 4 x bfloat> [[TMP0]]
+//
+vbfloat16m1_t test_vfmv_s_f_bf16m1_tu(vbfloat16m1_t maskedoff, __bf16 src, size_t vl) {
+ return __riscv_vfmv_s_tu(maskedoff, src, vl);
+}
+
+// CHECK-RV64-LABEL: define dso_local <vscale x 8 x bfloat> @test_vfmv_s_f_bf16m2_tu(
+// CHECK-RV64-SAME: <vscale x 8 x bfloat> [[MASKEDOFF:%.*]], bfloat noundef [[SRC:%.*]], i64 noundef [[VL:%.*]]) #[[ATTR0]] {
+// CHECK-RV64-NEXT: [[ENTRY:.*:]]
+// CHECK-RV64-NEXT: [[TMP0:%.*]] = call <vscale x 8 x bfloat> @llvm.riscv.vfmv.s.f.nxv8bf16.i64(<vscale x 8 x bfloat> [[MASKEDOFF]], bfloat [[SRC]], i64 [[VL]])
+// CHECK-RV64-NEXT: ret <vscale x 8 x bfloat> [[TMP0]]
+//
+vbfloat16m2_t test_vfmv_s_f_bf16m2_tu(vbfloat16m2_t maskedoff, __bf16 src, size_t vl) {
+ return __riscv_vfmv_s_tu(maskedoff, src, vl);
+}
+
+// CHECK-RV64-LABEL: define dso_local <vscale x 16 x bfloat> @test_vfmv_s_f_bf16m4_tu(
+// CHECK-RV64-SAME: <vscale x 16 x bfloat> [[MASKEDOFF:%.*]], bfloat noundef [[SRC:%.*]], i64 noundef [[VL:%.*]]) #[[ATTR0]] {
+// CHECK-RV64-NEXT: [[ENTRY:.*:]]
+// CHECK-RV64-NEXT: [[TMP0:%.*]] = call <vscale x 16 x bfloat> @llvm.riscv.vfmv.s.f.nxv16bf16.i64(<vscale x 16 x bfloat> [[MASKEDOFF]], bfloat [[SRC]], i64 [[VL]])
+// CHECK-RV64-NEXT: ret <vscale x 16 x bfloat> [[TMP0]]
+//
+vbfloat16m4_t test_vfmv_s_f_bf16m4_tu(vbfloat16m4_t maskedoff, __bf16 src, size_t vl) {
+ return __riscv_vfmv_s_tu(maskedoff, src, vl);
+}
+
+// CHECK-RV64-LABEL: define dso_local <vscale x 32 x bfloat> @test_vfmv_s_f_bf16m8_tu(
+// CHECK-RV64-SAME: <vscale x 32 x bfloat> [[MASKEDOFF:%.*]], bfloat noundef [[SRC:%.*]], i64 noundef [[VL:%.*]]) #[[ATTR0]] {
+// CHECK-RV64-NEXT: [[ENTRY:.*:]]
+// CHECK-RV64-NEXT: [[TMP0:%.*]] = call <vscale x 32 x bfloat> @llvm.riscv.vfmv.s.f.nxv32bf16.i64(<vscale x 32 x bfloat> [[MASKEDOFF]], bfloat [[SRC]], i64 [[VL]])
+// CHECK-RV64-NEXT: ret <vscale x 32 x bfloat> [[TMP0]]
+//
+vbfloat16m8_t test_vfmv_s_f_bf16m8_tu(vbfloat16m8_t maskedoff, __bf16 src, size_t vl) {
+ return __riscv_vfmv_s_tu(maskedoff, src, vl);
+}
+
diff --git a/clang/test/CodeGen/RISCV/rvv-intrinsics-autogenerated/zvfbfa/policy/overloaded/vfncvt.c b/clang/test/CodeGen/RISCV/rvv-intrinsics-autogenerated/zvfbfa/policy/overloaded/vfncvt.c
new file mode 100644
index 0000000..c6cd0a5
--- /dev/null
+++ b/clang/test/CodeGen/RISCV/rvv-intrinsics-autogenerated/zvfbfa/policy/overloaded/vfncvt.c
@@ -0,0 +1,1539 @@
+// NOTE: Assertions have been autogenerated by utils/update_cc_test_checks.py UTC_ARGS: --version 5
+// REQUIRES: riscv-registered-target
+// RUN: %clang_cc1 -triple riscv64 -target-feature +v \
+// RUN: -target-feature +experimental-zvfbfa -disable-O0-optnone \
+// RUN: -emit-llvm %s -o - | opt -S -passes=mem2reg | \
+// RUN: FileCheck --check-prefix=CHECK-RV64 %s
+
+#include <riscv_vector.h>
+
+// CHECK-RV64-LABEL: define dso_local <vscale x 1 x i8> @test_vfncvt_x_f_w_bf16mf4_i8mf8_tu(
+// CHECK-RV64-SAME: <vscale x 1 x i8> [[VD:%.*]], <vscale x 1 x bfloat> [[VS2:%.*]], i64 noundef [[VL:%.*]]) #[[ATTR0:[0-9]+]] {
+// CHECK-RV64-NEXT: [[ENTRY:.*:]]
+// CHECK-RV64-NEXT: [[TMP0:%.*]] = call <vscale x 1 x i8> @llvm.riscv.vfncvt.x.f.w.nxv1i8.nxv1bf16.i64(<vscale x 1 x i8> [[VD]], <vscale x 1 x bfloat> [[VS2]], i64 7, i64 [[VL]])
+// CHECK-RV64-NEXT: ret <vscale x 1 x i8> [[TMP0]]
+//
+vint8mf8_t test_vfncvt_x_f_w_bf16mf4_i8mf8_tu(vint8mf8_t vd, vbfloat16mf4_t vs2,
+ size_t vl) {
+ return __riscv_vfncvt_x_tu(vd, vs2, vl);
+}
+
+// CHECK-RV64-LABEL: define dso_local <vscale x 2 x i8> @test_vfncvt_x_f_w_bf16mf2_i8mf4_tu(
+// CHECK-RV64-SAME: <vscale x 2 x i8> [[VD:%.*]], <vscale x 2 x bfloat> [[VS2:%.*]], i64 noundef [[VL:%.*]]) #[[ATTR0]] {
+// CHECK-RV64-NEXT: [[ENTRY:.*:]]
+// CHECK-RV64-NEXT: [[TMP0:%.*]] = call <vscale x 2 x i8> @llvm.riscv.vfncvt.x.f.w.nxv2i8.nxv2bf16.i64(<vscale x 2 x i8> [[VD]], <vscale x 2 x bfloat> [[VS2]], i64 7, i64 [[VL]])
+// CHECK-RV64-NEXT: ret <vscale x 2 x i8> [[TMP0]]
+//
+vint8mf4_t test_vfncvt_x_f_w_bf16mf2_i8mf4_tu(vint8mf4_t vd, vbfloat16mf2_t vs2,
+ size_t vl) {
+ return __riscv_vfncvt_x_tu(vd, vs2, vl);
+}
+
+// CHECK-RV64-LABEL: define dso_local <vscale x 4 x i8> @test_vfncvt_x_f_w_bf16m1_i8mf2_tu(
+// CHECK-RV64-SAME: <vscale x 4 x i8> [[VD:%.*]], <vscale x 4 x bfloat> [[VS2:%.*]], i64 noundef [[VL:%.*]]) #[[ATTR0]] {
+// CHECK-RV64-NEXT: [[ENTRY:.*:]]
+// CHECK-RV64-NEXT: [[TMP0:%.*]] = call <vscale x 4 x i8> @llvm.riscv.vfncvt.x.f.w.nxv4i8.nxv4bf16.i64(<vscale x 4 x i8> [[VD]], <vscale x 4 x bfloat> [[VS2]], i64 7, i64 [[VL]])
+// CHECK-RV64-NEXT: ret <vscale x 4 x i8> [[TMP0]]
+//
+vint8mf2_t test_vfncvt_x_f_w_bf16m1_i8mf2_tu(vint8mf2_t vd, vbfloat16m1_t vs2,
+ size_t vl) {
+ return __riscv_vfncvt_x_tu(vd, vs2, vl);
+}
+
+// CHECK-RV64-LABEL: define dso_local <vscale x 8 x i8> @test_vfncvt_x_f_w_bf16m2_i8m1_tu(
+// CHECK-RV64-SAME: <vscale x 8 x i8> [[VD:%.*]], <vscale x 8 x bfloat> [[VS2:%.*]], i64 noundef [[VL:%.*]]) #[[ATTR0]] {
+// CHECK-RV64-NEXT: [[ENTRY:.*:]]
+// CHECK-RV64-NEXT: [[TMP0:%.*]] = call <vscale x 8 x i8> @llvm.riscv.vfncvt.x.f.w.nxv8i8.nxv8bf16.i64(<vscale x 8 x i8> [[VD]], <vscale x 8 x bfloat> [[VS2]], i64 7, i64 [[VL]])
+// CHECK-RV64-NEXT: ret <vscale x 8 x i8> [[TMP0]]
+//
+vint8m1_t test_vfncvt_x_f_w_bf16m2_i8m1_tu(vint8m1_t vd, vbfloat16m2_t vs2,
+ size_t vl) {
+ return __riscv_vfncvt_x_tu(vd, vs2, vl);
+}
+
+// CHECK-RV64-LABEL: define dso_local <vscale x 16 x i8> @test_vfncvt_x_f_w_bf16m4_i8m2_tu(
+// CHECK-RV64-SAME: <vscale x 16 x i8> [[VD:%.*]], <vscale x 16 x bfloat> [[VS2:%.*]], i64 noundef [[VL:%.*]]) #[[ATTR0]] {
+// CHECK-RV64-NEXT: [[ENTRY:.*:]]
+// CHECK-RV64-NEXT: [[TMP0:%.*]] = call <vscale x 16 x i8> @llvm.riscv.vfncvt.x.f.w.nxv16i8.nxv16bf16.i64(<vscale x 16 x i8> [[VD]], <vscale x 16 x bfloat> [[VS2]], i64 7, i64 [[VL]])
+// CHECK-RV64-NEXT: ret <vscale x 16 x i8> [[TMP0]]
+//
+vint8m2_t test_vfncvt_x_f_w_bf16m4_i8m2_tu(vint8m2_t vd, vbfloat16m4_t vs2,
+ size_t vl) {
+ return __riscv_vfncvt_x_tu(vd, vs2, vl);
+}
+
+// CHECK-RV64-LABEL: define dso_local <vscale x 32 x i8> @test_vfncvt_x_f_w_bf16m8_i8m4_tu(
+// CHECK-RV64-SAME: <vscale x 32 x i8> [[VD:%.*]], <vscale x 32 x bfloat> [[VS2:%.*]], i64 noundef [[VL:%.*]]) #[[ATTR0]] {
+// CHECK-RV64-NEXT: [[ENTRY:.*:]]
+// CHECK-RV64-NEXT: [[TMP0:%.*]] = call <vscale x 32 x i8> @llvm.riscv.vfncvt.x.f.w.nxv32i8.nxv32bf16.i64(<vscale x 32 x i8> [[VD]], <vscale x 32 x bfloat> [[VS2]], i64 7, i64 [[VL]])
+// CHECK-RV64-NEXT: ret <vscale x 32 x i8> [[TMP0]]
+//
+vint8m4_t test_vfncvt_x_f_w_bf16m8_i8m4_tu(vint8m4_t vd, vbfloat16m8_t vs2,
+ size_t vl) {
+ return __riscv_vfncvt_x_tu(vd, vs2, vl);
+}
+
+// CHECK-RV64-LABEL: define dso_local <vscale x 1 x i8> @test_vfncvt_xu_f_w_bf16mf4_u8mf8_tu(
+// CHECK-RV64-SAME: <vscale x 1 x i8> [[VD:%.*]], <vscale x 1 x bfloat> [[VS2:%.*]], i64 noundef [[VL:%.*]]) #[[ATTR0]] {
+// CHECK-RV64-NEXT: [[ENTRY:.*:]]
+// CHECK-RV64-NEXT: [[TMP0:%.*]] = call <vscale x 1 x i8> @llvm.riscv.vfncvt.xu.f.w.nxv1i8.nxv1bf16.i64(<vscale x 1 x i8> [[VD]], <vscale x 1 x bfloat> [[VS2]], i64 7, i64 [[VL]])
+// CHECK-RV64-NEXT: ret <vscale x 1 x i8> [[TMP0]]
+//
+vuint8mf8_t test_vfncvt_xu_f_w_bf16mf4_u8mf8_tu(vuint8mf8_t vd,
+ vbfloat16mf4_t vs2, size_t vl) {
+ return __riscv_vfncvt_xu_tu(vd, vs2, vl);
+}
+
+// CHECK-RV64-LABEL: define dso_local <vscale x 2 x i8> @test_vfncvt_xu_f_w_bf16mf2_u8mf4_tu(
+// CHECK-RV64-SAME: <vscale x 2 x i8> [[VD:%.*]], <vscale x 2 x bfloat> [[VS2:%.*]], i64 noundef [[VL:%.*]]) #[[ATTR0]] {
+// CHECK-RV64-NEXT: [[ENTRY:.*:]]
+// CHECK-RV64-NEXT: [[TMP0:%.*]] = call <vscale x 2 x i8> @llvm.riscv.vfncvt.xu.f.w.nxv2i8.nxv2bf16.i64(<vscale x 2 x i8> [[VD]], <vscale x 2 x bfloat> [[VS2]], i64 7, i64 [[VL]])
+// CHECK-RV64-NEXT: ret <vscale x 2 x i8> [[TMP0]]
+//
+vuint8mf4_t test_vfncvt_xu_f_w_bf16mf2_u8mf4_tu(vuint8mf4_t vd,
+ vbfloat16mf2_t vs2, size_t vl) {
+ return __riscv_vfncvt_xu_tu(vd, vs2, vl);
+}
+
+// CHECK-RV64-LABEL: define dso_local <vscale x 4 x i8> @test_vfncvt_xu_f_w_bf16m1_u8mf2_tu(
+// CHECK-RV64-SAME: <vscale x 4 x i8> [[VD:%.*]], <vscale x 4 x bfloat> [[VS2:%.*]], i64 noundef [[VL:%.*]]) #[[ATTR0]] {
+// CHECK-RV64-NEXT: [[ENTRY:.*:]]
+// CHECK-RV64-NEXT: [[TMP0:%.*]] = call <vscale x 4 x i8> @llvm.riscv.vfncvt.xu.f.w.nxv4i8.nxv4bf16.i64(<vscale x 4 x i8> [[VD]], <vscale x 4 x bfloat> [[VS2]], i64 7, i64 [[VL]])
+// CHECK-RV64-NEXT: ret <vscale x 4 x i8> [[TMP0]]
+//
+vuint8mf2_t test_vfncvt_xu_f_w_bf16m1_u8mf2_tu(vuint8mf2_t vd,
+ vbfloat16m1_t vs2, size_t vl) {
+ return __riscv_vfncvt_xu_tu(vd, vs2, vl);
+}
+
+// CHECK-RV64-LABEL: define dso_local <vscale x 8 x i8> @test_vfncvt_xu_f_w_bf16m2_u8m1_tu(
+// CHECK-RV64-SAME: <vscale x 8 x i8> [[VD:%.*]], <vscale x 8 x bfloat> [[VS2:%.*]], i64 noundef [[VL:%.*]]) #[[ATTR0]] {
+// CHECK-RV64-NEXT: [[ENTRY:.*:]]
+// CHECK-RV64-NEXT: [[TMP0:%.*]] = call <vscale x 8 x i8> @llvm.riscv.vfncvt.xu.f.w.nxv8i8.nxv8bf16.i64(<vscale x 8 x i8> [[VD]], <vscale x 8 x bfloat> [[VS2]], i64 7, i64 [[VL]])
+// CHECK-RV64-NEXT: ret <vscale x 8 x i8> [[TMP0]]
+//
+vuint8m1_t test_vfncvt_xu_f_w_bf16m2_u8m1_tu(vuint8m1_t vd, vbfloat16m2_t vs2,
+ size_t vl) {
+ return __riscv_vfncvt_xu_tu(vd, vs2, vl);
+}
+
+// CHECK-RV64-LABEL: define dso_local <vscale x 16 x i8> @test_vfncvt_xu_f_w_bf16m4_u8m2_tu(
+// CHECK-RV64-SAME: <vscale x 16 x i8> [[VD:%.*]], <vscale x 16 x bfloat> [[VS2:%.*]], i64 noundef [[VL:%.*]]) #[[ATTR0]] {
+// CHECK-RV64-NEXT: [[ENTRY:.*:]]
+// CHECK-RV64-NEXT: [[TMP0:%.*]] = call <vscale x 16 x i8> @llvm.riscv.vfncvt.xu.f.w.nxv16i8.nxv16bf16.i64(<vscale x 16 x i8> [[VD]], <vscale x 16 x bfloat> [[VS2]], i64 7, i64 [[VL]])
+// CHECK-RV64-NEXT: ret <vscale x 16 x i8> [[TMP0]]
+//
+vuint8m2_t test_vfncvt_xu_f_w_bf16m4_u8m2_tu(vuint8m2_t vd, vbfloat16m4_t vs2,
+ size_t vl) {
+ return __riscv_vfncvt_xu_tu(vd, vs2, vl);
+}
+
+// CHECK-RV64-LABEL: define dso_local <vscale x 32 x i8> @test_vfncvt_xu_f_w_bf16m8_u8m4_tu(
+// CHECK-RV64-SAME: <vscale x 32 x i8> [[VD:%.*]], <vscale x 32 x bfloat> [[VS2:%.*]], i64 noundef [[VL:%.*]]) #[[ATTR0]] {
+// CHECK-RV64-NEXT: [[ENTRY:.*:]]
+// CHECK-RV64-NEXT: [[TMP0:%.*]] = call <vscale x 32 x i8> @llvm.riscv.vfncvt.xu.f.w.nxv32i8.nxv32bf16.i64(<vscale x 32 x i8> [[VD]], <vscale x 32 x bfloat> [[VS2]], i64 7, i64 [[VL]])
+// CHECK-RV64-NEXT: ret <vscale x 32 x i8> [[TMP0]]
+//
+vuint8m4_t test_vfncvt_xu_f_w_bf16m8_u8m4_tu(vuint8m4_t vd, vbfloat16m8_t vs2,
+ size_t vl) {
+ return __riscv_vfncvt_xu_tu(vd, vs2, vl);
+}
+
+// CHECK-RV64-LABEL: define dso_local <vscale x 1 x bfloat> @test_vfncvt_f_f_w_bf16mf4_tu(
+// CHECK-RV64-SAME: <vscale x 1 x bfloat> [[VD:%.*]], <vscale x 1 x float> [[VS2:%.*]], i64 noundef [[VL:%.*]]) #[[ATTR0]] {
+// CHECK-RV64-NEXT: [[ENTRY:.*:]]
+// CHECK-RV64-NEXT: [[TMP0:%.*]] = call <vscale x 1 x bfloat> @llvm.riscv.vfncvt.f.f.w.nxv1bf16.nxv1f32.i64(<vscale x 1 x bfloat> [[VD]], <vscale x 1 x float> [[VS2]], i64 7, i64 [[VL]])
+// CHECK-RV64-NEXT: ret <vscale x 1 x bfloat> [[TMP0]]
+//
+vbfloat16mf4_t test_vfncvt_f_f_w_bf16mf4_tu(vbfloat16mf4_t vd,
+ vfloat32mf2_t vs2, size_t vl) {
+ return __riscv_vfncvt_f_bf16_tu(vd, vs2, vl);
+}
+
+// CHECK-RV64-LABEL: define dso_local <vscale x 2 x bfloat> @test_vfncvt_f_f_w_bf16mf2_tu(
+// CHECK-RV64-SAME: <vscale x 2 x bfloat> [[VD:%.*]], <vscale x 2 x float> [[VS2:%.*]], i64 noundef [[VL:%.*]]) #[[ATTR0]] {
+// CHECK-RV64-NEXT: [[ENTRY:.*:]]
+// CHECK-RV64-NEXT: [[TMP0:%.*]] = call <vscale x 2 x bfloat> @llvm.riscv.vfncvt.f.f.w.nxv2bf16.nxv2f32.i64(<vscale x 2 x bfloat> [[VD]], <vscale x 2 x float> [[VS2]], i64 7, i64 [[VL]])
+// CHECK-RV64-NEXT: ret <vscale x 2 x bfloat> [[TMP0]]
+//
+vbfloat16mf2_t test_vfncvt_f_f_w_bf16mf2_tu(vbfloat16mf2_t vd, vfloat32m1_t vs2,
+ size_t vl) {
+ return __riscv_vfncvt_f_bf16_tu(vd, vs2, vl);
+}
+
+// CHECK-RV64-LABEL: define dso_local <vscale x 4 x bfloat> @test_vfncvt_f_f_w_bf16m1_tu(
+// CHECK-RV64-SAME: <vscale x 4 x bfloat> [[VD:%.*]], <vscale x 4 x float> [[VS2:%.*]], i64 noundef [[VL:%.*]]) #[[ATTR0]] {
+// CHECK-RV64-NEXT: [[ENTRY:.*:]]
+// CHECK-RV64-NEXT: [[TMP0:%.*]] = call <vscale x 4 x bfloat> @llvm.riscv.vfncvt.f.f.w.nxv4bf16.nxv4f32.i64(<vscale x 4 x bfloat> [[VD]], <vscale x 4 x float> [[VS2]], i64 7, i64 [[VL]])
+// CHECK-RV64-NEXT: ret <vscale x 4 x bfloat> [[TMP0]]
+//
+vbfloat16m1_t test_vfncvt_f_f_w_bf16m1_tu(vbfloat16m1_t vd, vfloat32m2_t vs2,
+ size_t vl) {
+ return __riscv_vfncvt_f_bf16_tu(vd, vs2, vl);
+}
+
+// CHECK-RV64-LABEL: define dso_local <vscale x 8 x bfloat> @test_vfncvt_f_f_w_bf16m2_tu(
+// CHECK-RV64-SAME: <vscale x 8 x bfloat> [[VD:%.*]], <vscale x 8 x float> [[VS2:%.*]], i64 noundef [[VL:%.*]]) #[[ATTR0]] {
+// CHECK-RV64-NEXT: [[ENTRY:.*:]]
+// CHECK-RV64-NEXT: [[TMP0:%.*]] = call <vscale x 8 x bfloat> @llvm.riscv.vfncvt.f.f.w.nxv8bf16.nxv8f32.i64(<vscale x 8 x bfloat> [[VD]], <vscale x 8 x float> [[VS2]], i64 7, i64 [[VL]])
+// CHECK-RV64-NEXT: ret <vscale x 8 x bfloat> [[TMP0]]
+//
+vbfloat16m2_t test_vfncvt_f_f_w_bf16m2_tu(vbfloat16m2_t vd, vfloat32m4_t vs2,
+ size_t vl) {
+ return __riscv_vfncvt_f_bf16_tu(vd, vs2, vl);
+}
+
+// CHECK-RV64-LABEL: define dso_local <vscale x 16 x bfloat> @test_vfncvt_f_f_w_bf16m4_tu(
+// CHECK-RV64-SAME: <vscale x 16 x bfloat> [[VD:%.*]], <vscale x 16 x float> [[VS2:%.*]], i64 noundef [[VL:%.*]]) #[[ATTR0]] {
+// CHECK-RV64-NEXT: [[ENTRY:.*:]]
+// CHECK-RV64-NEXT: [[TMP0:%.*]] = call <vscale x 16 x bfloat> @llvm.riscv.vfncvt.f.f.w.nxv16bf16.nxv16f32.i64(<vscale x 16 x bfloat> [[VD]], <vscale x 16 x float> [[VS2]], i64 7, i64 [[VL]])
+// CHECK-RV64-NEXT: ret <vscale x 16 x bfloat> [[TMP0]]
+//
+vbfloat16m4_t test_vfncvt_f_f_w_bf16m4_tu(vbfloat16m4_t vd, vfloat32m8_t vs2,
+ size_t vl) {
+ return __riscv_vfncvt_f_bf16_tu(vd, vs2, vl);
+}
+
+// CHECK-RV64-LABEL: define dso_local <vscale x 1 x i8> @test_vfncvt_x_f_w_bf16mf4_i8mf8_tum(
+// CHECK-RV64-SAME: <vscale x 1 x i1> [[VM:%.*]], <vscale x 1 x i8> [[VD:%.*]], <vscale x 1 x bfloat> [[VS2:%.*]], i64 noundef [[VL:%.*]]) #[[ATTR0]] {
+// CHECK-RV64-NEXT: [[ENTRY:.*:]]
+// CHECK-RV64-NEXT: [[TMP0:%.*]] = call <vscale x 1 x i8> @llvm.riscv.vfncvt.x.f.w.mask.nxv1i8.nxv1bf16.i64(<vscale x 1 x i8> [[VD]], <vscale x 1 x bfloat> [[VS2]], <vscale x 1 x i1> [[VM]], i64 7, i64 [[VL]], i64 2)
+// CHECK-RV64-NEXT: ret <vscale x 1 x i8> [[TMP0]]
+//
+vint8mf8_t test_vfncvt_x_f_w_bf16mf4_i8mf8_tum(vbool64_t vm, vint8mf8_t vd,
+ vbfloat16mf4_t vs2, size_t vl) {
+ return __riscv_vfncvt_x_tum(vm, vd, vs2, vl);
+}
+
+// CHECK-RV64-LABEL: define dso_local <vscale x 2 x i8> @test_vfncvt_x_f_w_bf16mf2_i8mf4_tum(
+// CHECK-RV64-SAME: <vscale x 2 x i1> [[VM:%.*]], <vscale x 2 x i8> [[VD:%.*]], <vscale x 2 x bfloat> [[VS2:%.*]], i64 noundef [[VL:%.*]]) #[[ATTR0]] {
+// CHECK-RV64-NEXT: [[ENTRY:.*:]]
+// CHECK-RV64-NEXT: [[TMP0:%.*]] = call <vscale x 2 x i8> @llvm.riscv.vfncvt.x.f.w.mask.nxv2i8.nxv2bf16.i64(<vscale x 2 x i8> [[VD]], <vscale x 2 x bfloat> [[VS2]], <vscale x 2 x i1> [[VM]], i64 7, i64 [[VL]], i64 2)
+// CHECK-RV64-NEXT: ret <vscale x 2 x i8> [[TMP0]]
+//
+vint8mf4_t test_vfncvt_x_f_w_bf16mf2_i8mf4_tum(vbool32_t vm, vint8mf4_t vd,
+ vbfloat16mf2_t vs2, size_t vl) {
+ return __riscv_vfncvt_x_tum(vm, vd, vs2, vl);
+}
+
+// CHECK-RV64-LABEL: define dso_local <vscale x 4 x i8> @test_vfncvt_x_f_w_bf16m1_i8mf2_tum(
+// CHECK-RV64-SAME: <vscale x 4 x i1> [[VM:%.*]], <vscale x 4 x i8> [[VD:%.*]], <vscale x 4 x bfloat> [[VS2:%.*]], i64 noundef [[VL:%.*]]) #[[ATTR0]] {
+// CHECK-RV64-NEXT: [[ENTRY:.*:]]
+// CHECK-RV64-NEXT: [[TMP0:%.*]] = call <vscale x 4 x i8> @llvm.riscv.vfncvt.x.f.w.mask.nxv4i8.nxv4bf16.i64(<vscale x 4 x i8> [[VD]], <vscale x 4 x bfloat> [[VS2]], <vscale x 4 x i1> [[VM]], i64 7, i64 [[VL]], i64 2)
+// CHECK-RV64-NEXT: ret <vscale x 4 x i8> [[TMP0]]
+//
+vint8mf2_t test_vfncvt_x_f_w_bf16m1_i8mf2_tum(vbool16_t vm, vint8mf2_t vd,
+ vbfloat16m1_t vs2, size_t vl) {
+ return __riscv_vfncvt_x_tum(vm, vd, vs2, vl);
+}
+
+// CHECK-RV64-LABEL: define dso_local <vscale x 8 x i8> @test_vfncvt_x_f_w_bf16m2_i8m1_tum(
+// CHECK-RV64-SAME: <vscale x 8 x i1> [[VM:%.*]], <vscale x 8 x i8> [[VD:%.*]], <vscale x 8 x bfloat> [[VS2:%.*]], i64 noundef [[VL:%.*]]) #[[ATTR0]] {
+// CHECK-RV64-NEXT: [[ENTRY:.*:]]
+// CHECK-RV64-NEXT: [[TMP0:%.*]] = call <vscale x 8 x i8> @llvm.riscv.vfncvt.x.f.w.mask.nxv8i8.nxv8bf16.i64(<vscale x 8 x i8> [[VD]], <vscale x 8 x bfloat> [[VS2]], <vscale x 8 x i1> [[VM]], i64 7, i64 [[VL]], i64 2)
+// CHECK-RV64-NEXT: ret <vscale x 8 x i8> [[TMP0]]
+//
+vint8m1_t test_vfncvt_x_f_w_bf16m2_i8m1_tum(vbool8_t vm, vint8m1_t vd,
+ vbfloat16m2_t vs2, size_t vl) {
+ return __riscv_vfncvt_x_tum(vm, vd, vs2, vl);
+}
+
+// CHECK-RV64-LABEL: define dso_local <vscale x 16 x i8> @test_vfncvt_x_f_w_bf16m4_i8m2_tum(
+// CHECK-RV64-SAME: <vscale x 16 x i1> [[VM:%.*]], <vscale x 16 x i8> [[VD:%.*]], <vscale x 16 x bfloat> [[VS2:%.*]], i64 noundef [[VL:%.*]]) #[[ATTR0]] {
+// CHECK-RV64-NEXT: [[ENTRY:.*:]]
+// CHECK-RV64-NEXT: [[TMP0:%.*]] = call <vscale x 16 x i8> @llvm.riscv.vfncvt.x.f.w.mask.nxv16i8.nxv16bf16.i64(<vscale x 16 x i8> [[VD]], <vscale x 16 x bfloat> [[VS2]], <vscale x 16 x i1> [[VM]], i64 7, i64 [[VL]], i64 2)
+// CHECK-RV64-NEXT: ret <vscale x 16 x i8> [[TMP0]]
+//
+vint8m2_t test_vfncvt_x_f_w_bf16m4_i8m2_tum(vbool4_t vm, vint8m2_t vd,
+ vbfloat16m4_t vs2, size_t vl) {
+ return __riscv_vfncvt_x_tum(vm, vd, vs2, vl);
+}
+
+// CHECK-RV64-LABEL: define dso_local <vscale x 32 x i8> @test_vfncvt_x_f_w_bf16m8_i8m4_tum(
+// CHECK-RV64-SAME: <vscale x 32 x i1> [[VM:%.*]], <vscale x 32 x i8> [[VD:%.*]], <vscale x 32 x bfloat> [[VS2:%.*]], i64 noundef [[VL:%.*]]) #[[ATTR0]] {
+// CHECK-RV64-NEXT: [[ENTRY:.*:]]
+// CHECK-RV64-NEXT: [[TMP0:%.*]] = call <vscale x 32 x i8> @llvm.riscv.vfncvt.x.f.w.mask.nxv32i8.nxv32bf16.i64(<vscale x 32 x i8> [[VD]], <vscale x 32 x bfloat> [[VS2]], <vscale x 32 x i1> [[VM]], i64 7, i64 [[VL]], i64 2)
+// CHECK-RV64-NEXT: ret <vscale x 32 x i8> [[TMP0]]
+//
+vint8m4_t test_vfncvt_x_f_w_bf16m8_i8m4_tum(vbool2_t vm, vint8m4_t vd,
+ vbfloat16m8_t vs2, size_t vl) {
+ return __riscv_vfncvt_x_tum(vm, vd, vs2, vl);
+}
+
+// CHECK-RV64-LABEL: define dso_local <vscale x 1 x i8> @test_vfncvt_xu_f_w_bf16mf4_u8mf8_tum(
+// CHECK-RV64-SAME: <vscale x 1 x i1> [[VM:%.*]], <vscale x 1 x i8> [[VD:%.*]], <vscale x 1 x bfloat> [[VS2:%.*]], i64 noundef [[VL:%.*]]) #[[ATTR0]] {
+// CHECK-RV64-NEXT: [[ENTRY:.*:]]
+// CHECK-RV64-NEXT: [[TMP0:%.*]] = call <vscale x 1 x i8> @llvm.riscv.vfncvt.xu.f.w.mask.nxv1i8.nxv1bf16.i64(<vscale x 1 x i8> [[VD]], <vscale x 1 x bfloat> [[VS2]], <vscale x 1 x i1> [[VM]], i64 7, i64 [[VL]], i64 2)
+// CHECK-RV64-NEXT: ret <vscale x 1 x i8> [[TMP0]]
+//
+vuint8mf8_t test_vfncvt_xu_f_w_bf16mf4_u8mf8_tum(vbool64_t vm, vuint8mf8_t vd,
+ vbfloat16mf4_t vs2,
+ size_t vl) {
+ return __riscv_vfncvt_xu_tum(vm, vd, vs2, vl);
+}
+
+// CHECK-RV64-LABEL: define dso_local <vscale x 2 x i8> @test_vfncvt_xu_f_w_bf16mf2_u8mf4_tum(
+// CHECK-RV64-SAME: <vscale x 2 x i1> [[VM:%.*]], <vscale x 2 x i8> [[VD:%.*]], <vscale x 2 x bfloat> [[VS2:%.*]], i64 noundef [[VL:%.*]]) #[[ATTR0]] {
+// CHECK-RV64-NEXT: [[ENTRY:.*:]]
+// CHECK-RV64-NEXT: [[TMP0:%.*]] = call <vscale x 2 x i8> @llvm.riscv.vfncvt.xu.f.w.mask.nxv2i8.nxv2bf16.i64(<vscale x 2 x i8> [[VD]], <vscale x 2 x bfloat> [[VS2]], <vscale x 2 x i1> [[VM]], i64 7, i64 [[VL]], i64 2)
+// CHECK-RV64-NEXT: ret <vscale x 2 x i8> [[TMP0]]
+//
+vuint8mf4_t test_vfncvt_xu_f_w_bf16mf2_u8mf4_tum(vbool32_t vm, vuint8mf4_t vd,
+ vbfloat16mf2_t vs2,
+ size_t vl) {
+ return __riscv_vfncvt_xu_tum(vm, vd, vs2, vl);
+}
+
+// CHECK-RV64-LABEL: define dso_local <vscale x 4 x i8> @test_vfncvt_xu_f_w_bf16m1_u8mf2_tum(
+// CHECK-RV64-SAME: <vscale x 4 x i1> [[VM:%.*]], <vscale x 4 x i8> [[VD:%.*]], <vscale x 4 x bfloat> [[VS2:%.*]], i64 noundef [[VL:%.*]]) #[[ATTR0]] {
+// CHECK-RV64-NEXT: [[ENTRY:.*:]]
+// CHECK-RV64-NEXT: [[TMP0:%.*]] = call <vscale x 4 x i8> @llvm.riscv.vfncvt.xu.f.w.mask.nxv4i8.nxv4bf16.i64(<vscale x 4 x i8> [[VD]], <vscale x 4 x bfloat> [[VS2]], <vscale x 4 x i1> [[VM]], i64 7, i64 [[VL]], i64 2)
+// CHECK-RV64-NEXT: ret <vscale x 4 x i8> [[TMP0]]
+//
+vuint8mf2_t test_vfncvt_xu_f_w_bf16m1_u8mf2_tum(vbool16_t vm, vuint8mf2_t vd,
+ vbfloat16m1_t vs2, size_t vl) {
+ return __riscv_vfncvt_xu_tum(vm, vd, vs2, vl);
+}
+
+// CHECK-RV64-LABEL: define dso_local <vscale x 8 x i8> @test_vfncvt_xu_f_w_bf16m2_u8m1_tum(
+// CHECK-RV64-SAME: <vscale x 8 x i1> [[VM:%.*]], <vscale x 8 x i8> [[VD:%.*]], <vscale x 8 x bfloat> [[VS2:%.*]], i64 noundef [[VL:%.*]]) #[[ATTR0]] {
+// CHECK-RV64-NEXT: [[ENTRY:.*:]]
+// CHECK-RV64-NEXT: [[TMP0:%.*]] = call <vscale x 8 x i8> @llvm.riscv.vfncvt.xu.f.w.mask.nxv8i8.nxv8bf16.i64(<vscale x 8 x i8> [[VD]], <vscale x 8 x bfloat> [[VS2]], <vscale x 8 x i1> [[VM]], i64 7, i64 [[VL]], i64 2)
+// CHECK-RV64-NEXT: ret <vscale x 8 x i8> [[TMP0]]
+//
+vuint8m1_t test_vfncvt_xu_f_w_bf16m2_u8m1_tum(vbool8_t vm, vuint8m1_t vd,
+ vbfloat16m2_t vs2, size_t vl) {
+ return __riscv_vfncvt_xu_tum(vm, vd, vs2, vl);
+}
+
+// CHECK-RV64-LABEL: define dso_local <vscale x 16 x i8> @test_vfncvt_xu_f_w_bf16m4_u8m2_tum(
+// CHECK-RV64-SAME: <vscale x 16 x i1> [[VM:%.*]], <vscale x 16 x i8> [[VD:%.*]], <vscale x 16 x bfloat> [[VS2:%.*]], i64 noundef [[VL:%.*]]) #[[ATTR0]] {
+// CHECK-RV64-NEXT: [[ENTRY:.*:]]
+// CHECK-RV64-NEXT: [[TMP0:%.*]] = call <vscale x 16 x i8> @llvm.riscv.vfncvt.xu.f.w.mask.nxv16i8.nxv16bf16.i64(<vscale x 16 x i8> [[VD]], <vscale x 16 x bfloat> [[VS2]], <vscale x 16 x i1> [[VM]], i64 7, i64 [[VL]], i64 2)
+// CHECK-RV64-NEXT: ret <vscale x 16 x i8> [[TMP0]]
+//
+vuint8m2_t test_vfncvt_xu_f_w_bf16m4_u8m2_tum(vbool4_t vm, vuint8m2_t vd,
+ vbfloat16m4_t vs2, size_t vl) {
+ return __riscv_vfncvt_xu_tum(vm, vd, vs2, vl);
+}
+
+// CHECK-RV64-LABEL: define dso_local <vscale x 32 x i8> @test_vfncvt_xu_f_w_bf16m8_u8m4_tum(
+// CHECK-RV64-SAME: <vscale x 32 x i1> [[VM:%.*]], <vscale x 32 x i8> [[VD:%.*]], <vscale x 32 x bfloat> [[VS2:%.*]], i64 noundef [[VL:%.*]]) #[[ATTR0]] {
+// CHECK-RV64-NEXT: [[ENTRY:.*:]]
+// CHECK-RV64-NEXT: [[TMP0:%.*]] = call <vscale x 32 x i8> @llvm.riscv.vfncvt.xu.f.w.mask.nxv32i8.nxv32bf16.i64(<vscale x 32 x i8> [[VD]], <vscale x 32 x bfloat> [[VS2]], <vscale x 32 x i1> [[VM]], i64 7, i64 [[VL]], i64 2)
+// CHECK-RV64-NEXT: ret <vscale x 32 x i8> [[TMP0]]
+//
+vuint8m4_t test_vfncvt_xu_f_w_bf16m8_u8m4_tum(vbool2_t vm, vuint8m4_t vd,
+ vbfloat16m8_t vs2, size_t vl) {
+ return __riscv_vfncvt_xu_tum(vm, vd, vs2, vl);
+}
+
+// CHECK-RV64-LABEL: define dso_local <vscale x 1 x bfloat> @test_vfncvt_f_f_w_bf16mf4_tum(
+// CHECK-RV64-SAME: <vscale x 1 x i1> [[VM:%.*]], <vscale x 1 x bfloat> [[VD:%.*]], <vscale x 1 x float> [[VS2:%.*]], i64 noundef [[VL:%.*]]) #[[ATTR0]] {
+// CHECK-RV64-NEXT: [[ENTRY:.*:]]
+// CHECK-RV64-NEXT: [[TMP0:%.*]] = call <vscale x 1 x bfloat> @llvm.riscv.vfncvt.f.f.w.mask.nxv1bf16.nxv1f32.i64(<vscale x 1 x bfloat> [[VD]], <vscale x 1 x float> [[VS2]], <vscale x 1 x i1> [[VM]], i64 7, i64 [[VL]], i64 2)
+// CHECK-RV64-NEXT: ret <vscale x 1 x bfloat> [[TMP0]]
+//
+vbfloat16mf4_t test_vfncvt_f_f_w_bf16mf4_tum(vbool64_t vm, vbfloat16mf4_t vd,
+ vfloat32mf2_t vs2, size_t vl) {
+ return __riscv_vfncvt_f_bf16_tum(vm, vd, vs2, vl);
+}
+
+// CHECK-RV64-LABEL: define dso_local <vscale x 2 x bfloat> @test_vfncvt_f_f_w_bf16mf2_tum(
+// CHECK-RV64-SAME: <vscale x 2 x i1> [[VM:%.*]], <vscale x 2 x bfloat> [[VD:%.*]], <vscale x 2 x float> [[VS2:%.*]], i64 noundef [[VL:%.*]]) #[[ATTR0]] {
+// CHECK-RV64-NEXT: [[ENTRY:.*:]]
+// CHECK-RV64-NEXT: [[TMP0:%.*]] = call <vscale x 2 x bfloat> @llvm.riscv.vfncvt.f.f.w.mask.nxv2bf16.nxv2f32.i64(<vscale x 2 x bfloat> [[VD]], <vscale x 2 x float> [[VS2]], <vscale x 2 x i1> [[VM]], i64 7, i64 [[VL]], i64 2)
+// CHECK-RV64-NEXT: ret <vscale x 2 x bfloat> [[TMP0]]
+//
+vbfloat16mf2_t test_vfncvt_f_f_w_bf16mf2_tum(vbool32_t vm, vbfloat16mf2_t vd,
+ vfloat32m1_t vs2, size_t vl) {
+ return __riscv_vfncvt_f_bf16_tum(vm, vd, vs2, vl);
+}
+
+// CHECK-RV64-LABEL: define dso_local <vscale x 4 x bfloat> @test_vfncvt_f_f_w_bf16m1_tum(
+// CHECK-RV64-SAME: <vscale x 4 x i1> [[VM:%.*]], <vscale x 4 x bfloat> [[VD:%.*]], <vscale x 4 x float> [[VS2:%.*]], i64 noundef [[VL:%.*]]) #[[ATTR0]] {
+// CHECK-RV64-NEXT: [[ENTRY:.*:]]
+// CHECK-RV64-NEXT: [[TMP0:%.*]] = call <vscale x 4 x bfloat> @llvm.riscv.vfncvt.f.f.w.mask.nxv4bf16.nxv4f32.i64(<vscale x 4 x bfloat> [[VD]], <vscale x 4 x float> [[VS2]], <vscale x 4 x i1> [[VM]], i64 7, i64 [[VL]], i64 2)
+// CHECK-RV64-NEXT: ret <vscale x 4 x bfloat> [[TMP0]]
+//
+vbfloat16m1_t test_vfncvt_f_f_w_bf16m1_tum(vbool16_t vm, vbfloat16m1_t vd,
+ vfloat32m2_t vs2, size_t vl) {
+ return __riscv_vfncvt_f_bf16_tum(vm, vd, vs2, vl);
+}
+
+// CHECK-RV64-LABEL: define dso_local <vscale x 8 x bfloat> @test_vfncvt_f_f_w_bf16m2_tum(
+// CHECK-RV64-SAME: <vscale x 8 x i1> [[VM:%.*]], <vscale x 8 x bfloat> [[VD:%.*]], <vscale x 8 x float> [[VS2:%.*]], i64 noundef [[VL:%.*]]) #[[ATTR0]] {
+// CHECK-RV64-NEXT: [[ENTRY:.*:]]
+// CHECK-RV64-NEXT: [[TMP0:%.*]] = call <vscale x 8 x bfloat> @llvm.riscv.vfncvt.f.f.w.mask.nxv8bf16.nxv8f32.i64(<vscale x 8 x bfloat> [[VD]], <vscale x 8 x float> [[VS2]], <vscale x 8 x i1> [[VM]], i64 7, i64 [[VL]], i64 2)
+// CHECK-RV64-NEXT: ret <vscale x 8 x bfloat> [[TMP0]]
+//
+vbfloat16m2_t test_vfncvt_f_f_w_bf16m2_tum(vbool8_t vm, vbfloat16m2_t vd,
+ vfloat32m4_t vs2, size_t vl) {
+ return __riscv_vfncvt_f_bf16_tum(vm, vd, vs2, vl);
+}
+
+// CHECK-RV64-LABEL: define dso_local <vscale x 16 x bfloat> @test_vfncvt_f_f_w_bf16m4_tum(
+// CHECK-RV64-SAME: <vscale x 16 x i1> [[VM:%.*]], <vscale x 16 x bfloat> [[VD:%.*]], <vscale x 16 x float> [[VS2:%.*]], i64 noundef [[VL:%.*]]) #[[ATTR0]] {
+// CHECK-RV64-NEXT: [[ENTRY:.*:]]
+// CHECK-RV64-NEXT: [[TMP0:%.*]] = call <vscale x 16 x bfloat> @llvm.riscv.vfncvt.f.f.w.mask.nxv16bf16.nxv16f32.i64(<vscale x 16 x bfloat> [[VD]], <vscale x 16 x float> [[VS2]], <vscale x 16 x i1> [[VM]], i64 7, i64 [[VL]], i64 2)
+// CHECK-RV64-NEXT: ret <vscale x 16 x bfloat> [[TMP0]]
+//
+vbfloat16m4_t test_vfncvt_f_f_w_bf16m4_tum(vbool4_t vm, vbfloat16m4_t vd,
+ vfloat32m8_t vs2, size_t vl) {
+ return __riscv_vfncvt_f_bf16_tum(vm, vd, vs2, vl);
+}
+
+// CHECK-RV64-LABEL: define dso_local <vscale x 1 x i8> @test_vfncvt_x_f_w_bf16mf4_i8mf8_tumu(
+// CHECK-RV64-SAME: <vscale x 1 x i1> [[VM:%.*]], <vscale x 1 x i8> [[VD:%.*]], <vscale x 1 x bfloat> [[VS2:%.*]], i64 noundef [[VL:%.*]]) #[[ATTR0]] {
+// CHECK-RV64-NEXT: [[ENTRY:.*:]]
+// CHECK-RV64-NEXT: [[TMP0:%.*]] = call <vscale x 1 x i8> @llvm.riscv.vfncvt.x.f.w.mask.nxv1i8.nxv1bf16.i64(<vscale x 1 x i8> [[VD]], <vscale x 1 x bfloat> [[VS2]], <vscale x 1 x i1> [[VM]], i64 7, i64 [[VL]], i64 0)
+// CHECK-RV64-NEXT: ret <vscale x 1 x i8> [[TMP0]]
+//
+vint8mf8_t test_vfncvt_x_f_w_bf16mf4_i8mf8_tumu(vbool64_t vm, vint8mf8_t vd,
+ vbfloat16mf4_t vs2, size_t vl) {
+ return __riscv_vfncvt_x_tumu(vm, vd, vs2, vl);
+}
+
+// CHECK-RV64-LABEL: define dso_local <vscale x 2 x i8> @test_vfncvt_x_f_w_bf16mf2_i8mf4_tumu(
+// CHECK-RV64-SAME: <vscale x 2 x i1> [[VM:%.*]], <vscale x 2 x i8> [[VD:%.*]], <vscale x 2 x bfloat> [[VS2:%.*]], i64 noundef [[VL:%.*]]) #[[ATTR0]] {
+// CHECK-RV64-NEXT: [[ENTRY:.*:]]
+// CHECK-RV64-NEXT: [[TMP0:%.*]] = call <vscale x 2 x i8> @llvm.riscv.vfncvt.x.f.w.mask.nxv2i8.nxv2bf16.i64(<vscale x 2 x i8> [[VD]], <vscale x 2 x bfloat> [[VS2]], <vscale x 2 x i1> [[VM]], i64 7, i64 [[VL]], i64 0)
+// CHECK-RV64-NEXT: ret <vscale x 2 x i8> [[TMP0]]
+//
+vint8mf4_t test_vfncvt_x_f_w_bf16mf2_i8mf4_tumu(vbool32_t vm, vint8mf4_t vd,
+ vbfloat16mf2_t vs2, size_t vl) {
+ return __riscv_vfncvt_x_tumu(vm, vd, vs2, vl);
+}
+
+// CHECK-RV64-LABEL: define dso_local <vscale x 4 x i8> @test_vfncvt_x_f_w_bf16m1_i8mf2_tumu(
+// CHECK-RV64-SAME: <vscale x 4 x i1> [[VM:%.*]], <vscale x 4 x i8> [[VD:%.*]], <vscale x 4 x bfloat> [[VS2:%.*]], i64 noundef [[VL:%.*]]) #[[ATTR0]] {
+// CHECK-RV64-NEXT: [[ENTRY:.*:]]
+// CHECK-RV64-NEXT: [[TMP0:%.*]] = call <vscale x 4 x i8> @llvm.riscv.vfncvt.x.f.w.mask.nxv4i8.nxv4bf16.i64(<vscale x 4 x i8> [[VD]], <vscale x 4 x bfloat> [[VS2]], <vscale x 4 x i1> [[VM]], i64 7, i64 [[VL]], i64 0)
+// CHECK-RV64-NEXT: ret <vscale x 4 x i8> [[TMP0]]
+//
+vint8mf2_t test_vfncvt_x_f_w_bf16m1_i8mf2_tumu(vbool16_t vm, vint8mf2_t vd,
+ vbfloat16m1_t vs2, size_t vl) {
+ return __riscv_vfncvt_x_tumu(vm, vd, vs2, vl);
+}
+
+// CHECK-RV64-LABEL: define dso_local <vscale x 8 x i8> @test_vfncvt_x_f_w_bf16m2_i8m1_tumu(
+// CHECK-RV64-SAME: <vscale x 8 x i1> [[VM:%.*]], <vscale x 8 x i8> [[VD:%.*]], <vscale x 8 x bfloat> [[VS2:%.*]], i64 noundef [[VL:%.*]]) #[[ATTR0]] {
+// CHECK-RV64-NEXT: [[ENTRY:.*:]]
+// CHECK-RV64-NEXT: [[TMP0:%.*]] = call <vscale x 8 x i8> @llvm.riscv.vfncvt.x.f.w.mask.nxv8i8.nxv8bf16.i64(<vscale x 8 x i8> [[VD]], <vscale x 8 x bfloat> [[VS2]], <vscale x 8 x i1> [[VM]], i64 7, i64 [[VL]], i64 0)
+// CHECK-RV64-NEXT: ret <vscale x 8 x i8> [[TMP0]]
+//
+vint8m1_t test_vfncvt_x_f_w_bf16m2_i8m1_tumu(vbool8_t vm, vint8m1_t vd,
+ vbfloat16m2_t vs2, size_t vl) {
+ return __riscv_vfncvt_x_tumu(vm, vd, vs2, vl);
+}
+
+// CHECK-RV64-LABEL: define dso_local <vscale x 16 x i8> @test_vfncvt_x_f_w_bf16m4_i8m2_tumu(
+// CHECK-RV64-SAME: <vscale x 16 x i1> [[VM:%.*]], <vscale x 16 x i8> [[VD:%.*]], <vscale x 16 x bfloat> [[VS2:%.*]], i64 noundef [[VL:%.*]]) #[[ATTR0]] {
+// CHECK-RV64-NEXT: [[ENTRY:.*:]]
+// CHECK-RV64-NEXT: [[TMP0:%.*]] = call <vscale x 16 x i8> @llvm.riscv.vfncvt.x.f.w.mask.nxv16i8.nxv16bf16.i64(<vscale x 16 x i8> [[VD]], <vscale x 16 x bfloat> [[VS2]], <vscale x 16 x i1> [[VM]], i64 7, i64 [[VL]], i64 0)
+// CHECK-RV64-NEXT: ret <vscale x 16 x i8> [[TMP0]]
+//
+vint8m2_t test_vfncvt_x_f_w_bf16m4_i8m2_tumu(vbool4_t vm, vint8m2_t vd,
+ vbfloat16m4_t vs2, size_t vl) {
+ return __riscv_vfncvt_x_tumu(vm, vd, vs2, vl);
+}
+
+// CHECK-RV64-LABEL: define dso_local <vscale x 32 x i8> @test_vfncvt_x_f_w_bf16m8_i8m4_tumu(
+// CHECK-RV64-SAME: <vscale x 32 x i1> [[VM:%.*]], <vscale x 32 x i8> [[VD:%.*]], <vscale x 32 x bfloat> [[VS2:%.*]], i64 noundef [[VL:%.*]]) #[[ATTR0]] {
+// CHECK-RV64-NEXT: [[ENTRY:.*:]]
+// CHECK-RV64-NEXT: [[TMP0:%.*]] = call <vscale x 32 x i8> @llvm.riscv.vfncvt.x.f.w.mask.nxv32i8.nxv32bf16.i64(<vscale x 32 x i8> [[VD]], <vscale x 32 x bfloat> [[VS2]], <vscale x 32 x i1> [[VM]], i64 7, i64 [[VL]], i64 0)
+// CHECK-RV64-NEXT: ret <vscale x 32 x i8> [[TMP0]]
+//
+vint8m4_t test_vfncvt_x_f_w_bf16m8_i8m4_tumu(vbool2_t vm, vint8m4_t vd,
+ vbfloat16m8_t vs2, size_t vl) {
+ return __riscv_vfncvt_x_tumu(vm, vd, vs2, vl);
+}
+
+// CHECK-RV64-LABEL: define dso_local <vscale x 1 x i8> @test_vfncvt_xu_f_w_bf16mf4_u8mf8_tumu(
+// CHECK-RV64-SAME: <vscale x 1 x i1> [[VM:%.*]], <vscale x 1 x i8> [[VD:%.*]], <vscale x 1 x bfloat> [[VS2:%.*]], i64 noundef [[VL:%.*]]) #[[ATTR0]] {
+// CHECK-RV64-NEXT: [[ENTRY:.*:]]
+// CHECK-RV64-NEXT: [[TMP0:%.*]] = call <vscale x 1 x i8> @llvm.riscv.vfncvt.xu.f.w.mask.nxv1i8.nxv1bf16.i64(<vscale x 1 x i8> [[VD]], <vscale x 1 x bfloat> [[VS2]], <vscale x 1 x i1> [[VM]], i64 7, i64 [[VL]], i64 0)
+// CHECK-RV64-NEXT: ret <vscale x 1 x i8> [[TMP0]]
+//
+vuint8mf8_t test_vfncvt_xu_f_w_bf16mf4_u8mf8_tumu(vbool64_t vm, vuint8mf8_t vd,
+ vbfloat16mf4_t vs2,
+ size_t vl) {
+ return __riscv_vfncvt_xu_tumu(vm, vd, vs2, vl);
+}
+
+// CHECK-RV64-LABEL: define dso_local <vscale x 2 x i8> @test_vfncvt_xu_f_w_bf16mf2_u8mf4_tumu(
+// CHECK-RV64-SAME: <vscale x 2 x i1> [[VM:%.*]], <vscale x 2 x i8> [[VD:%.*]], <vscale x 2 x bfloat> [[VS2:%.*]], i64 noundef [[VL:%.*]]) #[[ATTR0]] {
+// CHECK-RV64-NEXT: [[ENTRY:.*:]]
+// CHECK-RV64-NEXT: [[TMP0:%.*]] = call <vscale x 2 x i8> @llvm.riscv.vfncvt.xu.f.w.mask.nxv2i8.nxv2bf16.i64(<vscale x 2 x i8> [[VD]], <vscale x 2 x bfloat> [[VS2]], <vscale x 2 x i1> [[VM]], i64 7, i64 [[VL]], i64 0)
+// CHECK-RV64-NEXT: ret <vscale x 2 x i8> [[TMP0]]
+//
+vuint8mf4_t test_vfncvt_xu_f_w_bf16mf2_u8mf4_tumu(vbool32_t vm, vuint8mf4_t vd,
+ vbfloat16mf2_t vs2,
+ size_t vl) {
+ return __riscv_vfncvt_xu_tumu(vm, vd, vs2, vl);
+}
+
+// CHECK-RV64-LABEL: define dso_local <vscale x 4 x i8> @test_vfncvt_xu_f_w_bf16m1_u8mf2_tumu(
+// CHECK-RV64-SAME: <vscale x 4 x i1> [[VM:%.*]], <vscale x 4 x i8> [[VD:%.*]], <vscale x 4 x bfloat> [[VS2:%.*]], i64 noundef [[VL:%.*]]) #[[ATTR0]] {
+// CHECK-RV64-NEXT: [[ENTRY:.*:]]
+// CHECK-RV64-NEXT: [[TMP0:%.*]] = call <vscale x 4 x i8> @llvm.riscv.vfncvt.xu.f.w.mask.nxv4i8.nxv4bf16.i64(<vscale x 4 x i8> [[VD]], <vscale x 4 x bfloat> [[VS2]], <vscale x 4 x i1> [[VM]], i64 7, i64 [[VL]], i64 0)
+// CHECK-RV64-NEXT: ret <vscale x 4 x i8> [[TMP0]]
+//
+vuint8mf2_t test_vfncvt_xu_f_w_bf16m1_u8mf2_tumu(vbool16_t vm, vuint8mf2_t vd,
+ vbfloat16m1_t vs2, size_t vl) {
+ return __riscv_vfncvt_xu_tumu(vm, vd, vs2, vl);
+}
+
+// CHECK-RV64-LABEL: define dso_local <vscale x 8 x i8> @test_vfncvt_xu_f_w_bf16m2_u8m1_tumu(
+// CHECK-RV64-SAME: <vscale x 8 x i1> [[VM:%.*]], <vscale x 8 x i8> [[VD:%.*]], <vscale x 8 x bfloat> [[VS2:%.*]], i64 noundef [[VL:%.*]]) #[[ATTR0]] {
+// CHECK-RV64-NEXT: [[ENTRY:.*:]]
+// CHECK-RV64-NEXT: [[TMP0:%.*]] = call <vscale x 8 x i8> @llvm.riscv.vfncvt.xu.f.w.mask.nxv8i8.nxv8bf16.i64(<vscale x 8 x i8> [[VD]], <vscale x 8 x bfloat> [[VS2]], <vscale x 8 x i1> [[VM]], i64 7, i64 [[VL]], i64 0)
+// CHECK-RV64-NEXT: ret <vscale x 8 x i8> [[TMP0]]
+//
+vuint8m1_t test_vfncvt_xu_f_w_bf16m2_u8m1_tumu(vbool8_t vm, vuint8m1_t vd,
+ vbfloat16m2_t vs2, size_t vl) {
+ return __riscv_vfncvt_xu_tumu(vm, vd, vs2, vl);
+}
+
+// CHECK-RV64-LABEL: define dso_local <vscale x 16 x i8> @test_vfncvt_xu_f_w_bf16m4_u8m2_tumu(
+// CHECK-RV64-SAME: <vscale x 16 x i1> [[VM:%.*]], <vscale x 16 x i8> [[VD:%.*]], <vscale x 16 x bfloat> [[VS2:%.*]], i64 noundef [[VL:%.*]]) #[[ATTR0]] {
+// CHECK-RV64-NEXT: [[ENTRY:.*:]]
+// CHECK-RV64-NEXT: [[TMP0:%.*]] = call <vscale x 16 x i8> @llvm.riscv.vfncvt.xu.f.w.mask.nxv16i8.nxv16bf16.i64(<vscale x 16 x i8> [[VD]], <vscale x 16 x bfloat> [[VS2]], <vscale x 16 x i1> [[VM]], i64 7, i64 [[VL]], i64 0)
+// CHECK-RV64-NEXT: ret <vscale x 16 x i8> [[TMP0]]
+//
+vuint8m2_t test_vfncvt_xu_f_w_bf16m4_u8m2_tumu(vbool4_t vm, vuint8m2_t vd,
+ vbfloat16m4_t vs2, size_t vl) {
+ return __riscv_vfncvt_xu_tumu(vm, vd, vs2, vl);
+}
+
+// CHECK-RV64-LABEL: define dso_local <vscale x 32 x i8> @test_vfncvt_xu_f_w_bf16m8_u8m4_tumu(
+// CHECK-RV64-SAME: <vscale x 32 x i1> [[VM:%.*]], <vscale x 32 x i8> [[VD:%.*]], <vscale x 32 x bfloat> [[VS2:%.*]], i64 noundef [[VL:%.*]]) #[[ATTR0]] {
+// CHECK-RV64-NEXT: [[ENTRY:.*:]]
+// CHECK-RV64-NEXT: [[TMP0:%.*]] = call <vscale x 32 x i8> @llvm.riscv.vfncvt.xu.f.w.mask.nxv32i8.nxv32bf16.i64(<vscale x 32 x i8> [[VD]], <vscale x 32 x bfloat> [[VS2]], <vscale x 32 x i1> [[VM]], i64 7, i64 [[VL]], i64 0)
+// CHECK-RV64-NEXT: ret <vscale x 32 x i8> [[TMP0]]
+//
+vuint8m4_t test_vfncvt_xu_f_w_bf16m8_u8m4_tumu(vbool2_t vm, vuint8m4_t vd,
+ vbfloat16m8_t vs2, size_t vl) {
+ return __riscv_vfncvt_xu_tumu(vm, vd, vs2, vl);
+}
+
+// CHECK-RV64-LABEL: define dso_local <vscale x 1 x bfloat> @test_vfncvt_f_f_w_bf16mf4_tumu(
+// CHECK-RV64-SAME: <vscale x 1 x i1> [[VM:%.*]], <vscale x 1 x bfloat> [[VD:%.*]], <vscale x 1 x float> [[VS2:%.*]], i64 noundef [[VL:%.*]]) #[[ATTR0]] {
+// CHECK-RV64-NEXT: [[ENTRY:.*:]]
+// CHECK-RV64-NEXT: [[TMP0:%.*]] = call <vscale x 1 x bfloat> @llvm.riscv.vfncvt.f.f.w.mask.nxv1bf16.nxv1f32.i64(<vscale x 1 x bfloat> [[VD]], <vscale x 1 x float> [[VS2]], <vscale x 1 x i1> [[VM]], i64 7, i64 [[VL]], i64 0)
+// CHECK-RV64-NEXT: ret <vscale x 1 x bfloat> [[TMP0]]
+//
+vbfloat16mf4_t test_vfncvt_f_f_w_bf16mf4_tumu(vbool64_t vm, vbfloat16mf4_t vd,
+ vfloat32mf2_t vs2, size_t vl) {
+ return __riscv_vfncvt_f_bf16_tumu(vm, vd, vs2, vl);
+}
+
+// CHECK-RV64-LABEL: define dso_local <vscale x 2 x bfloat> @test_vfncvt_f_f_w_bf16mf2_tumu(
+// CHECK-RV64-SAME: <vscale x 2 x i1> [[VM:%.*]], <vscale x 2 x bfloat> [[VD:%.*]], <vscale x 2 x float> [[VS2:%.*]], i64 noundef [[VL:%.*]]) #[[ATTR0]] {
+// CHECK-RV64-NEXT: [[ENTRY:.*:]]
+// CHECK-RV64-NEXT: [[TMP0:%.*]] = call <vscale x 2 x bfloat> @llvm.riscv.vfncvt.f.f.w.mask.nxv2bf16.nxv2f32.i64(<vscale x 2 x bfloat> [[VD]], <vscale x 2 x float> [[VS2]], <vscale x 2 x i1> [[VM]], i64 7, i64 [[VL]], i64 0)
+// CHECK-RV64-NEXT: ret <vscale x 2 x bfloat> [[TMP0]]
+//
+vbfloat16mf2_t test_vfncvt_f_f_w_bf16mf2_tumu(vbool32_t vm, vbfloat16mf2_t vd,
+ vfloat32m1_t vs2, size_t vl) {
+ return __riscv_vfncvt_f_bf16_tumu(vm, vd, vs2, vl);
+}
+
+// CHECK-RV64-LABEL: define dso_local <vscale x 4 x bfloat> @test_vfncvt_f_f_w_bf16m1_tumu(
+// CHECK-RV64-SAME: <vscale x 4 x i1> [[VM:%.*]], <vscale x 4 x bfloat> [[VD:%.*]], <vscale x 4 x float> [[VS2:%.*]], i64 noundef [[VL:%.*]]) #[[ATTR0]] {
+// CHECK-RV64-NEXT: [[ENTRY:.*:]]
+// CHECK-RV64-NEXT: [[TMP0:%.*]] = call <vscale x 4 x bfloat> @llvm.riscv.vfncvt.f.f.w.mask.nxv4bf16.nxv4f32.i64(<vscale x 4 x bfloat> [[VD]], <vscale x 4 x float> [[VS2]], <vscale x 4 x i1> [[VM]], i64 7, i64 [[VL]], i64 0)
+// CHECK-RV64-NEXT: ret <vscale x 4 x bfloat> [[TMP0]]
+//
+vbfloat16m1_t test_vfncvt_f_f_w_bf16m1_tumu(vbool16_t vm, vbfloat16m1_t vd,
+ vfloat32m2_t vs2, size_t vl) {
+ return __riscv_vfncvt_f_bf16_tumu(vm, vd, vs2, vl);
+}
+
+// CHECK-RV64-LABEL: define dso_local <vscale x 8 x bfloat> @test_vfncvt_f_f_w_bf16m2_tumu(
+// CHECK-RV64-SAME: <vscale x 8 x i1> [[VM:%.*]], <vscale x 8 x bfloat> [[VD:%.*]], <vscale x 8 x float> [[VS2:%.*]], i64 noundef [[VL:%.*]]) #[[ATTR0]] {
+// CHECK-RV64-NEXT: [[ENTRY:.*:]]
+// CHECK-RV64-NEXT: [[TMP0:%.*]] = call <vscale x 8 x bfloat> @llvm.riscv.vfncvt.f.f.w.mask.nxv8bf16.nxv8f32.i64(<vscale x 8 x bfloat> [[VD]], <vscale x 8 x float> [[VS2]], <vscale x 8 x i1> [[VM]], i64 7, i64 [[VL]], i64 0)
+// CHECK-RV64-NEXT: ret <vscale x 8 x bfloat> [[TMP0]]
+//
+vbfloat16m2_t test_vfncvt_f_f_w_bf16m2_tumu(vbool8_t vm, vbfloat16m2_t vd,
+ vfloat32m4_t vs2, size_t vl) {
+ return __riscv_vfncvt_f_bf16_tumu(vm, vd, vs2, vl);
+}
+
+// CHECK-RV64-LABEL: define dso_local <vscale x 16 x bfloat> @test_vfncvt_f_f_w_bf16m4_tumu(
+// CHECK-RV64-SAME: <vscale x 16 x i1> [[VM:%.*]], <vscale x 16 x bfloat> [[VD:%.*]], <vscale x 16 x float> [[VS2:%.*]], i64 noundef [[VL:%.*]]) #[[ATTR0]] {
+// CHECK-RV64-NEXT: [[ENTRY:.*:]]
+// CHECK-RV64-NEXT: [[TMP0:%.*]] = call <vscale x 16 x bfloat> @llvm.riscv.vfncvt.f.f.w.mask.nxv16bf16.nxv16f32.i64(<vscale x 16 x bfloat> [[VD]], <vscale x 16 x float> [[VS2]], <vscale x 16 x i1> [[VM]], i64 7, i64 [[VL]], i64 0)
+// CHECK-RV64-NEXT: ret <vscale x 16 x bfloat> [[TMP0]]
+//
+vbfloat16m4_t test_vfncvt_f_f_w_bf16m4_tumu(vbool4_t vm, vbfloat16m4_t vd,
+ vfloat32m8_t vs2, size_t vl) {
+ return __riscv_vfncvt_f_bf16_tumu(vm, vd, vs2, vl);
+}
+
+// CHECK-RV64-LABEL: define dso_local <vscale x 1 x i8> @test_vfncvt_x_f_w_bf16mf4_i8mf8_mu(
+// CHECK-RV64-SAME: <vscale x 1 x i1> [[VM:%.*]], <vscale x 1 x i8> [[VD:%.*]], <vscale x 1 x bfloat> [[VS2:%.*]], i64 noundef [[VL:%.*]]) #[[ATTR0]] {
+// CHECK-RV64-NEXT: [[ENTRY:.*:]]
+// CHECK-RV64-NEXT: [[TMP0:%.*]] = call <vscale x 1 x i8> @llvm.riscv.vfncvt.x.f.w.mask.nxv1i8.nxv1bf16.i64(<vscale x 1 x i8> [[VD]], <vscale x 1 x bfloat> [[VS2]], <vscale x 1 x i1> [[VM]], i64 7, i64 [[VL]], i64 1)
+// CHECK-RV64-NEXT: ret <vscale x 1 x i8> [[TMP0]]
+//
+vint8mf8_t test_vfncvt_x_f_w_bf16mf4_i8mf8_mu(vbool64_t vm, vint8mf8_t vd,
+ vbfloat16mf4_t vs2, size_t vl) {
+ return __riscv_vfncvt_x_mu(vm, vd, vs2, vl);
+}
+
+// CHECK-RV64-LABEL: define dso_local <vscale x 2 x i8> @test_vfncvt_x_f_w_bf16mf2_i8mf4_mu(
+// CHECK-RV64-SAME: <vscale x 2 x i1> [[VM:%.*]], <vscale x 2 x i8> [[VD:%.*]], <vscale x 2 x bfloat> [[VS2:%.*]], i64 noundef [[VL:%.*]]) #[[ATTR0]] {
+// CHECK-RV64-NEXT: [[ENTRY:.*:]]
+// CHECK-RV64-NEXT: [[TMP0:%.*]] = call <vscale x 2 x i8> @llvm.riscv.vfncvt.x.f.w.mask.nxv2i8.nxv2bf16.i64(<vscale x 2 x i8> [[VD]], <vscale x 2 x bfloat> [[VS2]], <vscale x 2 x i1> [[VM]], i64 7, i64 [[VL]], i64 1)
+// CHECK-RV64-NEXT: ret <vscale x 2 x i8> [[TMP0]]
+//
+vint8mf4_t test_vfncvt_x_f_w_bf16mf2_i8mf4_mu(vbool32_t vm, vint8mf4_t vd,
+ vbfloat16mf2_t vs2, size_t vl) {
+ return __riscv_vfncvt_x_mu(vm, vd, vs2, vl);
+}
+
+// CHECK-RV64-LABEL: define dso_local <vscale x 4 x i8> @test_vfncvt_x_f_w_bf16m1_i8mf2_mu(
+// CHECK-RV64-SAME: <vscale x 4 x i1> [[VM:%.*]], <vscale x 4 x i8> [[VD:%.*]], <vscale x 4 x bfloat> [[VS2:%.*]], i64 noundef [[VL:%.*]]) #[[ATTR0]] {
+// CHECK-RV64-NEXT: [[ENTRY:.*:]]
+// CHECK-RV64-NEXT: [[TMP0:%.*]] = call <vscale x 4 x i8> @llvm.riscv.vfncvt.x.f.w.mask.nxv4i8.nxv4bf16.i64(<vscale x 4 x i8> [[VD]], <vscale x 4 x bfloat> [[VS2]], <vscale x 4 x i1> [[VM]], i64 7, i64 [[VL]], i64 1)
+// CHECK-RV64-NEXT: ret <vscale x 4 x i8> [[TMP0]]
+//
+vint8mf2_t test_vfncvt_x_f_w_bf16m1_i8mf2_mu(vbool16_t vm, vint8mf2_t vd,
+ vbfloat16m1_t vs2, size_t vl) {
+ return __riscv_vfncvt_x_mu(vm, vd, vs2, vl);
+}
+
+// CHECK-RV64-LABEL: define dso_local <vscale x 8 x i8> @test_vfncvt_x_f_w_bf16m2_i8m1_mu(
+// CHECK-RV64-SAME: <vscale x 8 x i1> [[VM:%.*]], <vscale x 8 x i8> [[VD:%.*]], <vscale x 8 x bfloat> [[VS2:%.*]], i64 noundef [[VL:%.*]]) #[[ATTR0]] {
+// CHECK-RV64-NEXT: [[ENTRY:.*:]]
+// CHECK-RV64-NEXT: [[TMP0:%.*]] = call <vscale x 8 x i8> @llvm.riscv.vfncvt.x.f.w.mask.nxv8i8.nxv8bf16.i64(<vscale x 8 x i8> [[VD]], <vscale x 8 x bfloat> [[VS2]], <vscale x 8 x i1> [[VM]], i64 7, i64 [[VL]], i64 1)
+// CHECK-RV64-NEXT: ret <vscale x 8 x i8> [[TMP0]]
+//
+vint8m1_t test_vfncvt_x_f_w_bf16m2_i8m1_mu(vbool8_t vm, vint8m1_t vd,
+ vbfloat16m2_t vs2, size_t vl) {
+ return __riscv_vfncvt_x_mu(vm, vd, vs2, vl);
+}
+
+// CHECK-RV64-LABEL: define dso_local <vscale x 16 x i8> @test_vfncvt_x_f_w_bf16m4_i8m2_mu(
+// CHECK-RV64-SAME: <vscale x 16 x i1> [[VM:%.*]], <vscale x 16 x i8> [[VD:%.*]], <vscale x 16 x bfloat> [[VS2:%.*]], i64 noundef [[VL:%.*]]) #[[ATTR0]] {
+// CHECK-RV64-NEXT: [[ENTRY:.*:]]
+// CHECK-RV64-NEXT: [[TMP0:%.*]] = call <vscale x 16 x i8> @llvm.riscv.vfncvt.x.f.w.mask.nxv16i8.nxv16bf16.i64(<vscale x 16 x i8> [[VD]], <vscale x 16 x bfloat> [[VS2]], <vscale x 16 x i1> [[VM]], i64 7, i64 [[VL]], i64 1)
+// CHECK-RV64-NEXT: ret <vscale x 16 x i8> [[TMP0]]
+//
+vint8m2_t test_vfncvt_x_f_w_bf16m4_i8m2_mu(vbool4_t vm, vint8m2_t vd,
+ vbfloat16m4_t vs2, size_t vl) {
+ return __riscv_vfncvt_x_mu(vm, vd, vs2, vl);
+}
+
+// CHECK-RV64-LABEL: define dso_local <vscale x 32 x i8> @test_vfncvt_x_f_w_bf16m8_i8m4_mu(
+// CHECK-RV64-SAME: <vscale x 32 x i1> [[VM:%.*]], <vscale x 32 x i8> [[VD:%.*]], <vscale x 32 x bfloat> [[VS2:%.*]], i64 noundef [[VL:%.*]]) #[[ATTR0]] {
+// CHECK-RV64-NEXT: [[ENTRY:.*:]]
+// CHECK-RV64-NEXT: [[TMP0:%.*]] = call <vscale x 32 x i8> @llvm.riscv.vfncvt.x.f.w.mask.nxv32i8.nxv32bf16.i64(<vscale x 32 x i8> [[VD]], <vscale x 32 x bfloat> [[VS2]], <vscale x 32 x i1> [[VM]], i64 7, i64 [[VL]], i64 1)
+// CHECK-RV64-NEXT: ret <vscale x 32 x i8> [[TMP0]]
+//
+vint8m4_t test_vfncvt_x_f_w_bf16m8_i8m4_mu(vbool2_t vm, vint8m4_t vd,
+ vbfloat16m8_t vs2, size_t vl) {
+ return __riscv_vfncvt_x_mu(vm, vd, vs2, vl);
+}
+
+// CHECK-RV64-LABEL: define dso_local <vscale x 1 x i8> @test_vfncvt_xu_f_w_bf16mf4_u8mf8_mu(
+// CHECK-RV64-SAME: <vscale x 1 x i1> [[VM:%.*]], <vscale x 1 x i8> [[VD:%.*]], <vscale x 1 x bfloat> [[VS2:%.*]], i64 noundef [[VL:%.*]]) #[[ATTR0]] {
+// CHECK-RV64-NEXT: [[ENTRY:.*:]]
+// CHECK-RV64-NEXT: [[TMP0:%.*]] = call <vscale x 1 x i8> @llvm.riscv.vfncvt.xu.f.w.mask.nxv1i8.nxv1bf16.i64(<vscale x 1 x i8> [[VD]], <vscale x 1 x bfloat> [[VS2]], <vscale x 1 x i1> [[VM]], i64 7, i64 [[VL]], i64 1)
+// CHECK-RV64-NEXT: ret <vscale x 1 x i8> [[TMP0]]
+//
+vuint8mf8_t test_vfncvt_xu_f_w_bf16mf4_u8mf8_mu(vbool64_t vm, vuint8mf8_t vd,
+ vbfloat16mf4_t vs2, size_t vl) {
+ return __riscv_vfncvt_xu_mu(vm, vd, vs2, vl);
+}
+
+// CHECK-RV64-LABEL: define dso_local <vscale x 2 x i8> @test_vfncvt_xu_f_w_bf16mf2_u8mf4_mu(
+// CHECK-RV64-SAME: <vscale x 2 x i1> [[VM:%.*]], <vscale x 2 x i8> [[VD:%.*]], <vscale x 2 x bfloat> [[VS2:%.*]], i64 noundef [[VL:%.*]]) #[[ATTR0]] {
+// CHECK-RV64-NEXT: [[ENTRY:.*:]]
+// CHECK-RV64-NEXT: [[TMP0:%.*]] = call <vscale x 2 x i8> @llvm.riscv.vfncvt.xu.f.w.mask.nxv2i8.nxv2bf16.i64(<vscale x 2 x i8> [[VD]], <vscale x 2 x bfloat> [[VS2]], <vscale x 2 x i1> [[VM]], i64 7, i64 [[VL]], i64 1)
+// CHECK-RV64-NEXT: ret <vscale x 2 x i8> [[TMP0]]
+//
+vuint8mf4_t test_vfncvt_xu_f_w_bf16mf2_u8mf4_mu(vbool32_t vm, vuint8mf4_t vd,
+ vbfloat16mf2_t vs2, size_t vl) {
+ return __riscv_vfncvt_xu_mu(vm, vd, vs2, vl);
+}
+
+// CHECK-RV64-LABEL: define dso_local <vscale x 4 x i8> @test_vfncvt_xu_f_w_bf16m1_u8mf2_mu(
+// CHECK-RV64-SAME: <vscale x 4 x i1> [[VM:%.*]], <vscale x 4 x i8> [[VD:%.*]], <vscale x 4 x bfloat> [[VS2:%.*]], i64 noundef [[VL:%.*]]) #[[ATTR0]] {
+// CHECK-RV64-NEXT: [[ENTRY:.*:]]
+// CHECK-RV64-NEXT: [[TMP0:%.*]] = call <vscale x 4 x i8> @llvm.riscv.vfncvt.xu.f.w.mask.nxv4i8.nxv4bf16.i64(<vscale x 4 x i8> [[VD]], <vscale x 4 x bfloat> [[VS2]], <vscale x 4 x i1> [[VM]], i64 7, i64 [[VL]], i64 1)
+// CHECK-RV64-NEXT: ret <vscale x 4 x i8> [[TMP0]]
+//
+vuint8mf2_t test_vfncvt_xu_f_w_bf16m1_u8mf2_mu(vbool16_t vm, vuint8mf2_t vd,
+ vbfloat16m1_t vs2, size_t vl) {
+ return __riscv_vfncvt_xu_mu(vm, vd, vs2, vl);
+}
+
+// CHECK-RV64-LABEL: define dso_local <vscale x 8 x i8> @test_vfncvt_xu_f_w_bf16m2_u8m1_mu(
+// CHECK-RV64-SAME: <vscale x 8 x i1> [[VM:%.*]], <vscale x 8 x i8> [[VD:%.*]], <vscale x 8 x bfloat> [[VS2:%.*]], i64 noundef [[VL:%.*]]) #[[ATTR0]] {
+// CHECK-RV64-NEXT: [[ENTRY:.*:]]
+// CHECK-RV64-NEXT: [[TMP0:%.*]] = call <vscale x 8 x i8> @llvm.riscv.vfncvt.xu.f.w.mask.nxv8i8.nxv8bf16.i64(<vscale x 8 x i8> [[VD]], <vscale x 8 x bfloat> [[VS2]], <vscale x 8 x i1> [[VM]], i64 7, i64 [[VL]], i64 1)
+// CHECK-RV64-NEXT: ret <vscale x 8 x i8> [[TMP0]]
+//
+vuint8m1_t test_vfncvt_xu_f_w_bf16m2_u8m1_mu(vbool8_t vm, vuint8m1_t vd,
+ vbfloat16m2_t vs2, size_t vl) {
+ return __riscv_vfncvt_xu_mu(vm, vd, vs2, vl);
+}
+
+// CHECK-RV64-LABEL: define dso_local <vscale x 16 x i8> @test_vfncvt_xu_f_w_bf16m4_u8m2_mu(
+// CHECK-RV64-SAME: <vscale x 16 x i1> [[VM:%.*]], <vscale x 16 x i8> [[VD:%.*]], <vscale x 16 x bfloat> [[VS2:%.*]], i64 noundef [[VL:%.*]]) #[[ATTR0]] {
+// CHECK-RV64-NEXT: [[ENTRY:.*:]]
+// CHECK-RV64-NEXT: [[TMP0:%.*]] = call <vscale x 16 x i8> @llvm.riscv.vfncvt.xu.f.w.mask.nxv16i8.nxv16bf16.i64(<vscale x 16 x i8> [[VD]], <vscale x 16 x bfloat> [[VS2]], <vscale x 16 x i1> [[VM]], i64 7, i64 [[VL]], i64 1)
+// CHECK-RV64-NEXT: ret <vscale x 16 x i8> [[TMP0]]
+//
+vuint8m2_t test_vfncvt_xu_f_w_bf16m4_u8m2_mu(vbool4_t vm, vuint8m2_t vd,
+ vbfloat16m4_t vs2, size_t vl) {
+ return __riscv_vfncvt_xu_mu(vm, vd, vs2, vl);
+}
+
+// CHECK-RV64-LABEL: define dso_local <vscale x 32 x i8> @test_vfncvt_xu_f_w_bf16m8_u8m4_mu(
+// CHECK-RV64-SAME: <vscale x 32 x i1> [[VM:%.*]], <vscale x 32 x i8> [[VD:%.*]], <vscale x 32 x bfloat> [[VS2:%.*]], i64 noundef [[VL:%.*]]) #[[ATTR0]] {
+// CHECK-RV64-NEXT: [[ENTRY:.*:]]
+// CHECK-RV64-NEXT: [[TMP0:%.*]] = call <vscale x 32 x i8> @llvm.riscv.vfncvt.xu.f.w.mask.nxv32i8.nxv32bf16.i64(<vscale x 32 x i8> [[VD]], <vscale x 32 x bfloat> [[VS2]], <vscale x 32 x i1> [[VM]], i64 7, i64 [[VL]], i64 1)
+// CHECK-RV64-NEXT: ret <vscale x 32 x i8> [[TMP0]]
+//
+vuint8m4_t test_vfncvt_xu_f_w_bf16m8_u8m4_mu(vbool2_t vm, vuint8m4_t vd,
+ vbfloat16m8_t vs2, size_t vl) {
+ return __riscv_vfncvt_xu_mu(vm, vd, vs2, vl);
+}
+
+// CHECK-RV64-LABEL: define dso_local <vscale x 1 x bfloat> @test_vfncvt_f_f_w_bf16mf4_mu(
+// CHECK-RV64-SAME: <vscale x 1 x i1> [[VM:%.*]], <vscale x 1 x bfloat> [[VD:%.*]], <vscale x 1 x float> [[VS2:%.*]], i64 noundef [[VL:%.*]]) #[[ATTR0]] {
+// CHECK-RV64-NEXT: [[ENTRY:.*:]]
+// CHECK-RV64-NEXT: [[TMP0:%.*]] = call <vscale x 1 x bfloat> @llvm.riscv.vfncvt.f.f.w.mask.nxv1bf16.nxv1f32.i64(<vscale x 1 x bfloat> [[VD]], <vscale x 1 x float> [[VS2]], <vscale x 1 x i1> [[VM]], i64 7, i64 [[VL]], i64 1)
+// CHECK-RV64-NEXT: ret <vscale x 1 x bfloat> [[TMP0]]
+//
+vbfloat16mf4_t test_vfncvt_f_f_w_bf16mf4_mu(vbool64_t vm, vbfloat16mf4_t vd,
+ vfloat32mf2_t vs2, size_t vl) {
+ return __riscv_vfncvt_f_bf16_mu(vm, vd, vs2, vl);
+}
+
+// CHECK-RV64-LABEL: define dso_local <vscale x 2 x bfloat> @test_vfncvt_f_f_w_bf16mf2_mu(
+// CHECK-RV64-SAME: <vscale x 2 x i1> [[VM:%.*]], <vscale x 2 x bfloat> [[VD:%.*]], <vscale x 2 x float> [[VS2:%.*]], i64 noundef [[VL:%.*]]) #[[ATTR0]] {
+// CHECK-RV64-NEXT: [[ENTRY:.*:]]
+// CHECK-RV64-NEXT: [[TMP0:%.*]] = call <vscale x 2 x bfloat> @llvm.riscv.vfncvt.f.f.w.mask.nxv2bf16.nxv2f32.i64(<vscale x 2 x bfloat> [[VD]], <vscale x 2 x float> [[VS2]], <vscale x 2 x i1> [[VM]], i64 7, i64 [[VL]], i64 1)
+// CHECK-RV64-NEXT: ret <vscale x 2 x bfloat> [[TMP0]]
+//
+vbfloat16mf2_t test_vfncvt_f_f_w_bf16mf2_mu(vbool32_t vm, vbfloat16mf2_t vd,
+ vfloat32m1_t vs2, size_t vl) {
+ return __riscv_vfncvt_f_bf16_mu(vm, vd, vs2, vl);
+}
+
+// CHECK-RV64-LABEL: define dso_local <vscale x 4 x bfloat> @test_vfncvt_f_f_w_bf16m1_mu(
+// CHECK-RV64-SAME: <vscale x 4 x i1> [[VM:%.*]], <vscale x 4 x bfloat> [[VD:%.*]], <vscale x 4 x float> [[VS2:%.*]], i64 noundef [[VL:%.*]]) #[[ATTR0]] {
+// CHECK-RV64-NEXT: [[ENTRY:.*:]]
+// CHECK-RV64-NEXT: [[TMP0:%.*]] = call <vscale x 4 x bfloat> @llvm.riscv.vfncvt.f.f.w.mask.nxv4bf16.nxv4f32.i64(<vscale x 4 x bfloat> [[VD]], <vscale x 4 x float> [[VS2]], <vscale x 4 x i1> [[VM]], i64 7, i64 [[VL]], i64 1)
+// CHECK-RV64-NEXT: ret <vscale x 4 x bfloat> [[TMP0]]
+//
+vbfloat16m1_t test_vfncvt_f_f_w_bf16m1_mu(vbool16_t vm, vbfloat16m1_t vd,
+ vfloat32m2_t vs2, size_t vl) {
+ return __riscv_vfncvt_f_bf16_mu(vm, vd, vs2, vl);
+}
+
+// CHECK-RV64-LABEL: define dso_local <vscale x 8 x bfloat> @test_vfncvt_f_f_w_bf16m2_mu(
+// CHECK-RV64-SAME: <vscale x 8 x i1> [[VM:%.*]], <vscale x 8 x bfloat> [[VD:%.*]], <vscale x 8 x float> [[VS2:%.*]], i64 noundef [[VL:%.*]]) #[[ATTR0]] {
+// CHECK-RV64-NEXT: [[ENTRY:.*:]]
+// CHECK-RV64-NEXT: [[TMP0:%.*]] = call <vscale x 8 x bfloat> @llvm.riscv.vfncvt.f.f.w.mask.nxv8bf16.nxv8f32.i64(<vscale x 8 x bfloat> [[VD]], <vscale x 8 x float> [[VS2]], <vscale x 8 x i1> [[VM]], i64 7, i64 [[VL]], i64 1)
+// CHECK-RV64-NEXT: ret <vscale x 8 x bfloat> [[TMP0]]
+//
+vbfloat16m2_t test_vfncvt_f_f_w_bf16m2_mu(vbool8_t vm, vbfloat16m2_t vd,
+ vfloat32m4_t vs2, size_t vl) {
+ return __riscv_vfncvt_f_bf16_mu(vm, vd, vs2, vl);
+}
+
+// CHECK-RV64-LABEL: define dso_local <vscale x 16 x bfloat> @test_vfncvt_f_f_w_bf16m4_mu(
+// CHECK-RV64-SAME: <vscale x 16 x i1> [[VM:%.*]], <vscale x 16 x bfloat> [[VD:%.*]], <vscale x 16 x float> [[VS2:%.*]], i64 noundef [[VL:%.*]]) #[[ATTR0]] {
+// CHECK-RV64-NEXT: [[ENTRY:.*:]]
+// CHECK-RV64-NEXT: [[TMP0:%.*]] = call <vscale x 16 x bfloat> @llvm.riscv.vfncvt.f.f.w.mask.nxv16bf16.nxv16f32.i64(<vscale x 16 x bfloat> [[VD]], <vscale x 16 x float> [[VS2]], <vscale x 16 x i1> [[VM]], i64 7, i64 [[VL]], i64 1)
+// CHECK-RV64-NEXT: ret <vscale x 16 x bfloat> [[TMP0]]
+//
+vbfloat16m4_t test_vfncvt_f_f_w_bf16m4_mu(vbool4_t vm, vbfloat16m4_t vd,
+ vfloat32m8_t vs2, size_t vl) {
+ return __riscv_vfncvt_f_bf16_mu(vm, vd, vs2, vl);
+}
+
+// CHECK-RV64-LABEL: define dso_local <vscale x 1 x i8> @test_vfncvt_x_f_w_bf16mf4_i8mf8_rm_tu(
+// CHECK-RV64-SAME: <vscale x 1 x i8> [[VD:%.*]], <vscale x 1 x bfloat> [[VS2:%.*]], i64 noundef [[VL:%.*]]) #[[ATTR0]] {
+// CHECK-RV64-NEXT: [[ENTRY:.*:]]
+// CHECK-RV64-NEXT: [[TMP0:%.*]] = call <vscale x 1 x i8> @llvm.riscv.vfncvt.x.f.w.nxv1i8.nxv1bf16.i64(<vscale x 1 x i8> [[VD]], <vscale x 1 x bfloat> [[VS2]], i64 0, i64 [[VL]])
+// CHECK-RV64-NEXT: ret <vscale x 1 x i8> [[TMP0]]
+//
+vint8mf8_t test_vfncvt_x_f_w_bf16mf4_i8mf8_rm_tu(vint8mf8_t vd,
+ vbfloat16mf4_t vs2,
+ size_t vl) {
+ return __riscv_vfncvt_x_tu(vd, vs2, __RISCV_FRM_RNE, vl);
+}
+
+// CHECK-RV64-LABEL: define dso_local <vscale x 2 x i8> @test_vfncvt_x_f_w_bf16mf2_i8mf4_rm_tu(
+// CHECK-RV64-SAME: <vscale x 2 x i8> [[VD:%.*]], <vscale x 2 x bfloat> [[VS2:%.*]], i64 noundef [[VL:%.*]]) #[[ATTR0]] {
+// CHECK-RV64-NEXT: [[ENTRY:.*:]]
+// CHECK-RV64-NEXT: [[TMP0:%.*]] = call <vscale x 2 x i8> @llvm.riscv.vfncvt.x.f.w.nxv2i8.nxv2bf16.i64(<vscale x 2 x i8> [[VD]], <vscale x 2 x bfloat> [[VS2]], i64 0, i64 [[VL]])
+// CHECK-RV64-NEXT: ret <vscale x 2 x i8> [[TMP0]]
+//
+vint8mf4_t test_vfncvt_x_f_w_bf16mf2_i8mf4_rm_tu(vint8mf4_t vd,
+ vbfloat16mf2_t vs2,
+ size_t vl) {
+ return __riscv_vfncvt_x_tu(vd, vs2, __RISCV_FRM_RNE, vl);
+}
+
+// CHECK-RV64-LABEL: define dso_local <vscale x 4 x i8> @test_vfncvt_x_f_w_bf16m1_i8mf2_rm_tu(
+// CHECK-RV64-SAME: <vscale x 4 x i8> [[VD:%.*]], <vscale x 4 x bfloat> [[VS2:%.*]], i64 noundef [[VL:%.*]]) #[[ATTR0]] {
+// CHECK-RV64-NEXT: [[ENTRY:.*:]]
+// CHECK-RV64-NEXT: [[TMP0:%.*]] = call <vscale x 4 x i8> @llvm.riscv.vfncvt.x.f.w.nxv4i8.nxv4bf16.i64(<vscale x 4 x i8> [[VD]], <vscale x 4 x bfloat> [[VS2]], i64 0, i64 [[VL]])
+// CHECK-RV64-NEXT: ret <vscale x 4 x i8> [[TMP0]]
+//
+vint8mf2_t test_vfncvt_x_f_w_bf16m1_i8mf2_rm_tu(vint8mf2_t vd,
+ vbfloat16m1_t vs2, size_t vl) {
+ return __riscv_vfncvt_x_tu(vd, vs2, __RISCV_FRM_RNE, vl);
+}
+
+// CHECK-RV64-LABEL: define dso_local <vscale x 8 x i8> @test_vfncvt_x_f_w_bf16m2_i8m1_rm_tu(
+// CHECK-RV64-SAME: <vscale x 8 x i8> [[VD:%.*]], <vscale x 8 x bfloat> [[VS2:%.*]], i64 noundef [[VL:%.*]]) #[[ATTR0]] {
+// CHECK-RV64-NEXT: [[ENTRY:.*:]]
+// CHECK-RV64-NEXT: [[TMP0:%.*]] = call <vscale x 8 x i8> @llvm.riscv.vfncvt.x.f.w.nxv8i8.nxv8bf16.i64(<vscale x 8 x i8> [[VD]], <vscale x 8 x bfloat> [[VS2]], i64 0, i64 [[VL]])
+// CHECK-RV64-NEXT: ret <vscale x 8 x i8> [[TMP0]]
+//
+vint8m1_t test_vfncvt_x_f_w_bf16m2_i8m1_rm_tu(vint8m1_t vd, vbfloat16m2_t vs2,
+ size_t vl) {
+ return __riscv_vfncvt_x_tu(vd, vs2, __RISCV_FRM_RNE, vl);
+}
+
+// CHECK-RV64-LABEL: define dso_local <vscale x 16 x i8> @test_vfncvt_x_f_w_bf16m4_i8m2_rm_tu(
+// CHECK-RV64-SAME: <vscale x 16 x i8> [[VD:%.*]], <vscale x 16 x bfloat> [[VS2:%.*]], i64 noundef [[VL:%.*]]) #[[ATTR0]] {
+// CHECK-RV64-NEXT: [[ENTRY:.*:]]
+// CHECK-RV64-NEXT: [[TMP0:%.*]] = call <vscale x 16 x i8> @llvm.riscv.vfncvt.x.f.w.nxv16i8.nxv16bf16.i64(<vscale x 16 x i8> [[VD]], <vscale x 16 x bfloat> [[VS2]], i64 0, i64 [[VL]])
+// CHECK-RV64-NEXT: ret <vscale x 16 x i8> [[TMP0]]
+//
+vint8m2_t test_vfncvt_x_f_w_bf16m4_i8m2_rm_tu(vint8m2_t vd, vbfloat16m4_t vs2,
+ size_t vl) {
+ return __riscv_vfncvt_x_tu(vd, vs2, __RISCV_FRM_RNE, vl);
+}
+
+// CHECK-RV64-LABEL: define dso_local <vscale x 32 x i8> @test_vfncvt_x_f_w_bf16m8_i8m4_rm_tu(
+// CHECK-RV64-SAME: <vscale x 32 x i8> [[VD:%.*]], <vscale x 32 x bfloat> [[VS2:%.*]], i64 noundef [[VL:%.*]]) #[[ATTR0]] {
+// CHECK-RV64-NEXT: [[ENTRY:.*:]]
+// CHECK-RV64-NEXT: [[TMP0:%.*]] = call <vscale x 32 x i8> @llvm.riscv.vfncvt.x.f.w.nxv32i8.nxv32bf16.i64(<vscale x 32 x i8> [[VD]], <vscale x 32 x bfloat> [[VS2]], i64 0, i64 [[VL]])
+// CHECK-RV64-NEXT: ret <vscale x 32 x i8> [[TMP0]]
+//
+vint8m4_t test_vfncvt_x_f_w_bf16m8_i8m4_rm_tu(vint8m4_t vd, vbfloat16m8_t vs2,
+ size_t vl) {
+ return __riscv_vfncvt_x_tu(vd, vs2, __RISCV_FRM_RNE, vl);
+}
+
+// CHECK-RV64-LABEL: define dso_local <vscale x 1 x i8> @test_vfncvt_xu_f_w_bf16mf4_u8mf8_rm_tu(
+// CHECK-RV64-SAME: <vscale x 1 x i8> [[VD:%.*]], <vscale x 1 x bfloat> [[VS2:%.*]], i64 noundef [[VL:%.*]]) #[[ATTR0]] {
+// CHECK-RV64-NEXT: [[ENTRY:.*:]]
+// CHECK-RV64-NEXT: [[TMP0:%.*]] = call <vscale x 1 x i8> @llvm.riscv.vfncvt.xu.f.w.nxv1i8.nxv1bf16.i64(<vscale x 1 x i8> [[VD]], <vscale x 1 x bfloat> [[VS2]], i64 0, i64 [[VL]])
+// CHECK-RV64-NEXT: ret <vscale x 1 x i8> [[TMP0]]
+//
+vuint8mf8_t test_vfncvt_xu_f_w_bf16mf4_u8mf8_rm_tu(vuint8mf8_t vd,
+ vbfloat16mf4_t vs2,
+ size_t vl) {
+ return __riscv_vfncvt_xu_tu(vd, vs2, __RISCV_FRM_RNE, vl);
+}
+
+// CHECK-RV64-LABEL: define dso_local <vscale x 2 x i8> @test_vfncvt_xu_f_w_bf16mf2_u8mf4_rm_tu(
+// CHECK-RV64-SAME: <vscale x 2 x i8> [[VD:%.*]], <vscale x 2 x bfloat> [[VS2:%.*]], i64 noundef [[VL:%.*]]) #[[ATTR0]] {
+// CHECK-RV64-NEXT: [[ENTRY:.*:]]
+// CHECK-RV64-NEXT: [[TMP0:%.*]] = call <vscale x 2 x i8> @llvm.riscv.vfncvt.xu.f.w.nxv2i8.nxv2bf16.i64(<vscale x 2 x i8> [[VD]], <vscale x 2 x bfloat> [[VS2]], i64 0, i64 [[VL]])
+// CHECK-RV64-NEXT: ret <vscale x 2 x i8> [[TMP0]]
+//
+vuint8mf4_t test_vfncvt_xu_f_w_bf16mf2_u8mf4_rm_tu(vuint8mf4_t vd,
+ vbfloat16mf2_t vs2,
+ size_t vl) {
+ return __riscv_vfncvt_xu_tu(vd, vs2, __RISCV_FRM_RNE, vl);
+}
+
+// CHECK-RV64-LABEL: define dso_local <vscale x 4 x i8> @test_vfncvt_xu_f_w_bf16m1_u8mf2_rm_tu(
+// CHECK-RV64-SAME: <vscale x 4 x i8> [[VD:%.*]], <vscale x 4 x bfloat> [[VS2:%.*]], i64 noundef [[VL:%.*]]) #[[ATTR0]] {
+// CHECK-RV64-NEXT: [[ENTRY:.*:]]
+// CHECK-RV64-NEXT: [[TMP0:%.*]] = call <vscale x 4 x i8> @llvm.riscv.vfncvt.xu.f.w.nxv4i8.nxv4bf16.i64(<vscale x 4 x i8> [[VD]], <vscale x 4 x bfloat> [[VS2]], i64 0, i64 [[VL]])
+// CHECK-RV64-NEXT: ret <vscale x 4 x i8> [[TMP0]]
+//
+vuint8mf2_t test_vfncvt_xu_f_w_bf16m1_u8mf2_rm_tu(vuint8mf2_t vd,
+ vbfloat16m1_t vs2,
+ size_t vl) {
+ return __riscv_vfncvt_xu_tu(vd, vs2, __RISCV_FRM_RNE, vl);
+}
+
+// CHECK-RV64-LABEL: define dso_local <vscale x 8 x i8> @test_vfncvt_xu_f_w_bf16m2_u8m1_rm_tu(
+// CHECK-RV64-SAME: <vscale x 8 x i8> [[VD:%.*]], <vscale x 8 x bfloat> [[VS2:%.*]], i64 noundef [[VL:%.*]]) #[[ATTR0]] {
+// CHECK-RV64-NEXT: [[ENTRY:.*:]]
+// CHECK-RV64-NEXT: [[TMP0:%.*]] = call <vscale x 8 x i8> @llvm.riscv.vfncvt.xu.f.w.nxv8i8.nxv8bf16.i64(<vscale x 8 x i8> [[VD]], <vscale x 8 x bfloat> [[VS2]], i64 0, i64 [[VL]])
+// CHECK-RV64-NEXT: ret <vscale x 8 x i8> [[TMP0]]
+//
+vuint8m1_t test_vfncvt_xu_f_w_bf16m2_u8m1_rm_tu(vuint8m1_t vd,
+ vbfloat16m2_t vs2, size_t vl) {
+ return __riscv_vfncvt_xu_tu(vd, vs2, __RISCV_FRM_RNE, vl);
+}
+
+// CHECK-RV64-LABEL: define dso_local <vscale x 16 x i8> @test_vfncvt_xu_f_w_bf16m4_u8m2_rm_tu(
+// CHECK-RV64-SAME: <vscale x 16 x i8> [[VD:%.*]], <vscale x 16 x bfloat> [[VS2:%.*]], i64 noundef [[VL:%.*]]) #[[ATTR0]] {
+// CHECK-RV64-NEXT: [[ENTRY:.*:]]
+// CHECK-RV64-NEXT: [[TMP0:%.*]] = call <vscale x 16 x i8> @llvm.riscv.vfncvt.xu.f.w.nxv16i8.nxv16bf16.i64(<vscale x 16 x i8> [[VD]], <vscale x 16 x bfloat> [[VS2]], i64 0, i64 [[VL]])
+// CHECK-RV64-NEXT: ret <vscale x 16 x i8> [[TMP0]]
+//
+vuint8m2_t test_vfncvt_xu_f_w_bf16m4_u8m2_rm_tu(vuint8m2_t vd,
+ vbfloat16m4_t vs2, size_t vl) {
+ return __riscv_vfncvt_xu_tu(vd, vs2, __RISCV_FRM_RNE, vl);
+}
+
+// CHECK-RV64-LABEL: define dso_local <vscale x 32 x i8> @test_vfncvt_xu_f_w_bf16m8_u8m4_rm_tu(
+// CHECK-RV64-SAME: <vscale x 32 x i8> [[VD:%.*]], <vscale x 32 x bfloat> [[VS2:%.*]], i64 noundef [[VL:%.*]]) #[[ATTR0]] {
+// CHECK-RV64-NEXT: [[ENTRY:.*:]]
+// CHECK-RV64-NEXT: [[TMP0:%.*]] = call <vscale x 32 x i8> @llvm.riscv.vfncvt.xu.f.w.nxv32i8.nxv32bf16.i64(<vscale x 32 x i8> [[VD]], <vscale x 32 x bfloat> [[VS2]], i64 0, i64 [[VL]])
+// CHECK-RV64-NEXT: ret <vscale x 32 x i8> [[TMP0]]
+//
+vuint8m4_t test_vfncvt_xu_f_w_bf16m8_u8m4_rm_tu(vuint8m4_t vd,
+ vbfloat16m8_t vs2, size_t vl) {
+ return __riscv_vfncvt_xu_tu(vd, vs2, __RISCV_FRM_RNE, vl);
+}
+
+// CHECK-RV64-LABEL: define dso_local <vscale x 1 x bfloat> @test_vfncvt_f_f_w_bf16mf4_rm_tu(
+// CHECK-RV64-SAME: <vscale x 1 x bfloat> [[VD:%.*]], <vscale x 1 x float> [[VS2:%.*]], i64 noundef [[VL:%.*]]) #[[ATTR0]] {
+// CHECK-RV64-NEXT: [[ENTRY:.*:]]
+// CHECK-RV64-NEXT: [[TMP0:%.*]] = call <vscale x 1 x bfloat> @llvm.riscv.vfncvt.f.f.w.nxv1bf16.nxv1f32.i64(<vscale x 1 x bfloat> [[VD]], <vscale x 1 x float> [[VS2]], i64 0, i64 [[VL]])
+// CHECK-RV64-NEXT: ret <vscale x 1 x bfloat> [[TMP0]]
+//
+vbfloat16mf4_t test_vfncvt_f_f_w_bf16mf4_rm_tu(vbfloat16mf4_t vd,
+ vfloat32mf2_t vs2, size_t vl) {
+ return __riscv_vfncvt_f_bf16_tu(vd, vs2, __RISCV_FRM_RNE, vl);
+}
+
+// CHECK-RV64-LABEL: define dso_local <vscale x 2 x bfloat> @test_vfncvt_f_f_w_bf16mf2_rm_tu(
+// CHECK-RV64-SAME: <vscale x 2 x bfloat> [[VD:%.*]], <vscale x 2 x float> [[VS2:%.*]], i64 noundef [[VL:%.*]]) #[[ATTR0]] {
+// CHECK-RV64-NEXT: [[ENTRY:.*:]]
+// CHECK-RV64-NEXT: [[TMP0:%.*]] = call <vscale x 2 x bfloat> @llvm.riscv.vfncvt.f.f.w.nxv2bf16.nxv2f32.i64(<vscale x 2 x bfloat> [[VD]], <vscale x 2 x float> [[VS2]], i64 0, i64 [[VL]])
+// CHECK-RV64-NEXT: ret <vscale x 2 x bfloat> [[TMP0]]
+//
+vbfloat16mf2_t test_vfncvt_f_f_w_bf16mf2_rm_tu(vbfloat16mf2_t vd,
+ vfloat32m1_t vs2, size_t vl) {
+ return __riscv_vfncvt_f_bf16_tu(vd, vs2, __RISCV_FRM_RNE, vl);
+}
+
+// CHECK-RV64-LABEL: define dso_local <vscale x 4 x bfloat> @test_vfncvt_f_f_w_bf16m1_rm_tu(
+// CHECK-RV64-SAME: <vscale x 4 x bfloat> [[VD:%.*]], <vscale x 4 x float> [[VS2:%.*]], i64 noundef [[VL:%.*]]) #[[ATTR0]] {
+// CHECK-RV64-NEXT: [[ENTRY:.*:]]
+// CHECK-RV64-NEXT: [[TMP0:%.*]] = call <vscale x 4 x bfloat> @llvm.riscv.vfncvt.f.f.w.nxv4bf16.nxv4f32.i64(<vscale x 4 x bfloat> [[VD]], <vscale x 4 x float> [[VS2]], i64 0, i64 [[VL]])
+// CHECK-RV64-NEXT: ret <vscale x 4 x bfloat> [[TMP0]]
+//
+vbfloat16m1_t test_vfncvt_f_f_w_bf16m1_rm_tu(vbfloat16m1_t vd, vfloat32m2_t vs2,
+ size_t vl) {
+ return __riscv_vfncvt_f_bf16_tu(vd, vs2, __RISCV_FRM_RNE, vl);
+}
+
+// CHECK-RV64-LABEL: define dso_local <vscale x 8 x bfloat> @test_vfncvt_f_f_w_bf16m2_rm_tu(
+// CHECK-RV64-SAME: <vscale x 8 x bfloat> [[VD:%.*]], <vscale x 8 x float> [[VS2:%.*]], i64 noundef [[VL:%.*]]) #[[ATTR0]] {
+// CHECK-RV64-NEXT: [[ENTRY:.*:]]
+// CHECK-RV64-NEXT: [[TMP0:%.*]] = call <vscale x 8 x bfloat> @llvm.riscv.vfncvt.f.f.w.nxv8bf16.nxv8f32.i64(<vscale x 8 x bfloat> [[VD]], <vscale x 8 x float> [[VS2]], i64 0, i64 [[VL]])
+// CHECK-RV64-NEXT: ret <vscale x 8 x bfloat> [[TMP0]]
+//
+vbfloat16m2_t test_vfncvt_f_f_w_bf16m2_rm_tu(vbfloat16m2_t vd, vfloat32m4_t vs2,
+ size_t vl) {
+ return __riscv_vfncvt_f_bf16_tu(vd, vs2, __RISCV_FRM_RNE, vl);
+}
+
+// CHECK-RV64-LABEL: define dso_local <vscale x 16 x bfloat> @test_vfncvt_f_f_w_bf16m4_rm_tu(
+// CHECK-RV64-SAME: <vscale x 16 x bfloat> [[VD:%.*]], <vscale x 16 x float> [[VS2:%.*]], i64 noundef [[VL:%.*]]) #[[ATTR0]] {
+// CHECK-RV64-NEXT: [[ENTRY:.*:]]
+// CHECK-RV64-NEXT: [[TMP0:%.*]] = call <vscale x 16 x bfloat> @llvm.riscv.vfncvt.f.f.w.nxv16bf16.nxv16f32.i64(<vscale x 16 x bfloat> [[VD]], <vscale x 16 x float> [[VS2]], i64 0, i64 [[VL]])
+// CHECK-RV64-NEXT: ret <vscale x 16 x bfloat> [[TMP0]]
+//
+vbfloat16m4_t test_vfncvt_f_f_w_bf16m4_rm_tu(vbfloat16m4_t vd, vfloat32m8_t vs2,
+ size_t vl) {
+ return __riscv_vfncvt_f_bf16_tu(vd, vs2, __RISCV_FRM_RNE, vl);
+}
+
+// CHECK-RV64-LABEL: define dso_local <vscale x 1 x i8> @test_vfncvt_x_f_w_bf16mf4_i8mf8_rm_tum(
+// CHECK-RV64-SAME: <vscale x 1 x i1> [[VM:%.*]], <vscale x 1 x i8> [[VD:%.*]], <vscale x 1 x bfloat> [[VS2:%.*]], i64 noundef [[VL:%.*]]) #[[ATTR0]] {
+// CHECK-RV64-NEXT: [[ENTRY:.*:]]
+// CHECK-RV64-NEXT: [[TMP0:%.*]] = call <vscale x 1 x i8> @llvm.riscv.vfncvt.x.f.w.mask.nxv1i8.nxv1bf16.i64(<vscale x 1 x i8> [[VD]], <vscale x 1 x bfloat> [[VS2]], <vscale x 1 x i1> [[VM]], i64 0, i64 [[VL]], i64 2)
+// CHECK-RV64-NEXT: ret <vscale x 1 x i8> [[TMP0]]
+//
+vint8mf8_t test_vfncvt_x_f_w_bf16mf4_i8mf8_rm_tum(vbool64_t vm, vint8mf8_t vd,
+ vbfloat16mf4_t vs2,
+ size_t vl) {
+ return __riscv_vfncvt_x_tum(vm, vd, vs2, __RISCV_FRM_RNE, vl);
+}
+
+// CHECK-RV64-LABEL: define dso_local <vscale x 2 x i8> @test_vfncvt_x_f_w_bf16mf2_i8mf4_rm_tum(
+// CHECK-RV64-SAME: <vscale x 2 x i1> [[VM:%.*]], <vscale x 2 x i8> [[VD:%.*]], <vscale x 2 x bfloat> [[VS2:%.*]], i64 noundef [[VL:%.*]]) #[[ATTR0]] {
+// CHECK-RV64-NEXT: [[ENTRY:.*:]]
+// CHECK-RV64-NEXT: [[TMP0:%.*]] = call <vscale x 2 x i8> @llvm.riscv.vfncvt.x.f.w.mask.nxv2i8.nxv2bf16.i64(<vscale x 2 x i8> [[VD]], <vscale x 2 x bfloat> [[VS2]], <vscale x 2 x i1> [[VM]], i64 0, i64 [[VL]], i64 2)
+// CHECK-RV64-NEXT: ret <vscale x 2 x i8> [[TMP0]]
+//
+vint8mf4_t test_vfncvt_x_f_w_bf16mf2_i8mf4_rm_tum(vbool32_t vm, vint8mf4_t vd,
+ vbfloat16mf2_t vs2,
+ size_t vl) {
+ return __riscv_vfncvt_x_tum(vm, vd, vs2, __RISCV_FRM_RNE, vl);
+}
+
+// CHECK-RV64-LABEL: define dso_local <vscale x 4 x i8> @test_vfncvt_x_f_w_bf16m1_i8mf2_rm_tum(
+// CHECK-RV64-SAME: <vscale x 4 x i1> [[VM:%.*]], <vscale x 4 x i8> [[VD:%.*]], <vscale x 4 x bfloat> [[VS2:%.*]], i64 noundef [[VL:%.*]]) #[[ATTR0]] {
+// CHECK-RV64-NEXT: [[ENTRY:.*:]]
+// CHECK-RV64-NEXT: [[TMP0:%.*]] = call <vscale x 4 x i8> @llvm.riscv.vfncvt.x.f.w.mask.nxv4i8.nxv4bf16.i64(<vscale x 4 x i8> [[VD]], <vscale x 4 x bfloat> [[VS2]], <vscale x 4 x i1> [[VM]], i64 0, i64 [[VL]], i64 2)
+// CHECK-RV64-NEXT: ret <vscale x 4 x i8> [[TMP0]]
+//
+vint8mf2_t test_vfncvt_x_f_w_bf16m1_i8mf2_rm_tum(vbool16_t vm, vint8mf2_t vd,
+ vbfloat16m1_t vs2, size_t vl) {
+ return __riscv_vfncvt_x_tum(vm, vd, vs2, __RISCV_FRM_RNE, vl);
+}
+
+// CHECK-RV64-LABEL: define dso_local <vscale x 8 x i8> @test_vfncvt_x_f_w_bf16m2_i8m1_rm_tum(
+// CHECK-RV64-SAME: <vscale x 8 x i1> [[VM:%.*]], <vscale x 8 x i8> [[VD:%.*]], <vscale x 8 x bfloat> [[VS2:%.*]], i64 noundef [[VL:%.*]]) #[[ATTR0]] {
+// CHECK-RV64-NEXT: [[ENTRY:.*:]]
+// CHECK-RV64-NEXT: [[TMP0:%.*]] = call <vscale x 8 x i8> @llvm.riscv.vfncvt.x.f.w.mask.nxv8i8.nxv8bf16.i64(<vscale x 8 x i8> [[VD]], <vscale x 8 x bfloat> [[VS2]], <vscale x 8 x i1> [[VM]], i64 0, i64 [[VL]], i64 2)
+// CHECK-RV64-NEXT: ret <vscale x 8 x i8> [[TMP0]]
+//
+vint8m1_t test_vfncvt_x_f_w_bf16m2_i8m1_rm_tum(vbool8_t vm, vint8m1_t vd,
+ vbfloat16m2_t vs2, size_t vl) {
+ return __riscv_vfncvt_x_tum(vm, vd, vs2, __RISCV_FRM_RNE, vl);
+}
+
+// CHECK-RV64-LABEL: define dso_local <vscale x 16 x i8> @test_vfncvt_x_f_w_bf16m4_i8m2_rm_tum(
+// CHECK-RV64-SAME: <vscale x 16 x i1> [[VM:%.*]], <vscale x 16 x i8> [[VD:%.*]], <vscale x 16 x bfloat> [[VS2:%.*]], i64 noundef [[VL:%.*]]) #[[ATTR0]] {
+// CHECK-RV64-NEXT: [[ENTRY:.*:]]
+// CHECK-RV64-NEXT: [[TMP0:%.*]] = call <vscale x 16 x i8> @llvm.riscv.vfncvt.x.f.w.mask.nxv16i8.nxv16bf16.i64(<vscale x 16 x i8> [[VD]], <vscale x 16 x bfloat> [[VS2]], <vscale x 16 x i1> [[VM]], i64 0, i64 [[VL]], i64 2)
+// CHECK-RV64-NEXT: ret <vscale x 16 x i8> [[TMP0]]
+//
+vint8m2_t test_vfncvt_x_f_w_bf16m4_i8m2_rm_tum(vbool4_t vm, vint8m2_t vd,
+ vbfloat16m4_t vs2, size_t vl) {
+ return __riscv_vfncvt_x_tum(vm, vd, vs2, __RISCV_FRM_RNE, vl);
+}
+
+// CHECK-RV64-LABEL: define dso_local <vscale x 32 x i8> @test_vfncvt_x_f_w_bf16m8_i8m4_rm_tum(
+// CHECK-RV64-SAME: <vscale x 32 x i1> [[VM:%.*]], <vscale x 32 x i8> [[VD:%.*]], <vscale x 32 x bfloat> [[VS2:%.*]], i64 noundef [[VL:%.*]]) #[[ATTR0]] {
+// CHECK-RV64-NEXT: [[ENTRY:.*:]]
+// CHECK-RV64-NEXT: [[TMP0:%.*]] = call <vscale x 32 x i8> @llvm.riscv.vfncvt.x.f.w.mask.nxv32i8.nxv32bf16.i64(<vscale x 32 x i8> [[VD]], <vscale x 32 x bfloat> [[VS2]], <vscale x 32 x i1> [[VM]], i64 0, i64 [[VL]], i64 2)
+// CHECK-RV64-NEXT: ret <vscale x 32 x i8> [[TMP0]]
+//
+vint8m4_t test_vfncvt_x_f_w_bf16m8_i8m4_rm_tum(vbool2_t vm, vint8m4_t vd,
+ vbfloat16m8_t vs2, size_t vl) {
+ return __riscv_vfncvt_x_tum(vm, vd, vs2, __RISCV_FRM_RNE, vl);
+}
+
+// CHECK-RV64-LABEL: define dso_local <vscale x 1 x i8> @test_vfncvt_xu_f_w_bf16mf4_u8mf8_rm_tum(
+// CHECK-RV64-SAME: <vscale x 1 x i1> [[VM:%.*]], <vscale x 1 x i8> [[VD:%.*]], <vscale x 1 x bfloat> [[VS2:%.*]], i64 noundef [[VL:%.*]]) #[[ATTR0]] {
+// CHECK-RV64-NEXT: [[ENTRY:.*:]]
+// CHECK-RV64-NEXT: [[TMP0:%.*]] = call <vscale x 1 x i8> @llvm.riscv.vfncvt.xu.f.w.mask.nxv1i8.nxv1bf16.i64(<vscale x 1 x i8> [[VD]], <vscale x 1 x bfloat> [[VS2]], <vscale x 1 x i1> [[VM]], i64 0, i64 [[VL]], i64 2)
+// CHECK-RV64-NEXT: ret <vscale x 1 x i8> [[TMP0]]
+//
+vuint8mf8_t test_vfncvt_xu_f_w_bf16mf4_u8mf8_rm_tum(vbool64_t vm,
+ vuint8mf8_t vd,
+ vbfloat16mf4_t vs2,
+ size_t vl) {
+ return __riscv_vfncvt_xu_tum(vm, vd, vs2, __RISCV_FRM_RNE, vl);
+}
+
+// CHECK-RV64-LABEL: define dso_local <vscale x 2 x i8> @test_vfncvt_xu_f_w_bf16mf2_u8mf4_rm_tum(
+// CHECK-RV64-SAME: <vscale x 2 x i1> [[VM:%.*]], <vscale x 2 x i8> [[VD:%.*]], <vscale x 2 x bfloat> [[VS2:%.*]], i64 noundef [[VL:%.*]]) #[[ATTR0]] {
+// CHECK-RV64-NEXT: [[ENTRY:.*:]]
+// CHECK-RV64-NEXT: [[TMP0:%.*]] = call <vscale x 2 x i8> @llvm.riscv.vfncvt.xu.f.w.mask.nxv2i8.nxv2bf16.i64(<vscale x 2 x i8> [[VD]], <vscale x 2 x bfloat> [[VS2]], <vscale x 2 x i1> [[VM]], i64 0, i64 [[VL]], i64 2)
+// CHECK-RV64-NEXT: ret <vscale x 2 x i8> [[TMP0]]
+//
+vuint8mf4_t test_vfncvt_xu_f_w_bf16mf2_u8mf4_rm_tum(vbool32_t vm,
+ vuint8mf4_t vd,
+ vbfloat16mf2_t vs2,
+ size_t vl) {
+ return __riscv_vfncvt_xu_tum(vm, vd, vs2, __RISCV_FRM_RNE, vl);
+}
+
+// CHECK-RV64-LABEL: define dso_local <vscale x 4 x i8> @test_vfncvt_xu_f_w_bf16m1_u8mf2_rm_tum(
+// CHECK-RV64-SAME: <vscale x 4 x i1> [[VM:%.*]], <vscale x 4 x i8> [[VD:%.*]], <vscale x 4 x bfloat> [[VS2:%.*]], i64 noundef [[VL:%.*]]) #[[ATTR0]] {
+// CHECK-RV64-NEXT: [[ENTRY:.*:]]
+// CHECK-RV64-NEXT: [[TMP0:%.*]] = call <vscale x 4 x i8> @llvm.riscv.vfncvt.xu.f.w.mask.nxv4i8.nxv4bf16.i64(<vscale x 4 x i8> [[VD]], <vscale x 4 x bfloat> [[VS2]], <vscale x 4 x i1> [[VM]], i64 0, i64 [[VL]], i64 2)
+// CHECK-RV64-NEXT: ret <vscale x 4 x i8> [[TMP0]]
+//
+vuint8mf2_t test_vfncvt_xu_f_w_bf16m1_u8mf2_rm_tum(vbool16_t vm, vuint8mf2_t vd,
+ vbfloat16m1_t vs2,
+ size_t vl) {
+ return __riscv_vfncvt_xu_tum(vm, vd, vs2, __RISCV_FRM_RNE, vl);
+}
+
+// CHECK-RV64-LABEL: define dso_local <vscale x 8 x i8> @test_vfncvt_xu_f_w_bf16m2_u8m1_rm_tum(
+// CHECK-RV64-SAME: <vscale x 8 x i1> [[VM:%.*]], <vscale x 8 x i8> [[VD:%.*]], <vscale x 8 x bfloat> [[VS2:%.*]], i64 noundef [[VL:%.*]]) #[[ATTR0]] {
+// CHECK-RV64-NEXT: [[ENTRY:.*:]]
+// CHECK-RV64-NEXT: [[TMP0:%.*]] = call <vscale x 8 x i8> @llvm.riscv.vfncvt.xu.f.w.mask.nxv8i8.nxv8bf16.i64(<vscale x 8 x i8> [[VD]], <vscale x 8 x bfloat> [[VS2]], <vscale x 8 x i1> [[VM]], i64 0, i64 [[VL]], i64 2)
+// CHECK-RV64-NEXT: ret <vscale x 8 x i8> [[TMP0]]
+//
+vuint8m1_t test_vfncvt_xu_f_w_bf16m2_u8m1_rm_tum(vbool8_t vm, vuint8m1_t vd,
+ vbfloat16m2_t vs2, size_t vl) {
+ return __riscv_vfncvt_xu_tum(vm, vd, vs2, __RISCV_FRM_RNE, vl);
+}
+
+// CHECK-RV64-LABEL: define dso_local <vscale x 16 x i8> @test_vfncvt_xu_f_w_bf16m4_u8m2_rm_tum(
+// CHECK-RV64-SAME: <vscale x 16 x i1> [[VM:%.*]], <vscale x 16 x i8> [[VD:%.*]], <vscale x 16 x bfloat> [[VS2:%.*]], i64 noundef [[VL:%.*]]) #[[ATTR0]] {
+// CHECK-RV64-NEXT: [[ENTRY:.*:]]
+// CHECK-RV64-NEXT: [[TMP0:%.*]] = call <vscale x 16 x i8> @llvm.riscv.vfncvt.xu.f.w.mask.nxv16i8.nxv16bf16.i64(<vscale x 16 x i8> [[VD]], <vscale x 16 x bfloat> [[VS2]], <vscale x 16 x i1> [[VM]], i64 0, i64 [[VL]], i64 2)
+// CHECK-RV64-NEXT: ret <vscale x 16 x i8> [[TMP0]]
+//
+vuint8m2_t test_vfncvt_xu_f_w_bf16m4_u8m2_rm_tum(vbool4_t vm, vuint8m2_t vd,
+ vbfloat16m4_t vs2, size_t vl) {
+ return __riscv_vfncvt_xu_tum(vm, vd, vs2, __RISCV_FRM_RNE, vl);
+}
+
+// CHECK-RV64-LABEL: define dso_local <vscale x 32 x i8> @test_vfncvt_xu_f_w_bf16m8_u8m4_rm_tum(
+// CHECK-RV64-SAME: <vscale x 32 x i1> [[VM:%.*]], <vscale x 32 x i8> [[VD:%.*]], <vscale x 32 x bfloat> [[VS2:%.*]], i64 noundef [[VL:%.*]]) #[[ATTR0]] {
+// CHECK-RV64-NEXT: [[ENTRY:.*:]]
+// CHECK-RV64-NEXT: [[TMP0:%.*]] = call <vscale x 32 x i8> @llvm.riscv.vfncvt.xu.f.w.mask.nxv32i8.nxv32bf16.i64(<vscale x 32 x i8> [[VD]], <vscale x 32 x bfloat> [[VS2]], <vscale x 32 x i1> [[VM]], i64 0, i64 [[VL]], i64 2)
+// CHECK-RV64-NEXT: ret <vscale x 32 x i8> [[TMP0]]
+//
+vuint8m4_t test_vfncvt_xu_f_w_bf16m8_u8m4_rm_tum(vbool2_t vm, vuint8m4_t vd,
+ vbfloat16m8_t vs2, size_t vl) {
+ return __riscv_vfncvt_xu_tum(vm, vd, vs2, __RISCV_FRM_RNE, vl);
+}
+
+// CHECK-RV64-LABEL: define dso_local <vscale x 1 x bfloat> @test_vfncvt_f_f_w_bf16mf4_rm_tum(
+// CHECK-RV64-SAME: <vscale x 1 x i1> [[VM:%.*]], <vscale x 1 x bfloat> [[VD:%.*]], <vscale x 1 x float> [[VS2:%.*]], i64 noundef [[VL:%.*]]) #[[ATTR0]] {
+// CHECK-RV64-NEXT: [[ENTRY:.*:]]
+// CHECK-RV64-NEXT: [[TMP0:%.*]] = call <vscale x 1 x bfloat> @llvm.riscv.vfncvt.f.f.w.mask.nxv1bf16.nxv1f32.i64(<vscale x 1 x bfloat> [[VD]], <vscale x 1 x float> [[VS2]], <vscale x 1 x i1> [[VM]], i64 0, i64 [[VL]], i64 2)
+// CHECK-RV64-NEXT: ret <vscale x 1 x bfloat> [[TMP0]]
+//
+vbfloat16mf4_t test_vfncvt_f_f_w_bf16mf4_rm_tum(vbool64_t vm, vbfloat16mf4_t vd,
+ vfloat32mf2_t vs2, size_t vl) {
+ return __riscv_vfncvt_f_bf16_tum(vm, vd, vs2, __RISCV_FRM_RNE, vl);
+}
+
+// CHECK-RV64-LABEL: define dso_local <vscale x 2 x bfloat> @test_vfncvt_f_f_w_bf16mf2_rm_tum(
+// CHECK-RV64-SAME: <vscale x 2 x i1> [[VM:%.*]], <vscale x 2 x bfloat> [[VD:%.*]], <vscale x 2 x float> [[VS2:%.*]], i64 noundef [[VL:%.*]]) #[[ATTR0]] {
+// CHECK-RV64-NEXT: [[ENTRY:.*:]]
+// CHECK-RV64-NEXT: [[TMP0:%.*]] = call <vscale x 2 x bfloat> @llvm.riscv.vfncvt.f.f.w.mask.nxv2bf16.nxv2f32.i64(<vscale x 2 x bfloat> [[VD]], <vscale x 2 x float> [[VS2]], <vscale x 2 x i1> [[VM]], i64 0, i64 [[VL]], i64 2)
+// CHECK-RV64-NEXT: ret <vscale x 2 x bfloat> [[TMP0]]
+//
+vbfloat16mf2_t test_vfncvt_f_f_w_bf16mf2_rm_tum(vbool32_t vm, vbfloat16mf2_t vd,
+ vfloat32m1_t vs2, size_t vl) {
+ return __riscv_vfncvt_f_bf16_tum(vm, vd, vs2, __RISCV_FRM_RNE, vl);
+}
+
+// CHECK-RV64-LABEL: define dso_local <vscale x 4 x bfloat> @test_vfncvt_f_f_w_bf16m1_rm_tum(
+// CHECK-RV64-SAME: <vscale x 4 x i1> [[VM:%.*]], <vscale x 4 x bfloat> [[VD:%.*]], <vscale x 4 x float> [[VS2:%.*]], i64 noundef [[VL:%.*]]) #[[ATTR0]] {
+// CHECK-RV64-NEXT: [[ENTRY:.*:]]
+// CHECK-RV64-NEXT: [[TMP0:%.*]] = call <vscale x 4 x bfloat> @llvm.riscv.vfncvt.f.f.w.mask.nxv4bf16.nxv4f32.i64(<vscale x 4 x bfloat> [[VD]], <vscale x 4 x float> [[VS2]], <vscale x 4 x i1> [[VM]], i64 0, i64 [[VL]], i64 2)
+// CHECK-RV64-NEXT: ret <vscale x 4 x bfloat> [[TMP0]]
+//
+vbfloat16m1_t test_vfncvt_f_f_w_bf16m1_rm_tum(vbool16_t vm, vbfloat16m1_t vd,
+ vfloat32m2_t vs2, size_t vl) {
+ return __riscv_vfncvt_f_bf16_tum(vm, vd, vs2, __RISCV_FRM_RNE, vl);
+}
+
+// CHECK-RV64-LABEL: define dso_local <vscale x 8 x bfloat> @test_vfncvt_f_f_w_bf16m2_rm_tum(
+// CHECK-RV64-SAME: <vscale x 8 x i1> [[VM:%.*]], <vscale x 8 x bfloat> [[VD:%.*]], <vscale x 8 x float> [[VS2:%.*]], i64 noundef [[VL:%.*]]) #[[ATTR0]] {
+// CHECK-RV64-NEXT: [[ENTRY:.*:]]
+// CHECK-RV64-NEXT: [[TMP0:%.*]] = call <vscale x 8 x bfloat> @llvm.riscv.vfncvt.f.f.w.mask.nxv8bf16.nxv8f32.i64(<vscale x 8 x bfloat> [[VD]], <vscale x 8 x float> [[VS2]], <vscale x 8 x i1> [[VM]], i64 0, i64 [[VL]], i64 2)
+// CHECK-RV64-NEXT: ret <vscale x 8 x bfloat> [[TMP0]]
+//
+vbfloat16m2_t test_vfncvt_f_f_w_bf16m2_rm_tum(vbool8_t vm, vbfloat16m2_t vd,
+ vfloat32m4_t vs2, size_t vl) {
+ return __riscv_vfncvt_f_bf16_tum(vm, vd, vs2, __RISCV_FRM_RNE, vl);
+}
+
+// CHECK-RV64-LABEL: define dso_local <vscale x 16 x bfloat> @test_vfncvt_f_f_w_bf16m4_rm_tum(
+// CHECK-RV64-SAME: <vscale x 16 x i1> [[VM:%.*]], <vscale x 16 x bfloat> [[VD:%.*]], <vscale x 16 x float> [[VS2:%.*]], i64 noundef [[VL:%.*]]) #[[ATTR0]] {
+// CHECK-RV64-NEXT: [[ENTRY:.*:]]
+// CHECK-RV64-NEXT: [[TMP0:%.*]] = call <vscale x 16 x bfloat> @llvm.riscv.vfncvt.f.f.w.mask.nxv16bf16.nxv16f32.i64(<vscale x 16 x bfloat> [[VD]], <vscale x 16 x float> [[VS2]], <vscale x 16 x i1> [[VM]], i64 0, i64 [[VL]], i64 2)
+// CHECK-RV64-NEXT: ret <vscale x 16 x bfloat> [[TMP0]]
+//
+vbfloat16m4_t test_vfncvt_f_f_w_bf16m4_rm_tum(vbool4_t vm, vbfloat16m4_t vd,
+ vfloat32m8_t vs2, size_t vl) {
+ return __riscv_vfncvt_f_bf16_tum(vm, vd, vs2, __RISCV_FRM_RNE, vl);
+}
+
+// CHECK-RV64-LABEL: define dso_local <vscale x 1 x i8> @test_vfncvt_x_f_w_bf16mf4_i8mf8_rm_tumu(
+// CHECK-RV64-SAME: <vscale x 1 x i1> [[VM:%.*]], <vscale x 1 x i8> [[VD:%.*]], <vscale x 1 x bfloat> [[VS2:%.*]], i64 noundef [[VL:%.*]]) #[[ATTR0]] {
+// CHECK-RV64-NEXT: [[ENTRY:.*:]]
+// CHECK-RV64-NEXT: [[TMP0:%.*]] = call <vscale x 1 x i8> @llvm.riscv.vfncvt.x.f.w.mask.nxv1i8.nxv1bf16.i64(<vscale x 1 x i8> [[VD]], <vscale x 1 x bfloat> [[VS2]], <vscale x 1 x i1> [[VM]], i64 0, i64 [[VL]], i64 0)
+// CHECK-RV64-NEXT: ret <vscale x 1 x i8> [[TMP0]]
+//
+vint8mf8_t test_vfncvt_x_f_w_bf16mf4_i8mf8_rm_tumu(vbool64_t vm, vint8mf8_t vd,
+ vbfloat16mf4_t vs2,
+ size_t vl) {
+ return __riscv_vfncvt_x_tumu(vm, vd, vs2, __RISCV_FRM_RNE, vl);
+}
+
+// CHECK-RV64-LABEL: define dso_local <vscale x 2 x i8> @test_vfncvt_x_f_w_bf16mf2_i8mf4_rm_tumu(
+// CHECK-RV64-SAME: <vscale x 2 x i1> [[VM:%.*]], <vscale x 2 x i8> [[VD:%.*]], <vscale x 2 x bfloat> [[VS2:%.*]], i64 noundef [[VL:%.*]]) #[[ATTR0]] {
+// CHECK-RV64-NEXT: [[ENTRY:.*:]]
+// CHECK-RV64-NEXT: [[TMP0:%.*]] = call <vscale x 2 x i8> @llvm.riscv.vfncvt.x.f.w.mask.nxv2i8.nxv2bf16.i64(<vscale x 2 x i8> [[VD]], <vscale x 2 x bfloat> [[VS2]], <vscale x 2 x i1> [[VM]], i64 0, i64 [[VL]], i64 0)
+// CHECK-RV64-NEXT: ret <vscale x 2 x i8> [[TMP0]]
+//
+vint8mf4_t test_vfncvt_x_f_w_bf16mf2_i8mf4_rm_tumu(vbool32_t vm, vint8mf4_t vd,
+ vbfloat16mf2_t vs2,
+ size_t vl) {
+ return __riscv_vfncvt_x_tumu(vm, vd, vs2, __RISCV_FRM_RNE, vl);
+}
+
+// CHECK-RV64-LABEL: define dso_local <vscale x 4 x i8> @test_vfncvt_x_f_w_bf16m1_i8mf2_rm_tumu(
+// CHECK-RV64-SAME: <vscale x 4 x i1> [[VM:%.*]], <vscale x 4 x i8> [[VD:%.*]], <vscale x 4 x bfloat> [[VS2:%.*]], i64 noundef [[VL:%.*]]) #[[ATTR0]] {
+// CHECK-RV64-NEXT: [[ENTRY:.*:]]
+// CHECK-RV64-NEXT: [[TMP0:%.*]] = call <vscale x 4 x i8> @llvm.riscv.vfncvt.x.f.w.mask.nxv4i8.nxv4bf16.i64(<vscale x 4 x i8> [[VD]], <vscale x 4 x bfloat> [[VS2]], <vscale x 4 x i1> [[VM]], i64 0, i64 [[VL]], i64 0)
+// CHECK-RV64-NEXT: ret <vscale x 4 x i8> [[TMP0]]
+//
+vint8mf2_t test_vfncvt_x_f_w_bf16m1_i8mf2_rm_tumu(vbool16_t vm, vint8mf2_t vd,
+ vbfloat16m1_t vs2,
+ size_t vl) {
+ return __riscv_vfncvt_x_tumu(vm, vd, vs2, __RISCV_FRM_RNE, vl);
+}
+
+// CHECK-RV64-LABEL: define dso_local <vscale x 8 x i8> @test_vfncvt_x_f_w_bf16m2_i8m1_rm_tumu(
+// CHECK-RV64-SAME: <vscale x 8 x i1> [[VM:%.*]], <vscale x 8 x i8> [[VD:%.*]], <vscale x 8 x bfloat> [[VS2:%.*]], i64 noundef [[VL:%.*]]) #[[ATTR0]] {
+// CHECK-RV64-NEXT: [[ENTRY:.*:]]
+// CHECK-RV64-NEXT: [[TMP0:%.*]] = call <vscale x 8 x i8> @llvm.riscv.vfncvt.x.f.w.mask.nxv8i8.nxv8bf16.i64(<vscale x 8 x i8> [[VD]], <vscale x 8 x bfloat> [[VS2]], <vscale x 8 x i1> [[VM]], i64 0, i64 [[VL]], i64 0)
+// CHECK-RV64-NEXT: ret <vscale x 8 x i8> [[TMP0]]
+//
+vint8m1_t test_vfncvt_x_f_w_bf16m2_i8m1_rm_tumu(vbool8_t vm, vint8m1_t vd,
+ vbfloat16m2_t vs2, size_t vl) {
+ return __riscv_vfncvt_x_tumu(vm, vd, vs2, __RISCV_FRM_RNE, vl);
+}
+
+// CHECK-RV64-LABEL: define dso_local <vscale x 16 x i8> @test_vfncvt_x_f_w_bf16m4_i8m2_rm_tumu(
+// CHECK-RV64-SAME: <vscale x 16 x i1> [[VM:%.*]], <vscale x 16 x i8> [[VD:%.*]], <vscale x 16 x bfloat> [[VS2:%.*]], i64 noundef [[VL:%.*]]) #[[ATTR0]] {
+// CHECK-RV64-NEXT: [[ENTRY:.*:]]
+// CHECK-RV64-NEXT: [[TMP0:%.*]] = call <vscale x 16 x i8> @llvm.riscv.vfncvt.x.f.w.mask.nxv16i8.nxv16bf16.i64(<vscale x 16 x i8> [[VD]], <vscale x 16 x bfloat> [[VS2]], <vscale x 16 x i1> [[VM]], i64 0, i64 [[VL]], i64 0)
+// CHECK-RV64-NEXT: ret <vscale x 16 x i8> [[TMP0]]
+//
+vint8m2_t test_vfncvt_x_f_w_bf16m4_i8m2_rm_tumu(vbool4_t vm, vint8m2_t vd,
+ vbfloat16m4_t vs2, size_t vl) {
+ return __riscv_vfncvt_x_tumu(vm, vd, vs2, __RISCV_FRM_RNE, vl);
+}
+
+// CHECK-RV64-LABEL: define dso_local <vscale x 32 x i8> @test_vfncvt_x_f_w_bf16m8_i8m4_rm_tumu(
+// CHECK-RV64-SAME: <vscale x 32 x i1> [[VM:%.*]], <vscale x 32 x i8> [[VD:%.*]], <vscale x 32 x bfloat> [[VS2:%.*]], i64 noundef [[VL:%.*]]) #[[ATTR0]] {
+// CHECK-RV64-NEXT: [[ENTRY:.*:]]
+// CHECK-RV64-NEXT: [[TMP0:%.*]] = call <vscale x 32 x i8> @llvm.riscv.vfncvt.x.f.w.mask.nxv32i8.nxv32bf16.i64(<vscale x 32 x i8> [[VD]], <vscale x 32 x bfloat> [[VS2]], <vscale x 32 x i1> [[VM]], i64 0, i64 [[VL]], i64 0)
+// CHECK-RV64-NEXT: ret <vscale x 32 x i8> [[TMP0]]
+//
+vint8m4_t test_vfncvt_x_f_w_bf16m8_i8m4_rm_tumu(vbool2_t vm, vint8m4_t vd,
+ vbfloat16m8_t vs2, size_t vl) {
+ return __riscv_vfncvt_x_tumu(vm, vd, vs2, __RISCV_FRM_RNE, vl);
+}
+
+// CHECK-RV64-LABEL: define dso_local <vscale x 1 x i8> @test_vfncvt_xu_f_w_bf16mf4_u8mf8_rm_tumu(
+// CHECK-RV64-SAME: <vscale x 1 x i1> [[VM:%.*]], <vscale x 1 x i8> [[VD:%.*]], <vscale x 1 x bfloat> [[VS2:%.*]], i64 noundef [[VL:%.*]]) #[[ATTR0]] {
+// CHECK-RV64-NEXT: [[ENTRY:.*:]]
+// CHECK-RV64-NEXT: [[TMP0:%.*]] = call <vscale x 1 x i8> @llvm.riscv.vfncvt.xu.f.w.mask.nxv1i8.nxv1bf16.i64(<vscale x 1 x i8> [[VD]], <vscale x 1 x bfloat> [[VS2]], <vscale x 1 x i1> [[VM]], i64 0, i64 [[VL]], i64 0)
+// CHECK-RV64-NEXT: ret <vscale x 1 x i8> [[TMP0]]
+//
+vuint8mf8_t test_vfncvt_xu_f_w_bf16mf4_u8mf8_rm_tumu(vbool64_t vm,
+ vuint8mf8_t vd,
+ vbfloat16mf4_t vs2,
+ size_t vl) {
+ return __riscv_vfncvt_xu_tumu(vm, vd, vs2, __RISCV_FRM_RNE, vl);
+}
+
+// CHECK-RV64-LABEL: define dso_local <vscale x 2 x i8> @test_vfncvt_xu_f_w_bf16mf2_u8mf4_rm_tumu(
+// CHECK-RV64-SAME: <vscale x 2 x i1> [[VM:%.*]], <vscale x 2 x i8> [[VD:%.*]], <vscale x 2 x bfloat> [[VS2:%.*]], i64 noundef [[VL:%.*]]) #[[ATTR0]] {
+// CHECK-RV64-NEXT: [[ENTRY:.*:]]
+// CHECK-RV64-NEXT: [[TMP0:%.*]] = call <vscale x 2 x i8> @llvm.riscv.vfncvt.xu.f.w.mask.nxv2i8.nxv2bf16.i64(<vscale x 2 x i8> [[VD]], <vscale x 2 x bfloat> [[VS2]], <vscale x 2 x i1> [[VM]], i64 0, i64 [[VL]], i64 0)
+// CHECK-RV64-NEXT: ret <vscale x 2 x i8> [[TMP0]]
+//
+vuint8mf4_t test_vfncvt_xu_f_w_bf16mf2_u8mf4_rm_tumu(vbool32_t vm,
+ vuint8mf4_t vd,
+ vbfloat16mf2_t vs2,
+ size_t vl) {
+ return __riscv_vfncvt_xu_tumu(vm, vd, vs2, __RISCV_FRM_RNE, vl);
+}
+
+// CHECK-RV64-LABEL: define dso_local <vscale x 4 x i8> @test_vfncvt_xu_f_w_bf16m1_u8mf2_rm_tumu(
+// CHECK-RV64-SAME: <vscale x 4 x i1> [[VM:%.*]], <vscale x 4 x i8> [[VD:%.*]], <vscale x 4 x bfloat> [[VS2:%.*]], i64 noundef [[VL:%.*]]) #[[ATTR0]] {
+// CHECK-RV64-NEXT: [[ENTRY:.*:]]
+// CHECK-RV64-NEXT: [[TMP0:%.*]] = call <vscale x 4 x i8> @llvm.riscv.vfncvt.xu.f.w.mask.nxv4i8.nxv4bf16.i64(<vscale x 4 x i8> [[VD]], <vscale x 4 x bfloat> [[VS2]], <vscale x 4 x i1> [[VM]], i64 0, i64 [[VL]], i64 0)
+// CHECK-RV64-NEXT: ret <vscale x 4 x i8> [[TMP0]]
+//
+vuint8mf2_t test_vfncvt_xu_f_w_bf16m1_u8mf2_rm_tumu(vbool16_t vm,
+ vuint8mf2_t vd,
+ vbfloat16m1_t vs2,
+ size_t vl) {
+ return __riscv_vfncvt_xu_tumu(vm, vd, vs2, __RISCV_FRM_RNE, vl);
+}
+
+// CHECK-RV64-LABEL: define dso_local <vscale x 8 x i8> @test_vfncvt_xu_f_w_bf16m2_u8m1_rm_tumu(
+// CHECK-RV64-SAME: <vscale x 8 x i1> [[VM:%.*]], <vscale x 8 x i8> [[VD:%.*]], <vscale x 8 x bfloat> [[VS2:%.*]], i64 noundef [[VL:%.*]]) #[[ATTR0]] {
+// CHECK-RV64-NEXT: [[ENTRY:.*:]]
+// CHECK-RV64-NEXT: [[TMP0:%.*]] = call <vscale x 8 x i8> @llvm.riscv.vfncvt.xu.f.w.mask.nxv8i8.nxv8bf16.i64(<vscale x 8 x i8> [[VD]], <vscale x 8 x bfloat> [[VS2]], <vscale x 8 x i1> [[VM]], i64 0, i64 [[VL]], i64 0)
+// CHECK-RV64-NEXT: ret <vscale x 8 x i8> [[TMP0]]
+//
+vuint8m1_t test_vfncvt_xu_f_w_bf16m2_u8m1_rm_tumu(vbool8_t vm, vuint8m1_t vd,
+ vbfloat16m2_t vs2,
+ size_t vl) {
+ return __riscv_vfncvt_xu_tumu(vm, vd, vs2, __RISCV_FRM_RNE, vl);
+}
+
+// CHECK-RV64-LABEL: define dso_local <vscale x 16 x i8> @test_vfncvt_xu_f_w_bf16m4_u8m2_rm_tumu(
+// CHECK-RV64-SAME: <vscale x 16 x i1> [[VM:%.*]], <vscale x 16 x i8> [[VD:%.*]], <vscale x 16 x bfloat> [[VS2:%.*]], i64 noundef [[VL:%.*]]) #[[ATTR0]] {
+// CHECK-RV64-NEXT: [[ENTRY:.*:]]
+// CHECK-RV64-NEXT: [[TMP0:%.*]] = call <vscale x 16 x i8> @llvm.riscv.vfncvt.xu.f.w.mask.nxv16i8.nxv16bf16.i64(<vscale x 16 x i8> [[VD]], <vscale x 16 x bfloat> [[VS2]], <vscale x 16 x i1> [[VM]], i64 0, i64 [[VL]], i64 0)
+// CHECK-RV64-NEXT: ret <vscale x 16 x i8> [[TMP0]]
+//
+vuint8m2_t test_vfncvt_xu_f_w_bf16m4_u8m2_rm_tumu(vbool4_t vm, vuint8m2_t vd,
+ vbfloat16m4_t vs2,
+ size_t vl) {
+ return __riscv_vfncvt_xu_tumu(vm, vd, vs2, __RISCV_FRM_RNE, vl);
+}
+
+// CHECK-RV64-LABEL: define dso_local <vscale x 32 x i8> @test_vfncvt_xu_f_w_bf16m8_u8m4_rm_tumu(
+// CHECK-RV64-SAME: <vscale x 32 x i1> [[VM:%.*]], <vscale x 32 x i8> [[VD:%.*]], <vscale x 32 x bfloat> [[VS2:%.*]], i64 noundef [[VL:%.*]]) #[[ATTR0]] {
+// CHECK-RV64-NEXT: [[ENTRY:.*:]]
+// CHECK-RV64-NEXT: [[TMP0:%.*]] = call <vscale x 32 x i8> @llvm.riscv.vfncvt.xu.f.w.mask.nxv32i8.nxv32bf16.i64(<vscale x 32 x i8> [[VD]], <vscale x 32 x bfloat> [[VS2]], <vscale x 32 x i1> [[VM]], i64 0, i64 [[VL]], i64 0)
+// CHECK-RV64-NEXT: ret <vscale x 32 x i8> [[TMP0]]
+//
+vuint8m4_t test_vfncvt_xu_f_w_bf16m8_u8m4_rm_tumu(vbool2_t vm, vuint8m4_t vd,
+ vbfloat16m8_t vs2,
+ size_t vl) {
+ return __riscv_vfncvt_xu_tumu(vm, vd, vs2, __RISCV_FRM_RNE, vl);
+}
+
+// CHECK-RV64-LABEL: define dso_local <vscale x 1 x bfloat> @test_vfncvt_f_f_w_bf16mf4_rm_tumu(
+// CHECK-RV64-SAME: <vscale x 1 x i1> [[VM:%.*]], <vscale x 1 x bfloat> [[VD:%.*]], <vscale x 1 x float> [[VS2:%.*]], i64 noundef [[VL:%.*]]) #[[ATTR0]] {
+// CHECK-RV64-NEXT: [[ENTRY:.*:]]
+// CHECK-RV64-NEXT: [[TMP0:%.*]] = call <vscale x 1 x bfloat> @llvm.riscv.vfncvt.f.f.w.mask.nxv1bf16.nxv1f32.i64(<vscale x 1 x bfloat> [[VD]], <vscale x 1 x float> [[VS2]], <vscale x 1 x i1> [[VM]], i64 0, i64 [[VL]], i64 0)
+// CHECK-RV64-NEXT: ret <vscale x 1 x bfloat> [[TMP0]]
+//
+vbfloat16mf4_t test_vfncvt_f_f_w_bf16mf4_rm_tumu(vbool64_t vm,
+ vbfloat16mf4_t vd,
+ vfloat32mf2_t vs2, size_t vl) {
+ return __riscv_vfncvt_f_bf16_tumu(vm, vd, vs2, __RISCV_FRM_RNE, vl);
+}
+
+// CHECK-RV64-LABEL: define dso_local <vscale x 2 x bfloat> @test_vfncvt_f_f_w_bf16mf2_rm_tumu(
+// CHECK-RV64-SAME: <vscale x 2 x i1> [[VM:%.*]], <vscale x 2 x bfloat> [[VD:%.*]], <vscale x 2 x float> [[VS2:%.*]], i64 noundef [[VL:%.*]]) #[[ATTR0]] {
+// CHECK-RV64-NEXT: [[ENTRY:.*:]]
+// CHECK-RV64-NEXT: [[TMP0:%.*]] = call <vscale x 2 x bfloat> @llvm.riscv.vfncvt.f.f.w.mask.nxv2bf16.nxv2f32.i64(<vscale x 2 x bfloat> [[VD]], <vscale x 2 x float> [[VS2]], <vscale x 2 x i1> [[VM]], i64 0, i64 [[VL]], i64 0)
+// CHECK-RV64-NEXT: ret <vscale x 2 x bfloat> [[TMP0]]
+//
+vbfloat16mf2_t test_vfncvt_f_f_w_bf16mf2_rm_tumu(vbool32_t vm,
+ vbfloat16mf2_t vd,
+ vfloat32m1_t vs2, size_t vl) {
+ return __riscv_vfncvt_f_bf16_tumu(vm, vd, vs2, __RISCV_FRM_RNE, vl);
+}
+
+// CHECK-RV64-LABEL: define dso_local <vscale x 4 x bfloat> @test_vfncvt_f_f_w_bf16m1_rm_tumu(
+// CHECK-RV64-SAME: <vscale x 4 x i1> [[VM:%.*]], <vscale x 4 x bfloat> [[VD:%.*]], <vscale x 4 x float> [[VS2:%.*]], i64 noundef [[VL:%.*]]) #[[ATTR0]] {
+// CHECK-RV64-NEXT: [[ENTRY:.*:]]
+// CHECK-RV64-NEXT: [[TMP0:%.*]] = call <vscale x 4 x bfloat> @llvm.riscv.vfncvt.f.f.w.mask.nxv4bf16.nxv4f32.i64(<vscale x 4 x bfloat> [[VD]], <vscale x 4 x float> [[VS2]], <vscale x 4 x i1> [[VM]], i64 0, i64 [[VL]], i64 0)
+// CHECK-RV64-NEXT: ret <vscale x 4 x bfloat> [[TMP0]]
+//
+vbfloat16m1_t test_vfncvt_f_f_w_bf16m1_rm_tumu(vbool16_t vm, vbfloat16m1_t vd,
+ vfloat32m2_t vs2, size_t vl) {
+ return __riscv_vfncvt_f_bf16_tumu(vm, vd, vs2, __RISCV_FRM_RNE, vl);
+}
+
+// CHECK-RV64-LABEL: define dso_local <vscale x 8 x bfloat> @test_vfncvt_f_f_w_bf16m2_rm_tumu(
+// CHECK-RV64-SAME: <vscale x 8 x i1> [[VM:%.*]], <vscale x 8 x bfloat> [[VD:%.*]], <vscale x 8 x float> [[VS2:%.*]], i64 noundef [[VL:%.*]]) #[[ATTR0]] {
+// CHECK-RV64-NEXT: [[ENTRY:.*:]]
+// CHECK-RV64-NEXT: [[TMP0:%.*]] = call <vscale x 8 x bfloat> @llvm.riscv.vfncvt.f.f.w.mask.nxv8bf16.nxv8f32.i64(<vscale x 8 x bfloat> [[VD]], <vscale x 8 x float> [[VS2]], <vscale x 8 x i1> [[VM]], i64 0, i64 [[VL]], i64 0)
+// CHECK-RV64-NEXT: ret <vscale x 8 x bfloat> [[TMP0]]
+//
+vbfloat16m2_t test_vfncvt_f_f_w_bf16m2_rm_tumu(vbool8_t vm, vbfloat16m2_t vd,
+ vfloat32m4_t vs2, size_t vl) {
+ return __riscv_vfncvt_f_bf16_tumu(vm, vd, vs2, __RISCV_FRM_RNE, vl);
+}
+
+// CHECK-RV64-LABEL: define dso_local <vscale x 16 x bfloat> @test_vfncvt_f_f_w_bf16m4_rm_tumu(
+// CHECK-RV64-SAME: <vscale x 16 x i1> [[VM:%.*]], <vscale x 16 x bfloat> [[VD:%.*]], <vscale x 16 x float> [[VS2:%.*]], i64 noundef [[VL:%.*]]) #[[ATTR0]] {
+// CHECK-RV64-NEXT: [[ENTRY:.*:]]
+// CHECK-RV64-NEXT: [[TMP0:%.*]] = call <vscale x 16 x bfloat> @llvm.riscv.vfncvt.f.f.w.mask.nxv16bf16.nxv16f32.i64(<vscale x 16 x bfloat> [[VD]], <vscale x 16 x float> [[VS2]], <vscale x 16 x i1> [[VM]], i64 0, i64 [[VL]], i64 0)
+// CHECK-RV64-NEXT: ret <vscale x 16 x bfloat> [[TMP0]]
+//
+vbfloat16m4_t test_vfncvt_f_f_w_bf16m4_rm_tumu(vbool4_t vm, vbfloat16m4_t vd,
+ vfloat32m8_t vs2, size_t vl) {
+ return __riscv_vfncvt_f_bf16_tumu(vm, vd, vs2, __RISCV_FRM_RNE, vl);
+}
+
+// CHECK-RV64-LABEL: define dso_local <vscale x 1 x i8> @test_vfncvt_x_f_w_bf16mf4_i8mf8_rm_mu(
+// CHECK-RV64-SAME: <vscale x 1 x i1> [[VM:%.*]], <vscale x 1 x i8> [[VD:%.*]], <vscale x 1 x bfloat> [[VS2:%.*]], i64 noundef [[VL:%.*]]) #[[ATTR0]] {
+// CHECK-RV64-NEXT: [[ENTRY:.*:]]
+// CHECK-RV64-NEXT: [[TMP0:%.*]] = call <vscale x 1 x i8> @llvm.riscv.vfncvt.x.f.w.mask.nxv1i8.nxv1bf16.i64(<vscale x 1 x i8> [[VD]], <vscale x 1 x bfloat> [[VS2]], <vscale x 1 x i1> [[VM]], i64 0, i64 [[VL]], i64 1)
+// CHECK-RV64-NEXT: ret <vscale x 1 x i8> [[TMP0]]
+//
+vint8mf8_t test_vfncvt_x_f_w_bf16mf4_i8mf8_rm_mu(vbool64_t vm, vint8mf8_t vd,
+ vbfloat16mf4_t vs2,
+ size_t vl) {
+ return __riscv_vfncvt_x_mu(vm, vd, vs2, __RISCV_FRM_RNE, vl);
+}
+
+// CHECK-RV64-LABEL: define dso_local <vscale x 2 x i8> @test_vfncvt_x_f_w_bf16mf2_i8mf4_rm_mu(
+// CHECK-RV64-SAME: <vscale x 2 x i1> [[VM:%.*]], <vscale x 2 x i8> [[VD:%.*]], <vscale x 2 x bfloat> [[VS2:%.*]], i64 noundef [[VL:%.*]]) #[[ATTR0]] {
+// CHECK-RV64-NEXT: [[ENTRY:.*:]]
+// CHECK-RV64-NEXT: [[TMP0:%.*]] = call <vscale x 2 x i8> @llvm.riscv.vfncvt.x.f.w.mask.nxv2i8.nxv2bf16.i64(<vscale x 2 x i8> [[VD]], <vscale x 2 x bfloat> [[VS2]], <vscale x 2 x i1> [[VM]], i64 0, i64 [[VL]], i64 1)
+// CHECK-RV64-NEXT: ret <vscale x 2 x i8> [[TMP0]]
+//
+vint8mf4_t test_vfncvt_x_f_w_bf16mf2_i8mf4_rm_mu(vbool32_t vm, vint8mf4_t vd,
+ vbfloat16mf2_t vs2,
+ size_t vl) {
+ return __riscv_vfncvt_x_mu(vm, vd, vs2, __RISCV_FRM_RNE, vl);
+}
+
+// CHECK-RV64-LABEL: define dso_local <vscale x 4 x i8> @test_vfncvt_x_f_w_bf16m1_i8mf2_rm_mu(
+// CHECK-RV64-SAME: <vscale x 4 x i1> [[VM:%.*]], <vscale x 4 x i8> [[VD:%.*]], <vscale x 4 x bfloat> [[VS2:%.*]], i64 noundef [[VL:%.*]]) #[[ATTR0]] {
+// CHECK-RV64-NEXT: [[ENTRY:.*:]]
+// CHECK-RV64-NEXT: [[TMP0:%.*]] = call <vscale x 4 x i8> @llvm.riscv.vfncvt.x.f.w.mask.nxv4i8.nxv4bf16.i64(<vscale x 4 x i8> [[VD]], <vscale x 4 x bfloat> [[VS2]], <vscale x 4 x i1> [[VM]], i64 0, i64 [[VL]], i64 1)
+// CHECK-RV64-NEXT: ret <vscale x 4 x i8> [[TMP0]]
+//
+vint8mf2_t test_vfncvt_x_f_w_bf16m1_i8mf2_rm_mu(vbool16_t vm, vint8mf2_t vd,
+ vbfloat16m1_t vs2, size_t vl) {
+ return __riscv_vfncvt_x_mu(vm, vd, vs2, __RISCV_FRM_RNE, vl);
+}
+
+// CHECK-RV64-LABEL: define dso_local <vscale x 8 x i8> @test_vfncvt_x_f_w_bf16m2_i8m1_rm_mu(
+// CHECK-RV64-SAME: <vscale x 8 x i1> [[VM:%.*]], <vscale x 8 x i8> [[VD:%.*]], <vscale x 8 x bfloat> [[VS2:%.*]], i64 noundef [[VL:%.*]]) #[[ATTR0]] {
+// CHECK-RV64-NEXT: [[ENTRY:.*:]]
+// CHECK-RV64-NEXT: [[TMP0:%.*]] = call <vscale x 8 x i8> @llvm.riscv.vfncvt.x.f.w.mask.nxv8i8.nxv8bf16.i64(<vscale x 8 x i8> [[VD]], <vscale x 8 x bfloat> [[VS2]], <vscale x 8 x i1> [[VM]], i64 0, i64 [[VL]], i64 1)
+// CHECK-RV64-NEXT: ret <vscale x 8 x i8> [[TMP0]]
+//
+vint8m1_t test_vfncvt_x_f_w_bf16m2_i8m1_rm_mu(vbool8_t vm, vint8m1_t vd,
+ vbfloat16m2_t vs2, size_t vl) {
+ return __riscv_vfncvt_x_mu(vm, vd, vs2, __RISCV_FRM_RNE, vl);
+}
+
+// CHECK-RV64-LABEL: define dso_local <vscale x 16 x i8> @test_vfncvt_x_f_w_bf16m4_i8m2_rm_mu(
+// CHECK-RV64-SAME: <vscale x 16 x i1> [[VM:%.*]], <vscale x 16 x i8> [[VD:%.*]], <vscale x 16 x bfloat> [[VS2:%.*]], i64 noundef [[VL:%.*]]) #[[ATTR0]] {
+// CHECK-RV64-NEXT: [[ENTRY:.*:]]
+// CHECK-RV64-NEXT: [[TMP0:%.*]] = call <vscale x 16 x i8> @llvm.riscv.vfncvt.x.f.w.mask.nxv16i8.nxv16bf16.i64(<vscale x 16 x i8> [[VD]], <vscale x 16 x bfloat> [[VS2]], <vscale x 16 x i1> [[VM]], i64 0, i64 [[VL]], i64 1)
+// CHECK-RV64-NEXT: ret <vscale x 16 x i8> [[TMP0]]
+//
+vint8m2_t test_vfncvt_x_f_w_bf16m4_i8m2_rm_mu(vbool4_t vm, vint8m2_t vd,
+ vbfloat16m4_t vs2, size_t vl) {
+ return __riscv_vfncvt_x_mu(vm, vd, vs2, __RISCV_FRM_RNE, vl);
+}
+
+// CHECK-RV64-LABEL: define dso_local <vscale x 32 x i8> @test_vfncvt_x_f_w_bf16m8_i8m4_rm_mu(
+// CHECK-RV64-SAME: <vscale x 32 x i1> [[VM:%.*]], <vscale x 32 x i8> [[VD:%.*]], <vscale x 32 x bfloat> [[VS2:%.*]], i64 noundef [[VL:%.*]]) #[[ATTR0]] {
+// CHECK-RV64-NEXT: [[ENTRY:.*:]]
+// CHECK-RV64-NEXT: [[TMP0:%.*]] = call <vscale x 32 x i8> @llvm.riscv.vfncvt.x.f.w.mask.nxv32i8.nxv32bf16.i64(<vscale x 32 x i8> [[VD]], <vscale x 32 x bfloat> [[VS2]], <vscale x 32 x i1> [[VM]], i64 0, i64 [[VL]], i64 1)
+// CHECK-RV64-NEXT: ret <vscale x 32 x i8> [[TMP0]]
+//
+vint8m4_t test_vfncvt_x_f_w_bf16m8_i8m4_rm_mu(vbool2_t vm, vint8m4_t vd,
+ vbfloat16m8_t vs2, size_t vl) {
+ return __riscv_vfncvt_x_mu(vm, vd, vs2, __RISCV_FRM_RNE, vl);
+}
+
+// CHECK-RV64-LABEL: define dso_local <vscale x 1 x i8> @test_vfncvt_xu_f_w_bf16mf4_u8mf8_rm_mu(
+// CHECK-RV64-SAME: <vscale x 1 x i1> [[VM:%.*]], <vscale x 1 x i8> [[VD:%.*]], <vscale x 1 x bfloat> [[VS2:%.*]], i64 noundef [[VL:%.*]]) #[[ATTR0]] {
+// CHECK-RV64-NEXT: [[ENTRY:.*:]]
+// CHECK-RV64-NEXT: [[TMP0:%.*]] = call <vscale x 1 x i8> @llvm.riscv.vfncvt.xu.f.w.mask.nxv1i8.nxv1bf16.i64(<vscale x 1 x i8> [[VD]], <vscale x 1 x bfloat> [[VS2]], <vscale x 1 x i1> [[VM]], i64 0, i64 [[VL]], i64 1)
+// CHECK-RV64-NEXT: ret <vscale x 1 x i8> [[TMP0]]
+//
+vuint8mf8_t test_vfncvt_xu_f_w_bf16mf4_u8mf8_rm_mu(vbool64_t vm, vuint8mf8_t vd,
+ vbfloat16mf4_t vs2,
+ size_t vl) {
+ return __riscv_vfncvt_xu_mu(vm, vd, vs2, __RISCV_FRM_RNE, vl);
+}
+
+// CHECK-RV64-LABEL: define dso_local <vscale x 2 x i8> @test_vfncvt_xu_f_w_bf16mf2_u8mf4_rm_mu(
+// CHECK-RV64-SAME: <vscale x 2 x i1> [[VM:%.*]], <vscale x 2 x i8> [[VD:%.*]], <vscale x 2 x bfloat> [[VS2:%.*]], i64 noundef [[VL:%.*]]) #[[ATTR0]] {
+// CHECK-RV64-NEXT: [[ENTRY:.*:]]
+// CHECK-RV64-NEXT: [[TMP0:%.*]] = call <vscale x 2 x i8> @llvm.riscv.vfncvt.xu.f.w.mask.nxv2i8.nxv2bf16.i64(<vscale x 2 x i8> [[VD]], <vscale x 2 x bfloat> [[VS2]], <vscale x 2 x i1> [[VM]], i64 0, i64 [[VL]], i64 1)
+// CHECK-RV64-NEXT: ret <vscale x 2 x i8> [[TMP0]]
+//
+vuint8mf4_t test_vfncvt_xu_f_w_bf16mf2_u8mf4_rm_mu(vbool32_t vm, vuint8mf4_t vd,
+ vbfloat16mf2_t vs2,
+ size_t vl) {
+ return __riscv_vfncvt_xu_mu(vm, vd, vs2, __RISCV_FRM_RNE, vl);
+}
+
+// CHECK-RV64-LABEL: define dso_local <vscale x 4 x i8> @test_vfncvt_xu_f_w_bf16m1_u8mf2_rm_mu(
+// CHECK-RV64-SAME: <vscale x 4 x i1> [[VM:%.*]], <vscale x 4 x i8> [[VD:%.*]], <vscale x 4 x bfloat> [[VS2:%.*]], i64 noundef [[VL:%.*]]) #[[ATTR0]] {
+// CHECK-RV64-NEXT: [[ENTRY:.*:]]
+// CHECK-RV64-NEXT: [[TMP0:%.*]] = call <vscale x 4 x i8> @llvm.riscv.vfncvt.xu.f.w.mask.nxv4i8.nxv4bf16.i64(<vscale x 4 x i8> [[VD]], <vscale x 4 x bfloat> [[VS2]], <vscale x 4 x i1> [[VM]], i64 0, i64 [[VL]], i64 1)
+// CHECK-RV64-NEXT: ret <vscale x 4 x i8> [[TMP0]]
+//
+vuint8mf2_t test_vfncvt_xu_f_w_bf16m1_u8mf2_rm_mu(vbool16_t vm, vuint8mf2_t vd,
+ vbfloat16m1_t vs2,
+ size_t vl) {
+ return __riscv_vfncvt_xu_mu(vm, vd, vs2, __RISCV_FRM_RNE, vl);
+}
+
+// CHECK-RV64-LABEL: define dso_local <vscale x 8 x i8> @test_vfncvt_xu_f_w_bf16m2_u8m1_rm_mu(
+// CHECK-RV64-SAME: <vscale x 8 x i1> [[VM:%.*]], <vscale x 8 x i8> [[VD:%.*]], <vscale x 8 x bfloat> [[VS2:%.*]], i64 noundef [[VL:%.*]]) #[[ATTR0]] {
+// CHECK-RV64-NEXT: [[ENTRY:.*:]]
+// CHECK-RV64-NEXT: [[TMP0:%.*]] = call <vscale x 8 x i8> @llvm.riscv.vfncvt.xu.f.w.mask.nxv8i8.nxv8bf16.i64(<vscale x 8 x i8> [[VD]], <vscale x 8 x bfloat> [[VS2]], <vscale x 8 x i1> [[VM]], i64 0, i64 [[VL]], i64 1)
+// CHECK-RV64-NEXT: ret <vscale x 8 x i8> [[TMP0]]
+//
+vuint8m1_t test_vfncvt_xu_f_w_bf16m2_u8m1_rm_mu(vbool8_t vm, vuint8m1_t vd,
+ vbfloat16m2_t vs2, size_t vl) {
+ return __riscv_vfncvt_xu_mu(vm, vd, vs2, __RISCV_FRM_RNE, vl);
+}
+
+// CHECK-RV64-LABEL: define dso_local <vscale x 16 x i8> @test_vfncvt_xu_f_w_bf16m4_u8m2_rm_mu(
+// CHECK-RV64-SAME: <vscale x 16 x i1> [[VM:%.*]], <vscale x 16 x i8> [[VD:%.*]], <vscale x 16 x bfloat> [[VS2:%.*]], i64 noundef [[VL:%.*]]) #[[ATTR0]] {
+// CHECK-RV64-NEXT: [[ENTRY:.*:]]
+// CHECK-RV64-NEXT: [[TMP0:%.*]] = call <vscale x 16 x i8> @llvm.riscv.vfncvt.xu.f.w.mask.nxv16i8.nxv16bf16.i64(<vscale x 16 x i8> [[VD]], <vscale x 16 x bfloat> [[VS2]], <vscale x 16 x i1> [[VM]], i64 0, i64 [[VL]], i64 1)
+// CHECK-RV64-NEXT: ret <vscale x 16 x i8> [[TMP0]]
+//
+vuint8m2_t test_vfncvt_xu_f_w_bf16m4_u8m2_rm_mu(vbool4_t vm, vuint8m2_t vd,
+ vbfloat16m4_t vs2, size_t vl) {
+ return __riscv_vfncvt_xu_mu(vm, vd, vs2, __RISCV_FRM_RNE, vl);
+}
+
+// CHECK-RV64-LABEL: define dso_local <vscale x 32 x i8> @test_vfncvt_xu_f_w_bf16m8_u8m4_rm_mu(
+// CHECK-RV64-SAME: <vscale x 32 x i1> [[VM:%.*]], <vscale x 32 x i8> [[VD:%.*]], <vscale x 32 x bfloat> [[VS2:%.*]], i64 noundef [[VL:%.*]]) #[[ATTR0]] {
+// CHECK-RV64-NEXT: [[ENTRY:.*:]]
+// CHECK-RV64-NEXT: [[TMP0:%.*]] = call <vscale x 32 x i8> @llvm.riscv.vfncvt.xu.f.w.mask.nxv32i8.nxv32bf16.i64(<vscale x 32 x i8> [[VD]], <vscale x 32 x bfloat> [[VS2]], <vscale x 32 x i1> [[VM]], i64 0, i64 [[VL]], i64 1)
+// CHECK-RV64-NEXT: ret <vscale x 32 x i8> [[TMP0]]
+//
+vuint8m4_t test_vfncvt_xu_f_w_bf16m8_u8m4_rm_mu(vbool2_t vm, vuint8m4_t vd,
+ vbfloat16m8_t vs2, size_t vl) {
+ return __riscv_vfncvt_xu_mu(vm, vd, vs2, __RISCV_FRM_RNE, vl);
+}
+
+// CHECK-RV64-LABEL: define dso_local <vscale x 1 x bfloat> @test_vfncvt_f_f_w_bf16mf4_rm_mu(
+// CHECK-RV64-SAME: <vscale x 1 x i1> [[VM:%.*]], <vscale x 1 x bfloat> [[VD:%.*]], <vscale x 1 x float> [[VS2:%.*]], i64 noundef [[VL:%.*]]) #[[ATTR0]] {
+// CHECK-RV64-NEXT: [[ENTRY:.*:]]
+// CHECK-RV64-NEXT: [[TMP0:%.*]] = call <vscale x 1 x bfloat> @llvm.riscv.vfncvt.f.f.w.mask.nxv1bf16.nxv1f32.i64(<vscale x 1 x bfloat> [[VD]], <vscale x 1 x float> [[VS2]], <vscale x 1 x i1> [[VM]], i64 0, i64 [[VL]], i64 1)
+// CHECK-RV64-NEXT: ret <vscale x 1 x bfloat> [[TMP0]]
+//
+vbfloat16mf4_t test_vfncvt_f_f_w_bf16mf4_rm_mu(vbool64_t vm, vbfloat16mf4_t vd,
+ vfloat32mf2_t vs2, size_t vl) {
+ return __riscv_vfncvt_f_bf16_mu(vm, vd, vs2, __RISCV_FRM_RNE, vl);
+}
+
+// CHECK-RV64-LABEL: define dso_local <vscale x 2 x bfloat> @test_vfncvt_f_f_w_bf16mf2_rm_mu(
+// CHECK-RV64-SAME: <vscale x 2 x i1> [[VM:%.*]], <vscale x 2 x bfloat> [[VD:%.*]], <vscale x 2 x float> [[VS2:%.*]], i64 noundef [[VL:%.*]]) #[[ATTR0]] {
+// CHECK-RV64-NEXT: [[ENTRY:.*:]]
+// CHECK-RV64-NEXT: [[TMP0:%.*]] = call <vscale x 2 x bfloat> @llvm.riscv.vfncvt.f.f.w.mask.nxv2bf16.nxv2f32.i64(<vscale x 2 x bfloat> [[VD]], <vscale x 2 x float> [[VS2]], <vscale x 2 x i1> [[VM]], i64 0, i64 [[VL]], i64 1)
+// CHECK-RV64-NEXT: ret <vscale x 2 x bfloat> [[TMP0]]
+//
+vbfloat16mf2_t test_vfncvt_f_f_w_bf16mf2_rm_mu(vbool32_t vm, vbfloat16mf2_t vd,
+ vfloat32m1_t vs2, size_t vl) {
+ return __riscv_vfncvt_f_bf16_mu(vm, vd, vs2, __RISCV_FRM_RNE, vl);
+}
+
+// CHECK-RV64-LABEL: define dso_local <vscale x 4 x bfloat> @test_vfncvt_f_f_w_bf16m1_rm_mu(
+// CHECK-RV64-SAME: <vscale x 4 x i1> [[VM:%.*]], <vscale x 4 x bfloat> [[VD:%.*]], <vscale x 4 x float> [[VS2:%.*]], i64 noundef [[VL:%.*]]) #[[ATTR0]] {
+// CHECK-RV64-NEXT: [[ENTRY:.*:]]
+// CHECK-RV64-NEXT: [[TMP0:%.*]] = call <vscale x 4 x bfloat> @llvm.riscv.vfncvt.f.f.w.mask.nxv4bf16.nxv4f32.i64(<vscale x 4 x bfloat> [[VD]], <vscale x 4 x float> [[VS2]], <vscale x 4 x i1> [[VM]], i64 0, i64 [[VL]], i64 1)
+// CHECK-RV64-NEXT: ret <vscale x 4 x bfloat> [[TMP0]]
+//
+vbfloat16m1_t test_vfncvt_f_f_w_bf16m1_rm_mu(vbool16_t vm, vbfloat16m1_t vd,
+ vfloat32m2_t vs2, size_t vl) {
+ return __riscv_vfncvt_f_bf16_mu(vm, vd, vs2, __RISCV_FRM_RNE, vl);
+}
+
+// CHECK-RV64-LABEL: define dso_local <vscale x 8 x bfloat> @test_vfncvt_f_f_w_bf16m2_rm_mu(
+// CHECK-RV64-SAME: <vscale x 8 x i1> [[VM:%.*]], <vscale x 8 x bfloat> [[VD:%.*]], <vscale x 8 x float> [[VS2:%.*]], i64 noundef [[VL:%.*]]) #[[ATTR0]] {
+// CHECK-RV64-NEXT: [[ENTRY:.*:]]
+// CHECK-RV64-NEXT: [[TMP0:%.*]] = call <vscale x 8 x bfloat> @llvm.riscv.vfncvt.f.f.w.mask.nxv8bf16.nxv8f32.i64(<vscale x 8 x bfloat> [[VD]], <vscale x 8 x float> [[VS2]], <vscale x 8 x i1> [[VM]], i64 0, i64 [[VL]], i64 1)
+// CHECK-RV64-NEXT: ret <vscale x 8 x bfloat> [[TMP0]]
+//
+vbfloat16m2_t test_vfncvt_f_f_w_bf16m2_rm_mu(vbool8_t vm, vbfloat16m2_t vd,
+ vfloat32m4_t vs2, size_t vl) {
+ return __riscv_vfncvt_f_bf16_mu(vm, vd, vs2, __RISCV_FRM_RNE, vl);
+}
+
+// CHECK-RV64-LABEL: define dso_local <vscale x 16 x bfloat> @test_vfncvt_f_f_w_bf16m4_rm_mu(
+// CHECK-RV64-SAME: <vscale x 16 x i1> [[VM:%.*]], <vscale x 16 x bfloat> [[VD:%.*]], <vscale x 16 x float> [[VS2:%.*]], i64 noundef [[VL:%.*]]) #[[ATTR0]] {
+// CHECK-RV64-NEXT: [[ENTRY:.*:]]
+// CHECK-RV64-NEXT: [[TMP0:%.*]] = call <vscale x 16 x bfloat> @llvm.riscv.vfncvt.f.f.w.mask.nxv16bf16.nxv16f32.i64(<vscale x 16 x bfloat> [[VD]], <vscale x 16 x float> [[VS2]], <vscale x 16 x i1> [[VM]], i64 0, i64 [[VL]], i64 1)
+// CHECK-RV64-NEXT: ret <vscale x 16 x bfloat> [[TMP0]]
+//
+vbfloat16m4_t test_vfncvt_f_f_w_bf16m4_rm_mu(vbool4_t vm, vbfloat16m4_t vd,
+ vfloat32m8_t vs2, size_t vl) {
+ return __riscv_vfncvt_f_bf16_mu(vm, vd, vs2, __RISCV_FRM_RNE, vl);
+}
diff --git a/clang/test/CodeGen/RISCV/rvv-intrinsics-autogenerated/zvfbfa/policy/overloaded/vfncvt_rod.c b/clang/test/CodeGen/RISCV/rvv-intrinsics-autogenerated/zvfbfa/policy/overloaded/vfncvt_rod.c
new file mode 100644
index 0000000..0745633
--- /dev/null
+++ b/clang/test/CodeGen/RISCV/rvv-intrinsics-autogenerated/zvfbfa/policy/overloaded/vfncvt_rod.c
@@ -0,0 +1,233 @@
+// NOTE: Assertions have been autogenerated by utils/update_cc_test_checks.py UTC_ARGS: --version 5
+// REQUIRES: riscv-registered-target
+// RUN: %clang_cc1 -triple riscv64 -target-feature +v \
+// RUN: -target-feature +experimental-zvfbfa -disable-O0-optnone \
+// RUN: -emit-llvm %s -o - | opt -S -passes=mem2reg | \
+// RUN: FileCheck --check-prefix=CHECK-RV64 %s
+
+#include <riscv_vector.h>
+
+// CHECK-RV64-LABEL: define dso_local <vscale x 1 x bfloat> @test_vfncvt_rod_f_f_w_bf16mf4_tu(
+// CHECK-RV64-SAME: <vscale x 1 x bfloat> [[VD:%.*]], <vscale x 1 x float> [[VS2:%.*]], i64 noundef [[VL:%.*]]) #[[ATTR0:[0-9]+]] {
+// CHECK-RV64-NEXT: [[ENTRY:.*:]]
+// CHECK-RV64-NEXT: [[TMP0:%.*]] = call <vscale x 1 x bfloat> @llvm.riscv.vfncvt.rod.f.f.w.nxv1bf16.nxv1f32.i64(<vscale x 1 x bfloat> [[VD]], <vscale x 1 x float> [[VS2]], i64 [[VL]])
+// CHECK-RV64-NEXT: ret <vscale x 1 x bfloat> [[TMP0]]
+//
+vbfloat16mf4_t test_vfncvt_rod_f_f_w_bf16mf4_tu(vbfloat16mf4_t vd,
+ vfloat32mf2_t vs2, size_t vl) {
+ return __riscv_vfncvt_rod_f_bf16_tu(vd, vs2, vl);
+}
+
+// CHECK-RV64-LABEL: define dso_local <vscale x 2 x bfloat> @test_vfncvt_rod_f_f_w_bf16mf2_tu(
+// CHECK-RV64-SAME: <vscale x 2 x bfloat> [[VD:%.*]], <vscale x 2 x float> [[VS2:%.*]], i64 noundef [[VL:%.*]]) #[[ATTR0]] {
+// CHECK-RV64-NEXT: [[ENTRY:.*:]]
+// CHECK-RV64-NEXT: [[TMP0:%.*]] = call <vscale x 2 x bfloat> @llvm.riscv.vfncvt.rod.f.f.w.nxv2bf16.nxv2f32.i64(<vscale x 2 x bfloat> [[VD]], <vscale x 2 x float> [[VS2]], i64 [[VL]])
+// CHECK-RV64-NEXT: ret <vscale x 2 x bfloat> [[TMP0]]
+//
+vbfloat16mf2_t test_vfncvt_rod_f_f_w_bf16mf2_tu(vbfloat16mf2_t vd,
+ vfloat32m1_t vs2, size_t vl) {
+ return __riscv_vfncvt_rod_f_bf16_tu(vd, vs2, vl);
+}
+
+// CHECK-RV64-LABEL: define dso_local <vscale x 4 x bfloat> @test_vfncvt_rod_f_f_w_bf16m1_tu(
+// CHECK-RV64-SAME: <vscale x 4 x bfloat> [[VD:%.*]], <vscale x 4 x float> [[VS2:%.*]], i64 noundef [[VL:%.*]]) #[[ATTR0]] {
+// CHECK-RV64-NEXT: [[ENTRY:.*:]]
+// CHECK-RV64-NEXT: [[TMP0:%.*]] = call <vscale x 4 x bfloat> @llvm.riscv.vfncvt.rod.f.f.w.nxv4bf16.nxv4f32.i64(<vscale x 4 x bfloat> [[VD]], <vscale x 4 x float> [[VS2]], i64 [[VL]])
+// CHECK-RV64-NEXT: ret <vscale x 4 x bfloat> [[TMP0]]
+//
+vbfloat16m1_t test_vfncvt_rod_f_f_w_bf16m1_tu(vbfloat16m1_t vd,
+ vfloat32m2_t vs2, size_t vl) {
+ return __riscv_vfncvt_rod_f_bf16_tu(vd, vs2, vl);
+}
+
+// CHECK-RV64-LABEL: define dso_local <vscale x 8 x bfloat> @test_vfncvt_rod_f_f_w_bf16m2_tu(
+// CHECK-RV64-SAME: <vscale x 8 x bfloat> [[VD:%.*]], <vscale x 8 x float> [[VS2:%.*]], i64 noundef [[VL:%.*]]) #[[ATTR0]] {
+// CHECK-RV64-NEXT: [[ENTRY:.*:]]
+// CHECK-RV64-NEXT: [[TMP0:%.*]] = call <vscale x 8 x bfloat> @llvm.riscv.vfncvt.rod.f.f.w.nxv8bf16.nxv8f32.i64(<vscale x 8 x bfloat> [[VD]], <vscale x 8 x float> [[VS2]], i64 [[VL]])
+// CHECK-RV64-NEXT: ret <vscale x 8 x bfloat> [[TMP0]]
+//
+vbfloat16m2_t test_vfncvt_rod_f_f_w_bf16m2_tu(vbfloat16m2_t vd,
+ vfloat32m4_t vs2, size_t vl) {
+ return __riscv_vfncvt_rod_f_bf16_tu(vd, vs2, vl);
+}
+
+// CHECK-RV64-LABEL: define dso_local <vscale x 16 x bfloat> @test_vfncvt_rod_f_f_w_bf16m4_tu(
+// CHECK-RV64-SAME: <vscale x 16 x bfloat> [[VD:%.*]], <vscale x 16 x float> [[VS2:%.*]], i64 noundef [[VL:%.*]]) #[[ATTR0]] {
+// CHECK-RV64-NEXT: [[ENTRY:.*:]]
+// CHECK-RV64-NEXT: [[TMP0:%.*]] = call <vscale x 16 x bfloat> @llvm.riscv.vfncvt.rod.f.f.w.nxv16bf16.nxv16f32.i64(<vscale x 16 x bfloat> [[VD]], <vscale x 16 x float> [[VS2]], i64 [[VL]])
+// CHECK-RV64-NEXT: ret <vscale x 16 x bfloat> [[TMP0]]
+//
+vbfloat16m4_t test_vfncvt_rod_f_f_w_bf16m4_tu(vbfloat16m4_t vd,
+ vfloat32m8_t vs2, size_t vl) {
+ return __riscv_vfncvt_rod_f_bf16_tu(vd, vs2, vl);
+}
+
+// CHECK-RV64-LABEL: define dso_local <vscale x 1 x bfloat> @test_vfncvt_rod_f_f_w_bf16mf4_tum(
+// CHECK-RV64-SAME: <vscale x 1 x i1> [[VM:%.*]], <vscale x 1 x bfloat> [[VD:%.*]], <vscale x 1 x float> [[VS2:%.*]], i64 noundef [[VL:%.*]]) #[[ATTR0]] {
+// CHECK-RV64-NEXT: [[ENTRY:.*:]]
+// CHECK-RV64-NEXT: [[TMP0:%.*]] = call <vscale x 1 x bfloat> @llvm.riscv.vfncvt.rod.f.f.w.mask.nxv1bf16.nxv1f32.i64(<vscale x 1 x bfloat> [[VD]], <vscale x 1 x float> [[VS2]], <vscale x 1 x i1> [[VM]], i64 [[VL]], i64 2)
+// CHECK-RV64-NEXT: ret <vscale x 1 x bfloat> [[TMP0]]
+//
+vbfloat16mf4_t test_vfncvt_rod_f_f_w_bf16mf4_tum(vbool64_t vm,
+ vbfloat16mf4_t vd,
+ vfloat32mf2_t vs2, size_t vl) {
+ return __riscv_vfncvt_rod_f_bf16_tum(vm, vd, vs2, vl);
+}
+
+// CHECK-RV64-LABEL: define dso_local <vscale x 2 x bfloat> @test_vfncvt_rod_f_f_w_bf16mf2_tum(
+// CHECK-RV64-SAME: <vscale x 2 x i1> [[VM:%.*]], <vscale x 2 x bfloat> [[VD:%.*]], <vscale x 2 x float> [[VS2:%.*]], i64 noundef [[VL:%.*]]) #[[ATTR0]] {
+// CHECK-RV64-NEXT: [[ENTRY:.*:]]
+// CHECK-RV64-NEXT: [[TMP0:%.*]] = call <vscale x 2 x bfloat> @llvm.riscv.vfncvt.rod.f.f.w.mask.nxv2bf16.nxv2f32.i64(<vscale x 2 x bfloat> [[VD]], <vscale x 2 x float> [[VS2]], <vscale x 2 x i1> [[VM]], i64 [[VL]], i64 2)
+// CHECK-RV64-NEXT: ret <vscale x 2 x bfloat> [[TMP0]]
+//
+vbfloat16mf2_t test_vfncvt_rod_f_f_w_bf16mf2_tum(vbool32_t vm,
+ vbfloat16mf2_t vd,
+ vfloat32m1_t vs2, size_t vl) {
+ return __riscv_vfncvt_rod_f_bf16_tum(vm, vd, vs2, vl);
+}
+
+// CHECK-RV64-LABEL: define dso_local <vscale x 4 x bfloat> @test_vfncvt_rod_f_f_w_bf16m1_tum(
+// CHECK-RV64-SAME: <vscale x 4 x i1> [[VM:%.*]], <vscale x 4 x bfloat> [[VD:%.*]], <vscale x 4 x float> [[VS2:%.*]], i64 noundef [[VL:%.*]]) #[[ATTR0]] {
+// CHECK-RV64-NEXT: [[ENTRY:.*:]]
+// CHECK-RV64-NEXT: [[TMP0:%.*]] = call <vscale x 4 x bfloat> @llvm.riscv.vfncvt.rod.f.f.w.mask.nxv4bf16.nxv4f32.i64(<vscale x 4 x bfloat> [[VD]], <vscale x 4 x float> [[VS2]], <vscale x 4 x i1> [[VM]], i64 [[VL]], i64 2)
+// CHECK-RV64-NEXT: ret <vscale x 4 x bfloat> [[TMP0]]
+//
+vbfloat16m1_t test_vfncvt_rod_f_f_w_bf16m1_tum(vbool16_t vm, vbfloat16m1_t vd,
+ vfloat32m2_t vs2, size_t vl) {
+ return __riscv_vfncvt_rod_f_bf16_tum(vm, vd, vs2, vl);
+}
+
+// CHECK-RV64-LABEL: define dso_local <vscale x 8 x bfloat> @test_vfncvt_rod_f_f_w_bf16m2_tum(
+// CHECK-RV64-SAME: <vscale x 8 x i1> [[VM:%.*]], <vscale x 8 x bfloat> [[VD:%.*]], <vscale x 8 x float> [[VS2:%.*]], i64 noundef [[VL:%.*]]) #[[ATTR0]] {
+// CHECK-RV64-NEXT: [[ENTRY:.*:]]
+// CHECK-RV64-NEXT: [[TMP0:%.*]] = call <vscale x 8 x bfloat> @llvm.riscv.vfncvt.rod.f.f.w.mask.nxv8bf16.nxv8f32.i64(<vscale x 8 x bfloat> [[VD]], <vscale x 8 x float> [[VS2]], <vscale x 8 x i1> [[VM]], i64 [[VL]], i64 2)
+// CHECK-RV64-NEXT: ret <vscale x 8 x bfloat> [[TMP0]]
+//
+vbfloat16m2_t test_vfncvt_rod_f_f_w_bf16m2_tum(vbool8_t vm, vbfloat16m2_t vd,
+ vfloat32m4_t vs2, size_t vl) {
+ return __riscv_vfncvt_rod_f_bf16_tum(vm, vd, vs2, vl);
+}
+
+// CHECK-RV64-LABEL: define dso_local <vscale x 16 x bfloat> @test_vfncvt_rod_f_f_w_bf16m4_tum(
+// CHECK-RV64-SAME: <vscale x 16 x i1> [[VM:%.*]], <vscale x 16 x bfloat> [[VD:%.*]], <vscale x 16 x float> [[VS2:%.*]], i64 noundef [[VL:%.*]]) #[[ATTR0]] {
+// CHECK-RV64-NEXT: [[ENTRY:.*:]]
+// CHECK-RV64-NEXT: [[TMP0:%.*]] = call <vscale x 16 x bfloat> @llvm.riscv.vfncvt.rod.f.f.w.mask.nxv16bf16.nxv16f32.i64(<vscale x 16 x bfloat> [[VD]], <vscale x 16 x float> [[VS2]], <vscale x 16 x i1> [[VM]], i64 [[VL]], i64 2)
+// CHECK-RV64-NEXT: ret <vscale x 16 x bfloat> [[TMP0]]
+//
+vbfloat16m4_t test_vfncvt_rod_f_f_w_bf16m4_tum(vbool4_t vm, vbfloat16m4_t vd,
+ vfloat32m8_t vs2, size_t vl) {
+ return __riscv_vfncvt_rod_f_bf16_tum(vm, vd, vs2, vl);
+}
+
+// CHECK-RV64-LABEL: define dso_local <vscale x 1 x bfloat> @test_vfncvt_rod_f_f_w_bf16mf4_tumu(
+// CHECK-RV64-SAME: <vscale x 1 x i1> [[VM:%.*]], <vscale x 1 x bfloat> [[VD:%.*]], <vscale x 1 x float> [[VS2:%.*]], i64 noundef [[VL:%.*]]) #[[ATTR0]] {
+// CHECK-RV64-NEXT: [[ENTRY:.*:]]
+// CHECK-RV64-NEXT: [[TMP0:%.*]] = call <vscale x 1 x bfloat> @llvm.riscv.vfncvt.rod.f.f.w.mask.nxv1bf16.nxv1f32.i64(<vscale x 1 x bfloat> [[VD]], <vscale x 1 x float> [[VS2]], <vscale x 1 x i1> [[VM]], i64 [[VL]], i64 0)
+// CHECK-RV64-NEXT: ret <vscale x 1 x bfloat> [[TMP0]]
+//
+vbfloat16mf4_t test_vfncvt_rod_f_f_w_bf16mf4_tumu(vbool64_t vm,
+ vbfloat16mf4_t vd,
+ vfloat32mf2_t vs2,
+ size_t vl) {
+ return __riscv_vfncvt_rod_f_bf16_tumu(vm, vd, vs2, vl);
+}
+
+// CHECK-RV64-LABEL: define dso_local <vscale x 2 x bfloat> @test_vfncvt_rod_f_f_w_bf16mf2_tumu(
+// CHECK-RV64-SAME: <vscale x 2 x i1> [[VM:%.*]], <vscale x 2 x bfloat> [[VD:%.*]], <vscale x 2 x float> [[VS2:%.*]], i64 noundef [[VL:%.*]]) #[[ATTR0]] {
+// CHECK-RV64-NEXT: [[ENTRY:.*:]]
+// CHECK-RV64-NEXT: [[TMP0:%.*]] = call <vscale x 2 x bfloat> @llvm.riscv.vfncvt.rod.f.f.w.mask.nxv2bf16.nxv2f32.i64(<vscale x 2 x bfloat> [[VD]], <vscale x 2 x float> [[VS2]], <vscale x 2 x i1> [[VM]], i64 [[VL]], i64 0)
+// CHECK-RV64-NEXT: ret <vscale x 2 x bfloat> [[TMP0]]
+//
+vbfloat16mf2_t test_vfncvt_rod_f_f_w_bf16mf2_tumu(vbool32_t vm,
+ vbfloat16mf2_t vd,
+ vfloat32m1_t vs2, size_t vl) {
+ return __riscv_vfncvt_rod_f_bf16_tumu(vm, vd, vs2, vl);
+}
+
+// CHECK-RV64-LABEL: define dso_local <vscale x 4 x bfloat> @test_vfncvt_rod_f_f_w_bf16m1_tumu(
+// CHECK-RV64-SAME: <vscale x 4 x i1> [[VM:%.*]], <vscale x 4 x bfloat> [[VD:%.*]], <vscale x 4 x float> [[VS2:%.*]], i64 noundef [[VL:%.*]]) #[[ATTR0]] {
+// CHECK-RV64-NEXT: [[ENTRY:.*:]]
+// CHECK-RV64-NEXT: [[TMP0:%.*]] = call <vscale x 4 x bfloat> @llvm.riscv.vfncvt.rod.f.f.w.mask.nxv4bf16.nxv4f32.i64(<vscale x 4 x bfloat> [[VD]], <vscale x 4 x float> [[VS2]], <vscale x 4 x i1> [[VM]], i64 [[VL]], i64 0)
+// CHECK-RV64-NEXT: ret <vscale x 4 x bfloat> [[TMP0]]
+//
+vbfloat16m1_t test_vfncvt_rod_f_f_w_bf16m1_tumu(vbool16_t vm, vbfloat16m1_t vd,
+ vfloat32m2_t vs2, size_t vl) {
+ return __riscv_vfncvt_rod_f_bf16_tumu(vm, vd, vs2, vl);
+}
+
+// CHECK-RV64-LABEL: define dso_local <vscale x 8 x bfloat> @test_vfncvt_rod_f_f_w_bf16m2_tumu(
+// CHECK-RV64-SAME: <vscale x 8 x i1> [[VM:%.*]], <vscale x 8 x bfloat> [[VD:%.*]], <vscale x 8 x float> [[VS2:%.*]], i64 noundef [[VL:%.*]]) #[[ATTR0]] {
+// CHECK-RV64-NEXT: [[ENTRY:.*:]]
+// CHECK-RV64-NEXT: [[TMP0:%.*]] = call <vscale x 8 x bfloat> @llvm.riscv.vfncvt.rod.f.f.w.mask.nxv8bf16.nxv8f32.i64(<vscale x 8 x bfloat> [[VD]], <vscale x 8 x float> [[VS2]], <vscale x 8 x i1> [[VM]], i64 [[VL]], i64 0)
+// CHECK-RV64-NEXT: ret <vscale x 8 x bfloat> [[TMP0]]
+//
+vbfloat16m2_t test_vfncvt_rod_f_f_w_bf16m2_tumu(vbool8_t vm, vbfloat16m2_t vd,
+ vfloat32m4_t vs2, size_t vl) {
+ return __riscv_vfncvt_rod_f_bf16_tumu(vm, vd, vs2, vl);
+}
+
+// CHECK-RV64-LABEL: define dso_local <vscale x 16 x bfloat> @test_vfncvt_rod_f_f_w_bf16m4_tumu(
+// CHECK-RV64-SAME: <vscale x 16 x i1> [[VM:%.*]], <vscale x 16 x bfloat> [[VD:%.*]], <vscale x 16 x float> [[VS2:%.*]], i64 noundef [[VL:%.*]]) #[[ATTR0]] {
+// CHECK-RV64-NEXT: [[ENTRY:.*:]]
+// CHECK-RV64-NEXT: [[TMP0:%.*]] = call <vscale x 16 x bfloat> @llvm.riscv.vfncvt.rod.f.f.w.mask.nxv16bf16.nxv16f32.i64(<vscale x 16 x bfloat> [[VD]], <vscale x 16 x float> [[VS2]], <vscale x 16 x i1> [[VM]], i64 [[VL]], i64 0)
+// CHECK-RV64-NEXT: ret <vscale x 16 x bfloat> [[TMP0]]
+//
+vbfloat16m4_t test_vfncvt_rod_f_f_w_bf16m4_tumu(vbool4_t vm, vbfloat16m4_t vd,
+ vfloat32m8_t vs2, size_t vl) {
+ return __riscv_vfncvt_rod_f_bf16_tumu(vm, vd, vs2, vl);
+}
+
+// CHECK-RV64-LABEL: define dso_local <vscale x 1 x bfloat> @test_vfncvt_rod_f_f_w_bf16mf4_mu(
+// CHECK-RV64-SAME: <vscale x 1 x i1> [[VM:%.*]], <vscale x 1 x bfloat> [[VD:%.*]], <vscale x 1 x float> [[VS2:%.*]], i64 noundef [[VL:%.*]]) #[[ATTR0]] {
+// CHECK-RV64-NEXT: [[ENTRY:.*:]]
+// CHECK-RV64-NEXT: [[TMP0:%.*]] = call <vscale x 1 x bfloat> @llvm.riscv.vfncvt.rod.f.f.w.mask.nxv1bf16.nxv1f32.i64(<vscale x 1 x bfloat> [[VD]], <vscale x 1 x float> [[VS2]], <vscale x 1 x i1> [[VM]], i64 [[VL]], i64 1)
+// CHECK-RV64-NEXT: ret <vscale x 1 x bfloat> [[TMP0]]
+//
+vbfloat16mf4_t test_vfncvt_rod_f_f_w_bf16mf4_mu(vbool64_t vm, vbfloat16mf4_t vd,
+ vfloat32mf2_t vs2, size_t vl) {
+ return __riscv_vfncvt_rod_f_bf16_mu(vm, vd, vs2, vl);
+}
+
+// CHECK-RV64-LABEL: define dso_local <vscale x 2 x bfloat> @test_vfncvt_rod_f_f_w_bf16mf2_mu(
+// CHECK-RV64-SAME: <vscale x 2 x i1> [[VM:%.*]], <vscale x 2 x bfloat> [[VD:%.*]], <vscale x 2 x float> [[VS2:%.*]], i64 noundef [[VL:%.*]]) #[[ATTR0]] {
+// CHECK-RV64-NEXT: [[ENTRY:.*:]]
+// CHECK-RV64-NEXT: [[TMP0:%.*]] = call <vscale x 2 x bfloat> @llvm.riscv.vfncvt.rod.f.f.w.mask.nxv2bf16.nxv2f32.i64(<vscale x 2 x bfloat> [[VD]], <vscale x 2 x float> [[VS2]], <vscale x 2 x i1> [[VM]], i64 [[VL]], i64 1)
+// CHECK-RV64-NEXT: ret <vscale x 2 x bfloat> [[TMP0]]
+//
+vbfloat16mf2_t test_vfncvt_rod_f_f_w_bf16mf2_mu(vbool32_t vm, vbfloat16mf2_t vd,
+ vfloat32m1_t vs2, size_t vl) {
+ return __riscv_vfncvt_rod_f_bf16_mu(vm, vd, vs2, vl);
+}
+
+// CHECK-RV64-LABEL: define dso_local <vscale x 4 x bfloat> @test_vfncvt_rod_f_f_w_bf16m1_mu(
+// CHECK-RV64-SAME: <vscale x 4 x i1> [[VM:%.*]], <vscale x 4 x bfloat> [[VD:%.*]], <vscale x 4 x float> [[VS2:%.*]], i64 noundef [[VL:%.*]]) #[[ATTR0]] {
+// CHECK-RV64-NEXT: [[ENTRY:.*:]]
+// CHECK-RV64-NEXT: [[TMP0:%.*]] = call <vscale x 4 x bfloat> @llvm.riscv.vfncvt.rod.f.f.w.mask.nxv4bf16.nxv4f32.i64(<vscale x 4 x bfloat> [[VD]], <vscale x 4 x float> [[VS2]], <vscale x 4 x i1> [[VM]], i64 [[VL]], i64 1)
+// CHECK-RV64-NEXT: ret <vscale x 4 x bfloat> [[TMP0]]
+//
+vbfloat16m1_t test_vfncvt_rod_f_f_w_bf16m1_mu(vbool16_t vm, vbfloat16m1_t vd,
+ vfloat32m2_t vs2, size_t vl) {
+ return __riscv_vfncvt_rod_f_bf16_mu(vm, vd, vs2, vl);
+}
+
+// CHECK-RV64-LABEL: define dso_local <vscale x 8 x bfloat> @test_vfncvt_rod_f_f_w_bf16m2_mu(
+// CHECK-RV64-SAME: <vscale x 8 x i1> [[VM:%.*]], <vscale x 8 x bfloat> [[VD:%.*]], <vscale x 8 x float> [[VS2:%.*]], i64 noundef [[VL:%.*]]) #[[ATTR0]] {
+// CHECK-RV64-NEXT: [[ENTRY:.*:]]
+// CHECK-RV64-NEXT: [[TMP0:%.*]] = call <vscale x 8 x bfloat> @llvm.riscv.vfncvt.rod.f.f.w.mask.nxv8bf16.nxv8f32.i64(<vscale x 8 x bfloat> [[VD]], <vscale x 8 x float> [[VS2]], <vscale x 8 x i1> [[VM]], i64 [[VL]], i64 1)
+// CHECK-RV64-NEXT: ret <vscale x 8 x bfloat> [[TMP0]]
+//
+vbfloat16m2_t test_vfncvt_rod_f_f_w_bf16m2_mu(vbool8_t vm, vbfloat16m2_t vd,
+ vfloat32m4_t vs2, size_t vl) {
+ return __riscv_vfncvt_rod_f_bf16_mu(vm, vd, vs2, vl);
+}
+
+// CHECK-RV64-LABEL: define dso_local <vscale x 16 x bfloat> @test_vfncvt_rod_f_f_w_bf16m4_mu(
+// CHECK-RV64-SAME: <vscale x 16 x i1> [[VM:%.*]], <vscale x 16 x bfloat> [[VD:%.*]], <vscale x 16 x float> [[VS2:%.*]], i64 noundef [[VL:%.*]]) #[[ATTR0]] {
+// CHECK-RV64-NEXT: [[ENTRY:.*:]]
+// CHECK-RV64-NEXT: [[TMP0:%.*]] = call <vscale x 16 x bfloat> @llvm.riscv.vfncvt.rod.f.f.w.mask.nxv16bf16.nxv16f32.i64(<vscale x 16 x bfloat> [[VD]], <vscale x 16 x float> [[VS2]], <vscale x 16 x i1> [[VM]], i64 [[VL]], i64 1)
+// CHECK-RV64-NEXT: ret <vscale x 16 x bfloat> [[TMP0]]
+//
+vbfloat16m4_t test_vfncvt_rod_f_f_w_bf16m4_mu(vbool4_t vm, vbfloat16m4_t vd,
+ vfloat32m8_t vs2, size_t vl) {
+ return __riscv_vfncvt_rod_f_bf16_mu(vm, vd, vs2, vl);
+}
diff --git a/clang/test/CodeGen/RISCV/rvv-intrinsics-autogenerated/zvfbfa/policy/overloaded/vfncvt_rtz.c b/clang/test/CodeGen/RISCV/rvv-intrinsics-autogenerated/zvfbfa/policy/overloaded/vfncvt_rtz.c
new file mode 100644
index 0000000..b906c5f
--- /dev/null
+++ b/clang/test/CodeGen/RISCV/rvv-intrinsics-autogenerated/zvfbfa/policy/overloaded/vfncvt_rtz.c
@@ -0,0 +1,572 @@
+// NOTE: Assertions have been autogenerated by utils/update_cc_test_checks.py UTC_ARGS: --version 5
+// REQUIRES: riscv-registered-target
+// RUN: %clang_cc1 -triple riscv64 -target-feature +v \
+// RUN: -target-feature +experimental-zvfbfa -disable-O0-optnone \
+// RUN: -emit-llvm %s -o - | opt -S -passes=mem2reg | \
+// RUN: FileCheck --check-prefix=CHECK-RV64 %s
+
+#include <riscv_vector.h>
+
+// CHECK-RV64-LABEL: define dso_local <vscale x 1 x i8> @test_vfncvt_rtz_x_f_w_bf16mf4_i8mf8_tu(
+// CHECK-RV64-SAME: <vscale x 1 x i8> [[VD:%.*]], <vscale x 1 x bfloat> [[VS2:%.*]], i64 noundef [[VL:%.*]]) #[[ATTR0:[0-9]+]] {
+// CHECK-RV64-NEXT: [[ENTRY:.*:]]
+// CHECK-RV64-NEXT: [[TMP0:%.*]] = call <vscale x 1 x i8> @llvm.riscv.vfncvt.rtz.x.f.w.nxv1i8.nxv1bf16.i64(<vscale x 1 x i8> [[VD]], <vscale x 1 x bfloat> [[VS2]], i64 [[VL]])
+// CHECK-RV64-NEXT: ret <vscale x 1 x i8> [[TMP0]]
+//
+vint8mf8_t test_vfncvt_rtz_x_f_w_bf16mf4_i8mf8_tu(vint8mf8_t vd,
+ vbfloat16mf4_t vs2,
+ size_t vl) {
+ return __riscv_vfncvt_rtz_x_tu(vd, vs2, vl);
+}
+
+// CHECK-RV64-LABEL: define dso_local <vscale x 2 x i8> @test_vfncvt_rtz_x_f_w_bf16mf2_i8mf4_tu(
+// CHECK-RV64-SAME: <vscale x 2 x i8> [[VD:%.*]], <vscale x 2 x bfloat> [[VS2:%.*]], i64 noundef [[VL:%.*]]) #[[ATTR0]] {
+// CHECK-RV64-NEXT: [[ENTRY:.*:]]
+// CHECK-RV64-NEXT: [[TMP0:%.*]] = call <vscale x 2 x i8> @llvm.riscv.vfncvt.rtz.x.f.w.nxv2i8.nxv2bf16.i64(<vscale x 2 x i8> [[VD]], <vscale x 2 x bfloat> [[VS2]], i64 [[VL]])
+// CHECK-RV64-NEXT: ret <vscale x 2 x i8> [[TMP0]]
+//
+vint8mf4_t test_vfncvt_rtz_x_f_w_bf16mf2_i8mf4_tu(vint8mf4_t vd,
+ vbfloat16mf2_t vs2,
+ size_t vl) {
+ return __riscv_vfncvt_rtz_x_tu(vd, vs2, vl);
+}
+
+// CHECK-RV64-LABEL: define dso_local <vscale x 4 x i8> @test_vfncvt_rtz_x_f_w_bf16m1_i8mf2_tu(
+// CHECK-RV64-SAME: <vscale x 4 x i8> [[VD:%.*]], <vscale x 4 x bfloat> [[VS2:%.*]], i64 noundef [[VL:%.*]]) #[[ATTR0]] {
+// CHECK-RV64-NEXT: [[ENTRY:.*:]]
+// CHECK-RV64-NEXT: [[TMP0:%.*]] = call <vscale x 4 x i8> @llvm.riscv.vfncvt.rtz.x.f.w.nxv4i8.nxv4bf16.i64(<vscale x 4 x i8> [[VD]], <vscale x 4 x bfloat> [[VS2]], i64 [[VL]])
+// CHECK-RV64-NEXT: ret <vscale x 4 x i8> [[TMP0]]
+//
+vint8mf2_t test_vfncvt_rtz_x_f_w_bf16m1_i8mf2_tu(vint8mf2_t vd,
+ vbfloat16m1_t vs2, size_t vl) {
+ return __riscv_vfncvt_rtz_x_tu(vd, vs2, vl);
+}
+
+// CHECK-RV64-LABEL: define dso_local <vscale x 8 x i8> @test_vfncvt_rtz_x_f_w_bf16m2_i8m1_tu(
+// CHECK-RV64-SAME: <vscale x 8 x i8> [[VD:%.*]], <vscale x 8 x bfloat> [[VS2:%.*]], i64 noundef [[VL:%.*]]) #[[ATTR0]] {
+// CHECK-RV64-NEXT: [[ENTRY:.*:]]
+// CHECK-RV64-NEXT: [[TMP0:%.*]] = call <vscale x 8 x i8> @llvm.riscv.vfncvt.rtz.x.f.w.nxv8i8.nxv8bf16.i64(<vscale x 8 x i8> [[VD]], <vscale x 8 x bfloat> [[VS2]], i64 [[VL]])
+// CHECK-RV64-NEXT: ret <vscale x 8 x i8> [[TMP0]]
+//
+vint8m1_t test_vfncvt_rtz_x_f_w_bf16m2_i8m1_tu(vint8m1_t vd, vbfloat16m2_t vs2,
+ size_t vl) {
+ return __riscv_vfncvt_rtz_x_tu(vd, vs2, vl);
+}
+
+// CHECK-RV64-LABEL: define dso_local <vscale x 16 x i8> @test_vfncvt_rtz_x_f_w_bf16m4_i8m2_tu(
+// CHECK-RV64-SAME: <vscale x 16 x i8> [[VD:%.*]], <vscale x 16 x bfloat> [[VS2:%.*]], i64 noundef [[VL:%.*]]) #[[ATTR0]] {
+// CHECK-RV64-NEXT: [[ENTRY:.*:]]
+// CHECK-RV64-NEXT: [[TMP0:%.*]] = call <vscale x 16 x i8> @llvm.riscv.vfncvt.rtz.x.f.w.nxv16i8.nxv16bf16.i64(<vscale x 16 x i8> [[VD]], <vscale x 16 x bfloat> [[VS2]], i64 [[VL]])
+// CHECK-RV64-NEXT: ret <vscale x 16 x i8> [[TMP0]]
+//
+vint8m2_t test_vfncvt_rtz_x_f_w_bf16m4_i8m2_tu(vint8m2_t vd, vbfloat16m4_t vs2,
+ size_t vl) {
+ return __riscv_vfncvt_rtz_x_tu(vd, vs2, vl);
+}
+
+// CHECK-RV64-LABEL: define dso_local <vscale x 32 x i8> @test_vfncvt_rtz_x_f_w_bf16m8_i8m4_tu(
+// CHECK-RV64-SAME: <vscale x 32 x i8> [[VD:%.*]], <vscale x 32 x bfloat> [[VS2:%.*]], i64 noundef [[VL:%.*]]) #[[ATTR0]] {
+// CHECK-RV64-NEXT: [[ENTRY:.*:]]
+// CHECK-RV64-NEXT: [[TMP0:%.*]] = call <vscale x 32 x i8> @llvm.riscv.vfncvt.rtz.x.f.w.nxv32i8.nxv32bf16.i64(<vscale x 32 x i8> [[VD]], <vscale x 32 x bfloat> [[VS2]], i64 [[VL]])
+// CHECK-RV64-NEXT: ret <vscale x 32 x i8> [[TMP0]]
+//
+vint8m4_t test_vfncvt_rtz_x_f_w_bf16m8_i8m4_tu(vint8m4_t vd, vbfloat16m8_t vs2,
+ size_t vl) {
+ return __riscv_vfncvt_rtz_x_tu(vd, vs2, vl);
+}
+
+// CHECK-RV64-LABEL: define dso_local <vscale x 1 x i8> @test_vfncvt_rtz_xu_f_w_bf16mf4_u8mf8_tu(
+// CHECK-RV64-SAME: <vscale x 1 x i8> [[VD:%.*]], <vscale x 1 x bfloat> [[VS2:%.*]], i64 noundef [[VL:%.*]]) #[[ATTR0]] {
+// CHECK-RV64-NEXT: [[ENTRY:.*:]]
+// CHECK-RV64-NEXT: [[TMP0:%.*]] = call <vscale x 1 x i8> @llvm.riscv.vfncvt.rtz.xu.f.w.nxv1i8.nxv1bf16.i64(<vscale x 1 x i8> [[VD]], <vscale x 1 x bfloat> [[VS2]], i64 [[VL]])
+// CHECK-RV64-NEXT: ret <vscale x 1 x i8> [[TMP0]]
+//
+vuint8mf8_t test_vfncvt_rtz_xu_f_w_bf16mf4_u8mf8_tu(vuint8mf8_t vd,
+ vbfloat16mf4_t vs2,
+ size_t vl) {
+ return __riscv_vfncvt_rtz_xu_tu(vd, vs2, vl);
+}
+
+// CHECK-RV64-LABEL: define dso_local <vscale x 2 x i8> @test_vfncvt_rtz_xu_f_w_bf16mf2_u8mf4_tu(
+// CHECK-RV64-SAME: <vscale x 2 x i8> [[VD:%.*]], <vscale x 2 x bfloat> [[VS2:%.*]], i64 noundef [[VL:%.*]]) #[[ATTR0]] {
+// CHECK-RV64-NEXT: [[ENTRY:.*:]]
+// CHECK-RV64-NEXT: [[TMP0:%.*]] = call <vscale x 2 x i8> @llvm.riscv.vfncvt.rtz.xu.f.w.nxv2i8.nxv2bf16.i64(<vscale x 2 x i8> [[VD]], <vscale x 2 x bfloat> [[VS2]], i64 [[VL]])
+// CHECK-RV64-NEXT: ret <vscale x 2 x i8> [[TMP0]]
+//
+vuint8mf4_t test_vfncvt_rtz_xu_f_w_bf16mf2_u8mf4_tu(vuint8mf4_t vd,
+ vbfloat16mf2_t vs2,
+ size_t vl) {
+ return __riscv_vfncvt_rtz_xu_tu(vd, vs2, vl);
+}
+
+// CHECK-RV64-LABEL: define dso_local <vscale x 4 x i8> @test_vfncvt_rtz_xu_f_w_bf16m1_u8mf2_tu(
+// CHECK-RV64-SAME: <vscale x 4 x i8> [[VD:%.*]], <vscale x 4 x bfloat> [[VS2:%.*]], i64 noundef [[VL:%.*]]) #[[ATTR0]] {
+// CHECK-RV64-NEXT: [[ENTRY:.*:]]
+// CHECK-RV64-NEXT: [[TMP0:%.*]] = call <vscale x 4 x i8> @llvm.riscv.vfncvt.rtz.xu.f.w.nxv4i8.nxv4bf16.i64(<vscale x 4 x i8> [[VD]], <vscale x 4 x bfloat> [[VS2]], i64 [[VL]])
+// CHECK-RV64-NEXT: ret <vscale x 4 x i8> [[TMP0]]
+//
+vuint8mf2_t test_vfncvt_rtz_xu_f_w_bf16m1_u8mf2_tu(vuint8mf2_t vd,
+ vbfloat16m1_t vs2,
+ size_t vl) {
+ return __riscv_vfncvt_rtz_xu_tu(vd, vs2, vl);
+}
+
+// CHECK-RV64-LABEL: define dso_local <vscale x 8 x i8> @test_vfncvt_rtz_xu_f_w_bf16m2_u8m1_tu(
+// CHECK-RV64-SAME: <vscale x 8 x i8> [[VD:%.*]], <vscale x 8 x bfloat> [[VS2:%.*]], i64 noundef [[VL:%.*]]) #[[ATTR0]] {
+// CHECK-RV64-NEXT: [[ENTRY:.*:]]
+// CHECK-RV64-NEXT: [[TMP0:%.*]] = call <vscale x 8 x i8> @llvm.riscv.vfncvt.rtz.xu.f.w.nxv8i8.nxv8bf16.i64(<vscale x 8 x i8> [[VD]], <vscale x 8 x bfloat> [[VS2]], i64 [[VL]])
+// CHECK-RV64-NEXT: ret <vscale x 8 x i8> [[TMP0]]
+//
+vuint8m1_t test_vfncvt_rtz_xu_f_w_bf16m2_u8m1_tu(vuint8m1_t vd,
+ vbfloat16m2_t vs2, size_t vl) {
+ return __riscv_vfncvt_rtz_xu_tu(vd, vs2, vl);
+}
+
+// CHECK-RV64-LABEL: define dso_local <vscale x 16 x i8> @test_vfncvt_rtz_xu_f_w_bf16m4_u8m2_tu(
+// CHECK-RV64-SAME: <vscale x 16 x i8> [[VD:%.*]], <vscale x 16 x bfloat> [[VS2:%.*]], i64 noundef [[VL:%.*]]) #[[ATTR0]] {
+// CHECK-RV64-NEXT: [[ENTRY:.*:]]
+// CHECK-RV64-NEXT: [[TMP0:%.*]] = call <vscale x 16 x i8> @llvm.riscv.vfncvt.rtz.xu.f.w.nxv16i8.nxv16bf16.i64(<vscale x 16 x i8> [[VD]], <vscale x 16 x bfloat> [[VS2]], i64 [[VL]])
+// CHECK-RV64-NEXT: ret <vscale x 16 x i8> [[TMP0]]
+//
+vuint8m2_t test_vfncvt_rtz_xu_f_w_bf16m4_u8m2_tu(vuint8m2_t vd,
+ vbfloat16m4_t vs2, size_t vl) {
+ return __riscv_vfncvt_rtz_xu_tu(vd, vs2, vl);
+}
+
+// CHECK-RV64-LABEL: define dso_local <vscale x 32 x i8> @test_vfncvt_rtz_xu_f_w_bf16m8_u8m4_tu(
+// CHECK-RV64-SAME: <vscale x 32 x i8> [[VD:%.*]], <vscale x 32 x bfloat> [[VS2:%.*]], i64 noundef [[VL:%.*]]) #[[ATTR0]] {
+// CHECK-RV64-NEXT: [[ENTRY:.*:]]
+// CHECK-RV64-NEXT: [[TMP0:%.*]] = call <vscale x 32 x i8> @llvm.riscv.vfncvt.rtz.xu.f.w.nxv32i8.nxv32bf16.i64(<vscale x 32 x i8> [[VD]], <vscale x 32 x bfloat> [[VS2]], i64 [[VL]])
+// CHECK-RV64-NEXT: ret <vscale x 32 x i8> [[TMP0]]
+//
+vuint8m4_t test_vfncvt_rtz_xu_f_w_bf16m8_u8m4_tu(vuint8m4_t vd,
+ vbfloat16m8_t vs2, size_t vl) {
+ return __riscv_vfncvt_rtz_xu_tu(vd, vs2, vl);
+}
+
+// CHECK-RV64-LABEL: define dso_local <vscale x 1 x i8> @test_vfncvt_rtz_x_f_w_bf16mf4_i8mf8_tum(
+// CHECK-RV64-SAME: <vscale x 1 x i1> [[VM:%.*]], <vscale x 1 x i8> [[VD:%.*]], <vscale x 1 x bfloat> [[VS2:%.*]], i64 noundef [[VL:%.*]]) #[[ATTR0]] {
+// CHECK-RV64-NEXT: [[ENTRY:.*:]]
+// CHECK-RV64-NEXT: [[TMP0:%.*]] = call <vscale x 1 x i8> @llvm.riscv.vfncvt.rtz.x.f.w.mask.nxv1i8.nxv1bf16.i64(<vscale x 1 x i8> [[VD]], <vscale x 1 x bfloat> [[VS2]], <vscale x 1 x i1> [[VM]], i64 [[VL]], i64 2)
+// CHECK-RV64-NEXT: ret <vscale x 1 x i8> [[TMP0]]
+//
+vint8mf8_t test_vfncvt_rtz_x_f_w_bf16mf4_i8mf8_tum(vbool64_t vm, vint8mf8_t vd,
+ vbfloat16mf4_t vs2,
+ size_t vl) {
+ return __riscv_vfncvt_rtz_x_tum(vm, vd, vs2, vl);
+}
+
+// CHECK-RV64-LABEL: define dso_local <vscale x 2 x i8> @test_vfncvt_rtz_x_f_w_bf16mf2_i8mf4_tum(
+// CHECK-RV64-SAME: <vscale x 2 x i1> [[VM:%.*]], <vscale x 2 x i8> [[VD:%.*]], <vscale x 2 x bfloat> [[VS2:%.*]], i64 noundef [[VL:%.*]]) #[[ATTR0]] {
+// CHECK-RV64-NEXT: [[ENTRY:.*:]]
+// CHECK-RV64-NEXT: [[TMP0:%.*]] = call <vscale x 2 x i8> @llvm.riscv.vfncvt.rtz.x.f.w.mask.nxv2i8.nxv2bf16.i64(<vscale x 2 x i8> [[VD]], <vscale x 2 x bfloat> [[VS2]], <vscale x 2 x i1> [[VM]], i64 [[VL]], i64 2)
+// CHECK-RV64-NEXT: ret <vscale x 2 x i8> [[TMP0]]
+//
+vint8mf4_t test_vfncvt_rtz_x_f_w_bf16mf2_i8mf4_tum(vbool32_t vm, vint8mf4_t vd,
+ vbfloat16mf2_t vs2,
+ size_t vl) {
+ return __riscv_vfncvt_rtz_x_tum(vm, vd, vs2, vl);
+}
+
+// CHECK-RV64-LABEL: define dso_local <vscale x 4 x i8> @test_vfncvt_rtz_x_f_w_bf16m1_i8mf2_tum(
+// CHECK-RV64-SAME: <vscale x 4 x i1> [[VM:%.*]], <vscale x 4 x i8> [[VD:%.*]], <vscale x 4 x bfloat> [[VS2:%.*]], i64 noundef [[VL:%.*]]) #[[ATTR0]] {
+// CHECK-RV64-NEXT: [[ENTRY:.*:]]
+// CHECK-RV64-NEXT: [[TMP0:%.*]] = call <vscale x 4 x i8> @llvm.riscv.vfncvt.rtz.x.f.w.mask.nxv4i8.nxv4bf16.i64(<vscale x 4 x i8> [[VD]], <vscale x 4 x bfloat> [[VS2]], <vscale x 4 x i1> [[VM]], i64 [[VL]], i64 2)
+// CHECK-RV64-NEXT: ret <vscale x 4 x i8> [[TMP0]]
+//
+vint8mf2_t test_vfncvt_rtz_x_f_w_bf16m1_i8mf2_tum(vbool16_t vm, vint8mf2_t vd,
+ vbfloat16m1_t vs2,
+ size_t vl) {
+ return __riscv_vfncvt_rtz_x_tum(vm, vd, vs2, vl);
+}
+
+// CHECK-RV64-LABEL: define dso_local <vscale x 8 x i8> @test_vfncvt_rtz_x_f_w_bf16m2_i8m1_tum(
+// CHECK-RV64-SAME: <vscale x 8 x i1> [[VM:%.*]], <vscale x 8 x i8> [[VD:%.*]], <vscale x 8 x bfloat> [[VS2:%.*]], i64 noundef [[VL:%.*]]) #[[ATTR0]] {
+// CHECK-RV64-NEXT: [[ENTRY:.*:]]
+// CHECK-RV64-NEXT: [[TMP0:%.*]] = call <vscale x 8 x i8> @llvm.riscv.vfncvt.rtz.x.f.w.mask.nxv8i8.nxv8bf16.i64(<vscale x 8 x i8> [[VD]], <vscale x 8 x bfloat> [[VS2]], <vscale x 8 x i1> [[VM]], i64 [[VL]], i64 2)
+// CHECK-RV64-NEXT: ret <vscale x 8 x i8> [[TMP0]]
+//
+vint8m1_t test_vfncvt_rtz_x_f_w_bf16m2_i8m1_tum(vbool8_t vm, vint8m1_t vd,
+ vbfloat16m2_t vs2, size_t vl) {
+ return __riscv_vfncvt_rtz_x_tum(vm, vd, vs2, vl);
+}
+
+// CHECK-RV64-LABEL: define dso_local <vscale x 16 x i8> @test_vfncvt_rtz_x_f_w_bf16m4_i8m2_tum(
+// CHECK-RV64-SAME: <vscale x 16 x i1> [[VM:%.*]], <vscale x 16 x i8> [[VD:%.*]], <vscale x 16 x bfloat> [[VS2:%.*]], i64 noundef [[VL:%.*]]) #[[ATTR0]] {
+// CHECK-RV64-NEXT: [[ENTRY:.*:]]
+// CHECK-RV64-NEXT: [[TMP0:%.*]] = call <vscale x 16 x i8> @llvm.riscv.vfncvt.rtz.x.f.w.mask.nxv16i8.nxv16bf16.i64(<vscale x 16 x i8> [[VD]], <vscale x 16 x bfloat> [[VS2]], <vscale x 16 x i1> [[VM]], i64 [[VL]], i64 2)
+// CHECK-RV64-NEXT: ret <vscale x 16 x i8> [[TMP0]]
+//
+vint8m2_t test_vfncvt_rtz_x_f_w_bf16m4_i8m2_tum(vbool4_t vm, vint8m2_t vd,
+ vbfloat16m4_t vs2, size_t vl) {
+ return __riscv_vfncvt_rtz_x_tum(vm, vd, vs2, vl);
+}
+
+// CHECK-RV64-LABEL: define dso_local <vscale x 32 x i8> @test_vfncvt_rtz_x_f_w_bf16m8_i8m4_tum(
+// CHECK-RV64-SAME: <vscale x 32 x i1> [[VM:%.*]], <vscale x 32 x i8> [[VD:%.*]], <vscale x 32 x bfloat> [[VS2:%.*]], i64 noundef [[VL:%.*]]) #[[ATTR0]] {
+// CHECK-RV64-NEXT: [[ENTRY:.*:]]
+// CHECK-RV64-NEXT: [[TMP0:%.*]] = call <vscale x 32 x i8> @llvm.riscv.vfncvt.rtz.x.f.w.mask.nxv32i8.nxv32bf16.i64(<vscale x 32 x i8> [[VD]], <vscale x 32 x bfloat> [[VS2]], <vscale x 32 x i1> [[VM]], i64 [[VL]], i64 2)
+// CHECK-RV64-NEXT: ret <vscale x 32 x i8> [[TMP0]]
+//
+vint8m4_t test_vfncvt_rtz_x_f_w_bf16m8_i8m4_tum(vbool2_t vm, vint8m4_t vd,
+ vbfloat16m8_t vs2, size_t vl) {
+ return __riscv_vfncvt_rtz_x_tum(vm, vd, vs2, vl);
+}
+
+// CHECK-RV64-LABEL: define dso_local <vscale x 1 x i8> @test_vfncvt_rtz_xu_f_w_bf16mf4_u8mf8_tum(
+// CHECK-RV64-SAME: <vscale x 1 x i1> [[VM:%.*]], <vscale x 1 x i8> [[VD:%.*]], <vscale x 1 x bfloat> [[VS2:%.*]], i64 noundef [[VL:%.*]]) #[[ATTR0]] {
+// CHECK-RV64-NEXT: [[ENTRY:.*:]]
+// CHECK-RV64-NEXT: [[TMP0:%.*]] = call <vscale x 1 x i8> @llvm.riscv.vfncvt.rtz.xu.f.w.mask.nxv1i8.nxv1bf16.i64(<vscale x 1 x i8> [[VD]], <vscale x 1 x bfloat> [[VS2]], <vscale x 1 x i1> [[VM]], i64 [[VL]], i64 2)
+// CHECK-RV64-NEXT: ret <vscale x 1 x i8> [[TMP0]]
+//
+vuint8mf8_t test_vfncvt_rtz_xu_f_w_bf16mf4_u8mf8_tum(vbool64_t vm,
+ vuint8mf8_t vd,
+ vbfloat16mf4_t vs2,
+ size_t vl) {
+ return __riscv_vfncvt_rtz_xu_tum(vm, vd, vs2, vl);
+}
+
+// CHECK-RV64-LABEL: define dso_local <vscale x 2 x i8> @test_vfncvt_rtz_xu_f_w_bf16mf2_u8mf4_tum(
+// CHECK-RV64-SAME: <vscale x 2 x i1> [[VM:%.*]], <vscale x 2 x i8> [[VD:%.*]], <vscale x 2 x bfloat> [[VS2:%.*]], i64 noundef [[VL:%.*]]) #[[ATTR0]] {
+// CHECK-RV64-NEXT: [[ENTRY:.*:]]
+// CHECK-RV64-NEXT: [[TMP0:%.*]] = call <vscale x 2 x i8> @llvm.riscv.vfncvt.rtz.xu.f.w.mask.nxv2i8.nxv2bf16.i64(<vscale x 2 x i8> [[VD]], <vscale x 2 x bfloat> [[VS2]], <vscale x 2 x i1> [[VM]], i64 [[VL]], i64 2)
+// CHECK-RV64-NEXT: ret <vscale x 2 x i8> [[TMP0]]
+//
+vuint8mf4_t test_vfncvt_rtz_xu_f_w_bf16mf2_u8mf4_tum(vbool32_t vm,
+ vuint8mf4_t vd,
+ vbfloat16mf2_t vs2,
+ size_t vl) {
+ return __riscv_vfncvt_rtz_xu_tum(vm, vd, vs2, vl);
+}
+
+// CHECK-RV64-LABEL: define dso_local <vscale x 4 x i8> @test_vfncvt_rtz_xu_f_w_bf16m1_u8mf2_tum(
+// CHECK-RV64-SAME: <vscale x 4 x i1> [[VM:%.*]], <vscale x 4 x i8> [[VD:%.*]], <vscale x 4 x bfloat> [[VS2:%.*]], i64 noundef [[VL:%.*]]) #[[ATTR0]] {
+// CHECK-RV64-NEXT: [[ENTRY:.*:]]
+// CHECK-RV64-NEXT: [[TMP0:%.*]] = call <vscale x 4 x i8> @llvm.riscv.vfncvt.rtz.xu.f.w.mask.nxv4i8.nxv4bf16.i64(<vscale x 4 x i8> [[VD]], <vscale x 4 x bfloat> [[VS2]], <vscale x 4 x i1> [[VM]], i64 [[VL]], i64 2)
+// CHECK-RV64-NEXT: ret <vscale x 4 x i8> [[TMP0]]
+//
+vuint8mf2_t test_vfncvt_rtz_xu_f_w_bf16m1_u8mf2_tum(vbool16_t vm,
+ vuint8mf2_t vd,
+ vbfloat16m1_t vs2,
+ size_t vl) {
+ return __riscv_vfncvt_rtz_xu_tum(vm, vd, vs2, vl);
+}
+
+// CHECK-RV64-LABEL: define dso_local <vscale x 8 x i8> @test_vfncvt_rtz_xu_f_w_bf16m2_u8m1_tum(
+// CHECK-RV64-SAME: <vscale x 8 x i1> [[VM:%.*]], <vscale x 8 x i8> [[VD:%.*]], <vscale x 8 x bfloat> [[VS2:%.*]], i64 noundef [[VL:%.*]]) #[[ATTR0]] {
+// CHECK-RV64-NEXT: [[ENTRY:.*:]]
+// CHECK-RV64-NEXT: [[TMP0:%.*]] = call <vscale x 8 x i8> @llvm.riscv.vfncvt.rtz.xu.f.w.mask.nxv8i8.nxv8bf16.i64(<vscale x 8 x i8> [[VD]], <vscale x 8 x bfloat> [[VS2]], <vscale x 8 x i1> [[VM]], i64 [[VL]], i64 2)
+// CHECK-RV64-NEXT: ret <vscale x 8 x i8> [[TMP0]]
+//
+vuint8m1_t test_vfncvt_rtz_xu_f_w_bf16m2_u8m1_tum(vbool8_t vm, vuint8m1_t vd,
+ vbfloat16m2_t vs2,
+ size_t vl) {
+ return __riscv_vfncvt_rtz_xu_tum(vm, vd, vs2, vl);
+}
+
+// CHECK-RV64-LABEL: define dso_local <vscale x 16 x i8> @test_vfncvt_rtz_xu_f_w_bf16m4_u8m2_tum(
+// CHECK-RV64-SAME: <vscale x 16 x i1> [[VM:%.*]], <vscale x 16 x i8> [[VD:%.*]], <vscale x 16 x bfloat> [[VS2:%.*]], i64 noundef [[VL:%.*]]) #[[ATTR0]] {
+// CHECK-RV64-NEXT: [[ENTRY:.*:]]
+// CHECK-RV64-NEXT: [[TMP0:%.*]] = call <vscale x 16 x i8> @llvm.riscv.vfncvt.rtz.xu.f.w.mask.nxv16i8.nxv16bf16.i64(<vscale x 16 x i8> [[VD]], <vscale x 16 x bfloat> [[VS2]], <vscale x 16 x i1> [[VM]], i64 [[VL]], i64 2)
+// CHECK-RV64-NEXT: ret <vscale x 16 x i8> [[TMP0]]
+//
+vuint8m2_t test_vfncvt_rtz_xu_f_w_bf16m4_u8m2_tum(vbool4_t vm, vuint8m2_t vd,
+ vbfloat16m4_t vs2,
+ size_t vl) {
+ return __riscv_vfncvt_rtz_xu_tum(vm, vd, vs2, vl);
+}
+
+// CHECK-RV64-LABEL: define dso_local <vscale x 32 x i8> @test_vfncvt_rtz_xu_f_w_bf16m8_u8m4_tum(
+// CHECK-RV64-SAME: <vscale x 32 x i1> [[VM:%.*]], <vscale x 32 x i8> [[VD:%.*]], <vscale x 32 x bfloat> [[VS2:%.*]], i64 noundef [[VL:%.*]]) #[[ATTR0]] {
+// CHECK-RV64-NEXT: [[ENTRY:.*:]]
+// CHECK-RV64-NEXT: [[TMP0:%.*]] = call <vscale x 32 x i8> @llvm.riscv.vfncvt.rtz.xu.f.w.mask.nxv32i8.nxv32bf16.i64(<vscale x 32 x i8> [[VD]], <vscale x 32 x bfloat> [[VS2]], <vscale x 32 x i1> [[VM]], i64 [[VL]], i64 2)
+// CHECK-RV64-NEXT: ret <vscale x 32 x i8> [[TMP0]]
+//
+vuint8m4_t test_vfncvt_rtz_xu_f_w_bf16m8_u8m4_tum(vbool2_t vm, vuint8m4_t vd,
+ vbfloat16m8_t vs2,
+ size_t vl) {
+ return __riscv_vfncvt_rtz_xu_tum(vm, vd, vs2, vl);
+}
+
+// CHECK-RV64-LABEL: define dso_local <vscale x 1 x i8> @test_vfncvt_rtz_x_f_w_bf16mf4_i8mf8_tumu(
+// CHECK-RV64-SAME: <vscale x 1 x i1> [[VM:%.*]], <vscale x 1 x i8> [[VD:%.*]], <vscale x 1 x bfloat> [[VS2:%.*]], i64 noundef [[VL:%.*]]) #[[ATTR0]] {
+// CHECK-RV64-NEXT: [[ENTRY:.*:]]
+// CHECK-RV64-NEXT: [[TMP0:%.*]] = call <vscale x 1 x i8> @llvm.riscv.vfncvt.rtz.x.f.w.mask.nxv1i8.nxv1bf16.i64(<vscale x 1 x i8> [[VD]], <vscale x 1 x bfloat> [[VS2]], <vscale x 1 x i1> [[VM]], i64 [[VL]], i64 0)
+// CHECK-RV64-NEXT: ret <vscale x 1 x i8> [[TMP0]]
+//
+vint8mf8_t test_vfncvt_rtz_x_f_w_bf16mf4_i8mf8_tumu(vbool64_t vm, vint8mf8_t vd,
+ vbfloat16mf4_t vs2,
+ size_t vl) {
+ return __riscv_vfncvt_rtz_x_tumu(vm, vd, vs2, vl);
+}
+
+// CHECK-RV64-LABEL: define dso_local <vscale x 2 x i8> @test_vfncvt_rtz_x_f_w_bf16mf2_i8mf4_tumu(
+// CHECK-RV64-SAME: <vscale x 2 x i1> [[VM:%.*]], <vscale x 2 x i8> [[VD:%.*]], <vscale x 2 x bfloat> [[VS2:%.*]], i64 noundef [[VL:%.*]]) #[[ATTR0]] {
+// CHECK-RV64-NEXT: [[ENTRY:.*:]]
+// CHECK-RV64-NEXT: [[TMP0:%.*]] = call <vscale x 2 x i8> @llvm.riscv.vfncvt.rtz.x.f.w.mask.nxv2i8.nxv2bf16.i64(<vscale x 2 x i8> [[VD]], <vscale x 2 x bfloat> [[VS2]], <vscale x 2 x i1> [[VM]], i64 [[VL]], i64 0)
+// CHECK-RV64-NEXT: ret <vscale x 2 x i8> [[TMP0]]
+//
+vint8mf4_t test_vfncvt_rtz_x_f_w_bf16mf2_i8mf4_tumu(vbool32_t vm, vint8mf4_t vd,
+ vbfloat16mf2_t vs2,
+ size_t vl) {
+ return __riscv_vfncvt_rtz_x_tumu(vm, vd, vs2, vl);
+}
+
+// CHECK-RV64-LABEL: define dso_local <vscale x 4 x i8> @test_vfncvt_rtz_x_f_w_bf16m1_i8mf2_tumu(
+// CHECK-RV64-SAME: <vscale x 4 x i1> [[VM:%.*]], <vscale x 4 x i8> [[VD:%.*]], <vscale x 4 x bfloat> [[VS2:%.*]], i64 noundef [[VL:%.*]]) #[[ATTR0]] {
+// CHECK-RV64-NEXT: [[ENTRY:.*:]]
+// CHECK-RV64-NEXT: [[TMP0:%.*]] = call <vscale x 4 x i8> @llvm.riscv.vfncvt.rtz.x.f.w.mask.nxv4i8.nxv4bf16.i64(<vscale x 4 x i8> [[VD]], <vscale x 4 x bfloat> [[VS2]], <vscale x 4 x i1> [[VM]], i64 [[VL]], i64 0)
+// CHECK-RV64-NEXT: ret <vscale x 4 x i8> [[TMP0]]
+//
+vint8mf2_t test_vfncvt_rtz_x_f_w_bf16m1_i8mf2_tumu(vbool16_t vm, vint8mf2_t vd,
+ vbfloat16m1_t vs2,
+ size_t vl) {
+ return __riscv_vfncvt_rtz_x_tumu(vm, vd, vs2, vl);
+}
+
+// CHECK-RV64-LABEL: define dso_local <vscale x 8 x i8> @test_vfncvt_rtz_x_f_w_bf16m2_i8m1_tumu(
+// CHECK-RV64-SAME: <vscale x 8 x i1> [[VM:%.*]], <vscale x 8 x i8> [[VD:%.*]], <vscale x 8 x bfloat> [[VS2:%.*]], i64 noundef [[VL:%.*]]) #[[ATTR0]] {
+// CHECK-RV64-NEXT: [[ENTRY:.*:]]
+// CHECK-RV64-NEXT: [[TMP0:%.*]] = call <vscale x 8 x i8> @llvm.riscv.vfncvt.rtz.x.f.w.mask.nxv8i8.nxv8bf16.i64(<vscale x 8 x i8> [[VD]], <vscale x 8 x bfloat> [[VS2]], <vscale x 8 x i1> [[VM]], i64 [[VL]], i64 0)
+// CHECK-RV64-NEXT: ret <vscale x 8 x i8> [[TMP0]]
+//
+vint8m1_t test_vfncvt_rtz_x_f_w_bf16m2_i8m1_tumu(vbool8_t vm, vint8m1_t vd,
+ vbfloat16m2_t vs2, size_t vl) {
+ return __riscv_vfncvt_rtz_x_tumu(vm, vd, vs2, vl);
+}
+
+// CHECK-RV64-LABEL: define dso_local <vscale x 16 x i8> @test_vfncvt_rtz_x_f_w_bf16m4_i8m2_tumu(
+// CHECK-RV64-SAME: <vscale x 16 x i1> [[VM:%.*]], <vscale x 16 x i8> [[VD:%.*]], <vscale x 16 x bfloat> [[VS2:%.*]], i64 noundef [[VL:%.*]]) #[[ATTR0]] {
+// CHECK-RV64-NEXT: [[ENTRY:.*:]]
+// CHECK-RV64-NEXT: [[TMP0:%.*]] = call <vscale x 16 x i8> @llvm.riscv.vfncvt.rtz.x.f.w.mask.nxv16i8.nxv16bf16.i64(<vscale x 16 x i8> [[VD]], <vscale x 16 x bfloat> [[VS2]], <vscale x 16 x i1> [[VM]], i64 [[VL]], i64 0)
+// CHECK-RV64-NEXT: ret <vscale x 16 x i8> [[TMP0]]
+//
+vint8m2_t test_vfncvt_rtz_x_f_w_bf16m4_i8m2_tumu(vbool4_t vm, vint8m2_t vd,
+ vbfloat16m4_t vs2, size_t vl) {
+ return __riscv_vfncvt_rtz_x_tumu(vm, vd, vs2, vl);
+}
+
+// CHECK-RV64-LABEL: define dso_local <vscale x 32 x i8> @test_vfncvt_rtz_x_f_w_bf16m8_i8m4_tumu(
+// CHECK-RV64-SAME: <vscale x 32 x i1> [[VM:%.*]], <vscale x 32 x i8> [[VD:%.*]], <vscale x 32 x bfloat> [[VS2:%.*]], i64 noundef [[VL:%.*]]) #[[ATTR0]] {
+// CHECK-RV64-NEXT: [[ENTRY:.*:]]
+// CHECK-RV64-NEXT: [[TMP0:%.*]] = call <vscale x 32 x i8> @llvm.riscv.vfncvt.rtz.x.f.w.mask.nxv32i8.nxv32bf16.i64(<vscale x 32 x i8> [[VD]], <vscale x 32 x bfloat> [[VS2]], <vscale x 32 x i1> [[VM]], i64 [[VL]], i64 0)
+// CHECK-RV64-NEXT: ret <vscale x 32 x i8> [[TMP0]]
+//
+vint8m4_t test_vfncvt_rtz_x_f_w_bf16m8_i8m4_tumu(vbool2_t vm, vint8m4_t vd,
+ vbfloat16m8_t vs2, size_t vl) {
+ return __riscv_vfncvt_rtz_x_tumu(vm, vd, vs2, vl);
+}
+
+// CHECK-RV64-LABEL: define dso_local <vscale x 1 x i8> @test_vfncvt_rtz_xu_f_w_bf16mf4_u8mf8_tumu(
+// CHECK-RV64-SAME: <vscale x 1 x i1> [[VM:%.*]], <vscale x 1 x i8> [[VD:%.*]], <vscale x 1 x bfloat> [[VS2:%.*]], i64 noundef [[VL:%.*]]) #[[ATTR0]] {
+// CHECK-RV64-NEXT: [[ENTRY:.*:]]
+// CHECK-RV64-NEXT: [[TMP0:%.*]] = call <vscale x 1 x i8> @llvm.riscv.vfncvt.rtz.xu.f.w.mask.nxv1i8.nxv1bf16.i64(<vscale x 1 x i8> [[VD]], <vscale x 1 x bfloat> [[VS2]], <vscale x 1 x i1> [[VM]], i64 [[VL]], i64 0)
+// CHECK-RV64-NEXT: ret <vscale x 1 x i8> [[TMP0]]
+//
+vuint8mf8_t test_vfncvt_rtz_xu_f_w_bf16mf4_u8mf8_tumu(vbool64_t vm,
+ vuint8mf8_t vd,
+ vbfloat16mf4_t vs2,
+ size_t vl) {
+ return __riscv_vfncvt_rtz_xu_tumu(vm, vd, vs2, vl);
+}
+
+// CHECK-RV64-LABEL: define dso_local <vscale x 2 x i8> @test_vfncvt_rtz_xu_f_w_bf16mf2_u8mf4_tumu(
+// CHECK-RV64-SAME: <vscale x 2 x i1> [[VM:%.*]], <vscale x 2 x i8> [[VD:%.*]], <vscale x 2 x bfloat> [[VS2:%.*]], i64 noundef [[VL:%.*]]) #[[ATTR0]] {
+// CHECK-RV64-NEXT: [[ENTRY:.*:]]
+// CHECK-RV64-NEXT: [[TMP0:%.*]] = call <vscale x 2 x i8> @llvm.riscv.vfncvt.rtz.xu.f.w.mask.nxv2i8.nxv2bf16.i64(<vscale x 2 x i8> [[VD]], <vscale x 2 x bfloat> [[VS2]], <vscale x 2 x i1> [[VM]], i64 [[VL]], i64 0)
+// CHECK-RV64-NEXT: ret <vscale x 2 x i8> [[TMP0]]
+//
+vuint8mf4_t test_vfncvt_rtz_xu_f_w_bf16mf2_u8mf4_tumu(vbool32_t vm,
+ vuint8mf4_t vd,
+ vbfloat16mf2_t vs2,
+ size_t vl) {
+ return __riscv_vfncvt_rtz_xu_tumu(vm, vd, vs2, vl);
+}
+
+// CHECK-RV64-LABEL: define dso_local <vscale x 4 x i8> @test_vfncvt_rtz_xu_f_w_bf16m1_u8mf2_tumu(
+// CHECK-RV64-SAME: <vscale x 4 x i1> [[VM:%.*]], <vscale x 4 x i8> [[VD:%.*]], <vscale x 4 x bfloat> [[VS2:%.*]], i64 noundef [[VL:%.*]]) #[[ATTR0]] {
+// CHECK-RV64-NEXT: [[ENTRY:.*:]]
+// CHECK-RV64-NEXT: [[TMP0:%.*]] = call <vscale x 4 x i8> @llvm.riscv.vfncvt.rtz.xu.f.w.mask.nxv4i8.nxv4bf16.i64(<vscale x 4 x i8> [[VD]], <vscale x 4 x bfloat> [[VS2]], <vscale x 4 x i1> [[VM]], i64 [[VL]], i64 0)
+// CHECK-RV64-NEXT: ret <vscale x 4 x i8> [[TMP0]]
+//
+vuint8mf2_t test_vfncvt_rtz_xu_f_w_bf16m1_u8mf2_tumu(vbool16_t vm,
+ vuint8mf2_t vd,
+ vbfloat16m1_t vs2,
+ size_t vl) {
+ return __riscv_vfncvt_rtz_xu_tumu(vm, vd, vs2, vl);
+}
+
+// CHECK-RV64-LABEL: define dso_local <vscale x 8 x i8> @test_vfncvt_rtz_xu_f_w_bf16m2_u8m1_tumu(
+// CHECK-RV64-SAME: <vscale x 8 x i1> [[VM:%.*]], <vscale x 8 x i8> [[VD:%.*]], <vscale x 8 x bfloat> [[VS2:%.*]], i64 noundef [[VL:%.*]]) #[[ATTR0]] {
+// CHECK-RV64-NEXT: [[ENTRY:.*:]]
+// CHECK-RV64-NEXT: [[TMP0:%.*]] = call <vscale x 8 x i8> @llvm.riscv.vfncvt.rtz.xu.f.w.mask.nxv8i8.nxv8bf16.i64(<vscale x 8 x i8> [[VD]], <vscale x 8 x bfloat> [[VS2]], <vscale x 8 x i1> [[VM]], i64 [[VL]], i64 0)
+// CHECK-RV64-NEXT: ret <vscale x 8 x i8> [[TMP0]]
+//
+vuint8m1_t test_vfncvt_rtz_xu_f_w_bf16m2_u8m1_tumu(vbool8_t vm, vuint8m1_t vd,
+ vbfloat16m2_t vs2,
+ size_t vl) {
+ return __riscv_vfncvt_rtz_xu_tumu(vm, vd, vs2, vl);
+}
+
+// CHECK-RV64-LABEL: define dso_local <vscale x 16 x i8> @test_vfncvt_rtz_xu_f_w_bf16m4_u8m2_tumu(
+// CHECK-RV64-SAME: <vscale x 16 x i1> [[VM:%.*]], <vscale x 16 x i8> [[VD:%.*]], <vscale x 16 x bfloat> [[VS2:%.*]], i64 noundef [[VL:%.*]]) #[[ATTR0]] {
+// CHECK-RV64-NEXT: [[ENTRY:.*:]]
+// CHECK-RV64-NEXT: [[TMP0:%.*]] = call <vscale x 16 x i8> @llvm.riscv.vfncvt.rtz.xu.f.w.mask.nxv16i8.nxv16bf16.i64(<vscale x 16 x i8> [[VD]], <vscale x 16 x bfloat> [[VS2]], <vscale x 16 x i1> [[VM]], i64 [[VL]], i64 0)
+// CHECK-RV64-NEXT: ret <vscale x 16 x i8> [[TMP0]]
+//
+vuint8m2_t test_vfncvt_rtz_xu_f_w_bf16m4_u8m2_tumu(vbool4_t vm, vuint8m2_t vd,
+ vbfloat16m4_t vs2,
+ size_t vl) {
+ return __riscv_vfncvt_rtz_xu_tumu(vm, vd, vs2, vl);
+}
+
+// CHECK-RV64-LABEL: define dso_local <vscale x 32 x i8> @test_vfncvt_rtz_xu_f_w_bf16m8_u8m4_tumu(
+// CHECK-RV64-SAME: <vscale x 32 x i1> [[VM:%.*]], <vscale x 32 x i8> [[VD:%.*]], <vscale x 32 x bfloat> [[VS2:%.*]], i64 noundef [[VL:%.*]]) #[[ATTR0]] {
+// CHECK-RV64-NEXT: [[ENTRY:.*:]]
+// CHECK-RV64-NEXT: [[TMP0:%.*]] = call <vscale x 32 x i8> @llvm.riscv.vfncvt.rtz.xu.f.w.mask.nxv32i8.nxv32bf16.i64(<vscale x 32 x i8> [[VD]], <vscale x 32 x bfloat> [[VS2]], <vscale x 32 x i1> [[VM]], i64 [[VL]], i64 0)
+// CHECK-RV64-NEXT: ret <vscale x 32 x i8> [[TMP0]]
+//
+vuint8m4_t test_vfncvt_rtz_xu_f_w_bf16m8_u8m4_tumu(vbool2_t vm, vuint8m4_t vd,
+ vbfloat16m8_t vs2,
+ size_t vl) {
+ return __riscv_vfncvt_rtz_xu_tumu(vm, vd, vs2, vl);
+}
+
+// CHECK-RV64-LABEL: define dso_local <vscale x 1 x i8> @test_vfncvt_rtz_x_f_w_bf16mf4_i8mf8_mu(
+// CHECK-RV64-SAME: <vscale x 1 x i1> [[VM:%.*]], <vscale x 1 x i8> [[VD:%.*]], <vscale x 1 x bfloat> [[VS2:%.*]], i64 noundef [[VL:%.*]]) #[[ATTR0]] {
+// CHECK-RV64-NEXT: [[ENTRY:.*:]]
+// CHECK-RV64-NEXT: [[TMP0:%.*]] = call <vscale x 1 x i8> @llvm.riscv.vfncvt.rtz.x.f.w.mask.nxv1i8.nxv1bf16.i64(<vscale x 1 x i8> [[VD]], <vscale x 1 x bfloat> [[VS2]], <vscale x 1 x i1> [[VM]], i64 [[VL]], i64 1)
+// CHECK-RV64-NEXT: ret <vscale x 1 x i8> [[TMP0]]
+//
+vint8mf8_t test_vfncvt_rtz_x_f_w_bf16mf4_i8mf8_mu(vbool64_t vm, vint8mf8_t vd,
+ vbfloat16mf4_t vs2,
+ size_t vl) {
+ return __riscv_vfncvt_rtz_x_mu(vm, vd, vs2, vl);
+}
+
+// CHECK-RV64-LABEL: define dso_local <vscale x 2 x i8> @test_vfncvt_rtz_x_f_w_bf16mf2_i8mf4_mu(
+// CHECK-RV64-SAME: <vscale x 2 x i1> [[VM:%.*]], <vscale x 2 x i8> [[VD:%.*]], <vscale x 2 x bfloat> [[VS2:%.*]], i64 noundef [[VL:%.*]]) #[[ATTR0]] {
+// CHECK-RV64-NEXT: [[ENTRY:.*:]]
+// CHECK-RV64-NEXT: [[TMP0:%.*]] = call <vscale x 2 x i8> @llvm.riscv.vfncvt.rtz.x.f.w.mask.nxv2i8.nxv2bf16.i64(<vscale x 2 x i8> [[VD]], <vscale x 2 x bfloat> [[VS2]], <vscale x 2 x i1> [[VM]], i64 [[VL]], i64 1)
+// CHECK-RV64-NEXT: ret <vscale x 2 x i8> [[TMP0]]
+//
+vint8mf4_t test_vfncvt_rtz_x_f_w_bf16mf2_i8mf4_mu(vbool32_t vm, vint8mf4_t vd,
+ vbfloat16mf2_t vs2,
+ size_t vl) {
+ return __riscv_vfncvt_rtz_x_mu(vm, vd, vs2, vl);
+}
+
+// CHECK-RV64-LABEL: define dso_local <vscale x 4 x i8> @test_vfncvt_rtz_x_f_w_bf16m1_i8mf2_mu(
+// CHECK-RV64-SAME: <vscale x 4 x i1> [[VM:%.*]], <vscale x 4 x i8> [[VD:%.*]], <vscale x 4 x bfloat> [[VS2:%.*]], i64 noundef [[VL:%.*]]) #[[ATTR0]] {
+// CHECK-RV64-NEXT: [[ENTRY:.*:]]
+// CHECK-RV64-NEXT: [[TMP0:%.*]] = call <vscale x 4 x i8> @llvm.riscv.vfncvt.rtz.x.f.w.mask.nxv4i8.nxv4bf16.i64(<vscale x 4 x i8> [[VD]], <vscale x 4 x bfloat> [[VS2]], <vscale x 4 x i1> [[VM]], i64 [[VL]], i64 1)
+// CHECK-RV64-NEXT: ret <vscale x 4 x i8> [[TMP0]]
+//
+vint8mf2_t test_vfncvt_rtz_x_f_w_bf16m1_i8mf2_mu(vbool16_t vm, vint8mf2_t vd,
+ vbfloat16m1_t vs2, size_t vl) {
+ return __riscv_vfncvt_rtz_x_mu(vm, vd, vs2, vl);
+}
+
+// CHECK-RV64-LABEL: define dso_local <vscale x 8 x i8> @test_vfncvt_rtz_x_f_w_bf16m2_i8m1_mu(
+// CHECK-RV64-SAME: <vscale x 8 x i1> [[VM:%.*]], <vscale x 8 x i8> [[VD:%.*]], <vscale x 8 x bfloat> [[VS2:%.*]], i64 noundef [[VL:%.*]]) #[[ATTR0]] {
+// CHECK-RV64-NEXT: [[ENTRY:.*:]]
+// CHECK-RV64-NEXT: [[TMP0:%.*]] = call <vscale x 8 x i8> @llvm.riscv.vfncvt.rtz.x.f.w.mask.nxv8i8.nxv8bf16.i64(<vscale x 8 x i8> [[VD]], <vscale x 8 x bfloat> [[VS2]], <vscale x 8 x i1> [[VM]], i64 [[VL]], i64 1)
+// CHECK-RV64-NEXT: ret <vscale x 8 x i8> [[TMP0]]
+//
+vint8m1_t test_vfncvt_rtz_x_f_w_bf16m2_i8m1_mu(vbool8_t vm, vint8m1_t vd,
+ vbfloat16m2_t vs2, size_t vl) {
+ return __riscv_vfncvt_rtz_x_mu(vm, vd, vs2, vl);
+}
+
+// CHECK-RV64-LABEL: define dso_local <vscale x 16 x i8> @test_vfncvt_rtz_x_f_w_bf16m4_i8m2_mu(
+// CHECK-RV64-SAME: <vscale x 16 x i1> [[VM:%.*]], <vscale x 16 x i8> [[VD:%.*]], <vscale x 16 x bfloat> [[VS2:%.*]], i64 noundef [[VL:%.*]]) #[[ATTR0]] {
+// CHECK-RV64-NEXT: [[ENTRY:.*:]]
+// CHECK-RV64-NEXT: [[TMP0:%.*]] = call <vscale x 16 x i8> @llvm.riscv.vfncvt.rtz.x.f.w.mask.nxv16i8.nxv16bf16.i64(<vscale x 16 x i8> [[VD]], <vscale x 16 x bfloat> [[VS2]], <vscale x 16 x i1> [[VM]], i64 [[VL]], i64 1)
+// CHECK-RV64-NEXT: ret <vscale x 16 x i8> [[TMP0]]
+//
+vint8m2_t test_vfncvt_rtz_x_f_w_bf16m4_i8m2_mu(vbool4_t vm, vint8m2_t vd,
+ vbfloat16m4_t vs2, size_t vl) {
+ return __riscv_vfncvt_rtz_x_mu(vm, vd, vs2, vl);
+}
+
+// CHECK-RV64-LABEL: define dso_local <vscale x 32 x i8> @test_vfncvt_rtz_x_f_w_bf16m8_i8m4_mu(
+// CHECK-RV64-SAME: <vscale x 32 x i1> [[VM:%.*]], <vscale x 32 x i8> [[VD:%.*]], <vscale x 32 x bfloat> [[VS2:%.*]], i64 noundef [[VL:%.*]]) #[[ATTR0]] {
+// CHECK-RV64-NEXT: [[ENTRY:.*:]]
+// CHECK-RV64-NEXT: [[TMP0:%.*]] = call <vscale x 32 x i8> @llvm.riscv.vfncvt.rtz.x.f.w.mask.nxv32i8.nxv32bf16.i64(<vscale x 32 x i8> [[VD]], <vscale x 32 x bfloat> [[VS2]], <vscale x 32 x i1> [[VM]], i64 [[VL]], i64 1)
+// CHECK-RV64-NEXT: ret <vscale x 32 x i8> [[TMP0]]
+//
+vint8m4_t test_vfncvt_rtz_x_f_w_bf16m8_i8m4_mu(vbool2_t vm, vint8m4_t vd,
+ vbfloat16m8_t vs2, size_t vl) {
+ return __riscv_vfncvt_rtz_x_mu(vm, vd, vs2, vl);
+}
+
+// CHECK-RV64-LABEL: define dso_local <vscale x 1 x i8> @test_vfncvt_rtz_xu_f_w_bf16mf4_u8mf8_mu(
+// CHECK-RV64-SAME: <vscale x 1 x i1> [[VM:%.*]], <vscale x 1 x i8> [[VD:%.*]], <vscale x 1 x bfloat> [[VS2:%.*]], i64 noundef [[VL:%.*]]) #[[ATTR0]] {
+// CHECK-RV64-NEXT: [[ENTRY:.*:]]
+// CHECK-RV64-NEXT: [[TMP0:%.*]] = call <vscale x 1 x i8> @llvm.riscv.vfncvt.rtz.xu.f.w.mask.nxv1i8.nxv1bf16.i64(<vscale x 1 x i8> [[VD]], <vscale x 1 x bfloat> [[VS2]], <vscale x 1 x i1> [[VM]], i64 [[VL]], i64 1)
+// CHECK-RV64-NEXT: ret <vscale x 1 x i8> [[TMP0]]
+//
+vuint8mf8_t test_vfncvt_rtz_xu_f_w_bf16mf4_u8mf8_mu(vbool64_t vm,
+ vuint8mf8_t vd,
+ vbfloat16mf4_t vs2,
+ size_t vl) {
+ return __riscv_vfncvt_rtz_xu_mu(vm, vd, vs2, vl);
+}
+
+// CHECK-RV64-LABEL: define dso_local <vscale x 2 x i8> @test_vfncvt_rtz_xu_f_w_bf16mf2_u8mf4_mu(
+// CHECK-RV64-SAME: <vscale x 2 x i1> [[VM:%.*]], <vscale x 2 x i8> [[VD:%.*]], <vscale x 2 x bfloat> [[VS2:%.*]], i64 noundef [[VL:%.*]]) #[[ATTR0]] {
+// CHECK-RV64-NEXT: [[ENTRY:.*:]]
+// CHECK-RV64-NEXT: [[TMP0:%.*]] = call <vscale x 2 x i8> @llvm.riscv.vfncvt.rtz.xu.f.w.mask.nxv2i8.nxv2bf16.i64(<vscale x 2 x i8> [[VD]], <vscale x 2 x bfloat> [[VS2]], <vscale x 2 x i1> [[VM]], i64 [[VL]], i64 1)
+// CHECK-RV64-NEXT: ret <vscale x 2 x i8> [[TMP0]]
+//
+vuint8mf4_t test_vfncvt_rtz_xu_f_w_bf16mf2_u8mf4_mu(vbool32_t vm,
+ vuint8mf4_t vd,
+ vbfloat16mf2_t vs2,
+ size_t vl) {
+ return __riscv_vfncvt_rtz_xu_mu(vm, vd, vs2, vl);
+}
+
+// CHECK-RV64-LABEL: define dso_local <vscale x 4 x i8> @test_vfncvt_rtz_xu_f_w_bf16m1_u8mf2_mu(
+// CHECK-RV64-SAME: <vscale x 4 x i1> [[VM:%.*]], <vscale x 4 x i8> [[VD:%.*]], <vscale x 4 x bfloat> [[VS2:%.*]], i64 noundef [[VL:%.*]]) #[[ATTR0]] {
+// CHECK-RV64-NEXT: [[ENTRY:.*:]]
+// CHECK-RV64-NEXT: [[TMP0:%.*]] = call <vscale x 4 x i8> @llvm.riscv.vfncvt.rtz.xu.f.w.mask.nxv4i8.nxv4bf16.i64(<vscale x 4 x i8> [[VD]], <vscale x 4 x bfloat> [[VS2]], <vscale x 4 x i1> [[VM]], i64 [[VL]], i64 1)
+// CHECK-RV64-NEXT: ret <vscale x 4 x i8> [[TMP0]]
+//
+vuint8mf2_t test_vfncvt_rtz_xu_f_w_bf16m1_u8mf2_mu(vbool16_t vm, vuint8mf2_t vd,
+ vbfloat16m1_t vs2,
+ size_t vl) {
+ return __riscv_vfncvt_rtz_xu_mu(vm, vd, vs2, vl);
+}
+
+// CHECK-RV64-LABEL: define dso_local <vscale x 8 x i8> @test_vfncvt_rtz_xu_f_w_bf16m2_u8m1_mu(
+// CHECK-RV64-SAME: <vscale x 8 x i1> [[VM:%.*]], <vscale x 8 x i8> [[VD:%.*]], <vscale x 8 x bfloat> [[VS2:%.*]], i64 noundef [[VL:%.*]]) #[[ATTR0]] {
+// CHECK-RV64-NEXT: [[ENTRY:.*:]]
+// CHECK-RV64-NEXT: [[TMP0:%.*]] = call <vscale x 8 x i8> @llvm.riscv.vfncvt.rtz.xu.f.w.mask.nxv8i8.nxv8bf16.i64(<vscale x 8 x i8> [[VD]], <vscale x 8 x bfloat> [[VS2]], <vscale x 8 x i1> [[VM]], i64 [[VL]], i64 1)
+// CHECK-RV64-NEXT: ret <vscale x 8 x i8> [[TMP0]]
+//
+vuint8m1_t test_vfncvt_rtz_xu_f_w_bf16m2_u8m1_mu(vbool8_t vm, vuint8m1_t vd,
+ vbfloat16m2_t vs2, size_t vl) {
+ return __riscv_vfncvt_rtz_xu_mu(vm, vd, vs2, vl);
+}
+
+// CHECK-RV64-LABEL: define dso_local <vscale x 16 x i8> @test_vfncvt_rtz_xu_f_w_bf16m4_u8m2_mu(
+// CHECK-RV64-SAME: <vscale x 16 x i1> [[VM:%.*]], <vscale x 16 x i8> [[VD:%.*]], <vscale x 16 x bfloat> [[VS2:%.*]], i64 noundef [[VL:%.*]]) #[[ATTR0]] {
+// CHECK-RV64-NEXT: [[ENTRY:.*:]]
+// CHECK-RV64-NEXT: [[TMP0:%.*]] = call <vscale x 16 x i8> @llvm.riscv.vfncvt.rtz.xu.f.w.mask.nxv16i8.nxv16bf16.i64(<vscale x 16 x i8> [[VD]], <vscale x 16 x bfloat> [[VS2]], <vscale x 16 x i1> [[VM]], i64 [[VL]], i64 1)
+// CHECK-RV64-NEXT: ret <vscale x 16 x i8> [[TMP0]]
+//
+vuint8m2_t test_vfncvt_rtz_xu_f_w_bf16m4_u8m2_mu(vbool4_t vm, vuint8m2_t vd,
+ vbfloat16m4_t vs2, size_t vl) {
+ return __riscv_vfncvt_rtz_xu_mu(vm, vd, vs2, vl);
+}
+
+// CHECK-RV64-LABEL: define dso_local <vscale x 32 x i8> @test_vfncvt_rtz_xu_f_w_bf16m8_u8m4_mu(
+// CHECK-RV64-SAME: <vscale x 32 x i1> [[VM:%.*]], <vscale x 32 x i8> [[VD:%.*]], <vscale x 32 x bfloat> [[VS2:%.*]], i64 noundef [[VL:%.*]]) #[[ATTR0]] {
+// CHECK-RV64-NEXT: [[ENTRY:.*:]]
+// CHECK-RV64-NEXT: [[TMP0:%.*]] = call <vscale x 32 x i8> @llvm.riscv.vfncvt.rtz.xu.f.w.mask.nxv32i8.nxv32bf16.i64(<vscale x 32 x i8> [[VD]], <vscale x 32 x bfloat> [[VS2]], <vscale x 32 x i1> [[VM]], i64 [[VL]], i64 1)
+// CHECK-RV64-NEXT: ret <vscale x 32 x i8> [[TMP0]]
+//
+vuint8m4_t test_vfncvt_rtz_xu_f_w_bf16m8_u8m4_mu(vbool2_t vm, vuint8m4_t vd,
+ vbfloat16m8_t vs2, size_t vl) {
+ return __riscv_vfncvt_rtz_xu_mu(vm, vd, vs2, vl);
+}
diff --git a/clang/test/CodeGen/RISCV/rvv-intrinsics-autogenerated/zvfbfa/policy/overloaded/vfnmacc.c b/clang/test/CodeGen/RISCV/rvv-intrinsics-autogenerated/zvfbfa/policy/overloaded/vfnmacc.c
new file mode 100644
index 0000000..cc487b4
--- /dev/null
+++ b/clang/test/CodeGen/RISCV/rvv-intrinsics-autogenerated/zvfbfa/policy/overloaded/vfnmacc.c
@@ -0,0 +1,489 @@
+// NOTE: Assertions have been autogenerated by utils/update_cc_test_checks.py UTC_ARGS: --version 5
+// REQUIRES: riscv-registered-target
+// RUN: %clang_cc1 -triple riscv64 -target-feature +v \
+// RUN: -target-feature +experimental-zvfbfa -disable-O0-optnone \
+// RUN: -emit-llvm %s -o - | opt -S -passes=mem2reg | \
+// RUN: FileCheck --check-prefix=CHECK-RV64 %s
+
+#include <riscv_vector.h>
+
+// CHECK-RV64-LABEL: define dso_local <vscale x 1 x bfloat> @test_vfnmacc_vv_bf16mf4_tu(
+// CHECK-RV64-SAME: <vscale x 1 x bfloat> [[VD:%.*]], <vscale x 1 x bfloat> [[VS1:%.*]], <vscale x 1 x bfloat> [[VS2:%.*]], i64 noundef [[VL:%.*]]) #[[ATTR0:[0-9]+]] {
+// CHECK-RV64-NEXT: [[ENTRY:.*:]]
+// CHECK-RV64-NEXT: [[TMP0:%.*]] = call <vscale x 1 x bfloat> @llvm.riscv.vfnmacc.nxv1bf16.nxv1bf16.i64(<vscale x 1 x bfloat> [[VD]], <vscale x 1 x bfloat> [[VS1]], <vscale x 1 x bfloat> [[VS2]], i64 7, i64 [[VL]], i64 2)
+// CHECK-RV64-NEXT: ret <vscale x 1 x bfloat> [[TMP0]]
+//
+vbfloat16mf4_t test_vfnmacc_vv_bf16mf4_tu(vbfloat16mf4_t vd, vbfloat16mf4_t vs1, vbfloat16mf4_t vs2, size_t vl) {
+ return __riscv_vfnmacc_tu(vd, vs1, vs2, vl);
+}
+
+// CHECK-RV64-LABEL: define dso_local <vscale x 1 x bfloat> @test_vfnmacc_vf_bf16mf4_tu(
+// CHECK-RV64-SAME: <vscale x 1 x bfloat> [[VD:%.*]], bfloat noundef [[RS1:%.*]], <vscale x 1 x bfloat> [[VS2:%.*]], i64 noundef [[VL:%.*]]) #[[ATTR0]] {
+// CHECK-RV64-NEXT: [[ENTRY:.*:]]
+// CHECK-RV64-NEXT: [[TMP0:%.*]] = call <vscale x 1 x bfloat> @llvm.riscv.vfnmacc.nxv1bf16.bf16.i64(<vscale x 1 x bfloat> [[VD]], bfloat [[RS1]], <vscale x 1 x bfloat> [[VS2]], i64 7, i64 [[VL]], i64 2)
+// CHECK-RV64-NEXT: ret <vscale x 1 x bfloat> [[TMP0]]
+//
+vbfloat16mf4_t test_vfnmacc_vf_bf16mf4_tu(vbfloat16mf4_t vd, __bf16 rs1, vbfloat16mf4_t vs2, size_t vl) {
+ return __riscv_vfnmacc_tu(vd, rs1, vs2, vl);
+}
+
+// CHECK-RV64-LABEL: define dso_local <vscale x 2 x bfloat> @test_vfnmacc_vv_bf16mf2_tu(
+// CHECK-RV64-SAME: <vscale x 2 x bfloat> [[VD:%.*]], <vscale x 2 x bfloat> [[VS1:%.*]], <vscale x 2 x bfloat> [[VS2:%.*]], i64 noundef [[VL:%.*]]) #[[ATTR0]] {
+// CHECK-RV64-NEXT: [[ENTRY:.*:]]
+// CHECK-RV64-NEXT: [[TMP0:%.*]] = call <vscale x 2 x bfloat> @llvm.riscv.vfnmacc.nxv2bf16.nxv2bf16.i64(<vscale x 2 x bfloat> [[VD]], <vscale x 2 x bfloat> [[VS1]], <vscale x 2 x bfloat> [[VS2]], i64 7, i64 [[VL]], i64 2)
+// CHECK-RV64-NEXT: ret <vscale x 2 x bfloat> [[TMP0]]
+//
+vbfloat16mf2_t test_vfnmacc_vv_bf16mf2_tu(vbfloat16mf2_t vd, vbfloat16mf2_t vs1, vbfloat16mf2_t vs2, size_t vl) {
+ return __riscv_vfnmacc_tu(vd, vs1, vs2, vl);
+}
+
+// CHECK-RV64-LABEL: define dso_local <vscale x 2 x bfloat> @test_vfnmacc_vf_bf16mf2_tu(
+// CHECK-RV64-SAME: <vscale x 2 x bfloat> [[VD:%.*]], bfloat noundef [[RS1:%.*]], <vscale x 2 x bfloat> [[VS2:%.*]], i64 noundef [[VL:%.*]]) #[[ATTR0]] {
+// CHECK-RV64-NEXT: [[ENTRY:.*:]]
+// CHECK-RV64-NEXT: [[TMP0:%.*]] = call <vscale x 2 x bfloat> @llvm.riscv.vfnmacc.nxv2bf16.bf16.i64(<vscale x 2 x bfloat> [[VD]], bfloat [[RS1]], <vscale x 2 x bfloat> [[VS2]], i64 7, i64 [[VL]], i64 2)
+// CHECK-RV64-NEXT: ret <vscale x 2 x bfloat> [[TMP0]]
+//
+vbfloat16mf2_t test_vfnmacc_vf_bf16mf2_tu(vbfloat16mf2_t vd, __bf16 rs1, vbfloat16mf2_t vs2, size_t vl) {
+ return __riscv_vfnmacc_tu(vd, rs1, vs2, vl);
+}
+
+// CHECK-RV64-LABEL: define dso_local <vscale x 4 x bfloat> @test_vfnmacc_vv_bf16m1_tu(
+// CHECK-RV64-SAME: <vscale x 4 x bfloat> [[VD:%.*]], <vscale x 4 x bfloat> [[VS1:%.*]], <vscale x 4 x bfloat> [[VS2:%.*]], i64 noundef [[VL:%.*]]) #[[ATTR0]] {
+// CHECK-RV64-NEXT: [[ENTRY:.*:]]
+// CHECK-RV64-NEXT: [[TMP0:%.*]] = call <vscale x 4 x bfloat> @llvm.riscv.vfnmacc.nxv4bf16.nxv4bf16.i64(<vscale x 4 x bfloat> [[VD]], <vscale x 4 x bfloat> [[VS1]], <vscale x 4 x bfloat> [[VS2]], i64 7, i64 [[VL]], i64 2)
+// CHECK-RV64-NEXT: ret <vscale x 4 x bfloat> [[TMP0]]
+//
+vbfloat16m1_t test_vfnmacc_vv_bf16m1_tu(vbfloat16m1_t vd, vbfloat16m1_t vs1, vbfloat16m1_t vs2, size_t vl) {
+ return __riscv_vfnmacc_tu(vd, vs1, vs2, vl);
+}
+
+// CHECK-RV64-LABEL: define dso_local <vscale x 4 x bfloat> @test_vfnmacc_vf_bf16m1_tu(
+// CHECK-RV64-SAME: <vscale x 4 x bfloat> [[VD:%.*]], bfloat noundef [[RS1:%.*]], <vscale x 4 x bfloat> [[VS2:%.*]], i64 noundef [[VL:%.*]]) #[[ATTR0]] {
+// CHECK-RV64-NEXT: [[ENTRY:.*:]]
+// CHECK-RV64-NEXT: [[TMP0:%.*]] = call <vscale x 4 x bfloat> @llvm.riscv.vfnmacc.nxv4bf16.bf16.i64(<vscale x 4 x bfloat> [[VD]], bfloat [[RS1]], <vscale x 4 x bfloat> [[VS2]], i64 7, i64 [[VL]], i64 2)
+// CHECK-RV64-NEXT: ret <vscale x 4 x bfloat> [[TMP0]]
+//
+vbfloat16m1_t test_vfnmacc_vf_bf16m1_tu(vbfloat16m1_t vd, __bf16 rs1, vbfloat16m1_t vs2, size_t vl) {
+ return __riscv_vfnmacc_tu(vd, rs1, vs2, vl);
+}
+
+// CHECK-RV64-LABEL: define dso_local <vscale x 8 x bfloat> @test_vfnmacc_vv_bf16m2_tu(
+// CHECK-RV64-SAME: <vscale x 8 x bfloat> [[VD:%.*]], <vscale x 8 x bfloat> [[VS1:%.*]], <vscale x 8 x bfloat> [[VS2:%.*]], i64 noundef [[VL:%.*]]) #[[ATTR0]] {
+// CHECK-RV64-NEXT: [[ENTRY:.*:]]
+// CHECK-RV64-NEXT: [[TMP0:%.*]] = call <vscale x 8 x bfloat> @llvm.riscv.vfnmacc.nxv8bf16.nxv8bf16.i64(<vscale x 8 x bfloat> [[VD]], <vscale x 8 x bfloat> [[VS1]], <vscale x 8 x bfloat> [[VS2]], i64 7, i64 [[VL]], i64 2)
+// CHECK-RV64-NEXT: ret <vscale x 8 x bfloat> [[TMP0]]
+//
+vbfloat16m2_t test_vfnmacc_vv_bf16m2_tu(vbfloat16m2_t vd, vbfloat16m2_t vs1, vbfloat16m2_t vs2, size_t vl) {
+ return __riscv_vfnmacc_tu(vd, vs1, vs2, vl);
+}
+
+// CHECK-RV64-LABEL: define dso_local <vscale x 8 x bfloat> @test_vfnmacc_vf_bf16m2_tu(
+// CHECK-RV64-SAME: <vscale x 8 x bfloat> [[VD:%.*]], bfloat noundef [[RS1:%.*]], <vscale x 8 x bfloat> [[VS2:%.*]], i64 noundef [[VL:%.*]]) #[[ATTR0]] {
+// CHECK-RV64-NEXT: [[ENTRY:.*:]]
+// CHECK-RV64-NEXT: [[TMP0:%.*]] = call <vscale x 8 x bfloat> @llvm.riscv.vfnmacc.nxv8bf16.bf16.i64(<vscale x 8 x bfloat> [[VD]], bfloat [[RS1]], <vscale x 8 x bfloat> [[VS2]], i64 7, i64 [[VL]], i64 2)
+// CHECK-RV64-NEXT: ret <vscale x 8 x bfloat> [[TMP0]]
+//
+vbfloat16m2_t test_vfnmacc_vf_bf16m2_tu(vbfloat16m2_t vd, __bf16 rs1, vbfloat16m2_t vs2, size_t vl) {
+ return __riscv_vfnmacc_tu(vd, rs1, vs2, vl);
+}
+
+// CHECK-RV64-LABEL: define dso_local <vscale x 16 x bfloat> @test_vfnmacc_vv_bf16m4_tu(
+// CHECK-RV64-SAME: <vscale x 16 x bfloat> [[VD:%.*]], <vscale x 16 x bfloat> [[VS1:%.*]], <vscale x 16 x bfloat> [[VS2:%.*]], i64 noundef [[VL:%.*]]) #[[ATTR0]] {
+// CHECK-RV64-NEXT: [[ENTRY:.*:]]
+// CHECK-RV64-NEXT: [[TMP0:%.*]] = call <vscale x 16 x bfloat> @llvm.riscv.vfnmacc.nxv16bf16.nxv16bf16.i64(<vscale x 16 x bfloat> [[VD]], <vscale x 16 x bfloat> [[VS1]], <vscale x 16 x bfloat> [[VS2]], i64 7, i64 [[VL]], i64 2)
+// CHECK-RV64-NEXT: ret <vscale x 16 x bfloat> [[TMP0]]
+//
+vbfloat16m4_t test_vfnmacc_vv_bf16m4_tu(vbfloat16m4_t vd, vbfloat16m4_t vs1, vbfloat16m4_t vs2, size_t vl) {
+ return __riscv_vfnmacc_tu(vd, vs1, vs2, vl);
+}
+
+// CHECK-RV64-LABEL: define dso_local <vscale x 16 x bfloat> @test_vfnmacc_vf_bf16m4_tu(
+// CHECK-RV64-SAME: <vscale x 16 x bfloat> [[VD:%.*]], bfloat noundef [[RS1:%.*]], <vscale x 16 x bfloat> [[VS2:%.*]], i64 noundef [[VL:%.*]]) #[[ATTR0]] {
+// CHECK-RV64-NEXT: [[ENTRY:.*:]]
+// CHECK-RV64-NEXT: [[TMP0:%.*]] = call <vscale x 16 x bfloat> @llvm.riscv.vfnmacc.nxv16bf16.bf16.i64(<vscale x 16 x bfloat> [[VD]], bfloat [[RS1]], <vscale x 16 x bfloat> [[VS2]], i64 7, i64 [[VL]], i64 2)
+// CHECK-RV64-NEXT: ret <vscale x 16 x bfloat> [[TMP0]]
+//
+vbfloat16m4_t test_vfnmacc_vf_bf16m4_tu(vbfloat16m4_t vd, __bf16 rs1, vbfloat16m4_t vs2, size_t vl) {
+ return __riscv_vfnmacc_tu(vd, rs1, vs2, vl);
+}
+
+// CHECK-RV64-LABEL: define dso_local <vscale x 32 x bfloat> @test_vfnmacc_vv_bf16m8_tu(
+// CHECK-RV64-SAME: <vscale x 32 x bfloat> [[VD:%.*]], <vscale x 32 x bfloat> [[VS1:%.*]], <vscale x 32 x bfloat> [[VS2:%.*]], i64 noundef [[VL:%.*]]) #[[ATTR0]] {
+// CHECK-RV64-NEXT: [[ENTRY:.*:]]
+// CHECK-RV64-NEXT: [[TMP0:%.*]] = call <vscale x 32 x bfloat> @llvm.riscv.vfnmacc.nxv32bf16.nxv32bf16.i64(<vscale x 32 x bfloat> [[VD]], <vscale x 32 x bfloat> [[VS1]], <vscale x 32 x bfloat> [[VS2]], i64 7, i64 [[VL]], i64 2)
+// CHECK-RV64-NEXT: ret <vscale x 32 x bfloat> [[TMP0]]
+//
+vbfloat16m8_t test_vfnmacc_vv_bf16m8_tu(vbfloat16m8_t vd, vbfloat16m8_t vs1, vbfloat16m8_t vs2, size_t vl) {
+ return __riscv_vfnmacc_tu(vd, vs1, vs2, vl);
+}
+
+// CHECK-RV64-LABEL: define dso_local <vscale x 32 x bfloat> @test_vfnmacc_vf_bf16m8_tu(
+// CHECK-RV64-SAME: <vscale x 32 x bfloat> [[VD:%.*]], bfloat noundef [[RS1:%.*]], <vscale x 32 x bfloat> [[VS2:%.*]], i64 noundef [[VL:%.*]]) #[[ATTR0]] {
+// CHECK-RV64-NEXT: [[ENTRY:.*:]]
+// CHECK-RV64-NEXT: [[TMP0:%.*]] = call <vscale x 32 x bfloat> @llvm.riscv.vfnmacc.nxv32bf16.bf16.i64(<vscale x 32 x bfloat> [[VD]], bfloat [[RS1]], <vscale x 32 x bfloat> [[VS2]], i64 7, i64 [[VL]], i64 2)
+// CHECK-RV64-NEXT: ret <vscale x 32 x bfloat> [[TMP0]]
+//
+vbfloat16m8_t test_vfnmacc_vf_bf16m8_tu(vbfloat16m8_t vd, __bf16 rs1, vbfloat16m8_t vs2, size_t vl) {
+ return __riscv_vfnmacc_tu(vd, rs1, vs2, vl);
+}
+
+// CHECK-RV64-LABEL: define dso_local <vscale x 1 x bfloat> @test_vfnmacc_vv_bf16mf4_tum(
+// CHECK-RV64-SAME: <vscale x 1 x i1> [[MASK:%.*]], <vscale x 1 x bfloat> [[VD:%.*]], <vscale x 1 x bfloat> [[VS1:%.*]], <vscale x 1 x bfloat> [[VS2:%.*]], i64 noundef [[VL:%.*]]) #[[ATTR0]] {
+// CHECK-RV64-NEXT: [[ENTRY:.*:]]
+// CHECK-RV64-NEXT: [[TMP0:%.*]] = call <vscale x 1 x bfloat> @llvm.riscv.vfnmacc.mask.nxv1bf16.nxv1bf16.i64(<vscale x 1 x bfloat> [[VD]], <vscale x 1 x bfloat> [[VS1]], <vscale x 1 x bfloat> [[VS2]], <vscale x 1 x i1> [[MASK]], i64 7, i64 [[VL]], i64 2)
+// CHECK-RV64-NEXT: ret <vscale x 1 x bfloat> [[TMP0]]
+//
+vbfloat16mf4_t test_vfnmacc_vv_bf16mf4_tum(vbool64_t mask, vbfloat16mf4_t vd, vbfloat16mf4_t vs1, vbfloat16mf4_t vs2, size_t vl) {
+ return __riscv_vfnmacc_tum(mask, vd, vs1, vs2, vl);
+}
+
+// CHECK-RV64-LABEL: define dso_local <vscale x 1 x bfloat> @test_vfnmacc_vf_bf16mf4_tum(
+// CHECK-RV64-SAME: <vscale x 1 x i1> [[MASK:%.*]], <vscale x 1 x bfloat> [[VD:%.*]], bfloat noundef [[RS1:%.*]], <vscale x 1 x bfloat> [[VS2:%.*]], i64 noundef [[VL:%.*]]) #[[ATTR0]] {
+// CHECK-RV64-NEXT: [[ENTRY:.*:]]
+// CHECK-RV64-NEXT: [[TMP0:%.*]] = call <vscale x 1 x bfloat> @llvm.riscv.vfnmacc.mask.nxv1bf16.bf16.i64(<vscale x 1 x bfloat> [[VD]], bfloat [[RS1]], <vscale x 1 x bfloat> [[VS2]], <vscale x 1 x i1> [[MASK]], i64 7, i64 [[VL]], i64 2)
+// CHECK-RV64-NEXT: ret <vscale x 1 x bfloat> [[TMP0]]
+//
+vbfloat16mf4_t test_vfnmacc_vf_bf16mf4_tum(vbool64_t mask, vbfloat16mf4_t vd, __bf16 rs1, vbfloat16mf4_t vs2, size_t vl) {
+ return __riscv_vfnmacc_tum(mask, vd, rs1, vs2, vl);
+}
+
+// CHECK-RV64-LABEL: define dso_local <vscale x 2 x bfloat> @test_vfnmacc_vv_bf16mf2_tum(
+// CHECK-RV64-SAME: <vscale x 2 x i1> [[MASK:%.*]], <vscale x 2 x bfloat> [[VD:%.*]], <vscale x 2 x bfloat> [[VS1:%.*]], <vscale x 2 x bfloat> [[VS2:%.*]], i64 noundef [[VL:%.*]]) #[[ATTR0]] {
+// CHECK-RV64-NEXT: [[ENTRY:.*:]]
+// CHECK-RV64-NEXT: [[TMP0:%.*]] = call <vscale x 2 x bfloat> @llvm.riscv.vfnmacc.mask.nxv2bf16.nxv2bf16.i64(<vscale x 2 x bfloat> [[VD]], <vscale x 2 x bfloat> [[VS1]], <vscale x 2 x bfloat> [[VS2]], <vscale x 2 x i1> [[MASK]], i64 7, i64 [[VL]], i64 2)
+// CHECK-RV64-NEXT: ret <vscale x 2 x bfloat> [[TMP0]]
+//
+vbfloat16mf2_t test_vfnmacc_vv_bf16mf2_tum(vbool32_t mask, vbfloat16mf2_t vd, vbfloat16mf2_t vs1, vbfloat16mf2_t vs2, size_t vl) {
+ return __riscv_vfnmacc_tum(mask, vd, vs1, vs2, vl);
+}
+
+// CHECK-RV64-LABEL: define dso_local <vscale x 2 x bfloat> @test_vfnmacc_vf_bf16mf2_tum(
+// CHECK-RV64-SAME: <vscale x 2 x i1> [[MASK:%.*]], <vscale x 2 x bfloat> [[VD:%.*]], bfloat noundef [[RS1:%.*]], <vscale x 2 x bfloat> [[VS2:%.*]], i64 noundef [[VL:%.*]]) #[[ATTR0]] {
+// CHECK-RV64-NEXT: [[ENTRY:.*:]]
+// CHECK-RV64-NEXT: [[TMP0:%.*]] = call <vscale x 2 x bfloat> @llvm.riscv.vfnmacc.mask.nxv2bf16.bf16.i64(<vscale x 2 x bfloat> [[VD]], bfloat [[RS1]], <vscale x 2 x bfloat> [[VS2]], <vscale x 2 x i1> [[MASK]], i64 7, i64 [[VL]], i64 2)
+// CHECK-RV64-NEXT: ret <vscale x 2 x bfloat> [[TMP0]]
+//
+vbfloat16mf2_t test_vfnmacc_vf_bf16mf2_tum(vbool32_t mask, vbfloat16mf2_t vd, __bf16 rs1, vbfloat16mf2_t vs2, size_t vl) {
+ return __riscv_vfnmacc_tum(mask, vd, rs1, vs2, vl);
+}
+
+// CHECK-RV64-LABEL: define dso_local <vscale x 4 x bfloat> @test_vfnmacc_vv_bf16m1_tum(
+// CHECK-RV64-SAME: <vscale x 4 x i1> [[MASK:%.*]], <vscale x 4 x bfloat> [[VD:%.*]], <vscale x 4 x bfloat> [[VS1:%.*]], <vscale x 4 x bfloat> [[VS2:%.*]], i64 noundef [[VL:%.*]]) #[[ATTR0]] {
+// CHECK-RV64-NEXT: [[ENTRY:.*:]]
+// CHECK-RV64-NEXT: [[TMP0:%.*]] = call <vscale x 4 x bfloat> @llvm.riscv.vfnmacc.mask.nxv4bf16.nxv4bf16.i64(<vscale x 4 x bfloat> [[VD]], <vscale x 4 x bfloat> [[VS1]], <vscale x 4 x bfloat> [[VS2]], <vscale x 4 x i1> [[MASK]], i64 7, i64 [[VL]], i64 2)
+// CHECK-RV64-NEXT: ret <vscale x 4 x bfloat> [[TMP0]]
+//
+vbfloat16m1_t test_vfnmacc_vv_bf16m1_tum(vbool16_t mask, vbfloat16m1_t vd, vbfloat16m1_t vs1, vbfloat16m1_t vs2, size_t vl) {
+ return __riscv_vfnmacc_tum(mask, vd, vs1, vs2, vl);
+}
+
+// CHECK-RV64-LABEL: define dso_local <vscale x 4 x bfloat> @test_vfnmacc_vf_bf16m1_tum(
+// CHECK-RV64-SAME: <vscale x 4 x i1> [[MASK:%.*]], <vscale x 4 x bfloat> [[VD:%.*]], bfloat noundef [[RS1:%.*]], <vscale x 4 x bfloat> [[VS2:%.*]], i64 noundef [[VL:%.*]]) #[[ATTR0]] {
+// CHECK-RV64-NEXT: [[ENTRY:.*:]]
+// CHECK-RV64-NEXT: [[TMP0:%.*]] = call <vscale x 4 x bfloat> @llvm.riscv.vfnmacc.mask.nxv4bf16.bf16.i64(<vscale x 4 x bfloat> [[VD]], bfloat [[RS1]], <vscale x 4 x bfloat> [[VS2]], <vscale x 4 x i1> [[MASK]], i64 7, i64 [[VL]], i64 2)
+// CHECK-RV64-NEXT: ret <vscale x 4 x bfloat> [[TMP0]]
+//
+vbfloat16m1_t test_vfnmacc_vf_bf16m1_tum(vbool16_t mask, vbfloat16m1_t vd, __bf16 rs1, vbfloat16m1_t vs2, size_t vl) {
+ return __riscv_vfnmacc_tum(mask, vd, rs1, vs2, vl);
+}
+
+// CHECK-RV64-LABEL: define dso_local <vscale x 8 x bfloat> @test_vfnmacc_vv_bf16m2_tum(
+// CHECK-RV64-SAME: <vscale x 8 x i1> [[MASK:%.*]], <vscale x 8 x bfloat> [[VD:%.*]], <vscale x 8 x bfloat> [[VS1:%.*]], <vscale x 8 x bfloat> [[VS2:%.*]], i64 noundef [[VL:%.*]]) #[[ATTR0]] {
+// CHECK-RV64-NEXT: [[ENTRY:.*:]]
+// CHECK-RV64-NEXT: [[TMP0:%.*]] = call <vscale x 8 x bfloat> @llvm.riscv.vfnmacc.mask.nxv8bf16.nxv8bf16.i64(<vscale x 8 x bfloat> [[VD]], <vscale x 8 x bfloat> [[VS1]], <vscale x 8 x bfloat> [[VS2]], <vscale x 8 x i1> [[MASK]], i64 7, i64 [[VL]], i64 2)
+// CHECK-RV64-NEXT: ret <vscale x 8 x bfloat> [[TMP0]]
+//
+vbfloat16m2_t test_vfnmacc_vv_bf16m2_tum(vbool8_t mask, vbfloat16m2_t vd, vbfloat16m2_t vs1, vbfloat16m2_t vs2, size_t vl) {
+ return __riscv_vfnmacc_tum(mask, vd, vs1, vs2, vl);
+}
+
+// CHECK-RV64-LABEL: define dso_local <vscale x 8 x bfloat> @test_vfnmacc_vf_bf16m2_tum(
+// CHECK-RV64-SAME: <vscale x 8 x i1> [[MASK:%.*]], <vscale x 8 x bfloat> [[VD:%.*]], bfloat noundef [[RS1:%.*]], <vscale x 8 x bfloat> [[VS2:%.*]], i64 noundef [[VL:%.*]]) #[[ATTR0]] {
+// CHECK-RV64-NEXT: [[ENTRY:.*:]]
+// CHECK-RV64-NEXT: [[TMP0:%.*]] = call <vscale x 8 x bfloat> @llvm.riscv.vfnmacc.mask.nxv8bf16.bf16.i64(<vscale x 8 x bfloat> [[VD]], bfloat [[RS1]], <vscale x 8 x bfloat> [[VS2]], <vscale x 8 x i1> [[MASK]], i64 7, i64 [[VL]], i64 2)
+// CHECK-RV64-NEXT: ret <vscale x 8 x bfloat> [[TMP0]]
+//
+vbfloat16m2_t test_vfnmacc_vf_bf16m2_tum(vbool8_t mask, vbfloat16m2_t vd, __bf16 rs1, vbfloat16m2_t vs2, size_t vl) {
+ return __riscv_vfnmacc_tum(mask, vd, rs1, vs2, vl);
+}
+
+// CHECK-RV64-LABEL: define dso_local <vscale x 16 x bfloat> @test_vfnmacc_vv_bf16m4_tum(
+// CHECK-RV64-SAME: <vscale x 16 x i1> [[MASK:%.*]], <vscale x 16 x bfloat> [[VD:%.*]], <vscale x 16 x bfloat> [[VS1:%.*]], <vscale x 16 x bfloat> [[VS2:%.*]], i64 noundef [[VL:%.*]]) #[[ATTR0]] {
+// CHECK-RV64-NEXT: [[ENTRY:.*:]]
+// CHECK-RV64-NEXT: [[TMP0:%.*]] = call <vscale x 16 x bfloat> @llvm.riscv.vfnmacc.mask.nxv16bf16.nxv16bf16.i64(<vscale x 16 x bfloat> [[VD]], <vscale x 16 x bfloat> [[VS1]], <vscale x 16 x bfloat> [[VS2]], <vscale x 16 x i1> [[MASK]], i64 7, i64 [[VL]], i64 2)
+// CHECK-RV64-NEXT: ret <vscale x 16 x bfloat> [[TMP0]]
+//
+vbfloat16m4_t test_vfnmacc_vv_bf16m4_tum(vbool4_t mask, vbfloat16m4_t vd, vbfloat16m4_t vs1, vbfloat16m4_t vs2, size_t vl) {
+ return __riscv_vfnmacc_tum(mask, vd, vs1, vs2, vl);
+}
+
+// CHECK-RV64-LABEL: define dso_local <vscale x 16 x bfloat> @test_vfnmacc_vf_bf16m4_tum(
+// CHECK-RV64-SAME: <vscale x 16 x i1> [[MASK:%.*]], <vscale x 16 x bfloat> [[VD:%.*]], bfloat noundef [[RS1:%.*]], <vscale x 16 x bfloat> [[VS2:%.*]], i64 noundef [[VL:%.*]]) #[[ATTR0]] {
+// CHECK-RV64-NEXT: [[ENTRY:.*:]]
+// CHECK-RV64-NEXT: [[TMP0:%.*]] = call <vscale x 16 x bfloat> @llvm.riscv.vfnmacc.mask.nxv16bf16.bf16.i64(<vscale x 16 x bfloat> [[VD]], bfloat [[RS1]], <vscale x 16 x bfloat> [[VS2]], <vscale x 16 x i1> [[MASK]], i64 7, i64 [[VL]], i64 2)
+// CHECK-RV64-NEXT: ret <vscale x 16 x bfloat> [[TMP0]]
+//
+vbfloat16m4_t test_vfnmacc_vf_bf16m4_tum(vbool4_t mask, vbfloat16m4_t vd, __bf16 rs1, vbfloat16m4_t vs2, size_t vl) {
+ return __riscv_vfnmacc_tum(mask, vd, rs1, vs2, vl);
+}
+
+// CHECK-RV64-LABEL: define dso_local <vscale x 32 x bfloat> @test_vfnmacc_vv_bf16m8_tum(
+// CHECK-RV64-SAME: <vscale x 32 x i1> [[MASK:%.*]], <vscale x 32 x bfloat> [[VD:%.*]], <vscale x 32 x bfloat> [[VS1:%.*]], <vscale x 32 x bfloat> [[VS2:%.*]], i64 noundef [[VL:%.*]]) #[[ATTR0]] {
+// CHECK-RV64-NEXT: [[ENTRY:.*:]]
+// CHECK-RV64-NEXT: [[TMP0:%.*]] = call <vscale x 32 x bfloat> @llvm.riscv.vfnmacc.mask.nxv32bf16.nxv32bf16.i64(<vscale x 32 x bfloat> [[VD]], <vscale x 32 x bfloat> [[VS1]], <vscale x 32 x bfloat> [[VS2]], <vscale x 32 x i1> [[MASK]], i64 7, i64 [[VL]], i64 2)
+// CHECK-RV64-NEXT: ret <vscale x 32 x bfloat> [[TMP0]]
+//
+vbfloat16m8_t test_vfnmacc_vv_bf16m8_tum(vbool2_t mask, vbfloat16m8_t vd, vbfloat16m8_t vs1, vbfloat16m8_t vs2, size_t vl) {
+ return __riscv_vfnmacc_tum(mask, vd, vs1, vs2, vl);
+}
+
+// CHECK-RV64-LABEL: define dso_local <vscale x 32 x bfloat> @test_vfnmacc_vf_bf16m8_tum(
+// CHECK-RV64-SAME: <vscale x 32 x i1> [[MASK:%.*]], <vscale x 32 x bfloat> [[VD:%.*]], bfloat noundef [[RS1:%.*]], <vscale x 32 x bfloat> [[VS2:%.*]], i64 noundef [[VL:%.*]]) #[[ATTR0]] {
+// CHECK-RV64-NEXT: [[ENTRY:.*:]]
+// CHECK-RV64-NEXT: [[TMP0:%.*]] = call <vscale x 32 x bfloat> @llvm.riscv.vfnmacc.mask.nxv32bf16.bf16.i64(<vscale x 32 x bfloat> [[VD]], bfloat [[RS1]], <vscale x 32 x bfloat> [[VS2]], <vscale x 32 x i1> [[MASK]], i64 7, i64 [[VL]], i64 2)
+// CHECK-RV64-NEXT: ret <vscale x 32 x bfloat> [[TMP0]]
+//
+vbfloat16m8_t test_vfnmacc_vf_bf16m8_tum(vbool2_t mask, vbfloat16m8_t vd, __bf16 rs1, vbfloat16m8_t vs2, size_t vl) {
+ return __riscv_vfnmacc_tum(mask, vd, rs1, vs2, vl);
+}
+
+// CHECK-RV64-LABEL: define dso_local <vscale x 1 x bfloat> @test_vfnmacc_vv_bf16mf4_tumu(
+// CHECK-RV64-SAME: <vscale x 1 x i1> [[MASK:%.*]], <vscale x 1 x bfloat> [[VD:%.*]], <vscale x 1 x bfloat> [[VS1:%.*]], <vscale x 1 x bfloat> [[VS2:%.*]], i64 noundef [[VL:%.*]]) #[[ATTR0]] {
+// CHECK-RV64-NEXT: [[ENTRY:.*:]]
+// CHECK-RV64-NEXT: [[TMP0:%.*]] = call <vscale x 1 x bfloat> @llvm.riscv.vfnmacc.mask.nxv1bf16.nxv1bf16.i64(<vscale x 1 x bfloat> [[VD]], <vscale x 1 x bfloat> [[VS1]], <vscale x 1 x bfloat> [[VS2]], <vscale x 1 x i1> [[MASK]], i64 7, i64 [[VL]], i64 0)
+// CHECK-RV64-NEXT: ret <vscale x 1 x bfloat> [[TMP0]]
+//
+vbfloat16mf4_t test_vfnmacc_vv_bf16mf4_tumu(vbool64_t mask, vbfloat16mf4_t vd, vbfloat16mf4_t vs1, vbfloat16mf4_t vs2, size_t vl) {
+ return __riscv_vfnmacc_tumu(mask, vd, vs1, vs2, vl);
+}
+
+// CHECK-RV64-LABEL: define dso_local <vscale x 1 x bfloat> @test_vfnmacc_vf_bf16mf4_tumu(
+// CHECK-RV64-SAME: <vscale x 1 x i1> [[MASK:%.*]], <vscale x 1 x bfloat> [[VD:%.*]], bfloat noundef [[RS1:%.*]], <vscale x 1 x bfloat> [[VS2:%.*]], i64 noundef [[VL:%.*]]) #[[ATTR0]] {
+// CHECK-RV64-NEXT: [[ENTRY:.*:]]
+// CHECK-RV64-NEXT: [[TMP0:%.*]] = call <vscale x 1 x bfloat> @llvm.riscv.vfnmacc.mask.nxv1bf16.bf16.i64(<vscale x 1 x bfloat> [[VD]], bfloat [[RS1]], <vscale x 1 x bfloat> [[VS2]], <vscale x 1 x i1> [[MASK]], i64 7, i64 [[VL]], i64 0)
+// CHECK-RV64-NEXT: ret <vscale x 1 x bfloat> [[TMP0]]
+//
+vbfloat16mf4_t test_vfnmacc_vf_bf16mf4_tumu(vbool64_t mask, vbfloat16mf4_t vd, __bf16 rs1, vbfloat16mf4_t vs2, size_t vl) {
+ return __riscv_vfnmacc_tumu(mask, vd, rs1, vs2, vl);
+}
+
+// CHECK-RV64-LABEL: define dso_local <vscale x 2 x bfloat> @test_vfnmacc_vv_bf16mf2_tumu(
+// CHECK-RV64-SAME: <vscale x 2 x i1> [[MASK:%.*]], <vscale x 2 x bfloat> [[VD:%.*]], <vscale x 2 x bfloat> [[VS1:%.*]], <vscale x 2 x bfloat> [[VS2:%.*]], i64 noundef [[VL:%.*]]) #[[ATTR0]] {
+// CHECK-RV64-NEXT: [[ENTRY:.*:]]
+// CHECK-RV64-NEXT: [[TMP0:%.*]] = call <vscale x 2 x bfloat> @llvm.riscv.vfnmacc.mask.nxv2bf16.nxv2bf16.i64(<vscale x 2 x bfloat> [[VD]], <vscale x 2 x bfloat> [[VS1]], <vscale x 2 x bfloat> [[VS2]], <vscale x 2 x i1> [[MASK]], i64 7, i64 [[VL]], i64 0)
+// CHECK-RV64-NEXT: ret <vscale x 2 x bfloat> [[TMP0]]
+//
+vbfloat16mf2_t test_vfnmacc_vv_bf16mf2_tumu(vbool32_t mask, vbfloat16mf2_t vd, vbfloat16mf2_t vs1, vbfloat16mf2_t vs2, size_t vl) {
+ return __riscv_vfnmacc_tumu(mask, vd, vs1, vs2, vl);
+}
+
+// CHECK-RV64-LABEL: define dso_local <vscale x 2 x bfloat> @test_vfnmacc_vf_bf16mf2_tumu(
+// CHECK-RV64-SAME: <vscale x 2 x i1> [[MASK:%.*]], <vscale x 2 x bfloat> [[VD:%.*]], bfloat noundef [[RS1:%.*]], <vscale x 2 x bfloat> [[VS2:%.*]], i64 noundef [[VL:%.*]]) #[[ATTR0]] {
+// CHECK-RV64-NEXT: [[ENTRY:.*:]]
+// CHECK-RV64-NEXT: [[TMP0:%.*]] = call <vscale x 2 x bfloat> @llvm.riscv.vfnmacc.mask.nxv2bf16.bf16.i64(<vscale x 2 x bfloat> [[VD]], bfloat [[RS1]], <vscale x 2 x bfloat> [[VS2]], <vscale x 2 x i1> [[MASK]], i64 7, i64 [[VL]], i64 0)
+// CHECK-RV64-NEXT: ret <vscale x 2 x bfloat> [[TMP0]]
+//
+vbfloat16mf2_t test_vfnmacc_vf_bf16mf2_tumu(vbool32_t mask, vbfloat16mf2_t vd, __bf16 rs1, vbfloat16mf2_t vs2, size_t vl) {
+ return __riscv_vfnmacc_tumu(mask, vd, rs1, vs2, vl);
+}
+
+// CHECK-RV64-LABEL: define dso_local <vscale x 4 x bfloat> @test_vfnmacc_vv_bf16m1_tumu(
+// CHECK-RV64-SAME: <vscale x 4 x i1> [[MASK:%.*]], <vscale x 4 x bfloat> [[VD:%.*]], <vscale x 4 x bfloat> [[VS1:%.*]], <vscale x 4 x bfloat> [[VS2:%.*]], i64 noundef [[VL:%.*]]) #[[ATTR0]] {
+// CHECK-RV64-NEXT: [[ENTRY:.*:]]
+// CHECK-RV64-NEXT: [[TMP0:%.*]] = call <vscale x 4 x bfloat> @llvm.riscv.vfnmacc.mask.nxv4bf16.nxv4bf16.i64(<vscale x 4 x bfloat> [[VD]], <vscale x 4 x bfloat> [[VS1]], <vscale x 4 x bfloat> [[VS2]], <vscale x 4 x i1> [[MASK]], i64 7, i64 [[VL]], i64 0)
+// CHECK-RV64-NEXT: ret <vscale x 4 x bfloat> [[TMP0]]
+//
+vbfloat16m1_t test_vfnmacc_vv_bf16m1_tumu(vbool16_t mask, vbfloat16m1_t vd, vbfloat16m1_t vs1, vbfloat16m1_t vs2, size_t vl) {
+ return __riscv_vfnmacc_tumu(mask, vd, vs1, vs2, vl);
+}
+
+// CHECK-RV64-LABEL: define dso_local <vscale x 4 x bfloat> @test_vfnmacc_vf_bf16m1_tumu(
+// CHECK-RV64-SAME: <vscale x 4 x i1> [[MASK:%.*]], <vscale x 4 x bfloat> [[VD:%.*]], bfloat noundef [[RS1:%.*]], <vscale x 4 x bfloat> [[VS2:%.*]], i64 noundef [[VL:%.*]]) #[[ATTR0]] {
+// CHECK-RV64-NEXT: [[ENTRY:.*:]]
+// CHECK-RV64-NEXT: [[TMP0:%.*]] = call <vscale x 4 x bfloat> @llvm.riscv.vfnmacc.mask.nxv4bf16.bf16.i64(<vscale x 4 x bfloat> [[VD]], bfloat [[RS1]], <vscale x 4 x bfloat> [[VS2]], <vscale x 4 x i1> [[MASK]], i64 7, i64 [[VL]], i64 0)
+// CHECK-RV64-NEXT: ret <vscale x 4 x bfloat> [[TMP0]]
+//
+vbfloat16m1_t test_vfnmacc_vf_bf16m1_tumu(vbool16_t mask, vbfloat16m1_t vd, __bf16 rs1, vbfloat16m1_t vs2, size_t vl) {
+ return __riscv_vfnmacc_tumu(mask, vd, rs1, vs2, vl);
+}
+
+// CHECK-RV64-LABEL: define dso_local <vscale x 8 x bfloat> @test_vfnmacc_vv_bf16m2_tumu(
+// CHECK-RV64-SAME: <vscale x 8 x i1> [[MASK:%.*]], <vscale x 8 x bfloat> [[VD:%.*]], <vscale x 8 x bfloat> [[VS1:%.*]], <vscale x 8 x bfloat> [[VS2:%.*]], i64 noundef [[VL:%.*]]) #[[ATTR0]] {
+// CHECK-RV64-NEXT: [[ENTRY:.*:]]
+// CHECK-RV64-NEXT: [[TMP0:%.*]] = call <vscale x 8 x bfloat> @llvm.riscv.vfnmacc.mask.nxv8bf16.nxv8bf16.i64(<vscale x 8 x bfloat> [[VD]], <vscale x 8 x bfloat> [[VS1]], <vscale x 8 x bfloat> [[VS2]], <vscale x 8 x i1> [[MASK]], i64 7, i64 [[VL]], i64 0)
+// CHECK-RV64-NEXT: ret <vscale x 8 x bfloat> [[TMP0]]
+//
+vbfloat16m2_t test_vfnmacc_vv_bf16m2_tumu(vbool8_t mask, vbfloat16m2_t vd, vbfloat16m2_t vs1, vbfloat16m2_t vs2, size_t vl) {
+ return __riscv_vfnmacc_tumu(mask, vd, vs1, vs2, vl);
+}
+
+// CHECK-RV64-LABEL: define dso_local <vscale x 8 x bfloat> @test_vfnmacc_vf_bf16m2_tumu(
+// CHECK-RV64-SAME: <vscale x 8 x i1> [[MASK:%.*]], <vscale x 8 x bfloat> [[VD:%.*]], bfloat noundef [[RS1:%.*]], <vscale x 8 x bfloat> [[VS2:%.*]], i64 noundef [[VL:%.*]]) #[[ATTR0]] {
+// CHECK-RV64-NEXT: [[ENTRY:.*:]]
+// CHECK-RV64-NEXT: [[TMP0:%.*]] = call <vscale x 8 x bfloat> @llvm.riscv.vfnmacc.mask.nxv8bf16.bf16.i64(<vscale x 8 x bfloat> [[VD]], bfloat [[RS1]], <vscale x 8 x bfloat> [[VS2]], <vscale x 8 x i1> [[MASK]], i64 7, i64 [[VL]], i64 0)
+// CHECK-RV64-NEXT: ret <vscale x 8 x bfloat> [[TMP0]]
+//
+vbfloat16m2_t test_vfnmacc_vf_bf16m2_tumu(vbool8_t mask, vbfloat16m2_t vd, __bf16 rs1, vbfloat16m2_t vs2, size_t vl) {
+ return __riscv_vfnmacc_tumu(mask, vd, rs1, vs2, vl);
+}
+
+// CHECK-RV64-LABEL: define dso_local <vscale x 16 x bfloat> @test_vfnmacc_vv_bf16m4_tumu(
+// CHECK-RV64-SAME: <vscale x 16 x i1> [[MASK:%.*]], <vscale x 16 x bfloat> [[VD:%.*]], <vscale x 16 x bfloat> [[VS1:%.*]], <vscale x 16 x bfloat> [[VS2:%.*]], i64 noundef [[VL:%.*]]) #[[ATTR0]] {
+// CHECK-RV64-NEXT: [[ENTRY:.*:]]
+// CHECK-RV64-NEXT: [[TMP0:%.*]] = call <vscale x 16 x bfloat> @llvm.riscv.vfnmacc.mask.nxv16bf16.nxv16bf16.i64(<vscale x 16 x bfloat> [[VD]], <vscale x 16 x bfloat> [[VS1]], <vscale x 16 x bfloat> [[VS2]], <vscale x 16 x i1> [[MASK]], i64 7, i64 [[VL]], i64 0)
+// CHECK-RV64-NEXT: ret <vscale x 16 x bfloat> [[TMP0]]
+//
+vbfloat16m4_t test_vfnmacc_vv_bf16m4_tumu(vbool4_t mask, vbfloat16m4_t vd, vbfloat16m4_t vs1, vbfloat16m4_t vs2, size_t vl) {
+ return __riscv_vfnmacc_tumu(mask, vd, vs1, vs2, vl);
+}
+
+// CHECK-RV64-LABEL: define dso_local <vscale x 16 x bfloat> @test_vfnmacc_vf_bf16m4_tumu(
+// CHECK-RV64-SAME: <vscale x 16 x i1> [[MASK:%.*]], <vscale x 16 x bfloat> [[VD:%.*]], bfloat noundef [[RS1:%.*]], <vscale x 16 x bfloat> [[VS2:%.*]], i64 noundef [[VL:%.*]]) #[[ATTR0]] {
+// CHECK-RV64-NEXT: [[ENTRY:.*:]]
+// CHECK-RV64-NEXT: [[TMP0:%.*]] = call <vscale x 16 x bfloat> @llvm.riscv.vfnmacc.mask.nxv16bf16.bf16.i64(<vscale x 16 x bfloat> [[VD]], bfloat [[RS1]], <vscale x 16 x bfloat> [[VS2]], <vscale x 16 x i1> [[MASK]], i64 7, i64 [[VL]], i64 0)
+// CHECK-RV64-NEXT: ret <vscale x 16 x bfloat> [[TMP0]]
+//
+vbfloat16m4_t test_vfnmacc_vf_bf16m4_tumu(vbool4_t mask, vbfloat16m4_t vd, __bf16 rs1, vbfloat16m4_t vs2, size_t vl) {
+ return __riscv_vfnmacc_tumu(mask, vd, rs1, vs2, vl);
+}
+
+// CHECK-RV64-LABEL: define dso_local <vscale x 32 x bfloat> @test_vfnmacc_vv_bf16m8_tumu(
+// CHECK-RV64-SAME: <vscale x 32 x i1> [[MASK:%.*]], <vscale x 32 x bfloat> [[VD:%.*]], <vscale x 32 x bfloat> [[VS1:%.*]], <vscale x 32 x bfloat> [[VS2:%.*]], i64 noundef [[VL:%.*]]) #[[ATTR0]] {
+// CHECK-RV64-NEXT: [[ENTRY:.*:]]
+// CHECK-RV64-NEXT: [[TMP0:%.*]] = call <vscale x 32 x bfloat> @llvm.riscv.vfnmacc.mask.nxv32bf16.nxv32bf16.i64(<vscale x 32 x bfloat> [[VD]], <vscale x 32 x bfloat> [[VS1]], <vscale x 32 x bfloat> [[VS2]], <vscale x 32 x i1> [[MASK]], i64 7, i64 [[VL]], i64 0)
+// CHECK-RV64-NEXT: ret <vscale x 32 x bfloat> [[TMP0]]
+//
+vbfloat16m8_t test_vfnmacc_vv_bf16m8_tumu(vbool2_t mask, vbfloat16m8_t vd, vbfloat16m8_t vs1, vbfloat16m8_t vs2, size_t vl) {
+ return __riscv_vfnmacc_tumu(mask, vd, vs1, vs2, vl);
+}
+
+// CHECK-RV64-LABEL: define dso_local <vscale x 32 x bfloat> @test_vfnmacc_vf_bf16m8_tumu(
+// CHECK-RV64-SAME: <vscale x 32 x i1> [[MASK:%.*]], <vscale x 32 x bfloat> [[VD:%.*]], bfloat noundef [[RS1:%.*]], <vscale x 32 x bfloat> [[VS2:%.*]], i64 noundef [[VL:%.*]]) #[[ATTR0]] {
+// CHECK-RV64-NEXT: [[ENTRY:.*:]]
+// CHECK-RV64-NEXT: [[TMP0:%.*]] = call <vscale x 32 x bfloat> @llvm.riscv.vfnmacc.mask.nxv32bf16.bf16.i64(<vscale x 32 x bfloat> [[VD]], bfloat [[RS1]], <vscale x 32 x bfloat> [[VS2]], <vscale x 32 x i1> [[MASK]], i64 7, i64 [[VL]], i64 0)
+// CHECK-RV64-NEXT: ret <vscale x 32 x bfloat> [[TMP0]]
+//
+vbfloat16m8_t test_vfnmacc_vf_bf16m8_tumu(vbool2_t mask, vbfloat16m8_t vd, __bf16 rs1, vbfloat16m8_t vs2, size_t vl) {
+ return __riscv_vfnmacc_tumu(mask, vd, rs1, vs2, vl);
+}
+
+// CHECK-RV64-LABEL: define dso_local <vscale x 1 x bfloat> @test_vfnmacc_vv_bf16mf4_mu(
+// CHECK-RV64-SAME: <vscale x 1 x i1> [[MASK:%.*]], <vscale x 1 x bfloat> [[VD:%.*]], <vscale x 1 x bfloat> [[VS1:%.*]], <vscale x 1 x bfloat> [[VS2:%.*]], i64 noundef [[VL:%.*]]) #[[ATTR0]] {
+// CHECK-RV64-NEXT: [[ENTRY:.*:]]
+// CHECK-RV64-NEXT: [[TMP0:%.*]] = call <vscale x 1 x bfloat> @llvm.riscv.vfnmacc.mask.nxv1bf16.nxv1bf16.i64(<vscale x 1 x bfloat> [[VD]], <vscale x 1 x bfloat> [[VS1]], <vscale x 1 x bfloat> [[VS2]], <vscale x 1 x i1> [[MASK]], i64 7, i64 [[VL]], i64 1)
+// CHECK-RV64-NEXT: ret <vscale x 1 x bfloat> [[TMP0]]
+//
+vbfloat16mf4_t test_vfnmacc_vv_bf16mf4_mu(vbool64_t mask, vbfloat16mf4_t vd, vbfloat16mf4_t vs1, vbfloat16mf4_t vs2, size_t vl) {
+ return __riscv_vfnmacc_mu(mask, vd, vs1, vs2, vl);
+}
+
+// CHECK-RV64-LABEL: define dso_local <vscale x 1 x bfloat> @test_vfnmacc_vf_bf16mf4_mu(
+// CHECK-RV64-SAME: <vscale x 1 x i1> [[MASK:%.*]], <vscale x 1 x bfloat> [[VD:%.*]], bfloat noundef [[RS1:%.*]], <vscale x 1 x bfloat> [[VS2:%.*]], i64 noundef [[VL:%.*]]) #[[ATTR0]] {
+// CHECK-RV64-NEXT: [[ENTRY:.*:]]
+// CHECK-RV64-NEXT: [[TMP0:%.*]] = call <vscale x 1 x bfloat> @llvm.riscv.vfnmacc.mask.nxv1bf16.bf16.i64(<vscale x 1 x bfloat> [[VD]], bfloat [[RS1]], <vscale x 1 x bfloat> [[VS2]], <vscale x 1 x i1> [[MASK]], i64 7, i64 [[VL]], i64 1)
+// CHECK-RV64-NEXT: ret <vscale x 1 x bfloat> [[TMP0]]
+//
+vbfloat16mf4_t test_vfnmacc_vf_bf16mf4_mu(vbool64_t mask, vbfloat16mf4_t vd, __bf16 rs1, vbfloat16mf4_t vs2, size_t vl) {
+ return __riscv_vfnmacc_mu(mask, vd, rs1, vs2, vl);
+}
+
+// CHECK-RV64-LABEL: define dso_local <vscale x 2 x bfloat> @test_vfnmacc_vv_bf16mf2_mu(
+// CHECK-RV64-SAME: <vscale x 2 x i1> [[MASK:%.*]], <vscale x 2 x bfloat> [[VD:%.*]], <vscale x 2 x bfloat> [[VS1:%.*]], <vscale x 2 x bfloat> [[VS2:%.*]], i64 noundef [[VL:%.*]]) #[[ATTR0]] {
+// CHECK-RV64-NEXT: [[ENTRY:.*:]]
+// CHECK-RV64-NEXT: [[TMP0:%.*]] = call <vscale x 2 x bfloat> @llvm.riscv.vfnmacc.mask.nxv2bf16.nxv2bf16.i64(<vscale x 2 x bfloat> [[VD]], <vscale x 2 x bfloat> [[VS1]], <vscale x 2 x bfloat> [[VS2]], <vscale x 2 x i1> [[MASK]], i64 7, i64 [[VL]], i64 1)
+// CHECK-RV64-NEXT: ret <vscale x 2 x bfloat> [[TMP0]]
+//
+vbfloat16mf2_t test_vfnmacc_vv_bf16mf2_mu(vbool32_t mask, vbfloat16mf2_t vd, vbfloat16mf2_t vs1, vbfloat16mf2_t vs2, size_t vl) {
+ return __riscv_vfnmacc_mu(mask, vd, vs1, vs2, vl);
+}
+
+// CHECK-RV64-LABEL: define dso_local <vscale x 2 x bfloat> @test_vfnmacc_vf_bf16mf2_mu(
+// CHECK-RV64-SAME: <vscale x 2 x i1> [[MASK:%.*]], <vscale x 2 x bfloat> [[VD:%.*]], bfloat noundef [[RS1:%.*]], <vscale x 2 x bfloat> [[VS2:%.*]], i64 noundef [[VL:%.*]]) #[[ATTR0]] {
+// CHECK-RV64-NEXT: [[ENTRY:.*:]]
+// CHECK-RV64-NEXT: [[TMP0:%.*]] = call <vscale x 2 x bfloat> @llvm.riscv.vfnmacc.mask.nxv2bf16.bf16.i64(<vscale x 2 x bfloat> [[VD]], bfloat [[RS1]], <vscale x 2 x bfloat> [[VS2]], <vscale x 2 x i1> [[MASK]], i64 7, i64 [[VL]], i64 1)
+// CHECK-RV64-NEXT: ret <vscale x 2 x bfloat> [[TMP0]]
+//
+vbfloat16mf2_t test_vfnmacc_vf_bf16mf2_mu(vbool32_t mask, vbfloat16mf2_t vd, __bf16 rs1, vbfloat16mf2_t vs2, size_t vl) {
+ return __riscv_vfnmacc_mu(mask, vd, rs1, vs2, vl);
+}
+
+// CHECK-RV64-LABEL: define dso_local <vscale x 4 x bfloat> @test_vfnmacc_vv_bf16m1_mu(
+// CHECK-RV64-SAME: <vscale x 4 x i1> [[MASK:%.*]], <vscale x 4 x bfloat> [[VD:%.*]], <vscale x 4 x bfloat> [[VS1:%.*]], <vscale x 4 x bfloat> [[VS2:%.*]], i64 noundef [[VL:%.*]]) #[[ATTR0]] {
+// CHECK-RV64-NEXT: [[ENTRY:.*:]]
+// CHECK-RV64-NEXT: [[TMP0:%.*]] = call <vscale x 4 x bfloat> @llvm.riscv.vfnmacc.mask.nxv4bf16.nxv4bf16.i64(<vscale x 4 x bfloat> [[VD]], <vscale x 4 x bfloat> [[VS1]], <vscale x 4 x bfloat> [[VS2]], <vscale x 4 x i1> [[MASK]], i64 7, i64 [[VL]], i64 1)
+// CHECK-RV64-NEXT: ret <vscale x 4 x bfloat> [[TMP0]]
+//
+vbfloat16m1_t test_vfnmacc_vv_bf16m1_mu(vbool16_t mask, vbfloat16m1_t vd, vbfloat16m1_t vs1, vbfloat16m1_t vs2, size_t vl) {
+ return __riscv_vfnmacc_mu(mask, vd, vs1, vs2, vl);
+}
+
+// CHECK-RV64-LABEL: define dso_local <vscale x 4 x bfloat> @test_vfnmacc_vf_bf16m1_mu(
+// CHECK-RV64-SAME: <vscale x 4 x i1> [[MASK:%.*]], <vscale x 4 x bfloat> [[VD:%.*]], bfloat noundef [[RS1:%.*]], <vscale x 4 x bfloat> [[VS2:%.*]], i64 noundef [[VL:%.*]]) #[[ATTR0]] {
+// CHECK-RV64-NEXT: [[ENTRY:.*:]]
+// CHECK-RV64-NEXT: [[TMP0:%.*]] = call <vscale x 4 x bfloat> @llvm.riscv.vfnmacc.mask.nxv4bf16.bf16.i64(<vscale x 4 x bfloat> [[VD]], bfloat [[RS1]], <vscale x 4 x bfloat> [[VS2]], <vscale x 4 x i1> [[MASK]], i64 7, i64 [[VL]], i64 1)
+// CHECK-RV64-NEXT: ret <vscale x 4 x bfloat> [[TMP0]]
+//
+vbfloat16m1_t test_vfnmacc_vf_bf16m1_mu(vbool16_t mask, vbfloat16m1_t vd, __bf16 rs1, vbfloat16m1_t vs2, size_t vl) {
+ return __riscv_vfnmacc_mu(mask, vd, rs1, vs2, vl);
+}
+
+// CHECK-RV64-LABEL: define dso_local <vscale x 8 x bfloat> @test_vfnmacc_vv_bf16m2_mu(
+// CHECK-RV64-SAME: <vscale x 8 x i1> [[MASK:%.*]], <vscale x 8 x bfloat> [[VD:%.*]], <vscale x 8 x bfloat> [[VS1:%.*]], <vscale x 8 x bfloat> [[VS2:%.*]], i64 noundef [[VL:%.*]]) #[[ATTR0]] {
+// CHECK-RV64-NEXT: [[ENTRY:.*:]]
+// CHECK-RV64-NEXT: [[TMP0:%.*]] = call <vscale x 8 x bfloat> @llvm.riscv.vfnmacc.mask.nxv8bf16.nxv8bf16.i64(<vscale x 8 x bfloat> [[VD]], <vscale x 8 x bfloat> [[VS1]], <vscale x 8 x bfloat> [[VS2]], <vscale x 8 x i1> [[MASK]], i64 7, i64 [[VL]], i64 1)
+// CHECK-RV64-NEXT: ret <vscale x 8 x bfloat> [[TMP0]]
+//
+vbfloat16m2_t test_vfnmacc_vv_bf16m2_mu(vbool8_t mask, vbfloat16m2_t vd, vbfloat16m2_t vs1, vbfloat16m2_t vs2, size_t vl) {
+ return __riscv_vfnmacc_mu(mask, vd, vs1, vs2, vl);
+}
+
+// CHECK-RV64-LABEL: define dso_local <vscale x 8 x bfloat> @test_vfnmacc_vf_bf16m2_mu(
+// CHECK-RV64-SAME: <vscale x 8 x i1> [[MASK:%.*]], <vscale x 8 x bfloat> [[VD:%.*]], bfloat noundef [[RS1:%.*]], <vscale x 8 x bfloat> [[VS2:%.*]], i64 noundef [[VL:%.*]]) #[[ATTR0]] {
+// CHECK-RV64-NEXT: [[ENTRY:.*:]]
+// CHECK-RV64-NEXT: [[TMP0:%.*]] = call <vscale x 8 x bfloat> @llvm.riscv.vfnmacc.mask.nxv8bf16.bf16.i64(<vscale x 8 x bfloat> [[VD]], bfloat [[RS1]], <vscale x 8 x bfloat> [[VS2]], <vscale x 8 x i1> [[MASK]], i64 7, i64 [[VL]], i64 1)
+// CHECK-RV64-NEXT: ret <vscale x 8 x bfloat> [[TMP0]]
+//
+vbfloat16m2_t test_vfnmacc_vf_bf16m2_mu(vbool8_t mask, vbfloat16m2_t vd, __bf16 rs1, vbfloat16m2_t vs2, size_t vl) {
+ return __riscv_vfnmacc_mu(mask, vd, rs1, vs2, vl);
+}
+
+// CHECK-RV64-LABEL: define dso_local <vscale x 16 x bfloat> @test_vfnmacc_vv_bf16m4_mu(
+// CHECK-RV64-SAME: <vscale x 16 x i1> [[MASK:%.*]], <vscale x 16 x bfloat> [[VD:%.*]], <vscale x 16 x bfloat> [[VS1:%.*]], <vscale x 16 x bfloat> [[VS2:%.*]], i64 noundef [[VL:%.*]]) #[[ATTR0]] {
+// CHECK-RV64-NEXT: [[ENTRY:.*:]]
+// CHECK-RV64-NEXT: [[TMP0:%.*]] = call <vscale x 16 x bfloat> @llvm.riscv.vfnmacc.mask.nxv16bf16.nxv16bf16.i64(<vscale x 16 x bfloat> [[VD]], <vscale x 16 x bfloat> [[VS1]], <vscale x 16 x bfloat> [[VS2]], <vscale x 16 x i1> [[MASK]], i64 7, i64 [[VL]], i64 1)
+// CHECK-RV64-NEXT: ret <vscale x 16 x bfloat> [[TMP0]]
+//
+vbfloat16m4_t test_vfnmacc_vv_bf16m4_mu(vbool4_t mask, vbfloat16m4_t vd, vbfloat16m4_t vs1, vbfloat16m4_t vs2, size_t vl) {
+ return __riscv_vfnmacc_mu(mask, vd, vs1, vs2, vl);
+}
+
+// CHECK-RV64-LABEL: define dso_local <vscale x 16 x bfloat> @test_vfnmacc_vf_bf16m4_mu(
+// CHECK-RV64-SAME: <vscale x 16 x i1> [[MASK:%.*]], <vscale x 16 x bfloat> [[VD:%.*]], bfloat noundef [[RS1:%.*]], <vscale x 16 x bfloat> [[VS2:%.*]], i64 noundef [[VL:%.*]]) #[[ATTR0]] {
+// CHECK-RV64-NEXT: [[ENTRY:.*:]]
+// CHECK-RV64-NEXT: [[TMP0:%.*]] = call <vscale x 16 x bfloat> @llvm.riscv.vfnmacc.mask.nxv16bf16.bf16.i64(<vscale x 16 x bfloat> [[VD]], bfloat [[RS1]], <vscale x 16 x bfloat> [[VS2]], <vscale x 16 x i1> [[MASK]], i64 7, i64 [[VL]], i64 1)
+// CHECK-RV64-NEXT: ret <vscale x 16 x bfloat> [[TMP0]]
+//
+vbfloat16m4_t test_vfnmacc_vf_bf16m4_mu(vbool4_t mask, vbfloat16m4_t vd, __bf16 rs1, vbfloat16m4_t vs2, size_t vl) {
+ return __riscv_vfnmacc_mu(mask, vd, rs1, vs2, vl);
+}
+
+// CHECK-RV64-LABEL: define dso_local <vscale x 32 x bfloat> @test_vfnmacc_vv_bf16m8_mu(
+// CHECK-RV64-SAME: <vscale x 32 x i1> [[MASK:%.*]], <vscale x 32 x bfloat> [[VD:%.*]], <vscale x 32 x bfloat> [[VS1:%.*]], <vscale x 32 x bfloat> [[VS2:%.*]], i64 noundef [[VL:%.*]]) #[[ATTR0]] {
+// CHECK-RV64-NEXT: [[ENTRY:.*:]]
+// CHECK-RV64-NEXT: [[TMP0:%.*]] = call <vscale x 32 x bfloat> @llvm.riscv.vfnmacc.mask.nxv32bf16.nxv32bf16.i64(<vscale x 32 x bfloat> [[VD]], <vscale x 32 x bfloat> [[VS1]], <vscale x 32 x bfloat> [[VS2]], <vscale x 32 x i1> [[MASK]], i64 7, i64 [[VL]], i64 1)
+// CHECK-RV64-NEXT: ret <vscale x 32 x bfloat> [[TMP0]]
+//
+vbfloat16m8_t test_vfnmacc_vv_bf16m8_mu(vbool2_t mask, vbfloat16m8_t vd, vbfloat16m8_t vs1, vbfloat16m8_t vs2, size_t vl) {
+ return __riscv_vfnmacc_mu(mask, vd, vs1, vs2, vl);
+}
+
+// CHECK-RV64-LABEL: define dso_local <vscale x 32 x bfloat> @test_vfnmacc_vf_bf16m8_mu(
+// CHECK-RV64-SAME: <vscale x 32 x i1> [[MASK:%.*]], <vscale x 32 x bfloat> [[VD:%.*]], bfloat noundef [[RS1:%.*]], <vscale x 32 x bfloat> [[VS2:%.*]], i64 noundef [[VL:%.*]]) #[[ATTR0]] {
+// CHECK-RV64-NEXT: [[ENTRY:.*:]]
+// CHECK-RV64-NEXT: [[TMP0:%.*]] = call <vscale x 32 x bfloat> @llvm.riscv.vfnmacc.mask.nxv32bf16.bf16.i64(<vscale x 32 x bfloat> [[VD]], bfloat [[RS1]], <vscale x 32 x bfloat> [[VS2]], <vscale x 32 x i1> [[MASK]], i64 7, i64 [[VL]], i64 1)
+// CHECK-RV64-NEXT: ret <vscale x 32 x bfloat> [[TMP0]]
+//
+vbfloat16m8_t test_vfnmacc_vf_bf16m8_mu(vbool2_t mask, vbfloat16m8_t vd, __bf16 rs1, vbfloat16m8_t vs2, size_t vl) {
+ return __riscv_vfnmacc_mu(mask, vd, rs1, vs2, vl);
+}
+
diff --git a/clang/test/CodeGen/RISCV/rvv-intrinsics-autogenerated/zvfbfa/policy/overloaded/vfnmadd.c b/clang/test/CodeGen/RISCV/rvv-intrinsics-autogenerated/zvfbfa/policy/overloaded/vfnmadd.c
new file mode 100644
index 0000000..f9c348b
--- /dev/null
+++ b/clang/test/CodeGen/RISCV/rvv-intrinsics-autogenerated/zvfbfa/policy/overloaded/vfnmadd.c
@@ -0,0 +1,489 @@
+// NOTE: Assertions have been autogenerated by utils/update_cc_test_checks.py UTC_ARGS: --version 5
+// REQUIRES: riscv-registered-target
+// RUN: %clang_cc1 -triple riscv64 -target-feature +v \
+// RUN: -target-feature +experimental-zvfbfa -disable-O0-optnone \
+// RUN: -emit-llvm %s -o - | opt -S -passes=mem2reg | \
+// RUN: FileCheck --check-prefix=CHECK-RV64 %s
+
+#include <riscv_vector.h>
+
+// CHECK-RV64-LABEL: define dso_local <vscale x 1 x bfloat> @test_vfnmadd_vv_bf16mf4_tu(
+// CHECK-RV64-SAME: <vscale x 1 x bfloat> [[VD:%.*]], <vscale x 1 x bfloat> [[VS1:%.*]], <vscale x 1 x bfloat> [[VS2:%.*]], i64 noundef [[VL:%.*]]) #[[ATTR0:[0-9]+]] {
+// CHECK-RV64-NEXT: [[ENTRY:.*:]]
+// CHECK-RV64-NEXT: [[TMP0:%.*]] = call <vscale x 1 x bfloat> @llvm.riscv.vfnmadd.nxv1bf16.nxv1bf16.i64(<vscale x 1 x bfloat> [[VD]], <vscale x 1 x bfloat> [[VS1]], <vscale x 1 x bfloat> [[VS2]], i64 7, i64 [[VL]], i64 2)
+// CHECK-RV64-NEXT: ret <vscale x 1 x bfloat> [[TMP0]]
+//
+vbfloat16mf4_t test_vfnmadd_vv_bf16mf4_tu(vbfloat16mf4_t vd, vbfloat16mf4_t vs1, vbfloat16mf4_t vs2, size_t vl) {
+ return __riscv_vfnmadd_tu(vd, vs1, vs2, vl);
+}
+
+// CHECK-RV64-LABEL: define dso_local <vscale x 1 x bfloat> @test_vfnmadd_vf_bf16mf4_tu(
+// CHECK-RV64-SAME: <vscale x 1 x bfloat> [[VD:%.*]], bfloat noundef [[RS1:%.*]], <vscale x 1 x bfloat> [[VS2:%.*]], i64 noundef [[VL:%.*]]) #[[ATTR0]] {
+// CHECK-RV64-NEXT: [[ENTRY:.*:]]
+// CHECK-RV64-NEXT: [[TMP0:%.*]] = call <vscale x 1 x bfloat> @llvm.riscv.vfnmadd.nxv1bf16.bf16.i64(<vscale x 1 x bfloat> [[VD]], bfloat [[RS1]], <vscale x 1 x bfloat> [[VS2]], i64 7, i64 [[VL]], i64 2)
+// CHECK-RV64-NEXT: ret <vscale x 1 x bfloat> [[TMP0]]
+//
+vbfloat16mf4_t test_vfnmadd_vf_bf16mf4_tu(vbfloat16mf4_t vd, __bf16 rs1, vbfloat16mf4_t vs2, size_t vl) {
+ return __riscv_vfnmadd_tu(vd, rs1, vs2, vl);
+}
+
+// CHECK-RV64-LABEL: define dso_local <vscale x 2 x bfloat> @test_vfnmadd_vv_bf16mf2_tu(
+// CHECK-RV64-SAME: <vscale x 2 x bfloat> [[VD:%.*]], <vscale x 2 x bfloat> [[VS1:%.*]], <vscale x 2 x bfloat> [[VS2:%.*]], i64 noundef [[VL:%.*]]) #[[ATTR0]] {
+// CHECK-RV64-NEXT: [[ENTRY:.*:]]
+// CHECK-RV64-NEXT: [[TMP0:%.*]] = call <vscale x 2 x bfloat> @llvm.riscv.vfnmadd.nxv2bf16.nxv2bf16.i64(<vscale x 2 x bfloat> [[VD]], <vscale x 2 x bfloat> [[VS1]], <vscale x 2 x bfloat> [[VS2]], i64 7, i64 [[VL]], i64 2)
+// CHECK-RV64-NEXT: ret <vscale x 2 x bfloat> [[TMP0]]
+//
+vbfloat16mf2_t test_vfnmadd_vv_bf16mf2_tu(vbfloat16mf2_t vd, vbfloat16mf2_t vs1, vbfloat16mf2_t vs2, size_t vl) {
+ return __riscv_vfnmadd_tu(vd, vs1, vs2, vl);
+}
+
+// CHECK-RV64-LABEL: define dso_local <vscale x 2 x bfloat> @test_vfnmadd_vf_bf16mf2_tu(
+// CHECK-RV64-SAME: <vscale x 2 x bfloat> [[VD:%.*]], bfloat noundef [[RS1:%.*]], <vscale x 2 x bfloat> [[VS2:%.*]], i64 noundef [[VL:%.*]]) #[[ATTR0]] {
+// CHECK-RV64-NEXT: [[ENTRY:.*:]]
+// CHECK-RV64-NEXT: [[TMP0:%.*]] = call <vscale x 2 x bfloat> @llvm.riscv.vfnmadd.nxv2bf16.bf16.i64(<vscale x 2 x bfloat> [[VD]], bfloat [[RS1]], <vscale x 2 x bfloat> [[VS2]], i64 7, i64 [[VL]], i64 2)
+// CHECK-RV64-NEXT: ret <vscale x 2 x bfloat> [[TMP0]]
+//
+vbfloat16mf2_t test_vfnmadd_vf_bf16mf2_tu(vbfloat16mf2_t vd, __bf16 rs1, vbfloat16mf2_t vs2, size_t vl) {
+ return __riscv_vfnmadd_tu(vd, rs1, vs2, vl);
+}
+
+// CHECK-RV64-LABEL: define dso_local <vscale x 4 x bfloat> @test_vfnmadd_vv_bf16m1_tu(
+// CHECK-RV64-SAME: <vscale x 4 x bfloat> [[VD:%.*]], <vscale x 4 x bfloat> [[VS1:%.*]], <vscale x 4 x bfloat> [[VS2:%.*]], i64 noundef [[VL:%.*]]) #[[ATTR0]] {
+// CHECK-RV64-NEXT: [[ENTRY:.*:]]
+// CHECK-RV64-NEXT: [[TMP0:%.*]] = call <vscale x 4 x bfloat> @llvm.riscv.vfnmadd.nxv4bf16.nxv4bf16.i64(<vscale x 4 x bfloat> [[VD]], <vscale x 4 x bfloat> [[VS1]], <vscale x 4 x bfloat> [[VS2]], i64 7, i64 [[VL]], i64 2)
+// CHECK-RV64-NEXT: ret <vscale x 4 x bfloat> [[TMP0]]
+//
+vbfloat16m1_t test_vfnmadd_vv_bf16m1_tu(vbfloat16m1_t vd, vbfloat16m1_t vs1, vbfloat16m1_t vs2, size_t vl) {
+ return __riscv_vfnmadd_tu(vd, vs1, vs2, vl);
+}
+
+// CHECK-RV64-LABEL: define dso_local <vscale x 4 x bfloat> @test_vfnmadd_vf_bf16m1_tu(
+// CHECK-RV64-SAME: <vscale x 4 x bfloat> [[VD:%.*]], bfloat noundef [[RS1:%.*]], <vscale x 4 x bfloat> [[VS2:%.*]], i64 noundef [[VL:%.*]]) #[[ATTR0]] {
+// CHECK-RV64-NEXT: [[ENTRY:.*:]]
+// CHECK-RV64-NEXT: [[TMP0:%.*]] = call <vscale x 4 x bfloat> @llvm.riscv.vfnmadd.nxv4bf16.bf16.i64(<vscale x 4 x bfloat> [[VD]], bfloat [[RS1]], <vscale x 4 x bfloat> [[VS2]], i64 7, i64 [[VL]], i64 2)
+// CHECK-RV64-NEXT: ret <vscale x 4 x bfloat> [[TMP0]]
+//
+vbfloat16m1_t test_vfnmadd_vf_bf16m1_tu(vbfloat16m1_t vd, __bf16 rs1, vbfloat16m1_t vs2, size_t vl) {
+ return __riscv_vfnmadd_tu(vd, rs1, vs2, vl);
+}
+
+// CHECK-RV64-LABEL: define dso_local <vscale x 8 x bfloat> @test_vfnmadd_vv_bf16m2_tu(
+// CHECK-RV64-SAME: <vscale x 8 x bfloat> [[VD:%.*]], <vscale x 8 x bfloat> [[VS1:%.*]], <vscale x 8 x bfloat> [[VS2:%.*]], i64 noundef [[VL:%.*]]) #[[ATTR0]] {
+// CHECK-RV64-NEXT: [[ENTRY:.*:]]
+// CHECK-RV64-NEXT: [[TMP0:%.*]] = call <vscale x 8 x bfloat> @llvm.riscv.vfnmadd.nxv8bf16.nxv8bf16.i64(<vscale x 8 x bfloat> [[VD]], <vscale x 8 x bfloat> [[VS1]], <vscale x 8 x bfloat> [[VS2]], i64 7, i64 [[VL]], i64 2)
+// CHECK-RV64-NEXT: ret <vscale x 8 x bfloat> [[TMP0]]
+//
+vbfloat16m2_t test_vfnmadd_vv_bf16m2_tu(vbfloat16m2_t vd, vbfloat16m2_t vs1, vbfloat16m2_t vs2, size_t vl) {
+ return __riscv_vfnmadd_tu(vd, vs1, vs2, vl);
+}
+
+// CHECK-RV64-LABEL: define dso_local <vscale x 8 x bfloat> @test_vfnmadd_vf_bf16m2_tu(
+// CHECK-RV64-SAME: <vscale x 8 x bfloat> [[VD:%.*]], bfloat noundef [[RS1:%.*]], <vscale x 8 x bfloat> [[VS2:%.*]], i64 noundef [[VL:%.*]]) #[[ATTR0]] {
+// CHECK-RV64-NEXT: [[ENTRY:.*:]]
+// CHECK-RV64-NEXT: [[TMP0:%.*]] = call <vscale x 8 x bfloat> @llvm.riscv.vfnmadd.nxv8bf16.bf16.i64(<vscale x 8 x bfloat> [[VD]], bfloat [[RS1]], <vscale x 8 x bfloat> [[VS2]], i64 7, i64 [[VL]], i64 2)
+// CHECK-RV64-NEXT: ret <vscale x 8 x bfloat> [[TMP0]]
+//
+vbfloat16m2_t test_vfnmadd_vf_bf16m2_tu(vbfloat16m2_t vd, __bf16 rs1, vbfloat16m2_t vs2, size_t vl) {
+ return __riscv_vfnmadd_tu(vd, rs1, vs2, vl);
+}
+
+// CHECK-RV64-LABEL: define dso_local <vscale x 16 x bfloat> @test_vfnmadd_vv_bf16m4_tu(
+// CHECK-RV64-SAME: <vscale x 16 x bfloat> [[VD:%.*]], <vscale x 16 x bfloat> [[VS1:%.*]], <vscale x 16 x bfloat> [[VS2:%.*]], i64 noundef [[VL:%.*]]) #[[ATTR0]] {
+// CHECK-RV64-NEXT: [[ENTRY:.*:]]
+// CHECK-RV64-NEXT: [[TMP0:%.*]] = call <vscale x 16 x bfloat> @llvm.riscv.vfnmadd.nxv16bf16.nxv16bf16.i64(<vscale x 16 x bfloat> [[VD]], <vscale x 16 x bfloat> [[VS1]], <vscale x 16 x bfloat> [[VS2]], i64 7, i64 [[VL]], i64 2)
+// CHECK-RV64-NEXT: ret <vscale x 16 x bfloat> [[TMP0]]
+//
+vbfloat16m4_t test_vfnmadd_vv_bf16m4_tu(vbfloat16m4_t vd, vbfloat16m4_t vs1, vbfloat16m4_t vs2, size_t vl) {
+ return __riscv_vfnmadd_tu(vd, vs1, vs2, vl);
+}
+
+// CHECK-RV64-LABEL: define dso_local <vscale x 16 x bfloat> @test_vfnmadd_vf_bf16m4_tu(
+// CHECK-RV64-SAME: <vscale x 16 x bfloat> [[VD:%.*]], bfloat noundef [[RS1:%.*]], <vscale x 16 x bfloat> [[VS2:%.*]], i64 noundef [[VL:%.*]]) #[[ATTR0]] {
+// CHECK-RV64-NEXT: [[ENTRY:.*:]]
+// CHECK-RV64-NEXT: [[TMP0:%.*]] = call <vscale x 16 x bfloat> @llvm.riscv.vfnmadd.nxv16bf16.bf16.i64(<vscale x 16 x bfloat> [[VD]], bfloat [[RS1]], <vscale x 16 x bfloat> [[VS2]], i64 7, i64 [[VL]], i64 2)
+// CHECK-RV64-NEXT: ret <vscale x 16 x bfloat> [[TMP0]]
+//
+vbfloat16m4_t test_vfnmadd_vf_bf16m4_tu(vbfloat16m4_t vd, __bf16 rs1, vbfloat16m4_t vs2, size_t vl) {
+ return __riscv_vfnmadd_tu(vd, rs1, vs2, vl);
+}
+
+// CHECK-RV64-LABEL: define dso_local <vscale x 32 x bfloat> @test_vfnmadd_vv_bf16m8_tu(
+// CHECK-RV64-SAME: <vscale x 32 x bfloat> [[VD:%.*]], <vscale x 32 x bfloat> [[VS1:%.*]], <vscale x 32 x bfloat> [[VS2:%.*]], i64 noundef [[VL:%.*]]) #[[ATTR0]] {
+// CHECK-RV64-NEXT: [[ENTRY:.*:]]
+// CHECK-RV64-NEXT: [[TMP0:%.*]] = call <vscale x 32 x bfloat> @llvm.riscv.vfnmadd.nxv32bf16.nxv32bf16.i64(<vscale x 32 x bfloat> [[VD]], <vscale x 32 x bfloat> [[VS1]], <vscale x 32 x bfloat> [[VS2]], i64 7, i64 [[VL]], i64 2)
+// CHECK-RV64-NEXT: ret <vscale x 32 x bfloat> [[TMP0]]
+//
+vbfloat16m8_t test_vfnmadd_vv_bf16m8_tu(vbfloat16m8_t vd, vbfloat16m8_t vs1, vbfloat16m8_t vs2, size_t vl) {
+ return __riscv_vfnmadd_tu(vd, vs1, vs2, vl);
+}
+
+// CHECK-RV64-LABEL: define dso_local <vscale x 32 x bfloat> @test_vfnmadd_vf_bf16m8_tu(
+// CHECK-RV64-SAME: <vscale x 32 x bfloat> [[VD:%.*]], bfloat noundef [[RS1:%.*]], <vscale x 32 x bfloat> [[VS2:%.*]], i64 noundef [[VL:%.*]]) #[[ATTR0]] {
+// CHECK-RV64-NEXT: [[ENTRY:.*:]]
+// CHECK-RV64-NEXT: [[TMP0:%.*]] = call <vscale x 32 x bfloat> @llvm.riscv.vfnmadd.nxv32bf16.bf16.i64(<vscale x 32 x bfloat> [[VD]], bfloat [[RS1]], <vscale x 32 x bfloat> [[VS2]], i64 7, i64 [[VL]], i64 2)
+// CHECK-RV64-NEXT: ret <vscale x 32 x bfloat> [[TMP0]]
+//
+vbfloat16m8_t test_vfnmadd_vf_bf16m8_tu(vbfloat16m8_t vd, __bf16 rs1, vbfloat16m8_t vs2, size_t vl) {
+ return __riscv_vfnmadd_tu(vd, rs1, vs2, vl);
+}
+
+// CHECK-RV64-LABEL: define dso_local <vscale x 1 x bfloat> @test_vfnmadd_vv_bf16mf4_tum(
+// CHECK-RV64-SAME: <vscale x 1 x i1> [[MASK:%.*]], <vscale x 1 x bfloat> [[VD:%.*]], <vscale x 1 x bfloat> [[VS1:%.*]], <vscale x 1 x bfloat> [[VS2:%.*]], i64 noundef [[VL:%.*]]) #[[ATTR0]] {
+// CHECK-RV64-NEXT: [[ENTRY:.*:]]
+// CHECK-RV64-NEXT: [[TMP0:%.*]] = call <vscale x 1 x bfloat> @llvm.riscv.vfnmadd.mask.nxv1bf16.nxv1bf16.i64(<vscale x 1 x bfloat> [[VD]], <vscale x 1 x bfloat> [[VS1]], <vscale x 1 x bfloat> [[VS2]], <vscale x 1 x i1> [[MASK]], i64 7, i64 [[VL]], i64 2)
+// CHECK-RV64-NEXT: ret <vscale x 1 x bfloat> [[TMP0]]
+//
+vbfloat16mf4_t test_vfnmadd_vv_bf16mf4_tum(vbool64_t mask, vbfloat16mf4_t vd, vbfloat16mf4_t vs1, vbfloat16mf4_t vs2, size_t vl) {
+ return __riscv_vfnmadd_tum(mask, vd, vs1, vs2, vl);
+}
+
+// CHECK-RV64-LABEL: define dso_local <vscale x 1 x bfloat> @test_vfnmadd_vf_bf16mf4_tum(
+// CHECK-RV64-SAME: <vscale x 1 x i1> [[MASK:%.*]], <vscale x 1 x bfloat> [[VD:%.*]], bfloat noundef [[RS1:%.*]], <vscale x 1 x bfloat> [[VS2:%.*]], i64 noundef [[VL:%.*]]) #[[ATTR0]] {
+// CHECK-RV64-NEXT: [[ENTRY:.*:]]
+// CHECK-RV64-NEXT: [[TMP0:%.*]] = call <vscale x 1 x bfloat> @llvm.riscv.vfnmadd.mask.nxv1bf16.bf16.i64(<vscale x 1 x bfloat> [[VD]], bfloat [[RS1]], <vscale x 1 x bfloat> [[VS2]], <vscale x 1 x i1> [[MASK]], i64 7, i64 [[VL]], i64 2)
+// CHECK-RV64-NEXT: ret <vscale x 1 x bfloat> [[TMP0]]
+//
+vbfloat16mf4_t test_vfnmadd_vf_bf16mf4_tum(vbool64_t mask, vbfloat16mf4_t vd, __bf16 rs1, vbfloat16mf4_t vs2, size_t vl) {
+ return __riscv_vfnmadd_tum(mask, vd, rs1, vs2, vl);
+}
+
+// CHECK-RV64-LABEL: define dso_local <vscale x 2 x bfloat> @test_vfnmadd_vv_bf16mf2_tum(
+// CHECK-RV64-SAME: <vscale x 2 x i1> [[MASK:%.*]], <vscale x 2 x bfloat> [[VD:%.*]], <vscale x 2 x bfloat> [[VS1:%.*]], <vscale x 2 x bfloat> [[VS2:%.*]], i64 noundef [[VL:%.*]]) #[[ATTR0]] {
+// CHECK-RV64-NEXT: [[ENTRY:.*:]]
+// CHECK-RV64-NEXT: [[TMP0:%.*]] = call <vscale x 2 x bfloat> @llvm.riscv.vfnmadd.mask.nxv2bf16.nxv2bf16.i64(<vscale x 2 x bfloat> [[VD]], <vscale x 2 x bfloat> [[VS1]], <vscale x 2 x bfloat> [[VS2]], <vscale x 2 x i1> [[MASK]], i64 7, i64 [[VL]], i64 2)
+// CHECK-RV64-NEXT: ret <vscale x 2 x bfloat> [[TMP0]]
+//
+vbfloat16mf2_t test_vfnmadd_vv_bf16mf2_tum(vbool32_t mask, vbfloat16mf2_t vd, vbfloat16mf2_t vs1, vbfloat16mf2_t vs2, size_t vl) {
+ return __riscv_vfnmadd_tum(mask, vd, vs1, vs2, vl);
+}
+
+// CHECK-RV64-LABEL: define dso_local <vscale x 2 x bfloat> @test_vfnmadd_vf_bf16mf2_tum(
+// CHECK-RV64-SAME: <vscale x 2 x i1> [[MASK:%.*]], <vscale x 2 x bfloat> [[VD:%.*]], bfloat noundef [[RS1:%.*]], <vscale x 2 x bfloat> [[VS2:%.*]], i64 noundef [[VL:%.*]]) #[[ATTR0]] {
+// CHECK-RV64-NEXT: [[ENTRY:.*:]]
+// CHECK-RV64-NEXT: [[TMP0:%.*]] = call <vscale x 2 x bfloat> @llvm.riscv.vfnmadd.mask.nxv2bf16.bf16.i64(<vscale x 2 x bfloat> [[VD]], bfloat [[RS1]], <vscale x 2 x bfloat> [[VS2]], <vscale x 2 x i1> [[MASK]], i64 7, i64 [[VL]], i64 2)
+// CHECK-RV64-NEXT: ret <vscale x 2 x bfloat> [[TMP0]]
+//
+vbfloat16mf2_t test_vfnmadd_vf_bf16mf2_tum(vbool32_t mask, vbfloat16mf2_t vd, __bf16 rs1, vbfloat16mf2_t vs2, size_t vl) {
+ return __riscv_vfnmadd_tum(mask, vd, rs1, vs2, vl);
+}
+
+// CHECK-RV64-LABEL: define dso_local <vscale x 4 x bfloat> @test_vfnmadd_vv_bf16m1_tum(
+// CHECK-RV64-SAME: <vscale x 4 x i1> [[MASK:%.*]], <vscale x 4 x bfloat> [[VD:%.*]], <vscale x 4 x bfloat> [[VS1:%.*]], <vscale x 4 x bfloat> [[VS2:%.*]], i64 noundef [[VL:%.*]]) #[[ATTR0]] {
+// CHECK-RV64-NEXT: [[ENTRY:.*:]]
+// CHECK-RV64-NEXT: [[TMP0:%.*]] = call <vscale x 4 x bfloat> @llvm.riscv.vfnmadd.mask.nxv4bf16.nxv4bf16.i64(<vscale x 4 x bfloat> [[VD]], <vscale x 4 x bfloat> [[VS1]], <vscale x 4 x bfloat> [[VS2]], <vscale x 4 x i1> [[MASK]], i64 7, i64 [[VL]], i64 2)
+// CHECK-RV64-NEXT: ret <vscale x 4 x bfloat> [[TMP0]]
+//
+vbfloat16m1_t test_vfnmadd_vv_bf16m1_tum(vbool16_t mask, vbfloat16m1_t vd, vbfloat16m1_t vs1, vbfloat16m1_t vs2, size_t vl) {
+ return __riscv_vfnmadd_tum(mask, vd, vs1, vs2, vl);
+}
+
+// CHECK-RV64-LABEL: define dso_local <vscale x 4 x bfloat> @test_vfnmadd_vf_bf16m1_tum(
+// CHECK-RV64-SAME: <vscale x 4 x i1> [[MASK:%.*]], <vscale x 4 x bfloat> [[VD:%.*]], bfloat noundef [[RS1:%.*]], <vscale x 4 x bfloat> [[VS2:%.*]], i64 noundef [[VL:%.*]]) #[[ATTR0]] {
+// CHECK-RV64-NEXT: [[ENTRY:.*:]]
+// CHECK-RV64-NEXT: [[TMP0:%.*]] = call <vscale x 4 x bfloat> @llvm.riscv.vfnmadd.mask.nxv4bf16.bf16.i64(<vscale x 4 x bfloat> [[VD]], bfloat [[RS1]], <vscale x 4 x bfloat> [[VS2]], <vscale x 4 x i1> [[MASK]], i64 7, i64 [[VL]], i64 2)
+// CHECK-RV64-NEXT: ret <vscale x 4 x bfloat> [[TMP0]]
+//
+vbfloat16m1_t test_vfnmadd_vf_bf16m1_tum(vbool16_t mask, vbfloat16m1_t vd, __bf16 rs1, vbfloat16m1_t vs2, size_t vl) {
+ return __riscv_vfnmadd_tum(mask, vd, rs1, vs2, vl);
+}
+
+// CHECK-RV64-LABEL: define dso_local <vscale x 8 x bfloat> @test_vfnmadd_vv_bf16m2_tum(
+// CHECK-RV64-SAME: <vscale x 8 x i1> [[MASK:%.*]], <vscale x 8 x bfloat> [[VD:%.*]], <vscale x 8 x bfloat> [[VS1:%.*]], <vscale x 8 x bfloat> [[VS2:%.*]], i64 noundef [[VL:%.*]]) #[[ATTR0]] {
+// CHECK-RV64-NEXT: [[ENTRY:.*:]]
+// CHECK-RV64-NEXT: [[TMP0:%.*]] = call <vscale x 8 x bfloat> @llvm.riscv.vfnmadd.mask.nxv8bf16.nxv8bf16.i64(<vscale x 8 x bfloat> [[VD]], <vscale x 8 x bfloat> [[VS1]], <vscale x 8 x bfloat> [[VS2]], <vscale x 8 x i1> [[MASK]], i64 7, i64 [[VL]], i64 2)
+// CHECK-RV64-NEXT: ret <vscale x 8 x bfloat> [[TMP0]]
+//
+vbfloat16m2_t test_vfnmadd_vv_bf16m2_tum(vbool8_t mask, vbfloat16m2_t vd, vbfloat16m2_t vs1, vbfloat16m2_t vs2, size_t vl) {
+ return __riscv_vfnmadd_tum(mask, vd, vs1, vs2, vl);
+}
+
+// CHECK-RV64-LABEL: define dso_local <vscale x 8 x bfloat> @test_vfnmadd_vf_bf16m2_tum(
+// CHECK-RV64-SAME: <vscale x 8 x i1> [[MASK:%.*]], <vscale x 8 x bfloat> [[VD:%.*]], bfloat noundef [[RS1:%.*]], <vscale x 8 x bfloat> [[VS2:%.*]], i64 noundef [[VL:%.*]]) #[[ATTR0]] {
+// CHECK-RV64-NEXT: [[ENTRY:.*:]]
+// CHECK-RV64-NEXT: [[TMP0:%.*]] = call <vscale x 8 x bfloat> @llvm.riscv.vfnmadd.mask.nxv8bf16.bf16.i64(<vscale x 8 x bfloat> [[VD]], bfloat [[RS1]], <vscale x 8 x bfloat> [[VS2]], <vscale x 8 x i1> [[MASK]], i64 7, i64 [[VL]], i64 2)
+// CHECK-RV64-NEXT: ret <vscale x 8 x bfloat> [[TMP0]]
+//
+vbfloat16m2_t test_vfnmadd_vf_bf16m2_tum(vbool8_t mask, vbfloat16m2_t vd, __bf16 rs1, vbfloat16m2_t vs2, size_t vl) {
+ return __riscv_vfnmadd_tum(mask, vd, rs1, vs2, vl);
+}
+
+// CHECK-RV64-LABEL: define dso_local <vscale x 16 x bfloat> @test_vfnmadd_vv_bf16m4_tum(
+// CHECK-RV64-SAME: <vscale x 16 x i1> [[MASK:%.*]], <vscale x 16 x bfloat> [[VD:%.*]], <vscale x 16 x bfloat> [[VS1:%.*]], <vscale x 16 x bfloat> [[VS2:%.*]], i64 noundef [[VL:%.*]]) #[[ATTR0]] {
+// CHECK-RV64-NEXT: [[ENTRY:.*:]]
+// CHECK-RV64-NEXT: [[TMP0:%.*]] = call <vscale x 16 x bfloat> @llvm.riscv.vfnmadd.mask.nxv16bf16.nxv16bf16.i64(<vscale x 16 x bfloat> [[VD]], <vscale x 16 x bfloat> [[VS1]], <vscale x 16 x bfloat> [[VS2]], <vscale x 16 x i1> [[MASK]], i64 7, i64 [[VL]], i64 2)
+// CHECK-RV64-NEXT: ret <vscale x 16 x bfloat> [[TMP0]]
+//
+vbfloat16m4_t test_vfnmadd_vv_bf16m4_tum(vbool4_t mask, vbfloat16m4_t vd, vbfloat16m4_t vs1, vbfloat16m4_t vs2, size_t vl) {
+ return __riscv_vfnmadd_tum(mask, vd, vs1, vs2, vl);
+}
+
+// CHECK-RV64-LABEL: define dso_local <vscale x 16 x bfloat> @test_vfnmadd_vf_bf16m4_tum(
+// CHECK-RV64-SAME: <vscale x 16 x i1> [[MASK:%.*]], <vscale x 16 x bfloat> [[VD:%.*]], bfloat noundef [[RS1:%.*]], <vscale x 16 x bfloat> [[VS2:%.*]], i64 noundef [[VL:%.*]]) #[[ATTR0]] {
+// CHECK-RV64-NEXT: [[ENTRY:.*:]]
+// CHECK-RV64-NEXT: [[TMP0:%.*]] = call <vscale x 16 x bfloat> @llvm.riscv.vfnmadd.mask.nxv16bf16.bf16.i64(<vscale x 16 x bfloat> [[VD]], bfloat [[RS1]], <vscale x 16 x bfloat> [[VS2]], <vscale x 16 x i1> [[MASK]], i64 7, i64 [[VL]], i64 2)
+// CHECK-RV64-NEXT: ret <vscale x 16 x bfloat> [[TMP0]]
+//
+vbfloat16m4_t test_vfnmadd_vf_bf16m4_tum(vbool4_t mask, vbfloat16m4_t vd, __bf16 rs1, vbfloat16m4_t vs2, size_t vl) {
+ return __riscv_vfnmadd_tum(mask, vd, rs1, vs2, vl);
+}
+
+// CHECK-RV64-LABEL: define dso_local <vscale x 32 x bfloat> @test_vfnmadd_vv_bf16m8_tum(
+// CHECK-RV64-SAME: <vscale x 32 x i1> [[MASK:%.*]], <vscale x 32 x bfloat> [[VD:%.*]], <vscale x 32 x bfloat> [[VS1:%.*]], <vscale x 32 x bfloat> [[VS2:%.*]], i64 noundef [[VL:%.*]]) #[[ATTR0]] {
+// CHECK-RV64-NEXT: [[ENTRY:.*:]]
+// CHECK-RV64-NEXT: [[TMP0:%.*]] = call <vscale x 32 x bfloat> @llvm.riscv.vfnmadd.mask.nxv32bf16.nxv32bf16.i64(<vscale x 32 x bfloat> [[VD]], <vscale x 32 x bfloat> [[VS1]], <vscale x 32 x bfloat> [[VS2]], <vscale x 32 x i1> [[MASK]], i64 7, i64 [[VL]], i64 2)
+// CHECK-RV64-NEXT: ret <vscale x 32 x bfloat> [[TMP0]]
+//
+vbfloat16m8_t test_vfnmadd_vv_bf16m8_tum(vbool2_t mask, vbfloat16m8_t vd, vbfloat16m8_t vs1, vbfloat16m8_t vs2, size_t vl) {
+ return __riscv_vfnmadd_tum(mask, vd, vs1, vs2, vl);
+}
+
+// CHECK-RV64-LABEL: define dso_local <vscale x 32 x bfloat> @test_vfnmadd_vf_bf16m8_tum(
+// CHECK-RV64-SAME: <vscale x 32 x i1> [[MASK:%.*]], <vscale x 32 x bfloat> [[VD:%.*]], bfloat noundef [[RS1:%.*]], <vscale x 32 x bfloat> [[VS2:%.*]], i64 noundef [[VL:%.*]]) #[[ATTR0]] {
+// CHECK-RV64-NEXT: [[ENTRY:.*:]]
+// CHECK-RV64-NEXT: [[TMP0:%.*]] = call <vscale x 32 x bfloat> @llvm.riscv.vfnmadd.mask.nxv32bf16.bf16.i64(<vscale x 32 x bfloat> [[VD]], bfloat [[RS1]], <vscale x 32 x bfloat> [[VS2]], <vscale x 32 x i1> [[MASK]], i64 7, i64 [[VL]], i64 2)
+// CHECK-RV64-NEXT: ret <vscale x 32 x bfloat> [[TMP0]]
+//
+vbfloat16m8_t test_vfnmadd_vf_bf16m8_tum(vbool2_t mask, vbfloat16m8_t vd, __bf16 rs1, vbfloat16m8_t vs2, size_t vl) {
+ return __riscv_vfnmadd_tum(mask, vd, rs1, vs2, vl);
+}
+
+// CHECK-RV64-LABEL: define dso_local <vscale x 1 x bfloat> @test_vfnmadd_vv_bf16mf4_tumu(
+// CHECK-RV64-SAME: <vscale x 1 x i1> [[MASK:%.*]], <vscale x 1 x bfloat> [[VD:%.*]], <vscale x 1 x bfloat> [[VS1:%.*]], <vscale x 1 x bfloat> [[VS2:%.*]], i64 noundef [[VL:%.*]]) #[[ATTR0]] {
+// CHECK-RV64-NEXT: [[ENTRY:.*:]]
+// CHECK-RV64-NEXT: [[TMP0:%.*]] = call <vscale x 1 x bfloat> @llvm.riscv.vfnmadd.mask.nxv1bf16.nxv1bf16.i64(<vscale x 1 x bfloat> [[VD]], <vscale x 1 x bfloat> [[VS1]], <vscale x 1 x bfloat> [[VS2]], <vscale x 1 x i1> [[MASK]], i64 7, i64 [[VL]], i64 0)
+// CHECK-RV64-NEXT: ret <vscale x 1 x bfloat> [[TMP0]]
+//
+vbfloat16mf4_t test_vfnmadd_vv_bf16mf4_tumu(vbool64_t mask, vbfloat16mf4_t vd, vbfloat16mf4_t vs1, vbfloat16mf4_t vs2, size_t vl) {
+ return __riscv_vfnmadd_tumu(mask, vd, vs1, vs2, vl);
+}
+
+// CHECK-RV64-LABEL: define dso_local <vscale x 1 x bfloat> @test_vfnmadd_vf_bf16mf4_tumu(
+// CHECK-RV64-SAME: <vscale x 1 x i1> [[MASK:%.*]], <vscale x 1 x bfloat> [[VD:%.*]], bfloat noundef [[RS1:%.*]], <vscale x 1 x bfloat> [[VS2:%.*]], i64 noundef [[VL:%.*]]) #[[ATTR0]] {
+// CHECK-RV64-NEXT: [[ENTRY:.*:]]
+// CHECK-RV64-NEXT: [[TMP0:%.*]] = call <vscale x 1 x bfloat> @llvm.riscv.vfnmadd.mask.nxv1bf16.bf16.i64(<vscale x 1 x bfloat> [[VD]], bfloat [[RS1]], <vscale x 1 x bfloat> [[VS2]], <vscale x 1 x i1> [[MASK]], i64 7, i64 [[VL]], i64 0)
+// CHECK-RV64-NEXT: ret <vscale x 1 x bfloat> [[TMP0]]
+//
+vbfloat16mf4_t test_vfnmadd_vf_bf16mf4_tumu(vbool64_t mask, vbfloat16mf4_t vd, __bf16 rs1, vbfloat16mf4_t vs2, size_t vl) {
+ return __riscv_vfnmadd_tumu(mask, vd, rs1, vs2, vl);
+}
+
+// CHECK-RV64-LABEL: define dso_local <vscale x 2 x bfloat> @test_vfnmadd_vv_bf16mf2_tumu(
+// CHECK-RV64-SAME: <vscale x 2 x i1> [[MASK:%.*]], <vscale x 2 x bfloat> [[VD:%.*]], <vscale x 2 x bfloat> [[VS1:%.*]], <vscale x 2 x bfloat> [[VS2:%.*]], i64 noundef [[VL:%.*]]) #[[ATTR0]] {
+// CHECK-RV64-NEXT: [[ENTRY:.*:]]
+// CHECK-RV64-NEXT: [[TMP0:%.*]] = call <vscale x 2 x bfloat> @llvm.riscv.vfnmadd.mask.nxv2bf16.nxv2bf16.i64(<vscale x 2 x bfloat> [[VD]], <vscale x 2 x bfloat> [[VS1]], <vscale x 2 x bfloat> [[VS2]], <vscale x 2 x i1> [[MASK]], i64 7, i64 [[VL]], i64 0)
+// CHECK-RV64-NEXT: ret <vscale x 2 x bfloat> [[TMP0]]
+//
+vbfloat16mf2_t test_vfnmadd_vv_bf16mf2_tumu(vbool32_t mask, vbfloat16mf2_t vd, vbfloat16mf2_t vs1, vbfloat16mf2_t vs2, size_t vl) {
+ return __riscv_vfnmadd_tumu(mask, vd, vs1, vs2, vl);
+}
+
+// CHECK-RV64-LABEL: define dso_local <vscale x 2 x bfloat> @test_vfnmadd_vf_bf16mf2_tumu(
+// CHECK-RV64-SAME: <vscale x 2 x i1> [[MASK:%.*]], <vscale x 2 x bfloat> [[VD:%.*]], bfloat noundef [[RS1:%.*]], <vscale x 2 x bfloat> [[VS2:%.*]], i64 noundef [[VL:%.*]]) #[[ATTR0]] {
+// CHECK-RV64-NEXT: [[ENTRY:.*:]]
+// CHECK-RV64-NEXT: [[TMP0:%.*]] = call <vscale x 2 x bfloat> @llvm.riscv.vfnmadd.mask.nxv2bf16.bf16.i64(<vscale x 2 x bfloat> [[VD]], bfloat [[RS1]], <vscale x 2 x bfloat> [[VS2]], <vscale x 2 x i1> [[MASK]], i64 7, i64 [[VL]], i64 0)
+// CHECK-RV64-NEXT: ret <vscale x 2 x bfloat> [[TMP0]]
+//
+vbfloat16mf2_t test_vfnmadd_vf_bf16mf2_tumu(vbool32_t mask, vbfloat16mf2_t vd, __bf16 rs1, vbfloat16mf2_t vs2, size_t vl) {
+ return __riscv_vfnmadd_tumu(mask, vd, rs1, vs2, vl);
+}
+
+// CHECK-RV64-LABEL: define dso_local <vscale x 4 x bfloat> @test_vfnmadd_vv_bf16m1_tumu(
+// CHECK-RV64-SAME: <vscale x 4 x i1> [[MASK:%.*]], <vscale x 4 x bfloat> [[VD:%.*]], <vscale x 4 x bfloat> [[VS1:%.*]], <vscale x 4 x bfloat> [[VS2:%.*]], i64 noundef [[VL:%.*]]) #[[ATTR0]] {
+// CHECK-RV64-NEXT: [[ENTRY:.*:]]
+// CHECK-RV64-NEXT: [[TMP0:%.*]] = call <vscale x 4 x bfloat> @llvm.riscv.vfnmadd.mask.nxv4bf16.nxv4bf16.i64(<vscale x 4 x bfloat> [[VD]], <vscale x 4 x bfloat> [[VS1]], <vscale x 4 x bfloat> [[VS2]], <vscale x 4 x i1> [[MASK]], i64 7, i64 [[VL]], i64 0)
+// CHECK-RV64-NEXT: ret <vscale x 4 x bfloat> [[TMP0]]
+//
+vbfloat16m1_t test_vfnmadd_vv_bf16m1_tumu(vbool16_t mask, vbfloat16m1_t vd, vbfloat16m1_t vs1, vbfloat16m1_t vs2, size_t vl) {
+ return __riscv_vfnmadd_tumu(mask, vd, vs1, vs2, vl);
+}
+
+// CHECK-RV64-LABEL: define dso_local <vscale x 4 x bfloat> @test_vfnmadd_vf_bf16m1_tumu(
+// CHECK-RV64-SAME: <vscale x 4 x i1> [[MASK:%.*]], <vscale x 4 x bfloat> [[VD:%.*]], bfloat noundef [[RS1:%.*]], <vscale x 4 x bfloat> [[VS2:%.*]], i64 noundef [[VL:%.*]]) #[[ATTR0]] {
+// CHECK-RV64-NEXT: [[ENTRY:.*:]]
+// CHECK-RV64-NEXT: [[TMP0:%.*]] = call <vscale x 4 x bfloat> @llvm.riscv.vfnmadd.mask.nxv4bf16.bf16.i64(<vscale x 4 x bfloat> [[VD]], bfloat [[RS1]], <vscale x 4 x bfloat> [[VS2]], <vscale x 4 x i1> [[MASK]], i64 7, i64 [[VL]], i64 0)
+// CHECK-RV64-NEXT: ret <vscale x 4 x bfloat> [[TMP0]]
+//
+vbfloat16m1_t test_vfnmadd_vf_bf16m1_tumu(vbool16_t mask, vbfloat16m1_t vd, __bf16 rs1, vbfloat16m1_t vs2, size_t vl) {
+ return __riscv_vfnmadd_tumu(mask, vd, rs1, vs2, vl);
+}
+
+// CHECK-RV64-LABEL: define dso_local <vscale x 8 x bfloat> @test_vfnmadd_vv_bf16m2_tumu(
+// CHECK-RV64-SAME: <vscale x 8 x i1> [[MASK:%.*]], <vscale x 8 x bfloat> [[VD:%.*]], <vscale x 8 x bfloat> [[VS1:%.*]], <vscale x 8 x bfloat> [[VS2:%.*]], i64 noundef [[VL:%.*]]) #[[ATTR0]] {
+// CHECK-RV64-NEXT: [[ENTRY:.*:]]
+// CHECK-RV64-NEXT: [[TMP0:%.*]] = call <vscale x 8 x bfloat> @llvm.riscv.vfnmadd.mask.nxv8bf16.nxv8bf16.i64(<vscale x 8 x bfloat> [[VD]], <vscale x 8 x bfloat> [[VS1]], <vscale x 8 x bfloat> [[VS2]], <vscale x 8 x i1> [[MASK]], i64 7, i64 [[VL]], i64 0)
+// CHECK-RV64-NEXT: ret <vscale x 8 x bfloat> [[TMP0]]
+//
+vbfloat16m2_t test_vfnmadd_vv_bf16m2_tumu(vbool8_t mask, vbfloat16m2_t vd, vbfloat16m2_t vs1, vbfloat16m2_t vs2, size_t vl) {
+ return __riscv_vfnmadd_tumu(mask, vd, vs1, vs2, vl);
+}
+
+// CHECK-RV64-LABEL: define dso_local <vscale x 8 x bfloat> @test_vfnmadd_vf_bf16m2_tumu(
+// CHECK-RV64-SAME: <vscale x 8 x i1> [[MASK:%.*]], <vscale x 8 x bfloat> [[VD:%.*]], bfloat noundef [[RS1:%.*]], <vscale x 8 x bfloat> [[VS2:%.*]], i64 noundef [[VL:%.*]]) #[[ATTR0]] {
+// CHECK-RV64-NEXT: [[ENTRY:.*:]]
+// CHECK-RV64-NEXT: [[TMP0:%.*]] = call <vscale x 8 x bfloat> @llvm.riscv.vfnmadd.mask.nxv8bf16.bf16.i64(<vscale x 8 x bfloat> [[VD]], bfloat [[RS1]], <vscale x 8 x bfloat> [[VS2]], <vscale x 8 x i1> [[MASK]], i64 7, i64 [[VL]], i64 0)
+// CHECK-RV64-NEXT: ret <vscale x 8 x bfloat> [[TMP0]]
+//
+vbfloat16m2_t test_vfnmadd_vf_bf16m2_tumu(vbool8_t mask, vbfloat16m2_t vd, __bf16 rs1, vbfloat16m2_t vs2, size_t vl) {
+ return __riscv_vfnmadd_tumu(mask, vd, rs1, vs2, vl);
+}
+
+// CHECK-RV64-LABEL: define dso_local <vscale x 16 x bfloat> @test_vfnmadd_vv_bf16m4_tumu(
+// CHECK-RV64-SAME: <vscale x 16 x i1> [[MASK:%.*]], <vscale x 16 x bfloat> [[VD:%.*]], <vscale x 16 x bfloat> [[VS1:%.*]], <vscale x 16 x bfloat> [[VS2:%.*]], i64 noundef [[VL:%.*]]) #[[ATTR0]] {
+// CHECK-RV64-NEXT: [[ENTRY:.*:]]
+// CHECK-RV64-NEXT: [[TMP0:%.*]] = call <vscale x 16 x bfloat> @llvm.riscv.vfnmadd.mask.nxv16bf16.nxv16bf16.i64(<vscale x 16 x bfloat> [[VD]], <vscale x 16 x bfloat> [[VS1]], <vscale x 16 x bfloat> [[VS2]], <vscale x 16 x i1> [[MASK]], i64 7, i64 [[VL]], i64 0)
+// CHECK-RV64-NEXT: ret <vscale x 16 x bfloat> [[TMP0]]
+//
+vbfloat16m4_t test_vfnmadd_vv_bf16m4_tumu(vbool4_t mask, vbfloat16m4_t vd, vbfloat16m4_t vs1, vbfloat16m4_t vs2, size_t vl) {
+ return __riscv_vfnmadd_tumu(mask, vd, vs1, vs2, vl);
+}
+
+// CHECK-RV64-LABEL: define dso_local <vscale x 16 x bfloat> @test_vfnmadd_vf_bf16m4_tumu(
+// CHECK-RV64-SAME: <vscale x 16 x i1> [[MASK:%.*]], <vscale x 16 x bfloat> [[VD:%.*]], bfloat noundef [[RS1:%.*]], <vscale x 16 x bfloat> [[VS2:%.*]], i64 noundef [[VL:%.*]]) #[[ATTR0]] {
+// CHECK-RV64-NEXT: [[ENTRY:.*:]]
+// CHECK-RV64-NEXT: [[TMP0:%.*]] = call <vscale x 16 x bfloat> @llvm.riscv.vfnmadd.mask.nxv16bf16.bf16.i64(<vscale x 16 x bfloat> [[VD]], bfloat [[RS1]], <vscale x 16 x bfloat> [[VS2]], <vscale x 16 x i1> [[MASK]], i64 7, i64 [[VL]], i64 0)
+// CHECK-RV64-NEXT: ret <vscale x 16 x bfloat> [[TMP0]]
+//
+vbfloat16m4_t test_vfnmadd_vf_bf16m4_tumu(vbool4_t mask, vbfloat16m4_t vd, __bf16 rs1, vbfloat16m4_t vs2, size_t vl) {
+ return __riscv_vfnmadd_tumu(mask, vd, rs1, vs2, vl);
+}
+
+// CHECK-RV64-LABEL: define dso_local <vscale x 32 x bfloat> @test_vfnmadd_vv_bf16m8_tumu(
+// CHECK-RV64-SAME: <vscale x 32 x i1> [[MASK:%.*]], <vscale x 32 x bfloat> [[VD:%.*]], <vscale x 32 x bfloat> [[VS1:%.*]], <vscale x 32 x bfloat> [[VS2:%.*]], i64 noundef [[VL:%.*]]) #[[ATTR0]] {
+// CHECK-RV64-NEXT: [[ENTRY:.*:]]
+// CHECK-RV64-NEXT: [[TMP0:%.*]] = call <vscale x 32 x bfloat> @llvm.riscv.vfnmadd.mask.nxv32bf16.nxv32bf16.i64(<vscale x 32 x bfloat> [[VD]], <vscale x 32 x bfloat> [[VS1]], <vscale x 32 x bfloat> [[VS2]], <vscale x 32 x i1> [[MASK]], i64 7, i64 [[VL]], i64 0)
+// CHECK-RV64-NEXT: ret <vscale x 32 x bfloat> [[TMP0]]
+//
+vbfloat16m8_t test_vfnmadd_vv_bf16m8_tumu(vbool2_t mask, vbfloat16m8_t vd, vbfloat16m8_t vs1, vbfloat16m8_t vs2, size_t vl) {
+ return __riscv_vfnmadd_tumu(mask, vd, vs1, vs2, vl);
+}
+
+// CHECK-RV64-LABEL: define dso_local <vscale x 32 x bfloat> @test_vfnmadd_vf_bf16m8_tumu(
+// CHECK-RV64-SAME: <vscale x 32 x i1> [[MASK:%.*]], <vscale x 32 x bfloat> [[VD:%.*]], bfloat noundef [[RS1:%.*]], <vscale x 32 x bfloat> [[VS2:%.*]], i64 noundef [[VL:%.*]]) #[[ATTR0]] {
+// CHECK-RV64-NEXT: [[ENTRY:.*:]]
+// CHECK-RV64-NEXT: [[TMP0:%.*]] = call <vscale x 32 x bfloat> @llvm.riscv.vfnmadd.mask.nxv32bf16.bf16.i64(<vscale x 32 x bfloat> [[VD]], bfloat [[RS1]], <vscale x 32 x bfloat> [[VS2]], <vscale x 32 x i1> [[MASK]], i64 7, i64 [[VL]], i64 0)
+// CHECK-RV64-NEXT: ret <vscale x 32 x bfloat> [[TMP0]]
+//
+vbfloat16m8_t test_vfnmadd_vf_bf16m8_tumu(vbool2_t mask, vbfloat16m8_t vd, __bf16 rs1, vbfloat16m8_t vs2, size_t vl) {
+ return __riscv_vfnmadd_tumu(mask, vd, rs1, vs2, vl);
+}
+
+// CHECK-RV64-LABEL: define dso_local <vscale x 1 x bfloat> @test_vfnmadd_vv_bf16mf4_mu(
+// CHECK-RV64-SAME: <vscale x 1 x i1> [[MASK:%.*]], <vscale x 1 x bfloat> [[VD:%.*]], <vscale x 1 x bfloat> [[VS1:%.*]], <vscale x 1 x bfloat> [[VS2:%.*]], i64 noundef [[VL:%.*]]) #[[ATTR0]] {
+// CHECK-RV64-NEXT: [[ENTRY:.*:]]
+// CHECK-RV64-NEXT: [[TMP0:%.*]] = call <vscale x 1 x bfloat> @llvm.riscv.vfnmadd.mask.nxv1bf16.nxv1bf16.i64(<vscale x 1 x bfloat> [[VD]], <vscale x 1 x bfloat> [[VS1]], <vscale x 1 x bfloat> [[VS2]], <vscale x 1 x i1> [[MASK]], i64 7, i64 [[VL]], i64 1)
+// CHECK-RV64-NEXT: ret <vscale x 1 x bfloat> [[TMP0]]
+//
+vbfloat16mf4_t test_vfnmadd_vv_bf16mf4_mu(vbool64_t mask, vbfloat16mf4_t vd, vbfloat16mf4_t vs1, vbfloat16mf4_t vs2, size_t vl) {
+ return __riscv_vfnmadd_mu(mask, vd, vs1, vs2, vl);
+}
+
+// CHECK-RV64-LABEL: define dso_local <vscale x 1 x bfloat> @test_vfnmadd_vf_bf16mf4_mu(
+// CHECK-RV64-SAME: <vscale x 1 x i1> [[MASK:%.*]], <vscale x 1 x bfloat> [[VD:%.*]], bfloat noundef [[RS1:%.*]], <vscale x 1 x bfloat> [[VS2:%.*]], i64 noundef [[VL:%.*]]) #[[ATTR0]] {
+// CHECK-RV64-NEXT: [[ENTRY:.*:]]
+// CHECK-RV64-NEXT: [[TMP0:%.*]] = call <vscale x 1 x bfloat> @llvm.riscv.vfnmadd.mask.nxv1bf16.bf16.i64(<vscale x 1 x bfloat> [[VD]], bfloat [[RS1]], <vscale x 1 x bfloat> [[VS2]], <vscale x 1 x i1> [[MASK]], i64 7, i64 [[VL]], i64 1)
+// CHECK-RV64-NEXT: ret <vscale x 1 x bfloat> [[TMP0]]
+//
+vbfloat16mf4_t test_vfnmadd_vf_bf16mf4_mu(vbool64_t mask, vbfloat16mf4_t vd, __bf16 rs1, vbfloat16mf4_t vs2, size_t vl) {
+ return __riscv_vfnmadd_mu(mask, vd, rs1, vs2, vl);
+}
+
+// CHECK-RV64-LABEL: define dso_local <vscale x 2 x bfloat> @test_vfnmadd_vv_bf16mf2_mu(
+// CHECK-RV64-SAME: <vscale x 2 x i1> [[MASK:%.*]], <vscale x 2 x bfloat> [[VD:%.*]], <vscale x 2 x bfloat> [[VS1:%.*]], <vscale x 2 x bfloat> [[VS2:%.*]], i64 noundef [[VL:%.*]]) #[[ATTR0]] {
+// CHECK-RV64-NEXT: [[ENTRY:.*:]]
+// CHECK-RV64-NEXT: [[TMP0:%.*]] = call <vscale x 2 x bfloat> @llvm.riscv.vfnmadd.mask.nxv2bf16.nxv2bf16.i64(<vscale x 2 x bfloat> [[VD]], <vscale x 2 x bfloat> [[VS1]], <vscale x 2 x bfloat> [[VS2]], <vscale x 2 x i1> [[MASK]], i64 7, i64 [[VL]], i64 1)
+// CHECK-RV64-NEXT: ret <vscale x 2 x bfloat> [[TMP0]]
+//
+vbfloat16mf2_t test_vfnmadd_vv_bf16mf2_mu(vbool32_t mask, vbfloat16mf2_t vd, vbfloat16mf2_t vs1, vbfloat16mf2_t vs2, size_t vl) {
+ return __riscv_vfnmadd_mu(mask, vd, vs1, vs2, vl);
+}
+
+// CHECK-RV64-LABEL: define dso_local <vscale x 2 x bfloat> @test_vfnmadd_vf_bf16mf2_mu(
+// CHECK-RV64-SAME: <vscale x 2 x i1> [[MASK:%.*]], <vscale x 2 x bfloat> [[VD:%.*]], bfloat noundef [[RS1:%.*]], <vscale x 2 x bfloat> [[VS2:%.*]], i64 noundef [[VL:%.*]]) #[[ATTR0]] {
+// CHECK-RV64-NEXT: [[ENTRY:.*:]]
+// CHECK-RV64-NEXT: [[TMP0:%.*]] = call <vscale x 2 x bfloat> @llvm.riscv.vfnmadd.mask.nxv2bf16.bf16.i64(<vscale x 2 x bfloat> [[VD]], bfloat [[RS1]], <vscale x 2 x bfloat> [[VS2]], <vscale x 2 x i1> [[MASK]], i64 7, i64 [[VL]], i64 1)
+// CHECK-RV64-NEXT: ret <vscale x 2 x bfloat> [[TMP0]]
+//
+vbfloat16mf2_t test_vfnmadd_vf_bf16mf2_mu(vbool32_t mask, vbfloat16mf2_t vd, __bf16 rs1, vbfloat16mf2_t vs2, size_t vl) {
+ return __riscv_vfnmadd_mu(mask, vd, rs1, vs2, vl);
+}
+
+// CHECK-RV64-LABEL: define dso_local <vscale x 4 x bfloat> @test_vfnmadd_vv_bf16m1_mu(
+// CHECK-RV64-SAME: <vscale x 4 x i1> [[MASK:%.*]], <vscale x 4 x bfloat> [[VD:%.*]], <vscale x 4 x bfloat> [[VS1:%.*]], <vscale x 4 x bfloat> [[VS2:%.*]], i64 noundef [[VL:%.*]]) #[[ATTR0]] {
+// CHECK-RV64-NEXT: [[ENTRY:.*:]]
+// CHECK-RV64-NEXT: [[TMP0:%.*]] = call <vscale x 4 x bfloat> @llvm.riscv.vfnmadd.mask.nxv4bf16.nxv4bf16.i64(<vscale x 4 x bfloat> [[VD]], <vscale x 4 x bfloat> [[VS1]], <vscale x 4 x bfloat> [[VS2]], <vscale x 4 x i1> [[MASK]], i64 7, i64 [[VL]], i64 1)
+// CHECK-RV64-NEXT: ret <vscale x 4 x bfloat> [[TMP0]]
+//
+vbfloat16m1_t test_vfnmadd_vv_bf16m1_mu(vbool16_t mask, vbfloat16m1_t vd, vbfloat16m1_t vs1, vbfloat16m1_t vs2, size_t vl) {
+ return __riscv_vfnmadd_mu(mask, vd, vs1, vs2, vl);
+}
+
+// CHECK-RV64-LABEL: define dso_local <vscale x 4 x bfloat> @test_vfnmadd_vf_bf16m1_mu(
+// CHECK-RV64-SAME: <vscale x 4 x i1> [[MASK:%.*]], <vscale x 4 x bfloat> [[VD:%.*]], bfloat noundef [[RS1:%.*]], <vscale x 4 x bfloat> [[VS2:%.*]], i64 noundef [[VL:%.*]]) #[[ATTR0]] {
+// CHECK-RV64-NEXT: [[ENTRY:.*:]]
+// CHECK-RV64-NEXT: [[TMP0:%.*]] = call <vscale x 4 x bfloat> @llvm.riscv.vfnmadd.mask.nxv4bf16.bf16.i64(<vscale x 4 x bfloat> [[VD]], bfloat [[RS1]], <vscale x 4 x bfloat> [[VS2]], <vscale x 4 x i1> [[MASK]], i64 7, i64 [[VL]], i64 1)
+// CHECK-RV64-NEXT: ret <vscale x 4 x bfloat> [[TMP0]]
+//
+vbfloat16m1_t test_vfnmadd_vf_bf16m1_mu(vbool16_t mask, vbfloat16m1_t vd, __bf16 rs1, vbfloat16m1_t vs2, size_t vl) {
+ return __riscv_vfnmadd_mu(mask, vd, rs1, vs2, vl);
+}
+
+// CHECK-RV64-LABEL: define dso_local <vscale x 8 x bfloat> @test_vfnmadd_vv_bf16m2_mu(
+// CHECK-RV64-SAME: <vscale x 8 x i1> [[MASK:%.*]], <vscale x 8 x bfloat> [[VD:%.*]], <vscale x 8 x bfloat> [[VS1:%.*]], <vscale x 8 x bfloat> [[VS2:%.*]], i64 noundef [[VL:%.*]]) #[[ATTR0]] {
+// CHECK-RV64-NEXT: [[ENTRY:.*:]]
+// CHECK-RV64-NEXT: [[TMP0:%.*]] = call <vscale x 8 x bfloat> @llvm.riscv.vfnmadd.mask.nxv8bf16.nxv8bf16.i64(<vscale x 8 x bfloat> [[VD]], <vscale x 8 x bfloat> [[VS1]], <vscale x 8 x bfloat> [[VS2]], <vscale x 8 x i1> [[MASK]], i64 7, i64 [[VL]], i64 1)
+// CHECK-RV64-NEXT: ret <vscale x 8 x bfloat> [[TMP0]]
+//
+vbfloat16m2_t test_vfnmadd_vv_bf16m2_mu(vbool8_t mask, vbfloat16m2_t vd, vbfloat16m2_t vs1, vbfloat16m2_t vs2, size_t vl) {
+ return __riscv_vfnmadd_mu(mask, vd, vs1, vs2, vl);
+}
+
+// CHECK-RV64-LABEL: define dso_local <vscale x 8 x bfloat> @test_vfnmadd_vf_bf16m2_mu(
+// CHECK-RV64-SAME: <vscale x 8 x i1> [[MASK:%.*]], <vscale x 8 x bfloat> [[VD:%.*]], bfloat noundef [[RS1:%.*]], <vscale x 8 x bfloat> [[VS2:%.*]], i64 noundef [[VL:%.*]]) #[[ATTR0]] {
+// CHECK-RV64-NEXT: [[ENTRY:.*:]]
+// CHECK-RV64-NEXT: [[TMP0:%.*]] = call <vscale x 8 x bfloat> @llvm.riscv.vfnmadd.mask.nxv8bf16.bf16.i64(<vscale x 8 x bfloat> [[VD]], bfloat [[RS1]], <vscale x 8 x bfloat> [[VS2]], <vscale x 8 x i1> [[MASK]], i64 7, i64 [[VL]], i64 1)
+// CHECK-RV64-NEXT: ret <vscale x 8 x bfloat> [[TMP0]]
+//
+vbfloat16m2_t test_vfnmadd_vf_bf16m2_mu(vbool8_t mask, vbfloat16m2_t vd, __bf16 rs1, vbfloat16m2_t vs2, size_t vl) {
+ return __riscv_vfnmadd_mu(mask, vd, rs1, vs2, vl);
+}
+
+// CHECK-RV64-LABEL: define dso_local <vscale x 16 x bfloat> @test_vfnmadd_vv_bf16m4_mu(
+// CHECK-RV64-SAME: <vscale x 16 x i1> [[MASK:%.*]], <vscale x 16 x bfloat> [[VD:%.*]], <vscale x 16 x bfloat> [[VS1:%.*]], <vscale x 16 x bfloat> [[VS2:%.*]], i64 noundef [[VL:%.*]]) #[[ATTR0]] {
+// CHECK-RV64-NEXT: [[ENTRY:.*:]]
+// CHECK-RV64-NEXT: [[TMP0:%.*]] = call <vscale x 16 x bfloat> @llvm.riscv.vfnmadd.mask.nxv16bf16.nxv16bf16.i64(<vscale x 16 x bfloat> [[VD]], <vscale x 16 x bfloat> [[VS1]], <vscale x 16 x bfloat> [[VS2]], <vscale x 16 x i1> [[MASK]], i64 7, i64 [[VL]], i64 1)
+// CHECK-RV64-NEXT: ret <vscale x 16 x bfloat> [[TMP0]]
+//
+vbfloat16m4_t test_vfnmadd_vv_bf16m4_mu(vbool4_t mask, vbfloat16m4_t vd, vbfloat16m4_t vs1, vbfloat16m4_t vs2, size_t vl) {
+ return __riscv_vfnmadd_mu(mask, vd, vs1, vs2, vl);
+}
+
+// CHECK-RV64-LABEL: define dso_local <vscale x 16 x bfloat> @test_vfnmadd_vf_bf16m4_mu(
+// CHECK-RV64-SAME: <vscale x 16 x i1> [[MASK:%.*]], <vscale x 16 x bfloat> [[VD:%.*]], bfloat noundef [[RS1:%.*]], <vscale x 16 x bfloat> [[VS2:%.*]], i64 noundef [[VL:%.*]]) #[[ATTR0]] {
+// CHECK-RV64-NEXT: [[ENTRY:.*:]]
+// CHECK-RV64-NEXT: [[TMP0:%.*]] = call <vscale x 16 x bfloat> @llvm.riscv.vfnmadd.mask.nxv16bf16.bf16.i64(<vscale x 16 x bfloat> [[VD]], bfloat [[RS1]], <vscale x 16 x bfloat> [[VS2]], <vscale x 16 x i1> [[MASK]], i64 7, i64 [[VL]], i64 1)
+// CHECK-RV64-NEXT: ret <vscale x 16 x bfloat> [[TMP0]]
+//
+vbfloat16m4_t test_vfnmadd_vf_bf16m4_mu(vbool4_t mask, vbfloat16m4_t vd, __bf16 rs1, vbfloat16m4_t vs2, size_t vl) {
+ return __riscv_vfnmadd_mu(mask, vd, rs1, vs2, vl);
+}
+
+// CHECK-RV64-LABEL: define dso_local <vscale x 32 x bfloat> @test_vfnmadd_vv_bf16m8_mu(
+// CHECK-RV64-SAME: <vscale x 32 x i1> [[MASK:%.*]], <vscale x 32 x bfloat> [[VD:%.*]], <vscale x 32 x bfloat> [[VS1:%.*]], <vscale x 32 x bfloat> [[VS2:%.*]], i64 noundef [[VL:%.*]]) #[[ATTR0]] {
+// CHECK-RV64-NEXT: [[ENTRY:.*:]]
+// CHECK-RV64-NEXT: [[TMP0:%.*]] = call <vscale x 32 x bfloat> @llvm.riscv.vfnmadd.mask.nxv32bf16.nxv32bf16.i64(<vscale x 32 x bfloat> [[VD]], <vscale x 32 x bfloat> [[VS1]], <vscale x 32 x bfloat> [[VS2]], <vscale x 32 x i1> [[MASK]], i64 7, i64 [[VL]], i64 1)
+// CHECK-RV64-NEXT: ret <vscale x 32 x bfloat> [[TMP0]]
+//
+vbfloat16m8_t test_vfnmadd_vv_bf16m8_mu(vbool2_t mask, vbfloat16m8_t vd, vbfloat16m8_t vs1, vbfloat16m8_t vs2, size_t vl) {
+ return __riscv_vfnmadd_mu(mask, vd, vs1, vs2, vl);
+}
+
+// CHECK-RV64-LABEL: define dso_local <vscale x 32 x bfloat> @test_vfnmadd_vf_bf16m8_mu(
+// CHECK-RV64-SAME: <vscale x 32 x i1> [[MASK:%.*]], <vscale x 32 x bfloat> [[VD:%.*]], bfloat noundef [[RS1:%.*]], <vscale x 32 x bfloat> [[VS2:%.*]], i64 noundef [[VL:%.*]]) #[[ATTR0]] {
+// CHECK-RV64-NEXT: [[ENTRY:.*:]]
+// CHECK-RV64-NEXT: [[TMP0:%.*]] = call <vscale x 32 x bfloat> @llvm.riscv.vfnmadd.mask.nxv32bf16.bf16.i64(<vscale x 32 x bfloat> [[VD]], bfloat [[RS1]], <vscale x 32 x bfloat> [[VS2]], <vscale x 32 x i1> [[MASK]], i64 7, i64 [[VL]], i64 1)
+// CHECK-RV64-NEXT: ret <vscale x 32 x bfloat> [[TMP0]]
+//
+vbfloat16m8_t test_vfnmadd_vf_bf16m8_mu(vbool2_t mask, vbfloat16m8_t vd, __bf16 rs1, vbfloat16m8_t vs2, size_t vl) {
+ return __riscv_vfnmadd_mu(mask, vd, rs1, vs2, vl);
+}
+
diff --git a/clang/test/CodeGen/RISCV/rvv-intrinsics-autogenerated/zvfbfa/policy/overloaded/vfnmsac.c b/clang/test/CodeGen/RISCV/rvv-intrinsics-autogenerated/zvfbfa/policy/overloaded/vfnmsac.c
new file mode 100644
index 0000000..83d35e8
--- /dev/null
+++ b/clang/test/CodeGen/RISCV/rvv-intrinsics-autogenerated/zvfbfa/policy/overloaded/vfnmsac.c
@@ -0,0 +1,489 @@
+// NOTE: Assertions have been autogenerated by utils/update_cc_test_checks.py UTC_ARGS: --version 5
+// REQUIRES: riscv-registered-target
+// RUN: %clang_cc1 -triple riscv64 -target-feature +v \
+// RUN: -target-feature +experimental-zvfbfa -disable-O0-optnone \
+// RUN: -emit-llvm %s -o - | opt -S -passes=mem2reg | \
+// RUN: FileCheck --check-prefix=CHECK-RV64 %s
+
+#include <riscv_vector.h>
+
+// CHECK-RV64-LABEL: define dso_local <vscale x 1 x bfloat> @test_vfnmsac_vv_bf16mf4_tu(
+// CHECK-RV64-SAME: <vscale x 1 x bfloat> [[VD:%.*]], <vscale x 1 x bfloat> [[VS1:%.*]], <vscale x 1 x bfloat> [[VS2:%.*]], i64 noundef [[VL:%.*]]) #[[ATTR0:[0-9]+]] {
+// CHECK-RV64-NEXT: [[ENTRY:.*:]]
+// CHECK-RV64-NEXT: [[TMP0:%.*]] = call <vscale x 1 x bfloat> @llvm.riscv.vfnmsac.nxv1bf16.nxv1bf16.i64(<vscale x 1 x bfloat> [[VD]], <vscale x 1 x bfloat> [[VS1]], <vscale x 1 x bfloat> [[VS2]], i64 7, i64 [[VL]], i64 2)
+// CHECK-RV64-NEXT: ret <vscale x 1 x bfloat> [[TMP0]]
+//
+vbfloat16mf4_t test_vfnmsac_vv_bf16mf4_tu(vbfloat16mf4_t vd, vbfloat16mf4_t vs1, vbfloat16mf4_t vs2, size_t vl) {
+ return __riscv_vfnmsac_tu(vd, vs1, vs2, vl);
+}
+
+// CHECK-RV64-LABEL: define dso_local <vscale x 1 x bfloat> @test_vfnmsac_vf_bf16mf4_tu(
+// CHECK-RV64-SAME: <vscale x 1 x bfloat> [[VD:%.*]], bfloat noundef [[RS1:%.*]], <vscale x 1 x bfloat> [[VS2:%.*]], i64 noundef [[VL:%.*]]) #[[ATTR0]] {
+// CHECK-RV64-NEXT: [[ENTRY:.*:]]
+// CHECK-RV64-NEXT: [[TMP0:%.*]] = call <vscale x 1 x bfloat> @llvm.riscv.vfnmsac.nxv1bf16.bf16.i64(<vscale x 1 x bfloat> [[VD]], bfloat [[RS1]], <vscale x 1 x bfloat> [[VS2]], i64 7, i64 [[VL]], i64 2)
+// CHECK-RV64-NEXT: ret <vscale x 1 x bfloat> [[TMP0]]
+//
+vbfloat16mf4_t test_vfnmsac_vf_bf16mf4_tu(vbfloat16mf4_t vd, __bf16 rs1, vbfloat16mf4_t vs2, size_t vl) {
+ return __riscv_vfnmsac_tu(vd, rs1, vs2, vl);
+}
+
+// CHECK-RV64-LABEL: define dso_local <vscale x 2 x bfloat> @test_vfnmsac_vv_bf16mf2_tu(
+// CHECK-RV64-SAME: <vscale x 2 x bfloat> [[VD:%.*]], <vscale x 2 x bfloat> [[VS1:%.*]], <vscale x 2 x bfloat> [[VS2:%.*]], i64 noundef [[VL:%.*]]) #[[ATTR0]] {
+// CHECK-RV64-NEXT: [[ENTRY:.*:]]
+// CHECK-RV64-NEXT: [[TMP0:%.*]] = call <vscale x 2 x bfloat> @llvm.riscv.vfnmsac.nxv2bf16.nxv2bf16.i64(<vscale x 2 x bfloat> [[VD]], <vscale x 2 x bfloat> [[VS1]], <vscale x 2 x bfloat> [[VS2]], i64 7, i64 [[VL]], i64 2)
+// CHECK-RV64-NEXT: ret <vscale x 2 x bfloat> [[TMP0]]
+//
+vbfloat16mf2_t test_vfnmsac_vv_bf16mf2_tu(vbfloat16mf2_t vd, vbfloat16mf2_t vs1, vbfloat16mf2_t vs2, size_t vl) {
+ return __riscv_vfnmsac_tu(vd, vs1, vs2, vl);
+}
+
+// CHECK-RV64-LABEL: define dso_local <vscale x 2 x bfloat> @test_vfnmsac_vf_bf16mf2_tu(
+// CHECK-RV64-SAME: <vscale x 2 x bfloat> [[VD:%.*]], bfloat noundef [[RS1:%.*]], <vscale x 2 x bfloat> [[VS2:%.*]], i64 noundef [[VL:%.*]]) #[[ATTR0]] {
+// CHECK-RV64-NEXT: [[ENTRY:.*:]]
+// CHECK-RV64-NEXT: [[TMP0:%.*]] = call <vscale x 2 x bfloat> @llvm.riscv.vfnmsac.nxv2bf16.bf16.i64(<vscale x 2 x bfloat> [[VD]], bfloat [[RS1]], <vscale x 2 x bfloat> [[VS2]], i64 7, i64 [[VL]], i64 2)
+// CHECK-RV64-NEXT: ret <vscale x 2 x bfloat> [[TMP0]]
+//
+vbfloat16mf2_t test_vfnmsac_vf_bf16mf2_tu(vbfloat16mf2_t vd, __bf16 rs1, vbfloat16mf2_t vs2, size_t vl) {
+ return __riscv_vfnmsac_tu(vd, rs1, vs2, vl);
+}
+
+// CHECK-RV64-LABEL: define dso_local <vscale x 4 x bfloat> @test_vfnmsac_vv_bf16m1_tu(
+// CHECK-RV64-SAME: <vscale x 4 x bfloat> [[VD:%.*]], <vscale x 4 x bfloat> [[VS1:%.*]], <vscale x 4 x bfloat> [[VS2:%.*]], i64 noundef [[VL:%.*]]) #[[ATTR0]] {
+// CHECK-RV64-NEXT: [[ENTRY:.*:]]
+// CHECK-RV64-NEXT: [[TMP0:%.*]] = call <vscale x 4 x bfloat> @llvm.riscv.vfnmsac.nxv4bf16.nxv4bf16.i64(<vscale x 4 x bfloat> [[VD]], <vscale x 4 x bfloat> [[VS1]], <vscale x 4 x bfloat> [[VS2]], i64 7, i64 [[VL]], i64 2)
+// CHECK-RV64-NEXT: ret <vscale x 4 x bfloat> [[TMP0]]
+//
+vbfloat16m1_t test_vfnmsac_vv_bf16m1_tu(vbfloat16m1_t vd, vbfloat16m1_t vs1, vbfloat16m1_t vs2, size_t vl) {
+ return __riscv_vfnmsac_tu(vd, vs1, vs2, vl);
+}
+
+// CHECK-RV64-LABEL: define dso_local <vscale x 4 x bfloat> @test_vfnmsac_vf_bf16m1_tu(
+// CHECK-RV64-SAME: <vscale x 4 x bfloat> [[VD:%.*]], bfloat noundef [[RS1:%.*]], <vscale x 4 x bfloat> [[VS2:%.*]], i64 noundef [[VL:%.*]]) #[[ATTR0]] {
+// CHECK-RV64-NEXT: [[ENTRY:.*:]]
+// CHECK-RV64-NEXT: [[TMP0:%.*]] = call <vscale x 4 x bfloat> @llvm.riscv.vfnmsac.nxv4bf16.bf16.i64(<vscale x 4 x bfloat> [[VD]], bfloat [[RS1]], <vscale x 4 x bfloat> [[VS2]], i64 7, i64 [[VL]], i64 2)
+// CHECK-RV64-NEXT: ret <vscale x 4 x bfloat> [[TMP0]]
+//
+vbfloat16m1_t test_vfnmsac_vf_bf16m1_tu(vbfloat16m1_t vd, __bf16 rs1, vbfloat16m1_t vs2, size_t vl) {
+ return __riscv_vfnmsac_tu(vd, rs1, vs2, vl);
+}
+
+// CHECK-RV64-LABEL: define dso_local <vscale x 8 x bfloat> @test_vfnmsac_vv_bf16m2_tu(
+// CHECK-RV64-SAME: <vscale x 8 x bfloat> [[VD:%.*]], <vscale x 8 x bfloat> [[VS1:%.*]], <vscale x 8 x bfloat> [[VS2:%.*]], i64 noundef [[VL:%.*]]) #[[ATTR0]] {
+// CHECK-RV64-NEXT: [[ENTRY:.*:]]
+// CHECK-RV64-NEXT: [[TMP0:%.*]] = call <vscale x 8 x bfloat> @llvm.riscv.vfnmsac.nxv8bf16.nxv8bf16.i64(<vscale x 8 x bfloat> [[VD]], <vscale x 8 x bfloat> [[VS1]], <vscale x 8 x bfloat> [[VS2]], i64 7, i64 [[VL]], i64 2)
+// CHECK-RV64-NEXT: ret <vscale x 8 x bfloat> [[TMP0]]
+//
+vbfloat16m2_t test_vfnmsac_vv_bf16m2_tu(vbfloat16m2_t vd, vbfloat16m2_t vs1, vbfloat16m2_t vs2, size_t vl) {
+ return __riscv_vfnmsac_tu(vd, vs1, vs2, vl);
+}
+
+// CHECK-RV64-LABEL: define dso_local <vscale x 8 x bfloat> @test_vfnmsac_vf_bf16m2_tu(
+// CHECK-RV64-SAME: <vscale x 8 x bfloat> [[VD:%.*]], bfloat noundef [[RS1:%.*]], <vscale x 8 x bfloat> [[VS2:%.*]], i64 noundef [[VL:%.*]]) #[[ATTR0]] {
+// CHECK-RV64-NEXT: [[ENTRY:.*:]]
+// CHECK-RV64-NEXT: [[TMP0:%.*]] = call <vscale x 8 x bfloat> @llvm.riscv.vfnmsac.nxv8bf16.bf16.i64(<vscale x 8 x bfloat> [[VD]], bfloat [[RS1]], <vscale x 8 x bfloat> [[VS2]], i64 7, i64 [[VL]], i64 2)
+// CHECK-RV64-NEXT: ret <vscale x 8 x bfloat> [[TMP0]]
+//
+vbfloat16m2_t test_vfnmsac_vf_bf16m2_tu(vbfloat16m2_t vd, __bf16 rs1, vbfloat16m2_t vs2, size_t vl) {
+ return __riscv_vfnmsac_tu(vd, rs1, vs2, vl);
+}
+
+// CHECK-RV64-LABEL: define dso_local <vscale x 16 x bfloat> @test_vfnmsac_vv_bf16m4_tu(
+// CHECK-RV64-SAME: <vscale x 16 x bfloat> [[VD:%.*]], <vscale x 16 x bfloat> [[VS1:%.*]], <vscale x 16 x bfloat> [[VS2:%.*]], i64 noundef [[VL:%.*]]) #[[ATTR0]] {
+// CHECK-RV64-NEXT: [[ENTRY:.*:]]
+// CHECK-RV64-NEXT: [[TMP0:%.*]] = call <vscale x 16 x bfloat> @llvm.riscv.vfnmsac.nxv16bf16.nxv16bf16.i64(<vscale x 16 x bfloat> [[VD]], <vscale x 16 x bfloat> [[VS1]], <vscale x 16 x bfloat> [[VS2]], i64 7, i64 [[VL]], i64 2)
+// CHECK-RV64-NEXT: ret <vscale x 16 x bfloat> [[TMP0]]
+//
+vbfloat16m4_t test_vfnmsac_vv_bf16m4_tu(vbfloat16m4_t vd, vbfloat16m4_t vs1, vbfloat16m4_t vs2, size_t vl) {
+ return __riscv_vfnmsac_tu(vd, vs1, vs2, vl);
+}
+
+// CHECK-RV64-LABEL: define dso_local <vscale x 16 x bfloat> @test_vfnmsac_vf_bf16m4_tu(
+// CHECK-RV64-SAME: <vscale x 16 x bfloat> [[VD:%.*]], bfloat noundef [[RS1:%.*]], <vscale x 16 x bfloat> [[VS2:%.*]], i64 noundef [[VL:%.*]]) #[[ATTR0]] {
+// CHECK-RV64-NEXT: [[ENTRY:.*:]]
+// CHECK-RV64-NEXT: [[TMP0:%.*]] = call <vscale x 16 x bfloat> @llvm.riscv.vfnmsac.nxv16bf16.bf16.i64(<vscale x 16 x bfloat> [[VD]], bfloat [[RS1]], <vscale x 16 x bfloat> [[VS2]], i64 7, i64 [[VL]], i64 2)
+// CHECK-RV64-NEXT: ret <vscale x 16 x bfloat> [[TMP0]]
+//
+vbfloat16m4_t test_vfnmsac_vf_bf16m4_tu(vbfloat16m4_t vd, __bf16 rs1, vbfloat16m4_t vs2, size_t vl) {
+ return __riscv_vfnmsac_tu(vd, rs1, vs2, vl);
+}
+
+// CHECK-RV64-LABEL: define dso_local <vscale x 32 x bfloat> @test_vfnmsac_vv_bf16m8_tu(
+// CHECK-RV64-SAME: <vscale x 32 x bfloat> [[VD:%.*]], <vscale x 32 x bfloat> [[VS1:%.*]], <vscale x 32 x bfloat> [[VS2:%.*]], i64 noundef [[VL:%.*]]) #[[ATTR0]] {
+// CHECK-RV64-NEXT: [[ENTRY:.*:]]
+// CHECK-RV64-NEXT: [[TMP0:%.*]] = call <vscale x 32 x bfloat> @llvm.riscv.vfnmsac.nxv32bf16.nxv32bf16.i64(<vscale x 32 x bfloat> [[VD]], <vscale x 32 x bfloat> [[VS1]], <vscale x 32 x bfloat> [[VS2]], i64 7, i64 [[VL]], i64 2)
+// CHECK-RV64-NEXT: ret <vscale x 32 x bfloat> [[TMP0]]
+//
+vbfloat16m8_t test_vfnmsac_vv_bf16m8_tu(vbfloat16m8_t vd, vbfloat16m8_t vs1, vbfloat16m8_t vs2, size_t vl) {
+ return __riscv_vfnmsac_tu(vd, vs1, vs2, vl);
+}
+
+// CHECK-RV64-LABEL: define dso_local <vscale x 32 x bfloat> @test_vfnmsac_vf_bf16m8_tu(
+// CHECK-RV64-SAME: <vscale x 32 x bfloat> [[VD:%.*]], bfloat noundef [[RS1:%.*]], <vscale x 32 x bfloat> [[VS2:%.*]], i64 noundef [[VL:%.*]]) #[[ATTR0]] {
+// CHECK-RV64-NEXT: [[ENTRY:.*:]]
+// CHECK-RV64-NEXT: [[TMP0:%.*]] = call <vscale x 32 x bfloat> @llvm.riscv.vfnmsac.nxv32bf16.bf16.i64(<vscale x 32 x bfloat> [[VD]], bfloat [[RS1]], <vscale x 32 x bfloat> [[VS2]], i64 7, i64 [[VL]], i64 2)
+// CHECK-RV64-NEXT: ret <vscale x 32 x bfloat> [[TMP0]]
+//
+vbfloat16m8_t test_vfnmsac_vf_bf16m8_tu(vbfloat16m8_t vd, __bf16 rs1, vbfloat16m8_t vs2, size_t vl) {
+ return __riscv_vfnmsac_tu(vd, rs1, vs2, vl);
+}
+
+// CHECK-RV64-LABEL: define dso_local <vscale x 1 x bfloat> @test_vfnmsac_vv_bf16mf4_tum(
+// CHECK-RV64-SAME: <vscale x 1 x i1> [[MASK:%.*]], <vscale x 1 x bfloat> [[VD:%.*]], <vscale x 1 x bfloat> [[VS1:%.*]], <vscale x 1 x bfloat> [[VS2:%.*]], i64 noundef [[VL:%.*]]) #[[ATTR0]] {
+// CHECK-RV64-NEXT: [[ENTRY:.*:]]
+// CHECK-RV64-NEXT: [[TMP0:%.*]] = call <vscale x 1 x bfloat> @llvm.riscv.vfnmsac.mask.nxv1bf16.nxv1bf16.i64(<vscale x 1 x bfloat> [[VD]], <vscale x 1 x bfloat> [[VS1]], <vscale x 1 x bfloat> [[VS2]], <vscale x 1 x i1> [[MASK]], i64 7, i64 [[VL]], i64 2)
+// CHECK-RV64-NEXT: ret <vscale x 1 x bfloat> [[TMP0]]
+//
+vbfloat16mf4_t test_vfnmsac_vv_bf16mf4_tum(vbool64_t mask, vbfloat16mf4_t vd, vbfloat16mf4_t vs1, vbfloat16mf4_t vs2, size_t vl) {
+ return __riscv_vfnmsac_tum(mask, vd, vs1, vs2, vl);
+}
+
+// CHECK-RV64-LABEL: define dso_local <vscale x 1 x bfloat> @test_vfnmsac_vf_bf16mf4_tum(
+// CHECK-RV64-SAME: <vscale x 1 x i1> [[MASK:%.*]], <vscale x 1 x bfloat> [[VD:%.*]], bfloat noundef [[RS1:%.*]], <vscale x 1 x bfloat> [[VS2:%.*]], i64 noundef [[VL:%.*]]) #[[ATTR0]] {
+// CHECK-RV64-NEXT: [[ENTRY:.*:]]
+// CHECK-RV64-NEXT: [[TMP0:%.*]] = call <vscale x 1 x bfloat> @llvm.riscv.vfnmsac.mask.nxv1bf16.bf16.i64(<vscale x 1 x bfloat> [[VD]], bfloat [[RS1]], <vscale x 1 x bfloat> [[VS2]], <vscale x 1 x i1> [[MASK]], i64 7, i64 [[VL]], i64 2)
+// CHECK-RV64-NEXT: ret <vscale x 1 x bfloat> [[TMP0]]
+//
+vbfloat16mf4_t test_vfnmsac_vf_bf16mf4_tum(vbool64_t mask, vbfloat16mf4_t vd, __bf16 rs1, vbfloat16mf4_t vs2, size_t vl) {
+ return __riscv_vfnmsac_tum(mask, vd, rs1, vs2, vl);
+}
+
+// CHECK-RV64-LABEL: define dso_local <vscale x 2 x bfloat> @test_vfnmsac_vv_bf16mf2_tum(
+// CHECK-RV64-SAME: <vscale x 2 x i1> [[MASK:%.*]], <vscale x 2 x bfloat> [[VD:%.*]], <vscale x 2 x bfloat> [[VS1:%.*]], <vscale x 2 x bfloat> [[VS2:%.*]], i64 noundef [[VL:%.*]]) #[[ATTR0]] {
+// CHECK-RV64-NEXT: [[ENTRY:.*:]]
+// CHECK-RV64-NEXT: [[TMP0:%.*]] = call <vscale x 2 x bfloat> @llvm.riscv.vfnmsac.mask.nxv2bf16.nxv2bf16.i64(<vscale x 2 x bfloat> [[VD]], <vscale x 2 x bfloat> [[VS1]], <vscale x 2 x bfloat> [[VS2]], <vscale x 2 x i1> [[MASK]], i64 7, i64 [[VL]], i64 2)
+// CHECK-RV64-NEXT: ret <vscale x 2 x bfloat> [[TMP0]]
+//
+vbfloat16mf2_t test_vfnmsac_vv_bf16mf2_tum(vbool32_t mask, vbfloat16mf2_t vd, vbfloat16mf2_t vs1, vbfloat16mf2_t vs2, size_t vl) {
+ return __riscv_vfnmsac_tum(mask, vd, vs1, vs2, vl);
+}
+
+// CHECK-RV64-LABEL: define dso_local <vscale x 2 x bfloat> @test_vfnmsac_vf_bf16mf2_tum(
+// CHECK-RV64-SAME: <vscale x 2 x i1> [[MASK:%.*]], <vscale x 2 x bfloat> [[VD:%.*]], bfloat noundef [[RS1:%.*]], <vscale x 2 x bfloat> [[VS2:%.*]], i64 noundef [[VL:%.*]]) #[[ATTR0]] {
+// CHECK-RV64-NEXT: [[ENTRY:.*:]]
+// CHECK-RV64-NEXT: [[TMP0:%.*]] = call <vscale x 2 x bfloat> @llvm.riscv.vfnmsac.mask.nxv2bf16.bf16.i64(<vscale x 2 x bfloat> [[VD]], bfloat [[RS1]], <vscale x 2 x bfloat> [[VS2]], <vscale x 2 x i1> [[MASK]], i64 7, i64 [[VL]], i64 2)
+// CHECK-RV64-NEXT: ret <vscale x 2 x bfloat> [[TMP0]]
+//
+vbfloat16mf2_t test_vfnmsac_vf_bf16mf2_tum(vbool32_t mask, vbfloat16mf2_t vd, __bf16 rs1, vbfloat16mf2_t vs2, size_t vl) {
+ return __riscv_vfnmsac_tum(mask, vd, rs1, vs2, vl);
+}
+
+// CHECK-RV64-LABEL: define dso_local <vscale x 4 x bfloat> @test_vfnmsac_vv_bf16m1_tum(
+// CHECK-RV64-SAME: <vscale x 4 x i1> [[MASK:%.*]], <vscale x 4 x bfloat> [[VD:%.*]], <vscale x 4 x bfloat> [[VS1:%.*]], <vscale x 4 x bfloat> [[VS2:%.*]], i64 noundef [[VL:%.*]]) #[[ATTR0]] {
+// CHECK-RV64-NEXT: [[ENTRY:.*:]]
+// CHECK-RV64-NEXT: [[TMP0:%.*]] = call <vscale x 4 x bfloat> @llvm.riscv.vfnmsac.mask.nxv4bf16.nxv4bf16.i64(<vscale x 4 x bfloat> [[VD]], <vscale x 4 x bfloat> [[VS1]], <vscale x 4 x bfloat> [[VS2]], <vscale x 4 x i1> [[MASK]], i64 7, i64 [[VL]], i64 2)
+// CHECK-RV64-NEXT: ret <vscale x 4 x bfloat> [[TMP0]]
+//
+vbfloat16m1_t test_vfnmsac_vv_bf16m1_tum(vbool16_t mask, vbfloat16m1_t vd, vbfloat16m1_t vs1, vbfloat16m1_t vs2, size_t vl) {
+ return __riscv_vfnmsac_tum(mask, vd, vs1, vs2, vl);
+}
+
+// CHECK-RV64-LABEL: define dso_local <vscale x 4 x bfloat> @test_vfnmsac_vf_bf16m1_tum(
+// CHECK-RV64-SAME: <vscale x 4 x i1> [[MASK:%.*]], <vscale x 4 x bfloat> [[VD:%.*]], bfloat noundef [[RS1:%.*]], <vscale x 4 x bfloat> [[VS2:%.*]], i64 noundef [[VL:%.*]]) #[[ATTR0]] {
+// CHECK-RV64-NEXT: [[ENTRY:.*:]]
+// CHECK-RV64-NEXT: [[TMP0:%.*]] = call <vscale x 4 x bfloat> @llvm.riscv.vfnmsac.mask.nxv4bf16.bf16.i64(<vscale x 4 x bfloat> [[VD]], bfloat [[RS1]], <vscale x 4 x bfloat> [[VS2]], <vscale x 4 x i1> [[MASK]], i64 7, i64 [[VL]], i64 2)
+// CHECK-RV64-NEXT: ret <vscale x 4 x bfloat> [[TMP0]]
+//
+vbfloat16m1_t test_vfnmsac_vf_bf16m1_tum(vbool16_t mask, vbfloat16m1_t vd, __bf16 rs1, vbfloat16m1_t vs2, size_t vl) {
+ return __riscv_vfnmsac_tum(mask, vd, rs1, vs2, vl);
+}
+
+// CHECK-RV64-LABEL: define dso_local <vscale x 8 x bfloat> @test_vfnmsac_vv_bf16m2_tum(
+// CHECK-RV64-SAME: <vscale x 8 x i1> [[MASK:%.*]], <vscale x 8 x bfloat> [[VD:%.*]], <vscale x 8 x bfloat> [[VS1:%.*]], <vscale x 8 x bfloat> [[VS2:%.*]], i64 noundef [[VL:%.*]]) #[[ATTR0]] {
+// CHECK-RV64-NEXT: [[ENTRY:.*:]]
+// CHECK-RV64-NEXT: [[TMP0:%.*]] = call <vscale x 8 x bfloat> @llvm.riscv.vfnmsac.mask.nxv8bf16.nxv8bf16.i64(<vscale x 8 x bfloat> [[VD]], <vscale x 8 x bfloat> [[VS1]], <vscale x 8 x bfloat> [[VS2]], <vscale x 8 x i1> [[MASK]], i64 7, i64 [[VL]], i64 2)
+// CHECK-RV64-NEXT: ret <vscale x 8 x bfloat> [[TMP0]]
+//
+vbfloat16m2_t test_vfnmsac_vv_bf16m2_tum(vbool8_t mask, vbfloat16m2_t vd, vbfloat16m2_t vs1, vbfloat16m2_t vs2, size_t vl) {
+ return __riscv_vfnmsac_tum(mask, vd, vs1, vs2, vl);
+}
+
+// CHECK-RV64-LABEL: define dso_local <vscale x 8 x bfloat> @test_vfnmsac_vf_bf16m2_tum(
+// CHECK-RV64-SAME: <vscale x 8 x i1> [[MASK:%.*]], <vscale x 8 x bfloat> [[VD:%.*]], bfloat noundef [[RS1:%.*]], <vscale x 8 x bfloat> [[VS2:%.*]], i64 noundef [[VL:%.*]]) #[[ATTR0]] {
+// CHECK-RV64-NEXT: [[ENTRY:.*:]]
+// CHECK-RV64-NEXT: [[TMP0:%.*]] = call <vscale x 8 x bfloat> @llvm.riscv.vfnmsac.mask.nxv8bf16.bf16.i64(<vscale x 8 x bfloat> [[VD]], bfloat [[RS1]], <vscale x 8 x bfloat> [[VS2]], <vscale x 8 x i1> [[MASK]], i64 7, i64 [[VL]], i64 2)
+// CHECK-RV64-NEXT: ret <vscale x 8 x bfloat> [[TMP0]]
+//
+vbfloat16m2_t test_vfnmsac_vf_bf16m2_tum(vbool8_t mask, vbfloat16m2_t vd, __bf16 rs1, vbfloat16m2_t vs2, size_t vl) {
+ return __riscv_vfnmsac_tum(mask, vd, rs1, vs2, vl);
+}
+
+// CHECK-RV64-LABEL: define dso_local <vscale x 16 x bfloat> @test_vfnmsac_vv_bf16m4_tum(
+// CHECK-RV64-SAME: <vscale x 16 x i1> [[MASK:%.*]], <vscale x 16 x bfloat> [[VD:%.*]], <vscale x 16 x bfloat> [[VS1:%.*]], <vscale x 16 x bfloat> [[VS2:%.*]], i64 noundef [[VL:%.*]]) #[[ATTR0]] {
+// CHECK-RV64-NEXT: [[ENTRY:.*:]]
+// CHECK-RV64-NEXT: [[TMP0:%.*]] = call <vscale x 16 x bfloat> @llvm.riscv.vfnmsac.mask.nxv16bf16.nxv16bf16.i64(<vscale x 16 x bfloat> [[VD]], <vscale x 16 x bfloat> [[VS1]], <vscale x 16 x bfloat> [[VS2]], <vscale x 16 x i1> [[MASK]], i64 7, i64 [[VL]], i64 2)
+// CHECK-RV64-NEXT: ret <vscale x 16 x bfloat> [[TMP0]]
+//
+vbfloat16m4_t test_vfnmsac_vv_bf16m4_tum(vbool4_t mask, vbfloat16m4_t vd, vbfloat16m4_t vs1, vbfloat16m4_t vs2, size_t vl) {
+ return __riscv_vfnmsac_tum(mask, vd, vs1, vs2, vl);
+}
+
+// CHECK-RV64-LABEL: define dso_local <vscale x 16 x bfloat> @test_vfnmsac_vf_bf16m4_tum(
+// CHECK-RV64-SAME: <vscale x 16 x i1> [[MASK:%.*]], <vscale x 16 x bfloat> [[VD:%.*]], bfloat noundef [[RS1:%.*]], <vscale x 16 x bfloat> [[VS2:%.*]], i64 noundef [[VL:%.*]]) #[[ATTR0]] {
+// CHECK-RV64-NEXT: [[ENTRY:.*:]]
+// CHECK-RV64-NEXT: [[TMP0:%.*]] = call <vscale x 16 x bfloat> @llvm.riscv.vfnmsac.mask.nxv16bf16.bf16.i64(<vscale x 16 x bfloat> [[VD]], bfloat [[RS1]], <vscale x 16 x bfloat> [[VS2]], <vscale x 16 x i1> [[MASK]], i64 7, i64 [[VL]], i64 2)
+// CHECK-RV64-NEXT: ret <vscale x 16 x bfloat> [[TMP0]]
+//
+vbfloat16m4_t test_vfnmsac_vf_bf16m4_tum(vbool4_t mask, vbfloat16m4_t vd, __bf16 rs1, vbfloat16m4_t vs2, size_t vl) {
+ return __riscv_vfnmsac_tum(mask, vd, rs1, vs2, vl);
+}
+
+// CHECK-RV64-LABEL: define dso_local <vscale x 32 x bfloat> @test_vfnmsac_vv_bf16m8_tum(
+// CHECK-RV64-SAME: <vscale x 32 x i1> [[MASK:%.*]], <vscale x 32 x bfloat> [[VD:%.*]], <vscale x 32 x bfloat> [[VS1:%.*]], <vscale x 32 x bfloat> [[VS2:%.*]], i64 noundef [[VL:%.*]]) #[[ATTR0]] {
+// CHECK-RV64-NEXT: [[ENTRY:.*:]]
+// CHECK-RV64-NEXT: [[TMP0:%.*]] = call <vscale x 32 x bfloat> @llvm.riscv.vfnmsac.mask.nxv32bf16.nxv32bf16.i64(<vscale x 32 x bfloat> [[VD]], <vscale x 32 x bfloat> [[VS1]], <vscale x 32 x bfloat> [[VS2]], <vscale x 32 x i1> [[MASK]], i64 7, i64 [[VL]], i64 2)
+// CHECK-RV64-NEXT: ret <vscale x 32 x bfloat> [[TMP0]]
+//
+vbfloat16m8_t test_vfnmsac_vv_bf16m8_tum(vbool2_t mask, vbfloat16m8_t vd, vbfloat16m8_t vs1, vbfloat16m8_t vs2, size_t vl) {
+ return __riscv_vfnmsac_tum(mask, vd, vs1, vs2, vl);
+}
+
+// CHECK-RV64-LABEL: define dso_local <vscale x 32 x bfloat> @test_vfnmsac_vf_bf16m8_tum(
+// CHECK-RV64-SAME: <vscale x 32 x i1> [[MASK:%.*]], <vscale x 32 x bfloat> [[VD:%.*]], bfloat noundef [[RS1:%.*]], <vscale x 32 x bfloat> [[VS2:%.*]], i64 noundef [[VL:%.*]]) #[[ATTR0]] {
+// CHECK-RV64-NEXT: [[ENTRY:.*:]]
+// CHECK-RV64-NEXT: [[TMP0:%.*]] = call <vscale x 32 x bfloat> @llvm.riscv.vfnmsac.mask.nxv32bf16.bf16.i64(<vscale x 32 x bfloat> [[VD]], bfloat [[RS1]], <vscale x 32 x bfloat> [[VS2]], <vscale x 32 x i1> [[MASK]], i64 7, i64 [[VL]], i64 2)
+// CHECK-RV64-NEXT: ret <vscale x 32 x bfloat> [[TMP0]]
+//
+vbfloat16m8_t test_vfnmsac_vf_bf16m8_tum(vbool2_t mask, vbfloat16m8_t vd, __bf16 rs1, vbfloat16m8_t vs2, size_t vl) {
+ return __riscv_vfnmsac_tum(mask, vd, rs1, vs2, vl);
+}
+
+// CHECK-RV64-LABEL: define dso_local <vscale x 1 x bfloat> @test_vfnmsac_vv_bf16mf4_tumu(
+// CHECK-RV64-SAME: <vscale x 1 x i1> [[MASK:%.*]], <vscale x 1 x bfloat> [[VD:%.*]], <vscale x 1 x bfloat> [[VS1:%.*]], <vscale x 1 x bfloat> [[VS2:%.*]], i64 noundef [[VL:%.*]]) #[[ATTR0]] {
+// CHECK-RV64-NEXT: [[ENTRY:.*:]]
+// CHECK-RV64-NEXT: [[TMP0:%.*]] = call <vscale x 1 x bfloat> @llvm.riscv.vfnmsac.mask.nxv1bf16.nxv1bf16.i64(<vscale x 1 x bfloat> [[VD]], <vscale x 1 x bfloat> [[VS1]], <vscale x 1 x bfloat> [[VS2]], <vscale x 1 x i1> [[MASK]], i64 7, i64 [[VL]], i64 0)
+// CHECK-RV64-NEXT: ret <vscale x 1 x bfloat> [[TMP0]]
+//
+vbfloat16mf4_t test_vfnmsac_vv_bf16mf4_tumu(vbool64_t mask, vbfloat16mf4_t vd, vbfloat16mf4_t vs1, vbfloat16mf4_t vs2, size_t vl) {
+ return __riscv_vfnmsac_tumu(mask, vd, vs1, vs2, vl);
+}
+
+// CHECK-RV64-LABEL: define dso_local <vscale x 1 x bfloat> @test_vfnmsac_vf_bf16mf4_tumu(
+// CHECK-RV64-SAME: <vscale x 1 x i1> [[MASK:%.*]], <vscale x 1 x bfloat> [[VD:%.*]], bfloat noundef [[RS1:%.*]], <vscale x 1 x bfloat> [[VS2:%.*]], i64 noundef [[VL:%.*]]) #[[ATTR0]] {
+// CHECK-RV64-NEXT: [[ENTRY:.*:]]
+// CHECK-RV64-NEXT: [[TMP0:%.*]] = call <vscale x 1 x bfloat> @llvm.riscv.vfnmsac.mask.nxv1bf16.bf16.i64(<vscale x 1 x bfloat> [[VD]], bfloat [[RS1]], <vscale x 1 x bfloat> [[VS2]], <vscale x 1 x i1> [[MASK]], i64 7, i64 [[VL]], i64 0)
+// CHECK-RV64-NEXT: ret <vscale x 1 x bfloat> [[TMP0]]
+//
+vbfloat16mf4_t test_vfnmsac_vf_bf16mf4_tumu(vbool64_t mask, vbfloat16mf4_t vd, __bf16 rs1, vbfloat16mf4_t vs2, size_t vl) {
+ return __riscv_vfnmsac_tumu(mask, vd, rs1, vs2, vl);
+}
+
+// CHECK-RV64-LABEL: define dso_local <vscale x 2 x bfloat> @test_vfnmsac_vv_bf16mf2_tumu(
+// CHECK-RV64-SAME: <vscale x 2 x i1> [[MASK:%.*]], <vscale x 2 x bfloat> [[VD:%.*]], <vscale x 2 x bfloat> [[VS1:%.*]], <vscale x 2 x bfloat> [[VS2:%.*]], i64 noundef [[VL:%.*]]) #[[ATTR0]] {
+// CHECK-RV64-NEXT: [[ENTRY:.*:]]
+// CHECK-RV64-NEXT: [[TMP0:%.*]] = call <vscale x 2 x bfloat> @llvm.riscv.vfnmsac.mask.nxv2bf16.nxv2bf16.i64(<vscale x 2 x bfloat> [[VD]], <vscale x 2 x bfloat> [[VS1]], <vscale x 2 x bfloat> [[VS2]], <vscale x 2 x i1> [[MASK]], i64 7, i64 [[VL]], i64 0)
+// CHECK-RV64-NEXT: ret <vscale x 2 x bfloat> [[TMP0]]
+//
+vbfloat16mf2_t test_vfnmsac_vv_bf16mf2_tumu(vbool32_t mask, vbfloat16mf2_t vd, vbfloat16mf2_t vs1, vbfloat16mf2_t vs2, size_t vl) {
+ return __riscv_vfnmsac_tumu(mask, vd, vs1, vs2, vl);
+}
+
+// CHECK-RV64-LABEL: define dso_local <vscale x 2 x bfloat> @test_vfnmsac_vf_bf16mf2_tumu(
+// CHECK-RV64-SAME: <vscale x 2 x i1> [[MASK:%.*]], <vscale x 2 x bfloat> [[VD:%.*]], bfloat noundef [[RS1:%.*]], <vscale x 2 x bfloat> [[VS2:%.*]], i64 noundef [[VL:%.*]]) #[[ATTR0]] {
+// CHECK-RV64-NEXT: [[ENTRY:.*:]]
+// CHECK-RV64-NEXT: [[TMP0:%.*]] = call <vscale x 2 x bfloat> @llvm.riscv.vfnmsac.mask.nxv2bf16.bf16.i64(<vscale x 2 x bfloat> [[VD]], bfloat [[RS1]], <vscale x 2 x bfloat> [[VS2]], <vscale x 2 x i1> [[MASK]], i64 7, i64 [[VL]], i64 0)
+// CHECK-RV64-NEXT: ret <vscale x 2 x bfloat> [[TMP0]]
+//
+vbfloat16mf2_t test_vfnmsac_vf_bf16mf2_tumu(vbool32_t mask, vbfloat16mf2_t vd, __bf16 rs1, vbfloat16mf2_t vs2, size_t vl) {
+ return __riscv_vfnmsac_tumu(mask, vd, rs1, vs2, vl);
+}
+
+// CHECK-RV64-LABEL: define dso_local <vscale x 4 x bfloat> @test_vfnmsac_vv_bf16m1_tumu(
+// CHECK-RV64-SAME: <vscale x 4 x i1> [[MASK:%.*]], <vscale x 4 x bfloat> [[VD:%.*]], <vscale x 4 x bfloat> [[VS1:%.*]], <vscale x 4 x bfloat> [[VS2:%.*]], i64 noundef [[VL:%.*]]) #[[ATTR0]] {
+// CHECK-RV64-NEXT: [[ENTRY:.*:]]
+// CHECK-RV64-NEXT: [[TMP0:%.*]] = call <vscale x 4 x bfloat> @llvm.riscv.vfnmsac.mask.nxv4bf16.nxv4bf16.i64(<vscale x 4 x bfloat> [[VD]], <vscale x 4 x bfloat> [[VS1]], <vscale x 4 x bfloat> [[VS2]], <vscale x 4 x i1> [[MASK]], i64 7, i64 [[VL]], i64 0)
+// CHECK-RV64-NEXT: ret <vscale x 4 x bfloat> [[TMP0]]
+//
+vbfloat16m1_t test_vfnmsac_vv_bf16m1_tumu(vbool16_t mask, vbfloat16m1_t vd, vbfloat16m1_t vs1, vbfloat16m1_t vs2, size_t vl) {
+ return __riscv_vfnmsac_tumu(mask, vd, vs1, vs2, vl);
+}
+
+// CHECK-RV64-LABEL: define dso_local <vscale x 4 x bfloat> @test_vfnmsac_vf_bf16m1_tumu(
+// CHECK-RV64-SAME: <vscale x 4 x i1> [[MASK:%.*]], <vscale x 4 x bfloat> [[VD:%.*]], bfloat noundef [[RS1:%.*]], <vscale x 4 x bfloat> [[VS2:%.*]], i64 noundef [[VL:%.*]]) #[[ATTR0]] {
+// CHECK-RV64-NEXT: [[ENTRY:.*:]]
+// CHECK-RV64-NEXT: [[TMP0:%.*]] = call <vscale x 4 x bfloat> @llvm.riscv.vfnmsac.mask.nxv4bf16.bf16.i64(<vscale x 4 x bfloat> [[VD]], bfloat [[RS1]], <vscale x 4 x bfloat> [[VS2]], <vscale x 4 x i1> [[MASK]], i64 7, i64 [[VL]], i64 0)
+// CHECK-RV64-NEXT: ret <vscale x 4 x bfloat> [[TMP0]]
+//
+vbfloat16m1_t test_vfnmsac_vf_bf16m1_tumu(vbool16_t mask, vbfloat16m1_t vd, __bf16 rs1, vbfloat16m1_t vs2, size_t vl) {
+ return __riscv_vfnmsac_tumu(mask, vd, rs1, vs2, vl);
+}
+
+// CHECK-RV64-LABEL: define dso_local <vscale x 8 x bfloat> @test_vfnmsac_vv_bf16m2_tumu(
+// CHECK-RV64-SAME: <vscale x 8 x i1> [[MASK:%.*]], <vscale x 8 x bfloat> [[VD:%.*]], <vscale x 8 x bfloat> [[VS1:%.*]], <vscale x 8 x bfloat> [[VS2:%.*]], i64 noundef [[VL:%.*]]) #[[ATTR0]] {
+// CHECK-RV64-NEXT: [[ENTRY:.*:]]
+// CHECK-RV64-NEXT: [[TMP0:%.*]] = call <vscale x 8 x bfloat> @llvm.riscv.vfnmsac.mask.nxv8bf16.nxv8bf16.i64(<vscale x 8 x bfloat> [[VD]], <vscale x 8 x bfloat> [[VS1]], <vscale x 8 x bfloat> [[VS2]], <vscale x 8 x i1> [[MASK]], i64 7, i64 [[VL]], i64 0)
+// CHECK-RV64-NEXT: ret <vscale x 8 x bfloat> [[TMP0]]
+//
+vbfloat16m2_t test_vfnmsac_vv_bf16m2_tumu(vbool8_t mask, vbfloat16m2_t vd, vbfloat16m2_t vs1, vbfloat16m2_t vs2, size_t vl) {
+ return __riscv_vfnmsac_tumu(mask, vd, vs1, vs2, vl);
+}
+
+// CHECK-RV64-LABEL: define dso_local <vscale x 8 x bfloat> @test_vfnmsac_vf_bf16m2_tumu(
+// CHECK-RV64-SAME: <vscale x 8 x i1> [[MASK:%.*]], <vscale x 8 x bfloat> [[VD:%.*]], bfloat noundef [[RS1:%.*]], <vscale x 8 x bfloat> [[VS2:%.*]], i64 noundef [[VL:%.*]]) #[[ATTR0]] {
+// CHECK-RV64-NEXT: [[ENTRY:.*:]]
+// CHECK-RV64-NEXT: [[TMP0:%.*]] = call <vscale x 8 x bfloat> @llvm.riscv.vfnmsac.mask.nxv8bf16.bf16.i64(<vscale x 8 x bfloat> [[VD]], bfloat [[RS1]], <vscale x 8 x bfloat> [[VS2]], <vscale x 8 x i1> [[MASK]], i64 7, i64 [[VL]], i64 0)
+// CHECK-RV64-NEXT: ret <vscale x 8 x bfloat> [[TMP0]]
+//
+vbfloat16m2_t test_vfnmsac_vf_bf16m2_tumu(vbool8_t mask, vbfloat16m2_t vd, __bf16 rs1, vbfloat16m2_t vs2, size_t vl) {
+ return __riscv_vfnmsac_tumu(mask, vd, rs1, vs2, vl);
+}
+
+// CHECK-RV64-LABEL: define dso_local <vscale x 16 x bfloat> @test_vfnmsac_vv_bf16m4_tumu(
+// CHECK-RV64-SAME: <vscale x 16 x i1> [[MASK:%.*]], <vscale x 16 x bfloat> [[VD:%.*]], <vscale x 16 x bfloat> [[VS1:%.*]], <vscale x 16 x bfloat> [[VS2:%.*]], i64 noundef [[VL:%.*]]) #[[ATTR0]] {
+// CHECK-RV64-NEXT: [[ENTRY:.*:]]
+// CHECK-RV64-NEXT: [[TMP0:%.*]] = call <vscale x 16 x bfloat> @llvm.riscv.vfnmsac.mask.nxv16bf16.nxv16bf16.i64(<vscale x 16 x bfloat> [[VD]], <vscale x 16 x bfloat> [[VS1]], <vscale x 16 x bfloat> [[VS2]], <vscale x 16 x i1> [[MASK]], i64 7, i64 [[VL]], i64 0)
+// CHECK-RV64-NEXT: ret <vscale x 16 x bfloat> [[TMP0]]
+//
+vbfloat16m4_t test_vfnmsac_vv_bf16m4_tumu(vbool4_t mask, vbfloat16m4_t vd, vbfloat16m4_t vs1, vbfloat16m4_t vs2, size_t vl) {
+ return __riscv_vfnmsac_tumu(mask, vd, vs1, vs2, vl);
+}
+
+// CHECK-RV64-LABEL: define dso_local <vscale x 16 x bfloat> @test_vfnmsac_vf_bf16m4_tumu(
+// CHECK-RV64-SAME: <vscale x 16 x i1> [[MASK:%.*]], <vscale x 16 x bfloat> [[VD:%.*]], bfloat noundef [[RS1:%.*]], <vscale x 16 x bfloat> [[VS2:%.*]], i64 noundef [[VL:%.*]]) #[[ATTR0]] {
+// CHECK-RV64-NEXT: [[ENTRY:.*:]]
+// CHECK-RV64-NEXT: [[TMP0:%.*]] = call <vscale x 16 x bfloat> @llvm.riscv.vfnmsac.mask.nxv16bf16.bf16.i64(<vscale x 16 x bfloat> [[VD]], bfloat [[RS1]], <vscale x 16 x bfloat> [[VS2]], <vscale x 16 x i1> [[MASK]], i64 7, i64 [[VL]], i64 0)
+// CHECK-RV64-NEXT: ret <vscale x 16 x bfloat> [[TMP0]]
+//
+vbfloat16m4_t test_vfnmsac_vf_bf16m4_tumu(vbool4_t mask, vbfloat16m4_t vd, __bf16 rs1, vbfloat16m4_t vs2, size_t vl) {
+ return __riscv_vfnmsac_tumu(mask, vd, rs1, vs2, vl);
+}
+
+// CHECK-RV64-LABEL: define dso_local <vscale x 32 x bfloat> @test_vfnmsac_vv_bf16m8_tumu(
+// CHECK-RV64-SAME: <vscale x 32 x i1> [[MASK:%.*]], <vscale x 32 x bfloat> [[VD:%.*]], <vscale x 32 x bfloat> [[VS1:%.*]], <vscale x 32 x bfloat> [[VS2:%.*]], i64 noundef [[VL:%.*]]) #[[ATTR0]] {
+// CHECK-RV64-NEXT: [[ENTRY:.*:]]
+// CHECK-RV64-NEXT: [[TMP0:%.*]] = call <vscale x 32 x bfloat> @llvm.riscv.vfnmsac.mask.nxv32bf16.nxv32bf16.i64(<vscale x 32 x bfloat> [[VD]], <vscale x 32 x bfloat> [[VS1]], <vscale x 32 x bfloat> [[VS2]], <vscale x 32 x i1> [[MASK]], i64 7, i64 [[VL]], i64 0)
+// CHECK-RV64-NEXT: ret <vscale x 32 x bfloat> [[TMP0]]
+//
+vbfloat16m8_t test_vfnmsac_vv_bf16m8_tumu(vbool2_t mask, vbfloat16m8_t vd, vbfloat16m8_t vs1, vbfloat16m8_t vs2, size_t vl) {
+ return __riscv_vfnmsac_tumu(mask, vd, vs1, vs2, vl);
+}
+
+// CHECK-RV64-LABEL: define dso_local <vscale x 32 x bfloat> @test_vfnmsac_vf_bf16m8_tumu(
+// CHECK-RV64-SAME: <vscale x 32 x i1> [[MASK:%.*]], <vscale x 32 x bfloat> [[VD:%.*]], bfloat noundef [[RS1:%.*]], <vscale x 32 x bfloat> [[VS2:%.*]], i64 noundef [[VL:%.*]]) #[[ATTR0]] {
+// CHECK-RV64-NEXT: [[ENTRY:.*:]]
+// CHECK-RV64-NEXT: [[TMP0:%.*]] = call <vscale x 32 x bfloat> @llvm.riscv.vfnmsac.mask.nxv32bf16.bf16.i64(<vscale x 32 x bfloat> [[VD]], bfloat [[RS1]], <vscale x 32 x bfloat> [[VS2]], <vscale x 32 x i1> [[MASK]], i64 7, i64 [[VL]], i64 0)
+// CHECK-RV64-NEXT: ret <vscale x 32 x bfloat> [[TMP0]]
+//
+vbfloat16m8_t test_vfnmsac_vf_bf16m8_tumu(vbool2_t mask, vbfloat16m8_t vd, __bf16 rs1, vbfloat16m8_t vs2, size_t vl) {
+ return __riscv_vfnmsac_tumu(mask, vd, rs1, vs2, vl);
+}
+
+// CHECK-RV64-LABEL: define dso_local <vscale x 1 x bfloat> @test_vfnmsac_vv_bf16mf4_mu(
+// CHECK-RV64-SAME: <vscale x 1 x i1> [[MASK:%.*]], <vscale x 1 x bfloat> [[VD:%.*]], <vscale x 1 x bfloat> [[VS1:%.*]], <vscale x 1 x bfloat> [[VS2:%.*]], i64 noundef [[VL:%.*]]) #[[ATTR0]] {
+// CHECK-RV64-NEXT: [[ENTRY:.*:]]
+// CHECK-RV64-NEXT: [[TMP0:%.*]] = call <vscale x 1 x bfloat> @llvm.riscv.vfnmsac.mask.nxv1bf16.nxv1bf16.i64(<vscale x 1 x bfloat> [[VD]], <vscale x 1 x bfloat> [[VS1]], <vscale x 1 x bfloat> [[VS2]], <vscale x 1 x i1> [[MASK]], i64 7, i64 [[VL]], i64 1)
+// CHECK-RV64-NEXT: ret <vscale x 1 x bfloat> [[TMP0]]
+//
+vbfloat16mf4_t test_vfnmsac_vv_bf16mf4_mu(vbool64_t mask, vbfloat16mf4_t vd, vbfloat16mf4_t vs1, vbfloat16mf4_t vs2, size_t vl) {
+ return __riscv_vfnmsac_mu(mask, vd, vs1, vs2, vl);
+}
+
+// CHECK-RV64-LABEL: define dso_local <vscale x 1 x bfloat> @test_vfnmsac_vf_bf16mf4_mu(
+// CHECK-RV64-SAME: <vscale x 1 x i1> [[MASK:%.*]], <vscale x 1 x bfloat> [[VD:%.*]], bfloat noundef [[RS1:%.*]], <vscale x 1 x bfloat> [[VS2:%.*]], i64 noundef [[VL:%.*]]) #[[ATTR0]] {
+// CHECK-RV64-NEXT: [[ENTRY:.*:]]
+// CHECK-RV64-NEXT: [[TMP0:%.*]] = call <vscale x 1 x bfloat> @llvm.riscv.vfnmsac.mask.nxv1bf16.bf16.i64(<vscale x 1 x bfloat> [[VD]], bfloat [[RS1]], <vscale x 1 x bfloat> [[VS2]], <vscale x 1 x i1> [[MASK]], i64 7, i64 [[VL]], i64 1)
+// CHECK-RV64-NEXT: ret <vscale x 1 x bfloat> [[TMP0]]
+//
+vbfloat16mf4_t test_vfnmsac_vf_bf16mf4_mu(vbool64_t mask, vbfloat16mf4_t vd, __bf16 rs1, vbfloat16mf4_t vs2, size_t vl) {
+ return __riscv_vfnmsac_mu(mask, vd, rs1, vs2, vl);
+}
+
+// CHECK-RV64-LABEL: define dso_local <vscale x 2 x bfloat> @test_vfnmsac_vv_bf16mf2_mu(
+// CHECK-RV64-SAME: <vscale x 2 x i1> [[MASK:%.*]], <vscale x 2 x bfloat> [[VD:%.*]], <vscale x 2 x bfloat> [[VS1:%.*]], <vscale x 2 x bfloat> [[VS2:%.*]], i64 noundef [[VL:%.*]]) #[[ATTR0]] {
+// CHECK-RV64-NEXT: [[ENTRY:.*:]]
+// CHECK-RV64-NEXT: [[TMP0:%.*]] = call <vscale x 2 x bfloat> @llvm.riscv.vfnmsac.mask.nxv2bf16.nxv2bf16.i64(<vscale x 2 x bfloat> [[VD]], <vscale x 2 x bfloat> [[VS1]], <vscale x 2 x bfloat> [[VS2]], <vscale x 2 x i1> [[MASK]], i64 7, i64 [[VL]], i64 1)
+// CHECK-RV64-NEXT: ret <vscale x 2 x bfloat> [[TMP0]]
+//
+vbfloat16mf2_t test_vfnmsac_vv_bf16mf2_mu(vbool32_t mask, vbfloat16mf2_t vd, vbfloat16mf2_t vs1, vbfloat16mf2_t vs2, size_t vl) {
+ return __riscv_vfnmsac_mu(mask, vd, vs1, vs2, vl);
+}
+
+// CHECK-RV64-LABEL: define dso_local <vscale x 2 x bfloat> @test_vfnmsac_vf_bf16mf2_mu(
+// CHECK-RV64-SAME: <vscale x 2 x i1> [[MASK:%.*]], <vscale x 2 x bfloat> [[VD:%.*]], bfloat noundef [[RS1:%.*]], <vscale x 2 x bfloat> [[VS2:%.*]], i64 noundef [[VL:%.*]]) #[[ATTR0]] {
+// CHECK-RV64-NEXT: [[ENTRY:.*:]]
+// CHECK-RV64-NEXT: [[TMP0:%.*]] = call <vscale x 2 x bfloat> @llvm.riscv.vfnmsac.mask.nxv2bf16.bf16.i64(<vscale x 2 x bfloat> [[VD]], bfloat [[RS1]], <vscale x 2 x bfloat> [[VS2]], <vscale x 2 x i1> [[MASK]], i64 7, i64 [[VL]], i64 1)
+// CHECK-RV64-NEXT: ret <vscale x 2 x bfloat> [[TMP0]]
+//
+vbfloat16mf2_t test_vfnmsac_vf_bf16mf2_mu(vbool32_t mask, vbfloat16mf2_t vd, __bf16 rs1, vbfloat16mf2_t vs2, size_t vl) {
+ return __riscv_vfnmsac_mu(mask, vd, rs1, vs2, vl);
+}
+
+// CHECK-RV64-LABEL: define dso_local <vscale x 4 x bfloat> @test_vfnmsac_vv_bf16m1_mu(
+// CHECK-RV64-SAME: <vscale x 4 x i1> [[MASK:%.*]], <vscale x 4 x bfloat> [[VD:%.*]], <vscale x 4 x bfloat> [[VS1:%.*]], <vscale x 4 x bfloat> [[VS2:%.*]], i64 noundef [[VL:%.*]]) #[[ATTR0]] {
+// CHECK-RV64-NEXT: [[ENTRY:.*:]]
+// CHECK-RV64-NEXT: [[TMP0:%.*]] = call <vscale x 4 x bfloat> @llvm.riscv.vfnmsac.mask.nxv4bf16.nxv4bf16.i64(<vscale x 4 x bfloat> [[VD]], <vscale x 4 x bfloat> [[VS1]], <vscale x 4 x bfloat> [[VS2]], <vscale x 4 x i1> [[MASK]], i64 7, i64 [[VL]], i64 1)
+// CHECK-RV64-NEXT: ret <vscale x 4 x bfloat> [[TMP0]]
+//
+vbfloat16m1_t test_vfnmsac_vv_bf16m1_mu(vbool16_t mask, vbfloat16m1_t vd, vbfloat16m1_t vs1, vbfloat16m1_t vs2, size_t vl) {
+ return __riscv_vfnmsac_mu(mask, vd, vs1, vs2, vl);
+}
+
+// CHECK-RV64-LABEL: define dso_local <vscale x 4 x bfloat> @test_vfnmsac_vf_bf16m1_mu(
+// CHECK-RV64-SAME: <vscale x 4 x i1> [[MASK:%.*]], <vscale x 4 x bfloat> [[VD:%.*]], bfloat noundef [[RS1:%.*]], <vscale x 4 x bfloat> [[VS2:%.*]], i64 noundef [[VL:%.*]]) #[[ATTR0]] {
+// CHECK-RV64-NEXT: [[ENTRY:.*:]]
+// CHECK-RV64-NEXT: [[TMP0:%.*]] = call <vscale x 4 x bfloat> @llvm.riscv.vfnmsac.mask.nxv4bf16.bf16.i64(<vscale x 4 x bfloat> [[VD]], bfloat [[RS1]], <vscale x 4 x bfloat> [[VS2]], <vscale x 4 x i1> [[MASK]], i64 7, i64 [[VL]], i64 1)
+// CHECK-RV64-NEXT: ret <vscale x 4 x bfloat> [[TMP0]]
+//
+vbfloat16m1_t test_vfnmsac_vf_bf16m1_mu(vbool16_t mask, vbfloat16m1_t vd, __bf16 rs1, vbfloat16m1_t vs2, size_t vl) {
+ return __riscv_vfnmsac_mu(mask, vd, rs1, vs2, vl);
+}
+
+// CHECK-RV64-LABEL: define dso_local <vscale x 8 x bfloat> @test_vfnmsac_vv_bf16m2_mu(
+// CHECK-RV64-SAME: <vscale x 8 x i1> [[MASK:%.*]], <vscale x 8 x bfloat> [[VD:%.*]], <vscale x 8 x bfloat> [[VS1:%.*]], <vscale x 8 x bfloat> [[VS2:%.*]], i64 noundef [[VL:%.*]]) #[[ATTR0]] {
+// CHECK-RV64-NEXT: [[ENTRY:.*:]]
+// CHECK-RV64-NEXT: [[TMP0:%.*]] = call <vscale x 8 x bfloat> @llvm.riscv.vfnmsac.mask.nxv8bf16.nxv8bf16.i64(<vscale x 8 x bfloat> [[VD]], <vscale x 8 x bfloat> [[VS1]], <vscale x 8 x bfloat> [[VS2]], <vscale x 8 x i1> [[MASK]], i64 7, i64 [[VL]], i64 1)
+// CHECK-RV64-NEXT: ret <vscale x 8 x bfloat> [[TMP0]]
+//
+vbfloat16m2_t test_vfnmsac_vv_bf16m2_mu(vbool8_t mask, vbfloat16m2_t vd, vbfloat16m2_t vs1, vbfloat16m2_t vs2, size_t vl) {
+ return __riscv_vfnmsac_mu(mask, vd, vs1, vs2, vl);
+}
+
+// CHECK-RV64-LABEL: define dso_local <vscale x 8 x bfloat> @test_vfnmsac_vf_bf16m2_mu(
+// CHECK-RV64-SAME: <vscale x 8 x i1> [[MASK:%.*]], <vscale x 8 x bfloat> [[VD:%.*]], bfloat noundef [[RS1:%.*]], <vscale x 8 x bfloat> [[VS2:%.*]], i64 noundef [[VL:%.*]]) #[[ATTR0]] {
+// CHECK-RV64-NEXT: [[ENTRY:.*:]]
+// CHECK-RV64-NEXT: [[TMP0:%.*]] = call <vscale x 8 x bfloat> @llvm.riscv.vfnmsac.mask.nxv8bf16.bf16.i64(<vscale x 8 x bfloat> [[VD]], bfloat [[RS1]], <vscale x 8 x bfloat> [[VS2]], <vscale x 8 x i1> [[MASK]], i64 7, i64 [[VL]], i64 1)
+// CHECK-RV64-NEXT: ret <vscale x 8 x bfloat> [[TMP0]]
+//
+vbfloat16m2_t test_vfnmsac_vf_bf16m2_mu(vbool8_t mask, vbfloat16m2_t vd, __bf16 rs1, vbfloat16m2_t vs2, size_t vl) {
+ return __riscv_vfnmsac_mu(mask, vd, rs1, vs2, vl);
+}
+
+// CHECK-RV64-LABEL: define dso_local <vscale x 16 x bfloat> @test_vfnmsac_vv_bf16m4_mu(
+// CHECK-RV64-SAME: <vscale x 16 x i1> [[MASK:%.*]], <vscale x 16 x bfloat> [[VD:%.*]], <vscale x 16 x bfloat> [[VS1:%.*]], <vscale x 16 x bfloat> [[VS2:%.*]], i64 noundef [[VL:%.*]]) #[[ATTR0]] {
+// CHECK-RV64-NEXT: [[ENTRY:.*:]]
+// CHECK-RV64-NEXT: [[TMP0:%.*]] = call <vscale x 16 x bfloat> @llvm.riscv.vfnmsac.mask.nxv16bf16.nxv16bf16.i64(<vscale x 16 x bfloat> [[VD]], <vscale x 16 x bfloat> [[VS1]], <vscale x 16 x bfloat> [[VS2]], <vscale x 16 x i1> [[MASK]], i64 7, i64 [[VL]], i64 1)
+// CHECK-RV64-NEXT: ret <vscale x 16 x bfloat> [[TMP0]]
+//
+vbfloat16m4_t test_vfnmsac_vv_bf16m4_mu(vbool4_t mask, vbfloat16m4_t vd, vbfloat16m4_t vs1, vbfloat16m4_t vs2, size_t vl) {
+ return __riscv_vfnmsac_mu(mask, vd, vs1, vs2, vl);
+}
+
+// CHECK-RV64-LABEL: define dso_local <vscale x 16 x bfloat> @test_vfnmsac_vf_bf16m4_mu(
+// CHECK-RV64-SAME: <vscale x 16 x i1> [[MASK:%.*]], <vscale x 16 x bfloat> [[VD:%.*]], bfloat noundef [[RS1:%.*]], <vscale x 16 x bfloat> [[VS2:%.*]], i64 noundef [[VL:%.*]]) #[[ATTR0]] {
+// CHECK-RV64-NEXT: [[ENTRY:.*:]]
+// CHECK-RV64-NEXT: [[TMP0:%.*]] = call <vscale x 16 x bfloat> @llvm.riscv.vfnmsac.mask.nxv16bf16.bf16.i64(<vscale x 16 x bfloat> [[VD]], bfloat [[RS1]], <vscale x 16 x bfloat> [[VS2]], <vscale x 16 x i1> [[MASK]], i64 7, i64 [[VL]], i64 1)
+// CHECK-RV64-NEXT: ret <vscale x 16 x bfloat> [[TMP0]]
+//
+vbfloat16m4_t test_vfnmsac_vf_bf16m4_mu(vbool4_t mask, vbfloat16m4_t vd, __bf16 rs1, vbfloat16m4_t vs2, size_t vl) {
+ return __riscv_vfnmsac_mu(mask, vd, rs1, vs2, vl);
+}
+
+// CHECK-RV64-LABEL: define dso_local <vscale x 32 x bfloat> @test_vfnmsac_vv_bf16m8_mu(
+// CHECK-RV64-SAME: <vscale x 32 x i1> [[MASK:%.*]], <vscale x 32 x bfloat> [[VD:%.*]], <vscale x 32 x bfloat> [[VS1:%.*]], <vscale x 32 x bfloat> [[VS2:%.*]], i64 noundef [[VL:%.*]]) #[[ATTR0]] {
+// CHECK-RV64-NEXT: [[ENTRY:.*:]]
+// CHECK-RV64-NEXT: [[TMP0:%.*]] = call <vscale x 32 x bfloat> @llvm.riscv.vfnmsac.mask.nxv32bf16.nxv32bf16.i64(<vscale x 32 x bfloat> [[VD]], <vscale x 32 x bfloat> [[VS1]], <vscale x 32 x bfloat> [[VS2]], <vscale x 32 x i1> [[MASK]], i64 7, i64 [[VL]], i64 1)
+// CHECK-RV64-NEXT: ret <vscale x 32 x bfloat> [[TMP0]]
+//
+vbfloat16m8_t test_vfnmsac_vv_bf16m8_mu(vbool2_t mask, vbfloat16m8_t vd, vbfloat16m8_t vs1, vbfloat16m8_t vs2, size_t vl) {
+ return __riscv_vfnmsac_mu(mask, vd, vs1, vs2, vl);
+}
+
+// CHECK-RV64-LABEL: define dso_local <vscale x 32 x bfloat> @test_vfnmsac_vf_bf16m8_mu(
+// CHECK-RV64-SAME: <vscale x 32 x i1> [[MASK:%.*]], <vscale x 32 x bfloat> [[VD:%.*]], bfloat noundef [[RS1:%.*]], <vscale x 32 x bfloat> [[VS2:%.*]], i64 noundef [[VL:%.*]]) #[[ATTR0]] {
+// CHECK-RV64-NEXT: [[ENTRY:.*:]]
+// CHECK-RV64-NEXT: [[TMP0:%.*]] = call <vscale x 32 x bfloat> @llvm.riscv.vfnmsac.mask.nxv32bf16.bf16.i64(<vscale x 32 x bfloat> [[VD]], bfloat [[RS1]], <vscale x 32 x bfloat> [[VS2]], <vscale x 32 x i1> [[MASK]], i64 7, i64 [[VL]], i64 1)
+// CHECK-RV64-NEXT: ret <vscale x 32 x bfloat> [[TMP0]]
+//
+vbfloat16m8_t test_vfnmsac_vf_bf16m8_mu(vbool2_t mask, vbfloat16m8_t vd, __bf16 rs1, vbfloat16m8_t vs2, size_t vl) {
+ return __riscv_vfnmsac_mu(mask, vd, rs1, vs2, vl);
+}
+
diff --git a/clang/test/CodeGen/RISCV/rvv-intrinsics-autogenerated/zvfbfa/policy/overloaded/vfnmsub.c b/clang/test/CodeGen/RISCV/rvv-intrinsics-autogenerated/zvfbfa/policy/overloaded/vfnmsub.c
new file mode 100644
index 0000000..f5282a1
--- /dev/null
+++ b/clang/test/CodeGen/RISCV/rvv-intrinsics-autogenerated/zvfbfa/policy/overloaded/vfnmsub.c
@@ -0,0 +1,489 @@
+// NOTE: Assertions have been autogenerated by utils/update_cc_test_checks.py UTC_ARGS: --version 5
+// REQUIRES: riscv-registered-target
+// RUN: %clang_cc1 -triple riscv64 -target-feature +v \
+// RUN: -target-feature +experimental-zvfbfa -disable-O0-optnone \
+// RUN: -emit-llvm %s -o - | opt -S -passes=mem2reg | \
+// RUN: FileCheck --check-prefix=CHECK-RV64 %s
+
+#include <riscv_vector.h>
+
+// CHECK-RV64-LABEL: define dso_local <vscale x 1 x bfloat> @test_vfnmsub_vv_bf16mf4_tu(
+// CHECK-RV64-SAME: <vscale x 1 x bfloat> [[VD:%.*]], <vscale x 1 x bfloat> [[VS1:%.*]], <vscale x 1 x bfloat> [[VS2:%.*]], i64 noundef [[VL:%.*]]) #[[ATTR0:[0-9]+]] {
+// CHECK-RV64-NEXT: [[ENTRY:.*:]]
+// CHECK-RV64-NEXT: [[TMP0:%.*]] = call <vscale x 1 x bfloat> @llvm.riscv.vfnmsub.nxv1bf16.nxv1bf16.i64(<vscale x 1 x bfloat> [[VD]], <vscale x 1 x bfloat> [[VS1]], <vscale x 1 x bfloat> [[VS2]], i64 7, i64 [[VL]], i64 2)
+// CHECK-RV64-NEXT: ret <vscale x 1 x bfloat> [[TMP0]]
+//
+vbfloat16mf4_t test_vfnmsub_vv_bf16mf4_tu(vbfloat16mf4_t vd, vbfloat16mf4_t vs1, vbfloat16mf4_t vs2, size_t vl) {
+ return __riscv_vfnmsub_tu(vd, vs1, vs2, vl);
+}
+
+// CHECK-RV64-LABEL: define dso_local <vscale x 1 x bfloat> @test_vfnmsub_vf_bf16mf4_tu(
+// CHECK-RV64-SAME: <vscale x 1 x bfloat> [[VD:%.*]], bfloat noundef [[RS1:%.*]], <vscale x 1 x bfloat> [[VS2:%.*]], i64 noundef [[VL:%.*]]) #[[ATTR0]] {
+// CHECK-RV64-NEXT: [[ENTRY:.*:]]
+// CHECK-RV64-NEXT: [[TMP0:%.*]] = call <vscale x 1 x bfloat> @llvm.riscv.vfnmsub.nxv1bf16.bf16.i64(<vscale x 1 x bfloat> [[VD]], bfloat [[RS1]], <vscale x 1 x bfloat> [[VS2]], i64 7, i64 [[VL]], i64 2)
+// CHECK-RV64-NEXT: ret <vscale x 1 x bfloat> [[TMP0]]
+//
+vbfloat16mf4_t test_vfnmsub_vf_bf16mf4_tu(vbfloat16mf4_t vd, __bf16 rs1, vbfloat16mf4_t vs2, size_t vl) {
+ return __riscv_vfnmsub_tu(vd, rs1, vs2, vl);
+}
+
+// CHECK-RV64-LABEL: define dso_local <vscale x 2 x bfloat> @test_vfnmsub_vv_bf16mf2_tu(
+// CHECK-RV64-SAME: <vscale x 2 x bfloat> [[VD:%.*]], <vscale x 2 x bfloat> [[VS1:%.*]], <vscale x 2 x bfloat> [[VS2:%.*]], i64 noundef [[VL:%.*]]) #[[ATTR0]] {
+// CHECK-RV64-NEXT: [[ENTRY:.*:]]
+// CHECK-RV64-NEXT: [[TMP0:%.*]] = call <vscale x 2 x bfloat> @llvm.riscv.vfnmsub.nxv2bf16.nxv2bf16.i64(<vscale x 2 x bfloat> [[VD]], <vscale x 2 x bfloat> [[VS1]], <vscale x 2 x bfloat> [[VS2]], i64 7, i64 [[VL]], i64 2)
+// CHECK-RV64-NEXT: ret <vscale x 2 x bfloat> [[TMP0]]
+//
+vbfloat16mf2_t test_vfnmsub_vv_bf16mf2_tu(vbfloat16mf2_t vd, vbfloat16mf2_t vs1, vbfloat16mf2_t vs2, size_t vl) {
+ return __riscv_vfnmsub_tu(vd, vs1, vs2, vl);
+}
+
+// CHECK-RV64-LABEL: define dso_local <vscale x 2 x bfloat> @test_vfnmsub_vf_bf16mf2_tu(
+// CHECK-RV64-SAME: <vscale x 2 x bfloat> [[VD:%.*]], bfloat noundef [[RS1:%.*]], <vscale x 2 x bfloat> [[VS2:%.*]], i64 noundef [[VL:%.*]]) #[[ATTR0]] {
+// CHECK-RV64-NEXT: [[ENTRY:.*:]]
+// CHECK-RV64-NEXT: [[TMP0:%.*]] = call <vscale x 2 x bfloat> @llvm.riscv.vfnmsub.nxv2bf16.bf16.i64(<vscale x 2 x bfloat> [[VD]], bfloat [[RS1]], <vscale x 2 x bfloat> [[VS2]], i64 7, i64 [[VL]], i64 2)
+// CHECK-RV64-NEXT: ret <vscale x 2 x bfloat> [[TMP0]]
+//
+vbfloat16mf2_t test_vfnmsub_vf_bf16mf2_tu(vbfloat16mf2_t vd, __bf16 rs1, vbfloat16mf2_t vs2, size_t vl) {
+ return __riscv_vfnmsub_tu(vd, rs1, vs2, vl);
+}
+
+// CHECK-RV64-LABEL: define dso_local <vscale x 4 x bfloat> @test_vfnmsub_vv_bf16m1_tu(
+// CHECK-RV64-SAME: <vscale x 4 x bfloat> [[VD:%.*]], <vscale x 4 x bfloat> [[VS1:%.*]], <vscale x 4 x bfloat> [[VS2:%.*]], i64 noundef [[VL:%.*]]) #[[ATTR0]] {
+// CHECK-RV64-NEXT: [[ENTRY:.*:]]
+// CHECK-RV64-NEXT: [[TMP0:%.*]] = call <vscale x 4 x bfloat> @llvm.riscv.vfnmsub.nxv4bf16.nxv4bf16.i64(<vscale x 4 x bfloat> [[VD]], <vscale x 4 x bfloat> [[VS1]], <vscale x 4 x bfloat> [[VS2]], i64 7, i64 [[VL]], i64 2)
+// CHECK-RV64-NEXT: ret <vscale x 4 x bfloat> [[TMP0]]
+//
+vbfloat16m1_t test_vfnmsub_vv_bf16m1_tu(vbfloat16m1_t vd, vbfloat16m1_t vs1, vbfloat16m1_t vs2, size_t vl) {
+ return __riscv_vfnmsub_tu(vd, vs1, vs2, vl);
+}
+
+// CHECK-RV64-LABEL: define dso_local <vscale x 4 x bfloat> @test_vfnmsub_vf_bf16m1_tu(
+// CHECK-RV64-SAME: <vscale x 4 x bfloat> [[VD:%.*]], bfloat noundef [[RS1:%.*]], <vscale x 4 x bfloat> [[VS2:%.*]], i64 noundef [[VL:%.*]]) #[[ATTR0]] {
+// CHECK-RV64-NEXT: [[ENTRY:.*:]]
+// CHECK-RV64-NEXT: [[TMP0:%.*]] = call <vscale x 4 x bfloat> @llvm.riscv.vfnmsub.nxv4bf16.bf16.i64(<vscale x 4 x bfloat> [[VD]], bfloat [[RS1]], <vscale x 4 x bfloat> [[VS2]], i64 7, i64 [[VL]], i64 2)
+// CHECK-RV64-NEXT: ret <vscale x 4 x bfloat> [[TMP0]]
+//
+vbfloat16m1_t test_vfnmsub_vf_bf16m1_tu(vbfloat16m1_t vd, __bf16 rs1, vbfloat16m1_t vs2, size_t vl) {
+ return __riscv_vfnmsub_tu(vd, rs1, vs2, vl);
+}
+
+// CHECK-RV64-LABEL: define dso_local <vscale x 8 x bfloat> @test_vfnmsub_vv_bf16m2_tu(
+// CHECK-RV64-SAME: <vscale x 8 x bfloat> [[VD:%.*]], <vscale x 8 x bfloat> [[VS1:%.*]], <vscale x 8 x bfloat> [[VS2:%.*]], i64 noundef [[VL:%.*]]) #[[ATTR0]] {
+// CHECK-RV64-NEXT: [[ENTRY:.*:]]
+// CHECK-RV64-NEXT: [[TMP0:%.*]] = call <vscale x 8 x bfloat> @llvm.riscv.vfnmsub.nxv8bf16.nxv8bf16.i64(<vscale x 8 x bfloat> [[VD]], <vscale x 8 x bfloat> [[VS1]], <vscale x 8 x bfloat> [[VS2]], i64 7, i64 [[VL]], i64 2)
+// CHECK-RV64-NEXT: ret <vscale x 8 x bfloat> [[TMP0]]
+//
+vbfloat16m2_t test_vfnmsub_vv_bf16m2_tu(vbfloat16m2_t vd, vbfloat16m2_t vs1, vbfloat16m2_t vs2, size_t vl) {
+ return __riscv_vfnmsub_tu(vd, vs1, vs2, vl);
+}
+
+// CHECK-RV64-LABEL: define dso_local <vscale x 8 x bfloat> @test_vfnmsub_vf_bf16m2_tu(
+// CHECK-RV64-SAME: <vscale x 8 x bfloat> [[VD:%.*]], bfloat noundef [[RS1:%.*]], <vscale x 8 x bfloat> [[VS2:%.*]], i64 noundef [[VL:%.*]]) #[[ATTR0]] {
+// CHECK-RV64-NEXT: [[ENTRY:.*:]]
+// CHECK-RV64-NEXT: [[TMP0:%.*]] = call <vscale x 8 x bfloat> @llvm.riscv.vfnmsub.nxv8bf16.bf16.i64(<vscale x 8 x bfloat> [[VD]], bfloat [[RS1]], <vscale x 8 x bfloat> [[VS2]], i64 7, i64 [[VL]], i64 2)
+// CHECK-RV64-NEXT: ret <vscale x 8 x bfloat> [[TMP0]]
+//
+vbfloat16m2_t test_vfnmsub_vf_bf16m2_tu(vbfloat16m2_t vd, __bf16 rs1, vbfloat16m2_t vs2, size_t vl) {
+ return __riscv_vfnmsub_tu(vd, rs1, vs2, vl);
+}
+
+// CHECK-RV64-LABEL: define dso_local <vscale x 16 x bfloat> @test_vfnmsub_vv_bf16m4_tu(
+// CHECK-RV64-SAME: <vscale x 16 x bfloat> [[VD:%.*]], <vscale x 16 x bfloat> [[VS1:%.*]], <vscale x 16 x bfloat> [[VS2:%.*]], i64 noundef [[VL:%.*]]) #[[ATTR0]] {
+// CHECK-RV64-NEXT: [[ENTRY:.*:]]
+// CHECK-RV64-NEXT: [[TMP0:%.*]] = call <vscale x 16 x bfloat> @llvm.riscv.vfnmsub.nxv16bf16.nxv16bf16.i64(<vscale x 16 x bfloat> [[VD]], <vscale x 16 x bfloat> [[VS1]], <vscale x 16 x bfloat> [[VS2]], i64 7, i64 [[VL]], i64 2)
+// CHECK-RV64-NEXT: ret <vscale x 16 x bfloat> [[TMP0]]
+//
+vbfloat16m4_t test_vfnmsub_vv_bf16m4_tu(vbfloat16m4_t vd, vbfloat16m4_t vs1, vbfloat16m4_t vs2, size_t vl) {
+ return __riscv_vfnmsub_tu(vd, vs1, vs2, vl);
+}
+
+// CHECK-RV64-LABEL: define dso_local <vscale x 16 x bfloat> @test_vfnmsub_vf_bf16m4_tu(
+// CHECK-RV64-SAME: <vscale x 16 x bfloat> [[VD:%.*]], bfloat noundef [[RS1:%.*]], <vscale x 16 x bfloat> [[VS2:%.*]], i64 noundef [[VL:%.*]]) #[[ATTR0]] {
+// CHECK-RV64-NEXT: [[ENTRY:.*:]]
+// CHECK-RV64-NEXT: [[TMP0:%.*]] = call <vscale x 16 x bfloat> @llvm.riscv.vfnmsub.nxv16bf16.bf16.i64(<vscale x 16 x bfloat> [[VD]], bfloat [[RS1]], <vscale x 16 x bfloat> [[VS2]], i64 7, i64 [[VL]], i64 2)
+// CHECK-RV64-NEXT: ret <vscale x 16 x bfloat> [[TMP0]]
+//
+vbfloat16m4_t test_vfnmsub_vf_bf16m4_tu(vbfloat16m4_t vd, __bf16 rs1, vbfloat16m4_t vs2, size_t vl) {
+ return __riscv_vfnmsub_tu(vd, rs1, vs2, vl);
+}
+
+// CHECK-RV64-LABEL: define dso_local <vscale x 32 x bfloat> @test_vfnmsub_vv_bf16m8_tu(
+// CHECK-RV64-SAME: <vscale x 32 x bfloat> [[VD:%.*]], <vscale x 32 x bfloat> [[VS1:%.*]], <vscale x 32 x bfloat> [[VS2:%.*]], i64 noundef [[VL:%.*]]) #[[ATTR0]] {
+// CHECK-RV64-NEXT: [[ENTRY:.*:]]
+// CHECK-RV64-NEXT: [[TMP0:%.*]] = call <vscale x 32 x bfloat> @llvm.riscv.vfnmsub.nxv32bf16.nxv32bf16.i64(<vscale x 32 x bfloat> [[VD]], <vscale x 32 x bfloat> [[VS1]], <vscale x 32 x bfloat> [[VS2]], i64 7, i64 [[VL]], i64 2)
+// CHECK-RV64-NEXT: ret <vscale x 32 x bfloat> [[TMP0]]
+//
+vbfloat16m8_t test_vfnmsub_vv_bf16m8_tu(vbfloat16m8_t vd, vbfloat16m8_t vs1, vbfloat16m8_t vs2, size_t vl) {
+ return __riscv_vfnmsub_tu(vd, vs1, vs2, vl);
+}
+
+// CHECK-RV64-LABEL: define dso_local <vscale x 32 x bfloat> @test_vfnmsub_vf_bf16m8_tu(
+// CHECK-RV64-SAME: <vscale x 32 x bfloat> [[VD:%.*]], bfloat noundef [[RS1:%.*]], <vscale x 32 x bfloat> [[VS2:%.*]], i64 noundef [[VL:%.*]]) #[[ATTR0]] {
+// CHECK-RV64-NEXT: [[ENTRY:.*:]]
+// CHECK-RV64-NEXT: [[TMP0:%.*]] = call <vscale x 32 x bfloat> @llvm.riscv.vfnmsub.nxv32bf16.bf16.i64(<vscale x 32 x bfloat> [[VD]], bfloat [[RS1]], <vscale x 32 x bfloat> [[VS2]], i64 7, i64 [[VL]], i64 2)
+// CHECK-RV64-NEXT: ret <vscale x 32 x bfloat> [[TMP0]]
+//
+vbfloat16m8_t test_vfnmsub_vf_bf16m8_tu(vbfloat16m8_t vd, __bf16 rs1, vbfloat16m8_t vs2, size_t vl) {
+ return __riscv_vfnmsub_tu(vd, rs1, vs2, vl);
+}
+
+// CHECK-RV64-LABEL: define dso_local <vscale x 1 x bfloat> @test_vfnmsub_vv_bf16mf4_tum(
+// CHECK-RV64-SAME: <vscale x 1 x i1> [[MASK:%.*]], <vscale x 1 x bfloat> [[VD:%.*]], <vscale x 1 x bfloat> [[VS1:%.*]], <vscale x 1 x bfloat> [[VS2:%.*]], i64 noundef [[VL:%.*]]) #[[ATTR0]] {
+// CHECK-RV64-NEXT: [[ENTRY:.*:]]
+// CHECK-RV64-NEXT: [[TMP0:%.*]] = call <vscale x 1 x bfloat> @llvm.riscv.vfnmsub.mask.nxv1bf16.nxv1bf16.i64(<vscale x 1 x bfloat> [[VD]], <vscale x 1 x bfloat> [[VS1]], <vscale x 1 x bfloat> [[VS2]], <vscale x 1 x i1> [[MASK]], i64 7, i64 [[VL]], i64 2)
+// CHECK-RV64-NEXT: ret <vscale x 1 x bfloat> [[TMP0]]
+//
+vbfloat16mf4_t test_vfnmsub_vv_bf16mf4_tum(vbool64_t mask, vbfloat16mf4_t vd, vbfloat16mf4_t vs1, vbfloat16mf4_t vs2, size_t vl) {
+ return __riscv_vfnmsub_tum(mask, vd, vs1, vs2, vl);
+}
+
+// CHECK-RV64-LABEL: define dso_local <vscale x 1 x bfloat> @test_vfnmsub_vf_bf16mf4_tum(
+// CHECK-RV64-SAME: <vscale x 1 x i1> [[MASK:%.*]], <vscale x 1 x bfloat> [[VD:%.*]], bfloat noundef [[RS1:%.*]], <vscale x 1 x bfloat> [[VS2:%.*]], i64 noundef [[VL:%.*]]) #[[ATTR0]] {
+// CHECK-RV64-NEXT: [[ENTRY:.*:]]
+// CHECK-RV64-NEXT: [[TMP0:%.*]] = call <vscale x 1 x bfloat> @llvm.riscv.vfnmsub.mask.nxv1bf16.bf16.i64(<vscale x 1 x bfloat> [[VD]], bfloat [[RS1]], <vscale x 1 x bfloat> [[VS2]], <vscale x 1 x i1> [[MASK]], i64 7, i64 [[VL]], i64 2)
+// CHECK-RV64-NEXT: ret <vscale x 1 x bfloat> [[TMP0]]
+//
+vbfloat16mf4_t test_vfnmsub_vf_bf16mf4_tum(vbool64_t mask, vbfloat16mf4_t vd, __bf16 rs1, vbfloat16mf4_t vs2, size_t vl) {
+ return __riscv_vfnmsub_tum(mask, vd, rs1, vs2, vl);
+}
+
+// CHECK-RV64-LABEL: define dso_local <vscale x 2 x bfloat> @test_vfnmsub_vv_bf16mf2_tum(
+// CHECK-RV64-SAME: <vscale x 2 x i1> [[MASK:%.*]], <vscale x 2 x bfloat> [[VD:%.*]], <vscale x 2 x bfloat> [[VS1:%.*]], <vscale x 2 x bfloat> [[VS2:%.*]], i64 noundef [[VL:%.*]]) #[[ATTR0]] {
+// CHECK-RV64-NEXT: [[ENTRY:.*:]]
+// CHECK-RV64-NEXT: [[TMP0:%.*]] = call <vscale x 2 x bfloat> @llvm.riscv.vfnmsub.mask.nxv2bf16.nxv2bf16.i64(<vscale x 2 x bfloat> [[VD]], <vscale x 2 x bfloat> [[VS1]], <vscale x 2 x bfloat> [[VS2]], <vscale x 2 x i1> [[MASK]], i64 7, i64 [[VL]], i64 2)
+// CHECK-RV64-NEXT: ret <vscale x 2 x bfloat> [[TMP0]]
+//
+vbfloat16mf2_t test_vfnmsub_vv_bf16mf2_tum(vbool32_t mask, vbfloat16mf2_t vd, vbfloat16mf2_t vs1, vbfloat16mf2_t vs2, size_t vl) {
+ return __riscv_vfnmsub_tum(mask, vd, vs1, vs2, vl);
+}
+
+// CHECK-RV64-LABEL: define dso_local <vscale x 2 x bfloat> @test_vfnmsub_vf_bf16mf2_tum(
+// CHECK-RV64-SAME: <vscale x 2 x i1> [[MASK:%.*]], <vscale x 2 x bfloat> [[VD:%.*]], bfloat noundef [[RS1:%.*]], <vscale x 2 x bfloat> [[VS2:%.*]], i64 noundef [[VL:%.*]]) #[[ATTR0]] {
+// CHECK-RV64-NEXT: [[ENTRY:.*:]]
+// CHECK-RV64-NEXT: [[TMP0:%.*]] = call <vscale x 2 x bfloat> @llvm.riscv.vfnmsub.mask.nxv2bf16.bf16.i64(<vscale x 2 x bfloat> [[VD]], bfloat [[RS1]], <vscale x 2 x bfloat> [[VS2]], <vscale x 2 x i1> [[MASK]], i64 7, i64 [[VL]], i64 2)
+// CHECK-RV64-NEXT: ret <vscale x 2 x bfloat> [[TMP0]]
+//
+vbfloat16mf2_t test_vfnmsub_vf_bf16mf2_tum(vbool32_t mask, vbfloat16mf2_t vd, __bf16 rs1, vbfloat16mf2_t vs2, size_t vl) {
+ return __riscv_vfnmsub_tum(mask, vd, rs1, vs2, vl);
+}
+
+// CHECK-RV64-LABEL: define dso_local <vscale x 4 x bfloat> @test_vfnmsub_vv_bf16m1_tum(
+// CHECK-RV64-SAME: <vscale x 4 x i1> [[MASK:%.*]], <vscale x 4 x bfloat> [[VD:%.*]], <vscale x 4 x bfloat> [[VS1:%.*]], <vscale x 4 x bfloat> [[VS2:%.*]], i64 noundef [[VL:%.*]]) #[[ATTR0]] {
+// CHECK-RV64-NEXT: [[ENTRY:.*:]]
+// CHECK-RV64-NEXT: [[TMP0:%.*]] = call <vscale x 4 x bfloat> @llvm.riscv.vfnmsub.mask.nxv4bf16.nxv4bf16.i64(<vscale x 4 x bfloat> [[VD]], <vscale x 4 x bfloat> [[VS1]], <vscale x 4 x bfloat> [[VS2]], <vscale x 4 x i1> [[MASK]], i64 7, i64 [[VL]], i64 2)
+// CHECK-RV64-NEXT: ret <vscale x 4 x bfloat> [[TMP0]]
+//
+vbfloat16m1_t test_vfnmsub_vv_bf16m1_tum(vbool16_t mask, vbfloat16m1_t vd, vbfloat16m1_t vs1, vbfloat16m1_t vs2, size_t vl) {
+ return __riscv_vfnmsub_tum(mask, vd, vs1, vs2, vl);
+}
+
+// CHECK-RV64-LABEL: define dso_local <vscale x 4 x bfloat> @test_vfnmsub_vf_bf16m1_tum(
+// CHECK-RV64-SAME: <vscale x 4 x i1> [[MASK:%.*]], <vscale x 4 x bfloat> [[VD:%.*]], bfloat noundef [[RS1:%.*]], <vscale x 4 x bfloat> [[VS2:%.*]], i64 noundef [[VL:%.*]]) #[[ATTR0]] {
+// CHECK-RV64-NEXT: [[ENTRY:.*:]]
+// CHECK-RV64-NEXT: [[TMP0:%.*]] = call <vscale x 4 x bfloat> @llvm.riscv.vfnmsub.mask.nxv4bf16.bf16.i64(<vscale x 4 x bfloat> [[VD]], bfloat [[RS1]], <vscale x 4 x bfloat> [[VS2]], <vscale x 4 x i1> [[MASK]], i64 7, i64 [[VL]], i64 2)
+// CHECK-RV64-NEXT: ret <vscale x 4 x bfloat> [[TMP0]]
+//
+vbfloat16m1_t test_vfnmsub_vf_bf16m1_tum(vbool16_t mask, vbfloat16m1_t vd, __bf16 rs1, vbfloat16m1_t vs2, size_t vl) {
+ return __riscv_vfnmsub_tum(mask, vd, rs1, vs2, vl);
+}
+
+// CHECK-RV64-LABEL: define dso_local <vscale x 8 x bfloat> @test_vfnmsub_vv_bf16m2_tum(
+// CHECK-RV64-SAME: <vscale x 8 x i1> [[MASK:%.*]], <vscale x 8 x bfloat> [[VD:%.*]], <vscale x 8 x bfloat> [[VS1:%.*]], <vscale x 8 x bfloat> [[VS2:%.*]], i64 noundef [[VL:%.*]]) #[[ATTR0]] {
+// CHECK-RV64-NEXT: [[ENTRY:.*:]]
+// CHECK-RV64-NEXT: [[TMP0:%.*]] = call <vscale x 8 x bfloat> @llvm.riscv.vfnmsub.mask.nxv8bf16.nxv8bf16.i64(<vscale x 8 x bfloat> [[VD]], <vscale x 8 x bfloat> [[VS1]], <vscale x 8 x bfloat> [[VS2]], <vscale x 8 x i1> [[MASK]], i64 7, i64 [[VL]], i64 2)
+// CHECK-RV64-NEXT: ret <vscale x 8 x bfloat> [[TMP0]]
+//
+vbfloat16m2_t test_vfnmsub_vv_bf16m2_tum(vbool8_t mask, vbfloat16m2_t vd, vbfloat16m2_t vs1, vbfloat16m2_t vs2, size_t vl) {
+ return __riscv_vfnmsub_tum(mask, vd, vs1, vs2, vl);
+}
+
+// CHECK-RV64-LABEL: define dso_local <vscale x 8 x bfloat> @test_vfnmsub_vf_bf16m2_tum(
+// CHECK-RV64-SAME: <vscale x 8 x i1> [[MASK:%.*]], <vscale x 8 x bfloat> [[VD:%.*]], bfloat noundef [[RS1:%.*]], <vscale x 8 x bfloat> [[VS2:%.*]], i64 noundef [[VL:%.*]]) #[[ATTR0]] {
+// CHECK-RV64-NEXT: [[ENTRY:.*:]]
+// CHECK-RV64-NEXT: [[TMP0:%.*]] = call <vscale x 8 x bfloat> @llvm.riscv.vfnmsub.mask.nxv8bf16.bf16.i64(<vscale x 8 x bfloat> [[VD]], bfloat [[RS1]], <vscale x 8 x bfloat> [[VS2]], <vscale x 8 x i1> [[MASK]], i64 7, i64 [[VL]], i64 2)
+// CHECK-RV64-NEXT: ret <vscale x 8 x bfloat> [[TMP0]]
+//
+vbfloat16m2_t test_vfnmsub_vf_bf16m2_tum(vbool8_t mask, vbfloat16m2_t vd, __bf16 rs1, vbfloat16m2_t vs2, size_t vl) {
+ return __riscv_vfnmsub_tum(mask, vd, rs1, vs2, vl);
+}
+
+// CHECK-RV64-LABEL: define dso_local <vscale x 16 x bfloat> @test_vfnmsub_vv_bf16m4_tum(
+// CHECK-RV64-SAME: <vscale x 16 x i1> [[MASK:%.*]], <vscale x 16 x bfloat> [[VD:%.*]], <vscale x 16 x bfloat> [[VS1:%.*]], <vscale x 16 x bfloat> [[VS2:%.*]], i64 noundef [[VL:%.*]]) #[[ATTR0]] {
+// CHECK-RV64-NEXT: [[ENTRY:.*:]]
+// CHECK-RV64-NEXT: [[TMP0:%.*]] = call <vscale x 16 x bfloat> @llvm.riscv.vfnmsub.mask.nxv16bf16.nxv16bf16.i64(<vscale x 16 x bfloat> [[VD]], <vscale x 16 x bfloat> [[VS1]], <vscale x 16 x bfloat> [[VS2]], <vscale x 16 x i1> [[MASK]], i64 7, i64 [[VL]], i64 2)
+// CHECK-RV64-NEXT: ret <vscale x 16 x bfloat> [[TMP0]]
+//
+vbfloat16m4_t test_vfnmsub_vv_bf16m4_tum(vbool4_t mask, vbfloat16m4_t vd, vbfloat16m4_t vs1, vbfloat16m4_t vs2, size_t vl) {
+ return __riscv_vfnmsub_tum(mask, vd, vs1, vs2, vl);
+}
+
+// CHECK-RV64-LABEL: define dso_local <vscale x 16 x bfloat> @test_vfnmsub_vf_bf16m4_tum(
+// CHECK-RV64-SAME: <vscale x 16 x i1> [[MASK:%.*]], <vscale x 16 x bfloat> [[VD:%.*]], bfloat noundef [[RS1:%.*]], <vscale x 16 x bfloat> [[VS2:%.*]], i64 noundef [[VL:%.*]]) #[[ATTR0]] {
+// CHECK-RV64-NEXT: [[ENTRY:.*:]]
+// CHECK-RV64-NEXT: [[TMP0:%.*]] = call <vscale x 16 x bfloat> @llvm.riscv.vfnmsub.mask.nxv16bf16.bf16.i64(<vscale x 16 x bfloat> [[VD]], bfloat [[RS1]], <vscale x 16 x bfloat> [[VS2]], <vscale x 16 x i1> [[MASK]], i64 7, i64 [[VL]], i64 2)
+// CHECK-RV64-NEXT: ret <vscale x 16 x bfloat> [[TMP0]]
+//
+vbfloat16m4_t test_vfnmsub_vf_bf16m4_tum(vbool4_t mask, vbfloat16m4_t vd, __bf16 rs1, vbfloat16m4_t vs2, size_t vl) {
+ return __riscv_vfnmsub_tum(mask, vd, rs1, vs2, vl);
+}
+
+// CHECK-RV64-LABEL: define dso_local <vscale x 32 x bfloat> @test_vfnmsub_vv_bf16m8_tum(
+// CHECK-RV64-SAME: <vscale x 32 x i1> [[MASK:%.*]], <vscale x 32 x bfloat> [[VD:%.*]], <vscale x 32 x bfloat> [[VS1:%.*]], <vscale x 32 x bfloat> [[VS2:%.*]], i64 noundef [[VL:%.*]]) #[[ATTR0]] {
+// CHECK-RV64-NEXT: [[ENTRY:.*:]]
+// CHECK-RV64-NEXT: [[TMP0:%.*]] = call <vscale x 32 x bfloat> @llvm.riscv.vfnmsub.mask.nxv32bf16.nxv32bf16.i64(<vscale x 32 x bfloat> [[VD]], <vscale x 32 x bfloat> [[VS1]], <vscale x 32 x bfloat> [[VS2]], <vscale x 32 x i1> [[MASK]], i64 7, i64 [[VL]], i64 2)
+// CHECK-RV64-NEXT: ret <vscale x 32 x bfloat> [[TMP0]]
+//
+vbfloat16m8_t test_vfnmsub_vv_bf16m8_tum(vbool2_t mask, vbfloat16m8_t vd, vbfloat16m8_t vs1, vbfloat16m8_t vs2, size_t vl) {
+ return __riscv_vfnmsub_tum(mask, vd, vs1, vs2, vl);
+}
+
+// CHECK-RV64-LABEL: define dso_local <vscale x 32 x bfloat> @test_vfnmsub_vf_bf16m8_tum(
+// CHECK-RV64-SAME: <vscale x 32 x i1> [[MASK:%.*]], <vscale x 32 x bfloat> [[VD:%.*]], bfloat noundef [[RS1:%.*]], <vscale x 32 x bfloat> [[VS2:%.*]], i64 noundef [[VL:%.*]]) #[[ATTR0]] {
+// CHECK-RV64-NEXT: [[ENTRY:.*:]]
+// CHECK-RV64-NEXT: [[TMP0:%.*]] = call <vscale x 32 x bfloat> @llvm.riscv.vfnmsub.mask.nxv32bf16.bf16.i64(<vscale x 32 x bfloat> [[VD]], bfloat [[RS1]], <vscale x 32 x bfloat> [[VS2]], <vscale x 32 x i1> [[MASK]], i64 7, i64 [[VL]], i64 2)
+// CHECK-RV64-NEXT: ret <vscale x 32 x bfloat> [[TMP0]]
+//
+vbfloat16m8_t test_vfnmsub_vf_bf16m8_tum(vbool2_t mask, vbfloat16m8_t vd, __bf16 rs1, vbfloat16m8_t vs2, size_t vl) {
+ return __riscv_vfnmsub_tum(mask, vd, rs1, vs2, vl);
+}
+
+// CHECK-RV64-LABEL: define dso_local <vscale x 1 x bfloat> @test_vfnmsub_vv_bf16mf4_tumu(
+// CHECK-RV64-SAME: <vscale x 1 x i1> [[MASK:%.*]], <vscale x 1 x bfloat> [[VD:%.*]], <vscale x 1 x bfloat> [[VS1:%.*]], <vscale x 1 x bfloat> [[VS2:%.*]], i64 noundef [[VL:%.*]]) #[[ATTR0]] {
+// CHECK-RV64-NEXT: [[ENTRY:.*:]]
+// CHECK-RV64-NEXT: [[TMP0:%.*]] = call <vscale x 1 x bfloat> @llvm.riscv.vfnmsub.mask.nxv1bf16.nxv1bf16.i64(<vscale x 1 x bfloat> [[VD]], <vscale x 1 x bfloat> [[VS1]], <vscale x 1 x bfloat> [[VS2]], <vscale x 1 x i1> [[MASK]], i64 7, i64 [[VL]], i64 0)
+// CHECK-RV64-NEXT: ret <vscale x 1 x bfloat> [[TMP0]]
+//
+vbfloat16mf4_t test_vfnmsub_vv_bf16mf4_tumu(vbool64_t mask, vbfloat16mf4_t vd, vbfloat16mf4_t vs1, vbfloat16mf4_t vs2, size_t vl) {
+ return __riscv_vfnmsub_tumu(mask, vd, vs1, vs2, vl);
+}
+
+// CHECK-RV64-LABEL: define dso_local <vscale x 1 x bfloat> @test_vfnmsub_vf_bf16mf4_tumu(
+// CHECK-RV64-SAME: <vscale x 1 x i1> [[MASK:%.*]], <vscale x 1 x bfloat> [[VD:%.*]], bfloat noundef [[RS1:%.*]], <vscale x 1 x bfloat> [[VS2:%.*]], i64 noundef [[VL:%.*]]) #[[ATTR0]] {
+// CHECK-RV64-NEXT: [[ENTRY:.*:]]
+// CHECK-RV64-NEXT: [[TMP0:%.*]] = call <vscale x 1 x bfloat> @llvm.riscv.vfnmsub.mask.nxv1bf16.bf16.i64(<vscale x 1 x bfloat> [[VD]], bfloat [[RS1]], <vscale x 1 x bfloat> [[VS2]], <vscale x 1 x i1> [[MASK]], i64 7, i64 [[VL]], i64 0)
+// CHECK-RV64-NEXT: ret <vscale x 1 x bfloat> [[TMP0]]
+//
+vbfloat16mf4_t test_vfnmsub_vf_bf16mf4_tumu(vbool64_t mask, vbfloat16mf4_t vd, __bf16 rs1, vbfloat16mf4_t vs2, size_t vl) {
+ return __riscv_vfnmsub_tumu(mask, vd, rs1, vs2, vl);
+}
+
+// CHECK-RV64-LABEL: define dso_local <vscale x 2 x bfloat> @test_vfnmsub_vv_bf16mf2_tumu(
+// CHECK-RV64-SAME: <vscale x 2 x i1> [[MASK:%.*]], <vscale x 2 x bfloat> [[VD:%.*]], <vscale x 2 x bfloat> [[VS1:%.*]], <vscale x 2 x bfloat> [[VS2:%.*]], i64 noundef [[VL:%.*]]) #[[ATTR0]] {
+// CHECK-RV64-NEXT: [[ENTRY:.*:]]
+// CHECK-RV64-NEXT: [[TMP0:%.*]] = call <vscale x 2 x bfloat> @llvm.riscv.vfnmsub.mask.nxv2bf16.nxv2bf16.i64(<vscale x 2 x bfloat> [[VD]], <vscale x 2 x bfloat> [[VS1]], <vscale x 2 x bfloat> [[VS2]], <vscale x 2 x i1> [[MASK]], i64 7, i64 [[VL]], i64 0)
+// CHECK-RV64-NEXT: ret <vscale x 2 x bfloat> [[TMP0]]
+//
+vbfloat16mf2_t test_vfnmsub_vv_bf16mf2_tumu(vbool32_t mask, vbfloat16mf2_t vd, vbfloat16mf2_t vs1, vbfloat16mf2_t vs2, size_t vl) {
+ return __riscv_vfnmsub_tumu(mask, vd, vs1, vs2, vl);
+}
+
+// CHECK-RV64-LABEL: define dso_local <vscale x 2 x bfloat> @test_vfnmsub_vf_bf16mf2_tumu(
+// CHECK-RV64-SAME: <vscale x 2 x i1> [[MASK:%.*]], <vscale x 2 x bfloat> [[VD:%.*]], bfloat noundef [[RS1:%.*]], <vscale x 2 x bfloat> [[VS2:%.*]], i64 noundef [[VL:%.*]]) #[[ATTR0]] {
+// CHECK-RV64-NEXT: [[ENTRY:.*:]]
+// CHECK-RV64-NEXT: [[TMP0:%.*]] = call <vscale x 2 x bfloat> @llvm.riscv.vfnmsub.mask.nxv2bf16.bf16.i64(<vscale x 2 x bfloat> [[VD]], bfloat [[RS1]], <vscale x 2 x bfloat> [[VS2]], <vscale x 2 x i1> [[MASK]], i64 7, i64 [[VL]], i64 0)
+// CHECK-RV64-NEXT: ret <vscale x 2 x bfloat> [[TMP0]]
+//
+vbfloat16mf2_t test_vfnmsub_vf_bf16mf2_tumu(vbool32_t mask, vbfloat16mf2_t vd, __bf16 rs1, vbfloat16mf2_t vs2, size_t vl) {
+ return __riscv_vfnmsub_tumu(mask, vd, rs1, vs2, vl);
+}
+
+// CHECK-RV64-LABEL: define dso_local <vscale x 4 x bfloat> @test_vfnmsub_vv_bf16m1_tumu(
+// CHECK-RV64-SAME: <vscale x 4 x i1> [[MASK:%.*]], <vscale x 4 x bfloat> [[VD:%.*]], <vscale x 4 x bfloat> [[VS1:%.*]], <vscale x 4 x bfloat> [[VS2:%.*]], i64 noundef [[VL:%.*]]) #[[ATTR0]] {
+// CHECK-RV64-NEXT: [[ENTRY:.*:]]
+// CHECK-RV64-NEXT: [[TMP0:%.*]] = call <vscale x 4 x bfloat> @llvm.riscv.vfnmsub.mask.nxv4bf16.nxv4bf16.i64(<vscale x 4 x bfloat> [[VD]], <vscale x 4 x bfloat> [[VS1]], <vscale x 4 x bfloat> [[VS2]], <vscale x 4 x i1> [[MASK]], i64 7, i64 [[VL]], i64 0)
+// CHECK-RV64-NEXT: ret <vscale x 4 x bfloat> [[TMP0]]
+//
+vbfloat16m1_t test_vfnmsub_vv_bf16m1_tumu(vbool16_t mask, vbfloat16m1_t vd, vbfloat16m1_t vs1, vbfloat16m1_t vs2, size_t vl) {
+ return __riscv_vfnmsub_tumu(mask, vd, vs1, vs2, vl);
+}
+
+// CHECK-RV64-LABEL: define dso_local <vscale x 4 x bfloat> @test_vfnmsub_vf_bf16m1_tumu(
+// CHECK-RV64-SAME: <vscale x 4 x i1> [[MASK:%.*]], <vscale x 4 x bfloat> [[VD:%.*]], bfloat noundef [[RS1:%.*]], <vscale x 4 x bfloat> [[VS2:%.*]], i64 noundef [[VL:%.*]]) #[[ATTR0]] {
+// CHECK-RV64-NEXT: [[ENTRY:.*:]]
+// CHECK-RV64-NEXT: [[TMP0:%.*]] = call <vscale x 4 x bfloat> @llvm.riscv.vfnmsub.mask.nxv4bf16.bf16.i64(<vscale x 4 x bfloat> [[VD]], bfloat [[RS1]], <vscale x 4 x bfloat> [[VS2]], <vscale x 4 x i1> [[MASK]], i64 7, i64 [[VL]], i64 0)
+// CHECK-RV64-NEXT: ret <vscale x 4 x bfloat> [[TMP0]]
+//
+vbfloat16m1_t test_vfnmsub_vf_bf16m1_tumu(vbool16_t mask, vbfloat16m1_t vd, __bf16 rs1, vbfloat16m1_t vs2, size_t vl) {
+ return __riscv_vfnmsub_tumu(mask, vd, rs1, vs2, vl);
+}
+
+// CHECK-RV64-LABEL: define dso_local <vscale x 8 x bfloat> @test_vfnmsub_vv_bf16m2_tumu(
+// CHECK-RV64-SAME: <vscale x 8 x i1> [[MASK:%.*]], <vscale x 8 x bfloat> [[VD:%.*]], <vscale x 8 x bfloat> [[VS1:%.*]], <vscale x 8 x bfloat> [[VS2:%.*]], i64 noundef [[VL:%.*]]) #[[ATTR0]] {
+// CHECK-RV64-NEXT: [[ENTRY:.*:]]
+// CHECK-RV64-NEXT: [[TMP0:%.*]] = call <vscale x 8 x bfloat> @llvm.riscv.vfnmsub.mask.nxv8bf16.nxv8bf16.i64(<vscale x 8 x bfloat> [[VD]], <vscale x 8 x bfloat> [[VS1]], <vscale x 8 x bfloat> [[VS2]], <vscale x 8 x i1> [[MASK]], i64 7, i64 [[VL]], i64 0)
+// CHECK-RV64-NEXT: ret <vscale x 8 x bfloat> [[TMP0]]
+//
+vbfloat16m2_t test_vfnmsub_vv_bf16m2_tumu(vbool8_t mask, vbfloat16m2_t vd, vbfloat16m2_t vs1, vbfloat16m2_t vs2, size_t vl) {
+ return __riscv_vfnmsub_tumu(mask, vd, vs1, vs2, vl);
+}
+
+// CHECK-RV64-LABEL: define dso_local <vscale x 8 x bfloat> @test_vfnmsub_vf_bf16m2_tumu(
+// CHECK-RV64-SAME: <vscale x 8 x i1> [[MASK:%.*]], <vscale x 8 x bfloat> [[VD:%.*]], bfloat noundef [[RS1:%.*]], <vscale x 8 x bfloat> [[VS2:%.*]], i64 noundef [[VL:%.*]]) #[[ATTR0]] {
+// CHECK-RV64-NEXT: [[ENTRY:.*:]]
+// CHECK-RV64-NEXT: [[TMP0:%.*]] = call <vscale x 8 x bfloat> @llvm.riscv.vfnmsub.mask.nxv8bf16.bf16.i64(<vscale x 8 x bfloat> [[VD]], bfloat [[RS1]], <vscale x 8 x bfloat> [[VS2]], <vscale x 8 x i1> [[MASK]], i64 7, i64 [[VL]], i64 0)
+// CHECK-RV64-NEXT: ret <vscale x 8 x bfloat> [[TMP0]]
+//
+vbfloat16m2_t test_vfnmsub_vf_bf16m2_tumu(vbool8_t mask, vbfloat16m2_t vd, __bf16 rs1, vbfloat16m2_t vs2, size_t vl) {
+ return __riscv_vfnmsub_tumu(mask, vd, rs1, vs2, vl);
+}
+
+// CHECK-RV64-LABEL: define dso_local <vscale x 16 x bfloat> @test_vfnmsub_vv_bf16m4_tumu(
+// CHECK-RV64-SAME: <vscale x 16 x i1> [[MASK:%.*]], <vscale x 16 x bfloat> [[VD:%.*]], <vscale x 16 x bfloat> [[VS1:%.*]], <vscale x 16 x bfloat> [[VS2:%.*]], i64 noundef [[VL:%.*]]) #[[ATTR0]] {
+// CHECK-RV64-NEXT: [[ENTRY:.*:]]
+// CHECK-RV64-NEXT: [[TMP0:%.*]] = call <vscale x 16 x bfloat> @llvm.riscv.vfnmsub.mask.nxv16bf16.nxv16bf16.i64(<vscale x 16 x bfloat> [[VD]], <vscale x 16 x bfloat> [[VS1]], <vscale x 16 x bfloat> [[VS2]], <vscale x 16 x i1> [[MASK]], i64 7, i64 [[VL]], i64 0)
+// CHECK-RV64-NEXT: ret <vscale x 16 x bfloat> [[TMP0]]
+//
+vbfloat16m4_t test_vfnmsub_vv_bf16m4_tumu(vbool4_t mask, vbfloat16m4_t vd, vbfloat16m4_t vs1, vbfloat16m4_t vs2, size_t vl) {
+ return __riscv_vfnmsub_tumu(mask, vd, vs1, vs2, vl);
+}
+
+// CHECK-RV64-LABEL: define dso_local <vscale x 16 x bfloat> @test_vfnmsub_vf_bf16m4_tumu(
+// CHECK-RV64-SAME: <vscale x 16 x i1> [[MASK:%.*]], <vscale x 16 x bfloat> [[VD:%.*]], bfloat noundef [[RS1:%.*]], <vscale x 16 x bfloat> [[VS2:%.*]], i64 noundef [[VL:%.*]]) #[[ATTR0]] {
+// CHECK-RV64-NEXT: [[ENTRY:.*:]]
+// CHECK-RV64-NEXT: [[TMP0:%.*]] = call <vscale x 16 x bfloat> @llvm.riscv.vfnmsub.mask.nxv16bf16.bf16.i64(<vscale x 16 x bfloat> [[VD]], bfloat [[RS1]], <vscale x 16 x bfloat> [[VS2]], <vscale x 16 x i1> [[MASK]], i64 7, i64 [[VL]], i64 0)
+// CHECK-RV64-NEXT: ret <vscale x 16 x bfloat> [[TMP0]]
+//
+vbfloat16m4_t test_vfnmsub_vf_bf16m4_tumu(vbool4_t mask, vbfloat16m4_t vd, __bf16 rs1, vbfloat16m4_t vs2, size_t vl) {
+ return __riscv_vfnmsub_tumu(mask, vd, rs1, vs2, vl);
+}
+
+// CHECK-RV64-LABEL: define dso_local <vscale x 32 x bfloat> @test_vfnmsub_vv_bf16m8_tumu(
+// CHECK-RV64-SAME: <vscale x 32 x i1> [[MASK:%.*]], <vscale x 32 x bfloat> [[VD:%.*]], <vscale x 32 x bfloat> [[VS1:%.*]], <vscale x 32 x bfloat> [[VS2:%.*]], i64 noundef [[VL:%.*]]) #[[ATTR0]] {
+// CHECK-RV64-NEXT: [[ENTRY:.*:]]
+// CHECK-RV64-NEXT: [[TMP0:%.*]] = call <vscale x 32 x bfloat> @llvm.riscv.vfnmsub.mask.nxv32bf16.nxv32bf16.i64(<vscale x 32 x bfloat> [[VD]], <vscale x 32 x bfloat> [[VS1]], <vscale x 32 x bfloat> [[VS2]], <vscale x 32 x i1> [[MASK]], i64 7, i64 [[VL]], i64 0)
+// CHECK-RV64-NEXT: ret <vscale x 32 x bfloat> [[TMP0]]
+//
+vbfloat16m8_t test_vfnmsub_vv_bf16m8_tumu(vbool2_t mask, vbfloat16m8_t vd, vbfloat16m8_t vs1, vbfloat16m8_t vs2, size_t vl) {
+ return __riscv_vfnmsub_tumu(mask, vd, vs1, vs2, vl);
+}
+
+// CHECK-RV64-LABEL: define dso_local <vscale x 32 x bfloat> @test_vfnmsub_vf_bf16m8_tumu(
+// CHECK-RV64-SAME: <vscale x 32 x i1> [[MASK:%.*]], <vscale x 32 x bfloat> [[VD:%.*]], bfloat noundef [[RS1:%.*]], <vscale x 32 x bfloat> [[VS2:%.*]], i64 noundef [[VL:%.*]]) #[[ATTR0]] {
+// CHECK-RV64-NEXT: [[ENTRY:.*:]]
+// CHECK-RV64-NEXT: [[TMP0:%.*]] = call <vscale x 32 x bfloat> @llvm.riscv.vfnmsub.mask.nxv32bf16.bf16.i64(<vscale x 32 x bfloat> [[VD]], bfloat [[RS1]], <vscale x 32 x bfloat> [[VS2]], <vscale x 32 x i1> [[MASK]], i64 7, i64 [[VL]], i64 0)
+// CHECK-RV64-NEXT: ret <vscale x 32 x bfloat> [[TMP0]]
+//
+vbfloat16m8_t test_vfnmsub_vf_bf16m8_tumu(vbool2_t mask, vbfloat16m8_t vd, __bf16 rs1, vbfloat16m8_t vs2, size_t vl) {
+ return __riscv_vfnmsub_tumu(mask, vd, rs1, vs2, vl);
+}
+
+// CHECK-RV64-LABEL: define dso_local <vscale x 1 x bfloat> @test_vfnmsub_vv_bf16mf4_mu(
+// CHECK-RV64-SAME: <vscale x 1 x i1> [[MASK:%.*]], <vscale x 1 x bfloat> [[VD:%.*]], <vscale x 1 x bfloat> [[VS1:%.*]], <vscale x 1 x bfloat> [[VS2:%.*]], i64 noundef [[VL:%.*]]) #[[ATTR0]] {
+// CHECK-RV64-NEXT: [[ENTRY:.*:]]
+// CHECK-RV64-NEXT: [[TMP0:%.*]] = call <vscale x 1 x bfloat> @llvm.riscv.vfnmsub.mask.nxv1bf16.nxv1bf16.i64(<vscale x 1 x bfloat> [[VD]], <vscale x 1 x bfloat> [[VS1]], <vscale x 1 x bfloat> [[VS2]], <vscale x 1 x i1> [[MASK]], i64 7, i64 [[VL]], i64 1)
+// CHECK-RV64-NEXT: ret <vscale x 1 x bfloat> [[TMP0]]
+//
+vbfloat16mf4_t test_vfnmsub_vv_bf16mf4_mu(vbool64_t mask, vbfloat16mf4_t vd, vbfloat16mf4_t vs1, vbfloat16mf4_t vs2, size_t vl) {
+ return __riscv_vfnmsub_mu(mask, vd, vs1, vs2, vl);
+}
+
+// CHECK-RV64-LABEL: define dso_local <vscale x 1 x bfloat> @test_vfnmsub_vf_bf16mf4_mu(
+// CHECK-RV64-SAME: <vscale x 1 x i1> [[MASK:%.*]], <vscale x 1 x bfloat> [[VD:%.*]], bfloat noundef [[RS1:%.*]], <vscale x 1 x bfloat> [[VS2:%.*]], i64 noundef [[VL:%.*]]) #[[ATTR0]] {
+// CHECK-RV64-NEXT: [[ENTRY:.*:]]
+// CHECK-RV64-NEXT: [[TMP0:%.*]] = call <vscale x 1 x bfloat> @llvm.riscv.vfnmsub.mask.nxv1bf16.bf16.i64(<vscale x 1 x bfloat> [[VD]], bfloat [[RS1]], <vscale x 1 x bfloat> [[VS2]], <vscale x 1 x i1> [[MASK]], i64 7, i64 [[VL]], i64 1)
+// CHECK-RV64-NEXT: ret <vscale x 1 x bfloat> [[TMP0]]
+//
+vbfloat16mf4_t test_vfnmsub_vf_bf16mf4_mu(vbool64_t mask, vbfloat16mf4_t vd, __bf16 rs1, vbfloat16mf4_t vs2, size_t vl) {
+ return __riscv_vfnmsub_mu(mask, vd, rs1, vs2, vl);
+}
+
+// CHECK-RV64-LABEL: define dso_local <vscale x 2 x bfloat> @test_vfnmsub_vv_bf16mf2_mu(
+// CHECK-RV64-SAME: <vscale x 2 x i1> [[MASK:%.*]], <vscale x 2 x bfloat> [[VD:%.*]], <vscale x 2 x bfloat> [[VS1:%.*]], <vscale x 2 x bfloat> [[VS2:%.*]], i64 noundef [[VL:%.*]]) #[[ATTR0]] {
+// CHECK-RV64-NEXT: [[ENTRY:.*:]]
+// CHECK-RV64-NEXT: [[TMP0:%.*]] = call <vscale x 2 x bfloat> @llvm.riscv.vfnmsub.mask.nxv2bf16.nxv2bf16.i64(<vscale x 2 x bfloat> [[VD]], <vscale x 2 x bfloat> [[VS1]], <vscale x 2 x bfloat> [[VS2]], <vscale x 2 x i1> [[MASK]], i64 7, i64 [[VL]], i64 1)
+// CHECK-RV64-NEXT: ret <vscale x 2 x bfloat> [[TMP0]]
+//
+vbfloat16mf2_t test_vfnmsub_vv_bf16mf2_mu(vbool32_t mask, vbfloat16mf2_t vd, vbfloat16mf2_t vs1, vbfloat16mf2_t vs2, size_t vl) {
+ return __riscv_vfnmsub_mu(mask, vd, vs1, vs2, vl);
+}
+
+// CHECK-RV64-LABEL: define dso_local <vscale x 2 x bfloat> @test_vfnmsub_vf_bf16mf2_mu(
+// CHECK-RV64-SAME: <vscale x 2 x i1> [[MASK:%.*]], <vscale x 2 x bfloat> [[VD:%.*]], bfloat noundef [[RS1:%.*]], <vscale x 2 x bfloat> [[VS2:%.*]], i64 noundef [[VL:%.*]]) #[[ATTR0]] {
+// CHECK-RV64-NEXT: [[ENTRY:.*:]]
+// CHECK-RV64-NEXT: [[TMP0:%.*]] = call <vscale x 2 x bfloat> @llvm.riscv.vfnmsub.mask.nxv2bf16.bf16.i64(<vscale x 2 x bfloat> [[VD]], bfloat [[RS1]], <vscale x 2 x bfloat> [[VS2]], <vscale x 2 x i1> [[MASK]], i64 7, i64 [[VL]], i64 1)
+// CHECK-RV64-NEXT: ret <vscale x 2 x bfloat> [[TMP0]]
+//
+vbfloat16mf2_t test_vfnmsub_vf_bf16mf2_mu(vbool32_t mask, vbfloat16mf2_t vd, __bf16 rs1, vbfloat16mf2_t vs2, size_t vl) {
+ return __riscv_vfnmsub_mu(mask, vd, rs1, vs2, vl);
+}
+
+// CHECK-RV64-LABEL: define dso_local <vscale x 4 x bfloat> @test_vfnmsub_vv_bf16m1_mu(
+// CHECK-RV64-SAME: <vscale x 4 x i1> [[MASK:%.*]], <vscale x 4 x bfloat> [[VD:%.*]], <vscale x 4 x bfloat> [[VS1:%.*]], <vscale x 4 x bfloat> [[VS2:%.*]], i64 noundef [[VL:%.*]]) #[[ATTR0]] {
+// CHECK-RV64-NEXT: [[ENTRY:.*:]]
+// CHECK-RV64-NEXT: [[TMP0:%.*]] = call <vscale x 4 x bfloat> @llvm.riscv.vfnmsub.mask.nxv4bf16.nxv4bf16.i64(<vscale x 4 x bfloat> [[VD]], <vscale x 4 x bfloat> [[VS1]], <vscale x 4 x bfloat> [[VS2]], <vscale x 4 x i1> [[MASK]], i64 7, i64 [[VL]], i64 1)
+// CHECK-RV64-NEXT: ret <vscale x 4 x bfloat> [[TMP0]]
+//
+vbfloat16m1_t test_vfnmsub_vv_bf16m1_mu(vbool16_t mask, vbfloat16m1_t vd, vbfloat16m1_t vs1, vbfloat16m1_t vs2, size_t vl) {
+ return __riscv_vfnmsub_mu(mask, vd, vs1, vs2, vl);
+}
+
+// CHECK-RV64-LABEL: define dso_local <vscale x 4 x bfloat> @test_vfnmsub_vf_bf16m1_mu(
+// CHECK-RV64-SAME: <vscale x 4 x i1> [[MASK:%.*]], <vscale x 4 x bfloat> [[VD:%.*]], bfloat noundef [[RS1:%.*]], <vscale x 4 x bfloat> [[VS2:%.*]], i64 noundef [[VL:%.*]]) #[[ATTR0]] {
+// CHECK-RV64-NEXT: [[ENTRY:.*:]]
+// CHECK-RV64-NEXT: [[TMP0:%.*]] = call <vscale x 4 x bfloat> @llvm.riscv.vfnmsub.mask.nxv4bf16.bf16.i64(<vscale x 4 x bfloat> [[VD]], bfloat [[RS1]], <vscale x 4 x bfloat> [[VS2]], <vscale x 4 x i1> [[MASK]], i64 7, i64 [[VL]], i64 1)
+// CHECK-RV64-NEXT: ret <vscale x 4 x bfloat> [[TMP0]]
+//
+vbfloat16m1_t test_vfnmsub_vf_bf16m1_mu(vbool16_t mask, vbfloat16m1_t vd, __bf16 rs1, vbfloat16m1_t vs2, size_t vl) {
+ return __riscv_vfnmsub_mu(mask, vd, rs1, vs2, vl);
+}
+
+// CHECK-RV64-LABEL: define dso_local <vscale x 8 x bfloat> @test_vfnmsub_vv_bf16m2_mu(
+// CHECK-RV64-SAME: <vscale x 8 x i1> [[MASK:%.*]], <vscale x 8 x bfloat> [[VD:%.*]], <vscale x 8 x bfloat> [[VS1:%.*]], <vscale x 8 x bfloat> [[VS2:%.*]], i64 noundef [[VL:%.*]]) #[[ATTR0]] {
+// CHECK-RV64-NEXT: [[ENTRY:.*:]]
+// CHECK-RV64-NEXT: [[TMP0:%.*]] = call <vscale x 8 x bfloat> @llvm.riscv.vfnmsub.mask.nxv8bf16.nxv8bf16.i64(<vscale x 8 x bfloat> [[VD]], <vscale x 8 x bfloat> [[VS1]], <vscale x 8 x bfloat> [[VS2]], <vscale x 8 x i1> [[MASK]], i64 7, i64 [[VL]], i64 1)
+// CHECK-RV64-NEXT: ret <vscale x 8 x bfloat> [[TMP0]]
+//
+vbfloat16m2_t test_vfnmsub_vv_bf16m2_mu(vbool8_t mask, vbfloat16m2_t vd, vbfloat16m2_t vs1, vbfloat16m2_t vs2, size_t vl) {
+ return __riscv_vfnmsub_mu(mask, vd, vs1, vs2, vl);
+}
+
+// CHECK-RV64-LABEL: define dso_local <vscale x 8 x bfloat> @test_vfnmsub_vf_bf16m2_mu(
+// CHECK-RV64-SAME: <vscale x 8 x i1> [[MASK:%.*]], <vscale x 8 x bfloat> [[VD:%.*]], bfloat noundef [[RS1:%.*]], <vscale x 8 x bfloat> [[VS2:%.*]], i64 noundef [[VL:%.*]]) #[[ATTR0]] {
+// CHECK-RV64-NEXT: [[ENTRY:.*:]]
+// CHECK-RV64-NEXT: [[TMP0:%.*]] = call <vscale x 8 x bfloat> @llvm.riscv.vfnmsub.mask.nxv8bf16.bf16.i64(<vscale x 8 x bfloat> [[VD]], bfloat [[RS1]], <vscale x 8 x bfloat> [[VS2]], <vscale x 8 x i1> [[MASK]], i64 7, i64 [[VL]], i64 1)
+// CHECK-RV64-NEXT: ret <vscale x 8 x bfloat> [[TMP0]]
+//
+vbfloat16m2_t test_vfnmsub_vf_bf16m2_mu(vbool8_t mask, vbfloat16m2_t vd, __bf16 rs1, vbfloat16m2_t vs2, size_t vl) {
+ return __riscv_vfnmsub_mu(mask, vd, rs1, vs2, vl);
+}
+
+// CHECK-RV64-LABEL: define dso_local <vscale x 16 x bfloat> @test_vfnmsub_vv_bf16m4_mu(
+// CHECK-RV64-SAME: <vscale x 16 x i1> [[MASK:%.*]], <vscale x 16 x bfloat> [[VD:%.*]], <vscale x 16 x bfloat> [[VS1:%.*]], <vscale x 16 x bfloat> [[VS2:%.*]], i64 noundef [[VL:%.*]]) #[[ATTR0]] {
+// CHECK-RV64-NEXT: [[ENTRY:.*:]]
+// CHECK-RV64-NEXT: [[TMP0:%.*]] = call <vscale x 16 x bfloat> @llvm.riscv.vfnmsub.mask.nxv16bf16.nxv16bf16.i64(<vscale x 16 x bfloat> [[VD]], <vscale x 16 x bfloat> [[VS1]], <vscale x 16 x bfloat> [[VS2]], <vscale x 16 x i1> [[MASK]], i64 7, i64 [[VL]], i64 1)
+// CHECK-RV64-NEXT: ret <vscale x 16 x bfloat> [[TMP0]]
+//
+vbfloat16m4_t test_vfnmsub_vv_bf16m4_mu(vbool4_t mask, vbfloat16m4_t vd, vbfloat16m4_t vs1, vbfloat16m4_t vs2, size_t vl) {
+ return __riscv_vfnmsub_mu(mask, vd, vs1, vs2, vl);
+}
+
+// CHECK-RV64-LABEL: define dso_local <vscale x 16 x bfloat> @test_vfnmsub_vf_bf16m4_mu(
+// CHECK-RV64-SAME: <vscale x 16 x i1> [[MASK:%.*]], <vscale x 16 x bfloat> [[VD:%.*]], bfloat noundef [[RS1:%.*]], <vscale x 16 x bfloat> [[VS2:%.*]], i64 noundef [[VL:%.*]]) #[[ATTR0]] {
+// CHECK-RV64-NEXT: [[ENTRY:.*:]]
+// CHECK-RV64-NEXT: [[TMP0:%.*]] = call <vscale x 16 x bfloat> @llvm.riscv.vfnmsub.mask.nxv16bf16.bf16.i64(<vscale x 16 x bfloat> [[VD]], bfloat [[RS1]], <vscale x 16 x bfloat> [[VS2]], <vscale x 16 x i1> [[MASK]], i64 7, i64 [[VL]], i64 1)
+// CHECK-RV64-NEXT: ret <vscale x 16 x bfloat> [[TMP0]]
+//
+vbfloat16m4_t test_vfnmsub_vf_bf16m4_mu(vbool4_t mask, vbfloat16m4_t vd, __bf16 rs1, vbfloat16m4_t vs2, size_t vl) {
+ return __riscv_vfnmsub_mu(mask, vd, rs1, vs2, vl);
+}
+
+// CHECK-RV64-LABEL: define dso_local <vscale x 32 x bfloat> @test_vfnmsub_vv_bf16m8_mu(
+// CHECK-RV64-SAME: <vscale x 32 x i1> [[MASK:%.*]], <vscale x 32 x bfloat> [[VD:%.*]], <vscale x 32 x bfloat> [[VS1:%.*]], <vscale x 32 x bfloat> [[VS2:%.*]], i64 noundef [[VL:%.*]]) #[[ATTR0]] {
+// CHECK-RV64-NEXT: [[ENTRY:.*:]]
+// CHECK-RV64-NEXT: [[TMP0:%.*]] = call <vscale x 32 x bfloat> @llvm.riscv.vfnmsub.mask.nxv32bf16.nxv32bf16.i64(<vscale x 32 x bfloat> [[VD]], <vscale x 32 x bfloat> [[VS1]], <vscale x 32 x bfloat> [[VS2]], <vscale x 32 x i1> [[MASK]], i64 7, i64 [[VL]], i64 1)
+// CHECK-RV64-NEXT: ret <vscale x 32 x bfloat> [[TMP0]]
+//
+vbfloat16m8_t test_vfnmsub_vv_bf16m8_mu(vbool2_t mask, vbfloat16m8_t vd, vbfloat16m8_t vs1, vbfloat16m8_t vs2, size_t vl) {
+ return __riscv_vfnmsub_mu(mask, vd, vs1, vs2, vl);
+}
+
+// CHECK-RV64-LABEL: define dso_local <vscale x 32 x bfloat> @test_vfnmsub_vf_bf16m8_mu(
+// CHECK-RV64-SAME: <vscale x 32 x i1> [[MASK:%.*]], <vscale x 32 x bfloat> [[VD:%.*]], bfloat noundef [[RS1:%.*]], <vscale x 32 x bfloat> [[VS2:%.*]], i64 noundef [[VL:%.*]]) #[[ATTR0]] {
+// CHECK-RV64-NEXT: [[ENTRY:.*:]]
+// CHECK-RV64-NEXT: [[TMP0:%.*]] = call <vscale x 32 x bfloat> @llvm.riscv.vfnmsub.mask.nxv32bf16.bf16.i64(<vscale x 32 x bfloat> [[VD]], bfloat [[RS1]], <vscale x 32 x bfloat> [[VS2]], <vscale x 32 x i1> [[MASK]], i64 7, i64 [[VL]], i64 1)
+// CHECK-RV64-NEXT: ret <vscale x 32 x bfloat> [[TMP0]]
+//
+vbfloat16m8_t test_vfnmsub_vf_bf16m8_mu(vbool2_t mask, vbfloat16m8_t vd, __bf16 rs1, vbfloat16m8_t vs2, size_t vl) {
+ return __riscv_vfnmsub_mu(mask, vd, rs1, vs2, vl);
+}
+
diff --git a/clang/test/CodeGen/RISCV/rvv-intrinsics-autogenerated/zvfbfa/policy/overloaded/vfrec7.c b/clang/test/CodeGen/RISCV/rvv-intrinsics-autogenerated/zvfbfa/policy/overloaded/vfrec7.c
new file mode 100644
index 0000000..f8e5a33
--- /dev/null
+++ b/clang/test/CodeGen/RISCV/rvv-intrinsics-autogenerated/zvfbfa/policy/overloaded/vfrec7.c
@@ -0,0 +1,249 @@
+// NOTE: Assertions have been autogenerated by utils/update_cc_test_checks.py UTC_ARGS: --version 5
+// REQUIRES: riscv-registered-target
+// RUN: %clang_cc1 -triple riscv64 -target-feature +v \
+// RUN: -target-feature +experimental-zvfbfa -disable-O0-optnone \
+// RUN: -emit-llvm %s -o - | opt -S -passes=mem2reg | \
+// RUN: FileCheck --check-prefix=CHECK-RV64 %s
+
+#include <riscv_vector.h>
+
+// CHECK-RV64-LABEL: define dso_local <vscale x 1 x bfloat> @test_vfrec7_v_bf16mf4_tu(
+// CHECK-RV64-SAME: <vscale x 1 x bfloat> [[MASKEDOFF:%.*]], <vscale x 1 x bfloat> [[OP1:%.*]], i64 noundef [[VL:%.*]]) #[[ATTR0:[0-9]+]] {
+// CHECK-RV64-NEXT: [[ENTRY:.*:]]
+// CHECK-RV64-NEXT: [[TMP0:%.*]] = call <vscale x 1 x bfloat> @llvm.riscv.vfrec7.nxv1bf16.i64(<vscale x 1 x bfloat> [[MASKEDOFF]], <vscale x 1 x bfloat> [[OP1]], i64 7, i64 [[VL]])
+// CHECK-RV64-NEXT: ret <vscale x 1 x bfloat> [[TMP0]]
+//
+vbfloat16mf4_t test_vfrec7_v_bf16mf4_tu(vbfloat16mf4_t maskedoff, vbfloat16mf4_t op1, size_t vl) {
+ return __riscv_vfrec7_tu(maskedoff, op1, vl);
+}
+
+// CHECK-RV64-LABEL: define dso_local <vscale x 2 x bfloat> @test_vfrec7_v_bf16mf2_tu(
+// CHECK-RV64-SAME: <vscale x 2 x bfloat> [[MASKEDOFF:%.*]], <vscale x 2 x bfloat> [[OP1:%.*]], i64 noundef [[VL:%.*]]) #[[ATTR0]] {
+// CHECK-RV64-NEXT: [[ENTRY:.*:]]
+// CHECK-RV64-NEXT: [[TMP0:%.*]] = call <vscale x 2 x bfloat> @llvm.riscv.vfrec7.nxv2bf16.i64(<vscale x 2 x bfloat> [[MASKEDOFF]], <vscale x 2 x bfloat> [[OP1]], i64 7, i64 [[VL]])
+// CHECK-RV64-NEXT: ret <vscale x 2 x bfloat> [[TMP0]]
+//
+vbfloat16mf2_t test_vfrec7_v_bf16mf2_tu(vbfloat16mf2_t maskedoff, vbfloat16mf2_t op1, size_t vl) {
+ return __riscv_vfrec7_tu(maskedoff, op1, vl);
+}
+
+// CHECK-RV64-LABEL: define dso_local <vscale x 4 x bfloat> @test_vfrec7_v_bf16m1_tu(
+// CHECK-RV64-SAME: <vscale x 4 x bfloat> [[MASKEDOFF:%.*]], <vscale x 4 x bfloat> [[OP1:%.*]], i64 noundef [[VL:%.*]]) #[[ATTR0]] {
+// CHECK-RV64-NEXT: [[ENTRY:.*:]]
+// CHECK-RV64-NEXT: [[TMP0:%.*]] = call <vscale x 4 x bfloat> @llvm.riscv.vfrec7.nxv4bf16.i64(<vscale x 4 x bfloat> [[MASKEDOFF]], <vscale x 4 x bfloat> [[OP1]], i64 7, i64 [[VL]])
+// CHECK-RV64-NEXT: ret <vscale x 4 x bfloat> [[TMP0]]
+//
+vbfloat16m1_t test_vfrec7_v_bf16m1_tu(vbfloat16m1_t maskedoff, vbfloat16m1_t op1, size_t vl) {
+ return __riscv_vfrec7_tu(maskedoff, op1, vl);
+}
+
+// CHECK-RV64-LABEL: define dso_local <vscale x 8 x bfloat> @test_vfrec7_v_bf16m2_tu(
+// CHECK-RV64-SAME: <vscale x 8 x bfloat> [[MASKEDOFF:%.*]], <vscale x 8 x bfloat> [[OP1:%.*]], i64 noundef [[VL:%.*]]) #[[ATTR0]] {
+// CHECK-RV64-NEXT: [[ENTRY:.*:]]
+// CHECK-RV64-NEXT: [[TMP0:%.*]] = call <vscale x 8 x bfloat> @llvm.riscv.vfrec7.nxv8bf16.i64(<vscale x 8 x bfloat> [[MASKEDOFF]], <vscale x 8 x bfloat> [[OP1]], i64 7, i64 [[VL]])
+// CHECK-RV64-NEXT: ret <vscale x 8 x bfloat> [[TMP0]]
+//
+vbfloat16m2_t test_vfrec7_v_bf16m2_tu(vbfloat16m2_t maskedoff, vbfloat16m2_t op1, size_t vl) {
+ return __riscv_vfrec7_tu(maskedoff, op1, vl);
+}
+
+// CHECK-RV64-LABEL: define dso_local <vscale x 16 x bfloat> @test_vfrec7_v_bf16m4_tu(
+// CHECK-RV64-SAME: <vscale x 16 x bfloat> [[MASKEDOFF:%.*]], <vscale x 16 x bfloat> [[OP1:%.*]], i64 noundef [[VL:%.*]]) #[[ATTR0]] {
+// CHECK-RV64-NEXT: [[ENTRY:.*:]]
+// CHECK-RV64-NEXT: [[TMP0:%.*]] = call <vscale x 16 x bfloat> @llvm.riscv.vfrec7.nxv16bf16.i64(<vscale x 16 x bfloat> [[MASKEDOFF]], <vscale x 16 x bfloat> [[OP1]], i64 7, i64 [[VL]])
+// CHECK-RV64-NEXT: ret <vscale x 16 x bfloat> [[TMP0]]
+//
+vbfloat16m4_t test_vfrec7_v_bf16m4_tu(vbfloat16m4_t maskedoff, vbfloat16m4_t op1, size_t vl) {
+ return __riscv_vfrec7_tu(maskedoff, op1, vl);
+}
+
+// CHECK-RV64-LABEL: define dso_local <vscale x 32 x bfloat> @test_vfrec7_v_bf16m8_tu(
+// CHECK-RV64-SAME: <vscale x 32 x bfloat> [[MASKEDOFF:%.*]], <vscale x 32 x bfloat> [[OP1:%.*]], i64 noundef [[VL:%.*]]) #[[ATTR0]] {
+// CHECK-RV64-NEXT: [[ENTRY:.*:]]
+// CHECK-RV64-NEXT: [[TMP0:%.*]] = call <vscale x 32 x bfloat> @llvm.riscv.vfrec7.nxv32bf16.i64(<vscale x 32 x bfloat> [[MASKEDOFF]], <vscale x 32 x bfloat> [[OP1]], i64 7, i64 [[VL]])
+// CHECK-RV64-NEXT: ret <vscale x 32 x bfloat> [[TMP0]]
+//
+vbfloat16m8_t test_vfrec7_v_bf16m8_tu(vbfloat16m8_t maskedoff, vbfloat16m8_t op1, size_t vl) {
+ return __riscv_vfrec7_tu(maskedoff, op1, vl);
+}
+
+// CHECK-RV64-LABEL: define dso_local <vscale x 1 x bfloat> @test_vfrec7_v_bf16mf4_tum(
+// CHECK-RV64-SAME: <vscale x 1 x i1> [[MASK:%.*]], <vscale x 1 x bfloat> [[MASKEDOFF:%.*]], <vscale x 1 x bfloat> [[OP1:%.*]], i64 noundef [[VL:%.*]]) #[[ATTR0]] {
+// CHECK-RV64-NEXT: [[ENTRY:.*:]]
+// CHECK-RV64-NEXT: [[TMP0:%.*]] = call <vscale x 1 x bfloat> @llvm.riscv.vfrec7.mask.nxv1bf16.i64(<vscale x 1 x bfloat> [[MASKEDOFF]], <vscale x 1 x bfloat> [[OP1]], <vscale x 1 x i1> [[MASK]], i64 7, i64 [[VL]], i64 2)
+// CHECK-RV64-NEXT: ret <vscale x 1 x bfloat> [[TMP0]]
+//
+vbfloat16mf4_t test_vfrec7_v_bf16mf4_tum(vbool64_t mask, vbfloat16mf4_t maskedoff, vbfloat16mf4_t op1, size_t vl) {
+ return __riscv_vfrec7_tum(mask, maskedoff, op1, vl);
+}
+
+// CHECK-RV64-LABEL: define dso_local <vscale x 2 x bfloat> @test_vfrec7_v_bf16mf2_tum(
+// CHECK-RV64-SAME: <vscale x 2 x i1> [[MASK:%.*]], <vscale x 2 x bfloat> [[MASKEDOFF:%.*]], <vscale x 2 x bfloat> [[OP1:%.*]], i64 noundef [[VL:%.*]]) #[[ATTR0]] {
+// CHECK-RV64-NEXT: [[ENTRY:.*:]]
+// CHECK-RV64-NEXT: [[TMP0:%.*]] = call <vscale x 2 x bfloat> @llvm.riscv.vfrec7.mask.nxv2bf16.i64(<vscale x 2 x bfloat> [[MASKEDOFF]], <vscale x 2 x bfloat> [[OP1]], <vscale x 2 x i1> [[MASK]], i64 7, i64 [[VL]], i64 2)
+// CHECK-RV64-NEXT: ret <vscale x 2 x bfloat> [[TMP0]]
+//
+vbfloat16mf2_t test_vfrec7_v_bf16mf2_tum(vbool32_t mask, vbfloat16mf2_t maskedoff, vbfloat16mf2_t op1, size_t vl) {
+ return __riscv_vfrec7_tum(mask, maskedoff, op1, vl);
+}
+
+// CHECK-RV64-LABEL: define dso_local <vscale x 4 x bfloat> @test_vfrec7_v_bf16m1_tum(
+// CHECK-RV64-SAME: <vscale x 4 x i1> [[MASK:%.*]], <vscale x 4 x bfloat> [[MASKEDOFF:%.*]], <vscale x 4 x bfloat> [[OP1:%.*]], i64 noundef [[VL:%.*]]) #[[ATTR0]] {
+// CHECK-RV64-NEXT: [[ENTRY:.*:]]
+// CHECK-RV64-NEXT: [[TMP0:%.*]] = call <vscale x 4 x bfloat> @llvm.riscv.vfrec7.mask.nxv4bf16.i64(<vscale x 4 x bfloat> [[MASKEDOFF]], <vscale x 4 x bfloat> [[OP1]], <vscale x 4 x i1> [[MASK]], i64 7, i64 [[VL]], i64 2)
+// CHECK-RV64-NEXT: ret <vscale x 4 x bfloat> [[TMP0]]
+//
+vbfloat16m1_t test_vfrec7_v_bf16m1_tum(vbool16_t mask, vbfloat16m1_t maskedoff, vbfloat16m1_t op1, size_t vl) {
+ return __riscv_vfrec7_tum(mask, maskedoff, op1, vl);
+}
+
+// CHECK-RV64-LABEL: define dso_local <vscale x 8 x bfloat> @test_vfrec7_v_bf16m2_tum(
+// CHECK-RV64-SAME: <vscale x 8 x i1> [[MASK:%.*]], <vscale x 8 x bfloat> [[MASKEDOFF:%.*]], <vscale x 8 x bfloat> [[OP1:%.*]], i64 noundef [[VL:%.*]]) #[[ATTR0]] {
+// CHECK-RV64-NEXT: [[ENTRY:.*:]]
+// CHECK-RV64-NEXT: [[TMP0:%.*]] = call <vscale x 8 x bfloat> @llvm.riscv.vfrec7.mask.nxv8bf16.i64(<vscale x 8 x bfloat> [[MASKEDOFF]], <vscale x 8 x bfloat> [[OP1]], <vscale x 8 x i1> [[MASK]], i64 7, i64 [[VL]], i64 2)
+// CHECK-RV64-NEXT: ret <vscale x 8 x bfloat> [[TMP0]]
+//
+vbfloat16m2_t test_vfrec7_v_bf16m2_tum(vbool8_t mask, vbfloat16m2_t maskedoff, vbfloat16m2_t op1, size_t vl) {
+ return __riscv_vfrec7_tum(mask, maskedoff, op1, vl);
+}
+
+// CHECK-RV64-LABEL: define dso_local <vscale x 16 x bfloat> @test_vfrec7_v_bf16m4_tum(
+// CHECK-RV64-SAME: <vscale x 16 x i1> [[MASK:%.*]], <vscale x 16 x bfloat> [[MASKEDOFF:%.*]], <vscale x 16 x bfloat> [[OP1:%.*]], i64 noundef [[VL:%.*]]) #[[ATTR0]] {
+// CHECK-RV64-NEXT: [[ENTRY:.*:]]
+// CHECK-RV64-NEXT: [[TMP0:%.*]] = call <vscale x 16 x bfloat> @llvm.riscv.vfrec7.mask.nxv16bf16.i64(<vscale x 16 x bfloat> [[MASKEDOFF]], <vscale x 16 x bfloat> [[OP1]], <vscale x 16 x i1> [[MASK]], i64 7, i64 [[VL]], i64 2)
+// CHECK-RV64-NEXT: ret <vscale x 16 x bfloat> [[TMP0]]
+//
+vbfloat16m4_t test_vfrec7_v_bf16m4_tum(vbool4_t mask, vbfloat16m4_t maskedoff, vbfloat16m4_t op1, size_t vl) {
+ return __riscv_vfrec7_tum(mask, maskedoff, op1, vl);
+}
+
+// CHECK-RV64-LABEL: define dso_local <vscale x 32 x bfloat> @test_vfrec7_v_bf16m8_tum(
+// CHECK-RV64-SAME: <vscale x 32 x i1> [[MASK:%.*]], <vscale x 32 x bfloat> [[MASKEDOFF:%.*]], <vscale x 32 x bfloat> [[OP1:%.*]], i64 noundef [[VL:%.*]]) #[[ATTR0]] {
+// CHECK-RV64-NEXT: [[ENTRY:.*:]]
+// CHECK-RV64-NEXT: [[TMP0:%.*]] = call <vscale x 32 x bfloat> @llvm.riscv.vfrec7.mask.nxv32bf16.i64(<vscale x 32 x bfloat> [[MASKEDOFF]], <vscale x 32 x bfloat> [[OP1]], <vscale x 32 x i1> [[MASK]], i64 7, i64 [[VL]], i64 2)
+// CHECK-RV64-NEXT: ret <vscale x 32 x bfloat> [[TMP0]]
+//
+vbfloat16m8_t test_vfrec7_v_bf16m8_tum(vbool2_t mask, vbfloat16m8_t maskedoff, vbfloat16m8_t op1, size_t vl) {
+ return __riscv_vfrec7_tum(mask, maskedoff, op1, vl);
+}
+
+// CHECK-RV64-LABEL: define dso_local <vscale x 1 x bfloat> @test_vfrec7_v_bf16mf4_tumu(
+// CHECK-RV64-SAME: <vscale x 1 x i1> [[MASK:%.*]], <vscale x 1 x bfloat> [[MASKEDOFF:%.*]], <vscale x 1 x bfloat> [[OP1:%.*]], i64 noundef [[VL:%.*]]) #[[ATTR0]] {
+// CHECK-RV64-NEXT: [[ENTRY:.*:]]
+// CHECK-RV64-NEXT: [[TMP0:%.*]] = call <vscale x 1 x bfloat> @llvm.riscv.vfrec7.mask.nxv1bf16.i64(<vscale x 1 x bfloat> [[MASKEDOFF]], <vscale x 1 x bfloat> [[OP1]], <vscale x 1 x i1> [[MASK]], i64 7, i64 [[VL]], i64 0)
+// CHECK-RV64-NEXT: ret <vscale x 1 x bfloat> [[TMP0]]
+//
+vbfloat16mf4_t test_vfrec7_v_bf16mf4_tumu(vbool64_t mask, vbfloat16mf4_t maskedoff, vbfloat16mf4_t op1, size_t vl) {
+ return __riscv_vfrec7_tumu(mask, maskedoff, op1, vl);
+}
+
+// CHECK-RV64-LABEL: define dso_local <vscale x 2 x bfloat> @test_vfrec7_v_bf16mf2_tumu(
+// CHECK-RV64-SAME: <vscale x 2 x i1> [[MASK:%.*]], <vscale x 2 x bfloat> [[MASKEDOFF:%.*]], <vscale x 2 x bfloat> [[OP1:%.*]], i64 noundef [[VL:%.*]]) #[[ATTR0]] {
+// CHECK-RV64-NEXT: [[ENTRY:.*:]]
+// CHECK-RV64-NEXT: [[TMP0:%.*]] = call <vscale x 2 x bfloat> @llvm.riscv.vfrec7.mask.nxv2bf16.i64(<vscale x 2 x bfloat> [[MASKEDOFF]], <vscale x 2 x bfloat> [[OP1]], <vscale x 2 x i1> [[MASK]], i64 7, i64 [[VL]], i64 0)
+// CHECK-RV64-NEXT: ret <vscale x 2 x bfloat> [[TMP0]]
+//
+vbfloat16mf2_t test_vfrec7_v_bf16mf2_tumu(vbool32_t mask, vbfloat16mf2_t maskedoff, vbfloat16mf2_t op1, size_t vl) {
+ return __riscv_vfrec7_tumu(mask, maskedoff, op1, vl);
+}
+
+// CHECK-RV64-LABEL: define dso_local <vscale x 4 x bfloat> @test_vfrec7_v_bf16m1_tumu(
+// CHECK-RV64-SAME: <vscale x 4 x i1> [[MASK:%.*]], <vscale x 4 x bfloat> [[MASKEDOFF:%.*]], <vscale x 4 x bfloat> [[OP1:%.*]], i64 noundef [[VL:%.*]]) #[[ATTR0]] {
+// CHECK-RV64-NEXT: [[ENTRY:.*:]]
+// CHECK-RV64-NEXT: [[TMP0:%.*]] = call <vscale x 4 x bfloat> @llvm.riscv.vfrec7.mask.nxv4bf16.i64(<vscale x 4 x bfloat> [[MASKEDOFF]], <vscale x 4 x bfloat> [[OP1]], <vscale x 4 x i1> [[MASK]], i64 7, i64 [[VL]], i64 0)
+// CHECK-RV64-NEXT: ret <vscale x 4 x bfloat> [[TMP0]]
+//
+vbfloat16m1_t test_vfrec7_v_bf16m1_tumu(vbool16_t mask, vbfloat16m1_t maskedoff, vbfloat16m1_t op1, size_t vl) {
+ return __riscv_vfrec7_tumu(mask, maskedoff, op1, vl);
+}
+
+// CHECK-RV64-LABEL: define dso_local <vscale x 8 x bfloat> @test_vfrec7_v_bf16m2_tumu(
+// CHECK-RV64-SAME: <vscale x 8 x i1> [[MASK:%.*]], <vscale x 8 x bfloat> [[MASKEDOFF:%.*]], <vscale x 8 x bfloat> [[OP1:%.*]], i64 noundef [[VL:%.*]]) #[[ATTR0]] {
+// CHECK-RV64-NEXT: [[ENTRY:.*:]]
+// CHECK-RV64-NEXT: [[TMP0:%.*]] = call <vscale x 8 x bfloat> @llvm.riscv.vfrec7.mask.nxv8bf16.i64(<vscale x 8 x bfloat> [[MASKEDOFF]], <vscale x 8 x bfloat> [[OP1]], <vscale x 8 x i1> [[MASK]], i64 7, i64 [[VL]], i64 0)
+// CHECK-RV64-NEXT: ret <vscale x 8 x bfloat> [[TMP0]]
+//
+vbfloat16m2_t test_vfrec7_v_bf16m2_tumu(vbool8_t mask, vbfloat16m2_t maskedoff, vbfloat16m2_t op1, size_t vl) {
+ return __riscv_vfrec7_tumu(mask, maskedoff, op1, vl);
+}
+
+// CHECK-RV64-LABEL: define dso_local <vscale x 16 x bfloat> @test_vfrec7_v_bf16m4_tumu(
+// CHECK-RV64-SAME: <vscale x 16 x i1> [[MASK:%.*]], <vscale x 16 x bfloat> [[MASKEDOFF:%.*]], <vscale x 16 x bfloat> [[OP1:%.*]], i64 noundef [[VL:%.*]]) #[[ATTR0]] {
+// CHECK-RV64-NEXT: [[ENTRY:.*:]]
+// CHECK-RV64-NEXT: [[TMP0:%.*]] = call <vscale x 16 x bfloat> @llvm.riscv.vfrec7.mask.nxv16bf16.i64(<vscale x 16 x bfloat> [[MASKEDOFF]], <vscale x 16 x bfloat> [[OP1]], <vscale x 16 x i1> [[MASK]], i64 7, i64 [[VL]], i64 0)
+// CHECK-RV64-NEXT: ret <vscale x 16 x bfloat> [[TMP0]]
+//
+vbfloat16m4_t test_vfrec7_v_bf16m4_tumu(vbool4_t mask, vbfloat16m4_t maskedoff, vbfloat16m4_t op1, size_t vl) {
+ return __riscv_vfrec7_tumu(mask, maskedoff, op1, vl);
+}
+
+// CHECK-RV64-LABEL: define dso_local <vscale x 32 x bfloat> @test_vfrec7_v_bf16m8_tumu(
+// CHECK-RV64-SAME: <vscale x 32 x i1> [[MASK:%.*]], <vscale x 32 x bfloat> [[MASKEDOFF:%.*]], <vscale x 32 x bfloat> [[OP1:%.*]], i64 noundef [[VL:%.*]]) #[[ATTR0]] {
+// CHECK-RV64-NEXT: [[ENTRY:.*:]]
+// CHECK-RV64-NEXT: [[TMP0:%.*]] = call <vscale x 32 x bfloat> @llvm.riscv.vfrec7.mask.nxv32bf16.i64(<vscale x 32 x bfloat> [[MASKEDOFF]], <vscale x 32 x bfloat> [[OP1]], <vscale x 32 x i1> [[MASK]], i64 7, i64 [[VL]], i64 0)
+// CHECK-RV64-NEXT: ret <vscale x 32 x bfloat> [[TMP0]]
+//
+vbfloat16m8_t test_vfrec7_v_bf16m8_tumu(vbool2_t mask, vbfloat16m8_t maskedoff, vbfloat16m8_t op1, size_t vl) {
+ return __riscv_vfrec7_tumu(mask, maskedoff, op1, vl);
+}
+
+// CHECK-RV64-LABEL: define dso_local <vscale x 1 x bfloat> @test_vfrec7_v_bf16mf4_mu(
+// CHECK-RV64-SAME: <vscale x 1 x i1> [[MASK:%.*]], <vscale x 1 x bfloat> [[MASKEDOFF:%.*]], <vscale x 1 x bfloat> [[OP1:%.*]], i64 noundef [[VL:%.*]]) #[[ATTR0]] {
+// CHECK-RV64-NEXT: [[ENTRY:.*:]]
+// CHECK-RV64-NEXT: [[TMP0:%.*]] = call <vscale x 1 x bfloat> @llvm.riscv.vfrec7.mask.nxv1bf16.i64(<vscale x 1 x bfloat> [[MASKEDOFF]], <vscale x 1 x bfloat> [[OP1]], <vscale x 1 x i1> [[MASK]], i64 7, i64 [[VL]], i64 1)
+// CHECK-RV64-NEXT: ret <vscale x 1 x bfloat> [[TMP0]]
+//
+vbfloat16mf4_t test_vfrec7_v_bf16mf4_mu(vbool64_t mask, vbfloat16mf4_t maskedoff, vbfloat16mf4_t op1, size_t vl) {
+ return __riscv_vfrec7_mu(mask, maskedoff, op1, vl);
+}
+
+// CHECK-RV64-LABEL: define dso_local <vscale x 2 x bfloat> @test_vfrec7_v_bf16mf2_mu(
+// CHECK-RV64-SAME: <vscale x 2 x i1> [[MASK:%.*]], <vscale x 2 x bfloat> [[MASKEDOFF:%.*]], <vscale x 2 x bfloat> [[OP1:%.*]], i64 noundef [[VL:%.*]]) #[[ATTR0]] {
+// CHECK-RV64-NEXT: [[ENTRY:.*:]]
+// CHECK-RV64-NEXT: [[TMP0:%.*]] = call <vscale x 2 x bfloat> @llvm.riscv.vfrec7.mask.nxv2bf16.i64(<vscale x 2 x bfloat> [[MASKEDOFF]], <vscale x 2 x bfloat> [[OP1]], <vscale x 2 x i1> [[MASK]], i64 7, i64 [[VL]], i64 1)
+// CHECK-RV64-NEXT: ret <vscale x 2 x bfloat> [[TMP0]]
+//
+vbfloat16mf2_t test_vfrec7_v_bf16mf2_mu(vbool32_t mask, vbfloat16mf2_t maskedoff, vbfloat16mf2_t op1, size_t vl) {
+ return __riscv_vfrec7_mu(mask, maskedoff, op1, vl);
+}
+
+// CHECK-RV64-LABEL: define dso_local <vscale x 4 x bfloat> @test_vfrec7_v_bf16m1_mu(
+// CHECK-RV64-SAME: <vscale x 4 x i1> [[MASK:%.*]], <vscale x 4 x bfloat> [[MASKEDOFF:%.*]], <vscale x 4 x bfloat> [[OP1:%.*]], i64 noundef [[VL:%.*]]) #[[ATTR0]] {
+// CHECK-RV64-NEXT: [[ENTRY:.*:]]
+// CHECK-RV64-NEXT: [[TMP0:%.*]] = call <vscale x 4 x bfloat> @llvm.riscv.vfrec7.mask.nxv4bf16.i64(<vscale x 4 x bfloat> [[MASKEDOFF]], <vscale x 4 x bfloat> [[OP1]], <vscale x 4 x i1> [[MASK]], i64 7, i64 [[VL]], i64 1)
+// CHECK-RV64-NEXT: ret <vscale x 4 x bfloat> [[TMP0]]
+//
+vbfloat16m1_t test_vfrec7_v_bf16m1_mu(vbool16_t mask, vbfloat16m1_t maskedoff, vbfloat16m1_t op1, size_t vl) {
+ return __riscv_vfrec7_mu(mask, maskedoff, op1, vl);
+}
+
+// CHECK-RV64-LABEL: define dso_local <vscale x 8 x bfloat> @test_vfrec7_v_bf16m2_mu(
+// CHECK-RV64-SAME: <vscale x 8 x i1> [[MASK:%.*]], <vscale x 8 x bfloat> [[MASKEDOFF:%.*]], <vscale x 8 x bfloat> [[OP1:%.*]], i64 noundef [[VL:%.*]]) #[[ATTR0]] {
+// CHECK-RV64-NEXT: [[ENTRY:.*:]]
+// CHECK-RV64-NEXT: [[TMP0:%.*]] = call <vscale x 8 x bfloat> @llvm.riscv.vfrec7.mask.nxv8bf16.i64(<vscale x 8 x bfloat> [[MASKEDOFF]], <vscale x 8 x bfloat> [[OP1]], <vscale x 8 x i1> [[MASK]], i64 7, i64 [[VL]], i64 1)
+// CHECK-RV64-NEXT: ret <vscale x 8 x bfloat> [[TMP0]]
+//
+vbfloat16m2_t test_vfrec7_v_bf16m2_mu(vbool8_t mask, vbfloat16m2_t maskedoff, vbfloat16m2_t op1, size_t vl) {
+ return __riscv_vfrec7_mu(mask, maskedoff, op1, vl);
+}
+
+// CHECK-RV64-LABEL: define dso_local <vscale x 16 x bfloat> @test_vfrec7_v_bf16m4_mu(
+// CHECK-RV64-SAME: <vscale x 16 x i1> [[MASK:%.*]], <vscale x 16 x bfloat> [[MASKEDOFF:%.*]], <vscale x 16 x bfloat> [[OP1:%.*]], i64 noundef [[VL:%.*]]) #[[ATTR0]] {
+// CHECK-RV64-NEXT: [[ENTRY:.*:]]
+// CHECK-RV64-NEXT: [[TMP0:%.*]] = call <vscale x 16 x bfloat> @llvm.riscv.vfrec7.mask.nxv16bf16.i64(<vscale x 16 x bfloat> [[MASKEDOFF]], <vscale x 16 x bfloat> [[OP1]], <vscale x 16 x i1> [[MASK]], i64 7, i64 [[VL]], i64 1)
+// CHECK-RV64-NEXT: ret <vscale x 16 x bfloat> [[TMP0]]
+//
+vbfloat16m4_t test_vfrec7_v_bf16m4_mu(vbool4_t mask, vbfloat16m4_t maskedoff, vbfloat16m4_t op1, size_t vl) {
+ return __riscv_vfrec7_mu(mask, maskedoff, op1, vl);
+}
+
+// CHECK-RV64-LABEL: define dso_local <vscale x 32 x bfloat> @test_vfrec7_v_bf16m8_mu(
+// CHECK-RV64-SAME: <vscale x 32 x i1> [[MASK:%.*]], <vscale x 32 x bfloat> [[MASKEDOFF:%.*]], <vscale x 32 x bfloat> [[OP1:%.*]], i64 noundef [[VL:%.*]]) #[[ATTR0]] {
+// CHECK-RV64-NEXT: [[ENTRY:.*:]]
+// CHECK-RV64-NEXT: [[TMP0:%.*]] = call <vscale x 32 x bfloat> @llvm.riscv.vfrec7.mask.nxv32bf16.i64(<vscale x 32 x bfloat> [[MASKEDOFF]], <vscale x 32 x bfloat> [[OP1]], <vscale x 32 x i1> [[MASK]], i64 7, i64 [[VL]], i64 1)
+// CHECK-RV64-NEXT: ret <vscale x 32 x bfloat> [[TMP0]]
+//
+vbfloat16m8_t test_vfrec7_v_bf16m8_mu(vbool2_t mask, vbfloat16m8_t maskedoff, vbfloat16m8_t op1, size_t vl) {
+ return __riscv_vfrec7_mu(mask, maskedoff, op1, vl);
+}
+
diff --git a/clang/test/CodeGen/RISCV/rvv-intrinsics-autogenerated/zvfbfa/policy/overloaded/vfrsqrt7.c b/clang/test/CodeGen/RISCV/rvv-intrinsics-autogenerated/zvfbfa/policy/overloaded/vfrsqrt7.c
new file mode 100644
index 0000000..7c6c926
--- /dev/null
+++ b/clang/test/CodeGen/RISCV/rvv-intrinsics-autogenerated/zvfbfa/policy/overloaded/vfrsqrt7.c
@@ -0,0 +1,249 @@
+// NOTE: Assertions have been autogenerated by utils/update_cc_test_checks.py UTC_ARGS: --version 5
+// REQUIRES: riscv-registered-target
+// RUN: %clang_cc1 -triple riscv64 -target-feature +v \
+// RUN: -target-feature +experimental-zvfbfa -disable-O0-optnone \
+// RUN: -emit-llvm %s -o - | opt -S -passes=mem2reg | \
+// RUN: FileCheck --check-prefix=CHECK-RV64 %s
+
+#include <riscv_vector.h>
+
+// CHECK-RV64-LABEL: define dso_local <vscale x 1 x bfloat> @test_vfrsqrt7_v_bf16mf4_tu(
+// CHECK-RV64-SAME: <vscale x 1 x bfloat> [[MASKEDOFF:%.*]], <vscale x 1 x bfloat> [[OP1:%.*]], i64 noundef [[VL:%.*]]) #[[ATTR0:[0-9]+]] {
+// CHECK-RV64-NEXT: [[ENTRY:.*:]]
+// CHECK-RV64-NEXT: [[TMP0:%.*]] = call <vscale x 1 x bfloat> @llvm.riscv.vfrsqrt7.nxv1bf16.i64(<vscale x 1 x bfloat> [[MASKEDOFF]], <vscale x 1 x bfloat> [[OP1]], i64 [[VL]])
+// CHECK-RV64-NEXT: ret <vscale x 1 x bfloat> [[TMP0]]
+//
+vbfloat16mf4_t test_vfrsqrt7_v_bf16mf4_tu(vbfloat16mf4_t maskedoff, vbfloat16mf4_t op1, size_t vl) {
+ return __riscv_vfrsqrt7_tu(maskedoff, op1, vl);
+}
+
+// CHECK-RV64-LABEL: define dso_local <vscale x 2 x bfloat> @test_vfrsqrt7_v_bf16mf2_tu(
+// CHECK-RV64-SAME: <vscale x 2 x bfloat> [[MASKEDOFF:%.*]], <vscale x 2 x bfloat> [[OP1:%.*]], i64 noundef [[VL:%.*]]) #[[ATTR0]] {
+// CHECK-RV64-NEXT: [[ENTRY:.*:]]
+// CHECK-RV64-NEXT: [[TMP0:%.*]] = call <vscale x 2 x bfloat> @llvm.riscv.vfrsqrt7.nxv2bf16.i64(<vscale x 2 x bfloat> [[MASKEDOFF]], <vscale x 2 x bfloat> [[OP1]], i64 [[VL]])
+// CHECK-RV64-NEXT: ret <vscale x 2 x bfloat> [[TMP0]]
+//
+vbfloat16mf2_t test_vfrsqrt7_v_bf16mf2_tu(vbfloat16mf2_t maskedoff, vbfloat16mf2_t op1, size_t vl) {
+ return __riscv_vfrsqrt7_tu(maskedoff, op1, vl);
+}
+
+// CHECK-RV64-LABEL: define dso_local <vscale x 4 x bfloat> @test_vfrsqrt7_v_bf16m1_tu(
+// CHECK-RV64-SAME: <vscale x 4 x bfloat> [[MASKEDOFF:%.*]], <vscale x 4 x bfloat> [[OP1:%.*]], i64 noundef [[VL:%.*]]) #[[ATTR0]] {
+// CHECK-RV64-NEXT: [[ENTRY:.*:]]
+// CHECK-RV64-NEXT: [[TMP0:%.*]] = call <vscale x 4 x bfloat> @llvm.riscv.vfrsqrt7.nxv4bf16.i64(<vscale x 4 x bfloat> [[MASKEDOFF]], <vscale x 4 x bfloat> [[OP1]], i64 [[VL]])
+// CHECK-RV64-NEXT: ret <vscale x 4 x bfloat> [[TMP0]]
+//
+vbfloat16m1_t test_vfrsqrt7_v_bf16m1_tu(vbfloat16m1_t maskedoff, vbfloat16m1_t op1, size_t vl) {
+ return __riscv_vfrsqrt7_tu(maskedoff, op1, vl);
+}
+
+// CHECK-RV64-LABEL: define dso_local <vscale x 8 x bfloat> @test_vfrsqrt7_v_bf16m2_tu(
+// CHECK-RV64-SAME: <vscale x 8 x bfloat> [[MASKEDOFF:%.*]], <vscale x 8 x bfloat> [[OP1:%.*]], i64 noundef [[VL:%.*]]) #[[ATTR0]] {
+// CHECK-RV64-NEXT: [[ENTRY:.*:]]
+// CHECK-RV64-NEXT: [[TMP0:%.*]] = call <vscale x 8 x bfloat> @llvm.riscv.vfrsqrt7.nxv8bf16.i64(<vscale x 8 x bfloat> [[MASKEDOFF]], <vscale x 8 x bfloat> [[OP1]], i64 [[VL]])
+// CHECK-RV64-NEXT: ret <vscale x 8 x bfloat> [[TMP0]]
+//
+vbfloat16m2_t test_vfrsqrt7_v_bf16m2_tu(vbfloat16m2_t maskedoff, vbfloat16m2_t op1, size_t vl) {
+ return __riscv_vfrsqrt7_tu(maskedoff, op1, vl);
+}
+
+// CHECK-RV64-LABEL: define dso_local <vscale x 16 x bfloat> @test_vfrsqrt7_v_bf16m4_tu(
+// CHECK-RV64-SAME: <vscale x 16 x bfloat> [[MASKEDOFF:%.*]], <vscale x 16 x bfloat> [[OP1:%.*]], i64 noundef [[VL:%.*]]) #[[ATTR0]] {
+// CHECK-RV64-NEXT: [[ENTRY:.*:]]
+// CHECK-RV64-NEXT: [[TMP0:%.*]] = call <vscale x 16 x bfloat> @llvm.riscv.vfrsqrt7.nxv16bf16.i64(<vscale x 16 x bfloat> [[MASKEDOFF]], <vscale x 16 x bfloat> [[OP1]], i64 [[VL]])
+// CHECK-RV64-NEXT: ret <vscale x 16 x bfloat> [[TMP0]]
+//
+vbfloat16m4_t test_vfrsqrt7_v_bf16m4_tu(vbfloat16m4_t maskedoff, vbfloat16m4_t op1, size_t vl) {
+ return __riscv_vfrsqrt7_tu(maskedoff, op1, vl);
+}
+
+// CHECK-RV64-LABEL: define dso_local <vscale x 32 x bfloat> @test_vfrsqrt7_v_bf16m8_tu(
+// CHECK-RV64-SAME: <vscale x 32 x bfloat> [[MASKEDOFF:%.*]], <vscale x 32 x bfloat> [[OP1:%.*]], i64 noundef [[VL:%.*]]) #[[ATTR0]] {
+// CHECK-RV64-NEXT: [[ENTRY:.*:]]
+// CHECK-RV64-NEXT: [[TMP0:%.*]] = call <vscale x 32 x bfloat> @llvm.riscv.vfrsqrt7.nxv32bf16.i64(<vscale x 32 x bfloat> [[MASKEDOFF]], <vscale x 32 x bfloat> [[OP1]], i64 [[VL]])
+// CHECK-RV64-NEXT: ret <vscale x 32 x bfloat> [[TMP0]]
+//
+vbfloat16m8_t test_vfrsqrt7_v_bf16m8_tu(vbfloat16m8_t maskedoff, vbfloat16m8_t op1, size_t vl) {
+ return __riscv_vfrsqrt7_tu(maskedoff, op1, vl);
+}
+
+// CHECK-RV64-LABEL: define dso_local <vscale x 1 x bfloat> @test_vfrsqrt7_v_bf16mf4_tum(
+// CHECK-RV64-SAME: <vscale x 1 x i1> [[MASK:%.*]], <vscale x 1 x bfloat> [[MASKEDOFF:%.*]], <vscale x 1 x bfloat> [[OP1:%.*]], i64 noundef [[VL:%.*]]) #[[ATTR0]] {
+// CHECK-RV64-NEXT: [[ENTRY:.*:]]
+// CHECK-RV64-NEXT: [[TMP0:%.*]] = call <vscale x 1 x bfloat> @llvm.riscv.vfrsqrt7.mask.nxv1bf16.i64(<vscale x 1 x bfloat> [[MASKEDOFF]], <vscale x 1 x bfloat> [[OP1]], <vscale x 1 x i1> [[MASK]], i64 [[VL]], i64 2)
+// CHECK-RV64-NEXT: ret <vscale x 1 x bfloat> [[TMP0]]
+//
+vbfloat16mf4_t test_vfrsqrt7_v_bf16mf4_tum(vbool64_t mask, vbfloat16mf4_t maskedoff, vbfloat16mf4_t op1, size_t vl) {
+ return __riscv_vfrsqrt7_tum(mask, maskedoff, op1, vl);
+}
+
+// CHECK-RV64-LABEL: define dso_local <vscale x 2 x bfloat> @test_vfrsqrt7_v_bf16mf2_tum(
+// CHECK-RV64-SAME: <vscale x 2 x i1> [[MASK:%.*]], <vscale x 2 x bfloat> [[MASKEDOFF:%.*]], <vscale x 2 x bfloat> [[OP1:%.*]], i64 noundef [[VL:%.*]]) #[[ATTR0]] {
+// CHECK-RV64-NEXT: [[ENTRY:.*:]]
+// CHECK-RV64-NEXT: [[TMP0:%.*]] = call <vscale x 2 x bfloat> @llvm.riscv.vfrsqrt7.mask.nxv2bf16.i64(<vscale x 2 x bfloat> [[MASKEDOFF]], <vscale x 2 x bfloat> [[OP1]], <vscale x 2 x i1> [[MASK]], i64 [[VL]], i64 2)
+// CHECK-RV64-NEXT: ret <vscale x 2 x bfloat> [[TMP0]]
+//
+vbfloat16mf2_t test_vfrsqrt7_v_bf16mf2_tum(vbool32_t mask, vbfloat16mf2_t maskedoff, vbfloat16mf2_t op1, size_t vl) {
+ return __riscv_vfrsqrt7_tum(mask, maskedoff, op1, vl);
+}
+
+// CHECK-RV64-LABEL: define dso_local <vscale x 4 x bfloat> @test_vfrsqrt7_v_bf16m1_tum(
+// CHECK-RV64-SAME: <vscale x 4 x i1> [[MASK:%.*]], <vscale x 4 x bfloat> [[MASKEDOFF:%.*]], <vscale x 4 x bfloat> [[OP1:%.*]], i64 noundef [[VL:%.*]]) #[[ATTR0]] {
+// CHECK-RV64-NEXT: [[ENTRY:.*:]]
+// CHECK-RV64-NEXT: [[TMP0:%.*]] = call <vscale x 4 x bfloat> @llvm.riscv.vfrsqrt7.mask.nxv4bf16.i64(<vscale x 4 x bfloat> [[MASKEDOFF]], <vscale x 4 x bfloat> [[OP1]], <vscale x 4 x i1> [[MASK]], i64 [[VL]], i64 2)
+// CHECK-RV64-NEXT: ret <vscale x 4 x bfloat> [[TMP0]]
+//
+vbfloat16m1_t test_vfrsqrt7_v_bf16m1_tum(vbool16_t mask, vbfloat16m1_t maskedoff, vbfloat16m1_t op1, size_t vl) {
+ return __riscv_vfrsqrt7_tum(mask, maskedoff, op1, vl);
+}
+
+// CHECK-RV64-LABEL: define dso_local <vscale x 8 x bfloat> @test_vfrsqrt7_v_bf16m2_tum(
+// CHECK-RV64-SAME: <vscale x 8 x i1> [[MASK:%.*]], <vscale x 8 x bfloat> [[MASKEDOFF:%.*]], <vscale x 8 x bfloat> [[OP1:%.*]], i64 noundef [[VL:%.*]]) #[[ATTR0]] {
+// CHECK-RV64-NEXT: [[ENTRY:.*:]]
+// CHECK-RV64-NEXT: [[TMP0:%.*]] = call <vscale x 8 x bfloat> @llvm.riscv.vfrsqrt7.mask.nxv8bf16.i64(<vscale x 8 x bfloat> [[MASKEDOFF]], <vscale x 8 x bfloat> [[OP1]], <vscale x 8 x i1> [[MASK]], i64 [[VL]], i64 2)
+// CHECK-RV64-NEXT: ret <vscale x 8 x bfloat> [[TMP0]]
+//
+vbfloat16m2_t test_vfrsqrt7_v_bf16m2_tum(vbool8_t mask, vbfloat16m2_t maskedoff, vbfloat16m2_t op1, size_t vl) {
+ return __riscv_vfrsqrt7_tum(mask, maskedoff, op1, vl);
+}
+
+// CHECK-RV64-LABEL: define dso_local <vscale x 16 x bfloat> @test_vfrsqrt7_v_bf16m4_tum(
+// CHECK-RV64-SAME: <vscale x 16 x i1> [[MASK:%.*]], <vscale x 16 x bfloat> [[MASKEDOFF:%.*]], <vscale x 16 x bfloat> [[OP1:%.*]], i64 noundef [[VL:%.*]]) #[[ATTR0]] {
+// CHECK-RV64-NEXT: [[ENTRY:.*:]]
+// CHECK-RV64-NEXT: [[TMP0:%.*]] = call <vscale x 16 x bfloat> @llvm.riscv.vfrsqrt7.mask.nxv16bf16.i64(<vscale x 16 x bfloat> [[MASKEDOFF]], <vscale x 16 x bfloat> [[OP1]], <vscale x 16 x i1> [[MASK]], i64 [[VL]], i64 2)
+// CHECK-RV64-NEXT: ret <vscale x 16 x bfloat> [[TMP0]]
+//
+vbfloat16m4_t test_vfrsqrt7_v_bf16m4_tum(vbool4_t mask, vbfloat16m4_t maskedoff, vbfloat16m4_t op1, size_t vl) {
+ return __riscv_vfrsqrt7_tum(mask, maskedoff, op1, vl);
+}
+
+// CHECK-RV64-LABEL: define dso_local <vscale x 32 x bfloat> @test_vfrsqrt7_v_bf16m8_tum(
+// CHECK-RV64-SAME: <vscale x 32 x i1> [[MASK:%.*]], <vscale x 32 x bfloat> [[MASKEDOFF:%.*]], <vscale x 32 x bfloat> [[OP1:%.*]], i64 noundef [[VL:%.*]]) #[[ATTR0]] {
+// CHECK-RV64-NEXT: [[ENTRY:.*:]]
+// CHECK-RV64-NEXT: [[TMP0:%.*]] = call <vscale x 32 x bfloat> @llvm.riscv.vfrsqrt7.mask.nxv32bf16.i64(<vscale x 32 x bfloat> [[MASKEDOFF]], <vscale x 32 x bfloat> [[OP1]], <vscale x 32 x i1> [[MASK]], i64 [[VL]], i64 2)
+// CHECK-RV64-NEXT: ret <vscale x 32 x bfloat> [[TMP0]]
+//
+vbfloat16m8_t test_vfrsqrt7_v_bf16m8_tum(vbool2_t mask, vbfloat16m8_t maskedoff, vbfloat16m8_t op1, size_t vl) {
+ return __riscv_vfrsqrt7_tum(mask, maskedoff, op1, vl);
+}
+
+// CHECK-RV64-LABEL: define dso_local <vscale x 1 x bfloat> @test_vfrsqrt7_v_bf16mf4_tumu(
+// CHECK-RV64-SAME: <vscale x 1 x i1> [[MASK:%.*]], <vscale x 1 x bfloat> [[MASKEDOFF:%.*]], <vscale x 1 x bfloat> [[OP1:%.*]], i64 noundef [[VL:%.*]]) #[[ATTR0]] {
+// CHECK-RV64-NEXT: [[ENTRY:.*:]]
+// CHECK-RV64-NEXT: [[TMP0:%.*]] = call <vscale x 1 x bfloat> @llvm.riscv.vfrsqrt7.mask.nxv1bf16.i64(<vscale x 1 x bfloat> [[MASKEDOFF]], <vscale x 1 x bfloat> [[OP1]], <vscale x 1 x i1> [[MASK]], i64 [[VL]], i64 0)
+// CHECK-RV64-NEXT: ret <vscale x 1 x bfloat> [[TMP0]]
+//
+vbfloat16mf4_t test_vfrsqrt7_v_bf16mf4_tumu(vbool64_t mask, vbfloat16mf4_t maskedoff, vbfloat16mf4_t op1, size_t vl) {
+ return __riscv_vfrsqrt7_tumu(mask, maskedoff, op1, vl);
+}
+
+// CHECK-RV64-LABEL: define dso_local <vscale x 2 x bfloat> @test_vfrsqrt7_v_bf16mf2_tumu(
+// CHECK-RV64-SAME: <vscale x 2 x i1> [[MASK:%.*]], <vscale x 2 x bfloat> [[MASKEDOFF:%.*]], <vscale x 2 x bfloat> [[OP1:%.*]], i64 noundef [[VL:%.*]]) #[[ATTR0]] {
+// CHECK-RV64-NEXT: [[ENTRY:.*:]]
+// CHECK-RV64-NEXT: [[TMP0:%.*]] = call <vscale x 2 x bfloat> @llvm.riscv.vfrsqrt7.mask.nxv2bf16.i64(<vscale x 2 x bfloat> [[MASKEDOFF]], <vscale x 2 x bfloat> [[OP1]], <vscale x 2 x i1> [[MASK]], i64 [[VL]], i64 0)
+// CHECK-RV64-NEXT: ret <vscale x 2 x bfloat> [[TMP0]]
+//
+vbfloat16mf2_t test_vfrsqrt7_v_bf16mf2_tumu(vbool32_t mask, vbfloat16mf2_t maskedoff, vbfloat16mf2_t op1, size_t vl) {
+ return __riscv_vfrsqrt7_tumu(mask, maskedoff, op1, vl);
+}
+
+// CHECK-RV64-LABEL: define dso_local <vscale x 4 x bfloat> @test_vfrsqrt7_v_bf16m1_tumu(
+// CHECK-RV64-SAME: <vscale x 4 x i1> [[MASK:%.*]], <vscale x 4 x bfloat> [[MASKEDOFF:%.*]], <vscale x 4 x bfloat> [[OP1:%.*]], i64 noundef [[VL:%.*]]) #[[ATTR0]] {
+// CHECK-RV64-NEXT: [[ENTRY:.*:]]
+// CHECK-RV64-NEXT: [[TMP0:%.*]] = call <vscale x 4 x bfloat> @llvm.riscv.vfrsqrt7.mask.nxv4bf16.i64(<vscale x 4 x bfloat> [[MASKEDOFF]], <vscale x 4 x bfloat> [[OP1]], <vscale x 4 x i1> [[MASK]], i64 [[VL]], i64 0)
+// CHECK-RV64-NEXT: ret <vscale x 4 x bfloat> [[TMP0]]
+//
+vbfloat16m1_t test_vfrsqrt7_v_bf16m1_tumu(vbool16_t mask, vbfloat16m1_t maskedoff, vbfloat16m1_t op1, size_t vl) {
+ return __riscv_vfrsqrt7_tumu(mask, maskedoff, op1, vl);
+}
+
+// CHECK-RV64-LABEL: define dso_local <vscale x 8 x bfloat> @test_vfrsqrt7_v_bf16m2_tumu(
+// CHECK-RV64-SAME: <vscale x 8 x i1> [[MASK:%.*]], <vscale x 8 x bfloat> [[MASKEDOFF:%.*]], <vscale x 8 x bfloat> [[OP1:%.*]], i64 noundef [[VL:%.*]]) #[[ATTR0]] {
+// CHECK-RV64-NEXT: [[ENTRY:.*:]]
+// CHECK-RV64-NEXT: [[TMP0:%.*]] = call <vscale x 8 x bfloat> @llvm.riscv.vfrsqrt7.mask.nxv8bf16.i64(<vscale x 8 x bfloat> [[MASKEDOFF]], <vscale x 8 x bfloat> [[OP1]], <vscale x 8 x i1> [[MASK]], i64 [[VL]], i64 0)
+// CHECK-RV64-NEXT: ret <vscale x 8 x bfloat> [[TMP0]]
+//
+vbfloat16m2_t test_vfrsqrt7_v_bf16m2_tumu(vbool8_t mask, vbfloat16m2_t maskedoff, vbfloat16m2_t op1, size_t vl) {
+ return __riscv_vfrsqrt7_tumu(mask, maskedoff, op1, vl);
+}
+
+// CHECK-RV64-LABEL: define dso_local <vscale x 16 x bfloat> @test_vfrsqrt7_v_bf16m4_tumu(
+// CHECK-RV64-SAME: <vscale x 16 x i1> [[MASK:%.*]], <vscale x 16 x bfloat> [[MASKEDOFF:%.*]], <vscale x 16 x bfloat> [[OP1:%.*]], i64 noundef [[VL:%.*]]) #[[ATTR0]] {
+// CHECK-RV64-NEXT: [[ENTRY:.*:]]
+// CHECK-RV64-NEXT: [[TMP0:%.*]] = call <vscale x 16 x bfloat> @llvm.riscv.vfrsqrt7.mask.nxv16bf16.i64(<vscale x 16 x bfloat> [[MASKEDOFF]], <vscale x 16 x bfloat> [[OP1]], <vscale x 16 x i1> [[MASK]], i64 [[VL]], i64 0)
+// CHECK-RV64-NEXT: ret <vscale x 16 x bfloat> [[TMP0]]
+//
+vbfloat16m4_t test_vfrsqrt7_v_bf16m4_tumu(vbool4_t mask, vbfloat16m4_t maskedoff, vbfloat16m4_t op1, size_t vl) {
+ return __riscv_vfrsqrt7_tumu(mask, maskedoff, op1, vl);
+}
+
+// CHECK-RV64-LABEL: define dso_local <vscale x 32 x bfloat> @test_vfrsqrt7_v_bf16m8_tumu(
+// CHECK-RV64-SAME: <vscale x 32 x i1> [[MASK:%.*]], <vscale x 32 x bfloat> [[MASKEDOFF:%.*]], <vscale x 32 x bfloat> [[OP1:%.*]], i64 noundef [[VL:%.*]]) #[[ATTR0]] {
+// CHECK-RV64-NEXT: [[ENTRY:.*:]]
+// CHECK-RV64-NEXT: [[TMP0:%.*]] = call <vscale x 32 x bfloat> @llvm.riscv.vfrsqrt7.mask.nxv32bf16.i64(<vscale x 32 x bfloat> [[MASKEDOFF]], <vscale x 32 x bfloat> [[OP1]], <vscale x 32 x i1> [[MASK]], i64 [[VL]], i64 0)
+// CHECK-RV64-NEXT: ret <vscale x 32 x bfloat> [[TMP0]]
+//
+vbfloat16m8_t test_vfrsqrt7_v_bf16m8_tumu(vbool2_t mask, vbfloat16m8_t maskedoff, vbfloat16m8_t op1, size_t vl) {
+ return __riscv_vfrsqrt7_tumu(mask, maskedoff, op1, vl);
+}
+
+// CHECK-RV64-LABEL: define dso_local <vscale x 1 x bfloat> @test_vfrsqrt7_v_bf16mf4_mu(
+// CHECK-RV64-SAME: <vscale x 1 x i1> [[MASK:%.*]], <vscale x 1 x bfloat> [[MASKEDOFF:%.*]], <vscale x 1 x bfloat> [[OP1:%.*]], i64 noundef [[VL:%.*]]) #[[ATTR0]] {
+// CHECK-RV64-NEXT: [[ENTRY:.*:]]
+// CHECK-RV64-NEXT: [[TMP0:%.*]] = call <vscale x 1 x bfloat> @llvm.riscv.vfrsqrt7.mask.nxv1bf16.i64(<vscale x 1 x bfloat> [[MASKEDOFF]], <vscale x 1 x bfloat> [[OP1]], <vscale x 1 x i1> [[MASK]], i64 [[VL]], i64 1)
+// CHECK-RV64-NEXT: ret <vscale x 1 x bfloat> [[TMP0]]
+//
+vbfloat16mf4_t test_vfrsqrt7_v_bf16mf4_mu(vbool64_t mask, vbfloat16mf4_t maskedoff, vbfloat16mf4_t op1, size_t vl) {
+ return __riscv_vfrsqrt7_mu(mask, maskedoff, op1, vl);
+}
+
+// CHECK-RV64-LABEL: define dso_local <vscale x 2 x bfloat> @test_vfrsqrt7_v_bf16mf2_mu(
+// CHECK-RV64-SAME: <vscale x 2 x i1> [[MASK:%.*]], <vscale x 2 x bfloat> [[MASKEDOFF:%.*]], <vscale x 2 x bfloat> [[OP1:%.*]], i64 noundef [[VL:%.*]]) #[[ATTR0]] {
+// CHECK-RV64-NEXT: [[ENTRY:.*:]]
+// CHECK-RV64-NEXT: [[TMP0:%.*]] = call <vscale x 2 x bfloat> @llvm.riscv.vfrsqrt7.mask.nxv2bf16.i64(<vscale x 2 x bfloat> [[MASKEDOFF]], <vscale x 2 x bfloat> [[OP1]], <vscale x 2 x i1> [[MASK]], i64 [[VL]], i64 1)
+// CHECK-RV64-NEXT: ret <vscale x 2 x bfloat> [[TMP0]]
+//
+vbfloat16mf2_t test_vfrsqrt7_v_bf16mf2_mu(vbool32_t mask, vbfloat16mf2_t maskedoff, vbfloat16mf2_t op1, size_t vl) {
+ return __riscv_vfrsqrt7_mu(mask, maskedoff, op1, vl);
+}
+
+// CHECK-RV64-LABEL: define dso_local <vscale x 4 x bfloat> @test_vfrsqrt7_v_bf16m1_mu(
+// CHECK-RV64-SAME: <vscale x 4 x i1> [[MASK:%.*]], <vscale x 4 x bfloat> [[MASKEDOFF:%.*]], <vscale x 4 x bfloat> [[OP1:%.*]], i64 noundef [[VL:%.*]]) #[[ATTR0]] {
+// CHECK-RV64-NEXT: [[ENTRY:.*:]]
+// CHECK-RV64-NEXT: [[TMP0:%.*]] = call <vscale x 4 x bfloat> @llvm.riscv.vfrsqrt7.mask.nxv4bf16.i64(<vscale x 4 x bfloat> [[MASKEDOFF]], <vscale x 4 x bfloat> [[OP1]], <vscale x 4 x i1> [[MASK]], i64 [[VL]], i64 1)
+// CHECK-RV64-NEXT: ret <vscale x 4 x bfloat> [[TMP0]]
+//
+vbfloat16m1_t test_vfrsqrt7_v_bf16m1_mu(vbool16_t mask, vbfloat16m1_t maskedoff, vbfloat16m1_t op1, size_t vl) {
+ return __riscv_vfrsqrt7_mu(mask, maskedoff, op1, vl);
+}
+
+// CHECK-RV64-LABEL: define dso_local <vscale x 8 x bfloat> @test_vfrsqrt7_v_bf16m2_mu(
+// CHECK-RV64-SAME: <vscale x 8 x i1> [[MASK:%.*]], <vscale x 8 x bfloat> [[MASKEDOFF:%.*]], <vscale x 8 x bfloat> [[OP1:%.*]], i64 noundef [[VL:%.*]]) #[[ATTR0]] {
+// CHECK-RV64-NEXT: [[ENTRY:.*:]]
+// CHECK-RV64-NEXT: [[TMP0:%.*]] = call <vscale x 8 x bfloat> @llvm.riscv.vfrsqrt7.mask.nxv8bf16.i64(<vscale x 8 x bfloat> [[MASKEDOFF]], <vscale x 8 x bfloat> [[OP1]], <vscale x 8 x i1> [[MASK]], i64 [[VL]], i64 1)
+// CHECK-RV64-NEXT: ret <vscale x 8 x bfloat> [[TMP0]]
+//
+vbfloat16m2_t test_vfrsqrt7_v_bf16m2_mu(vbool8_t mask, vbfloat16m2_t maskedoff, vbfloat16m2_t op1, size_t vl) {
+ return __riscv_vfrsqrt7_mu(mask, maskedoff, op1, vl);
+}
+
+// CHECK-RV64-LABEL: define dso_local <vscale x 16 x bfloat> @test_vfrsqrt7_v_bf16m4_mu(
+// CHECK-RV64-SAME: <vscale x 16 x i1> [[MASK:%.*]], <vscale x 16 x bfloat> [[MASKEDOFF:%.*]], <vscale x 16 x bfloat> [[OP1:%.*]], i64 noundef [[VL:%.*]]) #[[ATTR0]] {
+// CHECK-RV64-NEXT: [[ENTRY:.*:]]
+// CHECK-RV64-NEXT: [[TMP0:%.*]] = call <vscale x 16 x bfloat> @llvm.riscv.vfrsqrt7.mask.nxv16bf16.i64(<vscale x 16 x bfloat> [[MASKEDOFF]], <vscale x 16 x bfloat> [[OP1]], <vscale x 16 x i1> [[MASK]], i64 [[VL]], i64 1)
+// CHECK-RV64-NEXT: ret <vscale x 16 x bfloat> [[TMP0]]
+//
+vbfloat16m4_t test_vfrsqrt7_v_bf16m4_mu(vbool4_t mask, vbfloat16m4_t maskedoff, vbfloat16m4_t op1, size_t vl) {
+ return __riscv_vfrsqrt7_mu(mask, maskedoff, op1, vl);
+}
+
+// CHECK-RV64-LABEL: define dso_local <vscale x 32 x bfloat> @test_vfrsqrt7_v_bf16m8_mu(
+// CHECK-RV64-SAME: <vscale x 32 x i1> [[MASK:%.*]], <vscale x 32 x bfloat> [[MASKEDOFF:%.*]], <vscale x 32 x bfloat> [[OP1:%.*]], i64 noundef [[VL:%.*]]) #[[ATTR0]] {
+// CHECK-RV64-NEXT: [[ENTRY:.*:]]
+// CHECK-RV64-NEXT: [[TMP0:%.*]] = call <vscale x 32 x bfloat> @llvm.riscv.vfrsqrt7.mask.nxv32bf16.i64(<vscale x 32 x bfloat> [[MASKEDOFF]], <vscale x 32 x bfloat> [[OP1]], <vscale x 32 x i1> [[MASK]], i64 [[VL]], i64 1)
+// CHECK-RV64-NEXT: ret <vscale x 32 x bfloat> [[TMP0]]
+//
+vbfloat16m8_t test_vfrsqrt7_v_bf16m8_mu(vbool2_t mask, vbfloat16m8_t maskedoff, vbfloat16m8_t op1, size_t vl) {
+ return __riscv_vfrsqrt7_mu(mask, maskedoff, op1, vl);
+}
+
diff --git a/clang/test/CodeGen/RISCV/rvv-intrinsics-autogenerated/zvfbfa/policy/overloaded/vfrsub.c b/clang/test/CodeGen/RISCV/rvv-intrinsics-autogenerated/zvfbfa/policy/overloaded/vfrsub.c
new file mode 100644
index 0000000..c09caeb
--- /dev/null
+++ b/clang/test/CodeGen/RISCV/rvv-intrinsics-autogenerated/zvfbfa/policy/overloaded/vfrsub.c
@@ -0,0 +1,249 @@
+// NOTE: Assertions have been autogenerated by utils/update_cc_test_checks.py UTC_ARGS: --version 5
+// REQUIRES: riscv-registered-target
+// RUN: %clang_cc1 -triple riscv64 -target-feature +v \
+// RUN: -target-feature +experimental-zvfbfa -disable-O0-optnone \
+// RUN: -emit-llvm %s -o - | opt -S -passes=mem2reg | \
+// RUN: FileCheck --check-prefix=CHECK-RV64 %s
+
+#include <riscv_vector.h>
+
+// CHECK-RV64-LABEL: define dso_local <vscale x 1 x bfloat> @test_vfrsub_vf_bf16mf4_tu(
+// CHECK-RV64-SAME: <vscale x 1 x bfloat> [[MASKEDOFF:%.*]], <vscale x 1 x bfloat> [[OP1:%.*]], bfloat noundef [[OP2:%.*]], i64 noundef [[VL:%.*]]) #[[ATTR0:[0-9]+]] {
+// CHECK-RV64-NEXT: [[ENTRY:.*:]]
+// CHECK-RV64-NEXT: [[TMP0:%.*]] = call <vscale x 1 x bfloat> @llvm.riscv.vfrsub.nxv1bf16.bf16.i64(<vscale x 1 x bfloat> [[MASKEDOFF]], <vscale x 1 x bfloat> [[OP1]], bfloat [[OP2]], i64 7, i64 [[VL]])
+// CHECK-RV64-NEXT: ret <vscale x 1 x bfloat> [[TMP0]]
+//
+vbfloat16mf4_t test_vfrsub_vf_bf16mf4_tu(vbfloat16mf4_t maskedoff, vbfloat16mf4_t op1, __bf16 op2, size_t vl) {
+ return __riscv_vfrsub_tu(maskedoff, op1, op2, vl);
+}
+
+// CHECK-RV64-LABEL: define dso_local <vscale x 2 x bfloat> @test_vfrsub_vf_bf16mf2_tu(
+// CHECK-RV64-SAME: <vscale x 2 x bfloat> [[MASKEDOFF:%.*]], <vscale x 2 x bfloat> [[OP1:%.*]], bfloat noundef [[OP2:%.*]], i64 noundef [[VL:%.*]]) #[[ATTR0]] {
+// CHECK-RV64-NEXT: [[ENTRY:.*:]]
+// CHECK-RV64-NEXT: [[TMP0:%.*]] = call <vscale x 2 x bfloat> @llvm.riscv.vfrsub.nxv2bf16.bf16.i64(<vscale x 2 x bfloat> [[MASKEDOFF]], <vscale x 2 x bfloat> [[OP1]], bfloat [[OP2]], i64 7, i64 [[VL]])
+// CHECK-RV64-NEXT: ret <vscale x 2 x bfloat> [[TMP0]]
+//
+vbfloat16mf2_t test_vfrsub_vf_bf16mf2_tu(vbfloat16mf2_t maskedoff, vbfloat16mf2_t op1, __bf16 op2, size_t vl) {
+ return __riscv_vfrsub_tu(maskedoff, op1, op2, vl);
+}
+
+// CHECK-RV64-LABEL: define dso_local <vscale x 4 x bfloat> @test_vfrsub_vf_bf16m1_tu(
+// CHECK-RV64-SAME: <vscale x 4 x bfloat> [[MASKEDOFF:%.*]], <vscale x 4 x bfloat> [[OP1:%.*]], bfloat noundef [[OP2:%.*]], i64 noundef [[VL:%.*]]) #[[ATTR0]] {
+// CHECK-RV64-NEXT: [[ENTRY:.*:]]
+// CHECK-RV64-NEXT: [[TMP0:%.*]] = call <vscale x 4 x bfloat> @llvm.riscv.vfrsub.nxv4bf16.bf16.i64(<vscale x 4 x bfloat> [[MASKEDOFF]], <vscale x 4 x bfloat> [[OP1]], bfloat [[OP2]], i64 7, i64 [[VL]])
+// CHECK-RV64-NEXT: ret <vscale x 4 x bfloat> [[TMP0]]
+//
+vbfloat16m1_t test_vfrsub_vf_bf16m1_tu(vbfloat16m1_t maskedoff, vbfloat16m1_t op1, __bf16 op2, size_t vl) {
+ return __riscv_vfrsub_tu(maskedoff, op1, op2, vl);
+}
+
+// CHECK-RV64-LABEL: define dso_local <vscale x 8 x bfloat> @test_vfrsub_vf_bf16m2_tu(
+// CHECK-RV64-SAME: <vscale x 8 x bfloat> [[MASKEDOFF:%.*]], <vscale x 8 x bfloat> [[OP1:%.*]], bfloat noundef [[OP2:%.*]], i64 noundef [[VL:%.*]]) #[[ATTR0]] {
+// CHECK-RV64-NEXT: [[ENTRY:.*:]]
+// CHECK-RV64-NEXT: [[TMP0:%.*]] = call <vscale x 8 x bfloat> @llvm.riscv.vfrsub.nxv8bf16.bf16.i64(<vscale x 8 x bfloat> [[MASKEDOFF]], <vscale x 8 x bfloat> [[OP1]], bfloat [[OP2]], i64 7, i64 [[VL]])
+// CHECK-RV64-NEXT: ret <vscale x 8 x bfloat> [[TMP0]]
+//
+vbfloat16m2_t test_vfrsub_vf_bf16m2_tu(vbfloat16m2_t maskedoff, vbfloat16m2_t op1, __bf16 op2, size_t vl) {
+ return __riscv_vfrsub_tu(maskedoff, op1, op2, vl);
+}
+
+// CHECK-RV64-LABEL: define dso_local <vscale x 16 x bfloat> @test_vfrsub_vf_bf16m4_tu(
+// CHECK-RV64-SAME: <vscale x 16 x bfloat> [[MASKEDOFF:%.*]], <vscale x 16 x bfloat> [[OP1:%.*]], bfloat noundef [[OP2:%.*]], i64 noundef [[VL:%.*]]) #[[ATTR0]] {
+// CHECK-RV64-NEXT: [[ENTRY:.*:]]
+// CHECK-RV64-NEXT: [[TMP0:%.*]] = call <vscale x 16 x bfloat> @llvm.riscv.vfrsub.nxv16bf16.bf16.i64(<vscale x 16 x bfloat> [[MASKEDOFF]], <vscale x 16 x bfloat> [[OP1]], bfloat [[OP2]], i64 7, i64 [[VL]])
+// CHECK-RV64-NEXT: ret <vscale x 16 x bfloat> [[TMP0]]
+//
+vbfloat16m4_t test_vfrsub_vf_bf16m4_tu(vbfloat16m4_t maskedoff, vbfloat16m4_t op1, __bf16 op2, size_t vl) {
+ return __riscv_vfrsub_tu(maskedoff, op1, op2, vl);
+}
+
+// CHECK-RV64-LABEL: define dso_local <vscale x 32 x bfloat> @test_vfrsub_vf_bf16m8_tu(
+// CHECK-RV64-SAME: <vscale x 32 x bfloat> [[MASKEDOFF:%.*]], <vscale x 32 x bfloat> [[OP1:%.*]], bfloat noundef [[OP2:%.*]], i64 noundef [[VL:%.*]]) #[[ATTR0]] {
+// CHECK-RV64-NEXT: [[ENTRY:.*:]]
+// CHECK-RV64-NEXT: [[TMP0:%.*]] = call <vscale x 32 x bfloat> @llvm.riscv.vfrsub.nxv32bf16.bf16.i64(<vscale x 32 x bfloat> [[MASKEDOFF]], <vscale x 32 x bfloat> [[OP1]], bfloat [[OP2]], i64 7, i64 [[VL]])
+// CHECK-RV64-NEXT: ret <vscale x 32 x bfloat> [[TMP0]]
+//
+vbfloat16m8_t test_vfrsub_vf_bf16m8_tu(vbfloat16m8_t maskedoff, vbfloat16m8_t op1, __bf16 op2, size_t vl) {
+ return __riscv_vfrsub_tu(maskedoff, op1, op2, vl);
+}
+
+// CHECK-RV64-LABEL: define dso_local <vscale x 1 x bfloat> @test_vfrsub_vf_bf16mf4_tum(
+// CHECK-RV64-SAME: <vscale x 1 x i1> [[MASK:%.*]], <vscale x 1 x bfloat> [[MASKEDOFF:%.*]], <vscale x 1 x bfloat> [[OP1:%.*]], bfloat noundef [[OP2:%.*]], i64 noundef [[VL:%.*]]) #[[ATTR0]] {
+// CHECK-RV64-NEXT: [[ENTRY:.*:]]
+// CHECK-RV64-NEXT: [[TMP0:%.*]] = call <vscale x 1 x bfloat> @llvm.riscv.vfrsub.mask.nxv1bf16.bf16.i64(<vscale x 1 x bfloat> [[MASKEDOFF]], <vscale x 1 x bfloat> [[OP1]], bfloat [[OP2]], <vscale x 1 x i1> [[MASK]], i64 7, i64 [[VL]], i64 2)
+// CHECK-RV64-NEXT: ret <vscale x 1 x bfloat> [[TMP0]]
+//
+vbfloat16mf4_t test_vfrsub_vf_bf16mf4_tum(vbool64_t mask, vbfloat16mf4_t maskedoff, vbfloat16mf4_t op1, __bf16 op2, size_t vl) {
+ return __riscv_vfrsub_tum(mask, maskedoff, op1, op2, vl);
+}
+
+// CHECK-RV64-LABEL: define dso_local <vscale x 2 x bfloat> @test_vfrsub_vf_bf16mf2_tum(
+// CHECK-RV64-SAME: <vscale x 2 x i1> [[MASK:%.*]], <vscale x 2 x bfloat> [[MASKEDOFF:%.*]], <vscale x 2 x bfloat> [[OP1:%.*]], bfloat noundef [[OP2:%.*]], i64 noundef [[VL:%.*]]) #[[ATTR0]] {
+// CHECK-RV64-NEXT: [[ENTRY:.*:]]
+// CHECK-RV64-NEXT: [[TMP0:%.*]] = call <vscale x 2 x bfloat> @llvm.riscv.vfrsub.mask.nxv2bf16.bf16.i64(<vscale x 2 x bfloat> [[MASKEDOFF]], <vscale x 2 x bfloat> [[OP1]], bfloat [[OP2]], <vscale x 2 x i1> [[MASK]], i64 7, i64 [[VL]], i64 2)
+// CHECK-RV64-NEXT: ret <vscale x 2 x bfloat> [[TMP0]]
+//
+vbfloat16mf2_t test_vfrsub_vf_bf16mf2_tum(vbool32_t mask, vbfloat16mf2_t maskedoff, vbfloat16mf2_t op1, __bf16 op2, size_t vl) {
+ return __riscv_vfrsub_tum(mask, maskedoff, op1, op2, vl);
+}
+
+// CHECK-RV64-LABEL: define dso_local <vscale x 4 x bfloat> @test_vfrsub_vf_bf16m1_tum(
+// CHECK-RV64-SAME: <vscale x 4 x i1> [[MASK:%.*]], <vscale x 4 x bfloat> [[MASKEDOFF:%.*]], <vscale x 4 x bfloat> [[OP1:%.*]], bfloat noundef [[OP2:%.*]], i64 noundef [[VL:%.*]]) #[[ATTR0]] {
+// CHECK-RV64-NEXT: [[ENTRY:.*:]]
+// CHECK-RV64-NEXT: [[TMP0:%.*]] = call <vscale x 4 x bfloat> @llvm.riscv.vfrsub.mask.nxv4bf16.bf16.i64(<vscale x 4 x bfloat> [[MASKEDOFF]], <vscale x 4 x bfloat> [[OP1]], bfloat [[OP2]], <vscale x 4 x i1> [[MASK]], i64 7, i64 [[VL]], i64 2)
+// CHECK-RV64-NEXT: ret <vscale x 4 x bfloat> [[TMP0]]
+//
+vbfloat16m1_t test_vfrsub_vf_bf16m1_tum(vbool16_t mask, vbfloat16m1_t maskedoff, vbfloat16m1_t op1, __bf16 op2, size_t vl) {
+ return __riscv_vfrsub_tum(mask, maskedoff, op1, op2, vl);
+}
+
+// CHECK-RV64-LABEL: define dso_local <vscale x 8 x bfloat> @test_vfrsub_vf_bf16m2_tum(
+// CHECK-RV64-SAME: <vscale x 8 x i1> [[MASK:%.*]], <vscale x 8 x bfloat> [[MASKEDOFF:%.*]], <vscale x 8 x bfloat> [[OP1:%.*]], bfloat noundef [[OP2:%.*]], i64 noundef [[VL:%.*]]) #[[ATTR0]] {
+// CHECK-RV64-NEXT: [[ENTRY:.*:]]
+// CHECK-RV64-NEXT: [[TMP0:%.*]] = call <vscale x 8 x bfloat> @llvm.riscv.vfrsub.mask.nxv8bf16.bf16.i64(<vscale x 8 x bfloat> [[MASKEDOFF]], <vscale x 8 x bfloat> [[OP1]], bfloat [[OP2]], <vscale x 8 x i1> [[MASK]], i64 7, i64 [[VL]], i64 2)
+// CHECK-RV64-NEXT: ret <vscale x 8 x bfloat> [[TMP0]]
+//
+vbfloat16m2_t test_vfrsub_vf_bf16m2_tum(vbool8_t mask, vbfloat16m2_t maskedoff, vbfloat16m2_t op1, __bf16 op2, size_t vl) {
+ return __riscv_vfrsub_tum(mask, maskedoff, op1, op2, vl);
+}
+
+// CHECK-RV64-LABEL: define dso_local <vscale x 16 x bfloat> @test_vfrsub_vf_bf16m4_tum(
+// CHECK-RV64-SAME: <vscale x 16 x i1> [[MASK:%.*]], <vscale x 16 x bfloat> [[MASKEDOFF:%.*]], <vscale x 16 x bfloat> [[OP1:%.*]], bfloat noundef [[OP2:%.*]], i64 noundef [[VL:%.*]]) #[[ATTR0]] {
+// CHECK-RV64-NEXT: [[ENTRY:.*:]]
+// CHECK-RV64-NEXT: [[TMP0:%.*]] = call <vscale x 16 x bfloat> @llvm.riscv.vfrsub.mask.nxv16bf16.bf16.i64(<vscale x 16 x bfloat> [[MASKEDOFF]], <vscale x 16 x bfloat> [[OP1]], bfloat [[OP2]], <vscale x 16 x i1> [[MASK]], i64 7, i64 [[VL]], i64 2)
+// CHECK-RV64-NEXT: ret <vscale x 16 x bfloat> [[TMP0]]
+//
+vbfloat16m4_t test_vfrsub_vf_bf16m4_tum(vbool4_t mask, vbfloat16m4_t maskedoff, vbfloat16m4_t op1, __bf16 op2, size_t vl) {
+ return __riscv_vfrsub_tum(mask, maskedoff, op1, op2, vl);
+}
+
+// CHECK-RV64-LABEL: define dso_local <vscale x 32 x bfloat> @test_vfrsub_vf_bf16m8_tum(
+// CHECK-RV64-SAME: <vscale x 32 x i1> [[MASK:%.*]], <vscale x 32 x bfloat> [[MASKEDOFF:%.*]], <vscale x 32 x bfloat> [[OP1:%.*]], bfloat noundef [[OP2:%.*]], i64 noundef [[VL:%.*]]) #[[ATTR0]] {
+// CHECK-RV64-NEXT: [[ENTRY:.*:]]
+// CHECK-RV64-NEXT: [[TMP0:%.*]] = call <vscale x 32 x bfloat> @llvm.riscv.vfrsub.mask.nxv32bf16.bf16.i64(<vscale x 32 x bfloat> [[MASKEDOFF]], <vscale x 32 x bfloat> [[OP1]], bfloat [[OP2]], <vscale x 32 x i1> [[MASK]], i64 7, i64 [[VL]], i64 2)
+// CHECK-RV64-NEXT: ret <vscale x 32 x bfloat> [[TMP0]]
+//
+vbfloat16m8_t test_vfrsub_vf_bf16m8_tum(vbool2_t mask, vbfloat16m8_t maskedoff, vbfloat16m8_t op1, __bf16 op2, size_t vl) {
+ return __riscv_vfrsub_tum(mask, maskedoff, op1, op2, vl);
+}
+
+// CHECK-RV64-LABEL: define dso_local <vscale x 1 x bfloat> @test_vfrsub_vf_bf16mf4_tumu(
+// CHECK-RV64-SAME: <vscale x 1 x i1> [[MASK:%.*]], <vscale x 1 x bfloat> [[MASKEDOFF:%.*]], <vscale x 1 x bfloat> [[OP1:%.*]], bfloat noundef [[OP2:%.*]], i64 noundef [[VL:%.*]]) #[[ATTR0]] {
+// CHECK-RV64-NEXT: [[ENTRY:.*:]]
+// CHECK-RV64-NEXT: [[TMP0:%.*]] = call <vscale x 1 x bfloat> @llvm.riscv.vfrsub.mask.nxv1bf16.bf16.i64(<vscale x 1 x bfloat> [[MASKEDOFF]], <vscale x 1 x bfloat> [[OP1]], bfloat [[OP2]], <vscale x 1 x i1> [[MASK]], i64 7, i64 [[VL]], i64 0)
+// CHECK-RV64-NEXT: ret <vscale x 1 x bfloat> [[TMP0]]
+//
+vbfloat16mf4_t test_vfrsub_vf_bf16mf4_tumu(vbool64_t mask, vbfloat16mf4_t maskedoff, vbfloat16mf4_t op1, __bf16 op2, size_t vl) {
+ return __riscv_vfrsub_tumu(mask, maskedoff, op1, op2, vl);
+}
+
+// CHECK-RV64-LABEL: define dso_local <vscale x 2 x bfloat> @test_vfrsub_vf_bf16mf2_tumu(
+// CHECK-RV64-SAME: <vscale x 2 x i1> [[MASK:%.*]], <vscale x 2 x bfloat> [[MASKEDOFF:%.*]], <vscale x 2 x bfloat> [[OP1:%.*]], bfloat noundef [[OP2:%.*]], i64 noundef [[VL:%.*]]) #[[ATTR0]] {
+// CHECK-RV64-NEXT: [[ENTRY:.*:]]
+// CHECK-RV64-NEXT: [[TMP0:%.*]] = call <vscale x 2 x bfloat> @llvm.riscv.vfrsub.mask.nxv2bf16.bf16.i64(<vscale x 2 x bfloat> [[MASKEDOFF]], <vscale x 2 x bfloat> [[OP1]], bfloat [[OP2]], <vscale x 2 x i1> [[MASK]], i64 7, i64 [[VL]], i64 0)
+// CHECK-RV64-NEXT: ret <vscale x 2 x bfloat> [[TMP0]]
+//
+vbfloat16mf2_t test_vfrsub_vf_bf16mf2_tumu(vbool32_t mask, vbfloat16mf2_t maskedoff, vbfloat16mf2_t op1, __bf16 op2, size_t vl) {
+ return __riscv_vfrsub_tumu(mask, maskedoff, op1, op2, vl);
+}
+
+// CHECK-RV64-LABEL: define dso_local <vscale x 4 x bfloat> @test_vfrsub_vf_bf16m1_tumu(
+// CHECK-RV64-SAME: <vscale x 4 x i1> [[MASK:%.*]], <vscale x 4 x bfloat> [[MASKEDOFF:%.*]], <vscale x 4 x bfloat> [[OP1:%.*]], bfloat noundef [[OP2:%.*]], i64 noundef [[VL:%.*]]) #[[ATTR0]] {
+// CHECK-RV64-NEXT: [[ENTRY:.*:]]
+// CHECK-RV64-NEXT: [[TMP0:%.*]] = call <vscale x 4 x bfloat> @llvm.riscv.vfrsub.mask.nxv4bf16.bf16.i64(<vscale x 4 x bfloat> [[MASKEDOFF]], <vscale x 4 x bfloat> [[OP1]], bfloat [[OP2]], <vscale x 4 x i1> [[MASK]], i64 7, i64 [[VL]], i64 0)
+// CHECK-RV64-NEXT: ret <vscale x 4 x bfloat> [[TMP0]]
+//
+vbfloat16m1_t test_vfrsub_vf_bf16m1_tumu(vbool16_t mask, vbfloat16m1_t maskedoff, vbfloat16m1_t op1, __bf16 op2, size_t vl) {
+ return __riscv_vfrsub_tumu(mask, maskedoff, op1, op2, vl);
+}
+
+// CHECK-RV64-LABEL: define dso_local <vscale x 8 x bfloat> @test_vfrsub_vf_bf16m2_tumu(
+// CHECK-RV64-SAME: <vscale x 8 x i1> [[MASK:%.*]], <vscale x 8 x bfloat> [[MASKEDOFF:%.*]], <vscale x 8 x bfloat> [[OP1:%.*]], bfloat noundef [[OP2:%.*]], i64 noundef [[VL:%.*]]) #[[ATTR0]] {
+// CHECK-RV64-NEXT: [[ENTRY:.*:]]
+// CHECK-RV64-NEXT: [[TMP0:%.*]] = call <vscale x 8 x bfloat> @llvm.riscv.vfrsub.mask.nxv8bf16.bf16.i64(<vscale x 8 x bfloat> [[MASKEDOFF]], <vscale x 8 x bfloat> [[OP1]], bfloat [[OP2]], <vscale x 8 x i1> [[MASK]], i64 7, i64 [[VL]], i64 0)
+// CHECK-RV64-NEXT: ret <vscale x 8 x bfloat> [[TMP0]]
+//
+vbfloat16m2_t test_vfrsub_vf_bf16m2_tumu(vbool8_t mask, vbfloat16m2_t maskedoff, vbfloat16m2_t op1, __bf16 op2, size_t vl) {
+ return __riscv_vfrsub_tumu(mask, maskedoff, op1, op2, vl);
+}
+
+// CHECK-RV64-LABEL: define dso_local <vscale x 16 x bfloat> @test_vfrsub_vf_bf16m4_tumu(
+// CHECK-RV64-SAME: <vscale x 16 x i1> [[MASK:%.*]], <vscale x 16 x bfloat> [[MASKEDOFF:%.*]], <vscale x 16 x bfloat> [[OP1:%.*]], bfloat noundef [[OP2:%.*]], i64 noundef [[VL:%.*]]) #[[ATTR0]] {
+// CHECK-RV64-NEXT: [[ENTRY:.*:]]
+// CHECK-RV64-NEXT: [[TMP0:%.*]] = call <vscale x 16 x bfloat> @llvm.riscv.vfrsub.mask.nxv16bf16.bf16.i64(<vscale x 16 x bfloat> [[MASKEDOFF]], <vscale x 16 x bfloat> [[OP1]], bfloat [[OP2]], <vscale x 16 x i1> [[MASK]], i64 7, i64 [[VL]], i64 0)
+// CHECK-RV64-NEXT: ret <vscale x 16 x bfloat> [[TMP0]]
+//
+vbfloat16m4_t test_vfrsub_vf_bf16m4_tumu(vbool4_t mask, vbfloat16m4_t maskedoff, vbfloat16m4_t op1, __bf16 op2, size_t vl) {
+ return __riscv_vfrsub_tumu(mask, maskedoff, op1, op2, vl);
+}
+
+// CHECK-RV64-LABEL: define dso_local <vscale x 32 x bfloat> @test_vfrsub_vf_bf16m8_tumu(
+// CHECK-RV64-SAME: <vscale x 32 x i1> [[MASK:%.*]], <vscale x 32 x bfloat> [[MASKEDOFF:%.*]], <vscale x 32 x bfloat> [[OP1:%.*]], bfloat noundef [[OP2:%.*]], i64 noundef [[VL:%.*]]) #[[ATTR0]] {
+// CHECK-RV64-NEXT: [[ENTRY:.*:]]
+// CHECK-RV64-NEXT: [[TMP0:%.*]] = call <vscale x 32 x bfloat> @llvm.riscv.vfrsub.mask.nxv32bf16.bf16.i64(<vscale x 32 x bfloat> [[MASKEDOFF]], <vscale x 32 x bfloat> [[OP1]], bfloat [[OP2]], <vscale x 32 x i1> [[MASK]], i64 7, i64 [[VL]], i64 0)
+// CHECK-RV64-NEXT: ret <vscale x 32 x bfloat> [[TMP0]]
+//
+vbfloat16m8_t test_vfrsub_vf_bf16m8_tumu(vbool2_t mask, vbfloat16m8_t maskedoff, vbfloat16m8_t op1, __bf16 op2, size_t vl) {
+ return __riscv_vfrsub_tumu(mask, maskedoff, op1, op2, vl);
+}
+
+// CHECK-RV64-LABEL: define dso_local <vscale x 1 x bfloat> @test_vfrsub_vf_bf16mf4_mu(
+// CHECK-RV64-SAME: <vscale x 1 x i1> [[MASK:%.*]], <vscale x 1 x bfloat> [[MASKEDOFF:%.*]], <vscale x 1 x bfloat> [[OP1:%.*]], bfloat noundef [[OP2:%.*]], i64 noundef [[VL:%.*]]) #[[ATTR0]] {
+// CHECK-RV64-NEXT: [[ENTRY:.*:]]
+// CHECK-RV64-NEXT: [[TMP0:%.*]] = call <vscale x 1 x bfloat> @llvm.riscv.vfrsub.mask.nxv1bf16.bf16.i64(<vscale x 1 x bfloat> [[MASKEDOFF]], <vscale x 1 x bfloat> [[OP1]], bfloat [[OP2]], <vscale x 1 x i1> [[MASK]], i64 7, i64 [[VL]], i64 1)
+// CHECK-RV64-NEXT: ret <vscale x 1 x bfloat> [[TMP0]]
+//
+vbfloat16mf4_t test_vfrsub_vf_bf16mf4_mu(vbool64_t mask, vbfloat16mf4_t maskedoff, vbfloat16mf4_t op1, __bf16 op2, size_t vl) {
+ return __riscv_vfrsub_mu(mask, maskedoff, op1, op2, vl);
+}
+
+// CHECK-RV64-LABEL: define dso_local <vscale x 2 x bfloat> @test_vfrsub_vf_bf16mf2_mu(
+// CHECK-RV64-SAME: <vscale x 2 x i1> [[MASK:%.*]], <vscale x 2 x bfloat> [[MASKEDOFF:%.*]], <vscale x 2 x bfloat> [[OP1:%.*]], bfloat noundef [[OP2:%.*]], i64 noundef [[VL:%.*]]) #[[ATTR0]] {
+// CHECK-RV64-NEXT: [[ENTRY:.*:]]
+// CHECK-RV64-NEXT: [[TMP0:%.*]] = call <vscale x 2 x bfloat> @llvm.riscv.vfrsub.mask.nxv2bf16.bf16.i64(<vscale x 2 x bfloat> [[MASKEDOFF]], <vscale x 2 x bfloat> [[OP1]], bfloat [[OP2]], <vscale x 2 x i1> [[MASK]], i64 7, i64 [[VL]], i64 1)
+// CHECK-RV64-NEXT: ret <vscale x 2 x bfloat> [[TMP0]]
+//
+vbfloat16mf2_t test_vfrsub_vf_bf16mf2_mu(vbool32_t mask, vbfloat16mf2_t maskedoff, vbfloat16mf2_t op1, __bf16 op2, size_t vl) {
+ return __riscv_vfrsub_mu(mask, maskedoff, op1, op2, vl);
+}
+
+// CHECK-RV64-LABEL: define dso_local <vscale x 4 x bfloat> @test_vfrsub_vf_bf16m1_mu(
+// CHECK-RV64-SAME: <vscale x 4 x i1> [[MASK:%.*]], <vscale x 4 x bfloat> [[MASKEDOFF:%.*]], <vscale x 4 x bfloat> [[OP1:%.*]], bfloat noundef [[OP2:%.*]], i64 noundef [[VL:%.*]]) #[[ATTR0]] {
+// CHECK-RV64-NEXT: [[ENTRY:.*:]]
+// CHECK-RV64-NEXT: [[TMP0:%.*]] = call <vscale x 4 x bfloat> @llvm.riscv.vfrsub.mask.nxv4bf16.bf16.i64(<vscale x 4 x bfloat> [[MASKEDOFF]], <vscale x 4 x bfloat> [[OP1]], bfloat [[OP2]], <vscale x 4 x i1> [[MASK]], i64 7, i64 [[VL]], i64 1)
+// CHECK-RV64-NEXT: ret <vscale x 4 x bfloat> [[TMP0]]
+//
+vbfloat16m1_t test_vfrsub_vf_bf16m1_mu(vbool16_t mask, vbfloat16m1_t maskedoff, vbfloat16m1_t op1, __bf16 op2, size_t vl) {
+ return __riscv_vfrsub_mu(mask, maskedoff, op1, op2, vl);
+}
+
+// CHECK-RV64-LABEL: define dso_local <vscale x 8 x bfloat> @test_vfrsub_vf_bf16m2_mu(
+// CHECK-RV64-SAME: <vscale x 8 x i1> [[MASK:%.*]], <vscale x 8 x bfloat> [[MASKEDOFF:%.*]], <vscale x 8 x bfloat> [[OP1:%.*]], bfloat noundef [[OP2:%.*]], i64 noundef [[VL:%.*]]) #[[ATTR0]] {
+// CHECK-RV64-NEXT: [[ENTRY:.*:]]
+// CHECK-RV64-NEXT: [[TMP0:%.*]] = call <vscale x 8 x bfloat> @llvm.riscv.vfrsub.mask.nxv8bf16.bf16.i64(<vscale x 8 x bfloat> [[MASKEDOFF]], <vscale x 8 x bfloat> [[OP1]], bfloat [[OP2]], <vscale x 8 x i1> [[MASK]], i64 7, i64 [[VL]], i64 1)
+// CHECK-RV64-NEXT: ret <vscale x 8 x bfloat> [[TMP0]]
+//
+vbfloat16m2_t test_vfrsub_vf_bf16m2_mu(vbool8_t mask, vbfloat16m2_t maskedoff, vbfloat16m2_t op1, __bf16 op2, size_t vl) {
+ return __riscv_vfrsub_mu(mask, maskedoff, op1, op2, vl);
+}
+
+// CHECK-RV64-LABEL: define dso_local <vscale x 16 x bfloat> @test_vfrsub_vf_bf16m4_mu(
+// CHECK-RV64-SAME: <vscale x 16 x i1> [[MASK:%.*]], <vscale x 16 x bfloat> [[MASKEDOFF:%.*]], <vscale x 16 x bfloat> [[OP1:%.*]], bfloat noundef [[OP2:%.*]], i64 noundef [[VL:%.*]]) #[[ATTR0]] {
+// CHECK-RV64-NEXT: [[ENTRY:.*:]]
+// CHECK-RV64-NEXT: [[TMP0:%.*]] = call <vscale x 16 x bfloat> @llvm.riscv.vfrsub.mask.nxv16bf16.bf16.i64(<vscale x 16 x bfloat> [[MASKEDOFF]], <vscale x 16 x bfloat> [[OP1]], bfloat [[OP2]], <vscale x 16 x i1> [[MASK]], i64 7, i64 [[VL]], i64 1)
+// CHECK-RV64-NEXT: ret <vscale x 16 x bfloat> [[TMP0]]
+//
+vbfloat16m4_t test_vfrsub_vf_bf16m4_mu(vbool4_t mask, vbfloat16m4_t maskedoff, vbfloat16m4_t op1, __bf16 op2, size_t vl) {
+ return __riscv_vfrsub_mu(mask, maskedoff, op1, op2, vl);
+}
+
+// CHECK-RV64-LABEL: define dso_local <vscale x 32 x bfloat> @test_vfrsub_vf_bf16m8_mu(
+// CHECK-RV64-SAME: <vscale x 32 x i1> [[MASK:%.*]], <vscale x 32 x bfloat> [[MASKEDOFF:%.*]], <vscale x 32 x bfloat> [[OP1:%.*]], bfloat noundef [[OP2:%.*]], i64 noundef [[VL:%.*]]) #[[ATTR0]] {
+// CHECK-RV64-NEXT: [[ENTRY:.*:]]
+// CHECK-RV64-NEXT: [[TMP0:%.*]] = call <vscale x 32 x bfloat> @llvm.riscv.vfrsub.mask.nxv32bf16.bf16.i64(<vscale x 32 x bfloat> [[MASKEDOFF]], <vscale x 32 x bfloat> [[OP1]], bfloat [[OP2]], <vscale x 32 x i1> [[MASK]], i64 7, i64 [[VL]], i64 1)
+// CHECK-RV64-NEXT: ret <vscale x 32 x bfloat> [[TMP0]]
+//
+vbfloat16m8_t test_vfrsub_vf_bf16m8_mu(vbool2_t mask, vbfloat16m8_t maskedoff, vbfloat16m8_t op1, __bf16 op2, size_t vl) {
+ return __riscv_vfrsub_mu(mask, maskedoff, op1, op2, vl);
+}
+
diff --git a/clang/test/CodeGen/RISCV/rvv-intrinsics-autogenerated/zvfbfa/policy/overloaded/vfsgnj.c b/clang/test/CodeGen/RISCV/rvv-intrinsics-autogenerated/zvfbfa/policy/overloaded/vfsgnj.c
new file mode 100644
index 0000000..c1f69932
--- /dev/null
+++ b/clang/test/CodeGen/RISCV/rvv-intrinsics-autogenerated/zvfbfa/policy/overloaded/vfsgnj.c
@@ -0,0 +1,489 @@
+// NOTE: Assertions have been autogenerated by utils/update_cc_test_checks.py UTC_ARGS: --version 5
+// REQUIRES: riscv-registered-target
+// RUN: %clang_cc1 -triple riscv64 -target-feature +v \
+// RUN: -target-feature +experimental-zvfbfa -disable-O0-optnone \
+// RUN: -emit-llvm %s -o - | opt -S -passes=mem2reg | \
+// RUN: FileCheck --check-prefix=CHECK-RV64 %s
+
+#include <riscv_vector.h>
+
+// CHECK-RV64-LABEL: define dso_local <vscale x 1 x bfloat> @test_vfsgnj_vv_bf16mf4_tu(
+// CHECK-RV64-SAME: <vscale x 1 x bfloat> [[MASKEDOFF:%.*]], <vscale x 1 x bfloat> [[OP1:%.*]], <vscale x 1 x bfloat> [[OP2:%.*]], i64 noundef [[VL:%.*]]) #[[ATTR0:[0-9]+]] {
+// CHECK-RV64-NEXT: [[ENTRY:.*:]]
+// CHECK-RV64-NEXT: [[TMP0:%.*]] = call <vscale x 1 x bfloat> @llvm.riscv.vfsgnj.nxv1bf16.nxv1bf16.i64(<vscale x 1 x bfloat> [[MASKEDOFF]], <vscale x 1 x bfloat> [[OP1]], <vscale x 1 x bfloat> [[OP2]], i64 [[VL]])
+// CHECK-RV64-NEXT: ret <vscale x 1 x bfloat> [[TMP0]]
+//
+vbfloat16mf4_t test_vfsgnj_vv_bf16mf4_tu(vbfloat16mf4_t maskedoff, vbfloat16mf4_t op1, vbfloat16mf4_t op2, size_t vl) {
+ return __riscv_vfsgnj_tu(maskedoff, op1, op2, vl);
+}
+
+// CHECK-RV64-LABEL: define dso_local <vscale x 1 x bfloat> @test_vfsgnj_vf_bf16mf4_tu(
+// CHECK-RV64-SAME: <vscale x 1 x bfloat> [[MASKEDOFF:%.*]], <vscale x 1 x bfloat> [[OP1:%.*]], bfloat noundef [[OP2:%.*]], i64 noundef [[VL:%.*]]) #[[ATTR0]] {
+// CHECK-RV64-NEXT: [[ENTRY:.*:]]
+// CHECK-RV64-NEXT: [[TMP0:%.*]] = call <vscale x 1 x bfloat> @llvm.riscv.vfsgnj.nxv1bf16.bf16.i64(<vscale x 1 x bfloat> [[MASKEDOFF]], <vscale x 1 x bfloat> [[OP1]], bfloat [[OP2]], i64 [[VL]])
+// CHECK-RV64-NEXT: ret <vscale x 1 x bfloat> [[TMP0]]
+//
+vbfloat16mf4_t test_vfsgnj_vf_bf16mf4_tu(vbfloat16mf4_t maskedoff, vbfloat16mf4_t op1, __bf16 op2, size_t vl) {
+ return __riscv_vfsgnj_tu(maskedoff, op1, op2, vl);
+}
+
+// CHECK-RV64-LABEL: define dso_local <vscale x 2 x bfloat> @test_vfsgnj_vv_bf16mf2_tu(
+// CHECK-RV64-SAME: <vscale x 2 x bfloat> [[MASKEDOFF:%.*]], <vscale x 2 x bfloat> [[OP1:%.*]], <vscale x 2 x bfloat> [[OP2:%.*]], i64 noundef [[VL:%.*]]) #[[ATTR0]] {
+// CHECK-RV64-NEXT: [[ENTRY:.*:]]
+// CHECK-RV64-NEXT: [[TMP0:%.*]] = call <vscale x 2 x bfloat> @llvm.riscv.vfsgnj.nxv2bf16.nxv2bf16.i64(<vscale x 2 x bfloat> [[MASKEDOFF]], <vscale x 2 x bfloat> [[OP1]], <vscale x 2 x bfloat> [[OP2]], i64 [[VL]])
+// CHECK-RV64-NEXT: ret <vscale x 2 x bfloat> [[TMP0]]
+//
+vbfloat16mf2_t test_vfsgnj_vv_bf16mf2_tu(vbfloat16mf2_t maskedoff, vbfloat16mf2_t op1, vbfloat16mf2_t op2, size_t vl) {
+ return __riscv_vfsgnj_tu(maskedoff, op1, op2, vl);
+}
+
+// CHECK-RV64-LABEL: define dso_local <vscale x 2 x bfloat> @test_vfsgnj_vf_bf16mf2_tu(
+// CHECK-RV64-SAME: <vscale x 2 x bfloat> [[MASKEDOFF:%.*]], <vscale x 2 x bfloat> [[OP1:%.*]], bfloat noundef [[OP2:%.*]], i64 noundef [[VL:%.*]]) #[[ATTR0]] {
+// CHECK-RV64-NEXT: [[ENTRY:.*:]]
+// CHECK-RV64-NEXT: [[TMP0:%.*]] = call <vscale x 2 x bfloat> @llvm.riscv.vfsgnj.nxv2bf16.bf16.i64(<vscale x 2 x bfloat> [[MASKEDOFF]], <vscale x 2 x bfloat> [[OP1]], bfloat [[OP2]], i64 [[VL]])
+// CHECK-RV64-NEXT: ret <vscale x 2 x bfloat> [[TMP0]]
+//
+vbfloat16mf2_t test_vfsgnj_vf_bf16mf2_tu(vbfloat16mf2_t maskedoff, vbfloat16mf2_t op1, __bf16 op2, size_t vl) {
+ return __riscv_vfsgnj_tu(maskedoff, op1, op2, vl);
+}
+
+// CHECK-RV64-LABEL: define dso_local <vscale x 4 x bfloat> @test_vfsgnj_vv_bf16m1_tu(
+// CHECK-RV64-SAME: <vscale x 4 x bfloat> [[MASKEDOFF:%.*]], <vscale x 4 x bfloat> [[OP1:%.*]], <vscale x 4 x bfloat> [[OP2:%.*]], i64 noundef [[VL:%.*]]) #[[ATTR0]] {
+// CHECK-RV64-NEXT: [[ENTRY:.*:]]
+// CHECK-RV64-NEXT: [[TMP0:%.*]] = call <vscale x 4 x bfloat> @llvm.riscv.vfsgnj.nxv4bf16.nxv4bf16.i64(<vscale x 4 x bfloat> [[MASKEDOFF]], <vscale x 4 x bfloat> [[OP1]], <vscale x 4 x bfloat> [[OP2]], i64 [[VL]])
+// CHECK-RV64-NEXT: ret <vscale x 4 x bfloat> [[TMP0]]
+//
+vbfloat16m1_t test_vfsgnj_vv_bf16m1_tu(vbfloat16m1_t maskedoff, vbfloat16m1_t op1, vbfloat16m1_t op2, size_t vl) {
+ return __riscv_vfsgnj_tu(maskedoff, op1, op2, vl);
+}
+
+// CHECK-RV64-LABEL: define dso_local <vscale x 4 x bfloat> @test_vfsgnj_vf_bf16m1_tu(
+// CHECK-RV64-SAME: <vscale x 4 x bfloat> [[MASKEDOFF:%.*]], <vscale x 4 x bfloat> [[OP1:%.*]], bfloat noundef [[OP2:%.*]], i64 noundef [[VL:%.*]]) #[[ATTR0]] {
+// CHECK-RV64-NEXT: [[ENTRY:.*:]]
+// CHECK-RV64-NEXT: [[TMP0:%.*]] = call <vscale x 4 x bfloat> @llvm.riscv.vfsgnj.nxv4bf16.bf16.i64(<vscale x 4 x bfloat> [[MASKEDOFF]], <vscale x 4 x bfloat> [[OP1]], bfloat [[OP2]], i64 [[VL]])
+// CHECK-RV64-NEXT: ret <vscale x 4 x bfloat> [[TMP0]]
+//
+vbfloat16m1_t test_vfsgnj_vf_bf16m1_tu(vbfloat16m1_t maskedoff, vbfloat16m1_t op1, __bf16 op2, size_t vl) {
+ return __riscv_vfsgnj_tu(maskedoff, op1, op2, vl);
+}
+
+// CHECK-RV64-LABEL: define dso_local <vscale x 8 x bfloat> @test_vfsgnj_vv_bf16m2_tu(
+// CHECK-RV64-SAME: <vscale x 8 x bfloat> [[MASKEDOFF:%.*]], <vscale x 8 x bfloat> [[OP1:%.*]], <vscale x 8 x bfloat> [[OP2:%.*]], i64 noundef [[VL:%.*]]) #[[ATTR0]] {
+// CHECK-RV64-NEXT: [[ENTRY:.*:]]
+// CHECK-RV64-NEXT: [[TMP0:%.*]] = call <vscale x 8 x bfloat> @llvm.riscv.vfsgnj.nxv8bf16.nxv8bf16.i64(<vscale x 8 x bfloat> [[MASKEDOFF]], <vscale x 8 x bfloat> [[OP1]], <vscale x 8 x bfloat> [[OP2]], i64 [[VL]])
+// CHECK-RV64-NEXT: ret <vscale x 8 x bfloat> [[TMP0]]
+//
+vbfloat16m2_t test_vfsgnj_vv_bf16m2_tu(vbfloat16m2_t maskedoff, vbfloat16m2_t op1, vbfloat16m2_t op2, size_t vl) {
+ return __riscv_vfsgnj_tu(maskedoff, op1, op2, vl);
+}
+
+// CHECK-RV64-LABEL: define dso_local <vscale x 8 x bfloat> @test_vfsgnj_vf_bf16m2_tu(
+// CHECK-RV64-SAME: <vscale x 8 x bfloat> [[MASKEDOFF:%.*]], <vscale x 8 x bfloat> [[OP1:%.*]], bfloat noundef [[OP2:%.*]], i64 noundef [[VL:%.*]]) #[[ATTR0]] {
+// CHECK-RV64-NEXT: [[ENTRY:.*:]]
+// CHECK-RV64-NEXT: [[TMP0:%.*]] = call <vscale x 8 x bfloat> @llvm.riscv.vfsgnj.nxv8bf16.bf16.i64(<vscale x 8 x bfloat> [[MASKEDOFF]], <vscale x 8 x bfloat> [[OP1]], bfloat [[OP2]], i64 [[VL]])
+// CHECK-RV64-NEXT: ret <vscale x 8 x bfloat> [[TMP0]]
+//
+vbfloat16m2_t test_vfsgnj_vf_bf16m2_tu(vbfloat16m2_t maskedoff, vbfloat16m2_t op1, __bf16 op2, size_t vl) {
+ return __riscv_vfsgnj_tu(maskedoff, op1, op2, vl);
+}
+
+// CHECK-RV64-LABEL: define dso_local <vscale x 16 x bfloat> @test_vfsgnj_vv_bf16m4_tu(
+// CHECK-RV64-SAME: <vscale x 16 x bfloat> [[MASKEDOFF:%.*]], <vscale x 16 x bfloat> [[OP1:%.*]], <vscale x 16 x bfloat> [[OP2:%.*]], i64 noundef [[VL:%.*]]) #[[ATTR0]] {
+// CHECK-RV64-NEXT: [[ENTRY:.*:]]
+// CHECK-RV64-NEXT: [[TMP0:%.*]] = call <vscale x 16 x bfloat> @llvm.riscv.vfsgnj.nxv16bf16.nxv16bf16.i64(<vscale x 16 x bfloat> [[MASKEDOFF]], <vscale x 16 x bfloat> [[OP1]], <vscale x 16 x bfloat> [[OP2]], i64 [[VL]])
+// CHECK-RV64-NEXT: ret <vscale x 16 x bfloat> [[TMP0]]
+//
+vbfloat16m4_t test_vfsgnj_vv_bf16m4_tu(vbfloat16m4_t maskedoff, vbfloat16m4_t op1, vbfloat16m4_t op2, size_t vl) {
+ return __riscv_vfsgnj_tu(maskedoff, op1, op2, vl);
+}
+
+// CHECK-RV64-LABEL: define dso_local <vscale x 16 x bfloat> @test_vfsgnj_vf_bf16m4_tu(
+// CHECK-RV64-SAME: <vscale x 16 x bfloat> [[MASKEDOFF:%.*]], <vscale x 16 x bfloat> [[OP1:%.*]], bfloat noundef [[OP2:%.*]], i64 noundef [[VL:%.*]]) #[[ATTR0]] {
+// CHECK-RV64-NEXT: [[ENTRY:.*:]]
+// CHECK-RV64-NEXT: [[TMP0:%.*]] = call <vscale x 16 x bfloat> @llvm.riscv.vfsgnj.nxv16bf16.bf16.i64(<vscale x 16 x bfloat> [[MASKEDOFF]], <vscale x 16 x bfloat> [[OP1]], bfloat [[OP2]], i64 [[VL]])
+// CHECK-RV64-NEXT: ret <vscale x 16 x bfloat> [[TMP0]]
+//
+vbfloat16m4_t test_vfsgnj_vf_bf16m4_tu(vbfloat16m4_t maskedoff, vbfloat16m4_t op1, __bf16 op2, size_t vl) {
+ return __riscv_vfsgnj_tu(maskedoff, op1, op2, vl);
+}
+
+// CHECK-RV64-LABEL: define dso_local <vscale x 32 x bfloat> @test_vfsgnj_vv_bf16m8_tu(
+// CHECK-RV64-SAME: <vscale x 32 x bfloat> [[MASKEDOFF:%.*]], <vscale x 32 x bfloat> [[OP1:%.*]], <vscale x 32 x bfloat> [[OP2:%.*]], i64 noundef [[VL:%.*]]) #[[ATTR0]] {
+// CHECK-RV64-NEXT: [[ENTRY:.*:]]
+// CHECK-RV64-NEXT: [[TMP0:%.*]] = call <vscale x 32 x bfloat> @llvm.riscv.vfsgnj.nxv32bf16.nxv32bf16.i64(<vscale x 32 x bfloat> [[MASKEDOFF]], <vscale x 32 x bfloat> [[OP1]], <vscale x 32 x bfloat> [[OP2]], i64 [[VL]])
+// CHECK-RV64-NEXT: ret <vscale x 32 x bfloat> [[TMP0]]
+//
+vbfloat16m8_t test_vfsgnj_vv_bf16m8_tu(vbfloat16m8_t maskedoff, vbfloat16m8_t op1, vbfloat16m8_t op2, size_t vl) {
+ return __riscv_vfsgnj_tu(maskedoff, op1, op2, vl);
+}
+
+// CHECK-RV64-LABEL: define dso_local <vscale x 32 x bfloat> @test_vfsgnj_vf_bf16m8_tu(
+// CHECK-RV64-SAME: <vscale x 32 x bfloat> [[MASKEDOFF:%.*]], <vscale x 32 x bfloat> [[OP1:%.*]], bfloat noundef [[OP2:%.*]], i64 noundef [[VL:%.*]]) #[[ATTR0]] {
+// CHECK-RV64-NEXT: [[ENTRY:.*:]]
+// CHECK-RV64-NEXT: [[TMP0:%.*]] = call <vscale x 32 x bfloat> @llvm.riscv.vfsgnj.nxv32bf16.bf16.i64(<vscale x 32 x bfloat> [[MASKEDOFF]], <vscale x 32 x bfloat> [[OP1]], bfloat [[OP2]], i64 [[VL]])
+// CHECK-RV64-NEXT: ret <vscale x 32 x bfloat> [[TMP0]]
+//
+vbfloat16m8_t test_vfsgnj_vf_bf16m8_tu(vbfloat16m8_t maskedoff, vbfloat16m8_t op1, __bf16 op2, size_t vl) {
+ return __riscv_vfsgnj_tu(maskedoff, op1, op2, vl);
+}
+
+// CHECK-RV64-LABEL: define dso_local <vscale x 1 x bfloat> @test_vfsgnj_vv_bf16mf4_tum(
+// CHECK-RV64-SAME: <vscale x 1 x i1> [[MASK:%.*]], <vscale x 1 x bfloat> [[MASKEDOFF:%.*]], <vscale x 1 x bfloat> [[OP1:%.*]], <vscale x 1 x bfloat> [[OP2:%.*]], i64 noundef [[VL:%.*]]) #[[ATTR0]] {
+// CHECK-RV64-NEXT: [[ENTRY:.*:]]
+// CHECK-RV64-NEXT: [[TMP0:%.*]] = call <vscale x 1 x bfloat> @llvm.riscv.vfsgnj.mask.nxv1bf16.nxv1bf16.i64(<vscale x 1 x bfloat> [[MASKEDOFF]], <vscale x 1 x bfloat> [[OP1]], <vscale x 1 x bfloat> [[OP2]], <vscale x 1 x i1> [[MASK]], i64 [[VL]], i64 2)
+// CHECK-RV64-NEXT: ret <vscale x 1 x bfloat> [[TMP0]]
+//
+vbfloat16mf4_t test_vfsgnj_vv_bf16mf4_tum(vbool64_t mask, vbfloat16mf4_t maskedoff, vbfloat16mf4_t op1, vbfloat16mf4_t op2, size_t vl) {
+ return __riscv_vfsgnj_tum(mask, maskedoff, op1, op2, vl);
+}
+
+// CHECK-RV64-LABEL: define dso_local <vscale x 1 x bfloat> @test_vfsgnj_vf_bf16mf4_tum(
+// CHECK-RV64-SAME: <vscale x 1 x i1> [[MASK:%.*]], <vscale x 1 x bfloat> [[MASKEDOFF:%.*]], <vscale x 1 x bfloat> [[OP1:%.*]], bfloat noundef [[OP2:%.*]], i64 noundef [[VL:%.*]]) #[[ATTR0]] {
+// CHECK-RV64-NEXT: [[ENTRY:.*:]]
+// CHECK-RV64-NEXT: [[TMP0:%.*]] = call <vscale x 1 x bfloat> @llvm.riscv.vfsgnj.mask.nxv1bf16.bf16.i64(<vscale x 1 x bfloat> [[MASKEDOFF]], <vscale x 1 x bfloat> [[OP1]], bfloat [[OP2]], <vscale x 1 x i1> [[MASK]], i64 [[VL]], i64 2)
+// CHECK-RV64-NEXT: ret <vscale x 1 x bfloat> [[TMP0]]
+//
+vbfloat16mf4_t test_vfsgnj_vf_bf16mf4_tum(vbool64_t mask, vbfloat16mf4_t maskedoff, vbfloat16mf4_t op1, __bf16 op2, size_t vl) {
+ return __riscv_vfsgnj_tum(mask, maskedoff, op1, op2, vl);
+}
+
+// CHECK-RV64-LABEL: define dso_local <vscale x 2 x bfloat> @test_vfsgnj_vv_bf16mf2_tum(
+// CHECK-RV64-SAME: <vscale x 2 x i1> [[MASK:%.*]], <vscale x 2 x bfloat> [[MASKEDOFF:%.*]], <vscale x 2 x bfloat> [[OP1:%.*]], <vscale x 2 x bfloat> [[OP2:%.*]], i64 noundef [[VL:%.*]]) #[[ATTR0]] {
+// CHECK-RV64-NEXT: [[ENTRY:.*:]]
+// CHECK-RV64-NEXT: [[TMP0:%.*]] = call <vscale x 2 x bfloat> @llvm.riscv.vfsgnj.mask.nxv2bf16.nxv2bf16.i64(<vscale x 2 x bfloat> [[MASKEDOFF]], <vscale x 2 x bfloat> [[OP1]], <vscale x 2 x bfloat> [[OP2]], <vscale x 2 x i1> [[MASK]], i64 [[VL]], i64 2)
+// CHECK-RV64-NEXT: ret <vscale x 2 x bfloat> [[TMP0]]
+//
+vbfloat16mf2_t test_vfsgnj_vv_bf16mf2_tum(vbool32_t mask, vbfloat16mf2_t maskedoff, vbfloat16mf2_t op1, vbfloat16mf2_t op2, size_t vl) {
+ return __riscv_vfsgnj_tum(mask, maskedoff, op1, op2, vl);
+}
+
+// CHECK-RV64-LABEL: define dso_local <vscale x 2 x bfloat> @test_vfsgnj_vf_bf16mf2_tum(
+// CHECK-RV64-SAME: <vscale x 2 x i1> [[MASK:%.*]], <vscale x 2 x bfloat> [[MASKEDOFF:%.*]], <vscale x 2 x bfloat> [[OP1:%.*]], bfloat noundef [[OP2:%.*]], i64 noundef [[VL:%.*]]) #[[ATTR0]] {
+// CHECK-RV64-NEXT: [[ENTRY:.*:]]
+// CHECK-RV64-NEXT: [[TMP0:%.*]] = call <vscale x 2 x bfloat> @llvm.riscv.vfsgnj.mask.nxv2bf16.bf16.i64(<vscale x 2 x bfloat> [[MASKEDOFF]], <vscale x 2 x bfloat> [[OP1]], bfloat [[OP2]], <vscale x 2 x i1> [[MASK]], i64 [[VL]], i64 2)
+// CHECK-RV64-NEXT: ret <vscale x 2 x bfloat> [[TMP0]]
+//
+vbfloat16mf2_t test_vfsgnj_vf_bf16mf2_tum(vbool32_t mask, vbfloat16mf2_t maskedoff, vbfloat16mf2_t op1, __bf16 op2, size_t vl) {
+ return __riscv_vfsgnj_tum(mask, maskedoff, op1, op2, vl);
+}
+
+// CHECK-RV64-LABEL: define dso_local <vscale x 4 x bfloat> @test_vfsgnj_vv_bf16m1_tum(
+// CHECK-RV64-SAME: <vscale x 4 x i1> [[MASK:%.*]], <vscale x 4 x bfloat> [[MASKEDOFF:%.*]], <vscale x 4 x bfloat> [[OP1:%.*]], <vscale x 4 x bfloat> [[OP2:%.*]], i64 noundef [[VL:%.*]]) #[[ATTR0]] {
+// CHECK-RV64-NEXT: [[ENTRY:.*:]]
+// CHECK-RV64-NEXT: [[TMP0:%.*]] = call <vscale x 4 x bfloat> @llvm.riscv.vfsgnj.mask.nxv4bf16.nxv4bf16.i64(<vscale x 4 x bfloat> [[MASKEDOFF]], <vscale x 4 x bfloat> [[OP1]], <vscale x 4 x bfloat> [[OP2]], <vscale x 4 x i1> [[MASK]], i64 [[VL]], i64 2)
+// CHECK-RV64-NEXT: ret <vscale x 4 x bfloat> [[TMP0]]
+//
+vbfloat16m1_t test_vfsgnj_vv_bf16m1_tum(vbool16_t mask, vbfloat16m1_t maskedoff, vbfloat16m1_t op1, vbfloat16m1_t op2, size_t vl) {
+ return __riscv_vfsgnj_tum(mask, maskedoff, op1, op2, vl);
+}
+
+// CHECK-RV64-LABEL: define dso_local <vscale x 4 x bfloat> @test_vfsgnj_vf_bf16m1_tum(
+// CHECK-RV64-SAME: <vscale x 4 x i1> [[MASK:%.*]], <vscale x 4 x bfloat> [[MASKEDOFF:%.*]], <vscale x 4 x bfloat> [[OP1:%.*]], bfloat noundef [[OP2:%.*]], i64 noundef [[VL:%.*]]) #[[ATTR0]] {
+// CHECK-RV64-NEXT: [[ENTRY:.*:]]
+// CHECK-RV64-NEXT: [[TMP0:%.*]] = call <vscale x 4 x bfloat> @llvm.riscv.vfsgnj.mask.nxv4bf16.bf16.i64(<vscale x 4 x bfloat> [[MASKEDOFF]], <vscale x 4 x bfloat> [[OP1]], bfloat [[OP2]], <vscale x 4 x i1> [[MASK]], i64 [[VL]], i64 2)
+// CHECK-RV64-NEXT: ret <vscale x 4 x bfloat> [[TMP0]]
+//
+vbfloat16m1_t test_vfsgnj_vf_bf16m1_tum(vbool16_t mask, vbfloat16m1_t maskedoff, vbfloat16m1_t op1, __bf16 op2, size_t vl) {
+ return __riscv_vfsgnj_tum(mask, maskedoff, op1, op2, vl);
+}
+
+// CHECK-RV64-LABEL: define dso_local <vscale x 8 x bfloat> @test_vfsgnj_vv_bf16m2_tum(
+// CHECK-RV64-SAME: <vscale x 8 x i1> [[MASK:%.*]], <vscale x 8 x bfloat> [[MASKEDOFF:%.*]], <vscale x 8 x bfloat> [[OP1:%.*]], <vscale x 8 x bfloat> [[OP2:%.*]], i64 noundef [[VL:%.*]]) #[[ATTR0]] {
+// CHECK-RV64-NEXT: [[ENTRY:.*:]]
+// CHECK-RV64-NEXT: [[TMP0:%.*]] = call <vscale x 8 x bfloat> @llvm.riscv.vfsgnj.mask.nxv8bf16.nxv8bf16.i64(<vscale x 8 x bfloat> [[MASKEDOFF]], <vscale x 8 x bfloat> [[OP1]], <vscale x 8 x bfloat> [[OP2]], <vscale x 8 x i1> [[MASK]], i64 [[VL]], i64 2)
+// CHECK-RV64-NEXT: ret <vscale x 8 x bfloat> [[TMP0]]
+//
+vbfloat16m2_t test_vfsgnj_vv_bf16m2_tum(vbool8_t mask, vbfloat16m2_t maskedoff, vbfloat16m2_t op1, vbfloat16m2_t op2, size_t vl) {
+ return __riscv_vfsgnj_tum(mask, maskedoff, op1, op2, vl);
+}
+
+// CHECK-RV64-LABEL: define dso_local <vscale x 8 x bfloat> @test_vfsgnj_vf_bf16m2_tum(
+// CHECK-RV64-SAME: <vscale x 8 x i1> [[MASK:%.*]], <vscale x 8 x bfloat> [[MASKEDOFF:%.*]], <vscale x 8 x bfloat> [[OP1:%.*]], bfloat noundef [[OP2:%.*]], i64 noundef [[VL:%.*]]) #[[ATTR0]] {
+// CHECK-RV64-NEXT: [[ENTRY:.*:]]
+// CHECK-RV64-NEXT: [[TMP0:%.*]] = call <vscale x 8 x bfloat> @llvm.riscv.vfsgnj.mask.nxv8bf16.bf16.i64(<vscale x 8 x bfloat> [[MASKEDOFF]], <vscale x 8 x bfloat> [[OP1]], bfloat [[OP2]], <vscale x 8 x i1> [[MASK]], i64 [[VL]], i64 2)
+// CHECK-RV64-NEXT: ret <vscale x 8 x bfloat> [[TMP0]]
+//
+vbfloat16m2_t test_vfsgnj_vf_bf16m2_tum(vbool8_t mask, vbfloat16m2_t maskedoff, vbfloat16m2_t op1, __bf16 op2, size_t vl) {
+ return __riscv_vfsgnj_tum(mask, maskedoff, op1, op2, vl);
+}
+
+// CHECK-RV64-LABEL: define dso_local <vscale x 16 x bfloat> @test_vfsgnj_vv_bf16m4_tum(
+// CHECK-RV64-SAME: <vscale x 16 x i1> [[MASK:%.*]], <vscale x 16 x bfloat> [[MASKEDOFF:%.*]], <vscale x 16 x bfloat> [[OP1:%.*]], <vscale x 16 x bfloat> [[OP2:%.*]], i64 noundef [[VL:%.*]]) #[[ATTR0]] {
+// CHECK-RV64-NEXT: [[ENTRY:.*:]]
+// CHECK-RV64-NEXT: [[TMP0:%.*]] = call <vscale x 16 x bfloat> @llvm.riscv.vfsgnj.mask.nxv16bf16.nxv16bf16.i64(<vscale x 16 x bfloat> [[MASKEDOFF]], <vscale x 16 x bfloat> [[OP1]], <vscale x 16 x bfloat> [[OP2]], <vscale x 16 x i1> [[MASK]], i64 [[VL]], i64 2)
+// CHECK-RV64-NEXT: ret <vscale x 16 x bfloat> [[TMP0]]
+//
+vbfloat16m4_t test_vfsgnj_vv_bf16m4_tum(vbool4_t mask, vbfloat16m4_t maskedoff, vbfloat16m4_t op1, vbfloat16m4_t op2, size_t vl) {
+ return __riscv_vfsgnj_tum(mask, maskedoff, op1, op2, vl);
+}
+
+// CHECK-RV64-LABEL: define dso_local <vscale x 16 x bfloat> @test_vfsgnj_vf_bf16m4_tum(
+// CHECK-RV64-SAME: <vscale x 16 x i1> [[MASK:%.*]], <vscale x 16 x bfloat> [[MASKEDOFF:%.*]], <vscale x 16 x bfloat> [[OP1:%.*]], bfloat noundef [[OP2:%.*]], i64 noundef [[VL:%.*]]) #[[ATTR0]] {
+// CHECK-RV64-NEXT: [[ENTRY:.*:]]
+// CHECK-RV64-NEXT: [[TMP0:%.*]] = call <vscale x 16 x bfloat> @llvm.riscv.vfsgnj.mask.nxv16bf16.bf16.i64(<vscale x 16 x bfloat> [[MASKEDOFF]], <vscale x 16 x bfloat> [[OP1]], bfloat [[OP2]], <vscale x 16 x i1> [[MASK]], i64 [[VL]], i64 2)
+// CHECK-RV64-NEXT: ret <vscale x 16 x bfloat> [[TMP0]]
+//
+vbfloat16m4_t test_vfsgnj_vf_bf16m4_tum(vbool4_t mask, vbfloat16m4_t maskedoff, vbfloat16m4_t op1, __bf16 op2, size_t vl) {
+ return __riscv_vfsgnj_tum(mask, maskedoff, op1, op2, vl);
+}
+
+// CHECK-RV64-LABEL: define dso_local <vscale x 32 x bfloat> @test_vfsgnj_vv_bf16m8_tum(
+// CHECK-RV64-SAME: <vscale x 32 x i1> [[MASK:%.*]], <vscale x 32 x bfloat> [[MASKEDOFF:%.*]], <vscale x 32 x bfloat> [[OP1:%.*]], <vscale x 32 x bfloat> [[OP2:%.*]], i64 noundef [[VL:%.*]]) #[[ATTR0]] {
+// CHECK-RV64-NEXT: [[ENTRY:.*:]]
+// CHECK-RV64-NEXT: [[TMP0:%.*]] = call <vscale x 32 x bfloat> @llvm.riscv.vfsgnj.mask.nxv32bf16.nxv32bf16.i64(<vscale x 32 x bfloat> [[MASKEDOFF]], <vscale x 32 x bfloat> [[OP1]], <vscale x 32 x bfloat> [[OP2]], <vscale x 32 x i1> [[MASK]], i64 [[VL]], i64 2)
+// CHECK-RV64-NEXT: ret <vscale x 32 x bfloat> [[TMP0]]
+//
+vbfloat16m8_t test_vfsgnj_vv_bf16m8_tum(vbool2_t mask, vbfloat16m8_t maskedoff, vbfloat16m8_t op1, vbfloat16m8_t op2, size_t vl) {
+ return __riscv_vfsgnj_tum(mask, maskedoff, op1, op2, vl);
+}
+
+// CHECK-RV64-LABEL: define dso_local <vscale x 32 x bfloat> @test_vfsgnj_vf_bf16m8_tum(
+// CHECK-RV64-SAME: <vscale x 32 x i1> [[MASK:%.*]], <vscale x 32 x bfloat> [[MASKEDOFF:%.*]], <vscale x 32 x bfloat> [[OP1:%.*]], bfloat noundef [[OP2:%.*]], i64 noundef [[VL:%.*]]) #[[ATTR0]] {
+// CHECK-RV64-NEXT: [[ENTRY:.*:]]
+// CHECK-RV64-NEXT: [[TMP0:%.*]] = call <vscale x 32 x bfloat> @llvm.riscv.vfsgnj.mask.nxv32bf16.bf16.i64(<vscale x 32 x bfloat> [[MASKEDOFF]], <vscale x 32 x bfloat> [[OP1]], bfloat [[OP2]], <vscale x 32 x i1> [[MASK]], i64 [[VL]], i64 2)
+// CHECK-RV64-NEXT: ret <vscale x 32 x bfloat> [[TMP0]]
+//
+vbfloat16m8_t test_vfsgnj_vf_bf16m8_tum(vbool2_t mask, vbfloat16m8_t maskedoff, vbfloat16m8_t op1, __bf16 op2, size_t vl) {
+ return __riscv_vfsgnj_tum(mask, maskedoff, op1, op2, vl);
+}
+
+// CHECK-RV64-LABEL: define dso_local <vscale x 1 x bfloat> @test_vfsgnj_vv_bf16mf4_tumu(
+// CHECK-RV64-SAME: <vscale x 1 x i1> [[MASK:%.*]], <vscale x 1 x bfloat> [[MASKEDOFF:%.*]], <vscale x 1 x bfloat> [[OP1:%.*]], <vscale x 1 x bfloat> [[OP2:%.*]], i64 noundef [[VL:%.*]]) #[[ATTR0]] {
+// CHECK-RV64-NEXT: [[ENTRY:.*:]]
+// CHECK-RV64-NEXT: [[TMP0:%.*]] = call <vscale x 1 x bfloat> @llvm.riscv.vfsgnj.mask.nxv1bf16.nxv1bf16.i64(<vscale x 1 x bfloat> [[MASKEDOFF]], <vscale x 1 x bfloat> [[OP1]], <vscale x 1 x bfloat> [[OP2]], <vscale x 1 x i1> [[MASK]], i64 [[VL]], i64 0)
+// CHECK-RV64-NEXT: ret <vscale x 1 x bfloat> [[TMP0]]
+//
+vbfloat16mf4_t test_vfsgnj_vv_bf16mf4_tumu(vbool64_t mask, vbfloat16mf4_t maskedoff, vbfloat16mf4_t op1, vbfloat16mf4_t op2, size_t vl) {
+ return __riscv_vfsgnj_tumu(mask, maskedoff, op1, op2, vl);
+}
+
+// CHECK-RV64-LABEL: define dso_local <vscale x 1 x bfloat> @test_vfsgnj_vf_bf16mf4_tumu(
+// CHECK-RV64-SAME: <vscale x 1 x i1> [[MASK:%.*]], <vscale x 1 x bfloat> [[MASKEDOFF:%.*]], <vscale x 1 x bfloat> [[OP1:%.*]], bfloat noundef [[OP2:%.*]], i64 noundef [[VL:%.*]]) #[[ATTR0]] {
+// CHECK-RV64-NEXT: [[ENTRY:.*:]]
+// CHECK-RV64-NEXT: [[TMP0:%.*]] = call <vscale x 1 x bfloat> @llvm.riscv.vfsgnj.mask.nxv1bf16.bf16.i64(<vscale x 1 x bfloat> [[MASKEDOFF]], <vscale x 1 x bfloat> [[OP1]], bfloat [[OP2]], <vscale x 1 x i1> [[MASK]], i64 [[VL]], i64 0)
+// CHECK-RV64-NEXT: ret <vscale x 1 x bfloat> [[TMP0]]
+//
+vbfloat16mf4_t test_vfsgnj_vf_bf16mf4_tumu(vbool64_t mask, vbfloat16mf4_t maskedoff, vbfloat16mf4_t op1, __bf16 op2, size_t vl) {
+ return __riscv_vfsgnj_tumu(mask, maskedoff, op1, op2, vl);
+}
+
+// CHECK-RV64-LABEL: define dso_local <vscale x 2 x bfloat> @test_vfsgnj_vv_bf16mf2_tumu(
+// CHECK-RV64-SAME: <vscale x 2 x i1> [[MASK:%.*]], <vscale x 2 x bfloat> [[MASKEDOFF:%.*]], <vscale x 2 x bfloat> [[OP1:%.*]], <vscale x 2 x bfloat> [[OP2:%.*]], i64 noundef [[VL:%.*]]) #[[ATTR0]] {
+// CHECK-RV64-NEXT: [[ENTRY:.*:]]
+// CHECK-RV64-NEXT: [[TMP0:%.*]] = call <vscale x 2 x bfloat> @llvm.riscv.vfsgnj.mask.nxv2bf16.nxv2bf16.i64(<vscale x 2 x bfloat> [[MASKEDOFF]], <vscale x 2 x bfloat> [[OP1]], <vscale x 2 x bfloat> [[OP2]], <vscale x 2 x i1> [[MASK]], i64 [[VL]], i64 0)
+// CHECK-RV64-NEXT: ret <vscale x 2 x bfloat> [[TMP0]]
+//
+vbfloat16mf2_t test_vfsgnj_vv_bf16mf2_tumu(vbool32_t mask, vbfloat16mf2_t maskedoff, vbfloat16mf2_t op1, vbfloat16mf2_t op2, size_t vl) {
+ return __riscv_vfsgnj_tumu(mask, maskedoff, op1, op2, vl);
+}
+
+// CHECK-RV64-LABEL: define dso_local <vscale x 2 x bfloat> @test_vfsgnj_vf_bf16mf2_tumu(
+// CHECK-RV64-SAME: <vscale x 2 x i1> [[MASK:%.*]], <vscale x 2 x bfloat> [[MASKEDOFF:%.*]], <vscale x 2 x bfloat> [[OP1:%.*]], bfloat noundef [[OP2:%.*]], i64 noundef [[VL:%.*]]) #[[ATTR0]] {
+// CHECK-RV64-NEXT: [[ENTRY:.*:]]
+// CHECK-RV64-NEXT: [[TMP0:%.*]] = call <vscale x 2 x bfloat> @llvm.riscv.vfsgnj.mask.nxv2bf16.bf16.i64(<vscale x 2 x bfloat> [[MASKEDOFF]], <vscale x 2 x bfloat> [[OP1]], bfloat [[OP2]], <vscale x 2 x i1> [[MASK]], i64 [[VL]], i64 0)
+// CHECK-RV64-NEXT: ret <vscale x 2 x bfloat> [[TMP0]]
+//
+vbfloat16mf2_t test_vfsgnj_vf_bf16mf2_tumu(vbool32_t mask, vbfloat16mf2_t maskedoff, vbfloat16mf2_t op1, __bf16 op2, size_t vl) {
+ return __riscv_vfsgnj_tumu(mask, maskedoff, op1, op2, vl);
+}
+
+// CHECK-RV64-LABEL: define dso_local <vscale x 4 x bfloat> @test_vfsgnj_vv_bf16m1_tumu(
+// CHECK-RV64-SAME: <vscale x 4 x i1> [[MASK:%.*]], <vscale x 4 x bfloat> [[MASKEDOFF:%.*]], <vscale x 4 x bfloat> [[OP1:%.*]], <vscale x 4 x bfloat> [[OP2:%.*]], i64 noundef [[VL:%.*]]) #[[ATTR0]] {
+// CHECK-RV64-NEXT: [[ENTRY:.*:]]
+// CHECK-RV64-NEXT: [[TMP0:%.*]] = call <vscale x 4 x bfloat> @llvm.riscv.vfsgnj.mask.nxv4bf16.nxv4bf16.i64(<vscale x 4 x bfloat> [[MASKEDOFF]], <vscale x 4 x bfloat> [[OP1]], <vscale x 4 x bfloat> [[OP2]], <vscale x 4 x i1> [[MASK]], i64 [[VL]], i64 0)
+// CHECK-RV64-NEXT: ret <vscale x 4 x bfloat> [[TMP0]]
+//
+vbfloat16m1_t test_vfsgnj_vv_bf16m1_tumu(vbool16_t mask, vbfloat16m1_t maskedoff, vbfloat16m1_t op1, vbfloat16m1_t op2, size_t vl) {
+ return __riscv_vfsgnj_tumu(mask, maskedoff, op1, op2, vl);
+}
+
+// CHECK-RV64-LABEL: define dso_local <vscale x 4 x bfloat> @test_vfsgnj_vf_bf16m1_tumu(
+// CHECK-RV64-SAME: <vscale x 4 x i1> [[MASK:%.*]], <vscale x 4 x bfloat> [[MASKEDOFF:%.*]], <vscale x 4 x bfloat> [[OP1:%.*]], bfloat noundef [[OP2:%.*]], i64 noundef [[VL:%.*]]) #[[ATTR0]] {
+// CHECK-RV64-NEXT: [[ENTRY:.*:]]
+// CHECK-RV64-NEXT: [[TMP0:%.*]] = call <vscale x 4 x bfloat> @llvm.riscv.vfsgnj.mask.nxv4bf16.bf16.i64(<vscale x 4 x bfloat> [[MASKEDOFF]], <vscale x 4 x bfloat> [[OP1]], bfloat [[OP2]], <vscale x 4 x i1> [[MASK]], i64 [[VL]], i64 0)
+// CHECK-RV64-NEXT: ret <vscale x 4 x bfloat> [[TMP0]]
+//
+vbfloat16m1_t test_vfsgnj_vf_bf16m1_tumu(vbool16_t mask, vbfloat16m1_t maskedoff, vbfloat16m1_t op1, __bf16 op2, size_t vl) {
+ return __riscv_vfsgnj_tumu(mask, maskedoff, op1, op2, vl);
+}
+
+// CHECK-RV64-LABEL: define dso_local <vscale x 8 x bfloat> @test_vfsgnj_vv_bf16m2_tumu(
+// CHECK-RV64-SAME: <vscale x 8 x i1> [[MASK:%.*]], <vscale x 8 x bfloat> [[MASKEDOFF:%.*]], <vscale x 8 x bfloat> [[OP1:%.*]], <vscale x 8 x bfloat> [[OP2:%.*]], i64 noundef [[VL:%.*]]) #[[ATTR0]] {
+// CHECK-RV64-NEXT: [[ENTRY:.*:]]
+// CHECK-RV64-NEXT: [[TMP0:%.*]] = call <vscale x 8 x bfloat> @llvm.riscv.vfsgnj.mask.nxv8bf16.nxv8bf16.i64(<vscale x 8 x bfloat> [[MASKEDOFF]], <vscale x 8 x bfloat> [[OP1]], <vscale x 8 x bfloat> [[OP2]], <vscale x 8 x i1> [[MASK]], i64 [[VL]], i64 0)
+// CHECK-RV64-NEXT: ret <vscale x 8 x bfloat> [[TMP0]]
+//
+vbfloat16m2_t test_vfsgnj_vv_bf16m2_tumu(vbool8_t mask, vbfloat16m2_t maskedoff, vbfloat16m2_t op1, vbfloat16m2_t op2, size_t vl) {
+ return __riscv_vfsgnj_tumu(mask, maskedoff, op1, op2, vl);
+}
+
+// CHECK-RV64-LABEL: define dso_local <vscale x 8 x bfloat> @test_vfsgnj_vf_bf16m2_tumu(
+// CHECK-RV64-SAME: <vscale x 8 x i1> [[MASK:%.*]], <vscale x 8 x bfloat> [[MASKEDOFF:%.*]], <vscale x 8 x bfloat> [[OP1:%.*]], bfloat noundef [[OP2:%.*]], i64 noundef [[VL:%.*]]) #[[ATTR0]] {
+// CHECK-RV64-NEXT: [[ENTRY:.*:]]
+// CHECK-RV64-NEXT: [[TMP0:%.*]] = call <vscale x 8 x bfloat> @llvm.riscv.vfsgnj.mask.nxv8bf16.bf16.i64(<vscale x 8 x bfloat> [[MASKEDOFF]], <vscale x 8 x bfloat> [[OP1]], bfloat [[OP2]], <vscale x 8 x i1> [[MASK]], i64 [[VL]], i64 0)
+// CHECK-RV64-NEXT: ret <vscale x 8 x bfloat> [[TMP0]]
+//
+vbfloat16m2_t test_vfsgnj_vf_bf16m2_tumu(vbool8_t mask, vbfloat16m2_t maskedoff, vbfloat16m2_t op1, __bf16 op2, size_t vl) {
+ return __riscv_vfsgnj_tumu(mask, maskedoff, op1, op2, vl);
+}
+
+// CHECK-RV64-LABEL: define dso_local <vscale x 16 x bfloat> @test_vfsgnj_vv_bf16m4_tumu(
+// CHECK-RV64-SAME: <vscale x 16 x i1> [[MASK:%.*]], <vscale x 16 x bfloat> [[MASKEDOFF:%.*]], <vscale x 16 x bfloat> [[OP1:%.*]], <vscale x 16 x bfloat> [[OP2:%.*]], i64 noundef [[VL:%.*]]) #[[ATTR0]] {
+// CHECK-RV64-NEXT: [[ENTRY:.*:]]
+// CHECK-RV64-NEXT: [[TMP0:%.*]] = call <vscale x 16 x bfloat> @llvm.riscv.vfsgnj.mask.nxv16bf16.nxv16bf16.i64(<vscale x 16 x bfloat> [[MASKEDOFF]], <vscale x 16 x bfloat> [[OP1]], <vscale x 16 x bfloat> [[OP2]], <vscale x 16 x i1> [[MASK]], i64 [[VL]], i64 0)
+// CHECK-RV64-NEXT: ret <vscale x 16 x bfloat> [[TMP0]]
+//
+vbfloat16m4_t test_vfsgnj_vv_bf16m4_tumu(vbool4_t mask, vbfloat16m4_t maskedoff, vbfloat16m4_t op1, vbfloat16m4_t op2, size_t vl) {
+ return __riscv_vfsgnj_tumu(mask, maskedoff, op1, op2, vl);
+}
+
+// CHECK-RV64-LABEL: define dso_local <vscale x 16 x bfloat> @test_vfsgnj_vf_bf16m4_tumu(
+// CHECK-RV64-SAME: <vscale x 16 x i1> [[MASK:%.*]], <vscale x 16 x bfloat> [[MASKEDOFF:%.*]], <vscale x 16 x bfloat> [[OP1:%.*]], bfloat noundef [[OP2:%.*]], i64 noundef [[VL:%.*]]) #[[ATTR0]] {
+// CHECK-RV64-NEXT: [[ENTRY:.*:]]
+// CHECK-RV64-NEXT: [[TMP0:%.*]] = call <vscale x 16 x bfloat> @llvm.riscv.vfsgnj.mask.nxv16bf16.bf16.i64(<vscale x 16 x bfloat> [[MASKEDOFF]], <vscale x 16 x bfloat> [[OP1]], bfloat [[OP2]], <vscale x 16 x i1> [[MASK]], i64 [[VL]], i64 0)
+// CHECK-RV64-NEXT: ret <vscale x 16 x bfloat> [[TMP0]]
+//
+vbfloat16m4_t test_vfsgnj_vf_bf16m4_tumu(vbool4_t mask, vbfloat16m4_t maskedoff, vbfloat16m4_t op1, __bf16 op2, size_t vl) {
+ return __riscv_vfsgnj_tumu(mask, maskedoff, op1, op2, vl);
+}
+
+// CHECK-RV64-LABEL: define dso_local <vscale x 32 x bfloat> @test_vfsgnj_vv_bf16m8_tumu(
+// CHECK-RV64-SAME: <vscale x 32 x i1> [[MASK:%.*]], <vscale x 32 x bfloat> [[MASKEDOFF:%.*]], <vscale x 32 x bfloat> [[OP1:%.*]], <vscale x 32 x bfloat> [[OP2:%.*]], i64 noundef [[VL:%.*]]) #[[ATTR0]] {
+// CHECK-RV64-NEXT: [[ENTRY:.*:]]
+// CHECK-RV64-NEXT: [[TMP0:%.*]] = call <vscale x 32 x bfloat> @llvm.riscv.vfsgnj.mask.nxv32bf16.nxv32bf16.i64(<vscale x 32 x bfloat> [[MASKEDOFF]], <vscale x 32 x bfloat> [[OP1]], <vscale x 32 x bfloat> [[OP2]], <vscale x 32 x i1> [[MASK]], i64 [[VL]], i64 0)
+// CHECK-RV64-NEXT: ret <vscale x 32 x bfloat> [[TMP0]]
+//
+vbfloat16m8_t test_vfsgnj_vv_bf16m8_tumu(vbool2_t mask, vbfloat16m8_t maskedoff, vbfloat16m8_t op1, vbfloat16m8_t op2, size_t vl) {
+ return __riscv_vfsgnj_tumu(mask, maskedoff, op1, op2, vl);
+}
+
+// CHECK-RV64-LABEL: define dso_local <vscale x 32 x bfloat> @test_vfsgnj_vf_bf16m8_tumu(
+// CHECK-RV64-SAME: <vscale x 32 x i1> [[MASK:%.*]], <vscale x 32 x bfloat> [[MASKEDOFF:%.*]], <vscale x 32 x bfloat> [[OP1:%.*]], bfloat noundef [[OP2:%.*]], i64 noundef [[VL:%.*]]) #[[ATTR0]] {
+// CHECK-RV64-NEXT: [[ENTRY:.*:]]
+// CHECK-RV64-NEXT: [[TMP0:%.*]] = call <vscale x 32 x bfloat> @llvm.riscv.vfsgnj.mask.nxv32bf16.bf16.i64(<vscale x 32 x bfloat> [[MASKEDOFF]], <vscale x 32 x bfloat> [[OP1]], bfloat [[OP2]], <vscale x 32 x i1> [[MASK]], i64 [[VL]], i64 0)
+// CHECK-RV64-NEXT: ret <vscale x 32 x bfloat> [[TMP0]]
+//
+vbfloat16m8_t test_vfsgnj_vf_bf16m8_tumu(vbool2_t mask, vbfloat16m8_t maskedoff, vbfloat16m8_t op1, __bf16 op2, size_t vl) {
+ return __riscv_vfsgnj_tumu(mask, maskedoff, op1, op2, vl);
+}
+
+// CHECK-RV64-LABEL: define dso_local <vscale x 1 x bfloat> @test_vfsgnj_vv_bf16mf4_mu(
+// CHECK-RV64-SAME: <vscale x 1 x i1> [[MASK:%.*]], <vscale x 1 x bfloat> [[MASKEDOFF:%.*]], <vscale x 1 x bfloat> [[OP1:%.*]], <vscale x 1 x bfloat> [[OP2:%.*]], i64 noundef [[VL:%.*]]) #[[ATTR0]] {
+// CHECK-RV64-NEXT: [[ENTRY:.*:]]
+// CHECK-RV64-NEXT: [[TMP0:%.*]] = call <vscale x 1 x bfloat> @llvm.riscv.vfsgnj.mask.nxv1bf16.nxv1bf16.i64(<vscale x 1 x bfloat> [[MASKEDOFF]], <vscale x 1 x bfloat> [[OP1]], <vscale x 1 x bfloat> [[OP2]], <vscale x 1 x i1> [[MASK]], i64 [[VL]], i64 1)
+// CHECK-RV64-NEXT: ret <vscale x 1 x bfloat> [[TMP0]]
+//
+vbfloat16mf4_t test_vfsgnj_vv_bf16mf4_mu(vbool64_t mask, vbfloat16mf4_t maskedoff, vbfloat16mf4_t op1, vbfloat16mf4_t op2, size_t vl) {
+ return __riscv_vfsgnj_mu(mask, maskedoff, op1, op2, vl);
+}
+
+// CHECK-RV64-LABEL: define dso_local <vscale x 1 x bfloat> @test_vfsgnj_vf_bf16mf4_mu(
+// CHECK-RV64-SAME: <vscale x 1 x i1> [[MASK:%.*]], <vscale x 1 x bfloat> [[MASKEDOFF:%.*]], <vscale x 1 x bfloat> [[OP1:%.*]], bfloat noundef [[OP2:%.*]], i64 noundef [[VL:%.*]]) #[[ATTR0]] {
+// CHECK-RV64-NEXT: [[ENTRY:.*:]]
+// CHECK-RV64-NEXT: [[TMP0:%.*]] = call <vscale x 1 x bfloat> @llvm.riscv.vfsgnj.mask.nxv1bf16.bf16.i64(<vscale x 1 x bfloat> [[MASKEDOFF]], <vscale x 1 x bfloat> [[OP1]], bfloat [[OP2]], <vscale x 1 x i1> [[MASK]], i64 [[VL]], i64 1)
+// CHECK-RV64-NEXT: ret <vscale x 1 x bfloat> [[TMP0]]
+//
+vbfloat16mf4_t test_vfsgnj_vf_bf16mf4_mu(vbool64_t mask, vbfloat16mf4_t maskedoff, vbfloat16mf4_t op1, __bf16 op2, size_t vl) {
+ return __riscv_vfsgnj_mu(mask, maskedoff, op1, op2, vl);
+}
+
+// CHECK-RV64-LABEL: define dso_local <vscale x 2 x bfloat> @test_vfsgnj_vv_bf16mf2_mu(
+// CHECK-RV64-SAME: <vscale x 2 x i1> [[MASK:%.*]], <vscale x 2 x bfloat> [[MASKEDOFF:%.*]], <vscale x 2 x bfloat> [[OP1:%.*]], <vscale x 2 x bfloat> [[OP2:%.*]], i64 noundef [[VL:%.*]]) #[[ATTR0]] {
+// CHECK-RV64-NEXT: [[ENTRY:.*:]]
+// CHECK-RV64-NEXT: [[TMP0:%.*]] = call <vscale x 2 x bfloat> @llvm.riscv.vfsgnj.mask.nxv2bf16.nxv2bf16.i64(<vscale x 2 x bfloat> [[MASKEDOFF]], <vscale x 2 x bfloat> [[OP1]], <vscale x 2 x bfloat> [[OP2]], <vscale x 2 x i1> [[MASK]], i64 [[VL]], i64 1)
+// CHECK-RV64-NEXT: ret <vscale x 2 x bfloat> [[TMP0]]
+//
+vbfloat16mf2_t test_vfsgnj_vv_bf16mf2_mu(vbool32_t mask, vbfloat16mf2_t maskedoff, vbfloat16mf2_t op1, vbfloat16mf2_t op2, size_t vl) {
+ return __riscv_vfsgnj_mu(mask, maskedoff, op1, op2, vl);
+}
+
+// CHECK-RV64-LABEL: define dso_local <vscale x 2 x bfloat> @test_vfsgnj_vf_bf16mf2_mu(
+// CHECK-RV64-SAME: <vscale x 2 x i1> [[MASK:%.*]], <vscale x 2 x bfloat> [[MASKEDOFF:%.*]], <vscale x 2 x bfloat> [[OP1:%.*]], bfloat noundef [[OP2:%.*]], i64 noundef [[VL:%.*]]) #[[ATTR0]] {
+// CHECK-RV64-NEXT: [[ENTRY:.*:]]
+// CHECK-RV64-NEXT: [[TMP0:%.*]] = call <vscale x 2 x bfloat> @llvm.riscv.vfsgnj.mask.nxv2bf16.bf16.i64(<vscale x 2 x bfloat> [[MASKEDOFF]], <vscale x 2 x bfloat> [[OP1]], bfloat [[OP2]], <vscale x 2 x i1> [[MASK]], i64 [[VL]], i64 1)
+// CHECK-RV64-NEXT: ret <vscale x 2 x bfloat> [[TMP0]]
+//
+vbfloat16mf2_t test_vfsgnj_vf_bf16mf2_mu(vbool32_t mask, vbfloat16mf2_t maskedoff, vbfloat16mf2_t op1, __bf16 op2, size_t vl) {
+ return __riscv_vfsgnj_mu(mask, maskedoff, op1, op2, vl);
+}
+
+// CHECK-RV64-LABEL: define dso_local <vscale x 4 x bfloat> @test_vfsgnj_vv_bf16m1_mu(
+// CHECK-RV64-SAME: <vscale x 4 x i1> [[MASK:%.*]], <vscale x 4 x bfloat> [[MASKEDOFF:%.*]], <vscale x 4 x bfloat> [[OP1:%.*]], <vscale x 4 x bfloat> [[OP2:%.*]], i64 noundef [[VL:%.*]]) #[[ATTR0]] {
+// CHECK-RV64-NEXT: [[ENTRY:.*:]]
+// CHECK-RV64-NEXT: [[TMP0:%.*]] = call <vscale x 4 x bfloat> @llvm.riscv.vfsgnj.mask.nxv4bf16.nxv4bf16.i64(<vscale x 4 x bfloat> [[MASKEDOFF]], <vscale x 4 x bfloat> [[OP1]], <vscale x 4 x bfloat> [[OP2]], <vscale x 4 x i1> [[MASK]], i64 [[VL]], i64 1)
+// CHECK-RV64-NEXT: ret <vscale x 4 x bfloat> [[TMP0]]
+//
+vbfloat16m1_t test_vfsgnj_vv_bf16m1_mu(vbool16_t mask, vbfloat16m1_t maskedoff, vbfloat16m1_t op1, vbfloat16m1_t op2, size_t vl) {
+ return __riscv_vfsgnj_mu(mask, maskedoff, op1, op2, vl);
+}
+
+// CHECK-RV64-LABEL: define dso_local <vscale x 4 x bfloat> @test_vfsgnj_vf_bf16m1_mu(
+// CHECK-RV64-SAME: <vscale x 4 x i1> [[MASK:%.*]], <vscale x 4 x bfloat> [[MASKEDOFF:%.*]], <vscale x 4 x bfloat> [[OP1:%.*]], bfloat noundef [[OP2:%.*]], i64 noundef [[VL:%.*]]) #[[ATTR0]] {
+// CHECK-RV64-NEXT: [[ENTRY:.*:]]
+// CHECK-RV64-NEXT: [[TMP0:%.*]] = call <vscale x 4 x bfloat> @llvm.riscv.vfsgnj.mask.nxv4bf16.bf16.i64(<vscale x 4 x bfloat> [[MASKEDOFF]], <vscale x 4 x bfloat> [[OP1]], bfloat [[OP2]], <vscale x 4 x i1> [[MASK]], i64 [[VL]], i64 1)
+// CHECK-RV64-NEXT: ret <vscale x 4 x bfloat> [[TMP0]]
+//
+vbfloat16m1_t test_vfsgnj_vf_bf16m1_mu(vbool16_t mask, vbfloat16m1_t maskedoff, vbfloat16m1_t op1, __bf16 op2, size_t vl) {
+ return __riscv_vfsgnj_mu(mask, maskedoff, op1, op2, vl);
+}
+
+// CHECK-RV64-LABEL: define dso_local <vscale x 8 x bfloat> @test_vfsgnj_vv_bf16m2_mu(
+// CHECK-RV64-SAME: <vscale x 8 x i1> [[MASK:%.*]], <vscale x 8 x bfloat> [[MASKEDOFF:%.*]], <vscale x 8 x bfloat> [[OP1:%.*]], <vscale x 8 x bfloat> [[OP2:%.*]], i64 noundef [[VL:%.*]]) #[[ATTR0]] {
+// CHECK-RV64-NEXT: [[ENTRY:.*:]]
+// CHECK-RV64-NEXT: [[TMP0:%.*]] = call <vscale x 8 x bfloat> @llvm.riscv.vfsgnj.mask.nxv8bf16.nxv8bf16.i64(<vscale x 8 x bfloat> [[MASKEDOFF]], <vscale x 8 x bfloat> [[OP1]], <vscale x 8 x bfloat> [[OP2]], <vscale x 8 x i1> [[MASK]], i64 [[VL]], i64 1)
+// CHECK-RV64-NEXT: ret <vscale x 8 x bfloat> [[TMP0]]
+//
+vbfloat16m2_t test_vfsgnj_vv_bf16m2_mu(vbool8_t mask, vbfloat16m2_t maskedoff, vbfloat16m2_t op1, vbfloat16m2_t op2, size_t vl) {
+ return __riscv_vfsgnj_mu(mask, maskedoff, op1, op2, vl);
+}
+
+// CHECK-RV64-LABEL: define dso_local <vscale x 8 x bfloat> @test_vfsgnj_vf_bf16m2_mu(
+// CHECK-RV64-SAME: <vscale x 8 x i1> [[MASK:%.*]], <vscale x 8 x bfloat> [[MASKEDOFF:%.*]], <vscale x 8 x bfloat> [[OP1:%.*]], bfloat noundef [[OP2:%.*]], i64 noundef [[VL:%.*]]) #[[ATTR0]] {
+// CHECK-RV64-NEXT: [[ENTRY:.*:]]
+// CHECK-RV64-NEXT: [[TMP0:%.*]] = call <vscale x 8 x bfloat> @llvm.riscv.vfsgnj.mask.nxv8bf16.bf16.i64(<vscale x 8 x bfloat> [[MASKEDOFF]], <vscale x 8 x bfloat> [[OP1]], bfloat [[OP2]], <vscale x 8 x i1> [[MASK]], i64 [[VL]], i64 1)
+// CHECK-RV64-NEXT: ret <vscale x 8 x bfloat> [[TMP0]]
+//
+vbfloat16m2_t test_vfsgnj_vf_bf16m2_mu(vbool8_t mask, vbfloat16m2_t maskedoff, vbfloat16m2_t op1, __bf16 op2, size_t vl) {
+ return __riscv_vfsgnj_mu(mask, maskedoff, op1, op2, vl);
+}
+
+// CHECK-RV64-LABEL: define dso_local <vscale x 16 x bfloat> @test_vfsgnj_vv_bf16m4_mu(
+// CHECK-RV64-SAME: <vscale x 16 x i1> [[MASK:%.*]], <vscale x 16 x bfloat> [[MASKEDOFF:%.*]], <vscale x 16 x bfloat> [[OP1:%.*]], <vscale x 16 x bfloat> [[OP2:%.*]], i64 noundef [[VL:%.*]]) #[[ATTR0]] {
+// CHECK-RV64-NEXT: [[ENTRY:.*:]]
+// CHECK-RV64-NEXT: [[TMP0:%.*]] = call <vscale x 16 x bfloat> @llvm.riscv.vfsgnj.mask.nxv16bf16.nxv16bf16.i64(<vscale x 16 x bfloat> [[MASKEDOFF]], <vscale x 16 x bfloat> [[OP1]], <vscale x 16 x bfloat> [[OP2]], <vscale x 16 x i1> [[MASK]], i64 [[VL]], i64 1)
+// CHECK-RV64-NEXT: ret <vscale x 16 x bfloat> [[TMP0]]
+//
+vbfloat16m4_t test_vfsgnj_vv_bf16m4_mu(vbool4_t mask, vbfloat16m4_t maskedoff, vbfloat16m4_t op1, vbfloat16m4_t op2, size_t vl) {
+ return __riscv_vfsgnj_mu(mask, maskedoff, op1, op2, vl);
+}
+
+// CHECK-RV64-LABEL: define dso_local <vscale x 16 x bfloat> @test_vfsgnj_vf_bf16m4_mu(
+// CHECK-RV64-SAME: <vscale x 16 x i1> [[MASK:%.*]], <vscale x 16 x bfloat> [[MASKEDOFF:%.*]], <vscale x 16 x bfloat> [[OP1:%.*]], bfloat noundef [[OP2:%.*]], i64 noundef [[VL:%.*]]) #[[ATTR0]] {
+// CHECK-RV64-NEXT: [[ENTRY:.*:]]
+// CHECK-RV64-NEXT: [[TMP0:%.*]] = call <vscale x 16 x bfloat> @llvm.riscv.vfsgnj.mask.nxv16bf16.bf16.i64(<vscale x 16 x bfloat> [[MASKEDOFF]], <vscale x 16 x bfloat> [[OP1]], bfloat [[OP2]], <vscale x 16 x i1> [[MASK]], i64 [[VL]], i64 1)
+// CHECK-RV64-NEXT: ret <vscale x 16 x bfloat> [[TMP0]]
+//
+vbfloat16m4_t test_vfsgnj_vf_bf16m4_mu(vbool4_t mask, vbfloat16m4_t maskedoff, vbfloat16m4_t op1, __bf16 op2, size_t vl) {
+ return __riscv_vfsgnj_mu(mask, maskedoff, op1, op2, vl);
+}
+
+// CHECK-RV64-LABEL: define dso_local <vscale x 32 x bfloat> @test_vfsgnj_vv_bf16m8_mu(
+// CHECK-RV64-SAME: <vscale x 32 x i1> [[MASK:%.*]], <vscale x 32 x bfloat> [[MASKEDOFF:%.*]], <vscale x 32 x bfloat> [[OP1:%.*]], <vscale x 32 x bfloat> [[OP2:%.*]], i64 noundef [[VL:%.*]]) #[[ATTR0]] {
+// CHECK-RV64-NEXT: [[ENTRY:.*:]]
+// CHECK-RV64-NEXT: [[TMP0:%.*]] = call <vscale x 32 x bfloat> @llvm.riscv.vfsgnj.mask.nxv32bf16.nxv32bf16.i64(<vscale x 32 x bfloat> [[MASKEDOFF]], <vscale x 32 x bfloat> [[OP1]], <vscale x 32 x bfloat> [[OP2]], <vscale x 32 x i1> [[MASK]], i64 [[VL]], i64 1)
+// CHECK-RV64-NEXT: ret <vscale x 32 x bfloat> [[TMP0]]
+//
+vbfloat16m8_t test_vfsgnj_vv_bf16m8_mu(vbool2_t mask, vbfloat16m8_t maskedoff, vbfloat16m8_t op1, vbfloat16m8_t op2, size_t vl) {
+ return __riscv_vfsgnj_mu(mask, maskedoff, op1, op2, vl);
+}
+
+// CHECK-RV64-LABEL: define dso_local <vscale x 32 x bfloat> @test_vfsgnj_vf_bf16m8_mu(
+// CHECK-RV64-SAME: <vscale x 32 x i1> [[MASK:%.*]], <vscale x 32 x bfloat> [[MASKEDOFF:%.*]], <vscale x 32 x bfloat> [[OP1:%.*]], bfloat noundef [[OP2:%.*]], i64 noundef [[VL:%.*]]) #[[ATTR0]] {
+// CHECK-RV64-NEXT: [[ENTRY:.*:]]
+// CHECK-RV64-NEXT: [[TMP0:%.*]] = call <vscale x 32 x bfloat> @llvm.riscv.vfsgnj.mask.nxv32bf16.bf16.i64(<vscale x 32 x bfloat> [[MASKEDOFF]], <vscale x 32 x bfloat> [[OP1]], bfloat [[OP2]], <vscale x 32 x i1> [[MASK]], i64 [[VL]], i64 1)
+// CHECK-RV64-NEXT: ret <vscale x 32 x bfloat> [[TMP0]]
+//
+vbfloat16m8_t test_vfsgnj_vf_bf16m8_mu(vbool2_t mask, vbfloat16m8_t maskedoff, vbfloat16m8_t op1, __bf16 op2, size_t vl) {
+ return __riscv_vfsgnj_mu(mask, maskedoff, op1, op2, vl);
+}
+
diff --git a/clang/test/CodeGen/RISCV/rvv-intrinsics-autogenerated/zvfbfa/policy/overloaded/vfsgnjn.c b/clang/test/CodeGen/RISCV/rvv-intrinsics-autogenerated/zvfbfa/policy/overloaded/vfsgnjn.c
new file mode 100644
index 0000000..1b799d8
--- /dev/null
+++ b/clang/test/CodeGen/RISCV/rvv-intrinsics-autogenerated/zvfbfa/policy/overloaded/vfsgnjn.c
@@ -0,0 +1,489 @@
+// NOTE: Assertions have been autogenerated by utils/update_cc_test_checks.py UTC_ARGS: --version 5
+// REQUIRES: riscv-registered-target
+// RUN: %clang_cc1 -triple riscv64 -target-feature +v \
+// RUN: -target-feature +experimental-zvfbfa -disable-O0-optnone \
+// RUN: -emit-llvm %s -o - | opt -S -passes=mem2reg | \
+// RUN: FileCheck --check-prefix=CHECK-RV64 %s
+
+#include <riscv_vector.h>
+
+// CHECK-RV64-LABEL: define dso_local <vscale x 1 x bfloat> @test_vfsgnjn_vv_bf16mf4_tu(
+// CHECK-RV64-SAME: <vscale x 1 x bfloat> [[MASKEDOFF:%.*]], <vscale x 1 x bfloat> [[OP1:%.*]], <vscale x 1 x bfloat> [[OP2:%.*]], i64 noundef [[VL:%.*]]) #[[ATTR0:[0-9]+]] {
+// CHECK-RV64-NEXT: [[ENTRY:.*:]]
+// CHECK-RV64-NEXT: [[TMP0:%.*]] = call <vscale x 1 x bfloat> @llvm.riscv.vfsgnjn.nxv1bf16.nxv1bf16.i64(<vscale x 1 x bfloat> [[MASKEDOFF]], <vscale x 1 x bfloat> [[OP1]], <vscale x 1 x bfloat> [[OP2]], i64 [[VL]])
+// CHECK-RV64-NEXT: ret <vscale x 1 x bfloat> [[TMP0]]
+//
+vbfloat16mf4_t test_vfsgnjn_vv_bf16mf4_tu(vbfloat16mf4_t maskedoff, vbfloat16mf4_t op1, vbfloat16mf4_t op2, size_t vl) {
+ return __riscv_vfsgnjn_tu(maskedoff, op1, op2, vl);
+}
+
+// CHECK-RV64-LABEL: define dso_local <vscale x 1 x bfloat> @test_vfsgnjn_vf_bf16mf4_tu(
+// CHECK-RV64-SAME: <vscale x 1 x bfloat> [[MASKEDOFF:%.*]], <vscale x 1 x bfloat> [[OP1:%.*]], bfloat noundef [[OP2:%.*]], i64 noundef [[VL:%.*]]) #[[ATTR0]] {
+// CHECK-RV64-NEXT: [[ENTRY:.*:]]
+// CHECK-RV64-NEXT: [[TMP0:%.*]] = call <vscale x 1 x bfloat> @llvm.riscv.vfsgnjn.nxv1bf16.bf16.i64(<vscale x 1 x bfloat> [[MASKEDOFF]], <vscale x 1 x bfloat> [[OP1]], bfloat [[OP2]], i64 [[VL]])
+// CHECK-RV64-NEXT: ret <vscale x 1 x bfloat> [[TMP0]]
+//
+vbfloat16mf4_t test_vfsgnjn_vf_bf16mf4_tu(vbfloat16mf4_t maskedoff, vbfloat16mf4_t op1, __bf16 op2, size_t vl) {
+ return __riscv_vfsgnjn_tu(maskedoff, op1, op2, vl);
+}
+
+// CHECK-RV64-LABEL: define dso_local <vscale x 2 x bfloat> @test_vfsgnjn_vv_bf16mf2_tu(
+// CHECK-RV64-SAME: <vscale x 2 x bfloat> [[MASKEDOFF:%.*]], <vscale x 2 x bfloat> [[OP1:%.*]], <vscale x 2 x bfloat> [[OP2:%.*]], i64 noundef [[VL:%.*]]) #[[ATTR0]] {
+// CHECK-RV64-NEXT: [[ENTRY:.*:]]
+// CHECK-RV64-NEXT: [[TMP0:%.*]] = call <vscale x 2 x bfloat> @llvm.riscv.vfsgnjn.nxv2bf16.nxv2bf16.i64(<vscale x 2 x bfloat> [[MASKEDOFF]], <vscale x 2 x bfloat> [[OP1]], <vscale x 2 x bfloat> [[OP2]], i64 [[VL]])
+// CHECK-RV64-NEXT: ret <vscale x 2 x bfloat> [[TMP0]]
+//
+vbfloat16mf2_t test_vfsgnjn_vv_bf16mf2_tu(vbfloat16mf2_t maskedoff, vbfloat16mf2_t op1, vbfloat16mf2_t op2, size_t vl) {
+ return __riscv_vfsgnjn_tu(maskedoff, op1, op2, vl);
+}
+
+// CHECK-RV64-LABEL: define dso_local <vscale x 2 x bfloat> @test_vfsgnjn_vf_bf16mf2_tu(
+// CHECK-RV64-SAME: <vscale x 2 x bfloat> [[MASKEDOFF:%.*]], <vscale x 2 x bfloat> [[OP1:%.*]], bfloat noundef [[OP2:%.*]], i64 noundef [[VL:%.*]]) #[[ATTR0]] {
+// CHECK-RV64-NEXT: [[ENTRY:.*:]]
+// CHECK-RV64-NEXT: [[TMP0:%.*]] = call <vscale x 2 x bfloat> @llvm.riscv.vfsgnjn.nxv2bf16.bf16.i64(<vscale x 2 x bfloat> [[MASKEDOFF]], <vscale x 2 x bfloat> [[OP1]], bfloat [[OP2]], i64 [[VL]])
+// CHECK-RV64-NEXT: ret <vscale x 2 x bfloat> [[TMP0]]
+//
+vbfloat16mf2_t test_vfsgnjn_vf_bf16mf2_tu(vbfloat16mf2_t maskedoff, vbfloat16mf2_t op1, __bf16 op2, size_t vl) {
+ return __riscv_vfsgnjn_tu(maskedoff, op1, op2, vl);
+}
+
+// CHECK-RV64-LABEL: define dso_local <vscale x 4 x bfloat> @test_vfsgnjn_vv_bf16m1_tu(
+// CHECK-RV64-SAME: <vscale x 4 x bfloat> [[MASKEDOFF:%.*]], <vscale x 4 x bfloat> [[OP1:%.*]], <vscale x 4 x bfloat> [[OP2:%.*]], i64 noundef [[VL:%.*]]) #[[ATTR0]] {
+// CHECK-RV64-NEXT: [[ENTRY:.*:]]
+// CHECK-RV64-NEXT: [[TMP0:%.*]] = call <vscale x 4 x bfloat> @llvm.riscv.vfsgnjn.nxv4bf16.nxv4bf16.i64(<vscale x 4 x bfloat> [[MASKEDOFF]], <vscale x 4 x bfloat> [[OP1]], <vscale x 4 x bfloat> [[OP2]], i64 [[VL]])
+// CHECK-RV64-NEXT: ret <vscale x 4 x bfloat> [[TMP0]]
+//
+vbfloat16m1_t test_vfsgnjn_vv_bf16m1_tu(vbfloat16m1_t maskedoff, vbfloat16m1_t op1, vbfloat16m1_t op2, size_t vl) {
+ return __riscv_vfsgnjn_tu(maskedoff, op1, op2, vl);
+}
+
+// CHECK-RV64-LABEL: define dso_local <vscale x 4 x bfloat> @test_vfsgnjn_vf_bf16m1_tu(
+// CHECK-RV64-SAME: <vscale x 4 x bfloat> [[MASKEDOFF:%.*]], <vscale x 4 x bfloat> [[OP1:%.*]], bfloat noundef [[OP2:%.*]], i64 noundef [[VL:%.*]]) #[[ATTR0]] {
+// CHECK-RV64-NEXT: [[ENTRY:.*:]]
+// CHECK-RV64-NEXT: [[TMP0:%.*]] = call <vscale x 4 x bfloat> @llvm.riscv.vfsgnjn.nxv4bf16.bf16.i64(<vscale x 4 x bfloat> [[MASKEDOFF]], <vscale x 4 x bfloat> [[OP1]], bfloat [[OP2]], i64 [[VL]])
+// CHECK-RV64-NEXT: ret <vscale x 4 x bfloat> [[TMP0]]
+//
+vbfloat16m1_t test_vfsgnjn_vf_bf16m1_tu(vbfloat16m1_t maskedoff, vbfloat16m1_t op1, __bf16 op2, size_t vl) {
+ return __riscv_vfsgnjn_tu(maskedoff, op1, op2, vl);
+}
+
+// CHECK-RV64-LABEL: define dso_local <vscale x 8 x bfloat> @test_vfsgnjn_vv_bf16m2_tu(
+// CHECK-RV64-SAME: <vscale x 8 x bfloat> [[MASKEDOFF:%.*]], <vscale x 8 x bfloat> [[OP1:%.*]], <vscale x 8 x bfloat> [[OP2:%.*]], i64 noundef [[VL:%.*]]) #[[ATTR0]] {
+// CHECK-RV64-NEXT: [[ENTRY:.*:]]
+// CHECK-RV64-NEXT: [[TMP0:%.*]] = call <vscale x 8 x bfloat> @llvm.riscv.vfsgnjn.nxv8bf16.nxv8bf16.i64(<vscale x 8 x bfloat> [[MASKEDOFF]], <vscale x 8 x bfloat> [[OP1]], <vscale x 8 x bfloat> [[OP2]], i64 [[VL]])
+// CHECK-RV64-NEXT: ret <vscale x 8 x bfloat> [[TMP0]]
+//
+vbfloat16m2_t test_vfsgnjn_vv_bf16m2_tu(vbfloat16m2_t maskedoff, vbfloat16m2_t op1, vbfloat16m2_t op2, size_t vl) {
+ return __riscv_vfsgnjn_tu(maskedoff, op1, op2, vl);
+}
+
+// CHECK-RV64-LABEL: define dso_local <vscale x 8 x bfloat> @test_vfsgnjn_vf_bf16m2_tu(
+// CHECK-RV64-SAME: <vscale x 8 x bfloat> [[MASKEDOFF:%.*]], <vscale x 8 x bfloat> [[OP1:%.*]], bfloat noundef [[OP2:%.*]], i64 noundef [[VL:%.*]]) #[[ATTR0]] {
+// CHECK-RV64-NEXT: [[ENTRY:.*:]]
+// CHECK-RV64-NEXT: [[TMP0:%.*]] = call <vscale x 8 x bfloat> @llvm.riscv.vfsgnjn.nxv8bf16.bf16.i64(<vscale x 8 x bfloat> [[MASKEDOFF]], <vscale x 8 x bfloat> [[OP1]], bfloat [[OP2]], i64 [[VL]])
+// CHECK-RV64-NEXT: ret <vscale x 8 x bfloat> [[TMP0]]
+//
+vbfloat16m2_t test_vfsgnjn_vf_bf16m2_tu(vbfloat16m2_t maskedoff, vbfloat16m2_t op1, __bf16 op2, size_t vl) {
+ return __riscv_vfsgnjn_tu(maskedoff, op1, op2, vl);
+}
+
+// CHECK-RV64-LABEL: define dso_local <vscale x 16 x bfloat> @test_vfsgnjn_vv_bf16m4_tu(
+// CHECK-RV64-SAME: <vscale x 16 x bfloat> [[MASKEDOFF:%.*]], <vscale x 16 x bfloat> [[OP1:%.*]], <vscale x 16 x bfloat> [[OP2:%.*]], i64 noundef [[VL:%.*]]) #[[ATTR0]] {
+// CHECK-RV64-NEXT: [[ENTRY:.*:]]
+// CHECK-RV64-NEXT: [[TMP0:%.*]] = call <vscale x 16 x bfloat> @llvm.riscv.vfsgnjn.nxv16bf16.nxv16bf16.i64(<vscale x 16 x bfloat> [[MASKEDOFF]], <vscale x 16 x bfloat> [[OP1]], <vscale x 16 x bfloat> [[OP2]], i64 [[VL]])
+// CHECK-RV64-NEXT: ret <vscale x 16 x bfloat> [[TMP0]]
+//
+vbfloat16m4_t test_vfsgnjn_vv_bf16m4_tu(vbfloat16m4_t maskedoff, vbfloat16m4_t op1, vbfloat16m4_t op2, size_t vl) {
+ return __riscv_vfsgnjn_tu(maskedoff, op1, op2, vl);
+}
+
+// CHECK-RV64-LABEL: define dso_local <vscale x 16 x bfloat> @test_vfsgnjn_vf_bf16m4_tu(
+// CHECK-RV64-SAME: <vscale x 16 x bfloat> [[MASKEDOFF:%.*]], <vscale x 16 x bfloat> [[OP1:%.*]], bfloat noundef [[OP2:%.*]], i64 noundef [[VL:%.*]]) #[[ATTR0]] {
+// CHECK-RV64-NEXT: [[ENTRY:.*:]]
+// CHECK-RV64-NEXT: [[TMP0:%.*]] = call <vscale x 16 x bfloat> @llvm.riscv.vfsgnjn.nxv16bf16.bf16.i64(<vscale x 16 x bfloat> [[MASKEDOFF]], <vscale x 16 x bfloat> [[OP1]], bfloat [[OP2]], i64 [[VL]])
+// CHECK-RV64-NEXT: ret <vscale x 16 x bfloat> [[TMP0]]
+//
+vbfloat16m4_t test_vfsgnjn_vf_bf16m4_tu(vbfloat16m4_t maskedoff, vbfloat16m4_t op1, __bf16 op2, size_t vl) {
+ return __riscv_vfsgnjn_tu(maskedoff, op1, op2, vl);
+}
+
+// CHECK-RV64-LABEL: define dso_local <vscale x 32 x bfloat> @test_vfsgnjn_vv_bf16m8_tu(
+// CHECK-RV64-SAME: <vscale x 32 x bfloat> [[MASKEDOFF:%.*]], <vscale x 32 x bfloat> [[OP1:%.*]], <vscale x 32 x bfloat> [[OP2:%.*]], i64 noundef [[VL:%.*]]) #[[ATTR0]] {
+// CHECK-RV64-NEXT: [[ENTRY:.*:]]
+// CHECK-RV64-NEXT: [[TMP0:%.*]] = call <vscale x 32 x bfloat> @llvm.riscv.vfsgnjn.nxv32bf16.nxv32bf16.i64(<vscale x 32 x bfloat> [[MASKEDOFF]], <vscale x 32 x bfloat> [[OP1]], <vscale x 32 x bfloat> [[OP2]], i64 [[VL]])
+// CHECK-RV64-NEXT: ret <vscale x 32 x bfloat> [[TMP0]]
+//
+vbfloat16m8_t test_vfsgnjn_vv_bf16m8_tu(vbfloat16m8_t maskedoff, vbfloat16m8_t op1, vbfloat16m8_t op2, size_t vl) {
+ return __riscv_vfsgnjn_tu(maskedoff, op1, op2, vl);
+}
+
+// CHECK-RV64-LABEL: define dso_local <vscale x 32 x bfloat> @test_vfsgnjn_vf_bf16m8_tu(
+// CHECK-RV64-SAME: <vscale x 32 x bfloat> [[MASKEDOFF:%.*]], <vscale x 32 x bfloat> [[OP1:%.*]], bfloat noundef [[OP2:%.*]], i64 noundef [[VL:%.*]]) #[[ATTR0]] {
+// CHECK-RV64-NEXT: [[ENTRY:.*:]]
+// CHECK-RV64-NEXT: [[TMP0:%.*]] = call <vscale x 32 x bfloat> @llvm.riscv.vfsgnjn.nxv32bf16.bf16.i64(<vscale x 32 x bfloat> [[MASKEDOFF]], <vscale x 32 x bfloat> [[OP1]], bfloat [[OP2]], i64 [[VL]])
+// CHECK-RV64-NEXT: ret <vscale x 32 x bfloat> [[TMP0]]
+//
+vbfloat16m8_t test_vfsgnjn_vf_bf16m8_tu(vbfloat16m8_t maskedoff, vbfloat16m8_t op1, __bf16 op2, size_t vl) {
+ return __riscv_vfsgnjn_tu(maskedoff, op1, op2, vl);
+}
+
+// CHECK-RV64-LABEL: define dso_local <vscale x 1 x bfloat> @test_vfsgnjn_vv_bf16mf4_tum(
+// CHECK-RV64-SAME: <vscale x 1 x i1> [[MASK:%.*]], <vscale x 1 x bfloat> [[MASKEDOFF:%.*]], <vscale x 1 x bfloat> [[OP1:%.*]], <vscale x 1 x bfloat> [[OP2:%.*]], i64 noundef [[VL:%.*]]) #[[ATTR0]] {
+// CHECK-RV64-NEXT: [[ENTRY:.*:]]
+// CHECK-RV64-NEXT: [[TMP0:%.*]] = call <vscale x 1 x bfloat> @llvm.riscv.vfsgnjn.mask.nxv1bf16.nxv1bf16.i64(<vscale x 1 x bfloat> [[MASKEDOFF]], <vscale x 1 x bfloat> [[OP1]], <vscale x 1 x bfloat> [[OP2]], <vscale x 1 x i1> [[MASK]], i64 [[VL]], i64 2)
+// CHECK-RV64-NEXT: ret <vscale x 1 x bfloat> [[TMP0]]
+//
+vbfloat16mf4_t test_vfsgnjn_vv_bf16mf4_tum(vbool64_t mask, vbfloat16mf4_t maskedoff, vbfloat16mf4_t op1, vbfloat16mf4_t op2, size_t vl) {
+ return __riscv_vfsgnjn_tum(mask, maskedoff, op1, op2, vl);
+}
+
+// CHECK-RV64-LABEL: define dso_local <vscale x 1 x bfloat> @test_vfsgnjn_vf_bf16mf4_tum(
+// CHECK-RV64-SAME: <vscale x 1 x i1> [[MASK:%.*]], <vscale x 1 x bfloat> [[MASKEDOFF:%.*]], <vscale x 1 x bfloat> [[OP1:%.*]], bfloat noundef [[OP2:%.*]], i64 noundef [[VL:%.*]]) #[[ATTR0]] {
+// CHECK-RV64-NEXT: [[ENTRY:.*:]]
+// CHECK-RV64-NEXT: [[TMP0:%.*]] = call <vscale x 1 x bfloat> @llvm.riscv.vfsgnjn.mask.nxv1bf16.bf16.i64(<vscale x 1 x bfloat> [[MASKEDOFF]], <vscale x 1 x bfloat> [[OP1]], bfloat [[OP2]], <vscale x 1 x i1> [[MASK]], i64 [[VL]], i64 2)
+// CHECK-RV64-NEXT: ret <vscale x 1 x bfloat> [[TMP0]]
+//
+vbfloat16mf4_t test_vfsgnjn_vf_bf16mf4_tum(vbool64_t mask, vbfloat16mf4_t maskedoff, vbfloat16mf4_t op1, __bf16 op2, size_t vl) {
+ return __riscv_vfsgnjn_tum(mask, maskedoff, op1, op2, vl);
+}
+
+// CHECK-RV64-LABEL: define dso_local <vscale x 2 x bfloat> @test_vfsgnjn_vv_bf16mf2_tum(
+// CHECK-RV64-SAME: <vscale x 2 x i1> [[MASK:%.*]], <vscale x 2 x bfloat> [[MASKEDOFF:%.*]], <vscale x 2 x bfloat> [[OP1:%.*]], <vscale x 2 x bfloat> [[OP2:%.*]], i64 noundef [[VL:%.*]]) #[[ATTR0]] {
+// CHECK-RV64-NEXT: [[ENTRY:.*:]]
+// CHECK-RV64-NEXT: [[TMP0:%.*]] = call <vscale x 2 x bfloat> @llvm.riscv.vfsgnjn.mask.nxv2bf16.nxv2bf16.i64(<vscale x 2 x bfloat> [[MASKEDOFF]], <vscale x 2 x bfloat> [[OP1]], <vscale x 2 x bfloat> [[OP2]], <vscale x 2 x i1> [[MASK]], i64 [[VL]], i64 2)
+// CHECK-RV64-NEXT: ret <vscale x 2 x bfloat> [[TMP0]]
+//
+vbfloat16mf2_t test_vfsgnjn_vv_bf16mf2_tum(vbool32_t mask, vbfloat16mf2_t maskedoff, vbfloat16mf2_t op1, vbfloat16mf2_t op2, size_t vl) {
+ return __riscv_vfsgnjn_tum(mask, maskedoff, op1, op2, vl);
+}
+
+// CHECK-RV64-LABEL: define dso_local <vscale x 2 x bfloat> @test_vfsgnjn_vf_bf16mf2_tum(
+// CHECK-RV64-SAME: <vscale x 2 x i1> [[MASK:%.*]], <vscale x 2 x bfloat> [[MASKEDOFF:%.*]], <vscale x 2 x bfloat> [[OP1:%.*]], bfloat noundef [[OP2:%.*]], i64 noundef [[VL:%.*]]) #[[ATTR0]] {
+// CHECK-RV64-NEXT: [[ENTRY:.*:]]
+// CHECK-RV64-NEXT: [[TMP0:%.*]] = call <vscale x 2 x bfloat> @llvm.riscv.vfsgnjn.mask.nxv2bf16.bf16.i64(<vscale x 2 x bfloat> [[MASKEDOFF]], <vscale x 2 x bfloat> [[OP1]], bfloat [[OP2]], <vscale x 2 x i1> [[MASK]], i64 [[VL]], i64 2)
+// CHECK-RV64-NEXT: ret <vscale x 2 x bfloat> [[TMP0]]
+//
+vbfloat16mf2_t test_vfsgnjn_vf_bf16mf2_tum(vbool32_t mask, vbfloat16mf2_t maskedoff, vbfloat16mf2_t op1, __bf16 op2, size_t vl) {
+ return __riscv_vfsgnjn_tum(mask, maskedoff, op1, op2, vl);
+}
+
+// CHECK-RV64-LABEL: define dso_local <vscale x 4 x bfloat> @test_vfsgnjn_vv_bf16m1_tum(
+// CHECK-RV64-SAME: <vscale x 4 x i1> [[MASK:%.*]], <vscale x 4 x bfloat> [[MASKEDOFF:%.*]], <vscale x 4 x bfloat> [[OP1:%.*]], <vscale x 4 x bfloat> [[OP2:%.*]], i64 noundef [[VL:%.*]]) #[[ATTR0]] {
+// CHECK-RV64-NEXT: [[ENTRY:.*:]]
+// CHECK-RV64-NEXT: [[TMP0:%.*]] = call <vscale x 4 x bfloat> @llvm.riscv.vfsgnjn.mask.nxv4bf16.nxv4bf16.i64(<vscale x 4 x bfloat> [[MASKEDOFF]], <vscale x 4 x bfloat> [[OP1]], <vscale x 4 x bfloat> [[OP2]], <vscale x 4 x i1> [[MASK]], i64 [[VL]], i64 2)
+// CHECK-RV64-NEXT: ret <vscale x 4 x bfloat> [[TMP0]]
+//
+vbfloat16m1_t test_vfsgnjn_vv_bf16m1_tum(vbool16_t mask, vbfloat16m1_t maskedoff, vbfloat16m1_t op1, vbfloat16m1_t op2, size_t vl) {
+ return __riscv_vfsgnjn_tum(mask, maskedoff, op1, op2, vl);
+}
+
+// CHECK-RV64-LABEL: define dso_local <vscale x 4 x bfloat> @test_vfsgnjn_vf_bf16m1_tum(
+// CHECK-RV64-SAME: <vscale x 4 x i1> [[MASK:%.*]], <vscale x 4 x bfloat> [[MASKEDOFF:%.*]], <vscale x 4 x bfloat> [[OP1:%.*]], bfloat noundef [[OP2:%.*]], i64 noundef [[VL:%.*]]) #[[ATTR0]] {
+// CHECK-RV64-NEXT: [[ENTRY:.*:]]
+// CHECK-RV64-NEXT: [[TMP0:%.*]] = call <vscale x 4 x bfloat> @llvm.riscv.vfsgnjn.mask.nxv4bf16.bf16.i64(<vscale x 4 x bfloat> [[MASKEDOFF]], <vscale x 4 x bfloat> [[OP1]], bfloat [[OP2]], <vscale x 4 x i1> [[MASK]], i64 [[VL]], i64 2)
+// CHECK-RV64-NEXT: ret <vscale x 4 x bfloat> [[TMP0]]
+//
+vbfloat16m1_t test_vfsgnjn_vf_bf16m1_tum(vbool16_t mask, vbfloat16m1_t maskedoff, vbfloat16m1_t op1, __bf16 op2, size_t vl) {
+ return __riscv_vfsgnjn_tum(mask, maskedoff, op1, op2, vl);
+}
+
+// CHECK-RV64-LABEL: define dso_local <vscale x 8 x bfloat> @test_vfsgnjn_vv_bf16m2_tum(
+// CHECK-RV64-SAME: <vscale x 8 x i1> [[MASK:%.*]], <vscale x 8 x bfloat> [[MASKEDOFF:%.*]], <vscale x 8 x bfloat> [[OP1:%.*]], <vscale x 8 x bfloat> [[OP2:%.*]], i64 noundef [[VL:%.*]]) #[[ATTR0]] {
+// CHECK-RV64-NEXT: [[ENTRY:.*:]]
+// CHECK-RV64-NEXT: [[TMP0:%.*]] = call <vscale x 8 x bfloat> @llvm.riscv.vfsgnjn.mask.nxv8bf16.nxv8bf16.i64(<vscale x 8 x bfloat> [[MASKEDOFF]], <vscale x 8 x bfloat> [[OP1]], <vscale x 8 x bfloat> [[OP2]], <vscale x 8 x i1> [[MASK]], i64 [[VL]], i64 2)
+// CHECK-RV64-NEXT: ret <vscale x 8 x bfloat> [[TMP0]]
+//
+vbfloat16m2_t test_vfsgnjn_vv_bf16m2_tum(vbool8_t mask, vbfloat16m2_t maskedoff, vbfloat16m2_t op1, vbfloat16m2_t op2, size_t vl) {
+ return __riscv_vfsgnjn_tum(mask, maskedoff, op1, op2, vl);
+}
+
+// CHECK-RV64-LABEL: define dso_local <vscale x 8 x bfloat> @test_vfsgnjn_vf_bf16m2_tum(
+// CHECK-RV64-SAME: <vscale x 8 x i1> [[MASK:%.*]], <vscale x 8 x bfloat> [[MASKEDOFF:%.*]], <vscale x 8 x bfloat> [[OP1:%.*]], bfloat noundef [[OP2:%.*]], i64 noundef [[VL:%.*]]) #[[ATTR0]] {
+// CHECK-RV64-NEXT: [[ENTRY:.*:]]
+// CHECK-RV64-NEXT: [[TMP0:%.*]] = call <vscale x 8 x bfloat> @llvm.riscv.vfsgnjn.mask.nxv8bf16.bf16.i64(<vscale x 8 x bfloat> [[MASKEDOFF]], <vscale x 8 x bfloat> [[OP1]], bfloat [[OP2]], <vscale x 8 x i1> [[MASK]], i64 [[VL]], i64 2)
+// CHECK-RV64-NEXT: ret <vscale x 8 x bfloat> [[TMP0]]
+//
+vbfloat16m2_t test_vfsgnjn_vf_bf16m2_tum(vbool8_t mask, vbfloat16m2_t maskedoff, vbfloat16m2_t op1, __bf16 op2, size_t vl) {
+ return __riscv_vfsgnjn_tum(mask, maskedoff, op1, op2, vl);
+}
+
+// CHECK-RV64-LABEL: define dso_local <vscale x 16 x bfloat> @test_vfsgnjn_vv_bf16m4_tum(
+// CHECK-RV64-SAME: <vscale x 16 x i1> [[MASK:%.*]], <vscale x 16 x bfloat> [[MASKEDOFF:%.*]], <vscale x 16 x bfloat> [[OP1:%.*]], <vscale x 16 x bfloat> [[OP2:%.*]], i64 noundef [[VL:%.*]]) #[[ATTR0]] {
+// CHECK-RV64-NEXT: [[ENTRY:.*:]]
+// CHECK-RV64-NEXT: [[TMP0:%.*]] = call <vscale x 16 x bfloat> @llvm.riscv.vfsgnjn.mask.nxv16bf16.nxv16bf16.i64(<vscale x 16 x bfloat> [[MASKEDOFF]], <vscale x 16 x bfloat> [[OP1]], <vscale x 16 x bfloat> [[OP2]], <vscale x 16 x i1> [[MASK]], i64 [[VL]], i64 2)
+// CHECK-RV64-NEXT: ret <vscale x 16 x bfloat> [[TMP0]]
+//
+vbfloat16m4_t test_vfsgnjn_vv_bf16m4_tum(vbool4_t mask, vbfloat16m4_t maskedoff, vbfloat16m4_t op1, vbfloat16m4_t op2, size_t vl) {
+ return __riscv_vfsgnjn_tum(mask, maskedoff, op1, op2, vl);
+}
+
+// CHECK-RV64-LABEL: define dso_local <vscale x 16 x bfloat> @test_vfsgnjn_vf_bf16m4_tum(
+// CHECK-RV64-SAME: <vscale x 16 x i1> [[MASK:%.*]], <vscale x 16 x bfloat> [[MASKEDOFF:%.*]], <vscale x 16 x bfloat> [[OP1:%.*]], bfloat noundef [[OP2:%.*]], i64 noundef [[VL:%.*]]) #[[ATTR0]] {
+// CHECK-RV64-NEXT: [[ENTRY:.*:]]
+// CHECK-RV64-NEXT: [[TMP0:%.*]] = call <vscale x 16 x bfloat> @llvm.riscv.vfsgnjn.mask.nxv16bf16.bf16.i64(<vscale x 16 x bfloat> [[MASKEDOFF]], <vscale x 16 x bfloat> [[OP1]], bfloat [[OP2]], <vscale x 16 x i1> [[MASK]], i64 [[VL]], i64 2)
+// CHECK-RV64-NEXT: ret <vscale x 16 x bfloat> [[TMP0]]
+//
+vbfloat16m4_t test_vfsgnjn_vf_bf16m4_tum(vbool4_t mask, vbfloat16m4_t maskedoff, vbfloat16m4_t op1, __bf16 op2, size_t vl) {
+ return __riscv_vfsgnjn_tum(mask, maskedoff, op1, op2, vl);
+}
+
+// CHECK-RV64-LABEL: define dso_local <vscale x 32 x bfloat> @test_vfsgnjn_vv_bf16m8_tum(
+// CHECK-RV64-SAME: <vscale x 32 x i1> [[MASK:%.*]], <vscale x 32 x bfloat> [[MASKEDOFF:%.*]], <vscale x 32 x bfloat> [[OP1:%.*]], <vscale x 32 x bfloat> [[OP2:%.*]], i64 noundef [[VL:%.*]]) #[[ATTR0]] {
+// CHECK-RV64-NEXT: [[ENTRY:.*:]]
+// CHECK-RV64-NEXT: [[TMP0:%.*]] = call <vscale x 32 x bfloat> @llvm.riscv.vfsgnjn.mask.nxv32bf16.nxv32bf16.i64(<vscale x 32 x bfloat> [[MASKEDOFF]], <vscale x 32 x bfloat> [[OP1]], <vscale x 32 x bfloat> [[OP2]], <vscale x 32 x i1> [[MASK]], i64 [[VL]], i64 2)
+// CHECK-RV64-NEXT: ret <vscale x 32 x bfloat> [[TMP0]]
+//
+vbfloat16m8_t test_vfsgnjn_vv_bf16m8_tum(vbool2_t mask, vbfloat16m8_t maskedoff, vbfloat16m8_t op1, vbfloat16m8_t op2, size_t vl) {
+ return __riscv_vfsgnjn_tum(mask, maskedoff, op1, op2, vl);
+}
+
+// CHECK-RV64-LABEL: define dso_local <vscale x 32 x bfloat> @test_vfsgnjn_vf_bf16m8_tum(
+// CHECK-RV64-SAME: <vscale x 32 x i1> [[MASK:%.*]], <vscale x 32 x bfloat> [[MASKEDOFF:%.*]], <vscale x 32 x bfloat> [[OP1:%.*]], bfloat noundef [[OP2:%.*]], i64 noundef [[VL:%.*]]) #[[ATTR0]] {
+// CHECK-RV64-NEXT: [[ENTRY:.*:]]
+// CHECK-RV64-NEXT: [[TMP0:%.*]] = call <vscale x 32 x bfloat> @llvm.riscv.vfsgnjn.mask.nxv32bf16.bf16.i64(<vscale x 32 x bfloat> [[MASKEDOFF]], <vscale x 32 x bfloat> [[OP1]], bfloat [[OP2]], <vscale x 32 x i1> [[MASK]], i64 [[VL]], i64 2)
+// CHECK-RV64-NEXT: ret <vscale x 32 x bfloat> [[TMP0]]
+//
+vbfloat16m8_t test_vfsgnjn_vf_bf16m8_tum(vbool2_t mask, vbfloat16m8_t maskedoff, vbfloat16m8_t op1, __bf16 op2, size_t vl) {
+ return __riscv_vfsgnjn_tum(mask, maskedoff, op1, op2, vl);
+}
+
+// CHECK-RV64-LABEL: define dso_local <vscale x 1 x bfloat> @test_vfsgnjn_vv_bf16mf4_tumu(
+// CHECK-RV64-SAME: <vscale x 1 x i1> [[MASK:%.*]], <vscale x 1 x bfloat> [[MASKEDOFF:%.*]], <vscale x 1 x bfloat> [[OP1:%.*]], <vscale x 1 x bfloat> [[OP2:%.*]], i64 noundef [[VL:%.*]]) #[[ATTR0]] {
+// CHECK-RV64-NEXT: [[ENTRY:.*:]]
+// CHECK-RV64-NEXT: [[TMP0:%.*]] = call <vscale x 1 x bfloat> @llvm.riscv.vfsgnjn.mask.nxv1bf16.nxv1bf16.i64(<vscale x 1 x bfloat> [[MASKEDOFF]], <vscale x 1 x bfloat> [[OP1]], <vscale x 1 x bfloat> [[OP2]], <vscale x 1 x i1> [[MASK]], i64 [[VL]], i64 0)
+// CHECK-RV64-NEXT: ret <vscale x 1 x bfloat> [[TMP0]]
+//
+vbfloat16mf4_t test_vfsgnjn_vv_bf16mf4_tumu(vbool64_t mask, vbfloat16mf4_t maskedoff, vbfloat16mf4_t op1, vbfloat16mf4_t op2, size_t vl) {
+ return __riscv_vfsgnjn_tumu(mask, maskedoff, op1, op2, vl);
+}
+
+// CHECK-RV64-LABEL: define dso_local <vscale x 1 x bfloat> @test_vfsgnjn_vf_bf16mf4_tumu(
+// CHECK-RV64-SAME: <vscale x 1 x i1> [[MASK:%.*]], <vscale x 1 x bfloat> [[MASKEDOFF:%.*]], <vscale x 1 x bfloat> [[OP1:%.*]], bfloat noundef [[OP2:%.*]], i64 noundef [[VL:%.*]]) #[[ATTR0]] {
+// CHECK-RV64-NEXT: [[ENTRY:.*:]]
+// CHECK-RV64-NEXT: [[TMP0:%.*]] = call <vscale x 1 x bfloat> @llvm.riscv.vfsgnjn.mask.nxv1bf16.bf16.i64(<vscale x 1 x bfloat> [[MASKEDOFF]], <vscale x 1 x bfloat> [[OP1]], bfloat [[OP2]], <vscale x 1 x i1> [[MASK]], i64 [[VL]], i64 0)
+// CHECK-RV64-NEXT: ret <vscale x 1 x bfloat> [[TMP0]]
+//
+vbfloat16mf4_t test_vfsgnjn_vf_bf16mf4_tumu(vbool64_t mask, vbfloat16mf4_t maskedoff, vbfloat16mf4_t op1, __bf16 op2, size_t vl) {
+ return __riscv_vfsgnjn_tumu(mask, maskedoff, op1, op2, vl);
+}
+
+// CHECK-RV64-LABEL: define dso_local <vscale x 2 x bfloat> @test_vfsgnjn_vv_bf16mf2_tumu(
+// CHECK-RV64-SAME: <vscale x 2 x i1> [[MASK:%.*]], <vscale x 2 x bfloat> [[MASKEDOFF:%.*]], <vscale x 2 x bfloat> [[OP1:%.*]], <vscale x 2 x bfloat> [[OP2:%.*]], i64 noundef [[VL:%.*]]) #[[ATTR0]] {
+// CHECK-RV64-NEXT: [[ENTRY:.*:]]
+// CHECK-RV64-NEXT: [[TMP0:%.*]] = call <vscale x 2 x bfloat> @llvm.riscv.vfsgnjn.mask.nxv2bf16.nxv2bf16.i64(<vscale x 2 x bfloat> [[MASKEDOFF]], <vscale x 2 x bfloat> [[OP1]], <vscale x 2 x bfloat> [[OP2]], <vscale x 2 x i1> [[MASK]], i64 [[VL]], i64 0)
+// CHECK-RV64-NEXT: ret <vscale x 2 x bfloat> [[TMP0]]
+//
+vbfloat16mf2_t test_vfsgnjn_vv_bf16mf2_tumu(vbool32_t mask, vbfloat16mf2_t maskedoff, vbfloat16mf2_t op1, vbfloat16mf2_t op2, size_t vl) {
+ return __riscv_vfsgnjn_tumu(mask, maskedoff, op1, op2, vl);
+}
+
+// CHECK-RV64-LABEL: define dso_local <vscale x 2 x bfloat> @test_vfsgnjn_vf_bf16mf2_tumu(
+// CHECK-RV64-SAME: <vscale x 2 x i1> [[MASK:%.*]], <vscale x 2 x bfloat> [[MASKEDOFF:%.*]], <vscale x 2 x bfloat> [[OP1:%.*]], bfloat noundef [[OP2:%.*]], i64 noundef [[VL:%.*]]) #[[ATTR0]] {
+// CHECK-RV64-NEXT: [[ENTRY:.*:]]
+// CHECK-RV64-NEXT: [[TMP0:%.*]] = call <vscale x 2 x bfloat> @llvm.riscv.vfsgnjn.mask.nxv2bf16.bf16.i64(<vscale x 2 x bfloat> [[MASKEDOFF]], <vscale x 2 x bfloat> [[OP1]], bfloat [[OP2]], <vscale x 2 x i1> [[MASK]], i64 [[VL]], i64 0)
+// CHECK-RV64-NEXT: ret <vscale x 2 x bfloat> [[TMP0]]
+//
+vbfloat16mf2_t test_vfsgnjn_vf_bf16mf2_tumu(vbool32_t mask, vbfloat16mf2_t maskedoff, vbfloat16mf2_t op1, __bf16 op2, size_t vl) {
+ return __riscv_vfsgnjn_tumu(mask, maskedoff, op1, op2, vl);
+}
+
+// CHECK-RV64-LABEL: define dso_local <vscale x 4 x bfloat> @test_vfsgnjn_vv_bf16m1_tumu(
+// CHECK-RV64-SAME: <vscale x 4 x i1> [[MASK:%.*]], <vscale x 4 x bfloat> [[MASKEDOFF:%.*]], <vscale x 4 x bfloat> [[OP1:%.*]], <vscale x 4 x bfloat> [[OP2:%.*]], i64 noundef [[VL:%.*]]) #[[ATTR0]] {
+// CHECK-RV64-NEXT: [[ENTRY:.*:]]
+// CHECK-RV64-NEXT: [[TMP0:%.*]] = call <vscale x 4 x bfloat> @llvm.riscv.vfsgnjn.mask.nxv4bf16.nxv4bf16.i64(<vscale x 4 x bfloat> [[MASKEDOFF]], <vscale x 4 x bfloat> [[OP1]], <vscale x 4 x bfloat> [[OP2]], <vscale x 4 x i1> [[MASK]], i64 [[VL]], i64 0)
+// CHECK-RV64-NEXT: ret <vscale x 4 x bfloat> [[TMP0]]
+//
+vbfloat16m1_t test_vfsgnjn_vv_bf16m1_tumu(vbool16_t mask, vbfloat16m1_t maskedoff, vbfloat16m1_t op1, vbfloat16m1_t op2, size_t vl) {
+ return __riscv_vfsgnjn_tumu(mask, maskedoff, op1, op2, vl);
+}
+
+// CHECK-RV64-LABEL: define dso_local <vscale x 4 x bfloat> @test_vfsgnjn_vf_bf16m1_tumu(
+// CHECK-RV64-SAME: <vscale x 4 x i1> [[MASK:%.*]], <vscale x 4 x bfloat> [[MASKEDOFF:%.*]], <vscale x 4 x bfloat> [[OP1:%.*]], bfloat noundef [[OP2:%.*]], i64 noundef [[VL:%.*]]) #[[ATTR0]] {
+// CHECK-RV64-NEXT: [[ENTRY:.*:]]
+// CHECK-RV64-NEXT: [[TMP0:%.*]] = call <vscale x 4 x bfloat> @llvm.riscv.vfsgnjn.mask.nxv4bf16.bf16.i64(<vscale x 4 x bfloat> [[MASKEDOFF]], <vscale x 4 x bfloat> [[OP1]], bfloat [[OP2]], <vscale x 4 x i1> [[MASK]], i64 [[VL]], i64 0)
+// CHECK-RV64-NEXT: ret <vscale x 4 x bfloat> [[TMP0]]
+//
+vbfloat16m1_t test_vfsgnjn_vf_bf16m1_tumu(vbool16_t mask, vbfloat16m1_t maskedoff, vbfloat16m1_t op1, __bf16 op2, size_t vl) {
+ return __riscv_vfsgnjn_tumu(mask, maskedoff, op1, op2, vl);
+}
+
+// CHECK-RV64-LABEL: define dso_local <vscale x 8 x bfloat> @test_vfsgnjn_vv_bf16m2_tumu(
+// CHECK-RV64-SAME: <vscale x 8 x i1> [[MASK:%.*]], <vscale x 8 x bfloat> [[MASKEDOFF:%.*]], <vscale x 8 x bfloat> [[OP1:%.*]], <vscale x 8 x bfloat> [[OP2:%.*]], i64 noundef [[VL:%.*]]) #[[ATTR0]] {
+// CHECK-RV64-NEXT: [[ENTRY:.*:]]
+// CHECK-RV64-NEXT: [[TMP0:%.*]] = call <vscale x 8 x bfloat> @llvm.riscv.vfsgnjn.mask.nxv8bf16.nxv8bf16.i64(<vscale x 8 x bfloat> [[MASKEDOFF]], <vscale x 8 x bfloat> [[OP1]], <vscale x 8 x bfloat> [[OP2]], <vscale x 8 x i1> [[MASK]], i64 [[VL]], i64 0)
+// CHECK-RV64-NEXT: ret <vscale x 8 x bfloat> [[TMP0]]
+//
+vbfloat16m2_t test_vfsgnjn_vv_bf16m2_tumu(vbool8_t mask, vbfloat16m2_t maskedoff, vbfloat16m2_t op1, vbfloat16m2_t op2, size_t vl) {
+ return __riscv_vfsgnjn_tumu(mask, maskedoff, op1, op2, vl);
+}
+
+// CHECK-RV64-LABEL: define dso_local <vscale x 8 x bfloat> @test_vfsgnjn_vf_bf16m2_tumu(
+// CHECK-RV64-SAME: <vscale x 8 x i1> [[MASK:%.*]], <vscale x 8 x bfloat> [[MASKEDOFF:%.*]], <vscale x 8 x bfloat> [[OP1:%.*]], bfloat noundef [[OP2:%.*]], i64 noundef [[VL:%.*]]) #[[ATTR0]] {
+// CHECK-RV64-NEXT: [[ENTRY:.*:]]
+// CHECK-RV64-NEXT: [[TMP0:%.*]] = call <vscale x 8 x bfloat> @llvm.riscv.vfsgnjn.mask.nxv8bf16.bf16.i64(<vscale x 8 x bfloat> [[MASKEDOFF]], <vscale x 8 x bfloat> [[OP1]], bfloat [[OP2]], <vscale x 8 x i1> [[MASK]], i64 [[VL]], i64 0)
+// CHECK-RV64-NEXT: ret <vscale x 8 x bfloat> [[TMP0]]
+//
+vbfloat16m2_t test_vfsgnjn_vf_bf16m2_tumu(vbool8_t mask, vbfloat16m2_t maskedoff, vbfloat16m2_t op1, __bf16 op2, size_t vl) {
+ return __riscv_vfsgnjn_tumu(mask, maskedoff, op1, op2, vl);
+}
+
+// CHECK-RV64-LABEL: define dso_local <vscale x 16 x bfloat> @test_vfsgnjn_vv_bf16m4_tumu(
+// CHECK-RV64-SAME: <vscale x 16 x i1> [[MASK:%.*]], <vscale x 16 x bfloat> [[MASKEDOFF:%.*]], <vscale x 16 x bfloat> [[OP1:%.*]], <vscale x 16 x bfloat> [[OP2:%.*]], i64 noundef [[VL:%.*]]) #[[ATTR0]] {
+// CHECK-RV64-NEXT: [[ENTRY:.*:]]
+// CHECK-RV64-NEXT: [[TMP0:%.*]] = call <vscale x 16 x bfloat> @llvm.riscv.vfsgnjn.mask.nxv16bf16.nxv16bf16.i64(<vscale x 16 x bfloat> [[MASKEDOFF]], <vscale x 16 x bfloat> [[OP1]], <vscale x 16 x bfloat> [[OP2]], <vscale x 16 x i1> [[MASK]], i64 [[VL]], i64 0)
+// CHECK-RV64-NEXT: ret <vscale x 16 x bfloat> [[TMP0]]
+//
+vbfloat16m4_t test_vfsgnjn_vv_bf16m4_tumu(vbool4_t mask, vbfloat16m4_t maskedoff, vbfloat16m4_t op1, vbfloat16m4_t op2, size_t vl) {
+ return __riscv_vfsgnjn_tumu(mask, maskedoff, op1, op2, vl);
+}
+
+// CHECK-RV64-LABEL: define dso_local <vscale x 16 x bfloat> @test_vfsgnjn_vf_bf16m4_tumu(
+// CHECK-RV64-SAME: <vscale x 16 x i1> [[MASK:%.*]], <vscale x 16 x bfloat> [[MASKEDOFF:%.*]], <vscale x 16 x bfloat> [[OP1:%.*]], bfloat noundef [[OP2:%.*]], i64 noundef [[VL:%.*]]) #[[ATTR0]] {
+// CHECK-RV64-NEXT: [[ENTRY:.*:]]
+// CHECK-RV64-NEXT: [[TMP0:%.*]] = call <vscale x 16 x bfloat> @llvm.riscv.vfsgnjn.mask.nxv16bf16.bf16.i64(<vscale x 16 x bfloat> [[MASKEDOFF]], <vscale x 16 x bfloat> [[OP1]], bfloat [[OP2]], <vscale x 16 x i1> [[MASK]], i64 [[VL]], i64 0)
+// CHECK-RV64-NEXT: ret <vscale x 16 x bfloat> [[TMP0]]
+//
+vbfloat16m4_t test_vfsgnjn_vf_bf16m4_tumu(vbool4_t mask, vbfloat16m4_t maskedoff, vbfloat16m4_t op1, __bf16 op2, size_t vl) {
+ return __riscv_vfsgnjn_tumu(mask, maskedoff, op1, op2, vl);
+}
+
+// CHECK-RV64-LABEL: define dso_local <vscale x 32 x bfloat> @test_vfsgnjn_vv_bf16m8_tumu(
+// CHECK-RV64-SAME: <vscale x 32 x i1> [[MASK:%.*]], <vscale x 32 x bfloat> [[MASKEDOFF:%.*]], <vscale x 32 x bfloat> [[OP1:%.*]], <vscale x 32 x bfloat> [[OP2:%.*]], i64 noundef [[VL:%.*]]) #[[ATTR0]] {
+// CHECK-RV64-NEXT: [[ENTRY:.*:]]
+// CHECK-RV64-NEXT: [[TMP0:%.*]] = call <vscale x 32 x bfloat> @llvm.riscv.vfsgnjn.mask.nxv32bf16.nxv32bf16.i64(<vscale x 32 x bfloat> [[MASKEDOFF]], <vscale x 32 x bfloat> [[OP1]], <vscale x 32 x bfloat> [[OP2]], <vscale x 32 x i1> [[MASK]], i64 [[VL]], i64 0)
+// CHECK-RV64-NEXT: ret <vscale x 32 x bfloat> [[TMP0]]
+//
+vbfloat16m8_t test_vfsgnjn_vv_bf16m8_tumu(vbool2_t mask, vbfloat16m8_t maskedoff, vbfloat16m8_t op1, vbfloat16m8_t op2, size_t vl) {
+ return __riscv_vfsgnjn_tumu(mask, maskedoff, op1, op2, vl);
+}
+
+// CHECK-RV64-LABEL: define dso_local <vscale x 32 x bfloat> @test_vfsgnjn_vf_bf16m8_tumu(
+// CHECK-RV64-SAME: <vscale x 32 x i1> [[MASK:%.*]], <vscale x 32 x bfloat> [[MASKEDOFF:%.*]], <vscale x 32 x bfloat> [[OP1:%.*]], bfloat noundef [[OP2:%.*]], i64 noundef [[VL:%.*]]) #[[ATTR0]] {
+// CHECK-RV64-NEXT: [[ENTRY:.*:]]
+// CHECK-RV64-NEXT: [[TMP0:%.*]] = call <vscale x 32 x bfloat> @llvm.riscv.vfsgnjn.mask.nxv32bf16.bf16.i64(<vscale x 32 x bfloat> [[MASKEDOFF]], <vscale x 32 x bfloat> [[OP1]], bfloat [[OP2]], <vscale x 32 x i1> [[MASK]], i64 [[VL]], i64 0)
+// CHECK-RV64-NEXT: ret <vscale x 32 x bfloat> [[TMP0]]
+//
+vbfloat16m8_t test_vfsgnjn_vf_bf16m8_tumu(vbool2_t mask, vbfloat16m8_t maskedoff, vbfloat16m8_t op1, __bf16 op2, size_t vl) {
+ return __riscv_vfsgnjn_tumu(mask, maskedoff, op1, op2, vl);
+}
+
+// CHECK-RV64-LABEL: define dso_local <vscale x 1 x bfloat> @test_vfsgnjn_vv_bf16mf4_mu(
+// CHECK-RV64-SAME: <vscale x 1 x i1> [[MASK:%.*]], <vscale x 1 x bfloat> [[MASKEDOFF:%.*]], <vscale x 1 x bfloat> [[OP1:%.*]], <vscale x 1 x bfloat> [[OP2:%.*]], i64 noundef [[VL:%.*]]) #[[ATTR0]] {
+// CHECK-RV64-NEXT: [[ENTRY:.*:]]
+// CHECK-RV64-NEXT: [[TMP0:%.*]] = call <vscale x 1 x bfloat> @llvm.riscv.vfsgnjn.mask.nxv1bf16.nxv1bf16.i64(<vscale x 1 x bfloat> [[MASKEDOFF]], <vscale x 1 x bfloat> [[OP1]], <vscale x 1 x bfloat> [[OP2]], <vscale x 1 x i1> [[MASK]], i64 [[VL]], i64 1)
+// CHECK-RV64-NEXT: ret <vscale x 1 x bfloat> [[TMP0]]
+//
+vbfloat16mf4_t test_vfsgnjn_vv_bf16mf4_mu(vbool64_t mask, vbfloat16mf4_t maskedoff, vbfloat16mf4_t op1, vbfloat16mf4_t op2, size_t vl) {
+ return __riscv_vfsgnjn_mu(mask, maskedoff, op1, op2, vl);
+}
+
+// CHECK-RV64-LABEL: define dso_local <vscale x 1 x bfloat> @test_vfsgnjn_vf_bf16mf4_mu(
+// CHECK-RV64-SAME: <vscale x 1 x i1> [[MASK:%.*]], <vscale x 1 x bfloat> [[MASKEDOFF:%.*]], <vscale x 1 x bfloat> [[OP1:%.*]], bfloat noundef [[OP2:%.*]], i64 noundef [[VL:%.*]]) #[[ATTR0]] {
+// CHECK-RV64-NEXT: [[ENTRY:.*:]]
+// CHECK-RV64-NEXT: [[TMP0:%.*]] = call <vscale x 1 x bfloat> @llvm.riscv.vfsgnjn.mask.nxv1bf16.bf16.i64(<vscale x 1 x bfloat> [[MASKEDOFF]], <vscale x 1 x bfloat> [[OP1]], bfloat [[OP2]], <vscale x 1 x i1> [[MASK]], i64 [[VL]], i64 1)
+// CHECK-RV64-NEXT: ret <vscale x 1 x bfloat> [[TMP0]]
+//
+vbfloat16mf4_t test_vfsgnjn_vf_bf16mf4_mu(vbool64_t mask, vbfloat16mf4_t maskedoff, vbfloat16mf4_t op1, __bf16 op2, size_t vl) {
+ return __riscv_vfsgnjn_mu(mask, maskedoff, op1, op2, vl);
+}
+
+// CHECK-RV64-LABEL: define dso_local <vscale x 2 x bfloat> @test_vfsgnjn_vv_bf16mf2_mu(
+// CHECK-RV64-SAME: <vscale x 2 x i1> [[MASK:%.*]], <vscale x 2 x bfloat> [[MASKEDOFF:%.*]], <vscale x 2 x bfloat> [[OP1:%.*]], <vscale x 2 x bfloat> [[OP2:%.*]], i64 noundef [[VL:%.*]]) #[[ATTR0]] {
+// CHECK-RV64-NEXT: [[ENTRY:.*:]]
+// CHECK-RV64-NEXT: [[TMP0:%.*]] = call <vscale x 2 x bfloat> @llvm.riscv.vfsgnjn.mask.nxv2bf16.nxv2bf16.i64(<vscale x 2 x bfloat> [[MASKEDOFF]], <vscale x 2 x bfloat> [[OP1]], <vscale x 2 x bfloat> [[OP2]], <vscale x 2 x i1> [[MASK]], i64 [[VL]], i64 1)
+// CHECK-RV64-NEXT: ret <vscale x 2 x bfloat> [[TMP0]]
+//
+vbfloat16mf2_t test_vfsgnjn_vv_bf16mf2_mu(vbool32_t mask, vbfloat16mf2_t maskedoff, vbfloat16mf2_t op1, vbfloat16mf2_t op2, size_t vl) {
+ return __riscv_vfsgnjn_mu(mask, maskedoff, op1, op2, vl);
+}
+
+// CHECK-RV64-LABEL: define dso_local <vscale x 2 x bfloat> @test_vfsgnjn_vf_bf16mf2_mu(
+// CHECK-RV64-SAME: <vscale x 2 x i1> [[MASK:%.*]], <vscale x 2 x bfloat> [[MASKEDOFF:%.*]], <vscale x 2 x bfloat> [[OP1:%.*]], bfloat noundef [[OP2:%.*]], i64 noundef [[VL:%.*]]) #[[ATTR0]] {
+// CHECK-RV64-NEXT: [[ENTRY:.*:]]
+// CHECK-RV64-NEXT: [[TMP0:%.*]] = call <vscale x 2 x bfloat> @llvm.riscv.vfsgnjn.mask.nxv2bf16.bf16.i64(<vscale x 2 x bfloat> [[MASKEDOFF]], <vscale x 2 x bfloat> [[OP1]], bfloat [[OP2]], <vscale x 2 x i1> [[MASK]], i64 [[VL]], i64 1)
+// CHECK-RV64-NEXT: ret <vscale x 2 x bfloat> [[TMP0]]
+//
+vbfloat16mf2_t test_vfsgnjn_vf_bf16mf2_mu(vbool32_t mask, vbfloat16mf2_t maskedoff, vbfloat16mf2_t op1, __bf16 op2, size_t vl) {
+ return __riscv_vfsgnjn_mu(mask, maskedoff, op1, op2, vl);
+}
+
+// CHECK-RV64-LABEL: define dso_local <vscale x 4 x bfloat> @test_vfsgnjn_vv_bf16m1_mu(
+// CHECK-RV64-SAME: <vscale x 4 x i1> [[MASK:%.*]], <vscale x 4 x bfloat> [[MASKEDOFF:%.*]], <vscale x 4 x bfloat> [[OP1:%.*]], <vscale x 4 x bfloat> [[OP2:%.*]], i64 noundef [[VL:%.*]]) #[[ATTR0]] {
+// CHECK-RV64-NEXT: [[ENTRY:.*:]]
+// CHECK-RV64-NEXT: [[TMP0:%.*]] = call <vscale x 4 x bfloat> @llvm.riscv.vfsgnjn.mask.nxv4bf16.nxv4bf16.i64(<vscale x 4 x bfloat> [[MASKEDOFF]], <vscale x 4 x bfloat> [[OP1]], <vscale x 4 x bfloat> [[OP2]], <vscale x 4 x i1> [[MASK]], i64 [[VL]], i64 1)
+// CHECK-RV64-NEXT: ret <vscale x 4 x bfloat> [[TMP0]]
+//
+vbfloat16m1_t test_vfsgnjn_vv_bf16m1_mu(vbool16_t mask, vbfloat16m1_t maskedoff, vbfloat16m1_t op1, vbfloat16m1_t op2, size_t vl) {
+ return __riscv_vfsgnjn_mu(mask, maskedoff, op1, op2, vl);
+}
+
+// CHECK-RV64-LABEL: define dso_local <vscale x 4 x bfloat> @test_vfsgnjn_vf_bf16m1_mu(
+// CHECK-RV64-SAME: <vscale x 4 x i1> [[MASK:%.*]], <vscale x 4 x bfloat> [[MASKEDOFF:%.*]], <vscale x 4 x bfloat> [[OP1:%.*]], bfloat noundef [[OP2:%.*]], i64 noundef [[VL:%.*]]) #[[ATTR0]] {
+// CHECK-RV64-NEXT: [[ENTRY:.*:]]
+// CHECK-RV64-NEXT: [[TMP0:%.*]] = call <vscale x 4 x bfloat> @llvm.riscv.vfsgnjn.mask.nxv4bf16.bf16.i64(<vscale x 4 x bfloat> [[MASKEDOFF]], <vscale x 4 x bfloat> [[OP1]], bfloat [[OP2]], <vscale x 4 x i1> [[MASK]], i64 [[VL]], i64 1)
+// CHECK-RV64-NEXT: ret <vscale x 4 x bfloat> [[TMP0]]
+//
+vbfloat16m1_t test_vfsgnjn_vf_bf16m1_mu(vbool16_t mask, vbfloat16m1_t maskedoff, vbfloat16m1_t op1, __bf16 op2, size_t vl) {
+ return __riscv_vfsgnjn_mu(mask, maskedoff, op1, op2, vl);
+}
+
+// CHECK-RV64-LABEL: define dso_local <vscale x 8 x bfloat> @test_vfsgnjn_vv_bf16m2_mu(
+// CHECK-RV64-SAME: <vscale x 8 x i1> [[MASK:%.*]], <vscale x 8 x bfloat> [[MASKEDOFF:%.*]], <vscale x 8 x bfloat> [[OP1:%.*]], <vscale x 8 x bfloat> [[OP2:%.*]], i64 noundef [[VL:%.*]]) #[[ATTR0]] {
+// CHECK-RV64-NEXT: [[ENTRY:.*:]]
+// CHECK-RV64-NEXT: [[TMP0:%.*]] = call <vscale x 8 x bfloat> @llvm.riscv.vfsgnjn.mask.nxv8bf16.nxv8bf16.i64(<vscale x 8 x bfloat> [[MASKEDOFF]], <vscale x 8 x bfloat> [[OP1]], <vscale x 8 x bfloat> [[OP2]], <vscale x 8 x i1> [[MASK]], i64 [[VL]], i64 1)
+// CHECK-RV64-NEXT: ret <vscale x 8 x bfloat> [[TMP0]]
+//
+vbfloat16m2_t test_vfsgnjn_vv_bf16m2_mu(vbool8_t mask, vbfloat16m2_t maskedoff, vbfloat16m2_t op1, vbfloat16m2_t op2, size_t vl) {
+ return __riscv_vfsgnjn_mu(mask, maskedoff, op1, op2, vl);
+}
+
+// CHECK-RV64-LABEL: define dso_local <vscale x 8 x bfloat> @test_vfsgnjn_vf_bf16m2_mu(
+// CHECK-RV64-SAME: <vscale x 8 x i1> [[MASK:%.*]], <vscale x 8 x bfloat> [[MASKEDOFF:%.*]], <vscale x 8 x bfloat> [[OP1:%.*]], bfloat noundef [[OP2:%.*]], i64 noundef [[VL:%.*]]) #[[ATTR0]] {
+// CHECK-RV64-NEXT: [[ENTRY:.*:]]
+// CHECK-RV64-NEXT: [[TMP0:%.*]] = call <vscale x 8 x bfloat> @llvm.riscv.vfsgnjn.mask.nxv8bf16.bf16.i64(<vscale x 8 x bfloat> [[MASKEDOFF]], <vscale x 8 x bfloat> [[OP1]], bfloat [[OP2]], <vscale x 8 x i1> [[MASK]], i64 [[VL]], i64 1)
+// CHECK-RV64-NEXT: ret <vscale x 8 x bfloat> [[TMP0]]
+//
+vbfloat16m2_t test_vfsgnjn_vf_bf16m2_mu(vbool8_t mask, vbfloat16m2_t maskedoff, vbfloat16m2_t op1, __bf16 op2, size_t vl) {
+ return __riscv_vfsgnjn_mu(mask, maskedoff, op1, op2, vl);
+}
+
+// CHECK-RV64-LABEL: define dso_local <vscale x 16 x bfloat> @test_vfsgnjn_vv_bf16m4_mu(
+// CHECK-RV64-SAME: <vscale x 16 x i1> [[MASK:%.*]], <vscale x 16 x bfloat> [[MASKEDOFF:%.*]], <vscale x 16 x bfloat> [[OP1:%.*]], <vscale x 16 x bfloat> [[OP2:%.*]], i64 noundef [[VL:%.*]]) #[[ATTR0]] {
+// CHECK-RV64-NEXT: [[ENTRY:.*:]]
+// CHECK-RV64-NEXT: [[TMP0:%.*]] = call <vscale x 16 x bfloat> @llvm.riscv.vfsgnjn.mask.nxv16bf16.nxv16bf16.i64(<vscale x 16 x bfloat> [[MASKEDOFF]], <vscale x 16 x bfloat> [[OP1]], <vscale x 16 x bfloat> [[OP2]], <vscale x 16 x i1> [[MASK]], i64 [[VL]], i64 1)
+// CHECK-RV64-NEXT: ret <vscale x 16 x bfloat> [[TMP0]]
+//
+vbfloat16m4_t test_vfsgnjn_vv_bf16m4_mu(vbool4_t mask, vbfloat16m4_t maskedoff, vbfloat16m4_t op1, vbfloat16m4_t op2, size_t vl) {
+ return __riscv_vfsgnjn_mu(mask, maskedoff, op1, op2, vl);
+}
+
+// CHECK-RV64-LABEL: define dso_local <vscale x 16 x bfloat> @test_vfsgnjn_vf_bf16m4_mu(
+// CHECK-RV64-SAME: <vscale x 16 x i1> [[MASK:%.*]], <vscale x 16 x bfloat> [[MASKEDOFF:%.*]], <vscale x 16 x bfloat> [[OP1:%.*]], bfloat noundef [[OP2:%.*]], i64 noundef [[VL:%.*]]) #[[ATTR0]] {
+// CHECK-RV64-NEXT: [[ENTRY:.*:]]
+// CHECK-RV64-NEXT: [[TMP0:%.*]] = call <vscale x 16 x bfloat> @llvm.riscv.vfsgnjn.mask.nxv16bf16.bf16.i64(<vscale x 16 x bfloat> [[MASKEDOFF]], <vscale x 16 x bfloat> [[OP1]], bfloat [[OP2]], <vscale x 16 x i1> [[MASK]], i64 [[VL]], i64 1)
+// CHECK-RV64-NEXT: ret <vscale x 16 x bfloat> [[TMP0]]
+//
+vbfloat16m4_t test_vfsgnjn_vf_bf16m4_mu(vbool4_t mask, vbfloat16m4_t maskedoff, vbfloat16m4_t op1, __bf16 op2, size_t vl) {
+ return __riscv_vfsgnjn_mu(mask, maskedoff, op1, op2, vl);
+}
+
+// CHECK-RV64-LABEL: define dso_local <vscale x 32 x bfloat> @test_vfsgnjn_vv_bf16m8_mu(
+// CHECK-RV64-SAME: <vscale x 32 x i1> [[MASK:%.*]], <vscale x 32 x bfloat> [[MASKEDOFF:%.*]], <vscale x 32 x bfloat> [[OP1:%.*]], <vscale x 32 x bfloat> [[OP2:%.*]], i64 noundef [[VL:%.*]]) #[[ATTR0]] {
+// CHECK-RV64-NEXT: [[ENTRY:.*:]]
+// CHECK-RV64-NEXT: [[TMP0:%.*]] = call <vscale x 32 x bfloat> @llvm.riscv.vfsgnjn.mask.nxv32bf16.nxv32bf16.i64(<vscale x 32 x bfloat> [[MASKEDOFF]], <vscale x 32 x bfloat> [[OP1]], <vscale x 32 x bfloat> [[OP2]], <vscale x 32 x i1> [[MASK]], i64 [[VL]], i64 1)
+// CHECK-RV64-NEXT: ret <vscale x 32 x bfloat> [[TMP0]]
+//
+vbfloat16m8_t test_vfsgnjn_vv_bf16m8_mu(vbool2_t mask, vbfloat16m8_t maskedoff, vbfloat16m8_t op1, vbfloat16m8_t op2, size_t vl) {
+ return __riscv_vfsgnjn_mu(mask, maskedoff, op1, op2, vl);
+}
+
+// CHECK-RV64-LABEL: define dso_local <vscale x 32 x bfloat> @test_vfsgnjn_vf_bf16m8_mu(
+// CHECK-RV64-SAME: <vscale x 32 x i1> [[MASK:%.*]], <vscale x 32 x bfloat> [[MASKEDOFF:%.*]], <vscale x 32 x bfloat> [[OP1:%.*]], bfloat noundef [[OP2:%.*]], i64 noundef [[VL:%.*]]) #[[ATTR0]] {
+// CHECK-RV64-NEXT: [[ENTRY:.*:]]
+// CHECK-RV64-NEXT: [[TMP0:%.*]] = call <vscale x 32 x bfloat> @llvm.riscv.vfsgnjn.mask.nxv32bf16.bf16.i64(<vscale x 32 x bfloat> [[MASKEDOFF]], <vscale x 32 x bfloat> [[OP1]], bfloat [[OP2]], <vscale x 32 x i1> [[MASK]], i64 [[VL]], i64 1)
+// CHECK-RV64-NEXT: ret <vscale x 32 x bfloat> [[TMP0]]
+//
+vbfloat16m8_t test_vfsgnjn_vf_bf16m8_mu(vbool2_t mask, vbfloat16m8_t maskedoff, vbfloat16m8_t op1, __bf16 op2, size_t vl) {
+ return __riscv_vfsgnjn_mu(mask, maskedoff, op1, op2, vl);
+}
+
diff --git a/clang/test/CodeGen/RISCV/rvv-intrinsics-autogenerated/zvfbfa/policy/overloaded/vfsgnjx.c b/clang/test/CodeGen/RISCV/rvv-intrinsics-autogenerated/zvfbfa/policy/overloaded/vfsgnjx.c
new file mode 100644
index 0000000..9c5f2af
--- /dev/null
+++ b/clang/test/CodeGen/RISCV/rvv-intrinsics-autogenerated/zvfbfa/policy/overloaded/vfsgnjx.c
@@ -0,0 +1,489 @@
+// NOTE: Assertions have been autogenerated by utils/update_cc_test_checks.py UTC_ARGS: --version 5
+// REQUIRES: riscv-registered-target
+// RUN: %clang_cc1 -triple riscv64 -target-feature +v \
+// RUN: -target-feature +experimental-zvfbfa -disable-O0-optnone \
+// RUN: -emit-llvm %s -o - | opt -S -passes=mem2reg | \
+// RUN: FileCheck --check-prefix=CHECK-RV64 %s
+
+#include <riscv_vector.h>
+
+// CHECK-RV64-LABEL: define dso_local <vscale x 1 x bfloat> @test_vfsgnjx_vv_bf16mf4_tu(
+// CHECK-RV64-SAME: <vscale x 1 x bfloat> [[MASKEDOFF:%.*]], <vscale x 1 x bfloat> [[OP1:%.*]], <vscale x 1 x bfloat> [[OP2:%.*]], i64 noundef [[VL:%.*]]) #[[ATTR0:[0-9]+]] {
+// CHECK-RV64-NEXT: [[ENTRY:.*:]]
+// CHECK-RV64-NEXT: [[TMP0:%.*]] = call <vscale x 1 x bfloat> @llvm.riscv.vfsgnjx.nxv1bf16.nxv1bf16.i64(<vscale x 1 x bfloat> [[MASKEDOFF]], <vscale x 1 x bfloat> [[OP1]], <vscale x 1 x bfloat> [[OP2]], i64 [[VL]])
+// CHECK-RV64-NEXT: ret <vscale x 1 x bfloat> [[TMP0]]
+//
+vbfloat16mf4_t test_vfsgnjx_vv_bf16mf4_tu(vbfloat16mf4_t maskedoff, vbfloat16mf4_t op1, vbfloat16mf4_t op2, size_t vl) {
+ return __riscv_vfsgnjx_tu(maskedoff, op1, op2, vl);
+}
+
+// CHECK-RV64-LABEL: define dso_local <vscale x 1 x bfloat> @test_vfsgnjx_vf_bf16mf4_tu(
+// CHECK-RV64-SAME: <vscale x 1 x bfloat> [[MASKEDOFF:%.*]], <vscale x 1 x bfloat> [[OP1:%.*]], bfloat noundef [[OP2:%.*]], i64 noundef [[VL:%.*]]) #[[ATTR0]] {
+// CHECK-RV64-NEXT: [[ENTRY:.*:]]
+// CHECK-RV64-NEXT: [[TMP0:%.*]] = call <vscale x 1 x bfloat> @llvm.riscv.vfsgnjx.nxv1bf16.bf16.i64(<vscale x 1 x bfloat> [[MASKEDOFF]], <vscale x 1 x bfloat> [[OP1]], bfloat [[OP2]], i64 [[VL]])
+// CHECK-RV64-NEXT: ret <vscale x 1 x bfloat> [[TMP0]]
+//
+vbfloat16mf4_t test_vfsgnjx_vf_bf16mf4_tu(vbfloat16mf4_t maskedoff, vbfloat16mf4_t op1, __bf16 op2, size_t vl) {
+ return __riscv_vfsgnjx_tu(maskedoff, op1, op2, vl);
+}
+
+// CHECK-RV64-LABEL: define dso_local <vscale x 2 x bfloat> @test_vfsgnjx_vv_bf16mf2_tu(
+// CHECK-RV64-SAME: <vscale x 2 x bfloat> [[MASKEDOFF:%.*]], <vscale x 2 x bfloat> [[OP1:%.*]], <vscale x 2 x bfloat> [[OP2:%.*]], i64 noundef [[VL:%.*]]) #[[ATTR0]] {
+// CHECK-RV64-NEXT: [[ENTRY:.*:]]
+// CHECK-RV64-NEXT: [[TMP0:%.*]] = call <vscale x 2 x bfloat> @llvm.riscv.vfsgnjx.nxv2bf16.nxv2bf16.i64(<vscale x 2 x bfloat> [[MASKEDOFF]], <vscale x 2 x bfloat> [[OP1]], <vscale x 2 x bfloat> [[OP2]], i64 [[VL]])
+// CHECK-RV64-NEXT: ret <vscale x 2 x bfloat> [[TMP0]]
+//
+vbfloat16mf2_t test_vfsgnjx_vv_bf16mf2_tu(vbfloat16mf2_t maskedoff, vbfloat16mf2_t op1, vbfloat16mf2_t op2, size_t vl) {
+ return __riscv_vfsgnjx_tu(maskedoff, op1, op2, vl);
+}
+
+// CHECK-RV64-LABEL: define dso_local <vscale x 2 x bfloat> @test_vfsgnjx_vf_bf16mf2_tu(
+// CHECK-RV64-SAME: <vscale x 2 x bfloat> [[MASKEDOFF:%.*]], <vscale x 2 x bfloat> [[OP1:%.*]], bfloat noundef [[OP2:%.*]], i64 noundef [[VL:%.*]]) #[[ATTR0]] {
+// CHECK-RV64-NEXT: [[ENTRY:.*:]]
+// CHECK-RV64-NEXT: [[TMP0:%.*]] = call <vscale x 2 x bfloat> @llvm.riscv.vfsgnjx.nxv2bf16.bf16.i64(<vscale x 2 x bfloat> [[MASKEDOFF]], <vscale x 2 x bfloat> [[OP1]], bfloat [[OP2]], i64 [[VL]])
+// CHECK-RV64-NEXT: ret <vscale x 2 x bfloat> [[TMP0]]
+//
+vbfloat16mf2_t test_vfsgnjx_vf_bf16mf2_tu(vbfloat16mf2_t maskedoff, vbfloat16mf2_t op1, __bf16 op2, size_t vl) {
+ return __riscv_vfsgnjx_tu(maskedoff, op1, op2, vl);
+}
+
+// CHECK-RV64-LABEL: define dso_local <vscale x 4 x bfloat> @test_vfsgnjx_vv_bf16m1_tu(
+// CHECK-RV64-SAME: <vscale x 4 x bfloat> [[MASKEDOFF:%.*]], <vscale x 4 x bfloat> [[OP1:%.*]], <vscale x 4 x bfloat> [[OP2:%.*]], i64 noundef [[VL:%.*]]) #[[ATTR0]] {
+// CHECK-RV64-NEXT: [[ENTRY:.*:]]
+// CHECK-RV64-NEXT: [[TMP0:%.*]] = call <vscale x 4 x bfloat> @llvm.riscv.vfsgnjx.nxv4bf16.nxv4bf16.i64(<vscale x 4 x bfloat> [[MASKEDOFF]], <vscale x 4 x bfloat> [[OP1]], <vscale x 4 x bfloat> [[OP2]], i64 [[VL]])
+// CHECK-RV64-NEXT: ret <vscale x 4 x bfloat> [[TMP0]]
+//
+vbfloat16m1_t test_vfsgnjx_vv_bf16m1_tu(vbfloat16m1_t maskedoff, vbfloat16m1_t op1, vbfloat16m1_t op2, size_t vl) {
+ return __riscv_vfsgnjx_tu(maskedoff, op1, op2, vl);
+}
+
+// CHECK-RV64-LABEL: define dso_local <vscale x 4 x bfloat> @test_vfsgnjx_vf_bf16m1_tu(
+// CHECK-RV64-SAME: <vscale x 4 x bfloat> [[MASKEDOFF:%.*]], <vscale x 4 x bfloat> [[OP1:%.*]], bfloat noundef [[OP2:%.*]], i64 noundef [[VL:%.*]]) #[[ATTR0]] {
+// CHECK-RV64-NEXT: [[ENTRY:.*:]]
+// CHECK-RV64-NEXT: [[TMP0:%.*]] = call <vscale x 4 x bfloat> @llvm.riscv.vfsgnjx.nxv4bf16.bf16.i64(<vscale x 4 x bfloat> [[MASKEDOFF]], <vscale x 4 x bfloat> [[OP1]], bfloat [[OP2]], i64 [[VL]])
+// CHECK-RV64-NEXT: ret <vscale x 4 x bfloat> [[TMP0]]
+//
+vbfloat16m1_t test_vfsgnjx_vf_bf16m1_tu(vbfloat16m1_t maskedoff, vbfloat16m1_t op1, __bf16 op2, size_t vl) {
+ return __riscv_vfsgnjx_tu(maskedoff, op1, op2, vl);
+}
+
+// CHECK-RV64-LABEL: define dso_local <vscale x 8 x bfloat> @test_vfsgnjx_vv_bf16m2_tu(
+// CHECK-RV64-SAME: <vscale x 8 x bfloat> [[MASKEDOFF:%.*]], <vscale x 8 x bfloat> [[OP1:%.*]], <vscale x 8 x bfloat> [[OP2:%.*]], i64 noundef [[VL:%.*]]) #[[ATTR0]] {
+// CHECK-RV64-NEXT: [[ENTRY:.*:]]
+// CHECK-RV64-NEXT: [[TMP0:%.*]] = call <vscale x 8 x bfloat> @llvm.riscv.vfsgnjx.nxv8bf16.nxv8bf16.i64(<vscale x 8 x bfloat> [[MASKEDOFF]], <vscale x 8 x bfloat> [[OP1]], <vscale x 8 x bfloat> [[OP2]], i64 [[VL]])
+// CHECK-RV64-NEXT: ret <vscale x 8 x bfloat> [[TMP0]]
+//
+vbfloat16m2_t test_vfsgnjx_vv_bf16m2_tu(vbfloat16m2_t maskedoff, vbfloat16m2_t op1, vbfloat16m2_t op2, size_t vl) {
+ return __riscv_vfsgnjx_tu(maskedoff, op1, op2, vl);
+}
+
+// CHECK-RV64-LABEL: define dso_local <vscale x 8 x bfloat> @test_vfsgnjx_vf_bf16m2_tu(
+// CHECK-RV64-SAME: <vscale x 8 x bfloat> [[MASKEDOFF:%.*]], <vscale x 8 x bfloat> [[OP1:%.*]], bfloat noundef [[OP2:%.*]], i64 noundef [[VL:%.*]]) #[[ATTR0]] {
+// CHECK-RV64-NEXT: [[ENTRY:.*:]]
+// CHECK-RV64-NEXT: [[TMP0:%.*]] = call <vscale x 8 x bfloat> @llvm.riscv.vfsgnjx.nxv8bf16.bf16.i64(<vscale x 8 x bfloat> [[MASKEDOFF]], <vscale x 8 x bfloat> [[OP1]], bfloat [[OP2]], i64 [[VL]])
+// CHECK-RV64-NEXT: ret <vscale x 8 x bfloat> [[TMP0]]
+//
+vbfloat16m2_t test_vfsgnjx_vf_bf16m2_tu(vbfloat16m2_t maskedoff, vbfloat16m2_t op1, __bf16 op2, size_t vl) {
+ return __riscv_vfsgnjx_tu(maskedoff, op1, op2, vl);
+}
+
+// CHECK-RV64-LABEL: define dso_local <vscale x 16 x bfloat> @test_vfsgnjx_vv_bf16m4_tu(
+// CHECK-RV64-SAME: <vscale x 16 x bfloat> [[MASKEDOFF:%.*]], <vscale x 16 x bfloat> [[OP1:%.*]], <vscale x 16 x bfloat> [[OP2:%.*]], i64 noundef [[VL:%.*]]) #[[ATTR0]] {
+// CHECK-RV64-NEXT: [[ENTRY:.*:]]
+// CHECK-RV64-NEXT: [[TMP0:%.*]] = call <vscale x 16 x bfloat> @llvm.riscv.vfsgnjx.nxv16bf16.nxv16bf16.i64(<vscale x 16 x bfloat> [[MASKEDOFF]], <vscale x 16 x bfloat> [[OP1]], <vscale x 16 x bfloat> [[OP2]], i64 [[VL]])
+// CHECK-RV64-NEXT: ret <vscale x 16 x bfloat> [[TMP0]]
+//
+vbfloat16m4_t test_vfsgnjx_vv_bf16m4_tu(vbfloat16m4_t maskedoff, vbfloat16m4_t op1, vbfloat16m4_t op2, size_t vl) {
+ return __riscv_vfsgnjx_tu(maskedoff, op1, op2, vl);
+}
+
+// CHECK-RV64-LABEL: define dso_local <vscale x 16 x bfloat> @test_vfsgnjx_vf_bf16m4_tu(
+// CHECK-RV64-SAME: <vscale x 16 x bfloat> [[MASKEDOFF:%.*]], <vscale x 16 x bfloat> [[OP1:%.*]], bfloat noundef [[OP2:%.*]], i64 noundef [[VL:%.*]]) #[[ATTR0]] {
+// CHECK-RV64-NEXT: [[ENTRY:.*:]]
+// CHECK-RV64-NEXT: [[TMP0:%.*]] = call <vscale x 16 x bfloat> @llvm.riscv.vfsgnjx.nxv16bf16.bf16.i64(<vscale x 16 x bfloat> [[MASKEDOFF]], <vscale x 16 x bfloat> [[OP1]], bfloat [[OP2]], i64 [[VL]])
+// CHECK-RV64-NEXT: ret <vscale x 16 x bfloat> [[TMP0]]
+//
+vbfloat16m4_t test_vfsgnjx_vf_bf16m4_tu(vbfloat16m4_t maskedoff, vbfloat16m4_t op1, __bf16 op2, size_t vl) {
+ return __riscv_vfsgnjx_tu(maskedoff, op1, op2, vl);
+}
+
+// CHECK-RV64-LABEL: define dso_local <vscale x 32 x bfloat> @test_vfsgnjx_vv_bf16m8_tu(
+// CHECK-RV64-SAME: <vscale x 32 x bfloat> [[MASKEDOFF:%.*]], <vscale x 32 x bfloat> [[OP1:%.*]], <vscale x 32 x bfloat> [[OP2:%.*]], i64 noundef [[VL:%.*]]) #[[ATTR0]] {
+// CHECK-RV64-NEXT: [[ENTRY:.*:]]
+// CHECK-RV64-NEXT: [[TMP0:%.*]] = call <vscale x 32 x bfloat> @llvm.riscv.vfsgnjx.nxv32bf16.nxv32bf16.i64(<vscale x 32 x bfloat> [[MASKEDOFF]], <vscale x 32 x bfloat> [[OP1]], <vscale x 32 x bfloat> [[OP2]], i64 [[VL]])
+// CHECK-RV64-NEXT: ret <vscale x 32 x bfloat> [[TMP0]]
+//
+vbfloat16m8_t test_vfsgnjx_vv_bf16m8_tu(vbfloat16m8_t maskedoff, vbfloat16m8_t op1, vbfloat16m8_t op2, size_t vl) {
+ return __riscv_vfsgnjx_tu(maskedoff, op1, op2, vl);
+}
+
+// CHECK-RV64-LABEL: define dso_local <vscale x 32 x bfloat> @test_vfsgnjx_vf_bf16m8_tu(
+// CHECK-RV64-SAME: <vscale x 32 x bfloat> [[MASKEDOFF:%.*]], <vscale x 32 x bfloat> [[OP1:%.*]], bfloat noundef [[OP2:%.*]], i64 noundef [[VL:%.*]]) #[[ATTR0]] {
+// CHECK-RV64-NEXT: [[ENTRY:.*:]]
+// CHECK-RV64-NEXT: [[TMP0:%.*]] = call <vscale x 32 x bfloat> @llvm.riscv.vfsgnjx.nxv32bf16.bf16.i64(<vscale x 32 x bfloat> [[MASKEDOFF]], <vscale x 32 x bfloat> [[OP1]], bfloat [[OP2]], i64 [[VL]])
+// CHECK-RV64-NEXT: ret <vscale x 32 x bfloat> [[TMP0]]
+//
+vbfloat16m8_t test_vfsgnjx_vf_bf16m8_tu(vbfloat16m8_t maskedoff, vbfloat16m8_t op1, __bf16 op2, size_t vl) {
+ return __riscv_vfsgnjx_tu(maskedoff, op1, op2, vl);
+}
+
+// CHECK-RV64-LABEL: define dso_local <vscale x 1 x bfloat> @test_vfsgnjx_vv_bf16mf4_tum(
+// CHECK-RV64-SAME: <vscale x 1 x i1> [[MASK:%.*]], <vscale x 1 x bfloat> [[MASKEDOFF:%.*]], <vscale x 1 x bfloat> [[OP1:%.*]], <vscale x 1 x bfloat> [[OP2:%.*]], i64 noundef [[VL:%.*]]) #[[ATTR0]] {
+// CHECK-RV64-NEXT: [[ENTRY:.*:]]
+// CHECK-RV64-NEXT: [[TMP0:%.*]] = call <vscale x 1 x bfloat> @llvm.riscv.vfsgnjx.mask.nxv1bf16.nxv1bf16.i64(<vscale x 1 x bfloat> [[MASKEDOFF]], <vscale x 1 x bfloat> [[OP1]], <vscale x 1 x bfloat> [[OP2]], <vscale x 1 x i1> [[MASK]], i64 [[VL]], i64 2)
+// CHECK-RV64-NEXT: ret <vscale x 1 x bfloat> [[TMP0]]
+//
+vbfloat16mf4_t test_vfsgnjx_vv_bf16mf4_tum(vbool64_t mask, vbfloat16mf4_t maskedoff, vbfloat16mf4_t op1, vbfloat16mf4_t op2, size_t vl) {
+ return __riscv_vfsgnjx_tum(mask, maskedoff, op1, op2, vl);
+}
+
+// CHECK-RV64-LABEL: define dso_local <vscale x 1 x bfloat> @test_vfsgnjx_vf_bf16mf4_tum(
+// CHECK-RV64-SAME: <vscale x 1 x i1> [[MASK:%.*]], <vscale x 1 x bfloat> [[MASKEDOFF:%.*]], <vscale x 1 x bfloat> [[OP1:%.*]], bfloat noundef [[OP2:%.*]], i64 noundef [[VL:%.*]]) #[[ATTR0]] {
+// CHECK-RV64-NEXT: [[ENTRY:.*:]]
+// CHECK-RV64-NEXT: [[TMP0:%.*]] = call <vscale x 1 x bfloat> @llvm.riscv.vfsgnjx.mask.nxv1bf16.bf16.i64(<vscale x 1 x bfloat> [[MASKEDOFF]], <vscale x 1 x bfloat> [[OP1]], bfloat [[OP2]], <vscale x 1 x i1> [[MASK]], i64 [[VL]], i64 2)
+// CHECK-RV64-NEXT: ret <vscale x 1 x bfloat> [[TMP0]]
+//
+vbfloat16mf4_t test_vfsgnjx_vf_bf16mf4_tum(vbool64_t mask, vbfloat16mf4_t maskedoff, vbfloat16mf4_t op1, __bf16 op2, size_t vl) {
+ return __riscv_vfsgnjx_tum(mask, maskedoff, op1, op2, vl);
+}
+
+// CHECK-RV64-LABEL: define dso_local <vscale x 2 x bfloat> @test_vfsgnjx_vv_bf16mf2_tum(
+// CHECK-RV64-SAME: <vscale x 2 x i1> [[MASK:%.*]], <vscale x 2 x bfloat> [[MASKEDOFF:%.*]], <vscale x 2 x bfloat> [[OP1:%.*]], <vscale x 2 x bfloat> [[OP2:%.*]], i64 noundef [[VL:%.*]]) #[[ATTR0]] {
+// CHECK-RV64-NEXT: [[ENTRY:.*:]]
+// CHECK-RV64-NEXT: [[TMP0:%.*]] = call <vscale x 2 x bfloat> @llvm.riscv.vfsgnjx.mask.nxv2bf16.nxv2bf16.i64(<vscale x 2 x bfloat> [[MASKEDOFF]], <vscale x 2 x bfloat> [[OP1]], <vscale x 2 x bfloat> [[OP2]], <vscale x 2 x i1> [[MASK]], i64 [[VL]], i64 2)
+// CHECK-RV64-NEXT: ret <vscale x 2 x bfloat> [[TMP0]]
+//
+vbfloat16mf2_t test_vfsgnjx_vv_bf16mf2_tum(vbool32_t mask, vbfloat16mf2_t maskedoff, vbfloat16mf2_t op1, vbfloat16mf2_t op2, size_t vl) {
+ return __riscv_vfsgnjx_tum(mask, maskedoff, op1, op2, vl);
+}
+
+// CHECK-RV64-LABEL: define dso_local <vscale x 2 x bfloat> @test_vfsgnjx_vf_bf16mf2_tum(
+// CHECK-RV64-SAME: <vscale x 2 x i1> [[MASK:%.*]], <vscale x 2 x bfloat> [[MASKEDOFF:%.*]], <vscale x 2 x bfloat> [[OP1:%.*]], bfloat noundef [[OP2:%.*]], i64 noundef [[VL:%.*]]) #[[ATTR0]] {
+// CHECK-RV64-NEXT: [[ENTRY:.*:]]
+// CHECK-RV64-NEXT: [[TMP0:%.*]] = call <vscale x 2 x bfloat> @llvm.riscv.vfsgnjx.mask.nxv2bf16.bf16.i64(<vscale x 2 x bfloat> [[MASKEDOFF]], <vscale x 2 x bfloat> [[OP1]], bfloat [[OP2]], <vscale x 2 x i1> [[MASK]], i64 [[VL]], i64 2)
+// CHECK-RV64-NEXT: ret <vscale x 2 x bfloat> [[TMP0]]
+//
+vbfloat16mf2_t test_vfsgnjx_vf_bf16mf2_tum(vbool32_t mask, vbfloat16mf2_t maskedoff, vbfloat16mf2_t op1, __bf16 op2, size_t vl) {
+ return __riscv_vfsgnjx_tum(mask, maskedoff, op1, op2, vl);
+}
+
+// CHECK-RV64-LABEL: define dso_local <vscale x 4 x bfloat> @test_vfsgnjx_vv_bf16m1_tum(
+// CHECK-RV64-SAME: <vscale x 4 x i1> [[MASK:%.*]], <vscale x 4 x bfloat> [[MASKEDOFF:%.*]], <vscale x 4 x bfloat> [[OP1:%.*]], <vscale x 4 x bfloat> [[OP2:%.*]], i64 noundef [[VL:%.*]]) #[[ATTR0]] {
+// CHECK-RV64-NEXT: [[ENTRY:.*:]]
+// CHECK-RV64-NEXT: [[TMP0:%.*]] = call <vscale x 4 x bfloat> @llvm.riscv.vfsgnjx.mask.nxv4bf16.nxv4bf16.i64(<vscale x 4 x bfloat> [[MASKEDOFF]], <vscale x 4 x bfloat> [[OP1]], <vscale x 4 x bfloat> [[OP2]], <vscale x 4 x i1> [[MASK]], i64 [[VL]], i64 2)
+// CHECK-RV64-NEXT: ret <vscale x 4 x bfloat> [[TMP0]]
+//
+vbfloat16m1_t test_vfsgnjx_vv_bf16m1_tum(vbool16_t mask, vbfloat16m1_t maskedoff, vbfloat16m1_t op1, vbfloat16m1_t op2, size_t vl) {
+ return __riscv_vfsgnjx_tum(mask, maskedoff, op1, op2, vl);
+}
+
+// CHECK-RV64-LABEL: define dso_local <vscale x 4 x bfloat> @test_vfsgnjx_vf_bf16m1_tum(
+// CHECK-RV64-SAME: <vscale x 4 x i1> [[MASK:%.*]], <vscale x 4 x bfloat> [[MASKEDOFF:%.*]], <vscale x 4 x bfloat> [[OP1:%.*]], bfloat noundef [[OP2:%.*]], i64 noundef [[VL:%.*]]) #[[ATTR0]] {
+// CHECK-RV64-NEXT: [[ENTRY:.*:]]
+// CHECK-RV64-NEXT: [[TMP0:%.*]] = call <vscale x 4 x bfloat> @llvm.riscv.vfsgnjx.mask.nxv4bf16.bf16.i64(<vscale x 4 x bfloat> [[MASKEDOFF]], <vscale x 4 x bfloat> [[OP1]], bfloat [[OP2]], <vscale x 4 x i1> [[MASK]], i64 [[VL]], i64 2)
+// CHECK-RV64-NEXT: ret <vscale x 4 x bfloat> [[TMP0]]
+//
+vbfloat16m1_t test_vfsgnjx_vf_bf16m1_tum(vbool16_t mask, vbfloat16m1_t maskedoff, vbfloat16m1_t op1, __bf16 op2, size_t vl) {
+ return __riscv_vfsgnjx_tum(mask, maskedoff, op1, op2, vl);
+}
+
+// CHECK-RV64-LABEL: define dso_local <vscale x 8 x bfloat> @test_vfsgnjx_vv_bf16m2_tum(
+// CHECK-RV64-SAME: <vscale x 8 x i1> [[MASK:%.*]], <vscale x 8 x bfloat> [[MASKEDOFF:%.*]], <vscale x 8 x bfloat> [[OP1:%.*]], <vscale x 8 x bfloat> [[OP2:%.*]], i64 noundef [[VL:%.*]]) #[[ATTR0]] {
+// CHECK-RV64-NEXT: [[ENTRY:.*:]]
+// CHECK-RV64-NEXT: [[TMP0:%.*]] = call <vscale x 8 x bfloat> @llvm.riscv.vfsgnjx.mask.nxv8bf16.nxv8bf16.i64(<vscale x 8 x bfloat> [[MASKEDOFF]], <vscale x 8 x bfloat> [[OP1]], <vscale x 8 x bfloat> [[OP2]], <vscale x 8 x i1> [[MASK]], i64 [[VL]], i64 2)
+// CHECK-RV64-NEXT: ret <vscale x 8 x bfloat> [[TMP0]]
+//
+vbfloat16m2_t test_vfsgnjx_vv_bf16m2_tum(vbool8_t mask, vbfloat16m2_t maskedoff, vbfloat16m2_t op1, vbfloat16m2_t op2, size_t vl) {
+ return __riscv_vfsgnjx_tum(mask, maskedoff, op1, op2, vl);
+}
+
+// CHECK-RV64-LABEL: define dso_local <vscale x 8 x bfloat> @test_vfsgnjx_vf_bf16m2_tum(
+// CHECK-RV64-SAME: <vscale x 8 x i1> [[MASK:%.*]], <vscale x 8 x bfloat> [[MASKEDOFF:%.*]], <vscale x 8 x bfloat> [[OP1:%.*]], bfloat noundef [[OP2:%.*]], i64 noundef [[VL:%.*]]) #[[ATTR0]] {
+// CHECK-RV64-NEXT: [[ENTRY:.*:]]
+// CHECK-RV64-NEXT: [[TMP0:%.*]] = call <vscale x 8 x bfloat> @llvm.riscv.vfsgnjx.mask.nxv8bf16.bf16.i64(<vscale x 8 x bfloat> [[MASKEDOFF]], <vscale x 8 x bfloat> [[OP1]], bfloat [[OP2]], <vscale x 8 x i1> [[MASK]], i64 [[VL]], i64 2)
+// CHECK-RV64-NEXT: ret <vscale x 8 x bfloat> [[TMP0]]
+//
+vbfloat16m2_t test_vfsgnjx_vf_bf16m2_tum(vbool8_t mask, vbfloat16m2_t maskedoff, vbfloat16m2_t op1, __bf16 op2, size_t vl) {
+ return __riscv_vfsgnjx_tum(mask, maskedoff, op1, op2, vl);
+}
+
+// CHECK-RV64-LABEL: define dso_local <vscale x 16 x bfloat> @test_vfsgnjx_vv_bf16m4_tum(
+// CHECK-RV64-SAME: <vscale x 16 x i1> [[MASK:%.*]], <vscale x 16 x bfloat> [[MASKEDOFF:%.*]], <vscale x 16 x bfloat> [[OP1:%.*]], <vscale x 16 x bfloat> [[OP2:%.*]], i64 noundef [[VL:%.*]]) #[[ATTR0]] {
+// CHECK-RV64-NEXT: [[ENTRY:.*:]]
+// CHECK-RV64-NEXT: [[TMP0:%.*]] = call <vscale x 16 x bfloat> @llvm.riscv.vfsgnjx.mask.nxv16bf16.nxv16bf16.i64(<vscale x 16 x bfloat> [[MASKEDOFF]], <vscale x 16 x bfloat> [[OP1]], <vscale x 16 x bfloat> [[OP2]], <vscale x 16 x i1> [[MASK]], i64 [[VL]], i64 2)
+// CHECK-RV64-NEXT: ret <vscale x 16 x bfloat> [[TMP0]]
+//
+vbfloat16m4_t test_vfsgnjx_vv_bf16m4_tum(vbool4_t mask, vbfloat16m4_t maskedoff, vbfloat16m4_t op1, vbfloat16m4_t op2, size_t vl) {
+ return __riscv_vfsgnjx_tum(mask, maskedoff, op1, op2, vl);
+}
+
+// CHECK-RV64-LABEL: define dso_local <vscale x 16 x bfloat> @test_vfsgnjx_vf_bf16m4_tum(
+// CHECK-RV64-SAME: <vscale x 16 x i1> [[MASK:%.*]], <vscale x 16 x bfloat> [[MASKEDOFF:%.*]], <vscale x 16 x bfloat> [[OP1:%.*]], bfloat noundef [[OP2:%.*]], i64 noundef [[VL:%.*]]) #[[ATTR0]] {
+// CHECK-RV64-NEXT: [[ENTRY:.*:]]
+// CHECK-RV64-NEXT: [[TMP0:%.*]] = call <vscale x 16 x bfloat> @llvm.riscv.vfsgnjx.mask.nxv16bf16.bf16.i64(<vscale x 16 x bfloat> [[MASKEDOFF]], <vscale x 16 x bfloat> [[OP1]], bfloat [[OP2]], <vscale x 16 x i1> [[MASK]], i64 [[VL]], i64 2)
+// CHECK-RV64-NEXT: ret <vscale x 16 x bfloat> [[TMP0]]
+//
+vbfloat16m4_t test_vfsgnjx_vf_bf16m4_tum(vbool4_t mask, vbfloat16m4_t maskedoff, vbfloat16m4_t op1, __bf16 op2, size_t vl) {
+ return __riscv_vfsgnjx_tum(mask, maskedoff, op1, op2, vl);
+}
+
+// CHECK-RV64-LABEL: define dso_local <vscale x 32 x bfloat> @test_vfsgnjx_vv_bf16m8_tum(
+// CHECK-RV64-SAME: <vscale x 32 x i1> [[MASK:%.*]], <vscale x 32 x bfloat> [[MASKEDOFF:%.*]], <vscale x 32 x bfloat> [[OP1:%.*]], <vscale x 32 x bfloat> [[OP2:%.*]], i64 noundef [[VL:%.*]]) #[[ATTR0]] {
+// CHECK-RV64-NEXT: [[ENTRY:.*:]]
+// CHECK-RV64-NEXT: [[TMP0:%.*]] = call <vscale x 32 x bfloat> @llvm.riscv.vfsgnjx.mask.nxv32bf16.nxv32bf16.i64(<vscale x 32 x bfloat> [[MASKEDOFF]], <vscale x 32 x bfloat> [[OP1]], <vscale x 32 x bfloat> [[OP2]], <vscale x 32 x i1> [[MASK]], i64 [[VL]], i64 2)
+// CHECK-RV64-NEXT: ret <vscale x 32 x bfloat> [[TMP0]]
+//
+vbfloat16m8_t test_vfsgnjx_vv_bf16m8_tum(vbool2_t mask, vbfloat16m8_t maskedoff, vbfloat16m8_t op1, vbfloat16m8_t op2, size_t vl) {
+ return __riscv_vfsgnjx_tum(mask, maskedoff, op1, op2, vl);
+}
+
+// CHECK-RV64-LABEL: define dso_local <vscale x 32 x bfloat> @test_vfsgnjx_vf_bf16m8_tum(
+// CHECK-RV64-SAME: <vscale x 32 x i1> [[MASK:%.*]], <vscale x 32 x bfloat> [[MASKEDOFF:%.*]], <vscale x 32 x bfloat> [[OP1:%.*]], bfloat noundef [[OP2:%.*]], i64 noundef [[VL:%.*]]) #[[ATTR0]] {
+// CHECK-RV64-NEXT: [[ENTRY:.*:]]
+// CHECK-RV64-NEXT: [[TMP0:%.*]] = call <vscale x 32 x bfloat> @llvm.riscv.vfsgnjx.mask.nxv32bf16.bf16.i64(<vscale x 32 x bfloat> [[MASKEDOFF]], <vscale x 32 x bfloat> [[OP1]], bfloat [[OP2]], <vscale x 32 x i1> [[MASK]], i64 [[VL]], i64 2)
+// CHECK-RV64-NEXT: ret <vscale x 32 x bfloat> [[TMP0]]
+//
+vbfloat16m8_t test_vfsgnjx_vf_bf16m8_tum(vbool2_t mask, vbfloat16m8_t maskedoff, vbfloat16m8_t op1, __bf16 op2, size_t vl) {
+ return __riscv_vfsgnjx_tum(mask, maskedoff, op1, op2, vl);
+}
+
+// CHECK-RV64-LABEL: define dso_local <vscale x 1 x bfloat> @test_vfsgnjx_vv_bf16mf4_tumu(
+// CHECK-RV64-SAME: <vscale x 1 x i1> [[MASK:%.*]], <vscale x 1 x bfloat> [[MASKEDOFF:%.*]], <vscale x 1 x bfloat> [[OP1:%.*]], <vscale x 1 x bfloat> [[OP2:%.*]], i64 noundef [[VL:%.*]]) #[[ATTR0]] {
+// CHECK-RV64-NEXT: [[ENTRY:.*:]]
+// CHECK-RV64-NEXT: [[TMP0:%.*]] = call <vscale x 1 x bfloat> @llvm.riscv.vfsgnjx.mask.nxv1bf16.nxv1bf16.i64(<vscale x 1 x bfloat> [[MASKEDOFF]], <vscale x 1 x bfloat> [[OP1]], <vscale x 1 x bfloat> [[OP2]], <vscale x 1 x i1> [[MASK]], i64 [[VL]], i64 0)
+// CHECK-RV64-NEXT: ret <vscale x 1 x bfloat> [[TMP0]]
+//
+vbfloat16mf4_t test_vfsgnjx_vv_bf16mf4_tumu(vbool64_t mask, vbfloat16mf4_t maskedoff, vbfloat16mf4_t op1, vbfloat16mf4_t op2, size_t vl) {
+ return __riscv_vfsgnjx_tumu(mask, maskedoff, op1, op2, vl);
+}
+
+// CHECK-RV64-LABEL: define dso_local <vscale x 1 x bfloat> @test_vfsgnjx_vf_bf16mf4_tumu(
+// CHECK-RV64-SAME: <vscale x 1 x i1> [[MASK:%.*]], <vscale x 1 x bfloat> [[MASKEDOFF:%.*]], <vscale x 1 x bfloat> [[OP1:%.*]], bfloat noundef [[OP2:%.*]], i64 noundef [[VL:%.*]]) #[[ATTR0]] {
+// CHECK-RV64-NEXT: [[ENTRY:.*:]]
+// CHECK-RV64-NEXT: [[TMP0:%.*]] = call <vscale x 1 x bfloat> @llvm.riscv.vfsgnjx.mask.nxv1bf16.bf16.i64(<vscale x 1 x bfloat> [[MASKEDOFF]], <vscale x 1 x bfloat> [[OP1]], bfloat [[OP2]], <vscale x 1 x i1> [[MASK]], i64 [[VL]], i64 0)
+// CHECK-RV64-NEXT: ret <vscale x 1 x bfloat> [[TMP0]]
+//
+vbfloat16mf4_t test_vfsgnjx_vf_bf16mf4_tumu(vbool64_t mask, vbfloat16mf4_t maskedoff, vbfloat16mf4_t op1, __bf16 op2, size_t vl) {
+ return __riscv_vfsgnjx_tumu(mask, maskedoff, op1, op2, vl);
+}
+
+// CHECK-RV64-LABEL: define dso_local <vscale x 2 x bfloat> @test_vfsgnjx_vv_bf16mf2_tumu(
+// CHECK-RV64-SAME: <vscale x 2 x i1> [[MASK:%.*]], <vscale x 2 x bfloat> [[MASKEDOFF:%.*]], <vscale x 2 x bfloat> [[OP1:%.*]], <vscale x 2 x bfloat> [[OP2:%.*]], i64 noundef [[VL:%.*]]) #[[ATTR0]] {
+// CHECK-RV64-NEXT: [[ENTRY:.*:]]
+// CHECK-RV64-NEXT: [[TMP0:%.*]] = call <vscale x 2 x bfloat> @llvm.riscv.vfsgnjx.mask.nxv2bf16.nxv2bf16.i64(<vscale x 2 x bfloat> [[MASKEDOFF]], <vscale x 2 x bfloat> [[OP1]], <vscale x 2 x bfloat> [[OP2]], <vscale x 2 x i1> [[MASK]], i64 [[VL]], i64 0)
+// CHECK-RV64-NEXT: ret <vscale x 2 x bfloat> [[TMP0]]
+//
+vbfloat16mf2_t test_vfsgnjx_vv_bf16mf2_tumu(vbool32_t mask, vbfloat16mf2_t maskedoff, vbfloat16mf2_t op1, vbfloat16mf2_t op2, size_t vl) {
+ return __riscv_vfsgnjx_tumu(mask, maskedoff, op1, op2, vl);
+}
+
+// CHECK-RV64-LABEL: define dso_local <vscale x 2 x bfloat> @test_vfsgnjx_vf_bf16mf2_tumu(
+// CHECK-RV64-SAME: <vscale x 2 x i1> [[MASK:%.*]], <vscale x 2 x bfloat> [[MASKEDOFF:%.*]], <vscale x 2 x bfloat> [[OP1:%.*]], bfloat noundef [[OP2:%.*]], i64 noundef [[VL:%.*]]) #[[ATTR0]] {
+// CHECK-RV64-NEXT: [[ENTRY:.*:]]
+// CHECK-RV64-NEXT: [[TMP0:%.*]] = call <vscale x 2 x bfloat> @llvm.riscv.vfsgnjx.mask.nxv2bf16.bf16.i64(<vscale x 2 x bfloat> [[MASKEDOFF]], <vscale x 2 x bfloat> [[OP1]], bfloat [[OP2]], <vscale x 2 x i1> [[MASK]], i64 [[VL]], i64 0)
+// CHECK-RV64-NEXT: ret <vscale x 2 x bfloat> [[TMP0]]
+//
+vbfloat16mf2_t test_vfsgnjx_vf_bf16mf2_tumu(vbool32_t mask, vbfloat16mf2_t maskedoff, vbfloat16mf2_t op1, __bf16 op2, size_t vl) {
+ return __riscv_vfsgnjx_tumu(mask, maskedoff, op1, op2, vl);
+}
+
+// CHECK-RV64-LABEL: define dso_local <vscale x 4 x bfloat> @test_vfsgnjx_vv_bf16m1_tumu(
+// CHECK-RV64-SAME: <vscale x 4 x i1> [[MASK:%.*]], <vscale x 4 x bfloat> [[MASKEDOFF:%.*]], <vscale x 4 x bfloat> [[OP1:%.*]], <vscale x 4 x bfloat> [[OP2:%.*]], i64 noundef [[VL:%.*]]) #[[ATTR0]] {
+// CHECK-RV64-NEXT: [[ENTRY:.*:]]
+// CHECK-RV64-NEXT: [[TMP0:%.*]] = call <vscale x 4 x bfloat> @llvm.riscv.vfsgnjx.mask.nxv4bf16.nxv4bf16.i64(<vscale x 4 x bfloat> [[MASKEDOFF]], <vscale x 4 x bfloat> [[OP1]], <vscale x 4 x bfloat> [[OP2]], <vscale x 4 x i1> [[MASK]], i64 [[VL]], i64 0)
+// CHECK-RV64-NEXT: ret <vscale x 4 x bfloat> [[TMP0]]
+//
+vbfloat16m1_t test_vfsgnjx_vv_bf16m1_tumu(vbool16_t mask, vbfloat16m1_t maskedoff, vbfloat16m1_t op1, vbfloat16m1_t op2, size_t vl) {
+ return __riscv_vfsgnjx_tumu(mask, maskedoff, op1, op2, vl);
+}
+
+// CHECK-RV64-LABEL: define dso_local <vscale x 4 x bfloat> @test_vfsgnjx_vf_bf16m1_tumu(
+// CHECK-RV64-SAME: <vscale x 4 x i1> [[MASK:%.*]], <vscale x 4 x bfloat> [[MASKEDOFF:%.*]], <vscale x 4 x bfloat> [[OP1:%.*]], bfloat noundef [[OP2:%.*]], i64 noundef [[VL:%.*]]) #[[ATTR0]] {
+// CHECK-RV64-NEXT: [[ENTRY:.*:]]
+// CHECK-RV64-NEXT: [[TMP0:%.*]] = call <vscale x 4 x bfloat> @llvm.riscv.vfsgnjx.mask.nxv4bf16.bf16.i64(<vscale x 4 x bfloat> [[MASKEDOFF]], <vscale x 4 x bfloat> [[OP1]], bfloat [[OP2]], <vscale x 4 x i1> [[MASK]], i64 [[VL]], i64 0)
+// CHECK-RV64-NEXT: ret <vscale x 4 x bfloat> [[TMP0]]
+//
+vbfloat16m1_t test_vfsgnjx_vf_bf16m1_tumu(vbool16_t mask, vbfloat16m1_t maskedoff, vbfloat16m1_t op1, __bf16 op2, size_t vl) {
+ return __riscv_vfsgnjx_tumu(mask, maskedoff, op1, op2, vl);
+}
+
+// CHECK-RV64-LABEL: define dso_local <vscale x 8 x bfloat> @test_vfsgnjx_vv_bf16m2_tumu(
+// CHECK-RV64-SAME: <vscale x 8 x i1> [[MASK:%.*]], <vscale x 8 x bfloat> [[MASKEDOFF:%.*]], <vscale x 8 x bfloat> [[OP1:%.*]], <vscale x 8 x bfloat> [[OP2:%.*]], i64 noundef [[VL:%.*]]) #[[ATTR0]] {
+// CHECK-RV64-NEXT: [[ENTRY:.*:]]
+// CHECK-RV64-NEXT: [[TMP0:%.*]] = call <vscale x 8 x bfloat> @llvm.riscv.vfsgnjx.mask.nxv8bf16.nxv8bf16.i64(<vscale x 8 x bfloat> [[MASKEDOFF]], <vscale x 8 x bfloat> [[OP1]], <vscale x 8 x bfloat> [[OP2]], <vscale x 8 x i1> [[MASK]], i64 [[VL]], i64 0)
+// CHECK-RV64-NEXT: ret <vscale x 8 x bfloat> [[TMP0]]
+//
+vbfloat16m2_t test_vfsgnjx_vv_bf16m2_tumu(vbool8_t mask, vbfloat16m2_t maskedoff, vbfloat16m2_t op1, vbfloat16m2_t op2, size_t vl) {
+ return __riscv_vfsgnjx_tumu(mask, maskedoff, op1, op2, vl);
+}
+
+// CHECK-RV64-LABEL: define dso_local <vscale x 8 x bfloat> @test_vfsgnjx_vf_bf16m2_tumu(
+// CHECK-RV64-SAME: <vscale x 8 x i1> [[MASK:%.*]], <vscale x 8 x bfloat> [[MASKEDOFF:%.*]], <vscale x 8 x bfloat> [[OP1:%.*]], bfloat noundef [[OP2:%.*]], i64 noundef [[VL:%.*]]) #[[ATTR0]] {
+// CHECK-RV64-NEXT: [[ENTRY:.*:]]
+// CHECK-RV64-NEXT: [[TMP0:%.*]] = call <vscale x 8 x bfloat> @llvm.riscv.vfsgnjx.mask.nxv8bf16.bf16.i64(<vscale x 8 x bfloat> [[MASKEDOFF]], <vscale x 8 x bfloat> [[OP1]], bfloat [[OP2]], <vscale x 8 x i1> [[MASK]], i64 [[VL]], i64 0)
+// CHECK-RV64-NEXT: ret <vscale x 8 x bfloat> [[TMP0]]
+//
+vbfloat16m2_t test_vfsgnjx_vf_bf16m2_tumu(vbool8_t mask, vbfloat16m2_t maskedoff, vbfloat16m2_t op1, __bf16 op2, size_t vl) {
+ return __riscv_vfsgnjx_tumu(mask, maskedoff, op1, op2, vl);
+}
+
+// CHECK-RV64-LABEL: define dso_local <vscale x 16 x bfloat> @test_vfsgnjx_vv_bf16m4_tumu(
+// CHECK-RV64-SAME: <vscale x 16 x i1> [[MASK:%.*]], <vscale x 16 x bfloat> [[MASKEDOFF:%.*]], <vscale x 16 x bfloat> [[OP1:%.*]], <vscale x 16 x bfloat> [[OP2:%.*]], i64 noundef [[VL:%.*]]) #[[ATTR0]] {
+// CHECK-RV64-NEXT: [[ENTRY:.*:]]
+// CHECK-RV64-NEXT: [[TMP0:%.*]] = call <vscale x 16 x bfloat> @llvm.riscv.vfsgnjx.mask.nxv16bf16.nxv16bf16.i64(<vscale x 16 x bfloat> [[MASKEDOFF]], <vscale x 16 x bfloat> [[OP1]], <vscale x 16 x bfloat> [[OP2]], <vscale x 16 x i1> [[MASK]], i64 [[VL]], i64 0)
+// CHECK-RV64-NEXT: ret <vscale x 16 x bfloat> [[TMP0]]
+//
+vbfloat16m4_t test_vfsgnjx_vv_bf16m4_tumu(vbool4_t mask, vbfloat16m4_t maskedoff, vbfloat16m4_t op1, vbfloat16m4_t op2, size_t vl) {
+ return __riscv_vfsgnjx_tumu(mask, maskedoff, op1, op2, vl);
+}
+
+// CHECK-RV64-LABEL: define dso_local <vscale x 16 x bfloat> @test_vfsgnjx_vf_bf16m4_tumu(
+// CHECK-RV64-SAME: <vscale x 16 x i1> [[MASK:%.*]], <vscale x 16 x bfloat> [[MASKEDOFF:%.*]], <vscale x 16 x bfloat> [[OP1:%.*]], bfloat noundef [[OP2:%.*]], i64 noundef [[VL:%.*]]) #[[ATTR0]] {
+// CHECK-RV64-NEXT: [[ENTRY:.*:]]
+// CHECK-RV64-NEXT: [[TMP0:%.*]] = call <vscale x 16 x bfloat> @llvm.riscv.vfsgnjx.mask.nxv16bf16.bf16.i64(<vscale x 16 x bfloat> [[MASKEDOFF]], <vscale x 16 x bfloat> [[OP1]], bfloat [[OP2]], <vscale x 16 x i1> [[MASK]], i64 [[VL]], i64 0)
+// CHECK-RV64-NEXT: ret <vscale x 16 x bfloat> [[TMP0]]
+//
+vbfloat16m4_t test_vfsgnjx_vf_bf16m4_tumu(vbool4_t mask, vbfloat16m4_t maskedoff, vbfloat16m4_t op1, __bf16 op2, size_t vl) {
+ return __riscv_vfsgnjx_tumu(mask, maskedoff, op1, op2, vl);
+}
+
+// CHECK-RV64-LABEL: define dso_local <vscale x 32 x bfloat> @test_vfsgnjx_vv_bf16m8_tumu(
+// CHECK-RV64-SAME: <vscale x 32 x i1> [[MASK:%.*]], <vscale x 32 x bfloat> [[MASKEDOFF:%.*]], <vscale x 32 x bfloat> [[OP1:%.*]], <vscale x 32 x bfloat> [[OP2:%.*]], i64 noundef [[VL:%.*]]) #[[ATTR0]] {
+// CHECK-RV64-NEXT: [[ENTRY:.*:]]
+// CHECK-RV64-NEXT: [[TMP0:%.*]] = call <vscale x 32 x bfloat> @llvm.riscv.vfsgnjx.mask.nxv32bf16.nxv32bf16.i64(<vscale x 32 x bfloat> [[MASKEDOFF]], <vscale x 32 x bfloat> [[OP1]], <vscale x 32 x bfloat> [[OP2]], <vscale x 32 x i1> [[MASK]], i64 [[VL]], i64 0)
+// CHECK-RV64-NEXT: ret <vscale x 32 x bfloat> [[TMP0]]
+//
+vbfloat16m8_t test_vfsgnjx_vv_bf16m8_tumu(vbool2_t mask, vbfloat16m8_t maskedoff, vbfloat16m8_t op1, vbfloat16m8_t op2, size_t vl) {
+ return __riscv_vfsgnjx_tumu(mask, maskedoff, op1, op2, vl);
+}
+
+// CHECK-RV64-LABEL: define dso_local <vscale x 32 x bfloat> @test_vfsgnjx_vf_bf16m8_tumu(
+// CHECK-RV64-SAME: <vscale x 32 x i1> [[MASK:%.*]], <vscale x 32 x bfloat> [[MASKEDOFF:%.*]], <vscale x 32 x bfloat> [[OP1:%.*]], bfloat noundef [[OP2:%.*]], i64 noundef [[VL:%.*]]) #[[ATTR0]] {
+// CHECK-RV64-NEXT: [[ENTRY:.*:]]
+// CHECK-RV64-NEXT: [[TMP0:%.*]] = call <vscale x 32 x bfloat> @llvm.riscv.vfsgnjx.mask.nxv32bf16.bf16.i64(<vscale x 32 x bfloat> [[MASKEDOFF]], <vscale x 32 x bfloat> [[OP1]], bfloat [[OP2]], <vscale x 32 x i1> [[MASK]], i64 [[VL]], i64 0)
+// CHECK-RV64-NEXT: ret <vscale x 32 x bfloat> [[TMP0]]
+//
+vbfloat16m8_t test_vfsgnjx_vf_bf16m8_tumu(vbool2_t mask, vbfloat16m8_t maskedoff, vbfloat16m8_t op1, __bf16 op2, size_t vl) {
+ return __riscv_vfsgnjx_tumu(mask, maskedoff, op1, op2, vl);
+}
+
+// CHECK-RV64-LABEL: define dso_local <vscale x 1 x bfloat> @test_vfsgnjx_vv_bf16mf4_mu(
+// CHECK-RV64-SAME: <vscale x 1 x i1> [[MASK:%.*]], <vscale x 1 x bfloat> [[MASKEDOFF:%.*]], <vscale x 1 x bfloat> [[OP1:%.*]], <vscale x 1 x bfloat> [[OP2:%.*]], i64 noundef [[VL:%.*]]) #[[ATTR0]] {
+// CHECK-RV64-NEXT: [[ENTRY:.*:]]
+// CHECK-RV64-NEXT: [[TMP0:%.*]] = call <vscale x 1 x bfloat> @llvm.riscv.vfsgnjx.mask.nxv1bf16.nxv1bf16.i64(<vscale x 1 x bfloat> [[MASKEDOFF]], <vscale x 1 x bfloat> [[OP1]], <vscale x 1 x bfloat> [[OP2]], <vscale x 1 x i1> [[MASK]], i64 [[VL]], i64 1)
+// CHECK-RV64-NEXT: ret <vscale x 1 x bfloat> [[TMP0]]
+//
+vbfloat16mf4_t test_vfsgnjx_vv_bf16mf4_mu(vbool64_t mask, vbfloat16mf4_t maskedoff, vbfloat16mf4_t op1, vbfloat16mf4_t op2, size_t vl) {
+ return __riscv_vfsgnjx_mu(mask, maskedoff, op1, op2, vl);
+}
+
+// CHECK-RV64-LABEL: define dso_local <vscale x 1 x bfloat> @test_vfsgnjx_vf_bf16mf4_mu(
+// CHECK-RV64-SAME: <vscale x 1 x i1> [[MASK:%.*]], <vscale x 1 x bfloat> [[MASKEDOFF:%.*]], <vscale x 1 x bfloat> [[OP1:%.*]], bfloat noundef [[OP2:%.*]], i64 noundef [[VL:%.*]]) #[[ATTR0]] {
+// CHECK-RV64-NEXT: [[ENTRY:.*:]]
+// CHECK-RV64-NEXT: [[TMP0:%.*]] = call <vscale x 1 x bfloat> @llvm.riscv.vfsgnjx.mask.nxv1bf16.bf16.i64(<vscale x 1 x bfloat> [[MASKEDOFF]], <vscale x 1 x bfloat> [[OP1]], bfloat [[OP2]], <vscale x 1 x i1> [[MASK]], i64 [[VL]], i64 1)
+// CHECK-RV64-NEXT: ret <vscale x 1 x bfloat> [[TMP0]]
+//
+vbfloat16mf4_t test_vfsgnjx_vf_bf16mf4_mu(vbool64_t mask, vbfloat16mf4_t maskedoff, vbfloat16mf4_t op1, __bf16 op2, size_t vl) {
+ return __riscv_vfsgnjx_mu(mask, maskedoff, op1, op2, vl);
+}
+
+// CHECK-RV64-LABEL: define dso_local <vscale x 2 x bfloat> @test_vfsgnjx_vv_bf16mf2_mu(
+// CHECK-RV64-SAME: <vscale x 2 x i1> [[MASK:%.*]], <vscale x 2 x bfloat> [[MASKEDOFF:%.*]], <vscale x 2 x bfloat> [[OP1:%.*]], <vscale x 2 x bfloat> [[OP2:%.*]], i64 noundef [[VL:%.*]]) #[[ATTR0]] {
+// CHECK-RV64-NEXT: [[ENTRY:.*:]]
+// CHECK-RV64-NEXT: [[TMP0:%.*]] = call <vscale x 2 x bfloat> @llvm.riscv.vfsgnjx.mask.nxv2bf16.nxv2bf16.i64(<vscale x 2 x bfloat> [[MASKEDOFF]], <vscale x 2 x bfloat> [[OP1]], <vscale x 2 x bfloat> [[OP2]], <vscale x 2 x i1> [[MASK]], i64 [[VL]], i64 1)
+// CHECK-RV64-NEXT: ret <vscale x 2 x bfloat> [[TMP0]]
+//
+vbfloat16mf2_t test_vfsgnjx_vv_bf16mf2_mu(vbool32_t mask, vbfloat16mf2_t maskedoff, vbfloat16mf2_t op1, vbfloat16mf2_t op2, size_t vl) {
+ return __riscv_vfsgnjx_mu(mask, maskedoff, op1, op2, vl);
+}
+
+// CHECK-RV64-LABEL: define dso_local <vscale x 2 x bfloat> @test_vfsgnjx_vf_bf16mf2_mu(
+// CHECK-RV64-SAME: <vscale x 2 x i1> [[MASK:%.*]], <vscale x 2 x bfloat> [[MASKEDOFF:%.*]], <vscale x 2 x bfloat> [[OP1:%.*]], bfloat noundef [[OP2:%.*]], i64 noundef [[VL:%.*]]) #[[ATTR0]] {
+// CHECK-RV64-NEXT: [[ENTRY:.*:]]
+// CHECK-RV64-NEXT: [[TMP0:%.*]] = call <vscale x 2 x bfloat> @llvm.riscv.vfsgnjx.mask.nxv2bf16.bf16.i64(<vscale x 2 x bfloat> [[MASKEDOFF]], <vscale x 2 x bfloat> [[OP1]], bfloat [[OP2]], <vscale x 2 x i1> [[MASK]], i64 [[VL]], i64 1)
+// CHECK-RV64-NEXT: ret <vscale x 2 x bfloat> [[TMP0]]
+//
+vbfloat16mf2_t test_vfsgnjx_vf_bf16mf2_mu(vbool32_t mask, vbfloat16mf2_t maskedoff, vbfloat16mf2_t op1, __bf16 op2, size_t vl) {
+ return __riscv_vfsgnjx_mu(mask, maskedoff, op1, op2, vl);
+}
+
+// CHECK-RV64-LABEL: define dso_local <vscale x 4 x bfloat> @test_vfsgnjx_vv_bf16m1_mu(
+// CHECK-RV64-SAME: <vscale x 4 x i1> [[MASK:%.*]], <vscale x 4 x bfloat> [[MASKEDOFF:%.*]], <vscale x 4 x bfloat> [[OP1:%.*]], <vscale x 4 x bfloat> [[OP2:%.*]], i64 noundef [[VL:%.*]]) #[[ATTR0]] {
+// CHECK-RV64-NEXT: [[ENTRY:.*:]]
+// CHECK-RV64-NEXT: [[TMP0:%.*]] = call <vscale x 4 x bfloat> @llvm.riscv.vfsgnjx.mask.nxv4bf16.nxv4bf16.i64(<vscale x 4 x bfloat> [[MASKEDOFF]], <vscale x 4 x bfloat> [[OP1]], <vscale x 4 x bfloat> [[OP2]], <vscale x 4 x i1> [[MASK]], i64 [[VL]], i64 1)
+// CHECK-RV64-NEXT: ret <vscale x 4 x bfloat> [[TMP0]]
+//
+vbfloat16m1_t test_vfsgnjx_vv_bf16m1_mu(vbool16_t mask, vbfloat16m1_t maskedoff, vbfloat16m1_t op1, vbfloat16m1_t op2, size_t vl) {
+ return __riscv_vfsgnjx_mu(mask, maskedoff, op1, op2, vl);
+}
+
+// CHECK-RV64-LABEL: define dso_local <vscale x 4 x bfloat> @test_vfsgnjx_vf_bf16m1_mu(
+// CHECK-RV64-SAME: <vscale x 4 x i1> [[MASK:%.*]], <vscale x 4 x bfloat> [[MASKEDOFF:%.*]], <vscale x 4 x bfloat> [[OP1:%.*]], bfloat noundef [[OP2:%.*]], i64 noundef [[VL:%.*]]) #[[ATTR0]] {
+// CHECK-RV64-NEXT: [[ENTRY:.*:]]
+// CHECK-RV64-NEXT: [[TMP0:%.*]] = call <vscale x 4 x bfloat> @llvm.riscv.vfsgnjx.mask.nxv4bf16.bf16.i64(<vscale x 4 x bfloat> [[MASKEDOFF]], <vscale x 4 x bfloat> [[OP1]], bfloat [[OP2]], <vscale x 4 x i1> [[MASK]], i64 [[VL]], i64 1)
+// CHECK-RV64-NEXT: ret <vscale x 4 x bfloat> [[TMP0]]
+//
+vbfloat16m1_t test_vfsgnjx_vf_bf16m1_mu(vbool16_t mask, vbfloat16m1_t maskedoff, vbfloat16m1_t op1, __bf16 op2, size_t vl) {
+ return __riscv_vfsgnjx_mu(mask, maskedoff, op1, op2, vl);
+}
+
+// CHECK-RV64-LABEL: define dso_local <vscale x 8 x bfloat> @test_vfsgnjx_vv_bf16m2_mu(
+// CHECK-RV64-SAME: <vscale x 8 x i1> [[MASK:%.*]], <vscale x 8 x bfloat> [[MASKEDOFF:%.*]], <vscale x 8 x bfloat> [[OP1:%.*]], <vscale x 8 x bfloat> [[OP2:%.*]], i64 noundef [[VL:%.*]]) #[[ATTR0]] {
+// CHECK-RV64-NEXT: [[ENTRY:.*:]]
+// CHECK-RV64-NEXT: [[TMP0:%.*]] = call <vscale x 8 x bfloat> @llvm.riscv.vfsgnjx.mask.nxv8bf16.nxv8bf16.i64(<vscale x 8 x bfloat> [[MASKEDOFF]], <vscale x 8 x bfloat> [[OP1]], <vscale x 8 x bfloat> [[OP2]], <vscale x 8 x i1> [[MASK]], i64 [[VL]], i64 1)
+// CHECK-RV64-NEXT: ret <vscale x 8 x bfloat> [[TMP0]]
+//
+vbfloat16m2_t test_vfsgnjx_vv_bf16m2_mu(vbool8_t mask, vbfloat16m2_t maskedoff, vbfloat16m2_t op1, vbfloat16m2_t op2, size_t vl) {
+ return __riscv_vfsgnjx_mu(mask, maskedoff, op1, op2, vl);
+}
+
+// CHECK-RV64-LABEL: define dso_local <vscale x 8 x bfloat> @test_vfsgnjx_vf_bf16m2_mu(
+// CHECK-RV64-SAME: <vscale x 8 x i1> [[MASK:%.*]], <vscale x 8 x bfloat> [[MASKEDOFF:%.*]], <vscale x 8 x bfloat> [[OP1:%.*]], bfloat noundef [[OP2:%.*]], i64 noundef [[VL:%.*]]) #[[ATTR0]] {
+// CHECK-RV64-NEXT: [[ENTRY:.*:]]
+// CHECK-RV64-NEXT: [[TMP0:%.*]] = call <vscale x 8 x bfloat> @llvm.riscv.vfsgnjx.mask.nxv8bf16.bf16.i64(<vscale x 8 x bfloat> [[MASKEDOFF]], <vscale x 8 x bfloat> [[OP1]], bfloat [[OP2]], <vscale x 8 x i1> [[MASK]], i64 [[VL]], i64 1)
+// CHECK-RV64-NEXT: ret <vscale x 8 x bfloat> [[TMP0]]
+//
+vbfloat16m2_t test_vfsgnjx_vf_bf16m2_mu(vbool8_t mask, vbfloat16m2_t maskedoff, vbfloat16m2_t op1, __bf16 op2, size_t vl) {
+ return __riscv_vfsgnjx_mu(mask, maskedoff, op1, op2, vl);
+}
+
+// CHECK-RV64-LABEL: define dso_local <vscale x 16 x bfloat> @test_vfsgnjx_vv_bf16m4_mu(
+// CHECK-RV64-SAME: <vscale x 16 x i1> [[MASK:%.*]], <vscale x 16 x bfloat> [[MASKEDOFF:%.*]], <vscale x 16 x bfloat> [[OP1:%.*]], <vscale x 16 x bfloat> [[OP2:%.*]], i64 noundef [[VL:%.*]]) #[[ATTR0]] {
+// CHECK-RV64-NEXT: [[ENTRY:.*:]]
+// CHECK-RV64-NEXT: [[TMP0:%.*]] = call <vscale x 16 x bfloat> @llvm.riscv.vfsgnjx.mask.nxv16bf16.nxv16bf16.i64(<vscale x 16 x bfloat> [[MASKEDOFF]], <vscale x 16 x bfloat> [[OP1]], <vscale x 16 x bfloat> [[OP2]], <vscale x 16 x i1> [[MASK]], i64 [[VL]], i64 1)
+// CHECK-RV64-NEXT: ret <vscale x 16 x bfloat> [[TMP0]]
+//
+vbfloat16m4_t test_vfsgnjx_vv_bf16m4_mu(vbool4_t mask, vbfloat16m4_t maskedoff, vbfloat16m4_t op1, vbfloat16m4_t op2, size_t vl) {
+ return __riscv_vfsgnjx_mu(mask, maskedoff, op1, op2, vl);
+}
+
+// CHECK-RV64-LABEL: define dso_local <vscale x 16 x bfloat> @test_vfsgnjx_vf_bf16m4_mu(
+// CHECK-RV64-SAME: <vscale x 16 x i1> [[MASK:%.*]], <vscale x 16 x bfloat> [[MASKEDOFF:%.*]], <vscale x 16 x bfloat> [[OP1:%.*]], bfloat noundef [[OP2:%.*]], i64 noundef [[VL:%.*]]) #[[ATTR0]] {
+// CHECK-RV64-NEXT: [[ENTRY:.*:]]
+// CHECK-RV64-NEXT: [[TMP0:%.*]] = call <vscale x 16 x bfloat> @llvm.riscv.vfsgnjx.mask.nxv16bf16.bf16.i64(<vscale x 16 x bfloat> [[MASKEDOFF]], <vscale x 16 x bfloat> [[OP1]], bfloat [[OP2]], <vscale x 16 x i1> [[MASK]], i64 [[VL]], i64 1)
+// CHECK-RV64-NEXT: ret <vscale x 16 x bfloat> [[TMP0]]
+//
+vbfloat16m4_t test_vfsgnjx_vf_bf16m4_mu(vbool4_t mask, vbfloat16m4_t maskedoff, vbfloat16m4_t op1, __bf16 op2, size_t vl) {
+ return __riscv_vfsgnjx_mu(mask, maskedoff, op1, op2, vl);
+}
+
+// CHECK-RV64-LABEL: define dso_local <vscale x 32 x bfloat> @test_vfsgnjx_vv_bf16m8_mu(
+// CHECK-RV64-SAME: <vscale x 32 x i1> [[MASK:%.*]], <vscale x 32 x bfloat> [[MASKEDOFF:%.*]], <vscale x 32 x bfloat> [[OP1:%.*]], <vscale x 32 x bfloat> [[OP2:%.*]], i64 noundef [[VL:%.*]]) #[[ATTR0]] {
+// CHECK-RV64-NEXT: [[ENTRY:.*:]]
+// CHECK-RV64-NEXT: [[TMP0:%.*]] = call <vscale x 32 x bfloat> @llvm.riscv.vfsgnjx.mask.nxv32bf16.nxv32bf16.i64(<vscale x 32 x bfloat> [[MASKEDOFF]], <vscale x 32 x bfloat> [[OP1]], <vscale x 32 x bfloat> [[OP2]], <vscale x 32 x i1> [[MASK]], i64 [[VL]], i64 1)
+// CHECK-RV64-NEXT: ret <vscale x 32 x bfloat> [[TMP0]]
+//
+vbfloat16m8_t test_vfsgnjx_vv_bf16m8_mu(vbool2_t mask, vbfloat16m8_t maskedoff, vbfloat16m8_t op1, vbfloat16m8_t op2, size_t vl) {
+ return __riscv_vfsgnjx_mu(mask, maskedoff, op1, op2, vl);
+}
+
+// CHECK-RV64-LABEL: define dso_local <vscale x 32 x bfloat> @test_vfsgnjx_vf_bf16m8_mu(
+// CHECK-RV64-SAME: <vscale x 32 x i1> [[MASK:%.*]], <vscale x 32 x bfloat> [[MASKEDOFF:%.*]], <vscale x 32 x bfloat> [[OP1:%.*]], bfloat noundef [[OP2:%.*]], i64 noundef [[VL:%.*]]) #[[ATTR0]] {
+// CHECK-RV64-NEXT: [[ENTRY:.*:]]
+// CHECK-RV64-NEXT: [[TMP0:%.*]] = call <vscale x 32 x bfloat> @llvm.riscv.vfsgnjx.mask.nxv32bf16.bf16.i64(<vscale x 32 x bfloat> [[MASKEDOFF]], <vscale x 32 x bfloat> [[OP1]], bfloat [[OP2]], <vscale x 32 x i1> [[MASK]], i64 [[VL]], i64 1)
+// CHECK-RV64-NEXT: ret <vscale x 32 x bfloat> [[TMP0]]
+//
+vbfloat16m8_t test_vfsgnjx_vf_bf16m8_mu(vbool2_t mask, vbfloat16m8_t maskedoff, vbfloat16m8_t op1, __bf16 op2, size_t vl) {
+ return __riscv_vfsgnjx_mu(mask, maskedoff, op1, op2, vl);
+}
+
diff --git a/clang/test/CodeGen/RISCV/rvv-intrinsics-autogenerated/zvfbfa/policy/overloaded/vfslide1down.c b/clang/test/CodeGen/RISCV/rvv-intrinsics-autogenerated/zvfbfa/policy/overloaded/vfslide1down.c
new file mode 100644
index 0000000..691302e
--- /dev/null
+++ b/clang/test/CodeGen/RISCV/rvv-intrinsics-autogenerated/zvfbfa/policy/overloaded/vfslide1down.c
@@ -0,0 +1,249 @@
+// NOTE: Assertions have been autogenerated by utils/update_cc_test_checks.py UTC_ARGS: --version 5
+// REQUIRES: riscv-registered-target
+// RUN: %clang_cc1 -triple riscv64 -target-feature +v \
+// RUN: -target-feature +experimental-zvfbfa -disable-O0-optnone \
+// RUN: -emit-llvm %s -o - | opt -S -passes=mem2reg | \
+// RUN: FileCheck --check-prefix=CHECK-RV64 %s
+
+#include <riscv_vector.h>
+
+// CHECK-RV64-LABEL: define dso_local <vscale x 1 x bfloat> @test_vfslide1down_vf_bf16mf4_tu(
+// CHECK-RV64-SAME: <vscale x 1 x bfloat> [[MASKEDOFF:%.*]], <vscale x 1 x bfloat> [[SRC:%.*]], bfloat noundef [[VALUE:%.*]], i64 noundef [[VL:%.*]]) #[[ATTR0:[0-9]+]] {
+// CHECK-RV64-NEXT: [[ENTRY:.*:]]
+// CHECK-RV64-NEXT: [[TMP0:%.*]] = call <vscale x 1 x bfloat> @llvm.riscv.vfslide1down.nxv1bf16.bf16.i64(<vscale x 1 x bfloat> [[MASKEDOFF]], <vscale x 1 x bfloat> [[SRC]], bfloat [[VALUE]], i64 [[VL]])
+// CHECK-RV64-NEXT: ret <vscale x 1 x bfloat> [[TMP0]]
+//
+vbfloat16mf4_t test_vfslide1down_vf_bf16mf4_tu(vbfloat16mf4_t maskedoff, vbfloat16mf4_t src, __bf16 value, size_t vl) {
+ return __riscv_vfslide1down_tu(maskedoff, src, value, vl);
+}
+
+// CHECK-RV64-LABEL: define dso_local <vscale x 2 x bfloat> @test_vfslide1down_vf_bf16mf2_tu(
+// CHECK-RV64-SAME: <vscale x 2 x bfloat> [[MASKEDOFF:%.*]], <vscale x 2 x bfloat> [[SRC:%.*]], bfloat noundef [[VALUE:%.*]], i64 noundef [[VL:%.*]]) #[[ATTR0]] {
+// CHECK-RV64-NEXT: [[ENTRY:.*:]]
+// CHECK-RV64-NEXT: [[TMP0:%.*]] = call <vscale x 2 x bfloat> @llvm.riscv.vfslide1down.nxv2bf16.bf16.i64(<vscale x 2 x bfloat> [[MASKEDOFF]], <vscale x 2 x bfloat> [[SRC]], bfloat [[VALUE]], i64 [[VL]])
+// CHECK-RV64-NEXT: ret <vscale x 2 x bfloat> [[TMP0]]
+//
+vbfloat16mf2_t test_vfslide1down_vf_bf16mf2_tu(vbfloat16mf2_t maskedoff, vbfloat16mf2_t src, __bf16 value, size_t vl) {
+ return __riscv_vfslide1down_tu(maskedoff, src, value, vl);
+}
+
+// CHECK-RV64-LABEL: define dso_local <vscale x 4 x bfloat> @test_vfslide1down_vf_bf16m1_tu(
+// CHECK-RV64-SAME: <vscale x 4 x bfloat> [[MASKEDOFF:%.*]], <vscale x 4 x bfloat> [[SRC:%.*]], bfloat noundef [[VALUE:%.*]], i64 noundef [[VL:%.*]]) #[[ATTR0]] {
+// CHECK-RV64-NEXT: [[ENTRY:.*:]]
+// CHECK-RV64-NEXT: [[TMP0:%.*]] = call <vscale x 4 x bfloat> @llvm.riscv.vfslide1down.nxv4bf16.bf16.i64(<vscale x 4 x bfloat> [[MASKEDOFF]], <vscale x 4 x bfloat> [[SRC]], bfloat [[VALUE]], i64 [[VL]])
+// CHECK-RV64-NEXT: ret <vscale x 4 x bfloat> [[TMP0]]
+//
+vbfloat16m1_t test_vfslide1down_vf_bf16m1_tu(vbfloat16m1_t maskedoff, vbfloat16m1_t src, __bf16 value, size_t vl) {
+ return __riscv_vfslide1down_tu(maskedoff, src, value, vl);
+}
+
+// CHECK-RV64-LABEL: define dso_local <vscale x 8 x bfloat> @test_vfslide1down_vf_bf16m2_tu(
+// CHECK-RV64-SAME: <vscale x 8 x bfloat> [[MASKEDOFF:%.*]], <vscale x 8 x bfloat> [[SRC:%.*]], bfloat noundef [[VALUE:%.*]], i64 noundef [[VL:%.*]]) #[[ATTR0]] {
+// CHECK-RV64-NEXT: [[ENTRY:.*:]]
+// CHECK-RV64-NEXT: [[TMP0:%.*]] = call <vscale x 8 x bfloat> @llvm.riscv.vfslide1down.nxv8bf16.bf16.i64(<vscale x 8 x bfloat> [[MASKEDOFF]], <vscale x 8 x bfloat> [[SRC]], bfloat [[VALUE]], i64 [[VL]])
+// CHECK-RV64-NEXT: ret <vscale x 8 x bfloat> [[TMP0]]
+//
+vbfloat16m2_t test_vfslide1down_vf_bf16m2_tu(vbfloat16m2_t maskedoff, vbfloat16m2_t src, __bf16 value, size_t vl) {
+ return __riscv_vfslide1down_tu(maskedoff, src, value, vl);
+}
+
+// CHECK-RV64-LABEL: define dso_local <vscale x 16 x bfloat> @test_vfslide1down_vf_bf16m4_tu(
+// CHECK-RV64-SAME: <vscale x 16 x bfloat> [[MASKEDOFF:%.*]], <vscale x 16 x bfloat> [[SRC:%.*]], bfloat noundef [[VALUE:%.*]], i64 noundef [[VL:%.*]]) #[[ATTR0]] {
+// CHECK-RV64-NEXT: [[ENTRY:.*:]]
+// CHECK-RV64-NEXT: [[TMP0:%.*]] = call <vscale x 16 x bfloat> @llvm.riscv.vfslide1down.nxv16bf16.bf16.i64(<vscale x 16 x bfloat> [[MASKEDOFF]], <vscale x 16 x bfloat> [[SRC]], bfloat [[VALUE]], i64 [[VL]])
+// CHECK-RV64-NEXT: ret <vscale x 16 x bfloat> [[TMP0]]
+//
+vbfloat16m4_t test_vfslide1down_vf_bf16m4_tu(vbfloat16m4_t maskedoff, vbfloat16m4_t src, __bf16 value, size_t vl) {
+ return __riscv_vfslide1down_tu(maskedoff, src, value, vl);
+}
+
+// CHECK-RV64-LABEL: define dso_local <vscale x 32 x bfloat> @test_vfslide1down_vf_bf16m8_tu(
+// CHECK-RV64-SAME: <vscale x 32 x bfloat> [[MASKEDOFF:%.*]], <vscale x 32 x bfloat> [[SRC:%.*]], bfloat noundef [[VALUE:%.*]], i64 noundef [[VL:%.*]]) #[[ATTR0]] {
+// CHECK-RV64-NEXT: [[ENTRY:.*:]]
+// CHECK-RV64-NEXT: [[TMP0:%.*]] = call <vscale x 32 x bfloat> @llvm.riscv.vfslide1down.nxv32bf16.bf16.i64(<vscale x 32 x bfloat> [[MASKEDOFF]], <vscale x 32 x bfloat> [[SRC]], bfloat [[VALUE]], i64 [[VL]])
+// CHECK-RV64-NEXT: ret <vscale x 32 x bfloat> [[TMP0]]
+//
+vbfloat16m8_t test_vfslide1down_vf_bf16m8_tu(vbfloat16m8_t maskedoff, vbfloat16m8_t src, __bf16 value, size_t vl) {
+ return __riscv_vfslide1down_tu(maskedoff, src, value, vl);
+}
+
+// CHECK-RV64-LABEL: define dso_local <vscale x 1 x bfloat> @test_vfslide1down_vf_bf16mf4_tum(
+// CHECK-RV64-SAME: <vscale x 1 x i1> [[MASK:%.*]], <vscale x 1 x bfloat> [[MASKEDOFF:%.*]], <vscale x 1 x bfloat> [[SRC:%.*]], bfloat noundef [[VALUE:%.*]], i64 noundef [[VL:%.*]]) #[[ATTR0]] {
+// CHECK-RV64-NEXT: [[ENTRY:.*:]]
+// CHECK-RV64-NEXT: [[TMP0:%.*]] = call <vscale x 1 x bfloat> @llvm.riscv.vfslide1down.mask.nxv1bf16.bf16.i64(<vscale x 1 x bfloat> [[MASKEDOFF]], <vscale x 1 x bfloat> [[SRC]], bfloat [[VALUE]], <vscale x 1 x i1> [[MASK]], i64 [[VL]], i64 2)
+// CHECK-RV64-NEXT: ret <vscale x 1 x bfloat> [[TMP0]]
+//
+vbfloat16mf4_t test_vfslide1down_vf_bf16mf4_tum(vbool64_t mask, vbfloat16mf4_t maskedoff, vbfloat16mf4_t src, __bf16 value, size_t vl) {
+ return __riscv_vfslide1down_tum(mask, maskedoff, src, value, vl);
+}
+
+// CHECK-RV64-LABEL: define dso_local <vscale x 2 x bfloat> @test_vfslide1down_vf_bf16mf2_tum(
+// CHECK-RV64-SAME: <vscale x 2 x i1> [[MASK:%.*]], <vscale x 2 x bfloat> [[MASKEDOFF:%.*]], <vscale x 2 x bfloat> [[SRC:%.*]], bfloat noundef [[VALUE:%.*]], i64 noundef [[VL:%.*]]) #[[ATTR0]] {
+// CHECK-RV64-NEXT: [[ENTRY:.*:]]
+// CHECK-RV64-NEXT: [[TMP0:%.*]] = call <vscale x 2 x bfloat> @llvm.riscv.vfslide1down.mask.nxv2bf16.bf16.i64(<vscale x 2 x bfloat> [[MASKEDOFF]], <vscale x 2 x bfloat> [[SRC]], bfloat [[VALUE]], <vscale x 2 x i1> [[MASK]], i64 [[VL]], i64 2)
+// CHECK-RV64-NEXT: ret <vscale x 2 x bfloat> [[TMP0]]
+//
+vbfloat16mf2_t test_vfslide1down_vf_bf16mf2_tum(vbool32_t mask, vbfloat16mf2_t maskedoff, vbfloat16mf2_t src, __bf16 value, size_t vl) {
+ return __riscv_vfslide1down_tum(mask, maskedoff, src, value, vl);
+}
+
+// CHECK-RV64-LABEL: define dso_local <vscale x 4 x bfloat> @test_vfslide1down_vf_bf16m1_tum(
+// CHECK-RV64-SAME: <vscale x 4 x i1> [[MASK:%.*]], <vscale x 4 x bfloat> [[MASKEDOFF:%.*]], <vscale x 4 x bfloat> [[SRC:%.*]], bfloat noundef [[VALUE:%.*]], i64 noundef [[VL:%.*]]) #[[ATTR0]] {
+// CHECK-RV64-NEXT: [[ENTRY:.*:]]
+// CHECK-RV64-NEXT: [[TMP0:%.*]] = call <vscale x 4 x bfloat> @llvm.riscv.vfslide1down.mask.nxv4bf16.bf16.i64(<vscale x 4 x bfloat> [[MASKEDOFF]], <vscale x 4 x bfloat> [[SRC]], bfloat [[VALUE]], <vscale x 4 x i1> [[MASK]], i64 [[VL]], i64 2)
+// CHECK-RV64-NEXT: ret <vscale x 4 x bfloat> [[TMP0]]
+//
+vbfloat16m1_t test_vfslide1down_vf_bf16m1_tum(vbool16_t mask, vbfloat16m1_t maskedoff, vbfloat16m1_t src, __bf16 value, size_t vl) {
+ return __riscv_vfslide1down_tum(mask, maskedoff, src, value, vl);
+}
+
+// CHECK-RV64-LABEL: define dso_local <vscale x 8 x bfloat> @test_vfslide1down_vf_bf16m2_tum(
+// CHECK-RV64-SAME: <vscale x 8 x i1> [[MASK:%.*]], <vscale x 8 x bfloat> [[MASKEDOFF:%.*]], <vscale x 8 x bfloat> [[SRC:%.*]], bfloat noundef [[VALUE:%.*]], i64 noundef [[VL:%.*]]) #[[ATTR0]] {
+// CHECK-RV64-NEXT: [[ENTRY:.*:]]
+// CHECK-RV64-NEXT: [[TMP0:%.*]] = call <vscale x 8 x bfloat> @llvm.riscv.vfslide1down.mask.nxv8bf16.bf16.i64(<vscale x 8 x bfloat> [[MASKEDOFF]], <vscale x 8 x bfloat> [[SRC]], bfloat [[VALUE]], <vscale x 8 x i1> [[MASK]], i64 [[VL]], i64 2)
+// CHECK-RV64-NEXT: ret <vscale x 8 x bfloat> [[TMP0]]
+//
+vbfloat16m2_t test_vfslide1down_vf_bf16m2_tum(vbool8_t mask, vbfloat16m2_t maskedoff, vbfloat16m2_t src, __bf16 value, size_t vl) {
+ return __riscv_vfslide1down_tum(mask, maskedoff, src, value, vl);
+}
+
+// CHECK-RV64-LABEL: define dso_local <vscale x 16 x bfloat> @test_vfslide1down_vf_bf16m4_tum(
+// CHECK-RV64-SAME: <vscale x 16 x i1> [[MASK:%.*]], <vscale x 16 x bfloat> [[MASKEDOFF:%.*]], <vscale x 16 x bfloat> [[SRC:%.*]], bfloat noundef [[VALUE:%.*]], i64 noundef [[VL:%.*]]) #[[ATTR0]] {
+// CHECK-RV64-NEXT: [[ENTRY:.*:]]
+// CHECK-RV64-NEXT: [[TMP0:%.*]] = call <vscale x 16 x bfloat> @llvm.riscv.vfslide1down.mask.nxv16bf16.bf16.i64(<vscale x 16 x bfloat> [[MASKEDOFF]], <vscale x 16 x bfloat> [[SRC]], bfloat [[VALUE]], <vscale x 16 x i1> [[MASK]], i64 [[VL]], i64 2)
+// CHECK-RV64-NEXT: ret <vscale x 16 x bfloat> [[TMP0]]
+//
+vbfloat16m4_t test_vfslide1down_vf_bf16m4_tum(vbool4_t mask, vbfloat16m4_t maskedoff, vbfloat16m4_t src, __bf16 value, size_t vl) {
+ return __riscv_vfslide1down_tum(mask, maskedoff, src, value, vl);
+}
+
+// CHECK-RV64-LABEL: define dso_local <vscale x 32 x bfloat> @test_vfslide1down_vf_bf16m8_tum(
+// CHECK-RV64-SAME: <vscale x 32 x i1> [[MASK:%.*]], <vscale x 32 x bfloat> [[MASKEDOFF:%.*]], <vscale x 32 x bfloat> [[SRC:%.*]], bfloat noundef [[VALUE:%.*]], i64 noundef [[VL:%.*]]) #[[ATTR0]] {
+// CHECK-RV64-NEXT: [[ENTRY:.*:]]
+// CHECK-RV64-NEXT: [[TMP0:%.*]] = call <vscale x 32 x bfloat> @llvm.riscv.vfslide1down.mask.nxv32bf16.bf16.i64(<vscale x 32 x bfloat> [[MASKEDOFF]], <vscale x 32 x bfloat> [[SRC]], bfloat [[VALUE]], <vscale x 32 x i1> [[MASK]], i64 [[VL]], i64 2)
+// CHECK-RV64-NEXT: ret <vscale x 32 x bfloat> [[TMP0]]
+//
+vbfloat16m8_t test_vfslide1down_vf_bf16m8_tum(vbool2_t mask, vbfloat16m8_t maskedoff, vbfloat16m8_t src, __bf16 value, size_t vl) {
+ return __riscv_vfslide1down_tum(mask, maskedoff, src, value, vl);
+}
+
+// CHECK-RV64-LABEL: define dso_local <vscale x 1 x bfloat> @test_vfslide1down_vf_bf16mf4_tumu(
+// CHECK-RV64-SAME: <vscale x 1 x i1> [[MASK:%.*]], <vscale x 1 x bfloat> [[MASKEDOFF:%.*]], <vscale x 1 x bfloat> [[SRC:%.*]], bfloat noundef [[VALUE:%.*]], i64 noundef [[VL:%.*]]) #[[ATTR0]] {
+// CHECK-RV64-NEXT: [[ENTRY:.*:]]
+// CHECK-RV64-NEXT: [[TMP0:%.*]] = call <vscale x 1 x bfloat> @llvm.riscv.vfslide1down.mask.nxv1bf16.bf16.i64(<vscale x 1 x bfloat> [[MASKEDOFF]], <vscale x 1 x bfloat> [[SRC]], bfloat [[VALUE]], <vscale x 1 x i1> [[MASK]], i64 [[VL]], i64 0)
+// CHECK-RV64-NEXT: ret <vscale x 1 x bfloat> [[TMP0]]
+//
+vbfloat16mf4_t test_vfslide1down_vf_bf16mf4_tumu(vbool64_t mask, vbfloat16mf4_t maskedoff, vbfloat16mf4_t src, __bf16 value, size_t vl) {
+ return __riscv_vfslide1down_tumu(mask, maskedoff, src, value, vl);
+}
+
+// CHECK-RV64-LABEL: define dso_local <vscale x 2 x bfloat> @test_vfslide1down_vf_bf16mf2_tumu(
+// CHECK-RV64-SAME: <vscale x 2 x i1> [[MASK:%.*]], <vscale x 2 x bfloat> [[MASKEDOFF:%.*]], <vscale x 2 x bfloat> [[SRC:%.*]], bfloat noundef [[VALUE:%.*]], i64 noundef [[VL:%.*]]) #[[ATTR0]] {
+// CHECK-RV64-NEXT: [[ENTRY:.*:]]
+// CHECK-RV64-NEXT: [[TMP0:%.*]] = call <vscale x 2 x bfloat> @llvm.riscv.vfslide1down.mask.nxv2bf16.bf16.i64(<vscale x 2 x bfloat> [[MASKEDOFF]], <vscale x 2 x bfloat> [[SRC]], bfloat [[VALUE]], <vscale x 2 x i1> [[MASK]], i64 [[VL]], i64 0)
+// CHECK-RV64-NEXT: ret <vscale x 2 x bfloat> [[TMP0]]
+//
+vbfloat16mf2_t test_vfslide1down_vf_bf16mf2_tumu(vbool32_t mask, vbfloat16mf2_t maskedoff, vbfloat16mf2_t src, __bf16 value, size_t vl) {
+ return __riscv_vfslide1down_tumu(mask, maskedoff, src, value, vl);
+}
+
+// CHECK-RV64-LABEL: define dso_local <vscale x 4 x bfloat> @test_vfslide1down_vf_bf16m1_tumu(
+// CHECK-RV64-SAME: <vscale x 4 x i1> [[MASK:%.*]], <vscale x 4 x bfloat> [[MASKEDOFF:%.*]], <vscale x 4 x bfloat> [[SRC:%.*]], bfloat noundef [[VALUE:%.*]], i64 noundef [[VL:%.*]]) #[[ATTR0]] {
+// CHECK-RV64-NEXT: [[ENTRY:.*:]]
+// CHECK-RV64-NEXT: [[TMP0:%.*]] = call <vscale x 4 x bfloat> @llvm.riscv.vfslide1down.mask.nxv4bf16.bf16.i64(<vscale x 4 x bfloat> [[MASKEDOFF]], <vscale x 4 x bfloat> [[SRC]], bfloat [[VALUE]], <vscale x 4 x i1> [[MASK]], i64 [[VL]], i64 0)
+// CHECK-RV64-NEXT: ret <vscale x 4 x bfloat> [[TMP0]]
+//
+vbfloat16m1_t test_vfslide1down_vf_bf16m1_tumu(vbool16_t mask, vbfloat16m1_t maskedoff, vbfloat16m1_t src, __bf16 value, size_t vl) {
+ return __riscv_vfslide1down_tumu(mask, maskedoff, src, value, vl);
+}
+
+// CHECK-RV64-LABEL: define dso_local <vscale x 8 x bfloat> @test_vfslide1down_vf_bf16m2_tumu(
+// CHECK-RV64-SAME: <vscale x 8 x i1> [[MASK:%.*]], <vscale x 8 x bfloat> [[MASKEDOFF:%.*]], <vscale x 8 x bfloat> [[SRC:%.*]], bfloat noundef [[VALUE:%.*]], i64 noundef [[VL:%.*]]) #[[ATTR0]] {
+// CHECK-RV64-NEXT: [[ENTRY:.*:]]
+// CHECK-RV64-NEXT: [[TMP0:%.*]] = call <vscale x 8 x bfloat> @llvm.riscv.vfslide1down.mask.nxv8bf16.bf16.i64(<vscale x 8 x bfloat> [[MASKEDOFF]], <vscale x 8 x bfloat> [[SRC]], bfloat [[VALUE]], <vscale x 8 x i1> [[MASK]], i64 [[VL]], i64 0)
+// CHECK-RV64-NEXT: ret <vscale x 8 x bfloat> [[TMP0]]
+//
+vbfloat16m2_t test_vfslide1down_vf_bf16m2_tumu(vbool8_t mask, vbfloat16m2_t maskedoff, vbfloat16m2_t src, __bf16 value, size_t vl) {
+ return __riscv_vfslide1down_tumu(mask, maskedoff, src, value, vl);
+}
+
+// CHECK-RV64-LABEL: define dso_local <vscale x 16 x bfloat> @test_vfslide1down_vf_bf16m4_tumu(
+// CHECK-RV64-SAME: <vscale x 16 x i1> [[MASK:%.*]], <vscale x 16 x bfloat> [[MASKEDOFF:%.*]], <vscale x 16 x bfloat> [[SRC:%.*]], bfloat noundef [[VALUE:%.*]], i64 noundef [[VL:%.*]]) #[[ATTR0]] {
+// CHECK-RV64-NEXT: [[ENTRY:.*:]]
+// CHECK-RV64-NEXT: [[TMP0:%.*]] = call <vscale x 16 x bfloat> @llvm.riscv.vfslide1down.mask.nxv16bf16.bf16.i64(<vscale x 16 x bfloat> [[MASKEDOFF]], <vscale x 16 x bfloat> [[SRC]], bfloat [[VALUE]], <vscale x 16 x i1> [[MASK]], i64 [[VL]], i64 0)
+// CHECK-RV64-NEXT: ret <vscale x 16 x bfloat> [[TMP0]]
+//
+vbfloat16m4_t test_vfslide1down_vf_bf16m4_tumu(vbool4_t mask, vbfloat16m4_t maskedoff, vbfloat16m4_t src, __bf16 value, size_t vl) {
+ return __riscv_vfslide1down_tumu(mask, maskedoff, src, value, vl);
+}
+
+// CHECK-RV64-LABEL: define dso_local <vscale x 32 x bfloat> @test_vfslide1down_vf_bf16m8_tumu(
+// CHECK-RV64-SAME: <vscale x 32 x i1> [[MASK:%.*]], <vscale x 32 x bfloat> [[MASKEDOFF:%.*]], <vscale x 32 x bfloat> [[SRC:%.*]], bfloat noundef [[VALUE:%.*]], i64 noundef [[VL:%.*]]) #[[ATTR0]] {
+// CHECK-RV64-NEXT: [[ENTRY:.*:]]
+// CHECK-RV64-NEXT: [[TMP0:%.*]] = call <vscale x 32 x bfloat> @llvm.riscv.vfslide1down.mask.nxv32bf16.bf16.i64(<vscale x 32 x bfloat> [[MASKEDOFF]], <vscale x 32 x bfloat> [[SRC]], bfloat [[VALUE]], <vscale x 32 x i1> [[MASK]], i64 [[VL]], i64 0)
+// CHECK-RV64-NEXT: ret <vscale x 32 x bfloat> [[TMP0]]
+//
+vbfloat16m8_t test_vfslide1down_vf_bf16m8_tumu(vbool2_t mask, vbfloat16m8_t maskedoff, vbfloat16m8_t src, __bf16 value, size_t vl) {
+ return __riscv_vfslide1down_tumu(mask, maskedoff, src, value, vl);
+}
+
+// CHECK-RV64-LABEL: define dso_local <vscale x 1 x bfloat> @test_vfslide1down_vf_bf16mf4_mu(
+// CHECK-RV64-SAME: <vscale x 1 x i1> [[MASK:%.*]], <vscale x 1 x bfloat> [[MASKEDOFF:%.*]], <vscale x 1 x bfloat> [[SRC:%.*]], bfloat noundef [[VALUE:%.*]], i64 noundef [[VL:%.*]]) #[[ATTR0]] {
+// CHECK-RV64-NEXT: [[ENTRY:.*:]]
+// CHECK-RV64-NEXT: [[TMP0:%.*]] = call <vscale x 1 x bfloat> @llvm.riscv.vfslide1down.mask.nxv1bf16.bf16.i64(<vscale x 1 x bfloat> [[MASKEDOFF]], <vscale x 1 x bfloat> [[SRC]], bfloat [[VALUE]], <vscale x 1 x i1> [[MASK]], i64 [[VL]], i64 1)
+// CHECK-RV64-NEXT: ret <vscale x 1 x bfloat> [[TMP0]]
+//
+vbfloat16mf4_t test_vfslide1down_vf_bf16mf4_mu(vbool64_t mask, vbfloat16mf4_t maskedoff, vbfloat16mf4_t src, __bf16 value, size_t vl) {
+ return __riscv_vfslide1down_mu(mask, maskedoff, src, value, vl);
+}
+
+// CHECK-RV64-LABEL: define dso_local <vscale x 2 x bfloat> @test_vfslide1down_vf_bf16mf2_mu(
+// CHECK-RV64-SAME: <vscale x 2 x i1> [[MASK:%.*]], <vscale x 2 x bfloat> [[MASKEDOFF:%.*]], <vscale x 2 x bfloat> [[SRC:%.*]], bfloat noundef [[VALUE:%.*]], i64 noundef [[VL:%.*]]) #[[ATTR0]] {
+// CHECK-RV64-NEXT: [[ENTRY:.*:]]
+// CHECK-RV64-NEXT: [[TMP0:%.*]] = call <vscale x 2 x bfloat> @llvm.riscv.vfslide1down.mask.nxv2bf16.bf16.i64(<vscale x 2 x bfloat> [[MASKEDOFF]], <vscale x 2 x bfloat> [[SRC]], bfloat [[VALUE]], <vscale x 2 x i1> [[MASK]], i64 [[VL]], i64 1)
+// CHECK-RV64-NEXT: ret <vscale x 2 x bfloat> [[TMP0]]
+//
+vbfloat16mf2_t test_vfslide1down_vf_bf16mf2_mu(vbool32_t mask, vbfloat16mf2_t maskedoff, vbfloat16mf2_t src, __bf16 value, size_t vl) {
+ return __riscv_vfslide1down_mu(mask, maskedoff, src, value, vl);
+}
+
+// CHECK-RV64-LABEL: define dso_local <vscale x 4 x bfloat> @test_vfslide1down_vf_bf16m1_mu(
+// CHECK-RV64-SAME: <vscale x 4 x i1> [[MASK:%.*]], <vscale x 4 x bfloat> [[MASKEDOFF:%.*]], <vscale x 4 x bfloat> [[SRC:%.*]], bfloat noundef [[VALUE:%.*]], i64 noundef [[VL:%.*]]) #[[ATTR0]] {
+// CHECK-RV64-NEXT: [[ENTRY:.*:]]
+// CHECK-RV64-NEXT: [[TMP0:%.*]] = call <vscale x 4 x bfloat> @llvm.riscv.vfslide1down.mask.nxv4bf16.bf16.i64(<vscale x 4 x bfloat> [[MASKEDOFF]], <vscale x 4 x bfloat> [[SRC]], bfloat [[VALUE]], <vscale x 4 x i1> [[MASK]], i64 [[VL]], i64 1)
+// CHECK-RV64-NEXT: ret <vscale x 4 x bfloat> [[TMP0]]
+//
+vbfloat16m1_t test_vfslide1down_vf_bf16m1_mu(vbool16_t mask, vbfloat16m1_t maskedoff, vbfloat16m1_t src, __bf16 value, size_t vl) {
+ return __riscv_vfslide1down_mu(mask, maskedoff, src, value, vl);
+}
+
+// CHECK-RV64-LABEL: define dso_local <vscale x 8 x bfloat> @test_vfslide1down_vf_bf16m2_mu(
+// CHECK-RV64-SAME: <vscale x 8 x i1> [[MASK:%.*]], <vscale x 8 x bfloat> [[MASKEDOFF:%.*]], <vscale x 8 x bfloat> [[SRC:%.*]], bfloat noundef [[VALUE:%.*]], i64 noundef [[VL:%.*]]) #[[ATTR0]] {
+// CHECK-RV64-NEXT: [[ENTRY:.*:]]
+// CHECK-RV64-NEXT: [[TMP0:%.*]] = call <vscale x 8 x bfloat> @llvm.riscv.vfslide1down.mask.nxv8bf16.bf16.i64(<vscale x 8 x bfloat> [[MASKEDOFF]], <vscale x 8 x bfloat> [[SRC]], bfloat [[VALUE]], <vscale x 8 x i1> [[MASK]], i64 [[VL]], i64 1)
+// CHECK-RV64-NEXT: ret <vscale x 8 x bfloat> [[TMP0]]
+//
+vbfloat16m2_t test_vfslide1down_vf_bf16m2_mu(vbool8_t mask, vbfloat16m2_t maskedoff, vbfloat16m2_t src, __bf16 value, size_t vl) {
+ return __riscv_vfslide1down_mu(mask, maskedoff, src, value, vl);
+}
+
+// CHECK-RV64-LABEL: define dso_local <vscale x 16 x bfloat> @test_vfslide1down_vf_bf16m4_mu(
+// CHECK-RV64-SAME: <vscale x 16 x i1> [[MASK:%.*]], <vscale x 16 x bfloat> [[MASKEDOFF:%.*]], <vscale x 16 x bfloat> [[SRC:%.*]], bfloat noundef [[VALUE:%.*]], i64 noundef [[VL:%.*]]) #[[ATTR0]] {
+// CHECK-RV64-NEXT: [[ENTRY:.*:]]
+// CHECK-RV64-NEXT: [[TMP0:%.*]] = call <vscale x 16 x bfloat> @llvm.riscv.vfslide1down.mask.nxv16bf16.bf16.i64(<vscale x 16 x bfloat> [[MASKEDOFF]], <vscale x 16 x bfloat> [[SRC]], bfloat [[VALUE]], <vscale x 16 x i1> [[MASK]], i64 [[VL]], i64 1)
+// CHECK-RV64-NEXT: ret <vscale x 16 x bfloat> [[TMP0]]
+//
+vbfloat16m4_t test_vfslide1down_vf_bf16m4_mu(vbool4_t mask, vbfloat16m4_t maskedoff, vbfloat16m4_t src, __bf16 value, size_t vl) {
+ return __riscv_vfslide1down_mu(mask, maskedoff, src, value, vl);
+}
+
+// CHECK-RV64-LABEL: define dso_local <vscale x 32 x bfloat> @test_vfslide1down_vf_bf16m8_mu(
+// CHECK-RV64-SAME: <vscale x 32 x i1> [[MASK:%.*]], <vscale x 32 x bfloat> [[MASKEDOFF:%.*]], <vscale x 32 x bfloat> [[SRC:%.*]], bfloat noundef [[VALUE:%.*]], i64 noundef [[VL:%.*]]) #[[ATTR0]] {
+// CHECK-RV64-NEXT: [[ENTRY:.*:]]
+// CHECK-RV64-NEXT: [[TMP0:%.*]] = call <vscale x 32 x bfloat> @llvm.riscv.vfslide1down.mask.nxv32bf16.bf16.i64(<vscale x 32 x bfloat> [[MASKEDOFF]], <vscale x 32 x bfloat> [[SRC]], bfloat [[VALUE]], <vscale x 32 x i1> [[MASK]], i64 [[VL]], i64 1)
+// CHECK-RV64-NEXT: ret <vscale x 32 x bfloat> [[TMP0]]
+//
+vbfloat16m8_t test_vfslide1down_vf_bf16m8_mu(vbool2_t mask, vbfloat16m8_t maskedoff, vbfloat16m8_t src, __bf16 value, size_t vl) {
+ return __riscv_vfslide1down_mu(mask, maskedoff, src, value, vl);
+}
+
diff --git a/clang/test/CodeGen/RISCV/rvv-intrinsics-autogenerated/zvfbfa/policy/overloaded/vfslide1up.c b/clang/test/CodeGen/RISCV/rvv-intrinsics-autogenerated/zvfbfa/policy/overloaded/vfslide1up.c
new file mode 100644
index 0000000..1238d22
--- /dev/null
+++ b/clang/test/CodeGen/RISCV/rvv-intrinsics-autogenerated/zvfbfa/policy/overloaded/vfslide1up.c
@@ -0,0 +1,249 @@
+// NOTE: Assertions have been autogenerated by utils/update_cc_test_checks.py UTC_ARGS: --version 5
+// REQUIRES: riscv-registered-target
+// RUN: %clang_cc1 -triple riscv64 -target-feature +v \
+// RUN: -target-feature +experimental-zvfbfa -disable-O0-optnone \
+// RUN: -emit-llvm %s -o - | opt -S -passes=mem2reg | \
+// RUN: FileCheck --check-prefix=CHECK-RV64 %s
+
+#include <riscv_vector.h>
+
+// CHECK-RV64-LABEL: define dso_local <vscale x 1 x bfloat> @test_vfslide1up_vf_bf16mf4_tu(
+// CHECK-RV64-SAME: <vscale x 1 x bfloat> [[MASKEDOFF:%.*]], <vscale x 1 x bfloat> [[SRC:%.*]], bfloat noundef [[VALUE:%.*]], i64 noundef [[VL:%.*]]) #[[ATTR0:[0-9]+]] {
+// CHECK-RV64-NEXT: [[ENTRY:.*:]]
+// CHECK-RV64-NEXT: [[TMP0:%.*]] = call <vscale x 1 x bfloat> @llvm.riscv.vfslide1up.nxv1bf16.bf16.i64(<vscale x 1 x bfloat> [[MASKEDOFF]], <vscale x 1 x bfloat> [[SRC]], bfloat [[VALUE]], i64 [[VL]])
+// CHECK-RV64-NEXT: ret <vscale x 1 x bfloat> [[TMP0]]
+//
+vbfloat16mf4_t test_vfslide1up_vf_bf16mf4_tu(vbfloat16mf4_t maskedoff, vbfloat16mf4_t src, __bf16 value, size_t vl) {
+ return __riscv_vfslide1up_tu(maskedoff, src, value, vl);
+}
+
+// CHECK-RV64-LABEL: define dso_local <vscale x 2 x bfloat> @test_vfslide1up_vf_bf16mf2_tu(
+// CHECK-RV64-SAME: <vscale x 2 x bfloat> [[MASKEDOFF:%.*]], <vscale x 2 x bfloat> [[SRC:%.*]], bfloat noundef [[VALUE:%.*]], i64 noundef [[VL:%.*]]) #[[ATTR0]] {
+// CHECK-RV64-NEXT: [[ENTRY:.*:]]
+// CHECK-RV64-NEXT: [[TMP0:%.*]] = call <vscale x 2 x bfloat> @llvm.riscv.vfslide1up.nxv2bf16.bf16.i64(<vscale x 2 x bfloat> [[MASKEDOFF]], <vscale x 2 x bfloat> [[SRC]], bfloat [[VALUE]], i64 [[VL]])
+// CHECK-RV64-NEXT: ret <vscale x 2 x bfloat> [[TMP0]]
+//
+vbfloat16mf2_t test_vfslide1up_vf_bf16mf2_tu(vbfloat16mf2_t maskedoff, vbfloat16mf2_t src, __bf16 value, size_t vl) {
+ return __riscv_vfslide1up_tu(maskedoff, src, value, vl);
+}
+
+// CHECK-RV64-LABEL: define dso_local <vscale x 4 x bfloat> @test_vfslide1up_vf_bf16m1_tu(
+// CHECK-RV64-SAME: <vscale x 4 x bfloat> [[MASKEDOFF:%.*]], <vscale x 4 x bfloat> [[SRC:%.*]], bfloat noundef [[VALUE:%.*]], i64 noundef [[VL:%.*]]) #[[ATTR0]] {
+// CHECK-RV64-NEXT: [[ENTRY:.*:]]
+// CHECK-RV64-NEXT: [[TMP0:%.*]] = call <vscale x 4 x bfloat> @llvm.riscv.vfslide1up.nxv4bf16.bf16.i64(<vscale x 4 x bfloat> [[MASKEDOFF]], <vscale x 4 x bfloat> [[SRC]], bfloat [[VALUE]], i64 [[VL]])
+// CHECK-RV64-NEXT: ret <vscale x 4 x bfloat> [[TMP0]]
+//
+vbfloat16m1_t test_vfslide1up_vf_bf16m1_tu(vbfloat16m1_t maskedoff, vbfloat16m1_t src, __bf16 value, size_t vl) {
+ return __riscv_vfslide1up_tu(maskedoff, src, value, vl);
+}
+
+// CHECK-RV64-LABEL: define dso_local <vscale x 8 x bfloat> @test_vfslide1up_vf_bf16m2_tu(
+// CHECK-RV64-SAME: <vscale x 8 x bfloat> [[MASKEDOFF:%.*]], <vscale x 8 x bfloat> [[SRC:%.*]], bfloat noundef [[VALUE:%.*]], i64 noundef [[VL:%.*]]) #[[ATTR0]] {
+// CHECK-RV64-NEXT: [[ENTRY:.*:]]
+// CHECK-RV64-NEXT: [[TMP0:%.*]] = call <vscale x 8 x bfloat> @llvm.riscv.vfslide1up.nxv8bf16.bf16.i64(<vscale x 8 x bfloat> [[MASKEDOFF]], <vscale x 8 x bfloat> [[SRC]], bfloat [[VALUE]], i64 [[VL]])
+// CHECK-RV64-NEXT: ret <vscale x 8 x bfloat> [[TMP0]]
+//
+vbfloat16m2_t test_vfslide1up_vf_bf16m2_tu(vbfloat16m2_t maskedoff, vbfloat16m2_t src, __bf16 value, size_t vl) {
+ return __riscv_vfslide1up_tu(maskedoff, src, value, vl);
+}
+
+// CHECK-RV64-LABEL: define dso_local <vscale x 16 x bfloat> @test_vfslide1up_vf_bf16m4_tu(
+// CHECK-RV64-SAME: <vscale x 16 x bfloat> [[MASKEDOFF:%.*]], <vscale x 16 x bfloat> [[SRC:%.*]], bfloat noundef [[VALUE:%.*]], i64 noundef [[VL:%.*]]) #[[ATTR0]] {
+// CHECK-RV64-NEXT: [[ENTRY:.*:]]
+// CHECK-RV64-NEXT: [[TMP0:%.*]] = call <vscale x 16 x bfloat> @llvm.riscv.vfslide1up.nxv16bf16.bf16.i64(<vscale x 16 x bfloat> [[MASKEDOFF]], <vscale x 16 x bfloat> [[SRC]], bfloat [[VALUE]], i64 [[VL]])
+// CHECK-RV64-NEXT: ret <vscale x 16 x bfloat> [[TMP0]]
+//
+vbfloat16m4_t test_vfslide1up_vf_bf16m4_tu(vbfloat16m4_t maskedoff, vbfloat16m4_t src, __bf16 value, size_t vl) {
+ return __riscv_vfslide1up_tu(maskedoff, src, value, vl);
+}
+
+// CHECK-RV64-LABEL: define dso_local <vscale x 32 x bfloat> @test_vfslide1up_vf_bf16m8_tu(
+// CHECK-RV64-SAME: <vscale x 32 x bfloat> [[MASKEDOFF:%.*]], <vscale x 32 x bfloat> [[SRC:%.*]], bfloat noundef [[VALUE:%.*]], i64 noundef [[VL:%.*]]) #[[ATTR0]] {
+// CHECK-RV64-NEXT: [[ENTRY:.*:]]
+// CHECK-RV64-NEXT: [[TMP0:%.*]] = call <vscale x 32 x bfloat> @llvm.riscv.vfslide1up.nxv32bf16.bf16.i64(<vscale x 32 x bfloat> [[MASKEDOFF]], <vscale x 32 x bfloat> [[SRC]], bfloat [[VALUE]], i64 [[VL]])
+// CHECK-RV64-NEXT: ret <vscale x 32 x bfloat> [[TMP0]]
+//
+vbfloat16m8_t test_vfslide1up_vf_bf16m8_tu(vbfloat16m8_t maskedoff, vbfloat16m8_t src, __bf16 value, size_t vl) {
+ return __riscv_vfslide1up_tu(maskedoff, src, value, vl);
+}
+
+// CHECK-RV64-LABEL: define dso_local <vscale x 1 x bfloat> @test_vfslide1up_vf_bf16mf4_tum(
+// CHECK-RV64-SAME: <vscale x 1 x i1> [[MASK:%.*]], <vscale x 1 x bfloat> [[MASKEDOFF:%.*]], <vscale x 1 x bfloat> [[SRC:%.*]], bfloat noundef [[VALUE:%.*]], i64 noundef [[VL:%.*]]) #[[ATTR0]] {
+// CHECK-RV64-NEXT: [[ENTRY:.*:]]
+// CHECK-RV64-NEXT: [[TMP0:%.*]] = call <vscale x 1 x bfloat> @llvm.riscv.vfslide1up.mask.nxv1bf16.bf16.i64(<vscale x 1 x bfloat> [[MASKEDOFF]], <vscale x 1 x bfloat> [[SRC]], bfloat [[VALUE]], <vscale x 1 x i1> [[MASK]], i64 [[VL]], i64 2)
+// CHECK-RV64-NEXT: ret <vscale x 1 x bfloat> [[TMP0]]
+//
+vbfloat16mf4_t test_vfslide1up_vf_bf16mf4_tum(vbool64_t mask, vbfloat16mf4_t maskedoff, vbfloat16mf4_t src, __bf16 value, size_t vl) {
+ return __riscv_vfslide1up_tum(mask, maskedoff, src, value, vl);
+}
+
+// CHECK-RV64-LABEL: define dso_local <vscale x 2 x bfloat> @test_vfslide1up_vf_bf16mf2_tum(
+// CHECK-RV64-SAME: <vscale x 2 x i1> [[MASK:%.*]], <vscale x 2 x bfloat> [[MASKEDOFF:%.*]], <vscale x 2 x bfloat> [[SRC:%.*]], bfloat noundef [[VALUE:%.*]], i64 noundef [[VL:%.*]]) #[[ATTR0]] {
+// CHECK-RV64-NEXT: [[ENTRY:.*:]]
+// CHECK-RV64-NEXT: [[TMP0:%.*]] = call <vscale x 2 x bfloat> @llvm.riscv.vfslide1up.mask.nxv2bf16.bf16.i64(<vscale x 2 x bfloat> [[MASKEDOFF]], <vscale x 2 x bfloat> [[SRC]], bfloat [[VALUE]], <vscale x 2 x i1> [[MASK]], i64 [[VL]], i64 2)
+// CHECK-RV64-NEXT: ret <vscale x 2 x bfloat> [[TMP0]]
+//
+vbfloat16mf2_t test_vfslide1up_vf_bf16mf2_tum(vbool32_t mask, vbfloat16mf2_t maskedoff, vbfloat16mf2_t src, __bf16 value, size_t vl) {
+ return __riscv_vfslide1up_tum(mask, maskedoff, src, value, vl);
+}
+
+// CHECK-RV64-LABEL: define dso_local <vscale x 4 x bfloat> @test_vfslide1up_vf_bf16m1_tum(
+// CHECK-RV64-SAME: <vscale x 4 x i1> [[MASK:%.*]], <vscale x 4 x bfloat> [[MASKEDOFF:%.*]], <vscale x 4 x bfloat> [[SRC:%.*]], bfloat noundef [[VALUE:%.*]], i64 noundef [[VL:%.*]]) #[[ATTR0]] {
+// CHECK-RV64-NEXT: [[ENTRY:.*:]]
+// CHECK-RV64-NEXT: [[TMP0:%.*]] = call <vscale x 4 x bfloat> @llvm.riscv.vfslide1up.mask.nxv4bf16.bf16.i64(<vscale x 4 x bfloat> [[MASKEDOFF]], <vscale x 4 x bfloat> [[SRC]], bfloat [[VALUE]], <vscale x 4 x i1> [[MASK]], i64 [[VL]], i64 2)
+// CHECK-RV64-NEXT: ret <vscale x 4 x bfloat> [[TMP0]]
+//
+vbfloat16m1_t test_vfslide1up_vf_bf16m1_tum(vbool16_t mask, vbfloat16m1_t maskedoff, vbfloat16m1_t src, __bf16 value, size_t vl) {
+ return __riscv_vfslide1up_tum(mask, maskedoff, src, value, vl);
+}
+
+// CHECK-RV64-LABEL: define dso_local <vscale x 8 x bfloat> @test_vfslide1up_vf_bf16m2_tum(
+// CHECK-RV64-SAME: <vscale x 8 x i1> [[MASK:%.*]], <vscale x 8 x bfloat> [[MASKEDOFF:%.*]], <vscale x 8 x bfloat> [[SRC:%.*]], bfloat noundef [[VALUE:%.*]], i64 noundef [[VL:%.*]]) #[[ATTR0]] {
+// CHECK-RV64-NEXT: [[ENTRY:.*:]]
+// CHECK-RV64-NEXT: [[TMP0:%.*]] = call <vscale x 8 x bfloat> @llvm.riscv.vfslide1up.mask.nxv8bf16.bf16.i64(<vscale x 8 x bfloat> [[MASKEDOFF]], <vscale x 8 x bfloat> [[SRC]], bfloat [[VALUE]], <vscale x 8 x i1> [[MASK]], i64 [[VL]], i64 2)
+// CHECK-RV64-NEXT: ret <vscale x 8 x bfloat> [[TMP0]]
+//
+vbfloat16m2_t test_vfslide1up_vf_bf16m2_tum(vbool8_t mask, vbfloat16m2_t maskedoff, vbfloat16m2_t src, __bf16 value, size_t vl) {
+ return __riscv_vfslide1up_tum(mask, maskedoff, src, value, vl);
+}
+
+// CHECK-RV64-LABEL: define dso_local <vscale x 16 x bfloat> @test_vfslide1up_vf_bf16m4_tum(
+// CHECK-RV64-SAME: <vscale x 16 x i1> [[MASK:%.*]], <vscale x 16 x bfloat> [[MASKEDOFF:%.*]], <vscale x 16 x bfloat> [[SRC:%.*]], bfloat noundef [[VALUE:%.*]], i64 noundef [[VL:%.*]]) #[[ATTR0]] {
+// CHECK-RV64-NEXT: [[ENTRY:.*:]]
+// CHECK-RV64-NEXT: [[TMP0:%.*]] = call <vscale x 16 x bfloat> @llvm.riscv.vfslide1up.mask.nxv16bf16.bf16.i64(<vscale x 16 x bfloat> [[MASKEDOFF]], <vscale x 16 x bfloat> [[SRC]], bfloat [[VALUE]], <vscale x 16 x i1> [[MASK]], i64 [[VL]], i64 2)
+// CHECK-RV64-NEXT: ret <vscale x 16 x bfloat> [[TMP0]]
+//
+vbfloat16m4_t test_vfslide1up_vf_bf16m4_tum(vbool4_t mask, vbfloat16m4_t maskedoff, vbfloat16m4_t src, __bf16 value, size_t vl) {
+ return __riscv_vfslide1up_tum(mask, maskedoff, src, value, vl);
+}
+
+// CHECK-RV64-LABEL: define dso_local <vscale x 32 x bfloat> @test_vfslide1up_vf_bf16m8_tum(
+// CHECK-RV64-SAME: <vscale x 32 x i1> [[MASK:%.*]], <vscale x 32 x bfloat> [[MASKEDOFF:%.*]], <vscale x 32 x bfloat> [[SRC:%.*]], bfloat noundef [[VALUE:%.*]], i64 noundef [[VL:%.*]]) #[[ATTR0]] {
+// CHECK-RV64-NEXT: [[ENTRY:.*:]]
+// CHECK-RV64-NEXT: [[TMP0:%.*]] = call <vscale x 32 x bfloat> @llvm.riscv.vfslide1up.mask.nxv32bf16.bf16.i64(<vscale x 32 x bfloat> [[MASKEDOFF]], <vscale x 32 x bfloat> [[SRC]], bfloat [[VALUE]], <vscale x 32 x i1> [[MASK]], i64 [[VL]], i64 2)
+// CHECK-RV64-NEXT: ret <vscale x 32 x bfloat> [[TMP0]]
+//
+vbfloat16m8_t test_vfslide1up_vf_bf16m8_tum(vbool2_t mask, vbfloat16m8_t maskedoff, vbfloat16m8_t src, __bf16 value, size_t vl) {
+ return __riscv_vfslide1up_tum(mask, maskedoff, src, value, vl);
+}
+
+// CHECK-RV64-LABEL: define dso_local <vscale x 1 x bfloat> @test_vfslide1up_vf_bf16mf4_tumu(
+// CHECK-RV64-SAME: <vscale x 1 x i1> [[MASK:%.*]], <vscale x 1 x bfloat> [[MASKEDOFF:%.*]], <vscale x 1 x bfloat> [[SRC:%.*]], bfloat noundef [[VALUE:%.*]], i64 noundef [[VL:%.*]]) #[[ATTR0]] {
+// CHECK-RV64-NEXT: [[ENTRY:.*:]]
+// CHECK-RV64-NEXT: [[TMP0:%.*]] = call <vscale x 1 x bfloat> @llvm.riscv.vfslide1up.mask.nxv1bf16.bf16.i64(<vscale x 1 x bfloat> [[MASKEDOFF]], <vscale x 1 x bfloat> [[SRC]], bfloat [[VALUE]], <vscale x 1 x i1> [[MASK]], i64 [[VL]], i64 0)
+// CHECK-RV64-NEXT: ret <vscale x 1 x bfloat> [[TMP0]]
+//
+vbfloat16mf4_t test_vfslide1up_vf_bf16mf4_tumu(vbool64_t mask, vbfloat16mf4_t maskedoff, vbfloat16mf4_t src, __bf16 value, size_t vl) {
+ return __riscv_vfslide1up_tumu(mask, maskedoff, src, value, vl);
+}
+
+// CHECK-RV64-LABEL: define dso_local <vscale x 2 x bfloat> @test_vfslide1up_vf_bf16mf2_tumu(
+// CHECK-RV64-SAME: <vscale x 2 x i1> [[MASK:%.*]], <vscale x 2 x bfloat> [[MASKEDOFF:%.*]], <vscale x 2 x bfloat> [[SRC:%.*]], bfloat noundef [[VALUE:%.*]], i64 noundef [[VL:%.*]]) #[[ATTR0]] {
+// CHECK-RV64-NEXT: [[ENTRY:.*:]]
+// CHECK-RV64-NEXT: [[TMP0:%.*]] = call <vscale x 2 x bfloat> @llvm.riscv.vfslide1up.mask.nxv2bf16.bf16.i64(<vscale x 2 x bfloat> [[MASKEDOFF]], <vscale x 2 x bfloat> [[SRC]], bfloat [[VALUE]], <vscale x 2 x i1> [[MASK]], i64 [[VL]], i64 0)
+// CHECK-RV64-NEXT: ret <vscale x 2 x bfloat> [[TMP0]]
+//
+vbfloat16mf2_t test_vfslide1up_vf_bf16mf2_tumu(vbool32_t mask, vbfloat16mf2_t maskedoff, vbfloat16mf2_t src, __bf16 value, size_t vl) {
+ return __riscv_vfslide1up_tumu(mask, maskedoff, src, value, vl);
+}
+
+// CHECK-RV64-LABEL: define dso_local <vscale x 4 x bfloat> @test_vfslide1up_vf_bf16m1_tumu(
+// CHECK-RV64-SAME: <vscale x 4 x i1> [[MASK:%.*]], <vscale x 4 x bfloat> [[MASKEDOFF:%.*]], <vscale x 4 x bfloat> [[SRC:%.*]], bfloat noundef [[VALUE:%.*]], i64 noundef [[VL:%.*]]) #[[ATTR0]] {
+// CHECK-RV64-NEXT: [[ENTRY:.*:]]
+// CHECK-RV64-NEXT: [[TMP0:%.*]] = call <vscale x 4 x bfloat> @llvm.riscv.vfslide1up.mask.nxv4bf16.bf16.i64(<vscale x 4 x bfloat> [[MASKEDOFF]], <vscale x 4 x bfloat> [[SRC]], bfloat [[VALUE]], <vscale x 4 x i1> [[MASK]], i64 [[VL]], i64 0)
+// CHECK-RV64-NEXT: ret <vscale x 4 x bfloat> [[TMP0]]
+//
+vbfloat16m1_t test_vfslide1up_vf_bf16m1_tumu(vbool16_t mask, vbfloat16m1_t maskedoff, vbfloat16m1_t src, __bf16 value, size_t vl) {
+ return __riscv_vfslide1up_tumu(mask, maskedoff, src, value, vl);
+}
+
+// CHECK-RV64-LABEL: define dso_local <vscale x 8 x bfloat> @test_vfslide1up_vf_bf16m2_tumu(
+// CHECK-RV64-SAME: <vscale x 8 x i1> [[MASK:%.*]], <vscale x 8 x bfloat> [[MASKEDOFF:%.*]], <vscale x 8 x bfloat> [[SRC:%.*]], bfloat noundef [[VALUE:%.*]], i64 noundef [[VL:%.*]]) #[[ATTR0]] {
+// CHECK-RV64-NEXT: [[ENTRY:.*:]]
+// CHECK-RV64-NEXT: [[TMP0:%.*]] = call <vscale x 8 x bfloat> @llvm.riscv.vfslide1up.mask.nxv8bf16.bf16.i64(<vscale x 8 x bfloat> [[MASKEDOFF]], <vscale x 8 x bfloat> [[SRC]], bfloat [[VALUE]], <vscale x 8 x i1> [[MASK]], i64 [[VL]], i64 0)
+// CHECK-RV64-NEXT: ret <vscale x 8 x bfloat> [[TMP0]]
+//
+vbfloat16m2_t test_vfslide1up_vf_bf16m2_tumu(vbool8_t mask, vbfloat16m2_t maskedoff, vbfloat16m2_t src, __bf16 value, size_t vl) {
+ return __riscv_vfslide1up_tumu(mask, maskedoff, src, value, vl);
+}
+
+// CHECK-RV64-LABEL: define dso_local <vscale x 16 x bfloat> @test_vfslide1up_vf_bf16m4_tumu(
+// CHECK-RV64-SAME: <vscale x 16 x i1> [[MASK:%.*]], <vscale x 16 x bfloat> [[MASKEDOFF:%.*]], <vscale x 16 x bfloat> [[SRC:%.*]], bfloat noundef [[VALUE:%.*]], i64 noundef [[VL:%.*]]) #[[ATTR0]] {
+// CHECK-RV64-NEXT: [[ENTRY:.*:]]
+// CHECK-RV64-NEXT: [[TMP0:%.*]] = call <vscale x 16 x bfloat> @llvm.riscv.vfslide1up.mask.nxv16bf16.bf16.i64(<vscale x 16 x bfloat> [[MASKEDOFF]], <vscale x 16 x bfloat> [[SRC]], bfloat [[VALUE]], <vscale x 16 x i1> [[MASK]], i64 [[VL]], i64 0)
+// CHECK-RV64-NEXT: ret <vscale x 16 x bfloat> [[TMP0]]
+//
+vbfloat16m4_t test_vfslide1up_vf_bf16m4_tumu(vbool4_t mask, vbfloat16m4_t maskedoff, vbfloat16m4_t src, __bf16 value, size_t vl) {
+ return __riscv_vfslide1up_tumu(mask, maskedoff, src, value, vl);
+}
+
+// CHECK-RV64-LABEL: define dso_local <vscale x 32 x bfloat> @test_vfslide1up_vf_bf16m8_tumu(
+// CHECK-RV64-SAME: <vscale x 32 x i1> [[MASK:%.*]], <vscale x 32 x bfloat> [[MASKEDOFF:%.*]], <vscale x 32 x bfloat> [[SRC:%.*]], bfloat noundef [[VALUE:%.*]], i64 noundef [[VL:%.*]]) #[[ATTR0]] {
+// CHECK-RV64-NEXT: [[ENTRY:.*:]]
+// CHECK-RV64-NEXT: [[TMP0:%.*]] = call <vscale x 32 x bfloat> @llvm.riscv.vfslide1up.mask.nxv32bf16.bf16.i64(<vscale x 32 x bfloat> [[MASKEDOFF]], <vscale x 32 x bfloat> [[SRC]], bfloat [[VALUE]], <vscale x 32 x i1> [[MASK]], i64 [[VL]], i64 0)
+// CHECK-RV64-NEXT: ret <vscale x 32 x bfloat> [[TMP0]]
+//
+vbfloat16m8_t test_vfslide1up_vf_bf16m8_tumu(vbool2_t mask, vbfloat16m8_t maskedoff, vbfloat16m8_t src, __bf16 value, size_t vl) {
+ return __riscv_vfslide1up_tumu(mask, maskedoff, src, value, vl);
+}
+
+// CHECK-RV64-LABEL: define dso_local <vscale x 1 x bfloat> @test_vfslide1up_vf_bf16mf4_mu(
+// CHECK-RV64-SAME: <vscale x 1 x i1> [[MASK:%.*]], <vscale x 1 x bfloat> [[MASKEDOFF:%.*]], <vscale x 1 x bfloat> [[SRC:%.*]], bfloat noundef [[VALUE:%.*]], i64 noundef [[VL:%.*]]) #[[ATTR0]] {
+// CHECK-RV64-NEXT: [[ENTRY:.*:]]
+// CHECK-RV64-NEXT: [[TMP0:%.*]] = call <vscale x 1 x bfloat> @llvm.riscv.vfslide1up.mask.nxv1bf16.bf16.i64(<vscale x 1 x bfloat> [[MASKEDOFF]], <vscale x 1 x bfloat> [[SRC]], bfloat [[VALUE]], <vscale x 1 x i1> [[MASK]], i64 [[VL]], i64 1)
+// CHECK-RV64-NEXT: ret <vscale x 1 x bfloat> [[TMP0]]
+//
+vbfloat16mf4_t test_vfslide1up_vf_bf16mf4_mu(vbool64_t mask, vbfloat16mf4_t maskedoff, vbfloat16mf4_t src, __bf16 value, size_t vl) {
+ return __riscv_vfslide1up_mu(mask, maskedoff, src, value, vl);
+}
+
+// CHECK-RV64-LABEL: define dso_local <vscale x 2 x bfloat> @test_vfslide1up_vf_bf16mf2_mu(
+// CHECK-RV64-SAME: <vscale x 2 x i1> [[MASK:%.*]], <vscale x 2 x bfloat> [[MASKEDOFF:%.*]], <vscale x 2 x bfloat> [[SRC:%.*]], bfloat noundef [[VALUE:%.*]], i64 noundef [[VL:%.*]]) #[[ATTR0]] {
+// CHECK-RV64-NEXT: [[ENTRY:.*:]]
+// CHECK-RV64-NEXT: [[TMP0:%.*]] = call <vscale x 2 x bfloat> @llvm.riscv.vfslide1up.mask.nxv2bf16.bf16.i64(<vscale x 2 x bfloat> [[MASKEDOFF]], <vscale x 2 x bfloat> [[SRC]], bfloat [[VALUE]], <vscale x 2 x i1> [[MASK]], i64 [[VL]], i64 1)
+// CHECK-RV64-NEXT: ret <vscale x 2 x bfloat> [[TMP0]]
+//
+vbfloat16mf2_t test_vfslide1up_vf_bf16mf2_mu(vbool32_t mask, vbfloat16mf2_t maskedoff, vbfloat16mf2_t src, __bf16 value, size_t vl) {
+ return __riscv_vfslide1up_mu(mask, maskedoff, src, value, vl);
+}
+
+// CHECK-RV64-LABEL: define dso_local <vscale x 4 x bfloat> @test_vfslide1up_vf_bf16m1_mu(
+// CHECK-RV64-SAME: <vscale x 4 x i1> [[MASK:%.*]], <vscale x 4 x bfloat> [[MASKEDOFF:%.*]], <vscale x 4 x bfloat> [[SRC:%.*]], bfloat noundef [[VALUE:%.*]], i64 noundef [[VL:%.*]]) #[[ATTR0]] {
+// CHECK-RV64-NEXT: [[ENTRY:.*:]]
+// CHECK-RV64-NEXT: [[TMP0:%.*]] = call <vscale x 4 x bfloat> @llvm.riscv.vfslide1up.mask.nxv4bf16.bf16.i64(<vscale x 4 x bfloat> [[MASKEDOFF]], <vscale x 4 x bfloat> [[SRC]], bfloat [[VALUE]], <vscale x 4 x i1> [[MASK]], i64 [[VL]], i64 1)
+// CHECK-RV64-NEXT: ret <vscale x 4 x bfloat> [[TMP0]]
+//
+vbfloat16m1_t test_vfslide1up_vf_bf16m1_mu(vbool16_t mask, vbfloat16m1_t maskedoff, vbfloat16m1_t src, __bf16 value, size_t vl) {
+ return __riscv_vfslide1up_mu(mask, maskedoff, src, value, vl);
+}
+
+// CHECK-RV64-LABEL: define dso_local <vscale x 8 x bfloat> @test_vfslide1up_vf_bf16m2_mu(
+// CHECK-RV64-SAME: <vscale x 8 x i1> [[MASK:%.*]], <vscale x 8 x bfloat> [[MASKEDOFF:%.*]], <vscale x 8 x bfloat> [[SRC:%.*]], bfloat noundef [[VALUE:%.*]], i64 noundef [[VL:%.*]]) #[[ATTR0]] {
+// CHECK-RV64-NEXT: [[ENTRY:.*:]]
+// CHECK-RV64-NEXT: [[TMP0:%.*]] = call <vscale x 8 x bfloat> @llvm.riscv.vfslide1up.mask.nxv8bf16.bf16.i64(<vscale x 8 x bfloat> [[MASKEDOFF]], <vscale x 8 x bfloat> [[SRC]], bfloat [[VALUE]], <vscale x 8 x i1> [[MASK]], i64 [[VL]], i64 1)
+// CHECK-RV64-NEXT: ret <vscale x 8 x bfloat> [[TMP0]]
+//
+vbfloat16m2_t test_vfslide1up_vf_bf16m2_mu(vbool8_t mask, vbfloat16m2_t maskedoff, vbfloat16m2_t src, __bf16 value, size_t vl) {
+ return __riscv_vfslide1up_mu(mask, maskedoff, src, value, vl);
+}
+
+// CHECK-RV64-LABEL: define dso_local <vscale x 16 x bfloat> @test_vfslide1up_vf_bf16m4_mu(
+// CHECK-RV64-SAME: <vscale x 16 x i1> [[MASK:%.*]], <vscale x 16 x bfloat> [[MASKEDOFF:%.*]], <vscale x 16 x bfloat> [[SRC:%.*]], bfloat noundef [[VALUE:%.*]], i64 noundef [[VL:%.*]]) #[[ATTR0]] {
+// CHECK-RV64-NEXT: [[ENTRY:.*:]]
+// CHECK-RV64-NEXT: [[TMP0:%.*]] = call <vscale x 16 x bfloat> @llvm.riscv.vfslide1up.mask.nxv16bf16.bf16.i64(<vscale x 16 x bfloat> [[MASKEDOFF]], <vscale x 16 x bfloat> [[SRC]], bfloat [[VALUE]], <vscale x 16 x i1> [[MASK]], i64 [[VL]], i64 1)
+// CHECK-RV64-NEXT: ret <vscale x 16 x bfloat> [[TMP0]]
+//
+vbfloat16m4_t test_vfslide1up_vf_bf16m4_mu(vbool4_t mask, vbfloat16m4_t maskedoff, vbfloat16m4_t src, __bf16 value, size_t vl) {
+ return __riscv_vfslide1up_mu(mask, maskedoff, src, value, vl);
+}
+
+// CHECK-RV64-LABEL: define dso_local <vscale x 32 x bfloat> @test_vfslide1up_vf_bf16m8_mu(
+// CHECK-RV64-SAME: <vscale x 32 x i1> [[MASK:%.*]], <vscale x 32 x bfloat> [[MASKEDOFF:%.*]], <vscale x 32 x bfloat> [[SRC:%.*]], bfloat noundef [[VALUE:%.*]], i64 noundef [[VL:%.*]]) #[[ATTR0]] {
+// CHECK-RV64-NEXT: [[ENTRY:.*:]]
+// CHECK-RV64-NEXT: [[TMP0:%.*]] = call <vscale x 32 x bfloat> @llvm.riscv.vfslide1up.mask.nxv32bf16.bf16.i64(<vscale x 32 x bfloat> [[MASKEDOFF]], <vscale x 32 x bfloat> [[SRC]], bfloat [[VALUE]], <vscale x 32 x i1> [[MASK]], i64 [[VL]], i64 1)
+// CHECK-RV64-NEXT: ret <vscale x 32 x bfloat> [[TMP0]]
+//
+vbfloat16m8_t test_vfslide1up_vf_bf16m8_mu(vbool2_t mask, vbfloat16m8_t maskedoff, vbfloat16m8_t src, __bf16 value, size_t vl) {
+ return __riscv_vfslide1up_mu(mask, maskedoff, src, value, vl);
+}
+
diff --git a/clang/test/CodeGen/RISCV/rvv-intrinsics-autogenerated/zvfbfa/policy/overloaded/vfsub.c b/clang/test/CodeGen/RISCV/rvv-intrinsics-autogenerated/zvfbfa/policy/overloaded/vfsub.c
new file mode 100644
index 0000000..ea4f8f0
--- /dev/null
+++ b/clang/test/CodeGen/RISCV/rvv-intrinsics-autogenerated/zvfbfa/policy/overloaded/vfsub.c
@@ -0,0 +1,489 @@
+// NOTE: Assertions have been autogenerated by utils/update_cc_test_checks.py UTC_ARGS: --version 5
+// REQUIRES: riscv-registered-target
+// RUN: %clang_cc1 -triple riscv64 -target-feature +v \
+// RUN: -target-feature +experimental-zvfbfa -disable-O0-optnone \
+// RUN: -emit-llvm %s -o - | opt -S -passes=mem2reg | \
+// RUN: FileCheck --check-prefix=CHECK-RV64 %s
+
+#include <riscv_vector.h>
+
+// CHECK-RV64-LABEL: define dso_local <vscale x 1 x bfloat> @test_vfsub_vv_bf16mf4_tu(
+// CHECK-RV64-SAME: <vscale x 1 x bfloat> [[MASKEDOFF:%.*]], <vscale x 1 x bfloat> [[OP1:%.*]], <vscale x 1 x bfloat> [[OP2:%.*]], i64 noundef [[VL:%.*]]) #[[ATTR0:[0-9]+]] {
+// CHECK-RV64-NEXT: [[ENTRY:.*:]]
+// CHECK-RV64-NEXT: [[TMP0:%.*]] = call <vscale x 1 x bfloat> @llvm.riscv.vfsub.nxv1bf16.nxv1bf16.i64(<vscale x 1 x bfloat> [[MASKEDOFF]], <vscale x 1 x bfloat> [[OP1]], <vscale x 1 x bfloat> [[OP2]], i64 7, i64 [[VL]])
+// CHECK-RV64-NEXT: ret <vscale x 1 x bfloat> [[TMP0]]
+//
+vbfloat16mf4_t test_vfsub_vv_bf16mf4_tu(vbfloat16mf4_t maskedoff, vbfloat16mf4_t op1, vbfloat16mf4_t op2, size_t vl) {
+ return __riscv_vfsub_tu(maskedoff, op1, op2, vl);
+}
+
+// CHECK-RV64-LABEL: define dso_local <vscale x 1 x bfloat> @test_vfsub_vf_bf16mf4_tu(
+// CHECK-RV64-SAME: <vscale x 1 x bfloat> [[MASKEDOFF:%.*]], <vscale x 1 x bfloat> [[OP1:%.*]], bfloat noundef [[OP2:%.*]], i64 noundef [[VL:%.*]]) #[[ATTR0]] {
+// CHECK-RV64-NEXT: [[ENTRY:.*:]]
+// CHECK-RV64-NEXT: [[TMP0:%.*]] = call <vscale x 1 x bfloat> @llvm.riscv.vfsub.nxv1bf16.bf16.i64(<vscale x 1 x bfloat> [[MASKEDOFF]], <vscale x 1 x bfloat> [[OP1]], bfloat [[OP2]], i64 7, i64 [[VL]])
+// CHECK-RV64-NEXT: ret <vscale x 1 x bfloat> [[TMP0]]
+//
+vbfloat16mf4_t test_vfsub_vf_bf16mf4_tu(vbfloat16mf4_t maskedoff, vbfloat16mf4_t op1, __bf16 op2, size_t vl) {
+ return __riscv_vfsub_tu(maskedoff, op1, op2, vl);
+}
+
+// CHECK-RV64-LABEL: define dso_local <vscale x 2 x bfloat> @test_vfsub_vv_bf16mf2_tu(
+// CHECK-RV64-SAME: <vscale x 2 x bfloat> [[MASKEDOFF:%.*]], <vscale x 2 x bfloat> [[OP1:%.*]], <vscale x 2 x bfloat> [[OP2:%.*]], i64 noundef [[VL:%.*]]) #[[ATTR0]] {
+// CHECK-RV64-NEXT: [[ENTRY:.*:]]
+// CHECK-RV64-NEXT: [[TMP0:%.*]] = call <vscale x 2 x bfloat> @llvm.riscv.vfsub.nxv2bf16.nxv2bf16.i64(<vscale x 2 x bfloat> [[MASKEDOFF]], <vscale x 2 x bfloat> [[OP1]], <vscale x 2 x bfloat> [[OP2]], i64 7, i64 [[VL]])
+// CHECK-RV64-NEXT: ret <vscale x 2 x bfloat> [[TMP0]]
+//
+vbfloat16mf2_t test_vfsub_vv_bf16mf2_tu(vbfloat16mf2_t maskedoff, vbfloat16mf2_t op1, vbfloat16mf2_t op2, size_t vl) {
+ return __riscv_vfsub_tu(maskedoff, op1, op2, vl);
+}
+
+// CHECK-RV64-LABEL: define dso_local <vscale x 2 x bfloat> @test_vfsub_vf_bf16mf2_tu(
+// CHECK-RV64-SAME: <vscale x 2 x bfloat> [[MASKEDOFF:%.*]], <vscale x 2 x bfloat> [[OP1:%.*]], bfloat noundef [[OP2:%.*]], i64 noundef [[VL:%.*]]) #[[ATTR0]] {
+// CHECK-RV64-NEXT: [[ENTRY:.*:]]
+// CHECK-RV64-NEXT: [[TMP0:%.*]] = call <vscale x 2 x bfloat> @llvm.riscv.vfsub.nxv2bf16.bf16.i64(<vscale x 2 x bfloat> [[MASKEDOFF]], <vscale x 2 x bfloat> [[OP1]], bfloat [[OP2]], i64 7, i64 [[VL]])
+// CHECK-RV64-NEXT: ret <vscale x 2 x bfloat> [[TMP0]]
+//
+vbfloat16mf2_t test_vfsub_vf_bf16mf2_tu(vbfloat16mf2_t maskedoff, vbfloat16mf2_t op1, __bf16 op2, size_t vl) {
+ return __riscv_vfsub_tu(maskedoff, op1, op2, vl);
+}
+
+// CHECK-RV64-LABEL: define dso_local <vscale x 4 x bfloat> @test_vfsub_vv_bf16m1_tu(
+// CHECK-RV64-SAME: <vscale x 4 x bfloat> [[MASKEDOFF:%.*]], <vscale x 4 x bfloat> [[OP1:%.*]], <vscale x 4 x bfloat> [[OP2:%.*]], i64 noundef [[VL:%.*]]) #[[ATTR0]] {
+// CHECK-RV64-NEXT: [[ENTRY:.*:]]
+// CHECK-RV64-NEXT: [[TMP0:%.*]] = call <vscale x 4 x bfloat> @llvm.riscv.vfsub.nxv4bf16.nxv4bf16.i64(<vscale x 4 x bfloat> [[MASKEDOFF]], <vscale x 4 x bfloat> [[OP1]], <vscale x 4 x bfloat> [[OP2]], i64 7, i64 [[VL]])
+// CHECK-RV64-NEXT: ret <vscale x 4 x bfloat> [[TMP0]]
+//
+vbfloat16m1_t test_vfsub_vv_bf16m1_tu(vbfloat16m1_t maskedoff, vbfloat16m1_t op1, vbfloat16m1_t op2, size_t vl) {
+ return __riscv_vfsub_tu(maskedoff, op1, op2, vl);
+}
+
+// CHECK-RV64-LABEL: define dso_local <vscale x 4 x bfloat> @test_vfsub_vf_bf16m1_tu(
+// CHECK-RV64-SAME: <vscale x 4 x bfloat> [[MASKEDOFF:%.*]], <vscale x 4 x bfloat> [[OP1:%.*]], bfloat noundef [[OP2:%.*]], i64 noundef [[VL:%.*]]) #[[ATTR0]] {
+// CHECK-RV64-NEXT: [[ENTRY:.*:]]
+// CHECK-RV64-NEXT: [[TMP0:%.*]] = call <vscale x 4 x bfloat> @llvm.riscv.vfsub.nxv4bf16.bf16.i64(<vscale x 4 x bfloat> [[MASKEDOFF]], <vscale x 4 x bfloat> [[OP1]], bfloat [[OP2]], i64 7, i64 [[VL]])
+// CHECK-RV64-NEXT: ret <vscale x 4 x bfloat> [[TMP0]]
+//
+vbfloat16m1_t test_vfsub_vf_bf16m1_tu(vbfloat16m1_t maskedoff, vbfloat16m1_t op1, __bf16 op2, size_t vl) {
+ return __riscv_vfsub_tu(maskedoff, op1, op2, vl);
+}
+
+// CHECK-RV64-LABEL: define dso_local <vscale x 8 x bfloat> @test_vfsub_vv_bf16m2_tu(
+// CHECK-RV64-SAME: <vscale x 8 x bfloat> [[MASKEDOFF:%.*]], <vscale x 8 x bfloat> [[OP1:%.*]], <vscale x 8 x bfloat> [[OP2:%.*]], i64 noundef [[VL:%.*]]) #[[ATTR0]] {
+// CHECK-RV64-NEXT: [[ENTRY:.*:]]
+// CHECK-RV64-NEXT: [[TMP0:%.*]] = call <vscale x 8 x bfloat> @llvm.riscv.vfsub.nxv8bf16.nxv8bf16.i64(<vscale x 8 x bfloat> [[MASKEDOFF]], <vscale x 8 x bfloat> [[OP1]], <vscale x 8 x bfloat> [[OP2]], i64 7, i64 [[VL]])
+// CHECK-RV64-NEXT: ret <vscale x 8 x bfloat> [[TMP0]]
+//
+vbfloat16m2_t test_vfsub_vv_bf16m2_tu(vbfloat16m2_t maskedoff, vbfloat16m2_t op1, vbfloat16m2_t op2, size_t vl) {
+ return __riscv_vfsub_tu(maskedoff, op1, op2, vl);
+}
+
+// CHECK-RV64-LABEL: define dso_local <vscale x 8 x bfloat> @test_vfsub_vf_bf16m2_tu(
+// CHECK-RV64-SAME: <vscale x 8 x bfloat> [[MASKEDOFF:%.*]], <vscale x 8 x bfloat> [[OP1:%.*]], bfloat noundef [[OP2:%.*]], i64 noundef [[VL:%.*]]) #[[ATTR0]] {
+// CHECK-RV64-NEXT: [[ENTRY:.*:]]
+// CHECK-RV64-NEXT: [[TMP0:%.*]] = call <vscale x 8 x bfloat> @llvm.riscv.vfsub.nxv8bf16.bf16.i64(<vscale x 8 x bfloat> [[MASKEDOFF]], <vscale x 8 x bfloat> [[OP1]], bfloat [[OP2]], i64 7, i64 [[VL]])
+// CHECK-RV64-NEXT: ret <vscale x 8 x bfloat> [[TMP0]]
+//
+vbfloat16m2_t test_vfsub_vf_bf16m2_tu(vbfloat16m2_t maskedoff, vbfloat16m2_t op1, __bf16 op2, size_t vl) {
+ return __riscv_vfsub_tu(maskedoff, op1, op2, vl);
+}
+
+// CHECK-RV64-LABEL: define dso_local <vscale x 16 x bfloat> @test_vfsub_vv_bf16m4_tu(
+// CHECK-RV64-SAME: <vscale x 16 x bfloat> [[MASKEDOFF:%.*]], <vscale x 16 x bfloat> [[OP1:%.*]], <vscale x 16 x bfloat> [[OP2:%.*]], i64 noundef [[VL:%.*]]) #[[ATTR0]] {
+// CHECK-RV64-NEXT: [[ENTRY:.*:]]
+// CHECK-RV64-NEXT: [[TMP0:%.*]] = call <vscale x 16 x bfloat> @llvm.riscv.vfsub.nxv16bf16.nxv16bf16.i64(<vscale x 16 x bfloat> [[MASKEDOFF]], <vscale x 16 x bfloat> [[OP1]], <vscale x 16 x bfloat> [[OP2]], i64 7, i64 [[VL]])
+// CHECK-RV64-NEXT: ret <vscale x 16 x bfloat> [[TMP0]]
+//
+vbfloat16m4_t test_vfsub_vv_bf16m4_tu(vbfloat16m4_t maskedoff, vbfloat16m4_t op1, vbfloat16m4_t op2, size_t vl) {
+ return __riscv_vfsub_tu(maskedoff, op1, op2, vl);
+}
+
+// CHECK-RV64-LABEL: define dso_local <vscale x 16 x bfloat> @test_vfsub_vf_bf16m4_tu(
+// CHECK-RV64-SAME: <vscale x 16 x bfloat> [[MASKEDOFF:%.*]], <vscale x 16 x bfloat> [[OP1:%.*]], bfloat noundef [[OP2:%.*]], i64 noundef [[VL:%.*]]) #[[ATTR0]] {
+// CHECK-RV64-NEXT: [[ENTRY:.*:]]
+// CHECK-RV64-NEXT: [[TMP0:%.*]] = call <vscale x 16 x bfloat> @llvm.riscv.vfsub.nxv16bf16.bf16.i64(<vscale x 16 x bfloat> [[MASKEDOFF]], <vscale x 16 x bfloat> [[OP1]], bfloat [[OP2]], i64 7, i64 [[VL]])
+// CHECK-RV64-NEXT: ret <vscale x 16 x bfloat> [[TMP0]]
+//
+vbfloat16m4_t test_vfsub_vf_bf16m4_tu(vbfloat16m4_t maskedoff, vbfloat16m4_t op1, __bf16 op2, size_t vl) {
+ return __riscv_vfsub_tu(maskedoff, op1, op2, vl);
+}
+
+// CHECK-RV64-LABEL: define dso_local <vscale x 32 x bfloat> @test_vfsub_vv_bf16m8_tu(
+// CHECK-RV64-SAME: <vscale x 32 x bfloat> [[MASKEDOFF:%.*]], <vscale x 32 x bfloat> [[OP1:%.*]], <vscale x 32 x bfloat> [[OP2:%.*]], i64 noundef [[VL:%.*]]) #[[ATTR0]] {
+// CHECK-RV64-NEXT: [[ENTRY:.*:]]
+// CHECK-RV64-NEXT: [[TMP0:%.*]] = call <vscale x 32 x bfloat> @llvm.riscv.vfsub.nxv32bf16.nxv32bf16.i64(<vscale x 32 x bfloat> [[MASKEDOFF]], <vscale x 32 x bfloat> [[OP1]], <vscale x 32 x bfloat> [[OP2]], i64 7, i64 [[VL]])
+// CHECK-RV64-NEXT: ret <vscale x 32 x bfloat> [[TMP0]]
+//
+vbfloat16m8_t test_vfsub_vv_bf16m8_tu(vbfloat16m8_t maskedoff, vbfloat16m8_t op1, vbfloat16m8_t op2, size_t vl) {
+ return __riscv_vfsub_tu(maskedoff, op1, op2, vl);
+}
+
+// CHECK-RV64-LABEL: define dso_local <vscale x 32 x bfloat> @test_vfsub_vf_bf16m8_tu(
+// CHECK-RV64-SAME: <vscale x 32 x bfloat> [[MASKEDOFF:%.*]], <vscale x 32 x bfloat> [[OP1:%.*]], bfloat noundef [[OP2:%.*]], i64 noundef [[VL:%.*]]) #[[ATTR0]] {
+// CHECK-RV64-NEXT: [[ENTRY:.*:]]
+// CHECK-RV64-NEXT: [[TMP0:%.*]] = call <vscale x 32 x bfloat> @llvm.riscv.vfsub.nxv32bf16.bf16.i64(<vscale x 32 x bfloat> [[MASKEDOFF]], <vscale x 32 x bfloat> [[OP1]], bfloat [[OP2]], i64 7, i64 [[VL]])
+// CHECK-RV64-NEXT: ret <vscale x 32 x bfloat> [[TMP0]]
+//
+vbfloat16m8_t test_vfsub_vf_bf16m8_tu(vbfloat16m8_t maskedoff, vbfloat16m8_t op1, __bf16 op2, size_t vl) {
+ return __riscv_vfsub_tu(maskedoff, op1, op2, vl);
+}
+
+// CHECK-RV64-LABEL: define dso_local <vscale x 1 x bfloat> @test_vfsub_vv_bf16mf4_tum(
+// CHECK-RV64-SAME: <vscale x 1 x i1> [[MASK:%.*]], <vscale x 1 x bfloat> [[MASKEDOFF:%.*]], <vscale x 1 x bfloat> [[OP1:%.*]], <vscale x 1 x bfloat> [[OP2:%.*]], i64 noundef [[VL:%.*]]) #[[ATTR0]] {
+// CHECK-RV64-NEXT: [[ENTRY:.*:]]
+// CHECK-RV64-NEXT: [[TMP0:%.*]] = call <vscale x 1 x bfloat> @llvm.riscv.vfsub.mask.nxv1bf16.nxv1bf16.i64(<vscale x 1 x bfloat> [[MASKEDOFF]], <vscale x 1 x bfloat> [[OP1]], <vscale x 1 x bfloat> [[OP2]], <vscale x 1 x i1> [[MASK]], i64 7, i64 [[VL]], i64 2)
+// CHECK-RV64-NEXT: ret <vscale x 1 x bfloat> [[TMP0]]
+//
+vbfloat16mf4_t test_vfsub_vv_bf16mf4_tum(vbool64_t mask, vbfloat16mf4_t maskedoff, vbfloat16mf4_t op1, vbfloat16mf4_t op2, size_t vl) {
+ return __riscv_vfsub_tum(mask, maskedoff, op1, op2, vl);
+}
+
+// CHECK-RV64-LABEL: define dso_local <vscale x 1 x bfloat> @test_vfsub_vf_bf16mf4_tum(
+// CHECK-RV64-SAME: <vscale x 1 x i1> [[MASK:%.*]], <vscale x 1 x bfloat> [[MASKEDOFF:%.*]], <vscale x 1 x bfloat> [[OP1:%.*]], bfloat noundef [[OP2:%.*]], i64 noundef [[VL:%.*]]) #[[ATTR0]] {
+// CHECK-RV64-NEXT: [[ENTRY:.*:]]
+// CHECK-RV64-NEXT: [[TMP0:%.*]] = call <vscale x 1 x bfloat> @llvm.riscv.vfsub.mask.nxv1bf16.bf16.i64(<vscale x 1 x bfloat> [[MASKEDOFF]], <vscale x 1 x bfloat> [[OP1]], bfloat [[OP2]], <vscale x 1 x i1> [[MASK]], i64 7, i64 [[VL]], i64 2)
+// CHECK-RV64-NEXT: ret <vscale x 1 x bfloat> [[TMP0]]
+//
+vbfloat16mf4_t test_vfsub_vf_bf16mf4_tum(vbool64_t mask, vbfloat16mf4_t maskedoff, vbfloat16mf4_t op1, __bf16 op2, size_t vl) {
+ return __riscv_vfsub_tum(mask, maskedoff, op1, op2, vl);
+}
+
+// CHECK-RV64-LABEL: define dso_local <vscale x 2 x bfloat> @test_vfsub_vv_bf16mf2_tum(
+// CHECK-RV64-SAME: <vscale x 2 x i1> [[MASK:%.*]], <vscale x 2 x bfloat> [[MASKEDOFF:%.*]], <vscale x 2 x bfloat> [[OP1:%.*]], <vscale x 2 x bfloat> [[OP2:%.*]], i64 noundef [[VL:%.*]]) #[[ATTR0]] {
+// CHECK-RV64-NEXT: [[ENTRY:.*:]]
+// CHECK-RV64-NEXT: [[TMP0:%.*]] = call <vscale x 2 x bfloat> @llvm.riscv.vfsub.mask.nxv2bf16.nxv2bf16.i64(<vscale x 2 x bfloat> [[MASKEDOFF]], <vscale x 2 x bfloat> [[OP1]], <vscale x 2 x bfloat> [[OP2]], <vscale x 2 x i1> [[MASK]], i64 7, i64 [[VL]], i64 2)
+// CHECK-RV64-NEXT: ret <vscale x 2 x bfloat> [[TMP0]]
+//
+vbfloat16mf2_t test_vfsub_vv_bf16mf2_tum(vbool32_t mask, vbfloat16mf2_t maskedoff, vbfloat16mf2_t op1, vbfloat16mf2_t op2, size_t vl) {
+ return __riscv_vfsub_tum(mask, maskedoff, op1, op2, vl);
+}
+
+// CHECK-RV64-LABEL: define dso_local <vscale x 2 x bfloat> @test_vfsub_vf_bf16mf2_tum(
+// CHECK-RV64-SAME: <vscale x 2 x i1> [[MASK:%.*]], <vscale x 2 x bfloat> [[MASKEDOFF:%.*]], <vscale x 2 x bfloat> [[OP1:%.*]], bfloat noundef [[OP2:%.*]], i64 noundef [[VL:%.*]]) #[[ATTR0]] {
+// CHECK-RV64-NEXT: [[ENTRY:.*:]]
+// CHECK-RV64-NEXT: [[TMP0:%.*]] = call <vscale x 2 x bfloat> @llvm.riscv.vfsub.mask.nxv2bf16.bf16.i64(<vscale x 2 x bfloat> [[MASKEDOFF]], <vscale x 2 x bfloat> [[OP1]], bfloat [[OP2]], <vscale x 2 x i1> [[MASK]], i64 7, i64 [[VL]], i64 2)
+// CHECK-RV64-NEXT: ret <vscale x 2 x bfloat> [[TMP0]]
+//
+vbfloat16mf2_t test_vfsub_vf_bf16mf2_tum(vbool32_t mask, vbfloat16mf2_t maskedoff, vbfloat16mf2_t op1, __bf16 op2, size_t vl) {
+ return __riscv_vfsub_tum(mask, maskedoff, op1, op2, vl);
+}
+
+// CHECK-RV64-LABEL: define dso_local <vscale x 4 x bfloat> @test_vfsub_vv_bf16m1_tum(
+// CHECK-RV64-SAME: <vscale x 4 x i1> [[MASK:%.*]], <vscale x 4 x bfloat> [[MASKEDOFF:%.*]], <vscale x 4 x bfloat> [[OP1:%.*]], <vscale x 4 x bfloat> [[OP2:%.*]], i64 noundef [[VL:%.*]]) #[[ATTR0]] {
+// CHECK-RV64-NEXT: [[ENTRY:.*:]]
+// CHECK-RV64-NEXT: [[TMP0:%.*]] = call <vscale x 4 x bfloat> @llvm.riscv.vfsub.mask.nxv4bf16.nxv4bf16.i64(<vscale x 4 x bfloat> [[MASKEDOFF]], <vscale x 4 x bfloat> [[OP1]], <vscale x 4 x bfloat> [[OP2]], <vscale x 4 x i1> [[MASK]], i64 7, i64 [[VL]], i64 2)
+// CHECK-RV64-NEXT: ret <vscale x 4 x bfloat> [[TMP0]]
+//
+vbfloat16m1_t test_vfsub_vv_bf16m1_tum(vbool16_t mask, vbfloat16m1_t maskedoff, vbfloat16m1_t op1, vbfloat16m1_t op2, size_t vl) {
+ return __riscv_vfsub_tum(mask, maskedoff, op1, op2, vl);
+}
+
+// CHECK-RV64-LABEL: define dso_local <vscale x 4 x bfloat> @test_vfsub_vf_bf16m1_tum(
+// CHECK-RV64-SAME: <vscale x 4 x i1> [[MASK:%.*]], <vscale x 4 x bfloat> [[MASKEDOFF:%.*]], <vscale x 4 x bfloat> [[OP1:%.*]], bfloat noundef [[OP2:%.*]], i64 noundef [[VL:%.*]]) #[[ATTR0]] {
+// CHECK-RV64-NEXT: [[ENTRY:.*:]]
+// CHECK-RV64-NEXT: [[TMP0:%.*]] = call <vscale x 4 x bfloat> @llvm.riscv.vfsub.mask.nxv4bf16.bf16.i64(<vscale x 4 x bfloat> [[MASKEDOFF]], <vscale x 4 x bfloat> [[OP1]], bfloat [[OP2]], <vscale x 4 x i1> [[MASK]], i64 7, i64 [[VL]], i64 2)
+// CHECK-RV64-NEXT: ret <vscale x 4 x bfloat> [[TMP0]]
+//
+vbfloat16m1_t test_vfsub_vf_bf16m1_tum(vbool16_t mask, vbfloat16m1_t maskedoff, vbfloat16m1_t op1, __bf16 op2, size_t vl) {
+ return __riscv_vfsub_tum(mask, maskedoff, op1, op2, vl);
+}
+
+// CHECK-RV64-LABEL: define dso_local <vscale x 8 x bfloat> @test_vfsub_vv_bf16m2_tum(
+// CHECK-RV64-SAME: <vscale x 8 x i1> [[MASK:%.*]], <vscale x 8 x bfloat> [[MASKEDOFF:%.*]], <vscale x 8 x bfloat> [[OP1:%.*]], <vscale x 8 x bfloat> [[OP2:%.*]], i64 noundef [[VL:%.*]]) #[[ATTR0]] {
+// CHECK-RV64-NEXT: [[ENTRY:.*:]]
+// CHECK-RV64-NEXT: [[TMP0:%.*]] = call <vscale x 8 x bfloat> @llvm.riscv.vfsub.mask.nxv8bf16.nxv8bf16.i64(<vscale x 8 x bfloat> [[MASKEDOFF]], <vscale x 8 x bfloat> [[OP1]], <vscale x 8 x bfloat> [[OP2]], <vscale x 8 x i1> [[MASK]], i64 7, i64 [[VL]], i64 2)
+// CHECK-RV64-NEXT: ret <vscale x 8 x bfloat> [[TMP0]]
+//
+vbfloat16m2_t test_vfsub_vv_bf16m2_tum(vbool8_t mask, vbfloat16m2_t maskedoff, vbfloat16m2_t op1, vbfloat16m2_t op2, size_t vl) {
+ return __riscv_vfsub_tum(mask, maskedoff, op1, op2, vl);
+}
+
+// CHECK-RV64-LABEL: define dso_local <vscale x 8 x bfloat> @test_vfsub_vf_bf16m2_tum(
+// CHECK-RV64-SAME: <vscale x 8 x i1> [[MASK:%.*]], <vscale x 8 x bfloat> [[MASKEDOFF:%.*]], <vscale x 8 x bfloat> [[OP1:%.*]], bfloat noundef [[OP2:%.*]], i64 noundef [[VL:%.*]]) #[[ATTR0]] {
+// CHECK-RV64-NEXT: [[ENTRY:.*:]]
+// CHECK-RV64-NEXT: [[TMP0:%.*]] = call <vscale x 8 x bfloat> @llvm.riscv.vfsub.mask.nxv8bf16.bf16.i64(<vscale x 8 x bfloat> [[MASKEDOFF]], <vscale x 8 x bfloat> [[OP1]], bfloat [[OP2]], <vscale x 8 x i1> [[MASK]], i64 7, i64 [[VL]], i64 2)
+// CHECK-RV64-NEXT: ret <vscale x 8 x bfloat> [[TMP0]]
+//
+vbfloat16m2_t test_vfsub_vf_bf16m2_tum(vbool8_t mask, vbfloat16m2_t maskedoff, vbfloat16m2_t op1, __bf16 op2, size_t vl) {
+ return __riscv_vfsub_tum(mask, maskedoff, op1, op2, vl);
+}
+
+// CHECK-RV64-LABEL: define dso_local <vscale x 16 x bfloat> @test_vfsub_vv_bf16m4_tum(
+// CHECK-RV64-SAME: <vscale x 16 x i1> [[MASK:%.*]], <vscale x 16 x bfloat> [[MASKEDOFF:%.*]], <vscale x 16 x bfloat> [[OP1:%.*]], <vscale x 16 x bfloat> [[OP2:%.*]], i64 noundef [[VL:%.*]]) #[[ATTR0]] {
+// CHECK-RV64-NEXT: [[ENTRY:.*:]]
+// CHECK-RV64-NEXT: [[TMP0:%.*]] = call <vscale x 16 x bfloat> @llvm.riscv.vfsub.mask.nxv16bf16.nxv16bf16.i64(<vscale x 16 x bfloat> [[MASKEDOFF]], <vscale x 16 x bfloat> [[OP1]], <vscale x 16 x bfloat> [[OP2]], <vscale x 16 x i1> [[MASK]], i64 7, i64 [[VL]], i64 2)
+// CHECK-RV64-NEXT: ret <vscale x 16 x bfloat> [[TMP0]]
+//
+vbfloat16m4_t test_vfsub_vv_bf16m4_tum(vbool4_t mask, vbfloat16m4_t maskedoff, vbfloat16m4_t op1, vbfloat16m4_t op2, size_t vl) {
+ return __riscv_vfsub_tum(mask, maskedoff, op1, op2, vl);
+}
+
+// CHECK-RV64-LABEL: define dso_local <vscale x 16 x bfloat> @test_vfsub_vf_bf16m4_tum(
+// CHECK-RV64-SAME: <vscale x 16 x i1> [[MASK:%.*]], <vscale x 16 x bfloat> [[MASKEDOFF:%.*]], <vscale x 16 x bfloat> [[OP1:%.*]], bfloat noundef [[OP2:%.*]], i64 noundef [[VL:%.*]]) #[[ATTR0]] {
+// CHECK-RV64-NEXT: [[ENTRY:.*:]]
+// CHECK-RV64-NEXT: [[TMP0:%.*]] = call <vscale x 16 x bfloat> @llvm.riscv.vfsub.mask.nxv16bf16.bf16.i64(<vscale x 16 x bfloat> [[MASKEDOFF]], <vscale x 16 x bfloat> [[OP1]], bfloat [[OP2]], <vscale x 16 x i1> [[MASK]], i64 7, i64 [[VL]], i64 2)
+// CHECK-RV64-NEXT: ret <vscale x 16 x bfloat> [[TMP0]]
+//
+vbfloat16m4_t test_vfsub_vf_bf16m4_tum(vbool4_t mask, vbfloat16m4_t maskedoff, vbfloat16m4_t op1, __bf16 op2, size_t vl) {
+ return __riscv_vfsub_tum(mask, maskedoff, op1, op2, vl);
+}
+
+// CHECK-RV64-LABEL: define dso_local <vscale x 32 x bfloat> @test_vfsub_vv_bf16m8_tum(
+// CHECK-RV64-SAME: <vscale x 32 x i1> [[MASK:%.*]], <vscale x 32 x bfloat> [[MASKEDOFF:%.*]], <vscale x 32 x bfloat> [[OP1:%.*]], <vscale x 32 x bfloat> [[OP2:%.*]], i64 noundef [[VL:%.*]]) #[[ATTR0]] {
+// CHECK-RV64-NEXT: [[ENTRY:.*:]]
+// CHECK-RV64-NEXT: [[TMP0:%.*]] = call <vscale x 32 x bfloat> @llvm.riscv.vfsub.mask.nxv32bf16.nxv32bf16.i64(<vscale x 32 x bfloat> [[MASKEDOFF]], <vscale x 32 x bfloat> [[OP1]], <vscale x 32 x bfloat> [[OP2]], <vscale x 32 x i1> [[MASK]], i64 7, i64 [[VL]], i64 2)
+// CHECK-RV64-NEXT: ret <vscale x 32 x bfloat> [[TMP0]]
+//
+vbfloat16m8_t test_vfsub_vv_bf16m8_tum(vbool2_t mask, vbfloat16m8_t maskedoff, vbfloat16m8_t op1, vbfloat16m8_t op2, size_t vl) {
+ return __riscv_vfsub_tum(mask, maskedoff, op1, op2, vl);
+}
+
+// CHECK-RV64-LABEL: define dso_local <vscale x 32 x bfloat> @test_vfsub_vf_bf16m8_tum(
+// CHECK-RV64-SAME: <vscale x 32 x i1> [[MASK:%.*]], <vscale x 32 x bfloat> [[MASKEDOFF:%.*]], <vscale x 32 x bfloat> [[OP1:%.*]], bfloat noundef [[OP2:%.*]], i64 noundef [[VL:%.*]]) #[[ATTR0]] {
+// CHECK-RV64-NEXT: [[ENTRY:.*:]]
+// CHECK-RV64-NEXT: [[TMP0:%.*]] = call <vscale x 32 x bfloat> @llvm.riscv.vfsub.mask.nxv32bf16.bf16.i64(<vscale x 32 x bfloat> [[MASKEDOFF]], <vscale x 32 x bfloat> [[OP1]], bfloat [[OP2]], <vscale x 32 x i1> [[MASK]], i64 7, i64 [[VL]], i64 2)
+// CHECK-RV64-NEXT: ret <vscale x 32 x bfloat> [[TMP0]]
+//
+vbfloat16m8_t test_vfsub_vf_bf16m8_tum(vbool2_t mask, vbfloat16m8_t maskedoff, vbfloat16m8_t op1, __bf16 op2, size_t vl) {
+ return __riscv_vfsub_tum(mask, maskedoff, op1, op2, vl);
+}
+
+// CHECK-RV64-LABEL: define dso_local <vscale x 1 x bfloat> @test_vfsub_vv_bf16mf4_tumu(
+// CHECK-RV64-SAME: <vscale x 1 x i1> [[MASK:%.*]], <vscale x 1 x bfloat> [[MASKEDOFF:%.*]], <vscale x 1 x bfloat> [[OP1:%.*]], <vscale x 1 x bfloat> [[OP2:%.*]], i64 noundef [[VL:%.*]]) #[[ATTR0]] {
+// CHECK-RV64-NEXT: [[ENTRY:.*:]]
+// CHECK-RV64-NEXT: [[TMP0:%.*]] = call <vscale x 1 x bfloat> @llvm.riscv.vfsub.mask.nxv1bf16.nxv1bf16.i64(<vscale x 1 x bfloat> [[MASKEDOFF]], <vscale x 1 x bfloat> [[OP1]], <vscale x 1 x bfloat> [[OP2]], <vscale x 1 x i1> [[MASK]], i64 7, i64 [[VL]], i64 0)
+// CHECK-RV64-NEXT: ret <vscale x 1 x bfloat> [[TMP0]]
+//
+vbfloat16mf4_t test_vfsub_vv_bf16mf4_tumu(vbool64_t mask, vbfloat16mf4_t maskedoff, vbfloat16mf4_t op1, vbfloat16mf4_t op2, size_t vl) {
+ return __riscv_vfsub_tumu(mask, maskedoff, op1, op2, vl);
+}
+
+// CHECK-RV64-LABEL: define dso_local <vscale x 1 x bfloat> @test_vfsub_vf_bf16mf4_tumu(
+// CHECK-RV64-SAME: <vscale x 1 x i1> [[MASK:%.*]], <vscale x 1 x bfloat> [[MASKEDOFF:%.*]], <vscale x 1 x bfloat> [[OP1:%.*]], bfloat noundef [[OP2:%.*]], i64 noundef [[VL:%.*]]) #[[ATTR0]] {
+// CHECK-RV64-NEXT: [[ENTRY:.*:]]
+// CHECK-RV64-NEXT: [[TMP0:%.*]] = call <vscale x 1 x bfloat> @llvm.riscv.vfsub.mask.nxv1bf16.bf16.i64(<vscale x 1 x bfloat> [[MASKEDOFF]], <vscale x 1 x bfloat> [[OP1]], bfloat [[OP2]], <vscale x 1 x i1> [[MASK]], i64 7, i64 [[VL]], i64 0)
+// CHECK-RV64-NEXT: ret <vscale x 1 x bfloat> [[TMP0]]
+//
+vbfloat16mf4_t test_vfsub_vf_bf16mf4_tumu(vbool64_t mask, vbfloat16mf4_t maskedoff, vbfloat16mf4_t op1, __bf16 op2, size_t vl) {
+ return __riscv_vfsub_tumu(mask, maskedoff, op1, op2, vl);
+}
+
+// CHECK-RV64-LABEL: define dso_local <vscale x 2 x bfloat> @test_vfsub_vv_bf16mf2_tumu(
+// CHECK-RV64-SAME: <vscale x 2 x i1> [[MASK:%.*]], <vscale x 2 x bfloat> [[MASKEDOFF:%.*]], <vscale x 2 x bfloat> [[OP1:%.*]], <vscale x 2 x bfloat> [[OP2:%.*]], i64 noundef [[VL:%.*]]) #[[ATTR0]] {
+// CHECK-RV64-NEXT: [[ENTRY:.*:]]
+// CHECK-RV64-NEXT: [[TMP0:%.*]] = call <vscale x 2 x bfloat> @llvm.riscv.vfsub.mask.nxv2bf16.nxv2bf16.i64(<vscale x 2 x bfloat> [[MASKEDOFF]], <vscale x 2 x bfloat> [[OP1]], <vscale x 2 x bfloat> [[OP2]], <vscale x 2 x i1> [[MASK]], i64 7, i64 [[VL]], i64 0)
+// CHECK-RV64-NEXT: ret <vscale x 2 x bfloat> [[TMP0]]
+//
+vbfloat16mf2_t test_vfsub_vv_bf16mf2_tumu(vbool32_t mask, vbfloat16mf2_t maskedoff, vbfloat16mf2_t op1, vbfloat16mf2_t op2, size_t vl) {
+ return __riscv_vfsub_tumu(mask, maskedoff, op1, op2, vl);
+}
+
+// CHECK-RV64-LABEL: define dso_local <vscale x 2 x bfloat> @test_vfsub_vf_bf16mf2_tumu(
+// CHECK-RV64-SAME: <vscale x 2 x i1> [[MASK:%.*]], <vscale x 2 x bfloat> [[MASKEDOFF:%.*]], <vscale x 2 x bfloat> [[OP1:%.*]], bfloat noundef [[OP2:%.*]], i64 noundef [[VL:%.*]]) #[[ATTR0]] {
+// CHECK-RV64-NEXT: [[ENTRY:.*:]]
+// CHECK-RV64-NEXT: [[TMP0:%.*]] = call <vscale x 2 x bfloat> @llvm.riscv.vfsub.mask.nxv2bf16.bf16.i64(<vscale x 2 x bfloat> [[MASKEDOFF]], <vscale x 2 x bfloat> [[OP1]], bfloat [[OP2]], <vscale x 2 x i1> [[MASK]], i64 7, i64 [[VL]], i64 0)
+// CHECK-RV64-NEXT: ret <vscale x 2 x bfloat> [[TMP0]]
+//
+vbfloat16mf2_t test_vfsub_vf_bf16mf2_tumu(vbool32_t mask, vbfloat16mf2_t maskedoff, vbfloat16mf2_t op1, __bf16 op2, size_t vl) {
+ return __riscv_vfsub_tumu(mask, maskedoff, op1, op2, vl);
+}
+
+// CHECK-RV64-LABEL: define dso_local <vscale x 4 x bfloat> @test_vfsub_vv_bf16m1_tumu(
+// CHECK-RV64-SAME: <vscale x 4 x i1> [[MASK:%.*]], <vscale x 4 x bfloat> [[MASKEDOFF:%.*]], <vscale x 4 x bfloat> [[OP1:%.*]], <vscale x 4 x bfloat> [[OP2:%.*]], i64 noundef [[VL:%.*]]) #[[ATTR0]] {
+// CHECK-RV64-NEXT: [[ENTRY:.*:]]
+// CHECK-RV64-NEXT: [[TMP0:%.*]] = call <vscale x 4 x bfloat> @llvm.riscv.vfsub.mask.nxv4bf16.nxv4bf16.i64(<vscale x 4 x bfloat> [[MASKEDOFF]], <vscale x 4 x bfloat> [[OP1]], <vscale x 4 x bfloat> [[OP2]], <vscale x 4 x i1> [[MASK]], i64 7, i64 [[VL]], i64 0)
+// CHECK-RV64-NEXT: ret <vscale x 4 x bfloat> [[TMP0]]
+//
+vbfloat16m1_t test_vfsub_vv_bf16m1_tumu(vbool16_t mask, vbfloat16m1_t maskedoff, vbfloat16m1_t op1, vbfloat16m1_t op2, size_t vl) {
+ return __riscv_vfsub_tumu(mask, maskedoff, op1, op2, vl);
+}
+
+// CHECK-RV64-LABEL: define dso_local <vscale x 4 x bfloat> @test_vfsub_vf_bf16m1_tumu(
+// CHECK-RV64-SAME: <vscale x 4 x i1> [[MASK:%.*]], <vscale x 4 x bfloat> [[MASKEDOFF:%.*]], <vscale x 4 x bfloat> [[OP1:%.*]], bfloat noundef [[OP2:%.*]], i64 noundef [[VL:%.*]]) #[[ATTR0]] {
+// CHECK-RV64-NEXT: [[ENTRY:.*:]]
+// CHECK-RV64-NEXT: [[TMP0:%.*]] = call <vscale x 4 x bfloat> @llvm.riscv.vfsub.mask.nxv4bf16.bf16.i64(<vscale x 4 x bfloat> [[MASKEDOFF]], <vscale x 4 x bfloat> [[OP1]], bfloat [[OP2]], <vscale x 4 x i1> [[MASK]], i64 7, i64 [[VL]], i64 0)
+// CHECK-RV64-NEXT: ret <vscale x 4 x bfloat> [[TMP0]]
+//
+vbfloat16m1_t test_vfsub_vf_bf16m1_tumu(vbool16_t mask, vbfloat16m1_t maskedoff, vbfloat16m1_t op1, __bf16 op2, size_t vl) {
+ return __riscv_vfsub_tumu(mask, maskedoff, op1, op2, vl);
+}
+
+// CHECK-RV64-LABEL: define dso_local <vscale x 8 x bfloat> @test_vfsub_vv_bf16m2_tumu(
+// CHECK-RV64-SAME: <vscale x 8 x i1> [[MASK:%.*]], <vscale x 8 x bfloat> [[MASKEDOFF:%.*]], <vscale x 8 x bfloat> [[OP1:%.*]], <vscale x 8 x bfloat> [[OP2:%.*]], i64 noundef [[VL:%.*]]) #[[ATTR0]] {
+// CHECK-RV64-NEXT: [[ENTRY:.*:]]
+// CHECK-RV64-NEXT: [[TMP0:%.*]] = call <vscale x 8 x bfloat> @llvm.riscv.vfsub.mask.nxv8bf16.nxv8bf16.i64(<vscale x 8 x bfloat> [[MASKEDOFF]], <vscale x 8 x bfloat> [[OP1]], <vscale x 8 x bfloat> [[OP2]], <vscale x 8 x i1> [[MASK]], i64 7, i64 [[VL]], i64 0)
+// CHECK-RV64-NEXT: ret <vscale x 8 x bfloat> [[TMP0]]
+//
+vbfloat16m2_t test_vfsub_vv_bf16m2_tumu(vbool8_t mask, vbfloat16m2_t maskedoff, vbfloat16m2_t op1, vbfloat16m2_t op2, size_t vl) {
+ return __riscv_vfsub_tumu(mask, maskedoff, op1, op2, vl);
+}
+
+// CHECK-RV64-LABEL: define dso_local <vscale x 8 x bfloat> @test_vfsub_vf_bf16m2_tumu(
+// CHECK-RV64-SAME: <vscale x 8 x i1> [[MASK:%.*]], <vscale x 8 x bfloat> [[MASKEDOFF:%.*]], <vscale x 8 x bfloat> [[OP1:%.*]], bfloat noundef [[OP2:%.*]], i64 noundef [[VL:%.*]]) #[[ATTR0]] {
+// CHECK-RV64-NEXT: [[ENTRY:.*:]]
+// CHECK-RV64-NEXT: [[TMP0:%.*]] = call <vscale x 8 x bfloat> @llvm.riscv.vfsub.mask.nxv8bf16.bf16.i64(<vscale x 8 x bfloat> [[MASKEDOFF]], <vscale x 8 x bfloat> [[OP1]], bfloat [[OP2]], <vscale x 8 x i1> [[MASK]], i64 7, i64 [[VL]], i64 0)
+// CHECK-RV64-NEXT: ret <vscale x 8 x bfloat> [[TMP0]]
+//
+vbfloat16m2_t test_vfsub_vf_bf16m2_tumu(vbool8_t mask, vbfloat16m2_t maskedoff, vbfloat16m2_t op1, __bf16 op2, size_t vl) {
+ return __riscv_vfsub_tumu(mask, maskedoff, op1, op2, vl);
+}
+
+// CHECK-RV64-LABEL: define dso_local <vscale x 16 x bfloat> @test_vfsub_vv_bf16m4_tumu(
+// CHECK-RV64-SAME: <vscale x 16 x i1> [[MASK:%.*]], <vscale x 16 x bfloat> [[MASKEDOFF:%.*]], <vscale x 16 x bfloat> [[OP1:%.*]], <vscale x 16 x bfloat> [[OP2:%.*]], i64 noundef [[VL:%.*]]) #[[ATTR0]] {
+// CHECK-RV64-NEXT: [[ENTRY:.*:]]
+// CHECK-RV64-NEXT: [[TMP0:%.*]] = call <vscale x 16 x bfloat> @llvm.riscv.vfsub.mask.nxv16bf16.nxv16bf16.i64(<vscale x 16 x bfloat> [[MASKEDOFF]], <vscale x 16 x bfloat> [[OP1]], <vscale x 16 x bfloat> [[OP2]], <vscale x 16 x i1> [[MASK]], i64 7, i64 [[VL]], i64 0)
+// CHECK-RV64-NEXT: ret <vscale x 16 x bfloat> [[TMP0]]
+//
+vbfloat16m4_t test_vfsub_vv_bf16m4_tumu(vbool4_t mask, vbfloat16m4_t maskedoff, vbfloat16m4_t op1, vbfloat16m4_t op2, size_t vl) {
+ return __riscv_vfsub_tumu(mask, maskedoff, op1, op2, vl);
+}
+
+// CHECK-RV64-LABEL: define dso_local <vscale x 16 x bfloat> @test_vfsub_vf_bf16m4_tumu(
+// CHECK-RV64-SAME: <vscale x 16 x i1> [[MASK:%.*]], <vscale x 16 x bfloat> [[MASKEDOFF:%.*]], <vscale x 16 x bfloat> [[OP1:%.*]], bfloat noundef [[OP2:%.*]], i64 noundef [[VL:%.*]]) #[[ATTR0]] {
+// CHECK-RV64-NEXT: [[ENTRY:.*:]]
+// CHECK-RV64-NEXT: [[TMP0:%.*]] = call <vscale x 16 x bfloat> @llvm.riscv.vfsub.mask.nxv16bf16.bf16.i64(<vscale x 16 x bfloat> [[MASKEDOFF]], <vscale x 16 x bfloat> [[OP1]], bfloat [[OP2]], <vscale x 16 x i1> [[MASK]], i64 7, i64 [[VL]], i64 0)
+// CHECK-RV64-NEXT: ret <vscale x 16 x bfloat> [[TMP0]]
+//
+vbfloat16m4_t test_vfsub_vf_bf16m4_tumu(vbool4_t mask, vbfloat16m4_t maskedoff, vbfloat16m4_t op1, __bf16 op2, size_t vl) {
+ return __riscv_vfsub_tumu(mask, maskedoff, op1, op2, vl);
+}
+
+// CHECK-RV64-LABEL: define dso_local <vscale x 32 x bfloat> @test_vfsub_vv_bf16m8_tumu(
+// CHECK-RV64-SAME: <vscale x 32 x i1> [[MASK:%.*]], <vscale x 32 x bfloat> [[MASKEDOFF:%.*]], <vscale x 32 x bfloat> [[OP1:%.*]], <vscale x 32 x bfloat> [[OP2:%.*]], i64 noundef [[VL:%.*]]) #[[ATTR0]] {
+// CHECK-RV64-NEXT: [[ENTRY:.*:]]
+// CHECK-RV64-NEXT: [[TMP0:%.*]] = call <vscale x 32 x bfloat> @llvm.riscv.vfsub.mask.nxv32bf16.nxv32bf16.i64(<vscale x 32 x bfloat> [[MASKEDOFF]], <vscale x 32 x bfloat> [[OP1]], <vscale x 32 x bfloat> [[OP2]], <vscale x 32 x i1> [[MASK]], i64 7, i64 [[VL]], i64 0)
+// CHECK-RV64-NEXT: ret <vscale x 32 x bfloat> [[TMP0]]
+//
+vbfloat16m8_t test_vfsub_vv_bf16m8_tumu(vbool2_t mask, vbfloat16m8_t maskedoff, vbfloat16m8_t op1, vbfloat16m8_t op2, size_t vl) {
+ return __riscv_vfsub_tumu(mask, maskedoff, op1, op2, vl);
+}
+
+// CHECK-RV64-LABEL: define dso_local <vscale x 32 x bfloat> @test_vfsub_vf_bf16m8_tumu(
+// CHECK-RV64-SAME: <vscale x 32 x i1> [[MASK:%.*]], <vscale x 32 x bfloat> [[MASKEDOFF:%.*]], <vscale x 32 x bfloat> [[OP1:%.*]], bfloat noundef [[OP2:%.*]], i64 noundef [[VL:%.*]]) #[[ATTR0]] {
+// CHECK-RV64-NEXT: [[ENTRY:.*:]]
+// CHECK-RV64-NEXT: [[TMP0:%.*]] = call <vscale x 32 x bfloat> @llvm.riscv.vfsub.mask.nxv32bf16.bf16.i64(<vscale x 32 x bfloat> [[MASKEDOFF]], <vscale x 32 x bfloat> [[OP1]], bfloat [[OP2]], <vscale x 32 x i1> [[MASK]], i64 7, i64 [[VL]], i64 0)
+// CHECK-RV64-NEXT: ret <vscale x 32 x bfloat> [[TMP0]]
+//
+vbfloat16m8_t test_vfsub_vf_bf16m8_tumu(vbool2_t mask, vbfloat16m8_t maskedoff, vbfloat16m8_t op1, __bf16 op2, size_t vl) {
+ return __riscv_vfsub_tumu(mask, maskedoff, op1, op2, vl);
+}
+
+// CHECK-RV64-LABEL: define dso_local <vscale x 1 x bfloat> @test_vfsub_vv_bf16mf4_mu(
+// CHECK-RV64-SAME: <vscale x 1 x i1> [[MASK:%.*]], <vscale x 1 x bfloat> [[MASKEDOFF:%.*]], <vscale x 1 x bfloat> [[OP1:%.*]], <vscale x 1 x bfloat> [[OP2:%.*]], i64 noundef [[VL:%.*]]) #[[ATTR0]] {
+// CHECK-RV64-NEXT: [[ENTRY:.*:]]
+// CHECK-RV64-NEXT: [[TMP0:%.*]] = call <vscale x 1 x bfloat> @llvm.riscv.vfsub.mask.nxv1bf16.nxv1bf16.i64(<vscale x 1 x bfloat> [[MASKEDOFF]], <vscale x 1 x bfloat> [[OP1]], <vscale x 1 x bfloat> [[OP2]], <vscale x 1 x i1> [[MASK]], i64 7, i64 [[VL]], i64 1)
+// CHECK-RV64-NEXT: ret <vscale x 1 x bfloat> [[TMP0]]
+//
+vbfloat16mf4_t test_vfsub_vv_bf16mf4_mu(vbool64_t mask, vbfloat16mf4_t maskedoff, vbfloat16mf4_t op1, vbfloat16mf4_t op2, size_t vl) {
+ return __riscv_vfsub_mu(mask, maskedoff, op1, op2, vl);
+}
+
+// CHECK-RV64-LABEL: define dso_local <vscale x 1 x bfloat> @test_vfsub_vf_bf16mf4_mu(
+// CHECK-RV64-SAME: <vscale x 1 x i1> [[MASK:%.*]], <vscale x 1 x bfloat> [[MASKEDOFF:%.*]], <vscale x 1 x bfloat> [[OP1:%.*]], bfloat noundef [[OP2:%.*]], i64 noundef [[VL:%.*]]) #[[ATTR0]] {
+// CHECK-RV64-NEXT: [[ENTRY:.*:]]
+// CHECK-RV64-NEXT: [[TMP0:%.*]] = call <vscale x 1 x bfloat> @llvm.riscv.vfsub.mask.nxv1bf16.bf16.i64(<vscale x 1 x bfloat> [[MASKEDOFF]], <vscale x 1 x bfloat> [[OP1]], bfloat [[OP2]], <vscale x 1 x i1> [[MASK]], i64 7, i64 [[VL]], i64 1)
+// CHECK-RV64-NEXT: ret <vscale x 1 x bfloat> [[TMP0]]
+//
+vbfloat16mf4_t test_vfsub_vf_bf16mf4_mu(vbool64_t mask, vbfloat16mf4_t maskedoff, vbfloat16mf4_t op1, __bf16 op2, size_t vl) {
+ return __riscv_vfsub_mu(mask, maskedoff, op1, op2, vl);
+}
+
+// CHECK-RV64-LABEL: define dso_local <vscale x 2 x bfloat> @test_vfsub_vv_bf16mf2_mu(
+// CHECK-RV64-SAME: <vscale x 2 x i1> [[MASK:%.*]], <vscale x 2 x bfloat> [[MASKEDOFF:%.*]], <vscale x 2 x bfloat> [[OP1:%.*]], <vscale x 2 x bfloat> [[OP2:%.*]], i64 noundef [[VL:%.*]]) #[[ATTR0]] {
+// CHECK-RV64-NEXT: [[ENTRY:.*:]]
+// CHECK-RV64-NEXT: [[TMP0:%.*]] = call <vscale x 2 x bfloat> @llvm.riscv.vfsub.mask.nxv2bf16.nxv2bf16.i64(<vscale x 2 x bfloat> [[MASKEDOFF]], <vscale x 2 x bfloat> [[OP1]], <vscale x 2 x bfloat> [[OP2]], <vscale x 2 x i1> [[MASK]], i64 7, i64 [[VL]], i64 1)
+// CHECK-RV64-NEXT: ret <vscale x 2 x bfloat> [[TMP0]]
+//
+vbfloat16mf2_t test_vfsub_vv_bf16mf2_mu(vbool32_t mask, vbfloat16mf2_t maskedoff, vbfloat16mf2_t op1, vbfloat16mf2_t op2, size_t vl) {
+ return __riscv_vfsub_mu(mask, maskedoff, op1, op2, vl);
+}
+
+// CHECK-RV64-LABEL: define dso_local <vscale x 2 x bfloat> @test_vfsub_vf_bf16mf2_mu(
+// CHECK-RV64-SAME: <vscale x 2 x i1> [[MASK:%.*]], <vscale x 2 x bfloat> [[MASKEDOFF:%.*]], <vscale x 2 x bfloat> [[OP1:%.*]], bfloat noundef [[OP2:%.*]], i64 noundef [[VL:%.*]]) #[[ATTR0]] {
+// CHECK-RV64-NEXT: [[ENTRY:.*:]]
+// CHECK-RV64-NEXT: [[TMP0:%.*]] = call <vscale x 2 x bfloat> @llvm.riscv.vfsub.mask.nxv2bf16.bf16.i64(<vscale x 2 x bfloat> [[MASKEDOFF]], <vscale x 2 x bfloat> [[OP1]], bfloat [[OP2]], <vscale x 2 x i1> [[MASK]], i64 7, i64 [[VL]], i64 1)
+// CHECK-RV64-NEXT: ret <vscale x 2 x bfloat> [[TMP0]]
+//
+vbfloat16mf2_t test_vfsub_vf_bf16mf2_mu(vbool32_t mask, vbfloat16mf2_t maskedoff, vbfloat16mf2_t op1, __bf16 op2, size_t vl) {
+ return __riscv_vfsub_mu(mask, maskedoff, op1, op2, vl);
+}
+
+// CHECK-RV64-LABEL: define dso_local <vscale x 4 x bfloat> @test_vfsub_vv_bf16m1_mu(
+// CHECK-RV64-SAME: <vscale x 4 x i1> [[MASK:%.*]], <vscale x 4 x bfloat> [[MASKEDOFF:%.*]], <vscale x 4 x bfloat> [[OP1:%.*]], <vscale x 4 x bfloat> [[OP2:%.*]], i64 noundef [[VL:%.*]]) #[[ATTR0]] {
+// CHECK-RV64-NEXT: [[ENTRY:.*:]]
+// CHECK-RV64-NEXT: [[TMP0:%.*]] = call <vscale x 4 x bfloat> @llvm.riscv.vfsub.mask.nxv4bf16.nxv4bf16.i64(<vscale x 4 x bfloat> [[MASKEDOFF]], <vscale x 4 x bfloat> [[OP1]], <vscale x 4 x bfloat> [[OP2]], <vscale x 4 x i1> [[MASK]], i64 7, i64 [[VL]], i64 1)
+// CHECK-RV64-NEXT: ret <vscale x 4 x bfloat> [[TMP0]]
+//
+vbfloat16m1_t test_vfsub_vv_bf16m1_mu(vbool16_t mask, vbfloat16m1_t maskedoff, vbfloat16m1_t op1, vbfloat16m1_t op2, size_t vl) {
+ return __riscv_vfsub_mu(mask, maskedoff, op1, op2, vl);
+}
+
+// CHECK-RV64-LABEL: define dso_local <vscale x 4 x bfloat> @test_vfsub_vf_bf16m1_mu(
+// CHECK-RV64-SAME: <vscale x 4 x i1> [[MASK:%.*]], <vscale x 4 x bfloat> [[MASKEDOFF:%.*]], <vscale x 4 x bfloat> [[OP1:%.*]], bfloat noundef [[OP2:%.*]], i64 noundef [[VL:%.*]]) #[[ATTR0]] {
+// CHECK-RV64-NEXT: [[ENTRY:.*:]]
+// CHECK-RV64-NEXT: [[TMP0:%.*]] = call <vscale x 4 x bfloat> @llvm.riscv.vfsub.mask.nxv4bf16.bf16.i64(<vscale x 4 x bfloat> [[MASKEDOFF]], <vscale x 4 x bfloat> [[OP1]], bfloat [[OP2]], <vscale x 4 x i1> [[MASK]], i64 7, i64 [[VL]], i64 1)
+// CHECK-RV64-NEXT: ret <vscale x 4 x bfloat> [[TMP0]]
+//
+vbfloat16m1_t test_vfsub_vf_bf16m1_mu(vbool16_t mask, vbfloat16m1_t maskedoff, vbfloat16m1_t op1, __bf16 op2, size_t vl) {
+ return __riscv_vfsub_mu(mask, maskedoff, op1, op2, vl);
+}
+
+// CHECK-RV64-LABEL: define dso_local <vscale x 8 x bfloat> @test_vfsub_vv_bf16m2_mu(
+// CHECK-RV64-SAME: <vscale x 8 x i1> [[MASK:%.*]], <vscale x 8 x bfloat> [[MASKEDOFF:%.*]], <vscale x 8 x bfloat> [[OP1:%.*]], <vscale x 8 x bfloat> [[OP2:%.*]], i64 noundef [[VL:%.*]]) #[[ATTR0]] {
+// CHECK-RV64-NEXT: [[ENTRY:.*:]]
+// CHECK-RV64-NEXT: [[TMP0:%.*]] = call <vscale x 8 x bfloat> @llvm.riscv.vfsub.mask.nxv8bf16.nxv8bf16.i64(<vscale x 8 x bfloat> [[MASKEDOFF]], <vscale x 8 x bfloat> [[OP1]], <vscale x 8 x bfloat> [[OP2]], <vscale x 8 x i1> [[MASK]], i64 7, i64 [[VL]], i64 1)
+// CHECK-RV64-NEXT: ret <vscale x 8 x bfloat> [[TMP0]]
+//
+vbfloat16m2_t test_vfsub_vv_bf16m2_mu(vbool8_t mask, vbfloat16m2_t maskedoff, vbfloat16m2_t op1, vbfloat16m2_t op2, size_t vl) {
+ return __riscv_vfsub_mu(mask, maskedoff, op1, op2, vl);
+}
+
+// CHECK-RV64-LABEL: define dso_local <vscale x 8 x bfloat> @test_vfsub_vf_bf16m2_mu(
+// CHECK-RV64-SAME: <vscale x 8 x i1> [[MASK:%.*]], <vscale x 8 x bfloat> [[MASKEDOFF:%.*]], <vscale x 8 x bfloat> [[OP1:%.*]], bfloat noundef [[OP2:%.*]], i64 noundef [[VL:%.*]]) #[[ATTR0]] {
+// CHECK-RV64-NEXT: [[ENTRY:.*:]]
+// CHECK-RV64-NEXT: [[TMP0:%.*]] = call <vscale x 8 x bfloat> @llvm.riscv.vfsub.mask.nxv8bf16.bf16.i64(<vscale x 8 x bfloat> [[MASKEDOFF]], <vscale x 8 x bfloat> [[OP1]], bfloat [[OP2]], <vscale x 8 x i1> [[MASK]], i64 7, i64 [[VL]], i64 1)
+// CHECK-RV64-NEXT: ret <vscale x 8 x bfloat> [[TMP0]]
+//
+vbfloat16m2_t test_vfsub_vf_bf16m2_mu(vbool8_t mask, vbfloat16m2_t maskedoff, vbfloat16m2_t op1, __bf16 op2, size_t vl) {
+ return __riscv_vfsub_mu(mask, maskedoff, op1, op2, vl);
+}
+
+// CHECK-RV64-LABEL: define dso_local <vscale x 16 x bfloat> @test_vfsub_vv_bf16m4_mu(
+// CHECK-RV64-SAME: <vscale x 16 x i1> [[MASK:%.*]], <vscale x 16 x bfloat> [[MASKEDOFF:%.*]], <vscale x 16 x bfloat> [[OP1:%.*]], <vscale x 16 x bfloat> [[OP2:%.*]], i64 noundef [[VL:%.*]]) #[[ATTR0]] {
+// CHECK-RV64-NEXT: [[ENTRY:.*:]]
+// CHECK-RV64-NEXT: [[TMP0:%.*]] = call <vscale x 16 x bfloat> @llvm.riscv.vfsub.mask.nxv16bf16.nxv16bf16.i64(<vscale x 16 x bfloat> [[MASKEDOFF]], <vscale x 16 x bfloat> [[OP1]], <vscale x 16 x bfloat> [[OP2]], <vscale x 16 x i1> [[MASK]], i64 7, i64 [[VL]], i64 1)
+// CHECK-RV64-NEXT: ret <vscale x 16 x bfloat> [[TMP0]]
+//
+vbfloat16m4_t test_vfsub_vv_bf16m4_mu(vbool4_t mask, vbfloat16m4_t maskedoff, vbfloat16m4_t op1, vbfloat16m4_t op2, size_t vl) {
+ return __riscv_vfsub_mu(mask, maskedoff, op1, op2, vl);
+}
+
+// CHECK-RV64-LABEL: define dso_local <vscale x 16 x bfloat> @test_vfsub_vf_bf16m4_mu(
+// CHECK-RV64-SAME: <vscale x 16 x i1> [[MASK:%.*]], <vscale x 16 x bfloat> [[MASKEDOFF:%.*]], <vscale x 16 x bfloat> [[OP1:%.*]], bfloat noundef [[OP2:%.*]], i64 noundef [[VL:%.*]]) #[[ATTR0]] {
+// CHECK-RV64-NEXT: [[ENTRY:.*:]]
+// CHECK-RV64-NEXT: [[TMP0:%.*]] = call <vscale x 16 x bfloat> @llvm.riscv.vfsub.mask.nxv16bf16.bf16.i64(<vscale x 16 x bfloat> [[MASKEDOFF]], <vscale x 16 x bfloat> [[OP1]], bfloat [[OP2]], <vscale x 16 x i1> [[MASK]], i64 7, i64 [[VL]], i64 1)
+// CHECK-RV64-NEXT: ret <vscale x 16 x bfloat> [[TMP0]]
+//
+vbfloat16m4_t test_vfsub_vf_bf16m4_mu(vbool4_t mask, vbfloat16m4_t maskedoff, vbfloat16m4_t op1, __bf16 op2, size_t vl) {
+ return __riscv_vfsub_mu(mask, maskedoff, op1, op2, vl);
+}
+
+// CHECK-RV64-LABEL: define dso_local <vscale x 32 x bfloat> @test_vfsub_vv_bf16m8_mu(
+// CHECK-RV64-SAME: <vscale x 32 x i1> [[MASK:%.*]], <vscale x 32 x bfloat> [[MASKEDOFF:%.*]], <vscale x 32 x bfloat> [[OP1:%.*]], <vscale x 32 x bfloat> [[OP2:%.*]], i64 noundef [[VL:%.*]]) #[[ATTR0]] {
+// CHECK-RV64-NEXT: [[ENTRY:.*:]]
+// CHECK-RV64-NEXT: [[TMP0:%.*]] = call <vscale x 32 x bfloat> @llvm.riscv.vfsub.mask.nxv32bf16.nxv32bf16.i64(<vscale x 32 x bfloat> [[MASKEDOFF]], <vscale x 32 x bfloat> [[OP1]], <vscale x 32 x bfloat> [[OP2]], <vscale x 32 x i1> [[MASK]], i64 7, i64 [[VL]], i64 1)
+// CHECK-RV64-NEXT: ret <vscale x 32 x bfloat> [[TMP0]]
+//
+vbfloat16m8_t test_vfsub_vv_bf16m8_mu(vbool2_t mask, vbfloat16m8_t maskedoff, vbfloat16m8_t op1, vbfloat16m8_t op2, size_t vl) {
+ return __riscv_vfsub_mu(mask, maskedoff, op1, op2, vl);
+}
+
+// CHECK-RV64-LABEL: define dso_local <vscale x 32 x bfloat> @test_vfsub_vf_bf16m8_mu(
+// CHECK-RV64-SAME: <vscale x 32 x i1> [[MASK:%.*]], <vscale x 32 x bfloat> [[MASKEDOFF:%.*]], <vscale x 32 x bfloat> [[OP1:%.*]], bfloat noundef [[OP2:%.*]], i64 noundef [[VL:%.*]]) #[[ATTR0]] {
+// CHECK-RV64-NEXT: [[ENTRY:.*:]]
+// CHECK-RV64-NEXT: [[TMP0:%.*]] = call <vscale x 32 x bfloat> @llvm.riscv.vfsub.mask.nxv32bf16.bf16.i64(<vscale x 32 x bfloat> [[MASKEDOFF]], <vscale x 32 x bfloat> [[OP1]], bfloat [[OP2]], <vscale x 32 x i1> [[MASK]], i64 7, i64 [[VL]], i64 1)
+// CHECK-RV64-NEXT: ret <vscale x 32 x bfloat> [[TMP0]]
+//
+vbfloat16m8_t test_vfsub_vf_bf16m8_mu(vbool2_t mask, vbfloat16m8_t maskedoff, vbfloat16m8_t op1, __bf16 op2, size_t vl) {
+ return __riscv_vfsub_mu(mask, maskedoff, op1, op2, vl);
+}
+
diff --git a/clang/test/CodeGen/RISCV/rvv-intrinsics-autogenerated/zvfbfa/policy/overloaded/vfwadd.c b/clang/test/CodeGen/RISCV/rvv-intrinsics-autogenerated/zvfbfa/policy/overloaded/vfwadd.c
new file mode 100644
index 0000000..e5b7b8d
--- /dev/null
+++ b/clang/test/CodeGen/RISCV/rvv-intrinsics-autogenerated/zvfbfa/policy/overloaded/vfwadd.c
@@ -0,0 +1,1932 @@
+// NOTE: Assertions have been autogenerated by utils/update_cc_test_checks.py UTC_ARGS: --version 5
+// REQUIRES: riscv-registered-target
+// RUN: %clang_cc1 -triple riscv64 -target-feature +v \
+// RUN: -target-feature +experimental-zvfbfa -disable-O0-optnone \
+// RUN: -emit-llvm %s -o - | opt -S -passes=mem2reg | \
+// RUN: FileCheck --check-prefix=CHECK-RV64 %s
+
+#include <riscv_vector.h>
+
+// CHECK-RV64-LABEL: define dso_local <vscale x 1 x float> @test_vfwadd_vv_bf16mf4_f32mf2_tu(
+// CHECK-RV64-SAME: <vscale x 1 x float> [[VD:%.*]], <vscale x 1 x bfloat> [[VS2:%.*]], <vscale x 1 x bfloat> [[VS1:%.*]], i64 noundef [[VL:%.*]]) #[[ATTR0:[0-9]+]] {
+// CHECK-RV64-NEXT: [[ENTRY:.*:]]
+// CHECK-RV64-NEXT: [[TMP0:%.*]] = call <vscale x 1 x float> @llvm.riscv.vfwadd.nxv1f32.nxv1bf16.nxv1bf16.i64(<vscale x 1 x float> [[VD]], <vscale x 1 x bfloat> [[VS2]], <vscale x 1 x bfloat> [[VS1]], i64 7, i64 [[VL]])
+// CHECK-RV64-NEXT: ret <vscale x 1 x float> [[TMP0]]
+//
+vfloat32mf2_t test_vfwadd_vv_bf16mf4_f32mf2_tu(vfloat32mf2_t vd,
+ vbfloat16mf4_t vs2,
+ vbfloat16mf4_t vs1, size_t vl) {
+ return __riscv_vfwadd_vv_tu(vd, vs2, vs1, vl);
+}
+
+// CHECK-RV64-LABEL: define dso_local <vscale x 1 x float> @test_vfwadd_vf_bf16mf4_f32mf2_tu(
+// CHECK-RV64-SAME: <vscale x 1 x float> [[VD:%.*]], <vscale x 1 x bfloat> [[VS2:%.*]], bfloat noundef [[RS1:%.*]], i64 noundef [[VL:%.*]]) #[[ATTR0]] {
+// CHECK-RV64-NEXT: [[ENTRY:.*:]]
+// CHECK-RV64-NEXT: [[TMP0:%.*]] = call <vscale x 1 x float> @llvm.riscv.vfwadd.nxv1f32.nxv1bf16.bf16.i64(<vscale x 1 x float> [[VD]], <vscale x 1 x bfloat> [[VS2]], bfloat [[RS1]], i64 7, i64 [[VL]])
+// CHECK-RV64-NEXT: ret <vscale x 1 x float> [[TMP0]]
+//
+vfloat32mf2_t test_vfwadd_vf_bf16mf4_f32mf2_tu(vfloat32mf2_t vd,
+ vbfloat16mf4_t vs2, __bf16 rs1,
+ size_t vl) {
+ return __riscv_vfwadd_vf_tu(vd, vs2, rs1, vl);
+}
+
+// CHECK-RV64-LABEL: define dso_local <vscale x 1 x float> @test_vfwadd_wv_bf16mf4_f32mf2_tu(
+// CHECK-RV64-SAME: <vscale x 1 x float> [[VD:%.*]], <vscale x 1 x float> [[VS2:%.*]], <vscale x 1 x bfloat> [[VS1:%.*]], i64 noundef [[VL:%.*]]) #[[ATTR0]] {
+// CHECK-RV64-NEXT: [[ENTRY:.*:]]
+// CHECK-RV64-NEXT: [[TMP0:%.*]] = call <vscale x 1 x float> @llvm.riscv.vfwadd.w.nxv1f32.nxv1bf16.i64(<vscale x 1 x float> [[VD]], <vscale x 1 x float> [[VS2]], <vscale x 1 x bfloat> [[VS1]], i64 7, i64 [[VL]])
+// CHECK-RV64-NEXT: ret <vscale x 1 x float> [[TMP0]]
+//
+vfloat32mf2_t test_vfwadd_wv_bf16mf4_f32mf2_tu(vfloat32mf2_t vd,
+ vfloat32mf2_t vs2,
+ vbfloat16mf4_t vs1, size_t vl) {
+ return __riscv_vfwadd_wv_tu(vd, vs2, vs1, vl);
+}
+
+// CHECK-RV64-LABEL: define dso_local <vscale x 1 x float> @test_vfwadd_wf_bf16_f32mf2_tu(
+// CHECK-RV64-SAME: <vscale x 1 x float> [[VD:%.*]], <vscale x 1 x float> [[VS2:%.*]], bfloat noundef [[RS1:%.*]], i64 noundef [[VL:%.*]]) #[[ATTR0]] {
+// CHECK-RV64-NEXT: [[ENTRY:.*:]]
+// CHECK-RV64-NEXT: [[TMP0:%.*]] = call <vscale x 1 x float> @llvm.riscv.vfwadd.w.nxv1f32.bf16.i64(<vscale x 1 x float> [[VD]], <vscale x 1 x float> [[VS2]], bfloat [[RS1]], i64 7, i64 [[VL]])
+// CHECK-RV64-NEXT: ret <vscale x 1 x float> [[TMP0]]
+//
+vfloat32mf2_t test_vfwadd_wf_bf16_f32mf2_tu(vfloat32mf2_t vd, vfloat32mf2_t vs2,
+ __bf16 rs1, size_t vl) {
+ return __riscv_vfwadd_wf_tu(vd, vs2, rs1, vl);
+}
+
+// CHECK-RV64-LABEL: define dso_local <vscale x 2 x float> @test_vfwadd_vv_bf16mf2_f32m1_tu(
+// CHECK-RV64-SAME: <vscale x 2 x float> [[VD:%.*]], <vscale x 2 x bfloat> [[VS2:%.*]], <vscale x 2 x bfloat> [[VS1:%.*]], i64 noundef [[VL:%.*]]) #[[ATTR0]] {
+// CHECK-RV64-NEXT: [[ENTRY:.*:]]
+// CHECK-RV64-NEXT: [[TMP0:%.*]] = call <vscale x 2 x float> @llvm.riscv.vfwadd.nxv2f32.nxv2bf16.nxv2bf16.i64(<vscale x 2 x float> [[VD]], <vscale x 2 x bfloat> [[VS2]], <vscale x 2 x bfloat> [[VS1]], i64 7, i64 [[VL]])
+// CHECK-RV64-NEXT: ret <vscale x 2 x float> [[TMP0]]
+//
+vfloat32m1_t test_vfwadd_vv_bf16mf2_f32m1_tu(vfloat32m1_t vd,
+ vbfloat16mf2_t vs2,
+ vbfloat16mf2_t vs1, size_t vl) {
+ return __riscv_vfwadd_vv_tu(vd, vs2, vs1, vl);
+}
+
+// CHECK-RV64-LABEL: define dso_local <vscale x 2 x float> @test_vfwadd_vf_bf16mf2_f32m1_tu(
+// CHECK-RV64-SAME: <vscale x 2 x float> [[VD:%.*]], <vscale x 2 x bfloat> [[VS2:%.*]], bfloat noundef [[RS1:%.*]], i64 noundef [[VL:%.*]]) #[[ATTR0]] {
+// CHECK-RV64-NEXT: [[ENTRY:.*:]]
+// CHECK-RV64-NEXT: [[TMP0:%.*]] = call <vscale x 2 x float> @llvm.riscv.vfwadd.nxv2f32.nxv2bf16.bf16.i64(<vscale x 2 x float> [[VD]], <vscale x 2 x bfloat> [[VS2]], bfloat [[RS1]], i64 7, i64 [[VL]])
+// CHECK-RV64-NEXT: ret <vscale x 2 x float> [[TMP0]]
+//
+vfloat32m1_t test_vfwadd_vf_bf16mf2_f32m1_tu(vfloat32m1_t vd,
+ vbfloat16mf2_t vs2, __bf16 rs1,
+ size_t vl) {
+ return __riscv_vfwadd_vf_tu(vd, vs2, rs1, vl);
+}
+
+// CHECK-RV64-LABEL: define dso_local <vscale x 2 x float> @test_vfwadd_wv_bf16mf2_f32m1_tu(
+// CHECK-RV64-SAME: <vscale x 2 x float> [[VD:%.*]], <vscale x 2 x float> [[VS2:%.*]], <vscale x 2 x bfloat> [[VS1:%.*]], i64 noundef [[VL:%.*]]) #[[ATTR0]] {
+// CHECK-RV64-NEXT: [[ENTRY:.*:]]
+// CHECK-RV64-NEXT: [[TMP0:%.*]] = call <vscale x 2 x float> @llvm.riscv.vfwadd.w.nxv2f32.nxv2bf16.i64(<vscale x 2 x float> [[VD]], <vscale x 2 x float> [[VS2]], <vscale x 2 x bfloat> [[VS1]], i64 7, i64 [[VL]])
+// CHECK-RV64-NEXT: ret <vscale x 2 x float> [[TMP0]]
+//
+vfloat32m1_t test_vfwadd_wv_bf16mf2_f32m1_tu(vfloat32m1_t vd, vfloat32m1_t vs2,
+ vbfloat16mf2_t vs1, size_t vl) {
+ return __riscv_vfwadd_wv_tu(vd, vs2, vs1, vl);
+}
+
+// CHECK-RV64-LABEL: define dso_local <vscale x 2 x float> @test_vfwadd_wf_bf16_f32m1_tu(
+// CHECK-RV64-SAME: <vscale x 2 x float> [[VD:%.*]], <vscale x 2 x float> [[VS2:%.*]], bfloat noundef [[RS1:%.*]], i64 noundef [[VL:%.*]]) #[[ATTR0]] {
+// CHECK-RV64-NEXT: [[ENTRY:.*:]]
+// CHECK-RV64-NEXT: [[TMP0:%.*]] = call <vscale x 2 x float> @llvm.riscv.vfwadd.w.nxv2f32.bf16.i64(<vscale x 2 x float> [[VD]], <vscale x 2 x float> [[VS2]], bfloat [[RS1]], i64 7, i64 [[VL]])
+// CHECK-RV64-NEXT: ret <vscale x 2 x float> [[TMP0]]
+//
+vfloat32m1_t test_vfwadd_wf_bf16_f32m1_tu(vfloat32m1_t vd, vfloat32m1_t vs2,
+ __bf16 rs1, size_t vl) {
+ return __riscv_vfwadd_wf_tu(vd, vs2, rs1, vl);
+}
+
+// CHECK-RV64-LABEL: define dso_local <vscale x 4 x float> @test_vfwadd_vv_bf16m1_f32m2_tu(
+// CHECK-RV64-SAME: <vscale x 4 x float> [[VD:%.*]], <vscale x 4 x bfloat> [[VS2:%.*]], <vscale x 4 x bfloat> [[VS1:%.*]], i64 noundef [[VL:%.*]]) #[[ATTR0]] {
+// CHECK-RV64-NEXT: [[ENTRY:.*:]]
+// CHECK-RV64-NEXT: [[TMP0:%.*]] = call <vscale x 4 x float> @llvm.riscv.vfwadd.nxv4f32.nxv4bf16.nxv4bf16.i64(<vscale x 4 x float> [[VD]], <vscale x 4 x bfloat> [[VS2]], <vscale x 4 x bfloat> [[VS1]], i64 7, i64 [[VL]])
+// CHECK-RV64-NEXT: ret <vscale x 4 x float> [[TMP0]]
+//
+vfloat32m2_t test_vfwadd_vv_bf16m1_f32m2_tu(vfloat32m2_t vd, vbfloat16m1_t vs2,
+ vbfloat16m1_t vs1, size_t vl) {
+ return __riscv_vfwadd_vv_tu(vd, vs2, vs1, vl);
+}
+
+// CHECK-RV64-LABEL: define dso_local <vscale x 4 x float> @test_vfwadd_vf_bf16m1_f32m2_tu(
+// CHECK-RV64-SAME: <vscale x 4 x float> [[VD:%.*]], <vscale x 4 x bfloat> [[VS2:%.*]], bfloat noundef [[RS1:%.*]], i64 noundef [[VL:%.*]]) #[[ATTR0]] {
+// CHECK-RV64-NEXT: [[ENTRY:.*:]]
+// CHECK-RV64-NEXT: [[TMP0:%.*]] = call <vscale x 4 x float> @llvm.riscv.vfwadd.nxv4f32.nxv4bf16.bf16.i64(<vscale x 4 x float> [[VD]], <vscale x 4 x bfloat> [[VS2]], bfloat [[RS1]], i64 7, i64 [[VL]])
+// CHECK-RV64-NEXT: ret <vscale x 4 x float> [[TMP0]]
+//
+vfloat32m2_t test_vfwadd_vf_bf16m1_f32m2_tu(vfloat32m2_t vd, vbfloat16m1_t vs2,
+ __bf16 rs1, size_t vl) {
+ return __riscv_vfwadd_vf_tu(vd, vs2, rs1, vl);
+}
+
+// CHECK-RV64-LABEL: define dso_local <vscale x 4 x float> @test_vfwadd_wv_bf16m1_f32m2_tu(
+// CHECK-RV64-SAME: <vscale x 4 x float> [[VD:%.*]], <vscale x 4 x float> [[VS2:%.*]], <vscale x 4 x bfloat> [[VS1:%.*]], i64 noundef [[VL:%.*]]) #[[ATTR0]] {
+// CHECK-RV64-NEXT: [[ENTRY:.*:]]
+// CHECK-RV64-NEXT: [[TMP0:%.*]] = call <vscale x 4 x float> @llvm.riscv.vfwadd.w.nxv4f32.nxv4bf16.i64(<vscale x 4 x float> [[VD]], <vscale x 4 x float> [[VS2]], <vscale x 4 x bfloat> [[VS1]], i64 7, i64 [[VL]])
+// CHECK-RV64-NEXT: ret <vscale x 4 x float> [[TMP0]]
+//
+vfloat32m2_t test_vfwadd_wv_bf16m1_f32m2_tu(vfloat32m2_t vd, vfloat32m2_t vs2,
+ vbfloat16m1_t vs1, size_t vl) {
+ return __riscv_vfwadd_wv_tu(vd, vs2, vs1, vl);
+}
+
+// CHECK-RV64-LABEL: define dso_local <vscale x 4 x float> @test_vfwadd_wf_bf16_f32m2_tu(
+// CHECK-RV64-SAME: <vscale x 4 x float> [[VD:%.*]], <vscale x 4 x float> [[VS2:%.*]], bfloat noundef [[RS1:%.*]], i64 noundef [[VL:%.*]]) #[[ATTR0]] {
+// CHECK-RV64-NEXT: [[ENTRY:.*:]]
+// CHECK-RV64-NEXT: [[TMP0:%.*]] = call <vscale x 4 x float> @llvm.riscv.vfwadd.w.nxv4f32.bf16.i64(<vscale x 4 x float> [[VD]], <vscale x 4 x float> [[VS2]], bfloat [[RS1]], i64 7, i64 [[VL]])
+// CHECK-RV64-NEXT: ret <vscale x 4 x float> [[TMP0]]
+//
+vfloat32m2_t test_vfwadd_wf_bf16_f32m2_tu(vfloat32m2_t vd, vfloat32m2_t vs2,
+ __bf16 rs1, size_t vl) {
+ return __riscv_vfwadd_wf_tu(vd, vs2, rs1, vl);
+}
+
+// CHECK-RV64-LABEL: define dso_local <vscale x 8 x float> @test_vfwadd_vv_bf16m2_f32m4_tu(
+// CHECK-RV64-SAME: <vscale x 8 x float> [[VD:%.*]], <vscale x 8 x bfloat> [[VS2:%.*]], <vscale x 8 x bfloat> [[VS1:%.*]], i64 noundef [[VL:%.*]]) #[[ATTR0]] {
+// CHECK-RV64-NEXT: [[ENTRY:.*:]]
+// CHECK-RV64-NEXT: [[TMP0:%.*]] = call <vscale x 8 x float> @llvm.riscv.vfwadd.nxv8f32.nxv8bf16.nxv8bf16.i64(<vscale x 8 x float> [[VD]], <vscale x 8 x bfloat> [[VS2]], <vscale x 8 x bfloat> [[VS1]], i64 7, i64 [[VL]])
+// CHECK-RV64-NEXT: ret <vscale x 8 x float> [[TMP0]]
+//
+vfloat32m4_t test_vfwadd_vv_bf16m2_f32m4_tu(vfloat32m4_t vd, vbfloat16m2_t vs2,
+ vbfloat16m2_t vs1, size_t vl) {
+ return __riscv_vfwadd_vv_tu(vd, vs2, vs1, vl);
+}
+
+// CHECK-RV64-LABEL: define dso_local <vscale x 8 x float> @test_vfwadd_vf_bf16m2_f32m4_tu(
+// CHECK-RV64-SAME: <vscale x 8 x float> [[VD:%.*]], <vscale x 8 x bfloat> [[VS2:%.*]], bfloat noundef [[RS1:%.*]], i64 noundef [[VL:%.*]]) #[[ATTR0]] {
+// CHECK-RV64-NEXT: [[ENTRY:.*:]]
+// CHECK-RV64-NEXT: [[TMP0:%.*]] = call <vscale x 8 x float> @llvm.riscv.vfwadd.nxv8f32.nxv8bf16.bf16.i64(<vscale x 8 x float> [[VD]], <vscale x 8 x bfloat> [[VS2]], bfloat [[RS1]], i64 7, i64 [[VL]])
+// CHECK-RV64-NEXT: ret <vscale x 8 x float> [[TMP0]]
+//
+vfloat32m4_t test_vfwadd_vf_bf16m2_f32m4_tu(vfloat32m4_t vd, vbfloat16m2_t vs2,
+ __bf16 rs1, size_t vl) {
+ return __riscv_vfwadd_vf_tu(vd, vs2, rs1, vl);
+}
+
+// CHECK-RV64-LABEL: define dso_local <vscale x 8 x float> @test_vfwadd_wv_bf16m2_f32m4_tu(
+// CHECK-RV64-SAME: <vscale x 8 x float> [[VD:%.*]], <vscale x 8 x float> [[VS2:%.*]], <vscale x 8 x bfloat> [[VS1:%.*]], i64 noundef [[VL:%.*]]) #[[ATTR0]] {
+// CHECK-RV64-NEXT: [[ENTRY:.*:]]
+// CHECK-RV64-NEXT: [[TMP0:%.*]] = call <vscale x 8 x float> @llvm.riscv.vfwadd.w.nxv8f32.nxv8bf16.i64(<vscale x 8 x float> [[VD]], <vscale x 8 x float> [[VS2]], <vscale x 8 x bfloat> [[VS1]], i64 7, i64 [[VL]])
+// CHECK-RV64-NEXT: ret <vscale x 8 x float> [[TMP0]]
+//
+vfloat32m4_t test_vfwadd_wv_bf16m2_f32m4_tu(vfloat32m4_t vd, vfloat32m4_t vs2,
+ vbfloat16m2_t vs1, size_t vl) {
+ return __riscv_vfwadd_wv_tu(vd, vs2, vs1, vl);
+}
+
+// CHECK-RV64-LABEL: define dso_local <vscale x 8 x float> @test_vfwadd_wf_bf16_f32m4_tu(
+// CHECK-RV64-SAME: <vscale x 8 x float> [[VD:%.*]], <vscale x 8 x float> [[VS2:%.*]], bfloat noundef [[RS1:%.*]], i64 noundef [[VL:%.*]]) #[[ATTR0]] {
+// CHECK-RV64-NEXT: [[ENTRY:.*:]]
+// CHECK-RV64-NEXT: [[TMP0:%.*]] = call <vscale x 8 x float> @llvm.riscv.vfwadd.w.nxv8f32.bf16.i64(<vscale x 8 x float> [[VD]], <vscale x 8 x float> [[VS2]], bfloat [[RS1]], i64 7, i64 [[VL]])
+// CHECK-RV64-NEXT: ret <vscale x 8 x float> [[TMP0]]
+//
+vfloat32m4_t test_vfwadd_wf_bf16_f32m4_tu(vfloat32m4_t vd, vfloat32m4_t vs2,
+ __bf16 rs1, size_t vl) {
+ return __riscv_vfwadd_wf_tu(vd, vs2, rs1, vl);
+}
+
+// CHECK-RV64-LABEL: define dso_local <vscale x 16 x float> @test_vfwadd_vv_bf16m4_f32m8_tu(
+// CHECK-RV64-SAME: <vscale x 16 x float> [[VD:%.*]], <vscale x 16 x bfloat> [[VS2:%.*]], <vscale x 16 x bfloat> [[VS1:%.*]], i64 noundef [[VL:%.*]]) #[[ATTR0]] {
+// CHECK-RV64-NEXT: [[ENTRY:.*:]]
+// CHECK-RV64-NEXT: [[TMP0:%.*]] = call <vscale x 16 x float> @llvm.riscv.vfwadd.nxv16f32.nxv16bf16.nxv16bf16.i64(<vscale x 16 x float> [[VD]], <vscale x 16 x bfloat> [[VS2]], <vscale x 16 x bfloat> [[VS1]], i64 7, i64 [[VL]])
+// CHECK-RV64-NEXT: ret <vscale x 16 x float> [[TMP0]]
+//
+vfloat32m8_t test_vfwadd_vv_bf16m4_f32m8_tu(vfloat32m8_t vd, vbfloat16m4_t vs2,
+ vbfloat16m4_t vs1, size_t vl) {
+ return __riscv_vfwadd_vv_tu(vd, vs2, vs1, vl);
+}
+
+// CHECK-RV64-LABEL: define dso_local <vscale x 16 x float> @test_vfwadd_vf_bf16m4_f32m8_tu(
+// CHECK-RV64-SAME: <vscale x 16 x float> [[VD:%.*]], <vscale x 16 x bfloat> [[VS2:%.*]], bfloat noundef [[RS1:%.*]], i64 noundef [[VL:%.*]]) #[[ATTR0]] {
+// CHECK-RV64-NEXT: [[ENTRY:.*:]]
+// CHECK-RV64-NEXT: [[TMP0:%.*]] = call <vscale x 16 x float> @llvm.riscv.vfwadd.nxv16f32.nxv16bf16.bf16.i64(<vscale x 16 x float> [[VD]], <vscale x 16 x bfloat> [[VS2]], bfloat [[RS1]], i64 7, i64 [[VL]])
+// CHECK-RV64-NEXT: ret <vscale x 16 x float> [[TMP0]]
+//
+vfloat32m8_t test_vfwadd_vf_bf16m4_f32m8_tu(vfloat32m8_t vd, vbfloat16m4_t vs2,
+ __bf16 rs1, size_t vl) {
+ return __riscv_vfwadd_vf_tu(vd, vs2, rs1, vl);
+}
+
+// CHECK-RV64-LABEL: define dso_local <vscale x 16 x float> @test_vfwadd_wv_bf16m4_f32m8_tu(
+// CHECK-RV64-SAME: <vscale x 16 x float> [[VD:%.*]], <vscale x 16 x float> [[VS2:%.*]], <vscale x 16 x bfloat> [[VS1:%.*]], i64 noundef [[VL:%.*]]) #[[ATTR0]] {
+// CHECK-RV64-NEXT: [[ENTRY:.*:]]
+// CHECK-RV64-NEXT: [[TMP0:%.*]] = call <vscale x 16 x float> @llvm.riscv.vfwadd.w.nxv16f32.nxv16bf16.i64(<vscale x 16 x float> [[VD]], <vscale x 16 x float> [[VS2]], <vscale x 16 x bfloat> [[VS1]], i64 7, i64 [[VL]])
+// CHECK-RV64-NEXT: ret <vscale x 16 x float> [[TMP0]]
+//
+vfloat32m8_t test_vfwadd_wv_bf16m4_f32m8_tu(vfloat32m8_t vd, vfloat32m8_t vs2,
+ vbfloat16m4_t vs1, size_t vl) {
+ return __riscv_vfwadd_wv_tu(vd, vs2, vs1, vl);
+}
+
+// CHECK-RV64-LABEL: define dso_local <vscale x 16 x float> @test_vfwadd_wf_bf16_f32m8_tu(
+// CHECK-RV64-SAME: <vscale x 16 x float> [[VD:%.*]], <vscale x 16 x float> [[VS2:%.*]], bfloat noundef [[RS1:%.*]], i64 noundef [[VL:%.*]]) #[[ATTR0]] {
+// CHECK-RV64-NEXT: [[ENTRY:.*:]]
+// CHECK-RV64-NEXT: [[TMP0:%.*]] = call <vscale x 16 x float> @llvm.riscv.vfwadd.w.nxv16f32.bf16.i64(<vscale x 16 x float> [[VD]], <vscale x 16 x float> [[VS2]], bfloat [[RS1]], i64 7, i64 [[VL]])
+// CHECK-RV64-NEXT: ret <vscale x 16 x float> [[TMP0]]
+//
+vfloat32m8_t test_vfwadd_wf_bf16_f32m8_tu(vfloat32m8_t vd, vfloat32m8_t vs2,
+ __bf16 rs1, size_t vl) {
+ return __riscv_vfwadd_wf_tu(vd, vs2, rs1, vl);
+}
+
+// CHECK-RV64-LABEL: define dso_local <vscale x 1 x float> @test_vfwadd_vv_bf16mf4_f32mf2_tum(
+// CHECK-RV64-SAME: <vscale x 1 x i1> [[VM:%.*]], <vscale x 1 x float> [[VD:%.*]], <vscale x 1 x bfloat> [[VS2:%.*]], <vscale x 1 x bfloat> [[VS1:%.*]], i64 noundef [[VL:%.*]]) #[[ATTR0]] {
+// CHECK-RV64-NEXT: [[ENTRY:.*:]]
+// CHECK-RV64-NEXT: [[TMP0:%.*]] = call <vscale x 1 x float> @llvm.riscv.vfwadd.mask.nxv1f32.nxv1bf16.nxv1bf16.i64(<vscale x 1 x float> [[VD]], <vscale x 1 x bfloat> [[VS2]], <vscale x 1 x bfloat> [[VS1]], <vscale x 1 x i1> [[VM]], i64 7, i64 [[VL]], i64 2)
+// CHECK-RV64-NEXT: ret <vscale x 1 x float> [[TMP0]]
+//
+vfloat32mf2_t test_vfwadd_vv_bf16mf4_f32mf2_tum(vbool64_t vm, vfloat32mf2_t vd,
+ vbfloat16mf4_t vs2,
+ vbfloat16mf4_t vs1, size_t vl) {
+ return __riscv_vfwadd_vv_tum(vm, vd, vs2, vs1, vl);
+}
+
+// CHECK-RV64-LABEL: define dso_local <vscale x 1 x float> @test_vfwadd_vf_bf16mf4_f32mf2_tum(
+// CHECK-RV64-SAME: <vscale x 1 x i1> [[VM:%.*]], <vscale x 1 x float> [[VD:%.*]], <vscale x 1 x bfloat> [[VS2:%.*]], bfloat noundef [[RS1:%.*]], i64 noundef [[VL:%.*]]) #[[ATTR0]] {
+// CHECK-RV64-NEXT: [[ENTRY:.*:]]
+// CHECK-RV64-NEXT: [[TMP0:%.*]] = call <vscale x 1 x float> @llvm.riscv.vfwadd.mask.nxv1f32.nxv1bf16.bf16.i64(<vscale x 1 x float> [[VD]], <vscale x 1 x bfloat> [[VS2]], bfloat [[RS1]], <vscale x 1 x i1> [[VM]], i64 7, i64 [[VL]], i64 2)
+// CHECK-RV64-NEXT: ret <vscale x 1 x float> [[TMP0]]
+//
+vfloat32mf2_t test_vfwadd_vf_bf16mf4_f32mf2_tum(vbool64_t vm, vfloat32mf2_t vd,
+ vbfloat16mf4_t vs2, __bf16 rs1,
+ size_t vl) {
+ return __riscv_vfwadd_vf_tum(vm, vd, vs2, rs1, vl);
+}
+
+// CHECK-RV64-LABEL: define dso_local <vscale x 1 x float> @test_vfwadd_wv_bf16mf4_f32mf2_tum(
+// CHECK-RV64-SAME: <vscale x 1 x i1> [[VM:%.*]], <vscale x 1 x float> [[VD:%.*]], <vscale x 1 x float> [[VS2:%.*]], <vscale x 1 x bfloat> [[VS1:%.*]], i64 noundef [[VL:%.*]]) #[[ATTR0]] {
+// CHECK-RV64-NEXT: [[ENTRY:.*:]]
+// CHECK-RV64-NEXT: [[TMP0:%.*]] = call <vscale x 1 x float> @llvm.riscv.vfwadd.w.mask.nxv1f32.nxv1bf16.i64(<vscale x 1 x float> [[VD]], <vscale x 1 x float> [[VS2]], <vscale x 1 x bfloat> [[VS1]], <vscale x 1 x i1> [[VM]], i64 7, i64 [[VL]], i64 2)
+// CHECK-RV64-NEXT: ret <vscale x 1 x float> [[TMP0]]
+//
+vfloat32mf2_t test_vfwadd_wv_bf16mf4_f32mf2_tum(vbool64_t vm, vfloat32mf2_t vd,
+ vfloat32mf2_t vs2,
+ vbfloat16mf4_t vs1, size_t vl) {
+ return __riscv_vfwadd_wv_tum(vm, vd, vs2, vs1, vl);
+}
+
+// CHECK-RV64-LABEL: define dso_local <vscale x 1 x float> @test_vfwadd_wf_bf16_f32mf2_tum(
+// CHECK-RV64-SAME: <vscale x 1 x i1> [[VM:%.*]], <vscale x 1 x float> [[VD:%.*]], <vscale x 1 x float> [[VS2:%.*]], bfloat noundef [[RS1:%.*]], i64 noundef [[VL:%.*]]) #[[ATTR0]] {
+// CHECK-RV64-NEXT: [[ENTRY:.*:]]
+// CHECK-RV64-NEXT: [[TMP0:%.*]] = call <vscale x 1 x float> @llvm.riscv.vfwadd.w.mask.nxv1f32.bf16.i64(<vscale x 1 x float> [[VD]], <vscale x 1 x float> [[VS2]], bfloat [[RS1]], <vscale x 1 x i1> [[VM]], i64 7, i64 [[VL]], i64 2)
+// CHECK-RV64-NEXT: ret <vscale x 1 x float> [[TMP0]]
+//
+vfloat32mf2_t test_vfwadd_wf_bf16_f32mf2_tum(vbool64_t vm, vfloat32mf2_t vd,
+ vfloat32mf2_t vs2, __bf16 rs1,
+ size_t vl) {
+ return __riscv_vfwadd_wf_tum(vm, vd, vs2, rs1, vl);
+}
+
+// CHECK-RV64-LABEL: define dso_local <vscale x 2 x float> @test_vfwadd_vv_bf16mf2_f32m1_tum(
+// CHECK-RV64-SAME: <vscale x 2 x i1> [[VM:%.*]], <vscale x 2 x float> [[VD:%.*]], <vscale x 2 x bfloat> [[VS2:%.*]], <vscale x 2 x bfloat> [[VS1:%.*]], i64 noundef [[VL:%.*]]) #[[ATTR0]] {
+// CHECK-RV64-NEXT: [[ENTRY:.*:]]
+// CHECK-RV64-NEXT: [[TMP0:%.*]] = call <vscale x 2 x float> @llvm.riscv.vfwadd.mask.nxv2f32.nxv2bf16.nxv2bf16.i64(<vscale x 2 x float> [[VD]], <vscale x 2 x bfloat> [[VS2]], <vscale x 2 x bfloat> [[VS1]], <vscale x 2 x i1> [[VM]], i64 7, i64 [[VL]], i64 2)
+// CHECK-RV64-NEXT: ret <vscale x 2 x float> [[TMP0]]
+//
+vfloat32m1_t test_vfwadd_vv_bf16mf2_f32m1_tum(vbool32_t vm, vfloat32m1_t vd,
+ vbfloat16mf2_t vs2,
+ vbfloat16mf2_t vs1, size_t vl) {
+ return __riscv_vfwadd_vv_tum(vm, vd, vs2, vs1, vl);
+}
+
+// CHECK-RV64-LABEL: define dso_local <vscale x 2 x float> @test_vfwadd_vf_bf16mf2_f32m1_tum(
+// CHECK-RV64-SAME: <vscale x 2 x i1> [[VM:%.*]], <vscale x 2 x float> [[VD:%.*]], <vscale x 2 x bfloat> [[VS2:%.*]], bfloat noundef [[RS1:%.*]], i64 noundef [[VL:%.*]]) #[[ATTR0]] {
+// CHECK-RV64-NEXT: [[ENTRY:.*:]]
+// CHECK-RV64-NEXT: [[TMP0:%.*]] = call <vscale x 2 x float> @llvm.riscv.vfwadd.mask.nxv2f32.nxv2bf16.bf16.i64(<vscale x 2 x float> [[VD]], <vscale x 2 x bfloat> [[VS2]], bfloat [[RS1]], <vscale x 2 x i1> [[VM]], i64 7, i64 [[VL]], i64 2)
+// CHECK-RV64-NEXT: ret <vscale x 2 x float> [[TMP0]]
+//
+vfloat32m1_t test_vfwadd_vf_bf16mf2_f32m1_tum(vbool32_t vm, vfloat32m1_t vd,
+ vbfloat16mf2_t vs2, __bf16 rs1,
+ size_t vl) {
+ return __riscv_vfwadd_vf_tum(vm, vd, vs2, rs1, vl);
+}
+
+// CHECK-RV64-LABEL: define dso_local <vscale x 2 x float> @test_vfwadd_wv_bf16mf2_f32m1_tum(
+// CHECK-RV64-SAME: <vscale x 2 x i1> [[VM:%.*]], <vscale x 2 x float> [[VD:%.*]], <vscale x 2 x float> [[VS2:%.*]], <vscale x 2 x bfloat> [[VS1:%.*]], i64 noundef [[VL:%.*]]) #[[ATTR0]] {
+// CHECK-RV64-NEXT: [[ENTRY:.*:]]
+// CHECK-RV64-NEXT: [[TMP0:%.*]] = call <vscale x 2 x float> @llvm.riscv.vfwadd.w.mask.nxv2f32.nxv2bf16.i64(<vscale x 2 x float> [[VD]], <vscale x 2 x float> [[VS2]], <vscale x 2 x bfloat> [[VS1]], <vscale x 2 x i1> [[VM]], i64 7, i64 [[VL]], i64 2)
+// CHECK-RV64-NEXT: ret <vscale x 2 x float> [[TMP0]]
+//
+vfloat32m1_t test_vfwadd_wv_bf16mf2_f32m1_tum(vbool32_t vm, vfloat32m1_t vd,
+ vfloat32m1_t vs2,
+ vbfloat16mf2_t vs1, size_t vl) {
+ return __riscv_vfwadd_wv_tum(vm, vd, vs2, vs1, vl);
+}
+
+// CHECK-RV64-LABEL: define dso_local <vscale x 2 x float> @test_vfwadd_wf_bf16_f32m1_tum(
+// CHECK-RV64-SAME: <vscale x 2 x i1> [[VM:%.*]], <vscale x 2 x float> [[VD:%.*]], <vscale x 2 x float> [[VS2:%.*]], bfloat noundef [[RS1:%.*]], i64 noundef [[VL:%.*]]) #[[ATTR0]] {
+// CHECK-RV64-NEXT: [[ENTRY:.*:]]
+// CHECK-RV64-NEXT: [[TMP0:%.*]] = call <vscale x 2 x float> @llvm.riscv.vfwadd.w.mask.nxv2f32.bf16.i64(<vscale x 2 x float> [[VD]], <vscale x 2 x float> [[VS2]], bfloat [[RS1]], <vscale x 2 x i1> [[VM]], i64 7, i64 [[VL]], i64 2)
+// CHECK-RV64-NEXT: ret <vscale x 2 x float> [[TMP0]]
+//
+vfloat32m1_t test_vfwadd_wf_bf16_f32m1_tum(vbool32_t vm, vfloat32m1_t vd,
+ vfloat32m1_t vs2, __bf16 rs1,
+ size_t vl) {
+ return __riscv_vfwadd_wf_tum(vm, vd, vs2, rs1, vl);
+}
+
+// CHECK-RV64-LABEL: define dso_local <vscale x 4 x float> @test_vfwadd_vv_bf16m1_f32m2_tum(
+// CHECK-RV64-SAME: <vscale x 4 x i1> [[VM:%.*]], <vscale x 4 x float> [[VD:%.*]], <vscale x 4 x bfloat> [[VS2:%.*]], <vscale x 4 x bfloat> [[VS1:%.*]], i64 noundef [[VL:%.*]]) #[[ATTR0]] {
+// CHECK-RV64-NEXT: [[ENTRY:.*:]]
+// CHECK-RV64-NEXT: [[TMP0:%.*]] = call <vscale x 4 x float> @llvm.riscv.vfwadd.mask.nxv4f32.nxv4bf16.nxv4bf16.i64(<vscale x 4 x float> [[VD]], <vscale x 4 x bfloat> [[VS2]], <vscale x 4 x bfloat> [[VS1]], <vscale x 4 x i1> [[VM]], i64 7, i64 [[VL]], i64 2)
+// CHECK-RV64-NEXT: ret <vscale x 4 x float> [[TMP0]]
+//
+vfloat32m2_t test_vfwadd_vv_bf16m1_f32m2_tum(vbool16_t vm, vfloat32m2_t vd,
+ vbfloat16m1_t vs2,
+ vbfloat16m1_t vs1, size_t vl) {
+ return __riscv_vfwadd_vv_tum(vm, vd, vs2, vs1, vl);
+}
+
+// CHECK-RV64-LABEL: define dso_local <vscale x 4 x float> @test_vfwadd_vf_bf16m1_f32m2_tum(
+// CHECK-RV64-SAME: <vscale x 4 x i1> [[VM:%.*]], <vscale x 4 x float> [[VD:%.*]], <vscale x 4 x bfloat> [[VS2:%.*]], bfloat noundef [[RS1:%.*]], i64 noundef [[VL:%.*]]) #[[ATTR0]] {
+// CHECK-RV64-NEXT: [[ENTRY:.*:]]
+// CHECK-RV64-NEXT: [[TMP0:%.*]] = call <vscale x 4 x float> @llvm.riscv.vfwadd.mask.nxv4f32.nxv4bf16.bf16.i64(<vscale x 4 x float> [[VD]], <vscale x 4 x bfloat> [[VS2]], bfloat [[RS1]], <vscale x 4 x i1> [[VM]], i64 7, i64 [[VL]], i64 2)
+// CHECK-RV64-NEXT: ret <vscale x 4 x float> [[TMP0]]
+//
+vfloat32m2_t test_vfwadd_vf_bf16m1_f32m2_tum(vbool16_t vm, vfloat32m2_t vd,
+ vbfloat16m1_t vs2, __bf16 rs1,
+ size_t vl) {
+ return __riscv_vfwadd_vf_tum(vm, vd, vs2, rs1, vl);
+}
+
+// CHECK-RV64-LABEL: define dso_local <vscale x 4 x float> @test_vfwadd_wv_bf16m1_f32m2_tum(
+// CHECK-RV64-SAME: <vscale x 4 x i1> [[VM:%.*]], <vscale x 4 x float> [[VD:%.*]], <vscale x 4 x float> [[VS2:%.*]], <vscale x 4 x bfloat> [[VS1:%.*]], i64 noundef [[VL:%.*]]) #[[ATTR0]] {
+// CHECK-RV64-NEXT: [[ENTRY:.*:]]
+// CHECK-RV64-NEXT: [[TMP0:%.*]] = call <vscale x 4 x float> @llvm.riscv.vfwadd.w.mask.nxv4f32.nxv4bf16.i64(<vscale x 4 x float> [[VD]], <vscale x 4 x float> [[VS2]], <vscale x 4 x bfloat> [[VS1]], <vscale x 4 x i1> [[VM]], i64 7, i64 [[VL]], i64 2)
+// CHECK-RV64-NEXT: ret <vscale x 4 x float> [[TMP0]]
+//
+vfloat32m2_t test_vfwadd_wv_bf16m1_f32m2_tum(vbool16_t vm, vfloat32m2_t vd,
+ vfloat32m2_t vs2,
+ vbfloat16m1_t vs1, size_t vl) {
+ return __riscv_vfwadd_wv_tum(vm, vd, vs2, vs1, vl);
+}
+
+// CHECK-RV64-LABEL: define dso_local <vscale x 4 x float> @test_vfwadd_wf_bf16_f32m2_tum(
+// CHECK-RV64-SAME: <vscale x 4 x i1> [[VM:%.*]], <vscale x 4 x float> [[VD:%.*]], <vscale x 4 x float> [[VS2:%.*]], bfloat noundef [[RS1:%.*]], i64 noundef [[VL:%.*]]) #[[ATTR0]] {
+// CHECK-RV64-NEXT: [[ENTRY:.*:]]
+// CHECK-RV64-NEXT: [[TMP0:%.*]] = call <vscale x 4 x float> @llvm.riscv.vfwadd.w.mask.nxv4f32.bf16.i64(<vscale x 4 x float> [[VD]], <vscale x 4 x float> [[VS2]], bfloat [[RS1]], <vscale x 4 x i1> [[VM]], i64 7, i64 [[VL]], i64 2)
+// CHECK-RV64-NEXT: ret <vscale x 4 x float> [[TMP0]]
+//
+vfloat32m2_t test_vfwadd_wf_bf16_f32m2_tum(vbool16_t vm, vfloat32m2_t vd,
+ vfloat32m2_t vs2, __bf16 rs1,
+ size_t vl) {
+ return __riscv_vfwadd_wf_tum(vm, vd, vs2, rs1, vl);
+}
+
+// CHECK-RV64-LABEL: define dso_local <vscale x 8 x float> @test_vfwadd_vv_bf16m2_f32m4_tum(
+// CHECK-RV64-SAME: <vscale x 8 x i1> [[VM:%.*]], <vscale x 8 x float> [[VD:%.*]], <vscale x 8 x bfloat> [[VS2:%.*]], <vscale x 8 x bfloat> [[VS1:%.*]], i64 noundef [[VL:%.*]]) #[[ATTR0]] {
+// CHECK-RV64-NEXT: [[ENTRY:.*:]]
+// CHECK-RV64-NEXT: [[TMP0:%.*]] = call <vscale x 8 x float> @llvm.riscv.vfwadd.mask.nxv8f32.nxv8bf16.nxv8bf16.i64(<vscale x 8 x float> [[VD]], <vscale x 8 x bfloat> [[VS2]], <vscale x 8 x bfloat> [[VS1]], <vscale x 8 x i1> [[VM]], i64 7, i64 [[VL]], i64 2)
+// CHECK-RV64-NEXT: ret <vscale x 8 x float> [[TMP0]]
+//
+vfloat32m4_t test_vfwadd_vv_bf16m2_f32m4_tum(vbool8_t vm, vfloat32m4_t vd,
+ vbfloat16m2_t vs2,
+ vbfloat16m2_t vs1, size_t vl) {
+ return __riscv_vfwadd_vv_tum(vm, vd, vs2, vs1, vl);
+}
+
+// CHECK-RV64-LABEL: define dso_local <vscale x 8 x float> @test_vfwadd_vf_bf16m2_f32m4_tum(
+// CHECK-RV64-SAME: <vscale x 8 x i1> [[VM:%.*]], <vscale x 8 x float> [[VD:%.*]], <vscale x 8 x bfloat> [[VS2:%.*]], bfloat noundef [[RS1:%.*]], i64 noundef [[VL:%.*]]) #[[ATTR0]] {
+// CHECK-RV64-NEXT: [[ENTRY:.*:]]
+// CHECK-RV64-NEXT: [[TMP0:%.*]] = call <vscale x 8 x float> @llvm.riscv.vfwadd.mask.nxv8f32.nxv8bf16.bf16.i64(<vscale x 8 x float> [[VD]], <vscale x 8 x bfloat> [[VS2]], bfloat [[RS1]], <vscale x 8 x i1> [[VM]], i64 7, i64 [[VL]], i64 2)
+// CHECK-RV64-NEXT: ret <vscale x 8 x float> [[TMP0]]
+//
+vfloat32m4_t test_vfwadd_vf_bf16m2_f32m4_tum(vbool8_t vm, vfloat32m4_t vd,
+ vbfloat16m2_t vs2, __bf16 rs1,
+ size_t vl) {
+ return __riscv_vfwadd_vf_tum(vm, vd, vs2, rs1, vl);
+}
+
+// CHECK-RV64-LABEL: define dso_local <vscale x 8 x float> @test_vfwadd_wv_bf16m2_f32m4_tum(
+// CHECK-RV64-SAME: <vscale x 8 x i1> [[VM:%.*]], <vscale x 8 x float> [[VD:%.*]], <vscale x 8 x float> [[VS2:%.*]], <vscale x 8 x bfloat> [[VS1:%.*]], i64 noundef [[VL:%.*]]) #[[ATTR0]] {
+// CHECK-RV64-NEXT: [[ENTRY:.*:]]
+// CHECK-RV64-NEXT: [[TMP0:%.*]] = call <vscale x 8 x float> @llvm.riscv.vfwadd.w.mask.nxv8f32.nxv8bf16.i64(<vscale x 8 x float> [[VD]], <vscale x 8 x float> [[VS2]], <vscale x 8 x bfloat> [[VS1]], <vscale x 8 x i1> [[VM]], i64 7, i64 [[VL]], i64 2)
+// CHECK-RV64-NEXT: ret <vscale x 8 x float> [[TMP0]]
+//
+vfloat32m4_t test_vfwadd_wv_bf16m2_f32m4_tum(vbool8_t vm, vfloat32m4_t vd,
+ vfloat32m4_t vs2,
+ vbfloat16m2_t vs1, size_t vl) {
+ return __riscv_vfwadd_wv_tum(vm, vd, vs2, vs1, vl);
+}
+
+// CHECK-RV64-LABEL: define dso_local <vscale x 8 x float> @test_vfwadd_wf_bf16_f32m4_tum(
+// CHECK-RV64-SAME: <vscale x 8 x i1> [[VM:%.*]], <vscale x 8 x float> [[VD:%.*]], <vscale x 8 x float> [[VS2:%.*]], bfloat noundef [[RS1:%.*]], i64 noundef [[VL:%.*]]) #[[ATTR0]] {
+// CHECK-RV64-NEXT: [[ENTRY:.*:]]
+// CHECK-RV64-NEXT: [[TMP0:%.*]] = call <vscale x 8 x float> @llvm.riscv.vfwadd.w.mask.nxv8f32.bf16.i64(<vscale x 8 x float> [[VD]], <vscale x 8 x float> [[VS2]], bfloat [[RS1]], <vscale x 8 x i1> [[VM]], i64 7, i64 [[VL]], i64 2)
+// CHECK-RV64-NEXT: ret <vscale x 8 x float> [[TMP0]]
+//
+vfloat32m4_t test_vfwadd_wf_bf16_f32m4_tum(vbool8_t vm, vfloat32m4_t vd,
+ vfloat32m4_t vs2, __bf16 rs1,
+ size_t vl) {
+ return __riscv_vfwadd_wf_tum(vm, vd, vs2, rs1, vl);
+}
+
+// CHECK-RV64-LABEL: define dso_local <vscale x 16 x float> @test_vfwadd_vv_bf16m4_f32m8_tum(
+// CHECK-RV64-SAME: <vscale x 16 x i1> [[VM:%.*]], <vscale x 16 x float> [[VD:%.*]], <vscale x 16 x bfloat> [[VS2:%.*]], <vscale x 16 x bfloat> [[VS1:%.*]], i64 noundef [[VL:%.*]]) #[[ATTR0]] {
+// CHECK-RV64-NEXT: [[ENTRY:.*:]]
+// CHECK-RV64-NEXT: [[TMP0:%.*]] = call <vscale x 16 x float> @llvm.riscv.vfwadd.mask.nxv16f32.nxv16bf16.nxv16bf16.i64(<vscale x 16 x float> [[VD]], <vscale x 16 x bfloat> [[VS2]], <vscale x 16 x bfloat> [[VS1]], <vscale x 16 x i1> [[VM]], i64 7, i64 [[VL]], i64 2)
+// CHECK-RV64-NEXT: ret <vscale x 16 x float> [[TMP0]]
+//
+vfloat32m8_t test_vfwadd_vv_bf16m4_f32m8_tum(vbool4_t vm, vfloat32m8_t vd,
+ vbfloat16m4_t vs2,
+ vbfloat16m4_t vs1, size_t vl) {
+ return __riscv_vfwadd_vv_tum(vm, vd, vs2, vs1, vl);
+}
+
+// CHECK-RV64-LABEL: define dso_local <vscale x 16 x float> @test_vfwadd_vf_bf16m4_f32m8_tum(
+// CHECK-RV64-SAME: <vscale x 16 x i1> [[VM:%.*]], <vscale x 16 x float> [[VD:%.*]], <vscale x 16 x bfloat> [[VS2:%.*]], bfloat noundef [[RS1:%.*]], i64 noundef [[VL:%.*]]) #[[ATTR0]] {
+// CHECK-RV64-NEXT: [[ENTRY:.*:]]
+// CHECK-RV64-NEXT: [[TMP0:%.*]] = call <vscale x 16 x float> @llvm.riscv.vfwadd.mask.nxv16f32.nxv16bf16.bf16.i64(<vscale x 16 x float> [[VD]], <vscale x 16 x bfloat> [[VS2]], bfloat [[RS1]], <vscale x 16 x i1> [[VM]], i64 7, i64 [[VL]], i64 2)
+// CHECK-RV64-NEXT: ret <vscale x 16 x float> [[TMP0]]
+//
+vfloat32m8_t test_vfwadd_vf_bf16m4_f32m8_tum(vbool4_t vm, vfloat32m8_t vd,
+ vbfloat16m4_t vs2, __bf16 rs1,
+ size_t vl) {
+ return __riscv_vfwadd_vf_tum(vm, vd, vs2, rs1, vl);
+}
+
+// CHECK-RV64-LABEL: define dso_local <vscale x 16 x float> @test_vfwadd_wv_bf16m4_f32m8_tum(
+// CHECK-RV64-SAME: <vscale x 16 x i1> [[VM:%.*]], <vscale x 16 x float> [[VD:%.*]], <vscale x 16 x float> [[VS2:%.*]], <vscale x 16 x bfloat> [[VS1:%.*]], i64 noundef [[VL:%.*]]) #[[ATTR0]] {
+// CHECK-RV64-NEXT: [[ENTRY:.*:]]
+// CHECK-RV64-NEXT: [[TMP0:%.*]] = call <vscale x 16 x float> @llvm.riscv.vfwadd.w.mask.nxv16f32.nxv16bf16.i64(<vscale x 16 x float> [[VD]], <vscale x 16 x float> [[VS2]], <vscale x 16 x bfloat> [[VS1]], <vscale x 16 x i1> [[VM]], i64 7, i64 [[VL]], i64 2)
+// CHECK-RV64-NEXT: ret <vscale x 16 x float> [[TMP0]]
+//
+vfloat32m8_t test_vfwadd_wv_bf16m4_f32m8_tum(vbool4_t vm, vfloat32m8_t vd,
+ vfloat32m8_t vs2,
+ vbfloat16m4_t vs1, size_t vl) {
+ return __riscv_vfwadd_wv_tum(vm, vd, vs2, vs1, vl);
+}
+
+// CHECK-RV64-LABEL: define dso_local <vscale x 16 x float> @test_vfwadd_wf_bf16_f32m8_tum(
+// CHECK-RV64-SAME: <vscale x 16 x i1> [[VM:%.*]], <vscale x 16 x float> [[VD:%.*]], <vscale x 16 x float> [[VS2:%.*]], bfloat noundef [[RS1:%.*]], i64 noundef [[VL:%.*]]) #[[ATTR0]] {
+// CHECK-RV64-NEXT: [[ENTRY:.*:]]
+// CHECK-RV64-NEXT: [[TMP0:%.*]] = call <vscale x 16 x float> @llvm.riscv.vfwadd.w.mask.nxv16f32.bf16.i64(<vscale x 16 x float> [[VD]], <vscale x 16 x float> [[VS2]], bfloat [[RS1]], <vscale x 16 x i1> [[VM]], i64 7, i64 [[VL]], i64 2)
+// CHECK-RV64-NEXT: ret <vscale x 16 x float> [[TMP0]]
+//
+vfloat32m8_t test_vfwadd_wf_bf16_f32m8_tum(vbool4_t vm, vfloat32m8_t vd,
+ vfloat32m8_t vs2, __bf16 rs1,
+ size_t vl) {
+ return __riscv_vfwadd_wf_tum(vm, vd, vs2, rs1, vl);
+}
+
+// CHECK-RV64-LABEL: define dso_local <vscale x 1 x float> @test_vfwadd_vv_bf16mf4_f32mf2_tumu(
+// CHECK-RV64-SAME: <vscale x 1 x i1> [[VM:%.*]], <vscale x 1 x float> [[VD:%.*]], <vscale x 1 x bfloat> [[VS2:%.*]], <vscale x 1 x bfloat> [[VS1:%.*]], i64 noundef [[VL:%.*]]) #[[ATTR0]] {
+// CHECK-RV64-NEXT: [[ENTRY:.*:]]
+// CHECK-RV64-NEXT: [[TMP0:%.*]] = call <vscale x 1 x float> @llvm.riscv.vfwadd.mask.nxv1f32.nxv1bf16.nxv1bf16.i64(<vscale x 1 x float> [[VD]], <vscale x 1 x bfloat> [[VS2]], <vscale x 1 x bfloat> [[VS1]], <vscale x 1 x i1> [[VM]], i64 7, i64 [[VL]], i64 0)
+// CHECK-RV64-NEXT: ret <vscale x 1 x float> [[TMP0]]
+//
+vfloat32mf2_t test_vfwadd_vv_bf16mf4_f32mf2_tumu(vbool64_t vm, vfloat32mf2_t vd,
+ vbfloat16mf4_t vs2,
+ vbfloat16mf4_t vs1,
+ size_t vl) {
+ return __riscv_vfwadd_vv_tumu(vm, vd, vs2, vs1, vl);
+}
+
+// CHECK-RV64-LABEL: define dso_local <vscale x 1 x float> @test_vfwadd_vf_bf16mf4_f32mf2_tumu(
+// CHECK-RV64-SAME: <vscale x 1 x i1> [[VM:%.*]], <vscale x 1 x float> [[VD:%.*]], <vscale x 1 x bfloat> [[VS2:%.*]], bfloat noundef [[RS1:%.*]], i64 noundef [[VL:%.*]]) #[[ATTR0]] {
+// CHECK-RV64-NEXT: [[ENTRY:.*:]]
+// CHECK-RV64-NEXT: [[TMP0:%.*]] = call <vscale x 1 x float> @llvm.riscv.vfwadd.mask.nxv1f32.nxv1bf16.bf16.i64(<vscale x 1 x float> [[VD]], <vscale x 1 x bfloat> [[VS2]], bfloat [[RS1]], <vscale x 1 x i1> [[VM]], i64 7, i64 [[VL]], i64 0)
+// CHECK-RV64-NEXT: ret <vscale x 1 x float> [[TMP0]]
+//
+vfloat32mf2_t test_vfwadd_vf_bf16mf4_f32mf2_tumu(vbool64_t vm, vfloat32mf2_t vd,
+ vbfloat16mf4_t vs2, __bf16 rs1,
+ size_t vl) {
+ return __riscv_vfwadd_vf_tumu(vm, vd, vs2, rs1, vl);
+}
+
+// CHECK-RV64-LABEL: define dso_local <vscale x 1 x float> @test_vfwadd_wv_bf16mf4_f32mf2_tumu(
+// CHECK-RV64-SAME: <vscale x 1 x i1> [[VM:%.*]], <vscale x 1 x float> [[VD:%.*]], <vscale x 1 x float> [[VS2:%.*]], <vscale x 1 x bfloat> [[VS1:%.*]], i64 noundef [[VL:%.*]]) #[[ATTR0]] {
+// CHECK-RV64-NEXT: [[ENTRY:.*:]]
+// CHECK-RV64-NEXT: [[TMP0:%.*]] = call <vscale x 1 x float> @llvm.riscv.vfwadd.w.mask.nxv1f32.nxv1bf16.i64(<vscale x 1 x float> [[VD]], <vscale x 1 x float> [[VS2]], <vscale x 1 x bfloat> [[VS1]], <vscale x 1 x i1> [[VM]], i64 7, i64 [[VL]], i64 0)
+// CHECK-RV64-NEXT: ret <vscale x 1 x float> [[TMP0]]
+//
+vfloat32mf2_t test_vfwadd_wv_bf16mf4_f32mf2_tumu(vbool64_t vm, vfloat32mf2_t vd,
+ vfloat32mf2_t vs2,
+ vbfloat16mf4_t vs1,
+ size_t vl) {
+ return __riscv_vfwadd_wv_tumu(vm, vd, vs2, vs1, vl);
+}
+
+// CHECK-RV64-LABEL: define dso_local <vscale x 1 x float> @test_vfwadd_wf_bf16_f32mf2_tumu(
+// CHECK-RV64-SAME: <vscale x 1 x i1> [[VM:%.*]], <vscale x 1 x float> [[VD:%.*]], <vscale x 1 x float> [[VS2:%.*]], bfloat noundef [[RS1:%.*]], i64 noundef [[VL:%.*]]) #[[ATTR0]] {
+// CHECK-RV64-NEXT: [[ENTRY:.*:]]
+// CHECK-RV64-NEXT: [[TMP0:%.*]] = call <vscale x 1 x float> @llvm.riscv.vfwadd.w.mask.nxv1f32.bf16.i64(<vscale x 1 x float> [[VD]], <vscale x 1 x float> [[VS2]], bfloat [[RS1]], <vscale x 1 x i1> [[VM]], i64 7, i64 [[VL]], i64 0)
+// CHECK-RV64-NEXT: ret <vscale x 1 x float> [[TMP0]]
+//
+vfloat32mf2_t test_vfwadd_wf_bf16_f32mf2_tumu(vbool64_t vm, vfloat32mf2_t vd,
+ vfloat32mf2_t vs2, __bf16 rs1,
+ size_t vl) {
+ return __riscv_vfwadd_wf_tumu(vm, vd, vs2, rs1, vl);
+}
+
+// CHECK-RV64-LABEL: define dso_local <vscale x 2 x float> @test_vfwadd_vv_bf16mf2_f32m1_tumu(
+// CHECK-RV64-SAME: <vscale x 2 x i1> [[VM:%.*]], <vscale x 2 x float> [[VD:%.*]], <vscale x 2 x bfloat> [[VS2:%.*]], <vscale x 2 x bfloat> [[VS1:%.*]], i64 noundef [[VL:%.*]]) #[[ATTR0]] {
+// CHECK-RV64-NEXT: [[ENTRY:.*:]]
+// CHECK-RV64-NEXT: [[TMP0:%.*]] = call <vscale x 2 x float> @llvm.riscv.vfwadd.mask.nxv2f32.nxv2bf16.nxv2bf16.i64(<vscale x 2 x float> [[VD]], <vscale x 2 x bfloat> [[VS2]], <vscale x 2 x bfloat> [[VS1]], <vscale x 2 x i1> [[VM]], i64 7, i64 [[VL]], i64 0)
+// CHECK-RV64-NEXT: ret <vscale x 2 x float> [[TMP0]]
+//
+vfloat32m1_t test_vfwadd_vv_bf16mf2_f32m1_tumu(vbool32_t vm, vfloat32m1_t vd,
+ vbfloat16mf2_t vs2,
+ vbfloat16mf2_t vs1, size_t vl) {
+ return __riscv_vfwadd_vv_tumu(vm, vd, vs2, vs1, vl);
+}
+
+// CHECK-RV64-LABEL: define dso_local <vscale x 2 x float> @test_vfwadd_vf_bf16mf2_f32m1_tumu(
+// CHECK-RV64-SAME: <vscale x 2 x i1> [[VM:%.*]], <vscale x 2 x float> [[VD:%.*]], <vscale x 2 x bfloat> [[VS2:%.*]], bfloat noundef [[RS1:%.*]], i64 noundef [[VL:%.*]]) #[[ATTR0]] {
+// CHECK-RV64-NEXT: [[ENTRY:.*:]]
+// CHECK-RV64-NEXT: [[TMP0:%.*]] = call <vscale x 2 x float> @llvm.riscv.vfwadd.mask.nxv2f32.nxv2bf16.bf16.i64(<vscale x 2 x float> [[VD]], <vscale x 2 x bfloat> [[VS2]], bfloat [[RS1]], <vscale x 2 x i1> [[VM]], i64 7, i64 [[VL]], i64 0)
+// CHECK-RV64-NEXT: ret <vscale x 2 x float> [[TMP0]]
+//
+vfloat32m1_t test_vfwadd_vf_bf16mf2_f32m1_tumu(vbool32_t vm, vfloat32m1_t vd,
+ vbfloat16mf2_t vs2, __bf16 rs1,
+ size_t vl) {
+ return __riscv_vfwadd_vf_tumu(vm, vd, vs2, rs1, vl);
+}
+
+// CHECK-RV64-LABEL: define dso_local <vscale x 2 x float> @test_vfwadd_wv_bf16mf2_f32m1_tumu(
+// CHECK-RV64-SAME: <vscale x 2 x i1> [[VM:%.*]], <vscale x 2 x float> [[VD:%.*]], <vscale x 2 x float> [[VS2:%.*]], <vscale x 2 x bfloat> [[VS1:%.*]], i64 noundef [[VL:%.*]]) #[[ATTR0]] {
+// CHECK-RV64-NEXT: [[ENTRY:.*:]]
+// CHECK-RV64-NEXT: [[TMP0:%.*]] = call <vscale x 2 x float> @llvm.riscv.vfwadd.w.mask.nxv2f32.nxv2bf16.i64(<vscale x 2 x float> [[VD]], <vscale x 2 x float> [[VS2]], <vscale x 2 x bfloat> [[VS1]], <vscale x 2 x i1> [[VM]], i64 7, i64 [[VL]], i64 0)
+// CHECK-RV64-NEXT: ret <vscale x 2 x float> [[TMP0]]
+//
+vfloat32m1_t test_vfwadd_wv_bf16mf2_f32m1_tumu(vbool32_t vm, vfloat32m1_t vd,
+ vfloat32m1_t vs2,
+ vbfloat16mf2_t vs1, size_t vl) {
+ return __riscv_vfwadd_wv_tumu(vm, vd, vs2, vs1, vl);
+}
+
+// CHECK-RV64-LABEL: define dso_local <vscale x 2 x float> @test_vfwadd_wf_bf16_f32m1_tumu(
+// CHECK-RV64-SAME: <vscale x 2 x i1> [[VM:%.*]], <vscale x 2 x float> [[VD:%.*]], <vscale x 2 x float> [[VS2:%.*]], bfloat noundef [[RS1:%.*]], i64 noundef [[VL:%.*]]) #[[ATTR0]] {
+// CHECK-RV64-NEXT: [[ENTRY:.*:]]
+// CHECK-RV64-NEXT: [[TMP0:%.*]] = call <vscale x 2 x float> @llvm.riscv.vfwadd.w.mask.nxv2f32.bf16.i64(<vscale x 2 x float> [[VD]], <vscale x 2 x float> [[VS2]], bfloat [[RS1]], <vscale x 2 x i1> [[VM]], i64 7, i64 [[VL]], i64 0)
+// CHECK-RV64-NEXT: ret <vscale x 2 x float> [[TMP0]]
+//
+vfloat32m1_t test_vfwadd_wf_bf16_f32m1_tumu(vbool32_t vm, vfloat32m1_t vd,
+ vfloat32m1_t vs2, __bf16 rs1,
+ size_t vl) {
+ return __riscv_vfwadd_wf_tumu(vm, vd, vs2, rs1, vl);
+}
+
+// CHECK-RV64-LABEL: define dso_local <vscale x 4 x float> @test_vfwadd_vv_bf16m1_f32m2_tumu(
+// CHECK-RV64-SAME: <vscale x 4 x i1> [[VM:%.*]], <vscale x 4 x float> [[VD:%.*]], <vscale x 4 x bfloat> [[VS2:%.*]], <vscale x 4 x bfloat> [[VS1:%.*]], i64 noundef [[VL:%.*]]) #[[ATTR0]] {
+// CHECK-RV64-NEXT: [[ENTRY:.*:]]
+// CHECK-RV64-NEXT: [[TMP0:%.*]] = call <vscale x 4 x float> @llvm.riscv.vfwadd.mask.nxv4f32.nxv4bf16.nxv4bf16.i64(<vscale x 4 x float> [[VD]], <vscale x 4 x bfloat> [[VS2]], <vscale x 4 x bfloat> [[VS1]], <vscale x 4 x i1> [[VM]], i64 7, i64 [[VL]], i64 0)
+// CHECK-RV64-NEXT: ret <vscale x 4 x float> [[TMP0]]
+//
+vfloat32m2_t test_vfwadd_vv_bf16m1_f32m2_tumu(vbool16_t vm, vfloat32m2_t vd,
+ vbfloat16m1_t vs2,
+ vbfloat16m1_t vs1, size_t vl) {
+ return __riscv_vfwadd_vv_tumu(vm, vd, vs2, vs1, vl);
+}
+
+// CHECK-RV64-LABEL: define dso_local <vscale x 4 x float> @test_vfwadd_vf_bf16m1_f32m2_tumu(
+// CHECK-RV64-SAME: <vscale x 4 x i1> [[VM:%.*]], <vscale x 4 x float> [[VD:%.*]], <vscale x 4 x bfloat> [[VS2:%.*]], bfloat noundef [[RS1:%.*]], i64 noundef [[VL:%.*]]) #[[ATTR0]] {
+// CHECK-RV64-NEXT: [[ENTRY:.*:]]
+// CHECK-RV64-NEXT: [[TMP0:%.*]] = call <vscale x 4 x float> @llvm.riscv.vfwadd.mask.nxv4f32.nxv4bf16.bf16.i64(<vscale x 4 x float> [[VD]], <vscale x 4 x bfloat> [[VS2]], bfloat [[RS1]], <vscale x 4 x i1> [[VM]], i64 7, i64 [[VL]], i64 0)
+// CHECK-RV64-NEXT: ret <vscale x 4 x float> [[TMP0]]
+//
+vfloat32m2_t test_vfwadd_vf_bf16m1_f32m2_tumu(vbool16_t vm, vfloat32m2_t vd,
+ vbfloat16m1_t vs2, __bf16 rs1,
+ size_t vl) {
+ return __riscv_vfwadd_vf_tumu(vm, vd, vs2, rs1, vl);
+}
+
+// CHECK-RV64-LABEL: define dso_local <vscale x 4 x float> @test_vfwadd_wv_bf16m1_f32m2_tumu(
+// CHECK-RV64-SAME: <vscale x 4 x i1> [[VM:%.*]], <vscale x 4 x float> [[VD:%.*]], <vscale x 4 x float> [[VS2:%.*]], <vscale x 4 x bfloat> [[VS1:%.*]], i64 noundef [[VL:%.*]]) #[[ATTR0]] {
+// CHECK-RV64-NEXT: [[ENTRY:.*:]]
+// CHECK-RV64-NEXT: [[TMP0:%.*]] = call <vscale x 4 x float> @llvm.riscv.vfwadd.w.mask.nxv4f32.nxv4bf16.i64(<vscale x 4 x float> [[VD]], <vscale x 4 x float> [[VS2]], <vscale x 4 x bfloat> [[VS1]], <vscale x 4 x i1> [[VM]], i64 7, i64 [[VL]], i64 0)
+// CHECK-RV64-NEXT: ret <vscale x 4 x float> [[TMP0]]
+//
+vfloat32m2_t test_vfwadd_wv_bf16m1_f32m2_tumu(vbool16_t vm, vfloat32m2_t vd,
+ vfloat32m2_t vs2,
+ vbfloat16m1_t vs1, size_t vl) {
+ return __riscv_vfwadd_wv_tumu(vm, vd, vs2, vs1, vl);
+}
+
+// CHECK-RV64-LABEL: define dso_local <vscale x 4 x float> @test_vfwadd_wf_bf16_f32m2_tumu(
+// CHECK-RV64-SAME: <vscale x 4 x i1> [[VM:%.*]], <vscale x 4 x float> [[VD:%.*]], <vscale x 4 x float> [[VS2:%.*]], bfloat noundef [[RS1:%.*]], i64 noundef [[VL:%.*]]) #[[ATTR0]] {
+// CHECK-RV64-NEXT: [[ENTRY:.*:]]
+// CHECK-RV64-NEXT: [[TMP0:%.*]] = call <vscale x 4 x float> @llvm.riscv.vfwadd.w.mask.nxv4f32.bf16.i64(<vscale x 4 x float> [[VD]], <vscale x 4 x float> [[VS2]], bfloat [[RS1]], <vscale x 4 x i1> [[VM]], i64 7, i64 [[VL]], i64 0)
+// CHECK-RV64-NEXT: ret <vscale x 4 x float> [[TMP0]]
+//
+vfloat32m2_t test_vfwadd_wf_bf16_f32m2_tumu(vbool16_t vm, vfloat32m2_t vd,
+ vfloat32m2_t vs2, __bf16 rs1,
+ size_t vl) {
+ return __riscv_vfwadd_wf_tumu(vm, vd, vs2, rs1, vl);
+}
+
+// CHECK-RV64-LABEL: define dso_local <vscale x 8 x float> @test_vfwadd_vv_bf16m2_f32m4_tumu(
+// CHECK-RV64-SAME: <vscale x 8 x i1> [[VM:%.*]], <vscale x 8 x float> [[VD:%.*]], <vscale x 8 x bfloat> [[VS2:%.*]], <vscale x 8 x bfloat> [[VS1:%.*]], i64 noundef [[VL:%.*]]) #[[ATTR0]] {
+// CHECK-RV64-NEXT: [[ENTRY:.*:]]
+// CHECK-RV64-NEXT: [[TMP0:%.*]] = call <vscale x 8 x float> @llvm.riscv.vfwadd.mask.nxv8f32.nxv8bf16.nxv8bf16.i64(<vscale x 8 x float> [[VD]], <vscale x 8 x bfloat> [[VS2]], <vscale x 8 x bfloat> [[VS1]], <vscale x 8 x i1> [[VM]], i64 7, i64 [[VL]], i64 0)
+// CHECK-RV64-NEXT: ret <vscale x 8 x float> [[TMP0]]
+//
+vfloat32m4_t test_vfwadd_vv_bf16m2_f32m4_tumu(vbool8_t vm, vfloat32m4_t vd,
+ vbfloat16m2_t vs2,
+ vbfloat16m2_t vs1, size_t vl) {
+ return __riscv_vfwadd_vv_tumu(vm, vd, vs2, vs1, vl);
+}
+
+// CHECK-RV64-LABEL: define dso_local <vscale x 8 x float> @test_vfwadd_vf_bf16m2_f32m4_tumu(
+// CHECK-RV64-SAME: <vscale x 8 x i1> [[VM:%.*]], <vscale x 8 x float> [[VD:%.*]], <vscale x 8 x bfloat> [[VS2:%.*]], bfloat noundef [[RS1:%.*]], i64 noundef [[VL:%.*]]) #[[ATTR0]] {
+// CHECK-RV64-NEXT: [[ENTRY:.*:]]
+// CHECK-RV64-NEXT: [[TMP0:%.*]] = call <vscale x 8 x float> @llvm.riscv.vfwadd.mask.nxv8f32.nxv8bf16.bf16.i64(<vscale x 8 x float> [[VD]], <vscale x 8 x bfloat> [[VS2]], bfloat [[RS1]], <vscale x 8 x i1> [[VM]], i64 7, i64 [[VL]], i64 0)
+// CHECK-RV64-NEXT: ret <vscale x 8 x float> [[TMP0]]
+//
+vfloat32m4_t test_vfwadd_vf_bf16m2_f32m4_tumu(vbool8_t vm, vfloat32m4_t vd,
+ vbfloat16m2_t vs2, __bf16 rs1,
+ size_t vl) {
+ return __riscv_vfwadd_vf_tumu(vm, vd, vs2, rs1, vl);
+}
+
+// CHECK-RV64-LABEL: define dso_local <vscale x 8 x float> @test_vfwadd_wv_bf16m2_f32m4_tumu(
+// CHECK-RV64-SAME: <vscale x 8 x i1> [[VM:%.*]], <vscale x 8 x float> [[VD:%.*]], <vscale x 8 x float> [[VS2:%.*]], <vscale x 8 x bfloat> [[VS1:%.*]], i64 noundef [[VL:%.*]]) #[[ATTR0]] {
+// CHECK-RV64-NEXT: [[ENTRY:.*:]]
+// CHECK-RV64-NEXT: [[TMP0:%.*]] = call <vscale x 8 x float> @llvm.riscv.vfwadd.w.mask.nxv8f32.nxv8bf16.i64(<vscale x 8 x float> [[VD]], <vscale x 8 x float> [[VS2]], <vscale x 8 x bfloat> [[VS1]], <vscale x 8 x i1> [[VM]], i64 7, i64 [[VL]], i64 0)
+// CHECK-RV64-NEXT: ret <vscale x 8 x float> [[TMP0]]
+//
+vfloat32m4_t test_vfwadd_wv_bf16m2_f32m4_tumu(vbool8_t vm, vfloat32m4_t vd,
+ vfloat32m4_t vs2,
+ vbfloat16m2_t vs1, size_t vl) {
+ return __riscv_vfwadd_wv_tumu(vm, vd, vs2, vs1, vl);
+}
+
+// CHECK-RV64-LABEL: define dso_local <vscale x 8 x float> @test_vfwadd_wf_bf16_f32m4_tumu(
+// CHECK-RV64-SAME: <vscale x 8 x i1> [[VM:%.*]], <vscale x 8 x float> [[VD:%.*]], <vscale x 8 x float> [[VS2:%.*]], bfloat noundef [[RS1:%.*]], i64 noundef [[VL:%.*]]) #[[ATTR0]] {
+// CHECK-RV64-NEXT: [[ENTRY:.*:]]
+// CHECK-RV64-NEXT: [[TMP0:%.*]] = call <vscale x 8 x float> @llvm.riscv.vfwadd.w.mask.nxv8f32.bf16.i64(<vscale x 8 x float> [[VD]], <vscale x 8 x float> [[VS2]], bfloat [[RS1]], <vscale x 8 x i1> [[VM]], i64 7, i64 [[VL]], i64 0)
+// CHECK-RV64-NEXT: ret <vscale x 8 x float> [[TMP0]]
+//
+vfloat32m4_t test_vfwadd_wf_bf16_f32m4_tumu(vbool8_t vm, vfloat32m4_t vd,
+ vfloat32m4_t vs2, __bf16 rs1,
+ size_t vl) {
+ return __riscv_vfwadd_wf_tumu(vm, vd, vs2, rs1, vl);
+}
+
+// CHECK-RV64-LABEL: define dso_local <vscale x 16 x float> @test_vfwadd_vv_bf16m4_f32m8_tumu(
+// CHECK-RV64-SAME: <vscale x 16 x i1> [[VM:%.*]], <vscale x 16 x float> [[VD:%.*]], <vscale x 16 x bfloat> [[VS2:%.*]], <vscale x 16 x bfloat> [[VS1:%.*]], i64 noundef [[VL:%.*]]) #[[ATTR0]] {
+// CHECK-RV64-NEXT: [[ENTRY:.*:]]
+// CHECK-RV64-NEXT: [[TMP0:%.*]] = call <vscale x 16 x float> @llvm.riscv.vfwadd.mask.nxv16f32.nxv16bf16.nxv16bf16.i64(<vscale x 16 x float> [[VD]], <vscale x 16 x bfloat> [[VS2]], <vscale x 16 x bfloat> [[VS1]], <vscale x 16 x i1> [[VM]], i64 7, i64 [[VL]], i64 0)
+// CHECK-RV64-NEXT: ret <vscale x 16 x float> [[TMP0]]
+//
+vfloat32m8_t test_vfwadd_vv_bf16m4_f32m8_tumu(vbool4_t vm, vfloat32m8_t vd,
+ vbfloat16m4_t vs2,
+ vbfloat16m4_t vs1, size_t vl) {
+ return __riscv_vfwadd_vv_tumu(vm, vd, vs2, vs1, vl);
+}
+
+// CHECK-RV64-LABEL: define dso_local <vscale x 16 x float> @test_vfwadd_vf_bf16m4_f32m8_tumu(
+// CHECK-RV64-SAME: <vscale x 16 x i1> [[VM:%.*]], <vscale x 16 x float> [[VD:%.*]], <vscale x 16 x bfloat> [[VS2:%.*]], bfloat noundef [[RS1:%.*]], i64 noundef [[VL:%.*]]) #[[ATTR0]] {
+// CHECK-RV64-NEXT: [[ENTRY:.*:]]
+// CHECK-RV64-NEXT: [[TMP0:%.*]] = call <vscale x 16 x float> @llvm.riscv.vfwadd.mask.nxv16f32.nxv16bf16.bf16.i64(<vscale x 16 x float> [[VD]], <vscale x 16 x bfloat> [[VS2]], bfloat [[RS1]], <vscale x 16 x i1> [[VM]], i64 7, i64 [[VL]], i64 0)
+// CHECK-RV64-NEXT: ret <vscale x 16 x float> [[TMP0]]
+//
+vfloat32m8_t test_vfwadd_vf_bf16m4_f32m8_tumu(vbool4_t vm, vfloat32m8_t vd,
+ vbfloat16m4_t vs2, __bf16 rs1,
+ size_t vl) {
+ return __riscv_vfwadd_vf_tumu(vm, vd, vs2, rs1, vl);
+}
+
+// CHECK-RV64-LABEL: define dso_local <vscale x 16 x float> @test_vfwadd_wv_bf16m4_f32m8_tumu(
+// CHECK-RV64-SAME: <vscale x 16 x i1> [[VM:%.*]], <vscale x 16 x float> [[VD:%.*]], <vscale x 16 x float> [[VS2:%.*]], <vscale x 16 x bfloat> [[VS1:%.*]], i64 noundef [[VL:%.*]]) #[[ATTR0]] {
+// CHECK-RV64-NEXT: [[ENTRY:.*:]]
+// CHECK-RV64-NEXT: [[TMP0:%.*]] = call <vscale x 16 x float> @llvm.riscv.vfwadd.w.mask.nxv16f32.nxv16bf16.i64(<vscale x 16 x float> [[VD]], <vscale x 16 x float> [[VS2]], <vscale x 16 x bfloat> [[VS1]], <vscale x 16 x i1> [[VM]], i64 7, i64 [[VL]], i64 0)
+// CHECK-RV64-NEXT: ret <vscale x 16 x float> [[TMP0]]
+//
+vfloat32m8_t test_vfwadd_wv_bf16m4_f32m8_tumu(vbool4_t vm, vfloat32m8_t vd,
+ vfloat32m8_t vs2,
+ vbfloat16m4_t vs1, size_t vl) {
+ return __riscv_vfwadd_wv_tumu(vm, vd, vs2, vs1, vl);
+}
+
+// CHECK-RV64-LABEL: define dso_local <vscale x 16 x float> @test_vfwadd_wf_bf16_f32m8_tumu(
+// CHECK-RV64-SAME: <vscale x 16 x i1> [[VM:%.*]], <vscale x 16 x float> [[VD:%.*]], <vscale x 16 x float> [[VS2:%.*]], bfloat noundef [[RS1:%.*]], i64 noundef [[VL:%.*]]) #[[ATTR0]] {
+// CHECK-RV64-NEXT: [[ENTRY:.*:]]
+// CHECK-RV64-NEXT: [[TMP0:%.*]] = call <vscale x 16 x float> @llvm.riscv.vfwadd.w.mask.nxv16f32.bf16.i64(<vscale x 16 x float> [[VD]], <vscale x 16 x float> [[VS2]], bfloat [[RS1]], <vscale x 16 x i1> [[VM]], i64 7, i64 [[VL]], i64 0)
+// CHECK-RV64-NEXT: ret <vscale x 16 x float> [[TMP0]]
+//
+vfloat32m8_t test_vfwadd_wf_bf16_f32m8_tumu(vbool4_t vm, vfloat32m8_t vd,
+ vfloat32m8_t vs2, __bf16 rs1,
+ size_t vl) {
+ return __riscv_vfwadd_wf_tumu(vm, vd, vs2, rs1, vl);
+}
+
+// CHECK-RV64-LABEL: define dso_local <vscale x 1 x float> @test_vfwadd_vv_bf16mf4_f32mf2_mu(
+// CHECK-RV64-SAME: <vscale x 1 x i1> [[VM:%.*]], <vscale x 1 x float> [[VD:%.*]], <vscale x 1 x bfloat> [[VS2:%.*]], <vscale x 1 x bfloat> [[VS1:%.*]], i64 noundef [[VL:%.*]]) #[[ATTR0]] {
+// CHECK-RV64-NEXT: [[ENTRY:.*:]]
+// CHECK-RV64-NEXT: [[TMP0:%.*]] = call <vscale x 1 x float> @llvm.riscv.vfwadd.mask.nxv1f32.nxv1bf16.nxv1bf16.i64(<vscale x 1 x float> [[VD]], <vscale x 1 x bfloat> [[VS2]], <vscale x 1 x bfloat> [[VS1]], <vscale x 1 x i1> [[VM]], i64 7, i64 [[VL]], i64 1)
+// CHECK-RV64-NEXT: ret <vscale x 1 x float> [[TMP0]]
+//
+vfloat32mf2_t test_vfwadd_vv_bf16mf4_f32mf2_mu(vbool64_t vm, vfloat32mf2_t vd,
+ vbfloat16mf4_t vs2,
+ vbfloat16mf4_t vs1, size_t vl) {
+ return __riscv_vfwadd_vv_mu(vm, vd, vs2, vs1, vl);
+}
+
+// CHECK-RV64-LABEL: define dso_local <vscale x 1 x float> @test_vfwadd_vf_bf16mf4_f32mf2_mu(
+// CHECK-RV64-SAME: <vscale x 1 x i1> [[VM:%.*]], <vscale x 1 x float> [[VD:%.*]], <vscale x 1 x bfloat> [[VS2:%.*]], bfloat noundef [[RS1:%.*]], i64 noundef [[VL:%.*]]) #[[ATTR0]] {
+// CHECK-RV64-NEXT: [[ENTRY:.*:]]
+// CHECK-RV64-NEXT: [[TMP0:%.*]] = call <vscale x 1 x float> @llvm.riscv.vfwadd.mask.nxv1f32.nxv1bf16.bf16.i64(<vscale x 1 x float> [[VD]], <vscale x 1 x bfloat> [[VS2]], bfloat [[RS1]], <vscale x 1 x i1> [[VM]], i64 7, i64 [[VL]], i64 1)
+// CHECK-RV64-NEXT: ret <vscale x 1 x float> [[TMP0]]
+//
+vfloat32mf2_t test_vfwadd_vf_bf16mf4_f32mf2_mu(vbool64_t vm, vfloat32mf2_t vd,
+ vbfloat16mf4_t vs2, __bf16 rs1,
+ size_t vl) {
+ return __riscv_vfwadd_vf_mu(vm, vd, vs2, rs1, vl);
+}
+
+// CHECK-RV64-LABEL: define dso_local <vscale x 1 x float> @test_vfwadd_wv_bf16mf4_f32mf2_mu(
+// CHECK-RV64-SAME: <vscale x 1 x i1> [[VM:%.*]], <vscale x 1 x float> [[VD:%.*]], <vscale x 1 x float> [[VS2:%.*]], <vscale x 1 x bfloat> [[VS1:%.*]], i64 noundef [[VL:%.*]]) #[[ATTR0]] {
+// CHECK-RV64-NEXT: [[ENTRY:.*:]]
+// CHECK-RV64-NEXT: [[TMP0:%.*]] = call <vscale x 1 x float> @llvm.riscv.vfwadd.w.mask.nxv1f32.nxv1bf16.i64(<vscale x 1 x float> [[VD]], <vscale x 1 x float> [[VS2]], <vscale x 1 x bfloat> [[VS1]], <vscale x 1 x i1> [[VM]], i64 7, i64 [[VL]], i64 1)
+// CHECK-RV64-NEXT: ret <vscale x 1 x float> [[TMP0]]
+//
+vfloat32mf2_t test_vfwadd_wv_bf16mf4_f32mf2_mu(vbool64_t vm, vfloat32mf2_t vd,
+ vfloat32mf2_t vs2,
+ vbfloat16mf4_t vs1, size_t vl) {
+ return __riscv_vfwadd_wv_mu(vm, vd, vs2, vs1, vl);
+}
+
+// CHECK-RV64-LABEL: define dso_local <vscale x 1 x float> @test_vfwadd_wf_bf16_f32mf2_mu(
+// CHECK-RV64-SAME: <vscale x 1 x i1> [[VM:%.*]], <vscale x 1 x float> [[VD:%.*]], <vscale x 1 x float> [[VS2:%.*]], bfloat noundef [[RS1:%.*]], i64 noundef [[VL:%.*]]) #[[ATTR0]] {
+// CHECK-RV64-NEXT: [[ENTRY:.*:]]
+// CHECK-RV64-NEXT: [[TMP0:%.*]] = call <vscale x 1 x float> @llvm.riscv.vfwadd.w.mask.nxv1f32.bf16.i64(<vscale x 1 x float> [[VD]], <vscale x 1 x float> [[VS2]], bfloat [[RS1]], <vscale x 1 x i1> [[VM]], i64 7, i64 [[VL]], i64 1)
+// CHECK-RV64-NEXT: ret <vscale x 1 x float> [[TMP0]]
+//
+vfloat32mf2_t test_vfwadd_wf_bf16_f32mf2_mu(vbool64_t vm, vfloat32mf2_t vd,
+ vfloat32mf2_t vs2, __bf16 rs1,
+ size_t vl) {
+ return __riscv_vfwadd_wf_mu(vm, vd, vs2, rs1, vl);
+}
+
+// CHECK-RV64-LABEL: define dso_local <vscale x 2 x float> @test_vfwadd_vv_bf16mf2_f32m1_mu(
+// CHECK-RV64-SAME: <vscale x 2 x i1> [[VM:%.*]], <vscale x 2 x float> [[VD:%.*]], <vscale x 2 x bfloat> [[VS2:%.*]], <vscale x 2 x bfloat> [[VS1:%.*]], i64 noundef [[VL:%.*]]) #[[ATTR0]] {
+// CHECK-RV64-NEXT: [[ENTRY:.*:]]
+// CHECK-RV64-NEXT: [[TMP0:%.*]] = call <vscale x 2 x float> @llvm.riscv.vfwadd.mask.nxv2f32.nxv2bf16.nxv2bf16.i64(<vscale x 2 x float> [[VD]], <vscale x 2 x bfloat> [[VS2]], <vscale x 2 x bfloat> [[VS1]], <vscale x 2 x i1> [[VM]], i64 7, i64 [[VL]], i64 1)
+// CHECK-RV64-NEXT: ret <vscale x 2 x float> [[TMP0]]
+//
+vfloat32m1_t test_vfwadd_vv_bf16mf2_f32m1_mu(vbool32_t vm, vfloat32m1_t vd,
+ vbfloat16mf2_t vs2,
+ vbfloat16mf2_t vs1, size_t vl) {
+ return __riscv_vfwadd_vv_mu(vm, vd, vs2, vs1, vl);
+}
+
+// CHECK-RV64-LABEL: define dso_local <vscale x 2 x float> @test_vfwadd_vf_bf16mf2_f32m1_mu(
+// CHECK-RV64-SAME: <vscale x 2 x i1> [[VM:%.*]], <vscale x 2 x float> [[VD:%.*]], <vscale x 2 x bfloat> [[VS2:%.*]], bfloat noundef [[RS1:%.*]], i64 noundef [[VL:%.*]]) #[[ATTR0]] {
+// CHECK-RV64-NEXT: [[ENTRY:.*:]]
+// CHECK-RV64-NEXT: [[TMP0:%.*]] = call <vscale x 2 x float> @llvm.riscv.vfwadd.mask.nxv2f32.nxv2bf16.bf16.i64(<vscale x 2 x float> [[VD]], <vscale x 2 x bfloat> [[VS2]], bfloat [[RS1]], <vscale x 2 x i1> [[VM]], i64 7, i64 [[VL]], i64 1)
+// CHECK-RV64-NEXT: ret <vscale x 2 x float> [[TMP0]]
+//
+vfloat32m1_t test_vfwadd_vf_bf16mf2_f32m1_mu(vbool32_t vm, vfloat32m1_t vd,
+ vbfloat16mf2_t vs2, __bf16 rs1,
+ size_t vl) {
+ return __riscv_vfwadd_vf_mu(vm, vd, vs2, rs1, vl);
+}
+
+// CHECK-RV64-LABEL: define dso_local <vscale x 2 x float> @test_vfwadd_wv_bf16mf2_f32m1_mu(
+// CHECK-RV64-SAME: <vscale x 2 x i1> [[VM:%.*]], <vscale x 2 x float> [[VD:%.*]], <vscale x 2 x float> [[VS2:%.*]], <vscale x 2 x bfloat> [[VS1:%.*]], i64 noundef [[VL:%.*]]) #[[ATTR0]] {
+// CHECK-RV64-NEXT: [[ENTRY:.*:]]
+// CHECK-RV64-NEXT: [[TMP0:%.*]] = call <vscale x 2 x float> @llvm.riscv.vfwadd.w.mask.nxv2f32.nxv2bf16.i64(<vscale x 2 x float> [[VD]], <vscale x 2 x float> [[VS2]], <vscale x 2 x bfloat> [[VS1]], <vscale x 2 x i1> [[VM]], i64 7, i64 [[VL]], i64 1)
+// CHECK-RV64-NEXT: ret <vscale x 2 x float> [[TMP0]]
+//
+vfloat32m1_t test_vfwadd_wv_bf16mf2_f32m1_mu(vbool32_t vm, vfloat32m1_t vd,
+ vfloat32m1_t vs2,
+ vbfloat16mf2_t vs1, size_t vl) {
+ return __riscv_vfwadd_wv_mu(vm, vd, vs2, vs1, vl);
+}
+
+// CHECK-RV64-LABEL: define dso_local <vscale x 2 x float> @test_vfwadd_wf_bf16_f32m1_mu(
+// CHECK-RV64-SAME: <vscale x 2 x i1> [[VM:%.*]], <vscale x 2 x float> [[VD:%.*]], <vscale x 2 x float> [[VS2:%.*]], bfloat noundef [[RS1:%.*]], i64 noundef [[VL:%.*]]) #[[ATTR0]] {
+// CHECK-RV64-NEXT: [[ENTRY:.*:]]
+// CHECK-RV64-NEXT: [[TMP0:%.*]] = call <vscale x 2 x float> @llvm.riscv.vfwadd.w.mask.nxv2f32.bf16.i64(<vscale x 2 x float> [[VD]], <vscale x 2 x float> [[VS2]], bfloat [[RS1]], <vscale x 2 x i1> [[VM]], i64 7, i64 [[VL]], i64 1)
+// CHECK-RV64-NEXT: ret <vscale x 2 x float> [[TMP0]]
+//
+vfloat32m1_t test_vfwadd_wf_bf16_f32m1_mu(vbool32_t vm, vfloat32m1_t vd,
+ vfloat32m1_t vs2, __bf16 rs1,
+ size_t vl) {
+ return __riscv_vfwadd_wf_mu(vm, vd, vs2, rs1, vl);
+}
+
+// CHECK-RV64-LABEL: define dso_local <vscale x 4 x float> @test_vfwadd_vv_bf16m1_f32m2_mu(
+// CHECK-RV64-SAME: <vscale x 4 x i1> [[VM:%.*]], <vscale x 4 x float> [[VD:%.*]], <vscale x 4 x bfloat> [[VS2:%.*]], <vscale x 4 x bfloat> [[VS1:%.*]], i64 noundef [[VL:%.*]]) #[[ATTR0]] {
+// CHECK-RV64-NEXT: [[ENTRY:.*:]]
+// CHECK-RV64-NEXT: [[TMP0:%.*]] = call <vscale x 4 x float> @llvm.riscv.vfwadd.mask.nxv4f32.nxv4bf16.nxv4bf16.i64(<vscale x 4 x float> [[VD]], <vscale x 4 x bfloat> [[VS2]], <vscale x 4 x bfloat> [[VS1]], <vscale x 4 x i1> [[VM]], i64 7, i64 [[VL]], i64 1)
+// CHECK-RV64-NEXT: ret <vscale x 4 x float> [[TMP0]]
+//
+vfloat32m2_t test_vfwadd_vv_bf16m1_f32m2_mu(vbool16_t vm, vfloat32m2_t vd,
+ vbfloat16m1_t vs2,
+ vbfloat16m1_t vs1, size_t vl) {
+ return __riscv_vfwadd_vv_mu(vm, vd, vs2, vs1, vl);
+}
+
+// CHECK-RV64-LABEL: define dso_local <vscale x 4 x float> @test_vfwadd_vf_bf16m1_f32m2_mu(
+// CHECK-RV64-SAME: <vscale x 4 x i1> [[VM:%.*]], <vscale x 4 x float> [[VD:%.*]], <vscale x 4 x bfloat> [[VS2:%.*]], bfloat noundef [[RS1:%.*]], i64 noundef [[VL:%.*]]) #[[ATTR0]] {
+// CHECK-RV64-NEXT: [[ENTRY:.*:]]
+// CHECK-RV64-NEXT: [[TMP0:%.*]] = call <vscale x 4 x float> @llvm.riscv.vfwadd.mask.nxv4f32.nxv4bf16.bf16.i64(<vscale x 4 x float> [[VD]], <vscale x 4 x bfloat> [[VS2]], bfloat [[RS1]], <vscale x 4 x i1> [[VM]], i64 7, i64 [[VL]], i64 1)
+// CHECK-RV64-NEXT: ret <vscale x 4 x float> [[TMP0]]
+//
+vfloat32m2_t test_vfwadd_vf_bf16m1_f32m2_mu(vbool16_t vm, vfloat32m2_t vd,
+ vbfloat16m1_t vs2, __bf16 rs1,
+ size_t vl) {
+ return __riscv_vfwadd_vf_mu(vm, vd, vs2, rs1, vl);
+}
+
+// CHECK-RV64-LABEL: define dso_local <vscale x 4 x float> @test_vfwadd_wv_bf16m1_f32m2_mu(
+// CHECK-RV64-SAME: <vscale x 4 x i1> [[VM:%.*]], <vscale x 4 x float> [[VD:%.*]], <vscale x 4 x float> [[VS2:%.*]], <vscale x 4 x bfloat> [[VS1:%.*]], i64 noundef [[VL:%.*]]) #[[ATTR0]] {
+// CHECK-RV64-NEXT: [[ENTRY:.*:]]
+// CHECK-RV64-NEXT: [[TMP0:%.*]] = call <vscale x 4 x float> @llvm.riscv.vfwadd.w.mask.nxv4f32.nxv4bf16.i64(<vscale x 4 x float> [[VD]], <vscale x 4 x float> [[VS2]], <vscale x 4 x bfloat> [[VS1]], <vscale x 4 x i1> [[VM]], i64 7, i64 [[VL]], i64 1)
+// CHECK-RV64-NEXT: ret <vscale x 4 x float> [[TMP0]]
+//
+vfloat32m2_t test_vfwadd_wv_bf16m1_f32m2_mu(vbool16_t vm, vfloat32m2_t vd,
+ vfloat32m2_t vs2, vbfloat16m1_t vs1,
+ size_t vl) {
+ return __riscv_vfwadd_wv_mu(vm, vd, vs2, vs1, vl);
+}
+
+// CHECK-RV64-LABEL: define dso_local <vscale x 4 x float> @test_vfwadd_wf_bf16_f32m2_mu(
+// CHECK-RV64-SAME: <vscale x 4 x i1> [[VM:%.*]], <vscale x 4 x float> [[VD:%.*]], <vscale x 4 x float> [[VS2:%.*]], bfloat noundef [[RS1:%.*]], i64 noundef [[VL:%.*]]) #[[ATTR0]] {
+// CHECK-RV64-NEXT: [[ENTRY:.*:]]
+// CHECK-RV64-NEXT: [[TMP0:%.*]] = call <vscale x 4 x float> @llvm.riscv.vfwadd.w.mask.nxv4f32.bf16.i64(<vscale x 4 x float> [[VD]], <vscale x 4 x float> [[VS2]], bfloat [[RS1]], <vscale x 4 x i1> [[VM]], i64 7, i64 [[VL]], i64 1)
+// CHECK-RV64-NEXT: ret <vscale x 4 x float> [[TMP0]]
+//
+vfloat32m2_t test_vfwadd_wf_bf16_f32m2_mu(vbool16_t vm, vfloat32m2_t vd,
+ vfloat32m2_t vs2, __bf16 rs1,
+ size_t vl) {
+ return __riscv_vfwadd_wf_mu(vm, vd, vs2, rs1, vl);
+}
+
+// CHECK-RV64-LABEL: define dso_local <vscale x 8 x float> @test_vfwadd_vv_bf16m2_f32m4_mu(
+// CHECK-RV64-SAME: <vscale x 8 x i1> [[VM:%.*]], <vscale x 8 x float> [[VD:%.*]], <vscale x 8 x bfloat> [[VS2:%.*]], <vscale x 8 x bfloat> [[VS1:%.*]], i64 noundef [[VL:%.*]]) #[[ATTR0]] {
+// CHECK-RV64-NEXT: [[ENTRY:.*:]]
+// CHECK-RV64-NEXT: [[TMP0:%.*]] = call <vscale x 8 x float> @llvm.riscv.vfwadd.mask.nxv8f32.nxv8bf16.nxv8bf16.i64(<vscale x 8 x float> [[VD]], <vscale x 8 x bfloat> [[VS2]], <vscale x 8 x bfloat> [[VS1]], <vscale x 8 x i1> [[VM]], i64 7, i64 [[VL]], i64 1)
+// CHECK-RV64-NEXT: ret <vscale x 8 x float> [[TMP0]]
+//
+vfloat32m4_t test_vfwadd_vv_bf16m2_f32m4_mu(vbool8_t vm, vfloat32m4_t vd,
+ vbfloat16m2_t vs2,
+ vbfloat16m2_t vs1, size_t vl) {
+ return __riscv_vfwadd_vv_mu(vm, vd, vs2, vs1, vl);
+}
+
+// CHECK-RV64-LABEL: define dso_local <vscale x 8 x float> @test_vfwadd_vf_bf16m2_f32m4_mu(
+// CHECK-RV64-SAME: <vscale x 8 x i1> [[VM:%.*]], <vscale x 8 x float> [[VD:%.*]], <vscale x 8 x bfloat> [[VS2:%.*]], bfloat noundef [[RS1:%.*]], i64 noundef [[VL:%.*]]) #[[ATTR0]] {
+// CHECK-RV64-NEXT: [[ENTRY:.*:]]
+// CHECK-RV64-NEXT: [[TMP0:%.*]] = call <vscale x 8 x float> @llvm.riscv.vfwadd.mask.nxv8f32.nxv8bf16.bf16.i64(<vscale x 8 x float> [[VD]], <vscale x 8 x bfloat> [[VS2]], bfloat [[RS1]], <vscale x 8 x i1> [[VM]], i64 7, i64 [[VL]], i64 1)
+// CHECK-RV64-NEXT: ret <vscale x 8 x float> [[TMP0]]
+//
+vfloat32m4_t test_vfwadd_vf_bf16m2_f32m4_mu(vbool8_t vm, vfloat32m4_t vd,
+ vbfloat16m2_t vs2, __bf16 rs1,
+ size_t vl) {
+ return __riscv_vfwadd_vf_mu(vm, vd, vs2, rs1, vl);
+}
+
+// CHECK-RV64-LABEL: define dso_local <vscale x 8 x float> @test_vfwadd_wv_bf16m2_f32m4_mu(
+// CHECK-RV64-SAME: <vscale x 8 x i1> [[VM:%.*]], <vscale x 8 x float> [[VD:%.*]], <vscale x 8 x float> [[VS2:%.*]], <vscale x 8 x bfloat> [[VS1:%.*]], i64 noundef [[VL:%.*]]) #[[ATTR0]] {
+// CHECK-RV64-NEXT: [[ENTRY:.*:]]
+// CHECK-RV64-NEXT: [[TMP0:%.*]] = call <vscale x 8 x float> @llvm.riscv.vfwadd.w.mask.nxv8f32.nxv8bf16.i64(<vscale x 8 x float> [[VD]], <vscale x 8 x float> [[VS2]], <vscale x 8 x bfloat> [[VS1]], <vscale x 8 x i1> [[VM]], i64 7, i64 [[VL]], i64 1)
+// CHECK-RV64-NEXT: ret <vscale x 8 x float> [[TMP0]]
+//
+vfloat32m4_t test_vfwadd_wv_bf16m2_f32m4_mu(vbool8_t vm, vfloat32m4_t vd,
+ vfloat32m4_t vs2, vbfloat16m2_t vs1,
+ size_t vl) {
+ return __riscv_vfwadd_wv_mu(vm, vd, vs2, vs1, vl);
+}
+
+// CHECK-RV64-LABEL: define dso_local <vscale x 8 x float> @test_vfwadd_wf_bf16_f32m4_mu(
+// CHECK-RV64-SAME: <vscale x 8 x i1> [[VM:%.*]], <vscale x 8 x float> [[VD:%.*]], <vscale x 8 x float> [[VS2:%.*]], bfloat noundef [[RS1:%.*]], i64 noundef [[VL:%.*]]) #[[ATTR0]] {
+// CHECK-RV64-NEXT: [[ENTRY:.*:]]
+// CHECK-RV64-NEXT: [[TMP0:%.*]] = call <vscale x 8 x float> @llvm.riscv.vfwadd.w.mask.nxv8f32.bf16.i64(<vscale x 8 x float> [[VD]], <vscale x 8 x float> [[VS2]], bfloat [[RS1]], <vscale x 8 x i1> [[VM]], i64 7, i64 [[VL]], i64 1)
+// CHECK-RV64-NEXT: ret <vscale x 8 x float> [[TMP0]]
+//
+vfloat32m4_t test_vfwadd_wf_bf16_f32m4_mu(vbool8_t vm, vfloat32m4_t vd,
+ vfloat32m4_t vs2, __bf16 rs1,
+ size_t vl) {
+ return __riscv_vfwadd_wf_mu(vm, vd, vs2, rs1, vl);
+}
+
+// CHECK-RV64-LABEL: define dso_local <vscale x 16 x float> @test_vfwadd_vv_bf16m4_f32m8_mu(
+// CHECK-RV64-SAME: <vscale x 16 x i1> [[VM:%.*]], <vscale x 16 x float> [[VD:%.*]], <vscale x 16 x bfloat> [[VS2:%.*]], <vscale x 16 x bfloat> [[VS1:%.*]], i64 noundef [[VL:%.*]]) #[[ATTR0]] {
+// CHECK-RV64-NEXT: [[ENTRY:.*:]]
+// CHECK-RV64-NEXT: [[TMP0:%.*]] = call <vscale x 16 x float> @llvm.riscv.vfwadd.mask.nxv16f32.nxv16bf16.nxv16bf16.i64(<vscale x 16 x float> [[VD]], <vscale x 16 x bfloat> [[VS2]], <vscale x 16 x bfloat> [[VS1]], <vscale x 16 x i1> [[VM]], i64 7, i64 [[VL]], i64 1)
+// CHECK-RV64-NEXT: ret <vscale x 16 x float> [[TMP0]]
+//
+vfloat32m8_t test_vfwadd_vv_bf16m4_f32m8_mu(vbool4_t vm, vfloat32m8_t vd,
+ vbfloat16m4_t vs2,
+ vbfloat16m4_t vs1, size_t vl) {
+ return __riscv_vfwadd_vv_mu(vm, vd, vs2, vs1, vl);
+}
+
+// CHECK-RV64-LABEL: define dso_local <vscale x 16 x float> @test_vfwadd_vf_bf16m4_f32m8_mu(
+// CHECK-RV64-SAME: <vscale x 16 x i1> [[VM:%.*]], <vscale x 16 x float> [[VD:%.*]], <vscale x 16 x bfloat> [[VS2:%.*]], bfloat noundef [[RS1:%.*]], i64 noundef [[VL:%.*]]) #[[ATTR0]] {
+// CHECK-RV64-NEXT: [[ENTRY:.*:]]
+// CHECK-RV64-NEXT: [[TMP0:%.*]] = call <vscale x 16 x float> @llvm.riscv.vfwadd.mask.nxv16f32.nxv16bf16.bf16.i64(<vscale x 16 x float> [[VD]], <vscale x 16 x bfloat> [[VS2]], bfloat [[RS1]], <vscale x 16 x i1> [[VM]], i64 7, i64 [[VL]], i64 1)
+// CHECK-RV64-NEXT: ret <vscale x 16 x float> [[TMP0]]
+//
+vfloat32m8_t test_vfwadd_vf_bf16m4_f32m8_mu(vbool4_t vm, vfloat32m8_t vd,
+ vbfloat16m4_t vs2, __bf16 rs1,
+ size_t vl) {
+ return __riscv_vfwadd_vf_mu(vm, vd, vs2, rs1, vl);
+}
+
+// CHECK-RV64-LABEL: define dso_local <vscale x 16 x float> @test_vfwadd_wv_bf16m4_f32m8_mu(
+// CHECK-RV64-SAME: <vscale x 16 x i1> [[VM:%.*]], <vscale x 16 x float> [[VD:%.*]], <vscale x 16 x float> [[VS2:%.*]], <vscale x 16 x bfloat> [[VS1:%.*]], i64 noundef [[VL:%.*]]) #[[ATTR0]] {
+// CHECK-RV64-NEXT: [[ENTRY:.*:]]
+// CHECK-RV64-NEXT: [[TMP0:%.*]] = call <vscale x 16 x float> @llvm.riscv.vfwadd.w.mask.nxv16f32.nxv16bf16.i64(<vscale x 16 x float> [[VD]], <vscale x 16 x float> [[VS2]], <vscale x 16 x bfloat> [[VS1]], <vscale x 16 x i1> [[VM]], i64 7, i64 [[VL]], i64 1)
+// CHECK-RV64-NEXT: ret <vscale x 16 x float> [[TMP0]]
+//
+vfloat32m8_t test_vfwadd_wv_bf16m4_f32m8_mu(vbool4_t vm, vfloat32m8_t vd,
+ vfloat32m8_t vs2, vbfloat16m4_t vs1,
+ size_t vl) {
+ return __riscv_vfwadd_wv_mu(vm, vd, vs2, vs1, vl);
+}
+
+// CHECK-RV64-LABEL: define dso_local <vscale x 16 x float> @test_vfwadd_wf_bf16_f32m8_mu(
+// CHECK-RV64-SAME: <vscale x 16 x i1> [[VM:%.*]], <vscale x 16 x float> [[VD:%.*]], <vscale x 16 x float> [[VS2:%.*]], bfloat noundef [[RS1:%.*]], i64 noundef [[VL:%.*]]) #[[ATTR0]] {
+// CHECK-RV64-NEXT: [[ENTRY:.*:]]
+// CHECK-RV64-NEXT: [[TMP0:%.*]] = call <vscale x 16 x float> @llvm.riscv.vfwadd.w.mask.nxv16f32.bf16.i64(<vscale x 16 x float> [[VD]], <vscale x 16 x float> [[VS2]], bfloat [[RS1]], <vscale x 16 x i1> [[VM]], i64 7, i64 [[VL]], i64 1)
+// CHECK-RV64-NEXT: ret <vscale x 16 x float> [[TMP0]]
+//
+vfloat32m8_t test_vfwadd_wf_bf16_f32m8_mu(vbool4_t vm, vfloat32m8_t vd,
+ vfloat32m8_t vs2, __bf16 rs1,
+ size_t vl) {
+ return __riscv_vfwadd_wf_mu(vm, vd, vs2, rs1, vl);
+}
+
+// CHECK-RV64-LABEL: define dso_local <vscale x 1 x float> @test_vfwadd_vv_bf16mf4_f32mf2_rm_tu(
+// CHECK-RV64-SAME: <vscale x 1 x float> [[VD:%.*]], <vscale x 1 x bfloat> [[VS2:%.*]], <vscale x 1 x bfloat> [[VS1:%.*]], i64 noundef [[VL:%.*]]) #[[ATTR0]] {
+// CHECK-RV64-NEXT: [[ENTRY:.*:]]
+// CHECK-RV64-NEXT: [[TMP0:%.*]] = call <vscale x 1 x float> @llvm.riscv.vfwadd.nxv1f32.nxv1bf16.nxv1bf16.i64(<vscale x 1 x float> [[VD]], <vscale x 1 x bfloat> [[VS2]], <vscale x 1 x bfloat> [[VS1]], i64 0, i64 [[VL]])
+// CHECK-RV64-NEXT: ret <vscale x 1 x float> [[TMP0]]
+//
+vfloat32mf2_t test_vfwadd_vv_bf16mf4_f32mf2_rm_tu(vfloat32mf2_t vd,
+ vbfloat16mf4_t vs2,
+ vbfloat16mf4_t vs1,
+ size_t vl) {
+ return __riscv_vfwadd_vv_tu(vd, vs2, vs1, __RISCV_FRM_RNE, vl);
+}
+
+// CHECK-RV64-LABEL: define dso_local <vscale x 1 x float> @test_vfwadd_vf_bf16mf4_f32mf2_rm_tu(
+// CHECK-RV64-SAME: <vscale x 1 x float> [[VD:%.*]], <vscale x 1 x bfloat> [[VS2:%.*]], bfloat noundef [[RS1:%.*]], i64 noundef [[VL:%.*]]) #[[ATTR0]] {
+// CHECK-RV64-NEXT: [[ENTRY:.*:]]
+// CHECK-RV64-NEXT: [[TMP0:%.*]] = call <vscale x 1 x float> @llvm.riscv.vfwadd.nxv1f32.nxv1bf16.bf16.i64(<vscale x 1 x float> [[VD]], <vscale x 1 x bfloat> [[VS2]], bfloat [[RS1]], i64 0, i64 [[VL]])
+// CHECK-RV64-NEXT: ret <vscale x 1 x float> [[TMP0]]
+//
+vfloat32mf2_t test_vfwadd_vf_bf16mf4_f32mf2_rm_tu(vfloat32mf2_t vd,
+ vbfloat16mf4_t vs2,
+ __bf16 rs1, size_t vl) {
+ return __riscv_vfwadd_vf_tu(vd, vs2, rs1, __RISCV_FRM_RNE, vl);
+}
+
+// CHECK-RV64-LABEL: define dso_local <vscale x 1 x float> @test_vfwadd_wv_bf16mf4_f32mf2_rm_tu(
+// CHECK-RV64-SAME: <vscale x 1 x float> [[VD:%.*]], <vscale x 1 x float> [[VS2:%.*]], <vscale x 1 x bfloat> [[VS1:%.*]], i64 noundef [[VL:%.*]]) #[[ATTR0]] {
+// CHECK-RV64-NEXT: [[ENTRY:.*:]]
+// CHECK-RV64-NEXT: [[TMP0:%.*]] = call <vscale x 1 x float> @llvm.riscv.vfwadd.w.nxv1f32.nxv1bf16.i64(<vscale x 1 x float> [[VD]], <vscale x 1 x float> [[VS2]], <vscale x 1 x bfloat> [[VS1]], i64 0, i64 [[VL]])
+// CHECK-RV64-NEXT: ret <vscale x 1 x float> [[TMP0]]
+//
+vfloat32mf2_t test_vfwadd_wv_bf16mf4_f32mf2_rm_tu(vfloat32mf2_t vd,
+ vfloat32mf2_t vs2,
+ vbfloat16mf4_t vs1,
+ size_t vl) {
+ return __riscv_vfwadd_wv_tu(vd, vs2, vs1, __RISCV_FRM_RNE, vl);
+}
+
+// CHECK-RV64-LABEL: define dso_local <vscale x 1 x float> @test_vfwadd_wf_bf16_f32mf2_rm_tu(
+// CHECK-RV64-SAME: <vscale x 1 x float> [[VD:%.*]], <vscale x 1 x float> [[VS2:%.*]], bfloat noundef [[RS1:%.*]], i64 noundef [[VL:%.*]]) #[[ATTR0]] {
+// CHECK-RV64-NEXT: [[ENTRY:.*:]]
+// CHECK-RV64-NEXT: [[TMP0:%.*]] = call <vscale x 1 x float> @llvm.riscv.vfwadd.w.nxv1f32.bf16.i64(<vscale x 1 x float> [[VD]], <vscale x 1 x float> [[VS2]], bfloat [[RS1]], i64 0, i64 [[VL]])
+// CHECK-RV64-NEXT: ret <vscale x 1 x float> [[TMP0]]
+//
+vfloat32mf2_t test_vfwadd_wf_bf16_f32mf2_rm_tu(vfloat32mf2_t vd,
+ vfloat32mf2_t vs2, __bf16 rs1,
+ size_t vl) {
+ return __riscv_vfwadd_wf_tu(vd, vs2, rs1, __RISCV_FRM_RNE, vl);
+}
+
+// CHECK-RV64-LABEL: define dso_local <vscale x 2 x float> @test_vfwadd_vv_bf16mf2_f32m1_rm_tu(
+// CHECK-RV64-SAME: <vscale x 2 x float> [[VD:%.*]], <vscale x 2 x bfloat> [[VS2:%.*]], <vscale x 2 x bfloat> [[VS1:%.*]], i64 noundef [[VL:%.*]]) #[[ATTR0]] {
+// CHECK-RV64-NEXT: [[ENTRY:.*:]]
+// CHECK-RV64-NEXT: [[TMP0:%.*]] = call <vscale x 2 x float> @llvm.riscv.vfwadd.nxv2f32.nxv2bf16.nxv2bf16.i64(<vscale x 2 x float> [[VD]], <vscale x 2 x bfloat> [[VS2]], <vscale x 2 x bfloat> [[VS1]], i64 0, i64 [[VL]])
+// CHECK-RV64-NEXT: ret <vscale x 2 x float> [[TMP0]]
+//
+vfloat32m1_t test_vfwadd_vv_bf16mf2_f32m1_rm_tu(vfloat32m1_t vd,
+ vbfloat16mf2_t vs2,
+ vbfloat16mf2_t vs1, size_t vl) {
+ return __riscv_vfwadd_vv_tu(vd, vs2, vs1, __RISCV_FRM_RNE, vl);
+}
+
+// CHECK-RV64-LABEL: define dso_local <vscale x 2 x float> @test_vfwadd_vf_bf16mf2_f32m1_rm_tu(
+// CHECK-RV64-SAME: <vscale x 2 x float> [[VD:%.*]], <vscale x 2 x bfloat> [[VS2:%.*]], bfloat noundef [[RS1:%.*]], i64 noundef [[VL:%.*]]) #[[ATTR0]] {
+// CHECK-RV64-NEXT: [[ENTRY:.*:]]
+// CHECK-RV64-NEXT: [[TMP0:%.*]] = call <vscale x 2 x float> @llvm.riscv.vfwadd.nxv2f32.nxv2bf16.bf16.i64(<vscale x 2 x float> [[VD]], <vscale x 2 x bfloat> [[VS2]], bfloat [[RS1]], i64 0, i64 [[VL]])
+// CHECK-RV64-NEXT: ret <vscale x 2 x float> [[TMP0]]
+//
+vfloat32m1_t test_vfwadd_vf_bf16mf2_f32m1_rm_tu(vfloat32m1_t vd,
+ vbfloat16mf2_t vs2, __bf16 rs1,
+ size_t vl) {
+ return __riscv_vfwadd_vf_tu(vd, vs2, rs1, __RISCV_FRM_RNE, vl);
+}
+
+// CHECK-RV64-LABEL: define dso_local <vscale x 2 x float> @test_vfwadd_wv_bf16mf2_f32m1_rm_tu(
+// CHECK-RV64-SAME: <vscale x 2 x float> [[VD:%.*]], <vscale x 2 x float> [[VS2:%.*]], <vscale x 2 x bfloat> [[VS1:%.*]], i64 noundef [[VL:%.*]]) #[[ATTR0]] {
+// CHECK-RV64-NEXT: [[ENTRY:.*:]]
+// CHECK-RV64-NEXT: [[TMP0:%.*]] = call <vscale x 2 x float> @llvm.riscv.vfwadd.w.nxv2f32.nxv2bf16.i64(<vscale x 2 x float> [[VD]], <vscale x 2 x float> [[VS2]], <vscale x 2 x bfloat> [[VS1]], i64 0, i64 [[VL]])
+// CHECK-RV64-NEXT: ret <vscale x 2 x float> [[TMP0]]
+//
+vfloat32m1_t test_vfwadd_wv_bf16mf2_f32m1_rm_tu(vfloat32m1_t vd,
+ vfloat32m1_t vs2,
+ vbfloat16mf2_t vs1, size_t vl) {
+ return __riscv_vfwadd_wv_tu(vd, vs2, vs1, __RISCV_FRM_RNE, vl);
+}
+
+// CHECK-RV64-LABEL: define dso_local <vscale x 2 x float> @test_vfwadd_wf_bf16_f32m1_rm_tu(
+// CHECK-RV64-SAME: <vscale x 2 x float> [[VD:%.*]], <vscale x 2 x float> [[VS2:%.*]], bfloat noundef [[RS1:%.*]], i64 noundef [[VL:%.*]]) #[[ATTR0]] {
+// CHECK-RV64-NEXT: [[ENTRY:.*:]]
+// CHECK-RV64-NEXT: [[TMP0:%.*]] = call <vscale x 2 x float> @llvm.riscv.vfwadd.w.nxv2f32.bf16.i64(<vscale x 2 x float> [[VD]], <vscale x 2 x float> [[VS2]], bfloat [[RS1]], i64 0, i64 [[VL]])
+// CHECK-RV64-NEXT: ret <vscale x 2 x float> [[TMP0]]
+//
+vfloat32m1_t test_vfwadd_wf_bf16_f32m1_rm_tu(vfloat32m1_t vd, vfloat32m1_t vs2,
+ __bf16 rs1, size_t vl) {
+ return __riscv_vfwadd_wf_tu(vd, vs2, rs1, __RISCV_FRM_RNE, vl);
+}
+
+// CHECK-RV64-LABEL: define dso_local <vscale x 4 x float> @test_vfwadd_vv_bf16m1_f32m2_rm_tu(
+// CHECK-RV64-SAME: <vscale x 4 x float> [[VD:%.*]], <vscale x 4 x bfloat> [[VS2:%.*]], <vscale x 4 x bfloat> [[VS1:%.*]], i64 noundef [[VL:%.*]]) #[[ATTR0]] {
+// CHECK-RV64-NEXT: [[ENTRY:.*:]]
+// CHECK-RV64-NEXT: [[TMP0:%.*]] = call <vscale x 4 x float> @llvm.riscv.vfwadd.nxv4f32.nxv4bf16.nxv4bf16.i64(<vscale x 4 x float> [[VD]], <vscale x 4 x bfloat> [[VS2]], <vscale x 4 x bfloat> [[VS1]], i64 0, i64 [[VL]])
+// CHECK-RV64-NEXT: ret <vscale x 4 x float> [[TMP0]]
+//
+vfloat32m2_t test_vfwadd_vv_bf16m1_f32m2_rm_tu(vfloat32m2_t vd,
+ vbfloat16m1_t vs2,
+ vbfloat16m1_t vs1, size_t vl) {
+ return __riscv_vfwadd_vv_tu(vd, vs2, vs1, __RISCV_FRM_RNE, vl);
+}
+
+// CHECK-RV64-LABEL: define dso_local <vscale x 4 x float> @test_vfwadd_vf_bf16m1_f32m2_rm_tu(
+// CHECK-RV64-SAME: <vscale x 4 x float> [[VD:%.*]], <vscale x 4 x bfloat> [[VS2:%.*]], bfloat noundef [[RS1:%.*]], i64 noundef [[VL:%.*]]) #[[ATTR0]] {
+// CHECK-RV64-NEXT: [[ENTRY:.*:]]
+// CHECK-RV64-NEXT: [[TMP0:%.*]] = call <vscale x 4 x float> @llvm.riscv.vfwadd.nxv4f32.nxv4bf16.bf16.i64(<vscale x 4 x float> [[VD]], <vscale x 4 x bfloat> [[VS2]], bfloat [[RS1]], i64 0, i64 [[VL]])
+// CHECK-RV64-NEXT: ret <vscale x 4 x float> [[TMP0]]
+//
+vfloat32m2_t test_vfwadd_vf_bf16m1_f32m2_rm_tu(vfloat32m2_t vd,
+ vbfloat16m1_t vs2, __bf16 rs1,
+ size_t vl) {
+ return __riscv_vfwadd_vf_tu(vd, vs2, rs1, __RISCV_FRM_RNE, vl);
+}
+
+// CHECK-RV64-LABEL: define dso_local <vscale x 4 x float> @test_vfwadd_wv_bf16m1_f32m2_rm_tu(
+// CHECK-RV64-SAME: <vscale x 4 x float> [[VD:%.*]], <vscale x 4 x float> [[VS2:%.*]], <vscale x 4 x bfloat> [[VS1:%.*]], i64 noundef [[VL:%.*]]) #[[ATTR0]] {
+// CHECK-RV64-NEXT: [[ENTRY:.*:]]
+// CHECK-RV64-NEXT: [[TMP0:%.*]] = call <vscale x 4 x float> @llvm.riscv.vfwadd.w.nxv4f32.nxv4bf16.i64(<vscale x 4 x float> [[VD]], <vscale x 4 x float> [[VS2]], <vscale x 4 x bfloat> [[VS1]], i64 0, i64 [[VL]])
+// CHECK-RV64-NEXT: ret <vscale x 4 x float> [[TMP0]]
+//
+vfloat32m2_t test_vfwadd_wv_bf16m1_f32m2_rm_tu(vfloat32m2_t vd,
+ vfloat32m2_t vs2,
+ vbfloat16m1_t vs1, size_t vl) {
+ return __riscv_vfwadd_wv_tu(vd, vs2, vs1, __RISCV_FRM_RNE, vl);
+}
+
+// CHECK-RV64-LABEL: define dso_local <vscale x 4 x float> @test_vfwadd_wf_bf16_f32m2_rm_tu(
+// CHECK-RV64-SAME: <vscale x 4 x float> [[VD:%.*]], <vscale x 4 x float> [[VS2:%.*]], bfloat noundef [[RS1:%.*]], i64 noundef [[VL:%.*]]) #[[ATTR0]] {
+// CHECK-RV64-NEXT: [[ENTRY:.*:]]
+// CHECK-RV64-NEXT: [[TMP0:%.*]] = call <vscale x 4 x float> @llvm.riscv.vfwadd.w.nxv4f32.bf16.i64(<vscale x 4 x float> [[VD]], <vscale x 4 x float> [[VS2]], bfloat [[RS1]], i64 0, i64 [[VL]])
+// CHECK-RV64-NEXT: ret <vscale x 4 x float> [[TMP0]]
+//
+vfloat32m2_t test_vfwadd_wf_bf16_f32m2_rm_tu(vfloat32m2_t vd, vfloat32m2_t vs2,
+ __bf16 rs1, size_t vl) {
+ return __riscv_vfwadd_wf_tu(vd, vs2, rs1, __RISCV_FRM_RNE, vl);
+}
+
+// CHECK-RV64-LABEL: define dso_local <vscale x 8 x float> @test_vfwadd_vv_bf16m2_f32m4_rm_tu(
+// CHECK-RV64-SAME: <vscale x 8 x float> [[VD:%.*]], <vscale x 8 x bfloat> [[VS2:%.*]], <vscale x 8 x bfloat> [[VS1:%.*]], i64 noundef [[VL:%.*]]) #[[ATTR0]] {
+// CHECK-RV64-NEXT: [[ENTRY:.*:]]
+// CHECK-RV64-NEXT: [[TMP0:%.*]] = call <vscale x 8 x float> @llvm.riscv.vfwadd.nxv8f32.nxv8bf16.nxv8bf16.i64(<vscale x 8 x float> [[VD]], <vscale x 8 x bfloat> [[VS2]], <vscale x 8 x bfloat> [[VS1]], i64 0, i64 [[VL]])
+// CHECK-RV64-NEXT: ret <vscale x 8 x float> [[TMP0]]
+//
+vfloat32m4_t test_vfwadd_vv_bf16m2_f32m4_rm_tu(vfloat32m4_t vd,
+ vbfloat16m2_t vs2,
+ vbfloat16m2_t vs1, size_t vl) {
+ return __riscv_vfwadd_vv_tu(vd, vs2, vs1, __RISCV_FRM_RNE, vl);
+}
+
+// CHECK-RV64-LABEL: define dso_local <vscale x 8 x float> @test_vfwadd_vf_bf16m2_f32m4_rm_tu(
+// CHECK-RV64-SAME: <vscale x 8 x float> [[VD:%.*]], <vscale x 8 x bfloat> [[VS2:%.*]], bfloat noundef [[RS1:%.*]], i64 noundef [[VL:%.*]]) #[[ATTR0]] {
+// CHECK-RV64-NEXT: [[ENTRY:.*:]]
+// CHECK-RV64-NEXT: [[TMP0:%.*]] = call <vscale x 8 x float> @llvm.riscv.vfwadd.nxv8f32.nxv8bf16.bf16.i64(<vscale x 8 x float> [[VD]], <vscale x 8 x bfloat> [[VS2]], bfloat [[RS1]], i64 0, i64 [[VL]])
+// CHECK-RV64-NEXT: ret <vscale x 8 x float> [[TMP0]]
+//
+vfloat32m4_t test_vfwadd_vf_bf16m2_f32m4_rm_tu(vfloat32m4_t vd,
+ vbfloat16m2_t vs2, __bf16 rs1,
+ size_t vl) {
+ return __riscv_vfwadd_vf_tu(vd, vs2, rs1, __RISCV_FRM_RNE, vl);
+}
+
+// CHECK-RV64-LABEL: define dso_local <vscale x 8 x float> @test_vfwadd_wv_bf16m2_f32m4_rm_tu(
+// CHECK-RV64-SAME: <vscale x 8 x float> [[VD:%.*]], <vscale x 8 x float> [[VS2:%.*]], <vscale x 8 x bfloat> [[VS1:%.*]], i64 noundef [[VL:%.*]]) #[[ATTR0]] {
+// CHECK-RV64-NEXT: [[ENTRY:.*:]]
+// CHECK-RV64-NEXT: [[TMP0:%.*]] = call <vscale x 8 x float> @llvm.riscv.vfwadd.w.nxv8f32.nxv8bf16.i64(<vscale x 8 x float> [[VD]], <vscale x 8 x float> [[VS2]], <vscale x 8 x bfloat> [[VS1]], i64 0, i64 [[VL]])
+// CHECK-RV64-NEXT: ret <vscale x 8 x float> [[TMP0]]
+//
+vfloat32m4_t test_vfwadd_wv_bf16m2_f32m4_rm_tu(vfloat32m4_t vd,
+ vfloat32m4_t vs2,
+ vbfloat16m2_t vs1, size_t vl) {
+ return __riscv_vfwadd_wv_tu(vd, vs2, vs1, __RISCV_FRM_RNE, vl);
+}
+
+// CHECK-RV64-LABEL: define dso_local <vscale x 8 x float> @test_vfwadd_wf_bf16_f32m4_rm_tu(
+// CHECK-RV64-SAME: <vscale x 8 x float> [[VD:%.*]], <vscale x 8 x float> [[VS2:%.*]], bfloat noundef [[RS1:%.*]], i64 noundef [[VL:%.*]]) #[[ATTR0]] {
+// CHECK-RV64-NEXT: [[ENTRY:.*:]]
+// CHECK-RV64-NEXT: [[TMP0:%.*]] = call <vscale x 8 x float> @llvm.riscv.vfwadd.w.nxv8f32.bf16.i64(<vscale x 8 x float> [[VD]], <vscale x 8 x float> [[VS2]], bfloat [[RS1]], i64 0, i64 [[VL]])
+// CHECK-RV64-NEXT: ret <vscale x 8 x float> [[TMP0]]
+//
+vfloat32m4_t test_vfwadd_wf_bf16_f32m4_rm_tu(vfloat32m4_t vd, vfloat32m4_t vs2,
+ __bf16 rs1, size_t vl) {
+ return __riscv_vfwadd_wf_tu(vd, vs2, rs1, __RISCV_FRM_RNE, vl);
+}
+
+// CHECK-RV64-LABEL: define dso_local <vscale x 16 x float> @test_vfwadd_vv_bf16m4_f32m8_rm_tu(
+// CHECK-RV64-SAME: <vscale x 16 x float> [[VD:%.*]], <vscale x 16 x bfloat> [[VS2:%.*]], <vscale x 16 x bfloat> [[VS1:%.*]], i64 noundef [[VL:%.*]]) #[[ATTR0]] {
+// CHECK-RV64-NEXT: [[ENTRY:.*:]]
+// CHECK-RV64-NEXT: [[TMP0:%.*]] = call <vscale x 16 x float> @llvm.riscv.vfwadd.nxv16f32.nxv16bf16.nxv16bf16.i64(<vscale x 16 x float> [[VD]], <vscale x 16 x bfloat> [[VS2]], <vscale x 16 x bfloat> [[VS1]], i64 0, i64 [[VL]])
+// CHECK-RV64-NEXT: ret <vscale x 16 x float> [[TMP0]]
+//
+vfloat32m8_t test_vfwadd_vv_bf16m4_f32m8_rm_tu(vfloat32m8_t vd,
+ vbfloat16m4_t vs2,
+ vbfloat16m4_t vs1, size_t vl) {
+ return __riscv_vfwadd_vv_tu(vd, vs2, vs1, __RISCV_FRM_RNE, vl);
+}
+
+// CHECK-RV64-LABEL: define dso_local <vscale x 16 x float> @test_vfwadd_vf_bf16m4_f32m8_rm_tu(
+// CHECK-RV64-SAME: <vscale x 16 x float> [[VD:%.*]], <vscale x 16 x bfloat> [[VS2:%.*]], bfloat noundef [[RS1:%.*]], i64 noundef [[VL:%.*]]) #[[ATTR0]] {
+// CHECK-RV64-NEXT: [[ENTRY:.*:]]
+// CHECK-RV64-NEXT: [[TMP0:%.*]] = call <vscale x 16 x float> @llvm.riscv.vfwadd.nxv16f32.nxv16bf16.bf16.i64(<vscale x 16 x float> [[VD]], <vscale x 16 x bfloat> [[VS2]], bfloat [[RS1]], i64 0, i64 [[VL]])
+// CHECK-RV64-NEXT: ret <vscale x 16 x float> [[TMP0]]
+//
+vfloat32m8_t test_vfwadd_vf_bf16m4_f32m8_rm_tu(vfloat32m8_t vd,
+ vbfloat16m4_t vs2, __bf16 rs1,
+ size_t vl) {
+ return __riscv_vfwadd_vf_tu(vd, vs2, rs1, __RISCV_FRM_RNE, vl);
+}
+
+// CHECK-RV64-LABEL: define dso_local <vscale x 16 x float> @test_vfwadd_wv_bf16m4_f32m8_rm_tu(
+// CHECK-RV64-SAME: <vscale x 16 x float> [[VD:%.*]], <vscale x 16 x float> [[VS2:%.*]], <vscale x 16 x bfloat> [[VS1:%.*]], i64 noundef [[VL:%.*]]) #[[ATTR0]] {
+// CHECK-RV64-NEXT: [[ENTRY:.*:]]
+// CHECK-RV64-NEXT: [[TMP0:%.*]] = call <vscale x 16 x float> @llvm.riscv.vfwadd.w.nxv16f32.nxv16bf16.i64(<vscale x 16 x float> [[VD]], <vscale x 16 x float> [[VS2]], <vscale x 16 x bfloat> [[VS1]], i64 0, i64 [[VL]])
+// CHECK-RV64-NEXT: ret <vscale x 16 x float> [[TMP0]]
+//
+vfloat32m8_t test_vfwadd_wv_bf16m4_f32m8_rm_tu(vfloat32m8_t vd,
+ vfloat32m8_t vs2,
+ vbfloat16m4_t vs1, size_t vl) {
+ return __riscv_vfwadd_wv_tu(vd, vs2, vs1, __RISCV_FRM_RNE, vl);
+}
+
+// CHECK-RV64-LABEL: define dso_local <vscale x 16 x float> @test_vfwadd_wf_bf16_f32m8_rm_tu(
+// CHECK-RV64-SAME: <vscale x 16 x float> [[VD:%.*]], <vscale x 16 x float> [[VS2:%.*]], bfloat noundef [[RS1:%.*]], i64 noundef [[VL:%.*]]) #[[ATTR0]] {
+// CHECK-RV64-NEXT: [[ENTRY:.*:]]
+// CHECK-RV64-NEXT: [[TMP0:%.*]] = call <vscale x 16 x float> @llvm.riscv.vfwadd.w.nxv16f32.bf16.i64(<vscale x 16 x float> [[VD]], <vscale x 16 x float> [[VS2]], bfloat [[RS1]], i64 0, i64 [[VL]])
+// CHECK-RV64-NEXT: ret <vscale x 16 x float> [[TMP0]]
+//
+vfloat32m8_t test_vfwadd_wf_bf16_f32m8_rm_tu(vfloat32m8_t vd, vfloat32m8_t vs2,
+ __bf16 rs1, size_t vl) {
+ return __riscv_vfwadd_wf_tu(vd, vs2, rs1, __RISCV_FRM_RNE, vl);
+}
+
+// CHECK-RV64-LABEL: define dso_local <vscale x 1 x float> @test_vfwadd_vv_bf16mf4_f32mf2_rm_tum(
+// CHECK-RV64-SAME: <vscale x 1 x i1> [[VM:%.*]], <vscale x 1 x float> [[VD:%.*]], <vscale x 1 x bfloat> [[VS2:%.*]], <vscale x 1 x bfloat> [[VS1:%.*]], i64 noundef [[VL:%.*]]) #[[ATTR0]] {
+// CHECK-RV64-NEXT: [[ENTRY:.*:]]
+// CHECK-RV64-NEXT: [[TMP0:%.*]] = call <vscale x 1 x float> @llvm.riscv.vfwadd.mask.nxv1f32.nxv1bf16.nxv1bf16.i64(<vscale x 1 x float> [[VD]], <vscale x 1 x bfloat> [[VS2]], <vscale x 1 x bfloat> [[VS1]], <vscale x 1 x i1> [[VM]], i64 0, i64 [[VL]], i64 2)
+// CHECK-RV64-NEXT: ret <vscale x 1 x float> [[TMP0]]
+//
+vfloat32mf2_t test_vfwadd_vv_bf16mf4_f32mf2_rm_tum(vbool64_t vm,
+ vfloat32mf2_t vd,
+ vbfloat16mf4_t vs2,
+ vbfloat16mf4_t vs1,
+ size_t vl) {
+ return __riscv_vfwadd_vv_tum(vm, vd, vs2, vs1, __RISCV_FRM_RNE, vl);
+}
+
+// CHECK-RV64-LABEL: define dso_local <vscale x 1 x float> @test_vfwadd_vf_bf16mf4_f32mf2_rm_tum(
+// CHECK-RV64-SAME: <vscale x 1 x i1> [[VM:%.*]], <vscale x 1 x float> [[VD:%.*]], <vscale x 1 x bfloat> [[VS2:%.*]], bfloat noundef [[RS1:%.*]], i64 noundef [[VL:%.*]]) #[[ATTR0]] {
+// CHECK-RV64-NEXT: [[ENTRY:.*:]]
+// CHECK-RV64-NEXT: [[TMP0:%.*]] = call <vscale x 1 x float> @llvm.riscv.vfwadd.mask.nxv1f32.nxv1bf16.bf16.i64(<vscale x 1 x float> [[VD]], <vscale x 1 x bfloat> [[VS2]], bfloat [[RS1]], <vscale x 1 x i1> [[VM]], i64 0, i64 [[VL]], i64 2)
+// CHECK-RV64-NEXT: ret <vscale x 1 x float> [[TMP0]]
+//
+vfloat32mf2_t test_vfwadd_vf_bf16mf4_f32mf2_rm_tum(vbool64_t vm,
+ vfloat32mf2_t vd,
+ vbfloat16mf4_t vs2,
+ __bf16 rs1, size_t vl) {
+ return __riscv_vfwadd_vf_tum(vm, vd, vs2, rs1, __RISCV_FRM_RNE, vl);
+}
+
+// CHECK-RV64-LABEL: define dso_local <vscale x 1 x float> @test_vfwadd_wv_bf16mf4_f32mf2_rm_tum(
+// CHECK-RV64-SAME: <vscale x 1 x i1> [[VM:%.*]], <vscale x 1 x float> [[VD:%.*]], <vscale x 1 x float> [[VS2:%.*]], <vscale x 1 x bfloat> [[VS1:%.*]], i64 noundef [[VL:%.*]]) #[[ATTR0]] {
+// CHECK-RV64-NEXT: [[ENTRY:.*:]]
+// CHECK-RV64-NEXT: [[TMP0:%.*]] = call <vscale x 1 x float> @llvm.riscv.vfwadd.w.mask.nxv1f32.nxv1bf16.i64(<vscale x 1 x float> [[VD]], <vscale x 1 x float> [[VS2]], <vscale x 1 x bfloat> [[VS1]], <vscale x 1 x i1> [[VM]], i64 0, i64 [[VL]], i64 2)
+// CHECK-RV64-NEXT: ret <vscale x 1 x float> [[TMP0]]
+//
+vfloat32mf2_t test_vfwadd_wv_bf16mf4_f32mf2_rm_tum(vbool64_t vm,
+ vfloat32mf2_t vd,
+ vfloat32mf2_t vs2,
+ vbfloat16mf4_t vs1,
+ size_t vl) {
+ return __riscv_vfwadd_wv_tum(vm, vd, vs2, vs1, __RISCV_FRM_RNE, vl);
+}
+
+// CHECK-RV64-LABEL: define dso_local <vscale x 1 x float> @test_vfwadd_wf_bf16_f32mf2_rm_tum(
+// CHECK-RV64-SAME: <vscale x 1 x i1> [[VM:%.*]], <vscale x 1 x float> [[VD:%.*]], <vscale x 1 x float> [[VS2:%.*]], bfloat noundef [[RS1:%.*]], i64 noundef [[VL:%.*]]) #[[ATTR0]] {
+// CHECK-RV64-NEXT: [[ENTRY:.*:]]
+// CHECK-RV64-NEXT: [[TMP0:%.*]] = call <vscale x 1 x float> @llvm.riscv.vfwadd.w.mask.nxv1f32.bf16.i64(<vscale x 1 x float> [[VD]], <vscale x 1 x float> [[VS2]], bfloat [[RS1]], <vscale x 1 x i1> [[VM]], i64 0, i64 [[VL]], i64 2)
+// CHECK-RV64-NEXT: ret <vscale x 1 x float> [[TMP0]]
+//
+vfloat32mf2_t test_vfwadd_wf_bf16_f32mf2_rm_tum(vbool64_t vm, vfloat32mf2_t vd,
+ vfloat32mf2_t vs2, __bf16 rs1,
+ size_t vl) {
+ return __riscv_vfwadd_wf_tum(vm, vd, vs2, rs1, __RISCV_FRM_RNE, vl);
+}
+
+// CHECK-RV64-LABEL: define dso_local <vscale x 2 x float> @test_vfwadd_vv_bf16mf2_f32m1_rm_tum(
+// CHECK-RV64-SAME: <vscale x 2 x i1> [[VM:%.*]], <vscale x 2 x float> [[VD:%.*]], <vscale x 2 x bfloat> [[VS2:%.*]], <vscale x 2 x bfloat> [[VS1:%.*]], i64 noundef [[VL:%.*]]) #[[ATTR0]] {
+// CHECK-RV64-NEXT: [[ENTRY:.*:]]
+// CHECK-RV64-NEXT: [[TMP0:%.*]] = call <vscale x 2 x float> @llvm.riscv.vfwadd.mask.nxv2f32.nxv2bf16.nxv2bf16.i64(<vscale x 2 x float> [[VD]], <vscale x 2 x bfloat> [[VS2]], <vscale x 2 x bfloat> [[VS1]], <vscale x 2 x i1> [[VM]], i64 0, i64 [[VL]], i64 2)
+// CHECK-RV64-NEXT: ret <vscale x 2 x float> [[TMP0]]
+//
+vfloat32m1_t test_vfwadd_vv_bf16mf2_f32m1_rm_tum(vbool32_t vm, vfloat32m1_t vd,
+ vbfloat16mf2_t vs2,
+ vbfloat16mf2_t vs1,
+ size_t vl) {
+ return __riscv_vfwadd_vv_tum(vm, vd, vs2, vs1, __RISCV_FRM_RNE, vl);
+}
+
+// CHECK-RV64-LABEL: define dso_local <vscale x 2 x float> @test_vfwadd_vf_bf16mf2_f32m1_rm_tum(
+// CHECK-RV64-SAME: <vscale x 2 x i1> [[VM:%.*]], <vscale x 2 x float> [[VD:%.*]], <vscale x 2 x bfloat> [[VS2:%.*]], bfloat noundef [[RS1:%.*]], i64 noundef [[VL:%.*]]) #[[ATTR0]] {
+// CHECK-RV64-NEXT: [[ENTRY:.*:]]
+// CHECK-RV64-NEXT: [[TMP0:%.*]] = call <vscale x 2 x float> @llvm.riscv.vfwadd.mask.nxv2f32.nxv2bf16.bf16.i64(<vscale x 2 x float> [[VD]], <vscale x 2 x bfloat> [[VS2]], bfloat [[RS1]], <vscale x 2 x i1> [[VM]], i64 0, i64 [[VL]], i64 2)
+// CHECK-RV64-NEXT: ret <vscale x 2 x float> [[TMP0]]
+//
+vfloat32m1_t test_vfwadd_vf_bf16mf2_f32m1_rm_tum(vbool32_t vm, vfloat32m1_t vd,
+ vbfloat16mf2_t vs2, __bf16 rs1,
+ size_t vl) {
+ return __riscv_vfwadd_vf_tum(vm, vd, vs2, rs1, __RISCV_FRM_RNE, vl);
+}
+
+// CHECK-RV64-LABEL: define dso_local <vscale x 2 x float> @test_vfwadd_wv_bf16mf2_f32m1_rm_tum(
+// CHECK-RV64-SAME: <vscale x 2 x i1> [[VM:%.*]], <vscale x 2 x float> [[VD:%.*]], <vscale x 2 x float> [[VS2:%.*]], <vscale x 2 x bfloat> [[VS1:%.*]], i64 noundef [[VL:%.*]]) #[[ATTR0]] {
+// CHECK-RV64-NEXT: [[ENTRY:.*:]]
+// CHECK-RV64-NEXT: [[TMP0:%.*]] = call <vscale x 2 x float> @llvm.riscv.vfwadd.w.mask.nxv2f32.nxv2bf16.i64(<vscale x 2 x float> [[VD]], <vscale x 2 x float> [[VS2]], <vscale x 2 x bfloat> [[VS1]], <vscale x 2 x i1> [[VM]], i64 0, i64 [[VL]], i64 2)
+// CHECK-RV64-NEXT: ret <vscale x 2 x float> [[TMP0]]
+//
+vfloat32m1_t test_vfwadd_wv_bf16mf2_f32m1_rm_tum(vbool32_t vm, vfloat32m1_t vd,
+ vfloat32m1_t vs2,
+ vbfloat16mf2_t vs1,
+ size_t vl) {
+ return __riscv_vfwadd_wv_tum(vm, vd, vs2, vs1, __RISCV_FRM_RNE, vl);
+}
+
+// CHECK-RV64-LABEL: define dso_local <vscale x 2 x float> @test_vfwadd_wf_bf16_f32m1_rm_tum(
+// CHECK-RV64-SAME: <vscale x 2 x i1> [[VM:%.*]], <vscale x 2 x float> [[VD:%.*]], <vscale x 2 x float> [[VS2:%.*]], bfloat noundef [[RS1:%.*]], i64 noundef [[VL:%.*]]) #[[ATTR0]] {
+// CHECK-RV64-NEXT: [[ENTRY:.*:]]
+// CHECK-RV64-NEXT: [[TMP0:%.*]] = call <vscale x 2 x float> @llvm.riscv.vfwadd.w.mask.nxv2f32.bf16.i64(<vscale x 2 x float> [[VD]], <vscale x 2 x float> [[VS2]], bfloat [[RS1]], <vscale x 2 x i1> [[VM]], i64 0, i64 [[VL]], i64 2)
+// CHECK-RV64-NEXT: ret <vscale x 2 x float> [[TMP0]]
+//
+vfloat32m1_t test_vfwadd_wf_bf16_f32m1_rm_tum(vbool32_t vm, vfloat32m1_t vd,
+ vfloat32m1_t vs2, __bf16 rs1,
+ size_t vl) {
+ return __riscv_vfwadd_wf_tum(vm, vd, vs2, rs1, __RISCV_FRM_RNE, vl);
+}
+
+// CHECK-RV64-LABEL: define dso_local <vscale x 4 x float> @test_vfwadd_vv_bf16m1_f32m2_rm_tum(
+// CHECK-RV64-SAME: <vscale x 4 x i1> [[VM:%.*]], <vscale x 4 x float> [[VD:%.*]], <vscale x 4 x bfloat> [[VS2:%.*]], <vscale x 4 x bfloat> [[VS1:%.*]], i64 noundef [[VL:%.*]]) #[[ATTR0]] {
+// CHECK-RV64-NEXT: [[ENTRY:.*:]]
+// CHECK-RV64-NEXT: [[TMP0:%.*]] = call <vscale x 4 x float> @llvm.riscv.vfwadd.mask.nxv4f32.nxv4bf16.nxv4bf16.i64(<vscale x 4 x float> [[VD]], <vscale x 4 x bfloat> [[VS2]], <vscale x 4 x bfloat> [[VS1]], <vscale x 4 x i1> [[VM]], i64 0, i64 [[VL]], i64 2)
+// CHECK-RV64-NEXT: ret <vscale x 4 x float> [[TMP0]]
+//
+vfloat32m2_t test_vfwadd_vv_bf16m1_f32m2_rm_tum(vbool16_t vm, vfloat32m2_t vd,
+ vbfloat16m1_t vs2,
+ vbfloat16m1_t vs1, size_t vl) {
+ return __riscv_vfwadd_vv_tum(vm, vd, vs2, vs1, __RISCV_FRM_RNE, vl);
+}
+
+// CHECK-RV64-LABEL: define dso_local <vscale x 4 x float> @test_vfwadd_vf_bf16m1_f32m2_rm_tum(
+// CHECK-RV64-SAME: <vscale x 4 x i1> [[VM:%.*]], <vscale x 4 x float> [[VD:%.*]], <vscale x 4 x bfloat> [[VS2:%.*]], bfloat noundef [[RS1:%.*]], i64 noundef [[VL:%.*]]) #[[ATTR0]] {
+// CHECK-RV64-NEXT: [[ENTRY:.*:]]
+// CHECK-RV64-NEXT: [[TMP0:%.*]] = call <vscale x 4 x float> @llvm.riscv.vfwadd.mask.nxv4f32.nxv4bf16.bf16.i64(<vscale x 4 x float> [[VD]], <vscale x 4 x bfloat> [[VS2]], bfloat [[RS1]], <vscale x 4 x i1> [[VM]], i64 0, i64 [[VL]], i64 2)
+// CHECK-RV64-NEXT: ret <vscale x 4 x float> [[TMP0]]
+//
+vfloat32m2_t test_vfwadd_vf_bf16m1_f32m2_rm_tum(vbool16_t vm, vfloat32m2_t vd,
+ vbfloat16m1_t vs2, __bf16 rs1,
+ size_t vl) {
+ return __riscv_vfwadd_vf_tum(vm, vd, vs2, rs1, __RISCV_FRM_RNE, vl);
+}
+
+// CHECK-RV64-LABEL: define dso_local <vscale x 4 x float> @test_vfwadd_wv_bf16m1_f32m2_rm_tum(
+// CHECK-RV64-SAME: <vscale x 4 x i1> [[VM:%.*]], <vscale x 4 x float> [[VD:%.*]], <vscale x 4 x float> [[VS2:%.*]], <vscale x 4 x bfloat> [[VS1:%.*]], i64 noundef [[VL:%.*]]) #[[ATTR0]] {
+// CHECK-RV64-NEXT: [[ENTRY:.*:]]
+// CHECK-RV64-NEXT: [[TMP0:%.*]] = call <vscale x 4 x float> @llvm.riscv.vfwadd.w.mask.nxv4f32.nxv4bf16.i64(<vscale x 4 x float> [[VD]], <vscale x 4 x float> [[VS2]], <vscale x 4 x bfloat> [[VS1]], <vscale x 4 x i1> [[VM]], i64 0, i64 [[VL]], i64 2)
+// CHECK-RV64-NEXT: ret <vscale x 4 x float> [[TMP0]]
+//
+vfloat32m2_t test_vfwadd_wv_bf16m1_f32m2_rm_tum(vbool16_t vm, vfloat32m2_t vd,
+ vfloat32m2_t vs2,
+ vbfloat16m1_t vs1, size_t vl) {
+ return __riscv_vfwadd_wv_tum(vm, vd, vs2, vs1, __RISCV_FRM_RNE, vl);
+}
+
+// CHECK-RV64-LABEL: define dso_local <vscale x 4 x float> @test_vfwadd_wf_bf16_f32m2_rm_tum(
+// CHECK-RV64-SAME: <vscale x 4 x i1> [[VM:%.*]], <vscale x 4 x float> [[VD:%.*]], <vscale x 4 x float> [[VS2:%.*]], bfloat noundef [[RS1:%.*]], i64 noundef [[VL:%.*]]) #[[ATTR0]] {
+// CHECK-RV64-NEXT: [[ENTRY:.*:]]
+// CHECK-RV64-NEXT: [[TMP0:%.*]] = call <vscale x 4 x float> @llvm.riscv.vfwadd.w.mask.nxv4f32.bf16.i64(<vscale x 4 x float> [[VD]], <vscale x 4 x float> [[VS2]], bfloat [[RS1]], <vscale x 4 x i1> [[VM]], i64 0, i64 [[VL]], i64 2)
+// CHECK-RV64-NEXT: ret <vscale x 4 x float> [[TMP0]]
+//
+vfloat32m2_t test_vfwadd_wf_bf16_f32m2_rm_tum(vbool16_t vm, vfloat32m2_t vd,
+ vfloat32m2_t vs2, __bf16 rs1,
+ size_t vl) {
+ return __riscv_vfwadd_wf_tum(vm, vd, vs2, rs1, __RISCV_FRM_RNE, vl);
+}
+
+// CHECK-RV64-LABEL: define dso_local <vscale x 8 x float> @test_vfwadd_vv_bf16m2_f32m4_rm_tum(
+// CHECK-RV64-SAME: <vscale x 8 x i1> [[VM:%.*]], <vscale x 8 x float> [[VD:%.*]], <vscale x 8 x bfloat> [[VS2:%.*]], <vscale x 8 x bfloat> [[VS1:%.*]], i64 noundef [[VL:%.*]]) #[[ATTR0]] {
+// CHECK-RV64-NEXT: [[ENTRY:.*:]]
+// CHECK-RV64-NEXT: [[TMP0:%.*]] = call <vscale x 8 x float> @llvm.riscv.vfwadd.mask.nxv8f32.nxv8bf16.nxv8bf16.i64(<vscale x 8 x float> [[VD]], <vscale x 8 x bfloat> [[VS2]], <vscale x 8 x bfloat> [[VS1]], <vscale x 8 x i1> [[VM]], i64 0, i64 [[VL]], i64 2)
+// CHECK-RV64-NEXT: ret <vscale x 8 x float> [[TMP0]]
+//
+vfloat32m4_t test_vfwadd_vv_bf16m2_f32m4_rm_tum(vbool8_t vm, vfloat32m4_t vd,
+ vbfloat16m2_t vs2,
+ vbfloat16m2_t vs1, size_t vl) {
+ return __riscv_vfwadd_vv_tum(vm, vd, vs2, vs1, __RISCV_FRM_RNE, vl);
+}
+
+// CHECK-RV64-LABEL: define dso_local <vscale x 8 x float> @test_vfwadd_vf_bf16m2_f32m4_rm_tum(
+// CHECK-RV64-SAME: <vscale x 8 x i1> [[VM:%.*]], <vscale x 8 x float> [[VD:%.*]], <vscale x 8 x bfloat> [[VS2:%.*]], bfloat noundef [[RS1:%.*]], i64 noundef [[VL:%.*]]) #[[ATTR0]] {
+// CHECK-RV64-NEXT: [[ENTRY:.*:]]
+// CHECK-RV64-NEXT: [[TMP0:%.*]] = call <vscale x 8 x float> @llvm.riscv.vfwadd.mask.nxv8f32.nxv8bf16.bf16.i64(<vscale x 8 x float> [[VD]], <vscale x 8 x bfloat> [[VS2]], bfloat [[RS1]], <vscale x 8 x i1> [[VM]], i64 0, i64 [[VL]], i64 2)
+// CHECK-RV64-NEXT: ret <vscale x 8 x float> [[TMP0]]
+//
+vfloat32m4_t test_vfwadd_vf_bf16m2_f32m4_rm_tum(vbool8_t vm, vfloat32m4_t vd,
+ vbfloat16m2_t vs2, __bf16 rs1,
+ size_t vl) {
+ return __riscv_vfwadd_vf_tum(vm, vd, vs2, rs1, __RISCV_FRM_RNE, vl);
+}
+
+// CHECK-RV64-LABEL: define dso_local <vscale x 8 x float> @test_vfwadd_wv_bf16m2_f32m4_rm_tum(
+// CHECK-RV64-SAME: <vscale x 8 x i1> [[VM:%.*]], <vscale x 8 x float> [[VD:%.*]], <vscale x 8 x float> [[VS2:%.*]], <vscale x 8 x bfloat> [[VS1:%.*]], i64 noundef [[VL:%.*]]) #[[ATTR0]] {
+// CHECK-RV64-NEXT: [[ENTRY:.*:]]
+// CHECK-RV64-NEXT: [[TMP0:%.*]] = call <vscale x 8 x float> @llvm.riscv.vfwadd.w.mask.nxv8f32.nxv8bf16.i64(<vscale x 8 x float> [[VD]], <vscale x 8 x float> [[VS2]], <vscale x 8 x bfloat> [[VS1]], <vscale x 8 x i1> [[VM]], i64 0, i64 [[VL]], i64 2)
+// CHECK-RV64-NEXT: ret <vscale x 8 x float> [[TMP0]]
+//
+vfloat32m4_t test_vfwadd_wv_bf16m2_f32m4_rm_tum(vbool8_t vm, vfloat32m4_t vd,
+ vfloat32m4_t vs2,
+ vbfloat16m2_t vs1, size_t vl) {
+ return __riscv_vfwadd_wv_tum(vm, vd, vs2, vs1, __RISCV_FRM_RNE, vl);
+}
+
+// CHECK-RV64-LABEL: define dso_local <vscale x 8 x float> @test_vfwadd_wf_bf16_f32m4_rm_tum(
+// CHECK-RV64-SAME: <vscale x 8 x i1> [[VM:%.*]], <vscale x 8 x float> [[VD:%.*]], <vscale x 8 x float> [[VS2:%.*]], bfloat noundef [[RS1:%.*]], i64 noundef [[VL:%.*]]) #[[ATTR0]] {
+// CHECK-RV64-NEXT: [[ENTRY:.*:]]
+// CHECK-RV64-NEXT: [[TMP0:%.*]] = call <vscale x 8 x float> @llvm.riscv.vfwadd.w.mask.nxv8f32.bf16.i64(<vscale x 8 x float> [[VD]], <vscale x 8 x float> [[VS2]], bfloat [[RS1]], <vscale x 8 x i1> [[VM]], i64 0, i64 [[VL]], i64 2)
+// CHECK-RV64-NEXT: ret <vscale x 8 x float> [[TMP0]]
+//
+vfloat32m4_t test_vfwadd_wf_bf16_f32m4_rm_tum(vbool8_t vm, vfloat32m4_t vd,
+ vfloat32m4_t vs2, __bf16 rs1,
+ size_t vl) {
+ return __riscv_vfwadd_wf_tum(vm, vd, vs2, rs1, __RISCV_FRM_RNE, vl);
+}
+
+// CHECK-RV64-LABEL: define dso_local <vscale x 16 x float> @test_vfwadd_vv_bf16m4_f32m8_rm_tum(
+// CHECK-RV64-SAME: <vscale x 16 x i1> [[VM:%.*]], <vscale x 16 x float> [[VD:%.*]], <vscale x 16 x bfloat> [[VS2:%.*]], <vscale x 16 x bfloat> [[VS1:%.*]], i64 noundef [[VL:%.*]]) #[[ATTR0]] {
+// CHECK-RV64-NEXT: [[ENTRY:.*:]]
+// CHECK-RV64-NEXT: [[TMP0:%.*]] = call <vscale x 16 x float> @llvm.riscv.vfwadd.mask.nxv16f32.nxv16bf16.nxv16bf16.i64(<vscale x 16 x float> [[VD]], <vscale x 16 x bfloat> [[VS2]], <vscale x 16 x bfloat> [[VS1]], <vscale x 16 x i1> [[VM]], i64 0, i64 [[VL]], i64 2)
+// CHECK-RV64-NEXT: ret <vscale x 16 x float> [[TMP0]]
+//
+vfloat32m8_t test_vfwadd_vv_bf16m4_f32m8_rm_tum(vbool4_t vm, vfloat32m8_t vd,
+ vbfloat16m4_t vs2,
+ vbfloat16m4_t vs1, size_t vl) {
+ return __riscv_vfwadd_vv_tum(vm, vd, vs2, vs1, __RISCV_FRM_RNE, vl);
+}
+
+// CHECK-RV64-LABEL: define dso_local <vscale x 16 x float> @test_vfwadd_vf_bf16m4_f32m8_rm_tum(
+// CHECK-RV64-SAME: <vscale x 16 x i1> [[VM:%.*]], <vscale x 16 x float> [[VD:%.*]], <vscale x 16 x bfloat> [[VS2:%.*]], bfloat noundef [[RS1:%.*]], i64 noundef [[VL:%.*]]) #[[ATTR0]] {
+// CHECK-RV64-NEXT: [[ENTRY:.*:]]
+// CHECK-RV64-NEXT: [[TMP0:%.*]] = call <vscale x 16 x float> @llvm.riscv.vfwadd.mask.nxv16f32.nxv16bf16.bf16.i64(<vscale x 16 x float> [[VD]], <vscale x 16 x bfloat> [[VS2]], bfloat [[RS1]], <vscale x 16 x i1> [[VM]], i64 0, i64 [[VL]], i64 2)
+// CHECK-RV64-NEXT: ret <vscale x 16 x float> [[TMP0]]
+//
+vfloat32m8_t test_vfwadd_vf_bf16m4_f32m8_rm_tum(vbool4_t vm, vfloat32m8_t vd,
+ vbfloat16m4_t vs2, __bf16 rs1,
+ size_t vl) {
+ return __riscv_vfwadd_vf_tum(vm, vd, vs2, rs1, __RISCV_FRM_RNE, vl);
+}
+
+// CHECK-RV64-LABEL: define dso_local <vscale x 16 x float> @test_vfwadd_wv_bf16m4_f32m8_rm_tum(
+// CHECK-RV64-SAME: <vscale x 16 x i1> [[VM:%.*]], <vscale x 16 x float> [[VD:%.*]], <vscale x 16 x float> [[VS2:%.*]], <vscale x 16 x bfloat> [[VS1:%.*]], i64 noundef [[VL:%.*]]) #[[ATTR0]] {
+// CHECK-RV64-NEXT: [[ENTRY:.*:]]
+// CHECK-RV64-NEXT: [[TMP0:%.*]] = call <vscale x 16 x float> @llvm.riscv.vfwadd.w.mask.nxv16f32.nxv16bf16.i64(<vscale x 16 x float> [[VD]], <vscale x 16 x float> [[VS2]], <vscale x 16 x bfloat> [[VS1]], <vscale x 16 x i1> [[VM]], i64 0, i64 [[VL]], i64 2)
+// CHECK-RV64-NEXT: ret <vscale x 16 x float> [[TMP0]]
+//
+vfloat32m8_t test_vfwadd_wv_bf16m4_f32m8_rm_tum(vbool4_t vm, vfloat32m8_t vd,
+ vfloat32m8_t vs2,
+ vbfloat16m4_t vs1, size_t vl) {
+ return __riscv_vfwadd_wv_tum(vm, vd, vs2, vs1, __RISCV_FRM_RNE, vl);
+}
+
+// CHECK-RV64-LABEL: define dso_local <vscale x 16 x float> @test_vfwadd_wf_bf16_f32m8_rm_tum(
+// CHECK-RV64-SAME: <vscale x 16 x i1> [[VM:%.*]], <vscale x 16 x float> [[VD:%.*]], <vscale x 16 x float> [[VS2:%.*]], bfloat noundef [[RS1:%.*]], i64 noundef [[VL:%.*]]) #[[ATTR0]] {
+// CHECK-RV64-NEXT: [[ENTRY:.*:]]
+// CHECK-RV64-NEXT: [[TMP0:%.*]] = call <vscale x 16 x float> @llvm.riscv.vfwadd.w.mask.nxv16f32.bf16.i64(<vscale x 16 x float> [[VD]], <vscale x 16 x float> [[VS2]], bfloat [[RS1]], <vscale x 16 x i1> [[VM]], i64 0, i64 [[VL]], i64 2)
+// CHECK-RV64-NEXT: ret <vscale x 16 x float> [[TMP0]]
+//
+vfloat32m8_t test_vfwadd_wf_bf16_f32m8_rm_tum(vbool4_t vm, vfloat32m8_t vd,
+ vfloat32m8_t vs2, __bf16 rs1,
+ size_t vl) {
+ return __riscv_vfwadd_wf_tum(vm, vd, vs2, rs1, __RISCV_FRM_RNE, vl);
+}
+
+// CHECK-RV64-LABEL: define dso_local <vscale x 1 x float> @test_vfwadd_vv_bf16mf4_f32mf2_rm_tumu(
+// CHECK-RV64-SAME: <vscale x 1 x i1> [[VM:%.*]], <vscale x 1 x float> [[VD:%.*]], <vscale x 1 x bfloat> [[VS2:%.*]], <vscale x 1 x bfloat> [[VS1:%.*]], i64 noundef [[VL:%.*]]) #[[ATTR0]] {
+// CHECK-RV64-NEXT: [[ENTRY:.*:]]
+// CHECK-RV64-NEXT: [[TMP0:%.*]] = call <vscale x 1 x float> @llvm.riscv.vfwadd.mask.nxv1f32.nxv1bf16.nxv1bf16.i64(<vscale x 1 x float> [[VD]], <vscale x 1 x bfloat> [[VS2]], <vscale x 1 x bfloat> [[VS1]], <vscale x 1 x i1> [[VM]], i64 0, i64 [[VL]], i64 0)
+// CHECK-RV64-NEXT: ret <vscale x 1 x float> [[TMP0]]
+//
+vfloat32mf2_t test_vfwadd_vv_bf16mf4_f32mf2_rm_tumu(vbool64_t vm,
+ vfloat32mf2_t vd,
+ vbfloat16mf4_t vs2,
+ vbfloat16mf4_t vs1,
+ size_t vl) {
+ return __riscv_vfwadd_vv_tumu(vm, vd, vs2, vs1, __RISCV_FRM_RNE, vl);
+}
+
+// CHECK-RV64-LABEL: define dso_local <vscale x 1 x float> @test_vfwadd_vf_bf16mf4_f32mf2_rm_tumu(
+// CHECK-RV64-SAME: <vscale x 1 x i1> [[VM:%.*]], <vscale x 1 x float> [[VD:%.*]], <vscale x 1 x bfloat> [[VS2:%.*]], bfloat noundef [[RS1:%.*]], i64 noundef [[VL:%.*]]) #[[ATTR0]] {
+// CHECK-RV64-NEXT: [[ENTRY:.*:]]
+// CHECK-RV64-NEXT: [[TMP0:%.*]] = call <vscale x 1 x float> @llvm.riscv.vfwadd.mask.nxv1f32.nxv1bf16.bf16.i64(<vscale x 1 x float> [[VD]], <vscale x 1 x bfloat> [[VS2]], bfloat [[RS1]], <vscale x 1 x i1> [[VM]], i64 0, i64 [[VL]], i64 0)
+// CHECK-RV64-NEXT: ret <vscale x 1 x float> [[TMP0]]
+//
+vfloat32mf2_t test_vfwadd_vf_bf16mf4_f32mf2_rm_tumu(vbool64_t vm,
+ vfloat32mf2_t vd,
+ vbfloat16mf4_t vs2,
+ __bf16 rs1, size_t vl) {
+ return __riscv_vfwadd_vf_tumu(vm, vd, vs2, rs1, __RISCV_FRM_RNE, vl);
+}
+
+// CHECK-RV64-LABEL: define dso_local <vscale x 1 x float> @test_vfwadd_wv_bf16mf4_f32mf2_rm_tumu(
+// CHECK-RV64-SAME: <vscale x 1 x i1> [[VM:%.*]], <vscale x 1 x float> [[VD:%.*]], <vscale x 1 x float> [[VS2:%.*]], <vscale x 1 x bfloat> [[VS1:%.*]], i64 noundef [[VL:%.*]]) #[[ATTR0]] {
+// CHECK-RV64-NEXT: [[ENTRY:.*:]]
+// CHECK-RV64-NEXT: [[TMP0:%.*]] = call <vscale x 1 x float> @llvm.riscv.vfwadd.w.mask.nxv1f32.nxv1bf16.i64(<vscale x 1 x float> [[VD]], <vscale x 1 x float> [[VS2]], <vscale x 1 x bfloat> [[VS1]], <vscale x 1 x i1> [[VM]], i64 0, i64 [[VL]], i64 0)
+// CHECK-RV64-NEXT: ret <vscale x 1 x float> [[TMP0]]
+//
+vfloat32mf2_t test_vfwadd_wv_bf16mf4_f32mf2_rm_tumu(vbool64_t vm,
+ vfloat32mf2_t vd,
+ vfloat32mf2_t vs2,
+ vbfloat16mf4_t vs1,
+ size_t vl) {
+ return __riscv_vfwadd_wv_tumu(vm, vd, vs2, vs1, __RISCV_FRM_RNE, vl);
+}
+
+// CHECK-RV64-LABEL: define dso_local <vscale x 1 x float> @test_vfwadd_wf_bf16_f32mf2_rm_tumu(
+// CHECK-RV64-SAME: <vscale x 1 x i1> [[VM:%.*]], <vscale x 1 x float> [[VD:%.*]], <vscale x 1 x float> [[VS2:%.*]], bfloat noundef [[RS1:%.*]], i64 noundef [[VL:%.*]]) #[[ATTR0]] {
+// CHECK-RV64-NEXT: [[ENTRY:.*:]]
+// CHECK-RV64-NEXT: [[TMP0:%.*]] = call <vscale x 1 x float> @llvm.riscv.vfwadd.w.mask.nxv1f32.bf16.i64(<vscale x 1 x float> [[VD]], <vscale x 1 x float> [[VS2]], bfloat [[RS1]], <vscale x 1 x i1> [[VM]], i64 0, i64 [[VL]], i64 0)
+// CHECK-RV64-NEXT: ret <vscale x 1 x float> [[TMP0]]
+//
+vfloat32mf2_t test_vfwadd_wf_bf16_f32mf2_rm_tumu(vbool64_t vm, vfloat32mf2_t vd,
+ vfloat32mf2_t vs2, __bf16 rs1,
+ size_t vl) {
+ return __riscv_vfwadd_wf_tumu(vm, vd, vs2, rs1, __RISCV_FRM_RNE, vl);
+}
+
+// CHECK-RV64-LABEL: define dso_local <vscale x 2 x float> @test_vfwadd_vv_bf16mf2_f32m1_rm_tumu(
+// CHECK-RV64-SAME: <vscale x 2 x i1> [[VM:%.*]], <vscale x 2 x float> [[VD:%.*]], <vscale x 2 x bfloat> [[VS2:%.*]], <vscale x 2 x bfloat> [[VS1:%.*]], i64 noundef [[VL:%.*]]) #[[ATTR0]] {
+// CHECK-RV64-NEXT: [[ENTRY:.*:]]
+// CHECK-RV64-NEXT: [[TMP0:%.*]] = call <vscale x 2 x float> @llvm.riscv.vfwadd.mask.nxv2f32.nxv2bf16.nxv2bf16.i64(<vscale x 2 x float> [[VD]], <vscale x 2 x bfloat> [[VS2]], <vscale x 2 x bfloat> [[VS1]], <vscale x 2 x i1> [[VM]], i64 0, i64 [[VL]], i64 0)
+// CHECK-RV64-NEXT: ret <vscale x 2 x float> [[TMP0]]
+//
+vfloat32m1_t test_vfwadd_vv_bf16mf2_f32m1_rm_tumu(vbool32_t vm, vfloat32m1_t vd,
+ vbfloat16mf2_t vs2,
+ vbfloat16mf2_t vs1,
+ size_t vl) {
+ return __riscv_vfwadd_vv_tumu(vm, vd, vs2, vs1, __RISCV_FRM_RNE, vl);
+}
+
+// CHECK-RV64-LABEL: define dso_local <vscale x 2 x float> @test_vfwadd_vf_bf16mf2_f32m1_rm_tumu(
+// CHECK-RV64-SAME: <vscale x 2 x i1> [[VM:%.*]], <vscale x 2 x float> [[VD:%.*]], <vscale x 2 x bfloat> [[VS2:%.*]], bfloat noundef [[RS1:%.*]], i64 noundef [[VL:%.*]]) #[[ATTR0]] {
+// CHECK-RV64-NEXT: [[ENTRY:.*:]]
+// CHECK-RV64-NEXT: [[TMP0:%.*]] = call <vscale x 2 x float> @llvm.riscv.vfwadd.mask.nxv2f32.nxv2bf16.bf16.i64(<vscale x 2 x float> [[VD]], <vscale x 2 x bfloat> [[VS2]], bfloat [[RS1]], <vscale x 2 x i1> [[VM]], i64 0, i64 [[VL]], i64 0)
+// CHECK-RV64-NEXT: ret <vscale x 2 x float> [[TMP0]]
+//
+vfloat32m1_t test_vfwadd_vf_bf16mf2_f32m1_rm_tumu(vbool32_t vm, vfloat32m1_t vd,
+ vbfloat16mf2_t vs2,
+ __bf16 rs1, size_t vl) {
+ return __riscv_vfwadd_vf_tumu(vm, vd, vs2, rs1, __RISCV_FRM_RNE, vl);
+}
+
+// CHECK-RV64-LABEL: define dso_local <vscale x 2 x float> @test_vfwadd_wv_bf16mf2_f32m1_rm_tumu(
+// CHECK-RV64-SAME: <vscale x 2 x i1> [[VM:%.*]], <vscale x 2 x float> [[VD:%.*]], <vscale x 2 x float> [[VS2:%.*]], <vscale x 2 x bfloat> [[VS1:%.*]], i64 noundef [[VL:%.*]]) #[[ATTR0]] {
+// CHECK-RV64-NEXT: [[ENTRY:.*:]]
+// CHECK-RV64-NEXT: [[TMP0:%.*]] = call <vscale x 2 x float> @llvm.riscv.vfwadd.w.mask.nxv2f32.nxv2bf16.i64(<vscale x 2 x float> [[VD]], <vscale x 2 x float> [[VS2]], <vscale x 2 x bfloat> [[VS1]], <vscale x 2 x i1> [[VM]], i64 0, i64 [[VL]], i64 0)
+// CHECK-RV64-NEXT: ret <vscale x 2 x float> [[TMP0]]
+//
+vfloat32m1_t test_vfwadd_wv_bf16mf2_f32m1_rm_tumu(vbool32_t vm, vfloat32m1_t vd,
+ vfloat32m1_t vs2,
+ vbfloat16mf2_t vs1,
+ size_t vl) {
+ return __riscv_vfwadd_wv_tumu(vm, vd, vs2, vs1, __RISCV_FRM_RNE, vl);
+}
+
+// CHECK-RV64-LABEL: define dso_local <vscale x 2 x float> @test_vfwadd_wf_bf16_f32m1_rm_tumu(
+// CHECK-RV64-SAME: <vscale x 2 x i1> [[VM:%.*]], <vscale x 2 x float> [[VD:%.*]], <vscale x 2 x float> [[VS2:%.*]], bfloat noundef [[RS1:%.*]], i64 noundef [[VL:%.*]]) #[[ATTR0]] {
+// CHECK-RV64-NEXT: [[ENTRY:.*:]]
+// CHECK-RV64-NEXT: [[TMP0:%.*]] = call <vscale x 2 x float> @llvm.riscv.vfwadd.w.mask.nxv2f32.bf16.i64(<vscale x 2 x float> [[VD]], <vscale x 2 x float> [[VS2]], bfloat [[RS1]], <vscale x 2 x i1> [[VM]], i64 0, i64 [[VL]], i64 0)
+// CHECK-RV64-NEXT: ret <vscale x 2 x float> [[TMP0]]
+//
+vfloat32m1_t test_vfwadd_wf_bf16_f32m1_rm_tumu(vbool32_t vm, vfloat32m1_t vd,
+ vfloat32m1_t vs2, __bf16 rs1,
+ size_t vl) {
+ return __riscv_vfwadd_wf_tumu(vm, vd, vs2, rs1, __RISCV_FRM_RNE, vl);
+}
+
+// CHECK-RV64-LABEL: define dso_local <vscale x 4 x float> @test_vfwadd_vv_bf16m1_f32m2_rm_tumu(
+// CHECK-RV64-SAME: <vscale x 4 x i1> [[VM:%.*]], <vscale x 4 x float> [[VD:%.*]], <vscale x 4 x bfloat> [[VS2:%.*]], <vscale x 4 x bfloat> [[VS1:%.*]], i64 noundef [[VL:%.*]]) #[[ATTR0]] {
+// CHECK-RV64-NEXT: [[ENTRY:.*:]]
+// CHECK-RV64-NEXT: [[TMP0:%.*]] = call <vscale x 4 x float> @llvm.riscv.vfwadd.mask.nxv4f32.nxv4bf16.nxv4bf16.i64(<vscale x 4 x float> [[VD]], <vscale x 4 x bfloat> [[VS2]], <vscale x 4 x bfloat> [[VS1]], <vscale x 4 x i1> [[VM]], i64 0, i64 [[VL]], i64 0)
+// CHECK-RV64-NEXT: ret <vscale x 4 x float> [[TMP0]]
+//
+vfloat32m2_t test_vfwadd_vv_bf16m1_f32m2_rm_tumu(vbool16_t vm, vfloat32m2_t vd,
+ vbfloat16m1_t vs2,
+ vbfloat16m1_t vs1, size_t vl) {
+ return __riscv_vfwadd_vv_tumu(vm, vd, vs2, vs1, __RISCV_FRM_RNE, vl);
+}
+
+// CHECK-RV64-LABEL: define dso_local <vscale x 4 x float> @test_vfwadd_vf_bf16m1_f32m2_rm_tumu(
+// CHECK-RV64-SAME: <vscale x 4 x i1> [[VM:%.*]], <vscale x 4 x float> [[VD:%.*]], <vscale x 4 x bfloat> [[VS2:%.*]], bfloat noundef [[RS1:%.*]], i64 noundef [[VL:%.*]]) #[[ATTR0]] {
+// CHECK-RV64-NEXT: [[ENTRY:.*:]]
+// CHECK-RV64-NEXT: [[TMP0:%.*]] = call <vscale x 4 x float> @llvm.riscv.vfwadd.mask.nxv4f32.nxv4bf16.bf16.i64(<vscale x 4 x float> [[VD]], <vscale x 4 x bfloat> [[VS2]], bfloat [[RS1]], <vscale x 4 x i1> [[VM]], i64 0, i64 [[VL]], i64 0)
+// CHECK-RV64-NEXT: ret <vscale x 4 x float> [[TMP0]]
+//
+vfloat32m2_t test_vfwadd_vf_bf16m1_f32m2_rm_tumu(vbool16_t vm, vfloat32m2_t vd,
+ vbfloat16m1_t vs2, __bf16 rs1,
+ size_t vl) {
+ return __riscv_vfwadd_vf_tumu(vm, vd, vs2, rs1, __RISCV_FRM_RNE, vl);
+}
+
+// CHECK-RV64-LABEL: define dso_local <vscale x 4 x float> @test_vfwadd_wv_bf16m1_f32m2_rm_tumu(
+// CHECK-RV64-SAME: <vscale x 4 x i1> [[VM:%.*]], <vscale x 4 x float> [[VD:%.*]], <vscale x 4 x float> [[VS2:%.*]], <vscale x 4 x bfloat> [[VS1:%.*]], i64 noundef [[VL:%.*]]) #[[ATTR0]] {
+// CHECK-RV64-NEXT: [[ENTRY:.*:]]
+// CHECK-RV64-NEXT: [[TMP0:%.*]] = call <vscale x 4 x float> @llvm.riscv.vfwadd.w.mask.nxv4f32.nxv4bf16.i64(<vscale x 4 x float> [[VD]], <vscale x 4 x float> [[VS2]], <vscale x 4 x bfloat> [[VS1]], <vscale x 4 x i1> [[VM]], i64 0, i64 [[VL]], i64 0)
+// CHECK-RV64-NEXT: ret <vscale x 4 x float> [[TMP0]]
+//
+vfloat32m2_t test_vfwadd_wv_bf16m1_f32m2_rm_tumu(vbool16_t vm, vfloat32m2_t vd,
+ vfloat32m2_t vs2,
+ vbfloat16m1_t vs1, size_t vl) {
+ return __riscv_vfwadd_wv_tumu(vm, vd, vs2, vs1, __RISCV_FRM_RNE, vl);
+}
+
+// CHECK-RV64-LABEL: define dso_local <vscale x 4 x float> @test_vfwadd_wf_bf16_f32m2_rm_tumu(
+// CHECK-RV64-SAME: <vscale x 4 x i1> [[VM:%.*]], <vscale x 4 x float> [[VD:%.*]], <vscale x 4 x float> [[VS2:%.*]], bfloat noundef [[RS1:%.*]], i64 noundef [[VL:%.*]]) #[[ATTR0]] {
+// CHECK-RV64-NEXT: [[ENTRY:.*:]]
+// CHECK-RV64-NEXT: [[TMP0:%.*]] = call <vscale x 4 x float> @llvm.riscv.vfwadd.w.mask.nxv4f32.bf16.i64(<vscale x 4 x float> [[VD]], <vscale x 4 x float> [[VS2]], bfloat [[RS1]], <vscale x 4 x i1> [[VM]], i64 0, i64 [[VL]], i64 0)
+// CHECK-RV64-NEXT: ret <vscale x 4 x float> [[TMP0]]
+//
+vfloat32m2_t test_vfwadd_wf_bf16_f32m2_rm_tumu(vbool16_t vm, vfloat32m2_t vd,
+ vfloat32m2_t vs2, __bf16 rs1,
+ size_t vl) {
+ return __riscv_vfwadd_wf_tumu(vm, vd, vs2, rs1, __RISCV_FRM_RNE, vl);
+}
+
+// CHECK-RV64-LABEL: define dso_local <vscale x 8 x float> @test_vfwadd_vv_bf16m2_f32m4_rm_tumu(
+// CHECK-RV64-SAME: <vscale x 8 x i1> [[VM:%.*]], <vscale x 8 x float> [[VD:%.*]], <vscale x 8 x bfloat> [[VS2:%.*]], <vscale x 8 x bfloat> [[VS1:%.*]], i64 noundef [[VL:%.*]]) #[[ATTR0]] {
+// CHECK-RV64-NEXT: [[ENTRY:.*:]]
+// CHECK-RV64-NEXT: [[TMP0:%.*]] = call <vscale x 8 x float> @llvm.riscv.vfwadd.mask.nxv8f32.nxv8bf16.nxv8bf16.i64(<vscale x 8 x float> [[VD]], <vscale x 8 x bfloat> [[VS2]], <vscale x 8 x bfloat> [[VS1]], <vscale x 8 x i1> [[VM]], i64 0, i64 [[VL]], i64 0)
+// CHECK-RV64-NEXT: ret <vscale x 8 x float> [[TMP0]]
+//
+vfloat32m4_t test_vfwadd_vv_bf16m2_f32m4_rm_tumu(vbool8_t vm, vfloat32m4_t vd,
+ vbfloat16m2_t vs2,
+ vbfloat16m2_t vs1, size_t vl) {
+ return __riscv_vfwadd_vv_tumu(vm, vd, vs2, vs1, __RISCV_FRM_RNE, vl);
+}
+
+// CHECK-RV64-LABEL: define dso_local <vscale x 8 x float> @test_vfwadd_vf_bf16m2_f32m4_rm_tumu(
+// CHECK-RV64-SAME: <vscale x 8 x i1> [[VM:%.*]], <vscale x 8 x float> [[VD:%.*]], <vscale x 8 x bfloat> [[VS2:%.*]], bfloat noundef [[RS1:%.*]], i64 noundef [[VL:%.*]]) #[[ATTR0]] {
+// CHECK-RV64-NEXT: [[ENTRY:.*:]]
+// CHECK-RV64-NEXT: [[TMP0:%.*]] = call <vscale x 8 x float> @llvm.riscv.vfwadd.mask.nxv8f32.nxv8bf16.bf16.i64(<vscale x 8 x float> [[VD]], <vscale x 8 x bfloat> [[VS2]], bfloat [[RS1]], <vscale x 8 x i1> [[VM]], i64 0, i64 [[VL]], i64 0)
+// CHECK-RV64-NEXT: ret <vscale x 8 x float> [[TMP0]]
+//
+vfloat32m4_t test_vfwadd_vf_bf16m2_f32m4_rm_tumu(vbool8_t vm, vfloat32m4_t vd,
+ vbfloat16m2_t vs2, __bf16 rs1,
+ size_t vl) {
+ return __riscv_vfwadd_vf_tumu(vm, vd, vs2, rs1, __RISCV_FRM_RNE, vl);
+}
+
+// CHECK-RV64-LABEL: define dso_local <vscale x 8 x float> @test_vfwadd_wv_bf16m2_f32m4_rm_tumu(
+// CHECK-RV64-SAME: <vscale x 8 x i1> [[VM:%.*]], <vscale x 8 x float> [[VD:%.*]], <vscale x 8 x float> [[VS2:%.*]], <vscale x 8 x bfloat> [[VS1:%.*]], i64 noundef [[VL:%.*]]) #[[ATTR0]] {
+// CHECK-RV64-NEXT: [[ENTRY:.*:]]
+// CHECK-RV64-NEXT: [[TMP0:%.*]] = call <vscale x 8 x float> @llvm.riscv.vfwadd.w.mask.nxv8f32.nxv8bf16.i64(<vscale x 8 x float> [[VD]], <vscale x 8 x float> [[VS2]], <vscale x 8 x bfloat> [[VS1]], <vscale x 8 x i1> [[VM]], i64 0, i64 [[VL]], i64 0)
+// CHECK-RV64-NEXT: ret <vscale x 8 x float> [[TMP0]]
+//
+vfloat32m4_t test_vfwadd_wv_bf16m2_f32m4_rm_tumu(vbool8_t vm, vfloat32m4_t vd,
+ vfloat32m4_t vs2,
+ vbfloat16m2_t vs1, size_t vl) {
+ return __riscv_vfwadd_wv_tumu(vm, vd, vs2, vs1, __RISCV_FRM_RNE, vl);
+}
+
+// CHECK-RV64-LABEL: define dso_local <vscale x 8 x float> @test_vfwadd_wf_bf16_f32m4_rm_tumu(
+// CHECK-RV64-SAME: <vscale x 8 x i1> [[VM:%.*]], <vscale x 8 x float> [[VD:%.*]], <vscale x 8 x float> [[VS2:%.*]], bfloat noundef [[RS1:%.*]], i64 noundef [[VL:%.*]]) #[[ATTR0]] {
+// CHECK-RV64-NEXT: [[ENTRY:.*:]]
+// CHECK-RV64-NEXT: [[TMP0:%.*]] = call <vscale x 8 x float> @llvm.riscv.vfwadd.w.mask.nxv8f32.bf16.i64(<vscale x 8 x float> [[VD]], <vscale x 8 x float> [[VS2]], bfloat [[RS1]], <vscale x 8 x i1> [[VM]], i64 0, i64 [[VL]], i64 0)
+// CHECK-RV64-NEXT: ret <vscale x 8 x float> [[TMP0]]
+//
+vfloat32m4_t test_vfwadd_wf_bf16_f32m4_rm_tumu(vbool8_t vm, vfloat32m4_t vd,
+ vfloat32m4_t vs2, __bf16 rs1,
+ size_t vl) {
+ return __riscv_vfwadd_wf_tumu(vm, vd, vs2, rs1, __RISCV_FRM_RNE, vl);
+}
+
+// CHECK-RV64-LABEL: define dso_local <vscale x 16 x float> @test_vfwadd_vv_bf16m4_f32m8_rm_tumu(
+// CHECK-RV64-SAME: <vscale x 16 x i1> [[VM:%.*]], <vscale x 16 x float> [[VD:%.*]], <vscale x 16 x bfloat> [[VS2:%.*]], <vscale x 16 x bfloat> [[VS1:%.*]], i64 noundef [[VL:%.*]]) #[[ATTR0]] {
+// CHECK-RV64-NEXT: [[ENTRY:.*:]]
+// CHECK-RV64-NEXT: [[TMP0:%.*]] = call <vscale x 16 x float> @llvm.riscv.vfwadd.mask.nxv16f32.nxv16bf16.nxv16bf16.i64(<vscale x 16 x float> [[VD]], <vscale x 16 x bfloat> [[VS2]], <vscale x 16 x bfloat> [[VS1]], <vscale x 16 x i1> [[VM]], i64 0, i64 [[VL]], i64 0)
+// CHECK-RV64-NEXT: ret <vscale x 16 x float> [[TMP0]]
+//
+vfloat32m8_t test_vfwadd_vv_bf16m4_f32m8_rm_tumu(vbool4_t vm, vfloat32m8_t vd,
+ vbfloat16m4_t vs2,
+ vbfloat16m4_t vs1, size_t vl) {
+ return __riscv_vfwadd_vv_tumu(vm, vd, vs2, vs1, __RISCV_FRM_RNE, vl);
+}
+
+// CHECK-RV64-LABEL: define dso_local <vscale x 16 x float> @test_vfwadd_vf_bf16m4_f32m8_rm_tumu(
+// CHECK-RV64-SAME: <vscale x 16 x i1> [[VM:%.*]], <vscale x 16 x float> [[VD:%.*]], <vscale x 16 x bfloat> [[VS2:%.*]], bfloat noundef [[RS1:%.*]], i64 noundef [[VL:%.*]]) #[[ATTR0]] {
+// CHECK-RV64-NEXT: [[ENTRY:.*:]]
+// CHECK-RV64-NEXT: [[TMP0:%.*]] = call <vscale x 16 x float> @llvm.riscv.vfwadd.mask.nxv16f32.nxv16bf16.bf16.i64(<vscale x 16 x float> [[VD]], <vscale x 16 x bfloat> [[VS2]], bfloat [[RS1]], <vscale x 16 x i1> [[VM]], i64 0, i64 [[VL]], i64 0)
+// CHECK-RV64-NEXT: ret <vscale x 16 x float> [[TMP0]]
+//
+vfloat32m8_t test_vfwadd_vf_bf16m4_f32m8_rm_tumu(vbool4_t vm, vfloat32m8_t vd,
+ vbfloat16m4_t vs2, __bf16 rs1,
+ size_t vl) {
+ return __riscv_vfwadd_vf_tumu(vm, vd, vs2, rs1, __RISCV_FRM_RNE, vl);
+}
+
+// CHECK-RV64-LABEL: define dso_local <vscale x 16 x float> @test_vfwadd_wv_bf16m4_f32m8_rm_tumu(
+// CHECK-RV64-SAME: <vscale x 16 x i1> [[VM:%.*]], <vscale x 16 x float> [[VD:%.*]], <vscale x 16 x float> [[VS2:%.*]], <vscale x 16 x bfloat> [[VS1:%.*]], i64 noundef [[VL:%.*]]) #[[ATTR0]] {
+// CHECK-RV64-NEXT: [[ENTRY:.*:]]
+// CHECK-RV64-NEXT: [[TMP0:%.*]] = call <vscale x 16 x float> @llvm.riscv.vfwadd.w.mask.nxv16f32.nxv16bf16.i64(<vscale x 16 x float> [[VD]], <vscale x 16 x float> [[VS2]], <vscale x 16 x bfloat> [[VS1]], <vscale x 16 x i1> [[VM]], i64 0, i64 [[VL]], i64 0)
+// CHECK-RV64-NEXT: ret <vscale x 16 x float> [[TMP0]]
+//
+vfloat32m8_t test_vfwadd_wv_bf16m4_f32m8_rm_tumu(vbool4_t vm, vfloat32m8_t vd,
+ vfloat32m8_t vs2,
+ vbfloat16m4_t vs1, size_t vl) {
+ return __riscv_vfwadd_wv_tumu(vm, vd, vs2, vs1, __RISCV_FRM_RNE, vl);
+}
+
+// CHECK-RV64-LABEL: define dso_local <vscale x 16 x float> @test_vfwadd_wf_bf16_f32m8_rm_tumu(
+// CHECK-RV64-SAME: <vscale x 16 x i1> [[VM:%.*]], <vscale x 16 x float> [[VD:%.*]], <vscale x 16 x float> [[VS2:%.*]], bfloat noundef [[RS1:%.*]], i64 noundef [[VL:%.*]]) #[[ATTR0]] {
+// CHECK-RV64-NEXT: [[ENTRY:.*:]]
+// CHECK-RV64-NEXT: [[TMP0:%.*]] = call <vscale x 16 x float> @llvm.riscv.vfwadd.w.mask.nxv16f32.bf16.i64(<vscale x 16 x float> [[VD]], <vscale x 16 x float> [[VS2]], bfloat [[RS1]], <vscale x 16 x i1> [[VM]], i64 0, i64 [[VL]], i64 0)
+// CHECK-RV64-NEXT: ret <vscale x 16 x float> [[TMP0]]
+//
+vfloat32m8_t test_vfwadd_wf_bf16_f32m8_rm_tumu(vbool4_t vm, vfloat32m8_t vd,
+ vfloat32m8_t vs2, __bf16 rs1,
+ size_t vl) {
+ return __riscv_vfwadd_wf_tumu(vm, vd, vs2, rs1, __RISCV_FRM_RNE, vl);
+}
+
+// CHECK-RV64-LABEL: define dso_local <vscale x 1 x float> @test_vfwadd_vv_bf16mf4_f32mf2_rm_mu(
+// CHECK-RV64-SAME: <vscale x 1 x i1> [[VM:%.*]], <vscale x 1 x float> [[VD:%.*]], <vscale x 1 x bfloat> [[VS2:%.*]], <vscale x 1 x bfloat> [[VS1:%.*]], i64 noundef [[VL:%.*]]) #[[ATTR0]] {
+// CHECK-RV64-NEXT: [[ENTRY:.*:]]
+// CHECK-RV64-NEXT: [[TMP0:%.*]] = call <vscale x 1 x float> @llvm.riscv.vfwadd.mask.nxv1f32.nxv1bf16.nxv1bf16.i64(<vscale x 1 x float> [[VD]], <vscale x 1 x bfloat> [[VS2]], <vscale x 1 x bfloat> [[VS1]], <vscale x 1 x i1> [[VM]], i64 0, i64 [[VL]], i64 1)
+// CHECK-RV64-NEXT: ret <vscale x 1 x float> [[TMP0]]
+//
+vfloat32mf2_t test_vfwadd_vv_bf16mf4_f32mf2_rm_mu(vbool64_t vm,
+ vfloat32mf2_t vd,
+ vbfloat16mf4_t vs2,
+ vbfloat16mf4_t vs1,
+ size_t vl) {
+ return __riscv_vfwadd_vv_mu(vm, vd, vs2, vs1, __RISCV_FRM_RNE, vl);
+}
+
+// CHECK-RV64-LABEL: define dso_local <vscale x 1 x float> @test_vfwadd_vf_bf16mf4_f32mf2_rm_mu(
+// CHECK-RV64-SAME: <vscale x 1 x i1> [[VM:%.*]], <vscale x 1 x float> [[VD:%.*]], <vscale x 1 x bfloat> [[VS2:%.*]], bfloat noundef [[RS1:%.*]], i64 noundef [[VL:%.*]]) #[[ATTR0]] {
+// CHECK-RV64-NEXT: [[ENTRY:.*:]]
+// CHECK-RV64-NEXT: [[TMP0:%.*]] = call <vscale x 1 x float> @llvm.riscv.vfwadd.mask.nxv1f32.nxv1bf16.bf16.i64(<vscale x 1 x float> [[VD]], <vscale x 1 x bfloat> [[VS2]], bfloat [[RS1]], <vscale x 1 x i1> [[VM]], i64 0, i64 [[VL]], i64 1)
+// CHECK-RV64-NEXT: ret <vscale x 1 x float> [[TMP0]]
+//
+vfloat32mf2_t test_vfwadd_vf_bf16mf4_f32mf2_rm_mu(vbool64_t vm,
+ vfloat32mf2_t vd,
+ vbfloat16mf4_t vs2,
+ __bf16 rs1, size_t vl) {
+ return __riscv_vfwadd_vf_mu(vm, vd, vs2, rs1, __RISCV_FRM_RNE, vl);
+}
+
+// CHECK-RV64-LABEL: define dso_local <vscale x 1 x float> @test_vfwadd_wv_bf16mf4_f32mf2_rm_mu(
+// CHECK-RV64-SAME: <vscale x 1 x i1> [[VM:%.*]], <vscale x 1 x float> [[VD:%.*]], <vscale x 1 x float> [[VS2:%.*]], <vscale x 1 x bfloat> [[VS1:%.*]], i64 noundef [[VL:%.*]]) #[[ATTR0]] {
+// CHECK-RV64-NEXT: [[ENTRY:.*:]]
+// CHECK-RV64-NEXT: [[TMP0:%.*]] = call <vscale x 1 x float> @llvm.riscv.vfwadd.w.mask.nxv1f32.nxv1bf16.i64(<vscale x 1 x float> [[VD]], <vscale x 1 x float> [[VS2]], <vscale x 1 x bfloat> [[VS1]], <vscale x 1 x i1> [[VM]], i64 0, i64 [[VL]], i64 1)
+// CHECK-RV64-NEXT: ret <vscale x 1 x float> [[TMP0]]
+//
+vfloat32mf2_t test_vfwadd_wv_bf16mf4_f32mf2_rm_mu(vbool64_t vm,
+ vfloat32mf2_t vd,
+ vfloat32mf2_t vs2,
+ vbfloat16mf4_t vs1,
+ size_t vl) {
+ return __riscv_vfwadd_wv_mu(vm, vd, vs2, vs1, __RISCV_FRM_RNE, vl);
+}
+
+// CHECK-RV64-LABEL: define dso_local <vscale x 1 x float> @test_vfwadd_wf_bf16_f32mf2_rm_mu(
+// CHECK-RV64-SAME: <vscale x 1 x i1> [[VM:%.*]], <vscale x 1 x float> [[VD:%.*]], <vscale x 1 x float> [[VS2:%.*]], bfloat noundef [[RS1:%.*]], i64 noundef [[VL:%.*]]) #[[ATTR0]] {
+// CHECK-RV64-NEXT: [[ENTRY:.*:]]
+// CHECK-RV64-NEXT: [[TMP0:%.*]] = call <vscale x 1 x float> @llvm.riscv.vfwadd.w.mask.nxv1f32.bf16.i64(<vscale x 1 x float> [[VD]], <vscale x 1 x float> [[VS2]], bfloat [[RS1]], <vscale x 1 x i1> [[VM]], i64 0, i64 [[VL]], i64 1)
+// CHECK-RV64-NEXT: ret <vscale x 1 x float> [[TMP0]]
+//
+vfloat32mf2_t test_vfwadd_wf_bf16_f32mf2_rm_mu(vbool64_t vm, vfloat32mf2_t vd,
+ vfloat32mf2_t vs2, __bf16 rs1,
+ size_t vl) {
+ return __riscv_vfwadd_wf_mu(vm, vd, vs2, rs1, __RISCV_FRM_RNE, vl);
+}
+
+// CHECK-RV64-LABEL: define dso_local <vscale x 2 x float> @test_vfwadd_vv_bf16mf2_f32m1_rm_mu(
+// CHECK-RV64-SAME: <vscale x 2 x i1> [[VM:%.*]], <vscale x 2 x float> [[VD:%.*]], <vscale x 2 x bfloat> [[VS2:%.*]], <vscale x 2 x bfloat> [[VS1:%.*]], i64 noundef [[VL:%.*]]) #[[ATTR0]] {
+// CHECK-RV64-NEXT: [[ENTRY:.*:]]
+// CHECK-RV64-NEXT: [[TMP0:%.*]] = call <vscale x 2 x float> @llvm.riscv.vfwadd.mask.nxv2f32.nxv2bf16.nxv2bf16.i64(<vscale x 2 x float> [[VD]], <vscale x 2 x bfloat> [[VS2]], <vscale x 2 x bfloat> [[VS1]], <vscale x 2 x i1> [[VM]], i64 0, i64 [[VL]], i64 1)
+// CHECK-RV64-NEXT: ret <vscale x 2 x float> [[TMP0]]
+//
+vfloat32m1_t test_vfwadd_vv_bf16mf2_f32m1_rm_mu(vbool32_t vm, vfloat32m1_t vd,
+ vbfloat16mf2_t vs2,
+ vbfloat16mf2_t vs1, size_t vl) {
+ return __riscv_vfwadd_vv_mu(vm, vd, vs2, vs1, __RISCV_FRM_RNE, vl);
+}
+
+// CHECK-RV64-LABEL: define dso_local <vscale x 2 x float> @test_vfwadd_vf_bf16mf2_f32m1_rm_mu(
+// CHECK-RV64-SAME: <vscale x 2 x i1> [[VM:%.*]], <vscale x 2 x float> [[VD:%.*]], <vscale x 2 x bfloat> [[VS2:%.*]], bfloat noundef [[RS1:%.*]], i64 noundef [[VL:%.*]]) #[[ATTR0]] {
+// CHECK-RV64-NEXT: [[ENTRY:.*:]]
+// CHECK-RV64-NEXT: [[TMP0:%.*]] = call <vscale x 2 x float> @llvm.riscv.vfwadd.mask.nxv2f32.nxv2bf16.bf16.i64(<vscale x 2 x float> [[VD]], <vscale x 2 x bfloat> [[VS2]], bfloat [[RS1]], <vscale x 2 x i1> [[VM]], i64 0, i64 [[VL]], i64 1)
+// CHECK-RV64-NEXT: ret <vscale x 2 x float> [[TMP0]]
+//
+vfloat32m1_t test_vfwadd_vf_bf16mf2_f32m1_rm_mu(vbool32_t vm, vfloat32m1_t vd,
+ vbfloat16mf2_t vs2, __bf16 rs1,
+ size_t vl) {
+ return __riscv_vfwadd_vf_mu(vm, vd, vs2, rs1, __RISCV_FRM_RNE, vl);
+}
+
+// CHECK-RV64-LABEL: define dso_local <vscale x 2 x float> @test_vfwadd_wv_bf16mf2_f32m1_rm_mu(
+// CHECK-RV64-SAME: <vscale x 2 x i1> [[VM:%.*]], <vscale x 2 x float> [[VD:%.*]], <vscale x 2 x float> [[VS2:%.*]], <vscale x 2 x bfloat> [[VS1:%.*]], i64 noundef [[VL:%.*]]) #[[ATTR0]] {
+// CHECK-RV64-NEXT: [[ENTRY:.*:]]
+// CHECK-RV64-NEXT: [[TMP0:%.*]] = call <vscale x 2 x float> @llvm.riscv.vfwadd.w.mask.nxv2f32.nxv2bf16.i64(<vscale x 2 x float> [[VD]], <vscale x 2 x float> [[VS2]], <vscale x 2 x bfloat> [[VS1]], <vscale x 2 x i1> [[VM]], i64 0, i64 [[VL]], i64 1)
+// CHECK-RV64-NEXT: ret <vscale x 2 x float> [[TMP0]]
+//
+vfloat32m1_t test_vfwadd_wv_bf16mf2_f32m1_rm_mu(vbool32_t vm, vfloat32m1_t vd,
+ vfloat32m1_t vs2,
+ vbfloat16mf2_t vs1, size_t vl) {
+ return __riscv_vfwadd_wv_mu(vm, vd, vs2, vs1, __RISCV_FRM_RNE, vl);
+}
+
+// CHECK-RV64-LABEL: define dso_local <vscale x 2 x float> @test_vfwadd_wf_bf16_f32m1_rm_mu(
+// CHECK-RV64-SAME: <vscale x 2 x i1> [[VM:%.*]], <vscale x 2 x float> [[VD:%.*]], <vscale x 2 x float> [[VS2:%.*]], bfloat noundef [[RS1:%.*]], i64 noundef [[VL:%.*]]) #[[ATTR0]] {
+// CHECK-RV64-NEXT: [[ENTRY:.*:]]
+// CHECK-RV64-NEXT: [[TMP0:%.*]] = call <vscale x 2 x float> @llvm.riscv.vfwadd.w.mask.nxv2f32.bf16.i64(<vscale x 2 x float> [[VD]], <vscale x 2 x float> [[VS2]], bfloat [[RS1]], <vscale x 2 x i1> [[VM]], i64 0, i64 [[VL]], i64 1)
+// CHECK-RV64-NEXT: ret <vscale x 2 x float> [[TMP0]]
+//
+vfloat32m1_t test_vfwadd_wf_bf16_f32m1_rm_mu(vbool32_t vm, vfloat32m1_t vd,
+ vfloat32m1_t vs2, __bf16 rs1,
+ size_t vl) {
+ return __riscv_vfwadd_wf_mu(vm, vd, vs2, rs1, __RISCV_FRM_RNE, vl);
+}
+
+// CHECK-RV64-LABEL: define dso_local <vscale x 4 x float> @test_vfwadd_vv_bf16m1_f32m2_rm_mu(
+// CHECK-RV64-SAME: <vscale x 4 x i1> [[VM:%.*]], <vscale x 4 x float> [[VD:%.*]], <vscale x 4 x bfloat> [[VS2:%.*]], <vscale x 4 x bfloat> [[VS1:%.*]], i64 noundef [[VL:%.*]]) #[[ATTR0]] {
+// CHECK-RV64-NEXT: [[ENTRY:.*:]]
+// CHECK-RV64-NEXT: [[TMP0:%.*]] = call <vscale x 4 x float> @llvm.riscv.vfwadd.mask.nxv4f32.nxv4bf16.nxv4bf16.i64(<vscale x 4 x float> [[VD]], <vscale x 4 x bfloat> [[VS2]], <vscale x 4 x bfloat> [[VS1]], <vscale x 4 x i1> [[VM]], i64 0, i64 [[VL]], i64 1)
+// CHECK-RV64-NEXT: ret <vscale x 4 x float> [[TMP0]]
+//
+vfloat32m2_t test_vfwadd_vv_bf16m1_f32m2_rm_mu(vbool16_t vm, vfloat32m2_t vd,
+ vbfloat16m1_t vs2,
+ vbfloat16m1_t vs1, size_t vl) {
+ return __riscv_vfwadd_vv_mu(vm, vd, vs2, vs1, __RISCV_FRM_RNE, vl);
+}
+
+// CHECK-RV64-LABEL: define dso_local <vscale x 4 x float> @test_vfwadd_vf_bf16m1_f32m2_rm_mu(
+// CHECK-RV64-SAME: <vscale x 4 x i1> [[VM:%.*]], <vscale x 4 x float> [[VD:%.*]], <vscale x 4 x bfloat> [[VS2:%.*]], bfloat noundef [[RS1:%.*]], i64 noundef [[VL:%.*]]) #[[ATTR0]] {
+// CHECK-RV64-NEXT: [[ENTRY:.*:]]
+// CHECK-RV64-NEXT: [[TMP0:%.*]] = call <vscale x 4 x float> @llvm.riscv.vfwadd.mask.nxv4f32.nxv4bf16.bf16.i64(<vscale x 4 x float> [[VD]], <vscale x 4 x bfloat> [[VS2]], bfloat [[RS1]], <vscale x 4 x i1> [[VM]], i64 0, i64 [[VL]], i64 1)
+// CHECK-RV64-NEXT: ret <vscale x 4 x float> [[TMP0]]
+//
+vfloat32m2_t test_vfwadd_vf_bf16m1_f32m2_rm_mu(vbool16_t vm, vfloat32m2_t vd,
+ vbfloat16m1_t vs2, __bf16 rs1,
+ size_t vl) {
+ return __riscv_vfwadd_vf_mu(vm, vd, vs2, rs1, __RISCV_FRM_RNE, vl);
+}
+
+// CHECK-RV64-LABEL: define dso_local <vscale x 4 x float> @test_vfwadd_wv_bf16m1_f32m2_rm_mu(
+// CHECK-RV64-SAME: <vscale x 4 x i1> [[VM:%.*]], <vscale x 4 x float> [[VD:%.*]], <vscale x 4 x float> [[VS2:%.*]], <vscale x 4 x bfloat> [[VS1:%.*]], i64 noundef [[VL:%.*]]) #[[ATTR0]] {
+// CHECK-RV64-NEXT: [[ENTRY:.*:]]
+// CHECK-RV64-NEXT: [[TMP0:%.*]] = call <vscale x 4 x float> @llvm.riscv.vfwadd.w.mask.nxv4f32.nxv4bf16.i64(<vscale x 4 x float> [[VD]], <vscale x 4 x float> [[VS2]], <vscale x 4 x bfloat> [[VS1]], <vscale x 4 x i1> [[VM]], i64 0, i64 [[VL]], i64 1)
+// CHECK-RV64-NEXT: ret <vscale x 4 x float> [[TMP0]]
+//
+vfloat32m2_t test_vfwadd_wv_bf16m1_f32m2_rm_mu(vbool16_t vm, vfloat32m2_t vd,
+ vfloat32m2_t vs2,
+ vbfloat16m1_t vs1, size_t vl) {
+ return __riscv_vfwadd_wv_mu(vm, vd, vs2, vs1, __RISCV_FRM_RNE, vl);
+}
+
+// CHECK-RV64-LABEL: define dso_local <vscale x 4 x float> @test_vfwadd_wf_bf16_f32m2_rm_mu(
+// CHECK-RV64-SAME: <vscale x 4 x i1> [[VM:%.*]], <vscale x 4 x float> [[VD:%.*]], <vscale x 4 x float> [[VS2:%.*]], bfloat noundef [[RS1:%.*]], i64 noundef [[VL:%.*]]) #[[ATTR0]] {
+// CHECK-RV64-NEXT: [[ENTRY:.*:]]
+// CHECK-RV64-NEXT: [[TMP0:%.*]] = call <vscale x 4 x float> @llvm.riscv.vfwadd.w.mask.nxv4f32.bf16.i64(<vscale x 4 x float> [[VD]], <vscale x 4 x float> [[VS2]], bfloat [[RS1]], <vscale x 4 x i1> [[VM]], i64 0, i64 [[VL]], i64 1)
+// CHECK-RV64-NEXT: ret <vscale x 4 x float> [[TMP0]]
+//
+vfloat32m2_t test_vfwadd_wf_bf16_f32m2_rm_mu(vbool16_t vm, vfloat32m2_t vd,
+ vfloat32m2_t vs2, __bf16 rs1,
+ size_t vl) {
+ return __riscv_vfwadd_wf_mu(vm, vd, vs2, rs1, __RISCV_FRM_RNE, vl);
+}
+
+// CHECK-RV64-LABEL: define dso_local <vscale x 8 x float> @test_vfwadd_vv_bf16m2_f32m4_rm_mu(
+// CHECK-RV64-SAME: <vscale x 8 x i1> [[VM:%.*]], <vscale x 8 x float> [[VD:%.*]], <vscale x 8 x bfloat> [[VS2:%.*]], <vscale x 8 x bfloat> [[VS1:%.*]], i64 noundef [[VL:%.*]]) #[[ATTR0]] {
+// CHECK-RV64-NEXT: [[ENTRY:.*:]]
+// CHECK-RV64-NEXT: [[TMP0:%.*]] = call <vscale x 8 x float> @llvm.riscv.vfwadd.mask.nxv8f32.nxv8bf16.nxv8bf16.i64(<vscale x 8 x float> [[VD]], <vscale x 8 x bfloat> [[VS2]], <vscale x 8 x bfloat> [[VS1]], <vscale x 8 x i1> [[VM]], i64 0, i64 [[VL]], i64 1)
+// CHECK-RV64-NEXT: ret <vscale x 8 x float> [[TMP0]]
+//
+vfloat32m4_t test_vfwadd_vv_bf16m2_f32m4_rm_mu(vbool8_t vm, vfloat32m4_t vd,
+ vbfloat16m2_t vs2,
+ vbfloat16m2_t vs1, size_t vl) {
+ return __riscv_vfwadd_vv_mu(vm, vd, vs2, vs1, __RISCV_FRM_RNE, vl);
+}
+
+// CHECK-RV64-LABEL: define dso_local <vscale x 8 x float> @test_vfwadd_vf_bf16m2_f32m4_rm_mu(
+// CHECK-RV64-SAME: <vscale x 8 x i1> [[VM:%.*]], <vscale x 8 x float> [[VD:%.*]], <vscale x 8 x bfloat> [[VS2:%.*]], bfloat noundef [[RS1:%.*]], i64 noundef [[VL:%.*]]) #[[ATTR0]] {
+// CHECK-RV64-NEXT: [[ENTRY:.*:]]
+// CHECK-RV64-NEXT: [[TMP0:%.*]] = call <vscale x 8 x float> @llvm.riscv.vfwadd.mask.nxv8f32.nxv8bf16.bf16.i64(<vscale x 8 x float> [[VD]], <vscale x 8 x bfloat> [[VS2]], bfloat [[RS1]], <vscale x 8 x i1> [[VM]], i64 0, i64 [[VL]], i64 1)
+// CHECK-RV64-NEXT: ret <vscale x 8 x float> [[TMP0]]
+//
+vfloat32m4_t test_vfwadd_vf_bf16m2_f32m4_rm_mu(vbool8_t vm, vfloat32m4_t vd,
+ vbfloat16m2_t vs2, __bf16 rs1,
+ size_t vl) {
+ return __riscv_vfwadd_vf_mu(vm, vd, vs2, rs1, __RISCV_FRM_RNE, vl);
+}
+
+// CHECK-RV64-LABEL: define dso_local <vscale x 8 x float> @test_vfwadd_wv_bf16m2_f32m4_rm_mu(
+// CHECK-RV64-SAME: <vscale x 8 x i1> [[VM:%.*]], <vscale x 8 x float> [[VD:%.*]], <vscale x 8 x float> [[VS2:%.*]], <vscale x 8 x bfloat> [[VS1:%.*]], i64 noundef [[VL:%.*]]) #[[ATTR0]] {
+// CHECK-RV64-NEXT: [[ENTRY:.*:]]
+// CHECK-RV64-NEXT: [[TMP0:%.*]] = call <vscale x 8 x float> @llvm.riscv.vfwadd.w.mask.nxv8f32.nxv8bf16.i64(<vscale x 8 x float> [[VD]], <vscale x 8 x float> [[VS2]], <vscale x 8 x bfloat> [[VS1]], <vscale x 8 x i1> [[VM]], i64 0, i64 [[VL]], i64 1)
+// CHECK-RV64-NEXT: ret <vscale x 8 x float> [[TMP0]]
+//
+vfloat32m4_t test_vfwadd_wv_bf16m2_f32m4_rm_mu(vbool8_t vm, vfloat32m4_t vd,
+ vfloat32m4_t vs2,
+ vbfloat16m2_t vs1, size_t vl) {
+ return __riscv_vfwadd_wv_mu(vm, vd, vs2, vs1, __RISCV_FRM_RNE, vl);
+}
+
+// CHECK-RV64-LABEL: define dso_local <vscale x 8 x float> @test_vfwadd_wf_bf16_f32m4_rm_mu(
+// CHECK-RV64-SAME: <vscale x 8 x i1> [[VM:%.*]], <vscale x 8 x float> [[VD:%.*]], <vscale x 8 x float> [[VS2:%.*]], bfloat noundef [[RS1:%.*]], i64 noundef [[VL:%.*]]) #[[ATTR0]] {
+// CHECK-RV64-NEXT: [[ENTRY:.*:]]
+// CHECK-RV64-NEXT: [[TMP0:%.*]] = call <vscale x 8 x float> @llvm.riscv.vfwadd.w.mask.nxv8f32.bf16.i64(<vscale x 8 x float> [[VD]], <vscale x 8 x float> [[VS2]], bfloat [[RS1]], <vscale x 8 x i1> [[VM]], i64 0, i64 [[VL]], i64 1)
+// CHECK-RV64-NEXT: ret <vscale x 8 x float> [[TMP0]]
+//
+vfloat32m4_t test_vfwadd_wf_bf16_f32m4_rm_mu(vbool8_t vm, vfloat32m4_t vd,
+ vfloat32m4_t vs2, __bf16 rs1,
+ size_t vl) {
+ return __riscv_vfwadd_wf_mu(vm, vd, vs2, rs1, __RISCV_FRM_RNE, vl);
+}
+
+// CHECK-RV64-LABEL: define dso_local <vscale x 16 x float> @test_vfwadd_vv_bf16m4_f32m8_rm_mu(
+// CHECK-RV64-SAME: <vscale x 16 x i1> [[VM:%.*]], <vscale x 16 x float> [[VD:%.*]], <vscale x 16 x bfloat> [[VS2:%.*]], <vscale x 16 x bfloat> [[VS1:%.*]], i64 noundef [[VL:%.*]]) #[[ATTR0]] {
+// CHECK-RV64-NEXT: [[ENTRY:.*:]]
+// CHECK-RV64-NEXT: [[TMP0:%.*]] = call <vscale x 16 x float> @llvm.riscv.vfwadd.mask.nxv16f32.nxv16bf16.nxv16bf16.i64(<vscale x 16 x float> [[VD]], <vscale x 16 x bfloat> [[VS2]], <vscale x 16 x bfloat> [[VS1]], <vscale x 16 x i1> [[VM]], i64 0, i64 [[VL]], i64 1)
+// CHECK-RV64-NEXT: ret <vscale x 16 x float> [[TMP0]]
+//
+vfloat32m8_t test_vfwadd_vv_bf16m4_f32m8_rm_mu(vbool4_t vm, vfloat32m8_t vd,
+ vbfloat16m4_t vs2,
+ vbfloat16m4_t vs1, size_t vl) {
+ return __riscv_vfwadd_vv_mu(vm, vd, vs2, vs1, __RISCV_FRM_RNE, vl);
+}
+
+// CHECK-RV64-LABEL: define dso_local <vscale x 16 x float> @test_vfwadd_vf_bf16m4_f32m8_rm_mu(
+// CHECK-RV64-SAME: <vscale x 16 x i1> [[VM:%.*]], <vscale x 16 x float> [[VD:%.*]], <vscale x 16 x bfloat> [[VS2:%.*]], bfloat noundef [[RS1:%.*]], i64 noundef [[VL:%.*]]) #[[ATTR0]] {
+// CHECK-RV64-NEXT: [[ENTRY:.*:]]
+// CHECK-RV64-NEXT: [[TMP0:%.*]] = call <vscale x 16 x float> @llvm.riscv.vfwadd.mask.nxv16f32.nxv16bf16.bf16.i64(<vscale x 16 x float> [[VD]], <vscale x 16 x bfloat> [[VS2]], bfloat [[RS1]], <vscale x 16 x i1> [[VM]], i64 0, i64 [[VL]], i64 1)
+// CHECK-RV64-NEXT: ret <vscale x 16 x float> [[TMP0]]
+//
+vfloat32m8_t test_vfwadd_vf_bf16m4_f32m8_rm_mu(vbool4_t vm, vfloat32m8_t vd,
+ vbfloat16m4_t vs2, __bf16 rs1,
+ size_t vl) {
+ return __riscv_vfwadd_vf_mu(vm, vd, vs2, rs1, __RISCV_FRM_RNE, vl);
+}
+
+// CHECK-RV64-LABEL: define dso_local <vscale x 16 x float> @test_vfwadd_wv_bf16m4_f32m8_rm_mu(
+// CHECK-RV64-SAME: <vscale x 16 x i1> [[VM:%.*]], <vscale x 16 x float> [[VD:%.*]], <vscale x 16 x float> [[VS2:%.*]], <vscale x 16 x bfloat> [[VS1:%.*]], i64 noundef [[VL:%.*]]) #[[ATTR0]] {
+// CHECK-RV64-NEXT: [[ENTRY:.*:]]
+// CHECK-RV64-NEXT: [[TMP0:%.*]] = call <vscale x 16 x float> @llvm.riscv.vfwadd.w.mask.nxv16f32.nxv16bf16.i64(<vscale x 16 x float> [[VD]], <vscale x 16 x float> [[VS2]], <vscale x 16 x bfloat> [[VS1]], <vscale x 16 x i1> [[VM]], i64 0, i64 [[VL]], i64 1)
+// CHECK-RV64-NEXT: ret <vscale x 16 x float> [[TMP0]]
+//
+vfloat32m8_t test_vfwadd_wv_bf16m4_f32m8_rm_mu(vbool4_t vm, vfloat32m8_t vd,
+ vfloat32m8_t vs2,
+ vbfloat16m4_t vs1, size_t vl) {
+ return __riscv_vfwadd_wv_mu(vm, vd, vs2, vs1, __RISCV_FRM_RNE, vl);
+}
+
+// CHECK-RV64-LABEL: define dso_local <vscale x 16 x float> @test_vfwadd_wf_bf16_f32m8_rm_mu(
+// CHECK-RV64-SAME: <vscale x 16 x i1> [[VM:%.*]], <vscale x 16 x float> [[VD:%.*]], <vscale x 16 x float> [[VS2:%.*]], bfloat noundef [[RS1:%.*]], i64 noundef [[VL:%.*]]) #[[ATTR0]] {
+// CHECK-RV64-NEXT: [[ENTRY:.*:]]
+// CHECK-RV64-NEXT: [[TMP0:%.*]] = call <vscale x 16 x float> @llvm.riscv.vfwadd.w.mask.nxv16f32.bf16.i64(<vscale x 16 x float> [[VD]], <vscale x 16 x float> [[VS2]], bfloat [[RS1]], <vscale x 16 x i1> [[VM]], i64 0, i64 [[VL]], i64 1)
+// CHECK-RV64-NEXT: ret <vscale x 16 x float> [[TMP0]]
+//
+vfloat32m8_t test_vfwadd_wf_bf16_f32m8_rm_mu(vbool4_t vm, vfloat32m8_t vd,
+ vfloat32m8_t vs2, __bf16 rs1,
+ size_t vl) {
+ return __riscv_vfwadd_wf_mu(vm, vd, vs2, rs1, __RISCV_FRM_RNE, vl);
+}
diff --git a/clang/test/CodeGen/RISCV/rvv-intrinsics-autogenerated/zvfbfa/policy/overloaded/vfwcvt.c b/clang/test/CodeGen/RISCV/rvv-intrinsics-autogenerated/zvfbfa/policy/overloaded/vfwcvt.c
new file mode 100644
index 0000000..7300104
--- /dev/null
+++ b/clang/test/CodeGen/RISCV/rvv-intrinsics-autogenerated/zvfbfa/policy/overloaded/vfwcvt.c
@@ -0,0 +1,765 @@
+// NOTE: Assertions have been autogenerated by utils/update_cc_test_checks.py UTC_ARGS: --version 5
+// REQUIRES: riscv-registered-target
+// RUN: %clang_cc1 -triple riscv64 -target-feature +v \
+// RUN: -target-feature +experimental-zvfbfa -disable-O0-optnone \
+// RUN: -emit-llvm %s -o - | opt -S -passes=mem2reg | \
+// RUN: FileCheck --check-prefix=CHECK-RV64 %s
+
+#include <riscv_vector.h>
+
+// CHECK-RV64-LABEL: define dso_local <vscale x 1 x bfloat> @test_vfwcvt_f_x_v_bf16mf4_tu(
+// CHECK-RV64-SAME: <vscale x 1 x bfloat> [[VD:%.*]], <vscale x 1 x i8> [[VS2:%.*]], i64 noundef [[VL:%.*]]) #[[ATTR0:[0-9]+]] {
+// CHECK-RV64-NEXT: [[ENTRY:.*:]]
+// CHECK-RV64-NEXT: [[TMP0:%.*]] = call <vscale x 1 x bfloat> @llvm.riscv.vfwcvt.f.x.v.nxv1bf16.nxv1i8.i64(<vscale x 1 x bfloat> [[VD]], <vscale x 1 x i8> [[VS2]], i64 [[VL]])
+// CHECK-RV64-NEXT: ret <vscale x 1 x bfloat> [[TMP0]]
+//
+vbfloat16mf4_t test_vfwcvt_f_x_v_bf16mf4_tu(vbfloat16mf4_t vd, vint8mf8_t vs2,
+ size_t vl) {
+ return __riscv_vfwcvt_f_bf16_tu(vd, vs2, vl);
+}
+
+// CHECK-RV64-LABEL: define dso_local <vscale x 2 x bfloat> @test_vfwcvt_f_x_v_bf16mf2_tu(
+// CHECK-RV64-SAME: <vscale x 2 x bfloat> [[VD:%.*]], <vscale x 2 x i8> [[VS2:%.*]], i64 noundef [[VL:%.*]]) #[[ATTR0]] {
+// CHECK-RV64-NEXT: [[ENTRY:.*:]]
+// CHECK-RV64-NEXT: [[TMP0:%.*]] = call <vscale x 2 x bfloat> @llvm.riscv.vfwcvt.f.x.v.nxv2bf16.nxv2i8.i64(<vscale x 2 x bfloat> [[VD]], <vscale x 2 x i8> [[VS2]], i64 [[VL]])
+// CHECK-RV64-NEXT: ret <vscale x 2 x bfloat> [[TMP0]]
+//
+vbfloat16mf2_t test_vfwcvt_f_x_v_bf16mf2_tu(vbfloat16mf2_t vd, vint8mf4_t vs2,
+ size_t vl) {
+ return __riscv_vfwcvt_f_bf16_tu(vd, vs2, vl);
+}
+
+// CHECK-RV64-LABEL: define dso_local <vscale x 4 x bfloat> @test_vfwcvt_f_x_v_bf16m1_tu(
+// CHECK-RV64-SAME: <vscale x 4 x bfloat> [[VD:%.*]], <vscale x 4 x i8> [[VS2:%.*]], i64 noundef [[VL:%.*]]) #[[ATTR0]] {
+// CHECK-RV64-NEXT: [[ENTRY:.*:]]
+// CHECK-RV64-NEXT: [[TMP0:%.*]] = call <vscale x 4 x bfloat> @llvm.riscv.vfwcvt.f.x.v.nxv4bf16.nxv4i8.i64(<vscale x 4 x bfloat> [[VD]], <vscale x 4 x i8> [[VS2]], i64 [[VL]])
+// CHECK-RV64-NEXT: ret <vscale x 4 x bfloat> [[TMP0]]
+//
+vbfloat16m1_t test_vfwcvt_f_x_v_bf16m1_tu(vbfloat16m1_t vd, vint8mf2_t vs2,
+ size_t vl) {
+ return __riscv_vfwcvt_f_bf16_tu(vd, vs2, vl);
+}
+
+// CHECK-RV64-LABEL: define dso_local <vscale x 8 x bfloat> @test_vfwcvt_f_x_v_bf16m2_tu(
+// CHECK-RV64-SAME: <vscale x 8 x bfloat> [[VD:%.*]], <vscale x 8 x i8> [[VS2:%.*]], i64 noundef [[VL:%.*]]) #[[ATTR0]] {
+// CHECK-RV64-NEXT: [[ENTRY:.*:]]
+// CHECK-RV64-NEXT: [[TMP0:%.*]] = call <vscale x 8 x bfloat> @llvm.riscv.vfwcvt.f.x.v.nxv8bf16.nxv8i8.i64(<vscale x 8 x bfloat> [[VD]], <vscale x 8 x i8> [[VS2]], i64 [[VL]])
+// CHECK-RV64-NEXT: ret <vscale x 8 x bfloat> [[TMP0]]
+//
+vbfloat16m2_t test_vfwcvt_f_x_v_bf16m2_tu(vbfloat16m2_t vd, vint8m1_t vs2,
+ size_t vl) {
+ return __riscv_vfwcvt_f_bf16_tu(vd, vs2, vl);
+}
+
+// CHECK-RV64-LABEL: define dso_local <vscale x 16 x bfloat> @test_vfwcvt_f_x_v_bf16m4_tu(
+// CHECK-RV64-SAME: <vscale x 16 x bfloat> [[VD:%.*]], <vscale x 16 x i8> [[VS2:%.*]], i64 noundef [[VL:%.*]]) #[[ATTR0]] {
+// CHECK-RV64-NEXT: [[ENTRY:.*:]]
+// CHECK-RV64-NEXT: [[TMP0:%.*]] = call <vscale x 16 x bfloat> @llvm.riscv.vfwcvt.f.x.v.nxv16bf16.nxv16i8.i64(<vscale x 16 x bfloat> [[VD]], <vscale x 16 x i8> [[VS2]], i64 [[VL]])
+// CHECK-RV64-NEXT: ret <vscale x 16 x bfloat> [[TMP0]]
+//
+vbfloat16m4_t test_vfwcvt_f_x_v_bf16m4_tu(vbfloat16m4_t vd, vint8m2_t vs2,
+ size_t vl) {
+ return __riscv_vfwcvt_f_bf16_tu(vd, vs2, vl);
+}
+
+// CHECK-RV64-LABEL: define dso_local <vscale x 32 x bfloat> @test_vfwcvt_f_x_v_bf16m8_tu(
+// CHECK-RV64-SAME: <vscale x 32 x bfloat> [[VD:%.*]], <vscale x 32 x i8> [[VS2:%.*]], i64 noundef [[VL:%.*]]) #[[ATTR0]] {
+// CHECK-RV64-NEXT: [[ENTRY:.*:]]
+// CHECK-RV64-NEXT: [[TMP0:%.*]] = call <vscale x 32 x bfloat> @llvm.riscv.vfwcvt.f.x.v.nxv32bf16.nxv32i8.i64(<vscale x 32 x bfloat> [[VD]], <vscale x 32 x i8> [[VS2]], i64 [[VL]])
+// CHECK-RV64-NEXT: ret <vscale x 32 x bfloat> [[TMP0]]
+//
+vbfloat16m8_t test_vfwcvt_f_x_v_bf16m8_tu(vbfloat16m8_t vd, vint8m4_t vs2,
+ size_t vl) {
+ return __riscv_vfwcvt_f_bf16_tu(vd, vs2, vl);
+}
+
+// CHECK-RV64-LABEL: define dso_local <vscale x 1 x bfloat> @test_vfwcvt_f_xu_v_bf16mf4_tu(
+// CHECK-RV64-SAME: <vscale x 1 x bfloat> [[VD:%.*]], <vscale x 1 x i8> [[VS2:%.*]], i64 noundef [[VL:%.*]]) #[[ATTR0]] {
+// CHECK-RV64-NEXT: [[ENTRY:.*:]]
+// CHECK-RV64-NEXT: [[TMP0:%.*]] = call <vscale x 1 x bfloat> @llvm.riscv.vfwcvt.f.xu.v.nxv1bf16.nxv1i8.i64(<vscale x 1 x bfloat> [[VD]], <vscale x 1 x i8> [[VS2]], i64 [[VL]])
+// CHECK-RV64-NEXT: ret <vscale x 1 x bfloat> [[TMP0]]
+//
+vbfloat16mf4_t test_vfwcvt_f_xu_v_bf16mf4_tu(vbfloat16mf4_t vd, vuint8mf8_t vs2,
+ size_t vl) {
+ return __riscv_vfwcvt_f_bf16_tu(vd, vs2, vl);
+}
+
+// CHECK-RV64-LABEL: define dso_local <vscale x 2 x bfloat> @test_vfwcvt_f_xu_v_bf16mf2_tu(
+// CHECK-RV64-SAME: <vscale x 2 x bfloat> [[VD:%.*]], <vscale x 2 x i8> [[VS2:%.*]], i64 noundef [[VL:%.*]]) #[[ATTR0]] {
+// CHECK-RV64-NEXT: [[ENTRY:.*:]]
+// CHECK-RV64-NEXT: [[TMP0:%.*]] = call <vscale x 2 x bfloat> @llvm.riscv.vfwcvt.f.xu.v.nxv2bf16.nxv2i8.i64(<vscale x 2 x bfloat> [[VD]], <vscale x 2 x i8> [[VS2]], i64 [[VL]])
+// CHECK-RV64-NEXT: ret <vscale x 2 x bfloat> [[TMP0]]
+//
+vbfloat16mf2_t test_vfwcvt_f_xu_v_bf16mf2_tu(vbfloat16mf2_t vd, vuint8mf4_t vs2,
+ size_t vl) {
+ return __riscv_vfwcvt_f_bf16_tu(vd, vs2, vl);
+}
+
+// CHECK-RV64-LABEL: define dso_local <vscale x 4 x bfloat> @test_vfwcvt_f_xu_v_bf16m1_tu(
+// CHECK-RV64-SAME: <vscale x 4 x bfloat> [[VD:%.*]], <vscale x 4 x i8> [[VS2:%.*]], i64 noundef [[VL:%.*]]) #[[ATTR0]] {
+// CHECK-RV64-NEXT: [[ENTRY:.*:]]
+// CHECK-RV64-NEXT: [[TMP0:%.*]] = call <vscale x 4 x bfloat> @llvm.riscv.vfwcvt.f.xu.v.nxv4bf16.nxv4i8.i64(<vscale x 4 x bfloat> [[VD]], <vscale x 4 x i8> [[VS2]], i64 [[VL]])
+// CHECK-RV64-NEXT: ret <vscale x 4 x bfloat> [[TMP0]]
+//
+vbfloat16m1_t test_vfwcvt_f_xu_v_bf16m1_tu(vbfloat16m1_t vd, vuint8mf2_t vs2,
+ size_t vl) {
+ return __riscv_vfwcvt_f_bf16_tu(vd, vs2, vl);
+}
+
+// CHECK-RV64-LABEL: define dso_local <vscale x 8 x bfloat> @test_vfwcvt_f_xu_v_bf16m2_tu(
+// CHECK-RV64-SAME: <vscale x 8 x bfloat> [[VD:%.*]], <vscale x 8 x i8> [[VS2:%.*]], i64 noundef [[VL:%.*]]) #[[ATTR0]] {
+// CHECK-RV64-NEXT: [[ENTRY:.*:]]
+// CHECK-RV64-NEXT: [[TMP0:%.*]] = call <vscale x 8 x bfloat> @llvm.riscv.vfwcvt.f.xu.v.nxv8bf16.nxv8i8.i64(<vscale x 8 x bfloat> [[VD]], <vscale x 8 x i8> [[VS2]], i64 [[VL]])
+// CHECK-RV64-NEXT: ret <vscale x 8 x bfloat> [[TMP0]]
+//
+vbfloat16m2_t test_vfwcvt_f_xu_v_bf16m2_tu(vbfloat16m2_t vd, vuint8m1_t vs2,
+ size_t vl) {
+ return __riscv_vfwcvt_f_bf16_tu(vd, vs2, vl);
+}
+
+// CHECK-RV64-LABEL: define dso_local <vscale x 16 x bfloat> @test_vfwcvt_f_xu_v_bf16m4_tu(
+// CHECK-RV64-SAME: <vscale x 16 x bfloat> [[VD:%.*]], <vscale x 16 x i8> [[VS2:%.*]], i64 noundef [[VL:%.*]]) #[[ATTR0]] {
+// CHECK-RV64-NEXT: [[ENTRY:.*:]]
+// CHECK-RV64-NEXT: [[TMP0:%.*]] = call <vscale x 16 x bfloat> @llvm.riscv.vfwcvt.f.xu.v.nxv16bf16.nxv16i8.i64(<vscale x 16 x bfloat> [[VD]], <vscale x 16 x i8> [[VS2]], i64 [[VL]])
+// CHECK-RV64-NEXT: ret <vscale x 16 x bfloat> [[TMP0]]
+//
+vbfloat16m4_t test_vfwcvt_f_xu_v_bf16m4_tu(vbfloat16m4_t vd, vuint8m2_t vs2,
+ size_t vl) {
+ return __riscv_vfwcvt_f_bf16_tu(vd, vs2, vl);
+}
+
+// CHECK-RV64-LABEL: define dso_local <vscale x 32 x bfloat> @test_vfwcvt_f_xu_v_bf16m8_tu(
+// CHECK-RV64-SAME: <vscale x 32 x bfloat> [[VD:%.*]], <vscale x 32 x i8> [[VS2:%.*]], i64 noundef [[VL:%.*]]) #[[ATTR0]] {
+// CHECK-RV64-NEXT: [[ENTRY:.*:]]
+// CHECK-RV64-NEXT: [[TMP0:%.*]] = call <vscale x 32 x bfloat> @llvm.riscv.vfwcvt.f.xu.v.nxv32bf16.nxv32i8.i64(<vscale x 32 x bfloat> [[VD]], <vscale x 32 x i8> [[VS2]], i64 [[VL]])
+// CHECK-RV64-NEXT: ret <vscale x 32 x bfloat> [[TMP0]]
+//
+vbfloat16m8_t test_vfwcvt_f_xu_v_bf16m8_tu(vbfloat16m8_t vd, vuint8m4_t vs2,
+ size_t vl) {
+ return __riscv_vfwcvt_f_bf16_tu(vd, vs2, vl);
+}
+
+// CHECK-RV64-LABEL: define dso_local <vscale x 1 x float> @test_vfwcvt_f_f_v_bf16mf4_f32mf2_tu(
+// CHECK-RV64-SAME: <vscale x 1 x float> [[VD:%.*]], <vscale x 1 x bfloat> [[VS2:%.*]], i64 noundef [[VL:%.*]]) #[[ATTR0]] {
+// CHECK-RV64-NEXT: [[ENTRY:.*:]]
+// CHECK-RV64-NEXT: [[TMP0:%.*]] = call <vscale x 1 x float> @llvm.riscv.vfwcvt.f.f.v.nxv1f32.nxv1bf16.i64(<vscale x 1 x float> [[VD]], <vscale x 1 x bfloat> [[VS2]], i64 [[VL]])
+// CHECK-RV64-NEXT: ret <vscale x 1 x float> [[TMP0]]
+//
+vfloat32mf2_t test_vfwcvt_f_f_v_bf16mf4_f32mf2_tu(vfloat32mf2_t vd,
+ vbfloat16mf4_t vs2,
+ size_t vl) {
+ return __riscv_vfwcvt_f_tu(vd, vs2, vl);
+}
+
+// CHECK-RV64-LABEL: define dso_local <vscale x 2 x float> @test_vfwcvt_f_f_v_bf16mf2_f32m1_tu(
+// CHECK-RV64-SAME: <vscale x 2 x float> [[VD:%.*]], <vscale x 2 x bfloat> [[VS2:%.*]], i64 noundef [[VL:%.*]]) #[[ATTR0]] {
+// CHECK-RV64-NEXT: [[ENTRY:.*:]]
+// CHECK-RV64-NEXT: [[TMP0:%.*]] = call <vscale x 2 x float> @llvm.riscv.vfwcvt.f.f.v.nxv2f32.nxv2bf16.i64(<vscale x 2 x float> [[VD]], <vscale x 2 x bfloat> [[VS2]], i64 [[VL]])
+// CHECK-RV64-NEXT: ret <vscale x 2 x float> [[TMP0]]
+//
+vfloat32m1_t test_vfwcvt_f_f_v_bf16mf2_f32m1_tu(vfloat32m1_t vd,
+ vbfloat16mf2_t vs2, size_t vl) {
+ return __riscv_vfwcvt_f_tu(vd, vs2, vl);
+}
+
+// CHECK-RV64-LABEL: define dso_local <vscale x 4 x float> @test_vfwcvt_f_f_v_bf16m1_f32m2_tu(
+// CHECK-RV64-SAME: <vscale x 4 x float> [[VD:%.*]], <vscale x 4 x bfloat> [[VS2:%.*]], i64 noundef [[VL:%.*]]) #[[ATTR0]] {
+// CHECK-RV64-NEXT: [[ENTRY:.*:]]
+// CHECK-RV64-NEXT: [[TMP0:%.*]] = call <vscale x 4 x float> @llvm.riscv.vfwcvt.f.f.v.nxv4f32.nxv4bf16.i64(<vscale x 4 x float> [[VD]], <vscale x 4 x bfloat> [[VS2]], i64 [[VL]])
+// CHECK-RV64-NEXT: ret <vscale x 4 x float> [[TMP0]]
+//
+vfloat32m2_t test_vfwcvt_f_f_v_bf16m1_f32m2_tu(vfloat32m2_t vd,
+ vbfloat16m1_t vs2, size_t vl) {
+ return __riscv_vfwcvt_f_tu(vd, vs2, vl);
+}
+
+// CHECK-RV64-LABEL: define dso_local <vscale x 8 x float> @test_vfwcvt_f_f_v_bf16m2_f32m4_tu(
+// CHECK-RV64-SAME: <vscale x 8 x float> [[VD:%.*]], <vscale x 8 x bfloat> [[VS2:%.*]], i64 noundef [[VL:%.*]]) #[[ATTR0]] {
+// CHECK-RV64-NEXT: [[ENTRY:.*:]]
+// CHECK-RV64-NEXT: [[TMP0:%.*]] = call <vscale x 8 x float> @llvm.riscv.vfwcvt.f.f.v.nxv8f32.nxv8bf16.i64(<vscale x 8 x float> [[VD]], <vscale x 8 x bfloat> [[VS2]], i64 [[VL]])
+// CHECK-RV64-NEXT: ret <vscale x 8 x float> [[TMP0]]
+//
+vfloat32m4_t test_vfwcvt_f_f_v_bf16m2_f32m4_tu(vfloat32m4_t vd,
+ vbfloat16m2_t vs2, size_t vl) {
+ return __riscv_vfwcvt_f_tu(vd, vs2, vl);
+}
+
+// CHECK-RV64-LABEL: define dso_local <vscale x 16 x float> @test_vfwcvt_f_f_v_bf16m4_f32m8_tu(
+// CHECK-RV64-SAME: <vscale x 16 x float> [[VD:%.*]], <vscale x 16 x bfloat> [[VS2:%.*]], i64 noundef [[VL:%.*]]) #[[ATTR0]] {
+// CHECK-RV64-NEXT: [[ENTRY:.*:]]
+// CHECK-RV64-NEXT: [[TMP0:%.*]] = call <vscale x 16 x float> @llvm.riscv.vfwcvt.f.f.v.nxv16f32.nxv16bf16.i64(<vscale x 16 x float> [[VD]], <vscale x 16 x bfloat> [[VS2]], i64 [[VL]])
+// CHECK-RV64-NEXT: ret <vscale x 16 x float> [[TMP0]]
+//
+vfloat32m8_t test_vfwcvt_f_f_v_bf16m4_f32m8_tu(vfloat32m8_t vd,
+ vbfloat16m4_t vs2, size_t vl) {
+ return __riscv_vfwcvt_f_tu(vd, vs2, vl);
+}
+
+// CHECK-RV64-LABEL: define dso_local <vscale x 1 x bfloat> @test_vfwcvt_f_x_v_bf16mf4_tum(
+// CHECK-RV64-SAME: <vscale x 1 x i1> [[VM:%.*]], <vscale x 1 x bfloat> [[VD:%.*]], <vscale x 1 x i8> [[VS2:%.*]], i64 noundef [[VL:%.*]]) #[[ATTR0]] {
+// CHECK-RV64-NEXT: [[ENTRY:.*:]]
+// CHECK-RV64-NEXT: [[TMP0:%.*]] = call <vscale x 1 x bfloat> @llvm.riscv.vfwcvt.f.x.v.mask.nxv1bf16.nxv1i8.i64(<vscale x 1 x bfloat> [[VD]], <vscale x 1 x i8> [[VS2]], <vscale x 1 x i1> [[VM]], i64 [[VL]], i64 2)
+// CHECK-RV64-NEXT: ret <vscale x 1 x bfloat> [[TMP0]]
+//
+vbfloat16mf4_t test_vfwcvt_f_x_v_bf16mf4_tum(vbool64_t vm, vbfloat16mf4_t vd,
+ vint8mf8_t vs2, size_t vl) {
+ return __riscv_vfwcvt_f_bf16_tum(vm, vd, vs2, vl);
+}
+
+// CHECK-RV64-LABEL: define dso_local <vscale x 2 x bfloat> @test_vfwcvt_f_x_v_bf16mf2_tum(
+// CHECK-RV64-SAME: <vscale x 2 x i1> [[VM:%.*]], <vscale x 2 x bfloat> [[VD:%.*]], <vscale x 2 x i8> [[VS2:%.*]], i64 noundef [[VL:%.*]]) #[[ATTR0]] {
+// CHECK-RV64-NEXT: [[ENTRY:.*:]]
+// CHECK-RV64-NEXT: [[TMP0:%.*]] = call <vscale x 2 x bfloat> @llvm.riscv.vfwcvt.f.x.v.mask.nxv2bf16.nxv2i8.i64(<vscale x 2 x bfloat> [[VD]], <vscale x 2 x i8> [[VS2]], <vscale x 2 x i1> [[VM]], i64 [[VL]], i64 2)
+// CHECK-RV64-NEXT: ret <vscale x 2 x bfloat> [[TMP0]]
+//
+vbfloat16mf2_t test_vfwcvt_f_x_v_bf16mf2_tum(vbool32_t vm, vbfloat16mf2_t vd,
+ vint8mf4_t vs2, size_t vl) {
+ return __riscv_vfwcvt_f_bf16_tum(vm, vd, vs2, vl);
+}
+
+// CHECK-RV64-LABEL: define dso_local <vscale x 4 x bfloat> @test_vfwcvt_f_x_v_bf16m1_tum(
+// CHECK-RV64-SAME: <vscale x 4 x i1> [[VM:%.*]], <vscale x 4 x bfloat> [[VD:%.*]], <vscale x 4 x i8> [[VS2:%.*]], i64 noundef [[VL:%.*]]) #[[ATTR0]] {
+// CHECK-RV64-NEXT: [[ENTRY:.*:]]
+// CHECK-RV64-NEXT: [[TMP0:%.*]] = call <vscale x 4 x bfloat> @llvm.riscv.vfwcvt.f.x.v.mask.nxv4bf16.nxv4i8.i64(<vscale x 4 x bfloat> [[VD]], <vscale x 4 x i8> [[VS2]], <vscale x 4 x i1> [[VM]], i64 [[VL]], i64 2)
+// CHECK-RV64-NEXT: ret <vscale x 4 x bfloat> [[TMP0]]
+//
+vbfloat16m1_t test_vfwcvt_f_x_v_bf16m1_tum(vbool16_t vm, vbfloat16m1_t vd,
+ vint8mf2_t vs2, size_t vl) {
+ return __riscv_vfwcvt_f_bf16_tum(vm, vd, vs2, vl);
+}
+
+// CHECK-RV64-LABEL: define dso_local <vscale x 8 x bfloat> @test_vfwcvt_f_x_v_bf16m2_tum(
+// CHECK-RV64-SAME: <vscale x 8 x i1> [[VM:%.*]], <vscale x 8 x bfloat> [[VD:%.*]], <vscale x 8 x i8> [[VS2:%.*]], i64 noundef [[VL:%.*]]) #[[ATTR0]] {
+// CHECK-RV64-NEXT: [[ENTRY:.*:]]
+// CHECK-RV64-NEXT: [[TMP0:%.*]] = call <vscale x 8 x bfloat> @llvm.riscv.vfwcvt.f.x.v.mask.nxv8bf16.nxv8i8.i64(<vscale x 8 x bfloat> [[VD]], <vscale x 8 x i8> [[VS2]], <vscale x 8 x i1> [[VM]], i64 [[VL]], i64 2)
+// CHECK-RV64-NEXT: ret <vscale x 8 x bfloat> [[TMP0]]
+//
+vbfloat16m2_t test_vfwcvt_f_x_v_bf16m2_tum(vbool8_t vm, vbfloat16m2_t vd,
+ vint8m1_t vs2, size_t vl) {
+ return __riscv_vfwcvt_f_bf16_tum(vm, vd, vs2, vl);
+}
+
+// CHECK-RV64-LABEL: define dso_local <vscale x 16 x bfloat> @test_vfwcvt_f_x_v_bf16m4_tum(
+// CHECK-RV64-SAME: <vscale x 16 x i1> [[VM:%.*]], <vscale x 16 x bfloat> [[VD:%.*]], <vscale x 16 x i8> [[VS2:%.*]], i64 noundef [[VL:%.*]]) #[[ATTR0]] {
+// CHECK-RV64-NEXT: [[ENTRY:.*:]]
+// CHECK-RV64-NEXT: [[TMP0:%.*]] = call <vscale x 16 x bfloat> @llvm.riscv.vfwcvt.f.x.v.mask.nxv16bf16.nxv16i8.i64(<vscale x 16 x bfloat> [[VD]], <vscale x 16 x i8> [[VS2]], <vscale x 16 x i1> [[VM]], i64 [[VL]], i64 2)
+// CHECK-RV64-NEXT: ret <vscale x 16 x bfloat> [[TMP0]]
+//
+vbfloat16m4_t test_vfwcvt_f_x_v_bf16m4_tum(vbool4_t vm, vbfloat16m4_t vd,
+ vint8m2_t vs2, size_t vl) {
+ return __riscv_vfwcvt_f_bf16_tum(vm, vd, vs2, vl);
+}
+
+// CHECK-RV64-LABEL: define dso_local <vscale x 32 x bfloat> @test_vfwcvt_f_x_v_bf16m8_tum(
+// CHECK-RV64-SAME: <vscale x 32 x i1> [[VM:%.*]], <vscale x 32 x bfloat> [[VD:%.*]], <vscale x 32 x i8> [[VS2:%.*]], i64 noundef [[VL:%.*]]) #[[ATTR0]] {
+// CHECK-RV64-NEXT: [[ENTRY:.*:]]
+// CHECK-RV64-NEXT: [[TMP0:%.*]] = call <vscale x 32 x bfloat> @llvm.riscv.vfwcvt.f.x.v.mask.nxv32bf16.nxv32i8.i64(<vscale x 32 x bfloat> [[VD]], <vscale x 32 x i8> [[VS2]], <vscale x 32 x i1> [[VM]], i64 [[VL]], i64 2)
+// CHECK-RV64-NEXT: ret <vscale x 32 x bfloat> [[TMP0]]
+//
+vbfloat16m8_t test_vfwcvt_f_x_v_bf16m8_tum(vbool2_t vm, vbfloat16m8_t vd,
+ vint8m4_t vs2, size_t vl) {
+ return __riscv_vfwcvt_f_bf16_tum(vm, vd, vs2, vl);
+}
+
+// CHECK-RV64-LABEL: define dso_local <vscale x 1 x bfloat> @test_vfwcvt_f_xu_v_bf16mf4_tum(
+// CHECK-RV64-SAME: <vscale x 1 x i1> [[VM:%.*]], <vscale x 1 x bfloat> [[VD:%.*]], <vscale x 1 x i8> [[VS2:%.*]], i64 noundef [[VL:%.*]]) #[[ATTR0]] {
+// CHECK-RV64-NEXT: [[ENTRY:.*:]]
+// CHECK-RV64-NEXT: [[TMP0:%.*]] = call <vscale x 1 x bfloat> @llvm.riscv.vfwcvt.f.xu.v.mask.nxv1bf16.nxv1i8.i64(<vscale x 1 x bfloat> [[VD]], <vscale x 1 x i8> [[VS2]], <vscale x 1 x i1> [[VM]], i64 [[VL]], i64 2)
+// CHECK-RV64-NEXT: ret <vscale x 1 x bfloat> [[TMP0]]
+//
+vbfloat16mf4_t test_vfwcvt_f_xu_v_bf16mf4_tum(vbool64_t vm, vbfloat16mf4_t vd,
+ vuint8mf8_t vs2, size_t vl) {
+ return __riscv_vfwcvt_f_bf16_tum(vm, vd, vs2, vl);
+}
+
+// CHECK-RV64-LABEL: define dso_local <vscale x 2 x bfloat> @test_vfwcvt_f_xu_v_bf16mf2_tum(
+// CHECK-RV64-SAME: <vscale x 2 x i1> [[VM:%.*]], <vscale x 2 x bfloat> [[VD:%.*]], <vscale x 2 x i8> [[VS2:%.*]], i64 noundef [[VL:%.*]]) #[[ATTR0]] {
+// CHECK-RV64-NEXT: [[ENTRY:.*:]]
+// CHECK-RV64-NEXT: [[TMP0:%.*]] = call <vscale x 2 x bfloat> @llvm.riscv.vfwcvt.f.xu.v.mask.nxv2bf16.nxv2i8.i64(<vscale x 2 x bfloat> [[VD]], <vscale x 2 x i8> [[VS2]], <vscale x 2 x i1> [[VM]], i64 [[VL]], i64 2)
+// CHECK-RV64-NEXT: ret <vscale x 2 x bfloat> [[TMP0]]
+//
+vbfloat16mf2_t test_vfwcvt_f_xu_v_bf16mf2_tum(vbool32_t vm, vbfloat16mf2_t vd,
+ vuint8mf4_t vs2, size_t vl) {
+ return __riscv_vfwcvt_f_bf16_tum(vm, vd, vs2, vl);
+}
+
+// CHECK-RV64-LABEL: define dso_local <vscale x 4 x bfloat> @test_vfwcvt_f_xu_v_bf16m1_tum(
+// CHECK-RV64-SAME: <vscale x 4 x i1> [[VM:%.*]], <vscale x 4 x bfloat> [[VD:%.*]], <vscale x 4 x i8> [[VS2:%.*]], i64 noundef [[VL:%.*]]) #[[ATTR0]] {
+// CHECK-RV64-NEXT: [[ENTRY:.*:]]
+// CHECK-RV64-NEXT: [[TMP0:%.*]] = call <vscale x 4 x bfloat> @llvm.riscv.vfwcvt.f.xu.v.mask.nxv4bf16.nxv4i8.i64(<vscale x 4 x bfloat> [[VD]], <vscale x 4 x i8> [[VS2]], <vscale x 4 x i1> [[VM]], i64 [[VL]], i64 2)
+// CHECK-RV64-NEXT: ret <vscale x 4 x bfloat> [[TMP0]]
+//
+vbfloat16m1_t test_vfwcvt_f_xu_v_bf16m1_tum(vbool16_t vm, vbfloat16m1_t vd,
+ vuint8mf2_t vs2, size_t vl) {
+ return __riscv_vfwcvt_f_bf16_tum(vm, vd, vs2, vl);
+}
+
+// CHECK-RV64-LABEL: define dso_local <vscale x 8 x bfloat> @test_vfwcvt_f_xu_v_bf16m2_tum(
+// CHECK-RV64-SAME: <vscale x 8 x i1> [[VM:%.*]], <vscale x 8 x bfloat> [[VD:%.*]], <vscale x 8 x i8> [[VS2:%.*]], i64 noundef [[VL:%.*]]) #[[ATTR0]] {
+// CHECK-RV64-NEXT: [[ENTRY:.*:]]
+// CHECK-RV64-NEXT: [[TMP0:%.*]] = call <vscale x 8 x bfloat> @llvm.riscv.vfwcvt.f.xu.v.mask.nxv8bf16.nxv8i8.i64(<vscale x 8 x bfloat> [[VD]], <vscale x 8 x i8> [[VS2]], <vscale x 8 x i1> [[VM]], i64 [[VL]], i64 2)
+// CHECK-RV64-NEXT: ret <vscale x 8 x bfloat> [[TMP0]]
+//
+vbfloat16m2_t test_vfwcvt_f_xu_v_bf16m2_tum(vbool8_t vm, vbfloat16m2_t vd,
+ vuint8m1_t vs2, size_t vl) {
+ return __riscv_vfwcvt_f_bf16_tum(vm, vd, vs2, vl);
+}
+
+// CHECK-RV64-LABEL: define dso_local <vscale x 16 x bfloat> @test_vfwcvt_f_xu_v_bf16m4_tum(
+// CHECK-RV64-SAME: <vscale x 16 x i1> [[VM:%.*]], <vscale x 16 x bfloat> [[VD:%.*]], <vscale x 16 x i8> [[VS2:%.*]], i64 noundef [[VL:%.*]]) #[[ATTR0]] {
+// CHECK-RV64-NEXT: [[ENTRY:.*:]]
+// CHECK-RV64-NEXT: [[TMP0:%.*]] = call <vscale x 16 x bfloat> @llvm.riscv.vfwcvt.f.xu.v.mask.nxv16bf16.nxv16i8.i64(<vscale x 16 x bfloat> [[VD]], <vscale x 16 x i8> [[VS2]], <vscale x 16 x i1> [[VM]], i64 [[VL]], i64 2)
+// CHECK-RV64-NEXT: ret <vscale x 16 x bfloat> [[TMP0]]
+//
+vbfloat16m4_t test_vfwcvt_f_xu_v_bf16m4_tum(vbool4_t vm, vbfloat16m4_t vd,
+ vuint8m2_t vs2, size_t vl) {
+ return __riscv_vfwcvt_f_bf16_tum(vm, vd, vs2, vl);
+}
+
+// CHECK-RV64-LABEL: define dso_local <vscale x 32 x bfloat> @test_vfwcvt_f_xu_v_bf16m8_tum(
+// CHECK-RV64-SAME: <vscale x 32 x i1> [[VM:%.*]], <vscale x 32 x bfloat> [[VD:%.*]], <vscale x 32 x i8> [[VS2:%.*]], i64 noundef [[VL:%.*]]) #[[ATTR0]] {
+// CHECK-RV64-NEXT: [[ENTRY:.*:]]
+// CHECK-RV64-NEXT: [[TMP0:%.*]] = call <vscale x 32 x bfloat> @llvm.riscv.vfwcvt.f.xu.v.mask.nxv32bf16.nxv32i8.i64(<vscale x 32 x bfloat> [[VD]], <vscale x 32 x i8> [[VS2]], <vscale x 32 x i1> [[VM]], i64 [[VL]], i64 2)
+// CHECK-RV64-NEXT: ret <vscale x 32 x bfloat> [[TMP0]]
+//
+vbfloat16m8_t test_vfwcvt_f_xu_v_bf16m8_tum(vbool2_t vm, vbfloat16m8_t vd,
+ vuint8m4_t vs2, size_t vl) {
+ return __riscv_vfwcvt_f_bf16_tum(vm, vd, vs2, vl);
+}
+
+// CHECK-RV64-LABEL: define dso_local <vscale x 1 x float> @test_vfwcvt_f_f_v_bf16mf4_f32mf2_tum(
+// CHECK-RV64-SAME: <vscale x 1 x i1> [[VM:%.*]], <vscale x 1 x float> [[VD:%.*]], <vscale x 1 x bfloat> [[VS2:%.*]], i64 noundef [[VL:%.*]]) #[[ATTR0]] {
+// CHECK-RV64-NEXT: [[ENTRY:.*:]]
+// CHECK-RV64-NEXT: [[TMP0:%.*]] = call <vscale x 1 x float> @llvm.riscv.vfwcvt.f.f.v.mask.nxv1f32.nxv1bf16.i64(<vscale x 1 x float> [[VD]], <vscale x 1 x bfloat> [[VS2]], <vscale x 1 x i1> [[VM]], i64 [[VL]], i64 2)
+// CHECK-RV64-NEXT: ret <vscale x 1 x float> [[TMP0]]
+//
+vfloat32mf2_t test_vfwcvt_f_f_v_bf16mf4_f32mf2_tum(vbool64_t vm,
+ vfloat32mf2_t vd,
+ vbfloat16mf4_t vs2,
+ size_t vl) {
+ return __riscv_vfwcvt_f_tum(vm, vd, vs2, vl);
+}
+
+// CHECK-RV64-LABEL: define dso_local <vscale x 2 x float> @test_vfwcvt_f_f_v_bf16mf2_f32m1_tum(
+// CHECK-RV64-SAME: <vscale x 2 x i1> [[VM:%.*]], <vscale x 2 x float> [[VD:%.*]], <vscale x 2 x bfloat> [[VS2:%.*]], i64 noundef [[VL:%.*]]) #[[ATTR0]] {
+// CHECK-RV64-NEXT: [[ENTRY:.*:]]
+// CHECK-RV64-NEXT: [[TMP0:%.*]] = call <vscale x 2 x float> @llvm.riscv.vfwcvt.f.f.v.mask.nxv2f32.nxv2bf16.i64(<vscale x 2 x float> [[VD]], <vscale x 2 x bfloat> [[VS2]], <vscale x 2 x i1> [[VM]], i64 [[VL]], i64 2)
+// CHECK-RV64-NEXT: ret <vscale x 2 x float> [[TMP0]]
+//
+vfloat32m1_t test_vfwcvt_f_f_v_bf16mf2_f32m1_tum(vbool32_t vm, vfloat32m1_t vd,
+ vbfloat16mf2_t vs2,
+ size_t vl) {
+ return __riscv_vfwcvt_f_tum(vm, vd, vs2, vl);
+}
+
+// CHECK-RV64-LABEL: define dso_local <vscale x 4 x float> @test_vfwcvt_f_f_v_bf16m1_f32m2_tum(
+// CHECK-RV64-SAME: <vscale x 4 x i1> [[VM:%.*]], <vscale x 4 x float> [[VD:%.*]], <vscale x 4 x bfloat> [[VS2:%.*]], i64 noundef [[VL:%.*]]) #[[ATTR0]] {
+// CHECK-RV64-NEXT: [[ENTRY:.*:]]
+// CHECK-RV64-NEXT: [[TMP0:%.*]] = call <vscale x 4 x float> @llvm.riscv.vfwcvt.f.f.v.mask.nxv4f32.nxv4bf16.i64(<vscale x 4 x float> [[VD]], <vscale x 4 x bfloat> [[VS2]], <vscale x 4 x i1> [[VM]], i64 [[VL]], i64 2)
+// CHECK-RV64-NEXT: ret <vscale x 4 x float> [[TMP0]]
+//
+vfloat32m2_t test_vfwcvt_f_f_v_bf16m1_f32m2_tum(vbool16_t vm, vfloat32m2_t vd,
+ vbfloat16m1_t vs2, size_t vl) {
+ return __riscv_vfwcvt_f_tum(vm, vd, vs2, vl);
+}
+
+// CHECK-RV64-LABEL: define dso_local <vscale x 8 x float> @test_vfwcvt_f_f_v_bf16m2_f32m4_tum(
+// CHECK-RV64-SAME: <vscale x 8 x i1> [[VM:%.*]], <vscale x 8 x float> [[VD:%.*]], <vscale x 8 x bfloat> [[VS2:%.*]], i64 noundef [[VL:%.*]]) #[[ATTR0]] {
+// CHECK-RV64-NEXT: [[ENTRY:.*:]]
+// CHECK-RV64-NEXT: [[TMP0:%.*]] = call <vscale x 8 x float> @llvm.riscv.vfwcvt.f.f.v.mask.nxv8f32.nxv8bf16.i64(<vscale x 8 x float> [[VD]], <vscale x 8 x bfloat> [[VS2]], <vscale x 8 x i1> [[VM]], i64 [[VL]], i64 2)
+// CHECK-RV64-NEXT: ret <vscale x 8 x float> [[TMP0]]
+//
+vfloat32m4_t test_vfwcvt_f_f_v_bf16m2_f32m4_tum(vbool8_t vm, vfloat32m4_t vd,
+ vbfloat16m2_t vs2, size_t vl) {
+ return __riscv_vfwcvt_f_tum(vm, vd, vs2, vl);
+}
+
+// CHECK-RV64-LABEL: define dso_local <vscale x 16 x float> @test_vfwcvt_f_f_v_bf16m4_f32m8_tum(
+// CHECK-RV64-SAME: <vscale x 16 x i1> [[VM:%.*]], <vscale x 16 x float> [[VD:%.*]], <vscale x 16 x bfloat> [[VS2:%.*]], i64 noundef [[VL:%.*]]) #[[ATTR0]] {
+// CHECK-RV64-NEXT: [[ENTRY:.*:]]
+// CHECK-RV64-NEXT: [[TMP0:%.*]] = call <vscale x 16 x float> @llvm.riscv.vfwcvt.f.f.v.mask.nxv16f32.nxv16bf16.i64(<vscale x 16 x float> [[VD]], <vscale x 16 x bfloat> [[VS2]], <vscale x 16 x i1> [[VM]], i64 [[VL]], i64 2)
+// CHECK-RV64-NEXT: ret <vscale x 16 x float> [[TMP0]]
+//
+vfloat32m8_t test_vfwcvt_f_f_v_bf16m4_f32m8_tum(vbool4_t vm, vfloat32m8_t vd,
+ vbfloat16m4_t vs2, size_t vl) {
+ return __riscv_vfwcvt_f_tum(vm, vd, vs2, vl);
+}
+
+// CHECK-RV64-LABEL: define dso_local <vscale x 1 x bfloat> @test_vfwcvt_f_x_v_bf16mf4_tumu(
+// CHECK-RV64-SAME: <vscale x 1 x i1> [[VM:%.*]], <vscale x 1 x bfloat> [[VD:%.*]], <vscale x 1 x i8> [[VS2:%.*]], i64 noundef [[VL:%.*]]) #[[ATTR0]] {
+// CHECK-RV64-NEXT: [[ENTRY:.*:]]
+// CHECK-RV64-NEXT: [[TMP0:%.*]] = call <vscale x 1 x bfloat> @llvm.riscv.vfwcvt.f.x.v.mask.nxv1bf16.nxv1i8.i64(<vscale x 1 x bfloat> [[VD]], <vscale x 1 x i8> [[VS2]], <vscale x 1 x i1> [[VM]], i64 [[VL]], i64 0)
+// CHECK-RV64-NEXT: ret <vscale x 1 x bfloat> [[TMP0]]
+//
+vbfloat16mf4_t test_vfwcvt_f_x_v_bf16mf4_tumu(vbool64_t vm, vbfloat16mf4_t vd,
+ vint8mf8_t vs2, size_t vl) {
+ return __riscv_vfwcvt_f_bf16_tumu(vm, vd, vs2, vl);
+}
+
+// CHECK-RV64-LABEL: define dso_local <vscale x 2 x bfloat> @test_vfwcvt_f_x_v_bf16mf2_tumu(
+// CHECK-RV64-SAME: <vscale x 2 x i1> [[VM:%.*]], <vscale x 2 x bfloat> [[VD:%.*]], <vscale x 2 x i8> [[VS2:%.*]], i64 noundef [[VL:%.*]]) #[[ATTR0]] {
+// CHECK-RV64-NEXT: [[ENTRY:.*:]]
+// CHECK-RV64-NEXT: [[TMP0:%.*]] = call <vscale x 2 x bfloat> @llvm.riscv.vfwcvt.f.x.v.mask.nxv2bf16.nxv2i8.i64(<vscale x 2 x bfloat> [[VD]], <vscale x 2 x i8> [[VS2]], <vscale x 2 x i1> [[VM]], i64 [[VL]], i64 0)
+// CHECK-RV64-NEXT: ret <vscale x 2 x bfloat> [[TMP0]]
+//
+vbfloat16mf2_t test_vfwcvt_f_x_v_bf16mf2_tumu(vbool32_t vm, vbfloat16mf2_t vd,
+ vint8mf4_t vs2, size_t vl) {
+ return __riscv_vfwcvt_f_bf16_tumu(vm, vd, vs2, vl);
+}
+
+// CHECK-RV64-LABEL: define dso_local <vscale x 4 x bfloat> @test_vfwcvt_f_x_v_bf16m1_tumu(
+// CHECK-RV64-SAME: <vscale x 4 x i1> [[VM:%.*]], <vscale x 4 x bfloat> [[VD:%.*]], <vscale x 4 x i8> [[VS2:%.*]], i64 noundef [[VL:%.*]]) #[[ATTR0]] {
+// CHECK-RV64-NEXT: [[ENTRY:.*:]]
+// CHECK-RV64-NEXT: [[TMP0:%.*]] = call <vscale x 4 x bfloat> @llvm.riscv.vfwcvt.f.x.v.mask.nxv4bf16.nxv4i8.i64(<vscale x 4 x bfloat> [[VD]], <vscale x 4 x i8> [[VS2]], <vscale x 4 x i1> [[VM]], i64 [[VL]], i64 0)
+// CHECK-RV64-NEXT: ret <vscale x 4 x bfloat> [[TMP0]]
+//
+vbfloat16m1_t test_vfwcvt_f_x_v_bf16m1_tumu(vbool16_t vm, vbfloat16m1_t vd,
+ vint8mf2_t vs2, size_t vl) {
+ return __riscv_vfwcvt_f_bf16_tumu(vm, vd, vs2, vl);
+}
+
+// CHECK-RV64-LABEL: define dso_local <vscale x 8 x bfloat> @test_vfwcvt_f_x_v_bf16m2_tumu(
+// CHECK-RV64-SAME: <vscale x 8 x i1> [[VM:%.*]], <vscale x 8 x bfloat> [[VD:%.*]], <vscale x 8 x i8> [[VS2:%.*]], i64 noundef [[VL:%.*]]) #[[ATTR0]] {
+// CHECK-RV64-NEXT: [[ENTRY:.*:]]
+// CHECK-RV64-NEXT: [[TMP0:%.*]] = call <vscale x 8 x bfloat> @llvm.riscv.vfwcvt.f.x.v.mask.nxv8bf16.nxv8i8.i64(<vscale x 8 x bfloat> [[VD]], <vscale x 8 x i8> [[VS2]], <vscale x 8 x i1> [[VM]], i64 [[VL]], i64 0)
+// CHECK-RV64-NEXT: ret <vscale x 8 x bfloat> [[TMP0]]
+//
+vbfloat16m2_t test_vfwcvt_f_x_v_bf16m2_tumu(vbool8_t vm, vbfloat16m2_t vd,
+ vint8m1_t vs2, size_t vl) {
+ return __riscv_vfwcvt_f_bf16_tumu(vm, vd, vs2, vl);
+}
+
+// CHECK-RV64-LABEL: define dso_local <vscale x 16 x bfloat> @test_vfwcvt_f_x_v_bf16m4_tumu(
+// CHECK-RV64-SAME: <vscale x 16 x i1> [[VM:%.*]], <vscale x 16 x bfloat> [[VD:%.*]], <vscale x 16 x i8> [[VS2:%.*]], i64 noundef [[VL:%.*]]) #[[ATTR0]] {
+// CHECK-RV64-NEXT: [[ENTRY:.*:]]
+// CHECK-RV64-NEXT: [[TMP0:%.*]] = call <vscale x 16 x bfloat> @llvm.riscv.vfwcvt.f.x.v.mask.nxv16bf16.nxv16i8.i64(<vscale x 16 x bfloat> [[VD]], <vscale x 16 x i8> [[VS2]], <vscale x 16 x i1> [[VM]], i64 [[VL]], i64 0)
+// CHECK-RV64-NEXT: ret <vscale x 16 x bfloat> [[TMP0]]
+//
+vbfloat16m4_t test_vfwcvt_f_x_v_bf16m4_tumu(vbool4_t vm, vbfloat16m4_t vd,
+ vint8m2_t vs2, size_t vl) {
+ return __riscv_vfwcvt_f_bf16_tumu(vm, vd, vs2, vl);
+}
+
+// CHECK-RV64-LABEL: define dso_local <vscale x 32 x bfloat> @test_vfwcvt_f_x_v_bf16m8_tumu(
+// CHECK-RV64-SAME: <vscale x 32 x i1> [[VM:%.*]], <vscale x 32 x bfloat> [[VD:%.*]], <vscale x 32 x i8> [[VS2:%.*]], i64 noundef [[VL:%.*]]) #[[ATTR0]] {
+// CHECK-RV64-NEXT: [[ENTRY:.*:]]
+// CHECK-RV64-NEXT: [[TMP0:%.*]] = call <vscale x 32 x bfloat> @llvm.riscv.vfwcvt.f.x.v.mask.nxv32bf16.nxv32i8.i64(<vscale x 32 x bfloat> [[VD]], <vscale x 32 x i8> [[VS2]], <vscale x 32 x i1> [[VM]], i64 [[VL]], i64 0)
+// CHECK-RV64-NEXT: ret <vscale x 32 x bfloat> [[TMP0]]
+//
+vbfloat16m8_t test_vfwcvt_f_x_v_bf16m8_tumu(vbool2_t vm, vbfloat16m8_t vd,
+ vint8m4_t vs2, size_t vl) {
+ return __riscv_vfwcvt_f_bf16_tumu(vm, vd, vs2, vl);
+}
+
+// CHECK-RV64-LABEL: define dso_local <vscale x 1 x bfloat> @test_vfwcvt_f_xu_v_bf16mf4_tumu(
+// CHECK-RV64-SAME: <vscale x 1 x i1> [[VM:%.*]], <vscale x 1 x bfloat> [[VD:%.*]], <vscale x 1 x i8> [[VS2:%.*]], i64 noundef [[VL:%.*]]) #[[ATTR0]] {
+// CHECK-RV64-NEXT: [[ENTRY:.*:]]
+// CHECK-RV64-NEXT: [[TMP0:%.*]] = call <vscale x 1 x bfloat> @llvm.riscv.vfwcvt.f.xu.v.mask.nxv1bf16.nxv1i8.i64(<vscale x 1 x bfloat> [[VD]], <vscale x 1 x i8> [[VS2]], <vscale x 1 x i1> [[VM]], i64 [[VL]], i64 0)
+// CHECK-RV64-NEXT: ret <vscale x 1 x bfloat> [[TMP0]]
+//
+vbfloat16mf4_t test_vfwcvt_f_xu_v_bf16mf4_tumu(vbool64_t vm, vbfloat16mf4_t vd,
+ vuint8mf8_t vs2, size_t vl) {
+ return __riscv_vfwcvt_f_bf16_tumu(vm, vd, vs2, vl);
+}
+
+// CHECK-RV64-LABEL: define dso_local <vscale x 2 x bfloat> @test_vfwcvt_f_xu_v_bf16mf2_tumu(
+// CHECK-RV64-SAME: <vscale x 2 x i1> [[VM:%.*]], <vscale x 2 x bfloat> [[VD:%.*]], <vscale x 2 x i8> [[VS2:%.*]], i64 noundef [[VL:%.*]]) #[[ATTR0]] {
+// CHECK-RV64-NEXT: [[ENTRY:.*:]]
+// CHECK-RV64-NEXT: [[TMP0:%.*]] = call <vscale x 2 x bfloat> @llvm.riscv.vfwcvt.f.xu.v.mask.nxv2bf16.nxv2i8.i64(<vscale x 2 x bfloat> [[VD]], <vscale x 2 x i8> [[VS2]], <vscale x 2 x i1> [[VM]], i64 [[VL]], i64 0)
+// CHECK-RV64-NEXT: ret <vscale x 2 x bfloat> [[TMP0]]
+//
+vbfloat16mf2_t test_vfwcvt_f_xu_v_bf16mf2_tumu(vbool32_t vm, vbfloat16mf2_t vd,
+ vuint8mf4_t vs2, size_t vl) {
+ return __riscv_vfwcvt_f_bf16_tumu(vm, vd, vs2, vl);
+}
+
+// CHECK-RV64-LABEL: define dso_local <vscale x 4 x bfloat> @test_vfwcvt_f_xu_v_bf16m1_tumu(
+// CHECK-RV64-SAME: <vscale x 4 x i1> [[VM:%.*]], <vscale x 4 x bfloat> [[VD:%.*]], <vscale x 4 x i8> [[VS2:%.*]], i64 noundef [[VL:%.*]]) #[[ATTR0]] {
+// CHECK-RV64-NEXT: [[ENTRY:.*:]]
+// CHECK-RV64-NEXT: [[TMP0:%.*]] = call <vscale x 4 x bfloat> @llvm.riscv.vfwcvt.f.xu.v.mask.nxv4bf16.nxv4i8.i64(<vscale x 4 x bfloat> [[VD]], <vscale x 4 x i8> [[VS2]], <vscale x 4 x i1> [[VM]], i64 [[VL]], i64 0)
+// CHECK-RV64-NEXT: ret <vscale x 4 x bfloat> [[TMP0]]
+//
+vbfloat16m1_t test_vfwcvt_f_xu_v_bf16m1_tumu(vbool16_t vm, vbfloat16m1_t vd,
+ vuint8mf2_t vs2, size_t vl) {
+ return __riscv_vfwcvt_f_bf16_tumu(vm, vd, vs2, vl);
+}
+
+// CHECK-RV64-LABEL: define dso_local <vscale x 8 x bfloat> @test_vfwcvt_f_xu_v_bf16m2_tumu(
+// CHECK-RV64-SAME: <vscale x 8 x i1> [[VM:%.*]], <vscale x 8 x bfloat> [[VD:%.*]], <vscale x 8 x i8> [[VS2:%.*]], i64 noundef [[VL:%.*]]) #[[ATTR0]] {
+// CHECK-RV64-NEXT: [[ENTRY:.*:]]
+// CHECK-RV64-NEXT: [[TMP0:%.*]] = call <vscale x 8 x bfloat> @llvm.riscv.vfwcvt.f.xu.v.mask.nxv8bf16.nxv8i8.i64(<vscale x 8 x bfloat> [[VD]], <vscale x 8 x i8> [[VS2]], <vscale x 8 x i1> [[VM]], i64 [[VL]], i64 0)
+// CHECK-RV64-NEXT: ret <vscale x 8 x bfloat> [[TMP0]]
+//
+vbfloat16m2_t test_vfwcvt_f_xu_v_bf16m2_tumu(vbool8_t vm, vbfloat16m2_t vd,
+ vuint8m1_t vs2, size_t vl) {
+ return __riscv_vfwcvt_f_bf16_tumu(vm, vd, vs2, vl);
+}
+
+// CHECK-RV64-LABEL: define dso_local <vscale x 16 x bfloat> @test_vfwcvt_f_xu_v_bf16m4_tumu(
+// CHECK-RV64-SAME: <vscale x 16 x i1> [[VM:%.*]], <vscale x 16 x bfloat> [[VD:%.*]], <vscale x 16 x i8> [[VS2:%.*]], i64 noundef [[VL:%.*]]) #[[ATTR0]] {
+// CHECK-RV64-NEXT: [[ENTRY:.*:]]
+// CHECK-RV64-NEXT: [[TMP0:%.*]] = call <vscale x 16 x bfloat> @llvm.riscv.vfwcvt.f.xu.v.mask.nxv16bf16.nxv16i8.i64(<vscale x 16 x bfloat> [[VD]], <vscale x 16 x i8> [[VS2]], <vscale x 16 x i1> [[VM]], i64 [[VL]], i64 0)
+// CHECK-RV64-NEXT: ret <vscale x 16 x bfloat> [[TMP0]]
+//
+vbfloat16m4_t test_vfwcvt_f_xu_v_bf16m4_tumu(vbool4_t vm, vbfloat16m4_t vd,
+ vuint8m2_t vs2, size_t vl) {
+ return __riscv_vfwcvt_f_bf16_tumu(vm, vd, vs2, vl);
+}
+
+// CHECK-RV64-LABEL: define dso_local <vscale x 32 x bfloat> @test_vfwcvt_f_xu_v_bf16m8_tumu(
+// CHECK-RV64-SAME: <vscale x 32 x i1> [[VM:%.*]], <vscale x 32 x bfloat> [[VD:%.*]], <vscale x 32 x i8> [[VS2:%.*]], i64 noundef [[VL:%.*]]) #[[ATTR0]] {
+// CHECK-RV64-NEXT: [[ENTRY:.*:]]
+// CHECK-RV64-NEXT: [[TMP0:%.*]] = call <vscale x 32 x bfloat> @llvm.riscv.vfwcvt.f.xu.v.mask.nxv32bf16.nxv32i8.i64(<vscale x 32 x bfloat> [[VD]], <vscale x 32 x i8> [[VS2]], <vscale x 32 x i1> [[VM]], i64 [[VL]], i64 0)
+// CHECK-RV64-NEXT: ret <vscale x 32 x bfloat> [[TMP0]]
+//
+vbfloat16m8_t test_vfwcvt_f_xu_v_bf16m8_tumu(vbool2_t vm, vbfloat16m8_t vd,
+ vuint8m4_t vs2, size_t vl) {
+ return __riscv_vfwcvt_f_bf16_tumu(vm, vd, vs2, vl);
+}
+
+// CHECK-RV64-LABEL: define dso_local <vscale x 1 x float> @test_vfwcvt_f_f_v_bf16mf4_f32mf2_tumu(
+// CHECK-RV64-SAME: <vscale x 1 x i1> [[VM:%.*]], <vscale x 1 x float> [[VD:%.*]], <vscale x 1 x bfloat> [[VS2:%.*]], i64 noundef [[VL:%.*]]) #[[ATTR0]] {
+// CHECK-RV64-NEXT: [[ENTRY:.*:]]
+// CHECK-RV64-NEXT: [[TMP0:%.*]] = call <vscale x 1 x float> @llvm.riscv.vfwcvt.f.f.v.mask.nxv1f32.nxv1bf16.i64(<vscale x 1 x float> [[VD]], <vscale x 1 x bfloat> [[VS2]], <vscale x 1 x i1> [[VM]], i64 [[VL]], i64 0)
+// CHECK-RV64-NEXT: ret <vscale x 1 x float> [[TMP0]]
+//
+vfloat32mf2_t test_vfwcvt_f_f_v_bf16mf4_f32mf2_tumu(vbool64_t vm,
+ vfloat32mf2_t vd,
+ vbfloat16mf4_t vs2,
+ size_t vl) {
+ return __riscv_vfwcvt_f_tumu(vm, vd, vs2, vl);
+}
+
+// CHECK-RV64-LABEL: define dso_local <vscale x 2 x float> @test_vfwcvt_f_f_v_bf16mf2_f32m1_tumu(
+// CHECK-RV64-SAME: <vscale x 2 x i1> [[VM:%.*]], <vscale x 2 x float> [[VD:%.*]], <vscale x 2 x bfloat> [[VS2:%.*]], i64 noundef [[VL:%.*]]) #[[ATTR0]] {
+// CHECK-RV64-NEXT: [[ENTRY:.*:]]
+// CHECK-RV64-NEXT: [[TMP0:%.*]] = call <vscale x 2 x float> @llvm.riscv.vfwcvt.f.f.v.mask.nxv2f32.nxv2bf16.i64(<vscale x 2 x float> [[VD]], <vscale x 2 x bfloat> [[VS2]], <vscale x 2 x i1> [[VM]], i64 [[VL]], i64 0)
+// CHECK-RV64-NEXT: ret <vscale x 2 x float> [[TMP0]]
+//
+vfloat32m1_t test_vfwcvt_f_f_v_bf16mf2_f32m1_tumu(vbool32_t vm, vfloat32m1_t vd,
+ vbfloat16mf2_t vs2,
+ size_t vl) {
+ return __riscv_vfwcvt_f_tumu(vm, vd, vs2, vl);
+}
+
+// CHECK-RV64-LABEL: define dso_local <vscale x 4 x float> @test_vfwcvt_f_f_v_bf16m1_f32m2_tumu(
+// CHECK-RV64-SAME: <vscale x 4 x i1> [[VM:%.*]], <vscale x 4 x float> [[VD:%.*]], <vscale x 4 x bfloat> [[VS2:%.*]], i64 noundef [[VL:%.*]]) #[[ATTR0]] {
+// CHECK-RV64-NEXT: [[ENTRY:.*:]]
+// CHECK-RV64-NEXT: [[TMP0:%.*]] = call <vscale x 4 x float> @llvm.riscv.vfwcvt.f.f.v.mask.nxv4f32.nxv4bf16.i64(<vscale x 4 x float> [[VD]], <vscale x 4 x bfloat> [[VS2]], <vscale x 4 x i1> [[VM]], i64 [[VL]], i64 0)
+// CHECK-RV64-NEXT: ret <vscale x 4 x float> [[TMP0]]
+//
+vfloat32m2_t test_vfwcvt_f_f_v_bf16m1_f32m2_tumu(vbool16_t vm, vfloat32m2_t vd,
+ vbfloat16m1_t vs2, size_t vl) {
+ return __riscv_vfwcvt_f_tumu(vm, vd, vs2, vl);
+}
+
+// CHECK-RV64-LABEL: define dso_local <vscale x 8 x float> @test_vfwcvt_f_f_v_bf16m2_f32m4_tumu(
+// CHECK-RV64-SAME: <vscale x 8 x i1> [[VM:%.*]], <vscale x 8 x float> [[VD:%.*]], <vscale x 8 x bfloat> [[VS2:%.*]], i64 noundef [[VL:%.*]]) #[[ATTR0]] {
+// CHECK-RV64-NEXT: [[ENTRY:.*:]]
+// CHECK-RV64-NEXT: [[TMP0:%.*]] = call <vscale x 8 x float> @llvm.riscv.vfwcvt.f.f.v.mask.nxv8f32.nxv8bf16.i64(<vscale x 8 x float> [[VD]], <vscale x 8 x bfloat> [[VS2]], <vscale x 8 x i1> [[VM]], i64 [[VL]], i64 0)
+// CHECK-RV64-NEXT: ret <vscale x 8 x float> [[TMP0]]
+//
+vfloat32m4_t test_vfwcvt_f_f_v_bf16m2_f32m4_tumu(vbool8_t vm, vfloat32m4_t vd,
+ vbfloat16m2_t vs2, size_t vl) {
+ return __riscv_vfwcvt_f_tumu(vm, vd, vs2, vl);
+}
+
+// CHECK-RV64-LABEL: define dso_local <vscale x 16 x float> @test_vfwcvt_f_f_v_bf16m4_f32m8_tumu(
+// CHECK-RV64-SAME: <vscale x 16 x i1> [[VM:%.*]], <vscale x 16 x float> [[VD:%.*]], <vscale x 16 x bfloat> [[VS2:%.*]], i64 noundef [[VL:%.*]]) #[[ATTR0]] {
+// CHECK-RV64-NEXT: [[ENTRY:.*:]]
+// CHECK-RV64-NEXT: [[TMP0:%.*]] = call <vscale x 16 x float> @llvm.riscv.vfwcvt.f.f.v.mask.nxv16f32.nxv16bf16.i64(<vscale x 16 x float> [[VD]], <vscale x 16 x bfloat> [[VS2]], <vscale x 16 x i1> [[VM]], i64 [[VL]], i64 0)
+// CHECK-RV64-NEXT: ret <vscale x 16 x float> [[TMP0]]
+//
+vfloat32m8_t test_vfwcvt_f_f_v_bf16m4_f32m8_tumu(vbool4_t vm, vfloat32m8_t vd,
+ vbfloat16m4_t vs2, size_t vl) {
+ return __riscv_vfwcvt_f_tumu(vm, vd, vs2, vl);
+}
+
+// CHECK-RV64-LABEL: define dso_local <vscale x 1 x bfloat> @test_vfwcvt_f_x_v_bf16mf4_mu(
+// CHECK-RV64-SAME: <vscale x 1 x i1> [[VM:%.*]], <vscale x 1 x bfloat> [[VD:%.*]], <vscale x 1 x i8> [[VS2:%.*]], i64 noundef [[VL:%.*]]) #[[ATTR0]] {
+// CHECK-RV64-NEXT: [[ENTRY:.*:]]
+// CHECK-RV64-NEXT: [[TMP0:%.*]] = call <vscale x 1 x bfloat> @llvm.riscv.vfwcvt.f.x.v.mask.nxv1bf16.nxv1i8.i64(<vscale x 1 x bfloat> [[VD]], <vscale x 1 x i8> [[VS2]], <vscale x 1 x i1> [[VM]], i64 [[VL]], i64 1)
+// CHECK-RV64-NEXT: ret <vscale x 1 x bfloat> [[TMP0]]
+//
+vbfloat16mf4_t test_vfwcvt_f_x_v_bf16mf4_mu(vbool64_t vm, vbfloat16mf4_t vd,
+ vint8mf8_t vs2, size_t vl) {
+ return __riscv_vfwcvt_f_bf16_mu(vm, vd, vs2, vl);
+}
+
+// CHECK-RV64-LABEL: define dso_local <vscale x 2 x bfloat> @test_vfwcvt_f_x_v_bf16mf2_mu(
+// CHECK-RV64-SAME: <vscale x 2 x i1> [[VM:%.*]], <vscale x 2 x bfloat> [[VD:%.*]], <vscale x 2 x i8> [[VS2:%.*]], i64 noundef [[VL:%.*]]) #[[ATTR0]] {
+// CHECK-RV64-NEXT: [[ENTRY:.*:]]
+// CHECK-RV64-NEXT: [[TMP0:%.*]] = call <vscale x 2 x bfloat> @llvm.riscv.vfwcvt.f.x.v.mask.nxv2bf16.nxv2i8.i64(<vscale x 2 x bfloat> [[VD]], <vscale x 2 x i8> [[VS2]], <vscale x 2 x i1> [[VM]], i64 [[VL]], i64 1)
+// CHECK-RV64-NEXT: ret <vscale x 2 x bfloat> [[TMP0]]
+//
+vbfloat16mf2_t test_vfwcvt_f_x_v_bf16mf2_mu(vbool32_t vm, vbfloat16mf2_t vd,
+ vint8mf4_t vs2, size_t vl) {
+ return __riscv_vfwcvt_f_bf16_mu(vm, vd, vs2, vl);
+}
+
+// CHECK-RV64-LABEL: define dso_local <vscale x 4 x bfloat> @test_vfwcvt_f_x_v_bf16m1_mu(
+// CHECK-RV64-SAME: <vscale x 4 x i1> [[VM:%.*]], <vscale x 4 x bfloat> [[VD:%.*]], <vscale x 4 x i8> [[VS2:%.*]], i64 noundef [[VL:%.*]]) #[[ATTR0]] {
+// CHECK-RV64-NEXT: [[ENTRY:.*:]]
+// CHECK-RV64-NEXT: [[TMP0:%.*]] = call <vscale x 4 x bfloat> @llvm.riscv.vfwcvt.f.x.v.mask.nxv4bf16.nxv4i8.i64(<vscale x 4 x bfloat> [[VD]], <vscale x 4 x i8> [[VS2]], <vscale x 4 x i1> [[VM]], i64 [[VL]], i64 1)
+// CHECK-RV64-NEXT: ret <vscale x 4 x bfloat> [[TMP0]]
+//
+vbfloat16m1_t test_vfwcvt_f_x_v_bf16m1_mu(vbool16_t vm, vbfloat16m1_t vd,
+ vint8mf2_t vs2, size_t vl) {
+ return __riscv_vfwcvt_f_bf16_mu(vm, vd, vs2, vl);
+}
+
+// CHECK-RV64-LABEL: define dso_local <vscale x 8 x bfloat> @test_vfwcvt_f_x_v_bf16m2_mu(
+// CHECK-RV64-SAME: <vscale x 8 x i1> [[VM:%.*]], <vscale x 8 x bfloat> [[VD:%.*]], <vscale x 8 x i8> [[VS2:%.*]], i64 noundef [[VL:%.*]]) #[[ATTR0]] {
+// CHECK-RV64-NEXT: [[ENTRY:.*:]]
+// CHECK-RV64-NEXT: [[TMP0:%.*]] = call <vscale x 8 x bfloat> @llvm.riscv.vfwcvt.f.x.v.mask.nxv8bf16.nxv8i8.i64(<vscale x 8 x bfloat> [[VD]], <vscale x 8 x i8> [[VS2]], <vscale x 8 x i1> [[VM]], i64 [[VL]], i64 1)
+// CHECK-RV64-NEXT: ret <vscale x 8 x bfloat> [[TMP0]]
+//
+vbfloat16m2_t test_vfwcvt_f_x_v_bf16m2_mu(vbool8_t vm, vbfloat16m2_t vd,
+ vint8m1_t vs2, size_t vl) {
+ return __riscv_vfwcvt_f_bf16_mu(vm, vd, vs2, vl);
+}
+
+// CHECK-RV64-LABEL: define dso_local <vscale x 16 x bfloat> @test_vfwcvt_f_x_v_bf16m4_mu(
+// CHECK-RV64-SAME: <vscale x 16 x i1> [[VM:%.*]], <vscale x 16 x bfloat> [[VD:%.*]], <vscale x 16 x i8> [[VS2:%.*]], i64 noundef [[VL:%.*]]) #[[ATTR0]] {
+// CHECK-RV64-NEXT: [[ENTRY:.*:]]
+// CHECK-RV64-NEXT: [[TMP0:%.*]] = call <vscale x 16 x bfloat> @llvm.riscv.vfwcvt.f.x.v.mask.nxv16bf16.nxv16i8.i64(<vscale x 16 x bfloat> [[VD]], <vscale x 16 x i8> [[VS2]], <vscale x 16 x i1> [[VM]], i64 [[VL]], i64 1)
+// CHECK-RV64-NEXT: ret <vscale x 16 x bfloat> [[TMP0]]
+//
+vbfloat16m4_t test_vfwcvt_f_x_v_bf16m4_mu(vbool4_t vm, vbfloat16m4_t vd,
+ vint8m2_t vs2, size_t vl) {
+ return __riscv_vfwcvt_f_bf16_mu(vm, vd, vs2, vl);
+}
+
+// CHECK-RV64-LABEL: define dso_local <vscale x 32 x bfloat> @test_vfwcvt_f_x_v_bf16m8_mu(
+// CHECK-RV64-SAME: <vscale x 32 x i1> [[VM:%.*]], <vscale x 32 x bfloat> [[VD:%.*]], <vscale x 32 x i8> [[VS2:%.*]], i64 noundef [[VL:%.*]]) #[[ATTR0]] {
+// CHECK-RV64-NEXT: [[ENTRY:.*:]]
+// CHECK-RV64-NEXT: [[TMP0:%.*]] = call <vscale x 32 x bfloat> @llvm.riscv.vfwcvt.f.x.v.mask.nxv32bf16.nxv32i8.i64(<vscale x 32 x bfloat> [[VD]], <vscale x 32 x i8> [[VS2]], <vscale x 32 x i1> [[VM]], i64 [[VL]], i64 1)
+// CHECK-RV64-NEXT: ret <vscale x 32 x bfloat> [[TMP0]]
+//
+vbfloat16m8_t test_vfwcvt_f_x_v_bf16m8_mu(vbool2_t vm, vbfloat16m8_t vd,
+ vint8m4_t vs2, size_t vl) {
+ return __riscv_vfwcvt_f_bf16_mu(vm, vd, vs2, vl);
+}
+
+// CHECK-RV64-LABEL: define dso_local <vscale x 1 x bfloat> @test_vfwcvt_f_xu_v_bf16mf4_mu(
+// CHECK-RV64-SAME: <vscale x 1 x i1> [[VM:%.*]], <vscale x 1 x bfloat> [[VD:%.*]], <vscale x 1 x i8> [[VS2:%.*]], i64 noundef [[VL:%.*]]) #[[ATTR0]] {
+// CHECK-RV64-NEXT: [[ENTRY:.*:]]
+// CHECK-RV64-NEXT: [[TMP0:%.*]] = call <vscale x 1 x bfloat> @llvm.riscv.vfwcvt.f.xu.v.mask.nxv1bf16.nxv1i8.i64(<vscale x 1 x bfloat> [[VD]], <vscale x 1 x i8> [[VS2]], <vscale x 1 x i1> [[VM]], i64 [[VL]], i64 1)
+// CHECK-RV64-NEXT: ret <vscale x 1 x bfloat> [[TMP0]]
+//
+vbfloat16mf4_t test_vfwcvt_f_xu_v_bf16mf4_mu(vbool64_t vm, vbfloat16mf4_t vd,
+ vuint8mf8_t vs2, size_t vl) {
+ return __riscv_vfwcvt_f_bf16_mu(vm, vd, vs2, vl);
+}
+
+// CHECK-RV64-LABEL: define dso_local <vscale x 2 x bfloat> @test_vfwcvt_f_xu_v_bf16mf2_mu(
+// CHECK-RV64-SAME: <vscale x 2 x i1> [[VM:%.*]], <vscale x 2 x bfloat> [[VD:%.*]], <vscale x 2 x i8> [[VS2:%.*]], i64 noundef [[VL:%.*]]) #[[ATTR0]] {
+// CHECK-RV64-NEXT: [[ENTRY:.*:]]
+// CHECK-RV64-NEXT: [[TMP0:%.*]] = call <vscale x 2 x bfloat> @llvm.riscv.vfwcvt.f.xu.v.mask.nxv2bf16.nxv2i8.i64(<vscale x 2 x bfloat> [[VD]], <vscale x 2 x i8> [[VS2]], <vscale x 2 x i1> [[VM]], i64 [[VL]], i64 1)
+// CHECK-RV64-NEXT: ret <vscale x 2 x bfloat> [[TMP0]]
+//
+vbfloat16mf2_t test_vfwcvt_f_xu_v_bf16mf2_mu(vbool32_t vm, vbfloat16mf2_t vd,
+ vuint8mf4_t vs2, size_t vl) {
+ return __riscv_vfwcvt_f_bf16_mu(vm, vd, vs2, vl);
+}
+
+// CHECK-RV64-LABEL: define dso_local <vscale x 4 x bfloat> @test_vfwcvt_f_xu_v_bf16m1_mu(
+// CHECK-RV64-SAME: <vscale x 4 x i1> [[VM:%.*]], <vscale x 4 x bfloat> [[VD:%.*]], <vscale x 4 x i8> [[VS2:%.*]], i64 noundef [[VL:%.*]]) #[[ATTR0]] {
+// CHECK-RV64-NEXT: [[ENTRY:.*:]]
+// CHECK-RV64-NEXT: [[TMP0:%.*]] = call <vscale x 4 x bfloat> @llvm.riscv.vfwcvt.f.xu.v.mask.nxv4bf16.nxv4i8.i64(<vscale x 4 x bfloat> [[VD]], <vscale x 4 x i8> [[VS2]], <vscale x 4 x i1> [[VM]], i64 [[VL]], i64 1)
+// CHECK-RV64-NEXT: ret <vscale x 4 x bfloat> [[TMP0]]
+//
+vbfloat16m1_t test_vfwcvt_f_xu_v_bf16m1_mu(vbool16_t vm, vbfloat16m1_t vd,
+ vuint8mf2_t vs2, size_t vl) {
+ return __riscv_vfwcvt_f_bf16_mu(vm, vd, vs2, vl);
+}
+
+// CHECK-RV64-LABEL: define dso_local <vscale x 8 x bfloat> @test_vfwcvt_f_xu_v_bf16m2_mu(
+// CHECK-RV64-SAME: <vscale x 8 x i1> [[VM:%.*]], <vscale x 8 x bfloat> [[VD:%.*]], <vscale x 8 x i8> [[VS2:%.*]], i64 noundef [[VL:%.*]]) #[[ATTR0]] {
+// CHECK-RV64-NEXT: [[ENTRY:.*:]]
+// CHECK-RV64-NEXT: [[TMP0:%.*]] = call <vscale x 8 x bfloat> @llvm.riscv.vfwcvt.f.xu.v.mask.nxv8bf16.nxv8i8.i64(<vscale x 8 x bfloat> [[VD]], <vscale x 8 x i8> [[VS2]], <vscale x 8 x i1> [[VM]], i64 [[VL]], i64 1)
+// CHECK-RV64-NEXT: ret <vscale x 8 x bfloat> [[TMP0]]
+//
+vbfloat16m2_t test_vfwcvt_f_xu_v_bf16m2_mu(vbool8_t vm, vbfloat16m2_t vd,
+ vuint8m1_t vs2, size_t vl) {
+ return __riscv_vfwcvt_f_bf16_mu(vm, vd, vs2, vl);
+}
+
+// CHECK-RV64-LABEL: define dso_local <vscale x 16 x bfloat> @test_vfwcvt_f_xu_v_bf16m4_mu(
+// CHECK-RV64-SAME: <vscale x 16 x i1> [[VM:%.*]], <vscale x 16 x bfloat> [[VD:%.*]], <vscale x 16 x i8> [[VS2:%.*]], i64 noundef [[VL:%.*]]) #[[ATTR0]] {
+// CHECK-RV64-NEXT: [[ENTRY:.*:]]
+// CHECK-RV64-NEXT: [[TMP0:%.*]] = call <vscale x 16 x bfloat> @llvm.riscv.vfwcvt.f.xu.v.mask.nxv16bf16.nxv16i8.i64(<vscale x 16 x bfloat> [[VD]], <vscale x 16 x i8> [[VS2]], <vscale x 16 x i1> [[VM]], i64 [[VL]], i64 1)
+// CHECK-RV64-NEXT: ret <vscale x 16 x bfloat> [[TMP0]]
+//
+vbfloat16m4_t test_vfwcvt_f_xu_v_bf16m4_mu(vbool4_t vm, vbfloat16m4_t vd,
+ vuint8m2_t vs2, size_t vl) {
+ return __riscv_vfwcvt_f_bf16_mu(vm, vd, vs2, vl);
+}
+
+// CHECK-RV64-LABEL: define dso_local <vscale x 32 x bfloat> @test_vfwcvt_f_xu_v_bf16m8_mu(
+// CHECK-RV64-SAME: <vscale x 32 x i1> [[VM:%.*]], <vscale x 32 x bfloat> [[VD:%.*]], <vscale x 32 x i8> [[VS2:%.*]], i64 noundef [[VL:%.*]]) #[[ATTR0]] {
+// CHECK-RV64-NEXT: [[ENTRY:.*:]]
+// CHECK-RV64-NEXT: [[TMP0:%.*]] = call <vscale x 32 x bfloat> @llvm.riscv.vfwcvt.f.xu.v.mask.nxv32bf16.nxv32i8.i64(<vscale x 32 x bfloat> [[VD]], <vscale x 32 x i8> [[VS2]], <vscale x 32 x i1> [[VM]], i64 [[VL]], i64 1)
+// CHECK-RV64-NEXT: ret <vscale x 32 x bfloat> [[TMP0]]
+//
+vbfloat16m8_t test_vfwcvt_f_xu_v_bf16m8_mu(vbool2_t vm, vbfloat16m8_t vd,
+ vuint8m4_t vs2, size_t vl) {
+ return __riscv_vfwcvt_f_bf16_mu(vm, vd, vs2, vl);
+}
+
+// CHECK-RV64-LABEL: define dso_local <vscale x 1 x float> @test_vfwcvt_f_f_v_bf16mf4_f32mf2_mu(
+// CHECK-RV64-SAME: <vscale x 1 x i1> [[VM:%.*]], <vscale x 1 x float> [[VD:%.*]], <vscale x 1 x bfloat> [[VS2:%.*]], i64 noundef [[VL:%.*]]) #[[ATTR0]] {
+// CHECK-RV64-NEXT: [[ENTRY:.*:]]
+// CHECK-RV64-NEXT: [[TMP0:%.*]] = call <vscale x 1 x float> @llvm.riscv.vfwcvt.f.f.v.mask.nxv1f32.nxv1bf16.i64(<vscale x 1 x float> [[VD]], <vscale x 1 x bfloat> [[VS2]], <vscale x 1 x i1> [[VM]], i64 [[VL]], i64 1)
+// CHECK-RV64-NEXT: ret <vscale x 1 x float> [[TMP0]]
+//
+vfloat32mf2_t test_vfwcvt_f_f_v_bf16mf4_f32mf2_mu(vbool64_t vm,
+ vfloat32mf2_t vd,
+ vbfloat16mf4_t vs2,
+ size_t vl) {
+ return __riscv_vfwcvt_f_mu(vm, vd, vs2, vl);
+}
+
+// CHECK-RV64-LABEL: define dso_local <vscale x 2 x float> @test_vfwcvt_f_f_v_bf16mf2_f32m1_mu(
+// CHECK-RV64-SAME: <vscale x 2 x i1> [[VM:%.*]], <vscale x 2 x float> [[VD:%.*]], <vscale x 2 x bfloat> [[VS2:%.*]], i64 noundef [[VL:%.*]]) #[[ATTR0]] {
+// CHECK-RV64-NEXT: [[ENTRY:.*:]]
+// CHECK-RV64-NEXT: [[TMP0:%.*]] = call <vscale x 2 x float> @llvm.riscv.vfwcvt.f.f.v.mask.nxv2f32.nxv2bf16.i64(<vscale x 2 x float> [[VD]], <vscale x 2 x bfloat> [[VS2]], <vscale x 2 x i1> [[VM]], i64 [[VL]], i64 1)
+// CHECK-RV64-NEXT: ret <vscale x 2 x float> [[TMP0]]
+//
+vfloat32m1_t test_vfwcvt_f_f_v_bf16mf2_f32m1_mu(vbool32_t vm, vfloat32m1_t vd,
+ vbfloat16mf2_t vs2, size_t vl) {
+ return __riscv_vfwcvt_f_mu(vm, vd, vs2, vl);
+}
+
+// CHECK-RV64-LABEL: define dso_local <vscale x 4 x float> @test_vfwcvt_f_f_v_bf16m1_f32m2_mu(
+// CHECK-RV64-SAME: <vscale x 4 x i1> [[VM:%.*]], <vscale x 4 x float> [[VD:%.*]], <vscale x 4 x bfloat> [[VS2:%.*]], i64 noundef [[VL:%.*]]) #[[ATTR0]] {
+// CHECK-RV64-NEXT: [[ENTRY:.*:]]
+// CHECK-RV64-NEXT: [[TMP0:%.*]] = call <vscale x 4 x float> @llvm.riscv.vfwcvt.f.f.v.mask.nxv4f32.nxv4bf16.i64(<vscale x 4 x float> [[VD]], <vscale x 4 x bfloat> [[VS2]], <vscale x 4 x i1> [[VM]], i64 [[VL]], i64 1)
+// CHECK-RV64-NEXT: ret <vscale x 4 x float> [[TMP0]]
+//
+vfloat32m2_t test_vfwcvt_f_f_v_bf16m1_f32m2_mu(vbool16_t vm, vfloat32m2_t vd,
+ vbfloat16m1_t vs2, size_t vl) {
+ return __riscv_vfwcvt_f_mu(vm, vd, vs2, vl);
+}
+
+// CHECK-RV64-LABEL: define dso_local <vscale x 8 x float> @test_vfwcvt_f_f_v_bf16m2_f32m4_mu(
+// CHECK-RV64-SAME: <vscale x 8 x i1> [[VM:%.*]], <vscale x 8 x float> [[VD:%.*]], <vscale x 8 x bfloat> [[VS2:%.*]], i64 noundef [[VL:%.*]]) #[[ATTR0]] {
+// CHECK-RV64-NEXT: [[ENTRY:.*:]]
+// CHECK-RV64-NEXT: [[TMP0:%.*]] = call <vscale x 8 x float> @llvm.riscv.vfwcvt.f.f.v.mask.nxv8f32.nxv8bf16.i64(<vscale x 8 x float> [[VD]], <vscale x 8 x bfloat> [[VS2]], <vscale x 8 x i1> [[VM]], i64 [[VL]], i64 1)
+// CHECK-RV64-NEXT: ret <vscale x 8 x float> [[TMP0]]
+//
+vfloat32m4_t test_vfwcvt_f_f_v_bf16m2_f32m4_mu(vbool8_t vm, vfloat32m4_t vd,
+ vbfloat16m2_t vs2, size_t vl) {
+ return __riscv_vfwcvt_f_mu(vm, vd, vs2, vl);
+}
+
+// CHECK-RV64-LABEL: define dso_local <vscale x 16 x float> @test_vfwcvt_f_f_v_bf16m4_f32m8_mu(
+// CHECK-RV64-SAME: <vscale x 16 x i1> [[VM:%.*]], <vscale x 16 x float> [[VD:%.*]], <vscale x 16 x bfloat> [[VS2:%.*]], i64 noundef [[VL:%.*]]) #[[ATTR0]] {
+// CHECK-RV64-NEXT: [[ENTRY:.*:]]
+// CHECK-RV64-NEXT: [[TMP0:%.*]] = call <vscale x 16 x float> @llvm.riscv.vfwcvt.f.f.v.mask.nxv16f32.nxv16bf16.i64(<vscale x 16 x float> [[VD]], <vscale x 16 x bfloat> [[VS2]], <vscale x 16 x i1> [[VM]], i64 [[VL]], i64 1)
+// CHECK-RV64-NEXT: ret <vscale x 16 x float> [[TMP0]]
+//
+vfloat32m8_t test_vfwcvt_f_f_v_bf16m4_f32m8_mu(vbool4_t vm, vfloat32m8_t vd,
+ vbfloat16m4_t vs2, size_t vl) {
+ return __riscv_vfwcvt_f_mu(vm, vd, vs2, vl);
+}
diff --git a/clang/test/CodeGen/RISCV/rvv-intrinsics-autogenerated/zvfbfa/policy/overloaded/vfwmacc.c b/clang/test/CodeGen/RISCV/rvv-intrinsics-autogenerated/zvfbfa/policy/overloaded/vfwmacc.c
new file mode 100644
index 0000000..b05f8802
--- /dev/null
+++ b/clang/test/CodeGen/RISCV/rvv-intrinsics-autogenerated/zvfbfa/policy/overloaded/vfwmacc.c
@@ -0,0 +1,977 @@
+// NOTE: Assertions have been autogenerated by utils/update_cc_test_checks.py UTC_ARGS: --version 5
+// REQUIRES: riscv-registered-target
+// RUN: %clang_cc1 -triple riscv64 -target-feature +v \
+// RUN: -target-feature +experimental-zvfbfa -disable-O0-optnone \
+// RUN: -emit-llvm %s -o - | opt -S -passes=mem2reg | \
+// RUN: FileCheck --check-prefix=CHECK-RV64 %s
+
+#include <riscv_vector.h>
+
+// CHECK-RV64-LABEL: define dso_local <vscale x 1 x float> @test_vfwmacc_vv_bf16mf4_f32mf2_tu(
+// CHECK-RV64-SAME: <vscale x 1 x float> [[VD:%.*]], <vscale x 1 x bfloat> [[VS1:%.*]], <vscale x 1 x bfloat> [[VS2:%.*]], i64 noundef [[VL:%.*]]) #[[ATTR0:[0-9]+]] {
+// CHECK-RV64-NEXT: [[ENTRY:.*:]]
+// CHECK-RV64-NEXT: [[TMP0:%.*]] = call <vscale x 1 x float> @llvm.riscv.vfwmacc.nxv1f32.nxv1bf16.nxv1bf16.i64(<vscale x 1 x float> [[VD]], <vscale x 1 x bfloat> [[VS1]], <vscale x 1 x bfloat> [[VS2]], i64 7, i64 [[VL]], i64 2)
+// CHECK-RV64-NEXT: ret <vscale x 1 x float> [[TMP0]]
+//
+vfloat32mf2_t test_vfwmacc_vv_bf16mf4_f32mf2_tu(vfloat32mf2_t vd,
+ vbfloat16mf4_t vs1,
+ vbfloat16mf4_t vs2, size_t vl) {
+ return __riscv_vfwmacc_tu(vd, vs1, vs2, vl);
+}
+
+// CHECK-RV64-LABEL: define dso_local <vscale x 1 x float> @test_vfwmacc_vf_bf16mf4_f32mf2_tu(
+// CHECK-RV64-SAME: <vscale x 1 x float> [[VD:%.*]], bfloat noundef [[VS1:%.*]], <vscale x 1 x bfloat> [[VS2:%.*]], i64 noundef [[VL:%.*]]) #[[ATTR0]] {
+// CHECK-RV64-NEXT: [[ENTRY:.*:]]
+// CHECK-RV64-NEXT: [[TMP0:%.*]] = call <vscale x 1 x float> @llvm.riscv.vfwmacc.nxv1f32.bf16.nxv1bf16.i64(<vscale x 1 x float> [[VD]], bfloat [[VS1]], <vscale x 1 x bfloat> [[VS2]], i64 7, i64 [[VL]], i64 2)
+// CHECK-RV64-NEXT: ret <vscale x 1 x float> [[TMP0]]
+//
+vfloat32mf2_t test_vfwmacc_vf_bf16mf4_f32mf2_tu(vfloat32mf2_t vd, __bf16 vs1,
+ vbfloat16mf4_t vs2, size_t vl) {
+ return __riscv_vfwmacc_tu(vd, vs1, vs2, vl);
+}
+
+// CHECK-RV64-LABEL: define dso_local <vscale x 2 x float> @test_vfwmacc_vv_bf16mf2_f32m1_tu(
+// CHECK-RV64-SAME: <vscale x 2 x float> [[VD:%.*]], <vscale x 2 x bfloat> [[VS1:%.*]], <vscale x 2 x bfloat> [[VS2:%.*]], i64 noundef [[VL:%.*]]) #[[ATTR0]] {
+// CHECK-RV64-NEXT: [[ENTRY:.*:]]
+// CHECK-RV64-NEXT: [[TMP0:%.*]] = call <vscale x 2 x float> @llvm.riscv.vfwmacc.nxv2f32.nxv2bf16.nxv2bf16.i64(<vscale x 2 x float> [[VD]], <vscale x 2 x bfloat> [[VS1]], <vscale x 2 x bfloat> [[VS2]], i64 7, i64 [[VL]], i64 2)
+// CHECK-RV64-NEXT: ret <vscale x 2 x float> [[TMP0]]
+//
+vfloat32m1_t test_vfwmacc_vv_bf16mf2_f32m1_tu(vfloat32m1_t vd,
+ vbfloat16mf2_t vs1,
+ vbfloat16mf2_t vs2, size_t vl) {
+ return __riscv_vfwmacc_tu(vd, vs1, vs2, vl);
+}
+
+// CHECK-RV64-LABEL: define dso_local <vscale x 2 x float> @test_vfwmacc_vf_bf16mf2_f32m1_tu(
+// CHECK-RV64-SAME: <vscale x 2 x float> [[VD:%.*]], bfloat noundef [[VS1:%.*]], <vscale x 2 x bfloat> [[VS2:%.*]], i64 noundef [[VL:%.*]]) #[[ATTR0]] {
+// CHECK-RV64-NEXT: [[ENTRY:.*:]]
+// CHECK-RV64-NEXT: [[TMP0:%.*]] = call <vscale x 2 x float> @llvm.riscv.vfwmacc.nxv2f32.bf16.nxv2bf16.i64(<vscale x 2 x float> [[VD]], bfloat [[VS1]], <vscale x 2 x bfloat> [[VS2]], i64 7, i64 [[VL]], i64 2)
+// CHECK-RV64-NEXT: ret <vscale x 2 x float> [[TMP0]]
+//
+vfloat32m1_t test_vfwmacc_vf_bf16mf2_f32m1_tu(vfloat32m1_t vd, __bf16 vs1,
+ vbfloat16mf2_t vs2, size_t vl) {
+ return __riscv_vfwmacc_tu(vd, vs1, vs2, vl);
+}
+
+// CHECK-RV64-LABEL: define dso_local <vscale x 4 x float> @test_vfwmacc_vv_bf16m1_f32m2_tu(
+// CHECK-RV64-SAME: <vscale x 4 x float> [[VD:%.*]], <vscale x 4 x bfloat> [[VS1:%.*]], <vscale x 4 x bfloat> [[VS2:%.*]], i64 noundef [[VL:%.*]]) #[[ATTR0]] {
+// CHECK-RV64-NEXT: [[ENTRY:.*:]]
+// CHECK-RV64-NEXT: [[TMP0:%.*]] = call <vscale x 4 x float> @llvm.riscv.vfwmacc.nxv4f32.nxv4bf16.nxv4bf16.i64(<vscale x 4 x float> [[VD]], <vscale x 4 x bfloat> [[VS1]], <vscale x 4 x bfloat> [[VS2]], i64 7, i64 [[VL]], i64 2)
+// CHECK-RV64-NEXT: ret <vscale x 4 x float> [[TMP0]]
+//
+vfloat32m2_t test_vfwmacc_vv_bf16m1_f32m2_tu(vfloat32m2_t vd, vbfloat16m1_t vs1,
+ vbfloat16m1_t vs2, size_t vl) {
+ return __riscv_vfwmacc_tu(vd, vs1, vs2, vl);
+}
+
+// CHECK-RV64-LABEL: define dso_local <vscale x 4 x float> @test_vfwmacc_vf_bf16m1_f32m2_tu(
+// CHECK-RV64-SAME: <vscale x 4 x float> [[VD:%.*]], bfloat noundef [[VS1:%.*]], <vscale x 4 x bfloat> [[VS2:%.*]], i64 noundef [[VL:%.*]]) #[[ATTR0]] {
+// CHECK-RV64-NEXT: [[ENTRY:.*:]]
+// CHECK-RV64-NEXT: [[TMP0:%.*]] = call <vscale x 4 x float> @llvm.riscv.vfwmacc.nxv4f32.bf16.nxv4bf16.i64(<vscale x 4 x float> [[VD]], bfloat [[VS1]], <vscale x 4 x bfloat> [[VS2]], i64 7, i64 [[VL]], i64 2)
+// CHECK-RV64-NEXT: ret <vscale x 4 x float> [[TMP0]]
+//
+vfloat32m2_t test_vfwmacc_vf_bf16m1_f32m2_tu(vfloat32m2_t vd, __bf16 vs1,
+ vbfloat16m1_t vs2, size_t vl) {
+ return __riscv_vfwmacc_tu(vd, vs1, vs2, vl);
+}
+
+// CHECK-RV64-LABEL: define dso_local <vscale x 8 x float> @test_vfwmacc_vv_bf16m2_f32m4_tu(
+// CHECK-RV64-SAME: <vscale x 8 x float> [[VD:%.*]], <vscale x 8 x bfloat> [[VS1:%.*]], <vscale x 8 x bfloat> [[VS2:%.*]], i64 noundef [[VL:%.*]]) #[[ATTR0]] {
+// CHECK-RV64-NEXT: [[ENTRY:.*:]]
+// CHECK-RV64-NEXT: [[TMP0:%.*]] = call <vscale x 8 x float> @llvm.riscv.vfwmacc.nxv8f32.nxv8bf16.nxv8bf16.i64(<vscale x 8 x float> [[VD]], <vscale x 8 x bfloat> [[VS1]], <vscale x 8 x bfloat> [[VS2]], i64 7, i64 [[VL]], i64 2)
+// CHECK-RV64-NEXT: ret <vscale x 8 x float> [[TMP0]]
+//
+vfloat32m4_t test_vfwmacc_vv_bf16m2_f32m4_tu(vfloat32m4_t vd, vbfloat16m2_t vs1,
+ vbfloat16m2_t vs2, size_t vl) {
+ return __riscv_vfwmacc_tu(vd, vs1, vs2, vl);
+}
+
+// CHECK-RV64-LABEL: define dso_local <vscale x 8 x float> @test_vfwmacc_vf_bf16m2_f32m4_tu(
+// CHECK-RV64-SAME: <vscale x 8 x float> [[VD:%.*]], bfloat noundef [[VS1:%.*]], <vscale x 8 x bfloat> [[VS2:%.*]], i64 noundef [[VL:%.*]]) #[[ATTR0]] {
+// CHECK-RV64-NEXT: [[ENTRY:.*:]]
+// CHECK-RV64-NEXT: [[TMP0:%.*]] = call <vscale x 8 x float> @llvm.riscv.vfwmacc.nxv8f32.bf16.nxv8bf16.i64(<vscale x 8 x float> [[VD]], bfloat [[VS1]], <vscale x 8 x bfloat> [[VS2]], i64 7, i64 [[VL]], i64 2)
+// CHECK-RV64-NEXT: ret <vscale x 8 x float> [[TMP0]]
+//
+vfloat32m4_t test_vfwmacc_vf_bf16m2_f32m4_tu(vfloat32m4_t vd, __bf16 vs1,
+ vbfloat16m2_t vs2, size_t vl) {
+ return __riscv_vfwmacc_tu(vd, vs1, vs2, vl);
+}
+
+// CHECK-RV64-LABEL: define dso_local <vscale x 16 x float> @test_vfwmacc_vv_bf16m4_f32m8_tu(
+// CHECK-RV64-SAME: <vscale x 16 x float> [[VD:%.*]], <vscale x 16 x bfloat> [[VS1:%.*]], <vscale x 16 x bfloat> [[VS2:%.*]], i64 noundef [[VL:%.*]]) #[[ATTR0]] {
+// CHECK-RV64-NEXT: [[ENTRY:.*:]]
+// CHECK-RV64-NEXT: [[TMP0:%.*]] = call <vscale x 16 x float> @llvm.riscv.vfwmacc.nxv16f32.nxv16bf16.nxv16bf16.i64(<vscale x 16 x float> [[VD]], <vscale x 16 x bfloat> [[VS1]], <vscale x 16 x bfloat> [[VS2]], i64 7, i64 [[VL]], i64 2)
+// CHECK-RV64-NEXT: ret <vscale x 16 x float> [[TMP0]]
+//
+vfloat32m8_t test_vfwmacc_vv_bf16m4_f32m8_tu(vfloat32m8_t vd, vbfloat16m4_t vs1,
+ vbfloat16m4_t vs2, size_t vl) {
+ return __riscv_vfwmacc_tu(vd, vs1, vs2, vl);
+}
+
+// CHECK-RV64-LABEL: define dso_local <vscale x 16 x float> @test_vfwmacc_vf_bf16m4_f32m8_tu(
+// CHECK-RV64-SAME: <vscale x 16 x float> [[VD:%.*]], bfloat noundef [[VS1:%.*]], <vscale x 16 x bfloat> [[VS2:%.*]], i64 noundef [[VL:%.*]]) #[[ATTR0]] {
+// CHECK-RV64-NEXT: [[ENTRY:.*:]]
+// CHECK-RV64-NEXT: [[TMP0:%.*]] = call <vscale x 16 x float> @llvm.riscv.vfwmacc.nxv16f32.bf16.nxv16bf16.i64(<vscale x 16 x float> [[VD]], bfloat [[VS1]], <vscale x 16 x bfloat> [[VS2]], i64 7, i64 [[VL]], i64 2)
+// CHECK-RV64-NEXT: ret <vscale x 16 x float> [[TMP0]]
+//
+vfloat32m8_t test_vfwmacc_vf_bf16m4_f32m8_tu(vfloat32m8_t vd, __bf16 vs1,
+ vbfloat16m4_t vs2, size_t vl) {
+ return __riscv_vfwmacc_tu(vd, vs1, vs2, vl);
+}
+
+// CHECK-RV64-LABEL: define dso_local <vscale x 1 x float> @test_vfwmacc_vv_bf16mf4_f32mf2_tum(
+// CHECK-RV64-SAME: <vscale x 1 x i1> [[VM:%.*]], <vscale x 1 x float> [[VD:%.*]], <vscale x 1 x bfloat> [[VS1:%.*]], <vscale x 1 x bfloat> [[VS2:%.*]], i64 noundef [[VL:%.*]]) #[[ATTR0]] {
+// CHECK-RV64-NEXT: [[ENTRY:.*:]]
+// CHECK-RV64-NEXT: [[TMP0:%.*]] = call <vscale x 1 x float> @llvm.riscv.vfwmacc.mask.nxv1f32.nxv1bf16.nxv1bf16.i64(<vscale x 1 x float> [[VD]], <vscale x 1 x bfloat> [[VS1]], <vscale x 1 x bfloat> [[VS2]], <vscale x 1 x i1> [[VM]], i64 7, i64 [[VL]], i64 2)
+// CHECK-RV64-NEXT: ret <vscale x 1 x float> [[TMP0]]
+//
+vfloat32mf2_t test_vfwmacc_vv_bf16mf4_f32mf2_tum(vbool64_t vm, vfloat32mf2_t vd,
+ vbfloat16mf4_t vs1,
+ vbfloat16mf4_t vs2,
+ size_t vl) {
+ return __riscv_vfwmacc_tum(vm, vd, vs1, vs2, vl);
+}
+
+// CHECK-RV64-LABEL: define dso_local <vscale x 1 x float> @test_vfwmacc_vf_bf16mf4_f32mf2_tum(
+// CHECK-RV64-SAME: <vscale x 1 x i1> [[VM:%.*]], <vscale x 1 x float> [[VD:%.*]], bfloat noundef [[VS1:%.*]], <vscale x 1 x bfloat> [[VS2:%.*]], i64 noundef [[VL:%.*]]) #[[ATTR0]] {
+// CHECK-RV64-NEXT: [[ENTRY:.*:]]
+// CHECK-RV64-NEXT: [[TMP0:%.*]] = call <vscale x 1 x float> @llvm.riscv.vfwmacc.mask.nxv1f32.bf16.nxv1bf16.i64(<vscale x 1 x float> [[VD]], bfloat [[VS1]], <vscale x 1 x bfloat> [[VS2]], <vscale x 1 x i1> [[VM]], i64 7, i64 [[VL]], i64 2)
+// CHECK-RV64-NEXT: ret <vscale x 1 x float> [[TMP0]]
+//
+vfloat32mf2_t test_vfwmacc_vf_bf16mf4_f32mf2_tum(vbool64_t vm, vfloat32mf2_t vd,
+ __bf16 vs1, vbfloat16mf4_t vs2,
+ size_t vl) {
+ return __riscv_vfwmacc_tum(vm, vd, vs1, vs2, vl);
+}
+
+// CHECK-RV64-LABEL: define dso_local <vscale x 2 x float> @test_vfwmacc_vv_bf16mf2_f32m1_tum(
+// CHECK-RV64-SAME: <vscale x 2 x i1> [[VM:%.*]], <vscale x 2 x float> [[VD:%.*]], <vscale x 2 x bfloat> [[VS1:%.*]], <vscale x 2 x bfloat> [[VS2:%.*]], i64 noundef [[VL:%.*]]) #[[ATTR0]] {
+// CHECK-RV64-NEXT: [[ENTRY:.*:]]
+// CHECK-RV64-NEXT: [[TMP0:%.*]] = call <vscale x 2 x float> @llvm.riscv.vfwmacc.mask.nxv2f32.nxv2bf16.nxv2bf16.i64(<vscale x 2 x float> [[VD]], <vscale x 2 x bfloat> [[VS1]], <vscale x 2 x bfloat> [[VS2]], <vscale x 2 x i1> [[VM]], i64 7, i64 [[VL]], i64 2)
+// CHECK-RV64-NEXT: ret <vscale x 2 x float> [[TMP0]]
+//
+vfloat32m1_t test_vfwmacc_vv_bf16mf2_f32m1_tum(vbool32_t vm, vfloat32m1_t vd,
+ vbfloat16mf2_t vs1,
+ vbfloat16mf2_t vs2, size_t vl) {
+ return __riscv_vfwmacc_tum(vm, vd, vs1, vs2, vl);
+}
+
+// CHECK-RV64-LABEL: define dso_local <vscale x 2 x float> @test_vfwmacc_vf_bf16mf2_f32m1_tum(
+// CHECK-RV64-SAME: <vscale x 2 x i1> [[VM:%.*]], <vscale x 2 x float> [[VD:%.*]], bfloat noundef [[VS1:%.*]], <vscale x 2 x bfloat> [[VS2:%.*]], i64 noundef [[VL:%.*]]) #[[ATTR0]] {
+// CHECK-RV64-NEXT: [[ENTRY:.*:]]
+// CHECK-RV64-NEXT: [[TMP0:%.*]] = call <vscale x 2 x float> @llvm.riscv.vfwmacc.mask.nxv2f32.bf16.nxv2bf16.i64(<vscale x 2 x float> [[VD]], bfloat [[VS1]], <vscale x 2 x bfloat> [[VS2]], <vscale x 2 x i1> [[VM]], i64 7, i64 [[VL]], i64 2)
+// CHECK-RV64-NEXT: ret <vscale x 2 x float> [[TMP0]]
+//
+vfloat32m1_t test_vfwmacc_vf_bf16mf2_f32m1_tum(vbool32_t vm, vfloat32m1_t vd,
+ __bf16 vs1, vbfloat16mf2_t vs2,
+ size_t vl) {
+ return __riscv_vfwmacc_tum(vm, vd, vs1, vs2, vl);
+}
+
+// CHECK-RV64-LABEL: define dso_local <vscale x 4 x float> @test_vfwmacc_vv_bf16m1_f32m2_tum(
+// CHECK-RV64-SAME: <vscale x 4 x i1> [[VM:%.*]], <vscale x 4 x float> [[VD:%.*]], <vscale x 4 x bfloat> [[VS1:%.*]], <vscale x 4 x bfloat> [[VS2:%.*]], i64 noundef [[VL:%.*]]) #[[ATTR0]] {
+// CHECK-RV64-NEXT: [[ENTRY:.*:]]
+// CHECK-RV64-NEXT: [[TMP0:%.*]] = call <vscale x 4 x float> @llvm.riscv.vfwmacc.mask.nxv4f32.nxv4bf16.nxv4bf16.i64(<vscale x 4 x float> [[VD]], <vscale x 4 x bfloat> [[VS1]], <vscale x 4 x bfloat> [[VS2]], <vscale x 4 x i1> [[VM]], i64 7, i64 [[VL]], i64 2)
+// CHECK-RV64-NEXT: ret <vscale x 4 x float> [[TMP0]]
+//
+vfloat32m2_t test_vfwmacc_vv_bf16m1_f32m2_tum(vbool16_t vm, vfloat32m2_t vd,
+ vbfloat16m1_t vs1,
+ vbfloat16m1_t vs2, size_t vl) {
+ return __riscv_vfwmacc_tum(vm, vd, vs1, vs2, vl);
+}
+
+// CHECK-RV64-LABEL: define dso_local <vscale x 4 x float> @test_vfwmacc_vf_bf16m1_f32m2_tum(
+// CHECK-RV64-SAME: <vscale x 4 x i1> [[VM:%.*]], <vscale x 4 x float> [[VD:%.*]], bfloat noundef [[VS1:%.*]], <vscale x 4 x bfloat> [[VS2:%.*]], i64 noundef [[VL:%.*]]) #[[ATTR0]] {
+// CHECK-RV64-NEXT: [[ENTRY:.*:]]
+// CHECK-RV64-NEXT: [[TMP0:%.*]] = call <vscale x 4 x float> @llvm.riscv.vfwmacc.mask.nxv4f32.bf16.nxv4bf16.i64(<vscale x 4 x float> [[VD]], bfloat [[VS1]], <vscale x 4 x bfloat> [[VS2]], <vscale x 4 x i1> [[VM]], i64 7, i64 [[VL]], i64 2)
+// CHECK-RV64-NEXT: ret <vscale x 4 x float> [[TMP0]]
+//
+vfloat32m2_t test_vfwmacc_vf_bf16m1_f32m2_tum(vbool16_t vm, vfloat32m2_t vd,
+ __bf16 vs1, vbfloat16m1_t vs2,
+ size_t vl) {
+ return __riscv_vfwmacc_tum(vm, vd, vs1, vs2, vl);
+}
+
+// CHECK-RV64-LABEL: define dso_local <vscale x 8 x float> @test_vfwmacc_vv_bf16m2_f32m4_tum(
+// CHECK-RV64-SAME: <vscale x 8 x i1> [[VM:%.*]], <vscale x 8 x float> [[VD:%.*]], <vscale x 8 x bfloat> [[VS1:%.*]], <vscale x 8 x bfloat> [[VS2:%.*]], i64 noundef [[VL:%.*]]) #[[ATTR0]] {
+// CHECK-RV64-NEXT: [[ENTRY:.*:]]
+// CHECK-RV64-NEXT: [[TMP0:%.*]] = call <vscale x 8 x float> @llvm.riscv.vfwmacc.mask.nxv8f32.nxv8bf16.nxv8bf16.i64(<vscale x 8 x float> [[VD]], <vscale x 8 x bfloat> [[VS1]], <vscale x 8 x bfloat> [[VS2]], <vscale x 8 x i1> [[VM]], i64 7, i64 [[VL]], i64 2)
+// CHECK-RV64-NEXT: ret <vscale x 8 x float> [[TMP0]]
+//
+vfloat32m4_t test_vfwmacc_vv_bf16m2_f32m4_tum(vbool8_t vm, vfloat32m4_t vd,
+ vbfloat16m2_t vs1,
+ vbfloat16m2_t vs2, size_t vl) {
+ return __riscv_vfwmacc_tum(vm, vd, vs1, vs2, vl);
+}
+
+// CHECK-RV64-LABEL: define dso_local <vscale x 8 x float> @test_vfwmacc_vf_bf16m2_f32m4_tum(
+// CHECK-RV64-SAME: <vscale x 8 x i1> [[VM:%.*]], <vscale x 8 x float> [[VD:%.*]], bfloat noundef [[VS1:%.*]], <vscale x 8 x bfloat> [[VS2:%.*]], i64 noundef [[VL:%.*]]) #[[ATTR0]] {
+// CHECK-RV64-NEXT: [[ENTRY:.*:]]
+// CHECK-RV64-NEXT: [[TMP0:%.*]] = call <vscale x 8 x float> @llvm.riscv.vfwmacc.mask.nxv8f32.bf16.nxv8bf16.i64(<vscale x 8 x float> [[VD]], bfloat [[VS1]], <vscale x 8 x bfloat> [[VS2]], <vscale x 8 x i1> [[VM]], i64 7, i64 [[VL]], i64 2)
+// CHECK-RV64-NEXT: ret <vscale x 8 x float> [[TMP0]]
+//
+vfloat32m4_t test_vfwmacc_vf_bf16m2_f32m4_tum(vbool8_t vm, vfloat32m4_t vd,
+ __bf16 vs1, vbfloat16m2_t vs2,
+ size_t vl) {
+ return __riscv_vfwmacc_tum(vm, vd, vs1, vs2, vl);
+}
+
+// CHECK-RV64-LABEL: define dso_local <vscale x 16 x float> @test_vfwmacc_vv_bf16m4_f32m8_tum(
+// CHECK-RV64-SAME: <vscale x 16 x i1> [[VM:%.*]], <vscale x 16 x float> [[VD:%.*]], <vscale x 16 x bfloat> [[VS1:%.*]], <vscale x 16 x bfloat> [[VS2:%.*]], i64 noundef [[VL:%.*]]) #[[ATTR0]] {
+// CHECK-RV64-NEXT: [[ENTRY:.*:]]
+// CHECK-RV64-NEXT: [[TMP0:%.*]] = call <vscale x 16 x float> @llvm.riscv.vfwmacc.mask.nxv16f32.nxv16bf16.nxv16bf16.i64(<vscale x 16 x float> [[VD]], <vscale x 16 x bfloat> [[VS1]], <vscale x 16 x bfloat> [[VS2]], <vscale x 16 x i1> [[VM]], i64 7, i64 [[VL]], i64 2)
+// CHECK-RV64-NEXT: ret <vscale x 16 x float> [[TMP0]]
+//
+vfloat32m8_t test_vfwmacc_vv_bf16m4_f32m8_tum(vbool4_t vm, vfloat32m8_t vd,
+ vbfloat16m4_t vs1,
+ vbfloat16m4_t vs2, size_t vl) {
+ return __riscv_vfwmacc_tum(vm, vd, vs1, vs2, vl);
+}
+
+// CHECK-RV64-LABEL: define dso_local <vscale x 16 x float> @test_vfwmacc_vf_bf16m4_f32m8_tum(
+// CHECK-RV64-SAME: <vscale x 16 x i1> [[VM:%.*]], <vscale x 16 x float> [[VD:%.*]], bfloat noundef [[VS1:%.*]], <vscale x 16 x bfloat> [[VS2:%.*]], i64 noundef [[VL:%.*]]) #[[ATTR0]] {
+// CHECK-RV64-NEXT: [[ENTRY:.*:]]
+// CHECK-RV64-NEXT: [[TMP0:%.*]] = call <vscale x 16 x float> @llvm.riscv.vfwmacc.mask.nxv16f32.bf16.nxv16bf16.i64(<vscale x 16 x float> [[VD]], bfloat [[VS1]], <vscale x 16 x bfloat> [[VS2]], <vscale x 16 x i1> [[VM]], i64 7, i64 [[VL]], i64 2)
+// CHECK-RV64-NEXT: ret <vscale x 16 x float> [[TMP0]]
+//
+vfloat32m8_t test_vfwmacc_vf_bf16m4_f32m8_tum(vbool4_t vm, vfloat32m8_t vd,
+ __bf16 vs1, vbfloat16m4_t vs2,
+ size_t vl) {
+ return __riscv_vfwmacc_tum(vm, vd, vs1, vs2, vl);
+}
+
+// CHECK-RV64-LABEL: define dso_local <vscale x 1 x float> @test_vfwmacc_vv_bf16mf4_f32mf2_tumu(
+// CHECK-RV64-SAME: <vscale x 1 x i1> [[VM:%.*]], <vscale x 1 x float> [[VD:%.*]], <vscale x 1 x bfloat> [[VS1:%.*]], <vscale x 1 x bfloat> [[VS2:%.*]], i64 noundef [[VL:%.*]]) #[[ATTR0]] {
+// CHECK-RV64-NEXT: [[ENTRY:.*:]]
+// CHECK-RV64-NEXT: [[TMP0:%.*]] = call <vscale x 1 x float> @llvm.riscv.vfwmacc.mask.nxv1f32.nxv1bf16.nxv1bf16.i64(<vscale x 1 x float> [[VD]], <vscale x 1 x bfloat> [[VS1]], <vscale x 1 x bfloat> [[VS2]], <vscale x 1 x i1> [[VM]], i64 7, i64 [[VL]], i64 0)
+// CHECK-RV64-NEXT: ret <vscale x 1 x float> [[TMP0]]
+//
+vfloat32mf2_t test_vfwmacc_vv_bf16mf4_f32mf2_tumu(vbool64_t vm,
+ vfloat32mf2_t vd,
+ vbfloat16mf4_t vs1,
+ vbfloat16mf4_t vs2,
+ size_t vl) {
+ return __riscv_vfwmacc_tumu(vm, vd, vs1, vs2, vl);
+}
+
+// CHECK-RV64-LABEL: define dso_local <vscale x 1 x float> @test_vfwmacc_vf_bf16mf4_f32mf2_tumu(
+// CHECK-RV64-SAME: <vscale x 1 x i1> [[VM:%.*]], <vscale x 1 x float> [[VD:%.*]], bfloat noundef [[VS1:%.*]], <vscale x 1 x bfloat> [[VS2:%.*]], i64 noundef [[VL:%.*]]) #[[ATTR0]] {
+// CHECK-RV64-NEXT: [[ENTRY:.*:]]
+// CHECK-RV64-NEXT: [[TMP0:%.*]] = call <vscale x 1 x float> @llvm.riscv.vfwmacc.mask.nxv1f32.bf16.nxv1bf16.i64(<vscale x 1 x float> [[VD]], bfloat [[VS1]], <vscale x 1 x bfloat> [[VS2]], <vscale x 1 x i1> [[VM]], i64 7, i64 [[VL]], i64 0)
+// CHECK-RV64-NEXT: ret <vscale x 1 x float> [[TMP0]]
+//
+vfloat32mf2_t test_vfwmacc_vf_bf16mf4_f32mf2_tumu(vbool64_t vm,
+ vfloat32mf2_t vd, __bf16 vs1,
+ vbfloat16mf4_t vs2,
+ size_t vl) {
+ return __riscv_vfwmacc_tumu(vm, vd, vs1, vs2, vl);
+}
+
+// CHECK-RV64-LABEL: define dso_local <vscale x 2 x float> @test_vfwmacc_vv_bf16mf2_f32m1_tumu(
+// CHECK-RV64-SAME: <vscale x 2 x i1> [[VM:%.*]], <vscale x 2 x float> [[VD:%.*]], <vscale x 2 x bfloat> [[VS1:%.*]], <vscale x 2 x bfloat> [[VS2:%.*]], i64 noundef [[VL:%.*]]) #[[ATTR0]] {
+// CHECK-RV64-NEXT: [[ENTRY:.*:]]
+// CHECK-RV64-NEXT: [[TMP0:%.*]] = call <vscale x 2 x float> @llvm.riscv.vfwmacc.mask.nxv2f32.nxv2bf16.nxv2bf16.i64(<vscale x 2 x float> [[VD]], <vscale x 2 x bfloat> [[VS1]], <vscale x 2 x bfloat> [[VS2]], <vscale x 2 x i1> [[VM]], i64 7, i64 [[VL]], i64 0)
+// CHECK-RV64-NEXT: ret <vscale x 2 x float> [[TMP0]]
+//
+vfloat32m1_t test_vfwmacc_vv_bf16mf2_f32m1_tumu(vbool32_t vm, vfloat32m1_t vd,
+ vbfloat16mf2_t vs1,
+ vbfloat16mf2_t vs2, size_t vl) {
+ return __riscv_vfwmacc_tumu(vm, vd, vs1, vs2, vl);
+}
+
+// CHECK-RV64-LABEL: define dso_local <vscale x 2 x float> @test_vfwmacc_vf_bf16mf2_f32m1_tumu(
+// CHECK-RV64-SAME: <vscale x 2 x i1> [[VM:%.*]], <vscale x 2 x float> [[VD:%.*]], bfloat noundef [[VS1:%.*]], <vscale x 2 x bfloat> [[VS2:%.*]], i64 noundef [[VL:%.*]]) #[[ATTR0]] {
+// CHECK-RV64-NEXT: [[ENTRY:.*:]]
+// CHECK-RV64-NEXT: [[TMP0:%.*]] = call <vscale x 2 x float> @llvm.riscv.vfwmacc.mask.nxv2f32.bf16.nxv2bf16.i64(<vscale x 2 x float> [[VD]], bfloat [[VS1]], <vscale x 2 x bfloat> [[VS2]], <vscale x 2 x i1> [[VM]], i64 7, i64 [[VL]], i64 0)
+// CHECK-RV64-NEXT: ret <vscale x 2 x float> [[TMP0]]
+//
+vfloat32m1_t test_vfwmacc_vf_bf16mf2_f32m1_tumu(vbool32_t vm, vfloat32m1_t vd,
+ __bf16 vs1, vbfloat16mf2_t vs2,
+ size_t vl) {
+ return __riscv_vfwmacc_tumu(vm, vd, vs1, vs2, vl);
+}
+
+// CHECK-RV64-LABEL: define dso_local <vscale x 4 x float> @test_vfwmacc_vv_bf16m1_f32m2_tumu(
+// CHECK-RV64-SAME: <vscale x 4 x i1> [[VM:%.*]], <vscale x 4 x float> [[VD:%.*]], <vscale x 4 x bfloat> [[VS1:%.*]], <vscale x 4 x bfloat> [[VS2:%.*]], i64 noundef [[VL:%.*]]) #[[ATTR0]] {
+// CHECK-RV64-NEXT: [[ENTRY:.*:]]
+// CHECK-RV64-NEXT: [[TMP0:%.*]] = call <vscale x 4 x float> @llvm.riscv.vfwmacc.mask.nxv4f32.nxv4bf16.nxv4bf16.i64(<vscale x 4 x float> [[VD]], <vscale x 4 x bfloat> [[VS1]], <vscale x 4 x bfloat> [[VS2]], <vscale x 4 x i1> [[VM]], i64 7, i64 [[VL]], i64 0)
+// CHECK-RV64-NEXT: ret <vscale x 4 x float> [[TMP0]]
+//
+vfloat32m2_t test_vfwmacc_vv_bf16m1_f32m2_tumu(vbool16_t vm, vfloat32m2_t vd,
+ vbfloat16m1_t vs1,
+ vbfloat16m1_t vs2, size_t vl) {
+ return __riscv_vfwmacc_tumu(vm, vd, vs1, vs2, vl);
+}
+
+// CHECK-RV64-LABEL: define dso_local <vscale x 4 x float> @test_vfwmacc_vf_bf16m1_f32m2_tumu(
+// CHECK-RV64-SAME: <vscale x 4 x i1> [[VM:%.*]], <vscale x 4 x float> [[VD:%.*]], bfloat noundef [[VS1:%.*]], <vscale x 4 x bfloat> [[VS2:%.*]], i64 noundef [[VL:%.*]]) #[[ATTR0]] {
+// CHECK-RV64-NEXT: [[ENTRY:.*:]]
+// CHECK-RV64-NEXT: [[TMP0:%.*]] = call <vscale x 4 x float> @llvm.riscv.vfwmacc.mask.nxv4f32.bf16.nxv4bf16.i64(<vscale x 4 x float> [[VD]], bfloat [[VS1]], <vscale x 4 x bfloat> [[VS2]], <vscale x 4 x i1> [[VM]], i64 7, i64 [[VL]], i64 0)
+// CHECK-RV64-NEXT: ret <vscale x 4 x float> [[TMP0]]
+//
+vfloat32m2_t test_vfwmacc_vf_bf16m1_f32m2_tumu(vbool16_t vm, vfloat32m2_t vd,
+ __bf16 vs1, vbfloat16m1_t vs2,
+ size_t vl) {
+ return __riscv_vfwmacc_tumu(vm, vd, vs1, vs2, vl);
+}
+
+// CHECK-RV64-LABEL: define dso_local <vscale x 8 x float> @test_vfwmacc_vv_bf16m2_f32m4_tumu(
+// CHECK-RV64-SAME: <vscale x 8 x i1> [[VM:%.*]], <vscale x 8 x float> [[VD:%.*]], <vscale x 8 x bfloat> [[VS1:%.*]], <vscale x 8 x bfloat> [[VS2:%.*]], i64 noundef [[VL:%.*]]) #[[ATTR0]] {
+// CHECK-RV64-NEXT: [[ENTRY:.*:]]
+// CHECK-RV64-NEXT: [[TMP0:%.*]] = call <vscale x 8 x float> @llvm.riscv.vfwmacc.mask.nxv8f32.nxv8bf16.nxv8bf16.i64(<vscale x 8 x float> [[VD]], <vscale x 8 x bfloat> [[VS1]], <vscale x 8 x bfloat> [[VS2]], <vscale x 8 x i1> [[VM]], i64 7, i64 [[VL]], i64 0)
+// CHECK-RV64-NEXT: ret <vscale x 8 x float> [[TMP0]]
+//
+vfloat32m4_t test_vfwmacc_vv_bf16m2_f32m4_tumu(vbool8_t vm, vfloat32m4_t vd,
+ vbfloat16m2_t vs1,
+ vbfloat16m2_t vs2, size_t vl) {
+ return __riscv_vfwmacc_tumu(vm, vd, vs1, vs2, vl);
+}
+
+// CHECK-RV64-LABEL: define dso_local <vscale x 8 x float> @test_vfwmacc_vf_bf16m2_f32m4_tumu(
+// CHECK-RV64-SAME: <vscale x 8 x i1> [[VM:%.*]], <vscale x 8 x float> [[VD:%.*]], bfloat noundef [[VS1:%.*]], <vscale x 8 x bfloat> [[VS2:%.*]], i64 noundef [[VL:%.*]]) #[[ATTR0]] {
+// CHECK-RV64-NEXT: [[ENTRY:.*:]]
+// CHECK-RV64-NEXT: [[TMP0:%.*]] = call <vscale x 8 x float> @llvm.riscv.vfwmacc.mask.nxv8f32.bf16.nxv8bf16.i64(<vscale x 8 x float> [[VD]], bfloat [[VS1]], <vscale x 8 x bfloat> [[VS2]], <vscale x 8 x i1> [[VM]], i64 7, i64 [[VL]], i64 0)
+// CHECK-RV64-NEXT: ret <vscale x 8 x float> [[TMP0]]
+//
+vfloat32m4_t test_vfwmacc_vf_bf16m2_f32m4_tumu(vbool8_t vm, vfloat32m4_t vd,
+ __bf16 vs1, vbfloat16m2_t vs2,
+ size_t vl) {
+ return __riscv_vfwmacc_tumu(vm, vd, vs1, vs2, vl);
+}
+
+// CHECK-RV64-LABEL: define dso_local <vscale x 16 x float> @test_vfwmacc_vv_bf16m4_f32m8_tumu(
+// CHECK-RV64-SAME: <vscale x 16 x i1> [[VM:%.*]], <vscale x 16 x float> [[VD:%.*]], <vscale x 16 x bfloat> [[VS1:%.*]], <vscale x 16 x bfloat> [[VS2:%.*]], i64 noundef [[VL:%.*]]) #[[ATTR0]] {
+// CHECK-RV64-NEXT: [[ENTRY:.*:]]
+// CHECK-RV64-NEXT: [[TMP0:%.*]] = call <vscale x 16 x float> @llvm.riscv.vfwmacc.mask.nxv16f32.nxv16bf16.nxv16bf16.i64(<vscale x 16 x float> [[VD]], <vscale x 16 x bfloat> [[VS1]], <vscale x 16 x bfloat> [[VS2]], <vscale x 16 x i1> [[VM]], i64 7, i64 [[VL]], i64 0)
+// CHECK-RV64-NEXT: ret <vscale x 16 x float> [[TMP0]]
+//
+vfloat32m8_t test_vfwmacc_vv_bf16m4_f32m8_tumu(vbool4_t vm, vfloat32m8_t vd,
+ vbfloat16m4_t vs1,
+ vbfloat16m4_t vs2, size_t vl) {
+ return __riscv_vfwmacc_tumu(vm, vd, vs1, vs2, vl);
+}
+
+// CHECK-RV64-LABEL: define dso_local <vscale x 16 x float> @test_vfwmacc_vf_bf16m4_f32m8_tumu(
+// CHECK-RV64-SAME: <vscale x 16 x i1> [[VM:%.*]], <vscale x 16 x float> [[VD:%.*]], bfloat noundef [[VS1:%.*]], <vscale x 16 x bfloat> [[VS2:%.*]], i64 noundef [[VL:%.*]]) #[[ATTR0]] {
+// CHECK-RV64-NEXT: [[ENTRY:.*:]]
+// CHECK-RV64-NEXT: [[TMP0:%.*]] = call <vscale x 16 x float> @llvm.riscv.vfwmacc.mask.nxv16f32.bf16.nxv16bf16.i64(<vscale x 16 x float> [[VD]], bfloat [[VS1]], <vscale x 16 x bfloat> [[VS2]], <vscale x 16 x i1> [[VM]], i64 7, i64 [[VL]], i64 0)
+// CHECK-RV64-NEXT: ret <vscale x 16 x float> [[TMP0]]
+//
+vfloat32m8_t test_vfwmacc_vf_bf16m4_f32m8_tumu(vbool4_t vm, vfloat32m8_t vd,
+ __bf16 vs1, vbfloat16m4_t vs2,
+ size_t vl) {
+ return __riscv_vfwmacc_tumu(vm, vd, vs1, vs2, vl);
+}
+
+// CHECK-RV64-LABEL: define dso_local <vscale x 1 x float> @test_vfwmacc_vv_bf16mf4_f32mf2_mu(
+// CHECK-RV64-SAME: <vscale x 1 x i1> [[VM:%.*]], <vscale x 1 x float> [[VD:%.*]], <vscale x 1 x bfloat> [[VS1:%.*]], <vscale x 1 x bfloat> [[VS2:%.*]], i64 noundef [[VL:%.*]]) #[[ATTR0]] {
+// CHECK-RV64-NEXT: [[ENTRY:.*:]]
+// CHECK-RV64-NEXT: [[TMP0:%.*]] = call <vscale x 1 x float> @llvm.riscv.vfwmacc.mask.nxv1f32.nxv1bf16.nxv1bf16.i64(<vscale x 1 x float> [[VD]], <vscale x 1 x bfloat> [[VS1]], <vscale x 1 x bfloat> [[VS2]], <vscale x 1 x i1> [[VM]], i64 7, i64 [[VL]], i64 1)
+// CHECK-RV64-NEXT: ret <vscale x 1 x float> [[TMP0]]
+//
+vfloat32mf2_t test_vfwmacc_vv_bf16mf4_f32mf2_mu(vbool64_t vm, vfloat32mf2_t vd,
+ vbfloat16mf4_t vs1,
+ vbfloat16mf4_t vs2, size_t vl) {
+ return __riscv_vfwmacc_mu(vm, vd, vs1, vs2, vl);
+}
+
+// CHECK-RV64-LABEL: define dso_local <vscale x 1 x float> @test_vfwmacc_vf_bf16mf4_f32mf2_mu(
+// CHECK-RV64-SAME: <vscale x 1 x i1> [[VM:%.*]], <vscale x 1 x float> [[VD:%.*]], bfloat noundef [[VS1:%.*]], <vscale x 1 x bfloat> [[VS2:%.*]], i64 noundef [[VL:%.*]]) #[[ATTR0]] {
+// CHECK-RV64-NEXT: [[ENTRY:.*:]]
+// CHECK-RV64-NEXT: [[TMP0:%.*]] = call <vscale x 1 x float> @llvm.riscv.vfwmacc.mask.nxv1f32.bf16.nxv1bf16.i64(<vscale x 1 x float> [[VD]], bfloat [[VS1]], <vscale x 1 x bfloat> [[VS2]], <vscale x 1 x i1> [[VM]], i64 7, i64 [[VL]], i64 1)
+// CHECK-RV64-NEXT: ret <vscale x 1 x float> [[TMP0]]
+//
+vfloat32mf2_t test_vfwmacc_vf_bf16mf4_f32mf2_mu(vbool64_t vm, vfloat32mf2_t vd,
+ __bf16 vs1, vbfloat16mf4_t vs2,
+ size_t vl) {
+ return __riscv_vfwmacc_mu(vm, vd, vs1, vs2, vl);
+}
+
+// CHECK-RV64-LABEL: define dso_local <vscale x 2 x float> @test_vfwmacc_vv_bf16mf2_f32m1_mu(
+// CHECK-RV64-SAME: <vscale x 2 x i1> [[VM:%.*]], <vscale x 2 x float> [[VD:%.*]], <vscale x 2 x bfloat> [[VS1:%.*]], <vscale x 2 x bfloat> [[VS2:%.*]], i64 noundef [[VL:%.*]]) #[[ATTR0]] {
+// CHECK-RV64-NEXT: [[ENTRY:.*:]]
+// CHECK-RV64-NEXT: [[TMP0:%.*]] = call <vscale x 2 x float> @llvm.riscv.vfwmacc.mask.nxv2f32.nxv2bf16.nxv2bf16.i64(<vscale x 2 x float> [[VD]], <vscale x 2 x bfloat> [[VS1]], <vscale x 2 x bfloat> [[VS2]], <vscale x 2 x i1> [[VM]], i64 7, i64 [[VL]], i64 1)
+// CHECK-RV64-NEXT: ret <vscale x 2 x float> [[TMP0]]
+//
+vfloat32m1_t test_vfwmacc_vv_bf16mf2_f32m1_mu(vbool32_t vm, vfloat32m1_t vd,
+ vbfloat16mf2_t vs1,
+ vbfloat16mf2_t vs2, size_t vl) {
+ return __riscv_vfwmacc_mu(vm, vd, vs1, vs2, vl);
+}
+
+// CHECK-RV64-LABEL: define dso_local <vscale x 2 x float> @test_vfwmacc_vf_bf16mf2_f32m1_mu(
+// CHECK-RV64-SAME: <vscale x 2 x i1> [[VM:%.*]], <vscale x 2 x float> [[VD:%.*]], bfloat noundef [[VS1:%.*]], <vscale x 2 x bfloat> [[VS2:%.*]], i64 noundef [[VL:%.*]]) #[[ATTR0]] {
+// CHECK-RV64-NEXT: [[ENTRY:.*:]]
+// CHECK-RV64-NEXT: [[TMP0:%.*]] = call <vscale x 2 x float> @llvm.riscv.vfwmacc.mask.nxv2f32.bf16.nxv2bf16.i64(<vscale x 2 x float> [[VD]], bfloat [[VS1]], <vscale x 2 x bfloat> [[VS2]], <vscale x 2 x i1> [[VM]], i64 7, i64 [[VL]], i64 1)
+// CHECK-RV64-NEXT: ret <vscale x 2 x float> [[TMP0]]
+//
+vfloat32m1_t test_vfwmacc_vf_bf16mf2_f32m1_mu(vbool32_t vm, vfloat32m1_t vd,
+ __bf16 vs1, vbfloat16mf2_t vs2,
+ size_t vl) {
+ return __riscv_vfwmacc_mu(vm, vd, vs1, vs2, vl);
+}
+
+// CHECK-RV64-LABEL: define dso_local <vscale x 4 x float> @test_vfwmacc_vv_bf16m1_f32m2_mu(
+// CHECK-RV64-SAME: <vscale x 4 x i1> [[VM:%.*]], <vscale x 4 x float> [[VD:%.*]], <vscale x 4 x bfloat> [[VS1:%.*]], <vscale x 4 x bfloat> [[VS2:%.*]], i64 noundef [[VL:%.*]]) #[[ATTR0]] {
+// CHECK-RV64-NEXT: [[ENTRY:.*:]]
+// CHECK-RV64-NEXT: [[TMP0:%.*]] = call <vscale x 4 x float> @llvm.riscv.vfwmacc.mask.nxv4f32.nxv4bf16.nxv4bf16.i64(<vscale x 4 x float> [[VD]], <vscale x 4 x bfloat> [[VS1]], <vscale x 4 x bfloat> [[VS2]], <vscale x 4 x i1> [[VM]], i64 7, i64 [[VL]], i64 1)
+// CHECK-RV64-NEXT: ret <vscale x 4 x float> [[TMP0]]
+//
+vfloat32m2_t test_vfwmacc_vv_bf16m1_f32m2_mu(vbool16_t vm, vfloat32m2_t vd,
+ vbfloat16m1_t vs1,
+ vbfloat16m1_t vs2, size_t vl) {
+ return __riscv_vfwmacc_mu(vm, vd, vs1, vs2, vl);
+}
+
+// CHECK-RV64-LABEL: define dso_local <vscale x 4 x float> @test_vfwmacc_vf_bf16m1_f32m2_mu(
+// CHECK-RV64-SAME: <vscale x 4 x i1> [[VM:%.*]], <vscale x 4 x float> [[VD:%.*]], bfloat noundef [[VS1:%.*]], <vscale x 4 x bfloat> [[VS2:%.*]], i64 noundef [[VL:%.*]]) #[[ATTR0]] {
+// CHECK-RV64-NEXT: [[ENTRY:.*:]]
+// CHECK-RV64-NEXT: [[TMP0:%.*]] = call <vscale x 4 x float> @llvm.riscv.vfwmacc.mask.nxv4f32.bf16.nxv4bf16.i64(<vscale x 4 x float> [[VD]], bfloat [[VS1]], <vscale x 4 x bfloat> [[VS2]], <vscale x 4 x i1> [[VM]], i64 7, i64 [[VL]], i64 1)
+// CHECK-RV64-NEXT: ret <vscale x 4 x float> [[TMP0]]
+//
+vfloat32m2_t test_vfwmacc_vf_bf16m1_f32m2_mu(vbool16_t vm, vfloat32m2_t vd,
+ __bf16 vs1, vbfloat16m1_t vs2,
+ size_t vl) {
+ return __riscv_vfwmacc_mu(vm, vd, vs1, vs2, vl);
+}
+
+// CHECK-RV64-LABEL: define dso_local <vscale x 8 x float> @test_vfwmacc_vv_bf16m2_f32m4_mu(
+// CHECK-RV64-SAME: <vscale x 8 x i1> [[VM:%.*]], <vscale x 8 x float> [[VD:%.*]], <vscale x 8 x bfloat> [[VS1:%.*]], <vscale x 8 x bfloat> [[VS2:%.*]], i64 noundef [[VL:%.*]]) #[[ATTR0]] {
+// CHECK-RV64-NEXT: [[ENTRY:.*:]]
+// CHECK-RV64-NEXT: [[TMP0:%.*]] = call <vscale x 8 x float> @llvm.riscv.vfwmacc.mask.nxv8f32.nxv8bf16.nxv8bf16.i64(<vscale x 8 x float> [[VD]], <vscale x 8 x bfloat> [[VS1]], <vscale x 8 x bfloat> [[VS2]], <vscale x 8 x i1> [[VM]], i64 7, i64 [[VL]], i64 1)
+// CHECK-RV64-NEXT: ret <vscale x 8 x float> [[TMP0]]
+//
+vfloat32m4_t test_vfwmacc_vv_bf16m2_f32m4_mu(vbool8_t vm, vfloat32m4_t vd,
+ vbfloat16m2_t vs1,
+ vbfloat16m2_t vs2, size_t vl) {
+ return __riscv_vfwmacc_mu(vm, vd, vs1, vs2, vl);
+}
+
+// CHECK-RV64-LABEL: define dso_local <vscale x 8 x float> @test_vfwmacc_vf_bf16m2_f32m4_mu(
+// CHECK-RV64-SAME: <vscale x 8 x i1> [[VM:%.*]], <vscale x 8 x float> [[VD:%.*]], bfloat noundef [[VS1:%.*]], <vscale x 8 x bfloat> [[VS2:%.*]], i64 noundef [[VL:%.*]]) #[[ATTR0]] {
+// CHECK-RV64-NEXT: [[ENTRY:.*:]]
+// CHECK-RV64-NEXT: [[TMP0:%.*]] = call <vscale x 8 x float> @llvm.riscv.vfwmacc.mask.nxv8f32.bf16.nxv8bf16.i64(<vscale x 8 x float> [[VD]], bfloat [[VS1]], <vscale x 8 x bfloat> [[VS2]], <vscale x 8 x i1> [[VM]], i64 7, i64 [[VL]], i64 1)
+// CHECK-RV64-NEXT: ret <vscale x 8 x float> [[TMP0]]
+//
+vfloat32m4_t test_vfwmacc_vf_bf16m2_f32m4_mu(vbool8_t vm, vfloat32m4_t vd,
+ __bf16 vs1, vbfloat16m2_t vs2,
+ size_t vl) {
+ return __riscv_vfwmacc_mu(vm, vd, vs1, vs2, vl);
+}
+
+// CHECK-RV64-LABEL: define dso_local <vscale x 16 x float> @test_vfwmacc_vv_bf16m4_f32m8_mu(
+// CHECK-RV64-SAME: <vscale x 16 x i1> [[VM:%.*]], <vscale x 16 x float> [[VD:%.*]], <vscale x 16 x bfloat> [[VS1:%.*]], <vscale x 16 x bfloat> [[VS2:%.*]], i64 noundef [[VL:%.*]]) #[[ATTR0]] {
+// CHECK-RV64-NEXT: [[ENTRY:.*:]]
+// CHECK-RV64-NEXT: [[TMP0:%.*]] = call <vscale x 16 x float> @llvm.riscv.vfwmacc.mask.nxv16f32.nxv16bf16.nxv16bf16.i64(<vscale x 16 x float> [[VD]], <vscale x 16 x bfloat> [[VS1]], <vscale x 16 x bfloat> [[VS2]], <vscale x 16 x i1> [[VM]], i64 7, i64 [[VL]], i64 1)
+// CHECK-RV64-NEXT: ret <vscale x 16 x float> [[TMP0]]
+//
+vfloat32m8_t test_vfwmacc_vv_bf16m4_f32m8_mu(vbool4_t vm, vfloat32m8_t vd,
+ vbfloat16m4_t vs1,
+ vbfloat16m4_t vs2, size_t vl) {
+ return __riscv_vfwmacc_mu(vm, vd, vs1, vs2, vl);
+}
+
+// CHECK-RV64-LABEL: define dso_local <vscale x 16 x float> @test_vfwmacc_vf_bf16m4_f32m8_mu(
+// CHECK-RV64-SAME: <vscale x 16 x i1> [[VM:%.*]], <vscale x 16 x float> [[VD:%.*]], bfloat noundef [[VS1:%.*]], <vscale x 16 x bfloat> [[VS2:%.*]], i64 noundef [[VL:%.*]]) #[[ATTR0]] {
+// CHECK-RV64-NEXT: [[ENTRY:.*:]]
+// CHECK-RV64-NEXT: [[TMP0:%.*]] = call <vscale x 16 x float> @llvm.riscv.vfwmacc.mask.nxv16f32.bf16.nxv16bf16.i64(<vscale x 16 x float> [[VD]], bfloat [[VS1]], <vscale x 16 x bfloat> [[VS2]], <vscale x 16 x i1> [[VM]], i64 7, i64 [[VL]], i64 1)
+// CHECK-RV64-NEXT: ret <vscale x 16 x float> [[TMP0]]
+//
+vfloat32m8_t test_vfwmacc_vf_bf16m4_f32m8_mu(vbool4_t vm, vfloat32m8_t vd,
+ __bf16 vs1, vbfloat16m4_t vs2,
+ size_t vl) {
+ return __riscv_vfwmacc_mu(vm, vd, vs1, vs2, vl);
+}
+
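+// The _rm tests below exercise the explicit rounding-mode overloads: the
+// __RISCV_FRM_RNE argument (0) shows up as the frm operand of the generated
+// llvm.riscv.vfwmacc* calls, whereas the tests above use the dynamic rounding
+// mode, which is encoded as frm operand 7.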
+// CHECK-RV64-LABEL: define dso_local <vscale x 1 x float> @test_vfwmacc_vv_bf16mf4_f32mf2_rm_tu(
+// CHECK-RV64-SAME: <vscale x 1 x float> [[VD:%.*]], <vscale x 1 x bfloat> [[VS1:%.*]], <vscale x 1 x bfloat> [[VS2:%.*]], i64 noundef [[VL:%.*]]) #[[ATTR0]] {
+// CHECK-RV64-NEXT: [[ENTRY:.*:]]
+// CHECK-RV64-NEXT: [[TMP0:%.*]] = call <vscale x 1 x float> @llvm.riscv.vfwmacc.nxv1f32.nxv1bf16.nxv1bf16.i64(<vscale x 1 x float> [[VD]], <vscale x 1 x bfloat> [[VS1]], <vscale x 1 x bfloat> [[VS2]], i64 0, i64 [[VL]], i64 2)
+// CHECK-RV64-NEXT: ret <vscale x 1 x float> [[TMP0]]
+//
+vfloat32mf2_t test_vfwmacc_vv_bf16mf4_f32mf2_rm_tu(vfloat32mf2_t vd,
+ vbfloat16mf4_t vs1,
+ vbfloat16mf4_t vs2,
+ size_t vl) {
+ return __riscv_vfwmacc_tu(vd, vs1, vs2, __RISCV_FRM_RNE, vl);
+}
+
+// CHECK-RV64-LABEL: define dso_local <vscale x 1 x float> @test_vfwmacc_vf_bf16mf4_f32mf2_rm_tu(
+// CHECK-RV64-SAME: <vscale x 1 x float> [[VD:%.*]], bfloat noundef [[VS1:%.*]], <vscale x 1 x bfloat> [[VS2:%.*]], i64 noundef [[VL:%.*]]) #[[ATTR0]] {
+// CHECK-RV64-NEXT: [[ENTRY:.*:]]
+// CHECK-RV64-NEXT: [[TMP0:%.*]] = call <vscale x 1 x float> @llvm.riscv.vfwmacc.nxv1f32.bf16.nxv1bf16.i64(<vscale x 1 x float> [[VD]], bfloat [[VS1]], <vscale x 1 x bfloat> [[VS2]], i64 0, i64 [[VL]], i64 2)
+// CHECK-RV64-NEXT: ret <vscale x 1 x float> [[TMP0]]
+//
+vfloat32mf2_t test_vfwmacc_vf_bf16mf4_f32mf2_rm_tu(vfloat32mf2_t vd, __bf16 vs1,
+ vbfloat16mf4_t vs2,
+ size_t vl) {
+ return __riscv_vfwmacc_tu(vd, vs1, vs2, __RISCV_FRM_RNE, vl);
+}
+
+// CHECK-RV64-LABEL: define dso_local <vscale x 2 x float> @test_vfwmacc_vv_bf16mf2_f32m1_rm_tu(
+// CHECK-RV64-SAME: <vscale x 2 x float> [[VD:%.*]], <vscale x 2 x bfloat> [[VS1:%.*]], <vscale x 2 x bfloat> [[VS2:%.*]], i64 noundef [[VL:%.*]]) #[[ATTR0]] {
+// CHECK-RV64-NEXT: [[ENTRY:.*:]]
+// CHECK-RV64-NEXT: [[TMP0:%.*]] = call <vscale x 2 x float> @llvm.riscv.vfwmacc.nxv2f32.nxv2bf16.nxv2bf16.i64(<vscale x 2 x float> [[VD]], <vscale x 2 x bfloat> [[VS1]], <vscale x 2 x bfloat> [[VS2]], i64 0, i64 [[VL]], i64 2)
+// CHECK-RV64-NEXT: ret <vscale x 2 x float> [[TMP0]]
+//
+vfloat32m1_t test_vfwmacc_vv_bf16mf2_f32m1_rm_tu(vfloat32m1_t vd,
+ vbfloat16mf2_t vs1,
+ vbfloat16mf2_t vs2,
+ size_t vl) {
+ return __riscv_vfwmacc_tu(vd, vs1, vs2, __RISCV_FRM_RNE, vl);
+}
+
+// CHECK-RV64-LABEL: define dso_local <vscale x 2 x float> @test_vfwmacc_vf_bf16mf2_f32m1_rm_tu(
+// CHECK-RV64-SAME: <vscale x 2 x float> [[VD:%.*]], bfloat noundef [[VS1:%.*]], <vscale x 2 x bfloat> [[VS2:%.*]], i64 noundef [[VL:%.*]]) #[[ATTR0]] {
+// CHECK-RV64-NEXT: [[ENTRY:.*:]]
+// CHECK-RV64-NEXT: [[TMP0:%.*]] = call <vscale x 2 x float> @llvm.riscv.vfwmacc.nxv2f32.bf16.nxv2bf16.i64(<vscale x 2 x float> [[VD]], bfloat [[VS1]], <vscale x 2 x bfloat> [[VS2]], i64 0, i64 [[VL]], i64 2)
+// CHECK-RV64-NEXT: ret <vscale x 2 x float> [[TMP0]]
+//
+vfloat32m1_t test_vfwmacc_vf_bf16mf2_f32m1_rm_tu(vfloat32m1_t vd, __bf16 vs1,
+ vbfloat16mf2_t vs2,
+ size_t vl) {
+ return __riscv_vfwmacc_tu(vd, vs1, vs2, __RISCV_FRM_RNE, vl);
+}
+
+// CHECK-RV64-LABEL: define dso_local <vscale x 4 x float> @test_vfwmacc_vv_bf16m1_f32m2_rm_tu(
+// CHECK-RV64-SAME: <vscale x 4 x float> [[VD:%.*]], <vscale x 4 x bfloat> [[VS1:%.*]], <vscale x 4 x bfloat> [[VS2:%.*]], i64 noundef [[VL:%.*]]) #[[ATTR0]] {
+// CHECK-RV64-NEXT: [[ENTRY:.*:]]
+// CHECK-RV64-NEXT: [[TMP0:%.*]] = call <vscale x 4 x float> @llvm.riscv.vfwmacc.nxv4f32.nxv4bf16.nxv4bf16.i64(<vscale x 4 x float> [[VD]], <vscale x 4 x bfloat> [[VS1]], <vscale x 4 x bfloat> [[VS2]], i64 0, i64 [[VL]], i64 2)
+// CHECK-RV64-NEXT: ret <vscale x 4 x float> [[TMP0]]
+//
+vfloat32m2_t test_vfwmacc_vv_bf16m1_f32m2_rm_tu(vfloat32m2_t vd,
+ vbfloat16m1_t vs1,
+ vbfloat16m1_t vs2, size_t vl) {
+ return __riscv_vfwmacc_tu(vd, vs1, vs2, __RISCV_FRM_RNE, vl);
+}
+
+// CHECK-RV64-LABEL: define dso_local <vscale x 4 x float> @test_vfwmacc_vf_bf16m1_f32m2_rm_tu(
+// CHECK-RV64-SAME: <vscale x 4 x float> [[VD:%.*]], bfloat noundef [[VS1:%.*]], <vscale x 4 x bfloat> [[VS2:%.*]], i64 noundef [[VL:%.*]]) #[[ATTR0]] {
+// CHECK-RV64-NEXT: [[ENTRY:.*:]]
+// CHECK-RV64-NEXT: [[TMP0:%.*]] = call <vscale x 4 x float> @llvm.riscv.vfwmacc.nxv4f32.bf16.nxv4bf16.i64(<vscale x 4 x float> [[VD]], bfloat [[VS1]], <vscale x 4 x bfloat> [[VS2]], i64 0, i64 [[VL]], i64 2)
+// CHECK-RV64-NEXT: ret <vscale x 4 x float> [[TMP0]]
+//
+vfloat32m2_t test_vfwmacc_vf_bf16m1_f32m2_rm_tu(vfloat32m2_t vd, __bf16 vs1,
+ vbfloat16m1_t vs2, size_t vl) {
+ return __riscv_vfwmacc_tu(vd, vs1, vs2, __RISCV_FRM_RNE, vl);
+}
+
+// CHECK-RV64-LABEL: define dso_local <vscale x 8 x float> @test_vfwmacc_vv_bf16m2_f32m4_rm_tu(
+// CHECK-RV64-SAME: <vscale x 8 x float> [[VD:%.*]], <vscale x 8 x bfloat> [[VS1:%.*]], <vscale x 8 x bfloat> [[VS2:%.*]], i64 noundef [[VL:%.*]]) #[[ATTR0]] {
+// CHECK-RV64-NEXT: [[ENTRY:.*:]]
+// CHECK-RV64-NEXT: [[TMP0:%.*]] = call <vscale x 8 x float> @llvm.riscv.vfwmacc.nxv8f32.nxv8bf16.nxv8bf16.i64(<vscale x 8 x float> [[VD]], <vscale x 8 x bfloat> [[VS1]], <vscale x 8 x bfloat> [[VS2]], i64 0, i64 [[VL]], i64 2)
+// CHECK-RV64-NEXT: ret <vscale x 8 x float> [[TMP0]]
+//
+vfloat32m4_t test_vfwmacc_vv_bf16m2_f32m4_rm_tu(vfloat32m4_t vd,
+ vbfloat16m2_t vs1,
+ vbfloat16m2_t vs2, size_t vl) {
+ return __riscv_vfwmacc_tu(vd, vs1, vs2, __RISCV_FRM_RNE, vl);
+}
+
+// CHECK-RV64-LABEL: define dso_local <vscale x 8 x float> @test_vfwmacc_vf_bf16m2_f32m4_rm_tu(
+// CHECK-RV64-SAME: <vscale x 8 x float> [[VD:%.*]], bfloat noundef [[VS1:%.*]], <vscale x 8 x bfloat> [[VS2:%.*]], i64 noundef [[VL:%.*]]) #[[ATTR0]] {
+// CHECK-RV64-NEXT: [[ENTRY:.*:]]
+// CHECK-RV64-NEXT: [[TMP0:%.*]] = call <vscale x 8 x float> @llvm.riscv.vfwmacc.nxv8f32.bf16.nxv8bf16.i64(<vscale x 8 x float> [[VD]], bfloat [[VS1]], <vscale x 8 x bfloat> [[VS2]], i64 0, i64 [[VL]], i64 2)
+// CHECK-RV64-NEXT: ret <vscale x 8 x float> [[TMP0]]
+//
+vfloat32m4_t test_vfwmacc_vf_bf16m2_f32m4_rm_tu(vfloat32m4_t vd, __bf16 vs1,
+ vbfloat16m2_t vs2, size_t vl) {
+ return __riscv_vfwmacc_tu(vd, vs1, vs2, __RISCV_FRM_RNE, vl);
+}
+
+// CHECK-RV64-LABEL: define dso_local <vscale x 16 x float> @test_vfwmacc_vv_bf16m4_f32m8_rm_tu(
+// CHECK-RV64-SAME: <vscale x 16 x float> [[VD:%.*]], <vscale x 16 x bfloat> [[VS1:%.*]], <vscale x 16 x bfloat> [[VS2:%.*]], i64 noundef [[VL:%.*]]) #[[ATTR0]] {
+// CHECK-RV64-NEXT: [[ENTRY:.*:]]
+// CHECK-RV64-NEXT: [[TMP0:%.*]] = call <vscale x 16 x float> @llvm.riscv.vfwmacc.nxv16f32.nxv16bf16.nxv16bf16.i64(<vscale x 16 x float> [[VD]], <vscale x 16 x bfloat> [[VS1]], <vscale x 16 x bfloat> [[VS2]], i64 0, i64 [[VL]], i64 2)
+// CHECK-RV64-NEXT: ret <vscale x 16 x float> [[TMP0]]
+//
+vfloat32m8_t test_vfwmacc_vv_bf16m4_f32m8_rm_tu(vfloat32m8_t vd,
+ vbfloat16m4_t vs1,
+ vbfloat16m4_t vs2, size_t vl) {
+ return __riscv_vfwmacc_tu(vd, vs1, vs2, __RISCV_FRM_RNE, vl);
+}
+
+// CHECK-RV64-LABEL: define dso_local <vscale x 16 x float> @test_vfwmacc_vf_bf16m4_f32m8_rm_tu(
+// CHECK-RV64-SAME: <vscale x 16 x float> [[VD:%.*]], bfloat noundef [[VS1:%.*]], <vscale x 16 x bfloat> [[VS2:%.*]], i64 noundef [[VL:%.*]]) #[[ATTR0]] {
+// CHECK-RV64-NEXT: [[ENTRY:.*:]]
+// CHECK-RV64-NEXT: [[TMP0:%.*]] = call <vscale x 16 x float> @llvm.riscv.vfwmacc.nxv16f32.bf16.nxv16bf16.i64(<vscale x 16 x float> [[VD]], bfloat [[VS1]], <vscale x 16 x bfloat> [[VS2]], i64 0, i64 [[VL]], i64 2)
+// CHECK-RV64-NEXT: ret <vscale x 16 x float> [[TMP0]]
+//
+vfloat32m8_t test_vfwmacc_vf_bf16m4_f32m8_rm_tu(vfloat32m8_t vd, __bf16 vs1,
+ vbfloat16m4_t vs2, size_t vl) {
+ return __riscv_vfwmacc_tu(vd, vs1, vs2, __RISCV_FRM_RNE, vl);
+}
+
+// CHECK-RV64-LABEL: define dso_local <vscale x 1 x float> @test_vfwmacc_vv_bf16mf4_f32mf2_rm_tum(
+// CHECK-RV64-SAME: <vscale x 1 x i1> [[VM:%.*]], <vscale x 1 x float> [[VD:%.*]], <vscale x 1 x bfloat> [[VS1:%.*]], <vscale x 1 x bfloat> [[VS2:%.*]], i64 noundef [[VL:%.*]]) #[[ATTR0]] {
+// CHECK-RV64-NEXT: [[ENTRY:.*:]]
+// CHECK-RV64-NEXT: [[TMP0:%.*]] = call <vscale x 1 x float> @llvm.riscv.vfwmacc.mask.nxv1f32.nxv1bf16.nxv1bf16.i64(<vscale x 1 x float> [[VD]], <vscale x 1 x bfloat> [[VS1]], <vscale x 1 x bfloat> [[VS2]], <vscale x 1 x i1> [[VM]], i64 0, i64 [[VL]], i64 2)
+// CHECK-RV64-NEXT: ret <vscale x 1 x float> [[TMP0]]
+//
+vfloat32mf2_t test_vfwmacc_vv_bf16mf4_f32mf2_rm_tum(vbool64_t vm,
+ vfloat32mf2_t vd,
+ vbfloat16mf4_t vs1,
+ vbfloat16mf4_t vs2,
+ size_t vl) {
+ return __riscv_vfwmacc_tum(vm, vd, vs1, vs2, __RISCV_FRM_RNE, vl);
+}
+
+// CHECK-RV64-LABEL: define dso_local <vscale x 1 x float> @test_vfwmacc_vf_bf16mf4_f32mf2_rm_tum(
+// CHECK-RV64-SAME: <vscale x 1 x i1> [[VM:%.*]], <vscale x 1 x float> [[VD:%.*]], bfloat noundef [[VS1:%.*]], <vscale x 1 x bfloat> [[VS2:%.*]], i64 noundef [[VL:%.*]]) #[[ATTR0]] {
+// CHECK-RV64-NEXT: [[ENTRY:.*:]]
+// CHECK-RV64-NEXT: [[TMP0:%.*]] = call <vscale x 1 x float> @llvm.riscv.vfwmacc.mask.nxv1f32.bf16.nxv1bf16.i64(<vscale x 1 x float> [[VD]], bfloat [[VS1]], <vscale x 1 x bfloat> [[VS2]], <vscale x 1 x i1> [[VM]], i64 0, i64 [[VL]], i64 2)
+// CHECK-RV64-NEXT: ret <vscale x 1 x float> [[TMP0]]
+//
+vfloat32mf2_t test_vfwmacc_vf_bf16mf4_f32mf2_rm_tum(
+ vbool64_t vm, vfloat32mf2_t vd, __bf16 vs1, vbfloat16mf4_t vs2, size_t vl) {
+ return __riscv_vfwmacc_tum(vm, vd, vs1, vs2, __RISCV_FRM_RNE, vl);
+}
+
+// CHECK-RV64-LABEL: define dso_local <vscale x 2 x float> @test_vfwmacc_vv_bf16mf2_f32m1_rm_tum(
+// CHECK-RV64-SAME: <vscale x 2 x i1> [[VM:%.*]], <vscale x 2 x float> [[VD:%.*]], <vscale x 2 x bfloat> [[VS1:%.*]], <vscale x 2 x bfloat> [[VS2:%.*]], i64 noundef [[VL:%.*]]) #[[ATTR0]] {
+// CHECK-RV64-NEXT: [[ENTRY:.*:]]
+// CHECK-RV64-NEXT: [[TMP0:%.*]] = call <vscale x 2 x float> @llvm.riscv.vfwmacc.mask.nxv2f32.nxv2bf16.nxv2bf16.i64(<vscale x 2 x float> [[VD]], <vscale x 2 x bfloat> [[VS1]], <vscale x 2 x bfloat> [[VS2]], <vscale x 2 x i1> [[VM]], i64 0, i64 [[VL]], i64 2)
+// CHECK-RV64-NEXT: ret <vscale x 2 x float> [[TMP0]]
+//
+vfloat32m1_t test_vfwmacc_vv_bf16mf2_f32m1_rm_tum(vbool32_t vm, vfloat32m1_t vd,
+ vbfloat16mf2_t vs1,
+ vbfloat16mf2_t vs2,
+ size_t vl) {
+ return __riscv_vfwmacc_tum(vm, vd, vs1, vs2, __RISCV_FRM_RNE, vl);
+}
+
+// CHECK-RV64-LABEL: define dso_local <vscale x 2 x float> @test_vfwmacc_vf_bf16mf2_f32m1_rm_tum(
+// CHECK-RV64-SAME: <vscale x 2 x i1> [[VM:%.*]], <vscale x 2 x float> [[VD:%.*]], bfloat noundef [[VS1:%.*]], <vscale x 2 x bfloat> [[VS2:%.*]], i64 noundef [[VL:%.*]]) #[[ATTR0]] {
+// CHECK-RV64-NEXT: [[ENTRY:.*:]]
+// CHECK-RV64-NEXT: [[TMP0:%.*]] = call <vscale x 2 x float> @llvm.riscv.vfwmacc.mask.nxv2f32.bf16.nxv2bf16.i64(<vscale x 2 x float> [[VD]], bfloat [[VS1]], <vscale x 2 x bfloat> [[VS2]], <vscale x 2 x i1> [[VM]], i64 0, i64 [[VL]], i64 2)
+// CHECK-RV64-NEXT: ret <vscale x 2 x float> [[TMP0]]
+//
+vfloat32m1_t test_vfwmacc_vf_bf16mf2_f32m1_rm_tum(vbool32_t vm, vfloat32m1_t vd,
+ __bf16 vs1,
+ vbfloat16mf2_t vs2,
+ size_t vl) {
+ return __riscv_vfwmacc_tum(vm, vd, vs1, vs2, __RISCV_FRM_RNE, vl);
+}
+
+// CHECK-RV64-LABEL: define dso_local <vscale x 4 x float> @test_vfwmacc_vv_bf16m1_f32m2_rm_tum(
+// CHECK-RV64-SAME: <vscale x 4 x i1> [[VM:%.*]], <vscale x 4 x float> [[VD:%.*]], <vscale x 4 x bfloat> [[VS1:%.*]], <vscale x 4 x bfloat> [[VS2:%.*]], i64 noundef [[VL:%.*]]) #[[ATTR0]] {
+// CHECK-RV64-NEXT: [[ENTRY:.*:]]
+// CHECK-RV64-NEXT: [[TMP0:%.*]] = call <vscale x 4 x float> @llvm.riscv.vfwmacc.mask.nxv4f32.nxv4bf16.nxv4bf16.i64(<vscale x 4 x float> [[VD]], <vscale x 4 x bfloat> [[VS1]], <vscale x 4 x bfloat> [[VS2]], <vscale x 4 x i1> [[VM]], i64 0, i64 [[VL]], i64 2)
+// CHECK-RV64-NEXT: ret <vscale x 4 x float> [[TMP0]]
+//
+vfloat32m2_t test_vfwmacc_vv_bf16m1_f32m2_rm_tum(vbool16_t vm, vfloat32m2_t vd,
+ vbfloat16m1_t vs1,
+ vbfloat16m1_t vs2, size_t vl) {
+ return __riscv_vfwmacc_tum(vm, vd, vs1, vs2, __RISCV_FRM_RNE, vl);
+}
+
+// CHECK-RV64-LABEL: define dso_local <vscale x 4 x float> @test_vfwmacc_vf_bf16m1_f32m2_rm_tum(
+// CHECK-RV64-SAME: <vscale x 4 x i1> [[VM:%.*]], <vscale x 4 x float> [[VD:%.*]], bfloat noundef [[VS1:%.*]], <vscale x 4 x bfloat> [[VS2:%.*]], i64 noundef [[VL:%.*]]) #[[ATTR0]] {
+// CHECK-RV64-NEXT: [[ENTRY:.*:]]
+// CHECK-RV64-NEXT: [[TMP0:%.*]] = call <vscale x 4 x float> @llvm.riscv.vfwmacc.mask.nxv4f32.bf16.nxv4bf16.i64(<vscale x 4 x float> [[VD]], bfloat [[VS1]], <vscale x 4 x bfloat> [[VS2]], <vscale x 4 x i1> [[VM]], i64 0, i64 [[VL]], i64 2)
+// CHECK-RV64-NEXT: ret <vscale x 4 x float> [[TMP0]]
+//
+vfloat32m2_t test_vfwmacc_vf_bf16m1_f32m2_rm_tum(vbool16_t vm, vfloat32m2_t vd,
+ __bf16 vs1, vbfloat16m1_t vs2,
+ size_t vl) {
+ return __riscv_vfwmacc_tum(vm, vd, vs1, vs2, __RISCV_FRM_RNE, vl);
+}
+
+// CHECK-RV64-LABEL: define dso_local <vscale x 8 x float> @test_vfwmacc_vv_bf16m2_f32m4_rm_tum(
+// CHECK-RV64-SAME: <vscale x 8 x i1> [[VM:%.*]], <vscale x 8 x float> [[VD:%.*]], <vscale x 8 x bfloat> [[VS1:%.*]], <vscale x 8 x bfloat> [[VS2:%.*]], i64 noundef [[VL:%.*]]) #[[ATTR0]] {
+// CHECK-RV64-NEXT: [[ENTRY:.*:]]
+// CHECK-RV64-NEXT: [[TMP0:%.*]] = call <vscale x 8 x float> @llvm.riscv.vfwmacc.mask.nxv8f32.nxv8bf16.nxv8bf16.i64(<vscale x 8 x float> [[VD]], <vscale x 8 x bfloat> [[VS1]], <vscale x 8 x bfloat> [[VS2]], <vscale x 8 x i1> [[VM]], i64 0, i64 [[VL]], i64 2)
+// CHECK-RV64-NEXT: ret <vscale x 8 x float> [[TMP0]]
+//
+vfloat32m4_t test_vfwmacc_vv_bf16m2_f32m4_rm_tum(vbool8_t vm, vfloat32m4_t vd,
+ vbfloat16m2_t vs1,
+ vbfloat16m2_t vs2, size_t vl) {
+ return __riscv_vfwmacc_tum(vm, vd, vs1, vs2, __RISCV_FRM_RNE, vl);
+}
+
+// CHECK-RV64-LABEL: define dso_local <vscale x 8 x float> @test_vfwmacc_vf_bf16m2_f32m4_rm_tum(
+// CHECK-RV64-SAME: <vscale x 8 x i1> [[VM:%.*]], <vscale x 8 x float> [[VD:%.*]], bfloat noundef [[VS1:%.*]], <vscale x 8 x bfloat> [[VS2:%.*]], i64 noundef [[VL:%.*]]) #[[ATTR0]] {
+// CHECK-RV64-NEXT: [[ENTRY:.*:]]
+// CHECK-RV64-NEXT: [[TMP0:%.*]] = call <vscale x 8 x float> @llvm.riscv.vfwmacc.mask.nxv8f32.bf16.nxv8bf16.i64(<vscale x 8 x float> [[VD]], bfloat [[VS1]], <vscale x 8 x bfloat> [[VS2]], <vscale x 8 x i1> [[VM]], i64 0, i64 [[VL]], i64 2)
+// CHECK-RV64-NEXT: ret <vscale x 8 x float> [[TMP0]]
+//
+vfloat32m4_t test_vfwmacc_vf_bf16m2_f32m4_rm_tum(vbool8_t vm, vfloat32m4_t vd,
+ __bf16 vs1, vbfloat16m2_t vs2,
+ size_t vl) {
+ return __riscv_vfwmacc_tum(vm, vd, vs1, vs2, __RISCV_FRM_RNE, vl);
+}
+
+// CHECK-RV64-LABEL: define dso_local <vscale x 16 x float> @test_vfwmacc_vv_bf16m4_f32m8_rm_tum(
+// CHECK-RV64-SAME: <vscale x 16 x i1> [[VM:%.*]], <vscale x 16 x float> [[VD:%.*]], <vscale x 16 x bfloat> [[VS1:%.*]], <vscale x 16 x bfloat> [[VS2:%.*]], i64 noundef [[VL:%.*]]) #[[ATTR0]] {
+// CHECK-RV64-NEXT: [[ENTRY:.*:]]
+// CHECK-RV64-NEXT: [[TMP0:%.*]] = call <vscale x 16 x float> @llvm.riscv.vfwmacc.mask.nxv16f32.nxv16bf16.nxv16bf16.i64(<vscale x 16 x float> [[VD]], <vscale x 16 x bfloat> [[VS1]], <vscale x 16 x bfloat> [[VS2]], <vscale x 16 x i1> [[VM]], i64 0, i64 [[VL]], i64 2)
+// CHECK-RV64-NEXT: ret <vscale x 16 x float> [[TMP0]]
+//
+vfloat32m8_t test_vfwmacc_vv_bf16m4_f32m8_rm_tum(vbool4_t vm, vfloat32m8_t vd,
+ vbfloat16m4_t vs1,
+ vbfloat16m4_t vs2, size_t vl) {
+ return __riscv_vfwmacc_tum(vm, vd, vs1, vs2, __RISCV_FRM_RNE, vl);
+}
+
+// CHECK-RV64-LABEL: define dso_local <vscale x 16 x float> @test_vfwmacc_vf_bf16m4_f32m8_rm_tum(
+// CHECK-RV64-SAME: <vscale x 16 x i1> [[VM:%.*]], <vscale x 16 x float> [[VD:%.*]], bfloat noundef [[VS1:%.*]], <vscale x 16 x bfloat> [[VS2:%.*]], i64 noundef [[VL:%.*]]) #[[ATTR0]] {
+// CHECK-RV64-NEXT: [[ENTRY:.*:]]
+// CHECK-RV64-NEXT: [[TMP0:%.*]] = call <vscale x 16 x float> @llvm.riscv.vfwmacc.mask.nxv16f32.bf16.nxv16bf16.i64(<vscale x 16 x float> [[VD]], bfloat [[VS1]], <vscale x 16 x bfloat> [[VS2]], <vscale x 16 x i1> [[VM]], i64 0, i64 [[VL]], i64 2)
+// CHECK-RV64-NEXT: ret <vscale x 16 x float> [[TMP0]]
+//
+vfloat32m8_t test_vfwmacc_vf_bf16m4_f32m8_rm_tum(vbool4_t vm, vfloat32m8_t vd,
+ __bf16 vs1, vbfloat16m4_t vs2,
+ size_t vl) {
+ return __riscv_vfwmacc_tum(vm, vd, vs1, vs2, __RISCV_FRM_RNE, vl);
+}
+
+// CHECK-RV64-LABEL: define dso_local <vscale x 1 x float> @test_vfwmacc_vv_bf16mf4_f32mf2_rm_tumu(
+// CHECK-RV64-SAME: <vscale x 1 x i1> [[VM:%.*]], <vscale x 1 x float> [[VD:%.*]], <vscale x 1 x bfloat> [[VS1:%.*]], <vscale x 1 x bfloat> [[VS2:%.*]], i64 noundef [[VL:%.*]]) #[[ATTR0]] {
+// CHECK-RV64-NEXT: [[ENTRY:.*:]]
+// CHECK-RV64-NEXT: [[TMP0:%.*]] = call <vscale x 1 x float> @llvm.riscv.vfwmacc.mask.nxv1f32.nxv1bf16.nxv1bf16.i64(<vscale x 1 x float> [[VD]], <vscale x 1 x bfloat> [[VS1]], <vscale x 1 x bfloat> [[VS2]], <vscale x 1 x i1> [[VM]], i64 0, i64 [[VL]], i64 0)
+// CHECK-RV64-NEXT: ret <vscale x 1 x float> [[TMP0]]
+//
+vfloat32mf2_t test_vfwmacc_vv_bf16mf4_f32mf2_rm_tumu(vbool64_t vm,
+ vfloat32mf2_t vd,
+ vbfloat16mf4_t vs1,
+ vbfloat16mf4_t vs2,
+ size_t vl) {
+ return __riscv_vfwmacc_tumu(vm, vd, vs1, vs2, __RISCV_FRM_RNE, vl);
+}
+
+// CHECK-RV64-LABEL: define dso_local <vscale x 1 x float> @test_vfwmacc_vf_bf16mf4_f32mf2_rm_tumu(
+// CHECK-RV64-SAME: <vscale x 1 x i1> [[VM:%.*]], <vscale x 1 x float> [[VD:%.*]], bfloat noundef [[VS1:%.*]], <vscale x 1 x bfloat> [[VS2:%.*]], i64 noundef [[VL:%.*]]) #[[ATTR0]] {
+// CHECK-RV64-NEXT: [[ENTRY:.*:]]
+// CHECK-RV64-NEXT: [[TMP0:%.*]] = call <vscale x 1 x float> @llvm.riscv.vfwmacc.mask.nxv1f32.bf16.nxv1bf16.i64(<vscale x 1 x float> [[VD]], bfloat [[VS1]], <vscale x 1 x bfloat> [[VS2]], <vscale x 1 x i1> [[VM]], i64 0, i64 [[VL]], i64 0)
+// CHECK-RV64-NEXT: ret <vscale x 1 x float> [[TMP0]]
+//
+vfloat32mf2_t test_vfwmacc_vf_bf16mf4_f32mf2_rm_tumu(
+ vbool64_t vm, vfloat32mf2_t vd, __bf16 vs1, vbfloat16mf4_t vs2, size_t vl) {
+ return __riscv_vfwmacc_tumu(vm, vd, vs1, vs2, __RISCV_FRM_RNE, vl);
+}
+
+// CHECK-RV64-LABEL: define dso_local <vscale x 2 x float> @test_vfwmacc_vv_bf16mf2_f32m1_rm_tumu(
+// CHECK-RV64-SAME: <vscale x 2 x i1> [[VM:%.*]], <vscale x 2 x float> [[VD:%.*]], <vscale x 2 x bfloat> [[VS1:%.*]], <vscale x 2 x bfloat> [[VS2:%.*]], i64 noundef [[VL:%.*]]) #[[ATTR0]] {
+// CHECK-RV64-NEXT: [[ENTRY:.*:]]
+// CHECK-RV64-NEXT: [[TMP0:%.*]] = call <vscale x 2 x float> @llvm.riscv.vfwmacc.mask.nxv2f32.nxv2bf16.nxv2bf16.i64(<vscale x 2 x float> [[VD]], <vscale x 2 x bfloat> [[VS1]], <vscale x 2 x bfloat> [[VS2]], <vscale x 2 x i1> [[VM]], i64 0, i64 [[VL]], i64 0)
+// CHECK-RV64-NEXT: ret <vscale x 2 x float> [[TMP0]]
+//
+vfloat32m1_t test_vfwmacc_vv_bf16mf2_f32m1_rm_tumu(vbool32_t vm,
+ vfloat32m1_t vd,
+ vbfloat16mf2_t vs1,
+ vbfloat16mf2_t vs2,
+ size_t vl) {
+ return __riscv_vfwmacc_tumu(vm, vd, vs1, vs2, __RISCV_FRM_RNE, vl);
+}
+
+// CHECK-RV64-LABEL: define dso_local <vscale x 2 x float> @test_vfwmacc_vf_bf16mf2_f32m1_rm_tumu(
+// CHECK-RV64-SAME: <vscale x 2 x i1> [[VM:%.*]], <vscale x 2 x float> [[VD:%.*]], bfloat noundef [[VS1:%.*]], <vscale x 2 x bfloat> [[VS2:%.*]], i64 noundef [[VL:%.*]]) #[[ATTR0]] {
+// CHECK-RV64-NEXT: [[ENTRY:.*:]]
+// CHECK-RV64-NEXT: [[TMP0:%.*]] = call <vscale x 2 x float> @llvm.riscv.vfwmacc.mask.nxv2f32.bf16.nxv2bf16.i64(<vscale x 2 x float> [[VD]], bfloat [[VS1]], <vscale x 2 x bfloat> [[VS2]], <vscale x 2 x i1> [[VM]], i64 0, i64 [[VL]], i64 0)
+// CHECK-RV64-NEXT: ret <vscale x 2 x float> [[TMP0]]
+//
+vfloat32m1_t test_vfwmacc_vf_bf16mf2_f32m1_rm_tumu(vbool32_t vm,
+ vfloat32m1_t vd, __bf16 vs1,
+ vbfloat16mf2_t vs2,
+ size_t vl) {
+ return __riscv_vfwmacc_tumu(vm, vd, vs1, vs2, __RISCV_FRM_RNE, vl);
+}
+
+// CHECK-RV64-LABEL: define dso_local <vscale x 4 x float> @test_vfwmacc_vv_bf16m1_f32m2_rm_tumu(
+// CHECK-RV64-SAME: <vscale x 4 x i1> [[VM:%.*]], <vscale x 4 x float> [[VD:%.*]], <vscale x 4 x bfloat> [[VS1:%.*]], <vscale x 4 x bfloat> [[VS2:%.*]], i64 noundef [[VL:%.*]]) #[[ATTR0]] {
+// CHECK-RV64-NEXT: [[ENTRY:.*:]]
+// CHECK-RV64-NEXT: [[TMP0:%.*]] = call <vscale x 4 x float> @llvm.riscv.vfwmacc.mask.nxv4f32.nxv4bf16.nxv4bf16.i64(<vscale x 4 x float> [[VD]], <vscale x 4 x bfloat> [[VS1]], <vscale x 4 x bfloat> [[VS2]], <vscale x 4 x i1> [[VM]], i64 0, i64 [[VL]], i64 0)
+// CHECK-RV64-NEXT: ret <vscale x 4 x float> [[TMP0]]
+//
+vfloat32m2_t test_vfwmacc_vv_bf16m1_f32m2_rm_tumu(vbool16_t vm, vfloat32m2_t vd,
+ vbfloat16m1_t vs1,
+ vbfloat16m1_t vs2,
+ size_t vl) {
+ return __riscv_vfwmacc_tumu(vm, vd, vs1, vs2, __RISCV_FRM_RNE, vl);
+}
+
+// CHECK-RV64-LABEL: define dso_local <vscale x 4 x float> @test_vfwmacc_vf_bf16m1_f32m2_rm_tumu(
+// CHECK-RV64-SAME: <vscale x 4 x i1> [[VM:%.*]], <vscale x 4 x float> [[VD:%.*]], bfloat noundef [[VS1:%.*]], <vscale x 4 x bfloat> [[VS2:%.*]], i64 noundef [[VL:%.*]]) #[[ATTR0]] {
+// CHECK-RV64-NEXT: [[ENTRY:.*:]]
+// CHECK-RV64-NEXT: [[TMP0:%.*]] = call <vscale x 4 x float> @llvm.riscv.vfwmacc.mask.nxv4f32.bf16.nxv4bf16.i64(<vscale x 4 x float> [[VD]], bfloat [[VS1]], <vscale x 4 x bfloat> [[VS2]], <vscale x 4 x i1> [[VM]], i64 0, i64 [[VL]], i64 0)
+// CHECK-RV64-NEXT: ret <vscale x 4 x float> [[TMP0]]
+//
+vfloat32m2_t test_vfwmacc_vf_bf16m1_f32m2_rm_tumu(vbool16_t vm, vfloat32m2_t vd,
+ __bf16 vs1, vbfloat16m1_t vs2,
+ size_t vl) {
+ return __riscv_vfwmacc_tumu(vm, vd, vs1, vs2, __RISCV_FRM_RNE, vl);
+}
+
+// CHECK-RV64-LABEL: define dso_local <vscale x 8 x float> @test_vfwmacc_vv_bf16m2_f32m4_rm_tumu(
+// CHECK-RV64-SAME: <vscale x 8 x i1> [[VM:%.*]], <vscale x 8 x float> [[VD:%.*]], <vscale x 8 x bfloat> [[VS1:%.*]], <vscale x 8 x bfloat> [[VS2:%.*]], i64 noundef [[VL:%.*]]) #[[ATTR0]] {
+// CHECK-RV64-NEXT: [[ENTRY:.*:]]
+// CHECK-RV64-NEXT: [[TMP0:%.*]] = call <vscale x 8 x float> @llvm.riscv.vfwmacc.mask.nxv8f32.nxv8bf16.nxv8bf16.i64(<vscale x 8 x float> [[VD]], <vscale x 8 x bfloat> [[VS1]], <vscale x 8 x bfloat> [[VS2]], <vscale x 8 x i1> [[VM]], i64 0, i64 [[VL]], i64 0)
+// CHECK-RV64-NEXT: ret <vscale x 8 x float> [[TMP0]]
+//
+vfloat32m4_t test_vfwmacc_vv_bf16m2_f32m4_rm_tumu(vbool8_t vm, vfloat32m4_t vd,
+ vbfloat16m2_t vs1,
+ vbfloat16m2_t vs2,
+ size_t vl) {
+ return __riscv_vfwmacc_tumu(vm, vd, vs1, vs2, __RISCV_FRM_RNE, vl);
+}
+
+// CHECK-RV64-LABEL: define dso_local <vscale x 8 x float> @test_vfwmacc_vf_bf16m2_f32m4_rm_tumu(
+// CHECK-RV64-SAME: <vscale x 8 x i1> [[VM:%.*]], <vscale x 8 x float> [[VD:%.*]], bfloat noundef [[VS1:%.*]], <vscale x 8 x bfloat> [[VS2:%.*]], i64 noundef [[VL:%.*]]) #[[ATTR0]] {
+// CHECK-RV64-NEXT: [[ENTRY:.*:]]
+// CHECK-RV64-NEXT: [[TMP0:%.*]] = call <vscale x 8 x float> @llvm.riscv.vfwmacc.mask.nxv8f32.bf16.nxv8bf16.i64(<vscale x 8 x float> [[VD]], bfloat [[VS1]], <vscale x 8 x bfloat> [[VS2]], <vscale x 8 x i1> [[VM]], i64 0, i64 [[VL]], i64 0)
+// CHECK-RV64-NEXT: ret <vscale x 8 x float> [[TMP0]]
+//
+vfloat32m4_t test_vfwmacc_vf_bf16m2_f32m4_rm_tumu(vbool8_t vm, vfloat32m4_t vd,
+ __bf16 vs1, vbfloat16m2_t vs2,
+ size_t vl) {
+ return __riscv_vfwmacc_tumu(vm, vd, vs1, vs2, __RISCV_FRM_RNE, vl);
+}
+
+// CHECK-RV64-LABEL: define dso_local <vscale x 16 x float> @test_vfwmacc_vv_bf16m4_f32m8_rm_tumu(
+// CHECK-RV64-SAME: <vscale x 16 x i1> [[VM:%.*]], <vscale x 16 x float> [[VD:%.*]], <vscale x 16 x bfloat> [[VS1:%.*]], <vscale x 16 x bfloat> [[VS2:%.*]], i64 noundef [[VL:%.*]]) #[[ATTR0]] {
+// CHECK-RV64-NEXT: [[ENTRY:.*:]]
+// CHECK-RV64-NEXT: [[TMP0:%.*]] = call <vscale x 16 x float> @llvm.riscv.vfwmacc.mask.nxv16f32.nxv16bf16.nxv16bf16.i64(<vscale x 16 x float> [[VD]], <vscale x 16 x bfloat> [[VS1]], <vscale x 16 x bfloat> [[VS2]], <vscale x 16 x i1> [[VM]], i64 0, i64 [[VL]], i64 0)
+// CHECK-RV64-NEXT: ret <vscale x 16 x float> [[TMP0]]
+//
+vfloat32m8_t test_vfwmacc_vv_bf16m4_f32m8_rm_tumu(vbool4_t vm, vfloat32m8_t vd,
+ vbfloat16m4_t vs1,
+ vbfloat16m4_t vs2,
+ size_t vl) {
+ return __riscv_vfwmacc_tumu(vm, vd, vs1, vs2, __RISCV_FRM_RNE, vl);
+}
+
+// CHECK-RV64-LABEL: define dso_local <vscale x 16 x float> @test_vfwmacc_vf_bf16m4_f32m8_rm_tumu(
+// CHECK-RV64-SAME: <vscale x 16 x i1> [[VM:%.*]], <vscale x 16 x float> [[VD:%.*]], bfloat noundef [[VS1:%.*]], <vscale x 16 x bfloat> [[VS2:%.*]], i64 noundef [[VL:%.*]]) #[[ATTR0]] {
+// CHECK-RV64-NEXT: [[ENTRY:.*:]]
+// CHECK-RV64-NEXT: [[TMP0:%.*]] = call <vscale x 16 x float> @llvm.riscv.vfwmacc.mask.nxv16f32.bf16.nxv16bf16.i64(<vscale x 16 x float> [[VD]], bfloat [[VS1]], <vscale x 16 x bfloat> [[VS2]], <vscale x 16 x i1> [[VM]], i64 0, i64 [[VL]], i64 0)
+// CHECK-RV64-NEXT: ret <vscale x 16 x float> [[TMP0]]
+//
+vfloat32m8_t test_vfwmacc_vf_bf16m4_f32m8_rm_tumu(vbool4_t vm, vfloat32m8_t vd,
+ __bf16 vs1, vbfloat16m4_t vs2,
+ size_t vl) {
+ return __riscv_vfwmacc_tumu(vm, vd, vs1, vs2, __RISCV_FRM_RNE, vl);
+}
+
+// CHECK-RV64-LABEL: define dso_local <vscale x 1 x float> @test_vfwmacc_vv_bf16mf4_f32mf2_rm_mu(
+// CHECK-RV64-SAME: <vscale x 1 x i1> [[VM:%.*]], <vscale x 1 x float> [[VD:%.*]], <vscale x 1 x bfloat> [[VS1:%.*]], <vscale x 1 x bfloat> [[VS2:%.*]], i64 noundef [[VL:%.*]]) #[[ATTR0]] {
+// CHECK-RV64-NEXT: [[ENTRY:.*:]]
+// CHECK-RV64-NEXT: [[TMP0:%.*]] = call <vscale x 1 x float> @llvm.riscv.vfwmacc.mask.nxv1f32.nxv1bf16.nxv1bf16.i64(<vscale x 1 x float> [[VD]], <vscale x 1 x bfloat> [[VS1]], <vscale x 1 x bfloat> [[VS2]], <vscale x 1 x i1> [[VM]], i64 0, i64 [[VL]], i64 1)
+// CHECK-RV64-NEXT: ret <vscale x 1 x float> [[TMP0]]
+//
+vfloat32mf2_t test_vfwmacc_vv_bf16mf4_f32mf2_rm_mu(vbool64_t vm,
+ vfloat32mf2_t vd,
+ vbfloat16mf4_t vs1,
+ vbfloat16mf4_t vs2,
+ size_t vl) {
+ return __riscv_vfwmacc_mu(vm, vd, vs1, vs2, __RISCV_FRM_RNE, vl);
+}
+
+// CHECK-RV64-LABEL: define dso_local <vscale x 1 x float> @test_vfwmacc_vf_bf16mf4_f32mf2_rm_mu(
+// CHECK-RV64-SAME: <vscale x 1 x i1> [[VM:%.*]], <vscale x 1 x float> [[VD:%.*]], bfloat noundef [[VS1:%.*]], <vscale x 1 x bfloat> [[VS2:%.*]], i64 noundef [[VL:%.*]]) #[[ATTR0]] {
+// CHECK-RV64-NEXT: [[ENTRY:.*:]]
+// CHECK-RV64-NEXT: [[TMP0:%.*]] = call <vscale x 1 x float> @llvm.riscv.vfwmacc.mask.nxv1f32.bf16.nxv1bf16.i64(<vscale x 1 x float> [[VD]], bfloat [[VS1]], <vscale x 1 x bfloat> [[VS2]], <vscale x 1 x i1> [[VM]], i64 0, i64 [[VL]], i64 1)
+// CHECK-RV64-NEXT: ret <vscale x 1 x float> [[TMP0]]
+//
+vfloat32mf2_t test_vfwmacc_vf_bf16mf4_f32mf2_rm_mu(vbool64_t vm,
+ vfloat32mf2_t vd, __bf16 vs1,
+ vbfloat16mf4_t vs2,
+ size_t vl) {
+ return __riscv_vfwmacc_mu(vm, vd, vs1, vs2, __RISCV_FRM_RNE, vl);
+}
+
+// CHECK-RV64-LABEL: define dso_local <vscale x 2 x float> @test_vfwmacc_vv_bf16mf2_f32m1_rm_mu(
+// CHECK-RV64-SAME: <vscale x 2 x i1> [[VM:%.*]], <vscale x 2 x float> [[VD:%.*]], <vscale x 2 x bfloat> [[VS1:%.*]], <vscale x 2 x bfloat> [[VS2:%.*]], i64 noundef [[VL:%.*]]) #[[ATTR0]] {
+// CHECK-RV64-NEXT: [[ENTRY:.*:]]
+// CHECK-RV64-NEXT: [[TMP0:%.*]] = call <vscale x 2 x float> @llvm.riscv.vfwmacc.mask.nxv2f32.nxv2bf16.nxv2bf16.i64(<vscale x 2 x float> [[VD]], <vscale x 2 x bfloat> [[VS1]], <vscale x 2 x bfloat> [[VS2]], <vscale x 2 x i1> [[VM]], i64 0, i64 [[VL]], i64 1)
+// CHECK-RV64-NEXT: ret <vscale x 2 x float> [[TMP0]]
+//
+vfloat32m1_t test_vfwmacc_vv_bf16mf2_f32m1_rm_mu(vbool32_t vm, vfloat32m1_t vd,
+ vbfloat16mf2_t vs1,
+ vbfloat16mf2_t vs2,
+ size_t vl) {
+ return __riscv_vfwmacc_mu(vm, vd, vs1, vs2, __RISCV_FRM_RNE, vl);
+}
+
+// CHECK-RV64-LABEL: define dso_local <vscale x 2 x float> @test_vfwmacc_vf_bf16mf2_f32m1_rm_mu(
+// CHECK-RV64-SAME: <vscale x 2 x i1> [[VM:%.*]], <vscale x 2 x float> [[VD:%.*]], bfloat noundef [[VS1:%.*]], <vscale x 2 x bfloat> [[VS2:%.*]], i64 noundef [[VL:%.*]]) #[[ATTR0]] {
+// CHECK-RV64-NEXT: [[ENTRY:.*:]]
+// CHECK-RV64-NEXT: [[TMP0:%.*]] = call <vscale x 2 x float> @llvm.riscv.vfwmacc.mask.nxv2f32.bf16.nxv2bf16.i64(<vscale x 2 x float> [[VD]], bfloat [[VS1]], <vscale x 2 x bfloat> [[VS2]], <vscale x 2 x i1> [[VM]], i64 0, i64 [[VL]], i64 1)
+// CHECK-RV64-NEXT: ret <vscale x 2 x float> [[TMP0]]
+//
+vfloat32m1_t test_vfwmacc_vf_bf16mf2_f32m1_rm_mu(vbool32_t vm, vfloat32m1_t vd,
+ __bf16 vs1, vbfloat16mf2_t vs2,
+ size_t vl) {
+ return __riscv_vfwmacc_mu(vm, vd, vs1, vs2, __RISCV_FRM_RNE, vl);
+}
+
+// CHECK-RV64-LABEL: define dso_local <vscale x 4 x float> @test_vfwmacc_vv_bf16m1_f32m2_rm_mu(
+// CHECK-RV64-SAME: <vscale x 4 x i1> [[VM:%.*]], <vscale x 4 x float> [[VD:%.*]], <vscale x 4 x bfloat> [[VS1:%.*]], <vscale x 4 x bfloat> [[VS2:%.*]], i64 noundef [[VL:%.*]]) #[[ATTR0]] {
+// CHECK-RV64-NEXT: [[ENTRY:.*:]]
+// CHECK-RV64-NEXT: [[TMP0:%.*]] = call <vscale x 4 x float> @llvm.riscv.vfwmacc.mask.nxv4f32.nxv4bf16.nxv4bf16.i64(<vscale x 4 x float> [[VD]], <vscale x 4 x bfloat> [[VS1]], <vscale x 4 x bfloat> [[VS2]], <vscale x 4 x i1> [[VM]], i64 0, i64 [[VL]], i64 1)
+// CHECK-RV64-NEXT: ret <vscale x 4 x float> [[TMP0]]
+//
+vfloat32m2_t test_vfwmacc_vv_bf16m1_f32m2_rm_mu(vbool16_t vm, vfloat32m2_t vd,
+ vbfloat16m1_t vs1,
+ vbfloat16m1_t vs2, size_t vl) {
+ return __riscv_vfwmacc_mu(vm, vd, vs1, vs2, __RISCV_FRM_RNE, vl);
+}
+
+// CHECK-RV64-LABEL: define dso_local <vscale x 4 x float> @test_vfwmacc_vf_bf16m1_f32m2_rm_mu(
+// CHECK-RV64-SAME: <vscale x 4 x i1> [[VM:%.*]], <vscale x 4 x float> [[VD:%.*]], bfloat noundef [[VS1:%.*]], <vscale x 4 x bfloat> [[VS2:%.*]], i64 noundef [[VL:%.*]]) #[[ATTR0]] {
+// CHECK-RV64-NEXT: [[ENTRY:.*:]]
+// CHECK-RV64-NEXT: [[TMP0:%.*]] = call <vscale x 4 x float> @llvm.riscv.vfwmacc.mask.nxv4f32.bf16.nxv4bf16.i64(<vscale x 4 x float> [[VD]], bfloat [[VS1]], <vscale x 4 x bfloat> [[VS2]], <vscale x 4 x i1> [[VM]], i64 0, i64 [[VL]], i64 1)
+// CHECK-RV64-NEXT: ret <vscale x 4 x float> [[TMP0]]
+//
+vfloat32m2_t test_vfwmacc_vf_bf16m1_f32m2_rm_mu(vbool16_t vm, vfloat32m2_t vd,
+ __bf16 vs1, vbfloat16m1_t vs2,
+ size_t vl) {
+ return __riscv_vfwmacc_mu(vm, vd, vs1, vs2, __RISCV_FRM_RNE, vl);
+}
+
+// CHECK-RV64-LABEL: define dso_local <vscale x 8 x float> @test_vfwmacc_vv_bf16m2_f32m4_rm_mu(
+// CHECK-RV64-SAME: <vscale x 8 x i1> [[VM:%.*]], <vscale x 8 x float> [[VD:%.*]], <vscale x 8 x bfloat> [[VS1:%.*]], <vscale x 8 x bfloat> [[VS2:%.*]], i64 noundef [[VL:%.*]]) #[[ATTR0]] {
+// CHECK-RV64-NEXT: [[ENTRY:.*:]]
+// CHECK-RV64-NEXT: [[TMP0:%.*]] = call <vscale x 8 x float> @llvm.riscv.vfwmacc.mask.nxv8f32.nxv8bf16.nxv8bf16.i64(<vscale x 8 x float> [[VD]], <vscale x 8 x bfloat> [[VS1]], <vscale x 8 x bfloat> [[VS2]], <vscale x 8 x i1> [[VM]], i64 0, i64 [[VL]], i64 1)
+// CHECK-RV64-NEXT: ret <vscale x 8 x float> [[TMP0]]
+//
+vfloat32m4_t test_vfwmacc_vv_bf16m2_f32m4_rm_mu(vbool8_t vm, vfloat32m4_t vd,
+ vbfloat16m2_t vs1,
+ vbfloat16m2_t vs2, size_t vl) {
+ return __riscv_vfwmacc_mu(vm, vd, vs1, vs2, __RISCV_FRM_RNE, vl);
+}
+
+// CHECK-RV64-LABEL: define dso_local <vscale x 8 x float> @test_vfwmacc_vf_bf16m2_f32m4_rm_mu(
+// CHECK-RV64-SAME: <vscale x 8 x i1> [[VM:%.*]], <vscale x 8 x float> [[VD:%.*]], bfloat noundef [[VS1:%.*]], <vscale x 8 x bfloat> [[VS2:%.*]], i64 noundef [[VL:%.*]]) #[[ATTR0]] {
+// CHECK-RV64-NEXT: [[ENTRY:.*:]]
+// CHECK-RV64-NEXT: [[TMP0:%.*]] = call <vscale x 8 x float> @llvm.riscv.vfwmacc.mask.nxv8f32.bf16.nxv8bf16.i64(<vscale x 8 x float> [[VD]], bfloat [[VS1]], <vscale x 8 x bfloat> [[VS2]], <vscale x 8 x i1> [[VM]], i64 0, i64 [[VL]], i64 1)
+// CHECK-RV64-NEXT: ret <vscale x 8 x float> [[TMP0]]
+//
+vfloat32m4_t test_vfwmacc_vf_bf16m2_f32m4_rm_mu(vbool8_t vm, vfloat32m4_t vd,
+ __bf16 vs1, vbfloat16m2_t vs2,
+ size_t vl) {
+ return __riscv_vfwmacc_mu(vm, vd, vs1, vs2, __RISCV_FRM_RNE, vl);
+}
+
+// CHECK-RV64-LABEL: define dso_local <vscale x 16 x float> @test_vfwmacc_vv_bf16m4_f32m8_rm_mu(
+// CHECK-RV64-SAME: <vscale x 16 x i1> [[VM:%.*]], <vscale x 16 x float> [[VD:%.*]], <vscale x 16 x bfloat> [[VS1:%.*]], <vscale x 16 x bfloat> [[VS2:%.*]], i64 noundef [[VL:%.*]]) #[[ATTR0]] {
+// CHECK-RV64-NEXT: [[ENTRY:.*:]]
+// CHECK-RV64-NEXT: [[TMP0:%.*]] = call <vscale x 16 x float> @llvm.riscv.vfwmacc.mask.nxv16f32.nxv16bf16.nxv16bf16.i64(<vscale x 16 x float> [[VD]], <vscale x 16 x bfloat> [[VS1]], <vscale x 16 x bfloat> [[VS2]], <vscale x 16 x i1> [[VM]], i64 0, i64 [[VL]], i64 1)
+// CHECK-RV64-NEXT: ret <vscale x 16 x float> [[TMP0]]
+//
+vfloat32m8_t test_vfwmacc_vv_bf16m4_f32m8_rm_mu(vbool4_t vm, vfloat32m8_t vd,
+ vbfloat16m4_t vs1,
+ vbfloat16m4_t vs2, size_t vl) {
+ return __riscv_vfwmacc_mu(vm, vd, vs1, vs2, __RISCV_FRM_RNE, vl);
+}
+
+// CHECK-RV64-LABEL: define dso_local <vscale x 16 x float> @test_vfwmacc_vf_bf16m4_f32m8_rm_mu(
+// CHECK-RV64-SAME: <vscale x 16 x i1> [[VM:%.*]], <vscale x 16 x float> [[VD:%.*]], bfloat noundef [[VS1:%.*]], <vscale x 16 x bfloat> [[VS2:%.*]], i64 noundef [[VL:%.*]]) #[[ATTR0]] {
+// CHECK-RV64-NEXT: [[ENTRY:.*:]]
+// CHECK-RV64-NEXT: [[TMP0:%.*]] = call <vscale x 16 x float> @llvm.riscv.vfwmacc.mask.nxv16f32.bf16.nxv16bf16.i64(<vscale x 16 x float> [[VD]], bfloat [[VS1]], <vscale x 16 x bfloat> [[VS2]], <vscale x 16 x i1> [[VM]], i64 0, i64 [[VL]], i64 1)
+// CHECK-RV64-NEXT: ret <vscale x 16 x float> [[TMP0]]
+//
+vfloat32m8_t test_vfwmacc_vf_bf16m4_f32m8_rm_mu(vbool4_t vm, vfloat32m8_t vd,
+ __bf16 vs1, vbfloat16m4_t vs2,
+ size_t vl) {
+ return __riscv_vfwmacc_mu(vm, vd, vs1, vs2, __RISCV_FRM_RNE, vl);
+}
diff --git a/clang/test/CodeGen/RISCV/rvv-intrinsics-autogenerated/zvfbfa/policy/overloaded/vfwmsac.c b/clang/test/CodeGen/RISCV/rvv-intrinsics-autogenerated/zvfbfa/policy/overloaded/vfwmsac.c
new file mode 100644
index 0000000..93721f6
--- /dev/null
+++ b/clang/test/CodeGen/RISCV/rvv-intrinsics-autogenerated/zvfbfa/policy/overloaded/vfwmsac.c
@@ -0,0 +1,977 @@
+// NOTE: Assertions have been autogenerated by utils/update_cc_test_checks.py UTC_ARGS: --version 5
+// REQUIRES: riscv-registered-target
+// RUN: %clang_cc1 -triple riscv64 -target-feature +v \
+// RUN: -target-feature +experimental-zvfbfa -disable-O0-optnone \
+// RUN: -emit-llvm %s -o - | opt -S -passes=mem2reg | \
+// RUN: FileCheck --check-prefix=CHECK-RV64 %s
+
+#include <riscv_vector.h>
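+// Note on the IR checked below: the tail/mask policy suffix of each intrinsic
+// maps to the trailing policy operand of the generated llvm.riscv.vfwmsac*
+// call (_tu and _tum lower with 2, _tumu with 0, _mu with 1), and the frm
+// operand is 7 (dynamic rounding mode) unless an explicit mode is passed.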
+
+// CHECK-RV64-LABEL: define dso_local <vscale x 1 x float> @test_vfwmsac_vv_bf16mf4_f32mf2_tu(
+// CHECK-RV64-SAME: <vscale x 1 x float> [[VD:%.*]], <vscale x 1 x bfloat> [[VS1:%.*]], <vscale x 1 x bfloat> [[VS2:%.*]], i64 noundef [[VL:%.*]]) #[[ATTR0:[0-9]+]] {
+// CHECK-RV64-NEXT: [[ENTRY:.*:]]
+// CHECK-RV64-NEXT: [[TMP0:%.*]] = call <vscale x 1 x float> @llvm.riscv.vfwmsac.nxv1f32.nxv1bf16.nxv1bf16.i64(<vscale x 1 x float> [[VD]], <vscale x 1 x bfloat> [[VS1]], <vscale x 1 x bfloat> [[VS2]], i64 7, i64 [[VL]], i64 2)
+// CHECK-RV64-NEXT: ret <vscale x 1 x float> [[TMP0]]
+//
+vfloat32mf2_t test_vfwmsac_vv_bf16mf4_f32mf2_tu(vfloat32mf2_t vd,
+ vbfloat16mf4_t vs1,
+ vbfloat16mf4_t vs2, size_t vl) {
+ return __riscv_vfwmsac_tu(vd, vs1, vs2, vl);
+}
+
+// CHECK-RV64-LABEL: define dso_local <vscale x 1 x float> @test_vfwmsac_vf_bf16mf4_f32mf2_tu(
+// CHECK-RV64-SAME: <vscale x 1 x float> [[VD:%.*]], bfloat noundef [[VS1:%.*]], <vscale x 1 x bfloat> [[VS2:%.*]], i64 noundef [[VL:%.*]]) #[[ATTR0]] {
+// CHECK-RV64-NEXT: [[ENTRY:.*:]]
+// CHECK-RV64-NEXT: [[TMP0:%.*]] = call <vscale x 1 x float> @llvm.riscv.vfwmsac.nxv1f32.bf16.nxv1bf16.i64(<vscale x 1 x float> [[VD]], bfloat [[VS1]], <vscale x 1 x bfloat> [[VS2]], i64 7, i64 [[VL]], i64 2)
+// CHECK-RV64-NEXT: ret <vscale x 1 x float> [[TMP0]]
+//
+vfloat32mf2_t test_vfwmsac_vf_bf16mf4_f32mf2_tu(vfloat32mf2_t vd, __bf16 vs1,
+ vbfloat16mf4_t vs2, size_t vl) {
+ return __riscv_vfwmsac_tu(vd, vs1, vs2, vl);
+}
+
+// CHECK-RV64-LABEL: define dso_local <vscale x 2 x float> @test_vfwmsac_vv_bf16mf2_f32m1_tu(
+// CHECK-RV64-SAME: <vscale x 2 x float> [[VD:%.*]], <vscale x 2 x bfloat> [[VS1:%.*]], <vscale x 2 x bfloat> [[VS2:%.*]], i64 noundef [[VL:%.*]]) #[[ATTR0]] {
+// CHECK-RV64-NEXT: [[ENTRY:.*:]]
+// CHECK-RV64-NEXT: [[TMP0:%.*]] = call <vscale x 2 x float> @llvm.riscv.vfwmsac.nxv2f32.nxv2bf16.nxv2bf16.i64(<vscale x 2 x float> [[VD]], <vscale x 2 x bfloat> [[VS1]], <vscale x 2 x bfloat> [[VS2]], i64 7, i64 [[VL]], i64 2)
+// CHECK-RV64-NEXT: ret <vscale x 2 x float> [[TMP0]]
+//
+vfloat32m1_t test_vfwmsac_vv_bf16mf2_f32m1_tu(vfloat32m1_t vd,
+ vbfloat16mf2_t vs1,
+ vbfloat16mf2_t vs2, size_t vl) {
+ return __riscv_vfwmsac_tu(vd, vs1, vs2, vl);
+}
+
+// CHECK-RV64-LABEL: define dso_local <vscale x 2 x float> @test_vfwmsac_vf_bf16mf2_f32m1_tu(
+// CHECK-RV64-SAME: <vscale x 2 x float> [[VD:%.*]], bfloat noundef [[VS1:%.*]], <vscale x 2 x bfloat> [[VS2:%.*]], i64 noundef [[VL:%.*]]) #[[ATTR0]] {
+// CHECK-RV64-NEXT: [[ENTRY:.*:]]
+// CHECK-RV64-NEXT: [[TMP0:%.*]] = call <vscale x 2 x float> @llvm.riscv.vfwmsac.nxv2f32.bf16.nxv2bf16.i64(<vscale x 2 x float> [[VD]], bfloat [[VS1]], <vscale x 2 x bfloat> [[VS2]], i64 7, i64 [[VL]], i64 2)
+// CHECK-RV64-NEXT: ret <vscale x 2 x float> [[TMP0]]
+//
+vfloat32m1_t test_vfwmsac_vf_bf16mf2_f32m1_tu(vfloat32m1_t vd, __bf16 vs1,
+ vbfloat16mf2_t vs2, size_t vl) {
+ return __riscv_vfwmsac_tu(vd, vs1, vs2, vl);
+}
+
+// CHECK-RV64-LABEL: define dso_local <vscale x 4 x float> @test_vfwmsac_vv_bf16m1_f32m2_tu(
+// CHECK-RV64-SAME: <vscale x 4 x float> [[VD:%.*]], <vscale x 4 x bfloat> [[VS1:%.*]], <vscale x 4 x bfloat> [[VS2:%.*]], i64 noundef [[VL:%.*]]) #[[ATTR0]] {
+// CHECK-RV64-NEXT: [[ENTRY:.*:]]
+// CHECK-RV64-NEXT: [[TMP0:%.*]] = call <vscale x 4 x float> @llvm.riscv.vfwmsac.nxv4f32.nxv4bf16.nxv4bf16.i64(<vscale x 4 x float> [[VD]], <vscale x 4 x bfloat> [[VS1]], <vscale x 4 x bfloat> [[VS2]], i64 7, i64 [[VL]], i64 2)
+// CHECK-RV64-NEXT: ret <vscale x 4 x float> [[TMP0]]
+//
+vfloat32m2_t test_vfwmsac_vv_bf16m1_f32m2_tu(vfloat32m2_t vd, vbfloat16m1_t vs1,
+ vbfloat16m1_t vs2, size_t vl) {
+ return __riscv_vfwmsac_tu(vd, vs1, vs2, vl);
+}
+
+// CHECK-RV64-LABEL: define dso_local <vscale x 4 x float> @test_vfwmsac_vf_bf16m1_f32m2_tu(
+// CHECK-RV64-SAME: <vscale x 4 x float> [[VD:%.*]], bfloat noundef [[VS1:%.*]], <vscale x 4 x bfloat> [[VS2:%.*]], i64 noundef [[VL:%.*]]) #[[ATTR0]] {
+// CHECK-RV64-NEXT: [[ENTRY:.*:]]
+// CHECK-RV64-NEXT: [[TMP0:%.*]] = call <vscale x 4 x float> @llvm.riscv.vfwmsac.nxv4f32.bf16.nxv4bf16.i64(<vscale x 4 x float> [[VD]], bfloat [[VS1]], <vscale x 4 x bfloat> [[VS2]], i64 7, i64 [[VL]], i64 2)
+// CHECK-RV64-NEXT: ret <vscale x 4 x float> [[TMP0]]
+//
+vfloat32m2_t test_vfwmsac_vf_bf16m1_f32m2_tu(vfloat32m2_t vd, __bf16 vs1,
+ vbfloat16m1_t vs2, size_t vl) {
+ return __riscv_vfwmsac_tu(vd, vs1, vs2, vl);
+}
+
+// CHECK-RV64-LABEL: define dso_local <vscale x 8 x float> @test_vfwmsac_vv_bf16m2_f32m4_tu(
+// CHECK-RV64-SAME: <vscale x 8 x float> [[VD:%.*]], <vscale x 8 x bfloat> [[VS1:%.*]], <vscale x 8 x bfloat> [[VS2:%.*]], i64 noundef [[VL:%.*]]) #[[ATTR0]] {
+// CHECK-RV64-NEXT: [[ENTRY:.*:]]
+// CHECK-RV64-NEXT: [[TMP0:%.*]] = call <vscale x 8 x float> @llvm.riscv.vfwmsac.nxv8f32.nxv8bf16.nxv8bf16.i64(<vscale x 8 x float> [[VD]], <vscale x 8 x bfloat> [[VS1]], <vscale x 8 x bfloat> [[VS2]], i64 7, i64 [[VL]], i64 2)
+// CHECK-RV64-NEXT: ret <vscale x 8 x float> [[TMP0]]
+//
+vfloat32m4_t test_vfwmsac_vv_bf16m2_f32m4_tu(vfloat32m4_t vd, vbfloat16m2_t vs1,
+ vbfloat16m2_t vs2, size_t vl) {
+ return __riscv_vfwmsac_tu(vd, vs1, vs2, vl);
+}
+
+// CHECK-RV64-LABEL: define dso_local <vscale x 8 x float> @test_vfwmsac_vf_bf16m2_f32m4_tu(
+// CHECK-RV64-SAME: <vscale x 8 x float> [[VD:%.*]], bfloat noundef [[VS1:%.*]], <vscale x 8 x bfloat> [[VS2:%.*]], i64 noundef [[VL:%.*]]) #[[ATTR0]] {
+// CHECK-RV64-NEXT: [[ENTRY:.*:]]
+// CHECK-RV64-NEXT: [[TMP0:%.*]] = call <vscale x 8 x float> @llvm.riscv.vfwmsac.nxv8f32.bf16.nxv8bf16.i64(<vscale x 8 x float> [[VD]], bfloat [[VS1]], <vscale x 8 x bfloat> [[VS2]], i64 7, i64 [[VL]], i64 2)
+// CHECK-RV64-NEXT: ret <vscale x 8 x float> [[TMP0]]
+//
+vfloat32m4_t test_vfwmsac_vf_bf16m2_f32m4_tu(vfloat32m4_t vd, __bf16 vs1,
+ vbfloat16m2_t vs2, size_t vl) {
+ return __riscv_vfwmsac_tu(vd, vs1, vs2, vl);
+}
+
+// CHECK-RV64-LABEL: define dso_local <vscale x 16 x float> @test_vfwmsac_vv_bf16m4_f32m8_tu(
+// CHECK-RV64-SAME: <vscale x 16 x float> [[VD:%.*]], <vscale x 16 x bfloat> [[VS1:%.*]], <vscale x 16 x bfloat> [[VS2:%.*]], i64 noundef [[VL:%.*]]) #[[ATTR0]] {
+// CHECK-RV64-NEXT: [[ENTRY:.*:]]
+// CHECK-RV64-NEXT: [[TMP0:%.*]] = call <vscale x 16 x float> @llvm.riscv.vfwmsac.nxv16f32.nxv16bf16.nxv16bf16.i64(<vscale x 16 x float> [[VD]], <vscale x 16 x bfloat> [[VS1]], <vscale x 16 x bfloat> [[VS2]], i64 7, i64 [[VL]], i64 2)
+// CHECK-RV64-NEXT: ret <vscale x 16 x float> [[TMP0]]
+//
+vfloat32m8_t test_vfwmsac_vv_bf16m4_f32m8_tu(vfloat32m8_t vd, vbfloat16m4_t vs1,
+ vbfloat16m4_t vs2, size_t vl) {
+ return __riscv_vfwmsac_tu(vd, vs1, vs2, vl);
+}
+
+// CHECK-RV64-LABEL: define dso_local <vscale x 16 x float> @test_vfwmsac_vf_bf16m4_f32m8_tu(
+// CHECK-RV64-SAME: <vscale x 16 x float> [[VD:%.*]], bfloat noundef [[VS1:%.*]], <vscale x 16 x bfloat> [[VS2:%.*]], i64 noundef [[VL:%.*]]) #[[ATTR0]] {
+// CHECK-RV64-NEXT: [[ENTRY:.*:]]
+// CHECK-RV64-NEXT: [[TMP0:%.*]] = call <vscale x 16 x float> @llvm.riscv.vfwmsac.nxv16f32.bf16.nxv16bf16.i64(<vscale x 16 x float> [[VD]], bfloat [[VS1]], <vscale x 16 x bfloat> [[VS2]], i64 7, i64 [[VL]], i64 2)
+// CHECK-RV64-NEXT: ret <vscale x 16 x float> [[TMP0]]
+//
+vfloat32m8_t test_vfwmsac_vf_bf16m4_f32m8_tu(vfloat32m8_t vd, __bf16 vs1,
+ vbfloat16m4_t vs2, size_t vl) {
+ return __riscv_vfwmsac_tu(vd, vs1, vs2, vl);
+}
+
+// CHECK-RV64-LABEL: define dso_local <vscale x 1 x float> @test_vfwmsac_vv_bf16mf4_f32mf2_tum(
+// CHECK-RV64-SAME: <vscale x 1 x i1> [[VM:%.*]], <vscale x 1 x float> [[VD:%.*]], <vscale x 1 x bfloat> [[VS1:%.*]], <vscale x 1 x bfloat> [[VS2:%.*]], i64 noundef [[VL:%.*]]) #[[ATTR0]] {
+// CHECK-RV64-NEXT: [[ENTRY:.*:]]
+// CHECK-RV64-NEXT: [[TMP0:%.*]] = call <vscale x 1 x float> @llvm.riscv.vfwmsac.mask.nxv1f32.nxv1bf16.nxv1bf16.i64(<vscale x 1 x float> [[VD]], <vscale x 1 x bfloat> [[VS1]], <vscale x 1 x bfloat> [[VS2]], <vscale x 1 x i1> [[VM]], i64 7, i64 [[VL]], i64 2)
+// CHECK-RV64-NEXT: ret <vscale x 1 x float> [[TMP0]]
+//
+vfloat32mf2_t test_vfwmsac_vv_bf16mf4_f32mf2_tum(vbool64_t vm, vfloat32mf2_t vd,
+ vbfloat16mf4_t vs1,
+ vbfloat16mf4_t vs2,
+ size_t vl) {
+ return __riscv_vfwmsac_tum(vm, vd, vs1, vs2, vl);
+}
+
+// CHECK-RV64-LABEL: define dso_local <vscale x 1 x float> @test_vfwmsac_vf_bf16mf4_f32mf2_tum(
+// CHECK-RV64-SAME: <vscale x 1 x i1> [[VM:%.*]], <vscale x 1 x float> [[VD:%.*]], bfloat noundef [[VS1:%.*]], <vscale x 1 x bfloat> [[VS2:%.*]], i64 noundef [[VL:%.*]]) #[[ATTR0]] {
+// CHECK-RV64-NEXT: [[ENTRY:.*:]]
+// CHECK-RV64-NEXT: [[TMP0:%.*]] = call <vscale x 1 x float> @llvm.riscv.vfwmsac.mask.nxv1f32.bf16.nxv1bf16.i64(<vscale x 1 x float> [[VD]], bfloat [[VS1]], <vscale x 1 x bfloat> [[VS2]], <vscale x 1 x i1> [[VM]], i64 7, i64 [[VL]], i64 2)
+// CHECK-RV64-NEXT: ret <vscale x 1 x float> [[TMP0]]
+//
+vfloat32mf2_t test_vfwmsac_vf_bf16mf4_f32mf2_tum(vbool64_t vm, vfloat32mf2_t vd,
+ __bf16 vs1, vbfloat16mf4_t vs2,
+ size_t vl) {
+ return __riscv_vfwmsac_tum(vm, vd, vs1, vs2, vl);
+}
+
+// CHECK-RV64-LABEL: define dso_local <vscale x 2 x float> @test_vfwmsac_vv_bf16mf2_f32m1_tum(
+// CHECK-RV64-SAME: <vscale x 2 x i1> [[VM:%.*]], <vscale x 2 x float> [[VD:%.*]], <vscale x 2 x bfloat> [[VS1:%.*]], <vscale x 2 x bfloat> [[VS2:%.*]], i64 noundef [[VL:%.*]]) #[[ATTR0]] {
+// CHECK-RV64-NEXT: [[ENTRY:.*:]]
+// CHECK-RV64-NEXT: [[TMP0:%.*]] = call <vscale x 2 x float> @llvm.riscv.vfwmsac.mask.nxv2f32.nxv2bf16.nxv2bf16.i64(<vscale x 2 x float> [[VD]], <vscale x 2 x bfloat> [[VS1]], <vscale x 2 x bfloat> [[VS2]], <vscale x 2 x i1> [[VM]], i64 7, i64 [[VL]], i64 2)
+// CHECK-RV64-NEXT: ret <vscale x 2 x float> [[TMP0]]
+//
+vfloat32m1_t test_vfwmsac_vv_bf16mf2_f32m1_tum(vbool32_t vm, vfloat32m1_t vd,
+ vbfloat16mf2_t vs1,
+ vbfloat16mf2_t vs2, size_t vl) {
+ return __riscv_vfwmsac_tum(vm, vd, vs1, vs2, vl);
+}
+
+// CHECK-RV64-LABEL: define dso_local <vscale x 2 x float> @test_vfwmsac_vf_bf16mf2_f32m1_tum(
+// CHECK-RV64-SAME: <vscale x 2 x i1> [[VM:%.*]], <vscale x 2 x float> [[VD:%.*]], bfloat noundef [[VS1:%.*]], <vscale x 2 x bfloat> [[VS2:%.*]], i64 noundef [[VL:%.*]]) #[[ATTR0]] {
+// CHECK-RV64-NEXT: [[ENTRY:.*:]]
+// CHECK-RV64-NEXT: [[TMP0:%.*]] = call <vscale x 2 x float> @llvm.riscv.vfwmsac.mask.nxv2f32.bf16.nxv2bf16.i64(<vscale x 2 x float> [[VD]], bfloat [[VS1]], <vscale x 2 x bfloat> [[VS2]], <vscale x 2 x i1> [[VM]], i64 7, i64 [[VL]], i64 2)
+// CHECK-RV64-NEXT: ret <vscale x 2 x float> [[TMP0]]
+//
+vfloat32m1_t test_vfwmsac_vf_bf16mf2_f32m1_tum(vbool32_t vm, vfloat32m1_t vd,
+ __bf16 vs1, vbfloat16mf2_t vs2,
+ size_t vl) {
+ return __riscv_vfwmsac_tum(vm, vd, vs1, vs2, vl);
+}
+
+// CHECK-RV64-LABEL: define dso_local <vscale x 4 x float> @test_vfwmsac_vv_bf16m1_f32m2_tum(
+// CHECK-RV64-SAME: <vscale x 4 x i1> [[VM:%.*]], <vscale x 4 x float> [[VD:%.*]], <vscale x 4 x bfloat> [[VS1:%.*]], <vscale x 4 x bfloat> [[VS2:%.*]], i64 noundef [[VL:%.*]]) #[[ATTR0]] {
+// CHECK-RV64-NEXT: [[ENTRY:.*:]]
+// CHECK-RV64-NEXT: [[TMP0:%.*]] = call <vscale x 4 x float> @llvm.riscv.vfwmsac.mask.nxv4f32.nxv4bf16.nxv4bf16.i64(<vscale x 4 x float> [[VD]], <vscale x 4 x bfloat> [[VS1]], <vscale x 4 x bfloat> [[VS2]], <vscale x 4 x i1> [[VM]], i64 7, i64 [[VL]], i64 2)
+// CHECK-RV64-NEXT: ret <vscale x 4 x float> [[TMP0]]
+//
+vfloat32m2_t test_vfwmsac_vv_bf16m1_f32m2_tum(vbool16_t vm, vfloat32m2_t vd,
+ vbfloat16m1_t vs1,
+ vbfloat16m1_t vs2, size_t vl) {
+ return __riscv_vfwmsac_tum(vm, vd, vs1, vs2, vl);
+}
+
+// CHECK-RV64-LABEL: define dso_local <vscale x 4 x float> @test_vfwmsac_vf_bf16m1_f32m2_tum(
+// CHECK-RV64-SAME: <vscale x 4 x i1> [[VM:%.*]], <vscale x 4 x float> [[VD:%.*]], bfloat noundef [[VS1:%.*]], <vscale x 4 x bfloat> [[VS2:%.*]], i64 noundef [[VL:%.*]]) #[[ATTR0]] {
+// CHECK-RV64-NEXT: [[ENTRY:.*:]]
+// CHECK-RV64-NEXT: [[TMP0:%.*]] = call <vscale x 4 x float> @llvm.riscv.vfwmsac.mask.nxv4f32.bf16.nxv4bf16.i64(<vscale x 4 x float> [[VD]], bfloat [[VS1]], <vscale x 4 x bfloat> [[VS2]], <vscale x 4 x i1> [[VM]], i64 7, i64 [[VL]], i64 2)
+// CHECK-RV64-NEXT: ret <vscale x 4 x float> [[TMP0]]
+//
+vfloat32m2_t test_vfwmsac_vf_bf16m1_f32m2_tum(vbool16_t vm, vfloat32m2_t vd,
+ __bf16 vs1, vbfloat16m1_t vs2,
+ size_t vl) {
+ return __riscv_vfwmsac_tum(vm, vd, vs1, vs2, vl);
+}
+
+// CHECK-RV64-LABEL: define dso_local <vscale x 8 x float> @test_vfwmsac_vv_bf16m2_f32m4_tum(
+// CHECK-RV64-SAME: <vscale x 8 x i1> [[VM:%.*]], <vscale x 8 x float> [[VD:%.*]], <vscale x 8 x bfloat> [[VS1:%.*]], <vscale x 8 x bfloat> [[VS2:%.*]], i64 noundef [[VL:%.*]]) #[[ATTR0]] {
+// CHECK-RV64-NEXT: [[ENTRY:.*:]]
+// CHECK-RV64-NEXT: [[TMP0:%.*]] = call <vscale x 8 x float> @llvm.riscv.vfwmsac.mask.nxv8f32.nxv8bf16.nxv8bf16.i64(<vscale x 8 x float> [[VD]], <vscale x 8 x bfloat> [[VS1]], <vscale x 8 x bfloat> [[VS2]], <vscale x 8 x i1> [[VM]], i64 7, i64 [[VL]], i64 2)
+// CHECK-RV64-NEXT: ret <vscale x 8 x float> [[TMP0]]
+//
+vfloat32m4_t test_vfwmsac_vv_bf16m2_f32m4_tum(vbool8_t vm, vfloat32m4_t vd,
+ vbfloat16m2_t vs1,
+ vbfloat16m2_t vs2, size_t vl) {
+ return __riscv_vfwmsac_tum(vm, vd, vs1, vs2, vl);
+}
+
+// CHECK-RV64-LABEL: define dso_local <vscale x 8 x float> @test_vfwmsac_vf_bf16m2_f32m4_tum(
+// CHECK-RV64-SAME: <vscale x 8 x i1> [[VM:%.*]], <vscale x 8 x float> [[VD:%.*]], bfloat noundef [[VS1:%.*]], <vscale x 8 x bfloat> [[VS2:%.*]], i64 noundef [[VL:%.*]]) #[[ATTR0]] {
+// CHECK-RV64-NEXT: [[ENTRY:.*:]]
+// CHECK-RV64-NEXT: [[TMP0:%.*]] = call <vscale x 8 x float> @llvm.riscv.vfwmsac.mask.nxv8f32.bf16.nxv8bf16.i64(<vscale x 8 x float> [[VD]], bfloat [[VS1]], <vscale x 8 x bfloat> [[VS2]], <vscale x 8 x i1> [[VM]], i64 7, i64 [[VL]], i64 2)
+// CHECK-RV64-NEXT: ret <vscale x 8 x float> [[TMP0]]
+//
+vfloat32m4_t test_vfwmsac_vf_bf16m2_f32m4_tum(vbool8_t vm, vfloat32m4_t vd,
+ __bf16 vs1, vbfloat16m2_t vs2,
+ size_t vl) {
+ return __riscv_vfwmsac_tum(vm, vd, vs1, vs2, vl);
+}
+
+// CHECK-RV64-LABEL: define dso_local <vscale x 16 x float> @test_vfwmsac_vv_bf16m4_f32m8_tum(
+// CHECK-RV64-SAME: <vscale x 16 x i1> [[VM:%.*]], <vscale x 16 x float> [[VD:%.*]], <vscale x 16 x bfloat> [[VS1:%.*]], <vscale x 16 x bfloat> [[VS2:%.*]], i64 noundef [[VL:%.*]]) #[[ATTR0]] {
+// CHECK-RV64-NEXT: [[ENTRY:.*:]]
+// CHECK-RV64-NEXT: [[TMP0:%.*]] = call <vscale x 16 x float> @llvm.riscv.vfwmsac.mask.nxv16f32.nxv16bf16.nxv16bf16.i64(<vscale x 16 x float> [[VD]], <vscale x 16 x bfloat> [[VS1]], <vscale x 16 x bfloat> [[VS2]], <vscale x 16 x i1> [[VM]], i64 7, i64 [[VL]], i64 2)
+// CHECK-RV64-NEXT: ret <vscale x 16 x float> [[TMP0]]
+//
+vfloat32m8_t test_vfwmsac_vv_bf16m4_f32m8_tum(vbool4_t vm, vfloat32m8_t vd,
+ vbfloat16m4_t vs1,
+ vbfloat16m4_t vs2, size_t vl) {
+ return __riscv_vfwmsac_tum(vm, vd, vs1, vs2, vl);
+}
+
+// CHECK-RV64-LABEL: define dso_local <vscale x 16 x float> @test_vfwmsac_vf_bf16m4_f32m8_tum(
+// CHECK-RV64-SAME: <vscale x 16 x i1> [[VM:%.*]], <vscale x 16 x float> [[VD:%.*]], bfloat noundef [[VS1:%.*]], <vscale x 16 x bfloat> [[VS2:%.*]], i64 noundef [[VL:%.*]]) #[[ATTR0]] {
+// CHECK-RV64-NEXT: [[ENTRY:.*:]]
+// CHECK-RV64-NEXT: [[TMP0:%.*]] = call <vscale x 16 x float> @llvm.riscv.vfwmsac.mask.nxv16f32.bf16.nxv16bf16.i64(<vscale x 16 x float> [[VD]], bfloat [[VS1]], <vscale x 16 x bfloat> [[VS2]], <vscale x 16 x i1> [[VM]], i64 7, i64 [[VL]], i64 2)
+// CHECK-RV64-NEXT: ret <vscale x 16 x float> [[TMP0]]
+//
+vfloat32m8_t test_vfwmsac_vf_bf16m4_f32m8_tum(vbool4_t vm, vfloat32m8_t vd,
+ __bf16 vs1, vbfloat16m4_t vs2,
+ size_t vl) {
+ return __riscv_vfwmsac_tum(vm, vd, vs1, vs2, vl);
+}
+
+// CHECK-RV64-LABEL: define dso_local <vscale x 1 x float> @test_vfwmsac_vv_bf16mf4_f32mf2_tumu(
+// CHECK-RV64-SAME: <vscale x 1 x i1> [[VM:%.*]], <vscale x 1 x float> [[VD:%.*]], <vscale x 1 x bfloat> [[VS1:%.*]], <vscale x 1 x bfloat> [[VS2:%.*]], i64 noundef [[VL:%.*]]) #[[ATTR0]] {
+// CHECK-RV64-NEXT: [[ENTRY:.*:]]
+// CHECK-RV64-NEXT: [[TMP0:%.*]] = call <vscale x 1 x float> @llvm.riscv.vfwmsac.mask.nxv1f32.nxv1bf16.nxv1bf16.i64(<vscale x 1 x float> [[VD]], <vscale x 1 x bfloat> [[VS1]], <vscale x 1 x bfloat> [[VS2]], <vscale x 1 x i1> [[VM]], i64 7, i64 [[VL]], i64 0)
+// CHECK-RV64-NEXT: ret <vscale x 1 x float> [[TMP0]]
+//
+vfloat32mf2_t test_vfwmsac_vv_bf16mf4_f32mf2_tumu(vbool64_t vm,
+ vfloat32mf2_t vd,
+ vbfloat16mf4_t vs1,
+ vbfloat16mf4_t vs2,
+ size_t vl) {
+ return __riscv_vfwmsac_tumu(vm, vd, vs1, vs2, vl);
+}
+
+// CHECK-RV64-LABEL: define dso_local <vscale x 1 x float> @test_vfwmsac_vf_bf16mf4_f32mf2_tumu(
+// CHECK-RV64-SAME: <vscale x 1 x i1> [[VM:%.*]], <vscale x 1 x float> [[VD:%.*]], bfloat noundef [[VS1:%.*]], <vscale x 1 x bfloat> [[VS2:%.*]], i64 noundef [[VL:%.*]]) #[[ATTR0]] {
+// CHECK-RV64-NEXT: [[ENTRY:.*:]]
+// CHECK-RV64-NEXT: [[TMP0:%.*]] = call <vscale x 1 x float> @llvm.riscv.vfwmsac.mask.nxv1f32.bf16.nxv1bf16.i64(<vscale x 1 x float> [[VD]], bfloat [[VS1]], <vscale x 1 x bfloat> [[VS2]], <vscale x 1 x i1> [[VM]], i64 7, i64 [[VL]], i64 0)
+// CHECK-RV64-NEXT: ret <vscale x 1 x float> [[TMP0]]
+//
+vfloat32mf2_t test_vfwmsac_vf_bf16mf4_f32mf2_tumu(vbool64_t vm,
+ vfloat32mf2_t vd, __bf16 vs1,
+ vbfloat16mf4_t vs2,
+ size_t vl) {
+ return __riscv_vfwmsac_tumu(vm, vd, vs1, vs2, vl);
+}
+
+// CHECK-RV64-LABEL: define dso_local <vscale x 2 x float> @test_vfwmsac_vv_bf16mf2_f32m1_tumu(
+// CHECK-RV64-SAME: <vscale x 2 x i1> [[VM:%.*]], <vscale x 2 x float> [[VD:%.*]], <vscale x 2 x bfloat> [[VS1:%.*]], <vscale x 2 x bfloat> [[VS2:%.*]], i64 noundef [[VL:%.*]]) #[[ATTR0]] {
+// CHECK-RV64-NEXT: [[ENTRY:.*:]]
+// CHECK-RV64-NEXT: [[TMP0:%.*]] = call <vscale x 2 x float> @llvm.riscv.vfwmsac.mask.nxv2f32.nxv2bf16.nxv2bf16.i64(<vscale x 2 x float> [[VD]], <vscale x 2 x bfloat> [[VS1]], <vscale x 2 x bfloat> [[VS2]], <vscale x 2 x i1> [[VM]], i64 7, i64 [[VL]], i64 0)
+// CHECK-RV64-NEXT: ret <vscale x 2 x float> [[TMP0]]
+//
+vfloat32m1_t test_vfwmsac_vv_bf16mf2_f32m1_tumu(vbool32_t vm, vfloat32m1_t vd,
+ vbfloat16mf2_t vs1,
+ vbfloat16mf2_t vs2, size_t vl) {
+ return __riscv_vfwmsac_tumu(vm, vd, vs1, vs2, vl);
+}
+
+// CHECK-RV64-LABEL: define dso_local <vscale x 2 x float> @test_vfwmsac_vf_bf16mf2_f32m1_tumu(
+// CHECK-RV64-SAME: <vscale x 2 x i1> [[VM:%.*]], <vscale x 2 x float> [[VD:%.*]], bfloat noundef [[VS1:%.*]], <vscale x 2 x bfloat> [[VS2:%.*]], i64 noundef [[VL:%.*]]) #[[ATTR0]] {
+// CHECK-RV64-NEXT: [[ENTRY:.*:]]
+// CHECK-RV64-NEXT: [[TMP0:%.*]] = call <vscale x 2 x float> @llvm.riscv.vfwmsac.mask.nxv2f32.bf16.nxv2bf16.i64(<vscale x 2 x float> [[VD]], bfloat [[VS1]], <vscale x 2 x bfloat> [[VS2]], <vscale x 2 x i1> [[VM]], i64 7, i64 [[VL]], i64 0)
+// CHECK-RV64-NEXT: ret <vscale x 2 x float> [[TMP0]]
+//
+vfloat32m1_t test_vfwmsac_vf_bf16mf2_f32m1_tumu(vbool32_t vm, vfloat32m1_t vd,
+ __bf16 vs1, vbfloat16mf2_t vs2,
+ size_t vl) {
+ return __riscv_vfwmsac_tumu(vm, vd, vs1, vs2, vl);
+}
+
+// CHECK-RV64-LABEL: define dso_local <vscale x 4 x float> @test_vfwmsac_vv_bf16m1_f32m2_tumu(
+// CHECK-RV64-SAME: <vscale x 4 x i1> [[VM:%.*]], <vscale x 4 x float> [[VD:%.*]], <vscale x 4 x bfloat> [[VS1:%.*]], <vscale x 4 x bfloat> [[VS2:%.*]], i64 noundef [[VL:%.*]]) #[[ATTR0]] {
+// CHECK-RV64-NEXT: [[ENTRY:.*:]]
+// CHECK-RV64-NEXT: [[TMP0:%.*]] = call <vscale x 4 x float> @llvm.riscv.vfwmsac.mask.nxv4f32.nxv4bf16.nxv4bf16.i64(<vscale x 4 x float> [[VD]], <vscale x 4 x bfloat> [[VS1]], <vscale x 4 x bfloat> [[VS2]], <vscale x 4 x i1> [[VM]], i64 7, i64 [[VL]], i64 0)
+// CHECK-RV64-NEXT: ret <vscale x 4 x float> [[TMP0]]
+//
+vfloat32m2_t test_vfwmsac_vv_bf16m1_f32m2_tumu(vbool16_t vm, vfloat32m2_t vd,
+ vbfloat16m1_t vs1,
+ vbfloat16m1_t vs2, size_t vl) {
+ return __riscv_vfwmsac_tumu(vm, vd, vs1, vs2, vl);
+}
+
+// CHECK-RV64-LABEL: define dso_local <vscale x 4 x float> @test_vfwmsac_vf_bf16m1_f32m2_tumu(
+// CHECK-RV64-SAME: <vscale x 4 x i1> [[VM:%.*]], <vscale x 4 x float> [[VD:%.*]], bfloat noundef [[VS1:%.*]], <vscale x 4 x bfloat> [[VS2:%.*]], i64 noundef [[VL:%.*]]) #[[ATTR0]] {
+// CHECK-RV64-NEXT: [[ENTRY:.*:]]
+// CHECK-RV64-NEXT: [[TMP0:%.*]] = call <vscale x 4 x float> @llvm.riscv.vfwmsac.mask.nxv4f32.bf16.nxv4bf16.i64(<vscale x 4 x float> [[VD]], bfloat [[VS1]], <vscale x 4 x bfloat> [[VS2]], <vscale x 4 x i1> [[VM]], i64 7, i64 [[VL]], i64 0)
+// CHECK-RV64-NEXT: ret <vscale x 4 x float> [[TMP0]]
+//
+vfloat32m2_t test_vfwmsac_vf_bf16m1_f32m2_tumu(vbool16_t vm, vfloat32m2_t vd,
+ __bf16 vs1, vbfloat16m1_t vs2,
+ size_t vl) {
+ return __riscv_vfwmsac_tumu(vm, vd, vs1, vs2, vl);
+}
+
+// CHECK-RV64-LABEL: define dso_local <vscale x 8 x float> @test_vfwmsac_vv_bf16m2_f32m4_tumu(
+// CHECK-RV64-SAME: <vscale x 8 x i1> [[VM:%.*]], <vscale x 8 x float> [[VD:%.*]], <vscale x 8 x bfloat> [[VS1:%.*]], <vscale x 8 x bfloat> [[VS2:%.*]], i64 noundef [[VL:%.*]]) #[[ATTR0]] {
+// CHECK-RV64-NEXT: [[ENTRY:.*:]]
+// CHECK-RV64-NEXT: [[TMP0:%.*]] = call <vscale x 8 x float> @llvm.riscv.vfwmsac.mask.nxv8f32.nxv8bf16.nxv8bf16.i64(<vscale x 8 x float> [[VD]], <vscale x 8 x bfloat> [[VS1]], <vscale x 8 x bfloat> [[VS2]], <vscale x 8 x i1> [[VM]], i64 7, i64 [[VL]], i64 0)
+// CHECK-RV64-NEXT: ret <vscale x 8 x float> [[TMP0]]
+//
+vfloat32m4_t test_vfwmsac_vv_bf16m2_f32m4_tumu(vbool8_t vm, vfloat32m4_t vd,
+ vbfloat16m2_t vs1,
+ vbfloat16m2_t vs2, size_t vl) {
+ return __riscv_vfwmsac_tumu(vm, vd, vs1, vs2, vl);
+}
+
+// CHECK-RV64-LABEL: define dso_local <vscale x 8 x float> @test_vfwmsac_vf_bf16m2_f32m4_tumu(
+// CHECK-RV64-SAME: <vscale x 8 x i1> [[VM:%.*]], <vscale x 8 x float> [[VD:%.*]], bfloat noundef [[VS1:%.*]], <vscale x 8 x bfloat> [[VS2:%.*]], i64 noundef [[VL:%.*]]) #[[ATTR0]] {
+// CHECK-RV64-NEXT: [[ENTRY:.*:]]
+// CHECK-RV64-NEXT: [[TMP0:%.*]] = call <vscale x 8 x float> @llvm.riscv.vfwmsac.mask.nxv8f32.bf16.nxv8bf16.i64(<vscale x 8 x float> [[VD]], bfloat [[VS1]], <vscale x 8 x bfloat> [[VS2]], <vscale x 8 x i1> [[VM]], i64 7, i64 [[VL]], i64 0)
+// CHECK-RV64-NEXT: ret <vscale x 8 x float> [[TMP0]]
+//
+vfloat32m4_t test_vfwmsac_vf_bf16m2_f32m4_tumu(vbool8_t vm, vfloat32m4_t vd,
+ __bf16 vs1, vbfloat16m2_t vs2,
+ size_t vl) {
+ return __riscv_vfwmsac_tumu(vm, vd, vs1, vs2, vl);
+}
+
+// CHECK-RV64-LABEL: define dso_local <vscale x 16 x float> @test_vfwmsac_vv_bf16m4_f32m8_tumu(
+// CHECK-RV64-SAME: <vscale x 16 x i1> [[VM:%.*]], <vscale x 16 x float> [[VD:%.*]], <vscale x 16 x bfloat> [[VS1:%.*]], <vscale x 16 x bfloat> [[VS2:%.*]], i64 noundef [[VL:%.*]]) #[[ATTR0]] {
+// CHECK-RV64-NEXT: [[ENTRY:.*:]]
+// CHECK-RV64-NEXT: [[TMP0:%.*]] = call <vscale x 16 x float> @llvm.riscv.vfwmsac.mask.nxv16f32.nxv16bf16.nxv16bf16.i64(<vscale x 16 x float> [[VD]], <vscale x 16 x bfloat> [[VS1]], <vscale x 16 x bfloat> [[VS2]], <vscale x 16 x i1> [[VM]], i64 7, i64 [[VL]], i64 0)
+// CHECK-RV64-NEXT: ret <vscale x 16 x float> [[TMP0]]
+//
+vfloat32m8_t test_vfwmsac_vv_bf16m4_f32m8_tumu(vbool4_t vm, vfloat32m8_t vd,
+ vbfloat16m4_t vs1,
+ vbfloat16m4_t vs2, size_t vl) {
+ return __riscv_vfwmsac_tumu(vm, vd, vs1, vs2, vl);
+}
+
+// CHECK-RV64-LABEL: define dso_local <vscale x 16 x float> @test_vfwmsac_vf_bf16m4_f32m8_tumu(
+// CHECK-RV64-SAME: <vscale x 16 x i1> [[VM:%.*]], <vscale x 16 x float> [[VD:%.*]], bfloat noundef [[VS1:%.*]], <vscale x 16 x bfloat> [[VS2:%.*]], i64 noundef [[VL:%.*]]) #[[ATTR0]] {
+// CHECK-RV64-NEXT: [[ENTRY:.*:]]
+// CHECK-RV64-NEXT: [[TMP0:%.*]] = call <vscale x 16 x float> @llvm.riscv.vfwmsac.mask.nxv16f32.bf16.nxv16bf16.i64(<vscale x 16 x float> [[VD]], bfloat [[VS1]], <vscale x 16 x bfloat> [[VS2]], <vscale x 16 x i1> [[VM]], i64 7, i64 [[VL]], i64 0)
+// CHECK-RV64-NEXT: ret <vscale x 16 x float> [[TMP0]]
+//
+vfloat32m8_t test_vfwmsac_vf_bf16m4_f32m8_tumu(vbool4_t vm, vfloat32m8_t vd,
+ __bf16 vs1, vbfloat16m4_t vs2,
+ size_t vl) {
+ return __riscv_vfwmsac_tumu(vm, vd, vs1, vs2, vl);
+}
+
+// CHECK-RV64-LABEL: define dso_local <vscale x 1 x float> @test_vfwmsac_vv_bf16mf4_f32mf2_mu(
+// CHECK-RV64-SAME: <vscale x 1 x i1> [[VM:%.*]], <vscale x 1 x float> [[VD:%.*]], <vscale x 1 x bfloat> [[VS1:%.*]], <vscale x 1 x bfloat> [[VS2:%.*]], i64 noundef [[VL:%.*]]) #[[ATTR0]] {
+// CHECK-RV64-NEXT: [[ENTRY:.*:]]
+// CHECK-RV64-NEXT: [[TMP0:%.*]] = call <vscale x 1 x float> @llvm.riscv.vfwmsac.mask.nxv1f32.nxv1bf16.nxv1bf16.i64(<vscale x 1 x float> [[VD]], <vscale x 1 x bfloat> [[VS1]], <vscale x 1 x bfloat> [[VS2]], <vscale x 1 x i1> [[VM]], i64 7, i64 [[VL]], i64 1)
+// CHECK-RV64-NEXT: ret <vscale x 1 x float> [[TMP0]]
+//
+vfloat32mf2_t test_vfwmsac_vv_bf16mf4_f32mf2_mu(vbool64_t vm, vfloat32mf2_t vd,
+ vbfloat16mf4_t vs1,
+ vbfloat16mf4_t vs2, size_t vl) {
+ return __riscv_vfwmsac_mu(vm, vd, vs1, vs2, vl);
+}
+
+// CHECK-RV64-LABEL: define dso_local <vscale x 1 x float> @test_vfwmsac_vf_bf16mf4_f32mf2_mu(
+// CHECK-RV64-SAME: <vscale x 1 x i1> [[VM:%.*]], <vscale x 1 x float> [[VD:%.*]], bfloat noundef [[VS1:%.*]], <vscale x 1 x bfloat> [[VS2:%.*]], i64 noundef [[VL:%.*]]) #[[ATTR0]] {
+// CHECK-RV64-NEXT: [[ENTRY:.*:]]
+// CHECK-RV64-NEXT: [[TMP0:%.*]] = call <vscale x 1 x float> @llvm.riscv.vfwmsac.mask.nxv1f32.bf16.nxv1bf16.i64(<vscale x 1 x float> [[VD]], bfloat [[VS1]], <vscale x 1 x bfloat> [[VS2]], <vscale x 1 x i1> [[VM]], i64 7, i64 [[VL]], i64 1)
+// CHECK-RV64-NEXT: ret <vscale x 1 x float> [[TMP0]]
+//
+vfloat32mf2_t test_vfwmsac_vf_bf16mf4_f32mf2_mu(vbool64_t vm, vfloat32mf2_t vd,
+ __bf16 vs1, vbfloat16mf4_t vs2,
+ size_t vl) {
+ return __riscv_vfwmsac_mu(vm, vd, vs1, vs2, vl);
+}
+
+// CHECK-RV64-LABEL: define dso_local <vscale x 2 x float> @test_vfwmsac_vv_bf16mf2_f32m1_mu(
+// CHECK-RV64-SAME: <vscale x 2 x i1> [[VM:%.*]], <vscale x 2 x float> [[VD:%.*]], <vscale x 2 x bfloat> [[VS1:%.*]], <vscale x 2 x bfloat> [[VS2:%.*]], i64 noundef [[VL:%.*]]) #[[ATTR0]] {
+// CHECK-RV64-NEXT: [[ENTRY:.*:]]
+// CHECK-RV64-NEXT: [[TMP0:%.*]] = call <vscale x 2 x float> @llvm.riscv.vfwmsac.mask.nxv2f32.nxv2bf16.nxv2bf16.i64(<vscale x 2 x float> [[VD]], <vscale x 2 x bfloat> [[VS1]], <vscale x 2 x bfloat> [[VS2]], <vscale x 2 x i1> [[VM]], i64 7, i64 [[VL]], i64 1)
+// CHECK-RV64-NEXT: ret <vscale x 2 x float> [[TMP0]]
+//
+vfloat32m1_t test_vfwmsac_vv_bf16mf2_f32m1_mu(vbool32_t vm, vfloat32m1_t vd,
+ vbfloat16mf2_t vs1,
+ vbfloat16mf2_t vs2, size_t vl) {
+ return __riscv_vfwmsac_mu(vm, vd, vs1, vs2, vl);
+}
+
+// CHECK-RV64-LABEL: define dso_local <vscale x 2 x float> @test_vfwmsac_vf_bf16mf2_f32m1_mu(
+// CHECK-RV64-SAME: <vscale x 2 x i1> [[VM:%.*]], <vscale x 2 x float> [[VD:%.*]], bfloat noundef [[VS1:%.*]], <vscale x 2 x bfloat> [[VS2:%.*]], i64 noundef [[VL:%.*]]) #[[ATTR0]] {
+// CHECK-RV64-NEXT: [[ENTRY:.*:]]
+// CHECK-RV64-NEXT: [[TMP0:%.*]] = call <vscale x 2 x float> @llvm.riscv.vfwmsac.mask.nxv2f32.bf16.nxv2bf16.i64(<vscale x 2 x float> [[VD]], bfloat [[VS1]], <vscale x 2 x bfloat> [[VS2]], <vscale x 2 x i1> [[VM]], i64 7, i64 [[VL]], i64 1)
+// CHECK-RV64-NEXT: ret <vscale x 2 x float> [[TMP0]]
+//
+vfloat32m1_t test_vfwmsac_vf_bf16mf2_f32m1_mu(vbool32_t vm, vfloat32m1_t vd,
+ __bf16 vs1, vbfloat16mf2_t vs2,
+ size_t vl) {
+ return __riscv_vfwmsac_mu(vm, vd, vs1, vs2, vl);
+}
+
+// CHECK-RV64-LABEL: define dso_local <vscale x 4 x float> @test_vfwmsac_vv_bf16m1_f32m2_mu(
+// CHECK-RV64-SAME: <vscale x 4 x i1> [[VM:%.*]], <vscale x 4 x float> [[VD:%.*]], <vscale x 4 x bfloat> [[VS1:%.*]], <vscale x 4 x bfloat> [[VS2:%.*]], i64 noundef [[VL:%.*]]) #[[ATTR0]] {
+// CHECK-RV64-NEXT: [[ENTRY:.*:]]
+// CHECK-RV64-NEXT: [[TMP0:%.*]] = call <vscale x 4 x float> @llvm.riscv.vfwmsac.mask.nxv4f32.nxv4bf16.nxv4bf16.i64(<vscale x 4 x float> [[VD]], <vscale x 4 x bfloat> [[VS1]], <vscale x 4 x bfloat> [[VS2]], <vscale x 4 x i1> [[VM]], i64 7, i64 [[VL]], i64 1)
+// CHECK-RV64-NEXT: ret <vscale x 4 x float> [[TMP0]]
+//
+vfloat32m2_t test_vfwmsac_vv_bf16m1_f32m2_mu(vbool16_t vm, vfloat32m2_t vd,
+ vbfloat16m1_t vs1,
+ vbfloat16m1_t vs2, size_t vl) {
+ return __riscv_vfwmsac_mu(vm, vd, vs1, vs2, vl);
+}
+
+// CHECK-RV64-LABEL: define dso_local <vscale x 4 x float> @test_vfwmsac_vf_bf16m1_f32m2_mu(
+// CHECK-RV64-SAME: <vscale x 4 x i1> [[VM:%.*]], <vscale x 4 x float> [[VD:%.*]], bfloat noundef [[VS1:%.*]], <vscale x 4 x bfloat> [[VS2:%.*]], i64 noundef [[VL:%.*]]) #[[ATTR0]] {
+// CHECK-RV64-NEXT: [[ENTRY:.*:]]
+// CHECK-RV64-NEXT: [[TMP0:%.*]] = call <vscale x 4 x float> @llvm.riscv.vfwmsac.mask.nxv4f32.bf16.nxv4bf16.i64(<vscale x 4 x float> [[VD]], bfloat [[VS1]], <vscale x 4 x bfloat> [[VS2]], <vscale x 4 x i1> [[VM]], i64 7, i64 [[VL]], i64 1)
+// CHECK-RV64-NEXT: ret <vscale x 4 x float> [[TMP0]]
+//
+vfloat32m2_t test_vfwmsac_vf_bf16m1_f32m2_mu(vbool16_t vm, vfloat32m2_t vd,
+ __bf16 vs1, vbfloat16m1_t vs2,
+ size_t vl) {
+ return __riscv_vfwmsac_mu(vm, vd, vs1, vs2, vl);
+}
+
+// CHECK-RV64-LABEL: define dso_local <vscale x 8 x float> @test_vfwmsac_vv_bf16m2_f32m4_mu(
+// CHECK-RV64-SAME: <vscale x 8 x i1> [[VM:%.*]], <vscale x 8 x float> [[VD:%.*]], <vscale x 8 x bfloat> [[VS1:%.*]], <vscale x 8 x bfloat> [[VS2:%.*]], i64 noundef [[VL:%.*]]) #[[ATTR0]] {
+// CHECK-RV64-NEXT: [[ENTRY:.*:]]
+// CHECK-RV64-NEXT: [[TMP0:%.*]] = call <vscale x 8 x float> @llvm.riscv.vfwmsac.mask.nxv8f32.nxv8bf16.nxv8bf16.i64(<vscale x 8 x float> [[VD]], <vscale x 8 x bfloat> [[VS1]], <vscale x 8 x bfloat> [[VS2]], <vscale x 8 x i1> [[VM]], i64 7, i64 [[VL]], i64 1)
+// CHECK-RV64-NEXT: ret <vscale x 8 x float> [[TMP0]]
+//
+vfloat32m4_t test_vfwmsac_vv_bf16m2_f32m4_mu(vbool8_t vm, vfloat32m4_t vd,
+ vbfloat16m2_t vs1,
+ vbfloat16m2_t vs2, size_t vl) {
+ return __riscv_vfwmsac_mu(vm, vd, vs1, vs2, vl);
+}
+
+// CHECK-RV64-LABEL: define dso_local <vscale x 8 x float> @test_vfwmsac_vf_bf16m2_f32m4_mu(
+// CHECK-RV64-SAME: <vscale x 8 x i1> [[VM:%.*]], <vscale x 8 x float> [[VD:%.*]], bfloat noundef [[VS1:%.*]], <vscale x 8 x bfloat> [[VS2:%.*]], i64 noundef [[VL:%.*]]) #[[ATTR0]] {
+// CHECK-RV64-NEXT: [[ENTRY:.*:]]
+// CHECK-RV64-NEXT: [[TMP0:%.*]] = call <vscale x 8 x float> @llvm.riscv.vfwmsac.mask.nxv8f32.bf16.nxv8bf16.i64(<vscale x 8 x float> [[VD]], bfloat [[VS1]], <vscale x 8 x bfloat> [[VS2]], <vscale x 8 x i1> [[VM]], i64 7, i64 [[VL]], i64 1)
+// CHECK-RV64-NEXT: ret <vscale x 8 x float> [[TMP0]]
+//
+vfloat32m4_t test_vfwmsac_vf_bf16m2_f32m4_mu(vbool8_t vm, vfloat32m4_t vd,
+ __bf16 vs1, vbfloat16m2_t vs2,
+ size_t vl) {
+ return __riscv_vfwmsac_mu(vm, vd, vs1, vs2, vl);
+}
+
+// CHECK-RV64-LABEL: define dso_local <vscale x 16 x float> @test_vfwmsac_vv_bf16m4_f32m8_mu(
+// CHECK-RV64-SAME: <vscale x 16 x i1> [[VM:%.*]], <vscale x 16 x float> [[VD:%.*]], <vscale x 16 x bfloat> [[VS1:%.*]], <vscale x 16 x bfloat> [[VS2:%.*]], i64 noundef [[VL:%.*]]) #[[ATTR0]] {
+// CHECK-RV64-NEXT: [[ENTRY:.*:]]
+// CHECK-RV64-NEXT: [[TMP0:%.*]] = call <vscale x 16 x float> @llvm.riscv.vfwmsac.mask.nxv16f32.nxv16bf16.nxv16bf16.i64(<vscale x 16 x float> [[VD]], <vscale x 16 x bfloat> [[VS1]], <vscale x 16 x bfloat> [[VS2]], <vscale x 16 x i1> [[VM]], i64 7, i64 [[VL]], i64 1)
+// CHECK-RV64-NEXT: ret <vscale x 16 x float> [[TMP0]]
+//
+vfloat32m8_t test_vfwmsac_vv_bf16m4_f32m8_mu(vbool4_t vm, vfloat32m8_t vd,
+ vbfloat16m4_t vs1,
+ vbfloat16m4_t vs2, size_t vl) {
+ return __riscv_vfwmsac_mu(vm, vd, vs1, vs2, vl);
+}
+
+// CHECK-RV64-LABEL: define dso_local <vscale x 16 x float> @test_vfwmsac_vf_bf16m4_f32m8_mu(
+// CHECK-RV64-SAME: <vscale x 16 x i1> [[VM:%.*]], <vscale x 16 x float> [[VD:%.*]], bfloat noundef [[VS1:%.*]], <vscale x 16 x bfloat> [[VS2:%.*]], i64 noundef [[VL:%.*]]) #[[ATTR0]] {
+// CHECK-RV64-NEXT: [[ENTRY:.*:]]
+// CHECK-RV64-NEXT: [[TMP0:%.*]] = call <vscale x 16 x float> @llvm.riscv.vfwmsac.mask.nxv16f32.bf16.nxv16bf16.i64(<vscale x 16 x float> [[VD]], bfloat [[VS1]], <vscale x 16 x bfloat> [[VS2]], <vscale x 16 x i1> [[VM]], i64 7, i64 [[VL]], i64 1)
+// CHECK-RV64-NEXT: ret <vscale x 16 x float> [[TMP0]]
+//
+vfloat32m8_t test_vfwmsac_vf_bf16m4_f32m8_mu(vbool4_t vm, vfloat32m8_t vd,
+ __bf16 vs1, vbfloat16m4_t vs2,
+ size_t vl) {
+ return __riscv_vfwmsac_mu(vm, vd, vs1, vs2, vl);
+}
+
+// CHECK-RV64-LABEL: define dso_local <vscale x 1 x float> @test_vfwmsac_vv_bf16mf4_f32mf2_rm_tu(
+// CHECK-RV64-SAME: <vscale x 1 x float> [[VD:%.*]], <vscale x 1 x bfloat> [[VS1:%.*]], <vscale x 1 x bfloat> [[VS2:%.*]], i64 noundef [[VL:%.*]]) #[[ATTR0]] {
+// CHECK-RV64-NEXT: [[ENTRY:.*:]]
+// CHECK-RV64-NEXT: [[TMP0:%.*]] = call <vscale x 1 x float> @llvm.riscv.vfwmsac.nxv1f32.nxv1bf16.nxv1bf16.i64(<vscale x 1 x float> [[VD]], <vscale x 1 x bfloat> [[VS1]], <vscale x 1 x bfloat> [[VS2]], i64 0, i64 [[VL]], i64 2)
+// CHECK-RV64-NEXT: ret <vscale x 1 x float> [[TMP0]]
+//
+vfloat32mf2_t test_vfwmsac_vv_bf16mf4_f32mf2_rm_tu(vfloat32mf2_t vd,
+ vbfloat16mf4_t vs1,
+ vbfloat16mf4_t vs2,
+ size_t vl) {
+ return __riscv_vfwmsac_tu(vd, vs1, vs2, __RISCV_FRM_RNE, vl);
+}
+
+// CHECK-RV64-LABEL: define dso_local <vscale x 1 x float> @test_vfwmsac_vf_bf16mf4_f32mf2_rm_tu(
+// CHECK-RV64-SAME: <vscale x 1 x float> [[VD:%.*]], bfloat noundef [[VS1:%.*]], <vscale x 1 x bfloat> [[VS2:%.*]], i64 noundef [[VL:%.*]]) #[[ATTR0]] {
+// CHECK-RV64-NEXT: [[ENTRY:.*:]]
+// CHECK-RV64-NEXT: [[TMP0:%.*]] = call <vscale x 1 x float> @llvm.riscv.vfwmsac.nxv1f32.bf16.nxv1bf16.i64(<vscale x 1 x float> [[VD]], bfloat [[VS1]], <vscale x 1 x bfloat> [[VS2]], i64 0, i64 [[VL]], i64 2)
+// CHECK-RV64-NEXT: ret <vscale x 1 x float> [[TMP0]]
+//
+vfloat32mf2_t test_vfwmsac_vf_bf16mf4_f32mf2_rm_tu(vfloat32mf2_t vd, __bf16 vs1,
+ vbfloat16mf4_t vs2,
+ size_t vl) {
+ return __riscv_vfwmsac_tu(vd, vs1, vs2, __RISCV_FRM_RNE, vl);
+}
+
+// CHECK-RV64-LABEL: define dso_local <vscale x 2 x float> @test_vfwmsac_vv_bf16mf2_f32m1_rm_tu(
+// CHECK-RV64-SAME: <vscale x 2 x float> [[VD:%.*]], <vscale x 2 x bfloat> [[VS1:%.*]], <vscale x 2 x bfloat> [[VS2:%.*]], i64 noundef [[VL:%.*]]) #[[ATTR0]] {
+// CHECK-RV64-NEXT: [[ENTRY:.*:]]
+// CHECK-RV64-NEXT: [[TMP0:%.*]] = call <vscale x 2 x float> @llvm.riscv.vfwmsac.nxv2f32.nxv2bf16.nxv2bf16.i64(<vscale x 2 x float> [[VD]], <vscale x 2 x bfloat> [[VS1]], <vscale x 2 x bfloat> [[VS2]], i64 0, i64 [[VL]], i64 2)
+// CHECK-RV64-NEXT: ret <vscale x 2 x float> [[TMP0]]
+//
+vfloat32m1_t test_vfwmsac_vv_bf16mf2_f32m1_rm_tu(vfloat32m1_t vd,
+ vbfloat16mf2_t vs1,
+ vbfloat16mf2_t vs2,
+ size_t vl) {
+ return __riscv_vfwmsac_tu(vd, vs1, vs2, __RISCV_FRM_RNE, vl);
+}
+
+// CHECK-RV64-LABEL: define dso_local <vscale x 2 x float> @test_vfwmsac_vf_bf16mf2_f32m1_rm_tu(
+// CHECK-RV64-SAME: <vscale x 2 x float> [[VD:%.*]], bfloat noundef [[VS1:%.*]], <vscale x 2 x bfloat> [[VS2:%.*]], i64 noundef [[VL:%.*]]) #[[ATTR0]] {
+// CHECK-RV64-NEXT: [[ENTRY:.*:]]
+// CHECK-RV64-NEXT: [[TMP0:%.*]] = call <vscale x 2 x float> @llvm.riscv.vfwmsac.nxv2f32.bf16.nxv2bf16.i64(<vscale x 2 x float> [[VD]], bfloat [[VS1]], <vscale x 2 x bfloat> [[VS2]], i64 0, i64 [[VL]], i64 2)
+// CHECK-RV64-NEXT: ret <vscale x 2 x float> [[TMP0]]
+//
+vfloat32m1_t test_vfwmsac_vf_bf16mf2_f32m1_rm_tu(vfloat32m1_t vd, __bf16 vs1,
+ vbfloat16mf2_t vs2,
+ size_t vl) {
+ return __riscv_vfwmsac_tu(vd, vs1, vs2, __RISCV_FRM_RNE, vl);
+}
+
+// CHECK-RV64-LABEL: define dso_local <vscale x 4 x float> @test_vfwmsac_vv_bf16m1_f32m2_rm_tu(
+// CHECK-RV64-SAME: <vscale x 4 x float> [[VD:%.*]], <vscale x 4 x bfloat> [[VS1:%.*]], <vscale x 4 x bfloat> [[VS2:%.*]], i64 noundef [[VL:%.*]]) #[[ATTR0]] {
+// CHECK-RV64-NEXT: [[ENTRY:.*:]]
+// CHECK-RV64-NEXT: [[TMP0:%.*]] = call <vscale x 4 x float> @llvm.riscv.vfwmsac.nxv4f32.nxv4bf16.nxv4bf16.i64(<vscale x 4 x float> [[VD]], <vscale x 4 x bfloat> [[VS1]], <vscale x 4 x bfloat> [[VS2]], i64 0, i64 [[VL]], i64 2)
+// CHECK-RV64-NEXT: ret <vscale x 4 x float> [[TMP0]]
+//
+vfloat32m2_t test_vfwmsac_vv_bf16m1_f32m2_rm_tu(vfloat32m2_t vd,
+ vbfloat16m1_t vs1,
+ vbfloat16m1_t vs2, size_t vl) {
+ return __riscv_vfwmsac_tu(vd, vs1, vs2, __RISCV_FRM_RNE, vl);
+}
+
+// CHECK-RV64-LABEL: define dso_local <vscale x 4 x float> @test_vfwmsac_vf_bf16m1_f32m2_rm_tu(
+// CHECK-RV64-SAME: <vscale x 4 x float> [[VD:%.*]], bfloat noundef [[VS1:%.*]], <vscale x 4 x bfloat> [[VS2:%.*]], i64 noundef [[VL:%.*]]) #[[ATTR0]] {
+// CHECK-RV64-NEXT: [[ENTRY:.*:]]
+// CHECK-RV64-NEXT: [[TMP0:%.*]] = call <vscale x 4 x float> @llvm.riscv.vfwmsac.nxv4f32.bf16.nxv4bf16.i64(<vscale x 4 x float> [[VD]], bfloat [[VS1]], <vscale x 4 x bfloat> [[VS2]], i64 0, i64 [[VL]], i64 2)
+// CHECK-RV64-NEXT: ret <vscale x 4 x float> [[TMP0]]
+//
+vfloat32m2_t test_vfwmsac_vf_bf16m1_f32m2_rm_tu(vfloat32m2_t vd, __bf16 vs1,
+ vbfloat16m1_t vs2, size_t vl) {
+ return __riscv_vfwmsac_tu(vd, vs1, vs2, __RISCV_FRM_RNE, vl);
+}
+
+// CHECK-RV64-LABEL: define dso_local <vscale x 8 x float> @test_vfwmsac_vv_bf16m2_f32m4_rm_tu(
+// CHECK-RV64-SAME: <vscale x 8 x float> [[VD:%.*]], <vscale x 8 x bfloat> [[VS1:%.*]], <vscale x 8 x bfloat> [[VS2:%.*]], i64 noundef [[VL:%.*]]) #[[ATTR0]] {
+// CHECK-RV64-NEXT: [[ENTRY:.*:]]
+// CHECK-RV64-NEXT: [[TMP0:%.*]] = call <vscale x 8 x float> @llvm.riscv.vfwmsac.nxv8f32.nxv8bf16.nxv8bf16.i64(<vscale x 8 x float> [[VD]], <vscale x 8 x bfloat> [[VS1]], <vscale x 8 x bfloat> [[VS2]], i64 0, i64 [[VL]], i64 2)
+// CHECK-RV64-NEXT: ret <vscale x 8 x float> [[TMP0]]
+//
+vfloat32m4_t test_vfwmsac_vv_bf16m2_f32m4_rm_tu(vfloat32m4_t vd,
+ vbfloat16m2_t vs1,
+ vbfloat16m2_t vs2, size_t vl) {
+ return __riscv_vfwmsac_tu(vd, vs1, vs2, __RISCV_FRM_RNE, vl);
+}
+
+// CHECK-RV64-LABEL: define dso_local <vscale x 8 x float> @test_vfwmsac_vf_bf16m2_f32m4_rm_tu(
+// CHECK-RV64-SAME: <vscale x 8 x float> [[VD:%.*]], bfloat noundef [[VS1:%.*]], <vscale x 8 x bfloat> [[VS2:%.*]], i64 noundef [[VL:%.*]]) #[[ATTR0]] {
+// CHECK-RV64-NEXT: [[ENTRY:.*:]]
+// CHECK-RV64-NEXT: [[TMP0:%.*]] = call <vscale x 8 x float> @llvm.riscv.vfwmsac.nxv8f32.bf16.nxv8bf16.i64(<vscale x 8 x float> [[VD]], bfloat [[VS1]], <vscale x 8 x bfloat> [[VS2]], i64 0, i64 [[VL]], i64 2)
+// CHECK-RV64-NEXT: ret <vscale x 8 x float> [[TMP0]]
+//
+vfloat32m4_t test_vfwmsac_vf_bf16m2_f32m4_rm_tu(vfloat32m4_t vd, __bf16 vs1,
+ vbfloat16m2_t vs2, size_t vl) {
+ return __riscv_vfwmsac_tu(vd, vs1, vs2, __RISCV_FRM_RNE, vl);
+}
+
+// CHECK-RV64-LABEL: define dso_local <vscale x 16 x float> @test_vfwmsac_vv_bf16m4_f32m8_rm_tu(
+// CHECK-RV64-SAME: <vscale x 16 x float> [[VD:%.*]], <vscale x 16 x bfloat> [[VS1:%.*]], <vscale x 16 x bfloat> [[VS2:%.*]], i64 noundef [[VL:%.*]]) #[[ATTR0]] {
+// CHECK-RV64-NEXT: [[ENTRY:.*:]]
+// CHECK-RV64-NEXT: [[TMP0:%.*]] = call <vscale x 16 x float> @llvm.riscv.vfwmsac.nxv16f32.nxv16bf16.nxv16bf16.i64(<vscale x 16 x float> [[VD]], <vscale x 16 x bfloat> [[VS1]], <vscale x 16 x bfloat> [[VS2]], i64 0, i64 [[VL]], i64 2)
+// CHECK-RV64-NEXT: ret <vscale x 16 x float> [[TMP0]]
+//
+vfloat32m8_t test_vfwmsac_vv_bf16m4_f32m8_rm_tu(vfloat32m8_t vd,
+ vbfloat16m4_t vs1,
+ vbfloat16m4_t vs2, size_t vl) {
+ return __riscv_vfwmsac_tu(vd, vs1, vs2, __RISCV_FRM_RNE, vl);
+}
+
+// CHECK-RV64-LABEL: define dso_local <vscale x 16 x float> @test_vfwmsac_vf_bf16m4_f32m8_rm_tu(
+// CHECK-RV64-SAME: <vscale x 16 x float> [[VD:%.*]], bfloat noundef [[VS1:%.*]], <vscale x 16 x bfloat> [[VS2:%.*]], i64 noundef [[VL:%.*]]) #[[ATTR0]] {
+// CHECK-RV64-NEXT: [[ENTRY:.*:]]
+// CHECK-RV64-NEXT: [[TMP0:%.*]] = call <vscale x 16 x float> @llvm.riscv.vfwmsac.nxv16f32.bf16.nxv16bf16.i64(<vscale x 16 x float> [[VD]], bfloat [[VS1]], <vscale x 16 x bfloat> [[VS2]], i64 0, i64 [[VL]], i64 2)
+// CHECK-RV64-NEXT: ret <vscale x 16 x float> [[TMP0]]
+//
+vfloat32m8_t test_vfwmsac_vf_bf16m4_f32m8_rm_tu(vfloat32m8_t vd, __bf16 vs1,
+ vbfloat16m4_t vs2, size_t vl) {
+ return __riscv_vfwmsac_tu(vd, vs1, vs2, __RISCV_FRM_RNE, vl);
+}
+
+// CHECK-RV64-LABEL: define dso_local <vscale x 1 x float> @test_vfwmsac_vv_bf16mf4_f32mf2_rm_tum(
+// CHECK-RV64-SAME: <vscale x 1 x i1> [[VM:%.*]], <vscale x 1 x float> [[VD:%.*]], <vscale x 1 x bfloat> [[VS1:%.*]], <vscale x 1 x bfloat> [[VS2:%.*]], i64 noundef [[VL:%.*]]) #[[ATTR0]] {
+// CHECK-RV64-NEXT: [[ENTRY:.*:]]
+// CHECK-RV64-NEXT: [[TMP0:%.*]] = call <vscale x 1 x float> @llvm.riscv.vfwmsac.mask.nxv1f32.nxv1bf16.nxv1bf16.i64(<vscale x 1 x float> [[VD]], <vscale x 1 x bfloat> [[VS1]], <vscale x 1 x bfloat> [[VS2]], <vscale x 1 x i1> [[VM]], i64 0, i64 [[VL]], i64 2)
+// CHECK-RV64-NEXT: ret <vscale x 1 x float> [[TMP0]]
+//
+vfloat32mf2_t test_vfwmsac_vv_bf16mf4_f32mf2_rm_tum(vbool64_t vm,
+ vfloat32mf2_t vd,
+ vbfloat16mf4_t vs1,
+ vbfloat16mf4_t vs2,
+ size_t vl) {
+ return __riscv_vfwmsac_tum(vm, vd, vs1, vs2, __RISCV_FRM_RNE, vl);
+}
+
+// CHECK-RV64-LABEL: define dso_local <vscale x 1 x float> @test_vfwmsac_vf_bf16mf4_f32mf2_rm_tum(
+// CHECK-RV64-SAME: <vscale x 1 x i1> [[VM:%.*]], <vscale x 1 x float> [[VD:%.*]], bfloat noundef [[VS1:%.*]], <vscale x 1 x bfloat> [[VS2:%.*]], i64 noundef [[VL:%.*]]) #[[ATTR0]] {
+// CHECK-RV64-NEXT: [[ENTRY:.*:]]
+// CHECK-RV64-NEXT: [[TMP0:%.*]] = call <vscale x 1 x float> @llvm.riscv.vfwmsac.mask.nxv1f32.bf16.nxv1bf16.i64(<vscale x 1 x float> [[VD]], bfloat [[VS1]], <vscale x 1 x bfloat> [[VS2]], <vscale x 1 x i1> [[VM]], i64 0, i64 [[VL]], i64 2)
+// CHECK-RV64-NEXT: ret <vscale x 1 x float> [[TMP0]]
+//
+vfloat32mf2_t test_vfwmsac_vf_bf16mf4_f32mf2_rm_tum(
+ vbool64_t vm, vfloat32mf2_t vd, __bf16 vs1, vbfloat16mf4_t vs2, size_t vl) {
+ return __riscv_vfwmsac_tum(vm, vd, vs1, vs2, __RISCV_FRM_RNE, vl);
+}
+
+// CHECK-RV64-LABEL: define dso_local <vscale x 2 x float> @test_vfwmsac_vv_bf16mf2_f32m1_rm_tum(
+// CHECK-RV64-SAME: <vscale x 2 x i1> [[VM:%.*]], <vscale x 2 x float> [[VD:%.*]], <vscale x 2 x bfloat> [[VS1:%.*]], <vscale x 2 x bfloat> [[VS2:%.*]], i64 noundef [[VL:%.*]]) #[[ATTR0]] {
+// CHECK-RV64-NEXT: [[ENTRY:.*:]]
+// CHECK-RV64-NEXT: [[TMP0:%.*]] = call <vscale x 2 x float> @llvm.riscv.vfwmsac.mask.nxv2f32.nxv2bf16.nxv2bf16.i64(<vscale x 2 x float> [[VD]], <vscale x 2 x bfloat> [[VS1]], <vscale x 2 x bfloat> [[VS2]], <vscale x 2 x i1> [[VM]], i64 0, i64 [[VL]], i64 2)
+// CHECK-RV64-NEXT: ret <vscale x 2 x float> [[TMP0]]
+//
+vfloat32m1_t test_vfwmsac_vv_bf16mf2_f32m1_rm_tum(vbool32_t vm, vfloat32m1_t vd,
+ vbfloat16mf2_t vs1,
+ vbfloat16mf2_t vs2,
+ size_t vl) {
+ return __riscv_vfwmsac_tum(vm, vd, vs1, vs2, __RISCV_FRM_RNE, vl);
+}
+
+// CHECK-RV64-LABEL: define dso_local <vscale x 2 x float> @test_vfwmsac_vf_bf16mf2_f32m1_rm_tum(
+// CHECK-RV64-SAME: <vscale x 2 x i1> [[VM:%.*]], <vscale x 2 x float> [[VD:%.*]], bfloat noundef [[VS1:%.*]], <vscale x 2 x bfloat> [[VS2:%.*]], i64 noundef [[VL:%.*]]) #[[ATTR0]] {
+// CHECK-RV64-NEXT: [[ENTRY:.*:]]
+// CHECK-RV64-NEXT: [[TMP0:%.*]] = call <vscale x 2 x float> @llvm.riscv.vfwmsac.mask.nxv2f32.bf16.nxv2bf16.i64(<vscale x 2 x float> [[VD]], bfloat [[VS1]], <vscale x 2 x bfloat> [[VS2]], <vscale x 2 x i1> [[VM]], i64 0, i64 [[VL]], i64 2)
+// CHECK-RV64-NEXT: ret <vscale x 2 x float> [[TMP0]]
+//
+vfloat32m1_t test_vfwmsac_vf_bf16mf2_f32m1_rm_tum(vbool32_t vm, vfloat32m1_t vd,
+ __bf16 vs1,
+ vbfloat16mf2_t vs2,
+ size_t vl) {
+ return __riscv_vfwmsac_tum(vm, vd, vs1, vs2, __RISCV_FRM_RNE, vl);
+}
+
+// CHECK-RV64-LABEL: define dso_local <vscale x 4 x float> @test_vfwmsac_vv_bf16m1_f32m2_rm_tum(
+// CHECK-RV64-SAME: <vscale x 4 x i1> [[VM:%.*]], <vscale x 4 x float> [[VD:%.*]], <vscale x 4 x bfloat> [[VS1:%.*]], <vscale x 4 x bfloat> [[VS2:%.*]], i64 noundef [[VL:%.*]]) #[[ATTR0]] {
+// CHECK-RV64-NEXT: [[ENTRY:.*:]]
+// CHECK-RV64-NEXT: [[TMP0:%.*]] = call <vscale x 4 x float> @llvm.riscv.vfwmsac.mask.nxv4f32.nxv4bf16.nxv4bf16.i64(<vscale x 4 x float> [[VD]], <vscale x 4 x bfloat> [[VS1]], <vscale x 4 x bfloat> [[VS2]], <vscale x 4 x i1> [[VM]], i64 0, i64 [[VL]], i64 2)
+// CHECK-RV64-NEXT: ret <vscale x 4 x float> [[TMP0]]
+//
+vfloat32m2_t test_vfwmsac_vv_bf16m1_f32m2_rm_tum(vbool16_t vm, vfloat32m2_t vd,
+ vbfloat16m1_t vs1,
+ vbfloat16m1_t vs2, size_t vl) {
+ return __riscv_vfwmsac_tum(vm, vd, vs1, vs2, __RISCV_FRM_RNE, vl);
+}
+
+// CHECK-RV64-LABEL: define dso_local <vscale x 4 x float> @test_vfwmsac_vf_bf16m1_f32m2_rm_tum(
+// CHECK-RV64-SAME: <vscale x 4 x i1> [[VM:%.*]], <vscale x 4 x float> [[VD:%.*]], bfloat noundef [[VS1:%.*]], <vscale x 4 x bfloat> [[VS2:%.*]], i64 noundef [[VL:%.*]]) #[[ATTR0]] {
+// CHECK-RV64-NEXT: [[ENTRY:.*:]]
+// CHECK-RV64-NEXT: [[TMP0:%.*]] = call <vscale x 4 x float> @llvm.riscv.vfwmsac.mask.nxv4f32.bf16.nxv4bf16.i64(<vscale x 4 x float> [[VD]], bfloat [[VS1]], <vscale x 4 x bfloat> [[VS2]], <vscale x 4 x i1> [[VM]], i64 0, i64 [[VL]], i64 2)
+// CHECK-RV64-NEXT: ret <vscale x 4 x float> [[TMP0]]
+//
+vfloat32m2_t test_vfwmsac_vf_bf16m1_f32m2_rm_tum(vbool16_t vm, vfloat32m2_t vd,
+ __bf16 vs1, vbfloat16m1_t vs2,
+ size_t vl) {
+ return __riscv_vfwmsac_tum(vm, vd, vs1, vs2, __RISCV_FRM_RNE, vl);
+}
+
+// CHECK-RV64-LABEL: define dso_local <vscale x 8 x float> @test_vfwmsac_vv_bf16m2_f32m4_rm_tum(
+// CHECK-RV64-SAME: <vscale x 8 x i1> [[VM:%.*]], <vscale x 8 x float> [[VD:%.*]], <vscale x 8 x bfloat> [[VS1:%.*]], <vscale x 8 x bfloat> [[VS2:%.*]], i64 noundef [[VL:%.*]]) #[[ATTR0]] {
+// CHECK-RV64-NEXT: [[ENTRY:.*:]]
+// CHECK-RV64-NEXT: [[TMP0:%.*]] = call <vscale x 8 x float> @llvm.riscv.vfwmsac.mask.nxv8f32.nxv8bf16.nxv8bf16.i64(<vscale x 8 x float> [[VD]], <vscale x 8 x bfloat> [[VS1]], <vscale x 8 x bfloat> [[VS2]], <vscale x 8 x i1> [[VM]], i64 0, i64 [[VL]], i64 2)
+// CHECK-RV64-NEXT: ret <vscale x 8 x float> [[TMP0]]
+//
+vfloat32m4_t test_vfwmsac_vv_bf16m2_f32m4_rm_tum(vbool8_t vm, vfloat32m4_t vd,
+ vbfloat16m2_t vs1,
+ vbfloat16m2_t vs2, size_t vl) {
+ return __riscv_vfwmsac_tum(vm, vd, vs1, vs2, __RISCV_FRM_RNE, vl);
+}
+
+// CHECK-RV64-LABEL: define dso_local <vscale x 8 x float> @test_vfwmsac_vf_bf16m2_f32m4_rm_tum(
+// CHECK-RV64-SAME: <vscale x 8 x i1> [[VM:%.*]], <vscale x 8 x float> [[VD:%.*]], bfloat noundef [[VS1:%.*]], <vscale x 8 x bfloat> [[VS2:%.*]], i64 noundef [[VL:%.*]]) #[[ATTR0]] {
+// CHECK-RV64-NEXT: [[ENTRY:.*:]]
+// CHECK-RV64-NEXT: [[TMP0:%.*]] = call <vscale x 8 x float> @llvm.riscv.vfwmsac.mask.nxv8f32.bf16.nxv8bf16.i64(<vscale x 8 x float> [[VD]], bfloat [[VS1]], <vscale x 8 x bfloat> [[VS2]], <vscale x 8 x i1> [[VM]], i64 0, i64 [[VL]], i64 2)
+// CHECK-RV64-NEXT: ret <vscale x 8 x float> [[TMP0]]
+//
+vfloat32m4_t test_vfwmsac_vf_bf16m2_f32m4_rm_tum(vbool8_t vm, vfloat32m4_t vd,
+ __bf16 vs1, vbfloat16m2_t vs2,
+ size_t vl) {
+ return __riscv_vfwmsac_tum(vm, vd, vs1, vs2, __RISCV_FRM_RNE, vl);
+}
+
+// CHECK-RV64-LABEL: define dso_local <vscale x 16 x float> @test_vfwmsac_vv_bf16m4_f32m8_rm_tum(
+// CHECK-RV64-SAME: <vscale x 16 x i1> [[VM:%.*]], <vscale x 16 x float> [[VD:%.*]], <vscale x 16 x bfloat> [[VS1:%.*]], <vscale x 16 x bfloat> [[VS2:%.*]], i64 noundef [[VL:%.*]]) #[[ATTR0]] {
+// CHECK-RV64-NEXT: [[ENTRY:.*:]]
+// CHECK-RV64-NEXT: [[TMP0:%.*]] = call <vscale x 16 x float> @llvm.riscv.vfwmsac.mask.nxv16f32.nxv16bf16.nxv16bf16.i64(<vscale x 16 x float> [[VD]], <vscale x 16 x bfloat> [[VS1]], <vscale x 16 x bfloat> [[VS2]], <vscale x 16 x i1> [[VM]], i64 0, i64 [[VL]], i64 2)
+// CHECK-RV64-NEXT: ret <vscale x 16 x float> [[TMP0]]
+//
+vfloat32m8_t test_vfwmsac_vv_bf16m4_f32m8_rm_tum(vbool4_t vm, vfloat32m8_t vd,
+ vbfloat16m4_t vs1,
+ vbfloat16m4_t vs2, size_t vl) {
+ return __riscv_vfwmsac_tum(vm, vd, vs1, vs2, __RISCV_FRM_RNE, vl);
+}
+
+// CHECK-RV64-LABEL: define dso_local <vscale x 16 x float> @test_vfwmsac_vf_bf16m4_f32m8_rm_tum(
+// CHECK-RV64-SAME: <vscale x 16 x i1> [[VM:%.*]], <vscale x 16 x float> [[VD:%.*]], bfloat noundef [[VS1:%.*]], <vscale x 16 x bfloat> [[VS2:%.*]], i64 noundef [[VL:%.*]]) #[[ATTR0]] {
+// CHECK-RV64-NEXT: [[ENTRY:.*:]]
+// CHECK-RV64-NEXT: [[TMP0:%.*]] = call <vscale x 16 x float> @llvm.riscv.vfwmsac.mask.nxv16f32.bf16.nxv16bf16.i64(<vscale x 16 x float> [[VD]], bfloat [[VS1]], <vscale x 16 x bfloat> [[VS2]], <vscale x 16 x i1> [[VM]], i64 0, i64 [[VL]], i64 2)
+// CHECK-RV64-NEXT: ret <vscale x 16 x float> [[TMP0]]
+//
+vfloat32m8_t test_vfwmsac_vf_bf16m4_f32m8_rm_tum(vbool4_t vm, vfloat32m8_t vd,
+ __bf16 vs1, vbfloat16m4_t vs2,
+ size_t vl) {
+ return __riscv_vfwmsac_tum(vm, vd, vs1, vs2, __RISCV_FRM_RNE, vl);
+}
+
+// CHECK-RV64-LABEL: define dso_local <vscale x 1 x float> @test_vfwmsac_vv_bf16mf4_f32mf2_rm_tumu(
+// CHECK-RV64-SAME: <vscale x 1 x i1> [[VM:%.*]], <vscale x 1 x float> [[VD:%.*]], <vscale x 1 x bfloat> [[VS1:%.*]], <vscale x 1 x bfloat> [[VS2:%.*]], i64 noundef [[VL:%.*]]) #[[ATTR0]] {
+// CHECK-RV64-NEXT: [[ENTRY:.*:]]
+// CHECK-RV64-NEXT: [[TMP0:%.*]] = call <vscale x 1 x float> @llvm.riscv.vfwmsac.mask.nxv1f32.nxv1bf16.nxv1bf16.i64(<vscale x 1 x float> [[VD]], <vscale x 1 x bfloat> [[VS1]], <vscale x 1 x bfloat> [[VS2]], <vscale x 1 x i1> [[VM]], i64 0, i64 [[VL]], i64 0)
+// CHECK-RV64-NEXT: ret <vscale x 1 x float> [[TMP0]]
+//
+vfloat32mf2_t test_vfwmsac_vv_bf16mf4_f32mf2_rm_tumu(vbool64_t vm,
+ vfloat32mf2_t vd,
+ vbfloat16mf4_t vs1,
+ vbfloat16mf4_t vs2,
+ size_t vl) {
+ return __riscv_vfwmsac_tumu(vm, vd, vs1, vs2, __RISCV_FRM_RNE, vl);
+}
+
+// CHECK-RV64-LABEL: define dso_local <vscale x 1 x float> @test_vfwmsac_vf_bf16mf4_f32mf2_rm_tumu(
+// CHECK-RV64-SAME: <vscale x 1 x i1> [[VM:%.*]], <vscale x 1 x float> [[VD:%.*]], bfloat noundef [[VS1:%.*]], <vscale x 1 x bfloat> [[VS2:%.*]], i64 noundef [[VL:%.*]]) #[[ATTR0]] {
+// CHECK-RV64-NEXT: [[ENTRY:.*:]]
+// CHECK-RV64-NEXT: [[TMP0:%.*]] = call <vscale x 1 x float> @llvm.riscv.vfwmsac.mask.nxv1f32.bf16.nxv1bf16.i64(<vscale x 1 x float> [[VD]], bfloat [[VS1]], <vscale x 1 x bfloat> [[VS2]], <vscale x 1 x i1> [[VM]], i64 0, i64 [[VL]], i64 0)
+// CHECK-RV64-NEXT: ret <vscale x 1 x float> [[TMP0]]
+//
+vfloat32mf2_t test_vfwmsac_vf_bf16mf4_f32mf2_rm_tumu(
+ vbool64_t vm, vfloat32mf2_t vd, __bf16 vs1, vbfloat16mf4_t vs2, size_t vl) {
+ return __riscv_vfwmsac_tumu(vm, vd, vs1, vs2, __RISCV_FRM_RNE, vl);
+}
+
+// CHECK-RV64-LABEL: define dso_local <vscale x 2 x float> @test_vfwmsac_vv_bf16mf2_f32m1_rm_tumu(
+// CHECK-RV64-SAME: <vscale x 2 x i1> [[VM:%.*]], <vscale x 2 x float> [[VD:%.*]], <vscale x 2 x bfloat> [[VS1:%.*]], <vscale x 2 x bfloat> [[VS2:%.*]], i64 noundef [[VL:%.*]]) #[[ATTR0]] {
+// CHECK-RV64-NEXT: [[ENTRY:.*:]]
+// CHECK-RV64-NEXT: [[TMP0:%.*]] = call <vscale x 2 x float> @llvm.riscv.vfwmsac.mask.nxv2f32.nxv2bf16.nxv2bf16.i64(<vscale x 2 x float> [[VD]], <vscale x 2 x bfloat> [[VS1]], <vscale x 2 x bfloat> [[VS2]], <vscale x 2 x i1> [[VM]], i64 0, i64 [[VL]], i64 0)
+// CHECK-RV64-NEXT: ret <vscale x 2 x float> [[TMP0]]
+//
+vfloat32m1_t test_vfwmsac_vv_bf16mf2_f32m1_rm_tumu(vbool32_t vm,
+ vfloat32m1_t vd,
+ vbfloat16mf2_t vs1,
+ vbfloat16mf2_t vs2,
+ size_t vl) {
+ return __riscv_vfwmsac_tumu(vm, vd, vs1, vs2, __RISCV_FRM_RNE, vl);
+}
+
+// CHECK-RV64-LABEL: define dso_local <vscale x 2 x float> @test_vfwmsac_vf_bf16mf2_f32m1_rm_tumu(
+// CHECK-RV64-SAME: <vscale x 2 x i1> [[VM:%.*]], <vscale x 2 x float> [[VD:%.*]], bfloat noundef [[VS1:%.*]], <vscale x 2 x bfloat> [[VS2:%.*]], i64 noundef [[VL:%.*]]) #[[ATTR0]] {
+// CHECK-RV64-NEXT: [[ENTRY:.*:]]
+// CHECK-RV64-NEXT: [[TMP0:%.*]] = call <vscale x 2 x float> @llvm.riscv.vfwmsac.mask.nxv2f32.bf16.nxv2bf16.i64(<vscale x 2 x float> [[VD]], bfloat [[VS1]], <vscale x 2 x bfloat> [[VS2]], <vscale x 2 x i1> [[VM]], i64 0, i64 [[VL]], i64 0)
+// CHECK-RV64-NEXT: ret <vscale x 2 x float> [[TMP0]]
+//
+vfloat32m1_t test_vfwmsac_vf_bf16mf2_f32m1_rm_tumu(vbool32_t vm,
+ vfloat32m1_t vd, __bf16 vs1,
+ vbfloat16mf2_t vs2,
+ size_t vl) {
+ return __riscv_vfwmsac_tumu(vm, vd, vs1, vs2, __RISCV_FRM_RNE, vl);
+}
+
+// CHECK-RV64-LABEL: define dso_local <vscale x 4 x float> @test_vfwmsac_vv_bf16m1_f32m2_rm_tumu(
+// CHECK-RV64-SAME: <vscale x 4 x i1> [[VM:%.*]], <vscale x 4 x float> [[VD:%.*]], <vscale x 4 x bfloat> [[VS1:%.*]], <vscale x 4 x bfloat> [[VS2:%.*]], i64 noundef [[VL:%.*]]) #[[ATTR0]] {
+// CHECK-RV64-NEXT: [[ENTRY:.*:]]
+// CHECK-RV64-NEXT: [[TMP0:%.*]] = call <vscale x 4 x float> @llvm.riscv.vfwmsac.mask.nxv4f32.nxv4bf16.nxv4bf16.i64(<vscale x 4 x float> [[VD]], <vscale x 4 x bfloat> [[VS1]], <vscale x 4 x bfloat> [[VS2]], <vscale x 4 x i1> [[VM]], i64 0, i64 [[VL]], i64 0)
+// CHECK-RV64-NEXT: ret <vscale x 4 x float> [[TMP0]]
+//
+vfloat32m2_t test_vfwmsac_vv_bf16m1_f32m2_rm_tumu(vbool16_t vm, vfloat32m2_t vd,
+ vbfloat16m1_t vs1,
+ vbfloat16m1_t vs2,
+ size_t vl) {
+ return __riscv_vfwmsac_tumu(vm, vd, vs1, vs2, __RISCV_FRM_RNE, vl);
+}
+
+// CHECK-RV64-LABEL: define dso_local <vscale x 4 x float> @test_vfwmsac_vf_bf16m1_f32m2_rm_tumu(
+// CHECK-RV64-SAME: <vscale x 4 x i1> [[VM:%.*]], <vscale x 4 x float> [[VD:%.*]], bfloat noundef [[VS1:%.*]], <vscale x 4 x bfloat> [[VS2:%.*]], i64 noundef [[VL:%.*]]) #[[ATTR0]] {
+// CHECK-RV64-NEXT: [[ENTRY:.*:]]
+// CHECK-RV64-NEXT: [[TMP0:%.*]] = call <vscale x 4 x float> @llvm.riscv.vfwmsac.mask.nxv4f32.bf16.nxv4bf16.i64(<vscale x 4 x float> [[VD]], bfloat [[VS1]], <vscale x 4 x bfloat> [[VS2]], <vscale x 4 x i1> [[VM]], i64 0, i64 [[VL]], i64 0)
+// CHECK-RV64-NEXT: ret <vscale x 4 x float> [[TMP0]]
+//
+vfloat32m2_t test_vfwmsac_vf_bf16m1_f32m2_rm_tumu(vbool16_t vm, vfloat32m2_t vd,
+ __bf16 vs1, vbfloat16m1_t vs2,
+ size_t vl) {
+ return __riscv_vfwmsac_tumu(vm, vd, vs1, vs2, __RISCV_FRM_RNE, vl);
+}
+
+// CHECK-RV64-LABEL: define dso_local <vscale x 8 x float> @test_vfwmsac_vv_bf16m2_f32m4_rm_tumu(
+// CHECK-RV64-SAME: <vscale x 8 x i1> [[VM:%.*]], <vscale x 8 x float> [[VD:%.*]], <vscale x 8 x bfloat> [[VS1:%.*]], <vscale x 8 x bfloat> [[VS2:%.*]], i64 noundef [[VL:%.*]]) #[[ATTR0]] {
+// CHECK-RV64-NEXT: [[ENTRY:.*:]]
+// CHECK-RV64-NEXT: [[TMP0:%.*]] = call <vscale x 8 x float> @llvm.riscv.vfwmsac.mask.nxv8f32.nxv8bf16.nxv8bf16.i64(<vscale x 8 x float> [[VD]], <vscale x 8 x bfloat> [[VS1]], <vscale x 8 x bfloat> [[VS2]], <vscale x 8 x i1> [[VM]], i64 0, i64 [[VL]], i64 0)
+// CHECK-RV64-NEXT: ret <vscale x 8 x float> [[TMP0]]
+//
+vfloat32m4_t test_vfwmsac_vv_bf16m2_f32m4_rm_tumu(vbool8_t vm, vfloat32m4_t vd,
+ vbfloat16m2_t vs1,
+ vbfloat16m2_t vs2,
+ size_t vl) {
+ return __riscv_vfwmsac_tumu(vm, vd, vs1, vs2, __RISCV_FRM_RNE, vl);
+}
+
+// CHECK-RV64-LABEL: define dso_local <vscale x 8 x float> @test_vfwmsac_vf_bf16m2_f32m4_rm_tumu(
+// CHECK-RV64-SAME: <vscale x 8 x i1> [[VM:%.*]], <vscale x 8 x float> [[VD:%.*]], bfloat noundef [[VS1:%.*]], <vscale x 8 x bfloat> [[VS2:%.*]], i64 noundef [[VL:%.*]]) #[[ATTR0]] {
+// CHECK-RV64-NEXT: [[ENTRY:.*:]]
+// CHECK-RV64-NEXT: [[TMP0:%.*]] = call <vscale x 8 x float> @llvm.riscv.vfwmsac.mask.nxv8f32.bf16.nxv8bf16.i64(<vscale x 8 x float> [[VD]], bfloat [[VS1]], <vscale x 8 x bfloat> [[VS2]], <vscale x 8 x i1> [[VM]], i64 0, i64 [[VL]], i64 0)
+// CHECK-RV64-NEXT: ret <vscale x 8 x float> [[TMP0]]
+//
+vfloat32m4_t test_vfwmsac_vf_bf16m2_f32m4_rm_tumu(vbool8_t vm, vfloat32m4_t vd,
+ __bf16 vs1, vbfloat16m2_t vs2,
+ size_t vl) {
+ return __riscv_vfwmsac_tumu(vm, vd, vs1, vs2, __RISCV_FRM_RNE, vl);
+}
+
+// CHECK-RV64-LABEL: define dso_local <vscale x 16 x float> @test_vfwmsac_vv_bf16m4_f32m8_rm_tumu(
+// CHECK-RV64-SAME: <vscale x 16 x i1> [[VM:%.*]], <vscale x 16 x float> [[VD:%.*]], <vscale x 16 x bfloat> [[VS1:%.*]], <vscale x 16 x bfloat> [[VS2:%.*]], i64 noundef [[VL:%.*]]) #[[ATTR0]] {
+// CHECK-RV64-NEXT: [[ENTRY:.*:]]
+// CHECK-RV64-NEXT: [[TMP0:%.*]] = call <vscale x 16 x float> @llvm.riscv.vfwmsac.mask.nxv16f32.nxv16bf16.nxv16bf16.i64(<vscale x 16 x float> [[VD]], <vscale x 16 x bfloat> [[VS1]], <vscale x 16 x bfloat> [[VS2]], <vscale x 16 x i1> [[VM]], i64 0, i64 [[VL]], i64 0)
+// CHECK-RV64-NEXT: ret <vscale x 16 x float> [[TMP0]]
+//
+vfloat32m8_t test_vfwmsac_vv_bf16m4_f32m8_rm_tumu(vbool4_t vm, vfloat32m8_t vd,
+ vbfloat16m4_t vs1,
+ vbfloat16m4_t vs2,
+ size_t vl) {
+ return __riscv_vfwmsac_tumu(vm, vd, vs1, vs2, __RISCV_FRM_RNE, vl);
+}
+
+// CHECK-RV64-LABEL: define dso_local <vscale x 16 x float> @test_vfwmsac_vf_bf16m4_f32m8_rm_tumu(
+// CHECK-RV64-SAME: <vscale x 16 x i1> [[VM:%.*]], <vscale x 16 x float> [[VD:%.*]], bfloat noundef [[VS1:%.*]], <vscale x 16 x bfloat> [[VS2:%.*]], i64 noundef [[VL:%.*]]) #[[ATTR0]] {
+// CHECK-RV64-NEXT: [[ENTRY:.*:]]
+// CHECK-RV64-NEXT: [[TMP0:%.*]] = call <vscale x 16 x float> @llvm.riscv.vfwmsac.mask.nxv16f32.bf16.nxv16bf16.i64(<vscale x 16 x float> [[VD]], bfloat [[VS1]], <vscale x 16 x bfloat> [[VS2]], <vscale x 16 x i1> [[VM]], i64 0, i64 [[VL]], i64 0)
+// CHECK-RV64-NEXT: ret <vscale x 16 x float> [[TMP0]]
+//
+vfloat32m8_t test_vfwmsac_vf_bf16m4_f32m8_rm_tumu(vbool4_t vm, vfloat32m8_t vd,
+ __bf16 vs1, vbfloat16m4_t vs2,
+ size_t vl) {
+ return __riscv_vfwmsac_tumu(vm, vd, vs1, vs2, __RISCV_FRM_RNE, vl);
+}
+
+// CHECK-RV64-LABEL: define dso_local <vscale x 1 x float> @test_vfwmsac_vv_bf16mf4_f32mf2_rm_mu(
+// CHECK-RV64-SAME: <vscale x 1 x i1> [[VM:%.*]], <vscale x 1 x float> [[VD:%.*]], <vscale x 1 x bfloat> [[VS1:%.*]], <vscale x 1 x bfloat> [[VS2:%.*]], i64 noundef [[VL:%.*]]) #[[ATTR0]] {
+// CHECK-RV64-NEXT: [[ENTRY:.*:]]
+// CHECK-RV64-NEXT: [[TMP0:%.*]] = call <vscale x 1 x float> @llvm.riscv.vfwmsac.mask.nxv1f32.nxv1bf16.nxv1bf16.i64(<vscale x 1 x float> [[VD]], <vscale x 1 x bfloat> [[VS1]], <vscale x 1 x bfloat> [[VS2]], <vscale x 1 x i1> [[VM]], i64 0, i64 [[VL]], i64 1)
+// CHECK-RV64-NEXT: ret <vscale x 1 x float> [[TMP0]]
+//
+vfloat32mf2_t test_vfwmsac_vv_bf16mf4_f32mf2_rm_mu(vbool64_t vm,
+ vfloat32mf2_t vd,
+ vbfloat16mf4_t vs1,
+ vbfloat16mf4_t vs2,
+ size_t vl) {
+ return __riscv_vfwmsac_mu(vm, vd, vs1, vs2, __RISCV_FRM_RNE, vl);
+}
+
+// CHECK-RV64-LABEL: define dso_local <vscale x 1 x float> @test_vfwmsac_vf_bf16mf4_f32mf2_rm_mu(
+// CHECK-RV64-SAME: <vscale x 1 x i1> [[VM:%.*]], <vscale x 1 x float> [[VD:%.*]], bfloat noundef [[VS1:%.*]], <vscale x 1 x bfloat> [[VS2:%.*]], i64 noundef [[VL:%.*]]) #[[ATTR0]] {
+// CHECK-RV64-NEXT: [[ENTRY:.*:]]
+// CHECK-RV64-NEXT: [[TMP0:%.*]] = call <vscale x 1 x float> @llvm.riscv.vfwmsac.mask.nxv1f32.bf16.nxv1bf16.i64(<vscale x 1 x float> [[VD]], bfloat [[VS1]], <vscale x 1 x bfloat> [[VS2]], <vscale x 1 x i1> [[VM]], i64 0, i64 [[VL]], i64 1)
+// CHECK-RV64-NEXT: ret <vscale x 1 x float> [[TMP0]]
+//
+vfloat32mf2_t test_vfwmsac_vf_bf16mf4_f32mf2_rm_mu(vbool64_t vm,
+ vfloat32mf2_t vd, __bf16 vs1,
+ vbfloat16mf4_t vs2,
+ size_t vl) {
+ return __riscv_vfwmsac_mu(vm, vd, vs1, vs2, __RISCV_FRM_RNE, vl);
+}
+
+// CHECK-RV64-LABEL: define dso_local <vscale x 2 x float> @test_vfwmsac_vv_bf16mf2_f32m1_rm_mu(
+// CHECK-RV64-SAME: <vscale x 2 x i1> [[VM:%.*]], <vscale x 2 x float> [[VD:%.*]], <vscale x 2 x bfloat> [[VS1:%.*]], <vscale x 2 x bfloat> [[VS2:%.*]], i64 noundef [[VL:%.*]]) #[[ATTR0]] {
+// CHECK-RV64-NEXT: [[ENTRY:.*:]]
+// CHECK-RV64-NEXT: [[TMP0:%.*]] = call <vscale x 2 x float> @llvm.riscv.vfwmsac.mask.nxv2f32.nxv2bf16.nxv2bf16.i64(<vscale x 2 x float> [[VD]], <vscale x 2 x bfloat> [[VS1]], <vscale x 2 x bfloat> [[VS2]], <vscale x 2 x i1> [[VM]], i64 0, i64 [[VL]], i64 1)
+// CHECK-RV64-NEXT: ret <vscale x 2 x float> [[TMP0]]
+//
+vfloat32m1_t test_vfwmsac_vv_bf16mf2_f32m1_rm_mu(vbool32_t vm, vfloat32m1_t vd,
+ vbfloat16mf2_t vs1,
+ vbfloat16mf2_t vs2,
+ size_t vl) {
+ return __riscv_vfwmsac_mu(vm, vd, vs1, vs2, __RISCV_FRM_RNE, vl);
+}
+
+// CHECK-RV64-LABEL: define dso_local <vscale x 2 x float> @test_vfwmsac_vf_bf16mf2_f32m1_rm_mu(
+// CHECK-RV64-SAME: <vscale x 2 x i1> [[VM:%.*]], <vscale x 2 x float> [[VD:%.*]], bfloat noundef [[VS1:%.*]], <vscale x 2 x bfloat> [[VS2:%.*]], i64 noundef [[VL:%.*]]) #[[ATTR0]] {
+// CHECK-RV64-NEXT: [[ENTRY:.*:]]
+// CHECK-RV64-NEXT: [[TMP0:%.*]] = call <vscale x 2 x float> @llvm.riscv.vfwmsac.mask.nxv2f32.bf16.nxv2bf16.i64(<vscale x 2 x float> [[VD]], bfloat [[VS1]], <vscale x 2 x bfloat> [[VS2]], <vscale x 2 x i1> [[VM]], i64 0, i64 [[VL]], i64 1)
+// CHECK-RV64-NEXT: ret <vscale x 2 x float> [[TMP0]]
+//
+vfloat32m1_t test_vfwmsac_vf_bf16mf2_f32m1_rm_mu(vbool32_t vm, vfloat32m1_t vd,
+ __bf16 vs1, vbfloat16mf2_t vs2,
+ size_t vl) {
+ return __riscv_vfwmsac_mu(vm, vd, vs1, vs2, __RISCV_FRM_RNE, vl);
+}
+
+// CHECK-RV64-LABEL: define dso_local <vscale x 4 x float> @test_vfwmsac_vv_bf16m1_f32m2_rm_mu(
+// CHECK-RV64-SAME: <vscale x 4 x i1> [[VM:%.*]], <vscale x 4 x float> [[VD:%.*]], <vscale x 4 x bfloat> [[VS1:%.*]], <vscale x 4 x bfloat> [[VS2:%.*]], i64 noundef [[VL:%.*]]) #[[ATTR0]] {
+// CHECK-RV64-NEXT: [[ENTRY:.*:]]
+// CHECK-RV64-NEXT: [[TMP0:%.*]] = call <vscale x 4 x float> @llvm.riscv.vfwmsac.mask.nxv4f32.nxv4bf16.nxv4bf16.i64(<vscale x 4 x float> [[VD]], <vscale x 4 x bfloat> [[VS1]], <vscale x 4 x bfloat> [[VS2]], <vscale x 4 x i1> [[VM]], i64 0, i64 [[VL]], i64 1)
+// CHECK-RV64-NEXT: ret <vscale x 4 x float> [[TMP0]]
+//
+vfloat32m2_t test_vfwmsac_vv_bf16m1_f32m2_rm_mu(vbool16_t vm, vfloat32m2_t vd,
+ vbfloat16m1_t vs1,
+ vbfloat16m1_t vs2, size_t vl) {
+ return __riscv_vfwmsac_mu(vm, vd, vs1, vs2, __RISCV_FRM_RNE, vl);
+}
+
+// CHECK-RV64-LABEL: define dso_local <vscale x 4 x float> @test_vfwmsac_vf_bf16m1_f32m2_rm_mu(
+// CHECK-RV64-SAME: <vscale x 4 x i1> [[VM:%.*]], <vscale x 4 x float> [[VD:%.*]], bfloat noundef [[VS1:%.*]], <vscale x 4 x bfloat> [[VS2:%.*]], i64 noundef [[VL:%.*]]) #[[ATTR0]] {
+// CHECK-RV64-NEXT: [[ENTRY:.*:]]
+// CHECK-RV64-NEXT: [[TMP0:%.*]] = call <vscale x 4 x float> @llvm.riscv.vfwmsac.mask.nxv4f32.bf16.nxv4bf16.i64(<vscale x 4 x float> [[VD]], bfloat [[VS1]], <vscale x 4 x bfloat> [[VS2]], <vscale x 4 x i1> [[VM]], i64 0, i64 [[VL]], i64 1)
+// CHECK-RV64-NEXT: ret <vscale x 4 x float> [[TMP0]]
+//
+vfloat32m2_t test_vfwmsac_vf_bf16m1_f32m2_rm_mu(vbool16_t vm, vfloat32m2_t vd,
+ __bf16 vs1, vbfloat16m1_t vs2,
+ size_t vl) {
+ return __riscv_vfwmsac_mu(vm, vd, vs1, vs2, __RISCV_FRM_RNE, vl);
+}
+
+// CHECK-RV64-LABEL: define dso_local <vscale x 8 x float> @test_vfwmsac_vv_bf16m2_f32m4_rm_mu(
+// CHECK-RV64-SAME: <vscale x 8 x i1> [[VM:%.*]], <vscale x 8 x float> [[VD:%.*]], <vscale x 8 x bfloat> [[VS1:%.*]], <vscale x 8 x bfloat> [[VS2:%.*]], i64 noundef [[VL:%.*]]) #[[ATTR0]] {
+// CHECK-RV64-NEXT: [[ENTRY:.*:]]
+// CHECK-RV64-NEXT: [[TMP0:%.*]] = call <vscale x 8 x float> @llvm.riscv.vfwmsac.mask.nxv8f32.nxv8bf16.nxv8bf16.i64(<vscale x 8 x float> [[VD]], <vscale x 8 x bfloat> [[VS1]], <vscale x 8 x bfloat> [[VS2]], <vscale x 8 x i1> [[VM]], i64 0, i64 [[VL]], i64 1)
+// CHECK-RV64-NEXT: ret <vscale x 8 x float> [[TMP0]]
+//
+vfloat32m4_t test_vfwmsac_vv_bf16m2_f32m4_rm_mu(vbool8_t vm, vfloat32m4_t vd,
+ vbfloat16m2_t vs1,
+ vbfloat16m2_t vs2, size_t vl) {
+ return __riscv_vfwmsac_mu(vm, vd, vs1, vs2, __RISCV_FRM_RNE, vl);
+}
+
+// CHECK-RV64-LABEL: define dso_local <vscale x 8 x float> @test_vfwmsac_vf_bf16m2_f32m4_rm_mu(
+// CHECK-RV64-SAME: <vscale x 8 x i1> [[VM:%.*]], <vscale x 8 x float> [[VD:%.*]], bfloat noundef [[VS1:%.*]], <vscale x 8 x bfloat> [[VS2:%.*]], i64 noundef [[VL:%.*]]) #[[ATTR0]] {
+// CHECK-RV64-NEXT: [[ENTRY:.*:]]
+// CHECK-RV64-NEXT: [[TMP0:%.*]] = call <vscale x 8 x float> @llvm.riscv.vfwmsac.mask.nxv8f32.bf16.nxv8bf16.i64(<vscale x 8 x float> [[VD]], bfloat [[VS1]], <vscale x 8 x bfloat> [[VS2]], <vscale x 8 x i1> [[VM]], i64 0, i64 [[VL]], i64 1)
+// CHECK-RV64-NEXT: ret <vscale x 8 x float> [[TMP0]]
+//
+vfloat32m4_t test_vfwmsac_vf_bf16m2_f32m4_rm_mu(vbool8_t vm, vfloat32m4_t vd,
+ __bf16 vs1, vbfloat16m2_t vs2,
+ size_t vl) {
+ return __riscv_vfwmsac_mu(vm, vd, vs1, vs2, __RISCV_FRM_RNE, vl);
+}
+
+// CHECK-RV64-LABEL: define dso_local <vscale x 16 x float> @test_vfwmsac_vv_bf16m4_f32m8_rm_mu(
+// CHECK-RV64-SAME: <vscale x 16 x i1> [[VM:%.*]], <vscale x 16 x float> [[VD:%.*]], <vscale x 16 x bfloat> [[VS1:%.*]], <vscale x 16 x bfloat> [[VS2:%.*]], i64 noundef [[VL:%.*]]) #[[ATTR0]] {
+// CHECK-RV64-NEXT: [[ENTRY:.*:]]
+// CHECK-RV64-NEXT: [[TMP0:%.*]] = call <vscale x 16 x float> @llvm.riscv.vfwmsac.mask.nxv16f32.nxv16bf16.nxv16bf16.i64(<vscale x 16 x float> [[VD]], <vscale x 16 x bfloat> [[VS1]], <vscale x 16 x bfloat> [[VS2]], <vscale x 16 x i1> [[VM]], i64 0, i64 [[VL]], i64 1)
+// CHECK-RV64-NEXT: ret <vscale x 16 x float> [[TMP0]]
+//
+vfloat32m8_t test_vfwmsac_vv_bf16m4_f32m8_rm_mu(vbool4_t vm, vfloat32m8_t vd,
+ vbfloat16m4_t vs1,
+ vbfloat16m4_t vs2, size_t vl) {
+ return __riscv_vfwmsac_mu(vm, vd, vs1, vs2, __RISCV_FRM_RNE, vl);
+}
+
+// CHECK-RV64-LABEL: define dso_local <vscale x 16 x float> @test_vfwmsac_vf_bf16m4_f32m8_rm_mu(
+// CHECK-RV64-SAME: <vscale x 16 x i1> [[VM:%.*]], <vscale x 16 x float> [[VD:%.*]], bfloat noundef [[VS1:%.*]], <vscale x 16 x bfloat> [[VS2:%.*]], i64 noundef [[VL:%.*]]) #[[ATTR0]] {
+// CHECK-RV64-NEXT: [[ENTRY:.*:]]
+// CHECK-RV64-NEXT: [[TMP0:%.*]] = call <vscale x 16 x float> @llvm.riscv.vfwmsac.mask.nxv16f32.bf16.nxv16bf16.i64(<vscale x 16 x float> [[VD]], bfloat [[VS1]], <vscale x 16 x bfloat> [[VS2]], <vscale x 16 x i1> [[VM]], i64 0, i64 [[VL]], i64 1)
+// CHECK-RV64-NEXT: ret <vscale x 16 x float> [[TMP0]]
+//
+vfloat32m8_t test_vfwmsac_vf_bf16m4_f32m8_rm_mu(vbool4_t vm, vfloat32m8_t vd,
+ __bf16 vs1, vbfloat16m4_t vs2,
+ size_t vl) {
+ return __riscv_vfwmsac_mu(vm, vd, vs1, vs2, __RISCV_FRM_RNE, vl);
+}
diff --git a/clang/test/CodeGen/RISCV/rvv-intrinsics-autogenerated/zvfbfa/policy/overloaded/vfwmul.c b/clang/test/CodeGen/RISCV/rvv-intrinsics-autogenerated/zvfbfa/policy/overloaded/vfwmul.c
new file mode 100644
index 0000000..4a2b5e3
--- /dev/null
+++ b/clang/test/CodeGen/RISCV/rvv-intrinsics-autogenerated/zvfbfa/policy/overloaded/vfwmul.c
@@ -0,0 +1,975 @@
+// NOTE: Assertions have been autogenerated by utils/update_cc_test_checks.py UTC_ARGS: --version 5
+// REQUIRES: riscv-registered-target
+// RUN: %clang_cc1 -triple riscv64 -target-feature +v \
+// RUN: -target-feature +experimental-zvfbfa -disable-O0-optnone \
+// RUN: -emit-llvm %s -o - | opt -S -passes=mem2reg | \
+// RUN: FileCheck --check-prefix=CHECK-RV64 %s
+
+#include <riscv_vector.h>
+
+// CHECK-RV64-LABEL: define dso_local <vscale x 1 x float> @test_vfwmul_vv_bf16mf4_f32mf2_tu(
+// CHECK-RV64-SAME: <vscale x 1 x float> [[VD:%.*]], <vscale x 1 x bfloat> [[VS2:%.*]], <vscale x 1 x bfloat> [[VS1:%.*]], i64 noundef [[VL:%.*]]) #[[ATTR0:[0-9]+]] {
+// CHECK-RV64-NEXT: [[ENTRY:.*:]]
+// CHECK-RV64-NEXT: [[TMP0:%.*]] = call <vscale x 1 x float> @llvm.riscv.vfwmul.nxv1f32.nxv1bf16.nxv1bf16.i64(<vscale x 1 x float> [[VD]], <vscale x 1 x bfloat> [[VS2]], <vscale x 1 x bfloat> [[VS1]], i64 7, i64 [[VL]])
+// CHECK-RV64-NEXT: ret <vscale x 1 x float> [[TMP0]]
+//
+vfloat32mf2_t test_vfwmul_vv_bf16mf4_f32mf2_tu(vfloat32mf2_t vd,
+ vbfloat16mf4_t vs2,
+ vbfloat16mf4_t vs1, size_t vl) {
+ return __riscv_vfwmul_tu(vd, vs2, vs1, vl);
+}
+
+// CHECK-RV64-LABEL: define dso_local <vscale x 1 x float> @test_vfwmul_vf_bf16mf4_f32mf2_tu(
+// CHECK-RV64-SAME: <vscale x 1 x float> [[VD:%.*]], <vscale x 1 x bfloat> [[VS2:%.*]], bfloat noundef [[RS1:%.*]], i64 noundef [[VL:%.*]]) #[[ATTR0]] {
+// CHECK-RV64-NEXT: [[ENTRY:.*:]]
+// CHECK-RV64-NEXT: [[TMP0:%.*]] = call <vscale x 1 x float> @llvm.riscv.vfwmul.nxv1f32.nxv1bf16.bf16.i64(<vscale x 1 x float> [[VD]], <vscale x 1 x bfloat> [[VS2]], bfloat [[RS1]], i64 7, i64 [[VL]])
+// CHECK-RV64-NEXT: ret <vscale x 1 x float> [[TMP0]]
+//
+vfloat32mf2_t test_vfwmul_vf_bf16mf4_f32mf2_tu(vfloat32mf2_t vd,
+ vbfloat16mf4_t vs2, __bf16 rs1,
+ size_t vl) {
+ return __riscv_vfwmul_tu(vd, vs2, rs1, vl);
+}
+
+// CHECK-RV64-LABEL: define dso_local <vscale x 2 x float> @test_vfwmul_vv_bf16mf2_f32m1_tu(
+// CHECK-RV64-SAME: <vscale x 2 x float> [[VD:%.*]], <vscale x 2 x bfloat> [[VS2:%.*]], <vscale x 2 x bfloat> [[VS1:%.*]], i64 noundef [[VL:%.*]]) #[[ATTR0]] {
+// CHECK-RV64-NEXT: [[ENTRY:.*:]]
+// CHECK-RV64-NEXT: [[TMP0:%.*]] = call <vscale x 2 x float> @llvm.riscv.vfwmul.nxv2f32.nxv2bf16.nxv2bf16.i64(<vscale x 2 x float> [[VD]], <vscale x 2 x bfloat> [[VS2]], <vscale x 2 x bfloat> [[VS1]], i64 7, i64 [[VL]])
+// CHECK-RV64-NEXT: ret <vscale x 2 x float> [[TMP0]]
+//
+vfloat32m1_t test_vfwmul_vv_bf16mf2_f32m1_tu(vfloat32m1_t vd,
+ vbfloat16mf2_t vs2,
+ vbfloat16mf2_t vs1, size_t vl) {
+ return __riscv_vfwmul_tu(vd, vs2, vs1, vl);
+}
+
+// CHECK-RV64-LABEL: define dso_local <vscale x 2 x float> @test_vfwmul_vf_bf16mf2_f32m1_tu(
+// CHECK-RV64-SAME: <vscale x 2 x float> [[VD:%.*]], <vscale x 2 x bfloat> [[VS2:%.*]], bfloat noundef [[RS1:%.*]], i64 noundef [[VL:%.*]]) #[[ATTR0]] {
+// CHECK-RV64-NEXT: [[ENTRY:.*:]]
+// CHECK-RV64-NEXT: [[TMP0:%.*]] = call <vscale x 2 x float> @llvm.riscv.vfwmul.nxv2f32.nxv2bf16.bf16.i64(<vscale x 2 x float> [[VD]], <vscale x 2 x bfloat> [[VS2]], bfloat [[RS1]], i64 7, i64 [[VL]])
+// CHECK-RV64-NEXT: ret <vscale x 2 x float> [[TMP0]]
+//
+vfloat32m1_t test_vfwmul_vf_bf16mf2_f32m1_tu(vfloat32m1_t vd,
+ vbfloat16mf2_t vs2, __bf16 rs1,
+ size_t vl) {
+ return __riscv_vfwmul_tu(vd, vs2, rs1, vl);
+}
+
+// CHECK-RV64-LABEL: define dso_local <vscale x 4 x float> @test_vfwmul_vv_bf16m1_f32m2_tu(
+// CHECK-RV64-SAME: <vscale x 4 x float> [[VD:%.*]], <vscale x 4 x bfloat> [[VS2:%.*]], <vscale x 4 x bfloat> [[VS1:%.*]], i64 noundef [[VL:%.*]]) #[[ATTR0]] {
+// CHECK-RV64-NEXT: [[ENTRY:.*:]]
+// CHECK-RV64-NEXT: [[TMP0:%.*]] = call <vscale x 4 x float> @llvm.riscv.vfwmul.nxv4f32.nxv4bf16.nxv4bf16.i64(<vscale x 4 x float> [[VD]], <vscale x 4 x bfloat> [[VS2]], <vscale x 4 x bfloat> [[VS1]], i64 7, i64 [[VL]])
+// CHECK-RV64-NEXT: ret <vscale x 4 x float> [[TMP0]]
+//
+vfloat32m2_t test_vfwmul_vv_bf16m1_f32m2_tu(vfloat32m2_t vd, vbfloat16m1_t vs2,
+ vbfloat16m1_t vs1, size_t vl) {
+ return __riscv_vfwmul_tu(vd, vs2, vs1, vl);
+}
+
+// CHECK-RV64-LABEL: define dso_local <vscale x 4 x float> @test_vfwmul_vf_bf16m1_f32m2_tu(
+// CHECK-RV64-SAME: <vscale x 4 x float> [[VD:%.*]], <vscale x 4 x bfloat> [[VS2:%.*]], bfloat noundef [[RS1:%.*]], i64 noundef [[VL:%.*]]) #[[ATTR0]] {
+// CHECK-RV64-NEXT: [[ENTRY:.*:]]
+// CHECK-RV64-NEXT: [[TMP0:%.*]] = call <vscale x 4 x float> @llvm.riscv.vfwmul.nxv4f32.nxv4bf16.bf16.i64(<vscale x 4 x float> [[VD]], <vscale x 4 x bfloat> [[VS2]], bfloat [[RS1]], i64 7, i64 [[VL]])
+// CHECK-RV64-NEXT: ret <vscale x 4 x float> [[TMP0]]
+//
+vfloat32m2_t test_vfwmul_vf_bf16m1_f32m2_tu(vfloat32m2_t vd, vbfloat16m1_t vs2,
+ __bf16 rs1, size_t vl) {
+ return __riscv_vfwmul_tu(vd, vs2, rs1, vl);
+}
+
+// CHECK-RV64-LABEL: define dso_local <vscale x 8 x float> @test_vfwmul_vv_bf16m2_f32m4_tu(
+// CHECK-RV64-SAME: <vscale x 8 x float> [[VD:%.*]], <vscale x 8 x bfloat> [[VS2:%.*]], <vscale x 8 x bfloat> [[VS1:%.*]], i64 noundef [[VL:%.*]]) #[[ATTR0]] {
+// CHECK-RV64-NEXT: [[ENTRY:.*:]]
+// CHECK-RV64-NEXT: [[TMP0:%.*]] = call <vscale x 8 x float> @llvm.riscv.vfwmul.nxv8f32.nxv8bf16.nxv8bf16.i64(<vscale x 8 x float> [[VD]], <vscale x 8 x bfloat> [[VS2]], <vscale x 8 x bfloat> [[VS1]], i64 7, i64 [[VL]])
+// CHECK-RV64-NEXT: ret <vscale x 8 x float> [[TMP0]]
+//
+vfloat32m4_t test_vfwmul_vv_bf16m2_f32m4_tu(vfloat32m4_t vd, vbfloat16m2_t vs2,
+ vbfloat16m2_t vs1, size_t vl) {
+ return __riscv_vfwmul_tu(vd, vs2, vs1, vl);
+}
+
+// CHECK-RV64-LABEL: define dso_local <vscale x 8 x float> @test_vfwmul_vf_bf16m2_f32m4_tu(
+// CHECK-RV64-SAME: <vscale x 8 x float> [[VD:%.*]], <vscale x 8 x bfloat> [[VS2:%.*]], bfloat noundef [[RS1:%.*]], i64 noundef [[VL:%.*]]) #[[ATTR0]] {
+// CHECK-RV64-NEXT: [[ENTRY:.*:]]
+// CHECK-RV64-NEXT: [[TMP0:%.*]] = call <vscale x 8 x float> @llvm.riscv.vfwmul.nxv8f32.nxv8bf16.bf16.i64(<vscale x 8 x float> [[VD]], <vscale x 8 x bfloat> [[VS2]], bfloat [[RS1]], i64 7, i64 [[VL]])
+// CHECK-RV64-NEXT: ret <vscale x 8 x float> [[TMP0]]
+//
+vfloat32m4_t test_vfwmul_vf_bf16m2_f32m4_tu(vfloat32m4_t vd, vbfloat16m2_t vs2,
+ __bf16 rs1, size_t vl) {
+ return __riscv_vfwmul_tu(vd, vs2, rs1, vl);
+}
+
+// CHECK-RV64-LABEL: define dso_local <vscale x 16 x float> @test_vfwmul_vv_bf16m4_f32m8_tu(
+// CHECK-RV64-SAME: <vscale x 16 x float> [[VD:%.*]], <vscale x 16 x bfloat> [[VS2:%.*]], <vscale x 16 x bfloat> [[VS1:%.*]], i64 noundef [[VL:%.*]]) #[[ATTR0]] {
+// CHECK-RV64-NEXT: [[ENTRY:.*:]]
+// CHECK-RV64-NEXT: [[TMP0:%.*]] = call <vscale x 16 x float> @llvm.riscv.vfwmul.nxv16f32.nxv16bf16.nxv16bf16.i64(<vscale x 16 x float> [[VD]], <vscale x 16 x bfloat> [[VS2]], <vscale x 16 x bfloat> [[VS1]], i64 7, i64 [[VL]])
+// CHECK-RV64-NEXT: ret <vscale x 16 x float> [[TMP0]]
+//
+vfloat32m8_t test_vfwmul_vv_bf16m4_f32m8_tu(vfloat32m8_t vd, vbfloat16m4_t vs2,
+ vbfloat16m4_t vs1, size_t vl) {
+ return __riscv_vfwmul_tu(vd, vs2, vs1, vl);
+}
+
+// CHECK-RV64-LABEL: define dso_local <vscale x 16 x float> @test_vfwmul_vf_bf16m4_f32m8_tu(
+// CHECK-RV64-SAME: <vscale x 16 x float> [[VD:%.*]], <vscale x 16 x bfloat> [[VS2:%.*]], bfloat noundef [[RS1:%.*]], i64 noundef [[VL:%.*]]) #[[ATTR0]] {
+// CHECK-RV64-NEXT: [[ENTRY:.*:]]
+// CHECK-RV64-NEXT: [[TMP0:%.*]] = call <vscale x 16 x float> @llvm.riscv.vfwmul.nxv16f32.nxv16bf16.bf16.i64(<vscale x 16 x float> [[VD]], <vscale x 16 x bfloat> [[VS2]], bfloat [[RS1]], i64 7, i64 [[VL]])
+// CHECK-RV64-NEXT: ret <vscale x 16 x float> [[TMP0]]
+//
+vfloat32m8_t test_vfwmul_vf_bf16m4_f32m8_tu(vfloat32m8_t vd, vbfloat16m4_t vs2,
+ __bf16 rs1, size_t vl) {
+ return __riscv_vfwmul_tu(vd, vs2, rs1, vl);
+}
+
+// CHECK-RV64-LABEL: define dso_local <vscale x 1 x float> @test_vfwmul_vv_bf16mf4_f32mf2_tum(
+// CHECK-RV64-SAME: <vscale x 1 x i1> [[VM:%.*]], <vscale x 1 x float> [[VD:%.*]], <vscale x 1 x bfloat> [[VS2:%.*]], <vscale x 1 x bfloat> [[VS1:%.*]], i64 noundef [[VL:%.*]]) #[[ATTR0]] {
+// CHECK-RV64-NEXT: [[ENTRY:.*:]]
+// CHECK-RV64-NEXT: [[TMP0:%.*]] = call <vscale x 1 x float> @llvm.riscv.vfwmul.mask.nxv1f32.nxv1bf16.nxv1bf16.i64(<vscale x 1 x float> [[VD]], <vscale x 1 x bfloat> [[VS2]], <vscale x 1 x bfloat> [[VS1]], <vscale x 1 x i1> [[VM]], i64 7, i64 [[VL]], i64 2)
+// CHECK-RV64-NEXT: ret <vscale x 1 x float> [[TMP0]]
+//
+vfloat32mf2_t test_vfwmul_vv_bf16mf4_f32mf2_tum(vbool64_t vm, vfloat32mf2_t vd,
+ vbfloat16mf4_t vs2,
+ vbfloat16mf4_t vs1, size_t vl) {
+ return __riscv_vfwmul_tum(vm, vd, vs2, vs1, vl);
+}
+
+// CHECK-RV64-LABEL: define dso_local <vscale x 1 x float> @test_vfwmul_vf_bf16mf4_f32mf2_tum(
+// CHECK-RV64-SAME: <vscale x 1 x i1> [[VM:%.*]], <vscale x 1 x float> [[VD:%.*]], <vscale x 1 x bfloat> [[VS2:%.*]], bfloat noundef [[RS1:%.*]], i64 noundef [[VL:%.*]]) #[[ATTR0]] {
+// CHECK-RV64-NEXT: [[ENTRY:.*:]]
+// CHECK-RV64-NEXT: [[TMP0:%.*]] = call <vscale x 1 x float> @llvm.riscv.vfwmul.mask.nxv1f32.nxv1bf16.bf16.i64(<vscale x 1 x float> [[VD]], <vscale x 1 x bfloat> [[VS2]], bfloat [[RS1]], <vscale x 1 x i1> [[VM]], i64 7, i64 [[VL]], i64 2)
+// CHECK-RV64-NEXT: ret <vscale x 1 x float> [[TMP0]]
+//
+vfloat32mf2_t test_vfwmul_vf_bf16mf4_f32mf2_tum(vbool64_t vm, vfloat32mf2_t vd,
+ vbfloat16mf4_t vs2, __bf16 rs1,
+ size_t vl) {
+ return __riscv_vfwmul_tum(vm, vd, vs2, rs1, vl);
+}
+
+// CHECK-RV64-LABEL: define dso_local <vscale x 2 x float> @test_vfwmul_vv_bf16mf2_f32m1_tum(
+// CHECK-RV64-SAME: <vscale x 2 x i1> [[VM:%.*]], <vscale x 2 x float> [[VD:%.*]], <vscale x 2 x bfloat> [[VS2:%.*]], <vscale x 2 x bfloat> [[VS1:%.*]], i64 noundef [[VL:%.*]]) #[[ATTR0]] {
+// CHECK-RV64-NEXT: [[ENTRY:.*:]]
+// CHECK-RV64-NEXT: [[TMP0:%.*]] = call <vscale x 2 x float> @llvm.riscv.vfwmul.mask.nxv2f32.nxv2bf16.nxv2bf16.i64(<vscale x 2 x float> [[VD]], <vscale x 2 x bfloat> [[VS2]], <vscale x 2 x bfloat> [[VS1]], <vscale x 2 x i1> [[VM]], i64 7, i64 [[VL]], i64 2)
+// CHECK-RV64-NEXT: ret <vscale x 2 x float> [[TMP0]]
+//
+vfloat32m1_t test_vfwmul_vv_bf16mf2_f32m1_tum(vbool32_t vm, vfloat32m1_t vd,
+ vbfloat16mf2_t vs2,
+ vbfloat16mf2_t vs1, size_t vl) {
+ return __riscv_vfwmul_tum(vm, vd, vs2, vs1, vl);
+}
+
+// CHECK-RV64-LABEL: define dso_local <vscale x 2 x float> @test_vfwmul_vf_bf16mf2_f32m1_tum(
+// CHECK-RV64-SAME: <vscale x 2 x i1> [[VM:%.*]], <vscale x 2 x float> [[VD:%.*]], <vscale x 2 x bfloat> [[VS2:%.*]], bfloat noundef [[RS1:%.*]], i64 noundef [[VL:%.*]]) #[[ATTR0]] {
+// CHECK-RV64-NEXT: [[ENTRY:.*:]]
+// CHECK-RV64-NEXT: [[TMP0:%.*]] = call <vscale x 2 x float> @llvm.riscv.vfwmul.mask.nxv2f32.nxv2bf16.bf16.i64(<vscale x 2 x float> [[VD]], <vscale x 2 x bfloat> [[VS2]], bfloat [[RS1]], <vscale x 2 x i1> [[VM]], i64 7, i64 [[VL]], i64 2)
+// CHECK-RV64-NEXT: ret <vscale x 2 x float> [[TMP0]]
+//
+vfloat32m1_t test_vfwmul_vf_bf16mf2_f32m1_tum(vbool32_t vm, vfloat32m1_t vd,
+ vbfloat16mf2_t vs2, __bf16 rs1,
+ size_t vl) {
+ return __riscv_vfwmul_tum(vm, vd, vs2, rs1, vl);
+}
+
+// CHECK-RV64-LABEL: define dso_local <vscale x 4 x float> @test_vfwmul_vv_bf16m1_f32m2_tum(
+// CHECK-RV64-SAME: <vscale x 4 x i1> [[VM:%.*]], <vscale x 4 x float> [[VD:%.*]], <vscale x 4 x bfloat> [[VS2:%.*]], <vscale x 4 x bfloat> [[VS1:%.*]], i64 noundef [[VL:%.*]]) #[[ATTR0]] {
+// CHECK-RV64-NEXT: [[ENTRY:.*:]]
+// CHECK-RV64-NEXT: [[TMP0:%.*]] = call <vscale x 4 x float> @llvm.riscv.vfwmul.mask.nxv4f32.nxv4bf16.nxv4bf16.i64(<vscale x 4 x float> [[VD]], <vscale x 4 x bfloat> [[VS2]], <vscale x 4 x bfloat> [[VS1]], <vscale x 4 x i1> [[VM]], i64 7, i64 [[VL]], i64 2)
+// CHECK-RV64-NEXT: ret <vscale x 4 x float> [[TMP0]]
+//
+vfloat32m2_t test_vfwmul_vv_bf16m1_f32m2_tum(vbool16_t vm, vfloat32m2_t vd,
+ vbfloat16m1_t vs2,
+ vbfloat16m1_t vs1, size_t vl) {
+ return __riscv_vfwmul_tum(vm, vd, vs2, vs1, vl);
+}
+
+// CHECK-RV64-LABEL: define dso_local <vscale x 4 x float> @test_vfwmul_vf_bf16m1_f32m2_tum(
+// CHECK-RV64-SAME: <vscale x 4 x i1> [[VM:%.*]], <vscale x 4 x float> [[VD:%.*]], <vscale x 4 x bfloat> [[VS2:%.*]], bfloat noundef [[RS1:%.*]], i64 noundef [[VL:%.*]]) #[[ATTR0]] {
+// CHECK-RV64-NEXT: [[ENTRY:.*:]]
+// CHECK-RV64-NEXT: [[TMP0:%.*]] = call <vscale x 4 x float> @llvm.riscv.vfwmul.mask.nxv4f32.nxv4bf16.bf16.i64(<vscale x 4 x float> [[VD]], <vscale x 4 x bfloat> [[VS2]], bfloat [[RS1]], <vscale x 4 x i1> [[VM]], i64 7, i64 [[VL]], i64 2)
+// CHECK-RV64-NEXT: ret <vscale x 4 x float> [[TMP0]]
+//
+vfloat32m2_t test_vfwmul_vf_bf16m1_f32m2_tum(vbool16_t vm, vfloat32m2_t vd,
+ vbfloat16m1_t vs2, __bf16 rs1,
+ size_t vl) {
+ return __riscv_vfwmul_tum(vm, vd, vs2, rs1, vl);
+}
+
+// CHECK-RV64-LABEL: define dso_local <vscale x 8 x float> @test_vfwmul_vv_bf16m2_f32m4_tum(
+// CHECK-RV64-SAME: <vscale x 8 x i1> [[VM:%.*]], <vscale x 8 x float> [[VD:%.*]], <vscale x 8 x bfloat> [[VS2:%.*]], <vscale x 8 x bfloat> [[VS1:%.*]], i64 noundef [[VL:%.*]]) #[[ATTR0]] {
+// CHECK-RV64-NEXT: [[ENTRY:.*:]]
+// CHECK-RV64-NEXT: [[TMP0:%.*]] = call <vscale x 8 x float> @llvm.riscv.vfwmul.mask.nxv8f32.nxv8bf16.nxv8bf16.i64(<vscale x 8 x float> [[VD]], <vscale x 8 x bfloat> [[VS2]], <vscale x 8 x bfloat> [[VS1]], <vscale x 8 x i1> [[VM]], i64 7, i64 [[VL]], i64 2)
+// CHECK-RV64-NEXT: ret <vscale x 8 x float> [[TMP0]]
+//
+vfloat32m4_t test_vfwmul_vv_bf16m2_f32m4_tum(vbool8_t vm, vfloat32m4_t vd,
+ vbfloat16m2_t vs2,
+ vbfloat16m2_t vs1, size_t vl) {
+ return __riscv_vfwmul_tum(vm, vd, vs2, vs1, vl);
+}
+
+// CHECK-RV64-LABEL: define dso_local <vscale x 8 x float> @test_vfwmul_vf_bf16m2_f32m4_tum(
+// CHECK-RV64-SAME: <vscale x 8 x i1> [[VM:%.*]], <vscale x 8 x float> [[VD:%.*]], <vscale x 8 x bfloat> [[VS2:%.*]], bfloat noundef [[RS1:%.*]], i64 noundef [[VL:%.*]]) #[[ATTR0]] {
+// CHECK-RV64-NEXT: [[ENTRY:.*:]]
+// CHECK-RV64-NEXT: [[TMP0:%.*]] = call <vscale x 8 x float> @llvm.riscv.vfwmul.mask.nxv8f32.nxv8bf16.bf16.i64(<vscale x 8 x float> [[VD]], <vscale x 8 x bfloat> [[VS2]], bfloat [[RS1]], <vscale x 8 x i1> [[VM]], i64 7, i64 [[VL]], i64 2)
+// CHECK-RV64-NEXT: ret <vscale x 8 x float> [[TMP0]]
+//
+vfloat32m4_t test_vfwmul_vf_bf16m2_f32m4_tum(vbool8_t vm, vfloat32m4_t vd,
+ vbfloat16m2_t vs2, __bf16 rs1,
+ size_t vl) {
+ return __riscv_vfwmul_tum(vm, vd, vs2, rs1, vl);
+}
+
+// CHECK-RV64-LABEL: define dso_local <vscale x 16 x float> @test_vfwmul_vv_bf16m4_f32m8_tum(
+// CHECK-RV64-SAME: <vscale x 16 x i1> [[VM:%.*]], <vscale x 16 x float> [[VD:%.*]], <vscale x 16 x bfloat> [[VS2:%.*]], <vscale x 16 x bfloat> [[VS1:%.*]], i64 noundef [[VL:%.*]]) #[[ATTR0]] {
+// CHECK-RV64-NEXT: [[ENTRY:.*:]]
+// CHECK-RV64-NEXT: [[TMP0:%.*]] = call <vscale x 16 x float> @llvm.riscv.vfwmul.mask.nxv16f32.nxv16bf16.nxv16bf16.i64(<vscale x 16 x float> [[VD]], <vscale x 16 x bfloat> [[VS2]], <vscale x 16 x bfloat> [[VS1]], <vscale x 16 x i1> [[VM]], i64 7, i64 [[VL]], i64 2)
+// CHECK-RV64-NEXT: ret <vscale x 16 x float> [[TMP0]]
+//
+vfloat32m8_t test_vfwmul_vv_bf16m4_f32m8_tum(vbool4_t vm, vfloat32m8_t vd,
+ vbfloat16m4_t vs2,
+ vbfloat16m4_t vs1, size_t vl) {
+ return __riscv_vfwmul_tum(vm, vd, vs2, vs1, vl);
+}
+
+// CHECK-RV64-LABEL: define dso_local <vscale x 16 x float> @test_vfwmul_vf_bf16m4_f32m8_tum(
+// CHECK-RV64-SAME: <vscale x 16 x i1> [[VM:%.*]], <vscale x 16 x float> [[VD:%.*]], <vscale x 16 x bfloat> [[VS2:%.*]], bfloat noundef [[RS1:%.*]], i64 noundef [[VL:%.*]]) #[[ATTR0]] {
+// CHECK-RV64-NEXT: [[ENTRY:.*:]]
+// CHECK-RV64-NEXT: [[TMP0:%.*]] = call <vscale x 16 x float> @llvm.riscv.vfwmul.mask.nxv16f32.nxv16bf16.bf16.i64(<vscale x 16 x float> [[VD]], <vscale x 16 x bfloat> [[VS2]], bfloat [[RS1]], <vscale x 16 x i1> [[VM]], i64 7, i64 [[VL]], i64 2)
+// CHECK-RV64-NEXT: ret <vscale x 16 x float> [[TMP0]]
+//
+vfloat32m8_t test_vfwmul_vf_bf16m4_f32m8_tum(vbool4_t vm, vfloat32m8_t vd,
+ vbfloat16m4_t vs2, __bf16 rs1,
+ size_t vl) {
+ return __riscv_vfwmul_tum(vm, vd, vs2, rs1, vl);
+}
+
+// CHECK-RV64-LABEL: define dso_local <vscale x 1 x float> @test_vfwmul_vv_bf16mf4_f32mf2_tumu(
+// CHECK-RV64-SAME: <vscale x 1 x i1> [[VM:%.*]], <vscale x 1 x float> [[VD:%.*]], <vscale x 1 x bfloat> [[VS2:%.*]], <vscale x 1 x bfloat> [[VS1:%.*]], i64 noundef [[VL:%.*]]) #[[ATTR0]] {
+// CHECK-RV64-NEXT: [[ENTRY:.*:]]
+// CHECK-RV64-NEXT: [[TMP0:%.*]] = call <vscale x 1 x float> @llvm.riscv.vfwmul.mask.nxv1f32.nxv1bf16.nxv1bf16.i64(<vscale x 1 x float> [[VD]], <vscale x 1 x bfloat> [[VS2]], <vscale x 1 x bfloat> [[VS1]], <vscale x 1 x i1> [[VM]], i64 7, i64 [[VL]], i64 0)
+// CHECK-RV64-NEXT: ret <vscale x 1 x float> [[TMP0]]
+//
+vfloat32mf2_t test_vfwmul_vv_bf16mf4_f32mf2_tumu(vbool64_t vm, vfloat32mf2_t vd,
+ vbfloat16mf4_t vs2,
+ vbfloat16mf4_t vs1,
+ size_t vl) {
+ return __riscv_vfwmul_tumu(vm, vd, vs2, vs1, vl);
+}
+
+// CHECK-RV64-LABEL: define dso_local <vscale x 1 x float> @test_vfwmul_vf_bf16mf4_f32mf2_tumu(
+// CHECK-RV64-SAME: <vscale x 1 x i1> [[VM:%.*]], <vscale x 1 x float> [[VD:%.*]], <vscale x 1 x bfloat> [[VS2:%.*]], bfloat noundef [[RS1:%.*]], i64 noundef [[VL:%.*]]) #[[ATTR0]] {
+// CHECK-RV64-NEXT: [[ENTRY:.*:]]
+// CHECK-RV64-NEXT: [[TMP0:%.*]] = call <vscale x 1 x float> @llvm.riscv.vfwmul.mask.nxv1f32.nxv1bf16.bf16.i64(<vscale x 1 x float> [[VD]], <vscale x 1 x bfloat> [[VS2]], bfloat [[RS1]], <vscale x 1 x i1> [[VM]], i64 7, i64 [[VL]], i64 0)
+// CHECK-RV64-NEXT: ret <vscale x 1 x float> [[TMP0]]
+//
+vfloat32mf2_t test_vfwmul_vf_bf16mf4_f32mf2_tumu(vbool64_t vm, vfloat32mf2_t vd,
+ vbfloat16mf4_t vs2, __bf16 rs1,
+ size_t vl) {
+ return __riscv_vfwmul_tumu(vm, vd, vs2, rs1, vl);
+}
+
+// CHECK-RV64-LABEL: define dso_local <vscale x 2 x float> @test_vfwmul_vv_bf16mf2_f32m1_tumu(
+// CHECK-RV64-SAME: <vscale x 2 x i1> [[VM:%.*]], <vscale x 2 x float> [[VD:%.*]], <vscale x 2 x bfloat> [[VS2:%.*]], <vscale x 2 x bfloat> [[VS1:%.*]], i64 noundef [[VL:%.*]]) #[[ATTR0]] {
+// CHECK-RV64-NEXT: [[ENTRY:.*:]]
+// CHECK-RV64-NEXT: [[TMP0:%.*]] = call <vscale x 2 x float> @llvm.riscv.vfwmul.mask.nxv2f32.nxv2bf16.nxv2bf16.i64(<vscale x 2 x float> [[VD]], <vscale x 2 x bfloat> [[VS2]], <vscale x 2 x bfloat> [[VS1]], <vscale x 2 x i1> [[VM]], i64 7, i64 [[VL]], i64 0)
+// CHECK-RV64-NEXT: ret <vscale x 2 x float> [[TMP0]]
+//
+vfloat32m1_t test_vfwmul_vv_bf16mf2_f32m1_tumu(vbool32_t vm, vfloat32m1_t vd,
+ vbfloat16mf2_t vs2,
+ vbfloat16mf2_t vs1, size_t vl) {
+ return __riscv_vfwmul_tumu(vm, vd, vs2, vs1, vl);
+}
+
+// CHECK-RV64-LABEL: define dso_local <vscale x 2 x float> @test_vfwmul_vf_bf16mf2_f32m1_tumu(
+// CHECK-RV64-SAME: <vscale x 2 x i1> [[VM:%.*]], <vscale x 2 x float> [[VD:%.*]], <vscale x 2 x bfloat> [[VS2:%.*]], bfloat noundef [[RS1:%.*]], i64 noundef [[VL:%.*]]) #[[ATTR0]] {
+// CHECK-RV64-NEXT: [[ENTRY:.*:]]
+// CHECK-RV64-NEXT: [[TMP0:%.*]] = call <vscale x 2 x float> @llvm.riscv.vfwmul.mask.nxv2f32.nxv2bf16.bf16.i64(<vscale x 2 x float> [[VD]], <vscale x 2 x bfloat> [[VS2]], bfloat [[RS1]], <vscale x 2 x i1> [[VM]], i64 7, i64 [[VL]], i64 0)
+// CHECK-RV64-NEXT: ret <vscale x 2 x float> [[TMP0]]
+//
+vfloat32m1_t test_vfwmul_vf_bf16mf2_f32m1_tumu(vbool32_t vm, vfloat32m1_t vd,
+ vbfloat16mf2_t vs2, __bf16 rs1,
+ size_t vl) {
+ return __riscv_vfwmul_tumu(vm, vd, vs2, rs1, vl);
+}
+
+// CHECK-RV64-LABEL: define dso_local <vscale x 4 x float> @test_vfwmul_vv_bf16m1_f32m2_tumu(
+// CHECK-RV64-SAME: <vscale x 4 x i1> [[VM:%.*]], <vscale x 4 x float> [[VD:%.*]], <vscale x 4 x bfloat> [[VS2:%.*]], <vscale x 4 x bfloat> [[VS1:%.*]], i64 noundef [[VL:%.*]]) #[[ATTR0]] {
+// CHECK-RV64-NEXT: [[ENTRY:.*:]]
+// CHECK-RV64-NEXT: [[TMP0:%.*]] = call <vscale x 4 x float> @llvm.riscv.vfwmul.mask.nxv4f32.nxv4bf16.nxv4bf16.i64(<vscale x 4 x float> [[VD]], <vscale x 4 x bfloat> [[VS2]], <vscale x 4 x bfloat> [[VS1]], <vscale x 4 x i1> [[VM]], i64 7, i64 [[VL]], i64 0)
+// CHECK-RV64-NEXT: ret <vscale x 4 x float> [[TMP0]]
+//
+vfloat32m2_t test_vfwmul_vv_bf16m1_f32m2_tumu(vbool16_t vm, vfloat32m2_t vd,
+ vbfloat16m1_t vs2,
+ vbfloat16m1_t vs1, size_t vl) {
+ return __riscv_vfwmul_tumu(vm, vd, vs2, vs1, vl);
+}
+
+// CHECK-RV64-LABEL: define dso_local <vscale x 4 x float> @test_vfwmul_vf_bf16m1_f32m2_tumu(
+// CHECK-RV64-SAME: <vscale x 4 x i1> [[VM:%.*]], <vscale x 4 x float> [[VD:%.*]], <vscale x 4 x bfloat> [[VS2:%.*]], bfloat noundef [[RS1:%.*]], i64 noundef [[VL:%.*]]) #[[ATTR0]] {
+// CHECK-RV64-NEXT: [[ENTRY:.*:]]
+// CHECK-RV64-NEXT: [[TMP0:%.*]] = call <vscale x 4 x float> @llvm.riscv.vfwmul.mask.nxv4f32.nxv4bf16.bf16.i64(<vscale x 4 x float> [[VD]], <vscale x 4 x bfloat> [[VS2]], bfloat [[RS1]], <vscale x 4 x i1> [[VM]], i64 7, i64 [[VL]], i64 0)
+// CHECK-RV64-NEXT: ret <vscale x 4 x float> [[TMP0]]
+//
+vfloat32m2_t test_vfwmul_vf_bf16m1_f32m2_tumu(vbool16_t vm, vfloat32m2_t vd,
+ vbfloat16m1_t vs2, __bf16 rs1,
+ size_t vl) {
+ return __riscv_vfwmul_tumu(vm, vd, vs2, rs1, vl);
+}
+
+// CHECK-RV64-LABEL: define dso_local <vscale x 8 x float> @test_vfwmul_vv_bf16m2_f32m4_tumu(
+// CHECK-RV64-SAME: <vscale x 8 x i1> [[VM:%.*]], <vscale x 8 x float> [[VD:%.*]], <vscale x 8 x bfloat> [[VS2:%.*]], <vscale x 8 x bfloat> [[VS1:%.*]], i64 noundef [[VL:%.*]]) #[[ATTR0]] {
+// CHECK-RV64-NEXT: [[ENTRY:.*:]]
+// CHECK-RV64-NEXT: [[TMP0:%.*]] = call <vscale x 8 x float> @llvm.riscv.vfwmul.mask.nxv8f32.nxv8bf16.nxv8bf16.i64(<vscale x 8 x float> [[VD]], <vscale x 8 x bfloat> [[VS2]], <vscale x 8 x bfloat> [[VS1]], <vscale x 8 x i1> [[VM]], i64 7, i64 [[VL]], i64 0)
+// CHECK-RV64-NEXT: ret <vscale x 8 x float> [[TMP0]]
+//
+vfloat32m4_t test_vfwmul_vv_bf16m2_f32m4_tumu(vbool8_t vm, vfloat32m4_t vd,
+ vbfloat16m2_t vs2,
+ vbfloat16m2_t vs1, size_t vl) {
+ return __riscv_vfwmul_tumu(vm, vd, vs2, vs1, vl);
+}
+
+// CHECK-RV64-LABEL: define dso_local <vscale x 8 x float> @test_vfwmul_vf_bf16m2_f32m4_tumu(
+// CHECK-RV64-SAME: <vscale x 8 x i1> [[VM:%.*]], <vscale x 8 x float> [[VD:%.*]], <vscale x 8 x bfloat> [[VS2:%.*]], bfloat noundef [[RS1:%.*]], i64 noundef [[VL:%.*]]) #[[ATTR0]] {
+// CHECK-RV64-NEXT: [[ENTRY:.*:]]
+// CHECK-RV64-NEXT: [[TMP0:%.*]] = call <vscale x 8 x float> @llvm.riscv.vfwmul.mask.nxv8f32.nxv8bf16.bf16.i64(<vscale x 8 x float> [[VD]], <vscale x 8 x bfloat> [[VS2]], bfloat [[RS1]], <vscale x 8 x i1> [[VM]], i64 7, i64 [[VL]], i64 0)
+// CHECK-RV64-NEXT: ret <vscale x 8 x float> [[TMP0]]
+//
+vfloat32m4_t test_vfwmul_vf_bf16m2_f32m4_tumu(vbool8_t vm, vfloat32m4_t vd,
+ vbfloat16m2_t vs2, __bf16 rs1,
+ size_t vl) {
+ return __riscv_vfwmul_tumu(vm, vd, vs2, rs1, vl);
+}
+
+// CHECK-RV64-LABEL: define dso_local <vscale x 16 x float> @test_vfwmul_vv_bf16m4_f32m8_tumu(
+// CHECK-RV64-SAME: <vscale x 16 x i1> [[VM:%.*]], <vscale x 16 x float> [[VD:%.*]], <vscale x 16 x bfloat> [[VS2:%.*]], <vscale x 16 x bfloat> [[VS1:%.*]], i64 noundef [[VL:%.*]]) #[[ATTR0]] {
+// CHECK-RV64-NEXT: [[ENTRY:.*:]]
+// CHECK-RV64-NEXT: [[TMP0:%.*]] = call <vscale x 16 x float> @llvm.riscv.vfwmul.mask.nxv16f32.nxv16bf16.nxv16bf16.i64(<vscale x 16 x float> [[VD]], <vscale x 16 x bfloat> [[VS2]], <vscale x 16 x bfloat> [[VS1]], <vscale x 16 x i1> [[VM]], i64 7, i64 [[VL]], i64 0)
+// CHECK-RV64-NEXT: ret <vscale x 16 x float> [[TMP0]]
+//
+vfloat32m8_t test_vfwmul_vv_bf16m4_f32m8_tumu(vbool4_t vm, vfloat32m8_t vd,
+ vbfloat16m4_t vs2,
+ vbfloat16m4_t vs1, size_t vl) {
+ return __riscv_vfwmul_tumu(vm, vd, vs2, vs1, vl);
+}
+
+// CHECK-RV64-LABEL: define dso_local <vscale x 16 x float> @test_vfwmul_vf_bf16m4_f32m8_tumu(
+// CHECK-RV64-SAME: <vscale x 16 x i1> [[VM:%.*]], <vscale x 16 x float> [[VD:%.*]], <vscale x 16 x bfloat> [[VS2:%.*]], bfloat noundef [[RS1:%.*]], i64 noundef [[VL:%.*]]) #[[ATTR0]] {
+// CHECK-RV64-NEXT: [[ENTRY:.*:]]
+// CHECK-RV64-NEXT: [[TMP0:%.*]] = call <vscale x 16 x float> @llvm.riscv.vfwmul.mask.nxv16f32.nxv16bf16.bf16.i64(<vscale x 16 x float> [[VD]], <vscale x 16 x bfloat> [[VS2]], bfloat [[RS1]], <vscale x 16 x i1> [[VM]], i64 7, i64 [[VL]], i64 0)
+// CHECK-RV64-NEXT: ret <vscale x 16 x float> [[TMP0]]
+//
+vfloat32m8_t test_vfwmul_vf_bf16m4_f32m8_tumu(vbool4_t vm, vfloat32m8_t vd,
+ vbfloat16m4_t vs2, __bf16 rs1,
+ size_t vl) {
+ return __riscv_vfwmul_tumu(vm, vd, vs2, rs1, vl);
+}
+
+// CHECK-RV64-LABEL: define dso_local <vscale x 1 x float> @test_vfwmul_vv_bf16mf4_f32mf2_mu(
+// CHECK-RV64-SAME: <vscale x 1 x i1> [[VM:%.*]], <vscale x 1 x float> [[VD:%.*]], <vscale x 1 x bfloat> [[VS2:%.*]], <vscale x 1 x bfloat> [[VS1:%.*]], i64 noundef [[VL:%.*]]) #[[ATTR0]] {
+// CHECK-RV64-NEXT: [[ENTRY:.*:]]
+// CHECK-RV64-NEXT: [[TMP0:%.*]] = call <vscale x 1 x float> @llvm.riscv.vfwmul.mask.nxv1f32.nxv1bf16.nxv1bf16.i64(<vscale x 1 x float> [[VD]], <vscale x 1 x bfloat> [[VS2]], <vscale x 1 x bfloat> [[VS1]], <vscale x 1 x i1> [[VM]], i64 7, i64 [[VL]], i64 1)
+// CHECK-RV64-NEXT: ret <vscale x 1 x float> [[TMP0]]
+//
+vfloat32mf2_t test_vfwmul_vv_bf16mf4_f32mf2_mu(vbool64_t vm, vfloat32mf2_t vd,
+ vbfloat16mf4_t vs2,
+ vbfloat16mf4_t vs1, size_t vl) {
+ return __riscv_vfwmul_mu(vm, vd, vs2, vs1, vl);
+}
+
+// CHECK-RV64-LABEL: define dso_local <vscale x 1 x float> @test_vfwmul_vf_bf16mf4_f32mf2_mu(
+// CHECK-RV64-SAME: <vscale x 1 x i1> [[VM:%.*]], <vscale x 1 x float> [[VD:%.*]], <vscale x 1 x bfloat> [[VS2:%.*]], bfloat noundef [[RS1:%.*]], i64 noundef [[VL:%.*]]) #[[ATTR0]] {
+// CHECK-RV64-NEXT: [[ENTRY:.*:]]
+// CHECK-RV64-NEXT: [[TMP0:%.*]] = call <vscale x 1 x float> @llvm.riscv.vfwmul.mask.nxv1f32.nxv1bf16.bf16.i64(<vscale x 1 x float> [[VD]], <vscale x 1 x bfloat> [[VS2]], bfloat [[RS1]], <vscale x 1 x i1> [[VM]], i64 7, i64 [[VL]], i64 1)
+// CHECK-RV64-NEXT: ret <vscale x 1 x float> [[TMP0]]
+//
+vfloat32mf2_t test_vfwmul_vf_bf16mf4_f32mf2_mu(vbool64_t vm, vfloat32mf2_t vd,
+ vbfloat16mf4_t vs2, __bf16 rs1,
+ size_t vl) {
+ return __riscv_vfwmul_mu(vm, vd, vs2, rs1, vl);
+}
+
+// CHECK-RV64-LABEL: define dso_local <vscale x 2 x float> @test_vfwmul_vv_bf16mf2_f32m1_mu(
+// CHECK-RV64-SAME: <vscale x 2 x i1> [[VM:%.*]], <vscale x 2 x float> [[VD:%.*]], <vscale x 2 x bfloat> [[VS2:%.*]], <vscale x 2 x bfloat> [[VS1:%.*]], i64 noundef [[VL:%.*]]) #[[ATTR0]] {
+// CHECK-RV64-NEXT: [[ENTRY:.*:]]
+// CHECK-RV64-NEXT: [[TMP0:%.*]] = call <vscale x 2 x float> @llvm.riscv.vfwmul.mask.nxv2f32.nxv2bf16.nxv2bf16.i64(<vscale x 2 x float> [[VD]], <vscale x 2 x bfloat> [[VS2]], <vscale x 2 x bfloat> [[VS1]], <vscale x 2 x i1> [[VM]], i64 7, i64 [[VL]], i64 1)
+// CHECK-RV64-NEXT: ret <vscale x 2 x float> [[TMP0]]
+//
+vfloat32m1_t test_vfwmul_vv_bf16mf2_f32m1_mu(vbool32_t vm, vfloat32m1_t vd,
+ vbfloat16mf2_t vs2,
+ vbfloat16mf2_t vs1, size_t vl) {
+ return __riscv_vfwmul_mu(vm, vd, vs2, vs1, vl);
+}
+
+// CHECK-RV64-LABEL: define dso_local <vscale x 2 x float> @test_vfwmul_vf_bf16mf2_f32m1_mu(
+// CHECK-RV64-SAME: <vscale x 2 x i1> [[VM:%.*]], <vscale x 2 x float> [[VD:%.*]], <vscale x 2 x bfloat> [[VS2:%.*]], bfloat noundef [[RS1:%.*]], i64 noundef [[VL:%.*]]) #[[ATTR0]] {
+// CHECK-RV64-NEXT: [[ENTRY:.*:]]
+// CHECK-RV64-NEXT: [[TMP0:%.*]] = call <vscale x 2 x float> @llvm.riscv.vfwmul.mask.nxv2f32.nxv2bf16.bf16.i64(<vscale x 2 x float> [[VD]], <vscale x 2 x bfloat> [[VS2]], bfloat [[RS1]], <vscale x 2 x i1> [[VM]], i64 7, i64 [[VL]], i64 1)
+// CHECK-RV64-NEXT: ret <vscale x 2 x float> [[TMP0]]
+//
+vfloat32m1_t test_vfwmul_vf_bf16mf2_f32m1_mu(vbool32_t vm, vfloat32m1_t vd,
+ vbfloat16mf2_t vs2, __bf16 rs1,
+ size_t vl) {
+ return __riscv_vfwmul_mu(vm, vd, vs2, rs1, vl);
+}
+
+// CHECK-RV64-LABEL: define dso_local <vscale x 4 x float> @test_vfwmul_vv_bf16m1_f32m2_mu(
+// CHECK-RV64-SAME: <vscale x 4 x i1> [[VM:%.*]], <vscale x 4 x float> [[VD:%.*]], <vscale x 4 x bfloat> [[VS2:%.*]], <vscale x 4 x bfloat> [[VS1:%.*]], i64 noundef [[VL:%.*]]) #[[ATTR0]] {
+// CHECK-RV64-NEXT: [[ENTRY:.*:]]
+// CHECK-RV64-NEXT: [[TMP0:%.*]] = call <vscale x 4 x float> @llvm.riscv.vfwmul.mask.nxv4f32.nxv4bf16.nxv4bf16.i64(<vscale x 4 x float> [[VD]], <vscale x 4 x bfloat> [[VS2]], <vscale x 4 x bfloat> [[VS1]], <vscale x 4 x i1> [[VM]], i64 7, i64 [[VL]], i64 1)
+// CHECK-RV64-NEXT: ret <vscale x 4 x float> [[TMP0]]
+//
+vfloat32m2_t test_vfwmul_vv_bf16m1_f32m2_mu(vbool16_t vm, vfloat32m2_t vd,
+ vbfloat16m1_t vs2,
+ vbfloat16m1_t vs1, size_t vl) {
+ return __riscv_vfwmul_mu(vm, vd, vs2, vs1, vl);
+}
+
+// CHECK-RV64-LABEL: define dso_local <vscale x 4 x float> @test_vfwmul_vf_bf16m1_f32m2_mu(
+// CHECK-RV64-SAME: <vscale x 4 x i1> [[VM:%.*]], <vscale x 4 x float> [[VD:%.*]], <vscale x 4 x bfloat> [[VS2:%.*]], bfloat noundef [[RS1:%.*]], i64 noundef [[VL:%.*]]) #[[ATTR0]] {
+// CHECK-RV64-NEXT: [[ENTRY:.*:]]
+// CHECK-RV64-NEXT: [[TMP0:%.*]] = call <vscale x 4 x float> @llvm.riscv.vfwmul.mask.nxv4f32.nxv4bf16.bf16.i64(<vscale x 4 x float> [[VD]], <vscale x 4 x bfloat> [[VS2]], bfloat [[RS1]], <vscale x 4 x i1> [[VM]], i64 7, i64 [[VL]], i64 1)
+// CHECK-RV64-NEXT: ret <vscale x 4 x float> [[TMP0]]
+//
+vfloat32m2_t test_vfwmul_vf_bf16m1_f32m2_mu(vbool16_t vm, vfloat32m2_t vd,
+ vbfloat16m1_t vs2, __bf16 rs1,
+ size_t vl) {
+ return __riscv_vfwmul_mu(vm, vd, vs2, rs1, vl);
+}
+
+// CHECK-RV64-LABEL: define dso_local <vscale x 8 x float> @test_vfwmul_vv_bf16m2_f32m4_mu(
+// CHECK-RV64-SAME: <vscale x 8 x i1> [[VM:%.*]], <vscale x 8 x float> [[VD:%.*]], <vscale x 8 x bfloat> [[VS2:%.*]], <vscale x 8 x bfloat> [[VS1:%.*]], i64 noundef [[VL:%.*]]) #[[ATTR0]] {
+// CHECK-RV64-NEXT: [[ENTRY:.*:]]
+// CHECK-RV64-NEXT: [[TMP0:%.*]] = call <vscale x 8 x float> @llvm.riscv.vfwmul.mask.nxv8f32.nxv8bf16.nxv8bf16.i64(<vscale x 8 x float> [[VD]], <vscale x 8 x bfloat> [[VS2]], <vscale x 8 x bfloat> [[VS1]], <vscale x 8 x i1> [[VM]], i64 7, i64 [[VL]], i64 1)
+// CHECK-RV64-NEXT: ret <vscale x 8 x float> [[TMP0]]
+//
+vfloat32m4_t test_vfwmul_vv_bf16m2_f32m4_mu(vbool8_t vm, vfloat32m4_t vd,
+ vbfloat16m2_t vs2,
+ vbfloat16m2_t vs1, size_t vl) {
+ return __riscv_vfwmul_mu(vm, vd, vs2, vs1, vl);
+}
+
+// CHECK-RV64-LABEL: define dso_local <vscale x 8 x float> @test_vfwmul_vf_bf16m2_f32m4_mu(
+// CHECK-RV64-SAME: <vscale x 8 x i1> [[VM:%.*]], <vscale x 8 x float> [[VD:%.*]], <vscale x 8 x bfloat> [[VS2:%.*]], bfloat noundef [[RS1:%.*]], i64 noundef [[VL:%.*]]) #[[ATTR0]] {
+// CHECK-RV64-NEXT: [[ENTRY:.*:]]
+// CHECK-RV64-NEXT: [[TMP0:%.*]] = call <vscale x 8 x float> @llvm.riscv.vfwmul.mask.nxv8f32.nxv8bf16.bf16.i64(<vscale x 8 x float> [[VD]], <vscale x 8 x bfloat> [[VS2]], bfloat [[RS1]], <vscale x 8 x i1> [[VM]], i64 7, i64 [[VL]], i64 1)
+// CHECK-RV64-NEXT: ret <vscale x 8 x float> [[TMP0]]
+//
+vfloat32m4_t test_vfwmul_vf_bf16m2_f32m4_mu(vbool8_t vm, vfloat32m4_t vd,
+ vbfloat16m2_t vs2, __bf16 rs1,
+ size_t vl) {
+ return __riscv_vfwmul_mu(vm, vd, vs2, rs1, vl);
+}
+
+// CHECK-RV64-LABEL: define dso_local <vscale x 16 x float> @test_vfwmul_vv_bf16m4_f32m8_mu(
+// CHECK-RV64-SAME: <vscale x 16 x i1> [[VM:%.*]], <vscale x 16 x float> [[VD:%.*]], <vscale x 16 x bfloat> [[VS2:%.*]], <vscale x 16 x bfloat> [[VS1:%.*]], i64 noundef [[VL:%.*]]) #[[ATTR0]] {
+// CHECK-RV64-NEXT: [[ENTRY:.*:]]
+// CHECK-RV64-NEXT: [[TMP0:%.*]] = call <vscale x 16 x float> @llvm.riscv.vfwmul.mask.nxv16f32.nxv16bf16.nxv16bf16.i64(<vscale x 16 x float> [[VD]], <vscale x 16 x bfloat> [[VS2]], <vscale x 16 x bfloat> [[VS1]], <vscale x 16 x i1> [[VM]], i64 7, i64 [[VL]], i64 1)
+// CHECK-RV64-NEXT: ret <vscale x 16 x float> [[TMP0]]
+//
+vfloat32m8_t test_vfwmul_vv_bf16m4_f32m8_mu(vbool4_t vm, vfloat32m8_t vd,
+ vbfloat16m4_t vs2,
+ vbfloat16m4_t vs1, size_t vl) {
+ return __riscv_vfwmul_mu(vm, vd, vs2, vs1, vl);
+}
+
+// CHECK-RV64-LABEL: define dso_local <vscale x 16 x float> @test_vfwmul_vf_bf16m4_f32m8_mu(
+// CHECK-RV64-SAME: <vscale x 16 x i1> [[VM:%.*]], <vscale x 16 x float> [[VD:%.*]], <vscale x 16 x bfloat> [[VS2:%.*]], bfloat noundef [[RS1:%.*]], i64 noundef [[VL:%.*]]) #[[ATTR0]] {
+// CHECK-RV64-NEXT: [[ENTRY:.*:]]
+// CHECK-RV64-NEXT: [[TMP0:%.*]] = call <vscale x 16 x float> @llvm.riscv.vfwmul.mask.nxv16f32.nxv16bf16.bf16.i64(<vscale x 16 x float> [[VD]], <vscale x 16 x bfloat> [[VS2]], bfloat [[RS1]], <vscale x 16 x i1> [[VM]], i64 7, i64 [[VL]], i64 1)
+// CHECK-RV64-NEXT: ret <vscale x 16 x float> [[TMP0]]
+//
+vfloat32m8_t test_vfwmul_vf_bf16m4_f32m8_mu(vbool4_t vm, vfloat32m8_t vd,
+ vbfloat16m4_t vs2, __bf16 rs1,
+ size_t vl) {
+ return __riscv_vfwmul_mu(vm, vd, vs2, rs1, vl);
+}
+
+// CHECK-RV64-LABEL: define dso_local <vscale x 1 x float> @test_vfwmul_vv_bf16mf4_f32mf2_rm_tu(
+// CHECK-RV64-SAME: <vscale x 1 x float> [[VD:%.*]], <vscale x 1 x bfloat> [[VS2:%.*]], <vscale x 1 x bfloat> [[VS1:%.*]], i64 noundef [[VL:%.*]]) #[[ATTR0]] {
+// CHECK-RV64-NEXT: [[ENTRY:.*:]]
+// CHECK-RV64-NEXT: [[TMP0:%.*]] = call <vscale x 1 x float> @llvm.riscv.vfwmul.nxv1f32.nxv1bf16.nxv1bf16.i64(<vscale x 1 x float> [[VD]], <vscale x 1 x bfloat> [[VS2]], <vscale x 1 x bfloat> [[VS1]], i64 0, i64 [[VL]])
+// CHECK-RV64-NEXT: ret <vscale x 1 x float> [[TMP0]]
+//
+vfloat32mf2_t test_vfwmul_vv_bf16mf4_f32mf2_rm_tu(vfloat32mf2_t vd,
+ vbfloat16mf4_t vs2,
+ vbfloat16mf4_t vs1,
+ size_t vl) {
+ return __riscv_vfwmul_tu(vd, vs2, vs1, __RISCV_FRM_RNE, vl);
+}
+
+// CHECK-RV64-LABEL: define dso_local <vscale x 1 x float> @test_vfwmul_vf_bf16mf4_f32mf2_rm_tu(
+// CHECK-RV64-SAME: <vscale x 1 x float> [[VD:%.*]], <vscale x 1 x bfloat> [[VS2:%.*]], bfloat noundef [[RS1:%.*]], i64 noundef [[VL:%.*]]) #[[ATTR0]] {
+// CHECK-RV64-NEXT: [[ENTRY:.*:]]
+// CHECK-RV64-NEXT: [[TMP0:%.*]] = call <vscale x 1 x float> @llvm.riscv.vfwmul.nxv1f32.nxv1bf16.bf16.i64(<vscale x 1 x float> [[VD]], <vscale x 1 x bfloat> [[VS2]], bfloat [[RS1]], i64 0, i64 [[VL]])
+// CHECK-RV64-NEXT: ret <vscale x 1 x float> [[TMP0]]
+//
+vfloat32mf2_t test_vfwmul_vf_bf16mf4_f32mf2_rm_tu(vfloat32mf2_t vd,
+ vbfloat16mf4_t vs2,
+ __bf16 rs1, size_t vl) {
+ return __riscv_vfwmul_tu(vd, vs2, rs1, __RISCV_FRM_RNE, vl);
+}
+
+// CHECK-RV64-LABEL: define dso_local <vscale x 2 x float> @test_vfwmul_vv_bf16mf2_f32m1_rm_tu(
+// CHECK-RV64-SAME: <vscale x 2 x float> [[VD:%.*]], <vscale x 2 x bfloat> [[VS2:%.*]], <vscale x 2 x bfloat> [[VS1:%.*]], i64 noundef [[VL:%.*]]) #[[ATTR0]] {
+// CHECK-RV64-NEXT: [[ENTRY:.*:]]
+// CHECK-RV64-NEXT: [[TMP0:%.*]] = call <vscale x 2 x float> @llvm.riscv.vfwmul.nxv2f32.nxv2bf16.nxv2bf16.i64(<vscale x 2 x float> [[VD]], <vscale x 2 x bfloat> [[VS2]], <vscale x 2 x bfloat> [[VS1]], i64 0, i64 [[VL]])
+// CHECK-RV64-NEXT: ret <vscale x 2 x float> [[TMP0]]
+//
+vfloat32m1_t test_vfwmul_vv_bf16mf2_f32m1_rm_tu(vfloat32m1_t vd,
+ vbfloat16mf2_t vs2,
+ vbfloat16mf2_t vs1, size_t vl) {
+ return __riscv_vfwmul_tu(vd, vs2, vs1, __RISCV_FRM_RNE, vl);
+}
+
+// CHECK-RV64-LABEL: define dso_local <vscale x 2 x float> @test_vfwmul_vf_bf16mf2_f32m1_rm_tu(
+// CHECK-RV64-SAME: <vscale x 2 x float> [[VD:%.*]], <vscale x 2 x bfloat> [[VS2:%.*]], bfloat noundef [[RS1:%.*]], i64 noundef [[VL:%.*]]) #[[ATTR0]] {
+// CHECK-RV64-NEXT: [[ENTRY:.*:]]
+// CHECK-RV64-NEXT: [[TMP0:%.*]] = call <vscale x 2 x float> @llvm.riscv.vfwmul.nxv2f32.nxv2bf16.bf16.i64(<vscale x 2 x float> [[VD]], <vscale x 2 x bfloat> [[VS2]], bfloat [[RS1]], i64 0, i64 [[VL]])
+// CHECK-RV64-NEXT: ret <vscale x 2 x float> [[TMP0]]
+//
+vfloat32m1_t test_vfwmul_vf_bf16mf2_f32m1_rm_tu(vfloat32m1_t vd,
+ vbfloat16mf2_t vs2, __bf16 rs1,
+ size_t vl) {
+ return __riscv_vfwmul_tu(vd, vs2, rs1, __RISCV_FRM_RNE, vl);
+}
+
+// CHECK-RV64-LABEL: define dso_local <vscale x 4 x float> @test_vfwmul_vv_bf16m1_f32m2_rm_tu(
+// CHECK-RV64-SAME: <vscale x 4 x float> [[VD:%.*]], <vscale x 4 x bfloat> [[VS2:%.*]], <vscale x 4 x bfloat> [[VS1:%.*]], i64 noundef [[VL:%.*]]) #[[ATTR0]] {
+// CHECK-RV64-NEXT: [[ENTRY:.*:]]
+// CHECK-RV64-NEXT: [[TMP0:%.*]] = call <vscale x 4 x float> @llvm.riscv.vfwmul.nxv4f32.nxv4bf16.nxv4bf16.i64(<vscale x 4 x float> [[VD]], <vscale x 4 x bfloat> [[VS2]], <vscale x 4 x bfloat> [[VS1]], i64 0, i64 [[VL]])
+// CHECK-RV64-NEXT: ret <vscale x 4 x float> [[TMP0]]
+//
+vfloat32m2_t test_vfwmul_vv_bf16m1_f32m2_rm_tu(vfloat32m2_t vd,
+ vbfloat16m1_t vs2,
+ vbfloat16m1_t vs1, size_t vl) {
+ return __riscv_vfwmul_tu(vd, vs2, vs1, __RISCV_FRM_RNE, vl);
+}
+
+// CHECK-RV64-LABEL: define dso_local <vscale x 4 x float> @test_vfwmul_vf_bf16m1_f32m2_rm_tu(
+// CHECK-RV64-SAME: <vscale x 4 x float> [[VD:%.*]], <vscale x 4 x bfloat> [[VS2:%.*]], bfloat noundef [[RS1:%.*]], i64 noundef [[VL:%.*]]) #[[ATTR0]] {
+// CHECK-RV64-NEXT: [[ENTRY:.*:]]
+// CHECK-RV64-NEXT: [[TMP0:%.*]] = call <vscale x 4 x float> @llvm.riscv.vfwmul.nxv4f32.nxv4bf16.bf16.i64(<vscale x 4 x float> [[VD]], <vscale x 4 x bfloat> [[VS2]], bfloat [[RS1]], i64 0, i64 [[VL]])
+// CHECK-RV64-NEXT: ret <vscale x 4 x float> [[TMP0]]
+//
+vfloat32m2_t test_vfwmul_vf_bf16m1_f32m2_rm_tu(vfloat32m2_t vd,
+ vbfloat16m1_t vs2, __bf16 rs1,
+ size_t vl) {
+ return __riscv_vfwmul_tu(vd, vs2, rs1, __RISCV_FRM_RNE, vl);
+}
+
+// CHECK-RV64-LABEL: define dso_local <vscale x 8 x float> @test_vfwmul_vv_bf16m2_f32m4_rm_tu(
+// CHECK-RV64-SAME: <vscale x 8 x float> [[VD:%.*]], <vscale x 8 x bfloat> [[VS2:%.*]], <vscale x 8 x bfloat> [[VS1:%.*]], i64 noundef [[VL:%.*]]) #[[ATTR0]] {
+// CHECK-RV64-NEXT: [[ENTRY:.*:]]
+// CHECK-RV64-NEXT: [[TMP0:%.*]] = call <vscale x 8 x float> @llvm.riscv.vfwmul.nxv8f32.nxv8bf16.nxv8bf16.i64(<vscale x 8 x float> [[VD]], <vscale x 8 x bfloat> [[VS2]], <vscale x 8 x bfloat> [[VS1]], i64 0, i64 [[VL]])
+// CHECK-RV64-NEXT: ret <vscale x 8 x float> [[TMP0]]
+//
+vfloat32m4_t test_vfwmul_vv_bf16m2_f32m4_rm_tu(vfloat32m4_t vd,
+ vbfloat16m2_t vs2,
+ vbfloat16m2_t vs1, size_t vl) {
+ return __riscv_vfwmul_tu(vd, vs2, vs1, __RISCV_FRM_RNE, vl);
+}
+
+// CHECK-RV64-LABEL: define dso_local <vscale x 8 x float> @test_vfwmul_vf_bf16m2_f32m4_rm_tu(
+// CHECK-RV64-SAME: <vscale x 8 x float> [[VD:%.*]], <vscale x 8 x bfloat> [[VS2:%.*]], bfloat noundef [[RS1:%.*]], i64 noundef [[VL:%.*]]) #[[ATTR0]] {
+// CHECK-RV64-NEXT: [[ENTRY:.*:]]
+// CHECK-RV64-NEXT: [[TMP0:%.*]] = call <vscale x 8 x float> @llvm.riscv.vfwmul.nxv8f32.nxv8bf16.bf16.i64(<vscale x 8 x float> [[VD]], <vscale x 8 x bfloat> [[VS2]], bfloat [[RS1]], i64 0, i64 [[VL]])
+// CHECK-RV64-NEXT: ret <vscale x 8 x float> [[TMP0]]
+//
+vfloat32m4_t test_vfwmul_vf_bf16m2_f32m4_rm_tu(vfloat32m4_t vd,
+ vbfloat16m2_t vs2, __bf16 rs1,
+ size_t vl) {
+ return __riscv_vfwmul_tu(vd, vs2, rs1, __RISCV_FRM_RNE, vl);
+}
+
+// CHECK-RV64-LABEL: define dso_local <vscale x 16 x float> @test_vfwmul_vv_bf16m4_f32m8_rm_tu(
+// CHECK-RV64-SAME: <vscale x 16 x float> [[VD:%.*]], <vscale x 16 x bfloat> [[VS2:%.*]], <vscale x 16 x bfloat> [[VS1:%.*]], i64 noundef [[VL:%.*]]) #[[ATTR0]] {
+// CHECK-RV64-NEXT: [[ENTRY:.*:]]
+// CHECK-RV64-NEXT: [[TMP0:%.*]] = call <vscale x 16 x float> @llvm.riscv.vfwmul.nxv16f32.nxv16bf16.nxv16bf16.i64(<vscale x 16 x float> [[VD]], <vscale x 16 x bfloat> [[VS2]], <vscale x 16 x bfloat> [[VS1]], i64 0, i64 [[VL]])
+// CHECK-RV64-NEXT: ret <vscale x 16 x float> [[TMP0]]
+//
+vfloat32m8_t test_vfwmul_vv_bf16m4_f32m8_rm_tu(vfloat32m8_t vd,
+ vbfloat16m4_t vs2,
+ vbfloat16m4_t vs1, size_t vl) {
+ return __riscv_vfwmul_tu(vd, vs2, vs1, __RISCV_FRM_RNE, vl);
+}
+
+// CHECK-RV64-LABEL: define dso_local <vscale x 16 x float> @test_vfwmul_vf_bf16m4_f32m8_rm_tu(
+// CHECK-RV64-SAME: <vscale x 16 x float> [[VD:%.*]], <vscale x 16 x bfloat> [[VS2:%.*]], bfloat noundef [[RS1:%.*]], i64 noundef [[VL:%.*]]) #[[ATTR0]] {
+// CHECK-RV64-NEXT: [[ENTRY:.*:]]
+// CHECK-RV64-NEXT: [[TMP0:%.*]] = call <vscale x 16 x float> @llvm.riscv.vfwmul.nxv16f32.nxv16bf16.bf16.i64(<vscale x 16 x float> [[VD]], <vscale x 16 x bfloat> [[VS2]], bfloat [[RS1]], i64 0, i64 [[VL]])
+// CHECK-RV64-NEXT: ret <vscale x 16 x float> [[TMP0]]
+//
+vfloat32m8_t test_vfwmul_vf_bf16m4_f32m8_rm_tu(vfloat32m8_t vd,
+ vbfloat16m4_t vs2, __bf16 rs1,
+ size_t vl) {
+ return __riscv_vfwmul_tu(vd, vs2, rs1, __RISCV_FRM_RNE, vl);
+}
+
+// CHECK-RV64-LABEL: define dso_local <vscale x 1 x float> @test_vfwmul_vv_bf16mf4_f32mf2_rm_tum(
+// CHECK-RV64-SAME: <vscale x 1 x i1> [[VM:%.*]], <vscale x 1 x float> [[VD:%.*]], <vscale x 1 x bfloat> [[VS2:%.*]], <vscale x 1 x bfloat> [[VS1:%.*]], i64 noundef [[VL:%.*]]) #[[ATTR0]] {
+// CHECK-RV64-NEXT: [[ENTRY:.*:]]
+// CHECK-RV64-NEXT: [[TMP0:%.*]] = call <vscale x 1 x float> @llvm.riscv.vfwmul.mask.nxv1f32.nxv1bf16.nxv1bf16.i64(<vscale x 1 x float> [[VD]], <vscale x 1 x bfloat> [[VS2]], <vscale x 1 x bfloat> [[VS1]], <vscale x 1 x i1> [[VM]], i64 0, i64 [[VL]], i64 2)
+// CHECK-RV64-NEXT: ret <vscale x 1 x float> [[TMP0]]
+//
+vfloat32mf2_t test_vfwmul_vv_bf16mf4_f32mf2_rm_tum(vbool64_t vm,
+ vfloat32mf2_t vd,
+ vbfloat16mf4_t vs2,
+ vbfloat16mf4_t vs1,
+ size_t vl) {
+ return __riscv_vfwmul_tum(vm, vd, vs2, vs1, __RISCV_FRM_RNE, vl);
+}
+
+// CHECK-RV64-LABEL: define dso_local <vscale x 1 x float> @test_vfwmul_vf_bf16mf4_f32mf2_rm_tum(
+// CHECK-RV64-SAME: <vscale x 1 x i1> [[VM:%.*]], <vscale x 1 x float> [[VD:%.*]], <vscale x 1 x bfloat> [[VS2:%.*]], bfloat noundef [[RS1:%.*]], i64 noundef [[VL:%.*]]) #[[ATTR0]] {
+// CHECK-RV64-NEXT: [[ENTRY:.*:]]
+// CHECK-RV64-NEXT: [[TMP0:%.*]] = call <vscale x 1 x float> @llvm.riscv.vfwmul.mask.nxv1f32.nxv1bf16.bf16.i64(<vscale x 1 x float> [[VD]], <vscale x 1 x bfloat> [[VS2]], bfloat [[RS1]], <vscale x 1 x i1> [[VM]], i64 0, i64 [[VL]], i64 2)
+// CHECK-RV64-NEXT: ret <vscale x 1 x float> [[TMP0]]
+//
+vfloat32mf2_t test_vfwmul_vf_bf16mf4_f32mf2_rm_tum(vbool64_t vm,
+ vfloat32mf2_t vd,
+ vbfloat16mf4_t vs2,
+ __bf16 rs1, size_t vl) {
+ return __riscv_vfwmul_tum(vm, vd, vs2, rs1, __RISCV_FRM_RNE, vl);
+}
+
+// CHECK-RV64-LABEL: define dso_local <vscale x 2 x float> @test_vfwmul_vv_bf16mf2_f32m1_rm_tum(
+// CHECK-RV64-SAME: <vscale x 2 x i1> [[VM:%.*]], <vscale x 2 x float> [[VD:%.*]], <vscale x 2 x bfloat> [[VS2:%.*]], <vscale x 2 x bfloat> [[VS1:%.*]], i64 noundef [[VL:%.*]]) #[[ATTR0]] {
+// CHECK-RV64-NEXT: [[ENTRY:.*:]]
+// CHECK-RV64-NEXT: [[TMP0:%.*]] = call <vscale x 2 x float> @llvm.riscv.vfwmul.mask.nxv2f32.nxv2bf16.nxv2bf16.i64(<vscale x 2 x float> [[VD]], <vscale x 2 x bfloat> [[VS2]], <vscale x 2 x bfloat> [[VS1]], <vscale x 2 x i1> [[VM]], i64 0, i64 [[VL]], i64 2)
+// CHECK-RV64-NEXT: ret <vscale x 2 x float> [[TMP0]]
+//
+vfloat32m1_t test_vfwmul_vv_bf16mf2_f32m1_rm_tum(vbool32_t vm, vfloat32m1_t vd,
+ vbfloat16mf2_t vs2,
+ vbfloat16mf2_t vs1,
+ size_t vl) {
+ return __riscv_vfwmul_tum(vm, vd, vs2, vs1, __RISCV_FRM_RNE, vl);
+}
+
+// CHECK-RV64-LABEL: define dso_local <vscale x 2 x float> @test_vfwmul_vf_bf16mf2_f32m1_rm_tum(
+// CHECK-RV64-SAME: <vscale x 2 x i1> [[VM:%.*]], <vscale x 2 x float> [[VD:%.*]], <vscale x 2 x bfloat> [[VS2:%.*]], bfloat noundef [[RS1:%.*]], i64 noundef [[VL:%.*]]) #[[ATTR0]] {
+// CHECK-RV64-NEXT: [[ENTRY:.*:]]
+// CHECK-RV64-NEXT: [[TMP0:%.*]] = call <vscale x 2 x float> @llvm.riscv.vfwmul.mask.nxv2f32.nxv2bf16.bf16.i64(<vscale x 2 x float> [[VD]], <vscale x 2 x bfloat> [[VS2]], bfloat [[RS1]], <vscale x 2 x i1> [[VM]], i64 0, i64 [[VL]], i64 2)
+// CHECK-RV64-NEXT: ret <vscale x 2 x float> [[TMP0]]
+//
+vfloat32m1_t test_vfwmul_vf_bf16mf2_f32m1_rm_tum(vbool32_t vm, vfloat32m1_t vd,
+ vbfloat16mf2_t vs2, __bf16 rs1,
+ size_t vl) {
+ return __riscv_vfwmul_tum(vm, vd, vs2, rs1, __RISCV_FRM_RNE, vl);
+}
+
+// CHECK-RV64-LABEL: define dso_local <vscale x 4 x float> @test_vfwmul_vv_bf16m1_f32m2_rm_tum(
+// CHECK-RV64-SAME: <vscale x 4 x i1> [[VM:%.*]], <vscale x 4 x float> [[VD:%.*]], <vscale x 4 x bfloat> [[VS2:%.*]], <vscale x 4 x bfloat> [[VS1:%.*]], i64 noundef [[VL:%.*]]) #[[ATTR0]] {
+// CHECK-RV64-NEXT: [[ENTRY:.*:]]
+// CHECK-RV64-NEXT: [[TMP0:%.*]] = call <vscale x 4 x float> @llvm.riscv.vfwmul.mask.nxv4f32.nxv4bf16.nxv4bf16.i64(<vscale x 4 x float> [[VD]], <vscale x 4 x bfloat> [[VS2]], <vscale x 4 x bfloat> [[VS1]], <vscale x 4 x i1> [[VM]], i64 0, i64 [[VL]], i64 2)
+// CHECK-RV64-NEXT: ret <vscale x 4 x float> [[TMP0]]
+//
+vfloat32m2_t test_vfwmul_vv_bf16m1_f32m2_rm_tum(vbool16_t vm, vfloat32m2_t vd,
+ vbfloat16m1_t vs2,
+ vbfloat16m1_t vs1, size_t vl) {
+ return __riscv_vfwmul_tum(vm, vd, vs2, vs1, __RISCV_FRM_RNE, vl);
+}
+
+// CHECK-RV64-LABEL: define dso_local <vscale x 4 x float> @test_vfwmul_vf_bf16m1_f32m2_rm_tum(
+// CHECK-RV64-SAME: <vscale x 4 x i1> [[VM:%.*]], <vscale x 4 x float> [[VD:%.*]], <vscale x 4 x bfloat> [[VS2:%.*]], bfloat noundef [[RS1:%.*]], i64 noundef [[VL:%.*]]) #[[ATTR0]] {
+// CHECK-RV64-NEXT: [[ENTRY:.*:]]
+// CHECK-RV64-NEXT: [[TMP0:%.*]] = call <vscale x 4 x float> @llvm.riscv.vfwmul.mask.nxv4f32.nxv4bf16.bf16.i64(<vscale x 4 x float> [[VD]], <vscale x 4 x bfloat> [[VS2]], bfloat [[RS1]], <vscale x 4 x i1> [[VM]], i64 0, i64 [[VL]], i64 2)
+// CHECK-RV64-NEXT: ret <vscale x 4 x float> [[TMP0]]
+//
+vfloat32m2_t test_vfwmul_vf_bf16m1_f32m2_rm_tum(vbool16_t vm, vfloat32m2_t vd,
+ vbfloat16m1_t vs2, __bf16 rs1,
+ size_t vl) {
+ return __riscv_vfwmul_tum(vm, vd, vs2, rs1, __RISCV_FRM_RNE, vl);
+}
+
+// CHECK-RV64-LABEL: define dso_local <vscale x 8 x float> @test_vfwmul_vv_bf16m2_f32m4_rm_tum(
+// CHECK-RV64-SAME: <vscale x 8 x i1> [[VM:%.*]], <vscale x 8 x float> [[VD:%.*]], <vscale x 8 x bfloat> [[VS2:%.*]], <vscale x 8 x bfloat> [[VS1:%.*]], i64 noundef [[VL:%.*]]) #[[ATTR0]] {
+// CHECK-RV64-NEXT: [[ENTRY:.*:]]
+// CHECK-RV64-NEXT: [[TMP0:%.*]] = call <vscale x 8 x float> @llvm.riscv.vfwmul.mask.nxv8f32.nxv8bf16.nxv8bf16.i64(<vscale x 8 x float> [[VD]], <vscale x 8 x bfloat> [[VS2]], <vscale x 8 x bfloat> [[VS1]], <vscale x 8 x i1> [[VM]], i64 0, i64 [[VL]], i64 2)
+// CHECK-RV64-NEXT: ret <vscale x 8 x float> [[TMP0]]
+//
+vfloat32m4_t test_vfwmul_vv_bf16m2_f32m4_rm_tum(vbool8_t vm, vfloat32m4_t vd,
+ vbfloat16m2_t vs2,
+ vbfloat16m2_t vs1, size_t vl) {
+ return __riscv_vfwmul_tum(vm, vd, vs2, vs1, __RISCV_FRM_RNE, vl);
+}
+
+// CHECK-RV64-LABEL: define dso_local <vscale x 8 x float> @test_vfwmul_vf_bf16m2_f32m4_rm_tum(
+// CHECK-RV64-SAME: <vscale x 8 x i1> [[VM:%.*]], <vscale x 8 x float> [[VD:%.*]], <vscale x 8 x bfloat> [[VS2:%.*]], bfloat noundef [[RS1:%.*]], i64 noundef [[VL:%.*]]) #[[ATTR0]] {
+// CHECK-RV64-NEXT: [[ENTRY:.*:]]
+// CHECK-RV64-NEXT: [[TMP0:%.*]] = call <vscale x 8 x float> @llvm.riscv.vfwmul.mask.nxv8f32.nxv8bf16.bf16.i64(<vscale x 8 x float> [[VD]], <vscale x 8 x bfloat> [[VS2]], bfloat [[RS1]], <vscale x 8 x i1> [[VM]], i64 0, i64 [[VL]], i64 2)
+// CHECK-RV64-NEXT: ret <vscale x 8 x float> [[TMP0]]
+//
+vfloat32m4_t test_vfwmul_vf_bf16m2_f32m4_rm_tum(vbool8_t vm, vfloat32m4_t vd,
+ vbfloat16m2_t vs2, __bf16 rs1,
+ size_t vl) {
+ return __riscv_vfwmul_tum(vm, vd, vs2, rs1, __RISCV_FRM_RNE, vl);
+}
+
+// CHECK-RV64-LABEL: define dso_local <vscale x 16 x float> @test_vfwmul_vv_bf16m4_f32m8_rm_tum(
+// CHECK-RV64-SAME: <vscale x 16 x i1> [[VM:%.*]], <vscale x 16 x float> [[VD:%.*]], <vscale x 16 x bfloat> [[VS2:%.*]], <vscale x 16 x bfloat> [[VS1:%.*]], i64 noundef [[VL:%.*]]) #[[ATTR0]] {
+// CHECK-RV64-NEXT: [[ENTRY:.*:]]
+// CHECK-RV64-NEXT: [[TMP0:%.*]] = call <vscale x 16 x float> @llvm.riscv.vfwmul.mask.nxv16f32.nxv16bf16.nxv16bf16.i64(<vscale x 16 x float> [[VD]], <vscale x 16 x bfloat> [[VS2]], <vscale x 16 x bfloat> [[VS1]], <vscale x 16 x i1> [[VM]], i64 0, i64 [[VL]], i64 2)
+// CHECK-RV64-NEXT: ret <vscale x 16 x float> [[TMP0]]
+//
+vfloat32m8_t test_vfwmul_vv_bf16m4_f32m8_rm_tum(vbool4_t vm, vfloat32m8_t vd,
+ vbfloat16m4_t vs2,
+ vbfloat16m4_t vs1, size_t vl) {
+ return __riscv_vfwmul_tum(vm, vd, vs2, vs1, __RISCV_FRM_RNE, vl);
+}
+
+// CHECK-RV64-LABEL: define dso_local <vscale x 16 x float> @test_vfwmul_vf_bf16m4_f32m8_rm_tum(
+// CHECK-RV64-SAME: <vscale x 16 x i1> [[VM:%.*]], <vscale x 16 x float> [[VD:%.*]], <vscale x 16 x bfloat> [[VS2:%.*]], bfloat noundef [[RS1:%.*]], i64 noundef [[VL:%.*]]) #[[ATTR0]] {
+// CHECK-RV64-NEXT: [[ENTRY:.*:]]
+// CHECK-RV64-NEXT: [[TMP0:%.*]] = call <vscale x 16 x float> @llvm.riscv.vfwmul.mask.nxv16f32.nxv16bf16.bf16.i64(<vscale x 16 x float> [[VD]], <vscale x 16 x bfloat> [[VS2]], bfloat [[RS1]], <vscale x 16 x i1> [[VM]], i64 0, i64 [[VL]], i64 2)
+// CHECK-RV64-NEXT: ret <vscale x 16 x float> [[TMP0]]
+//
+vfloat32m8_t test_vfwmul_vf_bf16m4_f32m8_rm_tum(vbool4_t vm, vfloat32m8_t vd,
+ vbfloat16m4_t vs2, __bf16 rs1,
+ size_t vl) {
+ return __riscv_vfwmul_tum(vm, vd, vs2, rs1, __RISCV_FRM_RNE, vl);
+}
+
+// CHECK-RV64-LABEL: define dso_local <vscale x 1 x float> @test_vfwmul_vv_bf16mf4_f32mf2_rm_tumu(
+// CHECK-RV64-SAME: <vscale x 1 x i1> [[VM:%.*]], <vscale x 1 x float> [[VD:%.*]], <vscale x 1 x bfloat> [[VS2:%.*]], <vscale x 1 x bfloat> [[VS1:%.*]], i64 noundef [[VL:%.*]]) #[[ATTR0]] {
+// CHECK-RV64-NEXT: [[ENTRY:.*:]]
+// CHECK-RV64-NEXT: [[TMP0:%.*]] = call <vscale x 1 x float> @llvm.riscv.vfwmul.mask.nxv1f32.nxv1bf16.nxv1bf16.i64(<vscale x 1 x float> [[VD]], <vscale x 1 x bfloat> [[VS2]], <vscale x 1 x bfloat> [[VS1]], <vscale x 1 x i1> [[VM]], i64 0, i64 [[VL]], i64 0)
+// CHECK-RV64-NEXT: ret <vscale x 1 x float> [[TMP0]]
+//
+vfloat32mf2_t test_vfwmul_vv_bf16mf4_f32mf2_rm_tumu(vbool64_t vm,
+ vfloat32mf2_t vd,
+ vbfloat16mf4_t vs2,
+ vbfloat16mf4_t vs1,
+ size_t vl) {
+ return __riscv_vfwmul_tumu(vm, vd, vs2, vs1, __RISCV_FRM_RNE, vl);
+}
+
+// CHECK-RV64-LABEL: define dso_local <vscale x 1 x float> @test_vfwmul_vf_bf16mf4_f32mf2_rm_tumu(
+// CHECK-RV64-SAME: <vscale x 1 x i1> [[VM:%.*]], <vscale x 1 x float> [[VD:%.*]], <vscale x 1 x bfloat> [[VS2:%.*]], bfloat noundef [[RS1:%.*]], i64 noundef [[VL:%.*]]) #[[ATTR0]] {
+// CHECK-RV64-NEXT: [[ENTRY:.*:]]
+// CHECK-RV64-NEXT: [[TMP0:%.*]] = call <vscale x 1 x float> @llvm.riscv.vfwmul.mask.nxv1f32.nxv1bf16.bf16.i64(<vscale x 1 x float> [[VD]], <vscale x 1 x bfloat> [[VS2]], bfloat [[RS1]], <vscale x 1 x i1> [[VM]], i64 0, i64 [[VL]], i64 0)
+// CHECK-RV64-NEXT: ret <vscale x 1 x float> [[TMP0]]
+//
+vfloat32mf2_t test_vfwmul_vf_bf16mf4_f32mf2_rm_tumu(vbool64_t vm,
+ vfloat32mf2_t vd,
+ vbfloat16mf4_t vs2,
+ __bf16 rs1, size_t vl) {
+ return __riscv_vfwmul_tumu(vm, vd, vs2, rs1, __RISCV_FRM_RNE, vl);
+}
+
+// CHECK-RV64-LABEL: define dso_local <vscale x 2 x float> @test_vfwmul_vv_bf16mf2_f32m1_rm_tumu(
+// CHECK-RV64-SAME: <vscale x 2 x i1> [[VM:%.*]], <vscale x 2 x float> [[VD:%.*]], <vscale x 2 x bfloat> [[VS2:%.*]], <vscale x 2 x bfloat> [[VS1:%.*]], i64 noundef [[VL:%.*]]) #[[ATTR0]] {
+// CHECK-RV64-NEXT: [[ENTRY:.*:]]
+// CHECK-RV64-NEXT: [[TMP0:%.*]] = call <vscale x 2 x float> @llvm.riscv.vfwmul.mask.nxv2f32.nxv2bf16.nxv2bf16.i64(<vscale x 2 x float> [[VD]], <vscale x 2 x bfloat> [[VS2]], <vscale x 2 x bfloat> [[VS1]], <vscale x 2 x i1> [[VM]], i64 0, i64 [[VL]], i64 0)
+// CHECK-RV64-NEXT: ret <vscale x 2 x float> [[TMP0]]
+//
+vfloat32m1_t test_vfwmul_vv_bf16mf2_f32m1_rm_tumu(vbool32_t vm, vfloat32m1_t vd,
+ vbfloat16mf2_t vs2,
+ vbfloat16mf2_t vs1,
+ size_t vl) {
+ return __riscv_vfwmul_tumu(vm, vd, vs2, vs1, __RISCV_FRM_RNE, vl);
+}
+
+// CHECK-RV64-LABEL: define dso_local <vscale x 2 x float> @test_vfwmul_vf_bf16mf2_f32m1_rm_tumu(
+// CHECK-RV64-SAME: <vscale x 2 x i1> [[VM:%.*]], <vscale x 2 x float> [[VD:%.*]], <vscale x 2 x bfloat> [[VS2:%.*]], bfloat noundef [[RS1:%.*]], i64 noundef [[VL:%.*]]) #[[ATTR0]] {
+// CHECK-RV64-NEXT: [[ENTRY:.*:]]
+// CHECK-RV64-NEXT: [[TMP0:%.*]] = call <vscale x 2 x float> @llvm.riscv.vfwmul.mask.nxv2f32.nxv2bf16.bf16.i64(<vscale x 2 x float> [[VD]], <vscale x 2 x bfloat> [[VS2]], bfloat [[RS1]], <vscale x 2 x i1> [[VM]], i64 0, i64 [[VL]], i64 0)
+// CHECK-RV64-NEXT: ret <vscale x 2 x float> [[TMP0]]
+//
+vfloat32m1_t test_vfwmul_vf_bf16mf2_f32m1_rm_tumu(vbool32_t vm, vfloat32m1_t vd,
+ vbfloat16mf2_t vs2,
+ __bf16 rs1, size_t vl) {
+ return __riscv_vfwmul_tumu(vm, vd, vs2, rs1, __RISCV_FRM_RNE, vl);
+}
+
+// CHECK-RV64-LABEL: define dso_local <vscale x 4 x float> @test_vfwmul_vv_bf16m1_f32m2_rm_tumu(
+// CHECK-RV64-SAME: <vscale x 4 x i1> [[VM:%.*]], <vscale x 4 x float> [[VD:%.*]], <vscale x 4 x bfloat> [[VS2:%.*]], <vscale x 4 x bfloat> [[VS1:%.*]], i64 noundef [[VL:%.*]]) #[[ATTR0]] {
+// CHECK-RV64-NEXT: [[ENTRY:.*:]]
+// CHECK-RV64-NEXT: [[TMP0:%.*]] = call <vscale x 4 x float> @llvm.riscv.vfwmul.mask.nxv4f32.nxv4bf16.nxv4bf16.i64(<vscale x 4 x float> [[VD]], <vscale x 4 x bfloat> [[VS2]], <vscale x 4 x bfloat> [[VS1]], <vscale x 4 x i1> [[VM]], i64 0, i64 [[VL]], i64 0)
+// CHECK-RV64-NEXT: ret <vscale x 4 x float> [[TMP0]]
+//
+vfloat32m2_t test_vfwmul_vv_bf16m1_f32m2_rm_tumu(vbool16_t vm, vfloat32m2_t vd,
+ vbfloat16m1_t vs2,
+ vbfloat16m1_t vs1, size_t vl) {
+ return __riscv_vfwmul_tumu(vm, vd, vs2, vs1, __RISCV_FRM_RNE, vl);
+}
+
+// CHECK-RV64-LABEL: define dso_local <vscale x 4 x float> @test_vfwmul_vf_bf16m1_f32m2_rm_tumu(
+// CHECK-RV64-SAME: <vscale x 4 x i1> [[VM:%.*]], <vscale x 4 x float> [[VD:%.*]], <vscale x 4 x bfloat> [[VS2:%.*]], bfloat noundef [[RS1:%.*]], i64 noundef [[VL:%.*]]) #[[ATTR0]] {
+// CHECK-RV64-NEXT: [[ENTRY:.*:]]
+// CHECK-RV64-NEXT: [[TMP0:%.*]] = call <vscale x 4 x float> @llvm.riscv.vfwmul.mask.nxv4f32.nxv4bf16.bf16.i64(<vscale x 4 x float> [[VD]], <vscale x 4 x bfloat> [[VS2]], bfloat [[RS1]], <vscale x 4 x i1> [[VM]], i64 0, i64 [[VL]], i64 0)
+// CHECK-RV64-NEXT: ret <vscale x 4 x float> [[TMP0]]
+//
+vfloat32m2_t test_vfwmul_vf_bf16m1_f32m2_rm_tumu(vbool16_t vm, vfloat32m2_t vd,
+ vbfloat16m1_t vs2, __bf16 rs1,
+ size_t vl) {
+ return __riscv_vfwmul_tumu(vm, vd, vs2, rs1, __RISCV_FRM_RNE, vl);
+}
+
+// CHECK-RV64-LABEL: define dso_local <vscale x 8 x float> @test_vfwmul_vv_bf16m2_f32m4_rm_tumu(
+// CHECK-RV64-SAME: <vscale x 8 x i1> [[VM:%.*]], <vscale x 8 x float> [[VD:%.*]], <vscale x 8 x bfloat> [[VS2:%.*]], <vscale x 8 x bfloat> [[VS1:%.*]], i64 noundef [[VL:%.*]]) #[[ATTR0]] {
+// CHECK-RV64-NEXT: [[ENTRY:.*:]]
+// CHECK-RV64-NEXT: [[TMP0:%.*]] = call <vscale x 8 x float> @llvm.riscv.vfwmul.mask.nxv8f32.nxv8bf16.nxv8bf16.i64(<vscale x 8 x float> [[VD]], <vscale x 8 x bfloat> [[VS2]], <vscale x 8 x bfloat> [[VS1]], <vscale x 8 x i1> [[VM]], i64 0, i64 [[VL]], i64 0)
+// CHECK-RV64-NEXT: ret <vscale x 8 x float> [[TMP0]]
+//
+vfloat32m4_t test_vfwmul_vv_bf16m2_f32m4_rm_tumu(vbool8_t vm, vfloat32m4_t vd,
+ vbfloat16m2_t vs2,
+ vbfloat16m2_t vs1, size_t vl) {
+ return __riscv_vfwmul_tumu(vm, vd, vs2, vs1, __RISCV_FRM_RNE, vl);
+}
+
+// CHECK-RV64-LABEL: define dso_local <vscale x 8 x float> @test_vfwmul_vf_bf16m2_f32m4_rm_tumu(
+// CHECK-RV64-SAME: <vscale x 8 x i1> [[VM:%.*]], <vscale x 8 x float> [[VD:%.*]], <vscale x 8 x bfloat> [[VS2:%.*]], bfloat noundef [[RS1:%.*]], i64 noundef [[VL:%.*]]) #[[ATTR0]] {
+// CHECK-RV64-NEXT: [[ENTRY:.*:]]
+// CHECK-RV64-NEXT: [[TMP0:%.*]] = call <vscale x 8 x float> @llvm.riscv.vfwmul.mask.nxv8f32.nxv8bf16.bf16.i64(<vscale x 8 x float> [[VD]], <vscale x 8 x bfloat> [[VS2]], bfloat [[RS1]], <vscale x 8 x i1> [[VM]], i64 0, i64 [[VL]], i64 0)
+// CHECK-RV64-NEXT: ret <vscale x 8 x float> [[TMP0]]
+//
+vfloat32m4_t test_vfwmul_vf_bf16m2_f32m4_rm_tumu(vbool8_t vm, vfloat32m4_t vd,
+ vbfloat16m2_t vs2, __bf16 rs1,
+ size_t vl) {
+ return __riscv_vfwmul_tumu(vm, vd, vs2, rs1, __RISCV_FRM_RNE, vl);
+}
+
+// CHECK-RV64-LABEL: define dso_local <vscale x 16 x float> @test_vfwmul_vv_bf16m4_f32m8_rm_tumu(
+// CHECK-RV64-SAME: <vscale x 16 x i1> [[VM:%.*]], <vscale x 16 x float> [[VD:%.*]], <vscale x 16 x bfloat> [[VS2:%.*]], <vscale x 16 x bfloat> [[VS1:%.*]], i64 noundef [[VL:%.*]]) #[[ATTR0]] {
+// CHECK-RV64-NEXT: [[ENTRY:.*:]]
+// CHECK-RV64-NEXT: [[TMP0:%.*]] = call <vscale x 16 x float> @llvm.riscv.vfwmul.mask.nxv16f32.nxv16bf16.nxv16bf16.i64(<vscale x 16 x float> [[VD]], <vscale x 16 x bfloat> [[VS2]], <vscale x 16 x bfloat> [[VS1]], <vscale x 16 x i1> [[VM]], i64 0, i64 [[VL]], i64 0)
+// CHECK-RV64-NEXT: ret <vscale x 16 x float> [[TMP0]]
+//
+vfloat32m8_t test_vfwmul_vv_bf16m4_f32m8_rm_tumu(vbool4_t vm, vfloat32m8_t vd,
+ vbfloat16m4_t vs2,
+ vbfloat16m4_t vs1, size_t vl) {
+ return __riscv_vfwmul_tumu(vm, vd, vs2, vs1, __RISCV_FRM_RNE, vl);
+}
+
+// CHECK-RV64-LABEL: define dso_local <vscale x 16 x float> @test_vfwmul_vf_bf16m4_f32m8_rm_tumu(
+// CHECK-RV64-SAME: <vscale x 16 x i1> [[VM:%.*]], <vscale x 16 x float> [[VD:%.*]], <vscale x 16 x bfloat> [[VS2:%.*]], bfloat noundef [[RS1:%.*]], i64 noundef [[VL:%.*]]) #[[ATTR0]] {
+// CHECK-RV64-NEXT: [[ENTRY:.*:]]
+// CHECK-RV64-NEXT: [[TMP0:%.*]] = call <vscale x 16 x float> @llvm.riscv.vfwmul.mask.nxv16f32.nxv16bf16.bf16.i64(<vscale x 16 x float> [[VD]], <vscale x 16 x bfloat> [[VS2]], bfloat [[RS1]], <vscale x 16 x i1> [[VM]], i64 0, i64 [[VL]], i64 0)
+// CHECK-RV64-NEXT: ret <vscale x 16 x float> [[TMP0]]
+//
+vfloat32m8_t test_vfwmul_vf_bf16m4_f32m8_rm_tumu(vbool4_t vm, vfloat32m8_t vd,
+ vbfloat16m4_t vs2, __bf16 rs1,
+ size_t vl) {
+ return __riscv_vfwmul_tumu(vm, vd, vs2, rs1, __RISCV_FRM_RNE, vl);
+}
+
+// CHECK-RV64-LABEL: define dso_local <vscale x 1 x float> @test_vfwmul_vv_bf16mf4_f32mf2_rm_mu(
+// CHECK-RV64-SAME: <vscale x 1 x i1> [[VM:%.*]], <vscale x 1 x float> [[VD:%.*]], <vscale x 1 x bfloat> [[VS2:%.*]], <vscale x 1 x bfloat> [[VS1:%.*]], i64 noundef [[VL:%.*]]) #[[ATTR0]] {
+// CHECK-RV64-NEXT: [[ENTRY:.*:]]
+// CHECK-RV64-NEXT: [[TMP0:%.*]] = call <vscale x 1 x float> @llvm.riscv.vfwmul.mask.nxv1f32.nxv1bf16.nxv1bf16.i64(<vscale x 1 x float> [[VD]], <vscale x 1 x bfloat> [[VS2]], <vscale x 1 x bfloat> [[VS1]], <vscale x 1 x i1> [[VM]], i64 0, i64 [[VL]], i64 1)
+// CHECK-RV64-NEXT: ret <vscale x 1 x float> [[TMP0]]
+//
+vfloat32mf2_t test_vfwmul_vv_bf16mf4_f32mf2_rm_mu(vbool64_t vm,
+ vfloat32mf2_t vd,
+ vbfloat16mf4_t vs2,
+ vbfloat16mf4_t vs1,
+ size_t vl) {
+ return __riscv_vfwmul_mu(vm, vd, vs2, vs1, __RISCV_FRM_RNE, vl);
+}
+
+// CHECK-RV64-LABEL: define dso_local <vscale x 1 x float> @test_vfwmul_vf_bf16mf4_f32mf2_rm_mu(
+// CHECK-RV64-SAME: <vscale x 1 x i1> [[VM:%.*]], <vscale x 1 x float> [[VD:%.*]], <vscale x 1 x bfloat> [[VS2:%.*]], bfloat noundef [[RS1:%.*]], i64 noundef [[VL:%.*]]) #[[ATTR0]] {
+// CHECK-RV64-NEXT: [[ENTRY:.*:]]
+// CHECK-RV64-NEXT: [[TMP0:%.*]] = call <vscale x 1 x float> @llvm.riscv.vfwmul.mask.nxv1f32.nxv1bf16.bf16.i64(<vscale x 1 x float> [[VD]], <vscale x 1 x bfloat> [[VS2]], bfloat [[RS1]], <vscale x 1 x i1> [[VM]], i64 0, i64 [[VL]], i64 1)
+// CHECK-RV64-NEXT: ret <vscale x 1 x float> [[TMP0]]
+//
+vfloat32mf2_t test_vfwmul_vf_bf16mf4_f32mf2_rm_mu(vbool64_t vm,
+ vfloat32mf2_t vd,
+ vbfloat16mf4_t vs2,
+ __bf16 rs1, size_t vl) {
+ return __riscv_vfwmul_mu(vm, vd, vs2, rs1, __RISCV_FRM_RNE, vl);
+}
+
+// CHECK-RV64-LABEL: define dso_local <vscale x 2 x float> @test_vfwmul_vv_bf16mf2_f32m1_rm_mu(
+// CHECK-RV64-SAME: <vscale x 2 x i1> [[VM:%.*]], <vscale x 2 x float> [[VD:%.*]], <vscale x 2 x bfloat> [[VS2:%.*]], <vscale x 2 x bfloat> [[VS1:%.*]], i64 noundef [[VL:%.*]]) #[[ATTR0]] {
+// CHECK-RV64-NEXT: [[ENTRY:.*:]]
+// CHECK-RV64-NEXT: [[TMP0:%.*]] = call <vscale x 2 x float> @llvm.riscv.vfwmul.mask.nxv2f32.nxv2bf16.nxv2bf16.i64(<vscale x 2 x float> [[VD]], <vscale x 2 x bfloat> [[VS2]], <vscale x 2 x bfloat> [[VS1]], <vscale x 2 x i1> [[VM]], i64 0, i64 [[VL]], i64 1)
+// CHECK-RV64-NEXT: ret <vscale x 2 x float> [[TMP0]]
+//
+vfloat32m1_t test_vfwmul_vv_bf16mf2_f32m1_rm_mu(vbool32_t vm, vfloat32m1_t vd,
+ vbfloat16mf2_t vs2,
+ vbfloat16mf2_t vs1, size_t vl) {
+ return __riscv_vfwmul_mu(vm, vd, vs2, vs1, __RISCV_FRM_RNE, vl);
+}
+
+// CHECK-RV64-LABEL: define dso_local <vscale x 2 x float> @test_vfwmul_vf_bf16mf2_f32m1_rm_mu(
+// CHECK-RV64-SAME: <vscale x 2 x i1> [[VM:%.*]], <vscale x 2 x float> [[VD:%.*]], <vscale x 2 x bfloat> [[VS2:%.*]], bfloat noundef [[RS1:%.*]], i64 noundef [[VL:%.*]]) #[[ATTR0]] {
+// CHECK-RV64-NEXT: [[ENTRY:.*:]]
+// CHECK-RV64-NEXT: [[TMP0:%.*]] = call <vscale x 2 x float> @llvm.riscv.vfwmul.mask.nxv2f32.nxv2bf16.bf16.i64(<vscale x 2 x float> [[VD]], <vscale x 2 x bfloat> [[VS2]], bfloat [[RS1]], <vscale x 2 x i1> [[VM]], i64 0, i64 [[VL]], i64 1)
+// CHECK-RV64-NEXT: ret <vscale x 2 x float> [[TMP0]]
+//
+vfloat32m1_t test_vfwmul_vf_bf16mf2_f32m1_rm_mu(vbool32_t vm, vfloat32m1_t vd,
+ vbfloat16mf2_t vs2, __bf16 rs1,
+ size_t vl) {
+ return __riscv_vfwmul_mu(vm, vd, vs2, rs1, __RISCV_FRM_RNE, vl);
+}
+
+// CHECK-RV64-LABEL: define dso_local <vscale x 4 x float> @test_vfwmul_vv_bf16m1_f32m2_rm_mu(
+// CHECK-RV64-SAME: <vscale x 4 x i1> [[VM:%.*]], <vscale x 4 x float> [[VD:%.*]], <vscale x 4 x bfloat> [[VS2:%.*]], <vscale x 4 x bfloat> [[VS1:%.*]], i64 noundef [[VL:%.*]]) #[[ATTR0]] {
+// CHECK-RV64-NEXT: [[ENTRY:.*:]]
+// CHECK-RV64-NEXT: [[TMP0:%.*]] = call <vscale x 4 x float> @llvm.riscv.vfwmul.mask.nxv4f32.nxv4bf16.nxv4bf16.i64(<vscale x 4 x float> [[VD]], <vscale x 4 x bfloat> [[VS2]], <vscale x 4 x bfloat> [[VS1]], <vscale x 4 x i1> [[VM]], i64 0, i64 [[VL]], i64 1)
+// CHECK-RV64-NEXT: ret <vscale x 4 x float> [[TMP0]]
+//
+vfloat32m2_t test_vfwmul_vv_bf16m1_f32m2_rm_mu(vbool16_t vm, vfloat32m2_t vd,
+ vbfloat16m1_t vs2,
+ vbfloat16m1_t vs1, size_t vl) {
+ return __riscv_vfwmul_mu(vm, vd, vs2, vs1, __RISCV_FRM_RNE, vl);
+}
+
+// CHECK-RV64-LABEL: define dso_local <vscale x 4 x float> @test_vfwmul_vf_bf16m1_f32m2_rm_mu(
+// CHECK-RV64-SAME: <vscale x 4 x i1> [[VM:%.*]], <vscale x 4 x float> [[VD:%.*]], <vscale x 4 x bfloat> [[VS2:%.*]], bfloat noundef [[RS1:%.*]], i64 noundef [[VL:%.*]]) #[[ATTR0]] {
+// CHECK-RV64-NEXT: [[ENTRY:.*:]]
+// CHECK-RV64-NEXT: [[TMP0:%.*]] = call <vscale x 4 x float> @llvm.riscv.vfwmul.mask.nxv4f32.nxv4bf16.bf16.i64(<vscale x 4 x float> [[VD]], <vscale x 4 x bfloat> [[VS2]], bfloat [[RS1]], <vscale x 4 x i1> [[VM]], i64 0, i64 [[VL]], i64 1)
+// CHECK-RV64-NEXT: ret <vscale x 4 x float> [[TMP0]]
+//
+vfloat32m2_t test_vfwmul_vf_bf16m1_f32m2_rm_mu(vbool16_t vm, vfloat32m2_t vd,
+ vbfloat16m1_t vs2, __bf16 rs1,
+ size_t vl) {
+ return __riscv_vfwmul_mu(vm, vd, vs2, rs1, __RISCV_FRM_RNE, vl);
+}
+
+// CHECK-RV64-LABEL: define dso_local <vscale x 8 x float> @test_vfwmul_vv_bf16m2_f32m4_rm_mu(
+// CHECK-RV64-SAME: <vscale x 8 x i1> [[VM:%.*]], <vscale x 8 x float> [[VD:%.*]], <vscale x 8 x bfloat> [[VS2:%.*]], <vscale x 8 x bfloat> [[VS1:%.*]], i64 noundef [[VL:%.*]]) #[[ATTR0]] {
+// CHECK-RV64-NEXT: [[ENTRY:.*:]]
+// CHECK-RV64-NEXT: [[TMP0:%.*]] = call <vscale x 8 x float> @llvm.riscv.vfwmul.mask.nxv8f32.nxv8bf16.nxv8bf16.i64(<vscale x 8 x float> [[VD]], <vscale x 8 x bfloat> [[VS2]], <vscale x 8 x bfloat> [[VS1]], <vscale x 8 x i1> [[VM]], i64 0, i64 [[VL]], i64 1)
+// CHECK-RV64-NEXT: ret <vscale x 8 x float> [[TMP0]]
+//
+vfloat32m4_t test_vfwmul_vv_bf16m2_f32m4_rm_mu(vbool8_t vm, vfloat32m4_t vd,
+ vbfloat16m2_t vs2,
+ vbfloat16m2_t vs1, size_t vl) {
+ return __riscv_vfwmul_mu(vm, vd, vs2, vs1, __RISCV_FRM_RNE, vl);
+}
+
+// CHECK-RV64-LABEL: define dso_local <vscale x 8 x float> @test_vfwmul_vf_bf16m2_f32m4_rm_mu(
+// CHECK-RV64-SAME: <vscale x 8 x i1> [[VM:%.*]], <vscale x 8 x float> [[VD:%.*]], <vscale x 8 x bfloat> [[VS2:%.*]], bfloat noundef [[RS1:%.*]], i64 noundef [[VL:%.*]]) #[[ATTR0]] {
+// CHECK-RV64-NEXT: [[ENTRY:.*:]]
+// CHECK-RV64-NEXT: [[TMP0:%.*]] = call <vscale x 8 x float> @llvm.riscv.vfwmul.mask.nxv8f32.nxv8bf16.bf16.i64(<vscale x 8 x float> [[VD]], <vscale x 8 x bfloat> [[VS2]], bfloat [[RS1]], <vscale x 8 x i1> [[VM]], i64 0, i64 [[VL]], i64 1)
+// CHECK-RV64-NEXT: ret <vscale x 8 x float> [[TMP0]]
+//
+vfloat32m4_t test_vfwmul_vf_bf16m2_f32m4_rm_mu(vbool8_t vm, vfloat32m4_t vd,
+ vbfloat16m2_t vs2, __bf16 rs1,
+ size_t vl) {
+ return __riscv_vfwmul_mu(vm, vd, vs2, rs1, __RISCV_FRM_RNE, vl);
+}
+
+// CHECK-RV64-LABEL: define dso_local <vscale x 16 x float> @test_vfwmul_vv_bf16m4_f32m8_rm_mu(
+// CHECK-RV64-SAME: <vscale x 16 x i1> [[VM:%.*]], <vscale x 16 x float> [[VD:%.*]], <vscale x 16 x bfloat> [[VS2:%.*]], <vscale x 16 x bfloat> [[VS1:%.*]], i64 noundef [[VL:%.*]]) #[[ATTR0]] {
+// CHECK-RV64-NEXT: [[ENTRY:.*:]]
+// CHECK-RV64-NEXT: [[TMP0:%.*]] = call <vscale x 16 x float> @llvm.riscv.vfwmul.mask.nxv16f32.nxv16bf16.nxv16bf16.i64(<vscale x 16 x float> [[VD]], <vscale x 16 x bfloat> [[VS2]], <vscale x 16 x bfloat> [[VS1]], <vscale x 16 x i1> [[VM]], i64 0, i64 [[VL]], i64 1)
+// CHECK-RV64-NEXT: ret <vscale x 16 x float> [[TMP0]]
+//
+vfloat32m8_t test_vfwmul_vv_bf16m4_f32m8_rm_mu(vbool4_t vm, vfloat32m8_t vd,
+ vbfloat16m4_t vs2,
+ vbfloat16m4_t vs1, size_t vl) {
+ return __riscv_vfwmul_mu(vm, vd, vs2, vs1, __RISCV_FRM_RNE, vl);
+}
+
+// CHECK-RV64-LABEL: define dso_local <vscale x 16 x float> @test_vfwmul_vf_bf16m4_f32m8_rm_mu(
+// CHECK-RV64-SAME: <vscale x 16 x i1> [[VM:%.*]], <vscale x 16 x float> [[VD:%.*]], <vscale x 16 x bfloat> [[VS2:%.*]], bfloat noundef [[RS1:%.*]], i64 noundef [[VL:%.*]]) #[[ATTR0]] {
+// CHECK-RV64-NEXT: [[ENTRY:.*:]]
+// CHECK-RV64-NEXT: [[TMP0:%.*]] = call <vscale x 16 x float> @llvm.riscv.vfwmul.mask.nxv16f32.nxv16bf16.bf16.i64(<vscale x 16 x float> [[VD]], <vscale x 16 x bfloat> [[VS2]], bfloat [[RS1]], <vscale x 16 x i1> [[VM]], i64 0, i64 [[VL]], i64 1)
+// CHECK-RV64-NEXT: ret <vscale x 16 x float> [[TMP0]]
+//
+vfloat32m8_t test_vfwmul_vf_bf16m4_f32m8_rm_mu(vbool4_t vm, vfloat32m8_t vd,
+ vbfloat16m4_t vs2, __bf16 rs1,
+ size_t vl) {
+ return __riscv_vfwmul_mu(vm, vd, vs2, rs1, __RISCV_FRM_RNE, vl);
+}
diff --git a/clang/test/CodeGen/RISCV/rvv-intrinsics-autogenerated/zvfbfa/policy/overloaded/vfwnmacc.c b/clang/test/CodeGen/RISCV/rvv-intrinsics-autogenerated/zvfbfa/policy/overloaded/vfwnmacc.c
new file mode 100644
index 0000000..57e43344
--- /dev/null
+++ b/clang/test/CodeGen/RISCV/rvv-intrinsics-autogenerated/zvfbfa/policy/overloaded/vfwnmacc.c
@@ -0,0 +1,994 @@
+// NOTE: Assertions have been autogenerated by utils/update_cc_test_checks.py UTC_ARGS: --version 5
+// REQUIRES: riscv-registered-target
+// RUN: %clang_cc1 -triple riscv64 -target-feature +v \
+// RUN: -target-feature +experimental-zvfbfa -disable-O0-optnone \
+// RUN: -emit-llvm %s -o - | opt -S -passes=mem2reg | \
+// RUN: FileCheck --check-prefix=CHECK-RV64 %s
+
+#include <riscv_vector.h>
+
+// CHECK-RV64-LABEL: define dso_local <vscale x 1 x float> @test_vfwnmacc_vv_bf16mf4_f32mf2_tu(
+// CHECK-RV64-SAME: <vscale x 1 x float> [[VD:%.*]], <vscale x 1 x bfloat> [[VS1:%.*]], <vscale x 1 x bfloat> [[VS2:%.*]], i64 noundef [[VL:%.*]]) #[[ATTR0:[0-9]+]] {
+// CHECK-RV64-NEXT: [[ENTRY:.*:]]
+// CHECK-RV64-NEXT: [[TMP0:%.*]] = call <vscale x 1 x float> @llvm.riscv.vfwnmacc.nxv1f32.nxv1bf16.nxv1bf16.i64(<vscale x 1 x float> [[VD]], <vscale x 1 x bfloat> [[VS1]], <vscale x 1 x bfloat> [[VS2]], i64 7, i64 [[VL]], i64 2)
+// CHECK-RV64-NEXT: ret <vscale x 1 x float> [[TMP0]]
+//
+vfloat32mf2_t test_vfwnmacc_vv_bf16mf4_f32mf2_tu(vfloat32mf2_t vd,
+ vbfloat16mf4_t vs1,
+ vbfloat16mf4_t vs2,
+ size_t vl) {
+ return __riscv_vfwnmacc_tu(vd, vs1, vs2, vl);
+}
+
+// CHECK-RV64-LABEL: define dso_local <vscale x 1 x float> @test_vfwnmacc_vf_bf16mf4_f32mf2_tu(
+// CHECK-RV64-SAME: <vscale x 1 x float> [[VD:%.*]], bfloat noundef [[VS1:%.*]], <vscale x 1 x bfloat> [[VS2:%.*]], i64 noundef [[VL:%.*]]) #[[ATTR0]] {
+// CHECK-RV64-NEXT: [[ENTRY:.*:]]
+// CHECK-RV64-NEXT: [[TMP0:%.*]] = call <vscale x 1 x float> @llvm.riscv.vfwnmacc.nxv1f32.bf16.nxv1bf16.i64(<vscale x 1 x float> [[VD]], bfloat [[VS1]], <vscale x 1 x bfloat> [[VS2]], i64 7, i64 [[VL]], i64 2)
+// CHECK-RV64-NEXT: ret <vscale x 1 x float> [[TMP0]]
+//
+vfloat32mf2_t test_vfwnmacc_vf_bf16mf4_f32mf2_tu(vfloat32mf2_t vd, __bf16 vs1,
+ vbfloat16mf4_t vs2,
+ size_t vl) {
+ return __riscv_vfwnmacc_tu(vd, vs1, vs2, vl);
+}
+
+// CHECK-RV64-LABEL: define dso_local <vscale x 2 x float> @test_vfwnmacc_vv_bf16mf2_f32m1_tu(
+// CHECK-RV64-SAME: <vscale x 2 x float> [[VD:%.*]], <vscale x 2 x bfloat> [[VS1:%.*]], <vscale x 2 x bfloat> [[VS2:%.*]], i64 noundef [[VL:%.*]]) #[[ATTR0]] {
+// CHECK-RV64-NEXT: [[ENTRY:.*:]]
+// CHECK-RV64-NEXT: [[TMP0:%.*]] = call <vscale x 2 x float> @llvm.riscv.vfwnmacc.nxv2f32.nxv2bf16.nxv2bf16.i64(<vscale x 2 x float> [[VD]], <vscale x 2 x bfloat> [[VS1]], <vscale x 2 x bfloat> [[VS2]], i64 7, i64 [[VL]], i64 2)
+// CHECK-RV64-NEXT: ret <vscale x 2 x float> [[TMP0]]
+//
+vfloat32m1_t test_vfwnmacc_vv_bf16mf2_f32m1_tu(vfloat32m1_t vd,
+ vbfloat16mf2_t vs1,
+ vbfloat16mf2_t vs2, size_t vl) {
+ return __riscv_vfwnmacc_tu(vd, vs1, vs2, vl);
+}
+
+// CHECK-RV64-LABEL: define dso_local <vscale x 2 x float> @test_vfwnmacc_vf_bf16mf2_f32m1_tu(
+// CHECK-RV64-SAME: <vscale x 2 x float> [[VD:%.*]], bfloat noundef [[VS1:%.*]], <vscale x 2 x bfloat> [[VS2:%.*]], i64 noundef [[VL:%.*]]) #[[ATTR0]] {
+// CHECK-RV64-NEXT: [[ENTRY:.*:]]
+// CHECK-RV64-NEXT: [[TMP0:%.*]] = call <vscale x 2 x float> @llvm.riscv.vfwnmacc.nxv2f32.bf16.nxv2bf16.i64(<vscale x 2 x float> [[VD]], bfloat [[VS1]], <vscale x 2 x bfloat> [[VS2]], i64 7, i64 [[VL]], i64 2)
+// CHECK-RV64-NEXT: ret <vscale x 2 x float> [[TMP0]]
+//
+vfloat32m1_t test_vfwnmacc_vf_bf16mf2_f32m1_tu(vfloat32m1_t vd, __bf16 vs1,
+ vbfloat16mf2_t vs2, size_t vl) {
+ return __riscv_vfwnmacc_tu(vd, vs1, vs2, vl);
+}
+
+// CHECK-RV64-LABEL: define dso_local <vscale x 4 x float> @test_vfwnmacc_vv_bf16m1_f32m2_tu(
+// CHECK-RV64-SAME: <vscale x 4 x float> [[VD:%.*]], <vscale x 4 x bfloat> [[VS1:%.*]], <vscale x 4 x bfloat> [[VS2:%.*]], i64 noundef [[VL:%.*]]) #[[ATTR0]] {
+// CHECK-RV64-NEXT: [[ENTRY:.*:]]
+// CHECK-RV64-NEXT: [[TMP0:%.*]] = call <vscale x 4 x float> @llvm.riscv.vfwnmacc.nxv4f32.nxv4bf16.nxv4bf16.i64(<vscale x 4 x float> [[VD]], <vscale x 4 x bfloat> [[VS1]], <vscale x 4 x bfloat> [[VS2]], i64 7, i64 [[VL]], i64 2)
+// CHECK-RV64-NEXT: ret <vscale x 4 x float> [[TMP0]]
+//
+vfloat32m2_t test_vfwnmacc_vv_bf16m1_f32m2_tu(vfloat32m2_t vd,
+ vbfloat16m1_t vs1,
+ vbfloat16m1_t vs2, size_t vl) {
+ return __riscv_vfwnmacc_tu(vd, vs1, vs2, vl);
+}
+
+// CHECK-RV64-LABEL: define dso_local <vscale x 4 x float> @test_vfwnmacc_vf_bf16m1_f32m2_tu(
+// CHECK-RV64-SAME: <vscale x 4 x float> [[VD:%.*]], bfloat noundef [[VS1:%.*]], <vscale x 4 x bfloat> [[VS2:%.*]], i64 noundef [[VL:%.*]]) #[[ATTR0]] {
+// CHECK-RV64-NEXT: [[ENTRY:.*:]]
+// CHECK-RV64-NEXT: [[TMP0:%.*]] = call <vscale x 4 x float> @llvm.riscv.vfwnmacc.nxv4f32.bf16.nxv4bf16.i64(<vscale x 4 x float> [[VD]], bfloat [[VS1]], <vscale x 4 x bfloat> [[VS2]], i64 7, i64 [[VL]], i64 2)
+// CHECK-RV64-NEXT: ret <vscale x 4 x float> [[TMP0]]
+//
+vfloat32m2_t test_vfwnmacc_vf_bf16m1_f32m2_tu(vfloat32m2_t vd, __bf16 vs1,
+ vbfloat16m1_t vs2, size_t vl) {
+ return __riscv_vfwnmacc_tu(vd, vs1, vs2, vl);
+}
+
+// CHECK-RV64-LABEL: define dso_local <vscale x 8 x float> @test_vfwnmacc_vv_bf16m2_f32m4_tu(
+// CHECK-RV64-SAME: <vscale x 8 x float> [[VD:%.*]], <vscale x 8 x bfloat> [[VS1:%.*]], <vscale x 8 x bfloat> [[VS2:%.*]], i64 noundef [[VL:%.*]]) #[[ATTR0]] {
+// CHECK-RV64-NEXT: [[ENTRY:.*:]]
+// CHECK-RV64-NEXT: [[TMP0:%.*]] = call <vscale x 8 x float> @llvm.riscv.vfwnmacc.nxv8f32.nxv8bf16.nxv8bf16.i64(<vscale x 8 x float> [[VD]], <vscale x 8 x bfloat> [[VS1]], <vscale x 8 x bfloat> [[VS2]], i64 7, i64 [[VL]], i64 2)
+// CHECK-RV64-NEXT: ret <vscale x 8 x float> [[TMP0]]
+//
+vfloat32m4_t test_vfwnmacc_vv_bf16m2_f32m4_tu(vfloat32m4_t vd,
+ vbfloat16m2_t vs1,
+ vbfloat16m2_t vs2, size_t vl) {
+ return __riscv_vfwnmacc_tu(vd, vs1, vs2, vl);
+}
+
+// CHECK-RV64-LABEL: define dso_local <vscale x 8 x float> @test_vfwnmacc_vf_bf16m2_f32m4_tu(
+// CHECK-RV64-SAME: <vscale x 8 x float> [[VD:%.*]], bfloat noundef [[VS1:%.*]], <vscale x 8 x bfloat> [[VS2:%.*]], i64 noundef [[VL:%.*]]) #[[ATTR0]] {
+// CHECK-RV64-NEXT: [[ENTRY:.*:]]
+// CHECK-RV64-NEXT: [[TMP0:%.*]] = call <vscale x 8 x float> @llvm.riscv.vfwnmacc.nxv8f32.bf16.nxv8bf16.i64(<vscale x 8 x float> [[VD]], bfloat [[VS1]], <vscale x 8 x bfloat> [[VS2]], i64 7, i64 [[VL]], i64 2)
+// CHECK-RV64-NEXT: ret <vscale x 8 x float> [[TMP0]]
+//
+vfloat32m4_t test_vfwnmacc_vf_bf16m2_f32m4_tu(vfloat32m4_t vd, __bf16 vs1,
+ vbfloat16m2_t vs2, size_t vl) {
+ return __riscv_vfwnmacc_tu(vd, vs1, vs2, vl);
+}
+
+// CHECK-RV64-LABEL: define dso_local <vscale x 16 x float> @test_vfwnmacc_vv_bf16m4_f32m8_tu(
+// CHECK-RV64-SAME: <vscale x 16 x float> [[VD:%.*]], <vscale x 16 x bfloat> [[VS1:%.*]], <vscale x 16 x bfloat> [[VS2:%.*]], i64 noundef [[VL:%.*]]) #[[ATTR0]] {
+// CHECK-RV64-NEXT: [[ENTRY:.*:]]
+// CHECK-RV64-NEXT: [[TMP0:%.*]] = call <vscale x 16 x float> @llvm.riscv.vfwnmacc.nxv16f32.nxv16bf16.nxv16bf16.i64(<vscale x 16 x float> [[VD]], <vscale x 16 x bfloat> [[VS1]], <vscale x 16 x bfloat> [[VS2]], i64 7, i64 [[VL]], i64 2)
+// CHECK-RV64-NEXT: ret <vscale x 16 x float> [[TMP0]]
+//
+vfloat32m8_t test_vfwnmacc_vv_bf16m4_f32m8_tu(vfloat32m8_t vd,
+ vbfloat16m4_t vs1,
+ vbfloat16m4_t vs2, size_t vl) {
+ return __riscv_vfwnmacc_tu(vd, vs1, vs2, vl);
+}
+
+// CHECK-RV64-LABEL: define dso_local <vscale x 16 x float> @test_vfwnmacc_vf_bf16m4_f32m8_tu(
+// CHECK-RV64-SAME: <vscale x 16 x float> [[VD:%.*]], bfloat noundef [[VS1:%.*]], <vscale x 16 x bfloat> [[VS2:%.*]], i64 noundef [[VL:%.*]]) #[[ATTR0]] {
+// CHECK-RV64-NEXT: [[ENTRY:.*:]]
+// CHECK-RV64-NEXT: [[TMP0:%.*]] = call <vscale x 16 x float> @llvm.riscv.vfwnmacc.nxv16f32.bf16.nxv16bf16.i64(<vscale x 16 x float> [[VD]], bfloat [[VS1]], <vscale x 16 x bfloat> [[VS2]], i64 7, i64 [[VL]], i64 2)
+// CHECK-RV64-NEXT: ret <vscale x 16 x float> [[TMP0]]
+//
+vfloat32m8_t test_vfwnmacc_vf_bf16m4_f32m8_tu(vfloat32m8_t vd, __bf16 vs1,
+ vbfloat16m4_t vs2, size_t vl) {
+ return __riscv_vfwnmacc_tu(vd, vs1, vs2, vl);
+}
+
+// CHECK-RV64-LABEL: define dso_local <vscale x 1 x float> @test_vfwnmacc_vv_bf16mf4_f32mf2_tum(
+// CHECK-RV64-SAME: <vscale x 1 x i1> [[VM:%.*]], <vscale x 1 x float> [[VD:%.*]], <vscale x 1 x bfloat> [[VS1:%.*]], <vscale x 1 x bfloat> [[VS2:%.*]], i64 noundef [[VL:%.*]]) #[[ATTR0]] {
+// CHECK-RV64-NEXT: [[ENTRY:.*:]]
+// CHECK-RV64-NEXT: [[TMP0:%.*]] = call <vscale x 1 x float> @llvm.riscv.vfwnmacc.mask.nxv1f32.nxv1bf16.nxv1bf16.i64(<vscale x 1 x float> [[VD]], <vscale x 1 x bfloat> [[VS1]], <vscale x 1 x bfloat> [[VS2]], <vscale x 1 x i1> [[VM]], i64 7, i64 [[VL]], i64 2)
+// CHECK-RV64-NEXT: ret <vscale x 1 x float> [[TMP0]]
+//
+vfloat32mf2_t test_vfwnmacc_vv_bf16mf4_f32mf2_tum(vbool64_t vm,
+ vfloat32mf2_t vd,
+ vbfloat16mf4_t vs1,
+ vbfloat16mf4_t vs2,
+ size_t vl) {
+ return __riscv_vfwnmacc_tum(vm, vd, vs1, vs2, vl);
+}
+
+// CHECK-RV64-LABEL: define dso_local <vscale x 1 x float> @test_vfwnmacc_vf_bf16mf4_f32mf2_tum(
+// CHECK-RV64-SAME: <vscale x 1 x i1> [[VM:%.*]], <vscale x 1 x float> [[VD:%.*]], bfloat noundef [[VS1:%.*]], <vscale x 1 x bfloat> [[VS2:%.*]], i64 noundef [[VL:%.*]]) #[[ATTR0]] {
+// CHECK-RV64-NEXT: [[ENTRY:.*:]]
+// CHECK-RV64-NEXT: [[TMP0:%.*]] = call <vscale x 1 x float> @llvm.riscv.vfwnmacc.mask.nxv1f32.bf16.nxv1bf16.i64(<vscale x 1 x float> [[VD]], bfloat [[VS1]], <vscale x 1 x bfloat> [[VS2]], <vscale x 1 x i1> [[VM]], i64 7, i64 [[VL]], i64 2)
+// CHECK-RV64-NEXT: ret <vscale x 1 x float> [[TMP0]]
+//
+vfloat32mf2_t test_vfwnmacc_vf_bf16mf4_f32mf2_tum(vbool64_t vm,
+ vfloat32mf2_t vd, __bf16 vs1,
+ vbfloat16mf4_t vs2,
+ size_t vl) {
+ return __riscv_vfwnmacc_tum(vm, vd, vs1, vs2, vl);
+}
+
+// CHECK-RV64-LABEL: define dso_local <vscale x 2 x float> @test_vfwnmacc_vv_bf16mf2_f32m1_tum(
+// CHECK-RV64-SAME: <vscale x 2 x i1> [[VM:%.*]], <vscale x 2 x float> [[VD:%.*]], <vscale x 2 x bfloat> [[VS1:%.*]], <vscale x 2 x bfloat> [[VS2:%.*]], i64 noundef [[VL:%.*]]) #[[ATTR0]] {
+// CHECK-RV64-NEXT: [[ENTRY:.*:]]
+// CHECK-RV64-NEXT: [[TMP0:%.*]] = call <vscale x 2 x float> @llvm.riscv.vfwnmacc.mask.nxv2f32.nxv2bf16.nxv2bf16.i64(<vscale x 2 x float> [[VD]], <vscale x 2 x bfloat> [[VS1]], <vscale x 2 x bfloat> [[VS2]], <vscale x 2 x i1> [[VM]], i64 7, i64 [[VL]], i64 2)
+// CHECK-RV64-NEXT: ret <vscale x 2 x float> [[TMP0]]
+//
+vfloat32m1_t test_vfwnmacc_vv_bf16mf2_f32m1_tum(vbool32_t vm, vfloat32m1_t vd,
+ vbfloat16mf2_t vs1,
+ vbfloat16mf2_t vs2, size_t vl) {
+ return __riscv_vfwnmacc_tum(vm, vd, vs1, vs2, vl);
+}
+
+// CHECK-RV64-LABEL: define dso_local <vscale x 2 x float> @test_vfwnmacc_vf_bf16mf2_f32m1_tum(
+// CHECK-RV64-SAME: <vscale x 2 x i1> [[VM:%.*]], <vscale x 2 x float> [[VD:%.*]], bfloat noundef [[VS1:%.*]], <vscale x 2 x bfloat> [[VS2:%.*]], i64 noundef [[VL:%.*]]) #[[ATTR0]] {
+// CHECK-RV64-NEXT: [[ENTRY:.*:]]
+// CHECK-RV64-NEXT: [[TMP0:%.*]] = call <vscale x 2 x float> @llvm.riscv.vfwnmacc.mask.nxv2f32.bf16.nxv2bf16.i64(<vscale x 2 x float> [[VD]], bfloat [[VS1]], <vscale x 2 x bfloat> [[VS2]], <vscale x 2 x i1> [[VM]], i64 7, i64 [[VL]], i64 2)
+// CHECK-RV64-NEXT: ret <vscale x 2 x float> [[TMP0]]
+//
+vfloat32m1_t test_vfwnmacc_vf_bf16mf2_f32m1_tum(vbool32_t vm, vfloat32m1_t vd,
+ __bf16 vs1, vbfloat16mf2_t vs2,
+ size_t vl) {
+ return __riscv_vfwnmacc_tum(vm, vd, vs1, vs2, vl);
+}
+
+// CHECK-RV64-LABEL: define dso_local <vscale x 4 x float> @test_vfwnmacc_vv_bf16m1_f32m2_tum(
+// CHECK-RV64-SAME: <vscale x 4 x i1> [[VM:%.*]], <vscale x 4 x float> [[VD:%.*]], <vscale x 4 x bfloat> [[VS1:%.*]], <vscale x 4 x bfloat> [[VS2:%.*]], i64 noundef [[VL:%.*]]) #[[ATTR0]] {
+// CHECK-RV64-NEXT: [[ENTRY:.*:]]
+// CHECK-RV64-NEXT: [[TMP0:%.*]] = call <vscale x 4 x float> @llvm.riscv.vfwnmacc.mask.nxv4f32.nxv4bf16.nxv4bf16.i64(<vscale x 4 x float> [[VD]], <vscale x 4 x bfloat> [[VS1]], <vscale x 4 x bfloat> [[VS2]], <vscale x 4 x i1> [[VM]], i64 7, i64 [[VL]], i64 2)
+// CHECK-RV64-NEXT: ret <vscale x 4 x float> [[TMP0]]
+//
+vfloat32m2_t test_vfwnmacc_vv_bf16m1_f32m2_tum(vbool16_t vm, vfloat32m2_t vd,
+ vbfloat16m1_t vs1,
+ vbfloat16m1_t vs2, size_t vl) {
+ return __riscv_vfwnmacc_tum(vm, vd, vs1, vs2, vl);
+}
+
+// CHECK-RV64-LABEL: define dso_local <vscale x 4 x float> @test_vfwnmacc_vf_bf16m1_f32m2_tum(
+// CHECK-RV64-SAME: <vscale x 4 x i1> [[VM:%.*]], <vscale x 4 x float> [[VD:%.*]], bfloat noundef [[VS1:%.*]], <vscale x 4 x bfloat> [[VS2:%.*]], i64 noundef [[VL:%.*]]) #[[ATTR0]] {
+// CHECK-RV64-NEXT: [[ENTRY:.*:]]
+// CHECK-RV64-NEXT: [[TMP0:%.*]] = call <vscale x 4 x float> @llvm.riscv.vfwnmacc.mask.nxv4f32.bf16.nxv4bf16.i64(<vscale x 4 x float> [[VD]], bfloat [[VS1]], <vscale x 4 x bfloat> [[VS2]], <vscale x 4 x i1> [[VM]], i64 7, i64 [[VL]], i64 2)
+// CHECK-RV64-NEXT: ret <vscale x 4 x float> [[TMP0]]
+//
+vfloat32m2_t test_vfwnmacc_vf_bf16m1_f32m2_tum(vbool16_t vm, vfloat32m2_t vd,
+ __bf16 vs1, vbfloat16m1_t vs2,
+ size_t vl) {
+ return __riscv_vfwnmacc_tum(vm, vd, vs1, vs2, vl);
+}
+
+// CHECK-RV64-LABEL: define dso_local <vscale x 8 x float> @test_vfwnmacc_vv_bf16m2_f32m4_tum(
+// CHECK-RV64-SAME: <vscale x 8 x i1> [[VM:%.*]], <vscale x 8 x float> [[VD:%.*]], <vscale x 8 x bfloat> [[VS1:%.*]], <vscale x 8 x bfloat> [[VS2:%.*]], i64 noundef [[VL:%.*]]) #[[ATTR0]] {
+// CHECK-RV64-NEXT: [[ENTRY:.*:]]
+// CHECK-RV64-NEXT: [[TMP0:%.*]] = call <vscale x 8 x float> @llvm.riscv.vfwnmacc.mask.nxv8f32.nxv8bf16.nxv8bf16.i64(<vscale x 8 x float> [[VD]], <vscale x 8 x bfloat> [[VS1]], <vscale x 8 x bfloat> [[VS2]], <vscale x 8 x i1> [[VM]], i64 7, i64 [[VL]], i64 2)
+// CHECK-RV64-NEXT: ret <vscale x 8 x float> [[TMP0]]
+//
+vfloat32m4_t test_vfwnmacc_vv_bf16m2_f32m4_tum(vbool8_t vm, vfloat32m4_t vd,
+ vbfloat16m2_t vs1,
+ vbfloat16m2_t vs2, size_t vl) {
+ return __riscv_vfwnmacc_tum(vm, vd, vs1, vs2, vl);
+}
+
+// CHECK-RV64-LABEL: define dso_local <vscale x 8 x float> @test_vfwnmacc_vf_bf16m2_f32m4_tum(
+// CHECK-RV64-SAME: <vscale x 8 x i1> [[VM:%.*]], <vscale x 8 x float> [[VD:%.*]], bfloat noundef [[VS1:%.*]], <vscale x 8 x bfloat> [[VS2:%.*]], i64 noundef [[VL:%.*]]) #[[ATTR0]] {
+// CHECK-RV64-NEXT: [[ENTRY:.*:]]
+// CHECK-RV64-NEXT: [[TMP0:%.*]] = call <vscale x 8 x float> @llvm.riscv.vfwnmacc.mask.nxv8f32.bf16.nxv8bf16.i64(<vscale x 8 x float> [[VD]], bfloat [[VS1]], <vscale x 8 x bfloat> [[VS2]], <vscale x 8 x i1> [[VM]], i64 7, i64 [[VL]], i64 2)
+// CHECK-RV64-NEXT: ret <vscale x 8 x float> [[TMP0]]
+//
+vfloat32m4_t test_vfwnmacc_vf_bf16m2_f32m4_tum(vbool8_t vm, vfloat32m4_t vd,
+ __bf16 vs1, vbfloat16m2_t vs2,
+ size_t vl) {
+ return __riscv_vfwnmacc_tum(vm, vd, vs1, vs2, vl);
+}
+
+// CHECK-RV64-LABEL: define dso_local <vscale x 16 x float> @test_vfwnmacc_vv_bf16m4_f32m8_tum(
+// CHECK-RV64-SAME: <vscale x 16 x i1> [[VM:%.*]], <vscale x 16 x float> [[VD:%.*]], <vscale x 16 x bfloat> [[VS1:%.*]], <vscale x 16 x bfloat> [[VS2:%.*]], i64 noundef [[VL:%.*]]) #[[ATTR0]] {
+// CHECK-RV64-NEXT: [[ENTRY:.*:]]
+// CHECK-RV64-NEXT: [[TMP0:%.*]] = call <vscale x 16 x float> @llvm.riscv.vfwnmacc.mask.nxv16f32.nxv16bf16.nxv16bf16.i64(<vscale x 16 x float> [[VD]], <vscale x 16 x bfloat> [[VS1]], <vscale x 16 x bfloat> [[VS2]], <vscale x 16 x i1> [[VM]], i64 7, i64 [[VL]], i64 2)
+// CHECK-RV64-NEXT: ret <vscale x 16 x float> [[TMP0]]
+//
+vfloat32m8_t test_vfwnmacc_vv_bf16m4_f32m8_tum(vbool4_t vm, vfloat32m8_t vd,
+ vbfloat16m4_t vs1,
+ vbfloat16m4_t vs2, size_t vl) {
+ return __riscv_vfwnmacc_tum(vm, vd, vs1, vs2, vl);
+}
+
+// CHECK-RV64-LABEL: define dso_local <vscale x 16 x float> @test_vfwnmacc_vf_bf16m4_f32m8_tum(
+// CHECK-RV64-SAME: <vscale x 16 x i1> [[VM:%.*]], <vscale x 16 x float> [[VD:%.*]], bfloat noundef [[VS1:%.*]], <vscale x 16 x bfloat> [[VS2:%.*]], i64 noundef [[VL:%.*]]) #[[ATTR0]] {
+// CHECK-RV64-NEXT: [[ENTRY:.*:]]
+// CHECK-RV64-NEXT: [[TMP0:%.*]] = call <vscale x 16 x float> @llvm.riscv.vfwnmacc.mask.nxv16f32.bf16.nxv16bf16.i64(<vscale x 16 x float> [[VD]], bfloat [[VS1]], <vscale x 16 x bfloat> [[VS2]], <vscale x 16 x i1> [[VM]], i64 7, i64 [[VL]], i64 2)
+// CHECK-RV64-NEXT: ret <vscale x 16 x float> [[TMP0]]
+//
+vfloat32m8_t test_vfwnmacc_vf_bf16m4_f32m8_tum(vbool4_t vm, vfloat32m8_t vd,
+ __bf16 vs1, vbfloat16m4_t vs2,
+ size_t vl) {
+ return __riscv_vfwnmacc_tum(vm, vd, vs1, vs2, vl);
+}
+
+// CHECK-RV64-LABEL: define dso_local <vscale x 1 x float> @test_vfwnmacc_vv_bf16mf4_f32mf2_tumu(
+// CHECK-RV64-SAME: <vscale x 1 x i1> [[VM:%.*]], <vscale x 1 x float> [[VD:%.*]], <vscale x 1 x bfloat> [[VS1:%.*]], <vscale x 1 x bfloat> [[VS2:%.*]], i64 noundef [[VL:%.*]]) #[[ATTR0]] {
+// CHECK-RV64-NEXT: [[ENTRY:.*:]]
+// CHECK-RV64-NEXT: [[TMP0:%.*]] = call <vscale x 1 x float> @llvm.riscv.vfwnmacc.mask.nxv1f32.nxv1bf16.nxv1bf16.i64(<vscale x 1 x float> [[VD]], <vscale x 1 x bfloat> [[VS1]], <vscale x 1 x bfloat> [[VS2]], <vscale x 1 x i1> [[VM]], i64 7, i64 [[VL]], i64 0)
+// CHECK-RV64-NEXT: ret <vscale x 1 x float> [[TMP0]]
+//
+vfloat32mf2_t test_vfwnmacc_vv_bf16mf4_f32mf2_tumu(vbool64_t vm,
+ vfloat32mf2_t vd,
+ vbfloat16mf4_t vs1,
+ vbfloat16mf4_t vs2,
+ size_t vl) {
+ return __riscv_vfwnmacc_tumu(vm, vd, vs1, vs2, vl);
+}
+
+// CHECK-RV64-LABEL: define dso_local <vscale x 1 x float> @test_vfwnmacc_vf_bf16mf4_f32mf2_tumu(
+// CHECK-RV64-SAME: <vscale x 1 x i1> [[VM:%.*]], <vscale x 1 x float> [[VD:%.*]], bfloat noundef [[VS1:%.*]], <vscale x 1 x bfloat> [[VS2:%.*]], i64 noundef [[VL:%.*]]) #[[ATTR0]] {
+// CHECK-RV64-NEXT: [[ENTRY:.*:]]
+// CHECK-RV64-NEXT: [[TMP0:%.*]] = call <vscale x 1 x float> @llvm.riscv.vfwnmacc.mask.nxv1f32.bf16.nxv1bf16.i64(<vscale x 1 x float> [[VD]], bfloat [[VS1]], <vscale x 1 x bfloat> [[VS2]], <vscale x 1 x i1> [[VM]], i64 7, i64 [[VL]], i64 0)
+// CHECK-RV64-NEXT: ret <vscale x 1 x float> [[TMP0]]
+//
+vfloat32mf2_t test_vfwnmacc_vf_bf16mf4_f32mf2_tumu(vbool64_t vm,
+ vfloat32mf2_t vd, __bf16 vs1,
+ vbfloat16mf4_t vs2,
+ size_t vl) {
+ return __riscv_vfwnmacc_tumu(vm, vd, vs1, vs2, vl);
+}
+
+// CHECK-RV64-LABEL: define dso_local <vscale x 2 x float> @test_vfwnmacc_vv_bf16mf2_f32m1_tumu(
+// CHECK-RV64-SAME: <vscale x 2 x i1> [[VM:%.*]], <vscale x 2 x float> [[VD:%.*]], <vscale x 2 x bfloat> [[VS1:%.*]], <vscale x 2 x bfloat> [[VS2:%.*]], i64 noundef [[VL:%.*]]) #[[ATTR0]] {
+// CHECK-RV64-NEXT: [[ENTRY:.*:]]
+// CHECK-RV64-NEXT: [[TMP0:%.*]] = call <vscale x 2 x float> @llvm.riscv.vfwnmacc.mask.nxv2f32.nxv2bf16.nxv2bf16.i64(<vscale x 2 x float> [[VD]], <vscale x 2 x bfloat> [[VS1]], <vscale x 2 x bfloat> [[VS2]], <vscale x 2 x i1> [[VM]], i64 7, i64 [[VL]], i64 0)
+// CHECK-RV64-NEXT: ret <vscale x 2 x float> [[TMP0]]
+//
+vfloat32m1_t test_vfwnmacc_vv_bf16mf2_f32m1_tumu(vbool32_t vm, vfloat32m1_t vd,
+ vbfloat16mf2_t vs1,
+ vbfloat16mf2_t vs2,
+ size_t vl) {
+ return __riscv_vfwnmacc_tumu(vm, vd, vs1, vs2, vl);
+}
+
+// CHECK-RV64-LABEL: define dso_local <vscale x 2 x float> @test_vfwnmacc_vf_bf16mf2_f32m1_tumu(
+// CHECK-RV64-SAME: <vscale x 2 x i1> [[VM:%.*]], <vscale x 2 x float> [[VD:%.*]], bfloat noundef [[VS1:%.*]], <vscale x 2 x bfloat> [[VS2:%.*]], i64 noundef [[VL:%.*]]) #[[ATTR0]] {
+// CHECK-RV64-NEXT: [[ENTRY:.*:]]
+// CHECK-RV64-NEXT: [[TMP0:%.*]] = call <vscale x 2 x float> @llvm.riscv.vfwnmacc.mask.nxv2f32.bf16.nxv2bf16.i64(<vscale x 2 x float> [[VD]], bfloat [[VS1]], <vscale x 2 x bfloat> [[VS2]], <vscale x 2 x i1> [[VM]], i64 7, i64 [[VL]], i64 0)
+// CHECK-RV64-NEXT: ret <vscale x 2 x float> [[TMP0]]
+//
+vfloat32m1_t test_vfwnmacc_vf_bf16mf2_f32m1_tumu(vbool32_t vm, vfloat32m1_t vd,
+ __bf16 vs1, vbfloat16mf2_t vs2,
+ size_t vl) {
+ return __riscv_vfwnmacc_tumu(vm, vd, vs1, vs2, vl);
+}
+
+// CHECK-RV64-LABEL: define dso_local <vscale x 4 x float> @test_vfwnmacc_vv_bf16m1_f32m2_tumu(
+// CHECK-RV64-SAME: <vscale x 4 x i1> [[VM:%.*]], <vscale x 4 x float> [[VD:%.*]], <vscale x 4 x bfloat> [[VS1:%.*]], <vscale x 4 x bfloat> [[VS2:%.*]], i64 noundef [[VL:%.*]]) #[[ATTR0]] {
+// CHECK-RV64-NEXT: [[ENTRY:.*:]]
+// CHECK-RV64-NEXT: [[TMP0:%.*]] = call <vscale x 4 x float> @llvm.riscv.vfwnmacc.mask.nxv4f32.nxv4bf16.nxv4bf16.i64(<vscale x 4 x float> [[VD]], <vscale x 4 x bfloat> [[VS1]], <vscale x 4 x bfloat> [[VS2]], <vscale x 4 x i1> [[VM]], i64 7, i64 [[VL]], i64 0)
+// CHECK-RV64-NEXT: ret <vscale x 4 x float> [[TMP0]]
+//
+vfloat32m2_t test_vfwnmacc_vv_bf16m1_f32m2_tumu(vbool16_t vm, vfloat32m2_t vd,
+ vbfloat16m1_t vs1,
+ vbfloat16m1_t vs2, size_t vl) {
+ return __riscv_vfwnmacc_tumu(vm, vd, vs1, vs2, vl);
+}
+
+// CHECK-RV64-LABEL: define dso_local <vscale x 4 x float> @test_vfwnmacc_vf_bf16m1_f32m2_tumu(
+// CHECK-RV64-SAME: <vscale x 4 x i1> [[VM:%.*]], <vscale x 4 x float> [[VD:%.*]], bfloat noundef [[VS1:%.*]], <vscale x 4 x bfloat> [[VS2:%.*]], i64 noundef [[VL:%.*]]) #[[ATTR0]] {
+// CHECK-RV64-NEXT: [[ENTRY:.*:]]
+// CHECK-RV64-NEXT: [[TMP0:%.*]] = call <vscale x 4 x float> @llvm.riscv.vfwnmacc.mask.nxv4f32.bf16.nxv4bf16.i64(<vscale x 4 x float> [[VD]], bfloat [[VS1]], <vscale x 4 x bfloat> [[VS2]], <vscale x 4 x i1> [[VM]], i64 7, i64 [[VL]], i64 0)
+// CHECK-RV64-NEXT: ret <vscale x 4 x float> [[TMP0]]
+//
+vfloat32m2_t test_vfwnmacc_vf_bf16m1_f32m2_tumu(vbool16_t vm, vfloat32m2_t vd,
+ __bf16 vs1, vbfloat16m1_t vs2,
+ size_t vl) {
+ return __riscv_vfwnmacc_tumu(vm, vd, vs1, vs2, vl);
+}
+
+// CHECK-RV64-LABEL: define dso_local <vscale x 8 x float> @test_vfwnmacc_vv_bf16m2_f32m4_tumu(
+// CHECK-RV64-SAME: <vscale x 8 x i1> [[VM:%.*]], <vscale x 8 x float> [[VD:%.*]], <vscale x 8 x bfloat> [[VS1:%.*]], <vscale x 8 x bfloat> [[VS2:%.*]], i64 noundef [[VL:%.*]]) #[[ATTR0]] {
+// CHECK-RV64-NEXT: [[ENTRY:.*:]]
+// CHECK-RV64-NEXT: [[TMP0:%.*]] = call <vscale x 8 x float> @llvm.riscv.vfwnmacc.mask.nxv8f32.nxv8bf16.nxv8bf16.i64(<vscale x 8 x float> [[VD]], <vscale x 8 x bfloat> [[VS1]], <vscale x 8 x bfloat> [[VS2]], <vscale x 8 x i1> [[VM]], i64 7, i64 [[VL]], i64 0)
+// CHECK-RV64-NEXT: ret <vscale x 8 x float> [[TMP0]]
+//
+vfloat32m4_t test_vfwnmacc_vv_bf16m2_f32m4_tumu(vbool8_t vm, vfloat32m4_t vd,
+ vbfloat16m2_t vs1,
+ vbfloat16m2_t vs2, size_t vl) {
+ return __riscv_vfwnmacc_tumu(vm, vd, vs1, vs2, vl);
+}
+
+// CHECK-RV64-LABEL: define dso_local <vscale x 8 x float> @test_vfwnmacc_vf_bf16m2_f32m4_tumu(
+// CHECK-RV64-SAME: <vscale x 8 x i1> [[VM:%.*]], <vscale x 8 x float> [[VD:%.*]], bfloat noundef [[VS1:%.*]], <vscale x 8 x bfloat> [[VS2:%.*]], i64 noundef [[VL:%.*]]) #[[ATTR0]] {
+// CHECK-RV64-NEXT: [[ENTRY:.*:]]
+// CHECK-RV64-NEXT: [[TMP0:%.*]] = call <vscale x 8 x float> @llvm.riscv.vfwnmacc.mask.nxv8f32.bf16.nxv8bf16.i64(<vscale x 8 x float> [[VD]], bfloat [[VS1]], <vscale x 8 x bfloat> [[VS2]], <vscale x 8 x i1> [[VM]], i64 7, i64 [[VL]], i64 0)
+// CHECK-RV64-NEXT: ret <vscale x 8 x float> [[TMP0]]
+//
+vfloat32m4_t test_vfwnmacc_vf_bf16m2_f32m4_tumu(vbool8_t vm, vfloat32m4_t vd,
+ __bf16 vs1, vbfloat16m2_t vs2,
+ size_t vl) {
+ return __riscv_vfwnmacc_tumu(vm, vd, vs1, vs2, vl);
+}
+
+// CHECK-RV64-LABEL: define dso_local <vscale x 16 x float> @test_vfwnmacc_vv_bf16m4_f32m8_tumu(
+// CHECK-RV64-SAME: <vscale x 16 x i1> [[VM:%.*]], <vscale x 16 x float> [[VD:%.*]], <vscale x 16 x bfloat> [[VS1:%.*]], <vscale x 16 x bfloat> [[VS2:%.*]], i64 noundef [[VL:%.*]]) #[[ATTR0]] {
+// CHECK-RV64-NEXT: [[ENTRY:.*:]]
+// CHECK-RV64-NEXT: [[TMP0:%.*]] = call <vscale x 16 x float> @llvm.riscv.vfwnmacc.mask.nxv16f32.nxv16bf16.nxv16bf16.i64(<vscale x 16 x float> [[VD]], <vscale x 16 x bfloat> [[VS1]], <vscale x 16 x bfloat> [[VS2]], <vscale x 16 x i1> [[VM]], i64 7, i64 [[VL]], i64 0)
+// CHECK-RV64-NEXT: ret <vscale x 16 x float> [[TMP0]]
+//
+vfloat32m8_t test_vfwnmacc_vv_bf16m4_f32m8_tumu(vbool4_t vm, vfloat32m8_t vd,
+ vbfloat16m4_t vs1,
+ vbfloat16m4_t vs2, size_t vl) {
+ return __riscv_vfwnmacc_tumu(vm, vd, vs1, vs2, vl);
+}
+
+// CHECK-RV64-LABEL: define dso_local <vscale x 16 x float> @test_vfwnmacc_vf_bf16m4_f32m8_tumu(
+// CHECK-RV64-SAME: <vscale x 16 x i1> [[VM:%.*]], <vscale x 16 x float> [[VD:%.*]], bfloat noundef [[VS1:%.*]], <vscale x 16 x bfloat> [[VS2:%.*]], i64 noundef [[VL:%.*]]) #[[ATTR0]] {
+// CHECK-RV64-NEXT: [[ENTRY:.*:]]
+// CHECK-RV64-NEXT: [[TMP0:%.*]] = call <vscale x 16 x float> @llvm.riscv.vfwnmacc.mask.nxv16f32.bf16.nxv16bf16.i64(<vscale x 16 x float> [[VD]], bfloat [[VS1]], <vscale x 16 x bfloat> [[VS2]], <vscale x 16 x i1> [[VM]], i64 7, i64 [[VL]], i64 0)
+// CHECK-RV64-NEXT: ret <vscale x 16 x float> [[TMP0]]
+//
+vfloat32m8_t test_vfwnmacc_vf_bf16m4_f32m8_tumu(vbool4_t vm, vfloat32m8_t vd,
+ __bf16 vs1, vbfloat16m4_t vs2,
+ size_t vl) {
+ return __riscv_vfwnmacc_tumu(vm, vd, vs1, vs2, vl);
+}
+
+// CHECK-RV64-LABEL: define dso_local <vscale x 1 x float> @test_vfwnmacc_vv_bf16mf4_f32mf2_mu(
+// CHECK-RV64-SAME: <vscale x 1 x i1> [[VM:%.*]], <vscale x 1 x float> [[VD:%.*]], <vscale x 1 x bfloat> [[VS1:%.*]], <vscale x 1 x bfloat> [[VS2:%.*]], i64 noundef [[VL:%.*]]) #[[ATTR0]] {
+// CHECK-RV64-NEXT: [[ENTRY:.*:]]
+// CHECK-RV64-NEXT: [[TMP0:%.*]] = call <vscale x 1 x float> @llvm.riscv.vfwnmacc.mask.nxv1f32.nxv1bf16.nxv1bf16.i64(<vscale x 1 x float> [[VD]], <vscale x 1 x bfloat> [[VS1]], <vscale x 1 x bfloat> [[VS2]], <vscale x 1 x i1> [[VM]], i64 7, i64 [[VL]], i64 1)
+// CHECK-RV64-NEXT: ret <vscale x 1 x float> [[TMP0]]
+//
+vfloat32mf2_t test_vfwnmacc_vv_bf16mf4_f32mf2_mu(vbool64_t vm, vfloat32mf2_t vd,
+ vbfloat16mf4_t vs1,
+ vbfloat16mf4_t vs2,
+ size_t vl) {
+ return __riscv_vfwnmacc_mu(vm, vd, vs1, vs2, vl);
+}
+
+// CHECK-RV64-LABEL: define dso_local <vscale x 1 x float> @test_vfwnmacc_vf_bf16mf4_f32mf2_mu(
+// CHECK-RV64-SAME: <vscale x 1 x i1> [[VM:%.*]], <vscale x 1 x float> [[VD:%.*]], bfloat noundef [[VS1:%.*]], <vscale x 1 x bfloat> [[VS2:%.*]], i64 noundef [[VL:%.*]]) #[[ATTR0]] {
+// CHECK-RV64-NEXT: [[ENTRY:.*:]]
+// CHECK-RV64-NEXT: [[TMP0:%.*]] = call <vscale x 1 x float> @llvm.riscv.vfwnmacc.mask.nxv1f32.bf16.nxv1bf16.i64(<vscale x 1 x float> [[VD]], bfloat [[VS1]], <vscale x 1 x bfloat> [[VS2]], <vscale x 1 x i1> [[VM]], i64 7, i64 [[VL]], i64 1)
+// CHECK-RV64-NEXT: ret <vscale x 1 x float> [[TMP0]]
+//
+vfloat32mf2_t test_vfwnmacc_vf_bf16mf4_f32mf2_mu(vbool64_t vm, vfloat32mf2_t vd,
+ __bf16 vs1, vbfloat16mf4_t vs2,
+ size_t vl) {
+ return __riscv_vfwnmacc_mu(vm, vd, vs1, vs2, vl);
+}
+
+// CHECK-RV64-LABEL: define dso_local <vscale x 2 x float> @test_vfwnmacc_vv_bf16mf2_f32m1_mu(
+// CHECK-RV64-SAME: <vscale x 2 x i1> [[VM:%.*]], <vscale x 2 x float> [[VD:%.*]], <vscale x 2 x bfloat> [[VS1:%.*]], <vscale x 2 x bfloat> [[VS2:%.*]], i64 noundef [[VL:%.*]]) #[[ATTR0]] {
+// CHECK-RV64-NEXT: [[ENTRY:.*:]]
+// CHECK-RV64-NEXT: [[TMP0:%.*]] = call <vscale x 2 x float> @llvm.riscv.vfwnmacc.mask.nxv2f32.nxv2bf16.nxv2bf16.i64(<vscale x 2 x float> [[VD]], <vscale x 2 x bfloat> [[VS1]], <vscale x 2 x bfloat> [[VS2]], <vscale x 2 x i1> [[VM]], i64 7, i64 [[VL]], i64 1)
+// CHECK-RV64-NEXT: ret <vscale x 2 x float> [[TMP0]]
+//
+vfloat32m1_t test_vfwnmacc_vv_bf16mf2_f32m1_mu(vbool32_t vm, vfloat32m1_t vd,
+ vbfloat16mf2_t vs1,
+ vbfloat16mf2_t vs2, size_t vl) {
+ return __riscv_vfwnmacc_mu(vm, vd, vs1, vs2, vl);
+}
+
+// CHECK-RV64-LABEL: define dso_local <vscale x 2 x float> @test_vfwnmacc_vf_bf16mf2_f32m1_mu(
+// CHECK-RV64-SAME: <vscale x 2 x i1> [[VM:%.*]], <vscale x 2 x float> [[VD:%.*]], bfloat noundef [[VS1:%.*]], <vscale x 2 x bfloat> [[VS2:%.*]], i64 noundef [[VL:%.*]]) #[[ATTR0]] {
+// CHECK-RV64-NEXT: [[ENTRY:.*:]]
+// CHECK-RV64-NEXT: [[TMP0:%.*]] = call <vscale x 2 x float> @llvm.riscv.vfwnmacc.mask.nxv2f32.bf16.nxv2bf16.i64(<vscale x 2 x float> [[VD]], bfloat [[VS1]], <vscale x 2 x bfloat> [[VS2]], <vscale x 2 x i1> [[VM]], i64 7, i64 [[VL]], i64 1)
+// CHECK-RV64-NEXT: ret <vscale x 2 x float> [[TMP0]]
+//
+vfloat32m1_t test_vfwnmacc_vf_bf16mf2_f32m1_mu(vbool32_t vm, vfloat32m1_t vd,
+ __bf16 vs1, vbfloat16mf2_t vs2,
+ size_t vl) {
+ return __riscv_vfwnmacc_mu(vm, vd, vs1, vs2, vl);
+}
+
+// CHECK-RV64-LABEL: define dso_local <vscale x 4 x float> @test_vfwnmacc_vv_bf16m1_f32m2_mu(
+// CHECK-RV64-SAME: <vscale x 4 x i1> [[VM:%.*]], <vscale x 4 x float> [[VD:%.*]], <vscale x 4 x bfloat> [[VS1:%.*]], <vscale x 4 x bfloat> [[VS2:%.*]], i64 noundef [[VL:%.*]]) #[[ATTR0]] {
+// CHECK-RV64-NEXT: [[ENTRY:.*:]]
+// CHECK-RV64-NEXT: [[TMP0:%.*]] = call <vscale x 4 x float> @llvm.riscv.vfwnmacc.mask.nxv4f32.nxv4bf16.nxv4bf16.i64(<vscale x 4 x float> [[VD]], <vscale x 4 x bfloat> [[VS1]], <vscale x 4 x bfloat> [[VS2]], <vscale x 4 x i1> [[VM]], i64 7, i64 [[VL]], i64 1)
+// CHECK-RV64-NEXT: ret <vscale x 4 x float> [[TMP0]]
+//
+vfloat32m2_t test_vfwnmacc_vv_bf16m1_f32m2_mu(vbool16_t vm, vfloat32m2_t vd,
+ vbfloat16m1_t vs1,
+ vbfloat16m1_t vs2, size_t vl) {
+ return __riscv_vfwnmacc_mu(vm, vd, vs1, vs2, vl);
+}
+
+// CHECK-RV64-LABEL: define dso_local <vscale x 4 x float> @test_vfwnmacc_vf_bf16m1_f32m2_mu(
+// CHECK-RV64-SAME: <vscale x 4 x i1> [[VM:%.*]], <vscale x 4 x float> [[VD:%.*]], bfloat noundef [[VS1:%.*]], <vscale x 4 x bfloat> [[VS2:%.*]], i64 noundef [[VL:%.*]]) #[[ATTR0]] {
+// CHECK-RV64-NEXT: [[ENTRY:.*:]]
+// CHECK-RV64-NEXT: [[TMP0:%.*]] = call <vscale x 4 x float> @llvm.riscv.vfwnmacc.mask.nxv4f32.bf16.nxv4bf16.i64(<vscale x 4 x float> [[VD]], bfloat [[VS1]], <vscale x 4 x bfloat> [[VS2]], <vscale x 4 x i1> [[VM]], i64 7, i64 [[VL]], i64 1)
+// CHECK-RV64-NEXT: ret <vscale x 4 x float> [[TMP0]]
+//
+vfloat32m2_t test_vfwnmacc_vf_bf16m1_f32m2_mu(vbool16_t vm, vfloat32m2_t vd,
+ __bf16 vs1, vbfloat16m1_t vs2,
+ size_t vl) {
+ return __riscv_vfwnmacc_mu(vm, vd, vs1, vs2, vl);
+}
+
+// CHECK-RV64-LABEL: define dso_local <vscale x 8 x float> @test_vfwnmacc_vv_bf16m2_f32m4_mu(
+// CHECK-RV64-SAME: <vscale x 8 x i1> [[VM:%.*]], <vscale x 8 x float> [[VD:%.*]], <vscale x 8 x bfloat> [[VS1:%.*]], <vscale x 8 x bfloat> [[VS2:%.*]], i64 noundef [[VL:%.*]]) #[[ATTR0]] {
+// CHECK-RV64-NEXT: [[ENTRY:.*:]]
+// CHECK-RV64-NEXT: [[TMP0:%.*]] = call <vscale x 8 x float> @llvm.riscv.vfwnmacc.mask.nxv8f32.nxv8bf16.nxv8bf16.i64(<vscale x 8 x float> [[VD]], <vscale x 8 x bfloat> [[VS1]], <vscale x 8 x bfloat> [[VS2]], <vscale x 8 x i1> [[VM]], i64 7, i64 [[VL]], i64 1)
+// CHECK-RV64-NEXT: ret <vscale x 8 x float> [[TMP0]]
+//
+vfloat32m4_t test_vfwnmacc_vv_bf16m2_f32m4_mu(vbool8_t vm, vfloat32m4_t vd,
+ vbfloat16m2_t vs1,
+ vbfloat16m2_t vs2, size_t vl) {
+ return __riscv_vfwnmacc_mu(vm, vd, vs1, vs2, vl);
+}
+
+// CHECK-RV64-LABEL: define dso_local <vscale x 8 x float> @test_vfwnmacc_vf_bf16m2_f32m4_mu(
+// CHECK-RV64-SAME: <vscale x 8 x i1> [[VM:%.*]], <vscale x 8 x float> [[VD:%.*]], bfloat noundef [[VS1:%.*]], <vscale x 8 x bfloat> [[VS2:%.*]], i64 noundef [[VL:%.*]]) #[[ATTR0]] {
+// CHECK-RV64-NEXT: [[ENTRY:.*:]]
+// CHECK-RV64-NEXT: [[TMP0:%.*]] = call <vscale x 8 x float> @llvm.riscv.vfwnmacc.mask.nxv8f32.bf16.nxv8bf16.i64(<vscale x 8 x float> [[VD]], bfloat [[VS1]], <vscale x 8 x bfloat> [[VS2]], <vscale x 8 x i1> [[VM]], i64 7, i64 [[VL]], i64 1)
+// CHECK-RV64-NEXT: ret <vscale x 8 x float> [[TMP0]]
+//
+vfloat32m4_t test_vfwnmacc_vf_bf16m2_f32m4_mu(vbool8_t vm, vfloat32m4_t vd,
+ __bf16 vs1, vbfloat16m2_t vs2,
+ size_t vl) {
+ return __riscv_vfwnmacc_mu(vm, vd, vs1, vs2, vl);
+}
+
+// CHECK-RV64-LABEL: define dso_local <vscale x 16 x float> @test_vfwnmacc_vv_bf16m4_f32m8_mu(
+// CHECK-RV64-SAME: <vscale x 16 x i1> [[VM:%.*]], <vscale x 16 x float> [[VD:%.*]], <vscale x 16 x bfloat> [[VS1:%.*]], <vscale x 16 x bfloat> [[VS2:%.*]], i64 noundef [[VL:%.*]]) #[[ATTR0]] {
+// CHECK-RV64-NEXT: [[ENTRY:.*:]]
+// CHECK-RV64-NEXT: [[TMP0:%.*]] = call <vscale x 16 x float> @llvm.riscv.vfwnmacc.mask.nxv16f32.nxv16bf16.nxv16bf16.i64(<vscale x 16 x float> [[VD]], <vscale x 16 x bfloat> [[VS1]], <vscale x 16 x bfloat> [[VS2]], <vscale x 16 x i1> [[VM]], i64 7, i64 [[VL]], i64 1)
+// CHECK-RV64-NEXT: ret <vscale x 16 x float> [[TMP0]]
+//
+vfloat32m8_t test_vfwnmacc_vv_bf16m4_f32m8_mu(vbool4_t vm, vfloat32m8_t vd,
+ vbfloat16m4_t vs1,
+ vbfloat16m4_t vs2, size_t vl) {
+ return __riscv_vfwnmacc_mu(vm, vd, vs1, vs2, vl);
+}
+
+// CHECK-RV64-LABEL: define dso_local <vscale x 16 x float> @test_vfwnmacc_vf_bf16m4_f32m8_mu(
+// CHECK-RV64-SAME: <vscale x 16 x i1> [[VM:%.*]], <vscale x 16 x float> [[VD:%.*]], bfloat noundef [[VS1:%.*]], <vscale x 16 x bfloat> [[VS2:%.*]], i64 noundef [[VL:%.*]]) #[[ATTR0]] {
+// CHECK-RV64-NEXT: [[ENTRY:.*:]]
+// CHECK-RV64-NEXT: [[TMP0:%.*]] = call <vscale x 16 x float> @llvm.riscv.vfwnmacc.mask.nxv16f32.bf16.nxv16bf16.i64(<vscale x 16 x float> [[VD]], bfloat [[VS1]], <vscale x 16 x bfloat> [[VS2]], <vscale x 16 x i1> [[VM]], i64 7, i64 [[VL]], i64 1)
+// CHECK-RV64-NEXT: ret <vscale x 16 x float> [[TMP0]]
+//
+vfloat32m8_t test_vfwnmacc_vf_bf16m4_f32m8_mu(vbool4_t vm, vfloat32m8_t vd,
+ __bf16 vs1, vbfloat16m4_t vs2,
+ size_t vl) {
+ return __riscv_vfwnmacc_mu(vm, vd, vs1, vs2, vl);
+}
+
+// CHECK-RV64-LABEL: define dso_local <vscale x 1 x float> @test_vfwnmacc_vv_bf16mf4_f32mf2_rm_tu(
+// CHECK-RV64-SAME: <vscale x 1 x float> [[VD:%.*]], <vscale x 1 x bfloat> [[VS1:%.*]], <vscale x 1 x bfloat> [[VS2:%.*]], i64 noundef [[VL:%.*]]) #[[ATTR0]] {
+// CHECK-RV64-NEXT: [[ENTRY:.*:]]
+// CHECK-RV64-NEXT: [[TMP0:%.*]] = call <vscale x 1 x float> @llvm.riscv.vfwnmacc.nxv1f32.nxv1bf16.nxv1bf16.i64(<vscale x 1 x float> [[VD]], <vscale x 1 x bfloat> [[VS1]], <vscale x 1 x bfloat> [[VS2]], i64 0, i64 [[VL]], i64 2)
+// CHECK-RV64-NEXT: ret <vscale x 1 x float> [[TMP0]]
+//
+vfloat32mf2_t test_vfwnmacc_vv_bf16mf4_f32mf2_rm_tu(vfloat32mf2_t vd,
+ vbfloat16mf4_t vs1,
+ vbfloat16mf4_t vs2,
+ size_t vl) {
+ return __riscv_vfwnmacc_tu(vd, vs1, vs2, __RISCV_FRM_RNE, vl);
+}
+
+// CHECK-RV64-LABEL: define dso_local <vscale x 1 x float> @test_vfwnmacc_vf_bf16mf4_f32mf2_rm_tu(
+// CHECK-RV64-SAME: <vscale x 1 x float> [[VD:%.*]], bfloat noundef [[VS1:%.*]], <vscale x 1 x bfloat> [[VS2:%.*]], i64 noundef [[VL:%.*]]) #[[ATTR0]] {
+// CHECK-RV64-NEXT: [[ENTRY:.*:]]
+// CHECK-RV64-NEXT: [[TMP0:%.*]] = call <vscale x 1 x float> @llvm.riscv.vfwnmacc.nxv1f32.bf16.nxv1bf16.i64(<vscale x 1 x float> [[VD]], bfloat [[VS1]], <vscale x 1 x bfloat> [[VS2]], i64 0, i64 [[VL]], i64 2)
+// CHECK-RV64-NEXT: ret <vscale x 1 x float> [[TMP0]]
+//
+vfloat32mf2_t test_vfwnmacc_vf_bf16mf4_f32mf2_rm_tu(vfloat32mf2_t vd,
+ __bf16 vs1,
+ vbfloat16mf4_t vs2,
+ size_t vl) {
+ return __riscv_vfwnmacc_tu(vd, vs1, vs2, __RISCV_FRM_RNE, vl);
+}
+
+// CHECK-RV64-LABEL: define dso_local <vscale x 2 x float> @test_vfwnmacc_vv_bf16mf2_f32m1_rm_tu(
+// CHECK-RV64-SAME: <vscale x 2 x float> [[VD:%.*]], <vscale x 2 x bfloat> [[VS1:%.*]], <vscale x 2 x bfloat> [[VS2:%.*]], i64 noundef [[VL:%.*]]) #[[ATTR0]] {
+// CHECK-RV64-NEXT: [[ENTRY:.*:]]
+// CHECK-RV64-NEXT: [[TMP0:%.*]] = call <vscale x 2 x float> @llvm.riscv.vfwnmacc.nxv2f32.nxv2bf16.nxv2bf16.i64(<vscale x 2 x float> [[VD]], <vscale x 2 x bfloat> [[VS1]], <vscale x 2 x bfloat> [[VS2]], i64 0, i64 [[VL]], i64 2)
+// CHECK-RV64-NEXT: ret <vscale x 2 x float> [[TMP0]]
+//
+vfloat32m1_t test_vfwnmacc_vv_bf16mf2_f32m1_rm_tu(vfloat32m1_t vd,
+ vbfloat16mf2_t vs1,
+ vbfloat16mf2_t vs2,
+ size_t vl) {
+ return __riscv_vfwnmacc_tu(vd, vs1, vs2, __RISCV_FRM_RNE, vl);
+}
+
+// CHECK-RV64-LABEL: define dso_local <vscale x 2 x float> @test_vfwnmacc_vf_bf16mf2_f32m1_rm_tu(
+// CHECK-RV64-SAME: <vscale x 2 x float> [[VD:%.*]], bfloat noundef [[VS1:%.*]], <vscale x 2 x bfloat> [[VS2:%.*]], i64 noundef [[VL:%.*]]) #[[ATTR0]] {
+// CHECK-RV64-NEXT: [[ENTRY:.*:]]
+// CHECK-RV64-NEXT: [[TMP0:%.*]] = call <vscale x 2 x float> @llvm.riscv.vfwnmacc.nxv2f32.bf16.nxv2bf16.i64(<vscale x 2 x float> [[VD]], bfloat [[VS1]], <vscale x 2 x bfloat> [[VS2]], i64 0, i64 [[VL]], i64 2)
+// CHECK-RV64-NEXT: ret <vscale x 2 x float> [[TMP0]]
+//
+vfloat32m1_t test_vfwnmacc_vf_bf16mf2_f32m1_rm_tu(vfloat32m1_t vd, __bf16 vs1,
+ vbfloat16mf2_t vs2,
+ size_t vl) {
+ return __riscv_vfwnmacc_tu(vd, vs1, vs2, __RISCV_FRM_RNE, vl);
+}
+
+// CHECK-RV64-LABEL: define dso_local <vscale x 4 x float> @test_vfwnmacc_vv_bf16m1_f32m2_rm_tu(
+// CHECK-RV64-SAME: <vscale x 4 x float> [[VD:%.*]], <vscale x 4 x bfloat> [[VS1:%.*]], <vscale x 4 x bfloat> [[VS2:%.*]], i64 noundef [[VL:%.*]]) #[[ATTR0]] {
+// CHECK-RV64-NEXT: [[ENTRY:.*:]]
+// CHECK-RV64-NEXT: [[TMP0:%.*]] = call <vscale x 4 x float> @llvm.riscv.vfwnmacc.nxv4f32.nxv4bf16.nxv4bf16.i64(<vscale x 4 x float> [[VD]], <vscale x 4 x bfloat> [[VS1]], <vscale x 4 x bfloat> [[VS2]], i64 0, i64 [[VL]], i64 2)
+// CHECK-RV64-NEXT: ret <vscale x 4 x float> [[TMP0]]
+//
+vfloat32m2_t test_vfwnmacc_vv_bf16m1_f32m2_rm_tu(vfloat32m2_t vd,
+ vbfloat16m1_t vs1,
+ vbfloat16m1_t vs2, size_t vl) {
+ return __riscv_vfwnmacc_tu(vd, vs1, vs2, __RISCV_FRM_RNE, vl);
+}
+
+// CHECK-RV64-LABEL: define dso_local <vscale x 4 x float> @test_vfwnmacc_vf_bf16m1_f32m2_rm_tu(
+// CHECK-RV64-SAME: <vscale x 4 x float> [[VD:%.*]], bfloat noundef [[VS1:%.*]], <vscale x 4 x bfloat> [[VS2:%.*]], i64 noundef [[VL:%.*]]) #[[ATTR0]] {
+// CHECK-RV64-NEXT: [[ENTRY:.*:]]
+// CHECK-RV64-NEXT: [[TMP0:%.*]] = call <vscale x 4 x float> @llvm.riscv.vfwnmacc.nxv4f32.bf16.nxv4bf16.i64(<vscale x 4 x float> [[VD]], bfloat [[VS1]], <vscale x 4 x bfloat> [[VS2]], i64 0, i64 [[VL]], i64 2)
+// CHECK-RV64-NEXT: ret <vscale x 4 x float> [[TMP0]]
+//
+vfloat32m2_t test_vfwnmacc_vf_bf16m1_f32m2_rm_tu(vfloat32m2_t vd, __bf16 vs1,
+ vbfloat16m1_t vs2, size_t vl) {
+ return __riscv_vfwnmacc_tu(vd, vs1, vs2, __RISCV_FRM_RNE, vl);
+}
+
+// CHECK-RV64-LABEL: define dso_local <vscale x 8 x float> @test_vfwnmacc_vv_bf16m2_f32m4_rm_tu(
+// CHECK-RV64-SAME: <vscale x 8 x float> [[VD:%.*]], <vscale x 8 x bfloat> [[VS1:%.*]], <vscale x 8 x bfloat> [[VS2:%.*]], i64 noundef [[VL:%.*]]) #[[ATTR0]] {
+// CHECK-RV64-NEXT: [[ENTRY:.*:]]
+// CHECK-RV64-NEXT: [[TMP0:%.*]] = call <vscale x 8 x float> @llvm.riscv.vfwnmacc.nxv8f32.nxv8bf16.nxv8bf16.i64(<vscale x 8 x float> [[VD]], <vscale x 8 x bfloat> [[VS1]], <vscale x 8 x bfloat> [[VS2]], i64 0, i64 [[VL]], i64 2)
+// CHECK-RV64-NEXT: ret <vscale x 8 x float> [[TMP0]]
+//
+vfloat32m4_t test_vfwnmacc_vv_bf16m2_f32m4_rm_tu(vfloat32m4_t vd,
+ vbfloat16m2_t vs1,
+ vbfloat16m2_t vs2, size_t vl) {
+ return __riscv_vfwnmacc_tu(vd, vs1, vs2, __RISCV_FRM_RNE, vl);
+}
+
+// CHECK-RV64-LABEL: define dso_local <vscale x 8 x float> @test_vfwnmacc_vf_bf16m2_f32m4_rm_tu(
+// CHECK-RV64-SAME: <vscale x 8 x float> [[VD:%.*]], bfloat noundef [[VS1:%.*]], <vscale x 8 x bfloat> [[VS2:%.*]], i64 noundef [[VL:%.*]]) #[[ATTR0]] {
+// CHECK-RV64-NEXT: [[ENTRY:.*:]]
+// CHECK-RV64-NEXT: [[TMP0:%.*]] = call <vscale x 8 x float> @llvm.riscv.vfwnmacc.nxv8f32.bf16.nxv8bf16.i64(<vscale x 8 x float> [[VD]], bfloat [[VS1]], <vscale x 8 x bfloat> [[VS2]], i64 0, i64 [[VL]], i64 2)
+// CHECK-RV64-NEXT: ret <vscale x 8 x float> [[TMP0]]
+//
+vfloat32m4_t test_vfwnmacc_vf_bf16m2_f32m4_rm_tu(vfloat32m4_t vd, __bf16 vs1,
+ vbfloat16m2_t vs2, size_t vl) {
+ return __riscv_vfwnmacc_tu(vd, vs1, vs2, __RISCV_FRM_RNE, vl);
+}
+
+// CHECK-RV64-LABEL: define dso_local <vscale x 16 x float> @test_vfwnmacc_vv_bf16m4_f32m8_rm_tu(
+// CHECK-RV64-SAME: <vscale x 16 x float> [[VD:%.*]], <vscale x 16 x bfloat> [[VS1:%.*]], <vscale x 16 x bfloat> [[VS2:%.*]], i64 noundef [[VL:%.*]]) #[[ATTR0]] {
+// CHECK-RV64-NEXT: [[ENTRY:.*:]]
+// CHECK-RV64-NEXT: [[TMP0:%.*]] = call <vscale x 16 x float> @llvm.riscv.vfwnmacc.nxv16f32.nxv16bf16.nxv16bf16.i64(<vscale x 16 x float> [[VD]], <vscale x 16 x bfloat> [[VS1]], <vscale x 16 x bfloat> [[VS2]], i64 0, i64 [[VL]], i64 2)
+// CHECK-RV64-NEXT: ret <vscale x 16 x float> [[TMP0]]
+//
+vfloat32m8_t test_vfwnmacc_vv_bf16m4_f32m8_rm_tu(vfloat32m8_t vd,
+ vbfloat16m4_t vs1,
+ vbfloat16m4_t vs2, size_t vl) {
+ return __riscv_vfwnmacc_tu(vd, vs1, vs2, __RISCV_FRM_RNE, vl);
+}
+
+// CHECK-RV64-LABEL: define dso_local <vscale x 16 x float> @test_vfwnmacc_vf_bf16m4_f32m8_rm_tu(
+// CHECK-RV64-SAME: <vscale x 16 x float> [[VD:%.*]], bfloat noundef [[VS1:%.*]], <vscale x 16 x bfloat> [[VS2:%.*]], i64 noundef [[VL:%.*]]) #[[ATTR0]] {
+// CHECK-RV64-NEXT: [[ENTRY:.*:]]
+// CHECK-RV64-NEXT: [[TMP0:%.*]] = call <vscale x 16 x float> @llvm.riscv.vfwnmacc.nxv16f32.bf16.nxv16bf16.i64(<vscale x 16 x float> [[VD]], bfloat [[VS1]], <vscale x 16 x bfloat> [[VS2]], i64 0, i64 [[VL]], i64 2)
+// CHECK-RV64-NEXT: ret <vscale x 16 x float> [[TMP0]]
+//
+vfloat32m8_t test_vfwnmacc_vf_bf16m4_f32m8_rm_tu(vfloat32m8_t vd, __bf16 vs1,
+ vbfloat16m4_t vs2, size_t vl) {
+ return __riscv_vfwnmacc_tu(vd, vs1, vs2, __RISCV_FRM_RNE, vl);
+}
+
+// CHECK-RV64-LABEL: define dso_local <vscale x 1 x float> @test_vfwnmacc_vv_bf16mf4_f32mf2_rm_tum(
+// CHECK-RV64-SAME: <vscale x 1 x i1> [[VM:%.*]], <vscale x 1 x float> [[VD:%.*]], <vscale x 1 x bfloat> [[VS1:%.*]], <vscale x 1 x bfloat> [[VS2:%.*]], i64 noundef [[VL:%.*]]) #[[ATTR0]] {
+// CHECK-RV64-NEXT: [[ENTRY:.*:]]
+// CHECK-RV64-NEXT: [[TMP0:%.*]] = call <vscale x 1 x float> @llvm.riscv.vfwnmacc.mask.nxv1f32.nxv1bf16.nxv1bf16.i64(<vscale x 1 x float> [[VD]], <vscale x 1 x bfloat> [[VS1]], <vscale x 1 x bfloat> [[VS2]], <vscale x 1 x i1> [[VM]], i64 0, i64 [[VL]], i64 2)
+// CHECK-RV64-NEXT: ret <vscale x 1 x float> [[TMP0]]
+//
+vfloat32mf2_t test_vfwnmacc_vv_bf16mf4_f32mf2_rm_tum(vbool64_t vm,
+ vfloat32mf2_t vd,
+ vbfloat16mf4_t vs1,
+ vbfloat16mf4_t vs2,
+ size_t vl) {
+ return __riscv_vfwnmacc_tum(vm, vd, vs1, vs2, __RISCV_FRM_RNE, vl);
+}
+
+// CHECK-RV64-LABEL: define dso_local <vscale x 1 x float> @test_vfwnmacc_vf_bf16mf4_f32mf2_rm_tum(
+// CHECK-RV64-SAME: <vscale x 1 x i1> [[VM:%.*]], <vscale x 1 x float> [[VD:%.*]], bfloat noundef [[VS1:%.*]], <vscale x 1 x bfloat> [[VS2:%.*]], i64 noundef [[VL:%.*]]) #[[ATTR0]] {
+// CHECK-RV64-NEXT: [[ENTRY:.*:]]
+// CHECK-RV64-NEXT: [[TMP0:%.*]] = call <vscale x 1 x float> @llvm.riscv.vfwnmacc.mask.nxv1f32.bf16.nxv1bf16.i64(<vscale x 1 x float> [[VD]], bfloat [[VS1]], <vscale x 1 x bfloat> [[VS2]], <vscale x 1 x i1> [[VM]], i64 0, i64 [[VL]], i64 2)
+// CHECK-RV64-NEXT: ret <vscale x 1 x float> [[TMP0]]
+//
+vfloat32mf2_t test_vfwnmacc_vf_bf16mf4_f32mf2_rm_tum(
+ vbool64_t vm, vfloat32mf2_t vd, __bf16 vs1, vbfloat16mf4_t vs2, size_t vl) {
+ return __riscv_vfwnmacc_tum(vm, vd, vs1, vs2, __RISCV_FRM_RNE, vl);
+}
+
+// CHECK-RV64-LABEL: define dso_local <vscale x 2 x float> @test_vfwnmacc_vv_bf16mf2_f32m1_rm_tum(
+// CHECK-RV64-SAME: <vscale x 2 x i1> [[VM:%.*]], <vscale x 2 x float> [[VD:%.*]], <vscale x 2 x bfloat> [[VS1:%.*]], <vscale x 2 x bfloat> [[VS2:%.*]], i64 noundef [[VL:%.*]]) #[[ATTR0]] {
+// CHECK-RV64-NEXT: [[ENTRY:.*:]]
+// CHECK-RV64-NEXT: [[TMP0:%.*]] = call <vscale x 2 x float> @llvm.riscv.vfwnmacc.mask.nxv2f32.nxv2bf16.nxv2bf16.i64(<vscale x 2 x float> [[VD]], <vscale x 2 x bfloat> [[VS1]], <vscale x 2 x bfloat> [[VS2]], <vscale x 2 x i1> [[VM]], i64 0, i64 [[VL]], i64 2)
+// CHECK-RV64-NEXT: ret <vscale x 2 x float> [[TMP0]]
+//
+vfloat32m1_t test_vfwnmacc_vv_bf16mf2_f32m1_rm_tum(vbool32_t vm,
+ vfloat32m1_t vd,
+ vbfloat16mf2_t vs1,
+ vbfloat16mf2_t vs2,
+ size_t vl) {
+ return __riscv_vfwnmacc_tum(vm, vd, vs1, vs2, __RISCV_FRM_RNE, vl);
+}
+
+// CHECK-RV64-LABEL: define dso_local <vscale x 2 x float> @test_vfwnmacc_vf_bf16mf2_f32m1_rm_tum(
+// CHECK-RV64-SAME: <vscale x 2 x i1> [[VM:%.*]], <vscale x 2 x float> [[VD:%.*]], bfloat noundef [[VS1:%.*]], <vscale x 2 x bfloat> [[VS2:%.*]], i64 noundef [[VL:%.*]]) #[[ATTR0]] {
+// CHECK-RV64-NEXT: [[ENTRY:.*:]]
+// CHECK-RV64-NEXT: [[TMP0:%.*]] = call <vscale x 2 x float> @llvm.riscv.vfwnmacc.mask.nxv2f32.bf16.nxv2bf16.i64(<vscale x 2 x float> [[VD]], bfloat [[VS1]], <vscale x 2 x bfloat> [[VS2]], <vscale x 2 x i1> [[VM]], i64 0, i64 [[VL]], i64 2)
+// CHECK-RV64-NEXT: ret <vscale x 2 x float> [[TMP0]]
+//
+vfloat32m1_t test_vfwnmacc_vf_bf16mf2_f32m1_rm_tum(vbool32_t vm,
+ vfloat32m1_t vd, __bf16 vs1,
+ vbfloat16mf2_t vs2,
+ size_t vl) {
+ return __riscv_vfwnmacc_tum(vm, vd, vs1, vs2, __RISCV_FRM_RNE, vl);
+}
+
+// CHECK-RV64-LABEL: define dso_local <vscale x 4 x float> @test_vfwnmacc_vv_bf16m1_f32m2_rm_tum(
+// CHECK-RV64-SAME: <vscale x 4 x i1> [[VM:%.*]], <vscale x 4 x float> [[VD:%.*]], <vscale x 4 x bfloat> [[VS1:%.*]], <vscale x 4 x bfloat> [[VS2:%.*]], i64 noundef [[VL:%.*]]) #[[ATTR0]] {
+// CHECK-RV64-NEXT: [[ENTRY:.*:]]
+// CHECK-RV64-NEXT: [[TMP0:%.*]] = call <vscale x 4 x float> @llvm.riscv.vfwnmacc.mask.nxv4f32.nxv4bf16.nxv4bf16.i64(<vscale x 4 x float> [[VD]], <vscale x 4 x bfloat> [[VS1]], <vscale x 4 x bfloat> [[VS2]], <vscale x 4 x i1> [[VM]], i64 0, i64 [[VL]], i64 2)
+// CHECK-RV64-NEXT: ret <vscale x 4 x float> [[TMP0]]
+//
+vfloat32m2_t test_vfwnmacc_vv_bf16m1_f32m2_rm_tum(vbool16_t vm, vfloat32m2_t vd,
+ vbfloat16m1_t vs1,
+ vbfloat16m1_t vs2,
+ size_t vl) {
+ return __riscv_vfwnmacc_tum(vm, vd, vs1, vs2, __RISCV_FRM_RNE, vl);
+}
+
+// CHECK-RV64-LABEL: define dso_local <vscale x 4 x float> @test_vfwnmacc_vf_bf16m1_f32m2_rm_tum(
+// CHECK-RV64-SAME: <vscale x 4 x i1> [[VM:%.*]], <vscale x 4 x float> [[VD:%.*]], bfloat noundef [[VS1:%.*]], <vscale x 4 x bfloat> [[VS2:%.*]], i64 noundef [[VL:%.*]]) #[[ATTR0]] {
+// CHECK-RV64-NEXT: [[ENTRY:.*:]]
+// CHECK-RV64-NEXT: [[TMP0:%.*]] = call <vscale x 4 x float> @llvm.riscv.vfwnmacc.mask.nxv4f32.bf16.nxv4bf16.i64(<vscale x 4 x float> [[VD]], bfloat [[VS1]], <vscale x 4 x bfloat> [[VS2]], <vscale x 4 x i1> [[VM]], i64 0, i64 [[VL]], i64 2)
+// CHECK-RV64-NEXT: ret <vscale x 4 x float> [[TMP0]]
+//
+vfloat32m2_t test_vfwnmacc_vf_bf16m1_f32m2_rm_tum(vbool16_t vm, vfloat32m2_t vd,
+ __bf16 vs1, vbfloat16m1_t vs2,
+ size_t vl) {
+ return __riscv_vfwnmacc_tum(vm, vd, vs1, vs2, __RISCV_FRM_RNE, vl);
+}
+
+// CHECK-RV64-LABEL: define dso_local <vscale x 8 x float> @test_vfwnmacc_vv_bf16m2_f32m4_rm_tum(
+// CHECK-RV64-SAME: <vscale x 8 x i1> [[VM:%.*]], <vscale x 8 x float> [[VD:%.*]], <vscale x 8 x bfloat> [[VS1:%.*]], <vscale x 8 x bfloat> [[VS2:%.*]], i64 noundef [[VL:%.*]]) #[[ATTR0]] {
+// CHECK-RV64-NEXT: [[ENTRY:.*:]]
+// CHECK-RV64-NEXT: [[TMP0:%.*]] = call <vscale x 8 x float> @llvm.riscv.vfwnmacc.mask.nxv8f32.nxv8bf16.nxv8bf16.i64(<vscale x 8 x float> [[VD]], <vscale x 8 x bfloat> [[VS1]], <vscale x 8 x bfloat> [[VS2]], <vscale x 8 x i1> [[VM]], i64 0, i64 [[VL]], i64 2)
+// CHECK-RV64-NEXT: ret <vscale x 8 x float> [[TMP0]]
+//
+vfloat32m4_t test_vfwnmacc_vv_bf16m2_f32m4_rm_tum(vbool8_t vm, vfloat32m4_t vd,
+ vbfloat16m2_t vs1,
+ vbfloat16m2_t vs2,
+ size_t vl) {
+ return __riscv_vfwnmacc_tum(vm, vd, vs1, vs2, __RISCV_FRM_RNE, vl);
+}
+
+// CHECK-RV64-LABEL: define dso_local <vscale x 8 x float> @test_vfwnmacc_vf_bf16m2_f32m4_rm_tum(
+// CHECK-RV64-SAME: <vscale x 8 x i1> [[VM:%.*]], <vscale x 8 x float> [[VD:%.*]], bfloat noundef [[VS1:%.*]], <vscale x 8 x bfloat> [[VS2:%.*]], i64 noundef [[VL:%.*]]) #[[ATTR0]] {
+// CHECK-RV64-NEXT: [[ENTRY:.*:]]
+// CHECK-RV64-NEXT: [[TMP0:%.*]] = call <vscale x 8 x float> @llvm.riscv.vfwnmacc.mask.nxv8f32.bf16.nxv8bf16.i64(<vscale x 8 x float> [[VD]], bfloat [[VS1]], <vscale x 8 x bfloat> [[VS2]], <vscale x 8 x i1> [[VM]], i64 0, i64 [[VL]], i64 2)
+// CHECK-RV64-NEXT: ret <vscale x 8 x float> [[TMP0]]
+//
+vfloat32m4_t test_vfwnmacc_vf_bf16m2_f32m4_rm_tum(vbool8_t vm, vfloat32m4_t vd,
+ __bf16 vs1, vbfloat16m2_t vs2,
+ size_t vl) {
+ return __riscv_vfwnmacc_tum(vm, vd, vs1, vs2, __RISCV_FRM_RNE, vl);
+}
+
+// CHECK-RV64-LABEL: define dso_local <vscale x 16 x float> @test_vfwnmacc_vv_bf16m4_f32m8_rm_tum(
+// CHECK-RV64-SAME: <vscale x 16 x i1> [[VM:%.*]], <vscale x 16 x float> [[VD:%.*]], <vscale x 16 x bfloat> [[VS1:%.*]], <vscale x 16 x bfloat> [[VS2:%.*]], i64 noundef [[VL:%.*]]) #[[ATTR0]] {
+// CHECK-RV64-NEXT: [[ENTRY:.*:]]
+// CHECK-RV64-NEXT: [[TMP0:%.*]] = call <vscale x 16 x float> @llvm.riscv.vfwnmacc.mask.nxv16f32.nxv16bf16.nxv16bf16.i64(<vscale x 16 x float> [[VD]], <vscale x 16 x bfloat> [[VS1]], <vscale x 16 x bfloat> [[VS2]], <vscale x 16 x i1> [[VM]], i64 0, i64 [[VL]], i64 2)
+// CHECK-RV64-NEXT: ret <vscale x 16 x float> [[TMP0]]
+//
+vfloat32m8_t test_vfwnmacc_vv_bf16m4_f32m8_rm_tum(vbool4_t vm, vfloat32m8_t vd,
+ vbfloat16m4_t vs1,
+ vbfloat16m4_t vs2,
+ size_t vl) {
+ return __riscv_vfwnmacc_tum(vm, vd, vs1, vs2, __RISCV_FRM_RNE, vl);
+}
+
+// CHECK-RV64-LABEL: define dso_local <vscale x 16 x float> @test_vfwnmacc_vf_bf16m4_f32m8_rm_tum(
+// CHECK-RV64-SAME: <vscale x 16 x i1> [[VM:%.*]], <vscale x 16 x float> [[VD:%.*]], bfloat noundef [[VS1:%.*]], <vscale x 16 x bfloat> [[VS2:%.*]], i64 noundef [[VL:%.*]]) #[[ATTR0]] {
+// CHECK-RV64-NEXT: [[ENTRY:.*:]]
+// CHECK-RV64-NEXT: [[TMP0:%.*]] = call <vscale x 16 x float> @llvm.riscv.vfwnmacc.mask.nxv16f32.bf16.nxv16bf16.i64(<vscale x 16 x float> [[VD]], bfloat [[VS1]], <vscale x 16 x bfloat> [[VS2]], <vscale x 16 x i1> [[VM]], i64 0, i64 [[VL]], i64 2)
+// CHECK-RV64-NEXT: ret <vscale x 16 x float> [[TMP0]]
+//
+vfloat32m8_t test_vfwnmacc_vf_bf16m4_f32m8_rm_tum(vbool4_t vm, vfloat32m8_t vd,
+ __bf16 vs1, vbfloat16m4_t vs2,
+ size_t vl) {
+ return __riscv_vfwnmacc_tum(vm, vd, vs1, vs2, __RISCV_FRM_RNE, vl);
+}
+
+// CHECK-RV64-LABEL: define dso_local <vscale x 1 x float> @test_vfwnmacc_vv_bf16mf4_f32mf2_rm_tumu(
+// CHECK-RV64-SAME: <vscale x 1 x i1> [[VM:%.*]], <vscale x 1 x float> [[VD:%.*]], <vscale x 1 x bfloat> [[VS1:%.*]], <vscale x 1 x bfloat> [[VS2:%.*]], i64 noundef [[VL:%.*]]) #[[ATTR0]] {
+// CHECK-RV64-NEXT: [[ENTRY:.*:]]
+// CHECK-RV64-NEXT: [[TMP0:%.*]] = call <vscale x 1 x float> @llvm.riscv.vfwnmacc.mask.nxv1f32.nxv1bf16.nxv1bf16.i64(<vscale x 1 x float> [[VD]], <vscale x 1 x bfloat> [[VS1]], <vscale x 1 x bfloat> [[VS2]], <vscale x 1 x i1> [[VM]], i64 0, i64 [[VL]], i64 0)
+// CHECK-RV64-NEXT: ret <vscale x 1 x float> [[TMP0]]
+//
+vfloat32mf2_t test_vfwnmacc_vv_bf16mf4_f32mf2_rm_tumu(vbool64_t vm,
+ vfloat32mf2_t vd,
+ vbfloat16mf4_t vs1,
+ vbfloat16mf4_t vs2,
+ size_t vl) {
+ return __riscv_vfwnmacc_tumu(vm, vd, vs1, vs2, __RISCV_FRM_RNE, vl);
+}
+
+// CHECK-RV64-LABEL: define dso_local <vscale x 1 x float> @test_vfwnmacc_vf_bf16mf4_f32mf2_rm_tumu(
+// CHECK-RV64-SAME: <vscale x 1 x i1> [[VM:%.*]], <vscale x 1 x float> [[VD:%.*]], bfloat noundef [[VS1:%.*]], <vscale x 1 x bfloat> [[VS2:%.*]], i64 noundef [[VL:%.*]]) #[[ATTR0]] {
+// CHECK-RV64-NEXT: [[ENTRY:.*:]]
+// CHECK-RV64-NEXT: [[TMP0:%.*]] = call <vscale x 1 x float> @llvm.riscv.vfwnmacc.mask.nxv1f32.bf16.nxv1bf16.i64(<vscale x 1 x float> [[VD]], bfloat [[VS1]], <vscale x 1 x bfloat> [[VS2]], <vscale x 1 x i1> [[VM]], i64 0, i64 [[VL]], i64 0)
+// CHECK-RV64-NEXT: ret <vscale x 1 x float> [[TMP0]]
+//
+vfloat32mf2_t test_vfwnmacc_vf_bf16mf4_f32mf2_rm_tumu(
+ vbool64_t vm, vfloat32mf2_t vd, __bf16 vs1, vbfloat16mf4_t vs2, size_t vl) {
+ return __riscv_vfwnmacc_tumu(vm, vd, vs1, vs2, __RISCV_FRM_RNE, vl);
+}
+
+// CHECK-RV64-LABEL: define dso_local <vscale x 2 x float> @test_vfwnmacc_vv_bf16mf2_f32m1_rm_tumu(
+// CHECK-RV64-SAME: <vscale x 2 x i1> [[VM:%.*]], <vscale x 2 x float> [[VD:%.*]], <vscale x 2 x bfloat> [[VS1:%.*]], <vscale x 2 x bfloat> [[VS2:%.*]], i64 noundef [[VL:%.*]]) #[[ATTR0]] {
+// CHECK-RV64-NEXT: [[ENTRY:.*:]]
+// CHECK-RV64-NEXT: [[TMP0:%.*]] = call <vscale x 2 x float> @llvm.riscv.vfwnmacc.mask.nxv2f32.nxv2bf16.nxv2bf16.i64(<vscale x 2 x float> [[VD]], <vscale x 2 x bfloat> [[VS1]], <vscale x 2 x bfloat> [[VS2]], <vscale x 2 x i1> [[VM]], i64 0, i64 [[VL]], i64 0)
+// CHECK-RV64-NEXT: ret <vscale x 2 x float> [[TMP0]]
+//
+vfloat32m1_t test_vfwnmacc_vv_bf16mf2_f32m1_rm_tumu(vbool32_t vm,
+ vfloat32m1_t vd,
+ vbfloat16mf2_t vs1,
+ vbfloat16mf2_t vs2,
+ size_t vl) {
+ return __riscv_vfwnmacc_tumu(vm, vd, vs1, vs2, __RISCV_FRM_RNE, vl);
+}
+
+// CHECK-RV64-LABEL: define dso_local <vscale x 2 x float> @test_vfwnmacc_vf_bf16mf2_f32m1_rm_tumu(
+// CHECK-RV64-SAME: <vscale x 2 x i1> [[VM:%.*]], <vscale x 2 x float> [[VD:%.*]], bfloat noundef [[VS1:%.*]], <vscale x 2 x bfloat> [[VS2:%.*]], i64 noundef [[VL:%.*]]) #[[ATTR0]] {
+// CHECK-RV64-NEXT: [[ENTRY:.*:]]
+// CHECK-RV64-NEXT: [[TMP0:%.*]] = call <vscale x 2 x float> @llvm.riscv.vfwnmacc.mask.nxv2f32.bf16.nxv2bf16.i64(<vscale x 2 x float> [[VD]], bfloat [[VS1]], <vscale x 2 x bfloat> [[VS2]], <vscale x 2 x i1> [[VM]], i64 0, i64 [[VL]], i64 0)
+// CHECK-RV64-NEXT: ret <vscale x 2 x float> [[TMP0]]
+//
+vfloat32m1_t test_vfwnmacc_vf_bf16mf2_f32m1_rm_tumu(vbool32_t vm,
+ vfloat32m1_t vd, __bf16 vs1,
+ vbfloat16mf2_t vs2,
+ size_t vl) {
+ return __riscv_vfwnmacc_tumu(vm, vd, vs1, vs2, __RISCV_FRM_RNE, vl);
+}
+
+// CHECK-RV64-LABEL: define dso_local <vscale x 4 x float> @test_vfwnmacc_vv_bf16m1_f32m2_rm_tumu(
+// CHECK-RV64-SAME: <vscale x 4 x i1> [[VM:%.*]], <vscale x 4 x float> [[VD:%.*]], <vscale x 4 x bfloat> [[VS1:%.*]], <vscale x 4 x bfloat> [[VS2:%.*]], i64 noundef [[VL:%.*]]) #[[ATTR0]] {
+// CHECK-RV64-NEXT: [[ENTRY:.*:]]
+// CHECK-RV64-NEXT: [[TMP0:%.*]] = call <vscale x 4 x float> @llvm.riscv.vfwnmacc.mask.nxv4f32.nxv4bf16.nxv4bf16.i64(<vscale x 4 x float> [[VD]], <vscale x 4 x bfloat> [[VS1]], <vscale x 4 x bfloat> [[VS2]], <vscale x 4 x i1> [[VM]], i64 0, i64 [[VL]], i64 0)
+// CHECK-RV64-NEXT: ret <vscale x 4 x float> [[TMP0]]
+//
+vfloat32m2_t test_vfwnmacc_vv_bf16m1_f32m2_rm_tumu(vbool16_t vm,
+ vfloat32m2_t vd,
+ vbfloat16m1_t vs1,
+ vbfloat16m1_t vs2,
+ size_t vl) {
+ return __riscv_vfwnmacc_tumu(vm, vd, vs1, vs2, __RISCV_FRM_RNE, vl);
+}
+
+// CHECK-RV64-LABEL: define dso_local <vscale x 4 x float> @test_vfwnmacc_vf_bf16m1_f32m2_rm_tumu(
+// CHECK-RV64-SAME: <vscale x 4 x i1> [[VM:%.*]], <vscale x 4 x float> [[VD:%.*]], bfloat noundef [[VS1:%.*]], <vscale x 4 x bfloat> [[VS2:%.*]], i64 noundef [[VL:%.*]]) #[[ATTR0]] {
+// CHECK-RV64-NEXT: [[ENTRY:.*:]]
+// CHECK-RV64-NEXT: [[TMP0:%.*]] = call <vscale x 4 x float> @llvm.riscv.vfwnmacc.mask.nxv4f32.bf16.nxv4bf16.i64(<vscale x 4 x float> [[VD]], bfloat [[VS1]], <vscale x 4 x bfloat> [[VS2]], <vscale x 4 x i1> [[VM]], i64 0, i64 [[VL]], i64 0)
+// CHECK-RV64-NEXT: ret <vscale x 4 x float> [[TMP0]]
+//
+vfloat32m2_t test_vfwnmacc_vf_bf16m1_f32m2_rm_tumu(vbool16_t vm,
+ vfloat32m2_t vd, __bf16 vs1,
+ vbfloat16m1_t vs2,
+ size_t vl) {
+ return __riscv_vfwnmacc_tumu(vm, vd, vs1, vs2, __RISCV_FRM_RNE, vl);
+}
+
+// CHECK-RV64-LABEL: define dso_local <vscale x 8 x float> @test_vfwnmacc_vv_bf16m2_f32m4_rm_tumu(
+// CHECK-RV64-SAME: <vscale x 8 x i1> [[VM:%.*]], <vscale x 8 x float> [[VD:%.*]], <vscale x 8 x bfloat> [[VS1:%.*]], <vscale x 8 x bfloat> [[VS2:%.*]], i64 noundef [[VL:%.*]]) #[[ATTR0]] {
+// CHECK-RV64-NEXT: [[ENTRY:.*:]]
+// CHECK-RV64-NEXT: [[TMP0:%.*]] = call <vscale x 8 x float> @llvm.riscv.vfwnmacc.mask.nxv8f32.nxv8bf16.nxv8bf16.i64(<vscale x 8 x float> [[VD]], <vscale x 8 x bfloat> [[VS1]], <vscale x 8 x bfloat> [[VS2]], <vscale x 8 x i1> [[VM]], i64 0, i64 [[VL]], i64 0)
+// CHECK-RV64-NEXT: ret <vscale x 8 x float> [[TMP0]]
+//
+vfloat32m4_t test_vfwnmacc_vv_bf16m2_f32m4_rm_tumu(vbool8_t vm, vfloat32m4_t vd,
+ vbfloat16m2_t vs1,
+ vbfloat16m2_t vs2,
+ size_t vl) {
+ return __riscv_vfwnmacc_tumu(vm, vd, vs1, vs2, __RISCV_FRM_RNE, vl);
+}
+
+// CHECK-RV64-LABEL: define dso_local <vscale x 8 x float> @test_vfwnmacc_vf_bf16m2_f32m4_rm_tumu(
+// CHECK-RV64-SAME: <vscale x 8 x i1> [[VM:%.*]], <vscale x 8 x float> [[VD:%.*]], bfloat noundef [[VS1:%.*]], <vscale x 8 x bfloat> [[VS2:%.*]], i64 noundef [[VL:%.*]]) #[[ATTR0]] {
+// CHECK-RV64-NEXT: [[ENTRY:.*:]]
+// CHECK-RV64-NEXT: [[TMP0:%.*]] = call <vscale x 8 x float> @llvm.riscv.vfwnmacc.mask.nxv8f32.bf16.nxv8bf16.i64(<vscale x 8 x float> [[VD]], bfloat [[VS1]], <vscale x 8 x bfloat> [[VS2]], <vscale x 8 x i1> [[VM]], i64 0, i64 [[VL]], i64 0)
+// CHECK-RV64-NEXT: ret <vscale x 8 x float> [[TMP0]]
+//
+vfloat32m4_t test_vfwnmacc_vf_bf16m2_f32m4_rm_tumu(vbool8_t vm, vfloat32m4_t vd,
+ __bf16 vs1,
+ vbfloat16m2_t vs2,
+ size_t vl) {
+ return __riscv_vfwnmacc_tumu(vm, vd, vs1, vs2, __RISCV_FRM_RNE, vl);
+}
+
+// CHECK-RV64-LABEL: define dso_local <vscale x 16 x float> @test_vfwnmacc_vv_bf16m4_f32m8_rm_tumu(
+// CHECK-RV64-SAME: <vscale x 16 x i1> [[VM:%.*]], <vscale x 16 x float> [[VD:%.*]], <vscale x 16 x bfloat> [[VS1:%.*]], <vscale x 16 x bfloat> [[VS2:%.*]], i64 noundef [[VL:%.*]]) #[[ATTR0]] {
+// CHECK-RV64-NEXT: [[ENTRY:.*:]]
+// CHECK-RV64-NEXT: [[TMP0:%.*]] = call <vscale x 16 x float> @llvm.riscv.vfwnmacc.mask.nxv16f32.nxv16bf16.nxv16bf16.i64(<vscale x 16 x float> [[VD]], <vscale x 16 x bfloat> [[VS1]], <vscale x 16 x bfloat> [[VS2]], <vscale x 16 x i1> [[VM]], i64 0, i64 [[VL]], i64 0)
+// CHECK-RV64-NEXT: ret <vscale x 16 x float> [[TMP0]]
+//
+vfloat32m8_t test_vfwnmacc_vv_bf16m4_f32m8_rm_tumu(vbool4_t vm, vfloat32m8_t vd,
+ vbfloat16m4_t vs1,
+ vbfloat16m4_t vs2,
+ size_t vl) {
+ return __riscv_vfwnmacc_tumu(vm, vd, vs1, vs2, __RISCV_FRM_RNE, vl);
+}
+
+// CHECK-RV64-LABEL: define dso_local <vscale x 16 x float> @test_vfwnmacc_vf_bf16m4_f32m8_rm_tumu(
+// CHECK-RV64-SAME: <vscale x 16 x i1> [[VM:%.*]], <vscale x 16 x float> [[VD:%.*]], bfloat noundef [[VS1:%.*]], <vscale x 16 x bfloat> [[VS2:%.*]], i64 noundef [[VL:%.*]]) #[[ATTR0]] {
+// CHECK-RV64-NEXT: [[ENTRY:.*:]]
+// CHECK-RV64-NEXT: [[TMP0:%.*]] = call <vscale x 16 x float> @llvm.riscv.vfwnmacc.mask.nxv16f32.bf16.nxv16bf16.i64(<vscale x 16 x float> [[VD]], bfloat [[VS1]], <vscale x 16 x bfloat> [[VS2]], <vscale x 16 x i1> [[VM]], i64 0, i64 [[VL]], i64 0)
+// CHECK-RV64-NEXT: ret <vscale x 16 x float> [[TMP0]]
+//
+vfloat32m8_t test_vfwnmacc_vf_bf16m4_f32m8_rm_tumu(vbool4_t vm, vfloat32m8_t vd,
+ __bf16 vs1,
+ vbfloat16m4_t vs2,
+ size_t vl) {
+ return __riscv_vfwnmacc_tumu(vm, vd, vs1, vs2, __RISCV_FRM_RNE, vl);
+}
+
+// CHECK-RV64-LABEL: define dso_local <vscale x 1 x float> @test_vfwnmacc_vv_bf16mf4_f32mf2_rm_mu(
+// CHECK-RV64-SAME: <vscale x 1 x i1> [[VM:%.*]], <vscale x 1 x float> [[VD:%.*]], <vscale x 1 x bfloat> [[VS1:%.*]], <vscale x 1 x bfloat> [[VS2:%.*]], i64 noundef [[VL:%.*]]) #[[ATTR0]] {
+// CHECK-RV64-NEXT: [[ENTRY:.*:]]
+// CHECK-RV64-NEXT: [[TMP0:%.*]] = call <vscale x 1 x float> @llvm.riscv.vfwnmacc.mask.nxv1f32.nxv1bf16.nxv1bf16.i64(<vscale x 1 x float> [[VD]], <vscale x 1 x bfloat> [[VS1]], <vscale x 1 x bfloat> [[VS2]], <vscale x 1 x i1> [[VM]], i64 0, i64 [[VL]], i64 1)
+// CHECK-RV64-NEXT: ret <vscale x 1 x float> [[TMP0]]
+//
+vfloat32mf2_t test_vfwnmacc_vv_bf16mf4_f32mf2_rm_mu(vbool64_t vm,
+ vfloat32mf2_t vd,
+ vbfloat16mf4_t vs1,
+ vbfloat16mf4_t vs2,
+ size_t vl) {
+ return __riscv_vfwnmacc_mu(vm, vd, vs1, vs2, __RISCV_FRM_RNE, vl);
+}
+
+// CHECK-RV64-LABEL: define dso_local <vscale x 1 x float> @test_vfwnmacc_vf_bf16mf4_f32mf2_rm_mu(
+// CHECK-RV64-SAME: <vscale x 1 x i1> [[VM:%.*]], <vscale x 1 x float> [[VD:%.*]], bfloat noundef [[VS1:%.*]], <vscale x 1 x bfloat> [[VS2:%.*]], i64 noundef [[VL:%.*]]) #[[ATTR0]] {
+// CHECK-RV64-NEXT: [[ENTRY:.*:]]
+// CHECK-RV64-NEXT: [[TMP0:%.*]] = call <vscale x 1 x float> @llvm.riscv.vfwnmacc.mask.nxv1f32.bf16.nxv1bf16.i64(<vscale x 1 x float> [[VD]], bfloat [[VS1]], <vscale x 1 x bfloat> [[VS2]], <vscale x 1 x i1> [[VM]], i64 0, i64 [[VL]], i64 1)
+// CHECK-RV64-NEXT: ret <vscale x 1 x float> [[TMP0]]
+//
+vfloat32mf2_t test_vfwnmacc_vf_bf16mf4_f32mf2_rm_mu(
+ vbool64_t vm, vfloat32mf2_t vd, __bf16 vs1, vbfloat16mf4_t vs2, size_t vl) {
+ return __riscv_vfwnmacc_mu(vm, vd, vs1, vs2, __RISCV_FRM_RNE, vl);
+}
+
+// CHECK-RV64-LABEL: define dso_local <vscale x 2 x float> @test_vfwnmacc_vv_bf16mf2_f32m1_rm_mu(
+// CHECK-RV64-SAME: <vscale x 2 x i1> [[VM:%.*]], <vscale x 2 x float> [[VD:%.*]], <vscale x 2 x bfloat> [[VS1:%.*]], <vscale x 2 x bfloat> [[VS2:%.*]], i64 noundef [[VL:%.*]]) #[[ATTR0]] {
+// CHECK-RV64-NEXT: [[ENTRY:.*:]]
+// CHECK-RV64-NEXT: [[TMP0:%.*]] = call <vscale x 2 x float> @llvm.riscv.vfwnmacc.mask.nxv2f32.nxv2bf16.nxv2bf16.i64(<vscale x 2 x float> [[VD]], <vscale x 2 x bfloat> [[VS1]], <vscale x 2 x bfloat> [[VS2]], <vscale x 2 x i1> [[VM]], i64 0, i64 [[VL]], i64 1)
+// CHECK-RV64-NEXT: ret <vscale x 2 x float> [[TMP0]]
+//
+vfloat32m1_t test_vfwnmacc_vv_bf16mf2_f32m1_rm_mu(vbool32_t vm, vfloat32m1_t vd,
+ vbfloat16mf2_t vs1,
+ vbfloat16mf2_t vs2,
+ size_t vl) {
+ return __riscv_vfwnmacc_mu(vm, vd, vs1, vs2, __RISCV_FRM_RNE, vl);
+}
+
+// CHECK-RV64-LABEL: define dso_local <vscale x 2 x float> @test_vfwnmacc_vf_bf16mf2_f32m1_rm_mu(
+// CHECK-RV64-SAME: <vscale x 2 x i1> [[VM:%.*]], <vscale x 2 x float> [[VD:%.*]], bfloat noundef [[VS1:%.*]], <vscale x 2 x bfloat> [[VS2:%.*]], i64 noundef [[VL:%.*]]) #[[ATTR0]] {
+// CHECK-RV64-NEXT: [[ENTRY:.*:]]
+// CHECK-RV64-NEXT: [[TMP0:%.*]] = call <vscale x 2 x float> @llvm.riscv.vfwnmacc.mask.nxv2f32.bf16.nxv2bf16.i64(<vscale x 2 x float> [[VD]], bfloat [[VS1]], <vscale x 2 x bfloat> [[VS2]], <vscale x 2 x i1> [[VM]], i64 0, i64 [[VL]], i64 1)
+// CHECK-RV64-NEXT: ret <vscale x 2 x float> [[TMP0]]
+//
+vfloat32m1_t test_vfwnmacc_vf_bf16mf2_f32m1_rm_mu(vbool32_t vm, vfloat32m1_t vd,
+ __bf16 vs1,
+ vbfloat16mf2_t vs2,
+ size_t vl) {
+ return __riscv_vfwnmacc_mu(vm, vd, vs1, vs2, __RISCV_FRM_RNE, vl);
+}
+
+// CHECK-RV64-LABEL: define dso_local <vscale x 4 x float> @test_vfwnmacc_vv_bf16m1_f32m2_rm_mu(
+// CHECK-RV64-SAME: <vscale x 4 x i1> [[VM:%.*]], <vscale x 4 x float> [[VD:%.*]], <vscale x 4 x bfloat> [[VS1:%.*]], <vscale x 4 x bfloat> [[VS2:%.*]], i64 noundef [[VL:%.*]]) #[[ATTR0]] {
+// CHECK-RV64-NEXT: [[ENTRY:.*:]]
+// CHECK-RV64-NEXT: [[TMP0:%.*]] = call <vscale x 4 x float> @llvm.riscv.vfwnmacc.mask.nxv4f32.nxv4bf16.nxv4bf16.i64(<vscale x 4 x float> [[VD]], <vscale x 4 x bfloat> [[VS1]], <vscale x 4 x bfloat> [[VS2]], <vscale x 4 x i1> [[VM]], i64 0, i64 [[VL]], i64 1)
+// CHECK-RV64-NEXT: ret <vscale x 4 x float> [[TMP0]]
+//
+vfloat32m2_t test_vfwnmacc_vv_bf16m1_f32m2_rm_mu(vbool16_t vm, vfloat32m2_t vd,
+ vbfloat16m1_t vs1,
+ vbfloat16m1_t vs2, size_t vl) {
+ return __riscv_vfwnmacc_mu(vm, vd, vs1, vs2, __RISCV_FRM_RNE, vl);
+}
+
+// CHECK-RV64-LABEL: define dso_local <vscale x 4 x float> @test_vfwnmacc_vf_bf16m1_f32m2_rm_mu(
+// CHECK-RV64-SAME: <vscale x 4 x i1> [[VM:%.*]], <vscale x 4 x float> [[VD:%.*]], bfloat noundef [[VS1:%.*]], <vscale x 4 x bfloat> [[VS2:%.*]], i64 noundef [[VL:%.*]]) #[[ATTR0]] {
+// CHECK-RV64-NEXT: [[ENTRY:.*:]]
+// CHECK-RV64-NEXT: [[TMP0:%.*]] = call <vscale x 4 x float> @llvm.riscv.vfwnmacc.mask.nxv4f32.bf16.nxv4bf16.i64(<vscale x 4 x float> [[VD]], bfloat [[VS1]], <vscale x 4 x bfloat> [[VS2]], <vscale x 4 x i1> [[VM]], i64 0, i64 [[VL]], i64 1)
+// CHECK-RV64-NEXT: ret <vscale x 4 x float> [[TMP0]]
+//
+vfloat32m2_t test_vfwnmacc_vf_bf16m1_f32m2_rm_mu(vbool16_t vm, vfloat32m2_t vd,
+ __bf16 vs1, vbfloat16m1_t vs2,
+ size_t vl) {
+ return __riscv_vfwnmacc_mu(vm, vd, vs1, vs2, __RISCV_FRM_RNE, vl);
+}
+
+// CHECK-RV64-LABEL: define dso_local <vscale x 8 x float> @test_vfwnmacc_vv_bf16m2_f32m4_rm_mu(
+// CHECK-RV64-SAME: <vscale x 8 x i1> [[VM:%.*]], <vscale x 8 x float> [[VD:%.*]], <vscale x 8 x bfloat> [[VS1:%.*]], <vscale x 8 x bfloat> [[VS2:%.*]], i64 noundef [[VL:%.*]]) #[[ATTR0]] {
+// CHECK-RV64-NEXT: [[ENTRY:.*:]]
+// CHECK-RV64-NEXT: [[TMP0:%.*]] = call <vscale x 8 x float> @llvm.riscv.vfwnmacc.mask.nxv8f32.nxv8bf16.nxv8bf16.i64(<vscale x 8 x float> [[VD]], <vscale x 8 x bfloat> [[VS1]], <vscale x 8 x bfloat> [[VS2]], <vscale x 8 x i1> [[VM]], i64 0, i64 [[VL]], i64 1)
+// CHECK-RV64-NEXT: ret <vscale x 8 x float> [[TMP0]]
+//
+vfloat32m4_t test_vfwnmacc_vv_bf16m2_f32m4_rm_mu(vbool8_t vm, vfloat32m4_t vd,
+ vbfloat16m2_t vs1,
+ vbfloat16m2_t vs2, size_t vl) {
+ return __riscv_vfwnmacc_mu(vm, vd, vs1, vs2, __RISCV_FRM_RNE, vl);
+}
+
+// CHECK-RV64-LABEL: define dso_local <vscale x 8 x float> @test_vfwnmacc_vf_bf16m2_f32m4_rm_mu(
+// CHECK-RV64-SAME: <vscale x 8 x i1> [[VM:%.*]], <vscale x 8 x float> [[VD:%.*]], bfloat noundef [[VS1:%.*]], <vscale x 8 x bfloat> [[VS2:%.*]], i64 noundef [[VL:%.*]]) #[[ATTR0]] {
+// CHECK-RV64-NEXT: [[ENTRY:.*:]]
+// CHECK-RV64-NEXT: [[TMP0:%.*]] = call <vscale x 8 x float> @llvm.riscv.vfwnmacc.mask.nxv8f32.bf16.nxv8bf16.i64(<vscale x 8 x float> [[VD]], bfloat [[VS1]], <vscale x 8 x bfloat> [[VS2]], <vscale x 8 x i1> [[VM]], i64 0, i64 [[VL]], i64 1)
+// CHECK-RV64-NEXT: ret <vscale x 8 x float> [[TMP0]]
+//
+vfloat32m4_t test_vfwnmacc_vf_bf16m2_f32m4_rm_mu(vbool8_t vm, vfloat32m4_t vd,
+ __bf16 vs1, vbfloat16m2_t vs2,
+ size_t vl) {
+ return __riscv_vfwnmacc_mu(vm, vd, vs1, vs2, __RISCV_FRM_RNE, vl);
+}
+
+// CHECK-RV64-LABEL: define dso_local <vscale x 16 x float> @test_vfwnmacc_vv_bf16m4_f32m8_rm_mu(
+// CHECK-RV64-SAME: <vscale x 16 x i1> [[VM:%.*]], <vscale x 16 x float> [[VD:%.*]], <vscale x 16 x bfloat> [[VS1:%.*]], <vscale x 16 x bfloat> [[VS2:%.*]], i64 noundef [[VL:%.*]]) #[[ATTR0]] {
+// CHECK-RV64-NEXT: [[ENTRY:.*:]]
+// CHECK-RV64-NEXT: [[TMP0:%.*]] = call <vscale x 16 x float> @llvm.riscv.vfwnmacc.mask.nxv16f32.nxv16bf16.nxv16bf16.i64(<vscale x 16 x float> [[VD]], <vscale x 16 x bfloat> [[VS1]], <vscale x 16 x bfloat> [[VS2]], <vscale x 16 x i1> [[VM]], i64 0, i64 [[VL]], i64 1)
+// CHECK-RV64-NEXT: ret <vscale x 16 x float> [[TMP0]]
+//
+vfloat32m8_t test_vfwnmacc_vv_bf16m4_f32m8_rm_mu(vbool4_t vm, vfloat32m8_t vd,
+ vbfloat16m4_t vs1,
+ vbfloat16m4_t vs2, size_t vl) {
+ return __riscv_vfwnmacc_mu(vm, vd, vs1, vs2, __RISCV_FRM_RNE, vl);
+}
+
+// CHECK-RV64-LABEL: define dso_local <vscale x 16 x float> @test_vfwnmacc_vf_bf16m4_f32m8_rm_mu(
+// CHECK-RV64-SAME: <vscale x 16 x i1> [[VM:%.*]], <vscale x 16 x float> [[VD:%.*]], bfloat noundef [[VS1:%.*]], <vscale x 16 x bfloat> [[VS2:%.*]], i64 noundef [[VL:%.*]]) #[[ATTR0]] {
+// CHECK-RV64-NEXT: [[ENTRY:.*:]]
+// CHECK-RV64-NEXT: [[TMP0:%.*]] = call <vscale x 16 x float> @llvm.riscv.vfwnmacc.mask.nxv16f32.bf16.nxv16bf16.i64(<vscale x 16 x float> [[VD]], bfloat [[VS1]], <vscale x 16 x bfloat> [[VS2]], <vscale x 16 x i1> [[VM]], i64 0, i64 [[VL]], i64 1)
+// CHECK-RV64-NEXT: ret <vscale x 16 x float> [[TMP0]]
+//
+vfloat32m8_t test_vfwnmacc_vf_bf16m4_f32m8_rm_mu(vbool4_t vm, vfloat32m8_t vd,
+ __bf16 vs1, vbfloat16m4_t vs2,
+ size_t vl) {
+ return __riscv_vfwnmacc_mu(vm, vd, vs1, vs2, __RISCV_FRM_RNE, vl);
+}
diff --git a/clang/test/CodeGen/RISCV/rvv-intrinsics-autogenerated/zvfbfa/policy/overloaded/vfwnmsac.c b/clang/test/CodeGen/RISCV/rvv-intrinsics-autogenerated/zvfbfa/policy/overloaded/vfwnmsac.c
new file mode 100644
index 0000000..42da060
--- /dev/null
+++ b/clang/test/CodeGen/RISCV/rvv-intrinsics-autogenerated/zvfbfa/policy/overloaded/vfwnmsac.c
@@ -0,0 +1,994 @@
+// NOTE: Assertions have been autogenerated by utils/update_cc_test_checks.py UTC_ARGS: --version 5
+// REQUIRES: riscv-registered-target
+// RUN: %clang_cc1 -triple riscv64 -target-feature +v \
+// RUN: -target-feature +experimental-zvfbfa -disable-O0-optnone \
+// RUN: -emit-llvm %s -o - | opt -S -passes=mem2reg | \
+// RUN: FileCheck --check-prefix=CHECK-RV64 %s
+
+#include <riscv_vector.h>
+
+// CHECK-RV64-LABEL: define dso_local <vscale x 1 x float> @test_vfwnmsac_vv_bf16mf4_f32mf2_tu(
+// CHECK-RV64-SAME: <vscale x 1 x float> [[VD:%.*]], <vscale x 1 x bfloat> [[VS1:%.*]], <vscale x 1 x bfloat> [[VS2:%.*]], i64 noundef [[VL:%.*]]) #[[ATTR0:[0-9]+]] {
+// CHECK-RV64-NEXT: [[ENTRY:.*:]]
+// CHECK-RV64-NEXT: [[TMP0:%.*]] = call <vscale x 1 x float> @llvm.riscv.vfwnmsac.nxv1f32.nxv1bf16.nxv1bf16.i64(<vscale x 1 x float> [[VD]], <vscale x 1 x bfloat> [[VS1]], <vscale x 1 x bfloat> [[VS2]], i64 7, i64 [[VL]], i64 2)
+// CHECK-RV64-NEXT: ret <vscale x 1 x float> [[TMP0]]
+//
+vfloat32mf2_t test_vfwnmsac_vv_bf16mf4_f32mf2_tu(vfloat32mf2_t vd,
+ vbfloat16mf4_t vs1,
+ vbfloat16mf4_t vs2,
+ size_t vl) {
+ return __riscv_vfwnmsac_tu(vd, vs1, vs2, vl);
+}
+
+// CHECK-RV64-LABEL: define dso_local <vscale x 1 x float> @test_vfwnmsac_vf_bf16mf4_f32mf2_tu(
+// CHECK-RV64-SAME: <vscale x 1 x float> [[VD:%.*]], bfloat noundef [[VS1:%.*]], <vscale x 1 x bfloat> [[VS2:%.*]], i64 noundef [[VL:%.*]]) #[[ATTR0]] {
+// CHECK-RV64-NEXT: [[ENTRY:.*:]]
+// CHECK-RV64-NEXT: [[TMP0:%.*]] = call <vscale x 1 x float> @llvm.riscv.vfwnmsac.nxv1f32.bf16.nxv1bf16.i64(<vscale x 1 x float> [[VD]], bfloat [[VS1]], <vscale x 1 x bfloat> [[VS2]], i64 7, i64 [[VL]], i64 2)
+// CHECK-RV64-NEXT: ret <vscale x 1 x float> [[TMP0]]
+//
+vfloat32mf2_t test_vfwnmsac_vf_bf16mf4_f32mf2_tu(vfloat32mf2_t vd, __bf16 vs1,
+ vbfloat16mf4_t vs2,
+ size_t vl) {
+ return __riscv_vfwnmsac_tu(vd, vs1, vs2, vl);
+}
+
+// CHECK-RV64-LABEL: define dso_local <vscale x 2 x float> @test_vfwnmsac_vv_bf16mf2_f32m1_tu(
+// CHECK-RV64-SAME: <vscale x 2 x float> [[VD:%.*]], <vscale x 2 x bfloat> [[VS1:%.*]], <vscale x 2 x bfloat> [[VS2:%.*]], i64 noundef [[VL:%.*]]) #[[ATTR0]] {
+// CHECK-RV64-NEXT: [[ENTRY:.*:]]
+// CHECK-RV64-NEXT: [[TMP0:%.*]] = call <vscale x 2 x float> @llvm.riscv.vfwnmsac.nxv2f32.nxv2bf16.nxv2bf16.i64(<vscale x 2 x float> [[VD]], <vscale x 2 x bfloat> [[VS1]], <vscale x 2 x bfloat> [[VS2]], i64 7, i64 [[VL]], i64 2)
+// CHECK-RV64-NEXT: ret <vscale x 2 x float> [[TMP0]]
+//
+vfloat32m1_t test_vfwnmsac_vv_bf16mf2_f32m1_tu(vfloat32m1_t vd,
+ vbfloat16mf2_t vs1,
+ vbfloat16mf2_t vs2, size_t vl) {
+ return __riscv_vfwnmsac_tu(vd, vs1, vs2, vl);
+}
+
+// CHECK-RV64-LABEL: define dso_local <vscale x 2 x float> @test_vfwnmsac_vf_bf16mf2_f32m1_tu(
+// CHECK-RV64-SAME: <vscale x 2 x float> [[VD:%.*]], bfloat noundef [[VS1:%.*]], <vscale x 2 x bfloat> [[VS2:%.*]], i64 noundef [[VL:%.*]]) #[[ATTR0]] {
+// CHECK-RV64-NEXT: [[ENTRY:.*:]]
+// CHECK-RV64-NEXT: [[TMP0:%.*]] = call <vscale x 2 x float> @llvm.riscv.vfwnmsac.nxv2f32.bf16.nxv2bf16.i64(<vscale x 2 x float> [[VD]], bfloat [[VS1]], <vscale x 2 x bfloat> [[VS2]], i64 7, i64 [[VL]], i64 2)
+// CHECK-RV64-NEXT: ret <vscale x 2 x float> [[TMP0]]
+//
+vfloat32m1_t test_vfwnmsac_vf_bf16mf2_f32m1_tu(vfloat32m1_t vd, __bf16 vs1,
+ vbfloat16mf2_t vs2, size_t vl) {
+ return __riscv_vfwnmsac_tu(vd, vs1, vs2, vl);
+}
+
+// CHECK-RV64-LABEL: define dso_local <vscale x 4 x float> @test_vfwnmsac_vv_bf16m1_f32m2_tu(
+// CHECK-RV64-SAME: <vscale x 4 x float> [[VD:%.*]], <vscale x 4 x bfloat> [[VS1:%.*]], <vscale x 4 x bfloat> [[VS2:%.*]], i64 noundef [[VL:%.*]]) #[[ATTR0]] {
+// CHECK-RV64-NEXT: [[ENTRY:.*:]]
+// CHECK-RV64-NEXT: [[TMP0:%.*]] = call <vscale x 4 x float> @llvm.riscv.vfwnmsac.nxv4f32.nxv4bf16.nxv4bf16.i64(<vscale x 4 x float> [[VD]], <vscale x 4 x bfloat> [[VS1]], <vscale x 4 x bfloat> [[VS2]], i64 7, i64 [[VL]], i64 2)
+// CHECK-RV64-NEXT: ret <vscale x 4 x float> [[TMP0]]
+//
+vfloat32m2_t test_vfwnmsac_vv_bf16m1_f32m2_tu(vfloat32m2_t vd,
+ vbfloat16m1_t vs1,
+ vbfloat16m1_t vs2, size_t vl) {
+ return __riscv_vfwnmsac_tu(vd, vs1, vs2, vl);
+}
+
+// CHECK-RV64-LABEL: define dso_local <vscale x 4 x float> @test_vfwnmsac_vf_bf16m1_f32m2_tu(
+// CHECK-RV64-SAME: <vscale x 4 x float> [[VD:%.*]], bfloat noundef [[VS1:%.*]], <vscale x 4 x bfloat> [[VS2:%.*]], i64 noundef [[VL:%.*]]) #[[ATTR0]] {
+// CHECK-RV64-NEXT: [[ENTRY:.*:]]
+// CHECK-RV64-NEXT: [[TMP0:%.*]] = call <vscale x 4 x float> @llvm.riscv.vfwnmsac.nxv4f32.bf16.nxv4bf16.i64(<vscale x 4 x float> [[VD]], bfloat [[VS1]], <vscale x 4 x bfloat> [[VS2]], i64 7, i64 [[VL]], i64 2)
+// CHECK-RV64-NEXT: ret <vscale x 4 x float> [[TMP0]]
+//
+vfloat32m2_t test_vfwnmsac_vf_bf16m1_f32m2_tu(vfloat32m2_t vd, __bf16 vs1,
+ vbfloat16m1_t vs2, size_t vl) {
+ return __riscv_vfwnmsac_tu(vd, vs1, vs2, vl);
+}
+
+// CHECK-RV64-LABEL: define dso_local <vscale x 8 x float> @test_vfwnmsac_vv_bf16m2_f32m4_tu(
+// CHECK-RV64-SAME: <vscale x 8 x float> [[VD:%.*]], <vscale x 8 x bfloat> [[VS1:%.*]], <vscale x 8 x bfloat> [[VS2:%.*]], i64 noundef [[VL:%.*]]) #[[ATTR0]] {
+// CHECK-RV64-NEXT: [[ENTRY:.*:]]
+// CHECK-RV64-NEXT: [[TMP0:%.*]] = call <vscale x 8 x float> @llvm.riscv.vfwnmsac.nxv8f32.nxv8bf16.nxv8bf16.i64(<vscale x 8 x float> [[VD]], <vscale x 8 x bfloat> [[VS1]], <vscale x 8 x bfloat> [[VS2]], i64 7, i64 [[VL]], i64 2)
+// CHECK-RV64-NEXT: ret <vscale x 8 x float> [[TMP0]]
+//
+vfloat32m4_t test_vfwnmsac_vv_bf16m2_f32m4_tu(vfloat32m4_t vd,
+ vbfloat16m2_t vs1,
+ vbfloat16m2_t vs2, size_t vl) {
+ return __riscv_vfwnmsac_tu(vd, vs1, vs2, vl);
+}
+
+// CHECK-RV64-LABEL: define dso_local <vscale x 8 x float> @test_vfwnmsac_vf_bf16m2_f32m4_tu(
+// CHECK-RV64-SAME: <vscale x 8 x float> [[VD:%.*]], bfloat noundef [[VS1:%.*]], <vscale x 8 x bfloat> [[VS2:%.*]], i64 noundef [[VL:%.*]]) #[[ATTR0]] {
+// CHECK-RV64-NEXT: [[ENTRY:.*:]]
+// CHECK-RV64-NEXT: [[TMP0:%.*]] = call <vscale x 8 x float> @llvm.riscv.vfwnmsac.nxv8f32.bf16.nxv8bf16.i64(<vscale x 8 x float> [[VD]], bfloat [[VS1]], <vscale x 8 x bfloat> [[VS2]], i64 7, i64 [[VL]], i64 2)
+// CHECK-RV64-NEXT: ret <vscale x 8 x float> [[TMP0]]
+//
+vfloat32m4_t test_vfwnmsac_vf_bf16m2_f32m4_tu(vfloat32m4_t vd, __bf16 vs1,
+ vbfloat16m2_t vs2, size_t vl) {
+ return __riscv_vfwnmsac_tu(vd, vs1, vs2, vl);
+}
+
+// CHECK-RV64-LABEL: define dso_local <vscale x 16 x float> @test_vfwnmsac_vv_bf16m4_f32m8_tu(
+// CHECK-RV64-SAME: <vscale x 16 x float> [[VD:%.*]], <vscale x 16 x bfloat> [[VS1:%.*]], <vscale x 16 x bfloat> [[VS2:%.*]], i64 noundef [[VL:%.*]]) #[[ATTR0]] {
+// CHECK-RV64-NEXT: [[ENTRY:.*:]]
+// CHECK-RV64-NEXT: [[TMP0:%.*]] = call <vscale x 16 x float> @llvm.riscv.vfwnmsac.nxv16f32.nxv16bf16.nxv16bf16.i64(<vscale x 16 x float> [[VD]], <vscale x 16 x bfloat> [[VS1]], <vscale x 16 x bfloat> [[VS2]], i64 7, i64 [[VL]], i64 2)
+// CHECK-RV64-NEXT: ret <vscale x 16 x float> [[TMP0]]
+//
+vfloat32m8_t test_vfwnmsac_vv_bf16m4_f32m8_tu(vfloat32m8_t vd,
+ vbfloat16m4_t vs1,
+ vbfloat16m4_t vs2, size_t vl) {
+ return __riscv_vfwnmsac_tu(vd, vs1, vs2, vl);
+}
+
+// CHECK-RV64-LABEL: define dso_local <vscale x 16 x float> @test_vfwnmsac_vf_bf16m4_f32m8_tu(
+// CHECK-RV64-SAME: <vscale x 16 x float> [[VD:%.*]], bfloat noundef [[VS1:%.*]], <vscale x 16 x bfloat> [[VS2:%.*]], i64 noundef [[VL:%.*]]) #[[ATTR0]] {
+// CHECK-RV64-NEXT: [[ENTRY:.*:]]
+// CHECK-RV64-NEXT: [[TMP0:%.*]] = call <vscale x 16 x float> @llvm.riscv.vfwnmsac.nxv16f32.bf16.nxv16bf16.i64(<vscale x 16 x float> [[VD]], bfloat [[VS1]], <vscale x 16 x bfloat> [[VS2]], i64 7, i64 [[VL]], i64 2)
+// CHECK-RV64-NEXT: ret <vscale x 16 x float> [[TMP0]]
+//
+vfloat32m8_t test_vfwnmsac_vf_bf16m4_f32m8_tu(vfloat32m8_t vd, __bf16 vs1,
+ vbfloat16m4_t vs2, size_t vl) {
+ return __riscv_vfwnmsac_tu(vd, vs1, vs2, vl);
+}
+
+// CHECK-RV64-LABEL: define dso_local <vscale x 1 x float> @test_vfwnmsac_vv_bf16mf4_f32mf2_tum(
+// CHECK-RV64-SAME: <vscale x 1 x i1> [[VM:%.*]], <vscale x 1 x float> [[VD:%.*]], <vscale x 1 x bfloat> [[VS1:%.*]], <vscale x 1 x bfloat> [[VS2:%.*]], i64 noundef [[VL:%.*]]) #[[ATTR0]] {
+// CHECK-RV64-NEXT: [[ENTRY:.*:]]
+// CHECK-RV64-NEXT: [[TMP0:%.*]] = call <vscale x 1 x float> @llvm.riscv.vfwnmsac.mask.nxv1f32.nxv1bf16.nxv1bf16.i64(<vscale x 1 x float> [[VD]], <vscale x 1 x bfloat> [[VS1]], <vscale x 1 x bfloat> [[VS2]], <vscale x 1 x i1> [[VM]], i64 7, i64 [[VL]], i64 2)
+// CHECK-RV64-NEXT: ret <vscale x 1 x float> [[TMP0]]
+//
+vfloat32mf2_t test_vfwnmsac_vv_bf16mf4_f32mf2_tum(vbool64_t vm,
+ vfloat32mf2_t vd,
+ vbfloat16mf4_t vs1,
+ vbfloat16mf4_t vs2,
+ size_t vl) {
+ return __riscv_vfwnmsac_tum(vm, vd, vs1, vs2, vl);
+}
+
+// CHECK-RV64-LABEL: define dso_local <vscale x 1 x float> @test_vfwnmsac_vf_bf16mf4_f32mf2_tum(
+// CHECK-RV64-SAME: <vscale x 1 x i1> [[VM:%.*]], <vscale x 1 x float> [[VD:%.*]], bfloat noundef [[VS1:%.*]], <vscale x 1 x bfloat> [[VS2:%.*]], i64 noundef [[VL:%.*]]) #[[ATTR0]] {
+// CHECK-RV64-NEXT: [[ENTRY:.*:]]
+// CHECK-RV64-NEXT: [[TMP0:%.*]] = call <vscale x 1 x float> @llvm.riscv.vfwnmsac.mask.nxv1f32.bf16.nxv1bf16.i64(<vscale x 1 x float> [[VD]], bfloat [[VS1]], <vscale x 1 x bfloat> [[VS2]], <vscale x 1 x i1> [[VM]], i64 7, i64 [[VL]], i64 2)
+// CHECK-RV64-NEXT: ret <vscale x 1 x float> [[TMP0]]
+//
+vfloat32mf2_t test_vfwnmsac_vf_bf16mf4_f32mf2_tum(vbool64_t vm,
+ vfloat32mf2_t vd, __bf16 vs1,
+ vbfloat16mf4_t vs2,
+ size_t vl) {
+ return __riscv_vfwnmsac_tum(vm, vd, vs1, vs2, vl);
+}
+
+// CHECK-RV64-LABEL: define dso_local <vscale x 2 x float> @test_vfwnmsac_vv_bf16mf2_f32m1_tum(
+// CHECK-RV64-SAME: <vscale x 2 x i1> [[VM:%.*]], <vscale x 2 x float> [[VD:%.*]], <vscale x 2 x bfloat> [[VS1:%.*]], <vscale x 2 x bfloat> [[VS2:%.*]], i64 noundef [[VL:%.*]]) #[[ATTR0]] {
+// CHECK-RV64-NEXT: [[ENTRY:.*:]]
+// CHECK-RV64-NEXT: [[TMP0:%.*]] = call <vscale x 2 x float> @llvm.riscv.vfwnmsac.mask.nxv2f32.nxv2bf16.nxv2bf16.i64(<vscale x 2 x float> [[VD]], <vscale x 2 x bfloat> [[VS1]], <vscale x 2 x bfloat> [[VS2]], <vscale x 2 x i1> [[VM]], i64 7, i64 [[VL]], i64 2)
+// CHECK-RV64-NEXT: ret <vscale x 2 x float> [[TMP0]]
+//
+vfloat32m1_t test_vfwnmsac_vv_bf16mf2_f32m1_tum(vbool32_t vm, vfloat32m1_t vd,
+ vbfloat16mf2_t vs1,
+ vbfloat16mf2_t vs2, size_t vl) {
+ return __riscv_vfwnmsac_tum(vm, vd, vs1, vs2, vl);
+}
+
+// CHECK-RV64-LABEL: define dso_local <vscale x 2 x float> @test_vfwnmsac_vf_bf16mf2_f32m1_tum(
+// CHECK-RV64-SAME: <vscale x 2 x i1> [[VM:%.*]], <vscale x 2 x float> [[VD:%.*]], bfloat noundef [[VS1:%.*]], <vscale x 2 x bfloat> [[VS2:%.*]], i64 noundef [[VL:%.*]]) #[[ATTR0]] {
+// CHECK-RV64-NEXT: [[ENTRY:.*:]]
+// CHECK-RV64-NEXT: [[TMP0:%.*]] = call <vscale x 2 x float> @llvm.riscv.vfwnmsac.mask.nxv2f32.bf16.nxv2bf16.i64(<vscale x 2 x float> [[VD]], bfloat [[VS1]], <vscale x 2 x bfloat> [[VS2]], <vscale x 2 x i1> [[VM]], i64 7, i64 [[VL]], i64 2)
+// CHECK-RV64-NEXT: ret <vscale x 2 x float> [[TMP0]]
+//
+vfloat32m1_t test_vfwnmsac_vf_bf16mf2_f32m1_tum(vbool32_t vm, vfloat32m1_t vd,
+ __bf16 vs1, vbfloat16mf2_t vs2,
+ size_t vl) {
+ return __riscv_vfwnmsac_tum(vm, vd, vs1, vs2, vl);
+}
+
+// CHECK-RV64-LABEL: define dso_local <vscale x 4 x float> @test_vfwnmsac_vv_bf16m1_f32m2_tum(
+// CHECK-RV64-SAME: <vscale x 4 x i1> [[VM:%.*]], <vscale x 4 x float> [[VD:%.*]], <vscale x 4 x bfloat> [[VS1:%.*]], <vscale x 4 x bfloat> [[VS2:%.*]], i64 noundef [[VL:%.*]]) #[[ATTR0]] {
+// CHECK-RV64-NEXT: [[ENTRY:.*:]]
+// CHECK-RV64-NEXT: [[TMP0:%.*]] = call <vscale x 4 x float> @llvm.riscv.vfwnmsac.mask.nxv4f32.nxv4bf16.nxv4bf16.i64(<vscale x 4 x float> [[VD]], <vscale x 4 x bfloat> [[VS1]], <vscale x 4 x bfloat> [[VS2]], <vscale x 4 x i1> [[VM]], i64 7, i64 [[VL]], i64 2)
+// CHECK-RV64-NEXT: ret <vscale x 4 x float> [[TMP0]]
+//
+vfloat32m2_t test_vfwnmsac_vv_bf16m1_f32m2_tum(vbool16_t vm, vfloat32m2_t vd,
+ vbfloat16m1_t vs1,
+ vbfloat16m1_t vs2, size_t vl) {
+ return __riscv_vfwnmsac_tum(vm, vd, vs1, vs2, vl);
+}
+
+// CHECK-RV64-LABEL: define dso_local <vscale x 4 x float> @test_vfwnmsac_vf_bf16m1_f32m2_tum(
+// CHECK-RV64-SAME: <vscale x 4 x i1> [[VM:%.*]], <vscale x 4 x float> [[VD:%.*]], bfloat noundef [[VS1:%.*]], <vscale x 4 x bfloat> [[VS2:%.*]], i64 noundef [[VL:%.*]]) #[[ATTR0]] {
+// CHECK-RV64-NEXT: [[ENTRY:.*:]]
+// CHECK-RV64-NEXT: [[TMP0:%.*]] = call <vscale x 4 x float> @llvm.riscv.vfwnmsac.mask.nxv4f32.bf16.nxv4bf16.i64(<vscale x 4 x float> [[VD]], bfloat [[VS1]], <vscale x 4 x bfloat> [[VS2]], <vscale x 4 x i1> [[VM]], i64 7, i64 [[VL]], i64 2)
+// CHECK-RV64-NEXT: ret <vscale x 4 x float> [[TMP0]]
+//
+vfloat32m2_t test_vfwnmsac_vf_bf16m1_f32m2_tum(vbool16_t vm, vfloat32m2_t vd,
+ __bf16 vs1, vbfloat16m1_t vs2,
+ size_t vl) {
+ return __riscv_vfwnmsac_tum(vm, vd, vs1, vs2, vl);
+}
+
+// CHECK-RV64-LABEL: define dso_local <vscale x 8 x float> @test_vfwnmsac_vv_bf16m2_f32m4_tum(
+// CHECK-RV64-SAME: <vscale x 8 x i1> [[VM:%.*]], <vscale x 8 x float> [[VD:%.*]], <vscale x 8 x bfloat> [[VS1:%.*]], <vscale x 8 x bfloat> [[VS2:%.*]], i64 noundef [[VL:%.*]]) #[[ATTR0]] {
+// CHECK-RV64-NEXT: [[ENTRY:.*:]]
+// CHECK-RV64-NEXT: [[TMP0:%.*]] = call <vscale x 8 x float> @llvm.riscv.vfwnmsac.mask.nxv8f32.nxv8bf16.nxv8bf16.i64(<vscale x 8 x float> [[VD]], <vscale x 8 x bfloat> [[VS1]], <vscale x 8 x bfloat> [[VS2]], <vscale x 8 x i1> [[VM]], i64 7, i64 [[VL]], i64 2)
+// CHECK-RV64-NEXT: ret <vscale x 8 x float> [[TMP0]]
+//
+vfloat32m4_t test_vfwnmsac_vv_bf16m2_f32m4_tum(vbool8_t vm, vfloat32m4_t vd,
+ vbfloat16m2_t vs1,
+ vbfloat16m2_t vs2, size_t vl) {
+ return __riscv_vfwnmsac_tum(vm, vd, vs1, vs2, vl);
+}
+
+// CHECK-RV64-LABEL: define dso_local <vscale x 8 x float> @test_vfwnmsac_vf_bf16m2_f32m4_tum(
+// CHECK-RV64-SAME: <vscale x 8 x i1> [[VM:%.*]], <vscale x 8 x float> [[VD:%.*]], bfloat noundef [[VS1:%.*]], <vscale x 8 x bfloat> [[VS2:%.*]], i64 noundef [[VL:%.*]]) #[[ATTR0]] {
+// CHECK-RV64-NEXT: [[ENTRY:.*:]]
+// CHECK-RV64-NEXT: [[TMP0:%.*]] = call <vscale x 8 x float> @llvm.riscv.vfwnmsac.mask.nxv8f32.bf16.nxv8bf16.i64(<vscale x 8 x float> [[VD]], bfloat [[VS1]], <vscale x 8 x bfloat> [[VS2]], <vscale x 8 x i1> [[VM]], i64 7, i64 [[VL]], i64 2)
+// CHECK-RV64-NEXT: ret <vscale x 8 x float> [[TMP0]]
+//
+vfloat32m4_t test_vfwnmsac_vf_bf16m2_f32m4_tum(vbool8_t vm, vfloat32m4_t vd,
+ __bf16 vs1, vbfloat16m2_t vs2,
+ size_t vl) {
+ return __riscv_vfwnmsac_tum(vm, vd, vs1, vs2, vl);
+}
+
+// CHECK-RV64-LABEL: define dso_local <vscale x 16 x float> @test_vfwnmsac_vv_bf16m4_f32m8_tum(
+// CHECK-RV64-SAME: <vscale x 16 x i1> [[VM:%.*]], <vscale x 16 x float> [[VD:%.*]], <vscale x 16 x bfloat> [[VS1:%.*]], <vscale x 16 x bfloat> [[VS2:%.*]], i64 noundef [[VL:%.*]]) #[[ATTR0]] {
+// CHECK-RV64-NEXT: [[ENTRY:.*:]]
+// CHECK-RV64-NEXT: [[TMP0:%.*]] = call <vscale x 16 x float> @llvm.riscv.vfwnmsac.mask.nxv16f32.nxv16bf16.nxv16bf16.i64(<vscale x 16 x float> [[VD]], <vscale x 16 x bfloat> [[VS1]], <vscale x 16 x bfloat> [[VS2]], <vscale x 16 x i1> [[VM]], i64 7, i64 [[VL]], i64 2)
+// CHECK-RV64-NEXT: ret <vscale x 16 x float> [[TMP0]]
+//
+vfloat32m8_t test_vfwnmsac_vv_bf16m4_f32m8_tum(vbool4_t vm, vfloat32m8_t vd,
+ vbfloat16m4_t vs1,
+ vbfloat16m4_t vs2, size_t vl) {
+ return __riscv_vfwnmsac_tum(vm, vd, vs1, vs2, vl);
+}
+
+// CHECK-RV64-LABEL: define dso_local <vscale x 16 x float> @test_vfwnmsac_vf_bf16m4_f32m8_tum(
+// CHECK-RV64-SAME: <vscale x 16 x i1> [[VM:%.*]], <vscale x 16 x float> [[VD:%.*]], bfloat noundef [[VS1:%.*]], <vscale x 16 x bfloat> [[VS2:%.*]], i64 noundef [[VL:%.*]]) #[[ATTR0]] {
+// CHECK-RV64-NEXT: [[ENTRY:.*:]]
+// CHECK-RV64-NEXT: [[TMP0:%.*]] = call <vscale x 16 x float> @llvm.riscv.vfwnmsac.mask.nxv16f32.bf16.nxv16bf16.i64(<vscale x 16 x float> [[VD]], bfloat [[VS1]], <vscale x 16 x bfloat> [[VS2]], <vscale x 16 x i1> [[VM]], i64 7, i64 [[VL]], i64 2)
+// CHECK-RV64-NEXT: ret <vscale x 16 x float> [[TMP0]]
+//
+vfloat32m8_t test_vfwnmsac_vf_bf16m4_f32m8_tum(vbool4_t vm, vfloat32m8_t vd,
+ __bf16 vs1, vbfloat16m4_t vs2,
+ size_t vl) {
+ return __riscv_vfwnmsac_tum(vm, vd, vs1, vs2, vl);
+}
+
+// CHECK-RV64-LABEL: define dso_local <vscale x 1 x float> @test_vfwnmsac_vv_bf16mf4_f32mf2_tumu(
+// CHECK-RV64-SAME: <vscale x 1 x i1> [[VM:%.*]], <vscale x 1 x float> [[VD:%.*]], <vscale x 1 x bfloat> [[VS1:%.*]], <vscale x 1 x bfloat> [[VS2:%.*]], i64 noundef [[VL:%.*]]) #[[ATTR0]] {
+// CHECK-RV64-NEXT: [[ENTRY:.*:]]
+// CHECK-RV64-NEXT: [[TMP0:%.*]] = call <vscale x 1 x float> @llvm.riscv.vfwnmsac.mask.nxv1f32.nxv1bf16.nxv1bf16.i64(<vscale x 1 x float> [[VD]], <vscale x 1 x bfloat> [[VS1]], <vscale x 1 x bfloat> [[VS2]], <vscale x 1 x i1> [[VM]], i64 7, i64 [[VL]], i64 0)
+// CHECK-RV64-NEXT: ret <vscale x 1 x float> [[TMP0]]
+//
+vfloat32mf2_t test_vfwnmsac_vv_bf16mf4_f32mf2_tumu(vbool64_t vm,
+ vfloat32mf2_t vd,
+ vbfloat16mf4_t vs1,
+ vbfloat16mf4_t vs2,
+ size_t vl) {
+ return __riscv_vfwnmsac_tumu(vm, vd, vs1, vs2, vl);
+}
+
+// CHECK-RV64-LABEL: define dso_local <vscale x 1 x float> @test_vfwnmsac_vf_bf16mf4_f32mf2_tumu(
+// CHECK-RV64-SAME: <vscale x 1 x i1> [[VM:%.*]], <vscale x 1 x float> [[VD:%.*]], bfloat noundef [[VS1:%.*]], <vscale x 1 x bfloat> [[VS2:%.*]], i64 noundef [[VL:%.*]]) #[[ATTR0]] {
+// CHECK-RV64-NEXT: [[ENTRY:.*:]]
+// CHECK-RV64-NEXT: [[TMP0:%.*]] = call <vscale x 1 x float> @llvm.riscv.vfwnmsac.mask.nxv1f32.bf16.nxv1bf16.i64(<vscale x 1 x float> [[VD]], bfloat [[VS1]], <vscale x 1 x bfloat> [[VS2]], <vscale x 1 x i1> [[VM]], i64 7, i64 [[VL]], i64 0)
+// CHECK-RV64-NEXT: ret <vscale x 1 x float> [[TMP0]]
+//
+vfloat32mf2_t test_vfwnmsac_vf_bf16mf4_f32mf2_tumu(vbool64_t vm,
+ vfloat32mf2_t vd, __bf16 vs1,
+ vbfloat16mf4_t vs2,
+ size_t vl) {
+ return __riscv_vfwnmsac_tumu(vm, vd, vs1, vs2, vl);
+}
+
+// CHECK-RV64-LABEL: define dso_local <vscale x 2 x float> @test_vfwnmsac_vv_bf16mf2_f32m1_tumu(
+// CHECK-RV64-SAME: <vscale x 2 x i1> [[VM:%.*]], <vscale x 2 x float> [[VD:%.*]], <vscale x 2 x bfloat> [[VS1:%.*]], <vscale x 2 x bfloat> [[VS2:%.*]], i64 noundef [[VL:%.*]]) #[[ATTR0]] {
+// CHECK-RV64-NEXT: [[ENTRY:.*:]]
+// CHECK-RV64-NEXT: [[TMP0:%.*]] = call <vscale x 2 x float> @llvm.riscv.vfwnmsac.mask.nxv2f32.nxv2bf16.nxv2bf16.i64(<vscale x 2 x float> [[VD]], <vscale x 2 x bfloat> [[VS1]], <vscale x 2 x bfloat> [[VS2]], <vscale x 2 x i1> [[VM]], i64 7, i64 [[VL]], i64 0)
+// CHECK-RV64-NEXT: ret <vscale x 2 x float> [[TMP0]]
+//
+vfloat32m1_t test_vfwnmsac_vv_bf16mf2_f32m1_tumu(vbool32_t vm, vfloat32m1_t vd,
+ vbfloat16mf2_t vs1,
+ vbfloat16mf2_t vs2,
+ size_t vl) {
+ return __riscv_vfwnmsac_tumu(vm, vd, vs1, vs2, vl);
+}
+
+// CHECK-RV64-LABEL: define dso_local <vscale x 2 x float> @test_vfwnmsac_vf_bf16mf2_f32m1_tumu(
+// CHECK-RV64-SAME: <vscale x 2 x i1> [[VM:%.*]], <vscale x 2 x float> [[VD:%.*]], bfloat noundef [[VS1:%.*]], <vscale x 2 x bfloat> [[VS2:%.*]], i64 noundef [[VL:%.*]]) #[[ATTR0]] {
+// CHECK-RV64-NEXT: [[ENTRY:.*:]]
+// CHECK-RV64-NEXT: [[TMP0:%.*]] = call <vscale x 2 x float> @llvm.riscv.vfwnmsac.mask.nxv2f32.bf16.nxv2bf16.i64(<vscale x 2 x float> [[VD]], bfloat [[VS1]], <vscale x 2 x bfloat> [[VS2]], <vscale x 2 x i1> [[VM]], i64 7, i64 [[VL]], i64 0)
+// CHECK-RV64-NEXT: ret <vscale x 2 x float> [[TMP0]]
+//
+vfloat32m1_t test_vfwnmsac_vf_bf16mf2_f32m1_tumu(vbool32_t vm, vfloat32m1_t vd,
+ __bf16 vs1, vbfloat16mf2_t vs2,
+ size_t vl) {
+ return __riscv_vfwnmsac_tumu(vm, vd, vs1, vs2, vl);
+}
+
+// CHECK-RV64-LABEL: define dso_local <vscale x 4 x float> @test_vfwnmsac_vv_bf16m1_f32m2_tumu(
+// CHECK-RV64-SAME: <vscale x 4 x i1> [[VM:%.*]], <vscale x 4 x float> [[VD:%.*]], <vscale x 4 x bfloat> [[VS1:%.*]], <vscale x 4 x bfloat> [[VS2:%.*]], i64 noundef [[VL:%.*]]) #[[ATTR0]] {
+// CHECK-RV64-NEXT: [[ENTRY:.*:]]
+// CHECK-RV64-NEXT: [[TMP0:%.*]] = call <vscale x 4 x float> @llvm.riscv.vfwnmsac.mask.nxv4f32.nxv4bf16.nxv4bf16.i64(<vscale x 4 x float> [[VD]], <vscale x 4 x bfloat> [[VS1]], <vscale x 4 x bfloat> [[VS2]], <vscale x 4 x i1> [[VM]], i64 7, i64 [[VL]], i64 0)
+// CHECK-RV64-NEXT: ret <vscale x 4 x float> [[TMP0]]
+//
+vfloat32m2_t test_vfwnmsac_vv_bf16m1_f32m2_tumu(vbool16_t vm, vfloat32m2_t vd,
+ vbfloat16m1_t vs1,
+ vbfloat16m1_t vs2, size_t vl) {
+ return __riscv_vfwnmsac_tumu(vm, vd, vs1, vs2, vl);
+}
+
+// CHECK-RV64-LABEL: define dso_local <vscale x 4 x float> @test_vfwnmsac_vf_bf16m1_f32m2_tumu(
+// CHECK-RV64-SAME: <vscale x 4 x i1> [[VM:%.*]], <vscale x 4 x float> [[VD:%.*]], bfloat noundef [[VS1:%.*]], <vscale x 4 x bfloat> [[VS2:%.*]], i64 noundef [[VL:%.*]]) #[[ATTR0]] {
+// CHECK-RV64-NEXT: [[ENTRY:.*:]]
+// CHECK-RV64-NEXT: [[TMP0:%.*]] = call <vscale x 4 x float> @llvm.riscv.vfwnmsac.mask.nxv4f32.bf16.nxv4bf16.i64(<vscale x 4 x float> [[VD]], bfloat [[VS1]], <vscale x 4 x bfloat> [[VS2]], <vscale x 4 x i1> [[VM]], i64 7, i64 [[VL]], i64 0)
+// CHECK-RV64-NEXT: ret <vscale x 4 x float> [[TMP0]]
+//
+vfloat32m2_t test_vfwnmsac_vf_bf16m1_f32m2_tumu(vbool16_t vm, vfloat32m2_t vd,
+ __bf16 vs1, vbfloat16m1_t vs2,
+ size_t vl) {
+ return __riscv_vfwnmsac_tumu(vm, vd, vs1, vs2, vl);
+}
+
+// CHECK-RV64-LABEL: define dso_local <vscale x 8 x float> @test_vfwnmsac_vv_bf16m2_f32m4_tumu(
+// CHECK-RV64-SAME: <vscale x 8 x i1> [[VM:%.*]], <vscale x 8 x float> [[VD:%.*]], <vscale x 8 x bfloat> [[VS1:%.*]], <vscale x 8 x bfloat> [[VS2:%.*]], i64 noundef [[VL:%.*]]) #[[ATTR0]] {
+// CHECK-RV64-NEXT: [[ENTRY:.*:]]
+// CHECK-RV64-NEXT: [[TMP0:%.*]] = call <vscale x 8 x float> @llvm.riscv.vfwnmsac.mask.nxv8f32.nxv8bf16.nxv8bf16.i64(<vscale x 8 x float> [[VD]], <vscale x 8 x bfloat> [[VS1]], <vscale x 8 x bfloat> [[VS2]], <vscale x 8 x i1> [[VM]], i64 7, i64 [[VL]], i64 0)
+// CHECK-RV64-NEXT: ret <vscale x 8 x float> [[TMP0]]
+//
+vfloat32m4_t test_vfwnmsac_vv_bf16m2_f32m4_tumu(vbool8_t vm, vfloat32m4_t vd,
+ vbfloat16m2_t vs1,
+ vbfloat16m2_t vs2, size_t vl) {
+ return __riscv_vfwnmsac_tumu(vm, vd, vs1, vs2, vl);
+}
+
+// CHECK-RV64-LABEL: define dso_local <vscale x 8 x float> @test_vfwnmsac_vf_bf16m2_f32m4_tumu(
+// CHECK-RV64-SAME: <vscale x 8 x i1> [[VM:%.*]], <vscale x 8 x float> [[VD:%.*]], bfloat noundef [[VS1:%.*]], <vscale x 8 x bfloat> [[VS2:%.*]], i64 noundef [[VL:%.*]]) #[[ATTR0]] {
+// CHECK-RV64-NEXT: [[ENTRY:.*:]]
+// CHECK-RV64-NEXT: [[TMP0:%.*]] = call <vscale x 8 x float> @llvm.riscv.vfwnmsac.mask.nxv8f32.bf16.nxv8bf16.i64(<vscale x 8 x float> [[VD]], bfloat [[VS1]], <vscale x 8 x bfloat> [[VS2]], <vscale x 8 x i1> [[VM]], i64 7, i64 [[VL]], i64 0)
+// CHECK-RV64-NEXT: ret <vscale x 8 x float> [[TMP0]]
+//
+vfloat32m4_t test_vfwnmsac_vf_bf16m2_f32m4_tumu(vbool8_t vm, vfloat32m4_t vd,
+ __bf16 vs1, vbfloat16m2_t vs2,
+ size_t vl) {
+ return __riscv_vfwnmsac_tumu(vm, vd, vs1, vs2, vl);
+}
+
+// CHECK-RV64-LABEL: define dso_local <vscale x 16 x float> @test_vfwnmsac_vv_bf16m4_f32m8_tumu(
+// CHECK-RV64-SAME: <vscale x 16 x i1> [[VM:%.*]], <vscale x 16 x float> [[VD:%.*]], <vscale x 16 x bfloat> [[VS1:%.*]], <vscale x 16 x bfloat> [[VS2:%.*]], i64 noundef [[VL:%.*]]) #[[ATTR0]] {
+// CHECK-RV64-NEXT: [[ENTRY:.*:]]
+// CHECK-RV64-NEXT: [[TMP0:%.*]] = call <vscale x 16 x float> @llvm.riscv.vfwnmsac.mask.nxv16f32.nxv16bf16.nxv16bf16.i64(<vscale x 16 x float> [[VD]], <vscale x 16 x bfloat> [[VS1]], <vscale x 16 x bfloat> [[VS2]], <vscale x 16 x i1> [[VM]], i64 7, i64 [[VL]], i64 0)
+// CHECK-RV64-NEXT: ret <vscale x 16 x float> [[TMP0]]
+//
+vfloat32m8_t test_vfwnmsac_vv_bf16m4_f32m8_tumu(vbool4_t vm, vfloat32m8_t vd,
+ vbfloat16m4_t vs1,
+ vbfloat16m4_t vs2, size_t vl) {
+ return __riscv_vfwnmsac_tumu(vm, vd, vs1, vs2, vl);
+}
+
+// CHECK-RV64-LABEL: define dso_local <vscale x 16 x float> @test_vfwnmsac_vf_bf16m4_f32m8_tumu(
+// CHECK-RV64-SAME: <vscale x 16 x i1> [[VM:%.*]], <vscale x 16 x float> [[VD:%.*]], bfloat noundef [[VS1:%.*]], <vscale x 16 x bfloat> [[VS2:%.*]], i64 noundef [[VL:%.*]]) #[[ATTR0]] {
+// CHECK-RV64-NEXT: [[ENTRY:.*:]]
+// CHECK-RV64-NEXT: [[TMP0:%.*]] = call <vscale x 16 x float> @llvm.riscv.vfwnmsac.mask.nxv16f32.bf16.nxv16bf16.i64(<vscale x 16 x float> [[VD]], bfloat [[VS1]], <vscale x 16 x bfloat> [[VS2]], <vscale x 16 x i1> [[VM]], i64 7, i64 [[VL]], i64 0)
+// CHECK-RV64-NEXT: ret <vscale x 16 x float> [[TMP0]]
+//
+vfloat32m8_t test_vfwnmsac_vf_bf16m4_f32m8_tumu(vbool4_t vm, vfloat32m8_t vd,
+ __bf16 vs1, vbfloat16m4_t vs2,
+ size_t vl) {
+ return __riscv_vfwnmsac_tumu(vm, vd, vs1, vs2, vl);
+}
+
+// CHECK-RV64-LABEL: define dso_local <vscale x 1 x float> @test_vfwnmsac_vv_bf16mf4_f32mf2_mu(
+// CHECK-RV64-SAME: <vscale x 1 x i1> [[VM:%.*]], <vscale x 1 x float> [[VD:%.*]], <vscale x 1 x bfloat> [[VS1:%.*]], <vscale x 1 x bfloat> [[VS2:%.*]], i64 noundef [[VL:%.*]]) #[[ATTR0]] {
+// CHECK-RV64-NEXT: [[ENTRY:.*:]]
+// CHECK-RV64-NEXT: [[TMP0:%.*]] = call <vscale x 1 x float> @llvm.riscv.vfwnmsac.mask.nxv1f32.nxv1bf16.nxv1bf16.i64(<vscale x 1 x float> [[VD]], <vscale x 1 x bfloat> [[VS1]], <vscale x 1 x bfloat> [[VS2]], <vscale x 1 x i1> [[VM]], i64 7, i64 [[VL]], i64 1)
+// CHECK-RV64-NEXT: ret <vscale x 1 x float> [[TMP0]]
+//
+vfloat32mf2_t test_vfwnmsac_vv_bf16mf4_f32mf2_mu(vbool64_t vm, vfloat32mf2_t vd,
+ vbfloat16mf4_t vs1,
+ vbfloat16mf4_t vs2,
+ size_t vl) {
+ return __riscv_vfwnmsac_mu(vm, vd, vs1, vs2, vl);
+}
+
+// CHECK-RV64-LABEL: define dso_local <vscale x 1 x float> @test_vfwnmsac_vf_bf16mf4_f32mf2_mu(
+// CHECK-RV64-SAME: <vscale x 1 x i1> [[VM:%.*]], <vscale x 1 x float> [[VD:%.*]], bfloat noundef [[VS1:%.*]], <vscale x 1 x bfloat> [[VS2:%.*]], i64 noundef [[VL:%.*]]) #[[ATTR0]] {
+// CHECK-RV64-NEXT: [[ENTRY:.*:]]
+// CHECK-RV64-NEXT: [[TMP0:%.*]] = call <vscale x 1 x float> @llvm.riscv.vfwnmsac.mask.nxv1f32.bf16.nxv1bf16.i64(<vscale x 1 x float> [[VD]], bfloat [[VS1]], <vscale x 1 x bfloat> [[VS2]], <vscale x 1 x i1> [[VM]], i64 7, i64 [[VL]], i64 1)
+// CHECK-RV64-NEXT: ret <vscale x 1 x float> [[TMP0]]
+//
+vfloat32mf2_t test_vfwnmsac_vf_bf16mf4_f32mf2_mu(vbool64_t vm, vfloat32mf2_t vd,
+ __bf16 vs1, vbfloat16mf4_t vs2,
+ size_t vl) {
+ return __riscv_vfwnmsac_mu(vm, vd, vs1, vs2, vl);
+}
+
+// CHECK-RV64-LABEL: define dso_local <vscale x 2 x float> @test_vfwnmsac_vv_bf16mf2_f32m1_mu(
+// CHECK-RV64-SAME: <vscale x 2 x i1> [[VM:%.*]], <vscale x 2 x float> [[VD:%.*]], <vscale x 2 x bfloat> [[VS1:%.*]], <vscale x 2 x bfloat> [[VS2:%.*]], i64 noundef [[VL:%.*]]) #[[ATTR0]] {
+// CHECK-RV64-NEXT: [[ENTRY:.*:]]
+// CHECK-RV64-NEXT: [[TMP0:%.*]] = call <vscale x 2 x float> @llvm.riscv.vfwnmsac.mask.nxv2f32.nxv2bf16.nxv2bf16.i64(<vscale x 2 x float> [[VD]], <vscale x 2 x bfloat> [[VS1]], <vscale x 2 x bfloat> [[VS2]], <vscale x 2 x i1> [[VM]], i64 7, i64 [[VL]], i64 1)
+// CHECK-RV64-NEXT: ret <vscale x 2 x float> [[TMP0]]
+//
+vfloat32m1_t test_vfwnmsac_vv_bf16mf2_f32m1_mu(vbool32_t vm, vfloat32m1_t vd,
+ vbfloat16mf2_t vs1,
+ vbfloat16mf2_t vs2, size_t vl) {
+ return __riscv_vfwnmsac_mu(vm, vd, vs1, vs2, vl);
+}
+
+// CHECK-RV64-LABEL: define dso_local <vscale x 2 x float> @test_vfwnmsac_vf_bf16mf2_f32m1_mu(
+// CHECK-RV64-SAME: <vscale x 2 x i1> [[VM:%.*]], <vscale x 2 x float> [[VD:%.*]], bfloat noundef [[VS1:%.*]], <vscale x 2 x bfloat> [[VS2:%.*]], i64 noundef [[VL:%.*]]) #[[ATTR0]] {
+// CHECK-RV64-NEXT: [[ENTRY:.*:]]
+// CHECK-RV64-NEXT: [[TMP0:%.*]] = call <vscale x 2 x float> @llvm.riscv.vfwnmsac.mask.nxv2f32.bf16.nxv2bf16.i64(<vscale x 2 x float> [[VD]], bfloat [[VS1]], <vscale x 2 x bfloat> [[VS2]], <vscale x 2 x i1> [[VM]], i64 7, i64 [[VL]], i64 1)
+// CHECK-RV64-NEXT: ret <vscale x 2 x float> [[TMP0]]
+//
+vfloat32m1_t test_vfwnmsac_vf_bf16mf2_f32m1_mu(vbool32_t vm, vfloat32m1_t vd,
+ __bf16 vs1, vbfloat16mf2_t vs2,
+ size_t vl) {
+ return __riscv_vfwnmsac_mu(vm, vd, vs1, vs2, vl);
+}
+
+// CHECK-RV64-LABEL: define dso_local <vscale x 4 x float> @test_vfwnmsac_vv_bf16m1_f32m2_mu(
+// CHECK-RV64-SAME: <vscale x 4 x i1> [[VM:%.*]], <vscale x 4 x float> [[VD:%.*]], <vscale x 4 x bfloat> [[VS1:%.*]], <vscale x 4 x bfloat> [[VS2:%.*]], i64 noundef [[VL:%.*]]) #[[ATTR0]] {
+// CHECK-RV64-NEXT: [[ENTRY:.*:]]
+// CHECK-RV64-NEXT: [[TMP0:%.*]] = call <vscale x 4 x float> @llvm.riscv.vfwnmsac.mask.nxv4f32.nxv4bf16.nxv4bf16.i64(<vscale x 4 x float> [[VD]], <vscale x 4 x bfloat> [[VS1]], <vscale x 4 x bfloat> [[VS2]], <vscale x 4 x i1> [[VM]], i64 7, i64 [[VL]], i64 1)
+// CHECK-RV64-NEXT: ret <vscale x 4 x float> [[TMP0]]
+//
+vfloat32m2_t test_vfwnmsac_vv_bf16m1_f32m2_mu(vbool16_t vm, vfloat32m2_t vd,
+ vbfloat16m1_t vs1,
+ vbfloat16m1_t vs2, size_t vl) {
+ return __riscv_vfwnmsac_mu(vm, vd, vs1, vs2, vl);
+}
+
+// CHECK-RV64-LABEL: define dso_local <vscale x 4 x float> @test_vfwnmsac_vf_bf16m1_f32m2_mu(
+// CHECK-RV64-SAME: <vscale x 4 x i1> [[VM:%.*]], <vscale x 4 x float> [[VD:%.*]], bfloat noundef [[VS1:%.*]], <vscale x 4 x bfloat> [[VS2:%.*]], i64 noundef [[VL:%.*]]) #[[ATTR0]] {
+// CHECK-RV64-NEXT: [[ENTRY:.*:]]
+// CHECK-RV64-NEXT: [[TMP0:%.*]] = call <vscale x 4 x float> @llvm.riscv.vfwnmsac.mask.nxv4f32.bf16.nxv4bf16.i64(<vscale x 4 x float> [[VD]], bfloat [[VS1]], <vscale x 4 x bfloat> [[VS2]], <vscale x 4 x i1> [[VM]], i64 7, i64 [[VL]], i64 1)
+// CHECK-RV64-NEXT: ret <vscale x 4 x float> [[TMP0]]
+//
+vfloat32m2_t test_vfwnmsac_vf_bf16m1_f32m2_mu(vbool16_t vm, vfloat32m2_t vd,
+ __bf16 vs1, vbfloat16m1_t vs2,
+ size_t vl) {
+ return __riscv_vfwnmsac_mu(vm, vd, vs1, vs2, vl);
+}
+
+// CHECK-RV64-LABEL: define dso_local <vscale x 8 x float> @test_vfwnmsac_vv_bf16m2_f32m4_mu(
+// CHECK-RV64-SAME: <vscale x 8 x i1> [[VM:%.*]], <vscale x 8 x float> [[VD:%.*]], <vscale x 8 x bfloat> [[VS1:%.*]], <vscale x 8 x bfloat> [[VS2:%.*]], i64 noundef [[VL:%.*]]) #[[ATTR0]] {
+// CHECK-RV64-NEXT: [[ENTRY:.*:]]
+// CHECK-RV64-NEXT: [[TMP0:%.*]] = call <vscale x 8 x float> @llvm.riscv.vfwnmsac.mask.nxv8f32.nxv8bf16.nxv8bf16.i64(<vscale x 8 x float> [[VD]], <vscale x 8 x bfloat> [[VS1]], <vscale x 8 x bfloat> [[VS2]], <vscale x 8 x i1> [[VM]], i64 7, i64 [[VL]], i64 1)
+// CHECK-RV64-NEXT: ret <vscale x 8 x float> [[TMP0]]
+//
+vfloat32m4_t test_vfwnmsac_vv_bf16m2_f32m4_mu(vbool8_t vm, vfloat32m4_t vd,
+ vbfloat16m2_t vs1,
+ vbfloat16m2_t vs2, size_t vl) {
+ return __riscv_vfwnmsac_mu(vm, vd, vs1, vs2, vl);
+}
+
+// CHECK-RV64-LABEL: define dso_local <vscale x 8 x float> @test_vfwnmsac_vf_bf16m2_f32m4_mu(
+// CHECK-RV64-SAME: <vscale x 8 x i1> [[VM:%.*]], <vscale x 8 x float> [[VD:%.*]], bfloat noundef [[VS1:%.*]], <vscale x 8 x bfloat> [[VS2:%.*]], i64 noundef [[VL:%.*]]) #[[ATTR0]] {
+// CHECK-RV64-NEXT: [[ENTRY:.*:]]
+// CHECK-RV64-NEXT: [[TMP0:%.*]] = call <vscale x 8 x float> @llvm.riscv.vfwnmsac.mask.nxv8f32.bf16.nxv8bf16.i64(<vscale x 8 x float> [[VD]], bfloat [[VS1]], <vscale x 8 x bfloat> [[VS2]], <vscale x 8 x i1> [[VM]], i64 7, i64 [[VL]], i64 1)
+// CHECK-RV64-NEXT: ret <vscale x 8 x float> [[TMP0]]
+//
+vfloat32m4_t test_vfwnmsac_vf_bf16m2_f32m4_mu(vbool8_t vm, vfloat32m4_t vd,
+ __bf16 vs1, vbfloat16m2_t vs2,
+ size_t vl) {
+ return __riscv_vfwnmsac_mu(vm, vd, vs1, vs2, vl);
+}
+
+// CHECK-RV64-LABEL: define dso_local <vscale x 16 x float> @test_vfwnmsac_vv_bf16m4_f32m8_mu(
+// CHECK-RV64-SAME: <vscale x 16 x i1> [[VM:%.*]], <vscale x 16 x float> [[VD:%.*]], <vscale x 16 x bfloat> [[VS1:%.*]], <vscale x 16 x bfloat> [[VS2:%.*]], i64 noundef [[VL:%.*]]) #[[ATTR0]] {
+// CHECK-RV64-NEXT: [[ENTRY:.*:]]
+// CHECK-RV64-NEXT: [[TMP0:%.*]] = call <vscale x 16 x float> @llvm.riscv.vfwnmsac.mask.nxv16f32.nxv16bf16.nxv16bf16.i64(<vscale x 16 x float> [[VD]], <vscale x 16 x bfloat> [[VS1]], <vscale x 16 x bfloat> [[VS2]], <vscale x 16 x i1> [[VM]], i64 7, i64 [[VL]], i64 1)
+// CHECK-RV64-NEXT: ret <vscale x 16 x float> [[TMP0]]
+//
+vfloat32m8_t test_vfwnmsac_vv_bf16m4_f32m8_mu(vbool4_t vm, vfloat32m8_t vd,
+ vbfloat16m4_t vs1,
+ vbfloat16m4_t vs2, size_t vl) {
+ return __riscv_vfwnmsac_mu(vm, vd, vs1, vs2, vl);
+}
+
+// CHECK-RV64-LABEL: define dso_local <vscale x 16 x float> @test_vfwnmsac_vf_bf16m4_f32m8_mu(
+// CHECK-RV64-SAME: <vscale x 16 x i1> [[VM:%.*]], <vscale x 16 x float> [[VD:%.*]], bfloat noundef [[VS1:%.*]], <vscale x 16 x bfloat> [[VS2:%.*]], i64 noundef [[VL:%.*]]) #[[ATTR0]] {
+// CHECK-RV64-NEXT: [[ENTRY:.*:]]
+// CHECK-RV64-NEXT: [[TMP0:%.*]] = call <vscale x 16 x float> @llvm.riscv.vfwnmsac.mask.nxv16f32.bf16.nxv16bf16.i64(<vscale x 16 x float> [[VD]], bfloat [[VS1]], <vscale x 16 x bfloat> [[VS2]], <vscale x 16 x i1> [[VM]], i64 7, i64 [[VL]], i64 1)
+// CHECK-RV64-NEXT: ret <vscale x 16 x float> [[TMP0]]
+//
+vfloat32m8_t test_vfwnmsac_vf_bf16m4_f32m8_mu(vbool4_t vm, vfloat32m8_t vd,
+ __bf16 vs1, vbfloat16m4_t vs2,
+ size_t vl) {
+ return __riscv_vfwnmsac_mu(vm, vd, vs1, vs2, vl);
+}
+
+// CHECK-RV64-LABEL: define dso_local <vscale x 1 x float> @test_vfwnmsac_vv_bf16mf4_f32mf2_rm_tu(
+// CHECK-RV64-SAME: <vscale x 1 x float> [[VD:%.*]], <vscale x 1 x bfloat> [[VS1:%.*]], <vscale x 1 x bfloat> [[VS2:%.*]], i64 noundef [[VL:%.*]]) #[[ATTR0]] {
+// CHECK-RV64-NEXT: [[ENTRY:.*:]]
+// CHECK-RV64-NEXT: [[TMP0:%.*]] = call <vscale x 1 x float> @llvm.riscv.vfwnmsac.nxv1f32.nxv1bf16.nxv1bf16.i64(<vscale x 1 x float> [[VD]], <vscale x 1 x bfloat> [[VS1]], <vscale x 1 x bfloat> [[VS2]], i64 0, i64 [[VL]], i64 2)
+// CHECK-RV64-NEXT: ret <vscale x 1 x float> [[TMP0]]
+//
+vfloat32mf2_t test_vfwnmsac_vv_bf16mf4_f32mf2_rm_tu(vfloat32mf2_t vd,
+ vbfloat16mf4_t vs1,
+ vbfloat16mf4_t vs2,
+ size_t vl) {
+ return __riscv_vfwnmsac_tu(vd, vs1, vs2, __RISCV_FRM_RNE, vl);
+}
+
+// CHECK-RV64-LABEL: define dso_local <vscale x 1 x float> @test_vfwnmsac_vf_bf16mf4_f32mf2_rm_tu(
+// CHECK-RV64-SAME: <vscale x 1 x float> [[VD:%.*]], bfloat noundef [[VS1:%.*]], <vscale x 1 x bfloat> [[VS2:%.*]], i64 noundef [[VL:%.*]]) #[[ATTR0]] {
+// CHECK-RV64-NEXT: [[ENTRY:.*:]]
+// CHECK-RV64-NEXT: [[TMP0:%.*]] = call <vscale x 1 x float> @llvm.riscv.vfwnmsac.nxv1f32.bf16.nxv1bf16.i64(<vscale x 1 x float> [[VD]], bfloat [[VS1]], <vscale x 1 x bfloat> [[VS2]], i64 0, i64 [[VL]], i64 2)
+// CHECK-RV64-NEXT: ret <vscale x 1 x float> [[TMP0]]
+//
+vfloat32mf2_t test_vfwnmsac_vf_bf16mf4_f32mf2_rm_tu(vfloat32mf2_t vd,
+ __bf16 vs1,
+ vbfloat16mf4_t vs2,
+ size_t vl) {
+ return __riscv_vfwnmsac_tu(vd, vs1, vs2, __RISCV_FRM_RNE, vl);
+}
+
+// CHECK-RV64-LABEL: define dso_local <vscale x 2 x float> @test_vfwnmsac_vv_bf16mf2_f32m1_rm_tu(
+// CHECK-RV64-SAME: <vscale x 2 x float> [[VD:%.*]], <vscale x 2 x bfloat> [[VS1:%.*]], <vscale x 2 x bfloat> [[VS2:%.*]], i64 noundef [[VL:%.*]]) #[[ATTR0]] {
+// CHECK-RV64-NEXT: [[ENTRY:.*:]]
+// CHECK-RV64-NEXT: [[TMP0:%.*]] = call <vscale x 2 x float> @llvm.riscv.vfwnmsac.nxv2f32.nxv2bf16.nxv2bf16.i64(<vscale x 2 x float> [[VD]], <vscale x 2 x bfloat> [[VS1]], <vscale x 2 x bfloat> [[VS2]], i64 0, i64 [[VL]], i64 2)
+// CHECK-RV64-NEXT: ret <vscale x 2 x float> [[TMP0]]
+//
+vfloat32m1_t test_vfwnmsac_vv_bf16mf2_f32m1_rm_tu(vfloat32m1_t vd,
+ vbfloat16mf2_t vs1,
+ vbfloat16mf2_t vs2,
+ size_t vl) {
+ return __riscv_vfwnmsac_tu(vd, vs1, vs2, __RISCV_FRM_RNE, vl);
+}
+
+// CHECK-RV64-LABEL: define dso_local <vscale x 2 x float> @test_vfwnmsac_vf_bf16mf2_f32m1_rm_tu(
+// CHECK-RV64-SAME: <vscale x 2 x float> [[VD:%.*]], bfloat noundef [[VS1:%.*]], <vscale x 2 x bfloat> [[VS2:%.*]], i64 noundef [[VL:%.*]]) #[[ATTR0]] {
+// CHECK-RV64-NEXT: [[ENTRY:.*:]]
+// CHECK-RV64-NEXT: [[TMP0:%.*]] = call <vscale x 2 x float> @llvm.riscv.vfwnmsac.nxv2f32.bf16.nxv2bf16.i64(<vscale x 2 x float> [[VD]], bfloat [[VS1]], <vscale x 2 x bfloat> [[VS2]], i64 0, i64 [[VL]], i64 2)
+// CHECK-RV64-NEXT: ret <vscale x 2 x float> [[TMP0]]
+//
+vfloat32m1_t test_vfwnmsac_vf_bf16mf2_f32m1_rm_tu(vfloat32m1_t vd, __bf16 vs1,
+ vbfloat16mf2_t vs2,
+ size_t vl) {
+ return __riscv_vfwnmsac_tu(vd, vs1, vs2, __RISCV_FRM_RNE, vl);
+}
+
+// CHECK-RV64-LABEL: define dso_local <vscale x 4 x float> @test_vfwnmsac_vv_bf16m1_f32m2_rm_tu(
+// CHECK-RV64-SAME: <vscale x 4 x float> [[VD:%.*]], <vscale x 4 x bfloat> [[VS1:%.*]], <vscale x 4 x bfloat> [[VS2:%.*]], i64 noundef [[VL:%.*]]) #[[ATTR0]] {
+// CHECK-RV64-NEXT: [[ENTRY:.*:]]
+// CHECK-RV64-NEXT: [[TMP0:%.*]] = call <vscale x 4 x float> @llvm.riscv.vfwnmsac.nxv4f32.nxv4bf16.nxv4bf16.i64(<vscale x 4 x float> [[VD]], <vscale x 4 x bfloat> [[VS1]], <vscale x 4 x bfloat> [[VS2]], i64 0, i64 [[VL]], i64 2)
+// CHECK-RV64-NEXT: ret <vscale x 4 x float> [[TMP0]]
+//
+vfloat32m2_t test_vfwnmsac_vv_bf16m1_f32m2_rm_tu(vfloat32m2_t vd,
+ vbfloat16m1_t vs1,
+ vbfloat16m1_t vs2, size_t vl) {
+ return __riscv_vfwnmsac_tu(vd, vs1, vs2, __RISCV_FRM_RNE, vl);
+}
+
+// CHECK-RV64-LABEL: define dso_local <vscale x 4 x float> @test_vfwnmsac_vf_bf16m1_f32m2_rm_tu(
+// CHECK-RV64-SAME: <vscale x 4 x float> [[VD:%.*]], bfloat noundef [[VS1:%.*]], <vscale x 4 x bfloat> [[VS2:%.*]], i64 noundef [[VL:%.*]]) #[[ATTR0]] {
+// CHECK-RV64-NEXT: [[ENTRY:.*:]]
+// CHECK-RV64-NEXT: [[TMP0:%.*]] = call <vscale x 4 x float> @llvm.riscv.vfwnmsac.nxv4f32.bf16.nxv4bf16.i64(<vscale x 4 x float> [[VD]], bfloat [[VS1]], <vscale x 4 x bfloat> [[VS2]], i64 0, i64 [[VL]], i64 2)
+// CHECK-RV64-NEXT: ret <vscale x 4 x float> [[TMP0]]
+//
+vfloat32m2_t test_vfwnmsac_vf_bf16m1_f32m2_rm_tu(vfloat32m2_t vd, __bf16 vs1,
+ vbfloat16m1_t vs2, size_t vl) {
+ return __riscv_vfwnmsac_tu(vd, vs1, vs2, __RISCV_FRM_RNE, vl);
+}
+
+// CHECK-RV64-LABEL: define dso_local <vscale x 8 x float> @test_vfwnmsac_vv_bf16m2_f32m4_rm_tu(
+// CHECK-RV64-SAME: <vscale x 8 x float> [[VD:%.*]], <vscale x 8 x bfloat> [[VS1:%.*]], <vscale x 8 x bfloat> [[VS2:%.*]], i64 noundef [[VL:%.*]]) #[[ATTR0]] {
+// CHECK-RV64-NEXT: [[ENTRY:.*:]]
+// CHECK-RV64-NEXT: [[TMP0:%.*]] = call <vscale x 8 x float> @llvm.riscv.vfwnmsac.nxv8f32.nxv8bf16.nxv8bf16.i64(<vscale x 8 x float> [[VD]], <vscale x 8 x bfloat> [[VS1]], <vscale x 8 x bfloat> [[VS2]], i64 0, i64 [[VL]], i64 2)
+// CHECK-RV64-NEXT: ret <vscale x 8 x float> [[TMP0]]
+//
+vfloat32m4_t test_vfwnmsac_vv_bf16m2_f32m4_rm_tu(vfloat32m4_t vd,
+ vbfloat16m2_t vs1,
+ vbfloat16m2_t vs2, size_t vl) {
+ return __riscv_vfwnmsac_tu(vd, vs1, vs2, __RISCV_FRM_RNE, vl);
+}
+
+// CHECK-RV64-LABEL: define dso_local <vscale x 8 x float> @test_vfwnmsac_vf_bf16m2_f32m4_rm_tu(
+// CHECK-RV64-SAME: <vscale x 8 x float> [[VD:%.*]], bfloat noundef [[VS1:%.*]], <vscale x 8 x bfloat> [[VS2:%.*]], i64 noundef [[VL:%.*]]) #[[ATTR0]] {
+// CHECK-RV64-NEXT: [[ENTRY:.*:]]
+// CHECK-RV64-NEXT: [[TMP0:%.*]] = call <vscale x 8 x float> @llvm.riscv.vfwnmsac.nxv8f32.bf16.nxv8bf16.i64(<vscale x 8 x float> [[VD]], bfloat [[VS1]], <vscale x 8 x bfloat> [[VS2]], i64 0, i64 [[VL]], i64 2)
+// CHECK-RV64-NEXT: ret <vscale x 8 x float> [[TMP0]]
+//
+vfloat32m4_t test_vfwnmsac_vf_bf16m2_f32m4_rm_tu(vfloat32m4_t vd, __bf16 vs1,
+ vbfloat16m2_t vs2, size_t vl) {
+ return __riscv_vfwnmsac_tu(vd, vs1, vs2, __RISCV_FRM_RNE, vl);
+}
+
+// CHECK-RV64-LABEL: define dso_local <vscale x 16 x float> @test_vfwnmsac_vv_bf16m4_f32m8_rm_tu(
+// CHECK-RV64-SAME: <vscale x 16 x float> [[VD:%.*]], <vscale x 16 x bfloat> [[VS1:%.*]], <vscale x 16 x bfloat> [[VS2:%.*]], i64 noundef [[VL:%.*]]) #[[ATTR0]] {
+// CHECK-RV64-NEXT: [[ENTRY:.*:]]
+// CHECK-RV64-NEXT: [[TMP0:%.*]] = call <vscale x 16 x float> @llvm.riscv.vfwnmsac.nxv16f32.nxv16bf16.nxv16bf16.i64(<vscale x 16 x float> [[VD]], <vscale x 16 x bfloat> [[VS1]], <vscale x 16 x bfloat> [[VS2]], i64 0, i64 [[VL]], i64 2)
+// CHECK-RV64-NEXT: ret <vscale x 16 x float> [[TMP0]]
+//
+vfloat32m8_t test_vfwnmsac_vv_bf16m4_f32m8_rm_tu(vfloat32m8_t vd,
+ vbfloat16m4_t vs1,
+ vbfloat16m4_t vs2, size_t vl) {
+ return __riscv_vfwnmsac_tu(vd, vs1, vs2, __RISCV_FRM_RNE, vl);
+}
+
+// CHECK-RV64-LABEL: define dso_local <vscale x 16 x float> @test_vfwnmsac_vf_bf16m4_f32m8_rm_tu(
+// CHECK-RV64-SAME: <vscale x 16 x float> [[VD:%.*]], bfloat noundef [[VS1:%.*]], <vscale x 16 x bfloat> [[VS2:%.*]], i64 noundef [[VL:%.*]]) #[[ATTR0]] {
+// CHECK-RV64-NEXT: [[ENTRY:.*:]]
+// CHECK-RV64-NEXT: [[TMP0:%.*]] = call <vscale x 16 x float> @llvm.riscv.vfwnmsac.nxv16f32.bf16.nxv16bf16.i64(<vscale x 16 x float> [[VD]], bfloat [[VS1]], <vscale x 16 x bfloat> [[VS2]], i64 0, i64 [[VL]], i64 2)
+// CHECK-RV64-NEXT: ret <vscale x 16 x float> [[TMP0]]
+//
+vfloat32m8_t test_vfwnmsac_vf_bf16m4_f32m8_rm_tu(vfloat32m8_t vd, __bf16 vs1,
+ vbfloat16m4_t vs2, size_t vl) {
+ return __riscv_vfwnmsac_tu(vd, vs1, vs2, __RISCV_FRM_RNE, vl);
+}
+
+// CHECK-RV64-LABEL: define dso_local <vscale x 1 x float> @test_vfwnmsac_vv_bf16mf4_f32mf2_rm_tum(
+// CHECK-RV64-SAME: <vscale x 1 x i1> [[VM:%.*]], <vscale x 1 x float> [[VD:%.*]], <vscale x 1 x bfloat> [[VS1:%.*]], <vscale x 1 x bfloat> [[VS2:%.*]], i64 noundef [[VL:%.*]]) #[[ATTR0]] {
+// CHECK-RV64-NEXT: [[ENTRY:.*:]]
+// CHECK-RV64-NEXT: [[TMP0:%.*]] = call <vscale x 1 x float> @llvm.riscv.vfwnmsac.mask.nxv1f32.nxv1bf16.nxv1bf16.i64(<vscale x 1 x float> [[VD]], <vscale x 1 x bfloat> [[VS1]], <vscale x 1 x bfloat> [[VS2]], <vscale x 1 x i1> [[VM]], i64 0, i64 [[VL]], i64 2)
+// CHECK-RV64-NEXT: ret <vscale x 1 x float> [[TMP0]]
+//
+vfloat32mf2_t test_vfwnmsac_vv_bf16mf4_f32mf2_rm_tum(vbool64_t vm,
+ vfloat32mf2_t vd,
+ vbfloat16mf4_t vs1,
+ vbfloat16mf4_t vs2,
+ size_t vl) {
+ return __riscv_vfwnmsac_tum(vm, vd, vs1, vs2, __RISCV_FRM_RNE, vl);
+}
+
+// CHECK-RV64-LABEL: define dso_local <vscale x 1 x float> @test_vfwnmsac_vf_bf16mf4_f32mf2_rm_tum(
+// CHECK-RV64-SAME: <vscale x 1 x i1> [[VM:%.*]], <vscale x 1 x float> [[VD:%.*]], bfloat noundef [[VS1:%.*]], <vscale x 1 x bfloat> [[VS2:%.*]], i64 noundef [[VL:%.*]]) #[[ATTR0]] {
+// CHECK-RV64-NEXT: [[ENTRY:.*:]]
+// CHECK-RV64-NEXT: [[TMP0:%.*]] = call <vscale x 1 x float> @llvm.riscv.vfwnmsac.mask.nxv1f32.bf16.nxv1bf16.i64(<vscale x 1 x float> [[VD]], bfloat [[VS1]], <vscale x 1 x bfloat> [[VS2]], <vscale x 1 x i1> [[VM]], i64 0, i64 [[VL]], i64 2)
+// CHECK-RV64-NEXT: ret <vscale x 1 x float> [[TMP0]]
+//
+vfloat32mf2_t test_vfwnmsac_vf_bf16mf4_f32mf2_rm_tum(
+ vbool64_t vm, vfloat32mf2_t vd, __bf16 vs1, vbfloat16mf4_t vs2, size_t vl) {
+ return __riscv_vfwnmsac_tum(vm, vd, vs1, vs2, __RISCV_FRM_RNE, vl);
+}
+
+// CHECK-RV64-LABEL: define dso_local <vscale x 2 x float> @test_vfwnmsac_vv_bf16mf2_f32m1_rm_tum(
+// CHECK-RV64-SAME: <vscale x 2 x i1> [[VM:%.*]], <vscale x 2 x float> [[VD:%.*]], <vscale x 2 x bfloat> [[VS1:%.*]], <vscale x 2 x bfloat> [[VS2:%.*]], i64 noundef [[VL:%.*]]) #[[ATTR0]] {
+// CHECK-RV64-NEXT: [[ENTRY:.*:]]
+// CHECK-RV64-NEXT: [[TMP0:%.*]] = call <vscale x 2 x float> @llvm.riscv.vfwnmsac.mask.nxv2f32.nxv2bf16.nxv2bf16.i64(<vscale x 2 x float> [[VD]], <vscale x 2 x bfloat> [[VS1]], <vscale x 2 x bfloat> [[VS2]], <vscale x 2 x i1> [[VM]], i64 0, i64 [[VL]], i64 2)
+// CHECK-RV64-NEXT: ret <vscale x 2 x float> [[TMP0]]
+//
+vfloat32m1_t test_vfwnmsac_vv_bf16mf2_f32m1_rm_tum(vbool32_t vm,
+ vfloat32m1_t vd,
+ vbfloat16mf2_t vs1,
+ vbfloat16mf2_t vs2,
+ size_t vl) {
+ return __riscv_vfwnmsac_tum(vm, vd, vs1, vs2, __RISCV_FRM_RNE, vl);
+}
+
+// CHECK-RV64-LABEL: define dso_local <vscale x 2 x float> @test_vfwnmsac_vf_bf16mf2_f32m1_rm_tum(
+// CHECK-RV64-SAME: <vscale x 2 x i1> [[VM:%.*]], <vscale x 2 x float> [[VD:%.*]], bfloat noundef [[VS1:%.*]], <vscale x 2 x bfloat> [[VS2:%.*]], i64 noundef [[VL:%.*]]) #[[ATTR0]] {
+// CHECK-RV64-NEXT: [[ENTRY:.*:]]
+// CHECK-RV64-NEXT: [[TMP0:%.*]] = call <vscale x 2 x float> @llvm.riscv.vfwnmsac.mask.nxv2f32.bf16.nxv2bf16.i64(<vscale x 2 x float> [[VD]], bfloat [[VS1]], <vscale x 2 x bfloat> [[VS2]], <vscale x 2 x i1> [[VM]], i64 0, i64 [[VL]], i64 2)
+// CHECK-RV64-NEXT: ret <vscale x 2 x float> [[TMP0]]
+//
+vfloat32m1_t test_vfwnmsac_vf_bf16mf2_f32m1_rm_tum(vbool32_t vm,
+ vfloat32m1_t vd, __bf16 vs1,
+ vbfloat16mf2_t vs2,
+ size_t vl) {
+ return __riscv_vfwnmsac_tum(vm, vd, vs1, vs2, __RISCV_FRM_RNE, vl);
+}
+
+// CHECK-RV64-LABEL: define dso_local <vscale x 4 x float> @test_vfwnmsac_vv_bf16m1_f32m2_rm_tum(
+// CHECK-RV64-SAME: <vscale x 4 x i1> [[VM:%.*]], <vscale x 4 x float> [[VD:%.*]], <vscale x 4 x bfloat> [[VS1:%.*]], <vscale x 4 x bfloat> [[VS2:%.*]], i64 noundef [[VL:%.*]]) #[[ATTR0]] {
+// CHECK-RV64-NEXT: [[ENTRY:.*:]]
+// CHECK-RV64-NEXT: [[TMP0:%.*]] = call <vscale x 4 x float> @llvm.riscv.vfwnmsac.mask.nxv4f32.nxv4bf16.nxv4bf16.i64(<vscale x 4 x float> [[VD]], <vscale x 4 x bfloat> [[VS1]], <vscale x 4 x bfloat> [[VS2]], <vscale x 4 x i1> [[VM]], i64 0, i64 [[VL]], i64 2)
+// CHECK-RV64-NEXT: ret <vscale x 4 x float> [[TMP0]]
+//
+vfloat32m2_t test_vfwnmsac_vv_bf16m1_f32m2_rm_tum(vbool16_t vm, vfloat32m2_t vd,
+ vbfloat16m1_t vs1,
+ vbfloat16m1_t vs2,
+ size_t vl) {
+ return __riscv_vfwnmsac_tum(vm, vd, vs1, vs2, __RISCV_FRM_RNE, vl);
+}
+
+// CHECK-RV64-LABEL: define dso_local <vscale x 4 x float> @test_vfwnmsac_vf_bf16m1_f32m2_rm_tum(
+// CHECK-RV64-SAME: <vscale x 4 x i1> [[VM:%.*]], <vscale x 4 x float> [[VD:%.*]], bfloat noundef [[VS1:%.*]], <vscale x 4 x bfloat> [[VS2:%.*]], i64 noundef [[VL:%.*]]) #[[ATTR0]] {
+// CHECK-RV64-NEXT: [[ENTRY:.*:]]
+// CHECK-RV64-NEXT: [[TMP0:%.*]] = call <vscale x 4 x float> @llvm.riscv.vfwnmsac.mask.nxv4f32.bf16.nxv4bf16.i64(<vscale x 4 x float> [[VD]], bfloat [[VS1]], <vscale x 4 x bfloat> [[VS2]], <vscale x 4 x i1> [[VM]], i64 0, i64 [[VL]], i64 2)
+// CHECK-RV64-NEXT: ret <vscale x 4 x float> [[TMP0]]
+//
+vfloat32m2_t test_vfwnmsac_vf_bf16m1_f32m2_rm_tum(vbool16_t vm, vfloat32m2_t vd,
+ __bf16 vs1, vbfloat16m1_t vs2,
+ size_t vl) {
+ return __riscv_vfwnmsac_tum(vm, vd, vs1, vs2, __RISCV_FRM_RNE, vl);
+}
+
+// CHECK-RV64-LABEL: define dso_local <vscale x 8 x float> @test_vfwnmsac_vv_bf16m2_f32m4_rm_tum(
+// CHECK-RV64-SAME: <vscale x 8 x i1> [[VM:%.*]], <vscale x 8 x float> [[VD:%.*]], <vscale x 8 x bfloat> [[VS1:%.*]], <vscale x 8 x bfloat> [[VS2:%.*]], i64 noundef [[VL:%.*]]) #[[ATTR0]] {
+// CHECK-RV64-NEXT: [[ENTRY:.*:]]
+// CHECK-RV64-NEXT: [[TMP0:%.*]] = call <vscale x 8 x float> @llvm.riscv.vfwnmsac.mask.nxv8f32.nxv8bf16.nxv8bf16.i64(<vscale x 8 x float> [[VD]], <vscale x 8 x bfloat> [[VS1]], <vscale x 8 x bfloat> [[VS2]], <vscale x 8 x i1> [[VM]], i64 0, i64 [[VL]], i64 2)
+// CHECK-RV64-NEXT: ret <vscale x 8 x float> [[TMP0]]
+//
+vfloat32m4_t test_vfwnmsac_vv_bf16m2_f32m4_rm_tum(vbool8_t vm, vfloat32m4_t vd,
+ vbfloat16m2_t vs1,
+ vbfloat16m2_t vs2,
+ size_t vl) {
+ return __riscv_vfwnmsac_tum(vm, vd, vs1, vs2, __RISCV_FRM_RNE, vl);
+}
+
+// CHECK-RV64-LABEL: define dso_local <vscale x 8 x float> @test_vfwnmsac_vf_bf16m2_f32m4_rm_tum(
+// CHECK-RV64-SAME: <vscale x 8 x i1> [[VM:%.*]], <vscale x 8 x float> [[VD:%.*]], bfloat noundef [[VS1:%.*]], <vscale x 8 x bfloat> [[VS2:%.*]], i64 noundef [[VL:%.*]]) #[[ATTR0]] {
+// CHECK-RV64-NEXT: [[ENTRY:.*:]]
+// CHECK-RV64-NEXT: [[TMP0:%.*]] = call <vscale x 8 x float> @llvm.riscv.vfwnmsac.mask.nxv8f32.bf16.nxv8bf16.i64(<vscale x 8 x float> [[VD]], bfloat [[VS1]], <vscale x 8 x bfloat> [[VS2]], <vscale x 8 x i1> [[VM]], i64 0, i64 [[VL]], i64 2)
+// CHECK-RV64-NEXT: ret <vscale x 8 x float> [[TMP0]]
+//
+vfloat32m4_t test_vfwnmsac_vf_bf16m2_f32m4_rm_tum(vbool8_t vm, vfloat32m4_t vd,
+ __bf16 vs1, vbfloat16m2_t vs2,
+ size_t vl) {
+ return __riscv_vfwnmsac_tum(vm, vd, vs1, vs2, __RISCV_FRM_RNE, vl);
+}
+
+// CHECK-RV64-LABEL: define dso_local <vscale x 16 x float> @test_vfwnmsac_vv_bf16m4_f32m8_rm_tum(
+// CHECK-RV64-SAME: <vscale x 16 x i1> [[VM:%.*]], <vscale x 16 x float> [[VD:%.*]], <vscale x 16 x bfloat> [[VS1:%.*]], <vscale x 16 x bfloat> [[VS2:%.*]], i64 noundef [[VL:%.*]]) #[[ATTR0]] {
+// CHECK-RV64-NEXT: [[ENTRY:.*:]]
+// CHECK-RV64-NEXT: [[TMP0:%.*]] = call <vscale x 16 x float> @llvm.riscv.vfwnmsac.mask.nxv16f32.nxv16bf16.nxv16bf16.i64(<vscale x 16 x float> [[VD]], <vscale x 16 x bfloat> [[VS1]], <vscale x 16 x bfloat> [[VS2]], <vscale x 16 x i1> [[VM]], i64 0, i64 [[VL]], i64 2)
+// CHECK-RV64-NEXT: ret <vscale x 16 x float> [[TMP0]]
+//
+vfloat32m8_t test_vfwnmsac_vv_bf16m4_f32m8_rm_tum(vbool4_t vm, vfloat32m8_t vd,
+ vbfloat16m4_t vs1,
+ vbfloat16m4_t vs2,
+ size_t vl) {
+ return __riscv_vfwnmsac_tum(vm, vd, vs1, vs2, __RISCV_FRM_RNE, vl);
+}
+
+// CHECK-RV64-LABEL: define dso_local <vscale x 16 x float> @test_vfwnmsac_vf_bf16m4_f32m8_rm_tum(
+// CHECK-RV64-SAME: <vscale x 16 x i1> [[VM:%.*]], <vscale x 16 x float> [[VD:%.*]], bfloat noundef [[VS1:%.*]], <vscale x 16 x bfloat> [[VS2:%.*]], i64 noundef [[VL:%.*]]) #[[ATTR0]] {
+// CHECK-RV64-NEXT: [[ENTRY:.*:]]
+// CHECK-RV64-NEXT: [[TMP0:%.*]] = call <vscale x 16 x float> @llvm.riscv.vfwnmsac.mask.nxv16f32.bf16.nxv16bf16.i64(<vscale x 16 x float> [[VD]], bfloat [[VS1]], <vscale x 16 x bfloat> [[VS2]], <vscale x 16 x i1> [[VM]], i64 0, i64 [[VL]], i64 2)
+// CHECK-RV64-NEXT: ret <vscale x 16 x float> [[TMP0]]
+//
+vfloat32m8_t test_vfwnmsac_vf_bf16m4_f32m8_rm_tum(vbool4_t vm, vfloat32m8_t vd,
+ __bf16 vs1, vbfloat16m4_t vs2,
+ size_t vl) {
+ return __riscv_vfwnmsac_tum(vm, vd, vs1, vs2, __RISCV_FRM_RNE, vl);
+}
+
+// CHECK-RV64-LABEL: define dso_local <vscale x 1 x float> @test_vfwnmsac_vv_bf16mf4_f32mf2_rm_tumu(
+// CHECK-RV64-SAME: <vscale x 1 x i1> [[VM:%.*]], <vscale x 1 x float> [[VD:%.*]], <vscale x 1 x bfloat> [[VS1:%.*]], <vscale x 1 x bfloat> [[VS2:%.*]], i64 noundef [[VL:%.*]]) #[[ATTR0]] {
+// CHECK-RV64-NEXT: [[ENTRY:.*:]]
+// CHECK-RV64-NEXT: [[TMP0:%.*]] = call <vscale x 1 x float> @llvm.riscv.vfwnmsac.mask.nxv1f32.nxv1bf16.nxv1bf16.i64(<vscale x 1 x float> [[VD]], <vscale x 1 x bfloat> [[VS1]], <vscale x 1 x bfloat> [[VS2]], <vscale x 1 x i1> [[VM]], i64 0, i64 [[VL]], i64 0)
+// CHECK-RV64-NEXT: ret <vscale x 1 x float> [[TMP0]]
+//
+vfloat32mf2_t test_vfwnmsac_vv_bf16mf4_f32mf2_rm_tumu(vbool64_t vm,
+ vfloat32mf2_t vd,
+ vbfloat16mf4_t vs1,
+ vbfloat16mf4_t vs2,
+ size_t vl) {
+ return __riscv_vfwnmsac_tumu(vm, vd, vs1, vs2, __RISCV_FRM_RNE, vl);
+}
+
+// CHECK-RV64-LABEL: define dso_local <vscale x 1 x float> @test_vfwnmsac_vf_bf16mf4_f32mf2_rm_tumu(
+// CHECK-RV64-SAME: <vscale x 1 x i1> [[VM:%.*]], <vscale x 1 x float> [[VD:%.*]], bfloat noundef [[VS1:%.*]], <vscale x 1 x bfloat> [[VS2:%.*]], i64 noundef [[VL:%.*]]) #[[ATTR0]] {
+// CHECK-RV64-NEXT: [[ENTRY:.*:]]
+// CHECK-RV64-NEXT: [[TMP0:%.*]] = call <vscale x 1 x float> @llvm.riscv.vfwnmsac.mask.nxv1f32.bf16.nxv1bf16.i64(<vscale x 1 x float> [[VD]], bfloat [[VS1]], <vscale x 1 x bfloat> [[VS2]], <vscale x 1 x i1> [[VM]], i64 0, i64 [[VL]], i64 0)
+// CHECK-RV64-NEXT: ret <vscale x 1 x float> [[TMP0]]
+//
+vfloat32mf2_t test_vfwnmsac_vf_bf16mf4_f32mf2_rm_tumu(
+ vbool64_t vm, vfloat32mf2_t vd, __bf16 vs1, vbfloat16mf4_t vs2, size_t vl) {
+ return __riscv_vfwnmsac_tumu(vm, vd, vs1, vs2, __RISCV_FRM_RNE, vl);
+}
+
+// CHECK-RV64-LABEL: define dso_local <vscale x 2 x float> @test_vfwnmsac_vv_bf16mf2_f32m1_rm_tumu(
+// CHECK-RV64-SAME: <vscale x 2 x i1> [[VM:%.*]], <vscale x 2 x float> [[VD:%.*]], <vscale x 2 x bfloat> [[VS1:%.*]], <vscale x 2 x bfloat> [[VS2:%.*]], i64 noundef [[VL:%.*]]) #[[ATTR0]] {
+// CHECK-RV64-NEXT: [[ENTRY:.*:]]
+// CHECK-RV64-NEXT: [[TMP0:%.*]] = call <vscale x 2 x float> @llvm.riscv.vfwnmsac.mask.nxv2f32.nxv2bf16.nxv2bf16.i64(<vscale x 2 x float> [[VD]], <vscale x 2 x bfloat> [[VS1]], <vscale x 2 x bfloat> [[VS2]], <vscale x 2 x i1> [[VM]], i64 0, i64 [[VL]], i64 0)
+// CHECK-RV64-NEXT: ret <vscale x 2 x float> [[TMP0]]
+//
+vfloat32m1_t test_vfwnmsac_vv_bf16mf2_f32m1_rm_tumu(vbool32_t vm,
+ vfloat32m1_t vd,
+ vbfloat16mf2_t vs1,
+ vbfloat16mf2_t vs2,
+ size_t vl) {
+ return __riscv_vfwnmsac_tumu(vm, vd, vs1, vs2, __RISCV_FRM_RNE, vl);
+}
+
+// CHECK-RV64-LABEL: define dso_local <vscale x 2 x float> @test_vfwnmsac_vf_bf16mf2_f32m1_rm_tumu(
+// CHECK-RV64-SAME: <vscale x 2 x i1> [[VM:%.*]], <vscale x 2 x float> [[VD:%.*]], bfloat noundef [[VS1:%.*]], <vscale x 2 x bfloat> [[VS2:%.*]], i64 noundef [[VL:%.*]]) #[[ATTR0]] {
+// CHECK-RV64-NEXT: [[ENTRY:.*:]]
+// CHECK-RV64-NEXT: [[TMP0:%.*]] = call <vscale x 2 x float> @llvm.riscv.vfwnmsac.mask.nxv2f32.bf16.nxv2bf16.i64(<vscale x 2 x float> [[VD]], bfloat [[VS1]], <vscale x 2 x bfloat> [[VS2]], <vscale x 2 x i1> [[VM]], i64 0, i64 [[VL]], i64 0)
+// CHECK-RV64-NEXT: ret <vscale x 2 x float> [[TMP0]]
+//
+vfloat32m1_t test_vfwnmsac_vf_bf16mf2_f32m1_rm_tumu(vbool32_t vm,
+ vfloat32m1_t vd, __bf16 vs1,
+ vbfloat16mf2_t vs2,
+ size_t vl) {
+ return __riscv_vfwnmsac_tumu(vm, vd, vs1, vs2, __RISCV_FRM_RNE, vl);
+}
+
+// CHECK-RV64-LABEL: define dso_local <vscale x 4 x float> @test_vfwnmsac_vv_bf16m1_f32m2_rm_tumu(
+// CHECK-RV64-SAME: <vscale x 4 x i1> [[VM:%.*]], <vscale x 4 x float> [[VD:%.*]], <vscale x 4 x bfloat> [[VS1:%.*]], <vscale x 4 x bfloat> [[VS2:%.*]], i64 noundef [[VL:%.*]]) #[[ATTR0]] {
+// CHECK-RV64-NEXT: [[ENTRY:.*:]]
+// CHECK-RV64-NEXT: [[TMP0:%.*]] = call <vscale x 4 x float> @llvm.riscv.vfwnmsac.mask.nxv4f32.nxv4bf16.nxv4bf16.i64(<vscale x 4 x float> [[VD]], <vscale x 4 x bfloat> [[VS1]], <vscale x 4 x bfloat> [[VS2]], <vscale x 4 x i1> [[VM]], i64 0, i64 [[VL]], i64 0)
+// CHECK-RV64-NEXT: ret <vscale x 4 x float> [[TMP0]]
+//
+vfloat32m2_t test_vfwnmsac_vv_bf16m1_f32m2_rm_tumu(vbool16_t vm,
+ vfloat32m2_t vd,
+ vbfloat16m1_t vs1,
+ vbfloat16m1_t vs2,
+ size_t vl) {
+ return __riscv_vfwnmsac_tumu(vm, vd, vs1, vs2, __RISCV_FRM_RNE, vl);
+}
+
+// CHECK-RV64-LABEL: define dso_local <vscale x 4 x float> @test_vfwnmsac_vf_bf16m1_f32m2_rm_tumu(
+// CHECK-RV64-SAME: <vscale x 4 x i1> [[VM:%.*]], <vscale x 4 x float> [[VD:%.*]], bfloat noundef [[VS1:%.*]], <vscale x 4 x bfloat> [[VS2:%.*]], i64 noundef [[VL:%.*]]) #[[ATTR0]] {
+// CHECK-RV64-NEXT: [[ENTRY:.*:]]
+// CHECK-RV64-NEXT: [[TMP0:%.*]] = call <vscale x 4 x float> @llvm.riscv.vfwnmsac.mask.nxv4f32.bf16.nxv4bf16.i64(<vscale x 4 x float> [[VD]], bfloat [[VS1]], <vscale x 4 x bfloat> [[VS2]], <vscale x 4 x i1> [[VM]], i64 0, i64 [[VL]], i64 0)
+// CHECK-RV64-NEXT: ret <vscale x 4 x float> [[TMP0]]
+//
+vfloat32m2_t test_vfwnmsac_vf_bf16m1_f32m2_rm_tumu(vbool16_t vm,
+ vfloat32m2_t vd, __bf16 vs1,
+ vbfloat16m1_t vs2,
+ size_t vl) {
+ return __riscv_vfwnmsac_tumu(vm, vd, vs1, vs2, __RISCV_FRM_RNE, vl);
+}
+
+// CHECK-RV64-LABEL: define dso_local <vscale x 8 x float> @test_vfwnmsac_vv_bf16m2_f32m4_rm_tumu(
+// CHECK-RV64-SAME: <vscale x 8 x i1> [[VM:%.*]], <vscale x 8 x float> [[VD:%.*]], <vscale x 8 x bfloat> [[VS1:%.*]], <vscale x 8 x bfloat> [[VS2:%.*]], i64 noundef [[VL:%.*]]) #[[ATTR0]] {
+// CHECK-RV64-NEXT: [[ENTRY:.*:]]
+// CHECK-RV64-NEXT: [[TMP0:%.*]] = call <vscale x 8 x float> @llvm.riscv.vfwnmsac.mask.nxv8f32.nxv8bf16.nxv8bf16.i64(<vscale x 8 x float> [[VD]], <vscale x 8 x bfloat> [[VS1]], <vscale x 8 x bfloat> [[VS2]], <vscale x 8 x i1> [[VM]], i64 0, i64 [[VL]], i64 0)
+// CHECK-RV64-NEXT: ret <vscale x 8 x float> [[TMP0]]
+//
+vfloat32m4_t test_vfwnmsac_vv_bf16m2_f32m4_rm_tumu(vbool8_t vm, vfloat32m4_t vd,
+ vbfloat16m2_t vs1,
+ vbfloat16m2_t vs2,
+ size_t vl) {
+ return __riscv_vfwnmsac_tumu(vm, vd, vs1, vs2, __RISCV_FRM_RNE, vl);
+}
+
+// CHECK-RV64-LABEL: define dso_local <vscale x 8 x float> @test_vfwnmsac_vf_bf16m2_f32m4_rm_tumu(
+// CHECK-RV64-SAME: <vscale x 8 x i1> [[VM:%.*]], <vscale x 8 x float> [[VD:%.*]], bfloat noundef [[VS1:%.*]], <vscale x 8 x bfloat> [[VS2:%.*]], i64 noundef [[VL:%.*]]) #[[ATTR0]] {
+// CHECK-RV64-NEXT: [[ENTRY:.*:]]
+// CHECK-RV64-NEXT: [[TMP0:%.*]] = call <vscale x 8 x float> @llvm.riscv.vfwnmsac.mask.nxv8f32.bf16.nxv8bf16.i64(<vscale x 8 x float> [[VD]], bfloat [[VS1]], <vscale x 8 x bfloat> [[VS2]], <vscale x 8 x i1> [[VM]], i64 0, i64 [[VL]], i64 0)
+// CHECK-RV64-NEXT: ret <vscale x 8 x float> [[TMP0]]
+//
+vfloat32m4_t test_vfwnmsac_vf_bf16m2_f32m4_rm_tumu(vbool8_t vm, vfloat32m4_t vd,
+ __bf16 vs1,
+ vbfloat16m2_t vs2,
+ size_t vl) {
+ return __riscv_vfwnmsac_tumu(vm, vd, vs1, vs2, __RISCV_FRM_RNE, vl);
+}
+
+// CHECK-RV64-LABEL: define dso_local <vscale x 16 x float> @test_vfwnmsac_vv_bf16m4_f32m8_rm_tumu(
+// CHECK-RV64-SAME: <vscale x 16 x i1> [[VM:%.*]], <vscale x 16 x float> [[VD:%.*]], <vscale x 16 x bfloat> [[VS1:%.*]], <vscale x 16 x bfloat> [[VS2:%.*]], i64 noundef [[VL:%.*]]) #[[ATTR0]] {
+// CHECK-RV64-NEXT: [[ENTRY:.*:]]
+// CHECK-RV64-NEXT: [[TMP0:%.*]] = call <vscale x 16 x float> @llvm.riscv.vfwnmsac.mask.nxv16f32.nxv16bf16.nxv16bf16.i64(<vscale x 16 x float> [[VD]], <vscale x 16 x bfloat> [[VS1]], <vscale x 16 x bfloat> [[VS2]], <vscale x 16 x i1> [[VM]], i64 0, i64 [[VL]], i64 0)
+// CHECK-RV64-NEXT: ret <vscale x 16 x float> [[TMP0]]
+//
+vfloat32m8_t test_vfwnmsac_vv_bf16m4_f32m8_rm_tumu(vbool4_t vm, vfloat32m8_t vd,
+ vbfloat16m4_t vs1,
+ vbfloat16m4_t vs2,
+ size_t vl) {
+ return __riscv_vfwnmsac_tumu(vm, vd, vs1, vs2, __RISCV_FRM_RNE, vl);
+}
+
+// CHECK-RV64-LABEL: define dso_local <vscale x 16 x float> @test_vfwnmsac_vf_bf16m4_f32m8_rm_tumu(
+// CHECK-RV64-SAME: <vscale x 16 x i1> [[VM:%.*]], <vscale x 16 x float> [[VD:%.*]], bfloat noundef [[VS1:%.*]], <vscale x 16 x bfloat> [[VS2:%.*]], i64 noundef [[VL:%.*]]) #[[ATTR0]] {
+// CHECK-RV64-NEXT: [[ENTRY:.*:]]
+// CHECK-RV64-NEXT: [[TMP0:%.*]] = call <vscale x 16 x float> @llvm.riscv.vfwnmsac.mask.nxv16f32.bf16.nxv16bf16.i64(<vscale x 16 x float> [[VD]], bfloat [[VS1]], <vscale x 16 x bfloat> [[VS2]], <vscale x 16 x i1> [[VM]], i64 0, i64 [[VL]], i64 0)
+// CHECK-RV64-NEXT: ret <vscale x 16 x float> [[TMP0]]
+//
+vfloat32m8_t test_vfwnmsac_vf_bf16m4_f32m8_rm_tumu(vbool4_t vm, vfloat32m8_t vd,
+ __bf16 vs1,
+ vbfloat16m4_t vs2,
+ size_t vl) {
+ return __riscv_vfwnmsac_tumu(vm, vd, vs1, vs2, __RISCV_FRM_RNE, vl);
+}
+
+// CHECK-RV64-LABEL: define dso_local <vscale x 1 x float> @test_vfwnmsac_vv_bf16mf4_f32mf2_rm_mu(
+// CHECK-RV64-SAME: <vscale x 1 x i1> [[VM:%.*]], <vscale x 1 x float> [[VD:%.*]], <vscale x 1 x bfloat> [[VS1:%.*]], <vscale x 1 x bfloat> [[VS2:%.*]], i64 noundef [[VL:%.*]]) #[[ATTR0]] {
+// CHECK-RV64-NEXT: [[ENTRY:.*:]]
+// CHECK-RV64-NEXT: [[TMP0:%.*]] = call <vscale x 1 x float> @llvm.riscv.vfwnmsac.mask.nxv1f32.nxv1bf16.nxv1bf16.i64(<vscale x 1 x float> [[VD]], <vscale x 1 x bfloat> [[VS1]], <vscale x 1 x bfloat> [[VS2]], <vscale x 1 x i1> [[VM]], i64 0, i64 [[VL]], i64 1)
+// CHECK-RV64-NEXT: ret <vscale x 1 x float> [[TMP0]]
+//
+vfloat32mf2_t test_vfwnmsac_vv_bf16mf4_f32mf2_rm_mu(vbool64_t vm,
+ vfloat32mf2_t vd,
+ vbfloat16mf4_t vs1,
+ vbfloat16mf4_t vs2,
+ size_t vl) {
+ return __riscv_vfwnmsac_mu(vm, vd, vs1, vs2, __RISCV_FRM_RNE, vl);
+}
+
+// CHECK-RV64-LABEL: define dso_local <vscale x 1 x float> @test_vfwnmsac_vf_bf16mf4_f32mf2_rm_mu(
+// CHECK-RV64-SAME: <vscale x 1 x i1> [[VM:%.*]], <vscale x 1 x float> [[VD:%.*]], bfloat noundef [[VS1:%.*]], <vscale x 1 x bfloat> [[VS2:%.*]], i64 noundef [[VL:%.*]]) #[[ATTR0]] {
+// CHECK-RV64-NEXT: [[ENTRY:.*:]]
+// CHECK-RV64-NEXT: [[TMP0:%.*]] = call <vscale x 1 x float> @llvm.riscv.vfwnmsac.mask.nxv1f32.bf16.nxv1bf16.i64(<vscale x 1 x float> [[VD]], bfloat [[VS1]], <vscale x 1 x bfloat> [[VS2]], <vscale x 1 x i1> [[VM]], i64 0, i64 [[VL]], i64 1)
+// CHECK-RV64-NEXT: ret <vscale x 1 x float> [[TMP0]]
+//
+vfloat32mf2_t test_vfwnmsac_vf_bf16mf4_f32mf2_rm_mu(
+ vbool64_t vm, vfloat32mf2_t vd, __bf16 vs1, vbfloat16mf4_t vs2, size_t vl) {
+ return __riscv_vfwnmsac_mu(vm, vd, vs1, vs2, __RISCV_FRM_RNE, vl);
+}
+
+// CHECK-RV64-LABEL: define dso_local <vscale x 2 x float> @test_vfwnmsac_vv_bf16mf2_f32m1_rm_mu(
+// CHECK-RV64-SAME: <vscale x 2 x i1> [[VM:%.*]], <vscale x 2 x float> [[VD:%.*]], <vscale x 2 x bfloat> [[VS1:%.*]], <vscale x 2 x bfloat> [[VS2:%.*]], i64 noundef [[VL:%.*]]) #[[ATTR0]] {
+// CHECK-RV64-NEXT: [[ENTRY:.*:]]
+// CHECK-RV64-NEXT: [[TMP0:%.*]] = call <vscale x 2 x float> @llvm.riscv.vfwnmsac.mask.nxv2f32.nxv2bf16.nxv2bf16.i64(<vscale x 2 x float> [[VD]], <vscale x 2 x bfloat> [[VS1]], <vscale x 2 x bfloat> [[VS2]], <vscale x 2 x i1> [[VM]], i64 0, i64 [[VL]], i64 1)
+// CHECK-RV64-NEXT: ret <vscale x 2 x float> [[TMP0]]
+//
+vfloat32m1_t test_vfwnmsac_vv_bf16mf2_f32m1_rm_mu(vbool32_t vm, vfloat32m1_t vd,
+ vbfloat16mf2_t vs1,
+ vbfloat16mf2_t vs2,
+ size_t vl) {
+ return __riscv_vfwnmsac_mu(vm, vd, vs1, vs2, __RISCV_FRM_RNE, vl);
+}
+
+// CHECK-RV64-LABEL: define dso_local <vscale x 2 x float> @test_vfwnmsac_vf_bf16mf2_f32m1_rm_mu(
+// CHECK-RV64-SAME: <vscale x 2 x i1> [[VM:%.*]], <vscale x 2 x float> [[VD:%.*]], bfloat noundef [[VS1:%.*]], <vscale x 2 x bfloat> [[VS2:%.*]], i64 noundef [[VL:%.*]]) #[[ATTR0]] {
+// CHECK-RV64-NEXT: [[ENTRY:.*:]]
+// CHECK-RV64-NEXT: [[TMP0:%.*]] = call <vscale x 2 x float> @llvm.riscv.vfwnmsac.mask.nxv2f32.bf16.nxv2bf16.i64(<vscale x 2 x float> [[VD]], bfloat [[VS1]], <vscale x 2 x bfloat> [[VS2]], <vscale x 2 x i1> [[VM]], i64 0, i64 [[VL]], i64 1)
+// CHECK-RV64-NEXT: ret <vscale x 2 x float> [[TMP0]]
+//
+vfloat32m1_t test_vfwnmsac_vf_bf16mf2_f32m1_rm_mu(vbool32_t vm, vfloat32m1_t vd,
+ __bf16 vs1,
+ vbfloat16mf2_t vs2,
+ size_t vl) {
+ return __riscv_vfwnmsac_mu(vm, vd, vs1, vs2, __RISCV_FRM_RNE, vl);
+}
+
+// CHECK-RV64-LABEL: define dso_local <vscale x 4 x float> @test_vfwnmsac_vv_bf16m1_f32m2_rm_mu(
+// CHECK-RV64-SAME: <vscale x 4 x i1> [[VM:%.*]], <vscale x 4 x float> [[VD:%.*]], <vscale x 4 x bfloat> [[VS1:%.*]], <vscale x 4 x bfloat> [[VS2:%.*]], i64 noundef [[VL:%.*]]) #[[ATTR0]] {
+// CHECK-RV64-NEXT: [[ENTRY:.*:]]
+// CHECK-RV64-NEXT: [[TMP0:%.*]] = call <vscale x 4 x float> @llvm.riscv.vfwnmsac.mask.nxv4f32.nxv4bf16.nxv4bf16.i64(<vscale x 4 x float> [[VD]], <vscale x 4 x bfloat> [[VS1]], <vscale x 4 x bfloat> [[VS2]], <vscale x 4 x i1> [[VM]], i64 0, i64 [[VL]], i64 1)
+// CHECK-RV64-NEXT: ret <vscale x 4 x float> [[TMP0]]
+//
+vfloat32m2_t test_vfwnmsac_vv_bf16m1_f32m2_rm_mu(vbool16_t vm, vfloat32m2_t vd,
+ vbfloat16m1_t vs1,
+ vbfloat16m1_t vs2, size_t vl) {
+ return __riscv_vfwnmsac_mu(vm, vd, vs1, vs2, __RISCV_FRM_RNE, vl);
+}
+
+// CHECK-RV64-LABEL: define dso_local <vscale x 4 x float> @test_vfwnmsac_vf_bf16m1_f32m2_rm_mu(
+// CHECK-RV64-SAME: <vscale x 4 x i1> [[VM:%.*]], <vscale x 4 x float> [[VD:%.*]], bfloat noundef [[VS1:%.*]], <vscale x 4 x bfloat> [[VS2:%.*]], i64 noundef [[VL:%.*]]) #[[ATTR0]] {
+// CHECK-RV64-NEXT: [[ENTRY:.*:]]
+// CHECK-RV64-NEXT: [[TMP0:%.*]] = call <vscale x 4 x float> @llvm.riscv.vfwnmsac.mask.nxv4f32.bf16.nxv4bf16.i64(<vscale x 4 x float> [[VD]], bfloat [[VS1]], <vscale x 4 x bfloat> [[VS2]], <vscale x 4 x i1> [[VM]], i64 0, i64 [[VL]], i64 1)
+// CHECK-RV64-NEXT: ret <vscale x 4 x float> [[TMP0]]
+//
+vfloat32m2_t test_vfwnmsac_vf_bf16m1_f32m2_rm_mu(vbool16_t vm, vfloat32m2_t vd,
+ __bf16 vs1, vbfloat16m1_t vs2,
+ size_t vl) {
+ return __riscv_vfwnmsac_mu(vm, vd, vs1, vs2, __RISCV_FRM_RNE, vl);
+}
+
+// CHECK-RV64-LABEL: define dso_local <vscale x 8 x float> @test_vfwnmsac_vv_bf16m2_f32m4_rm_mu(
+// CHECK-RV64-SAME: <vscale x 8 x i1> [[VM:%.*]], <vscale x 8 x float> [[VD:%.*]], <vscale x 8 x bfloat> [[VS1:%.*]], <vscale x 8 x bfloat> [[VS2:%.*]], i64 noundef [[VL:%.*]]) #[[ATTR0]] {
+// CHECK-RV64-NEXT: [[ENTRY:.*:]]
+// CHECK-RV64-NEXT: [[TMP0:%.*]] = call <vscale x 8 x float> @llvm.riscv.vfwnmsac.mask.nxv8f32.nxv8bf16.nxv8bf16.i64(<vscale x 8 x float> [[VD]], <vscale x 8 x bfloat> [[VS1]], <vscale x 8 x bfloat> [[VS2]], <vscale x 8 x i1> [[VM]], i64 0, i64 [[VL]], i64 1)
+// CHECK-RV64-NEXT: ret <vscale x 8 x float> [[TMP0]]
+//
+vfloat32m4_t test_vfwnmsac_vv_bf16m2_f32m4_rm_mu(vbool8_t vm, vfloat32m4_t vd,
+ vbfloat16m2_t vs1,
+ vbfloat16m2_t vs2, size_t vl) {
+ return __riscv_vfwnmsac_mu(vm, vd, vs1, vs2, __RISCV_FRM_RNE, vl);
+}
+
+// CHECK-RV64-LABEL: define dso_local <vscale x 8 x float> @test_vfwnmsac_vf_bf16m2_f32m4_rm_mu(
+// CHECK-RV64-SAME: <vscale x 8 x i1> [[VM:%.*]], <vscale x 8 x float> [[VD:%.*]], bfloat noundef [[VS1:%.*]], <vscale x 8 x bfloat> [[VS2:%.*]], i64 noundef [[VL:%.*]]) #[[ATTR0]] {
+// CHECK-RV64-NEXT: [[ENTRY:.*:]]
+// CHECK-RV64-NEXT: [[TMP0:%.*]] = call <vscale x 8 x float> @llvm.riscv.vfwnmsac.mask.nxv8f32.bf16.nxv8bf16.i64(<vscale x 8 x float> [[VD]], bfloat [[VS1]], <vscale x 8 x bfloat> [[VS2]], <vscale x 8 x i1> [[VM]], i64 0, i64 [[VL]], i64 1)
+// CHECK-RV64-NEXT: ret <vscale x 8 x float> [[TMP0]]
+//
+vfloat32m4_t test_vfwnmsac_vf_bf16m2_f32m4_rm_mu(vbool8_t vm, vfloat32m4_t vd,
+ __bf16 vs1, vbfloat16m2_t vs2,
+ size_t vl) {
+ return __riscv_vfwnmsac_mu(vm, vd, vs1, vs2, __RISCV_FRM_RNE, vl);
+}
+
+// CHECK-RV64-LABEL: define dso_local <vscale x 16 x float> @test_vfwnmsac_vv_bf16m4_f32m8_rm_mu(
+// CHECK-RV64-SAME: <vscale x 16 x i1> [[VM:%.*]], <vscale x 16 x float> [[VD:%.*]], <vscale x 16 x bfloat> [[VS1:%.*]], <vscale x 16 x bfloat> [[VS2:%.*]], i64 noundef [[VL:%.*]]) #[[ATTR0]] {
+// CHECK-RV64-NEXT: [[ENTRY:.*:]]
+// CHECK-RV64-NEXT: [[TMP0:%.*]] = call <vscale x 16 x float> @llvm.riscv.vfwnmsac.mask.nxv16f32.nxv16bf16.nxv16bf16.i64(<vscale x 16 x float> [[VD]], <vscale x 16 x bfloat> [[VS1]], <vscale x 16 x bfloat> [[VS2]], <vscale x 16 x i1> [[VM]], i64 0, i64 [[VL]], i64 1)
+// CHECK-RV64-NEXT: ret <vscale x 16 x float> [[TMP0]]
+//
+vfloat32m8_t test_vfwnmsac_vv_bf16m4_f32m8_rm_mu(vbool4_t vm, vfloat32m8_t vd,
+ vbfloat16m4_t vs1,
+ vbfloat16m4_t vs2, size_t vl) {
+ return __riscv_vfwnmsac_mu(vm, vd, vs1, vs2, __RISCV_FRM_RNE, vl);
+}
+
+// CHECK-RV64-LABEL: define dso_local <vscale x 16 x float> @test_vfwnmsac_vf_bf16m4_f32m8_rm_mu(
+// CHECK-RV64-SAME: <vscale x 16 x i1> [[VM:%.*]], <vscale x 16 x float> [[VD:%.*]], bfloat noundef [[VS1:%.*]], <vscale x 16 x bfloat> [[VS2:%.*]], i64 noundef [[VL:%.*]]) #[[ATTR0]] {
+// CHECK-RV64-NEXT: [[ENTRY:.*:]]
+// CHECK-RV64-NEXT: [[TMP0:%.*]] = call <vscale x 16 x float> @llvm.riscv.vfwnmsac.mask.nxv16f32.bf16.nxv16bf16.i64(<vscale x 16 x float> [[VD]], bfloat [[VS1]], <vscale x 16 x bfloat> [[VS2]], <vscale x 16 x i1> [[VM]], i64 0, i64 [[VL]], i64 1)
+// CHECK-RV64-NEXT: ret <vscale x 16 x float> [[TMP0]]
+//
+vfloat32m8_t test_vfwnmsac_vf_bf16m4_f32m8_rm_mu(vbool4_t vm, vfloat32m8_t vd,
+ __bf16 vs1, vbfloat16m4_t vs2,
+ size_t vl) {
+ return __riscv_vfwnmsac_mu(vm, vd, vs1, vs2, __RISCV_FRM_RNE, vl);
+}
diff --git a/clang/test/CodeGen/RISCV/rvv-intrinsics-autogenerated/zvfbfa/policy/overloaded/vfwsub.c b/clang/test/CodeGen/RISCV/rvv-intrinsics-autogenerated/zvfbfa/policy/overloaded/vfwsub.c
new file mode 100644
index 0000000..1378bc9
--- /dev/null
+++ b/clang/test/CodeGen/RISCV/rvv-intrinsics-autogenerated/zvfbfa/policy/overloaded/vfwsub.c
@@ -0,0 +1,1932 @@
+// NOTE: Assertions have been autogenerated by utils/update_cc_test_checks.py UTC_ARGS: --version 5
+// REQUIRES: riscv-registered-target
+// RUN: %clang_cc1 -triple riscv64 -target-feature +v \
+// RUN: -target-feature +experimental-zvfbfa -disable-O0-optnone \
+// RUN: -emit-llvm %s -o - | opt -S -passes=mem2reg | \
+// RUN: FileCheck --check-prefix=CHECK-RV64 %s
+
+#include <riscv_vector.h>
+
+// CHECK-RV64-LABEL: define dso_local <vscale x 1 x float> @test_vfwsub_vv_bf16mf4_f32mf2_tu(
+// CHECK-RV64-SAME: <vscale x 1 x float> [[VD:%.*]], <vscale x 1 x bfloat> [[VS2:%.*]], <vscale x 1 x bfloat> [[VS1:%.*]], i64 noundef [[VL:%.*]]) #[[ATTR0:[0-9]+]] {
+// CHECK-RV64-NEXT: [[ENTRY:.*:]]
+// CHECK-RV64-NEXT: [[TMP0:%.*]] = call <vscale x 1 x float> @llvm.riscv.vfwsub.nxv1f32.nxv1bf16.nxv1bf16.i64(<vscale x 1 x float> [[VD]], <vscale x 1 x bfloat> [[VS2]], <vscale x 1 x bfloat> [[VS1]], i64 7, i64 [[VL]])
+// CHECK-RV64-NEXT: ret <vscale x 1 x float> [[TMP0]]
+//
+vfloat32mf2_t test_vfwsub_vv_bf16mf4_f32mf2_tu(vfloat32mf2_t vd,
+ vbfloat16mf4_t vs2,
+ vbfloat16mf4_t vs1, size_t vl) {
+ return __riscv_vfwsub_vv_tu(vd, vs2, vs1, vl);
+}
+
+// CHECK-RV64-LABEL: define dso_local <vscale x 1 x float> @test_vfwsub_vf_bf16mf4_f32mf2_tu(
+// CHECK-RV64-SAME: <vscale x 1 x float> [[VD:%.*]], <vscale x 1 x bfloat> [[VS2:%.*]], bfloat noundef [[RS1:%.*]], i64 noundef [[VL:%.*]]) #[[ATTR0]] {
+// CHECK-RV64-NEXT: [[ENTRY:.*:]]
+// CHECK-RV64-NEXT: [[TMP0:%.*]] = call <vscale x 1 x float> @llvm.riscv.vfwsub.nxv1f32.nxv1bf16.bf16.i64(<vscale x 1 x float> [[VD]], <vscale x 1 x bfloat> [[VS2]], bfloat [[RS1]], i64 7, i64 [[VL]])
+// CHECK-RV64-NEXT: ret <vscale x 1 x float> [[TMP0]]
+//
+vfloat32mf2_t test_vfwsub_vf_bf16mf4_f32mf2_tu(vfloat32mf2_t vd,
+ vbfloat16mf4_t vs2, __bf16 rs1,
+ size_t vl) {
+ return __riscv_vfwsub_vf_tu(vd, vs2, rs1, vl);
+}
+
+// CHECK-RV64-LABEL: define dso_local <vscale x 1 x float> @test_vfwsub_wv_bf16mf4_f32mf2_tu(
+// CHECK-RV64-SAME: <vscale x 1 x float> [[VD:%.*]], <vscale x 1 x float> [[VS2:%.*]], <vscale x 1 x bfloat> [[VS1:%.*]], i64 noundef [[VL:%.*]]) #[[ATTR0]] {
+// CHECK-RV64-NEXT: [[ENTRY:.*:]]
+// CHECK-RV64-NEXT: [[TMP0:%.*]] = call <vscale x 1 x float> @llvm.riscv.vfwsub.w.nxv1f32.nxv1bf16.i64(<vscale x 1 x float> [[VD]], <vscale x 1 x float> [[VS2]], <vscale x 1 x bfloat> [[VS1]], i64 7, i64 [[VL]])
+// CHECK-RV64-NEXT: ret <vscale x 1 x float> [[TMP0]]
+//
+vfloat32mf2_t test_vfwsub_wv_bf16mf4_f32mf2_tu(vfloat32mf2_t vd,
+ vfloat32mf2_t vs2,
+ vbfloat16mf4_t vs1, size_t vl) {
+ return __riscv_vfwsub_wv_tu(vd, vs2, vs1, vl);
+}
+
+// CHECK-RV64-LABEL: define dso_local <vscale x 1 x float> @test_vfwsub_wf_bf16_f32mf2_tu(
+// CHECK-RV64-SAME: <vscale x 1 x float> [[VD:%.*]], <vscale x 1 x float> [[VS2:%.*]], bfloat noundef [[RS1:%.*]], i64 noundef [[VL:%.*]]) #[[ATTR0]] {
+// CHECK-RV64-NEXT: [[ENTRY:.*:]]
+// CHECK-RV64-NEXT: [[TMP0:%.*]] = call <vscale x 1 x float> @llvm.riscv.vfwsub.w.nxv1f32.bf16.i64(<vscale x 1 x float> [[VD]], <vscale x 1 x float> [[VS2]], bfloat [[RS1]], i64 7, i64 [[VL]])
+// CHECK-RV64-NEXT: ret <vscale x 1 x float> [[TMP0]]
+//
+vfloat32mf2_t test_vfwsub_wf_bf16_f32mf2_tu(vfloat32mf2_t vd, vfloat32mf2_t vs2,
+ __bf16 rs1, size_t vl) {
+ return __riscv_vfwsub_wf_tu(vd, vs2, rs1, vl);
+}
+
+// CHECK-RV64-LABEL: define dso_local <vscale x 2 x float> @test_vfwsub_vv_bf16mf2_f32m1_tu(
+// CHECK-RV64-SAME: <vscale x 2 x float> [[VD:%.*]], <vscale x 2 x bfloat> [[VS2:%.*]], <vscale x 2 x bfloat> [[VS1:%.*]], i64 noundef [[VL:%.*]]) #[[ATTR0]] {
+// CHECK-RV64-NEXT: [[ENTRY:.*:]]
+// CHECK-RV64-NEXT: [[TMP0:%.*]] = call <vscale x 2 x float> @llvm.riscv.vfwsub.nxv2f32.nxv2bf16.nxv2bf16.i64(<vscale x 2 x float> [[VD]], <vscale x 2 x bfloat> [[VS2]], <vscale x 2 x bfloat> [[VS1]], i64 7, i64 [[VL]])
+// CHECK-RV64-NEXT: ret <vscale x 2 x float> [[TMP0]]
+//
+vfloat32m1_t test_vfwsub_vv_bf16mf2_f32m1_tu(vfloat32m1_t vd,
+ vbfloat16mf2_t vs2,
+ vbfloat16mf2_t vs1, size_t vl) {
+ return __riscv_vfwsub_vv_tu(vd, vs2, vs1, vl);
+}
+
+// CHECK-RV64-LABEL: define dso_local <vscale x 2 x float> @test_vfwsub_vf_bf16mf2_f32m1_tu(
+// CHECK-RV64-SAME: <vscale x 2 x float> [[VD:%.*]], <vscale x 2 x bfloat> [[VS2:%.*]], bfloat noundef [[RS1:%.*]], i64 noundef [[VL:%.*]]) #[[ATTR0]] {
+// CHECK-RV64-NEXT: [[ENTRY:.*:]]
+// CHECK-RV64-NEXT: [[TMP0:%.*]] = call <vscale x 2 x float> @llvm.riscv.vfwsub.nxv2f32.nxv2bf16.bf16.i64(<vscale x 2 x float> [[VD]], <vscale x 2 x bfloat> [[VS2]], bfloat [[RS1]], i64 7, i64 [[VL]])
+// CHECK-RV64-NEXT: ret <vscale x 2 x float> [[TMP0]]
+//
+vfloat32m1_t test_vfwsub_vf_bf16mf2_f32m1_tu(vfloat32m1_t vd,
+ vbfloat16mf2_t vs2, __bf16 rs1,
+ size_t vl) {
+ return __riscv_vfwsub_vf_tu(vd, vs2, rs1, vl);
+}
+
+// CHECK-RV64-LABEL: define dso_local <vscale x 2 x float> @test_vfwsub_wv_bf16mf2_f32m1_tu(
+// CHECK-RV64-SAME: <vscale x 2 x float> [[VD:%.*]], <vscale x 2 x float> [[VS2:%.*]], <vscale x 2 x bfloat> [[VS1:%.*]], i64 noundef [[VL:%.*]]) #[[ATTR0]] {
+// CHECK-RV64-NEXT: [[ENTRY:.*:]]
+// CHECK-RV64-NEXT: [[TMP0:%.*]] = call <vscale x 2 x float> @llvm.riscv.vfwsub.w.nxv2f32.nxv2bf16.i64(<vscale x 2 x float> [[VD]], <vscale x 2 x float> [[VS2]], <vscale x 2 x bfloat> [[VS1]], i64 7, i64 [[VL]])
+// CHECK-RV64-NEXT: ret <vscale x 2 x float> [[TMP0]]
+//
+vfloat32m1_t test_vfwsub_wv_bf16mf2_f32m1_tu(vfloat32m1_t vd, vfloat32m1_t vs2,
+ vbfloat16mf2_t vs1, size_t vl) {
+ return __riscv_vfwsub_wv_tu(vd, vs2, vs1, vl);
+}
+
+// CHECK-RV64-LABEL: define dso_local <vscale x 2 x float> @test_vfwsub_wf_bf16_f32m1_tu(
+// CHECK-RV64-SAME: <vscale x 2 x float> [[VD:%.*]], <vscale x 2 x float> [[VS2:%.*]], bfloat noundef [[RS1:%.*]], i64 noundef [[VL:%.*]]) #[[ATTR0]] {
+// CHECK-RV64-NEXT: [[ENTRY:.*:]]
+// CHECK-RV64-NEXT: [[TMP0:%.*]] = call <vscale x 2 x float> @llvm.riscv.vfwsub.w.nxv2f32.bf16.i64(<vscale x 2 x float> [[VD]], <vscale x 2 x float> [[VS2]], bfloat [[RS1]], i64 7, i64 [[VL]])
+// CHECK-RV64-NEXT: ret <vscale x 2 x float> [[TMP0]]
+//
+vfloat32m1_t test_vfwsub_wf_bf16_f32m1_tu(vfloat32m1_t vd, vfloat32m1_t vs2,
+ __bf16 rs1, size_t vl) {
+ return __riscv_vfwsub_wf_tu(vd, vs2, rs1, vl);
+}
+
+// CHECK-RV64-LABEL: define dso_local <vscale x 4 x float> @test_vfwsub_vv_bf16m1_f32m2_tu(
+// CHECK-RV64-SAME: <vscale x 4 x float> [[VD:%.*]], <vscale x 4 x bfloat> [[VS2:%.*]], <vscale x 4 x bfloat> [[VS1:%.*]], i64 noundef [[VL:%.*]]) #[[ATTR0]] {
+// CHECK-RV64-NEXT: [[ENTRY:.*:]]
+// CHECK-RV64-NEXT: [[TMP0:%.*]] = call <vscale x 4 x float> @llvm.riscv.vfwsub.nxv4f32.nxv4bf16.nxv4bf16.i64(<vscale x 4 x float> [[VD]], <vscale x 4 x bfloat> [[VS2]], <vscale x 4 x bfloat> [[VS1]], i64 7, i64 [[VL]])
+// CHECK-RV64-NEXT: ret <vscale x 4 x float> [[TMP0]]
+//
+vfloat32m2_t test_vfwsub_vv_bf16m1_f32m2_tu(vfloat32m2_t vd, vbfloat16m1_t vs2,
+ vbfloat16m1_t vs1, size_t vl) {
+ return __riscv_vfwsub_vv_tu(vd, vs2, vs1, vl);
+}
+
+// CHECK-RV64-LABEL: define dso_local <vscale x 4 x float> @test_vfwsub_vf_bf16m1_f32m2_tu(
+// CHECK-RV64-SAME: <vscale x 4 x float> [[VD:%.*]], <vscale x 4 x bfloat> [[VS2:%.*]], bfloat noundef [[RS1:%.*]], i64 noundef [[VL:%.*]]) #[[ATTR0]] {
+// CHECK-RV64-NEXT: [[ENTRY:.*:]]
+// CHECK-RV64-NEXT: [[TMP0:%.*]] = call <vscale x 4 x float> @llvm.riscv.vfwsub.nxv4f32.nxv4bf16.bf16.i64(<vscale x 4 x float> [[VD]], <vscale x 4 x bfloat> [[VS2]], bfloat [[RS1]], i64 7, i64 [[VL]])
+// CHECK-RV64-NEXT: ret <vscale x 4 x float> [[TMP0]]
+//
+vfloat32m2_t test_vfwsub_vf_bf16m1_f32m2_tu(vfloat32m2_t vd, vbfloat16m1_t vs2,
+ __bf16 rs1, size_t vl) {
+ return __riscv_vfwsub_vf_tu(vd, vs2, rs1, vl);
+}
+
+// CHECK-RV64-LABEL: define dso_local <vscale x 4 x float> @test_vfwsub_wv_bf16m1_f32m2_tu(
+// CHECK-RV64-SAME: <vscale x 4 x float> [[VD:%.*]], <vscale x 4 x float> [[VS2:%.*]], <vscale x 4 x bfloat> [[VS1:%.*]], i64 noundef [[VL:%.*]]) #[[ATTR0]] {
+// CHECK-RV64-NEXT: [[ENTRY:.*:]]
+// CHECK-RV64-NEXT: [[TMP0:%.*]] = call <vscale x 4 x float> @llvm.riscv.vfwsub.w.nxv4f32.nxv4bf16.i64(<vscale x 4 x float> [[VD]], <vscale x 4 x float> [[VS2]], <vscale x 4 x bfloat> [[VS1]], i64 7, i64 [[VL]])
+// CHECK-RV64-NEXT: ret <vscale x 4 x float> [[TMP0]]
+//
+vfloat32m2_t test_vfwsub_wv_bf16m1_f32m2_tu(vfloat32m2_t vd, vfloat32m2_t vs2,
+ vbfloat16m1_t vs1, size_t vl) {
+ return __riscv_vfwsub_wv_tu(vd, vs2, vs1, vl);
+}
+
+// CHECK-RV64-LABEL: define dso_local <vscale x 4 x float> @test_vfwsub_wf_bf16_f32m2_tu(
+// CHECK-RV64-SAME: <vscale x 4 x float> [[VD:%.*]], <vscale x 4 x float> [[VS2:%.*]], bfloat noundef [[RS1:%.*]], i64 noundef [[VL:%.*]]) #[[ATTR0]] {
+// CHECK-RV64-NEXT: [[ENTRY:.*:]]
+// CHECK-RV64-NEXT: [[TMP0:%.*]] = call <vscale x 4 x float> @llvm.riscv.vfwsub.w.nxv4f32.bf16.i64(<vscale x 4 x float> [[VD]], <vscale x 4 x float> [[VS2]], bfloat [[RS1]], i64 7, i64 [[VL]])
+// CHECK-RV64-NEXT: ret <vscale x 4 x float> [[TMP0]]
+//
+vfloat32m2_t test_vfwsub_wf_bf16_f32m2_tu(vfloat32m2_t vd, vfloat32m2_t vs2,
+ __bf16 rs1, size_t vl) {
+ return __riscv_vfwsub_wf_tu(vd, vs2, rs1, vl);
+}
+
+// CHECK-RV64-LABEL: define dso_local <vscale x 8 x float> @test_vfwsub_vv_bf16m2_f32m4_tu(
+// CHECK-RV64-SAME: <vscale x 8 x float> [[VD:%.*]], <vscale x 8 x bfloat> [[VS2:%.*]], <vscale x 8 x bfloat> [[VS1:%.*]], i64 noundef [[VL:%.*]]) #[[ATTR0]] {
+// CHECK-RV64-NEXT: [[ENTRY:.*:]]
+// CHECK-RV64-NEXT: [[TMP0:%.*]] = call <vscale x 8 x float> @llvm.riscv.vfwsub.nxv8f32.nxv8bf16.nxv8bf16.i64(<vscale x 8 x float> [[VD]], <vscale x 8 x bfloat> [[VS2]], <vscale x 8 x bfloat> [[VS1]], i64 7, i64 [[VL]])
+// CHECK-RV64-NEXT: ret <vscale x 8 x float> [[TMP0]]
+//
+vfloat32m4_t test_vfwsub_vv_bf16m2_f32m4_tu(vfloat32m4_t vd, vbfloat16m2_t vs2,
+ vbfloat16m2_t vs1, size_t vl) {
+ return __riscv_vfwsub_vv_tu(vd, vs2, vs1, vl);
+}
+
+// CHECK-RV64-LABEL: define dso_local <vscale x 8 x float> @test_vfwsub_vf_bf16m2_f32m4_tu(
+// CHECK-RV64-SAME: <vscale x 8 x float> [[VD:%.*]], <vscale x 8 x bfloat> [[VS2:%.*]], bfloat noundef [[RS1:%.*]], i64 noundef [[VL:%.*]]) #[[ATTR0]] {
+// CHECK-RV64-NEXT: [[ENTRY:.*:]]
+// CHECK-RV64-NEXT: [[TMP0:%.*]] = call <vscale x 8 x float> @llvm.riscv.vfwsub.nxv8f32.nxv8bf16.bf16.i64(<vscale x 8 x float> [[VD]], <vscale x 8 x bfloat> [[VS2]], bfloat [[RS1]], i64 7, i64 [[VL]])
+// CHECK-RV64-NEXT: ret <vscale x 8 x float> [[TMP0]]
+//
+vfloat32m4_t test_vfwsub_vf_bf16m2_f32m4_tu(vfloat32m4_t vd, vbfloat16m2_t vs2,
+ __bf16 rs1, size_t vl) {
+ return __riscv_vfwsub_vf_tu(vd, vs2, rs1, vl);
+}
+
+// CHECK-RV64-LABEL: define dso_local <vscale x 8 x float> @test_vfwsub_wv_bf16m2_f32m4_tu(
+// CHECK-RV64-SAME: <vscale x 8 x float> [[VD:%.*]], <vscale x 8 x float> [[VS2:%.*]], <vscale x 8 x bfloat> [[VS1:%.*]], i64 noundef [[VL:%.*]]) #[[ATTR0]] {
+// CHECK-RV64-NEXT: [[ENTRY:.*:]]
+// CHECK-RV64-NEXT: [[TMP0:%.*]] = call <vscale x 8 x float> @llvm.riscv.vfwsub.w.nxv8f32.nxv8bf16.i64(<vscale x 8 x float> [[VD]], <vscale x 8 x float> [[VS2]], <vscale x 8 x bfloat> [[VS1]], i64 7, i64 [[VL]])
+// CHECK-RV64-NEXT: ret <vscale x 8 x float> [[TMP0]]
+//
+vfloat32m4_t test_vfwsub_wv_bf16m2_f32m4_tu(vfloat32m4_t vd, vfloat32m4_t vs2,
+ vbfloat16m2_t vs1, size_t vl) {
+ return __riscv_vfwsub_wv_tu(vd, vs2, vs1, vl);
+}
+
+// CHECK-RV64-LABEL: define dso_local <vscale x 8 x float> @test_vfwsub_wf_bf16_f32m4_tu(
+// CHECK-RV64-SAME: <vscale x 8 x float> [[VD:%.*]], <vscale x 8 x float> [[VS2:%.*]], bfloat noundef [[RS1:%.*]], i64 noundef [[VL:%.*]]) #[[ATTR0]] {
+// CHECK-RV64-NEXT: [[ENTRY:.*:]]
+// CHECK-RV64-NEXT: [[TMP0:%.*]] = call <vscale x 8 x float> @llvm.riscv.vfwsub.w.nxv8f32.bf16.i64(<vscale x 8 x float> [[VD]], <vscale x 8 x float> [[VS2]], bfloat [[RS1]], i64 7, i64 [[VL]])
+// CHECK-RV64-NEXT: ret <vscale x 8 x float> [[TMP0]]
+//
+vfloat32m4_t test_vfwsub_wf_bf16_f32m4_tu(vfloat32m4_t vd, vfloat32m4_t vs2,
+ __bf16 rs1, size_t vl) {
+ return __riscv_vfwsub_wf_tu(vd, vs2, rs1, vl);
+}
+
+// CHECK-RV64-LABEL: define dso_local <vscale x 16 x float> @test_vfwsub_vv_bf16m4_f32m8_tu(
+// CHECK-RV64-SAME: <vscale x 16 x float> [[VD:%.*]], <vscale x 16 x bfloat> [[VS2:%.*]], <vscale x 16 x bfloat> [[VS1:%.*]], i64 noundef [[VL:%.*]]) #[[ATTR0]] {
+// CHECK-RV64-NEXT: [[ENTRY:.*:]]
+// CHECK-RV64-NEXT: [[TMP0:%.*]] = call <vscale x 16 x float> @llvm.riscv.vfwsub.nxv16f32.nxv16bf16.nxv16bf16.i64(<vscale x 16 x float> [[VD]], <vscale x 16 x bfloat> [[VS2]], <vscale x 16 x bfloat> [[VS1]], i64 7, i64 [[VL]])
+// CHECK-RV64-NEXT: ret <vscale x 16 x float> [[TMP0]]
+//
+vfloat32m8_t test_vfwsub_vv_bf16m4_f32m8_tu(vfloat32m8_t vd, vbfloat16m4_t vs2,
+ vbfloat16m4_t vs1, size_t vl) {
+ return __riscv_vfwsub_vv_tu(vd, vs2, vs1, vl);
+}
+
+// CHECK-RV64-LABEL: define dso_local <vscale x 16 x float> @test_vfwsub_vf_bf16m4_f32m8_tu(
+// CHECK-RV64-SAME: <vscale x 16 x float> [[VD:%.*]], <vscale x 16 x bfloat> [[VS2:%.*]], bfloat noundef [[RS1:%.*]], i64 noundef [[VL:%.*]]) #[[ATTR0]] {
+// CHECK-RV64-NEXT: [[ENTRY:.*:]]
+// CHECK-RV64-NEXT: [[TMP0:%.*]] = call <vscale x 16 x float> @llvm.riscv.vfwsub.nxv16f32.nxv16bf16.bf16.i64(<vscale x 16 x float> [[VD]], <vscale x 16 x bfloat> [[VS2]], bfloat [[RS1]], i64 7, i64 [[VL]])
+// CHECK-RV64-NEXT: ret <vscale x 16 x float> [[TMP0]]
+//
+vfloat32m8_t test_vfwsub_vf_bf16m4_f32m8_tu(vfloat32m8_t vd, vbfloat16m4_t vs2,
+ __bf16 rs1, size_t vl) {
+ return __riscv_vfwsub_vf_tu(vd, vs2, rs1, vl);
+}
+
+// CHECK-RV64-LABEL: define dso_local <vscale x 16 x float> @test_vfwsub_wv_bf16m4_f32m8_tu(
+// CHECK-RV64-SAME: <vscale x 16 x float> [[VD:%.*]], <vscale x 16 x float> [[VS2:%.*]], <vscale x 16 x bfloat> [[VS1:%.*]], i64 noundef [[VL:%.*]]) #[[ATTR0]] {
+// CHECK-RV64-NEXT: [[ENTRY:.*:]]
+// CHECK-RV64-NEXT: [[TMP0:%.*]] = call <vscale x 16 x float> @llvm.riscv.vfwsub.w.nxv16f32.nxv16bf16.i64(<vscale x 16 x float> [[VD]], <vscale x 16 x float> [[VS2]], <vscale x 16 x bfloat> [[VS1]], i64 7, i64 [[VL]])
+// CHECK-RV64-NEXT: ret <vscale x 16 x float> [[TMP0]]
+//
+vfloat32m8_t test_vfwsub_wv_bf16m4_f32m8_tu(vfloat32m8_t vd, vfloat32m8_t vs2,
+ vbfloat16m4_t vs1, size_t vl) {
+ return __riscv_vfwsub_wv_tu(vd, vs2, vs1, vl);
+}
+
+// CHECK-RV64-LABEL: define dso_local <vscale x 16 x float> @test_vfwsub_wf_bf16_f32m8_tu(
+// CHECK-RV64-SAME: <vscale x 16 x float> [[VD:%.*]], <vscale x 16 x float> [[VS2:%.*]], bfloat noundef [[RS1:%.*]], i64 noundef [[VL:%.*]]) #[[ATTR0]] {
+// CHECK-RV64-NEXT: [[ENTRY:.*:]]
+// CHECK-RV64-NEXT: [[TMP0:%.*]] = call <vscale x 16 x float> @llvm.riscv.vfwsub.w.nxv16f32.bf16.i64(<vscale x 16 x float> [[VD]], <vscale x 16 x float> [[VS2]], bfloat [[RS1]], i64 7, i64 [[VL]])
+// CHECK-RV64-NEXT: ret <vscale x 16 x float> [[TMP0]]
+//
+vfloat32m8_t test_vfwsub_wf_bf16_f32m8_tu(vfloat32m8_t vd, vfloat32m8_t vs2,
+ __bf16 rs1, size_t vl) {
+ return __riscv_vfwsub_wf_tu(vd, vs2, rs1, vl);
+}
+
+// CHECK-RV64-LABEL: define dso_local <vscale x 1 x float> @test_vfwsub_vv_bf16mf4_f32mf2_tum(
+// CHECK-RV64-SAME: <vscale x 1 x i1> [[VM:%.*]], <vscale x 1 x float> [[VD:%.*]], <vscale x 1 x bfloat> [[VS2:%.*]], <vscale x 1 x bfloat> [[VS1:%.*]], i64 noundef [[VL:%.*]]) #[[ATTR0]] {
+// CHECK-RV64-NEXT: [[ENTRY:.*:]]
+// CHECK-RV64-NEXT: [[TMP0:%.*]] = call <vscale x 1 x float> @llvm.riscv.vfwsub.mask.nxv1f32.nxv1bf16.nxv1bf16.i64(<vscale x 1 x float> [[VD]], <vscale x 1 x bfloat> [[VS2]], <vscale x 1 x bfloat> [[VS1]], <vscale x 1 x i1> [[VM]], i64 7, i64 [[VL]], i64 2)
+// CHECK-RV64-NEXT: ret <vscale x 1 x float> [[TMP0]]
+//
+vfloat32mf2_t test_vfwsub_vv_bf16mf4_f32mf2_tum(vbool64_t vm, vfloat32mf2_t vd,
+ vbfloat16mf4_t vs2,
+ vbfloat16mf4_t vs1, size_t vl) {
+ return __riscv_vfwsub_vv_tum(vm, vd, vs2, vs1, vl);
+}
+
+// CHECK-RV64-LABEL: define dso_local <vscale x 1 x float> @test_vfwsub_vf_bf16mf4_f32mf2_tum(
+// CHECK-RV64-SAME: <vscale x 1 x i1> [[VM:%.*]], <vscale x 1 x float> [[VD:%.*]], <vscale x 1 x bfloat> [[VS2:%.*]], bfloat noundef [[RS1:%.*]], i64 noundef [[VL:%.*]]) #[[ATTR0]] {
+// CHECK-RV64-NEXT: [[ENTRY:.*:]]
+// CHECK-RV64-NEXT: [[TMP0:%.*]] = call <vscale x 1 x float> @llvm.riscv.vfwsub.mask.nxv1f32.nxv1bf16.bf16.i64(<vscale x 1 x float> [[VD]], <vscale x 1 x bfloat> [[VS2]], bfloat [[RS1]], <vscale x 1 x i1> [[VM]], i64 7, i64 [[VL]], i64 2)
+// CHECK-RV64-NEXT: ret <vscale x 1 x float> [[TMP0]]
+//
+vfloat32mf2_t test_vfwsub_vf_bf16mf4_f32mf2_tum(vbool64_t vm, vfloat32mf2_t vd,
+ vbfloat16mf4_t vs2, __bf16 rs1,
+ size_t vl) {
+ return __riscv_vfwsub_vf_tum(vm, vd, vs2, rs1, vl);
+}
+
+// CHECK-RV64-LABEL: define dso_local <vscale x 1 x float> @test_vfwsub_wv_bf16mf4_f32mf2_tum(
+// CHECK-RV64-SAME: <vscale x 1 x i1> [[VM:%.*]], <vscale x 1 x float> [[VD:%.*]], <vscale x 1 x float> [[VS2:%.*]], <vscale x 1 x bfloat> [[VS1:%.*]], i64 noundef [[VL:%.*]]) #[[ATTR0]] {
+// CHECK-RV64-NEXT: [[ENTRY:.*:]]
+// CHECK-RV64-NEXT: [[TMP0:%.*]] = call <vscale x 1 x float> @llvm.riscv.vfwsub.w.mask.nxv1f32.nxv1bf16.i64(<vscale x 1 x float> [[VD]], <vscale x 1 x float> [[VS2]], <vscale x 1 x bfloat> [[VS1]], <vscale x 1 x i1> [[VM]], i64 7, i64 [[VL]], i64 2)
+// CHECK-RV64-NEXT: ret <vscale x 1 x float> [[TMP0]]
+//
+vfloat32mf2_t test_vfwsub_wv_bf16mf4_f32mf2_tum(vbool64_t vm, vfloat32mf2_t vd,
+ vfloat32mf2_t vs2,
+ vbfloat16mf4_t vs1, size_t vl) {
+ return __riscv_vfwsub_wv_tum(vm, vd, vs2, vs1, vl);
+}
+
+// CHECK-RV64-LABEL: define dso_local <vscale x 1 x float> @test_vfwsub_wf_bf16_f32mf2_tum(
+// CHECK-RV64-SAME: <vscale x 1 x i1> [[VM:%.*]], <vscale x 1 x float> [[VD:%.*]], <vscale x 1 x float> [[VS2:%.*]], bfloat noundef [[RS1:%.*]], i64 noundef [[VL:%.*]]) #[[ATTR0]] {
+// CHECK-RV64-NEXT: [[ENTRY:.*:]]
+// CHECK-RV64-NEXT: [[TMP0:%.*]] = call <vscale x 1 x float> @llvm.riscv.vfwsub.w.mask.nxv1f32.bf16.i64(<vscale x 1 x float> [[VD]], <vscale x 1 x float> [[VS2]], bfloat [[RS1]], <vscale x 1 x i1> [[VM]], i64 7, i64 [[VL]], i64 2)
+// CHECK-RV64-NEXT: ret <vscale x 1 x float> [[TMP0]]
+//
+vfloat32mf2_t test_vfwsub_wf_bf16_f32mf2_tum(vbool64_t vm, vfloat32mf2_t vd,
+ vfloat32mf2_t vs2, __bf16 rs1,
+ size_t vl) {
+ return __riscv_vfwsub_wf_tum(vm, vd, vs2, rs1, vl);
+}
+
+// CHECK-RV64-LABEL: define dso_local <vscale x 2 x float> @test_vfwsub_vv_bf16mf2_f32m1_tum(
+// CHECK-RV64-SAME: <vscale x 2 x i1> [[VM:%.*]], <vscale x 2 x float> [[VD:%.*]], <vscale x 2 x bfloat> [[VS2:%.*]], <vscale x 2 x bfloat> [[VS1:%.*]], i64 noundef [[VL:%.*]]) #[[ATTR0]] {
+// CHECK-RV64-NEXT: [[ENTRY:.*:]]
+// CHECK-RV64-NEXT: [[TMP0:%.*]] = call <vscale x 2 x float> @llvm.riscv.vfwsub.mask.nxv2f32.nxv2bf16.nxv2bf16.i64(<vscale x 2 x float> [[VD]], <vscale x 2 x bfloat> [[VS2]], <vscale x 2 x bfloat> [[VS1]], <vscale x 2 x i1> [[VM]], i64 7, i64 [[VL]], i64 2)
+// CHECK-RV64-NEXT: ret <vscale x 2 x float> [[TMP0]]
+//
+vfloat32m1_t test_vfwsub_vv_bf16mf2_f32m1_tum(vbool32_t vm, vfloat32m1_t vd,
+ vbfloat16mf2_t vs2,
+ vbfloat16mf2_t vs1, size_t vl) {
+ return __riscv_vfwsub_vv_tum(vm, vd, vs2, vs1, vl);
+}
+
+// CHECK-RV64-LABEL: define dso_local <vscale x 2 x float> @test_vfwsub_vf_bf16mf2_f32m1_tum(
+// CHECK-RV64-SAME: <vscale x 2 x i1> [[VM:%.*]], <vscale x 2 x float> [[VD:%.*]], <vscale x 2 x bfloat> [[VS2:%.*]], bfloat noundef [[RS1:%.*]], i64 noundef [[VL:%.*]]) #[[ATTR0]] {
+// CHECK-RV64-NEXT: [[ENTRY:.*:]]
+// CHECK-RV64-NEXT: [[TMP0:%.*]] = call <vscale x 2 x float> @llvm.riscv.vfwsub.mask.nxv2f32.nxv2bf16.bf16.i64(<vscale x 2 x float> [[VD]], <vscale x 2 x bfloat> [[VS2]], bfloat [[RS1]], <vscale x 2 x i1> [[VM]], i64 7, i64 [[VL]], i64 2)
+// CHECK-RV64-NEXT: ret <vscale x 2 x float> [[TMP0]]
+//
+vfloat32m1_t test_vfwsub_vf_bf16mf2_f32m1_tum(vbool32_t vm, vfloat32m1_t vd,
+ vbfloat16mf2_t vs2, __bf16 rs1,
+ size_t vl) {
+ return __riscv_vfwsub_vf_tum(vm, vd, vs2, rs1, vl);
+}
+
+// CHECK-RV64-LABEL: define dso_local <vscale x 2 x float> @test_vfwsub_wv_bf16mf2_f32m1_tum(
+// CHECK-RV64-SAME: <vscale x 2 x i1> [[VM:%.*]], <vscale x 2 x float> [[VD:%.*]], <vscale x 2 x float> [[VS2:%.*]], <vscale x 2 x bfloat> [[VS1:%.*]], i64 noundef [[VL:%.*]]) #[[ATTR0]] {
+// CHECK-RV64-NEXT: [[ENTRY:.*:]]
+// CHECK-RV64-NEXT: [[TMP0:%.*]] = call <vscale x 2 x float> @llvm.riscv.vfwsub.w.mask.nxv2f32.nxv2bf16.i64(<vscale x 2 x float> [[VD]], <vscale x 2 x float> [[VS2]], <vscale x 2 x bfloat> [[VS1]], <vscale x 2 x i1> [[VM]], i64 7, i64 [[VL]], i64 2)
+// CHECK-RV64-NEXT: ret <vscale x 2 x float> [[TMP0]]
+//
+vfloat32m1_t test_vfwsub_wv_bf16mf2_f32m1_tum(vbool32_t vm, vfloat32m1_t vd,
+ vfloat32m1_t vs2,
+ vbfloat16mf2_t vs1, size_t vl) {
+ return __riscv_vfwsub_wv_tum(vm, vd, vs2, vs1, vl);
+}
+
+// CHECK-RV64-LABEL: define dso_local <vscale x 2 x float> @test_vfwsub_wf_bf16_f32m1_tum(
+// CHECK-RV64-SAME: <vscale x 2 x i1> [[VM:%.*]], <vscale x 2 x float> [[VD:%.*]], <vscale x 2 x float> [[VS2:%.*]], bfloat noundef [[RS1:%.*]], i64 noundef [[VL:%.*]]) #[[ATTR0]] {
+// CHECK-RV64-NEXT: [[ENTRY:.*:]]
+// CHECK-RV64-NEXT: [[TMP0:%.*]] = call <vscale x 2 x float> @llvm.riscv.vfwsub.w.mask.nxv2f32.bf16.i64(<vscale x 2 x float> [[VD]], <vscale x 2 x float> [[VS2]], bfloat [[RS1]], <vscale x 2 x i1> [[VM]], i64 7, i64 [[VL]], i64 2)
+// CHECK-RV64-NEXT: ret <vscale x 2 x float> [[TMP0]]
+//
+vfloat32m1_t test_vfwsub_wf_bf16_f32m1_tum(vbool32_t vm, vfloat32m1_t vd,
+ vfloat32m1_t vs2, __bf16 rs1,
+ size_t vl) {
+ return __riscv_vfwsub_wf_tum(vm, vd, vs2, rs1, vl);
+}
+
+// CHECK-RV64-LABEL: define dso_local <vscale x 4 x float> @test_vfwsub_vv_bf16m1_f32m2_tum(
+// CHECK-RV64-SAME: <vscale x 4 x i1> [[VM:%.*]], <vscale x 4 x float> [[VD:%.*]], <vscale x 4 x bfloat> [[VS2:%.*]], <vscale x 4 x bfloat> [[VS1:%.*]], i64 noundef [[VL:%.*]]) #[[ATTR0]] {
+// CHECK-RV64-NEXT: [[ENTRY:.*:]]
+// CHECK-RV64-NEXT: [[TMP0:%.*]] = call <vscale x 4 x float> @llvm.riscv.vfwsub.mask.nxv4f32.nxv4bf16.nxv4bf16.i64(<vscale x 4 x float> [[VD]], <vscale x 4 x bfloat> [[VS2]], <vscale x 4 x bfloat> [[VS1]], <vscale x 4 x i1> [[VM]], i64 7, i64 [[VL]], i64 2)
+// CHECK-RV64-NEXT: ret <vscale x 4 x float> [[TMP0]]
+//
+vfloat32m2_t test_vfwsub_vv_bf16m1_f32m2_tum(vbool16_t vm, vfloat32m2_t vd,
+ vbfloat16m1_t vs2,
+ vbfloat16m1_t vs1, size_t vl) {
+ return __riscv_vfwsub_vv_tum(vm, vd, vs2, vs1, vl);
+}
+
+// CHECK-RV64-LABEL: define dso_local <vscale x 4 x float> @test_vfwsub_vf_bf16m1_f32m2_tum(
+// CHECK-RV64-SAME: <vscale x 4 x i1> [[VM:%.*]], <vscale x 4 x float> [[VD:%.*]], <vscale x 4 x bfloat> [[VS2:%.*]], bfloat noundef [[RS1:%.*]], i64 noundef [[VL:%.*]]) #[[ATTR0]] {
+// CHECK-RV64-NEXT: [[ENTRY:.*:]]
+// CHECK-RV64-NEXT: [[TMP0:%.*]] = call <vscale x 4 x float> @llvm.riscv.vfwsub.mask.nxv4f32.nxv4bf16.bf16.i64(<vscale x 4 x float> [[VD]], <vscale x 4 x bfloat> [[VS2]], bfloat [[RS1]], <vscale x 4 x i1> [[VM]], i64 7, i64 [[VL]], i64 2)
+// CHECK-RV64-NEXT: ret <vscale x 4 x float> [[TMP0]]
+//
+vfloat32m2_t test_vfwsub_vf_bf16m1_f32m2_tum(vbool16_t vm, vfloat32m2_t vd,
+ vbfloat16m1_t vs2, __bf16 rs1,
+ size_t vl) {
+ return __riscv_vfwsub_vf_tum(vm, vd, vs2, rs1, vl);
+}
+
+// CHECK-RV64-LABEL: define dso_local <vscale x 4 x float> @test_vfwsub_wv_bf16m1_f32m2_tum(
+// CHECK-RV64-SAME: <vscale x 4 x i1> [[VM:%.*]], <vscale x 4 x float> [[VD:%.*]], <vscale x 4 x float> [[VS2:%.*]], <vscale x 4 x bfloat> [[VS1:%.*]], i64 noundef [[VL:%.*]]) #[[ATTR0]] {
+// CHECK-RV64-NEXT: [[ENTRY:.*:]]
+// CHECK-RV64-NEXT: [[TMP0:%.*]] = call <vscale x 4 x float> @llvm.riscv.vfwsub.w.mask.nxv4f32.nxv4bf16.i64(<vscale x 4 x float> [[VD]], <vscale x 4 x float> [[VS2]], <vscale x 4 x bfloat> [[VS1]], <vscale x 4 x i1> [[VM]], i64 7, i64 [[VL]], i64 2)
+// CHECK-RV64-NEXT: ret <vscale x 4 x float> [[TMP0]]
+//
+vfloat32m2_t test_vfwsub_wv_bf16m1_f32m2_tum(vbool16_t vm, vfloat32m2_t vd,
+ vfloat32m2_t vs2,
+ vbfloat16m1_t vs1, size_t vl) {
+ return __riscv_vfwsub_wv_tum(vm, vd, vs2, vs1, vl);
+}
+
+// CHECK-RV64-LABEL: define dso_local <vscale x 4 x float> @test_vfwsub_wf_bf16_f32m2_tum(
+// CHECK-RV64-SAME: <vscale x 4 x i1> [[VM:%.*]], <vscale x 4 x float> [[VD:%.*]], <vscale x 4 x float> [[VS2:%.*]], bfloat noundef [[RS1:%.*]], i64 noundef [[VL:%.*]]) #[[ATTR0]] {
+// CHECK-RV64-NEXT: [[ENTRY:.*:]]
+// CHECK-RV64-NEXT: [[TMP0:%.*]] = call <vscale x 4 x float> @llvm.riscv.vfwsub.w.mask.nxv4f32.bf16.i64(<vscale x 4 x float> [[VD]], <vscale x 4 x float> [[VS2]], bfloat [[RS1]], <vscale x 4 x i1> [[VM]], i64 7, i64 [[VL]], i64 2)
+// CHECK-RV64-NEXT: ret <vscale x 4 x float> [[TMP0]]
+//
+vfloat32m2_t test_vfwsub_wf_bf16_f32m2_tum(vbool16_t vm, vfloat32m2_t vd,
+ vfloat32m2_t vs2, __bf16 rs1,
+ size_t vl) {
+ return __riscv_vfwsub_wf_tum(vm, vd, vs2, rs1, vl);
+}
+
+// CHECK-RV64-LABEL: define dso_local <vscale x 8 x float> @test_vfwsub_vv_bf16m2_f32m4_tum(
+// CHECK-RV64-SAME: <vscale x 8 x i1> [[VM:%.*]], <vscale x 8 x float> [[VD:%.*]], <vscale x 8 x bfloat> [[VS2:%.*]], <vscale x 8 x bfloat> [[VS1:%.*]], i64 noundef [[VL:%.*]]) #[[ATTR0]] {
+// CHECK-RV64-NEXT: [[ENTRY:.*:]]
+// CHECK-RV64-NEXT: [[TMP0:%.*]] = call <vscale x 8 x float> @llvm.riscv.vfwsub.mask.nxv8f32.nxv8bf16.nxv8bf16.i64(<vscale x 8 x float> [[VD]], <vscale x 8 x bfloat> [[VS2]], <vscale x 8 x bfloat> [[VS1]], <vscale x 8 x i1> [[VM]], i64 7, i64 [[VL]], i64 2)
+// CHECK-RV64-NEXT: ret <vscale x 8 x float> [[TMP0]]
+//
+vfloat32m4_t test_vfwsub_vv_bf16m2_f32m4_tum(vbool8_t vm, vfloat32m4_t vd,
+ vbfloat16m2_t vs2,
+ vbfloat16m2_t vs1, size_t vl) {
+ return __riscv_vfwsub_vv_tum(vm, vd, vs2, vs1, vl);
+}
+
+// CHECK-RV64-LABEL: define dso_local <vscale x 8 x float> @test_vfwsub_vf_bf16m2_f32m4_tum(
+// CHECK-RV64-SAME: <vscale x 8 x i1> [[VM:%.*]], <vscale x 8 x float> [[VD:%.*]], <vscale x 8 x bfloat> [[VS2:%.*]], bfloat noundef [[RS1:%.*]], i64 noundef [[VL:%.*]]) #[[ATTR0]] {
+// CHECK-RV64-NEXT: [[ENTRY:.*:]]
+// CHECK-RV64-NEXT: [[TMP0:%.*]] = call <vscale x 8 x float> @llvm.riscv.vfwsub.mask.nxv8f32.nxv8bf16.bf16.i64(<vscale x 8 x float> [[VD]], <vscale x 8 x bfloat> [[VS2]], bfloat [[RS1]], <vscale x 8 x i1> [[VM]], i64 7, i64 [[VL]], i64 2)
+// CHECK-RV64-NEXT: ret <vscale x 8 x float> [[TMP0]]
+//
+vfloat32m4_t test_vfwsub_vf_bf16m2_f32m4_tum(vbool8_t vm, vfloat32m4_t vd,
+ vbfloat16m2_t vs2, __bf16 rs1,
+ size_t vl) {
+ return __riscv_vfwsub_vf_tum(vm, vd, vs2, rs1, vl);
+}
+
+// CHECK-RV64-LABEL: define dso_local <vscale x 8 x float> @test_vfwsub_wv_bf16m2_f32m4_tum(
+// CHECK-RV64-SAME: <vscale x 8 x i1> [[VM:%.*]], <vscale x 8 x float> [[VD:%.*]], <vscale x 8 x float> [[VS2:%.*]], <vscale x 8 x bfloat> [[VS1:%.*]], i64 noundef [[VL:%.*]]) #[[ATTR0]] {
+// CHECK-RV64-NEXT: [[ENTRY:.*:]]
+// CHECK-RV64-NEXT: [[TMP0:%.*]] = call <vscale x 8 x float> @llvm.riscv.vfwsub.w.mask.nxv8f32.nxv8bf16.i64(<vscale x 8 x float> [[VD]], <vscale x 8 x float> [[VS2]], <vscale x 8 x bfloat> [[VS1]], <vscale x 8 x i1> [[VM]], i64 7, i64 [[VL]], i64 2)
+// CHECK-RV64-NEXT: ret <vscale x 8 x float> [[TMP0]]
+//
+vfloat32m4_t test_vfwsub_wv_bf16m2_f32m4_tum(vbool8_t vm, vfloat32m4_t vd,
+ vfloat32m4_t vs2,
+ vbfloat16m2_t vs1, size_t vl) {
+ return __riscv_vfwsub_wv_tum(vm, vd, vs2, vs1, vl);
+}
+
+// CHECK-RV64-LABEL: define dso_local <vscale x 8 x float> @test_vfwsub_wf_bf16_f32m4_tum(
+// CHECK-RV64-SAME: <vscale x 8 x i1> [[VM:%.*]], <vscale x 8 x float> [[VD:%.*]], <vscale x 8 x float> [[VS2:%.*]], bfloat noundef [[RS1:%.*]], i64 noundef [[VL:%.*]]) #[[ATTR0]] {
+// CHECK-RV64-NEXT: [[ENTRY:.*:]]
+// CHECK-RV64-NEXT: [[TMP0:%.*]] = call <vscale x 8 x float> @llvm.riscv.vfwsub.w.mask.nxv8f32.bf16.i64(<vscale x 8 x float> [[VD]], <vscale x 8 x float> [[VS2]], bfloat [[RS1]], <vscale x 8 x i1> [[VM]], i64 7, i64 [[VL]], i64 2)
+// CHECK-RV64-NEXT: ret <vscale x 8 x float> [[TMP0]]
+//
+vfloat32m4_t test_vfwsub_wf_bf16_f32m4_tum(vbool8_t vm, vfloat32m4_t vd,
+ vfloat32m4_t vs2, __bf16 rs1,
+ size_t vl) {
+ return __riscv_vfwsub_wf_tum(vm, vd, vs2, rs1, vl);
+}
+
+// CHECK-RV64-LABEL: define dso_local <vscale x 16 x float> @test_vfwsub_vv_bf16m4_f32m8_tum(
+// CHECK-RV64-SAME: <vscale x 16 x i1> [[VM:%.*]], <vscale x 16 x float> [[VD:%.*]], <vscale x 16 x bfloat> [[VS2:%.*]], <vscale x 16 x bfloat> [[VS1:%.*]], i64 noundef [[VL:%.*]]) #[[ATTR0]] {
+// CHECK-RV64-NEXT: [[ENTRY:.*:]]
+// CHECK-RV64-NEXT: [[TMP0:%.*]] = call <vscale x 16 x float> @llvm.riscv.vfwsub.mask.nxv16f32.nxv16bf16.nxv16bf16.i64(<vscale x 16 x float> [[VD]], <vscale x 16 x bfloat> [[VS2]], <vscale x 16 x bfloat> [[VS1]], <vscale x 16 x i1> [[VM]], i64 7, i64 [[VL]], i64 2)
+// CHECK-RV64-NEXT: ret <vscale x 16 x float> [[TMP0]]
+//
+vfloat32m8_t test_vfwsub_vv_bf16m4_f32m8_tum(vbool4_t vm, vfloat32m8_t vd,
+ vbfloat16m4_t vs2,
+ vbfloat16m4_t vs1, size_t vl) {
+ return __riscv_vfwsub_vv_tum(vm, vd, vs2, vs1, vl);
+}
+
+// CHECK-RV64-LABEL: define dso_local <vscale x 16 x float> @test_vfwsub_vf_bf16m4_f32m8_tum(
+// CHECK-RV64-SAME: <vscale x 16 x i1> [[VM:%.*]], <vscale x 16 x float> [[VD:%.*]], <vscale x 16 x bfloat> [[VS2:%.*]], bfloat noundef [[RS1:%.*]], i64 noundef [[VL:%.*]]) #[[ATTR0]] {
+// CHECK-RV64-NEXT: [[ENTRY:.*:]]
+// CHECK-RV64-NEXT: [[TMP0:%.*]] = call <vscale x 16 x float> @llvm.riscv.vfwsub.mask.nxv16f32.nxv16bf16.bf16.i64(<vscale x 16 x float> [[VD]], <vscale x 16 x bfloat> [[VS2]], bfloat [[RS1]], <vscale x 16 x i1> [[VM]], i64 7, i64 [[VL]], i64 2)
+// CHECK-RV64-NEXT: ret <vscale x 16 x float> [[TMP0]]
+//
+vfloat32m8_t test_vfwsub_vf_bf16m4_f32m8_tum(vbool4_t vm, vfloat32m8_t vd,
+ vbfloat16m4_t vs2, __bf16 rs1,
+ size_t vl) {
+ return __riscv_vfwsub_vf_tum(vm, vd, vs2, rs1, vl);
+}
+
+// CHECK-RV64-LABEL: define dso_local <vscale x 16 x float> @test_vfwsub_wv_bf16m4_f32m8_tum(
+// CHECK-RV64-SAME: <vscale x 16 x i1> [[VM:%.*]], <vscale x 16 x float> [[VD:%.*]], <vscale x 16 x float> [[VS2:%.*]], <vscale x 16 x bfloat> [[VS1:%.*]], i64 noundef [[VL:%.*]]) #[[ATTR0]] {
+// CHECK-RV64-NEXT: [[ENTRY:.*:]]
+// CHECK-RV64-NEXT: [[TMP0:%.*]] = call <vscale x 16 x float> @llvm.riscv.vfwsub.w.mask.nxv16f32.nxv16bf16.i64(<vscale x 16 x float> [[VD]], <vscale x 16 x float> [[VS2]], <vscale x 16 x bfloat> [[VS1]], <vscale x 16 x i1> [[VM]], i64 7, i64 [[VL]], i64 2)
+// CHECK-RV64-NEXT: ret <vscale x 16 x float> [[TMP0]]
+//
+vfloat32m8_t test_vfwsub_wv_bf16m4_f32m8_tum(vbool4_t vm, vfloat32m8_t vd,
+ vfloat32m8_t vs2,
+ vbfloat16m4_t vs1, size_t vl) {
+ return __riscv_vfwsub_wv_tum(vm, vd, vs2, vs1, vl);
+}
+
+// CHECK-RV64-LABEL: define dso_local <vscale x 16 x float> @test_vfwsub_wf_bf16_f32m8_tum(
+// CHECK-RV64-SAME: <vscale x 16 x i1> [[VM:%.*]], <vscale x 16 x float> [[VD:%.*]], <vscale x 16 x float> [[VS2:%.*]], bfloat noundef [[RS1:%.*]], i64 noundef [[VL:%.*]]) #[[ATTR0]] {
+// CHECK-RV64-NEXT: [[ENTRY:.*:]]
+// CHECK-RV64-NEXT: [[TMP0:%.*]] = call <vscale x 16 x float> @llvm.riscv.vfwsub.w.mask.nxv16f32.bf16.i64(<vscale x 16 x float> [[VD]], <vscale x 16 x float> [[VS2]], bfloat [[RS1]], <vscale x 16 x i1> [[VM]], i64 7, i64 [[VL]], i64 2)
+// CHECK-RV64-NEXT: ret <vscale x 16 x float> [[TMP0]]
+//
+vfloat32m8_t test_vfwsub_wf_bf16_f32m8_tum(vbool4_t vm, vfloat32m8_t vd,
+ vfloat32m8_t vs2, __bf16 rs1,
+ size_t vl) {
+ return __riscv_vfwsub_wf_tum(vm, vd, vs2, rs1, vl);
+}
+
+// CHECK-RV64-LABEL: define dso_local <vscale x 1 x float> @test_vfwsub_vv_bf16mf4_f32mf2_tumu(
+// CHECK-RV64-SAME: <vscale x 1 x i1> [[VM:%.*]], <vscale x 1 x float> [[VD:%.*]], <vscale x 1 x bfloat> [[VS2:%.*]], <vscale x 1 x bfloat> [[VS1:%.*]], i64 noundef [[VL:%.*]]) #[[ATTR0]] {
+// CHECK-RV64-NEXT: [[ENTRY:.*:]]
+// CHECK-RV64-NEXT: [[TMP0:%.*]] = call <vscale x 1 x float> @llvm.riscv.vfwsub.mask.nxv1f32.nxv1bf16.nxv1bf16.i64(<vscale x 1 x float> [[VD]], <vscale x 1 x bfloat> [[VS2]], <vscale x 1 x bfloat> [[VS1]], <vscale x 1 x i1> [[VM]], i64 7, i64 [[VL]], i64 0)
+// CHECK-RV64-NEXT: ret <vscale x 1 x float> [[TMP0]]
+//
+vfloat32mf2_t test_vfwsub_vv_bf16mf4_f32mf2_tumu(vbool64_t vm, vfloat32mf2_t vd,
+ vbfloat16mf4_t vs2,
+ vbfloat16mf4_t vs1,
+ size_t vl) {
+ return __riscv_vfwsub_vv_tumu(vm, vd, vs2, vs1, vl);
+}
+
+// CHECK-RV64-LABEL: define dso_local <vscale x 1 x float> @test_vfwsub_vf_bf16mf4_f32mf2_tumu(
+// CHECK-RV64-SAME: <vscale x 1 x i1> [[VM:%.*]], <vscale x 1 x float> [[VD:%.*]], <vscale x 1 x bfloat> [[VS2:%.*]], bfloat noundef [[RS1:%.*]], i64 noundef [[VL:%.*]]) #[[ATTR0]] {
+// CHECK-RV64-NEXT: [[ENTRY:.*:]]
+// CHECK-RV64-NEXT: [[TMP0:%.*]] = call <vscale x 1 x float> @llvm.riscv.vfwsub.mask.nxv1f32.nxv1bf16.bf16.i64(<vscale x 1 x float> [[VD]], <vscale x 1 x bfloat> [[VS2]], bfloat [[RS1]], <vscale x 1 x i1> [[VM]], i64 7, i64 [[VL]], i64 0)
+// CHECK-RV64-NEXT: ret <vscale x 1 x float> [[TMP0]]
+//
+vfloat32mf2_t test_vfwsub_vf_bf16mf4_f32mf2_tumu(vbool64_t vm, vfloat32mf2_t vd,
+ vbfloat16mf4_t vs2, __bf16 rs1,
+ size_t vl) {
+ return __riscv_vfwsub_vf_tumu(vm, vd, vs2, rs1, vl);
+}
+
+// CHECK-RV64-LABEL: define dso_local <vscale x 1 x float> @test_vfwsub_wv_bf16mf4_f32mf2_tumu(
+// CHECK-RV64-SAME: <vscale x 1 x i1> [[VM:%.*]], <vscale x 1 x float> [[VD:%.*]], <vscale x 1 x float> [[VS2:%.*]], <vscale x 1 x bfloat> [[VS1:%.*]], i64 noundef [[VL:%.*]]) #[[ATTR0]] {
+// CHECK-RV64-NEXT: [[ENTRY:.*:]]
+// CHECK-RV64-NEXT: [[TMP0:%.*]] = call <vscale x 1 x float> @llvm.riscv.vfwsub.w.mask.nxv1f32.nxv1bf16.i64(<vscale x 1 x float> [[VD]], <vscale x 1 x float> [[VS2]], <vscale x 1 x bfloat> [[VS1]], <vscale x 1 x i1> [[VM]], i64 7, i64 [[VL]], i64 0)
+// CHECK-RV64-NEXT: ret <vscale x 1 x float> [[TMP0]]
+//
+vfloat32mf2_t test_vfwsub_wv_bf16mf4_f32mf2_tumu(vbool64_t vm, vfloat32mf2_t vd,
+ vfloat32mf2_t vs2,
+ vbfloat16mf4_t vs1,
+ size_t vl) {
+ return __riscv_vfwsub_wv_tumu(vm, vd, vs2, vs1, vl);
+}
+
+// CHECK-RV64-LABEL: define dso_local <vscale x 1 x float> @test_vfwsub_wf_bf16_f32mf2_tumu(
+// CHECK-RV64-SAME: <vscale x 1 x i1> [[VM:%.*]], <vscale x 1 x float> [[VD:%.*]], <vscale x 1 x float> [[VS2:%.*]], bfloat noundef [[RS1:%.*]], i64 noundef [[VL:%.*]]) #[[ATTR0]] {
+// CHECK-RV64-NEXT: [[ENTRY:.*:]]
+// CHECK-RV64-NEXT: [[TMP0:%.*]] = call <vscale x 1 x float> @llvm.riscv.vfwsub.w.mask.nxv1f32.bf16.i64(<vscale x 1 x float> [[VD]], <vscale x 1 x float> [[VS2]], bfloat [[RS1]], <vscale x 1 x i1> [[VM]], i64 7, i64 [[VL]], i64 0)
+// CHECK-RV64-NEXT: ret <vscale x 1 x float> [[TMP0]]
+//
+vfloat32mf2_t test_vfwsub_wf_bf16_f32mf2_tumu(vbool64_t vm, vfloat32mf2_t vd,
+ vfloat32mf2_t vs2, __bf16 rs1,
+ size_t vl) {
+ return __riscv_vfwsub_wf_tumu(vm, vd, vs2, rs1, vl);
+}
+
+// CHECK-RV64-LABEL: define dso_local <vscale x 2 x float> @test_vfwsub_vv_bf16mf2_f32m1_tumu(
+// CHECK-RV64-SAME: <vscale x 2 x i1> [[VM:%.*]], <vscale x 2 x float> [[VD:%.*]], <vscale x 2 x bfloat> [[VS2:%.*]], <vscale x 2 x bfloat> [[VS1:%.*]], i64 noundef [[VL:%.*]]) #[[ATTR0]] {
+// CHECK-RV64-NEXT: [[ENTRY:.*:]]
+// CHECK-RV64-NEXT: [[TMP0:%.*]] = call <vscale x 2 x float> @llvm.riscv.vfwsub.mask.nxv2f32.nxv2bf16.nxv2bf16.i64(<vscale x 2 x float> [[VD]], <vscale x 2 x bfloat> [[VS2]], <vscale x 2 x bfloat> [[VS1]], <vscale x 2 x i1> [[VM]], i64 7, i64 [[VL]], i64 0)
+// CHECK-RV64-NEXT: ret <vscale x 2 x float> [[TMP0]]
+//
+vfloat32m1_t test_vfwsub_vv_bf16mf2_f32m1_tumu(vbool32_t vm, vfloat32m1_t vd,
+ vbfloat16mf2_t vs2,
+ vbfloat16mf2_t vs1, size_t vl) {
+ return __riscv_vfwsub_vv_tumu(vm, vd, vs2, vs1, vl);
+}
+
+// CHECK-RV64-LABEL: define dso_local <vscale x 2 x float> @test_vfwsub_vf_bf16mf2_f32m1_tumu(
+// CHECK-RV64-SAME: <vscale x 2 x i1> [[VM:%.*]], <vscale x 2 x float> [[VD:%.*]], <vscale x 2 x bfloat> [[VS2:%.*]], bfloat noundef [[RS1:%.*]], i64 noundef [[VL:%.*]]) #[[ATTR0]] {
+// CHECK-RV64-NEXT: [[ENTRY:.*:]]
+// CHECK-RV64-NEXT: [[TMP0:%.*]] = call <vscale x 2 x float> @llvm.riscv.vfwsub.mask.nxv2f32.nxv2bf16.bf16.i64(<vscale x 2 x float> [[VD]], <vscale x 2 x bfloat> [[VS2]], bfloat [[RS1]], <vscale x 2 x i1> [[VM]], i64 7, i64 [[VL]], i64 0)
+// CHECK-RV64-NEXT: ret <vscale x 2 x float> [[TMP0]]
+//
+vfloat32m1_t test_vfwsub_vf_bf16mf2_f32m1_tumu(vbool32_t vm, vfloat32m1_t vd,
+ vbfloat16mf2_t vs2, __bf16 rs1,
+ size_t vl) {
+ return __riscv_vfwsub_vf_tumu(vm, vd, vs2, rs1, vl);
+}
+
+// CHECK-RV64-LABEL: define dso_local <vscale x 2 x float> @test_vfwsub_wv_bf16mf2_f32m1_tumu(
+// CHECK-RV64-SAME: <vscale x 2 x i1> [[VM:%.*]], <vscale x 2 x float> [[VD:%.*]], <vscale x 2 x float> [[VS2:%.*]], <vscale x 2 x bfloat> [[VS1:%.*]], i64 noundef [[VL:%.*]]) #[[ATTR0]] {
+// CHECK-RV64-NEXT: [[ENTRY:.*:]]
+// CHECK-RV64-NEXT: [[TMP0:%.*]] = call <vscale x 2 x float> @llvm.riscv.vfwsub.w.mask.nxv2f32.nxv2bf16.i64(<vscale x 2 x float> [[VD]], <vscale x 2 x float> [[VS2]], <vscale x 2 x bfloat> [[VS1]], <vscale x 2 x i1> [[VM]], i64 7, i64 [[VL]], i64 0)
+// CHECK-RV64-NEXT: ret <vscale x 2 x float> [[TMP0]]
+//
+vfloat32m1_t test_vfwsub_wv_bf16mf2_f32m1_tumu(vbool32_t vm, vfloat32m1_t vd,
+ vfloat32m1_t vs2,
+ vbfloat16mf2_t vs1, size_t vl) {
+ return __riscv_vfwsub_wv_tumu(vm, vd, vs2, vs1, vl);
+}
+
+// CHECK-RV64-LABEL: define dso_local <vscale x 2 x float> @test_vfwsub_wf_bf16_f32m1_tumu(
+// CHECK-RV64-SAME: <vscale x 2 x i1> [[VM:%.*]], <vscale x 2 x float> [[VD:%.*]], <vscale x 2 x float> [[VS2:%.*]], bfloat noundef [[RS1:%.*]], i64 noundef [[VL:%.*]]) #[[ATTR0]] {
+// CHECK-RV64-NEXT: [[ENTRY:.*:]]
+// CHECK-RV64-NEXT: [[TMP0:%.*]] = call <vscale x 2 x float> @llvm.riscv.vfwsub.w.mask.nxv2f32.bf16.i64(<vscale x 2 x float> [[VD]], <vscale x 2 x float> [[VS2]], bfloat [[RS1]], <vscale x 2 x i1> [[VM]], i64 7, i64 [[VL]], i64 0)
+// CHECK-RV64-NEXT: ret <vscale x 2 x float> [[TMP0]]
+//
+vfloat32m1_t test_vfwsub_wf_bf16_f32m1_tumu(vbool32_t vm, vfloat32m1_t vd,
+ vfloat32m1_t vs2, __bf16 rs1,
+ size_t vl) {
+ return __riscv_vfwsub_wf_tumu(vm, vd, vs2, rs1, vl);
+}
+
+// CHECK-RV64-LABEL: define dso_local <vscale x 4 x float> @test_vfwsub_vv_bf16m1_f32m2_tumu(
+// CHECK-RV64-SAME: <vscale x 4 x i1> [[VM:%.*]], <vscale x 4 x float> [[VD:%.*]], <vscale x 4 x bfloat> [[VS2:%.*]], <vscale x 4 x bfloat> [[VS1:%.*]], i64 noundef [[VL:%.*]]) #[[ATTR0]] {
+// CHECK-RV64-NEXT: [[ENTRY:.*:]]
+// CHECK-RV64-NEXT: [[TMP0:%.*]] = call <vscale x 4 x float> @llvm.riscv.vfwsub.mask.nxv4f32.nxv4bf16.nxv4bf16.i64(<vscale x 4 x float> [[VD]], <vscale x 4 x bfloat> [[VS2]], <vscale x 4 x bfloat> [[VS1]], <vscale x 4 x i1> [[VM]], i64 7, i64 [[VL]], i64 0)
+// CHECK-RV64-NEXT: ret <vscale x 4 x float> [[TMP0]]
+//
+vfloat32m2_t test_vfwsub_vv_bf16m1_f32m2_tumu(vbool16_t vm, vfloat32m2_t vd,
+ vbfloat16m1_t vs2,
+ vbfloat16m1_t vs1, size_t vl) {
+ return __riscv_vfwsub_vv_tumu(vm, vd, vs2, vs1, vl);
+}
+
+// CHECK-RV64-LABEL: define dso_local <vscale x 4 x float> @test_vfwsub_vf_bf16m1_f32m2_tumu(
+// CHECK-RV64-SAME: <vscale x 4 x i1> [[VM:%.*]], <vscale x 4 x float> [[VD:%.*]], <vscale x 4 x bfloat> [[VS2:%.*]], bfloat noundef [[RS1:%.*]], i64 noundef [[VL:%.*]]) #[[ATTR0]] {
+// CHECK-RV64-NEXT: [[ENTRY:.*:]]
+// CHECK-RV64-NEXT: [[TMP0:%.*]] = call <vscale x 4 x float> @llvm.riscv.vfwsub.mask.nxv4f32.nxv4bf16.bf16.i64(<vscale x 4 x float> [[VD]], <vscale x 4 x bfloat> [[VS2]], bfloat [[RS1]], <vscale x 4 x i1> [[VM]], i64 7, i64 [[VL]], i64 0)
+// CHECK-RV64-NEXT: ret <vscale x 4 x float> [[TMP0]]
+//
+vfloat32m2_t test_vfwsub_vf_bf16m1_f32m2_tumu(vbool16_t vm, vfloat32m2_t vd,
+ vbfloat16m1_t vs2, __bf16 rs1,
+ size_t vl) {
+ return __riscv_vfwsub_vf_tumu(vm, vd, vs2, rs1, vl);
+}
+
+// CHECK-RV64-LABEL: define dso_local <vscale x 4 x float> @test_vfwsub_wv_bf16m1_f32m2_tumu(
+// CHECK-RV64-SAME: <vscale x 4 x i1> [[VM:%.*]], <vscale x 4 x float> [[VD:%.*]], <vscale x 4 x float> [[VS2:%.*]], <vscale x 4 x bfloat> [[VS1:%.*]], i64 noundef [[VL:%.*]]) #[[ATTR0]] {
+// CHECK-RV64-NEXT: [[ENTRY:.*:]]
+// CHECK-RV64-NEXT: [[TMP0:%.*]] = call <vscale x 4 x float> @llvm.riscv.vfwsub.w.mask.nxv4f32.nxv4bf16.i64(<vscale x 4 x float> [[VD]], <vscale x 4 x float> [[VS2]], <vscale x 4 x bfloat> [[VS1]], <vscale x 4 x i1> [[VM]], i64 7, i64 [[VL]], i64 0)
+// CHECK-RV64-NEXT: ret <vscale x 4 x float> [[TMP0]]
+//
+vfloat32m2_t test_vfwsub_wv_bf16m1_f32m2_tumu(vbool16_t vm, vfloat32m2_t vd,
+ vfloat32m2_t vs2,
+ vbfloat16m1_t vs1, size_t vl) {
+ return __riscv_vfwsub_wv_tumu(vm, vd, vs2, vs1, vl);
+}
+
+// CHECK-RV64-LABEL: define dso_local <vscale x 4 x float> @test_vfwsub_wf_bf16_f32m2_tumu(
+// CHECK-RV64-SAME: <vscale x 4 x i1> [[VM:%.*]], <vscale x 4 x float> [[VD:%.*]], <vscale x 4 x float> [[VS2:%.*]], bfloat noundef [[RS1:%.*]], i64 noundef [[VL:%.*]]) #[[ATTR0]] {
+// CHECK-RV64-NEXT: [[ENTRY:.*:]]
+// CHECK-RV64-NEXT: [[TMP0:%.*]] = call <vscale x 4 x float> @llvm.riscv.vfwsub.w.mask.nxv4f32.bf16.i64(<vscale x 4 x float> [[VD]], <vscale x 4 x float> [[VS2]], bfloat [[RS1]], <vscale x 4 x i1> [[VM]], i64 7, i64 [[VL]], i64 0)
+// CHECK-RV64-NEXT: ret <vscale x 4 x float> [[TMP0]]
+//
+vfloat32m2_t test_vfwsub_wf_bf16_f32m2_tumu(vbool16_t vm, vfloat32m2_t vd,
+ vfloat32m2_t vs2, __bf16 rs1,
+ size_t vl) {
+ return __riscv_vfwsub_wf_tumu(vm, vd, vs2, rs1, vl);
+}
+
+// CHECK-RV64-LABEL: define dso_local <vscale x 8 x float> @test_vfwsub_vv_bf16m2_f32m4_tumu(
+// CHECK-RV64-SAME: <vscale x 8 x i1> [[VM:%.*]], <vscale x 8 x float> [[VD:%.*]], <vscale x 8 x bfloat> [[VS2:%.*]], <vscale x 8 x bfloat> [[VS1:%.*]], i64 noundef [[VL:%.*]]) #[[ATTR0]] {
+// CHECK-RV64-NEXT: [[ENTRY:.*:]]
+// CHECK-RV64-NEXT: [[TMP0:%.*]] = call <vscale x 8 x float> @llvm.riscv.vfwsub.mask.nxv8f32.nxv8bf16.nxv8bf16.i64(<vscale x 8 x float> [[VD]], <vscale x 8 x bfloat> [[VS2]], <vscale x 8 x bfloat> [[VS1]], <vscale x 8 x i1> [[VM]], i64 7, i64 [[VL]], i64 0)
+// CHECK-RV64-NEXT: ret <vscale x 8 x float> [[TMP0]]
+//
+vfloat32m4_t test_vfwsub_vv_bf16m2_f32m4_tumu(vbool8_t vm, vfloat32m4_t vd,
+ vbfloat16m2_t vs2,
+ vbfloat16m2_t vs1, size_t vl) {
+ return __riscv_vfwsub_vv_tumu(vm, vd, vs2, vs1, vl);
+}
+
+// CHECK-RV64-LABEL: define dso_local <vscale x 8 x float> @test_vfwsub_vf_bf16m2_f32m4_tumu(
+// CHECK-RV64-SAME: <vscale x 8 x i1> [[VM:%.*]], <vscale x 8 x float> [[VD:%.*]], <vscale x 8 x bfloat> [[VS2:%.*]], bfloat noundef [[RS1:%.*]], i64 noundef [[VL:%.*]]) #[[ATTR0]] {
+// CHECK-RV64-NEXT: [[ENTRY:.*:]]
+// CHECK-RV64-NEXT: [[TMP0:%.*]] = call <vscale x 8 x float> @llvm.riscv.vfwsub.mask.nxv8f32.nxv8bf16.bf16.i64(<vscale x 8 x float> [[VD]], <vscale x 8 x bfloat> [[VS2]], bfloat [[RS1]], <vscale x 8 x i1> [[VM]], i64 7, i64 [[VL]], i64 0)
+// CHECK-RV64-NEXT: ret <vscale x 8 x float> [[TMP0]]
+//
+vfloat32m4_t test_vfwsub_vf_bf16m2_f32m4_tumu(vbool8_t vm, vfloat32m4_t vd,
+ vbfloat16m2_t vs2, __bf16 rs1,
+ size_t vl) {
+ return __riscv_vfwsub_vf_tumu(vm, vd, vs2, rs1, vl);
+}
+
+// CHECK-RV64-LABEL: define dso_local <vscale x 8 x float> @test_vfwsub_wv_bf16m2_f32m4_tumu(
+// CHECK-RV64-SAME: <vscale x 8 x i1> [[VM:%.*]], <vscale x 8 x float> [[VD:%.*]], <vscale x 8 x float> [[VS2:%.*]], <vscale x 8 x bfloat> [[VS1:%.*]], i64 noundef [[VL:%.*]]) #[[ATTR0]] {
+// CHECK-RV64-NEXT: [[ENTRY:.*:]]
+// CHECK-RV64-NEXT: [[TMP0:%.*]] = call <vscale x 8 x float> @llvm.riscv.vfwsub.w.mask.nxv8f32.nxv8bf16.i64(<vscale x 8 x float> [[VD]], <vscale x 8 x float> [[VS2]], <vscale x 8 x bfloat> [[VS1]], <vscale x 8 x i1> [[VM]], i64 7, i64 [[VL]], i64 0)
+// CHECK-RV64-NEXT: ret <vscale x 8 x float> [[TMP0]]
+//
+vfloat32m4_t test_vfwsub_wv_bf16m2_f32m4_tumu(vbool8_t vm, vfloat32m4_t vd,
+ vfloat32m4_t vs2,
+ vbfloat16m2_t vs1, size_t vl) {
+ return __riscv_vfwsub_wv_tumu(vm, vd, vs2, vs1, vl);
+}
+
+// CHECK-RV64-LABEL: define dso_local <vscale x 8 x float> @test_vfwsub_wf_bf16_f32m4_tumu(
+// CHECK-RV64-SAME: <vscale x 8 x i1> [[VM:%.*]], <vscale x 8 x float> [[VD:%.*]], <vscale x 8 x float> [[VS2:%.*]], bfloat noundef [[RS1:%.*]], i64 noundef [[VL:%.*]]) #[[ATTR0]] {
+// CHECK-RV64-NEXT: [[ENTRY:.*:]]
+// CHECK-RV64-NEXT: [[TMP0:%.*]] = call <vscale x 8 x float> @llvm.riscv.vfwsub.w.mask.nxv8f32.bf16.i64(<vscale x 8 x float> [[VD]], <vscale x 8 x float> [[VS2]], bfloat [[RS1]], <vscale x 8 x i1> [[VM]], i64 7, i64 [[VL]], i64 0)
+// CHECK-RV64-NEXT: ret <vscale x 8 x float> [[TMP0]]
+//
+vfloat32m4_t test_vfwsub_wf_bf16_f32m4_tumu(vbool8_t vm, vfloat32m4_t vd,
+ vfloat32m4_t vs2, __bf16 rs1,
+ size_t vl) {
+ return __riscv_vfwsub_wf_tumu(vm, vd, vs2, rs1, vl);
+}
+
+// CHECK-RV64-LABEL: define dso_local <vscale x 16 x float> @test_vfwsub_vv_bf16m4_f32m8_tumu(
+// CHECK-RV64-SAME: <vscale x 16 x i1> [[VM:%.*]], <vscale x 16 x float> [[VD:%.*]], <vscale x 16 x bfloat> [[VS2:%.*]], <vscale x 16 x bfloat> [[VS1:%.*]], i64 noundef [[VL:%.*]]) #[[ATTR0]] {
+// CHECK-RV64-NEXT: [[ENTRY:.*:]]
+// CHECK-RV64-NEXT: [[TMP0:%.*]] = call <vscale x 16 x float> @llvm.riscv.vfwsub.mask.nxv16f32.nxv16bf16.nxv16bf16.i64(<vscale x 16 x float> [[VD]], <vscale x 16 x bfloat> [[VS2]], <vscale x 16 x bfloat> [[VS1]], <vscale x 16 x i1> [[VM]], i64 7, i64 [[VL]], i64 0)
+// CHECK-RV64-NEXT: ret <vscale x 16 x float> [[TMP0]]
+//
+vfloat32m8_t test_vfwsub_vv_bf16m4_f32m8_tumu(vbool4_t vm, vfloat32m8_t vd,
+ vbfloat16m4_t vs2,
+ vbfloat16m4_t vs1, size_t vl) {
+ return __riscv_vfwsub_vv_tumu(vm, vd, vs2, vs1, vl);
+}
+
+// CHECK-RV64-LABEL: define dso_local <vscale x 16 x float> @test_vfwsub_vf_bf16m4_f32m8_tumu(
+// CHECK-RV64-SAME: <vscale x 16 x i1> [[VM:%.*]], <vscale x 16 x float> [[VD:%.*]], <vscale x 16 x bfloat> [[VS2:%.*]], bfloat noundef [[RS1:%.*]], i64 noundef [[VL:%.*]]) #[[ATTR0]] {
+// CHECK-RV64-NEXT: [[ENTRY:.*:]]
+// CHECK-RV64-NEXT: [[TMP0:%.*]] = call <vscale x 16 x float> @llvm.riscv.vfwsub.mask.nxv16f32.nxv16bf16.bf16.i64(<vscale x 16 x float> [[VD]], <vscale x 16 x bfloat> [[VS2]], bfloat [[RS1]], <vscale x 16 x i1> [[VM]], i64 7, i64 [[VL]], i64 0)
+// CHECK-RV64-NEXT: ret <vscale x 16 x float> [[TMP0]]
+//
+vfloat32m8_t test_vfwsub_vf_bf16m4_f32m8_tumu(vbool4_t vm, vfloat32m8_t vd,
+ vbfloat16m4_t vs2, __bf16 rs1,
+ size_t vl) {
+ return __riscv_vfwsub_vf_tumu(vm, vd, vs2, rs1, vl);
+}
+
+// CHECK-RV64-LABEL: define dso_local <vscale x 16 x float> @test_vfwsub_wv_bf16m4_f32m8_tumu(
+// CHECK-RV64-SAME: <vscale x 16 x i1> [[VM:%.*]], <vscale x 16 x float> [[VD:%.*]], <vscale x 16 x float> [[VS2:%.*]], <vscale x 16 x bfloat> [[VS1:%.*]], i64 noundef [[VL:%.*]]) #[[ATTR0]] {
+// CHECK-RV64-NEXT: [[ENTRY:.*:]]
+// CHECK-RV64-NEXT: [[TMP0:%.*]] = call <vscale x 16 x float> @llvm.riscv.vfwsub.w.mask.nxv16f32.nxv16bf16.i64(<vscale x 16 x float> [[VD]], <vscale x 16 x float> [[VS2]], <vscale x 16 x bfloat> [[VS1]], <vscale x 16 x i1> [[VM]], i64 7, i64 [[VL]], i64 0)
+// CHECK-RV64-NEXT: ret <vscale x 16 x float> [[TMP0]]
+//
+vfloat32m8_t test_vfwsub_wv_bf16m4_f32m8_tumu(vbool4_t vm, vfloat32m8_t vd,
+ vfloat32m8_t vs2,
+ vbfloat16m4_t vs1, size_t vl) {
+ return __riscv_vfwsub_wv_tumu(vm, vd, vs2, vs1, vl);
+}
+
+// CHECK-RV64-LABEL: define dso_local <vscale x 16 x float> @test_vfwsub_wf_bf16_f32m8_tumu(
+// CHECK-RV64-SAME: <vscale x 16 x i1> [[VM:%.*]], <vscale x 16 x float> [[VD:%.*]], <vscale x 16 x float> [[VS2:%.*]], bfloat noundef [[RS1:%.*]], i64 noundef [[VL:%.*]]) #[[ATTR0]] {
+// CHECK-RV64-NEXT: [[ENTRY:.*:]]
+// CHECK-RV64-NEXT: [[TMP0:%.*]] = call <vscale x 16 x float> @llvm.riscv.vfwsub.w.mask.nxv16f32.bf16.i64(<vscale x 16 x float> [[VD]], <vscale x 16 x float> [[VS2]], bfloat [[RS1]], <vscale x 16 x i1> [[VM]], i64 7, i64 [[VL]], i64 0)
+// CHECK-RV64-NEXT: ret <vscale x 16 x float> [[TMP0]]
+//
+vfloat32m8_t test_vfwsub_wf_bf16_f32m8_tumu(vbool4_t vm, vfloat32m8_t vd,
+ vfloat32m8_t vs2, __bf16 rs1,
+ size_t vl) {
+ return __riscv_vfwsub_wf_tumu(vm, vd, vs2, rs1, vl);
+}
+
+// CHECK-RV64-LABEL: define dso_local <vscale x 1 x float> @test_vfwsub_vv_bf16mf4_f32mf2_mu(
+// CHECK-RV64-SAME: <vscale x 1 x i1> [[VM:%.*]], <vscale x 1 x float> [[VD:%.*]], <vscale x 1 x bfloat> [[VS2:%.*]], <vscale x 1 x bfloat> [[VS1:%.*]], i64 noundef [[VL:%.*]]) #[[ATTR0]] {
+// CHECK-RV64-NEXT: [[ENTRY:.*:]]
+// CHECK-RV64-NEXT: [[TMP0:%.*]] = call <vscale x 1 x float> @llvm.riscv.vfwsub.mask.nxv1f32.nxv1bf16.nxv1bf16.i64(<vscale x 1 x float> [[VD]], <vscale x 1 x bfloat> [[VS2]], <vscale x 1 x bfloat> [[VS1]], <vscale x 1 x i1> [[VM]], i64 7, i64 [[VL]], i64 1)
+// CHECK-RV64-NEXT: ret <vscale x 1 x float> [[TMP0]]
+//
+vfloat32mf2_t test_vfwsub_vv_bf16mf4_f32mf2_mu(vbool64_t vm, vfloat32mf2_t vd,
+ vbfloat16mf4_t vs2,
+ vbfloat16mf4_t vs1, size_t vl) {
+ return __riscv_vfwsub_vv_mu(vm, vd, vs2, vs1, vl);
+}
+
+// CHECK-RV64-LABEL: define dso_local <vscale x 1 x float> @test_vfwsub_vf_bf16mf4_f32mf2_mu(
+// CHECK-RV64-SAME: <vscale x 1 x i1> [[VM:%.*]], <vscale x 1 x float> [[VD:%.*]], <vscale x 1 x bfloat> [[VS2:%.*]], bfloat noundef [[RS1:%.*]], i64 noundef [[VL:%.*]]) #[[ATTR0]] {
+// CHECK-RV64-NEXT: [[ENTRY:.*:]]
+// CHECK-RV64-NEXT: [[TMP0:%.*]] = call <vscale x 1 x float> @llvm.riscv.vfwsub.mask.nxv1f32.nxv1bf16.bf16.i64(<vscale x 1 x float> [[VD]], <vscale x 1 x bfloat> [[VS2]], bfloat [[RS1]], <vscale x 1 x i1> [[VM]], i64 7, i64 [[VL]], i64 1)
+// CHECK-RV64-NEXT: ret <vscale x 1 x float> [[TMP0]]
+//
+vfloat32mf2_t test_vfwsub_vf_bf16mf4_f32mf2_mu(vbool64_t vm, vfloat32mf2_t vd,
+ vbfloat16mf4_t vs2, __bf16 rs1,
+ size_t vl) {
+ return __riscv_vfwsub_vf_mu(vm, vd, vs2, rs1, vl);
+}
+
+// CHECK-RV64-LABEL: define dso_local <vscale x 1 x float> @test_vfwsub_wv_bf16mf4_f32mf2_mu(
+// CHECK-RV64-SAME: <vscale x 1 x i1> [[VM:%.*]], <vscale x 1 x float> [[VD:%.*]], <vscale x 1 x float> [[VS2:%.*]], <vscale x 1 x bfloat> [[VS1:%.*]], i64 noundef [[VL:%.*]]) #[[ATTR0]] {
+// CHECK-RV64-NEXT: [[ENTRY:.*:]]
+// CHECK-RV64-NEXT: [[TMP0:%.*]] = call <vscale x 1 x float> @llvm.riscv.vfwsub.w.mask.nxv1f32.nxv1bf16.i64(<vscale x 1 x float> [[VD]], <vscale x 1 x float> [[VS2]], <vscale x 1 x bfloat> [[VS1]], <vscale x 1 x i1> [[VM]], i64 7, i64 [[VL]], i64 1)
+// CHECK-RV64-NEXT: ret <vscale x 1 x float> [[TMP0]]
+//
+vfloat32mf2_t test_vfwsub_wv_bf16mf4_f32mf2_mu(vbool64_t vm, vfloat32mf2_t vd,
+ vfloat32mf2_t vs2,
+ vbfloat16mf4_t vs1, size_t vl) {
+ return __riscv_vfwsub_wv_mu(vm, vd, vs2, vs1, vl);
+}
+
+// CHECK-RV64-LABEL: define dso_local <vscale x 1 x float> @test_vfwsub_wf_bf16_f32mf2_mu(
+// CHECK-RV64-SAME: <vscale x 1 x i1> [[VM:%.*]], <vscale x 1 x float> [[VD:%.*]], <vscale x 1 x float> [[VS2:%.*]], bfloat noundef [[RS1:%.*]], i64 noundef [[VL:%.*]]) #[[ATTR0]] {
+// CHECK-RV64-NEXT: [[ENTRY:.*:]]
+// CHECK-RV64-NEXT: [[TMP0:%.*]] = call <vscale x 1 x float> @llvm.riscv.vfwsub.w.mask.nxv1f32.bf16.i64(<vscale x 1 x float> [[VD]], <vscale x 1 x float> [[VS2]], bfloat [[RS1]], <vscale x 1 x i1> [[VM]], i64 7, i64 [[VL]], i64 1)
+// CHECK-RV64-NEXT: ret <vscale x 1 x float> [[TMP0]]
+//
+vfloat32mf2_t test_vfwsub_wf_bf16_f32mf2_mu(vbool64_t vm, vfloat32mf2_t vd,
+ vfloat32mf2_t vs2, __bf16 rs1,
+ size_t vl) {
+ return __riscv_vfwsub_wf_mu(vm, vd, vs2, rs1, vl);
+}
+
+// CHECK-RV64-LABEL: define dso_local <vscale x 2 x float> @test_vfwsub_vv_bf16mf2_f32m1_mu(
+// CHECK-RV64-SAME: <vscale x 2 x i1> [[VM:%.*]], <vscale x 2 x float> [[VD:%.*]], <vscale x 2 x bfloat> [[VS2:%.*]], <vscale x 2 x bfloat> [[VS1:%.*]], i64 noundef [[VL:%.*]]) #[[ATTR0]] {
+// CHECK-RV64-NEXT: [[ENTRY:.*:]]
+// CHECK-RV64-NEXT: [[TMP0:%.*]] = call <vscale x 2 x float> @llvm.riscv.vfwsub.mask.nxv2f32.nxv2bf16.nxv2bf16.i64(<vscale x 2 x float> [[VD]], <vscale x 2 x bfloat> [[VS2]], <vscale x 2 x bfloat> [[VS1]], <vscale x 2 x i1> [[VM]], i64 7, i64 [[VL]], i64 1)
+// CHECK-RV64-NEXT: ret <vscale x 2 x float> [[TMP0]]
+//
+vfloat32m1_t test_vfwsub_vv_bf16mf2_f32m1_mu(vbool32_t vm, vfloat32m1_t vd,
+ vbfloat16mf2_t vs2,
+ vbfloat16mf2_t vs1, size_t vl) {
+ return __riscv_vfwsub_vv_mu(vm, vd, vs2, vs1, vl);
+}
+
+// CHECK-RV64-LABEL: define dso_local <vscale x 2 x float> @test_vfwsub_vf_bf16mf2_f32m1_mu(
+// CHECK-RV64-SAME: <vscale x 2 x i1> [[VM:%.*]], <vscale x 2 x float> [[VD:%.*]], <vscale x 2 x bfloat> [[VS2:%.*]], bfloat noundef [[RS1:%.*]], i64 noundef [[VL:%.*]]) #[[ATTR0]] {
+// CHECK-RV64-NEXT: [[ENTRY:.*:]]
+// CHECK-RV64-NEXT: [[TMP0:%.*]] = call <vscale x 2 x float> @llvm.riscv.vfwsub.mask.nxv2f32.nxv2bf16.bf16.i64(<vscale x 2 x float> [[VD]], <vscale x 2 x bfloat> [[VS2]], bfloat [[RS1]], <vscale x 2 x i1> [[VM]], i64 7, i64 [[VL]], i64 1)
+// CHECK-RV64-NEXT: ret <vscale x 2 x float> [[TMP0]]
+//
+vfloat32m1_t test_vfwsub_vf_bf16mf2_f32m1_mu(vbool32_t vm, vfloat32m1_t vd,
+ vbfloat16mf2_t vs2, __bf16 rs1,
+ size_t vl) {
+ return __riscv_vfwsub_vf_mu(vm, vd, vs2, rs1, vl);
+}
+
+// CHECK-RV64-LABEL: define dso_local <vscale x 2 x float> @test_vfwsub_wv_bf16mf2_f32m1_mu(
+// CHECK-RV64-SAME: <vscale x 2 x i1> [[VM:%.*]], <vscale x 2 x float> [[VD:%.*]], <vscale x 2 x float> [[VS2:%.*]], <vscale x 2 x bfloat> [[VS1:%.*]], i64 noundef [[VL:%.*]]) #[[ATTR0]] {
+// CHECK-RV64-NEXT: [[ENTRY:.*:]]
+// CHECK-RV64-NEXT: [[TMP0:%.*]] = call <vscale x 2 x float> @llvm.riscv.vfwsub.w.mask.nxv2f32.nxv2bf16.i64(<vscale x 2 x float> [[VD]], <vscale x 2 x float> [[VS2]], <vscale x 2 x bfloat> [[VS1]], <vscale x 2 x i1> [[VM]], i64 7, i64 [[VL]], i64 1)
+// CHECK-RV64-NEXT: ret <vscale x 2 x float> [[TMP0]]
+//
+vfloat32m1_t test_vfwsub_wv_bf16mf2_f32m1_mu(vbool32_t vm, vfloat32m1_t vd,
+ vfloat32m1_t vs2,
+ vbfloat16mf2_t vs1, size_t vl) {
+ return __riscv_vfwsub_wv_mu(vm, vd, vs2, vs1, vl);
+}
+
+// CHECK-RV64-LABEL: define dso_local <vscale x 2 x float> @test_vfwsub_wf_bf16_f32m1_mu(
+// CHECK-RV64-SAME: <vscale x 2 x i1> [[VM:%.*]], <vscale x 2 x float> [[VD:%.*]], <vscale x 2 x float> [[VS2:%.*]], bfloat noundef [[RS1:%.*]], i64 noundef [[VL:%.*]]) #[[ATTR0]] {
+// CHECK-RV64-NEXT: [[ENTRY:.*:]]
+// CHECK-RV64-NEXT: [[TMP0:%.*]] = call <vscale x 2 x float> @llvm.riscv.vfwsub.w.mask.nxv2f32.bf16.i64(<vscale x 2 x float> [[VD]], <vscale x 2 x float> [[VS2]], bfloat [[RS1]], <vscale x 2 x i1> [[VM]], i64 7, i64 [[VL]], i64 1)
+// CHECK-RV64-NEXT: ret <vscale x 2 x float> [[TMP0]]
+//
+vfloat32m1_t test_vfwsub_wf_bf16_f32m1_mu(vbool32_t vm, vfloat32m1_t vd,
+ vfloat32m1_t vs2, __bf16 rs1,
+ size_t vl) {
+ return __riscv_vfwsub_wf_mu(vm, vd, vs2, rs1, vl);
+}
+
+// CHECK-RV64-LABEL: define dso_local <vscale x 4 x float> @test_vfwsub_vv_bf16m1_f32m2_mu(
+// CHECK-RV64-SAME: <vscale x 4 x i1> [[VM:%.*]], <vscale x 4 x float> [[VD:%.*]], <vscale x 4 x bfloat> [[VS2:%.*]], <vscale x 4 x bfloat> [[VS1:%.*]], i64 noundef [[VL:%.*]]) #[[ATTR0]] {
+// CHECK-RV64-NEXT: [[ENTRY:.*:]]
+// CHECK-RV64-NEXT: [[TMP0:%.*]] = call <vscale x 4 x float> @llvm.riscv.vfwsub.mask.nxv4f32.nxv4bf16.nxv4bf16.i64(<vscale x 4 x float> [[VD]], <vscale x 4 x bfloat> [[VS2]], <vscale x 4 x bfloat> [[VS1]], <vscale x 4 x i1> [[VM]], i64 7, i64 [[VL]], i64 1)
+// CHECK-RV64-NEXT: ret <vscale x 4 x float> [[TMP0]]
+//
+vfloat32m2_t test_vfwsub_vv_bf16m1_f32m2_mu(vbool16_t vm, vfloat32m2_t vd,
+ vbfloat16m1_t vs2,
+ vbfloat16m1_t vs1, size_t vl) {
+ return __riscv_vfwsub_vv_mu(vm, vd, vs2, vs1, vl);
+}
+
+// CHECK-RV64-LABEL: define dso_local <vscale x 4 x float> @test_vfwsub_vf_bf16m1_f32m2_mu(
+// CHECK-RV64-SAME: <vscale x 4 x i1> [[VM:%.*]], <vscale x 4 x float> [[VD:%.*]], <vscale x 4 x bfloat> [[VS2:%.*]], bfloat noundef [[RS1:%.*]], i64 noundef [[VL:%.*]]) #[[ATTR0]] {
+// CHECK-RV64-NEXT: [[ENTRY:.*:]]
+// CHECK-RV64-NEXT: [[TMP0:%.*]] = call <vscale x 4 x float> @llvm.riscv.vfwsub.mask.nxv4f32.nxv4bf16.bf16.i64(<vscale x 4 x float> [[VD]], <vscale x 4 x bfloat> [[VS2]], bfloat [[RS1]], <vscale x 4 x i1> [[VM]], i64 7, i64 [[VL]], i64 1)
+// CHECK-RV64-NEXT: ret <vscale x 4 x float> [[TMP0]]
+//
+vfloat32m2_t test_vfwsub_vf_bf16m1_f32m2_mu(vbool16_t vm, vfloat32m2_t vd,
+ vbfloat16m1_t vs2, __bf16 rs1,
+ size_t vl) {
+ return __riscv_vfwsub_vf_mu(vm, vd, vs2, rs1, vl);
+}
+
+// CHECK-RV64-LABEL: define dso_local <vscale x 4 x float> @test_vfwsub_wv_bf16m1_f32m2_mu(
+// CHECK-RV64-SAME: <vscale x 4 x i1> [[VM:%.*]], <vscale x 4 x float> [[VD:%.*]], <vscale x 4 x float> [[VS2:%.*]], <vscale x 4 x bfloat> [[VS1:%.*]], i64 noundef [[VL:%.*]]) #[[ATTR0]] {
+// CHECK-RV64-NEXT: [[ENTRY:.*:]]
+// CHECK-RV64-NEXT: [[TMP0:%.*]] = call <vscale x 4 x float> @llvm.riscv.vfwsub.w.mask.nxv4f32.nxv4bf16.i64(<vscale x 4 x float> [[VD]], <vscale x 4 x float> [[VS2]], <vscale x 4 x bfloat> [[VS1]], <vscale x 4 x i1> [[VM]], i64 7, i64 [[VL]], i64 1)
+// CHECK-RV64-NEXT: ret <vscale x 4 x float> [[TMP0]]
+//
+vfloat32m2_t test_vfwsub_wv_bf16m1_f32m2_mu(vbool16_t vm, vfloat32m2_t vd,
+ vfloat32m2_t vs2, vbfloat16m1_t vs1,
+ size_t vl) {
+ return __riscv_vfwsub_wv_mu(vm, vd, vs2, vs1, vl);
+}
+
+// CHECK-RV64-LABEL: define dso_local <vscale x 4 x float> @test_vfwsub_wf_bf16_f32m2_mu(
+// CHECK-RV64-SAME: <vscale x 4 x i1> [[VM:%.*]], <vscale x 4 x float> [[VD:%.*]], <vscale x 4 x float> [[VS2:%.*]], bfloat noundef [[RS1:%.*]], i64 noundef [[VL:%.*]]) #[[ATTR0]] {
+// CHECK-RV64-NEXT: [[ENTRY:.*:]]
+// CHECK-RV64-NEXT: [[TMP0:%.*]] = call <vscale x 4 x float> @llvm.riscv.vfwsub.w.mask.nxv4f32.bf16.i64(<vscale x 4 x float> [[VD]], <vscale x 4 x float> [[VS2]], bfloat [[RS1]], <vscale x 4 x i1> [[VM]], i64 7, i64 [[VL]], i64 1)
+// CHECK-RV64-NEXT: ret <vscale x 4 x float> [[TMP0]]
+//
+vfloat32m2_t test_vfwsub_wf_bf16_f32m2_mu(vbool16_t vm, vfloat32m2_t vd,
+ vfloat32m2_t vs2, __bf16 rs1,
+ size_t vl) {
+ return __riscv_vfwsub_wf_mu(vm, vd, vs2, rs1, vl);
+}
+
+// CHECK-RV64-LABEL: define dso_local <vscale x 8 x float> @test_vfwsub_vv_bf16m2_f32m4_mu(
+// CHECK-RV64-SAME: <vscale x 8 x i1> [[VM:%.*]], <vscale x 8 x float> [[VD:%.*]], <vscale x 8 x bfloat> [[VS2:%.*]], <vscale x 8 x bfloat> [[VS1:%.*]], i64 noundef [[VL:%.*]]) #[[ATTR0]] {
+// CHECK-RV64-NEXT: [[ENTRY:.*:]]
+// CHECK-RV64-NEXT: [[TMP0:%.*]] = call <vscale x 8 x float> @llvm.riscv.vfwsub.mask.nxv8f32.nxv8bf16.nxv8bf16.i64(<vscale x 8 x float> [[VD]], <vscale x 8 x bfloat> [[VS2]], <vscale x 8 x bfloat> [[VS1]], <vscale x 8 x i1> [[VM]], i64 7, i64 [[VL]], i64 1)
+// CHECK-RV64-NEXT: ret <vscale x 8 x float> [[TMP0]]
+//
+vfloat32m4_t test_vfwsub_vv_bf16m2_f32m4_mu(vbool8_t vm, vfloat32m4_t vd,
+ vbfloat16m2_t vs2,
+ vbfloat16m2_t vs1, size_t vl) {
+ return __riscv_vfwsub_vv_mu(vm, vd, vs2, vs1, vl);
+}
+
+// CHECK-RV64-LABEL: define dso_local <vscale x 8 x float> @test_vfwsub_vf_bf16m2_f32m4_mu(
+// CHECK-RV64-SAME: <vscale x 8 x i1> [[VM:%.*]], <vscale x 8 x float> [[VD:%.*]], <vscale x 8 x bfloat> [[VS2:%.*]], bfloat noundef [[RS1:%.*]], i64 noundef [[VL:%.*]]) #[[ATTR0]] {
+// CHECK-RV64-NEXT: [[ENTRY:.*:]]
+// CHECK-RV64-NEXT: [[TMP0:%.*]] = call <vscale x 8 x float> @llvm.riscv.vfwsub.mask.nxv8f32.nxv8bf16.bf16.i64(<vscale x 8 x float> [[VD]], <vscale x 8 x bfloat> [[VS2]], bfloat [[RS1]], <vscale x 8 x i1> [[VM]], i64 7, i64 [[VL]], i64 1)
+// CHECK-RV64-NEXT: ret <vscale x 8 x float> [[TMP0]]
+//
+vfloat32m4_t test_vfwsub_vf_bf16m2_f32m4_mu(vbool8_t vm, vfloat32m4_t vd,
+ vbfloat16m2_t vs2, __bf16 rs1,
+ size_t vl) {
+ return __riscv_vfwsub_vf_mu(vm, vd, vs2, rs1, vl);
+}
+
+// CHECK-RV64-LABEL: define dso_local <vscale x 8 x float> @test_vfwsub_wv_bf16m2_f32m4_mu(
+// CHECK-RV64-SAME: <vscale x 8 x i1> [[VM:%.*]], <vscale x 8 x float> [[VD:%.*]], <vscale x 8 x float> [[VS2:%.*]], <vscale x 8 x bfloat> [[VS1:%.*]], i64 noundef [[VL:%.*]]) #[[ATTR0]] {
+// CHECK-RV64-NEXT: [[ENTRY:.*:]]
+// CHECK-RV64-NEXT: [[TMP0:%.*]] = call <vscale x 8 x float> @llvm.riscv.vfwsub.w.mask.nxv8f32.nxv8bf16.i64(<vscale x 8 x float> [[VD]], <vscale x 8 x float> [[VS2]], <vscale x 8 x bfloat> [[VS1]], <vscale x 8 x i1> [[VM]], i64 7, i64 [[VL]], i64 1)
+// CHECK-RV64-NEXT: ret <vscale x 8 x float> [[TMP0]]
+//
+vfloat32m4_t test_vfwsub_wv_bf16m2_f32m4_mu(vbool8_t vm, vfloat32m4_t vd,
+ vfloat32m4_t vs2, vbfloat16m2_t vs1,
+ size_t vl) {
+ return __riscv_vfwsub_wv_mu(vm, vd, vs2, vs1, vl);
+}
+
+// CHECK-RV64-LABEL: define dso_local <vscale x 8 x float> @test_vfwsub_wf_bf16_f32m4_mu(
+// CHECK-RV64-SAME: <vscale x 8 x i1> [[VM:%.*]], <vscale x 8 x float> [[VD:%.*]], <vscale x 8 x float> [[VS2:%.*]], bfloat noundef [[RS1:%.*]], i64 noundef [[VL:%.*]]) #[[ATTR0]] {
+// CHECK-RV64-NEXT: [[ENTRY:.*:]]
+// CHECK-RV64-NEXT: [[TMP0:%.*]] = call <vscale x 8 x float> @llvm.riscv.vfwsub.w.mask.nxv8f32.bf16.i64(<vscale x 8 x float> [[VD]], <vscale x 8 x float> [[VS2]], bfloat [[RS1]], <vscale x 8 x i1> [[VM]], i64 7, i64 [[VL]], i64 1)
+// CHECK-RV64-NEXT: ret <vscale x 8 x float> [[TMP0]]
+//
+vfloat32m4_t test_vfwsub_wf_bf16_f32m4_mu(vbool8_t vm, vfloat32m4_t vd,
+ vfloat32m4_t vs2, __bf16 rs1,
+ size_t vl) {
+ return __riscv_vfwsub_wf_mu(vm, vd, vs2, rs1, vl);
+}
+
+// CHECK-RV64-LABEL: define dso_local <vscale x 16 x float> @test_vfwsub_vv_bf16m4_f32m8_mu(
+// CHECK-RV64-SAME: <vscale x 16 x i1> [[VM:%.*]], <vscale x 16 x float> [[VD:%.*]], <vscale x 16 x bfloat> [[VS2:%.*]], <vscale x 16 x bfloat> [[VS1:%.*]], i64 noundef [[VL:%.*]]) #[[ATTR0]] {
+// CHECK-RV64-NEXT: [[ENTRY:.*:]]
+// CHECK-RV64-NEXT: [[TMP0:%.*]] = call <vscale x 16 x float> @llvm.riscv.vfwsub.mask.nxv16f32.nxv16bf16.nxv16bf16.i64(<vscale x 16 x float> [[VD]], <vscale x 16 x bfloat> [[VS2]], <vscale x 16 x bfloat> [[VS1]], <vscale x 16 x i1> [[VM]], i64 7, i64 [[VL]], i64 1)
+// CHECK-RV64-NEXT: ret <vscale x 16 x float> [[TMP0]]
+//
+vfloat32m8_t test_vfwsub_vv_bf16m4_f32m8_mu(vbool4_t vm, vfloat32m8_t vd,
+ vbfloat16m4_t vs2,
+ vbfloat16m4_t vs1, size_t vl) {
+ return __riscv_vfwsub_vv_mu(vm, vd, vs2, vs1, vl);
+}
+
+// CHECK-RV64-LABEL: define dso_local <vscale x 16 x float> @test_vfwsub_vf_bf16m4_f32m8_mu(
+// CHECK-RV64-SAME: <vscale x 16 x i1> [[VM:%.*]], <vscale x 16 x float> [[VD:%.*]], <vscale x 16 x bfloat> [[VS2:%.*]], bfloat noundef [[RS1:%.*]], i64 noundef [[VL:%.*]]) #[[ATTR0]] {
+// CHECK-RV64-NEXT: [[ENTRY:.*:]]
+// CHECK-RV64-NEXT: [[TMP0:%.*]] = call <vscale x 16 x float> @llvm.riscv.vfwsub.mask.nxv16f32.nxv16bf16.bf16.i64(<vscale x 16 x float> [[VD]], <vscale x 16 x bfloat> [[VS2]], bfloat [[RS1]], <vscale x 16 x i1> [[VM]], i64 7, i64 [[VL]], i64 1)
+// CHECK-RV64-NEXT: ret <vscale x 16 x float> [[TMP0]]
+//
+vfloat32m8_t test_vfwsub_vf_bf16m4_f32m8_mu(vbool4_t vm, vfloat32m8_t vd,
+ vbfloat16m4_t vs2, __bf16 rs1,
+ size_t vl) {
+ return __riscv_vfwsub_vf_mu(vm, vd, vs2, rs1, vl);
+}
+
+// CHECK-RV64-LABEL: define dso_local <vscale x 16 x float> @test_vfwsub_wv_bf16m4_f32m8_mu(
+// CHECK-RV64-SAME: <vscale x 16 x i1> [[VM:%.*]], <vscale x 16 x float> [[VD:%.*]], <vscale x 16 x float> [[VS2:%.*]], <vscale x 16 x bfloat> [[VS1:%.*]], i64 noundef [[VL:%.*]]) #[[ATTR0]] {
+// CHECK-RV64-NEXT: [[ENTRY:.*:]]
+// CHECK-RV64-NEXT: [[TMP0:%.*]] = call <vscale x 16 x float> @llvm.riscv.vfwsub.w.mask.nxv16f32.nxv16bf16.i64(<vscale x 16 x float> [[VD]], <vscale x 16 x float> [[VS2]], <vscale x 16 x bfloat> [[VS1]], <vscale x 16 x i1> [[VM]], i64 7, i64 [[VL]], i64 1)
+// CHECK-RV64-NEXT: ret <vscale x 16 x float> [[TMP0]]
+//
+vfloat32m8_t test_vfwsub_wv_bf16m4_f32m8_mu(vbool4_t vm, vfloat32m8_t vd,
+ vfloat32m8_t vs2, vbfloat16m4_t vs1,
+ size_t vl) {
+ return __riscv_vfwsub_wv_mu(vm, vd, vs2, vs1, vl);
+}
+
+// CHECK-RV64-LABEL: define dso_local <vscale x 16 x float> @test_vfwsub_wf_bf16_f32m8_mu(
+// CHECK-RV64-SAME: <vscale x 16 x i1> [[VM:%.*]], <vscale x 16 x float> [[VD:%.*]], <vscale x 16 x float> [[VS2:%.*]], bfloat noundef [[RS1:%.*]], i64 noundef [[VL:%.*]]) #[[ATTR0]] {
+// CHECK-RV64-NEXT: [[ENTRY:.*:]]
+// CHECK-RV64-NEXT: [[TMP0:%.*]] = call <vscale x 16 x float> @llvm.riscv.vfwsub.w.mask.nxv16f32.bf16.i64(<vscale x 16 x float> [[VD]], <vscale x 16 x float> [[VS2]], bfloat [[RS1]], <vscale x 16 x i1> [[VM]], i64 7, i64 [[VL]], i64 1)
+// CHECK-RV64-NEXT: ret <vscale x 16 x float> [[TMP0]]
+//
+vfloat32m8_t test_vfwsub_wf_bf16_f32m8_mu(vbool4_t vm, vfloat32m8_t vd,
+ vfloat32m8_t vs2, __bf16 rs1,
+ size_t vl) {
+ return __riscv_vfwsub_wf_mu(vm, vd, vs2, rs1, vl);
+}
+
+// CHECK-RV64-LABEL: define dso_local <vscale x 1 x float> @test_vfwsub_vv_bf16mf4_f32mf2_rm_tu(
+// CHECK-RV64-SAME: <vscale x 1 x float> [[VD:%.*]], <vscale x 1 x bfloat> [[VS2:%.*]], <vscale x 1 x bfloat> [[VS1:%.*]], i64 noundef [[VL:%.*]]) #[[ATTR0]] {
+// CHECK-RV64-NEXT: [[ENTRY:.*:]]
+// CHECK-RV64-NEXT: [[TMP0:%.*]] = call <vscale x 1 x float> @llvm.riscv.vfwsub.nxv1f32.nxv1bf16.nxv1bf16.i64(<vscale x 1 x float> [[VD]], <vscale x 1 x bfloat> [[VS2]], <vscale x 1 x bfloat> [[VS1]], i64 0, i64 [[VL]])
+// CHECK-RV64-NEXT: ret <vscale x 1 x float> [[TMP0]]
+//
+vfloat32mf2_t test_vfwsub_vv_bf16mf4_f32mf2_rm_tu(vfloat32mf2_t vd,
+ vbfloat16mf4_t vs2,
+ vbfloat16mf4_t vs1,
+ size_t vl) {
+ return __riscv_vfwsub_vv_tu(vd, vs2, vs1, __RISCV_FRM_RNE, vl);
+}
+
+// CHECK-RV64-LABEL: define dso_local <vscale x 1 x float> @test_vfwsub_vf_bf16mf4_f32mf2_rm_tu(
+// CHECK-RV64-SAME: <vscale x 1 x float> [[VD:%.*]], <vscale x 1 x bfloat> [[VS2:%.*]], bfloat noundef [[RS1:%.*]], i64 noundef [[VL:%.*]]) #[[ATTR0]] {
+// CHECK-RV64-NEXT: [[ENTRY:.*:]]
+// CHECK-RV64-NEXT: [[TMP0:%.*]] = call <vscale x 1 x float> @llvm.riscv.vfwsub.nxv1f32.nxv1bf16.bf16.i64(<vscale x 1 x float> [[VD]], <vscale x 1 x bfloat> [[VS2]], bfloat [[RS1]], i64 0, i64 [[VL]])
+// CHECK-RV64-NEXT: ret <vscale x 1 x float> [[TMP0]]
+//
+vfloat32mf2_t test_vfwsub_vf_bf16mf4_f32mf2_rm_tu(vfloat32mf2_t vd,
+ vbfloat16mf4_t vs2,
+ __bf16 rs1, size_t vl) {
+ return __riscv_vfwsub_vf_tu(vd, vs2, rs1, __RISCV_FRM_RNE, vl);
+}
+
+// CHECK-RV64-LABEL: define dso_local <vscale x 1 x float> @test_vfwsub_wv_bf16mf4_f32mf2_rm_tu(
+// CHECK-RV64-SAME: <vscale x 1 x float> [[VD:%.*]], <vscale x 1 x float> [[VS2:%.*]], <vscale x 1 x bfloat> [[VS1:%.*]], i64 noundef [[VL:%.*]]) #[[ATTR0]] {
+// CHECK-RV64-NEXT: [[ENTRY:.*:]]
+// CHECK-RV64-NEXT: [[TMP0:%.*]] = call <vscale x 1 x float> @llvm.riscv.vfwsub.w.nxv1f32.nxv1bf16.i64(<vscale x 1 x float> [[VD]], <vscale x 1 x float> [[VS2]], <vscale x 1 x bfloat> [[VS1]], i64 0, i64 [[VL]])
+// CHECK-RV64-NEXT: ret <vscale x 1 x float> [[TMP0]]
+//
+vfloat32mf2_t test_vfwsub_wv_bf16mf4_f32mf2_rm_tu(vfloat32mf2_t vd,
+ vfloat32mf2_t vs2,
+ vbfloat16mf4_t vs1,
+ size_t vl) {
+ return __riscv_vfwsub_wv_tu(vd, vs2, vs1, __RISCV_FRM_RNE, vl);
+}
+
+// CHECK-RV64-LABEL: define dso_local <vscale x 1 x float> @test_vfwsub_wf_bf16_f32mf2_rm_tu(
+// CHECK-RV64-SAME: <vscale x 1 x float> [[VD:%.*]], <vscale x 1 x float> [[VS2:%.*]], bfloat noundef [[RS1:%.*]], i64 noundef [[VL:%.*]]) #[[ATTR0]] {
+// CHECK-RV64-NEXT: [[ENTRY:.*:]]
+// CHECK-RV64-NEXT: [[TMP0:%.*]] = call <vscale x 1 x float> @llvm.riscv.vfwsub.w.nxv1f32.bf16.i64(<vscale x 1 x float> [[VD]], <vscale x 1 x float> [[VS2]], bfloat [[RS1]], i64 0, i64 [[VL]])
+// CHECK-RV64-NEXT: ret <vscale x 1 x float> [[TMP0]]
+//
+vfloat32mf2_t test_vfwsub_wf_bf16_f32mf2_rm_tu(vfloat32mf2_t vd,
+ vfloat32mf2_t vs2, __bf16 rs1,
+ size_t vl) {
+ return __riscv_vfwsub_wf_tu(vd, vs2, rs1, __RISCV_FRM_RNE, vl);
+}
+
+// CHECK-RV64-LABEL: define dso_local <vscale x 2 x float> @test_vfwsub_vv_bf16mf2_f32m1_rm_tu(
+// CHECK-RV64-SAME: <vscale x 2 x float> [[VD:%.*]], <vscale x 2 x bfloat> [[VS2:%.*]], <vscale x 2 x bfloat> [[VS1:%.*]], i64 noundef [[VL:%.*]]) #[[ATTR0]] {
+// CHECK-RV64-NEXT: [[ENTRY:.*:]]
+// CHECK-RV64-NEXT: [[TMP0:%.*]] = call <vscale x 2 x float> @llvm.riscv.vfwsub.nxv2f32.nxv2bf16.nxv2bf16.i64(<vscale x 2 x float> [[VD]], <vscale x 2 x bfloat> [[VS2]], <vscale x 2 x bfloat> [[VS1]], i64 0, i64 [[VL]])
+// CHECK-RV64-NEXT: ret <vscale x 2 x float> [[TMP0]]
+//
+vfloat32m1_t test_vfwsub_vv_bf16mf2_f32m1_rm_tu(vfloat32m1_t vd,
+ vbfloat16mf2_t vs2,
+ vbfloat16mf2_t vs1, size_t vl) {
+ return __riscv_vfwsub_vv_tu(vd, vs2, vs1, __RISCV_FRM_RNE, vl);
+}
+
+// CHECK-RV64-LABEL: define dso_local <vscale x 2 x float> @test_vfwsub_vf_bf16mf2_f32m1_rm_tu(
+// CHECK-RV64-SAME: <vscale x 2 x float> [[VD:%.*]], <vscale x 2 x bfloat> [[VS2:%.*]], bfloat noundef [[RS1:%.*]], i64 noundef [[VL:%.*]]) #[[ATTR0]] {
+// CHECK-RV64-NEXT: [[ENTRY:.*:]]
+// CHECK-RV64-NEXT: [[TMP0:%.*]] = call <vscale x 2 x float> @llvm.riscv.vfwsub.nxv2f32.nxv2bf16.bf16.i64(<vscale x 2 x float> [[VD]], <vscale x 2 x bfloat> [[VS2]], bfloat [[RS1]], i64 0, i64 [[VL]])
+// CHECK-RV64-NEXT: ret <vscale x 2 x float> [[TMP0]]
+//
+vfloat32m1_t test_vfwsub_vf_bf16mf2_f32m1_rm_tu(vfloat32m1_t vd,
+ vbfloat16mf2_t vs2, __bf16 rs1,
+ size_t vl) {
+ return __riscv_vfwsub_vf_tu(vd, vs2, rs1, __RISCV_FRM_RNE, vl);
+}
+
+// CHECK-RV64-LABEL: define dso_local <vscale x 2 x float> @test_vfwsub_wv_bf16mf2_f32m1_rm_tu(
+// CHECK-RV64-SAME: <vscale x 2 x float> [[VD:%.*]], <vscale x 2 x float> [[VS2:%.*]], <vscale x 2 x bfloat> [[VS1:%.*]], i64 noundef [[VL:%.*]]) #[[ATTR0]] {
+// CHECK-RV64-NEXT: [[ENTRY:.*:]]
+// CHECK-RV64-NEXT: [[TMP0:%.*]] = call <vscale x 2 x float> @llvm.riscv.vfwsub.w.nxv2f32.nxv2bf16.i64(<vscale x 2 x float> [[VD]], <vscale x 2 x float> [[VS2]], <vscale x 2 x bfloat> [[VS1]], i64 0, i64 [[VL]])
+// CHECK-RV64-NEXT: ret <vscale x 2 x float> [[TMP0]]
+//
+vfloat32m1_t test_vfwsub_wv_bf16mf2_f32m1_rm_tu(vfloat32m1_t vd,
+ vfloat32m1_t vs2,
+ vbfloat16mf2_t vs1, size_t vl) {
+ return __riscv_vfwsub_wv_tu(vd, vs2, vs1, __RISCV_FRM_RNE, vl);
+}
+
+// CHECK-RV64-LABEL: define dso_local <vscale x 2 x float> @test_vfwsub_wf_bf16_f32m1_rm_tu(
+// CHECK-RV64-SAME: <vscale x 2 x float> [[VD:%.*]], <vscale x 2 x float> [[VS2:%.*]], bfloat noundef [[RS1:%.*]], i64 noundef [[VL:%.*]]) #[[ATTR0]] {
+// CHECK-RV64-NEXT: [[ENTRY:.*:]]
+// CHECK-RV64-NEXT: [[TMP0:%.*]] = call <vscale x 2 x float> @llvm.riscv.vfwsub.w.nxv2f32.bf16.i64(<vscale x 2 x float> [[VD]], <vscale x 2 x float> [[VS2]], bfloat [[RS1]], i64 0, i64 [[VL]])
+// CHECK-RV64-NEXT: ret <vscale x 2 x float> [[TMP0]]
+//
+vfloat32m1_t test_vfwsub_wf_bf16_f32m1_rm_tu(vfloat32m1_t vd, vfloat32m1_t vs2,
+ __bf16 rs1, size_t vl) {
+ return __riscv_vfwsub_wf_tu(vd, vs2, rs1, __RISCV_FRM_RNE, vl);
+}
+
+// CHECK-RV64-LABEL: define dso_local <vscale x 4 x float> @test_vfwsub_vv_bf16m1_f32m2_rm_tu(
+// CHECK-RV64-SAME: <vscale x 4 x float> [[VD:%.*]], <vscale x 4 x bfloat> [[VS2:%.*]], <vscale x 4 x bfloat> [[VS1:%.*]], i64 noundef [[VL:%.*]]) #[[ATTR0]] {
+// CHECK-RV64-NEXT: [[ENTRY:.*:]]
+// CHECK-RV64-NEXT: [[TMP0:%.*]] = call <vscale x 4 x float> @llvm.riscv.vfwsub.nxv4f32.nxv4bf16.nxv4bf16.i64(<vscale x 4 x float> [[VD]], <vscale x 4 x bfloat> [[VS2]], <vscale x 4 x bfloat> [[VS1]], i64 0, i64 [[VL]])
+// CHECK-RV64-NEXT: ret <vscale x 4 x float> [[TMP0]]
+//
+vfloat32m2_t test_vfwsub_vv_bf16m1_f32m2_rm_tu(vfloat32m2_t vd,
+ vbfloat16m1_t vs2,
+ vbfloat16m1_t vs1, size_t vl) {
+ return __riscv_vfwsub_vv_tu(vd, vs2, vs1, __RISCV_FRM_RNE, vl);
+}
+
+// CHECK-RV64-LABEL: define dso_local <vscale x 4 x float> @test_vfwsub_vf_bf16m1_f32m2_rm_tu(
+// CHECK-RV64-SAME: <vscale x 4 x float> [[VD:%.*]], <vscale x 4 x bfloat> [[VS2:%.*]], bfloat noundef [[RS1:%.*]], i64 noundef [[VL:%.*]]) #[[ATTR0]] {
+// CHECK-RV64-NEXT: [[ENTRY:.*:]]
+// CHECK-RV64-NEXT: [[TMP0:%.*]] = call <vscale x 4 x float> @llvm.riscv.vfwsub.nxv4f32.nxv4bf16.bf16.i64(<vscale x 4 x float> [[VD]], <vscale x 4 x bfloat> [[VS2]], bfloat [[RS1]], i64 0, i64 [[VL]])
+// CHECK-RV64-NEXT: ret <vscale x 4 x float> [[TMP0]]
+//
+vfloat32m2_t test_vfwsub_vf_bf16m1_f32m2_rm_tu(vfloat32m2_t vd,
+ vbfloat16m1_t vs2, __bf16 rs1,
+ size_t vl) {
+ return __riscv_vfwsub_vf_tu(vd, vs2, rs1, __RISCV_FRM_RNE, vl);
+}
+
+// CHECK-RV64-LABEL: define dso_local <vscale x 4 x float> @test_vfwsub_wv_bf16m1_f32m2_rm_tu(
+// CHECK-RV64-SAME: <vscale x 4 x float> [[VD:%.*]], <vscale x 4 x float> [[VS2:%.*]], <vscale x 4 x bfloat> [[VS1:%.*]], i64 noundef [[VL:%.*]]) #[[ATTR0]] {
+// CHECK-RV64-NEXT: [[ENTRY:.*:]]
+// CHECK-RV64-NEXT: [[TMP0:%.*]] = call <vscale x 4 x float> @llvm.riscv.vfwsub.w.nxv4f32.nxv4bf16.i64(<vscale x 4 x float> [[VD]], <vscale x 4 x float> [[VS2]], <vscale x 4 x bfloat> [[VS1]], i64 0, i64 [[VL]])
+// CHECK-RV64-NEXT: ret <vscale x 4 x float> [[TMP0]]
+//
+vfloat32m2_t test_vfwsub_wv_bf16m1_f32m2_rm_tu(vfloat32m2_t vd,
+ vfloat32m2_t vs2,
+ vbfloat16m1_t vs1, size_t vl) {
+ return __riscv_vfwsub_wv_tu(vd, vs2, vs1, __RISCV_FRM_RNE, vl);
+}
+
+// CHECK-RV64-LABEL: define dso_local <vscale x 4 x float> @test_vfwsub_wf_bf16_f32m2_rm_tu(
+// CHECK-RV64-SAME: <vscale x 4 x float> [[VD:%.*]], <vscale x 4 x float> [[VS2:%.*]], bfloat noundef [[RS1:%.*]], i64 noundef [[VL:%.*]]) #[[ATTR0]] {
+// CHECK-RV64-NEXT: [[ENTRY:.*:]]
+// CHECK-RV64-NEXT: [[TMP0:%.*]] = call <vscale x 4 x float> @llvm.riscv.vfwsub.w.nxv4f32.bf16.i64(<vscale x 4 x float> [[VD]], <vscale x 4 x float> [[VS2]], bfloat [[RS1]], i64 0, i64 [[VL]])
+// CHECK-RV64-NEXT: ret <vscale x 4 x float> [[TMP0]]
+//
+vfloat32m2_t test_vfwsub_wf_bf16_f32m2_rm_tu(vfloat32m2_t vd, vfloat32m2_t vs2,
+ __bf16 rs1, size_t vl) {
+ return __riscv_vfwsub_wf_tu(vd, vs2, rs1, __RISCV_FRM_RNE, vl);
+}
+
+// CHECK-RV64-LABEL: define dso_local <vscale x 8 x float> @test_vfwsub_vv_bf16m2_f32m4_rm_tu(
+// CHECK-RV64-SAME: <vscale x 8 x float> [[VD:%.*]], <vscale x 8 x bfloat> [[VS2:%.*]], <vscale x 8 x bfloat> [[VS1:%.*]], i64 noundef [[VL:%.*]]) #[[ATTR0]] {
+// CHECK-RV64-NEXT: [[ENTRY:.*:]]
+// CHECK-RV64-NEXT: [[TMP0:%.*]] = call <vscale x 8 x float> @llvm.riscv.vfwsub.nxv8f32.nxv8bf16.nxv8bf16.i64(<vscale x 8 x float> [[VD]], <vscale x 8 x bfloat> [[VS2]], <vscale x 8 x bfloat> [[VS1]], i64 0, i64 [[VL]])
+// CHECK-RV64-NEXT: ret <vscale x 8 x float> [[TMP0]]
+//
+vfloat32m4_t test_vfwsub_vv_bf16m2_f32m4_rm_tu(vfloat32m4_t vd,
+ vbfloat16m2_t vs2,
+ vbfloat16m2_t vs1, size_t vl) {
+ return __riscv_vfwsub_vv_tu(vd, vs2, vs1, __RISCV_FRM_RNE, vl);
+}
+
+// CHECK-RV64-LABEL: define dso_local <vscale x 8 x float> @test_vfwsub_vf_bf16m2_f32m4_rm_tu(
+// CHECK-RV64-SAME: <vscale x 8 x float> [[VD:%.*]], <vscale x 8 x bfloat> [[VS2:%.*]], bfloat noundef [[RS1:%.*]], i64 noundef [[VL:%.*]]) #[[ATTR0]] {
+// CHECK-RV64-NEXT: [[ENTRY:.*:]]
+// CHECK-RV64-NEXT: [[TMP0:%.*]] = call <vscale x 8 x float> @llvm.riscv.vfwsub.nxv8f32.nxv8bf16.bf16.i64(<vscale x 8 x float> [[VD]], <vscale x 8 x bfloat> [[VS2]], bfloat [[RS1]], i64 0, i64 [[VL]])
+// CHECK-RV64-NEXT: ret <vscale x 8 x float> [[TMP0]]
+//
+vfloat32m4_t test_vfwsub_vf_bf16m2_f32m4_rm_tu(vfloat32m4_t vd,
+ vbfloat16m2_t vs2, __bf16 rs1,
+ size_t vl) {
+ return __riscv_vfwsub_vf_tu(vd, vs2, rs1, __RISCV_FRM_RNE, vl);
+}
+
+// CHECK-RV64-LABEL: define dso_local <vscale x 8 x float> @test_vfwsub_wv_bf16m2_f32m4_rm_tu(
+// CHECK-RV64-SAME: <vscale x 8 x float> [[VD:%.*]], <vscale x 8 x float> [[VS2:%.*]], <vscale x 8 x bfloat> [[VS1:%.*]], i64 noundef [[VL:%.*]]) #[[ATTR0]] {
+// CHECK-RV64-NEXT: [[ENTRY:.*:]]
+// CHECK-RV64-NEXT: [[TMP0:%.*]] = call <vscale x 8 x float> @llvm.riscv.vfwsub.w.nxv8f32.nxv8bf16.i64(<vscale x 8 x float> [[VD]], <vscale x 8 x float> [[VS2]], <vscale x 8 x bfloat> [[VS1]], i64 0, i64 [[VL]])
+// CHECK-RV64-NEXT: ret <vscale x 8 x float> [[TMP0]]
+//
+vfloat32m4_t test_vfwsub_wv_bf16m2_f32m4_rm_tu(vfloat32m4_t vd,
+ vfloat32m4_t vs2,
+ vbfloat16m2_t vs1, size_t vl) {
+ return __riscv_vfwsub_wv_tu(vd, vs2, vs1, __RISCV_FRM_RNE, vl);
+}
+
+// CHECK-RV64-LABEL: define dso_local <vscale x 8 x float> @test_vfwsub_wf_bf16_f32m4_rm_tu(
+// CHECK-RV64-SAME: <vscale x 8 x float> [[VD:%.*]], <vscale x 8 x float> [[VS2:%.*]], bfloat noundef [[RS1:%.*]], i64 noundef [[VL:%.*]]) #[[ATTR0]] {
+// CHECK-RV64-NEXT: [[ENTRY:.*:]]
+// CHECK-RV64-NEXT: [[TMP0:%.*]] = call <vscale x 8 x float> @llvm.riscv.vfwsub.w.nxv8f32.bf16.i64(<vscale x 8 x float> [[VD]], <vscale x 8 x float> [[VS2]], bfloat [[RS1]], i64 0, i64 [[VL]])
+// CHECK-RV64-NEXT: ret <vscale x 8 x float> [[TMP0]]
+//
+vfloat32m4_t test_vfwsub_wf_bf16_f32m4_rm_tu(vfloat32m4_t vd, vfloat32m4_t vs2,
+ __bf16 rs1, size_t vl) {
+ return __riscv_vfwsub_wf_tu(vd, vs2, rs1, __RISCV_FRM_RNE, vl);
+}
+
+// CHECK-RV64-LABEL: define dso_local <vscale x 16 x float> @test_vfwsub_vv_bf16m4_f32m8_rm_tu(
+// CHECK-RV64-SAME: <vscale x 16 x float> [[VD:%.*]], <vscale x 16 x bfloat> [[VS2:%.*]], <vscale x 16 x bfloat> [[VS1:%.*]], i64 noundef [[VL:%.*]]) #[[ATTR0]] {
+// CHECK-RV64-NEXT: [[ENTRY:.*:]]
+// CHECK-RV64-NEXT: [[TMP0:%.*]] = call <vscale x 16 x float> @llvm.riscv.vfwsub.nxv16f32.nxv16bf16.nxv16bf16.i64(<vscale x 16 x float> [[VD]], <vscale x 16 x bfloat> [[VS2]], <vscale x 16 x bfloat> [[VS1]], i64 0, i64 [[VL]])
+// CHECK-RV64-NEXT: ret <vscale x 16 x float> [[TMP0]]
+//
+vfloat32m8_t test_vfwsub_vv_bf16m4_f32m8_rm_tu(vfloat32m8_t vd,
+ vbfloat16m4_t vs2,
+ vbfloat16m4_t vs1, size_t vl) {
+ return __riscv_vfwsub_vv_tu(vd, vs2, vs1, __RISCV_FRM_RNE, vl);
+}
+
+// CHECK-RV64-LABEL: define dso_local <vscale x 16 x float> @test_vfwsub_vf_bf16m4_f32m8_rm_tu(
+// CHECK-RV64-SAME: <vscale x 16 x float> [[VD:%.*]], <vscale x 16 x bfloat> [[VS2:%.*]], bfloat noundef [[RS1:%.*]], i64 noundef [[VL:%.*]]) #[[ATTR0]] {
+// CHECK-RV64-NEXT: [[ENTRY:.*:]]
+// CHECK-RV64-NEXT: [[TMP0:%.*]] = call <vscale x 16 x float> @llvm.riscv.vfwsub.nxv16f32.nxv16bf16.bf16.i64(<vscale x 16 x float> [[VD]], <vscale x 16 x bfloat> [[VS2]], bfloat [[RS1]], i64 0, i64 [[VL]])
+// CHECK-RV64-NEXT: ret <vscale x 16 x float> [[TMP0]]
+//
+vfloat32m8_t test_vfwsub_vf_bf16m4_f32m8_rm_tu(vfloat32m8_t vd,
+ vbfloat16m4_t vs2, __bf16 rs1,
+ size_t vl) {
+ return __riscv_vfwsub_vf_tu(vd, vs2, rs1, __RISCV_FRM_RNE, vl);
+}
+
+// CHECK-RV64-LABEL: define dso_local <vscale x 16 x float> @test_vfwsub_wv_bf16m4_f32m8_rm_tu(
+// CHECK-RV64-SAME: <vscale x 16 x float> [[VD:%.*]], <vscale x 16 x float> [[VS2:%.*]], <vscale x 16 x bfloat> [[VS1:%.*]], i64 noundef [[VL:%.*]]) #[[ATTR0]] {
+// CHECK-RV64-NEXT: [[ENTRY:.*:]]
+// CHECK-RV64-NEXT: [[TMP0:%.*]] = call <vscale x 16 x float> @llvm.riscv.vfwsub.w.nxv16f32.nxv16bf16.i64(<vscale x 16 x float> [[VD]], <vscale x 16 x float> [[VS2]], <vscale x 16 x bfloat> [[VS1]], i64 0, i64 [[VL]])
+// CHECK-RV64-NEXT: ret <vscale x 16 x float> [[TMP0]]
+//
+vfloat32m8_t test_vfwsub_wv_bf16m4_f32m8_rm_tu(vfloat32m8_t vd,
+ vfloat32m8_t vs2,
+ vbfloat16m4_t vs1, size_t vl) {
+ return __riscv_vfwsub_wv_tu(vd, vs2, vs1, __RISCV_FRM_RNE, vl);
+}
+
+// CHECK-RV64-LABEL: define dso_local <vscale x 16 x float> @test_vfwsub_wf_bf16_f32m8_rm_tu(
+// CHECK-RV64-SAME: <vscale x 16 x float> [[VD:%.*]], <vscale x 16 x float> [[VS2:%.*]], bfloat noundef [[RS1:%.*]], i64 noundef [[VL:%.*]]) #[[ATTR0]] {
+// CHECK-RV64-NEXT: [[ENTRY:.*:]]
+// CHECK-RV64-NEXT: [[TMP0:%.*]] = call <vscale x 16 x float> @llvm.riscv.vfwsub.w.nxv16f32.bf16.i64(<vscale x 16 x float> [[VD]], <vscale x 16 x float> [[VS2]], bfloat [[RS1]], i64 0, i64 [[VL]])
+// CHECK-RV64-NEXT: ret <vscale x 16 x float> [[TMP0]]
+//
+vfloat32m8_t test_vfwsub_wf_bf16_f32m8_rm_tu(vfloat32m8_t vd, vfloat32m8_t vs2,
+ __bf16 rs1, size_t vl) {
+ return __riscv_vfwsub_wf_tu(vd, vs2, rs1, __RISCV_FRM_RNE, vl);
+}
+
+// CHECK-RV64-LABEL: define dso_local <vscale x 1 x float> @test_vfwsub_vv_bf16mf4_f32mf2_rm_tum(
+// CHECK-RV64-SAME: <vscale x 1 x i1> [[VM:%.*]], <vscale x 1 x float> [[VD:%.*]], <vscale x 1 x bfloat> [[VS2:%.*]], <vscale x 1 x bfloat> [[VS1:%.*]], i64 noundef [[VL:%.*]]) #[[ATTR0]] {
+// CHECK-RV64-NEXT: [[ENTRY:.*:]]
+// CHECK-RV64-NEXT: [[TMP0:%.*]] = call <vscale x 1 x float> @llvm.riscv.vfwsub.mask.nxv1f32.nxv1bf16.nxv1bf16.i64(<vscale x 1 x float> [[VD]], <vscale x 1 x bfloat> [[VS2]], <vscale x 1 x bfloat> [[VS1]], <vscale x 1 x i1> [[VM]], i64 0, i64 [[VL]], i64 2)
+// CHECK-RV64-NEXT: ret <vscale x 1 x float> [[TMP0]]
+//
+vfloat32mf2_t test_vfwsub_vv_bf16mf4_f32mf2_rm_tum(vbool64_t vm,
+ vfloat32mf2_t vd,
+ vbfloat16mf4_t vs2,
+ vbfloat16mf4_t vs1,
+ size_t vl) {
+ return __riscv_vfwsub_vv_tum(vm, vd, vs2, vs1, __RISCV_FRM_RNE, vl);
+}
+
+// CHECK-RV64-LABEL: define dso_local <vscale x 1 x float> @test_vfwsub_vf_bf16mf4_f32mf2_rm_tum(
+// CHECK-RV64-SAME: <vscale x 1 x i1> [[VM:%.*]], <vscale x 1 x float> [[VD:%.*]], <vscale x 1 x bfloat> [[VS2:%.*]], bfloat noundef [[RS1:%.*]], i64 noundef [[VL:%.*]]) #[[ATTR0]] {
+// CHECK-RV64-NEXT: [[ENTRY:.*:]]
+// CHECK-RV64-NEXT: [[TMP0:%.*]] = call <vscale x 1 x float> @llvm.riscv.vfwsub.mask.nxv1f32.nxv1bf16.bf16.i64(<vscale x 1 x float> [[VD]], <vscale x 1 x bfloat> [[VS2]], bfloat [[RS1]], <vscale x 1 x i1> [[VM]], i64 0, i64 [[VL]], i64 2)
+// CHECK-RV64-NEXT: ret <vscale x 1 x float> [[TMP0]]
+//
+vfloat32mf2_t test_vfwsub_vf_bf16mf4_f32mf2_rm_tum(vbool64_t vm,
+ vfloat32mf2_t vd,
+ vbfloat16mf4_t vs2,
+ __bf16 rs1, size_t vl) {
+ return __riscv_vfwsub_vf_tum(vm, vd, vs2, rs1, __RISCV_FRM_RNE, vl);
+}
+
+// CHECK-RV64-LABEL: define dso_local <vscale x 1 x float> @test_vfwsub_wv_bf16mf4_f32mf2_rm_tum(
+// CHECK-RV64-SAME: <vscale x 1 x i1> [[VM:%.*]], <vscale x 1 x float> [[VD:%.*]], <vscale x 1 x float> [[VS2:%.*]], <vscale x 1 x bfloat> [[VS1:%.*]], i64 noundef [[VL:%.*]]) #[[ATTR0]] {
+// CHECK-RV64-NEXT: [[ENTRY:.*:]]
+// CHECK-RV64-NEXT: [[TMP0:%.*]] = call <vscale x 1 x float> @llvm.riscv.vfwsub.w.mask.nxv1f32.nxv1bf16.i64(<vscale x 1 x float> [[VD]], <vscale x 1 x float> [[VS2]], <vscale x 1 x bfloat> [[VS1]], <vscale x 1 x i1> [[VM]], i64 0, i64 [[VL]], i64 2)
+// CHECK-RV64-NEXT: ret <vscale x 1 x float> [[TMP0]]
+//
+vfloat32mf2_t test_vfwsub_wv_bf16mf4_f32mf2_rm_tum(vbool64_t vm,
+ vfloat32mf2_t vd,
+ vfloat32mf2_t vs2,
+ vbfloat16mf4_t vs1,
+ size_t vl) {
+ return __riscv_vfwsub_wv_tum(vm, vd, vs2, vs1, __RISCV_FRM_RNE, vl);
+}
+
+// CHECK-RV64-LABEL: define dso_local <vscale x 1 x float> @test_vfwsub_wf_bf16_f32mf2_rm_tum(
+// CHECK-RV64-SAME: <vscale x 1 x i1> [[VM:%.*]], <vscale x 1 x float> [[VD:%.*]], <vscale x 1 x float> [[VS2:%.*]], bfloat noundef [[RS1:%.*]], i64 noundef [[VL:%.*]]) #[[ATTR0]] {
+// CHECK-RV64-NEXT: [[ENTRY:.*:]]
+// CHECK-RV64-NEXT: [[TMP0:%.*]] = call <vscale x 1 x float> @llvm.riscv.vfwsub.w.mask.nxv1f32.bf16.i64(<vscale x 1 x float> [[VD]], <vscale x 1 x float> [[VS2]], bfloat [[RS1]], <vscale x 1 x i1> [[VM]], i64 0, i64 [[VL]], i64 2)
+// CHECK-RV64-NEXT: ret <vscale x 1 x float> [[TMP0]]
+//
+vfloat32mf2_t test_vfwsub_wf_bf16_f32mf2_rm_tum(vbool64_t vm, vfloat32mf2_t vd,
+ vfloat32mf2_t vs2, __bf16 rs1,
+ size_t vl) {
+ return __riscv_vfwsub_wf_tum(vm, vd, vs2, rs1, __RISCV_FRM_RNE, vl);
+}
+
+// CHECK-RV64-LABEL: define dso_local <vscale x 2 x float> @test_vfwsub_vv_bf16mf2_f32m1_rm_tum(
+// CHECK-RV64-SAME: <vscale x 2 x i1> [[VM:%.*]], <vscale x 2 x float> [[VD:%.*]], <vscale x 2 x bfloat> [[VS2:%.*]], <vscale x 2 x bfloat> [[VS1:%.*]], i64 noundef [[VL:%.*]]) #[[ATTR0]] {
+// CHECK-RV64-NEXT: [[ENTRY:.*:]]
+// CHECK-RV64-NEXT: [[TMP0:%.*]] = call <vscale x 2 x float> @llvm.riscv.vfwsub.mask.nxv2f32.nxv2bf16.nxv2bf16.i64(<vscale x 2 x float> [[VD]], <vscale x 2 x bfloat> [[VS2]], <vscale x 2 x bfloat> [[VS1]], <vscale x 2 x i1> [[VM]], i64 0, i64 [[VL]], i64 2)
+// CHECK-RV64-NEXT: ret <vscale x 2 x float> [[TMP0]]
+//
+vfloat32m1_t test_vfwsub_vv_bf16mf2_f32m1_rm_tum(vbool32_t vm, vfloat32m1_t vd,
+ vbfloat16mf2_t vs2,
+ vbfloat16mf2_t vs1,
+ size_t vl) {
+ return __riscv_vfwsub_vv_tum(vm, vd, vs2, vs1, __RISCV_FRM_RNE, vl);
+}
+
+// CHECK-RV64-LABEL: define dso_local <vscale x 2 x float> @test_vfwsub_vf_bf16mf2_f32m1_rm_tum(
+// CHECK-RV64-SAME: <vscale x 2 x i1> [[VM:%.*]], <vscale x 2 x float> [[VD:%.*]], <vscale x 2 x bfloat> [[VS2:%.*]], bfloat noundef [[RS1:%.*]], i64 noundef [[VL:%.*]]) #[[ATTR0]] {
+// CHECK-RV64-NEXT: [[ENTRY:.*:]]
+// CHECK-RV64-NEXT: [[TMP0:%.*]] = call <vscale x 2 x float> @llvm.riscv.vfwsub.mask.nxv2f32.nxv2bf16.bf16.i64(<vscale x 2 x float> [[VD]], <vscale x 2 x bfloat> [[VS2]], bfloat [[RS1]], <vscale x 2 x i1> [[VM]], i64 0, i64 [[VL]], i64 2)
+// CHECK-RV64-NEXT: ret <vscale x 2 x float> [[TMP0]]
+//
+vfloat32m1_t test_vfwsub_vf_bf16mf2_f32m1_rm_tum(vbool32_t vm, vfloat32m1_t vd,
+ vbfloat16mf2_t vs2, __bf16 rs1,
+ size_t vl) {
+ return __riscv_vfwsub_vf_tum(vm, vd, vs2, rs1, __RISCV_FRM_RNE, vl);
+}
+
+// CHECK-RV64-LABEL: define dso_local <vscale x 2 x float> @test_vfwsub_wv_bf16mf2_f32m1_rm_tum(
+// CHECK-RV64-SAME: <vscale x 2 x i1> [[VM:%.*]], <vscale x 2 x float> [[VD:%.*]], <vscale x 2 x float> [[VS2:%.*]], <vscale x 2 x bfloat> [[VS1:%.*]], i64 noundef [[VL:%.*]]) #[[ATTR0]] {
+// CHECK-RV64-NEXT: [[ENTRY:.*:]]
+// CHECK-RV64-NEXT: [[TMP0:%.*]] = call <vscale x 2 x float> @llvm.riscv.vfwsub.w.mask.nxv2f32.nxv2bf16.i64(<vscale x 2 x float> [[VD]], <vscale x 2 x float> [[VS2]], <vscale x 2 x bfloat> [[VS1]], <vscale x 2 x i1> [[VM]], i64 0, i64 [[VL]], i64 2)
+// CHECK-RV64-NEXT: ret <vscale x 2 x float> [[TMP0]]
+//
+vfloat32m1_t test_vfwsub_wv_bf16mf2_f32m1_rm_tum(vbool32_t vm, vfloat32m1_t vd,
+ vfloat32m1_t vs2,
+ vbfloat16mf2_t vs1,
+ size_t vl) {
+ return __riscv_vfwsub_wv_tum(vm, vd, vs2, vs1, __RISCV_FRM_RNE, vl);
+}
+
+// CHECK-RV64-LABEL: define dso_local <vscale x 2 x float> @test_vfwsub_wf_bf16_f32m1_rm_tum(
+// CHECK-RV64-SAME: <vscale x 2 x i1> [[VM:%.*]], <vscale x 2 x float> [[VD:%.*]], <vscale x 2 x float> [[VS2:%.*]], bfloat noundef [[RS1:%.*]], i64 noundef [[VL:%.*]]) #[[ATTR0]] {
+// CHECK-RV64-NEXT: [[ENTRY:.*:]]
+// CHECK-RV64-NEXT: [[TMP0:%.*]] = call <vscale x 2 x float> @llvm.riscv.vfwsub.w.mask.nxv2f32.bf16.i64(<vscale x 2 x float> [[VD]], <vscale x 2 x float> [[VS2]], bfloat [[RS1]], <vscale x 2 x i1> [[VM]], i64 0, i64 [[VL]], i64 2)
+// CHECK-RV64-NEXT: ret <vscale x 2 x float> [[TMP0]]
+//
+vfloat32m1_t test_vfwsub_wf_bf16_f32m1_rm_tum(vbool32_t vm, vfloat32m1_t vd,
+ vfloat32m1_t vs2, __bf16 rs1,
+ size_t vl) {
+ return __riscv_vfwsub_wf_tum(vm, vd, vs2, rs1, __RISCV_FRM_RNE, vl);
+}
+
+// CHECK-RV64-LABEL: define dso_local <vscale x 4 x float> @test_vfwsub_vv_bf16m1_f32m2_rm_tum(
+// CHECK-RV64-SAME: <vscale x 4 x i1> [[VM:%.*]], <vscale x 4 x float> [[VD:%.*]], <vscale x 4 x bfloat> [[VS2:%.*]], <vscale x 4 x bfloat> [[VS1:%.*]], i64 noundef [[VL:%.*]]) #[[ATTR0]] {
+// CHECK-RV64-NEXT: [[ENTRY:.*:]]
+// CHECK-RV64-NEXT: [[TMP0:%.*]] = call <vscale x 4 x float> @llvm.riscv.vfwsub.mask.nxv4f32.nxv4bf16.nxv4bf16.i64(<vscale x 4 x float> [[VD]], <vscale x 4 x bfloat> [[VS2]], <vscale x 4 x bfloat> [[VS1]], <vscale x 4 x i1> [[VM]], i64 0, i64 [[VL]], i64 2)
+// CHECK-RV64-NEXT: ret <vscale x 4 x float> [[TMP0]]
+//
+vfloat32m2_t test_vfwsub_vv_bf16m1_f32m2_rm_tum(vbool16_t vm, vfloat32m2_t vd,
+ vbfloat16m1_t vs2,
+ vbfloat16m1_t vs1, size_t vl) {
+ return __riscv_vfwsub_vv_tum(vm, vd, vs2, vs1, __RISCV_FRM_RNE, vl);
+}
+
+// CHECK-RV64-LABEL: define dso_local <vscale x 4 x float> @test_vfwsub_vf_bf16m1_f32m2_rm_tum(
+// CHECK-RV64-SAME: <vscale x 4 x i1> [[VM:%.*]], <vscale x 4 x float> [[VD:%.*]], <vscale x 4 x bfloat> [[VS2:%.*]], bfloat noundef [[RS1:%.*]], i64 noundef [[VL:%.*]]) #[[ATTR0]] {
+// CHECK-RV64-NEXT: [[ENTRY:.*:]]
+// CHECK-RV64-NEXT: [[TMP0:%.*]] = call <vscale x 4 x float> @llvm.riscv.vfwsub.mask.nxv4f32.nxv4bf16.bf16.i64(<vscale x 4 x float> [[VD]], <vscale x 4 x bfloat> [[VS2]], bfloat [[RS1]], <vscale x 4 x i1> [[VM]], i64 0, i64 [[VL]], i64 2)
+// CHECK-RV64-NEXT: ret <vscale x 4 x float> [[TMP0]]
+//
+vfloat32m2_t test_vfwsub_vf_bf16m1_f32m2_rm_tum(vbool16_t vm, vfloat32m2_t vd,
+ vbfloat16m1_t vs2, __bf16 rs1,
+ size_t vl) {
+ return __riscv_vfwsub_vf_tum(vm, vd, vs2, rs1, __RISCV_FRM_RNE, vl);
+}
+
+// CHECK-RV64-LABEL: define dso_local <vscale x 4 x float> @test_vfwsub_wv_bf16m1_f32m2_rm_tum(
+// CHECK-RV64-SAME: <vscale x 4 x i1> [[VM:%.*]], <vscale x 4 x float> [[VD:%.*]], <vscale x 4 x float> [[VS2:%.*]], <vscale x 4 x bfloat> [[VS1:%.*]], i64 noundef [[VL:%.*]]) #[[ATTR0]] {
+// CHECK-RV64-NEXT: [[ENTRY:.*:]]
+// CHECK-RV64-NEXT: [[TMP0:%.*]] = call <vscale x 4 x float> @llvm.riscv.vfwsub.w.mask.nxv4f32.nxv4bf16.i64(<vscale x 4 x float> [[VD]], <vscale x 4 x float> [[VS2]], <vscale x 4 x bfloat> [[VS1]], <vscale x 4 x i1> [[VM]], i64 0, i64 [[VL]], i64 2)
+// CHECK-RV64-NEXT: ret <vscale x 4 x float> [[TMP0]]
+//
+vfloat32m2_t test_vfwsub_wv_bf16m1_f32m2_rm_tum(vbool16_t vm, vfloat32m2_t vd,
+ vfloat32m2_t vs2,
+ vbfloat16m1_t vs1, size_t vl) {
+ return __riscv_vfwsub_wv_tum(vm, vd, vs2, vs1, __RISCV_FRM_RNE, vl);
+}
+
+// CHECK-RV64-LABEL: define dso_local <vscale x 4 x float> @test_vfwsub_wf_bf16_f32m2_rm_tum(
+// CHECK-RV64-SAME: <vscale x 4 x i1> [[VM:%.*]], <vscale x 4 x float> [[VD:%.*]], <vscale x 4 x float> [[VS2:%.*]], bfloat noundef [[RS1:%.*]], i64 noundef [[VL:%.*]]) #[[ATTR0]] {
+// CHECK-RV64-NEXT: [[ENTRY:.*:]]
+// CHECK-RV64-NEXT: [[TMP0:%.*]] = call <vscale x 4 x float> @llvm.riscv.vfwsub.w.mask.nxv4f32.bf16.i64(<vscale x 4 x float> [[VD]], <vscale x 4 x float> [[VS2]], bfloat [[RS1]], <vscale x 4 x i1> [[VM]], i64 0, i64 [[VL]], i64 2)
+// CHECK-RV64-NEXT: ret <vscale x 4 x float> [[TMP0]]
+//
+vfloat32m2_t test_vfwsub_wf_bf16_f32m2_rm_tum(vbool16_t vm, vfloat32m2_t vd,
+ vfloat32m2_t vs2, __bf16 rs1,
+ size_t vl) {
+ return __riscv_vfwsub_wf_tum(vm, vd, vs2, rs1, __RISCV_FRM_RNE, vl);
+}
+
+// CHECK-RV64-LABEL: define dso_local <vscale x 8 x float> @test_vfwsub_vv_bf16m2_f32m4_rm_tum(
+// CHECK-RV64-SAME: <vscale x 8 x i1> [[VM:%.*]], <vscale x 8 x float> [[VD:%.*]], <vscale x 8 x bfloat> [[VS2:%.*]], <vscale x 8 x bfloat> [[VS1:%.*]], i64 noundef [[VL:%.*]]) #[[ATTR0]] {
+// CHECK-RV64-NEXT: [[ENTRY:.*:]]
+// CHECK-RV64-NEXT: [[TMP0:%.*]] = call <vscale x 8 x float> @llvm.riscv.vfwsub.mask.nxv8f32.nxv8bf16.nxv8bf16.i64(<vscale x 8 x float> [[VD]], <vscale x 8 x bfloat> [[VS2]], <vscale x 8 x bfloat> [[VS1]], <vscale x 8 x i1> [[VM]], i64 0, i64 [[VL]], i64 2)
+// CHECK-RV64-NEXT: ret <vscale x 8 x float> [[TMP0]]
+//
+vfloat32m4_t test_vfwsub_vv_bf16m2_f32m4_rm_tum(vbool8_t vm, vfloat32m4_t vd,
+ vbfloat16m2_t vs2,
+ vbfloat16m2_t vs1, size_t vl) {
+ return __riscv_vfwsub_vv_tum(vm, vd, vs2, vs1, __RISCV_FRM_RNE, vl);
+}
+
+// CHECK-RV64-LABEL: define dso_local <vscale x 8 x float> @test_vfwsub_vf_bf16m2_f32m4_rm_tum(
+// CHECK-RV64-SAME: <vscale x 8 x i1> [[VM:%.*]], <vscale x 8 x float> [[VD:%.*]], <vscale x 8 x bfloat> [[VS2:%.*]], bfloat noundef [[RS1:%.*]], i64 noundef [[VL:%.*]]) #[[ATTR0]] {
+// CHECK-RV64-NEXT: [[ENTRY:.*:]]
+// CHECK-RV64-NEXT: [[TMP0:%.*]] = call <vscale x 8 x float> @llvm.riscv.vfwsub.mask.nxv8f32.nxv8bf16.bf16.i64(<vscale x 8 x float> [[VD]], <vscale x 8 x bfloat> [[VS2]], bfloat [[RS1]], <vscale x 8 x i1> [[VM]], i64 0, i64 [[VL]], i64 2)
+// CHECK-RV64-NEXT: ret <vscale x 8 x float> [[TMP0]]
+//
+vfloat32m4_t test_vfwsub_vf_bf16m2_f32m4_rm_tum(vbool8_t vm, vfloat32m4_t vd,
+ vbfloat16m2_t vs2, __bf16 rs1,
+ size_t vl) {
+ return __riscv_vfwsub_vf_tum(vm, vd, vs2, rs1, __RISCV_FRM_RNE, vl);
+}
+
+// CHECK-RV64-LABEL: define dso_local <vscale x 8 x float> @test_vfwsub_wv_bf16m2_f32m4_rm_tum(
+// CHECK-RV64-SAME: <vscale x 8 x i1> [[VM:%.*]], <vscale x 8 x float> [[VD:%.*]], <vscale x 8 x float> [[VS2:%.*]], <vscale x 8 x bfloat> [[VS1:%.*]], i64 noundef [[VL:%.*]]) #[[ATTR0]] {
+// CHECK-RV64-NEXT: [[ENTRY:.*:]]
+// CHECK-RV64-NEXT: [[TMP0:%.*]] = call <vscale x 8 x float> @llvm.riscv.vfwsub.w.mask.nxv8f32.nxv8bf16.i64(<vscale x 8 x float> [[VD]], <vscale x 8 x float> [[VS2]], <vscale x 8 x bfloat> [[VS1]], <vscale x 8 x i1> [[VM]], i64 0, i64 [[VL]], i64 2)
+// CHECK-RV64-NEXT: ret <vscale x 8 x float> [[TMP0]]
+//
+vfloat32m4_t test_vfwsub_wv_bf16m2_f32m4_rm_tum(vbool8_t vm, vfloat32m4_t vd,
+ vfloat32m4_t vs2,
+ vbfloat16m2_t vs1, size_t vl) {
+ return __riscv_vfwsub_wv_tum(vm, vd, vs2, vs1, __RISCV_FRM_RNE, vl);
+}
+
+// CHECK-RV64-LABEL: define dso_local <vscale x 8 x float> @test_vfwsub_wf_bf16_f32m4_rm_tum(
+// CHECK-RV64-SAME: <vscale x 8 x i1> [[VM:%.*]], <vscale x 8 x float> [[VD:%.*]], <vscale x 8 x float> [[VS2:%.*]], bfloat noundef [[RS1:%.*]], i64 noundef [[VL:%.*]]) #[[ATTR0]] {
+// CHECK-RV64-NEXT: [[ENTRY:.*:]]
+// CHECK-RV64-NEXT: [[TMP0:%.*]] = call <vscale x 8 x float> @llvm.riscv.vfwsub.w.mask.nxv8f32.bf16.i64(<vscale x 8 x float> [[VD]], <vscale x 8 x float> [[VS2]], bfloat [[RS1]], <vscale x 8 x i1> [[VM]], i64 0, i64 [[VL]], i64 2)
+// CHECK-RV64-NEXT: ret <vscale x 8 x float> [[TMP0]]
+//
+vfloat32m4_t test_vfwsub_wf_bf16_f32m4_rm_tum(vbool8_t vm, vfloat32m4_t vd,
+ vfloat32m4_t vs2, __bf16 rs1,
+ size_t vl) {
+ return __riscv_vfwsub_wf_tum(vm, vd, vs2, rs1, __RISCV_FRM_RNE, vl);
+}
+
+// CHECK-RV64-LABEL: define dso_local <vscale x 16 x float> @test_vfwsub_vv_bf16m4_f32m8_rm_tum(
+// CHECK-RV64-SAME: <vscale x 16 x i1> [[VM:%.*]], <vscale x 16 x float> [[VD:%.*]], <vscale x 16 x bfloat> [[VS2:%.*]], <vscale x 16 x bfloat> [[VS1:%.*]], i64 noundef [[VL:%.*]]) #[[ATTR0]] {
+// CHECK-RV64-NEXT: [[ENTRY:.*:]]
+// CHECK-RV64-NEXT: [[TMP0:%.*]] = call <vscale x 16 x float> @llvm.riscv.vfwsub.mask.nxv16f32.nxv16bf16.nxv16bf16.i64(<vscale x 16 x float> [[VD]], <vscale x 16 x bfloat> [[VS2]], <vscale x 16 x bfloat> [[VS1]], <vscale x 16 x i1> [[VM]], i64 0, i64 [[VL]], i64 2)
+// CHECK-RV64-NEXT: ret <vscale x 16 x float> [[TMP0]]
+//
+vfloat32m8_t test_vfwsub_vv_bf16m4_f32m8_rm_tum(vbool4_t vm, vfloat32m8_t vd,
+ vbfloat16m4_t vs2,
+ vbfloat16m4_t vs1, size_t vl) {
+ return __riscv_vfwsub_vv_tum(vm, vd, vs2, vs1, __RISCV_FRM_RNE, vl);
+}
+
+// CHECK-RV64-LABEL: define dso_local <vscale x 16 x float> @test_vfwsub_vf_bf16m4_f32m8_rm_tum(
+// CHECK-RV64-SAME: <vscale x 16 x i1> [[VM:%.*]], <vscale x 16 x float> [[VD:%.*]], <vscale x 16 x bfloat> [[VS2:%.*]], bfloat noundef [[RS1:%.*]], i64 noundef [[VL:%.*]]) #[[ATTR0]] {
+// CHECK-RV64-NEXT: [[ENTRY:.*:]]
+// CHECK-RV64-NEXT: [[TMP0:%.*]] = call <vscale x 16 x float> @llvm.riscv.vfwsub.mask.nxv16f32.nxv16bf16.bf16.i64(<vscale x 16 x float> [[VD]], <vscale x 16 x bfloat> [[VS2]], bfloat [[RS1]], <vscale x 16 x i1> [[VM]], i64 0, i64 [[VL]], i64 2)
+// CHECK-RV64-NEXT: ret <vscale x 16 x float> [[TMP0]]
+//
+vfloat32m8_t test_vfwsub_vf_bf16m4_f32m8_rm_tum(vbool4_t vm, vfloat32m8_t vd,
+ vbfloat16m4_t vs2, __bf16 rs1,
+ size_t vl) {
+ return __riscv_vfwsub_vf_tum(vm, vd, vs2, rs1, __RISCV_FRM_RNE, vl);
+}
+
+// CHECK-RV64-LABEL: define dso_local <vscale x 16 x float> @test_vfwsub_wv_bf16m4_f32m8_rm_tum(
+// CHECK-RV64-SAME: <vscale x 16 x i1> [[VM:%.*]], <vscale x 16 x float> [[VD:%.*]], <vscale x 16 x float> [[VS2:%.*]], <vscale x 16 x bfloat> [[VS1:%.*]], i64 noundef [[VL:%.*]]) #[[ATTR0]] {
+// CHECK-RV64-NEXT: [[ENTRY:.*:]]
+// CHECK-RV64-NEXT: [[TMP0:%.*]] = call <vscale x 16 x float> @llvm.riscv.vfwsub.w.mask.nxv16f32.nxv16bf16.i64(<vscale x 16 x float> [[VD]], <vscale x 16 x float> [[VS2]], <vscale x 16 x bfloat> [[VS1]], <vscale x 16 x i1> [[VM]], i64 0, i64 [[VL]], i64 2)
+// CHECK-RV64-NEXT: ret <vscale x 16 x float> [[TMP0]]
+//
+vfloat32m8_t test_vfwsub_wv_bf16m4_f32m8_rm_tum(vbool4_t vm, vfloat32m8_t vd,
+ vfloat32m8_t vs2,
+ vbfloat16m4_t vs1, size_t vl) {
+ return __riscv_vfwsub_wv_tum(vm, vd, vs2, vs1, __RISCV_FRM_RNE, vl);
+}
+
+// CHECK-RV64-LABEL: define dso_local <vscale x 16 x float> @test_vfwsub_wf_bf16_f32m8_rm_tum(
+// CHECK-RV64-SAME: <vscale x 16 x i1> [[VM:%.*]], <vscale x 16 x float> [[VD:%.*]], <vscale x 16 x float> [[VS2:%.*]], bfloat noundef [[RS1:%.*]], i64 noundef [[VL:%.*]]) #[[ATTR0]] {
+// CHECK-RV64-NEXT: [[ENTRY:.*:]]
+// CHECK-RV64-NEXT: [[TMP0:%.*]] = call <vscale x 16 x float> @llvm.riscv.vfwsub.w.mask.nxv16f32.bf16.i64(<vscale x 16 x float> [[VD]], <vscale x 16 x float> [[VS2]], bfloat [[RS1]], <vscale x 16 x i1> [[VM]], i64 0, i64 [[VL]], i64 2)
+// CHECK-RV64-NEXT: ret <vscale x 16 x float> [[TMP0]]
+//
+vfloat32m8_t test_vfwsub_wf_bf16_f32m8_rm_tum(vbool4_t vm, vfloat32m8_t vd,
+ vfloat32m8_t vs2, __bf16 rs1,
+ size_t vl) {
+ return __riscv_vfwsub_wf_tum(vm, vd, vs2, rs1, __RISCV_FRM_RNE, vl);
+}
+
+// CHECK-RV64-LABEL: define dso_local <vscale x 1 x float> @test_vfwsub_vv_bf16mf4_f32mf2_rm_tumu(
+// CHECK-RV64-SAME: <vscale x 1 x i1> [[VM:%.*]], <vscale x 1 x float> [[VD:%.*]], <vscale x 1 x bfloat> [[VS2:%.*]], <vscale x 1 x bfloat> [[VS1:%.*]], i64 noundef [[VL:%.*]]) #[[ATTR0]] {
+// CHECK-RV64-NEXT: [[ENTRY:.*:]]
+// CHECK-RV64-NEXT: [[TMP0:%.*]] = call <vscale x 1 x float> @llvm.riscv.vfwsub.mask.nxv1f32.nxv1bf16.nxv1bf16.i64(<vscale x 1 x float> [[VD]], <vscale x 1 x bfloat> [[VS2]], <vscale x 1 x bfloat> [[VS1]], <vscale x 1 x i1> [[VM]], i64 0, i64 [[VL]], i64 0)
+// CHECK-RV64-NEXT: ret <vscale x 1 x float> [[TMP0]]
+//
+vfloat32mf2_t test_vfwsub_vv_bf16mf4_f32mf2_rm_tumu(vbool64_t vm,
+ vfloat32mf2_t vd,
+ vbfloat16mf4_t vs2,
+ vbfloat16mf4_t vs1,
+ size_t vl) {
+ return __riscv_vfwsub_vv_tumu(vm, vd, vs2, vs1, __RISCV_FRM_RNE, vl);
+}
+
+// CHECK-RV64-LABEL: define dso_local <vscale x 1 x float> @test_vfwsub_vf_bf16mf4_f32mf2_rm_tumu(
+// CHECK-RV64-SAME: <vscale x 1 x i1> [[VM:%.*]], <vscale x 1 x float> [[VD:%.*]], <vscale x 1 x bfloat> [[VS2:%.*]], bfloat noundef [[RS1:%.*]], i64 noundef [[VL:%.*]]) #[[ATTR0]] {
+// CHECK-RV64-NEXT: [[ENTRY:.*:]]
+// CHECK-RV64-NEXT: [[TMP0:%.*]] = call <vscale x 1 x float> @llvm.riscv.vfwsub.mask.nxv1f32.nxv1bf16.bf16.i64(<vscale x 1 x float> [[VD]], <vscale x 1 x bfloat> [[VS2]], bfloat [[RS1]], <vscale x 1 x i1> [[VM]], i64 0, i64 [[VL]], i64 0)
+// CHECK-RV64-NEXT: ret <vscale x 1 x float> [[TMP0]]
+//
+vfloat32mf2_t test_vfwsub_vf_bf16mf4_f32mf2_rm_tumu(vbool64_t vm,
+ vfloat32mf2_t vd,
+ vbfloat16mf4_t vs2,
+ __bf16 rs1, size_t vl) {
+ return __riscv_vfwsub_vf_tumu(vm, vd, vs2, rs1, __RISCV_FRM_RNE, vl);
+}
+
+// CHECK-RV64-LABEL: define dso_local <vscale x 1 x float> @test_vfwsub_wv_bf16mf4_f32mf2_rm_tumu(
+// CHECK-RV64-SAME: <vscale x 1 x i1> [[VM:%.*]], <vscale x 1 x float> [[VD:%.*]], <vscale x 1 x float> [[VS2:%.*]], <vscale x 1 x bfloat> [[VS1:%.*]], i64 noundef [[VL:%.*]]) #[[ATTR0]] {
+// CHECK-RV64-NEXT: [[ENTRY:.*:]]
+// CHECK-RV64-NEXT: [[TMP0:%.*]] = call <vscale x 1 x float> @llvm.riscv.vfwsub.w.mask.nxv1f32.nxv1bf16.i64(<vscale x 1 x float> [[VD]], <vscale x 1 x float> [[VS2]], <vscale x 1 x bfloat> [[VS1]], <vscale x 1 x i1> [[VM]], i64 0, i64 [[VL]], i64 0)
+// CHECK-RV64-NEXT: ret <vscale x 1 x float> [[TMP0]]
+//
+vfloat32mf2_t test_vfwsub_wv_bf16mf4_f32mf2_rm_tumu(vbool64_t vm,
+ vfloat32mf2_t vd,
+ vfloat32mf2_t vs2,
+ vbfloat16mf4_t vs1,
+ size_t vl) {
+ return __riscv_vfwsub_wv_tumu(vm, vd, vs2, vs1, __RISCV_FRM_RNE, vl);
+}
+
+// CHECK-RV64-LABEL: define dso_local <vscale x 1 x float> @test_vfwsub_wf_bf16_f32mf2_rm_tumu(
+// CHECK-RV64-SAME: <vscale x 1 x i1> [[VM:%.*]], <vscale x 1 x float> [[VD:%.*]], <vscale x 1 x float> [[VS2:%.*]], bfloat noundef [[RS1:%.*]], i64 noundef [[VL:%.*]]) #[[ATTR0]] {
+// CHECK-RV64-NEXT: [[ENTRY:.*:]]
+// CHECK-RV64-NEXT: [[TMP0:%.*]] = call <vscale x 1 x float> @llvm.riscv.vfwsub.w.mask.nxv1f32.bf16.i64(<vscale x 1 x float> [[VD]], <vscale x 1 x float> [[VS2]], bfloat [[RS1]], <vscale x 1 x i1> [[VM]], i64 0, i64 [[VL]], i64 0)
+// CHECK-RV64-NEXT: ret <vscale x 1 x float> [[TMP0]]
+//
+vfloat32mf2_t test_vfwsub_wf_bf16_f32mf2_rm_tumu(vbool64_t vm, vfloat32mf2_t vd,
+ vfloat32mf2_t vs2, __bf16 rs1,
+ size_t vl) {
+ return __riscv_vfwsub_wf_tumu(vm, vd, vs2, rs1, __RISCV_FRM_RNE, vl);
+}
+
+// CHECK-RV64-LABEL: define dso_local <vscale x 2 x float> @test_vfwsub_vv_bf16mf2_f32m1_rm_tumu(
+// CHECK-RV64-SAME: <vscale x 2 x i1> [[VM:%.*]], <vscale x 2 x float> [[VD:%.*]], <vscale x 2 x bfloat> [[VS2:%.*]], <vscale x 2 x bfloat> [[VS1:%.*]], i64 noundef [[VL:%.*]]) #[[ATTR0]] {
+// CHECK-RV64-NEXT: [[ENTRY:.*:]]
+// CHECK-RV64-NEXT: [[TMP0:%.*]] = call <vscale x 2 x float> @llvm.riscv.vfwsub.mask.nxv2f32.nxv2bf16.nxv2bf16.i64(<vscale x 2 x float> [[VD]], <vscale x 2 x bfloat> [[VS2]], <vscale x 2 x bfloat> [[VS1]], <vscale x 2 x i1> [[VM]], i64 0, i64 [[VL]], i64 0)
+// CHECK-RV64-NEXT: ret <vscale x 2 x float> [[TMP0]]
+//
+vfloat32m1_t test_vfwsub_vv_bf16mf2_f32m1_rm_tumu(vbool32_t vm, vfloat32m1_t vd,
+ vbfloat16mf2_t vs2,
+ vbfloat16mf2_t vs1,
+ size_t vl) {
+ return __riscv_vfwsub_vv_tumu(vm, vd, vs2, vs1, __RISCV_FRM_RNE, vl);
+}
+
+// CHECK-RV64-LABEL: define dso_local <vscale x 2 x float> @test_vfwsub_vf_bf16mf2_f32m1_rm_tumu(
+// CHECK-RV64-SAME: <vscale x 2 x i1> [[VM:%.*]], <vscale x 2 x float> [[VD:%.*]], <vscale x 2 x bfloat> [[VS2:%.*]], bfloat noundef [[RS1:%.*]], i64 noundef [[VL:%.*]]) #[[ATTR0]] {
+// CHECK-RV64-NEXT: [[ENTRY:.*:]]
+// CHECK-RV64-NEXT: [[TMP0:%.*]] = call <vscale x 2 x float> @llvm.riscv.vfwsub.mask.nxv2f32.nxv2bf16.bf16.i64(<vscale x 2 x float> [[VD]], <vscale x 2 x bfloat> [[VS2]], bfloat [[RS1]], <vscale x 2 x i1> [[VM]], i64 0, i64 [[VL]], i64 0)
+// CHECK-RV64-NEXT: ret <vscale x 2 x float> [[TMP0]]
+//
+vfloat32m1_t test_vfwsub_vf_bf16mf2_f32m1_rm_tumu(vbool32_t vm, vfloat32m1_t vd,
+ vbfloat16mf2_t vs2,
+ __bf16 rs1, size_t vl) {
+ return __riscv_vfwsub_vf_tumu(vm, vd, vs2, rs1, __RISCV_FRM_RNE, vl);
+}
+
+// CHECK-RV64-LABEL: define dso_local <vscale x 2 x float> @test_vfwsub_wv_bf16mf2_f32m1_rm_tumu(
+// CHECK-RV64-SAME: <vscale x 2 x i1> [[VM:%.*]], <vscale x 2 x float> [[VD:%.*]], <vscale x 2 x float> [[VS2:%.*]], <vscale x 2 x bfloat> [[VS1:%.*]], i64 noundef [[VL:%.*]]) #[[ATTR0]] {
+// CHECK-RV64-NEXT: [[ENTRY:.*:]]
+// CHECK-RV64-NEXT: [[TMP0:%.*]] = call <vscale x 2 x float> @llvm.riscv.vfwsub.w.mask.nxv2f32.nxv2bf16.i64(<vscale x 2 x float> [[VD]], <vscale x 2 x float> [[VS2]], <vscale x 2 x bfloat> [[VS1]], <vscale x 2 x i1> [[VM]], i64 0, i64 [[VL]], i64 0)
+// CHECK-RV64-NEXT: ret <vscale x 2 x float> [[TMP0]]
+//
+vfloat32m1_t test_vfwsub_wv_bf16mf2_f32m1_rm_tumu(vbool32_t vm, vfloat32m1_t vd,
+ vfloat32m1_t vs2,
+ vbfloat16mf2_t vs1,
+ size_t vl) {
+ return __riscv_vfwsub_wv_tumu(vm, vd, vs2, vs1, __RISCV_FRM_RNE, vl);
+}
+
+// CHECK-RV64-LABEL: define dso_local <vscale x 2 x float> @test_vfwsub_wf_bf16_f32m1_rm_tumu(
+// CHECK-RV64-SAME: <vscale x 2 x i1> [[VM:%.*]], <vscale x 2 x float> [[VD:%.*]], <vscale x 2 x float> [[VS2:%.*]], bfloat noundef [[RS1:%.*]], i64 noundef [[VL:%.*]]) #[[ATTR0]] {
+// CHECK-RV64-NEXT: [[ENTRY:.*:]]
+// CHECK-RV64-NEXT: [[TMP0:%.*]] = call <vscale x 2 x float> @llvm.riscv.vfwsub.w.mask.nxv2f32.bf16.i64(<vscale x 2 x float> [[VD]], <vscale x 2 x float> [[VS2]], bfloat [[RS1]], <vscale x 2 x i1> [[VM]], i64 0, i64 [[VL]], i64 0)
+// CHECK-RV64-NEXT: ret <vscale x 2 x float> [[TMP0]]
+//
+vfloat32m1_t test_vfwsub_wf_bf16_f32m1_rm_tumu(vbool32_t vm, vfloat32m1_t vd,
+ vfloat32m1_t vs2, __bf16 rs1,
+ size_t vl) {
+ return __riscv_vfwsub_wf_tumu(vm, vd, vs2, rs1, __RISCV_FRM_RNE, vl);
+}
+
+// CHECK-RV64-LABEL: define dso_local <vscale x 4 x float> @test_vfwsub_vv_bf16m1_f32m2_rm_tumu(
+// CHECK-RV64-SAME: <vscale x 4 x i1> [[VM:%.*]], <vscale x 4 x float> [[VD:%.*]], <vscale x 4 x bfloat> [[VS2:%.*]], <vscale x 4 x bfloat> [[VS1:%.*]], i64 noundef [[VL:%.*]]) #[[ATTR0]] {
+// CHECK-RV64-NEXT: [[ENTRY:.*:]]
+// CHECK-RV64-NEXT: [[TMP0:%.*]] = call <vscale x 4 x float> @llvm.riscv.vfwsub.mask.nxv4f32.nxv4bf16.nxv4bf16.i64(<vscale x 4 x float> [[VD]], <vscale x 4 x bfloat> [[VS2]], <vscale x 4 x bfloat> [[VS1]], <vscale x 4 x i1> [[VM]], i64 0, i64 [[VL]], i64 0)
+// CHECK-RV64-NEXT: ret <vscale x 4 x float> [[TMP0]]
+//
+vfloat32m2_t test_vfwsub_vv_bf16m1_f32m2_rm_tumu(vbool16_t vm, vfloat32m2_t vd,
+ vbfloat16m1_t vs2,
+ vbfloat16m1_t vs1, size_t vl) {
+ return __riscv_vfwsub_vv_tumu(vm, vd, vs2, vs1, __RISCV_FRM_RNE, vl);
+}
+
+// CHECK-RV64-LABEL: define dso_local <vscale x 4 x float> @test_vfwsub_vf_bf16m1_f32m2_rm_tumu(
+// CHECK-RV64-SAME: <vscale x 4 x i1> [[VM:%.*]], <vscale x 4 x float> [[VD:%.*]], <vscale x 4 x bfloat> [[VS2:%.*]], bfloat noundef [[RS1:%.*]], i64 noundef [[VL:%.*]]) #[[ATTR0]] {
+// CHECK-RV64-NEXT: [[ENTRY:.*:]]
+// CHECK-RV64-NEXT: [[TMP0:%.*]] = call <vscale x 4 x float> @llvm.riscv.vfwsub.mask.nxv4f32.nxv4bf16.bf16.i64(<vscale x 4 x float> [[VD]], <vscale x 4 x bfloat> [[VS2]], bfloat [[RS1]], <vscale x 4 x i1> [[VM]], i64 0, i64 [[VL]], i64 0)
+// CHECK-RV64-NEXT: ret <vscale x 4 x float> [[TMP0]]
+//
+vfloat32m2_t test_vfwsub_vf_bf16m1_f32m2_rm_tumu(vbool16_t vm, vfloat32m2_t vd,
+ vbfloat16m1_t vs2, __bf16 rs1,
+ size_t vl) {
+ return __riscv_vfwsub_vf_tumu(vm, vd, vs2, rs1, __RISCV_FRM_RNE, vl);
+}
+
+// CHECK-RV64-LABEL: define dso_local <vscale x 4 x float> @test_vfwsub_wv_bf16m1_f32m2_rm_tumu(
+// CHECK-RV64-SAME: <vscale x 4 x i1> [[VM:%.*]], <vscale x 4 x float> [[VD:%.*]], <vscale x 4 x float> [[VS2:%.*]], <vscale x 4 x bfloat> [[VS1:%.*]], i64 noundef [[VL:%.*]]) #[[ATTR0]] {
+// CHECK-RV64-NEXT: [[ENTRY:.*:]]
+// CHECK-RV64-NEXT: [[TMP0:%.*]] = call <vscale x 4 x float> @llvm.riscv.vfwsub.w.mask.nxv4f32.nxv4bf16.i64(<vscale x 4 x float> [[VD]], <vscale x 4 x float> [[VS2]], <vscale x 4 x bfloat> [[VS1]], <vscale x 4 x i1> [[VM]], i64 0, i64 [[VL]], i64 0)
+// CHECK-RV64-NEXT: ret <vscale x 4 x float> [[TMP0]]
+//
+vfloat32m2_t test_vfwsub_wv_bf16m1_f32m2_rm_tumu(vbool16_t vm, vfloat32m2_t vd,
+ vfloat32m2_t vs2,
+ vbfloat16m1_t vs1, size_t vl) {
+ return __riscv_vfwsub_wv_tumu(vm, vd, vs2, vs1, __RISCV_FRM_RNE, vl);
+}
+
+// CHECK-RV64-LABEL: define dso_local <vscale x 4 x float> @test_vfwsub_wf_bf16_f32m2_rm_tumu(
+// CHECK-RV64-SAME: <vscale x 4 x i1> [[VM:%.*]], <vscale x 4 x float> [[VD:%.*]], <vscale x 4 x float> [[VS2:%.*]], bfloat noundef [[RS1:%.*]], i64 noundef [[VL:%.*]]) #[[ATTR0]] {
+// CHECK-RV64-NEXT: [[ENTRY:.*:]]
+// CHECK-RV64-NEXT: [[TMP0:%.*]] = call <vscale x 4 x float> @llvm.riscv.vfwsub.w.mask.nxv4f32.bf16.i64(<vscale x 4 x float> [[VD]], <vscale x 4 x float> [[VS2]], bfloat [[RS1]], <vscale x 4 x i1> [[VM]], i64 0, i64 [[VL]], i64 0)
+// CHECK-RV64-NEXT: ret <vscale x 4 x float> [[TMP0]]
+//
+vfloat32m2_t test_vfwsub_wf_bf16_f32m2_rm_tumu(vbool16_t vm, vfloat32m2_t vd,
+ vfloat32m2_t vs2, __bf16 rs1,
+ size_t vl) {
+ return __riscv_vfwsub_wf_tumu(vm, vd, vs2, rs1, __RISCV_FRM_RNE, vl);
+}
+
+// CHECK-RV64-LABEL: define dso_local <vscale x 8 x float> @test_vfwsub_vv_bf16m2_f32m4_rm_tumu(
+// CHECK-RV64-SAME: <vscale x 8 x i1> [[VM:%.*]], <vscale x 8 x float> [[VD:%.*]], <vscale x 8 x bfloat> [[VS2:%.*]], <vscale x 8 x bfloat> [[VS1:%.*]], i64 noundef [[VL:%.*]]) #[[ATTR0]] {
+// CHECK-RV64-NEXT: [[ENTRY:.*:]]
+// CHECK-RV64-NEXT: [[TMP0:%.*]] = call <vscale x 8 x float> @llvm.riscv.vfwsub.mask.nxv8f32.nxv8bf16.nxv8bf16.i64(<vscale x 8 x float> [[VD]], <vscale x 8 x bfloat> [[VS2]], <vscale x 8 x bfloat> [[VS1]], <vscale x 8 x i1> [[VM]], i64 0, i64 [[VL]], i64 0)
+// CHECK-RV64-NEXT: ret <vscale x 8 x float> [[TMP0]]
+//
+vfloat32m4_t test_vfwsub_vv_bf16m2_f32m4_rm_tumu(vbool8_t vm, vfloat32m4_t vd,
+ vbfloat16m2_t vs2,
+ vbfloat16m2_t vs1, size_t vl) {
+ return __riscv_vfwsub_vv_tumu(vm, vd, vs2, vs1, __RISCV_FRM_RNE, vl);
+}
+
+// CHECK-RV64-LABEL: define dso_local <vscale x 8 x float> @test_vfwsub_vf_bf16m2_f32m4_rm_tumu(
+// CHECK-RV64-SAME: <vscale x 8 x i1> [[VM:%.*]], <vscale x 8 x float> [[VD:%.*]], <vscale x 8 x bfloat> [[VS2:%.*]], bfloat noundef [[RS1:%.*]], i64 noundef [[VL:%.*]]) #[[ATTR0]] {
+// CHECK-RV64-NEXT: [[ENTRY:.*:]]
+// CHECK-RV64-NEXT: [[TMP0:%.*]] = call <vscale x 8 x float> @llvm.riscv.vfwsub.mask.nxv8f32.nxv8bf16.bf16.i64(<vscale x 8 x float> [[VD]], <vscale x 8 x bfloat> [[VS2]], bfloat [[RS1]], <vscale x 8 x i1> [[VM]], i64 0, i64 [[VL]], i64 0)
+// CHECK-RV64-NEXT: ret <vscale x 8 x float> [[TMP0]]
+//
+vfloat32m4_t test_vfwsub_vf_bf16m2_f32m4_rm_tumu(vbool8_t vm, vfloat32m4_t vd,
+ vbfloat16m2_t vs2, __bf16 rs1,
+ size_t vl) {
+ return __riscv_vfwsub_vf_tumu(vm, vd, vs2, rs1, __RISCV_FRM_RNE, vl);
+}
+
+// CHECK-RV64-LABEL: define dso_local <vscale x 8 x float> @test_vfwsub_wv_bf16m2_f32m4_rm_tumu(
+// CHECK-RV64-SAME: <vscale x 8 x i1> [[VM:%.*]], <vscale x 8 x float> [[VD:%.*]], <vscale x 8 x float> [[VS2:%.*]], <vscale x 8 x bfloat> [[VS1:%.*]], i64 noundef [[VL:%.*]]) #[[ATTR0]] {
+// CHECK-RV64-NEXT: [[ENTRY:.*:]]
+// CHECK-RV64-NEXT: [[TMP0:%.*]] = call <vscale x 8 x float> @llvm.riscv.vfwsub.w.mask.nxv8f32.nxv8bf16.i64(<vscale x 8 x float> [[VD]], <vscale x 8 x float> [[VS2]], <vscale x 8 x bfloat> [[VS1]], <vscale x 8 x i1> [[VM]], i64 0, i64 [[VL]], i64 0)
+// CHECK-RV64-NEXT: ret <vscale x 8 x float> [[TMP0]]
+//
+vfloat32m4_t test_vfwsub_wv_bf16m2_f32m4_rm_tumu(vbool8_t vm, vfloat32m4_t vd,
+ vfloat32m4_t vs2,
+ vbfloat16m2_t vs1, size_t vl) {
+ return __riscv_vfwsub_wv_tumu(vm, vd, vs2, vs1, __RISCV_FRM_RNE, vl);
+}
+
+// CHECK-RV64-LABEL: define dso_local <vscale x 8 x float> @test_vfwsub_wf_bf16_f32m4_rm_tumu(
+// CHECK-RV64-SAME: <vscale x 8 x i1> [[VM:%.*]], <vscale x 8 x float> [[VD:%.*]], <vscale x 8 x float> [[VS2:%.*]], bfloat noundef [[RS1:%.*]], i64 noundef [[VL:%.*]]) #[[ATTR0]] {
+// CHECK-RV64-NEXT: [[ENTRY:.*:]]
+// CHECK-RV64-NEXT: [[TMP0:%.*]] = call <vscale x 8 x float> @llvm.riscv.vfwsub.w.mask.nxv8f32.bf16.i64(<vscale x 8 x float> [[VD]], <vscale x 8 x float> [[VS2]], bfloat [[RS1]], <vscale x 8 x i1> [[VM]], i64 0, i64 [[VL]], i64 0)
+// CHECK-RV64-NEXT: ret <vscale x 8 x float> [[TMP0]]
+//
+vfloat32m4_t test_vfwsub_wf_bf16_f32m4_rm_tumu(vbool8_t vm, vfloat32m4_t vd,
+ vfloat32m4_t vs2, __bf16 rs1,
+ size_t vl) {
+ return __riscv_vfwsub_wf_tumu(vm, vd, vs2, rs1, __RISCV_FRM_RNE, vl);
+}
+
+// CHECK-RV64-LABEL: define dso_local <vscale x 16 x float> @test_vfwsub_vv_bf16m4_f32m8_rm_tumu(
+// CHECK-RV64-SAME: <vscale x 16 x i1> [[VM:%.*]], <vscale x 16 x float> [[VD:%.*]], <vscale x 16 x bfloat> [[VS2:%.*]], <vscale x 16 x bfloat> [[VS1:%.*]], i64 noundef [[VL:%.*]]) #[[ATTR0]] {
+// CHECK-RV64-NEXT: [[ENTRY:.*:]]
+// CHECK-RV64-NEXT: [[TMP0:%.*]] = call <vscale x 16 x float> @llvm.riscv.vfwsub.mask.nxv16f32.nxv16bf16.nxv16bf16.i64(<vscale x 16 x float> [[VD]], <vscale x 16 x bfloat> [[VS2]], <vscale x 16 x bfloat> [[VS1]], <vscale x 16 x i1> [[VM]], i64 0, i64 [[VL]], i64 0)
+// CHECK-RV64-NEXT: ret <vscale x 16 x float> [[TMP0]]
+//
+vfloat32m8_t test_vfwsub_vv_bf16m4_f32m8_rm_tumu(vbool4_t vm, vfloat32m8_t vd,
+ vbfloat16m4_t vs2,
+ vbfloat16m4_t vs1, size_t vl) {
+ return __riscv_vfwsub_vv_tumu(vm, vd, vs2, vs1, __RISCV_FRM_RNE, vl);
+}
+
+// CHECK-RV64-LABEL: define dso_local <vscale x 16 x float> @test_vfwsub_vf_bf16m4_f32m8_rm_tumu(
+// CHECK-RV64-SAME: <vscale x 16 x i1> [[VM:%.*]], <vscale x 16 x float> [[VD:%.*]], <vscale x 16 x bfloat> [[VS2:%.*]], bfloat noundef [[RS1:%.*]], i64 noundef [[VL:%.*]]) #[[ATTR0]] {
+// CHECK-RV64-NEXT: [[ENTRY:.*:]]
+// CHECK-RV64-NEXT: [[TMP0:%.*]] = call <vscale x 16 x float> @llvm.riscv.vfwsub.mask.nxv16f32.nxv16bf16.bf16.i64(<vscale x 16 x float> [[VD]], <vscale x 16 x bfloat> [[VS2]], bfloat [[RS1]], <vscale x 16 x i1> [[VM]], i64 0, i64 [[VL]], i64 0)
+// CHECK-RV64-NEXT: ret <vscale x 16 x float> [[TMP0]]
+//
+vfloat32m8_t test_vfwsub_vf_bf16m4_f32m8_rm_tumu(vbool4_t vm, vfloat32m8_t vd,
+ vbfloat16m4_t vs2, __bf16 rs1,
+ size_t vl) {
+ return __riscv_vfwsub_vf_tumu(vm, vd, vs2, rs1, __RISCV_FRM_RNE, vl);
+}
+
+// CHECK-RV64-LABEL: define dso_local <vscale x 16 x float> @test_vfwsub_wv_bf16m4_f32m8_rm_tumu(
+// CHECK-RV64-SAME: <vscale x 16 x i1> [[VM:%.*]], <vscale x 16 x float> [[VD:%.*]], <vscale x 16 x float> [[VS2:%.*]], <vscale x 16 x bfloat> [[VS1:%.*]], i64 noundef [[VL:%.*]]) #[[ATTR0]] {
+// CHECK-RV64-NEXT: [[ENTRY:.*:]]
+// CHECK-RV64-NEXT: [[TMP0:%.*]] = call <vscale x 16 x float> @llvm.riscv.vfwsub.w.mask.nxv16f32.nxv16bf16.i64(<vscale x 16 x float> [[VD]], <vscale x 16 x float> [[VS2]], <vscale x 16 x bfloat> [[VS1]], <vscale x 16 x i1> [[VM]], i64 0, i64 [[VL]], i64 0)
+// CHECK-RV64-NEXT: ret <vscale x 16 x float> [[TMP0]]
+//
+vfloat32m8_t test_vfwsub_wv_bf16m4_f32m8_rm_tumu(vbool4_t vm, vfloat32m8_t vd,
+ vfloat32m8_t vs2,
+ vbfloat16m4_t vs1, size_t vl) {
+ return __riscv_vfwsub_wv_tumu(vm, vd, vs2, vs1, __RISCV_FRM_RNE, vl);
+}
+
+// CHECK-RV64-LABEL: define dso_local <vscale x 16 x float> @test_vfwsub_wf_bf16_f32m8_rm_tumu(
+// CHECK-RV64-SAME: <vscale x 16 x i1> [[VM:%.*]], <vscale x 16 x float> [[VD:%.*]], <vscale x 16 x float> [[VS2:%.*]], bfloat noundef [[RS1:%.*]], i64 noundef [[VL:%.*]]) #[[ATTR0]] {
+// CHECK-RV64-NEXT: [[ENTRY:.*:]]
+// CHECK-RV64-NEXT: [[TMP0:%.*]] = call <vscale x 16 x float> @llvm.riscv.vfwsub.w.mask.nxv16f32.bf16.i64(<vscale x 16 x float> [[VD]], <vscale x 16 x float> [[VS2]], bfloat [[RS1]], <vscale x 16 x i1> [[VM]], i64 0, i64 [[VL]], i64 0)
+// CHECK-RV64-NEXT: ret <vscale x 16 x float> [[TMP0]]
+//
+vfloat32m8_t test_vfwsub_wf_bf16_f32m8_rm_tumu(vbool4_t vm, vfloat32m8_t vd,
+ vfloat32m8_t vs2, __bf16 rs1,
+ size_t vl) {
+ return __riscv_vfwsub_wf_tumu(vm, vd, vs2, rs1, __RISCV_FRM_RNE, vl);
+}
+
+// CHECK-RV64-LABEL: define dso_local <vscale x 1 x float> @test_vfwsub_vv_bf16mf4_f32mf2_rm_mu(
+// CHECK-RV64-SAME: <vscale x 1 x i1> [[VM:%.*]], <vscale x 1 x float> [[VD:%.*]], <vscale x 1 x bfloat> [[VS2:%.*]], <vscale x 1 x bfloat> [[VS1:%.*]], i64 noundef [[VL:%.*]]) #[[ATTR0]] {
+// CHECK-RV64-NEXT: [[ENTRY:.*:]]
+// CHECK-RV64-NEXT: [[TMP0:%.*]] = call <vscale x 1 x float> @llvm.riscv.vfwsub.mask.nxv1f32.nxv1bf16.nxv1bf16.i64(<vscale x 1 x float> [[VD]], <vscale x 1 x bfloat> [[VS2]], <vscale x 1 x bfloat> [[VS1]], <vscale x 1 x i1> [[VM]], i64 0, i64 [[VL]], i64 1)
+// CHECK-RV64-NEXT: ret <vscale x 1 x float> [[TMP0]]
+//
+vfloat32mf2_t test_vfwsub_vv_bf16mf4_f32mf2_rm_mu(vbool64_t vm,
+ vfloat32mf2_t vd,
+ vbfloat16mf4_t vs2,
+ vbfloat16mf4_t vs1,
+ size_t vl) {
+ return __riscv_vfwsub_vv_mu(vm, vd, vs2, vs1, __RISCV_FRM_RNE, vl);
+}
+
+// CHECK-RV64-LABEL: define dso_local <vscale x 1 x float> @test_vfwsub_vf_bf16mf4_f32mf2_rm_mu(
+// CHECK-RV64-SAME: <vscale x 1 x i1> [[VM:%.*]], <vscale x 1 x float> [[VD:%.*]], <vscale x 1 x bfloat> [[VS2:%.*]], bfloat noundef [[RS1:%.*]], i64 noundef [[VL:%.*]]) #[[ATTR0]] {
+// CHECK-RV64-NEXT: [[ENTRY:.*:]]
+// CHECK-RV64-NEXT: [[TMP0:%.*]] = call <vscale x 1 x float> @llvm.riscv.vfwsub.mask.nxv1f32.nxv1bf16.bf16.i64(<vscale x 1 x float> [[VD]], <vscale x 1 x bfloat> [[VS2]], bfloat [[RS1]], <vscale x 1 x i1> [[VM]], i64 0, i64 [[VL]], i64 1)
+// CHECK-RV64-NEXT: ret <vscale x 1 x float> [[TMP0]]
+//
+vfloat32mf2_t test_vfwsub_vf_bf16mf4_f32mf2_rm_mu(vbool64_t vm,
+ vfloat32mf2_t vd,
+ vbfloat16mf4_t vs2,
+ __bf16 rs1, size_t vl) {
+ return __riscv_vfwsub_vf_mu(vm, vd, vs2, rs1, __RISCV_FRM_RNE, vl);
+}
+
+// CHECK-RV64-LABEL: define dso_local <vscale x 1 x float> @test_vfwsub_wv_bf16mf4_f32mf2_rm_mu(
+// CHECK-RV64-SAME: <vscale x 1 x i1> [[VM:%.*]], <vscale x 1 x float> [[VD:%.*]], <vscale x 1 x float> [[VS2:%.*]], <vscale x 1 x bfloat> [[VS1:%.*]], i64 noundef [[VL:%.*]]) #[[ATTR0]] {
+// CHECK-RV64-NEXT: [[ENTRY:.*:]]
+// CHECK-RV64-NEXT: [[TMP0:%.*]] = call <vscale x 1 x float> @llvm.riscv.vfwsub.w.mask.nxv1f32.nxv1bf16.i64(<vscale x 1 x float> [[VD]], <vscale x 1 x float> [[VS2]], <vscale x 1 x bfloat> [[VS1]], <vscale x 1 x i1> [[VM]], i64 0, i64 [[VL]], i64 1)
+// CHECK-RV64-NEXT: ret <vscale x 1 x float> [[TMP0]]
+//
+vfloat32mf2_t test_vfwsub_wv_bf16mf4_f32mf2_rm_mu(vbool64_t vm,
+ vfloat32mf2_t vd,
+ vfloat32mf2_t vs2,
+ vbfloat16mf4_t vs1,
+ size_t vl) {
+ return __riscv_vfwsub_wv_mu(vm, vd, vs2, vs1, __RISCV_FRM_RNE, vl);
+}
+
+// CHECK-RV64-LABEL: define dso_local <vscale x 1 x float> @test_vfwsub_wf_bf16_f32mf2_rm_mu(
+// CHECK-RV64-SAME: <vscale x 1 x i1> [[VM:%.*]], <vscale x 1 x float> [[VD:%.*]], <vscale x 1 x float> [[VS2:%.*]], bfloat noundef [[RS1:%.*]], i64 noundef [[VL:%.*]]) #[[ATTR0]] {
+// CHECK-RV64-NEXT: [[ENTRY:.*:]]
+// CHECK-RV64-NEXT: [[TMP0:%.*]] = call <vscale x 1 x float> @llvm.riscv.vfwsub.w.mask.nxv1f32.bf16.i64(<vscale x 1 x float> [[VD]], <vscale x 1 x float> [[VS2]], bfloat [[RS1]], <vscale x 1 x i1> [[VM]], i64 0, i64 [[VL]], i64 1)
+// CHECK-RV64-NEXT: ret <vscale x 1 x float> [[TMP0]]
+//
+vfloat32mf2_t test_vfwsub_wf_bf16_f32mf2_rm_mu(vbool64_t vm, vfloat32mf2_t vd,
+ vfloat32mf2_t vs2, __bf16 rs1,
+ size_t vl) {
+ return __riscv_vfwsub_wf_mu(vm, vd, vs2, rs1, __RISCV_FRM_RNE, vl);
+}
+
+// CHECK-RV64-LABEL: define dso_local <vscale x 2 x float> @test_vfwsub_vv_bf16mf2_f32m1_rm_mu(
+// CHECK-RV64-SAME: <vscale x 2 x i1> [[VM:%.*]], <vscale x 2 x float> [[VD:%.*]], <vscale x 2 x bfloat> [[VS2:%.*]], <vscale x 2 x bfloat> [[VS1:%.*]], i64 noundef [[VL:%.*]]) #[[ATTR0]] {
+// CHECK-RV64-NEXT: [[ENTRY:.*:]]
+// CHECK-RV64-NEXT: [[TMP0:%.*]] = call <vscale x 2 x float> @llvm.riscv.vfwsub.mask.nxv2f32.nxv2bf16.nxv2bf16.i64(<vscale x 2 x float> [[VD]], <vscale x 2 x bfloat> [[VS2]], <vscale x 2 x bfloat> [[VS1]], <vscale x 2 x i1> [[VM]], i64 0, i64 [[VL]], i64 1)
+// CHECK-RV64-NEXT: ret <vscale x 2 x float> [[TMP0]]
+//
+vfloat32m1_t test_vfwsub_vv_bf16mf2_f32m1_rm_mu(vbool32_t vm, vfloat32m1_t vd,
+ vbfloat16mf2_t vs2,
+ vbfloat16mf2_t vs1, size_t vl) {
+ return __riscv_vfwsub_vv_mu(vm, vd, vs2, vs1, __RISCV_FRM_RNE, vl);
+}
+
+// CHECK-RV64-LABEL: define dso_local <vscale x 2 x float> @test_vfwsub_vf_bf16mf2_f32m1_rm_mu(
+// CHECK-RV64-SAME: <vscale x 2 x i1> [[VM:%.*]], <vscale x 2 x float> [[VD:%.*]], <vscale x 2 x bfloat> [[VS2:%.*]], bfloat noundef [[RS1:%.*]], i64 noundef [[VL:%.*]]) #[[ATTR0]] {
+// CHECK-RV64-NEXT: [[ENTRY:.*:]]
+// CHECK-RV64-NEXT: [[TMP0:%.*]] = call <vscale x 2 x float> @llvm.riscv.vfwsub.mask.nxv2f32.nxv2bf16.bf16.i64(<vscale x 2 x float> [[VD]], <vscale x 2 x bfloat> [[VS2]], bfloat [[RS1]], <vscale x 2 x i1> [[VM]], i64 0, i64 [[VL]], i64 1)
+// CHECK-RV64-NEXT: ret <vscale x 2 x float> [[TMP0]]
+//
+vfloat32m1_t test_vfwsub_vf_bf16mf2_f32m1_rm_mu(vbool32_t vm, vfloat32m1_t vd,
+ vbfloat16mf2_t vs2, __bf16 rs1,
+ size_t vl) {
+ return __riscv_vfwsub_vf_mu(vm, vd, vs2, rs1, __RISCV_FRM_RNE, vl);
+}
+
+// CHECK-RV64-LABEL: define dso_local <vscale x 2 x float> @test_vfwsub_wv_bf16mf2_f32m1_rm_mu(
+// CHECK-RV64-SAME: <vscale x 2 x i1> [[VM:%.*]], <vscale x 2 x float> [[VD:%.*]], <vscale x 2 x float> [[VS2:%.*]], <vscale x 2 x bfloat> [[VS1:%.*]], i64 noundef [[VL:%.*]]) #[[ATTR0]] {
+// CHECK-RV64-NEXT: [[ENTRY:.*:]]
+// CHECK-RV64-NEXT: [[TMP0:%.*]] = call <vscale x 2 x float> @llvm.riscv.vfwsub.w.mask.nxv2f32.nxv2bf16.i64(<vscale x 2 x float> [[VD]], <vscale x 2 x float> [[VS2]], <vscale x 2 x bfloat> [[VS1]], <vscale x 2 x i1> [[VM]], i64 0, i64 [[VL]], i64 1)
+// CHECK-RV64-NEXT: ret <vscale x 2 x float> [[TMP0]]
+//
+vfloat32m1_t test_vfwsub_wv_bf16mf2_f32m1_rm_mu(vbool32_t vm, vfloat32m1_t vd,
+ vfloat32m1_t vs2,
+ vbfloat16mf2_t vs1, size_t vl) {
+ return __riscv_vfwsub_wv_mu(vm, vd, vs2, vs1, __RISCV_FRM_RNE, vl);
+}
+
+// CHECK-RV64-LABEL: define dso_local <vscale x 2 x float> @test_vfwsub_wf_bf16_f32m1_rm_mu(
+// CHECK-RV64-SAME: <vscale x 2 x i1> [[VM:%.*]], <vscale x 2 x float> [[VD:%.*]], <vscale x 2 x float> [[VS2:%.*]], bfloat noundef [[RS1:%.*]], i64 noundef [[VL:%.*]]) #[[ATTR0]] {
+// CHECK-RV64-NEXT: [[ENTRY:.*:]]
+// CHECK-RV64-NEXT: [[TMP0:%.*]] = call <vscale x 2 x float> @llvm.riscv.vfwsub.w.mask.nxv2f32.bf16.i64(<vscale x 2 x float> [[VD]], <vscale x 2 x float> [[VS2]], bfloat [[RS1]], <vscale x 2 x i1> [[VM]], i64 0, i64 [[VL]], i64 1)
+// CHECK-RV64-NEXT: ret <vscale x 2 x float> [[TMP0]]
+//
+vfloat32m1_t test_vfwsub_wf_bf16_f32m1_rm_mu(vbool32_t vm, vfloat32m1_t vd,
+ vfloat32m1_t vs2, __bf16 rs1,
+ size_t vl) {
+ return __riscv_vfwsub_wf_mu(vm, vd, vs2, rs1, __RISCV_FRM_RNE, vl);
+}
+
+// CHECK-RV64-LABEL: define dso_local <vscale x 4 x float> @test_vfwsub_vv_bf16m1_f32m2_rm_mu(
+// CHECK-RV64-SAME: <vscale x 4 x i1> [[VM:%.*]], <vscale x 4 x float> [[VD:%.*]], <vscale x 4 x bfloat> [[VS2:%.*]], <vscale x 4 x bfloat> [[VS1:%.*]], i64 noundef [[VL:%.*]]) #[[ATTR0]] {
+// CHECK-RV64-NEXT: [[ENTRY:.*:]]
+// CHECK-RV64-NEXT: [[TMP0:%.*]] = call <vscale x 4 x float> @llvm.riscv.vfwsub.mask.nxv4f32.nxv4bf16.nxv4bf16.i64(<vscale x 4 x float> [[VD]], <vscale x 4 x bfloat> [[VS2]], <vscale x 4 x bfloat> [[VS1]], <vscale x 4 x i1> [[VM]], i64 0, i64 [[VL]], i64 1)
+// CHECK-RV64-NEXT: ret <vscale x 4 x float> [[TMP0]]
+//
+vfloat32m2_t test_vfwsub_vv_bf16m1_f32m2_rm_mu(vbool16_t vm, vfloat32m2_t vd,
+ vbfloat16m1_t vs2,
+ vbfloat16m1_t vs1, size_t vl) {
+ return __riscv_vfwsub_vv_mu(vm, vd, vs2, vs1, __RISCV_FRM_RNE, vl);
+}
+
+// CHECK-RV64-LABEL: define dso_local <vscale x 4 x float> @test_vfwsub_vf_bf16m1_f32m2_rm_mu(
+// CHECK-RV64-SAME: <vscale x 4 x i1> [[VM:%.*]], <vscale x 4 x float> [[VD:%.*]], <vscale x 4 x bfloat> [[VS2:%.*]], bfloat noundef [[RS1:%.*]], i64 noundef [[VL:%.*]]) #[[ATTR0]] {
+// CHECK-RV64-NEXT: [[ENTRY:.*:]]
+// CHECK-RV64-NEXT: [[TMP0:%.*]] = call <vscale x 4 x float> @llvm.riscv.vfwsub.mask.nxv4f32.nxv4bf16.bf16.i64(<vscale x 4 x float> [[VD]], <vscale x 4 x bfloat> [[VS2]], bfloat [[RS1]], <vscale x 4 x i1> [[VM]], i64 0, i64 [[VL]], i64 1)
+// CHECK-RV64-NEXT: ret <vscale x 4 x float> [[TMP0]]
+//
+vfloat32m2_t test_vfwsub_vf_bf16m1_f32m2_rm_mu(vbool16_t vm, vfloat32m2_t vd,
+ vbfloat16m1_t vs2, __bf16 rs1,
+ size_t vl) {
+ return __riscv_vfwsub_vf_mu(vm, vd, vs2, rs1, __RISCV_FRM_RNE, vl);
+}
+
+// CHECK-RV64-LABEL: define dso_local <vscale x 4 x float> @test_vfwsub_wv_bf16m1_f32m2_rm_mu(
+// CHECK-RV64-SAME: <vscale x 4 x i1> [[VM:%.*]], <vscale x 4 x float> [[VD:%.*]], <vscale x 4 x float> [[VS2:%.*]], <vscale x 4 x bfloat> [[VS1:%.*]], i64 noundef [[VL:%.*]]) #[[ATTR0]] {
+// CHECK-RV64-NEXT: [[ENTRY:.*:]]
+// CHECK-RV64-NEXT: [[TMP0:%.*]] = call <vscale x 4 x float> @llvm.riscv.vfwsub.w.mask.nxv4f32.nxv4bf16.i64(<vscale x 4 x float> [[VD]], <vscale x 4 x float> [[VS2]], <vscale x 4 x bfloat> [[VS1]], <vscale x 4 x i1> [[VM]], i64 0, i64 [[VL]], i64 1)
+// CHECK-RV64-NEXT: ret <vscale x 4 x float> [[TMP0]]
+//
+vfloat32m2_t test_vfwsub_wv_bf16m1_f32m2_rm_mu(vbool16_t vm, vfloat32m2_t vd,
+ vfloat32m2_t vs2,
+ vbfloat16m1_t vs1, size_t vl) {
+ return __riscv_vfwsub_wv_mu(vm, vd, vs2, vs1, __RISCV_FRM_RNE, vl);
+}
+
+// CHECK-RV64-LABEL: define dso_local <vscale x 4 x float> @test_vfwsub_wf_bf16_f32m2_rm_mu(
+// CHECK-RV64-SAME: <vscale x 4 x i1> [[VM:%.*]], <vscale x 4 x float> [[VD:%.*]], <vscale x 4 x float> [[VS2:%.*]], bfloat noundef [[RS1:%.*]], i64 noundef [[VL:%.*]]) #[[ATTR0]] {
+// CHECK-RV64-NEXT: [[ENTRY:.*:]]
+// CHECK-RV64-NEXT: [[TMP0:%.*]] = call <vscale x 4 x float> @llvm.riscv.vfwsub.w.mask.nxv4f32.bf16.i64(<vscale x 4 x float> [[VD]], <vscale x 4 x float> [[VS2]], bfloat [[RS1]], <vscale x 4 x i1> [[VM]], i64 0, i64 [[VL]], i64 1)
+// CHECK-RV64-NEXT: ret <vscale x 4 x float> [[TMP0]]
+//
+vfloat32m2_t test_vfwsub_wf_bf16_f32m2_rm_mu(vbool16_t vm, vfloat32m2_t vd,
+ vfloat32m2_t vs2, __bf16 rs1,
+ size_t vl) {
+ return __riscv_vfwsub_wf_mu(vm, vd, vs2, rs1, __RISCV_FRM_RNE, vl);
+}
+
+// CHECK-RV64-LABEL: define dso_local <vscale x 8 x float> @test_vfwsub_vv_bf16m2_f32m4_rm_mu(
+// CHECK-RV64-SAME: <vscale x 8 x i1> [[VM:%.*]], <vscale x 8 x float> [[VD:%.*]], <vscale x 8 x bfloat> [[VS2:%.*]], <vscale x 8 x bfloat> [[VS1:%.*]], i64 noundef [[VL:%.*]]) #[[ATTR0]] {
+// CHECK-RV64-NEXT: [[ENTRY:.*:]]
+// CHECK-RV64-NEXT: [[TMP0:%.*]] = call <vscale x 8 x float> @llvm.riscv.vfwsub.mask.nxv8f32.nxv8bf16.nxv8bf16.i64(<vscale x 8 x float> [[VD]], <vscale x 8 x bfloat> [[VS2]], <vscale x 8 x bfloat> [[VS1]], <vscale x 8 x i1> [[VM]], i64 0, i64 [[VL]], i64 1)
+// CHECK-RV64-NEXT: ret <vscale x 8 x float> [[TMP0]]
+//
+vfloat32m4_t test_vfwsub_vv_bf16m2_f32m4_rm_mu(vbool8_t vm, vfloat32m4_t vd,
+ vbfloat16m2_t vs2,
+ vbfloat16m2_t vs1, size_t vl) {
+ return __riscv_vfwsub_vv_mu(vm, vd, vs2, vs1, __RISCV_FRM_RNE, vl);
+}
+
+// CHECK-RV64-LABEL: define dso_local <vscale x 8 x float> @test_vfwsub_vf_bf16m2_f32m4_rm_mu(
+// CHECK-RV64-SAME: <vscale x 8 x i1> [[VM:%.*]], <vscale x 8 x float> [[VD:%.*]], <vscale x 8 x bfloat> [[VS2:%.*]], bfloat noundef [[RS1:%.*]], i64 noundef [[VL:%.*]]) #[[ATTR0]] {
+// CHECK-RV64-NEXT: [[ENTRY:.*:]]
+// CHECK-RV64-NEXT: [[TMP0:%.*]] = call <vscale x 8 x float> @llvm.riscv.vfwsub.mask.nxv8f32.nxv8bf16.bf16.i64(<vscale x 8 x float> [[VD]], <vscale x 8 x bfloat> [[VS2]], bfloat [[RS1]], <vscale x 8 x i1> [[VM]], i64 0, i64 [[VL]], i64 1)
+// CHECK-RV64-NEXT: ret <vscale x 8 x float> [[TMP0]]
+//
+vfloat32m4_t test_vfwsub_vf_bf16m2_f32m4_rm_mu(vbool8_t vm, vfloat32m4_t vd,
+ vbfloat16m2_t vs2, __bf16 rs1,
+ size_t vl) {
+ return __riscv_vfwsub_vf_mu(vm, vd, vs2, rs1, __RISCV_FRM_RNE, vl);
+}
+
+// CHECK-RV64-LABEL: define dso_local <vscale x 8 x float> @test_vfwsub_wv_bf16m2_f32m4_rm_mu(
+// CHECK-RV64-SAME: <vscale x 8 x i1> [[VM:%.*]], <vscale x 8 x float> [[VD:%.*]], <vscale x 8 x float> [[VS2:%.*]], <vscale x 8 x bfloat> [[VS1:%.*]], i64 noundef [[VL:%.*]]) #[[ATTR0]] {
+// CHECK-RV64-NEXT: [[ENTRY:.*:]]
+// CHECK-RV64-NEXT: [[TMP0:%.*]] = call <vscale x 8 x float> @llvm.riscv.vfwsub.w.mask.nxv8f32.nxv8bf16.i64(<vscale x 8 x float> [[VD]], <vscale x 8 x float> [[VS2]], <vscale x 8 x bfloat> [[VS1]], <vscale x 8 x i1> [[VM]], i64 0, i64 [[VL]], i64 1)
+// CHECK-RV64-NEXT: ret <vscale x 8 x float> [[TMP0]]
+//
+vfloat32m4_t test_vfwsub_wv_bf16m2_f32m4_rm_mu(vbool8_t vm, vfloat32m4_t vd,
+ vfloat32m4_t vs2,
+ vbfloat16m2_t vs1, size_t vl) {
+ return __riscv_vfwsub_wv_mu(vm, vd, vs2, vs1, __RISCV_FRM_RNE, vl);
+}
+
+// CHECK-RV64-LABEL: define dso_local <vscale x 8 x float> @test_vfwsub_wf_bf16_f32m4_rm_mu(
+// CHECK-RV64-SAME: <vscale x 8 x i1> [[VM:%.*]], <vscale x 8 x float> [[VD:%.*]], <vscale x 8 x float> [[VS2:%.*]], bfloat noundef [[RS1:%.*]], i64 noundef [[VL:%.*]]) #[[ATTR0]] {
+// CHECK-RV64-NEXT: [[ENTRY:.*:]]
+// CHECK-RV64-NEXT: [[TMP0:%.*]] = call <vscale x 8 x float> @llvm.riscv.vfwsub.w.mask.nxv8f32.bf16.i64(<vscale x 8 x float> [[VD]], <vscale x 8 x float> [[VS2]], bfloat [[RS1]], <vscale x 8 x i1> [[VM]], i64 0, i64 [[VL]], i64 1)
+// CHECK-RV64-NEXT: ret <vscale x 8 x float> [[TMP0]]
+//
+vfloat32m4_t test_vfwsub_wf_bf16_f32m4_rm_mu(vbool8_t vm, vfloat32m4_t vd,
+ vfloat32m4_t vs2, __bf16 rs1,
+ size_t vl) {
+ return __riscv_vfwsub_wf_mu(vm, vd, vs2, rs1, __RISCV_FRM_RNE, vl);
+}
+
+// CHECK-RV64-LABEL: define dso_local <vscale x 16 x float> @test_vfwsub_vv_bf16m4_f32m8_rm_mu(
+// CHECK-RV64-SAME: <vscale x 16 x i1> [[VM:%.*]], <vscale x 16 x float> [[VD:%.*]], <vscale x 16 x bfloat> [[VS2:%.*]], <vscale x 16 x bfloat> [[VS1:%.*]], i64 noundef [[VL:%.*]]) #[[ATTR0]] {
+// CHECK-RV64-NEXT: [[ENTRY:.*:]]
+// CHECK-RV64-NEXT: [[TMP0:%.*]] = call <vscale x 16 x float> @llvm.riscv.vfwsub.mask.nxv16f32.nxv16bf16.nxv16bf16.i64(<vscale x 16 x float> [[VD]], <vscale x 16 x bfloat> [[VS2]], <vscale x 16 x bfloat> [[VS1]], <vscale x 16 x i1> [[VM]], i64 0, i64 [[VL]], i64 1)
+// CHECK-RV64-NEXT: ret <vscale x 16 x float> [[TMP0]]
+//
+vfloat32m8_t test_vfwsub_vv_bf16m4_f32m8_rm_mu(vbool4_t vm, vfloat32m8_t vd,
+ vbfloat16m4_t vs2,
+ vbfloat16m4_t vs1, size_t vl) {
+ return __riscv_vfwsub_vv_mu(vm, vd, vs2, vs1, __RISCV_FRM_RNE, vl);
+}
+
+// CHECK-RV64-LABEL: define dso_local <vscale x 16 x float> @test_vfwsub_vf_bf16m4_f32m8_rm_mu(
+// CHECK-RV64-SAME: <vscale x 16 x i1> [[VM:%.*]], <vscale x 16 x float> [[VD:%.*]], <vscale x 16 x bfloat> [[VS2:%.*]], bfloat noundef [[RS1:%.*]], i64 noundef [[VL:%.*]]) #[[ATTR0]] {
+// CHECK-RV64-NEXT: [[ENTRY:.*:]]
+// CHECK-RV64-NEXT: [[TMP0:%.*]] = call <vscale x 16 x float> @llvm.riscv.vfwsub.mask.nxv16f32.nxv16bf16.bf16.i64(<vscale x 16 x float> [[VD]], <vscale x 16 x bfloat> [[VS2]], bfloat [[RS1]], <vscale x 16 x i1> [[VM]], i64 0, i64 [[VL]], i64 1)
+// CHECK-RV64-NEXT: ret <vscale x 16 x float> [[TMP0]]
+//
+vfloat32m8_t test_vfwsub_vf_bf16m4_f32m8_rm_mu(vbool4_t vm, vfloat32m8_t vd,
+ vbfloat16m4_t vs2, __bf16 rs1,
+ size_t vl) {
+ return __riscv_vfwsub_vf_mu(vm, vd, vs2, rs1, __RISCV_FRM_RNE, vl);
+}
+
+// CHECK-RV64-LABEL: define dso_local <vscale x 16 x float> @test_vfwsub_wv_bf16m4_f32m8_rm_mu(
+// CHECK-RV64-SAME: <vscale x 16 x i1> [[VM:%.*]], <vscale x 16 x float> [[VD:%.*]], <vscale x 16 x float> [[VS2:%.*]], <vscale x 16 x bfloat> [[VS1:%.*]], i64 noundef [[VL:%.*]]) #[[ATTR0]] {
+// CHECK-RV64-NEXT: [[ENTRY:.*:]]
+// CHECK-RV64-NEXT: [[TMP0:%.*]] = call <vscale x 16 x float> @llvm.riscv.vfwsub.w.mask.nxv16f32.nxv16bf16.i64(<vscale x 16 x float> [[VD]], <vscale x 16 x float> [[VS2]], <vscale x 16 x bfloat> [[VS1]], <vscale x 16 x i1> [[VM]], i64 0, i64 [[VL]], i64 1)
+// CHECK-RV64-NEXT: ret <vscale x 16 x float> [[TMP0]]
+//
+vfloat32m8_t test_vfwsub_wv_bf16m4_f32m8_rm_mu(vbool4_t vm, vfloat32m8_t vd,
+ vfloat32m8_t vs2,
+ vbfloat16m4_t vs1, size_t vl) {
+ return __riscv_vfwsub_wv_mu(vm, vd, vs2, vs1, __RISCV_FRM_RNE, vl);
+}
+
+// CHECK-RV64-LABEL: define dso_local <vscale x 16 x float> @test_vfwsub_wf_bf16_f32m8_rm_mu(
+// CHECK-RV64-SAME: <vscale x 16 x i1> [[VM:%.*]], <vscale x 16 x float> [[VD:%.*]], <vscale x 16 x float> [[VS2:%.*]], bfloat noundef [[RS1:%.*]], i64 noundef [[VL:%.*]]) #[[ATTR0]] {
+// CHECK-RV64-NEXT: [[ENTRY:.*:]]
+// CHECK-RV64-NEXT: [[TMP0:%.*]] = call <vscale x 16 x float> @llvm.riscv.vfwsub.w.mask.nxv16f32.bf16.i64(<vscale x 16 x float> [[VD]], <vscale x 16 x float> [[VS2]], bfloat [[RS1]], <vscale x 16 x i1> [[VM]], i64 0, i64 [[VL]], i64 1)
+// CHECK-RV64-NEXT: ret <vscale x 16 x float> [[TMP0]]
+//
+vfloat32m8_t test_vfwsub_wf_bf16_f32m8_rm_mu(vbool4_t vm, vfloat32m8_t vd,
+ vfloat32m8_t vs2, __bf16 rs1,
+ size_t vl) {
+ return __riscv_vfwsub_wf_mu(vm, vd, vs2, rs1, __RISCV_FRM_RNE, vl);
+}
diff --git a/clang/test/CodeGen/RISCV/rvv-intrinsics-autogenerated/zvfbfa/policy/overloaded/vmfeq.c b/clang/test/CodeGen/RISCV/rvv-intrinsics-autogenerated/zvfbfa/policy/overloaded/vmfeq.c
new file mode 100644
index 0000000..3945f82
--- /dev/null
+++ b/clang/test/CodeGen/RISCV/rvv-intrinsics-autogenerated/zvfbfa/policy/overloaded/vmfeq.c
@@ -0,0 +1,129 @@
+// NOTE: Assertions have been autogenerated by utils/update_cc_test_checks.py UTC_ARGS: --version 5
+// REQUIRES: riscv-registered-target
+// RUN: %clang_cc1 -triple riscv64 -target-feature +v \
+// RUN: -target-feature +experimental-zvfbfa -disable-O0-optnone \
+// RUN: -emit-llvm %s -o - | opt -S -passes=mem2reg | \
+// RUN: FileCheck --check-prefix=CHECK-RV64 %s
+
+#include <riscv_vector.h>
+
+// CHECK-RV64-LABEL: define dso_local <vscale x 1 x i1> @test_vmfeq_vv_bf16mf4_b64_mu(
+// CHECK-RV64-SAME: <vscale x 1 x i1> [[MASK:%.*]], <vscale x 1 x i1> [[MASKEDOFF:%.*]], <vscale x 1 x bfloat> [[OP1:%.*]], <vscale x 1 x bfloat> [[OP2:%.*]], i64 noundef [[VL:%.*]]) #[[ATTR0:[0-9]+]] {
+// CHECK-RV64-NEXT: [[ENTRY:.*:]]
+// CHECK-RV64-NEXT: [[TMP0:%.*]] = call <vscale x 1 x i1> @llvm.riscv.vmfeq.mask.nxv1bf16.nxv1bf16.i64(<vscale x 1 x i1> [[MASKEDOFF]], <vscale x 1 x bfloat> [[OP1]], <vscale x 1 x bfloat> [[OP2]], <vscale x 1 x i1> [[MASK]], i64 [[VL]])
+// CHECK-RV64-NEXT: ret <vscale x 1 x i1> [[TMP0]]
+//
+vbool64_t test_vmfeq_vv_bf16mf4_b64_mu(vbool64_t mask, vbool64_t maskedoff, vbfloat16mf4_t op1, vbfloat16mf4_t op2, size_t vl) {
+ return __riscv_vmfeq_mu(mask, maskedoff, op1, op2, vl);
+}
+
+// CHECK-RV64-LABEL: define dso_local <vscale x 1 x i1> @test_vmfeq_vf_bf16mf4_b64_mu(
+// CHECK-RV64-SAME: <vscale x 1 x i1> [[MASK:%.*]], <vscale x 1 x i1> [[MASKEDOFF:%.*]], <vscale x 1 x bfloat> [[OP1:%.*]], bfloat noundef [[OP2:%.*]], i64 noundef [[VL:%.*]]) #[[ATTR0]] {
+// CHECK-RV64-NEXT: [[ENTRY:.*:]]
+// CHECK-RV64-NEXT: [[TMP0:%.*]] = call <vscale x 1 x i1> @llvm.riscv.vmfeq.mask.nxv1bf16.bf16.i64(<vscale x 1 x i1> [[MASKEDOFF]], <vscale x 1 x bfloat> [[OP1]], bfloat [[OP2]], <vscale x 1 x i1> [[MASK]], i64 [[VL]])
+// CHECK-RV64-NEXT: ret <vscale x 1 x i1> [[TMP0]]
+//
+vbool64_t test_vmfeq_vf_bf16mf4_b64_mu(vbool64_t mask, vbool64_t maskedoff, vbfloat16mf4_t op1, __bf16 op2, size_t vl) {
+ return __riscv_vmfeq_mu(mask, maskedoff, op1, op2, vl);
+}
+
+// CHECK-RV64-LABEL: define dso_local <vscale x 2 x i1> @test_vmfeq_vv_bf16mf2_b32_mu(
+// CHECK-RV64-SAME: <vscale x 2 x i1> [[MASK:%.*]], <vscale x 2 x i1> [[MASKEDOFF:%.*]], <vscale x 2 x bfloat> [[OP1:%.*]], <vscale x 2 x bfloat> [[OP2:%.*]], i64 noundef [[VL:%.*]]) #[[ATTR0]] {
+// CHECK-RV64-NEXT: [[ENTRY:.*:]]
+// CHECK-RV64-NEXT: [[TMP0:%.*]] = call <vscale x 2 x i1> @llvm.riscv.vmfeq.mask.nxv2bf16.nxv2bf16.i64(<vscale x 2 x i1> [[MASKEDOFF]], <vscale x 2 x bfloat> [[OP1]], <vscale x 2 x bfloat> [[OP2]], <vscale x 2 x i1> [[MASK]], i64 [[VL]])
+// CHECK-RV64-NEXT: ret <vscale x 2 x i1> [[TMP0]]
+//
+vbool32_t test_vmfeq_vv_bf16mf2_b32_mu(vbool32_t mask, vbool32_t maskedoff, vbfloat16mf2_t op1, vbfloat16mf2_t op2, size_t vl) {
+ return __riscv_vmfeq_mu(mask, maskedoff, op1, op2, vl);
+}
+
+// CHECK-RV64-LABEL: define dso_local <vscale x 2 x i1> @test_vmfeq_vf_bf16mf2_b32_mu(
+// CHECK-RV64-SAME: <vscale x 2 x i1> [[MASK:%.*]], <vscale x 2 x i1> [[MASKEDOFF:%.*]], <vscale x 2 x bfloat> [[OP1:%.*]], bfloat noundef [[OP2:%.*]], i64 noundef [[VL:%.*]]) #[[ATTR0]] {
+// CHECK-RV64-NEXT: [[ENTRY:.*:]]
+// CHECK-RV64-NEXT: [[TMP0:%.*]] = call <vscale x 2 x i1> @llvm.riscv.vmfeq.mask.nxv2bf16.bf16.i64(<vscale x 2 x i1> [[MASKEDOFF]], <vscale x 2 x bfloat> [[OP1]], bfloat [[OP2]], <vscale x 2 x i1> [[MASK]], i64 [[VL]])
+// CHECK-RV64-NEXT: ret <vscale x 2 x i1> [[TMP0]]
+//
+vbool32_t test_vmfeq_vf_bf16mf2_b32_mu(vbool32_t mask, vbool32_t maskedoff, vbfloat16mf2_t op1, __bf16 op2, size_t vl) {
+ return __riscv_vmfeq_mu(mask, maskedoff, op1, op2, vl);
+}
+
+// CHECK-RV64-LABEL: define dso_local <vscale x 4 x i1> @test_vmfeq_vv_bf16m1_b16_mu(
+// CHECK-RV64-SAME: <vscale x 4 x i1> [[MASK:%.*]], <vscale x 4 x i1> [[MASKEDOFF:%.*]], <vscale x 4 x bfloat> [[OP1:%.*]], <vscale x 4 x bfloat> [[OP2:%.*]], i64 noundef [[VL:%.*]]) #[[ATTR0]] {
+// CHECK-RV64-NEXT: [[ENTRY:.*:]]
+// CHECK-RV64-NEXT: [[TMP0:%.*]] = call <vscale x 4 x i1> @llvm.riscv.vmfeq.mask.nxv4bf16.nxv4bf16.i64(<vscale x 4 x i1> [[MASKEDOFF]], <vscale x 4 x bfloat> [[OP1]], <vscale x 4 x bfloat> [[OP2]], <vscale x 4 x i1> [[MASK]], i64 [[VL]])
+// CHECK-RV64-NEXT: ret <vscale x 4 x i1> [[TMP0]]
+//
+vbool16_t test_vmfeq_vv_bf16m1_b16_mu(vbool16_t mask, vbool16_t maskedoff, vbfloat16m1_t op1, vbfloat16m1_t op2, size_t vl) {
+ return __riscv_vmfeq_mu(mask, maskedoff, op1, op2, vl);
+}
+
+// CHECK-RV64-LABEL: define dso_local <vscale x 4 x i1> @test_vmfeq_vf_bf16m1_b16_mu(
+// CHECK-RV64-SAME: <vscale x 4 x i1> [[MASK:%.*]], <vscale x 4 x i1> [[MASKEDOFF:%.*]], <vscale x 4 x bfloat> [[OP1:%.*]], bfloat noundef [[OP2:%.*]], i64 noundef [[VL:%.*]]) #[[ATTR0]] {
+// CHECK-RV64-NEXT: [[ENTRY:.*:]]
+// CHECK-RV64-NEXT: [[TMP0:%.*]] = call <vscale x 4 x i1> @llvm.riscv.vmfeq.mask.nxv4bf16.bf16.i64(<vscale x 4 x i1> [[MASKEDOFF]], <vscale x 4 x bfloat> [[OP1]], bfloat [[OP2]], <vscale x 4 x i1> [[MASK]], i64 [[VL]])
+// CHECK-RV64-NEXT: ret <vscale x 4 x i1> [[TMP0]]
+//
+vbool16_t test_vmfeq_vf_bf16m1_b16_mu(vbool16_t mask, vbool16_t maskedoff, vbfloat16m1_t op1, __bf16 op2, size_t vl) {
+ return __riscv_vmfeq_mu(mask, maskedoff, op1, op2, vl);
+}
+
+// CHECK-RV64-LABEL: define dso_local <vscale x 8 x i1> @test_vmfeq_vv_bf16m2_b8_mu(
+// CHECK-RV64-SAME: <vscale x 8 x i1> [[MASK:%.*]], <vscale x 8 x i1> [[MASKEDOFF:%.*]], <vscale x 8 x bfloat> [[OP1:%.*]], <vscale x 8 x bfloat> [[OP2:%.*]], i64 noundef [[VL:%.*]]) #[[ATTR0]] {
+// CHECK-RV64-NEXT: [[ENTRY:.*:]]
+// CHECK-RV64-NEXT: [[TMP0:%.*]] = call <vscale x 8 x i1> @llvm.riscv.vmfeq.mask.nxv8bf16.nxv8bf16.i64(<vscale x 8 x i1> [[MASKEDOFF]], <vscale x 8 x bfloat> [[OP1]], <vscale x 8 x bfloat> [[OP2]], <vscale x 8 x i1> [[MASK]], i64 [[VL]])
+// CHECK-RV64-NEXT: ret <vscale x 8 x i1> [[TMP0]]
+//
+vbool8_t test_vmfeq_vv_bf16m2_b8_mu(vbool8_t mask, vbool8_t maskedoff, vbfloat16m2_t op1, vbfloat16m2_t op2, size_t vl) {
+ return __riscv_vmfeq_mu(mask, maskedoff, op1, op2, vl);
+}
+
+// CHECK-RV64-LABEL: define dso_local <vscale x 8 x i1> @test_vmfeq_vf_bf16m2_b8_mu(
+// CHECK-RV64-SAME: <vscale x 8 x i1> [[MASK:%.*]], <vscale x 8 x i1> [[MASKEDOFF:%.*]], <vscale x 8 x bfloat> [[OP1:%.*]], bfloat noundef [[OP2:%.*]], i64 noundef [[VL:%.*]]) #[[ATTR0]] {
+// CHECK-RV64-NEXT: [[ENTRY:.*:]]
+// CHECK-RV64-NEXT: [[TMP0:%.*]] = call <vscale x 8 x i1> @llvm.riscv.vmfeq.mask.nxv8bf16.bf16.i64(<vscale x 8 x i1> [[MASKEDOFF]], <vscale x 8 x bfloat> [[OP1]], bfloat [[OP2]], <vscale x 8 x i1> [[MASK]], i64 [[VL]])
+// CHECK-RV64-NEXT: ret <vscale x 8 x i1> [[TMP0]]
+//
+vbool8_t test_vmfeq_vf_bf16m2_b8_mu(vbool8_t mask, vbool8_t maskedoff, vbfloat16m2_t op1, __bf16 op2, size_t vl) {
+ return __riscv_vmfeq_mu(mask, maskedoff, op1, op2, vl);
+}
+
+// CHECK-RV64-LABEL: define dso_local <vscale x 16 x i1> @test_vmfeq_vv_bf16m4_b4_mu(
+// CHECK-RV64-SAME: <vscale x 16 x i1> [[MASK:%.*]], <vscale x 16 x i1> [[MASKEDOFF:%.*]], <vscale x 16 x bfloat> [[OP1:%.*]], <vscale x 16 x bfloat> [[OP2:%.*]], i64 noundef [[VL:%.*]]) #[[ATTR0]] {
+// CHECK-RV64-NEXT: [[ENTRY:.*:]]
+// CHECK-RV64-NEXT: [[TMP0:%.*]] = call <vscale x 16 x i1> @llvm.riscv.vmfeq.mask.nxv16bf16.nxv16bf16.i64(<vscale x 16 x i1> [[MASKEDOFF]], <vscale x 16 x bfloat> [[OP1]], <vscale x 16 x bfloat> [[OP2]], <vscale x 16 x i1> [[MASK]], i64 [[VL]])
+// CHECK-RV64-NEXT: ret <vscale x 16 x i1> [[TMP0]]
+//
+vbool4_t test_vmfeq_vv_bf16m4_b4_mu(vbool4_t mask, vbool4_t maskedoff, vbfloat16m4_t op1, vbfloat16m4_t op2, size_t vl) {
+ return __riscv_vmfeq_mu(mask, maskedoff, op1, op2, vl);
+}
+
+// CHECK-RV64-LABEL: define dso_local <vscale x 16 x i1> @test_vmfeq_vf_bf16m4_b4_mu(
+// CHECK-RV64-SAME: <vscale x 16 x i1> [[MASK:%.*]], <vscale x 16 x i1> [[MASKEDOFF:%.*]], <vscale x 16 x bfloat> [[OP1:%.*]], bfloat noundef [[OP2:%.*]], i64 noundef [[VL:%.*]]) #[[ATTR0]] {
+// CHECK-RV64-NEXT: [[ENTRY:.*:]]
+// CHECK-RV64-NEXT: [[TMP0:%.*]] = call <vscale x 16 x i1> @llvm.riscv.vmfeq.mask.nxv16bf16.bf16.i64(<vscale x 16 x i1> [[MASKEDOFF]], <vscale x 16 x bfloat> [[OP1]], bfloat [[OP2]], <vscale x 16 x i1> [[MASK]], i64 [[VL]])
+// CHECK-RV64-NEXT: ret <vscale x 16 x i1> [[TMP0]]
+//
+vbool4_t test_vmfeq_vf_bf16m4_b4_mu(vbool4_t mask, vbool4_t maskedoff, vbfloat16m4_t op1, __bf16 op2, size_t vl) {
+ return __riscv_vmfeq_mu(mask, maskedoff, op1, op2, vl);
+}
+
+// CHECK-RV64-LABEL: define dso_local <vscale x 32 x i1> @test_vmfeq_vv_bf16m8_b2_mu(
+// CHECK-RV64-SAME: <vscale x 32 x i1> [[MASK:%.*]], <vscale x 32 x i1> [[MASKEDOFF:%.*]], <vscale x 32 x bfloat> [[OP1:%.*]], <vscale x 32 x bfloat> [[OP2:%.*]], i64 noundef [[VL:%.*]]) #[[ATTR0]] {
+// CHECK-RV64-NEXT: [[ENTRY:.*:]]
+// CHECK-RV64-NEXT: [[TMP0:%.*]] = call <vscale x 32 x i1> @llvm.riscv.vmfeq.mask.nxv32bf16.nxv32bf16.i64(<vscale x 32 x i1> [[MASKEDOFF]], <vscale x 32 x bfloat> [[OP1]], <vscale x 32 x bfloat> [[OP2]], <vscale x 32 x i1> [[MASK]], i64 [[VL]])
+// CHECK-RV64-NEXT: ret <vscale x 32 x i1> [[TMP0]]
+//
+vbool2_t test_vmfeq_vv_bf16m8_b2_mu(vbool2_t mask, vbool2_t maskedoff, vbfloat16m8_t op1, vbfloat16m8_t op2, size_t vl) {
+ return __riscv_vmfeq_mu(mask, maskedoff, op1, op2, vl);
+}
+
+// CHECK-RV64-LABEL: define dso_local <vscale x 32 x i1> @test_vmfeq_vf_bf16m8_b2_mu(
+// CHECK-RV64-SAME: <vscale x 32 x i1> [[MASK:%.*]], <vscale x 32 x i1> [[MASKEDOFF:%.*]], <vscale x 32 x bfloat> [[OP1:%.*]], bfloat noundef [[OP2:%.*]], i64 noundef [[VL:%.*]]) #[[ATTR0]] {
+// CHECK-RV64-NEXT: [[ENTRY:.*:]]
+// CHECK-RV64-NEXT: [[TMP0:%.*]] = call <vscale x 32 x i1> @llvm.riscv.vmfeq.mask.nxv32bf16.bf16.i64(<vscale x 32 x i1> [[MASKEDOFF]], <vscale x 32 x bfloat> [[OP1]], bfloat [[OP2]], <vscale x 32 x i1> [[MASK]], i64 [[VL]])
+// CHECK-RV64-NEXT: ret <vscale x 32 x i1> [[TMP0]]
+//
+vbool2_t test_vmfeq_vf_bf16m8_b2_mu(vbool2_t mask, vbool2_t maskedoff, vbfloat16m8_t op1, __bf16 op2, size_t vl) {
+ return __riscv_vmfeq_mu(mask, maskedoff, op1, op2, vl);
+}
+
diff --git a/clang/test/CodeGen/RISCV/rvv-intrinsics-autogenerated/zvfbfa/policy/overloaded/vmfge.c b/clang/test/CodeGen/RISCV/rvv-intrinsics-autogenerated/zvfbfa/policy/overloaded/vmfge.c
new file mode 100644
index 0000000..82586da
--- /dev/null
+++ b/clang/test/CodeGen/RISCV/rvv-intrinsics-autogenerated/zvfbfa/policy/overloaded/vmfge.c
@@ -0,0 +1,129 @@
+// NOTE: Assertions have been autogenerated by utils/update_cc_test_checks.py UTC_ARGS: --version 5
+// REQUIRES: riscv-registered-target
+// RUN: %clang_cc1 -triple riscv64 -target-feature +v \
+// RUN: -target-feature +experimental-zvfbfa -disable-O0-optnone \
+// RUN: -emit-llvm %s -o - | opt -S -passes=mem2reg | \
+// RUN: FileCheck --check-prefix=CHECK-RV64 %s
+
+#include <riscv_vector.h>
+
+// CHECK-RV64-LABEL: define dso_local <vscale x 1 x i1> @test_vmfge_vv_bf16mf4_b64_mu(
+// CHECK-RV64-SAME: <vscale x 1 x i1> [[MASK:%.*]], <vscale x 1 x i1> [[MASKEDOFF:%.*]], <vscale x 1 x bfloat> [[OP1:%.*]], <vscale x 1 x bfloat> [[OP2:%.*]], i64 noundef [[VL:%.*]]) #[[ATTR0:[0-9]+]] {
+// CHECK-RV64-NEXT: [[ENTRY:.*:]]
+// CHECK-RV64-NEXT: [[TMP0:%.*]] = call <vscale x 1 x i1> @llvm.riscv.vmfge.mask.nxv1bf16.nxv1bf16.i64(<vscale x 1 x i1> [[MASKEDOFF]], <vscale x 1 x bfloat> [[OP1]], <vscale x 1 x bfloat> [[OP2]], <vscale x 1 x i1> [[MASK]], i64 [[VL]])
+// CHECK-RV64-NEXT: ret <vscale x 1 x i1> [[TMP0]]
+//
+vbool64_t test_vmfge_vv_bf16mf4_b64_mu(vbool64_t mask, vbool64_t maskedoff, vbfloat16mf4_t op1, vbfloat16mf4_t op2, size_t vl) {
+ return __riscv_vmfge_mu(mask, maskedoff, op1, op2, vl);
+}
+
+// CHECK-RV64-LABEL: define dso_local <vscale x 1 x i1> @test_vmfge_vf_bf16mf4_b64_mu(
+// CHECK-RV64-SAME: <vscale x 1 x i1> [[MASK:%.*]], <vscale x 1 x i1> [[MASKEDOFF:%.*]], <vscale x 1 x bfloat> [[OP1:%.*]], bfloat noundef [[OP2:%.*]], i64 noundef [[VL:%.*]]) #[[ATTR0]] {
+// CHECK-RV64-NEXT: [[ENTRY:.*:]]
+// CHECK-RV64-NEXT: [[TMP0:%.*]] = call <vscale x 1 x i1> @llvm.riscv.vmfge.mask.nxv1bf16.bf16.i64(<vscale x 1 x i1> [[MASKEDOFF]], <vscale x 1 x bfloat> [[OP1]], bfloat [[OP2]], <vscale x 1 x i1> [[MASK]], i64 [[VL]])
+// CHECK-RV64-NEXT: ret <vscale x 1 x i1> [[TMP0]]
+//
+vbool64_t test_vmfge_vf_bf16mf4_b64_mu(vbool64_t mask, vbool64_t maskedoff, vbfloat16mf4_t op1, __bf16 op2, size_t vl) {
+ return __riscv_vmfge_mu(mask, maskedoff, op1, op2, vl);
+}
+
+// CHECK-RV64-LABEL: define dso_local <vscale x 2 x i1> @test_vmfge_vv_bf16mf2_b32_mu(
+// CHECK-RV64-SAME: <vscale x 2 x i1> [[MASK:%.*]], <vscale x 2 x i1> [[MASKEDOFF:%.*]], <vscale x 2 x bfloat> [[OP1:%.*]], <vscale x 2 x bfloat> [[OP2:%.*]], i64 noundef [[VL:%.*]]) #[[ATTR0]] {
+// CHECK-RV64-NEXT: [[ENTRY:.*:]]
+// CHECK-RV64-NEXT: [[TMP0:%.*]] = call <vscale x 2 x i1> @llvm.riscv.vmfge.mask.nxv2bf16.nxv2bf16.i64(<vscale x 2 x i1> [[MASKEDOFF]], <vscale x 2 x bfloat> [[OP1]], <vscale x 2 x bfloat> [[OP2]], <vscale x 2 x i1> [[MASK]], i64 [[VL]])
+// CHECK-RV64-NEXT: ret <vscale x 2 x i1> [[TMP0]]
+//
+vbool32_t test_vmfge_vv_bf16mf2_b32_mu(vbool32_t mask, vbool32_t maskedoff, vbfloat16mf2_t op1, vbfloat16mf2_t op2, size_t vl) {
+ return __riscv_vmfge_mu(mask, maskedoff, op1, op2, vl);
+}
+
+// CHECK-RV64-LABEL: define dso_local <vscale x 2 x i1> @test_vmfge_vf_bf16mf2_b32_mu(
+// CHECK-RV64-SAME: <vscale x 2 x i1> [[MASK:%.*]], <vscale x 2 x i1> [[MASKEDOFF:%.*]], <vscale x 2 x bfloat> [[OP1:%.*]], bfloat noundef [[OP2:%.*]], i64 noundef [[VL:%.*]]) #[[ATTR0]] {
+// CHECK-RV64-NEXT: [[ENTRY:.*:]]
+// CHECK-RV64-NEXT: [[TMP0:%.*]] = call <vscale x 2 x i1> @llvm.riscv.vmfge.mask.nxv2bf16.bf16.i64(<vscale x 2 x i1> [[MASKEDOFF]], <vscale x 2 x bfloat> [[OP1]], bfloat [[OP2]], <vscale x 2 x i1> [[MASK]], i64 [[VL]])
+// CHECK-RV64-NEXT: ret <vscale x 2 x i1> [[TMP0]]
+//
+vbool32_t test_vmfge_vf_bf16mf2_b32_mu(vbool32_t mask, vbool32_t maskedoff, vbfloat16mf2_t op1, __bf16 op2, size_t vl) {
+ return __riscv_vmfge_mu(mask, maskedoff, op1, op2, vl);
+}
+
+// CHECK-RV64-LABEL: define dso_local <vscale x 4 x i1> @test_vmfge_vv_bf16m1_b16_mu(
+// CHECK-RV64-SAME: <vscale x 4 x i1> [[MASK:%.*]], <vscale x 4 x i1> [[MASKEDOFF:%.*]], <vscale x 4 x bfloat> [[OP1:%.*]], <vscale x 4 x bfloat> [[OP2:%.*]], i64 noundef [[VL:%.*]]) #[[ATTR0]] {
+// CHECK-RV64-NEXT: [[ENTRY:.*:]]
+// CHECK-RV64-NEXT: [[TMP0:%.*]] = call <vscale x 4 x i1> @llvm.riscv.vmfge.mask.nxv4bf16.nxv4bf16.i64(<vscale x 4 x i1> [[MASKEDOFF]], <vscale x 4 x bfloat> [[OP1]], <vscale x 4 x bfloat> [[OP2]], <vscale x 4 x i1> [[MASK]], i64 [[VL]])
+// CHECK-RV64-NEXT: ret <vscale x 4 x i1> [[TMP0]]
+//
+vbool16_t test_vmfge_vv_bf16m1_b16_mu(vbool16_t mask, vbool16_t maskedoff, vbfloat16m1_t op1, vbfloat16m1_t op2, size_t vl) {
+ return __riscv_vmfge_mu(mask, maskedoff, op1, op2, vl);
+}
+
+// CHECK-RV64-LABEL: define dso_local <vscale x 4 x i1> @test_vmfge_vf_bf16m1_b16_mu(
+// CHECK-RV64-SAME: <vscale x 4 x i1> [[MASK:%.*]], <vscale x 4 x i1> [[MASKEDOFF:%.*]], <vscale x 4 x bfloat> [[OP1:%.*]], bfloat noundef [[OP2:%.*]], i64 noundef [[VL:%.*]]) #[[ATTR0]] {
+// CHECK-RV64-NEXT: [[ENTRY:.*:]]
+// CHECK-RV64-NEXT: [[TMP0:%.*]] = call <vscale x 4 x i1> @llvm.riscv.vmfge.mask.nxv4bf16.bf16.i64(<vscale x 4 x i1> [[MASKEDOFF]], <vscale x 4 x bfloat> [[OP1]], bfloat [[OP2]], <vscale x 4 x i1> [[MASK]], i64 [[VL]])
+// CHECK-RV64-NEXT: ret <vscale x 4 x i1> [[TMP0]]
+//
+vbool16_t test_vmfge_vf_bf16m1_b16_mu(vbool16_t mask, vbool16_t maskedoff, vbfloat16m1_t op1, __bf16 op2, size_t vl) {
+ return __riscv_vmfge_mu(mask, maskedoff, op1, op2, vl);
+}
+
+// CHECK-RV64-LABEL: define dso_local <vscale x 8 x i1> @test_vmfge_vv_bf16m2_b8_mu(
+// CHECK-RV64-SAME: <vscale x 8 x i1> [[MASK:%.*]], <vscale x 8 x i1> [[MASKEDOFF:%.*]], <vscale x 8 x bfloat> [[OP1:%.*]], <vscale x 8 x bfloat> [[OP2:%.*]], i64 noundef [[VL:%.*]]) #[[ATTR0]] {
+// CHECK-RV64-NEXT: [[ENTRY:.*:]]
+// CHECK-RV64-NEXT: [[TMP0:%.*]] = call <vscale x 8 x i1> @llvm.riscv.vmfge.mask.nxv8bf16.nxv8bf16.i64(<vscale x 8 x i1> [[MASKEDOFF]], <vscale x 8 x bfloat> [[OP1]], <vscale x 8 x bfloat> [[OP2]], <vscale x 8 x i1> [[MASK]], i64 [[VL]])
+// CHECK-RV64-NEXT: ret <vscale x 8 x i1> [[TMP0]]
+//
+vbool8_t test_vmfge_vv_bf16m2_b8_mu(vbool8_t mask, vbool8_t maskedoff, vbfloat16m2_t op1, vbfloat16m2_t op2, size_t vl) {
+ return __riscv_vmfge_mu(mask, maskedoff, op1, op2, vl);
+}
+
+// CHECK-RV64-LABEL: define dso_local <vscale x 8 x i1> @test_vmfge_vf_bf16m2_b8_mu(
+// CHECK-RV64-SAME: <vscale x 8 x i1> [[MASK:%.*]], <vscale x 8 x i1> [[MASKEDOFF:%.*]], <vscale x 8 x bfloat> [[OP1:%.*]], bfloat noundef [[OP2:%.*]], i64 noundef [[VL:%.*]]) #[[ATTR0]] {
+// CHECK-RV64-NEXT: [[ENTRY:.*:]]
+// CHECK-RV64-NEXT: [[TMP0:%.*]] = call <vscale x 8 x i1> @llvm.riscv.vmfge.mask.nxv8bf16.bf16.i64(<vscale x 8 x i1> [[MASKEDOFF]], <vscale x 8 x bfloat> [[OP1]], bfloat [[OP2]], <vscale x 8 x i1> [[MASK]], i64 [[VL]])
+// CHECK-RV64-NEXT: ret <vscale x 8 x i1> [[TMP0]]
+//
+vbool8_t test_vmfge_vf_bf16m2_b8_mu(vbool8_t mask, vbool8_t maskedoff, vbfloat16m2_t op1, __bf16 op2, size_t vl) {
+ return __riscv_vmfge_mu(mask, maskedoff, op1, op2, vl);
+}
+
+// CHECK-RV64-LABEL: define dso_local <vscale x 16 x i1> @test_vmfge_vv_bf16m4_b4_mu(
+// CHECK-RV64-SAME: <vscale x 16 x i1> [[MASK:%.*]], <vscale x 16 x i1> [[MASKEDOFF:%.*]], <vscale x 16 x bfloat> [[OP1:%.*]], <vscale x 16 x bfloat> [[OP2:%.*]], i64 noundef [[VL:%.*]]) #[[ATTR0]] {
+// CHECK-RV64-NEXT: [[ENTRY:.*:]]
+// CHECK-RV64-NEXT: [[TMP0:%.*]] = call <vscale x 16 x i1> @llvm.riscv.vmfge.mask.nxv16bf16.nxv16bf16.i64(<vscale x 16 x i1> [[MASKEDOFF]], <vscale x 16 x bfloat> [[OP1]], <vscale x 16 x bfloat> [[OP2]], <vscale x 16 x i1> [[MASK]], i64 [[VL]])
+// CHECK-RV64-NEXT: ret <vscale x 16 x i1> [[TMP0]]
+//
+vbool4_t test_vmfge_vv_bf16m4_b4_mu(vbool4_t mask, vbool4_t maskedoff, vbfloat16m4_t op1, vbfloat16m4_t op2, size_t vl) {
+ return __riscv_vmfge_mu(mask, maskedoff, op1, op2, vl);
+}
+
+// CHECK-RV64-LABEL: define dso_local <vscale x 16 x i1> @test_vmfge_vf_bf16m4_b4_mu(
+// CHECK-RV64-SAME: <vscale x 16 x i1> [[MASK:%.*]], <vscale x 16 x i1> [[MASKEDOFF:%.*]], <vscale x 16 x bfloat> [[OP1:%.*]], bfloat noundef [[OP2:%.*]], i64 noundef [[VL:%.*]]) #[[ATTR0]] {
+// CHECK-RV64-NEXT: [[ENTRY:.*:]]
+// CHECK-RV64-NEXT: [[TMP0:%.*]] = call <vscale x 16 x i1> @llvm.riscv.vmfge.mask.nxv16bf16.bf16.i64(<vscale x 16 x i1> [[MASKEDOFF]], <vscale x 16 x bfloat> [[OP1]], bfloat [[OP2]], <vscale x 16 x i1> [[MASK]], i64 [[VL]])
+// CHECK-RV64-NEXT: ret <vscale x 16 x i1> [[TMP0]]
+//
+vbool4_t test_vmfge_vf_bf16m4_b4_mu(vbool4_t mask, vbool4_t maskedoff, vbfloat16m4_t op1, __bf16 op2, size_t vl) {
+ return __riscv_vmfge_mu(mask, maskedoff, op1, op2, vl);
+}
+
+// CHECK-RV64-LABEL: define dso_local <vscale x 32 x i1> @test_vmfge_vv_bf16m8_b2_mu(
+// CHECK-RV64-SAME: <vscale x 32 x i1> [[MASK:%.*]], <vscale x 32 x i1> [[MASKEDOFF:%.*]], <vscale x 32 x bfloat> [[OP1:%.*]], <vscale x 32 x bfloat> [[OP2:%.*]], i64 noundef [[VL:%.*]]) #[[ATTR0]] {
+// CHECK-RV64-NEXT: [[ENTRY:.*:]]
+// CHECK-RV64-NEXT: [[TMP0:%.*]] = call <vscale x 32 x i1> @llvm.riscv.vmfge.mask.nxv32bf16.nxv32bf16.i64(<vscale x 32 x i1> [[MASKEDOFF]], <vscale x 32 x bfloat> [[OP1]], <vscale x 32 x bfloat> [[OP2]], <vscale x 32 x i1> [[MASK]], i64 [[VL]])
+// CHECK-RV64-NEXT: ret <vscale x 32 x i1> [[TMP0]]
+//
+vbool2_t test_vmfge_vv_bf16m8_b2_mu(vbool2_t mask, vbool2_t maskedoff, vbfloat16m8_t op1, vbfloat16m8_t op2, size_t vl) {
+ return __riscv_vmfge_mu(mask, maskedoff, op1, op2, vl);
+}
+
+// CHECK-RV64-LABEL: define dso_local <vscale x 32 x i1> @test_vmfge_vf_bf16m8_b2_mu(
+// CHECK-RV64-SAME: <vscale x 32 x i1> [[MASK:%.*]], <vscale x 32 x i1> [[MASKEDOFF:%.*]], <vscale x 32 x bfloat> [[OP1:%.*]], bfloat noundef [[OP2:%.*]], i64 noundef [[VL:%.*]]) #[[ATTR0]] {
+// CHECK-RV64-NEXT: [[ENTRY:.*:]]
+// CHECK-RV64-NEXT: [[TMP0:%.*]] = call <vscale x 32 x i1> @llvm.riscv.vmfge.mask.nxv32bf16.bf16.i64(<vscale x 32 x i1> [[MASKEDOFF]], <vscale x 32 x bfloat> [[OP1]], bfloat [[OP2]], <vscale x 32 x i1> [[MASK]], i64 [[VL]])
+// CHECK-RV64-NEXT: ret <vscale x 32 x i1> [[TMP0]]
+//
+vbool2_t test_vmfge_vf_bf16m8_b2_mu(vbool2_t mask, vbool2_t maskedoff, vbfloat16m8_t op1, __bf16 op2, size_t vl) {
+ return __riscv_vmfge_mu(mask, maskedoff, op1, op2, vl);
+}
+
diff --git a/clang/test/CodeGen/RISCV/rvv-intrinsics-autogenerated/zvfbfa/policy/overloaded/vmfgt.c b/clang/test/CodeGen/RISCV/rvv-intrinsics-autogenerated/zvfbfa/policy/overloaded/vmfgt.c
new file mode 100644
index 0000000..75ccbbc
--- /dev/null
+++ b/clang/test/CodeGen/RISCV/rvv-intrinsics-autogenerated/zvfbfa/policy/overloaded/vmfgt.c
@@ -0,0 +1,129 @@
+// NOTE: Assertions have been autogenerated by utils/update_cc_test_checks.py UTC_ARGS: --version 5
+// REQUIRES: riscv-registered-target
+// RUN: %clang_cc1 -triple riscv64 -target-feature +v \
+// RUN: -target-feature +experimental-zvfbfa -disable-O0-optnone \
+// RUN: -emit-llvm %s -o - | opt -S -passes=mem2reg | \
+// RUN: FileCheck --check-prefix=CHECK-RV64 %s
+
+#include <riscv_vector.h>
+
+// CHECK-RV64-LABEL: define dso_local <vscale x 1 x i1> @test_vmfgt_vv_bf16mf4_b64_mu(
+// CHECK-RV64-SAME: <vscale x 1 x i1> [[MASK:%.*]], <vscale x 1 x i1> [[MASKEDOFF:%.*]], <vscale x 1 x bfloat> [[OP1:%.*]], <vscale x 1 x bfloat> [[OP2:%.*]], i64 noundef [[VL:%.*]]) #[[ATTR0:[0-9]+]] {
+// CHECK-RV64-NEXT: [[ENTRY:.*:]]
+// CHECK-RV64-NEXT: [[TMP0:%.*]] = call <vscale x 1 x i1> @llvm.riscv.vmfgt.mask.nxv1bf16.nxv1bf16.i64(<vscale x 1 x i1> [[MASKEDOFF]], <vscale x 1 x bfloat> [[OP1]], <vscale x 1 x bfloat> [[OP2]], <vscale x 1 x i1> [[MASK]], i64 [[VL]])
+// CHECK-RV64-NEXT: ret <vscale x 1 x i1> [[TMP0]]
+//
+vbool64_t test_vmfgt_vv_bf16mf4_b64_mu(vbool64_t mask, vbool64_t maskedoff, vbfloat16mf4_t op1, vbfloat16mf4_t op2, size_t vl) {
+ return __riscv_vmfgt_mu(mask, maskedoff, op1, op2, vl);
+}
+
+// CHECK-RV64-LABEL: define dso_local <vscale x 1 x i1> @test_vmfgt_vf_bf16mf4_b64_mu(
+// CHECK-RV64-SAME: <vscale x 1 x i1> [[MASK:%.*]], <vscale x 1 x i1> [[MASKEDOFF:%.*]], <vscale x 1 x bfloat> [[OP1:%.*]], bfloat noundef [[OP2:%.*]], i64 noundef [[VL:%.*]]) #[[ATTR0]] {
+// CHECK-RV64-NEXT: [[ENTRY:.*:]]
+// CHECK-RV64-NEXT: [[TMP0:%.*]] = call <vscale x 1 x i1> @llvm.riscv.vmfgt.mask.nxv1bf16.bf16.i64(<vscale x 1 x i1> [[MASKEDOFF]], <vscale x 1 x bfloat> [[OP1]], bfloat [[OP2]], <vscale x 1 x i1> [[MASK]], i64 [[VL]])
+// CHECK-RV64-NEXT: ret <vscale x 1 x i1> [[TMP0]]
+//
+vbool64_t test_vmfgt_vf_bf16mf4_b64_mu(vbool64_t mask, vbool64_t maskedoff, vbfloat16mf4_t op1, __bf16 op2, size_t vl) {
+ return __riscv_vmfgt_mu(mask, maskedoff, op1, op2, vl);
+}
+
+// CHECK-RV64-LABEL: define dso_local <vscale x 2 x i1> @test_vmfgt_vv_bf16mf2_b32_mu(
+// CHECK-RV64-SAME: <vscale x 2 x i1> [[MASK:%.*]], <vscale x 2 x i1> [[MASKEDOFF:%.*]], <vscale x 2 x bfloat> [[OP1:%.*]], <vscale x 2 x bfloat> [[OP2:%.*]], i64 noundef [[VL:%.*]]) #[[ATTR0]] {
+// CHECK-RV64-NEXT: [[ENTRY:.*:]]
+// CHECK-RV64-NEXT: [[TMP0:%.*]] = call <vscale x 2 x i1> @llvm.riscv.vmfgt.mask.nxv2bf16.nxv2bf16.i64(<vscale x 2 x i1> [[MASKEDOFF]], <vscale x 2 x bfloat> [[OP1]], <vscale x 2 x bfloat> [[OP2]], <vscale x 2 x i1> [[MASK]], i64 [[VL]])
+// CHECK-RV64-NEXT: ret <vscale x 2 x i1> [[TMP0]]
+//
+vbool32_t test_vmfgt_vv_bf16mf2_b32_mu(vbool32_t mask, vbool32_t maskedoff, vbfloat16mf2_t op1, vbfloat16mf2_t op2, size_t vl) {
+ return __riscv_vmfgt_mu(mask, maskedoff, op1, op2, vl);
+}
+
+// CHECK-RV64-LABEL: define dso_local <vscale x 2 x i1> @test_vmfgt_vf_bf16mf2_b32_mu(
+// CHECK-RV64-SAME: <vscale x 2 x i1> [[MASK:%.*]], <vscale x 2 x i1> [[MASKEDOFF:%.*]], <vscale x 2 x bfloat> [[OP1:%.*]], bfloat noundef [[OP2:%.*]], i64 noundef [[VL:%.*]]) #[[ATTR0]] {
+// CHECK-RV64-NEXT: [[ENTRY:.*:]]
+// CHECK-RV64-NEXT: [[TMP0:%.*]] = call <vscale x 2 x i1> @llvm.riscv.vmfgt.mask.nxv2bf16.bf16.i64(<vscale x 2 x i1> [[MASKEDOFF]], <vscale x 2 x bfloat> [[OP1]], bfloat [[OP2]], <vscale x 2 x i1> [[MASK]], i64 [[VL]])
+// CHECK-RV64-NEXT: ret <vscale x 2 x i1> [[TMP0]]
+//
+vbool32_t test_vmfgt_vf_bf16mf2_b32_mu(vbool32_t mask, vbool32_t maskedoff, vbfloat16mf2_t op1, __bf16 op2, size_t vl) {
+ return __riscv_vmfgt_mu(mask, maskedoff, op1, op2, vl);
+}
+
+// CHECK-RV64-LABEL: define dso_local <vscale x 4 x i1> @test_vmfgt_vv_bf16m1_b16_mu(
+// CHECK-RV64-SAME: <vscale x 4 x i1> [[MASK:%.*]], <vscale x 4 x i1> [[MASKEDOFF:%.*]], <vscale x 4 x bfloat> [[OP1:%.*]], <vscale x 4 x bfloat> [[OP2:%.*]], i64 noundef [[VL:%.*]]) #[[ATTR0]] {
+// CHECK-RV64-NEXT: [[ENTRY:.*:]]
+// CHECK-RV64-NEXT: [[TMP0:%.*]] = call <vscale x 4 x i1> @llvm.riscv.vmfgt.mask.nxv4bf16.nxv4bf16.i64(<vscale x 4 x i1> [[MASKEDOFF]], <vscale x 4 x bfloat> [[OP1]], <vscale x 4 x bfloat> [[OP2]], <vscale x 4 x i1> [[MASK]], i64 [[VL]])
+// CHECK-RV64-NEXT: ret <vscale x 4 x i1> [[TMP0]]
+//
+vbool16_t test_vmfgt_vv_bf16m1_b16_mu(vbool16_t mask, vbool16_t maskedoff, vbfloat16m1_t op1, vbfloat16m1_t op2, size_t vl) {
+ return __riscv_vmfgt_mu(mask, maskedoff, op1, op2, vl);
+}
+
+// CHECK-RV64-LABEL: define dso_local <vscale x 4 x i1> @test_vmfgt_vf_bf16m1_b16_mu(
+// CHECK-RV64-SAME: <vscale x 4 x i1> [[MASK:%.*]], <vscale x 4 x i1> [[MASKEDOFF:%.*]], <vscale x 4 x bfloat> [[OP1:%.*]], bfloat noundef [[OP2:%.*]], i64 noundef [[VL:%.*]]) #[[ATTR0]] {
+// CHECK-RV64-NEXT: [[ENTRY:.*:]]
+// CHECK-RV64-NEXT: [[TMP0:%.*]] = call <vscale x 4 x i1> @llvm.riscv.vmfgt.mask.nxv4bf16.bf16.i64(<vscale x 4 x i1> [[MASKEDOFF]], <vscale x 4 x bfloat> [[OP1]], bfloat [[OP2]], <vscale x 4 x i1> [[MASK]], i64 [[VL]])
+// CHECK-RV64-NEXT: ret <vscale x 4 x i1> [[TMP0]]
+//
+vbool16_t test_vmfgt_vf_bf16m1_b16_mu(vbool16_t mask, vbool16_t maskedoff, vbfloat16m1_t op1, __bf16 op2, size_t vl) {
+ return __riscv_vmfgt_mu(mask, maskedoff, op1, op2, vl);
+}
+
+// CHECK-RV64-LABEL: define dso_local <vscale x 8 x i1> @test_vmfgt_vv_bf16m2_b8_mu(
+// CHECK-RV64-SAME: <vscale x 8 x i1> [[MASK:%.*]], <vscale x 8 x i1> [[MASKEDOFF:%.*]], <vscale x 8 x bfloat> [[OP1:%.*]], <vscale x 8 x bfloat> [[OP2:%.*]], i64 noundef [[VL:%.*]]) #[[ATTR0]] {
+// CHECK-RV64-NEXT: [[ENTRY:.*:]]
+// CHECK-RV64-NEXT: [[TMP0:%.*]] = call <vscale x 8 x i1> @llvm.riscv.vmfgt.mask.nxv8bf16.nxv8bf16.i64(<vscale x 8 x i1> [[MASKEDOFF]], <vscale x 8 x bfloat> [[OP1]], <vscale x 8 x bfloat> [[OP2]], <vscale x 8 x i1> [[MASK]], i64 [[VL]])
+// CHECK-RV64-NEXT: ret <vscale x 8 x i1> [[TMP0]]
+//
+vbool8_t test_vmfgt_vv_bf16m2_b8_mu(vbool8_t mask, vbool8_t maskedoff, vbfloat16m2_t op1, vbfloat16m2_t op2, size_t vl) {
+ return __riscv_vmfgt_mu(mask, maskedoff, op1, op2, vl);
+}
+
+// CHECK-RV64-LABEL: define dso_local <vscale x 8 x i1> @test_vmfgt_vf_bf16m2_b8_mu(
+// CHECK-RV64-SAME: <vscale x 8 x i1> [[MASK:%.*]], <vscale x 8 x i1> [[MASKEDOFF:%.*]], <vscale x 8 x bfloat> [[OP1:%.*]], bfloat noundef [[OP2:%.*]], i64 noundef [[VL:%.*]]) #[[ATTR0]] {
+// CHECK-RV64-NEXT: [[ENTRY:.*:]]
+// CHECK-RV64-NEXT: [[TMP0:%.*]] = call <vscale x 8 x i1> @llvm.riscv.vmfgt.mask.nxv8bf16.bf16.i64(<vscale x 8 x i1> [[MASKEDOFF]], <vscale x 8 x bfloat> [[OP1]], bfloat [[OP2]], <vscale x 8 x i1> [[MASK]], i64 [[VL]])
+// CHECK-RV64-NEXT: ret <vscale x 8 x i1> [[TMP0]]
+//
+vbool8_t test_vmfgt_vf_bf16m2_b8_mu(vbool8_t mask, vbool8_t maskedoff, vbfloat16m2_t op1, __bf16 op2, size_t vl) {
+ return __riscv_vmfgt_mu(mask, maskedoff, op1, op2, vl);
+}
+
+// CHECK-RV64-LABEL: define dso_local <vscale x 16 x i1> @test_vmfgt_vv_bf16m4_b4_mu(
+// CHECK-RV64-SAME: <vscale x 16 x i1> [[MASK:%.*]], <vscale x 16 x i1> [[MASKEDOFF:%.*]], <vscale x 16 x bfloat> [[OP1:%.*]], <vscale x 16 x bfloat> [[OP2:%.*]], i64 noundef [[VL:%.*]]) #[[ATTR0]] {
+// CHECK-RV64-NEXT: [[ENTRY:.*:]]
+// CHECK-RV64-NEXT: [[TMP0:%.*]] = call <vscale x 16 x i1> @llvm.riscv.vmfgt.mask.nxv16bf16.nxv16bf16.i64(<vscale x 16 x i1> [[MASKEDOFF]], <vscale x 16 x bfloat> [[OP1]], <vscale x 16 x bfloat> [[OP2]], <vscale x 16 x i1> [[MASK]], i64 [[VL]])
+// CHECK-RV64-NEXT: ret <vscale x 16 x i1> [[TMP0]]
+//
+vbool4_t test_vmfgt_vv_bf16m4_b4_mu(vbool4_t mask, vbool4_t maskedoff, vbfloat16m4_t op1, vbfloat16m4_t op2, size_t vl) {
+ return __riscv_vmfgt_mu(mask, maskedoff, op1, op2, vl);
+}
+
+// CHECK-RV64-LABEL: define dso_local <vscale x 16 x i1> @test_vmfgt_vf_bf16m4_b4_mu(
+// CHECK-RV64-SAME: <vscale x 16 x i1> [[MASK:%.*]], <vscale x 16 x i1> [[MASKEDOFF:%.*]], <vscale x 16 x bfloat> [[OP1:%.*]], bfloat noundef [[OP2:%.*]], i64 noundef [[VL:%.*]]) #[[ATTR0]] {
+// CHECK-RV64-NEXT: [[ENTRY:.*:]]
+// CHECK-RV64-NEXT: [[TMP0:%.*]] = call <vscale x 16 x i1> @llvm.riscv.vmfgt.mask.nxv16bf16.bf16.i64(<vscale x 16 x i1> [[MASKEDOFF]], <vscale x 16 x bfloat> [[OP1]], bfloat [[OP2]], <vscale x 16 x i1> [[MASK]], i64 [[VL]])
+// CHECK-RV64-NEXT: ret <vscale x 16 x i1> [[TMP0]]
+//
+vbool4_t test_vmfgt_vf_bf16m4_b4_mu(vbool4_t mask, vbool4_t maskedoff, vbfloat16m4_t op1, __bf16 op2, size_t vl) {
+ return __riscv_vmfgt_mu(mask, maskedoff, op1, op2, vl);
+}
+
+// CHECK-RV64-LABEL: define dso_local <vscale x 32 x i1> @test_vmfgt_vv_bf16m8_b2_mu(
+// CHECK-RV64-SAME: <vscale x 32 x i1> [[MASK:%.*]], <vscale x 32 x i1> [[MASKEDOFF:%.*]], <vscale x 32 x bfloat> [[OP1:%.*]], <vscale x 32 x bfloat> [[OP2:%.*]], i64 noundef [[VL:%.*]]) #[[ATTR0]] {
+// CHECK-RV64-NEXT: [[ENTRY:.*:]]
+// CHECK-RV64-NEXT: [[TMP0:%.*]] = call <vscale x 32 x i1> @llvm.riscv.vmfgt.mask.nxv32bf16.nxv32bf16.i64(<vscale x 32 x i1> [[MASKEDOFF]], <vscale x 32 x bfloat> [[OP1]], <vscale x 32 x bfloat> [[OP2]], <vscale x 32 x i1> [[MASK]], i64 [[VL]])
+// CHECK-RV64-NEXT: ret <vscale x 32 x i1> [[TMP0]]
+//
+vbool2_t test_vmfgt_vv_bf16m8_b2_mu(vbool2_t mask, vbool2_t maskedoff, vbfloat16m8_t op1, vbfloat16m8_t op2, size_t vl) {
+ return __riscv_vmfgt_mu(mask, maskedoff, op1, op2, vl);
+}
+
+// CHECK-RV64-LABEL: define dso_local <vscale x 32 x i1> @test_vmfgt_vf_bf16m8_b2_mu(
+// CHECK-RV64-SAME: <vscale x 32 x i1> [[MASK:%.*]], <vscale x 32 x i1> [[MASKEDOFF:%.*]], <vscale x 32 x bfloat> [[OP1:%.*]], bfloat noundef [[OP2:%.*]], i64 noundef [[VL:%.*]]) #[[ATTR0]] {
+// CHECK-RV64-NEXT: [[ENTRY:.*:]]
+// CHECK-RV64-NEXT: [[TMP0:%.*]] = call <vscale x 32 x i1> @llvm.riscv.vmfgt.mask.nxv32bf16.bf16.i64(<vscale x 32 x i1> [[MASKEDOFF]], <vscale x 32 x bfloat> [[OP1]], bfloat [[OP2]], <vscale x 32 x i1> [[MASK]], i64 [[VL]])
+// CHECK-RV64-NEXT: ret <vscale x 32 x i1> [[TMP0]]
+//
+vbool2_t test_vmfgt_vf_bf16m8_b2_mu(vbool2_t mask, vbool2_t maskedoff, vbfloat16m8_t op1, __bf16 op2, size_t vl) {
+ return __riscv_vmfgt_mu(mask, maskedoff, op1, op2, vl);
+}
+
diff --git a/clang/test/CodeGen/RISCV/rvv-intrinsics-autogenerated/zvfbfa/policy/overloaded/vmfle.c b/clang/test/CodeGen/RISCV/rvv-intrinsics-autogenerated/zvfbfa/policy/overloaded/vmfle.c
new file mode 100644
index 0000000..49ff1c9
--- /dev/null
+++ b/clang/test/CodeGen/RISCV/rvv-intrinsics-autogenerated/zvfbfa/policy/overloaded/vmfle.c
@@ -0,0 +1,129 @@
+// NOTE: Assertions have been autogenerated by utils/update_cc_test_checks.py UTC_ARGS: --version 5
+// REQUIRES: riscv-registered-target
+// RUN: %clang_cc1 -triple riscv64 -target-feature +v \
+// RUN: -target-feature +experimental-zvfbfa -disable-O0-optnone \
+// RUN: -emit-llvm %s -o - | opt -S -passes=mem2reg | \
+// RUN: FileCheck --check-prefix=CHECK-RV64 %s
+
+#include <riscv_vector.h>
+
+// CHECK-RV64-LABEL: define dso_local <vscale x 1 x i1> @test_vmfle_vv_bf16mf4_b64_mu(
+// CHECK-RV64-SAME: <vscale x 1 x i1> [[MASK:%.*]], <vscale x 1 x i1> [[MASKEDOFF:%.*]], <vscale x 1 x bfloat> [[OP1:%.*]], <vscale x 1 x bfloat> [[OP2:%.*]], i64 noundef [[VL:%.*]]) #[[ATTR0:[0-9]+]] {
+// CHECK-RV64-NEXT: [[ENTRY:.*:]]
+// CHECK-RV64-NEXT: [[TMP0:%.*]] = call <vscale x 1 x i1> @llvm.riscv.vmfle.mask.nxv1bf16.nxv1bf16.i64(<vscale x 1 x i1> [[MASKEDOFF]], <vscale x 1 x bfloat> [[OP1]], <vscale x 1 x bfloat> [[OP2]], <vscale x 1 x i1> [[MASK]], i64 [[VL]])
+// CHECK-RV64-NEXT: ret <vscale x 1 x i1> [[TMP0]]
+//
+vbool64_t test_vmfle_vv_bf16mf4_b64_mu(vbool64_t mask, vbool64_t maskedoff, vbfloat16mf4_t op1, vbfloat16mf4_t op2, size_t vl) {
+ return __riscv_vmfle_mu(mask, maskedoff, op1, op2, vl);
+}
+
+// CHECK-RV64-LABEL: define dso_local <vscale x 1 x i1> @test_vmfle_vf_bf16mf4_b64_mu(
+// CHECK-RV64-SAME: <vscale x 1 x i1> [[MASK:%.*]], <vscale x 1 x i1> [[MASKEDOFF:%.*]], <vscale x 1 x bfloat> [[OP1:%.*]], bfloat noundef [[OP2:%.*]], i64 noundef [[VL:%.*]]) #[[ATTR0]] {
+// CHECK-RV64-NEXT: [[ENTRY:.*:]]
+// CHECK-RV64-NEXT: [[TMP0:%.*]] = call <vscale x 1 x i1> @llvm.riscv.vmfle.mask.nxv1bf16.bf16.i64(<vscale x 1 x i1> [[MASKEDOFF]], <vscale x 1 x bfloat> [[OP1]], bfloat [[OP2]], <vscale x 1 x i1> [[MASK]], i64 [[VL]])
+// CHECK-RV64-NEXT: ret <vscale x 1 x i1> [[TMP0]]
+//
+vbool64_t test_vmfle_vf_bf16mf4_b64_mu(vbool64_t mask, vbool64_t maskedoff, vbfloat16mf4_t op1, __bf16 op2, size_t vl) {
+ return __riscv_vmfle_mu(mask, maskedoff, op1, op2, vl);
+}
+
+// CHECK-RV64-LABEL: define dso_local <vscale x 2 x i1> @test_vmfle_vv_bf16mf2_b32_mu(
+// CHECK-RV64-SAME: <vscale x 2 x i1> [[MASK:%.*]], <vscale x 2 x i1> [[MASKEDOFF:%.*]], <vscale x 2 x bfloat> [[OP1:%.*]], <vscale x 2 x bfloat> [[OP2:%.*]], i64 noundef [[VL:%.*]]) #[[ATTR0]] {
+// CHECK-RV64-NEXT: [[ENTRY:.*:]]
+// CHECK-RV64-NEXT: [[TMP0:%.*]] = call <vscale x 2 x i1> @llvm.riscv.vmfle.mask.nxv2bf16.nxv2bf16.i64(<vscale x 2 x i1> [[MASKEDOFF]], <vscale x 2 x bfloat> [[OP1]], <vscale x 2 x bfloat> [[OP2]], <vscale x 2 x i1> [[MASK]], i64 [[VL]])
+// CHECK-RV64-NEXT: ret <vscale x 2 x i1> [[TMP0]]
+//
+vbool32_t test_vmfle_vv_bf16mf2_b32_mu(vbool32_t mask, vbool32_t maskedoff, vbfloat16mf2_t op1, vbfloat16mf2_t op2, size_t vl) {
+ return __riscv_vmfle_mu(mask, maskedoff, op1, op2, vl);
+}
+
+// CHECK-RV64-LABEL: define dso_local <vscale x 2 x i1> @test_vmfle_vf_bf16mf2_b32_mu(
+// CHECK-RV64-SAME: <vscale x 2 x i1> [[MASK:%.*]], <vscale x 2 x i1> [[MASKEDOFF:%.*]], <vscale x 2 x bfloat> [[OP1:%.*]], bfloat noundef [[OP2:%.*]], i64 noundef [[VL:%.*]]) #[[ATTR0]] {
+// CHECK-RV64-NEXT: [[ENTRY:.*:]]
+// CHECK-RV64-NEXT: [[TMP0:%.*]] = call <vscale x 2 x i1> @llvm.riscv.vmfle.mask.nxv2bf16.bf16.i64(<vscale x 2 x i1> [[MASKEDOFF]], <vscale x 2 x bfloat> [[OP1]], bfloat [[OP2]], <vscale x 2 x i1> [[MASK]], i64 [[VL]])
+// CHECK-RV64-NEXT: ret <vscale x 2 x i1> [[TMP0]]
+//
+vbool32_t test_vmfle_vf_bf16mf2_b32_mu(vbool32_t mask, vbool32_t maskedoff, vbfloat16mf2_t op1, __bf16 op2, size_t vl) {
+ return __riscv_vmfle_mu(mask, maskedoff, op1, op2, vl);
+}
+
+// CHECK-RV64-LABEL: define dso_local <vscale x 4 x i1> @test_vmfle_vv_bf16m1_b16_mu(
+// CHECK-RV64-SAME: <vscale x 4 x i1> [[MASK:%.*]], <vscale x 4 x i1> [[MASKEDOFF:%.*]], <vscale x 4 x bfloat> [[OP1:%.*]], <vscale x 4 x bfloat> [[OP2:%.*]], i64 noundef [[VL:%.*]]) #[[ATTR0]] {
+// CHECK-RV64-NEXT: [[ENTRY:.*:]]
+// CHECK-RV64-NEXT: [[TMP0:%.*]] = call <vscale x 4 x i1> @llvm.riscv.vmfle.mask.nxv4bf16.nxv4bf16.i64(<vscale x 4 x i1> [[MASKEDOFF]], <vscale x 4 x bfloat> [[OP1]], <vscale x 4 x bfloat> [[OP2]], <vscale x 4 x i1> [[MASK]], i64 [[VL]])
+// CHECK-RV64-NEXT: ret <vscale x 4 x i1> [[TMP0]]
+//
+vbool16_t test_vmfle_vv_bf16m1_b16_mu(vbool16_t mask, vbool16_t maskedoff, vbfloat16m1_t op1, vbfloat16m1_t op2, size_t vl) {
+ return __riscv_vmfle_mu(mask, maskedoff, op1, op2, vl);
+}
+
+// CHECK-RV64-LABEL: define dso_local <vscale x 4 x i1> @test_vmfle_vf_bf16m1_b16_mu(
+// CHECK-RV64-SAME: <vscale x 4 x i1> [[MASK:%.*]], <vscale x 4 x i1> [[MASKEDOFF:%.*]], <vscale x 4 x bfloat> [[OP1:%.*]], bfloat noundef [[OP2:%.*]], i64 noundef [[VL:%.*]]) #[[ATTR0]] {
+// CHECK-RV64-NEXT: [[ENTRY:.*:]]
+// CHECK-RV64-NEXT: [[TMP0:%.*]] = call <vscale x 4 x i1> @llvm.riscv.vmfle.mask.nxv4bf16.bf16.i64(<vscale x 4 x i1> [[MASKEDOFF]], <vscale x 4 x bfloat> [[OP1]], bfloat [[OP2]], <vscale x 4 x i1> [[MASK]], i64 [[VL]])
+// CHECK-RV64-NEXT: ret <vscale x 4 x i1> [[TMP0]]
+//
+vbool16_t test_vmfle_vf_bf16m1_b16_mu(vbool16_t mask, vbool16_t maskedoff, vbfloat16m1_t op1, __bf16 op2, size_t vl) {
+ return __riscv_vmfle_mu(mask, maskedoff, op1, op2, vl);
+}
+
+// CHECK-RV64-LABEL: define dso_local <vscale x 8 x i1> @test_vmfle_vv_bf16m2_b8_mu(
+// CHECK-RV64-SAME: <vscale x 8 x i1> [[MASK:%.*]], <vscale x 8 x i1> [[MASKEDOFF:%.*]], <vscale x 8 x bfloat> [[OP1:%.*]], <vscale x 8 x bfloat> [[OP2:%.*]], i64 noundef [[VL:%.*]]) #[[ATTR0]] {
+// CHECK-RV64-NEXT: [[ENTRY:.*:]]
+// CHECK-RV64-NEXT: [[TMP0:%.*]] = call <vscale x 8 x i1> @llvm.riscv.vmfle.mask.nxv8bf16.nxv8bf16.i64(<vscale x 8 x i1> [[MASKEDOFF]], <vscale x 8 x bfloat> [[OP1]], <vscale x 8 x bfloat> [[OP2]], <vscale x 8 x i1> [[MASK]], i64 [[VL]])
+// CHECK-RV64-NEXT: ret <vscale x 8 x i1> [[TMP0]]
+//
+vbool8_t test_vmfle_vv_bf16m2_b8_mu(vbool8_t mask, vbool8_t maskedoff, vbfloat16m2_t op1, vbfloat16m2_t op2, size_t vl) {
+ return __riscv_vmfle_mu(mask, maskedoff, op1, op2, vl);
+}
+
+// CHECK-RV64-LABEL: define dso_local <vscale x 8 x i1> @test_vmfle_vf_bf16m2_b8_mu(
+// CHECK-RV64-SAME: <vscale x 8 x i1> [[MASK:%.*]], <vscale x 8 x i1> [[MASKEDOFF:%.*]], <vscale x 8 x bfloat> [[OP1:%.*]], bfloat noundef [[OP2:%.*]], i64 noundef [[VL:%.*]]) #[[ATTR0]] {
+// CHECK-RV64-NEXT: [[ENTRY:.*:]]
+// CHECK-RV64-NEXT: [[TMP0:%.*]] = call <vscale x 8 x i1> @llvm.riscv.vmfle.mask.nxv8bf16.bf16.i64(<vscale x 8 x i1> [[MASKEDOFF]], <vscale x 8 x bfloat> [[OP1]], bfloat [[OP2]], <vscale x 8 x i1> [[MASK]], i64 [[VL]])
+// CHECK-RV64-NEXT: ret <vscale x 8 x i1> [[TMP0]]
+//
+vbool8_t test_vmfle_vf_bf16m2_b8_mu(vbool8_t mask, vbool8_t maskedoff, vbfloat16m2_t op1, __bf16 op2, size_t vl) {
+ return __riscv_vmfle_mu(mask, maskedoff, op1, op2, vl);
+}
+
+// CHECK-RV64-LABEL: define dso_local <vscale x 16 x i1> @test_vmfle_vv_bf16m4_b4_mu(
+// CHECK-RV64-SAME: <vscale x 16 x i1> [[MASK:%.*]], <vscale x 16 x i1> [[MASKEDOFF:%.*]], <vscale x 16 x bfloat> [[OP1:%.*]], <vscale x 16 x bfloat> [[OP2:%.*]], i64 noundef [[VL:%.*]]) #[[ATTR0]] {
+// CHECK-RV64-NEXT: [[ENTRY:.*:]]
+// CHECK-RV64-NEXT: [[TMP0:%.*]] = call <vscale x 16 x i1> @llvm.riscv.vmfle.mask.nxv16bf16.nxv16bf16.i64(<vscale x 16 x i1> [[MASKEDOFF]], <vscale x 16 x bfloat> [[OP1]], <vscale x 16 x bfloat> [[OP2]], <vscale x 16 x i1> [[MASK]], i64 [[VL]])
+// CHECK-RV64-NEXT: ret <vscale x 16 x i1> [[TMP0]]
+//
+vbool4_t test_vmfle_vv_bf16m4_b4_mu(vbool4_t mask, vbool4_t maskedoff, vbfloat16m4_t op1, vbfloat16m4_t op2, size_t vl) {
+ return __riscv_vmfle_mu(mask, maskedoff, op1, op2, vl);
+}
+
+// CHECK-RV64-LABEL: define dso_local <vscale x 16 x i1> @test_vmfle_vf_bf16m4_b4_mu(
+// CHECK-RV64-SAME: <vscale x 16 x i1> [[MASK:%.*]], <vscale x 16 x i1> [[MASKEDOFF:%.*]], <vscale x 16 x bfloat> [[OP1:%.*]], bfloat noundef [[OP2:%.*]], i64 noundef [[VL:%.*]]) #[[ATTR0]] {
+// CHECK-RV64-NEXT: [[ENTRY:.*:]]
+// CHECK-RV64-NEXT: [[TMP0:%.*]] = call <vscale x 16 x i1> @llvm.riscv.vmfle.mask.nxv16bf16.bf16.i64(<vscale x 16 x i1> [[MASKEDOFF]], <vscale x 16 x bfloat> [[OP1]], bfloat [[OP2]], <vscale x 16 x i1> [[MASK]], i64 [[VL]])
+// CHECK-RV64-NEXT: ret <vscale x 16 x i1> [[TMP0]]
+//
+vbool4_t test_vmfle_vf_bf16m4_b4_mu(vbool4_t mask, vbool4_t maskedoff, vbfloat16m4_t op1, __bf16 op2, size_t vl) {
+ return __riscv_vmfle_mu(mask, maskedoff, op1, op2, vl);
+}
+
+// CHECK-RV64-LABEL: define dso_local <vscale x 32 x i1> @test_vmfle_vv_bf16m8_b2_mu(
+// CHECK-RV64-SAME: <vscale x 32 x i1> [[MASK:%.*]], <vscale x 32 x i1> [[MASKEDOFF:%.*]], <vscale x 32 x bfloat> [[OP1:%.*]], <vscale x 32 x bfloat> [[OP2:%.*]], i64 noundef [[VL:%.*]]) #[[ATTR0]] {
+// CHECK-RV64-NEXT: [[ENTRY:.*:]]
+// CHECK-RV64-NEXT: [[TMP0:%.*]] = call <vscale x 32 x i1> @llvm.riscv.vmfle.mask.nxv32bf16.nxv32bf16.i64(<vscale x 32 x i1> [[MASKEDOFF]], <vscale x 32 x bfloat> [[OP1]], <vscale x 32 x bfloat> [[OP2]], <vscale x 32 x i1> [[MASK]], i64 [[VL]])
+// CHECK-RV64-NEXT: ret <vscale x 32 x i1> [[TMP0]]
+//
+vbool2_t test_vmfle_vv_bf16m8_b2_mu(vbool2_t mask, vbool2_t maskedoff, vbfloat16m8_t op1, vbfloat16m8_t op2, size_t vl) {
+ return __riscv_vmfle_mu(mask, maskedoff, op1, op2, vl);
+}
+
+// CHECK-RV64-LABEL: define dso_local <vscale x 32 x i1> @test_vmfle_vf_bf16m8_b2_mu(
+// CHECK-RV64-SAME: <vscale x 32 x i1> [[MASK:%.*]], <vscale x 32 x i1> [[MASKEDOFF:%.*]], <vscale x 32 x bfloat> [[OP1:%.*]], bfloat noundef [[OP2:%.*]], i64 noundef [[VL:%.*]]) #[[ATTR0]] {
+// CHECK-RV64-NEXT: [[ENTRY:.*:]]
+// CHECK-RV64-NEXT: [[TMP0:%.*]] = call <vscale x 32 x i1> @llvm.riscv.vmfle.mask.nxv32bf16.bf16.i64(<vscale x 32 x i1> [[MASKEDOFF]], <vscale x 32 x bfloat> [[OP1]], bfloat [[OP2]], <vscale x 32 x i1> [[MASK]], i64 [[VL]])
+// CHECK-RV64-NEXT: ret <vscale x 32 x i1> [[TMP0]]
+//
+vbool2_t test_vmfle_vf_bf16m8_b2_mu(vbool2_t mask, vbool2_t maskedoff, vbfloat16m8_t op1, __bf16 op2, size_t vl) {
+ return __riscv_vmfle_mu(mask, maskedoff, op1, op2, vl);
+}
+
diff --git a/clang/test/CodeGen/RISCV/rvv-intrinsics-autogenerated/zvfbfa/policy/overloaded/vmflt.c b/clang/test/CodeGen/RISCV/rvv-intrinsics-autogenerated/zvfbfa/policy/overloaded/vmflt.c
new file mode 100644
index 0000000..24b3f9c
--- /dev/null
+++ b/clang/test/CodeGen/RISCV/rvv-intrinsics-autogenerated/zvfbfa/policy/overloaded/vmflt.c
@@ -0,0 +1,129 @@
+// NOTE: Assertions have been autogenerated by utils/update_cc_test_checks.py UTC_ARGS: --version 5
+// REQUIRES: riscv-registered-target
+// RUN: %clang_cc1 -triple riscv64 -target-feature +v \
+// RUN: -target-feature +experimental-zvfbfa -disable-O0-optnone \
+// RUN: -emit-llvm %s -o - | opt -S -passes=mem2reg | \
+// RUN: FileCheck --check-prefix=CHECK-RV64 %s
+
+#include <riscv_vector.h>
+
+// CHECK-RV64-LABEL: define dso_local <vscale x 1 x i1> @test_vmflt_vv_bf16mf4_b64_mu(
+// CHECK-RV64-SAME: <vscale x 1 x i1> [[MASK:%.*]], <vscale x 1 x i1> [[MASKEDOFF:%.*]], <vscale x 1 x bfloat> [[OP1:%.*]], <vscale x 1 x bfloat> [[OP2:%.*]], i64 noundef [[VL:%.*]]) #[[ATTR0:[0-9]+]] {
+// CHECK-RV64-NEXT: [[ENTRY:.*:]]
+// CHECK-RV64-NEXT: [[TMP0:%.*]] = call <vscale x 1 x i1> @llvm.riscv.vmflt.mask.nxv1bf16.nxv1bf16.i64(<vscale x 1 x i1> [[MASKEDOFF]], <vscale x 1 x bfloat> [[OP1]], <vscale x 1 x bfloat> [[OP2]], <vscale x 1 x i1> [[MASK]], i64 [[VL]])
+// CHECK-RV64-NEXT: ret <vscale x 1 x i1> [[TMP0]]
+//
+vbool64_t test_vmflt_vv_bf16mf4_b64_mu(vbool64_t mask, vbool64_t maskedoff, vbfloat16mf4_t op1, vbfloat16mf4_t op2, size_t vl) {
+ return __riscv_vmflt_mu(mask, maskedoff, op1, op2, vl);
+}
+
+// CHECK-RV64-LABEL: define dso_local <vscale x 1 x i1> @test_vmflt_vf_bf16mf4_b64_mu(
+// CHECK-RV64-SAME: <vscale x 1 x i1> [[MASK:%.*]], <vscale x 1 x i1> [[MASKEDOFF:%.*]], <vscale x 1 x bfloat> [[OP1:%.*]], bfloat noundef [[OP2:%.*]], i64 noundef [[VL:%.*]]) #[[ATTR0]] {
+// CHECK-RV64-NEXT: [[ENTRY:.*:]]
+// CHECK-RV64-NEXT: [[TMP0:%.*]] = call <vscale x 1 x i1> @llvm.riscv.vmflt.mask.nxv1bf16.bf16.i64(<vscale x 1 x i1> [[MASKEDOFF]], <vscale x 1 x bfloat> [[OP1]], bfloat [[OP2]], <vscale x 1 x i1> [[MASK]], i64 [[VL]])
+// CHECK-RV64-NEXT: ret <vscale x 1 x i1> [[TMP0]]
+//
+vbool64_t test_vmflt_vf_bf16mf4_b64_mu(vbool64_t mask, vbool64_t maskedoff, vbfloat16mf4_t op1, __bf16 op2, size_t vl) {
+ return __riscv_vmflt_mu(mask, maskedoff, op1, op2, vl);
+}
+
+// CHECK-RV64-LABEL: define dso_local <vscale x 2 x i1> @test_vmflt_vv_bf16mf2_b32_mu(
+// CHECK-RV64-SAME: <vscale x 2 x i1> [[MASK:%.*]], <vscale x 2 x i1> [[MASKEDOFF:%.*]], <vscale x 2 x bfloat> [[OP1:%.*]], <vscale x 2 x bfloat> [[OP2:%.*]], i64 noundef [[VL:%.*]]) #[[ATTR0]] {
+// CHECK-RV64-NEXT: [[ENTRY:.*:]]
+// CHECK-RV64-NEXT: [[TMP0:%.*]] = call <vscale x 2 x i1> @llvm.riscv.vmflt.mask.nxv2bf16.nxv2bf16.i64(<vscale x 2 x i1> [[MASKEDOFF]], <vscale x 2 x bfloat> [[OP1]], <vscale x 2 x bfloat> [[OP2]], <vscale x 2 x i1> [[MASK]], i64 [[VL]])
+// CHECK-RV64-NEXT: ret <vscale x 2 x i1> [[TMP0]]
+//
+vbool32_t test_vmflt_vv_bf16mf2_b32_mu(vbool32_t mask, vbool32_t maskedoff, vbfloat16mf2_t op1, vbfloat16mf2_t op2, size_t vl) {
+ return __riscv_vmflt_mu(mask, maskedoff, op1, op2, vl);
+}
+
+// CHECK-RV64-LABEL: define dso_local <vscale x 2 x i1> @test_vmflt_vf_bf16mf2_b32_mu(
+// CHECK-RV64-SAME: <vscale x 2 x i1> [[MASK:%.*]], <vscale x 2 x i1> [[MASKEDOFF:%.*]], <vscale x 2 x bfloat> [[OP1:%.*]], bfloat noundef [[OP2:%.*]], i64 noundef [[VL:%.*]]) #[[ATTR0]] {
+// CHECK-RV64-NEXT: [[ENTRY:.*:]]
+// CHECK-RV64-NEXT: [[TMP0:%.*]] = call <vscale x 2 x i1> @llvm.riscv.vmflt.mask.nxv2bf16.bf16.i64(<vscale x 2 x i1> [[MASKEDOFF]], <vscale x 2 x bfloat> [[OP1]], bfloat [[OP2]], <vscale x 2 x i1> [[MASK]], i64 [[VL]])
+// CHECK-RV64-NEXT: ret <vscale x 2 x i1> [[TMP0]]
+//
+vbool32_t test_vmflt_vf_bf16mf2_b32_mu(vbool32_t mask, vbool32_t maskedoff, vbfloat16mf2_t op1, __bf16 op2, size_t vl) {
+ return __riscv_vmflt_mu(mask, maskedoff, op1, op2, vl);
+}
+
+// CHECK-RV64-LABEL: define dso_local <vscale x 4 x i1> @test_vmflt_vv_bf16m1_b16_mu(
+// CHECK-RV64-SAME: <vscale x 4 x i1> [[MASK:%.*]], <vscale x 4 x i1> [[MASKEDOFF:%.*]], <vscale x 4 x bfloat> [[OP1:%.*]], <vscale x 4 x bfloat> [[OP2:%.*]], i64 noundef [[VL:%.*]]) #[[ATTR0]] {
+// CHECK-RV64-NEXT: [[ENTRY:.*:]]
+// CHECK-RV64-NEXT: [[TMP0:%.*]] = call <vscale x 4 x i1> @llvm.riscv.vmflt.mask.nxv4bf16.nxv4bf16.i64(<vscale x 4 x i1> [[MASKEDOFF]], <vscale x 4 x bfloat> [[OP1]], <vscale x 4 x bfloat> [[OP2]], <vscale x 4 x i1> [[MASK]], i64 [[VL]])
+// CHECK-RV64-NEXT: ret <vscale x 4 x i1> [[TMP0]]
+//
+vbool16_t test_vmflt_vv_bf16m1_b16_mu(vbool16_t mask, vbool16_t maskedoff, vbfloat16m1_t op1, vbfloat16m1_t op2, size_t vl) {
+ return __riscv_vmflt_mu(mask, maskedoff, op1, op2, vl);
+}
+
+// CHECK-RV64-LABEL: define dso_local <vscale x 4 x i1> @test_vmflt_vf_bf16m1_b16_mu(
+// CHECK-RV64-SAME: <vscale x 4 x i1> [[MASK:%.*]], <vscale x 4 x i1> [[MASKEDOFF:%.*]], <vscale x 4 x bfloat> [[OP1:%.*]], bfloat noundef [[OP2:%.*]], i64 noundef [[VL:%.*]]) #[[ATTR0]] {
+// CHECK-RV64-NEXT: [[ENTRY:.*:]]
+// CHECK-RV64-NEXT: [[TMP0:%.*]] = call <vscale x 4 x i1> @llvm.riscv.vmflt.mask.nxv4bf16.bf16.i64(<vscale x 4 x i1> [[MASKEDOFF]], <vscale x 4 x bfloat> [[OP1]], bfloat [[OP2]], <vscale x 4 x i1> [[MASK]], i64 [[VL]])
+// CHECK-RV64-NEXT: ret <vscale x 4 x i1> [[TMP0]]
+//
+vbool16_t test_vmflt_vf_bf16m1_b16_mu(vbool16_t mask, vbool16_t maskedoff, vbfloat16m1_t op1, __bf16 op2, size_t vl) {
+ return __riscv_vmflt_mu(mask, maskedoff, op1, op2, vl);
+}
+
+// CHECK-RV64-LABEL: define dso_local <vscale x 8 x i1> @test_vmflt_vv_bf16m2_b8_mu(
+// CHECK-RV64-SAME: <vscale x 8 x i1> [[MASK:%.*]], <vscale x 8 x i1> [[MASKEDOFF:%.*]], <vscale x 8 x bfloat> [[OP1:%.*]], <vscale x 8 x bfloat> [[OP2:%.*]], i64 noundef [[VL:%.*]]) #[[ATTR0]] {
+// CHECK-RV64-NEXT: [[ENTRY:.*:]]
+// CHECK-RV64-NEXT: [[TMP0:%.*]] = call <vscale x 8 x i1> @llvm.riscv.vmflt.mask.nxv8bf16.nxv8bf16.i64(<vscale x 8 x i1> [[MASKEDOFF]], <vscale x 8 x bfloat> [[OP1]], <vscale x 8 x bfloat> [[OP2]], <vscale x 8 x i1> [[MASK]], i64 [[VL]])
+// CHECK-RV64-NEXT: ret <vscale x 8 x i1> [[TMP0]]
+//
+vbool8_t test_vmflt_vv_bf16m2_b8_mu(vbool8_t mask, vbool8_t maskedoff, vbfloat16m2_t op1, vbfloat16m2_t op2, size_t vl) {
+ return __riscv_vmflt_mu(mask, maskedoff, op1, op2, vl);
+}
+
+// CHECK-RV64-LABEL: define dso_local <vscale x 8 x i1> @test_vmflt_vf_bf16m2_b8_mu(
+// CHECK-RV64-SAME: <vscale x 8 x i1> [[MASK:%.*]], <vscale x 8 x i1> [[MASKEDOFF:%.*]], <vscale x 8 x bfloat> [[OP1:%.*]], bfloat noundef [[OP2:%.*]], i64 noundef [[VL:%.*]]) #[[ATTR0]] {
+// CHECK-RV64-NEXT: [[ENTRY:.*:]]
+// CHECK-RV64-NEXT: [[TMP0:%.*]] = call <vscale x 8 x i1> @llvm.riscv.vmflt.mask.nxv8bf16.bf16.i64(<vscale x 8 x i1> [[MASKEDOFF]], <vscale x 8 x bfloat> [[OP1]], bfloat [[OP2]], <vscale x 8 x i1> [[MASK]], i64 [[VL]])
+// CHECK-RV64-NEXT: ret <vscale x 8 x i1> [[TMP0]]
+//
+vbool8_t test_vmflt_vf_bf16m2_b8_mu(vbool8_t mask, vbool8_t maskedoff, vbfloat16m2_t op1, __bf16 op2, size_t vl) {
+ return __riscv_vmflt_mu(mask, maskedoff, op1, op2, vl);
+}
+
+// CHECK-RV64-LABEL: define dso_local <vscale x 16 x i1> @test_vmflt_vv_bf16m4_b4_mu(
+// CHECK-RV64-SAME: <vscale x 16 x i1> [[MASK:%.*]], <vscale x 16 x i1> [[MASKEDOFF:%.*]], <vscale x 16 x bfloat> [[OP1:%.*]], <vscale x 16 x bfloat> [[OP2:%.*]], i64 noundef [[VL:%.*]]) #[[ATTR0]] {
+// CHECK-RV64-NEXT: [[ENTRY:.*:]]
+// CHECK-RV64-NEXT: [[TMP0:%.*]] = call <vscale x 16 x i1> @llvm.riscv.vmflt.mask.nxv16bf16.nxv16bf16.i64(<vscale x 16 x i1> [[MASKEDOFF]], <vscale x 16 x bfloat> [[OP1]], <vscale x 16 x bfloat> [[OP2]], <vscale x 16 x i1> [[MASK]], i64 [[VL]])
+// CHECK-RV64-NEXT: ret <vscale x 16 x i1> [[TMP0]]
+//
+vbool4_t test_vmflt_vv_bf16m4_b4_mu(vbool4_t mask, vbool4_t maskedoff, vbfloat16m4_t op1, vbfloat16m4_t op2, size_t vl) {
+ return __riscv_vmflt_mu(mask, maskedoff, op1, op2, vl);
+}
+
+// CHECK-RV64-LABEL: define dso_local <vscale x 16 x i1> @test_vmflt_vf_bf16m4_b4_mu(
+// CHECK-RV64-SAME: <vscale x 16 x i1> [[MASK:%.*]], <vscale x 16 x i1> [[MASKEDOFF:%.*]], <vscale x 16 x bfloat> [[OP1:%.*]], bfloat noundef [[OP2:%.*]], i64 noundef [[VL:%.*]]) #[[ATTR0]] {
+// CHECK-RV64-NEXT: [[ENTRY:.*:]]
+// CHECK-RV64-NEXT: [[TMP0:%.*]] = call <vscale x 16 x i1> @llvm.riscv.vmflt.mask.nxv16bf16.bf16.i64(<vscale x 16 x i1> [[MASKEDOFF]], <vscale x 16 x bfloat> [[OP1]], bfloat [[OP2]], <vscale x 16 x i1> [[MASK]], i64 [[VL]])
+// CHECK-RV64-NEXT: ret <vscale x 16 x i1> [[TMP0]]
+//
+vbool4_t test_vmflt_vf_bf16m4_b4_mu(vbool4_t mask, vbool4_t maskedoff, vbfloat16m4_t op1, __bf16 op2, size_t vl) {
+ return __riscv_vmflt_mu(mask, maskedoff, op1, op2, vl);
+}
+
+// CHECK-RV64-LABEL: define dso_local <vscale x 32 x i1> @test_vmflt_vv_bf16m8_b2_mu(
+// CHECK-RV64-SAME: <vscale x 32 x i1> [[MASK:%.*]], <vscale x 32 x i1> [[MASKEDOFF:%.*]], <vscale x 32 x bfloat> [[OP1:%.*]], <vscale x 32 x bfloat> [[OP2:%.*]], i64 noundef [[VL:%.*]]) #[[ATTR0]] {
+// CHECK-RV64-NEXT: [[ENTRY:.*:]]
+// CHECK-RV64-NEXT: [[TMP0:%.*]] = call <vscale x 32 x i1> @llvm.riscv.vmflt.mask.nxv32bf16.nxv32bf16.i64(<vscale x 32 x i1> [[MASKEDOFF]], <vscale x 32 x bfloat> [[OP1]], <vscale x 32 x bfloat> [[OP2]], <vscale x 32 x i1> [[MASK]], i64 [[VL]])
+// CHECK-RV64-NEXT: ret <vscale x 32 x i1> [[TMP0]]
+//
+vbool2_t test_vmflt_vv_bf16m8_b2_mu(vbool2_t mask, vbool2_t maskedoff, vbfloat16m8_t op1, vbfloat16m8_t op2, size_t vl) {
+ return __riscv_vmflt_mu(mask, maskedoff, op1, op2, vl);
+}
+
+// CHECK-RV64-LABEL: define dso_local <vscale x 32 x i1> @test_vmflt_vf_bf16m8_b2_mu(
+// CHECK-RV64-SAME: <vscale x 32 x i1> [[MASK:%.*]], <vscale x 32 x i1> [[MASKEDOFF:%.*]], <vscale x 32 x bfloat> [[OP1:%.*]], bfloat noundef [[OP2:%.*]], i64 noundef [[VL:%.*]]) #[[ATTR0]] {
+// CHECK-RV64-NEXT: [[ENTRY:.*:]]
+// CHECK-RV64-NEXT: [[TMP0:%.*]] = call <vscale x 32 x i1> @llvm.riscv.vmflt.mask.nxv32bf16.bf16.i64(<vscale x 32 x i1> [[MASKEDOFF]], <vscale x 32 x bfloat> [[OP1]], bfloat [[OP2]], <vscale x 32 x i1> [[MASK]], i64 [[VL]])
+// CHECK-RV64-NEXT: ret <vscale x 32 x i1> [[TMP0]]
+//
+vbool2_t test_vmflt_vf_bf16m8_b2_mu(vbool2_t mask, vbool2_t maskedoff, vbfloat16m8_t op1, __bf16 op2, size_t vl) {
+ return __riscv_vmflt_mu(mask, maskedoff, op1, op2, vl);
+}
+
diff --git a/clang/test/CodeGen/RISCV/rvv-intrinsics-autogenerated/zvfbfa/policy/overloaded/vmfne.c b/clang/test/CodeGen/RISCV/rvv-intrinsics-autogenerated/zvfbfa/policy/overloaded/vmfne.c
new file mode 100644
index 0000000..ca3e134
--- /dev/null
+++ b/clang/test/CodeGen/RISCV/rvv-intrinsics-autogenerated/zvfbfa/policy/overloaded/vmfne.c
@@ -0,0 +1,129 @@
+// NOTE: Assertions have been autogenerated by utils/update_cc_test_checks.py UTC_ARGS: --version 5
+// REQUIRES: riscv-registered-target
+// RUN: %clang_cc1 -triple riscv64 -target-feature +v \
+// RUN: -target-feature +experimental-zvfbfa -disable-O0-optnone \
+// RUN: -emit-llvm %s -o - | opt -S -passes=mem2reg | \
+// RUN: FileCheck --check-prefix=CHECK-RV64 %s
+
+#include <riscv_vector.h>
+
+// CHECK-RV64-LABEL: define dso_local <vscale x 1 x i1> @test_vmfne_vv_bf16mf4_b64_mu(
+// CHECK-RV64-SAME: <vscale x 1 x i1> [[MASK:%.*]], <vscale x 1 x i1> [[MASKEDOFF:%.*]], <vscale x 1 x bfloat> [[OP1:%.*]], <vscale x 1 x bfloat> [[OP2:%.*]], i64 noundef [[VL:%.*]]) #[[ATTR0:[0-9]+]] {
+// CHECK-RV64-NEXT: [[ENTRY:.*:]]
+// CHECK-RV64-NEXT: [[TMP0:%.*]] = call <vscale x 1 x i1> @llvm.riscv.vmfne.mask.nxv1bf16.nxv1bf16.i64(<vscale x 1 x i1> [[MASKEDOFF]], <vscale x 1 x bfloat> [[OP1]], <vscale x 1 x bfloat> [[OP2]], <vscale x 1 x i1> [[MASK]], i64 [[VL]])
+// CHECK-RV64-NEXT: ret <vscale x 1 x i1> [[TMP0]]
+//
+vbool64_t test_vmfne_vv_bf16mf4_b64_mu(vbool64_t mask, vbool64_t maskedoff, vbfloat16mf4_t op1, vbfloat16mf4_t op2, size_t vl) {
+ return __riscv_vmfne_mu(mask, maskedoff, op1, op2, vl);
+}
+
+// CHECK-RV64-LABEL: define dso_local <vscale x 1 x i1> @test_vmfne_vf_bf16mf4_b64_mu(
+// CHECK-RV64-SAME: <vscale x 1 x i1> [[MASK:%.*]], <vscale x 1 x i1> [[MASKEDOFF:%.*]], <vscale x 1 x bfloat> [[OP1:%.*]], bfloat noundef [[OP2:%.*]], i64 noundef [[VL:%.*]]) #[[ATTR0]] {
+// CHECK-RV64-NEXT: [[ENTRY:.*:]]
+// CHECK-RV64-NEXT: [[TMP0:%.*]] = call <vscale x 1 x i1> @llvm.riscv.vmfne.mask.nxv1bf16.bf16.i64(<vscale x 1 x i1> [[MASKEDOFF]], <vscale x 1 x bfloat> [[OP1]], bfloat [[OP2]], <vscale x 1 x i1> [[MASK]], i64 [[VL]])
+// CHECK-RV64-NEXT: ret <vscale x 1 x i1> [[TMP0]]
+//
+vbool64_t test_vmfne_vf_bf16mf4_b64_mu(vbool64_t mask, vbool64_t maskedoff, vbfloat16mf4_t op1, __bf16 op2, size_t vl) {
+ return __riscv_vmfne_mu(mask, maskedoff, op1, op2, vl);
+}
+
+// CHECK-RV64-LABEL: define dso_local <vscale x 2 x i1> @test_vmfne_vv_bf16mf2_b32_mu(
+// CHECK-RV64-SAME: <vscale x 2 x i1> [[MASK:%.*]], <vscale x 2 x i1> [[MASKEDOFF:%.*]], <vscale x 2 x bfloat> [[OP1:%.*]], <vscale x 2 x bfloat> [[OP2:%.*]], i64 noundef [[VL:%.*]]) #[[ATTR0]] {
+// CHECK-RV64-NEXT: [[ENTRY:.*:]]
+// CHECK-RV64-NEXT: [[TMP0:%.*]] = call <vscale x 2 x i1> @llvm.riscv.vmfne.mask.nxv2bf16.nxv2bf16.i64(<vscale x 2 x i1> [[MASKEDOFF]], <vscale x 2 x bfloat> [[OP1]], <vscale x 2 x bfloat> [[OP2]], <vscale x 2 x i1> [[MASK]], i64 [[VL]])
+// CHECK-RV64-NEXT: ret <vscale x 2 x i1> [[TMP0]]
+//
+vbool32_t test_vmfne_vv_bf16mf2_b32_mu(vbool32_t mask, vbool32_t maskedoff, vbfloat16mf2_t op1, vbfloat16mf2_t op2, size_t vl) {
+ return __riscv_vmfne_mu(mask, maskedoff, op1, op2, vl);
+}
+
+// CHECK-RV64-LABEL: define dso_local <vscale x 2 x i1> @test_vmfne_vf_bf16mf2_b32_mu(
+// CHECK-RV64-SAME: <vscale x 2 x i1> [[MASK:%.*]], <vscale x 2 x i1> [[MASKEDOFF:%.*]], <vscale x 2 x bfloat> [[OP1:%.*]], bfloat noundef [[OP2:%.*]], i64 noundef [[VL:%.*]]) #[[ATTR0]] {
+// CHECK-RV64-NEXT: [[ENTRY:.*:]]
+// CHECK-RV64-NEXT: [[TMP0:%.*]] = call <vscale x 2 x i1> @llvm.riscv.vmfne.mask.nxv2bf16.bf16.i64(<vscale x 2 x i1> [[MASKEDOFF]], <vscale x 2 x bfloat> [[OP1]], bfloat [[OP2]], <vscale x 2 x i1> [[MASK]], i64 [[VL]])
+// CHECK-RV64-NEXT: ret <vscale x 2 x i1> [[TMP0]]
+//
+vbool32_t test_vmfne_vf_bf16mf2_b32_mu(vbool32_t mask, vbool32_t maskedoff, vbfloat16mf2_t op1, __bf16 op2, size_t vl) {
+ return __riscv_vmfne_mu(mask, maskedoff, op1, op2, vl);
+}
+
+// CHECK-RV64-LABEL: define dso_local <vscale x 4 x i1> @test_vmfne_vv_bf16m1_b16_mu(
+// CHECK-RV64-SAME: <vscale x 4 x i1> [[MASK:%.*]], <vscale x 4 x i1> [[MASKEDOFF:%.*]], <vscale x 4 x bfloat> [[OP1:%.*]], <vscale x 4 x bfloat> [[OP2:%.*]], i64 noundef [[VL:%.*]]) #[[ATTR0]] {
+// CHECK-RV64-NEXT: [[ENTRY:.*:]]
+// CHECK-RV64-NEXT: [[TMP0:%.*]] = call <vscale x 4 x i1> @llvm.riscv.vmfne.mask.nxv4bf16.nxv4bf16.i64(<vscale x 4 x i1> [[MASKEDOFF]], <vscale x 4 x bfloat> [[OP1]], <vscale x 4 x bfloat> [[OP2]], <vscale x 4 x i1> [[MASK]], i64 [[VL]])
+// CHECK-RV64-NEXT: ret <vscale x 4 x i1> [[TMP0]]
+//
+vbool16_t test_vmfne_vv_bf16m1_b16_mu(vbool16_t mask, vbool16_t maskedoff, vbfloat16m1_t op1, vbfloat16m1_t op2, size_t vl) {
+ return __riscv_vmfne_mu(mask, maskedoff, op1, op2, vl);
+}
+
+// CHECK-RV64-LABEL: define dso_local <vscale x 4 x i1> @test_vmfne_vf_bf16m1_b16_mu(
+// CHECK-RV64-SAME: <vscale x 4 x i1> [[MASK:%.*]], <vscale x 4 x i1> [[MASKEDOFF:%.*]], <vscale x 4 x bfloat> [[OP1:%.*]], bfloat noundef [[OP2:%.*]], i64 noundef [[VL:%.*]]) #[[ATTR0]] {
+// CHECK-RV64-NEXT: [[ENTRY:.*:]]
+// CHECK-RV64-NEXT: [[TMP0:%.*]] = call <vscale x 4 x i1> @llvm.riscv.vmfne.mask.nxv4bf16.bf16.i64(<vscale x 4 x i1> [[MASKEDOFF]], <vscale x 4 x bfloat> [[OP1]], bfloat [[OP2]], <vscale x 4 x i1> [[MASK]], i64 [[VL]])
+// CHECK-RV64-NEXT: ret <vscale x 4 x i1> [[TMP0]]
+//
+vbool16_t test_vmfne_vf_bf16m1_b16_mu(vbool16_t mask, vbool16_t maskedoff, vbfloat16m1_t op1, __bf16 op2, size_t vl) {
+ return __riscv_vmfne_mu(mask, maskedoff, op1, op2, vl);
+}
+
+// CHECK-RV64-LABEL: define dso_local <vscale x 8 x i1> @test_vmfne_vv_bf16m2_b8_mu(
+// CHECK-RV64-SAME: <vscale x 8 x i1> [[MASK:%.*]], <vscale x 8 x i1> [[MASKEDOFF:%.*]], <vscale x 8 x bfloat> [[OP1:%.*]], <vscale x 8 x bfloat> [[OP2:%.*]], i64 noundef [[VL:%.*]]) #[[ATTR0]] {
+// CHECK-RV64-NEXT: [[ENTRY:.*:]]
+// CHECK-RV64-NEXT: [[TMP0:%.*]] = call <vscale x 8 x i1> @llvm.riscv.vmfne.mask.nxv8bf16.nxv8bf16.i64(<vscale x 8 x i1> [[MASKEDOFF]], <vscale x 8 x bfloat> [[OP1]], <vscale x 8 x bfloat> [[OP2]], <vscale x 8 x i1> [[MASK]], i64 [[VL]])
+// CHECK-RV64-NEXT: ret <vscale x 8 x i1> [[TMP0]]
+//
+vbool8_t test_vmfne_vv_bf16m2_b8_mu(vbool8_t mask, vbool8_t maskedoff, vbfloat16m2_t op1, vbfloat16m2_t op2, size_t vl) {
+ return __riscv_vmfne_mu(mask, maskedoff, op1, op2, vl);
+}
+
+// CHECK-RV64-LABEL: define dso_local <vscale x 8 x i1> @test_vmfne_vf_bf16m2_b8_mu(
+// CHECK-RV64-SAME: <vscale x 8 x i1> [[MASK:%.*]], <vscale x 8 x i1> [[MASKEDOFF:%.*]], <vscale x 8 x bfloat> [[OP1:%.*]], bfloat noundef [[OP2:%.*]], i64 noundef [[VL:%.*]]) #[[ATTR0]] {
+// CHECK-RV64-NEXT: [[ENTRY:.*:]]
+// CHECK-RV64-NEXT: [[TMP0:%.*]] = call <vscale x 8 x i1> @llvm.riscv.vmfne.mask.nxv8bf16.bf16.i64(<vscale x 8 x i1> [[MASKEDOFF]], <vscale x 8 x bfloat> [[OP1]], bfloat [[OP2]], <vscale x 8 x i1> [[MASK]], i64 [[VL]])
+// CHECK-RV64-NEXT: ret <vscale x 8 x i1> [[TMP0]]
+//
+vbool8_t test_vmfne_vf_bf16m2_b8_mu(vbool8_t mask, vbool8_t maskedoff, vbfloat16m2_t op1, __bf16 op2, size_t vl) {
+ return __riscv_vmfne_mu(mask, maskedoff, op1, op2, vl);
+}
+
+// CHECK-RV64-LABEL: define dso_local <vscale x 16 x i1> @test_vmfne_vv_bf16m4_b4_mu(
+// CHECK-RV64-SAME: <vscale x 16 x i1> [[MASK:%.*]], <vscale x 16 x i1> [[MASKEDOFF:%.*]], <vscale x 16 x bfloat> [[OP1:%.*]], <vscale x 16 x bfloat> [[OP2:%.*]], i64 noundef [[VL:%.*]]) #[[ATTR0]] {
+// CHECK-RV64-NEXT: [[ENTRY:.*:]]
+// CHECK-RV64-NEXT: [[TMP0:%.*]] = call <vscale x 16 x i1> @llvm.riscv.vmfne.mask.nxv16bf16.nxv16bf16.i64(<vscale x 16 x i1> [[MASKEDOFF]], <vscale x 16 x bfloat> [[OP1]], <vscale x 16 x bfloat> [[OP2]], <vscale x 16 x i1> [[MASK]], i64 [[VL]])
+// CHECK-RV64-NEXT: ret <vscale x 16 x i1> [[TMP0]]
+//
+vbool4_t test_vmfne_vv_bf16m4_b4_mu(vbool4_t mask, vbool4_t maskedoff, vbfloat16m4_t op1, vbfloat16m4_t op2, size_t vl) {
+ return __riscv_vmfne_mu(mask, maskedoff, op1, op2, vl);
+}
+
+// CHECK-RV64-LABEL: define dso_local <vscale x 16 x i1> @test_vmfne_vf_bf16m4_b4_mu(
+// CHECK-RV64-SAME: <vscale x 16 x i1> [[MASK:%.*]], <vscale x 16 x i1> [[MASKEDOFF:%.*]], <vscale x 16 x bfloat> [[OP1:%.*]], bfloat noundef [[OP2:%.*]], i64 noundef [[VL:%.*]]) #[[ATTR0]] {
+// CHECK-RV64-NEXT: [[ENTRY:.*:]]
+// CHECK-RV64-NEXT: [[TMP0:%.*]] = call <vscale x 16 x i1> @llvm.riscv.vmfne.mask.nxv16bf16.bf16.i64(<vscale x 16 x i1> [[MASKEDOFF]], <vscale x 16 x bfloat> [[OP1]], bfloat [[OP2]], <vscale x 16 x i1> [[MASK]], i64 [[VL]])
+// CHECK-RV64-NEXT: ret <vscale x 16 x i1> [[TMP0]]
+//
+vbool4_t test_vmfne_vf_bf16m4_b4_mu(vbool4_t mask, vbool4_t maskedoff, vbfloat16m4_t op1, __bf16 op2, size_t vl) {
+ return __riscv_vmfne_mu(mask, maskedoff, op1, op2, vl);
+}
+
+// CHECK-RV64-LABEL: define dso_local <vscale x 32 x i1> @test_vmfne_vv_bf16m8_b2_mu(
+// CHECK-RV64-SAME: <vscale x 32 x i1> [[MASK:%.*]], <vscale x 32 x i1> [[MASKEDOFF:%.*]], <vscale x 32 x bfloat> [[OP1:%.*]], <vscale x 32 x bfloat> [[OP2:%.*]], i64 noundef [[VL:%.*]]) #[[ATTR0]] {
+// CHECK-RV64-NEXT: [[ENTRY:.*:]]
+// CHECK-RV64-NEXT: [[TMP0:%.*]] = call <vscale x 32 x i1> @llvm.riscv.vmfne.mask.nxv32bf16.nxv32bf16.i64(<vscale x 32 x i1> [[MASKEDOFF]], <vscale x 32 x bfloat> [[OP1]], <vscale x 32 x bfloat> [[OP2]], <vscale x 32 x i1> [[MASK]], i64 [[VL]])
+// CHECK-RV64-NEXT: ret <vscale x 32 x i1> [[TMP0]]
+//
+vbool2_t test_vmfne_vv_bf16m8_b2_mu(vbool2_t mask, vbool2_t maskedoff, vbfloat16m8_t op1, vbfloat16m8_t op2, size_t vl) {
+ return __riscv_vmfne_mu(mask, maskedoff, op1, op2, vl);
+}
+
+// CHECK-RV64-LABEL: define dso_local <vscale x 32 x i1> @test_vmfne_vf_bf16m8_b2_mu(
+// CHECK-RV64-SAME: <vscale x 32 x i1> [[MASK:%.*]], <vscale x 32 x i1> [[MASKEDOFF:%.*]], <vscale x 32 x bfloat> [[OP1:%.*]], bfloat noundef [[OP2:%.*]], i64 noundef [[VL:%.*]]) #[[ATTR0]] {
+// CHECK-RV64-NEXT: [[ENTRY:.*:]]
+// CHECK-RV64-NEXT: [[TMP0:%.*]] = call <vscale x 32 x i1> @llvm.riscv.vmfne.mask.nxv32bf16.bf16.i64(<vscale x 32 x i1> [[MASKEDOFF]], <vscale x 32 x bfloat> [[OP1]], bfloat [[OP2]], <vscale x 32 x i1> [[MASK]], i64 [[VL]])
+// CHECK-RV64-NEXT: ret <vscale x 32 x i1> [[TMP0]]
+//
+vbool2_t test_vmfne_vf_bf16m8_b2_mu(vbool2_t mask, vbool2_t maskedoff, vbfloat16m8_t op1, __bf16 op2, size_t vl) {
+ return __riscv_vmfne_mu(mask, maskedoff, op1, op2, vl);
+}
+
diff --git a/clang/test/CodeGen/X86/avx2-builtins.c b/clang/test/CodeGen/X86/avx2-builtins.c
index b798618..a505d70 100644
--- a/clang/test/CodeGen/X86/avx2-builtins.c
+++ b/clang/test/CodeGen/X86/avx2-builtins.c
@@ -1038,6 +1038,7 @@ __m256i test_mm256_mulhrs_epi16(__m256i a, __m256i b) {
// CHECK: call <16 x i16> @llvm.x86.avx2.pmul.hr.sw(<16 x i16> %{{.*}}, <16 x i16> %{{.*}})
return _mm256_mulhrs_epi16(a, b);
}
+TEST_CONSTEXPR(match_v16hi(_mm256_mulhrs_epi16((__m256i)(__v16hi){+100, +200, -300, -400, +500, +600, -700, +800, -900, -1000, +1100, +1200, -1300, -1400, +1500, +1600}, (__m256i)(__v16hi){+1600, -1500, +1400, -1300, +1200, -1100, +1000, -900, +800, -700, +600, -500, +400, -300, +200, -100}), +5, -9, -13, +16, +18, -20, -21, -22, -22, +21, +20, -18, -16, +13, +9, -5));
__m256i test_mm256_mullo_epi16(__m256i a, __m256i b) {
// CHECK-LABEL: test_mm256_mullo_epi16
diff --git a/clang/test/CodeGen/X86/avx512bw-builtins.c b/clang/test/CodeGen/X86/avx512bw-builtins.c
index fddf17d..55bf482 100644
--- a/clang/test/CodeGen/X86/avx512bw-builtins.c
+++ b/clang/test/CodeGen/X86/avx512bw-builtins.c
@@ -1596,18 +1596,24 @@ __m512i test_mm512_mulhrs_epi16(__m512i __A, __m512i __B) {
// CHECK: @llvm.x86.avx512.pmul.hr.sw.512
return _mm512_mulhrs_epi16(__A,__B);
}
+TEST_CONSTEXPR(match_v32hi(_mm512_mulhrs_epi16((__m512i)(__v32hi){+100, +200, -300, -400, +500, +600, -700, +800, -900, -1000, +1100, +1200, -1300, -1400, +1500, +1600, -1700, -1800, +1900, +2000, -2100, -2200, +2300, +2400, -2500, -2600, +2700, +2800, -2900, -3000, +3100, +3200}, (__m512i)(__v32hi){+3200, -3100, +3000, -2900, +2800, -2700, +2600, -2500, +2400, -2300, +2200, -2100, +2000, -1900, +1800, -1700, +1600, -1500, +1400, -1300, +1200, -1100, +1000, -900, +800, -700, +600, -500, +400, -300, +200, -100}), +10, -19, -27, +35, +43, -49, -56, -61, -66, +70, +74, -77, -79, +81, +82, -83, -83, +82, +81, -79, -77, +74, +70, -66, -61, +56, +49, -43, -35, +27, +19, -10));
+
__m512i test_mm512_mask_mulhrs_epi16(__m512i __W, __mmask32 __U, __m512i __A, __m512i __B) {
// CHECK-LABEL: test_mm512_mask_mulhrs_epi16
// CHECK: @llvm.x86.avx512.pmul.hr.sw.512
// CHECK: select <32 x i1> %{{.*}}, <32 x i16> %{{.*}}, <32 x i16> %{{.*}}
return _mm512_mask_mulhrs_epi16(__W,__U,__A,__B);
}
+TEST_CONSTEXPR(match_v32hi(_mm512_mask_mulhrs_epi16(_mm512_set1_epi16(1), 0x0000FFFF, (__m512i)(__v32hi){+100, +200, -300, -400, +500, +600, -700, +800, -900, -1000, +1100, +1200, -1300, -1400, +1500, +1600, -1700, -1800, +1900, +2000, -2100, -2200, +2300, +2400, -2500, -2600, +2700, +2800, -2900, -3000, +3100, +3200}, (__m512i)(__v32hi){+3200, -3100, +3000, -2900, +2800, -2700, +2600, -2500, +2400, -2300, +2200, -2100, +2000, -1900, +1800, -1700, +1600, -1500, +1400, -1300, +1200, -1100, +1000, -900, +800, -700, +600, -500, +400, -300, +200, -100}), +10, -19, -27, +35, +43, -49, -56, -61, -66, +70, +74, -77, -79, +81, +82, -83, +1, +1, +1, +1, +1, +1, +1, +1, +1, +1, +1, +1, +1, +1, +1, +1));
+
__m512i test_mm512_maskz_mulhrs_epi16(__mmask32 __U, __m512i __A, __m512i __B) {
// CHECK-LABEL: test_mm512_maskz_mulhrs_epi16
// CHECK: @llvm.x86.avx512.pmul.hr.sw.512
// CHECK: select <32 x i1> %{{.*}}, <32 x i16> %{{.*}}, <32 x i16> %{{.*}}
return _mm512_maskz_mulhrs_epi16(__U,__A,__B);
}
+TEST_CONSTEXPR(match_v32hi(_mm512_maskz_mulhrs_epi16(0x0000FFFF, (__m512i)(__v32hi){+100, +200, -300, -400, +500, +600, -700, +800, -900, -1000, +1100, +1200, -1300, -1400, +1500, +1600, -1700, -1800, +1900, +2000, -2100, -2200, +2300, +2400, -2500, -2600, +2700, +2800, -2900, -3000, +3100, +3200}, (__m512i)(__v32hi){+3200, -3100, +3000, -2900, +2800, -2700, +2600, -2500, +2400, -2300, +2200, -2100, +2000, -1900, +1800, -1700, +1600, -1500, +1400, -1300, +1200, -1100, +1000, -900, +800, -700, +600, -500, +400, -300, +200, -100}), +10, -19, -27, +35, +43, -49, -56, -61, -66, +70, +74, -77, -79, +81, +82, -83, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0));
+
__m512i test_mm512_mulhi_epi16(__m512i __A, __m512i __B) {
// CHECK-LABEL: test_mm512_mulhi_epi16
// CHECK: @llvm.x86.avx512.pmulh.w.512
diff --git a/clang/test/CodeGen/X86/avx512vlbw-builtins.c b/clang/test/CodeGen/X86/avx512vlbw-builtins.c
index d569283..95e4d40 100644
--- a/clang/test/CodeGen/X86/avx512vlbw-builtins.c
+++ b/clang/test/CodeGen/X86/avx512vlbw-builtins.c
@@ -2061,6 +2061,7 @@ __m128i test_mm_mask_mulhrs_epi16(__m128i __W, __mmask8 __U, __m128i __X, __m128
// CHECK: select <8 x i1> %{{.*}}, <8 x i16> %{{.*}}, <8 x i16> %{{.*}}
return _mm_mask_mulhrs_epi16(__W, __U, __X, __Y);
}
+TEST_CONSTEXPR(match_v8hi(_mm_mask_mulhrs_epi16(_mm_set1_epi16(1), 0x0F, (__m128i)(__v8hi){+100, +200, -300, -400, +500, +600, -700, +800}, (__m128i)(__v8hi){+8000, -7000, +6000, -5000, +4000, -3000, +2000, -1000}), +24, -43, -55, +61, +1, +1, +1, +1));
__m128i test_mm_maskz_mulhrs_epi16(__mmask8 __U, __m128i __X, __m128i __Y) {
// CHECK-LABEL: test_mm_maskz_mulhrs_epi16
@@ -2068,6 +2069,7 @@ __m128i test_mm_maskz_mulhrs_epi16(__mmask8 __U, __m128i __X, __m128i __Y) {
// CHECK: select <8 x i1> %{{.*}}, <8 x i16> %{{.*}}, <8 x i16> %{{.*}}
return _mm_maskz_mulhrs_epi16(__U, __X, __Y);
}
+TEST_CONSTEXPR(match_v8hi(_mm_maskz_mulhrs_epi16(0x0F, (__m128i)(__v8hi){+100, +200, -300, -400, +500, +600, -700, +800}, (__m128i)(__v8hi){+8000, -7000, +6000, -5000, +4000, -3000, +2000, -1000}), +24, -43, -55, +61, 0, 0, 0, 0));
__m256i test_mm256_mask_mulhrs_epi16(__m256i __W, __mmask16 __U, __m256i __X, __m256i __Y) {
// CHECK-LABEL: test_mm256_mask_mulhrs_epi16
@@ -2075,6 +2077,7 @@ __m256i test_mm256_mask_mulhrs_epi16(__m256i __W, __mmask16 __U, __m256i __X, __
// CHECK: select <16 x i1> %{{.*}}, <16 x i16> %{{.*}}, <16 x i16> %{{.*}}
return _mm256_mask_mulhrs_epi16(__W, __U, __X, __Y);
}
+TEST_CONSTEXPR(match_v16hi(_mm256_mask_mulhrs_epi16(_mm256_set1_epi16(1), 0xF00F, (__m256i)(__v16hi){+100, +200, -300, -400, +500, +600, -700, +800, -900, -1000, +1100, +1200, -1300, -1400, +1500, +1600}, (__m256i)(__v16hi){+1600, -1500, +1400, -1300, +1200, -1100, +1000, -900, +800, -700, +600, -500, +400, -300, +200, -100}), +5, -9, -13, +16, +1, +1, +1, +1, +1, +1, +1, +1, -16, +13, +9, -5));
__m256i test_mm256_maskz_mulhrs_epi16(__mmask16 __U, __m256i __X, __m256i __Y) {
// CHECK-LABEL: test_mm256_maskz_mulhrs_epi16
@@ -2082,6 +2085,7 @@ __m256i test_mm256_maskz_mulhrs_epi16(__mmask16 __U, __m256i __X, __m256i __Y) {
// CHECK: select <16 x i1> %{{.*}}, <16 x i16> %{{.*}}, <16 x i16> %{{.*}}
return _mm256_maskz_mulhrs_epi16(__U, __X, __Y);
}
+TEST_CONSTEXPR(match_v16hi(_mm256_maskz_mulhrs_epi16(0xF00F, (__m256i)(__v16hi){+100, +200, -300, -400, +500, +600, -700, +800, -900, -1000, +1100, +1200, -1300, -1400, +1500, +1600}, (__m256i)(__v16hi){+1600, -1500, +1400, -1300, +1200, -1100, +1000, -900, +800, -700, +600, -500, +400, -300, +200, -100}), +5, -9, -13, +16, 0, 0, 0, 0, 0, 0, 0, 0, -16, +13, +9, -5));
__m128i test_mm_mask_mulhi_epu16(__m128i __W, __mmask8 __U, __m128i __A, __m128i __B) {
// CHECK-LABEL: test_mm_mask_mulhi_epu16
diff --git a/clang/test/CodeGen/X86/mmx-builtins.c b/clang/test/CodeGen/X86/mmx-builtins.c
index d9041d4..c1ac57b 100644
--- a/clang/test/CodeGen/X86/mmx-builtins.c
+++ b/clang/test/CodeGen/X86/mmx-builtins.c
@@ -438,6 +438,7 @@ __m64 test_mm_mulhrs_pi16(__m64 a, __m64 b) {
// CHECK: call <8 x i16> @llvm.x86.ssse3.pmul.hr.sw.128(
return _mm_mulhrs_pi16(a, b);
}
+TEST_CONSTEXPR(match_v4hi(_mm_mulhrs_pi16((__m64)(__v4hi){+100, +200, -300, -400}, (__m64)(__v4hi){+30000, -20000, +10000, -5000}), +92, -122, -92, +61));
__m64 test_mm_mullo_pi16(__m64 a, __m64 b) {
// CHECK-LABEL: test_mm_mullo_pi16
diff --git a/clang/test/CodeGen/X86/ssse3-builtins.c b/clang/test/CodeGen/X86/ssse3-builtins.c
index 32abd9d..f70afc0 100644
--- a/clang/test/CodeGen/X86/ssse3-builtins.c
+++ b/clang/test/CodeGen/X86/ssse3-builtins.c
@@ -110,6 +110,7 @@ __m128i test_mm_mulhrs_epi16(__m128i a, __m128i b) {
// CHECK: call <8 x i16> @llvm.x86.ssse3.pmul.hr.sw.128(<8 x i16> %{{.*}}, <8 x i16> %{{.*}})
return _mm_mulhrs_epi16(a, b);
}
+TEST_CONSTEXPR(match_v8hi(_mm_mulhrs_epi16((__m128i)(__v8hi){+100, +200, -300, -400, +500, +600, -700, +800}, (__m128i)(__v8hi){+8000, -7000, +6000, -5000, +4000, -3000, +2000, -1000}), +24, -43, -55, +61, +61, -55, -43, -24));
__m128i test_mm_shuffle_epi8(__m128i a, __m128i b) {
// CHECK-LABEL: test_mm_shuffle_epi8
diff --git a/clang/test/CodeGenHLSL/resources/ByteAddressBuffers-methods.hlsl b/clang/test/CodeGenHLSL/resources/ByteAddressBuffers-methods.hlsl
new file mode 100644
index 0000000..9dd0228
--- /dev/null
+++ b/clang/test/CodeGenHLSL/resources/ByteAddressBuffers-methods.hlsl
@@ -0,0 +1,45 @@
+// RUN: %clang_cc1 -triple dxil-pc-shadermodel6.3-library -finclude-default-header -emit-llvm -disable-llvm-passes -o - %s | llvm-cxxfilt | FileCheck %s --check-prefixes=CHECK,DXIL
+// RUN-DISABLED: %clang_cc1 -triple spirv-vulkan-library -finclude-default-header -emit-llvm -disable-llvm-passes -o - %s | llvm-cxxfilt | FileCheck %s --check-prefixes=CHECK,SPIRV
+
+// NOTE: SPIRV codegen for resource methods is not yet implemented
+
+ByteAddressBuffer Buf : register(t0);
+RWByteAddressBuffer RWBuf : register(u0);
+
+// DXIL: %"class.hlsl::ByteAddressBuffer" = type { target("dx.RawBuffer", i8, 0, 0) }
+// DXIL: %"class.hlsl::RWByteAddressBuffer" = type { target("dx.RawBuffer", i8, 1, 0) }
+
+// DXIL: @Buf = internal global %"class.hlsl::ByteAddressBuffer" poison
+// DXIL: @RWBuf = internal global %"class.hlsl::RWByteAddressBuffer" poison
+
+export uint TestGetDimensions() {
+ uint dim1, dim2;
+ Buf.GetDimensions(dim1);
+ RWBuf.GetDimensions(dim2);
+ return dim1 + dim2;
+}
+
+// CHECK: define {{.*}} @TestGetDimensions()()
+// CHECK: call void @hlsl::ByteAddressBuffer::GetDimensions(unsigned int&)(ptr {{.*}} @Buf, ptr{{.*}})
+// CHECK: call void @hlsl::RWByteAddressBuffer::GetDimensions(unsigned int&)(ptr{{.*}} @RWBuf, ptr{{.*}})
+// CHECK: add
+// CHECK: ret
+
+// CHECK: define {{.*}} void @hlsl::ByteAddressBuffer::GetDimensions(unsigned int&)(ptr {{.*}} %this, {{.*}} %dim)
+// CHECK: %[[HANDLE_PTR:.*]] = getelementptr inbounds nuw %"class.hlsl::ByteAddressBuffer", ptr %{{.*}}, i32 0, i32 0
+// CHECK-NEXT: %[[HANDLE:.*]] = load target("dx.RawBuffer", i8, 0, 0), ptr %[[HANDLE_PTR]]
+// CHECK-NEXT: %[[DIMPTR:.*]] = load ptr, ptr %dim.addr
+// DXIL-NEXT: %[[DIM:.*]] = call i32 @llvm.dx.resource.getdimensions.x.tdx.RawBuffer_i8_0_0t(target("dx.RawBuffer", i8, 0, 0) %[[HANDLE]])
+// CHECK-NEXT: store i32 %[[DIM]], ptr %[[DIMPTR]]
+// CHECK-NEXT: ret void
+
+// CHECK: define {{.*}} void @hlsl::RWByteAddressBuffer::GetDimensions(unsigned int&)(ptr {{.*}} %this, ptr noalias {{.*}} %dim)
+// CHECK: %[[HANDLE_PTR:.*]] = getelementptr inbounds nuw %"class.hlsl::RWByteAddressBuffer", ptr %{{.*}}, i32 0, i32 0
+// CHECK-NEXT: %[[HANDLE:.*]] = load target("dx.RawBuffer", i8, 1, 0), ptr %[[HANDLE_PTR]]
+// CHECK-NEXT: %[[DIMPTR:.*]] = load ptr, ptr %dim.addr
+// DXIL-NEXT: %[[DIM:.*]] = call i32 @llvm.dx.resource.getdimensions.x.tdx.RawBuffer_i8_1_0t(target("dx.RawBuffer", i8, 1, 0) %[[HANDLE]])
+// CHECK-NEXT: store i32 %[[DIM]], ptr %[[DIMPTR]]
+// CHECK-NEXT: ret void
+
+// DXIL: declare i32 @llvm.dx.resource.getdimensions.x.tdx.RawBuffer_i8_0_0t(target("dx.RawBuffer", i8, 0, 0))
+// DXIL: declare i32 @llvm.dx.resource.getdimensions.x.tdx.RawBuffer_i8_1_0t(target("dx.RawBuffer", i8, 1, 0))
diff --git a/clang/test/CodeGenHLSL/resources/StructuredBuffers-methods-lib.hlsl b/clang/test/CodeGenHLSL/resources/StructuredBuffers-methods-lib.hlsl
index 2cf6a10..1f248d0 100644
--- a/clang/test/CodeGenHLSL/resources/StructuredBuffers-methods-lib.hlsl
+++ b/clang/test/CodeGenHLSL/resources/StructuredBuffers-methods-lib.hlsl
@@ -104,9 +104,57 @@ export float TestLoad() {
// CHECK-NEXT: %[[VAL:.*]] = load float, ptr %[[PTR]]
// CHECK-NEXT: ret float %[[VAL]]
+export uint TestGetDimensions() {
+ uint dim1, dim2, dim3, stride1, stride2, stride3;
+ SB1.GetDimensions(dim1, stride1);
+ RWSB2.GetDimensions(dim2, stride2);
+ CSB.GetDimensions(dim3, stride3);
+ return dim1 + dim2 + dim3 + stride1 + stride2 + stride3;
+}
+// CHECK: define noundef i32 @TestGetDimensions()()
+// CHECK: call void @hlsl::StructuredBuffer<float>::GetDimensions(unsigned int&, unsigned int&)(ptr {{.*}} @SB1, ptr {{.*}}, ptr {{.*}})
+// CHECK: call void @hlsl::RWStructuredBuffer<unsigned int vector[4]>::GetDimensions(unsigned int&, unsigned int&)(ptr {{.*}} @RWSB2, ptr {{.*}}, ptr {{.*}})
+// CHECK: call void @hlsl::ConsumeStructuredBuffer<double>::GetDimensions(unsigned int&, unsigned int&)(ptr {{.*}} @CSB, ptr {{.*}}, ptr {{.*}})
+// CHECK: add
+// CHECK: ret
+
+// CHECK: define {{.*}} void @hlsl::StructuredBuffer<float>::GetDimensions(unsigned int&, unsigned int&)(ptr {{.*}}, ptr {{.*}} %numStructs, ptr {{.*}} %stride)
+// CHECK: %__handle = getelementptr inbounds nuw %"class.hlsl::StructuredBuffer", ptr %{{.*}}, i32 0, i32 0
+// CHECK-NEXT: %[[HANDLE:.*]] = load target("dx.RawBuffer", float, 0, 0), ptr %__handle
+// CHECK-NEXT: %[[NUMSTRUCTS_PTR:.*]] = load ptr, ptr %numStructs.addr
+// DXIL-NEXT: %[[NUMSTRUCTS:.*]] = call i32 @llvm.dx.resource.getdimensions.x.tdx.RawBuffer_f32_0_0t(target("dx.RawBuffer", float, 0, 0) %[[HANDLE]])
+// CHECK-NEXT: store i32 %[[NUMSTRUCTS]], ptr %[[NUMSTRUCTS_PTR]]
+// CHECK-NEXT: %[[STRIDEPTR:.*]] = load ptr, ptr %stride.addr
+// CHECK-NEXT: store i32 4, ptr %[[STRIDEPTR]]
+// CHECK-NEXT: ret void
+
+// CHECK: define {{.*}} void @hlsl::RWStructuredBuffer<unsigned int vector[4]>::GetDimensions(unsigned int&, unsigned int&)(ptr {{.*}} %this, {{.*}} %numStructs, {{.*}} %stride)
+// CHECK: %__handle = getelementptr inbounds nuw %"class.hlsl::RWStructuredBuffer.0", ptr %{{.*}}, i32 0, i32 0
+// CHECK-NEXT: %[[HANDLE:.*]] = load target("dx.RawBuffer", <4 x i32>, 1, 0), ptr %__handle
+// CHECK-NEXT: %[[NUMSTRUCTS_PTR:.*]] = load ptr, ptr %numStructs.addr
+// DXIL-NEXT: %[[NUMSTRUCTS:.*]] = call i32 @llvm.dx.resource.getdimensions.x.tdx.RawBuffer_v4i32_1_0t(target("dx.RawBuffer", <4 x i32>, 1, 0) %[[HANDLE]])
+// CHECK-NEXT: store i32 %[[NUMSTRUCTS]], ptr %[[NUMSTRUCTS_PTR]]
+// CHECK-NEXT: %[[STRIDEPTR:.*]] = load ptr, ptr %stride.addr
+// CHECK-NEXT: store i32 16, ptr %[[STRIDEPTR]]
+// CHECK-NEXT: ret void
+
+// CHECK: define {{.*}} void @hlsl::ConsumeStructuredBuffer<double>::GetDimensions(unsigned int&, unsigned int&)(ptr {{.*}} %this, {{.*}} %numStructs, {{.*}} %stride)
+// CHECK: %__handle = getelementptr inbounds nuw %"class.hlsl::ConsumeStructuredBuffer", ptr %{{.*}}, i32 0, i32 0
+// CHECK-NEXT: %[[HANDLE:.*]] = load target("dx.RawBuffer", double, 1, 0), ptr %__handle
+// CHECK-NEXT: %[[NUMSTRUCTS_PTR:.*]] = load ptr, ptr %numStructs.addr
+// DXIL-NEXT: %[[NUMSTRUCTS:.*]] = call i32 @llvm.dx.resource.getdimensions.x.tdx.RawBuffer_f64_1_0t(target("dx.RawBuffer", double, 1, 0) %[[HANDLE]])
+// CHECK-NEXT: store i32 %[[NUMSTRUCTS]], ptr %[[NUMSTRUCTS_PTR]]
+// CHECK-NEXT: %[[STRIDEPTR:.*]] = load ptr, ptr %stride.addr
+// CHECK-NEXT: store i32 8, ptr %[[STRIDEPTR]]
+// CHECK-NEXT: ret void
+
// DXIL: declare i32 @llvm.dx.resource.updatecounter.tdx.RawBuffer_f32_1_0t(target("dx.RawBuffer", float, 1, 0), i8)
// DXIL: declare i32 @llvm.dx.resource.updatecounter.tdx.RawBuffer_v4i32_1_0t(target("dx.RawBuffer", <4 x i32>, 1, 0), i8)
// DXIL: declare ptr @llvm.dx.resource.getpointer.p0.tdx.RawBuffer_f32_1_0t(target("dx.RawBuffer", float, 1, 0), i32)
// DXIL: declare i32 @llvm.dx.resource.updatecounter.tdx.RawBuffer_f64_1_0t(target("dx.RawBuffer", double, 1, 0), i8)
// DXIL: declare ptr @llvm.dx.resource.getpointer.p0.tdx.RawBuffer_f64_1_0t(target("dx.RawBuffer", double, 1, 0), i32)
// DXIL: declare ptr @llvm.dx.resource.getpointer.p0.tdx.RawBuffer_f32_0_0t(target("dx.RawBuffer", float, 0, 0), i32)
+
+// DXIL: declare i32 @llvm.dx.resource.getdimensions.x.tdx.RawBuffer_f32_0_0t(target("dx.RawBuffer", float, 0, 0))
+// DXIL: declare i32 @llvm.dx.resource.getdimensions.x.tdx.RawBuffer_v4i32_1_0t(target("dx.RawBuffer", <4 x i32>, 1, 0))
+// DXIL: declare i32 @llvm.dx.resource.getdimensions.x.tdx.RawBuffer_f64_1_0t(target("dx.RawBuffer", double, 1, 0))
diff --git a/clang/test/CodeGenHLSL/resources/StructuredBuffers-methods-ps.hlsl b/clang/test/CodeGenHLSL/resources/StructuredBuffers-methods-ps.hlsl
index 47c1d0d..25fa759 100644
--- a/clang/test/CodeGenHLSL/resources/StructuredBuffers-methods-ps.hlsl
+++ b/clang/test/CodeGenHLSL/resources/StructuredBuffers-methods-ps.hlsl
@@ -65,7 +65,42 @@ export float TestLoad() {
// CHECK-NEXT: %[[VAL:.*]] = load <2 x i32>, ptr %[[BUFPTR]]
// CHECK-NEXT: ret <2 x i32> %[[VAL]]
+export uint TestGetDimensions() {
+ uint dim1, dim2, stride1, stride2;
+ ROSB1.GetDimensions(dim1, stride1);
+ ROSB2.GetDimensions(dim2, stride2);
+ return dim1 + dim2 + stride1 + stride2;
+}
+// CHECK: define noundef i32 @TestGetDimensions()()
+// CHECK: call void @hlsl::RasterizerOrderedStructuredBuffer<float>::GetDimensions(unsigned int&, unsigned int&)(ptr {{.*}} @ROSB1, ptr {{.*}}, ptr {{.*}})
+// CHECK: call void @hlsl::RasterizerOrderedStructuredBuffer<int vector[2]>::GetDimensions(unsigned int&, unsigned int&)(ptr {{.*}} @ROSB2, ptr {{.*}}, ptr {{.*}})
+// CHECK: add
+// CHECK: ret
+
+// CHECK: define {{.*}} void @hlsl::RasterizerOrderedStructuredBuffer<float>::GetDimensions(unsigned int&, unsigned int&)(ptr {{.*}}, ptr {{.*}} %numStructs, ptr {{.*}} %stride)
+// CHECK: %__handle = getelementptr inbounds nuw %"class.hlsl::RasterizerOrderedStructuredBuffer", ptr %{{.*}}, i32 0, i32 0
+// DXIL-NEXT: %[[HANDLE:.*]] = load target("dx.RawBuffer", float, 1, 1), ptr %__handle
+// CHECK-NEXT: %[[NUMSTRUCTS_PTR:.*]] = load ptr, ptr %numStructs.addr
+// DXIL-NEXT: %[[NUMSTRUCTS:.*]] = call i32 @llvm.dx.resource.getdimensions.x.tdx.RawBuffer_f32_1_1t(target("dx.RawBuffer", float, 1, 1) %[[HANDLE]])
+// CHECK-NEXT: store i32 %[[NUMSTRUCTS]], ptr %[[NUMSTRUCTS_PTR]]
+// CHECK-NEXT: %[[STRIDEPTR:.*]] = load ptr, ptr %stride.addr
+// CHECK-NEXT: store i32 4, ptr %[[STRIDEPTR]]
+// CHECK-NEXT: ret void
+
+// CHECK: define {{.*}} void @hlsl::RasterizerOrderedStructuredBuffer<int vector[2]>::GetDimensions(unsigned int&, unsigned int&)(ptr {{.*}}, ptr {{.*}} %numStructs, ptr {{.*}} %stride)
+// CHECK: %__handle = getelementptr inbounds nuw %"class.hlsl::RasterizerOrderedStructuredBuffer.0", ptr %{{.*}}, i32 0, i32 0
+// DXIL-NEXT: %[[HANDLE:.*]] = load target("dx.RawBuffer", <2 x i32>, 1, 1), ptr %__handle
+// CHECK-NEXT: %[[NUMSTRUCTS_PTR:.*]] = load ptr, ptr %numStructs.addr
+// DXIL-NEXT: %[[NUMSTRUCTS:.*]] = call i32 @llvm.dx.resource.getdimensions.x.tdx.RawBuffer_v2i32_1_1t(target("dx.RawBuffer", <2 x i32>, 1, 1) %[[HANDLE]])
+// CHECK-NEXT: store i32 %[[NUMSTRUCTS]], ptr %[[NUMSTRUCTS_PTR]]
+// CHECK-NEXT: %[[STRIDEPTR:.*]] = load ptr, ptr %stride.addr
+// CHECK-NEXT: store i32 8, ptr %[[STRIDEPTR]]
+// CHECK-NEXT: ret void
+
// DXIL: declare i32 @llvm.dx.resource.updatecounter.tdx.RawBuffer_f32_1_1t(target("dx.RawBuffer", float, 1, 1), i8)
// DXIL: declare i32 @llvm.dx.resource.updatecounter.tdx.RawBuffer_v2i32_1_1t(target("dx.RawBuffer", <2 x i32>, 1, 1), i8)
// DXIL: declare ptr @llvm.dx.resource.getpointer.p0.tdx.RawBuffer_f32_1_1t(target("dx.RawBuffer", float, 1, 1), i32)
// DXIL: declare ptr @llvm.dx.resource.getpointer.p0.tdx.RawBuffer_v2i32_1_1t(target("dx.RawBuffer", <2 x i32>, 1, 1), i32)
+
+// DXIL: declare i32 @llvm.dx.resource.getdimensions.x.tdx.RawBuffer_f32_1_1t(target("dx.RawBuffer", float, 1, 1))
+// DXIL: declare i32 @llvm.dx.resource.getdimensions.x.tdx.RawBuffer_v2i32_1_1t(target("dx.RawBuffer", <2 x i32>, 1, 1))
diff --git a/clang/test/CodeGenHLSL/resources/TypedBuffers-methods.hlsl b/clang/test/CodeGenHLSL/resources/TypedBuffers-methods.hlsl
index b153bda..fdc1ef0 100644
--- a/clang/test/CodeGenHLSL/resources/TypedBuffers-methods.hlsl
+++ b/clang/test/CodeGenHLSL/resources/TypedBuffers-methods.hlsl
@@ -38,5 +38,37 @@ export float TestLoad() {
// CHECK-NEXT: %[[VEC:.*]] = load <4 x i32>, ptr %[[PTR]]
// CHECK-NEXT: ret <4 x i32> %[[VEC]]
+export uint TestGetDimensions() {
+ uint dim1, dim2;
+ Buf.GetDimensions(dim1);
+ RWBuf.GetDimensions(dim2);
+ return dim1 + dim2;
+}
+
+// CHECK: @TestGetDimensions()()
+// CHECK: call void @hlsl::Buffer<float>::GetDimensions(unsigned int&)(ptr {{.*}} @Buf, ptr {{.*}})
+// CHECK: call void @hlsl::RWBuffer<unsigned int vector[4]>::GetDimensions(unsigned int&)(ptr {{.*}} @RWBuf, ptr {{.*}})
+// CHECK: add
+// CHECK: ret
+
+// CHECK: define {{.*}} void @hlsl::Buffer<float>::GetDimensions(unsigned int&)(ptr {{.*}} %this, ptr noalias {{.*}} %dim)
+// CHECK: %[[HANDLE_PTR:.*]] = getelementptr inbounds nuw %"class.hlsl::Buffer", ptr %this1, i32 0, i32 0
+// CHECK-NEXT: %[[HANDLE:.*]] = load target("dx.TypedBuffer", float, 0, 0, 0), ptr %[[HANDLE_PTR]]
+// CHECK-NEXT: %[[DIMPTR:.*]] = load ptr, ptr %dim.addr
+// DXIL-NEXT: %[[DIM:.*]] = call i32 @llvm.dx.resource.getdimensions.x.tdx.TypedBuffer_f32_0_0_0t(target("dx.TypedBuffer", float, 0, 0, 0) %[[HANDLE]])
+// CHECK-NEXT: store i32 %[[DIM]], ptr %[[DIMPTR]]
+// CHECK-NEXT: ret void
+
+// CHECK: define {{.*}} void @hlsl::RWBuffer<unsigned int vector[4]>::GetDimensions(unsigned int&)(ptr {{.*}} %this, {{.*}} %dim)
+// CHECK: %[[HANDLE_PTR:.*]] = getelementptr inbounds nuw %"class.hlsl::RWBuffer", ptr %{{.*}}, i32 0, i32 0
+// CHECK-NEXT: %[[HANDLE:.*]] = load target("dx.TypedBuffer", <4 x i32>, 1, 0, 0), ptr %[[HANDLE_PTR]]
+// CHECK-NEXT: %[[DIMPTR:.*]] = load ptr, ptr %dim.addr
+// DXIL-NEXT: %[[DIM:.*]] = call i32 @llvm.dx.resource.getdimensions.x.tdx.TypedBuffer_v4i32_1_0_0t(target("dx.TypedBuffer", <4 x i32>, 1, 0, 0) %[[HANDLE]])
+// CHECK-NEXT: store i32 %[[DIM]], ptr %[[DIMPTR]]
+// CHECK-NEXT: ret void
+
// DXIL: declare ptr @llvm.dx.resource.getpointer.p0.tdx.TypedBuffer_f32_0_0_0t(target("dx.TypedBuffer", float, 0, 0, 0), i32)
// DXIL: declare ptr @llvm.dx.resource.getpointer.p0.tdx.TypedBuffer_v4i32_1_0_0t(target("dx.TypedBuffer", <4 x i32>, 1, 0, 0), i32)
+
+// DXIL: declare i32 @llvm.dx.resource.getdimensions.x.tdx.TypedBuffer_f32_0_0_0t(target("dx.TypedBuffer", float, 0, 0, 0))
+// DXIL: declare i32 @llvm.dx.resource.getdimensions.x.tdx.TypedBuffer_v4i32_1_0_0t(target("dx.TypedBuffer", <4 x i32>, 1, 0, 0))
diff --git a/clang/test/Driver/linker-wrapper.c b/clang/test/Driver/linker-wrapper.c
index 52a961d..39b9bcd 100644
--- a/clang/test/Driver/linker-wrapper.c
+++ b/clang/test/Driver/linker-wrapper.c
@@ -102,7 +102,7 @@ __attribute__((visibility("protected"), used)) int x;
// CUDA: clang{{.*}} -o [[IMG_SM70:.+]] -dumpdir a.out.nvptx64.sm_70.img. --target=nvptx64-nvidia-cuda -march=sm_70
// CUDA: clang{{.*}} -o [[IMG_SM52:.+]] -dumpdir a.out.nvptx64.sm_52.img. --target=nvptx64-nvidia-cuda -march=sm_52
-// CUDA: fatbinary{{.*}}-64 --create {{.*}}.fatbin --image=profile=sm_70,file=[[IMG_SM70]] --image=profile=sm_52,file=[[IMG_SM52]]
+// CUDA: fatbinary{{.*}}-64 --create {{.*}}.fatbin --image3=kind=elf,sm=70,file=[[IMG_SM70]] --image3=kind=elf,sm=52,file=[[IMG_SM52]]
// CUDA: usr/bin/ld{{.*}} {{.*}}.openmp.image.{{.*}}.o {{.*}}.cuda.image.{{.*}}.o
// RUN: llvm-offload-binary -o %t.out \
@@ -236,7 +236,7 @@ __attribute__((visibility("protected"), used)) int x;
// RUN: %t.o -o a.out 2>&1 | FileCheck %s --check-prefix=RELOCATABLE-LINK-CUDA
// RELOCATABLE-LINK-CUDA: clang{{.*}} -o {{.*}}.img -dumpdir a.out.nvptx64.sm_89.img. --target=nvptx64-nvidia-cuda
-// RELOCATABLE-LINK-CUDA: fatbinary{{.*}} -64 --create {{.*}}.fatbin --image=profile=sm_89,file={{.*}}.img
+// RELOCATABLE-LINK-CUDA: fatbinary{{.*}} -64 --create {{.*}}.fatbin --image3=kind=elf,sm=89,file={{.*}}.img
// RELOCATABLE-LINK-CUDA: /usr/bin/ld.lld{{.*}}-r
// RELOCATABLE-LINK-CUDA: llvm-objcopy{{.*}}a.out --remove-section .llvm.offloading
diff --git a/clang/test/Parser/c2x-auto.c b/clang/test/Parser/c2x-auto.c
index b878a5b..7f80b07 100644
--- a/clang/test/Parser/c2x-auto.c
+++ b/clang/test/Parser/c2x-auto.c
@@ -130,3 +130,30 @@ void atomic(void) {
void attributes(void) {
auto ident [[clang::annotate("this works")]] = 12; // c17-error {{type specifier missing, defaults to 'int'; ISO C99 and later do not support implicit int}}
}
+
+/** GH163090 */
+constexpr auto int a1 = 0; // c23-error {{illegal storage class on file-scoped variable}} \
+ c23-error {{cannot combine with previous 'auto' declaration specifier}} \
+ c17-error {{illegal storage class on file-scoped variable}} \
+ c17-error {{unknown type name 'constexpr'}}
+
+constexpr int auto a2 = 0; // c23-error {{cannot combine with previous 'int' declaration specifier}} \
+ c17-error {{illegal storage class on file-scoped variable}} \
+ c17-error {{unknown type name 'constexpr'}}
+
+auto int b1 = 0; // c23-error {{illegal storage class on file-scoped variable}} \
+ c17-error {{illegal storage class on file-scoped variable}}
+
+int auto b2 = 0; // c23-error {{cannot combine with previous 'int' declaration specifier}} \
+ c17-error {{illegal storage class on file-scoped variable}}
+
+void f() {
+ constexpr auto int c1 = 0; // c23-error {{cannot combine with previous 'auto' declaration specifier}} \
+ c17-error {{use of undeclared identifier 'constexpr'}}
+
+ constexpr int auto c2 = 0; // c23-error {{cannot combine with previous 'int' declaration specifier}} \
+ c17-error {{use of undeclared identifier 'constexpr'}}
+
+ auto int d1 = 0;
+ int auto d2 = 0; // c23-error {{cannot combine with previous 'int' declaration specifier}}
+}
diff --git a/clang/test/Sema/attr-print.c b/clang/test/Sema/attr-print.c
index 8492356..211e61a 100644
--- a/clang/test/Sema/attr-print.c
+++ b/clang/test/Sema/attr-print.c
@@ -35,3 +35,6 @@ int * __sptr * __ptr32 ppsp32;
// CHECK: __attribute__((availability(macos, strict, introduced=10.6)));
void f6(int) __attribute__((availability(macosx,strict,introduced=10.6)));
+
+// CHECK: _libc_intl_domainname asm("__gi__libc_intl_domainname") __attribute__((visibility("hidden")));
+extern const char _libc_intl_domainname[]; extern typeof (_libc_intl_domainname) _libc_intl_domainname asm("__gi__libc_intl_domainname") __attribute__((visibility("hidden")));
diff --git a/clang/test/SemaTemplate/concepts.cpp b/clang/test/SemaTemplate/concepts.cpp
index aaa20f6..a54bc02 100644
--- a/clang/test/SemaTemplate/concepts.cpp
+++ b/clang/test/SemaTemplate/concepts.cpp
@@ -1514,6 +1514,31 @@ static_assert( requires {{ &f } -> C;} ); // expected-error {{reference to overl
}
+namespace GH162092 {
+
+template <typename T>
+struct vector;
+
+template <typename T, typename U>
+concept C = __is_same_as(T, U);
+
+template<class T, auto Cpt>
+concept generic_range_value = requires {
+ Cpt.template operator()<int>();
+};
+
+
+template<generic_range_value<[]<
+ C<int>
+ >() {}> T>
+void x() {}
+
+void foo() {
+ x<vector<int>>();
+}
+
+}
+
namespace GH162770 {
enum e {};
template<e> struct s {};
diff --git a/clang/tools/clang-linker-wrapper/ClangLinkerWrapper.cpp b/clang/tools/clang-linker-wrapper/ClangLinkerWrapper.cpp
index 4d5b956..bfeca17 100644
--- a/clang/tools/clang-linker-wrapper/ClangLinkerWrapper.cpp
+++ b/clang/tools/clang-linker-wrapper/ClangLinkerWrapper.cpp
@@ -396,8 +396,8 @@ fatbinary(ArrayRef<std::pair<StringRef, StringRef>> InputFiles,
CmdArgs.push_back("--create");
CmdArgs.push_back(*TempFileOrErr);
for (const auto &[File, Arch] : InputFiles)
- CmdArgs.push_back(
- Args.MakeArgString("--image=profile=" + Arch + ",file=" + File));
+ CmdArgs.push_back(Args.MakeArgString(
+ "--image3=kind=elf,sm=" + Arch.drop_front(3) + ",file=" + File));
if (Error Err = executeCommands(*FatBinaryPath, CmdArgs))
return std::move(Err);
diff --git a/clang/unittests/Format/AlignBracketsTest.cpp b/clang/unittests/Format/AlignBracketsTest.cpp
index c4380ae..ea8db51 100644
--- a/clang/unittests/Format/AlignBracketsTest.cpp
+++ b/clang/unittests/Format/AlignBracketsTest.cpp
@@ -778,6 +778,19 @@ TEST_F(AlignBracketsTest, ParenthesesAndOperandAlignment) {
Style);
}
+TEST_F(AlignBracketsTest, BlockIndentAndNamespace) {
+ auto Style = getLLVMStyleWithColumns(120);
+ Style.AllowShortNamespacesOnASingleLine = true;
+ Style.AlignAfterOpenBracket = FormatStyle::BAS_BlockIndent;
+
+ verifyNoCrash(
+ "namespace {\n"
+ "void xxxxxxxxxxxxxxxxxxxxx(nnnnn::TTTTTTTTTTTTT const *mmmm,\n"
+ " YYYYYYYYYYYYYYYYY &yyyyyyyyyyyyyy);\n"
+ "} //",
+ Style);
+}
+
} // namespace
} // namespace test
} // namespace format
diff --git a/clang/unittests/Format/TokenAnnotatorTest.cpp b/clang/unittests/Format/TokenAnnotatorTest.cpp
index 1152466..1002515 100644
--- a/clang/unittests/Format/TokenAnnotatorTest.cpp
+++ b/clang/unittests/Format/TokenAnnotatorTest.cpp
@@ -1129,6 +1129,11 @@ TEST_F(TokenAnnotatorTest, UnderstandsOverloadedOperators) {
ASSERT_EQ(Tokens.size(), 7u) << Tokens;
// Not TT_FunctionDeclarationName.
EXPECT_TOKEN(Tokens[3], tok::kw_operator, TT_Unknown);
+
+ Tokens = annotate("SomeAPI::operator()();");
+ ASSERT_EQ(Tokens.size(), 9u) << Tokens;
+ // Not TT_FunctionDeclarationName.
+ EXPECT_TOKEN(Tokens[2], tok::kw_operator, TT_Unknown);
}
TEST_F(TokenAnnotatorTest, OverloadedOperatorInTemplate) {
diff --git a/compiler-rt/test/asan/TestCases/Windows/basic_exception_handling.cpp b/compiler-rt/test/asan/TestCases/Windows/basic_exception_handling.cpp
new file mode 100644
index 0000000..6f02814
--- /dev/null
+++ b/compiler-rt/test/asan/TestCases/Windows/basic_exception_handling.cpp
@@ -0,0 +1,33 @@
+// RUN: %clangxx_asan %s -o %t
+// RUN: %run %t | FileCheck %s
+
+// This test checks that declaring a parameter in a catch block does not produce a false positive
+// ASan error on Windows.
+
+// This code is based on the repro in https://github.com/google/sanitizers/issues/749
+#include <cstdio>
+#include <exception>
+
+void throwInFunction() { throw std::exception("test2"); }
+
+int main() {
+ // case 1: direct throw
+ try {
+ throw std::exception("test1");
+ } catch (const std::exception &ex) {
+ puts(ex.what());
+ // CHECK: test1
+ }
+
+ // case 2: throw in function
+ try {
+ throwInFunction();
+ } catch (const std::exception &ex) {
+ puts(ex.what());
+ // CHECK: test2
+ }
+
+ printf("Success!\n");
+ // CHECK: Success!
+ return 0;
+}
diff --git a/flang/include/flang/Optimizer/OpenMP/Passes.td b/flang/include/flang/Optimizer/OpenMP/Passes.td
index e2f0920..bfbaa5f 100644
--- a/flang/include/flang/Optimizer/OpenMP/Passes.td
+++ b/flang/include/flang/Optimizer/OpenMP/Passes.td
@@ -93,6 +93,10 @@ def LowerWorkshare : Pass<"lower-workshare", "::mlir::ModuleOp"> {
let summary = "Lower workshare construct";
}
+def LowerWorkdistribute : Pass<"lower-workdistribute", "::mlir::ModuleOp"> {
+ let summary = "Lower workdistribute construct";
+}
+
def GenericLoopConversionPass
: Pass<"omp-generic-loop-conversion", "mlir::func::FuncOp"> {
let summary = "Converts OpenMP generic `omp.loop` to semantically "
diff --git a/flang/lib/Optimizer/OpenMP/CMakeLists.txt b/flang/lib/Optimizer/OpenMP/CMakeLists.txt
index b85ee7e..23a7dc8 100644
--- a/flang/lib/Optimizer/OpenMP/CMakeLists.txt
+++ b/flang/lib/Optimizer/OpenMP/CMakeLists.txt
@@ -8,6 +8,7 @@ add_flang_library(FlangOpenMPTransforms
MapsForPrivatizedSymbols.cpp
MapInfoFinalization.cpp
MarkDeclareTarget.cpp
+ LowerWorkdistribute.cpp
LowerWorkshare.cpp
LowerNontemporal.cpp
SimdOnly.cpp
diff --git a/flang/lib/Optimizer/OpenMP/LowerWorkdistribute.cpp b/flang/lib/Optimizer/OpenMP/LowerWorkdistribute.cpp
new file mode 100644
index 0000000..9278e17
--- /dev/null
+++ b/flang/lib/Optimizer/OpenMP/LowerWorkdistribute.cpp
@@ -0,0 +1,1852 @@
+//===- LowerWorkdistribute.cpp --------------------------------------------===//
+//
+// Part of the LLVM Project, under the Apache License v2.0 with LLVM Exceptions.
+// See https://llvm.org/LICENSE.txt for license information.
+// SPDX-License-Identifier: Apache-2.0 WITH LLVM-exception
+//
+//===----------------------------------------------------------------------===//
+//
+// This file implements the lowering and optimisations of omp.workdistribute.
+//
+// Fortran array statements are lowered to fir as fir.do_loop unordered.
+// The lower-workdistribute pass works mainly on identifying unordered
+// fir.do_loop ops nested in target{teams{workdistribute{fir.do_loop unordered}}}
+// and lowering them to target{teams{parallel{distribute{wsloop{loop_nest}}}}}.
+// It hoists all the other ops out of the target region.
+// It replaces heap allocation on the target with omp.target_allocmem and
+// deallocation with omp.target_freemem from the host. It also replaces the
+// runtime function "Assign" with omp_target_memcpy.
+//
+//===----------------------------------------------------------------------===//
+
+#include "flang/Optimizer/Builder/FIRBuilder.h"
+#include "flang/Optimizer/Dialect/FIRDialect.h"
+#include "flang/Optimizer/Dialect/FIROps.h"
+#include "flang/Optimizer/Dialect/FIRType.h"
+#include "flang/Optimizer/HLFIR/Passes.h"
+#include "flang/Optimizer/OpenMP/Utils.h"
+#include "flang/Optimizer/Transforms/Passes.h"
+#include "mlir/Analysis/SliceAnalysis.h"
+#include "mlir/Dialect/OpenMP/OpenMPDialect.h"
+#include "mlir/IR/Builders.h"
+#include "mlir/IR/Value.h"
+#include "mlir/Transforms/DialectConversion.h"
+#include "mlir/Transforms/GreedyPatternRewriteDriver.h"
+#include "mlir/Transforms/RegionUtils.h"
+#include "llvm/Frontend/OpenMP/OMPConstants.h"
+#include <mlir/Dialect/Arith/IR/Arith.h>
+#include <mlir/Dialect/LLVMIR/LLVMTypes.h>
+#include <mlir/Dialect/Utils/IndexingUtils.h>
+#include <mlir/IR/BlockSupport.h>
+#include <mlir/IR/BuiltinOps.h>
+#include <mlir/IR/Diagnostics.h>
+#include <mlir/IR/IRMapping.h>
+#include <mlir/IR/PatternMatch.h>
+#include <mlir/Interfaces/SideEffectInterfaces.h>
+#include <mlir/Support/LLVM.h>
+#include <optional>
+#include <variant>
+
+namespace flangomp {
+#define GEN_PASS_DEF_LOWERWORKDISTRIBUTE
+#include "flang/Optimizer/OpenMP/Passes.h.inc"
+} // namespace flangomp
+
+#define DEBUG_TYPE "lower-workdistribute"
+
+using namespace mlir;
+
+namespace {
+
+/// This string is used to identify the Fortran-specific runtime function FortranAAssign.
+static constexpr llvm::StringRef FortranAssignStr = "_FortranAAssign";
+
+/// The isRuntimeCall function is a utility designed to determine
+/// if a given operation is a call to a Fortran-specific runtime function.
+static bool isRuntimeCall(Operation *op) {
+ if (auto callOp = dyn_cast<fir::CallOp>(op)) {
+ auto callee = callOp.getCallee();
+ if (!callee)
+ return false;
+ auto *func = op->getParentOfType<ModuleOp>().lookupSymbol(*callee);
+ if (func->getAttr(fir::FIROpsDialect::getFirRuntimeAttrName()))
+ return true;
+ }
+ return false;
+}
+
+/// This is the single source of truth about whether we should parallelize an
+/// operation nested in an omp.workdistribute region.
+/// Parallelize here refers to dividing into units of work.
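+/// In this pass that currently means unordered fir.do_loop ops (coming from
+/// array syntax) and fir.call ops to the _FortranAAssign runtime function.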
+static bool shouldParallelize(Operation *op) {
+ // True if the op is a runtime call to Assign
+ if (isRuntimeCall(op)) {
+ fir::CallOp runtimeCall = cast<fir::CallOp>(op);
+ auto funcName = runtimeCall.getCallee()->getRootReference().getValue();
+ if (funcName == FortranAssignStr) {
+ return true;
+ }
+ }
+ // We cannot parallelize ops with side effects.
+ // Parallelizable operations should not produce
+ // values that other operations depend on
+ if (llvm::any_of(op->getResults(),
+ [](OpResult v) -> bool { return !v.use_empty(); }))
+ return false;
+ // We will parallelize unordered loops - these come from array syntax
+ if (auto loop = dyn_cast<fir::DoLoopOp>(op)) {
+ auto unordered = loop.getUnordered();
+ if (!unordered)
+ return false;
+ return *unordered;
+ }
+ // We cannot parallelize anything else.
+ return false;
+}
+
+/// The getPerfectlyNested function is a generic utility for finding
+/// a single, "perfectly nested" operation within a parent operation.
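+///
+/// For example (sketch), given
+///   parent_op {
+///     nested_op { ... }
+///     terminator
+///   }
+/// getPerfectlyNested<NestedOpTy>(parent_op) returns nested_op; any other op
+/// between nested_op and the terminator makes it return null.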
+template <typename T>
+static T getPerfectlyNested(Operation *op) {
+ if (op->getNumRegions() != 1)
+ return nullptr;
+ auto &region = op->getRegion(0);
+ if (region.getBlocks().size() != 1)
+ return nullptr;
+ auto *block = &region.front();
+ auto *firstOp = &block->front();
+ if (auto nested = dyn_cast<T>(firstOp))
+ if (firstOp->getNextNode() == block->getTerminator())
+ return nested;
+ return nullptr;
+}
+
+/// verifyTargetTeamsWorkdistribute method verifies that
+/// omp.target { teams { workdistribute { ... } } } is well formed
+/// and fails for function calls that don't have lowering implemented yet.
+static LogicalResult
+verifyTargetTeamsWorkdistribute(omp::WorkdistributeOp workdistribute) {
+ OpBuilder rewriter(workdistribute);
+ auto loc = workdistribute->getLoc();
+ auto teams = dyn_cast<omp::TeamsOp>(workdistribute->getParentOp());
+ if (!teams) {
+ emitError(loc, "workdistribute not nested in teams\n");
+ return failure();
+ }
+ if (workdistribute.getRegion().getBlocks().size() != 1) {
+ emitError(loc, "workdistribute with multiple blocks\n");
+ return failure();
+ }
+ if (teams.getRegion().getBlocks().size() != 1) {
+ emitError(loc, "teams with multiple blocks\n");
+ return failure();
+ }
+
+ bool foundWorkdistribute = false;
+ for (auto &op : teams.getOps()) {
+ if (isa<omp::WorkdistributeOp>(op)) {
+ if (foundWorkdistribute) {
+ emitError(loc, "teams has multiple workdistribute ops.\n");
+ return failure();
+ }
+ foundWorkdistribute = true;
+ continue;
+ }
+ // Identify any omp dialect ops present before/after workdistribute.
+ if (op.getDialect() && isa<omp::OpenMPDialect>(op.getDialect()) &&
+ !isa<omp::TerminatorOp>(op)) {
+ emitError(loc, "teams has omp ops other than workdistribute. Lowering "
+ "not implemented yet.\n");
+ return failure();
+ }
+ }
+
+ omp::TargetOp targetOp = dyn_cast<omp::TargetOp>(teams->getParentOp());
+ // return if not omp.target
+ if (!targetOp)
+ return success();
+
+ for (auto &op : workdistribute.getOps()) {
+ if (auto callOp = dyn_cast<fir::CallOp>(op)) {
+ if (isRuntimeCall(&op)) {
+ auto funcName = (*callOp.getCallee()).getRootReference().getValue();
+ // _FortranAAssign is handled. Other runtime calls are not supported
+ // in omp.workdistribute yet.
+ if (funcName == FortranAssignStr)
+ continue;
+ else {
+ emitError(loc, "Runtime call " + funcName +
+ " lowering not supported for workdistribute yet.");
+ return failure();
+ }
+ }
+ }
+ }
+ return success();
+}
+
+/// fissionWorkdistribute method finds the parallelizable ops
+/// within teams {workdistribute} region and moves them to their
+/// own teams{workdistribute} region.
+///
+/// If B() and D() are parallelizable,
+///
+/// omp.teams {
+/// omp.workdistribute {
+/// A()
+/// B()
+/// C()
+/// D()
+/// E()
+/// }
+/// }
+///
+/// becomes
+///
+/// A()
+/// omp.teams {
+/// omp.workdistribute {
+/// B()
+/// }
+/// }
+/// C()
+/// omp.teams {
+/// omp.workdistribute {
+/// D()
+/// }
+/// }
+/// E()
+static FailureOr<bool>
+fissionWorkdistribute(omp::WorkdistributeOp workdistribute) {
+ OpBuilder rewriter(workdistribute);
+ auto loc = workdistribute->getLoc();
+ auto teams = dyn_cast<omp::TeamsOp>(workdistribute->getParentOp());
+ auto *teamsBlock = &teams.getRegion().front();
+ bool changed = false;
+ // Move the ops inside teams and before workdistribute outside.
+ IRMapping irMapping;
+ llvm::SmallVector<Operation *> teamsHoisted;
+ for (auto &op : teams.getOps()) {
+ if (&op == workdistribute) {
+ break;
+ }
+ if (shouldParallelize(&op)) {
+      emitError(loc, "teams has parallelizable ops before first workdistribute\n");
+ return failure();
+ } else {
+ rewriter.setInsertionPoint(teams);
+ rewriter.clone(op, irMapping);
+ teamsHoisted.push_back(&op);
+ changed = true;
+ }
+ }
+ for (auto *op : llvm::reverse(teamsHoisted)) {
+ op->replaceAllUsesWith(irMapping.lookup(op));
+ op->erase();
+ }
+
+ // While we have unhandled operations in the original workdistribute
+ auto *workdistributeBlock = &workdistribute.getRegion().front();
+ auto *terminator = workdistributeBlock->getTerminator();
+ while (&workdistributeBlock->front() != terminator) {
+ rewriter.setInsertionPoint(teams);
+ IRMapping mapping;
+ llvm::SmallVector<Operation *> hoisted;
+ Operation *parallelize = nullptr;
+ for (auto &op : workdistribute.getOps()) {
+ if (&op == terminator) {
+ break;
+ }
+ if (shouldParallelize(&op)) {
+ parallelize = &op;
+ break;
+ } else {
+ rewriter.clone(op, mapping);
+ hoisted.push_back(&op);
+ changed = true;
+ }
+ }
+
+ for (auto *op : llvm::reverse(hoisted)) {
+ op->replaceAllUsesWith(mapping.lookup(op));
+ op->erase();
+ }
+
+ if (parallelize && hoisted.empty() &&
+ parallelize->getNextNode() == terminator)
+ break;
+ if (parallelize) {
+ auto newTeams = rewriter.cloneWithoutRegions(teams);
+ auto *newTeamsBlock = rewriter.createBlock(
+ &newTeams.getRegion(), newTeams.getRegion().begin(), {}, {});
+ for (auto arg : teamsBlock->getArguments())
+ newTeamsBlock->addArgument(arg.getType(), arg.getLoc());
+ auto newWorkdistribute = rewriter.create<omp::WorkdistributeOp>(loc);
+ rewriter.create<omp::TerminatorOp>(loc);
+ rewriter.createBlock(&newWorkdistribute.getRegion(),
+ newWorkdistribute.getRegion().begin(), {}, {});
+ auto *cloned = rewriter.clone(*parallelize);
+ parallelize->replaceAllUsesWith(cloned);
+ parallelize->erase();
+ rewriter.create<omp::TerminatorOp>(loc);
+ changed = true;
+ }
+ }
+ return changed;
+}
+
+/// Generate omp.parallel operation with an empty region.
+static void genParallelOp(Location loc, OpBuilder &rewriter, bool composite) {
+ auto parallelOp = rewriter.create<mlir::omp::ParallelOp>(loc);
+ parallelOp.setComposite(composite);
+ rewriter.createBlock(&parallelOp.getRegion());
+ rewriter.setInsertionPoint(rewriter.create<mlir::omp::TerminatorOp>(loc));
+ return;
+}
+
+/// Generate omp.distribute operation with an empty region.
+static void genDistributeOp(Location loc, OpBuilder &rewriter, bool composite) {
+ mlir::omp::DistributeOperands distributeClauseOps;
+ auto distributeOp =
+ rewriter.create<mlir::omp::DistributeOp>(loc, distributeClauseOps);
+ distributeOp.setComposite(composite);
+ auto distributeBlock = rewriter.createBlock(&distributeOp.getRegion());
+ rewriter.setInsertionPointToStart(distributeBlock);
+ return;
+}
+
+/// Generate loop nest clause operands from fir.do_loop operation.
+static void
+genLoopNestClauseOps(OpBuilder &rewriter, fir::DoLoopOp loop,
+ mlir::omp::LoopNestOperands &loopNestClauseOps) {
+ assert(loopNestClauseOps.loopLowerBounds.empty() &&
+ "Loop nest bounds were already emitted!");
+ loopNestClauseOps.loopLowerBounds.push_back(loop.getLowerBound());
+ loopNestClauseOps.loopUpperBounds.push_back(loop.getUpperBound());
+ loopNestClauseOps.loopSteps.push_back(loop.getStep());
+ loopNestClauseOps.loopInclusive = rewriter.getUnitAttr();
+}
+
+/// Generate omp.wsloop operation with an empty region and
+/// clone the body of fir.do_loop operation inside the loop nest region.
+static void genWsLoopOp(mlir::OpBuilder &rewriter, fir::DoLoopOp doLoop,
+ const mlir::omp::LoopNestOperands &clauseOps,
+ bool composite) {
+
+ auto wsloopOp = rewriter.create<mlir::omp::WsloopOp>(doLoop.getLoc());
+ wsloopOp.setComposite(composite);
+ rewriter.createBlock(&wsloopOp.getRegion());
+
+ auto loopNestOp =
+ rewriter.create<mlir::omp::LoopNestOp>(doLoop.getLoc(), clauseOps);
+
+ // Clone the loop's body inside the loop nest construct using the
+ // mapped values.
+ rewriter.cloneRegionBefore(doLoop.getRegion(), loopNestOp.getRegion(),
+ loopNestOp.getRegion().begin());
+ Block *clonedBlock = &loopNestOp.getRegion().back();
+ mlir::Operation *terminatorOp = clonedBlock->getTerminator();
+
+ // Erase fir.result op of do loop and create yield op.
+ if (auto resultOp = dyn_cast<fir::ResultOp>(terminatorOp)) {
+ rewriter.setInsertionPoint(terminatorOp);
+ rewriter.create<mlir::omp::YieldOp>(doLoop->getLoc());
+ terminatorOp->erase();
+ }
+}
+
+/// workdistributeDoLower method finds the unordered fir.do_loop
+/// nested in teams {workdistribute{fir.do_loop unordered}} and
+/// lowers it to teams {parallel {distribute {wsloop {loop_nest}}}}.
+///
+/// If fir.do_loop is present inside teams workdistribute
+///
+/// omp.teams {
+///   omp.workdistribute {
+///     fir.do_loop unordered {
+///       ...
+///     }
+///   }
+/// }
+///
+/// Then, it is lowered to
+///
+/// omp.teams {
+///   omp.parallel {
+///     omp.distribute {
+///       omp.wsloop {
+///         omp.loop_nest
+///           ...
+///       }
+///     }
+///   }
+/// }
+static bool
+workdistributeDoLower(omp::WorkdistributeOp workdistribute,
+ SetVector<omp::TargetOp> &targetOpsToProcess) {
+ OpBuilder rewriter(workdistribute);
+ auto doLoop = getPerfectlyNested<fir::DoLoopOp>(workdistribute);
+ auto wdLoc = workdistribute->getLoc();
+ if (doLoop && shouldParallelize(doLoop)) {
+ assert(doLoop.getReduceOperands().empty());
+
+ // Record the target ops to process later
+ if (auto teamsOp = dyn_cast<omp::TeamsOp>(workdistribute->getParentOp())) {
+ auto targetOp = dyn_cast<omp::TargetOp>(teamsOp->getParentOp());
+ if (targetOp) {
+ targetOpsToProcess.insert(targetOp);
+ }
+ }
+ // Generate the nested parallel, distribute, wsloop and loop_nest ops.
+ genParallelOp(wdLoc, rewriter, true);
+ genDistributeOp(wdLoc, rewriter, true);
+ mlir::omp::LoopNestOperands loopNestClauseOps;
+ genLoopNestClauseOps(rewriter, doLoop, loopNestClauseOps);
+ genWsLoopOp(rewriter, doLoop, loopNestClauseOps, true);
+ workdistribute.erase();
+ return true;
+ }
+ return false;
+}
+
+/// Check if the enclosed type in fir.ref is a fir.box and the fir.box encloses an array
+static bool isEnclosedTypeRefToBoxArray(Type type) {
+ // Check if it's a reference type
+ if (auto refType = dyn_cast<fir::ReferenceType>(type)) {
+ // Get the referenced type (should be fir.box)
+ auto referencedType = refType.getEleTy();
+ // Check if referenced type is a box
+ if (auto boxType = dyn_cast<fir::BoxType>(referencedType)) {
+ // Get the boxed type and check if it's an array
+ auto boxedType = boxType.getEleTy();
+ // Check if boxed type is a sequence (array)
+ return isa<fir::SequenceType>(boxedType);
+ }
+ }
+ return false;
+}
+
+/// Check if the enclosed type in fir.box is scalar (not array)
+static bool isEnclosedTypeBoxScalar(Type type) {
+ // Check if it's a box type
+ if (auto boxType = dyn_cast<fir::BoxType>(type)) {
+ // Get the boxed type
+ auto boxedType = boxType.getEleTy();
+ // Check if boxed type is NOT a sequence (array)
+ return !isa<fir::SequenceType>(boxedType);
+ }
+ return false;
+}
+
+/// Check if the FortranAAssign call has src as scalar and dest as array
+static bool isFortranAssignSrcScalarAndDestArray(fir::CallOp callOp) {
+ if (callOp.getNumOperands() < 2)
+ return false;
+ auto srcArg = callOp.getOperand(1);
+ auto destArg = callOp.getOperand(0);
+ // Both operands should be fir.convert ops
+ auto srcConvert = srcArg.getDefiningOp<fir::ConvertOp>();
+ auto destConvert = destArg.getDefiningOp<fir::ConvertOp>();
+ if (!srcConvert || !destConvert) {
+ emitError(callOp->getLoc(),
+ "Unimplemented: FortranAssign to OpenMP lowering\n");
+ return false;
+ }
+ // Get the original types before conversion
+ auto srcOrigType = srcConvert.getValue().getType();
+ auto destOrigType = destConvert.getValue().getType();
+
+ // Check if src is scalar and dest is array
+ bool srcIsScalar = isEnclosedTypeBoxScalar(srcOrigType);
+ bool destIsArray = isEnclosedTypeRefToBoxArray(destOrigType);
+ return srcIsScalar && destIsArray;
+}
+
+/// Convert a flat index to multi-dimensional indices for an array box
+/// Example: 2D array with shape (2,4)
+/// Col 1 Col 2 Col 3 Col 4
+/// Row 1: (1,1) (1,2) (1,3) (1,4)
+/// Row 2: (2,1) (2,2) (2,3) (2,4)
+///
+/// extents: (2,4)
+///
+/// flatIdx: 0 1 2 3 4 5 6 7
+/// Indices: (1,1) (1,2) (1,3) (1,4) (2,1) (2,2) (2,3) (2,4)
+static SmallVector<Value> convertFlatToMultiDim(OpBuilder &builder,
+ Location loc, Value flatIdx,
+ Value arrayBox) {
+ // Get array type and rank
+ auto boxType = cast<fir::BoxType>(arrayBox.getType());
+ auto seqType = cast<fir::SequenceType>(boxType.getEleTy());
+ int rank = seqType.getDimension();
+
+ // Get all extents
+ SmallVector<Value> extents;
+ // Get extents for each dimension
+ for (int i = 0; i < rank; ++i) {
+ auto dimIdx = arith::ConstantIndexOp::create(builder, loc, i);
+ auto boxDims = fir::BoxDimsOp::create(builder, loc, arrayBox, dimIdx);
+ extents.push_back(boxDims.getResult(1));
+ }
+
+ // Convert flat index to multi-dimensional indices
+ SmallVector<Value> indices(rank);
+ Value temp = flatIdx;
+ auto c1 = builder.create<arith::ConstantIndexOp>(loc, 1);
+
+ // Work backwards through dimensions (row-major order)
+ for (int i = rank - 1; i >= 0; --i) {
+ Value zeroBasedIdx = builder.create<arith::RemSIOp>(loc, temp, extents[i]);
+ // Convert to one-based index
+ indices[i] = builder.create<arith::AddIOp>(loc, zeroBasedIdx, c1);
+ if (i > 0) {
+ temp = builder.create<arith::DivSIOp>(loc, temp, extents[i]);
+ }
+ }
+
+ return indices;
+}
+
+/// Calculate the total number of elements in the array box
+/// (totalElems = extent(1) * extent(2) * ... * extent(n))
+static Value CalculateTotalElements(OpBuilder &builder, Location loc,
+ Value arrayBox) {
+ auto boxType = cast<fir::BoxType>(arrayBox.getType());
+ auto seqType = cast<fir::SequenceType>(boxType.getEleTy());
+ int rank = seqType.getDimension();
+
+ Value totalElems = nullptr;
+ for (int i = 0; i < rank; ++i) {
+ auto dimIdx = arith::ConstantIndexOp::create(builder, loc, i);
+ auto boxDims = fir::BoxDimsOp::create(builder, loc, arrayBox, dimIdx);
+ Value extent = boxDims.getResult(1);
+ if (i == 0) {
+ totalElems = extent;
+ } else {
+ totalElems = builder.create<arith::MulIOp>(loc, totalElems, extent);
+ }
+ }
+ return totalElems;
+}
+
+/// Replace the FortranAAssign runtime call with an unordered do loop
+static void replaceWithUnorderedDoLoop(OpBuilder &builder, Location loc,
+ omp::TeamsOp teamsOp,
+ omp::WorkdistributeOp workdistribute,
+ fir::CallOp callOp) {
+ auto destConvert = callOp.getOperand(0).getDefiningOp<fir::ConvertOp>();
+ auto srcConvert = callOp.getOperand(1).getDefiningOp<fir::ConvertOp>();
+
+ Value destBox = destConvert.getValue();
+ Value srcBox = srcConvert.getValue();
+
+  // Get the defining alloca op of destBox.
+ auto destAlloca = destBox.getDefiningOp<fir::AllocaOp>();
+
+ if (!destAlloca) {
+ emitError(loc, "Unimplemented: FortranAssign to OpenMP lowering\n");
+ return;
+ }
+
+ // get the store op that stores to the alloca
+ for (auto user : destAlloca->getUsers()) {
+ if (auto storeOp = dyn_cast<fir::StoreOp>(user)) {
+ destBox = storeOp.getValue();
+ break;
+ }
+ }
+
+ builder.setInsertionPoint(teamsOp);
+ // Load destination array box (if it's a reference)
+ Value arrayBox = destBox;
+ if (isa<fir::ReferenceType>(destBox.getType()))
+ arrayBox = builder.create<fir::LoadOp>(loc, destBox);
+
+ auto scalarValue = builder.create<fir::BoxAddrOp>(loc, srcBox);
+ Value scalar = builder.create<fir::LoadOp>(loc, scalarValue);
+
+ // Calculate total number of elements (flattened)
+ auto c0 = builder.create<arith::ConstantIndexOp>(loc, 0);
+ auto c1 = builder.create<arith::ConstantIndexOp>(loc, 1);
+ Value totalElems = CalculateTotalElements(builder, loc, arrayBox);
+
+ auto *workdistributeBlock = &workdistribute.getRegion().front();
+ builder.setInsertionPointToStart(workdistributeBlock);
+ // Create single unordered loop for flattened array
+ auto doLoop = fir::DoLoopOp::create(builder, loc, c0, totalElems, c1, true);
+ Block *loopBlock = &doLoop.getRegion().front();
+ builder.setInsertionPointToStart(doLoop.getBody());
+
+ auto flatIdx = loopBlock->getArgument(0);
+ SmallVector<Value> indices =
+ convertFlatToMultiDim(builder, loc, flatIdx, arrayBox);
+ // Use fir.array_coor for linear addressing
+ auto elemPtr = fir::ArrayCoorOp::create(
+ builder, loc, fir::ReferenceType::get(scalar.getType()), arrayBox,
+ nullptr, nullptr, ValueRange{indices}, ValueRange{});
+
+ builder.create<fir::StoreOp>(loc, scalar, elemPtr);
+}
+
+/// workdistributeRuntimeCallLower method finds the runtime calls
+/// nested in teams {workdistribute{}} and
+/// lowers FortranAAssign to unordered do loop if src is scalar and dest is
+/// array. Other runtime calls are not handled currently.
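+///
+/// Roughly (sketch only, operand conversions omitted), a call such as
+///
+///   fir.call @_FortranAAssign(%dest_box, %src_box, ...)
+///
+/// with a scalar source and an array destination becomes a flattened
+/// unordered loop inside the workdistribute region:
+///
+///   fir.do_loop %i = %c0 to %total_elems step %c1 unordered {
+///     %addr = fir.array_coor %dest_array, <indices derived from %i>
+///     fir.store %scalar to %addr
+///   }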
+static FailureOr<bool>
+workdistributeRuntimeCallLower(omp::WorkdistributeOp workdistribute,
+ SetVector<omp::TargetOp> &targetOpsToProcess) {
+ OpBuilder rewriter(workdistribute);
+ auto loc = workdistribute->getLoc();
+ auto teams = dyn_cast<omp::TeamsOp>(workdistribute->getParentOp());
+ if (!teams) {
+ emitError(loc, "workdistribute not nested in teams\n");
+ return failure();
+ }
+ if (workdistribute.getRegion().getBlocks().size() != 1) {
+ emitError(loc, "workdistribute with multiple blocks\n");
+ return failure();
+ }
+ if (teams.getRegion().getBlocks().size() != 1) {
+ emitError(loc, "teams with multiple blocks\n");
+ return failure();
+ }
+ bool changed = false;
+ // Get the target op parent of teams
+ omp::TargetOp targetOp = dyn_cast<omp::TargetOp>(teams->getParentOp());
+ SmallVector<Operation *> opsToErase;
+ for (auto &op : workdistribute.getOps()) {
+ if (isRuntimeCall(&op)) {
+ rewriter.setInsertionPoint(&op);
+ fir::CallOp runtimeCall = cast<fir::CallOp>(op);
+ auto funcName = runtimeCall.getCallee()->getRootReference().getValue();
+ if (funcName == FortranAssignStr) {
+ if (isFortranAssignSrcScalarAndDestArray(runtimeCall) && targetOp) {
+ // Record the target ops to process later
+ targetOpsToProcess.insert(targetOp);
+ replaceWithUnorderedDoLoop(rewriter, loc, teams, workdistribute,
+ runtimeCall);
+ opsToErase.push_back(&op);
+ changed = true;
+ }
+ }
+ }
+ }
+ // Erase the runtime calls that have been replaced.
+ for (auto *op : opsToErase) {
+ op->erase();
+ }
+ return changed;
+}
+
+/// teamsWorkdistributeToSingleOp method hoists all the ops inside
+/// teams {workdistribute{}} before teams op.
+///
+/// If A() and B() are present inside teams workdistribute
+///
+/// omp.teams {
+/// omp.workdistribute {
+/// A()
+/// B()
+/// }
+/// }
+///
+/// Then, it is lowered to
+///
+/// A()
+/// B()
+///
+/// If only the terminator remains in teams after hoisting, we erase teams op.
+static bool
+teamsWorkdistributeToSingleOp(omp::TeamsOp teamsOp,
+ SetVector<omp::TargetOp> &targetOpsToProcess) {
+ auto workdistributeOp = getPerfectlyNested<omp::WorkdistributeOp>(teamsOp);
+ if (!workdistributeOp)
+ return false;
+ // Get the block containing teamsOp (the parent block).
+ Block *parentBlock = teamsOp->getBlock();
+ Block &workdistributeBlock = *workdistributeOp.getRegion().begin();
+ // Record the target ops to process later
+ for (auto &op : workdistributeBlock.getOperations()) {
+ if (shouldParallelize(&op)) {
+ auto targetOp = dyn_cast<omp::TargetOp>(teamsOp->getParentOp());
+ if (targetOp) {
+ targetOpsToProcess.insert(targetOp);
+ }
+ }
+ }
+ auto insertPoint = Block::iterator(teamsOp);
+ // Get the range of operations to move (excluding the terminator).
+ auto workdistributeBegin = workdistributeBlock.begin();
+ auto workdistributeEnd = workdistributeBlock.getTerminator()->getIterator();
+ // Move the operations from workdistribute block to before teamsOp.
+ parentBlock->getOperations().splice(insertPoint,
+ workdistributeBlock.getOperations(),
+ workdistributeBegin, workdistributeEnd);
+ // Erase the now-empty workdistributeOp.
+ workdistributeOp.erase();
+ Block &teamsBlock = *teamsOp.getRegion().begin();
+ // Check if only the terminator remains and erase teams op.
+ if (teamsBlock.getOperations().size() == 1 &&
+ teamsBlock.getTerminator() != nullptr) {
+ teamsOp.erase();
+ }
+ return true;
+}
+
+/// If multiple workdistribute ops are nested in a target region, we will need
+/// to split the target region, but we want to preserve the data semantics of
+/// the original data region and avoid unnecessary data movement at each of the
+/// subkernels - we split the target region into a target_data{target}
+/// nest where only the outer one moves the data.
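+///
+/// Illustrative shape of the rewrite (sketch; the exact map flags depend on
+/// the capture kinds of the original map_entries):
+///
+///   omp.target map_entries(<data-moving maps>) { ... }
+///
+/// becomes
+///
+///   omp.target_data map_entries(<data-moving maps>) {
+///     omp.target map_entries(<same vars, no data movement>) { ... }
+///   }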
+FailureOr<omp::TargetOp> splitTargetData(omp::TargetOp targetOp,
+ RewriterBase &rewriter) {
+ auto loc = targetOp->getLoc();
+ if (targetOp.getMapVars().empty()) {
+ emitError(loc, "Target region has no data maps\n");
+ return failure();
+ }
+ // Collect all the mapinfo ops
+ SmallVector<omp::MapInfoOp> mapInfos;
+ for (auto opr : targetOp.getMapVars()) {
+ auto mapInfo = cast<omp::MapInfoOp>(opr.getDefiningOp());
+ mapInfos.push_back(mapInfo);
+ }
+
+ rewriter.setInsertionPoint(targetOp);
+ SmallVector<Value> innerMapInfos;
+ SmallVector<Value> outerMapInfos;
+ // Create new mapinfo ops for the inner target region
+ for (auto mapInfo : mapInfos) {
+ auto originalMapType =
+ (llvm::omp::OpenMPOffloadMappingFlags)(mapInfo.getMapType());
+ auto originalCaptureType = mapInfo.getMapCaptureType();
+ llvm::omp::OpenMPOffloadMappingFlags newMapType;
+ mlir::omp::VariableCaptureKind newCaptureType;
+ // For bycopy, we keep the same map type and capture type
+ // For byref, we change the map type to none and keep the capture type
+ if (originalCaptureType == mlir::omp::VariableCaptureKind::ByCopy) {
+ newMapType = originalMapType;
+ newCaptureType = originalCaptureType;
+ } else if (originalCaptureType == mlir::omp::VariableCaptureKind::ByRef) {
+ newMapType = llvm::omp::OpenMPOffloadMappingFlags::OMP_MAP_NONE;
+ newCaptureType = originalCaptureType;
+ outerMapInfos.push_back(mapInfo);
+ } else {
+ emitError(targetOp->getLoc(), "Unhandled case");
+ return failure();
+ }
+ auto innerMapInfo = cast<omp::MapInfoOp>(rewriter.clone(*mapInfo));
+ innerMapInfo.setMapTypeAttr(rewriter.getIntegerAttr(
+ rewriter.getIntegerType(64, false),
+ static_cast<
+ std::underlying_type_t<llvm::omp::OpenMPOffloadMappingFlags>>(
+ newMapType)));
+ innerMapInfo.setMapCaptureType(newCaptureType);
+ innerMapInfos.push_back(innerMapInfo.getResult());
+ }
+
+ rewriter.setInsertionPoint(targetOp);
+ auto device = targetOp.getDevice();
+ auto ifExpr = targetOp.getIfExpr();
+ auto deviceAddrVars = targetOp.getHasDeviceAddrVars();
+ auto devicePtrVars = targetOp.getIsDevicePtrVars();
+ // Create the target data op
+ auto targetDataOp = rewriter.create<omp::TargetDataOp>(
+ loc, device, ifExpr, outerMapInfos, deviceAddrVars, devicePtrVars);
+  auto targetDataBlock = rewriter.createBlock(&targetDataOp.getRegion());
+ rewriter.create<mlir::omp::TerminatorOp>(loc);
+  rewriter.setInsertionPointToStart(targetDataBlock);
+ // Create the inner target op
+ auto newTargetOp = rewriter.create<omp::TargetOp>(
+ targetOp.getLoc(), targetOp.getAllocateVars(),
+ targetOp.getAllocatorVars(), targetOp.getBareAttr(),
+ targetOp.getDependKindsAttr(), targetOp.getDependVars(),
+ targetOp.getDevice(), targetOp.getHasDeviceAddrVars(),
+ targetOp.getHostEvalVars(), targetOp.getIfExpr(),
+ targetOp.getInReductionVars(), targetOp.getInReductionByrefAttr(),
+ targetOp.getInReductionSymsAttr(), targetOp.getIsDevicePtrVars(),
+ innerMapInfos, targetOp.getNowaitAttr(), targetOp.getPrivateVars(),
+ targetOp.getPrivateSymsAttr(), targetOp.getPrivateNeedsBarrierAttr(),
+ targetOp.getThreadLimit(), targetOp.getPrivateMapsAttr());
+ rewriter.inlineRegionBefore(targetOp.getRegion(), newTargetOp.getRegion(),
+ newTargetOp.getRegion().begin());
+ rewriter.replaceOp(targetOp, targetDataOp);
+ return newTargetOp;
+}
+
+/// getNestedOpToIsolate function is designed to identify a specific omp.teams
+/// op within the body of an omp::TargetOp that should be "isolated".
+/// It returns a tuple of the op, whether it is the first op in the target
+/// block, and whether it is the last op in the target block.
+static std::optional<std::tuple<Operation *, bool, bool>>
+getNestedOpToIsolate(omp::TargetOp targetOp) {
+ if (targetOp.getRegion().empty())
+ return std::nullopt;
+ auto *targetBlock = &targetOp.getRegion().front();
+ for (auto &op : *targetBlock) {
+ bool first = &op == &*targetBlock->begin();
+ bool last = op.getNextNode() == targetBlock->getTerminator();
+ if (first && last)
+ return std::nullopt;
+
+ if (isa<omp::TeamsOp>(&op))
+ return {{&op, first, last}};
+ }
+ return std::nullopt;
+}
+
+/// Temporary structure to hold the two mapinfo ops
+struct TempOmpVar {
+ omp::MapInfoOp from, to;
+};
+
+/// isPtr checks if the type is a pointer or reference type.
+static bool isPtr(Type ty) {
+ return isa<fir::ReferenceType>(ty) || isa<LLVM::LLVMPointerType>(ty);
+}
+
+/// getPtrTypeForOmp returns an opaque LLVM pointer type if the given type is
+/// already a pointer, and a fir.ref of the type otherwise.
+static Type getPtrTypeForOmp(Type ty) {
+ if (isPtr(ty))
+ return LLVM::LLVMPointerType::get(ty.getContext());
+ else
+ return fir::ReferenceType::get(ty);
+}
+
+/// allocateTempOmpVar allocates a temporary variable for OpenMP mapping
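+/// It returns two omp.map_info ops over the same alloca, one flagged
+/// OMP_MAP_FROM and one flagged OMP_MAP_TO, so callers can pick the direction
+/// in which the temporary is transferred.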
+static TempOmpVar allocateTempOmpVar(Location loc, Type ty,
+ RewriterBase &rewriter) {
+ MLIRContext &ctx = *ty.getContext();
+ Value alloc;
+ Type allocType;
+ auto llvmPtrTy = LLVM::LLVMPointerType::get(&ctx);
+ // Get the appropriate type for allocation
+ if (isPtr(ty)) {
+ Type intTy = rewriter.getI32Type();
+ auto one = rewriter.create<LLVM::ConstantOp>(loc, intTy, 1);
+ allocType = llvmPtrTy;
+ alloc = rewriter.create<LLVM::AllocaOp>(loc, llvmPtrTy, allocType, one);
+ allocType = intTy;
+ } else {
+ allocType = ty;
+ alloc = rewriter.create<fir::AllocaOp>(loc, allocType);
+ }
+ // Lambda to create mapinfo ops
+ auto getMapInfo = [&](uint64_t mappingFlags, const char *name) {
+ return rewriter.create<omp::MapInfoOp>(
+ loc, alloc.getType(), alloc, TypeAttr::get(allocType),
+ rewriter.getIntegerAttr(rewriter.getIntegerType(64, /*isSigned=*/false),
+ mappingFlags),
+ rewriter.getAttr<omp::VariableCaptureKindAttr>(
+ omp::VariableCaptureKind::ByRef),
+ /*varPtrPtr=*/Value{},
+ /*members=*/SmallVector<Value>{},
+ /*member_index=*/mlir::ArrayAttr{},
+ /*bounds=*/ValueRange(),
+ /*mapperId=*/mlir::FlatSymbolRefAttr(),
+ /*name=*/rewriter.getStringAttr(name), rewriter.getBoolAttr(false));
+ };
+ // Create mapinfo ops.
+ uint64_t mapFrom =
+ static_cast<std::underlying_type_t<llvm::omp::OpenMPOffloadMappingFlags>>(
+ llvm::omp::OpenMPOffloadMappingFlags::OMP_MAP_FROM);
+ uint64_t mapTo =
+ static_cast<std::underlying_type_t<llvm::omp::OpenMPOffloadMappingFlags>>(
+ llvm::omp::OpenMPOffloadMappingFlags::OMP_MAP_TO);
+ auto mapInfoFrom = getMapInfo(mapFrom, "__flang_workdistribute_from");
+ auto mapInfoTo = getMapInfo(mapTo, "__flang_workdistribute_to");
+ return TempOmpVar{mapInfoFrom, mapInfoTo};
+}
+
+// usedOutsideSplit checks if a value is used outside the split operation.
+static bool usedOutsideSplit(Value v, Operation *split) {
+ if (!split)
+ return false;
+ auto targetOp = cast<omp::TargetOp>(split->getParentOp());
+ auto *targetBlock = &targetOp.getRegion().front();
+ for (auto *user : v.getUsers()) {
+ while (user->getBlock() != targetBlock) {
+ user = user->getParentOp();
+ }
+ if (!user->isBeforeInBlock(split))
+ return true;
+ }
+ return false;
+}
+
+/// isRecomputableAfterFission checks if an operation can be recomputed
+static bool isRecomputableAfterFission(Operation *op, Operation *splitBefore) {
+ // If the op has side effects, it cannot be recomputed.
+ // We consider fir.declare as having no side effects.
+ return isa<fir::DeclareOp>(op) || isMemoryEffectFree(op);
+}
+
+/// collectNonRecomputableDeps collects dependencies that cannot be recomputed
+static void collectNonRecomputableDeps(Value &v, omp::TargetOp targetOp,
+ SetVector<Operation *> &nonRecomputable,
+ SetVector<Operation *> &toCache,
+ SetVector<Operation *> &toRecompute) {
+ Operation *op = v.getDefiningOp();
+ // If v is a block argument, it must be from the targetOp.
+ if (!op) {
+ assert(cast<BlockArgument>(v).getOwner()->getParentOp() == targetOp);
+ return;
+ }
+ // If the op is in the nonRecomputable set, add it to toCache and return.
+ if (nonRecomputable.contains(op)) {
+ toCache.insert(op);
+ return;
+ }
+ // Add the op to toRecompute.
+ toRecompute.insert(op);
+ for (auto opr : op->getOperands())
+ collectNonRecomputableDeps(opr, targetOp, nonRecomputable, toCache,
+ toRecompute);
+}
+
+/// createBlockArgsAndMap adds block arguments to the new target block
+/// (host_eval vars first, then map operands, then private vars) and records
+/// the mapping from the original values in irMapping.
+static void createBlockArgsAndMap(Location loc, RewriterBase &rewriter,
+ omp::TargetOp &targetOp, Block *targetBlock,
+ Block *newTargetBlock,
+ SmallVector<Value> &hostEvalVars,
+ SmallVector<Value> &mapOperands,
+ SmallVector<Value> &allocs,
+ IRMapping &irMapping) {
+ // FIRST: Map `host_eval_vars` to block arguments
+ unsigned originalHostEvalVarsSize = targetOp.getHostEvalVars().size();
+ for (unsigned i = 0; i < hostEvalVars.size(); ++i) {
+ Value originalValue;
+ BlockArgument newArg;
+ if (i < originalHostEvalVarsSize) {
+ originalValue = targetBlock->getArgument(i); // Host_eval args come first
+ newArg = newTargetBlock->addArgument(originalValue.getType(),
+ originalValue.getLoc());
+ } else {
+ originalValue = hostEvalVars[i];
+ newArg = newTargetBlock->addArgument(originalValue.getType(),
+ originalValue.getLoc());
+ }
+ irMapping.map(originalValue, newArg);
+ }
+
+ // SECOND: Map `map_operands` to block arguments
+ unsigned originalMapVarsSize = targetOp.getMapVars().size();
+ for (unsigned i = 0; i < mapOperands.size(); ++i) {
+ Value originalValue;
+ BlockArgument newArg;
+ // Map the new arguments from the original block.
+ if (i < originalMapVarsSize) {
+ originalValue = targetBlock->getArgument(originalHostEvalVarsSize +
+ i); // Offset by host_eval count
+ newArg = newTargetBlock->addArgument(originalValue.getType(),
+ originalValue.getLoc());
+ }
+ // Map the new arguments from the `allocs`.
+ else {
+ originalValue = allocs[i - originalMapVarsSize];
+ newArg = newTargetBlock->addArgument(
+ getPtrTypeForOmp(originalValue.getType()), originalValue.getLoc());
+ }
+ irMapping.map(originalValue, newArg);
+ }
+
+ // THIRD: Map `private_vars` to block arguments (if any)
+ unsigned originalPrivateVarsSize = targetOp.getPrivateVars().size();
+ for (unsigned i = 0; i < originalPrivateVarsSize; ++i) {
+ auto originalArg = targetBlock->getArgument(originalHostEvalVarsSize +
+ originalMapVarsSize + i);
+ auto newArg = newTargetBlock->addArgument(originalArg.getType(),
+ originalArg.getLoc());
+ irMapping.map(originalArg, newArg);
+ }
+ return;
+}
+
+/// reloadCacheAndRecompute reloads cached values and recomputes operations
+static void reloadCacheAndRecompute(
+ Location loc, RewriterBase &rewriter, Operation *splitBefore,
+ omp::TargetOp &targetOp, Block *targetBlock, Block *newTargetBlock,
+ SmallVector<Value> &hostEvalVars, SmallVector<Value> &mapOperands,
+ SmallVector<Value> &allocs, SetVector<Operation *> &toRecompute,
+ IRMapping &irMapping) {
+ // Handle the load operations for the allocs.
+ rewriter.setInsertionPointToStart(newTargetBlock);
+ auto llvmPtrTy = LLVM::LLVMPointerType::get(targetOp.getContext());
+
+ unsigned originalMapVarsSize = targetOp.getMapVars().size();
+ unsigned hostEvalVarsSize = hostEvalVars.size();
+ // Create load operations for each allocated variable.
+ for (unsigned i = 0; i < allocs.size(); ++i) {
+ Value original = allocs[i];
+ // Get the new block argument for this specific allocated value.
+ Value newArg =
+ newTargetBlock->getArgument(hostEvalVarsSize + originalMapVarsSize + i);
+ Value restored;
+ // If the original value is a pointer or reference, load and convert if
+ // necessary.
+ if (isPtr(original.getType())) {
+ restored = rewriter.create<LLVM::LoadOp>(loc, llvmPtrTy, newArg);
+ if (!isa<LLVM::LLVMPointerType>(original.getType()))
+ restored =
+ rewriter.create<fir::ConvertOp>(loc, original.getType(), restored);
+ } else {
+ restored = rewriter.create<fir::LoadOp>(loc, newArg);
+ }
+ irMapping.map(original, restored);
+ }
+ // Clone the operations if they are in the toRecompute set.
+ for (auto it = targetBlock->begin(); it != splitBefore->getIterator(); it++) {
+ if (toRecompute.contains(&*it))
+ rewriter.clone(*it, irMapping);
+ }
+}
+
+/// Given a teamsOp, navigate down the nested structure to find the
+/// innermost LoopNestOp. The expected nesting is:
+/// teams -> parallel -> distribute -> wsloop -> loop_nest
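+///
+/// i.e. it expects a nest of the following shape (sketch):
+///
+///   omp.teams {
+///     omp.parallel {
+///       omp.distribute {
+///         omp.wsloop {
+///           omp.loop_nest ...
+///         }
+///       }
+///     }
+///   }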
+static mlir::omp::LoopNestOp getLoopNestFromTeams(mlir::omp::TeamsOp teamsOp) {
+ if (teamsOp.getRegion().empty())
+ return nullptr;
+ // Ensure the teams region has a single block.
+ if (teamsOp.getRegion().getBlocks().size() != 1)
+ return nullptr;
+ // Find parallel op inside teams
+ mlir::omp::ParallelOp parallelOp = nullptr;
+ // Look for the parallel op in the teams region
+ for (auto &op : teamsOp.getRegion().front()) {
+ if (auto parallel = dyn_cast<mlir::omp::ParallelOp>(op)) {
+ parallelOp = parallel;
+ break;
+ }
+ }
+ if (!parallelOp)
+ return nullptr;
+
+ // Find distribute op inside parallel
+ mlir::omp::DistributeOp distributeOp = nullptr;
+ for (auto &op : parallelOp.getRegion().front()) {
+ if (auto distribute = dyn_cast<mlir::omp::DistributeOp>(op)) {
+ distributeOp = distribute;
+ break;
+ }
+ }
+ if (!distributeOp)
+ return nullptr;
+
+ // Find wsloop op inside distribute
+ mlir::omp::WsloopOp wsloopOp = nullptr;
+ for (auto &op : distributeOp.getRegion().front()) {
+ if (auto wsloop = dyn_cast<mlir::omp::WsloopOp>(op)) {
+ wsloopOp = wsloop;
+ break;
+ }
+ }
+ if (!wsloopOp)
+ return nullptr;
+
+ // Find loop_nest op inside wsloop
+ for (auto &op : wsloopOp.getRegion().front()) {
+ if (auto loopNest = dyn_cast<mlir::omp::LoopNestOp>(op)) {
+ return loopNest;
+ }
+ }
+
+ return nullptr;
+}
+
+/// Generate LLVM constant operations for i32 and i64 types.
+static mlir::LLVM::ConstantOp
+genI32Constant(mlir::Location loc, mlir::RewriterBase &rewriter, int value) {
+ mlir::Type i32Ty = rewriter.getI32Type();
+ mlir::IntegerAttr attr = rewriter.getI32IntegerAttr(value);
+ return rewriter.create<mlir::LLVM::ConstantOp>(loc, i32Ty, attr);
+}
+
+/// Given a box descriptor, extract the base address of the data it describes.
+/// If the box descriptor is a reference, load it first.
+/// The base address is returned as an i8* pointer.
+static Value genDescriptorGetBaseAddress(fir::FirOpBuilder &builder,
+ Location loc, Value boxDesc) {
+ Value box = boxDesc;
+ if (auto refBox = dyn_cast<fir::ReferenceType>(boxDesc.getType())) {
+ box = fir::LoadOp::create(builder, loc, boxDesc);
+ }
+ assert(isa<fir::BoxType>(box.getType()) &&
+ "Unknown type passed to genDescriptorGetBaseAddress");
+ auto i8Type = builder.getI8Type();
+ auto unknownArrayType =
+ fir::SequenceType::get({fir::SequenceType::getUnknownExtent()}, i8Type);
+ auto i8BoxType = fir::BoxType::get(unknownArrayType);
+ auto typedBox = fir::ConvertOp::create(builder, loc, i8BoxType, box);
+ auto rawAddr = fir::BoxAddrOp::create(builder, loc, typedBox);
+ return rawAddr;
+}
+
+/// Given a box descriptor, extract the total number of elements in the array it
+/// describes. If the box descriptor is a reference, load it first.
+/// The total number of elements is returned as an i64 value.
+static Value genDescriptorGetTotalElements(fir::FirOpBuilder &builder,
+ Location loc, Value boxDesc) {
+ Value box = boxDesc;
+ if (auto refBox = dyn_cast<fir::ReferenceType>(boxDesc.getType())) {
+ box = fir::LoadOp::create(builder, loc, boxDesc);
+ }
+ assert(isa<fir::BoxType>(box.getType()) &&
+ "Unknown type passed to genDescriptorGetTotalElements");
+ auto i64Type = builder.getI64Type();
+ return fir::BoxTotalElementsOp::create(builder, loc, i64Type, box);
+}
+
+/// Given a box descriptor, extract the size of each element in the array it
+/// describes. If the box descriptor is a reference, load it first.
+/// The element size is returned as an i64 value.
+static Value genDescriptorGetEleSize(fir::FirOpBuilder &builder, Location loc,
+ Value boxDesc) {
+ Value box = boxDesc;
+ if (auto refBox = dyn_cast<fir::ReferenceType>(boxDesc.getType())) {
+ box = fir::LoadOp::create(builder, loc, boxDesc);
+ }
+  assert(isa<fir::BoxType>(box.getType()) &&
+         "Unknown type passed to genDescriptorGetEleSize");
+ auto i64Type = builder.getI64Type();
+ return fir::BoxEleSizeOp::create(builder, loc, i64Type, box);
+}
+
+/// Given a box descriptor, compute the total size in bytes of the data it
+/// describes. This is done by multiplying the total number of elements by the
+/// size of each element. If the box descriptor is a reference, load it first.
+/// The total size in bytes is returned as an i64 value.
+static Value genDescriptorGetDataSizeInBytes(fir::FirOpBuilder &builder,
+ Location loc, Value boxDesc) {
+ Value box = boxDesc;
+ if (auto refBox = dyn_cast<fir::ReferenceType>(boxDesc.getType())) {
+ box = fir::LoadOp::create(builder, loc, boxDesc);
+ }
+  assert(isa<fir::BoxType>(box.getType()) &&
+         "Unknown type passed to genDescriptorGetDataSizeInBytes");
+ Value eleSize = genDescriptorGetEleSize(builder, loc, box);
+ Value totalElements = genDescriptorGetTotalElements(builder, loc, box);
+ return mlir::arith::MulIOp::create(builder, loc, totalElements, eleSize);
+}
+
+/// Generate a call to the OpenMP runtime function `omp_get_mapped_ptr` to
+/// retrieve the device pointer corresponding to a given host pointer and device
+/// number. If no mapping exists, the original host pointer is returned.
+/// Signature:
+/// void *omp_get_mapped_ptr(void *host_ptr, int device_num);
+static mlir::Value genOmpGetMappedPtrIfPresent(fir::FirOpBuilder &builder,
+ mlir::Location loc,
+ mlir::Value hostPtr,
+ mlir::Value deviceNum,
+ mlir::ModuleOp module) {
+ auto *context = builder.getContext();
+ auto voidPtrType = fir::LLVMPointerType::get(context, builder.getI8Type());
+ auto i32Type = builder.getI32Type();
+ auto funcName = "omp_get_mapped_ptr";
+ auto funcOp = module.lookupSymbol<mlir::func::FuncOp>(funcName);
+
+ if (!funcOp) {
+ auto funcType =
+ mlir::FunctionType::get(context, {voidPtrType, i32Type}, {voidPtrType});
+
+ mlir::OpBuilder::InsertionGuard guard(builder);
+ builder.setInsertionPointToStart(module.getBody());
+
+ funcOp = mlir::func::FuncOp::create(builder, loc, funcName, funcType);
+ funcOp.setPrivate();
+ }
+
+ llvm::SmallVector<mlir::Value> args;
+ args.push_back(fir::ConvertOp::create(builder, loc, voidPtrType, hostPtr));
+ args.push_back(fir::ConvertOp::create(builder, loc, i32Type, deviceNum));
+ auto callOp = fir::CallOp::create(builder, loc, funcOp, args);
+ auto mappedPtr = callOp.getResult(0);
+ auto isNull = builder.genIsNullAddr(loc, mappedPtr);
+ auto convertedHostPtr =
+ fir::ConvertOp::create(builder, loc, voidPtrType, hostPtr);
+ auto result = arith::SelectOp::create(builder, loc, isNull, convertedHostPtr,
+ mappedPtr);
+ return result;
+}
+
+/// Generate a call to the OpenMP runtime function `omp_target_memcpy` to
+/// perform memory copy between host and device or between devices.
+/// Signature:
+/// int omp_target_memcpy(void *dst, const void *src, size_t length,
+/// size_t dst_offset, size_t src_offset,
+/// int dst_device, int src_device);
+static void genOmpTargetMemcpyCall(fir::FirOpBuilder &builder,
+ mlir::Location loc, mlir::Value dst,
+ mlir::Value src, mlir::Value length,
+ mlir::Value dstOffset, mlir::Value srcOffset,
+ mlir::Value device, mlir::ModuleOp module) {
+ auto *context = builder.getContext();
+ auto funcName = "omp_target_memcpy";
+ auto voidPtrType = fir::LLVMPointerType::get(context, builder.getI8Type());
+ auto sizeTType = builder.getI64Type(); // assuming size_t is 64-bit
+ auto i32Type = builder.getI32Type();
+ auto funcOp = module.lookupSymbol<mlir::func::FuncOp>(funcName);
+
+ if (!funcOp) {
+ mlir::OpBuilder::InsertionGuard guard(builder);
+ builder.setInsertionPointToStart(module.getBody());
+ llvm::SmallVector<mlir::Type> argTypes = {
+ voidPtrType, voidPtrType, sizeTType, sizeTType,
+ sizeTType, i32Type, i32Type};
+ auto funcType = mlir::FunctionType::get(context, argTypes, {i32Type});
+ funcOp = mlir::func::FuncOp::create(builder, loc, funcName, funcType);
+ funcOp.setPrivate();
+ }
+
+ llvm::SmallVector<mlir::Value> args{dst, src, length, dstOffset,
+ srcOffset, device, device};
+ fir::CallOp::create(builder, loc, funcOp, args);
+}
+
+/// Generate code to replace a Fortran array assignment call with OpenMP
+/// runtime calls to perform the equivalent operation on the device.
+/// This involves extracting the source and destination pointers from the
+/// Fortran array descriptors, retrieving their mapped device pointers (if any),
+/// and invoking `omp_target_memcpy` to copy the data on the device.
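+///
+/// Conceptually (an illustrative sketch, not exact IR):
+///   fir.call @_FortranAAssign(%dest_box, %src_box, ...)
+/// becomes
+///   %dst   = omp_get_mapped_ptr(base_addr(%dest_box), %device)
+///   %src   = omp_get_mapped_ptr(base_addr(%src_box), %device)
+///   %bytes = total_elements(%src_box) * element_size(%src_box)
+///   omp_target_memcpy(%dst, %src, %bytes, /*dst_off=*/0, /*src_off=*/0,
+///                     %device, %device)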
+static void genFortranAssignOmpReplacement(fir::FirOpBuilder &builder,
+ mlir::Location loc,
+ fir::CallOp callOp,
+ mlir::Value device,
+ mlir::ModuleOp module) {
+ assert(callOp.getNumResults() == 0 &&
+ "Expected _FortranAAssign to have no results");
+ assert(callOp.getNumOperands() >= 2 &&
+ "Expected _FortranAAssign to have at least two operands");
+
+ // Extract the source and destination pointers from the call operands.
+ mlir::Value dest = callOp.getOperand(0);
+ mlir::Value src = callOp.getOperand(1);
+
+ // Get the base addresses of the source and destination arrays.
+ mlir::Value srcBase = genDescriptorGetBaseAddress(builder, loc, src);
+ mlir::Value destBase = genDescriptorGetBaseAddress(builder, loc, dest);
+
+ // Get the total size in bytes of the data to be copied.
+ mlir::Value srcDataSize = genDescriptorGetDataSizeInBytes(builder, loc, src);
+
+ // Retrieve the mapped device pointers for source and destination.
+ // If no mapping exists, the original host pointer is used.
+ Value destPtr =
+ genOmpGetMappedPtrIfPresent(builder, loc, destBase, device, module);
+ Value srcPtr =
+ genOmpGetMappedPtrIfPresent(builder, loc, srcBase, device, module);
+ Value zero = builder.create<LLVM::ConstantOp>(loc, builder.getI64Type(),
+ builder.getI64IntegerAttr(0));
+
+ // Generate the call to omp_target_memcpy to perform the data copy on the
+ // device.
+ genOmpTargetMemcpyCall(builder, loc, destPtr, srcPtr, srcDataSize, zero, zero,
+ device, module);
+}
+
+/// Struct to hold the host_eval vars corresponding to loop bounds and steps.
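+/// For a single loop `do i = lb, ub, step` the expectation is lbs = {lb},
+/// ubs = {ub}, steps = {step}; multi-dimensional loop nests append one entry
+/// per dimension.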
+struct HostEvalVars {
+ SmallVector<Value> lbs;
+ SmallVector<Value> ubs;
+ SmallVector<Value> steps;
+};
+
+/// moveToHost clones all ops from the target region to the host, just before
+/// the target op. Calls to the runtime function "_FortranAAssign" are replaced
+/// with OpenMP runtime equivalents, fir.allocmem is replaced with
+/// omp.target_allocmem, and fir.freemem with omp.target_freemem.
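+///
+/// A simplified before/after sketch (illustrative only, not exact IR):
+///   omp.target {
+///     %p = fir.allocmem !fir.array<?xf32>, %n
+///     fir.call @_FortranAAssign(%dst, %src, ...)
+///     fir.freemem %p
+///   }
+/// becomes host-side code along the lines of
+///   %p = omp.target_allocmem %device, !fir.array<?xf32>, %n
+///   <omp_get_mapped_ptr / omp_target_memcpy replacement of the assign>
+///   omp.target_freemem %device, %p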
+static LogicalResult moveToHost(omp::TargetOp targetOp, RewriterBase &rewriter,
+ mlir::ModuleOp module,
+ struct HostEvalVars &hostEvalVars) {
+ OpBuilder::InsertionGuard guard(rewriter);
+ Block *targetBlock = &targetOp.getRegion().front();
+ assert(targetBlock == &targetOp.getRegion().back());
+ IRMapping mapping;
+
+ // Get the parent target_data op
+  auto targetDataOp = dyn_cast<omp::TargetDataOp>(targetOp->getParentOp());
+ if (!targetDataOp) {
+ emitError(targetOp->getLoc(),
+ "Expected target op to be inside target_data op");
+ return failure();
+ }
+ // create mapping for host_eval_vars
+ unsigned hostEvalVarCount = targetOp.getHostEvalVars().size();
+  for (unsigned i = 0; i < hostEvalVarCount; ++i) {
+ Value hostEvalVar = targetOp.getHostEvalVars()[i];
+ BlockArgument arg = targetBlock->getArguments()[i];
+ mapping.map(arg, hostEvalVar);
+ }
+ // create mapping for map_vars
+ for (unsigned i = 0; i < targetOp.getMapVars().size(); ++i) {
+ Value mapInfo = targetOp.getMapVars()[i];
+ BlockArgument arg = targetBlock->getArguments()[hostEvalVarCount + i];
+ Operation *op = mapInfo.getDefiningOp();
+ assert(op);
+ auto mapInfoOp = cast<omp::MapInfoOp>(op);
+ // map the block argument to the host-side variable pointer
+ mapping.map(arg, mapInfoOp.getVarPtr());
+ }
+ // create mapping for private_vars
+ unsigned mapSize = targetOp.getMapVars().size();
+ for (unsigned i = 0; i < targetOp.getPrivateVars().size(); ++i) {
+ Value privateVar = targetOp.getPrivateVars()[i];
+    BlockArgument arg =
+        targetBlock->getArguments()[hostEvalVarCount + mapSize + i];
+    // Map the device-side copy (`arg`) to the host-side value (`privateVar`).
+    mapping.map(arg, privateVar);
+ }
+
+ rewriter.setInsertionPoint(targetOp);
+ SmallVector<Operation *> opsToReplace;
+ Value device = targetOp.getDevice();
+
+ // If device is not specified, default to device 0.
+ if (!device) {
+ device = genI32Constant(targetOp.getLoc(), rewriter, 0);
+ }
+ // Clone all operations.
+ for (auto it = targetBlock->begin(), end = std::prev(targetBlock->end());
+ it != end; ++it) {
+ auto *op = &*it;
+ Operation *clonedOp = rewriter.clone(*op, mapping);
+ // Map the results of the original op to the cloned op.
+ for (unsigned i = 0; i < op->getNumResults(); ++i) {
+ mapping.map(op->getResult(i), clonedOp->getResult(i));
+ }
+    // fir.declare changes its type when hoisted out of omp.target into
+    // omp.target_data. Introduce a load if the original declareOp input is not
+    // of reference type, but the cloned declareOp input is.
+ if (fir::DeclareOp clonedDeclareOp = dyn_cast<fir::DeclareOp>(clonedOp)) {
+ auto originalDeclareOp = cast<fir::DeclareOp>(op);
+ Type originalInType = originalDeclareOp.getMemref().getType();
+ Type clonedInType = clonedDeclareOp.getMemref().getType();
+
+ fir::ReferenceType originalRefType =
+ dyn_cast<fir::ReferenceType>(originalInType);
+ fir::ReferenceType clonedRefType =
+ dyn_cast<fir::ReferenceType>(clonedInType);
+ if (!originalRefType && clonedRefType) {
+ Type clonedEleTy = clonedRefType.getElementType();
+ if (clonedEleTy == originalDeclareOp.getType()) {
+ opsToReplace.push_back(clonedOp);
+ }
+ }
+ }
+ // Collect the ops to be replaced.
+ if (isa<fir::AllocMemOp>(clonedOp) || isa<fir::FreeMemOp>(clonedOp))
+ opsToReplace.push_back(clonedOp);
+ // Check for runtime calls to be replaced.
+ if (isRuntimeCall(clonedOp)) {
+ fir::CallOp runtimeCall = cast<fir::CallOp>(op);
+ auto funcName = runtimeCall.getCallee()->getRootReference().getValue();
+ if (funcName == FortranAssignStr) {
+ opsToReplace.push_back(clonedOp);
+ } else {
+ emitError(runtimeCall->getLoc(), "Unhandled runtime call hoisting.");
+ return failure();
+ }
+ }
+ }
+ // Replace fir.allocmem with omp.target_allocmem.
+ for (Operation *op : opsToReplace) {
+ if (auto allocOp = dyn_cast<fir::AllocMemOp>(op)) {
+ rewriter.setInsertionPoint(allocOp);
+ auto ompAllocmemOp = rewriter.create<omp::TargetAllocMemOp>(
+ allocOp.getLoc(), rewriter.getI64Type(), device,
+ allocOp.getInTypeAttr(), allocOp.getUniqNameAttr(),
+ allocOp.getBindcNameAttr(), allocOp.getTypeparams(),
+ allocOp.getShape());
+ auto firConvertOp = rewriter.create<fir::ConvertOp>(
+ allocOp.getLoc(), allocOp.getResult().getType(),
+ ompAllocmemOp.getResult());
+ rewriter.replaceOp(allocOp, firConvertOp.getResult());
+ }
+ // Replace fir.freemem with omp.target_freemem.
+ else if (auto freeOp = dyn_cast<fir::FreeMemOp>(op)) {
+ rewriter.setInsertionPoint(freeOp);
+ auto firConvertOp = rewriter.create<fir::ConvertOp>(
+ freeOp.getLoc(), rewriter.getI64Type(), freeOp.getHeapref());
+ rewriter.create<omp::TargetFreeMemOp>(freeOp.getLoc(), device,
+ firConvertOp.getResult());
+ rewriter.eraseOp(freeOp);
+ }
+    // fir.declare changes its type when hoisted out of omp.target into
+    // omp.target_data. Introduce a load if the original declareOp input is not
+    // of reference type, but the cloned declareOp input is.
+ else if (fir::DeclareOp clonedDeclareOp = dyn_cast<fir::DeclareOp>(op)) {
+ Type clonedInType = clonedDeclareOp.getMemref().getType();
+ fir::ReferenceType clonedRefType =
+ dyn_cast<fir::ReferenceType>(clonedInType);
+ Type clonedEleTy = clonedRefType.getElementType();
+ rewriter.setInsertionPoint(op);
+ Value loadedValue = rewriter.create<fir::LoadOp>(
+ clonedDeclareOp.getLoc(), clonedEleTy, clonedDeclareOp.getMemref());
+ clonedDeclareOp.getResult().replaceAllUsesWith(loadedValue);
+ }
+ // Replace runtime calls with omp versions.
+ else if (isRuntimeCall(op)) {
+ fir::CallOp runtimeCall = cast<fir::CallOp>(op);
+ auto funcName = runtimeCall.getCallee()->getRootReference().getValue();
+ if (funcName == FortranAssignStr) {
+ rewriter.setInsertionPoint(op);
+ fir::FirOpBuilder builder{rewriter, op};
+
+ mlir::Location loc = runtimeCall.getLoc();
+ genFortranAssignOmpReplacement(builder, loc, runtimeCall, device,
+ module);
+ rewriter.eraseOp(op);
+ } else {
+ emitError(runtimeCall->getLoc(), "Unhandled runtime call hoisting.");
+ return failure();
+ }
+ } else {
+ emitError(op->getLoc(), "Unhandled op hoisting.");
+ return failure();
+ }
+ }
+
+ // Update the host_eval_vars to use the mapped values.
+ for (size_t i = 0; i < hostEvalVars.lbs.size(); ++i) {
+ hostEvalVars.lbs[i] = mapping.lookup(hostEvalVars.lbs[i]);
+ hostEvalVars.ubs[i] = mapping.lookup(hostEvalVars.ubs[i]);
+ hostEvalVars.steps[i] = mapping.lookup(hostEvalVars.steps[i]);
+ }
+ // Finally erase the original targetOp.
+ rewriter.eraseOp(targetOp);
+ return success();
+}
+
+/// Result of isolateOp method
+struct SplitResult {
+ omp::TargetOp preTargetOp;
+ omp::TargetOp isolatedTargetOp;
+ omp::TargetOp postTargetOp;
+};
+
+/// computeAllocsCacheRecomputable determines which values defined before the
+/// split point are still used after it, allocates temporaries to cache the
+/// ones that cannot be recomputed, and partitions their defining ops into
+/// those to cache and those to recompute after the split.
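+///
+/// For example (illustrative), when splitting before `%use`:
+///   %c   = arith.constant 4 : index   // recomputable, cloned after the split
+///   %b   = fir.call @foo(...)         // not recomputable, cached in a temp
+///   ... split point ...
+///   %use = ... %c ..., %b ...
+/// %c is simply re-materialized in the post-split region, while %b is stored
+/// to a mapped temporary before the split and reloaded afterwards.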
+static void computeAllocsCacheRecomputable(
+ omp::TargetOp targetOp, Operation *splitBeforeOp, RewriterBase &rewriter,
+ SmallVector<Value> &preMapOperands, SmallVector<Value> &postMapOperands,
+ SmallVector<Value> &allocs, SmallVector<Value> &requiredVals,
+ SetVector<Operation *> &nonRecomputable, SetVector<Operation *> &toCache,
+ SetVector<Operation *> &toRecompute) {
+ auto *targetBlock = &targetOp.getRegion().front();
+ // Find all values that are used outside the split point.
+ for (auto it = targetBlock->begin(); it != splitBeforeOp->getIterator();
+ it++) {
+ // Check if any of the results are used outside the split point.
+ for (auto res : it->getResults()) {
+ if (usedOutsideSplit(res, splitBeforeOp)) {
+ requiredVals.push_back(res);
+ }
+ }
+ // If the op is not recomputable, add it to the nonRecomputable set.
+ if (!isRecomputableAfterFission(&*it, splitBeforeOp)) {
+ nonRecomputable.insert(&*it);
+ }
+ }
+ // For each required value, collect its dependencies.
+ for (auto requiredVal : requiredVals)
+ collectNonRecomputableDeps(requiredVal, targetOp, nonRecomputable, toCache,
+ toRecompute);
+ // For each op in toCache, create an alloc and update the pre and post map
+ // operands.
+ for (Operation *op : toCache) {
+ for (auto res : op->getResults()) {
+ auto alloc =
+ allocateTempOmpVar(targetOp.getLoc(), res.getType(), rewriter);
+ allocs.push_back(res);
+ preMapOperands.push_back(alloc.from);
+ postMapOperands.push_back(alloc.to);
+ }
+ }
+}
+
+/// genPreTargetOp generates the preTargetOp containing all ops before the
+/// split point, creates its block arguments and value mappings, and emits the
+/// stores that cache the values required after the split.
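+///
+/// The caching stores emitted at the end of the pre region look roughly like
+/// (illustrative only):
+///   llvm.store %cachedPtr, %cacheArg      // pointer-typed values
+///   fir.store %cachedVal to %cacheArg     // all other values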
+static omp::TargetOp
+genPreTargetOp(omp::TargetOp targetOp, SmallVector<Value> &preMapOperands,
+ SmallVector<Value> &allocs, Operation *splitBeforeOp,
+ RewriterBase &rewriter, struct HostEvalVars &hostEvalVars,
+ bool isTargetDevice) {
+ auto loc = targetOp.getLoc();
+ auto *targetBlock = &targetOp.getRegion().front();
+ SmallVector<Value> preHostEvalVars{targetOp.getHostEvalVars()};
+ // update the hostEvalVars of preTargetOp
+ omp::TargetOp preTargetOp = rewriter.create<omp::TargetOp>(
+ targetOp.getLoc(), targetOp.getAllocateVars(),
+ targetOp.getAllocatorVars(), targetOp.getBareAttr(),
+ targetOp.getDependKindsAttr(), targetOp.getDependVars(),
+ targetOp.getDevice(), targetOp.getHasDeviceAddrVars(), preHostEvalVars,
+ targetOp.getIfExpr(), targetOp.getInReductionVars(),
+ targetOp.getInReductionByrefAttr(), targetOp.getInReductionSymsAttr(),
+ targetOp.getIsDevicePtrVars(), preMapOperands, targetOp.getNowaitAttr(),
+ targetOp.getPrivateVars(), targetOp.getPrivateSymsAttr(),
+ targetOp.getPrivateNeedsBarrierAttr(), targetOp.getThreadLimit(),
+ targetOp.getPrivateMapsAttr());
+ auto *preTargetBlock = rewriter.createBlock(
+ &preTargetOp.getRegion(), preTargetOp.getRegion().begin(), {}, {});
+ IRMapping preMapping;
+ // Create block arguments and map the values.
+ createBlockArgsAndMap(loc, rewriter, targetOp, targetBlock, preTargetBlock,
+ preHostEvalVars, preMapOperands, allocs, preMapping);
+
+ // Handle the store operations for the allocs.
+ rewriter.setInsertionPointToStart(preTargetBlock);
+ auto llvmPtrTy = LLVM::LLVMPointerType::get(targetOp.getContext());
+
+ // Clone the original operations.
+ for (auto it = targetBlock->begin(); it != splitBeforeOp->getIterator();
+ it++) {
+ rewriter.clone(*it, preMapping);
+ }
+
+ unsigned originalHostEvalVarsSize = preHostEvalVars.size();
+ unsigned originalMapVarsSize = targetOp.getMapVars().size();
+ // Create Stores for allocs.
+ for (unsigned i = 0; i < allocs.size(); ++i) {
+ Value originalResult = allocs[i];
+ Value toStore = preMapping.lookup(originalResult);
+ // Get the new block argument for this specific allocated value.
+ Value newArg = preTargetBlock->getArgument(originalHostEvalVarsSize +
+ originalMapVarsSize + i);
+ // Create the store operation.
+ if (isPtr(originalResult.getType())) {
+ if (!isa<LLVM::LLVMPointerType>(toStore.getType()))
+ toStore = rewriter.create<fir::ConvertOp>(loc, llvmPtrTy, toStore);
+ rewriter.create<LLVM::StoreOp>(loc, toStore, newArg);
+ } else {
+ rewriter.create<fir::StoreOp>(loc, toStore, newArg);
+ }
+ }
+ rewriter.create<omp::TerminatorOp>(loc);
+
+ // Update hostEvalVars with the mapped values for the loop bounds if we have
+ // a loopNestOp and we are not generating code for the target device.
+ omp::LoopNestOp loopNestOp =
+ getLoopNestFromTeams(cast<omp::TeamsOp>(splitBeforeOp));
+ if (loopNestOp && !isTargetDevice) {
+ for (size_t i = 0; i < loopNestOp.getLoopLowerBounds().size(); ++i) {
+ Value lb = loopNestOp.getLoopLowerBounds()[i];
+ Value ub = loopNestOp.getLoopUpperBounds()[i];
+ Value step = loopNestOp.getLoopSteps()[i];
+
+ hostEvalVars.lbs.push_back(preMapping.lookup(lb));
+ hostEvalVars.ubs.push_back(preMapping.lookup(ub));
+ hostEvalVars.steps.push_back(preMapping.lookup(step));
+ }
+ }
+
+ return preTargetOp;
+}
+
+/// genIsolatedTargetOp generates the isolatedTargetOp that contains only the
+/// op at the split point. It creates the block arguments and value mappings,
+/// emits loads for the cached allocs, and recomputes the ops that can be
+/// rematerialized.
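+///
+/// When host_eval values are present (and we are not compiling for the target
+/// device), the loop bounds inside the isolated region are rebound to the
+/// corresponding block arguments, conceptually:
+///   omp.target host_eval(%lb -> %arg_lb, %ub -> %arg_ub, %step -> %arg_step) {
+///     ...
+///     omp.loop_nest (%i) : index = (%arg_lb) to (%arg_ub) step (%arg_step)
+///   }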
+static omp::TargetOp
+genIsolatedTargetOp(omp::TargetOp targetOp, SmallVector<Value> &postMapOperands,
+ Operation *splitBeforeOp, RewriterBase &rewriter,
+ SmallVector<Value> &allocs,
+ SetVector<Operation *> &toRecompute,
+ struct HostEvalVars &hostEvalVars, bool isTargetDevice) {
+ auto loc = targetOp.getLoc();
+ auto *targetBlock = &targetOp.getRegion().front();
+ SmallVector<Value> isolatedHostEvalVars{targetOp.getHostEvalVars()};
+ // update the hostEvalVars of isolatedTargetOp
+ if (!hostEvalVars.lbs.empty() && !isTargetDevice) {
+ isolatedHostEvalVars.append(hostEvalVars.lbs.begin(),
+ hostEvalVars.lbs.end());
+ isolatedHostEvalVars.append(hostEvalVars.ubs.begin(),
+ hostEvalVars.ubs.end());
+ isolatedHostEvalVars.append(hostEvalVars.steps.begin(),
+ hostEvalVars.steps.end());
+ }
+ // Create the isolated target op
+ omp::TargetOp isolatedTargetOp = rewriter.create<omp::TargetOp>(
+ targetOp.getLoc(), targetOp.getAllocateVars(),
+ targetOp.getAllocatorVars(), targetOp.getBareAttr(),
+ targetOp.getDependKindsAttr(), targetOp.getDependVars(),
+ targetOp.getDevice(), targetOp.getHasDeviceAddrVars(),
+ isolatedHostEvalVars, targetOp.getIfExpr(), targetOp.getInReductionVars(),
+ targetOp.getInReductionByrefAttr(), targetOp.getInReductionSymsAttr(),
+ targetOp.getIsDevicePtrVars(), postMapOperands, targetOp.getNowaitAttr(),
+ targetOp.getPrivateVars(), targetOp.getPrivateSymsAttr(),
+ targetOp.getPrivateNeedsBarrierAttr(), targetOp.getThreadLimit(),
+ targetOp.getPrivateMapsAttr());
+ auto *isolatedTargetBlock =
+ rewriter.createBlock(&isolatedTargetOp.getRegion(),
+ isolatedTargetOp.getRegion().begin(), {}, {});
+ IRMapping isolatedMapping;
+ // Create block arguments and map the values.
+ createBlockArgsAndMap(loc, rewriter, targetOp, targetBlock,
+ isolatedTargetBlock, isolatedHostEvalVars,
+ postMapOperands, allocs, isolatedMapping);
+ // Handle the load operations for the allocs and recompute ops.
+ reloadCacheAndRecompute(loc, rewriter, splitBeforeOp, targetOp, targetBlock,
+ isolatedTargetBlock, isolatedHostEvalVars,
+ postMapOperands, allocs, toRecompute,
+ isolatedMapping);
+
+ // Clone the original operations.
+ rewriter.clone(*splitBeforeOp, isolatedMapping);
+ rewriter.create<omp::TerminatorOp>(loc);
+
+ // update the loop bounds in the isolatedTargetOp if we have host_eval vars
+ // and we are not generating code for the target device.
+ if (!hostEvalVars.lbs.empty() && !isTargetDevice) {
+ omp::TeamsOp teamsOp;
+ for (auto &op : *isolatedTargetBlock) {
+ if (isa<omp::TeamsOp>(&op))
+ teamsOp = cast<omp::TeamsOp>(&op);
+ }
+ assert(teamsOp && "No teamsOp found in isolated target region");
+ // Get the loopNestOp inside the teamsOp
+ auto loopNestOp = getLoopNestFromTeams(teamsOp);
+ // Get the BlockArgs related to host_eval vars and update loop_nest bounds
+ // to them
+ unsigned originalHostEvalVarsSize = targetOp.getHostEvalVars().size();
+ unsigned index = originalHostEvalVarsSize;
+ // Replace loop bounds with the block arguments passed down via host_eval
+ SmallVector<Value> lbs, ubs, steps;
+
+ // Collect new lb/ub/step values from target block args
+ for (size_t i = 0; i < hostEvalVars.lbs.size(); ++i)
+ lbs.push_back(isolatedTargetBlock->getArgument(index++));
+
+ for (size_t i = 0; i < hostEvalVars.ubs.size(); ++i)
+ ubs.push_back(isolatedTargetBlock->getArgument(index++));
+
+ for (size_t i = 0; i < hostEvalVars.steps.size(); ++i)
+ steps.push_back(isolatedTargetBlock->getArgument(index++));
+
+ // Reset the loop bounds
+ loopNestOp.getLoopLowerBoundsMutable().assign(lbs);
+ loopNestOp.getLoopUpperBoundsMutable().assign(ubs);
+ loopNestOp.getLoopStepsMutable().assign(steps);
+ }
+
+ return isolatedTargetOp;
+}
+
+/// genPostTargetOp generates the postTargetOp containing all ops after the
+/// split point. It creates the block arguments and value mappings, emits loads
+/// for the cached allocs, and recomputes the ops that can be rematerialized.
+static omp::TargetOp genPostTargetOp(omp::TargetOp targetOp,
+ Operation *splitBeforeOp,
+ SmallVector<Value> &postMapOperands,
+ RewriterBase &rewriter,
+ SmallVector<Value> &allocs,
+ SetVector<Operation *> &toRecompute) {
+ auto loc = targetOp.getLoc();
+ auto *targetBlock = &targetOp.getRegion().front();
+ SmallVector<Value> postHostEvalVars{targetOp.getHostEvalVars()};
+ // Create the post target op
+ omp::TargetOp postTargetOp = rewriter.create<omp::TargetOp>(
+ targetOp.getLoc(), targetOp.getAllocateVars(),
+ targetOp.getAllocatorVars(), targetOp.getBareAttr(),
+ targetOp.getDependKindsAttr(), targetOp.getDependVars(),
+ targetOp.getDevice(), targetOp.getHasDeviceAddrVars(), postHostEvalVars,
+ targetOp.getIfExpr(), targetOp.getInReductionVars(),
+ targetOp.getInReductionByrefAttr(), targetOp.getInReductionSymsAttr(),
+ targetOp.getIsDevicePtrVars(), postMapOperands, targetOp.getNowaitAttr(),
+ targetOp.getPrivateVars(), targetOp.getPrivateSymsAttr(),
+ targetOp.getPrivateNeedsBarrierAttr(), targetOp.getThreadLimit(),
+ targetOp.getPrivateMapsAttr());
+ // Create the block for postTargetOp
+ auto *postTargetBlock = rewriter.createBlock(
+ &postTargetOp.getRegion(), postTargetOp.getRegion().begin(), {}, {});
+ IRMapping postMapping;
+ // Create block arguments and map the values.
+ createBlockArgsAndMap(loc, rewriter, targetOp, targetBlock, postTargetBlock,
+ postHostEvalVars, postMapOperands, allocs, postMapping);
+ // Handle the load operations for the allocs and recompute ops.
+ reloadCacheAndRecompute(loc, rewriter, splitBeforeOp, targetOp, targetBlock,
+ postTargetBlock, postHostEvalVars, postMapOperands,
+ allocs, toRecompute, postMapping);
+ assert(splitBeforeOp->getNumResults() == 0 ||
+ llvm::all_of(splitBeforeOp->getResults(),
+ [](Value result) { return result.use_empty(); }));
+ // Clone the original operations after the split point.
+ for (auto it = std::next(splitBeforeOp->getIterator());
+ it != targetBlock->end(); it++)
+ rewriter.clone(*it, postMapping);
+ return postTargetOp;
+}
+
+/// isolateOp rewrites an omp.target_data { omp.target } into
+/// omp.target_data {
+/// // preTargetOp region contains ops before splitBeforeOp.
+/// omp.target {}
+///   // isolatedTargetOp region contains splitBeforeOp.
+/// omp.target {}
+/// // postTargetOp region contains ops after splitBeforeOp.
+/// omp.target {}
+/// }
+/// It also handles the mapping of variables and the caching/recomputing
+/// of values as needed.
+static FailureOr<SplitResult> isolateOp(Operation *splitBeforeOp,
+ bool splitAfter, RewriterBase &rewriter,
+ mlir::ModuleOp module,
+ bool isTargetDevice) {
+ auto targetOp = cast<omp::TargetOp>(splitBeforeOp->getParentOp());
+ assert(targetOp);
+ rewriter.setInsertionPoint(targetOp);
+
+ // Prepare the map operands for preTargetOp and postTargetOp
+ auto preMapOperands = SmallVector<Value>(targetOp.getMapVars());
+ auto postMapOperands = SmallVector<Value>(targetOp.getMapVars());
+
+ // Vectors to hold analysis results
+ SmallVector<Value> requiredVals;
+ SetVector<Operation *> toCache;
+ SetVector<Operation *> toRecompute;
+ SetVector<Operation *> nonRecomputable;
+ SmallVector<Value> allocs;
+ struct HostEvalVars hostEvalVars;
+
+ // Analyze the ops in target region to determine which ops need to be
+ // cached and which ops need to be recomputed
+ computeAllocsCacheRecomputable(
+ targetOp, splitBeforeOp, rewriter, preMapOperands, postMapOperands,
+ allocs, requiredVals, nonRecomputable, toCache, toRecompute);
+
+ rewriter.setInsertionPoint(targetOp);
+
+ // Generate the preTargetOp that contains all the ops before splitBeforeOp.
+ auto preTargetOp =
+ genPreTargetOp(targetOp, preMapOperands, allocs, splitBeforeOp, rewriter,
+ hostEvalVars, isTargetDevice);
+
+ // Move the ops of preTarget to host.
+ auto res = moveToHost(preTargetOp, rewriter, module, hostEvalVars);
+ if (failed(res))
+ return failure();
+ rewriter.setInsertionPoint(targetOp);
+
+ // Generate the isolatedTargetOp
+ omp::TargetOp isolatedTargetOp =
+ genIsolatedTargetOp(targetOp, postMapOperands, splitBeforeOp, rewriter,
+ allocs, toRecompute, hostEvalVars, isTargetDevice);
+
+ omp::TargetOp postTargetOp = nullptr;
+ // Generate the postTargetOp that contains all the ops after splitBeforeOp.
+ if (splitAfter) {
+ rewriter.setInsertionPoint(targetOp);
+ postTargetOp = genPostTargetOp(targetOp, splitBeforeOp, postMapOperands,
+ rewriter, allocs, toRecompute);
+ }
+ // Finally erase the original targetOp.
+ rewriter.eraseOp(targetOp);
+ return SplitResult{preTargetOp, isolatedTargetOp, postTargetOp};
+}
+
+/// Recursively fission target ops until no more nested ops can be isolated.
+static LogicalResult fissionTarget(omp::TargetOp targetOp,
+ RewriterBase &rewriter,
+ mlir::ModuleOp module, bool isTargetDevice) {
+ auto tuple = getNestedOpToIsolate(targetOp);
+ if (!tuple) {
+ LLVM_DEBUG(llvm::dbgs() << " No op to isolate\n");
+ struct HostEvalVars hostEvalVars;
+ return moveToHost(targetOp, rewriter, module, hostEvalVars);
+ }
+ Operation *toIsolate = std::get<0>(*tuple);
+ bool splitBefore = !std::get<1>(*tuple);
+ bool splitAfter = !std::get<2>(*tuple);
+ // Recursively isolate the target op.
+ if (splitBefore && splitAfter) {
+ auto res =
+ isolateOp(toIsolate, splitAfter, rewriter, module, isTargetDevice);
+ if (failed(res))
+ return failure();
+ return fissionTarget((*res).postTargetOp, rewriter, module, isTargetDevice);
+ }
+ // Isolate only before the op.
+ if (splitBefore) {
+ auto res =
+ isolateOp(toIsolate, splitAfter, rewriter, module, isTargetDevice);
+ if (failed(res))
+ return failure();
+ } else {
+ emitError(toIsolate->getLoc(), "Unhandled case in fissionTarget");
+ return failure();
+ }
+ return success();
+}
+
+/// Pass to lower omp.workdistribute ops.
+class LowerWorkdistributePass
+ : public flangomp::impl::LowerWorkdistributeBase<LowerWorkdistributePass> {
+public:
+ void runOnOperation() override {
+ MLIRContext &context = getContext();
+ auto moduleOp = getOperation();
+ bool changed = false;
+ SetVector<omp::TargetOp> targetOpsToProcess;
+ auto verify =
+ moduleOp->walk([&](mlir::omp::WorkdistributeOp workdistribute) {
+ if (failed(verifyTargetTeamsWorkdistribute(workdistribute)))
+ return WalkResult::interrupt();
+ return WalkResult::advance();
+ });
+ if (verify.wasInterrupted())
+ return signalPassFailure();
+
+ auto fission =
+ moduleOp->walk([&](mlir::omp::WorkdistributeOp workdistribute) {
+ auto res = fissionWorkdistribute(workdistribute);
+ if (failed(res))
+ return WalkResult::interrupt();
+ changed |= *res;
+ return WalkResult::advance();
+ });
+ if (fission.wasInterrupted())
+ return signalPassFailure();
+
+ auto rtCallLower =
+ moduleOp->walk([&](mlir::omp::WorkdistributeOp workdistribute) {
+ auto res = workdistributeRuntimeCallLower(workdistribute,
+ targetOpsToProcess);
+ if (failed(res))
+ return WalkResult::interrupt();
+ changed |= *res;
+ return WalkResult::advance();
+ });
+ if (rtCallLower.wasInterrupted())
+ return signalPassFailure();
+
+ moduleOp->walk([&](mlir::omp::WorkdistributeOp workdistribute) {
+ changed |= workdistributeDoLower(workdistribute, targetOpsToProcess);
+ });
+
+ moduleOp->walk([&](mlir::omp::TeamsOp teams) {
+ changed |= teamsWorkdistributeToSingleOp(teams, targetOpsToProcess);
+ });
+ if (changed) {
+ bool isTargetDevice =
+ llvm::cast<mlir::omp::OffloadModuleInterface>(*moduleOp)
+ .getIsTargetDevice();
+ IRRewriter rewriter(&context);
+ for (auto targetOp : targetOpsToProcess) {
+ auto res = splitTargetData(targetOp, rewriter);
+ if (failed(res))
+ return signalPassFailure();
+ if (*res) {
+ if (failed(fissionTarget(*res, rewriter, moduleOp, isTargetDevice)))
+ return signalPassFailure();
+ }
+ }
+ }
+ }
+};
+} // namespace
diff --git a/flang/lib/Optimizer/Passes/Pipelines.cpp b/flang/lib/Optimizer/Passes/Pipelines.cpp
index a83b066..1ecb6d3 100644
--- a/flang/lib/Optimizer/Passes/Pipelines.cpp
+++ b/flang/lib/Optimizer/Passes/Pipelines.cpp
@@ -301,8 +301,10 @@ void createHLFIRToFIRPassPipeline(mlir::PassManager &pm,
addNestedPassToAllTopLevelOperations<PassConstructor>(
pm, hlfir::createInlineHLFIRAssign);
pm.addPass(hlfir::createConvertHLFIRtoFIR());
- if (enableOpenMP != EnableOpenMP::None)
+ if (enableOpenMP != EnableOpenMP::None) {
pm.addPass(flangomp::createLowerWorkshare());
+ pm.addPass(flangomp::createLowerWorkdistribute());
+ }
if (enableOpenMP == EnableOpenMP::Simd)
pm.addPass(flangomp::createSimdOnlyPass());
}
diff --git a/flang/test/Fir/OpenACC/recipe-populate-firstprivate.mlir b/flang/test/Fir/OpenACC/recipe-populate-firstprivate.mlir
new file mode 100644
index 0000000..0c3f3fe
--- /dev/null
+++ b/flang/test/Fir/OpenACC/recipe-populate-firstprivate.mlir
@@ -0,0 +1,166 @@
+// RUN: fir-opt %s --split-input-file --pass-pipeline="builtin.module(test-acc-recipe-populate{recipe-type=firstprivate})" | FileCheck %s
+
+// The tests here use a synthetic hlfir.declare in order to ensure that the hlfir dialect is
+// loaded. This is required because the pass used here is part of the OpenACC test passes that
+// live outside of flang, and the APIs being tested may generate hlfir even when it does not
+// otherwise appear in the input.
+
+// Test scalar type (f32)
+// CHECK: acc.firstprivate.recipe @firstprivate_scalar : !fir.ref<f32> init {
+// CHECK: ^bb0(%{{.*}}: !fir.ref<f32>):
+// CHECK: %[[ALLOC:.*]] = fir.alloca f32
+// CHECK: %{{.*}}:2 = hlfir.declare %[[ALLOC]] {uniq_name = "scalar"} : (!fir.ref<f32>) -> (!fir.ref<f32>, !fir.ref<f32>)
+// CHECK: acc.yield %{{.*}}#0 : !fir.ref<f32>
+// CHECK: } copy {
+// CHECK: ^bb0(%[[SRC:.*]]: !fir.ref<f32>, %[[DST:.*]]: !fir.ref<f32>):
+// CHECK: %[[LOAD:.*]] = fir.load %[[SRC]] : !fir.ref<f32>
+// CHECK: fir.store %[[LOAD]] to %[[DST]] : !fir.ref<f32>
+// CHECK: acc.terminator
+// CHECK: }
+// CHECK-NOT: destroy
+
+func.func @test_scalar() {
+ %0 = fir.alloca f32 {test.var = "scalar"}
+ %var = fir.alloca f32
+ %1:2 = hlfir.declare %var {uniq_name = "load_hlfir"} : (!fir.ref<f32>) -> (!fir.ref<f32>, !fir.ref<f32>)
+ return
+}
+
+// -----
+
+// Test integer scalar
+// CHECK: acc.firstprivate.recipe @firstprivate_int : !fir.ref<i32> init {
+// CHECK: ^bb0(%{{.*}}: !fir.ref<i32>):
+// CHECK: %[[ALLOC:.*]] = fir.alloca i32
+// CHECK: %{{.*}}:2 = hlfir.declare %[[ALLOC]] {uniq_name = "int"} : (!fir.ref<i32>) -> (!fir.ref<i32>, !fir.ref<i32>)
+// CHECK: acc.yield %{{.*}}#0 : !fir.ref<i32>
+// CHECK: } copy {
+// CHECK: ^bb0(%[[SRC:.*]]: !fir.ref<i32>, %[[DST:.*]]: !fir.ref<i32>):
+// CHECK: %[[LOAD:.*]] = fir.load %[[SRC]] : !fir.ref<i32>
+// CHECK: fir.store %[[LOAD]] to %[[DST]] : !fir.ref<i32>
+// CHECK: acc.terminator
+// CHECK: }
+// CHECK-NOT: destroy
+
+func.func @test_int() {
+ %0 = fir.alloca i32 {test.var = "int"}
+ %var = fir.alloca f32
+ %1:2 = hlfir.declare %var {uniq_name = "load_hlfir"} : (!fir.ref<f32>) -> (!fir.ref<f32>, !fir.ref<f32>)
+ return
+}
+
+// -----
+
+// Test logical type
+// CHECK: acc.firstprivate.recipe @firstprivate_logical : !fir.ref<!fir.logical<4>> init {
+// CHECK: ^bb0(%{{.*}}: !fir.ref<!fir.logical<4>>):
+// CHECK: %[[ALLOC:.*]] = fir.alloca !fir.logical<4>
+// CHECK: %{{.*}}:2 = hlfir.declare %[[ALLOC]] {uniq_name = "logical"} : (!fir.ref<!fir.logical<4>>) -> (!fir.ref<!fir.logical<4>>, !fir.ref<!fir.logical<4>>)
+// CHECK: acc.yield %{{.*}}#0 : !fir.ref<!fir.logical<4>>
+// CHECK: } copy {
+// CHECK: ^bb0(%[[SRC:.*]]: !fir.ref<!fir.logical<4>>, %[[DST:.*]]: !fir.ref<!fir.logical<4>>):
+// CHECK: %[[LOAD:.*]] = fir.load %[[SRC]] : !fir.ref<!fir.logical<4>>
+// CHECK: fir.store %[[LOAD]] to %[[DST]] : !fir.ref<!fir.logical<4>>
+// CHECK: acc.terminator
+// CHECK: }
+// CHECK-NOT: destroy
+
+func.func @test_logical() {
+ %0 = fir.alloca !fir.logical<4> {test.var = "logical"}
+ %var = fir.alloca f32
+ %1:2 = hlfir.declare %var {uniq_name = "load_hlfir"} : (!fir.ref<f32>) -> (!fir.ref<f32>, !fir.ref<f32>)
+ return
+}
+
+// -----
+
+// Test complex type
+// CHECK: acc.firstprivate.recipe @firstprivate_complex : !fir.ref<complex<f32>> init {
+// CHECK: ^bb0(%{{.*}}: !fir.ref<complex<f32>>):
+// CHECK: %[[ALLOC:.*]] = fir.alloca complex<f32>
+// CHECK: %{{.*}}:2 = hlfir.declare %[[ALLOC]] {uniq_name = "complex"} : (!fir.ref<complex<f32>>) -> (!fir.ref<complex<f32>>, !fir.ref<complex<f32>>)
+// CHECK: acc.yield %{{.*}}#0 : !fir.ref<complex<f32>>
+// CHECK: } copy {
+// CHECK: ^bb0(%[[SRC:.*]]: !fir.ref<complex<f32>>, %[[DST:.*]]: !fir.ref<complex<f32>>):
+// CHECK: %[[LOAD:.*]] = fir.load %[[SRC]] : !fir.ref<complex<f32>>
+// CHECK: fir.store %[[LOAD]] to %[[DST]] : !fir.ref<complex<f32>>
+// CHECK: acc.terminator
+// CHECK: }
+// CHECK-NOT: destroy
+
+func.func @test_complex() {
+ %0 = fir.alloca complex<f32> {test.var = "complex"}
+ %var = fir.alloca f32
+ %1:2 = hlfir.declare %var {uniq_name = "load_hlfir"} : (!fir.ref<f32>) -> (!fir.ref<f32>, !fir.ref<f32>)
+ return
+}
+
+// -----
+
+// Test 1D static array
+// CHECK: acc.firstprivate.recipe @firstprivate_array_1d : !fir.ref<!fir.array<100xf32>> init {
+// CHECK: ^bb0(%{{.*}}: !fir.ref<!fir.array<100xf32>>):
+// CHECK: %[[C100:.*]] = arith.constant 100 : index
+// CHECK: %[[SHAPE:.*]] = fir.shape %[[C100]] : (index) -> !fir.shape<1>
+// CHECK: %[[ALLOC:.*]] = fir.alloca !fir.array<100xf32>
+// CHECK: %{{.*}}:2 = hlfir.declare %[[ALLOC]](%[[SHAPE]]) {uniq_name = "array_1d"} : (!fir.ref<!fir.array<100xf32>>, !fir.shape<1>) -> (!fir.ref<!fir.array<100xf32>>, !fir.ref<!fir.array<100xf32>>)
+// CHECK: acc.yield %{{.*}}#0 : !fir.ref<!fir.array<100xf32>>
+// CHECK: } copy {
+// CHECK: ^bb0(%[[SRC:.*]]: !fir.ref<!fir.array<100xf32>>, %[[DST:.*]]: !fir.ref<!fir.array<100xf32>>):
+// CHECK: hlfir.assign %[[SRC]] to %[[DST]] : !fir.ref<!fir.array<100xf32>>, !fir.ref<!fir.array<100xf32>>
+// CHECK: acc.terminator
+// CHECK: }
+// CHECK-NOT: destroy
+
+func.func @test_array_1d() {
+ %0 = fir.alloca !fir.array<100xf32> {test.var = "array_1d"}
+ %var = fir.alloca f32
+ %1:2 = hlfir.declare %var {uniq_name = "load_hlfir"} : (!fir.ref<f32>) -> (!fir.ref<f32>, !fir.ref<f32>)
+ return
+}
+
+// -----
+
+// Test 2D static array
+// CHECK: acc.firstprivate.recipe @firstprivate_array_2d : !fir.ref<!fir.array<10x20xi32>> init {
+// CHECK: ^bb0(%{{.*}}: !fir.ref<!fir.array<10x20xi32>>):
+// CHECK: %[[C10:.*]] = arith.constant 10 : index
+// CHECK: %[[C20:.*]] = arith.constant 20 : index
+// CHECK: %[[SHAPE:.*]] = fir.shape %[[C10]], %[[C20]] : (index, index) -> !fir.shape<2>
+// CHECK: %[[ALLOC:.*]] = fir.alloca !fir.array<10x20xi32>
+// CHECK: %{{.*}}:2 = hlfir.declare %[[ALLOC]](%[[SHAPE]]) {uniq_name = "array_2d"} : (!fir.ref<!fir.array<10x20xi32>>, !fir.shape<2>) -> (!fir.ref<!fir.array<10x20xi32>>, !fir.ref<!fir.array<10x20xi32>>)
+// CHECK: acc.yield %{{.*}}#0 : !fir.ref<!fir.array<10x20xi32>>
+// CHECK: } copy {
+// CHECK: ^bb0(%[[SRC:.*]]: !fir.ref<!fir.array<10x20xi32>>, %[[DST:.*]]: !fir.ref<!fir.array<10x20xi32>>):
+// CHECK: hlfir.assign %[[SRC]] to %[[DST]] : !fir.ref<!fir.array<10x20xi32>>, !fir.ref<!fir.array<10x20xi32>>
+// CHECK: acc.terminator
+// CHECK: }
+// CHECK-NOT: destroy
+
+func.func @test_array_2d() {
+ %0 = fir.alloca !fir.array<10x20xi32> {test.var = "array_2d"}
+ %var = fir.alloca f32
+ %1:2 = hlfir.declare %var {uniq_name = "load_hlfir"} : (!fir.ref<f32>) -> (!fir.ref<f32>, !fir.ref<f32>)
+ return
+}
+
+// -----
+
+// Test derived type with multiple fields
+// CHECK: acc.firstprivate.recipe @firstprivate_derived : !fir.ref<!fir.type<_QTpoint{x:f32,y:f32,z:f32}>> init {
+// CHECK: ^bb0(%{{.*}}: !fir.ref<!fir.type<_QTpoint{x:f32,y:f32,z:f32}>>):
+// CHECK: %[[ALLOC:.*]] = fir.alloca !fir.type<_QTpoint{x:f32,y:f32,z:f32}>
+// CHECK: %{{.*}}:2 = hlfir.declare %[[ALLOC]] {uniq_name = "derived"} : (!fir.ref<!fir.type<_QTpoint{x:f32,y:f32,z:f32}>>) -> (!fir.ref<!fir.type<_QTpoint{x:f32,y:f32,z:f32}>>, !fir.ref<!fir.type<_QTpoint{x:f32,y:f32,z:f32}>>)
+// CHECK: acc.yield %{{.*}}#0 : !fir.ref<!fir.type<_QTpoint{x:f32,y:f32,z:f32}>>
+// CHECK: } copy {
+// CHECK: ^bb0(%[[SRC:.*]]: !fir.ref<!fir.type<_QTpoint{x:f32,y:f32,z:f32}>>, %[[DST:.*]]: !fir.ref<!fir.type<_QTpoint{x:f32,y:f32,z:f32}>>):
+// CHECK: hlfir.assign %[[SRC]] to %[[DST]] : !fir.ref<!fir.type<_QTpoint{x:f32,y:f32,z:f32}>>, !fir.ref<!fir.type<_QTpoint{x:f32,y:f32,z:f32}>>
+// CHECK: acc.terminator
+// CHECK: }
+// CHECK-NOT: destroy
+
+func.func @test_derived() {
+ %0 = fir.alloca !fir.type<_QTpoint{x:f32,y:f32,z:f32}> {test.var = "derived"}
+ %var = fir.alloca f32
+ %1:2 = hlfir.declare %var {uniq_name = "load_hlfir"} : (!fir.ref<f32>) -> (!fir.ref<f32>, !fir.ref<f32>)
+ return
+}
diff --git a/flang/test/Fir/OpenACC/recipe-populate-private.mlir b/flang/test/Fir/OpenACC/recipe-populate-private.mlir
new file mode 100644
index 0000000..aeb60d6
--- /dev/null
+++ b/flang/test/Fir/OpenACC/recipe-populate-private.mlir
@@ -0,0 +1,223 @@
+// RUN: fir-opt %s --split-input-file --pass-pipeline="builtin.module(test-acc-recipe-populate{recipe-type=private})" | FileCheck %s
+
+// The tests here use a synthetic hlfir.declare in order to ensure that the hlfir dialect is
+// loaded. This is required because the pass used here is part of the OpenACC test passes that
+// live outside of flang, and the APIs being tested may generate hlfir even when it does not
+// otherwise appear in the input.
+
+// Test scalar type (f32)
+// CHECK: acc.private.recipe @private_scalar : !fir.ref<f32> init {
+// CHECK: ^bb0(%{{.*}}: !fir.ref<f32>):
+// CHECK: %[[ALLOC:.*]] = fir.alloca f32
+// CHECK: %{{.*}}:2 = hlfir.declare %[[ALLOC]] {uniq_name = "scalar"} : (!fir.ref<f32>) -> (!fir.ref<f32>, !fir.ref<f32>)
+// CHECK: acc.yield %{{.*}}#0 : !fir.ref<f32>
+// CHECK: }
+// CHECK-NOT: destroy
+
+func.func @test_scalar() {
+ %0 = fir.alloca f32 {test.var = "scalar"}
+ %var = fir.alloca f32
+ %1:2 = hlfir.declare %var {uniq_name = "load_hlfir"} : (!fir.ref<f32>) -> (!fir.ref<f32>, !fir.ref<f32>)
+ return
+}
+
+// -----
+
+// Test logical type
+// CHECK: acc.private.recipe @private_logical : !fir.ref<!fir.logical<4>> init {
+// CHECK: ^bb0(%{{.*}}: !fir.ref<!fir.logical<4>>):
+// CHECK: %[[ALLOC:.*]] = fir.alloca !fir.logical<4>
+// CHECK: %{{.*}}:2 = hlfir.declare %[[ALLOC]] {uniq_name = "logical"} : (!fir.ref<!fir.logical<4>>) -> (!fir.ref<!fir.logical<4>>, !fir.ref<!fir.logical<4>>)
+// CHECK: acc.yield %{{.*}}#0 : !fir.ref<!fir.logical<4>>
+// CHECK: }
+// CHECK-NOT: destroy
+
+func.func @test_logical() {
+ %0 = fir.alloca !fir.logical<4> {test.var = "logical"}
+ %var = fir.alloca f32
+ %1:2 = hlfir.declare %var {uniq_name = "load_hlfir"} : (!fir.ref<f32>) -> (!fir.ref<f32>, !fir.ref<f32>)
+ return
+}
+
+// -----
+
+// Test complex type
+// CHECK: acc.private.recipe @private_complex : !fir.ref<complex<f32>> init {
+// CHECK: ^bb0(%{{.*}}: !fir.ref<complex<f32>>):
+// CHECK: %[[ALLOC:.*]] = fir.alloca complex<f32>
+// CHECK: %{{.*}}:2 = hlfir.declare %[[ALLOC]] {uniq_name = "complex"} : (!fir.ref<complex<f32>>) -> (!fir.ref<complex<f32>>, !fir.ref<complex<f32>>)
+// CHECK: acc.yield %{{.*}}#0 : !fir.ref<complex<f32>>
+// CHECK: }
+// CHECK-NOT: destroy
+
+func.func @test_complex() {
+ %0 = fir.alloca complex<f32> {test.var = "complex"}
+ %var = fir.alloca f32
+ %1:2 = hlfir.declare %var {uniq_name = "load_hlfir"} : (!fir.ref<f32>) -> (!fir.ref<f32>, !fir.ref<f32>)
+ return
+}
+
+// -----
+
+// Test 1D static array
+// CHECK: acc.private.recipe @private_array_1d : !fir.ref<!fir.array<100xf32>> init {
+// CHECK: ^bb0(%{{.*}}: !fir.ref<!fir.array<100xf32>>):
+// CHECK: %[[C100:.*]] = arith.constant 100 : index
+// CHECK: %[[SHAPE:.*]] = fir.shape %[[C100]] : (index) -> !fir.shape<1>
+// CHECK: %[[ALLOC:.*]] = fir.alloca !fir.array<100xf32>
+// CHECK: %{{.*}}:2 = hlfir.declare %[[ALLOC]](%[[SHAPE]]) {uniq_name = "array_1d"} : (!fir.ref<!fir.array<100xf32>>, !fir.shape<1>) -> (!fir.ref<!fir.array<100xf32>>, !fir.ref<!fir.array<100xf32>>)
+// CHECK: acc.yield %{{.*}}#0 : !fir.ref<!fir.array<100xf32>>
+// CHECK: }
+// CHECK-NOT: destroy
+
+func.func @test_array_1d() {
+ %0 = fir.alloca !fir.array<100xf32> {test.var = "array_1d"}
+ %var = fir.alloca f32
+ %1:2 = hlfir.declare %var {uniq_name = "load_hlfir"} : (!fir.ref<f32>) -> (!fir.ref<f32>, !fir.ref<f32>)
+ return
+}
+
+// -----
+
+// Test 3D static array
+// CHECK: acc.private.recipe @private_array_3d : !fir.ref<!fir.array<5x10x15xi32>> init {
+// CHECK: ^bb0(%{{.*}}: !fir.ref<!fir.array<5x10x15xi32>>):
+// CHECK: %[[C5:.*]] = arith.constant 5 : index
+// CHECK: %[[C10:.*]] = arith.constant 10 : index
+// CHECK: %[[C15:.*]] = arith.constant 15 : index
+// CHECK: %[[SHAPE:.*]] = fir.shape %[[C5]], %[[C10]], %[[C15]] : (index, index, index) -> !fir.shape<3>
+// CHECK: %[[ALLOC:.*]] = fir.alloca !fir.array<5x10x15xi32>
+// CHECK: %{{.*}}:2 = hlfir.declare %[[ALLOC]](%[[SHAPE]]) {uniq_name = "array_3d"} : (!fir.ref<!fir.array<5x10x15xi32>>, !fir.shape<3>) -> (!fir.ref<!fir.array<5x10x15xi32>>, !fir.ref<!fir.array<5x10x15xi32>>)
+// CHECK: acc.yield %{{.*}}#0 : !fir.ref<!fir.array<5x10x15xi32>>
+// CHECK: }
+// CHECK-NOT: destroy
+
+func.func @test_array_3d() {
+ %0 = fir.alloca !fir.array<5x10x15xi32> {test.var = "array_3d"}
+ %var = fir.alloca f32
+ %1:2 = hlfir.declare %var {uniq_name = "load_hlfir"} : (!fir.ref<f32>) -> (!fir.ref<f32>, !fir.ref<f32>)
+ return
+}
+
+// -----
+
+// Test derived type with multiple fields
+// CHECK: acc.private.recipe @private_derived : !fir.ref<!fir.type<_QTpoint{x:f32,y:f32,z:f32}>> init {
+// CHECK: ^bb0(%{{.*}}: !fir.ref<!fir.type<_QTpoint{x:f32,y:f32,z:f32}>>):
+// CHECK: %[[ALLOC:.*]] = fir.alloca !fir.type<_QTpoint{x:f32,y:f32,z:f32}>
+// CHECK: %{{.*}}:2 = hlfir.declare %[[ALLOC]] {uniq_name = "derived"} : (!fir.ref<!fir.type<_QTpoint{x:f32,y:f32,z:f32}>>) -> (!fir.ref<!fir.type<_QTpoint{x:f32,y:f32,z:f32}>>, !fir.ref<!fir.type<_QTpoint{x:f32,y:f32,z:f32}>>)
+// CHECK: acc.yield %{{.*}}#0 : !fir.ref<!fir.type<_QTpoint{x:f32,y:f32,z:f32}>>
+// CHECK: }
+// CHECK-NOT: destroy
+
+func.func @test_derived() {
+ %0 = fir.alloca !fir.type<_QTpoint{x:f32,y:f32,z:f32}> {test.var = "derived"}
+ %var = fir.alloca f32
+ %1:2 = hlfir.declare %var {uniq_name = "load_hlfir"} : (!fir.ref<f32>) -> (!fir.ref<f32>, !fir.ref<f32>)
+ return
+}
+
+// -----
+
+// Test box type with heap scalar (needs destroy)
+// CHECK: acc.private.recipe @private_box_heap_scalar : !fir.ref<!fir.box<!fir.heap<f64>>> init {
+// CHECK: ^bb0(%{{.*}}: !fir.ref<!fir.box<!fir.heap<f64>>>):
+// CHECK: %[[BOXALLOC:.*]] = fir.alloca !fir.box<!fir.heap<f64>>
+// CHECK: %{{.*}}:2 = hlfir.declare %[[BOXALLOC]] {uniq_name = "box_heap_scalar"} : (!fir.ref<!fir.box<!fir.heap<f64>>>) -> (!fir.ref<!fir.box<!fir.heap<f64>>>, !fir.ref<!fir.box<!fir.heap<f64>>>)
+// CHECK: %[[SCALAR:.*]] = fir.allocmem f64
+// CHECK: %[[EMBOX:.*]] = fir.embox %[[SCALAR]] : (!fir.heap<f64>) -> !fir.box<!fir.heap<f64>>
+// CHECK: fir.store %[[EMBOX]] to %{{.*}}#0 : !fir.ref<!fir.box<!fir.heap<f64>>>
+// CHECK: acc.yield %{{.*}}#0 : !fir.ref<!fir.box<!fir.heap<f64>>>
+// CHECK: } destroy {
+// CHECK: ^bb0(%{{.*}}: !fir.ref<!fir.box<!fir.heap<f64>>>, %{{.*}}: !fir.ref<!fir.box<!fir.heap<f64>>>):
+// CHECK: acc.terminator
+// CHECK: }
+
+func.func @test_box_heap_scalar() {
+ %0 = fir.alloca !fir.box<!fir.heap<f64>> {test.var = "box_heap_scalar"}
+ %var = fir.alloca f32
+ %1:2 = hlfir.declare %var {uniq_name = "load_hlfir"} : (!fir.ref<f32>) -> (!fir.ref<f32>, !fir.ref<f32>)
+ return
+}
+
+// -----
+
+// Test box type with pointer scalar (needs destroy)
+// CHECK: acc.private.recipe @private_box_ptr_scalar : !fir.ref<!fir.box<!fir.ptr<i32>>> init {
+// CHECK: ^bb0(%{{.*}}: !fir.ref<!fir.box<!fir.ptr<i32>>>):
+// CHECK: %[[BOXALLOC:.*]] = fir.alloca !fir.box<!fir.ptr<i32>>
+// CHECK: %{{.*}}:2 = hlfir.declare %[[BOXALLOC]] {uniq_name = "box_ptr_scalar"} : (!fir.ref<!fir.box<!fir.ptr<i32>>>) -> (!fir.ref<!fir.box<!fir.ptr<i32>>>, !fir.ref<!fir.box<!fir.ptr<i32>>>)
+// CHECK: %[[SCALAR:.*]] = fir.allocmem i32
+// CHECK: %[[EMBOX:.*]] = fir.embox %[[SCALAR]] : (!fir.heap<i32>) -> !fir.box<!fir.ptr<i32>>
+// CHECK: fir.store %[[EMBOX]] to %{{.*}}#0 : !fir.ref<!fir.box<!fir.ptr<i32>>>
+// CHECK: acc.yield %{{.*}}#0 : !fir.ref<!fir.box<!fir.ptr<i32>>>
+// CHECK: } destroy {
+// CHECK: ^bb0(%{{.*}}: !fir.ref<!fir.box<!fir.ptr<i32>>>, %{{.*}}: !fir.ref<!fir.box<!fir.ptr<i32>>>):
+// CHECK: acc.terminator
+// CHECK: }
+
+func.func @test_box_ptr_scalar() {
+ %0 = fir.alloca !fir.box<!fir.ptr<i32>> {test.var = "box_ptr_scalar"}
+ %var = fir.alloca f32
+ %1:2 = hlfir.declare %var {uniq_name = "load_hlfir"} : (!fir.ref<f32>) -> (!fir.ref<f32>, !fir.ref<f32>)
+ return
+}
+
+// -----
+
+// Test box type with 1D heap array (needs destroy)
+// CHECK: acc.private.recipe @private_box_heap_array_1d : !fir.ref<!fir.box<!fir.heap<!fir.array<?xf32>>>> init {
+// CHECK: ^bb0(%{{.*}}: !fir.ref<!fir.box<!fir.heap<!fir.array<?xf32>>>>):
+// CHECK: %[[BOXALLOC:.*]] = fir.alloca !fir.box<!fir.heap<!fir.array<?xf32>>>
+// CHECK: %{{.*}}:2 = hlfir.declare %[[BOXALLOC]] {uniq_name = "box_heap_array_1d"} : (!fir.ref<!fir.box<!fir.heap<!fir.array<?xf32>>>>) -> (!fir.ref<!fir.box<!fir.heap<!fir.array<?xf32>>>>, !fir.ref<!fir.box<!fir.heap<!fir.array<?xf32>>>>)
+// CHECK: acc.yield %{{.*}}#0 : !fir.ref<!fir.box<!fir.heap<!fir.array<?xf32>>>>
+// CHECK: } destroy {
+// CHECK: ^bb0(%{{.*}}: !fir.ref<!fir.box<!fir.heap<!fir.array<?xf32>>>>, %{{.*}}: !fir.ref<!fir.box<!fir.heap<!fir.array<?xf32>>>>):
+// CHECK: acc.terminator
+// CHECK: }
+
+func.func @test_box_heap_array_1d() {
+ %0 = fir.alloca !fir.box<!fir.heap<!fir.array<?xf32>>> {test.var = "box_heap_array_1d"}
+ %var = fir.alloca f32
+ %1:2 = hlfir.declare %var {uniq_name = "load_hlfir"} : (!fir.ref<f32>) -> (!fir.ref<f32>, !fir.ref<f32>)
+ return
+}
+
+// -----
+
+// Test box type with 2D heap array (needs destroy)
+// CHECK: acc.private.recipe @private_box_heap_array_2d : !fir.ref<!fir.box<!fir.heap<!fir.array<?x?xi64>>>> init {
+// CHECK: ^bb0(%{{.*}}: !fir.ref<!fir.box<!fir.heap<!fir.array<?x?xi64>>>>):
+// CHECK: %[[BOXALLOC:.*]] = fir.alloca !fir.box<!fir.heap<!fir.array<?x?xi64>>>
+// CHECK: %{{.*}}:2 = hlfir.declare %[[BOXALLOC]] {uniq_name = "box_heap_array_2d"} : (!fir.ref<!fir.box<!fir.heap<!fir.array<?x?xi64>>>>) -> (!fir.ref<!fir.box<!fir.heap<!fir.array<?x?xi64>>>>, !fir.ref<!fir.box<!fir.heap<!fir.array<?x?xi64>>>>)
+// CHECK: acc.yield %{{.*}}#0 : !fir.ref<!fir.box<!fir.heap<!fir.array<?x?xi64>>>>
+// CHECK: } destroy {
+// CHECK: ^bb0(%{{.*}}: !fir.ref<!fir.box<!fir.heap<!fir.array<?x?xi64>>>>, %{{.*}}: !fir.ref<!fir.box<!fir.heap<!fir.array<?x?xi64>>>>):
+// CHECK: acc.terminator
+// CHECK: }
+
+func.func @test_box_heap_array_2d() {
+ %0 = fir.alloca !fir.box<!fir.heap<!fir.array<?x?xi64>>> {test.var = "box_heap_array_2d"}
+ %var = fir.alloca f32
+ %1:2 = hlfir.declare %var {uniq_name = "load_hlfir"} : (!fir.ref<f32>) -> (!fir.ref<f32>, !fir.ref<f32>)
+ return
+}
+
+// -----
+
+// Test box type with pointer array (needs destroy)
+// CHECK: acc.private.recipe @private_box_ptr_array : !fir.ref<!fir.box<!fir.ptr<!fir.array<?xf32>>>> init {
+// CHECK: ^bb0(%{{.*}}: !fir.ref<!fir.box<!fir.ptr<!fir.array<?xf32>>>>):
+// CHECK: %[[BOXALLOC:.*]] = fir.alloca !fir.box<!fir.ptr<!fir.array<?xf32>>>
+// CHECK: %{{.*}}:2 = hlfir.declare %[[BOXALLOC]] {uniq_name = "box_ptr_array"} : (!fir.ref<!fir.box<!fir.ptr<!fir.array<?xf32>>>>) -> (!fir.ref<!fir.box<!fir.ptr<!fir.array<?xf32>>>>, !fir.ref<!fir.box<!fir.ptr<!fir.array<?xf32>>>>)
+// CHECK: acc.yield %{{.*}}#0 : !fir.ref<!fir.box<!fir.ptr<!fir.array<?xf32>>>>
+// CHECK: } destroy {
+// CHECK: ^bb0(%{{.*}}: !fir.ref<!fir.box<!fir.ptr<!fir.array<?xf32>>>>, %{{.*}}: !fir.ref<!fir.box<!fir.ptr<!fir.array<?xf32>>>>):
+// CHECK: acc.terminator
+// CHECK: }
+
+func.func @test_box_ptr_array() {
+ %0 = fir.alloca !fir.box<!fir.ptr<!fir.array<?xf32>>> {test.var = "box_ptr_array"}
+ %var = fir.alloca f32
+ %1:2 = hlfir.declare %var {uniq_name = "load_hlfir"} : (!fir.ref<f32>) -> (!fir.ref<f32>, !fir.ref<f32>)
+ return
+}
diff --git a/flang/test/Fir/basic-program.fir b/flang/test/Fir/basic-program.fir
index 195e5ad..59f6c73 100644
--- a/flang/test/Fir/basic-program.fir
+++ b/flang/test/Fir/basic-program.fir
@@ -69,6 +69,7 @@ func.func @_QQmain() {
// PASSES-NEXT: InlineHLFIRAssign
// PASSES-NEXT: ConvertHLFIRtoFIR
// PASSES-NEXT: LowerWorkshare
+// PASSES-NEXT: LowerWorkdistribute
// PASSES-NEXT: CSE
// PASSES-NEXT: (S) 0 num-cse'd - Number of operations CSE'd
// PASSES-NEXT: (S) 0 num-dce'd - Number of operations DCE'd
diff --git a/flang/test/Lower/OpenMP/workdistribute-multiple.f90 b/flang/test/Lower/OpenMP/workdistribute-multiple.f90
new file mode 100644
index 0000000..cf1d9dd
--- /dev/null
+++ b/flang/test/Lower/OpenMP/workdistribute-multiple.f90
@@ -0,0 +1,20 @@
+! RUN: not %flang_fc1 -emit-fir -fopenmp -fopenmp-version=60 %s -o - 2>&1 | FileCheck %s
+
+! CHECK: error: teams has multiple workdistribute ops.
+! CHECK-LABEL: func @_QPteams_workdistribute_1
+subroutine teams_workdistribute_1()
+ use iso_fortran_env
+ real(kind=real32) :: a
+ real(kind=real32), dimension(10) :: x
+ real(kind=real32), dimension(10) :: y
+ !$omp teams
+
+ !$omp workdistribute
+ y = a * x + y
+ !$omp end workdistribute
+
+ !$omp workdistribute
+ y = a * y + x
+ !$omp end workdistribute
+ !$omp end teams
+end subroutine teams_workdistribute_1
diff --git a/flang/test/Lower/OpenMP/workdistribute-saxpy-1d.f90 b/flang/test/Lower/OpenMP/workdistribute-saxpy-1d.f90
new file mode 100644
index 0000000..b2dbc0f
--- /dev/null
+++ b/flang/test/Lower/OpenMP/workdistribute-saxpy-1d.f90
@@ -0,0 +1,39 @@
+! RUN: %flang_fc1 -emit-fir -fopenmp -fopenmp-version=60 %s -o - | FileCheck %s
+
+! CHECK-LABEL: func @_QPtarget_teams_workdistribute
+subroutine target_teams_workdistribute()
+ use iso_fortran_env
+ real(kind=real32) :: a
+ real(kind=real32), dimension(10) :: x
+ real(kind=real32), dimension(10) :: y
+
+ ! CHECK: omp.target_data
+ ! CHECK: omp.target
+ ! CHECK: omp.teams
+ ! CHECK: omp.parallel
+ ! CHECK: omp.distribute
+ ! CHECK: omp.wsloop
+ ! CHECK: omp.loop_nest
+
+ !$omp target teams workdistribute
+ y = a * x + y
+ !$omp end target teams workdistribute
+end subroutine target_teams_workdistribute
+
+! CHECK-LABEL: func @_QPteams_workdistribute
+subroutine teams_workdistribute()
+ use iso_fortran_env
+ real(kind=real32) :: a
+ real(kind=real32), dimension(10) :: x
+ real(kind=real32), dimension(10) :: y
+
+ ! CHECK: omp.teams
+ ! CHECK: omp.parallel
+ ! CHECK: omp.distribute
+ ! CHECK: omp.wsloop
+ ! CHECK: omp.loop_nest
+
+ !$omp teams workdistribute
+ y = a * x + y
+ !$omp end teams workdistribute
+end subroutine teams_workdistribute
diff --git a/flang/test/Lower/OpenMP/workdistribute-saxpy-2d.f90 b/flang/test/Lower/OpenMP/workdistribute-saxpy-2d.f90
new file mode 100644
index 0000000..09e1211
--- /dev/null
+++ b/flang/test/Lower/OpenMP/workdistribute-saxpy-2d.f90
@@ -0,0 +1,45 @@
+! RUN: %flang_fc1 -emit-fir -fopenmp -fopenmp-version=60 %s -o - | FileCheck %s
+
+! CHECK-LABEL: func @_QPtarget_teams_workdistribute
+subroutine target_teams_workdistribute(a, x, y, rows, cols)
+ use iso_fortran_env
+ implicit none
+
+ integer, intent(in) :: rows, cols
+ real(kind=real32) :: a
+ real(kind=real32), dimension(rows, cols) :: x, y
+
+ ! CHECK: omp.target_data
+ ! CHECK: omp.target
+ ! CHECK: omp.teams
+ ! CHECK: omp.parallel
+ ! CHECK: omp.distribute
+ ! CHECK: omp.wsloop
+ ! CHECK: omp.loop_nest
+ ! CHECK: fir.do_loop
+
+ !$omp target teams workdistribute
+ y = a * x + y
+ !$omp end target teams workdistribute
+end subroutine target_teams_workdistribute
+
+! CHECK-LABEL: func @_QPteams_workdistribute
+subroutine teams_workdistribute(a, x, y, rows, cols)
+ use iso_fortran_env
+ implicit none
+
+ integer, intent(in) :: rows, cols
+ real(kind=real32) :: a
+ real(kind=real32), dimension(rows, cols) :: x, y
+
+ ! CHECK: omp.teams
+ ! CHECK: omp.parallel
+ ! CHECK: omp.distribute
+ ! CHECK: omp.wsloop
+ ! CHECK: omp.loop_nest
+ ! CHECK: fir.do_loop
+
+ !$omp teams workdistribute
+ y = a * x + y
+ !$omp end teams workdistribute
+end subroutine teams_workdistribute
diff --git a/flang/test/Lower/OpenMP/workdistribute-saxpy-3d.f90 b/flang/test/Lower/OpenMP/workdistribute-saxpy-3d.f90
new file mode 100644
index 0000000..cf5d023
--- /dev/null
+++ b/flang/test/Lower/OpenMP/workdistribute-saxpy-3d.f90
@@ -0,0 +1,47 @@
+! RUN: %flang_fc1 -emit-fir -fopenmp -fopenmp-version=60 %s -o - | FileCheck %s
+
+! CHECK-LABEL: func @_QPtarget_teams_workdistribute
+subroutine target_teams_workdistribute(a, x, y, rows, cols, depth)
+ use iso_fortran_env
+ implicit none
+
+ integer, intent(in) :: rows, cols, depth
+ real(kind=real32) :: a
+ real(kind=real32), dimension(rows, cols, depth) :: x, y
+
+ ! CHECK: omp.target_data
+ ! CHECK: omp.target
+ ! CHECK: omp.teams
+ ! CHECK: omp.parallel
+ ! CHECK: omp.distribute
+ ! CHECK: omp.wsloop
+ ! CHECK: omp.loop_nest
+ ! CHECK: fir.do_loop
+ ! CHECK: fir.do_loop
+
+ !$omp target teams workdistribute
+ y = a * x + y
+ !$omp end target teams workdistribute
+end subroutine target_teams_workdistribute
+
+! CHECK-LABEL: func @_QPteams_workdistribute
+subroutine teams_workdistribute(a, x, y, rows, cols, depth)
+ use iso_fortran_env
+ implicit none
+
+ integer, intent(in) :: rows, cols, depth
+ real(kind=real32) :: a
+ real(kind=real32), dimension(rows, cols, depth) :: x, y
+
+ ! CHECK: omp.teams
+ ! CHECK: omp.parallel
+ ! CHECK: omp.distribute
+ ! CHECK: omp.wsloop
+ ! CHECK: omp.loop_nest
+ ! CHECK: fir.do_loop
+ ! CHECK: fir.do_loop
+
+ !$omp teams workdistribute
+ y = a * x + y
+ !$omp end teams workdistribute
+end subroutine teams_workdistribute
diff --git a/flang/test/Lower/OpenMP/workdistribute-saxpy-and-scalar-assign.f90 b/flang/test/Lower/OpenMP/workdistribute-saxpy-and-scalar-assign.f90
new file mode 100644
index 0000000..516c460
--- /dev/null
+++ b/flang/test/Lower/OpenMP/workdistribute-saxpy-and-scalar-assign.f90
@@ -0,0 +1,53 @@
+! RUN: %flang_fc1 -emit-fir -fopenmp -fopenmp-version=60 %s -o - | FileCheck %s
+
+! CHECK-LABEL: func @_QPtarget_teams_workdistribute
+subroutine target_teams_workdistribute()
+ use iso_fortran_env
+ real(kind=real32) :: a
+ real(kind=real32), dimension(10) :: x
+ real(kind=real32), dimension(10) :: y
+ !$omp target teams workdistribute
+
+ ! CHECK: omp.target_data
+ ! CHECK: omp.target
+ ! CHECK: omp.teams
+ ! CHECK: omp.parallel
+ ! CHECK: omp.distribute
+ ! CHECK: omp.wsloop
+ ! CHECK: omp.loop_nest
+
+ y = a * x + y
+
+ ! CHECK: omp.target
+ ! CHECK: omp.teams
+ ! CHECK: omp.parallel
+ ! CHECK: omp.distribute
+ ! CHECK: omp.wsloop
+ ! CHECK: omp.loop_nest
+
+ y = 2.0_real32
+
+ !$omp end target teams workdistribute
+end subroutine target_teams_workdistribute
+
+! CHECK-LABEL: func @_QPteams_workdistribute
+subroutine teams_workdistribute()
+ use iso_fortran_env
+ real(kind=real32) :: a
+ real(kind=real32), dimension(10) :: x
+ real(kind=real32), dimension(10) :: y
+ !$omp teams workdistribute
+
+ ! CHECK: omp.teams
+ ! CHECK: omp.parallel
+ ! CHECK: omp.distribute
+ ! CHECK: omp.wsloop
+ ! CHECK: omp.loop_nest
+
+ y = a * x + y
+
+ ! CHECK: fir.call @_FortranAAssign
+ y = 2.0_real32
+
+ !$omp end teams workdistribute
+end subroutine teams_workdistribute
diff --git a/flang/test/Lower/OpenMP/workdistribute-saxpy-two-2d.f90 b/flang/test/Lower/OpenMP/workdistribute-saxpy-two-2d.f90
new file mode 100644
index 0000000..4aeb2e8
--- /dev/null
+++ b/flang/test/Lower/OpenMP/workdistribute-saxpy-two-2d.f90
@@ -0,0 +1,68 @@
+! RUN: %flang_fc1 -emit-fir -fopenmp -fopenmp-version=60 %s -o - | FileCheck %s
+
+! CHECK-LABEL: func @_QPtarget_teams_workdistribute
+subroutine target_teams_workdistribute(a, x, y, rows, cols)
+ use iso_fortran_env
+ implicit none
+
+ integer, intent(in) :: rows, cols
+ real(kind=real32) :: a
+ real(kind=real32), dimension(rows, cols) :: x, y
+
+ !$omp target teams workdistribute
+
+ ! CHECK: omp.target_data
+ ! CHECK: omp.target
+ ! CHECK: omp.teams
+ ! CHECK: omp.parallel
+ ! CHECK: omp.distribute
+ ! CHECK: omp.wsloop
+ ! CHECK: omp.loop_nest
+ ! CHECK: fir.do_loop
+
+ y = a * x + y
+
+ ! CHECK: omp.target
+ ! CHECK: omp.teams
+ ! CHECK: omp.parallel
+ ! CHECK: omp.distribute
+ ! CHECK: omp.wsloop
+ ! CHECK: omp.loop_nest
+ ! CHECK: fir.do_loop
+
+ y = a * y + x
+
+ !$omp end target teams workdistribute
+end subroutine target_teams_workdistribute
+
+! CHECK-LABEL: func @_QPteams_workdistribute
+subroutine teams_workdistribute(a, x, y, rows, cols)
+ use iso_fortran_env
+ implicit none
+
+ integer, intent(in) :: rows, cols
+ real(kind=real32) :: a
+ real(kind=real32), dimension(rows, cols) :: x, y
+
+ !$omp teams workdistribute
+
+ ! CHECK: omp.teams
+ ! CHECK: omp.parallel
+ ! CHECK: omp.distribute
+ ! CHECK: omp.wsloop
+ ! CHECK: omp.loop_nest
+ ! CHECK: fir.do_loop
+
+ y = a * x + y
+
+ ! CHECK: omp.teams
+ ! CHECK: omp.parallel
+ ! CHECK: omp.distribute
+ ! CHECK: omp.wsloop
+ ! CHECK: omp.loop_nest
+ ! CHECK: fir.do_loop
+
+ y = a * y + x
+
+ !$omp end teams workdistribute
+end subroutine teams_workdistribute
diff --git a/flang/test/Lower/OpenMP/workdistribute-scalar-assign.f90 b/flang/test/Lower/OpenMP/workdistribute-scalar-assign.f90
new file mode 100644
index 0000000..3062b35
--- /dev/null
+++ b/flang/test/Lower/OpenMP/workdistribute-scalar-assign.f90
@@ -0,0 +1,29 @@
+! RUN: %flang_fc1 -emit-fir -fopenmp -fopenmp-version=60 %s -o - | FileCheck %s
+
+! CHECK-LABEL: func @_QPtarget_teams_workdistribute_scalar_assign
+subroutine target_teams_workdistribute_scalar_assign()
+ integer :: aa(10)
+
+ ! CHECK: omp.target_data
+ ! CHECK: omp.target
+ ! CHECK: omp.teams
+ ! CHECK: omp.parallel
+ ! CHECK: omp.distribute
+ ! CHECK: omp.wsloop
+ ! CHECK: omp.loop_nest
+
+ !$omp target teams workdistribute
+ aa = 20
+ !$omp end target teams workdistribute
+
+end subroutine target_teams_workdistribute_scalar_assign
+
+! CHECK-LABEL: func @_QPteams_workdistribute_scalar_assign
+subroutine teams_workdistribute_scalar_assign()
+ integer :: aa(10)
+ ! CHECK: fir.call @_FortranAAssign
+ !$omp teams workdistribute
+ aa = 20
+ !$omp end teams workdistribute
+
+end subroutine teams_workdistribute_scalar_assign
diff --git a/flang/test/Lower/OpenMP/workdistribute-target-teams-clauses.f90 b/flang/test/Lower/OpenMP/workdistribute-target-teams-clauses.f90
new file mode 100644
index 0000000..4a08e53
--- /dev/null
+++ b/flang/test/Lower/OpenMP/workdistribute-target-teams-clauses.f90
@@ -0,0 +1,32 @@
+! RUN: %flang_fc1 -emit-fir -fopenmp -fopenmp-version=60 %s -o - | FileCheck %s
+
+! CHECK-LABEL: func @_QPtarget_teams_workdistribute
+! CHECK: omp.target_data map_entries({{.*}})
+! CHECK: omp.target thread_limit({{.*}}) host_eval({{.*}}) map_entries({{.*}})
+! CHECK: omp.teams num_teams({{.*}})
+! CHECK: omp.parallel
+! CHECK: omp.distribute
+! CHECK: omp.wsloop
+! CHECK: omp.loop_nest
+
+subroutine target_teams_workdistribute()
+ use iso_fortran_env
+ real(kind=real32) :: a
+ real(kind=real32), dimension(10) :: x
+ real(kind=real32), dimension(10) :: y
+ integer :: i
+
+ a = 2.0_real32
+ x = [(real(i, real32), i = 1, 10)]
+ y = [(real(i * 0.5, real32), i = 1, 10)]
+
+ !$omp target teams workdistribute &
+ !$omp& num_teams(4) &
+ !$omp& thread_limit(8) &
+ !$omp& default(shared) &
+ !$omp& private(i) &
+ !$omp& map(to: x) &
+ !$omp& map(tofrom: y)
+ y = a * x + y
+ !$omp end target teams workdistribute
+end subroutine target_teams_workdistribute
diff --git a/flang/test/Lower/OpenMP/workdistribute-teams-unsupported-after.f90 b/flang/test/Lower/OpenMP/workdistribute-teams-unsupported-after.f90
new file mode 100644
index 0000000..f9c5a77
--- /dev/null
+++ b/flang/test/Lower/OpenMP/workdistribute-teams-unsupported-after.f90
@@ -0,0 +1,22 @@
+! RUN: not %flang_fc1 -emit-fir -fopenmp -fopenmp-version=60 %s -o - 2>&1 | FileCheck %s
+
+! CHECK: error: teams has omp ops other than workdistribute. Lowering not implemented yet.
+! CHECK-LABEL: func @_QPteams_workdistribute_1
+subroutine teams_workdistribute_1()
+ use iso_fortran_env
+ real(kind=real32) :: a
+ real(kind=real32), dimension(10) :: x
+ real(kind=real32), dimension(10) :: y
+ !$omp teams
+
+ !$omp workdistribute
+ y = a * x + y
+ !$omp end workdistribute
+
+ !$omp distribute
+ do i = 1, 10
+ x(i) = real(i, kind=real32)
+ end do
+ !$omp end distribute
+ !$omp end teams
+end subroutine teams_workdistribute_1
diff --git a/flang/test/Lower/OpenMP/workdistribute-teams-unsupported-before.f90 b/flang/test/Lower/OpenMP/workdistribute-teams-unsupported-before.f90
new file mode 100644
index 0000000..3ef7f90
--- /dev/null
+++ b/flang/test/Lower/OpenMP/workdistribute-teams-unsupported-before.f90
@@ -0,0 +1,22 @@
+! RUN: not %flang_fc1 -emit-fir -fopenmp -fopenmp-version=60 %s -o - 2>&1 | FileCheck %s
+
+! CHECK: error: teams has omp ops other than workdistribute. Lowering not implemented yet.
+! CHECK-LABEL: func @_QPteams_workdistribute_1
+subroutine teams_workdistribute_1()
+ use iso_fortran_env
+ real(kind=real32) :: a
+ real(kind=real32), dimension(10) :: x
+ real(kind=real32), dimension(10) :: y
+ !$omp teams
+
+ !$omp distribute
+ do i = 1, 10
+ x(i) = real(i, kind=real32)
+ end do
+ !$omp end distribute
+
+ !$omp workdistribute
+ y = a * x + y
+ !$omp end workdistribute
+ !$omp end teams
+end subroutine teams_workdistribute_1
diff --git a/flang/test/Transforms/OpenMP/lower-workdistribute-doloop.mlir b/flang/test/Transforms/OpenMP/lower-workdistribute-doloop.mlir
new file mode 100644
index 0000000..00d10d6
--- /dev/null
+++ b/flang/test/Transforms/OpenMP/lower-workdistribute-doloop.mlir
@@ -0,0 +1,33 @@
+// RUN: fir-opt --lower-workdistribute %s | FileCheck %s
+
+// CHECK-LABEL: func.func @x({{.*}})
+// CHECK: omp.teams {
+// CHECK: omp.parallel {
+// CHECK: omp.distribute {
+// CHECK: omp.wsloop {
+// CHECK: omp.loop_nest (%[[VAL_1:.*]]) : index = (%[[ARG0:.*]]) to (%[[ARG1:.*]]) inclusive step (%[[ARG2:.*]]) {
+// CHECK: %[[VAL_0:.*]] = arith.constant 0 : index
+// CHECK: fir.store %[[VAL_0]] to %[[ARG4:.*]] : !fir.ref<index>
+// CHECK: omp.yield
+// CHECK: }
+// CHECK: } {omp.composite}
+// CHECK: } {omp.composite}
+// CHECK: omp.terminator
+// CHECK: } {omp.composite}
+// CHECK: omp.terminator
+// CHECK: }
+// CHECK: return
+// CHECK: }
+func.func @x(%lb : index, %ub : index, %step : index, %b : i1, %addr : !fir.ref<index>) {
+ omp.teams {
+ omp.workdistribute {
+ fir.do_loop %iv = %lb to %ub step %step unordered {
+ %zero = arith.constant 0 : index
+ fir.store %zero to %addr : !fir.ref<index>
+ }
+ omp.terminator
+ }
+ omp.terminator
+ }
+ return
+}
diff --git a/flang/test/Transforms/OpenMP/lower-workdistribute-fission-host.mlir b/flang/test/Transforms/OpenMP/lower-workdistribute-fission-host.mlir
new file mode 100644
index 0000000..04e60ca
--- /dev/null
+++ b/flang/test/Transforms/OpenMP/lower-workdistribute-fission-host.mlir
@@ -0,0 +1,117 @@
+// RUN: fir-opt --lower-workdistribute %s | FileCheck %s
+// Test lowering of workdistribute after fission on the host device.
+
+// CHECK-LABEL: func.func @x(
+// CHECK: %[[VAL_0:.*]] = fir.alloca index {bindc_name = "lb"}
+// CHECK: fir.store %[[ARG0:.*]] to %[[VAL_0]] : !fir.ref<index>
+// CHECK: %[[VAL_1:.*]] = fir.alloca index {bindc_name = "ub"}
+// CHECK: fir.store %[[ARG1:.*]] to %[[VAL_1]] : !fir.ref<index>
+// CHECK: %[[VAL_2:.*]] = fir.alloca index {bindc_name = "step"}
+// CHECK: fir.store %[[ARG2:.*]] to %[[VAL_2]] : !fir.ref<index>
+// CHECK: %[[VAL_3:.*]] = omp.map.info var_ptr(%[[VAL_0]] : !fir.ref<index>, index) map_clauses(to) capture(ByRef) -> !fir.ref<index> {name = "lb"}
+// CHECK: %[[VAL_4:.*]] = omp.map.info var_ptr(%[[VAL_1]] : !fir.ref<index>, index) map_clauses(to) capture(ByRef) -> !fir.ref<index> {name = "ub"}
+// CHECK: %[[VAL_5:.*]] = omp.map.info var_ptr(%[[VAL_2]] : !fir.ref<index>, index) map_clauses(to) capture(ByRef) -> !fir.ref<index> {name = "step"}
+// CHECK: %[[VAL_6:.*]] = omp.map.info var_ptr(%[[ARG3:.*]] : !fir.ref<index>, index) map_clauses(tofrom) capture(ByRef) -> !fir.ref<index> {name = "addr"}
+// CHECK: %[[VAL_7:.*]] = omp.map.info var_ptr(%[[VAL_0]] : !fir.ref<index>, index) map_clauses(exit_release_or_enter_alloc) capture(ByRef) -> !fir.ref<index> {name = "lb"}
+// CHECK: %[[VAL_8:.*]] = omp.map.info var_ptr(%[[VAL_1]] : !fir.ref<index>, index) map_clauses(exit_release_or_enter_alloc) capture(ByRef) -> !fir.ref<index> {name = "ub"}
+// CHECK: %[[VAL_9:.*]] = omp.map.info var_ptr(%[[VAL_2]] : !fir.ref<index>, index) map_clauses(exit_release_or_enter_alloc) capture(ByRef) -> !fir.ref<index> {name = "step"}
+// CHECK: %[[VAL_10:.*]] = omp.map.info var_ptr(%[[ARG3]] : !fir.ref<index>, index) map_clauses(exit_release_or_enter_alloc) capture(ByRef) -> !fir.ref<index> {name = "addr"}
+// CHECK: omp.target_data map_entries(%[[VAL_3]], %[[VAL_4]], %[[VAL_5]], %[[VAL_6]] : !fir.ref<index>, !fir.ref<index>, !fir.ref<index>, !fir.ref<index>) {
+// CHECK: %[[VAL_11:.*]] = fir.alloca index
+// CHECK: %[[VAL_12:.*]] = omp.map.info var_ptr(%[[VAL_11]] : !fir.ref<index>, index) map_clauses(from) capture(ByRef) -> !fir.ref<index> {name = "__flang_workdistribute_from"}
+// CHECK: %[[VAL_13:.*]] = omp.map.info var_ptr(%[[VAL_11]] : !fir.ref<index>, index) map_clauses(to) capture(ByRef) -> !fir.ref<index> {name = "__flang_workdistribute_to"}
+// CHECK: %[[VAL_14:.*]] = fir.alloca index
+// CHECK: %[[VAL_15:.*]] = omp.map.info var_ptr(%[[VAL_14]] : !fir.ref<index>, index) map_clauses(from) capture(ByRef) -> !fir.ref<index> {name = "__flang_workdistribute_from"}
+// CHECK: %[[VAL_16:.*]] = omp.map.info var_ptr(%[[VAL_14]] : !fir.ref<index>, index) map_clauses(to) capture(ByRef) -> !fir.ref<index> {name = "__flang_workdistribute_to"}
+// CHECK: %[[VAL_17:.*]] = fir.alloca index
+// CHECK: %[[VAL_18:.*]] = omp.map.info var_ptr(%[[VAL_17]] : !fir.ref<index>, index) map_clauses(from) capture(ByRef) -> !fir.ref<index> {name = "__flang_workdistribute_from"}
+// CHECK: %[[VAL_19:.*]] = omp.map.info var_ptr(%[[VAL_17]] : !fir.ref<index>, index) map_clauses(to) capture(ByRef) -> !fir.ref<index> {name = "__flang_workdistribute_to"}
+// CHECK: %[[VAL_20:.*]] = fir.alloca !fir.heap<index>
+// CHECK: %[[VAL_21:.*]] = omp.map.info var_ptr(%[[VAL_20]] : !fir.ref<!fir.heap<index>>, !fir.heap<index>) map_clauses(from) capture(ByRef) -> !fir.ref<!fir.heap<index>> {name = "__flang_workdistribute_from"}
+// CHECK: %[[VAL_22:.*]] = omp.map.info var_ptr(%[[VAL_20]] : !fir.ref<!fir.heap<index>>, !fir.heap<index>) map_clauses(to) capture(ByRef) -> !fir.ref<!fir.heap<index>> {name = "__flang_workdistribute_to"}
+// CHECK: %[[VAL_23:.*]] = llvm.mlir.constant(0 : i32) : i32
+// CHECK: %[[VAL_24:.*]] = fir.load %[[VAL_0]] : !fir.ref<index>
+// CHECK: %[[VAL_25:.*]] = fir.load %[[VAL_1]] : !fir.ref<index>
+// CHECK: %[[VAL_26:.*]] = fir.load %[[VAL_2]] : !fir.ref<index>
+// CHECK: %[[VAL_27:.*]] = arith.constant 1 : index
+// CHECK: %[[VAL_28:.*]] = arith.addi %[[VAL_25]], %[[VAL_25]] : index
+// CHECK: %[[VAL_29:.*]] = omp.target_allocmem %[[VAL_23]] : i32, index, %[[VAL_27]] {uniq_name = "dev_buf"}
+// CHECK: %[[VAL_30:.*]] = fir.convert %[[VAL_29]] : (i64) -> !fir.heap<index>
+// CHECK: fir.store %[[VAL_24]] to %[[VAL_11]] : !fir.ref<index>
+// CHECK: fir.store %[[VAL_25]] to %[[VAL_14]] : !fir.ref<index>
+// CHECK: fir.store %[[VAL_26]] to %[[VAL_17]] : !fir.ref<index>
+// CHECK: fir.store %[[VAL_30]] to %[[VAL_20]] : !fir.ref<!fir.heap<index>>
+// CHECK: omp.target host_eval(%[[VAL_24]] -> %[[VAL_31:.*]], %[[VAL_25]] -> %[[VAL_32:.*]], %[[VAL_26]] -> %[[VAL_33:.*]] : index, index, index) map_entries(%[[VAL_7]] -> %[[VAL_34:.*]], %[[VAL_8]] -> %[[VAL_35:.*]], %[[VAL_9]] -> %[[VAL_36:.*]], %[[VAL_10]] -> %[[VAL_37:.*]], %[[VAL_13]] -> %[[VAL_38:.*]], %[[VAL_16]] -> %[[VAL_39:.*]], %[[VAL_19]] -> %[[VAL_40:.*]], %[[VAL_22]] -> %[[VAL_41:.*]] : !fir.ref<index>, !fir.ref<index>, !fir.ref<index>, !fir.ref<index>, !fir.ref<index>, !fir.ref<index>, !fir.ref<index>, !fir.ref<!fir.heap<index>>) {
+// CHECK: %[[VAL_42:.*]] = fir.load %[[VAL_38]] : !fir.ref<index>
+// CHECK: %[[VAL_43:.*]] = fir.load %[[VAL_39]] : !fir.ref<index>
+// CHECK: %[[VAL_44:.*]] = fir.load %[[VAL_40]] : !fir.ref<index>
+// CHECK: %[[VAL_45:.*]] = fir.load %[[VAL_41]] : !fir.ref<!fir.heap<index>>
+// CHECK: %[[VAL_46:.*]] = arith.addi %[[VAL_43]], %[[VAL_43]] : index
+// CHECK: omp.teams {
+// CHECK: omp.parallel {
+// CHECK: omp.distribute {
+// CHECK: omp.wsloop {
+// CHECK: omp.loop_nest (%[[VAL_47:.*]]) : index = (%[[VAL_31]]) to (%[[VAL_32]]) inclusive step (%[[VAL_33]]) {
+// CHECK: fir.store %[[VAL_46]] to %[[VAL_45]] : !fir.heap<index>
+// CHECK: omp.yield
+// CHECK: }
+// CHECK: } {omp.composite}
+// CHECK: } {omp.composite}
+// CHECK: omp.terminator
+// CHECK: } {omp.composite}
+// CHECK: omp.terminator
+// CHECK: }
+// CHECK: omp.terminator
+// CHECK: }
+// CHECK: %[[VAL_48:.*]] = llvm.mlir.constant(0 : i32) : i32
+// CHECK: %[[VAL_49:.*]] = fir.load %[[VAL_11]] : !fir.ref<index>
+// CHECK: %[[VAL_50:.*]] = fir.load %[[VAL_14]] : !fir.ref<index>
+// CHECK: %[[VAL_51:.*]] = fir.load %[[VAL_17]] : !fir.ref<index>
+// CHECK: %[[VAL_52:.*]] = fir.load %[[VAL_20]] : !fir.ref<!fir.heap<index>>
+// CHECK: %[[VAL_53:.*]] = arith.addi %[[VAL_50]], %[[VAL_50]] : index
+// CHECK: fir.store %[[VAL_49]] to %[[VAL_52]] : !fir.heap<index>
+// CHECK: %[[VAL_54:.*]] = fir.convert %[[VAL_52]] : (!fir.heap<index>) -> i64
+// CHECK: omp.target_freemem %[[VAL_48]], %[[VAL_54]] : i32, i64
+// CHECK: omp.terminator
+// CHECK: }
+// CHECK: return
+// CHECK: }
+
+module attributes {llvm.target_triple = "x86_64-unknown-linux-gnu", omp.is_gpu = false, omp.is_target_device = false} {
+func.func @x(%lb : index, %ub : index, %step : index, %addr : !fir.ref<index>) {
+ %lb_ref = fir.alloca index {bindc_name = "lb"}
+ fir.store %lb to %lb_ref : !fir.ref<index>
+ %ub_ref = fir.alloca index {bindc_name = "ub"}
+ fir.store %ub to %ub_ref : !fir.ref<index>
+ %step_ref = fir.alloca index {bindc_name = "step"}
+ fir.store %step to %step_ref : !fir.ref<index>
+
+ %lb_map = omp.map.info var_ptr(%lb_ref : !fir.ref<index>, index) map_clauses(to) capture(ByRef) -> !fir.ref<index> {name = "lb"}
+ %ub_map = omp.map.info var_ptr(%ub_ref : !fir.ref<index>, index) map_clauses(to) capture(ByRef) -> !fir.ref<index> {name = "ub"}
+ %step_map = omp.map.info var_ptr(%step_ref : !fir.ref<index>, index) map_clauses(to) capture(ByRef) -> !fir.ref<index> {name = "step"}
+ %addr_map = omp.map.info var_ptr(%addr : !fir.ref<index>, index) map_clauses(tofrom) capture(ByRef) -> !fir.ref<index> {name = "addr"}
+
+ omp.target map_entries(%lb_map -> %ARG0, %ub_map -> %ARG1, %step_map -> %ARG2, %addr_map -> %ARG3 : !fir.ref<index>, !fir.ref<index>, !fir.ref<index>, !fir.ref<index>) {
+ %lb_val = fir.load %ARG0 : !fir.ref<index>
+ %ub_val = fir.load %ARG1 : !fir.ref<index>
+ %step_val = fir.load %ARG2 : !fir.ref<index>
+ %one = arith.constant 1 : index
+
+ %20 = arith.addi %ub_val, %ub_val : index
+ omp.teams {
+ omp.workdistribute {
+ %dev_mem = fir.allocmem index, %one {uniq_name = "dev_buf"}
+ fir.do_loop %iv = %lb_val to %ub_val step %step_val unordered {
+ fir.store %20 to %dev_mem : !fir.heap<index>
+ }
+ fir.store %lb_val to %dev_mem : !fir.heap<index>
+ fir.freemem %dev_mem : !fir.heap<index>
+ omp.terminator
+ }
+ omp.terminator
+ }
+ omp.terminator
+ }
+ return
+}
+}
diff --git a/flang/test/Transforms/OpenMP/lower-workdistribute-fission-target.mlir b/flang/test/Transforms/OpenMP/lower-workdistribute-fission-target.mlir
new file mode 100644
index 0000000..062eb70
--- /dev/null
+++ b/flang/test/Transforms/OpenMP/lower-workdistribute-fission-target.mlir
@@ -0,0 +1,118 @@
+// RUN: fir-opt --lower-workdistribute %s | FileCheck %s
+// Test lowering of workdistribute after fission on the target device.
+
+// CHECK-LABEL: func.func @x(
+// CHECK: %[[VAL_0:.*]] = fir.alloca index {bindc_name = "lb"}
+// CHECK: fir.store %[[ARG0:.*]] to %[[VAL_0]] : !fir.ref<index>
+// CHECK: %[[VAL_1:.*]] = fir.alloca index {bindc_name = "ub"}
+// CHECK: fir.store %[[ARG1:.*]] to %[[VAL_1]] : !fir.ref<index>
+// CHECK: %[[VAL_2:.*]] = fir.alloca index {bindc_name = "step"}
+// CHECK: fir.store %[[ARG2:.*]] to %[[VAL_2]] : !fir.ref<index>
+// CHECK: %[[VAL_3:.*]] = omp.map.info var_ptr(%[[VAL_0]] : !fir.ref<index>, index) map_clauses(to) capture(ByRef) -> !fir.ref<index> {name = "lb"}
+// CHECK: %[[VAL_4:.*]] = omp.map.info var_ptr(%[[VAL_1]] : !fir.ref<index>, index) map_clauses(to) capture(ByRef) -> !fir.ref<index> {name = "ub"}
+// CHECK: %[[VAL_5:.*]] = omp.map.info var_ptr(%[[VAL_2]] : !fir.ref<index>, index) map_clauses(to) capture(ByRef) -> !fir.ref<index> {name = "step"}
+// CHECK: %[[VAL_6:.*]] = omp.map.info var_ptr(%[[ARG3:.*]] : !fir.ref<index>, index) map_clauses(tofrom) capture(ByRef) -> !fir.ref<index> {name = "addr"}
+// CHECK: %[[VAL_7:.*]] = omp.map.info var_ptr(%[[VAL_0]] : !fir.ref<index>, index) map_clauses(exit_release_or_enter_alloc) capture(ByRef) -> !fir.ref<index> {name = "lb"}
+// CHECK: %[[VAL_8:.*]] = omp.map.info var_ptr(%[[VAL_1]] : !fir.ref<index>, index) map_clauses(exit_release_or_enter_alloc) capture(ByRef) -> !fir.ref<index> {name = "ub"}
+// CHECK: %[[VAL_9:.*]] = omp.map.info var_ptr(%[[VAL_2]] : !fir.ref<index>, index) map_clauses(exit_release_or_enter_alloc) capture(ByRef) -> !fir.ref<index> {name = "step"}
+// CHECK: %[[VAL_10:.*]] = omp.map.info var_ptr(%[[ARG3]] : !fir.ref<index>, index) map_clauses(exit_release_or_enter_alloc) capture(ByRef) -> !fir.ref<index> {name = "addr"}
+// CHECK: omp.target_data map_entries(%[[VAL_3]], %[[VAL_4]], %[[VAL_5]], %[[VAL_6]] : !fir.ref<index>, !fir.ref<index>, !fir.ref<index>, !fir.ref<index>) {
+// CHECK: %[[VAL_11:.*]] = fir.alloca index
+// CHECK: %[[VAL_12:.*]] = omp.map.info var_ptr(%[[VAL_11]] : !fir.ref<index>, index) map_clauses(from) capture(ByRef) -> !fir.ref<index> {name = "__flang_workdistribute_from"}
+// CHECK: %[[VAL_13:.*]] = omp.map.info var_ptr(%[[VAL_11]] : !fir.ref<index>, index) map_clauses(to) capture(ByRef) -> !fir.ref<index> {name = "__flang_workdistribute_to"}
+// CHECK: %[[VAL_14:.*]] = fir.alloca index
+// CHECK: %[[VAL_15:.*]] = omp.map.info var_ptr(%[[VAL_14]] : !fir.ref<index>, index) map_clauses(from) capture(ByRef) -> !fir.ref<index> {name = "__flang_workdistribute_from"}
+// CHECK: %[[VAL_16:.*]] = omp.map.info var_ptr(%[[VAL_14]] : !fir.ref<index>, index) map_clauses(to) capture(ByRef) -> !fir.ref<index> {name = "__flang_workdistribute_to"}
+// CHECK: %[[VAL_17:.*]] = fir.alloca index
+// CHECK: %[[VAL_18:.*]] = omp.map.info var_ptr(%[[VAL_17]] : !fir.ref<index>, index) map_clauses(from) capture(ByRef) -> !fir.ref<index> {name = "__flang_workdistribute_from"}
+// CHECK: %[[VAL_19:.*]] = omp.map.info var_ptr(%[[VAL_17]] : !fir.ref<index>, index) map_clauses(to) capture(ByRef) -> !fir.ref<index> {name = "__flang_workdistribute_to"}
+// CHECK: %[[VAL_20:.*]] = fir.alloca !fir.heap<index>
+// CHECK: %[[VAL_21:.*]] = omp.map.info var_ptr(%[[VAL_20]] : !fir.ref<!fir.heap<index>>, !fir.heap<index>) map_clauses(from) capture(ByRef) -> !fir.ref<!fir.heap<index>> {name = "__flang_workdistribute_from"}
+// CHECK: %[[VAL_22:.*]] = omp.map.info var_ptr(%[[VAL_20]] : !fir.ref<!fir.heap<index>>, !fir.heap<index>) map_clauses(to) capture(ByRef) -> !fir.ref<!fir.heap<index>> {name = "__flang_workdistribute_to"}
+// CHECK: %[[VAL_23:.*]] = llvm.mlir.constant(0 : i32) : i32
+// CHECK: %[[VAL_24:.*]] = fir.load %[[VAL_0]] : !fir.ref<index>
+// CHECK: %[[VAL_25:.*]] = fir.load %[[VAL_1]] : !fir.ref<index>
+// CHECK: %[[VAL_26:.*]] = fir.load %[[VAL_2]] : !fir.ref<index>
+// CHECK: %[[VAL_27:.*]] = arith.constant 1 : index
+// CHECK: %[[VAL_28:.*]] = arith.addi %[[VAL_25]], %[[VAL_25]] : index
+// CHECK: %[[VAL_29:.*]] = omp.target_allocmem %[[VAL_23]] : i32, index, %[[VAL_27]] {uniq_name = "dev_buf"}
+// CHECK: %[[VAL_30:.*]] = fir.convert %[[VAL_29]] : (i64) -> !fir.heap<index>
+// CHECK: fir.store %[[VAL_24]] to %[[VAL_11]] : !fir.ref<index>
+// CHECK: fir.store %[[VAL_25]] to %[[VAL_14]] : !fir.ref<index>
+// CHECK: fir.store %[[VAL_26]] to %[[VAL_17]] : !fir.ref<index>
+// CHECK: fir.store %[[VAL_30]] to %[[VAL_20]] : !fir.ref<!fir.heap<index>>
+// CHECK: omp.target map_entries(%[[VAL_7]] -> %[[VAL_31:.*]], %[[VAL_8]] -> %[[VAL_32:.*]], %[[VAL_9]] -> %[[VAL_33:.*]], %[[VAL_10]] -> %[[VAL_34:.*]], %[[VAL_13]] -> %[[VAL_35:.*]], %[[VAL_16]] -> %[[VAL_36:.*]], %[[VAL_19]] -> %[[VAL_37:.*]], %[[VAL_22]] -> %[[VAL_38:.*]] : !fir.ref<index>, !fir.ref<index>, !fir.ref<index>, !fir.ref<index>, !fir.ref<index>, !fir.ref<index>, !fir.ref<index>, !fir.ref<!fir.heap<index>>) {
+// CHECK: %[[VAL_39:.*]] = fir.load %[[VAL_35]] : !fir.ref<index>
+// CHECK: %[[VAL_40:.*]] = fir.load %[[VAL_36]] : !fir.ref<index>
+// CHECK: %[[VAL_41:.*]] = fir.load %[[VAL_37]] : !fir.ref<index>
+// CHECK: %[[VAL_42:.*]] = fir.load %[[VAL_38]] : !fir.ref<!fir.heap<index>>
+// CHECK: %[[VAL_43:.*]] = arith.addi %[[VAL_40]], %[[VAL_40]] : index
+// CHECK: omp.teams {
+// CHECK: omp.parallel {
+// CHECK: omp.distribute {
+// CHECK: omp.wsloop {
+// CHECK: omp.loop_nest (%[[VAL_44:.*]]) : index = (%[[VAL_39]]) to (%[[VAL_40]]) inclusive step (%[[VAL_41]]) {
+// CHECK: fir.store %[[VAL_43]] to %[[VAL_42]] : !fir.heap<index>
+// CHECK: omp.yield
+// CHECK: }
+// CHECK: } {omp.composite}
+// CHECK: } {omp.composite}
+// CHECK: omp.terminator
+// CHECK: } {omp.composite}
+// CHECK: omp.terminator
+// CHECK: }
+// CHECK: omp.terminator
+// CHECK: }
+// CHECK: %[[VAL_45:.*]] = llvm.mlir.constant(0 : i32) : i32
+// CHECK: %[[VAL_46:.*]] = fir.load %[[VAL_11]] : !fir.ref<index>
+// CHECK: %[[VAL_47:.*]] = fir.load %[[VAL_14]] : !fir.ref<index>
+// CHECK: %[[VAL_48:.*]] = fir.load %[[VAL_17]] : !fir.ref<index>
+// CHECK: %[[VAL_49:.*]] = fir.load %[[VAL_20]] : !fir.ref<!fir.heap<index>>
+// CHECK: %[[VAL_50:.*]] = arith.addi %[[VAL_47]], %[[VAL_47]] : index
+// CHECK: fir.store %[[VAL_46]] to %[[VAL_49]] : !fir.heap<index>
+// CHECK: %[[VAL_51:.*]] = fir.convert %[[VAL_49]] : (!fir.heap<index>) -> i64
+// CHECK: omp.target_freemem %[[VAL_45]], %[[VAL_51]] : i32, i64
+// CHECK: omp.terminator
+// CHECK: }
+// CHECK: return
+// CHECK: }
+
+
+module attributes {llvm.target_triple = "amdgcn-amd-amdhsa", omp.is_gpu = true, omp.is_target_device = true} {
+func.func @x(%lb : index, %ub : index, %step : index, %addr : !fir.ref<index>) {
+ %lb_ref = fir.alloca index {bindc_name = "lb"}
+ fir.store %lb to %lb_ref : !fir.ref<index>
+ %ub_ref = fir.alloca index {bindc_name = "ub"}
+ fir.store %ub to %ub_ref : !fir.ref<index>
+ %step_ref = fir.alloca index {bindc_name = "step"}
+ fir.store %step to %step_ref : !fir.ref<index>
+
+ %lb_map = omp.map.info var_ptr(%lb_ref : !fir.ref<index>, index) map_clauses(to) capture(ByRef) -> !fir.ref<index> {name = "lb"}
+ %ub_map = omp.map.info var_ptr(%ub_ref : !fir.ref<index>, index) map_clauses(to) capture(ByRef) -> !fir.ref<index> {name = "ub"}
+ %step_map = omp.map.info var_ptr(%step_ref : !fir.ref<index>, index) map_clauses(to) capture(ByRef) -> !fir.ref<index> {name = "step"}
+ %addr_map = omp.map.info var_ptr(%addr : !fir.ref<index>, index) map_clauses(tofrom) capture(ByRef) -> !fir.ref<index> {name = "addr"}
+
+ omp.target map_entries(%lb_map -> %ARG0, %ub_map -> %ARG1, %step_map -> %ARG2, %addr_map -> %ARG3 : !fir.ref<index>, !fir.ref<index>, !fir.ref<index>, !fir.ref<index>) {
+ %lb_val = fir.load %ARG0 : !fir.ref<index>
+ %ub_val = fir.load %ARG1 : !fir.ref<index>
+ %step_val = fir.load %ARG2 : !fir.ref<index>
+ %one = arith.constant 1 : index
+
+ %20 = arith.addi %ub_val, %ub_val : index
+ omp.teams {
+ omp.workdistribute {
+ %dev_mem = fir.allocmem index, %one {uniq_name = "dev_buf"}
+ fir.do_loop %iv = %lb_val to %ub_val step %step_val unordered {
+ fir.store %20 to %dev_mem : !fir.heap<index>
+ }
+ fir.store %lb_val to %dev_mem : !fir.heap<index>
+ fir.freemem %dev_mem : !fir.heap<index>
+ omp.terminator
+ }
+ omp.terminator
+ }
+ omp.terminator
+ }
+ return
+}
+}
diff --git a/flang/test/Transforms/OpenMP/lower-workdistribute-fission.mlir b/flang/test/Transforms/OpenMP/lower-workdistribute-fission.mlir
new file mode 100644
index 0000000..c562b70
--- /dev/null
+++ b/flang/test/Transforms/OpenMP/lower-workdistribute-fission.mlir
@@ -0,0 +1,71 @@
+// RUN: fir-opt --lower-workdistribute %s | FileCheck %s
+
+// CHECK-LABEL: func.func @test_fission_workdistribute(
+// CHECK: %[[VAL_0:.*]] = arith.constant 0 : index
+// CHECK: %[[VAL_1:.*]] = arith.constant 1 : index
+// CHECK: %[[VAL_2:.*]] = arith.constant 9 : index
+// CHECK: %[[VAL_3:.*]] = arith.constant 5.000000e+00 : f32
+// CHECK: fir.store %[[VAL_3]] to %[[ARG2:.*]] : !fir.ref<f32>
+// CHECK: omp.teams {
+// CHECK: omp.parallel {
+// CHECK: omp.distribute {
+// CHECK: omp.wsloop {
+// CHECK: omp.loop_nest (%[[VAL_4:.*]]) : index = (%[[VAL_0]]) to (%[[VAL_2]]) inclusive step (%[[VAL_1]]) {
+// CHECK: %[[VAL_5:.*]] = fir.coordinate_of %[[ARG0:.*]], %[[VAL_4]] : (!fir.ref<!fir.array<10xf32>>, index) -> !fir.ref<f32>
+// CHECK: %[[VAL_6:.*]] = fir.load %[[VAL_5]] : !fir.ref<f32>
+// CHECK: %[[VAL_7:.*]] = fir.coordinate_of %[[ARG1:.*]], %[[VAL_4]] : (!fir.ref<!fir.array<10xf32>>, index) -> !fir.ref<f32>
+// CHECK: fir.store %[[VAL_6]] to %[[VAL_7]] : !fir.ref<f32>
+// CHECK: omp.yield
+// CHECK: }
+// CHECK: } {omp.composite}
+// CHECK: } {omp.composite}
+// CHECK: omp.terminator
+// CHECK: } {omp.composite}
+// CHECK: omp.terminator
+// CHECK: }
+// CHECK: fir.call @regular_side_effect_func(%[[ARG2:.*]]) : (!fir.ref<f32>) -> ()
+// CHECK: fir.call @my_fir_parallel_runtime_func(%[[ARG3:.*]]) : (!fir.ref<f32>) -> ()
+// CHECK: fir.do_loop %[[VAL_8:.*]] = %[[VAL_0]] to %[[VAL_2]] step %[[VAL_1]] {
+// CHECK: %[[VAL_9:.*]] = fir.coordinate_of %[[ARG0]], %[[VAL_8]] : (!fir.ref<!fir.array<10xf32>>, index) -> !fir.ref<f32>
+// CHECK: fir.store %[[VAL_3]] to %[[VAL_9]] : !fir.ref<f32>
+// CHECK: }
+// CHECK: %[[VAL_10:.*]] = fir.load %[[ARG2:.*]] : !fir.ref<f32>
+// CHECK: fir.store %[[VAL_10]] to %[[ARG3:.*]] : !fir.ref<f32>
+// CHECK: return
+// CHECK: }
+module {
+func.func @regular_side_effect_func(%arg0: !fir.ref<f32>) {
+ return
+}
+func.func @my_fir_parallel_runtime_func(%arg0: !fir.ref<f32>) attributes {fir.runtime} {
+ return
+}
+func.func @test_fission_workdistribute(%arr1: !fir.ref<!fir.array<10xf32>>, %arr2: !fir.ref<!fir.array<10xf32>>, %scalar_ref1: !fir.ref<f32>, %scalar_ref2: !fir.ref<f32>) {
+ %c0_idx = arith.constant 0 : index
+ %c1_idx = arith.constant 1 : index
+ %c9_idx = arith.constant 9 : index
+ %float_val = arith.constant 5.0 : f32
+ omp.teams {
+ omp.workdistribute {
+ fir.store %float_val to %scalar_ref1 : !fir.ref<f32>
+ fir.do_loop %iv = %c0_idx to %c9_idx step %c1_idx unordered {
+ %elem_ptr_arr1 = fir.coordinate_of %arr1, %iv : (!fir.ref<!fir.array<10xf32>>, index) -> !fir.ref<f32>
+ %loaded_val_loop1 = fir.load %elem_ptr_arr1 : !fir.ref<f32>
+ %elem_ptr_arr2 = fir.coordinate_of %arr2, %iv : (!fir.ref<!fir.array<10xf32>>, index) -> !fir.ref<f32>
+ fir.store %loaded_val_loop1 to %elem_ptr_arr2 : !fir.ref<f32>
+ }
+ fir.call @regular_side_effect_func(%scalar_ref1) : (!fir.ref<f32>) -> ()
+ fir.call @my_fir_parallel_runtime_func(%scalar_ref2) : (!fir.ref<f32>) -> ()
+ fir.do_loop %jv = %c0_idx to %c9_idx step %c1_idx {
+ %elem_ptr_ordered_loop = fir.coordinate_of %arr1, %jv : (!fir.ref<!fir.array<10xf32>>, index) -> !fir.ref<f32>
+ fir.store %float_val to %elem_ptr_ordered_loop : !fir.ref<f32>
+ }
+ %loaded_for_hoist = fir.load %scalar_ref1 : !fir.ref<f32>
+ fir.store %loaded_for_hoist to %scalar_ref2 : !fir.ref<f32>
+ omp.terminator
+ }
+ omp.terminator
+ }
+ return
+}
+}
diff --git a/flang/test/Transforms/OpenMP/lower-workdistribute-runtime-assign-scalar.mlir b/flang/test/Transforms/OpenMP/lower-workdistribute-runtime-assign-scalar.mlir
new file mode 100644
index 0000000..03d5d71
--- /dev/null
+++ b/flang/test/Transforms/OpenMP/lower-workdistribute-runtime-assign-scalar.mlir
@@ -0,0 +1,108 @@
+// RUN: fir-opt --lower-workdistribute %s | FileCheck %s
+
+// Test lowering of workdistribute for a scalar assignment within a target teams workdistribute region.
+// The test checks that the scalar assignment is correctly lowered to wsloop and loop_nest operations.
+
+// Example Fortran code:
+// !$omp target teams workdistribute
+// y = 3.0_real32
+// !$omp end target teams workdistribute
+
+
+// CHECK-LABEL: func.func @x(
+// CHECK: omp.target {{.*}} {
+// CHECK: omp.teams {
+// CHECK: omp.parallel {
+// CHECK: omp.distribute {
+// CHECK: omp.wsloop {
+// CHECK: omp.loop_nest (%[[VAL_73:.*]]) : index = (%[[VAL_66:.*]]) to (%[[VAL_72:.*]]) inclusive step (%[[VAL_67:.*]]) {
+// CHECK: %[[VAL_74:.*]] = arith.constant 0 : index
+// CHECK: %[[VAL_75:.*]]:3 = fir.box_dims %[[VAL_64:.*]], %[[VAL_74]] : (!fir.box<!fir.array<?x?xf32>>, index) -> (index, index, index)
+// CHECK: %[[VAL_76:.*]] = arith.constant 1 : index
+// CHECK: %[[VAL_77:.*]]:3 = fir.box_dims %[[VAL_64]], %[[VAL_76]] : (!fir.box<!fir.array<?x?xf32>>, index) -> (index, index, index)
+// CHECK: %[[VAL_78:.*]] = arith.constant 1 : index
+// CHECK: %[[VAL_79:.*]] = arith.remsi %[[VAL_73]], %[[VAL_77]]#1 : index
+// CHECK: %[[VAL_80:.*]] = arith.addi %[[VAL_79]], %[[VAL_78]] : index
+// CHECK: %[[VAL_81:.*]] = arith.divsi %[[VAL_73]], %[[VAL_77]]#1 : index
+// CHECK: %[[VAL_82:.*]] = arith.remsi %[[VAL_81]], %[[VAL_75]]#1 : index
+// CHECK: %[[VAL_83:.*]] = arith.addi %[[VAL_82]], %[[VAL_78]] : index
+// CHECK: %[[VAL_84:.*]] = fir.array_coor %[[VAL_64]] %[[VAL_83]], %[[VAL_80]] : (!fir.box<!fir.array<?x?xf32>>, index, index) -> !fir.ref<f32>
+// CHECK: fir.store %[[VAL_65:.*]] to %[[VAL_84]] : !fir.ref<f32>
+// CHECK: omp.yield
+// CHECK: }
+// CHECK: } {omp.composite}
+// CHECK: } {omp.composite}
+// CHECK: omp.terminator
+// CHECK: } {omp.composite}
+// CHECK: omp.terminator
+// CHECK: }
+// CHECK: omp.terminator
+// CHECK: }
+// CHECK: omp.terminator
+// CHECK: }
+// CHECK: return
+// CHECK: }
+// CHECK: func.func private @_FortranAAssign(!fir.ref<!fir.box<none>>, !fir.box<none>, !fir.ref<i8>, i32) attributes {fir.runtime}
+
+module attributes {llvm.target_triple = "amdgcn-amd-amdhsa", omp.is_gpu = true, omp.is_target_device = true} {
+func.func @x(%arr : !fir.ref<!fir.array<?x?xf32>>) {
+ %c0 = arith.constant 0 : index
+ %c1 = arith.constant 1 : index
+ %c78 = arith.constant 78 : index
+ %cst = arith.constant 3.000000e+00 : f32
+ %0 = fir.alloca i32
+ %1 = fir.alloca i32
+ %c10 = arith.constant 10 : index
+ %c20 = arith.constant 20 : index
+ %194 = arith.subi %c10, %c1 : index
+ %195 = omp.map.bounds lower_bound(%c0 : index) upper_bound(%194 : index) extent(%c10 : index) stride(%c1 : index) start_idx(%c1 : index)
+ %196 = arith.subi %c20, %c1 : index
+ %197 = omp.map.bounds lower_bound(%c0 : index) upper_bound(%196 : index) extent(%c20 : index) stride(%c1 : index) start_idx(%c1 : index)
+ %198 = omp.map.info var_ptr(%arr : !fir.ref<!fir.array<?x?xf32>>, f32) map_clauses(implicit, tofrom) capture(ByRef) bounds(%195, %197) -> !fir.ref<!fir.array<?x?xf32>> {name = "y"}
+ %199 = omp.map.info var_ptr(%1 : !fir.ref<i32>, i32) map_clauses(implicit, exit_release_or_enter_alloc) capture(ByCopy) -> !fir.ref<i32> {name = ""}
+ %200 = omp.map.info var_ptr(%0 : !fir.ref<i32>, i32) map_clauses(implicit, exit_release_or_enter_alloc) capture(ByCopy) -> !fir.ref<i32> {name = ""}
+ omp.target map_entries(%198 -> %arg5, %199 -> %arg6, %200 -> %arg7 : !fir.ref<!fir.array<?x?xf32>>, !fir.ref<i32>, !fir.ref<i32>) {
+ %c0_0 = arith.constant 0 : index
+ %201 = fir.load %arg7 : !fir.ref<i32>
+ %202 = fir.load %arg6 : !fir.ref<i32>
+ %203 = fir.convert %202 : (i32) -> i64
+ %204 = fir.convert %201 : (i32) -> i64
+ %205 = fir.convert %204 : (i64) -> index
+ %206 = arith.cmpi sgt, %205, %c0_0 : index
+ %207 = fir.convert %203 : (i64) -> index
+ %208 = arith.cmpi sgt, %207, %c0_0 : index
+ %209 = arith.select %208, %207, %c0_0 : index
+ %210 = arith.select %206, %205, %c0_0 : index
+ %211 = fir.shape %210, %209 : (index, index) -> !fir.shape<2>
+ %212 = fir.declare %arg5(%211) {uniq_name = "_QFFaxpy_array_workdistributeEy"} : (!fir.ref<!fir.array<?x?xf32>>, !fir.shape<2>) -> !fir.ref<!fir.array<?x?xf32>>
+ %213 = fir.embox %212(%211) : (!fir.ref<!fir.array<?x?xf32>>, !fir.shape<2>) -> !fir.box<!fir.array<?x?xf32>>
+ omp.teams {
+ %214 = fir.alloca !fir.box<!fir.array<?x?xf32>> {pinned}
+ omp.workdistribute {
+ %215 = fir.alloca f32
+ %216 = fir.embox %215 : (!fir.ref<f32>) -> !fir.box<f32>
+ %217 = fir.shape %210, %209 : (index, index) -> !fir.shape<2>
+ %218 = fir.embox %212(%217) : (!fir.ref<!fir.array<?x?xf32>>, !fir.shape<2>) -> !fir.box<!fir.array<?x?xf32>>
+ fir.store %218 to %214 : !fir.ref<!fir.box<!fir.array<?x?xf32>>>
+ %219 = fir.address_of(@_QQclXf9c642d28e5bba1f07fa9a090b72f4fc) : !fir.ref<!fir.char<1,78>>
+ %c39_i32 = arith.constant 39 : i32
+ %220 = fir.convert %214 : (!fir.ref<!fir.box<!fir.array<?x?xf32>>>) -> !fir.ref<!fir.box<none>>
+ %221 = fir.convert %216 : (!fir.box<f32>) -> !fir.box<none>
+ %222 = fir.convert %219 : (!fir.ref<!fir.char<1,78>>) -> !fir.ref<i8>
+ fir.call @_FortranAAssign(%220, %221, %222, %c39_i32) : (!fir.ref<!fir.box<none>>, !fir.box<none>, !fir.ref<i8>, i32) -> ()
+ omp.terminator
+ }
+ omp.terminator
+ }
+ omp.terminator
+ }
+ return
+}
+
+func.func private @_FortranAAssign(!fir.ref<!fir.box<none>>, !fir.box<none>, !fir.ref<i8>, i32) attributes {fir.runtime}
+
+fir.global linkonce @_QQclXf9c642d28e5bba1f07fa9a090b72f4fc constant : !fir.char<1,78> {
+ %0 = fir.string_lit "File: /work/github/skc7/llvm-project/build_fomp_reldebinfo/saxpy_tests/\00"(78) : !fir.char<1,78>
+ fir.has_value %0 : !fir.char<1,78>
+}
+}
diff --git a/libcxx/utils/compare-benchmarks b/libcxx/utils/compare-benchmarks
index 988e243..d165c73 100755
--- a/libcxx/utils/compare-benchmarks
+++ b/libcxx/utils/compare-benchmarks
@@ -65,9 +65,16 @@ def plain_text_comparison(data, metric, baseline_name=None, candidate_name=None)
"""
data = data.replace(numpy.nan, None) # avoid NaNs in tabulate output
headers = ['Benchmark', baseline_name, candidate_name, 'Difference', '% Difference']
- fmt = (None, '.2f', '.2f', '.2f', '.2f')
- table = data[['benchmark', f'{metric}_0', f'{metric}_1', 'difference', 'percent']].set_index('benchmark')
- return tabulate.tabulate(table, headers=headers, floatfmt=fmt, numalign='right')
+ fmt = (None, '.2f', '.2f', '.2f', '.2%')
+ table = data[['benchmark', f'{metric}_0', f'{metric}_1', 'difference', 'percent']]
+
+    # Compute the geomean of each metric column and append a row with their difference
+ geomean_0 = statistics.geometric_mean(data[f'{metric}_0'].dropna())
+ geomean_1 = statistics.geometric_mean(data[f'{metric}_1'].dropna())
+ geomean_row = ['Geomean', geomean_0, geomean_1, (geomean_1 - geomean_0), (geomean_1 - geomean_0) / geomean_0]
+    table.loc[table.index.max() + 1] = geomean_row
+
+ return tabulate.tabulate(table.set_index('benchmark'), headers=headers, floatfmt=fmt, numalign='right')
def create_chart(data, metric, subtitle=None, series_names=None):
"""
@@ -154,7 +161,7 @@ def main(argv):
# If we have exactly two data sets, compute additional info in new columns
if len(lnt_inputs) == 2:
data['difference'] = data[f'{args.metric}_1'] - data[f'{args.metric}_0']
- data['percent'] = 100 * (data['difference'] / data[f'{args.metric}_0'])
+ data['percent'] = data['difference'] / data[f'{args.metric}_0']
if args.filter is not None:
keeplist = [b for b in data['benchmark'] if re.search(args.filter, b) is not None]
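For reference, the new Geomean row is computed the same way as the per-benchmark rows: difference = geomean_1 - geomean_0 and percent = (geomean_1 - geomean_0) / geomean_0, stored as a fraction and rendered by the '.2%' format. For example, geomeans of 10.0 (baseline) and 11.5 (candidate) would display a difference of 1.50 and a percent difference of 15.00%.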
diff --git a/lldb/include/lldb/Utility/DataExtractor.h b/lldb/include/lldb/Utility/DataExtractor.h
index 0b7e771..b4960f5 100644
--- a/lldb/include/lldb/Utility/DataExtractor.h
+++ b/lldb/include/lldb/Utility/DataExtractor.h
@@ -994,7 +994,7 @@ protected:
constexpr size_t src_size = sizeof(T);
T val = fail_value;
- const T *src = static_cast<const T *>(GetData(offset_ptr, src_size));
+ const void *src = GetData(offset_ptr, src_size);
if (!src)
return val;
diff --git a/lldb/source/Plugins/LanguageRuntime/ObjC/ObjCLanguageRuntime.cpp b/lldb/source/Plugins/LanguageRuntime/ObjC/ObjCLanguageRuntime.cpp
index c33760e..2b2ca08 100644
--- a/lldb/source/Plugins/LanguageRuntime/ObjC/ObjCLanguageRuntime.cpp
+++ b/lldb/source/Plugins/LanguageRuntime/ObjC/ObjCLanguageRuntime.cpp
@@ -423,6 +423,46 @@ Status ObjCLanguageRuntime::ObjCExceptionPrecondition::ConfigurePrecondition(
return error;
}
+CompilerType ObjCLanguageRuntime::LookupInModulesVendor(ConstString class_name,
+ Target &target) {
+ assert(class_name);
+
+ auto *persistent_state = llvm::cast<ClangPersistentVariables>(
+ target.GetPersistentExpressionStateForLanguage(lldb::eLanguageTypeC));
+ if (!persistent_state)
+ return {};
+
+ auto clang_modules_decl_vendor_sp =
+ persistent_state->GetClangModulesDeclVendor();
+ if (!clang_modules_decl_vendor_sp)
+ return {};
+
+ auto types = clang_modules_decl_vendor_sp->FindTypes(
+ class_name, /*max_matches*/ UINT32_MAX);
+ if (types.empty())
+ return {};
+
+ return types.front();
+}
+
+CompilerType ObjCLanguageRuntime::LookupInRuntime(ConstString class_name) {
+ auto *runtime_vendor = GetDeclVendor();
+ if (!runtime_vendor)
+ return {};
+
+ std::vector<CompilerDecl> compiler_decls;
+ runtime_vendor->FindDecls(class_name, false, UINT32_MAX, compiler_decls);
+ if (compiler_decls.empty())
+ return {};
+
+ auto *ctx =
+ llvm::dyn_cast<TypeSystemClang>(compiler_decls[0].GetTypeSystem());
+ if (!ctx)
+ return {};
+
+ return ctx->GetTypeForDecl(compiler_decls[0].GetOpaqueDecl());
+}
+
std::optional<CompilerType>
ObjCLanguageRuntime::GetRuntimeType(CompilerType base_type) {
CompilerType class_type;
@@ -442,18 +482,21 @@ ObjCLanguageRuntime::GetRuntimeType(CompilerType base_type) {
if (!class_name)
return std::nullopt;
- TypeSP complete_objc_class_type_sp = LookupInCompleteClassCache(class_name);
- if (!complete_objc_class_type_sp)
- return std::nullopt;
-
- CompilerType complete_class(
- complete_objc_class_type_sp->GetFullCompilerType());
- if (complete_class.GetCompleteType()) {
- if (is_pointer_type)
- return complete_class.GetPointerType();
- else
- return complete_class;
+ if (TypeSP complete_objc_class_type_sp =
+ LookupInCompleteClassCache(class_name)) {
+ if (CompilerType complete_class =
+ complete_objc_class_type_sp->GetFullCompilerType();
+ complete_class.GetCompleteType())
+ return is_pointer_type ? complete_class.GetPointerType() : complete_class;
}
+ assert(m_process);
+ if (CompilerType found =
+ LookupInModulesVendor(class_name, m_process->GetTarget()))
+ return is_pointer_type ? found.GetPointerType() : found;
+
+ if (CompilerType found = LookupInRuntime(class_name))
+ return is_pointer_type ? found.GetPointerType() : found;
+
return std::nullopt;
}
diff --git a/lldb/source/Plugins/LanguageRuntime/ObjC/ObjCLanguageRuntime.h b/lldb/source/Plugins/LanguageRuntime/ObjC/ObjCLanguageRuntime.h
index 45de098..cc8281e 100644
--- a/lldb/source/Plugins/LanguageRuntime/ObjC/ObjCLanguageRuntime.h
+++ b/lldb/source/Plugins/LanguageRuntime/ObjC/ObjCLanguageRuntime.h
@@ -465,6 +465,10 @@ protected:
ObjCLanguageRuntime(const ObjCLanguageRuntime &) = delete;
const ObjCLanguageRuntime &operator=(const ObjCLanguageRuntime &) = delete;
+
+private:
+ CompilerType LookupInRuntime(ConstString class_name);
+  CompilerType LookupInModulesVendor(ConstString class_name, Target &target);
};
} // namespace lldb_private
diff --git a/lldb/test/API/lang/objc/ivar-in-framework-base/Makefile b/lldb/test/API/lang/objc/ivar-in-framework-base/Makefile
new file mode 100644
index 0000000..c7947fc
--- /dev/null
+++ b/lldb/test/API/lang/objc/ivar-in-framework-base/Makefile
@@ -0,0 +1,6 @@
+OBJC_SOURCES := main.m lib.m
+LD_EXTRAS = -framework Foundation
+
+include Makefile.rules
+
+lib.o: CFLAGS = $(CFLAGS_NO_DEBUG)
diff --git a/lldb/test/API/lang/objc/ivar-in-framework-base/TestIvarInFrameworkBase.py b/lldb/test/API/lang/objc/ivar-in-framework-base/TestIvarInFrameworkBase.py
new file mode 100644
index 0000000..40fc6b7
--- /dev/null
+++ b/lldb/test/API/lang/objc/ivar-in-framework-base/TestIvarInFrameworkBase.py
@@ -0,0 +1,39 @@
+import lldb
+from lldbsuite.test.decorators import *
+from lldbsuite.test.lldbtest import *
+from lldbsuite.test import lldbutil
+
+
+class TestIvarInFrameworkBase(TestBase):
+ """
+ Tests whether LLDB's data inspection commands can correctly retrieve
+ information about ivars from the Objective-C runtime.
+ In this test-case we have a base class type for which we don't have access
+ to the debug-info of the implementation (mimicking the scenario of subclassing
+ a type from a system framework). LLDB won't be able to see the backing ivar for
+ 'fooProp' from just debug-info, but it will fall back on the runtime to get the
+ necessary information.
+ """
+
+ def test_frame_var(self):
+ self.build()
+ lldbutil.run_to_source_breakpoint(self, "break here", lldb.SBFileSpec("main.m"))
+ self.expect("frame variable *bar", substrs=["_fooProp = 10", "_barProp = 15"])
+
+ def test_expr(self):
+ self.build()
+ lldbutil.run_to_source_breakpoint(self, "break here", lldb.SBFileSpec("main.m"))
+ self.expect_expr(
+ "*bar",
+ result_type="Bar",
+ result_children=[
+ ValueCheck(
+ name="Foo",
+ children=[
+ ValueCheck(name="NSObject"),
+ ValueCheck(name="_fooProp", value="10"),
+ ],
+ ),
+ ValueCheck(name="_barProp", value="15"),
+ ],
+ )
diff --git a/lldb/test/API/lang/objc/ivar-in-framework-base/lib.h b/lldb/test/API/lang/objc/ivar-in-framework-base/lib.h
new file mode 100644
index 0000000..31ceb53
--- /dev/null
+++ b/lldb/test/API/lang/objc/ivar-in-framework-base/lib.h
@@ -0,0 +1,6 @@
+#import <Foundation/Foundation.h>
+
+@interface Foo : NSObject
+@property int fooProp;
+- (id)init;
+@end
diff --git a/lldb/test/API/lang/objc/ivar-in-framework-base/lib.m b/lldb/test/API/lang/objc/ivar-in-framework-base/lib.m
new file mode 100644
index 0000000..e1bf80a
--- /dev/null
+++ b/lldb/test/API/lang/objc/ivar-in-framework-base/lib.m
@@ -0,0 +1,8 @@
+#import "lib.h"
+
+@implementation Foo
+- (id)init {
+ self.fooProp = 10;
+ return self;
+}
+@end
diff --git a/lldb/test/API/lang/objc/ivar-in-framework-base/main.m b/lldb/test/API/lang/objc/ivar-in-framework-base/main.m
new file mode 100644
index 0000000..1fd352e
--- /dev/null
+++ b/lldb/test/API/lang/objc/ivar-in-framework-base/main.m
@@ -0,0 +1,22 @@
+#import "lib.h"
+#include <stdio.h>
+
+@interface Bar : Foo
+@property int barProp;
+- (id)init;
+@end
+
+@implementation Bar
+
+- (id)init {
+ self = [super init];
+ self.barProp = 15;
+ return self;
+}
+@end
+
+int main() {
+ Bar *bar = [Bar new];
+ puts("break here");
+ return 0;
+}
diff --git a/llvm/docs/AMDGPUUsage.rst b/llvm/docs/AMDGPUUsage.rst
index 8193adc..e062032 100644
--- a/llvm/docs/AMDGPUUsage.rst
+++ b/llvm/docs/AMDGPUUsage.rst
@@ -883,6 +883,8 @@ supported for the ``amdgcn`` target.
Buffer Fat Pointer 7 N/A N/A 160 0
Buffer Resource 8 N/A V# 128 0x00000000000000000000000000000000
Buffer Strided Pointer (experimental) 9 *TODO*
+ *reserved for downstream use* 10
+ *reserved for downstream use* 11
Streamout Registers 128 N/A GS_REGS
===================================== =============== =========== ================ ======= ============================
diff --git a/llvm/docs/CommandGuide/dsymutil.rst b/llvm/docs/CommandGuide/dsymutil.rst
index 8764e1f..8e61e01 100644
--- a/llvm/docs/CommandGuide/dsymutil.rst
+++ b/llvm/docs/CommandGuide/dsymutil.rst
@@ -75,14 +75,6 @@ OPTIONS
Make a static variable keep the enclosing function even if it would have been
omitted otherwise.
-.. option:: --minimize, -z
-
- When used when creating a dSYM file, this option will suppress the emission of
- the .debug_inlines, .debug_pubnames, and .debug_pubtypes sections since
- dsymutil currently has better equivalents: .apple_names and .apple_types. When
- used in conjunction with ``--update`` option, this option will cause redundant
- accelerator tables to be removed.
-
.. option:: --no-object-timestamp
Don't check timestamp for object files.
diff --git a/llvm/include/llvm/ADT/APFloat.h b/llvm/include/llvm/ADT/APFloat.h
index a1bfce7..bccdb89 100644
--- a/llvm/include/llvm/ADT/APFloat.h
+++ b/llvm/include/llvm/ADT/APFloat.h
@@ -138,10 +138,16 @@ enum lostFraction { // Example of truncated bits:
/// New operations: sqrt, IEEE remainder, C90 fmod, nexttoward.
///
+namespace detail {
+class IEEEFloat;
+class DoubleAPFloat;
+} // namespace detail
+
// This is the common type definitions shared by APFloat and its internal
// implementation classes. This struct should not define any non-static data
// members.
-struct APFloatBase {
+class APFloatBase {
+public:
typedef APInt::WordType integerPart;
static constexpr unsigned integerPartWidth = APInt::APINT_BITS_PER_WORD;
@@ -257,30 +263,64 @@ struct APFloatBase {
LLVM_ABI static const llvm::fltSemantics &EnumToSemantics(Semantics S);
LLVM_ABI static Semantics SemanticsToEnum(const llvm::fltSemantics &Sem);
- LLVM_ABI static const fltSemantics &IEEEhalf() LLVM_READNONE;
- LLVM_ABI static const fltSemantics &BFloat() LLVM_READNONE;
- LLVM_ABI static const fltSemantics &IEEEsingle() LLVM_READNONE;
- LLVM_ABI static const fltSemantics &IEEEdouble() LLVM_READNONE;
- LLVM_ABI static const fltSemantics &IEEEquad() LLVM_READNONE;
- LLVM_ABI static const fltSemantics &PPCDoubleDouble() LLVM_READNONE;
- LLVM_ABI static const fltSemantics &PPCDoubleDoubleLegacy() LLVM_READNONE;
- LLVM_ABI static const fltSemantics &Float8E5M2() LLVM_READNONE;
- LLVM_ABI static const fltSemantics &Float8E5M2FNUZ() LLVM_READNONE;
- LLVM_ABI static const fltSemantics &Float8E4M3() LLVM_READNONE;
- LLVM_ABI static const fltSemantics &Float8E4M3FN() LLVM_READNONE;
- LLVM_ABI static const fltSemantics &Float8E4M3FNUZ() LLVM_READNONE;
- LLVM_ABI static const fltSemantics &Float8E4M3B11FNUZ() LLVM_READNONE;
- LLVM_ABI static const fltSemantics &Float8E3M4() LLVM_READNONE;
- LLVM_ABI static const fltSemantics &FloatTF32() LLVM_READNONE;
- LLVM_ABI static const fltSemantics &Float8E8M0FNU() LLVM_READNONE;
- LLVM_ABI static const fltSemantics &Float6E3M2FN() LLVM_READNONE;
- LLVM_ABI static const fltSemantics &Float6E2M3FN() LLVM_READNONE;
- LLVM_ABI static const fltSemantics &Float4E2M1FN() LLVM_READNONE;
- LLVM_ABI static const fltSemantics &x87DoubleExtended() LLVM_READNONE;
+private:
+ LLVM_ABI static const fltSemantics semIEEEhalf;
+ LLVM_ABI static const fltSemantics semBFloat;
+ LLVM_ABI static const fltSemantics semIEEEsingle;
+ LLVM_ABI static const fltSemantics semIEEEdouble;
+ LLVM_ABI static const fltSemantics semIEEEquad;
+ LLVM_ABI static const fltSemantics semFloat8E5M2;
+ LLVM_ABI static const fltSemantics semFloat8E5M2FNUZ;
+ LLVM_ABI static const fltSemantics semFloat8E4M3;
+ LLVM_ABI static const fltSemantics semFloat8E4M3FN;
+ LLVM_ABI static const fltSemantics semFloat8E4M3FNUZ;
+ LLVM_ABI static const fltSemantics semFloat8E4M3B11FNUZ;
+ LLVM_ABI static const fltSemantics semFloat8E3M4;
+ LLVM_ABI static const fltSemantics semFloatTF32;
+ LLVM_ABI static const fltSemantics semFloat8E8M0FNU;
+ LLVM_ABI static const fltSemantics semFloat6E3M2FN;
+ LLVM_ABI static const fltSemantics semFloat6E2M3FN;
+ LLVM_ABI static const fltSemantics semFloat4E2M1FN;
+ LLVM_ABI static const fltSemantics semX87DoubleExtended;
+ LLVM_ABI static const fltSemantics semBogus;
+ LLVM_ABI static const fltSemantics semPPCDoubleDouble;
+ LLVM_ABI static const fltSemantics semPPCDoubleDoubleLegacy;
+
+ friend class detail::IEEEFloat;
+ friend class detail::DoubleAPFloat;
+ friend class APFloat;
+
+public:
+ static const fltSemantics &IEEEhalf() { return semIEEEhalf; }
+ static const fltSemantics &BFloat() { return semBFloat; }
+ static const fltSemantics &IEEEsingle() { return semIEEEsingle; }
+ static const fltSemantics &IEEEdouble() { return semIEEEdouble; }
+ static const fltSemantics &IEEEquad() { return semIEEEquad; }
+ static const fltSemantics &PPCDoubleDouble() { return semPPCDoubleDouble; }
+ static const fltSemantics &PPCDoubleDoubleLegacy() {
+ return semPPCDoubleDoubleLegacy;
+ }
+ static const fltSemantics &Float8E5M2() { return semFloat8E5M2; }
+ static const fltSemantics &Float8E5M2FNUZ() { return semFloat8E5M2FNUZ; }
+ static const fltSemantics &Float8E4M3() { return semFloat8E4M3; }
+ static const fltSemantics &Float8E4M3FN() { return semFloat8E4M3FN; }
+ static const fltSemantics &Float8E4M3FNUZ() { return semFloat8E4M3FNUZ; }
+ static const fltSemantics &Float8E4M3B11FNUZ() {
+ return semFloat8E4M3B11FNUZ;
+ }
+ static const fltSemantics &Float8E3M4() { return semFloat8E3M4; }
+ static const fltSemantics &FloatTF32() { return semFloatTF32; }
+ static const fltSemantics &Float8E8M0FNU() { return semFloat8E8M0FNU; }
+ static const fltSemantics &Float6E3M2FN() { return semFloat6E3M2FN; }
+ static const fltSemantics &Float6E2M3FN() { return semFloat6E2M3FN; }
+ static const fltSemantics &Float4E2M1FN() { return semFloat4E2M1FN; }
+ static const fltSemantics &x87DoubleExtended() {
+ return semX87DoubleExtended;
+ }
/// A Pseudo fltsemantic used to construct APFloats that cannot conflict with
/// anything real.
- LLVM_ABI static const fltSemantics &Bogus() LLVM_READNONE;
+ static const fltSemantics &Bogus() { return semBogus; }
// Returns true if any number described by this semantics can be precisely
// represented by the specified semantics. Does not take into account
@@ -927,69 +967,11 @@ class APFloat : public APFloatBase {
llvm_unreachable("Unexpected semantics");
}
- ~Storage() {
- if (usesLayout<IEEEFloat>(*semantics)) {
- IEEE.~IEEEFloat();
- return;
- }
- if (usesLayout<DoubleAPFloat>(*semantics)) {
- Double.~DoubleAPFloat();
- return;
- }
- llvm_unreachable("Unexpected semantics");
- }
-
- Storage(const Storage &RHS) {
- if (usesLayout<IEEEFloat>(*RHS.semantics)) {
- new (this) IEEEFloat(RHS.IEEE);
- return;
- }
- if (usesLayout<DoubleAPFloat>(*RHS.semantics)) {
- new (this) DoubleAPFloat(RHS.Double);
- return;
- }
- llvm_unreachable("Unexpected semantics");
- }
-
- Storage(Storage &&RHS) {
- if (usesLayout<IEEEFloat>(*RHS.semantics)) {
- new (this) IEEEFloat(std::move(RHS.IEEE));
- return;
- }
- if (usesLayout<DoubleAPFloat>(*RHS.semantics)) {
- new (this) DoubleAPFloat(std::move(RHS.Double));
- return;
- }
- llvm_unreachable("Unexpected semantics");
- }
-
- Storage &operator=(const Storage &RHS) {
- if (usesLayout<IEEEFloat>(*semantics) &&
- usesLayout<IEEEFloat>(*RHS.semantics)) {
- IEEE = RHS.IEEE;
- } else if (usesLayout<DoubleAPFloat>(*semantics) &&
- usesLayout<DoubleAPFloat>(*RHS.semantics)) {
- Double = RHS.Double;
- } else if (this != &RHS) {
- this->~Storage();
- new (this) Storage(RHS);
- }
- return *this;
- }
-
- Storage &operator=(Storage &&RHS) {
- if (usesLayout<IEEEFloat>(*semantics) &&
- usesLayout<IEEEFloat>(*RHS.semantics)) {
- IEEE = std::move(RHS.IEEE);
- } else if (usesLayout<DoubleAPFloat>(*semantics) &&
- usesLayout<DoubleAPFloat>(*RHS.semantics)) {
- Double = std::move(RHS.Double);
- } else if (this != &RHS) {
- this->~Storage();
- new (this) Storage(std::move(RHS));
- }
- return *this;
- }
+ LLVM_ABI ~Storage();
+ LLVM_ABI Storage(const Storage &RHS);
+ LLVM_ABI Storage(Storage &&RHS);
+ LLVM_ABI Storage &operator=(const Storage &RHS);
+ LLVM_ABI Storage &operator=(Storage &&RHS);
} U;
template <typename T> static bool usesLayout(const fltSemantics &Semantics) {
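Since only the definitions of the semantics accessors change (inline functions over new private static members), existing callers remain source-compatible. A minimal usage sketch under that assumption, building against an LLVM tree with this patch applied:

#include "llvm/ADT/APFloat.h"
#include <cassert>

using namespace llvm;

int main() {
  // The accessors keep their signatures; only their definitions moved inline
  // over private static fltSemantics members, so call sites are unchanged.
  const fltSemantics &Single = APFloatBase::IEEEsingle();
  APFloat F(Single, "1.5");                        // parse in single precision
  bool LosesInfo = false;
  (void)F.convert(APFloatBase::IEEEdouble(),       // widen to double semantics
                  APFloat::rmNearestTiesToEven, &LosesInfo);
  assert(!LosesInfo && F.convertToDouble() == 1.5);
  return 0;
}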
diff --git a/llvm/include/llvm/ADT/STLForwardCompat.h b/llvm/include/llvm/ADT/STLForwardCompat.h
index da9d3ab0..273a5cf 100644
--- a/llvm/include/llvm/ADT/STLForwardCompat.h
+++ b/llvm/include/llvm/ADT/STLForwardCompat.h
@@ -26,6 +26,54 @@ namespace llvm {
// Features from C++20
//===----------------------------------------------------------------------===//
+namespace numbers {
+// clang-format off
+template <typename T, typename = std::enable_if_t<std::is_floating_point_v<T>>>
+inline constexpr T e_v = T(0x1.5bf0a8b145769P+1); // (2.7182818284590452354) https://oeis.org/A001113
+template <typename T, typename = std::enable_if_t<std::is_floating_point_v<T>>>
+inline constexpr T egamma_v = T(0x1.2788cfc6fb619P-1); // (.57721566490153286061) https://oeis.org/A001620
+template <typename T, typename = std::enable_if_t<std::is_floating_point_v<T>>>
+inline constexpr T ln2_v = T(0x1.62e42fefa39efP-1); // (.69314718055994530942) https://oeis.org/A002162
+template <typename T, typename = std::enable_if_t<std::is_floating_point_v<T>>>
+inline constexpr T ln10_v = T(0x1.26bb1bbb55516P+1); // (2.3025850929940456840) https://oeis.org/A002392
+template <typename T, typename = std::enable_if_t<std::is_floating_point_v<T>>>
+inline constexpr T log2e_v = T(0x1.71547652b82feP+0); // (1.4426950408889634074)
+template <typename T, typename = std::enable_if_t<std::is_floating_point_v<T>>>
+inline constexpr T log10e_v = T(0x1.bcb7b1526e50eP-2); // (.43429448190325182765)
+template <typename T, typename = std::enable_if_t<std::is_floating_point_v<T>>>
+inline constexpr T pi_v = T(0x1.921fb54442d18P+1); // (3.1415926535897932385) https://oeis.org/A000796
+template <typename T, typename = std::enable_if_t<std::is_floating_point_v<T>>>
+inline constexpr T inv_pi_v = T(0x1.45f306dc9c883P-2); // (.31830988618379067154) https://oeis.org/A049541
+template <typename T, typename = std::enable_if_t<std::is_floating_point_v<T>>>
+inline constexpr T inv_sqrtpi_v = T(0x1.20dd750429b6dP-1); // (.56418958354775628695) https://oeis.org/A087197
+template <typename T, typename = std::enable_if_t<std::is_floating_point_v<T>>>
+inline constexpr T sqrt2_v = T(0x1.6a09e667f3bcdP+0); // (1.4142135623730950488) https://oeis.org/A002193
+template <typename T, typename = std::enable_if_t<std::is_floating_point_v<T>>>
+inline constexpr T inv_sqrt2_v = T(0x1.6a09e667f3bcdP-1); // (.70710678118654752440)
+template <typename T, typename = std::enable_if_t<std::is_floating_point_v<T>>>
+inline constexpr T sqrt3_v = T(0x1.bb67ae8584caaP+0); // (1.7320508075688772935) https://oeis.org/A002194
+template <typename T, typename = std::enable_if_t<std::is_floating_point_v<T>>>
+inline constexpr T inv_sqrt3_v = T(0x1.279a74590331cP-1); // (.57735026918962576451)
+template <typename T, typename = std::enable_if_t<std::is_floating_point_v<T>>>
+inline constexpr T phi_v = T(0x1.9e3779b97f4a8P+0); // (1.6180339887498948482) https://oeis.org/A001622
+
+inline constexpr double e = e_v<double>;
+inline constexpr double egamma = egamma_v<double>;
+inline constexpr double ln2 = ln2_v<double>;
+inline constexpr double ln10 = ln10_v<double>;
+inline constexpr double log2e = log2e_v<double>;
+inline constexpr double log10e = log10e_v<double>;
+inline constexpr double pi = pi_v<double>;
+inline constexpr double inv_pi = inv_pi_v<double>;
+inline constexpr double inv_sqrtpi = inv_sqrtpi_v<double>;
+inline constexpr double sqrt2 = sqrt2_v<double>;
+inline constexpr double inv_sqrt2 = inv_sqrt2_v<double>;
+inline constexpr double sqrt3 = sqrt3_v<double>;
+inline constexpr double inv_sqrt3 = inv_sqrt3_v<double>;
+inline constexpr double phi = phi_v<double>;
+// clang-format on
+} // namespace numbers
+
template <typename T>
struct remove_cvref // NOLINT(readability-identifier-naming)
{
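The new llvm::numbers variable templates mirror C++20 std::numbers. A self-contained sketch of the same pattern (illustrative names, not the actual llvm::numbers ones), showing how one definition serves every floating-point type:

#include <cstdio>
#include <type_traits>

// Sketch of the std::numbers-style variable-template pattern adopted above;
// sketch_numbers and its members are placeholder names for illustration only.
namespace sketch_numbers {
template <typename T, typename = std::enable_if_t<std::is_floating_point_v<T>>>
inline constexpr T pi_v = T(0x1.921fb54442d18P+1); // 3.14159265358979...
inline constexpr double pi = pi_v<double>;
} // namespace sketch_numbers

int main() {
  // The float instantiation is simply the double constant converted to
  // float precision, so no separate "pif"-style literal is needed.
  constexpr float pif = sketch_numbers::pi_v<float>;
  constexpr double pid = sketch_numbers::pi;
  std::printf("%.8f\n%.17f\n", pif, pid);
  return 0;
}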
diff --git a/llvm/include/llvm/ADT/bit.h b/llvm/include/llvm/ADT/bit.h
index 66c4f94..8c68d0a 100644
--- a/llvm/include/llvm/ADT/bit.h
+++ b/llvm/include/llvm/ADT/bit.h
@@ -336,33 +336,28 @@ template <typename T> [[nodiscard]] T bit_ceil(T Value) {
return T(1) << llvm::bit_width<T>(Value - 1u);
}
-// Forward-declare rotr so that rotl can use it.
-template <typename T, typename = std::enable_if_t<std::is_unsigned_v<T>>>
-[[nodiscard]] constexpr T rotr(T V, int R);
-
template <typename T, typename = std::enable_if_t<std::is_unsigned_v<T>>>
[[nodiscard]] constexpr T rotl(T V, int R) {
- unsigned N = std::numeric_limits<T>::digits;
+ constexpr unsigned N = std::numeric_limits<T>::digits;
- R = R % N;
- if (!R)
- return V;
+ static_assert(has_single_bit(N), "& (N - 1) is only valid for powers of two");
+ R = R & (N - 1);
- if (R < 0)
- return llvm::rotr(V, -R);
+ if (R == 0)
+ return V;
return (V << R) | (V >> (N - R));
}
-template <typename T, typename> [[nodiscard]] constexpr T rotr(T V, int R) {
- unsigned N = std::numeric_limits<T>::digits;
+template <typename T, typename = std::enable_if_t<std::is_unsigned_v<T>>>
+[[nodiscard]] constexpr T rotr(T V, int R) {
+ constexpr unsigned N = std::numeric_limits<T>::digits;
- R = R % N;
- if (!R)
- return V;
+ static_assert(has_single_bit(N), "& (N - 1) is only valid for powers of two");
+ R = R & (N - 1);
- if (R < 0)
- return llvm::rotl(V, -R);
+ if (R == 0)
+ return V;
return (V >> R) | (V << (N - R));
}
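The rewrite replaces the modulo-plus-negation logic with a single mask, which works because the bit width is a power of two: masking the rotate count with N - 1 already maps negative and out-of-range counts into [0, N). A standalone sketch (hypothetical helper, not llvm::rotl itself) demonstrating the equivalence:

#include <limits>
#include <type_traits>

// Why one mask suffices: for a power-of-two width N, R & (N - 1) equals the
// mathematical R mod N even for negative R, so no rotr fallback is needed.
template <typename T, typename = std::enable_if_t<std::is_unsigned_v<T>>>
constexpr T sketch_rotl(T V, int R) {
  constexpr unsigned N = std::numeric_limits<T>::digits;
  static_assert((N & (N - 1)) == 0, "mask trick requires a power-of-two width");
  R = R & (N - 1); // e.g. for N = 32: -1 -> 31, 33 -> 1
  if (R == 0)
    return V;
  return (V << R) | (V >> (N - R));
}

// Rotating left by -1 equals rotating left by N - 1, and counts wrap modulo N.
static_assert(sketch_rotl(0x80000001u, -1) == sketch_rotl(0x80000001u, 31));
static_assert(sketch_rotl(0x80000001u, 33) == sketch_rotl(0x80000001u, 1));

int main() { return 0; }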
diff --git a/llvm/include/llvm/Analysis/LoopAnalysisManager.h b/llvm/include/llvm/Analysis/LoopAnalysisManager.h
index fc69cb0..1755257 100644
--- a/llvm/include/llvm/Analysis/LoopAnalysisManager.h
+++ b/llvm/include/llvm/Analysis/LoopAnalysisManager.h
@@ -36,7 +36,6 @@ namespace llvm {
class AAResults;
class AssumptionCache;
-class BlockFrequencyInfo;
class DominatorTree;
class Function;
class Loop;
@@ -58,7 +57,6 @@ struct LoopStandardAnalysisResults {
ScalarEvolution &SE;
TargetLibraryInfo &TLI;
TargetTransformInfo &TTI;
- BlockFrequencyInfo *BFI;
MemorySSA *MSSA;
};
diff --git a/llvm/include/llvm/Analysis/ScalarEvolution.h b/llvm/include/llvm/Analysis/ScalarEvolution.h
index e5a6c8c..3d3ec14 100644
--- a/llvm/include/llvm/Analysis/ScalarEvolution.h
+++ b/llvm/include/llvm/Analysis/ScalarEvolution.h
@@ -1345,6 +1345,7 @@ public:
class LoopGuards {
DenseMap<const SCEV *, const SCEV *> RewriteMap;
+ SmallDenseSet<std::pair<const SCEV *, const SCEV *>> NotEqual;
bool PreserveNUW = false;
bool PreserveNSW = false;
ScalarEvolution &SE;
diff --git a/llvm/include/llvm/Support/DOTGraphTraits.h b/llvm/include/llvm/Support/DOTGraphTraits.h
index ffa9abe..bf30aa4 100644
--- a/llvm/include/llvm/Support/DOTGraphTraits.h
+++ b/llvm/include/llvm/Support/DOTGraphTraits.h
@@ -164,7 +164,7 @@ public:
///
template <typename Ty>
struct DOTGraphTraits : public DefaultDOTGraphTraits {
- DOTGraphTraits (bool simple=false) : DefaultDOTGraphTraits (simple) {}
+ using DefaultDOTGraphTraits::DefaultDOTGraphTraits;
};
} // End llvm namespace
diff --git a/llvm/include/llvm/Support/MathExtras.h b/llvm/include/llvm/Support/MathExtras.h
index c2716a9..41232335 100644
--- a/llvm/include/llvm/Support/MathExtras.h
+++ b/llvm/include/llvm/Support/MathExtras.h
@@ -13,6 +13,7 @@
#ifndef LLVM_SUPPORT_MATHEXTRAS_H
#define LLVM_SUPPORT_MATHEXTRAS_H
+#include "llvm/ADT/STLForwardCompat.h"
#include "llvm/ADT/bit.h"
#include "llvm/Support/Compiler.h"
#include <cassert>
@@ -42,38 +43,28 @@ using common_sint =
/// Mathematical constants.
namespace numbers {
-// TODO: Track C++20 std::numbers.
// clang-format off
-constexpr double e = 0x1.5bf0a8b145769P+1, // (2.7182818284590452354) https://oeis.org/A001113
- egamma = 0x1.2788cfc6fb619P-1, // (.57721566490153286061) https://oeis.org/A001620
- ln2 = 0x1.62e42fefa39efP-1, // (.69314718055994530942) https://oeis.org/A002162
- ln10 = 0x1.26bb1bbb55516P+1, // (2.3025850929940456840) https://oeis.org/A002392
- log2e = 0x1.71547652b82feP+0, // (1.4426950408889634074)
- log10e = 0x1.bcb7b1526e50eP-2, // (.43429448190325182765)
- pi = 0x1.921fb54442d18P+1, // (3.1415926535897932385) https://oeis.org/A000796
- inv_pi = 0x1.45f306dc9c883P-2, // (.31830988618379067154) https://oeis.org/A049541
- sqrtpi = 0x1.c5bf891b4ef6bP+0, // (1.7724538509055160273) https://oeis.org/A002161
- inv_sqrtpi = 0x1.20dd750429b6dP-1, // (.56418958354775628695) https://oeis.org/A087197
- sqrt2 = 0x1.6a09e667f3bcdP+0, // (1.4142135623730950488) https://oeis.org/A00219
- inv_sqrt2 = 0x1.6a09e667f3bcdP-1, // (.70710678118654752440)
- sqrt3 = 0x1.bb67ae8584caaP+0, // (1.7320508075688772935) https://oeis.org/A002194
- inv_sqrt3 = 0x1.279a74590331cP-1, // (.57735026918962576451)
- phi = 0x1.9e3779b97f4a8P+0; // (1.6180339887498948482) https://oeis.org/A001622
-constexpr float ef = 0x1.5bf0a8P+1F, // (2.71828183) https://oeis.org/A001113
- egammaf = 0x1.2788d0P-1F, // (.577215665) https://oeis.org/A001620
- ln2f = 0x1.62e430P-1F, // (.693147181) https://oeis.org/A002162
- ln10f = 0x1.26bb1cP+1F, // (2.30258509) https://oeis.org/A002392
- log2ef = 0x1.715476P+0F, // (1.44269504)
- log10ef = 0x1.bcb7b2P-2F, // (.434294482)
- pif = 0x1.921fb6P+1F, // (3.14159265) https://oeis.org/A000796
- inv_pif = 0x1.45f306P-2F, // (.318309886) https://oeis.org/A049541
- sqrtpif = 0x1.c5bf8aP+0F, // (1.77245385) https://oeis.org/A002161
- inv_sqrtpif = 0x1.20dd76P-1F, // (.564189584) https://oeis.org/A087197
- sqrt2f = 0x1.6a09e6P+0F, // (1.41421356) https://oeis.org/A002193
- inv_sqrt2f = 0x1.6a09e6P-1F, // (.707106781)
- sqrt3f = 0x1.bb67aeP+0F, // (1.73205081) https://oeis.org/A002194
- inv_sqrt3f = 0x1.279a74P-1F, // (.577350269)
- phif = 0x1.9e377aP+0F; // (1.61803399) https://oeis.org/A001622
+inline constexpr float ef = e_v<float>;
+inline constexpr float egammaf = egamma_v<float>;
+inline constexpr float ln2f = ln2_v<float>;
+inline constexpr float ln10f = ln10_v<float>;
+inline constexpr float log2ef = log2e_v<float>;
+inline constexpr float log10ef = log10e_v<float>;
+inline constexpr float pif = pi_v<float>;
+inline constexpr float inv_pif = inv_pi_v<float>;
+inline constexpr float inv_sqrtpif = inv_sqrtpi_v<float>;
+inline constexpr float sqrt2f = sqrt2_v<float>;
+inline constexpr float inv_sqrt2f = inv_sqrt2_v<float>;
+inline constexpr float sqrt3f = sqrt3_v<float>;
+inline constexpr float inv_sqrt3f = inv_sqrt3_v<float>;
+inline constexpr float phif = phi_v<float>;
+
+// sqrtpi is not in C++20 std::numbers.
+template <typename T, typename = std::enable_if_t<std::is_floating_point_v<T>>>
+inline constexpr T sqrtpi_v = T(0x1.c5bf891b4ef6bP+0); // (1.7724538509055160273) https://oeis.org/A002161
+inline constexpr double sqrtpi = sqrtpi_v<double>;
+inline constexpr float sqrtpif = sqrtpi_v<float>;
+
// These string literals are taken from below:
// https://github.com/bminor/glibc/blob/8543577b04ded6d979ffcc5a818930e4d74d0645/math/math.h#L1215-L1229
constexpr const char *pis = "3.141592653589793238462643383279502884",
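
The float constants above now reuse the double-precision `_v` variable templates instead of repeating hand-written hex literals. A minimal sketch of the pattern, using an illustrative my_pi_v rather than the LLVM names:

#include <type_traits>

// One templated definition per constant; the typed aliases derive from it.
template <typename T, typename = std::enable_if_t<std::is_floating_point_v<T>>>
inline constexpr T my_pi_v = T(0x1.921fb54442d18P+1); // 3.14159265358979...

inline constexpr double my_pi = my_pi_v<double>;
inline constexpr float my_pif = my_pi_v<float>;

// The float value is the double constant correctly rounded to float.
static_assert(my_pif == static_cast<float>(my_pi));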
diff --git a/llvm/include/llvm/Transforms/Scalar/LoopPassManager.h b/llvm/include/llvm/Transforms/Scalar/LoopPassManager.h
index 750f954..1842d2d 100644
--- a/llvm/include/llvm/Transforms/Scalar/LoopPassManager.h
+++ b/llvm/include/llvm/Transforms/Scalar/LoopPassManager.h
@@ -404,10 +404,8 @@ public:
explicit FunctionToLoopPassAdaptor(std::unique_ptr<PassConceptT> Pass,
bool UseMemorySSA = false,
- bool UseBlockFrequencyInfo = false,
bool LoopNestMode = false)
: Pass(std::move(Pass)), UseMemorySSA(UseMemorySSA),
- UseBlockFrequencyInfo(UseBlockFrequencyInfo),
LoopNestMode(LoopNestMode) {
LoopCanonicalizationFPM.addPass(LoopSimplifyPass());
LoopCanonicalizationFPM.addPass(LCSSAPass());
@@ -429,7 +427,6 @@ private:
FunctionPassManager LoopCanonicalizationFPM;
bool UseMemorySSA = false;
- bool UseBlockFrequencyInfo = false;
const bool LoopNestMode;
};
@@ -442,8 +439,7 @@ private:
/// \c LoopPassManager and the returned adaptor will be in loop-nest mode.
template <typename LoopPassT>
inline FunctionToLoopPassAdaptor
-createFunctionToLoopPassAdaptor(LoopPassT &&Pass, bool UseMemorySSA = false,
- bool UseBlockFrequencyInfo = false) {
+createFunctionToLoopPassAdaptor(LoopPassT &&Pass, bool UseMemorySSA = false) {
if constexpr (is_detected<HasRunOnLoopT, LoopPassT>::value) {
using PassModelT =
detail::PassModel<Loop, LoopPassT, LoopAnalysisManager,
@@ -453,7 +449,7 @@ createFunctionToLoopPassAdaptor(LoopPassT &&Pass, bool UseMemorySSA = false,
return FunctionToLoopPassAdaptor(
std::unique_ptr<FunctionToLoopPassAdaptor::PassConceptT>(
new PassModelT(std::forward<LoopPassT>(Pass))),
- UseMemorySSA, UseBlockFrequencyInfo, false);
+ UseMemorySSA, false);
} else {
LoopPassManager LPM;
LPM.addPass(std::forward<LoopPassT>(Pass));
@@ -465,7 +461,7 @@ createFunctionToLoopPassAdaptor(LoopPassT &&Pass, bool UseMemorySSA = false,
return FunctionToLoopPassAdaptor(
std::unique_ptr<FunctionToLoopPassAdaptor::PassConceptT>(
new PassModelT(std::move(LPM))),
- UseMemorySSA, UseBlockFrequencyInfo, true);
+ UseMemorySSA, true);
}
}
@@ -474,8 +470,7 @@ createFunctionToLoopPassAdaptor(LoopPassT &&Pass, bool UseMemorySSA = false,
template <>
inline FunctionToLoopPassAdaptor
createFunctionToLoopPassAdaptor<LoopPassManager>(LoopPassManager &&LPM,
- bool UseMemorySSA,
- bool UseBlockFrequencyInfo) {
+ bool UseMemorySSA) {
// Check if LPM contains any loop pass and if it does not, returns an adaptor
// in loop-nest mode.
using PassModelT =
@@ -487,7 +482,7 @@ createFunctionToLoopPassAdaptor<LoopPassManager>(LoopPassManager &&LPM,
return FunctionToLoopPassAdaptor(
std::unique_ptr<FunctionToLoopPassAdaptor::PassConceptT>(
new PassModelT(std::move(LPM))),
- UseMemorySSA, UseBlockFrequencyInfo, LoopNestMode);
+ UseMemorySSA, LoopNestMode);
}
/// Pass for printing a loop's contents as textual IR.
diff --git a/llvm/lib/Analysis/LazyValueInfo.cpp b/llvm/lib/Analysis/LazyValueInfo.cpp
index 0e5bc48..df75999 100644
--- a/llvm/lib/Analysis/LazyValueInfo.cpp
+++ b/llvm/lib/Analysis/LazyValueInfo.cpp
@@ -947,9 +947,8 @@ LazyValueInfoImpl::solveBlockValueSelect(SelectInst *SI, BasicBlock *BB) {
/*UseBlockValue*/ false));
}
- ValueLatticeElement Result = TrueVal;
- Result.mergeIn(FalseVal);
- return Result;
+ TrueVal.mergeIn(FalseVal);
+ return TrueVal;
}
std::optional<ConstantRange>
@@ -1778,9 +1777,8 @@ ValueLatticeElement LazyValueInfoImpl::getValueInBlock(Value *V, BasicBlock *BB,
assert(OptResult && "Value not available after solving");
}
- ValueLatticeElement Result = *OptResult;
- LLVM_DEBUG(dbgs() << " Result = " << Result << "\n");
- return Result;
+ LLVM_DEBUG(dbgs() << " Result = " << *OptResult << "\n");
+ return *OptResult;
}
ValueLatticeElement LazyValueInfoImpl::getValueAt(Value *V, Instruction *CxtI) {
diff --git a/llvm/lib/Analysis/ScalarEvolution.cpp b/llvm/lib/Analysis/ScalarEvolution.cpp
index e06b095..425420f 100644
--- a/llvm/lib/Analysis/ScalarEvolution.cpp
+++ b/llvm/lib/Analysis/ScalarEvolution.cpp
@@ -15740,19 +15740,26 @@ void ScalarEvolution::LoopGuards::collectFromBlock(
GetNextSCEVDividesByDivisor(One, DividesBy);
To = SE.getUMaxExpr(FromRewritten, OneAlignedUp);
} else {
+        // A guard LHS != RHS lets (LHS - RHS) be rewritten as
+        // UMax(1, LHS - RHS), but creating the subtraction eagerly is
+        // expensive. Track the inequalities in a separate set, and
+        // materialize the rewrite lazily when encountering a suitable
+        // subtraction while rewriting.
if (LHS->getType()->isPointerTy()) {
LHS = SE.getLosslessPtrToIntExpr(LHS);
RHS = SE.getLosslessPtrToIntExpr(RHS);
if (isa<SCEVCouldNotCompute>(LHS) || isa<SCEVCouldNotCompute>(RHS))
break;
}
- auto AddSubRewrite = [&](const SCEV *A, const SCEV *B) {
- const SCEV *Sub = SE.getMinusSCEV(A, B);
- AddRewrite(Sub, Sub,
- SE.getUMaxExpr(Sub, SE.getOne(From->getType())));
- };
- AddSubRewrite(LHS, RHS);
- AddSubRewrite(RHS, LHS);
+ const SCEVConstant *C;
+ const SCEV *A, *B;
+ if (match(RHS, m_scev_Add(m_SCEVConstant(C), m_SCEV(A))) &&
+ match(LHS, m_scev_Add(m_scev_Specific(C), m_SCEV(B)))) {
+ RHS = A;
+ LHS = B;
+ }
+ if (LHS > RHS)
+ std::swap(LHS, RHS);
+ Guards.NotEqual.insert({LHS, RHS});
continue;
}
break;
@@ -15886,13 +15893,15 @@ const SCEV *ScalarEvolution::LoopGuards::rewrite(const SCEV *Expr) const {
class SCEVLoopGuardRewriter
: public SCEVRewriteVisitor<SCEVLoopGuardRewriter> {
const DenseMap<const SCEV *, const SCEV *> &Map;
+ const SmallDenseSet<std::pair<const SCEV *, const SCEV *>> &NotEqual;
SCEV::NoWrapFlags FlagMask = SCEV::FlagAnyWrap;
public:
SCEVLoopGuardRewriter(ScalarEvolution &SE,
const ScalarEvolution::LoopGuards &Guards)
- : SCEVRewriteVisitor(SE), Map(Guards.RewriteMap) {
+ : SCEVRewriteVisitor(SE), Map(Guards.RewriteMap),
+ NotEqual(Guards.NotEqual) {
if (Guards.PreserveNUW)
FlagMask = ScalarEvolution::setFlags(FlagMask, SCEV::FlagNUW);
if (Guards.PreserveNSW)
@@ -15947,14 +15956,36 @@ const SCEV *ScalarEvolution::LoopGuards::rewrite(const SCEV *Expr) const {
}
const SCEV *visitAddExpr(const SCEVAddExpr *Expr) {
+ // Helper to check if S is a subtraction (A - B) where A != B, and if so,
+ // return UMax(S, 1).
+ auto RewriteSubtraction = [&](const SCEV *S) -> const SCEV * {
+ const SCEV *LHS, *RHS;
+ if (MatchBinarySub(S, LHS, RHS)) {
+ if (LHS > RHS)
+ std::swap(LHS, RHS);
+ if (NotEqual.contains({LHS, RHS}))
+ return SE.getUMaxExpr(S, SE.getOne(S->getType()));
+ }
+ return nullptr;
+ };
+
+ // Check if Expr itself is a subtraction pattern with guard info.
+ if (const SCEV *Rewritten = RewriteSubtraction(Expr))
+ return Rewritten;
+
// Trip count expressions sometimes consist of adding 3 operands, i.e.
// (Const + A + B). There may be guard info for A + B, and if so, apply
// it.
// TODO: Could more generally apply guards to Add sub-expressions.
if (isa<SCEVConstant>(Expr->getOperand(0)) &&
Expr->getNumOperands() == 3) {
- if (const SCEV *S = Map.lookup(
- SE.getAddExpr(Expr->getOperand(1), Expr->getOperand(2))))
+ const SCEV *Add =
+ SE.getAddExpr(Expr->getOperand(1), Expr->getOperand(2));
+ if (const SCEV *Rewritten = RewriteSubtraction(Add))
+ return SE.getAddExpr(
+ Expr->getOperand(0), Rewritten,
+ ScalarEvolution::maskFlags(Expr->getNoWrapFlags(), FlagMask));
+ if (const SCEV *S = Map.lookup(Add))
return SE.getAddExpr(Expr->getOperand(0), S);
}
SmallVector<const SCEV *, 2> Operands;
@@ -15989,7 +16020,7 @@ const SCEV *ScalarEvolution::LoopGuards::rewrite(const SCEV *Expr) const {
}
};
- if (RewriteMap.empty())
+ if (RewriteMap.empty() && NotEqual.empty())
return Expr;
SCEVLoopGuardRewriter Rewriter(SE, *this);
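
The NotEqual guard handling above records only the fact LHS != RHS and materializes UMax(LHS - RHS, 1) on demand in visitAddExpr. A standalone sketch of the canonical-pair bookkeeping, using illustrative Expr/NotEqualSet types rather than the SCEV classes:

#include <set>
#include <utility>

struct Expr {}; // stand-in for 'const SCEV *' in this sketch
using ExprPair = std::pair<const Expr *, const Expr *>;

struct NotEqualSet {
  std::set<ExprPair> Pairs;

  // Store each known "A != B" once, under the same canonical ordering the
  // patch uses (swap so the lower address comes first), so lookups do not
  // depend on which side the guard mentioned first.
  void recordNotEqual(const Expr *A, const Expr *B) {
    if (A > B)
      std::swap(A, B);
    Pairs.insert({A, B});
  }

  // During rewriting: if an expression matches (A - B) and A != B is known,
  // it can be strengthened to UMax(A - B, 1).
  bool knownNotEqual(const Expr *A, const Expr *B) const {
    if (A > B)
      std::swap(A, B);
    return Pairs.count({A, B}) != 0;
  }
};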
diff --git a/llvm/lib/CodeGen/AsmPrinter/DwarfDebug.cpp b/llvm/lib/CodeGen/AsmPrinter/DwarfDebug.cpp
index 72582d7..567acf7 100644
--- a/llvm/lib/CodeGen/AsmPrinter/DwarfDebug.cpp
+++ b/llvm/lib/CodeGen/AsmPrinter/DwarfDebug.cpp
@@ -1039,12 +1039,17 @@ void DwarfDebug::finishUnitAttributes(const DICompileUnit *DIUnit,
} else
NewCU.addString(Die, dwarf::DW_AT_producer, Producer);
- if (auto Lang = DIUnit->getSourceLanguage(); Lang.hasVersionedName())
+ if (auto Lang = DIUnit->getSourceLanguage(); Lang.hasVersionedName()) {
NewCU.addUInt(Die, dwarf::DW_AT_language_name, dwarf::DW_FORM_data2,
Lang.getName());
- else
+
+ if (uint32_t LangVersion = Lang.getVersion(); LangVersion != 0)
+ NewCU.addUInt(Die, dwarf::DW_AT_language_version, /*Form=*/std::nullopt,
+ LangVersion);
+ } else {
NewCU.addUInt(Die, dwarf::DW_AT_language, dwarf::DW_FORM_data2,
Lang.getName());
+ }
NewCU.addString(Die, dwarf::DW_AT_name, FN);
StringRef SysRoot = DIUnit->getSysRoot();
diff --git a/llvm/lib/Passes/PassBuilder.cpp b/llvm/lib/Passes/PassBuilder.cpp
index 53cf004..e45cac8 100644
--- a/llvm/lib/Passes/PassBuilder.cpp
+++ b/llvm/lib/Passes/PassBuilder.cpp
@@ -2027,13 +2027,13 @@ Error PassBuilder::parseModulePass(ModulePassManager &MPM,
#define LOOPNEST_PASS(NAME, CREATE_PASS) \
if (Name == NAME) { \
MPM.addPass(createModuleToFunctionPassAdaptor( \
- createFunctionToLoopPassAdaptor(CREATE_PASS, false, false))); \
+ createFunctionToLoopPassAdaptor(CREATE_PASS, false))); \
return Error::success(); \
}
#define LOOP_PASS(NAME, CREATE_PASS) \
if (Name == NAME) { \
MPM.addPass(createModuleToFunctionPassAdaptor( \
- createFunctionToLoopPassAdaptor(CREATE_PASS, false, false))); \
+ createFunctionToLoopPassAdaptor(CREATE_PASS, false))); \
return Error::success(); \
}
#define LOOP_PASS_WITH_PARAMS(NAME, CLASS, CREATE_PASS, PARSER, PARAMS) \
@@ -2041,9 +2041,8 @@ Error PassBuilder::parseModulePass(ModulePassManager &MPM,
auto Params = parsePassParameters(PARSER, Name, NAME); \
if (!Params) \
return Params.takeError(); \
- MPM.addPass( \
- createModuleToFunctionPassAdaptor(createFunctionToLoopPassAdaptor( \
- CREATE_PASS(Params.get()), false, false))); \
+ MPM.addPass(createModuleToFunctionPassAdaptor( \
+ createFunctionToLoopPassAdaptor(CREATE_PASS(Params.get()), false))); \
return Error::success(); \
}
#include "PassRegistry.def"
@@ -2142,13 +2141,13 @@ Error PassBuilder::parseCGSCCPass(CGSCCPassManager &CGPM,
#define LOOPNEST_PASS(NAME, CREATE_PASS) \
if (Name == NAME) { \
CGPM.addPass(createCGSCCToFunctionPassAdaptor( \
- createFunctionToLoopPassAdaptor(CREATE_PASS, false, false))); \
+ createFunctionToLoopPassAdaptor(CREATE_PASS, false))); \
return Error::success(); \
}
#define LOOP_PASS(NAME, CREATE_PASS) \
if (Name == NAME) { \
CGPM.addPass(createCGSCCToFunctionPassAdaptor( \
- createFunctionToLoopPassAdaptor(CREATE_PASS, false, false))); \
+ createFunctionToLoopPassAdaptor(CREATE_PASS, false))); \
return Error::success(); \
}
#define LOOP_PASS_WITH_PARAMS(NAME, CLASS, CREATE_PASS, PARSER, PARAMS) \
@@ -2156,9 +2155,8 @@ Error PassBuilder::parseCGSCCPass(CGSCCPassManager &CGPM,
auto Params = parsePassParameters(PARSER, Name, NAME); \
if (!Params) \
return Params.takeError(); \
- CGPM.addPass( \
- createCGSCCToFunctionPassAdaptor(createFunctionToLoopPassAdaptor( \
- CREATE_PASS(Params.get()), false, false))); \
+ CGPM.addPass(createCGSCCToFunctionPassAdaptor( \
+ createFunctionToLoopPassAdaptor(CREATE_PASS(Params.get()), false))); \
return Error::success(); \
}
#include "PassRegistry.def"
@@ -2191,11 +2189,8 @@ Error PassBuilder::parseFunctionPass(FunctionPassManager &FPM,
return Err;
// Add the nested pass manager with the appropriate adaptor.
bool UseMemorySSA = (Name == "loop-mssa");
- bool UseBFI = llvm::any_of(InnerPipeline, [](auto Pipeline) {
- return Pipeline.Name.contains("simple-loop-unswitch");
- });
- FPM.addPass(createFunctionToLoopPassAdaptor(std::move(LPM), UseMemorySSA,
- UseBFI));
+ FPM.addPass(
+ createFunctionToLoopPassAdaptor(std::move(LPM), UseMemorySSA));
return Error::success();
}
if (Name == "machine-function") {
@@ -2248,12 +2243,12 @@ Error PassBuilder::parseFunctionPass(FunctionPassManager &FPM,
// The risk is that it may become obsolete if we're not careful.
#define LOOPNEST_PASS(NAME, CREATE_PASS) \
if (Name == NAME) { \
- FPM.addPass(createFunctionToLoopPassAdaptor(CREATE_PASS, false, false)); \
+ FPM.addPass(createFunctionToLoopPassAdaptor(CREATE_PASS, false)); \
return Error::success(); \
}
#define LOOP_PASS(NAME, CREATE_PASS) \
if (Name == NAME) { \
- FPM.addPass(createFunctionToLoopPassAdaptor(CREATE_PASS, false, false)); \
+ FPM.addPass(createFunctionToLoopPassAdaptor(CREATE_PASS, false)); \
return Error::success(); \
}
#define LOOP_PASS_WITH_PARAMS(NAME, CLASS, CREATE_PASS, PARSER, PARAMS) \
@@ -2261,8 +2256,8 @@ Error PassBuilder::parseFunctionPass(FunctionPassManager &FPM,
auto Params = parsePassParameters(PARSER, Name, NAME); \
if (!Params) \
return Params.takeError(); \
- FPM.addPass(createFunctionToLoopPassAdaptor(CREATE_PASS(Params.get()), \
- false, false)); \
+ FPM.addPass( \
+ createFunctionToLoopPassAdaptor(CREATE_PASS(Params.get()), false)); \
return Error::success(); \
}
#include "PassRegistry.def"
diff --git a/llvm/lib/Passes/PassBuilderPipelines.cpp b/llvm/lib/Passes/PassBuilderPipelines.cpp
index 3f3939eaf..bd03ac0 100644
--- a/llvm/lib/Passes/PassBuilderPipelines.cpp
+++ b/llvm/lib/Passes/PassBuilderPipelines.cpp
@@ -519,16 +519,14 @@ PassBuilder::buildO1FunctionSimplificationPipeline(OptimizationLevel Level,
invokeLoopOptimizerEndEPCallbacks(LPM2, Level);
FPM.addPass(createFunctionToLoopPassAdaptor(std::move(LPM1),
- /*UseMemorySSA=*/true,
- /*UseBlockFrequencyInfo=*/true));
+ /*UseMemorySSA=*/true));
FPM.addPass(
SimplifyCFGPass(SimplifyCFGOptions().convertSwitchRangeToICmp(true)));
FPM.addPass(InstCombinePass());
// The loop passes in LPM2 (LoopFullUnrollPass) do not preserve MemorySSA.
// *All* loop passes must preserve it, in order to be able to use it.
FPM.addPass(createFunctionToLoopPassAdaptor(std::move(LPM2),
- /*UseMemorySSA=*/false,
- /*UseBlockFrequencyInfo=*/false));
+ /*UseMemorySSA=*/false));
// Delete small array after loop unroll.
FPM.addPass(SROAPass(SROAOptions::ModifyCFG));
@@ -710,8 +708,7 @@ PassBuilder::buildFunctionSimplificationPipeline(OptimizationLevel Level,
invokeLoopOptimizerEndEPCallbacks(LPM2, Level);
FPM.addPass(createFunctionToLoopPassAdaptor(std::move(LPM1),
- /*UseMemorySSA=*/true,
- /*UseBlockFrequencyInfo=*/true));
+ /*UseMemorySSA=*/true));
FPM.addPass(
SimplifyCFGPass(SimplifyCFGOptions().convertSwitchRangeToICmp(true)));
FPM.addPass(InstCombinePass());
@@ -719,8 +716,7 @@ PassBuilder::buildFunctionSimplificationPipeline(OptimizationLevel Level,
// LoopDeletionPass and LoopFullUnrollPass) do not preserve MemorySSA.
// *All* loop passes must preserve it, in order to be able to use it.
FPM.addPass(createFunctionToLoopPassAdaptor(std::move(LPM2),
- /*UseMemorySSA=*/false,
- /*UseBlockFrequencyInfo=*/false));
+ /*UseMemorySSA=*/false));
// Delete small array after loop unroll.
FPM.addPass(SROAPass(SROAOptions::ModifyCFG));
@@ -773,7 +769,7 @@ PassBuilder::buildFunctionSimplificationPipeline(OptimizationLevel Level,
FPM.addPass(createFunctionToLoopPassAdaptor(
LICMPass(PTO.LicmMssaOptCap, PTO.LicmMssaNoAccForPromotionCap,
/*AllowSpeculation=*/true),
- /*UseMemorySSA=*/true, /*UseBlockFrequencyInfo=*/false));
+ /*UseMemorySSA=*/true));
FPM.addPass(CoroElidePass());
@@ -842,8 +838,7 @@ void PassBuilder::addPostPGOLoopRotation(ModulePassManager &MPM,
createFunctionToLoopPassAdaptor(
LoopRotatePass(EnableLoopHeaderDuplication ||
Level != OptimizationLevel::Oz),
- /*UseMemorySSA=*/false,
- /*UseBlockFrequencyInfo=*/false),
+ /*UseMemorySSA=*/false),
PTO.EagerlyInvalidateAnalyses));
}
}
@@ -1358,8 +1353,7 @@ void PassBuilder::addVectorPasses(OptimizationLevel Level,
LPM.addPass(SimpleLoopUnswitchPass(/* NonTrivial */ Level ==
OptimizationLevel::O3));
ExtraPasses.addPass(
- createFunctionToLoopPassAdaptor(std::move(LPM), /*UseMemorySSA=*/true,
- /*UseBlockFrequencyInfo=*/true));
+ createFunctionToLoopPassAdaptor(std::move(LPM), /*UseMemorySSA=*/true));
ExtraPasses.addPass(
SimplifyCFGPass(SimplifyCFGOptions().convertSwitchRangeToICmp(true)));
ExtraPasses.addPass(InstCombinePass());
@@ -1438,7 +1432,7 @@ void PassBuilder::addVectorPasses(OptimizationLevel Level,
FPM.addPass(createFunctionToLoopPassAdaptor(
LICMPass(PTO.LicmMssaOptCap, PTO.LicmMssaNoAccForPromotionCap,
/*AllowSpeculation=*/true),
- /*UseMemorySSA=*/true, /*UseBlockFrequencyInfo=*/false));
+ /*UseMemorySSA=*/true));
// Now that we've vectorized and unrolled loops, we may have more refined
// alignment information, try to re-derive it here.
@@ -1520,7 +1514,7 @@ PassBuilder::buildModuleOptimizationPipeline(OptimizationLevel Level,
OptimizePM.addPass(createFunctionToLoopPassAdaptor(
LICMPass(PTO.LicmMssaOptCap, PTO.LicmMssaNoAccForPromotionCap,
/*AllowSpeculation=*/true),
- /*USeMemorySSA=*/true, /*UseBlockFrequencyInfo=*/false));
+    /*UseMemorySSA=*/true));
}
OptimizePM.addPass(Float2IntPass());
@@ -1560,8 +1554,8 @@ PassBuilder::buildModuleOptimizationPipeline(OptimizationLevel Level,
if (PTO.LoopInterchange)
LPM.addPass(LoopInterchangePass());
- OptimizePM.addPass(createFunctionToLoopPassAdaptor(
- std::move(LPM), /*UseMemorySSA=*/false, /*UseBlockFrequencyInfo=*/false));
+ OptimizePM.addPass(
+ createFunctionToLoopPassAdaptor(std::move(LPM), /*UseMemorySSA=*/false));
// FIXME: This may not be the right place in the pipeline.
// We need to have the data to support the right place.
@@ -2133,7 +2127,7 @@ PassBuilder::buildLTODefaultPipeline(OptimizationLevel Level,
MainFPM.addPass(createFunctionToLoopPassAdaptor(
LICMPass(PTO.LicmMssaOptCap, PTO.LicmMssaNoAccForPromotionCap,
/*AllowSpeculation=*/true),
- /*USeMemorySSA=*/true, /*UseBlockFrequencyInfo=*/false));
+    /*UseMemorySSA=*/true));
if (RunNewGVN)
MainFPM.addPass(NewGVNPass());
@@ -2163,8 +2157,8 @@ PassBuilder::buildLTODefaultPipeline(OptimizationLevel Level,
PTO.ForgetAllSCEVInLoopUnroll));
// The loop passes in LPM (LoopFullUnrollPass) do not preserve MemorySSA.
// *All* loop passes must preserve it, in order to be able to use it.
- MainFPM.addPass(createFunctionToLoopPassAdaptor(
- std::move(LPM), /*UseMemorySSA=*/false, /*UseBlockFrequencyInfo=*/true));
+ MainFPM.addPass(
+ createFunctionToLoopPassAdaptor(std::move(LPM), /*UseMemorySSA=*/false));
MainFPM.addPass(LoopDistributePass());
diff --git a/llvm/lib/Support/APFloat.cpp b/llvm/lib/Support/APFloat.cpp
index 8623c06..b4de79a 100644
--- a/llvm/lib/Support/APFloat.cpp
+++ b/llvm/lib/Support/APFloat.cpp
@@ -130,44 +130,46 @@ struct fltSemantics {
bool hasSignBitInMSB = true;
};
-static constexpr fltSemantics semIEEEhalf = {15, -14, 11, 16};
-static constexpr fltSemantics semBFloat = {127, -126, 8, 16};
-static constexpr fltSemantics semIEEEsingle = {127, -126, 24, 32};
-static constexpr fltSemantics semIEEEdouble = {1023, -1022, 53, 64};
-static constexpr fltSemantics semIEEEquad = {16383, -16382, 113, 128};
-static constexpr fltSemantics semFloat8E5M2 = {15, -14, 3, 8};
-static constexpr fltSemantics semFloat8E5M2FNUZ = {
+constexpr fltSemantics APFloatBase::semIEEEhalf = {15, -14, 11, 16};
+constexpr fltSemantics APFloatBase::semBFloat = {127, -126, 8, 16};
+constexpr fltSemantics APFloatBase::semIEEEsingle = {127, -126, 24, 32};
+constexpr fltSemantics APFloatBase::semIEEEdouble = {1023, -1022, 53, 64};
+constexpr fltSemantics APFloatBase::semIEEEquad = {16383, -16382, 113, 128};
+constexpr fltSemantics APFloatBase::semFloat8E5M2 = {15, -14, 3, 8};
+constexpr fltSemantics APFloatBase::semFloat8E5M2FNUZ = {
15, -15, 3, 8, fltNonfiniteBehavior::NanOnly, fltNanEncoding::NegativeZero};
-static constexpr fltSemantics semFloat8E4M3 = {7, -6, 4, 8};
-static constexpr fltSemantics semFloat8E4M3FN = {
+constexpr fltSemantics APFloatBase::semFloat8E4M3 = {7, -6, 4, 8};
+constexpr fltSemantics APFloatBase::semFloat8E4M3FN = {
8, -6, 4, 8, fltNonfiniteBehavior::NanOnly, fltNanEncoding::AllOnes};
-static constexpr fltSemantics semFloat8E4M3FNUZ = {
+constexpr fltSemantics APFloatBase::semFloat8E4M3FNUZ = {
7, -7, 4, 8, fltNonfiniteBehavior::NanOnly, fltNanEncoding::NegativeZero};
-static constexpr fltSemantics semFloat8E4M3B11FNUZ = {
+constexpr fltSemantics APFloatBase::semFloat8E4M3B11FNUZ = {
4, -10, 4, 8, fltNonfiniteBehavior::NanOnly, fltNanEncoding::NegativeZero};
-static constexpr fltSemantics semFloat8E3M4 = {3, -2, 5, 8};
-static constexpr fltSemantics semFloatTF32 = {127, -126, 11, 19};
-static constexpr fltSemantics semFloat8E8M0FNU = {127,
- -127,
- 1,
- 8,
- fltNonfiniteBehavior::NanOnly,
- fltNanEncoding::AllOnes,
- false,
- false,
- false};
-
-static constexpr fltSemantics semFloat6E3M2FN = {
+constexpr fltSemantics APFloatBase::semFloat8E3M4 = {3, -2, 5, 8};
+constexpr fltSemantics APFloatBase::semFloatTF32 = {127, -126, 11, 19};
+constexpr fltSemantics APFloatBase::semFloat8E8M0FNU = {
+ 127,
+ -127,
+ 1,
+ 8,
+ fltNonfiniteBehavior::NanOnly,
+ fltNanEncoding::AllOnes,
+ false,
+ false,
+ false};
+
+constexpr fltSemantics APFloatBase::semFloat6E3M2FN = {
4, -2, 3, 6, fltNonfiniteBehavior::FiniteOnly};
-static constexpr fltSemantics semFloat6E2M3FN = {
+constexpr fltSemantics APFloatBase::semFloat6E2M3FN = {
2, 0, 4, 6, fltNonfiniteBehavior::FiniteOnly};
-static constexpr fltSemantics semFloat4E2M1FN = {
+constexpr fltSemantics APFloatBase::semFloat4E2M1FN = {
2, 0, 2, 4, fltNonfiniteBehavior::FiniteOnly};
-static constexpr fltSemantics semX87DoubleExtended = {16383, -16382, 64, 80};
-static constexpr fltSemantics semBogus = {0, 0, 0, 0};
-static constexpr fltSemantics semPPCDoubleDouble = {-1, 0, 0, 128};
-static constexpr fltSemantics semPPCDoubleDoubleLegacy = {1023, -1022 + 53,
- 53 + 53, 128};
+constexpr fltSemantics APFloatBase::semX87DoubleExtended = {16383, -16382, 64,
+ 80};
+constexpr fltSemantics APFloatBase::semBogus = {0, 0, 0, 0};
+constexpr fltSemantics APFloatBase::semPPCDoubleDouble = {-1, 0, 0, 128};
+constexpr fltSemantics APFloatBase::semPPCDoubleDoubleLegacy = {
+ 1023, -1022 + 53, 53 + 53, 128};
const llvm::fltSemantics &APFloatBase::EnumToSemantics(Semantics S) {
switch (S) {
@@ -261,36 +263,6 @@ APFloatBase::SemanticsToEnum(const llvm::fltSemantics &Sem) {
llvm_unreachable("Unknown floating semantics");
}
-const fltSemantics &APFloatBase::IEEEhalf() { return semIEEEhalf; }
-const fltSemantics &APFloatBase::BFloat() { return semBFloat; }
-const fltSemantics &APFloatBase::IEEEsingle() { return semIEEEsingle; }
-const fltSemantics &APFloatBase::IEEEdouble() { return semIEEEdouble; }
-const fltSemantics &APFloatBase::IEEEquad() { return semIEEEquad; }
-const fltSemantics &APFloatBase::PPCDoubleDouble() {
- return semPPCDoubleDouble;
-}
-const fltSemantics &APFloatBase::PPCDoubleDoubleLegacy() {
- return semPPCDoubleDoubleLegacy;
-}
-const fltSemantics &APFloatBase::Float8E5M2() { return semFloat8E5M2; }
-const fltSemantics &APFloatBase::Float8E5M2FNUZ() { return semFloat8E5M2FNUZ; }
-const fltSemantics &APFloatBase::Float8E4M3() { return semFloat8E4M3; }
-const fltSemantics &APFloatBase::Float8E4M3FN() { return semFloat8E4M3FN; }
-const fltSemantics &APFloatBase::Float8E4M3FNUZ() { return semFloat8E4M3FNUZ; }
-const fltSemantics &APFloatBase::Float8E4M3B11FNUZ() {
- return semFloat8E4M3B11FNUZ;
-}
-const fltSemantics &APFloatBase::Float8E3M4() { return semFloat8E3M4; }
-const fltSemantics &APFloatBase::FloatTF32() { return semFloatTF32; }
-const fltSemantics &APFloatBase::Float8E8M0FNU() { return semFloat8E8M0FNU; }
-const fltSemantics &APFloatBase::Float6E3M2FN() { return semFloat6E3M2FN; }
-const fltSemantics &APFloatBase::Float6E2M3FN() { return semFloat6E2M3FN; }
-const fltSemantics &APFloatBase::Float4E2M1FN() { return semFloat4E2M1FN; }
-const fltSemantics &APFloatBase::x87DoubleExtended() {
- return semX87DoubleExtended;
-}
-const fltSemantics &APFloatBase::Bogus() { return semBogus; }
-
bool APFloatBase::isRepresentableBy(const fltSemantics &A,
const fltSemantics &B) {
return A.maxExponent <= B.maxExponent && A.minExponent >= B.minExponent &&
@@ -1029,7 +1001,7 @@ void IEEEFloat::makeNaN(bool SNaN, bool Negative, const APInt *fill) {
// For x87 extended precision, we want to make a NaN, not a
// pseudo-NaN. Maybe we should expose the ability to make
// pseudo-NaNs?
- if (semantics == &semX87DoubleExtended)
+ if (semantics == &APFloatBase::semX87DoubleExtended)
APInt::tcSetBit(significand, QNaNBit + 1);
}
@@ -1054,7 +1026,7 @@ IEEEFloat &IEEEFloat::operator=(IEEEFloat &&rhs) {
category = rhs.category;
sign = rhs.sign;
- rhs.semantics = &semBogus;
+ rhs.semantics = &APFloatBase::semBogus;
return *this;
}
@@ -1247,7 +1219,7 @@ IEEEFloat::IEEEFloat(const IEEEFloat &rhs) {
assign(rhs);
}
-IEEEFloat::IEEEFloat(IEEEFloat &&rhs) : semantics(&semBogus) {
+IEEEFloat::IEEEFloat(IEEEFloat &&rhs) : semantics(&APFloatBase::semBogus) {
*this = std::move(rhs);
}
@@ -2607,8 +2579,8 @@ APFloat::opStatus IEEEFloat::convert(const fltSemantics &toSemantics,
shift = toSemantics.precision - fromSemantics.precision;
bool X86SpecialNan = false;
- if (&fromSemantics == &semX87DoubleExtended &&
- &toSemantics != &semX87DoubleExtended && category == fcNaN &&
+ if (&fromSemantics == &APFloatBase::semX87DoubleExtended &&
+ &toSemantics != &APFloatBase::semX87DoubleExtended && category == fcNaN &&
(!(*significandParts() & 0x8000000000000000ULL) ||
!(*significandParts() & 0x4000000000000000ULL))) {
// x86 has some unusual NaNs which cannot be represented in any other
@@ -2694,7 +2666,7 @@ APFloat::opStatus IEEEFloat::convert(const fltSemantics &toSemantics,
// For x87 extended precision, we want to make a NaN, not a special NaN if
// the input wasn't special either.
- if (!X86SpecialNan && semantics == &semX87DoubleExtended)
+ if (!X86SpecialNan && semantics == &APFloatBase::semX87DoubleExtended)
APInt::tcSetBit(significandParts(), semantics->precision - 1);
// Convert of sNaN creates qNaN and raises an exception (invalid op).
@@ -3530,7 +3502,8 @@ hash_code hash_value(const IEEEFloat &Arg) {
// the actual IEEE respresentations. We compensate for that here.
APInt IEEEFloat::convertF80LongDoubleAPFloatToAPInt() const {
- assert(semantics == (const llvm::fltSemantics*)&semX87DoubleExtended);
+ assert(semantics ==
+ (const llvm::fltSemantics *)&APFloatBase::semX87DoubleExtended);
assert(partCount()==2);
uint64_t myexponent, mysignificand;
@@ -3560,7 +3533,8 @@ APInt IEEEFloat::convertF80LongDoubleAPFloatToAPInt() const {
}
APInt IEEEFloat::convertPPCDoubleDoubleLegacyAPFloatToAPInt() const {
- assert(semantics == (const llvm::fltSemantics *)&semPPCDoubleDoubleLegacy);
+ assert(semantics ==
+ (const llvm::fltSemantics *)&APFloatBase::semPPCDoubleDoubleLegacy);
assert(partCount()==2);
uint64_t words[2];
@@ -3574,14 +3548,14 @@ APInt IEEEFloat::convertPPCDoubleDoubleLegacyAPFloatToAPInt() const {
// Declare fltSemantics before APFloat that uses it (and
// saves pointer to it) to ensure correct destruction order.
fltSemantics extendedSemantics = *semantics;
- extendedSemantics.minExponent = semIEEEdouble.minExponent;
+ extendedSemantics.minExponent = APFloatBase::semIEEEdouble.minExponent;
IEEEFloat extended(*this);
fs = extended.convert(extendedSemantics, rmNearestTiesToEven, &losesInfo);
assert(fs == opOK && !losesInfo);
(void)fs;
IEEEFloat u(extended);
- fs = u.convert(semIEEEdouble, rmNearestTiesToEven, &losesInfo);
+ fs = u.convert(APFloatBase::semIEEEdouble, rmNearestTiesToEven, &losesInfo);
assert(fs == opOK || fs == opInexact);
(void)fs;
words[0] = *u.convertDoubleAPFloatToAPInt().getRawData();
@@ -3597,7 +3571,7 @@ APInt IEEEFloat::convertPPCDoubleDoubleLegacyAPFloatToAPInt() const {
IEEEFloat v(extended);
v.subtract(u, rmNearestTiesToEven);
- fs = v.convert(semIEEEdouble, rmNearestTiesToEven, &losesInfo);
+ fs = v.convert(APFloatBase::semIEEEdouble, rmNearestTiesToEven, &losesInfo);
assert(fs == opOK && !losesInfo);
(void)fs;
words[1] = *v.convertDoubleAPFloatToAPInt().getRawData();
@@ -3611,8 +3585,9 @@ APInt IEEEFloat::convertPPCDoubleDoubleLegacyAPFloatToAPInt() const {
template <const fltSemantics &S>
APInt IEEEFloat::convertIEEEFloatToAPInt() const {
assert(semantics == &S);
- const int bias =
- (semantics == &semFloat8E8M0FNU) ? -S.minExponent : -(S.minExponent - 1);
+ const int bias = (semantics == &APFloatBase::semFloat8E8M0FNU)
+ ? -S.minExponent
+ : -(S.minExponent - 1);
constexpr unsigned int trailing_significand_bits = S.precision - 1;
constexpr int integer_bit_part = trailing_significand_bits / integerPartWidth;
constexpr integerPart integer_bit =
@@ -3677,87 +3652,87 @@ APInt IEEEFloat::convertIEEEFloatToAPInt() const {
APInt IEEEFloat::convertQuadrupleAPFloatToAPInt() const {
assert(partCount() == 2);
- return convertIEEEFloatToAPInt<semIEEEquad>();
+ return convertIEEEFloatToAPInt<APFloatBase::semIEEEquad>();
}
APInt IEEEFloat::convertDoubleAPFloatToAPInt() const {
assert(partCount()==1);
- return convertIEEEFloatToAPInt<semIEEEdouble>();
+ return convertIEEEFloatToAPInt<APFloatBase::semIEEEdouble>();
}
APInt IEEEFloat::convertFloatAPFloatToAPInt() const {
assert(partCount()==1);
- return convertIEEEFloatToAPInt<semIEEEsingle>();
+ return convertIEEEFloatToAPInt<APFloatBase::semIEEEsingle>();
}
APInt IEEEFloat::convertBFloatAPFloatToAPInt() const {
assert(partCount() == 1);
- return convertIEEEFloatToAPInt<semBFloat>();
+ return convertIEEEFloatToAPInt<APFloatBase::semBFloat>();
}
APInt IEEEFloat::convertHalfAPFloatToAPInt() const {
assert(partCount()==1);
- return convertIEEEFloatToAPInt<semIEEEhalf>();
+  return convertIEEEFloatToAPInt<APFloatBase::semIEEEhalf>();
}
APInt IEEEFloat::convertFloat8E5M2APFloatToAPInt() const {
assert(partCount() == 1);
- return convertIEEEFloatToAPInt<semFloat8E5M2>();
+ return convertIEEEFloatToAPInt<APFloatBase::semFloat8E5M2>();
}
APInt IEEEFloat::convertFloat8E5M2FNUZAPFloatToAPInt() const {
assert(partCount() == 1);
- return convertIEEEFloatToAPInt<semFloat8E5M2FNUZ>();
+ return convertIEEEFloatToAPInt<APFloatBase::semFloat8E5M2FNUZ>();
}
APInt IEEEFloat::convertFloat8E4M3APFloatToAPInt() const {
assert(partCount() == 1);
- return convertIEEEFloatToAPInt<semFloat8E4M3>();
+ return convertIEEEFloatToAPInt<APFloatBase::semFloat8E4M3>();
}
APInt IEEEFloat::convertFloat8E4M3FNAPFloatToAPInt() const {
assert(partCount() == 1);
- return convertIEEEFloatToAPInt<semFloat8E4M3FN>();
+ return convertIEEEFloatToAPInt<APFloatBase::semFloat8E4M3FN>();
}
APInt IEEEFloat::convertFloat8E4M3FNUZAPFloatToAPInt() const {
assert(partCount() == 1);
- return convertIEEEFloatToAPInt<semFloat8E4M3FNUZ>();
+ return convertIEEEFloatToAPInt<APFloatBase::semFloat8E4M3FNUZ>();
}
APInt IEEEFloat::convertFloat8E4M3B11FNUZAPFloatToAPInt() const {
assert(partCount() == 1);
- return convertIEEEFloatToAPInt<semFloat8E4M3B11FNUZ>();
+ return convertIEEEFloatToAPInt<APFloatBase::semFloat8E4M3B11FNUZ>();
}
APInt IEEEFloat::convertFloat8E3M4APFloatToAPInt() const {
assert(partCount() == 1);
- return convertIEEEFloatToAPInt<semFloat8E3M4>();
+ return convertIEEEFloatToAPInt<APFloatBase::semFloat8E3M4>();
}
APInt IEEEFloat::convertFloatTF32APFloatToAPInt() const {
assert(partCount() == 1);
- return convertIEEEFloatToAPInt<semFloatTF32>();
+ return convertIEEEFloatToAPInt<APFloatBase::semFloatTF32>();
}
APInt IEEEFloat::convertFloat8E8M0FNUAPFloatToAPInt() const {
assert(partCount() == 1);
- return convertIEEEFloatToAPInt<semFloat8E8M0FNU>();
+ return convertIEEEFloatToAPInt<APFloatBase::semFloat8E8M0FNU>();
}
APInt IEEEFloat::convertFloat6E3M2FNAPFloatToAPInt() const {
assert(partCount() == 1);
- return convertIEEEFloatToAPInt<semFloat6E3M2FN>();
+ return convertIEEEFloatToAPInt<APFloatBase::semFloat6E3M2FN>();
}
APInt IEEEFloat::convertFloat6E2M3FNAPFloatToAPInt() const {
assert(partCount() == 1);
- return convertIEEEFloatToAPInt<semFloat6E2M3FN>();
+ return convertIEEEFloatToAPInt<APFloatBase::semFloat6E2M3FN>();
}
APInt IEEEFloat::convertFloat4E2M1FNAPFloatToAPInt() const {
assert(partCount() == 1);
- return convertIEEEFloatToAPInt<semFloat4E2M1FN>();
+ return convertIEEEFloatToAPInt<APFloatBase::semFloat4E2M1FN>();
}
// This function creates an APInt that is just a bit map of the floating
@@ -3765,74 +3740,77 @@ APInt IEEEFloat::convertFloat4E2M1FNAPFloatToAPInt() const {
// and treating the result as a normal integer is unlikely to be useful.
APInt IEEEFloat::bitcastToAPInt() const {
- if (semantics == (const llvm::fltSemantics*)&semIEEEhalf)
+ if (semantics == (const llvm::fltSemantics *)&APFloatBase::semIEEEhalf)
return convertHalfAPFloatToAPInt();
- if (semantics == (const llvm::fltSemantics *)&semBFloat)
+ if (semantics == (const llvm::fltSemantics *)&APFloatBase::semBFloat)
return convertBFloatAPFloatToAPInt();
- if (semantics == (const llvm::fltSemantics*)&semIEEEsingle)
+ if (semantics == (const llvm::fltSemantics *)&APFloatBase::semIEEEsingle)
return convertFloatAPFloatToAPInt();
- if (semantics == (const llvm::fltSemantics*)&semIEEEdouble)
+ if (semantics == (const llvm::fltSemantics *)&APFloatBase::semIEEEdouble)
return convertDoubleAPFloatToAPInt();
- if (semantics == (const llvm::fltSemantics*)&semIEEEquad)
+ if (semantics == (const llvm::fltSemantics *)&APFloatBase::semIEEEquad)
return convertQuadrupleAPFloatToAPInt();
- if (semantics == (const llvm::fltSemantics *)&semPPCDoubleDoubleLegacy)
+ if (semantics ==
+ (const llvm::fltSemantics *)&APFloatBase::semPPCDoubleDoubleLegacy)
return convertPPCDoubleDoubleLegacyAPFloatToAPInt();
- if (semantics == (const llvm::fltSemantics *)&semFloat8E5M2)
+ if (semantics == (const llvm::fltSemantics *)&APFloatBase::semFloat8E5M2)
return convertFloat8E5M2APFloatToAPInt();
- if (semantics == (const llvm::fltSemantics *)&semFloat8E5M2FNUZ)
+ if (semantics == (const llvm::fltSemantics *)&APFloatBase::semFloat8E5M2FNUZ)
return convertFloat8E5M2FNUZAPFloatToAPInt();
- if (semantics == (const llvm::fltSemantics *)&semFloat8E4M3)
+ if (semantics == (const llvm::fltSemantics *)&APFloatBase::semFloat8E4M3)
return convertFloat8E4M3APFloatToAPInt();
- if (semantics == (const llvm::fltSemantics *)&semFloat8E4M3FN)
+ if (semantics == (const llvm::fltSemantics *)&APFloatBase::semFloat8E4M3FN)
return convertFloat8E4M3FNAPFloatToAPInt();
- if (semantics == (const llvm::fltSemantics *)&semFloat8E4M3FNUZ)
+ if (semantics == (const llvm::fltSemantics *)&APFloatBase::semFloat8E4M3FNUZ)
return convertFloat8E4M3FNUZAPFloatToAPInt();
- if (semantics == (const llvm::fltSemantics *)&semFloat8E4M3B11FNUZ)
+ if (semantics ==
+ (const llvm::fltSemantics *)&APFloatBase::semFloat8E4M3B11FNUZ)
return convertFloat8E4M3B11FNUZAPFloatToAPInt();
- if (semantics == (const llvm::fltSemantics *)&semFloat8E3M4)
+ if (semantics == (const llvm::fltSemantics *)&APFloatBase::semFloat8E3M4)
return convertFloat8E3M4APFloatToAPInt();
- if (semantics == (const llvm::fltSemantics *)&semFloatTF32)
+ if (semantics == (const llvm::fltSemantics *)&APFloatBase::semFloatTF32)
return convertFloatTF32APFloatToAPInt();
- if (semantics == (const llvm::fltSemantics *)&semFloat8E8M0FNU)
+ if (semantics == (const llvm::fltSemantics *)&APFloatBase::semFloat8E8M0FNU)
return convertFloat8E8M0FNUAPFloatToAPInt();
- if (semantics == (const llvm::fltSemantics *)&semFloat6E3M2FN)
+ if (semantics == (const llvm::fltSemantics *)&APFloatBase::semFloat6E3M2FN)
return convertFloat6E3M2FNAPFloatToAPInt();
- if (semantics == (const llvm::fltSemantics *)&semFloat6E2M3FN)
+ if (semantics == (const llvm::fltSemantics *)&APFloatBase::semFloat6E2M3FN)
return convertFloat6E2M3FNAPFloatToAPInt();
- if (semantics == (const llvm::fltSemantics *)&semFloat4E2M1FN)
+ if (semantics == (const llvm::fltSemantics *)&APFloatBase::semFloat4E2M1FN)
return convertFloat4E2M1FNAPFloatToAPInt();
- assert(semantics == (const llvm::fltSemantics*)&semX87DoubleExtended &&
+ assert(semantics ==
+ (const llvm::fltSemantics *)&APFloatBase::semX87DoubleExtended &&
"unknown format!");
return convertF80LongDoubleAPFloatToAPInt();
}
float IEEEFloat::convertToFloat() const {
- assert(semantics == (const llvm::fltSemantics*)&semIEEEsingle &&
+ assert(semantics == (const llvm::fltSemantics *)&APFloatBase::semIEEEsingle &&
"Float semantics are not IEEEsingle");
APInt api = bitcastToAPInt();
return api.bitsToFloat();
}
double IEEEFloat::convertToDouble() const {
- assert(semantics == (const llvm::fltSemantics*)&semIEEEdouble &&
+ assert(semantics == (const llvm::fltSemantics *)&APFloatBase::semIEEEdouble &&
"Float semantics are not IEEEdouble");
APInt api = bitcastToAPInt();
return api.bitsToDouble();
@@ -3840,7 +3818,7 @@ double IEEEFloat::convertToDouble() const {
#ifdef HAS_IEE754_FLOAT128
float128 IEEEFloat::convertToQuad() const {
- assert(semantics == (const llvm::fltSemantics *)&semIEEEquad &&
+ assert(semantics == (const llvm::fltSemantics *)&APFloatBase::semIEEEquad &&
"Float semantics are not IEEEquads");
APInt api = bitcastToAPInt();
return api.bitsToQuad();
@@ -3861,7 +3839,7 @@ void IEEEFloat::initFromF80LongDoubleAPInt(const APInt &api) {
uint64_t mysignificand = i1;
uint8_t myintegerbit = mysignificand >> 63;
- initialize(&semX87DoubleExtended);
+ initialize(&APFloatBase::semX87DoubleExtended);
assert(partCount()==2);
sign = static_cast<unsigned int>(i2>>15);
@@ -3893,14 +3871,16 @@ void IEEEFloat::initFromPPCDoubleDoubleLegacyAPInt(const APInt &api) {
// Get the first double and convert to our format.
initFromDoubleAPInt(APInt(64, i1));
- fs = convert(semPPCDoubleDoubleLegacy, rmNearestTiesToEven, &losesInfo);
+ fs = convert(APFloatBase::semPPCDoubleDoubleLegacy, rmNearestTiesToEven,
+ &losesInfo);
assert(fs == opOK && !losesInfo);
(void)fs;
// Unless we have a special case, add in second double.
if (isFiniteNonZero()) {
- IEEEFloat v(semIEEEdouble, APInt(64, i2));
- fs = v.convert(semPPCDoubleDoubleLegacy, rmNearestTiesToEven, &losesInfo);
+ IEEEFloat v(APFloatBase::semIEEEdouble, APInt(64, i2));
+ fs = v.convert(APFloatBase::semPPCDoubleDoubleLegacy, rmNearestTiesToEven,
+ &losesInfo);
assert(fs == opOK && !losesInfo);
(void)fs;
@@ -3918,7 +3898,7 @@ void IEEEFloat::initFromFloat8E8M0FNUAPInt(const APInt &api) {
uint64_t val = api.getRawData()[0];
uint64_t myexponent = (val & exponent_mask);
- initialize(&semFloat8E8M0FNU);
+ initialize(&APFloatBase::semFloat8E8M0FNU);
assert(partCount() == 1);
// This format has unsigned representation only
@@ -4025,109 +4005,109 @@ void IEEEFloat::initFromIEEEAPInt(const APInt &api) {
}
void IEEEFloat::initFromQuadrupleAPInt(const APInt &api) {
- initFromIEEEAPInt<semIEEEquad>(api);
+ initFromIEEEAPInt<APFloatBase::semIEEEquad>(api);
}
void IEEEFloat::initFromDoubleAPInt(const APInt &api) {
- initFromIEEEAPInt<semIEEEdouble>(api);
+ initFromIEEEAPInt<APFloatBase::semIEEEdouble>(api);
}
void IEEEFloat::initFromFloatAPInt(const APInt &api) {
- initFromIEEEAPInt<semIEEEsingle>(api);
+ initFromIEEEAPInt<APFloatBase::semIEEEsingle>(api);
}
void IEEEFloat::initFromBFloatAPInt(const APInt &api) {
- initFromIEEEAPInt<semBFloat>(api);
+ initFromIEEEAPInt<APFloatBase::semBFloat>(api);
}
void IEEEFloat::initFromHalfAPInt(const APInt &api) {
- initFromIEEEAPInt<semIEEEhalf>(api);
+ initFromIEEEAPInt<APFloatBase::semIEEEhalf>(api);
}
void IEEEFloat::initFromFloat8E5M2APInt(const APInt &api) {
- initFromIEEEAPInt<semFloat8E5M2>(api);
+ initFromIEEEAPInt<APFloatBase::semFloat8E5M2>(api);
}
void IEEEFloat::initFromFloat8E5M2FNUZAPInt(const APInt &api) {
- initFromIEEEAPInt<semFloat8E5M2FNUZ>(api);
+ initFromIEEEAPInt<APFloatBase::semFloat8E5M2FNUZ>(api);
}
void IEEEFloat::initFromFloat8E4M3APInt(const APInt &api) {
- initFromIEEEAPInt<semFloat8E4M3>(api);
+ initFromIEEEAPInt<APFloatBase::semFloat8E4M3>(api);
}
void IEEEFloat::initFromFloat8E4M3FNAPInt(const APInt &api) {
- initFromIEEEAPInt<semFloat8E4M3FN>(api);
+ initFromIEEEAPInt<APFloatBase::semFloat8E4M3FN>(api);
}
void IEEEFloat::initFromFloat8E4M3FNUZAPInt(const APInt &api) {
- initFromIEEEAPInt<semFloat8E4M3FNUZ>(api);
+ initFromIEEEAPInt<APFloatBase::semFloat8E4M3FNUZ>(api);
}
void IEEEFloat::initFromFloat8E4M3B11FNUZAPInt(const APInt &api) {
- initFromIEEEAPInt<semFloat8E4M3B11FNUZ>(api);
+ initFromIEEEAPInt<APFloatBase::semFloat8E4M3B11FNUZ>(api);
}
void IEEEFloat::initFromFloat8E3M4APInt(const APInt &api) {
- initFromIEEEAPInt<semFloat8E3M4>(api);
+ initFromIEEEAPInt<APFloatBase::semFloat8E3M4>(api);
}
void IEEEFloat::initFromFloatTF32APInt(const APInt &api) {
- initFromIEEEAPInt<semFloatTF32>(api);
+ initFromIEEEAPInt<APFloatBase::semFloatTF32>(api);
}
void IEEEFloat::initFromFloat6E3M2FNAPInt(const APInt &api) {
- initFromIEEEAPInt<semFloat6E3M2FN>(api);
+ initFromIEEEAPInt<APFloatBase::semFloat6E3M2FN>(api);
}
void IEEEFloat::initFromFloat6E2M3FNAPInt(const APInt &api) {
- initFromIEEEAPInt<semFloat6E2M3FN>(api);
+ initFromIEEEAPInt<APFloatBase::semFloat6E2M3FN>(api);
}
void IEEEFloat::initFromFloat4E2M1FNAPInt(const APInt &api) {
- initFromIEEEAPInt<semFloat4E2M1FN>(api);
+ initFromIEEEAPInt<APFloatBase::semFloat4E2M1FN>(api);
}
/// Treat api as containing the bits of a floating point number.
void IEEEFloat::initFromAPInt(const fltSemantics *Sem, const APInt &api) {
assert(api.getBitWidth() == Sem->sizeInBits);
- if (Sem == &semIEEEhalf)
+ if (Sem == &APFloatBase::semIEEEhalf)
return initFromHalfAPInt(api);
- if (Sem == &semBFloat)
+ if (Sem == &APFloatBase::semBFloat)
return initFromBFloatAPInt(api);
- if (Sem == &semIEEEsingle)
+ if (Sem == &APFloatBase::semIEEEsingle)
return initFromFloatAPInt(api);
- if (Sem == &semIEEEdouble)
+ if (Sem == &APFloatBase::semIEEEdouble)
return initFromDoubleAPInt(api);
- if (Sem == &semX87DoubleExtended)
+ if (Sem == &APFloatBase::semX87DoubleExtended)
return initFromF80LongDoubleAPInt(api);
- if (Sem == &semIEEEquad)
+ if (Sem == &APFloatBase::semIEEEquad)
return initFromQuadrupleAPInt(api);
- if (Sem == &semPPCDoubleDoubleLegacy)
+ if (Sem == &APFloatBase::semPPCDoubleDoubleLegacy)
return initFromPPCDoubleDoubleLegacyAPInt(api);
- if (Sem == &semFloat8E5M2)
+ if (Sem == &APFloatBase::semFloat8E5M2)
return initFromFloat8E5M2APInt(api);
- if (Sem == &semFloat8E5M2FNUZ)
+ if (Sem == &APFloatBase::semFloat8E5M2FNUZ)
return initFromFloat8E5M2FNUZAPInt(api);
- if (Sem == &semFloat8E4M3)
+ if (Sem == &APFloatBase::semFloat8E4M3)
return initFromFloat8E4M3APInt(api);
- if (Sem == &semFloat8E4M3FN)
+ if (Sem == &APFloatBase::semFloat8E4M3FN)
return initFromFloat8E4M3FNAPInt(api);
- if (Sem == &semFloat8E4M3FNUZ)
+ if (Sem == &APFloatBase::semFloat8E4M3FNUZ)
return initFromFloat8E4M3FNUZAPInt(api);
- if (Sem == &semFloat8E4M3B11FNUZ)
+ if (Sem == &APFloatBase::semFloat8E4M3B11FNUZ)
return initFromFloat8E4M3B11FNUZAPInt(api);
- if (Sem == &semFloat8E3M4)
+ if (Sem == &APFloatBase::semFloat8E3M4)
return initFromFloat8E3M4APInt(api);
- if (Sem == &semFloatTF32)
+ if (Sem == &APFloatBase::semFloatTF32)
return initFromFloatTF32APInt(api);
- if (Sem == &semFloat8E8M0FNU)
+ if (Sem == &APFloatBase::semFloat8E8M0FNU)
return initFromFloat8E8M0FNUAPInt(api);
- if (Sem == &semFloat6E3M2FN)
+ if (Sem == &APFloatBase::semFloat6E3M2FN)
return initFromFloat6E3M2FNAPInt(api);
- if (Sem == &semFloat6E2M3FN)
+ if (Sem == &APFloatBase::semFloat6E2M3FN)
return initFromFloat6E2M3FNAPInt(api);
- if (Sem == &semFloat4E2M1FN)
+ if (Sem == &APFloatBase::semFloat4E2M1FN)
return initFromFloat4E2M1FNAPInt(api);
llvm_unreachable("unsupported semantics");
@@ -4202,11 +4182,11 @@ IEEEFloat::IEEEFloat(const fltSemantics &Sem, const APInt &API) {
}
IEEEFloat::IEEEFloat(float f) {
- initFromAPInt(&semIEEEsingle, APInt::floatToBits(f));
+ initFromAPInt(&APFloatBase::semIEEEsingle, APInt::floatToBits(f));
}
IEEEFloat::IEEEFloat(double d) {
- initFromAPInt(&semIEEEdouble, APInt::doubleToBits(d));
+ initFromAPInt(&APFloatBase::semIEEEdouble, APInt::doubleToBits(d));
}
namespace {
@@ -4815,38 +4795,40 @@ IEEEFloat frexp(const IEEEFloat &Val, int &Exp, roundingMode RM) {
DoubleAPFloat::DoubleAPFloat(const fltSemantics &S)
: Semantics(&S),
- Floats(new APFloat[2]{APFloat(semIEEEdouble), APFloat(semIEEEdouble)}) {
- assert(Semantics == &semPPCDoubleDouble);
+ Floats(new APFloat[2]{APFloat(APFloatBase::semIEEEdouble),
+ APFloat(APFloatBase::semIEEEdouble)}) {
+ assert(Semantics == &APFloatBase::semPPCDoubleDouble);
}
DoubleAPFloat::DoubleAPFloat(const fltSemantics &S, uninitializedTag)
- : Semantics(&S),
- Floats(new APFloat[2]{APFloat(semIEEEdouble, uninitialized),
- APFloat(semIEEEdouble, uninitialized)}) {
- assert(Semantics == &semPPCDoubleDouble);
+ : Semantics(&S), Floats(new APFloat[2]{
+ APFloat(APFloatBase::semIEEEdouble, uninitialized),
+ APFloat(APFloatBase::semIEEEdouble, uninitialized)}) {
+ assert(Semantics == &APFloatBase::semPPCDoubleDouble);
}
DoubleAPFloat::DoubleAPFloat(const fltSemantics &S, integerPart I)
- : Semantics(&S), Floats(new APFloat[2]{APFloat(semIEEEdouble, I),
- APFloat(semIEEEdouble)}) {
- assert(Semantics == &semPPCDoubleDouble);
+ : Semantics(&S),
+ Floats(new APFloat[2]{APFloat(APFloatBase::semIEEEdouble, I),
+ APFloat(APFloatBase::semIEEEdouble)}) {
+ assert(Semantics == &APFloatBase::semPPCDoubleDouble);
}
DoubleAPFloat::DoubleAPFloat(const fltSemantics &S, const APInt &I)
: Semantics(&S),
Floats(new APFloat[2]{
- APFloat(semIEEEdouble, APInt(64, I.getRawData()[0])),
- APFloat(semIEEEdouble, APInt(64, I.getRawData()[1]))}) {
- assert(Semantics == &semPPCDoubleDouble);
+ APFloat(APFloatBase::semIEEEdouble, APInt(64, I.getRawData()[0])),
+ APFloat(APFloatBase::semIEEEdouble, APInt(64, I.getRawData()[1]))}) {
+ assert(Semantics == &APFloatBase::semPPCDoubleDouble);
}
DoubleAPFloat::DoubleAPFloat(const fltSemantics &S, APFloat &&First,
APFloat &&Second)
: Semantics(&S),
Floats(new APFloat[2]{std::move(First), std::move(Second)}) {
- assert(Semantics == &semPPCDoubleDouble);
- assert(&Floats[0].getSemantics() == &semIEEEdouble);
- assert(&Floats[1].getSemantics() == &semIEEEdouble);
+ assert(Semantics == &APFloatBase::semPPCDoubleDouble);
+ assert(&Floats[0].getSemantics() == &APFloatBase::semIEEEdouble);
+ assert(&Floats[1].getSemantics() == &APFloatBase::semIEEEdouble);
}
DoubleAPFloat::DoubleAPFloat(const DoubleAPFloat &RHS)
@@ -4854,14 +4836,14 @@ DoubleAPFloat::DoubleAPFloat(const DoubleAPFloat &RHS)
Floats(RHS.Floats ? new APFloat[2]{APFloat(RHS.Floats[0]),
APFloat(RHS.Floats[1])}
: nullptr) {
- assert(Semantics == &semPPCDoubleDouble);
+ assert(Semantics == &APFloatBase::semPPCDoubleDouble);
}
DoubleAPFloat::DoubleAPFloat(DoubleAPFloat &&RHS)
: Semantics(RHS.Semantics), Floats(RHS.Floats) {
- RHS.Semantics = &semBogus;
+ RHS.Semantics = &APFloatBase::semBogus;
RHS.Floats = nullptr;
- assert(Semantics == &semPPCDoubleDouble);
+ assert(Semantics == &APFloatBase::semPPCDoubleDouble);
}
DoubleAPFloat &DoubleAPFloat::operator=(const DoubleAPFloat &RHS) {
@@ -5009,12 +4991,12 @@ APFloat::opStatus DoubleAPFloat::addWithSpecial(const DoubleAPFloat &LHS,
APFloat A(LHS.Floats[0]), AA(LHS.Floats[1]), C(RHS.Floats[0]),
CC(RHS.Floats[1]);
- assert(&A.getSemantics() == &semIEEEdouble);
- assert(&AA.getSemantics() == &semIEEEdouble);
- assert(&C.getSemantics() == &semIEEEdouble);
- assert(&CC.getSemantics() == &semIEEEdouble);
- assert(&Out.Floats[0].getSemantics() == &semIEEEdouble);
- assert(&Out.Floats[1].getSemantics() == &semIEEEdouble);
+ assert(&A.getSemantics() == &APFloatBase::semIEEEdouble);
+ assert(&AA.getSemantics() == &APFloatBase::semIEEEdouble);
+ assert(&C.getSemantics() == &APFloatBase::semIEEEdouble);
+ assert(&CC.getSemantics() == &APFloatBase::semIEEEdouble);
+ assert(&Out.Floats[0].getSemantics() == &APFloatBase::semIEEEdouble);
+ assert(&Out.Floats[1].getSemantics() == &APFloatBase::semIEEEdouble);
return Out.addImpl(A, AA, C, CC, RM);
}
@@ -5119,28 +5101,32 @@ APFloat::opStatus DoubleAPFloat::multiply(const DoubleAPFloat &RHS,
APFloat::opStatus DoubleAPFloat::divide(const DoubleAPFloat &RHS,
APFloat::roundingMode RM) {
- assert(Semantics == &semPPCDoubleDouble && "Unexpected Semantics");
- APFloat Tmp(semPPCDoubleDoubleLegacy, bitcastToAPInt());
- auto Ret =
- Tmp.divide(APFloat(semPPCDoubleDoubleLegacy, RHS.bitcastToAPInt()), RM);
- *this = DoubleAPFloat(semPPCDoubleDouble, Tmp.bitcastToAPInt());
+ assert(Semantics == &APFloatBase::semPPCDoubleDouble &&
+ "Unexpected Semantics");
+ APFloat Tmp(APFloatBase::semPPCDoubleDoubleLegacy, bitcastToAPInt());
+ auto Ret = Tmp.divide(
+ APFloat(APFloatBase::semPPCDoubleDoubleLegacy, RHS.bitcastToAPInt()), RM);
+ *this = DoubleAPFloat(APFloatBase::semPPCDoubleDouble, Tmp.bitcastToAPInt());
return Ret;
}
APFloat::opStatus DoubleAPFloat::remainder(const DoubleAPFloat &RHS) {
- assert(Semantics == &semPPCDoubleDouble && "Unexpected Semantics");
- APFloat Tmp(semPPCDoubleDoubleLegacy, bitcastToAPInt());
- auto Ret =
- Tmp.remainder(APFloat(semPPCDoubleDoubleLegacy, RHS.bitcastToAPInt()));
- *this = DoubleAPFloat(semPPCDoubleDouble, Tmp.bitcastToAPInt());
+ assert(Semantics == &APFloatBase::semPPCDoubleDouble &&
+ "Unexpected Semantics");
+ APFloat Tmp(APFloatBase::semPPCDoubleDoubleLegacy, bitcastToAPInt());
+ auto Ret = Tmp.remainder(
+ APFloat(APFloatBase::semPPCDoubleDoubleLegacy, RHS.bitcastToAPInt()));
+ *this = DoubleAPFloat(APFloatBase::semPPCDoubleDouble, Tmp.bitcastToAPInt());
return Ret;
}
APFloat::opStatus DoubleAPFloat::mod(const DoubleAPFloat &RHS) {
- assert(Semantics == &semPPCDoubleDouble && "Unexpected Semantics");
- APFloat Tmp(semPPCDoubleDoubleLegacy, bitcastToAPInt());
- auto Ret = Tmp.mod(APFloat(semPPCDoubleDoubleLegacy, RHS.bitcastToAPInt()));
- *this = DoubleAPFloat(semPPCDoubleDouble, Tmp.bitcastToAPInt());
+ assert(Semantics == &APFloatBase::semPPCDoubleDouble &&
+ "Unexpected Semantics");
+ APFloat Tmp(APFloatBase::semPPCDoubleDoubleLegacy, bitcastToAPInt());
+ auto Ret = Tmp.mod(
+ APFloat(APFloatBase::semPPCDoubleDoubleLegacy, RHS.bitcastToAPInt()));
+ *this = DoubleAPFloat(APFloatBase::semPPCDoubleDouble, Tmp.bitcastToAPInt());
return Ret;
}
@@ -5148,17 +5134,21 @@ APFloat::opStatus
DoubleAPFloat::fusedMultiplyAdd(const DoubleAPFloat &Multiplicand,
const DoubleAPFloat &Addend,
APFloat::roundingMode RM) {
- assert(Semantics == &semPPCDoubleDouble && "Unexpected Semantics");
- APFloat Tmp(semPPCDoubleDoubleLegacy, bitcastToAPInt());
+ assert(Semantics == &APFloatBase::semPPCDoubleDouble &&
+ "Unexpected Semantics");
+ APFloat Tmp(APFloatBase::semPPCDoubleDoubleLegacy, bitcastToAPInt());
auto Ret = Tmp.fusedMultiplyAdd(
- APFloat(semPPCDoubleDoubleLegacy, Multiplicand.bitcastToAPInt()),
- APFloat(semPPCDoubleDoubleLegacy, Addend.bitcastToAPInt()), RM);
- *this = DoubleAPFloat(semPPCDoubleDouble, Tmp.bitcastToAPInt());
+ APFloat(APFloatBase::semPPCDoubleDoubleLegacy,
+ Multiplicand.bitcastToAPInt()),
+ APFloat(APFloatBase::semPPCDoubleDoubleLegacy, Addend.bitcastToAPInt()),
+ RM);
+ *this = DoubleAPFloat(APFloatBase::semPPCDoubleDouble, Tmp.bitcastToAPInt());
return Ret;
}
APFloat::opStatus DoubleAPFloat::roundToIntegral(APFloat::roundingMode RM) {
- assert(Semantics == &semPPCDoubleDouble && "Unexpected Semantics");
+ assert(Semantics == &APFloatBase::semPPCDoubleDouble &&
+ "Unexpected Semantics");
const APFloat &Hi = getFirst();
const APFloat &Lo = getSecond();
@@ -5309,22 +5299,28 @@ void DoubleAPFloat::makeZero(bool Neg) {
}
void DoubleAPFloat::makeLargest(bool Neg) {
- assert(Semantics == &semPPCDoubleDouble && "Unexpected Semantics");
- Floats[0] = APFloat(semIEEEdouble, APInt(64, 0x7fefffffffffffffull));
- Floats[1] = APFloat(semIEEEdouble, APInt(64, 0x7c8ffffffffffffeull));
+ assert(Semantics == &APFloatBase::semPPCDoubleDouble &&
+ "Unexpected Semantics");
+ Floats[0] =
+ APFloat(APFloatBase::semIEEEdouble, APInt(64, 0x7fefffffffffffffull));
+ Floats[1] =
+ APFloat(APFloatBase::semIEEEdouble, APInt(64, 0x7c8ffffffffffffeull));
if (Neg)
changeSign();
}
void DoubleAPFloat::makeSmallest(bool Neg) {
- assert(Semantics == &semPPCDoubleDouble && "Unexpected Semantics");
+ assert(Semantics == &APFloatBase::semPPCDoubleDouble &&
+ "Unexpected Semantics");
Floats[0].makeSmallest(Neg);
Floats[1].makeZero(/* Neg = */ false);
}
void DoubleAPFloat::makeSmallestNormalized(bool Neg) {
- assert(Semantics == &semPPCDoubleDouble && "Unexpected Semantics");
- Floats[0] = APFloat(semIEEEdouble, APInt(64, 0x0360000000000000ull));
+ assert(Semantics == &APFloatBase::semPPCDoubleDouble &&
+ "Unexpected Semantics");
+ Floats[0] =
+ APFloat(APFloatBase::semIEEEdouble, APInt(64, 0x0360000000000000ull));
if (Neg)
Floats[0].changeSign();
Floats[1].makeZero(/* Neg = */ false);
@@ -5355,7 +5351,8 @@ hash_code hash_value(const DoubleAPFloat &Arg) {
}
APInt DoubleAPFloat::bitcastToAPInt() const {
- assert(Semantics == &semPPCDoubleDouble && "Unexpected Semantics");
+ assert(Semantics == &APFloatBase::semPPCDoubleDouble &&
+ "Unexpected Semantics");
uint64_t Data[] = {
Floats[0].bitcastToAPInt().getRawData()[0],
Floats[1].bitcastToAPInt().getRawData()[0],
@@ -5365,10 +5362,11 @@ APInt DoubleAPFloat::bitcastToAPInt() const {
Expected<APFloat::opStatus> DoubleAPFloat::convertFromString(StringRef S,
roundingMode RM) {
- assert(Semantics == &semPPCDoubleDouble && "Unexpected Semantics");
- APFloat Tmp(semPPCDoubleDoubleLegacy);
+ assert(Semantics == &APFloatBase::semPPCDoubleDouble &&
+ "Unexpected Semantics");
+ APFloat Tmp(APFloatBase::semPPCDoubleDoubleLegacy);
auto Ret = Tmp.convertFromString(S, RM);
- *this = DoubleAPFloat(semPPCDoubleDouble, Tmp.bitcastToAPInt());
+ *this = DoubleAPFloat(APFloatBase::semPPCDoubleDouble, Tmp.bitcastToAPInt());
return Ret;
}
@@ -5379,7 +5377,8 @@ Expected<APFloat::opStatus> DoubleAPFloat::convertFromString(StringRef S,
// nextUp must choose the smallest output > input that follows these rules.
// nexDown must choose the largest output < input that follows these rules.
APFloat::opStatus DoubleAPFloat::next(bool nextDown) {
- assert(Semantics == &semPPCDoubleDouble && "Unexpected Semantics");
+ assert(Semantics == &APFloatBase::semPPCDoubleDouble &&
+ "Unexpected Semantics");
// nextDown(x) = -nextUp(-x)
if (nextDown) {
changeSign();
@@ -5481,7 +5480,8 @@ APFloat::opStatus DoubleAPFloat::next(bool nextDown) {
APFloat::opStatus DoubleAPFloat::convertToSignExtendedInteger(
MutableArrayRef<integerPart> Input, unsigned int Width, bool IsSigned,
roundingMode RM, bool *IsExact) const {
- assert(Semantics == &semPPCDoubleDouble && "Unexpected Semantics");
+ assert(Semantics == &APFloatBase::semPPCDoubleDouble &&
+ "Unexpected Semantics");
// If Hi is not finite, or Lo is zero, the value is entirely represented
// by Hi. Delegate to the simpler single-APFloat conversion.
@@ -5761,8 +5761,9 @@ unsigned int DoubleAPFloat::convertToHexString(char *DST,
unsigned int HexDigits,
bool UpperCase,
roundingMode RM) const {
- assert(Semantics == &semPPCDoubleDouble && "Unexpected Semantics");
- return APFloat(semPPCDoubleDoubleLegacy, bitcastToAPInt())
+ assert(Semantics == &APFloatBase::semPPCDoubleDouble &&
+ "Unexpected Semantics");
+ return APFloat(APFloatBase::semPPCDoubleDoubleLegacy, bitcastToAPInt())
.convertToHexString(DST, HexDigits, UpperCase, RM);
}
@@ -5799,7 +5800,8 @@ bool DoubleAPFloat::isLargest() const {
}
bool DoubleAPFloat::isInteger() const {
- assert(Semantics == &semPPCDoubleDouble && "Unexpected Semantics");
+ assert(Semantics == &APFloatBase::semPPCDoubleDouble &&
+ "Unexpected Semantics");
return Floats[0].isInteger() && Floats[1].isInteger();
}
@@ -5807,8 +5809,9 @@ void DoubleAPFloat::toString(SmallVectorImpl<char> &Str,
unsigned FormatPrecision,
unsigned FormatMaxPadding,
bool TruncateZero) const {
- assert(Semantics == &semPPCDoubleDouble && "Unexpected Semantics");
- APFloat(semPPCDoubleDoubleLegacy, bitcastToAPInt())
+ assert(Semantics == &APFloatBase::semPPCDoubleDouble &&
+ "Unexpected Semantics");
+ APFloat(APFloatBase::semPPCDoubleDoubleLegacy, bitcastToAPInt())
.toString(Str, FormatPrecision, FormatMaxPadding, TruncateZero);
}
@@ -5840,14 +5843,17 @@ int ilogb(const DoubleAPFloat &Arg) {
DoubleAPFloat scalbn(const DoubleAPFloat &Arg, int Exp,
APFloat::roundingMode RM) {
- assert(Arg.Semantics == &semPPCDoubleDouble && "Unexpected Semantics");
- return DoubleAPFloat(semPPCDoubleDouble, scalbn(Arg.Floats[0], Exp, RM),
+ assert(Arg.Semantics == &APFloatBase::PPCDoubleDouble() &&
+ "Unexpected Semantics");
+ return DoubleAPFloat(APFloatBase::PPCDoubleDouble(),
+ scalbn(Arg.Floats[0], Exp, RM),
scalbn(Arg.Floats[1], Exp, RM));
}
DoubleAPFloat frexp(const DoubleAPFloat &Arg, int &Exp,
APFloat::roundingMode RM) {
- assert(Arg.Semantics == &semPPCDoubleDouble && "Unexpected Semantics");
+ assert(Arg.Semantics == &APFloatBase::PPCDoubleDouble() &&
+ "Unexpected Semantics");
// Get the unbiased exponent e of the number, where |Arg| = m * 2^e for m in
// [1.0, 2.0).
@@ -5943,7 +5949,8 @@ DoubleAPFloat frexp(const DoubleAPFloat &Arg, int &Exp,
}
APFloat First = scalbn(Hi, -Exp, RM);
- return DoubleAPFloat(semPPCDoubleDouble, std::move(First), std::move(Second));
+ return DoubleAPFloat(APFloatBase::PPCDoubleDouble(), std::move(First),
+ std::move(Second));
}
} // namespace detail
@@ -5955,9 +5962,8 @@ APFloat::Storage::Storage(IEEEFloat F, const fltSemantics &Semantics) {
}
if (usesLayout<DoubleAPFloat>(Semantics)) {
const fltSemantics& S = F.getSemantics();
- new (&Double)
- DoubleAPFloat(Semantics, APFloat(std::move(F), S),
- APFloat(semIEEEdouble));
+ new (&Double) DoubleAPFloat(Semantics, APFloat(std::move(F), S),
+ APFloat(APFloatBase::IEEEdouble()));
return;
}
llvm_unreachable("Unexpected semantics");
@@ -6065,8 +6071,9 @@ APFloat::opStatus APFloat::convert(const fltSemantics &ToSemantics,
return U.IEEE.convert(ToSemantics, RM, losesInfo);
if (usesLayout<IEEEFloat>(getSemantics()) &&
usesLayout<DoubleAPFloat>(ToSemantics)) {
- assert(&ToSemantics == &semPPCDoubleDouble);
- auto Ret = U.IEEE.convert(semPPCDoubleDoubleLegacy, RM, losesInfo);
+ assert(&ToSemantics == &APFloatBase::semPPCDoubleDouble);
+ auto Ret =
+ U.IEEE.convert(APFloatBase::semPPCDoubleDoubleLegacy, RM, losesInfo);
*this = APFloat(ToSemantics, U.IEEE.bitcastToAPInt());
return Ret;
}
@@ -6113,13 +6120,15 @@ APFloat::opStatus APFloat::convertToInteger(APSInt &result,
}
double APFloat::convertToDouble() const {
- if (&getSemantics() == (const llvm::fltSemantics *)&semIEEEdouble)
+ if (&getSemantics() ==
+ (const llvm::fltSemantics *)&APFloatBase::semIEEEdouble)
return getIEEE().convertToDouble();
assert(isRepresentableBy(getSemantics(), semIEEEdouble) &&
"Float semantics is not representable by IEEEdouble");
APFloat Temp = *this;
bool LosesInfo;
- opStatus St = Temp.convert(semIEEEdouble, rmNearestTiesToEven, &LosesInfo);
+ opStatus St =
+ Temp.convert(APFloatBase::semIEEEdouble, rmNearestTiesToEven, &LosesInfo);
assert(!(St & opInexact) && !LosesInfo && "Unexpected imprecision");
(void)St;
return Temp.getIEEE().convertToDouble();
@@ -6127,13 +6136,14 @@ double APFloat::convertToDouble() const {
#ifdef HAS_IEE754_FLOAT128
float128 APFloat::convertToQuad() const {
- if (&getSemantics() == (const llvm::fltSemantics *)&semIEEEquad)
+ if (&getSemantics() == (const llvm::fltSemantics *)&APFloatBase::semIEEEquad)
return getIEEE().convertToQuad();
assert(isRepresentableBy(getSemantics(), semIEEEquad) &&
"Float semantics is not representable by IEEEquad");
APFloat Temp = *this;
bool LosesInfo;
- opStatus St = Temp.convert(semIEEEquad, rmNearestTiesToEven, &LosesInfo);
+ opStatus St =
+ Temp.convert(APFloatBase::semIEEEquad, rmNearestTiesToEven, &LosesInfo);
assert(!(St & opInexact) && !LosesInfo && "Unexpected imprecision");
(void)St;
return Temp.getIEEE().convertToQuad();
@@ -6141,18 +6151,84 @@ float128 APFloat::convertToQuad() const {
#endif
float APFloat::convertToFloat() const {
- if (&getSemantics() == (const llvm::fltSemantics *)&semIEEEsingle)
+ if (&getSemantics() ==
+ (const llvm::fltSemantics *)&APFloatBase::semIEEEsingle)
return getIEEE().convertToFloat();
assert(isRepresentableBy(getSemantics(), semIEEEsingle) &&
"Float semantics is not representable by IEEEsingle");
APFloat Temp = *this;
bool LosesInfo;
- opStatus St = Temp.convert(semIEEEsingle, rmNearestTiesToEven, &LosesInfo);
+ opStatus St =
+ Temp.convert(APFloatBase::semIEEEsingle, rmNearestTiesToEven, &LosesInfo);
assert(!(St & opInexact) && !LosesInfo && "Unexpected imprecision");
(void)St;
return Temp.getIEEE().convertToFloat();
}
+APFloat::Storage::~Storage() {
+ if (usesLayout<IEEEFloat>(*semantics)) {
+ IEEE.~IEEEFloat();
+ return;
+ }
+ if (usesLayout<DoubleAPFloat>(*semantics)) {
+ Double.~DoubleAPFloat();
+ return;
+ }
+ llvm_unreachable("Unexpected semantics");
+}
+
+APFloat::Storage::Storage(const APFloat::Storage &RHS) {
+ if (usesLayout<IEEEFloat>(*RHS.semantics)) {
+ new (this) IEEEFloat(RHS.IEEE);
+ return;
+ }
+ if (usesLayout<DoubleAPFloat>(*RHS.semantics)) {
+ new (this) DoubleAPFloat(RHS.Double);
+ return;
+ }
+ llvm_unreachable("Unexpected semantics");
+}
+
+APFloat::Storage::Storage(APFloat::Storage &&RHS) {
+ if (usesLayout<IEEEFloat>(*RHS.semantics)) {
+ new (this) IEEEFloat(std::move(RHS.IEEE));
+ return;
+ }
+ if (usesLayout<DoubleAPFloat>(*RHS.semantics)) {
+ new (this) DoubleAPFloat(std::move(RHS.Double));
+ return;
+ }
+ llvm_unreachable("Unexpected semantics");
+}
+
+APFloat::Storage &APFloat::Storage::operator=(const APFloat::Storage &RHS) {
+ if (usesLayout<IEEEFloat>(*semantics) &&
+ usesLayout<IEEEFloat>(*RHS.semantics)) {
+ IEEE = RHS.IEEE;
+ } else if (usesLayout<DoubleAPFloat>(*semantics) &&
+ usesLayout<DoubleAPFloat>(*RHS.semantics)) {
+ Double = RHS.Double;
+ } else if (this != &RHS) {
+ this->~Storage();
+ new (this) Storage(RHS);
+ }
+ return *this;
+}
+
+APFloat::Storage &APFloat::Storage::operator=(APFloat::Storage &&RHS) {
+ if (usesLayout<IEEEFloat>(*semantics) &&
+ usesLayout<IEEEFloat>(*RHS.semantics)) {
+ IEEE = std::move(RHS.IEEE);
+ } else if (usesLayout<DoubleAPFloat>(*semantics) &&
+ usesLayout<DoubleAPFloat>(*RHS.semantics)) {
+ Double = std::move(RHS.Double);
+ } else if (this != &RHS) {
+ this->~Storage();
+ new (this) Storage(std::move(RHS));
+ }
+ return *this;
+}
+
} // namespace llvm
#undef APFLOAT_DISPATCH_ON_SEMANTICS
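// Editorial sketch, not part of the patch: the new Storage special members
// above are a hand-rolled tagged union -- the semantics pointer acts as the
// discriminator, and the active member is created and destroyed manually with
// placement new and explicit destructor calls. A minimal, self-contained
// illustration of the same technique (types and names are invented for the
// example, not LLVM APIs):

#include <new>

struct Small { float V; };
struct Big { double Hi, Lo; };

struct TaggedStorage {
  bool IsSmall;                 // discriminator (cf. usesLayout<>)
  union { Small S; Big B; };    // overlapping storage for both layouts

  explicit TaggedStorage(Small V) : IsSmall(true) { new (&S) Small(V); }
  explicit TaggedStorage(Big V) : IsSmall(false) { new (&B) Big(V); }

  ~TaggedStorage() {
    if (IsSmall)
      S.~Small();
    else
      B.~Big();
  }

  TaggedStorage(const TaggedStorage &RHS) : IsSmall(RHS.IsSmall) {
    if (IsSmall)
      new (&S) Small(RHS.S);
    else
      new (&B) Big(RHS.B);
  }

  TaggedStorage &operator=(const TaggedStorage &RHS) {
    if (IsSmall == RHS.IsSmall) {
      // Same active member: plain member assignment is enough.
      if (IsSmall)
        S = RHS.S;
      else
        B = RHS.B;
    } else if (this != &RHS) {
      // Switching layouts: destroy the current member, rebuild from RHS.
      this->~TaggedStorage();
      new (this) TaggedStorage(RHS);
    }
    return *this;
  }
};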
diff --git a/llvm/lib/Target/AMDGPU/AMDGPUISelLowering.cpp b/llvm/lib/Target/AMDGPU/AMDGPUISelLowering.cpp
index 1b559a6..8ed4062 100644
--- a/llvm/lib/Target/AMDGPU/AMDGPUISelLowering.cpp
+++ b/llvm/lib/Target/AMDGPU/AMDGPUISelLowering.cpp
@@ -514,8 +514,8 @@ AMDGPUTargetLowering::AMDGPUTargetLowering(const TargetMachine &TM,
MVT::i64, Custom);
setOperationAction(ISD::SELECT_CC, MVT::i64, Expand);
- setOperationAction({ISD::SMIN, ISD::UMIN, ISD::SMAX, ISD::UMAX}, MVT::i32,
- Legal);
+ setOperationAction({ISD::ABS, ISD::SMIN, ISD::UMIN, ISD::SMAX, ISD::UMAX},
+ MVT::i32, Legal);
setOperationAction(
{ISD::CTTZ, ISD::CTTZ_ZERO_UNDEF, ISD::CTLZ, ISD::CTLZ_ZERO_UNDEF},
diff --git a/llvm/lib/Target/AMDGPU/Disassembler/AMDGPUDisassembler.cpp b/llvm/lib/Target/AMDGPU/Disassembler/AMDGPUDisassembler.cpp
index e0375ea..e3f3aba 100644
--- a/llvm/lib/Target/AMDGPU/Disassembler/AMDGPUDisassembler.cpp
+++ b/llvm/lib/Target/AMDGPU/Disassembler/AMDGPUDisassembler.cpp
@@ -892,6 +892,7 @@ DecodeStatus AMDGPUDisassembler::getInstruction(MCInst &MI, uint64_t &Size,
// have EXEC as implicit destination. Issue a warning if encoding for
// vdst is not EXEC.
if ((MCII->get(MI.getOpcode()).TSFlags & SIInstrFlags::VOP3) &&
+ MCII->get(MI.getOpcode()).getNumDefs() == 0 &&
MCII->get(MI.getOpcode()).hasImplicitDefOfPhysReg(AMDGPU::EXEC)) {
auto ExecEncoding = MRI.getEncodingValue(AMDGPU::EXEC_LO);
if (Bytes_[0] != ExecEncoding)
diff --git a/llvm/lib/Target/AMDGPU/SIInstrInfo.cpp b/llvm/lib/Target/AMDGPU/SIInstrInfo.cpp
index d516330..50447f4 100644
--- a/llvm/lib/Target/AMDGPU/SIInstrInfo.cpp
+++ b/llvm/lib/Target/AMDGPU/SIInstrInfo.cpp
@@ -9072,6 +9072,67 @@ void SIInstrInfo::movePackToVALU(SIInstrWorklist &Worklist,
MachineOperand &Src1 = Inst.getOperand(2);
const DebugLoc &DL = Inst.getDebugLoc();
+ if (ST.useRealTrue16Insts()) {
+ Register SrcReg0, SrcReg1;
+ if (!Src0.isReg() || !RI.isVGPR(MRI, Src0.getReg())) {
+ SrcReg0 = MRI.createVirtualRegister(&AMDGPU::VGPR_32RegClass);
+ BuildMI(*MBB, Inst, DL, get(AMDGPU::V_MOV_B32_e32), SrcReg0).add(Src0);
+ } else {
+ SrcReg0 = Src0.getReg();
+ }
+
+ if (!Src1.isReg() || !RI.isVGPR(MRI, Src1.getReg())) {
+ SrcReg1 = MRI.createVirtualRegister(&AMDGPU::VGPR_32RegClass);
+ BuildMI(*MBB, Inst, DL, get(AMDGPU::V_MOV_B32_e32), SrcReg1).add(Src1);
+ } else {
+ SrcReg1 = Src1.getReg();
+ }
+
+ bool isSrc0Reg16 = MRI.constrainRegClass(SrcReg0, &AMDGPU::VGPR_16RegClass);
+ bool isSrc1Reg16 = MRI.constrainRegClass(SrcReg1, &AMDGPU::VGPR_16RegClass);
+
+ auto NewMI = BuildMI(*MBB, Inst, DL, get(AMDGPU::REG_SEQUENCE), ResultReg);
+ switch (Inst.getOpcode()) {
+ case AMDGPU::S_PACK_LL_B32_B16:
+ NewMI
+ .addReg(SrcReg0, 0,
+ isSrc0Reg16 ? AMDGPU::NoSubRegister : AMDGPU::lo16)
+ .addImm(AMDGPU::lo16)
+ .addReg(SrcReg1, 0,
+ isSrc1Reg16 ? AMDGPU::NoSubRegister : AMDGPU::lo16)
+ .addImm(AMDGPU::hi16);
+ break;
+ case AMDGPU::S_PACK_LH_B32_B16:
+ NewMI
+ .addReg(SrcReg0, 0,
+ isSrc0Reg16 ? AMDGPU::NoSubRegister : AMDGPU::lo16)
+ .addImm(AMDGPU::lo16)
+ .addReg(SrcReg1, 0, AMDGPU::hi16)
+ .addImm(AMDGPU::hi16);
+ break;
+ case AMDGPU::S_PACK_HL_B32_B16:
+ NewMI.addReg(SrcReg0, 0, AMDGPU::hi16)
+ .addImm(AMDGPU::lo16)
+ .addReg(SrcReg1, 0,
+ isSrc1Reg16 ? AMDGPU::NoSubRegister : AMDGPU::lo16)
+ .addImm(AMDGPU::hi16);
+ break;
+ case AMDGPU::S_PACK_HH_B32_B16:
+ NewMI.addReg(SrcReg0, 0, AMDGPU::hi16)
+ .addImm(AMDGPU::lo16)
+ .addReg(SrcReg1, 0, AMDGPU::hi16)
+ .addImm(AMDGPU::hi16);
+ break;
+ default:
+ llvm_unreachable("unhandled s_pack_* instruction");
+ }
+
+ MachineOperand &Dest = Inst.getOperand(0);
+ MRI.replaceRegWith(Dest.getReg(), ResultReg);
+ addUsersToMoveToVALUWorklist(ResultReg, MRI, Worklist);
+ return;
+ }
+
switch (Inst.getOpcode()) {
case AMDGPU::S_PACK_LL_B32_B16: {
Register ImmReg = MRI.createVirtualRegister(&AMDGPU::VGPR_32RegClass);
diff --git a/llvm/lib/Target/AMDGPU/SIPreEmitPeephole.cpp b/llvm/lib/Target/AMDGPU/SIPreEmitPeephole.cpp
index 01a40c1..7431e11 100644
--- a/llvm/lib/Target/AMDGPU/SIPreEmitPeephole.cpp
+++ b/llvm/lib/Target/AMDGPU/SIPreEmitPeephole.cpp
@@ -47,9 +47,6 @@ private:
const MachineBasicBlock &From,
const MachineBasicBlock &To) const;
bool removeExeczBranch(MachineInstr &MI, MachineBasicBlock &SrcMBB);
- // Check if the machine instruction being processed is a supported packed
- // instruction.
- bool isUnpackingSupportedInstr(MachineInstr &MI) const;
// Creates a list of packed instructions following an MFMA that are suitable
// for unpacking.
void collectUnpackingCandidates(MachineInstr &BeginMI,
@@ -454,23 +451,6 @@ bool SIPreEmitPeephole::removeExeczBranch(MachineInstr &MI,
return true;
}
-// If support is extended to new operations, add tests in
-// llvm/test/CodeGen/AMDGPU/unpack-non-coissue-insts-post-ra-scheduler.mir.
-bool SIPreEmitPeephole::isUnpackingSupportedInstr(MachineInstr &MI) const {
- if (!TII->isNeverCoissue(MI))
- return false;
- unsigned Opcode = MI.getOpcode();
- switch (Opcode) {
- case AMDGPU::V_PK_ADD_F32:
- case AMDGPU::V_PK_MUL_F32:
- case AMDGPU::V_PK_FMA_F32:
- return true;
- default:
- return false;
- }
- llvm_unreachable("Fully covered switch");
-}
-
bool SIPreEmitPeephole::canUnpackingClobberRegister(const MachineInstr &MI) {
unsigned OpCode = MI.getOpcode();
Register DstReg = MI.getOperand(0).getReg();
@@ -612,10 +592,13 @@ void SIPreEmitPeephole::collectUnpackingCandidates(
for (auto I = std::next(BeginMI.getIterator()); I != E; ++I) {
MachineInstr &Instr = *I;
+ uint16_t UnpackedOpCode = mapToUnpackedOpcode(Instr);
+    bool IsUnpackable =
+        UnpackedOpCode != std::numeric_limits<uint16_t>::max();
if (Instr.isMetaInstruction())
continue;
if ((Instr.isTerminator()) ||
- (TII->isNeverCoissue(Instr) && !isUnpackingSupportedInstr(Instr)) ||
+ (TII->isNeverCoissue(Instr) && !IsUnpackable) ||
(SIInstrInfo::modifiesModeRegister(Instr) &&
Instr.modifiesRegister(AMDGPU::EXEC, TRI)))
return;
@@ -639,7 +622,7 @@ void SIPreEmitPeephole::collectUnpackingCandidates(
if (TRI->regsOverlap(MFMADef, InstrMO.getReg()))
return;
}
- if (!isUnpackingSupportedInstr(Instr))
+ if (!IsUnpackable)
continue;
if (canUnpackingClobberRegister(Instr))
@@ -687,8 +670,8 @@ MachineInstrBuilder SIPreEmitPeephole::createUnpackedMI(MachineInstr &I,
bool IsHiBits) {
MachineBasicBlock &MBB = *I.getParent();
const DebugLoc &DL = I.getDebugLoc();
- const MachineOperand *SrcMO1 = TII->getNamedOperand(I, AMDGPU::OpName::src0);
- const MachineOperand *SrcMO2 = TII->getNamedOperand(I, AMDGPU::OpName::src1);
+ const MachineOperand *SrcMO0 = TII->getNamedOperand(I, AMDGPU::OpName::src0);
+ const MachineOperand *SrcMO1 = TII->getNamedOperand(I, AMDGPU::OpName::src1);
Register DstReg = I.getOperand(0).getReg();
unsigned OpCode = I.getOpcode();
Register UnpackedDstReg = IsHiBits ? TRI->getSubReg(DstReg, AMDGPU::sub1)
@@ -702,15 +685,15 @@ MachineInstrBuilder SIPreEmitPeephole::createUnpackedMI(MachineInstr &I,
MachineInstrBuilder NewMI = BuildMI(MBB, I, DL, TII->get(UnpackedOpcode));
NewMI.addDef(UnpackedDstReg); // vdst
- addOperandAndMods(NewMI, Src0Mods, IsHiBits, *SrcMO1);
- addOperandAndMods(NewMI, Src1Mods, IsHiBits, *SrcMO2);
+ addOperandAndMods(NewMI, Src0Mods, IsHiBits, *SrcMO0);
+ addOperandAndMods(NewMI, Src1Mods, IsHiBits, *SrcMO1);
if (AMDGPU::hasNamedOperand(OpCode, AMDGPU::OpName::src2)) {
- const MachineOperand *SrcMO3 =
+ const MachineOperand *SrcMO2 =
TII->getNamedOperand(I, AMDGPU::OpName::src2);
unsigned Src2Mods =
TII->getNamedOperand(I, AMDGPU::OpName::src2_modifiers)->getImm();
- addOperandAndMods(NewMI, Src2Mods, IsHiBits, *SrcMO3);
+ addOperandAndMods(NewMI, Src2Mods, IsHiBits, *SrcMO2);
}
NewMI.addImm(ClampVal); // clamp
// Packed instructions do not support output modifiers. Safe to assign them 0.
@@ -787,9 +770,13 @@ bool SIPreEmitPeephole::run(MachineFunction &MF) {
// TODO: Fold this into previous block, if possible. Evaluate and handle any
// side effects.
+
+  // Perform the extra MF scans only for supported architectures.
+ if (!ST.hasGFX940Insts())
+ return Changed;
for (MachineBasicBlock &MBB : MF) {
- // Unpack packed instructions overlapped by MFMAs. This allows the compiler
- // to co-issue unpacked instructions with MFMA
+    // Unpack packed instructions overlapped by MFMAs. This allows the
+    // compiler to co-issue unpacked instructions with MFMAs.
auto SchedModel = TII->getSchedModel();
SetVector<MachineInstr *> InstrsToUnpack;
for (auto &MI : make_early_inc_range(MBB.instrs())) {
diff --git a/llvm/lib/Target/RISCV/RISCVISelLowering.cpp b/llvm/lib/Target/RISCV/RISCVISelLowering.cpp
index eb87558..169465e 100644
--- a/llvm/lib/Target/RISCV/RISCVISelLowering.cpp
+++ b/llvm/lib/Target/RISCV/RISCVISelLowering.cpp
@@ -24830,7 +24830,8 @@ bool RISCVTargetLowering::isIntDivCheap(EVT VT, AttributeList Attr) const {
// instruction, as it is usually smaller than the alternative sequence.
// TODO: Add vector division?
bool OptSize = Attr.hasFnAttr(Attribute::MinSize);
- return OptSize && !VT.isVector();
+ return OptSize && !VT.isVector() &&
+ VT.getSizeInBits() <= getMaxDivRemBitWidthSupported();
}
bool RISCVTargetLowering::preferScalarizeSplat(SDNode *N) const {
diff --git a/llvm/lib/Target/RISCV/RISCVInsertVSETVLI.cpp b/llvm/lib/Target/RISCV/RISCVInsertVSETVLI.cpp
index 1b7cb9b..636e31c 100644
--- a/llvm/lib/Target/RISCV/RISCVInsertVSETVLI.cpp
+++ b/llvm/lib/Target/RISCV/RISCVInsertVSETVLI.cpp
@@ -699,7 +699,8 @@ public:
"Can't encode VTYPE for uninitialized or unknown");
if (TWiden != 0)
return RISCVVType::encodeXSfmmVType(SEW, TWiden, AltFmt);
- return RISCVVType::encodeVTYPE(VLMul, SEW, TailAgnostic, MaskAgnostic);
+ return RISCVVType::encodeVTYPE(VLMul, SEW, TailAgnostic, MaskAgnostic,
+ AltFmt);
}
bool hasSEWLMULRatioOnly() const { return SEWLMULRatioOnly; }
diff --git a/llvm/lib/Target/RISCV/RISCVInstrInfo.cpp b/llvm/lib/Target/RISCV/RISCVInstrInfo.cpp
index ddb53a2..12f776b 100644
--- a/llvm/lib/Target/RISCV/RISCVInstrInfo.cpp
+++ b/llvm/lib/Target/RISCV/RISCVInstrInfo.cpp
@@ -3775,11 +3775,13 @@ std::string RISCVInstrInfo::createMIROperandComment(
#define CASE_VFMA_OPCODE_VV(OP) \
CASE_VFMA_OPCODE_LMULS_MF4(OP, VV, E16): \
+ case CASE_VFMA_OPCODE_LMULS_MF4(OP##_ALT, VV, E16): \
case CASE_VFMA_OPCODE_LMULS_MF2(OP, VV, E32): \
case CASE_VFMA_OPCODE_LMULS_M1(OP, VV, E64)
#define CASE_VFMA_SPLATS(OP) \
CASE_VFMA_OPCODE_LMULS_MF4(OP, VFPR16, E16): \
+ case CASE_VFMA_OPCODE_LMULS_MF4(OP##_ALT, VFPR16, E16): \
case CASE_VFMA_OPCODE_LMULS_MF2(OP, VFPR32, E32): \
case CASE_VFMA_OPCODE_LMULS_M1(OP, VFPR64, E64)
// clang-format on
@@ -4003,11 +4005,13 @@ bool RISCVInstrInfo::findCommutedOpIndices(const MachineInstr &MI,
#define CASE_VFMA_CHANGE_OPCODE_VV(OLDOP, NEWOP) \
CASE_VFMA_CHANGE_OPCODE_LMULS_MF4(OLDOP, NEWOP, VV, E16) \
+ CASE_VFMA_CHANGE_OPCODE_LMULS_MF4(OLDOP##_ALT, NEWOP##_ALT, VV, E16) \
CASE_VFMA_CHANGE_OPCODE_LMULS_MF2(OLDOP, NEWOP, VV, E32) \
CASE_VFMA_CHANGE_OPCODE_LMULS_M1(OLDOP, NEWOP, VV, E64)
#define CASE_VFMA_CHANGE_OPCODE_SPLATS(OLDOP, NEWOP) \
CASE_VFMA_CHANGE_OPCODE_LMULS_MF4(OLDOP, NEWOP, VFPR16, E16) \
+ CASE_VFMA_CHANGE_OPCODE_LMULS_MF4(OLDOP##_ALT, NEWOP##_ALT, VFPR16, E16) \
CASE_VFMA_CHANGE_OPCODE_LMULS_MF2(OLDOP, NEWOP, VFPR32, E32) \
CASE_VFMA_CHANGE_OPCODE_LMULS_M1(OLDOP, NEWOP, VFPR64, E64)
// clang-format on
@@ -4469,6 +4473,20 @@ bool RISCVInstrInfo::simplifyInstruction(MachineInstr &MI) const {
CASE_FP_WIDEOP_CHANGE_OPCODE_COMMON(OP, M2, E32) \
CASE_FP_WIDEOP_CHANGE_OPCODE_COMMON(OP, M4, E16) \
CASE_FP_WIDEOP_CHANGE_OPCODE_COMMON(OP, M4, E32) \
+
+#define CASE_FP_WIDEOP_OPCODE_LMULS_ALT(OP) \
+ CASE_FP_WIDEOP_OPCODE_COMMON(OP, MF4, E16): \
+ case CASE_FP_WIDEOP_OPCODE_COMMON(OP, MF2, E16): \
+ case CASE_FP_WIDEOP_OPCODE_COMMON(OP, M1, E16): \
+ case CASE_FP_WIDEOP_OPCODE_COMMON(OP, M2, E16): \
+ case CASE_FP_WIDEOP_OPCODE_COMMON(OP, M4, E16)
+
+#define CASE_FP_WIDEOP_CHANGE_OPCODE_LMULS_ALT(OP) \
+ CASE_FP_WIDEOP_CHANGE_OPCODE_COMMON(OP, MF4, E16) \
+ CASE_FP_WIDEOP_CHANGE_OPCODE_COMMON(OP, MF2, E16) \
+ CASE_FP_WIDEOP_CHANGE_OPCODE_COMMON(OP, M1, E16) \
+ CASE_FP_WIDEOP_CHANGE_OPCODE_COMMON(OP, M2, E16) \
+ CASE_FP_WIDEOP_CHANGE_OPCODE_COMMON(OP, M4, E16)
// clang-format on
MachineInstr *RISCVInstrInfo::convertToThreeAddress(MachineInstr &MI,
@@ -4478,6 +4496,8 @@ MachineInstr *RISCVInstrInfo::convertToThreeAddress(MachineInstr &MI,
switch (MI.getOpcode()) {
default:
return nullptr;
+ case CASE_FP_WIDEOP_OPCODE_LMULS_ALT(FWADD_ALT_WV):
+ case CASE_FP_WIDEOP_OPCODE_LMULS_ALT(FWSUB_ALT_WV):
case CASE_FP_WIDEOP_OPCODE_LMULS(FWADD_WV):
case CASE_FP_WIDEOP_OPCODE_LMULS(FWSUB_WV): {
assert(RISCVII::hasVecPolicyOp(MI.getDesc().TSFlags) &&
@@ -4494,6 +4514,8 @@ MachineInstr *RISCVInstrInfo::convertToThreeAddress(MachineInstr &MI,
llvm_unreachable("Unexpected opcode");
CASE_FP_WIDEOP_CHANGE_OPCODE_LMULS(FWADD_WV)
CASE_FP_WIDEOP_CHANGE_OPCODE_LMULS(FWSUB_WV)
+ CASE_FP_WIDEOP_CHANGE_OPCODE_LMULS_ALT(FWADD_ALT_WV)
+ CASE_FP_WIDEOP_CHANGE_OPCODE_LMULS_ALT(FWSUB_ALT_WV)
}
// clang-format on
diff --git a/llvm/lib/Target/RISCV/RISCVInstrInfoZvfbf.td b/llvm/lib/Target/RISCV/RISCVInstrInfoZvfbf.td
index c9c1246..9358486 100644
--- a/llvm/lib/Target/RISCV/RISCVInstrInfoZvfbf.td
+++ b/llvm/lib/Target/RISCV/RISCVInstrInfoZvfbf.td
@@ -44,6 +44,336 @@ let Predicates = [HasStdExtZvfbfmin] in {
let mayRaiseFPException = true, Predicates = [HasStdExtZvfbfwma] in
defm PseudoVFWMACCBF16 : VPseudoVWMAC_VV_VF_BF_RM;
+defset list<VTypeInfoToWide> AllWidenableIntToBF16Vectors = {
+ def : VTypeInfoToWide<VI8MF8, VBF16MF4>;
+ def : VTypeInfoToWide<VI8MF4, VBF16MF2>;
+ def : VTypeInfoToWide<VI8MF2, VBF16M1>;
+ def : VTypeInfoToWide<VI8M1, VBF16M2>;
+ def : VTypeInfoToWide<VI8M2, VBF16M4>;
+ def : VTypeInfoToWide<VI8M4, VBF16M8>;
+}
+
+multiclass VPseudoVALU_VV_VF_RM_BF16 {
+ foreach m = MxListF in {
+ defm "" : VPseudoBinaryFV_VV_RM<m, 16/*sew*/>,
+ SchedBinary<"WriteVFALUV", "ReadVFALUV", "ReadVFALUV", m.MX,
+ 16/*sew*/, forcePassthruRead=true>;
+ }
+
+ defvar f = SCALAR_F16;
+ foreach m = f.MxList in {
+ defm "" : VPseudoBinaryV_VF_RM<m, f, f.SEW>,
+ SchedBinary<"WriteVFALUF", "ReadVFALUV", "ReadVFALUF", m.MX,
+ f.SEW, forcePassthruRead=true>;
+ }
+}
+
+multiclass VPseudoVALU_VF_RM_BF16 {
+ defvar f = SCALAR_F16;
+ foreach m = f.MxList in {
+ defm "" : VPseudoBinaryV_VF_RM<m, f, f.SEW>,
+ SchedBinary<"WriteVFALUF", "ReadVFALUV", "ReadVFALUF", m.MX,
+ f.SEW, forcePassthruRead=true>;
+ }
+}
+
+multiclass VPseudoVFWALU_VV_VF_RM_BF16 {
+ foreach m = MxListFW in {
+ defm "" : VPseudoBinaryW_VV_RM<m, sew=16>,
+ SchedBinary<"WriteVFWALUV", "ReadVFWALUV", "ReadVFWALUV", m.MX,
+ 16/*sew*/, forcePassthruRead=true>;
+ }
+
+ defvar f = SCALAR_F16;
+ foreach m = f.MxListFW in {
+ defm "" : VPseudoBinaryW_VF_RM<m, f, sew=f.SEW>,
+ SchedBinary<"WriteVFWALUF", "ReadVFWALUV", "ReadVFWALUF", m.MX,
+ f.SEW, forcePassthruRead=true>;
+ }
+}
+
+multiclass VPseudoVFWALU_WV_WF_RM_BF16 {
+ foreach m = MxListFW in {
+ defm "" : VPseudoBinaryW_WV_RM<m, sew=16>,
+ SchedBinary<"WriteVFWALUV", "ReadVFWALUV", "ReadVFWALUV", m.MX,
+ 16/*sew*/, forcePassthruRead=true>;
+ }
+ defvar f = SCALAR_F16;
+ foreach m = f.MxListFW in {
+ defm "" : VPseudoBinaryW_WF_RM<m, f, sew=f.SEW>,
+ SchedBinary<"WriteVFWALUF", "ReadVFWALUV", "ReadVFWALUF", m.MX,
+ f.SEW, forcePassthruRead=true>;
+ }
+}
+
+multiclass VPseudoVFMUL_VV_VF_RM_BF16 {
+ foreach m = MxListF in {
+ defm "" : VPseudoBinaryFV_VV_RM<m, 16/*sew*/>,
+ SchedBinary<"WriteVFMulV", "ReadVFMulV", "ReadVFMulV", m.MX,
+ 16/*sew*/, forcePassthruRead=true>;
+ }
+
+ defvar f = SCALAR_F16;
+ foreach m = f.MxList in {
+ defm "" : VPseudoBinaryV_VF_RM<m, f, f.SEW>,
+ SchedBinary<"WriteVFMulF", "ReadVFMulV", "ReadVFMulF", m.MX,
+ f.SEW, forcePassthruRead=true>;
+ }
+}
+
+multiclass VPseudoVWMUL_VV_VF_RM_BF16 {
+ foreach m = MxListFW in {
+ defm "" : VPseudoBinaryW_VV_RM<m, sew=16>,
+ SchedBinary<"WriteVFWMulV", "ReadVFWMulV", "ReadVFWMulV", m.MX,
+ 16/*sew*/, forcePassthruRead=true>;
+ }
+
+ defvar f = SCALAR_F16;
+ foreach m = f.MxListFW in {
+ defm "" : VPseudoBinaryW_VF_RM<m, f, sew=f.SEW>,
+ SchedBinary<"WriteVFWMulF", "ReadVFWMulV", "ReadVFWMulF", m.MX,
+ f.SEW, forcePassthruRead=true>;
+ }
+}
+
+multiclass VPseudoVMAC_VV_VF_AAXA_RM_BF16 {
+ foreach m = MxListF in {
+ defm "" : VPseudoTernaryV_VV_AAXA_RM<m, 16/*sew*/>,
+ SchedTernary<"WriteVFMulAddV", "ReadVFMulAddV", "ReadVFMulAddV",
+ "ReadVFMulAddV", m.MX, 16/*sew*/>;
+ }
+
+ defvar f = SCALAR_F16;
+ foreach m = f.MxList in {
+ defm "" : VPseudoTernaryV_VF_AAXA_RM<m, f, f.SEW>,
+ SchedTernary<"WriteVFMulAddF", "ReadVFMulAddV", "ReadVFMulAddF",
+ "ReadVFMulAddV", m.MX, f.SEW>;
+ }
+}
+
+multiclass VPseudoVWMAC_VV_VF_RM_BF16 {
+ foreach m = MxListFW in {
+ defm "" : VPseudoTernaryW_VV_RM<m, sew=16>,
+ SchedTernary<"WriteVFWMulAddV", "ReadVFWMulAddV",
+ "ReadVFWMulAddV", "ReadVFWMulAddV", m.MX, 16/*sew*/>;
+ }
+
+ defvar f = SCALAR_F16;
+ foreach m = f.MxListFW in {
+ defm "" : VPseudoTernaryW_VF_RM<m, f, sew=f.SEW>,
+ SchedTernary<"WriteVFWMulAddF", "ReadVFWMulAddV",
+ "ReadVFWMulAddF", "ReadVFWMulAddV", m.MX, f.SEW>;
+ }
+}
+
+multiclass VPseudoVRCP_V_BF16 {
+ foreach m = MxListF in {
+ defvar mx = m.MX;
+ let VLMul = m.value in {
+ def "_V_" # mx # "_E16"
+ : VPseudoUnaryNoMask<m.vrclass, m.vrclass>,
+ SchedUnary<"WriteVFRecpV", "ReadVFRecpV", mx, 16/*sew*/,
+ forcePassthruRead=true>;
+ def "_V_" # mx # "_E16_MASK"
+ : VPseudoUnaryMask<m.vrclass, m.vrclass>,
+ RISCVMaskedPseudo<MaskIdx = 2>,
+ SchedUnary<"WriteVFRecpV", "ReadVFRecpV", mx, 16/*sew*/,
+ forcePassthruRead=true>;
+ }
+ }
+}
+
+multiclass VPseudoVRCP_V_RM_BF16 {
+ foreach m = MxListF in {
+ defvar mx = m.MX;
+ let VLMul = m.value in {
+ def "_V_" # mx # "_E16"
+ : VPseudoUnaryNoMaskRoundingMode<m.vrclass, m.vrclass>,
+ SchedUnary<"WriteVFRecpV", "ReadVFRecpV", mx, 16/*sew*/,
+ forcePassthruRead=true>;
+ def "_V_" # mx # "_E16_MASK"
+ : VPseudoUnaryMaskRoundingMode<m.vrclass, m.vrclass>,
+ RISCVMaskedPseudo<MaskIdx = 2>,
+ SchedUnary<"WriteVFRecpV", "ReadVFRecpV", mx, 16/*sew*/,
+ forcePassthruRead=true>;
+ }
+ }
+}
+
+multiclass VPseudoVMAX_VV_VF_BF16 {
+ foreach m = MxListF in {
+ defm "" : VPseudoBinaryV_VV<m, sew=16>,
+ SchedBinary<"WriteVFMinMaxV", "ReadVFMinMaxV", "ReadVFMinMaxV",
+ m.MX, 16/*sew*/, forcePassthruRead=true>;
+ }
+
+ defvar f = SCALAR_F16;
+ foreach m = f.MxList in {
+ defm "" : VPseudoBinaryV_VF<m, f, f.SEW>,
+ SchedBinary<"WriteVFMinMaxF", "ReadVFMinMaxV", "ReadVFMinMaxF",
+ m.MX, f.SEW, forcePassthruRead=true>;
+ }
+}
+
+multiclass VPseudoVSGNJ_VV_VF_BF16 {
+ foreach m = MxListF in {
+ defm "" : VPseudoBinaryV_VV<m, sew=16>,
+ SchedBinary<"WriteVFSgnjV", "ReadVFSgnjV", "ReadVFSgnjV", m.MX,
+ 16/*sew*/, forcePassthruRead=true>;
+ }
+
+ defvar f = SCALAR_F16;
+ foreach m = f.MxList in {
+ defm "" : VPseudoBinaryV_VF<m, f, f.SEW>,
+ SchedBinary<"WriteVFSgnjF", "ReadVFSgnjV", "ReadVFSgnjF", m.MX,
+ f.SEW, forcePassthruRead=true>;
+ }
+}
+
+multiclass VPseudoVWCVTF_V_BF16 {
+ defvar constraint = "@earlyclobber $rd";
+ foreach m = MxListW in
+ defm _V : VPseudoConversion<m.wvrclass, m.vrclass, m, constraint, sew=8,
+ TargetConstraintType=3>,
+ SchedUnary<"WriteVFWCvtIToFV", "ReadVFWCvtIToFV", m.MX, 8/*sew*/,
+ forcePassthruRead=true>;
+}
+
+multiclass VPseudoVWCVTD_V_BF16 {
+ defvar constraint = "@earlyclobber $rd";
+ foreach m = MxListFW in
+ defm _V : VPseudoConversion<m.wvrclass, m.vrclass, m, constraint, sew=16,
+ TargetConstraintType=3>,
+ SchedUnary<"WriteVFWCvtFToFV", "ReadVFWCvtFToFV", m.MX, 16/*sew*/,
+ forcePassthruRead=true>;
+}
+
+multiclass VPseudoVNCVTD_W_BF16 {
+ defvar constraint = "@earlyclobber $rd";
+ foreach m = MxListFW in
+ defm _W : VPseudoConversion<m.vrclass, m.wvrclass, m, constraint, sew=16,
+ TargetConstraintType=2>,
+ SchedUnary<"WriteVFNCvtFToFV", "ReadVFNCvtFToFV", m.MX, 16/*sew*/,
+ forcePassthruRead=true>;
+}
+
+multiclass VPseudoVNCVTD_W_RM_BF16 {
+ defvar constraint = "@earlyclobber $rd";
+ foreach m = MxListFW in
+ defm _W : VPseudoConversionRoundingMode<m.vrclass, m.wvrclass, m,
+ constraint, sew=16,
+ TargetConstraintType=2>,
+ SchedUnary<"WriteVFNCvtFToFV", "ReadVFNCvtFToFV", m.MX, 16/*sew*/,
+ forcePassthruRead=true>;
+}
+
+let Predicates = [HasStdExtZvfbfa], AltFmtType = IS_ALTFMT in {
+let mayRaiseFPException = true in {
+defm PseudoVFADD_ALT : VPseudoVALU_VV_VF_RM_BF16;
+defm PseudoVFSUB_ALT : VPseudoVALU_VV_VF_RM_BF16;
+defm PseudoVFRSUB_ALT : VPseudoVALU_VF_RM_BF16;
+}
+
+let mayRaiseFPException = true in {
+defm PseudoVFWADD_ALT : VPseudoVFWALU_VV_VF_RM_BF16;
+defm PseudoVFWSUB_ALT : VPseudoVFWALU_VV_VF_RM_BF16;
+defm PseudoVFWADD_ALT : VPseudoVFWALU_WV_WF_RM_BF16;
+defm PseudoVFWSUB_ALT : VPseudoVFWALU_WV_WF_RM_BF16;
+}
+
+let mayRaiseFPException = true in
+defm PseudoVFMUL_ALT : VPseudoVFMUL_VV_VF_RM_BF16;
+
+let mayRaiseFPException = true in
+defm PseudoVFWMUL_ALT : VPseudoVWMUL_VV_VF_RM_BF16;
+
+let mayRaiseFPException = true in {
+defm PseudoVFMACC_ALT : VPseudoVMAC_VV_VF_AAXA_RM_BF16;
+defm PseudoVFNMACC_ALT : VPseudoVMAC_VV_VF_AAXA_RM_BF16;
+defm PseudoVFMSAC_ALT : VPseudoVMAC_VV_VF_AAXA_RM_BF16;
+defm PseudoVFNMSAC_ALT : VPseudoVMAC_VV_VF_AAXA_RM_BF16;
+defm PseudoVFMADD_ALT : VPseudoVMAC_VV_VF_AAXA_RM_BF16;
+defm PseudoVFNMADD_ALT : VPseudoVMAC_VV_VF_AAXA_RM_BF16;
+defm PseudoVFMSUB_ALT : VPseudoVMAC_VV_VF_AAXA_RM_BF16;
+defm PseudoVFNMSUB_ALT : VPseudoVMAC_VV_VF_AAXA_RM_BF16;
+}
+
+let mayRaiseFPException = true in {
+defm PseudoVFWMACC_ALT : VPseudoVWMAC_VV_VF_RM_BF16;
+defm PseudoVFWNMACC_ALT : VPseudoVWMAC_VV_VF_RM_BF16;
+defm PseudoVFWMSAC_ALT : VPseudoVWMAC_VV_VF_RM_BF16;
+defm PseudoVFWNMSAC_ALT : VPseudoVWMAC_VV_VF_RM_BF16;
+}
+
+let mayRaiseFPException = true in
+defm PseudoVFRSQRT7_ALT : VPseudoVRCP_V_BF16;
+
+let mayRaiseFPException = true in
+defm PseudoVFREC7_ALT : VPseudoVRCP_V_RM_BF16;
+
+let mayRaiseFPException = true in {
+defm PseudoVFMIN_ALT : VPseudoVMAX_VV_VF_BF16;
+defm PseudoVFMAX_ALT : VPseudoVMAX_VV_VF_BF16;
+}
+
+defm PseudoVFSGNJ_ALT : VPseudoVSGNJ_VV_VF_BF16;
+defm PseudoVFSGNJN_ALT : VPseudoVSGNJ_VV_VF_BF16;
+defm PseudoVFSGNJX_ALT : VPseudoVSGNJ_VV_VF_BF16;
+
+let mayRaiseFPException = true in {
+defm PseudoVMFEQ_ALT : VPseudoVCMPM_VV_VF;
+defm PseudoVMFNE_ALT : VPseudoVCMPM_VV_VF;
+defm PseudoVMFLT_ALT : VPseudoVCMPM_VV_VF;
+defm PseudoVMFLE_ALT : VPseudoVCMPM_VV_VF;
+defm PseudoVMFGT_ALT : VPseudoVCMPM_VF;
+defm PseudoVMFGE_ALT : VPseudoVCMPM_VF;
+}
+
+defm PseudoVFCLASS_ALT : VPseudoVCLS_V;
+
+defm PseudoVFMERGE_ALT : VPseudoVMRG_FM;
+
+defm PseudoVFMV_V_ALT : VPseudoVMV_F;
+
+let mayRaiseFPException = true in {
+defm PseudoVFWCVT_F_XU_ALT : VPseudoVWCVTF_V_BF16;
+defm PseudoVFWCVT_F_X_ALT : VPseudoVWCVTF_V_BF16;
+
+defm PseudoVFWCVT_F_F_ALT : VPseudoVWCVTD_V_BF16;
+} // mayRaiseFPException = true
+
+let mayRaiseFPException = true in {
+let hasSideEffects = 0, hasPostISelHook = 1 in {
+defm PseudoVFNCVT_XU_F_ALT : VPseudoVNCVTI_W_RM;
+defm PseudoVFNCVT_X_F_ALT : VPseudoVNCVTI_W_RM;
+}
+
+defm PseudoVFNCVT_RTZ_XU_F_ALT : VPseudoVNCVTI_W;
+defm PseudoVFNCVT_RTZ_X_F_ALT : VPseudoVNCVTI_W;
+
+defm PseudoVFNCVT_F_F_ALT : VPseudoVNCVTD_W_RM_BF16;
+
+defm PseudoVFNCVT_ROD_F_F_ALT : VPseudoVNCVTD_W_BF16;
+} // mayRaiseFPException = true
+
+let mayLoad = 0, mayStore = 0, hasSideEffects = 0 in {
+ defvar f = SCALAR_F16;
+ let HasSEWOp = 1, BaseInstr = VFMV_F_S in
+ def "PseudoVFMV_" # f.FX # "_S_ALT" :
+ RISCVVPseudo<(outs f.fprclass:$rd), (ins VR:$rs2, sew:$sew)>,
+ Sched<[WriteVMovFS, ReadVMovFS]>;
+ let HasVLOp = 1, HasSEWOp = 1, BaseInstr = VFMV_S_F, isReMaterializable = 1,
+ Constraints = "$rd = $passthru" in
+ def "PseudoVFMV_S_" # f.FX # "_ALT" :
+ RISCVVPseudo<(outs VR:$rd),
+ (ins VR:$passthru, f.fprclass:$rs1, AVL:$vl, sew:$sew)>,
+ Sched<[WriteVMovSF, ReadVMovSF_V, ReadVMovSF_F]>;
+}
+
+defm PseudoVFSLIDE1UP_ALT : VPseudoVSLD1_VF<"@earlyclobber $rd">;
+defm PseudoVFSLIDE1DOWN_ALT : VPseudoVSLD1_VF;
+} // Predicates = [HasStdExtZvfbfa], AltFmtType = IS_ALTFMT
+
//===----------------------------------------------------------------------===//
// Patterns
//===----------------------------------------------------------------------===//
@@ -108,6 +438,130 @@ let Predicates = [HasStdExtZvfbfmin] in {
FRM_DYN,
fvti.AVL, fvti.Log2SEW, TA_MA)>;
}
+
+ defm : VPatUnaryV_V_AnyMask<"int_riscv_vcompress", "PseudoVCOMPRESS", AllBF16Vectors>;
+ defm : VPatBinaryV_VV_VX_VI_INT<"int_riscv_vrgather", "PseudoVRGATHER",
+ AllBF16Vectors, uimm5>;
+ defm : VPatBinaryV_VV_INT_EEW<"int_riscv_vrgatherei16_vv", "PseudoVRGATHEREI16",
+ eew=16, vtilist=AllBF16Vectors>;
+ defm : VPatTernaryV_VX_VI<"int_riscv_vslideup", "PseudoVSLIDEUP", AllBF16Vectors, uimm5>;
+ defm : VPatTernaryV_VX_VI<"int_riscv_vslidedown", "PseudoVSLIDEDOWN", AllBF16Vectors, uimm5>;
+
+ foreach fvti = AllBF16Vectors in {
+ defm : VPatBinaryCarryInTAIL<"int_riscv_vmerge", "PseudoVMERGE", "VVM",
+ fvti.Vector,
+ fvti.Vector, fvti.Vector, fvti.Mask,
+ fvti.Log2SEW, fvti.LMul, fvti.RegClass,
+ fvti.RegClass, fvti.RegClass>;
+ defm : VPatBinaryCarryInTAIL<"int_riscv_vfmerge", "PseudoVFMERGE",
+ "V"#fvti.ScalarSuffix#"M",
+ fvti.Vector,
+ fvti.Vector, fvti.Scalar, fvti.Mask,
+ fvti.Log2SEW, fvti.LMul, fvti.RegClass,
+ fvti.RegClass, fvti.ScalarRegClass>;
+ defvar instr = !cast<Instruction>("PseudoVMERGE_VIM_"#fvti.LMul.MX);
+ def : Pat<(fvti.Vector (int_riscv_vfmerge (fvti.Vector fvti.RegClass:$passthru),
+ (fvti.Vector fvti.RegClass:$rs2),
+ (fvti.Scalar (fpimm0)),
+ (fvti.Mask VMV0:$vm), VLOpFrag)),
+ (instr fvti.RegClass:$passthru, fvti.RegClass:$rs2, 0,
+ (fvti.Mask VMV0:$vm), GPR:$vl, fvti.Log2SEW)>;
+
+ defvar ivti = GetIntVTypeInfo<fvti>.Vti;
+ def : Pat<(fvti.Vector (vselect (fvti.Mask VMV0:$vm), fvti.RegClass:$rs1,
+ fvti.RegClass:$rs2)),
+ (!cast<Instruction>("PseudoVMERGE_VVM_"#fvti.LMul.MX)
+ (fvti.Vector (IMPLICIT_DEF)),
+ fvti.RegClass:$rs2, fvti.RegClass:$rs1, (fvti.Mask VMV0:$vm),
+ fvti.AVL, fvti.Log2SEW)>;
+
+ def : Pat<(fvti.Vector (vselect (fvti.Mask VMV0:$vm),
+ (SplatFPOp (SelectScalarFPAsInt (XLenVT GPR:$imm))),
+ fvti.RegClass:$rs2)),
+ (!cast<Instruction>("PseudoVMERGE_VXM_"#fvti.LMul.MX)
+ (fvti.Vector (IMPLICIT_DEF)),
+ fvti.RegClass:$rs2, GPR:$imm, (fvti.Mask VMV0:$vm), fvti.AVL, fvti.Log2SEW)>;
+
+ def : Pat<(fvti.Vector (vselect (fvti.Mask VMV0:$vm),
+ (SplatFPOp (fvti.Scalar fpimm0)),
+ fvti.RegClass:$rs2)),
+ (!cast<Instruction>("PseudoVMERGE_VIM_"#fvti.LMul.MX)
+ (fvti.Vector (IMPLICIT_DEF)),
+ fvti.RegClass:$rs2, 0, (fvti.Mask VMV0:$vm), fvti.AVL, fvti.Log2SEW)>;
+
+ def : Pat<(fvti.Vector (vselect (fvti.Mask VMV0:$vm),
+ (SplatFPOp fvti.ScalarRegClass:$rs1),
+ fvti.RegClass:$rs2)),
+ (!cast<Instruction>("PseudoVFMERGE_V"#fvti.ScalarSuffix#"M_"#fvti.LMul.MX)
+ (fvti.Vector (IMPLICIT_DEF)),
+ fvti.RegClass:$rs2,
+ (fvti.Scalar fvti.ScalarRegClass:$rs1),
+ (fvti.Mask VMV0:$vm), fvti.AVL, fvti.Log2SEW)>;
+
+ def : Pat<(fvti.Vector (riscv_vmerge_vl (fvti.Mask VMV0:$vm),
+ fvti.RegClass:$rs1,
+ fvti.RegClass:$rs2,
+ fvti.RegClass:$passthru,
+ VLOpFrag)),
+ (!cast<Instruction>("PseudoVMERGE_VVM_"#fvti.LMul.MX)
+ fvti.RegClass:$passthru, fvti.RegClass:$rs2, fvti.RegClass:$rs1, (fvti.Mask VMV0:$vm),
+ GPR:$vl, fvti.Log2SEW)>;
+
+ def : Pat<(fvti.Vector (riscv_vmerge_vl (fvti.Mask VMV0:$vm),
+ (SplatFPOp (SelectScalarFPAsInt (XLenVT GPR:$imm))),
+ fvti.RegClass:$rs2,
+ fvti.RegClass:$passthru,
+ VLOpFrag)),
+ (!cast<Instruction>("PseudoVMERGE_VXM_"#fvti.LMul.MX)
+ fvti.RegClass:$passthru, fvti.RegClass:$rs2, GPR:$imm, (fvti.Mask VMV0:$vm),
+ GPR:$vl, fvti.Log2SEW)>;
+
+
+ def : Pat<(fvti.Vector (riscv_vmerge_vl (fvti.Mask VMV0:$vm),
+ (SplatFPOp (fvti.Scalar fpimm0)),
+ fvti.RegClass:$rs2,
+ fvti.RegClass:$passthru,
+ VLOpFrag)),
+ (!cast<Instruction>("PseudoVMERGE_VIM_"#fvti.LMul.MX)
+ fvti.RegClass:$passthru, fvti.RegClass:$rs2, 0, (fvti.Mask VMV0:$vm),
+ GPR:$vl, fvti.Log2SEW)>;
+
+ def : Pat<(fvti.Vector (riscv_vmerge_vl (fvti.Mask VMV0:$vm),
+ (SplatFPOp fvti.ScalarRegClass:$rs1),
+ fvti.RegClass:$rs2,
+ fvti.RegClass:$passthru,
+ VLOpFrag)),
+ (!cast<Instruction>("PseudoVFMERGE_V"#fvti.ScalarSuffix#"M_"#fvti.LMul.MX)
+ fvti.RegClass:$passthru, fvti.RegClass:$rs2,
+ (fvti.Scalar fvti.ScalarRegClass:$rs1),
+ (fvti.Mask VMV0:$vm), GPR:$vl, fvti.Log2SEW)>;
+
+ def : Pat<(fvti.Vector
+ (riscv_vrgather_vv_vl fvti.RegClass:$rs2,
+ (ivti.Vector fvti.RegClass:$rs1),
+ fvti.RegClass:$passthru,
+ (fvti.Mask VMV0:$vm),
+ VLOpFrag)),
+ (!cast<Instruction>("PseudoVRGATHER_VV_"# fvti.LMul.MX#"_E"# fvti.SEW#"_MASK")
+ fvti.RegClass:$passthru, fvti.RegClass:$rs2, fvti.RegClass:$rs1,
+ (fvti.Mask VMV0:$vm), GPR:$vl, fvti.Log2SEW, TAIL_AGNOSTIC)>;
+ def : Pat<(fvti.Vector (riscv_vrgather_vx_vl fvti.RegClass:$rs2, GPR:$rs1,
+ fvti.RegClass:$passthru,
+ (fvti.Mask VMV0:$vm),
+ VLOpFrag)),
+ (!cast<Instruction>("PseudoVRGATHER_VX_"# fvti.LMul.MX#"_MASK")
+ fvti.RegClass:$passthru, fvti.RegClass:$rs2, GPR:$rs1,
+ (fvti.Mask VMV0:$vm), GPR:$vl, fvti.Log2SEW, TAIL_AGNOSTIC)>;
+ def : Pat<(fvti.Vector
+ (riscv_vrgather_vx_vl fvti.RegClass:$rs2,
+ uimm5:$imm,
+ fvti.RegClass:$passthru,
+ (fvti.Mask VMV0:$vm),
+ VLOpFrag)),
+ (!cast<Instruction>("PseudoVRGATHER_VI_"# fvti.LMul.MX#"_MASK")
+ fvti.RegClass:$passthru, fvti.RegClass:$rs2, uimm5:$imm,
+ (fvti.Mask VMV0:$vm), GPR:$vl, fvti.Log2SEW, TAIL_AGNOSTIC)>;
+ }
}
let Predicates = [HasStdExtZvfbfwma] in {
@@ -118,3 +572,224 @@ let Predicates = [HasStdExtZvfbfwma] in {
defm : VPatWidenFPMulAccSDNode_VV_VF_RM<"PseudoVFWMACCBF16",
AllWidenableBF16ToFloatVectors>;
}
+
+multiclass VPatConversionVI_VF_BF16<string intrinsic, string instruction> {
+ foreach fvti = AllBF16Vectors in {
+ defvar ivti = GetIntVTypeInfo<fvti>.Vti;
+ let Predicates = !listconcat(GetVTypePredicates<fvti>.Predicates,
+ GetVTypePredicates<ivti>.Predicates) in
+ defm : VPatConversion<intrinsic, instruction, "V",
+ ivti.Vector, fvti.Vector, ivti.Mask, fvti.Log2SEW,
+ fvti.LMul, ivti.RegClass, fvti.RegClass>;
+ }
+}
+
+multiclass VPatConversionWF_VI_BF16<string intrinsic, string instruction,
+ bit isSEWAware = 0> {
+ foreach vtiToWti = AllWidenableIntToBF16Vectors in {
+ defvar vti = vtiToWti.Vti;
+ defvar fwti = vtiToWti.Wti;
+ let Predicates = !listconcat(GetVTypePredicates<vti>.Predicates,
+ GetVTypePredicates<fwti>.Predicates) in
+ defm : VPatConversion<intrinsic, instruction, "V",
+ fwti.Vector, vti.Vector, fwti.Mask, vti.Log2SEW,
+ vti.LMul, fwti.RegClass, vti.RegClass, isSEWAware>;
+ }
+}
+
+multiclass VPatConversionWF_VF_BF16<string intrinsic, string instruction,
+ bit isSEWAware = 0> {
+ foreach fvtiToFWti = AllWidenableBF16ToFloatVectors in {
+ defvar fvti = fvtiToFWti.Vti;
+ defvar fwti = fvtiToFWti.Wti;
+ let Predicates = !listconcat(GetVTypeMinimalPredicates<fvti>.Predicates,
+ GetVTypeMinimalPredicates<fwti>.Predicates) in
+ defm : VPatConversion<intrinsic, instruction, "V",
+ fwti.Vector, fvti.Vector, fwti.Mask, fvti.Log2SEW,
+ fvti.LMul, fwti.RegClass, fvti.RegClass, isSEWAware>;
+ }
+}
+
+multiclass VPatConversionVI_WF_BF16<string intrinsic, string instruction> {
+ foreach vtiToWti = AllWidenableIntToBF16Vectors in {
+ defvar vti = vtiToWti.Vti;
+ defvar fwti = vtiToWti.Wti;
+ let Predicates = !listconcat(GetVTypePredicates<vti>.Predicates,
+ GetVTypePredicates<fwti>.Predicates) in
+ defm : VPatConversion<intrinsic, instruction, "W",
+ vti.Vector, fwti.Vector, vti.Mask, vti.Log2SEW,
+ vti.LMul, vti.RegClass, fwti.RegClass>;
+ }
+}
+
+multiclass VPatConversionVI_WF_RM_BF16<string intrinsic, string instruction> {
+ foreach vtiToWti = AllWidenableIntToBF16Vectors in {
+ defvar vti = vtiToWti.Vti;
+ defvar fwti = vtiToWti.Wti;
+ let Predicates = !listconcat(GetVTypePredicates<vti>.Predicates,
+ GetVTypePredicates<fwti>.Predicates) in
+ defm : VPatConversionRoundingMode<intrinsic, instruction, "W",
+ vti.Vector, fwti.Vector, vti.Mask, vti.Log2SEW,
+ vti.LMul, vti.RegClass, fwti.RegClass>;
+ }
+}
+
+multiclass VPatConversionVF_WF_BF16<string intrinsic, string instruction,
+ bit isSEWAware = 0> {
+ foreach fvtiToFWti = AllWidenableBF16ToFloatVectors in {
+ defvar fvti = fvtiToFWti.Vti;
+ defvar fwti = fvtiToFWti.Wti;
+ let Predicates = !listconcat(GetVTypePredicates<fvti>.Predicates,
+ GetVTypePredicates<fwti>.Predicates) in
+ defm : VPatConversion<intrinsic, instruction, "W",
+ fvti.Vector, fwti.Vector, fvti.Mask, fvti.Log2SEW,
+ fvti.LMul, fvti.RegClass, fwti.RegClass, isSEWAware>;
+ }
+}
+
+let Predicates = [HasStdExtZvfbfa] in {
+defm : VPatBinaryV_VV_VX_RM<"int_riscv_vfadd", "PseudoVFADD_ALT",
+ AllBF16Vectors, isSEWAware = 1>;
+defm : VPatBinaryV_VV_VX_RM<"int_riscv_vfsub", "PseudoVFSUB_ALT",
+ AllBF16Vectors, isSEWAware = 1>;
+defm : VPatBinaryV_VX_RM<"int_riscv_vfrsub", "PseudoVFRSUB_ALT",
+ AllBF16Vectors, isSEWAware = 1>;
+defm : VPatBinaryW_VV_VX_RM<"int_riscv_vfwadd", "PseudoVFWADD_ALT",
+ AllWidenableBF16ToFloatVectors, isSEWAware=1>;
+defm : VPatBinaryW_VV_VX_RM<"int_riscv_vfwsub", "PseudoVFWSUB_ALT",
+ AllWidenableBF16ToFloatVectors, isSEWAware=1>;
+defm : VPatBinaryW_WV_WX_RM<"int_riscv_vfwadd_w", "PseudoVFWADD_ALT",
+ AllWidenableBF16ToFloatVectors, isSEWAware=1>;
+defm : VPatBinaryW_WV_WX_RM<"int_riscv_vfwsub_w", "PseudoVFWSUB_ALT",
+ AllWidenableBF16ToFloatVectors, isSEWAware=1>;
+defm : VPatBinaryV_VV_VX_RM<"int_riscv_vfmul", "PseudoVFMUL_ALT",
+ AllBF16Vectors, isSEWAware=1>;
+defm : VPatBinaryW_VV_VX_RM<"int_riscv_vfwmul", "PseudoVFWMUL_ALT",
+ AllWidenableBF16ToFloatVectors, isSEWAware=1>;
+defm : VPatTernaryV_VV_VX_AAXA_RM<"int_riscv_vfmacc", "PseudoVFMACC_ALT",
+ AllBF16Vectors, isSEWAware=1>;
+defm : VPatTernaryV_VV_VX_AAXA_RM<"int_riscv_vfnmacc", "PseudoVFNMACC_ALT",
+ AllBF16Vectors, isSEWAware=1>;
+defm : VPatTernaryV_VV_VX_AAXA_RM<"int_riscv_vfmsac", "PseudoVFMSAC_ALT",
+ AllBF16Vectors, isSEWAware=1>;
+defm : VPatTernaryV_VV_VX_AAXA_RM<"int_riscv_vfnmsac", "PseudoVFNMSAC_ALT",
+ AllBF16Vectors, isSEWAware=1>;
+defm : VPatTernaryV_VV_VX_AAXA_RM<"int_riscv_vfmadd", "PseudoVFMADD_ALT",
+ AllBF16Vectors, isSEWAware=1>;
+defm : VPatTernaryV_VV_VX_AAXA_RM<"int_riscv_vfnmadd", "PseudoVFNMADD_ALT",
+ AllBF16Vectors, isSEWAware=1>;
+defm : VPatTernaryV_VV_VX_AAXA_RM<"int_riscv_vfmsub", "PseudoVFMSUB_ALT",
+ AllBF16Vectors, isSEWAware=1>;
+defm : VPatTernaryV_VV_VX_AAXA_RM<"int_riscv_vfnmsub", "PseudoVFNMSUB_ALT",
+ AllBF16Vectors, isSEWAware=1>;
+defm : VPatTernaryW_VV_VX_RM<"int_riscv_vfwmacc", "PseudoVFWMACC_ALT",
+ AllWidenableBF16ToFloatVectors, isSEWAware=1>;
+defm : VPatTernaryW_VV_VX_RM<"int_riscv_vfwnmacc", "PseudoVFWNMACC_ALT",
+ AllWidenableBF16ToFloatVectors, isSEWAware=1>;
+defm : VPatTernaryW_VV_VX_RM<"int_riscv_vfwmsac", "PseudoVFWMSAC_ALT",
+ AllWidenableBF16ToFloatVectors, isSEWAware=1>;
+defm : VPatTernaryW_VV_VX_RM<"int_riscv_vfwnmsac", "PseudoVFWNMSAC_ALT",
+ AllWidenableBF16ToFloatVectors, isSEWAware=1>;
+defm : VPatUnaryV_V<"int_riscv_vfrsqrt7", "PseudoVFRSQRT7_ALT",
+ AllBF16Vectors, isSEWAware=1>;
+defm : VPatUnaryV_V_RM<"int_riscv_vfrec7", "PseudoVFREC7_ALT",
+ AllBF16Vectors, isSEWAware=1>;
+defm : VPatBinaryV_VV_VX<"int_riscv_vfmin", "PseudoVFMIN_ALT",
+ AllBF16Vectors, isSEWAware=1>;
+defm : VPatBinaryV_VV_VX<"int_riscv_vfmax", "PseudoVFMAX_ALT",
+ AllBF16Vectors, isSEWAware=1>;
+defm : VPatBinaryV_VV_VX<"int_riscv_vfsgnj", "PseudoVFSGNJ_ALT",
+ AllBF16Vectors, isSEWAware=1>;
+defm : VPatBinaryV_VV_VX<"int_riscv_vfsgnjn", "PseudoVFSGNJN_ALT",
+ AllBF16Vectors, isSEWAware=1>;
+defm : VPatBinaryV_VV_VX<"int_riscv_vfsgnjx", "PseudoVFSGNJX_ALT",
+ AllBF16Vectors, isSEWAware=1>;
+defm : VPatBinaryM_VV_VX<"int_riscv_vmfeq", "PseudoVMFEQ_ALT", AllBF16Vectors>;
+defm : VPatBinaryM_VV_VX<"int_riscv_vmfle", "PseudoVMFLE_ALT", AllBF16Vectors>;
+defm : VPatBinaryM_VV_VX<"int_riscv_vmflt", "PseudoVMFLT_ALT", AllBF16Vectors>;
+defm : VPatBinaryM_VV_VX<"int_riscv_vmfne", "PseudoVMFNE_ALT", AllBF16Vectors>;
+defm : VPatBinaryM_VX<"int_riscv_vmfgt", "PseudoVMFGT_ALT", AllBF16Vectors>;
+defm : VPatBinaryM_VX<"int_riscv_vmfge", "PseudoVMFGE_ALT", AllBF16Vectors>;
+defm : VPatBinarySwappedM_VV<"int_riscv_vmfgt", "PseudoVMFLT_ALT", AllBF16Vectors>;
+defm : VPatBinarySwappedM_VV<"int_riscv_vmfge", "PseudoVMFLE_ALT", AllBF16Vectors>;
+defm : VPatConversionVI_VF_BF16<"int_riscv_vfclass", "PseudoVFCLASS_ALT">;
+foreach vti = AllBF16Vectors in {
+ let Predicates = GetVTypePredicates<vti>.Predicates in
+ defm : VPatBinaryCarryInTAIL<"int_riscv_vfmerge", "PseudoVFMERGE_ALT",
+ "V"#vti.ScalarSuffix#"M",
+ vti.Vector,
+ vti.Vector, vti.Scalar, vti.Mask,
+ vti.Log2SEW, vti.LMul, vti.RegClass,
+ vti.RegClass, vti.ScalarRegClass>;
+}
+defm : VPatConversionWF_VI_BF16<"int_riscv_vfwcvt_f_xu_v", "PseudoVFWCVT_F_XU_ALT",
+ isSEWAware=1>;
+defm : VPatConversionWF_VI_BF16<"int_riscv_vfwcvt_f_x_v", "PseudoVFWCVT_F_X_ALT",
+ isSEWAware=1>;
+defm : VPatConversionWF_VF_BF16<"int_riscv_vfwcvt_f_f_v", "PseudoVFWCVT_F_F_ALT",
+ isSEWAware=1>;
+defm : VPatConversionVI_WF_RM_BF16<"int_riscv_vfncvt_xu_f_w", "PseudoVFNCVT_XU_F_ALT">;
+defm : VPatConversionVI_WF_RM_BF16<"int_riscv_vfncvt_x_f_w", "PseudoVFNCVT_X_F_ALT">;
+defm : VPatConversionVI_WF_BF16<"int_riscv_vfncvt_rtz_xu_f_w", "PseudoVFNCVT_RTZ_XU_F_ALT">;
+defm : VPatConversionVI_WF_BF16<"int_riscv_vfncvt_rtz_x_f_w", "PseudoVFNCVT_RTZ_X_F_ALT">;
+defm : VPatConversionVF_WF_RM<"int_riscv_vfncvt_f_f_w", "PseudoVFNCVT_F_F_ALT",
+ AllWidenableBF16ToFloatVectors, isSEWAware=1>;
+defm : VPatConversionVF_WF_BF16<"int_riscv_vfncvt_rod_f_f_w", "PseudoVFNCVT_ROD_F_F_ALT",
+ isSEWAware=1>;
+defm : VPatBinaryV_VX<"int_riscv_vfslide1up", "PseudoVFSLIDE1UP_ALT", AllBF16Vectors>;
+defm : VPatBinaryV_VX<"int_riscv_vfslide1down", "PseudoVFSLIDE1DOWN_ALT", AllBF16Vectors>;
+
+foreach fvti = AllBF16Vectors in {
+ defvar ivti = GetIntVTypeInfo<fvti>.Vti;
+ let Predicates = GetVTypePredicates<ivti>.Predicates in {
+ // 13.16. Vector Floating-Point Move Instruction
+ // If we're splatting fpimm0, use vmv.v.x vd, x0.
+ def : Pat<(fvti.Vector (riscv_vfmv_v_f_vl
+ fvti.Vector:$passthru, (fvti.Scalar (fpimm0)), VLOpFrag)),
+ (!cast<Instruction>("PseudoVMV_V_I_"#fvti.LMul.MX)
+ $passthru, 0, GPR:$vl, fvti.Log2SEW, TU_MU)>;
+ def : Pat<(fvti.Vector (riscv_vfmv_v_f_vl
+ fvti.Vector:$passthru, (fvti.Scalar (SelectScalarFPAsInt (XLenVT GPR:$imm))), VLOpFrag)),
+ (!cast<Instruction>("PseudoVMV_V_X_"#fvti.LMul.MX)
+ $passthru, GPR:$imm, GPR:$vl, fvti.Log2SEW, TU_MU)>;
+ }
+
+ let Predicates = GetVTypePredicates<fvti>.Predicates in {
+ def : Pat<(fvti.Vector (riscv_vfmv_v_f_vl
+ fvti.Vector:$passthru, (fvti.Scalar fvti.ScalarRegClass:$rs2), VLOpFrag)),
+ (!cast<Instruction>("PseudoVFMV_V_ALT_" # fvti.ScalarSuffix # "_" #
+ fvti.LMul.MX)
+ $passthru, (fvti.Scalar fvti.ScalarRegClass:$rs2),
+ GPR:$vl, fvti.Log2SEW, TU_MU)>;
+ }
+}
+
+foreach vti = NoGroupBF16Vectors in {
+ let Predicates = GetVTypePredicates<vti>.Predicates in {
+ def : Pat<(vti.Vector (riscv_vfmv_s_f_vl (vti.Vector vti.RegClass:$passthru),
+ (vti.Scalar (fpimm0)),
+ VLOpFrag)),
+ (PseudoVMV_S_X $passthru, (XLenVT X0), GPR:$vl, vti.Log2SEW)>;
+ def : Pat<(vti.Vector (riscv_vfmv_s_f_vl (vti.Vector vti.RegClass:$passthru),
+ (vti.Scalar (SelectScalarFPAsInt (XLenVT GPR:$imm))),
+ VLOpFrag)),
+ (PseudoVMV_S_X $passthru, GPR:$imm, GPR:$vl, vti.Log2SEW)>;
+ def : Pat<(vti.Vector (riscv_vfmv_s_f_vl (vti.Vector vti.RegClass:$passthru),
+ vti.ScalarRegClass:$rs1,
+ VLOpFrag)),
+ (!cast<Instruction>("PseudoVFMV_S_"#vti.ScalarSuffix#"_ALT")
+ vti.RegClass:$passthru,
+ (vti.Scalar vti.ScalarRegClass:$rs1), GPR:$vl, vti.Log2SEW)>;
+ }
+
+ defvar vfmv_f_s_inst = !cast<Instruction>(!strconcat("PseudoVFMV_",
+ vti.ScalarSuffix,
+ "_S_ALT"));
+ // Only pattern-match extract-element operations where the index is 0. Any
+ // other index will have been custom-lowered to slide the vector correctly
+ // into place.
+ let Predicates = GetVTypePredicates<vti>.Predicates in
+ def : Pat<(vti.Scalar (extractelt (vti.Vector vti.RegClass:$rs2), 0)),
+ (vfmv_f_s_inst vti.RegClass:$rs2, vti.Log2SEW)>;
+}
+} // Predicates = [HasStdExtZvfbfa]
diff --git a/llvm/lib/Target/RISCV/RISCVSubtarget.h b/llvm/lib/Target/RISCV/RISCVSubtarget.h
index 6acf799..334db4b 100644
--- a/llvm/lib/Target/RISCV/RISCVSubtarget.h
+++ b/llvm/lib/Target/RISCV/RISCVSubtarget.h
@@ -288,9 +288,12 @@ public:
bool hasVInstructionsI64() const { return HasStdExtZve64x; }
bool hasVInstructionsF16Minimal() const { return HasStdExtZvfhmin; }
bool hasVInstructionsF16() const { return HasStdExtZvfh; }
- bool hasVInstructionsBF16Minimal() const { return HasStdExtZvfbfmin; }
+ bool hasVInstructionsBF16Minimal() const {
+ return HasStdExtZvfbfmin || HasStdExtZvfbfa;
+ }
bool hasVInstructionsF32() const { return HasStdExtZve32f; }
bool hasVInstructionsF64() const { return HasStdExtZve64d; }
+ bool hasVInstructionsBF16() const { return HasStdExtZvfbfa; }
// F16 and F64 both require F32.
bool hasVInstructionsAnyF() const { return hasVInstructionsF32(); }
bool hasVInstructionsFullMultiply() const { return HasStdExtV; }
diff --git a/llvm/lib/Target/SPIRV/SPIRVModuleAnalysis.cpp b/llvm/lib/Target/SPIRV/SPIRVModuleAnalysis.cpp
index 6760f5a..61a0bbe 100644
--- a/llvm/lib/Target/SPIRV/SPIRVModuleAnalysis.cpp
+++ b/llvm/lib/Target/SPIRV/SPIRVModuleAnalysis.cpp
@@ -1200,6 +1200,23 @@ void addOpAccessChainReqs(const MachineInstr &Instr,
return;
}
+ bool IsNonUniform =
+ hasNonUniformDecoration(Instr.getOperand(0).getReg(), MRI);
+
+ auto FirstIndexReg = Instr.getOperand(3).getReg();
+ bool FirstIndexIsConstant =
+ Subtarget.getInstrInfo()->isConstantInstr(*MRI.getVRegDef(FirstIndexReg));
+
+ if (StorageClass == SPIRV::StorageClass::StorageClass::StorageBuffer) {
+ if (IsNonUniform)
+ Handler.addRequirements(
+ SPIRV::Capability::StorageBufferArrayNonUniformIndexingEXT);
+ else if (!FirstIndexIsConstant)
+ Handler.addRequirements(
+ SPIRV::Capability::StorageBufferArrayDynamicIndexing);
+ return;
+ }
+
Register PointeeTypeReg = ResTypeInst->getOperand(2).getReg();
MachineInstr *PointeeType = MRI.getUniqueVRegDef(PointeeTypeReg);
if (PointeeType->getOpcode() != SPIRV::OpTypeImage &&
@@ -1208,27 +1225,25 @@ void addOpAccessChainReqs(const MachineInstr &Instr,
return;
}
- bool IsNonUniform =
- hasNonUniformDecoration(Instr.getOperand(0).getReg(), MRI);
if (isUniformTexelBuffer(PointeeType)) {
if (IsNonUniform)
Handler.addRequirements(
SPIRV::Capability::UniformTexelBufferArrayNonUniformIndexingEXT);
- else
+ else if (!FirstIndexIsConstant)
Handler.addRequirements(
SPIRV::Capability::UniformTexelBufferArrayDynamicIndexingEXT);
} else if (isInputAttachment(PointeeType)) {
if (IsNonUniform)
Handler.addRequirements(
SPIRV::Capability::InputAttachmentArrayNonUniformIndexingEXT);
- else
+ else if (!FirstIndexIsConstant)
Handler.addRequirements(
SPIRV::Capability::InputAttachmentArrayDynamicIndexingEXT);
} else if (isStorageTexelBuffer(PointeeType)) {
if (IsNonUniform)
Handler.addRequirements(
SPIRV::Capability::StorageTexelBufferArrayNonUniformIndexingEXT);
- else
+ else if (!FirstIndexIsConstant)
Handler.addRequirements(
SPIRV::Capability::StorageTexelBufferArrayDynamicIndexingEXT);
} else if (isSampledImage(PointeeType) ||
@@ -1237,14 +1252,14 @@ void addOpAccessChainReqs(const MachineInstr &Instr,
if (IsNonUniform)
Handler.addRequirements(
SPIRV::Capability::SampledImageArrayNonUniformIndexingEXT);
- else
+ else if (!FirstIndexIsConstant)
Handler.addRequirements(
SPIRV::Capability::SampledImageArrayDynamicIndexing);
} else if (isStorageImage(PointeeType)) {
if (IsNonUniform)
Handler.addRequirements(
SPIRV::Capability::StorageImageArrayNonUniformIndexingEXT);
- else
+ else if (!FirstIndexIsConstant)
Handler.addRequirements(
SPIRV::Capability::StorageImageArrayDynamicIndexing);
}
diff --git a/llvm/lib/Target/X86/X86ISelLowering.cpp b/llvm/lib/Target/X86/X86ISelLowering.cpp
index 2feee05..b05d7c7 100644
--- a/llvm/lib/Target/X86/X86ISelLowering.cpp
+++ b/llvm/lib/Target/X86/X86ISelLowering.cpp
@@ -44813,10 +44813,16 @@ bool X86TargetLowering::SimplifyDemandedBitsForTargetNode(
}
case X86ISD::PCMPGT:
// icmp sgt(0, R) == ashr(R, BitWidth-1).
- // iff we only need the sign bit then we can use R directly.
- if (OriginalDemandedBits.isSignMask() &&
- ISD::isBuildVectorAllZeros(Op.getOperand(0).getNode()))
- return TLO.CombineTo(Op, Op.getOperand(1));
+ if (ISD::isBuildVectorAllZeros(Op.getOperand(0).getNode())) {
+      // iff we only need the sign bit then we can use R directly.
+ if (OriginalDemandedBits.isSignMask())
+ return TLO.CombineTo(Op, Op.getOperand(1));
+      // Otherwise we just need R's sign bit for the comparison.
+ APInt SignMask = APInt::getSignMask(BitWidth);
+ if (SimplifyDemandedBits(Op.getOperand(1), SignMask, OriginalDemandedElts,
+ Known, TLO, Depth + 1))
+ return true;
+ }
break;
case X86ISD::MOVMSK: {
SDValue Src = Op.getOperand(0);
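// Editorial sketch, not part of the patch: why the PCMPGT(0, R) handling above
// is sound. Per lane, pcmpgt(0, R) yields all-ones when R is negative and
// all-zeros otherwise, i.e. an arithmetic shift of R by BitWidth-1, so its
// sign bit equals R's sign bit (hence R can stand in when only the sign bit is
// demanded, and only R's sign bit feeds the comparison otherwise). A scalar
// model of one 32-bit lane; assumes the usual sign-filling behaviour of >> on
// negative values:

#include <cassert>
#include <cstdint>

static int32_t PcmpgtZero(int32_t R) { return 0 > R ? -1 : 0; } // all-ones / all-zeros
static int32_t Ashr31(int32_t R) { return R >> 31; }            // ashr(R, BitWidth-1)

int main() {
  for (int32_t R : {0, 1, -1, 42, -42, INT32_MIN, INT32_MAX}) {
    assert(PcmpgtZero(R) == Ashr31(R));
    // When only the sign bit is demanded, it matches R's sign bit exactly.
    assert((PcmpgtZero(R) & INT32_MIN) == (R & INT32_MIN));
  }
  return 0;
}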
diff --git a/llvm/lib/Target/X86/X86InstrInfo.cpp b/llvm/lib/Target/X86/X86InstrInfo.cpp
index 1d2cd39..5c23f91 100644
--- a/llvm/lib/Target/X86/X86InstrInfo.cpp
+++ b/llvm/lib/Target/X86/X86InstrInfo.cpp
@@ -10809,39 +10809,27 @@ void X86InstrInfo::buildClearRegister(Register Reg, MachineBasicBlock &MBB,
if (!ST.hasSSE1())
return;
- // PXOR is safe to use because it doesn't affect flags.
- BuildMI(MBB, Iter, DL, get(X86::PXORrr), Reg)
- .addReg(Reg, RegState::Undef)
- .addReg(Reg, RegState::Undef);
+ BuildMI(MBB, Iter, DL, get(X86::V_SET0), Reg);
} else if (X86::VR256RegClass.contains(Reg)) {
// YMM#
if (!ST.hasAVX())
return;
- // VPXOR is safe to use because it doesn't affect flags.
- BuildMI(MBB, Iter, DL, get(X86::VPXORrr), Reg)
- .addReg(Reg, RegState::Undef)
- .addReg(Reg, RegState::Undef);
+ BuildMI(MBB, Iter, DL, get(X86::AVX_SET0), Reg);
} else if (X86::VR512RegClass.contains(Reg)) {
// ZMM#
if (!ST.hasAVX512())
return;
- // VPXORY is safe to use because it doesn't affect flags.
- BuildMI(MBB, Iter, DL, get(X86::VPXORYrr), Reg)
- .addReg(Reg, RegState::Undef)
- .addReg(Reg, RegState::Undef);
+ BuildMI(MBB, Iter, DL, get(X86::AVX512_512_SET0), Reg);
} else if (X86::VK1RegClass.contains(Reg) || X86::VK2RegClass.contains(Reg) ||
X86::VK4RegClass.contains(Reg) || X86::VK8RegClass.contains(Reg) ||
X86::VK16RegClass.contains(Reg)) {
if (!ST.hasVLX())
return;
- // KXOR is safe to use because it doesn't affect flags.
- unsigned Op = ST.hasBWI() ? X86::KXORQkk : X86::KXORWkk;
- BuildMI(MBB, Iter, DL, get(Op), Reg)
- .addReg(Reg, RegState::Undef)
- .addReg(Reg, RegState::Undef);
+ unsigned Op = ST.hasBWI() ? X86::KSET0Q : X86::KSET0W;
+ BuildMI(MBB, Iter, DL, get(Op), Reg);
}
}
diff --git a/llvm/lib/Transforms/InstCombine/InstCombineSelect.cpp b/llvm/lib/Transforms/InstCombine/InstCombineSelect.cpp
index 09cb225..a8eb9b9 100644
--- a/llvm/lib/Transforms/InstCombine/InstCombineSelect.cpp
+++ b/llvm/lib/Transforms/InstCombine/InstCombineSelect.cpp
@@ -3757,6 +3757,10 @@ static Instruction *foldBitCeil(SelectInst &SI, IRBuilderBase &Builder,
// (x < y) ? -1 : zext(x > y)
// (x > y) ? 1 : sext(x != y)
// (x > y) ? 1 : sext(x < y)
+// (x == y) ? 0 : (x > y ? 1 : -1)
+// (x == y) ? 0 : (x < y ? -1 : 1)
+// Special case: x == C ? 0 : (x > C - 1 ? 1 : -1)
+// Special case: x == C ? 0 : (x < C + 1 ? -1 : 1)
// Into ucmp/scmp(x, y), where signedness is determined by the signedness
// of the comparison in the original sequence.
Instruction *InstCombinerImpl::foldSelectToCmp(SelectInst &SI) {
@@ -3849,6 +3853,44 @@ Instruction *InstCombinerImpl::foldSelectToCmp(SelectInst &SI) {
}
}
+ // Special cases with constants: x == C ? 0 : (x > C-1 ? 1 : -1)
+ if (Pred == ICmpInst::ICMP_EQ && match(TV, m_Zero())) {
+ const APInt *C;
+ if (match(RHS, m_APInt(C))) {
+ CmpPredicate InnerPred;
+ Value *InnerRHS;
+ const APInt *InnerTV, *InnerFV;
+ if (match(FV,
+ m_Select(m_ICmp(InnerPred, m_Specific(LHS), m_Value(InnerRHS)),
+ m_APInt(InnerTV), m_APInt(InnerFV)))) {
+
+ // x == C ? 0 : (x > C-1 ? 1 : -1)
+ if (ICmpInst::isGT(InnerPred) && InnerTV->isOne() &&
+ InnerFV->isAllOnes()) {
+ IsSigned = ICmpInst::isSigned(InnerPred);
+ bool CanSubOne = IsSigned ? !C->isMinSignedValue() : !C->isMinValue();
+ if (CanSubOne) {
+ APInt Cminus1 = *C - 1;
+ if (match(InnerRHS, m_SpecificInt(Cminus1)))
+ Replace = true;
+ }
+ }
+
+ // x == C ? 0 : (x < C+1 ? -1 : 1)
+ if (ICmpInst::isLT(InnerPred) && InnerTV->isAllOnes() &&
+ InnerFV->isOne()) {
+ IsSigned = ICmpInst::isSigned(InnerPred);
+ bool CanAddOne = IsSigned ? !C->isMaxSignedValue() : !C->isMaxValue();
+ if (CanAddOne) {
+ APInt Cplus1 = *C + 1;
+ if (match(InnerRHS, m_SpecificInt(Cplus1)))
+ Replace = true;
+ }
+ }
+ }
+ }
+ }
+
Intrinsic::ID IID = IsSigned ? Intrinsic::scmp : Intrinsic::ucmp;
if (Replace)
return replaceInstUsesWith(
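// Editorial sketch, not part of the patch: the constant special case added
// above rests on the identity that, when C-1 does not overflow (the CanSubOne
// check), "x == C ? 0 : (x > C-1 ? 1 : -1)" is exactly the signed three-way
// comparison scmp(x, C); the C+1 form is symmetric. A standalone check with
// illustrative names:

#include <cassert>

static int Scmp(int X, int C) { return (X > C) - (X < C); } // -1, 0, or 1

static int Before(int X, int C) { return X == C ? 0 : (X > C - 1 ? 1 : -1); }

int main() {
  for (int C : {-3, 0, 7}) {
    for (int X = C - 2; X <= C + 2; ++X)
      assert(Before(X, C) == Scmp(X, C));
  }
  return 0;
}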
diff --git a/llvm/lib/Transforms/Instrumentation/AddressSanitizer.cpp b/llvm/lib/Transforms/Instrumentation/AddressSanitizer.cpp
index 6e17801..2646334 100644
--- a/llvm/lib/Transforms/Instrumentation/AddressSanitizer.cpp
+++ b/llvm/lib/Transforms/Instrumentation/AddressSanitizer.cpp
@@ -844,6 +844,7 @@ struct AddressSanitizer {
bool maybeInsertAsanInitAtFunctionEntry(Function &F);
bool maybeInsertDynamicShadowAtFunctionEntry(Function &F);
void markEscapedLocalAllocas(Function &F);
+ void markCatchParametersAsUninteresting(Function &F);
private:
friend struct FunctionStackPoisoner;
@@ -2997,6 +2998,22 @@ void AddressSanitizer::markEscapedLocalAllocas(Function &F) {
}
}
}
+// Mitigation for https://github.com/google/sanitizers/issues/749
+// We don't instrument Windows catch-block parameters to avoid
+// interfering with exception handling assumptions.
+void AddressSanitizer::markCatchParametersAsUninteresting(Function &F) {
+ for (BasicBlock &BB : F) {
+ for (Instruction &I : BB) {
+ if (auto *CatchPad = dyn_cast<CatchPadInst>(&I)) {
+ // Mark the parameters to a catch-block as uninteresting to avoid
+ // instrumenting them.
+ for (Value *Operand : CatchPad->arg_operands())
+ if (auto *AI = dyn_cast<AllocaInst>(Operand))
+ ProcessedAllocas[AI] = false;
+ }
+ }
+ }
+}
bool AddressSanitizer::suppressInstrumentationSiteForDebug(int &Instrumented) {
bool ShouldInstrument =
@@ -3041,6 +3058,9 @@ bool AddressSanitizer::instrumentFunction(Function &F,
// can be passed to that intrinsic.
markEscapedLocalAllocas(F);
+ if (TargetTriple.isOSWindows())
+ markCatchParametersAsUninteresting(F);
+
// We want to instrument every address only once per basic block (unless there
// are calls between uses).
SmallPtrSet<Value *, 16> TempsToInstrument;
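A minimal sketch of the situation the new ASan hook addresses, assuming MSVC-style exception handling on Windows; the function names below are illustrative, not from the patch.

```cpp
// Illustrative only. Under Windows EH, the alloca that backs `caught` is an
// operand of the catchpad instruction, so markCatchParametersAsUninteresting()
// records it in ProcessedAllocas as "not interesting" and ASan leaves it
// uninstrumented, sidestepping the EH-runtime conflict tracked in
// https://github.com/google/sanitizers/issues/749.
void mayThrow();

void handler() {
  try {
    mayThrow();
  } catch (int caught) { // catch-block parameter: skipped by ASan on Windows
    (void)caught;
  }
}
```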
diff --git a/llvm/lib/Transforms/Scalar/LoopPassManager.cpp b/llvm/lib/Transforms/Scalar/LoopPassManager.cpp
index 7da8586..d827e64 100644
--- a/llvm/lib/Transforms/Scalar/LoopPassManager.cpp
+++ b/llvm/lib/Transforms/Scalar/LoopPassManager.cpp
@@ -8,7 +8,6 @@
#include "llvm/Transforms/Scalar/LoopPassManager.h"
#include "llvm/Analysis/AssumptionCache.h"
-#include "llvm/Analysis/BlockFrequencyInfo.h"
#include "llvm/Analysis/MemorySSA.h"
#include "llvm/Analysis/ScalarEvolution.h"
#include "llvm/Analysis/TargetLibraryInfo.h"
@@ -217,9 +216,6 @@ PreservedAnalyses FunctionToLoopPassAdaptor::run(Function &F,
// Get the analysis results needed by loop passes.
MemorySSA *MSSA =
UseMemorySSA ? (&AM.getResult<MemorySSAAnalysis>(F).getMSSA()) : nullptr;
- BlockFrequencyInfo *BFI = UseBlockFrequencyInfo && F.hasProfileData()
- ? (&AM.getResult<BlockFrequencyAnalysis>(F))
- : nullptr;
LoopStandardAnalysisResults LAR = {AM.getResult<AAManager>(F),
AM.getResult<AssumptionAnalysis>(F),
AM.getResult<DominatorTreeAnalysis>(F),
@@ -227,7 +223,6 @@ PreservedAnalyses FunctionToLoopPassAdaptor::run(Function &F,
AM.getResult<ScalarEvolutionAnalysis>(F),
AM.getResult<TargetLibraryAnalysis>(F),
AM.getResult<TargetIRAnalysis>(F),
- BFI,
MSSA};
// Setup the loop analysis manager from its proxy. It is important that
diff --git a/llvm/lib/Transforms/Scalar/LowerMatrixIntrinsics.cpp b/llvm/lib/Transforms/Scalar/LowerMatrixIntrinsics.cpp
index ed4e2b1..3487e81 100644
--- a/llvm/lib/Transforms/Scalar/LowerMatrixIntrinsics.cpp
+++ b/llvm/lib/Transforms/Scalar/LowerMatrixIntrinsics.cpp
@@ -97,6 +97,12 @@ static cl::opt<MatrixLayoutTy> MatrixLayout(
static cl::opt<bool> PrintAfterTransposeOpt("matrix-print-after-transpose-opt",
cl::init(false));
+static cl::opt<unsigned> SplitMatmulRemainderOverThreshold(
+ "matrix-split-matmul-remainder-over-threshold", cl::Hidden,
+ cl::desc("Illegal remainder vectors over this size in bits should be split "
+ "in the inner loop of matmul"),
+ cl::init(0));
+
/// Helper function to either return Scope, if it is a subprogram or the
/// attached subprogram for a local scope.
static DISubprogram *getSubprogram(DIScope *Scope) {
@@ -1720,6 +1726,31 @@ public:
ToRemove.push_back(MatMul);
}
+ /// Given \p Remainder iterations of the matmul inner loop,
+ /// potentially lower \p BlockSize that is used for the underlying
+ /// vector.
+ unsigned capBlockSize(unsigned BlockSize, unsigned Remainder, Type *EltType) {
+ if (BlockSize <= Remainder)
+ return BlockSize;
+
+ // If the remainder is also a legal type just use it.
+ auto *VecTy = FixedVectorType::get(EltType, Remainder);
+ if (TTI.isTypeLegal(VecTy))
+ return Remainder;
+
+ // Similarly, use the remainder if the vector is small enough that we
+ // don't want to split it further.
+ if (VecTy->getPrimitiveSizeInBits() <= SplitMatmulRemainderOverThreshold)
+ return Remainder;
+
+ // Gradually lower the vectorization factor to cover the
+ // remainder.
+ do {
+ BlockSize /= 2;
+ } while (BlockSize > Remainder);
+ return BlockSize;
+ }
+
/// Compute \p Result += \p A * \p B for input matrices with left-associating
/// addition.
///
@@ -1757,10 +1788,8 @@ public:
bool isSumZero = isa<ConstantAggregateZero>(Result.getColumn(J));
for (unsigned I = 0; I < R; I += BlockSize) {
- // Gradually lower the vectorization factor to cover the remainder.
- while (I + BlockSize > R)
- BlockSize /= 2;
-
+ // Lower block size to make sure we stay within bounds.
+ BlockSize = capBlockSize(BlockSize, R - I, Result.getElementType());
Value *Sum = IsTiled ? Result.extractVector(I, J, BlockSize, Builder)
: nullptr;
for (unsigned K = 0; K < M; ++K) {
@@ -1785,9 +1814,8 @@ public:
unsigned BlockSize = VF;
bool isSumZero = isa<ConstantAggregateZero>(Result.getRow(I));
for (unsigned J = 0; J < C; J += BlockSize) {
- // Gradually lower the vectorization factor to cover the remainder.
- while (J + BlockSize > C)
- BlockSize /= 2;
+ // Lower the vectorization factor to cover the remainder.
+ BlockSize = capBlockSize(BlockSize, C - J, Result.getElementType());
Value *Sum = nullptr;
for (unsigned K = 0; K < M; ++K) {
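A worked example of the new capping logic, written as a stand-alone sketch rather than the pass's actual code; it assumes the remainder type is not legal for the target and the new flag is left at its default of 0, so neither early return in capBlockSize fires and only the halving loop runs.

```cpp
// Sketch of capBlockSize's fallback path under the assumptions stated above.
unsigned capBlockSizeSketch(unsigned BlockSize, unsigned Remainder) {
  if (BlockSize <= Remainder)
    return BlockSize;
  // Halve until the block fits. Example: BlockSize = 8, Remainder = 3
  // gives 8 -> 4 -> 2, so the final partial iteration uses a 2-wide vector,
  // matching what the old inline "while (I + BlockSize > R)" loop produced.
  do {
    BlockSize /= 2;
  } while (BlockSize > Remainder);
  return BlockSize;
}
```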
diff --git a/llvm/lib/Transforms/Scalar/SimpleLoopUnswitch.cpp b/llvm/lib/Transforms/Scalar/SimpleLoopUnswitch.cpp
index e4ba70d..5af6c96 100644
--- a/llvm/lib/Transforms/Scalar/SimpleLoopUnswitch.cpp
+++ b/llvm/lib/Transforms/Scalar/SimpleLoopUnswitch.cpp
@@ -27,7 +27,6 @@
#include "llvm/Analysis/MemorySSA.h"
#include "llvm/Analysis/MemorySSAUpdater.h"
#include "llvm/Analysis/MustExecute.h"
-#include "llvm/Analysis/ProfileSummaryInfo.h"
#include "llvm/Analysis/ScalarEvolution.h"
#include "llvm/Analysis/TargetTransformInfo.h"
#include "llvm/Analysis/ValueTracking.h"
@@ -3611,8 +3610,7 @@ static bool unswitchLoop(Loop &L, DominatorTree &DT, LoopInfo &LI,
AssumptionCache &AC, AAResults &AA,
TargetTransformInfo &TTI, bool Trivial,
bool NonTrivial, ScalarEvolution *SE,
- MemorySSAUpdater *MSSAU, ProfileSummaryInfo *PSI,
- BlockFrequencyInfo *BFI, LPMUpdater &LoopUpdater) {
+ MemorySSAUpdater *MSSAU, LPMUpdater &LoopUpdater) {
assert(L.isRecursivelyLCSSAForm(DT, LI) &&
"Loops must be in LCSSA form before unswitching.");
@@ -3652,35 +3650,6 @@ static bool unswitchLoop(Loop &L, DominatorTree &DT, LoopInfo &LI,
if (F->hasOptSize())
return false;
- // Returns true if Loop L's loop nest is cold, i.e. if the headers of L,
- // of the loops L is nested in, and of the loops nested in L are all cold.
- auto IsLoopNestCold = [&](const Loop *L) {
- // Check L and all of its parent loops.
- auto *Parent = L;
- while (Parent) {
- if (!PSI->isColdBlock(Parent->getHeader(), BFI))
- return false;
- Parent = Parent->getParentLoop();
- }
- // Next check all loops nested within L.
- SmallVector<const Loop *, 4> Worklist;
- llvm::append_range(Worklist, L->getSubLoops());
- while (!Worklist.empty()) {
- auto *CurLoop = Worklist.pop_back_val();
- if (!PSI->isColdBlock(CurLoop->getHeader(), BFI))
- return false;
- llvm::append_range(Worklist, CurLoop->getSubLoops());
- }
- return true;
- };
-
- // Skip cold loops in cold loop nests, as unswitching them brings little
- // benefit but increases the code size
- if (PSI && PSI->hasProfileSummary() && BFI && IsLoopNestCold(&L)) {
- LLVM_DEBUG(dbgs() << " Skip cold loop: " << L << "\n");
- return false;
- }
-
// Perform legality checks.
if (!isSafeForNoNTrivialUnswitching(L, LI))
return false;
@@ -3705,11 +3674,6 @@ PreservedAnalyses SimpleLoopUnswitchPass::run(Loop &L, LoopAnalysisManager &AM,
LPMUpdater &U) {
Function &F = *L.getHeader()->getParent();
(void)F;
- ProfileSummaryInfo *PSI = nullptr;
- if (auto OuterProxy =
- AM.getResult<FunctionAnalysisManagerLoopProxy>(L, AR)
- .getCachedResult<ModuleAnalysisManagerFunctionProxy>(F))
- PSI = OuterProxy->getCachedResult<ProfileSummaryAnalysis>(*F.getParent());
LLVM_DEBUG(dbgs() << "Unswitching loop in " << F.getName() << ": " << L
<< "\n");
@@ -3720,7 +3684,7 @@ PreservedAnalyses SimpleLoopUnswitchPass::run(Loop &L, LoopAnalysisManager &AM,
AR.MSSA->verifyMemorySSA();
}
if (!unswitchLoop(L, AR.DT, AR.LI, AR.AC, AR.AA, AR.TTI, Trivial, NonTrivial,
- &AR.SE, MSSAU ? &*MSSAU : nullptr, PSI, AR.BFI, U))
+ &AR.SE, MSSAU ? &*MSSAU : nullptr, U))
return PreservedAnalyses::all();
if (AR.MSSA && VerifyMemorySSA)
diff --git a/llvm/lib/Transforms/Utils/SCCPSolver.cpp b/llvm/lib/Transforms/Utils/SCCPSolver.cpp
index 9693ae6..b80c3c9 100644
--- a/llvm/lib/Transforms/Utils/SCCPSolver.cpp
+++ b/llvm/lib/Transforms/Utils/SCCPSolver.cpp
@@ -634,18 +634,10 @@ private:
/// Merge \p MergeWithV into \p IV and push \p V to the worklist, if \p IV
/// changes.
bool mergeInValue(ValueLatticeElement &IV, Value *V,
- ValueLatticeElement MergeWithV,
+ const ValueLatticeElement &MergeWithV,
ValueLatticeElement::MergeOptions Opts = {
/*MayIncludeUndef=*/false, /*CheckWiden=*/false});
- bool mergeInValue(Value *V, ValueLatticeElement MergeWithV,
- ValueLatticeElement::MergeOptions Opts = {
- /*MayIncludeUndef=*/false, /*CheckWiden=*/false}) {
- assert(!V->getType()->isStructTy() &&
- "non-structs should use markConstant");
- return mergeInValue(ValueState[V], V, MergeWithV, Opts);
- }
-
/// getValueState - Return the ValueLatticeElement object that corresponds to
/// the value. This function handles the case when the value hasn't been seen
/// yet by properly seeding constants etc.
@@ -987,7 +979,7 @@ public:
void trackValueOfArgument(Argument *A) {
if (A->getType()->isStructTy())
return (void)markOverdefined(A);
- mergeInValue(A, getArgAttributeVL(A));
+ mergeInValue(ValueState[A], A, getArgAttributeVL(A));
}
bool isStructLatticeConstant(Function *F, StructType *STy);
@@ -1128,8 +1120,7 @@ bool SCCPInstVisitor::isStructLatticeConstant(Function *F, StructType *STy) {
for (unsigned i = 0, e = STy->getNumElements(); i != e; ++i) {
const auto &It = TrackedMultipleRetVals.find(std::make_pair(F, i));
assert(It != TrackedMultipleRetVals.end());
- ValueLatticeElement LV = It->second;
- if (!SCCPSolver::isConstant(LV))
+ if (!SCCPSolver::isConstant(It->second))
return false;
}
return true;
@@ -1160,7 +1151,7 @@ Constant *SCCPInstVisitor::getConstantOrNull(Value *V) const {
std::vector<Constant *> ConstVals;
auto *ST = cast<StructType>(V->getType());
for (unsigned I = 0, E = ST->getNumElements(); I != E; ++I) {
- ValueLatticeElement LV = LVs[I];
+ const ValueLatticeElement &LV = LVs[I];
ConstVals.push_back(SCCPSolver::isConstant(LV)
? getConstant(LV, ST->getElementType(I))
: UndefValue::get(ST->getElementType(I)));
@@ -1225,7 +1216,7 @@ void SCCPInstVisitor::visitInstruction(Instruction &I) {
}
bool SCCPInstVisitor::mergeInValue(ValueLatticeElement &IV, Value *V,
- ValueLatticeElement MergeWithV,
+ const ValueLatticeElement &MergeWithV,
ValueLatticeElement::MergeOptions Opts) {
if (IV.mergeIn(MergeWithV, Opts)) {
pushUsersToWorkList(V);
@@ -1264,7 +1255,7 @@ void SCCPInstVisitor::getFeasibleSuccessors(Instruction &TI,
return;
}
- ValueLatticeElement BCValue = getValueState(BI->getCondition());
+ const ValueLatticeElement &BCValue = getValueState(BI->getCondition());
ConstantInt *CI = getConstantInt(BCValue, BI->getCondition()->getType());
if (!CI) {
// Overdefined condition variables, and branches on unfoldable constant
@@ -1326,7 +1317,7 @@ void SCCPInstVisitor::getFeasibleSuccessors(Instruction &TI,
// the target as executable.
if (auto *IBR = dyn_cast<IndirectBrInst>(&TI)) {
// Casts are folded by visitCastInst.
- ValueLatticeElement IBRValue = getValueState(IBR->getAddress());
+ const ValueLatticeElement &IBRValue = getValueState(IBR->getAddress());
BlockAddress *Addr = dyn_cast_or_null<BlockAddress>(
getConstant(IBRValue, IBR->getAddress()->getType()));
if (!Addr) { // Overdefined or unknown condition?
@@ -1408,7 +1399,7 @@ void SCCPInstVisitor::visitPHINode(PHINode &PN) {
if (!isEdgeFeasible(PN.getIncomingBlock(i), PN.getParent()))
continue;
- ValueLatticeElement IV = getValueState(PN.getIncomingValue(i));
+ const ValueLatticeElement &IV = getValueState(PN.getIncomingValue(i));
PhiState.mergeIn(IV);
NumActiveIncoming++;
if (PhiState.isOverdefined())
@@ -1420,10 +1411,10 @@ void SCCPInstVisitor::visitPHINode(PHINode &PN) {
// extensions to match the number of active incoming values. This helps to
// limit multiple extensions caused by the same incoming value, if other
// incoming values are equal.
- mergeInValue(&PN, PhiState,
+ ValueLatticeElement &PhiStateRef = ValueState[&PN];
+ mergeInValue(PhiStateRef, &PN, PhiState,
ValueLatticeElement::MergeOptions().setMaxWidenSteps(
NumActiveIncoming + 1));
- ValueLatticeElement &PhiStateRef = getValueState(&PN);
PhiStateRef.setNumRangeExtensions(
std::max(NumActiveIncoming, PhiStateRef.getNumRangeExtensions()));
}
@@ -1481,7 +1472,7 @@ void SCCPInstVisitor::visitCastInst(CastInst &I) {
}
}
- ValueLatticeElement OpSt = getValueState(I.getOperand(0));
+ const ValueLatticeElement &OpSt = getValueState(I.getOperand(0));
if (OpSt.isUnknownOrUndef())
return;
@@ -1496,9 +1487,9 @@ void SCCPInstVisitor::visitCastInst(CastInst &I) {
if (I.getDestTy()->isIntOrIntVectorTy() &&
I.getSrcTy()->isIntOrIntVectorTy() &&
I.getOpcode() != Instruction::BitCast) {
- auto &LV = getValueState(&I);
ConstantRange OpRange =
OpSt.asConstantRange(I.getSrcTy(), /*UndefAllowed=*/false);
+ auto &LV = getValueState(&I);
Type *DestTy = I.getDestTy();
ConstantRange Res = ConstantRange::getEmpty(DestTy->getScalarSizeInBits());
@@ -1516,19 +1507,24 @@ void SCCPInstVisitor::handleExtractOfWithOverflow(ExtractValueInst &EVI,
const WithOverflowInst *WO,
unsigned Idx) {
Value *LHS = WO->getLHS(), *RHS = WO->getRHS();
- ValueLatticeElement L = getValueState(LHS);
- ValueLatticeElement R = getValueState(RHS);
+ Type *Ty = LHS->getType();
+
addAdditionalUser(LHS, &EVI);
addAdditionalUser(RHS, &EVI);
- if (L.isUnknownOrUndef() || R.isUnknownOrUndef())
- return; // Wait to resolve.
- Type *Ty = LHS->getType();
+ const ValueLatticeElement &L = getValueState(LHS);
+ if (L.isUnknownOrUndef())
+ return; // Wait to resolve.
ConstantRange LR = L.asConstantRange(Ty, /*UndefAllowed=*/false);
+
+ const ValueLatticeElement &R = getValueState(RHS);
+ if (R.isUnknownOrUndef())
+ return; // Wait to resolve.
+
ConstantRange RR = R.asConstantRange(Ty, /*UndefAllowed=*/false);
if (Idx == 0) {
ConstantRange Res = LR.binaryOp(WO->getBinaryOp(), RR);
- mergeInValue(&EVI, ValueLatticeElement::getRange(Res));
+ mergeInValue(ValueState[&EVI], &EVI, ValueLatticeElement::getRange(Res));
} else {
assert(Idx == 1 && "Index can only be 0 or 1");
ConstantRange NWRegion = ConstantRange::makeGuaranteedNoWrapRegion(
@@ -1560,7 +1556,7 @@ void SCCPInstVisitor::visitExtractValueInst(ExtractValueInst &EVI) {
if (auto *WO = dyn_cast<WithOverflowInst>(AggVal))
return handleExtractOfWithOverflow(EVI, WO, i);
ValueLatticeElement EltVal = getStructValueState(AggVal, i);
- mergeInValue(getValueState(&EVI), &EVI, EltVal);
+ mergeInValue(ValueState[&EVI], &EVI, EltVal);
} else {
// Otherwise, must be extracting from an array.
return (void)markOverdefined(&EVI);
@@ -1616,14 +1612,18 @@ void SCCPInstVisitor::visitSelectInst(SelectInst &I) {
if (ValueState[&I].isOverdefined())
return (void)markOverdefined(&I);
- ValueLatticeElement CondValue = getValueState(I.getCondition());
+ const ValueLatticeElement &CondValue = getValueState(I.getCondition());
if (CondValue.isUnknownOrUndef())
return;
if (ConstantInt *CondCB =
getConstantInt(CondValue, I.getCondition()->getType())) {
Value *OpVal = CondCB->isZero() ? I.getFalseValue() : I.getTrueValue();
- mergeInValue(&I, getValueState(OpVal));
+ const ValueLatticeElement &OpValState = getValueState(OpVal);
+ // Safety: looking up ValueState[&I] cannot invalidate OpValState, because
+ // &I is already in the map, so no insertion (and thus no rehash) occurs.
+ assert(ValueState.contains(&I) && "&I is not in ValueState map.");
+ mergeInValue(ValueState[&I], &I, OpValState);
return;
}
@@ -1721,7 +1721,7 @@ void SCCPInstVisitor::visitBinaryOperator(Instruction &I) {
// being a special floating value.
ValueLatticeElement NewV;
NewV.markConstant(C, /*MayIncludeUndef=*/true);
- return (void)mergeInValue(&I, NewV);
+ return (void)mergeInValue(ValueState[&I], &I, NewV);
}
}
@@ -1741,7 +1741,7 @@ void SCCPInstVisitor::visitBinaryOperator(Instruction &I) {
R = A.overflowingBinaryOp(BO->getOpcode(), B, OBO->getNoWrapKind());
else
R = A.binaryOp(BO->getOpcode(), B);
- mergeInValue(&I, ValueLatticeElement::getRange(R));
+ mergeInValue(ValueState[&I], &I, ValueLatticeElement::getRange(R));
// TODO: Currently we do not exploit special values that produce something
// better than overdefined with an overdefined operand for vector or floating
@@ -1767,7 +1767,7 @@ void SCCPInstVisitor::visitCmpInst(CmpInst &I) {
if (C) {
ValueLatticeElement CV;
CV.markConstant(C);
- mergeInValue(&I, CV);
+ mergeInValue(ValueState[&I], &I, CV);
return;
}
@@ -1802,7 +1802,7 @@ void SCCPInstVisitor::visitGetElementPtrInst(GetElementPtrInst &I) {
Operands.reserve(I.getNumOperands());
for (unsigned i = 0, e = I.getNumOperands(); i != e; ++i) {
- ValueLatticeElement State = getValueState(I.getOperand(i));
+ const ValueLatticeElement &State = getValueState(I.getOperand(i));
if (State.isUnknownOrUndef())
return; // Operands are not resolved yet.
@@ -1881,14 +1881,13 @@ void SCCPInstVisitor::visitLoadInst(LoadInst &I) {
if (ValueState[&I].isOverdefined())
return (void)markOverdefined(&I);
- ValueLatticeElement PtrVal = getValueState(I.getOperand(0));
+ const ValueLatticeElement &PtrVal = getValueState(I.getOperand(0));
if (PtrVal.isUnknownOrUndef())
return; // The pointer is not resolved yet!
- ValueLatticeElement &IV = ValueState[&I];
-
if (SCCPSolver::isConstant(PtrVal)) {
Constant *Ptr = getConstant(PtrVal, I.getOperand(0)->getType());
+ ValueLatticeElement &IV = ValueState[&I];
// load null is undefined.
if (isa<ConstantPointerNull>(Ptr)) {
@@ -1916,7 +1915,7 @@ void SCCPInstVisitor::visitLoadInst(LoadInst &I) {
}
// Fall back to metadata.
- mergeInValue(&I, getValueFromMetadata(&I));
+ mergeInValue(ValueState[&I], &I, getValueFromMetadata(&I));
}
void SCCPInstVisitor::visitCallBase(CallBase &CB) {
@@ -1944,7 +1943,7 @@ void SCCPInstVisitor::handleCallOverdefined(CallBase &CB) {
return markOverdefined(&CB); // Can't handle struct args.
if (A.get()->getType()->isMetadataTy())
continue; // Carried in CB, not allowed in Operands.
- ValueLatticeElement State = getValueState(A);
+ const ValueLatticeElement &State = getValueState(A);
if (State.isUnknownOrUndef())
return; // Operands are not resolved yet.
@@ -1964,7 +1963,7 @@ void SCCPInstVisitor::handleCallOverdefined(CallBase &CB) {
}
// Fall back to metadata.
- mergeInValue(&CB, getValueFromMetadata(&CB));
+ mergeInValue(ValueState[&CB], &CB, getValueFromMetadata(&CB));
}
void SCCPInstVisitor::handleCallArguments(CallBase &CB) {
@@ -1992,10 +1991,11 @@ void SCCPInstVisitor::handleCallArguments(CallBase &CB) {
mergeInValue(getStructValueState(&*AI, i), &*AI, CallArg,
getMaxWidenStepsOpts());
}
- } else
- mergeInValue(&*AI,
- getValueState(*CAI).intersect(getArgAttributeVL(&*AI)),
- getMaxWidenStepsOpts());
+ } else {
+ ValueLatticeElement CallArg =
+ getValueState(*CAI).intersect(getArgAttributeVL(&*AI));
+ mergeInValue(ValueState[&*AI], &*AI, CallArg, getMaxWidenStepsOpts());
+ }
}
}
}
@@ -2076,7 +2076,8 @@ void SCCPInstVisitor::handleCallResult(CallBase &CB) {
if (II->getIntrinsicID() == Intrinsic::vscale) {
unsigned BitWidth = CB.getType()->getScalarSizeInBits();
const ConstantRange Result = getVScaleRange(II->getFunction(), BitWidth);
- return (void)mergeInValue(II, ValueLatticeElement::getRange(Result));
+ return (void)mergeInValue(ValueState[II], II,
+ ValueLatticeElement::getRange(Result));
}
if (ConstantRange::isIntrinsicSupported(II->getIntrinsicID())) {
@@ -2094,7 +2095,8 @@ void SCCPInstVisitor::handleCallResult(CallBase &CB) {
ConstantRange Result =
ConstantRange::intrinsic(II->getIntrinsicID(), OpRanges);
- return (void)mergeInValue(II, ValueLatticeElement::getRange(Result));
+ return (void)mergeInValue(ValueState[II], II,
+ ValueLatticeElement::getRange(Result));
}
}
@@ -2121,7 +2123,7 @@ void SCCPInstVisitor::handleCallResult(CallBase &CB) {
return handleCallOverdefined(CB); // Not tracking this callee.
// If so, propagate the return value of the callee into this call result.
- mergeInValue(&CB, TFRVI->second, getMaxWidenStepsOpts());
+ mergeInValue(ValueState[&CB], &CB, TFRVI->second, getMaxWidenStepsOpts());
}
}
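The recurring edit in the SCCPSolver hunks above, taking `MergeWithV` by const reference and indexing `ValueState` only at points where no new insertion can happen, guards against a DenseMap reference-invalidation hazard. The sketch below is hypothetical and not SCCPSolver code.

```cpp
#include "llvm/ADT/DenseMap.h"

// DenseMap::operator[] may insert, and an insertion can grow and rehash the
// map, invalidating references handed out earlier.
void hazardSketch(llvm::DenseMap<int, int> &State, int A, int B) {
  int &RefA = State[A]; // reference into the map
  int &RefB = State[B]; // may insert B, rehash, and leave RefA dangling
  RefB = RefA;          // potential use of an invalidated reference
}

// Copying the value out first (or only indexing keys known to be present,
// as the patch does with ValueState[&I]) removes the hazard.
void safeSketch(llvm::DenseMap<int, int> &State, int A, int B) {
  int ValA = State[A];
  State[B] = ValA;
}
```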
diff --git a/llvm/lib/Transforms/Vectorize/SLPVectorizer.cpp b/llvm/lib/Transforms/Vectorize/SLPVectorizer.cpp
index b62c8f1..9cd52da 100644
--- a/llvm/lib/Transforms/Vectorize/SLPVectorizer.cpp
+++ b/llvm/lib/Transforms/Vectorize/SLPVectorizer.cpp
@@ -2242,8 +2242,49 @@ public:
/// may not be necessary.
bool isLoadCombineCandidate(ArrayRef<Value *> Stores) const;
bool isStridedLoad(ArrayRef<Value *> PointerOps, Type *ScalarTy,
- Align Alignment, const int64_t Diff, Value *Ptr0,
- Value *PtrN, StridedPtrInfo &SPtrInfo) const;
+ Align Alignment, const int64_t Diff,
+ const size_t Sz) const;
+
+ /// Return true if an array of scalar loads can be replaced with a strided
+ /// load (with constant stride).
+ ///
+ /// TODO:
+ /// It is possible that the load gets "widened". Suppose that originally each
+ /// load loads `k` bytes and `PointerOps` can be arranged as follows (`%s` is
+ /// constant):
+ /// %b + 0 * %s + 0
+ /// %b + 0 * %s + 1
+ /// %b + 0 * %s + 2
+ /// ...
+ /// %b + 0 * %s + (w - 1)
+ ///
+ /// %b + 1 * %s + 0
+ /// %b + 1 * %s + 1
+ /// %b + 1 * %s + 2
+ /// ...
+ /// %b + 1 * %s + (w - 1)
+ /// ...
+ ///
+ /// %b + (n - 1) * %s + 0
+ /// %b + (n - 1) * %s + 1
+ /// %b + (n - 1) * %s + 2
+ /// ...
+ /// %b + (n - 1) * %s + (w - 1)
+ ///
+ /// In this case we will generate a strided load of type `<n x (k * w)>`.
+ ///
+ /// \param PointerOps list of pointer arguments of loads.
+ /// \param ElemTy original scalar type of loads.
+ /// \param Alignment alignment of the first load.
+ /// \param SortedIndices the order of `PointerOps` as returned by
+ /// `sortPtrAccesses`.
+ /// \param Diff pointer difference between the lowest and the highest pointer
+ /// in `PointerOps` as returned by `getPointersDiff`.
+ /// \param Ptr0 first pointer in `PointerOps`.
+ /// \param PtrN last pointer in `PointerOps`.
+ /// \param SPtrInfo If the function returns `true`, it also sets all the fields
+ /// of `SPtrInfo` necessary to generate the strided load later.
+ bool analyzeConstantStrideCandidate(
+ const ArrayRef<Value *> PointerOps, Type *ElemTy, Align Alignment,
+ const SmallVectorImpl<unsigned> &SortedIndices, const int64_t Diff,
+ Value *Ptr0, Value *PtrN, StridedPtrInfo &SPtrInfo) const;
/// Return true if an array of scalar loads can be replaced with a strided
/// load (with run-time stride).
@@ -6849,9 +6890,8 @@ isMaskedLoadCompress(ArrayRef<Value *> VL, ArrayRef<Value *> PointerOps,
/// current graph (for masked gathers extra extractelement instructions
/// might be required).
bool BoUpSLP::isStridedLoad(ArrayRef<Value *> PointerOps, Type *ScalarTy,
- Align Alignment, const int64_t Diff, Value *Ptr0,
- Value *PtrN, StridedPtrInfo &SPtrInfo) const {
- const size_t Sz = PointerOps.size();
+ Align Alignment, const int64_t Diff,
+ const size_t Sz) const {
if (Diff % (Sz - 1) != 0)
return false;
@@ -6875,27 +6915,40 @@ bool BoUpSLP::isStridedLoad(ArrayRef<Value *> PointerOps, Type *ScalarTy,
return false;
if (!TTI->isLegalStridedLoadStore(VecTy, Alignment))
return false;
+ return true;
+ }
+ return false;
+}
- // Iterate through all pointers and check if all distances are
- // unique multiple of Dist.
- SmallSet<int64_t, 4> Dists;
- for (Value *Ptr : PointerOps) {
- int64_t Dist = 0;
- if (Ptr == PtrN)
- Dist = Diff;
- else if (Ptr != Ptr0)
- Dist = *getPointersDiff(ScalarTy, Ptr0, ScalarTy, Ptr, *DL, *SE);
- // If the strides are not the same or repeated, we can't
- // vectorize.
- if (((Dist / Stride) * Stride) != Dist || !Dists.insert(Dist).second)
- break;
- }
- if (Dists.size() == Sz) {
- Type *StrideTy = DL->getIndexType(Ptr0->getType());
- SPtrInfo.StrideVal = ConstantInt::get(StrideTy, Stride);
- SPtrInfo.Ty = getWidenedType(ScalarTy, Sz);
- return true;
- }
+bool BoUpSLP::analyzeConstantStrideCandidate(
+ const ArrayRef<Value *> PointerOps, Type *ScalarTy, Align Alignment,
+ const SmallVectorImpl<unsigned> &SortedIndices, const int64_t Diff,
+ Value *Ptr0, Value *PtrN, StridedPtrInfo &SPtrInfo) const {
+ const size_t Sz = PointerOps.size();
+ if (!isStridedLoad(PointerOps, ScalarTy, Alignment, Diff, Sz))
+ return false;
+
+ int64_t Stride = Diff / static_cast<int64_t>(Sz - 1);
+
+ // Iterate through all pointers and check if all distances are
+ // unique multiples of Stride.
+ SmallSet<int64_t, 4> Dists;
+ for (Value *Ptr : PointerOps) {
+ int64_t Dist = 0;
+ if (Ptr == PtrN)
+ Dist = Diff;
+ else if (Ptr != Ptr0)
+ Dist = *getPointersDiff(ScalarTy, Ptr0, ScalarTy, Ptr, *DL, *SE);
+ // If the strides are not the same or repeated, we can't
+ // vectorize.
+ if (((Dist / Stride) * Stride) != Dist || !Dists.insert(Dist).second)
+ break;
+ }
+ if (Dists.size() == Sz) {
+ Type *StrideTy = DL->getIndexType(Ptr0->getType());
+ SPtrInfo.StrideVal = ConstantInt::get(StrideTy, Stride);
+ SPtrInfo.Ty = getWidenedType(ScalarTy, Sz);
+ return true;
}
return false;
}
@@ -6995,8 +7048,8 @@ BoUpSLP::LoadsState BoUpSLP::canVectorizeLoads(
Align Alignment =
cast<LoadInst>(Order.empty() ? VL.front() : VL[Order.front()])
->getAlign();
- if (isStridedLoad(PointerOps, ScalarTy, Alignment, *Diff, Ptr0, PtrN,
- SPtrInfo))
+ if (analyzeConstantStrideCandidate(PointerOps, ScalarTy, Alignment, Order,
+ *Diff, Ptr0, PtrN, SPtrInfo))
return LoadsState::StridedVectorize;
}
if (!TTI->isLegalMaskedGather(VecTy, CommonAlignment) ||
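As a rough illustration of what the new analyzeConstantStrideCandidate entry point looks for, the hypothetical function below performs scalar loads whose pointer offsets are distinct multiples of one compile-time-constant stride; when the target reports strided loads legal, the SLP vectorizer can then emit them as a single strided load described by StridedPtrInfo.

```cpp
// Hypothetical example, not from the patch. The four loads read
// base + 0, base + 8, base + 16, base + 24 (in elements): distinct multiples
// of a constant stride of 8, which is the shape the constant-stride analysis
// checks before filling in the stride value and widened vector type.
void copyEveryEighth(const float *base, float *out) {
  out[0] = base[0 * 8];
  out[1] = base[1 * 8];
  out[2] = base[2 * 8];
  out[3] = base[3 * 8];
}
```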
diff --git a/llvm/lib/Transforms/Vectorize/VPlan.h b/llvm/lib/Transforms/Vectorize/VPlan.h
index 0e0b042..84d2ea6 100644
--- a/llvm/lib/Transforms/Vectorize/VPlan.h
+++ b/llvm/lib/Transforms/Vectorize/VPlan.h
@@ -407,6 +407,10 @@ public:
VPBasicBlock *getParent() { return Parent; }
const VPBasicBlock *getParent() const { return Parent; }
+ /// \return the VPRegionBlock which the recipe belongs to.
+ VPRegionBlock *getRegion();
+ const VPRegionBlock *getRegion() const;
+
/// The method which generates the output IR instructions that correspond to
/// this VPRecipe, thereby "executing" the VPlan.
virtual void execute(VPTransformState &State) = 0;
@@ -4075,6 +4079,14 @@ public:
}
};
+inline VPRegionBlock *VPRecipeBase::getRegion() {
+ return getParent()->getParent();
+}
+
+inline const VPRegionBlock *VPRecipeBase::getRegion() const {
+ return getParent()->getParent();
+}
+
/// VPlan models a candidate for vectorization, encoding various decisions take
/// to produce efficient output IR, including which branches, basic-blocks and
/// output IR instructions to generate, and their cost. VPlan holds a
diff --git a/llvm/lib/Transforms/Vectorize/VPlanAnalysis.cpp b/llvm/lib/Transforms/Vectorize/VPlanAnalysis.cpp
index f413c63..7e074c1 100644
--- a/llvm/lib/Transforms/Vectorize/VPlanAnalysis.cpp
+++ b/llvm/lib/Transforms/Vectorize/VPlanAnalysis.cpp
@@ -377,7 +377,7 @@ bool VPDominatorTree::properlyDominates(const VPRecipeBase *A,
#ifndef NDEBUG
auto GetReplicateRegion = [](VPRecipeBase *R) -> VPRegionBlock * {
- auto *Region = dyn_cast_or_null<VPRegionBlock>(R->getParent()->getParent());
+ VPRegionBlock *Region = R->getRegion();
if (Region && Region->isReplicator()) {
assert(Region->getNumSuccessors() == 1 &&
Region->getNumPredecessors() == 1 && "Expected SESE region!");
diff --git a/llvm/lib/Transforms/Vectorize/VPlanRecipes.cpp b/llvm/lib/Transforms/Vectorize/VPlanRecipes.cpp
index 7a98c75..d1e67e6b 100644
--- a/llvm/lib/Transforms/Vectorize/VPlanRecipes.cpp
+++ b/llvm/lib/Transforms/Vectorize/VPlanRecipes.cpp
@@ -2352,7 +2352,7 @@ bool VPWidenIntOrFpInductionRecipe::isCanonical() const {
return false;
auto *StepC = dyn_cast<ConstantInt>(getStepValue()->getLiveInIRValue());
auto *StartC = dyn_cast<ConstantInt>(getStartValue()->getLiveInIRValue());
- auto *CanIV = getParent()->getParent()->getCanonicalIV();
+ auto *CanIV = getRegion()->getCanonicalIV();
return StartC && StartC->isZero() && StepC && StepC->isOne() &&
getScalarType() == CanIV->getScalarType();
}
@@ -3076,7 +3076,7 @@ static void scalarizeInstruction(const Instruction *Instr,
State.AC->registerAssumption(II);
assert(
- (RepRecipe->getParent()->getParent() ||
+ (RepRecipe->getRegion() ||
!RepRecipe->getParent()->getPlan()->getVectorLoopRegion() ||
all_of(RepRecipe->operands(),
[](VPValue *Op) { return Op->isDefinedOutsideLoopRegions(); })) &&
@@ -3268,7 +3268,7 @@ InstructionCost VPReplicateRecipe::computeCost(ElementCount VF,
to_vector(operands()), VF);
// If the recipe is not predicated (i.e. not in a replicate region), return
// the scalar cost. Otherwise handle predicated cost.
- if (!getParent()->getParent()->isReplicator())
+ if (!getRegion()->isReplicator())
return ScalarCost;
// Account for the phi nodes that we will create.
@@ -3284,7 +3284,7 @@ InstructionCost VPReplicateRecipe::computeCost(ElementCount VF,
case Instruction::Store: {
// TODO: See getMemInstScalarizationCost for how to handle replicating and
// predicated cases.
- const VPRegionBlock *ParentRegion = getParent()->getParent();
+ const VPRegionBlock *ParentRegion = getRegion();
if (ParentRegion && ParentRegion->isReplicator())
break;
diff --git a/llvm/lib/Transforms/Vectorize/VPlanTransforms.cpp b/llvm/lib/Transforms/Vectorize/VPlanTransforms.cpp
index cae9aee8..f5f616f 100644
--- a/llvm/lib/Transforms/Vectorize/VPlanTransforms.cpp
+++ b/llvm/lib/Transforms/Vectorize/VPlanTransforms.cpp
@@ -1858,8 +1858,8 @@ static bool hoistPreviousBeforeFORUsers(VPFirstOrderRecurrencePHIRecipe *FOR,
return nullptr;
VPRegionBlock *EnclosingLoopRegion =
HoistCandidate->getParent()->getEnclosingLoopRegion();
- assert((!HoistCandidate->getParent()->getParent() ||
- HoistCandidate->getParent()->getParent() == EnclosingLoopRegion) &&
+ assert((!HoistCandidate->getRegion() ||
+ HoistCandidate->getRegion() == EnclosingLoopRegion) &&
"CFG in VPlan should still be flat, without replicate regions");
// Hoist candidate was already visited, no need to hoist.
if (!Visited.insert(HoistCandidate).second)
@@ -2898,7 +2898,7 @@ void VPlanTransforms::replaceSymbolicStrides(
// evolution.
auto CanUseVersionedStride = [&Plan](VPUser &U, unsigned) {
auto *R = cast<VPRecipeBase>(&U);
- return R->getParent()->getParent() ||
+ return R->getRegion() ||
R->getParent() == Plan.getVectorLoopRegion()->getSinglePredecessor();
};
ValueToSCEVMapTy RewriteMap;
@@ -3803,8 +3803,7 @@ void VPlanTransforms::materializeBuildVectors(VPlan &Plan) {
continue;
auto *DefR = cast<VPRecipeWithIRFlags>(&R);
auto UsesVectorOrInsideReplicateRegion = [DefR, LoopRegion](VPUser *U) {
- VPRegionBlock *ParentRegion =
- cast<VPRecipeBase>(U)->getParent()->getParent();
+ VPRegionBlock *ParentRegion = cast<VPRecipeBase>(U)->getRegion();
return !U->usesScalars(DefR) || ParentRegion != LoopRegion;
};
if ((isa<VPReplicateRecipe>(DefR) &&
diff --git a/llvm/lib/Transforms/Vectorize/VPlanUtils.h b/llvm/lib/Transforms/Vectorize/VPlanUtils.h
index cf95ac0..9a2497e 100644
--- a/llvm/lib/Transforms/Vectorize/VPlanUtils.h
+++ b/llvm/lib/Transforms/Vectorize/VPlanUtils.h
@@ -64,7 +64,7 @@ inline bool isSingleScalar(const VPValue *VPV) {
return true;
if (auto *Rep = dyn_cast<VPReplicateRecipe>(VPV)) {
- const VPRegionBlock *RegionOfR = Rep->getParent()->getParent();
+ const VPRegionBlock *RegionOfR = Rep->getRegion();
// Don't consider recipes in replicate regions as uniform yet; their first
// lane cannot be accessed when executing the replicate region for other
// lanes.
diff --git a/llvm/test/CodeGen/AMDGPU/abs_i16.ll b/llvm/test/CodeGen/AMDGPU/abs_i16.ll
index 7633ba0..66cc7f3 100644
--- a/llvm/test/CodeGen/AMDGPU/abs_i16.ll
+++ b/llvm/test/CodeGen/AMDGPU/abs_i16.ll
@@ -15,7 +15,7 @@ define i16 @abs_i16(i16 %arg) {
; GFX6-NEXT: s_waitcnt vmcnt(0) expcnt(0) lgkmcnt(0)
; GFX6-NEXT: v_bfe_i32 v0, v0, 0, 16
; GFX6-NEXT: v_sub_i32_e32 v1, vcc, 0, v0
-; GFX6-NEXT: v_max_i32_e32 v0, v0, v1
+; GFX6-NEXT: v_max_i32_e32 v0, v1, v0
; GFX6-NEXT: s_setpc_b64 s[30:31]
;
; GFX7-LABEL: abs_i16:
@@ -23,7 +23,7 @@ define i16 @abs_i16(i16 %arg) {
; GFX7-NEXT: s_waitcnt vmcnt(0) expcnt(0) lgkmcnt(0)
; GFX7-NEXT: v_bfe_i32 v0, v0, 0, 16
; GFX7-NEXT: v_sub_i32_e32 v1, vcc, 0, v0
-; GFX7-NEXT: v_max_i32_e32 v0, v0, v1
+; GFX7-NEXT: v_max_i32_e32 v0, v1, v0
; GFX7-NEXT: s_setpc_b64 s[30:31]
;
; GFX8-LABEL: abs_i16:
@@ -97,9 +97,9 @@ define <2 x i16> @v_abs_v2i16(<2 x i16> %arg) {
; GFX6-NEXT: v_bfe_i32 v0, v0, 0, 16
; GFX6-NEXT: v_bfe_i32 v1, v1, 0, 16
; GFX6-NEXT: v_sub_i32_e32 v2, vcc, 0, v0
-; GFX6-NEXT: v_max_i32_e32 v0, v0, v2
+; GFX6-NEXT: v_max_i32_e32 v0, v2, v0
; GFX6-NEXT: v_sub_i32_e32 v2, vcc, 0, v1
-; GFX6-NEXT: v_max_i32_e32 v1, v1, v2
+; GFX6-NEXT: v_max_i32_e32 v1, v2, v1
; GFX6-NEXT: v_lshlrev_b32_e32 v2, 16, v1
; GFX6-NEXT: v_or_b32_e32 v0, v0, v2
; GFX6-NEXT: s_setpc_b64 s[30:31]
@@ -110,9 +110,9 @@ define <2 x i16> @v_abs_v2i16(<2 x i16> %arg) {
; GFX7-NEXT: v_bfe_i32 v0, v0, 0, 16
; GFX7-NEXT: v_bfe_i32 v1, v1, 0, 16
; GFX7-NEXT: v_sub_i32_e32 v2, vcc, 0, v0
-; GFX7-NEXT: v_max_i32_e32 v0, v0, v2
+; GFX7-NEXT: v_max_i32_e32 v0, v2, v0
; GFX7-NEXT: v_sub_i32_e32 v2, vcc, 0, v1
-; GFX7-NEXT: v_max_i32_e32 v1, v1, v2
+; GFX7-NEXT: v_max_i32_e32 v1, v2, v1
; GFX7-NEXT: v_lshlrev_b32_e32 v2, 16, v1
; GFX7-NEXT: v_or_b32_e32 v0, v0, v2
; GFX7-NEXT: s_setpc_b64 s[30:31]
@@ -172,15 +172,15 @@ define <3 x i16> @v_abs_v3i16(<3 x i16> %arg) {
; GFX6-NEXT: v_bfe_i32 v0, v0, 0, 16
; GFX6-NEXT: v_bfe_i32 v1, v1, 0, 16
; GFX6-NEXT: v_sub_i32_e32 v3, vcc, 0, v0
-; GFX6-NEXT: v_max_i32_e32 v0, v0, v3
-; GFX6-NEXT: v_sub_i32_e32 v3, vcc, 0, v1
-; GFX6-NEXT: v_max_i32_e32 v1, v1, v3
; GFX6-NEXT: v_bfe_i32 v2, v2, 0, 16
+; GFX6-NEXT: v_max_i32_e32 v0, v3, v0
+; GFX6-NEXT: v_sub_i32_e32 v3, vcc, 0, v1
+; GFX6-NEXT: v_max_i32_e32 v1, v3, v1
+; GFX6-NEXT: v_sub_i32_e32 v3, vcc, 0, v2
; GFX6-NEXT: v_lshlrev_b32_e32 v1, 16, v1
+; GFX6-NEXT: v_max_i32_e32 v2, v3, v2
; GFX6-NEXT: v_or_b32_e32 v0, v0, v1
-; GFX6-NEXT: v_sub_i32_e32 v1, vcc, 0, v2
-; GFX6-NEXT: v_max_i32_e32 v2, v2, v1
-; GFX6-NEXT: v_alignbit_b32 v1, v2, v0, 16
+; GFX6-NEXT: v_alignbit_b32 v1, v2, v1, 16
; GFX6-NEXT: s_setpc_b64 s[30:31]
;
; GFX7-LABEL: v_abs_v3i16:
@@ -189,15 +189,15 @@ define <3 x i16> @v_abs_v3i16(<3 x i16> %arg) {
; GFX7-NEXT: v_bfe_i32 v0, v0, 0, 16
; GFX7-NEXT: v_bfe_i32 v1, v1, 0, 16
; GFX7-NEXT: v_sub_i32_e32 v3, vcc, 0, v0
-; GFX7-NEXT: v_max_i32_e32 v0, v0, v3
-; GFX7-NEXT: v_sub_i32_e32 v3, vcc, 0, v1
-; GFX7-NEXT: v_max_i32_e32 v1, v1, v3
; GFX7-NEXT: v_bfe_i32 v2, v2, 0, 16
+; GFX7-NEXT: v_max_i32_e32 v0, v3, v0
+; GFX7-NEXT: v_sub_i32_e32 v3, vcc, 0, v1
+; GFX7-NEXT: v_max_i32_e32 v1, v3, v1
+; GFX7-NEXT: v_sub_i32_e32 v3, vcc, 0, v2
; GFX7-NEXT: v_lshlrev_b32_e32 v1, 16, v1
+; GFX7-NEXT: v_max_i32_e32 v2, v3, v2
; GFX7-NEXT: v_or_b32_e32 v0, v0, v1
-; GFX7-NEXT: v_sub_i32_e32 v1, vcc, 0, v2
-; GFX7-NEXT: v_max_i32_e32 v2, v2, v1
-; GFX7-NEXT: v_alignbit_b32 v1, v2, v0, 16
+; GFX7-NEXT: v_alignbit_b32 v1, v2, v1, 16
; GFX7-NEXT: s_setpc_b64 s[30:31]
;
; GFX8-LABEL: v_abs_v3i16:
@@ -262,47 +262,45 @@ define <4 x i16> @v_abs_v4i16(<4 x i16> %arg) {
; GFX6-LABEL: v_abs_v4i16:
; GFX6: ; %bb.0:
; GFX6-NEXT: s_waitcnt vmcnt(0) expcnt(0) lgkmcnt(0)
+; GFX6-NEXT: v_bfe_i32 v0, v0, 0, 16
+; GFX6-NEXT: v_bfe_i32 v1, v1, 0, 16
+; GFX6-NEXT: v_sub_i32_e32 v4, vcc, 0, v0
; GFX6-NEXT: v_bfe_i32 v2, v2, 0, 16
+; GFX6-NEXT: v_max_i32_e32 v0, v4, v0
+; GFX6-NEXT: v_sub_i32_e32 v4, vcc, 0, v1
; GFX6-NEXT: v_bfe_i32 v3, v3, 0, 16
+; GFX6-NEXT: v_max_i32_e32 v1, v4, v1
; GFX6-NEXT: v_sub_i32_e32 v4, vcc, 0, v2
-; GFX6-NEXT: v_max_i32_e32 v2, v2, v4
+; GFX6-NEXT: v_max_i32_e32 v2, v4, v2
; GFX6-NEXT: v_sub_i32_e32 v4, vcc, 0, v3
-; GFX6-NEXT: v_max_i32_e32 v3, v3, v4
-; GFX6-NEXT: v_bfe_i32 v0, v0, 0, 16
-; GFX6-NEXT: v_lshlrev_b32_e32 v3, 16, v3
-; GFX6-NEXT: v_bfe_i32 v1, v1, 0, 16
-; GFX6-NEXT: v_or_b32_e32 v2, v2, v3
-; GFX6-NEXT: v_sub_i32_e32 v3, vcc, 0, v0
-; GFX6-NEXT: v_max_i32_e32 v0, v0, v3
-; GFX6-NEXT: v_sub_i32_e32 v3, vcc, 0, v1
-; GFX6-NEXT: v_max_i32_e32 v1, v1, v3
+; GFX6-NEXT: v_max_i32_e32 v3, v4, v3
+; GFX6-NEXT: v_lshlrev_b32_e32 v4, 16, v3
; GFX6-NEXT: v_lshlrev_b32_e32 v1, 16, v1
+; GFX6-NEXT: v_or_b32_e32 v2, v2, v4
; GFX6-NEXT: v_or_b32_e32 v0, v0, v1
-; GFX6-NEXT: v_alignbit_b32 v1, v2, v0, 16
-; GFX6-NEXT: v_lshrrev_b32_e32 v3, 16, v2
+; GFX6-NEXT: v_alignbit_b32 v1, v2, v1, 16
; GFX6-NEXT: s_setpc_b64 s[30:31]
;
; GFX7-LABEL: v_abs_v4i16:
; GFX7: ; %bb.0:
; GFX7-NEXT: s_waitcnt vmcnt(0) expcnt(0) lgkmcnt(0)
+; GFX7-NEXT: v_bfe_i32 v0, v0, 0, 16
+; GFX7-NEXT: v_bfe_i32 v1, v1, 0, 16
+; GFX7-NEXT: v_sub_i32_e32 v4, vcc, 0, v0
; GFX7-NEXT: v_bfe_i32 v2, v2, 0, 16
+; GFX7-NEXT: v_max_i32_e32 v0, v4, v0
+; GFX7-NEXT: v_sub_i32_e32 v4, vcc, 0, v1
; GFX7-NEXT: v_bfe_i32 v3, v3, 0, 16
+; GFX7-NEXT: v_max_i32_e32 v1, v4, v1
; GFX7-NEXT: v_sub_i32_e32 v4, vcc, 0, v2
-; GFX7-NEXT: v_max_i32_e32 v2, v2, v4
+; GFX7-NEXT: v_max_i32_e32 v2, v4, v2
; GFX7-NEXT: v_sub_i32_e32 v4, vcc, 0, v3
-; GFX7-NEXT: v_max_i32_e32 v3, v3, v4
-; GFX7-NEXT: v_bfe_i32 v0, v0, 0, 16
-; GFX7-NEXT: v_lshlrev_b32_e32 v3, 16, v3
-; GFX7-NEXT: v_bfe_i32 v1, v1, 0, 16
-; GFX7-NEXT: v_or_b32_e32 v2, v2, v3
-; GFX7-NEXT: v_sub_i32_e32 v3, vcc, 0, v0
-; GFX7-NEXT: v_max_i32_e32 v0, v0, v3
-; GFX7-NEXT: v_sub_i32_e32 v3, vcc, 0, v1
-; GFX7-NEXT: v_max_i32_e32 v1, v1, v3
+; GFX7-NEXT: v_max_i32_e32 v3, v4, v3
+; GFX7-NEXT: v_lshlrev_b32_e32 v4, 16, v3
; GFX7-NEXT: v_lshlrev_b32_e32 v1, 16, v1
+; GFX7-NEXT: v_or_b32_e32 v2, v2, v4
; GFX7-NEXT: v_or_b32_e32 v0, v0, v1
-; GFX7-NEXT: v_alignbit_b32 v1, v2, v0, 16
-; GFX7-NEXT: v_lshrrev_b32_e32 v3, 16, v2
+; GFX7-NEXT: v_alignbit_b32 v1, v2, v1, 16
; GFX7-NEXT: s_setpc_b64 s[30:31]
;
; GFX8-LABEL: v_abs_v4i16:
@@ -370,63 +368,61 @@ define <6 x i16> @v_abs_v6i16(<6 x i16> %arg) {
; GFX6-LABEL: v_abs_v6i16:
; GFX6: ; %bb.0:
; GFX6-NEXT: s_waitcnt vmcnt(0) expcnt(0) lgkmcnt(0)
+; GFX6-NEXT: v_bfe_i32 v0, v0, 0, 16
+; GFX6-NEXT: v_bfe_i32 v1, v1, 0, 16
+; GFX6-NEXT: v_sub_i32_e32 v6, vcc, 0, v0
+; GFX6-NEXT: v_bfe_i32 v4, v4, 0, 16
+; GFX6-NEXT: v_max_i32_e32 v0, v6, v0
+; GFX6-NEXT: v_sub_i32_e32 v6, vcc, 0, v1
+; GFX6-NEXT: v_bfe_i32 v5, v5, 0, 16
+; GFX6-NEXT: v_max_i32_e32 v1, v6, v1
+; GFX6-NEXT: v_sub_i32_e32 v6, vcc, 0, v4
+; GFX6-NEXT: v_max_i32_e32 v4, v6, v4
+; GFX6-NEXT: v_sub_i32_e32 v6, vcc, 0, v5
+; GFX6-NEXT: v_max_i32_e32 v5, v6, v5
; GFX6-NEXT: v_bfe_i32 v2, v2, 0, 16
+; GFX6-NEXT: v_lshlrev_b32_e32 v6, 16, v5
; GFX6-NEXT: v_bfe_i32 v3, v3, 0, 16
+; GFX6-NEXT: v_or_b32_e32 v4, v4, v6
; GFX6-NEXT: v_sub_i32_e32 v6, vcc, 0, v2
-; GFX6-NEXT: v_max_i32_e32 v2, v2, v6
+; GFX6-NEXT: v_max_i32_e32 v2, v6, v2
; GFX6-NEXT: v_sub_i32_e32 v6, vcc, 0, v3
-; GFX6-NEXT: v_max_i32_e32 v3, v3, v6
-; GFX6-NEXT: v_bfe_i32 v0, v0, 0, 16
-; GFX6-NEXT: v_lshlrev_b32_e32 v3, 16, v3
-; GFX6-NEXT: v_bfe_i32 v1, v1, 0, 16
-; GFX6-NEXT: v_or_b32_e32 v2, v2, v3
-; GFX6-NEXT: v_sub_i32_e32 v3, vcc, 0, v0
-; GFX6-NEXT: v_max_i32_e32 v0, v0, v3
-; GFX6-NEXT: v_sub_i32_e32 v3, vcc, 0, v1
-; GFX6-NEXT: v_bfe_i32 v5, v5, 0, 16
-; GFX6-NEXT: v_max_i32_e32 v1, v1, v3
-; GFX6-NEXT: v_bfe_i32 v4, v4, 0, 16
+; GFX6-NEXT: v_max_i32_e32 v3, v6, v3
+; GFX6-NEXT: v_lshlrev_b32_e32 v6, 16, v3
; GFX6-NEXT: v_lshlrev_b32_e32 v1, 16, v1
-; GFX6-NEXT: v_sub_i32_e32 v3, vcc, 0, v5
+; GFX6-NEXT: v_or_b32_e32 v2, v2, v6
; GFX6-NEXT: v_or_b32_e32 v0, v0, v1
-; GFX6-NEXT: v_sub_i32_e32 v1, vcc, 0, v4
-; GFX6-NEXT: v_max_i32_e32 v5, v5, v3
-; GFX6-NEXT: v_max_i32_e32 v1, v4, v1
-; GFX6-NEXT: v_lshlrev_b32_e32 v3, 16, v5
-; GFX6-NEXT: v_or_b32_e32 v4, v1, v3
-; GFX6-NEXT: v_alignbit_b32 v1, v2, v0, 16
-; GFX6-NEXT: v_lshrrev_b32_e32 v3, 16, v2
+; GFX6-NEXT: v_alignbit_b32 v1, v2, v1, 16
; GFX6-NEXT: s_setpc_b64 s[30:31]
;
; GFX7-LABEL: v_abs_v6i16:
; GFX7: ; %bb.0:
; GFX7-NEXT: s_waitcnt vmcnt(0) expcnt(0) lgkmcnt(0)
+; GFX7-NEXT: v_bfe_i32 v0, v0, 0, 16
+; GFX7-NEXT: v_bfe_i32 v1, v1, 0, 16
+; GFX7-NEXT: v_sub_i32_e32 v6, vcc, 0, v0
+; GFX7-NEXT: v_bfe_i32 v4, v4, 0, 16
+; GFX7-NEXT: v_max_i32_e32 v0, v6, v0
+; GFX7-NEXT: v_sub_i32_e32 v6, vcc, 0, v1
+; GFX7-NEXT: v_bfe_i32 v5, v5, 0, 16
+; GFX7-NEXT: v_max_i32_e32 v1, v6, v1
+; GFX7-NEXT: v_sub_i32_e32 v6, vcc, 0, v4
+; GFX7-NEXT: v_max_i32_e32 v4, v6, v4
+; GFX7-NEXT: v_sub_i32_e32 v6, vcc, 0, v5
+; GFX7-NEXT: v_max_i32_e32 v5, v6, v5
; GFX7-NEXT: v_bfe_i32 v2, v2, 0, 16
+; GFX7-NEXT: v_lshlrev_b32_e32 v6, 16, v5
; GFX7-NEXT: v_bfe_i32 v3, v3, 0, 16
+; GFX7-NEXT: v_or_b32_e32 v4, v4, v6
; GFX7-NEXT: v_sub_i32_e32 v6, vcc, 0, v2
-; GFX7-NEXT: v_max_i32_e32 v2, v2, v6
+; GFX7-NEXT: v_max_i32_e32 v2, v6, v2
; GFX7-NEXT: v_sub_i32_e32 v6, vcc, 0, v3
-; GFX7-NEXT: v_max_i32_e32 v3, v3, v6
-; GFX7-NEXT: v_bfe_i32 v0, v0, 0, 16
-; GFX7-NEXT: v_lshlrev_b32_e32 v3, 16, v3
-; GFX7-NEXT: v_bfe_i32 v1, v1, 0, 16
-; GFX7-NEXT: v_or_b32_e32 v2, v2, v3
-; GFX7-NEXT: v_sub_i32_e32 v3, vcc, 0, v0
-; GFX7-NEXT: v_max_i32_e32 v0, v0, v3
-; GFX7-NEXT: v_sub_i32_e32 v3, vcc, 0, v1
-; GFX7-NEXT: v_bfe_i32 v5, v5, 0, 16
-; GFX7-NEXT: v_max_i32_e32 v1, v1, v3
-; GFX7-NEXT: v_bfe_i32 v4, v4, 0, 16
+; GFX7-NEXT: v_max_i32_e32 v3, v6, v3
+; GFX7-NEXT: v_lshlrev_b32_e32 v6, 16, v3
; GFX7-NEXT: v_lshlrev_b32_e32 v1, 16, v1
-; GFX7-NEXT: v_sub_i32_e32 v3, vcc, 0, v5
+; GFX7-NEXT: v_or_b32_e32 v2, v2, v6
; GFX7-NEXT: v_or_b32_e32 v0, v0, v1
-; GFX7-NEXT: v_sub_i32_e32 v1, vcc, 0, v4
-; GFX7-NEXT: v_max_i32_e32 v5, v5, v3
-; GFX7-NEXT: v_max_i32_e32 v1, v4, v1
-; GFX7-NEXT: v_lshlrev_b32_e32 v3, 16, v5
-; GFX7-NEXT: v_or_b32_e32 v4, v1, v3
-; GFX7-NEXT: v_alignbit_b32 v1, v2, v0, 16
-; GFX7-NEXT: v_lshrrev_b32_e32 v3, 16, v2
+; GFX7-NEXT: v_alignbit_b32 v1, v2, v1, 16
; GFX7-NEXT: s_setpc_b64 s[30:31]
;
; GFX8-LABEL: v_abs_v6i16:
@@ -509,83 +505,79 @@ define <8 x i16> @v_abs_v8i16(<8 x i16> %arg) {
; GFX6-LABEL: v_abs_v8i16:
; GFX6: ; %bb.0:
; GFX6-NEXT: s_waitcnt vmcnt(0) expcnt(0) lgkmcnt(0)
+; GFX6-NEXT: v_bfe_i32 v0, v0, 0, 16
+; GFX6-NEXT: v_bfe_i32 v1, v1, 0, 16
+; GFX6-NEXT: v_sub_i32_e32 v8, vcc, 0, v0
+; GFX6-NEXT: v_bfe_i32 v4, v4, 0, 16
+; GFX6-NEXT: v_max_i32_e32 v0, v8, v0
+; GFX6-NEXT: v_sub_i32_e32 v8, vcc, 0, v1
+; GFX6-NEXT: v_bfe_i32 v5, v5, 0, 16
+; GFX6-NEXT: v_max_i32_e32 v1, v8, v1
+; GFX6-NEXT: v_sub_i32_e32 v8, vcc, 0, v4
; GFX6-NEXT: v_bfe_i32 v6, v6, 0, 16
+; GFX6-NEXT: v_max_i32_e32 v4, v8, v4
+; GFX6-NEXT: v_sub_i32_e32 v8, vcc, 0, v5
; GFX6-NEXT: v_bfe_i32 v7, v7, 0, 16
+; GFX6-NEXT: v_max_i32_e32 v5, v8, v5
; GFX6-NEXT: v_sub_i32_e32 v8, vcc, 0, v6
-; GFX6-NEXT: v_max_i32_e32 v6, v6, v8
+; GFX6-NEXT: v_max_i32_e32 v6, v8, v6
; GFX6-NEXT: v_sub_i32_e32 v8, vcc, 0, v7
-; GFX6-NEXT: v_max_i32_e32 v7, v7, v8
-; GFX6-NEXT: v_bfe_i32 v4, v4, 0, 16
-; GFX6-NEXT: v_lshlrev_b32_e32 v7, 16, v7
-; GFX6-NEXT: v_bfe_i32 v5, v5, 0, 16
-; GFX6-NEXT: v_or_b32_e32 v6, v6, v7
-; GFX6-NEXT: v_sub_i32_e32 v7, vcc, 0, v4
-; GFX6-NEXT: v_max_i32_e32 v4, v4, v7
-; GFX6-NEXT: v_sub_i32_e32 v7, vcc, 0, v5
-; GFX6-NEXT: v_max_i32_e32 v5, v5, v7
+; GFX6-NEXT: v_max_i32_e32 v7, v8, v7
; GFX6-NEXT: v_bfe_i32 v2, v2, 0, 16
-; GFX6-NEXT: v_lshlrev_b32_e32 v5, 16, v5
+; GFX6-NEXT: v_lshlrev_b32_e32 v8, 16, v7
; GFX6-NEXT: v_bfe_i32 v3, v3, 0, 16
-; GFX6-NEXT: v_or_b32_e32 v4, v4, v5
-; GFX6-NEXT: v_sub_i32_e32 v5, vcc, 0, v2
-; GFX6-NEXT: v_max_i32_e32 v2, v2, v5
-; GFX6-NEXT: v_sub_i32_e32 v5, vcc, 0, v3
-; GFX6-NEXT: v_max_i32_e32 v3, v3, v5
-; GFX6-NEXT: v_bfe_i32 v0, v0, 0, 16
-; GFX6-NEXT: v_lshlrev_b32_e32 v3, 16, v3
-; GFX6-NEXT: v_bfe_i32 v1, v1, 0, 16
-; GFX6-NEXT: v_or_b32_e32 v2, v2, v3
-; GFX6-NEXT: v_sub_i32_e32 v3, vcc, 0, v0
-; GFX6-NEXT: v_max_i32_e32 v0, v0, v3
-; GFX6-NEXT: v_sub_i32_e32 v3, vcc, 0, v1
-; GFX6-NEXT: v_max_i32_e32 v1, v1, v3
+; GFX6-NEXT: v_or_b32_e32 v6, v6, v8
+; GFX6-NEXT: v_sub_i32_e32 v8, vcc, 0, v2
+; GFX6-NEXT: v_max_i32_e32 v2, v8, v2
+; GFX6-NEXT: v_sub_i32_e32 v8, vcc, 0, v3
+; GFX6-NEXT: v_max_i32_e32 v3, v8, v3
+; GFX6-NEXT: v_lshlrev_b32_e32 v8, 16, v3
; GFX6-NEXT: v_lshlrev_b32_e32 v1, 16, v1
+; GFX6-NEXT: v_lshlrev_b32_e32 v5, 16, v5
+; GFX6-NEXT: v_or_b32_e32 v2, v2, v8
; GFX6-NEXT: v_or_b32_e32 v0, v0, v1
-; GFX6-NEXT: v_alignbit_b32 v1, v2, v0, 16
-; GFX6-NEXT: v_alignbit_b32 v5, v6, v4, 16
-; GFX6-NEXT: v_lshrrev_b32_e32 v3, 16, v2
-; GFX6-NEXT: v_lshrrev_b32_e32 v7, 16, v6
+; GFX6-NEXT: v_or_b32_e32 v4, v4, v5
+; GFX6-NEXT: v_alignbit_b32 v1, v2, v1, 16
+; GFX6-NEXT: v_alignbit_b32 v5, v6, v5, 16
; GFX6-NEXT: s_setpc_b64 s[30:31]
;
; GFX7-LABEL: v_abs_v8i16:
; GFX7: ; %bb.0:
; GFX7-NEXT: s_waitcnt vmcnt(0) expcnt(0) lgkmcnt(0)
+; GFX7-NEXT: v_bfe_i32 v0, v0, 0, 16
+; GFX7-NEXT: v_bfe_i32 v1, v1, 0, 16
+; GFX7-NEXT: v_sub_i32_e32 v8, vcc, 0, v0
+; GFX7-NEXT: v_bfe_i32 v4, v4, 0, 16
+; GFX7-NEXT: v_max_i32_e32 v0, v8, v0
+; GFX7-NEXT: v_sub_i32_e32 v8, vcc, 0, v1
+; GFX7-NEXT: v_bfe_i32 v5, v5, 0, 16
+; GFX7-NEXT: v_max_i32_e32 v1, v8, v1
+; GFX7-NEXT: v_sub_i32_e32 v8, vcc, 0, v4
; GFX7-NEXT: v_bfe_i32 v6, v6, 0, 16
+; GFX7-NEXT: v_max_i32_e32 v4, v8, v4
+; GFX7-NEXT: v_sub_i32_e32 v8, vcc, 0, v5
; GFX7-NEXT: v_bfe_i32 v7, v7, 0, 16
+; GFX7-NEXT: v_max_i32_e32 v5, v8, v5
; GFX7-NEXT: v_sub_i32_e32 v8, vcc, 0, v6
-; GFX7-NEXT: v_max_i32_e32 v6, v6, v8
+; GFX7-NEXT: v_max_i32_e32 v6, v8, v6
; GFX7-NEXT: v_sub_i32_e32 v8, vcc, 0, v7
-; GFX7-NEXT: v_max_i32_e32 v7, v7, v8
-; GFX7-NEXT: v_bfe_i32 v4, v4, 0, 16
-; GFX7-NEXT: v_lshlrev_b32_e32 v7, 16, v7
-; GFX7-NEXT: v_bfe_i32 v5, v5, 0, 16
-; GFX7-NEXT: v_or_b32_e32 v6, v6, v7
-; GFX7-NEXT: v_sub_i32_e32 v7, vcc, 0, v4
-; GFX7-NEXT: v_max_i32_e32 v4, v4, v7
-; GFX7-NEXT: v_sub_i32_e32 v7, vcc, 0, v5
-; GFX7-NEXT: v_max_i32_e32 v5, v5, v7
+; GFX7-NEXT: v_max_i32_e32 v7, v8, v7
; GFX7-NEXT: v_bfe_i32 v2, v2, 0, 16
-; GFX7-NEXT: v_lshlrev_b32_e32 v5, 16, v5
+; GFX7-NEXT: v_lshlrev_b32_e32 v8, 16, v7
; GFX7-NEXT: v_bfe_i32 v3, v3, 0, 16
-; GFX7-NEXT: v_or_b32_e32 v4, v4, v5
-; GFX7-NEXT: v_sub_i32_e32 v5, vcc, 0, v2
-; GFX7-NEXT: v_max_i32_e32 v2, v2, v5
-; GFX7-NEXT: v_sub_i32_e32 v5, vcc, 0, v3
-; GFX7-NEXT: v_max_i32_e32 v3, v3, v5
-; GFX7-NEXT: v_bfe_i32 v0, v0, 0, 16
-; GFX7-NEXT: v_lshlrev_b32_e32 v3, 16, v3
-; GFX7-NEXT: v_bfe_i32 v1, v1, 0, 16
-; GFX7-NEXT: v_or_b32_e32 v2, v2, v3
-; GFX7-NEXT: v_sub_i32_e32 v3, vcc, 0, v0
-; GFX7-NEXT: v_max_i32_e32 v0, v0, v3
-; GFX7-NEXT: v_sub_i32_e32 v3, vcc, 0, v1
-; GFX7-NEXT: v_max_i32_e32 v1, v1, v3
+; GFX7-NEXT: v_or_b32_e32 v6, v6, v8
+; GFX7-NEXT: v_sub_i32_e32 v8, vcc, 0, v2
+; GFX7-NEXT: v_max_i32_e32 v2, v8, v2
+; GFX7-NEXT: v_sub_i32_e32 v8, vcc, 0, v3
+; GFX7-NEXT: v_max_i32_e32 v3, v8, v3
+; GFX7-NEXT: v_lshlrev_b32_e32 v8, 16, v3
; GFX7-NEXT: v_lshlrev_b32_e32 v1, 16, v1
+; GFX7-NEXT: v_lshlrev_b32_e32 v5, 16, v5
+; GFX7-NEXT: v_or_b32_e32 v2, v2, v8
; GFX7-NEXT: v_or_b32_e32 v0, v0, v1
-; GFX7-NEXT: v_alignbit_b32 v1, v2, v0, 16
-; GFX7-NEXT: v_alignbit_b32 v5, v6, v4, 16
-; GFX7-NEXT: v_lshrrev_b32_e32 v3, 16, v2
-; GFX7-NEXT: v_lshrrev_b32_e32 v7, 16, v6
+; GFX7-NEXT: v_or_b32_e32 v4, v4, v5
+; GFX7-NEXT: v_alignbit_b32 v1, v2, v1, 16
+; GFX7-NEXT: v_alignbit_b32 v5, v6, v5, 16
; GFX7-NEXT: s_setpc_b64 s[30:31]
;
; GFX8-LABEL: v_abs_v8i16:
@@ -682,155 +674,147 @@ define <16 x i16> @v_abs_v16i16(<16 x i16> %arg) {
; GFX6-LABEL: v_abs_v16i16:
; GFX6: ; %bb.0:
; GFX6-NEXT: s_waitcnt vmcnt(0) expcnt(0) lgkmcnt(0)
+; GFX6-NEXT: v_bfe_i32 v0, v0, 0, 16
+; GFX6-NEXT: v_bfe_i32 v1, v1, 0, 16
+; GFX6-NEXT: v_sub_i32_e32 v16, vcc, 0, v0
+; GFX6-NEXT: v_bfe_i32 v4, v4, 0, 16
+; GFX6-NEXT: v_max_i32_e32 v0, v16, v0
+; GFX6-NEXT: v_sub_i32_e32 v16, vcc, 0, v1
+; GFX6-NEXT: v_bfe_i32 v5, v5, 0, 16
+; GFX6-NEXT: v_max_i32_e32 v1, v16, v1
+; GFX6-NEXT: v_sub_i32_e32 v16, vcc, 0, v4
+; GFX6-NEXT: v_bfe_i32 v8, v8, 0, 16
+; GFX6-NEXT: v_max_i32_e32 v4, v16, v4
+; GFX6-NEXT: v_sub_i32_e32 v16, vcc, 0, v5
+; GFX6-NEXT: v_bfe_i32 v9, v9, 0, 16
+; GFX6-NEXT: v_max_i32_e32 v5, v16, v5
+; GFX6-NEXT: v_sub_i32_e32 v16, vcc, 0, v8
+; GFX6-NEXT: v_bfe_i32 v12, v12, 0, 16
+; GFX6-NEXT: v_max_i32_e32 v8, v16, v8
+; GFX6-NEXT: v_sub_i32_e32 v16, vcc, 0, v9
+; GFX6-NEXT: v_bfe_i32 v13, v13, 0, 16
+; GFX6-NEXT: v_max_i32_e32 v9, v16, v9
+; GFX6-NEXT: v_sub_i32_e32 v16, vcc, 0, v12
; GFX6-NEXT: v_bfe_i32 v14, v14, 0, 16
+; GFX6-NEXT: v_max_i32_e32 v12, v16, v12
+; GFX6-NEXT: v_sub_i32_e32 v16, vcc, 0, v13
; GFX6-NEXT: v_bfe_i32 v15, v15, 0, 16
+; GFX6-NEXT: v_max_i32_e32 v13, v16, v13
; GFX6-NEXT: v_sub_i32_e32 v16, vcc, 0, v14
-; GFX6-NEXT: v_max_i32_e32 v14, v14, v16
+; GFX6-NEXT: v_max_i32_e32 v14, v16, v14
; GFX6-NEXT: v_sub_i32_e32 v16, vcc, 0, v15
-; GFX6-NEXT: v_max_i32_e32 v15, v15, v16
-; GFX6-NEXT: v_bfe_i32 v12, v12, 0, 16
-; GFX6-NEXT: v_lshlrev_b32_e32 v15, 16, v15
-; GFX6-NEXT: v_bfe_i32 v13, v13, 0, 16
-; GFX6-NEXT: v_or_b32_e32 v14, v14, v15
-; GFX6-NEXT: v_sub_i32_e32 v15, vcc, 0, v12
-; GFX6-NEXT: v_max_i32_e32 v12, v12, v15
-; GFX6-NEXT: v_sub_i32_e32 v15, vcc, 0, v13
-; GFX6-NEXT: v_max_i32_e32 v13, v13, v15
+; GFX6-NEXT: v_max_i32_e32 v15, v16, v15
; GFX6-NEXT: v_bfe_i32 v10, v10, 0, 16
-; GFX6-NEXT: v_lshlrev_b32_e32 v13, 16, v13
+; GFX6-NEXT: v_lshlrev_b32_e32 v16, 16, v15
; GFX6-NEXT: v_bfe_i32 v11, v11, 0, 16
-; GFX6-NEXT: v_or_b32_e32 v12, v12, v13
-; GFX6-NEXT: v_sub_i32_e32 v13, vcc, 0, v10
-; GFX6-NEXT: v_max_i32_e32 v10, v10, v13
-; GFX6-NEXT: v_sub_i32_e32 v13, vcc, 0, v11
-; GFX6-NEXT: v_max_i32_e32 v11, v11, v13
-; GFX6-NEXT: v_bfe_i32 v8, v8, 0, 16
-; GFX6-NEXT: v_lshlrev_b32_e32 v11, 16, v11
-; GFX6-NEXT: v_bfe_i32 v9, v9, 0, 16
-; GFX6-NEXT: v_or_b32_e32 v10, v10, v11
-; GFX6-NEXT: v_sub_i32_e32 v11, vcc, 0, v8
-; GFX6-NEXT: v_max_i32_e32 v8, v8, v11
-; GFX6-NEXT: v_sub_i32_e32 v11, vcc, 0, v9
-; GFX6-NEXT: v_max_i32_e32 v9, v9, v11
+; GFX6-NEXT: v_or_b32_e32 v14, v14, v16
+; GFX6-NEXT: v_sub_i32_e32 v16, vcc, 0, v10
+; GFX6-NEXT: v_max_i32_e32 v10, v16, v10
+; GFX6-NEXT: v_sub_i32_e32 v16, vcc, 0, v11
+; GFX6-NEXT: v_max_i32_e32 v11, v16, v11
; GFX6-NEXT: v_bfe_i32 v6, v6, 0, 16
-; GFX6-NEXT: v_lshlrev_b32_e32 v9, 16, v9
+; GFX6-NEXT: v_lshlrev_b32_e32 v16, 16, v11
; GFX6-NEXT: v_bfe_i32 v7, v7, 0, 16
-; GFX6-NEXT: v_or_b32_e32 v8, v8, v9
-; GFX6-NEXT: v_sub_i32_e32 v9, vcc, 0, v6
-; GFX6-NEXT: v_max_i32_e32 v6, v6, v9
-; GFX6-NEXT: v_sub_i32_e32 v9, vcc, 0, v7
-; GFX6-NEXT: v_max_i32_e32 v7, v7, v9
-; GFX6-NEXT: v_bfe_i32 v4, v4, 0, 16
-; GFX6-NEXT: v_lshlrev_b32_e32 v7, 16, v7
-; GFX6-NEXT: v_bfe_i32 v5, v5, 0, 16
-; GFX6-NEXT: v_or_b32_e32 v6, v6, v7
-; GFX6-NEXT: v_sub_i32_e32 v7, vcc, 0, v4
-; GFX6-NEXT: v_max_i32_e32 v4, v4, v7
-; GFX6-NEXT: v_sub_i32_e32 v7, vcc, 0, v5
-; GFX6-NEXT: v_max_i32_e32 v5, v5, v7
+; GFX6-NEXT: v_or_b32_e32 v10, v10, v16
+; GFX6-NEXT: v_sub_i32_e32 v16, vcc, 0, v6
+; GFX6-NEXT: v_max_i32_e32 v6, v16, v6
+; GFX6-NEXT: v_sub_i32_e32 v16, vcc, 0, v7
+; GFX6-NEXT: v_max_i32_e32 v7, v16, v7
; GFX6-NEXT: v_bfe_i32 v2, v2, 0, 16
-; GFX6-NEXT: v_lshlrev_b32_e32 v5, 16, v5
+; GFX6-NEXT: v_lshlrev_b32_e32 v16, 16, v7
; GFX6-NEXT: v_bfe_i32 v3, v3, 0, 16
-; GFX6-NEXT: v_or_b32_e32 v4, v4, v5
-; GFX6-NEXT: v_sub_i32_e32 v5, vcc, 0, v2
-; GFX6-NEXT: v_max_i32_e32 v2, v2, v5
-; GFX6-NEXT: v_sub_i32_e32 v5, vcc, 0, v3
-; GFX6-NEXT: v_max_i32_e32 v3, v3, v5
-; GFX6-NEXT: v_bfe_i32 v0, v0, 0, 16
-; GFX6-NEXT: v_lshlrev_b32_e32 v3, 16, v3
-; GFX6-NEXT: v_bfe_i32 v1, v1, 0, 16
-; GFX6-NEXT: v_or_b32_e32 v2, v2, v3
-; GFX6-NEXT: v_sub_i32_e32 v3, vcc, 0, v0
-; GFX6-NEXT: v_max_i32_e32 v0, v0, v3
-; GFX6-NEXT: v_sub_i32_e32 v3, vcc, 0, v1
-; GFX6-NEXT: v_max_i32_e32 v1, v1, v3
+; GFX6-NEXT: v_or_b32_e32 v6, v6, v16
+; GFX6-NEXT: v_sub_i32_e32 v16, vcc, 0, v2
+; GFX6-NEXT: v_max_i32_e32 v2, v16, v2
+; GFX6-NEXT: v_sub_i32_e32 v16, vcc, 0, v3
+; GFX6-NEXT: v_max_i32_e32 v3, v16, v3
+; GFX6-NEXT: v_lshlrev_b32_e32 v16, 16, v3
; GFX6-NEXT: v_lshlrev_b32_e32 v1, 16, v1
+; GFX6-NEXT: v_lshlrev_b32_e32 v5, 16, v5
+; GFX6-NEXT: v_lshlrev_b32_e32 v9, 16, v9
+; GFX6-NEXT: v_lshlrev_b32_e32 v13, 16, v13
+; GFX6-NEXT: v_or_b32_e32 v2, v2, v16
; GFX6-NEXT: v_or_b32_e32 v0, v0, v1
-; GFX6-NEXT: v_alignbit_b32 v1, v2, v0, 16
-; GFX6-NEXT: v_alignbit_b32 v5, v6, v4, 16
-; GFX6-NEXT: v_alignbit_b32 v9, v10, v8, 16
-; GFX6-NEXT: v_alignbit_b32 v13, v14, v12, 16
-; GFX6-NEXT: v_lshrrev_b32_e32 v3, 16, v2
-; GFX6-NEXT: v_lshrrev_b32_e32 v7, 16, v6
-; GFX6-NEXT: v_lshrrev_b32_e32 v11, 16, v10
-; GFX6-NEXT: v_lshrrev_b32_e32 v15, 16, v14
+; GFX6-NEXT: v_or_b32_e32 v4, v4, v5
+; GFX6-NEXT: v_or_b32_e32 v8, v8, v9
+; GFX6-NEXT: v_or_b32_e32 v12, v12, v13
+; GFX6-NEXT: v_alignbit_b32 v1, v2, v1, 16
+; GFX6-NEXT: v_alignbit_b32 v5, v6, v5, 16
+; GFX6-NEXT: v_alignbit_b32 v9, v10, v9, 16
+; GFX6-NEXT: v_alignbit_b32 v13, v14, v13, 16
; GFX6-NEXT: s_setpc_b64 s[30:31]
;
; GFX7-LABEL: v_abs_v16i16:
; GFX7: ; %bb.0:
; GFX7-NEXT: s_waitcnt vmcnt(0) expcnt(0) lgkmcnt(0)
+; GFX7-NEXT: v_bfe_i32 v0, v0, 0, 16
+; GFX7-NEXT: v_bfe_i32 v1, v1, 0, 16
+; GFX7-NEXT: v_sub_i32_e32 v16, vcc, 0, v0
+; GFX7-NEXT: v_bfe_i32 v4, v4, 0, 16
+; GFX7-NEXT: v_max_i32_e32 v0, v16, v0
+; GFX7-NEXT: v_sub_i32_e32 v16, vcc, 0, v1
+; GFX7-NEXT: v_bfe_i32 v5, v5, 0, 16
+; GFX7-NEXT: v_max_i32_e32 v1, v16, v1
+; GFX7-NEXT: v_sub_i32_e32 v16, vcc, 0, v4
+; GFX7-NEXT: v_bfe_i32 v8, v8, 0, 16
+; GFX7-NEXT: v_max_i32_e32 v4, v16, v4
+; GFX7-NEXT: v_sub_i32_e32 v16, vcc, 0, v5
+; GFX7-NEXT: v_bfe_i32 v9, v9, 0, 16
+; GFX7-NEXT: v_max_i32_e32 v5, v16, v5
+; GFX7-NEXT: v_sub_i32_e32 v16, vcc, 0, v8
+; GFX7-NEXT: v_bfe_i32 v12, v12, 0, 16
+; GFX7-NEXT: v_max_i32_e32 v8, v16, v8
+; GFX7-NEXT: v_sub_i32_e32 v16, vcc, 0, v9
+; GFX7-NEXT: v_bfe_i32 v13, v13, 0, 16
+; GFX7-NEXT: v_max_i32_e32 v9, v16, v9
+; GFX7-NEXT: v_sub_i32_e32 v16, vcc, 0, v12
; GFX7-NEXT: v_bfe_i32 v14, v14, 0, 16
+; GFX7-NEXT: v_max_i32_e32 v12, v16, v12
+; GFX7-NEXT: v_sub_i32_e32 v16, vcc, 0, v13
; GFX7-NEXT: v_bfe_i32 v15, v15, 0, 16
+; GFX7-NEXT: v_max_i32_e32 v13, v16, v13
; GFX7-NEXT: v_sub_i32_e32 v16, vcc, 0, v14
-; GFX7-NEXT: v_max_i32_e32 v14, v14, v16
+; GFX7-NEXT: v_max_i32_e32 v14, v16, v14
; GFX7-NEXT: v_sub_i32_e32 v16, vcc, 0, v15
-; GFX7-NEXT: v_max_i32_e32 v15, v15, v16
-; GFX7-NEXT: v_bfe_i32 v12, v12, 0, 16
-; GFX7-NEXT: v_lshlrev_b32_e32 v15, 16, v15
-; GFX7-NEXT: v_bfe_i32 v13, v13, 0, 16
-; GFX7-NEXT: v_or_b32_e32 v14, v14, v15
-; GFX7-NEXT: v_sub_i32_e32 v15, vcc, 0, v12
-; GFX7-NEXT: v_max_i32_e32 v12, v12, v15
-; GFX7-NEXT: v_sub_i32_e32 v15, vcc, 0, v13
-; GFX7-NEXT: v_max_i32_e32 v13, v13, v15
+; GFX7-NEXT: v_max_i32_e32 v15, v16, v15
; GFX7-NEXT: v_bfe_i32 v10, v10, 0, 16
-; GFX7-NEXT: v_lshlrev_b32_e32 v13, 16, v13
+; GFX7-NEXT: v_lshlrev_b32_e32 v16, 16, v15
; GFX7-NEXT: v_bfe_i32 v11, v11, 0, 16
-; GFX7-NEXT: v_or_b32_e32 v12, v12, v13
-; GFX7-NEXT: v_sub_i32_e32 v13, vcc, 0, v10
-; GFX7-NEXT: v_max_i32_e32 v10, v10, v13
-; GFX7-NEXT: v_sub_i32_e32 v13, vcc, 0, v11
-; GFX7-NEXT: v_max_i32_e32 v11, v11, v13
-; GFX7-NEXT: v_bfe_i32 v8, v8, 0, 16
-; GFX7-NEXT: v_lshlrev_b32_e32 v11, 16, v11
-; GFX7-NEXT: v_bfe_i32 v9, v9, 0, 16
-; GFX7-NEXT: v_or_b32_e32 v10, v10, v11
-; GFX7-NEXT: v_sub_i32_e32 v11, vcc, 0, v8
-; GFX7-NEXT: v_max_i32_e32 v8, v8, v11
-; GFX7-NEXT: v_sub_i32_e32 v11, vcc, 0, v9
-; GFX7-NEXT: v_max_i32_e32 v9, v9, v11
+; GFX7-NEXT: v_or_b32_e32 v14, v14, v16
+; GFX7-NEXT: v_sub_i32_e32 v16, vcc, 0, v10
+; GFX7-NEXT: v_max_i32_e32 v10, v16, v10
+; GFX7-NEXT: v_sub_i32_e32 v16, vcc, 0, v11
+; GFX7-NEXT: v_max_i32_e32 v11, v16, v11
; GFX7-NEXT: v_bfe_i32 v6, v6, 0, 16
-; GFX7-NEXT: v_lshlrev_b32_e32 v9, 16, v9
+; GFX7-NEXT: v_lshlrev_b32_e32 v16, 16, v11
; GFX7-NEXT: v_bfe_i32 v7, v7, 0, 16
-; GFX7-NEXT: v_or_b32_e32 v8, v8, v9
-; GFX7-NEXT: v_sub_i32_e32 v9, vcc, 0, v6
-; GFX7-NEXT: v_max_i32_e32 v6, v6, v9
-; GFX7-NEXT: v_sub_i32_e32 v9, vcc, 0, v7
-; GFX7-NEXT: v_max_i32_e32 v7, v7, v9
-; GFX7-NEXT: v_bfe_i32 v4, v4, 0, 16
-; GFX7-NEXT: v_lshlrev_b32_e32 v7, 16, v7
-; GFX7-NEXT: v_bfe_i32 v5, v5, 0, 16
-; GFX7-NEXT: v_or_b32_e32 v6, v6, v7
-; GFX7-NEXT: v_sub_i32_e32 v7, vcc, 0, v4
-; GFX7-NEXT: v_max_i32_e32 v4, v4, v7
-; GFX7-NEXT: v_sub_i32_e32 v7, vcc, 0, v5
-; GFX7-NEXT: v_max_i32_e32 v5, v5, v7
+; GFX7-NEXT: v_or_b32_e32 v10, v10, v16
+; GFX7-NEXT: v_sub_i32_e32 v16, vcc, 0, v6
+; GFX7-NEXT: v_max_i32_e32 v6, v16, v6
+; GFX7-NEXT: v_sub_i32_e32 v16, vcc, 0, v7
+; GFX7-NEXT: v_max_i32_e32 v7, v16, v7
; GFX7-NEXT: v_bfe_i32 v2, v2, 0, 16
-; GFX7-NEXT: v_lshlrev_b32_e32 v5, 16, v5
+; GFX7-NEXT: v_lshlrev_b32_e32 v16, 16, v7
; GFX7-NEXT: v_bfe_i32 v3, v3, 0, 16
-; GFX7-NEXT: v_or_b32_e32 v4, v4, v5
-; GFX7-NEXT: v_sub_i32_e32 v5, vcc, 0, v2
-; GFX7-NEXT: v_max_i32_e32 v2, v2, v5
-; GFX7-NEXT: v_sub_i32_e32 v5, vcc, 0, v3
-; GFX7-NEXT: v_max_i32_e32 v3, v3, v5
-; GFX7-NEXT: v_bfe_i32 v0, v0, 0, 16
-; GFX7-NEXT: v_lshlrev_b32_e32 v3, 16, v3
-; GFX7-NEXT: v_bfe_i32 v1, v1, 0, 16
-; GFX7-NEXT: v_or_b32_e32 v2, v2, v3
-; GFX7-NEXT: v_sub_i32_e32 v3, vcc, 0, v0
-; GFX7-NEXT: v_max_i32_e32 v0, v0, v3
-; GFX7-NEXT: v_sub_i32_e32 v3, vcc, 0, v1
-; GFX7-NEXT: v_max_i32_e32 v1, v1, v3
+; GFX7-NEXT: v_or_b32_e32 v6, v6, v16
+; GFX7-NEXT: v_sub_i32_e32 v16, vcc, 0, v2
+; GFX7-NEXT: v_max_i32_e32 v2, v16, v2
+; GFX7-NEXT: v_sub_i32_e32 v16, vcc, 0, v3
+; GFX7-NEXT: v_max_i32_e32 v3, v16, v3
+; GFX7-NEXT: v_lshlrev_b32_e32 v16, 16, v3
; GFX7-NEXT: v_lshlrev_b32_e32 v1, 16, v1
+; GFX7-NEXT: v_lshlrev_b32_e32 v5, 16, v5
+; GFX7-NEXT: v_lshlrev_b32_e32 v9, 16, v9
+; GFX7-NEXT: v_lshlrev_b32_e32 v13, 16, v13
+; GFX7-NEXT: v_or_b32_e32 v2, v2, v16
; GFX7-NEXT: v_or_b32_e32 v0, v0, v1
-; GFX7-NEXT: v_alignbit_b32 v1, v2, v0, 16
-; GFX7-NEXT: v_alignbit_b32 v5, v6, v4, 16
-; GFX7-NEXT: v_alignbit_b32 v9, v10, v8, 16
-; GFX7-NEXT: v_alignbit_b32 v13, v14, v12, 16
-; GFX7-NEXT: v_lshrrev_b32_e32 v3, 16, v2
-; GFX7-NEXT: v_lshrrev_b32_e32 v7, 16, v6
-; GFX7-NEXT: v_lshrrev_b32_e32 v11, 16, v10
-; GFX7-NEXT: v_lshrrev_b32_e32 v15, 16, v14
+; GFX7-NEXT: v_or_b32_e32 v4, v4, v5
+; GFX7-NEXT: v_or_b32_e32 v8, v8, v9
+; GFX7-NEXT: v_or_b32_e32 v12, v12, v13
+; GFX7-NEXT: v_alignbit_b32 v1, v2, v1, 16
+; GFX7-NEXT: v_alignbit_b32 v5, v6, v5, 16
+; GFX7-NEXT: v_alignbit_b32 v9, v10, v9, 16
+; GFX7-NEXT: v_alignbit_b32 v13, v14, v13, 16
; GFX7-NEXT: s_setpc_b64 s[30:31]
;
; GFX8-LABEL: v_abs_v16i16:
@@ -974,303 +958,287 @@ define <32 x i16> @v_abs_v32i16(<32 x i16> %arg) {
; GFX6-LABEL: v_abs_v32i16:
; GFX6: ; %bb.0:
; GFX6-NEXT: s_waitcnt vmcnt(0) expcnt(0) lgkmcnt(0)
+; GFX6-NEXT: v_bfe_i32 v0, v0, 0, 16
+; GFX6-NEXT: v_sub_i32_e32 v31, vcc, 0, v0
+; GFX6-NEXT: v_bfe_i32 v1, v1, 0, 16
+; GFX6-NEXT: v_max_i32_e32 v0, v31, v0
+; GFX6-NEXT: v_sub_i32_e32 v31, vcc, 0, v1
+; GFX6-NEXT: v_bfe_i32 v4, v4, 0, 16
+; GFX6-NEXT: v_max_i32_e32 v1, v31, v1
+; GFX6-NEXT: v_sub_i32_e32 v31, vcc, 0, v4
+; GFX6-NEXT: v_bfe_i32 v5, v5, 0, 16
+; GFX6-NEXT: v_max_i32_e32 v4, v31, v4
+; GFX6-NEXT: v_sub_i32_e32 v31, vcc, 0, v5
+; GFX6-NEXT: v_bfe_i32 v8, v8, 0, 16
+; GFX6-NEXT: v_max_i32_e32 v5, v31, v5
+; GFX6-NEXT: v_sub_i32_e32 v31, vcc, 0, v8
+; GFX6-NEXT: v_bfe_i32 v9, v9, 0, 16
+; GFX6-NEXT: v_max_i32_e32 v8, v31, v8
+; GFX6-NEXT: v_sub_i32_e32 v31, vcc, 0, v9
+; GFX6-NEXT: v_bfe_i32 v12, v12, 0, 16
+; GFX6-NEXT: v_max_i32_e32 v9, v31, v9
+; GFX6-NEXT: v_sub_i32_e32 v31, vcc, 0, v12
+; GFX6-NEXT: v_bfe_i32 v13, v13, 0, 16
+; GFX6-NEXT: v_max_i32_e32 v12, v31, v12
+; GFX6-NEXT: v_sub_i32_e32 v31, vcc, 0, v13
+; GFX6-NEXT: v_bfe_i32 v16, v16, 0, 16
+; GFX6-NEXT: v_max_i32_e32 v13, v31, v13
+; GFX6-NEXT: v_sub_i32_e32 v31, vcc, 0, v16
+; GFX6-NEXT: v_bfe_i32 v17, v17, 0, 16
+; GFX6-NEXT: v_max_i32_e32 v16, v31, v16
+; GFX6-NEXT: v_sub_i32_e32 v31, vcc, 0, v17
+; GFX6-NEXT: v_bfe_i32 v20, v20, 0, 16
+; GFX6-NEXT: v_max_i32_e32 v17, v31, v17
+; GFX6-NEXT: v_sub_i32_e32 v31, vcc, 0, v20
+; GFX6-NEXT: v_bfe_i32 v21, v21, 0, 16
+; GFX6-NEXT: v_max_i32_e32 v20, v31, v20
+; GFX6-NEXT: v_sub_i32_e32 v31, vcc, 0, v21
+; GFX6-NEXT: v_bfe_i32 v24, v24, 0, 16
+; GFX6-NEXT: v_max_i32_e32 v21, v31, v21
+; GFX6-NEXT: v_sub_i32_e32 v31, vcc, 0, v24
+; GFX6-NEXT: v_bfe_i32 v25, v25, 0, 16
+; GFX6-NEXT: v_max_i32_e32 v24, v31, v24
+; GFX6-NEXT: v_sub_i32_e32 v31, vcc, 0, v25
; GFX6-NEXT: v_bfe_i32 v28, v28, 0, 16
+; GFX6-NEXT: v_max_i32_e32 v25, v31, v25
; GFX6-NEXT: v_sub_i32_e32 v31, vcc, 0, v28
; GFX6-NEXT: v_bfe_i32 v29, v29, 0, 16
-; GFX6-NEXT: v_max_i32_e32 v28, v28, v31
+; GFX6-NEXT: v_max_i32_e32 v28, v31, v28
; GFX6-NEXT: v_sub_i32_e32 v31, vcc, 0, v29
; GFX6-NEXT: v_bfe_i32 v30, v30, 0, 16
-; GFX6-NEXT: v_max_i32_e32 v29, v29, v31
+; GFX6-NEXT: v_max_i32_e32 v29, v31, v29
; GFX6-NEXT: v_sub_i32_e32 v31, vcc, 0, v30
+; GFX6-NEXT: v_max_i32_e32 v30, v31, v30
+; GFX6-NEXT: buffer_load_dword v31, off, s[0:3], s32
; GFX6-NEXT: v_bfe_i32 v26, v26, 0, 16
-; GFX6-NEXT: v_max_i32_e32 v30, v30, v31
-; GFX6-NEXT: v_sub_i32_e32 v31, vcc, 0, v26
; GFX6-NEXT: v_bfe_i32 v27, v27, 0, 16
-; GFX6-NEXT: v_max_i32_e32 v26, v26, v31
-; GFX6-NEXT: v_sub_i32_e32 v31, vcc, 0, v27
-; GFX6-NEXT: v_bfe_i32 v24, v24, 0, 16
-; GFX6-NEXT: v_max_i32_e32 v27, v27, v31
-; GFX6-NEXT: v_sub_i32_e32 v31, vcc, 0, v24
-; GFX6-NEXT: v_bfe_i32 v25, v25, 0, 16
-; GFX6-NEXT: v_max_i32_e32 v24, v24, v31
-; GFX6-NEXT: v_sub_i32_e32 v31, vcc, 0, v25
; GFX6-NEXT: v_bfe_i32 v22, v22, 0, 16
-; GFX6-NEXT: v_max_i32_e32 v25, v25, v31
-; GFX6-NEXT: v_sub_i32_e32 v31, vcc, 0, v22
; GFX6-NEXT: v_bfe_i32 v23, v23, 0, 16
-; GFX6-NEXT: v_max_i32_e32 v22, v22, v31
-; GFX6-NEXT: v_sub_i32_e32 v31, vcc, 0, v23
-; GFX6-NEXT: v_max_i32_e32 v23, v23, v31
-; GFX6-NEXT: buffer_load_dword v31, off, s[0:3], s32
-; GFX6-NEXT: v_lshlrev_b32_e32 v23, 16, v23
-; GFX6-NEXT: v_lshlrev_b32_e32 v25, 16, v25
-; GFX6-NEXT: v_or_b32_e32 v22, v22, v23
-; GFX6-NEXT: v_or_b32_e32 v24, v24, v25
-; GFX6-NEXT: v_bfe_i32 v21, v21, 0, 16
-; GFX6-NEXT: v_bfe_i32 v20, v20, 0, 16
-; GFX6-NEXT: v_lshlrev_b32_e32 v29, 16, v29
-; GFX6-NEXT: v_or_b32_e32 v28, v28, v29
-; GFX6-NEXT: v_sub_i32_e32 v29, vcc, 0, v20
-; GFX6-NEXT: v_max_i32_e32 v20, v20, v29
; GFX6-NEXT: v_bfe_i32 v18, v18, 0, 16
; GFX6-NEXT: v_bfe_i32 v19, v19, 0, 16
-; GFX6-NEXT: v_bfe_i32 v16, v16, 0, 16
-; GFX6-NEXT: v_bfe_i32 v17, v17, 0, 16
; GFX6-NEXT: v_bfe_i32 v14, v14, 0, 16
; GFX6-NEXT: v_bfe_i32 v15, v15, 0, 16
-; GFX6-NEXT: v_bfe_i32 v12, v12, 0, 16
-; GFX6-NEXT: v_bfe_i32 v13, v13, 0, 16
; GFX6-NEXT: v_bfe_i32 v10, v10, 0, 16
; GFX6-NEXT: v_bfe_i32 v11, v11, 0, 16
-; GFX6-NEXT: v_bfe_i32 v8, v8, 0, 16
-; GFX6-NEXT: v_bfe_i32 v9, v9, 0, 16
; GFX6-NEXT: v_bfe_i32 v6, v6, 0, 16
; GFX6-NEXT: v_bfe_i32 v7, v7, 0, 16
-; GFX6-NEXT: v_bfe_i32 v4, v4, 0, 16
-; GFX6-NEXT: v_bfe_i32 v5, v5, 0, 16
; GFX6-NEXT: v_bfe_i32 v2, v2, 0, 16
; GFX6-NEXT: v_bfe_i32 v3, v3, 0, 16
-; GFX6-NEXT: v_bfe_i32 v0, v0, 0, 16
-; GFX6-NEXT: v_bfe_i32 v1, v1, 0, 16
-; GFX6-NEXT: v_lshlrev_b32_e32 v27, 16, v27
-; GFX6-NEXT: v_or_b32_e32 v26, v26, v27
-; GFX6-NEXT: v_lshrrev_b32_e32 v27, 16, v26
-; GFX6-NEXT: s_waitcnt vmcnt(0)
-; GFX6-NEXT: v_bfe_i32 v23, v31, 0, 16
-; GFX6-NEXT: v_sub_i32_e32 v25, vcc, 0, v23
-; GFX6-NEXT: v_max_i32_e32 v23, v23, v25
-; GFX6-NEXT: v_lshlrev_b32_e32 v23, 16, v23
-; GFX6-NEXT: v_or_b32_e32 v30, v30, v23
-; GFX6-NEXT: v_sub_i32_e32 v23, vcc, 0, v21
-; GFX6-NEXT: v_max_i32_e32 v21, v21, v23
-; GFX6-NEXT: v_lshlrev_b32_e32 v21, 16, v21
-; GFX6-NEXT: v_or_b32_e32 v20, v20, v21
-; GFX6-NEXT: v_sub_i32_e32 v21, vcc, 0, v18
-; GFX6-NEXT: v_max_i32_e32 v18, v18, v21
-; GFX6-NEXT: v_sub_i32_e32 v21, vcc, 0, v19
-; GFX6-NEXT: v_max_i32_e32 v19, v19, v21
-; GFX6-NEXT: v_lshlrev_b32_e32 v19, 16, v19
-; GFX6-NEXT: v_or_b32_e32 v18, v18, v19
-; GFX6-NEXT: v_sub_i32_e32 v19, vcc, 0, v16
-; GFX6-NEXT: v_max_i32_e32 v16, v16, v19
-; GFX6-NEXT: v_sub_i32_e32 v19, vcc, 0, v17
-; GFX6-NEXT: v_max_i32_e32 v17, v17, v19
-; GFX6-NEXT: v_lshlrev_b32_e32 v17, 16, v17
-; GFX6-NEXT: v_or_b32_e32 v16, v16, v17
-; GFX6-NEXT: v_sub_i32_e32 v17, vcc, 0, v14
-; GFX6-NEXT: v_max_i32_e32 v14, v14, v17
-; GFX6-NEXT: v_sub_i32_e32 v17, vcc, 0, v15
-; GFX6-NEXT: v_max_i32_e32 v15, v15, v17
-; GFX6-NEXT: v_lshlrev_b32_e32 v15, 16, v15
-; GFX6-NEXT: v_or_b32_e32 v14, v14, v15
-; GFX6-NEXT: v_sub_i32_e32 v15, vcc, 0, v12
-; GFX6-NEXT: v_max_i32_e32 v12, v12, v15
-; GFX6-NEXT: v_sub_i32_e32 v15, vcc, 0, v13
-; GFX6-NEXT: v_max_i32_e32 v13, v13, v15
-; GFX6-NEXT: v_lshlrev_b32_e32 v13, 16, v13
-; GFX6-NEXT: v_or_b32_e32 v12, v12, v13
-; GFX6-NEXT: v_sub_i32_e32 v13, vcc, 0, v10
-; GFX6-NEXT: v_max_i32_e32 v10, v10, v13
-; GFX6-NEXT: v_sub_i32_e32 v13, vcc, 0, v11
-; GFX6-NEXT: v_max_i32_e32 v11, v11, v13
-; GFX6-NEXT: v_lshlrev_b32_e32 v11, 16, v11
-; GFX6-NEXT: v_or_b32_e32 v10, v10, v11
-; GFX6-NEXT: v_sub_i32_e32 v11, vcc, 0, v8
-; GFX6-NEXT: v_max_i32_e32 v8, v8, v11
-; GFX6-NEXT: v_sub_i32_e32 v11, vcc, 0, v9
-; GFX6-NEXT: v_max_i32_e32 v9, v9, v11
-; GFX6-NEXT: v_lshlrev_b32_e32 v9, 16, v9
-; GFX6-NEXT: v_or_b32_e32 v8, v8, v9
-; GFX6-NEXT: v_sub_i32_e32 v9, vcc, 0, v6
-; GFX6-NEXT: v_max_i32_e32 v6, v6, v9
-; GFX6-NEXT: v_sub_i32_e32 v9, vcc, 0, v7
-; GFX6-NEXT: v_max_i32_e32 v7, v7, v9
-; GFX6-NEXT: v_lshlrev_b32_e32 v7, 16, v7
-; GFX6-NEXT: v_or_b32_e32 v6, v6, v7
-; GFX6-NEXT: v_sub_i32_e32 v7, vcc, 0, v4
-; GFX6-NEXT: v_max_i32_e32 v4, v4, v7
-; GFX6-NEXT: v_sub_i32_e32 v7, vcc, 0, v5
-; GFX6-NEXT: v_max_i32_e32 v5, v5, v7
-; GFX6-NEXT: v_lshlrev_b32_e32 v5, 16, v5
-; GFX6-NEXT: v_or_b32_e32 v4, v4, v5
-; GFX6-NEXT: v_sub_i32_e32 v5, vcc, 0, v2
-; GFX6-NEXT: v_max_i32_e32 v2, v2, v5
-; GFX6-NEXT: v_sub_i32_e32 v5, vcc, 0, v3
-; GFX6-NEXT: v_max_i32_e32 v3, v3, v5
-; GFX6-NEXT: v_lshlrev_b32_e32 v3, 16, v3
-; GFX6-NEXT: v_or_b32_e32 v2, v2, v3
-; GFX6-NEXT: v_sub_i32_e32 v3, vcc, 0, v0
-; GFX6-NEXT: v_max_i32_e32 v0, v0, v3
-; GFX6-NEXT: v_sub_i32_e32 v3, vcc, 0, v1
-; GFX6-NEXT: v_max_i32_e32 v1, v1, v3
; GFX6-NEXT: v_lshlrev_b32_e32 v1, 16, v1
+; GFX6-NEXT: v_lshlrev_b32_e32 v5, 16, v5
+; GFX6-NEXT: v_lshlrev_b32_e32 v9, 16, v9
+; GFX6-NEXT: v_lshlrev_b32_e32 v13, 16, v13
+; GFX6-NEXT: v_lshlrev_b32_e32 v17, 16, v17
+; GFX6-NEXT: v_lshlrev_b32_e32 v21, 16, v21
+; GFX6-NEXT: v_lshlrev_b32_e32 v25, 16, v25
+; GFX6-NEXT: v_lshlrev_b32_e32 v29, 16, v29
; GFX6-NEXT: v_or_b32_e32 v0, v0, v1
-; GFX6-NEXT: v_alignbit_b32 v1, v2, v0, 16
-; GFX6-NEXT: v_alignbit_b32 v5, v6, v4, 16
-; GFX6-NEXT: v_alignbit_b32 v9, v10, v8, 16
-; GFX6-NEXT: v_alignbit_b32 v13, v14, v12, 16
-; GFX6-NEXT: v_alignbit_b32 v17, v18, v16, 16
-; GFX6-NEXT: v_alignbit_b32 v21, v22, v20, 16
-; GFX6-NEXT: v_alignbit_b32 v25, v26, v24, 16
-; GFX6-NEXT: v_alignbit_b32 v29, v30, v28, 16
-; GFX6-NEXT: v_lshrrev_b32_e32 v3, 16, v2
-; GFX6-NEXT: v_lshrrev_b32_e32 v7, 16, v6
-; GFX6-NEXT: v_lshrrev_b32_e32 v11, 16, v10
-; GFX6-NEXT: v_lshrrev_b32_e32 v15, 16, v14
-; GFX6-NEXT: v_lshrrev_b32_e32 v19, 16, v18
-; GFX6-NEXT: v_lshrrev_b32_e32 v23, 16, v22
-; GFX6-NEXT: v_lshrrev_b32_e32 v31, 16, v30
+; GFX6-NEXT: v_or_b32_e32 v4, v4, v5
+; GFX6-NEXT: v_or_b32_e32 v8, v8, v9
+; GFX6-NEXT: v_or_b32_e32 v12, v12, v13
+; GFX6-NEXT: v_or_b32_e32 v16, v16, v17
+; GFX6-NEXT: v_or_b32_e32 v20, v20, v21
+; GFX6-NEXT: v_or_b32_e32 v24, v24, v25
+; GFX6-NEXT: v_or_b32_e32 v28, v28, v29
+; GFX6-NEXT: s_waitcnt vmcnt(0)
+; GFX6-NEXT: v_bfe_i32 v31, v31, 0, 16
+; GFX6-NEXT: v_sub_i32_e32 v32, vcc, 0, v31
+; GFX6-NEXT: v_max_i32_e32 v31, v32, v31
+; GFX6-NEXT: v_lshlrev_b32_e32 v32, 16, v31
+; GFX6-NEXT: v_or_b32_e32 v30, v30, v32
+; GFX6-NEXT: v_sub_i32_e32 v32, vcc, 0, v26
+; GFX6-NEXT: v_max_i32_e32 v26, v32, v26
+; GFX6-NEXT: v_sub_i32_e32 v32, vcc, 0, v27
+; GFX6-NEXT: v_max_i32_e32 v27, v32, v27
+; GFX6-NEXT: v_lshlrev_b32_e32 v32, 16, v27
+; GFX6-NEXT: v_or_b32_e32 v26, v26, v32
+; GFX6-NEXT: v_sub_i32_e32 v32, vcc, 0, v22
+; GFX6-NEXT: v_max_i32_e32 v22, v32, v22
+; GFX6-NEXT: v_sub_i32_e32 v32, vcc, 0, v23
+; GFX6-NEXT: v_max_i32_e32 v23, v32, v23
+; GFX6-NEXT: v_lshlrev_b32_e32 v32, 16, v23
+; GFX6-NEXT: v_or_b32_e32 v22, v22, v32
+; GFX6-NEXT: v_sub_i32_e32 v32, vcc, 0, v18
+; GFX6-NEXT: v_max_i32_e32 v18, v32, v18
+; GFX6-NEXT: v_sub_i32_e32 v32, vcc, 0, v19
+; GFX6-NEXT: v_max_i32_e32 v19, v32, v19
+; GFX6-NEXT: v_lshlrev_b32_e32 v32, 16, v19
+; GFX6-NEXT: v_or_b32_e32 v18, v18, v32
+; GFX6-NEXT: v_sub_i32_e32 v32, vcc, 0, v14
+; GFX6-NEXT: v_max_i32_e32 v14, v32, v14
+; GFX6-NEXT: v_sub_i32_e32 v32, vcc, 0, v15
+; GFX6-NEXT: v_max_i32_e32 v15, v32, v15
+; GFX6-NEXT: v_lshlrev_b32_e32 v32, 16, v15
+; GFX6-NEXT: v_or_b32_e32 v14, v14, v32
+; GFX6-NEXT: v_sub_i32_e32 v32, vcc, 0, v10
+; GFX6-NEXT: v_max_i32_e32 v10, v32, v10
+; GFX6-NEXT: v_sub_i32_e32 v32, vcc, 0, v11
+; GFX6-NEXT: v_max_i32_e32 v11, v32, v11
+; GFX6-NEXT: v_lshlrev_b32_e32 v32, 16, v11
+; GFX6-NEXT: v_or_b32_e32 v10, v10, v32
+; GFX6-NEXT: v_sub_i32_e32 v32, vcc, 0, v6
+; GFX6-NEXT: v_max_i32_e32 v6, v32, v6
+; GFX6-NEXT: v_sub_i32_e32 v32, vcc, 0, v7
+; GFX6-NEXT: v_max_i32_e32 v7, v32, v7
+; GFX6-NEXT: v_lshlrev_b32_e32 v32, 16, v7
+; GFX6-NEXT: v_or_b32_e32 v6, v6, v32
+; GFX6-NEXT: v_sub_i32_e32 v32, vcc, 0, v2
+; GFX6-NEXT: v_max_i32_e32 v2, v32, v2
+; GFX6-NEXT: v_sub_i32_e32 v32, vcc, 0, v3
+; GFX6-NEXT: v_max_i32_e32 v3, v32, v3
+; GFX6-NEXT: v_lshlrev_b32_e32 v32, 16, v3
+; GFX6-NEXT: v_or_b32_e32 v2, v2, v32
+; GFX6-NEXT: v_alignbit_b32 v1, v2, v1, 16
+; GFX6-NEXT: v_alignbit_b32 v5, v6, v5, 16
+; GFX6-NEXT: v_alignbit_b32 v9, v10, v9, 16
+; GFX6-NEXT: v_alignbit_b32 v13, v14, v13, 16
+; GFX6-NEXT: v_alignbit_b32 v17, v18, v17, 16
+; GFX6-NEXT: v_alignbit_b32 v21, v22, v21, 16
+; GFX6-NEXT: v_alignbit_b32 v25, v26, v25, 16
+; GFX6-NEXT: v_alignbit_b32 v29, v30, v29, 16
; GFX6-NEXT: s_setpc_b64 s[30:31]
;
; GFX7-LABEL: v_abs_v32i16:
; GFX7: ; %bb.0:
; GFX7-NEXT: s_waitcnt vmcnt(0) expcnt(0) lgkmcnt(0)
+; GFX7-NEXT: v_bfe_i32 v0, v0, 0, 16
+; GFX7-NEXT: v_sub_i32_e32 v31, vcc, 0, v0
+; GFX7-NEXT: v_bfe_i32 v1, v1, 0, 16
+; GFX7-NEXT: v_max_i32_e32 v0, v31, v0
+; GFX7-NEXT: v_sub_i32_e32 v31, vcc, 0, v1
+; GFX7-NEXT: v_bfe_i32 v4, v4, 0, 16
+; GFX7-NEXT: v_max_i32_e32 v1, v31, v1
+; GFX7-NEXT: v_sub_i32_e32 v31, vcc, 0, v4
+; GFX7-NEXT: v_bfe_i32 v5, v5, 0, 16
+; GFX7-NEXT: v_max_i32_e32 v4, v31, v4
+; GFX7-NEXT: v_sub_i32_e32 v31, vcc, 0, v5
+; GFX7-NEXT: v_bfe_i32 v8, v8, 0, 16
+; GFX7-NEXT: v_max_i32_e32 v5, v31, v5
+; GFX7-NEXT: v_sub_i32_e32 v31, vcc, 0, v8
+; GFX7-NEXT: v_bfe_i32 v9, v9, 0, 16
+; GFX7-NEXT: v_max_i32_e32 v8, v31, v8
+; GFX7-NEXT: v_sub_i32_e32 v31, vcc, 0, v9
+; GFX7-NEXT: v_bfe_i32 v12, v12, 0, 16
+; GFX7-NEXT: v_max_i32_e32 v9, v31, v9
+; GFX7-NEXT: v_sub_i32_e32 v31, vcc, 0, v12
+; GFX7-NEXT: v_bfe_i32 v13, v13, 0, 16
+; GFX7-NEXT: v_max_i32_e32 v12, v31, v12
+; GFX7-NEXT: v_sub_i32_e32 v31, vcc, 0, v13
+; GFX7-NEXT: v_bfe_i32 v16, v16, 0, 16
+; GFX7-NEXT: v_max_i32_e32 v13, v31, v13
+; GFX7-NEXT: v_sub_i32_e32 v31, vcc, 0, v16
+; GFX7-NEXT: v_bfe_i32 v17, v17, 0, 16
+; GFX7-NEXT: v_max_i32_e32 v16, v31, v16
+; GFX7-NEXT: v_sub_i32_e32 v31, vcc, 0, v17
+; GFX7-NEXT: v_bfe_i32 v20, v20, 0, 16
+; GFX7-NEXT: v_max_i32_e32 v17, v31, v17
+; GFX7-NEXT: v_sub_i32_e32 v31, vcc, 0, v20
+; GFX7-NEXT: v_bfe_i32 v21, v21, 0, 16
+; GFX7-NEXT: v_max_i32_e32 v20, v31, v20
+; GFX7-NEXT: v_sub_i32_e32 v31, vcc, 0, v21
+; GFX7-NEXT: v_bfe_i32 v24, v24, 0, 16
+; GFX7-NEXT: v_max_i32_e32 v21, v31, v21
+; GFX7-NEXT: v_sub_i32_e32 v31, vcc, 0, v24
+; GFX7-NEXT: v_bfe_i32 v25, v25, 0, 16
+; GFX7-NEXT: v_max_i32_e32 v24, v31, v24
+; GFX7-NEXT: v_sub_i32_e32 v31, vcc, 0, v25
; GFX7-NEXT: v_bfe_i32 v28, v28, 0, 16
+; GFX7-NEXT: v_max_i32_e32 v25, v31, v25
; GFX7-NEXT: v_sub_i32_e32 v31, vcc, 0, v28
; GFX7-NEXT: v_bfe_i32 v29, v29, 0, 16
-; GFX7-NEXT: v_max_i32_e32 v28, v28, v31
+; GFX7-NEXT: v_max_i32_e32 v28, v31, v28
; GFX7-NEXT: v_sub_i32_e32 v31, vcc, 0, v29
; GFX7-NEXT: v_bfe_i32 v30, v30, 0, 16
-; GFX7-NEXT: v_max_i32_e32 v29, v29, v31
+; GFX7-NEXT: v_max_i32_e32 v29, v31, v29
; GFX7-NEXT: v_sub_i32_e32 v31, vcc, 0, v30
+; GFX7-NEXT: v_max_i32_e32 v30, v31, v30
+; GFX7-NEXT: buffer_load_dword v31, off, s[0:3], s32
; GFX7-NEXT: v_bfe_i32 v26, v26, 0, 16
-; GFX7-NEXT: v_max_i32_e32 v30, v30, v31
-; GFX7-NEXT: v_sub_i32_e32 v31, vcc, 0, v26
; GFX7-NEXT: v_bfe_i32 v27, v27, 0, 16
-; GFX7-NEXT: v_max_i32_e32 v26, v26, v31
-; GFX7-NEXT: v_sub_i32_e32 v31, vcc, 0, v27
-; GFX7-NEXT: v_bfe_i32 v24, v24, 0, 16
-; GFX7-NEXT: v_max_i32_e32 v27, v27, v31
-; GFX7-NEXT: v_sub_i32_e32 v31, vcc, 0, v24
-; GFX7-NEXT: v_bfe_i32 v25, v25, 0, 16
-; GFX7-NEXT: v_max_i32_e32 v24, v24, v31
-; GFX7-NEXT: v_sub_i32_e32 v31, vcc, 0, v25
; GFX7-NEXT: v_bfe_i32 v22, v22, 0, 16
-; GFX7-NEXT: v_max_i32_e32 v25, v25, v31
-; GFX7-NEXT: v_sub_i32_e32 v31, vcc, 0, v22
; GFX7-NEXT: v_bfe_i32 v23, v23, 0, 16
-; GFX7-NEXT: v_max_i32_e32 v22, v22, v31
-; GFX7-NEXT: v_sub_i32_e32 v31, vcc, 0, v23
-; GFX7-NEXT: v_max_i32_e32 v23, v23, v31
-; GFX7-NEXT: buffer_load_dword v31, off, s[0:3], s32
-; GFX7-NEXT: v_lshlrev_b32_e32 v23, 16, v23
-; GFX7-NEXT: v_lshlrev_b32_e32 v25, 16, v25
-; GFX7-NEXT: v_or_b32_e32 v22, v22, v23
-; GFX7-NEXT: v_or_b32_e32 v24, v24, v25
-; GFX7-NEXT: v_bfe_i32 v21, v21, 0, 16
-; GFX7-NEXT: v_bfe_i32 v20, v20, 0, 16
-; GFX7-NEXT: v_lshlrev_b32_e32 v29, 16, v29
-; GFX7-NEXT: v_or_b32_e32 v28, v28, v29
-; GFX7-NEXT: v_sub_i32_e32 v29, vcc, 0, v20
-; GFX7-NEXT: v_max_i32_e32 v20, v20, v29
; GFX7-NEXT: v_bfe_i32 v18, v18, 0, 16
; GFX7-NEXT: v_bfe_i32 v19, v19, 0, 16
-; GFX7-NEXT: v_bfe_i32 v16, v16, 0, 16
-; GFX7-NEXT: v_bfe_i32 v17, v17, 0, 16
; GFX7-NEXT: v_bfe_i32 v14, v14, 0, 16
; GFX7-NEXT: v_bfe_i32 v15, v15, 0, 16
-; GFX7-NEXT: v_bfe_i32 v12, v12, 0, 16
-; GFX7-NEXT: v_bfe_i32 v13, v13, 0, 16
; GFX7-NEXT: v_bfe_i32 v10, v10, 0, 16
; GFX7-NEXT: v_bfe_i32 v11, v11, 0, 16
-; GFX7-NEXT: v_bfe_i32 v8, v8, 0, 16
-; GFX7-NEXT: v_bfe_i32 v9, v9, 0, 16
; GFX7-NEXT: v_bfe_i32 v6, v6, 0, 16
; GFX7-NEXT: v_bfe_i32 v7, v7, 0, 16
-; GFX7-NEXT: v_bfe_i32 v4, v4, 0, 16
-; GFX7-NEXT: v_bfe_i32 v5, v5, 0, 16
; GFX7-NEXT: v_bfe_i32 v2, v2, 0, 16
; GFX7-NEXT: v_bfe_i32 v3, v3, 0, 16
-; GFX7-NEXT: v_bfe_i32 v0, v0, 0, 16
-; GFX7-NEXT: v_bfe_i32 v1, v1, 0, 16
-; GFX7-NEXT: v_lshlrev_b32_e32 v27, 16, v27
-; GFX7-NEXT: v_or_b32_e32 v26, v26, v27
-; GFX7-NEXT: v_lshrrev_b32_e32 v27, 16, v26
-; GFX7-NEXT: s_waitcnt vmcnt(0)
-; GFX7-NEXT: v_bfe_i32 v23, v31, 0, 16
-; GFX7-NEXT: v_sub_i32_e32 v25, vcc, 0, v23
-; GFX7-NEXT: v_max_i32_e32 v23, v23, v25
-; GFX7-NEXT: v_lshlrev_b32_e32 v23, 16, v23
-; GFX7-NEXT: v_or_b32_e32 v30, v30, v23
-; GFX7-NEXT: v_sub_i32_e32 v23, vcc, 0, v21
-; GFX7-NEXT: v_max_i32_e32 v21, v21, v23
-; GFX7-NEXT: v_lshlrev_b32_e32 v21, 16, v21
-; GFX7-NEXT: v_or_b32_e32 v20, v20, v21
-; GFX7-NEXT: v_sub_i32_e32 v21, vcc, 0, v18
-; GFX7-NEXT: v_max_i32_e32 v18, v18, v21
-; GFX7-NEXT: v_sub_i32_e32 v21, vcc, 0, v19
-; GFX7-NEXT: v_max_i32_e32 v19, v19, v21
-; GFX7-NEXT: v_lshlrev_b32_e32 v19, 16, v19
-; GFX7-NEXT: v_or_b32_e32 v18, v18, v19
-; GFX7-NEXT: v_sub_i32_e32 v19, vcc, 0, v16
-; GFX7-NEXT: v_max_i32_e32 v16, v16, v19
-; GFX7-NEXT: v_sub_i32_e32 v19, vcc, 0, v17
-; GFX7-NEXT: v_max_i32_e32 v17, v17, v19
-; GFX7-NEXT: v_lshlrev_b32_e32 v17, 16, v17
-; GFX7-NEXT: v_or_b32_e32 v16, v16, v17
-; GFX7-NEXT: v_sub_i32_e32 v17, vcc, 0, v14
-; GFX7-NEXT: v_max_i32_e32 v14, v14, v17
-; GFX7-NEXT: v_sub_i32_e32 v17, vcc, 0, v15
-; GFX7-NEXT: v_max_i32_e32 v15, v15, v17
-; GFX7-NEXT: v_lshlrev_b32_e32 v15, 16, v15
-; GFX7-NEXT: v_or_b32_e32 v14, v14, v15
-; GFX7-NEXT: v_sub_i32_e32 v15, vcc, 0, v12
-; GFX7-NEXT: v_max_i32_e32 v12, v12, v15
-; GFX7-NEXT: v_sub_i32_e32 v15, vcc, 0, v13
-; GFX7-NEXT: v_max_i32_e32 v13, v13, v15
-; GFX7-NEXT: v_lshlrev_b32_e32 v13, 16, v13
-; GFX7-NEXT: v_or_b32_e32 v12, v12, v13
-; GFX7-NEXT: v_sub_i32_e32 v13, vcc, 0, v10
-; GFX7-NEXT: v_max_i32_e32 v10, v10, v13
-; GFX7-NEXT: v_sub_i32_e32 v13, vcc, 0, v11
-; GFX7-NEXT: v_max_i32_e32 v11, v11, v13
-; GFX7-NEXT: v_lshlrev_b32_e32 v11, 16, v11
-; GFX7-NEXT: v_or_b32_e32 v10, v10, v11
-; GFX7-NEXT: v_sub_i32_e32 v11, vcc, 0, v8
-; GFX7-NEXT: v_max_i32_e32 v8, v8, v11
-; GFX7-NEXT: v_sub_i32_e32 v11, vcc, 0, v9
-; GFX7-NEXT: v_max_i32_e32 v9, v9, v11
-; GFX7-NEXT: v_lshlrev_b32_e32 v9, 16, v9
-; GFX7-NEXT: v_or_b32_e32 v8, v8, v9
-; GFX7-NEXT: v_sub_i32_e32 v9, vcc, 0, v6
-; GFX7-NEXT: v_max_i32_e32 v6, v6, v9
-; GFX7-NEXT: v_sub_i32_e32 v9, vcc, 0, v7
-; GFX7-NEXT: v_max_i32_e32 v7, v7, v9
-; GFX7-NEXT: v_lshlrev_b32_e32 v7, 16, v7
-; GFX7-NEXT: v_or_b32_e32 v6, v6, v7
-; GFX7-NEXT: v_sub_i32_e32 v7, vcc, 0, v4
-; GFX7-NEXT: v_max_i32_e32 v4, v4, v7
-; GFX7-NEXT: v_sub_i32_e32 v7, vcc, 0, v5
-; GFX7-NEXT: v_max_i32_e32 v5, v5, v7
-; GFX7-NEXT: v_lshlrev_b32_e32 v5, 16, v5
-; GFX7-NEXT: v_or_b32_e32 v4, v4, v5
-; GFX7-NEXT: v_sub_i32_e32 v5, vcc, 0, v2
-; GFX7-NEXT: v_max_i32_e32 v2, v2, v5
-; GFX7-NEXT: v_sub_i32_e32 v5, vcc, 0, v3
-; GFX7-NEXT: v_max_i32_e32 v3, v3, v5
-; GFX7-NEXT: v_lshlrev_b32_e32 v3, 16, v3
-; GFX7-NEXT: v_or_b32_e32 v2, v2, v3
-; GFX7-NEXT: v_sub_i32_e32 v3, vcc, 0, v0
-; GFX7-NEXT: v_max_i32_e32 v0, v0, v3
-; GFX7-NEXT: v_sub_i32_e32 v3, vcc, 0, v1
-; GFX7-NEXT: v_max_i32_e32 v1, v1, v3
; GFX7-NEXT: v_lshlrev_b32_e32 v1, 16, v1
+; GFX7-NEXT: v_lshlrev_b32_e32 v5, 16, v5
+; GFX7-NEXT: v_lshlrev_b32_e32 v9, 16, v9
+; GFX7-NEXT: v_lshlrev_b32_e32 v13, 16, v13
+; GFX7-NEXT: v_lshlrev_b32_e32 v17, 16, v17
+; GFX7-NEXT: v_lshlrev_b32_e32 v21, 16, v21
+; GFX7-NEXT: v_lshlrev_b32_e32 v25, 16, v25
+; GFX7-NEXT: v_lshlrev_b32_e32 v29, 16, v29
; GFX7-NEXT: v_or_b32_e32 v0, v0, v1
-; GFX7-NEXT: v_alignbit_b32 v1, v2, v0, 16
-; GFX7-NEXT: v_alignbit_b32 v5, v6, v4, 16
-; GFX7-NEXT: v_alignbit_b32 v9, v10, v8, 16
-; GFX7-NEXT: v_alignbit_b32 v13, v14, v12, 16
-; GFX7-NEXT: v_alignbit_b32 v17, v18, v16, 16
-; GFX7-NEXT: v_alignbit_b32 v21, v22, v20, 16
-; GFX7-NEXT: v_alignbit_b32 v25, v26, v24, 16
-; GFX7-NEXT: v_alignbit_b32 v29, v30, v28, 16
-; GFX7-NEXT: v_lshrrev_b32_e32 v3, 16, v2
-; GFX7-NEXT: v_lshrrev_b32_e32 v7, 16, v6
-; GFX7-NEXT: v_lshrrev_b32_e32 v11, 16, v10
-; GFX7-NEXT: v_lshrrev_b32_e32 v15, 16, v14
-; GFX7-NEXT: v_lshrrev_b32_e32 v19, 16, v18
-; GFX7-NEXT: v_lshrrev_b32_e32 v23, 16, v22
-; GFX7-NEXT: v_lshrrev_b32_e32 v31, 16, v30
+; GFX7-NEXT: v_or_b32_e32 v4, v4, v5
+; GFX7-NEXT: v_or_b32_e32 v8, v8, v9
+; GFX7-NEXT: v_or_b32_e32 v12, v12, v13
+; GFX7-NEXT: v_or_b32_e32 v16, v16, v17
+; GFX7-NEXT: v_or_b32_e32 v20, v20, v21
+; GFX7-NEXT: v_or_b32_e32 v24, v24, v25
+; GFX7-NEXT: v_or_b32_e32 v28, v28, v29
+; GFX7-NEXT: s_waitcnt vmcnt(0)
+; GFX7-NEXT: v_bfe_i32 v31, v31, 0, 16
+; GFX7-NEXT: v_sub_i32_e32 v32, vcc, 0, v31
+; GFX7-NEXT: v_max_i32_e32 v31, v32, v31
+; GFX7-NEXT: v_lshlrev_b32_e32 v32, 16, v31
+; GFX7-NEXT: v_or_b32_e32 v30, v30, v32
+; GFX7-NEXT: v_sub_i32_e32 v32, vcc, 0, v26
+; GFX7-NEXT: v_max_i32_e32 v26, v32, v26
+; GFX7-NEXT: v_sub_i32_e32 v32, vcc, 0, v27
+; GFX7-NEXT: v_max_i32_e32 v27, v32, v27
+; GFX7-NEXT: v_lshlrev_b32_e32 v32, 16, v27
+; GFX7-NEXT: v_or_b32_e32 v26, v26, v32
+; GFX7-NEXT: v_sub_i32_e32 v32, vcc, 0, v22
+; GFX7-NEXT: v_max_i32_e32 v22, v32, v22
+; GFX7-NEXT: v_sub_i32_e32 v32, vcc, 0, v23
+; GFX7-NEXT: v_max_i32_e32 v23, v32, v23
+; GFX7-NEXT: v_lshlrev_b32_e32 v32, 16, v23
+; GFX7-NEXT: v_or_b32_e32 v22, v22, v32
+; GFX7-NEXT: v_sub_i32_e32 v32, vcc, 0, v18
+; GFX7-NEXT: v_max_i32_e32 v18, v32, v18
+; GFX7-NEXT: v_sub_i32_e32 v32, vcc, 0, v19
+; GFX7-NEXT: v_max_i32_e32 v19, v32, v19
+; GFX7-NEXT: v_lshlrev_b32_e32 v32, 16, v19
+; GFX7-NEXT: v_or_b32_e32 v18, v18, v32
+; GFX7-NEXT: v_sub_i32_e32 v32, vcc, 0, v14
+; GFX7-NEXT: v_max_i32_e32 v14, v32, v14
+; GFX7-NEXT: v_sub_i32_e32 v32, vcc, 0, v15
+; GFX7-NEXT: v_max_i32_e32 v15, v32, v15
+; GFX7-NEXT: v_lshlrev_b32_e32 v32, 16, v15
+; GFX7-NEXT: v_or_b32_e32 v14, v14, v32
+; GFX7-NEXT: v_sub_i32_e32 v32, vcc, 0, v10
+; GFX7-NEXT: v_max_i32_e32 v10, v32, v10
+; GFX7-NEXT: v_sub_i32_e32 v32, vcc, 0, v11
+; GFX7-NEXT: v_max_i32_e32 v11, v32, v11
+; GFX7-NEXT: v_lshlrev_b32_e32 v32, 16, v11
+; GFX7-NEXT: v_or_b32_e32 v10, v10, v32
+; GFX7-NEXT: v_sub_i32_e32 v32, vcc, 0, v6
+; GFX7-NEXT: v_max_i32_e32 v6, v32, v6
+; GFX7-NEXT: v_sub_i32_e32 v32, vcc, 0, v7
+; GFX7-NEXT: v_max_i32_e32 v7, v32, v7
+; GFX7-NEXT: v_lshlrev_b32_e32 v32, 16, v7
+; GFX7-NEXT: v_or_b32_e32 v6, v6, v32
+; GFX7-NEXT: v_sub_i32_e32 v32, vcc, 0, v2
+; GFX7-NEXT: v_max_i32_e32 v2, v32, v2
+; GFX7-NEXT: v_sub_i32_e32 v32, vcc, 0, v3
+; GFX7-NEXT: v_max_i32_e32 v3, v32, v3
+; GFX7-NEXT: v_lshlrev_b32_e32 v32, 16, v3
+; GFX7-NEXT: v_or_b32_e32 v2, v2, v32
+; GFX7-NEXT: v_alignbit_b32 v1, v2, v1, 16
+; GFX7-NEXT: v_alignbit_b32 v5, v6, v5, 16
+; GFX7-NEXT: v_alignbit_b32 v9, v10, v9, 16
+; GFX7-NEXT: v_alignbit_b32 v13, v14, v13, 16
+; GFX7-NEXT: v_alignbit_b32 v17, v18, v17, 16
+; GFX7-NEXT: v_alignbit_b32 v21, v22, v21, 16
+; GFX7-NEXT: v_alignbit_b32 v25, v26, v25, 16
+; GFX7-NEXT: v_alignbit_b32 v29, v30, v29, 16
; GFX7-NEXT: s_setpc_b64 s[30:31]
;
; GFX8-LABEL: v_abs_v32i16:
diff --git a/llvm/test/CodeGen/AMDGPU/add.v2i16.ll b/llvm/test/CodeGen/AMDGPU/add.v2i16.ll
index d25bfbb..12309f3 100644
--- a/llvm/test/CodeGen/AMDGPU/add.v2i16.ll
+++ b/llvm/test/CodeGen/AMDGPU/add.v2i16.ll
@@ -780,7 +780,7 @@ define amdgpu_kernel void @v_test_add_v2i16_zext_to_v2i64(ptr addrspace(1) %out,
; GFX11-TRUE16-NEXT: s_load_b128 s[0:3], s[4:5], 0x24
; GFX11-TRUE16-NEXT: s_load_b64 s[4:5], s[4:5], 0x34
; GFX11-TRUE16-NEXT: v_and_b32_e32 v0, 0x3ff, v0
-; GFX11-TRUE16-NEXT: v_mov_b16_e32 v2.l, 0
+; GFX11-TRUE16-NEXT: v_mov_b16_e32 v2.h, 0
; GFX11-TRUE16-NEXT: s_delay_alu instid0(VALU_DEP_2)
; GFX11-TRUE16-NEXT: v_lshlrev_b32_e32 v0, 2, v0
; GFX11-TRUE16-NEXT: s_waitcnt lgkmcnt(0)
@@ -790,11 +790,9 @@ define amdgpu_kernel void @v_test_add_v2i16_zext_to_v2i64(ptr addrspace(1) %out,
; GFX11-TRUE16-NEXT: s_waitcnt vmcnt(0)
; GFX11-TRUE16-NEXT: v_pk_add_u16 v0, v1, v0
; GFX11-TRUE16-NEXT: v_mov_b32_e32 v1, 0
-; GFX11-TRUE16-NEXT: s_delay_alu instid0(VALU_DEP_2) | instskip(SKIP_1) | instid1(VALU_DEP_2)
-; GFX11-TRUE16-NEXT: v_lshrrev_b32_e32 v3, 16, v0
-; GFX11-TRUE16-NEXT: v_mov_b16_e32 v0.h, v2.l
-; GFX11-TRUE16-NEXT: v_lshl_or_b32 v2, v2, 16, v3
-; GFX11-TRUE16-NEXT: s_delay_alu instid0(VALU_DEP_4)
+; GFX11-TRUE16-NEXT: s_delay_alu instid0(VALU_DEP_2) | instskip(SKIP_1) | instid1(VALU_DEP_3)
+; GFX11-TRUE16-NEXT: v_mov_b16_e32 v2.l, v0.h
+; GFX11-TRUE16-NEXT: v_mov_b16_e32 v0.h, v2.h
; GFX11-TRUE16-NEXT: v_mov_b32_e32 v3, v1
; GFX11-TRUE16-NEXT: global_store_b128 v1, v[0:3], s[0:1]
; GFX11-TRUE16-NEXT: s_endpgm
diff --git a/llvm/test/CodeGen/AMDGPU/amdgcn.bitcast.1024bit.ll b/llvm/test/CodeGen/AMDGPU/amdgcn.bitcast.1024bit.ll
index 117af95..74552a5 100644
--- a/llvm/test/CodeGen/AMDGPU/amdgcn.bitcast.1024bit.ll
+++ b/llvm/test/CodeGen/AMDGPU/amdgcn.bitcast.1024bit.ll
@@ -29177,870 +29177,1844 @@ define inreg <32 x i32> @bitcast_v64bf16_to_v32i32_scalar(<64 x bfloat> inreg %a
; GFX9-NEXT: .LBB19_4:
; GFX9-NEXT: s_branch .LBB19_2
;
-; GFX11-LABEL: bitcast_v64bf16_to_v32i32_scalar:
-; GFX11: ; %bb.0:
-; GFX11-NEXT: s_waitcnt vmcnt(0) expcnt(0) lgkmcnt(0)
-; GFX11-NEXT: v_cmp_ne_u32_e32 vcc_lo, 0, v14
-; GFX11-NEXT: s_clause 0x1f
-; GFX11-NEXT: scratch_store_b32 off, v40, s32 offset:288
-; GFX11-NEXT: scratch_store_b32 off, v41, s32 offset:284
-; GFX11-NEXT: scratch_store_b32 off, v42, s32 offset:280
-; GFX11-NEXT: scratch_store_b32 off, v43, s32 offset:276
-; GFX11-NEXT: scratch_store_b32 off, v44, s32 offset:272
-; GFX11-NEXT: scratch_store_b32 off, v45, s32 offset:268
-; GFX11-NEXT: scratch_store_b32 off, v46, s32 offset:264
-; GFX11-NEXT: scratch_store_b32 off, v47, s32 offset:260
-; GFX11-NEXT: scratch_store_b32 off, v56, s32 offset:256
-; GFX11-NEXT: scratch_store_b32 off, v57, s32 offset:252
-; GFX11-NEXT: scratch_store_b32 off, v58, s32 offset:248
-; GFX11-NEXT: scratch_store_b32 off, v59, s32 offset:244
-; GFX11-NEXT: scratch_store_b32 off, v60, s32 offset:240
-; GFX11-NEXT: scratch_store_b32 off, v61, s32 offset:236
-; GFX11-NEXT: scratch_store_b32 off, v62, s32 offset:232
-; GFX11-NEXT: scratch_store_b32 off, v63, s32 offset:228
-; GFX11-NEXT: scratch_store_b32 off, v72, s32 offset:224
-; GFX11-NEXT: scratch_store_b32 off, v73, s32 offset:220
-; GFX11-NEXT: scratch_store_b32 off, v74, s32 offset:216
-; GFX11-NEXT: scratch_store_b32 off, v75, s32 offset:212
-; GFX11-NEXT: scratch_store_b32 off, v76, s32 offset:208
-; GFX11-NEXT: scratch_store_b32 off, v77, s32 offset:204
-; GFX11-NEXT: scratch_store_b32 off, v78, s32 offset:200
-; GFX11-NEXT: scratch_store_b32 off, v79, s32 offset:196
-; GFX11-NEXT: scratch_store_b32 off, v88, s32 offset:192
-; GFX11-NEXT: scratch_store_b32 off, v89, s32 offset:188
-; GFX11-NEXT: scratch_store_b32 off, v90, s32 offset:184
-; GFX11-NEXT: scratch_store_b32 off, v91, s32 offset:180
-; GFX11-NEXT: scratch_store_b32 off, v92, s32 offset:176
-; GFX11-NEXT: scratch_store_b32 off, v93, s32 offset:172
-; GFX11-NEXT: scratch_store_b32 off, v94, s32 offset:168
-; GFX11-NEXT: scratch_store_b32 off, v95, s32 offset:164
-; GFX11-NEXT: s_clause 0x1f
-; GFX11-NEXT: scratch_store_b32 off, v104, s32 offset:160
-; GFX11-NEXT: scratch_store_b32 off, v105, s32 offset:156
-; GFX11-NEXT: scratch_store_b32 off, v106, s32 offset:152
-; GFX11-NEXT: scratch_store_b32 off, v107, s32 offset:148
-; GFX11-NEXT: scratch_store_b32 off, v108, s32 offset:144
-; GFX11-NEXT: scratch_store_b32 off, v109, s32 offset:140
-; GFX11-NEXT: scratch_store_b32 off, v110, s32 offset:136
-; GFX11-NEXT: scratch_store_b32 off, v111, s32 offset:132
-; GFX11-NEXT: scratch_store_b32 off, v120, s32 offset:128
-; GFX11-NEXT: scratch_store_b32 off, v121, s32 offset:124
-; GFX11-NEXT: scratch_store_b32 off, v122, s32 offset:120
-; GFX11-NEXT: scratch_store_b32 off, v123, s32 offset:116
-; GFX11-NEXT: scratch_store_b32 off, v124, s32 offset:112
-; GFX11-NEXT: scratch_store_b32 off, v125, s32 offset:108
-; GFX11-NEXT: scratch_store_b32 off, v126, s32 offset:104
-; GFX11-NEXT: scratch_store_b32 off, v127, s32 offset:100
-; GFX11-NEXT: scratch_store_b32 off, v136, s32 offset:96
-; GFX11-NEXT: scratch_store_b32 off, v137, s32 offset:92
-; GFX11-NEXT: scratch_store_b32 off, v138, s32 offset:88
-; GFX11-NEXT: scratch_store_b32 off, v139, s32 offset:84
-; GFX11-NEXT: scratch_store_b32 off, v140, s32 offset:80
-; GFX11-NEXT: scratch_store_b32 off, v141, s32 offset:76
-; GFX11-NEXT: scratch_store_b32 off, v142, s32 offset:72
-; GFX11-NEXT: scratch_store_b32 off, v143, s32 offset:68
-; GFX11-NEXT: scratch_store_b32 off, v152, s32 offset:64
-; GFX11-NEXT: scratch_store_b32 off, v153, s32 offset:60
-; GFX11-NEXT: scratch_store_b32 off, v154, s32 offset:56
-; GFX11-NEXT: scratch_store_b32 off, v155, s32 offset:52
-; GFX11-NEXT: scratch_store_b32 off, v156, s32 offset:48
-; GFX11-NEXT: scratch_store_b32 off, v157, s32 offset:44
-; GFX11-NEXT: scratch_store_b32 off, v158, s32 offset:40
-; GFX11-NEXT: scratch_store_b32 off, v159, s32 offset:36
-; GFX11-NEXT: s_clause 0x8
-; GFX11-NEXT: scratch_store_b32 off, v168, s32 offset:32
-; GFX11-NEXT: scratch_store_b32 off, v169, s32 offset:28
-; GFX11-NEXT: scratch_store_b32 off, v170, s32 offset:24
-; GFX11-NEXT: scratch_store_b32 off, v171, s32 offset:20
-; GFX11-NEXT: scratch_store_b32 off, v172, s32 offset:16
-; GFX11-NEXT: scratch_store_b32 off, v173, s32 offset:12
-; GFX11-NEXT: scratch_store_b32 off, v174, s32 offset:8
-; GFX11-NEXT: scratch_store_b32 off, v175, s32 offset:4
-; GFX11-NEXT: scratch_store_b32 off, v184, s32
-; GFX11-NEXT: v_dual_mov_b32 v178, v13 :: v_dual_mov_b32 v179, v12
-; GFX11-NEXT: v_dual_mov_b32 v180, v11 :: v_dual_mov_b32 v181, v9
-; GFX11-NEXT: v_dual_mov_b32 v182, v10 :: v_dual_mov_b32 v169, v7
-; GFX11-NEXT: v_dual_mov_b32 v170, v8 :: v_dual_mov_b32 v177, v3
-; GFX11-NEXT: v_dual_mov_b32 v176, v6 :: v_dual_mov_b32 v171, v4
-; GFX11-NEXT: v_dual_mov_b32 v174, v5 :: v_dual_mov_b32 v173, v0
-; GFX11-NEXT: v_dual_mov_b32 v184, v2 :: v_dual_mov_b32 v175, v1
-; GFX11-NEXT: v_dual_mov_b32 v183, s28 :: v_dual_mov_b32 v172, s29
-; GFX11-NEXT: s_mov_b32 s4, 0
-; GFX11-NEXT: s_and_b32 s5, vcc_lo, exec_lo
-; GFX11-NEXT: s_cbranch_scc0 .LBB19_4
-; GFX11-NEXT: ; %bb.1: ; %cmp.false
-; GFX11-NEXT: v_dual_mov_b32 v32, s0 :: v_dual_mov_b32 v37, s2
-; GFX11-NEXT: v_dual_mov_b32 v34, s1 :: v_dual_mov_b32 v41, s3
-; GFX11-NEXT: v_dual_mov_b32 v46, s16 :: v_dual_mov_b32 v59, s18
-; GFX11-NEXT: v_dual_mov_b32 v52, s17 :: v_dual_mov_b32 v67, s19
-; GFX11-NEXT: v_dual_mov_b32 v76, s20 :: v_dual_mov_b32 v97, s22
-; GFX11-NEXT: v_dual_mov_b32 v86, s21 :: v_dual_mov_b32 v109, s23
-; GFX11-NEXT: v_dual_mov_b32 v122, s24 :: v_dual_mov_b32 v151, s26
-; GFX11-NEXT: v_dual_mov_b32 v136, s25 :: v_dual_mov_b32 v15, s27
-; GFX11-NEXT: s_and_not1_b32 vcc_lo, exec_lo, s4
-; GFX11-NEXT: s_cbranch_vccnz .LBB19_3
-; GFX11-NEXT: .LBB19_2: ; %cmp.true
-; GFX11-NEXT: s_and_b32 s5, s27, 0xffff0000
-; GFX11-NEXT: s_lshl_b32 s4, s27, 16
-; GFX11-NEXT: v_add_f32_e64 v1, 0x40c00000, s5
-; GFX11-NEXT: v_add_f32_e64 v0, 0x40c00000, s4
-; GFX11-NEXT: s_lshl_b32 s6, s26, 16
-; GFX11-NEXT: s_and_b32 s4, s26, 0xffff0000
-; GFX11-NEXT: v_add_f32_e64 v5, 0x40c00000, s6
-; GFX11-NEXT: v_bfe_u32 v4, v1, 16, 1
-; GFX11-NEXT: v_bfe_u32 v2, v0, 16, 1
-; GFX11-NEXT: v_or_b32_e32 v7, 0x400000, v1
-; GFX11-NEXT: v_add_f32_e64 v3, 0x40c00000, s4
-; GFX11-NEXT: v_or_b32_e32 v8, 0x400000, v0
-; GFX11-NEXT: v_add_nc_u32_e32 v4, v4, v1
-; GFX11-NEXT: v_bfe_u32 v10, v5, 16, 1
-; GFX11-NEXT: v_cmp_u_f32_e32 vcc_lo, v0, v0
-; GFX11-NEXT: v_bfe_u32 v9, v3, 16, 1
-; GFX11-NEXT: s_lshl_b32 s7, s25, 16
-; GFX11-NEXT: v_add_nc_u32_e32 v4, 0x7fff, v4
-; GFX11-NEXT: v_add_nc_u32_e32 v2, v2, v0
-; GFX11-NEXT: s_and_b32 s5, s25, 0xffff0000
-; GFX11-NEXT: s_and_b32 s4, s24, 0xffff0000
-; GFX11-NEXT: v_add_f32_e64 v6, 0x40c00000, s5
-; GFX11-NEXT: v_and_b32_e32 v51, 0xffff0000, v183
-; GFX11-NEXT: v_add_nc_u32_e32 v2, 0x7fff, v2
-; GFX11-NEXT: s_delay_alu instid0(VALU_DEP_1)
-; GFX11-NEXT: v_cndmask_b32_e32 v0, v2, v8, vcc_lo
-; GFX11-NEXT: v_add_nc_u32_e32 v8, v10, v5
-; GFX11-NEXT: v_cmp_u_f32_e32 vcc_lo, v1, v1
-; GFX11-NEXT: v_add_nc_u32_e32 v2, v9, v3
-; GFX11-NEXT: v_or_b32_e32 v9, 0x400000, v5
-; GFX11-NEXT: v_lshrrev_b32_e32 v0, 16, v0
-; GFX11-NEXT: v_bfe_u32 v10, v6, 16, 1
-; GFX11-NEXT: v_cndmask_b32_e32 v1, v4, v7, vcc_lo
-; GFX11-NEXT: v_add_nc_u32_e32 v7, 0x7fff, v8
-; GFX11-NEXT: v_add_f32_e64 v8, 0x40c00000, s7
-; GFX11-NEXT: v_or_b32_e32 v4, 0x400000, v3
-; GFX11-NEXT: v_add_nc_u32_e32 v2, 0x7fff, v2
-; GFX11-NEXT: v_lshrrev_b32_e32 v1, 16, v1
-; GFX11-NEXT: v_and_b32_e32 v0, 0xffff, v0
-; GFX11-NEXT: v_cmp_u_f32_e32 vcc_lo, v3, v3
-; GFX11-NEXT: v_bfe_u32 v3, v8, 16, 1
-; GFX11-NEXT: s_delay_alu instid0(VALU_DEP_3) | instskip(NEXT) | instid1(VALU_DEP_2)
-; GFX11-NEXT: v_lshl_or_b32 v15, v1, 16, v0
-; GFX11-NEXT: v_add_nc_u32_e32 v1, v3, v8
-; GFX11-NEXT: v_cndmask_b32_e32 v2, v2, v4, vcc_lo
-; GFX11-NEXT: v_cmp_u_f32_e32 vcc_lo, v5, v5
-; GFX11-NEXT: v_add_nc_u32_e32 v5, v10, v6
-; GFX11-NEXT: s_delay_alu instid0(VALU_DEP_4) | instskip(NEXT) | instid1(VALU_DEP_4)
-; GFX11-NEXT: v_add_nc_u32_e32 v1, 0x7fff, v1
-; GFX11-NEXT: v_lshrrev_b32_e32 v0, 16, v2
-; GFX11-NEXT: v_cndmask_b32_e32 v4, v7, v9, vcc_lo
-; GFX11-NEXT: s_delay_alu instid0(VALU_DEP_4)
-; GFX11-NEXT: v_add_nc_u32_e32 v3, 0x7fff, v5
-; GFX11-NEXT: v_add_f32_e64 v5, 0x40c00000, s4
-; GFX11-NEXT: v_cmp_u_f32_e32 vcc_lo, v6, v6
-; GFX11-NEXT: s_lshl_b32 s4, s24, 16
-; GFX11-NEXT: v_lshrrev_b32_e32 v2, 16, v4
-; GFX11-NEXT: v_or_b32_e32 v4, 0x400000, v6
-; GFX11-NEXT: v_or_b32_e32 v7, 0x400000, v8
-; GFX11-NEXT: v_add_f32_e64 v9, 0x40c00000, s4
-; GFX11-NEXT: s_and_b32 s4, s23, 0xffff0000
-; GFX11-NEXT: s_delay_alu instid0(VALU_DEP_3) | instskip(SKIP_4) | instid1(VALU_DEP_4)
-; GFX11-NEXT: v_cndmask_b32_e32 v3, v3, v4, vcc_lo
-; GFX11-NEXT: v_bfe_u32 v4, v5, 16, 1
-; GFX11-NEXT: v_cmp_u_f32_e32 vcc_lo, v8, v8
-; GFX11-NEXT: v_or_b32_e32 v8, 0x400000, v5
-; GFX11-NEXT: v_or_b32_e32 v10, 0x400000, v9
-; GFX11-NEXT: v_add_nc_u32_e32 v4, v4, v5
-; GFX11-NEXT: v_dual_cndmask_b32 v6, v1, v7 :: v_dual_and_b32 v1, 0xffff, v2
-; GFX11-NEXT: v_bfe_u32 v7, v9, 16, 1
-; GFX11-NEXT: v_lshrrev_b32_e32 v2, 16, v3
-; GFX11-NEXT: s_delay_alu instid0(VALU_DEP_4) | instskip(NEXT) | instid1(VALU_DEP_4)
-; GFX11-NEXT: v_add_nc_u32_e32 v4, 0x7fff, v4
-; GFX11-NEXT: v_lshrrev_b32_e32 v3, 16, v6
-; GFX11-NEXT: v_cmp_u_f32_e32 vcc_lo, v5, v5
-; GFX11-NEXT: v_add_nc_u32_e32 v6, v7, v9
-; GFX11-NEXT: v_add_f32_e64 v7, 0x40c00000, s4
-; GFX11-NEXT: s_lshl_b32 s4, s23, 16
-; GFX11-NEXT: v_lshl_or_b32 v151, v0, 16, v1
-; GFX11-NEXT: v_add_f32_e64 v12, 0x40c00000, s4
-; GFX11-NEXT: v_add_nc_u32_e32 v6, 0x7fff, v6
-; GFX11-NEXT: v_bfe_u32 v11, v7, 16, 1
-; GFX11-NEXT: v_cndmask_b32_e32 v5, v4, v8, vcc_lo
-; GFX11-NEXT: v_cmp_u_f32_e32 vcc_lo, v9, v9
-; GFX11-NEXT: s_and_b32 s4, s22, 0xffff0000
-; GFX11-NEXT: v_bfe_u32 v9, v12, 16, 1
-; GFX11-NEXT: v_add_nc_u32_e32 v8, v11, v7
-; GFX11-NEXT: v_and_b32_e32 v4, 0xffff, v3
-; GFX11-NEXT: v_cndmask_b32_e32 v6, v6, v10, vcc_lo
-; GFX11-NEXT: v_add_f32_e64 v10, 0x40c00000, s4
-; GFX11-NEXT: s_lshl_b32 s4, s22, 16
-; GFX11-NEXT: v_lshrrev_b32_e32 v3, 16, v5
-; GFX11-NEXT: v_add_f32_e64 v11, 0x40c00000, s4
-; GFX11-NEXT: v_lshrrev_b32_e32 v5, 16, v6
-; GFX11-NEXT: v_add_nc_u32_e32 v6, 0x7fff, v8
-; GFX11-NEXT: v_add_nc_u32_e32 v8, v9, v12
-; GFX11-NEXT: v_or_b32_e32 v9, 0x400000, v7
-; GFX11-NEXT: v_bfe_u32 v14, v10, 16, 1
-; GFX11-NEXT: v_cmp_u_f32_e32 vcc_lo, v7, v7
-; GFX11-NEXT: v_or_b32_e32 v13, 0x400000, v12
-; GFX11-NEXT: v_add_nc_u32_e32 v8, 0x7fff, v8
-; GFX11-NEXT: s_and_b32 s4, s21, 0xffff0000
-; GFX11-NEXT: v_cndmask_b32_e32 v7, v6, v9, vcc_lo
-; GFX11-NEXT: v_bfe_u32 v9, v11, 16, 1
-; GFX11-NEXT: v_cmp_u_f32_e32 vcc_lo, v12, v12
-; GFX11-NEXT: v_add_nc_u32_e32 v12, v14, v10
-; GFX11-NEXT: v_and_b32_e32 v6, 0xffff, v5
-; GFX11-NEXT: v_lshrrev_b32_e32 v5, 16, v7
-; GFX11-NEXT: v_dual_cndmask_b32 v8, v8, v13 :: v_dual_add_nc_u32 v7, v9, v11
-; GFX11-NEXT: s_delay_alu instid0(VALU_DEP_4)
-; GFX11-NEXT: v_add_nc_u32_e32 v9, 0x7fff, v12
-; GFX11-NEXT: v_or_b32_e32 v12, 0x400000, v10
-; GFX11-NEXT: v_add_f32_e64 v13, 0x40c00000, s4
-; GFX11-NEXT: v_cmp_u_f32_e32 vcc_lo, v10, v10
-; GFX11-NEXT: s_lshl_b32 s4, s21, 16
-; GFX11-NEXT: v_add_nc_u32_e32 v7, 0x7fff, v7
-; GFX11-NEXT: v_or_b32_e32 v14, 0x400000, v11
-; GFX11-NEXT: v_add_f32_e64 v16, 0x40c00000, s4
-; GFX11-NEXT: v_cndmask_b32_e32 v9, v9, v12, vcc_lo
-; GFX11-NEXT: v_bfe_u32 v10, v13, 16, 1
-; GFX11-NEXT: v_cmp_u_f32_e32 vcc_lo, v11, v11
-; GFX11-NEXT: v_lshrrev_b32_e32 v8, 16, v8
-; GFX11-NEXT: v_bfe_u32 v12, v16, 16, 1
-; GFX11-NEXT: s_and_b32 s4, s20, 0xffff0000
-; GFX11-NEXT: v_dual_cndmask_b32 v11, v7, v14 :: v_dual_add_nc_u32 v10, v10, v13
-; GFX11-NEXT: s_delay_alu instid0(VALU_DEP_3) | instskip(SKIP_2) | instid1(VALU_DEP_4)
-; GFX11-NEXT: v_and_b32_e32 v7, 0xffff, v8
-; GFX11-NEXT: v_lshrrev_b32_e32 v8, 16, v9
-; GFX11-NEXT: v_or_b32_e32 v14, 0x400000, v13
-; GFX11-NEXT: v_add_nc_u32_e32 v10, 0x7fff, v10
-; GFX11-NEXT: v_lshrrev_b32_e32 v9, 16, v11
-; GFX11-NEXT: v_add_nc_u32_e32 v11, v12, v16
-; GFX11-NEXT: v_add_f32_e64 v12, 0x40c00000, s4
-; GFX11-NEXT: v_cmp_u_f32_e32 vcc_lo, v13, v13
-; GFX11-NEXT: s_lshl_b32 s4, s20, 16
-; GFX11-NEXT: v_or_b32_e32 v17, 0x400000, v16
-; GFX11-NEXT: v_add_nc_u32_e32 v11, 0x7fff, v11
-; GFX11-NEXT: v_bfe_u32 v18, v12, 16, 1
-; GFX11-NEXT: v_add_f32_e64 v19, 0x40c00000, s4
-; GFX11-NEXT: v_cndmask_b32_e32 v13, v10, v14, vcc_lo
-; GFX11-NEXT: v_cmp_u_f32_e32 vcc_lo, v16, v16
-; GFX11-NEXT: s_and_b32 s4, s19, 0xffff0000
-; GFX11-NEXT: v_add_nc_u32_e32 v14, v18, v12
-; GFX11-NEXT: v_bfe_u32 v16, v19, 16, 1
-; GFX11-NEXT: v_and_b32_e32 v10, 0xffff, v9
-; GFX11-NEXT: v_cndmask_b32_e32 v11, v11, v17, vcc_lo
-; GFX11-NEXT: v_add_f32_e64 v17, 0x40c00000, s4
-; GFX11-NEXT: s_lshl_b32 s4, s19, 16
-; GFX11-NEXT: v_lshrrev_b32_e32 v9, 16, v13
-; GFX11-NEXT: v_add_nc_u32_e32 v13, 0x7fff, v14
-; GFX11-NEXT: v_add_nc_u32_e32 v14, v16, v19
-; GFX11-NEXT: v_or_b32_e32 v16, 0x400000, v12
-; GFX11-NEXT: v_add_f32_e64 v18, 0x40c00000, s4
-; GFX11-NEXT: v_bfe_u32 v21, v17, 16, 1
-; GFX11-NEXT: v_cmp_u_f32_e32 vcc_lo, v12, v12
-; GFX11-NEXT: v_lshrrev_b32_e32 v11, 16, v11
-; GFX11-NEXT: v_add_nc_u32_e32 v14, 0x7fff, v14
-; GFX11-NEXT: v_or_b32_e32 v20, 0x400000, v19
-; GFX11-NEXT: s_and_b32 s4, s18, 0xffff0000
-; GFX11-NEXT: v_cndmask_b32_e32 v13, v13, v16, vcc_lo
-; GFX11-NEXT: v_bfe_u32 v16, v18, 16, 1
-; GFX11-NEXT: v_cmp_u_f32_e32 vcc_lo, v19, v19
-; GFX11-NEXT: v_add_nc_u32_e32 v19, v21, v17
-; GFX11-NEXT: v_and_b32_e32 v12, 0xffff, v11
-; GFX11-NEXT: v_lshrrev_b32_e32 v11, 16, v13
-; GFX11-NEXT: v_dual_cndmask_b32 v14, v14, v20 :: v_dual_add_nc_u32 v13, v16, v18
-; GFX11-NEXT: s_delay_alu instid0(VALU_DEP_4)
-; GFX11-NEXT: v_add_nc_u32_e32 v16, 0x7fff, v19
-; GFX11-NEXT: v_or_b32_e32 v19, 0x400000, v17
-; GFX11-NEXT: v_add_f32_e64 v20, 0x40c00000, s4
-; GFX11-NEXT: v_cmp_u_f32_e32 vcc_lo, v17, v17
-; GFX11-NEXT: s_lshl_b32 s4, s18, 16
-; GFX11-NEXT: v_add_nc_u32_e32 v13, 0x7fff, v13
-; GFX11-NEXT: v_or_b32_e32 v21, 0x400000, v18
-; GFX11-NEXT: v_add_f32_e64 v22, 0x40c00000, s4
-; GFX11-NEXT: v_cndmask_b32_e32 v16, v16, v19, vcc_lo
-; GFX11-NEXT: v_bfe_u32 v17, v20, 16, 1
-; GFX11-NEXT: v_cmp_u_f32_e32 vcc_lo, v18, v18
-; GFX11-NEXT: v_lshrrev_b32_e32 v14, 16, v14
-; GFX11-NEXT: v_bfe_u32 v19, v22, 16, 1
-; GFX11-NEXT: s_and_b32 s4, s17, 0xffff0000
-; GFX11-NEXT: v_add_nc_u32_e32 v17, v17, v20
-; GFX11-NEXT: s_delay_alu instid0(VALU_DEP_3) | instskip(SKIP_2) | instid1(VALU_DEP_4)
-; GFX11-NEXT: v_dual_cndmask_b32 v18, v13, v21 :: v_dual_and_b32 v13, 0xffff, v14
-; GFX11-NEXT: v_lshrrev_b32_e32 v14, 16, v16
-; GFX11-NEXT: v_or_b32_e32 v21, 0x400000, v20
-; GFX11-NEXT: v_add_nc_u32_e32 v17, 0x7fff, v17
-; GFX11-NEXT: s_delay_alu instid0(VALU_DEP_4)
-; GFX11-NEXT: v_lshrrev_b32_e32 v16, 16, v18
-; GFX11-NEXT: v_add_nc_u32_e32 v18, v19, v22
-; GFX11-NEXT: v_add_f32_e64 v19, 0x40c00000, s4
-; GFX11-NEXT: v_cmp_u_f32_e32 vcc_lo, v20, v20
-; GFX11-NEXT: s_lshl_b32 s4, s17, 16
-; GFX11-NEXT: v_or_b32_e32 v23, 0x400000, v22
-; GFX11-NEXT: v_add_nc_u32_e32 v18, 0x7fff, v18
-; GFX11-NEXT: v_bfe_u32 v24, v19, 16, 1
-; GFX11-NEXT: v_add_f32_e64 v25, 0x40c00000, s4
-; GFX11-NEXT: v_cndmask_b32_e32 v20, v17, v21, vcc_lo
-; GFX11-NEXT: v_cmp_u_f32_e32 vcc_lo, v22, v22
-; GFX11-NEXT: s_and_b32 s4, s16, 0xffff0000
-; GFX11-NEXT: v_add_nc_u32_e32 v21, v24, v19
-; GFX11-NEXT: v_bfe_u32 v22, v25, 16, 1
-; GFX11-NEXT: v_dual_cndmask_b32 v18, v18, v23 :: v_dual_and_b32 v17, 0xffff, v16
-; GFX11-NEXT: v_add_f32_e64 v23, 0x40c00000, s4
-; GFX11-NEXT: s_lshl_b32 s4, s16, 16
-; GFX11-NEXT: v_lshrrev_b32_e32 v16, 16, v20
-; GFX11-NEXT: v_add_nc_u32_e32 v20, 0x7fff, v21
-; GFX11-NEXT: v_add_nc_u32_e32 v21, v22, v25
-; GFX11-NEXT: v_or_b32_e32 v22, 0x400000, v19
-; GFX11-NEXT: v_add_f32_e64 v24, 0x40c00000, s4
-; GFX11-NEXT: v_bfe_u32 v27, v23, 16, 1
-; GFX11-NEXT: v_cmp_u_f32_e32 vcc_lo, v19, v19
-; GFX11-NEXT: v_lshrrev_b32_e32 v18, 16, v18
-; GFX11-NEXT: v_add_nc_u32_e32 v21, 0x7fff, v21
-; GFX11-NEXT: v_or_b32_e32 v26, 0x400000, v25
-; GFX11-NEXT: s_and_b32 s4, s3, 0xffff0000
-; GFX11-NEXT: v_cndmask_b32_e32 v20, v20, v22, vcc_lo
-; GFX11-NEXT: v_bfe_u32 v22, v24, 16, 1
-; GFX11-NEXT: v_cmp_u_f32_e32 vcc_lo, v25, v25
-; GFX11-NEXT: v_add_nc_u32_e32 v25, v27, v23
-; GFX11-NEXT: v_and_b32_e32 v19, 0xffff, v18
-; GFX11-NEXT: v_lshrrev_b32_e32 v18, 16, v20
-; GFX11-NEXT: v_dual_cndmask_b32 v21, v21, v26 :: v_dual_add_nc_u32 v20, v22, v24
-; GFX11-NEXT: s_delay_alu instid0(VALU_DEP_4)
-; GFX11-NEXT: v_add_nc_u32_e32 v22, 0x7fff, v25
-; GFX11-NEXT: v_or_b32_e32 v25, 0x400000, v23
-; GFX11-NEXT: v_add_f32_e64 v26, 0x40c00000, s4
-; GFX11-NEXT: v_cmp_u_f32_e32 vcc_lo, v23, v23
-; GFX11-NEXT: s_lshl_b32 s3, s3, 16
-; GFX11-NEXT: v_add_nc_u32_e32 v20, 0x7fff, v20
-; GFX11-NEXT: v_or_b32_e32 v27, 0x400000, v24
-; GFX11-NEXT: v_add_f32_e64 v28, 0x40c00000, s3
-; GFX11-NEXT: v_cndmask_b32_e32 v22, v22, v25, vcc_lo
-; GFX11-NEXT: v_bfe_u32 v23, v26, 16, 1
-; GFX11-NEXT: v_cmp_u_f32_e32 vcc_lo, v24, v24
-; GFX11-NEXT: v_lshrrev_b32_e32 v21, 16, v21
-; GFX11-NEXT: v_bfe_u32 v25, v28, 16, 1
-; GFX11-NEXT: s_and_b32 s3, s2, 0xffff0000
-; GFX11-NEXT: v_dual_cndmask_b32 v24, v20, v27 :: v_dual_add_nc_u32 v23, v23, v26
-; GFX11-NEXT: s_delay_alu instid0(VALU_DEP_3) | instskip(SKIP_2) | instid1(VALU_DEP_4)
-; GFX11-NEXT: v_and_b32_e32 v20, 0xffff, v21
-; GFX11-NEXT: v_lshrrev_b32_e32 v21, 16, v22
-; GFX11-NEXT: v_or_b32_e32 v27, 0x400000, v26
-; GFX11-NEXT: v_add_nc_u32_e32 v23, 0x7fff, v23
-; GFX11-NEXT: v_lshrrev_b32_e32 v22, 16, v24
-; GFX11-NEXT: v_add_nc_u32_e32 v24, v25, v28
-; GFX11-NEXT: v_add_f32_e64 v25, 0x40c00000, s3
-; GFX11-NEXT: v_cmp_u_f32_e32 vcc_lo, v26, v26
-; GFX11-NEXT: s_lshl_b32 s2, s2, 16
-; GFX11-NEXT: v_or_b32_e32 v29, 0x400000, v28
-; GFX11-NEXT: v_add_nc_u32_e32 v24, 0x7fff, v24
-; GFX11-NEXT: v_bfe_u32 v30, v25, 16, 1
-; GFX11-NEXT: v_add_f32_e64 v31, 0x40c00000, s2
-; GFX11-NEXT: v_cndmask_b32_e32 v26, v23, v27, vcc_lo
-; GFX11-NEXT: v_cmp_u_f32_e32 vcc_lo, v28, v28
-; GFX11-NEXT: s_and_b32 s2, s1, 0xffff0000
-; GFX11-NEXT: v_add_nc_u32_e32 v27, v30, v25
-; GFX11-NEXT: v_bfe_u32 v28, v31, 16, 1
-; GFX11-NEXT: s_lshl_b32 s1, s1, 16
-; GFX11-NEXT: v_cndmask_b32_e32 v24, v24, v29, vcc_lo
-; GFX11-NEXT: v_add_f32_e64 v29, 0x40c00000, s2
-; GFX11-NEXT: v_and_b32_e32 v23, 0xffff, v22
-; GFX11-NEXT: v_lshrrev_b32_e32 v22, 16, v26
-; GFX11-NEXT: v_add_nc_u32_e32 v26, 0x7fff, v27
-; GFX11-NEXT: v_add_nc_u32_e32 v27, v28, v31
-; GFX11-NEXT: v_or_b32_e32 v28, 0x400000, v25
-; GFX11-NEXT: v_add_f32_e64 v30, 0x40c00000, s1
-; GFX11-NEXT: v_bfe_u32 v33, v29, 16, 1
-; GFX11-NEXT: v_cmp_u_f32_e32 vcc_lo, v25, v25
-; GFX11-NEXT: v_lshrrev_b32_e32 v24, 16, v24
-; GFX11-NEXT: v_add_nc_u32_e32 v27, 0x7fff, v27
-; GFX11-NEXT: v_or_b32_e32 v32, 0x400000, v31
-; GFX11-NEXT: s_and_b32 s1, s0, 0xffff0000
-; GFX11-NEXT: v_cndmask_b32_e32 v26, v26, v28, vcc_lo
-; GFX11-NEXT: v_bfe_u32 v28, v30, 16, 1
-; GFX11-NEXT: v_cmp_u_f32_e32 vcc_lo, v31, v31
-; GFX11-NEXT: v_add_nc_u32_e32 v31, v33, v29
-; GFX11-NEXT: v_and_b32_e32 v25, 0xffff, v24
-; GFX11-NEXT: v_lshrrev_b32_e32 v24, 16, v26
-; GFX11-NEXT: v_dual_cndmask_b32 v27, v27, v32 :: v_dual_add_nc_u32 v26, v28, v30
-; GFX11-NEXT: s_delay_alu instid0(VALU_DEP_4)
-; GFX11-NEXT: v_add_nc_u32_e32 v28, 0x7fff, v31
-; GFX11-NEXT: v_or_b32_e32 v31, 0x400000, v29
-; GFX11-NEXT: v_add_f32_e64 v32, 0x40c00000, s1
-; GFX11-NEXT: v_cmp_u_f32_e32 vcc_lo, v29, v29
-; GFX11-NEXT: s_lshl_b32 s0, s0, 16
-; GFX11-NEXT: v_add_nc_u32_e32 v26, 0x7fff, v26
-; GFX11-NEXT: v_or_b32_e32 v33, 0x400000, v30
-; GFX11-NEXT: v_add_f32_e64 v34, 0x40c00000, s0
-; GFX11-NEXT: v_cndmask_b32_e32 v28, v28, v31, vcc_lo
-; GFX11-NEXT: v_bfe_u32 v29, v32, 16, 1
-; GFX11-NEXT: v_cmp_u_f32_e32 vcc_lo, v30, v30
-; GFX11-NEXT: v_or_b32_e32 v35, 0x400000, v32
-; GFX11-NEXT: v_bfe_u32 v31, v34, 16, 1
-; GFX11-NEXT: v_or_b32_e32 v36, 0x400000, v34
-; GFX11-NEXT: v_lshrrev_b32_e32 v27, 16, v27
-; GFX11-NEXT: v_cndmask_b32_e32 v30, v26, v33, vcc_lo
-; GFX11-NEXT: v_lshrrev_b32_e32 v26, 16, v28
-; GFX11-NEXT: v_add_nc_u32_e32 v28, v29, v32
-; GFX11-NEXT: v_lshlrev_b32_e32 v33, 16, v178
-; GFX11-NEXT: v_cmp_u_f32_e32 vcc_lo, v32, v32
-; GFX11-NEXT: v_lshrrev_b32_e32 v29, 16, v30
-; GFX11-NEXT: v_add_nc_u32_e32 v30, v31, v34
-; GFX11-NEXT: v_and_b32_e32 v31, 0xffff0000, v178
-; GFX11-NEXT: v_add_nc_u32_e32 v28, 0x7fff, v28
-; GFX11-NEXT: v_add_f32_e32 v33, 0x40c00000, v33
-; GFX11-NEXT: v_lshl_or_b32 v109, v5, 16, v7
-; GFX11-NEXT: v_add_nc_u32_e32 v30, 0x7fff, v30
-; GFX11-NEXT: v_add_f32_e32 v31, 0x40c00000, v31
-; GFX11-NEXT: v_cndmask_b32_e32 v32, v28, v35, vcc_lo
-; GFX11-NEXT: v_bfe_u32 v37, v33, 16, 1
-; GFX11-NEXT: v_cmp_u_f32_e32 vcc_lo, v34, v34
-; GFX11-NEXT: v_and_b32_e32 v28, 0xffff, v29
-; GFX11-NEXT: v_bfe_u32 v35, v31, 16, 1
-; GFX11-NEXT: v_lshrrev_b32_e32 v29, 16, v32
-; GFX11-NEXT: v_add_nc_u32_e32 v32, v37, v33
-; GFX11-NEXT: v_cndmask_b32_e32 v30, v30, v36, vcc_lo
-; GFX11-NEXT: v_lshlrev_b32_e32 v36, 16, v179
-; GFX11-NEXT: v_add_nc_u32_e32 v34, v35, v31
-; GFX11-NEXT: v_or_b32_e32 v37, 0x400000, v33
-; GFX11-NEXT: v_add_nc_u32_e32 v32, 0x7fff, v32
-; GFX11-NEXT: v_cmp_u_f32_e32 vcc_lo, v33, v33
-; GFX11-NEXT: v_and_b32_e32 v35, 0xffff0000, v179
-; GFX11-NEXT: v_add_nc_u32_e32 v34, 0x7fff, v34
-; GFX11-NEXT: v_or_b32_e32 v38, 0x400000, v31
-; GFX11-NEXT: v_add_f32_e32 v36, 0x40c00000, v36
-; GFX11-NEXT: v_cndmask_b32_e32 v32, v32, v37, vcc_lo
-; GFX11-NEXT: v_cmp_u_f32_e32 vcc_lo, v31, v31
-; GFX11-NEXT: v_add_f32_e32 v35, 0x40c00000, v35
-; GFX11-NEXT: v_and_b32_e32 v37, 0xffff0000, v180
-; GFX11-NEXT: v_or_b32_e32 v39, 0x400000, v36
-; GFX11-NEXT: v_lshrrev_b32_e32 v32, 16, v32
-; GFX11-NEXT: v_cndmask_b32_e32 v31, v34, v38, vcc_lo
-; GFX11-NEXT: v_bfe_u32 v34, v36, 16, 1
-; GFX11-NEXT: v_bfe_u32 v33, v35, 16, 1
-; GFX11-NEXT: v_lshlrev_b32_e32 v38, 16, v180
-; GFX11-NEXT: v_cmp_u_f32_e32 vcc_lo, v36, v36
-; GFX11-NEXT: s_delay_alu instid0(VALU_DEP_4) | instskip(NEXT) | instid1(VALU_DEP_4)
-; GFX11-NEXT: v_dual_add_f32 v37, 0x40c00000, v37 :: v_dual_add_nc_u32 v34, v34, v36
-; GFX11-NEXT: v_add_nc_u32_e32 v33, v33, v35
-; GFX11-NEXT: v_or_b32_e32 v48, 0x400000, v35
-; GFX11-NEXT: v_add_f32_e32 v38, 0x40c00000, v38
-; GFX11-NEXT: v_lshrrev_b32_e32 v31, 16, v31
-; GFX11-NEXT: v_add_nc_u32_e32 v34, 0x7fff, v34
-; GFX11-NEXT: v_add_nc_u32_e32 v33, 0x7fff, v33
-; GFX11-NEXT: v_and_b32_e32 v32, 0xffff, v32
-; GFX11-NEXT: v_bfe_u32 v36, v37, 16, 1
-; GFX11-NEXT: v_lshrrev_b32_e32 v30, 16, v30
-; GFX11-NEXT: v_cndmask_b32_e32 v34, v34, v39, vcc_lo
-; GFX11-NEXT: v_cmp_u_f32_e32 vcc_lo, v35, v35
-; GFX11-NEXT: v_bfe_u32 v35, v38, 16, 1
-; GFX11-NEXT: v_lshl_or_b32 v178, v31, 16, v32
-; GFX11-NEXT: v_add_nc_u32_e32 v31, v36, v37
-; GFX11-NEXT: v_dual_cndmask_b32 v33, v33, v48 :: v_dual_lshlrev_b32 v36, 16, v182
-; GFX11-NEXT: v_lshrrev_b32_e32 v34, 16, v34
-; GFX11-NEXT: v_or_b32_e32 v39, 0x400000, v38
-; GFX11-NEXT: v_cmp_u_f32_e32 vcc_lo, v38, v38
-; GFX11-NEXT: s_delay_alu instid0(VALU_DEP_4)
-; GFX11-NEXT: v_add_f32_e32 v36, 0x40c00000, v36
-; GFX11-NEXT: v_lshrrev_b32_e32 v32, 16, v33
-; GFX11-NEXT: v_add_nc_u32_e32 v33, v35, v38
-; GFX11-NEXT: v_and_b32_e32 v35, 0xffff0000, v182
-; GFX11-NEXT: v_and_b32_e32 v34, 0xffff, v34
-; GFX11-NEXT: v_or_b32_e32 v48, 0x400000, v37
-; GFX11-NEXT: v_add_nc_u32_e32 v31, 0x7fff, v31
-; GFX11-NEXT: v_add_nc_u32_e32 v33, 0x7fff, v33
-; GFX11-NEXT: v_add_f32_e32 v35, 0x40c00000, v35
-; GFX11-NEXT: v_lshl_or_b32 v179, v32, 16, v34
-; GFX11-NEXT: v_and_b32_e32 v30, 0xffff, v30
-; GFX11-NEXT: v_lshl_or_b32 v136, v2, 16, v4
-; GFX11-NEXT: v_cndmask_b32_e32 v33, v33, v39, vcc_lo
-; GFX11-NEXT: v_bfe_u32 v38, v35, 16, 1
-; GFX11-NEXT: v_cmp_u_f32_e32 vcc_lo, v37, v37
-; GFX11-NEXT: v_bfe_u32 v37, v36, 16, 1
-; GFX11-NEXT: v_or_b32_e32 v39, 0x400000, v36
-; GFX11-NEXT: v_lshrrev_b32_e32 v33, 16, v33
-; GFX11-NEXT: v_dual_cndmask_b32 v31, v31, v48 :: v_dual_add_nc_u32 v38, v38, v35
-; GFX11-NEXT: s_delay_alu instid0(VALU_DEP_4) | instskip(SKIP_2) | instid1(VALU_DEP_4)
-; GFX11-NEXT: v_add_nc_u32_e32 v32, v37, v36
-; GFX11-NEXT: v_and_b32_e32 v37, 0xffff0000, v181
-; GFX11-NEXT: v_cmp_u_f32_e32 vcc_lo, v36, v36
-; GFX11-NEXT: v_add_nc_u32_e32 v34, 0x7fff, v38
-; GFX11-NEXT: v_lshlrev_b32_e32 v38, 16, v181
-; GFX11-NEXT: v_add_nc_u32_e32 v32, 0x7fff, v32
-; GFX11-NEXT: v_or_b32_e32 v48, 0x400000, v35
-; GFX11-NEXT: v_add_f32_e32 v37, 0x40c00000, v37
-; GFX11-NEXT: v_lshrrev_b32_e32 v31, 16, v31
-; GFX11-NEXT: v_add_f32_e32 v38, 0x40c00000, v38
-; GFX11-NEXT: v_cndmask_b32_e32 v32, v32, v39, vcc_lo
-; GFX11-NEXT: v_cmp_u_f32_e32 vcc_lo, v35, v35
-; GFX11-NEXT: v_and_b32_e32 v33, 0xffff, v33
-; GFX11-NEXT: v_bfe_u32 v35, v37, 16, 1
-; GFX11-NEXT: v_bfe_u32 v36, v38, 16, 1
-; GFX11-NEXT: v_lshrrev_b32_e32 v32, 16, v32
-; GFX11-NEXT: v_cndmask_b32_e32 v34, v34, v48, vcc_lo
-; GFX11-NEXT: v_lshl_or_b32 v180, v31, 16, v33
-; GFX11-NEXT: v_add_nc_u32_e32 v33, v35, v37
-; GFX11-NEXT: v_and_b32_e32 v35, 0xffff0000, v170
-; GFX11-NEXT: v_or_b32_e32 v39, 0x400000, v38
-; GFX11-NEXT: v_lshrrev_b32_e32 v31, 16, v34
-; GFX11-NEXT: v_add_nc_u32_e32 v34, v36, v38
-; GFX11-NEXT: s_delay_alu instid0(VALU_DEP_4) | instskip(SKIP_2) | instid1(VALU_DEP_4)
-; GFX11-NEXT: v_dual_add_f32 v35, 0x40c00000, v35 :: v_dual_lshlrev_b32 v36, 16, v170
-; GFX11-NEXT: v_cmp_u_f32_e32 vcc_lo, v38, v38
-; GFX11-NEXT: v_and_b32_e32 v32, 0xffff, v32
-; GFX11-NEXT: v_add_nc_u32_e32 v34, 0x7fff, v34
-; GFX11-NEXT: s_delay_alu instid0(VALU_DEP_4)
-; GFX11-NEXT: v_add_f32_e32 v36, 0x40c00000, v36
-; GFX11-NEXT: v_add_nc_u32_e32 v33, 0x7fff, v33
-; GFX11-NEXT: v_or_b32_e32 v48, 0x400000, v37
-; GFX11-NEXT: v_bfe_u32 v38, v35, 16, 1
-; GFX11-NEXT: v_cndmask_b32_e32 v34, v34, v39, vcc_lo
-; GFX11-NEXT: v_bfe_u32 v39, v36, 16, 1
-; GFX11-NEXT: v_cmp_u_f32_e32 vcc_lo, v37, v37
-; GFX11-NEXT: v_lshl_or_b32 v182, v31, 16, v32
-; GFX11-NEXT: v_add_nc_u32_e32 v37, v38, v35
-; GFX11-NEXT: v_lshrrev_b32_e32 v34, 16, v34
-; GFX11-NEXT: v_add_nc_u32_e32 v31, v39, v36
-; GFX11-NEXT: v_cndmask_b32_e32 v33, v33, v48, vcc_lo
-; GFX11-NEXT: v_or_b32_e32 v38, 0x400000, v36
-; GFX11-NEXT: v_cmp_u_f32_e32 vcc_lo, v36, v36
-; GFX11-NEXT: v_or_b32_e32 v48, 0x400000, v35
-; GFX11-NEXT: v_add_nc_u32_e32 v31, 0x7fff, v31
-; GFX11-NEXT: v_lshrrev_b32_e32 v32, 16, v33
-; GFX11-NEXT: v_and_b32_e32 v33, 0xffff, v34
-; GFX11-NEXT: v_add_nc_u32_e32 v34, 0x7fff, v37
-; GFX11-NEXT: v_and_b32_e32 v37, 0xffff0000, v169
-; GFX11-NEXT: v_cndmask_b32_e32 v31, v31, v38, vcc_lo
-; GFX11-NEXT: v_cmp_u_f32_e32 vcc_lo, v35, v35
-; GFX11-NEXT: v_lshlrev_b32_e32 v39, 16, v169
-; GFX11-NEXT: v_lshl_or_b32 v181, v32, 16, v33
-; GFX11-NEXT: v_add_f32_e32 v37, 0x40c00000, v37
-; GFX11-NEXT: v_and_b32_e32 v38, 0xffff0000, v176
-; GFX11-NEXT: v_cndmask_b32_e32 v34, v34, v48, vcc_lo
-; GFX11-NEXT: v_add_f32_e32 v36, 0x40c00000, v39
-; GFX11-NEXT: v_lshrrev_b32_e32 v31, 16, v31
-; GFX11-NEXT: v_bfe_u32 v35, v37, 16, 1
-; GFX11-NEXT: v_or_b32_e32 v39, 0x400000, v37
-; GFX11-NEXT: v_lshrrev_b32_e32 v33, 16, v34
-; GFX11-NEXT: v_bfe_u32 v32, v36, 16, 1
-; GFX11-NEXT: v_add_f32_e32 v38, 0x40c00000, v38
-; GFX11-NEXT: v_add_nc_u32_e32 v34, v35, v37
-; GFX11-NEXT: v_lshlrev_b32_e32 v35, 16, v176
-; GFX11-NEXT: v_cmp_u_f32_e32 vcc_lo, v37, v37
-; GFX11-NEXT: v_add_nc_u32_e32 v32, v32, v36
-; GFX11-NEXT: v_and_b32_e32 v31, 0xffff, v31
-; GFX11-NEXT: v_add_nc_u32_e32 v34, 0x7fff, v34
-; GFX11-NEXT: v_add_f32_e32 v35, 0x40c00000, v35
-; GFX11-NEXT: v_or_b32_e32 v48, 0x400000, v36
-; GFX11-NEXT: v_add_nc_u32_e32 v32, 0x7fff, v32
-; GFX11-NEXT: v_bfe_u32 v37, v38, 16, 1
-; GFX11-NEXT: v_cndmask_b32_e32 v34, v34, v39, vcc_lo
-; GFX11-NEXT: v_bfe_u32 v49, v35, 16, 1
-; GFX11-NEXT: v_cmp_u_f32_e32 vcc_lo, v36, v36
-; GFX11-NEXT: v_and_b32_e32 v27, 0xffff, v27
-; GFX11-NEXT: v_lshl_or_b32 v170, v33, 16, v31
-; GFX11-NEXT: v_lshrrev_b32_e32 v31, 16, v34
-; GFX11-NEXT: v_add_nc_u32_e32 v36, v49, v35
-; GFX11-NEXT: v_dual_cndmask_b32 v32, v32, v48 :: v_dual_add_nc_u32 v33, v37, v38
-; GFX11-NEXT: v_and_b32_e32 v37, 0xffff0000, v174
-; GFX11-NEXT: v_cmp_u_f32_e32 vcc_lo, v35, v35
-; GFX11-NEXT: s_delay_alu instid0(VALU_DEP_4) | instskip(SKIP_4) | instid1(VALU_DEP_4)
-; GFX11-NEXT: v_add_nc_u32_e32 v34, 0x7fff, v36
-; GFX11-NEXT: v_or_b32_e32 v36, 0x400000, v35
-; GFX11-NEXT: v_lshrrev_b32_e32 v32, 16, v32
-; GFX11-NEXT: v_add_nc_u32_e32 v33, 0x7fff, v33
-; GFX11-NEXT: v_or_b32_e32 v39, 0x400000, v38
-; GFX11-NEXT: v_dual_add_f32 v35, 0x40c00000, v37 :: v_dual_cndmask_b32 v34, v34, v36
-; GFX11-NEXT: v_lshlrev_b32_e32 v36, 16, v174
-; GFX11-NEXT: v_cmp_u_f32_e32 vcc_lo, v38, v38
-; GFX11-NEXT: v_and_b32_e32 v32, 0xffff, v32
-; GFX11-NEXT: s_delay_alu instid0(VALU_DEP_4) | instskip(SKIP_2) | instid1(VALU_DEP_4)
-; GFX11-NEXT: v_bfe_u32 v37, v35, 16, 1
-; GFX11-NEXT: v_lshrrev_b32_e32 v34, 16, v34
-; GFX11-NEXT: v_dual_add_f32 v36, 0x40c00000, v36 :: v_dual_cndmask_b32 v33, v33, v39
-; GFX11-NEXT: v_lshl_or_b32 v169, v31, 16, v32
-; GFX11-NEXT: s_delay_alu instid0(VALU_DEP_4) | instskip(NEXT) | instid1(VALU_DEP_4)
-; GFX11-NEXT: v_add_nc_u32_e32 v37, v37, v35
-; GFX11-NEXT: v_and_b32_e32 v34, 0xffff, v34
-; GFX11-NEXT: s_delay_alu instid0(VALU_DEP_4)
-; GFX11-NEXT: v_bfe_u32 v31, v36, 16, 1
-; GFX11-NEXT: v_lshrrev_b32_e32 v33, 16, v33
-; GFX11-NEXT: v_and_b32_e32 v32, 0xffff0000, v171
-; GFX11-NEXT: v_cmp_u_f32_e32 vcc_lo, v35, v35
-; GFX11-NEXT: v_lshlrev_b32_e32 v38, 16, v177
-; GFX11-NEXT: v_add_nc_u32_e32 v31, v31, v36
-; GFX11-NEXT: v_lshl_or_b32 v176, v33, 16, v34
-; GFX11-NEXT: v_add_nc_u32_e32 v33, 0x7fff, v37
-; GFX11-NEXT: v_or_b32_e32 v34, 0x400000, v35
-; GFX11-NEXT: v_dual_add_f32 v32, 0x40c00000, v32 :: v_dual_lshlrev_b32 v37, 16, v171
-; GFX11-NEXT: v_add_nc_u32_e32 v31, 0x7fff, v31
-; GFX11-NEXT: v_add_f32_e32 v38, 0x40c00000, v38
-; GFX11-NEXT: s_delay_alu instid0(VALU_DEP_4)
-; GFX11-NEXT: v_cndmask_b32_e32 v33, v33, v34, vcc_lo
-; GFX11-NEXT: v_or_b32_e32 v34, 0x400000, v36
-; GFX11-NEXT: v_add_f32_e32 v35, 0x40c00000, v37
-; GFX11-NEXT: v_bfe_u32 v37, v32, 16, 1
-; GFX11-NEXT: v_cmp_u_f32_e32 vcc_lo, v36, v36
-; GFX11-NEXT: v_or_b32_e32 v39, 0x400000, v32
-; GFX11-NEXT: v_bfe_u32 v50, v38, 16, 1
-; GFX11-NEXT: v_or_b32_e32 v48, 0x400000, v35
-; GFX11-NEXT: v_dual_cndmask_b32 v31, v31, v34 :: v_dual_add_nc_u32 v36, v37, v32
-; GFX11-NEXT: v_bfe_u32 v34, v35, 16, 1
-; GFX11-NEXT: v_and_b32_e32 v37, 0xffff0000, v177
-; GFX11-NEXT: v_cmp_u_f32_e32 vcc_lo, v32, v32
-; GFX11-NEXT: s_delay_alu instid0(VALU_DEP_4) | instskip(SKIP_1) | instid1(VALU_DEP_4)
-; GFX11-NEXT: v_add_nc_u32_e32 v36, 0x7fff, v36
-; GFX11-NEXT: v_lshrrev_b32_e32 v31, 16, v31
-; GFX11-NEXT: v_dual_add_f32 v37, 0x40c00000, v37 :: v_dual_add_nc_u32 v34, v34, v35
-; GFX11-NEXT: v_lshrrev_b32_e32 v33, 16, v33
-; GFX11-NEXT: s_delay_alu instid0(VALU_DEP_4) | instskip(SKIP_1) | instid1(VALU_DEP_4)
-; GFX11-NEXT: v_cndmask_b32_e32 v32, v36, v39, vcc_lo
-; GFX11-NEXT: v_cmp_u_f32_e32 vcc_lo, v35, v35
-; GFX11-NEXT: v_add_nc_u32_e32 v34, 0x7fff, v34
-; GFX11-NEXT: v_bfe_u32 v49, v37, 16, 1
-; GFX11-NEXT: v_add_nc_u32_e32 v36, v50, v38
-; GFX11-NEXT: v_or_b32_e32 v39, 0x400000, v38
-; GFX11-NEXT: v_lshlrev_b32_e32 v50, 16, v184
-; GFX11-NEXT: s_delay_alu instid0(VALU_DEP_4) | instskip(NEXT) | instid1(VALU_DEP_4)
-; GFX11-NEXT: v_dual_cndmask_b32 v34, v34, v48 :: v_dual_add_nc_u32 v35, v49, v37
-; GFX11-NEXT: v_add_nc_u32_e32 v36, 0x7fff, v36
-; GFX11-NEXT: v_cmp_u_f32_e32 vcc_lo, v38, v38
-; GFX11-NEXT: v_and_b32_e32 v48, 0xffff0000, v184
-; GFX11-NEXT: s_delay_alu instid0(VALU_DEP_4)
-; GFX11-NEXT: v_lshrrev_b32_e32 v34, 16, v34
-; GFX11-NEXT: v_add_nc_u32_e32 v35, 0x7fff, v35
-; GFX11-NEXT: v_or_b32_e32 v49, 0x400000, v37
-; GFX11-NEXT: v_cndmask_b32_e32 v36, v36, v39, vcc_lo
-; GFX11-NEXT: v_cmp_u_f32_e32 vcc_lo, v37, v37
-; GFX11-NEXT: v_add_f32_e32 v37, 0x40c00000, v50
-; GFX11-NEXT: v_and_b32_e32 v31, 0xffff, v31
-; GFX11-NEXT: v_lshrrev_b32_e32 v32, 16, v32
-; GFX11-NEXT: v_dual_add_f32 v38, 0x40c00000, v48 :: v_dual_cndmask_b32 v35, v35, v49
-; GFX11-NEXT: v_lshrrev_b32_e32 v36, 16, v36
-; GFX11-NEXT: v_and_b32_e32 v34, 0xffff, v34
-; GFX11-NEXT: v_bfe_u32 v48, v37, 16, 1
-; GFX11-NEXT: s_delay_alu instid0(VALU_DEP_4)
-; GFX11-NEXT: v_bfe_u32 v39, v38, 16, 1
-; GFX11-NEXT: v_lshrrev_b32_e32 v35, 16, v35
-; GFX11-NEXT: v_and_b32_e32 v36, 0xffff, v36
-; GFX11-NEXT: v_lshl_or_b32 v174, v33, 16, v31
-; GFX11-NEXT: v_lshl_or_b32 v171, v32, 16, v34
-; GFX11-NEXT: v_add_nc_u32_e32 v31, v48, v37
-; GFX11-NEXT: v_and_b32_e32 v33, 0xffff0000, v175
-; GFX11-NEXT: v_lshlrev_b32_e32 v34, 16, v175
-; GFX11-NEXT: v_add_nc_u32_e32 v39, v39, v38
-; GFX11-NEXT: v_lshl_or_b32 v177, v35, 16, v36
-; GFX11-NEXT: v_add_nc_u32_e32 v31, 0x7fff, v31
-; GFX11-NEXT: v_or_b32_e32 v35, 0x400000, v37
-; GFX11-NEXT: v_dual_add_f32 v33, 0x40c00000, v33 :: v_dual_add_f32 v34, 0x40c00000, v34
-; GFX11-NEXT: v_cmp_u_f32_e32 vcc_lo, v37, v37
-; GFX11-NEXT: v_add_nc_u32_e32 v32, 0x7fff, v39
-; GFX11-NEXT: v_or_b32_e32 v36, 0x400000, v38
-; GFX11-NEXT: s_delay_alu instid0(VALU_DEP_4)
-; GFX11-NEXT: v_bfe_u32 v37, v33, 16, 1
-; GFX11-NEXT: v_bfe_u32 v39, v34, 16, 1
-; GFX11-NEXT: v_cndmask_b32_e32 v31, v31, v35, vcc_lo
-; GFX11-NEXT: v_and_b32_e32 v35, 0xffff0000, v173
-; GFX11-NEXT: v_cmp_u_f32_e32 vcc_lo, v38, v38
-; GFX11-NEXT: v_lshlrev_b32_e32 v48, 16, v173
-; GFX11-NEXT: v_or_b32_e32 v49, 0x400000, v33
-; GFX11-NEXT: v_lshrrev_b32_e32 v31, 16, v31
-; GFX11-NEXT: v_dual_add_f32 v35, 0x40c00000, v35 :: v_dual_cndmask_b32 v32, v32, v36
-; GFX11-NEXT: v_add_nc_u32_e32 v36, v37, v33
-; GFX11-NEXT: v_add_nc_u32_e32 v37, v39, v34
-; GFX11-NEXT: v_or_b32_e32 v39, 0x400000, v34
-; GFX11-NEXT: s_delay_alu instid0(VALU_DEP_4)
-; GFX11-NEXT: v_bfe_u32 v38, v35, 16, 1
-; GFX11-NEXT: v_cmp_u_f32_e32 vcc_lo, v34, v34
-; GFX11-NEXT: v_add_nc_u32_e32 v36, 0x7fff, v36
-; GFX11-NEXT: v_add_nc_u32_e32 v37, 0x7fff, v37
-; GFX11-NEXT: v_lshrrev_b32_e32 v32, 16, v32
-; GFX11-NEXT: v_add_nc_u32_e32 v38, v38, v35
-; GFX11-NEXT: v_and_b32_e32 v31, 0xffff, v31
-; GFX11-NEXT: v_lshl_or_b32 v122, v3, 16, v6
-; GFX11-NEXT: v_cndmask_b32_e32 v34, v37, v39, vcc_lo
-; GFX11-NEXT: v_cmp_u_f32_e32 vcc_lo, v33, v33
-; GFX11-NEXT: v_add_nc_u32_e32 v37, 0x7fff, v38
-; GFX11-NEXT: v_add_f32_e32 v38, 0x40c00000, v48
-; GFX11-NEXT: v_or_b32_e32 v39, 0x400000, v35
-; GFX11-NEXT: v_dual_cndmask_b32 v33, v36, v49 :: v_dual_lshlrev_b32 v48, 16, v183
-; GFX11-NEXT: v_cmp_u_f32_e32 vcc_lo, v35, v35
-; GFX11-NEXT: s_delay_alu instid0(VALU_DEP_4) | instskip(SKIP_1) | instid1(VALU_DEP_4)
-; GFX11-NEXT: v_bfe_u32 v36, v38, 16, 1
-; GFX11-NEXT: v_or_b32_e32 v49, 0x400000, v38
-; GFX11-NEXT: v_add_f32_e32 v48, 0x40c00000, v48
-; GFX11-NEXT: v_lshrrev_b32_e32 v34, 16, v34
-; GFX11-NEXT: v_cndmask_b32_e32 v35, v37, v39, vcc_lo
-; GFX11-NEXT: v_and_b32_e32 v37, 0xffff0000, v172
-; GFX11-NEXT: v_lshlrev_b32_e32 v39, 16, v172
-; GFX11-NEXT: v_add_nc_u32_e32 v36, v36, v38
-; GFX11-NEXT: v_cmp_u_f32_e32 vcc_lo, v38, v38
-; GFX11-NEXT: v_or_b32_e32 v55, 0x400000, v48
-; GFX11-NEXT: v_add_f32_e32 v37, 0x40c00000, v37
-; GFX11-NEXT: v_add_f32_e32 v39, 0x40c00000, v39
-; GFX11-NEXT: v_add_nc_u32_e32 v36, 0x7fff, v36
-; GFX11-NEXT: v_lshrrev_b32_e32 v33, 16, v33
-; GFX11-NEXT: v_and_b32_e32 v34, 0xffff, v34
-; GFX11-NEXT: v_bfe_u32 v50, v37, 16, 1
-; GFX11-NEXT: v_bfe_u32 v38, v39, 16, 1
-; GFX11-NEXT: v_cndmask_b32_e32 v36, v36, v49, vcc_lo
-; GFX11-NEXT: v_or_b32_e32 v54, 0x400000, v39
-; GFX11-NEXT: v_cmp_u_f32_e32 vcc_lo, v39, v39
-; GFX11-NEXT: v_dual_add_f32 v50, 0x40c00000, v51 :: v_dual_add_nc_u32 v49, v50, v37
-; GFX11-NEXT: v_bfe_u32 v51, v48, 16, 1
-; GFX11-NEXT: v_add_nc_u32_e32 v38, v38, v39
-; GFX11-NEXT: v_or_b32_e32 v53, 0x400000, v37
-; GFX11-NEXT: s_delay_alu instid0(VALU_DEP_4)
-; GFX11-NEXT: v_add_nc_u32_e32 v49, 0x7fff, v49
-; GFX11-NEXT: v_bfe_u32 v52, v50, 16, 1
-; GFX11-NEXT: v_add_nc_u32_e32 v51, v51, v48
-; GFX11-NEXT: v_add_nc_u32_e32 v38, 0x7fff, v38
-; GFX11-NEXT: v_lshrrev_b32_e32 v36, 16, v36
-; GFX11-NEXT: v_lshrrev_b32_e32 v35, 16, v35
-; GFX11-NEXT: v_add_nc_u32_e32 v52, v52, v50
-; GFX11-NEXT: s_delay_alu instid0(VALU_DEP_4) | instskip(SKIP_2) | instid1(VALU_DEP_4)
-; GFX11-NEXT: v_dual_cndmask_b32 v38, v38, v54 :: v_dual_add_nc_u32 v51, 0x7fff, v51
-; GFX11-NEXT: v_cmp_u_f32_e32 vcc_lo, v48, v48
-; GFX11-NEXT: v_and_b32_e32 v36, 0xffff, v36
-; GFX11-NEXT: v_add_nc_u32_e32 v39, 0x7fff, v52
-; GFX11-NEXT: v_or_b32_e32 v52, 0x400000, v50
-; GFX11-NEXT: v_lshrrev_b32_e32 v38, 16, v38
-; GFX11-NEXT: v_cndmask_b32_e32 v48, v51, v55, vcc_lo
-; GFX11-NEXT: v_cmp_u_f32_e32 vcc_lo, v37, v37
-; GFX11-NEXT: v_lshl_or_b32 v184, v32, 16, v31
-; GFX11-NEXT: v_lshl_or_b32 v175, v33, 16, v34
-; GFX11-NEXT: v_and_b32_e32 v38, 0xffff, v38
-; GFX11-NEXT: v_lshrrev_b32_e32 v48, 16, v48
-; GFX11-NEXT: v_cndmask_b32_e32 v37, v49, v53, vcc_lo
-; GFX11-NEXT: v_cmp_u_f32_e32 vcc_lo, v50, v50
-; GFX11-NEXT: v_lshl_or_b32 v173, v35, 16, v36
-; GFX11-NEXT: v_lshl_or_b32 v97, v8, 16, v10
-; GFX11-NEXT: v_and_b32_e32 v48, 0xffff, v48
-; GFX11-NEXT: v_lshrrev_b32_e32 v37, 16, v37
-; GFX11-NEXT: v_cndmask_b32_e32 v39, v39, v52, vcc_lo
-; GFX11-NEXT: v_lshl_or_b32 v86, v9, 16, v12
-; GFX11-NEXT: v_lshl_or_b32 v76, v11, 16, v13
-; GFX11-NEXT: v_lshl_or_b32 v67, v14, 16, v17
-; GFX11-NEXT: v_lshl_or_b32 v172, v37, 16, v38
-; GFX11-NEXT: v_lshrrev_b32_e32 v39, 16, v39
-; GFX11-NEXT: v_lshl_or_b32 v59, v16, 16, v19
-; GFX11-NEXT: v_lshl_or_b32 v52, v18, 16, v20
-; GFX11-NEXT: v_lshl_or_b32 v46, v21, 16, v23
-; GFX11-NEXT: v_lshl_or_b32 v41, v22, 16, v25
-; GFX11-NEXT: v_lshl_or_b32 v183, v39, 16, v48
-; GFX11-NEXT: v_lshl_or_b32 v37, v24, 16, v27
-; GFX11-NEXT: v_lshl_or_b32 v34, v26, 16, v28
-; GFX11-NEXT: v_lshl_or_b32 v32, v29, 16, v30
-; GFX11-NEXT: .LBB19_3: ; %end
-; GFX11-NEXT: v_dual_mov_b32 v3, v41 :: v_dual_mov_b32 v4, v46
-; GFX11-NEXT: v_dual_mov_b32 v6, v59 :: v_dual_mov_b32 v9, v86
-; GFX11-NEXT: v_dual_mov_b32 v7, v67 :: v_dual_mov_b32 v8, v76
-; GFX11-NEXT: v_dual_mov_b32 v10, v97 :: v_dual_mov_b32 v13, v136
-; GFX11-NEXT: v_dual_mov_b32 v11, v109 :: v_dual_mov_b32 v12, v122
-; GFX11-NEXT: v_dual_mov_b32 v14, v151 :: v_dual_mov_b32 v17, v172
-; GFX11-NEXT: v_dual_mov_b32 v18, v173 :: v_dual_mov_b32 v19, v175
-; GFX11-NEXT: v_dual_mov_b32 v20, v184 :: v_dual_mov_b32 v23, v174
-; GFX11-NEXT: v_dual_mov_b32 v22, v171 :: v_dual_mov_b32 v25, v169
-; GFX11-NEXT: v_dual_mov_b32 v26, v170 :: v_dual_mov_b32 v29, v180
-; GFX11-NEXT: s_clause 0x1f
-; GFX11-NEXT: scratch_load_b32 v184, off, s32
-; GFX11-NEXT: scratch_load_b32 v175, off, s32 offset:4
-; GFX11-NEXT: scratch_load_b32 v174, off, s32 offset:8
-; GFX11-NEXT: scratch_load_b32 v173, off, s32 offset:12
-; GFX11-NEXT: scratch_load_b32 v172, off, s32 offset:16
-; GFX11-NEXT: scratch_load_b32 v171, off, s32 offset:20
-; GFX11-NEXT: scratch_load_b32 v170, off, s32 offset:24
-; GFX11-NEXT: scratch_load_b32 v169, off, s32 offset:28
-; GFX11-NEXT: scratch_load_b32 v168, off, s32 offset:32
-; GFX11-NEXT: scratch_load_b32 v159, off, s32 offset:36
-; GFX11-NEXT: scratch_load_b32 v158, off, s32 offset:40
-; GFX11-NEXT: scratch_load_b32 v157, off, s32 offset:44
-; GFX11-NEXT: scratch_load_b32 v156, off, s32 offset:48
-; GFX11-NEXT: scratch_load_b32 v155, off, s32 offset:52
-; GFX11-NEXT: scratch_load_b32 v154, off, s32 offset:56
-; GFX11-NEXT: scratch_load_b32 v153, off, s32 offset:60
-; GFX11-NEXT: scratch_load_b32 v152, off, s32 offset:64
-; GFX11-NEXT: scratch_load_b32 v143, off, s32 offset:68
-; GFX11-NEXT: scratch_load_b32 v142, off, s32 offset:72
-; GFX11-NEXT: scratch_load_b32 v141, off, s32 offset:76
-; GFX11-NEXT: scratch_load_b32 v140, off, s32 offset:80
-; GFX11-NEXT: scratch_load_b32 v139, off, s32 offset:84
-; GFX11-NEXT: scratch_load_b32 v138, off, s32 offset:88
-; GFX11-NEXT: scratch_load_b32 v137, off, s32 offset:92
-; GFX11-NEXT: scratch_load_b32 v136, off, s32 offset:96
-; GFX11-NEXT: scratch_load_b32 v127, off, s32 offset:100
-; GFX11-NEXT: scratch_load_b32 v126, off, s32 offset:104
-; GFX11-NEXT: scratch_load_b32 v125, off, s32 offset:108
-; GFX11-NEXT: scratch_load_b32 v124, off, s32 offset:112
-; GFX11-NEXT: scratch_load_b32 v123, off, s32 offset:116
-; GFX11-NEXT: scratch_load_b32 v122, off, s32 offset:120
-; GFX11-NEXT: scratch_load_b32 v121, off, s32 offset:124
-; GFX11-NEXT: s_clause 0x1f
-; GFX11-NEXT: scratch_load_b32 v120, off, s32 offset:128
-; GFX11-NEXT: scratch_load_b32 v111, off, s32 offset:132
-; GFX11-NEXT: scratch_load_b32 v110, off, s32 offset:136
-; GFX11-NEXT: scratch_load_b32 v109, off, s32 offset:140
-; GFX11-NEXT: scratch_load_b32 v108, off, s32 offset:144
-; GFX11-NEXT: scratch_load_b32 v107, off, s32 offset:148
-; GFX11-NEXT: scratch_load_b32 v106, off, s32 offset:152
-; GFX11-NEXT: scratch_load_b32 v105, off, s32 offset:156
-; GFX11-NEXT: scratch_load_b32 v104, off, s32 offset:160
-; GFX11-NEXT: scratch_load_b32 v95, off, s32 offset:164
-; GFX11-NEXT: scratch_load_b32 v94, off, s32 offset:168
-; GFX11-NEXT: scratch_load_b32 v93, off, s32 offset:172
-; GFX11-NEXT: scratch_load_b32 v92, off, s32 offset:176
-; GFX11-NEXT: scratch_load_b32 v91, off, s32 offset:180
-; GFX11-NEXT: scratch_load_b32 v90, off, s32 offset:184
-; GFX11-NEXT: scratch_load_b32 v89, off, s32 offset:188
-; GFX11-NEXT: scratch_load_b32 v88, off, s32 offset:192
-; GFX11-NEXT: scratch_load_b32 v79, off, s32 offset:196
-; GFX11-NEXT: scratch_load_b32 v78, off, s32 offset:200
-; GFX11-NEXT: scratch_load_b32 v77, off, s32 offset:204
-; GFX11-NEXT: scratch_load_b32 v76, off, s32 offset:208
-; GFX11-NEXT: scratch_load_b32 v75, off, s32 offset:212
-; GFX11-NEXT: scratch_load_b32 v74, off, s32 offset:216
-; GFX11-NEXT: scratch_load_b32 v73, off, s32 offset:220
-; GFX11-NEXT: scratch_load_b32 v72, off, s32 offset:224
-; GFX11-NEXT: scratch_load_b32 v63, off, s32 offset:228
-; GFX11-NEXT: scratch_load_b32 v62, off, s32 offset:232
-; GFX11-NEXT: scratch_load_b32 v61, off, s32 offset:236
-; GFX11-NEXT: scratch_load_b32 v60, off, s32 offset:240
-; GFX11-NEXT: scratch_load_b32 v59, off, s32 offset:244
-; GFX11-NEXT: scratch_load_b32 v58, off, s32 offset:248
-; GFX11-NEXT: scratch_load_b32 v57, off, s32 offset:252
-; GFX11-NEXT: s_clause 0x8
-; GFX11-NEXT: scratch_load_b32 v56, off, s32 offset:256
-; GFX11-NEXT: scratch_load_b32 v47, off, s32 offset:260
-; GFX11-NEXT: scratch_load_b32 v46, off, s32 offset:264
-; GFX11-NEXT: scratch_load_b32 v45, off, s32 offset:268
-; GFX11-NEXT: scratch_load_b32 v44, off, s32 offset:272
-; GFX11-NEXT: scratch_load_b32 v43, off, s32 offset:276
-; GFX11-NEXT: scratch_load_b32 v42, off, s32 offset:280
-; GFX11-NEXT: scratch_load_b32 v41, off, s32 offset:284
-; GFX11-NEXT: scratch_load_b32 v40, off, s32 offset:288
-; GFX11-NEXT: v_dual_mov_b32 v0, v32 :: v_dual_mov_b32 v1, v34
-; GFX11-NEXT: v_dual_mov_b32 v2, v37 :: v_dual_mov_b32 v5, v52
-; GFX11-NEXT: v_dual_mov_b32 v16, v183 :: v_dual_mov_b32 v21, v177
-; GFX11-NEXT: v_dual_mov_b32 v24, v176 :: v_dual_mov_b32 v27, v181
-; GFX11-NEXT: v_mov_b32_e32 v28, v182
-; GFX11-NEXT: v_dual_mov_b32 v30, v179 :: v_dual_mov_b32 v31, v178
-; GFX11-NEXT: s_waitcnt vmcnt(0)
-; GFX11-NEXT: s_setpc_b64 s[30:31]
-; GFX11-NEXT: .LBB19_4:
-; GFX11-NEXT: ; implicit-def: $vgpr32_vgpr33_vgpr34_vgpr35_vgpr36_vgpr37_vgpr38_vgpr39_vgpr40_vgpr41_vgpr42_vgpr43_vgpr44_vgpr45_vgpr46_vgpr47_vgpr48_vgpr49_vgpr50_vgpr51_vgpr52_vgpr53_vgpr54_vgpr55_vgpr56_vgpr57_vgpr58_vgpr59_vgpr60_vgpr61_vgpr62_vgpr63
-; GFX11-NEXT: ; implicit-def: $vgpr33_vgpr34_vgpr35_vgpr36_vgpr37_vgpr38_vgpr39_vgpr40_vgpr41_vgpr42_vgpr43_vgpr44_vgpr45_vgpr46_vgpr47_vgpr48_vgpr49_vgpr50_vgpr51_vgpr52_vgpr53_vgpr54_vgpr55_vgpr56_vgpr57_vgpr58_vgpr59_vgpr60_vgpr61_vgpr62_vgpr63_vgpr64
-; GFX11-NEXT: ; implicit-def: $vgpr0_vgpr1_vgpr2_vgpr3_vgpr4_vgpr5_vgpr6_vgpr7_vgpr8_vgpr9_vgpr10_vgpr11_vgpr12_vgpr13_vgpr14_vgpr15_vgpr16_vgpr17_vgpr18_vgpr19_vgpr20_vgpr21_vgpr22_vgpr23_vgpr24_vgpr25_vgpr26_vgpr27_vgpr28_vgpr29_vgpr30_vgpr31
-; GFX11-NEXT: ; implicit-def: $vgpr35_vgpr36_vgpr37_vgpr38_vgpr39_vgpr40_vgpr41_vgpr42_vgpr43_vgpr44_vgpr45_vgpr46_vgpr47_vgpr48_vgpr49_vgpr50_vgpr51_vgpr52_vgpr53_vgpr54_vgpr55_vgpr56_vgpr57_vgpr58_vgpr59_vgpr60_vgpr61_vgpr62_vgpr63_vgpr64_vgpr65_vgpr66
-; GFX11-NEXT: ; implicit-def: $vgpr38_vgpr39_vgpr40_vgpr41_vgpr42_vgpr43_vgpr44_vgpr45_vgpr46_vgpr47_vgpr48_vgpr49_vgpr50_vgpr51_vgpr52_vgpr53_vgpr54_vgpr55_vgpr56_vgpr57_vgpr58_vgpr59_vgpr60_vgpr61_vgpr62_vgpr63_vgpr64_vgpr65_vgpr66_vgpr67_vgpr68_vgpr69
-; GFX11-NEXT: ; implicit-def: $vgpr42_vgpr43_vgpr44_vgpr45_vgpr46_vgpr47_vgpr48_vgpr49_vgpr50_vgpr51_vgpr52_vgpr53_vgpr54_vgpr55_vgpr56_vgpr57_vgpr58_vgpr59_vgpr60_vgpr61_vgpr62_vgpr63_vgpr64_vgpr65_vgpr66_vgpr67_vgpr68_vgpr69_vgpr70_vgpr71_vgpr72_vgpr73
-; GFX11-NEXT: ; implicit-def: $vgpr47_vgpr48_vgpr49_vgpr50_vgpr51_vgpr52_vgpr53_vgpr54_vgpr55_vgpr56_vgpr57_vgpr58_vgpr59_vgpr60_vgpr61_vgpr62_vgpr63_vgpr64_vgpr65_vgpr66_vgpr67_vgpr68_vgpr69_vgpr70_vgpr71_vgpr72_vgpr73_vgpr74_vgpr75_vgpr76_vgpr77_vgpr78
-; GFX11-NEXT: ; implicit-def: $vgpr53_vgpr54_vgpr55_vgpr56_vgpr57_vgpr58_vgpr59_vgpr60_vgpr61_vgpr62_vgpr63_vgpr64_vgpr65_vgpr66_vgpr67_vgpr68_vgpr69_vgpr70_vgpr71_vgpr72_vgpr73_vgpr74_vgpr75_vgpr76_vgpr77_vgpr78_vgpr79_vgpr80_vgpr81_vgpr82_vgpr83_vgpr84
-; GFX11-NEXT: ; implicit-def: $vgpr60_vgpr61_vgpr62_vgpr63_vgpr64_vgpr65_vgpr66_vgpr67_vgpr68_vgpr69_vgpr70_vgpr71_vgpr72_vgpr73_vgpr74_vgpr75_vgpr76_vgpr77_vgpr78_vgpr79_vgpr80_vgpr81_vgpr82_vgpr83_vgpr84_vgpr85_vgpr86_vgpr87_vgpr88_vgpr89_vgpr90_vgpr91
-; GFX11-NEXT: ; implicit-def: $vgpr68_vgpr69_vgpr70_vgpr71_vgpr72_vgpr73_vgpr74_vgpr75_vgpr76_vgpr77_vgpr78_vgpr79_vgpr80_vgpr81_vgpr82_vgpr83_vgpr84_vgpr85_vgpr86_vgpr87_vgpr88_vgpr89_vgpr90_vgpr91_vgpr92_vgpr93_vgpr94_vgpr95_vgpr96_vgpr97_vgpr98_vgpr99
-; GFX11-NEXT: ; implicit-def: $vgpr77_vgpr78_vgpr79_vgpr80_vgpr81_vgpr82_vgpr83_vgpr84_vgpr85_vgpr86_vgpr87_vgpr88_vgpr89_vgpr90_vgpr91_vgpr92_vgpr93_vgpr94_vgpr95_vgpr96_vgpr97_vgpr98_vgpr99_vgpr100_vgpr101_vgpr102_vgpr103_vgpr104_vgpr105_vgpr106_vgpr107_vgpr108
-; GFX11-NEXT: ; implicit-def: $vgpr87_vgpr88_vgpr89_vgpr90_vgpr91_vgpr92_vgpr93_vgpr94_vgpr95_vgpr96_vgpr97_vgpr98_vgpr99_vgpr100_vgpr101_vgpr102_vgpr103_vgpr104_vgpr105_vgpr106_vgpr107_vgpr108_vgpr109_vgpr110_vgpr111_vgpr112_vgpr113_vgpr114_vgpr115_vgpr116_vgpr117_vgpr118
-; GFX11-NEXT: ; implicit-def: $vgpr98_vgpr99_vgpr100_vgpr101_vgpr102_vgpr103_vgpr104_vgpr105_vgpr106_vgpr107_vgpr108_vgpr109_vgpr110_vgpr111_vgpr112_vgpr113_vgpr114_vgpr115_vgpr116_vgpr117_vgpr118_vgpr119_vgpr120_vgpr121_vgpr122_vgpr123_vgpr124_vgpr125_vgpr126_vgpr127_vgpr128_vgpr129
-; GFX11-NEXT: ; implicit-def: $vgpr110_vgpr111_vgpr112_vgpr113_vgpr114_vgpr115_vgpr116_vgpr117_vgpr118_vgpr119_vgpr120_vgpr121_vgpr122_vgpr123_vgpr124_vgpr125_vgpr126_vgpr127_vgpr128_vgpr129_vgpr130_vgpr131_vgpr132_vgpr133_vgpr134_vgpr135_vgpr136_vgpr137_vgpr138_vgpr139_vgpr140_vgpr141
-; GFX11-NEXT: ; implicit-def: $vgpr123_vgpr124_vgpr125_vgpr126_vgpr127_vgpr128_vgpr129_vgpr130_vgpr131_vgpr132_vgpr133_vgpr134_vgpr135_vgpr136_vgpr137_vgpr138_vgpr139_vgpr140_vgpr141_vgpr142_vgpr143_vgpr144_vgpr145_vgpr146_vgpr147_vgpr148_vgpr149_vgpr150_vgpr151_vgpr152_vgpr153_vgpr154
-; GFX11-NEXT: ; implicit-def: $vgpr137_vgpr138_vgpr139_vgpr140_vgpr141_vgpr142_vgpr143_vgpr144_vgpr145_vgpr146_vgpr147_vgpr148_vgpr149_vgpr150_vgpr151_vgpr152_vgpr153_vgpr154_vgpr155_vgpr156_vgpr157_vgpr158_vgpr159_vgpr160_vgpr161_vgpr162_vgpr163_vgpr164_vgpr165_vgpr166_vgpr167_vgpr168
-; GFX11-NEXT: s_branch .LBB19_2
+; GFX11-TRUE16-LABEL: bitcast_v64bf16_to_v32i32_scalar:
+; GFX11-TRUE16: ; %bb.0:
+; GFX11-TRUE16-NEXT: s_waitcnt vmcnt(0) expcnt(0) lgkmcnt(0)
+; GFX11-TRUE16-NEXT: v_cmp_ne_u32_e32 vcc_lo, 0, v14
+; GFX11-TRUE16-NEXT: s_clause 0x1f
+; GFX11-TRUE16-NEXT: scratch_store_b32 off, v40, s32 offset:280
+; GFX11-TRUE16-NEXT: scratch_store_b32 off, v41, s32 offset:276
+; GFX11-TRUE16-NEXT: scratch_store_b32 off, v42, s32 offset:272
+; GFX11-TRUE16-NEXT: scratch_store_b32 off, v43, s32 offset:268
+; GFX11-TRUE16-NEXT: scratch_store_b32 off, v44, s32 offset:264
+; GFX11-TRUE16-NEXT: scratch_store_b32 off, v45, s32 offset:260
+; GFX11-TRUE16-NEXT: scratch_store_b32 off, v46, s32 offset:256
+; GFX11-TRUE16-NEXT: scratch_store_b32 off, v47, s32 offset:252
+; GFX11-TRUE16-NEXT: scratch_store_b32 off, v56, s32 offset:248
+; GFX11-TRUE16-NEXT: scratch_store_b32 off, v57, s32 offset:244
+; GFX11-TRUE16-NEXT: scratch_store_b32 off, v58, s32 offset:240
+; GFX11-TRUE16-NEXT: scratch_store_b32 off, v59, s32 offset:236
+; GFX11-TRUE16-NEXT: scratch_store_b32 off, v60, s32 offset:232
+; GFX11-TRUE16-NEXT: scratch_store_b32 off, v61, s32 offset:228
+; GFX11-TRUE16-NEXT: scratch_store_b32 off, v62, s32 offset:224
+; GFX11-TRUE16-NEXT: scratch_store_b32 off, v63, s32 offset:220
+; GFX11-TRUE16-NEXT: scratch_store_b32 off, v72, s32 offset:216
+; GFX11-TRUE16-NEXT: scratch_store_b32 off, v73, s32 offset:212
+; GFX11-TRUE16-NEXT: scratch_store_b32 off, v74, s32 offset:208
+; GFX11-TRUE16-NEXT: scratch_store_b32 off, v75, s32 offset:204
+; GFX11-TRUE16-NEXT: scratch_store_b32 off, v76, s32 offset:200
+; GFX11-TRUE16-NEXT: scratch_store_b32 off, v77, s32 offset:196
+; GFX11-TRUE16-NEXT: scratch_store_b32 off, v78, s32 offset:192
+; GFX11-TRUE16-NEXT: scratch_store_b32 off, v79, s32 offset:188
+; GFX11-TRUE16-NEXT: scratch_store_b32 off, v88, s32 offset:184
+; GFX11-TRUE16-NEXT: scratch_store_b32 off, v89, s32 offset:180
+; GFX11-TRUE16-NEXT: scratch_store_b32 off, v90, s32 offset:176
+; GFX11-TRUE16-NEXT: scratch_store_b32 off, v91, s32 offset:172
+; GFX11-TRUE16-NEXT: scratch_store_b32 off, v92, s32 offset:168
+; GFX11-TRUE16-NEXT: scratch_store_b32 off, v93, s32 offset:164
+; GFX11-TRUE16-NEXT: scratch_store_b32 off, v94, s32 offset:160
+; GFX11-TRUE16-NEXT: scratch_store_b32 off, v95, s32 offset:156
+; GFX11-TRUE16-NEXT: s_clause 0x1f
+; GFX11-TRUE16-NEXT: scratch_store_b32 off, v104, s32 offset:152
+; GFX11-TRUE16-NEXT: scratch_store_b32 off, v105, s32 offset:148
+; GFX11-TRUE16-NEXT: scratch_store_b32 off, v106, s32 offset:144
+; GFX11-TRUE16-NEXT: scratch_store_b32 off, v107, s32 offset:140
+; GFX11-TRUE16-NEXT: scratch_store_b32 off, v108, s32 offset:136
+; GFX11-TRUE16-NEXT: scratch_store_b32 off, v109, s32 offset:132
+; GFX11-TRUE16-NEXT: scratch_store_b32 off, v110, s32 offset:128
+; GFX11-TRUE16-NEXT: scratch_store_b32 off, v111, s32 offset:124
+; GFX11-TRUE16-NEXT: scratch_store_b32 off, v120, s32 offset:120
+; GFX11-TRUE16-NEXT: scratch_store_b32 off, v121, s32 offset:116
+; GFX11-TRUE16-NEXT: scratch_store_b32 off, v122, s32 offset:112
+; GFX11-TRUE16-NEXT: scratch_store_b32 off, v123, s32 offset:108
+; GFX11-TRUE16-NEXT: scratch_store_b32 off, v124, s32 offset:104
+; GFX11-TRUE16-NEXT: scratch_store_b32 off, v125, s32 offset:100
+; GFX11-TRUE16-NEXT: scratch_store_b32 off, v126, s32 offset:96
+; GFX11-TRUE16-NEXT: scratch_store_b32 off, v127, s32 offset:92
+; GFX11-TRUE16-NEXT: scratch_store_b32 off, v136, s32 offset:88
+; GFX11-TRUE16-NEXT: scratch_store_b32 off, v137, s32 offset:84
+; GFX11-TRUE16-NEXT: scratch_store_b32 off, v138, s32 offset:80
+; GFX11-TRUE16-NEXT: scratch_store_b32 off, v139, s32 offset:76
+; GFX11-TRUE16-NEXT: scratch_store_b32 off, v140, s32 offset:72
+; GFX11-TRUE16-NEXT: scratch_store_b32 off, v141, s32 offset:68
+; GFX11-TRUE16-NEXT: scratch_store_b32 off, v142, s32 offset:64
+; GFX11-TRUE16-NEXT: scratch_store_b32 off, v143, s32 offset:60
+; GFX11-TRUE16-NEXT: scratch_store_b32 off, v152, s32 offset:56
+; GFX11-TRUE16-NEXT: scratch_store_b32 off, v153, s32 offset:52
+; GFX11-TRUE16-NEXT: scratch_store_b32 off, v154, s32 offset:48
+; GFX11-TRUE16-NEXT: scratch_store_b32 off, v155, s32 offset:44
+; GFX11-TRUE16-NEXT: scratch_store_b32 off, v156, s32 offset:40
+; GFX11-TRUE16-NEXT: scratch_store_b32 off, v157, s32 offset:36
+; GFX11-TRUE16-NEXT: scratch_store_b32 off, v158, s32 offset:32
+; GFX11-TRUE16-NEXT: scratch_store_b32 off, v159, s32 offset:28
+; GFX11-TRUE16-NEXT: s_clause 0x6
+; GFX11-TRUE16-NEXT: scratch_store_b32 off, v168, s32 offset:24
+; GFX11-TRUE16-NEXT: scratch_store_b32 off, v169, s32 offset:20
+; GFX11-TRUE16-NEXT: scratch_store_b32 off, v170, s32 offset:16
+; GFX11-TRUE16-NEXT: scratch_store_b32 off, v171, s32 offset:12
+; GFX11-TRUE16-NEXT: scratch_store_b32 off, v172, s32 offset:8
+; GFX11-TRUE16-NEXT: scratch_store_b32 off, v173, s32 offset:4
+; GFX11-TRUE16-NEXT: scratch_store_b32 off, v174, s32
+; GFX11-TRUE16-NEXT: v_dual_mov_b32 v167, v13 :: v_dual_mov_b32 v176, v12
+; GFX11-TRUE16-NEXT: v_dual_mov_b32 v177, v11 :: v_dual_mov_b32 v178, v10
+; GFX11-TRUE16-NEXT: v_dual_mov_b32 v179, v9 :: v_dual_mov_b32 v180, v8
+; GFX11-TRUE16-NEXT: v_dual_mov_b32 v181, v7 :: v_dual_mov_b32 v182, v6
+; GFX11-TRUE16-NEXT: v_dual_mov_b32 v183, v5 :: v_dual_mov_b32 v168, v4
+; GFX11-TRUE16-NEXT: v_dual_mov_b32 v169, v3 :: v_dual_mov_b32 v170, v2
+; GFX11-TRUE16-NEXT: v_dual_mov_b32 v171, v1 :: v_dual_mov_b32 v172, v0
+; GFX11-TRUE16-NEXT: v_dual_mov_b32 v174, s28 :: v_dual_mov_b32 v173, s29
+; GFX11-TRUE16-NEXT: s_mov_b32 s4, 0
+; GFX11-TRUE16-NEXT: s_and_b32 s5, vcc_lo, exec_lo
+; GFX11-TRUE16-NEXT: s_cbranch_scc0 .LBB19_4
+; GFX11-TRUE16-NEXT: ; %bb.1: ; %cmp.false
+; GFX11-TRUE16-NEXT: v_dual_mov_b32 v135, s0 :: v_dual_mov_b32 v134, s1
+; GFX11-TRUE16-NEXT: v_dual_mov_b32 v132, s2 :: v_dual_mov_b32 v129, s3
+; GFX11-TRUE16-NEXT: v_dual_mov_b32 v125, s16 :: v_dual_mov_b32 v120, s17
+; GFX11-TRUE16-NEXT: v_dual_mov_b32 v114, s18 :: v_dual_mov_b32 v107, s19
+; GFX11-TRUE16-NEXT: v_dual_mov_b32 v99, s20 :: v_dual_mov_b32 v90, s21
+; GFX11-TRUE16-NEXT: v_dual_mov_b32 v80, s22 :: v_dual_mov_b32 v69, s23
+; GFX11-TRUE16-NEXT: v_dual_mov_b32 v57, s24 :: v_dual_mov_b32 v44, s25
+; GFX11-TRUE16-NEXT: v_dual_mov_b32 v30, s26 :: v_dual_mov_b32 v15, s27
+; GFX11-TRUE16-NEXT: s_and_not1_b32 vcc_lo, exec_lo, s4
+; GFX11-TRUE16-NEXT: s_cbranch_vccnz .LBB19_3
+; GFX11-TRUE16-NEXT: .LBB19_2: ; %cmp.true
+; GFX11-TRUE16-NEXT: s_lshl_b32 s5, s27, 16
+; GFX11-TRUE16-NEXT: s_and_b32 s4, s27, 0xffff0000
+; GFX11-TRUE16-NEXT: v_add_f32_e64 v1, 0x40c00000, s5
+; GFX11-TRUE16-NEXT: v_add_f32_e64 v0, 0x40c00000, s4
+; GFX11-TRUE16-NEXT: s_and_b32 s4, s26, 0xffff0000
+; GFX11-TRUE16-NEXT: s_lshl_b32 s6, s26, 16
+; GFX11-TRUE16-NEXT: v_add_f32_e64 v3, 0x40c00000, s4
+; GFX11-TRUE16-NEXT: v_bfe_u32 v4, v1, 16, 1
+; GFX11-TRUE16-NEXT: v_bfe_u32 v2, v0, 16, 1
+; GFX11-TRUE16-NEXT: v_or_b32_e32 v9, 0x400000, v1
+; GFX11-TRUE16-NEXT: v_or_b32_e32 v8, 0x400000, v0
+; GFX11-TRUE16-NEXT: v_bfe_u32 v7, v3, 16, 1
+; GFX11-TRUE16-NEXT: v_add_nc_u32_e32 v4, v4, v1
+; GFX11-TRUE16-NEXT: v_cmp_u_f32_e32 vcc_lo, v0, v0
+; GFX11-TRUE16-NEXT: s_and_b32 s5, s25, 0xffff0000
+; GFX11-TRUE16-NEXT: s_lshl_b32 s7, s25, 16
+; GFX11-TRUE16-NEXT: v_add_f32_e64 v5, 0x40c00000, s6
+; GFX11-TRUE16-NEXT: v_add_nc_u32_e32 v4, 0x7fff, v4
+; GFX11-TRUE16-NEXT: v_add_nc_u32_e32 v2, v2, v0
+; GFX11-TRUE16-NEXT: v_add_f32_e64 v6, 0x40c00000, s5
+; GFX11-TRUE16-NEXT: s_and_b32 s4, s24, 0xffff0000
+; GFX11-TRUE16-NEXT: s_delay_alu instid0(VALU_DEP_2) | instskip(NEXT) | instid1(VALU_DEP_1)
+; GFX11-TRUE16-NEXT: v_add_nc_u32_e32 v2, 0x7fff, v2
+; GFX11-TRUE16-NEXT: v_dual_cndmask_b32 v0, v2, v8 :: v_dual_add_nc_u32 v7, v7, v3
+; GFX11-TRUE16-NEXT: v_cmp_u_f32_e32 vcc_lo, v1, v1
+; GFX11-TRUE16-NEXT: v_or_b32_e32 v2, 0x400000, v3
+; GFX11-TRUE16-NEXT: v_bfe_u32 v8, v5, 16, 1
+; GFX11-TRUE16-NEXT: s_delay_alu instid0(VALU_DEP_4) | instskip(SKIP_4) | instid1(VALU_DEP_4)
+; GFX11-TRUE16-NEXT: v_lshrrev_b32_e32 v0, 16, v0
+; GFX11-TRUE16-NEXT: v_cndmask_b32_e32 v1, v4, v9, vcc_lo
+; GFX11-TRUE16-NEXT: v_add_f32_e64 v4, 0x40c00000, s7
+; GFX11-TRUE16-NEXT: v_bfe_u32 v9, v6, 16, 1
+; GFX11-TRUE16-NEXT: v_cmp_u_f32_e32 vcc_lo, v3, v3
+; GFX11-TRUE16-NEXT: v_lshrrev_b32_e32 v15, 16, v1
+; GFX11-TRUE16-NEXT: s_delay_alu instid0(VALU_DEP_4)
+; GFX11-TRUE16-NEXT: v_or_b32_e32 v10, 0x400000, v4
+; GFX11-TRUE16-NEXT: v_add_nc_u32_e32 v7, 0x7fff, v7
+; GFX11-TRUE16-NEXT: v_bfe_u32 v3, v4, 16, 1
+; GFX11-TRUE16-NEXT: v_mov_b16_e32 v15.h, v0.l
+; GFX11-TRUE16-NEXT: v_add_f32_e64 v0, 0x40c00000, s4
+; GFX11-TRUE16-NEXT: s_lshl_b32 s4, s24, 16
+; GFX11-TRUE16-NEXT: v_dual_cndmask_b32 v2, v7, v2 :: v_dual_add_nc_u32 v7, v8, v5
+; GFX11-TRUE16-NEXT: v_add_nc_u32_e32 v8, v9, v6
+; GFX11-TRUE16-NEXT: v_cmp_u_f32_e32 vcc_lo, v6, v6
+; GFX11-TRUE16-NEXT: v_or_b32_e32 v9, 0x400000, v5
+; GFX11-TRUE16-NEXT: s_delay_alu instid0(VALU_DEP_4) | instskip(SKIP_4) | instid1(VALU_DEP_4)
+; GFX11-TRUE16-NEXT: v_lshrrev_b32_e32 v1, 16, v2
+; GFX11-TRUE16-NEXT: v_add_nc_u32_e32 v2, v3, v4
+; GFX11-TRUE16-NEXT: v_add_nc_u32_e32 v3, 0x7fff, v7
+; GFX11-TRUE16-NEXT: v_add_nc_u32_e32 v7, 0x7fff, v8
+; GFX11-TRUE16-NEXT: v_or_b32_e32 v8, 0x400000, v6
+; GFX11-TRUE16-NEXT: v_add_nc_u32_e32 v2, 0x7fff, v2
+; GFX11-TRUE16-NEXT: s_delay_alu instid0(VALU_DEP_2) | instskip(SKIP_3) | instid1(VALU_DEP_4)
+; GFX11-TRUE16-NEXT: v_cndmask_b32_e32 v6, v7, v8, vcc_lo
+; GFX11-TRUE16-NEXT: v_cmp_u_f32_e32 vcc_lo, v5, v5
+; GFX11-TRUE16-NEXT: v_cndmask_b32_e32 v3, v3, v9, vcc_lo
+; GFX11-TRUE16-NEXT: v_cmp_u_f32_e32 vcc_lo, v4, v4
+; GFX11-TRUE16-NEXT: v_lshrrev_b32_e32 v4, 16, v6
+; GFX11-TRUE16-NEXT: s_delay_alu instid0(VALU_DEP_3) | instskip(SKIP_4) | instid1(VALU_DEP_3)
+; GFX11-TRUE16-NEXT: v_lshrrev_b32_e32 v30, 16, v3
+; GFX11-TRUE16-NEXT: v_mov_b16_e32 v30.h, v1.l
+; GFX11-TRUE16-NEXT: v_bfe_u32 v1, v0, 16, 1
+; GFX11-TRUE16-NEXT: v_cndmask_b32_e32 v2, v2, v10, vcc_lo
+; GFX11-TRUE16-NEXT: v_cmp_u_f32_e32 vcc_lo, v0, v0
+; GFX11-TRUE16-NEXT: v_add_nc_u32_e32 v1, v1, v0
+; GFX11-TRUE16-NEXT: s_delay_alu instid0(VALU_DEP_3) | instskip(SKIP_2) | instid1(VALU_DEP_4)
+; GFX11-TRUE16-NEXT: v_lshrrev_b32_e32 v44, 16, v2
+; GFX11-TRUE16-NEXT: v_or_b32_e32 v2, 0x400000, v0
+; GFX11-TRUE16-NEXT: v_mov_b16_e32 v44.h, v4.l
+; GFX11-TRUE16-NEXT: v_add_nc_u32_e32 v1, 0x7fff, v1
+; GFX11-TRUE16-NEXT: s_delay_alu instid0(VALU_DEP_1) | instskip(SKIP_2) | instid1(VALU_DEP_2)
+; GFX11-TRUE16-NEXT: v_cndmask_b32_e32 v0, v1, v2, vcc_lo
+; GFX11-TRUE16-NEXT: v_add_f32_e64 v1, 0x40c00000, s4
+; GFX11-TRUE16-NEXT: s_and_b32 s4, s23, 0xffff0000
+; GFX11-TRUE16-NEXT: v_lshrrev_b32_e32 v0, 16, v0
+; GFX11-TRUE16-NEXT: s_delay_alu instid0(VALU_DEP_2) | instskip(SKIP_2) | instid1(VALU_DEP_3)
+; GFX11-TRUE16-NEXT: v_bfe_u32 v2, v1, 16, 1
+; GFX11-TRUE16-NEXT: v_or_b32_e32 v3, 0x400000, v1
+; GFX11-TRUE16-NEXT: v_cmp_u_f32_e32 vcc_lo, v1, v1
+; GFX11-TRUE16-NEXT: v_add_nc_u32_e32 v2, v2, v1
+; GFX11-TRUE16-NEXT: s_delay_alu instid0(VALU_DEP_1) | instskip(NEXT) | instid1(VALU_DEP_1)
+; GFX11-TRUE16-NEXT: v_add_nc_u32_e32 v2, 0x7fff, v2
+; GFX11-TRUE16-NEXT: v_cndmask_b32_e32 v1, v2, v3, vcc_lo
+; GFX11-TRUE16-NEXT: s_delay_alu instid0(VALU_DEP_1) | instskip(SKIP_3) | instid1(VALU_DEP_1)
+; GFX11-TRUE16-NEXT: v_lshrrev_b32_e32 v57, 16, v1
+; GFX11-TRUE16-NEXT: v_mov_b16_e32 v57.h, v0.l
+; GFX11-TRUE16-NEXT: v_add_f32_e64 v0, 0x40c00000, s4
+; GFX11-TRUE16-NEXT: s_lshl_b32 s4, s23, 16
+; GFX11-TRUE16-NEXT: v_bfe_u32 v1, v0, 16, 1
+; GFX11-TRUE16-NEXT: v_or_b32_e32 v2, 0x400000, v0
+; GFX11-TRUE16-NEXT: v_cmp_u_f32_e32 vcc_lo, v0, v0
+; GFX11-TRUE16-NEXT: s_delay_alu instid0(VALU_DEP_3) | instskip(NEXT) | instid1(VALU_DEP_1)
+; GFX11-TRUE16-NEXT: v_add_nc_u32_e32 v1, v1, v0
+; GFX11-TRUE16-NEXT: v_add_nc_u32_e32 v1, 0x7fff, v1
+; GFX11-TRUE16-NEXT: s_delay_alu instid0(VALU_DEP_1) | instskip(SKIP_2) | instid1(VALU_DEP_2)
+; GFX11-TRUE16-NEXT: v_cndmask_b32_e32 v0, v1, v2, vcc_lo
+; GFX11-TRUE16-NEXT: v_add_f32_e64 v1, 0x40c00000, s4
+; GFX11-TRUE16-NEXT: s_and_b32 s4, s22, 0xffff0000
+; GFX11-TRUE16-NEXT: v_lshrrev_b32_e32 v0, 16, v0
+; GFX11-TRUE16-NEXT: s_delay_alu instid0(VALU_DEP_2) | instskip(SKIP_2) | instid1(VALU_DEP_3)
+; GFX11-TRUE16-NEXT: v_bfe_u32 v2, v1, 16, 1
+; GFX11-TRUE16-NEXT: v_or_b32_e32 v3, 0x400000, v1
+; GFX11-TRUE16-NEXT: v_cmp_u_f32_e32 vcc_lo, v1, v1
+; GFX11-TRUE16-NEXT: v_add_nc_u32_e32 v2, v2, v1
+; GFX11-TRUE16-NEXT: s_delay_alu instid0(VALU_DEP_1) | instskip(NEXT) | instid1(VALU_DEP_1)
+; GFX11-TRUE16-NEXT: v_add_nc_u32_e32 v2, 0x7fff, v2
+; GFX11-TRUE16-NEXT: v_cndmask_b32_e32 v1, v2, v3, vcc_lo
+; GFX11-TRUE16-NEXT: s_delay_alu instid0(VALU_DEP_1) | instskip(SKIP_3) | instid1(VALU_DEP_1)
+; GFX11-TRUE16-NEXT: v_lshrrev_b32_e32 v69, 16, v1
+; GFX11-TRUE16-NEXT: v_mov_b16_e32 v69.h, v0.l
+; GFX11-TRUE16-NEXT: v_add_f32_e64 v0, 0x40c00000, s4
+; GFX11-TRUE16-NEXT: s_lshl_b32 s4, s22, 16
+; GFX11-TRUE16-NEXT: v_bfe_u32 v1, v0, 16, 1
+; GFX11-TRUE16-NEXT: v_or_b32_e32 v2, 0x400000, v0
+; GFX11-TRUE16-NEXT: v_cmp_u_f32_e32 vcc_lo, v0, v0
+; GFX11-TRUE16-NEXT: s_delay_alu instid0(VALU_DEP_3) | instskip(NEXT) | instid1(VALU_DEP_1)
+; GFX11-TRUE16-NEXT: v_add_nc_u32_e32 v1, v1, v0
+; GFX11-TRUE16-NEXT: v_add_nc_u32_e32 v1, 0x7fff, v1
+; GFX11-TRUE16-NEXT: s_delay_alu instid0(VALU_DEP_1) | instskip(SKIP_2) | instid1(VALU_DEP_2)
+; GFX11-TRUE16-NEXT: v_cndmask_b32_e32 v0, v1, v2, vcc_lo
+; GFX11-TRUE16-NEXT: v_add_f32_e64 v1, 0x40c00000, s4
+; GFX11-TRUE16-NEXT: s_and_b32 s4, s21, 0xffff0000
+; GFX11-TRUE16-NEXT: v_lshrrev_b32_e32 v0, 16, v0
+; GFX11-TRUE16-NEXT: s_delay_alu instid0(VALU_DEP_2) | instskip(SKIP_2) | instid1(VALU_DEP_3)
+; GFX11-TRUE16-NEXT: v_bfe_u32 v2, v1, 16, 1
+; GFX11-TRUE16-NEXT: v_or_b32_e32 v3, 0x400000, v1
+; GFX11-TRUE16-NEXT: v_cmp_u_f32_e32 vcc_lo, v1, v1
+; GFX11-TRUE16-NEXT: v_add_nc_u32_e32 v2, v2, v1
+; GFX11-TRUE16-NEXT: s_delay_alu instid0(VALU_DEP_1) | instskip(NEXT) | instid1(VALU_DEP_1)
+; GFX11-TRUE16-NEXT: v_add_nc_u32_e32 v2, 0x7fff, v2
+; GFX11-TRUE16-NEXT: v_cndmask_b32_e32 v1, v2, v3, vcc_lo
+; GFX11-TRUE16-NEXT: s_delay_alu instid0(VALU_DEP_1) | instskip(SKIP_3) | instid1(VALU_DEP_1)
+; GFX11-TRUE16-NEXT: v_lshrrev_b32_e32 v80, 16, v1
+; GFX11-TRUE16-NEXT: v_mov_b16_e32 v80.h, v0.l
+; GFX11-TRUE16-NEXT: v_add_f32_e64 v0, 0x40c00000, s4
+; GFX11-TRUE16-NEXT: s_lshl_b32 s4, s21, 16
+; GFX11-TRUE16-NEXT: v_bfe_u32 v1, v0, 16, 1
+; GFX11-TRUE16-NEXT: v_or_b32_e32 v2, 0x400000, v0
+; GFX11-TRUE16-NEXT: v_cmp_u_f32_e32 vcc_lo, v0, v0
+; GFX11-TRUE16-NEXT: s_delay_alu instid0(VALU_DEP_3) | instskip(NEXT) | instid1(VALU_DEP_1)
+; GFX11-TRUE16-NEXT: v_add_nc_u32_e32 v1, v1, v0
+; GFX11-TRUE16-NEXT: v_add_nc_u32_e32 v1, 0x7fff, v1
+; GFX11-TRUE16-NEXT: s_delay_alu instid0(VALU_DEP_1) | instskip(SKIP_2) | instid1(VALU_DEP_1)
+; GFX11-TRUE16-NEXT: v_cndmask_b32_e32 v0, v1, v2, vcc_lo
+; GFX11-TRUE16-NEXT: v_add_f32_e64 v1, 0x40c00000, s4
+; GFX11-TRUE16-NEXT: s_and_b32 s4, s20, 0xffff0000
+; GFX11-TRUE16-NEXT: v_bfe_u32 v2, v1, 16, 1
+; GFX11-TRUE16-NEXT: v_or_b32_e32 v3, 0x400000, v1
+; GFX11-TRUE16-NEXT: v_cmp_u_f32_e32 vcc_lo, v1, v1
+; GFX11-TRUE16-NEXT: s_delay_alu instid0(VALU_DEP_3) | instskip(NEXT) | instid1(VALU_DEP_1)
+; GFX11-TRUE16-NEXT: v_add_nc_u32_e32 v2, v2, v1
+; GFX11-TRUE16-NEXT: v_add_nc_u32_e32 v2, 0x7fff, v2
+; GFX11-TRUE16-NEXT: s_delay_alu instid0(VALU_DEP_1) | instskip(SKIP_1) | instid1(VALU_DEP_2)
+; GFX11-TRUE16-NEXT: v_cndmask_b32_e32 v1, v2, v3, vcc_lo
+; GFX11-TRUE16-NEXT: v_lshrrev_b32_e32 v0, 16, v0
+; GFX11-TRUE16-NEXT: v_lshrrev_b32_e32 v90, 16, v1
+; GFX11-TRUE16-NEXT: s_delay_alu instid0(VALU_DEP_2) | instskip(SKIP_2) | instid1(VALU_DEP_1)
+; GFX11-TRUE16-NEXT: v_mov_b16_e32 v90.h, v0.l
+; GFX11-TRUE16-NEXT: v_add_f32_e64 v0, 0x40c00000, s4
+; GFX11-TRUE16-NEXT: s_lshl_b32 s4, s20, 16
+; GFX11-TRUE16-NEXT: v_bfe_u32 v1, v0, 16, 1
+; GFX11-TRUE16-NEXT: v_or_b32_e32 v2, 0x400000, v0
+; GFX11-TRUE16-NEXT: v_cmp_u_f32_e32 vcc_lo, v0, v0
+; GFX11-TRUE16-NEXT: s_delay_alu instid0(VALU_DEP_3) | instskip(NEXT) | instid1(VALU_DEP_1)
+; GFX11-TRUE16-NEXT: v_add_nc_u32_e32 v1, v1, v0
+; GFX11-TRUE16-NEXT: v_add_nc_u32_e32 v1, 0x7fff, v1
+; GFX11-TRUE16-NEXT: s_delay_alu instid0(VALU_DEP_1) | instskip(SKIP_2) | instid1(VALU_DEP_1)
+; GFX11-TRUE16-NEXT: v_cndmask_b32_e32 v0, v1, v2, vcc_lo
+; GFX11-TRUE16-NEXT: v_add_f32_e64 v1, 0x40c00000, s4
+; GFX11-TRUE16-NEXT: s_and_b32 s4, s19, 0xffff0000
+; GFX11-TRUE16-NEXT: v_bfe_u32 v2, v1, 16, 1
+; GFX11-TRUE16-NEXT: v_or_b32_e32 v3, 0x400000, v1
+; GFX11-TRUE16-NEXT: v_cmp_u_f32_e32 vcc_lo, v1, v1
+; GFX11-TRUE16-NEXT: s_delay_alu instid0(VALU_DEP_3) | instskip(NEXT) | instid1(VALU_DEP_1)
+; GFX11-TRUE16-NEXT: v_add_nc_u32_e32 v2, v2, v1
+; GFX11-TRUE16-NEXT: v_add_nc_u32_e32 v2, 0x7fff, v2
+; GFX11-TRUE16-NEXT: s_delay_alu instid0(VALU_DEP_1) | instskip(SKIP_1) | instid1(VALU_DEP_2)
+; GFX11-TRUE16-NEXT: v_cndmask_b32_e32 v1, v2, v3, vcc_lo
+; GFX11-TRUE16-NEXT: v_lshrrev_b32_e32 v0, 16, v0
+; GFX11-TRUE16-NEXT: v_lshrrev_b32_e32 v99, 16, v1
+; GFX11-TRUE16-NEXT: s_delay_alu instid0(VALU_DEP_2) | instskip(SKIP_2) | instid1(VALU_DEP_1)
+; GFX11-TRUE16-NEXT: v_mov_b16_e32 v99.h, v0.l
+; GFX11-TRUE16-NEXT: v_add_f32_e64 v0, 0x40c00000, s4
+; GFX11-TRUE16-NEXT: s_lshl_b32 s4, s19, 16
+; GFX11-TRUE16-NEXT: v_bfe_u32 v1, v0, 16, 1
+; GFX11-TRUE16-NEXT: v_or_b32_e32 v2, 0x400000, v0
+; GFX11-TRUE16-NEXT: v_cmp_u_f32_e32 vcc_lo, v0, v0
+; GFX11-TRUE16-NEXT: s_delay_alu instid0(VALU_DEP_3) | instskip(NEXT) | instid1(VALU_DEP_1)
+; GFX11-TRUE16-NEXT: v_add_nc_u32_e32 v1, v1, v0
+; GFX11-TRUE16-NEXT: v_add_nc_u32_e32 v1, 0x7fff, v1
+; GFX11-TRUE16-NEXT: s_delay_alu instid0(VALU_DEP_1) | instskip(SKIP_2) | instid1(VALU_DEP_1)
+; GFX11-TRUE16-NEXT: v_cndmask_b32_e32 v0, v1, v2, vcc_lo
+; GFX11-TRUE16-NEXT: v_add_f32_e64 v1, 0x40c00000, s4
+; GFX11-TRUE16-NEXT: s_and_b32 s4, s18, 0xffff0000
+; GFX11-TRUE16-NEXT: v_bfe_u32 v2, v1, 16, 1
+; GFX11-TRUE16-NEXT: v_or_b32_e32 v3, 0x400000, v1
+; GFX11-TRUE16-NEXT: v_cmp_u_f32_e32 vcc_lo, v1, v1
+; GFX11-TRUE16-NEXT: s_delay_alu instid0(VALU_DEP_3) | instskip(NEXT) | instid1(VALU_DEP_1)
+; GFX11-TRUE16-NEXT: v_add_nc_u32_e32 v2, v2, v1
+; GFX11-TRUE16-NEXT: v_add_nc_u32_e32 v2, 0x7fff, v2
+; GFX11-TRUE16-NEXT: s_delay_alu instid0(VALU_DEP_1) | instskip(SKIP_1) | instid1(VALU_DEP_2)
+; GFX11-TRUE16-NEXT: v_cndmask_b32_e32 v1, v2, v3, vcc_lo
+; GFX11-TRUE16-NEXT: v_lshrrev_b32_e32 v0, 16, v0
+; GFX11-TRUE16-NEXT: v_lshrrev_b32_e32 v107, 16, v1
+; GFX11-TRUE16-NEXT: s_delay_alu instid0(VALU_DEP_2) | instskip(SKIP_2) | instid1(VALU_DEP_1)
+; GFX11-TRUE16-NEXT: v_mov_b16_e32 v107.h, v0.l
+; GFX11-TRUE16-NEXT: v_add_f32_e64 v0, 0x40c00000, s4
+; GFX11-TRUE16-NEXT: s_lshl_b32 s4, s18, 16
+; GFX11-TRUE16-NEXT: v_bfe_u32 v1, v0, 16, 1
+; GFX11-TRUE16-NEXT: v_or_b32_e32 v2, 0x400000, v0
+; GFX11-TRUE16-NEXT: v_cmp_u_f32_e32 vcc_lo, v0, v0
+; GFX11-TRUE16-NEXT: s_delay_alu instid0(VALU_DEP_3) | instskip(NEXT) | instid1(VALU_DEP_1)
+; GFX11-TRUE16-NEXT: v_add_nc_u32_e32 v1, v1, v0
+; GFX11-TRUE16-NEXT: v_add_nc_u32_e32 v1, 0x7fff, v1
+; GFX11-TRUE16-NEXT: s_delay_alu instid0(VALU_DEP_1) | instskip(SKIP_2) | instid1(VALU_DEP_1)
+; GFX11-TRUE16-NEXT: v_cndmask_b32_e32 v0, v1, v2, vcc_lo
+; GFX11-TRUE16-NEXT: v_add_f32_e64 v1, 0x40c00000, s4
+; GFX11-TRUE16-NEXT: s_and_b32 s4, s17, 0xffff0000
+; GFX11-TRUE16-NEXT: v_bfe_u32 v2, v1, 16, 1
+; GFX11-TRUE16-NEXT: v_or_b32_e32 v3, 0x400000, v1
+; GFX11-TRUE16-NEXT: v_cmp_u_f32_e32 vcc_lo, v1, v1
+; GFX11-TRUE16-NEXT: s_delay_alu instid0(VALU_DEP_3) | instskip(NEXT) | instid1(VALU_DEP_1)
+; GFX11-TRUE16-NEXT: v_add_nc_u32_e32 v2, v2, v1
+; GFX11-TRUE16-NEXT: v_add_nc_u32_e32 v2, 0x7fff, v2
+; GFX11-TRUE16-NEXT: s_delay_alu instid0(VALU_DEP_1) | instskip(SKIP_1) | instid1(VALU_DEP_2)
+; GFX11-TRUE16-NEXT: v_cndmask_b32_e32 v1, v2, v3, vcc_lo
+; GFX11-TRUE16-NEXT: v_lshrrev_b32_e32 v0, 16, v0
+; GFX11-TRUE16-NEXT: v_lshrrev_b32_e32 v114, 16, v1
+; GFX11-TRUE16-NEXT: s_delay_alu instid0(VALU_DEP_2) | instskip(SKIP_2) | instid1(VALU_DEP_1)
+; GFX11-TRUE16-NEXT: v_mov_b16_e32 v114.h, v0.l
+; GFX11-TRUE16-NEXT: v_add_f32_e64 v0, 0x40c00000, s4
+; GFX11-TRUE16-NEXT: s_lshl_b32 s4, s17, 16
+; GFX11-TRUE16-NEXT: v_bfe_u32 v1, v0, 16, 1
+; GFX11-TRUE16-NEXT: v_or_b32_e32 v2, 0x400000, v0
+; GFX11-TRUE16-NEXT: v_cmp_u_f32_e32 vcc_lo, v0, v0
+; GFX11-TRUE16-NEXT: s_delay_alu instid0(VALU_DEP_3) | instskip(NEXT) | instid1(VALU_DEP_1)
+; GFX11-TRUE16-NEXT: v_add_nc_u32_e32 v1, v1, v0
+; GFX11-TRUE16-NEXT: v_add_nc_u32_e32 v1, 0x7fff, v1
+; GFX11-TRUE16-NEXT: s_delay_alu instid0(VALU_DEP_1) | instskip(SKIP_2) | instid1(VALU_DEP_1)
+; GFX11-TRUE16-NEXT: v_cndmask_b32_e32 v0, v1, v2, vcc_lo
+; GFX11-TRUE16-NEXT: v_add_f32_e64 v1, 0x40c00000, s4
+; GFX11-TRUE16-NEXT: s_and_b32 s4, s16, 0xffff0000
+; GFX11-TRUE16-NEXT: v_bfe_u32 v2, v1, 16, 1
+; GFX11-TRUE16-NEXT: v_or_b32_e32 v3, 0x400000, v1
+; GFX11-TRUE16-NEXT: v_cmp_u_f32_e32 vcc_lo, v1, v1
+; GFX11-TRUE16-NEXT: s_delay_alu instid0(VALU_DEP_3) | instskip(NEXT) | instid1(VALU_DEP_1)
+; GFX11-TRUE16-NEXT: v_add_nc_u32_e32 v2, v2, v1
+; GFX11-TRUE16-NEXT: v_add_nc_u32_e32 v2, 0x7fff, v2
+; GFX11-TRUE16-NEXT: s_delay_alu instid0(VALU_DEP_1) | instskip(SKIP_1) | instid1(VALU_DEP_2)
+; GFX11-TRUE16-NEXT: v_cndmask_b32_e32 v1, v2, v3, vcc_lo
+; GFX11-TRUE16-NEXT: v_lshrrev_b32_e32 v0, 16, v0
+; GFX11-TRUE16-NEXT: v_lshrrev_b32_e32 v120, 16, v1
+; GFX11-TRUE16-NEXT: s_delay_alu instid0(VALU_DEP_2) | instskip(SKIP_2) | instid1(VALU_DEP_1)
+; GFX11-TRUE16-NEXT: v_mov_b16_e32 v120.h, v0.l
+; GFX11-TRUE16-NEXT: v_add_f32_e64 v0, 0x40c00000, s4
+; GFX11-TRUE16-NEXT: s_lshl_b32 s4, s16, 16
+; GFX11-TRUE16-NEXT: v_bfe_u32 v1, v0, 16, 1
+; GFX11-TRUE16-NEXT: v_or_b32_e32 v2, 0x400000, v0
+; GFX11-TRUE16-NEXT: v_cmp_u_f32_e32 vcc_lo, v0, v0
+; GFX11-TRUE16-NEXT: s_delay_alu instid0(VALU_DEP_3) | instskip(NEXT) | instid1(VALU_DEP_1)
+; GFX11-TRUE16-NEXT: v_add_nc_u32_e32 v1, v1, v0
+; GFX11-TRUE16-NEXT: v_add_nc_u32_e32 v1, 0x7fff, v1
+; GFX11-TRUE16-NEXT: s_delay_alu instid0(VALU_DEP_1) | instskip(SKIP_3) | instid1(VALU_DEP_1)
+; GFX11-TRUE16-NEXT: v_cndmask_b32_e32 v0, v1, v2, vcc_lo
+; GFX11-TRUE16-NEXT: v_add_f32_e64 v1, 0x40c00000, s4
+; GFX11-TRUE16-NEXT: s_and_b32 s4, s3, 0xffff0000
+; GFX11-TRUE16-NEXT: s_lshl_b32 s3, s3, 16
+; GFX11-TRUE16-NEXT: v_bfe_u32 v2, v1, 16, 1
+; GFX11-TRUE16-NEXT: v_or_b32_e32 v3, 0x400000, v1
+; GFX11-TRUE16-NEXT: v_cmp_u_f32_e32 vcc_lo, v1, v1
+; GFX11-TRUE16-NEXT: s_delay_alu instid0(VALU_DEP_3) | instskip(NEXT) | instid1(VALU_DEP_1)
+; GFX11-TRUE16-NEXT: v_add_nc_u32_e32 v2, v2, v1
+; GFX11-TRUE16-NEXT: v_add_nc_u32_e32 v2, 0x7fff, v2
+; GFX11-TRUE16-NEXT: s_delay_alu instid0(VALU_DEP_1) | instskip(SKIP_1) | instid1(VALU_DEP_2)
+; GFX11-TRUE16-NEXT: v_cndmask_b32_e32 v1, v2, v3, vcc_lo
+; GFX11-TRUE16-NEXT: v_lshrrev_b32_e32 v0, 16, v0
+; GFX11-TRUE16-NEXT: v_lshrrev_b32_e32 v125, 16, v1
+; GFX11-TRUE16-NEXT: s_delay_alu instid0(VALU_DEP_2) | instskip(SKIP_1) | instid1(VALU_DEP_1)
+; GFX11-TRUE16-NEXT: v_mov_b16_e32 v125.h, v0.l
+; GFX11-TRUE16-NEXT: v_add_f32_e64 v0, 0x40c00000, s4
+; GFX11-TRUE16-NEXT: v_bfe_u32 v1, v0, 16, 1
+; GFX11-TRUE16-NEXT: v_or_b32_e32 v2, 0x400000, v0
+; GFX11-TRUE16-NEXT: v_cmp_u_f32_e32 vcc_lo, v0, v0
+; GFX11-TRUE16-NEXT: s_delay_alu instid0(VALU_DEP_3) | instskip(NEXT) | instid1(VALU_DEP_1)
+; GFX11-TRUE16-NEXT: v_add_nc_u32_e32 v1, v1, v0
+; GFX11-TRUE16-NEXT: v_add_nc_u32_e32 v1, 0x7fff, v1
+; GFX11-TRUE16-NEXT: s_delay_alu instid0(VALU_DEP_1) | instskip(SKIP_3) | instid1(VALU_DEP_1)
+; GFX11-TRUE16-NEXT: v_cndmask_b32_e32 v0, v1, v2, vcc_lo
+; GFX11-TRUE16-NEXT: v_add_f32_e64 v1, 0x40c00000, s3
+; GFX11-TRUE16-NEXT: s_and_b32 s3, s2, 0xffff0000
+; GFX11-TRUE16-NEXT: s_lshl_b32 s2, s2, 16
+; GFX11-TRUE16-NEXT: v_bfe_u32 v2, v1, 16, 1
+; GFX11-TRUE16-NEXT: v_or_b32_e32 v3, 0x400000, v1
+; GFX11-TRUE16-NEXT: v_cmp_u_f32_e32 vcc_lo, v1, v1
+; GFX11-TRUE16-NEXT: s_delay_alu instid0(VALU_DEP_3) | instskip(NEXT) | instid1(VALU_DEP_1)
+; GFX11-TRUE16-NEXT: v_add_nc_u32_e32 v2, v2, v1
+; GFX11-TRUE16-NEXT: v_add_nc_u32_e32 v2, 0x7fff, v2
+; GFX11-TRUE16-NEXT: s_delay_alu instid0(VALU_DEP_1) | instskip(SKIP_1) | instid1(VALU_DEP_2)
+; GFX11-TRUE16-NEXT: v_cndmask_b32_e32 v1, v2, v3, vcc_lo
+; GFX11-TRUE16-NEXT: v_lshrrev_b32_e32 v0, 16, v0
+; GFX11-TRUE16-NEXT: v_lshrrev_b32_e32 v129, 16, v1
+; GFX11-TRUE16-NEXT: s_delay_alu instid0(VALU_DEP_2) | instskip(SKIP_1) | instid1(VALU_DEP_1)
+; GFX11-TRUE16-NEXT: v_mov_b16_e64 v129.h, v0.l
+; GFX11-TRUE16-NEXT: v_add_f32_e64 v0, 0x40c00000, s3
+; GFX11-TRUE16-NEXT: v_bfe_u32 v1, v0, 16, 1
+; GFX11-TRUE16-NEXT: v_or_b32_e32 v2, 0x400000, v0
+; GFX11-TRUE16-NEXT: v_cmp_u_f32_e32 vcc_lo, v0, v0
+; GFX11-TRUE16-NEXT: s_delay_alu instid0(VALU_DEP_3) | instskip(NEXT) | instid1(VALU_DEP_1)
+; GFX11-TRUE16-NEXT: v_add_nc_u32_e32 v1, v1, v0
+; GFX11-TRUE16-NEXT: v_add_nc_u32_e32 v1, 0x7fff, v1
+; GFX11-TRUE16-NEXT: s_delay_alu instid0(VALU_DEP_1) | instskip(SKIP_3) | instid1(VALU_DEP_1)
+; GFX11-TRUE16-NEXT: v_cndmask_b32_e32 v0, v1, v2, vcc_lo
+; GFX11-TRUE16-NEXT: v_add_f32_e64 v1, 0x40c00000, s2
+; GFX11-TRUE16-NEXT: s_and_b32 s2, s1, 0xffff0000
+; GFX11-TRUE16-NEXT: s_lshl_b32 s1, s1, 16
+; GFX11-TRUE16-NEXT: v_bfe_u32 v2, v1, 16, 1
+; GFX11-TRUE16-NEXT: v_or_b32_e32 v3, 0x400000, v1
+; GFX11-TRUE16-NEXT: v_cmp_u_f32_e32 vcc_lo, v1, v1
+; GFX11-TRUE16-NEXT: s_delay_alu instid0(VALU_DEP_3) | instskip(NEXT) | instid1(VALU_DEP_1)
+; GFX11-TRUE16-NEXT: v_add_nc_u32_e32 v2, v2, v1
+; GFX11-TRUE16-NEXT: v_add_nc_u32_e32 v2, 0x7fff, v2
+; GFX11-TRUE16-NEXT: s_delay_alu instid0(VALU_DEP_1) | instskip(SKIP_1) | instid1(VALU_DEP_2)
+; GFX11-TRUE16-NEXT: v_cndmask_b32_e32 v1, v2, v3, vcc_lo
+; GFX11-TRUE16-NEXT: v_lshrrev_b32_e32 v0, 16, v0
+; GFX11-TRUE16-NEXT: v_lshrrev_b32_e32 v132, 16, v1
+; GFX11-TRUE16-NEXT: s_delay_alu instid0(VALU_DEP_2) | instskip(SKIP_1) | instid1(VALU_DEP_1)
+; GFX11-TRUE16-NEXT: v_mov_b16_e64 v132.h, v0.l
+; GFX11-TRUE16-NEXT: v_add_f32_e64 v0, 0x40c00000, s2
+; GFX11-TRUE16-NEXT: v_bfe_u32 v1, v0, 16, 1
+; GFX11-TRUE16-NEXT: v_or_b32_e32 v2, 0x400000, v0
+; GFX11-TRUE16-NEXT: v_cmp_u_f32_e32 vcc_lo, v0, v0
+; GFX11-TRUE16-NEXT: s_delay_alu instid0(VALU_DEP_3) | instskip(NEXT) | instid1(VALU_DEP_1)
+; GFX11-TRUE16-NEXT: v_add_nc_u32_e32 v1, v1, v0
+; GFX11-TRUE16-NEXT: v_add_nc_u32_e32 v1, 0x7fff, v1
+; GFX11-TRUE16-NEXT: s_delay_alu instid0(VALU_DEP_1) | instskip(SKIP_3) | instid1(VALU_DEP_1)
+; GFX11-TRUE16-NEXT: v_cndmask_b32_e32 v0, v1, v2, vcc_lo
+; GFX11-TRUE16-NEXT: v_add_f32_e64 v1, 0x40c00000, s1
+; GFX11-TRUE16-NEXT: s_and_b32 s1, s0, 0xffff0000
+; GFX11-TRUE16-NEXT: s_lshl_b32 s0, s0, 16
+; GFX11-TRUE16-NEXT: v_bfe_u32 v2, v1, 16, 1
+; GFX11-TRUE16-NEXT: v_or_b32_e32 v3, 0x400000, v1
+; GFX11-TRUE16-NEXT: v_cmp_u_f32_e32 vcc_lo, v1, v1
+; GFX11-TRUE16-NEXT: s_delay_alu instid0(VALU_DEP_3) | instskip(NEXT) | instid1(VALU_DEP_1)
+; GFX11-TRUE16-NEXT: v_add_nc_u32_e32 v2, v2, v1
+; GFX11-TRUE16-NEXT: v_add_nc_u32_e32 v2, 0x7fff, v2
+; GFX11-TRUE16-NEXT: s_delay_alu instid0(VALU_DEP_1) | instskip(SKIP_1) | instid1(VALU_DEP_2)
+; GFX11-TRUE16-NEXT: v_cndmask_b32_e32 v1, v2, v3, vcc_lo
+; GFX11-TRUE16-NEXT: v_lshrrev_b32_e32 v0, 16, v0
+; GFX11-TRUE16-NEXT: v_lshrrev_b32_e32 v134, 16, v1
+; GFX11-TRUE16-NEXT: s_delay_alu instid0(VALU_DEP_2) | instskip(SKIP_1) | instid1(VALU_DEP_1)
+; GFX11-TRUE16-NEXT: v_mov_b16_e64 v134.h, v0.l
+; GFX11-TRUE16-NEXT: v_add_f32_e64 v0, 0x40c00000, s1
+; GFX11-TRUE16-NEXT: v_bfe_u32 v1, v0, 16, 1
+; GFX11-TRUE16-NEXT: v_or_b32_e32 v2, 0x400000, v0
+; GFX11-TRUE16-NEXT: v_cmp_u_f32_e32 vcc_lo, v0, v0
+; GFX11-TRUE16-NEXT: s_delay_alu instid0(VALU_DEP_3) | instskip(NEXT) | instid1(VALU_DEP_1)
+; GFX11-TRUE16-NEXT: v_add_nc_u32_e32 v1, v1, v0
+; GFX11-TRUE16-NEXT: v_add_nc_u32_e32 v1, 0x7fff, v1
+; GFX11-TRUE16-NEXT: s_delay_alu instid0(VALU_DEP_1) | instskip(SKIP_1) | instid1(VALU_DEP_1)
+; GFX11-TRUE16-NEXT: v_cndmask_b32_e32 v0, v1, v2, vcc_lo
+; GFX11-TRUE16-NEXT: v_add_f32_e64 v1, 0x40c00000, s0
+; GFX11-TRUE16-NEXT: v_bfe_u32 v2, v1, 16, 1
+; GFX11-TRUE16-NEXT: v_or_b32_e32 v3, 0x400000, v1
+; GFX11-TRUE16-NEXT: v_cmp_u_f32_e32 vcc_lo, v1, v1
+; GFX11-TRUE16-NEXT: s_delay_alu instid0(VALU_DEP_3) | instskip(NEXT) | instid1(VALU_DEP_1)
+; GFX11-TRUE16-NEXT: v_add_nc_u32_e32 v2, v2, v1
+; GFX11-TRUE16-NEXT: v_add_nc_u32_e32 v2, 0x7fff, v2
+; GFX11-TRUE16-NEXT: s_delay_alu instid0(VALU_DEP_1) | instskip(SKIP_1) | instid1(VALU_DEP_2)
+; GFX11-TRUE16-NEXT: v_cndmask_b32_e32 v1, v2, v3, vcc_lo
+; GFX11-TRUE16-NEXT: v_lshrrev_b32_e32 v0, 16, v0
+; GFX11-TRUE16-NEXT: v_lshrrev_b32_e32 v135, 16, v1
+; GFX11-TRUE16-NEXT: s_delay_alu instid0(VALU_DEP_2) | instskip(SKIP_1) | instid1(VALU_DEP_1)
+; GFX11-TRUE16-NEXT: v_mov_b16_e64 v135.h, v0.l
+; GFX11-TRUE16-NEXT: v_and_b32_e32 v0, 0xffff0000, v167
+; GFX11-TRUE16-NEXT: v_add_f32_e32 v0, 0x40c00000, v0
+; GFX11-TRUE16-NEXT: s_delay_alu instid0(VALU_DEP_1) | instskip(SKIP_2) | instid1(VALU_DEP_3)
+; GFX11-TRUE16-NEXT: v_bfe_u32 v1, v0, 16, 1
+; GFX11-TRUE16-NEXT: v_or_b32_e32 v2, 0x400000, v0
+; GFX11-TRUE16-NEXT: v_cmp_u_f32_e32 vcc_lo, v0, v0
+; GFX11-TRUE16-NEXT: v_add_nc_u32_e32 v1, v1, v0
+; GFX11-TRUE16-NEXT: s_delay_alu instid0(VALU_DEP_1) | instskip(NEXT) | instid1(VALU_DEP_1)
+; GFX11-TRUE16-NEXT: v_add_nc_u32_e32 v1, 0x7fff, v1
+; GFX11-TRUE16-NEXT: v_dual_cndmask_b32 v0, v1, v2 :: v_dual_lshlrev_b32 v1, 16, v167
+; GFX11-TRUE16-NEXT: s_delay_alu instid0(VALU_DEP_1) | instskip(NEXT) | instid1(VALU_DEP_1)
+; GFX11-TRUE16-NEXT: v_add_f32_e32 v1, 0x40c00000, v1
+; GFX11-TRUE16-NEXT: v_bfe_u32 v2, v1, 16, 1
+; GFX11-TRUE16-NEXT: v_or_b32_e32 v3, 0x400000, v1
+; GFX11-TRUE16-NEXT: v_cmp_u_f32_e32 vcc_lo, v1, v1
+; GFX11-TRUE16-NEXT: s_delay_alu instid0(VALU_DEP_3) | instskip(NEXT) | instid1(VALU_DEP_1)
+; GFX11-TRUE16-NEXT: v_add_nc_u32_e32 v2, v2, v1
+; GFX11-TRUE16-NEXT: v_add_nc_u32_e32 v2, 0x7fff, v2
+; GFX11-TRUE16-NEXT: s_delay_alu instid0(VALU_DEP_1) | instskip(SKIP_1) | instid1(VALU_DEP_2)
+; GFX11-TRUE16-NEXT: v_cndmask_b32_e32 v1, v2, v3, vcc_lo
+; GFX11-TRUE16-NEXT: v_lshrrev_b32_e32 v0, 16, v0
+; GFX11-TRUE16-NEXT: v_lshrrev_b32_e32 v167, 16, v1
+; GFX11-TRUE16-NEXT: s_delay_alu instid0(VALU_DEP_2) | instskip(SKIP_1) | instid1(VALU_DEP_1)
+; GFX11-TRUE16-NEXT: v_mov_b16_e64 v167.h, v0.l
+; GFX11-TRUE16-NEXT: v_and_b32_e32 v0, 0xffff0000, v176
+; GFX11-TRUE16-NEXT: v_add_f32_e32 v0, 0x40c00000, v0
+; GFX11-TRUE16-NEXT: s_delay_alu instid0(VALU_DEP_1) | instskip(SKIP_2) | instid1(VALU_DEP_3)
+; GFX11-TRUE16-NEXT: v_bfe_u32 v1, v0, 16, 1
+; GFX11-TRUE16-NEXT: v_or_b32_e32 v2, 0x400000, v0
+; GFX11-TRUE16-NEXT: v_cmp_u_f32_e32 vcc_lo, v0, v0
+; GFX11-TRUE16-NEXT: v_add_nc_u32_e32 v1, v1, v0
+; GFX11-TRUE16-NEXT: s_delay_alu instid0(VALU_DEP_1) | instskip(NEXT) | instid1(VALU_DEP_1)
+; GFX11-TRUE16-NEXT: v_add_nc_u32_e32 v1, 0x7fff, v1
+; GFX11-TRUE16-NEXT: v_dual_cndmask_b32 v0, v1, v2 :: v_dual_lshlrev_b32 v1, 16, v176
+; GFX11-TRUE16-NEXT: s_delay_alu instid0(VALU_DEP_1) | instskip(NEXT) | instid1(VALU_DEP_2)
+; GFX11-TRUE16-NEXT: v_lshrrev_b32_e32 v0, 16, v0
+; GFX11-TRUE16-NEXT: v_add_f32_e32 v1, 0x40c00000, v1
+; GFX11-TRUE16-NEXT: s_delay_alu instid0(VALU_DEP_1) | instskip(SKIP_2) | instid1(VALU_DEP_3)
+; GFX11-TRUE16-NEXT: v_bfe_u32 v2, v1, 16, 1
+; GFX11-TRUE16-NEXT: v_or_b32_e32 v3, 0x400000, v1
+; GFX11-TRUE16-NEXT: v_cmp_u_f32_e32 vcc_lo, v1, v1
+; GFX11-TRUE16-NEXT: v_add_nc_u32_e32 v2, v2, v1
+; GFX11-TRUE16-NEXT: s_delay_alu instid0(VALU_DEP_1) | instskip(NEXT) | instid1(VALU_DEP_1)
+; GFX11-TRUE16-NEXT: v_add_nc_u32_e32 v2, 0x7fff, v2
+; GFX11-TRUE16-NEXT: v_cndmask_b32_e32 v1, v2, v3, vcc_lo
+; GFX11-TRUE16-NEXT: s_delay_alu instid0(VALU_DEP_1) | instskip(SKIP_2) | instid1(VALU_DEP_1)
+; GFX11-TRUE16-NEXT: v_lshrrev_b32_e32 v176, 16, v1
+; GFX11-TRUE16-NEXT: v_mov_b16_e64 v176.h, v0.l
+; GFX11-TRUE16-NEXT: v_and_b32_e32 v0, 0xffff0000, v177
+; GFX11-TRUE16-NEXT: v_add_f32_e32 v0, 0x40c00000, v0
+; GFX11-TRUE16-NEXT: s_delay_alu instid0(VALU_DEP_1) | instskip(SKIP_2) | instid1(VALU_DEP_3)
+; GFX11-TRUE16-NEXT: v_bfe_u32 v1, v0, 16, 1
+; GFX11-TRUE16-NEXT: v_or_b32_e32 v2, 0x400000, v0
+; GFX11-TRUE16-NEXT: v_cmp_u_f32_e32 vcc_lo, v0, v0
+; GFX11-TRUE16-NEXT: v_add_nc_u32_e32 v1, v1, v0
+; GFX11-TRUE16-NEXT: s_delay_alu instid0(VALU_DEP_1) | instskip(NEXT) | instid1(VALU_DEP_1)
+; GFX11-TRUE16-NEXT: v_add_nc_u32_e32 v1, 0x7fff, v1
+; GFX11-TRUE16-NEXT: v_dual_cndmask_b32 v0, v1, v2 :: v_dual_lshlrev_b32 v1, 16, v177
+; GFX11-TRUE16-NEXT: s_delay_alu instid0(VALU_DEP_1) | instskip(NEXT) | instid1(VALU_DEP_2)
+; GFX11-TRUE16-NEXT: v_add_f32_e32 v1, 0x40c00000, v1
+; GFX11-TRUE16-NEXT: v_lshrrev_b32_e32 v0, 16, v0
+; GFX11-TRUE16-NEXT: s_delay_alu instid0(VALU_DEP_2) | instskip(SKIP_2) | instid1(VALU_DEP_3)
+; GFX11-TRUE16-NEXT: v_bfe_u32 v2, v1, 16, 1
+; GFX11-TRUE16-NEXT: v_or_b32_e32 v3, 0x400000, v1
+; GFX11-TRUE16-NEXT: v_cmp_u_f32_e32 vcc_lo, v1, v1
+; GFX11-TRUE16-NEXT: v_add_nc_u32_e32 v2, v2, v1
+; GFX11-TRUE16-NEXT: s_delay_alu instid0(VALU_DEP_1) | instskip(NEXT) | instid1(VALU_DEP_1)
+; GFX11-TRUE16-NEXT: v_add_nc_u32_e32 v2, 0x7fff, v2
+; GFX11-TRUE16-NEXT: v_cndmask_b32_e32 v1, v2, v3, vcc_lo
+; GFX11-TRUE16-NEXT: s_delay_alu instid0(VALU_DEP_1) | instskip(SKIP_2) | instid1(VALU_DEP_1)
+; GFX11-TRUE16-NEXT: v_lshrrev_b32_e32 v177, 16, v1
+; GFX11-TRUE16-NEXT: v_mov_b16_e64 v177.h, v0.l
+; GFX11-TRUE16-NEXT: v_and_b32_e32 v0, 0xffff0000, v178
+; GFX11-TRUE16-NEXT: v_add_f32_e32 v0, 0x40c00000, v0
+; GFX11-TRUE16-NEXT: s_delay_alu instid0(VALU_DEP_1) | instskip(SKIP_2) | instid1(VALU_DEP_3)
+; GFX11-TRUE16-NEXT: v_bfe_u32 v1, v0, 16, 1
+; GFX11-TRUE16-NEXT: v_or_b32_e32 v2, 0x400000, v0
+; GFX11-TRUE16-NEXT: v_cmp_u_f32_e32 vcc_lo, v0, v0
+; GFX11-TRUE16-NEXT: v_add_nc_u32_e32 v1, v1, v0
+; GFX11-TRUE16-NEXT: s_delay_alu instid0(VALU_DEP_1) | instskip(NEXT) | instid1(VALU_DEP_1)
+; GFX11-TRUE16-NEXT: v_add_nc_u32_e32 v1, 0x7fff, v1
+; GFX11-TRUE16-NEXT: v_cndmask_b32_e32 v0, v1, v2, vcc_lo
+; GFX11-TRUE16-NEXT: v_lshlrev_b32_e32 v1, 16, v178
+; GFX11-TRUE16-NEXT: s_delay_alu instid0(VALU_DEP_1) | instskip(NEXT) | instid1(VALU_DEP_3)
+; GFX11-TRUE16-NEXT: v_add_f32_e32 v1, 0x40c00000, v1
+; GFX11-TRUE16-NEXT: v_lshrrev_b32_e32 v0, 16, v0
+; GFX11-TRUE16-NEXT: s_delay_alu instid0(VALU_DEP_2) | instskip(SKIP_2) | instid1(VALU_DEP_3)
+; GFX11-TRUE16-NEXT: v_bfe_u32 v2, v1, 16, 1
+; GFX11-TRUE16-NEXT: v_or_b32_e32 v3, 0x400000, v1
+; GFX11-TRUE16-NEXT: v_cmp_u_f32_e32 vcc_lo, v1, v1
+; GFX11-TRUE16-NEXT: v_add_nc_u32_e32 v2, v2, v1
+; GFX11-TRUE16-NEXT: s_delay_alu instid0(VALU_DEP_1) | instskip(NEXT) | instid1(VALU_DEP_1)
+; GFX11-TRUE16-NEXT: v_add_nc_u32_e32 v2, 0x7fff, v2
+; GFX11-TRUE16-NEXT: v_cndmask_b32_e32 v1, v2, v3, vcc_lo
+; GFX11-TRUE16-NEXT: s_delay_alu instid0(VALU_DEP_1) | instskip(SKIP_2) | instid1(VALU_DEP_1)
+; GFX11-TRUE16-NEXT: v_lshrrev_b32_e32 v178, 16, v1
+; GFX11-TRUE16-NEXT: v_mov_b16_e64 v178.h, v0.l
+; GFX11-TRUE16-NEXT: v_and_b32_e32 v0, 0xffff0000, v179
+; GFX11-TRUE16-NEXT: v_add_f32_e32 v0, 0x40c00000, v0
+; GFX11-TRUE16-NEXT: s_delay_alu instid0(VALU_DEP_1) | instskip(SKIP_2) | instid1(VALU_DEP_3)
+; GFX11-TRUE16-NEXT: v_bfe_u32 v1, v0, 16, 1
+; GFX11-TRUE16-NEXT: v_or_b32_e32 v2, 0x400000, v0
+; GFX11-TRUE16-NEXT: v_cmp_u_f32_e32 vcc_lo, v0, v0
+; GFX11-TRUE16-NEXT: v_add_nc_u32_e32 v1, v1, v0
+; GFX11-TRUE16-NEXT: s_delay_alu instid0(VALU_DEP_1) | instskip(NEXT) | instid1(VALU_DEP_1)
+; GFX11-TRUE16-NEXT: v_add_nc_u32_e32 v1, 0x7fff, v1
+; GFX11-TRUE16-NEXT: v_dual_cndmask_b32 v0, v1, v2 :: v_dual_lshlrev_b32 v1, 16, v179
+; GFX11-TRUE16-NEXT: s_delay_alu instid0(VALU_DEP_1) | instskip(NEXT) | instid1(VALU_DEP_2)
+; GFX11-TRUE16-NEXT: v_add_f32_e32 v1, 0x40c00000, v1
+; GFX11-TRUE16-NEXT: v_lshrrev_b32_e32 v0, 16, v0
+; GFX11-TRUE16-NEXT: s_delay_alu instid0(VALU_DEP_2) | instskip(SKIP_2) | instid1(VALU_DEP_3)
+; GFX11-TRUE16-NEXT: v_bfe_u32 v2, v1, 16, 1
+; GFX11-TRUE16-NEXT: v_or_b32_e32 v3, 0x400000, v1
+; GFX11-TRUE16-NEXT: v_cmp_u_f32_e32 vcc_lo, v1, v1
+; GFX11-TRUE16-NEXT: v_add_nc_u32_e32 v2, v2, v1
+; GFX11-TRUE16-NEXT: s_delay_alu instid0(VALU_DEP_1) | instskip(NEXT) | instid1(VALU_DEP_1)
+; GFX11-TRUE16-NEXT: v_add_nc_u32_e32 v2, 0x7fff, v2
+; GFX11-TRUE16-NEXT: v_cndmask_b32_e32 v1, v2, v3, vcc_lo
+; GFX11-TRUE16-NEXT: s_delay_alu instid0(VALU_DEP_1) | instskip(SKIP_2) | instid1(VALU_DEP_1)
+; GFX11-TRUE16-NEXT: v_lshrrev_b32_e32 v179, 16, v1
+; GFX11-TRUE16-NEXT: v_mov_b16_e64 v179.h, v0.l
+; GFX11-TRUE16-NEXT: v_and_b32_e32 v0, 0xffff0000, v180
+; GFX11-TRUE16-NEXT: v_add_f32_e32 v0, 0x40c00000, v0
+; GFX11-TRUE16-NEXT: s_delay_alu instid0(VALU_DEP_1) | instskip(SKIP_2) | instid1(VALU_DEP_3)
+; GFX11-TRUE16-NEXT: v_bfe_u32 v1, v0, 16, 1
+; GFX11-TRUE16-NEXT: v_or_b32_e32 v2, 0x400000, v0
+; GFX11-TRUE16-NEXT: v_cmp_u_f32_e32 vcc_lo, v0, v0
+; GFX11-TRUE16-NEXT: v_add_nc_u32_e32 v1, v1, v0
+; GFX11-TRUE16-NEXT: s_delay_alu instid0(VALU_DEP_1) | instskip(NEXT) | instid1(VALU_DEP_1)
+; GFX11-TRUE16-NEXT: v_add_nc_u32_e32 v1, 0x7fff, v1
+; GFX11-TRUE16-NEXT: v_dual_cndmask_b32 v0, v1, v2 :: v_dual_lshlrev_b32 v1, 16, v180
+; GFX11-TRUE16-NEXT: s_delay_alu instid0(VALU_DEP_1) | instskip(NEXT) | instid1(VALU_DEP_2)
+; GFX11-TRUE16-NEXT: v_lshrrev_b32_e32 v0, 16, v0
+; GFX11-TRUE16-NEXT: v_add_f32_e32 v1, 0x40c00000, v1
+; GFX11-TRUE16-NEXT: s_delay_alu instid0(VALU_DEP_1) | instskip(SKIP_2) | instid1(VALU_DEP_3)
+; GFX11-TRUE16-NEXT: v_bfe_u32 v2, v1, 16, 1
+; GFX11-TRUE16-NEXT: v_or_b32_e32 v3, 0x400000, v1
+; GFX11-TRUE16-NEXT: v_cmp_u_f32_e32 vcc_lo, v1, v1
+; GFX11-TRUE16-NEXT: v_add_nc_u32_e32 v2, v2, v1
+; GFX11-TRUE16-NEXT: s_delay_alu instid0(VALU_DEP_1) | instskip(NEXT) | instid1(VALU_DEP_1)
+; GFX11-TRUE16-NEXT: v_add_nc_u32_e32 v2, 0x7fff, v2
+; GFX11-TRUE16-NEXT: v_cndmask_b32_e32 v1, v2, v3, vcc_lo
+; GFX11-TRUE16-NEXT: s_delay_alu instid0(VALU_DEP_1) | instskip(SKIP_2) | instid1(VALU_DEP_1)
+; GFX11-TRUE16-NEXT: v_lshrrev_b32_e32 v180, 16, v1
+; GFX11-TRUE16-NEXT: v_mov_b16_e64 v180.h, v0.l
+; GFX11-TRUE16-NEXT: v_and_b32_e32 v0, 0xffff0000, v181
+; GFX11-TRUE16-NEXT: v_add_f32_e32 v0, 0x40c00000, v0
+; GFX11-TRUE16-NEXT: s_delay_alu instid0(VALU_DEP_1) | instskip(SKIP_2) | instid1(VALU_DEP_3)
+; GFX11-TRUE16-NEXT: v_bfe_u32 v1, v0, 16, 1
+; GFX11-TRUE16-NEXT: v_or_b32_e32 v2, 0x400000, v0
+; GFX11-TRUE16-NEXT: v_cmp_u_f32_e32 vcc_lo, v0, v0
+; GFX11-TRUE16-NEXT: v_add_nc_u32_e32 v1, v1, v0
+; GFX11-TRUE16-NEXT: s_delay_alu instid0(VALU_DEP_1) | instskip(NEXT) | instid1(VALU_DEP_1)
+; GFX11-TRUE16-NEXT: v_add_nc_u32_e32 v1, 0x7fff, v1
+; GFX11-TRUE16-NEXT: v_dual_cndmask_b32 v0, v1, v2 :: v_dual_lshlrev_b32 v1, 16, v181
+; GFX11-TRUE16-NEXT: s_delay_alu instid0(VALU_DEP_1) | instskip(NEXT) | instid1(VALU_DEP_2)
+; GFX11-TRUE16-NEXT: v_add_f32_e32 v1, 0x40c00000, v1
+; GFX11-TRUE16-NEXT: v_lshrrev_b32_e32 v0, 16, v0
+; GFX11-TRUE16-NEXT: s_delay_alu instid0(VALU_DEP_2) | instskip(SKIP_2) | instid1(VALU_DEP_3)
+; GFX11-TRUE16-NEXT: v_bfe_u32 v2, v1, 16, 1
+; GFX11-TRUE16-NEXT: v_or_b32_e32 v3, 0x400000, v1
+; GFX11-TRUE16-NEXT: v_cmp_u_f32_e32 vcc_lo, v1, v1
+; GFX11-TRUE16-NEXT: v_add_nc_u32_e32 v2, v2, v1
+; GFX11-TRUE16-NEXT: s_delay_alu instid0(VALU_DEP_1) | instskip(NEXT) | instid1(VALU_DEP_1)
+; GFX11-TRUE16-NEXT: v_add_nc_u32_e32 v2, 0x7fff, v2
+; GFX11-TRUE16-NEXT: v_cndmask_b32_e32 v1, v2, v3, vcc_lo
+; GFX11-TRUE16-NEXT: s_delay_alu instid0(VALU_DEP_1) | instskip(SKIP_2) | instid1(VALU_DEP_1)
+; GFX11-TRUE16-NEXT: v_lshrrev_b32_e32 v181, 16, v1
+; GFX11-TRUE16-NEXT: v_mov_b16_e64 v181.h, v0.l
+; GFX11-TRUE16-NEXT: v_and_b32_e32 v0, 0xffff0000, v182
+; GFX11-TRUE16-NEXT: v_add_f32_e32 v0, 0x40c00000, v0
+; GFX11-TRUE16-NEXT: s_delay_alu instid0(VALU_DEP_1) | instskip(SKIP_2) | instid1(VALU_DEP_3)
+; GFX11-TRUE16-NEXT: v_bfe_u32 v1, v0, 16, 1
+; GFX11-TRUE16-NEXT: v_or_b32_e32 v2, 0x400000, v0
+; GFX11-TRUE16-NEXT: v_cmp_u_f32_e32 vcc_lo, v0, v0
+; GFX11-TRUE16-NEXT: v_add_nc_u32_e32 v1, v1, v0
+; GFX11-TRUE16-NEXT: s_delay_alu instid0(VALU_DEP_1) | instskip(NEXT) | instid1(VALU_DEP_1)
+; GFX11-TRUE16-NEXT: v_add_nc_u32_e32 v1, 0x7fff, v1
+; GFX11-TRUE16-NEXT: v_cndmask_b32_e32 v0, v1, v2, vcc_lo
+; GFX11-TRUE16-NEXT: v_lshlrev_b32_e32 v1, 16, v182
+; GFX11-TRUE16-NEXT: s_delay_alu instid0(VALU_DEP_1) | instskip(NEXT) | instid1(VALU_DEP_3)
+; GFX11-TRUE16-NEXT: v_add_f32_e32 v1, 0x40c00000, v1
+; GFX11-TRUE16-NEXT: v_lshrrev_b32_e32 v0, 16, v0
+; GFX11-TRUE16-NEXT: s_delay_alu instid0(VALU_DEP_2) | instskip(SKIP_2) | instid1(VALU_DEP_3)
+; GFX11-TRUE16-NEXT: v_bfe_u32 v2, v1, 16, 1
+; GFX11-TRUE16-NEXT: v_or_b32_e32 v3, 0x400000, v1
+; GFX11-TRUE16-NEXT: v_cmp_u_f32_e32 vcc_lo, v1, v1
+; GFX11-TRUE16-NEXT: v_add_nc_u32_e32 v2, v2, v1
+; GFX11-TRUE16-NEXT: s_delay_alu instid0(VALU_DEP_1) | instskip(NEXT) | instid1(VALU_DEP_1)
+; GFX11-TRUE16-NEXT: v_add_nc_u32_e32 v2, 0x7fff, v2
+; GFX11-TRUE16-NEXT: v_cndmask_b32_e32 v1, v2, v3, vcc_lo
+; GFX11-TRUE16-NEXT: s_delay_alu instid0(VALU_DEP_1) | instskip(SKIP_2) | instid1(VALU_DEP_1)
+; GFX11-TRUE16-NEXT: v_lshrrev_b32_e32 v182, 16, v1
+; GFX11-TRUE16-NEXT: v_mov_b16_e64 v182.h, v0.l
+; GFX11-TRUE16-NEXT: v_and_b32_e32 v0, 0xffff0000, v183
+; GFX11-TRUE16-NEXT: v_add_f32_e32 v0, 0x40c00000, v0
+; GFX11-TRUE16-NEXT: s_delay_alu instid0(VALU_DEP_1) | instskip(SKIP_2) | instid1(VALU_DEP_3)
+; GFX11-TRUE16-NEXT: v_bfe_u32 v1, v0, 16, 1
+; GFX11-TRUE16-NEXT: v_or_b32_e32 v2, 0x400000, v0
+; GFX11-TRUE16-NEXT: v_cmp_u_f32_e32 vcc_lo, v0, v0
+; GFX11-TRUE16-NEXT: v_add_nc_u32_e32 v1, v1, v0
+; GFX11-TRUE16-NEXT: s_delay_alu instid0(VALU_DEP_1) | instskip(NEXT) | instid1(VALU_DEP_1)
+; GFX11-TRUE16-NEXT: v_add_nc_u32_e32 v1, 0x7fff, v1
+; GFX11-TRUE16-NEXT: v_dual_cndmask_b32 v0, v1, v2 :: v_dual_lshlrev_b32 v1, 16, v183
+; GFX11-TRUE16-NEXT: s_delay_alu instid0(VALU_DEP_1) | instskip(NEXT) | instid1(VALU_DEP_2)
+; GFX11-TRUE16-NEXT: v_lshrrev_b32_e32 v0, 16, v0
+; GFX11-TRUE16-NEXT: v_add_f32_e32 v1, 0x40c00000, v1
+; GFX11-TRUE16-NEXT: s_delay_alu instid0(VALU_DEP_1) | instskip(SKIP_2) | instid1(VALU_DEP_3)
+; GFX11-TRUE16-NEXT: v_bfe_u32 v2, v1, 16, 1
+; GFX11-TRUE16-NEXT: v_or_b32_e32 v3, 0x400000, v1
+; GFX11-TRUE16-NEXT: v_cmp_u_f32_e32 vcc_lo, v1, v1
+; GFX11-TRUE16-NEXT: v_add_nc_u32_e32 v2, v2, v1
+; GFX11-TRUE16-NEXT: s_delay_alu instid0(VALU_DEP_1) | instskip(NEXT) | instid1(VALU_DEP_1)
+; GFX11-TRUE16-NEXT: v_add_nc_u32_e32 v2, 0x7fff, v2
+; GFX11-TRUE16-NEXT: v_cndmask_b32_e32 v1, v2, v3, vcc_lo
+; GFX11-TRUE16-NEXT: s_delay_alu instid0(VALU_DEP_1) | instskip(SKIP_2) | instid1(VALU_DEP_1)
+; GFX11-TRUE16-NEXT: v_lshrrev_b32_e32 v183, 16, v1
+; GFX11-TRUE16-NEXT: v_mov_b16_e64 v183.h, v0.l
+; GFX11-TRUE16-NEXT: v_and_b32_e32 v0, 0xffff0000, v168
+; GFX11-TRUE16-NEXT: v_add_f32_e32 v0, 0x40c00000, v0
+; GFX11-TRUE16-NEXT: s_delay_alu instid0(VALU_DEP_1) | instskip(SKIP_2) | instid1(VALU_DEP_3)
+; GFX11-TRUE16-NEXT: v_bfe_u32 v1, v0, 16, 1
+; GFX11-TRUE16-NEXT: v_or_b32_e32 v2, 0x400000, v0
+; GFX11-TRUE16-NEXT: v_cmp_u_f32_e32 vcc_lo, v0, v0
+; GFX11-TRUE16-NEXT: v_add_nc_u32_e32 v1, v1, v0
+; GFX11-TRUE16-NEXT: s_delay_alu instid0(VALU_DEP_1) | instskip(NEXT) | instid1(VALU_DEP_1)
+; GFX11-TRUE16-NEXT: v_add_nc_u32_e32 v1, 0x7fff, v1
+; GFX11-TRUE16-NEXT: v_dual_cndmask_b32 v0, v1, v2 :: v_dual_lshlrev_b32 v1, 16, v168
+; GFX11-TRUE16-NEXT: s_delay_alu instid0(VALU_DEP_1) | instskip(NEXT) | instid1(VALU_DEP_2)
+; GFX11-TRUE16-NEXT: v_lshrrev_b32_e32 v0, 16, v0
+; GFX11-TRUE16-NEXT: v_add_f32_e32 v1, 0x40c00000, v1
+; GFX11-TRUE16-NEXT: s_delay_alu instid0(VALU_DEP_1) | instskip(SKIP_2) | instid1(VALU_DEP_3)
+; GFX11-TRUE16-NEXT: v_bfe_u32 v2, v1, 16, 1
+; GFX11-TRUE16-NEXT: v_or_b32_e32 v3, 0x400000, v1
+; GFX11-TRUE16-NEXT: v_cmp_u_f32_e32 vcc_lo, v1, v1
+; GFX11-TRUE16-NEXT: v_add_nc_u32_e32 v2, v2, v1
+; GFX11-TRUE16-NEXT: s_delay_alu instid0(VALU_DEP_1) | instskip(NEXT) | instid1(VALU_DEP_1)
+; GFX11-TRUE16-NEXT: v_add_nc_u32_e32 v2, 0x7fff, v2
+; GFX11-TRUE16-NEXT: v_cndmask_b32_e32 v1, v2, v3, vcc_lo
+; GFX11-TRUE16-NEXT: s_delay_alu instid0(VALU_DEP_1) | instskip(SKIP_2) | instid1(VALU_DEP_1)
+; GFX11-TRUE16-NEXT: v_lshrrev_b32_e32 v168, 16, v1
+; GFX11-TRUE16-NEXT: v_mov_b16_e64 v168.h, v0.l
+; GFX11-TRUE16-NEXT: v_and_b32_e32 v0, 0xffff0000, v169
+; GFX11-TRUE16-NEXT: v_add_f32_e32 v0, 0x40c00000, v0
+; GFX11-TRUE16-NEXT: s_delay_alu instid0(VALU_DEP_1) | instskip(SKIP_2) | instid1(VALU_DEP_3)
+; GFX11-TRUE16-NEXT: v_bfe_u32 v1, v0, 16, 1
+; GFX11-TRUE16-NEXT: v_or_b32_e32 v2, 0x400000, v0
+; GFX11-TRUE16-NEXT: v_cmp_u_f32_e32 vcc_lo, v0, v0
+; GFX11-TRUE16-NEXT: v_add_nc_u32_e32 v1, v1, v0
+; GFX11-TRUE16-NEXT: s_delay_alu instid0(VALU_DEP_1) | instskip(NEXT) | instid1(VALU_DEP_1)
+; GFX11-TRUE16-NEXT: v_add_nc_u32_e32 v1, 0x7fff, v1
+; GFX11-TRUE16-NEXT: v_dual_cndmask_b32 v0, v1, v2 :: v_dual_lshlrev_b32 v1, 16, v169
+; GFX11-TRUE16-NEXT: s_delay_alu instid0(VALU_DEP_1) | instskip(NEXT) | instid1(VALU_DEP_2)
+; GFX11-TRUE16-NEXT: v_lshrrev_b32_e32 v0, 16, v0
+; GFX11-TRUE16-NEXT: v_add_f32_e32 v1, 0x40c00000, v1
+; GFX11-TRUE16-NEXT: s_delay_alu instid0(VALU_DEP_1) | instskip(SKIP_2) | instid1(VALU_DEP_3)
+; GFX11-TRUE16-NEXT: v_bfe_u32 v2, v1, 16, 1
+; GFX11-TRUE16-NEXT: v_or_b32_e32 v3, 0x400000, v1
+; GFX11-TRUE16-NEXT: v_cmp_u_f32_e32 vcc_lo, v1, v1
+; GFX11-TRUE16-NEXT: v_add_nc_u32_e32 v2, v2, v1
+; GFX11-TRUE16-NEXT: s_delay_alu instid0(VALU_DEP_1) | instskip(NEXT) | instid1(VALU_DEP_1)
+; GFX11-TRUE16-NEXT: v_add_nc_u32_e32 v2, 0x7fff, v2
+; GFX11-TRUE16-NEXT: v_cndmask_b32_e32 v1, v2, v3, vcc_lo
+; GFX11-TRUE16-NEXT: s_delay_alu instid0(VALU_DEP_1) | instskip(SKIP_2) | instid1(VALU_DEP_1)
+; GFX11-TRUE16-NEXT: v_lshrrev_b32_e32 v169, 16, v1
+; GFX11-TRUE16-NEXT: v_mov_b16_e64 v169.h, v0.l
+; GFX11-TRUE16-NEXT: v_and_b32_e32 v0, 0xffff0000, v170
+; GFX11-TRUE16-NEXT: v_add_f32_e32 v0, 0x40c00000, v0
+; GFX11-TRUE16-NEXT: s_delay_alu instid0(VALU_DEP_1) | instskip(SKIP_2) | instid1(VALU_DEP_3)
+; GFX11-TRUE16-NEXT: v_bfe_u32 v1, v0, 16, 1
+; GFX11-TRUE16-NEXT: v_or_b32_e32 v2, 0x400000, v0
+; GFX11-TRUE16-NEXT: v_cmp_u_f32_e32 vcc_lo, v0, v0
+; GFX11-TRUE16-NEXT: v_add_nc_u32_e32 v1, v1, v0
+; GFX11-TRUE16-NEXT: s_delay_alu instid0(VALU_DEP_1) | instskip(NEXT) | instid1(VALU_DEP_1)
+; GFX11-TRUE16-NEXT: v_add_nc_u32_e32 v1, 0x7fff, v1
+; GFX11-TRUE16-NEXT: v_cndmask_b32_e32 v0, v1, v2, vcc_lo
+; GFX11-TRUE16-NEXT: v_lshlrev_b32_e32 v1, 16, v170
+; GFX11-TRUE16-NEXT: s_delay_alu instid0(VALU_DEP_1) | instskip(NEXT) | instid1(VALU_DEP_3)
+; GFX11-TRUE16-NEXT: v_add_f32_e32 v1, 0x40c00000, v1
+; GFX11-TRUE16-NEXT: v_lshrrev_b32_e32 v0, 16, v0
+; GFX11-TRUE16-NEXT: s_delay_alu instid0(VALU_DEP_2) | instskip(SKIP_2) | instid1(VALU_DEP_3)
+; GFX11-TRUE16-NEXT: v_bfe_u32 v2, v1, 16, 1
+; GFX11-TRUE16-NEXT: v_or_b32_e32 v3, 0x400000, v1
+; GFX11-TRUE16-NEXT: v_cmp_u_f32_e32 vcc_lo, v1, v1
+; GFX11-TRUE16-NEXT: v_add_nc_u32_e32 v2, v2, v1
+; GFX11-TRUE16-NEXT: s_delay_alu instid0(VALU_DEP_1) | instskip(NEXT) | instid1(VALU_DEP_1)
+; GFX11-TRUE16-NEXT: v_add_nc_u32_e32 v2, 0x7fff, v2
+; GFX11-TRUE16-NEXT: v_cndmask_b32_e32 v1, v2, v3, vcc_lo
+; GFX11-TRUE16-NEXT: s_delay_alu instid0(VALU_DEP_1) | instskip(SKIP_2) | instid1(VALU_DEP_1)
+; GFX11-TRUE16-NEXT: v_lshrrev_b32_e32 v170, 16, v1
+; GFX11-TRUE16-NEXT: v_mov_b16_e64 v170.h, v0.l
+; GFX11-TRUE16-NEXT: v_and_b32_e32 v0, 0xffff0000, v171
+; GFX11-TRUE16-NEXT: v_add_f32_e32 v0, 0x40c00000, v0
+; GFX11-TRUE16-NEXT: s_delay_alu instid0(VALU_DEP_1) | instskip(SKIP_2) | instid1(VALU_DEP_3)
+; GFX11-TRUE16-NEXT: v_bfe_u32 v1, v0, 16, 1
+; GFX11-TRUE16-NEXT: v_or_b32_e32 v2, 0x400000, v0
+; GFX11-TRUE16-NEXT: v_cmp_u_f32_e32 vcc_lo, v0, v0
+; GFX11-TRUE16-NEXT: v_add_nc_u32_e32 v1, v1, v0
+; GFX11-TRUE16-NEXT: s_delay_alu instid0(VALU_DEP_1) | instskip(NEXT) | instid1(VALU_DEP_1)
+; GFX11-TRUE16-NEXT: v_add_nc_u32_e32 v1, 0x7fff, v1
+; GFX11-TRUE16-NEXT: v_dual_cndmask_b32 v0, v1, v2 :: v_dual_lshlrev_b32 v1, 16, v171
+; GFX11-TRUE16-NEXT: s_delay_alu instid0(VALU_DEP_1) | instskip(NEXT) | instid1(VALU_DEP_2)
+; GFX11-TRUE16-NEXT: v_lshrrev_b32_e32 v0, 16, v0
+; GFX11-TRUE16-NEXT: v_add_f32_e32 v1, 0x40c00000, v1
+; GFX11-TRUE16-NEXT: s_delay_alu instid0(VALU_DEP_1) | instskip(SKIP_2) | instid1(VALU_DEP_3)
+; GFX11-TRUE16-NEXT: v_bfe_u32 v2, v1, 16, 1
+; GFX11-TRUE16-NEXT: v_or_b32_e32 v3, 0x400000, v1
+; GFX11-TRUE16-NEXT: v_cmp_u_f32_e32 vcc_lo, v1, v1
+; GFX11-TRUE16-NEXT: v_add_nc_u32_e32 v2, v2, v1
+; GFX11-TRUE16-NEXT: s_delay_alu instid0(VALU_DEP_1) | instskip(NEXT) | instid1(VALU_DEP_1)
+; GFX11-TRUE16-NEXT: v_add_nc_u32_e32 v2, 0x7fff, v2
+; GFX11-TRUE16-NEXT: v_cndmask_b32_e32 v1, v2, v3, vcc_lo
+; GFX11-TRUE16-NEXT: s_delay_alu instid0(VALU_DEP_1) | instskip(SKIP_2) | instid1(VALU_DEP_1)
+; GFX11-TRUE16-NEXT: v_lshrrev_b32_e32 v171, 16, v1
+; GFX11-TRUE16-NEXT: v_mov_b16_e64 v171.h, v0.l
+; GFX11-TRUE16-NEXT: v_and_b32_e32 v0, 0xffff0000, v172
+; GFX11-TRUE16-NEXT: v_add_f32_e32 v0, 0x40c00000, v0
+; GFX11-TRUE16-NEXT: s_delay_alu instid0(VALU_DEP_1) | instskip(SKIP_2) | instid1(VALU_DEP_3)
+; GFX11-TRUE16-NEXT: v_bfe_u32 v1, v0, 16, 1
+; GFX11-TRUE16-NEXT: v_or_b32_e32 v2, 0x400000, v0
+; GFX11-TRUE16-NEXT: v_cmp_u_f32_e32 vcc_lo, v0, v0
+; GFX11-TRUE16-NEXT: v_add_nc_u32_e32 v1, v1, v0
+; GFX11-TRUE16-NEXT: s_delay_alu instid0(VALU_DEP_1) | instskip(NEXT) | instid1(VALU_DEP_1)
+; GFX11-TRUE16-NEXT: v_add_nc_u32_e32 v1, 0x7fff, v1
+; GFX11-TRUE16-NEXT: v_dual_cndmask_b32 v0, v1, v2 :: v_dual_lshlrev_b32 v1, 16, v172
+; GFX11-TRUE16-NEXT: s_delay_alu instid0(VALU_DEP_1) | instskip(NEXT) | instid1(VALU_DEP_2)
+; GFX11-TRUE16-NEXT: v_lshrrev_b32_e32 v0, 16, v0
+; GFX11-TRUE16-NEXT: v_add_f32_e32 v1, 0x40c00000, v1
+; GFX11-TRUE16-NEXT: s_delay_alu instid0(VALU_DEP_1) | instskip(SKIP_2) | instid1(VALU_DEP_3)
+; GFX11-TRUE16-NEXT: v_bfe_u32 v2, v1, 16, 1
+; GFX11-TRUE16-NEXT: v_or_b32_e32 v3, 0x400000, v1
+; GFX11-TRUE16-NEXT: v_cmp_u_f32_e32 vcc_lo, v1, v1
+; GFX11-TRUE16-NEXT: v_add_nc_u32_e32 v2, v2, v1
+; GFX11-TRUE16-NEXT: s_delay_alu instid0(VALU_DEP_1) | instskip(NEXT) | instid1(VALU_DEP_1)
+; GFX11-TRUE16-NEXT: v_add_nc_u32_e32 v2, 0x7fff, v2
+; GFX11-TRUE16-NEXT: v_cndmask_b32_e32 v1, v2, v3, vcc_lo
+; GFX11-TRUE16-NEXT: s_delay_alu instid0(VALU_DEP_1) | instskip(SKIP_2) | instid1(VALU_DEP_1)
+; GFX11-TRUE16-NEXT: v_lshrrev_b32_e32 v172, 16, v1
+; GFX11-TRUE16-NEXT: v_mov_b16_e64 v172.h, v0.l
+; GFX11-TRUE16-NEXT: v_and_b32_e32 v0, 0xffff0000, v173
+; GFX11-TRUE16-NEXT: v_add_f32_e32 v0, 0x40c00000, v0
+; GFX11-TRUE16-NEXT: s_delay_alu instid0(VALU_DEP_1) | instskip(SKIP_2) | instid1(VALU_DEP_3)
+; GFX11-TRUE16-NEXT: v_bfe_u32 v1, v0, 16, 1
+; GFX11-TRUE16-NEXT: v_or_b32_e32 v2, 0x400000, v0
+; GFX11-TRUE16-NEXT: v_cmp_u_f32_e32 vcc_lo, v0, v0
+; GFX11-TRUE16-NEXT: v_add_nc_u32_e32 v1, v1, v0
+; GFX11-TRUE16-NEXT: s_delay_alu instid0(VALU_DEP_1) | instskip(NEXT) | instid1(VALU_DEP_1)
+; GFX11-TRUE16-NEXT: v_add_nc_u32_e32 v1, 0x7fff, v1
+; GFX11-TRUE16-NEXT: v_dual_cndmask_b32 v0, v1, v2 :: v_dual_lshlrev_b32 v1, 16, v173
+; GFX11-TRUE16-NEXT: s_delay_alu instid0(VALU_DEP_1) | instskip(NEXT) | instid1(VALU_DEP_2)
+; GFX11-TRUE16-NEXT: v_lshrrev_b32_e32 v0, 16, v0
+; GFX11-TRUE16-NEXT: v_add_f32_e32 v1, 0x40c00000, v1
+; GFX11-TRUE16-NEXT: s_delay_alu instid0(VALU_DEP_1) | instskip(SKIP_2) | instid1(VALU_DEP_3)
+; GFX11-TRUE16-NEXT: v_bfe_u32 v2, v1, 16, 1
+; GFX11-TRUE16-NEXT: v_or_b32_e32 v3, 0x400000, v1
+; GFX11-TRUE16-NEXT: v_cmp_u_f32_e32 vcc_lo, v1, v1
+; GFX11-TRUE16-NEXT: v_add_nc_u32_e32 v2, v2, v1
+; GFX11-TRUE16-NEXT: s_delay_alu instid0(VALU_DEP_1) | instskip(NEXT) | instid1(VALU_DEP_1)
+; GFX11-TRUE16-NEXT: v_add_nc_u32_e32 v2, 0x7fff, v2
+; GFX11-TRUE16-NEXT: v_cndmask_b32_e32 v1, v2, v3, vcc_lo
+; GFX11-TRUE16-NEXT: s_delay_alu instid0(VALU_DEP_1) | instskip(SKIP_2) | instid1(VALU_DEP_1)
+; GFX11-TRUE16-NEXT: v_lshrrev_b32_e32 v173, 16, v1
+; GFX11-TRUE16-NEXT: v_mov_b16_e64 v173.h, v0.l
+; GFX11-TRUE16-NEXT: v_and_b32_e32 v0, 0xffff0000, v174
+; GFX11-TRUE16-NEXT: v_add_f32_e32 v0, 0x40c00000, v0
+; GFX11-TRUE16-NEXT: s_delay_alu instid0(VALU_DEP_1) | instskip(SKIP_2) | instid1(VALU_DEP_3)
+; GFX11-TRUE16-NEXT: v_bfe_u32 v1, v0, 16, 1
+; GFX11-TRUE16-NEXT: v_or_b32_e32 v2, 0x400000, v0
+; GFX11-TRUE16-NEXT: v_cmp_u_f32_e32 vcc_lo, v0, v0
+; GFX11-TRUE16-NEXT: v_add_nc_u32_e32 v1, v1, v0
+; GFX11-TRUE16-NEXT: s_delay_alu instid0(VALU_DEP_1) | instskip(NEXT) | instid1(VALU_DEP_1)
+; GFX11-TRUE16-NEXT: v_add_nc_u32_e32 v1, 0x7fff, v1
+; GFX11-TRUE16-NEXT: v_cndmask_b32_e32 v0, v1, v2, vcc_lo
+; GFX11-TRUE16-NEXT: v_lshlrev_b32_e32 v1, 16, v174
+; GFX11-TRUE16-NEXT: s_delay_alu instid0(VALU_DEP_1) | instskip(NEXT) | instid1(VALU_DEP_3)
+; GFX11-TRUE16-NEXT: v_add_f32_e32 v1, 0x40c00000, v1
+; GFX11-TRUE16-NEXT: v_lshrrev_b32_e32 v0, 16, v0
+; GFX11-TRUE16-NEXT: s_delay_alu instid0(VALU_DEP_2) | instskip(SKIP_2) | instid1(VALU_DEP_3)
+; GFX11-TRUE16-NEXT: v_bfe_u32 v2, v1, 16, 1
+; GFX11-TRUE16-NEXT: v_or_b32_e32 v3, 0x400000, v1
+; GFX11-TRUE16-NEXT: v_cmp_u_f32_e32 vcc_lo, v1, v1
+; GFX11-TRUE16-NEXT: v_add_nc_u32_e32 v2, v2, v1
+; GFX11-TRUE16-NEXT: s_delay_alu instid0(VALU_DEP_1) | instskip(NEXT) | instid1(VALU_DEP_1)
+; GFX11-TRUE16-NEXT: v_add_nc_u32_e32 v2, 0x7fff, v2
+; GFX11-TRUE16-NEXT: v_cndmask_b32_e32 v1, v2, v3, vcc_lo
+; GFX11-TRUE16-NEXT: s_delay_alu instid0(VALU_DEP_1)
+; GFX11-TRUE16-NEXT: v_lshrrev_b32_e32 v174, 16, v1
+; GFX11-TRUE16-NEXT: v_mov_b16_e64 v174.h, v0.l
+; GFX11-TRUE16-NEXT: .LBB19_3: ; %end
+; GFX11-TRUE16-NEXT: v_dual_mov_b32 v4, v125 :: v_dual_mov_b32 v5, v120
+; GFX11-TRUE16-NEXT: v_dual_mov_b32 v6, v114 :: v_dual_mov_b32 v7, v107
+; GFX11-TRUE16-NEXT: v_dual_mov_b32 v8, v99 :: v_dual_mov_b32 v9, v90
+; GFX11-TRUE16-NEXT: v_dual_mov_b32 v12, v57 :: v_dual_mov_b32 v13, v44
+; GFX11-TRUE16-NEXT: v_dual_mov_b32 v14, v30 :: v_dual_mov_b32 v17, v173
+; GFX11-TRUE16-NEXT: v_dual_mov_b32 v16, v174 :: v_dual_mov_b32 v19, v171
+; GFX11-TRUE16-NEXT: v_dual_mov_b32 v18, v172 :: v_dual_mov_b32 v21, v169
+; GFX11-TRUE16-NEXT: v_dual_mov_b32 v20, v170 :: v_dual_mov_b32 v23, v183
+; GFX11-TRUE16-NEXT: v_dual_mov_b32 v22, v168 :: v_dual_mov_b32 v25, v181
+; GFX11-TRUE16-NEXT: s_clause 0x1f
+; GFX11-TRUE16-NEXT: scratch_load_b32 v174, off, s32
+; GFX11-TRUE16-NEXT: scratch_load_b32 v173, off, s32 offset:4
+; GFX11-TRUE16-NEXT: scratch_load_b32 v172, off, s32 offset:8
+; GFX11-TRUE16-NEXT: scratch_load_b32 v171, off, s32 offset:12
+; GFX11-TRUE16-NEXT: scratch_load_b32 v170, off, s32 offset:16
+; GFX11-TRUE16-NEXT: scratch_load_b32 v169, off, s32 offset:20
+; GFX11-TRUE16-NEXT: scratch_load_b32 v168, off, s32 offset:24
+; GFX11-TRUE16-NEXT: scratch_load_b32 v159, off, s32 offset:28
+; GFX11-TRUE16-NEXT: scratch_load_b32 v158, off, s32 offset:32
+; GFX11-TRUE16-NEXT: scratch_load_b32 v157, off, s32 offset:36
+; GFX11-TRUE16-NEXT: scratch_load_b32 v156, off, s32 offset:40
+; GFX11-TRUE16-NEXT: scratch_load_b32 v155, off, s32 offset:44
+; GFX11-TRUE16-NEXT: scratch_load_b32 v154, off, s32 offset:48
+; GFX11-TRUE16-NEXT: scratch_load_b32 v153, off, s32 offset:52
+; GFX11-TRUE16-NEXT: scratch_load_b32 v152, off, s32 offset:56
+; GFX11-TRUE16-NEXT: scratch_load_b32 v143, off, s32 offset:60
+; GFX11-TRUE16-NEXT: scratch_load_b32 v142, off, s32 offset:64
+; GFX11-TRUE16-NEXT: scratch_load_b32 v141, off, s32 offset:68
+; GFX11-TRUE16-NEXT: scratch_load_b32 v140, off, s32 offset:72
+; GFX11-TRUE16-NEXT: scratch_load_b32 v139, off, s32 offset:76
+; GFX11-TRUE16-NEXT: scratch_load_b32 v138, off, s32 offset:80
+; GFX11-TRUE16-NEXT: scratch_load_b32 v137, off, s32 offset:84
+; GFX11-TRUE16-NEXT: scratch_load_b32 v136, off, s32 offset:88
+; GFX11-TRUE16-NEXT: scratch_load_b32 v127, off, s32 offset:92
+; GFX11-TRUE16-NEXT: scratch_load_b32 v126, off, s32 offset:96
+; GFX11-TRUE16-NEXT: scratch_load_b32 v125, off, s32 offset:100
+; GFX11-TRUE16-NEXT: scratch_load_b32 v124, off, s32 offset:104
+; GFX11-TRUE16-NEXT: scratch_load_b32 v123, off, s32 offset:108
+; GFX11-TRUE16-NEXT: scratch_load_b32 v122, off, s32 offset:112
+; GFX11-TRUE16-NEXT: scratch_load_b32 v121, off, s32 offset:116
+; GFX11-TRUE16-NEXT: scratch_load_b32 v120, off, s32 offset:120
+; GFX11-TRUE16-NEXT: scratch_load_b32 v111, off, s32 offset:124
+; GFX11-TRUE16-NEXT: s_clause 0x1f
+; GFX11-TRUE16-NEXT: scratch_load_b32 v110, off, s32 offset:128
+; GFX11-TRUE16-NEXT: scratch_load_b32 v109, off, s32 offset:132
+; GFX11-TRUE16-NEXT: scratch_load_b32 v108, off, s32 offset:136
+; GFX11-TRUE16-NEXT: scratch_load_b32 v107, off, s32 offset:140
+; GFX11-TRUE16-NEXT: scratch_load_b32 v106, off, s32 offset:144
+; GFX11-TRUE16-NEXT: scratch_load_b32 v105, off, s32 offset:148
+; GFX11-TRUE16-NEXT: scratch_load_b32 v104, off, s32 offset:152
+; GFX11-TRUE16-NEXT: scratch_load_b32 v95, off, s32 offset:156
+; GFX11-TRUE16-NEXT: scratch_load_b32 v94, off, s32 offset:160
+; GFX11-TRUE16-NEXT: scratch_load_b32 v93, off, s32 offset:164
+; GFX11-TRUE16-NEXT: scratch_load_b32 v92, off, s32 offset:168
+; GFX11-TRUE16-NEXT: scratch_load_b32 v91, off, s32 offset:172
+; GFX11-TRUE16-NEXT: scratch_load_b32 v90, off, s32 offset:176
+; GFX11-TRUE16-NEXT: scratch_load_b32 v89, off, s32 offset:180
+; GFX11-TRUE16-NEXT: scratch_load_b32 v88, off, s32 offset:184
+; GFX11-TRUE16-NEXT: scratch_load_b32 v79, off, s32 offset:188
+; GFX11-TRUE16-NEXT: scratch_load_b32 v78, off, s32 offset:192
+; GFX11-TRUE16-NEXT: scratch_load_b32 v77, off, s32 offset:196
+; GFX11-TRUE16-NEXT: scratch_load_b32 v76, off, s32 offset:200
+; GFX11-TRUE16-NEXT: scratch_load_b32 v75, off, s32 offset:204
+; GFX11-TRUE16-NEXT: scratch_load_b32 v74, off, s32 offset:208
+; GFX11-TRUE16-NEXT: scratch_load_b32 v73, off, s32 offset:212
+; GFX11-TRUE16-NEXT: scratch_load_b32 v72, off, s32 offset:216
+; GFX11-TRUE16-NEXT: scratch_load_b32 v63, off, s32 offset:220
+; GFX11-TRUE16-NEXT: scratch_load_b32 v62, off, s32 offset:224
+; GFX11-TRUE16-NEXT: scratch_load_b32 v61, off, s32 offset:228
+; GFX11-TRUE16-NEXT: scratch_load_b32 v60, off, s32 offset:232
+; GFX11-TRUE16-NEXT: scratch_load_b32 v59, off, s32 offset:236
+; GFX11-TRUE16-NEXT: scratch_load_b32 v58, off, s32 offset:240
+; GFX11-TRUE16-NEXT: scratch_load_b32 v57, off, s32 offset:244
+; GFX11-TRUE16-NEXT: scratch_load_b32 v56, off, s32 offset:248
+; GFX11-TRUE16-NEXT: scratch_load_b32 v47, off, s32 offset:252
+; GFX11-TRUE16-NEXT: s_clause 0x6
+; GFX11-TRUE16-NEXT: scratch_load_b32 v46, off, s32 offset:256
+; GFX11-TRUE16-NEXT: scratch_load_b32 v45, off, s32 offset:260
+; GFX11-TRUE16-NEXT: scratch_load_b32 v44, off, s32 offset:264
+; GFX11-TRUE16-NEXT: scratch_load_b32 v43, off, s32 offset:268
+; GFX11-TRUE16-NEXT: scratch_load_b32 v42, off, s32 offset:272
+; GFX11-TRUE16-NEXT: scratch_load_b32 v41, off, s32 offset:276
+; GFX11-TRUE16-NEXT: scratch_load_b32 v40, off, s32 offset:280
+; GFX11-TRUE16-NEXT: v_dual_mov_b32 v0, v135 :: v_dual_mov_b32 v1, v134
+; GFX11-TRUE16-NEXT: v_dual_mov_b32 v2, v132 :: v_dual_mov_b32 v3, v129
+; GFX11-TRUE16-NEXT: v_dual_mov_b32 v10, v80 :: v_dual_mov_b32 v11, v69
+; GFX11-TRUE16-NEXT: v_dual_mov_b32 v24, v182 :: v_dual_mov_b32 v27, v179
+; GFX11-TRUE16-NEXT: v_dual_mov_b32 v26, v180 :: v_dual_mov_b32 v29, v177
+; GFX11-TRUE16-NEXT: v_dual_mov_b32 v28, v178 :: v_dual_mov_b32 v31, v167
+; GFX11-TRUE16-NEXT: v_mov_b32_e32 v30, v176
+; GFX11-TRUE16-NEXT: s_waitcnt vmcnt(0)
+; GFX11-TRUE16-NEXT: s_setpc_b64 s[30:31]
+; GFX11-TRUE16-NEXT: .LBB19_4:
+; GFX11-TRUE16-NEXT: ; implicit-def: $vgpr0_vgpr1_vgpr2_vgpr3_vgpr4_vgpr5_vgpr6_vgpr7_vgpr8_vgpr9_vgpr10_vgpr11_vgpr12_vgpr13_vgpr14_vgpr15_vgpr16_vgpr17_vgpr18_vgpr19_vgpr20_vgpr21_vgpr22_vgpr23_vgpr24_vgpr25_vgpr26_vgpr27_vgpr28_vgpr29_vgpr30_vgpr31
+; GFX11-TRUE16-NEXT: ; implicit-def: $vgpr16_vgpr17_vgpr18_vgpr19_vgpr20_vgpr21_vgpr22_vgpr23_vgpr24_vgpr25_vgpr26_vgpr27_vgpr28_vgpr29_vgpr30_vgpr31_vgpr32_vgpr33_vgpr34_vgpr35_vgpr36_vgpr37_vgpr38_vgpr39_vgpr40_vgpr41_vgpr42_vgpr43_vgpr44_vgpr45_vgpr46_vgpr47
+; GFX11-TRUE16-NEXT: ; implicit-def: $vgpr31_vgpr32_vgpr33_vgpr34_vgpr35_vgpr36_vgpr37_vgpr38_vgpr39_vgpr40_vgpr41_vgpr42_vgpr43_vgpr44_vgpr45_vgpr46_vgpr47_vgpr48_vgpr49_vgpr50_vgpr51_vgpr52_vgpr53_vgpr54_vgpr55_vgpr56_vgpr57_vgpr58_vgpr59_vgpr60_vgpr61_vgpr62
+; GFX11-TRUE16-NEXT: ; implicit-def: $vgpr45_vgpr46_vgpr47_vgpr48_vgpr49_vgpr50_vgpr51_vgpr52_vgpr53_vgpr54_vgpr55_vgpr56_vgpr57_vgpr58_vgpr59_vgpr60_vgpr61_vgpr62_vgpr63_vgpr64_vgpr65_vgpr66_vgpr67_vgpr68_vgpr69_vgpr70_vgpr71_vgpr72_vgpr73_vgpr74_vgpr75_vgpr76
+; GFX11-TRUE16-NEXT: ; implicit-def: $vgpr58_vgpr59_vgpr60_vgpr61_vgpr62_vgpr63_vgpr64_vgpr65_vgpr66_vgpr67_vgpr68_vgpr69_vgpr70_vgpr71_vgpr72_vgpr73_vgpr74_vgpr75_vgpr76_vgpr77_vgpr78_vgpr79_vgpr80_vgpr81_vgpr82_vgpr83_vgpr84_vgpr85_vgpr86_vgpr87_vgpr88_vgpr89
+; GFX11-TRUE16-NEXT: ; implicit-def: $vgpr70_vgpr71_vgpr72_vgpr73_vgpr74_vgpr75_vgpr76_vgpr77_vgpr78_vgpr79_vgpr80_vgpr81_vgpr82_vgpr83_vgpr84_vgpr85_vgpr86_vgpr87_vgpr88_vgpr89_vgpr90_vgpr91_vgpr92_vgpr93_vgpr94_vgpr95_vgpr96_vgpr97_vgpr98_vgpr99_vgpr100_vgpr101
+; GFX11-TRUE16-NEXT: ; implicit-def: $vgpr81_vgpr82_vgpr83_vgpr84_vgpr85_vgpr86_vgpr87_vgpr88_vgpr89_vgpr90_vgpr91_vgpr92_vgpr93_vgpr94_vgpr95_vgpr96_vgpr97_vgpr98_vgpr99_vgpr100_vgpr101_vgpr102_vgpr103_vgpr104_vgpr105_vgpr106_vgpr107_vgpr108_vgpr109_vgpr110_vgpr111_vgpr112
+; GFX11-TRUE16-NEXT: ; implicit-def: $vgpr91_vgpr92_vgpr93_vgpr94_vgpr95_vgpr96_vgpr97_vgpr98_vgpr99_vgpr100_vgpr101_vgpr102_vgpr103_vgpr104_vgpr105_vgpr106_vgpr107_vgpr108_vgpr109_vgpr110_vgpr111_vgpr112_vgpr113_vgpr114_vgpr115_vgpr116_vgpr117_vgpr118_vgpr119_vgpr120_vgpr121_vgpr122
+; GFX11-TRUE16-NEXT: ; implicit-def: $vgpr100_vgpr101_vgpr102_vgpr103_vgpr104_vgpr105_vgpr106_vgpr107_vgpr108_vgpr109_vgpr110_vgpr111_vgpr112_vgpr113_vgpr114_vgpr115_vgpr116_vgpr117_vgpr118_vgpr119_vgpr120_vgpr121_vgpr122_vgpr123_vgpr124_vgpr125_vgpr126_vgpr127_vgpr128_vgpr129_vgpr130_vgpr131
+; GFX11-TRUE16-NEXT: ; implicit-def: $vgpr108_vgpr109_vgpr110_vgpr111_vgpr112_vgpr113_vgpr114_vgpr115_vgpr116_vgpr117_vgpr118_vgpr119_vgpr120_vgpr121_vgpr122_vgpr123_vgpr124_vgpr125_vgpr126_vgpr127_vgpr128_vgpr129_vgpr130_vgpr131_vgpr132_vgpr133_vgpr134_vgpr135_vgpr136_vgpr137_vgpr138_vgpr139
+; GFX11-TRUE16-NEXT: ; implicit-def: $vgpr115_vgpr116_vgpr117_vgpr118_vgpr119_vgpr120_vgpr121_vgpr122_vgpr123_vgpr124_vgpr125_vgpr126_vgpr127_vgpr128_vgpr129_vgpr130_vgpr131_vgpr132_vgpr133_vgpr134_vgpr135_vgpr136_vgpr137_vgpr138_vgpr139_vgpr140_vgpr141_vgpr142_vgpr143_vgpr144_vgpr145_vgpr146
+; GFX11-TRUE16-NEXT: ; implicit-def: $vgpr121_vgpr122_vgpr123_vgpr124_vgpr125_vgpr126_vgpr127_vgpr128_vgpr129_vgpr130_vgpr131_vgpr132_vgpr133_vgpr134_vgpr135_vgpr136_vgpr137_vgpr138_vgpr139_vgpr140_vgpr141_vgpr142_vgpr143_vgpr144_vgpr145_vgpr146_vgpr147_vgpr148_vgpr149_vgpr150_vgpr151_vgpr152
+; GFX11-TRUE16-NEXT: ; implicit-def: $vgpr126_vgpr127_vgpr128_vgpr129_vgpr130_vgpr131_vgpr132_vgpr133_vgpr134_vgpr135_vgpr136_vgpr137_vgpr138_vgpr139_vgpr140_vgpr141_vgpr142_vgpr143_vgpr144_vgpr145_vgpr146_vgpr147_vgpr148_vgpr149_vgpr150_vgpr151_vgpr152_vgpr153_vgpr154_vgpr155_vgpr156_vgpr157
+; GFX11-TRUE16-NEXT: ; implicit-def: $vgpr130_vgpr131_vgpr132_vgpr133_vgpr134_vgpr135_vgpr136_vgpr137_vgpr138_vgpr139_vgpr140_vgpr141_vgpr142_vgpr143_vgpr144_vgpr145_vgpr146_vgpr147_vgpr148_vgpr149_vgpr150_vgpr151_vgpr152_vgpr153_vgpr154_vgpr155_vgpr156_vgpr157_vgpr158_vgpr159_vgpr160_vgpr161
+; GFX11-TRUE16-NEXT: ; implicit-def: $vgpr133_vgpr134_vgpr135_vgpr136_vgpr137_vgpr138_vgpr139_vgpr140_vgpr141_vgpr142_vgpr143_vgpr144_vgpr145_vgpr146_vgpr147_vgpr148_vgpr149_vgpr150_vgpr151_vgpr152_vgpr153_vgpr154_vgpr155_vgpr156_vgpr157_vgpr158_vgpr159_vgpr160_vgpr161_vgpr162_vgpr163_vgpr164
+; GFX11-TRUE16-NEXT: ; implicit-def: $vgpr135_vgpr136_vgpr137_vgpr138_vgpr139_vgpr140_vgpr141_vgpr142_vgpr143_vgpr144_vgpr145_vgpr146_vgpr147_vgpr148_vgpr149_vgpr150_vgpr151_vgpr152_vgpr153_vgpr154_vgpr155_vgpr156_vgpr157_vgpr158_vgpr159_vgpr160_vgpr161_vgpr162_vgpr163_vgpr164_vgpr165_vgpr166
+; GFX11-TRUE16-NEXT: s_branch .LBB19_2
+;
+; GFX11-FAKE16-LABEL: bitcast_v64bf16_to_v32i32_scalar:
+; GFX11-FAKE16: ; %bb.0:
+; GFX11-FAKE16-NEXT: s_waitcnt vmcnt(0) expcnt(0) lgkmcnt(0)
+; GFX11-FAKE16-NEXT: v_cmp_ne_u32_e32 vcc_lo, 0, v14
+; GFX11-FAKE16-NEXT: s_clause 0x1f
+; GFX11-FAKE16-NEXT: scratch_store_b32 off, v40, s32 offset:288
+; GFX11-FAKE16-NEXT: scratch_store_b32 off, v41, s32 offset:284
+; GFX11-FAKE16-NEXT: scratch_store_b32 off, v42, s32 offset:280
+; GFX11-FAKE16-NEXT: scratch_store_b32 off, v43, s32 offset:276
+; GFX11-FAKE16-NEXT: scratch_store_b32 off, v44, s32 offset:272
+; GFX11-FAKE16-NEXT: scratch_store_b32 off, v45, s32 offset:268
+; GFX11-FAKE16-NEXT: scratch_store_b32 off, v46, s32 offset:264
+; GFX11-FAKE16-NEXT: scratch_store_b32 off, v47, s32 offset:260
+; GFX11-FAKE16-NEXT: scratch_store_b32 off, v56, s32 offset:256
+; GFX11-FAKE16-NEXT: scratch_store_b32 off, v57, s32 offset:252
+; GFX11-FAKE16-NEXT: scratch_store_b32 off, v58, s32 offset:248
+; GFX11-FAKE16-NEXT: scratch_store_b32 off, v59, s32 offset:244
+; GFX11-FAKE16-NEXT: scratch_store_b32 off, v60, s32 offset:240
+; GFX11-FAKE16-NEXT: scratch_store_b32 off, v61, s32 offset:236
+; GFX11-FAKE16-NEXT: scratch_store_b32 off, v62, s32 offset:232
+; GFX11-FAKE16-NEXT: scratch_store_b32 off, v63, s32 offset:228
+; GFX11-FAKE16-NEXT: scratch_store_b32 off, v72, s32 offset:224
+; GFX11-FAKE16-NEXT: scratch_store_b32 off, v73, s32 offset:220
+; GFX11-FAKE16-NEXT: scratch_store_b32 off, v74, s32 offset:216
+; GFX11-FAKE16-NEXT: scratch_store_b32 off, v75, s32 offset:212
+; GFX11-FAKE16-NEXT: scratch_store_b32 off, v76, s32 offset:208
+; GFX11-FAKE16-NEXT: scratch_store_b32 off, v77, s32 offset:204
+; GFX11-FAKE16-NEXT: scratch_store_b32 off, v78, s32 offset:200
+; GFX11-FAKE16-NEXT: scratch_store_b32 off, v79, s32 offset:196
+; GFX11-FAKE16-NEXT: scratch_store_b32 off, v88, s32 offset:192
+; GFX11-FAKE16-NEXT: scratch_store_b32 off, v89, s32 offset:188
+; GFX11-FAKE16-NEXT: scratch_store_b32 off, v90, s32 offset:184
+; GFX11-FAKE16-NEXT: scratch_store_b32 off, v91, s32 offset:180
+; GFX11-FAKE16-NEXT: scratch_store_b32 off, v92, s32 offset:176
+; GFX11-FAKE16-NEXT: scratch_store_b32 off, v93, s32 offset:172
+; GFX11-FAKE16-NEXT: scratch_store_b32 off, v94, s32 offset:168
+; GFX11-FAKE16-NEXT: scratch_store_b32 off, v95, s32 offset:164
+; GFX11-FAKE16-NEXT: s_clause 0x1f
+; GFX11-FAKE16-NEXT: scratch_store_b32 off, v104, s32 offset:160
+; GFX11-FAKE16-NEXT: scratch_store_b32 off, v105, s32 offset:156
+; GFX11-FAKE16-NEXT: scratch_store_b32 off, v106, s32 offset:152
+; GFX11-FAKE16-NEXT: scratch_store_b32 off, v107, s32 offset:148
+; GFX11-FAKE16-NEXT: scratch_store_b32 off, v108, s32 offset:144
+; GFX11-FAKE16-NEXT: scratch_store_b32 off, v109, s32 offset:140
+; GFX11-FAKE16-NEXT: scratch_store_b32 off, v110, s32 offset:136
+; GFX11-FAKE16-NEXT: scratch_store_b32 off, v111, s32 offset:132
+; GFX11-FAKE16-NEXT: scratch_store_b32 off, v120, s32 offset:128
+; GFX11-FAKE16-NEXT: scratch_store_b32 off, v121, s32 offset:124
+; GFX11-FAKE16-NEXT: scratch_store_b32 off, v122, s32 offset:120
+; GFX11-FAKE16-NEXT: scratch_store_b32 off, v123, s32 offset:116
+; GFX11-FAKE16-NEXT: scratch_store_b32 off, v124, s32 offset:112
+; GFX11-FAKE16-NEXT: scratch_store_b32 off, v125, s32 offset:108
+; GFX11-FAKE16-NEXT: scratch_store_b32 off, v126, s32 offset:104
+; GFX11-FAKE16-NEXT: scratch_store_b32 off, v127, s32 offset:100
+; GFX11-FAKE16-NEXT: scratch_store_b32 off, v136, s32 offset:96
+; GFX11-FAKE16-NEXT: scratch_store_b32 off, v137, s32 offset:92
+; GFX11-FAKE16-NEXT: scratch_store_b32 off, v138, s32 offset:88
+; GFX11-FAKE16-NEXT: scratch_store_b32 off, v139, s32 offset:84
+; GFX11-FAKE16-NEXT: scratch_store_b32 off, v140, s32 offset:80
+; GFX11-FAKE16-NEXT: scratch_store_b32 off, v141, s32 offset:76
+; GFX11-FAKE16-NEXT: scratch_store_b32 off, v142, s32 offset:72
+; GFX11-FAKE16-NEXT: scratch_store_b32 off, v143, s32 offset:68
+; GFX11-FAKE16-NEXT: scratch_store_b32 off, v152, s32 offset:64
+; GFX11-FAKE16-NEXT: scratch_store_b32 off, v153, s32 offset:60
+; GFX11-FAKE16-NEXT: scratch_store_b32 off, v154, s32 offset:56
+; GFX11-FAKE16-NEXT: scratch_store_b32 off, v155, s32 offset:52
+; GFX11-FAKE16-NEXT: scratch_store_b32 off, v156, s32 offset:48
+; GFX11-FAKE16-NEXT: scratch_store_b32 off, v157, s32 offset:44
+; GFX11-FAKE16-NEXT: scratch_store_b32 off, v158, s32 offset:40
+; GFX11-FAKE16-NEXT: scratch_store_b32 off, v159, s32 offset:36
+; GFX11-FAKE16-NEXT: s_clause 0x8
+; GFX11-FAKE16-NEXT: scratch_store_b32 off, v168, s32 offset:32
+; GFX11-FAKE16-NEXT: scratch_store_b32 off, v169, s32 offset:28
+; GFX11-FAKE16-NEXT: scratch_store_b32 off, v170, s32 offset:24
+; GFX11-FAKE16-NEXT: scratch_store_b32 off, v171, s32 offset:20
+; GFX11-FAKE16-NEXT: scratch_store_b32 off, v172, s32 offset:16
+; GFX11-FAKE16-NEXT: scratch_store_b32 off, v173, s32 offset:12
+; GFX11-FAKE16-NEXT: scratch_store_b32 off, v174, s32 offset:8
+; GFX11-FAKE16-NEXT: scratch_store_b32 off, v175, s32 offset:4
+; GFX11-FAKE16-NEXT: scratch_store_b32 off, v184, s32
+; GFX11-FAKE16-NEXT: v_dual_mov_b32 v178, v13 :: v_dual_mov_b32 v179, v12
+; GFX11-FAKE16-NEXT: v_dual_mov_b32 v180, v11 :: v_dual_mov_b32 v181, v9
+; GFX11-FAKE16-NEXT: v_dual_mov_b32 v182, v10 :: v_dual_mov_b32 v169, v7
+; GFX11-FAKE16-NEXT: v_dual_mov_b32 v170, v8 :: v_dual_mov_b32 v177, v3
+; GFX11-FAKE16-NEXT: v_dual_mov_b32 v176, v6 :: v_dual_mov_b32 v171, v4
+; GFX11-FAKE16-NEXT: v_dual_mov_b32 v174, v5 :: v_dual_mov_b32 v173, v0
+; GFX11-FAKE16-NEXT: v_dual_mov_b32 v184, v2 :: v_dual_mov_b32 v175, v1
+; GFX11-FAKE16-NEXT: v_dual_mov_b32 v183, s28 :: v_dual_mov_b32 v172, s29
+; GFX11-FAKE16-NEXT: s_mov_b32 s4, 0
+; GFX11-FAKE16-NEXT: s_and_b32 s5, vcc_lo, exec_lo
+; GFX11-FAKE16-NEXT: s_cbranch_scc0 .LBB19_4
+; GFX11-FAKE16-NEXT: ; %bb.1: ; %cmp.false
+; GFX11-FAKE16-NEXT: v_dual_mov_b32 v32, s0 :: v_dual_mov_b32 v37, s2
+; GFX11-FAKE16-NEXT: v_dual_mov_b32 v34, s1 :: v_dual_mov_b32 v41, s3
+; GFX11-FAKE16-NEXT: v_dual_mov_b32 v46, s16 :: v_dual_mov_b32 v59, s18
+; GFX11-FAKE16-NEXT: v_dual_mov_b32 v52, s17 :: v_dual_mov_b32 v67, s19
+; GFX11-FAKE16-NEXT: v_dual_mov_b32 v76, s20 :: v_dual_mov_b32 v97, s22
+; GFX11-FAKE16-NEXT: v_dual_mov_b32 v86, s21 :: v_dual_mov_b32 v109, s23
+; GFX11-FAKE16-NEXT: v_dual_mov_b32 v122, s24 :: v_dual_mov_b32 v151, s26
+; GFX11-FAKE16-NEXT: v_dual_mov_b32 v136, s25 :: v_dual_mov_b32 v15, s27
+; GFX11-FAKE16-NEXT: s_and_not1_b32 vcc_lo, exec_lo, s4
+; GFX11-FAKE16-NEXT: s_cbranch_vccnz .LBB19_3
+; GFX11-FAKE16-NEXT: .LBB19_2: ; %cmp.true
+; GFX11-FAKE16-NEXT: s_and_b32 s5, s27, 0xffff0000
+; GFX11-FAKE16-NEXT: s_lshl_b32 s4, s27, 16
+; GFX11-FAKE16-NEXT: v_add_f32_e64 v1, 0x40c00000, s5
+; GFX11-FAKE16-NEXT: v_add_f32_e64 v0, 0x40c00000, s4
+; GFX11-FAKE16-NEXT: s_lshl_b32 s6, s26, 16
+; GFX11-FAKE16-NEXT: s_and_b32 s4, s26, 0xffff0000
+; GFX11-FAKE16-NEXT: v_add_f32_e64 v5, 0x40c00000, s6
+; GFX11-FAKE16-NEXT: v_bfe_u32 v4, v1, 16, 1
+; GFX11-FAKE16-NEXT: v_bfe_u32 v2, v0, 16, 1
+; GFX11-FAKE16-NEXT: v_or_b32_e32 v7, 0x400000, v1
+; GFX11-FAKE16-NEXT: v_add_f32_e64 v3, 0x40c00000, s4
+; GFX11-FAKE16-NEXT: v_or_b32_e32 v8, 0x400000, v0
+; GFX11-FAKE16-NEXT: v_add_nc_u32_e32 v4, v4, v1
+; GFX11-FAKE16-NEXT: v_bfe_u32 v10, v5, 16, 1
+; GFX11-FAKE16-NEXT: v_cmp_u_f32_e32 vcc_lo, v0, v0
+; GFX11-FAKE16-NEXT: v_bfe_u32 v9, v3, 16, 1
+; GFX11-FAKE16-NEXT: s_lshl_b32 s7, s25, 16
+; GFX11-FAKE16-NEXT: v_add_nc_u32_e32 v4, 0x7fff, v4
+; GFX11-FAKE16-NEXT: v_add_nc_u32_e32 v2, v2, v0
+; GFX11-FAKE16-NEXT: s_and_b32 s5, s25, 0xffff0000
+; GFX11-FAKE16-NEXT: s_and_b32 s4, s24, 0xffff0000
+; GFX11-FAKE16-NEXT: v_add_f32_e64 v6, 0x40c00000, s5
+; GFX11-FAKE16-NEXT: v_and_b32_e32 v51, 0xffff0000, v183
+; GFX11-FAKE16-NEXT: v_add_nc_u32_e32 v2, 0x7fff, v2
+; GFX11-FAKE16-NEXT: s_delay_alu instid0(VALU_DEP_1)
+; GFX11-FAKE16-NEXT: v_cndmask_b32_e32 v0, v2, v8, vcc_lo
+; GFX11-FAKE16-NEXT: v_add_nc_u32_e32 v8, v10, v5
+; GFX11-FAKE16-NEXT: v_cmp_u_f32_e32 vcc_lo, v1, v1
+; GFX11-FAKE16-NEXT: v_add_nc_u32_e32 v2, v9, v3
+; GFX11-FAKE16-NEXT: v_or_b32_e32 v9, 0x400000, v5
+; GFX11-FAKE16-NEXT: v_lshrrev_b32_e32 v0, 16, v0
+; GFX11-FAKE16-NEXT: v_bfe_u32 v10, v6, 16, 1
+; GFX11-FAKE16-NEXT: v_cndmask_b32_e32 v1, v4, v7, vcc_lo
+; GFX11-FAKE16-NEXT: v_add_nc_u32_e32 v7, 0x7fff, v8
+; GFX11-FAKE16-NEXT: v_add_f32_e64 v8, 0x40c00000, s7
+; GFX11-FAKE16-NEXT: v_or_b32_e32 v4, 0x400000, v3
+; GFX11-FAKE16-NEXT: v_add_nc_u32_e32 v2, 0x7fff, v2
+; GFX11-FAKE16-NEXT: v_lshrrev_b32_e32 v1, 16, v1
+; GFX11-FAKE16-NEXT: v_and_b32_e32 v0, 0xffff, v0
+; GFX11-FAKE16-NEXT: v_cmp_u_f32_e32 vcc_lo, v3, v3
+; GFX11-FAKE16-NEXT: v_bfe_u32 v3, v8, 16, 1
+; GFX11-FAKE16-NEXT: s_delay_alu instid0(VALU_DEP_3) | instskip(NEXT) | instid1(VALU_DEP_2)
+; GFX11-FAKE16-NEXT: v_lshl_or_b32 v15, v1, 16, v0
+; GFX11-FAKE16-NEXT: v_add_nc_u32_e32 v1, v3, v8
+; GFX11-FAKE16-NEXT: v_cndmask_b32_e32 v2, v2, v4, vcc_lo
+; GFX11-FAKE16-NEXT: v_cmp_u_f32_e32 vcc_lo, v5, v5
+; GFX11-FAKE16-NEXT: v_add_nc_u32_e32 v5, v10, v6
+; GFX11-FAKE16-NEXT: s_delay_alu instid0(VALU_DEP_4) | instskip(NEXT) | instid1(VALU_DEP_4)
+; GFX11-FAKE16-NEXT: v_add_nc_u32_e32 v1, 0x7fff, v1
+; GFX11-FAKE16-NEXT: v_lshrrev_b32_e32 v0, 16, v2
+; GFX11-FAKE16-NEXT: v_cndmask_b32_e32 v4, v7, v9, vcc_lo
+; GFX11-FAKE16-NEXT: s_delay_alu instid0(VALU_DEP_4)
+; GFX11-FAKE16-NEXT: v_add_nc_u32_e32 v3, 0x7fff, v5
+; GFX11-FAKE16-NEXT: v_add_f32_e64 v5, 0x40c00000, s4
+; GFX11-FAKE16-NEXT: v_cmp_u_f32_e32 vcc_lo, v6, v6
+; GFX11-FAKE16-NEXT: s_lshl_b32 s4, s24, 16
+; GFX11-FAKE16-NEXT: v_lshrrev_b32_e32 v2, 16, v4
+; GFX11-FAKE16-NEXT: v_or_b32_e32 v4, 0x400000, v6
+; GFX11-FAKE16-NEXT: v_or_b32_e32 v7, 0x400000, v8
+; GFX11-FAKE16-NEXT: v_add_f32_e64 v9, 0x40c00000, s4
+; GFX11-FAKE16-NEXT: s_and_b32 s4, s23, 0xffff0000
+; GFX11-FAKE16-NEXT: s_delay_alu instid0(VALU_DEP_3) | instskip(SKIP_4) | instid1(VALU_DEP_4)
+; GFX11-FAKE16-NEXT: v_cndmask_b32_e32 v3, v3, v4, vcc_lo
+; GFX11-FAKE16-NEXT: v_bfe_u32 v4, v5, 16, 1
+; GFX11-FAKE16-NEXT: v_cmp_u_f32_e32 vcc_lo, v8, v8
+; GFX11-FAKE16-NEXT: v_or_b32_e32 v8, 0x400000, v5
+; GFX11-FAKE16-NEXT: v_or_b32_e32 v10, 0x400000, v9
+; GFX11-FAKE16-NEXT: v_add_nc_u32_e32 v4, v4, v5
+; GFX11-FAKE16-NEXT: v_dual_cndmask_b32 v6, v1, v7 :: v_dual_and_b32 v1, 0xffff, v2
+; GFX11-FAKE16-NEXT: v_bfe_u32 v7, v9, 16, 1
+; GFX11-FAKE16-NEXT: v_lshrrev_b32_e32 v2, 16, v3
+; GFX11-FAKE16-NEXT: s_delay_alu instid0(VALU_DEP_4) | instskip(NEXT) | instid1(VALU_DEP_4)
+; GFX11-FAKE16-NEXT: v_add_nc_u32_e32 v4, 0x7fff, v4
+; GFX11-FAKE16-NEXT: v_lshrrev_b32_e32 v3, 16, v6
+; GFX11-FAKE16-NEXT: v_cmp_u_f32_e32 vcc_lo, v5, v5
+; GFX11-FAKE16-NEXT: v_add_nc_u32_e32 v6, v7, v9
+; GFX11-FAKE16-NEXT: v_add_f32_e64 v7, 0x40c00000, s4
+; GFX11-FAKE16-NEXT: s_lshl_b32 s4, s23, 16
+; GFX11-FAKE16-NEXT: v_lshl_or_b32 v151, v0, 16, v1
+; GFX11-FAKE16-NEXT: v_add_f32_e64 v12, 0x40c00000, s4
+; GFX11-FAKE16-NEXT: v_add_nc_u32_e32 v6, 0x7fff, v6
+; GFX11-FAKE16-NEXT: v_bfe_u32 v11, v7, 16, 1
+; GFX11-FAKE16-NEXT: v_cndmask_b32_e32 v5, v4, v8, vcc_lo
+; GFX11-FAKE16-NEXT: v_cmp_u_f32_e32 vcc_lo, v9, v9
+; GFX11-FAKE16-NEXT: s_and_b32 s4, s22, 0xffff0000
+; GFX11-FAKE16-NEXT: v_bfe_u32 v9, v12, 16, 1
+; GFX11-FAKE16-NEXT: v_add_nc_u32_e32 v8, v11, v7
+; GFX11-FAKE16-NEXT: v_and_b32_e32 v4, 0xffff, v3
+; GFX11-FAKE16-NEXT: v_cndmask_b32_e32 v6, v6, v10, vcc_lo
+; GFX11-FAKE16-NEXT: v_add_f32_e64 v10, 0x40c00000, s4
+; GFX11-FAKE16-NEXT: s_lshl_b32 s4, s22, 16
+; GFX11-FAKE16-NEXT: v_lshrrev_b32_e32 v3, 16, v5
+; GFX11-FAKE16-NEXT: v_add_f32_e64 v11, 0x40c00000, s4
+; GFX11-FAKE16-NEXT: v_lshrrev_b32_e32 v5, 16, v6
+; GFX11-FAKE16-NEXT: v_add_nc_u32_e32 v6, 0x7fff, v8
+; GFX11-FAKE16-NEXT: v_add_nc_u32_e32 v8, v9, v12
+; GFX11-FAKE16-NEXT: v_or_b32_e32 v9, 0x400000, v7
+; GFX11-FAKE16-NEXT: v_bfe_u32 v14, v10, 16, 1
+; GFX11-FAKE16-NEXT: v_cmp_u_f32_e32 vcc_lo, v7, v7
+; GFX11-FAKE16-NEXT: v_or_b32_e32 v13, 0x400000, v12
+; GFX11-FAKE16-NEXT: v_add_nc_u32_e32 v8, 0x7fff, v8
+; GFX11-FAKE16-NEXT: s_and_b32 s4, s21, 0xffff0000
+; GFX11-FAKE16-NEXT: v_cndmask_b32_e32 v7, v6, v9, vcc_lo
+; GFX11-FAKE16-NEXT: v_bfe_u32 v9, v11, 16, 1
+; GFX11-FAKE16-NEXT: v_cmp_u_f32_e32 vcc_lo, v12, v12
+; GFX11-FAKE16-NEXT: v_add_nc_u32_e32 v12, v14, v10
+; GFX11-FAKE16-NEXT: v_and_b32_e32 v6, 0xffff, v5
+; GFX11-FAKE16-NEXT: v_lshrrev_b32_e32 v5, 16, v7
+; GFX11-FAKE16-NEXT: v_dual_cndmask_b32 v8, v8, v13 :: v_dual_add_nc_u32 v7, v9, v11
+; GFX11-FAKE16-NEXT: s_delay_alu instid0(VALU_DEP_4)
+; GFX11-FAKE16-NEXT: v_add_nc_u32_e32 v9, 0x7fff, v12
+; GFX11-FAKE16-NEXT: v_or_b32_e32 v12, 0x400000, v10
+; GFX11-FAKE16-NEXT: v_add_f32_e64 v13, 0x40c00000, s4
+; GFX11-FAKE16-NEXT: v_cmp_u_f32_e32 vcc_lo, v10, v10
+; GFX11-FAKE16-NEXT: s_lshl_b32 s4, s21, 16
+; GFX11-FAKE16-NEXT: v_add_nc_u32_e32 v7, 0x7fff, v7
+; GFX11-FAKE16-NEXT: v_or_b32_e32 v14, 0x400000, v11
+; GFX11-FAKE16-NEXT: v_add_f32_e64 v16, 0x40c00000, s4
+; GFX11-FAKE16-NEXT: v_cndmask_b32_e32 v9, v9, v12, vcc_lo
+; GFX11-FAKE16-NEXT: v_bfe_u32 v10, v13, 16, 1
+; GFX11-FAKE16-NEXT: v_cmp_u_f32_e32 vcc_lo, v11, v11
+; GFX11-FAKE16-NEXT: v_lshrrev_b32_e32 v8, 16, v8
+; GFX11-FAKE16-NEXT: v_bfe_u32 v12, v16, 16, 1
+; GFX11-FAKE16-NEXT: s_and_b32 s4, s20, 0xffff0000
+; GFX11-FAKE16-NEXT: v_dual_cndmask_b32 v11, v7, v14 :: v_dual_add_nc_u32 v10, v10, v13
+; GFX11-FAKE16-NEXT: s_delay_alu instid0(VALU_DEP_3) | instskip(SKIP_2) | instid1(VALU_DEP_4)
+; GFX11-FAKE16-NEXT: v_and_b32_e32 v7, 0xffff, v8
+; GFX11-FAKE16-NEXT: v_lshrrev_b32_e32 v8, 16, v9
+; GFX11-FAKE16-NEXT: v_or_b32_e32 v14, 0x400000, v13
+; GFX11-FAKE16-NEXT: v_add_nc_u32_e32 v10, 0x7fff, v10
+; GFX11-FAKE16-NEXT: v_lshrrev_b32_e32 v9, 16, v11
+; GFX11-FAKE16-NEXT: v_add_nc_u32_e32 v11, v12, v16
+; GFX11-FAKE16-NEXT: v_add_f32_e64 v12, 0x40c00000, s4
+; GFX11-FAKE16-NEXT: v_cmp_u_f32_e32 vcc_lo, v13, v13
+; GFX11-FAKE16-NEXT: s_lshl_b32 s4, s20, 16
+; GFX11-FAKE16-NEXT: v_or_b32_e32 v17, 0x400000, v16
+; GFX11-FAKE16-NEXT: v_add_nc_u32_e32 v11, 0x7fff, v11
+; GFX11-FAKE16-NEXT: v_bfe_u32 v18, v12, 16, 1
+; GFX11-FAKE16-NEXT: v_add_f32_e64 v19, 0x40c00000, s4
+; GFX11-FAKE16-NEXT: v_cndmask_b32_e32 v13, v10, v14, vcc_lo
+; GFX11-FAKE16-NEXT: v_cmp_u_f32_e32 vcc_lo, v16, v16
+; GFX11-FAKE16-NEXT: s_and_b32 s4, s19, 0xffff0000
+; GFX11-FAKE16-NEXT: v_add_nc_u32_e32 v14, v18, v12
+; GFX11-FAKE16-NEXT: v_bfe_u32 v16, v19, 16, 1
+; GFX11-FAKE16-NEXT: v_and_b32_e32 v10, 0xffff, v9
+; GFX11-FAKE16-NEXT: v_cndmask_b32_e32 v11, v11, v17, vcc_lo
+; GFX11-FAKE16-NEXT: v_add_f32_e64 v17, 0x40c00000, s4
+; GFX11-FAKE16-NEXT: s_lshl_b32 s4, s19, 16
+; GFX11-FAKE16-NEXT: v_lshrrev_b32_e32 v9, 16, v13
+; GFX11-FAKE16-NEXT: v_add_nc_u32_e32 v13, 0x7fff, v14
+; GFX11-FAKE16-NEXT: v_add_nc_u32_e32 v14, v16, v19
+; GFX11-FAKE16-NEXT: v_or_b32_e32 v16, 0x400000, v12
+; GFX11-FAKE16-NEXT: v_add_f32_e64 v18, 0x40c00000, s4
+; GFX11-FAKE16-NEXT: v_bfe_u32 v21, v17, 16, 1
+; GFX11-FAKE16-NEXT: v_cmp_u_f32_e32 vcc_lo, v12, v12
+; GFX11-FAKE16-NEXT: v_lshrrev_b32_e32 v11, 16, v11
+; GFX11-FAKE16-NEXT: v_add_nc_u32_e32 v14, 0x7fff, v14
+; GFX11-FAKE16-NEXT: v_or_b32_e32 v20, 0x400000, v19
+; GFX11-FAKE16-NEXT: s_and_b32 s4, s18, 0xffff0000
+; GFX11-FAKE16-NEXT: v_cndmask_b32_e32 v13, v13, v16, vcc_lo
+; GFX11-FAKE16-NEXT: v_bfe_u32 v16, v18, 16, 1
+; GFX11-FAKE16-NEXT: v_cmp_u_f32_e32 vcc_lo, v19, v19
+; GFX11-FAKE16-NEXT: v_add_nc_u32_e32 v19, v21, v17
+; GFX11-FAKE16-NEXT: v_and_b32_e32 v12, 0xffff, v11
+; GFX11-FAKE16-NEXT: v_lshrrev_b32_e32 v11, 16, v13
+; GFX11-FAKE16-NEXT: v_dual_cndmask_b32 v14, v14, v20 :: v_dual_add_nc_u32 v13, v16, v18
+; GFX11-FAKE16-NEXT: s_delay_alu instid0(VALU_DEP_4)
+; GFX11-FAKE16-NEXT: v_add_nc_u32_e32 v16, 0x7fff, v19
+; GFX11-FAKE16-NEXT: v_or_b32_e32 v19, 0x400000, v17
+; GFX11-FAKE16-NEXT: v_add_f32_e64 v20, 0x40c00000, s4
+; GFX11-FAKE16-NEXT: v_cmp_u_f32_e32 vcc_lo, v17, v17
+; GFX11-FAKE16-NEXT: s_lshl_b32 s4, s18, 16
+; GFX11-FAKE16-NEXT: v_add_nc_u32_e32 v13, 0x7fff, v13
+; GFX11-FAKE16-NEXT: v_or_b32_e32 v21, 0x400000, v18
+; GFX11-FAKE16-NEXT: v_add_f32_e64 v22, 0x40c00000, s4
+; GFX11-FAKE16-NEXT: v_cndmask_b32_e32 v16, v16, v19, vcc_lo
+; GFX11-FAKE16-NEXT: v_bfe_u32 v17, v20, 16, 1
+; GFX11-FAKE16-NEXT: v_cmp_u_f32_e32 vcc_lo, v18, v18
+; GFX11-FAKE16-NEXT: v_lshrrev_b32_e32 v14, 16, v14
+; GFX11-FAKE16-NEXT: v_bfe_u32 v19, v22, 16, 1
+; GFX11-FAKE16-NEXT: s_and_b32 s4, s17, 0xffff0000
+; GFX11-FAKE16-NEXT: v_add_nc_u32_e32 v17, v17, v20
+; GFX11-FAKE16-NEXT: s_delay_alu instid0(VALU_DEP_3) | instskip(SKIP_2) | instid1(VALU_DEP_4)
+; GFX11-FAKE16-NEXT: v_dual_cndmask_b32 v18, v13, v21 :: v_dual_and_b32 v13, 0xffff, v14
+; GFX11-FAKE16-NEXT: v_lshrrev_b32_e32 v14, 16, v16
+; GFX11-FAKE16-NEXT: v_or_b32_e32 v21, 0x400000, v20
+; GFX11-FAKE16-NEXT: v_add_nc_u32_e32 v17, 0x7fff, v17
+; GFX11-FAKE16-NEXT: s_delay_alu instid0(VALU_DEP_4)
+; GFX11-FAKE16-NEXT: v_lshrrev_b32_e32 v16, 16, v18
+; GFX11-FAKE16-NEXT: v_add_nc_u32_e32 v18, v19, v22
+; GFX11-FAKE16-NEXT: v_add_f32_e64 v19, 0x40c00000, s4
+; GFX11-FAKE16-NEXT: v_cmp_u_f32_e32 vcc_lo, v20, v20
+; GFX11-FAKE16-NEXT: s_lshl_b32 s4, s17, 16
+; GFX11-FAKE16-NEXT: v_or_b32_e32 v23, 0x400000, v22
+; GFX11-FAKE16-NEXT: v_add_nc_u32_e32 v18, 0x7fff, v18
+; GFX11-FAKE16-NEXT: v_bfe_u32 v24, v19, 16, 1
+; GFX11-FAKE16-NEXT: v_add_f32_e64 v25, 0x40c00000, s4
+; GFX11-FAKE16-NEXT: v_cndmask_b32_e32 v20, v17, v21, vcc_lo
+; GFX11-FAKE16-NEXT: v_cmp_u_f32_e32 vcc_lo, v22, v22
+; GFX11-FAKE16-NEXT: s_and_b32 s4, s16, 0xffff0000
+; GFX11-FAKE16-NEXT: v_add_nc_u32_e32 v21, v24, v19
+; GFX11-FAKE16-NEXT: v_bfe_u32 v22, v25, 16, 1
+; GFX11-FAKE16-NEXT: v_dual_cndmask_b32 v18, v18, v23 :: v_dual_and_b32 v17, 0xffff, v16
+; GFX11-FAKE16-NEXT: v_add_f32_e64 v23, 0x40c00000, s4
+; GFX11-FAKE16-NEXT: s_lshl_b32 s4, s16, 16
+; GFX11-FAKE16-NEXT: v_lshrrev_b32_e32 v16, 16, v20
+; GFX11-FAKE16-NEXT: v_add_nc_u32_e32 v20, 0x7fff, v21
+; GFX11-FAKE16-NEXT: v_add_nc_u32_e32 v21, v22, v25
+; GFX11-FAKE16-NEXT: v_or_b32_e32 v22, 0x400000, v19
+; GFX11-FAKE16-NEXT: v_add_f32_e64 v24, 0x40c00000, s4
+; GFX11-FAKE16-NEXT: v_bfe_u32 v27, v23, 16, 1
+; GFX11-FAKE16-NEXT: v_cmp_u_f32_e32 vcc_lo, v19, v19
+; GFX11-FAKE16-NEXT: v_lshrrev_b32_e32 v18, 16, v18
+; GFX11-FAKE16-NEXT: v_add_nc_u32_e32 v21, 0x7fff, v21
+; GFX11-FAKE16-NEXT: v_or_b32_e32 v26, 0x400000, v25
+; GFX11-FAKE16-NEXT: s_and_b32 s4, s3, 0xffff0000
+; GFX11-FAKE16-NEXT: v_cndmask_b32_e32 v20, v20, v22, vcc_lo
+; GFX11-FAKE16-NEXT: v_bfe_u32 v22, v24, 16, 1
+; GFX11-FAKE16-NEXT: v_cmp_u_f32_e32 vcc_lo, v25, v25
+; GFX11-FAKE16-NEXT: v_add_nc_u32_e32 v25, v27, v23
+; GFX11-FAKE16-NEXT: v_and_b32_e32 v19, 0xffff, v18
+; GFX11-FAKE16-NEXT: v_lshrrev_b32_e32 v18, 16, v20
+; GFX11-FAKE16-NEXT: v_dual_cndmask_b32 v21, v21, v26 :: v_dual_add_nc_u32 v20, v22, v24
+; GFX11-FAKE16-NEXT: s_delay_alu instid0(VALU_DEP_4)
+; GFX11-FAKE16-NEXT: v_add_nc_u32_e32 v22, 0x7fff, v25
+; GFX11-FAKE16-NEXT: v_or_b32_e32 v25, 0x400000, v23
+; GFX11-FAKE16-NEXT: v_add_f32_e64 v26, 0x40c00000, s4
+; GFX11-FAKE16-NEXT: v_cmp_u_f32_e32 vcc_lo, v23, v23
+; GFX11-FAKE16-NEXT: s_lshl_b32 s3, s3, 16
+; GFX11-FAKE16-NEXT: v_add_nc_u32_e32 v20, 0x7fff, v20
+; GFX11-FAKE16-NEXT: v_or_b32_e32 v27, 0x400000, v24
+; GFX11-FAKE16-NEXT: v_add_f32_e64 v28, 0x40c00000, s3
+; GFX11-FAKE16-NEXT: v_cndmask_b32_e32 v22, v22, v25, vcc_lo
+; GFX11-FAKE16-NEXT: v_bfe_u32 v23, v26, 16, 1
+; GFX11-FAKE16-NEXT: v_cmp_u_f32_e32 vcc_lo, v24, v24
+; GFX11-FAKE16-NEXT: v_lshrrev_b32_e32 v21, 16, v21
+; GFX11-FAKE16-NEXT: v_bfe_u32 v25, v28, 16, 1
+; GFX11-FAKE16-NEXT: s_and_b32 s3, s2, 0xffff0000
+; GFX11-FAKE16-NEXT: v_dual_cndmask_b32 v24, v20, v27 :: v_dual_add_nc_u32 v23, v23, v26
+; GFX11-FAKE16-NEXT: s_delay_alu instid0(VALU_DEP_3) | instskip(SKIP_2) | instid1(VALU_DEP_4)
+; GFX11-FAKE16-NEXT: v_and_b32_e32 v20, 0xffff, v21
+; GFX11-FAKE16-NEXT: v_lshrrev_b32_e32 v21, 16, v22
+; GFX11-FAKE16-NEXT: v_or_b32_e32 v27, 0x400000, v26
+; GFX11-FAKE16-NEXT: v_add_nc_u32_e32 v23, 0x7fff, v23
+; GFX11-FAKE16-NEXT: v_lshrrev_b32_e32 v22, 16, v24
+; GFX11-FAKE16-NEXT: v_add_nc_u32_e32 v24, v25, v28
+; GFX11-FAKE16-NEXT: v_add_f32_e64 v25, 0x40c00000, s3
+; GFX11-FAKE16-NEXT: v_cmp_u_f32_e32 vcc_lo, v26, v26
+; GFX11-FAKE16-NEXT: s_lshl_b32 s2, s2, 16
+; GFX11-FAKE16-NEXT: v_or_b32_e32 v29, 0x400000, v28
+; GFX11-FAKE16-NEXT: v_add_nc_u32_e32 v24, 0x7fff, v24
+; GFX11-FAKE16-NEXT: v_bfe_u32 v30, v25, 16, 1
+; GFX11-FAKE16-NEXT: v_add_f32_e64 v31, 0x40c00000, s2
+; GFX11-FAKE16-NEXT: v_cndmask_b32_e32 v26, v23, v27, vcc_lo
+; GFX11-FAKE16-NEXT: v_cmp_u_f32_e32 vcc_lo, v28, v28
+; GFX11-FAKE16-NEXT: s_and_b32 s2, s1, 0xffff0000
+; GFX11-FAKE16-NEXT: v_add_nc_u32_e32 v27, v30, v25
+; GFX11-FAKE16-NEXT: v_bfe_u32 v28, v31, 16, 1
+; GFX11-FAKE16-NEXT: s_lshl_b32 s1, s1, 16
+; GFX11-FAKE16-NEXT: v_cndmask_b32_e32 v24, v24, v29, vcc_lo
+; GFX11-FAKE16-NEXT: v_add_f32_e64 v29, 0x40c00000, s2
+; GFX11-FAKE16-NEXT: v_and_b32_e32 v23, 0xffff, v22
+; GFX11-FAKE16-NEXT: v_lshrrev_b32_e32 v22, 16, v26
+; GFX11-FAKE16-NEXT: v_add_nc_u32_e32 v26, 0x7fff, v27
+; GFX11-FAKE16-NEXT: v_add_nc_u32_e32 v27, v28, v31
+; GFX11-FAKE16-NEXT: v_or_b32_e32 v28, 0x400000, v25
+; GFX11-FAKE16-NEXT: v_add_f32_e64 v30, 0x40c00000, s1
+; GFX11-FAKE16-NEXT: v_bfe_u32 v33, v29, 16, 1
+; GFX11-FAKE16-NEXT: v_cmp_u_f32_e32 vcc_lo, v25, v25
+; GFX11-FAKE16-NEXT: v_lshrrev_b32_e32 v24, 16, v24
+; GFX11-FAKE16-NEXT: v_add_nc_u32_e32 v27, 0x7fff, v27
+; GFX11-FAKE16-NEXT: v_or_b32_e32 v32, 0x400000, v31
+; GFX11-FAKE16-NEXT: s_and_b32 s1, s0, 0xffff0000
+; GFX11-FAKE16-NEXT: v_cndmask_b32_e32 v26, v26, v28, vcc_lo
+; GFX11-FAKE16-NEXT: v_bfe_u32 v28, v30, 16, 1
+; GFX11-FAKE16-NEXT: v_cmp_u_f32_e32 vcc_lo, v31, v31
+; GFX11-FAKE16-NEXT: v_add_nc_u32_e32 v31, v33, v29
+; GFX11-FAKE16-NEXT: v_and_b32_e32 v25, 0xffff, v24
+; GFX11-FAKE16-NEXT: v_lshrrev_b32_e32 v24, 16, v26
+; GFX11-FAKE16-NEXT: v_dual_cndmask_b32 v27, v27, v32 :: v_dual_add_nc_u32 v26, v28, v30
+; GFX11-FAKE16-NEXT: s_delay_alu instid0(VALU_DEP_4)
+; GFX11-FAKE16-NEXT: v_add_nc_u32_e32 v28, 0x7fff, v31
+; GFX11-FAKE16-NEXT: v_or_b32_e32 v31, 0x400000, v29
+; GFX11-FAKE16-NEXT: v_add_f32_e64 v32, 0x40c00000, s1
+; GFX11-FAKE16-NEXT: v_cmp_u_f32_e32 vcc_lo, v29, v29
+; GFX11-FAKE16-NEXT: s_lshl_b32 s0, s0, 16
+; GFX11-FAKE16-NEXT: v_add_nc_u32_e32 v26, 0x7fff, v26
+; GFX11-FAKE16-NEXT: v_or_b32_e32 v33, 0x400000, v30
+; GFX11-FAKE16-NEXT: v_add_f32_e64 v34, 0x40c00000, s0
+; GFX11-FAKE16-NEXT: v_cndmask_b32_e32 v28, v28, v31, vcc_lo
+; GFX11-FAKE16-NEXT: v_bfe_u32 v29, v32, 16, 1
+; GFX11-FAKE16-NEXT: v_cmp_u_f32_e32 vcc_lo, v30, v30
+; GFX11-FAKE16-NEXT: v_or_b32_e32 v35, 0x400000, v32
+; GFX11-FAKE16-NEXT: v_bfe_u32 v31, v34, 16, 1
+; GFX11-FAKE16-NEXT: v_or_b32_e32 v36, 0x400000, v34
+; GFX11-FAKE16-NEXT: v_lshrrev_b32_e32 v27, 16, v27
+; GFX11-FAKE16-NEXT: v_cndmask_b32_e32 v30, v26, v33, vcc_lo
+; GFX11-FAKE16-NEXT: v_lshrrev_b32_e32 v26, 16, v28
+; GFX11-FAKE16-NEXT: v_add_nc_u32_e32 v28, v29, v32
+; GFX11-FAKE16-NEXT: v_lshlrev_b32_e32 v33, 16, v178
+; GFX11-FAKE16-NEXT: v_cmp_u_f32_e32 vcc_lo, v32, v32
+; GFX11-FAKE16-NEXT: v_lshrrev_b32_e32 v29, 16, v30
+; GFX11-FAKE16-NEXT: v_add_nc_u32_e32 v30, v31, v34
+; GFX11-FAKE16-NEXT: v_and_b32_e32 v31, 0xffff0000, v178
+; GFX11-FAKE16-NEXT: v_add_nc_u32_e32 v28, 0x7fff, v28
+; GFX11-FAKE16-NEXT: v_add_f32_e32 v33, 0x40c00000, v33
+; GFX11-FAKE16-NEXT: v_lshl_or_b32 v109, v5, 16, v7
+; GFX11-FAKE16-NEXT: v_add_nc_u32_e32 v30, 0x7fff, v30
+; GFX11-FAKE16-NEXT: v_add_f32_e32 v31, 0x40c00000, v31
+; GFX11-FAKE16-NEXT: v_cndmask_b32_e32 v32, v28, v35, vcc_lo
+; GFX11-FAKE16-NEXT: v_bfe_u32 v37, v33, 16, 1
+; GFX11-FAKE16-NEXT: v_cmp_u_f32_e32 vcc_lo, v34, v34
+; GFX11-FAKE16-NEXT: v_and_b32_e32 v28, 0xffff, v29
+; GFX11-FAKE16-NEXT: v_bfe_u32 v35, v31, 16, 1
+; GFX11-FAKE16-NEXT: v_lshrrev_b32_e32 v29, 16, v32
+; GFX11-FAKE16-NEXT: v_add_nc_u32_e32 v32, v37, v33
+; GFX11-FAKE16-NEXT: v_cndmask_b32_e32 v30, v30, v36, vcc_lo
+; GFX11-FAKE16-NEXT: v_lshlrev_b32_e32 v36, 16, v179
+; GFX11-FAKE16-NEXT: v_add_nc_u32_e32 v34, v35, v31
+; GFX11-FAKE16-NEXT: v_or_b32_e32 v37, 0x400000, v33
+; GFX11-FAKE16-NEXT: v_add_nc_u32_e32 v32, 0x7fff, v32
+; GFX11-FAKE16-NEXT: v_cmp_u_f32_e32 vcc_lo, v33, v33
+; GFX11-FAKE16-NEXT: v_and_b32_e32 v35, 0xffff0000, v179
+; GFX11-FAKE16-NEXT: v_add_nc_u32_e32 v34, 0x7fff, v34
+; GFX11-FAKE16-NEXT: v_or_b32_e32 v38, 0x400000, v31
+; GFX11-FAKE16-NEXT: v_add_f32_e32 v36, 0x40c00000, v36
+; GFX11-FAKE16-NEXT: v_cndmask_b32_e32 v32, v32, v37, vcc_lo
+; GFX11-FAKE16-NEXT: v_cmp_u_f32_e32 vcc_lo, v31, v31
+; GFX11-FAKE16-NEXT: v_add_f32_e32 v35, 0x40c00000, v35
+; GFX11-FAKE16-NEXT: v_and_b32_e32 v37, 0xffff0000, v180
+; GFX11-FAKE16-NEXT: v_or_b32_e32 v39, 0x400000, v36
+; GFX11-FAKE16-NEXT: v_lshrrev_b32_e32 v32, 16, v32
+; GFX11-FAKE16-NEXT: v_cndmask_b32_e32 v31, v34, v38, vcc_lo
+; GFX11-FAKE16-NEXT: v_bfe_u32 v34, v36, 16, 1
+; GFX11-FAKE16-NEXT: v_bfe_u32 v33, v35, 16, 1
+; GFX11-FAKE16-NEXT: v_lshlrev_b32_e32 v38, 16, v180
+; GFX11-FAKE16-NEXT: v_cmp_u_f32_e32 vcc_lo, v36, v36
+; GFX11-FAKE16-NEXT: s_delay_alu instid0(VALU_DEP_4) | instskip(NEXT) | instid1(VALU_DEP_4)
+; GFX11-FAKE16-NEXT: v_dual_add_f32 v37, 0x40c00000, v37 :: v_dual_add_nc_u32 v34, v34, v36
+; GFX11-FAKE16-NEXT: v_add_nc_u32_e32 v33, v33, v35
+; GFX11-FAKE16-NEXT: v_or_b32_e32 v48, 0x400000, v35
+; GFX11-FAKE16-NEXT: v_add_f32_e32 v38, 0x40c00000, v38
+; GFX11-FAKE16-NEXT: v_lshrrev_b32_e32 v31, 16, v31
+; GFX11-FAKE16-NEXT: v_add_nc_u32_e32 v34, 0x7fff, v34
+; GFX11-FAKE16-NEXT: v_add_nc_u32_e32 v33, 0x7fff, v33
+; GFX11-FAKE16-NEXT: v_and_b32_e32 v32, 0xffff, v32
+; GFX11-FAKE16-NEXT: v_bfe_u32 v36, v37, 16, 1
+; GFX11-FAKE16-NEXT: v_lshrrev_b32_e32 v30, 16, v30
+; GFX11-FAKE16-NEXT: v_cndmask_b32_e32 v34, v34, v39, vcc_lo
+; GFX11-FAKE16-NEXT: v_cmp_u_f32_e32 vcc_lo, v35, v35
+; GFX11-FAKE16-NEXT: v_bfe_u32 v35, v38, 16, 1
+; GFX11-FAKE16-NEXT: v_lshl_or_b32 v178, v31, 16, v32
+; GFX11-FAKE16-NEXT: v_add_nc_u32_e32 v31, v36, v37
+; GFX11-FAKE16-NEXT: v_dual_cndmask_b32 v33, v33, v48 :: v_dual_lshlrev_b32 v36, 16, v182
+; GFX11-FAKE16-NEXT: v_lshrrev_b32_e32 v34, 16, v34
+; GFX11-FAKE16-NEXT: v_or_b32_e32 v39, 0x400000, v38
+; GFX11-FAKE16-NEXT: v_cmp_u_f32_e32 vcc_lo, v38, v38
+; GFX11-FAKE16-NEXT: s_delay_alu instid0(VALU_DEP_4)
+; GFX11-FAKE16-NEXT: v_add_f32_e32 v36, 0x40c00000, v36
+; GFX11-FAKE16-NEXT: v_lshrrev_b32_e32 v32, 16, v33
+; GFX11-FAKE16-NEXT: v_add_nc_u32_e32 v33, v35, v38
+; GFX11-FAKE16-NEXT: v_and_b32_e32 v35, 0xffff0000, v182
+; GFX11-FAKE16-NEXT: v_and_b32_e32 v34, 0xffff, v34
+; GFX11-FAKE16-NEXT: v_or_b32_e32 v48, 0x400000, v37
+; GFX11-FAKE16-NEXT: v_add_nc_u32_e32 v31, 0x7fff, v31
+; GFX11-FAKE16-NEXT: v_add_nc_u32_e32 v33, 0x7fff, v33
+; GFX11-FAKE16-NEXT: v_add_f32_e32 v35, 0x40c00000, v35
+; GFX11-FAKE16-NEXT: v_lshl_or_b32 v179, v32, 16, v34
+; GFX11-FAKE16-NEXT: v_and_b32_e32 v30, 0xffff, v30
+; GFX11-FAKE16-NEXT: v_lshl_or_b32 v136, v2, 16, v4
+; GFX11-FAKE16-NEXT: v_cndmask_b32_e32 v33, v33, v39, vcc_lo
+; GFX11-FAKE16-NEXT: v_bfe_u32 v38, v35, 16, 1
+; GFX11-FAKE16-NEXT: v_cmp_u_f32_e32 vcc_lo, v37, v37
+; GFX11-FAKE16-NEXT: v_bfe_u32 v37, v36, 16, 1
+; GFX11-FAKE16-NEXT: v_or_b32_e32 v39, 0x400000, v36
+; GFX11-FAKE16-NEXT: v_lshrrev_b32_e32 v33, 16, v33
+; GFX11-FAKE16-NEXT: v_dual_cndmask_b32 v31, v31, v48 :: v_dual_add_nc_u32 v38, v38, v35
+; GFX11-FAKE16-NEXT: s_delay_alu instid0(VALU_DEP_4) | instskip(SKIP_2) | instid1(VALU_DEP_4)
+; GFX11-FAKE16-NEXT: v_add_nc_u32_e32 v32, v37, v36
+; GFX11-FAKE16-NEXT: v_and_b32_e32 v37, 0xffff0000, v181
+; GFX11-FAKE16-NEXT: v_cmp_u_f32_e32 vcc_lo, v36, v36
+; GFX11-FAKE16-NEXT: v_add_nc_u32_e32 v34, 0x7fff, v38
+; GFX11-FAKE16-NEXT: v_lshlrev_b32_e32 v38, 16, v181
+; GFX11-FAKE16-NEXT: v_add_nc_u32_e32 v32, 0x7fff, v32
+; GFX11-FAKE16-NEXT: v_or_b32_e32 v48, 0x400000, v35
+; GFX11-FAKE16-NEXT: v_add_f32_e32 v37, 0x40c00000, v37
+; GFX11-FAKE16-NEXT: v_lshrrev_b32_e32 v31, 16, v31
+; GFX11-FAKE16-NEXT: v_add_f32_e32 v38, 0x40c00000, v38
+; GFX11-FAKE16-NEXT: v_cndmask_b32_e32 v32, v32, v39, vcc_lo
+; GFX11-FAKE16-NEXT: v_cmp_u_f32_e32 vcc_lo, v35, v35
+; GFX11-FAKE16-NEXT: v_and_b32_e32 v33, 0xffff, v33
+; GFX11-FAKE16-NEXT: v_bfe_u32 v35, v37, 16, 1
+; GFX11-FAKE16-NEXT: v_bfe_u32 v36, v38, 16, 1
+; GFX11-FAKE16-NEXT: v_lshrrev_b32_e32 v32, 16, v32
+; GFX11-FAKE16-NEXT: v_cndmask_b32_e32 v34, v34, v48, vcc_lo
+; GFX11-FAKE16-NEXT: v_lshl_or_b32 v180, v31, 16, v33
+; GFX11-FAKE16-NEXT: v_add_nc_u32_e32 v33, v35, v37
+; GFX11-FAKE16-NEXT: v_and_b32_e32 v35, 0xffff0000, v170
+; GFX11-FAKE16-NEXT: v_or_b32_e32 v39, 0x400000, v38
+; GFX11-FAKE16-NEXT: v_lshrrev_b32_e32 v31, 16, v34
+; GFX11-FAKE16-NEXT: v_add_nc_u32_e32 v34, v36, v38
+; GFX11-FAKE16-NEXT: s_delay_alu instid0(VALU_DEP_4) | instskip(SKIP_2) | instid1(VALU_DEP_4)
+; GFX11-FAKE16-NEXT: v_dual_add_f32 v35, 0x40c00000, v35 :: v_dual_lshlrev_b32 v36, 16, v170
+; GFX11-FAKE16-NEXT: v_cmp_u_f32_e32 vcc_lo, v38, v38
+; GFX11-FAKE16-NEXT: v_and_b32_e32 v32, 0xffff, v32
+; GFX11-FAKE16-NEXT: v_add_nc_u32_e32 v34, 0x7fff, v34
+; GFX11-FAKE16-NEXT: s_delay_alu instid0(VALU_DEP_4)
+; GFX11-FAKE16-NEXT: v_add_f32_e32 v36, 0x40c00000, v36
+; GFX11-FAKE16-NEXT: v_add_nc_u32_e32 v33, 0x7fff, v33
+; GFX11-FAKE16-NEXT: v_or_b32_e32 v48, 0x400000, v37
+; GFX11-FAKE16-NEXT: v_bfe_u32 v38, v35, 16, 1
+; GFX11-FAKE16-NEXT: v_cndmask_b32_e32 v34, v34, v39, vcc_lo
+; GFX11-FAKE16-NEXT: v_bfe_u32 v39, v36, 16, 1
+; GFX11-FAKE16-NEXT: v_cmp_u_f32_e32 vcc_lo, v37, v37
+; GFX11-FAKE16-NEXT: v_lshl_or_b32 v182, v31, 16, v32
+; GFX11-FAKE16-NEXT: v_add_nc_u32_e32 v37, v38, v35
+; GFX11-FAKE16-NEXT: v_lshrrev_b32_e32 v34, 16, v34
+; GFX11-FAKE16-NEXT: v_add_nc_u32_e32 v31, v39, v36
+; GFX11-FAKE16-NEXT: v_cndmask_b32_e32 v33, v33, v48, vcc_lo
+; GFX11-FAKE16-NEXT: v_or_b32_e32 v38, 0x400000, v36
+; GFX11-FAKE16-NEXT: v_cmp_u_f32_e32 vcc_lo, v36, v36
+; GFX11-FAKE16-NEXT: v_or_b32_e32 v48, 0x400000, v35
+; GFX11-FAKE16-NEXT: v_add_nc_u32_e32 v31, 0x7fff, v31
+; GFX11-FAKE16-NEXT: v_lshrrev_b32_e32 v32, 16, v33
+; GFX11-FAKE16-NEXT: v_and_b32_e32 v33, 0xffff, v34
+; GFX11-FAKE16-NEXT: v_add_nc_u32_e32 v34, 0x7fff, v37
+; GFX11-FAKE16-NEXT: v_and_b32_e32 v37, 0xffff0000, v169
+; GFX11-FAKE16-NEXT: v_cndmask_b32_e32 v31, v31, v38, vcc_lo
+; GFX11-FAKE16-NEXT: v_cmp_u_f32_e32 vcc_lo, v35, v35
+; GFX11-FAKE16-NEXT: v_lshlrev_b32_e32 v39, 16, v169
+; GFX11-FAKE16-NEXT: v_lshl_or_b32 v181, v32, 16, v33
+; GFX11-FAKE16-NEXT: v_add_f32_e32 v37, 0x40c00000, v37
+; GFX11-FAKE16-NEXT: v_and_b32_e32 v38, 0xffff0000, v176
+; GFX11-FAKE16-NEXT: v_cndmask_b32_e32 v34, v34, v48, vcc_lo
+; GFX11-FAKE16-NEXT: v_add_f32_e32 v36, 0x40c00000, v39
+; GFX11-FAKE16-NEXT: v_lshrrev_b32_e32 v31, 16, v31
+; GFX11-FAKE16-NEXT: v_bfe_u32 v35, v37, 16, 1
+; GFX11-FAKE16-NEXT: v_or_b32_e32 v39, 0x400000, v37
+; GFX11-FAKE16-NEXT: v_lshrrev_b32_e32 v33, 16, v34
+; GFX11-FAKE16-NEXT: v_bfe_u32 v32, v36, 16, 1
+; GFX11-FAKE16-NEXT: v_add_f32_e32 v38, 0x40c00000, v38
+; GFX11-FAKE16-NEXT: v_add_nc_u32_e32 v34, v35, v37
+; GFX11-FAKE16-NEXT: v_lshlrev_b32_e32 v35, 16, v176
+; GFX11-FAKE16-NEXT: v_cmp_u_f32_e32 vcc_lo, v37, v37
+; GFX11-FAKE16-NEXT: v_add_nc_u32_e32 v32, v32, v36
+; GFX11-FAKE16-NEXT: v_and_b32_e32 v31, 0xffff, v31
+; GFX11-FAKE16-NEXT: v_add_nc_u32_e32 v34, 0x7fff, v34
+; GFX11-FAKE16-NEXT: v_add_f32_e32 v35, 0x40c00000, v35
+; GFX11-FAKE16-NEXT: v_or_b32_e32 v48, 0x400000, v36
+; GFX11-FAKE16-NEXT: v_add_nc_u32_e32 v32, 0x7fff, v32
+; GFX11-FAKE16-NEXT: v_bfe_u32 v37, v38, 16, 1
+; GFX11-FAKE16-NEXT: v_cndmask_b32_e32 v34, v34, v39, vcc_lo
+; GFX11-FAKE16-NEXT: v_bfe_u32 v49, v35, 16, 1
+; GFX11-FAKE16-NEXT: v_cmp_u_f32_e32 vcc_lo, v36, v36
+; GFX11-FAKE16-NEXT: v_and_b32_e32 v27, 0xffff, v27
+; GFX11-FAKE16-NEXT: v_lshl_or_b32 v170, v33, 16, v31
+; GFX11-FAKE16-NEXT: v_lshrrev_b32_e32 v31, 16, v34
+; GFX11-FAKE16-NEXT: v_add_nc_u32_e32 v36, v49, v35
+; GFX11-FAKE16-NEXT: v_dual_cndmask_b32 v32, v32, v48 :: v_dual_add_nc_u32 v33, v37, v38
+; GFX11-FAKE16-NEXT: v_and_b32_e32 v37, 0xffff0000, v174
+; GFX11-FAKE16-NEXT: v_cmp_u_f32_e32 vcc_lo, v35, v35
+; GFX11-FAKE16-NEXT: s_delay_alu instid0(VALU_DEP_4) | instskip(SKIP_4) | instid1(VALU_DEP_4)
+; GFX11-FAKE16-NEXT: v_add_nc_u32_e32 v34, 0x7fff, v36
+; GFX11-FAKE16-NEXT: v_or_b32_e32 v36, 0x400000, v35
+; GFX11-FAKE16-NEXT: v_lshrrev_b32_e32 v32, 16, v32
+; GFX11-FAKE16-NEXT: v_add_nc_u32_e32 v33, 0x7fff, v33
+; GFX11-FAKE16-NEXT: v_or_b32_e32 v39, 0x400000, v38
+; GFX11-FAKE16-NEXT: v_dual_add_f32 v35, 0x40c00000, v37 :: v_dual_cndmask_b32 v34, v34, v36
+; GFX11-FAKE16-NEXT: v_lshlrev_b32_e32 v36, 16, v174
+; GFX11-FAKE16-NEXT: v_cmp_u_f32_e32 vcc_lo, v38, v38
+; GFX11-FAKE16-NEXT: v_and_b32_e32 v32, 0xffff, v32
+; GFX11-FAKE16-NEXT: s_delay_alu instid0(VALU_DEP_4) | instskip(SKIP_2) | instid1(VALU_DEP_4)
+; GFX11-FAKE16-NEXT: v_bfe_u32 v37, v35, 16, 1
+; GFX11-FAKE16-NEXT: v_lshrrev_b32_e32 v34, 16, v34
+; GFX11-FAKE16-NEXT: v_dual_add_f32 v36, 0x40c00000, v36 :: v_dual_cndmask_b32 v33, v33, v39
+; GFX11-FAKE16-NEXT: v_lshl_or_b32 v169, v31, 16, v32
+; GFX11-FAKE16-NEXT: s_delay_alu instid0(VALU_DEP_4) | instskip(NEXT) | instid1(VALU_DEP_4)
+; GFX11-FAKE16-NEXT: v_add_nc_u32_e32 v37, v37, v35
+; GFX11-FAKE16-NEXT: v_and_b32_e32 v34, 0xffff, v34
+; GFX11-FAKE16-NEXT: s_delay_alu instid0(VALU_DEP_4)
+; GFX11-FAKE16-NEXT: v_bfe_u32 v31, v36, 16, 1
+; GFX11-FAKE16-NEXT: v_lshrrev_b32_e32 v33, 16, v33
+; GFX11-FAKE16-NEXT: v_and_b32_e32 v32, 0xffff0000, v171
+; GFX11-FAKE16-NEXT: v_cmp_u_f32_e32 vcc_lo, v35, v35
+; GFX11-FAKE16-NEXT: v_lshlrev_b32_e32 v38, 16, v177
+; GFX11-FAKE16-NEXT: v_add_nc_u32_e32 v31, v31, v36
+; GFX11-FAKE16-NEXT: v_lshl_or_b32 v176, v33, 16, v34
+; GFX11-FAKE16-NEXT: v_add_nc_u32_e32 v33, 0x7fff, v37
+; GFX11-FAKE16-NEXT: v_or_b32_e32 v34, 0x400000, v35
+; GFX11-FAKE16-NEXT: v_dual_add_f32 v32, 0x40c00000, v32 :: v_dual_lshlrev_b32 v37, 16, v171
+; GFX11-FAKE16-NEXT: v_add_nc_u32_e32 v31, 0x7fff, v31
+; GFX11-FAKE16-NEXT: v_add_f32_e32 v38, 0x40c00000, v38
+; GFX11-FAKE16-NEXT: s_delay_alu instid0(VALU_DEP_4)
+; GFX11-FAKE16-NEXT: v_cndmask_b32_e32 v33, v33, v34, vcc_lo
+; GFX11-FAKE16-NEXT: v_or_b32_e32 v34, 0x400000, v36
+; GFX11-FAKE16-NEXT: v_add_f32_e32 v35, 0x40c00000, v37
+; GFX11-FAKE16-NEXT: v_bfe_u32 v37, v32, 16, 1
+; GFX11-FAKE16-NEXT: v_cmp_u_f32_e32 vcc_lo, v36, v36
+; GFX11-FAKE16-NEXT: v_or_b32_e32 v39, 0x400000, v32
+; GFX11-FAKE16-NEXT: v_bfe_u32 v50, v38, 16, 1
+; GFX11-FAKE16-NEXT: v_or_b32_e32 v48, 0x400000, v35
+; GFX11-FAKE16-NEXT: v_dual_cndmask_b32 v31, v31, v34 :: v_dual_add_nc_u32 v36, v37, v32
+; GFX11-FAKE16-NEXT: v_bfe_u32 v34, v35, 16, 1
+; GFX11-FAKE16-NEXT: v_and_b32_e32 v37, 0xffff0000, v177
+; GFX11-FAKE16-NEXT: v_cmp_u_f32_e32 vcc_lo, v32, v32
+; GFX11-FAKE16-NEXT: s_delay_alu instid0(VALU_DEP_4) | instskip(SKIP_1) | instid1(VALU_DEP_4)
+; GFX11-FAKE16-NEXT: v_add_nc_u32_e32 v36, 0x7fff, v36
+; GFX11-FAKE16-NEXT: v_lshrrev_b32_e32 v31, 16, v31
+; GFX11-FAKE16-NEXT: v_dual_add_f32 v37, 0x40c00000, v37 :: v_dual_add_nc_u32 v34, v34, v35
+; GFX11-FAKE16-NEXT: v_lshrrev_b32_e32 v33, 16, v33
+; GFX11-FAKE16-NEXT: s_delay_alu instid0(VALU_DEP_4) | instskip(SKIP_1) | instid1(VALU_DEP_4)
+; GFX11-FAKE16-NEXT: v_cndmask_b32_e32 v32, v36, v39, vcc_lo
+; GFX11-FAKE16-NEXT: v_cmp_u_f32_e32 vcc_lo, v35, v35
+; GFX11-FAKE16-NEXT: v_add_nc_u32_e32 v34, 0x7fff, v34
+; GFX11-FAKE16-NEXT: v_bfe_u32 v49, v37, 16, 1
+; GFX11-FAKE16-NEXT: v_add_nc_u32_e32 v36, v50, v38
+; GFX11-FAKE16-NEXT: v_or_b32_e32 v39, 0x400000, v38
+; GFX11-FAKE16-NEXT: v_lshlrev_b32_e32 v50, 16, v184
+; GFX11-FAKE16-NEXT: s_delay_alu instid0(VALU_DEP_4) | instskip(NEXT) | instid1(VALU_DEP_4)
+; GFX11-FAKE16-NEXT: v_dual_cndmask_b32 v34, v34, v48 :: v_dual_add_nc_u32 v35, v49, v37
+; GFX11-FAKE16-NEXT: v_add_nc_u32_e32 v36, 0x7fff, v36
+; GFX11-FAKE16-NEXT: v_cmp_u_f32_e32 vcc_lo, v38, v38
+; GFX11-FAKE16-NEXT: v_and_b32_e32 v48, 0xffff0000, v184
+; GFX11-FAKE16-NEXT: s_delay_alu instid0(VALU_DEP_4)
+; GFX11-FAKE16-NEXT: v_lshrrev_b32_e32 v34, 16, v34
+; GFX11-FAKE16-NEXT: v_add_nc_u32_e32 v35, 0x7fff, v35
+; GFX11-FAKE16-NEXT: v_or_b32_e32 v49, 0x400000, v37
+; GFX11-FAKE16-NEXT: v_cndmask_b32_e32 v36, v36, v39, vcc_lo
+; GFX11-FAKE16-NEXT: v_cmp_u_f32_e32 vcc_lo, v37, v37
+; GFX11-FAKE16-NEXT: v_add_f32_e32 v37, 0x40c00000, v50
+; GFX11-FAKE16-NEXT: v_and_b32_e32 v31, 0xffff, v31
+; GFX11-FAKE16-NEXT: v_lshrrev_b32_e32 v32, 16, v32
+; GFX11-FAKE16-NEXT: v_dual_add_f32 v38, 0x40c00000, v48 :: v_dual_cndmask_b32 v35, v35, v49
+; GFX11-FAKE16-NEXT: v_lshrrev_b32_e32 v36, 16, v36
+; GFX11-FAKE16-NEXT: v_and_b32_e32 v34, 0xffff, v34
+; GFX11-FAKE16-NEXT: v_bfe_u32 v48, v37, 16, 1
+; GFX11-FAKE16-NEXT: s_delay_alu instid0(VALU_DEP_4)
+; GFX11-FAKE16-NEXT: v_bfe_u32 v39, v38, 16, 1
+; GFX11-FAKE16-NEXT: v_lshrrev_b32_e32 v35, 16, v35
+; GFX11-FAKE16-NEXT: v_and_b32_e32 v36, 0xffff, v36
+; GFX11-FAKE16-NEXT: v_lshl_or_b32 v174, v33, 16, v31
+; GFX11-FAKE16-NEXT: v_lshl_or_b32 v171, v32, 16, v34
+; GFX11-FAKE16-NEXT: v_add_nc_u32_e32 v31, v48, v37
+; GFX11-FAKE16-NEXT: v_and_b32_e32 v33, 0xffff0000, v175
+; GFX11-FAKE16-NEXT: v_lshlrev_b32_e32 v34, 16, v175
+; GFX11-FAKE16-NEXT: v_add_nc_u32_e32 v39, v39, v38
+; GFX11-FAKE16-NEXT: v_lshl_or_b32 v177, v35, 16, v36
+; GFX11-FAKE16-NEXT: v_add_nc_u32_e32 v31, 0x7fff, v31
+; GFX11-FAKE16-NEXT: v_or_b32_e32 v35, 0x400000, v37
+; GFX11-FAKE16-NEXT: v_dual_add_f32 v33, 0x40c00000, v33 :: v_dual_add_f32 v34, 0x40c00000, v34
+; GFX11-FAKE16-NEXT: v_cmp_u_f32_e32 vcc_lo, v37, v37
+; GFX11-FAKE16-NEXT: v_add_nc_u32_e32 v32, 0x7fff, v39
+; GFX11-FAKE16-NEXT: v_or_b32_e32 v36, 0x400000, v38
+; GFX11-FAKE16-NEXT: s_delay_alu instid0(VALU_DEP_4)
+; GFX11-FAKE16-NEXT: v_bfe_u32 v37, v33, 16, 1
+; GFX11-FAKE16-NEXT: v_bfe_u32 v39, v34, 16, 1
+; GFX11-FAKE16-NEXT: v_cndmask_b32_e32 v31, v31, v35, vcc_lo
+; GFX11-FAKE16-NEXT: v_and_b32_e32 v35, 0xffff0000, v173
+; GFX11-FAKE16-NEXT: v_cmp_u_f32_e32 vcc_lo, v38, v38
+; GFX11-FAKE16-NEXT: v_lshlrev_b32_e32 v48, 16, v173
+; GFX11-FAKE16-NEXT: v_or_b32_e32 v49, 0x400000, v33
+; GFX11-FAKE16-NEXT: v_lshrrev_b32_e32 v31, 16, v31
+; GFX11-FAKE16-NEXT: v_dual_add_f32 v35, 0x40c00000, v35 :: v_dual_cndmask_b32 v32, v32, v36
+; GFX11-FAKE16-NEXT: v_add_nc_u32_e32 v36, v37, v33
+; GFX11-FAKE16-NEXT: v_add_nc_u32_e32 v37, v39, v34
+; GFX11-FAKE16-NEXT: v_or_b32_e32 v39, 0x400000, v34
+; GFX11-FAKE16-NEXT: s_delay_alu instid0(VALU_DEP_4)
+; GFX11-FAKE16-NEXT: v_bfe_u32 v38, v35, 16, 1
+; GFX11-FAKE16-NEXT: v_cmp_u_f32_e32 vcc_lo, v34, v34
+; GFX11-FAKE16-NEXT: v_add_nc_u32_e32 v36, 0x7fff, v36
+; GFX11-FAKE16-NEXT: v_add_nc_u32_e32 v37, 0x7fff, v37
+; GFX11-FAKE16-NEXT: v_lshrrev_b32_e32 v32, 16, v32
+; GFX11-FAKE16-NEXT: v_add_nc_u32_e32 v38, v38, v35
+; GFX11-FAKE16-NEXT: v_and_b32_e32 v31, 0xffff, v31
+; GFX11-FAKE16-NEXT: v_lshl_or_b32 v122, v3, 16, v6
+; GFX11-FAKE16-NEXT: v_cndmask_b32_e32 v34, v37, v39, vcc_lo
+; GFX11-FAKE16-NEXT: v_cmp_u_f32_e32 vcc_lo, v33, v33
+; GFX11-FAKE16-NEXT: v_add_nc_u32_e32 v37, 0x7fff, v38
+; GFX11-FAKE16-NEXT: v_add_f32_e32 v38, 0x40c00000, v48
+; GFX11-FAKE16-NEXT: v_or_b32_e32 v39, 0x400000, v35
+; GFX11-FAKE16-NEXT: v_dual_cndmask_b32 v33, v36, v49 :: v_dual_lshlrev_b32 v48, 16, v183
+; GFX11-FAKE16-NEXT: v_cmp_u_f32_e32 vcc_lo, v35, v35
+; GFX11-FAKE16-NEXT: s_delay_alu instid0(VALU_DEP_4) | instskip(SKIP_1) | instid1(VALU_DEP_4)
+; GFX11-FAKE16-NEXT: v_bfe_u32 v36, v38, 16, 1
+; GFX11-FAKE16-NEXT: v_or_b32_e32 v49, 0x400000, v38
+; GFX11-FAKE16-NEXT: v_add_f32_e32 v48, 0x40c00000, v48
+; GFX11-FAKE16-NEXT: v_lshrrev_b32_e32 v34, 16, v34
+; GFX11-FAKE16-NEXT: v_cndmask_b32_e32 v35, v37, v39, vcc_lo
+; GFX11-FAKE16-NEXT: v_and_b32_e32 v37, 0xffff0000, v172
+; GFX11-FAKE16-NEXT: v_lshlrev_b32_e32 v39, 16, v172
+; GFX11-FAKE16-NEXT: v_add_nc_u32_e32 v36, v36, v38
+; GFX11-FAKE16-NEXT: v_cmp_u_f32_e32 vcc_lo, v38, v38
+; GFX11-FAKE16-NEXT: v_or_b32_e32 v55, 0x400000, v48
+; GFX11-FAKE16-NEXT: v_add_f32_e32 v37, 0x40c00000, v37
+; GFX11-FAKE16-NEXT: v_add_f32_e32 v39, 0x40c00000, v39
+; GFX11-FAKE16-NEXT: v_add_nc_u32_e32 v36, 0x7fff, v36
+; GFX11-FAKE16-NEXT: v_lshrrev_b32_e32 v33, 16, v33
+; GFX11-FAKE16-NEXT: v_and_b32_e32 v34, 0xffff, v34
+; GFX11-FAKE16-NEXT: v_bfe_u32 v50, v37, 16, 1
+; GFX11-FAKE16-NEXT: v_bfe_u32 v38, v39, 16, 1
+; GFX11-FAKE16-NEXT: v_cndmask_b32_e32 v36, v36, v49, vcc_lo
+; GFX11-FAKE16-NEXT: v_or_b32_e32 v54, 0x400000, v39
+; GFX11-FAKE16-NEXT: v_cmp_u_f32_e32 vcc_lo, v39, v39
+; GFX11-FAKE16-NEXT: v_dual_add_f32 v50, 0x40c00000, v51 :: v_dual_add_nc_u32 v49, v50, v37
+; GFX11-FAKE16-NEXT: v_bfe_u32 v51, v48, 16, 1
+; GFX11-FAKE16-NEXT: v_add_nc_u32_e32 v38, v38, v39
+; GFX11-FAKE16-NEXT: v_or_b32_e32 v53, 0x400000, v37
+; GFX11-FAKE16-NEXT: s_delay_alu instid0(VALU_DEP_4)
+; GFX11-FAKE16-NEXT: v_add_nc_u32_e32 v49, 0x7fff, v49
+; GFX11-FAKE16-NEXT: v_bfe_u32 v52, v50, 16, 1
+; GFX11-FAKE16-NEXT: v_add_nc_u32_e32 v51, v51, v48
+; GFX11-FAKE16-NEXT: v_add_nc_u32_e32 v38, 0x7fff, v38
+; GFX11-FAKE16-NEXT: v_lshrrev_b32_e32 v36, 16, v36
+; GFX11-FAKE16-NEXT: v_lshrrev_b32_e32 v35, 16, v35
+; GFX11-FAKE16-NEXT: v_add_nc_u32_e32 v52, v52, v50
+; GFX11-FAKE16-NEXT: s_delay_alu instid0(VALU_DEP_4) | instskip(SKIP_2) | instid1(VALU_DEP_4)
+; GFX11-FAKE16-NEXT: v_dual_cndmask_b32 v38, v38, v54 :: v_dual_add_nc_u32 v51, 0x7fff, v51
+; GFX11-FAKE16-NEXT: v_cmp_u_f32_e32 vcc_lo, v48, v48
+; GFX11-FAKE16-NEXT: v_and_b32_e32 v36, 0xffff, v36
+; GFX11-FAKE16-NEXT: v_add_nc_u32_e32 v39, 0x7fff, v52
+; GFX11-FAKE16-NEXT: v_or_b32_e32 v52, 0x400000, v50
+; GFX11-FAKE16-NEXT: v_lshrrev_b32_e32 v38, 16, v38
+; GFX11-FAKE16-NEXT: v_cndmask_b32_e32 v48, v51, v55, vcc_lo
+; GFX11-FAKE16-NEXT: v_cmp_u_f32_e32 vcc_lo, v37, v37
+; GFX11-FAKE16-NEXT: v_lshl_or_b32 v184, v32, 16, v31
+; GFX11-FAKE16-NEXT: v_lshl_or_b32 v175, v33, 16, v34
+; GFX11-FAKE16-NEXT: v_and_b32_e32 v38, 0xffff, v38
+; GFX11-FAKE16-NEXT: v_lshrrev_b32_e32 v48, 16, v48
+; GFX11-FAKE16-NEXT: v_cndmask_b32_e32 v37, v49, v53, vcc_lo
+; GFX11-FAKE16-NEXT: v_cmp_u_f32_e32 vcc_lo, v50, v50
+; GFX11-FAKE16-NEXT: v_lshl_or_b32 v173, v35, 16, v36
+; GFX11-FAKE16-NEXT: v_lshl_or_b32 v97, v8, 16, v10
+; GFX11-FAKE16-NEXT: v_and_b32_e32 v48, 0xffff, v48
+; GFX11-FAKE16-NEXT: v_lshrrev_b32_e32 v37, 16, v37
+; GFX11-FAKE16-NEXT: v_cndmask_b32_e32 v39, v39, v52, vcc_lo
+; GFX11-FAKE16-NEXT: v_lshl_or_b32 v86, v9, 16, v12
+; GFX11-FAKE16-NEXT: v_lshl_or_b32 v76, v11, 16, v13
+; GFX11-FAKE16-NEXT: v_lshl_or_b32 v67, v14, 16, v17
+; GFX11-FAKE16-NEXT: v_lshl_or_b32 v172, v37, 16, v38
+; GFX11-FAKE16-NEXT: v_lshrrev_b32_e32 v39, 16, v39
+; GFX11-FAKE16-NEXT: v_lshl_or_b32 v59, v16, 16, v19
+; GFX11-FAKE16-NEXT: v_lshl_or_b32 v52, v18, 16, v20
+; GFX11-FAKE16-NEXT: v_lshl_or_b32 v46, v21, 16, v23
+; GFX11-FAKE16-NEXT: v_lshl_or_b32 v41, v22, 16, v25
+; GFX11-FAKE16-NEXT: v_lshl_or_b32 v183, v39, 16, v48
+; GFX11-FAKE16-NEXT: v_lshl_or_b32 v37, v24, 16, v27
+; GFX11-FAKE16-NEXT: v_lshl_or_b32 v34, v26, 16, v28
+; GFX11-FAKE16-NEXT: v_lshl_or_b32 v32, v29, 16, v30
+; GFX11-FAKE16-NEXT: .LBB19_3: ; %end
+; GFX11-FAKE16-NEXT: v_dual_mov_b32 v3, v41 :: v_dual_mov_b32 v4, v46
+; GFX11-FAKE16-NEXT: v_dual_mov_b32 v6, v59 :: v_dual_mov_b32 v9, v86
+; GFX11-FAKE16-NEXT: v_dual_mov_b32 v7, v67 :: v_dual_mov_b32 v8, v76
+; GFX11-FAKE16-NEXT: v_dual_mov_b32 v10, v97 :: v_dual_mov_b32 v13, v136
+; GFX11-FAKE16-NEXT: v_dual_mov_b32 v11, v109 :: v_dual_mov_b32 v12, v122
+; GFX11-FAKE16-NEXT: v_dual_mov_b32 v14, v151 :: v_dual_mov_b32 v17, v172
+; GFX11-FAKE16-NEXT: v_dual_mov_b32 v18, v173 :: v_dual_mov_b32 v19, v175
+; GFX11-FAKE16-NEXT: v_dual_mov_b32 v20, v184 :: v_dual_mov_b32 v23, v174
+; GFX11-FAKE16-NEXT: v_dual_mov_b32 v22, v171 :: v_dual_mov_b32 v25, v169
+; GFX11-FAKE16-NEXT: v_dual_mov_b32 v26, v170 :: v_dual_mov_b32 v29, v180
+; GFX11-FAKE16-NEXT: s_clause 0x1f
+; GFX11-FAKE16-NEXT: scratch_load_b32 v184, off, s32
+; GFX11-FAKE16-NEXT: scratch_load_b32 v175, off, s32 offset:4
+; GFX11-FAKE16-NEXT: scratch_load_b32 v174, off, s32 offset:8
+; GFX11-FAKE16-NEXT: scratch_load_b32 v173, off, s32 offset:12
+; GFX11-FAKE16-NEXT: scratch_load_b32 v172, off, s32 offset:16
+; GFX11-FAKE16-NEXT: scratch_load_b32 v171, off, s32 offset:20
+; GFX11-FAKE16-NEXT: scratch_load_b32 v170, off, s32 offset:24
+; GFX11-FAKE16-NEXT: scratch_load_b32 v169, off, s32 offset:28
+; GFX11-FAKE16-NEXT: scratch_load_b32 v168, off, s32 offset:32
+; GFX11-FAKE16-NEXT: scratch_load_b32 v159, off, s32 offset:36
+; GFX11-FAKE16-NEXT: scratch_load_b32 v158, off, s32 offset:40
+; GFX11-FAKE16-NEXT: scratch_load_b32 v157, off, s32 offset:44
+; GFX11-FAKE16-NEXT: scratch_load_b32 v156, off, s32 offset:48
+; GFX11-FAKE16-NEXT: scratch_load_b32 v155, off, s32 offset:52
+; GFX11-FAKE16-NEXT: scratch_load_b32 v154, off, s32 offset:56
+; GFX11-FAKE16-NEXT: scratch_load_b32 v153, off, s32 offset:60
+; GFX11-FAKE16-NEXT: scratch_load_b32 v152, off, s32 offset:64
+; GFX11-FAKE16-NEXT: scratch_load_b32 v143, off, s32 offset:68
+; GFX11-FAKE16-NEXT: scratch_load_b32 v142, off, s32 offset:72
+; GFX11-FAKE16-NEXT: scratch_load_b32 v141, off, s32 offset:76
+; GFX11-FAKE16-NEXT: scratch_load_b32 v140, off, s32 offset:80
+; GFX11-FAKE16-NEXT: scratch_load_b32 v139, off, s32 offset:84
+; GFX11-FAKE16-NEXT: scratch_load_b32 v138, off, s32 offset:88
+; GFX11-FAKE16-NEXT: scratch_load_b32 v137, off, s32 offset:92
+; GFX11-FAKE16-NEXT: scratch_load_b32 v136, off, s32 offset:96
+; GFX11-FAKE16-NEXT: scratch_load_b32 v127, off, s32 offset:100
+; GFX11-FAKE16-NEXT: scratch_load_b32 v126, off, s32 offset:104
+; GFX11-FAKE16-NEXT: scratch_load_b32 v125, off, s32 offset:108
+; GFX11-FAKE16-NEXT: scratch_load_b32 v124, off, s32 offset:112
+; GFX11-FAKE16-NEXT: scratch_load_b32 v123, off, s32 offset:116
+; GFX11-FAKE16-NEXT: scratch_load_b32 v122, off, s32 offset:120
+; GFX11-FAKE16-NEXT: scratch_load_b32 v121, off, s32 offset:124
+; GFX11-FAKE16-NEXT: s_clause 0x1f
+; GFX11-FAKE16-NEXT: scratch_load_b32 v120, off, s32 offset:128
+; GFX11-FAKE16-NEXT: scratch_load_b32 v111, off, s32 offset:132
+; GFX11-FAKE16-NEXT: scratch_load_b32 v110, off, s32 offset:136
+; GFX11-FAKE16-NEXT: scratch_load_b32 v109, off, s32 offset:140
+; GFX11-FAKE16-NEXT: scratch_load_b32 v108, off, s32 offset:144
+; GFX11-FAKE16-NEXT: scratch_load_b32 v107, off, s32 offset:148
+; GFX11-FAKE16-NEXT: scratch_load_b32 v106, off, s32 offset:152
+; GFX11-FAKE16-NEXT: scratch_load_b32 v105, off, s32 offset:156
+; GFX11-FAKE16-NEXT: scratch_load_b32 v104, off, s32 offset:160
+; GFX11-FAKE16-NEXT: scratch_load_b32 v95, off, s32 offset:164
+; GFX11-FAKE16-NEXT: scratch_load_b32 v94, off, s32 offset:168
+; GFX11-FAKE16-NEXT: scratch_load_b32 v93, off, s32 offset:172
+; GFX11-FAKE16-NEXT: scratch_load_b32 v92, off, s32 offset:176
+; GFX11-FAKE16-NEXT: scratch_load_b32 v91, off, s32 offset:180
+; GFX11-FAKE16-NEXT: scratch_load_b32 v90, off, s32 offset:184
+; GFX11-FAKE16-NEXT: scratch_load_b32 v89, off, s32 offset:188
+; GFX11-FAKE16-NEXT: scratch_load_b32 v88, off, s32 offset:192
+; GFX11-FAKE16-NEXT: scratch_load_b32 v79, off, s32 offset:196
+; GFX11-FAKE16-NEXT: scratch_load_b32 v78, off, s32 offset:200
+; GFX11-FAKE16-NEXT: scratch_load_b32 v77, off, s32 offset:204
+; GFX11-FAKE16-NEXT: scratch_load_b32 v76, off, s32 offset:208
+; GFX11-FAKE16-NEXT: scratch_load_b32 v75, off, s32 offset:212
+; GFX11-FAKE16-NEXT: scratch_load_b32 v74, off, s32 offset:216
+; GFX11-FAKE16-NEXT: scratch_load_b32 v73, off, s32 offset:220
+; GFX11-FAKE16-NEXT: scratch_load_b32 v72, off, s32 offset:224
+; GFX11-FAKE16-NEXT: scratch_load_b32 v63, off, s32 offset:228
+; GFX11-FAKE16-NEXT: scratch_load_b32 v62, off, s32 offset:232
+; GFX11-FAKE16-NEXT: scratch_load_b32 v61, off, s32 offset:236
+; GFX11-FAKE16-NEXT: scratch_load_b32 v60, off, s32 offset:240
+; GFX11-FAKE16-NEXT: scratch_load_b32 v59, off, s32 offset:244
+; GFX11-FAKE16-NEXT: scratch_load_b32 v58, off, s32 offset:248
+; GFX11-FAKE16-NEXT: scratch_load_b32 v57, off, s32 offset:252
+; GFX11-FAKE16-NEXT: s_clause 0x8
+; GFX11-FAKE16-NEXT: scratch_load_b32 v56, off, s32 offset:256
+; GFX11-FAKE16-NEXT: scratch_load_b32 v47, off, s32 offset:260
+; GFX11-FAKE16-NEXT: scratch_load_b32 v46, off, s32 offset:264
+; GFX11-FAKE16-NEXT: scratch_load_b32 v45, off, s32 offset:268
+; GFX11-FAKE16-NEXT: scratch_load_b32 v44, off, s32 offset:272
+; GFX11-FAKE16-NEXT: scratch_load_b32 v43, off, s32 offset:276
+; GFX11-FAKE16-NEXT: scratch_load_b32 v42, off, s32 offset:280
+; GFX11-FAKE16-NEXT: scratch_load_b32 v41, off, s32 offset:284
+; GFX11-FAKE16-NEXT: scratch_load_b32 v40, off, s32 offset:288
+; GFX11-FAKE16-NEXT: v_dual_mov_b32 v0, v32 :: v_dual_mov_b32 v1, v34
+; GFX11-FAKE16-NEXT: v_dual_mov_b32 v2, v37 :: v_dual_mov_b32 v5, v52
+; GFX11-FAKE16-NEXT: v_dual_mov_b32 v16, v183 :: v_dual_mov_b32 v21, v177
+; GFX11-FAKE16-NEXT: v_dual_mov_b32 v24, v176 :: v_dual_mov_b32 v27, v181
+; GFX11-FAKE16-NEXT: v_mov_b32_e32 v28, v182
+; GFX11-FAKE16-NEXT: v_dual_mov_b32 v30, v179 :: v_dual_mov_b32 v31, v178
+; GFX11-FAKE16-NEXT: s_waitcnt vmcnt(0)
+; GFX11-FAKE16-NEXT: s_setpc_b64 s[30:31]
+; GFX11-FAKE16-NEXT: .LBB19_4:
+; GFX11-FAKE16-NEXT: ; implicit-def: $vgpr32_vgpr33_vgpr34_vgpr35_vgpr36_vgpr37_vgpr38_vgpr39_vgpr40_vgpr41_vgpr42_vgpr43_vgpr44_vgpr45_vgpr46_vgpr47_vgpr48_vgpr49_vgpr50_vgpr51_vgpr52_vgpr53_vgpr54_vgpr55_vgpr56_vgpr57_vgpr58_vgpr59_vgpr60_vgpr61_vgpr62_vgpr63
+; GFX11-FAKE16-NEXT: ; implicit-def: $vgpr33_vgpr34_vgpr35_vgpr36_vgpr37_vgpr38_vgpr39_vgpr40_vgpr41_vgpr42_vgpr43_vgpr44_vgpr45_vgpr46_vgpr47_vgpr48_vgpr49_vgpr50_vgpr51_vgpr52_vgpr53_vgpr54_vgpr55_vgpr56_vgpr57_vgpr58_vgpr59_vgpr60_vgpr61_vgpr62_vgpr63_vgpr64
+; GFX11-FAKE16-NEXT: ; implicit-def: $vgpr0_vgpr1_vgpr2_vgpr3_vgpr4_vgpr5_vgpr6_vgpr7_vgpr8_vgpr9_vgpr10_vgpr11_vgpr12_vgpr13_vgpr14_vgpr15_vgpr16_vgpr17_vgpr18_vgpr19_vgpr20_vgpr21_vgpr22_vgpr23_vgpr24_vgpr25_vgpr26_vgpr27_vgpr28_vgpr29_vgpr30_vgpr31
+; GFX11-FAKE16-NEXT: ; implicit-def: $vgpr35_vgpr36_vgpr37_vgpr38_vgpr39_vgpr40_vgpr41_vgpr42_vgpr43_vgpr44_vgpr45_vgpr46_vgpr47_vgpr48_vgpr49_vgpr50_vgpr51_vgpr52_vgpr53_vgpr54_vgpr55_vgpr56_vgpr57_vgpr58_vgpr59_vgpr60_vgpr61_vgpr62_vgpr63_vgpr64_vgpr65_vgpr66
+; GFX11-FAKE16-NEXT: ; implicit-def: $vgpr38_vgpr39_vgpr40_vgpr41_vgpr42_vgpr43_vgpr44_vgpr45_vgpr46_vgpr47_vgpr48_vgpr49_vgpr50_vgpr51_vgpr52_vgpr53_vgpr54_vgpr55_vgpr56_vgpr57_vgpr58_vgpr59_vgpr60_vgpr61_vgpr62_vgpr63_vgpr64_vgpr65_vgpr66_vgpr67_vgpr68_vgpr69
+; GFX11-FAKE16-NEXT: ; implicit-def: $vgpr42_vgpr43_vgpr44_vgpr45_vgpr46_vgpr47_vgpr48_vgpr49_vgpr50_vgpr51_vgpr52_vgpr53_vgpr54_vgpr55_vgpr56_vgpr57_vgpr58_vgpr59_vgpr60_vgpr61_vgpr62_vgpr63_vgpr64_vgpr65_vgpr66_vgpr67_vgpr68_vgpr69_vgpr70_vgpr71_vgpr72_vgpr73
+; GFX11-FAKE16-NEXT: ; implicit-def: $vgpr47_vgpr48_vgpr49_vgpr50_vgpr51_vgpr52_vgpr53_vgpr54_vgpr55_vgpr56_vgpr57_vgpr58_vgpr59_vgpr60_vgpr61_vgpr62_vgpr63_vgpr64_vgpr65_vgpr66_vgpr67_vgpr68_vgpr69_vgpr70_vgpr71_vgpr72_vgpr73_vgpr74_vgpr75_vgpr76_vgpr77_vgpr78
+; GFX11-FAKE16-NEXT: ; implicit-def: $vgpr53_vgpr54_vgpr55_vgpr56_vgpr57_vgpr58_vgpr59_vgpr60_vgpr61_vgpr62_vgpr63_vgpr64_vgpr65_vgpr66_vgpr67_vgpr68_vgpr69_vgpr70_vgpr71_vgpr72_vgpr73_vgpr74_vgpr75_vgpr76_vgpr77_vgpr78_vgpr79_vgpr80_vgpr81_vgpr82_vgpr83_vgpr84
+; GFX11-FAKE16-NEXT: ; implicit-def: $vgpr60_vgpr61_vgpr62_vgpr63_vgpr64_vgpr65_vgpr66_vgpr67_vgpr68_vgpr69_vgpr70_vgpr71_vgpr72_vgpr73_vgpr74_vgpr75_vgpr76_vgpr77_vgpr78_vgpr79_vgpr80_vgpr81_vgpr82_vgpr83_vgpr84_vgpr85_vgpr86_vgpr87_vgpr88_vgpr89_vgpr90_vgpr91
+; GFX11-FAKE16-NEXT: ; implicit-def: $vgpr68_vgpr69_vgpr70_vgpr71_vgpr72_vgpr73_vgpr74_vgpr75_vgpr76_vgpr77_vgpr78_vgpr79_vgpr80_vgpr81_vgpr82_vgpr83_vgpr84_vgpr85_vgpr86_vgpr87_vgpr88_vgpr89_vgpr90_vgpr91_vgpr92_vgpr93_vgpr94_vgpr95_vgpr96_vgpr97_vgpr98_vgpr99
+; GFX11-FAKE16-NEXT: ; implicit-def: $vgpr77_vgpr78_vgpr79_vgpr80_vgpr81_vgpr82_vgpr83_vgpr84_vgpr85_vgpr86_vgpr87_vgpr88_vgpr89_vgpr90_vgpr91_vgpr92_vgpr93_vgpr94_vgpr95_vgpr96_vgpr97_vgpr98_vgpr99_vgpr100_vgpr101_vgpr102_vgpr103_vgpr104_vgpr105_vgpr106_vgpr107_vgpr108
+; GFX11-FAKE16-NEXT: ; implicit-def: $vgpr87_vgpr88_vgpr89_vgpr90_vgpr91_vgpr92_vgpr93_vgpr94_vgpr95_vgpr96_vgpr97_vgpr98_vgpr99_vgpr100_vgpr101_vgpr102_vgpr103_vgpr104_vgpr105_vgpr106_vgpr107_vgpr108_vgpr109_vgpr110_vgpr111_vgpr112_vgpr113_vgpr114_vgpr115_vgpr116_vgpr117_vgpr118
+; GFX11-FAKE16-NEXT: ; implicit-def: $vgpr98_vgpr99_vgpr100_vgpr101_vgpr102_vgpr103_vgpr104_vgpr105_vgpr106_vgpr107_vgpr108_vgpr109_vgpr110_vgpr111_vgpr112_vgpr113_vgpr114_vgpr115_vgpr116_vgpr117_vgpr118_vgpr119_vgpr120_vgpr121_vgpr122_vgpr123_vgpr124_vgpr125_vgpr126_vgpr127_vgpr128_vgpr129
+; GFX11-FAKE16-NEXT: ; implicit-def: $vgpr110_vgpr111_vgpr112_vgpr113_vgpr114_vgpr115_vgpr116_vgpr117_vgpr118_vgpr119_vgpr120_vgpr121_vgpr122_vgpr123_vgpr124_vgpr125_vgpr126_vgpr127_vgpr128_vgpr129_vgpr130_vgpr131_vgpr132_vgpr133_vgpr134_vgpr135_vgpr136_vgpr137_vgpr138_vgpr139_vgpr140_vgpr141
+; GFX11-FAKE16-NEXT: ; implicit-def: $vgpr123_vgpr124_vgpr125_vgpr126_vgpr127_vgpr128_vgpr129_vgpr130_vgpr131_vgpr132_vgpr133_vgpr134_vgpr135_vgpr136_vgpr137_vgpr138_vgpr139_vgpr140_vgpr141_vgpr142_vgpr143_vgpr144_vgpr145_vgpr146_vgpr147_vgpr148_vgpr149_vgpr150_vgpr151_vgpr152_vgpr153_vgpr154
+; GFX11-FAKE16-NEXT: ; implicit-def: $vgpr137_vgpr138_vgpr139_vgpr140_vgpr141_vgpr142_vgpr143_vgpr144_vgpr145_vgpr146_vgpr147_vgpr148_vgpr149_vgpr150_vgpr151_vgpr152_vgpr153_vgpr154_vgpr155_vgpr156_vgpr157_vgpr158_vgpr159_vgpr160_vgpr161_vgpr162_vgpr163_vgpr164_vgpr165_vgpr166_vgpr167_vgpr168
+; GFX11-FAKE16-NEXT: s_branch .LBB19_2
%cmp = icmp eq i32 %b, 0
br i1 %cmp, label %cmp.true, label %cmp.false
@@ -66313,870 +67287,1844 @@ define inreg <32 x float> @bitcast_v64bf16_to_v32f32_scalar(<64 x bfloat> inreg
; GFX9-NEXT: .LBB43_4:
; GFX9-NEXT: s_branch .LBB43_2
;
-; GFX11-LABEL: bitcast_v64bf16_to_v32f32_scalar:
-; GFX11: ; %bb.0:
-; GFX11-NEXT: s_waitcnt vmcnt(0) expcnt(0) lgkmcnt(0)
-; GFX11-NEXT: v_cmp_ne_u32_e32 vcc_lo, 0, v14
-; GFX11-NEXT: s_clause 0x1f
-; GFX11-NEXT: scratch_store_b32 off, v40, s32 offset:288
-; GFX11-NEXT: scratch_store_b32 off, v41, s32 offset:284
-; GFX11-NEXT: scratch_store_b32 off, v42, s32 offset:280
-; GFX11-NEXT: scratch_store_b32 off, v43, s32 offset:276
-; GFX11-NEXT: scratch_store_b32 off, v44, s32 offset:272
-; GFX11-NEXT: scratch_store_b32 off, v45, s32 offset:268
-; GFX11-NEXT: scratch_store_b32 off, v46, s32 offset:264
-; GFX11-NEXT: scratch_store_b32 off, v47, s32 offset:260
-; GFX11-NEXT: scratch_store_b32 off, v56, s32 offset:256
-; GFX11-NEXT: scratch_store_b32 off, v57, s32 offset:252
-; GFX11-NEXT: scratch_store_b32 off, v58, s32 offset:248
-; GFX11-NEXT: scratch_store_b32 off, v59, s32 offset:244
-; GFX11-NEXT: scratch_store_b32 off, v60, s32 offset:240
-; GFX11-NEXT: scratch_store_b32 off, v61, s32 offset:236
-; GFX11-NEXT: scratch_store_b32 off, v62, s32 offset:232
-; GFX11-NEXT: scratch_store_b32 off, v63, s32 offset:228
-; GFX11-NEXT: scratch_store_b32 off, v72, s32 offset:224
-; GFX11-NEXT: scratch_store_b32 off, v73, s32 offset:220
-; GFX11-NEXT: scratch_store_b32 off, v74, s32 offset:216
-; GFX11-NEXT: scratch_store_b32 off, v75, s32 offset:212
-; GFX11-NEXT: scratch_store_b32 off, v76, s32 offset:208
-; GFX11-NEXT: scratch_store_b32 off, v77, s32 offset:204
-; GFX11-NEXT: scratch_store_b32 off, v78, s32 offset:200
-; GFX11-NEXT: scratch_store_b32 off, v79, s32 offset:196
-; GFX11-NEXT: scratch_store_b32 off, v88, s32 offset:192
-; GFX11-NEXT: scratch_store_b32 off, v89, s32 offset:188
-; GFX11-NEXT: scratch_store_b32 off, v90, s32 offset:184
-; GFX11-NEXT: scratch_store_b32 off, v91, s32 offset:180
-; GFX11-NEXT: scratch_store_b32 off, v92, s32 offset:176
-; GFX11-NEXT: scratch_store_b32 off, v93, s32 offset:172
-; GFX11-NEXT: scratch_store_b32 off, v94, s32 offset:168
-; GFX11-NEXT: scratch_store_b32 off, v95, s32 offset:164
-; GFX11-NEXT: s_clause 0x1f
-; GFX11-NEXT: scratch_store_b32 off, v104, s32 offset:160
-; GFX11-NEXT: scratch_store_b32 off, v105, s32 offset:156
-; GFX11-NEXT: scratch_store_b32 off, v106, s32 offset:152
-; GFX11-NEXT: scratch_store_b32 off, v107, s32 offset:148
-; GFX11-NEXT: scratch_store_b32 off, v108, s32 offset:144
-; GFX11-NEXT: scratch_store_b32 off, v109, s32 offset:140
-; GFX11-NEXT: scratch_store_b32 off, v110, s32 offset:136
-; GFX11-NEXT: scratch_store_b32 off, v111, s32 offset:132
-; GFX11-NEXT: scratch_store_b32 off, v120, s32 offset:128
-; GFX11-NEXT: scratch_store_b32 off, v121, s32 offset:124
-; GFX11-NEXT: scratch_store_b32 off, v122, s32 offset:120
-; GFX11-NEXT: scratch_store_b32 off, v123, s32 offset:116
-; GFX11-NEXT: scratch_store_b32 off, v124, s32 offset:112
-; GFX11-NEXT: scratch_store_b32 off, v125, s32 offset:108
-; GFX11-NEXT: scratch_store_b32 off, v126, s32 offset:104
-; GFX11-NEXT: scratch_store_b32 off, v127, s32 offset:100
-; GFX11-NEXT: scratch_store_b32 off, v136, s32 offset:96
-; GFX11-NEXT: scratch_store_b32 off, v137, s32 offset:92
-; GFX11-NEXT: scratch_store_b32 off, v138, s32 offset:88
-; GFX11-NEXT: scratch_store_b32 off, v139, s32 offset:84
-; GFX11-NEXT: scratch_store_b32 off, v140, s32 offset:80
-; GFX11-NEXT: scratch_store_b32 off, v141, s32 offset:76
-; GFX11-NEXT: scratch_store_b32 off, v142, s32 offset:72
-; GFX11-NEXT: scratch_store_b32 off, v143, s32 offset:68
-; GFX11-NEXT: scratch_store_b32 off, v152, s32 offset:64
-; GFX11-NEXT: scratch_store_b32 off, v153, s32 offset:60
-; GFX11-NEXT: scratch_store_b32 off, v154, s32 offset:56
-; GFX11-NEXT: scratch_store_b32 off, v155, s32 offset:52
-; GFX11-NEXT: scratch_store_b32 off, v156, s32 offset:48
-; GFX11-NEXT: scratch_store_b32 off, v157, s32 offset:44
-; GFX11-NEXT: scratch_store_b32 off, v158, s32 offset:40
-; GFX11-NEXT: scratch_store_b32 off, v159, s32 offset:36
-; GFX11-NEXT: s_clause 0x8
-; GFX11-NEXT: scratch_store_b32 off, v168, s32 offset:32
-; GFX11-NEXT: scratch_store_b32 off, v169, s32 offset:28
-; GFX11-NEXT: scratch_store_b32 off, v170, s32 offset:24
-; GFX11-NEXT: scratch_store_b32 off, v171, s32 offset:20
-; GFX11-NEXT: scratch_store_b32 off, v172, s32 offset:16
-; GFX11-NEXT: scratch_store_b32 off, v173, s32 offset:12
-; GFX11-NEXT: scratch_store_b32 off, v174, s32 offset:8
-; GFX11-NEXT: scratch_store_b32 off, v175, s32 offset:4
-; GFX11-NEXT: scratch_store_b32 off, v184, s32
-; GFX11-NEXT: v_dual_mov_b32 v178, v13 :: v_dual_mov_b32 v179, v12
-; GFX11-NEXT: v_dual_mov_b32 v180, v11 :: v_dual_mov_b32 v181, v9
-; GFX11-NEXT: v_dual_mov_b32 v182, v10 :: v_dual_mov_b32 v169, v7
-; GFX11-NEXT: v_dual_mov_b32 v170, v8 :: v_dual_mov_b32 v177, v3
-; GFX11-NEXT: v_dual_mov_b32 v176, v6 :: v_dual_mov_b32 v171, v4
-; GFX11-NEXT: v_dual_mov_b32 v174, v5 :: v_dual_mov_b32 v173, v0
-; GFX11-NEXT: v_dual_mov_b32 v184, v2 :: v_dual_mov_b32 v175, v1
-; GFX11-NEXT: v_dual_mov_b32 v183, s28 :: v_dual_mov_b32 v172, s29
-; GFX11-NEXT: s_mov_b32 s4, 0
-; GFX11-NEXT: s_and_b32 s5, vcc_lo, exec_lo
-; GFX11-NEXT: s_cbranch_scc0 .LBB43_4
-; GFX11-NEXT: ; %bb.1: ; %cmp.false
-; GFX11-NEXT: v_dual_mov_b32 v32, s0 :: v_dual_mov_b32 v37, s2
-; GFX11-NEXT: v_dual_mov_b32 v34, s1 :: v_dual_mov_b32 v41, s3
-; GFX11-NEXT: v_dual_mov_b32 v46, s16 :: v_dual_mov_b32 v59, s18
-; GFX11-NEXT: v_dual_mov_b32 v52, s17 :: v_dual_mov_b32 v67, s19
-; GFX11-NEXT: v_dual_mov_b32 v76, s20 :: v_dual_mov_b32 v97, s22
-; GFX11-NEXT: v_dual_mov_b32 v86, s21 :: v_dual_mov_b32 v109, s23
-; GFX11-NEXT: v_dual_mov_b32 v122, s24 :: v_dual_mov_b32 v151, s26
-; GFX11-NEXT: v_dual_mov_b32 v136, s25 :: v_dual_mov_b32 v15, s27
-; GFX11-NEXT: s_and_not1_b32 vcc_lo, exec_lo, s4
-; GFX11-NEXT: s_cbranch_vccnz .LBB43_3
-; GFX11-NEXT: .LBB43_2: ; %cmp.true
-; GFX11-NEXT: s_and_b32 s5, s27, 0xffff0000
-; GFX11-NEXT: s_lshl_b32 s4, s27, 16
-; GFX11-NEXT: v_add_f32_e64 v1, 0x40c00000, s5
-; GFX11-NEXT: v_add_f32_e64 v0, 0x40c00000, s4
-; GFX11-NEXT: s_lshl_b32 s6, s26, 16
-; GFX11-NEXT: s_and_b32 s4, s26, 0xffff0000
-; GFX11-NEXT: v_add_f32_e64 v5, 0x40c00000, s6
-; GFX11-NEXT: v_bfe_u32 v4, v1, 16, 1
-; GFX11-NEXT: v_bfe_u32 v2, v0, 16, 1
-; GFX11-NEXT: v_or_b32_e32 v7, 0x400000, v1
-; GFX11-NEXT: v_add_f32_e64 v3, 0x40c00000, s4
-; GFX11-NEXT: v_or_b32_e32 v8, 0x400000, v0
-; GFX11-NEXT: v_add_nc_u32_e32 v4, v4, v1
-; GFX11-NEXT: v_bfe_u32 v10, v5, 16, 1
-; GFX11-NEXT: v_cmp_u_f32_e32 vcc_lo, v0, v0
-; GFX11-NEXT: v_bfe_u32 v9, v3, 16, 1
-; GFX11-NEXT: s_lshl_b32 s7, s25, 16
-; GFX11-NEXT: v_add_nc_u32_e32 v4, 0x7fff, v4
-; GFX11-NEXT: v_add_nc_u32_e32 v2, v2, v0
-; GFX11-NEXT: s_and_b32 s5, s25, 0xffff0000
-; GFX11-NEXT: s_and_b32 s4, s24, 0xffff0000
-; GFX11-NEXT: v_add_f32_e64 v6, 0x40c00000, s5
-; GFX11-NEXT: v_and_b32_e32 v51, 0xffff0000, v183
-; GFX11-NEXT: v_add_nc_u32_e32 v2, 0x7fff, v2
-; GFX11-NEXT: s_delay_alu instid0(VALU_DEP_1)
-; GFX11-NEXT: v_cndmask_b32_e32 v0, v2, v8, vcc_lo
-; GFX11-NEXT: v_add_nc_u32_e32 v8, v10, v5
-; GFX11-NEXT: v_cmp_u_f32_e32 vcc_lo, v1, v1
-; GFX11-NEXT: v_add_nc_u32_e32 v2, v9, v3
-; GFX11-NEXT: v_or_b32_e32 v9, 0x400000, v5
-; GFX11-NEXT: v_lshrrev_b32_e32 v0, 16, v0
-; GFX11-NEXT: v_bfe_u32 v10, v6, 16, 1
-; GFX11-NEXT: v_cndmask_b32_e32 v1, v4, v7, vcc_lo
-; GFX11-NEXT: v_add_nc_u32_e32 v7, 0x7fff, v8
-; GFX11-NEXT: v_add_f32_e64 v8, 0x40c00000, s7
-; GFX11-NEXT: v_or_b32_e32 v4, 0x400000, v3
-; GFX11-NEXT: v_add_nc_u32_e32 v2, 0x7fff, v2
-; GFX11-NEXT: v_lshrrev_b32_e32 v1, 16, v1
-; GFX11-NEXT: v_and_b32_e32 v0, 0xffff, v0
-; GFX11-NEXT: v_cmp_u_f32_e32 vcc_lo, v3, v3
-; GFX11-NEXT: v_bfe_u32 v3, v8, 16, 1
-; GFX11-NEXT: s_delay_alu instid0(VALU_DEP_3) | instskip(NEXT) | instid1(VALU_DEP_2)
-; GFX11-NEXT: v_lshl_or_b32 v15, v1, 16, v0
-; GFX11-NEXT: v_add_nc_u32_e32 v1, v3, v8
-; GFX11-NEXT: v_cndmask_b32_e32 v2, v2, v4, vcc_lo
-; GFX11-NEXT: v_cmp_u_f32_e32 vcc_lo, v5, v5
-; GFX11-NEXT: v_add_nc_u32_e32 v5, v10, v6
-; GFX11-NEXT: s_delay_alu instid0(VALU_DEP_4) | instskip(NEXT) | instid1(VALU_DEP_4)
-; GFX11-NEXT: v_add_nc_u32_e32 v1, 0x7fff, v1
-; GFX11-NEXT: v_lshrrev_b32_e32 v0, 16, v2
-; GFX11-NEXT: v_cndmask_b32_e32 v4, v7, v9, vcc_lo
-; GFX11-NEXT: s_delay_alu instid0(VALU_DEP_4)
-; GFX11-NEXT: v_add_nc_u32_e32 v3, 0x7fff, v5
-; GFX11-NEXT: v_add_f32_e64 v5, 0x40c00000, s4
-; GFX11-NEXT: v_cmp_u_f32_e32 vcc_lo, v6, v6
-; GFX11-NEXT: s_lshl_b32 s4, s24, 16
-; GFX11-NEXT: v_lshrrev_b32_e32 v2, 16, v4
-; GFX11-NEXT: v_or_b32_e32 v4, 0x400000, v6
-; GFX11-NEXT: v_or_b32_e32 v7, 0x400000, v8
-; GFX11-NEXT: v_add_f32_e64 v9, 0x40c00000, s4
-; GFX11-NEXT: s_and_b32 s4, s23, 0xffff0000
-; GFX11-NEXT: s_delay_alu instid0(VALU_DEP_3) | instskip(SKIP_4) | instid1(VALU_DEP_4)
-; GFX11-NEXT: v_cndmask_b32_e32 v3, v3, v4, vcc_lo
-; GFX11-NEXT: v_bfe_u32 v4, v5, 16, 1
-; GFX11-NEXT: v_cmp_u_f32_e32 vcc_lo, v8, v8
-; GFX11-NEXT: v_or_b32_e32 v8, 0x400000, v5
-; GFX11-NEXT: v_or_b32_e32 v10, 0x400000, v9
-; GFX11-NEXT: v_add_nc_u32_e32 v4, v4, v5
-; GFX11-NEXT: v_dual_cndmask_b32 v6, v1, v7 :: v_dual_and_b32 v1, 0xffff, v2
-; GFX11-NEXT: v_bfe_u32 v7, v9, 16, 1
-; GFX11-NEXT: v_lshrrev_b32_e32 v2, 16, v3
-; GFX11-NEXT: s_delay_alu instid0(VALU_DEP_4) | instskip(NEXT) | instid1(VALU_DEP_4)
-; GFX11-NEXT: v_add_nc_u32_e32 v4, 0x7fff, v4
-; GFX11-NEXT: v_lshrrev_b32_e32 v3, 16, v6
-; GFX11-NEXT: v_cmp_u_f32_e32 vcc_lo, v5, v5
-; GFX11-NEXT: v_add_nc_u32_e32 v6, v7, v9
-; GFX11-NEXT: v_add_f32_e64 v7, 0x40c00000, s4
-; GFX11-NEXT: s_lshl_b32 s4, s23, 16
-; GFX11-NEXT: v_lshl_or_b32 v151, v0, 16, v1
-; GFX11-NEXT: v_add_f32_e64 v12, 0x40c00000, s4
-; GFX11-NEXT: v_add_nc_u32_e32 v6, 0x7fff, v6
-; GFX11-NEXT: v_bfe_u32 v11, v7, 16, 1
-; GFX11-NEXT: v_cndmask_b32_e32 v5, v4, v8, vcc_lo
-; GFX11-NEXT: v_cmp_u_f32_e32 vcc_lo, v9, v9
-; GFX11-NEXT: s_and_b32 s4, s22, 0xffff0000
-; GFX11-NEXT: v_bfe_u32 v9, v12, 16, 1
-; GFX11-NEXT: v_add_nc_u32_e32 v8, v11, v7
-; GFX11-NEXT: v_and_b32_e32 v4, 0xffff, v3
-; GFX11-NEXT: v_cndmask_b32_e32 v6, v6, v10, vcc_lo
-; GFX11-NEXT: v_add_f32_e64 v10, 0x40c00000, s4
-; GFX11-NEXT: s_lshl_b32 s4, s22, 16
-; GFX11-NEXT: v_lshrrev_b32_e32 v3, 16, v5
-; GFX11-NEXT: v_add_f32_e64 v11, 0x40c00000, s4
-; GFX11-NEXT: v_lshrrev_b32_e32 v5, 16, v6
-; GFX11-NEXT: v_add_nc_u32_e32 v6, 0x7fff, v8
-; GFX11-NEXT: v_add_nc_u32_e32 v8, v9, v12
-; GFX11-NEXT: v_or_b32_e32 v9, 0x400000, v7
-; GFX11-NEXT: v_bfe_u32 v14, v10, 16, 1
-; GFX11-NEXT: v_cmp_u_f32_e32 vcc_lo, v7, v7
-; GFX11-NEXT: v_or_b32_e32 v13, 0x400000, v12
-; GFX11-NEXT: v_add_nc_u32_e32 v8, 0x7fff, v8
-; GFX11-NEXT: s_and_b32 s4, s21, 0xffff0000
-; GFX11-NEXT: v_cndmask_b32_e32 v7, v6, v9, vcc_lo
-; GFX11-NEXT: v_bfe_u32 v9, v11, 16, 1
-; GFX11-NEXT: v_cmp_u_f32_e32 vcc_lo, v12, v12
-; GFX11-NEXT: v_add_nc_u32_e32 v12, v14, v10
-; GFX11-NEXT: v_and_b32_e32 v6, 0xffff, v5
-; GFX11-NEXT: v_lshrrev_b32_e32 v5, 16, v7
-; GFX11-NEXT: v_dual_cndmask_b32 v8, v8, v13 :: v_dual_add_nc_u32 v7, v9, v11
-; GFX11-NEXT: s_delay_alu instid0(VALU_DEP_4)
-; GFX11-NEXT: v_add_nc_u32_e32 v9, 0x7fff, v12
-; GFX11-NEXT: v_or_b32_e32 v12, 0x400000, v10
-; GFX11-NEXT: v_add_f32_e64 v13, 0x40c00000, s4
-; GFX11-NEXT: v_cmp_u_f32_e32 vcc_lo, v10, v10
-; GFX11-NEXT: s_lshl_b32 s4, s21, 16
-; GFX11-NEXT: v_add_nc_u32_e32 v7, 0x7fff, v7
-; GFX11-NEXT: v_or_b32_e32 v14, 0x400000, v11
-; GFX11-NEXT: v_add_f32_e64 v16, 0x40c00000, s4
-; GFX11-NEXT: v_cndmask_b32_e32 v9, v9, v12, vcc_lo
-; GFX11-NEXT: v_bfe_u32 v10, v13, 16, 1
-; GFX11-NEXT: v_cmp_u_f32_e32 vcc_lo, v11, v11
-; GFX11-NEXT: v_lshrrev_b32_e32 v8, 16, v8
-; GFX11-NEXT: v_bfe_u32 v12, v16, 16, 1
-; GFX11-NEXT: s_and_b32 s4, s20, 0xffff0000
-; GFX11-NEXT: v_dual_cndmask_b32 v11, v7, v14 :: v_dual_add_nc_u32 v10, v10, v13
-; GFX11-NEXT: s_delay_alu instid0(VALU_DEP_3) | instskip(SKIP_2) | instid1(VALU_DEP_4)
-; GFX11-NEXT: v_and_b32_e32 v7, 0xffff, v8
-; GFX11-NEXT: v_lshrrev_b32_e32 v8, 16, v9
-; GFX11-NEXT: v_or_b32_e32 v14, 0x400000, v13
-; GFX11-NEXT: v_add_nc_u32_e32 v10, 0x7fff, v10
-; GFX11-NEXT: v_lshrrev_b32_e32 v9, 16, v11
-; GFX11-NEXT: v_add_nc_u32_e32 v11, v12, v16
-; GFX11-NEXT: v_add_f32_e64 v12, 0x40c00000, s4
-; GFX11-NEXT: v_cmp_u_f32_e32 vcc_lo, v13, v13
-; GFX11-NEXT: s_lshl_b32 s4, s20, 16
-; GFX11-NEXT: v_or_b32_e32 v17, 0x400000, v16
-; GFX11-NEXT: v_add_nc_u32_e32 v11, 0x7fff, v11
-; GFX11-NEXT: v_bfe_u32 v18, v12, 16, 1
-; GFX11-NEXT: v_add_f32_e64 v19, 0x40c00000, s4
-; GFX11-NEXT: v_cndmask_b32_e32 v13, v10, v14, vcc_lo
-; GFX11-NEXT: v_cmp_u_f32_e32 vcc_lo, v16, v16
-; GFX11-NEXT: s_and_b32 s4, s19, 0xffff0000
-; GFX11-NEXT: v_add_nc_u32_e32 v14, v18, v12
-; GFX11-NEXT: v_bfe_u32 v16, v19, 16, 1
-; GFX11-NEXT: v_and_b32_e32 v10, 0xffff, v9
-; GFX11-NEXT: v_cndmask_b32_e32 v11, v11, v17, vcc_lo
-; GFX11-NEXT: v_add_f32_e64 v17, 0x40c00000, s4
-; GFX11-NEXT: s_lshl_b32 s4, s19, 16
-; GFX11-NEXT: v_lshrrev_b32_e32 v9, 16, v13
-; GFX11-NEXT: v_add_nc_u32_e32 v13, 0x7fff, v14
-; GFX11-NEXT: v_add_nc_u32_e32 v14, v16, v19
-; GFX11-NEXT: v_or_b32_e32 v16, 0x400000, v12
-; GFX11-NEXT: v_add_f32_e64 v18, 0x40c00000, s4
-; GFX11-NEXT: v_bfe_u32 v21, v17, 16, 1
-; GFX11-NEXT: v_cmp_u_f32_e32 vcc_lo, v12, v12
-; GFX11-NEXT: v_lshrrev_b32_e32 v11, 16, v11
-; GFX11-NEXT: v_add_nc_u32_e32 v14, 0x7fff, v14
-; GFX11-NEXT: v_or_b32_e32 v20, 0x400000, v19
-; GFX11-NEXT: s_and_b32 s4, s18, 0xffff0000
-; GFX11-NEXT: v_cndmask_b32_e32 v13, v13, v16, vcc_lo
-; GFX11-NEXT: v_bfe_u32 v16, v18, 16, 1
-; GFX11-NEXT: v_cmp_u_f32_e32 vcc_lo, v19, v19
-; GFX11-NEXT: v_add_nc_u32_e32 v19, v21, v17
-; GFX11-NEXT: v_and_b32_e32 v12, 0xffff, v11
-; GFX11-NEXT: v_lshrrev_b32_e32 v11, 16, v13
-; GFX11-NEXT: v_dual_cndmask_b32 v14, v14, v20 :: v_dual_add_nc_u32 v13, v16, v18
-; GFX11-NEXT: s_delay_alu instid0(VALU_DEP_4)
-; GFX11-NEXT: v_add_nc_u32_e32 v16, 0x7fff, v19
-; GFX11-NEXT: v_or_b32_e32 v19, 0x400000, v17
-; GFX11-NEXT: v_add_f32_e64 v20, 0x40c00000, s4
-; GFX11-NEXT: v_cmp_u_f32_e32 vcc_lo, v17, v17
-; GFX11-NEXT: s_lshl_b32 s4, s18, 16
-; GFX11-NEXT: v_add_nc_u32_e32 v13, 0x7fff, v13
-; GFX11-NEXT: v_or_b32_e32 v21, 0x400000, v18
-; GFX11-NEXT: v_add_f32_e64 v22, 0x40c00000, s4
-; GFX11-NEXT: v_cndmask_b32_e32 v16, v16, v19, vcc_lo
-; GFX11-NEXT: v_bfe_u32 v17, v20, 16, 1
-; GFX11-NEXT: v_cmp_u_f32_e32 vcc_lo, v18, v18
-; GFX11-NEXT: v_lshrrev_b32_e32 v14, 16, v14
-; GFX11-NEXT: v_bfe_u32 v19, v22, 16, 1
-; GFX11-NEXT: s_and_b32 s4, s17, 0xffff0000
-; GFX11-NEXT: v_add_nc_u32_e32 v17, v17, v20
-; GFX11-NEXT: s_delay_alu instid0(VALU_DEP_3) | instskip(SKIP_2) | instid1(VALU_DEP_4)
-; GFX11-NEXT: v_dual_cndmask_b32 v18, v13, v21 :: v_dual_and_b32 v13, 0xffff, v14
-; GFX11-NEXT: v_lshrrev_b32_e32 v14, 16, v16
-; GFX11-NEXT: v_or_b32_e32 v21, 0x400000, v20
-; GFX11-NEXT: v_add_nc_u32_e32 v17, 0x7fff, v17
-; GFX11-NEXT: s_delay_alu instid0(VALU_DEP_4)
-; GFX11-NEXT: v_lshrrev_b32_e32 v16, 16, v18
-; GFX11-NEXT: v_add_nc_u32_e32 v18, v19, v22
-; GFX11-NEXT: v_add_f32_e64 v19, 0x40c00000, s4
-; GFX11-NEXT: v_cmp_u_f32_e32 vcc_lo, v20, v20
-; GFX11-NEXT: s_lshl_b32 s4, s17, 16
-; GFX11-NEXT: v_or_b32_e32 v23, 0x400000, v22
-; GFX11-NEXT: v_add_nc_u32_e32 v18, 0x7fff, v18
-; GFX11-NEXT: v_bfe_u32 v24, v19, 16, 1
-; GFX11-NEXT: v_add_f32_e64 v25, 0x40c00000, s4
-; GFX11-NEXT: v_cndmask_b32_e32 v20, v17, v21, vcc_lo
-; GFX11-NEXT: v_cmp_u_f32_e32 vcc_lo, v22, v22
-; GFX11-NEXT: s_and_b32 s4, s16, 0xffff0000
-; GFX11-NEXT: v_add_nc_u32_e32 v21, v24, v19
-; GFX11-NEXT: v_bfe_u32 v22, v25, 16, 1
-; GFX11-NEXT: v_dual_cndmask_b32 v18, v18, v23 :: v_dual_and_b32 v17, 0xffff, v16
-; GFX11-NEXT: v_add_f32_e64 v23, 0x40c00000, s4
-; GFX11-NEXT: s_lshl_b32 s4, s16, 16
-; GFX11-NEXT: v_lshrrev_b32_e32 v16, 16, v20
-; GFX11-NEXT: v_add_nc_u32_e32 v20, 0x7fff, v21
-; GFX11-NEXT: v_add_nc_u32_e32 v21, v22, v25
-; GFX11-NEXT: v_or_b32_e32 v22, 0x400000, v19
-; GFX11-NEXT: v_add_f32_e64 v24, 0x40c00000, s4
-; GFX11-NEXT: v_bfe_u32 v27, v23, 16, 1
-; GFX11-NEXT: v_cmp_u_f32_e32 vcc_lo, v19, v19
-; GFX11-NEXT: v_lshrrev_b32_e32 v18, 16, v18
-; GFX11-NEXT: v_add_nc_u32_e32 v21, 0x7fff, v21
-; GFX11-NEXT: v_or_b32_e32 v26, 0x400000, v25
-; GFX11-NEXT: s_and_b32 s4, s3, 0xffff0000
-; GFX11-NEXT: v_cndmask_b32_e32 v20, v20, v22, vcc_lo
-; GFX11-NEXT: v_bfe_u32 v22, v24, 16, 1
-; GFX11-NEXT: v_cmp_u_f32_e32 vcc_lo, v25, v25
-; GFX11-NEXT: v_add_nc_u32_e32 v25, v27, v23
-; GFX11-NEXT: v_and_b32_e32 v19, 0xffff, v18
-; GFX11-NEXT: v_lshrrev_b32_e32 v18, 16, v20
-; GFX11-NEXT: v_dual_cndmask_b32 v21, v21, v26 :: v_dual_add_nc_u32 v20, v22, v24
-; GFX11-NEXT: s_delay_alu instid0(VALU_DEP_4)
-; GFX11-NEXT: v_add_nc_u32_e32 v22, 0x7fff, v25
-; GFX11-NEXT: v_or_b32_e32 v25, 0x400000, v23
-; GFX11-NEXT: v_add_f32_e64 v26, 0x40c00000, s4
-; GFX11-NEXT: v_cmp_u_f32_e32 vcc_lo, v23, v23
-; GFX11-NEXT: s_lshl_b32 s3, s3, 16
-; GFX11-NEXT: v_add_nc_u32_e32 v20, 0x7fff, v20
-; GFX11-NEXT: v_or_b32_e32 v27, 0x400000, v24
-; GFX11-NEXT: v_add_f32_e64 v28, 0x40c00000, s3
-; GFX11-NEXT: v_cndmask_b32_e32 v22, v22, v25, vcc_lo
-; GFX11-NEXT: v_bfe_u32 v23, v26, 16, 1
-; GFX11-NEXT: v_cmp_u_f32_e32 vcc_lo, v24, v24
-; GFX11-NEXT: v_lshrrev_b32_e32 v21, 16, v21
-; GFX11-NEXT: v_bfe_u32 v25, v28, 16, 1
-; GFX11-NEXT: s_and_b32 s3, s2, 0xffff0000
-; GFX11-NEXT: v_dual_cndmask_b32 v24, v20, v27 :: v_dual_add_nc_u32 v23, v23, v26
-; GFX11-NEXT: s_delay_alu instid0(VALU_DEP_3) | instskip(SKIP_2) | instid1(VALU_DEP_4)
-; GFX11-NEXT: v_and_b32_e32 v20, 0xffff, v21
-; GFX11-NEXT: v_lshrrev_b32_e32 v21, 16, v22
-; GFX11-NEXT: v_or_b32_e32 v27, 0x400000, v26
-; GFX11-NEXT: v_add_nc_u32_e32 v23, 0x7fff, v23
-; GFX11-NEXT: v_lshrrev_b32_e32 v22, 16, v24
-; GFX11-NEXT: v_add_nc_u32_e32 v24, v25, v28
-; GFX11-NEXT: v_add_f32_e64 v25, 0x40c00000, s3
-; GFX11-NEXT: v_cmp_u_f32_e32 vcc_lo, v26, v26
-; GFX11-NEXT: s_lshl_b32 s2, s2, 16
-; GFX11-NEXT: v_or_b32_e32 v29, 0x400000, v28
-; GFX11-NEXT: v_add_nc_u32_e32 v24, 0x7fff, v24
-; GFX11-NEXT: v_bfe_u32 v30, v25, 16, 1
-; GFX11-NEXT: v_add_f32_e64 v31, 0x40c00000, s2
-; GFX11-NEXT: v_cndmask_b32_e32 v26, v23, v27, vcc_lo
-; GFX11-NEXT: v_cmp_u_f32_e32 vcc_lo, v28, v28
-; GFX11-NEXT: s_and_b32 s2, s1, 0xffff0000
-; GFX11-NEXT: v_add_nc_u32_e32 v27, v30, v25
-; GFX11-NEXT: v_bfe_u32 v28, v31, 16, 1
-; GFX11-NEXT: s_lshl_b32 s1, s1, 16
-; GFX11-NEXT: v_cndmask_b32_e32 v24, v24, v29, vcc_lo
-; GFX11-NEXT: v_add_f32_e64 v29, 0x40c00000, s2
-; GFX11-NEXT: v_and_b32_e32 v23, 0xffff, v22
-; GFX11-NEXT: v_lshrrev_b32_e32 v22, 16, v26
-; GFX11-NEXT: v_add_nc_u32_e32 v26, 0x7fff, v27
-; GFX11-NEXT: v_add_nc_u32_e32 v27, v28, v31
-; GFX11-NEXT: v_or_b32_e32 v28, 0x400000, v25
-; GFX11-NEXT: v_add_f32_e64 v30, 0x40c00000, s1
-; GFX11-NEXT: v_bfe_u32 v33, v29, 16, 1
-; GFX11-NEXT: v_cmp_u_f32_e32 vcc_lo, v25, v25
-; GFX11-NEXT: v_lshrrev_b32_e32 v24, 16, v24
-; GFX11-NEXT: v_add_nc_u32_e32 v27, 0x7fff, v27
-; GFX11-NEXT: v_or_b32_e32 v32, 0x400000, v31
-; GFX11-NEXT: s_and_b32 s1, s0, 0xffff0000
-; GFX11-NEXT: v_cndmask_b32_e32 v26, v26, v28, vcc_lo
-; GFX11-NEXT: v_bfe_u32 v28, v30, 16, 1
-; GFX11-NEXT: v_cmp_u_f32_e32 vcc_lo, v31, v31
-; GFX11-NEXT: v_add_nc_u32_e32 v31, v33, v29
-; GFX11-NEXT: v_and_b32_e32 v25, 0xffff, v24
-; GFX11-NEXT: v_lshrrev_b32_e32 v24, 16, v26
-; GFX11-NEXT: v_dual_cndmask_b32 v27, v27, v32 :: v_dual_add_nc_u32 v26, v28, v30
-; GFX11-NEXT: s_delay_alu instid0(VALU_DEP_4)
-; GFX11-NEXT: v_add_nc_u32_e32 v28, 0x7fff, v31
-; GFX11-NEXT: v_or_b32_e32 v31, 0x400000, v29
-; GFX11-NEXT: v_add_f32_e64 v32, 0x40c00000, s1
-; GFX11-NEXT: v_cmp_u_f32_e32 vcc_lo, v29, v29
-; GFX11-NEXT: s_lshl_b32 s0, s0, 16
-; GFX11-NEXT: v_add_nc_u32_e32 v26, 0x7fff, v26
-; GFX11-NEXT: v_or_b32_e32 v33, 0x400000, v30
-; GFX11-NEXT: v_add_f32_e64 v34, 0x40c00000, s0
-; GFX11-NEXT: v_cndmask_b32_e32 v28, v28, v31, vcc_lo
-; GFX11-NEXT: v_bfe_u32 v29, v32, 16, 1
-; GFX11-NEXT: v_cmp_u_f32_e32 vcc_lo, v30, v30
-; GFX11-NEXT: v_or_b32_e32 v35, 0x400000, v32
-; GFX11-NEXT: v_bfe_u32 v31, v34, 16, 1
-; GFX11-NEXT: v_or_b32_e32 v36, 0x400000, v34
-; GFX11-NEXT: v_lshrrev_b32_e32 v27, 16, v27
-; GFX11-NEXT: v_cndmask_b32_e32 v30, v26, v33, vcc_lo
-; GFX11-NEXT: v_lshrrev_b32_e32 v26, 16, v28
-; GFX11-NEXT: v_add_nc_u32_e32 v28, v29, v32
-; GFX11-NEXT: v_lshlrev_b32_e32 v33, 16, v178
-; GFX11-NEXT: v_cmp_u_f32_e32 vcc_lo, v32, v32
-; GFX11-NEXT: v_lshrrev_b32_e32 v29, 16, v30
-; GFX11-NEXT: v_add_nc_u32_e32 v30, v31, v34
-; GFX11-NEXT: v_and_b32_e32 v31, 0xffff0000, v178
-; GFX11-NEXT: v_add_nc_u32_e32 v28, 0x7fff, v28
-; GFX11-NEXT: v_add_f32_e32 v33, 0x40c00000, v33
-; GFX11-NEXT: v_lshl_or_b32 v109, v5, 16, v7
-; GFX11-NEXT: v_add_nc_u32_e32 v30, 0x7fff, v30
-; GFX11-NEXT: v_add_f32_e32 v31, 0x40c00000, v31
-; GFX11-NEXT: v_cndmask_b32_e32 v32, v28, v35, vcc_lo
-; GFX11-NEXT: v_bfe_u32 v37, v33, 16, 1
-; GFX11-NEXT: v_cmp_u_f32_e32 vcc_lo, v34, v34
-; GFX11-NEXT: v_and_b32_e32 v28, 0xffff, v29
-; GFX11-NEXT: v_bfe_u32 v35, v31, 16, 1
-; GFX11-NEXT: v_lshrrev_b32_e32 v29, 16, v32
-; GFX11-NEXT: v_add_nc_u32_e32 v32, v37, v33
-; GFX11-NEXT: v_cndmask_b32_e32 v30, v30, v36, vcc_lo
-; GFX11-NEXT: v_lshlrev_b32_e32 v36, 16, v179
-; GFX11-NEXT: v_add_nc_u32_e32 v34, v35, v31
-; GFX11-NEXT: v_or_b32_e32 v37, 0x400000, v33
-; GFX11-NEXT: v_add_nc_u32_e32 v32, 0x7fff, v32
-; GFX11-NEXT: v_cmp_u_f32_e32 vcc_lo, v33, v33
-; GFX11-NEXT: v_and_b32_e32 v35, 0xffff0000, v179
-; GFX11-NEXT: v_add_nc_u32_e32 v34, 0x7fff, v34
-; GFX11-NEXT: v_or_b32_e32 v38, 0x400000, v31
-; GFX11-NEXT: v_add_f32_e32 v36, 0x40c00000, v36
-; GFX11-NEXT: v_cndmask_b32_e32 v32, v32, v37, vcc_lo
-; GFX11-NEXT: v_cmp_u_f32_e32 vcc_lo, v31, v31
-; GFX11-NEXT: v_add_f32_e32 v35, 0x40c00000, v35
-; GFX11-NEXT: v_and_b32_e32 v37, 0xffff0000, v180
-; GFX11-NEXT: v_or_b32_e32 v39, 0x400000, v36
-; GFX11-NEXT: v_lshrrev_b32_e32 v32, 16, v32
-; GFX11-NEXT: v_cndmask_b32_e32 v31, v34, v38, vcc_lo
-; GFX11-NEXT: v_bfe_u32 v34, v36, 16, 1
-; GFX11-NEXT: v_bfe_u32 v33, v35, 16, 1
-; GFX11-NEXT: v_lshlrev_b32_e32 v38, 16, v180
-; GFX11-NEXT: v_cmp_u_f32_e32 vcc_lo, v36, v36
-; GFX11-NEXT: s_delay_alu instid0(VALU_DEP_4) | instskip(NEXT) | instid1(VALU_DEP_4)
-; GFX11-NEXT: v_dual_add_f32 v37, 0x40c00000, v37 :: v_dual_add_nc_u32 v34, v34, v36
-; GFX11-NEXT: v_add_nc_u32_e32 v33, v33, v35
-; GFX11-NEXT: v_or_b32_e32 v48, 0x400000, v35
-; GFX11-NEXT: v_add_f32_e32 v38, 0x40c00000, v38
-; GFX11-NEXT: v_lshrrev_b32_e32 v31, 16, v31
-; GFX11-NEXT: v_add_nc_u32_e32 v34, 0x7fff, v34
-; GFX11-NEXT: v_add_nc_u32_e32 v33, 0x7fff, v33
-; GFX11-NEXT: v_and_b32_e32 v32, 0xffff, v32
-; GFX11-NEXT: v_bfe_u32 v36, v37, 16, 1
-; GFX11-NEXT: v_lshrrev_b32_e32 v30, 16, v30
-; GFX11-NEXT: v_cndmask_b32_e32 v34, v34, v39, vcc_lo
-; GFX11-NEXT: v_cmp_u_f32_e32 vcc_lo, v35, v35
-; GFX11-NEXT: v_bfe_u32 v35, v38, 16, 1
-; GFX11-NEXT: v_lshl_or_b32 v178, v31, 16, v32
-; GFX11-NEXT: v_add_nc_u32_e32 v31, v36, v37
-; GFX11-NEXT: v_dual_cndmask_b32 v33, v33, v48 :: v_dual_lshlrev_b32 v36, 16, v182
-; GFX11-NEXT: v_lshrrev_b32_e32 v34, 16, v34
-; GFX11-NEXT: v_or_b32_e32 v39, 0x400000, v38
-; GFX11-NEXT: v_cmp_u_f32_e32 vcc_lo, v38, v38
-; GFX11-NEXT: s_delay_alu instid0(VALU_DEP_4)
-; GFX11-NEXT: v_add_f32_e32 v36, 0x40c00000, v36
-; GFX11-NEXT: v_lshrrev_b32_e32 v32, 16, v33
-; GFX11-NEXT: v_add_nc_u32_e32 v33, v35, v38
-; GFX11-NEXT: v_and_b32_e32 v35, 0xffff0000, v182
-; GFX11-NEXT: v_and_b32_e32 v34, 0xffff, v34
-; GFX11-NEXT: v_or_b32_e32 v48, 0x400000, v37
-; GFX11-NEXT: v_add_nc_u32_e32 v31, 0x7fff, v31
-; GFX11-NEXT: v_add_nc_u32_e32 v33, 0x7fff, v33
-; GFX11-NEXT: v_add_f32_e32 v35, 0x40c00000, v35
-; GFX11-NEXT: v_lshl_or_b32 v179, v32, 16, v34
-; GFX11-NEXT: v_and_b32_e32 v30, 0xffff, v30
-; GFX11-NEXT: v_lshl_or_b32 v136, v2, 16, v4
-; GFX11-NEXT: v_cndmask_b32_e32 v33, v33, v39, vcc_lo
-; GFX11-NEXT: v_bfe_u32 v38, v35, 16, 1
-; GFX11-NEXT: v_cmp_u_f32_e32 vcc_lo, v37, v37
-; GFX11-NEXT: v_bfe_u32 v37, v36, 16, 1
-; GFX11-NEXT: v_or_b32_e32 v39, 0x400000, v36
-; GFX11-NEXT: v_lshrrev_b32_e32 v33, 16, v33
-; GFX11-NEXT: v_dual_cndmask_b32 v31, v31, v48 :: v_dual_add_nc_u32 v38, v38, v35
-; GFX11-NEXT: s_delay_alu instid0(VALU_DEP_4) | instskip(SKIP_2) | instid1(VALU_DEP_4)
-; GFX11-NEXT: v_add_nc_u32_e32 v32, v37, v36
-; GFX11-NEXT: v_and_b32_e32 v37, 0xffff0000, v181
-; GFX11-NEXT: v_cmp_u_f32_e32 vcc_lo, v36, v36
-; GFX11-NEXT: v_add_nc_u32_e32 v34, 0x7fff, v38
-; GFX11-NEXT: v_lshlrev_b32_e32 v38, 16, v181
-; GFX11-NEXT: v_add_nc_u32_e32 v32, 0x7fff, v32
-; GFX11-NEXT: v_or_b32_e32 v48, 0x400000, v35
-; GFX11-NEXT: v_add_f32_e32 v37, 0x40c00000, v37
-; GFX11-NEXT: v_lshrrev_b32_e32 v31, 16, v31
-; GFX11-NEXT: v_add_f32_e32 v38, 0x40c00000, v38
-; GFX11-NEXT: v_cndmask_b32_e32 v32, v32, v39, vcc_lo
-; GFX11-NEXT: v_cmp_u_f32_e32 vcc_lo, v35, v35
-; GFX11-NEXT: v_and_b32_e32 v33, 0xffff, v33
-; GFX11-NEXT: v_bfe_u32 v35, v37, 16, 1
-; GFX11-NEXT: v_bfe_u32 v36, v38, 16, 1
-; GFX11-NEXT: v_lshrrev_b32_e32 v32, 16, v32
-; GFX11-NEXT: v_cndmask_b32_e32 v34, v34, v48, vcc_lo
-; GFX11-NEXT: v_lshl_or_b32 v180, v31, 16, v33
-; GFX11-NEXT: v_add_nc_u32_e32 v33, v35, v37
-; GFX11-NEXT: v_and_b32_e32 v35, 0xffff0000, v170
-; GFX11-NEXT: v_or_b32_e32 v39, 0x400000, v38
-; GFX11-NEXT: v_lshrrev_b32_e32 v31, 16, v34
-; GFX11-NEXT: v_add_nc_u32_e32 v34, v36, v38
-; GFX11-NEXT: s_delay_alu instid0(VALU_DEP_4) | instskip(SKIP_2) | instid1(VALU_DEP_4)
-; GFX11-NEXT: v_dual_add_f32 v35, 0x40c00000, v35 :: v_dual_lshlrev_b32 v36, 16, v170
-; GFX11-NEXT: v_cmp_u_f32_e32 vcc_lo, v38, v38
-; GFX11-NEXT: v_and_b32_e32 v32, 0xffff, v32
-; GFX11-NEXT: v_add_nc_u32_e32 v34, 0x7fff, v34
-; GFX11-NEXT: s_delay_alu instid0(VALU_DEP_4)
-; GFX11-NEXT: v_add_f32_e32 v36, 0x40c00000, v36
-; GFX11-NEXT: v_add_nc_u32_e32 v33, 0x7fff, v33
-; GFX11-NEXT: v_or_b32_e32 v48, 0x400000, v37
-; GFX11-NEXT: v_bfe_u32 v38, v35, 16, 1
-; GFX11-NEXT: v_cndmask_b32_e32 v34, v34, v39, vcc_lo
-; GFX11-NEXT: v_bfe_u32 v39, v36, 16, 1
-; GFX11-NEXT: v_cmp_u_f32_e32 vcc_lo, v37, v37
-; GFX11-NEXT: v_lshl_or_b32 v182, v31, 16, v32
-; GFX11-NEXT: v_add_nc_u32_e32 v37, v38, v35
-; GFX11-NEXT: v_lshrrev_b32_e32 v34, 16, v34
-; GFX11-NEXT: v_add_nc_u32_e32 v31, v39, v36
-; GFX11-NEXT: v_cndmask_b32_e32 v33, v33, v48, vcc_lo
-; GFX11-NEXT: v_or_b32_e32 v38, 0x400000, v36
-; GFX11-NEXT: v_cmp_u_f32_e32 vcc_lo, v36, v36
-; GFX11-NEXT: v_or_b32_e32 v48, 0x400000, v35
-; GFX11-NEXT: v_add_nc_u32_e32 v31, 0x7fff, v31
-; GFX11-NEXT: v_lshrrev_b32_e32 v32, 16, v33
-; GFX11-NEXT: v_and_b32_e32 v33, 0xffff, v34
-; GFX11-NEXT: v_add_nc_u32_e32 v34, 0x7fff, v37
-; GFX11-NEXT: v_and_b32_e32 v37, 0xffff0000, v169
-; GFX11-NEXT: v_cndmask_b32_e32 v31, v31, v38, vcc_lo
-; GFX11-NEXT: v_cmp_u_f32_e32 vcc_lo, v35, v35
-; GFX11-NEXT: v_lshlrev_b32_e32 v39, 16, v169
-; GFX11-NEXT: v_lshl_or_b32 v181, v32, 16, v33
-; GFX11-NEXT: v_add_f32_e32 v37, 0x40c00000, v37
-; GFX11-NEXT: v_and_b32_e32 v38, 0xffff0000, v176
-; GFX11-NEXT: v_cndmask_b32_e32 v34, v34, v48, vcc_lo
-; GFX11-NEXT: v_add_f32_e32 v36, 0x40c00000, v39
-; GFX11-NEXT: v_lshrrev_b32_e32 v31, 16, v31
-; GFX11-NEXT: v_bfe_u32 v35, v37, 16, 1
-; GFX11-NEXT: v_or_b32_e32 v39, 0x400000, v37
-; GFX11-NEXT: v_lshrrev_b32_e32 v33, 16, v34
-; GFX11-NEXT: v_bfe_u32 v32, v36, 16, 1
-; GFX11-NEXT: v_add_f32_e32 v38, 0x40c00000, v38
-; GFX11-NEXT: v_add_nc_u32_e32 v34, v35, v37
-; GFX11-NEXT: v_lshlrev_b32_e32 v35, 16, v176
-; GFX11-NEXT: v_cmp_u_f32_e32 vcc_lo, v37, v37
-; GFX11-NEXT: v_add_nc_u32_e32 v32, v32, v36
-; GFX11-NEXT: v_and_b32_e32 v31, 0xffff, v31
-; GFX11-NEXT: v_add_nc_u32_e32 v34, 0x7fff, v34
-; GFX11-NEXT: v_add_f32_e32 v35, 0x40c00000, v35
-; GFX11-NEXT: v_or_b32_e32 v48, 0x400000, v36
-; GFX11-NEXT: v_add_nc_u32_e32 v32, 0x7fff, v32
-; GFX11-NEXT: v_bfe_u32 v37, v38, 16, 1
-; GFX11-NEXT: v_cndmask_b32_e32 v34, v34, v39, vcc_lo
-; GFX11-NEXT: v_bfe_u32 v49, v35, 16, 1
-; GFX11-NEXT: v_cmp_u_f32_e32 vcc_lo, v36, v36
-; GFX11-NEXT: v_and_b32_e32 v27, 0xffff, v27
-; GFX11-NEXT: v_lshl_or_b32 v170, v33, 16, v31
-; GFX11-NEXT: v_lshrrev_b32_e32 v31, 16, v34
-; GFX11-NEXT: v_add_nc_u32_e32 v36, v49, v35
-; GFX11-NEXT: v_dual_cndmask_b32 v32, v32, v48 :: v_dual_add_nc_u32 v33, v37, v38
-; GFX11-NEXT: v_and_b32_e32 v37, 0xffff0000, v174
-; GFX11-NEXT: v_cmp_u_f32_e32 vcc_lo, v35, v35
-; GFX11-NEXT: s_delay_alu instid0(VALU_DEP_4) | instskip(SKIP_4) | instid1(VALU_DEP_4)
-; GFX11-NEXT: v_add_nc_u32_e32 v34, 0x7fff, v36
-; GFX11-NEXT: v_or_b32_e32 v36, 0x400000, v35
-; GFX11-NEXT: v_lshrrev_b32_e32 v32, 16, v32
-; GFX11-NEXT: v_add_nc_u32_e32 v33, 0x7fff, v33
-; GFX11-NEXT: v_or_b32_e32 v39, 0x400000, v38
-; GFX11-NEXT: v_dual_add_f32 v35, 0x40c00000, v37 :: v_dual_cndmask_b32 v34, v34, v36
-; GFX11-NEXT: v_lshlrev_b32_e32 v36, 16, v174
-; GFX11-NEXT: v_cmp_u_f32_e32 vcc_lo, v38, v38
-; GFX11-NEXT: v_and_b32_e32 v32, 0xffff, v32
-; GFX11-NEXT: s_delay_alu instid0(VALU_DEP_4) | instskip(SKIP_2) | instid1(VALU_DEP_4)
-; GFX11-NEXT: v_bfe_u32 v37, v35, 16, 1
-; GFX11-NEXT: v_lshrrev_b32_e32 v34, 16, v34
-; GFX11-NEXT: v_dual_add_f32 v36, 0x40c00000, v36 :: v_dual_cndmask_b32 v33, v33, v39
-; GFX11-NEXT: v_lshl_or_b32 v169, v31, 16, v32
-; GFX11-NEXT: s_delay_alu instid0(VALU_DEP_4) | instskip(NEXT) | instid1(VALU_DEP_4)
-; GFX11-NEXT: v_add_nc_u32_e32 v37, v37, v35
-; GFX11-NEXT: v_and_b32_e32 v34, 0xffff, v34
-; GFX11-NEXT: s_delay_alu instid0(VALU_DEP_4)
-; GFX11-NEXT: v_bfe_u32 v31, v36, 16, 1
-; GFX11-NEXT: v_lshrrev_b32_e32 v33, 16, v33
-; GFX11-NEXT: v_and_b32_e32 v32, 0xffff0000, v171
-; GFX11-NEXT: v_cmp_u_f32_e32 vcc_lo, v35, v35
-; GFX11-NEXT: v_lshlrev_b32_e32 v38, 16, v177
-; GFX11-NEXT: v_add_nc_u32_e32 v31, v31, v36
-; GFX11-NEXT: v_lshl_or_b32 v176, v33, 16, v34
-; GFX11-NEXT: v_add_nc_u32_e32 v33, 0x7fff, v37
-; GFX11-NEXT: v_or_b32_e32 v34, 0x400000, v35
-; GFX11-NEXT: v_dual_add_f32 v32, 0x40c00000, v32 :: v_dual_lshlrev_b32 v37, 16, v171
-; GFX11-NEXT: v_add_nc_u32_e32 v31, 0x7fff, v31
-; GFX11-NEXT: v_add_f32_e32 v38, 0x40c00000, v38
-; GFX11-NEXT: s_delay_alu instid0(VALU_DEP_4)
-; GFX11-NEXT: v_cndmask_b32_e32 v33, v33, v34, vcc_lo
-; GFX11-NEXT: v_or_b32_e32 v34, 0x400000, v36
-; GFX11-NEXT: v_add_f32_e32 v35, 0x40c00000, v37
-; GFX11-NEXT: v_bfe_u32 v37, v32, 16, 1
-; GFX11-NEXT: v_cmp_u_f32_e32 vcc_lo, v36, v36
-; GFX11-NEXT: v_or_b32_e32 v39, 0x400000, v32
-; GFX11-NEXT: v_bfe_u32 v50, v38, 16, 1
-; GFX11-NEXT: v_or_b32_e32 v48, 0x400000, v35
-; GFX11-NEXT: v_dual_cndmask_b32 v31, v31, v34 :: v_dual_add_nc_u32 v36, v37, v32
-; GFX11-NEXT: v_bfe_u32 v34, v35, 16, 1
-; GFX11-NEXT: v_and_b32_e32 v37, 0xffff0000, v177
-; GFX11-NEXT: v_cmp_u_f32_e32 vcc_lo, v32, v32
-; GFX11-NEXT: s_delay_alu instid0(VALU_DEP_4) | instskip(SKIP_1) | instid1(VALU_DEP_4)
-; GFX11-NEXT: v_add_nc_u32_e32 v36, 0x7fff, v36
-; GFX11-NEXT: v_lshrrev_b32_e32 v31, 16, v31
-; GFX11-NEXT: v_dual_add_f32 v37, 0x40c00000, v37 :: v_dual_add_nc_u32 v34, v34, v35
-; GFX11-NEXT: v_lshrrev_b32_e32 v33, 16, v33
-; GFX11-NEXT: s_delay_alu instid0(VALU_DEP_4) | instskip(SKIP_1) | instid1(VALU_DEP_4)
-; GFX11-NEXT: v_cndmask_b32_e32 v32, v36, v39, vcc_lo
-; GFX11-NEXT: v_cmp_u_f32_e32 vcc_lo, v35, v35
-; GFX11-NEXT: v_add_nc_u32_e32 v34, 0x7fff, v34
-; GFX11-NEXT: v_bfe_u32 v49, v37, 16, 1
-; GFX11-NEXT: v_add_nc_u32_e32 v36, v50, v38
-; GFX11-NEXT: v_or_b32_e32 v39, 0x400000, v38
-; GFX11-NEXT: v_lshlrev_b32_e32 v50, 16, v184
-; GFX11-NEXT: s_delay_alu instid0(VALU_DEP_4) | instskip(NEXT) | instid1(VALU_DEP_4)
-; GFX11-NEXT: v_dual_cndmask_b32 v34, v34, v48 :: v_dual_add_nc_u32 v35, v49, v37
-; GFX11-NEXT: v_add_nc_u32_e32 v36, 0x7fff, v36
-; GFX11-NEXT: v_cmp_u_f32_e32 vcc_lo, v38, v38
-; GFX11-NEXT: v_and_b32_e32 v48, 0xffff0000, v184
-; GFX11-NEXT: s_delay_alu instid0(VALU_DEP_4)
-; GFX11-NEXT: v_lshrrev_b32_e32 v34, 16, v34
-; GFX11-NEXT: v_add_nc_u32_e32 v35, 0x7fff, v35
-; GFX11-NEXT: v_or_b32_e32 v49, 0x400000, v37
-; GFX11-NEXT: v_cndmask_b32_e32 v36, v36, v39, vcc_lo
-; GFX11-NEXT: v_cmp_u_f32_e32 vcc_lo, v37, v37
-; GFX11-NEXT: v_add_f32_e32 v37, 0x40c00000, v50
-; GFX11-NEXT: v_and_b32_e32 v31, 0xffff, v31
-; GFX11-NEXT: v_lshrrev_b32_e32 v32, 16, v32
-; GFX11-NEXT: v_dual_add_f32 v38, 0x40c00000, v48 :: v_dual_cndmask_b32 v35, v35, v49
-; GFX11-NEXT: v_lshrrev_b32_e32 v36, 16, v36
-; GFX11-NEXT: v_and_b32_e32 v34, 0xffff, v34
-; GFX11-NEXT: v_bfe_u32 v48, v37, 16, 1
-; GFX11-NEXT: s_delay_alu instid0(VALU_DEP_4)
-; GFX11-NEXT: v_bfe_u32 v39, v38, 16, 1
-; GFX11-NEXT: v_lshrrev_b32_e32 v35, 16, v35
-; GFX11-NEXT: v_and_b32_e32 v36, 0xffff, v36
-; GFX11-NEXT: v_lshl_or_b32 v174, v33, 16, v31
-; GFX11-NEXT: v_lshl_or_b32 v171, v32, 16, v34
-; GFX11-NEXT: v_add_nc_u32_e32 v31, v48, v37
-; GFX11-NEXT: v_and_b32_e32 v33, 0xffff0000, v175
-; GFX11-NEXT: v_lshlrev_b32_e32 v34, 16, v175
-; GFX11-NEXT: v_add_nc_u32_e32 v39, v39, v38
-; GFX11-NEXT: v_lshl_or_b32 v177, v35, 16, v36
-; GFX11-NEXT: v_add_nc_u32_e32 v31, 0x7fff, v31
-; GFX11-NEXT: v_or_b32_e32 v35, 0x400000, v37
-; GFX11-NEXT: v_dual_add_f32 v33, 0x40c00000, v33 :: v_dual_add_f32 v34, 0x40c00000, v34
-; GFX11-NEXT: v_cmp_u_f32_e32 vcc_lo, v37, v37
-; GFX11-NEXT: v_add_nc_u32_e32 v32, 0x7fff, v39
-; GFX11-NEXT: v_or_b32_e32 v36, 0x400000, v38
-; GFX11-NEXT: s_delay_alu instid0(VALU_DEP_4)
-; GFX11-NEXT: v_bfe_u32 v37, v33, 16, 1
-; GFX11-NEXT: v_bfe_u32 v39, v34, 16, 1
-; GFX11-NEXT: v_cndmask_b32_e32 v31, v31, v35, vcc_lo
-; GFX11-NEXT: v_and_b32_e32 v35, 0xffff0000, v173
-; GFX11-NEXT: v_cmp_u_f32_e32 vcc_lo, v38, v38
-; GFX11-NEXT: v_lshlrev_b32_e32 v48, 16, v173
-; GFX11-NEXT: v_or_b32_e32 v49, 0x400000, v33
-; GFX11-NEXT: v_lshrrev_b32_e32 v31, 16, v31
-; GFX11-NEXT: v_dual_add_f32 v35, 0x40c00000, v35 :: v_dual_cndmask_b32 v32, v32, v36
-; GFX11-NEXT: v_add_nc_u32_e32 v36, v37, v33
-; GFX11-NEXT: v_add_nc_u32_e32 v37, v39, v34
-; GFX11-NEXT: v_or_b32_e32 v39, 0x400000, v34
-; GFX11-NEXT: s_delay_alu instid0(VALU_DEP_4)
-; GFX11-NEXT: v_bfe_u32 v38, v35, 16, 1
-; GFX11-NEXT: v_cmp_u_f32_e32 vcc_lo, v34, v34
-; GFX11-NEXT: v_add_nc_u32_e32 v36, 0x7fff, v36
-; GFX11-NEXT: v_add_nc_u32_e32 v37, 0x7fff, v37
-; GFX11-NEXT: v_lshrrev_b32_e32 v32, 16, v32
-; GFX11-NEXT: v_add_nc_u32_e32 v38, v38, v35
-; GFX11-NEXT: v_and_b32_e32 v31, 0xffff, v31
-; GFX11-NEXT: v_lshl_or_b32 v122, v3, 16, v6
-; GFX11-NEXT: v_cndmask_b32_e32 v34, v37, v39, vcc_lo
-; GFX11-NEXT: v_cmp_u_f32_e32 vcc_lo, v33, v33
-; GFX11-NEXT: v_add_nc_u32_e32 v37, 0x7fff, v38
-; GFX11-NEXT: v_add_f32_e32 v38, 0x40c00000, v48
-; GFX11-NEXT: v_or_b32_e32 v39, 0x400000, v35
-; GFX11-NEXT: v_dual_cndmask_b32 v33, v36, v49 :: v_dual_lshlrev_b32 v48, 16, v183
-; GFX11-NEXT: v_cmp_u_f32_e32 vcc_lo, v35, v35
-; GFX11-NEXT: s_delay_alu instid0(VALU_DEP_4) | instskip(SKIP_1) | instid1(VALU_DEP_4)
-; GFX11-NEXT: v_bfe_u32 v36, v38, 16, 1
-; GFX11-NEXT: v_or_b32_e32 v49, 0x400000, v38
-; GFX11-NEXT: v_add_f32_e32 v48, 0x40c00000, v48
-; GFX11-NEXT: v_lshrrev_b32_e32 v34, 16, v34
-; GFX11-NEXT: v_cndmask_b32_e32 v35, v37, v39, vcc_lo
-; GFX11-NEXT: v_and_b32_e32 v37, 0xffff0000, v172
-; GFX11-NEXT: v_lshlrev_b32_e32 v39, 16, v172
-; GFX11-NEXT: v_add_nc_u32_e32 v36, v36, v38
-; GFX11-NEXT: v_cmp_u_f32_e32 vcc_lo, v38, v38
-; GFX11-NEXT: v_or_b32_e32 v55, 0x400000, v48
-; GFX11-NEXT: v_add_f32_e32 v37, 0x40c00000, v37
-; GFX11-NEXT: v_add_f32_e32 v39, 0x40c00000, v39
-; GFX11-NEXT: v_add_nc_u32_e32 v36, 0x7fff, v36
-; GFX11-NEXT: v_lshrrev_b32_e32 v33, 16, v33
-; GFX11-NEXT: v_and_b32_e32 v34, 0xffff, v34
-; GFX11-NEXT: v_bfe_u32 v50, v37, 16, 1
-; GFX11-NEXT: v_bfe_u32 v38, v39, 16, 1
-; GFX11-NEXT: v_cndmask_b32_e32 v36, v36, v49, vcc_lo
-; GFX11-NEXT: v_or_b32_e32 v54, 0x400000, v39
-; GFX11-NEXT: v_cmp_u_f32_e32 vcc_lo, v39, v39
-; GFX11-NEXT: v_dual_add_f32 v50, 0x40c00000, v51 :: v_dual_add_nc_u32 v49, v50, v37
-; GFX11-NEXT: v_bfe_u32 v51, v48, 16, 1
-; GFX11-NEXT: v_add_nc_u32_e32 v38, v38, v39
-; GFX11-NEXT: v_or_b32_e32 v53, 0x400000, v37
-; GFX11-NEXT: s_delay_alu instid0(VALU_DEP_4)
-; GFX11-NEXT: v_add_nc_u32_e32 v49, 0x7fff, v49
-; GFX11-NEXT: v_bfe_u32 v52, v50, 16, 1
-; GFX11-NEXT: v_add_nc_u32_e32 v51, v51, v48
-; GFX11-NEXT: v_add_nc_u32_e32 v38, 0x7fff, v38
-; GFX11-NEXT: v_lshrrev_b32_e32 v36, 16, v36
-; GFX11-NEXT: v_lshrrev_b32_e32 v35, 16, v35
-; GFX11-NEXT: v_add_nc_u32_e32 v52, v52, v50
-; GFX11-NEXT: s_delay_alu instid0(VALU_DEP_4) | instskip(SKIP_2) | instid1(VALU_DEP_4)
-; GFX11-NEXT: v_dual_cndmask_b32 v38, v38, v54 :: v_dual_add_nc_u32 v51, 0x7fff, v51
-; GFX11-NEXT: v_cmp_u_f32_e32 vcc_lo, v48, v48
-; GFX11-NEXT: v_and_b32_e32 v36, 0xffff, v36
-; GFX11-NEXT: v_add_nc_u32_e32 v39, 0x7fff, v52
-; GFX11-NEXT: v_or_b32_e32 v52, 0x400000, v50
-; GFX11-NEXT: v_lshrrev_b32_e32 v38, 16, v38
-; GFX11-NEXT: v_cndmask_b32_e32 v48, v51, v55, vcc_lo
-; GFX11-NEXT: v_cmp_u_f32_e32 vcc_lo, v37, v37
-; GFX11-NEXT: v_lshl_or_b32 v184, v32, 16, v31
-; GFX11-NEXT: v_lshl_or_b32 v175, v33, 16, v34
-; GFX11-NEXT: v_and_b32_e32 v38, 0xffff, v38
-; GFX11-NEXT: v_lshrrev_b32_e32 v48, 16, v48
-; GFX11-NEXT: v_cndmask_b32_e32 v37, v49, v53, vcc_lo
-; GFX11-NEXT: v_cmp_u_f32_e32 vcc_lo, v50, v50
-; GFX11-NEXT: v_lshl_or_b32 v173, v35, 16, v36
-; GFX11-NEXT: v_lshl_or_b32 v97, v8, 16, v10
-; GFX11-NEXT: v_and_b32_e32 v48, 0xffff, v48
-; GFX11-NEXT: v_lshrrev_b32_e32 v37, 16, v37
-; GFX11-NEXT: v_cndmask_b32_e32 v39, v39, v52, vcc_lo
-; GFX11-NEXT: v_lshl_or_b32 v86, v9, 16, v12
-; GFX11-NEXT: v_lshl_or_b32 v76, v11, 16, v13
-; GFX11-NEXT: v_lshl_or_b32 v67, v14, 16, v17
-; GFX11-NEXT: v_lshl_or_b32 v172, v37, 16, v38
-; GFX11-NEXT: v_lshrrev_b32_e32 v39, 16, v39
-; GFX11-NEXT: v_lshl_or_b32 v59, v16, 16, v19
-; GFX11-NEXT: v_lshl_or_b32 v52, v18, 16, v20
-; GFX11-NEXT: v_lshl_or_b32 v46, v21, 16, v23
-; GFX11-NEXT: v_lshl_or_b32 v41, v22, 16, v25
-; GFX11-NEXT: v_lshl_or_b32 v183, v39, 16, v48
-; GFX11-NEXT: v_lshl_or_b32 v37, v24, 16, v27
-; GFX11-NEXT: v_lshl_or_b32 v34, v26, 16, v28
-; GFX11-NEXT: v_lshl_or_b32 v32, v29, 16, v30
-; GFX11-NEXT: .LBB43_3: ; %end
-; GFX11-NEXT: v_dual_mov_b32 v3, v41 :: v_dual_mov_b32 v4, v46
-; GFX11-NEXT: v_dual_mov_b32 v6, v59 :: v_dual_mov_b32 v9, v86
-; GFX11-NEXT: v_dual_mov_b32 v7, v67 :: v_dual_mov_b32 v8, v76
-; GFX11-NEXT: v_dual_mov_b32 v10, v97 :: v_dual_mov_b32 v13, v136
-; GFX11-NEXT: v_dual_mov_b32 v11, v109 :: v_dual_mov_b32 v12, v122
-; GFX11-NEXT: v_dual_mov_b32 v14, v151 :: v_dual_mov_b32 v17, v172
-; GFX11-NEXT: v_dual_mov_b32 v18, v173 :: v_dual_mov_b32 v19, v175
-; GFX11-NEXT: v_dual_mov_b32 v20, v184 :: v_dual_mov_b32 v23, v174
-; GFX11-NEXT: v_dual_mov_b32 v22, v171 :: v_dual_mov_b32 v25, v169
-; GFX11-NEXT: v_dual_mov_b32 v26, v170 :: v_dual_mov_b32 v29, v180
-; GFX11-NEXT: s_clause 0x1f
-; GFX11-NEXT: scratch_load_b32 v184, off, s32
-; GFX11-NEXT: scratch_load_b32 v175, off, s32 offset:4
-; GFX11-NEXT: scratch_load_b32 v174, off, s32 offset:8
-; GFX11-NEXT: scratch_load_b32 v173, off, s32 offset:12
-; GFX11-NEXT: scratch_load_b32 v172, off, s32 offset:16
-; GFX11-NEXT: scratch_load_b32 v171, off, s32 offset:20
-; GFX11-NEXT: scratch_load_b32 v170, off, s32 offset:24
-; GFX11-NEXT: scratch_load_b32 v169, off, s32 offset:28
-; GFX11-NEXT: scratch_load_b32 v168, off, s32 offset:32
-; GFX11-NEXT: scratch_load_b32 v159, off, s32 offset:36
-; GFX11-NEXT: scratch_load_b32 v158, off, s32 offset:40
-; GFX11-NEXT: scratch_load_b32 v157, off, s32 offset:44
-; GFX11-NEXT: scratch_load_b32 v156, off, s32 offset:48
-; GFX11-NEXT: scratch_load_b32 v155, off, s32 offset:52
-; GFX11-NEXT: scratch_load_b32 v154, off, s32 offset:56
-; GFX11-NEXT: scratch_load_b32 v153, off, s32 offset:60
-; GFX11-NEXT: scratch_load_b32 v152, off, s32 offset:64
-; GFX11-NEXT: scratch_load_b32 v143, off, s32 offset:68
-; GFX11-NEXT: scratch_load_b32 v142, off, s32 offset:72
-; GFX11-NEXT: scratch_load_b32 v141, off, s32 offset:76
-; GFX11-NEXT: scratch_load_b32 v140, off, s32 offset:80
-; GFX11-NEXT: scratch_load_b32 v139, off, s32 offset:84
-; GFX11-NEXT: scratch_load_b32 v138, off, s32 offset:88
-; GFX11-NEXT: scratch_load_b32 v137, off, s32 offset:92
-; GFX11-NEXT: scratch_load_b32 v136, off, s32 offset:96
-; GFX11-NEXT: scratch_load_b32 v127, off, s32 offset:100
-; GFX11-NEXT: scratch_load_b32 v126, off, s32 offset:104
-; GFX11-NEXT: scratch_load_b32 v125, off, s32 offset:108
-; GFX11-NEXT: scratch_load_b32 v124, off, s32 offset:112
-; GFX11-NEXT: scratch_load_b32 v123, off, s32 offset:116
-; GFX11-NEXT: scratch_load_b32 v122, off, s32 offset:120
-; GFX11-NEXT: scratch_load_b32 v121, off, s32 offset:124
-; GFX11-NEXT: s_clause 0x1f
-; GFX11-NEXT: scratch_load_b32 v120, off, s32 offset:128
-; GFX11-NEXT: scratch_load_b32 v111, off, s32 offset:132
-; GFX11-NEXT: scratch_load_b32 v110, off, s32 offset:136
-; GFX11-NEXT: scratch_load_b32 v109, off, s32 offset:140
-; GFX11-NEXT: scratch_load_b32 v108, off, s32 offset:144
-; GFX11-NEXT: scratch_load_b32 v107, off, s32 offset:148
-; GFX11-NEXT: scratch_load_b32 v106, off, s32 offset:152
-; GFX11-NEXT: scratch_load_b32 v105, off, s32 offset:156
-; GFX11-NEXT: scratch_load_b32 v104, off, s32 offset:160
-; GFX11-NEXT: scratch_load_b32 v95, off, s32 offset:164
-; GFX11-NEXT: scratch_load_b32 v94, off, s32 offset:168
-; GFX11-NEXT: scratch_load_b32 v93, off, s32 offset:172
-; GFX11-NEXT: scratch_load_b32 v92, off, s32 offset:176
-; GFX11-NEXT: scratch_load_b32 v91, off, s32 offset:180
-; GFX11-NEXT: scratch_load_b32 v90, off, s32 offset:184
-; GFX11-NEXT: scratch_load_b32 v89, off, s32 offset:188
-; GFX11-NEXT: scratch_load_b32 v88, off, s32 offset:192
-; GFX11-NEXT: scratch_load_b32 v79, off, s32 offset:196
-; GFX11-NEXT: scratch_load_b32 v78, off, s32 offset:200
-; GFX11-NEXT: scratch_load_b32 v77, off, s32 offset:204
-; GFX11-NEXT: scratch_load_b32 v76, off, s32 offset:208
-; GFX11-NEXT: scratch_load_b32 v75, off, s32 offset:212
-; GFX11-NEXT: scratch_load_b32 v74, off, s32 offset:216
-; GFX11-NEXT: scratch_load_b32 v73, off, s32 offset:220
-; GFX11-NEXT: scratch_load_b32 v72, off, s32 offset:224
-; GFX11-NEXT: scratch_load_b32 v63, off, s32 offset:228
-; GFX11-NEXT: scratch_load_b32 v62, off, s32 offset:232
-; GFX11-NEXT: scratch_load_b32 v61, off, s32 offset:236
-; GFX11-NEXT: scratch_load_b32 v60, off, s32 offset:240
-; GFX11-NEXT: scratch_load_b32 v59, off, s32 offset:244
-; GFX11-NEXT: scratch_load_b32 v58, off, s32 offset:248
-; GFX11-NEXT: scratch_load_b32 v57, off, s32 offset:252
-; GFX11-NEXT: s_clause 0x8
-; GFX11-NEXT: scratch_load_b32 v56, off, s32 offset:256
-; GFX11-NEXT: scratch_load_b32 v47, off, s32 offset:260
-; GFX11-NEXT: scratch_load_b32 v46, off, s32 offset:264
-; GFX11-NEXT: scratch_load_b32 v45, off, s32 offset:268
-; GFX11-NEXT: scratch_load_b32 v44, off, s32 offset:272
-; GFX11-NEXT: scratch_load_b32 v43, off, s32 offset:276
-; GFX11-NEXT: scratch_load_b32 v42, off, s32 offset:280
-; GFX11-NEXT: scratch_load_b32 v41, off, s32 offset:284
-; GFX11-NEXT: scratch_load_b32 v40, off, s32 offset:288
-; GFX11-NEXT: v_dual_mov_b32 v0, v32 :: v_dual_mov_b32 v1, v34
-; GFX11-NEXT: v_dual_mov_b32 v2, v37 :: v_dual_mov_b32 v5, v52
-; GFX11-NEXT: v_dual_mov_b32 v16, v183 :: v_dual_mov_b32 v21, v177
-; GFX11-NEXT: v_dual_mov_b32 v24, v176 :: v_dual_mov_b32 v27, v181
-; GFX11-NEXT: v_mov_b32_e32 v28, v182
-; GFX11-NEXT: v_dual_mov_b32 v30, v179 :: v_dual_mov_b32 v31, v178
-; GFX11-NEXT: s_waitcnt vmcnt(0)
-; GFX11-NEXT: s_setpc_b64 s[30:31]
-; GFX11-NEXT: .LBB43_4:
-; GFX11-NEXT: ; implicit-def: $vgpr32_vgpr33_vgpr34_vgpr35_vgpr36_vgpr37_vgpr38_vgpr39_vgpr40_vgpr41_vgpr42_vgpr43_vgpr44_vgpr45_vgpr46_vgpr47_vgpr48_vgpr49_vgpr50_vgpr51_vgpr52_vgpr53_vgpr54_vgpr55_vgpr56_vgpr57_vgpr58_vgpr59_vgpr60_vgpr61_vgpr62_vgpr63
-; GFX11-NEXT: ; implicit-def: $vgpr33_vgpr34_vgpr35_vgpr36_vgpr37_vgpr38_vgpr39_vgpr40_vgpr41_vgpr42_vgpr43_vgpr44_vgpr45_vgpr46_vgpr47_vgpr48_vgpr49_vgpr50_vgpr51_vgpr52_vgpr53_vgpr54_vgpr55_vgpr56_vgpr57_vgpr58_vgpr59_vgpr60_vgpr61_vgpr62_vgpr63_vgpr64
-; GFX11-NEXT: ; implicit-def: $vgpr0_vgpr1_vgpr2_vgpr3_vgpr4_vgpr5_vgpr6_vgpr7_vgpr8_vgpr9_vgpr10_vgpr11_vgpr12_vgpr13_vgpr14_vgpr15_vgpr16_vgpr17_vgpr18_vgpr19_vgpr20_vgpr21_vgpr22_vgpr23_vgpr24_vgpr25_vgpr26_vgpr27_vgpr28_vgpr29_vgpr30_vgpr31
-; GFX11-NEXT: ; implicit-def: $vgpr35_vgpr36_vgpr37_vgpr38_vgpr39_vgpr40_vgpr41_vgpr42_vgpr43_vgpr44_vgpr45_vgpr46_vgpr47_vgpr48_vgpr49_vgpr50_vgpr51_vgpr52_vgpr53_vgpr54_vgpr55_vgpr56_vgpr57_vgpr58_vgpr59_vgpr60_vgpr61_vgpr62_vgpr63_vgpr64_vgpr65_vgpr66
-; GFX11-NEXT: ; implicit-def: $vgpr38_vgpr39_vgpr40_vgpr41_vgpr42_vgpr43_vgpr44_vgpr45_vgpr46_vgpr47_vgpr48_vgpr49_vgpr50_vgpr51_vgpr52_vgpr53_vgpr54_vgpr55_vgpr56_vgpr57_vgpr58_vgpr59_vgpr60_vgpr61_vgpr62_vgpr63_vgpr64_vgpr65_vgpr66_vgpr67_vgpr68_vgpr69
-; GFX11-NEXT: ; implicit-def: $vgpr42_vgpr43_vgpr44_vgpr45_vgpr46_vgpr47_vgpr48_vgpr49_vgpr50_vgpr51_vgpr52_vgpr53_vgpr54_vgpr55_vgpr56_vgpr57_vgpr58_vgpr59_vgpr60_vgpr61_vgpr62_vgpr63_vgpr64_vgpr65_vgpr66_vgpr67_vgpr68_vgpr69_vgpr70_vgpr71_vgpr72_vgpr73
-; GFX11-NEXT: ; implicit-def: $vgpr47_vgpr48_vgpr49_vgpr50_vgpr51_vgpr52_vgpr53_vgpr54_vgpr55_vgpr56_vgpr57_vgpr58_vgpr59_vgpr60_vgpr61_vgpr62_vgpr63_vgpr64_vgpr65_vgpr66_vgpr67_vgpr68_vgpr69_vgpr70_vgpr71_vgpr72_vgpr73_vgpr74_vgpr75_vgpr76_vgpr77_vgpr78
-; GFX11-NEXT: ; implicit-def: $vgpr53_vgpr54_vgpr55_vgpr56_vgpr57_vgpr58_vgpr59_vgpr60_vgpr61_vgpr62_vgpr63_vgpr64_vgpr65_vgpr66_vgpr67_vgpr68_vgpr69_vgpr70_vgpr71_vgpr72_vgpr73_vgpr74_vgpr75_vgpr76_vgpr77_vgpr78_vgpr79_vgpr80_vgpr81_vgpr82_vgpr83_vgpr84
-; GFX11-NEXT: ; implicit-def: $vgpr60_vgpr61_vgpr62_vgpr63_vgpr64_vgpr65_vgpr66_vgpr67_vgpr68_vgpr69_vgpr70_vgpr71_vgpr72_vgpr73_vgpr74_vgpr75_vgpr76_vgpr77_vgpr78_vgpr79_vgpr80_vgpr81_vgpr82_vgpr83_vgpr84_vgpr85_vgpr86_vgpr87_vgpr88_vgpr89_vgpr90_vgpr91
-; GFX11-NEXT: ; implicit-def: $vgpr68_vgpr69_vgpr70_vgpr71_vgpr72_vgpr73_vgpr74_vgpr75_vgpr76_vgpr77_vgpr78_vgpr79_vgpr80_vgpr81_vgpr82_vgpr83_vgpr84_vgpr85_vgpr86_vgpr87_vgpr88_vgpr89_vgpr90_vgpr91_vgpr92_vgpr93_vgpr94_vgpr95_vgpr96_vgpr97_vgpr98_vgpr99
-; GFX11-NEXT: ; implicit-def: $vgpr77_vgpr78_vgpr79_vgpr80_vgpr81_vgpr82_vgpr83_vgpr84_vgpr85_vgpr86_vgpr87_vgpr88_vgpr89_vgpr90_vgpr91_vgpr92_vgpr93_vgpr94_vgpr95_vgpr96_vgpr97_vgpr98_vgpr99_vgpr100_vgpr101_vgpr102_vgpr103_vgpr104_vgpr105_vgpr106_vgpr107_vgpr108
-; GFX11-NEXT: ; implicit-def: $vgpr87_vgpr88_vgpr89_vgpr90_vgpr91_vgpr92_vgpr93_vgpr94_vgpr95_vgpr96_vgpr97_vgpr98_vgpr99_vgpr100_vgpr101_vgpr102_vgpr103_vgpr104_vgpr105_vgpr106_vgpr107_vgpr108_vgpr109_vgpr110_vgpr111_vgpr112_vgpr113_vgpr114_vgpr115_vgpr116_vgpr117_vgpr118
-; GFX11-NEXT: ; implicit-def: $vgpr98_vgpr99_vgpr100_vgpr101_vgpr102_vgpr103_vgpr104_vgpr105_vgpr106_vgpr107_vgpr108_vgpr109_vgpr110_vgpr111_vgpr112_vgpr113_vgpr114_vgpr115_vgpr116_vgpr117_vgpr118_vgpr119_vgpr120_vgpr121_vgpr122_vgpr123_vgpr124_vgpr125_vgpr126_vgpr127_vgpr128_vgpr129
-; GFX11-NEXT: ; implicit-def: $vgpr110_vgpr111_vgpr112_vgpr113_vgpr114_vgpr115_vgpr116_vgpr117_vgpr118_vgpr119_vgpr120_vgpr121_vgpr122_vgpr123_vgpr124_vgpr125_vgpr126_vgpr127_vgpr128_vgpr129_vgpr130_vgpr131_vgpr132_vgpr133_vgpr134_vgpr135_vgpr136_vgpr137_vgpr138_vgpr139_vgpr140_vgpr141
-; GFX11-NEXT: ; implicit-def: $vgpr123_vgpr124_vgpr125_vgpr126_vgpr127_vgpr128_vgpr129_vgpr130_vgpr131_vgpr132_vgpr133_vgpr134_vgpr135_vgpr136_vgpr137_vgpr138_vgpr139_vgpr140_vgpr141_vgpr142_vgpr143_vgpr144_vgpr145_vgpr146_vgpr147_vgpr148_vgpr149_vgpr150_vgpr151_vgpr152_vgpr153_vgpr154
-; GFX11-NEXT: ; implicit-def: $vgpr137_vgpr138_vgpr139_vgpr140_vgpr141_vgpr142_vgpr143_vgpr144_vgpr145_vgpr146_vgpr147_vgpr148_vgpr149_vgpr150_vgpr151_vgpr152_vgpr153_vgpr154_vgpr155_vgpr156_vgpr157_vgpr158_vgpr159_vgpr160_vgpr161_vgpr162_vgpr163_vgpr164_vgpr165_vgpr166_vgpr167_vgpr168
-; GFX11-NEXT: s_branch .LBB43_2
+; GFX11-TRUE16-LABEL: bitcast_v64bf16_to_v32f32_scalar:
+; GFX11-TRUE16: ; %bb.0:
+; GFX11-TRUE16-NEXT: s_waitcnt vmcnt(0) expcnt(0) lgkmcnt(0)
+; GFX11-TRUE16-NEXT: v_cmp_ne_u32_e32 vcc_lo, 0, v14
+; GFX11-TRUE16-NEXT: s_clause 0x1f
+; GFX11-TRUE16-NEXT: scratch_store_b32 off, v40, s32 offset:280
+; GFX11-TRUE16-NEXT: scratch_store_b32 off, v41, s32 offset:276
+; GFX11-TRUE16-NEXT: scratch_store_b32 off, v42, s32 offset:272
+; GFX11-TRUE16-NEXT: scratch_store_b32 off, v43, s32 offset:268
+; GFX11-TRUE16-NEXT: scratch_store_b32 off, v44, s32 offset:264
+; GFX11-TRUE16-NEXT: scratch_store_b32 off, v45, s32 offset:260
+; GFX11-TRUE16-NEXT: scratch_store_b32 off, v46, s32 offset:256
+; GFX11-TRUE16-NEXT: scratch_store_b32 off, v47, s32 offset:252
+; GFX11-TRUE16-NEXT: scratch_store_b32 off, v56, s32 offset:248
+; GFX11-TRUE16-NEXT: scratch_store_b32 off, v57, s32 offset:244
+; GFX11-TRUE16-NEXT: scratch_store_b32 off, v58, s32 offset:240
+; GFX11-TRUE16-NEXT: scratch_store_b32 off, v59, s32 offset:236
+; GFX11-TRUE16-NEXT: scratch_store_b32 off, v60, s32 offset:232
+; GFX11-TRUE16-NEXT: scratch_store_b32 off, v61, s32 offset:228
+; GFX11-TRUE16-NEXT: scratch_store_b32 off, v62, s32 offset:224
+; GFX11-TRUE16-NEXT: scratch_store_b32 off, v63, s32 offset:220
+; GFX11-TRUE16-NEXT: scratch_store_b32 off, v72, s32 offset:216
+; GFX11-TRUE16-NEXT: scratch_store_b32 off, v73, s32 offset:212
+; GFX11-TRUE16-NEXT: scratch_store_b32 off, v74, s32 offset:208
+; GFX11-TRUE16-NEXT: scratch_store_b32 off, v75, s32 offset:204
+; GFX11-TRUE16-NEXT: scratch_store_b32 off, v76, s32 offset:200
+; GFX11-TRUE16-NEXT: scratch_store_b32 off, v77, s32 offset:196
+; GFX11-TRUE16-NEXT: scratch_store_b32 off, v78, s32 offset:192
+; GFX11-TRUE16-NEXT: scratch_store_b32 off, v79, s32 offset:188
+; GFX11-TRUE16-NEXT: scratch_store_b32 off, v88, s32 offset:184
+; GFX11-TRUE16-NEXT: scratch_store_b32 off, v89, s32 offset:180
+; GFX11-TRUE16-NEXT: scratch_store_b32 off, v90, s32 offset:176
+; GFX11-TRUE16-NEXT: scratch_store_b32 off, v91, s32 offset:172
+; GFX11-TRUE16-NEXT: scratch_store_b32 off, v92, s32 offset:168
+; GFX11-TRUE16-NEXT: scratch_store_b32 off, v93, s32 offset:164
+; GFX11-TRUE16-NEXT: scratch_store_b32 off, v94, s32 offset:160
+; GFX11-TRUE16-NEXT: scratch_store_b32 off, v95, s32 offset:156
+; GFX11-TRUE16-NEXT: s_clause 0x1f
+; GFX11-TRUE16-NEXT: scratch_store_b32 off, v104, s32 offset:152
+; GFX11-TRUE16-NEXT: scratch_store_b32 off, v105, s32 offset:148
+; GFX11-TRUE16-NEXT: scratch_store_b32 off, v106, s32 offset:144
+; GFX11-TRUE16-NEXT: scratch_store_b32 off, v107, s32 offset:140
+; GFX11-TRUE16-NEXT: scratch_store_b32 off, v108, s32 offset:136
+; GFX11-TRUE16-NEXT: scratch_store_b32 off, v109, s32 offset:132
+; GFX11-TRUE16-NEXT: scratch_store_b32 off, v110, s32 offset:128
+; GFX11-TRUE16-NEXT: scratch_store_b32 off, v111, s32 offset:124
+; GFX11-TRUE16-NEXT: scratch_store_b32 off, v120, s32 offset:120
+; GFX11-TRUE16-NEXT: scratch_store_b32 off, v121, s32 offset:116
+; GFX11-TRUE16-NEXT: scratch_store_b32 off, v122, s32 offset:112
+; GFX11-TRUE16-NEXT: scratch_store_b32 off, v123, s32 offset:108
+; GFX11-TRUE16-NEXT: scratch_store_b32 off, v124, s32 offset:104
+; GFX11-TRUE16-NEXT: scratch_store_b32 off, v125, s32 offset:100
+; GFX11-TRUE16-NEXT: scratch_store_b32 off, v126, s32 offset:96
+; GFX11-TRUE16-NEXT: scratch_store_b32 off, v127, s32 offset:92
+; GFX11-TRUE16-NEXT: scratch_store_b32 off, v136, s32 offset:88
+; GFX11-TRUE16-NEXT: scratch_store_b32 off, v137, s32 offset:84
+; GFX11-TRUE16-NEXT: scratch_store_b32 off, v138, s32 offset:80
+; GFX11-TRUE16-NEXT: scratch_store_b32 off, v139, s32 offset:76
+; GFX11-TRUE16-NEXT: scratch_store_b32 off, v140, s32 offset:72
+; GFX11-TRUE16-NEXT: scratch_store_b32 off, v141, s32 offset:68
+; GFX11-TRUE16-NEXT: scratch_store_b32 off, v142, s32 offset:64
+; GFX11-TRUE16-NEXT: scratch_store_b32 off, v143, s32 offset:60
+; GFX11-TRUE16-NEXT: scratch_store_b32 off, v152, s32 offset:56
+; GFX11-TRUE16-NEXT: scratch_store_b32 off, v153, s32 offset:52
+; GFX11-TRUE16-NEXT: scratch_store_b32 off, v154, s32 offset:48
+; GFX11-TRUE16-NEXT: scratch_store_b32 off, v155, s32 offset:44
+; GFX11-TRUE16-NEXT: scratch_store_b32 off, v156, s32 offset:40
+; GFX11-TRUE16-NEXT: scratch_store_b32 off, v157, s32 offset:36
+; GFX11-TRUE16-NEXT: scratch_store_b32 off, v158, s32 offset:32
+; GFX11-TRUE16-NEXT: scratch_store_b32 off, v159, s32 offset:28
+; GFX11-TRUE16-NEXT: s_clause 0x6
+; GFX11-TRUE16-NEXT: scratch_store_b32 off, v168, s32 offset:24
+; GFX11-TRUE16-NEXT: scratch_store_b32 off, v169, s32 offset:20
+; GFX11-TRUE16-NEXT: scratch_store_b32 off, v170, s32 offset:16
+; GFX11-TRUE16-NEXT: scratch_store_b32 off, v171, s32 offset:12
+; GFX11-TRUE16-NEXT: scratch_store_b32 off, v172, s32 offset:8
+; GFX11-TRUE16-NEXT: scratch_store_b32 off, v173, s32 offset:4
+; GFX11-TRUE16-NEXT: scratch_store_b32 off, v174, s32
+; GFX11-TRUE16-NEXT: v_dual_mov_b32 v167, v13 :: v_dual_mov_b32 v176, v12
+; GFX11-TRUE16-NEXT: v_dual_mov_b32 v177, v11 :: v_dual_mov_b32 v178, v10
+; GFX11-TRUE16-NEXT: v_dual_mov_b32 v179, v9 :: v_dual_mov_b32 v180, v8
+; GFX11-TRUE16-NEXT: v_dual_mov_b32 v181, v7 :: v_dual_mov_b32 v182, v6
+; GFX11-TRUE16-NEXT: v_dual_mov_b32 v183, v5 :: v_dual_mov_b32 v168, v4
+; GFX11-TRUE16-NEXT: v_dual_mov_b32 v169, v3 :: v_dual_mov_b32 v170, v2
+; GFX11-TRUE16-NEXT: v_dual_mov_b32 v171, v1 :: v_dual_mov_b32 v172, v0
+; GFX11-TRUE16-NEXT: v_dual_mov_b32 v174, s28 :: v_dual_mov_b32 v173, s29
+; GFX11-TRUE16-NEXT: s_mov_b32 s4, 0
+; GFX11-TRUE16-NEXT: s_and_b32 s5, vcc_lo, exec_lo
+; GFX11-TRUE16-NEXT: s_cbranch_scc0 .LBB43_4
+; GFX11-TRUE16-NEXT: ; %bb.1: ; %cmp.false
+; GFX11-TRUE16-NEXT: v_dual_mov_b32 v135, s0 :: v_dual_mov_b32 v134, s1
+; GFX11-TRUE16-NEXT: v_dual_mov_b32 v132, s2 :: v_dual_mov_b32 v129, s3
+; GFX11-TRUE16-NEXT: v_dual_mov_b32 v125, s16 :: v_dual_mov_b32 v120, s17
+; GFX11-TRUE16-NEXT: v_dual_mov_b32 v114, s18 :: v_dual_mov_b32 v107, s19
+; GFX11-TRUE16-NEXT: v_dual_mov_b32 v99, s20 :: v_dual_mov_b32 v90, s21
+; GFX11-TRUE16-NEXT: v_dual_mov_b32 v80, s22 :: v_dual_mov_b32 v69, s23
+; GFX11-TRUE16-NEXT: v_dual_mov_b32 v57, s24 :: v_dual_mov_b32 v44, s25
+; GFX11-TRUE16-NEXT: v_dual_mov_b32 v30, s26 :: v_dual_mov_b32 v15, s27
+; GFX11-TRUE16-NEXT: s_and_not1_b32 vcc_lo, exec_lo, s4
+; GFX11-TRUE16-NEXT: s_cbranch_vccnz .LBB43_3
+; GFX11-TRUE16-NEXT: .LBB43_2: ; %cmp.true
+; GFX11-TRUE16-NEXT: s_lshl_b32 s5, s27, 16
+; GFX11-TRUE16-NEXT: s_and_b32 s4, s27, 0xffff0000
+; GFX11-TRUE16-NEXT: v_add_f32_e64 v1, 0x40c00000, s5
+; GFX11-TRUE16-NEXT: v_add_f32_e64 v0, 0x40c00000, s4
+; GFX11-TRUE16-NEXT: s_and_b32 s4, s26, 0xffff0000
+; GFX11-TRUE16-NEXT: s_lshl_b32 s6, s26, 16
+; GFX11-TRUE16-NEXT: v_add_f32_e64 v3, 0x40c00000, s4
+; GFX11-TRUE16-NEXT: v_bfe_u32 v4, v1, 16, 1
+; GFX11-TRUE16-NEXT: v_bfe_u32 v2, v0, 16, 1
+; GFX11-TRUE16-NEXT: v_or_b32_e32 v9, 0x400000, v1
+; GFX11-TRUE16-NEXT: v_or_b32_e32 v8, 0x400000, v0
+; GFX11-TRUE16-NEXT: v_bfe_u32 v7, v3, 16, 1
+; GFX11-TRUE16-NEXT: v_add_nc_u32_e32 v4, v4, v1
+; GFX11-TRUE16-NEXT: v_cmp_u_f32_e32 vcc_lo, v0, v0
+; GFX11-TRUE16-NEXT: s_and_b32 s5, s25, 0xffff0000
+; GFX11-TRUE16-NEXT: s_lshl_b32 s7, s25, 16
+; GFX11-TRUE16-NEXT: v_add_f32_e64 v5, 0x40c00000, s6
+; GFX11-TRUE16-NEXT: v_add_nc_u32_e32 v4, 0x7fff, v4
+; GFX11-TRUE16-NEXT: v_add_nc_u32_e32 v2, v2, v0
+; GFX11-TRUE16-NEXT: v_add_f32_e64 v6, 0x40c00000, s5
+; GFX11-TRUE16-NEXT: s_and_b32 s4, s24, 0xffff0000
+; GFX11-TRUE16-NEXT: s_delay_alu instid0(VALU_DEP_2) | instskip(NEXT) | instid1(VALU_DEP_1)
+; GFX11-TRUE16-NEXT: v_add_nc_u32_e32 v2, 0x7fff, v2
+; GFX11-TRUE16-NEXT: v_dual_cndmask_b32 v0, v2, v8 :: v_dual_add_nc_u32 v7, v7, v3
+; GFX11-TRUE16-NEXT: v_cmp_u_f32_e32 vcc_lo, v1, v1
+; GFX11-TRUE16-NEXT: v_or_b32_e32 v2, 0x400000, v3
+; GFX11-TRUE16-NEXT: v_bfe_u32 v8, v5, 16, 1
+; GFX11-TRUE16-NEXT: s_delay_alu instid0(VALU_DEP_4) | instskip(SKIP_4) | instid1(VALU_DEP_4)
+; GFX11-TRUE16-NEXT: v_lshrrev_b32_e32 v0, 16, v0
+; GFX11-TRUE16-NEXT: v_cndmask_b32_e32 v1, v4, v9, vcc_lo
+; GFX11-TRUE16-NEXT: v_add_f32_e64 v4, 0x40c00000, s7
+; GFX11-TRUE16-NEXT: v_bfe_u32 v9, v6, 16, 1
+; GFX11-TRUE16-NEXT: v_cmp_u_f32_e32 vcc_lo, v3, v3
+; GFX11-TRUE16-NEXT: v_lshrrev_b32_e32 v15, 16, v1
+; GFX11-TRUE16-NEXT: s_delay_alu instid0(VALU_DEP_4)
+; GFX11-TRUE16-NEXT: v_or_b32_e32 v10, 0x400000, v4
+; GFX11-TRUE16-NEXT: v_add_nc_u32_e32 v7, 0x7fff, v7
+; GFX11-TRUE16-NEXT: v_bfe_u32 v3, v4, 16, 1
+; GFX11-TRUE16-NEXT: v_mov_b16_e32 v15.h, v0.l
+; GFX11-TRUE16-NEXT: v_add_f32_e64 v0, 0x40c00000, s4
+; GFX11-TRUE16-NEXT: s_lshl_b32 s4, s24, 16
+; GFX11-TRUE16-NEXT: v_dual_cndmask_b32 v2, v7, v2 :: v_dual_add_nc_u32 v7, v8, v5
+; GFX11-TRUE16-NEXT: v_add_nc_u32_e32 v8, v9, v6
+; GFX11-TRUE16-NEXT: v_cmp_u_f32_e32 vcc_lo, v6, v6
+; GFX11-TRUE16-NEXT: v_or_b32_e32 v9, 0x400000, v5
+; GFX11-TRUE16-NEXT: s_delay_alu instid0(VALU_DEP_4) | instskip(SKIP_4) | instid1(VALU_DEP_4)
+; GFX11-TRUE16-NEXT: v_lshrrev_b32_e32 v1, 16, v2
+; GFX11-TRUE16-NEXT: v_add_nc_u32_e32 v2, v3, v4
+; GFX11-TRUE16-NEXT: v_add_nc_u32_e32 v3, 0x7fff, v7
+; GFX11-TRUE16-NEXT: v_add_nc_u32_e32 v7, 0x7fff, v8
+; GFX11-TRUE16-NEXT: v_or_b32_e32 v8, 0x400000, v6
+; GFX11-TRUE16-NEXT: v_add_nc_u32_e32 v2, 0x7fff, v2
+; GFX11-TRUE16-NEXT: s_delay_alu instid0(VALU_DEP_2) | instskip(SKIP_3) | instid1(VALU_DEP_4)
+; GFX11-TRUE16-NEXT: v_cndmask_b32_e32 v6, v7, v8, vcc_lo
+; GFX11-TRUE16-NEXT: v_cmp_u_f32_e32 vcc_lo, v5, v5
+; GFX11-TRUE16-NEXT: v_cndmask_b32_e32 v3, v3, v9, vcc_lo
+; GFX11-TRUE16-NEXT: v_cmp_u_f32_e32 vcc_lo, v4, v4
+; GFX11-TRUE16-NEXT: v_lshrrev_b32_e32 v4, 16, v6
+; GFX11-TRUE16-NEXT: s_delay_alu instid0(VALU_DEP_3) | instskip(SKIP_4) | instid1(VALU_DEP_3)
+; GFX11-TRUE16-NEXT: v_lshrrev_b32_e32 v30, 16, v3
+; GFX11-TRUE16-NEXT: v_mov_b16_e32 v30.h, v1.l
+; GFX11-TRUE16-NEXT: v_bfe_u32 v1, v0, 16, 1
+; GFX11-TRUE16-NEXT: v_cndmask_b32_e32 v2, v2, v10, vcc_lo
+; GFX11-TRUE16-NEXT: v_cmp_u_f32_e32 vcc_lo, v0, v0
+; GFX11-TRUE16-NEXT: v_add_nc_u32_e32 v1, v1, v0
+; GFX11-TRUE16-NEXT: s_delay_alu instid0(VALU_DEP_3) | instskip(SKIP_2) | instid1(VALU_DEP_4)
+; GFX11-TRUE16-NEXT: v_lshrrev_b32_e32 v44, 16, v2
+; GFX11-TRUE16-NEXT: v_or_b32_e32 v2, 0x400000, v0
+; GFX11-TRUE16-NEXT: v_mov_b16_e32 v44.h, v4.l
+; GFX11-TRUE16-NEXT: v_add_nc_u32_e32 v1, 0x7fff, v1
+; GFX11-TRUE16-NEXT: s_delay_alu instid0(VALU_DEP_1) | instskip(SKIP_2) | instid1(VALU_DEP_2)
+; GFX11-TRUE16-NEXT: v_cndmask_b32_e32 v0, v1, v2, vcc_lo
+; GFX11-TRUE16-NEXT: v_add_f32_e64 v1, 0x40c00000, s4
+; GFX11-TRUE16-NEXT: s_and_b32 s4, s23, 0xffff0000
+; GFX11-TRUE16-NEXT: v_lshrrev_b32_e32 v0, 16, v0
+; GFX11-TRUE16-NEXT: s_delay_alu instid0(VALU_DEP_2) | instskip(SKIP_2) | instid1(VALU_DEP_3)
+; GFX11-TRUE16-NEXT: v_bfe_u32 v2, v1, 16, 1
+; GFX11-TRUE16-NEXT: v_or_b32_e32 v3, 0x400000, v1
+; GFX11-TRUE16-NEXT: v_cmp_u_f32_e32 vcc_lo, v1, v1
+; GFX11-TRUE16-NEXT: v_add_nc_u32_e32 v2, v2, v1
+; GFX11-TRUE16-NEXT: s_delay_alu instid0(VALU_DEP_1) | instskip(NEXT) | instid1(VALU_DEP_1)
+; GFX11-TRUE16-NEXT: v_add_nc_u32_e32 v2, 0x7fff, v2
+; GFX11-TRUE16-NEXT: v_cndmask_b32_e32 v1, v2, v3, vcc_lo
+; GFX11-TRUE16-NEXT: s_delay_alu instid0(VALU_DEP_1) | instskip(SKIP_3) | instid1(VALU_DEP_1)
+; GFX11-TRUE16-NEXT: v_lshrrev_b32_e32 v57, 16, v1
+; GFX11-TRUE16-NEXT: v_mov_b16_e32 v57.h, v0.l
+; GFX11-TRUE16-NEXT: v_add_f32_e64 v0, 0x40c00000, s4
+; GFX11-TRUE16-NEXT: s_lshl_b32 s4, s23, 16
+; GFX11-TRUE16-NEXT: v_bfe_u32 v1, v0, 16, 1
+; GFX11-TRUE16-NEXT: v_or_b32_e32 v2, 0x400000, v0
+; GFX11-TRUE16-NEXT: v_cmp_u_f32_e32 vcc_lo, v0, v0
+; GFX11-TRUE16-NEXT: s_delay_alu instid0(VALU_DEP_3) | instskip(NEXT) | instid1(VALU_DEP_1)
+; GFX11-TRUE16-NEXT: v_add_nc_u32_e32 v1, v1, v0
+; GFX11-TRUE16-NEXT: v_add_nc_u32_e32 v1, 0x7fff, v1
+; GFX11-TRUE16-NEXT: s_delay_alu instid0(VALU_DEP_1) | instskip(SKIP_2) | instid1(VALU_DEP_2)
+; GFX11-TRUE16-NEXT: v_cndmask_b32_e32 v0, v1, v2, vcc_lo
+; GFX11-TRUE16-NEXT: v_add_f32_e64 v1, 0x40c00000, s4
+; GFX11-TRUE16-NEXT: s_and_b32 s4, s22, 0xffff0000
+; GFX11-TRUE16-NEXT: v_lshrrev_b32_e32 v0, 16, v0
+; GFX11-TRUE16-NEXT: s_delay_alu instid0(VALU_DEP_2) | instskip(SKIP_2) | instid1(VALU_DEP_3)
+; GFX11-TRUE16-NEXT: v_bfe_u32 v2, v1, 16, 1
+; GFX11-TRUE16-NEXT: v_or_b32_e32 v3, 0x400000, v1
+; GFX11-TRUE16-NEXT: v_cmp_u_f32_e32 vcc_lo, v1, v1
+; GFX11-TRUE16-NEXT: v_add_nc_u32_e32 v2, v2, v1
+; GFX11-TRUE16-NEXT: s_delay_alu instid0(VALU_DEP_1) | instskip(NEXT) | instid1(VALU_DEP_1)
+; GFX11-TRUE16-NEXT: v_add_nc_u32_e32 v2, 0x7fff, v2
+; GFX11-TRUE16-NEXT: v_cndmask_b32_e32 v1, v2, v3, vcc_lo
+; GFX11-TRUE16-NEXT: s_delay_alu instid0(VALU_DEP_1) | instskip(SKIP_3) | instid1(VALU_DEP_1)
+; GFX11-TRUE16-NEXT: v_lshrrev_b32_e32 v69, 16, v1
+; GFX11-TRUE16-NEXT: v_mov_b16_e32 v69.h, v0.l
+; GFX11-TRUE16-NEXT: v_add_f32_e64 v0, 0x40c00000, s4
+; GFX11-TRUE16-NEXT: s_lshl_b32 s4, s22, 16
+; GFX11-TRUE16-NEXT: v_bfe_u32 v1, v0, 16, 1
+; GFX11-TRUE16-NEXT: v_or_b32_e32 v2, 0x400000, v0
+; GFX11-TRUE16-NEXT: v_cmp_u_f32_e32 vcc_lo, v0, v0
+; GFX11-TRUE16-NEXT: s_delay_alu instid0(VALU_DEP_3) | instskip(NEXT) | instid1(VALU_DEP_1)
+; GFX11-TRUE16-NEXT: v_add_nc_u32_e32 v1, v1, v0
+; GFX11-TRUE16-NEXT: v_add_nc_u32_e32 v1, 0x7fff, v1
+; GFX11-TRUE16-NEXT: s_delay_alu instid0(VALU_DEP_1) | instskip(SKIP_2) | instid1(VALU_DEP_2)
+; GFX11-TRUE16-NEXT: v_cndmask_b32_e32 v0, v1, v2, vcc_lo
+; GFX11-TRUE16-NEXT: v_add_f32_e64 v1, 0x40c00000, s4
+; GFX11-TRUE16-NEXT: s_and_b32 s4, s21, 0xffff0000
+; GFX11-TRUE16-NEXT: v_lshrrev_b32_e32 v0, 16, v0
+; GFX11-TRUE16-NEXT: s_delay_alu instid0(VALU_DEP_2) | instskip(SKIP_2) | instid1(VALU_DEP_3)
+; GFX11-TRUE16-NEXT: v_bfe_u32 v2, v1, 16, 1
+; GFX11-TRUE16-NEXT: v_or_b32_e32 v3, 0x400000, v1
+; GFX11-TRUE16-NEXT: v_cmp_u_f32_e32 vcc_lo, v1, v1
+; GFX11-TRUE16-NEXT: v_add_nc_u32_e32 v2, v2, v1
+; GFX11-TRUE16-NEXT: s_delay_alu instid0(VALU_DEP_1) | instskip(NEXT) | instid1(VALU_DEP_1)
+; GFX11-TRUE16-NEXT: v_add_nc_u32_e32 v2, 0x7fff, v2
+; GFX11-TRUE16-NEXT: v_cndmask_b32_e32 v1, v2, v3, vcc_lo
+; GFX11-TRUE16-NEXT: s_delay_alu instid0(VALU_DEP_1) | instskip(SKIP_3) | instid1(VALU_DEP_1)
+; GFX11-TRUE16-NEXT: v_lshrrev_b32_e32 v80, 16, v1
+; GFX11-TRUE16-NEXT: v_mov_b16_e32 v80.h, v0.l
+; GFX11-TRUE16-NEXT: v_add_f32_e64 v0, 0x40c00000, s4
+; GFX11-TRUE16-NEXT: s_lshl_b32 s4, s21, 16
+; GFX11-TRUE16-NEXT: v_bfe_u32 v1, v0, 16, 1
+; GFX11-TRUE16-NEXT: v_or_b32_e32 v2, 0x400000, v0
+; GFX11-TRUE16-NEXT: v_cmp_u_f32_e32 vcc_lo, v0, v0
+; GFX11-TRUE16-NEXT: s_delay_alu instid0(VALU_DEP_3) | instskip(NEXT) | instid1(VALU_DEP_1)
+; GFX11-TRUE16-NEXT: v_add_nc_u32_e32 v1, v1, v0
+; GFX11-TRUE16-NEXT: v_add_nc_u32_e32 v1, 0x7fff, v1
+; GFX11-TRUE16-NEXT: s_delay_alu instid0(VALU_DEP_1) | instskip(SKIP_2) | instid1(VALU_DEP_1)
+; GFX11-TRUE16-NEXT: v_cndmask_b32_e32 v0, v1, v2, vcc_lo
+; GFX11-TRUE16-NEXT: v_add_f32_e64 v1, 0x40c00000, s4
+; GFX11-TRUE16-NEXT: s_and_b32 s4, s20, 0xffff0000
+; GFX11-TRUE16-NEXT: v_bfe_u32 v2, v1, 16, 1
+; GFX11-TRUE16-NEXT: v_or_b32_e32 v3, 0x400000, v1
+; GFX11-TRUE16-NEXT: v_cmp_u_f32_e32 vcc_lo, v1, v1
+; GFX11-TRUE16-NEXT: s_delay_alu instid0(VALU_DEP_3) | instskip(NEXT) | instid1(VALU_DEP_1)
+; GFX11-TRUE16-NEXT: v_add_nc_u32_e32 v2, v2, v1
+; GFX11-TRUE16-NEXT: v_add_nc_u32_e32 v2, 0x7fff, v2
+; GFX11-TRUE16-NEXT: s_delay_alu instid0(VALU_DEP_1) | instskip(SKIP_1) | instid1(VALU_DEP_2)
+; GFX11-TRUE16-NEXT: v_cndmask_b32_e32 v1, v2, v3, vcc_lo
+; GFX11-TRUE16-NEXT: v_lshrrev_b32_e32 v0, 16, v0
+; GFX11-TRUE16-NEXT: v_lshrrev_b32_e32 v90, 16, v1
+; GFX11-TRUE16-NEXT: s_delay_alu instid0(VALU_DEP_2) | instskip(SKIP_2) | instid1(VALU_DEP_1)
+; GFX11-TRUE16-NEXT: v_mov_b16_e32 v90.h, v0.l
+; GFX11-TRUE16-NEXT: v_add_f32_e64 v0, 0x40c00000, s4
+; GFX11-TRUE16-NEXT: s_lshl_b32 s4, s20, 16
+; GFX11-TRUE16-NEXT: v_bfe_u32 v1, v0, 16, 1
+; GFX11-TRUE16-NEXT: v_or_b32_e32 v2, 0x400000, v0
+; GFX11-TRUE16-NEXT: v_cmp_u_f32_e32 vcc_lo, v0, v0
+; GFX11-TRUE16-NEXT: s_delay_alu instid0(VALU_DEP_3) | instskip(NEXT) | instid1(VALU_DEP_1)
+; GFX11-TRUE16-NEXT: v_add_nc_u32_e32 v1, v1, v0
+; GFX11-TRUE16-NEXT: v_add_nc_u32_e32 v1, 0x7fff, v1
+; GFX11-TRUE16-NEXT: s_delay_alu instid0(VALU_DEP_1) | instskip(SKIP_2) | instid1(VALU_DEP_1)
+; GFX11-TRUE16-NEXT: v_cndmask_b32_e32 v0, v1, v2, vcc_lo
+; GFX11-TRUE16-NEXT: v_add_f32_e64 v1, 0x40c00000, s4
+; GFX11-TRUE16-NEXT: s_and_b32 s4, s19, 0xffff0000
+; GFX11-TRUE16-NEXT: v_bfe_u32 v2, v1, 16, 1
+; GFX11-TRUE16-NEXT: v_or_b32_e32 v3, 0x400000, v1
+; GFX11-TRUE16-NEXT: v_cmp_u_f32_e32 vcc_lo, v1, v1
+; GFX11-TRUE16-NEXT: s_delay_alu instid0(VALU_DEP_3) | instskip(NEXT) | instid1(VALU_DEP_1)
+; GFX11-TRUE16-NEXT: v_add_nc_u32_e32 v2, v2, v1
+; GFX11-TRUE16-NEXT: v_add_nc_u32_e32 v2, 0x7fff, v2
+; GFX11-TRUE16-NEXT: s_delay_alu instid0(VALU_DEP_1) | instskip(SKIP_1) | instid1(VALU_DEP_2)
+; GFX11-TRUE16-NEXT: v_cndmask_b32_e32 v1, v2, v3, vcc_lo
+; GFX11-TRUE16-NEXT: v_lshrrev_b32_e32 v0, 16, v0
+; GFX11-TRUE16-NEXT: v_lshrrev_b32_e32 v99, 16, v1
+; GFX11-TRUE16-NEXT: s_delay_alu instid0(VALU_DEP_2) | instskip(SKIP_2) | instid1(VALU_DEP_1)
+; GFX11-TRUE16-NEXT: v_mov_b16_e32 v99.h, v0.l
+; GFX11-TRUE16-NEXT: v_add_f32_e64 v0, 0x40c00000, s4
+; GFX11-TRUE16-NEXT: s_lshl_b32 s4, s19, 16
+; GFX11-TRUE16-NEXT: v_bfe_u32 v1, v0, 16, 1
+; GFX11-TRUE16-NEXT: v_or_b32_e32 v2, 0x400000, v0
+; GFX11-TRUE16-NEXT: v_cmp_u_f32_e32 vcc_lo, v0, v0
+; GFX11-TRUE16-NEXT: s_delay_alu instid0(VALU_DEP_3) | instskip(NEXT) | instid1(VALU_DEP_1)
+; GFX11-TRUE16-NEXT: v_add_nc_u32_e32 v1, v1, v0
+; GFX11-TRUE16-NEXT: v_add_nc_u32_e32 v1, 0x7fff, v1
+; GFX11-TRUE16-NEXT: s_delay_alu instid0(VALU_DEP_1) | instskip(SKIP_2) | instid1(VALU_DEP_1)
+; GFX11-TRUE16-NEXT: v_cndmask_b32_e32 v0, v1, v2, vcc_lo
+; GFX11-TRUE16-NEXT: v_add_f32_e64 v1, 0x40c00000, s4
+; GFX11-TRUE16-NEXT: s_and_b32 s4, s18, 0xffff0000
+; GFX11-TRUE16-NEXT: v_bfe_u32 v2, v1, 16, 1
+; GFX11-TRUE16-NEXT: v_or_b32_e32 v3, 0x400000, v1
+; GFX11-TRUE16-NEXT: v_cmp_u_f32_e32 vcc_lo, v1, v1
+; GFX11-TRUE16-NEXT: s_delay_alu instid0(VALU_DEP_3) | instskip(NEXT) | instid1(VALU_DEP_1)
+; GFX11-TRUE16-NEXT: v_add_nc_u32_e32 v2, v2, v1
+; GFX11-TRUE16-NEXT: v_add_nc_u32_e32 v2, 0x7fff, v2
+; GFX11-TRUE16-NEXT: s_delay_alu instid0(VALU_DEP_1) | instskip(SKIP_1) | instid1(VALU_DEP_2)
+; GFX11-TRUE16-NEXT: v_cndmask_b32_e32 v1, v2, v3, vcc_lo
+; GFX11-TRUE16-NEXT: v_lshrrev_b32_e32 v0, 16, v0
+; GFX11-TRUE16-NEXT: v_lshrrev_b32_e32 v107, 16, v1
+; GFX11-TRUE16-NEXT: s_delay_alu instid0(VALU_DEP_2) | instskip(SKIP_2) | instid1(VALU_DEP_1)
+; GFX11-TRUE16-NEXT: v_mov_b16_e32 v107.h, v0.l
+; GFX11-TRUE16-NEXT: v_add_f32_e64 v0, 0x40c00000, s4
+; GFX11-TRUE16-NEXT: s_lshl_b32 s4, s18, 16
+; GFX11-TRUE16-NEXT: v_bfe_u32 v1, v0, 16, 1
+; GFX11-TRUE16-NEXT: v_or_b32_e32 v2, 0x400000, v0
+; GFX11-TRUE16-NEXT: v_cmp_u_f32_e32 vcc_lo, v0, v0
+; GFX11-TRUE16-NEXT: s_delay_alu instid0(VALU_DEP_3) | instskip(NEXT) | instid1(VALU_DEP_1)
+; GFX11-TRUE16-NEXT: v_add_nc_u32_e32 v1, v1, v0
+; GFX11-TRUE16-NEXT: v_add_nc_u32_e32 v1, 0x7fff, v1
+; GFX11-TRUE16-NEXT: s_delay_alu instid0(VALU_DEP_1) | instskip(SKIP_2) | instid1(VALU_DEP_1)
+; GFX11-TRUE16-NEXT: v_cndmask_b32_e32 v0, v1, v2, vcc_lo
+; GFX11-TRUE16-NEXT: v_add_f32_e64 v1, 0x40c00000, s4
+; GFX11-TRUE16-NEXT: s_and_b32 s4, s17, 0xffff0000
+; GFX11-TRUE16-NEXT: v_bfe_u32 v2, v1, 16, 1
+; GFX11-TRUE16-NEXT: v_or_b32_e32 v3, 0x400000, v1
+; GFX11-TRUE16-NEXT: v_cmp_u_f32_e32 vcc_lo, v1, v1
+; GFX11-TRUE16-NEXT: s_delay_alu instid0(VALU_DEP_3) | instskip(NEXT) | instid1(VALU_DEP_1)
+; GFX11-TRUE16-NEXT: v_add_nc_u32_e32 v2, v2, v1
+; GFX11-TRUE16-NEXT: v_add_nc_u32_e32 v2, 0x7fff, v2
+; GFX11-TRUE16-NEXT: s_delay_alu instid0(VALU_DEP_1) | instskip(SKIP_1) | instid1(VALU_DEP_2)
+; GFX11-TRUE16-NEXT: v_cndmask_b32_e32 v1, v2, v3, vcc_lo
+; GFX11-TRUE16-NEXT: v_lshrrev_b32_e32 v0, 16, v0
+; GFX11-TRUE16-NEXT: v_lshrrev_b32_e32 v114, 16, v1
+; GFX11-TRUE16-NEXT: s_delay_alu instid0(VALU_DEP_2) | instskip(SKIP_2) | instid1(VALU_DEP_1)
+; GFX11-TRUE16-NEXT: v_mov_b16_e32 v114.h, v0.l
+; GFX11-TRUE16-NEXT: v_add_f32_e64 v0, 0x40c00000, s4
+; GFX11-TRUE16-NEXT: s_lshl_b32 s4, s17, 16
+; GFX11-TRUE16-NEXT: v_bfe_u32 v1, v0, 16, 1
+; GFX11-TRUE16-NEXT: v_or_b32_e32 v2, 0x400000, v0
+; GFX11-TRUE16-NEXT: v_cmp_u_f32_e32 vcc_lo, v0, v0
+; GFX11-TRUE16-NEXT: s_delay_alu instid0(VALU_DEP_3) | instskip(NEXT) | instid1(VALU_DEP_1)
+; GFX11-TRUE16-NEXT: v_add_nc_u32_e32 v1, v1, v0
+; GFX11-TRUE16-NEXT: v_add_nc_u32_e32 v1, 0x7fff, v1
+; GFX11-TRUE16-NEXT: s_delay_alu instid0(VALU_DEP_1) | instskip(SKIP_2) | instid1(VALU_DEP_1)
+; GFX11-TRUE16-NEXT: v_cndmask_b32_e32 v0, v1, v2, vcc_lo
+; GFX11-TRUE16-NEXT: v_add_f32_e64 v1, 0x40c00000, s4
+; GFX11-TRUE16-NEXT: s_and_b32 s4, s16, 0xffff0000
+; GFX11-TRUE16-NEXT: v_bfe_u32 v2, v1, 16, 1
+; GFX11-TRUE16-NEXT: v_or_b32_e32 v3, 0x400000, v1
+; GFX11-TRUE16-NEXT: v_cmp_u_f32_e32 vcc_lo, v1, v1
+; GFX11-TRUE16-NEXT: s_delay_alu instid0(VALU_DEP_3) | instskip(NEXT) | instid1(VALU_DEP_1)
+; GFX11-TRUE16-NEXT: v_add_nc_u32_e32 v2, v2, v1
+; GFX11-TRUE16-NEXT: v_add_nc_u32_e32 v2, 0x7fff, v2
+; GFX11-TRUE16-NEXT: s_delay_alu instid0(VALU_DEP_1) | instskip(SKIP_1) | instid1(VALU_DEP_2)
+; GFX11-TRUE16-NEXT: v_cndmask_b32_e32 v1, v2, v3, vcc_lo
+; GFX11-TRUE16-NEXT: v_lshrrev_b32_e32 v0, 16, v0
+; GFX11-TRUE16-NEXT: v_lshrrev_b32_e32 v120, 16, v1
+; GFX11-TRUE16-NEXT: s_delay_alu instid0(VALU_DEP_2) | instskip(SKIP_2) | instid1(VALU_DEP_1)
+; GFX11-TRUE16-NEXT: v_mov_b16_e32 v120.h, v0.l
+; GFX11-TRUE16-NEXT: v_add_f32_e64 v0, 0x40c00000, s4
+; GFX11-TRUE16-NEXT: s_lshl_b32 s4, s16, 16
+; GFX11-TRUE16-NEXT: v_bfe_u32 v1, v0, 16, 1
+; GFX11-TRUE16-NEXT: v_or_b32_e32 v2, 0x400000, v0
+; GFX11-TRUE16-NEXT: v_cmp_u_f32_e32 vcc_lo, v0, v0
+; GFX11-TRUE16-NEXT: s_delay_alu instid0(VALU_DEP_3) | instskip(NEXT) | instid1(VALU_DEP_1)
+; GFX11-TRUE16-NEXT: v_add_nc_u32_e32 v1, v1, v0
+; GFX11-TRUE16-NEXT: v_add_nc_u32_e32 v1, 0x7fff, v1
+; GFX11-TRUE16-NEXT: s_delay_alu instid0(VALU_DEP_1) | instskip(SKIP_3) | instid1(VALU_DEP_1)
+; GFX11-TRUE16-NEXT: v_cndmask_b32_e32 v0, v1, v2, vcc_lo
+; GFX11-TRUE16-NEXT: v_add_f32_e64 v1, 0x40c00000, s4
+; GFX11-TRUE16-NEXT: s_and_b32 s4, s3, 0xffff0000
+; GFX11-TRUE16-NEXT: s_lshl_b32 s3, s3, 16
+; GFX11-TRUE16-NEXT: v_bfe_u32 v2, v1, 16, 1
+; GFX11-TRUE16-NEXT: v_or_b32_e32 v3, 0x400000, v1
+; GFX11-TRUE16-NEXT: v_cmp_u_f32_e32 vcc_lo, v1, v1
+; GFX11-TRUE16-NEXT: s_delay_alu instid0(VALU_DEP_3) | instskip(NEXT) | instid1(VALU_DEP_1)
+; GFX11-TRUE16-NEXT: v_add_nc_u32_e32 v2, v2, v1
+; GFX11-TRUE16-NEXT: v_add_nc_u32_e32 v2, 0x7fff, v2
+; GFX11-TRUE16-NEXT: s_delay_alu instid0(VALU_DEP_1) | instskip(SKIP_1) | instid1(VALU_DEP_2)
+; GFX11-TRUE16-NEXT: v_cndmask_b32_e32 v1, v2, v3, vcc_lo
+; GFX11-TRUE16-NEXT: v_lshrrev_b32_e32 v0, 16, v0
+; GFX11-TRUE16-NEXT: v_lshrrev_b32_e32 v125, 16, v1
+; GFX11-TRUE16-NEXT: s_delay_alu instid0(VALU_DEP_2) | instskip(SKIP_1) | instid1(VALU_DEP_1)
+; GFX11-TRUE16-NEXT: v_mov_b16_e32 v125.h, v0.l
+; GFX11-TRUE16-NEXT: v_add_f32_e64 v0, 0x40c00000, s4
+; GFX11-TRUE16-NEXT: v_bfe_u32 v1, v0, 16, 1
+; GFX11-TRUE16-NEXT: v_or_b32_e32 v2, 0x400000, v0
+; GFX11-TRUE16-NEXT: v_cmp_u_f32_e32 vcc_lo, v0, v0
+; GFX11-TRUE16-NEXT: s_delay_alu instid0(VALU_DEP_3) | instskip(NEXT) | instid1(VALU_DEP_1)
+; GFX11-TRUE16-NEXT: v_add_nc_u32_e32 v1, v1, v0
+; GFX11-TRUE16-NEXT: v_add_nc_u32_e32 v1, 0x7fff, v1
+; GFX11-TRUE16-NEXT: s_delay_alu instid0(VALU_DEP_1) | instskip(SKIP_3) | instid1(VALU_DEP_1)
+; GFX11-TRUE16-NEXT: v_cndmask_b32_e32 v0, v1, v2, vcc_lo
+; GFX11-TRUE16-NEXT: v_add_f32_e64 v1, 0x40c00000, s3
+; GFX11-TRUE16-NEXT: s_and_b32 s3, s2, 0xffff0000
+; GFX11-TRUE16-NEXT: s_lshl_b32 s2, s2, 16
+; GFX11-TRUE16-NEXT: v_bfe_u32 v2, v1, 16, 1
+; GFX11-TRUE16-NEXT: v_or_b32_e32 v3, 0x400000, v1
+; GFX11-TRUE16-NEXT: v_cmp_u_f32_e32 vcc_lo, v1, v1
+; GFX11-TRUE16-NEXT: s_delay_alu instid0(VALU_DEP_3) | instskip(NEXT) | instid1(VALU_DEP_1)
+; GFX11-TRUE16-NEXT: v_add_nc_u32_e32 v2, v2, v1
+; GFX11-TRUE16-NEXT: v_add_nc_u32_e32 v2, 0x7fff, v2
+; GFX11-TRUE16-NEXT: s_delay_alu instid0(VALU_DEP_1) | instskip(SKIP_1) | instid1(VALU_DEP_2)
+; GFX11-TRUE16-NEXT: v_cndmask_b32_e32 v1, v2, v3, vcc_lo
+; GFX11-TRUE16-NEXT: v_lshrrev_b32_e32 v0, 16, v0
+; GFX11-TRUE16-NEXT: v_lshrrev_b32_e32 v129, 16, v1
+; GFX11-TRUE16-NEXT: s_delay_alu instid0(VALU_DEP_2) | instskip(SKIP_1) | instid1(VALU_DEP_1)
+; GFX11-TRUE16-NEXT: v_mov_b16_e64 v129.h, v0.l
+; GFX11-TRUE16-NEXT: v_add_f32_e64 v0, 0x40c00000, s3
+; GFX11-TRUE16-NEXT: v_bfe_u32 v1, v0, 16, 1
+; GFX11-TRUE16-NEXT: v_or_b32_e32 v2, 0x400000, v0
+; GFX11-TRUE16-NEXT: v_cmp_u_f32_e32 vcc_lo, v0, v0
+; GFX11-TRUE16-NEXT: s_delay_alu instid0(VALU_DEP_3) | instskip(NEXT) | instid1(VALU_DEP_1)
+; GFX11-TRUE16-NEXT: v_add_nc_u32_e32 v1, v1, v0
+; GFX11-TRUE16-NEXT: v_add_nc_u32_e32 v1, 0x7fff, v1
+; GFX11-TRUE16-NEXT: s_delay_alu instid0(VALU_DEP_1) | instskip(SKIP_3) | instid1(VALU_DEP_1)
+; GFX11-TRUE16-NEXT: v_cndmask_b32_e32 v0, v1, v2, vcc_lo
+; GFX11-TRUE16-NEXT: v_add_f32_e64 v1, 0x40c00000, s2
+; GFX11-TRUE16-NEXT: s_and_b32 s2, s1, 0xffff0000
+; GFX11-TRUE16-NEXT: s_lshl_b32 s1, s1, 16
+; GFX11-TRUE16-NEXT: v_bfe_u32 v2, v1, 16, 1
+; GFX11-TRUE16-NEXT: v_or_b32_e32 v3, 0x400000, v1
+; GFX11-TRUE16-NEXT: v_cmp_u_f32_e32 vcc_lo, v1, v1
+; GFX11-TRUE16-NEXT: s_delay_alu instid0(VALU_DEP_3) | instskip(NEXT) | instid1(VALU_DEP_1)
+; GFX11-TRUE16-NEXT: v_add_nc_u32_e32 v2, v2, v1
+; GFX11-TRUE16-NEXT: v_add_nc_u32_e32 v2, 0x7fff, v2
+; GFX11-TRUE16-NEXT: s_delay_alu instid0(VALU_DEP_1) | instskip(SKIP_1) | instid1(VALU_DEP_2)
+; GFX11-TRUE16-NEXT: v_cndmask_b32_e32 v1, v2, v3, vcc_lo
+; GFX11-TRUE16-NEXT: v_lshrrev_b32_e32 v0, 16, v0
+; GFX11-TRUE16-NEXT: v_lshrrev_b32_e32 v132, 16, v1
+; GFX11-TRUE16-NEXT: s_delay_alu instid0(VALU_DEP_2) | instskip(SKIP_1) | instid1(VALU_DEP_1)
+; GFX11-TRUE16-NEXT: v_mov_b16_e64 v132.h, v0.l
+; GFX11-TRUE16-NEXT: v_add_f32_e64 v0, 0x40c00000, s2
+; GFX11-TRUE16-NEXT: v_bfe_u32 v1, v0, 16, 1
+; GFX11-TRUE16-NEXT: v_or_b32_e32 v2, 0x400000, v0
+; GFX11-TRUE16-NEXT: v_cmp_u_f32_e32 vcc_lo, v0, v0
+; GFX11-TRUE16-NEXT: s_delay_alu instid0(VALU_DEP_3) | instskip(NEXT) | instid1(VALU_DEP_1)
+; GFX11-TRUE16-NEXT: v_add_nc_u32_e32 v1, v1, v0
+; GFX11-TRUE16-NEXT: v_add_nc_u32_e32 v1, 0x7fff, v1
+; GFX11-TRUE16-NEXT: s_delay_alu instid0(VALU_DEP_1) | instskip(SKIP_3) | instid1(VALU_DEP_1)
+; GFX11-TRUE16-NEXT: v_cndmask_b32_e32 v0, v1, v2, vcc_lo
+; GFX11-TRUE16-NEXT: v_add_f32_e64 v1, 0x40c00000, s1
+; GFX11-TRUE16-NEXT: s_and_b32 s1, s0, 0xffff0000
+; GFX11-TRUE16-NEXT: s_lshl_b32 s0, s0, 16
+; GFX11-TRUE16-NEXT: v_bfe_u32 v2, v1, 16, 1
+; GFX11-TRUE16-NEXT: v_or_b32_e32 v3, 0x400000, v1
+; GFX11-TRUE16-NEXT: v_cmp_u_f32_e32 vcc_lo, v1, v1
+; GFX11-TRUE16-NEXT: s_delay_alu instid0(VALU_DEP_3) | instskip(NEXT) | instid1(VALU_DEP_1)
+; GFX11-TRUE16-NEXT: v_add_nc_u32_e32 v2, v2, v1
+; GFX11-TRUE16-NEXT: v_add_nc_u32_e32 v2, 0x7fff, v2
+; GFX11-TRUE16-NEXT: s_delay_alu instid0(VALU_DEP_1) | instskip(SKIP_1) | instid1(VALU_DEP_2)
+; GFX11-TRUE16-NEXT: v_cndmask_b32_e32 v1, v2, v3, vcc_lo
+; GFX11-TRUE16-NEXT: v_lshrrev_b32_e32 v0, 16, v0
+; GFX11-TRUE16-NEXT: v_lshrrev_b32_e32 v134, 16, v1
+; GFX11-TRUE16-NEXT: s_delay_alu instid0(VALU_DEP_2) | instskip(SKIP_1) | instid1(VALU_DEP_1)
+; GFX11-TRUE16-NEXT: v_mov_b16_e64 v134.h, v0.l
+; GFX11-TRUE16-NEXT: v_add_f32_e64 v0, 0x40c00000, s1
+; GFX11-TRUE16-NEXT: v_bfe_u32 v1, v0, 16, 1
+; GFX11-TRUE16-NEXT: v_or_b32_e32 v2, 0x400000, v0
+; GFX11-TRUE16-NEXT: v_cmp_u_f32_e32 vcc_lo, v0, v0
+; GFX11-TRUE16-NEXT: s_delay_alu instid0(VALU_DEP_3) | instskip(NEXT) | instid1(VALU_DEP_1)
+; GFX11-TRUE16-NEXT: v_add_nc_u32_e32 v1, v1, v0
+; GFX11-TRUE16-NEXT: v_add_nc_u32_e32 v1, 0x7fff, v1
+; GFX11-TRUE16-NEXT: s_delay_alu instid0(VALU_DEP_1) | instskip(SKIP_1) | instid1(VALU_DEP_1)
+; GFX11-TRUE16-NEXT: v_cndmask_b32_e32 v0, v1, v2, vcc_lo
+; GFX11-TRUE16-NEXT: v_add_f32_e64 v1, 0x40c00000, s0
+; GFX11-TRUE16-NEXT: v_bfe_u32 v2, v1, 16, 1
+; GFX11-TRUE16-NEXT: v_or_b32_e32 v3, 0x400000, v1
+; GFX11-TRUE16-NEXT: v_cmp_u_f32_e32 vcc_lo, v1, v1
+; GFX11-TRUE16-NEXT: s_delay_alu instid0(VALU_DEP_3) | instskip(NEXT) | instid1(VALU_DEP_1)
+; GFX11-TRUE16-NEXT: v_add_nc_u32_e32 v2, v2, v1
+; GFX11-TRUE16-NEXT: v_add_nc_u32_e32 v2, 0x7fff, v2
+; GFX11-TRUE16-NEXT: s_delay_alu instid0(VALU_DEP_1) | instskip(SKIP_1) | instid1(VALU_DEP_2)
+; GFX11-TRUE16-NEXT: v_cndmask_b32_e32 v1, v2, v3, vcc_lo
+; GFX11-TRUE16-NEXT: v_lshrrev_b32_e32 v0, 16, v0
+; GFX11-TRUE16-NEXT: v_lshrrev_b32_e32 v135, 16, v1
+; GFX11-TRUE16-NEXT: s_delay_alu instid0(VALU_DEP_2) | instskip(SKIP_1) | instid1(VALU_DEP_1)
+; GFX11-TRUE16-NEXT: v_mov_b16_e64 v135.h, v0.l
+; GFX11-TRUE16-NEXT: v_and_b32_e32 v0, 0xffff0000, v167
+; GFX11-TRUE16-NEXT: v_add_f32_e32 v0, 0x40c00000, v0
+; GFX11-TRUE16-NEXT: s_delay_alu instid0(VALU_DEP_1) | instskip(SKIP_2) | instid1(VALU_DEP_3)
+; GFX11-TRUE16-NEXT: v_bfe_u32 v1, v0, 16, 1
+; GFX11-TRUE16-NEXT: v_or_b32_e32 v2, 0x400000, v0
+; GFX11-TRUE16-NEXT: v_cmp_u_f32_e32 vcc_lo, v0, v0
+; GFX11-TRUE16-NEXT: v_add_nc_u32_e32 v1, v1, v0
+; GFX11-TRUE16-NEXT: s_delay_alu instid0(VALU_DEP_1) | instskip(NEXT) | instid1(VALU_DEP_1)
+; GFX11-TRUE16-NEXT: v_add_nc_u32_e32 v1, 0x7fff, v1
+; GFX11-TRUE16-NEXT: v_dual_cndmask_b32 v0, v1, v2 :: v_dual_lshlrev_b32 v1, 16, v167
+; GFX11-TRUE16-NEXT: s_delay_alu instid0(VALU_DEP_1) | instskip(NEXT) | instid1(VALU_DEP_1)
+; GFX11-TRUE16-NEXT: v_add_f32_e32 v1, 0x40c00000, v1
+; GFX11-TRUE16-NEXT: v_bfe_u32 v2, v1, 16, 1
+; GFX11-TRUE16-NEXT: v_or_b32_e32 v3, 0x400000, v1
+; GFX11-TRUE16-NEXT: v_cmp_u_f32_e32 vcc_lo, v1, v1
+; GFX11-TRUE16-NEXT: s_delay_alu instid0(VALU_DEP_3) | instskip(NEXT) | instid1(VALU_DEP_1)
+; GFX11-TRUE16-NEXT: v_add_nc_u32_e32 v2, v2, v1
+; GFX11-TRUE16-NEXT: v_add_nc_u32_e32 v2, 0x7fff, v2
+; GFX11-TRUE16-NEXT: s_delay_alu instid0(VALU_DEP_1) | instskip(SKIP_1) | instid1(VALU_DEP_2)
+; GFX11-TRUE16-NEXT: v_cndmask_b32_e32 v1, v2, v3, vcc_lo
+; GFX11-TRUE16-NEXT: v_lshrrev_b32_e32 v0, 16, v0
+; GFX11-TRUE16-NEXT: v_lshrrev_b32_e32 v167, 16, v1
+; GFX11-TRUE16-NEXT: s_delay_alu instid0(VALU_DEP_2) | instskip(SKIP_1) | instid1(VALU_DEP_1)
+; GFX11-TRUE16-NEXT: v_mov_b16_e64 v167.h, v0.l
+; GFX11-TRUE16-NEXT: v_and_b32_e32 v0, 0xffff0000, v176
+; GFX11-TRUE16-NEXT: v_add_f32_e32 v0, 0x40c00000, v0
+; GFX11-TRUE16-NEXT: s_delay_alu instid0(VALU_DEP_1) | instskip(SKIP_2) | instid1(VALU_DEP_3)
+; GFX11-TRUE16-NEXT: v_bfe_u32 v1, v0, 16, 1
+; GFX11-TRUE16-NEXT: v_or_b32_e32 v2, 0x400000, v0
+; GFX11-TRUE16-NEXT: v_cmp_u_f32_e32 vcc_lo, v0, v0
+; GFX11-TRUE16-NEXT: v_add_nc_u32_e32 v1, v1, v0
+; GFX11-TRUE16-NEXT: s_delay_alu instid0(VALU_DEP_1) | instskip(NEXT) | instid1(VALU_DEP_1)
+; GFX11-TRUE16-NEXT: v_add_nc_u32_e32 v1, 0x7fff, v1
+; GFX11-TRUE16-NEXT: v_dual_cndmask_b32 v0, v1, v2 :: v_dual_lshlrev_b32 v1, 16, v176
+; GFX11-TRUE16-NEXT: s_delay_alu instid0(VALU_DEP_1) | instskip(NEXT) | instid1(VALU_DEP_2)
+; GFX11-TRUE16-NEXT: v_lshrrev_b32_e32 v0, 16, v0
+; GFX11-TRUE16-NEXT: v_add_f32_e32 v1, 0x40c00000, v1
+; GFX11-TRUE16-NEXT: s_delay_alu instid0(VALU_DEP_1) | instskip(SKIP_2) | instid1(VALU_DEP_3)
+; GFX11-TRUE16-NEXT: v_bfe_u32 v2, v1, 16, 1
+; GFX11-TRUE16-NEXT: v_or_b32_e32 v3, 0x400000, v1
+; GFX11-TRUE16-NEXT: v_cmp_u_f32_e32 vcc_lo, v1, v1
+; GFX11-TRUE16-NEXT: v_add_nc_u32_e32 v2, v2, v1
+; GFX11-TRUE16-NEXT: s_delay_alu instid0(VALU_DEP_1) | instskip(NEXT) | instid1(VALU_DEP_1)
+; GFX11-TRUE16-NEXT: v_add_nc_u32_e32 v2, 0x7fff, v2
+; GFX11-TRUE16-NEXT: v_cndmask_b32_e32 v1, v2, v3, vcc_lo
+; GFX11-TRUE16-NEXT: s_delay_alu instid0(VALU_DEP_1) | instskip(SKIP_2) | instid1(VALU_DEP_1)
+; GFX11-TRUE16-NEXT: v_lshrrev_b32_e32 v176, 16, v1
+; GFX11-TRUE16-NEXT: v_mov_b16_e64 v176.h, v0.l
+; GFX11-TRUE16-NEXT: v_and_b32_e32 v0, 0xffff0000, v177
+; GFX11-TRUE16-NEXT: v_add_f32_e32 v0, 0x40c00000, v0
+; GFX11-TRUE16-NEXT: s_delay_alu instid0(VALU_DEP_1) | instskip(SKIP_2) | instid1(VALU_DEP_3)
+; GFX11-TRUE16-NEXT: v_bfe_u32 v1, v0, 16, 1
+; GFX11-TRUE16-NEXT: v_or_b32_e32 v2, 0x400000, v0
+; GFX11-TRUE16-NEXT: v_cmp_u_f32_e32 vcc_lo, v0, v0
+; GFX11-TRUE16-NEXT: v_add_nc_u32_e32 v1, v1, v0
+; GFX11-TRUE16-NEXT: s_delay_alu instid0(VALU_DEP_1) | instskip(NEXT) | instid1(VALU_DEP_1)
+; GFX11-TRUE16-NEXT: v_add_nc_u32_e32 v1, 0x7fff, v1
+; GFX11-TRUE16-NEXT: v_dual_cndmask_b32 v0, v1, v2 :: v_dual_lshlrev_b32 v1, 16, v177
+; GFX11-TRUE16-NEXT: s_delay_alu instid0(VALU_DEP_1) | instskip(NEXT) | instid1(VALU_DEP_2)
+; GFX11-TRUE16-NEXT: v_add_f32_e32 v1, 0x40c00000, v1
+; GFX11-TRUE16-NEXT: v_lshrrev_b32_e32 v0, 16, v0
+; GFX11-TRUE16-NEXT: s_delay_alu instid0(VALU_DEP_2) | instskip(SKIP_2) | instid1(VALU_DEP_3)
+; GFX11-TRUE16-NEXT: v_bfe_u32 v2, v1, 16, 1
+; GFX11-TRUE16-NEXT: v_or_b32_e32 v3, 0x400000, v1
+; GFX11-TRUE16-NEXT: v_cmp_u_f32_e32 vcc_lo, v1, v1
+; GFX11-TRUE16-NEXT: v_add_nc_u32_e32 v2, v2, v1
+; GFX11-TRUE16-NEXT: s_delay_alu instid0(VALU_DEP_1) | instskip(NEXT) | instid1(VALU_DEP_1)
+; GFX11-TRUE16-NEXT: v_add_nc_u32_e32 v2, 0x7fff, v2
+; GFX11-TRUE16-NEXT: v_cndmask_b32_e32 v1, v2, v3, vcc_lo
+; GFX11-TRUE16-NEXT: s_delay_alu instid0(VALU_DEP_1) | instskip(SKIP_2) | instid1(VALU_DEP_1)
+; GFX11-TRUE16-NEXT: v_lshrrev_b32_e32 v177, 16, v1
+; GFX11-TRUE16-NEXT: v_mov_b16_e64 v177.h, v0.l
+; GFX11-TRUE16-NEXT: v_and_b32_e32 v0, 0xffff0000, v178
+; GFX11-TRUE16-NEXT: v_add_f32_e32 v0, 0x40c00000, v0
+; GFX11-TRUE16-NEXT: s_delay_alu instid0(VALU_DEP_1) | instskip(SKIP_2) | instid1(VALU_DEP_3)
+; GFX11-TRUE16-NEXT: v_bfe_u32 v1, v0, 16, 1
+; GFX11-TRUE16-NEXT: v_or_b32_e32 v2, 0x400000, v0
+; GFX11-TRUE16-NEXT: v_cmp_u_f32_e32 vcc_lo, v0, v0
+; GFX11-TRUE16-NEXT: v_add_nc_u32_e32 v1, v1, v0
+; GFX11-TRUE16-NEXT: s_delay_alu instid0(VALU_DEP_1) | instskip(NEXT) | instid1(VALU_DEP_1)
+; GFX11-TRUE16-NEXT: v_add_nc_u32_e32 v1, 0x7fff, v1
+; GFX11-TRUE16-NEXT: v_cndmask_b32_e32 v0, v1, v2, vcc_lo
+; GFX11-TRUE16-NEXT: v_lshlrev_b32_e32 v1, 16, v178
+; GFX11-TRUE16-NEXT: s_delay_alu instid0(VALU_DEP_1) | instskip(NEXT) | instid1(VALU_DEP_3)
+; GFX11-TRUE16-NEXT: v_add_f32_e32 v1, 0x40c00000, v1
+; GFX11-TRUE16-NEXT: v_lshrrev_b32_e32 v0, 16, v0
+; GFX11-TRUE16-NEXT: s_delay_alu instid0(VALU_DEP_2) | instskip(SKIP_2) | instid1(VALU_DEP_3)
+; GFX11-TRUE16-NEXT: v_bfe_u32 v2, v1, 16, 1
+; GFX11-TRUE16-NEXT: v_or_b32_e32 v3, 0x400000, v1
+; GFX11-TRUE16-NEXT: v_cmp_u_f32_e32 vcc_lo, v1, v1
+; GFX11-TRUE16-NEXT: v_add_nc_u32_e32 v2, v2, v1
+; GFX11-TRUE16-NEXT: s_delay_alu instid0(VALU_DEP_1) | instskip(NEXT) | instid1(VALU_DEP_1)
+; GFX11-TRUE16-NEXT: v_add_nc_u32_e32 v2, 0x7fff, v2
+; GFX11-TRUE16-NEXT: v_cndmask_b32_e32 v1, v2, v3, vcc_lo
+; GFX11-TRUE16-NEXT: s_delay_alu instid0(VALU_DEP_1) | instskip(SKIP_2) | instid1(VALU_DEP_1)
+; GFX11-TRUE16-NEXT: v_lshrrev_b32_e32 v178, 16, v1
+; GFX11-TRUE16-NEXT: v_mov_b16_e64 v178.h, v0.l
+; GFX11-TRUE16-NEXT: v_and_b32_e32 v0, 0xffff0000, v179
+; GFX11-TRUE16-NEXT: v_add_f32_e32 v0, 0x40c00000, v0
+; GFX11-TRUE16-NEXT: s_delay_alu instid0(VALU_DEP_1) | instskip(SKIP_2) | instid1(VALU_DEP_3)
+; GFX11-TRUE16-NEXT: v_bfe_u32 v1, v0, 16, 1
+; GFX11-TRUE16-NEXT: v_or_b32_e32 v2, 0x400000, v0
+; GFX11-TRUE16-NEXT: v_cmp_u_f32_e32 vcc_lo, v0, v0
+; GFX11-TRUE16-NEXT: v_add_nc_u32_e32 v1, v1, v0
+; GFX11-TRUE16-NEXT: s_delay_alu instid0(VALU_DEP_1) | instskip(NEXT) | instid1(VALU_DEP_1)
+; GFX11-TRUE16-NEXT: v_add_nc_u32_e32 v1, 0x7fff, v1
+; GFX11-TRUE16-NEXT: v_dual_cndmask_b32 v0, v1, v2 :: v_dual_lshlrev_b32 v1, 16, v179
+; GFX11-TRUE16-NEXT: s_delay_alu instid0(VALU_DEP_1) | instskip(NEXT) | instid1(VALU_DEP_2)
+; GFX11-TRUE16-NEXT: v_add_f32_e32 v1, 0x40c00000, v1
+; GFX11-TRUE16-NEXT: v_lshrrev_b32_e32 v0, 16, v0
+; GFX11-TRUE16-NEXT: s_delay_alu instid0(VALU_DEP_2) | instskip(SKIP_2) | instid1(VALU_DEP_3)
+; GFX11-TRUE16-NEXT: v_bfe_u32 v2, v1, 16, 1
+; GFX11-TRUE16-NEXT: v_or_b32_e32 v3, 0x400000, v1
+; GFX11-TRUE16-NEXT: v_cmp_u_f32_e32 vcc_lo, v1, v1
+; GFX11-TRUE16-NEXT: v_add_nc_u32_e32 v2, v2, v1
+; GFX11-TRUE16-NEXT: s_delay_alu instid0(VALU_DEP_1) | instskip(NEXT) | instid1(VALU_DEP_1)
+; GFX11-TRUE16-NEXT: v_add_nc_u32_e32 v2, 0x7fff, v2
+; GFX11-TRUE16-NEXT: v_cndmask_b32_e32 v1, v2, v3, vcc_lo
+; GFX11-TRUE16-NEXT: s_delay_alu instid0(VALU_DEP_1) | instskip(SKIP_2) | instid1(VALU_DEP_1)
+; GFX11-TRUE16-NEXT: v_lshrrev_b32_e32 v179, 16, v1
+; GFX11-TRUE16-NEXT: v_mov_b16_e64 v179.h, v0.l
+; GFX11-TRUE16-NEXT: v_and_b32_e32 v0, 0xffff0000, v180
+; GFX11-TRUE16-NEXT: v_add_f32_e32 v0, 0x40c00000, v0
+; GFX11-TRUE16-NEXT: s_delay_alu instid0(VALU_DEP_1) | instskip(SKIP_2) | instid1(VALU_DEP_3)
+; GFX11-TRUE16-NEXT: v_bfe_u32 v1, v0, 16, 1
+; GFX11-TRUE16-NEXT: v_or_b32_e32 v2, 0x400000, v0
+; GFX11-TRUE16-NEXT: v_cmp_u_f32_e32 vcc_lo, v0, v0
+; GFX11-TRUE16-NEXT: v_add_nc_u32_e32 v1, v1, v0
+; GFX11-TRUE16-NEXT: s_delay_alu instid0(VALU_DEP_1) | instskip(NEXT) | instid1(VALU_DEP_1)
+; GFX11-TRUE16-NEXT: v_add_nc_u32_e32 v1, 0x7fff, v1
+; GFX11-TRUE16-NEXT: v_dual_cndmask_b32 v0, v1, v2 :: v_dual_lshlrev_b32 v1, 16, v180
+; GFX11-TRUE16-NEXT: s_delay_alu instid0(VALU_DEP_1) | instskip(NEXT) | instid1(VALU_DEP_2)
+; GFX11-TRUE16-NEXT: v_lshrrev_b32_e32 v0, 16, v0
+; GFX11-TRUE16-NEXT: v_add_f32_e32 v1, 0x40c00000, v1
+; GFX11-TRUE16-NEXT: s_delay_alu instid0(VALU_DEP_1) | instskip(SKIP_2) | instid1(VALU_DEP_3)
+; GFX11-TRUE16-NEXT: v_bfe_u32 v2, v1, 16, 1
+; GFX11-TRUE16-NEXT: v_or_b32_e32 v3, 0x400000, v1
+; GFX11-TRUE16-NEXT: v_cmp_u_f32_e32 vcc_lo, v1, v1
+; GFX11-TRUE16-NEXT: v_add_nc_u32_e32 v2, v2, v1
+; GFX11-TRUE16-NEXT: s_delay_alu instid0(VALU_DEP_1) | instskip(NEXT) | instid1(VALU_DEP_1)
+; GFX11-TRUE16-NEXT: v_add_nc_u32_e32 v2, 0x7fff, v2
+; GFX11-TRUE16-NEXT: v_cndmask_b32_e32 v1, v2, v3, vcc_lo
+; GFX11-TRUE16-NEXT: s_delay_alu instid0(VALU_DEP_1) | instskip(SKIP_2) | instid1(VALU_DEP_1)
+; GFX11-TRUE16-NEXT: v_lshrrev_b32_e32 v180, 16, v1
+; GFX11-TRUE16-NEXT: v_mov_b16_e64 v180.h, v0.l
+; GFX11-TRUE16-NEXT: v_and_b32_e32 v0, 0xffff0000, v181
+; GFX11-TRUE16-NEXT: v_add_f32_e32 v0, 0x40c00000, v0
+; GFX11-TRUE16-NEXT: s_delay_alu instid0(VALU_DEP_1) | instskip(SKIP_2) | instid1(VALU_DEP_3)
+; GFX11-TRUE16-NEXT: v_bfe_u32 v1, v0, 16, 1
+; GFX11-TRUE16-NEXT: v_or_b32_e32 v2, 0x400000, v0
+; GFX11-TRUE16-NEXT: v_cmp_u_f32_e32 vcc_lo, v0, v0
+; GFX11-TRUE16-NEXT: v_add_nc_u32_e32 v1, v1, v0
+; GFX11-TRUE16-NEXT: s_delay_alu instid0(VALU_DEP_1) | instskip(NEXT) | instid1(VALU_DEP_1)
+; GFX11-TRUE16-NEXT: v_add_nc_u32_e32 v1, 0x7fff, v1
+; GFX11-TRUE16-NEXT: v_dual_cndmask_b32 v0, v1, v2 :: v_dual_lshlrev_b32 v1, 16, v181
+; GFX11-TRUE16-NEXT: s_delay_alu instid0(VALU_DEP_1) | instskip(NEXT) | instid1(VALU_DEP_2)
+; GFX11-TRUE16-NEXT: v_add_f32_e32 v1, 0x40c00000, v1
+; GFX11-TRUE16-NEXT: v_lshrrev_b32_e32 v0, 16, v0
+; GFX11-TRUE16-NEXT: s_delay_alu instid0(VALU_DEP_2) | instskip(SKIP_2) | instid1(VALU_DEP_3)
+; GFX11-TRUE16-NEXT: v_bfe_u32 v2, v1, 16, 1
+; GFX11-TRUE16-NEXT: v_or_b32_e32 v3, 0x400000, v1
+; GFX11-TRUE16-NEXT: v_cmp_u_f32_e32 vcc_lo, v1, v1
+; GFX11-TRUE16-NEXT: v_add_nc_u32_e32 v2, v2, v1
+; GFX11-TRUE16-NEXT: s_delay_alu instid0(VALU_DEP_1) | instskip(NEXT) | instid1(VALU_DEP_1)
+; GFX11-TRUE16-NEXT: v_add_nc_u32_e32 v2, 0x7fff, v2
+; GFX11-TRUE16-NEXT: v_cndmask_b32_e32 v1, v2, v3, vcc_lo
+; GFX11-TRUE16-NEXT: s_delay_alu instid0(VALU_DEP_1) | instskip(SKIP_2) | instid1(VALU_DEP_1)
+; GFX11-TRUE16-NEXT: v_lshrrev_b32_e32 v181, 16, v1
+; GFX11-TRUE16-NEXT: v_mov_b16_e64 v181.h, v0.l
+; GFX11-TRUE16-NEXT: v_and_b32_e32 v0, 0xffff0000, v182
+; GFX11-TRUE16-NEXT: v_add_f32_e32 v0, 0x40c00000, v0
+; GFX11-TRUE16-NEXT: s_delay_alu instid0(VALU_DEP_1) | instskip(SKIP_2) | instid1(VALU_DEP_3)
+; GFX11-TRUE16-NEXT: v_bfe_u32 v1, v0, 16, 1
+; GFX11-TRUE16-NEXT: v_or_b32_e32 v2, 0x400000, v0
+; GFX11-TRUE16-NEXT: v_cmp_u_f32_e32 vcc_lo, v0, v0
+; GFX11-TRUE16-NEXT: v_add_nc_u32_e32 v1, v1, v0
+; GFX11-TRUE16-NEXT: s_delay_alu instid0(VALU_DEP_1) | instskip(NEXT) | instid1(VALU_DEP_1)
+; GFX11-TRUE16-NEXT: v_add_nc_u32_e32 v1, 0x7fff, v1
+; GFX11-TRUE16-NEXT: v_cndmask_b32_e32 v0, v1, v2, vcc_lo
+; GFX11-TRUE16-NEXT: v_lshlrev_b32_e32 v1, 16, v182
+; GFX11-TRUE16-NEXT: s_delay_alu instid0(VALU_DEP_1) | instskip(NEXT) | instid1(VALU_DEP_3)
+; GFX11-TRUE16-NEXT: v_add_f32_e32 v1, 0x40c00000, v1
+; GFX11-TRUE16-NEXT: v_lshrrev_b32_e32 v0, 16, v0
+; GFX11-TRUE16-NEXT: s_delay_alu instid0(VALU_DEP_2) | instskip(SKIP_2) | instid1(VALU_DEP_3)
+; GFX11-TRUE16-NEXT: v_bfe_u32 v2, v1, 16, 1
+; GFX11-TRUE16-NEXT: v_or_b32_e32 v3, 0x400000, v1
+; GFX11-TRUE16-NEXT: v_cmp_u_f32_e32 vcc_lo, v1, v1
+; GFX11-TRUE16-NEXT: v_add_nc_u32_e32 v2, v2, v1
+; GFX11-TRUE16-NEXT: s_delay_alu instid0(VALU_DEP_1) | instskip(NEXT) | instid1(VALU_DEP_1)
+; GFX11-TRUE16-NEXT: v_add_nc_u32_e32 v2, 0x7fff, v2
+; GFX11-TRUE16-NEXT: v_cndmask_b32_e32 v1, v2, v3, vcc_lo
+; GFX11-TRUE16-NEXT: s_delay_alu instid0(VALU_DEP_1) | instskip(SKIP_2) | instid1(VALU_DEP_1)
+; GFX11-TRUE16-NEXT: v_lshrrev_b32_e32 v182, 16, v1
+; GFX11-TRUE16-NEXT: v_mov_b16_e64 v182.h, v0.l
+; GFX11-TRUE16-NEXT: v_and_b32_e32 v0, 0xffff0000, v183
+; GFX11-TRUE16-NEXT: v_add_f32_e32 v0, 0x40c00000, v0
+; GFX11-TRUE16-NEXT: s_delay_alu instid0(VALU_DEP_1) | instskip(SKIP_2) | instid1(VALU_DEP_3)
+; GFX11-TRUE16-NEXT: v_bfe_u32 v1, v0, 16, 1
+; GFX11-TRUE16-NEXT: v_or_b32_e32 v2, 0x400000, v0
+; GFX11-TRUE16-NEXT: v_cmp_u_f32_e32 vcc_lo, v0, v0
+; GFX11-TRUE16-NEXT: v_add_nc_u32_e32 v1, v1, v0
+; GFX11-TRUE16-NEXT: s_delay_alu instid0(VALU_DEP_1) | instskip(NEXT) | instid1(VALU_DEP_1)
+; GFX11-TRUE16-NEXT: v_add_nc_u32_e32 v1, 0x7fff, v1
+; GFX11-TRUE16-NEXT: v_dual_cndmask_b32 v0, v1, v2 :: v_dual_lshlrev_b32 v1, 16, v183
+; GFX11-TRUE16-NEXT: s_delay_alu instid0(VALU_DEP_1) | instskip(NEXT) | instid1(VALU_DEP_2)
+; GFX11-TRUE16-NEXT: v_lshrrev_b32_e32 v0, 16, v0
+; GFX11-TRUE16-NEXT: v_add_f32_e32 v1, 0x40c00000, v1
+; GFX11-TRUE16-NEXT: s_delay_alu instid0(VALU_DEP_1) | instskip(SKIP_2) | instid1(VALU_DEP_3)
+; GFX11-TRUE16-NEXT: v_bfe_u32 v2, v1, 16, 1
+; GFX11-TRUE16-NEXT: v_or_b32_e32 v3, 0x400000, v1
+; GFX11-TRUE16-NEXT: v_cmp_u_f32_e32 vcc_lo, v1, v1
+; GFX11-TRUE16-NEXT: v_add_nc_u32_e32 v2, v2, v1
+; GFX11-TRUE16-NEXT: s_delay_alu instid0(VALU_DEP_1) | instskip(NEXT) | instid1(VALU_DEP_1)
+; GFX11-TRUE16-NEXT: v_add_nc_u32_e32 v2, 0x7fff, v2
+; GFX11-TRUE16-NEXT: v_cndmask_b32_e32 v1, v2, v3, vcc_lo
+; GFX11-TRUE16-NEXT: s_delay_alu instid0(VALU_DEP_1) | instskip(SKIP_2) | instid1(VALU_DEP_1)
+; GFX11-TRUE16-NEXT: v_lshrrev_b32_e32 v183, 16, v1
+; GFX11-TRUE16-NEXT: v_mov_b16_e64 v183.h, v0.l
+; GFX11-TRUE16-NEXT: v_and_b32_e32 v0, 0xffff0000, v168
+; GFX11-TRUE16-NEXT: v_add_f32_e32 v0, 0x40c00000, v0
+; GFX11-TRUE16-NEXT: s_delay_alu instid0(VALU_DEP_1) | instskip(SKIP_2) | instid1(VALU_DEP_3)
+; GFX11-TRUE16-NEXT: v_bfe_u32 v1, v0, 16, 1
+; GFX11-TRUE16-NEXT: v_or_b32_e32 v2, 0x400000, v0
+; GFX11-TRUE16-NEXT: v_cmp_u_f32_e32 vcc_lo, v0, v0
+; GFX11-TRUE16-NEXT: v_add_nc_u32_e32 v1, v1, v0
+; GFX11-TRUE16-NEXT: s_delay_alu instid0(VALU_DEP_1) | instskip(NEXT) | instid1(VALU_DEP_1)
+; GFX11-TRUE16-NEXT: v_add_nc_u32_e32 v1, 0x7fff, v1
+; GFX11-TRUE16-NEXT: v_dual_cndmask_b32 v0, v1, v2 :: v_dual_lshlrev_b32 v1, 16, v168
+; GFX11-TRUE16-NEXT: s_delay_alu instid0(VALU_DEP_1) | instskip(NEXT) | instid1(VALU_DEP_2)
+; GFX11-TRUE16-NEXT: v_lshrrev_b32_e32 v0, 16, v0
+; GFX11-TRUE16-NEXT: v_add_f32_e32 v1, 0x40c00000, v1
+; GFX11-TRUE16-NEXT: s_delay_alu instid0(VALU_DEP_1) | instskip(SKIP_2) | instid1(VALU_DEP_3)
+; GFX11-TRUE16-NEXT: v_bfe_u32 v2, v1, 16, 1
+; GFX11-TRUE16-NEXT: v_or_b32_e32 v3, 0x400000, v1
+; GFX11-TRUE16-NEXT: v_cmp_u_f32_e32 vcc_lo, v1, v1
+; GFX11-TRUE16-NEXT: v_add_nc_u32_e32 v2, v2, v1
+; GFX11-TRUE16-NEXT: s_delay_alu instid0(VALU_DEP_1) | instskip(NEXT) | instid1(VALU_DEP_1)
+; GFX11-TRUE16-NEXT: v_add_nc_u32_e32 v2, 0x7fff, v2
+; GFX11-TRUE16-NEXT: v_cndmask_b32_e32 v1, v2, v3, vcc_lo
+; GFX11-TRUE16-NEXT: s_delay_alu instid0(VALU_DEP_1) | instskip(SKIP_2) | instid1(VALU_DEP_1)
+; GFX11-TRUE16-NEXT: v_lshrrev_b32_e32 v168, 16, v1
+; GFX11-TRUE16-NEXT: v_mov_b16_e64 v168.h, v0.l
+; GFX11-TRUE16-NEXT: v_and_b32_e32 v0, 0xffff0000, v169
+; GFX11-TRUE16-NEXT: v_add_f32_e32 v0, 0x40c00000, v0
+; GFX11-TRUE16-NEXT: s_delay_alu instid0(VALU_DEP_1) | instskip(SKIP_2) | instid1(VALU_DEP_3)
+; GFX11-TRUE16-NEXT: v_bfe_u32 v1, v0, 16, 1
+; GFX11-TRUE16-NEXT: v_or_b32_e32 v2, 0x400000, v0
+; GFX11-TRUE16-NEXT: v_cmp_u_f32_e32 vcc_lo, v0, v0
+; GFX11-TRUE16-NEXT: v_add_nc_u32_e32 v1, v1, v0
+; GFX11-TRUE16-NEXT: s_delay_alu instid0(VALU_DEP_1) | instskip(NEXT) | instid1(VALU_DEP_1)
+; GFX11-TRUE16-NEXT: v_add_nc_u32_e32 v1, 0x7fff, v1
+; GFX11-TRUE16-NEXT: v_dual_cndmask_b32 v0, v1, v2 :: v_dual_lshlrev_b32 v1, 16, v169
+; GFX11-TRUE16-NEXT: s_delay_alu instid0(VALU_DEP_1) | instskip(NEXT) | instid1(VALU_DEP_2)
+; GFX11-TRUE16-NEXT: v_lshrrev_b32_e32 v0, 16, v0
+; GFX11-TRUE16-NEXT: v_add_f32_e32 v1, 0x40c00000, v1
+; GFX11-TRUE16-NEXT: s_delay_alu instid0(VALU_DEP_1) | instskip(SKIP_2) | instid1(VALU_DEP_3)
+; GFX11-TRUE16-NEXT: v_bfe_u32 v2, v1, 16, 1
+; GFX11-TRUE16-NEXT: v_or_b32_e32 v3, 0x400000, v1
+; GFX11-TRUE16-NEXT: v_cmp_u_f32_e32 vcc_lo, v1, v1
+; GFX11-TRUE16-NEXT: v_add_nc_u32_e32 v2, v2, v1
+; GFX11-TRUE16-NEXT: s_delay_alu instid0(VALU_DEP_1) | instskip(NEXT) | instid1(VALU_DEP_1)
+; GFX11-TRUE16-NEXT: v_add_nc_u32_e32 v2, 0x7fff, v2
+; GFX11-TRUE16-NEXT: v_cndmask_b32_e32 v1, v2, v3, vcc_lo
+; GFX11-TRUE16-NEXT: s_delay_alu instid0(VALU_DEP_1) | instskip(SKIP_2) | instid1(VALU_DEP_1)
+; GFX11-TRUE16-NEXT: v_lshrrev_b32_e32 v169, 16, v1
+; GFX11-TRUE16-NEXT: v_mov_b16_e64 v169.h, v0.l
+; GFX11-TRUE16-NEXT: v_and_b32_e32 v0, 0xffff0000, v170
+; GFX11-TRUE16-NEXT: v_add_f32_e32 v0, 0x40c00000, v0
+; GFX11-TRUE16-NEXT: s_delay_alu instid0(VALU_DEP_1) | instskip(SKIP_2) | instid1(VALU_DEP_3)
+; GFX11-TRUE16-NEXT: v_bfe_u32 v1, v0, 16, 1
+; GFX11-TRUE16-NEXT: v_or_b32_e32 v2, 0x400000, v0
+; GFX11-TRUE16-NEXT: v_cmp_u_f32_e32 vcc_lo, v0, v0
+; GFX11-TRUE16-NEXT: v_add_nc_u32_e32 v1, v1, v0
+; GFX11-TRUE16-NEXT: s_delay_alu instid0(VALU_DEP_1) | instskip(NEXT) | instid1(VALU_DEP_1)
+; GFX11-TRUE16-NEXT: v_add_nc_u32_e32 v1, 0x7fff, v1
+; GFX11-TRUE16-NEXT: v_cndmask_b32_e32 v0, v1, v2, vcc_lo
+; GFX11-TRUE16-NEXT: v_lshlrev_b32_e32 v1, 16, v170
+; GFX11-TRUE16-NEXT: s_delay_alu instid0(VALU_DEP_1) | instskip(NEXT) | instid1(VALU_DEP_3)
+; GFX11-TRUE16-NEXT: v_add_f32_e32 v1, 0x40c00000, v1
+; GFX11-TRUE16-NEXT: v_lshrrev_b32_e32 v0, 16, v0
+; GFX11-TRUE16-NEXT: s_delay_alu instid0(VALU_DEP_2) | instskip(SKIP_2) | instid1(VALU_DEP_3)
+; GFX11-TRUE16-NEXT: v_bfe_u32 v2, v1, 16, 1
+; GFX11-TRUE16-NEXT: v_or_b32_e32 v3, 0x400000, v1
+; GFX11-TRUE16-NEXT: v_cmp_u_f32_e32 vcc_lo, v1, v1
+; GFX11-TRUE16-NEXT: v_add_nc_u32_e32 v2, v2, v1
+; GFX11-TRUE16-NEXT: s_delay_alu instid0(VALU_DEP_1) | instskip(NEXT) | instid1(VALU_DEP_1)
+; GFX11-TRUE16-NEXT: v_add_nc_u32_e32 v2, 0x7fff, v2
+; GFX11-TRUE16-NEXT: v_cndmask_b32_e32 v1, v2, v3, vcc_lo
+; GFX11-TRUE16-NEXT: s_delay_alu instid0(VALU_DEP_1) | instskip(SKIP_2) | instid1(VALU_DEP_1)
+; GFX11-TRUE16-NEXT: v_lshrrev_b32_e32 v170, 16, v1
+; GFX11-TRUE16-NEXT: v_mov_b16_e64 v170.h, v0.l
+; GFX11-TRUE16-NEXT: v_and_b32_e32 v0, 0xffff0000, v171
+; GFX11-TRUE16-NEXT: v_add_f32_e32 v0, 0x40c00000, v0
+; GFX11-TRUE16-NEXT: s_delay_alu instid0(VALU_DEP_1) | instskip(SKIP_2) | instid1(VALU_DEP_3)
+; GFX11-TRUE16-NEXT: v_bfe_u32 v1, v0, 16, 1
+; GFX11-TRUE16-NEXT: v_or_b32_e32 v2, 0x400000, v0
+; GFX11-TRUE16-NEXT: v_cmp_u_f32_e32 vcc_lo, v0, v0
+; GFX11-TRUE16-NEXT: v_add_nc_u32_e32 v1, v1, v0
+; GFX11-TRUE16-NEXT: s_delay_alu instid0(VALU_DEP_1) | instskip(NEXT) | instid1(VALU_DEP_1)
+; GFX11-TRUE16-NEXT: v_add_nc_u32_e32 v1, 0x7fff, v1
+; GFX11-TRUE16-NEXT: v_dual_cndmask_b32 v0, v1, v2 :: v_dual_lshlrev_b32 v1, 16, v171
+; GFX11-TRUE16-NEXT: s_delay_alu instid0(VALU_DEP_1) | instskip(NEXT) | instid1(VALU_DEP_2)
+; GFX11-TRUE16-NEXT: v_lshrrev_b32_e32 v0, 16, v0
+; GFX11-TRUE16-NEXT: v_add_f32_e32 v1, 0x40c00000, v1
+; GFX11-TRUE16-NEXT: s_delay_alu instid0(VALU_DEP_1) | instskip(SKIP_2) | instid1(VALU_DEP_3)
+; GFX11-TRUE16-NEXT: v_bfe_u32 v2, v1, 16, 1
+; GFX11-TRUE16-NEXT: v_or_b32_e32 v3, 0x400000, v1
+; GFX11-TRUE16-NEXT: v_cmp_u_f32_e32 vcc_lo, v1, v1
+; GFX11-TRUE16-NEXT: v_add_nc_u32_e32 v2, v2, v1
+; GFX11-TRUE16-NEXT: s_delay_alu instid0(VALU_DEP_1) | instskip(NEXT) | instid1(VALU_DEP_1)
+; GFX11-TRUE16-NEXT: v_add_nc_u32_e32 v2, 0x7fff, v2
+; GFX11-TRUE16-NEXT: v_cndmask_b32_e32 v1, v2, v3, vcc_lo
+; GFX11-TRUE16-NEXT: s_delay_alu instid0(VALU_DEP_1) | instskip(SKIP_2) | instid1(VALU_DEP_1)
+; GFX11-TRUE16-NEXT: v_lshrrev_b32_e32 v171, 16, v1
+; GFX11-TRUE16-NEXT: v_mov_b16_e64 v171.h, v0.l
+; GFX11-TRUE16-NEXT: v_and_b32_e32 v0, 0xffff0000, v172
+; GFX11-TRUE16-NEXT: v_add_f32_e32 v0, 0x40c00000, v0
+; GFX11-TRUE16-NEXT: s_delay_alu instid0(VALU_DEP_1) | instskip(SKIP_2) | instid1(VALU_DEP_3)
+; GFX11-TRUE16-NEXT: v_bfe_u32 v1, v0, 16, 1
+; GFX11-TRUE16-NEXT: v_or_b32_e32 v2, 0x400000, v0
+; GFX11-TRUE16-NEXT: v_cmp_u_f32_e32 vcc_lo, v0, v0
+; GFX11-TRUE16-NEXT: v_add_nc_u32_e32 v1, v1, v0
+; GFX11-TRUE16-NEXT: s_delay_alu instid0(VALU_DEP_1) | instskip(NEXT) | instid1(VALU_DEP_1)
+; GFX11-TRUE16-NEXT: v_add_nc_u32_e32 v1, 0x7fff, v1
+; GFX11-TRUE16-NEXT: v_dual_cndmask_b32 v0, v1, v2 :: v_dual_lshlrev_b32 v1, 16, v172
+; GFX11-TRUE16-NEXT: s_delay_alu instid0(VALU_DEP_1) | instskip(NEXT) | instid1(VALU_DEP_2)
+; GFX11-TRUE16-NEXT: v_lshrrev_b32_e32 v0, 16, v0
+; GFX11-TRUE16-NEXT: v_add_f32_e32 v1, 0x40c00000, v1
+; GFX11-TRUE16-NEXT: s_delay_alu instid0(VALU_DEP_1) | instskip(SKIP_2) | instid1(VALU_DEP_3)
+; GFX11-TRUE16-NEXT: v_bfe_u32 v2, v1, 16, 1
+; GFX11-TRUE16-NEXT: v_or_b32_e32 v3, 0x400000, v1
+; GFX11-TRUE16-NEXT: v_cmp_u_f32_e32 vcc_lo, v1, v1
+; GFX11-TRUE16-NEXT: v_add_nc_u32_e32 v2, v2, v1
+; GFX11-TRUE16-NEXT: s_delay_alu instid0(VALU_DEP_1) | instskip(NEXT) | instid1(VALU_DEP_1)
+; GFX11-TRUE16-NEXT: v_add_nc_u32_e32 v2, 0x7fff, v2
+; GFX11-TRUE16-NEXT: v_cndmask_b32_e32 v1, v2, v3, vcc_lo
+; GFX11-TRUE16-NEXT: s_delay_alu instid0(VALU_DEP_1) | instskip(SKIP_2) | instid1(VALU_DEP_1)
+; GFX11-TRUE16-NEXT: v_lshrrev_b32_e32 v172, 16, v1
+; GFX11-TRUE16-NEXT: v_mov_b16_e64 v172.h, v0.l
+; GFX11-TRUE16-NEXT: v_and_b32_e32 v0, 0xffff0000, v173
+; GFX11-TRUE16-NEXT: v_add_f32_e32 v0, 0x40c00000, v0
+; GFX11-TRUE16-NEXT: s_delay_alu instid0(VALU_DEP_1) | instskip(SKIP_2) | instid1(VALU_DEP_3)
+; GFX11-TRUE16-NEXT: v_bfe_u32 v1, v0, 16, 1
+; GFX11-TRUE16-NEXT: v_or_b32_e32 v2, 0x400000, v0
+; GFX11-TRUE16-NEXT: v_cmp_u_f32_e32 vcc_lo, v0, v0
+; GFX11-TRUE16-NEXT: v_add_nc_u32_e32 v1, v1, v0
+; GFX11-TRUE16-NEXT: s_delay_alu instid0(VALU_DEP_1) | instskip(NEXT) | instid1(VALU_DEP_1)
+; GFX11-TRUE16-NEXT: v_add_nc_u32_e32 v1, 0x7fff, v1
+; GFX11-TRUE16-NEXT: v_dual_cndmask_b32 v0, v1, v2 :: v_dual_lshlrev_b32 v1, 16, v173
+; GFX11-TRUE16-NEXT: s_delay_alu instid0(VALU_DEP_1) | instskip(NEXT) | instid1(VALU_DEP_2)
+; GFX11-TRUE16-NEXT: v_lshrrev_b32_e32 v0, 16, v0
+; GFX11-TRUE16-NEXT: v_add_f32_e32 v1, 0x40c00000, v1
+; GFX11-TRUE16-NEXT: s_delay_alu instid0(VALU_DEP_1) | instskip(SKIP_2) | instid1(VALU_DEP_3)
+; GFX11-TRUE16-NEXT: v_bfe_u32 v2, v1, 16, 1
+; GFX11-TRUE16-NEXT: v_or_b32_e32 v3, 0x400000, v1
+; GFX11-TRUE16-NEXT: v_cmp_u_f32_e32 vcc_lo, v1, v1
+; GFX11-TRUE16-NEXT: v_add_nc_u32_e32 v2, v2, v1
+; GFX11-TRUE16-NEXT: s_delay_alu instid0(VALU_DEP_1) | instskip(NEXT) | instid1(VALU_DEP_1)
+; GFX11-TRUE16-NEXT: v_add_nc_u32_e32 v2, 0x7fff, v2
+; GFX11-TRUE16-NEXT: v_cndmask_b32_e32 v1, v2, v3, vcc_lo
+; GFX11-TRUE16-NEXT: s_delay_alu instid0(VALU_DEP_1) | instskip(SKIP_2) | instid1(VALU_DEP_1)
+; GFX11-TRUE16-NEXT: v_lshrrev_b32_e32 v173, 16, v1
+; GFX11-TRUE16-NEXT: v_mov_b16_e64 v173.h, v0.l
+; GFX11-TRUE16-NEXT: v_and_b32_e32 v0, 0xffff0000, v174
+; GFX11-TRUE16-NEXT: v_add_f32_e32 v0, 0x40c00000, v0
+; GFX11-TRUE16-NEXT: s_delay_alu instid0(VALU_DEP_1) | instskip(SKIP_2) | instid1(VALU_DEP_3)
+; GFX11-TRUE16-NEXT: v_bfe_u32 v1, v0, 16, 1
+; GFX11-TRUE16-NEXT: v_or_b32_e32 v2, 0x400000, v0
+; GFX11-TRUE16-NEXT: v_cmp_u_f32_e32 vcc_lo, v0, v0
+; GFX11-TRUE16-NEXT: v_add_nc_u32_e32 v1, v1, v0
+; GFX11-TRUE16-NEXT: s_delay_alu instid0(VALU_DEP_1) | instskip(NEXT) | instid1(VALU_DEP_1)
+; GFX11-TRUE16-NEXT: v_add_nc_u32_e32 v1, 0x7fff, v1
+; GFX11-TRUE16-NEXT: v_cndmask_b32_e32 v0, v1, v2, vcc_lo
+; GFX11-TRUE16-NEXT: v_lshlrev_b32_e32 v1, 16, v174
+; GFX11-TRUE16-NEXT: s_delay_alu instid0(VALU_DEP_1) | instskip(NEXT) | instid1(VALU_DEP_3)
+; GFX11-TRUE16-NEXT: v_add_f32_e32 v1, 0x40c00000, v1
+; GFX11-TRUE16-NEXT: v_lshrrev_b32_e32 v0, 16, v0
+; GFX11-TRUE16-NEXT: s_delay_alu instid0(VALU_DEP_2) | instskip(SKIP_2) | instid1(VALU_DEP_3)
+; GFX11-TRUE16-NEXT: v_bfe_u32 v2, v1, 16, 1
+; GFX11-TRUE16-NEXT: v_or_b32_e32 v3, 0x400000, v1
+; GFX11-TRUE16-NEXT: v_cmp_u_f32_e32 vcc_lo, v1, v1
+; GFX11-TRUE16-NEXT: v_add_nc_u32_e32 v2, v2, v1
+; GFX11-TRUE16-NEXT: s_delay_alu instid0(VALU_DEP_1) | instskip(NEXT) | instid1(VALU_DEP_1)
+; GFX11-TRUE16-NEXT: v_add_nc_u32_e32 v2, 0x7fff, v2
+; GFX11-TRUE16-NEXT: v_cndmask_b32_e32 v1, v2, v3, vcc_lo
+; GFX11-TRUE16-NEXT: s_delay_alu instid0(VALU_DEP_1)
+; GFX11-TRUE16-NEXT: v_lshrrev_b32_e32 v174, 16, v1
+; GFX11-TRUE16-NEXT: v_mov_b16_e64 v174.h, v0.l
+; GFX11-TRUE16-NEXT: .LBB43_3: ; %end
+; GFX11-TRUE16-NEXT: v_dual_mov_b32 v4, v125 :: v_dual_mov_b32 v5, v120
+; GFX11-TRUE16-NEXT: v_dual_mov_b32 v6, v114 :: v_dual_mov_b32 v7, v107
+; GFX11-TRUE16-NEXT: v_dual_mov_b32 v8, v99 :: v_dual_mov_b32 v9, v90
+; GFX11-TRUE16-NEXT: v_dual_mov_b32 v12, v57 :: v_dual_mov_b32 v13, v44
+; GFX11-TRUE16-NEXT: v_dual_mov_b32 v14, v30 :: v_dual_mov_b32 v17, v173
+; GFX11-TRUE16-NEXT: v_dual_mov_b32 v16, v174 :: v_dual_mov_b32 v19, v171
+; GFX11-TRUE16-NEXT: v_dual_mov_b32 v18, v172 :: v_dual_mov_b32 v21, v169
+; GFX11-TRUE16-NEXT: v_dual_mov_b32 v20, v170 :: v_dual_mov_b32 v23, v183
+; GFX11-TRUE16-NEXT: v_dual_mov_b32 v22, v168 :: v_dual_mov_b32 v25, v181
+; GFX11-TRUE16-NEXT: s_clause 0x1f
+; GFX11-TRUE16-NEXT: scratch_load_b32 v174, off, s32
+; GFX11-TRUE16-NEXT: scratch_load_b32 v173, off, s32 offset:4
+; GFX11-TRUE16-NEXT: scratch_load_b32 v172, off, s32 offset:8
+; GFX11-TRUE16-NEXT: scratch_load_b32 v171, off, s32 offset:12
+; GFX11-TRUE16-NEXT: scratch_load_b32 v170, off, s32 offset:16
+; GFX11-TRUE16-NEXT: scratch_load_b32 v169, off, s32 offset:20
+; GFX11-TRUE16-NEXT: scratch_load_b32 v168, off, s32 offset:24
+; GFX11-TRUE16-NEXT: scratch_load_b32 v159, off, s32 offset:28
+; GFX11-TRUE16-NEXT: scratch_load_b32 v158, off, s32 offset:32
+; GFX11-TRUE16-NEXT: scratch_load_b32 v157, off, s32 offset:36
+; GFX11-TRUE16-NEXT: scratch_load_b32 v156, off, s32 offset:40
+; GFX11-TRUE16-NEXT: scratch_load_b32 v155, off, s32 offset:44
+; GFX11-TRUE16-NEXT: scratch_load_b32 v154, off, s32 offset:48
+; GFX11-TRUE16-NEXT: scratch_load_b32 v153, off, s32 offset:52
+; GFX11-TRUE16-NEXT: scratch_load_b32 v152, off, s32 offset:56
+; GFX11-TRUE16-NEXT: scratch_load_b32 v143, off, s32 offset:60
+; GFX11-TRUE16-NEXT: scratch_load_b32 v142, off, s32 offset:64
+; GFX11-TRUE16-NEXT: scratch_load_b32 v141, off, s32 offset:68
+; GFX11-TRUE16-NEXT: scratch_load_b32 v140, off, s32 offset:72
+; GFX11-TRUE16-NEXT: scratch_load_b32 v139, off, s32 offset:76
+; GFX11-TRUE16-NEXT: scratch_load_b32 v138, off, s32 offset:80
+; GFX11-TRUE16-NEXT: scratch_load_b32 v137, off, s32 offset:84
+; GFX11-TRUE16-NEXT: scratch_load_b32 v136, off, s32 offset:88
+; GFX11-TRUE16-NEXT: scratch_load_b32 v127, off, s32 offset:92
+; GFX11-TRUE16-NEXT: scratch_load_b32 v126, off, s32 offset:96
+; GFX11-TRUE16-NEXT: scratch_load_b32 v125, off, s32 offset:100
+; GFX11-TRUE16-NEXT: scratch_load_b32 v124, off, s32 offset:104
+; GFX11-TRUE16-NEXT: scratch_load_b32 v123, off, s32 offset:108
+; GFX11-TRUE16-NEXT: scratch_load_b32 v122, off, s32 offset:112
+; GFX11-TRUE16-NEXT: scratch_load_b32 v121, off, s32 offset:116
+; GFX11-TRUE16-NEXT: scratch_load_b32 v120, off, s32 offset:120
+; GFX11-TRUE16-NEXT: scratch_load_b32 v111, off, s32 offset:124
+; GFX11-TRUE16-NEXT: s_clause 0x1f
+; GFX11-TRUE16-NEXT: scratch_load_b32 v110, off, s32 offset:128
+; GFX11-TRUE16-NEXT: scratch_load_b32 v109, off, s32 offset:132
+; GFX11-TRUE16-NEXT: scratch_load_b32 v108, off, s32 offset:136
+; GFX11-TRUE16-NEXT: scratch_load_b32 v107, off, s32 offset:140
+; GFX11-TRUE16-NEXT: scratch_load_b32 v106, off, s32 offset:144
+; GFX11-TRUE16-NEXT: scratch_load_b32 v105, off, s32 offset:148
+; GFX11-TRUE16-NEXT: scratch_load_b32 v104, off, s32 offset:152
+; GFX11-TRUE16-NEXT: scratch_load_b32 v95, off, s32 offset:156
+; GFX11-TRUE16-NEXT: scratch_load_b32 v94, off, s32 offset:160
+; GFX11-TRUE16-NEXT: scratch_load_b32 v93, off, s32 offset:164
+; GFX11-TRUE16-NEXT: scratch_load_b32 v92, off, s32 offset:168
+; GFX11-TRUE16-NEXT: scratch_load_b32 v91, off, s32 offset:172
+; GFX11-TRUE16-NEXT: scratch_load_b32 v90, off, s32 offset:176
+; GFX11-TRUE16-NEXT: scratch_load_b32 v89, off, s32 offset:180
+; GFX11-TRUE16-NEXT: scratch_load_b32 v88, off, s32 offset:184
+; GFX11-TRUE16-NEXT: scratch_load_b32 v79, off, s32 offset:188
+; GFX11-TRUE16-NEXT: scratch_load_b32 v78, off, s32 offset:192
+; GFX11-TRUE16-NEXT: scratch_load_b32 v77, off, s32 offset:196
+; GFX11-TRUE16-NEXT: scratch_load_b32 v76, off, s32 offset:200
+; GFX11-TRUE16-NEXT: scratch_load_b32 v75, off, s32 offset:204
+; GFX11-TRUE16-NEXT: scratch_load_b32 v74, off, s32 offset:208
+; GFX11-TRUE16-NEXT: scratch_load_b32 v73, off, s32 offset:212
+; GFX11-TRUE16-NEXT: scratch_load_b32 v72, off, s32 offset:216
+; GFX11-TRUE16-NEXT: scratch_load_b32 v63, off, s32 offset:220
+; GFX11-TRUE16-NEXT: scratch_load_b32 v62, off, s32 offset:224
+; GFX11-TRUE16-NEXT: scratch_load_b32 v61, off, s32 offset:228
+; GFX11-TRUE16-NEXT: scratch_load_b32 v60, off, s32 offset:232
+; GFX11-TRUE16-NEXT: scratch_load_b32 v59, off, s32 offset:236
+; GFX11-TRUE16-NEXT: scratch_load_b32 v58, off, s32 offset:240
+; GFX11-TRUE16-NEXT: scratch_load_b32 v57, off, s32 offset:244
+; GFX11-TRUE16-NEXT: scratch_load_b32 v56, off, s32 offset:248
+; GFX11-TRUE16-NEXT: scratch_load_b32 v47, off, s32 offset:252
+; GFX11-TRUE16-NEXT: s_clause 0x6
+; GFX11-TRUE16-NEXT: scratch_load_b32 v46, off, s32 offset:256
+; GFX11-TRUE16-NEXT: scratch_load_b32 v45, off, s32 offset:260
+; GFX11-TRUE16-NEXT: scratch_load_b32 v44, off, s32 offset:264
+; GFX11-TRUE16-NEXT: scratch_load_b32 v43, off, s32 offset:268
+; GFX11-TRUE16-NEXT: scratch_load_b32 v42, off, s32 offset:272
+; GFX11-TRUE16-NEXT: scratch_load_b32 v41, off, s32 offset:276
+; GFX11-TRUE16-NEXT: scratch_load_b32 v40, off, s32 offset:280
+; GFX11-TRUE16-NEXT: v_dual_mov_b32 v0, v135 :: v_dual_mov_b32 v1, v134
+; GFX11-TRUE16-NEXT: v_dual_mov_b32 v2, v132 :: v_dual_mov_b32 v3, v129
+; GFX11-TRUE16-NEXT: v_dual_mov_b32 v10, v80 :: v_dual_mov_b32 v11, v69
+; GFX11-TRUE16-NEXT: v_dual_mov_b32 v24, v182 :: v_dual_mov_b32 v27, v179
+; GFX11-TRUE16-NEXT: v_dual_mov_b32 v26, v180 :: v_dual_mov_b32 v29, v177
+; GFX11-TRUE16-NEXT: v_dual_mov_b32 v28, v178 :: v_dual_mov_b32 v31, v167
+; GFX11-TRUE16-NEXT: v_mov_b32_e32 v30, v176
+; GFX11-TRUE16-NEXT: s_waitcnt vmcnt(0)
+; GFX11-TRUE16-NEXT: s_setpc_b64 s[30:31]
+; GFX11-TRUE16-NEXT: .LBB43_4:
+; GFX11-TRUE16-NEXT: ; implicit-def: $vgpr0_vgpr1_vgpr2_vgpr3_vgpr4_vgpr5_vgpr6_vgpr7_vgpr8_vgpr9_vgpr10_vgpr11_vgpr12_vgpr13_vgpr14_vgpr15_vgpr16_vgpr17_vgpr18_vgpr19_vgpr20_vgpr21_vgpr22_vgpr23_vgpr24_vgpr25_vgpr26_vgpr27_vgpr28_vgpr29_vgpr30_vgpr31
+; GFX11-TRUE16-NEXT: ; implicit-def: $vgpr16_vgpr17_vgpr18_vgpr19_vgpr20_vgpr21_vgpr22_vgpr23_vgpr24_vgpr25_vgpr26_vgpr27_vgpr28_vgpr29_vgpr30_vgpr31_vgpr32_vgpr33_vgpr34_vgpr35_vgpr36_vgpr37_vgpr38_vgpr39_vgpr40_vgpr41_vgpr42_vgpr43_vgpr44_vgpr45_vgpr46_vgpr47
+; GFX11-TRUE16-NEXT: ; implicit-def: $vgpr31_vgpr32_vgpr33_vgpr34_vgpr35_vgpr36_vgpr37_vgpr38_vgpr39_vgpr40_vgpr41_vgpr42_vgpr43_vgpr44_vgpr45_vgpr46_vgpr47_vgpr48_vgpr49_vgpr50_vgpr51_vgpr52_vgpr53_vgpr54_vgpr55_vgpr56_vgpr57_vgpr58_vgpr59_vgpr60_vgpr61_vgpr62
+; GFX11-TRUE16-NEXT: ; implicit-def: $vgpr45_vgpr46_vgpr47_vgpr48_vgpr49_vgpr50_vgpr51_vgpr52_vgpr53_vgpr54_vgpr55_vgpr56_vgpr57_vgpr58_vgpr59_vgpr60_vgpr61_vgpr62_vgpr63_vgpr64_vgpr65_vgpr66_vgpr67_vgpr68_vgpr69_vgpr70_vgpr71_vgpr72_vgpr73_vgpr74_vgpr75_vgpr76
+; GFX11-TRUE16-NEXT: ; implicit-def: $vgpr58_vgpr59_vgpr60_vgpr61_vgpr62_vgpr63_vgpr64_vgpr65_vgpr66_vgpr67_vgpr68_vgpr69_vgpr70_vgpr71_vgpr72_vgpr73_vgpr74_vgpr75_vgpr76_vgpr77_vgpr78_vgpr79_vgpr80_vgpr81_vgpr82_vgpr83_vgpr84_vgpr85_vgpr86_vgpr87_vgpr88_vgpr89
+; GFX11-TRUE16-NEXT: ; implicit-def: $vgpr70_vgpr71_vgpr72_vgpr73_vgpr74_vgpr75_vgpr76_vgpr77_vgpr78_vgpr79_vgpr80_vgpr81_vgpr82_vgpr83_vgpr84_vgpr85_vgpr86_vgpr87_vgpr88_vgpr89_vgpr90_vgpr91_vgpr92_vgpr93_vgpr94_vgpr95_vgpr96_vgpr97_vgpr98_vgpr99_vgpr100_vgpr101
+; GFX11-TRUE16-NEXT: ; implicit-def: $vgpr81_vgpr82_vgpr83_vgpr84_vgpr85_vgpr86_vgpr87_vgpr88_vgpr89_vgpr90_vgpr91_vgpr92_vgpr93_vgpr94_vgpr95_vgpr96_vgpr97_vgpr98_vgpr99_vgpr100_vgpr101_vgpr102_vgpr103_vgpr104_vgpr105_vgpr106_vgpr107_vgpr108_vgpr109_vgpr110_vgpr111_vgpr112
+; GFX11-TRUE16-NEXT: ; implicit-def: $vgpr91_vgpr92_vgpr93_vgpr94_vgpr95_vgpr96_vgpr97_vgpr98_vgpr99_vgpr100_vgpr101_vgpr102_vgpr103_vgpr104_vgpr105_vgpr106_vgpr107_vgpr108_vgpr109_vgpr110_vgpr111_vgpr112_vgpr113_vgpr114_vgpr115_vgpr116_vgpr117_vgpr118_vgpr119_vgpr120_vgpr121_vgpr122
+; GFX11-TRUE16-NEXT: ; implicit-def: $vgpr100_vgpr101_vgpr102_vgpr103_vgpr104_vgpr105_vgpr106_vgpr107_vgpr108_vgpr109_vgpr110_vgpr111_vgpr112_vgpr113_vgpr114_vgpr115_vgpr116_vgpr117_vgpr118_vgpr119_vgpr120_vgpr121_vgpr122_vgpr123_vgpr124_vgpr125_vgpr126_vgpr127_vgpr128_vgpr129_vgpr130_vgpr131
+; GFX11-TRUE16-NEXT: ; implicit-def: $vgpr108_vgpr109_vgpr110_vgpr111_vgpr112_vgpr113_vgpr114_vgpr115_vgpr116_vgpr117_vgpr118_vgpr119_vgpr120_vgpr121_vgpr122_vgpr123_vgpr124_vgpr125_vgpr126_vgpr127_vgpr128_vgpr129_vgpr130_vgpr131_vgpr132_vgpr133_vgpr134_vgpr135_vgpr136_vgpr137_vgpr138_vgpr139
+; GFX11-TRUE16-NEXT: ; implicit-def: $vgpr115_vgpr116_vgpr117_vgpr118_vgpr119_vgpr120_vgpr121_vgpr122_vgpr123_vgpr124_vgpr125_vgpr126_vgpr127_vgpr128_vgpr129_vgpr130_vgpr131_vgpr132_vgpr133_vgpr134_vgpr135_vgpr136_vgpr137_vgpr138_vgpr139_vgpr140_vgpr141_vgpr142_vgpr143_vgpr144_vgpr145_vgpr146
+; GFX11-TRUE16-NEXT: ; implicit-def: $vgpr121_vgpr122_vgpr123_vgpr124_vgpr125_vgpr126_vgpr127_vgpr128_vgpr129_vgpr130_vgpr131_vgpr132_vgpr133_vgpr134_vgpr135_vgpr136_vgpr137_vgpr138_vgpr139_vgpr140_vgpr141_vgpr142_vgpr143_vgpr144_vgpr145_vgpr146_vgpr147_vgpr148_vgpr149_vgpr150_vgpr151_vgpr152
+; GFX11-TRUE16-NEXT: ; implicit-def: $vgpr126_vgpr127_vgpr128_vgpr129_vgpr130_vgpr131_vgpr132_vgpr133_vgpr134_vgpr135_vgpr136_vgpr137_vgpr138_vgpr139_vgpr140_vgpr141_vgpr142_vgpr143_vgpr144_vgpr145_vgpr146_vgpr147_vgpr148_vgpr149_vgpr150_vgpr151_vgpr152_vgpr153_vgpr154_vgpr155_vgpr156_vgpr157
+; GFX11-TRUE16-NEXT: ; implicit-def: $vgpr130_vgpr131_vgpr132_vgpr133_vgpr134_vgpr135_vgpr136_vgpr137_vgpr138_vgpr139_vgpr140_vgpr141_vgpr142_vgpr143_vgpr144_vgpr145_vgpr146_vgpr147_vgpr148_vgpr149_vgpr150_vgpr151_vgpr152_vgpr153_vgpr154_vgpr155_vgpr156_vgpr157_vgpr158_vgpr159_vgpr160_vgpr161
+; GFX11-TRUE16-NEXT: ; implicit-def: $vgpr133_vgpr134_vgpr135_vgpr136_vgpr137_vgpr138_vgpr139_vgpr140_vgpr141_vgpr142_vgpr143_vgpr144_vgpr145_vgpr146_vgpr147_vgpr148_vgpr149_vgpr150_vgpr151_vgpr152_vgpr153_vgpr154_vgpr155_vgpr156_vgpr157_vgpr158_vgpr159_vgpr160_vgpr161_vgpr162_vgpr163_vgpr164
+; GFX11-TRUE16-NEXT: ; implicit-def: $vgpr135_vgpr136_vgpr137_vgpr138_vgpr139_vgpr140_vgpr141_vgpr142_vgpr143_vgpr144_vgpr145_vgpr146_vgpr147_vgpr148_vgpr149_vgpr150_vgpr151_vgpr152_vgpr153_vgpr154_vgpr155_vgpr156_vgpr157_vgpr158_vgpr159_vgpr160_vgpr161_vgpr162_vgpr163_vgpr164_vgpr165_vgpr166
+; GFX11-TRUE16-NEXT: s_branch .LBB43_2
+;
+; GFX11-FAKE16-LABEL: bitcast_v64bf16_to_v32f32_scalar:
+; GFX11-FAKE16: ; %bb.0:
+; GFX11-FAKE16-NEXT: s_waitcnt vmcnt(0) expcnt(0) lgkmcnt(0)
+; GFX11-FAKE16-NEXT: v_cmp_ne_u32_e32 vcc_lo, 0, v14
+; GFX11-FAKE16-NEXT: s_clause 0x1f
+; GFX11-FAKE16-NEXT: scratch_store_b32 off, v40, s32 offset:288
+; GFX11-FAKE16-NEXT: scratch_store_b32 off, v41, s32 offset:284
+; GFX11-FAKE16-NEXT: scratch_store_b32 off, v42, s32 offset:280
+; GFX11-FAKE16-NEXT: scratch_store_b32 off, v43, s32 offset:276
+; GFX11-FAKE16-NEXT: scratch_store_b32 off, v44, s32 offset:272
+; GFX11-FAKE16-NEXT: scratch_store_b32 off, v45, s32 offset:268
+; GFX11-FAKE16-NEXT: scratch_store_b32 off, v46, s32 offset:264
+; GFX11-FAKE16-NEXT: scratch_store_b32 off, v47, s32 offset:260
+; GFX11-FAKE16-NEXT: scratch_store_b32 off, v56, s32 offset:256
+; GFX11-FAKE16-NEXT: scratch_store_b32 off, v57, s32 offset:252
+; GFX11-FAKE16-NEXT: scratch_store_b32 off, v58, s32 offset:248
+; GFX11-FAKE16-NEXT: scratch_store_b32 off, v59, s32 offset:244
+; GFX11-FAKE16-NEXT: scratch_store_b32 off, v60, s32 offset:240
+; GFX11-FAKE16-NEXT: scratch_store_b32 off, v61, s32 offset:236
+; GFX11-FAKE16-NEXT: scratch_store_b32 off, v62, s32 offset:232
+; GFX11-FAKE16-NEXT: scratch_store_b32 off, v63, s32 offset:228
+; GFX11-FAKE16-NEXT: scratch_store_b32 off, v72, s32 offset:224
+; GFX11-FAKE16-NEXT: scratch_store_b32 off, v73, s32 offset:220
+; GFX11-FAKE16-NEXT: scratch_store_b32 off, v74, s32 offset:216
+; GFX11-FAKE16-NEXT: scratch_store_b32 off, v75, s32 offset:212
+; GFX11-FAKE16-NEXT: scratch_store_b32 off, v76, s32 offset:208
+; GFX11-FAKE16-NEXT: scratch_store_b32 off, v77, s32 offset:204
+; GFX11-FAKE16-NEXT: scratch_store_b32 off, v78, s32 offset:200
+; GFX11-FAKE16-NEXT: scratch_store_b32 off, v79, s32 offset:196
+; GFX11-FAKE16-NEXT: scratch_store_b32 off, v88, s32 offset:192
+; GFX11-FAKE16-NEXT: scratch_store_b32 off, v89, s32 offset:188
+; GFX11-FAKE16-NEXT: scratch_store_b32 off, v90, s32 offset:184
+; GFX11-FAKE16-NEXT: scratch_store_b32 off, v91, s32 offset:180
+; GFX11-FAKE16-NEXT: scratch_store_b32 off, v92, s32 offset:176
+; GFX11-FAKE16-NEXT: scratch_store_b32 off, v93, s32 offset:172
+; GFX11-FAKE16-NEXT: scratch_store_b32 off, v94, s32 offset:168
+; GFX11-FAKE16-NEXT: scratch_store_b32 off, v95, s32 offset:164
+; GFX11-FAKE16-NEXT: s_clause 0x1f
+; GFX11-FAKE16-NEXT: scratch_store_b32 off, v104, s32 offset:160
+; GFX11-FAKE16-NEXT: scratch_store_b32 off, v105, s32 offset:156
+; GFX11-FAKE16-NEXT: scratch_store_b32 off, v106, s32 offset:152
+; GFX11-FAKE16-NEXT: scratch_store_b32 off, v107, s32 offset:148
+; GFX11-FAKE16-NEXT: scratch_store_b32 off, v108, s32 offset:144
+; GFX11-FAKE16-NEXT: scratch_store_b32 off, v109, s32 offset:140
+; GFX11-FAKE16-NEXT: scratch_store_b32 off, v110, s32 offset:136
+; GFX11-FAKE16-NEXT: scratch_store_b32 off, v111, s32 offset:132
+; GFX11-FAKE16-NEXT: scratch_store_b32 off, v120, s32 offset:128
+; GFX11-FAKE16-NEXT: scratch_store_b32 off, v121, s32 offset:124
+; GFX11-FAKE16-NEXT: scratch_store_b32 off, v122, s32 offset:120
+; GFX11-FAKE16-NEXT: scratch_store_b32 off, v123, s32 offset:116
+; GFX11-FAKE16-NEXT: scratch_store_b32 off, v124, s32 offset:112
+; GFX11-FAKE16-NEXT: scratch_store_b32 off, v125, s32 offset:108
+; GFX11-FAKE16-NEXT: scratch_store_b32 off, v126, s32 offset:104
+; GFX11-FAKE16-NEXT: scratch_store_b32 off, v127, s32 offset:100
+; GFX11-FAKE16-NEXT: scratch_store_b32 off, v136, s32 offset:96
+; GFX11-FAKE16-NEXT: scratch_store_b32 off, v137, s32 offset:92
+; GFX11-FAKE16-NEXT: scratch_store_b32 off, v138, s32 offset:88
+; GFX11-FAKE16-NEXT: scratch_store_b32 off, v139, s32 offset:84
+; GFX11-FAKE16-NEXT: scratch_store_b32 off, v140, s32 offset:80
+; GFX11-FAKE16-NEXT: scratch_store_b32 off, v141, s32 offset:76
+; GFX11-FAKE16-NEXT: scratch_store_b32 off, v142, s32 offset:72
+; GFX11-FAKE16-NEXT: scratch_store_b32 off, v143, s32 offset:68
+; GFX11-FAKE16-NEXT: scratch_store_b32 off, v152, s32 offset:64
+; GFX11-FAKE16-NEXT: scratch_store_b32 off, v153, s32 offset:60
+; GFX11-FAKE16-NEXT: scratch_store_b32 off, v154, s32 offset:56
+; GFX11-FAKE16-NEXT: scratch_store_b32 off, v155, s32 offset:52
+; GFX11-FAKE16-NEXT: scratch_store_b32 off, v156, s32 offset:48
+; GFX11-FAKE16-NEXT: scratch_store_b32 off, v157, s32 offset:44
+; GFX11-FAKE16-NEXT: scratch_store_b32 off, v158, s32 offset:40
+; GFX11-FAKE16-NEXT: scratch_store_b32 off, v159, s32 offset:36
+; GFX11-FAKE16-NEXT: s_clause 0x8
+; GFX11-FAKE16-NEXT: scratch_store_b32 off, v168, s32 offset:32
+; GFX11-FAKE16-NEXT: scratch_store_b32 off, v169, s32 offset:28
+; GFX11-FAKE16-NEXT: scratch_store_b32 off, v170, s32 offset:24
+; GFX11-FAKE16-NEXT: scratch_store_b32 off, v171, s32 offset:20
+; GFX11-FAKE16-NEXT: scratch_store_b32 off, v172, s32 offset:16
+; GFX11-FAKE16-NEXT: scratch_store_b32 off, v173, s32 offset:12
+; GFX11-FAKE16-NEXT: scratch_store_b32 off, v174, s32 offset:8
+; GFX11-FAKE16-NEXT: scratch_store_b32 off, v175, s32 offset:4
+; GFX11-FAKE16-NEXT: scratch_store_b32 off, v184, s32
+; GFX11-FAKE16-NEXT: v_dual_mov_b32 v178, v13 :: v_dual_mov_b32 v179, v12
+; GFX11-FAKE16-NEXT: v_dual_mov_b32 v180, v11 :: v_dual_mov_b32 v181, v9
+; GFX11-FAKE16-NEXT: v_dual_mov_b32 v182, v10 :: v_dual_mov_b32 v169, v7
+; GFX11-FAKE16-NEXT: v_dual_mov_b32 v170, v8 :: v_dual_mov_b32 v177, v3
+; GFX11-FAKE16-NEXT: v_dual_mov_b32 v176, v6 :: v_dual_mov_b32 v171, v4
+; GFX11-FAKE16-NEXT: v_dual_mov_b32 v174, v5 :: v_dual_mov_b32 v173, v0
+; GFX11-FAKE16-NEXT: v_dual_mov_b32 v184, v2 :: v_dual_mov_b32 v175, v1
+; GFX11-FAKE16-NEXT: v_dual_mov_b32 v183, s28 :: v_dual_mov_b32 v172, s29
+; GFX11-FAKE16-NEXT: s_mov_b32 s4, 0
+; GFX11-FAKE16-NEXT: s_and_b32 s5, vcc_lo, exec_lo
+; GFX11-FAKE16-NEXT: s_cbranch_scc0 .LBB43_4
+; GFX11-FAKE16-NEXT: ; %bb.1: ; %cmp.false
+; GFX11-FAKE16-NEXT: v_dual_mov_b32 v32, s0 :: v_dual_mov_b32 v37, s2
+; GFX11-FAKE16-NEXT: v_dual_mov_b32 v34, s1 :: v_dual_mov_b32 v41, s3
+; GFX11-FAKE16-NEXT: v_dual_mov_b32 v46, s16 :: v_dual_mov_b32 v59, s18
+; GFX11-FAKE16-NEXT: v_dual_mov_b32 v52, s17 :: v_dual_mov_b32 v67, s19
+; GFX11-FAKE16-NEXT: v_dual_mov_b32 v76, s20 :: v_dual_mov_b32 v97, s22
+; GFX11-FAKE16-NEXT: v_dual_mov_b32 v86, s21 :: v_dual_mov_b32 v109, s23
+; GFX11-FAKE16-NEXT: v_dual_mov_b32 v122, s24 :: v_dual_mov_b32 v151, s26
+; GFX11-FAKE16-NEXT: v_dual_mov_b32 v136, s25 :: v_dual_mov_b32 v15, s27
+; GFX11-FAKE16-NEXT: s_and_not1_b32 vcc_lo, exec_lo, s4
+; GFX11-FAKE16-NEXT: s_cbranch_vccnz .LBB43_3
+; GFX11-FAKE16-NEXT: .LBB43_2: ; %cmp.true
+; GFX11-FAKE16-NEXT: s_and_b32 s5, s27, 0xffff0000
+; GFX11-FAKE16-NEXT: s_lshl_b32 s4, s27, 16
+; GFX11-FAKE16-NEXT: v_add_f32_e64 v1, 0x40c00000, s5
+; GFX11-FAKE16-NEXT: v_add_f32_e64 v0, 0x40c00000, s4
+; GFX11-FAKE16-NEXT: s_lshl_b32 s6, s26, 16
+; GFX11-FAKE16-NEXT: s_and_b32 s4, s26, 0xffff0000
+; GFX11-FAKE16-NEXT: v_add_f32_e64 v5, 0x40c00000, s6
+; GFX11-FAKE16-NEXT: v_bfe_u32 v4, v1, 16, 1
+; GFX11-FAKE16-NEXT: v_bfe_u32 v2, v0, 16, 1
+; GFX11-FAKE16-NEXT: v_or_b32_e32 v7, 0x400000, v1
+; GFX11-FAKE16-NEXT: v_add_f32_e64 v3, 0x40c00000, s4
+; GFX11-FAKE16-NEXT: v_or_b32_e32 v8, 0x400000, v0
+; GFX11-FAKE16-NEXT: v_add_nc_u32_e32 v4, v4, v1
+; GFX11-FAKE16-NEXT: v_bfe_u32 v10, v5, 16, 1
+; GFX11-FAKE16-NEXT: v_cmp_u_f32_e32 vcc_lo, v0, v0
+; GFX11-FAKE16-NEXT: v_bfe_u32 v9, v3, 16, 1
+; GFX11-FAKE16-NEXT: s_lshl_b32 s7, s25, 16
+; GFX11-FAKE16-NEXT: v_add_nc_u32_e32 v4, 0x7fff, v4
+; GFX11-FAKE16-NEXT: v_add_nc_u32_e32 v2, v2, v0
+; GFX11-FAKE16-NEXT: s_and_b32 s5, s25, 0xffff0000
+; GFX11-FAKE16-NEXT: s_and_b32 s4, s24, 0xffff0000
+; GFX11-FAKE16-NEXT: v_add_f32_e64 v6, 0x40c00000, s5
+; GFX11-FAKE16-NEXT: v_and_b32_e32 v51, 0xffff0000, v183
+; GFX11-FAKE16-NEXT: v_add_nc_u32_e32 v2, 0x7fff, v2
+; GFX11-FAKE16-NEXT: s_delay_alu instid0(VALU_DEP_1)
+; GFX11-FAKE16-NEXT: v_cndmask_b32_e32 v0, v2, v8, vcc_lo
+; GFX11-FAKE16-NEXT: v_add_nc_u32_e32 v8, v10, v5
+; GFX11-FAKE16-NEXT: v_cmp_u_f32_e32 vcc_lo, v1, v1
+; GFX11-FAKE16-NEXT: v_add_nc_u32_e32 v2, v9, v3
+; GFX11-FAKE16-NEXT: v_or_b32_e32 v9, 0x400000, v5
+; GFX11-FAKE16-NEXT: v_lshrrev_b32_e32 v0, 16, v0
+; GFX11-FAKE16-NEXT: v_bfe_u32 v10, v6, 16, 1
+; GFX11-FAKE16-NEXT: v_cndmask_b32_e32 v1, v4, v7, vcc_lo
+; GFX11-FAKE16-NEXT: v_add_nc_u32_e32 v7, 0x7fff, v8
+; GFX11-FAKE16-NEXT: v_add_f32_e64 v8, 0x40c00000, s7
+; GFX11-FAKE16-NEXT: v_or_b32_e32 v4, 0x400000, v3
+; GFX11-FAKE16-NEXT: v_add_nc_u32_e32 v2, 0x7fff, v2
+; GFX11-FAKE16-NEXT: v_lshrrev_b32_e32 v1, 16, v1
+; GFX11-FAKE16-NEXT: v_and_b32_e32 v0, 0xffff, v0
+; GFX11-FAKE16-NEXT: v_cmp_u_f32_e32 vcc_lo, v3, v3
+; GFX11-FAKE16-NEXT: v_bfe_u32 v3, v8, 16, 1
+; GFX11-FAKE16-NEXT: s_delay_alu instid0(VALU_DEP_3) | instskip(NEXT) | instid1(VALU_DEP_2)
+; GFX11-FAKE16-NEXT: v_lshl_or_b32 v15, v1, 16, v0
+; GFX11-FAKE16-NEXT: v_add_nc_u32_e32 v1, v3, v8
+; GFX11-FAKE16-NEXT: v_cndmask_b32_e32 v2, v2, v4, vcc_lo
+; GFX11-FAKE16-NEXT: v_cmp_u_f32_e32 vcc_lo, v5, v5
+; GFX11-FAKE16-NEXT: v_add_nc_u32_e32 v5, v10, v6
+; GFX11-FAKE16-NEXT: s_delay_alu instid0(VALU_DEP_4) | instskip(NEXT) | instid1(VALU_DEP_4)
+; GFX11-FAKE16-NEXT: v_add_nc_u32_e32 v1, 0x7fff, v1
+; GFX11-FAKE16-NEXT: v_lshrrev_b32_e32 v0, 16, v2
+; GFX11-FAKE16-NEXT: v_cndmask_b32_e32 v4, v7, v9, vcc_lo
+; GFX11-FAKE16-NEXT: s_delay_alu instid0(VALU_DEP_4)
+; GFX11-FAKE16-NEXT: v_add_nc_u32_e32 v3, 0x7fff, v5
+; GFX11-FAKE16-NEXT: v_add_f32_e64 v5, 0x40c00000, s4
+; GFX11-FAKE16-NEXT: v_cmp_u_f32_e32 vcc_lo, v6, v6
+; GFX11-FAKE16-NEXT: s_lshl_b32 s4, s24, 16
+; GFX11-FAKE16-NEXT: v_lshrrev_b32_e32 v2, 16, v4
+; GFX11-FAKE16-NEXT: v_or_b32_e32 v4, 0x400000, v6
+; GFX11-FAKE16-NEXT: v_or_b32_e32 v7, 0x400000, v8
+; GFX11-FAKE16-NEXT: v_add_f32_e64 v9, 0x40c00000, s4
+; GFX11-FAKE16-NEXT: s_and_b32 s4, s23, 0xffff0000
+; GFX11-FAKE16-NEXT: s_delay_alu instid0(VALU_DEP_3) | instskip(SKIP_4) | instid1(VALU_DEP_4)
+; GFX11-FAKE16-NEXT: v_cndmask_b32_e32 v3, v3, v4, vcc_lo
+; GFX11-FAKE16-NEXT: v_bfe_u32 v4, v5, 16, 1
+; GFX11-FAKE16-NEXT: v_cmp_u_f32_e32 vcc_lo, v8, v8
+; GFX11-FAKE16-NEXT: v_or_b32_e32 v8, 0x400000, v5
+; GFX11-FAKE16-NEXT: v_or_b32_e32 v10, 0x400000, v9
+; GFX11-FAKE16-NEXT: v_add_nc_u32_e32 v4, v4, v5
+; GFX11-FAKE16-NEXT: v_dual_cndmask_b32 v6, v1, v7 :: v_dual_and_b32 v1, 0xffff, v2
+; GFX11-FAKE16-NEXT: v_bfe_u32 v7, v9, 16, 1
+; GFX11-FAKE16-NEXT: v_lshrrev_b32_e32 v2, 16, v3
+; GFX11-FAKE16-NEXT: s_delay_alu instid0(VALU_DEP_4) | instskip(NEXT) | instid1(VALU_DEP_4)
+; GFX11-FAKE16-NEXT: v_add_nc_u32_e32 v4, 0x7fff, v4
+; GFX11-FAKE16-NEXT: v_lshrrev_b32_e32 v3, 16, v6
+; GFX11-FAKE16-NEXT: v_cmp_u_f32_e32 vcc_lo, v5, v5
+; GFX11-FAKE16-NEXT: v_add_nc_u32_e32 v6, v7, v9
+; GFX11-FAKE16-NEXT: v_add_f32_e64 v7, 0x40c00000, s4
+; GFX11-FAKE16-NEXT: s_lshl_b32 s4, s23, 16
+; GFX11-FAKE16-NEXT: v_lshl_or_b32 v151, v0, 16, v1
+; GFX11-FAKE16-NEXT: v_add_f32_e64 v12, 0x40c00000, s4
+; GFX11-FAKE16-NEXT: v_add_nc_u32_e32 v6, 0x7fff, v6
+; GFX11-FAKE16-NEXT: v_bfe_u32 v11, v7, 16, 1
+; GFX11-FAKE16-NEXT: v_cndmask_b32_e32 v5, v4, v8, vcc_lo
+; GFX11-FAKE16-NEXT: v_cmp_u_f32_e32 vcc_lo, v9, v9
+; GFX11-FAKE16-NEXT: s_and_b32 s4, s22, 0xffff0000
+; GFX11-FAKE16-NEXT: v_bfe_u32 v9, v12, 16, 1
+; GFX11-FAKE16-NEXT: v_add_nc_u32_e32 v8, v11, v7
+; GFX11-FAKE16-NEXT: v_and_b32_e32 v4, 0xffff, v3
+; GFX11-FAKE16-NEXT: v_cndmask_b32_e32 v6, v6, v10, vcc_lo
+; GFX11-FAKE16-NEXT: v_add_f32_e64 v10, 0x40c00000, s4
+; GFX11-FAKE16-NEXT: s_lshl_b32 s4, s22, 16
+; GFX11-FAKE16-NEXT: v_lshrrev_b32_e32 v3, 16, v5
+; GFX11-FAKE16-NEXT: v_add_f32_e64 v11, 0x40c00000, s4
+; GFX11-FAKE16-NEXT: v_lshrrev_b32_e32 v5, 16, v6
+; GFX11-FAKE16-NEXT: v_add_nc_u32_e32 v6, 0x7fff, v8
+; GFX11-FAKE16-NEXT: v_add_nc_u32_e32 v8, v9, v12
+; GFX11-FAKE16-NEXT: v_or_b32_e32 v9, 0x400000, v7
+; GFX11-FAKE16-NEXT: v_bfe_u32 v14, v10, 16, 1
+; GFX11-FAKE16-NEXT: v_cmp_u_f32_e32 vcc_lo, v7, v7
+; GFX11-FAKE16-NEXT: v_or_b32_e32 v13, 0x400000, v12
+; GFX11-FAKE16-NEXT: v_add_nc_u32_e32 v8, 0x7fff, v8
+; GFX11-FAKE16-NEXT: s_and_b32 s4, s21, 0xffff0000
+; GFX11-FAKE16-NEXT: v_cndmask_b32_e32 v7, v6, v9, vcc_lo
+; GFX11-FAKE16-NEXT: v_bfe_u32 v9, v11, 16, 1
+; GFX11-FAKE16-NEXT: v_cmp_u_f32_e32 vcc_lo, v12, v12
+; GFX11-FAKE16-NEXT: v_add_nc_u32_e32 v12, v14, v10
+; GFX11-FAKE16-NEXT: v_and_b32_e32 v6, 0xffff, v5
+; GFX11-FAKE16-NEXT: v_lshrrev_b32_e32 v5, 16, v7
+; GFX11-FAKE16-NEXT: v_dual_cndmask_b32 v8, v8, v13 :: v_dual_add_nc_u32 v7, v9, v11
+; GFX11-FAKE16-NEXT: s_delay_alu instid0(VALU_DEP_4)
+; GFX11-FAKE16-NEXT: v_add_nc_u32_e32 v9, 0x7fff, v12
+; GFX11-FAKE16-NEXT: v_or_b32_e32 v12, 0x400000, v10
+; GFX11-FAKE16-NEXT: v_add_f32_e64 v13, 0x40c00000, s4
+; GFX11-FAKE16-NEXT: v_cmp_u_f32_e32 vcc_lo, v10, v10
+; GFX11-FAKE16-NEXT: s_lshl_b32 s4, s21, 16
+; GFX11-FAKE16-NEXT: v_add_nc_u32_e32 v7, 0x7fff, v7
+; GFX11-FAKE16-NEXT: v_or_b32_e32 v14, 0x400000, v11
+; GFX11-FAKE16-NEXT: v_add_f32_e64 v16, 0x40c00000, s4
+; GFX11-FAKE16-NEXT: v_cndmask_b32_e32 v9, v9, v12, vcc_lo
+; GFX11-FAKE16-NEXT: v_bfe_u32 v10, v13, 16, 1
+; GFX11-FAKE16-NEXT: v_cmp_u_f32_e32 vcc_lo, v11, v11
+; GFX11-FAKE16-NEXT: v_lshrrev_b32_e32 v8, 16, v8
+; GFX11-FAKE16-NEXT: v_bfe_u32 v12, v16, 16, 1
+; GFX11-FAKE16-NEXT: s_and_b32 s4, s20, 0xffff0000
+; GFX11-FAKE16-NEXT: v_dual_cndmask_b32 v11, v7, v14 :: v_dual_add_nc_u32 v10, v10, v13
+; GFX11-FAKE16-NEXT: s_delay_alu instid0(VALU_DEP_3) | instskip(SKIP_2) | instid1(VALU_DEP_4)
+; GFX11-FAKE16-NEXT: v_and_b32_e32 v7, 0xffff, v8
+; GFX11-FAKE16-NEXT: v_lshrrev_b32_e32 v8, 16, v9
+; GFX11-FAKE16-NEXT: v_or_b32_e32 v14, 0x400000, v13
+; GFX11-FAKE16-NEXT: v_add_nc_u32_e32 v10, 0x7fff, v10
+; GFX11-FAKE16-NEXT: v_lshrrev_b32_e32 v9, 16, v11
+; GFX11-FAKE16-NEXT: v_add_nc_u32_e32 v11, v12, v16
+; GFX11-FAKE16-NEXT: v_add_f32_e64 v12, 0x40c00000, s4
+; GFX11-FAKE16-NEXT: v_cmp_u_f32_e32 vcc_lo, v13, v13
+; GFX11-FAKE16-NEXT: s_lshl_b32 s4, s20, 16
+; GFX11-FAKE16-NEXT: v_or_b32_e32 v17, 0x400000, v16
+; GFX11-FAKE16-NEXT: v_add_nc_u32_e32 v11, 0x7fff, v11
+; GFX11-FAKE16-NEXT: v_bfe_u32 v18, v12, 16, 1
+; GFX11-FAKE16-NEXT: v_add_f32_e64 v19, 0x40c00000, s4
+; GFX11-FAKE16-NEXT: v_cndmask_b32_e32 v13, v10, v14, vcc_lo
+; GFX11-FAKE16-NEXT: v_cmp_u_f32_e32 vcc_lo, v16, v16
+; GFX11-FAKE16-NEXT: s_and_b32 s4, s19, 0xffff0000
+; GFX11-FAKE16-NEXT: v_add_nc_u32_e32 v14, v18, v12
+; GFX11-FAKE16-NEXT: v_bfe_u32 v16, v19, 16, 1
+; GFX11-FAKE16-NEXT: v_and_b32_e32 v10, 0xffff, v9
+; GFX11-FAKE16-NEXT: v_cndmask_b32_e32 v11, v11, v17, vcc_lo
+; GFX11-FAKE16-NEXT: v_add_f32_e64 v17, 0x40c00000, s4
+; GFX11-FAKE16-NEXT: s_lshl_b32 s4, s19, 16
+; GFX11-FAKE16-NEXT: v_lshrrev_b32_e32 v9, 16, v13
+; GFX11-FAKE16-NEXT: v_add_nc_u32_e32 v13, 0x7fff, v14
+; GFX11-FAKE16-NEXT: v_add_nc_u32_e32 v14, v16, v19
+; GFX11-FAKE16-NEXT: v_or_b32_e32 v16, 0x400000, v12
+; GFX11-FAKE16-NEXT: v_add_f32_e64 v18, 0x40c00000, s4
+; GFX11-FAKE16-NEXT: v_bfe_u32 v21, v17, 16, 1
+; GFX11-FAKE16-NEXT: v_cmp_u_f32_e32 vcc_lo, v12, v12
+; GFX11-FAKE16-NEXT: v_lshrrev_b32_e32 v11, 16, v11
+; GFX11-FAKE16-NEXT: v_add_nc_u32_e32 v14, 0x7fff, v14
+; GFX11-FAKE16-NEXT: v_or_b32_e32 v20, 0x400000, v19
+; GFX11-FAKE16-NEXT: s_and_b32 s4, s18, 0xffff0000
+; GFX11-FAKE16-NEXT: v_cndmask_b32_e32 v13, v13, v16, vcc_lo
+; GFX11-FAKE16-NEXT: v_bfe_u32 v16, v18, 16, 1
+; GFX11-FAKE16-NEXT: v_cmp_u_f32_e32 vcc_lo, v19, v19
+; GFX11-FAKE16-NEXT: v_add_nc_u32_e32 v19, v21, v17
+; GFX11-FAKE16-NEXT: v_and_b32_e32 v12, 0xffff, v11
+; GFX11-FAKE16-NEXT: v_lshrrev_b32_e32 v11, 16, v13
+; GFX11-FAKE16-NEXT: v_dual_cndmask_b32 v14, v14, v20 :: v_dual_add_nc_u32 v13, v16, v18
+; GFX11-FAKE16-NEXT: s_delay_alu instid0(VALU_DEP_4)
+; GFX11-FAKE16-NEXT: v_add_nc_u32_e32 v16, 0x7fff, v19
+; GFX11-FAKE16-NEXT: v_or_b32_e32 v19, 0x400000, v17
+; GFX11-FAKE16-NEXT: v_add_f32_e64 v20, 0x40c00000, s4
+; GFX11-FAKE16-NEXT: v_cmp_u_f32_e32 vcc_lo, v17, v17
+; GFX11-FAKE16-NEXT: s_lshl_b32 s4, s18, 16
+; GFX11-FAKE16-NEXT: v_add_nc_u32_e32 v13, 0x7fff, v13
+; GFX11-FAKE16-NEXT: v_or_b32_e32 v21, 0x400000, v18
+; GFX11-FAKE16-NEXT: v_add_f32_e64 v22, 0x40c00000, s4
+; GFX11-FAKE16-NEXT: v_cndmask_b32_e32 v16, v16, v19, vcc_lo
+; GFX11-FAKE16-NEXT: v_bfe_u32 v17, v20, 16, 1
+; GFX11-FAKE16-NEXT: v_cmp_u_f32_e32 vcc_lo, v18, v18
+; GFX11-FAKE16-NEXT: v_lshrrev_b32_e32 v14, 16, v14
+; GFX11-FAKE16-NEXT: v_bfe_u32 v19, v22, 16, 1
+; GFX11-FAKE16-NEXT: s_and_b32 s4, s17, 0xffff0000
+; GFX11-FAKE16-NEXT: v_add_nc_u32_e32 v17, v17, v20
+; GFX11-FAKE16-NEXT: s_delay_alu instid0(VALU_DEP_3) | instskip(SKIP_2) | instid1(VALU_DEP_4)
+; GFX11-FAKE16-NEXT: v_dual_cndmask_b32 v18, v13, v21 :: v_dual_and_b32 v13, 0xffff, v14
+; GFX11-FAKE16-NEXT: v_lshrrev_b32_e32 v14, 16, v16
+; GFX11-FAKE16-NEXT: v_or_b32_e32 v21, 0x400000, v20
+; GFX11-FAKE16-NEXT: v_add_nc_u32_e32 v17, 0x7fff, v17
+; GFX11-FAKE16-NEXT: s_delay_alu instid0(VALU_DEP_4)
+; GFX11-FAKE16-NEXT: v_lshrrev_b32_e32 v16, 16, v18
+; GFX11-FAKE16-NEXT: v_add_nc_u32_e32 v18, v19, v22
+; GFX11-FAKE16-NEXT: v_add_f32_e64 v19, 0x40c00000, s4
+; GFX11-FAKE16-NEXT: v_cmp_u_f32_e32 vcc_lo, v20, v20
+; GFX11-FAKE16-NEXT: s_lshl_b32 s4, s17, 16
+; GFX11-FAKE16-NEXT: v_or_b32_e32 v23, 0x400000, v22
+; GFX11-FAKE16-NEXT: v_add_nc_u32_e32 v18, 0x7fff, v18
+; GFX11-FAKE16-NEXT: v_bfe_u32 v24, v19, 16, 1
+; GFX11-FAKE16-NEXT: v_add_f32_e64 v25, 0x40c00000, s4
+; GFX11-FAKE16-NEXT: v_cndmask_b32_e32 v20, v17, v21, vcc_lo
+; GFX11-FAKE16-NEXT: v_cmp_u_f32_e32 vcc_lo, v22, v22
+; GFX11-FAKE16-NEXT: s_and_b32 s4, s16, 0xffff0000
+; GFX11-FAKE16-NEXT: v_add_nc_u32_e32 v21, v24, v19
+; GFX11-FAKE16-NEXT: v_bfe_u32 v22, v25, 16, 1
+; GFX11-FAKE16-NEXT: v_dual_cndmask_b32 v18, v18, v23 :: v_dual_and_b32 v17, 0xffff, v16
+; GFX11-FAKE16-NEXT: v_add_f32_e64 v23, 0x40c00000, s4
+; GFX11-FAKE16-NEXT: s_lshl_b32 s4, s16, 16
+; GFX11-FAKE16-NEXT: v_lshrrev_b32_e32 v16, 16, v20
+; GFX11-FAKE16-NEXT: v_add_nc_u32_e32 v20, 0x7fff, v21
+; GFX11-FAKE16-NEXT: v_add_nc_u32_e32 v21, v22, v25
+; GFX11-FAKE16-NEXT: v_or_b32_e32 v22, 0x400000, v19
+; GFX11-FAKE16-NEXT: v_add_f32_e64 v24, 0x40c00000, s4
+; GFX11-FAKE16-NEXT: v_bfe_u32 v27, v23, 16, 1
+; GFX11-FAKE16-NEXT: v_cmp_u_f32_e32 vcc_lo, v19, v19
+; GFX11-FAKE16-NEXT: v_lshrrev_b32_e32 v18, 16, v18
+; GFX11-FAKE16-NEXT: v_add_nc_u32_e32 v21, 0x7fff, v21
+; GFX11-FAKE16-NEXT: v_or_b32_e32 v26, 0x400000, v25
+; GFX11-FAKE16-NEXT: s_and_b32 s4, s3, 0xffff0000
+; GFX11-FAKE16-NEXT: v_cndmask_b32_e32 v20, v20, v22, vcc_lo
+; GFX11-FAKE16-NEXT: v_bfe_u32 v22, v24, 16, 1
+; GFX11-FAKE16-NEXT: v_cmp_u_f32_e32 vcc_lo, v25, v25
+; GFX11-FAKE16-NEXT: v_add_nc_u32_e32 v25, v27, v23
+; GFX11-FAKE16-NEXT: v_and_b32_e32 v19, 0xffff, v18
+; GFX11-FAKE16-NEXT: v_lshrrev_b32_e32 v18, 16, v20
+; GFX11-FAKE16-NEXT: v_dual_cndmask_b32 v21, v21, v26 :: v_dual_add_nc_u32 v20, v22, v24
+; GFX11-FAKE16-NEXT: s_delay_alu instid0(VALU_DEP_4)
+; GFX11-FAKE16-NEXT: v_add_nc_u32_e32 v22, 0x7fff, v25
+; GFX11-FAKE16-NEXT: v_or_b32_e32 v25, 0x400000, v23
+; GFX11-FAKE16-NEXT: v_add_f32_e64 v26, 0x40c00000, s4
+; GFX11-FAKE16-NEXT: v_cmp_u_f32_e32 vcc_lo, v23, v23
+; GFX11-FAKE16-NEXT: s_lshl_b32 s3, s3, 16
+; GFX11-FAKE16-NEXT: v_add_nc_u32_e32 v20, 0x7fff, v20
+; GFX11-FAKE16-NEXT: v_or_b32_e32 v27, 0x400000, v24
+; GFX11-FAKE16-NEXT: v_add_f32_e64 v28, 0x40c00000, s3
+; GFX11-FAKE16-NEXT: v_cndmask_b32_e32 v22, v22, v25, vcc_lo
+; GFX11-FAKE16-NEXT: v_bfe_u32 v23, v26, 16, 1
+; GFX11-FAKE16-NEXT: v_cmp_u_f32_e32 vcc_lo, v24, v24
+; GFX11-FAKE16-NEXT: v_lshrrev_b32_e32 v21, 16, v21
+; GFX11-FAKE16-NEXT: v_bfe_u32 v25, v28, 16, 1
+; GFX11-FAKE16-NEXT: s_and_b32 s3, s2, 0xffff0000
+; GFX11-FAKE16-NEXT: v_dual_cndmask_b32 v24, v20, v27 :: v_dual_add_nc_u32 v23, v23, v26
+; GFX11-FAKE16-NEXT: s_delay_alu instid0(VALU_DEP_3) | instskip(SKIP_2) | instid1(VALU_DEP_4)
+; GFX11-FAKE16-NEXT: v_and_b32_e32 v20, 0xffff, v21
+; GFX11-FAKE16-NEXT: v_lshrrev_b32_e32 v21, 16, v22
+; GFX11-FAKE16-NEXT: v_or_b32_e32 v27, 0x400000, v26
+; GFX11-FAKE16-NEXT: v_add_nc_u32_e32 v23, 0x7fff, v23
+; GFX11-FAKE16-NEXT: v_lshrrev_b32_e32 v22, 16, v24
+; GFX11-FAKE16-NEXT: v_add_nc_u32_e32 v24, v25, v28
+; GFX11-FAKE16-NEXT: v_add_f32_e64 v25, 0x40c00000, s3
+; GFX11-FAKE16-NEXT: v_cmp_u_f32_e32 vcc_lo, v26, v26
+; GFX11-FAKE16-NEXT: s_lshl_b32 s2, s2, 16
+; GFX11-FAKE16-NEXT: v_or_b32_e32 v29, 0x400000, v28
+; GFX11-FAKE16-NEXT: v_add_nc_u32_e32 v24, 0x7fff, v24
+; GFX11-FAKE16-NEXT: v_bfe_u32 v30, v25, 16, 1
+; GFX11-FAKE16-NEXT: v_add_f32_e64 v31, 0x40c00000, s2
+; GFX11-FAKE16-NEXT: v_cndmask_b32_e32 v26, v23, v27, vcc_lo
+; GFX11-FAKE16-NEXT: v_cmp_u_f32_e32 vcc_lo, v28, v28
+; GFX11-FAKE16-NEXT: s_and_b32 s2, s1, 0xffff0000
+; GFX11-FAKE16-NEXT: v_add_nc_u32_e32 v27, v30, v25
+; GFX11-FAKE16-NEXT: v_bfe_u32 v28, v31, 16, 1
+; GFX11-FAKE16-NEXT: s_lshl_b32 s1, s1, 16
+; GFX11-FAKE16-NEXT: v_cndmask_b32_e32 v24, v24, v29, vcc_lo
+; GFX11-FAKE16-NEXT: v_add_f32_e64 v29, 0x40c00000, s2
+; GFX11-FAKE16-NEXT: v_and_b32_e32 v23, 0xffff, v22
+; GFX11-FAKE16-NEXT: v_lshrrev_b32_e32 v22, 16, v26
+; GFX11-FAKE16-NEXT: v_add_nc_u32_e32 v26, 0x7fff, v27
+; GFX11-FAKE16-NEXT: v_add_nc_u32_e32 v27, v28, v31
+; GFX11-FAKE16-NEXT: v_or_b32_e32 v28, 0x400000, v25
+; GFX11-FAKE16-NEXT: v_add_f32_e64 v30, 0x40c00000, s1
+; GFX11-FAKE16-NEXT: v_bfe_u32 v33, v29, 16, 1
+; GFX11-FAKE16-NEXT: v_cmp_u_f32_e32 vcc_lo, v25, v25
+; GFX11-FAKE16-NEXT: v_lshrrev_b32_e32 v24, 16, v24
+; GFX11-FAKE16-NEXT: v_add_nc_u32_e32 v27, 0x7fff, v27
+; GFX11-FAKE16-NEXT: v_or_b32_e32 v32, 0x400000, v31
+; GFX11-FAKE16-NEXT: s_and_b32 s1, s0, 0xffff0000
+; GFX11-FAKE16-NEXT: v_cndmask_b32_e32 v26, v26, v28, vcc_lo
+; GFX11-FAKE16-NEXT: v_bfe_u32 v28, v30, 16, 1
+; GFX11-FAKE16-NEXT: v_cmp_u_f32_e32 vcc_lo, v31, v31
+; GFX11-FAKE16-NEXT: v_add_nc_u32_e32 v31, v33, v29
+; GFX11-FAKE16-NEXT: v_and_b32_e32 v25, 0xffff, v24
+; GFX11-FAKE16-NEXT: v_lshrrev_b32_e32 v24, 16, v26
+; GFX11-FAKE16-NEXT: v_dual_cndmask_b32 v27, v27, v32 :: v_dual_add_nc_u32 v26, v28, v30
+; GFX11-FAKE16-NEXT: s_delay_alu instid0(VALU_DEP_4)
+; GFX11-FAKE16-NEXT: v_add_nc_u32_e32 v28, 0x7fff, v31
+; GFX11-FAKE16-NEXT: v_or_b32_e32 v31, 0x400000, v29
+; GFX11-FAKE16-NEXT: v_add_f32_e64 v32, 0x40c00000, s1
+; GFX11-FAKE16-NEXT: v_cmp_u_f32_e32 vcc_lo, v29, v29
+; GFX11-FAKE16-NEXT: s_lshl_b32 s0, s0, 16
+; GFX11-FAKE16-NEXT: v_add_nc_u32_e32 v26, 0x7fff, v26
+; GFX11-FAKE16-NEXT: v_or_b32_e32 v33, 0x400000, v30
+; GFX11-FAKE16-NEXT: v_add_f32_e64 v34, 0x40c00000, s0
+; GFX11-FAKE16-NEXT: v_cndmask_b32_e32 v28, v28, v31, vcc_lo
+; GFX11-FAKE16-NEXT: v_bfe_u32 v29, v32, 16, 1
+; GFX11-FAKE16-NEXT: v_cmp_u_f32_e32 vcc_lo, v30, v30
+; GFX11-FAKE16-NEXT: v_or_b32_e32 v35, 0x400000, v32
+; GFX11-FAKE16-NEXT: v_bfe_u32 v31, v34, 16, 1
+; GFX11-FAKE16-NEXT: v_or_b32_e32 v36, 0x400000, v34
+; GFX11-FAKE16-NEXT: v_lshrrev_b32_e32 v27, 16, v27
+; GFX11-FAKE16-NEXT: v_cndmask_b32_e32 v30, v26, v33, vcc_lo
+; GFX11-FAKE16-NEXT: v_lshrrev_b32_e32 v26, 16, v28
+; GFX11-FAKE16-NEXT: v_add_nc_u32_e32 v28, v29, v32
+; GFX11-FAKE16-NEXT: v_lshlrev_b32_e32 v33, 16, v178
+; GFX11-FAKE16-NEXT: v_cmp_u_f32_e32 vcc_lo, v32, v32
+; GFX11-FAKE16-NEXT: v_lshrrev_b32_e32 v29, 16, v30
+; GFX11-FAKE16-NEXT: v_add_nc_u32_e32 v30, v31, v34
+; GFX11-FAKE16-NEXT: v_and_b32_e32 v31, 0xffff0000, v178
+; GFX11-FAKE16-NEXT: v_add_nc_u32_e32 v28, 0x7fff, v28
+; GFX11-FAKE16-NEXT: v_add_f32_e32 v33, 0x40c00000, v33
+; GFX11-FAKE16-NEXT: v_lshl_or_b32 v109, v5, 16, v7
+; GFX11-FAKE16-NEXT: v_add_nc_u32_e32 v30, 0x7fff, v30
+; GFX11-FAKE16-NEXT: v_add_f32_e32 v31, 0x40c00000, v31
+; GFX11-FAKE16-NEXT: v_cndmask_b32_e32 v32, v28, v35, vcc_lo
+; GFX11-FAKE16-NEXT: v_bfe_u32 v37, v33, 16, 1
+; GFX11-FAKE16-NEXT: v_cmp_u_f32_e32 vcc_lo, v34, v34
+; GFX11-FAKE16-NEXT: v_and_b32_e32 v28, 0xffff, v29
+; GFX11-FAKE16-NEXT: v_bfe_u32 v35, v31, 16, 1
+; GFX11-FAKE16-NEXT: v_lshrrev_b32_e32 v29, 16, v32
+; GFX11-FAKE16-NEXT: v_add_nc_u32_e32 v32, v37, v33
+; GFX11-FAKE16-NEXT: v_cndmask_b32_e32 v30, v30, v36, vcc_lo
+; GFX11-FAKE16-NEXT: v_lshlrev_b32_e32 v36, 16, v179
+; GFX11-FAKE16-NEXT: v_add_nc_u32_e32 v34, v35, v31
+; GFX11-FAKE16-NEXT: v_or_b32_e32 v37, 0x400000, v33
+; GFX11-FAKE16-NEXT: v_add_nc_u32_e32 v32, 0x7fff, v32
+; GFX11-FAKE16-NEXT: v_cmp_u_f32_e32 vcc_lo, v33, v33
+; GFX11-FAKE16-NEXT: v_and_b32_e32 v35, 0xffff0000, v179
+; GFX11-FAKE16-NEXT: v_add_nc_u32_e32 v34, 0x7fff, v34
+; GFX11-FAKE16-NEXT: v_or_b32_e32 v38, 0x400000, v31
+; GFX11-FAKE16-NEXT: v_add_f32_e32 v36, 0x40c00000, v36
+; GFX11-FAKE16-NEXT: v_cndmask_b32_e32 v32, v32, v37, vcc_lo
+; GFX11-FAKE16-NEXT: v_cmp_u_f32_e32 vcc_lo, v31, v31
+; GFX11-FAKE16-NEXT: v_add_f32_e32 v35, 0x40c00000, v35
+; GFX11-FAKE16-NEXT: v_and_b32_e32 v37, 0xffff0000, v180
+; GFX11-FAKE16-NEXT: v_or_b32_e32 v39, 0x400000, v36
+; GFX11-FAKE16-NEXT: v_lshrrev_b32_e32 v32, 16, v32
+; GFX11-FAKE16-NEXT: v_cndmask_b32_e32 v31, v34, v38, vcc_lo
+; GFX11-FAKE16-NEXT: v_bfe_u32 v34, v36, 16, 1
+; GFX11-FAKE16-NEXT: v_bfe_u32 v33, v35, 16, 1
+; GFX11-FAKE16-NEXT: v_lshlrev_b32_e32 v38, 16, v180
+; GFX11-FAKE16-NEXT: v_cmp_u_f32_e32 vcc_lo, v36, v36
+; GFX11-FAKE16-NEXT: s_delay_alu instid0(VALU_DEP_4) | instskip(NEXT) | instid1(VALU_DEP_4)
+; GFX11-FAKE16-NEXT: v_dual_add_f32 v37, 0x40c00000, v37 :: v_dual_add_nc_u32 v34, v34, v36
+; GFX11-FAKE16-NEXT: v_add_nc_u32_e32 v33, v33, v35
+; GFX11-FAKE16-NEXT: v_or_b32_e32 v48, 0x400000, v35
+; GFX11-FAKE16-NEXT: v_add_f32_e32 v38, 0x40c00000, v38
+; GFX11-FAKE16-NEXT: v_lshrrev_b32_e32 v31, 16, v31
+; GFX11-FAKE16-NEXT: v_add_nc_u32_e32 v34, 0x7fff, v34
+; GFX11-FAKE16-NEXT: v_add_nc_u32_e32 v33, 0x7fff, v33
+; GFX11-FAKE16-NEXT: v_and_b32_e32 v32, 0xffff, v32
+; GFX11-FAKE16-NEXT: v_bfe_u32 v36, v37, 16, 1
+; GFX11-FAKE16-NEXT: v_lshrrev_b32_e32 v30, 16, v30
+; GFX11-FAKE16-NEXT: v_cndmask_b32_e32 v34, v34, v39, vcc_lo
+; GFX11-FAKE16-NEXT: v_cmp_u_f32_e32 vcc_lo, v35, v35
+; GFX11-FAKE16-NEXT: v_bfe_u32 v35, v38, 16, 1
+; GFX11-FAKE16-NEXT: v_lshl_or_b32 v178, v31, 16, v32
+; GFX11-FAKE16-NEXT: v_add_nc_u32_e32 v31, v36, v37
+; GFX11-FAKE16-NEXT: v_dual_cndmask_b32 v33, v33, v48 :: v_dual_lshlrev_b32 v36, 16, v182
+; GFX11-FAKE16-NEXT: v_lshrrev_b32_e32 v34, 16, v34
+; GFX11-FAKE16-NEXT: v_or_b32_e32 v39, 0x400000, v38
+; GFX11-FAKE16-NEXT: v_cmp_u_f32_e32 vcc_lo, v38, v38
+; GFX11-FAKE16-NEXT: s_delay_alu instid0(VALU_DEP_4)
+; GFX11-FAKE16-NEXT: v_add_f32_e32 v36, 0x40c00000, v36
+; GFX11-FAKE16-NEXT: v_lshrrev_b32_e32 v32, 16, v33
+; GFX11-FAKE16-NEXT: v_add_nc_u32_e32 v33, v35, v38
+; GFX11-FAKE16-NEXT: v_and_b32_e32 v35, 0xffff0000, v182
+; GFX11-FAKE16-NEXT: v_and_b32_e32 v34, 0xffff, v34
+; GFX11-FAKE16-NEXT: v_or_b32_e32 v48, 0x400000, v37
+; GFX11-FAKE16-NEXT: v_add_nc_u32_e32 v31, 0x7fff, v31
+; GFX11-FAKE16-NEXT: v_add_nc_u32_e32 v33, 0x7fff, v33
+; GFX11-FAKE16-NEXT: v_add_f32_e32 v35, 0x40c00000, v35
+; GFX11-FAKE16-NEXT: v_lshl_or_b32 v179, v32, 16, v34
+; GFX11-FAKE16-NEXT: v_and_b32_e32 v30, 0xffff, v30
+; GFX11-FAKE16-NEXT: v_lshl_or_b32 v136, v2, 16, v4
+; GFX11-FAKE16-NEXT: v_cndmask_b32_e32 v33, v33, v39, vcc_lo
+; GFX11-FAKE16-NEXT: v_bfe_u32 v38, v35, 16, 1
+; GFX11-FAKE16-NEXT: v_cmp_u_f32_e32 vcc_lo, v37, v37
+; GFX11-FAKE16-NEXT: v_bfe_u32 v37, v36, 16, 1
+; GFX11-FAKE16-NEXT: v_or_b32_e32 v39, 0x400000, v36
+; GFX11-FAKE16-NEXT: v_lshrrev_b32_e32 v33, 16, v33
+; GFX11-FAKE16-NEXT: v_dual_cndmask_b32 v31, v31, v48 :: v_dual_add_nc_u32 v38, v38, v35
+; GFX11-FAKE16-NEXT: s_delay_alu instid0(VALU_DEP_4) | instskip(SKIP_2) | instid1(VALU_DEP_4)
+; GFX11-FAKE16-NEXT: v_add_nc_u32_e32 v32, v37, v36
+; GFX11-FAKE16-NEXT: v_and_b32_e32 v37, 0xffff0000, v181
+; GFX11-FAKE16-NEXT: v_cmp_u_f32_e32 vcc_lo, v36, v36
+; GFX11-FAKE16-NEXT: v_add_nc_u32_e32 v34, 0x7fff, v38
+; GFX11-FAKE16-NEXT: v_lshlrev_b32_e32 v38, 16, v181
+; GFX11-FAKE16-NEXT: v_add_nc_u32_e32 v32, 0x7fff, v32
+; GFX11-FAKE16-NEXT: v_or_b32_e32 v48, 0x400000, v35
+; GFX11-FAKE16-NEXT: v_add_f32_e32 v37, 0x40c00000, v37
+; GFX11-FAKE16-NEXT: v_lshrrev_b32_e32 v31, 16, v31
+; GFX11-FAKE16-NEXT: v_add_f32_e32 v38, 0x40c00000, v38
+; GFX11-FAKE16-NEXT: v_cndmask_b32_e32 v32, v32, v39, vcc_lo
+; GFX11-FAKE16-NEXT: v_cmp_u_f32_e32 vcc_lo, v35, v35
+; GFX11-FAKE16-NEXT: v_and_b32_e32 v33, 0xffff, v33
+; GFX11-FAKE16-NEXT: v_bfe_u32 v35, v37, 16, 1
+; GFX11-FAKE16-NEXT: v_bfe_u32 v36, v38, 16, 1
+; GFX11-FAKE16-NEXT: v_lshrrev_b32_e32 v32, 16, v32
+; GFX11-FAKE16-NEXT: v_cndmask_b32_e32 v34, v34, v48, vcc_lo
+; GFX11-FAKE16-NEXT: v_lshl_or_b32 v180, v31, 16, v33
+; GFX11-FAKE16-NEXT: v_add_nc_u32_e32 v33, v35, v37
+; GFX11-FAKE16-NEXT: v_and_b32_e32 v35, 0xffff0000, v170
+; GFX11-FAKE16-NEXT: v_or_b32_e32 v39, 0x400000, v38
+; GFX11-FAKE16-NEXT: v_lshrrev_b32_e32 v31, 16, v34
+; GFX11-FAKE16-NEXT: v_add_nc_u32_e32 v34, v36, v38
+; GFX11-FAKE16-NEXT: s_delay_alu instid0(VALU_DEP_4) | instskip(SKIP_2) | instid1(VALU_DEP_4)
+; GFX11-FAKE16-NEXT: v_dual_add_f32 v35, 0x40c00000, v35 :: v_dual_lshlrev_b32 v36, 16, v170
+; GFX11-FAKE16-NEXT: v_cmp_u_f32_e32 vcc_lo, v38, v38
+; GFX11-FAKE16-NEXT: v_and_b32_e32 v32, 0xffff, v32
+; GFX11-FAKE16-NEXT: v_add_nc_u32_e32 v34, 0x7fff, v34
+; GFX11-FAKE16-NEXT: s_delay_alu instid0(VALU_DEP_4)
+; GFX11-FAKE16-NEXT: v_add_f32_e32 v36, 0x40c00000, v36
+; GFX11-FAKE16-NEXT: v_add_nc_u32_e32 v33, 0x7fff, v33
+; GFX11-FAKE16-NEXT: v_or_b32_e32 v48, 0x400000, v37
+; GFX11-FAKE16-NEXT: v_bfe_u32 v38, v35, 16, 1
+; GFX11-FAKE16-NEXT: v_cndmask_b32_e32 v34, v34, v39, vcc_lo
+; GFX11-FAKE16-NEXT: v_bfe_u32 v39, v36, 16, 1
+; GFX11-FAKE16-NEXT: v_cmp_u_f32_e32 vcc_lo, v37, v37
+; GFX11-FAKE16-NEXT: v_lshl_or_b32 v182, v31, 16, v32
+; GFX11-FAKE16-NEXT: v_add_nc_u32_e32 v37, v38, v35
+; GFX11-FAKE16-NEXT: v_lshrrev_b32_e32 v34, 16, v34
+; GFX11-FAKE16-NEXT: v_add_nc_u32_e32 v31, v39, v36
+; GFX11-FAKE16-NEXT: v_cndmask_b32_e32 v33, v33, v48, vcc_lo
+; GFX11-FAKE16-NEXT: v_or_b32_e32 v38, 0x400000, v36
+; GFX11-FAKE16-NEXT: v_cmp_u_f32_e32 vcc_lo, v36, v36
+; GFX11-FAKE16-NEXT: v_or_b32_e32 v48, 0x400000, v35
+; GFX11-FAKE16-NEXT: v_add_nc_u32_e32 v31, 0x7fff, v31
+; GFX11-FAKE16-NEXT: v_lshrrev_b32_e32 v32, 16, v33
+; GFX11-FAKE16-NEXT: v_and_b32_e32 v33, 0xffff, v34
+; GFX11-FAKE16-NEXT: v_add_nc_u32_e32 v34, 0x7fff, v37
+; GFX11-FAKE16-NEXT: v_and_b32_e32 v37, 0xffff0000, v169
+; GFX11-FAKE16-NEXT: v_cndmask_b32_e32 v31, v31, v38, vcc_lo
+; GFX11-FAKE16-NEXT: v_cmp_u_f32_e32 vcc_lo, v35, v35
+; GFX11-FAKE16-NEXT: v_lshlrev_b32_e32 v39, 16, v169
+; GFX11-FAKE16-NEXT: v_lshl_or_b32 v181, v32, 16, v33
+; GFX11-FAKE16-NEXT: v_add_f32_e32 v37, 0x40c00000, v37
+; GFX11-FAKE16-NEXT: v_and_b32_e32 v38, 0xffff0000, v176
+; GFX11-FAKE16-NEXT: v_cndmask_b32_e32 v34, v34, v48, vcc_lo
+; GFX11-FAKE16-NEXT: v_add_f32_e32 v36, 0x40c00000, v39
+; GFX11-FAKE16-NEXT: v_lshrrev_b32_e32 v31, 16, v31
+; GFX11-FAKE16-NEXT: v_bfe_u32 v35, v37, 16, 1
+; GFX11-FAKE16-NEXT: v_or_b32_e32 v39, 0x400000, v37
+; GFX11-FAKE16-NEXT: v_lshrrev_b32_e32 v33, 16, v34
+; GFX11-FAKE16-NEXT: v_bfe_u32 v32, v36, 16, 1
+; GFX11-FAKE16-NEXT: v_add_f32_e32 v38, 0x40c00000, v38
+; GFX11-FAKE16-NEXT: v_add_nc_u32_e32 v34, v35, v37
+; GFX11-FAKE16-NEXT: v_lshlrev_b32_e32 v35, 16, v176
+; GFX11-FAKE16-NEXT: v_cmp_u_f32_e32 vcc_lo, v37, v37
+; GFX11-FAKE16-NEXT: v_add_nc_u32_e32 v32, v32, v36
+; GFX11-FAKE16-NEXT: v_and_b32_e32 v31, 0xffff, v31
+; GFX11-FAKE16-NEXT: v_add_nc_u32_e32 v34, 0x7fff, v34
+; GFX11-FAKE16-NEXT: v_add_f32_e32 v35, 0x40c00000, v35
+; GFX11-FAKE16-NEXT: v_or_b32_e32 v48, 0x400000, v36
+; GFX11-FAKE16-NEXT: v_add_nc_u32_e32 v32, 0x7fff, v32
+; GFX11-FAKE16-NEXT: v_bfe_u32 v37, v38, 16, 1
+; GFX11-FAKE16-NEXT: v_cndmask_b32_e32 v34, v34, v39, vcc_lo
+; GFX11-FAKE16-NEXT: v_bfe_u32 v49, v35, 16, 1
+; GFX11-FAKE16-NEXT: v_cmp_u_f32_e32 vcc_lo, v36, v36
+; GFX11-FAKE16-NEXT: v_and_b32_e32 v27, 0xffff, v27
+; GFX11-FAKE16-NEXT: v_lshl_or_b32 v170, v33, 16, v31
+; GFX11-FAKE16-NEXT: v_lshrrev_b32_e32 v31, 16, v34
+; GFX11-FAKE16-NEXT: v_add_nc_u32_e32 v36, v49, v35
+; GFX11-FAKE16-NEXT: v_dual_cndmask_b32 v32, v32, v48 :: v_dual_add_nc_u32 v33, v37, v38
+; GFX11-FAKE16-NEXT: v_and_b32_e32 v37, 0xffff0000, v174
+; GFX11-FAKE16-NEXT: v_cmp_u_f32_e32 vcc_lo, v35, v35
+; GFX11-FAKE16-NEXT: s_delay_alu instid0(VALU_DEP_4) | instskip(SKIP_4) | instid1(VALU_DEP_4)
+; GFX11-FAKE16-NEXT: v_add_nc_u32_e32 v34, 0x7fff, v36
+; GFX11-FAKE16-NEXT: v_or_b32_e32 v36, 0x400000, v35
+; GFX11-FAKE16-NEXT: v_lshrrev_b32_e32 v32, 16, v32
+; GFX11-FAKE16-NEXT: v_add_nc_u32_e32 v33, 0x7fff, v33
+; GFX11-FAKE16-NEXT: v_or_b32_e32 v39, 0x400000, v38
+; GFX11-FAKE16-NEXT: v_dual_add_f32 v35, 0x40c00000, v37 :: v_dual_cndmask_b32 v34, v34, v36
+; GFX11-FAKE16-NEXT: v_lshlrev_b32_e32 v36, 16, v174
+; GFX11-FAKE16-NEXT: v_cmp_u_f32_e32 vcc_lo, v38, v38
+; GFX11-FAKE16-NEXT: v_and_b32_e32 v32, 0xffff, v32
+; GFX11-FAKE16-NEXT: s_delay_alu instid0(VALU_DEP_4) | instskip(SKIP_2) | instid1(VALU_DEP_4)
+; GFX11-FAKE16-NEXT: v_bfe_u32 v37, v35, 16, 1
+; GFX11-FAKE16-NEXT: v_lshrrev_b32_e32 v34, 16, v34
+; GFX11-FAKE16-NEXT: v_dual_add_f32 v36, 0x40c00000, v36 :: v_dual_cndmask_b32 v33, v33, v39
+; GFX11-FAKE16-NEXT: v_lshl_or_b32 v169, v31, 16, v32
+; GFX11-FAKE16-NEXT: s_delay_alu instid0(VALU_DEP_4) | instskip(NEXT) | instid1(VALU_DEP_4)
+; GFX11-FAKE16-NEXT: v_add_nc_u32_e32 v37, v37, v35
+; GFX11-FAKE16-NEXT: v_and_b32_e32 v34, 0xffff, v34
+; GFX11-FAKE16-NEXT: s_delay_alu instid0(VALU_DEP_4)
+; GFX11-FAKE16-NEXT: v_bfe_u32 v31, v36, 16, 1
+; GFX11-FAKE16-NEXT: v_lshrrev_b32_e32 v33, 16, v33
+; GFX11-FAKE16-NEXT: v_and_b32_e32 v32, 0xffff0000, v171
+; GFX11-FAKE16-NEXT: v_cmp_u_f32_e32 vcc_lo, v35, v35
+; GFX11-FAKE16-NEXT: v_lshlrev_b32_e32 v38, 16, v177
+; GFX11-FAKE16-NEXT: v_add_nc_u32_e32 v31, v31, v36
+; GFX11-FAKE16-NEXT: v_lshl_or_b32 v176, v33, 16, v34
+; GFX11-FAKE16-NEXT: v_add_nc_u32_e32 v33, 0x7fff, v37
+; GFX11-FAKE16-NEXT: v_or_b32_e32 v34, 0x400000, v35
+; GFX11-FAKE16-NEXT: v_dual_add_f32 v32, 0x40c00000, v32 :: v_dual_lshlrev_b32 v37, 16, v171
+; GFX11-FAKE16-NEXT: v_add_nc_u32_e32 v31, 0x7fff, v31
+; GFX11-FAKE16-NEXT: v_add_f32_e32 v38, 0x40c00000, v38
+; GFX11-FAKE16-NEXT: s_delay_alu instid0(VALU_DEP_4)
+; GFX11-FAKE16-NEXT: v_cndmask_b32_e32 v33, v33, v34, vcc_lo
+; GFX11-FAKE16-NEXT: v_or_b32_e32 v34, 0x400000, v36
+; GFX11-FAKE16-NEXT: v_add_f32_e32 v35, 0x40c00000, v37
+; GFX11-FAKE16-NEXT: v_bfe_u32 v37, v32, 16, 1
+; GFX11-FAKE16-NEXT: v_cmp_u_f32_e32 vcc_lo, v36, v36
+; GFX11-FAKE16-NEXT: v_or_b32_e32 v39, 0x400000, v32
+; GFX11-FAKE16-NEXT: v_bfe_u32 v50, v38, 16, 1
+; GFX11-FAKE16-NEXT: v_or_b32_e32 v48, 0x400000, v35
+; GFX11-FAKE16-NEXT: v_dual_cndmask_b32 v31, v31, v34 :: v_dual_add_nc_u32 v36, v37, v32
+; GFX11-FAKE16-NEXT: v_bfe_u32 v34, v35, 16, 1
+; GFX11-FAKE16-NEXT: v_and_b32_e32 v37, 0xffff0000, v177
+; GFX11-FAKE16-NEXT: v_cmp_u_f32_e32 vcc_lo, v32, v32
+; GFX11-FAKE16-NEXT: s_delay_alu instid0(VALU_DEP_4) | instskip(SKIP_1) | instid1(VALU_DEP_4)
+; GFX11-FAKE16-NEXT: v_add_nc_u32_e32 v36, 0x7fff, v36
+; GFX11-FAKE16-NEXT: v_lshrrev_b32_e32 v31, 16, v31
+; GFX11-FAKE16-NEXT: v_dual_add_f32 v37, 0x40c00000, v37 :: v_dual_add_nc_u32 v34, v34, v35
+; GFX11-FAKE16-NEXT: v_lshrrev_b32_e32 v33, 16, v33
+; GFX11-FAKE16-NEXT: s_delay_alu instid0(VALU_DEP_4) | instskip(SKIP_1) | instid1(VALU_DEP_4)
+; GFX11-FAKE16-NEXT: v_cndmask_b32_e32 v32, v36, v39, vcc_lo
+; GFX11-FAKE16-NEXT: v_cmp_u_f32_e32 vcc_lo, v35, v35
+; GFX11-FAKE16-NEXT: v_add_nc_u32_e32 v34, 0x7fff, v34
+; GFX11-FAKE16-NEXT: v_bfe_u32 v49, v37, 16, 1
+; GFX11-FAKE16-NEXT: v_add_nc_u32_e32 v36, v50, v38
+; GFX11-FAKE16-NEXT: v_or_b32_e32 v39, 0x400000, v38
+; GFX11-FAKE16-NEXT: v_lshlrev_b32_e32 v50, 16, v184
+; GFX11-FAKE16-NEXT: s_delay_alu instid0(VALU_DEP_4) | instskip(NEXT) | instid1(VALU_DEP_4)
+; GFX11-FAKE16-NEXT: v_dual_cndmask_b32 v34, v34, v48 :: v_dual_add_nc_u32 v35, v49, v37
+; GFX11-FAKE16-NEXT: v_add_nc_u32_e32 v36, 0x7fff, v36
+; GFX11-FAKE16-NEXT: v_cmp_u_f32_e32 vcc_lo, v38, v38
+; GFX11-FAKE16-NEXT: v_and_b32_e32 v48, 0xffff0000, v184
+; GFX11-FAKE16-NEXT: s_delay_alu instid0(VALU_DEP_4)
+; GFX11-FAKE16-NEXT: v_lshrrev_b32_e32 v34, 16, v34
+; GFX11-FAKE16-NEXT: v_add_nc_u32_e32 v35, 0x7fff, v35
+; GFX11-FAKE16-NEXT: v_or_b32_e32 v49, 0x400000, v37
+; GFX11-FAKE16-NEXT: v_cndmask_b32_e32 v36, v36, v39, vcc_lo
+; GFX11-FAKE16-NEXT: v_cmp_u_f32_e32 vcc_lo, v37, v37
+; GFX11-FAKE16-NEXT: v_add_f32_e32 v37, 0x40c00000, v50
+; GFX11-FAKE16-NEXT: v_and_b32_e32 v31, 0xffff, v31
+; GFX11-FAKE16-NEXT: v_lshrrev_b32_e32 v32, 16, v32
+; GFX11-FAKE16-NEXT: v_dual_add_f32 v38, 0x40c00000, v48 :: v_dual_cndmask_b32 v35, v35, v49
+; GFX11-FAKE16-NEXT: v_lshrrev_b32_e32 v36, 16, v36
+; GFX11-FAKE16-NEXT: v_and_b32_e32 v34, 0xffff, v34
+; GFX11-FAKE16-NEXT: v_bfe_u32 v48, v37, 16, 1
+; GFX11-FAKE16-NEXT: s_delay_alu instid0(VALU_DEP_4)
+; GFX11-FAKE16-NEXT: v_bfe_u32 v39, v38, 16, 1
+; GFX11-FAKE16-NEXT: v_lshrrev_b32_e32 v35, 16, v35
+; GFX11-FAKE16-NEXT: v_and_b32_e32 v36, 0xffff, v36
+; GFX11-FAKE16-NEXT: v_lshl_or_b32 v174, v33, 16, v31
+; GFX11-FAKE16-NEXT: v_lshl_or_b32 v171, v32, 16, v34
+; GFX11-FAKE16-NEXT: v_add_nc_u32_e32 v31, v48, v37
+; GFX11-FAKE16-NEXT: v_and_b32_e32 v33, 0xffff0000, v175
+; GFX11-FAKE16-NEXT: v_lshlrev_b32_e32 v34, 16, v175
+; GFX11-FAKE16-NEXT: v_add_nc_u32_e32 v39, v39, v38
+; GFX11-FAKE16-NEXT: v_lshl_or_b32 v177, v35, 16, v36
+; GFX11-FAKE16-NEXT: v_add_nc_u32_e32 v31, 0x7fff, v31
+; GFX11-FAKE16-NEXT: v_or_b32_e32 v35, 0x400000, v37
+; GFX11-FAKE16-NEXT: v_dual_add_f32 v33, 0x40c00000, v33 :: v_dual_add_f32 v34, 0x40c00000, v34
+; GFX11-FAKE16-NEXT: v_cmp_u_f32_e32 vcc_lo, v37, v37
+; GFX11-FAKE16-NEXT: v_add_nc_u32_e32 v32, 0x7fff, v39
+; GFX11-FAKE16-NEXT: v_or_b32_e32 v36, 0x400000, v38
+; GFX11-FAKE16-NEXT: s_delay_alu instid0(VALU_DEP_4)
+; GFX11-FAKE16-NEXT: v_bfe_u32 v37, v33, 16, 1
+; GFX11-FAKE16-NEXT: v_bfe_u32 v39, v34, 16, 1
+; GFX11-FAKE16-NEXT: v_cndmask_b32_e32 v31, v31, v35, vcc_lo
+; GFX11-FAKE16-NEXT: v_and_b32_e32 v35, 0xffff0000, v173
+; GFX11-FAKE16-NEXT: v_cmp_u_f32_e32 vcc_lo, v38, v38
+; GFX11-FAKE16-NEXT: v_lshlrev_b32_e32 v48, 16, v173
+; GFX11-FAKE16-NEXT: v_or_b32_e32 v49, 0x400000, v33
+; GFX11-FAKE16-NEXT: v_lshrrev_b32_e32 v31, 16, v31
+; GFX11-FAKE16-NEXT: v_dual_add_f32 v35, 0x40c00000, v35 :: v_dual_cndmask_b32 v32, v32, v36
+; GFX11-FAKE16-NEXT: v_add_nc_u32_e32 v36, v37, v33
+; GFX11-FAKE16-NEXT: v_add_nc_u32_e32 v37, v39, v34
+; GFX11-FAKE16-NEXT: v_or_b32_e32 v39, 0x400000, v34
+; GFX11-FAKE16-NEXT: s_delay_alu instid0(VALU_DEP_4)
+; GFX11-FAKE16-NEXT: v_bfe_u32 v38, v35, 16, 1
+; GFX11-FAKE16-NEXT: v_cmp_u_f32_e32 vcc_lo, v34, v34
+; GFX11-FAKE16-NEXT: v_add_nc_u32_e32 v36, 0x7fff, v36
+; GFX11-FAKE16-NEXT: v_add_nc_u32_e32 v37, 0x7fff, v37
+; GFX11-FAKE16-NEXT: v_lshrrev_b32_e32 v32, 16, v32
+; GFX11-FAKE16-NEXT: v_add_nc_u32_e32 v38, v38, v35
+; GFX11-FAKE16-NEXT: v_and_b32_e32 v31, 0xffff, v31
+; GFX11-FAKE16-NEXT: v_lshl_or_b32 v122, v3, 16, v6
+; GFX11-FAKE16-NEXT: v_cndmask_b32_e32 v34, v37, v39, vcc_lo
+; GFX11-FAKE16-NEXT: v_cmp_u_f32_e32 vcc_lo, v33, v33
+; GFX11-FAKE16-NEXT: v_add_nc_u32_e32 v37, 0x7fff, v38
+; GFX11-FAKE16-NEXT: v_add_f32_e32 v38, 0x40c00000, v48
+; GFX11-FAKE16-NEXT: v_or_b32_e32 v39, 0x400000, v35
+; GFX11-FAKE16-NEXT: v_dual_cndmask_b32 v33, v36, v49 :: v_dual_lshlrev_b32 v48, 16, v183
+; GFX11-FAKE16-NEXT: v_cmp_u_f32_e32 vcc_lo, v35, v35
+; GFX11-FAKE16-NEXT: s_delay_alu instid0(VALU_DEP_4) | instskip(SKIP_1) | instid1(VALU_DEP_4)
+; GFX11-FAKE16-NEXT: v_bfe_u32 v36, v38, 16, 1
+; GFX11-FAKE16-NEXT: v_or_b32_e32 v49, 0x400000, v38
+; GFX11-FAKE16-NEXT: v_add_f32_e32 v48, 0x40c00000, v48
+; GFX11-FAKE16-NEXT: v_lshrrev_b32_e32 v34, 16, v34
+; GFX11-FAKE16-NEXT: v_cndmask_b32_e32 v35, v37, v39, vcc_lo
+; GFX11-FAKE16-NEXT: v_and_b32_e32 v37, 0xffff0000, v172
+; GFX11-FAKE16-NEXT: v_lshlrev_b32_e32 v39, 16, v172
+; GFX11-FAKE16-NEXT: v_add_nc_u32_e32 v36, v36, v38
+; GFX11-FAKE16-NEXT: v_cmp_u_f32_e32 vcc_lo, v38, v38
+; GFX11-FAKE16-NEXT: v_or_b32_e32 v55, 0x400000, v48
+; GFX11-FAKE16-NEXT: v_add_f32_e32 v37, 0x40c00000, v37
+; GFX11-FAKE16-NEXT: v_add_f32_e32 v39, 0x40c00000, v39
+; GFX11-FAKE16-NEXT: v_add_nc_u32_e32 v36, 0x7fff, v36
+; GFX11-FAKE16-NEXT: v_lshrrev_b32_e32 v33, 16, v33
+; GFX11-FAKE16-NEXT: v_and_b32_e32 v34, 0xffff, v34
+; GFX11-FAKE16-NEXT: v_bfe_u32 v50, v37, 16, 1
+; GFX11-FAKE16-NEXT: v_bfe_u32 v38, v39, 16, 1
+; GFX11-FAKE16-NEXT: v_cndmask_b32_e32 v36, v36, v49, vcc_lo
+; GFX11-FAKE16-NEXT: v_or_b32_e32 v54, 0x400000, v39
+; GFX11-FAKE16-NEXT: v_cmp_u_f32_e32 vcc_lo, v39, v39
+; GFX11-FAKE16-NEXT: v_dual_add_f32 v50, 0x40c00000, v51 :: v_dual_add_nc_u32 v49, v50, v37
+; GFX11-FAKE16-NEXT: v_bfe_u32 v51, v48, 16, 1
+; GFX11-FAKE16-NEXT: v_add_nc_u32_e32 v38, v38, v39
+; GFX11-FAKE16-NEXT: v_or_b32_e32 v53, 0x400000, v37
+; GFX11-FAKE16-NEXT: s_delay_alu instid0(VALU_DEP_4)
+; GFX11-FAKE16-NEXT: v_add_nc_u32_e32 v49, 0x7fff, v49
+; GFX11-FAKE16-NEXT: v_bfe_u32 v52, v50, 16, 1
+; GFX11-FAKE16-NEXT: v_add_nc_u32_e32 v51, v51, v48
+; GFX11-FAKE16-NEXT: v_add_nc_u32_e32 v38, 0x7fff, v38
+; GFX11-FAKE16-NEXT: v_lshrrev_b32_e32 v36, 16, v36
+; GFX11-FAKE16-NEXT: v_lshrrev_b32_e32 v35, 16, v35
+; GFX11-FAKE16-NEXT: v_add_nc_u32_e32 v52, v52, v50
+; GFX11-FAKE16-NEXT: s_delay_alu instid0(VALU_DEP_4) | instskip(SKIP_2) | instid1(VALU_DEP_4)
+; GFX11-FAKE16-NEXT: v_dual_cndmask_b32 v38, v38, v54 :: v_dual_add_nc_u32 v51, 0x7fff, v51
+; GFX11-FAKE16-NEXT: v_cmp_u_f32_e32 vcc_lo, v48, v48
+; GFX11-FAKE16-NEXT: v_and_b32_e32 v36, 0xffff, v36
+; GFX11-FAKE16-NEXT: v_add_nc_u32_e32 v39, 0x7fff, v52
+; GFX11-FAKE16-NEXT: v_or_b32_e32 v52, 0x400000, v50
+; GFX11-FAKE16-NEXT: v_lshrrev_b32_e32 v38, 16, v38
+; GFX11-FAKE16-NEXT: v_cndmask_b32_e32 v48, v51, v55, vcc_lo
+; GFX11-FAKE16-NEXT: v_cmp_u_f32_e32 vcc_lo, v37, v37
+; GFX11-FAKE16-NEXT: v_lshl_or_b32 v184, v32, 16, v31
+; GFX11-FAKE16-NEXT: v_lshl_or_b32 v175, v33, 16, v34
+; GFX11-FAKE16-NEXT: v_and_b32_e32 v38, 0xffff, v38
+; GFX11-FAKE16-NEXT: v_lshrrev_b32_e32 v48, 16, v48
+; GFX11-FAKE16-NEXT: v_cndmask_b32_e32 v37, v49, v53, vcc_lo
+; GFX11-FAKE16-NEXT: v_cmp_u_f32_e32 vcc_lo, v50, v50
+; GFX11-FAKE16-NEXT: v_lshl_or_b32 v173, v35, 16, v36
+; GFX11-FAKE16-NEXT: v_lshl_or_b32 v97, v8, 16, v10
+; GFX11-FAKE16-NEXT: v_and_b32_e32 v48, 0xffff, v48
+; GFX11-FAKE16-NEXT: v_lshrrev_b32_e32 v37, 16, v37
+; GFX11-FAKE16-NEXT: v_cndmask_b32_e32 v39, v39, v52, vcc_lo
+; GFX11-FAKE16-NEXT: v_lshl_or_b32 v86, v9, 16, v12
+; GFX11-FAKE16-NEXT: v_lshl_or_b32 v76, v11, 16, v13
+; GFX11-FAKE16-NEXT: v_lshl_or_b32 v67, v14, 16, v17
+; GFX11-FAKE16-NEXT: v_lshl_or_b32 v172, v37, 16, v38
+; GFX11-FAKE16-NEXT: v_lshrrev_b32_e32 v39, 16, v39
+; GFX11-FAKE16-NEXT: v_lshl_or_b32 v59, v16, 16, v19
+; GFX11-FAKE16-NEXT: v_lshl_or_b32 v52, v18, 16, v20
+; GFX11-FAKE16-NEXT: v_lshl_or_b32 v46, v21, 16, v23
+; GFX11-FAKE16-NEXT: v_lshl_or_b32 v41, v22, 16, v25
+; GFX11-FAKE16-NEXT: v_lshl_or_b32 v183, v39, 16, v48
+; GFX11-FAKE16-NEXT: v_lshl_or_b32 v37, v24, 16, v27
+; GFX11-FAKE16-NEXT: v_lshl_or_b32 v34, v26, 16, v28
+; GFX11-FAKE16-NEXT: v_lshl_or_b32 v32, v29, 16, v30
+; GFX11-FAKE16-NEXT: .LBB43_3: ; %end
+; GFX11-FAKE16-NEXT: v_dual_mov_b32 v3, v41 :: v_dual_mov_b32 v4, v46
+; GFX11-FAKE16-NEXT: v_dual_mov_b32 v6, v59 :: v_dual_mov_b32 v9, v86
+; GFX11-FAKE16-NEXT: v_dual_mov_b32 v7, v67 :: v_dual_mov_b32 v8, v76
+; GFX11-FAKE16-NEXT: v_dual_mov_b32 v10, v97 :: v_dual_mov_b32 v13, v136
+; GFX11-FAKE16-NEXT: v_dual_mov_b32 v11, v109 :: v_dual_mov_b32 v12, v122
+; GFX11-FAKE16-NEXT: v_dual_mov_b32 v14, v151 :: v_dual_mov_b32 v17, v172
+; GFX11-FAKE16-NEXT: v_dual_mov_b32 v18, v173 :: v_dual_mov_b32 v19, v175
+; GFX11-FAKE16-NEXT: v_dual_mov_b32 v20, v184 :: v_dual_mov_b32 v23, v174
+; GFX11-FAKE16-NEXT: v_dual_mov_b32 v22, v171 :: v_dual_mov_b32 v25, v169
+; GFX11-FAKE16-NEXT: v_dual_mov_b32 v26, v170 :: v_dual_mov_b32 v29, v180
+; GFX11-FAKE16-NEXT: s_clause 0x1f
+; GFX11-FAKE16-NEXT: scratch_load_b32 v184, off, s32
+; GFX11-FAKE16-NEXT: scratch_load_b32 v175, off, s32 offset:4
+; GFX11-FAKE16-NEXT: scratch_load_b32 v174, off, s32 offset:8
+; GFX11-FAKE16-NEXT: scratch_load_b32 v173, off, s32 offset:12
+; GFX11-FAKE16-NEXT: scratch_load_b32 v172, off, s32 offset:16
+; GFX11-FAKE16-NEXT: scratch_load_b32 v171, off, s32 offset:20
+; GFX11-FAKE16-NEXT: scratch_load_b32 v170, off, s32 offset:24
+; GFX11-FAKE16-NEXT: scratch_load_b32 v169, off, s32 offset:28
+; GFX11-FAKE16-NEXT: scratch_load_b32 v168, off, s32 offset:32
+; GFX11-FAKE16-NEXT: scratch_load_b32 v159, off, s32 offset:36
+; GFX11-FAKE16-NEXT: scratch_load_b32 v158, off, s32 offset:40
+; GFX11-FAKE16-NEXT: scratch_load_b32 v157, off, s32 offset:44
+; GFX11-FAKE16-NEXT: scratch_load_b32 v156, off, s32 offset:48
+; GFX11-FAKE16-NEXT: scratch_load_b32 v155, off, s32 offset:52
+; GFX11-FAKE16-NEXT: scratch_load_b32 v154, off, s32 offset:56
+; GFX11-FAKE16-NEXT: scratch_load_b32 v153, off, s32 offset:60
+; GFX11-FAKE16-NEXT: scratch_load_b32 v152, off, s32 offset:64
+; GFX11-FAKE16-NEXT: scratch_load_b32 v143, off, s32 offset:68
+; GFX11-FAKE16-NEXT: scratch_load_b32 v142, off, s32 offset:72
+; GFX11-FAKE16-NEXT: scratch_load_b32 v141, off, s32 offset:76
+; GFX11-FAKE16-NEXT: scratch_load_b32 v140, off, s32 offset:80
+; GFX11-FAKE16-NEXT: scratch_load_b32 v139, off, s32 offset:84
+; GFX11-FAKE16-NEXT: scratch_load_b32 v138, off, s32 offset:88
+; GFX11-FAKE16-NEXT: scratch_load_b32 v137, off, s32 offset:92
+; GFX11-FAKE16-NEXT: scratch_load_b32 v136, off, s32 offset:96
+; GFX11-FAKE16-NEXT: scratch_load_b32 v127, off, s32 offset:100
+; GFX11-FAKE16-NEXT: scratch_load_b32 v126, off, s32 offset:104
+; GFX11-FAKE16-NEXT: scratch_load_b32 v125, off, s32 offset:108
+; GFX11-FAKE16-NEXT: scratch_load_b32 v124, off, s32 offset:112
+; GFX11-FAKE16-NEXT: scratch_load_b32 v123, off, s32 offset:116
+; GFX11-FAKE16-NEXT: scratch_load_b32 v122, off, s32 offset:120
+; GFX11-FAKE16-NEXT: scratch_load_b32 v121, off, s32 offset:124
+; GFX11-FAKE16-NEXT: s_clause 0x1f
+; GFX11-FAKE16-NEXT: scratch_load_b32 v120, off, s32 offset:128
+; GFX11-FAKE16-NEXT: scratch_load_b32 v111, off, s32 offset:132
+; GFX11-FAKE16-NEXT: scratch_load_b32 v110, off, s32 offset:136
+; GFX11-FAKE16-NEXT: scratch_load_b32 v109, off, s32 offset:140
+; GFX11-FAKE16-NEXT: scratch_load_b32 v108, off, s32 offset:144
+; GFX11-FAKE16-NEXT: scratch_load_b32 v107, off, s32 offset:148
+; GFX11-FAKE16-NEXT: scratch_load_b32 v106, off, s32 offset:152
+; GFX11-FAKE16-NEXT: scratch_load_b32 v105, off, s32 offset:156
+; GFX11-FAKE16-NEXT: scratch_load_b32 v104, off, s32 offset:160
+; GFX11-FAKE16-NEXT: scratch_load_b32 v95, off, s32 offset:164
+; GFX11-FAKE16-NEXT: scratch_load_b32 v94, off, s32 offset:168
+; GFX11-FAKE16-NEXT: scratch_load_b32 v93, off, s32 offset:172
+; GFX11-FAKE16-NEXT: scratch_load_b32 v92, off, s32 offset:176
+; GFX11-FAKE16-NEXT: scratch_load_b32 v91, off, s32 offset:180
+; GFX11-FAKE16-NEXT: scratch_load_b32 v90, off, s32 offset:184
+; GFX11-FAKE16-NEXT: scratch_load_b32 v89, off, s32 offset:188
+; GFX11-FAKE16-NEXT: scratch_load_b32 v88, off, s32 offset:192
+; GFX11-FAKE16-NEXT: scratch_load_b32 v79, off, s32 offset:196
+; GFX11-FAKE16-NEXT: scratch_load_b32 v78, off, s32 offset:200
+; GFX11-FAKE16-NEXT: scratch_load_b32 v77, off, s32 offset:204
+; GFX11-FAKE16-NEXT: scratch_load_b32 v76, off, s32 offset:208
+; GFX11-FAKE16-NEXT: scratch_load_b32 v75, off, s32 offset:212
+; GFX11-FAKE16-NEXT: scratch_load_b32 v74, off, s32 offset:216
+; GFX11-FAKE16-NEXT: scratch_load_b32 v73, off, s32 offset:220
+; GFX11-FAKE16-NEXT: scratch_load_b32 v72, off, s32 offset:224
+; GFX11-FAKE16-NEXT: scratch_load_b32 v63, off, s32 offset:228
+; GFX11-FAKE16-NEXT: scratch_load_b32 v62, off, s32 offset:232
+; GFX11-FAKE16-NEXT: scratch_load_b32 v61, off, s32 offset:236
+; GFX11-FAKE16-NEXT: scratch_load_b32 v60, off, s32 offset:240
+; GFX11-FAKE16-NEXT: scratch_load_b32 v59, off, s32 offset:244
+; GFX11-FAKE16-NEXT: scratch_load_b32 v58, off, s32 offset:248
+; GFX11-FAKE16-NEXT: scratch_load_b32 v57, off, s32 offset:252
+; GFX11-FAKE16-NEXT: s_clause 0x8
+; GFX11-FAKE16-NEXT: scratch_load_b32 v56, off, s32 offset:256
+; GFX11-FAKE16-NEXT: scratch_load_b32 v47, off, s32 offset:260
+; GFX11-FAKE16-NEXT: scratch_load_b32 v46, off, s32 offset:264
+; GFX11-FAKE16-NEXT: scratch_load_b32 v45, off, s32 offset:268
+; GFX11-FAKE16-NEXT: scratch_load_b32 v44, off, s32 offset:272
+; GFX11-FAKE16-NEXT: scratch_load_b32 v43, off, s32 offset:276
+; GFX11-FAKE16-NEXT: scratch_load_b32 v42, off, s32 offset:280
+; GFX11-FAKE16-NEXT: scratch_load_b32 v41, off, s32 offset:284
+; GFX11-FAKE16-NEXT: scratch_load_b32 v40, off, s32 offset:288
+; GFX11-FAKE16-NEXT: v_dual_mov_b32 v0, v32 :: v_dual_mov_b32 v1, v34
+; GFX11-FAKE16-NEXT: v_dual_mov_b32 v2, v37 :: v_dual_mov_b32 v5, v52
+; GFX11-FAKE16-NEXT: v_dual_mov_b32 v16, v183 :: v_dual_mov_b32 v21, v177
+; GFX11-FAKE16-NEXT: v_dual_mov_b32 v24, v176 :: v_dual_mov_b32 v27, v181
+; GFX11-FAKE16-NEXT: v_mov_b32_e32 v28, v182
+; GFX11-FAKE16-NEXT: v_dual_mov_b32 v30, v179 :: v_dual_mov_b32 v31, v178
+; GFX11-FAKE16-NEXT: s_waitcnt vmcnt(0)
+; GFX11-FAKE16-NEXT: s_setpc_b64 s[30:31]
+; GFX11-FAKE16-NEXT: .LBB43_4:
+; GFX11-FAKE16-NEXT: ; implicit-def: $vgpr32_vgpr33_vgpr34_vgpr35_vgpr36_vgpr37_vgpr38_vgpr39_vgpr40_vgpr41_vgpr42_vgpr43_vgpr44_vgpr45_vgpr46_vgpr47_vgpr48_vgpr49_vgpr50_vgpr51_vgpr52_vgpr53_vgpr54_vgpr55_vgpr56_vgpr57_vgpr58_vgpr59_vgpr60_vgpr61_vgpr62_vgpr63
+; GFX11-FAKE16-NEXT: ; implicit-def: $vgpr33_vgpr34_vgpr35_vgpr36_vgpr37_vgpr38_vgpr39_vgpr40_vgpr41_vgpr42_vgpr43_vgpr44_vgpr45_vgpr46_vgpr47_vgpr48_vgpr49_vgpr50_vgpr51_vgpr52_vgpr53_vgpr54_vgpr55_vgpr56_vgpr57_vgpr58_vgpr59_vgpr60_vgpr61_vgpr62_vgpr63_vgpr64
+; GFX11-FAKE16-NEXT: ; implicit-def: $vgpr0_vgpr1_vgpr2_vgpr3_vgpr4_vgpr5_vgpr6_vgpr7_vgpr8_vgpr9_vgpr10_vgpr11_vgpr12_vgpr13_vgpr14_vgpr15_vgpr16_vgpr17_vgpr18_vgpr19_vgpr20_vgpr21_vgpr22_vgpr23_vgpr24_vgpr25_vgpr26_vgpr27_vgpr28_vgpr29_vgpr30_vgpr31
+; GFX11-FAKE16-NEXT: ; implicit-def: $vgpr35_vgpr36_vgpr37_vgpr38_vgpr39_vgpr40_vgpr41_vgpr42_vgpr43_vgpr44_vgpr45_vgpr46_vgpr47_vgpr48_vgpr49_vgpr50_vgpr51_vgpr52_vgpr53_vgpr54_vgpr55_vgpr56_vgpr57_vgpr58_vgpr59_vgpr60_vgpr61_vgpr62_vgpr63_vgpr64_vgpr65_vgpr66
+; GFX11-FAKE16-NEXT: ; implicit-def: $vgpr38_vgpr39_vgpr40_vgpr41_vgpr42_vgpr43_vgpr44_vgpr45_vgpr46_vgpr47_vgpr48_vgpr49_vgpr50_vgpr51_vgpr52_vgpr53_vgpr54_vgpr55_vgpr56_vgpr57_vgpr58_vgpr59_vgpr60_vgpr61_vgpr62_vgpr63_vgpr64_vgpr65_vgpr66_vgpr67_vgpr68_vgpr69
+; GFX11-FAKE16-NEXT: ; implicit-def: $vgpr42_vgpr43_vgpr44_vgpr45_vgpr46_vgpr47_vgpr48_vgpr49_vgpr50_vgpr51_vgpr52_vgpr53_vgpr54_vgpr55_vgpr56_vgpr57_vgpr58_vgpr59_vgpr60_vgpr61_vgpr62_vgpr63_vgpr64_vgpr65_vgpr66_vgpr67_vgpr68_vgpr69_vgpr70_vgpr71_vgpr72_vgpr73
+; GFX11-FAKE16-NEXT: ; implicit-def: $vgpr47_vgpr48_vgpr49_vgpr50_vgpr51_vgpr52_vgpr53_vgpr54_vgpr55_vgpr56_vgpr57_vgpr58_vgpr59_vgpr60_vgpr61_vgpr62_vgpr63_vgpr64_vgpr65_vgpr66_vgpr67_vgpr68_vgpr69_vgpr70_vgpr71_vgpr72_vgpr73_vgpr74_vgpr75_vgpr76_vgpr77_vgpr78
+; GFX11-FAKE16-NEXT: ; implicit-def: $vgpr53_vgpr54_vgpr55_vgpr56_vgpr57_vgpr58_vgpr59_vgpr60_vgpr61_vgpr62_vgpr63_vgpr64_vgpr65_vgpr66_vgpr67_vgpr68_vgpr69_vgpr70_vgpr71_vgpr72_vgpr73_vgpr74_vgpr75_vgpr76_vgpr77_vgpr78_vgpr79_vgpr80_vgpr81_vgpr82_vgpr83_vgpr84
+; GFX11-FAKE16-NEXT: ; implicit-def: $vgpr60_vgpr61_vgpr62_vgpr63_vgpr64_vgpr65_vgpr66_vgpr67_vgpr68_vgpr69_vgpr70_vgpr71_vgpr72_vgpr73_vgpr74_vgpr75_vgpr76_vgpr77_vgpr78_vgpr79_vgpr80_vgpr81_vgpr82_vgpr83_vgpr84_vgpr85_vgpr86_vgpr87_vgpr88_vgpr89_vgpr90_vgpr91
+; GFX11-FAKE16-NEXT: ; implicit-def: $vgpr68_vgpr69_vgpr70_vgpr71_vgpr72_vgpr73_vgpr74_vgpr75_vgpr76_vgpr77_vgpr78_vgpr79_vgpr80_vgpr81_vgpr82_vgpr83_vgpr84_vgpr85_vgpr86_vgpr87_vgpr88_vgpr89_vgpr90_vgpr91_vgpr92_vgpr93_vgpr94_vgpr95_vgpr96_vgpr97_vgpr98_vgpr99
+; GFX11-FAKE16-NEXT: ; implicit-def: $vgpr77_vgpr78_vgpr79_vgpr80_vgpr81_vgpr82_vgpr83_vgpr84_vgpr85_vgpr86_vgpr87_vgpr88_vgpr89_vgpr90_vgpr91_vgpr92_vgpr93_vgpr94_vgpr95_vgpr96_vgpr97_vgpr98_vgpr99_vgpr100_vgpr101_vgpr102_vgpr103_vgpr104_vgpr105_vgpr106_vgpr107_vgpr108
+; GFX11-FAKE16-NEXT: ; implicit-def: $vgpr87_vgpr88_vgpr89_vgpr90_vgpr91_vgpr92_vgpr93_vgpr94_vgpr95_vgpr96_vgpr97_vgpr98_vgpr99_vgpr100_vgpr101_vgpr102_vgpr103_vgpr104_vgpr105_vgpr106_vgpr107_vgpr108_vgpr109_vgpr110_vgpr111_vgpr112_vgpr113_vgpr114_vgpr115_vgpr116_vgpr117_vgpr118
+; GFX11-FAKE16-NEXT: ; implicit-def: $vgpr98_vgpr99_vgpr100_vgpr101_vgpr102_vgpr103_vgpr104_vgpr105_vgpr106_vgpr107_vgpr108_vgpr109_vgpr110_vgpr111_vgpr112_vgpr113_vgpr114_vgpr115_vgpr116_vgpr117_vgpr118_vgpr119_vgpr120_vgpr121_vgpr122_vgpr123_vgpr124_vgpr125_vgpr126_vgpr127_vgpr128_vgpr129
+; GFX11-FAKE16-NEXT: ; implicit-def: $vgpr110_vgpr111_vgpr112_vgpr113_vgpr114_vgpr115_vgpr116_vgpr117_vgpr118_vgpr119_vgpr120_vgpr121_vgpr122_vgpr123_vgpr124_vgpr125_vgpr126_vgpr127_vgpr128_vgpr129_vgpr130_vgpr131_vgpr132_vgpr133_vgpr134_vgpr135_vgpr136_vgpr137_vgpr138_vgpr139_vgpr140_vgpr141
+; GFX11-FAKE16-NEXT: ; implicit-def: $vgpr123_vgpr124_vgpr125_vgpr126_vgpr127_vgpr128_vgpr129_vgpr130_vgpr131_vgpr132_vgpr133_vgpr134_vgpr135_vgpr136_vgpr137_vgpr138_vgpr139_vgpr140_vgpr141_vgpr142_vgpr143_vgpr144_vgpr145_vgpr146_vgpr147_vgpr148_vgpr149_vgpr150_vgpr151_vgpr152_vgpr153_vgpr154
+; GFX11-FAKE16-NEXT: ; implicit-def: $vgpr137_vgpr138_vgpr139_vgpr140_vgpr141_vgpr142_vgpr143_vgpr144_vgpr145_vgpr146_vgpr147_vgpr148_vgpr149_vgpr150_vgpr151_vgpr152_vgpr153_vgpr154_vgpr155_vgpr156_vgpr157_vgpr158_vgpr159_vgpr160_vgpr161_vgpr162_vgpr163_vgpr164_vgpr165_vgpr166_vgpr167_vgpr168
+; GFX11-FAKE16-NEXT: s_branch .LBB43_2
%cmp = icmp eq i32 %b, 0
br i1 %cmp, label %cmp.true, label %cmp.false
@@ -101184,870 +103132,1844 @@ define inreg <16 x i64> @bitcast_v64bf16_to_v16i64_scalar(<64 x bfloat> inreg %a
; GFX9-NEXT: .LBB63_4:
; GFX9-NEXT: s_branch .LBB63_2
;
-; GFX11-LABEL: bitcast_v64bf16_to_v16i64_scalar:
-; GFX11: ; %bb.0:
-; GFX11-NEXT: s_waitcnt vmcnt(0) expcnt(0) lgkmcnt(0)
-; GFX11-NEXT: v_cmp_ne_u32_e32 vcc_lo, 0, v14
-; GFX11-NEXT: s_clause 0x1f
-; GFX11-NEXT: scratch_store_b32 off, v40, s32 offset:288
-; GFX11-NEXT: scratch_store_b32 off, v41, s32 offset:284
-; GFX11-NEXT: scratch_store_b32 off, v42, s32 offset:280
-; GFX11-NEXT: scratch_store_b32 off, v43, s32 offset:276
-; GFX11-NEXT: scratch_store_b32 off, v44, s32 offset:272
-; GFX11-NEXT: scratch_store_b32 off, v45, s32 offset:268
-; GFX11-NEXT: scratch_store_b32 off, v46, s32 offset:264
-; GFX11-NEXT: scratch_store_b32 off, v47, s32 offset:260
-; GFX11-NEXT: scratch_store_b32 off, v56, s32 offset:256
-; GFX11-NEXT: scratch_store_b32 off, v57, s32 offset:252
-; GFX11-NEXT: scratch_store_b32 off, v58, s32 offset:248
-; GFX11-NEXT: scratch_store_b32 off, v59, s32 offset:244
-; GFX11-NEXT: scratch_store_b32 off, v60, s32 offset:240
-; GFX11-NEXT: scratch_store_b32 off, v61, s32 offset:236
-; GFX11-NEXT: scratch_store_b32 off, v62, s32 offset:232
-; GFX11-NEXT: scratch_store_b32 off, v63, s32 offset:228
-; GFX11-NEXT: scratch_store_b32 off, v72, s32 offset:224
-; GFX11-NEXT: scratch_store_b32 off, v73, s32 offset:220
-; GFX11-NEXT: scratch_store_b32 off, v74, s32 offset:216
-; GFX11-NEXT: scratch_store_b32 off, v75, s32 offset:212
-; GFX11-NEXT: scratch_store_b32 off, v76, s32 offset:208
-; GFX11-NEXT: scratch_store_b32 off, v77, s32 offset:204
-; GFX11-NEXT: scratch_store_b32 off, v78, s32 offset:200
-; GFX11-NEXT: scratch_store_b32 off, v79, s32 offset:196
-; GFX11-NEXT: scratch_store_b32 off, v88, s32 offset:192
-; GFX11-NEXT: scratch_store_b32 off, v89, s32 offset:188
-; GFX11-NEXT: scratch_store_b32 off, v90, s32 offset:184
-; GFX11-NEXT: scratch_store_b32 off, v91, s32 offset:180
-; GFX11-NEXT: scratch_store_b32 off, v92, s32 offset:176
-; GFX11-NEXT: scratch_store_b32 off, v93, s32 offset:172
-; GFX11-NEXT: scratch_store_b32 off, v94, s32 offset:168
-; GFX11-NEXT: scratch_store_b32 off, v95, s32 offset:164
-; GFX11-NEXT: s_clause 0x1f
-; GFX11-NEXT: scratch_store_b32 off, v104, s32 offset:160
-; GFX11-NEXT: scratch_store_b32 off, v105, s32 offset:156
-; GFX11-NEXT: scratch_store_b32 off, v106, s32 offset:152
-; GFX11-NEXT: scratch_store_b32 off, v107, s32 offset:148
-; GFX11-NEXT: scratch_store_b32 off, v108, s32 offset:144
-; GFX11-NEXT: scratch_store_b32 off, v109, s32 offset:140
-; GFX11-NEXT: scratch_store_b32 off, v110, s32 offset:136
-; GFX11-NEXT: scratch_store_b32 off, v111, s32 offset:132
-; GFX11-NEXT: scratch_store_b32 off, v120, s32 offset:128
-; GFX11-NEXT: scratch_store_b32 off, v121, s32 offset:124
-; GFX11-NEXT: scratch_store_b32 off, v122, s32 offset:120
-; GFX11-NEXT: scratch_store_b32 off, v123, s32 offset:116
-; GFX11-NEXT: scratch_store_b32 off, v124, s32 offset:112
-; GFX11-NEXT: scratch_store_b32 off, v125, s32 offset:108
-; GFX11-NEXT: scratch_store_b32 off, v126, s32 offset:104
-; GFX11-NEXT: scratch_store_b32 off, v127, s32 offset:100
-; GFX11-NEXT: scratch_store_b32 off, v136, s32 offset:96
-; GFX11-NEXT: scratch_store_b32 off, v137, s32 offset:92
-; GFX11-NEXT: scratch_store_b32 off, v138, s32 offset:88
-; GFX11-NEXT: scratch_store_b32 off, v139, s32 offset:84
-; GFX11-NEXT: scratch_store_b32 off, v140, s32 offset:80
-; GFX11-NEXT: scratch_store_b32 off, v141, s32 offset:76
-; GFX11-NEXT: scratch_store_b32 off, v142, s32 offset:72
-; GFX11-NEXT: scratch_store_b32 off, v143, s32 offset:68
-; GFX11-NEXT: scratch_store_b32 off, v152, s32 offset:64
-; GFX11-NEXT: scratch_store_b32 off, v153, s32 offset:60
-; GFX11-NEXT: scratch_store_b32 off, v154, s32 offset:56
-; GFX11-NEXT: scratch_store_b32 off, v155, s32 offset:52
-; GFX11-NEXT: scratch_store_b32 off, v156, s32 offset:48
-; GFX11-NEXT: scratch_store_b32 off, v157, s32 offset:44
-; GFX11-NEXT: scratch_store_b32 off, v158, s32 offset:40
-; GFX11-NEXT: scratch_store_b32 off, v159, s32 offset:36
-; GFX11-NEXT: s_clause 0x8
-; GFX11-NEXT: scratch_store_b32 off, v168, s32 offset:32
-; GFX11-NEXT: scratch_store_b32 off, v169, s32 offset:28
-; GFX11-NEXT: scratch_store_b32 off, v170, s32 offset:24
-; GFX11-NEXT: scratch_store_b32 off, v171, s32 offset:20
-; GFX11-NEXT: scratch_store_b32 off, v172, s32 offset:16
-; GFX11-NEXT: scratch_store_b32 off, v173, s32 offset:12
-; GFX11-NEXT: scratch_store_b32 off, v174, s32 offset:8
-; GFX11-NEXT: scratch_store_b32 off, v175, s32 offset:4
-; GFX11-NEXT: scratch_store_b32 off, v184, s32
-; GFX11-NEXT: v_dual_mov_b32 v178, v13 :: v_dual_mov_b32 v179, v12
-; GFX11-NEXT: v_dual_mov_b32 v180, v11 :: v_dual_mov_b32 v181, v9
-; GFX11-NEXT: v_dual_mov_b32 v182, v10 :: v_dual_mov_b32 v169, v7
-; GFX11-NEXT: v_dual_mov_b32 v170, v8 :: v_dual_mov_b32 v177, v3
-; GFX11-NEXT: v_dual_mov_b32 v176, v6 :: v_dual_mov_b32 v171, v4
-; GFX11-NEXT: v_dual_mov_b32 v174, v5 :: v_dual_mov_b32 v173, v0
-; GFX11-NEXT: v_dual_mov_b32 v184, v2 :: v_dual_mov_b32 v175, v1
-; GFX11-NEXT: v_dual_mov_b32 v183, s28 :: v_dual_mov_b32 v172, s29
-; GFX11-NEXT: s_mov_b32 s4, 0
-; GFX11-NEXT: s_and_b32 s5, vcc_lo, exec_lo
-; GFX11-NEXT: s_cbranch_scc0 .LBB63_4
-; GFX11-NEXT: ; %bb.1: ; %cmp.false
-; GFX11-NEXT: v_dual_mov_b32 v32, s0 :: v_dual_mov_b32 v37, s2
-; GFX11-NEXT: v_dual_mov_b32 v34, s1 :: v_dual_mov_b32 v41, s3
-; GFX11-NEXT: v_dual_mov_b32 v46, s16 :: v_dual_mov_b32 v59, s18
-; GFX11-NEXT: v_dual_mov_b32 v52, s17 :: v_dual_mov_b32 v67, s19
-; GFX11-NEXT: v_dual_mov_b32 v76, s20 :: v_dual_mov_b32 v97, s22
-; GFX11-NEXT: v_dual_mov_b32 v86, s21 :: v_dual_mov_b32 v109, s23
-; GFX11-NEXT: v_dual_mov_b32 v122, s24 :: v_dual_mov_b32 v151, s26
-; GFX11-NEXT: v_dual_mov_b32 v136, s25 :: v_dual_mov_b32 v15, s27
-; GFX11-NEXT: s_and_not1_b32 vcc_lo, exec_lo, s4
-; GFX11-NEXT: s_cbranch_vccnz .LBB63_3
-; GFX11-NEXT: .LBB63_2: ; %cmp.true
-; GFX11-NEXT: s_and_b32 s5, s27, 0xffff0000
-; GFX11-NEXT: s_lshl_b32 s4, s27, 16
-; GFX11-NEXT: v_add_f32_e64 v1, 0x40c00000, s5
-; GFX11-NEXT: v_add_f32_e64 v0, 0x40c00000, s4
-; GFX11-NEXT: s_lshl_b32 s6, s26, 16
-; GFX11-NEXT: s_and_b32 s4, s26, 0xffff0000
-; GFX11-NEXT: v_add_f32_e64 v5, 0x40c00000, s6
-; GFX11-NEXT: v_bfe_u32 v4, v1, 16, 1
-; GFX11-NEXT: v_bfe_u32 v2, v0, 16, 1
-; GFX11-NEXT: v_or_b32_e32 v7, 0x400000, v1
-; GFX11-NEXT: v_add_f32_e64 v3, 0x40c00000, s4
-; GFX11-NEXT: v_or_b32_e32 v8, 0x400000, v0
-; GFX11-NEXT: v_add_nc_u32_e32 v4, v4, v1
-; GFX11-NEXT: v_bfe_u32 v10, v5, 16, 1
-; GFX11-NEXT: v_cmp_u_f32_e32 vcc_lo, v0, v0
-; GFX11-NEXT: v_bfe_u32 v9, v3, 16, 1
-; GFX11-NEXT: s_lshl_b32 s7, s25, 16
-; GFX11-NEXT: v_add_nc_u32_e32 v4, 0x7fff, v4
-; GFX11-NEXT: v_add_nc_u32_e32 v2, v2, v0
-; GFX11-NEXT: s_and_b32 s5, s25, 0xffff0000
-; GFX11-NEXT: s_and_b32 s4, s24, 0xffff0000
-; GFX11-NEXT: v_add_f32_e64 v6, 0x40c00000, s5
-; GFX11-NEXT: v_and_b32_e32 v51, 0xffff0000, v183
-; GFX11-NEXT: v_add_nc_u32_e32 v2, 0x7fff, v2
-; GFX11-NEXT: s_delay_alu instid0(VALU_DEP_1)
-; GFX11-NEXT: v_cndmask_b32_e32 v0, v2, v8, vcc_lo
-; GFX11-NEXT: v_add_nc_u32_e32 v8, v10, v5
-; GFX11-NEXT: v_cmp_u_f32_e32 vcc_lo, v1, v1
-; GFX11-NEXT: v_add_nc_u32_e32 v2, v9, v3
-; GFX11-NEXT: v_or_b32_e32 v9, 0x400000, v5
-; GFX11-NEXT: v_lshrrev_b32_e32 v0, 16, v0
-; GFX11-NEXT: v_bfe_u32 v10, v6, 16, 1
-; GFX11-NEXT: v_cndmask_b32_e32 v1, v4, v7, vcc_lo
-; GFX11-NEXT: v_add_nc_u32_e32 v7, 0x7fff, v8
-; GFX11-NEXT: v_add_f32_e64 v8, 0x40c00000, s7
-; GFX11-NEXT: v_or_b32_e32 v4, 0x400000, v3
-; GFX11-NEXT: v_add_nc_u32_e32 v2, 0x7fff, v2
-; GFX11-NEXT: v_lshrrev_b32_e32 v1, 16, v1
-; GFX11-NEXT: v_and_b32_e32 v0, 0xffff, v0
-; GFX11-NEXT: v_cmp_u_f32_e32 vcc_lo, v3, v3
-; GFX11-NEXT: v_bfe_u32 v3, v8, 16, 1
-; GFX11-NEXT: s_delay_alu instid0(VALU_DEP_3) | instskip(NEXT) | instid1(VALU_DEP_2)
-; GFX11-NEXT: v_lshl_or_b32 v15, v1, 16, v0
-; GFX11-NEXT: v_add_nc_u32_e32 v1, v3, v8
-; GFX11-NEXT: v_cndmask_b32_e32 v2, v2, v4, vcc_lo
-; GFX11-NEXT: v_cmp_u_f32_e32 vcc_lo, v5, v5
-; GFX11-NEXT: v_add_nc_u32_e32 v5, v10, v6
-; GFX11-NEXT: s_delay_alu instid0(VALU_DEP_4) | instskip(NEXT) | instid1(VALU_DEP_4)
-; GFX11-NEXT: v_add_nc_u32_e32 v1, 0x7fff, v1
-; GFX11-NEXT: v_lshrrev_b32_e32 v0, 16, v2
-; GFX11-NEXT: v_cndmask_b32_e32 v4, v7, v9, vcc_lo
-; GFX11-NEXT: s_delay_alu instid0(VALU_DEP_4)
-; GFX11-NEXT: v_add_nc_u32_e32 v3, 0x7fff, v5
-; GFX11-NEXT: v_add_f32_e64 v5, 0x40c00000, s4
-; GFX11-NEXT: v_cmp_u_f32_e32 vcc_lo, v6, v6
-; GFX11-NEXT: s_lshl_b32 s4, s24, 16
-; GFX11-NEXT: v_lshrrev_b32_e32 v2, 16, v4
-; GFX11-NEXT: v_or_b32_e32 v4, 0x400000, v6
-; GFX11-NEXT: v_or_b32_e32 v7, 0x400000, v8
-; GFX11-NEXT: v_add_f32_e64 v9, 0x40c00000, s4
-; GFX11-NEXT: s_and_b32 s4, s23, 0xffff0000
-; GFX11-NEXT: s_delay_alu instid0(VALU_DEP_3) | instskip(SKIP_4) | instid1(VALU_DEP_4)
-; GFX11-NEXT: v_cndmask_b32_e32 v3, v3, v4, vcc_lo
-; GFX11-NEXT: v_bfe_u32 v4, v5, 16, 1
-; GFX11-NEXT: v_cmp_u_f32_e32 vcc_lo, v8, v8
-; GFX11-NEXT: v_or_b32_e32 v8, 0x400000, v5
-; GFX11-NEXT: v_or_b32_e32 v10, 0x400000, v9
-; GFX11-NEXT: v_add_nc_u32_e32 v4, v4, v5
-; GFX11-NEXT: v_dual_cndmask_b32 v6, v1, v7 :: v_dual_and_b32 v1, 0xffff, v2
-; GFX11-NEXT: v_bfe_u32 v7, v9, 16, 1
-; GFX11-NEXT: v_lshrrev_b32_e32 v2, 16, v3
-; GFX11-NEXT: s_delay_alu instid0(VALU_DEP_4) | instskip(NEXT) | instid1(VALU_DEP_4)
-; GFX11-NEXT: v_add_nc_u32_e32 v4, 0x7fff, v4
-; GFX11-NEXT: v_lshrrev_b32_e32 v3, 16, v6
-; GFX11-NEXT: v_cmp_u_f32_e32 vcc_lo, v5, v5
-; GFX11-NEXT: v_add_nc_u32_e32 v6, v7, v9
-; GFX11-NEXT: v_add_f32_e64 v7, 0x40c00000, s4
-; GFX11-NEXT: s_lshl_b32 s4, s23, 16
-; GFX11-NEXT: v_lshl_or_b32 v151, v0, 16, v1
-; GFX11-NEXT: v_add_f32_e64 v12, 0x40c00000, s4
-; GFX11-NEXT: v_add_nc_u32_e32 v6, 0x7fff, v6
-; GFX11-NEXT: v_bfe_u32 v11, v7, 16, 1
-; GFX11-NEXT: v_cndmask_b32_e32 v5, v4, v8, vcc_lo
-; GFX11-NEXT: v_cmp_u_f32_e32 vcc_lo, v9, v9
-; GFX11-NEXT: s_and_b32 s4, s22, 0xffff0000
-; GFX11-NEXT: v_bfe_u32 v9, v12, 16, 1
-; GFX11-NEXT: v_add_nc_u32_e32 v8, v11, v7
-; GFX11-NEXT: v_and_b32_e32 v4, 0xffff, v3
-; GFX11-NEXT: v_cndmask_b32_e32 v6, v6, v10, vcc_lo
-; GFX11-NEXT: v_add_f32_e64 v10, 0x40c00000, s4
-; GFX11-NEXT: s_lshl_b32 s4, s22, 16
-; GFX11-NEXT: v_lshrrev_b32_e32 v3, 16, v5
-; GFX11-NEXT: v_add_f32_e64 v11, 0x40c00000, s4
-; GFX11-NEXT: v_lshrrev_b32_e32 v5, 16, v6
-; GFX11-NEXT: v_add_nc_u32_e32 v6, 0x7fff, v8
-; GFX11-NEXT: v_add_nc_u32_e32 v8, v9, v12
-; GFX11-NEXT: v_or_b32_e32 v9, 0x400000, v7
-; GFX11-NEXT: v_bfe_u32 v14, v10, 16, 1
-; GFX11-NEXT: v_cmp_u_f32_e32 vcc_lo, v7, v7
-; GFX11-NEXT: v_or_b32_e32 v13, 0x400000, v12
-; GFX11-NEXT: v_add_nc_u32_e32 v8, 0x7fff, v8
-; GFX11-NEXT: s_and_b32 s4, s21, 0xffff0000
-; GFX11-NEXT: v_cndmask_b32_e32 v7, v6, v9, vcc_lo
-; GFX11-NEXT: v_bfe_u32 v9, v11, 16, 1
-; GFX11-NEXT: v_cmp_u_f32_e32 vcc_lo, v12, v12
-; GFX11-NEXT: v_add_nc_u32_e32 v12, v14, v10
-; GFX11-NEXT: v_and_b32_e32 v6, 0xffff, v5
-; GFX11-NEXT: v_lshrrev_b32_e32 v5, 16, v7
-; GFX11-NEXT: v_dual_cndmask_b32 v8, v8, v13 :: v_dual_add_nc_u32 v7, v9, v11
-; GFX11-NEXT: s_delay_alu instid0(VALU_DEP_4)
-; GFX11-NEXT: v_add_nc_u32_e32 v9, 0x7fff, v12
-; GFX11-NEXT: v_or_b32_e32 v12, 0x400000, v10
-; GFX11-NEXT: v_add_f32_e64 v13, 0x40c00000, s4
-; GFX11-NEXT: v_cmp_u_f32_e32 vcc_lo, v10, v10
-; GFX11-NEXT: s_lshl_b32 s4, s21, 16
-; GFX11-NEXT: v_add_nc_u32_e32 v7, 0x7fff, v7
-; GFX11-NEXT: v_or_b32_e32 v14, 0x400000, v11
-; GFX11-NEXT: v_add_f32_e64 v16, 0x40c00000, s4
-; GFX11-NEXT: v_cndmask_b32_e32 v9, v9, v12, vcc_lo
-; GFX11-NEXT: v_bfe_u32 v10, v13, 16, 1
-; GFX11-NEXT: v_cmp_u_f32_e32 vcc_lo, v11, v11
-; GFX11-NEXT: v_lshrrev_b32_e32 v8, 16, v8
-; GFX11-NEXT: v_bfe_u32 v12, v16, 16, 1
-; GFX11-NEXT: s_and_b32 s4, s20, 0xffff0000
-; GFX11-NEXT: v_dual_cndmask_b32 v11, v7, v14 :: v_dual_add_nc_u32 v10, v10, v13
-; GFX11-NEXT: s_delay_alu instid0(VALU_DEP_3) | instskip(SKIP_2) | instid1(VALU_DEP_4)
-; GFX11-NEXT: v_and_b32_e32 v7, 0xffff, v8
-; GFX11-NEXT: v_lshrrev_b32_e32 v8, 16, v9
-; GFX11-NEXT: v_or_b32_e32 v14, 0x400000, v13
-; GFX11-NEXT: v_add_nc_u32_e32 v10, 0x7fff, v10
-; GFX11-NEXT: v_lshrrev_b32_e32 v9, 16, v11
-; GFX11-NEXT: v_add_nc_u32_e32 v11, v12, v16
-; GFX11-NEXT: v_add_f32_e64 v12, 0x40c00000, s4
-; GFX11-NEXT: v_cmp_u_f32_e32 vcc_lo, v13, v13
-; GFX11-NEXT: s_lshl_b32 s4, s20, 16
-; GFX11-NEXT: v_or_b32_e32 v17, 0x400000, v16
-; GFX11-NEXT: v_add_nc_u32_e32 v11, 0x7fff, v11
-; GFX11-NEXT: v_bfe_u32 v18, v12, 16, 1
-; GFX11-NEXT: v_add_f32_e64 v19, 0x40c00000, s4
-; GFX11-NEXT: v_cndmask_b32_e32 v13, v10, v14, vcc_lo
-; GFX11-NEXT: v_cmp_u_f32_e32 vcc_lo, v16, v16
-; GFX11-NEXT: s_and_b32 s4, s19, 0xffff0000
-; GFX11-NEXT: v_add_nc_u32_e32 v14, v18, v12
-; GFX11-NEXT: v_bfe_u32 v16, v19, 16, 1
-; GFX11-NEXT: v_and_b32_e32 v10, 0xffff, v9
-; GFX11-NEXT: v_cndmask_b32_e32 v11, v11, v17, vcc_lo
-; GFX11-NEXT: v_add_f32_e64 v17, 0x40c00000, s4
-; GFX11-NEXT: s_lshl_b32 s4, s19, 16
-; GFX11-NEXT: v_lshrrev_b32_e32 v9, 16, v13
-; GFX11-NEXT: v_add_nc_u32_e32 v13, 0x7fff, v14
-; GFX11-NEXT: v_add_nc_u32_e32 v14, v16, v19
-; GFX11-NEXT: v_or_b32_e32 v16, 0x400000, v12
-; GFX11-NEXT: v_add_f32_e64 v18, 0x40c00000, s4
-; GFX11-NEXT: v_bfe_u32 v21, v17, 16, 1
-; GFX11-NEXT: v_cmp_u_f32_e32 vcc_lo, v12, v12
-; GFX11-NEXT: v_lshrrev_b32_e32 v11, 16, v11
-; GFX11-NEXT: v_add_nc_u32_e32 v14, 0x7fff, v14
-; GFX11-NEXT: v_or_b32_e32 v20, 0x400000, v19
-; GFX11-NEXT: s_and_b32 s4, s18, 0xffff0000
-; GFX11-NEXT: v_cndmask_b32_e32 v13, v13, v16, vcc_lo
-; GFX11-NEXT: v_bfe_u32 v16, v18, 16, 1
-; GFX11-NEXT: v_cmp_u_f32_e32 vcc_lo, v19, v19
-; GFX11-NEXT: v_add_nc_u32_e32 v19, v21, v17
-; GFX11-NEXT: v_and_b32_e32 v12, 0xffff, v11
-; GFX11-NEXT: v_lshrrev_b32_e32 v11, 16, v13
-; GFX11-NEXT: v_dual_cndmask_b32 v14, v14, v20 :: v_dual_add_nc_u32 v13, v16, v18
-; GFX11-NEXT: s_delay_alu instid0(VALU_DEP_4)
-; GFX11-NEXT: v_add_nc_u32_e32 v16, 0x7fff, v19
-; GFX11-NEXT: v_or_b32_e32 v19, 0x400000, v17
-; GFX11-NEXT: v_add_f32_e64 v20, 0x40c00000, s4
-; GFX11-NEXT: v_cmp_u_f32_e32 vcc_lo, v17, v17
-; GFX11-NEXT: s_lshl_b32 s4, s18, 16
-; GFX11-NEXT: v_add_nc_u32_e32 v13, 0x7fff, v13
-; GFX11-NEXT: v_or_b32_e32 v21, 0x400000, v18
-; GFX11-NEXT: v_add_f32_e64 v22, 0x40c00000, s4
-; GFX11-NEXT: v_cndmask_b32_e32 v16, v16, v19, vcc_lo
-; GFX11-NEXT: v_bfe_u32 v17, v20, 16, 1
-; GFX11-NEXT: v_cmp_u_f32_e32 vcc_lo, v18, v18
-; GFX11-NEXT: v_lshrrev_b32_e32 v14, 16, v14
-; GFX11-NEXT: v_bfe_u32 v19, v22, 16, 1
-; GFX11-NEXT: s_and_b32 s4, s17, 0xffff0000
-; GFX11-NEXT: v_add_nc_u32_e32 v17, v17, v20
-; GFX11-NEXT: s_delay_alu instid0(VALU_DEP_3) | instskip(SKIP_2) | instid1(VALU_DEP_4)
-; GFX11-NEXT: v_dual_cndmask_b32 v18, v13, v21 :: v_dual_and_b32 v13, 0xffff, v14
-; GFX11-NEXT: v_lshrrev_b32_e32 v14, 16, v16
-; GFX11-NEXT: v_or_b32_e32 v21, 0x400000, v20
-; GFX11-NEXT: v_add_nc_u32_e32 v17, 0x7fff, v17
-; GFX11-NEXT: s_delay_alu instid0(VALU_DEP_4)
-; GFX11-NEXT: v_lshrrev_b32_e32 v16, 16, v18
-; GFX11-NEXT: v_add_nc_u32_e32 v18, v19, v22
-; GFX11-NEXT: v_add_f32_e64 v19, 0x40c00000, s4
-; GFX11-NEXT: v_cmp_u_f32_e32 vcc_lo, v20, v20
-; GFX11-NEXT: s_lshl_b32 s4, s17, 16
-; GFX11-NEXT: v_or_b32_e32 v23, 0x400000, v22
-; GFX11-NEXT: v_add_nc_u32_e32 v18, 0x7fff, v18
-; GFX11-NEXT: v_bfe_u32 v24, v19, 16, 1
-; GFX11-NEXT: v_add_f32_e64 v25, 0x40c00000, s4
-; GFX11-NEXT: v_cndmask_b32_e32 v20, v17, v21, vcc_lo
-; GFX11-NEXT: v_cmp_u_f32_e32 vcc_lo, v22, v22
-; GFX11-NEXT: s_and_b32 s4, s16, 0xffff0000
-; GFX11-NEXT: v_add_nc_u32_e32 v21, v24, v19
-; GFX11-NEXT: v_bfe_u32 v22, v25, 16, 1
-; GFX11-NEXT: v_dual_cndmask_b32 v18, v18, v23 :: v_dual_and_b32 v17, 0xffff, v16
-; GFX11-NEXT: v_add_f32_e64 v23, 0x40c00000, s4
-; GFX11-NEXT: s_lshl_b32 s4, s16, 16
-; GFX11-NEXT: v_lshrrev_b32_e32 v16, 16, v20
-; GFX11-NEXT: v_add_nc_u32_e32 v20, 0x7fff, v21
-; GFX11-NEXT: v_add_nc_u32_e32 v21, v22, v25
-; GFX11-NEXT: v_or_b32_e32 v22, 0x400000, v19
-; GFX11-NEXT: v_add_f32_e64 v24, 0x40c00000, s4
-; GFX11-NEXT: v_bfe_u32 v27, v23, 16, 1
-; GFX11-NEXT: v_cmp_u_f32_e32 vcc_lo, v19, v19
-; GFX11-NEXT: v_lshrrev_b32_e32 v18, 16, v18
-; GFX11-NEXT: v_add_nc_u32_e32 v21, 0x7fff, v21
-; GFX11-NEXT: v_or_b32_e32 v26, 0x400000, v25
-; GFX11-NEXT: s_and_b32 s4, s3, 0xffff0000
-; GFX11-NEXT: v_cndmask_b32_e32 v20, v20, v22, vcc_lo
-; GFX11-NEXT: v_bfe_u32 v22, v24, 16, 1
-; GFX11-NEXT: v_cmp_u_f32_e32 vcc_lo, v25, v25
-; GFX11-NEXT: v_add_nc_u32_e32 v25, v27, v23
-; GFX11-NEXT: v_and_b32_e32 v19, 0xffff, v18
-; GFX11-NEXT: v_lshrrev_b32_e32 v18, 16, v20
-; GFX11-NEXT: v_dual_cndmask_b32 v21, v21, v26 :: v_dual_add_nc_u32 v20, v22, v24
-; GFX11-NEXT: s_delay_alu instid0(VALU_DEP_4)
-; GFX11-NEXT: v_add_nc_u32_e32 v22, 0x7fff, v25
-; GFX11-NEXT: v_or_b32_e32 v25, 0x400000, v23
-; GFX11-NEXT: v_add_f32_e64 v26, 0x40c00000, s4
-; GFX11-NEXT: v_cmp_u_f32_e32 vcc_lo, v23, v23
-; GFX11-NEXT: s_lshl_b32 s3, s3, 16
-; GFX11-NEXT: v_add_nc_u32_e32 v20, 0x7fff, v20
-; GFX11-NEXT: v_or_b32_e32 v27, 0x400000, v24
-; GFX11-NEXT: v_add_f32_e64 v28, 0x40c00000, s3
-; GFX11-NEXT: v_cndmask_b32_e32 v22, v22, v25, vcc_lo
-; GFX11-NEXT: v_bfe_u32 v23, v26, 16, 1
-; GFX11-NEXT: v_cmp_u_f32_e32 vcc_lo, v24, v24
-; GFX11-NEXT: v_lshrrev_b32_e32 v21, 16, v21
-; GFX11-NEXT: v_bfe_u32 v25, v28, 16, 1
-; GFX11-NEXT: s_and_b32 s3, s2, 0xffff0000
-; GFX11-NEXT: v_dual_cndmask_b32 v24, v20, v27 :: v_dual_add_nc_u32 v23, v23, v26
-; GFX11-NEXT: s_delay_alu instid0(VALU_DEP_3) | instskip(SKIP_2) | instid1(VALU_DEP_4)
-; GFX11-NEXT: v_and_b32_e32 v20, 0xffff, v21
-; GFX11-NEXT: v_lshrrev_b32_e32 v21, 16, v22
-; GFX11-NEXT: v_or_b32_e32 v27, 0x400000, v26
-; GFX11-NEXT: v_add_nc_u32_e32 v23, 0x7fff, v23
-; GFX11-NEXT: v_lshrrev_b32_e32 v22, 16, v24
-; GFX11-NEXT: v_add_nc_u32_e32 v24, v25, v28
-; GFX11-NEXT: v_add_f32_e64 v25, 0x40c00000, s3
-; GFX11-NEXT: v_cmp_u_f32_e32 vcc_lo, v26, v26
-; GFX11-NEXT: s_lshl_b32 s2, s2, 16
-; GFX11-NEXT: v_or_b32_e32 v29, 0x400000, v28
-; GFX11-NEXT: v_add_nc_u32_e32 v24, 0x7fff, v24
-; GFX11-NEXT: v_bfe_u32 v30, v25, 16, 1
-; GFX11-NEXT: v_add_f32_e64 v31, 0x40c00000, s2
-; GFX11-NEXT: v_cndmask_b32_e32 v26, v23, v27, vcc_lo
-; GFX11-NEXT: v_cmp_u_f32_e32 vcc_lo, v28, v28
-; GFX11-NEXT: s_and_b32 s2, s1, 0xffff0000
-; GFX11-NEXT: v_add_nc_u32_e32 v27, v30, v25
-; GFX11-NEXT: v_bfe_u32 v28, v31, 16, 1
-; GFX11-NEXT: s_lshl_b32 s1, s1, 16
-; GFX11-NEXT: v_cndmask_b32_e32 v24, v24, v29, vcc_lo
-; GFX11-NEXT: v_add_f32_e64 v29, 0x40c00000, s2
-; GFX11-NEXT: v_and_b32_e32 v23, 0xffff, v22
-; GFX11-NEXT: v_lshrrev_b32_e32 v22, 16, v26
-; GFX11-NEXT: v_add_nc_u32_e32 v26, 0x7fff, v27
-; GFX11-NEXT: v_add_nc_u32_e32 v27, v28, v31
-; GFX11-NEXT: v_or_b32_e32 v28, 0x400000, v25
-; GFX11-NEXT: v_add_f32_e64 v30, 0x40c00000, s1
-; GFX11-NEXT: v_bfe_u32 v33, v29, 16, 1
-; GFX11-NEXT: v_cmp_u_f32_e32 vcc_lo, v25, v25
-; GFX11-NEXT: v_lshrrev_b32_e32 v24, 16, v24
-; GFX11-NEXT: v_add_nc_u32_e32 v27, 0x7fff, v27
-; GFX11-NEXT: v_or_b32_e32 v32, 0x400000, v31
-; GFX11-NEXT: s_and_b32 s1, s0, 0xffff0000
-; GFX11-NEXT: v_cndmask_b32_e32 v26, v26, v28, vcc_lo
-; GFX11-NEXT: v_bfe_u32 v28, v30, 16, 1
-; GFX11-NEXT: v_cmp_u_f32_e32 vcc_lo, v31, v31
-; GFX11-NEXT: v_add_nc_u32_e32 v31, v33, v29
-; GFX11-NEXT: v_and_b32_e32 v25, 0xffff, v24
-; GFX11-NEXT: v_lshrrev_b32_e32 v24, 16, v26
-; GFX11-NEXT: v_dual_cndmask_b32 v27, v27, v32 :: v_dual_add_nc_u32 v26, v28, v30
-; GFX11-NEXT: s_delay_alu instid0(VALU_DEP_4)
-; GFX11-NEXT: v_add_nc_u32_e32 v28, 0x7fff, v31
-; GFX11-NEXT: v_or_b32_e32 v31, 0x400000, v29
-; GFX11-NEXT: v_add_f32_e64 v32, 0x40c00000, s1
-; GFX11-NEXT: v_cmp_u_f32_e32 vcc_lo, v29, v29
-; GFX11-NEXT: s_lshl_b32 s0, s0, 16
-; GFX11-NEXT: v_add_nc_u32_e32 v26, 0x7fff, v26
-; GFX11-NEXT: v_or_b32_e32 v33, 0x400000, v30
-; GFX11-NEXT: v_add_f32_e64 v34, 0x40c00000, s0
-; GFX11-NEXT: v_cndmask_b32_e32 v28, v28, v31, vcc_lo
-; GFX11-NEXT: v_bfe_u32 v29, v32, 16, 1
-; GFX11-NEXT: v_cmp_u_f32_e32 vcc_lo, v30, v30
-; GFX11-NEXT: v_or_b32_e32 v35, 0x400000, v32
-; GFX11-NEXT: v_bfe_u32 v31, v34, 16, 1
-; GFX11-NEXT: v_or_b32_e32 v36, 0x400000, v34
-; GFX11-NEXT: v_lshrrev_b32_e32 v27, 16, v27
-; GFX11-NEXT: v_cndmask_b32_e32 v30, v26, v33, vcc_lo
-; GFX11-NEXT: v_lshrrev_b32_e32 v26, 16, v28
-; GFX11-NEXT: v_add_nc_u32_e32 v28, v29, v32
-; GFX11-NEXT: v_lshlrev_b32_e32 v33, 16, v178
-; GFX11-NEXT: v_cmp_u_f32_e32 vcc_lo, v32, v32
-; GFX11-NEXT: v_lshrrev_b32_e32 v29, 16, v30
-; GFX11-NEXT: v_add_nc_u32_e32 v30, v31, v34
-; GFX11-NEXT: v_and_b32_e32 v31, 0xffff0000, v178
-; GFX11-NEXT: v_add_nc_u32_e32 v28, 0x7fff, v28
-; GFX11-NEXT: v_add_f32_e32 v33, 0x40c00000, v33
-; GFX11-NEXT: v_lshl_or_b32 v109, v5, 16, v7
-; GFX11-NEXT: v_add_nc_u32_e32 v30, 0x7fff, v30
-; GFX11-NEXT: v_add_f32_e32 v31, 0x40c00000, v31
-; GFX11-NEXT: v_cndmask_b32_e32 v32, v28, v35, vcc_lo
-; GFX11-NEXT: v_bfe_u32 v37, v33, 16, 1
-; GFX11-NEXT: v_cmp_u_f32_e32 vcc_lo, v34, v34
-; GFX11-NEXT: v_and_b32_e32 v28, 0xffff, v29
-; GFX11-NEXT: v_bfe_u32 v35, v31, 16, 1
-; GFX11-NEXT: v_lshrrev_b32_e32 v29, 16, v32
-; GFX11-NEXT: v_add_nc_u32_e32 v32, v37, v33
-; GFX11-NEXT: v_cndmask_b32_e32 v30, v30, v36, vcc_lo
-; GFX11-NEXT: v_lshlrev_b32_e32 v36, 16, v179
-; GFX11-NEXT: v_add_nc_u32_e32 v34, v35, v31
-; GFX11-NEXT: v_or_b32_e32 v37, 0x400000, v33
-; GFX11-NEXT: v_add_nc_u32_e32 v32, 0x7fff, v32
-; GFX11-NEXT: v_cmp_u_f32_e32 vcc_lo, v33, v33
-; GFX11-NEXT: v_and_b32_e32 v35, 0xffff0000, v179
-; GFX11-NEXT: v_add_nc_u32_e32 v34, 0x7fff, v34
-; GFX11-NEXT: v_or_b32_e32 v38, 0x400000, v31
-; GFX11-NEXT: v_add_f32_e32 v36, 0x40c00000, v36
-; GFX11-NEXT: v_cndmask_b32_e32 v32, v32, v37, vcc_lo
-; GFX11-NEXT: v_cmp_u_f32_e32 vcc_lo, v31, v31
-; GFX11-NEXT: v_add_f32_e32 v35, 0x40c00000, v35
-; GFX11-NEXT: v_and_b32_e32 v37, 0xffff0000, v180
-; GFX11-NEXT: v_or_b32_e32 v39, 0x400000, v36
-; GFX11-NEXT: v_lshrrev_b32_e32 v32, 16, v32
-; GFX11-NEXT: v_cndmask_b32_e32 v31, v34, v38, vcc_lo
-; GFX11-NEXT: v_bfe_u32 v34, v36, 16, 1
-; GFX11-NEXT: v_bfe_u32 v33, v35, 16, 1
-; GFX11-NEXT: v_lshlrev_b32_e32 v38, 16, v180
-; GFX11-NEXT: v_cmp_u_f32_e32 vcc_lo, v36, v36
-; GFX11-NEXT: s_delay_alu instid0(VALU_DEP_4) | instskip(NEXT) | instid1(VALU_DEP_4)
-; GFX11-NEXT: v_dual_add_f32 v37, 0x40c00000, v37 :: v_dual_add_nc_u32 v34, v34, v36
-; GFX11-NEXT: v_add_nc_u32_e32 v33, v33, v35
-; GFX11-NEXT: v_or_b32_e32 v48, 0x400000, v35
-; GFX11-NEXT: v_add_f32_e32 v38, 0x40c00000, v38
-; GFX11-NEXT: v_lshrrev_b32_e32 v31, 16, v31
-; GFX11-NEXT: v_add_nc_u32_e32 v34, 0x7fff, v34
-; GFX11-NEXT: v_add_nc_u32_e32 v33, 0x7fff, v33
-; GFX11-NEXT: v_and_b32_e32 v32, 0xffff, v32
-; GFX11-NEXT: v_bfe_u32 v36, v37, 16, 1
-; GFX11-NEXT: v_lshrrev_b32_e32 v30, 16, v30
-; GFX11-NEXT: v_cndmask_b32_e32 v34, v34, v39, vcc_lo
-; GFX11-NEXT: v_cmp_u_f32_e32 vcc_lo, v35, v35
-; GFX11-NEXT: v_bfe_u32 v35, v38, 16, 1
-; GFX11-NEXT: v_lshl_or_b32 v178, v31, 16, v32
-; GFX11-NEXT: v_add_nc_u32_e32 v31, v36, v37
-; GFX11-NEXT: v_dual_cndmask_b32 v33, v33, v48 :: v_dual_lshlrev_b32 v36, 16, v182
-; GFX11-NEXT: v_lshrrev_b32_e32 v34, 16, v34
-; GFX11-NEXT: v_or_b32_e32 v39, 0x400000, v38
-; GFX11-NEXT: v_cmp_u_f32_e32 vcc_lo, v38, v38
-; GFX11-NEXT: s_delay_alu instid0(VALU_DEP_4)
-; GFX11-NEXT: v_add_f32_e32 v36, 0x40c00000, v36
-; GFX11-NEXT: v_lshrrev_b32_e32 v32, 16, v33
-; GFX11-NEXT: v_add_nc_u32_e32 v33, v35, v38
-; GFX11-NEXT: v_and_b32_e32 v35, 0xffff0000, v182
-; GFX11-NEXT: v_and_b32_e32 v34, 0xffff, v34
-; GFX11-NEXT: v_or_b32_e32 v48, 0x400000, v37
-; GFX11-NEXT: v_add_nc_u32_e32 v31, 0x7fff, v31
-; GFX11-NEXT: v_add_nc_u32_e32 v33, 0x7fff, v33
-; GFX11-NEXT: v_add_f32_e32 v35, 0x40c00000, v35
-; GFX11-NEXT: v_lshl_or_b32 v179, v32, 16, v34
-; GFX11-NEXT: v_and_b32_e32 v30, 0xffff, v30
-; GFX11-NEXT: v_lshl_or_b32 v136, v2, 16, v4
-; GFX11-NEXT: v_cndmask_b32_e32 v33, v33, v39, vcc_lo
-; GFX11-NEXT: v_bfe_u32 v38, v35, 16, 1
-; GFX11-NEXT: v_cmp_u_f32_e32 vcc_lo, v37, v37
-; GFX11-NEXT: v_bfe_u32 v37, v36, 16, 1
-; GFX11-NEXT: v_or_b32_e32 v39, 0x400000, v36
-; GFX11-NEXT: v_lshrrev_b32_e32 v33, 16, v33
-; GFX11-NEXT: v_dual_cndmask_b32 v31, v31, v48 :: v_dual_add_nc_u32 v38, v38, v35
-; GFX11-NEXT: s_delay_alu instid0(VALU_DEP_4) | instskip(SKIP_2) | instid1(VALU_DEP_4)
-; GFX11-NEXT: v_add_nc_u32_e32 v32, v37, v36
-; GFX11-NEXT: v_and_b32_e32 v37, 0xffff0000, v181
-; GFX11-NEXT: v_cmp_u_f32_e32 vcc_lo, v36, v36
-; GFX11-NEXT: v_add_nc_u32_e32 v34, 0x7fff, v38
-; GFX11-NEXT: v_lshlrev_b32_e32 v38, 16, v181
-; GFX11-NEXT: v_add_nc_u32_e32 v32, 0x7fff, v32
-; GFX11-NEXT: v_or_b32_e32 v48, 0x400000, v35
-; GFX11-NEXT: v_add_f32_e32 v37, 0x40c00000, v37
-; GFX11-NEXT: v_lshrrev_b32_e32 v31, 16, v31
-; GFX11-NEXT: v_add_f32_e32 v38, 0x40c00000, v38
-; GFX11-NEXT: v_cndmask_b32_e32 v32, v32, v39, vcc_lo
-; GFX11-NEXT: v_cmp_u_f32_e32 vcc_lo, v35, v35
-; GFX11-NEXT: v_and_b32_e32 v33, 0xffff, v33
-; GFX11-NEXT: v_bfe_u32 v35, v37, 16, 1
-; GFX11-NEXT: v_bfe_u32 v36, v38, 16, 1
-; GFX11-NEXT: v_lshrrev_b32_e32 v32, 16, v32
-; GFX11-NEXT: v_cndmask_b32_e32 v34, v34, v48, vcc_lo
-; GFX11-NEXT: v_lshl_or_b32 v180, v31, 16, v33
-; GFX11-NEXT: v_add_nc_u32_e32 v33, v35, v37
-; GFX11-NEXT: v_and_b32_e32 v35, 0xffff0000, v170
-; GFX11-NEXT: v_or_b32_e32 v39, 0x400000, v38
-; GFX11-NEXT: v_lshrrev_b32_e32 v31, 16, v34
-; GFX11-NEXT: v_add_nc_u32_e32 v34, v36, v38
-; GFX11-NEXT: s_delay_alu instid0(VALU_DEP_4) | instskip(SKIP_2) | instid1(VALU_DEP_4)
-; GFX11-NEXT: v_dual_add_f32 v35, 0x40c00000, v35 :: v_dual_lshlrev_b32 v36, 16, v170
-; GFX11-NEXT: v_cmp_u_f32_e32 vcc_lo, v38, v38
-; GFX11-NEXT: v_and_b32_e32 v32, 0xffff, v32
-; GFX11-NEXT: v_add_nc_u32_e32 v34, 0x7fff, v34
-; GFX11-NEXT: s_delay_alu instid0(VALU_DEP_4)
-; GFX11-NEXT: v_add_f32_e32 v36, 0x40c00000, v36
-; GFX11-NEXT: v_add_nc_u32_e32 v33, 0x7fff, v33
-; GFX11-NEXT: v_or_b32_e32 v48, 0x400000, v37
-; GFX11-NEXT: v_bfe_u32 v38, v35, 16, 1
-; GFX11-NEXT: v_cndmask_b32_e32 v34, v34, v39, vcc_lo
-; GFX11-NEXT: v_bfe_u32 v39, v36, 16, 1
-; GFX11-NEXT: v_cmp_u_f32_e32 vcc_lo, v37, v37
-; GFX11-NEXT: v_lshl_or_b32 v182, v31, 16, v32
-; GFX11-NEXT: v_add_nc_u32_e32 v37, v38, v35
-; GFX11-NEXT: v_lshrrev_b32_e32 v34, 16, v34
-; GFX11-NEXT: v_add_nc_u32_e32 v31, v39, v36
-; GFX11-NEXT: v_cndmask_b32_e32 v33, v33, v48, vcc_lo
-; GFX11-NEXT: v_or_b32_e32 v38, 0x400000, v36
-; GFX11-NEXT: v_cmp_u_f32_e32 vcc_lo, v36, v36
-; GFX11-NEXT: v_or_b32_e32 v48, 0x400000, v35
-; GFX11-NEXT: v_add_nc_u32_e32 v31, 0x7fff, v31
-; GFX11-NEXT: v_lshrrev_b32_e32 v32, 16, v33
-; GFX11-NEXT: v_and_b32_e32 v33, 0xffff, v34
-; GFX11-NEXT: v_add_nc_u32_e32 v34, 0x7fff, v37
-; GFX11-NEXT: v_and_b32_e32 v37, 0xffff0000, v169
-; GFX11-NEXT: v_cndmask_b32_e32 v31, v31, v38, vcc_lo
-; GFX11-NEXT: v_cmp_u_f32_e32 vcc_lo, v35, v35
-; GFX11-NEXT: v_lshlrev_b32_e32 v39, 16, v169
-; GFX11-NEXT: v_lshl_or_b32 v181, v32, 16, v33
-; GFX11-NEXT: v_add_f32_e32 v37, 0x40c00000, v37
-; GFX11-NEXT: v_and_b32_e32 v38, 0xffff0000, v176
-; GFX11-NEXT: v_cndmask_b32_e32 v34, v34, v48, vcc_lo
-; GFX11-NEXT: v_add_f32_e32 v36, 0x40c00000, v39
-; GFX11-NEXT: v_lshrrev_b32_e32 v31, 16, v31
-; GFX11-NEXT: v_bfe_u32 v35, v37, 16, 1
-; GFX11-NEXT: v_or_b32_e32 v39, 0x400000, v37
-; GFX11-NEXT: v_lshrrev_b32_e32 v33, 16, v34
-; GFX11-NEXT: v_bfe_u32 v32, v36, 16, 1
-; GFX11-NEXT: v_add_f32_e32 v38, 0x40c00000, v38
-; GFX11-NEXT: v_add_nc_u32_e32 v34, v35, v37
-; GFX11-NEXT: v_lshlrev_b32_e32 v35, 16, v176
-; GFX11-NEXT: v_cmp_u_f32_e32 vcc_lo, v37, v37
-; GFX11-NEXT: v_add_nc_u32_e32 v32, v32, v36
-; GFX11-NEXT: v_and_b32_e32 v31, 0xffff, v31
-; GFX11-NEXT: v_add_nc_u32_e32 v34, 0x7fff, v34
-; GFX11-NEXT: v_add_f32_e32 v35, 0x40c00000, v35
-; GFX11-NEXT: v_or_b32_e32 v48, 0x400000, v36
-; GFX11-NEXT: v_add_nc_u32_e32 v32, 0x7fff, v32
-; GFX11-NEXT: v_bfe_u32 v37, v38, 16, 1
-; GFX11-NEXT: v_cndmask_b32_e32 v34, v34, v39, vcc_lo
-; GFX11-NEXT: v_bfe_u32 v49, v35, 16, 1
-; GFX11-NEXT: v_cmp_u_f32_e32 vcc_lo, v36, v36
-; GFX11-NEXT: v_and_b32_e32 v27, 0xffff, v27
-; GFX11-NEXT: v_lshl_or_b32 v170, v33, 16, v31
-; GFX11-NEXT: v_lshrrev_b32_e32 v31, 16, v34
-; GFX11-NEXT: v_add_nc_u32_e32 v36, v49, v35
-; GFX11-NEXT: v_dual_cndmask_b32 v32, v32, v48 :: v_dual_add_nc_u32 v33, v37, v38
-; GFX11-NEXT: v_and_b32_e32 v37, 0xffff0000, v174
-; GFX11-NEXT: v_cmp_u_f32_e32 vcc_lo, v35, v35
-; GFX11-NEXT: s_delay_alu instid0(VALU_DEP_4) | instskip(SKIP_4) | instid1(VALU_DEP_4)
-; GFX11-NEXT: v_add_nc_u32_e32 v34, 0x7fff, v36
-; GFX11-NEXT: v_or_b32_e32 v36, 0x400000, v35
-; GFX11-NEXT: v_lshrrev_b32_e32 v32, 16, v32
-; GFX11-NEXT: v_add_nc_u32_e32 v33, 0x7fff, v33
-; GFX11-NEXT: v_or_b32_e32 v39, 0x400000, v38
-; GFX11-NEXT: v_dual_add_f32 v35, 0x40c00000, v37 :: v_dual_cndmask_b32 v34, v34, v36
-; GFX11-NEXT: v_lshlrev_b32_e32 v36, 16, v174
-; GFX11-NEXT: v_cmp_u_f32_e32 vcc_lo, v38, v38
-; GFX11-NEXT: v_and_b32_e32 v32, 0xffff, v32
-; GFX11-NEXT: s_delay_alu instid0(VALU_DEP_4) | instskip(SKIP_2) | instid1(VALU_DEP_4)
-; GFX11-NEXT: v_bfe_u32 v37, v35, 16, 1
-; GFX11-NEXT: v_lshrrev_b32_e32 v34, 16, v34
-; GFX11-NEXT: v_dual_add_f32 v36, 0x40c00000, v36 :: v_dual_cndmask_b32 v33, v33, v39
-; GFX11-NEXT: v_lshl_or_b32 v169, v31, 16, v32
-; GFX11-NEXT: s_delay_alu instid0(VALU_DEP_4) | instskip(NEXT) | instid1(VALU_DEP_4)
-; GFX11-NEXT: v_add_nc_u32_e32 v37, v37, v35
-; GFX11-NEXT: v_and_b32_e32 v34, 0xffff, v34
-; GFX11-NEXT: s_delay_alu instid0(VALU_DEP_4)
-; GFX11-NEXT: v_bfe_u32 v31, v36, 16, 1
-; GFX11-NEXT: v_lshrrev_b32_e32 v33, 16, v33
-; GFX11-NEXT: v_and_b32_e32 v32, 0xffff0000, v171
-; GFX11-NEXT: v_cmp_u_f32_e32 vcc_lo, v35, v35
-; GFX11-NEXT: v_lshlrev_b32_e32 v38, 16, v177
-; GFX11-NEXT: v_add_nc_u32_e32 v31, v31, v36
-; GFX11-NEXT: v_lshl_or_b32 v176, v33, 16, v34
-; GFX11-NEXT: v_add_nc_u32_e32 v33, 0x7fff, v37
-; GFX11-NEXT: v_or_b32_e32 v34, 0x400000, v35
-; GFX11-NEXT: v_dual_add_f32 v32, 0x40c00000, v32 :: v_dual_lshlrev_b32 v37, 16, v171
-; GFX11-NEXT: v_add_nc_u32_e32 v31, 0x7fff, v31
-; GFX11-NEXT: v_add_f32_e32 v38, 0x40c00000, v38
-; GFX11-NEXT: s_delay_alu instid0(VALU_DEP_4)
-; GFX11-NEXT: v_cndmask_b32_e32 v33, v33, v34, vcc_lo
-; GFX11-NEXT: v_or_b32_e32 v34, 0x400000, v36
-; GFX11-NEXT: v_add_f32_e32 v35, 0x40c00000, v37
-; GFX11-NEXT: v_bfe_u32 v37, v32, 16, 1
-; GFX11-NEXT: v_cmp_u_f32_e32 vcc_lo, v36, v36
-; GFX11-NEXT: v_or_b32_e32 v39, 0x400000, v32
-; GFX11-NEXT: v_bfe_u32 v50, v38, 16, 1
-; GFX11-NEXT: v_or_b32_e32 v48, 0x400000, v35
-; GFX11-NEXT: v_dual_cndmask_b32 v31, v31, v34 :: v_dual_add_nc_u32 v36, v37, v32
-; GFX11-NEXT: v_bfe_u32 v34, v35, 16, 1
-; GFX11-NEXT: v_and_b32_e32 v37, 0xffff0000, v177
-; GFX11-NEXT: v_cmp_u_f32_e32 vcc_lo, v32, v32
-; GFX11-NEXT: s_delay_alu instid0(VALU_DEP_4) | instskip(SKIP_1) | instid1(VALU_DEP_4)
-; GFX11-NEXT: v_add_nc_u32_e32 v36, 0x7fff, v36
-; GFX11-NEXT: v_lshrrev_b32_e32 v31, 16, v31
-; GFX11-NEXT: v_dual_add_f32 v37, 0x40c00000, v37 :: v_dual_add_nc_u32 v34, v34, v35
-; GFX11-NEXT: v_lshrrev_b32_e32 v33, 16, v33
-; GFX11-NEXT: s_delay_alu instid0(VALU_DEP_4) | instskip(SKIP_1) | instid1(VALU_DEP_4)
-; GFX11-NEXT: v_cndmask_b32_e32 v32, v36, v39, vcc_lo
-; GFX11-NEXT: v_cmp_u_f32_e32 vcc_lo, v35, v35
-; GFX11-NEXT: v_add_nc_u32_e32 v34, 0x7fff, v34
-; GFX11-NEXT: v_bfe_u32 v49, v37, 16, 1
-; GFX11-NEXT: v_add_nc_u32_e32 v36, v50, v38
-; GFX11-NEXT: v_or_b32_e32 v39, 0x400000, v38
-; GFX11-NEXT: v_lshlrev_b32_e32 v50, 16, v184
-; GFX11-NEXT: s_delay_alu instid0(VALU_DEP_4) | instskip(NEXT) | instid1(VALU_DEP_4)
-; GFX11-NEXT: v_dual_cndmask_b32 v34, v34, v48 :: v_dual_add_nc_u32 v35, v49, v37
-; GFX11-NEXT: v_add_nc_u32_e32 v36, 0x7fff, v36
-; GFX11-NEXT: v_cmp_u_f32_e32 vcc_lo, v38, v38
-; GFX11-NEXT: v_and_b32_e32 v48, 0xffff0000, v184
-; GFX11-NEXT: s_delay_alu instid0(VALU_DEP_4)
-; GFX11-NEXT: v_lshrrev_b32_e32 v34, 16, v34
-; GFX11-NEXT: v_add_nc_u32_e32 v35, 0x7fff, v35
-; GFX11-NEXT: v_or_b32_e32 v49, 0x400000, v37
-; GFX11-NEXT: v_cndmask_b32_e32 v36, v36, v39, vcc_lo
-; GFX11-NEXT: v_cmp_u_f32_e32 vcc_lo, v37, v37
-; GFX11-NEXT: v_add_f32_e32 v37, 0x40c00000, v50
-; GFX11-NEXT: v_and_b32_e32 v31, 0xffff, v31
-; GFX11-NEXT: v_lshrrev_b32_e32 v32, 16, v32
-; GFX11-NEXT: v_dual_add_f32 v38, 0x40c00000, v48 :: v_dual_cndmask_b32 v35, v35, v49
-; GFX11-NEXT: v_lshrrev_b32_e32 v36, 16, v36
-; GFX11-NEXT: v_and_b32_e32 v34, 0xffff, v34
-; GFX11-NEXT: v_bfe_u32 v48, v37, 16, 1
-; GFX11-NEXT: s_delay_alu instid0(VALU_DEP_4)
-; GFX11-NEXT: v_bfe_u32 v39, v38, 16, 1
-; GFX11-NEXT: v_lshrrev_b32_e32 v35, 16, v35
-; GFX11-NEXT: v_and_b32_e32 v36, 0xffff, v36
-; GFX11-NEXT: v_lshl_or_b32 v174, v33, 16, v31
-; GFX11-NEXT: v_lshl_or_b32 v171, v32, 16, v34
-; GFX11-NEXT: v_add_nc_u32_e32 v31, v48, v37
-; GFX11-NEXT: v_and_b32_e32 v33, 0xffff0000, v175
-; GFX11-NEXT: v_lshlrev_b32_e32 v34, 16, v175
-; GFX11-NEXT: v_add_nc_u32_e32 v39, v39, v38
-; GFX11-NEXT: v_lshl_or_b32 v177, v35, 16, v36
-; GFX11-NEXT: v_add_nc_u32_e32 v31, 0x7fff, v31
-; GFX11-NEXT: v_or_b32_e32 v35, 0x400000, v37
-; GFX11-NEXT: v_dual_add_f32 v33, 0x40c00000, v33 :: v_dual_add_f32 v34, 0x40c00000, v34
-; GFX11-NEXT: v_cmp_u_f32_e32 vcc_lo, v37, v37
-; GFX11-NEXT: v_add_nc_u32_e32 v32, 0x7fff, v39
-; GFX11-NEXT: v_or_b32_e32 v36, 0x400000, v38
-; GFX11-NEXT: s_delay_alu instid0(VALU_DEP_4)
-; GFX11-NEXT: v_bfe_u32 v37, v33, 16, 1
-; GFX11-NEXT: v_bfe_u32 v39, v34, 16, 1
-; GFX11-NEXT: v_cndmask_b32_e32 v31, v31, v35, vcc_lo
-; GFX11-NEXT: v_and_b32_e32 v35, 0xffff0000, v173
-; GFX11-NEXT: v_cmp_u_f32_e32 vcc_lo, v38, v38
-; GFX11-NEXT: v_lshlrev_b32_e32 v48, 16, v173
-; GFX11-NEXT: v_or_b32_e32 v49, 0x400000, v33
-; GFX11-NEXT: v_lshrrev_b32_e32 v31, 16, v31
-; GFX11-NEXT: v_dual_add_f32 v35, 0x40c00000, v35 :: v_dual_cndmask_b32 v32, v32, v36
-; GFX11-NEXT: v_add_nc_u32_e32 v36, v37, v33
-; GFX11-NEXT: v_add_nc_u32_e32 v37, v39, v34
-; GFX11-NEXT: v_or_b32_e32 v39, 0x400000, v34
-; GFX11-NEXT: s_delay_alu instid0(VALU_DEP_4)
-; GFX11-NEXT: v_bfe_u32 v38, v35, 16, 1
-; GFX11-NEXT: v_cmp_u_f32_e32 vcc_lo, v34, v34
-; GFX11-NEXT: v_add_nc_u32_e32 v36, 0x7fff, v36
-; GFX11-NEXT: v_add_nc_u32_e32 v37, 0x7fff, v37
-; GFX11-NEXT: v_lshrrev_b32_e32 v32, 16, v32
-; GFX11-NEXT: v_add_nc_u32_e32 v38, v38, v35
-; GFX11-NEXT: v_and_b32_e32 v31, 0xffff, v31
-; GFX11-NEXT: v_lshl_or_b32 v122, v3, 16, v6
-; GFX11-NEXT: v_cndmask_b32_e32 v34, v37, v39, vcc_lo
-; GFX11-NEXT: v_cmp_u_f32_e32 vcc_lo, v33, v33
-; GFX11-NEXT: v_add_nc_u32_e32 v37, 0x7fff, v38
-; GFX11-NEXT: v_add_f32_e32 v38, 0x40c00000, v48
-; GFX11-NEXT: v_or_b32_e32 v39, 0x400000, v35
-; GFX11-NEXT: v_dual_cndmask_b32 v33, v36, v49 :: v_dual_lshlrev_b32 v48, 16, v183
-; GFX11-NEXT: v_cmp_u_f32_e32 vcc_lo, v35, v35
-; GFX11-NEXT: s_delay_alu instid0(VALU_DEP_4) | instskip(SKIP_1) | instid1(VALU_DEP_4)
-; GFX11-NEXT: v_bfe_u32 v36, v38, 16, 1
-; GFX11-NEXT: v_or_b32_e32 v49, 0x400000, v38
-; GFX11-NEXT: v_add_f32_e32 v48, 0x40c00000, v48
-; GFX11-NEXT: v_lshrrev_b32_e32 v34, 16, v34
-; GFX11-NEXT: v_cndmask_b32_e32 v35, v37, v39, vcc_lo
-; GFX11-NEXT: v_and_b32_e32 v37, 0xffff0000, v172
-; GFX11-NEXT: v_lshlrev_b32_e32 v39, 16, v172
-; GFX11-NEXT: v_add_nc_u32_e32 v36, v36, v38
-; GFX11-NEXT: v_cmp_u_f32_e32 vcc_lo, v38, v38
-; GFX11-NEXT: v_or_b32_e32 v55, 0x400000, v48
-; GFX11-NEXT: v_add_f32_e32 v37, 0x40c00000, v37
-; GFX11-NEXT: v_add_f32_e32 v39, 0x40c00000, v39
-; GFX11-NEXT: v_add_nc_u32_e32 v36, 0x7fff, v36
-; GFX11-NEXT: v_lshrrev_b32_e32 v33, 16, v33
-; GFX11-NEXT: v_and_b32_e32 v34, 0xffff, v34
-; GFX11-NEXT: v_bfe_u32 v50, v37, 16, 1
-; GFX11-NEXT: v_bfe_u32 v38, v39, 16, 1
-; GFX11-NEXT: v_cndmask_b32_e32 v36, v36, v49, vcc_lo
-; GFX11-NEXT: v_or_b32_e32 v54, 0x400000, v39
-; GFX11-NEXT: v_cmp_u_f32_e32 vcc_lo, v39, v39
-; GFX11-NEXT: v_dual_add_f32 v50, 0x40c00000, v51 :: v_dual_add_nc_u32 v49, v50, v37
-; GFX11-NEXT: v_bfe_u32 v51, v48, 16, 1
-; GFX11-NEXT: v_add_nc_u32_e32 v38, v38, v39
-; GFX11-NEXT: v_or_b32_e32 v53, 0x400000, v37
-; GFX11-NEXT: s_delay_alu instid0(VALU_DEP_4)
-; GFX11-NEXT: v_add_nc_u32_e32 v49, 0x7fff, v49
-; GFX11-NEXT: v_bfe_u32 v52, v50, 16, 1
-; GFX11-NEXT: v_add_nc_u32_e32 v51, v51, v48
-; GFX11-NEXT: v_add_nc_u32_e32 v38, 0x7fff, v38
-; GFX11-NEXT: v_lshrrev_b32_e32 v36, 16, v36
-; GFX11-NEXT: v_lshrrev_b32_e32 v35, 16, v35
-; GFX11-NEXT: v_add_nc_u32_e32 v52, v52, v50
-; GFX11-NEXT: s_delay_alu instid0(VALU_DEP_4) | instskip(SKIP_2) | instid1(VALU_DEP_4)
-; GFX11-NEXT: v_dual_cndmask_b32 v38, v38, v54 :: v_dual_add_nc_u32 v51, 0x7fff, v51
-; GFX11-NEXT: v_cmp_u_f32_e32 vcc_lo, v48, v48
-; GFX11-NEXT: v_and_b32_e32 v36, 0xffff, v36
-; GFX11-NEXT: v_add_nc_u32_e32 v39, 0x7fff, v52
-; GFX11-NEXT: v_or_b32_e32 v52, 0x400000, v50
-; GFX11-NEXT: v_lshrrev_b32_e32 v38, 16, v38
-; GFX11-NEXT: v_cndmask_b32_e32 v48, v51, v55, vcc_lo
-; GFX11-NEXT: v_cmp_u_f32_e32 vcc_lo, v37, v37
-; GFX11-NEXT: v_lshl_or_b32 v184, v32, 16, v31
-; GFX11-NEXT: v_lshl_or_b32 v175, v33, 16, v34
-; GFX11-NEXT: v_and_b32_e32 v38, 0xffff, v38
-; GFX11-NEXT: v_lshrrev_b32_e32 v48, 16, v48
-; GFX11-NEXT: v_cndmask_b32_e32 v37, v49, v53, vcc_lo
-; GFX11-NEXT: v_cmp_u_f32_e32 vcc_lo, v50, v50
-; GFX11-NEXT: v_lshl_or_b32 v173, v35, 16, v36
-; GFX11-NEXT: v_lshl_or_b32 v97, v8, 16, v10
-; GFX11-NEXT: v_and_b32_e32 v48, 0xffff, v48
-; GFX11-NEXT: v_lshrrev_b32_e32 v37, 16, v37
-; GFX11-NEXT: v_cndmask_b32_e32 v39, v39, v52, vcc_lo
-; GFX11-NEXT: v_lshl_or_b32 v86, v9, 16, v12
-; GFX11-NEXT: v_lshl_or_b32 v76, v11, 16, v13
-; GFX11-NEXT: v_lshl_or_b32 v67, v14, 16, v17
-; GFX11-NEXT: v_lshl_or_b32 v172, v37, 16, v38
-; GFX11-NEXT: v_lshrrev_b32_e32 v39, 16, v39
-; GFX11-NEXT: v_lshl_or_b32 v59, v16, 16, v19
-; GFX11-NEXT: v_lshl_or_b32 v52, v18, 16, v20
-; GFX11-NEXT: v_lshl_or_b32 v46, v21, 16, v23
-; GFX11-NEXT: v_lshl_or_b32 v41, v22, 16, v25
-; GFX11-NEXT: v_lshl_or_b32 v183, v39, 16, v48
-; GFX11-NEXT: v_lshl_or_b32 v37, v24, 16, v27
-; GFX11-NEXT: v_lshl_or_b32 v34, v26, 16, v28
-; GFX11-NEXT: v_lshl_or_b32 v32, v29, 16, v30
-; GFX11-NEXT: .LBB63_3: ; %end
-; GFX11-NEXT: v_dual_mov_b32 v3, v41 :: v_dual_mov_b32 v4, v46
-; GFX11-NEXT: v_dual_mov_b32 v6, v59 :: v_dual_mov_b32 v9, v86
-; GFX11-NEXT: v_dual_mov_b32 v7, v67 :: v_dual_mov_b32 v8, v76
-; GFX11-NEXT: v_dual_mov_b32 v10, v97 :: v_dual_mov_b32 v13, v136
-; GFX11-NEXT: v_dual_mov_b32 v11, v109 :: v_dual_mov_b32 v12, v122
-; GFX11-NEXT: v_dual_mov_b32 v14, v151 :: v_dual_mov_b32 v17, v172
-; GFX11-NEXT: v_dual_mov_b32 v18, v173 :: v_dual_mov_b32 v19, v175
-; GFX11-NEXT: v_dual_mov_b32 v20, v184 :: v_dual_mov_b32 v23, v174
-; GFX11-NEXT: v_dual_mov_b32 v22, v171 :: v_dual_mov_b32 v25, v169
-; GFX11-NEXT: v_dual_mov_b32 v26, v170 :: v_dual_mov_b32 v29, v180
-; GFX11-NEXT: s_clause 0x1f
-; GFX11-NEXT: scratch_load_b32 v184, off, s32
-; GFX11-NEXT: scratch_load_b32 v175, off, s32 offset:4
-; GFX11-NEXT: scratch_load_b32 v174, off, s32 offset:8
-; GFX11-NEXT: scratch_load_b32 v173, off, s32 offset:12
-; GFX11-NEXT: scratch_load_b32 v172, off, s32 offset:16
-; GFX11-NEXT: scratch_load_b32 v171, off, s32 offset:20
-; GFX11-NEXT: scratch_load_b32 v170, off, s32 offset:24
-; GFX11-NEXT: scratch_load_b32 v169, off, s32 offset:28
-; GFX11-NEXT: scratch_load_b32 v168, off, s32 offset:32
-; GFX11-NEXT: scratch_load_b32 v159, off, s32 offset:36
-; GFX11-NEXT: scratch_load_b32 v158, off, s32 offset:40
-; GFX11-NEXT: scratch_load_b32 v157, off, s32 offset:44
-; GFX11-NEXT: scratch_load_b32 v156, off, s32 offset:48
-; GFX11-NEXT: scratch_load_b32 v155, off, s32 offset:52
-; GFX11-NEXT: scratch_load_b32 v154, off, s32 offset:56
-; GFX11-NEXT: scratch_load_b32 v153, off, s32 offset:60
-; GFX11-NEXT: scratch_load_b32 v152, off, s32 offset:64
-; GFX11-NEXT: scratch_load_b32 v143, off, s32 offset:68
-; GFX11-NEXT: scratch_load_b32 v142, off, s32 offset:72
-; GFX11-NEXT: scratch_load_b32 v141, off, s32 offset:76
-; GFX11-NEXT: scratch_load_b32 v140, off, s32 offset:80
-; GFX11-NEXT: scratch_load_b32 v139, off, s32 offset:84
-; GFX11-NEXT: scratch_load_b32 v138, off, s32 offset:88
-; GFX11-NEXT: scratch_load_b32 v137, off, s32 offset:92
-; GFX11-NEXT: scratch_load_b32 v136, off, s32 offset:96
-; GFX11-NEXT: scratch_load_b32 v127, off, s32 offset:100
-; GFX11-NEXT: scratch_load_b32 v126, off, s32 offset:104
-; GFX11-NEXT: scratch_load_b32 v125, off, s32 offset:108
-; GFX11-NEXT: scratch_load_b32 v124, off, s32 offset:112
-; GFX11-NEXT: scratch_load_b32 v123, off, s32 offset:116
-; GFX11-NEXT: scratch_load_b32 v122, off, s32 offset:120
-; GFX11-NEXT: scratch_load_b32 v121, off, s32 offset:124
-; GFX11-NEXT: s_clause 0x1f
-; GFX11-NEXT: scratch_load_b32 v120, off, s32 offset:128
-; GFX11-NEXT: scratch_load_b32 v111, off, s32 offset:132
-; GFX11-NEXT: scratch_load_b32 v110, off, s32 offset:136
-; GFX11-NEXT: scratch_load_b32 v109, off, s32 offset:140
-; GFX11-NEXT: scratch_load_b32 v108, off, s32 offset:144
-; GFX11-NEXT: scratch_load_b32 v107, off, s32 offset:148
-; GFX11-NEXT: scratch_load_b32 v106, off, s32 offset:152
-; GFX11-NEXT: scratch_load_b32 v105, off, s32 offset:156
-; GFX11-NEXT: scratch_load_b32 v104, off, s32 offset:160
-; GFX11-NEXT: scratch_load_b32 v95, off, s32 offset:164
-; GFX11-NEXT: scratch_load_b32 v94, off, s32 offset:168
-; GFX11-NEXT: scratch_load_b32 v93, off, s32 offset:172
-; GFX11-NEXT: scratch_load_b32 v92, off, s32 offset:176
-; GFX11-NEXT: scratch_load_b32 v91, off, s32 offset:180
-; GFX11-NEXT: scratch_load_b32 v90, off, s32 offset:184
-; GFX11-NEXT: scratch_load_b32 v89, off, s32 offset:188
-; GFX11-NEXT: scratch_load_b32 v88, off, s32 offset:192
-; GFX11-NEXT: scratch_load_b32 v79, off, s32 offset:196
-; GFX11-NEXT: scratch_load_b32 v78, off, s32 offset:200
-; GFX11-NEXT: scratch_load_b32 v77, off, s32 offset:204
-; GFX11-NEXT: scratch_load_b32 v76, off, s32 offset:208
-; GFX11-NEXT: scratch_load_b32 v75, off, s32 offset:212
-; GFX11-NEXT: scratch_load_b32 v74, off, s32 offset:216
-; GFX11-NEXT: scratch_load_b32 v73, off, s32 offset:220
-; GFX11-NEXT: scratch_load_b32 v72, off, s32 offset:224
-; GFX11-NEXT: scratch_load_b32 v63, off, s32 offset:228
-; GFX11-NEXT: scratch_load_b32 v62, off, s32 offset:232
-; GFX11-NEXT: scratch_load_b32 v61, off, s32 offset:236
-; GFX11-NEXT: scratch_load_b32 v60, off, s32 offset:240
-; GFX11-NEXT: scratch_load_b32 v59, off, s32 offset:244
-; GFX11-NEXT: scratch_load_b32 v58, off, s32 offset:248
-; GFX11-NEXT: scratch_load_b32 v57, off, s32 offset:252
-; GFX11-NEXT: s_clause 0x8
-; GFX11-NEXT: scratch_load_b32 v56, off, s32 offset:256
-; GFX11-NEXT: scratch_load_b32 v47, off, s32 offset:260
-; GFX11-NEXT: scratch_load_b32 v46, off, s32 offset:264
-; GFX11-NEXT: scratch_load_b32 v45, off, s32 offset:268
-; GFX11-NEXT: scratch_load_b32 v44, off, s32 offset:272
-; GFX11-NEXT: scratch_load_b32 v43, off, s32 offset:276
-; GFX11-NEXT: scratch_load_b32 v42, off, s32 offset:280
-; GFX11-NEXT: scratch_load_b32 v41, off, s32 offset:284
-; GFX11-NEXT: scratch_load_b32 v40, off, s32 offset:288
-; GFX11-NEXT: v_dual_mov_b32 v0, v32 :: v_dual_mov_b32 v1, v34
-; GFX11-NEXT: v_dual_mov_b32 v2, v37 :: v_dual_mov_b32 v5, v52
-; GFX11-NEXT: v_dual_mov_b32 v16, v183 :: v_dual_mov_b32 v21, v177
-; GFX11-NEXT: v_dual_mov_b32 v24, v176 :: v_dual_mov_b32 v27, v181
-; GFX11-NEXT: v_mov_b32_e32 v28, v182
-; GFX11-NEXT: v_dual_mov_b32 v30, v179 :: v_dual_mov_b32 v31, v178
-; GFX11-NEXT: s_waitcnt vmcnt(0)
-; GFX11-NEXT: s_setpc_b64 s[30:31]
-; GFX11-NEXT: .LBB63_4:
-; GFX11-NEXT: ; implicit-def: $vgpr32_vgpr33_vgpr34_vgpr35_vgpr36_vgpr37_vgpr38_vgpr39_vgpr40_vgpr41_vgpr42_vgpr43_vgpr44_vgpr45_vgpr46_vgpr47_vgpr48_vgpr49_vgpr50_vgpr51_vgpr52_vgpr53_vgpr54_vgpr55_vgpr56_vgpr57_vgpr58_vgpr59_vgpr60_vgpr61_vgpr62_vgpr63
-; GFX11-NEXT: ; implicit-def: $vgpr33_vgpr34_vgpr35_vgpr36_vgpr37_vgpr38_vgpr39_vgpr40_vgpr41_vgpr42_vgpr43_vgpr44_vgpr45_vgpr46_vgpr47_vgpr48_vgpr49_vgpr50_vgpr51_vgpr52_vgpr53_vgpr54_vgpr55_vgpr56_vgpr57_vgpr58_vgpr59_vgpr60_vgpr61_vgpr62_vgpr63_vgpr64
-; GFX11-NEXT: ; implicit-def: $vgpr0_vgpr1_vgpr2_vgpr3_vgpr4_vgpr5_vgpr6_vgpr7_vgpr8_vgpr9_vgpr10_vgpr11_vgpr12_vgpr13_vgpr14_vgpr15_vgpr16_vgpr17_vgpr18_vgpr19_vgpr20_vgpr21_vgpr22_vgpr23_vgpr24_vgpr25_vgpr26_vgpr27_vgpr28_vgpr29_vgpr30_vgpr31
-; GFX11-NEXT: ; implicit-def: $vgpr35_vgpr36_vgpr37_vgpr38_vgpr39_vgpr40_vgpr41_vgpr42_vgpr43_vgpr44_vgpr45_vgpr46_vgpr47_vgpr48_vgpr49_vgpr50_vgpr51_vgpr52_vgpr53_vgpr54_vgpr55_vgpr56_vgpr57_vgpr58_vgpr59_vgpr60_vgpr61_vgpr62_vgpr63_vgpr64_vgpr65_vgpr66
-; GFX11-NEXT: ; implicit-def: $vgpr38_vgpr39_vgpr40_vgpr41_vgpr42_vgpr43_vgpr44_vgpr45_vgpr46_vgpr47_vgpr48_vgpr49_vgpr50_vgpr51_vgpr52_vgpr53_vgpr54_vgpr55_vgpr56_vgpr57_vgpr58_vgpr59_vgpr60_vgpr61_vgpr62_vgpr63_vgpr64_vgpr65_vgpr66_vgpr67_vgpr68_vgpr69
-; GFX11-NEXT: ; implicit-def: $vgpr42_vgpr43_vgpr44_vgpr45_vgpr46_vgpr47_vgpr48_vgpr49_vgpr50_vgpr51_vgpr52_vgpr53_vgpr54_vgpr55_vgpr56_vgpr57_vgpr58_vgpr59_vgpr60_vgpr61_vgpr62_vgpr63_vgpr64_vgpr65_vgpr66_vgpr67_vgpr68_vgpr69_vgpr70_vgpr71_vgpr72_vgpr73
-; GFX11-NEXT: ; implicit-def: $vgpr47_vgpr48_vgpr49_vgpr50_vgpr51_vgpr52_vgpr53_vgpr54_vgpr55_vgpr56_vgpr57_vgpr58_vgpr59_vgpr60_vgpr61_vgpr62_vgpr63_vgpr64_vgpr65_vgpr66_vgpr67_vgpr68_vgpr69_vgpr70_vgpr71_vgpr72_vgpr73_vgpr74_vgpr75_vgpr76_vgpr77_vgpr78
-; GFX11-NEXT: ; implicit-def: $vgpr53_vgpr54_vgpr55_vgpr56_vgpr57_vgpr58_vgpr59_vgpr60_vgpr61_vgpr62_vgpr63_vgpr64_vgpr65_vgpr66_vgpr67_vgpr68_vgpr69_vgpr70_vgpr71_vgpr72_vgpr73_vgpr74_vgpr75_vgpr76_vgpr77_vgpr78_vgpr79_vgpr80_vgpr81_vgpr82_vgpr83_vgpr84
-; GFX11-NEXT: ; implicit-def: $vgpr60_vgpr61_vgpr62_vgpr63_vgpr64_vgpr65_vgpr66_vgpr67_vgpr68_vgpr69_vgpr70_vgpr71_vgpr72_vgpr73_vgpr74_vgpr75_vgpr76_vgpr77_vgpr78_vgpr79_vgpr80_vgpr81_vgpr82_vgpr83_vgpr84_vgpr85_vgpr86_vgpr87_vgpr88_vgpr89_vgpr90_vgpr91
-; GFX11-NEXT: ; implicit-def: $vgpr68_vgpr69_vgpr70_vgpr71_vgpr72_vgpr73_vgpr74_vgpr75_vgpr76_vgpr77_vgpr78_vgpr79_vgpr80_vgpr81_vgpr82_vgpr83_vgpr84_vgpr85_vgpr86_vgpr87_vgpr88_vgpr89_vgpr90_vgpr91_vgpr92_vgpr93_vgpr94_vgpr95_vgpr96_vgpr97_vgpr98_vgpr99
-; GFX11-NEXT: ; implicit-def: $vgpr77_vgpr78_vgpr79_vgpr80_vgpr81_vgpr82_vgpr83_vgpr84_vgpr85_vgpr86_vgpr87_vgpr88_vgpr89_vgpr90_vgpr91_vgpr92_vgpr93_vgpr94_vgpr95_vgpr96_vgpr97_vgpr98_vgpr99_vgpr100_vgpr101_vgpr102_vgpr103_vgpr104_vgpr105_vgpr106_vgpr107_vgpr108
-; GFX11-NEXT: ; implicit-def: $vgpr87_vgpr88_vgpr89_vgpr90_vgpr91_vgpr92_vgpr93_vgpr94_vgpr95_vgpr96_vgpr97_vgpr98_vgpr99_vgpr100_vgpr101_vgpr102_vgpr103_vgpr104_vgpr105_vgpr106_vgpr107_vgpr108_vgpr109_vgpr110_vgpr111_vgpr112_vgpr113_vgpr114_vgpr115_vgpr116_vgpr117_vgpr118
-; GFX11-NEXT: ; implicit-def: $vgpr98_vgpr99_vgpr100_vgpr101_vgpr102_vgpr103_vgpr104_vgpr105_vgpr106_vgpr107_vgpr108_vgpr109_vgpr110_vgpr111_vgpr112_vgpr113_vgpr114_vgpr115_vgpr116_vgpr117_vgpr118_vgpr119_vgpr120_vgpr121_vgpr122_vgpr123_vgpr124_vgpr125_vgpr126_vgpr127_vgpr128_vgpr129
-; GFX11-NEXT: ; implicit-def: $vgpr110_vgpr111_vgpr112_vgpr113_vgpr114_vgpr115_vgpr116_vgpr117_vgpr118_vgpr119_vgpr120_vgpr121_vgpr122_vgpr123_vgpr124_vgpr125_vgpr126_vgpr127_vgpr128_vgpr129_vgpr130_vgpr131_vgpr132_vgpr133_vgpr134_vgpr135_vgpr136_vgpr137_vgpr138_vgpr139_vgpr140_vgpr141
-; GFX11-NEXT: ; implicit-def: $vgpr123_vgpr124_vgpr125_vgpr126_vgpr127_vgpr128_vgpr129_vgpr130_vgpr131_vgpr132_vgpr133_vgpr134_vgpr135_vgpr136_vgpr137_vgpr138_vgpr139_vgpr140_vgpr141_vgpr142_vgpr143_vgpr144_vgpr145_vgpr146_vgpr147_vgpr148_vgpr149_vgpr150_vgpr151_vgpr152_vgpr153_vgpr154
-; GFX11-NEXT: ; implicit-def: $vgpr137_vgpr138_vgpr139_vgpr140_vgpr141_vgpr142_vgpr143_vgpr144_vgpr145_vgpr146_vgpr147_vgpr148_vgpr149_vgpr150_vgpr151_vgpr152_vgpr153_vgpr154_vgpr155_vgpr156_vgpr157_vgpr158_vgpr159_vgpr160_vgpr161_vgpr162_vgpr163_vgpr164_vgpr165_vgpr166_vgpr167_vgpr168
-; GFX11-NEXT: s_branch .LBB63_2
+; GFX11-TRUE16-LABEL: bitcast_v64bf16_to_v16i64_scalar:
+; GFX11-TRUE16: ; %bb.0:
+; GFX11-TRUE16-NEXT: s_waitcnt vmcnt(0) expcnt(0) lgkmcnt(0)
+; GFX11-TRUE16-NEXT: v_cmp_ne_u32_e32 vcc_lo, 0, v14
+; GFX11-TRUE16-NEXT: s_clause 0x1f
+; GFX11-TRUE16-NEXT: scratch_store_b32 off, v40, s32 offset:280
+; GFX11-TRUE16-NEXT: scratch_store_b32 off, v41, s32 offset:276
+; GFX11-TRUE16-NEXT: scratch_store_b32 off, v42, s32 offset:272
+; GFX11-TRUE16-NEXT: scratch_store_b32 off, v43, s32 offset:268
+; GFX11-TRUE16-NEXT: scratch_store_b32 off, v44, s32 offset:264
+; GFX11-TRUE16-NEXT: scratch_store_b32 off, v45, s32 offset:260
+; GFX11-TRUE16-NEXT: scratch_store_b32 off, v46, s32 offset:256
+; GFX11-TRUE16-NEXT: scratch_store_b32 off, v47, s32 offset:252
+; GFX11-TRUE16-NEXT: scratch_store_b32 off, v56, s32 offset:248
+; GFX11-TRUE16-NEXT: scratch_store_b32 off, v57, s32 offset:244
+; GFX11-TRUE16-NEXT: scratch_store_b32 off, v58, s32 offset:240
+; GFX11-TRUE16-NEXT: scratch_store_b32 off, v59, s32 offset:236
+; GFX11-TRUE16-NEXT: scratch_store_b32 off, v60, s32 offset:232
+; GFX11-TRUE16-NEXT: scratch_store_b32 off, v61, s32 offset:228
+; GFX11-TRUE16-NEXT: scratch_store_b32 off, v62, s32 offset:224
+; GFX11-TRUE16-NEXT: scratch_store_b32 off, v63, s32 offset:220
+; GFX11-TRUE16-NEXT: scratch_store_b32 off, v72, s32 offset:216
+; GFX11-TRUE16-NEXT: scratch_store_b32 off, v73, s32 offset:212
+; GFX11-TRUE16-NEXT: scratch_store_b32 off, v74, s32 offset:208
+; GFX11-TRUE16-NEXT: scratch_store_b32 off, v75, s32 offset:204
+; GFX11-TRUE16-NEXT: scratch_store_b32 off, v76, s32 offset:200
+; GFX11-TRUE16-NEXT: scratch_store_b32 off, v77, s32 offset:196
+; GFX11-TRUE16-NEXT: scratch_store_b32 off, v78, s32 offset:192
+; GFX11-TRUE16-NEXT: scratch_store_b32 off, v79, s32 offset:188
+; GFX11-TRUE16-NEXT: scratch_store_b32 off, v88, s32 offset:184
+; GFX11-TRUE16-NEXT: scratch_store_b32 off, v89, s32 offset:180
+; GFX11-TRUE16-NEXT: scratch_store_b32 off, v90, s32 offset:176
+; GFX11-TRUE16-NEXT: scratch_store_b32 off, v91, s32 offset:172
+; GFX11-TRUE16-NEXT: scratch_store_b32 off, v92, s32 offset:168
+; GFX11-TRUE16-NEXT: scratch_store_b32 off, v93, s32 offset:164
+; GFX11-TRUE16-NEXT: scratch_store_b32 off, v94, s32 offset:160
+; GFX11-TRUE16-NEXT: scratch_store_b32 off, v95, s32 offset:156
+; GFX11-TRUE16-NEXT: s_clause 0x1f
+; GFX11-TRUE16-NEXT: scratch_store_b32 off, v104, s32 offset:152
+; GFX11-TRUE16-NEXT: scratch_store_b32 off, v105, s32 offset:148
+; GFX11-TRUE16-NEXT: scratch_store_b32 off, v106, s32 offset:144
+; GFX11-TRUE16-NEXT: scratch_store_b32 off, v107, s32 offset:140
+; GFX11-TRUE16-NEXT: scratch_store_b32 off, v108, s32 offset:136
+; GFX11-TRUE16-NEXT: scratch_store_b32 off, v109, s32 offset:132
+; GFX11-TRUE16-NEXT: scratch_store_b32 off, v110, s32 offset:128
+; GFX11-TRUE16-NEXT: scratch_store_b32 off, v111, s32 offset:124
+; GFX11-TRUE16-NEXT: scratch_store_b32 off, v120, s32 offset:120
+; GFX11-TRUE16-NEXT: scratch_store_b32 off, v121, s32 offset:116
+; GFX11-TRUE16-NEXT: scratch_store_b32 off, v122, s32 offset:112
+; GFX11-TRUE16-NEXT: scratch_store_b32 off, v123, s32 offset:108
+; GFX11-TRUE16-NEXT: scratch_store_b32 off, v124, s32 offset:104
+; GFX11-TRUE16-NEXT: scratch_store_b32 off, v125, s32 offset:100
+; GFX11-TRUE16-NEXT: scratch_store_b32 off, v126, s32 offset:96
+; GFX11-TRUE16-NEXT: scratch_store_b32 off, v127, s32 offset:92
+; GFX11-TRUE16-NEXT: scratch_store_b32 off, v136, s32 offset:88
+; GFX11-TRUE16-NEXT: scratch_store_b32 off, v137, s32 offset:84
+; GFX11-TRUE16-NEXT: scratch_store_b32 off, v138, s32 offset:80
+; GFX11-TRUE16-NEXT: scratch_store_b32 off, v139, s32 offset:76
+; GFX11-TRUE16-NEXT: scratch_store_b32 off, v140, s32 offset:72
+; GFX11-TRUE16-NEXT: scratch_store_b32 off, v141, s32 offset:68
+; GFX11-TRUE16-NEXT: scratch_store_b32 off, v142, s32 offset:64
+; GFX11-TRUE16-NEXT: scratch_store_b32 off, v143, s32 offset:60
+; GFX11-TRUE16-NEXT: scratch_store_b32 off, v152, s32 offset:56
+; GFX11-TRUE16-NEXT: scratch_store_b32 off, v153, s32 offset:52
+; GFX11-TRUE16-NEXT: scratch_store_b32 off, v154, s32 offset:48
+; GFX11-TRUE16-NEXT: scratch_store_b32 off, v155, s32 offset:44
+; GFX11-TRUE16-NEXT: scratch_store_b32 off, v156, s32 offset:40
+; GFX11-TRUE16-NEXT: scratch_store_b32 off, v157, s32 offset:36
+; GFX11-TRUE16-NEXT: scratch_store_b32 off, v158, s32 offset:32
+; GFX11-TRUE16-NEXT: scratch_store_b32 off, v159, s32 offset:28
+; GFX11-TRUE16-NEXT: s_clause 0x6
+; GFX11-TRUE16-NEXT: scratch_store_b32 off, v168, s32 offset:24
+; GFX11-TRUE16-NEXT: scratch_store_b32 off, v169, s32 offset:20
+; GFX11-TRUE16-NEXT: scratch_store_b32 off, v170, s32 offset:16
+; GFX11-TRUE16-NEXT: scratch_store_b32 off, v171, s32 offset:12
+; GFX11-TRUE16-NEXT: scratch_store_b32 off, v172, s32 offset:8
+; GFX11-TRUE16-NEXT: scratch_store_b32 off, v173, s32 offset:4
+; GFX11-TRUE16-NEXT: scratch_store_b32 off, v174, s32
+; GFX11-TRUE16-NEXT: v_dual_mov_b32 v167, v13 :: v_dual_mov_b32 v176, v12
+; GFX11-TRUE16-NEXT: v_dual_mov_b32 v177, v11 :: v_dual_mov_b32 v178, v10
+; GFX11-TRUE16-NEXT: v_dual_mov_b32 v179, v9 :: v_dual_mov_b32 v180, v8
+; GFX11-TRUE16-NEXT: v_dual_mov_b32 v181, v7 :: v_dual_mov_b32 v182, v6
+; GFX11-TRUE16-NEXT: v_dual_mov_b32 v183, v5 :: v_dual_mov_b32 v168, v4
+; GFX11-TRUE16-NEXT: v_dual_mov_b32 v169, v3 :: v_dual_mov_b32 v170, v2
+; GFX11-TRUE16-NEXT: v_dual_mov_b32 v171, v1 :: v_dual_mov_b32 v172, v0
+; GFX11-TRUE16-NEXT: v_dual_mov_b32 v174, s28 :: v_dual_mov_b32 v173, s29
+; GFX11-TRUE16-NEXT: s_mov_b32 s4, 0
+; GFX11-TRUE16-NEXT: s_and_b32 s5, vcc_lo, exec_lo
+; GFX11-TRUE16-NEXT: s_cbranch_scc0 .LBB63_4
+; GFX11-TRUE16-NEXT: ; %bb.1: ; %cmp.false
+; GFX11-TRUE16-NEXT: v_dual_mov_b32 v135, s0 :: v_dual_mov_b32 v134, s1
+; GFX11-TRUE16-NEXT: v_dual_mov_b32 v132, s2 :: v_dual_mov_b32 v129, s3
+; GFX11-TRUE16-NEXT: v_dual_mov_b32 v125, s16 :: v_dual_mov_b32 v120, s17
+; GFX11-TRUE16-NEXT: v_dual_mov_b32 v114, s18 :: v_dual_mov_b32 v107, s19
+; GFX11-TRUE16-NEXT: v_dual_mov_b32 v99, s20 :: v_dual_mov_b32 v90, s21
+; GFX11-TRUE16-NEXT: v_dual_mov_b32 v80, s22 :: v_dual_mov_b32 v69, s23
+; GFX11-TRUE16-NEXT: v_dual_mov_b32 v57, s24 :: v_dual_mov_b32 v44, s25
+; GFX11-TRUE16-NEXT: v_dual_mov_b32 v30, s26 :: v_dual_mov_b32 v15, s27
+; GFX11-TRUE16-NEXT: s_and_not1_b32 vcc_lo, exec_lo, s4
+; GFX11-TRUE16-NEXT: s_cbranch_vccnz .LBB63_3
+; GFX11-TRUE16-NEXT: .LBB63_2: ; %cmp.true
+; GFX11-TRUE16-NEXT: s_lshl_b32 s5, s27, 16
+; GFX11-TRUE16-NEXT: s_and_b32 s4, s27, 0xffff0000
+; GFX11-TRUE16-NEXT: v_add_f32_e64 v1, 0x40c00000, s5
+; GFX11-TRUE16-NEXT: v_add_f32_e64 v0, 0x40c00000, s4
+; GFX11-TRUE16-NEXT: s_and_b32 s4, s26, 0xffff0000
+; GFX11-TRUE16-NEXT: s_lshl_b32 s6, s26, 16
+; GFX11-TRUE16-NEXT: v_add_f32_e64 v3, 0x40c00000, s4
+; GFX11-TRUE16-NEXT: v_bfe_u32 v4, v1, 16, 1
+; GFX11-TRUE16-NEXT: v_bfe_u32 v2, v0, 16, 1
+; GFX11-TRUE16-NEXT: v_or_b32_e32 v9, 0x400000, v1
+; GFX11-TRUE16-NEXT: v_or_b32_e32 v8, 0x400000, v0
+; GFX11-TRUE16-NEXT: v_bfe_u32 v7, v3, 16, 1
+; GFX11-TRUE16-NEXT: v_add_nc_u32_e32 v4, v4, v1
+; GFX11-TRUE16-NEXT: v_cmp_u_f32_e32 vcc_lo, v0, v0
+; GFX11-TRUE16-NEXT: s_and_b32 s5, s25, 0xffff0000
+; GFX11-TRUE16-NEXT: s_lshl_b32 s7, s25, 16
+; GFX11-TRUE16-NEXT: v_add_f32_e64 v5, 0x40c00000, s6
+; GFX11-TRUE16-NEXT: v_add_nc_u32_e32 v4, 0x7fff, v4
+; GFX11-TRUE16-NEXT: v_add_nc_u32_e32 v2, v2, v0
+; GFX11-TRUE16-NEXT: v_add_f32_e64 v6, 0x40c00000, s5
+; GFX11-TRUE16-NEXT: s_and_b32 s4, s24, 0xffff0000
+; GFX11-TRUE16-NEXT: s_delay_alu instid0(VALU_DEP_2) | instskip(NEXT) | instid1(VALU_DEP_1)
+; GFX11-TRUE16-NEXT: v_add_nc_u32_e32 v2, 0x7fff, v2
+; GFX11-TRUE16-NEXT: v_dual_cndmask_b32 v0, v2, v8 :: v_dual_add_nc_u32 v7, v7, v3
+; GFX11-TRUE16-NEXT: v_cmp_u_f32_e32 vcc_lo, v1, v1
+; GFX11-TRUE16-NEXT: v_or_b32_e32 v2, 0x400000, v3
+; GFX11-TRUE16-NEXT: v_bfe_u32 v8, v5, 16, 1
+; GFX11-TRUE16-NEXT: s_delay_alu instid0(VALU_DEP_4) | instskip(SKIP_4) | instid1(VALU_DEP_4)
+; GFX11-TRUE16-NEXT: v_lshrrev_b32_e32 v0, 16, v0
+; GFX11-TRUE16-NEXT: v_cndmask_b32_e32 v1, v4, v9, vcc_lo
+; GFX11-TRUE16-NEXT: v_add_f32_e64 v4, 0x40c00000, s7
+; GFX11-TRUE16-NEXT: v_bfe_u32 v9, v6, 16, 1
+; GFX11-TRUE16-NEXT: v_cmp_u_f32_e32 vcc_lo, v3, v3
+; GFX11-TRUE16-NEXT: v_lshrrev_b32_e32 v15, 16, v1
+; GFX11-TRUE16-NEXT: s_delay_alu instid0(VALU_DEP_4)
+; GFX11-TRUE16-NEXT: v_or_b32_e32 v10, 0x400000, v4
+; GFX11-TRUE16-NEXT: v_add_nc_u32_e32 v7, 0x7fff, v7
+; GFX11-TRUE16-NEXT: v_bfe_u32 v3, v4, 16, 1
+; GFX11-TRUE16-NEXT: v_mov_b16_e32 v15.h, v0.l
+; GFX11-TRUE16-NEXT: v_add_f32_e64 v0, 0x40c00000, s4
+; GFX11-TRUE16-NEXT: s_lshl_b32 s4, s24, 16
+; GFX11-TRUE16-NEXT: v_dual_cndmask_b32 v2, v7, v2 :: v_dual_add_nc_u32 v7, v8, v5
+; GFX11-TRUE16-NEXT: v_add_nc_u32_e32 v8, v9, v6
+; GFX11-TRUE16-NEXT: v_cmp_u_f32_e32 vcc_lo, v6, v6
+; GFX11-TRUE16-NEXT: v_or_b32_e32 v9, 0x400000, v5
+; GFX11-TRUE16-NEXT: s_delay_alu instid0(VALU_DEP_4) | instskip(SKIP_4) | instid1(VALU_DEP_4)
+; GFX11-TRUE16-NEXT: v_lshrrev_b32_e32 v1, 16, v2
+; GFX11-TRUE16-NEXT: v_add_nc_u32_e32 v2, v3, v4
+; GFX11-TRUE16-NEXT: v_add_nc_u32_e32 v3, 0x7fff, v7
+; GFX11-TRUE16-NEXT: v_add_nc_u32_e32 v7, 0x7fff, v8
+; GFX11-TRUE16-NEXT: v_or_b32_e32 v8, 0x400000, v6
+; GFX11-TRUE16-NEXT: v_add_nc_u32_e32 v2, 0x7fff, v2
+; GFX11-TRUE16-NEXT: s_delay_alu instid0(VALU_DEP_2) | instskip(SKIP_3) | instid1(VALU_DEP_4)
+; GFX11-TRUE16-NEXT: v_cndmask_b32_e32 v6, v7, v8, vcc_lo
+; GFX11-TRUE16-NEXT: v_cmp_u_f32_e32 vcc_lo, v5, v5
+; GFX11-TRUE16-NEXT: v_cndmask_b32_e32 v3, v3, v9, vcc_lo
+; GFX11-TRUE16-NEXT: v_cmp_u_f32_e32 vcc_lo, v4, v4
+; GFX11-TRUE16-NEXT: v_lshrrev_b32_e32 v4, 16, v6
+; GFX11-TRUE16-NEXT: s_delay_alu instid0(VALU_DEP_3) | instskip(SKIP_4) | instid1(VALU_DEP_3)
+; GFX11-TRUE16-NEXT: v_lshrrev_b32_e32 v30, 16, v3
+; GFX11-TRUE16-NEXT: v_mov_b16_e32 v30.h, v1.l
+; GFX11-TRUE16-NEXT: v_bfe_u32 v1, v0, 16, 1
+; GFX11-TRUE16-NEXT: v_cndmask_b32_e32 v2, v2, v10, vcc_lo
+; GFX11-TRUE16-NEXT: v_cmp_u_f32_e32 vcc_lo, v0, v0
+; GFX11-TRUE16-NEXT: v_add_nc_u32_e32 v1, v1, v0
+; GFX11-TRUE16-NEXT: s_delay_alu instid0(VALU_DEP_3) | instskip(SKIP_2) | instid1(VALU_DEP_4)
+; GFX11-TRUE16-NEXT: v_lshrrev_b32_e32 v44, 16, v2
+; GFX11-TRUE16-NEXT: v_or_b32_e32 v2, 0x400000, v0
+; GFX11-TRUE16-NEXT: v_mov_b16_e32 v44.h, v4.l
+; GFX11-TRUE16-NEXT: v_add_nc_u32_e32 v1, 0x7fff, v1
+; GFX11-TRUE16-NEXT: s_delay_alu instid0(VALU_DEP_1) | instskip(SKIP_2) | instid1(VALU_DEP_2)
+; GFX11-TRUE16-NEXT: v_cndmask_b32_e32 v0, v1, v2, vcc_lo
+; GFX11-TRUE16-NEXT: v_add_f32_e64 v1, 0x40c00000, s4
+; GFX11-TRUE16-NEXT: s_and_b32 s4, s23, 0xffff0000
+; GFX11-TRUE16-NEXT: v_lshrrev_b32_e32 v0, 16, v0
+; GFX11-TRUE16-NEXT: s_delay_alu instid0(VALU_DEP_2) | instskip(SKIP_2) | instid1(VALU_DEP_3)
+; GFX11-TRUE16-NEXT: v_bfe_u32 v2, v1, 16, 1
+; GFX11-TRUE16-NEXT: v_or_b32_e32 v3, 0x400000, v1
+; GFX11-TRUE16-NEXT: v_cmp_u_f32_e32 vcc_lo, v1, v1
+; GFX11-TRUE16-NEXT: v_add_nc_u32_e32 v2, v2, v1
+; GFX11-TRUE16-NEXT: s_delay_alu instid0(VALU_DEP_1) | instskip(NEXT) | instid1(VALU_DEP_1)
+; GFX11-TRUE16-NEXT: v_add_nc_u32_e32 v2, 0x7fff, v2
+; GFX11-TRUE16-NEXT: v_cndmask_b32_e32 v1, v2, v3, vcc_lo
+; GFX11-TRUE16-NEXT: s_delay_alu instid0(VALU_DEP_1) | instskip(SKIP_3) | instid1(VALU_DEP_1)
+; GFX11-TRUE16-NEXT: v_lshrrev_b32_e32 v57, 16, v1
+; GFX11-TRUE16-NEXT: v_mov_b16_e32 v57.h, v0.l
+; GFX11-TRUE16-NEXT: v_add_f32_e64 v0, 0x40c00000, s4
+; GFX11-TRUE16-NEXT: s_lshl_b32 s4, s23, 16
+; GFX11-TRUE16-NEXT: v_bfe_u32 v1, v0, 16, 1
+; GFX11-TRUE16-NEXT: v_or_b32_e32 v2, 0x400000, v0
+; GFX11-TRUE16-NEXT: v_cmp_u_f32_e32 vcc_lo, v0, v0
+; GFX11-TRUE16-NEXT: s_delay_alu instid0(VALU_DEP_3) | instskip(NEXT) | instid1(VALU_DEP_1)
+; GFX11-TRUE16-NEXT: v_add_nc_u32_e32 v1, v1, v0
+; GFX11-TRUE16-NEXT: v_add_nc_u32_e32 v1, 0x7fff, v1
+; GFX11-TRUE16-NEXT: s_delay_alu instid0(VALU_DEP_1) | instskip(SKIP_2) | instid1(VALU_DEP_2)
+; GFX11-TRUE16-NEXT: v_cndmask_b32_e32 v0, v1, v2, vcc_lo
+; GFX11-TRUE16-NEXT: v_add_f32_e64 v1, 0x40c00000, s4
+; GFX11-TRUE16-NEXT: s_and_b32 s4, s22, 0xffff0000
+; GFX11-TRUE16-NEXT: v_lshrrev_b32_e32 v0, 16, v0
+; GFX11-TRUE16-NEXT: s_delay_alu instid0(VALU_DEP_2) | instskip(SKIP_2) | instid1(VALU_DEP_3)
+; GFX11-TRUE16-NEXT: v_bfe_u32 v2, v1, 16, 1
+; GFX11-TRUE16-NEXT: v_or_b32_e32 v3, 0x400000, v1
+; GFX11-TRUE16-NEXT: v_cmp_u_f32_e32 vcc_lo, v1, v1
+; GFX11-TRUE16-NEXT: v_add_nc_u32_e32 v2, v2, v1
+; GFX11-TRUE16-NEXT: s_delay_alu instid0(VALU_DEP_1) | instskip(NEXT) | instid1(VALU_DEP_1)
+; GFX11-TRUE16-NEXT: v_add_nc_u32_e32 v2, 0x7fff, v2
+; GFX11-TRUE16-NEXT: v_cndmask_b32_e32 v1, v2, v3, vcc_lo
+; GFX11-TRUE16-NEXT: s_delay_alu instid0(VALU_DEP_1) | instskip(SKIP_3) | instid1(VALU_DEP_1)
+; GFX11-TRUE16-NEXT: v_lshrrev_b32_e32 v69, 16, v1
+; GFX11-TRUE16-NEXT: v_mov_b16_e32 v69.h, v0.l
+; GFX11-TRUE16-NEXT: v_add_f32_e64 v0, 0x40c00000, s4
+; GFX11-TRUE16-NEXT: s_lshl_b32 s4, s22, 16
+; GFX11-TRUE16-NEXT: v_bfe_u32 v1, v0, 16, 1
+; GFX11-TRUE16-NEXT: v_or_b32_e32 v2, 0x400000, v0
+; GFX11-TRUE16-NEXT: v_cmp_u_f32_e32 vcc_lo, v0, v0
+; GFX11-TRUE16-NEXT: s_delay_alu instid0(VALU_DEP_3) | instskip(NEXT) | instid1(VALU_DEP_1)
+; GFX11-TRUE16-NEXT: v_add_nc_u32_e32 v1, v1, v0
+; GFX11-TRUE16-NEXT: v_add_nc_u32_e32 v1, 0x7fff, v1
+; GFX11-TRUE16-NEXT: s_delay_alu instid0(VALU_DEP_1) | instskip(SKIP_2) | instid1(VALU_DEP_2)
+; GFX11-TRUE16-NEXT: v_cndmask_b32_e32 v0, v1, v2, vcc_lo
+; GFX11-TRUE16-NEXT: v_add_f32_e64 v1, 0x40c00000, s4
+; GFX11-TRUE16-NEXT: s_and_b32 s4, s21, 0xffff0000
+; GFX11-TRUE16-NEXT: v_lshrrev_b32_e32 v0, 16, v0
+; GFX11-TRUE16-NEXT: s_delay_alu instid0(VALU_DEP_2) | instskip(SKIP_2) | instid1(VALU_DEP_3)
+; GFX11-TRUE16-NEXT: v_bfe_u32 v2, v1, 16, 1
+; GFX11-TRUE16-NEXT: v_or_b32_e32 v3, 0x400000, v1
+; GFX11-TRUE16-NEXT: v_cmp_u_f32_e32 vcc_lo, v1, v1
+; GFX11-TRUE16-NEXT: v_add_nc_u32_e32 v2, v2, v1
+; GFX11-TRUE16-NEXT: s_delay_alu instid0(VALU_DEP_1) | instskip(NEXT) | instid1(VALU_DEP_1)
+; GFX11-TRUE16-NEXT: v_add_nc_u32_e32 v2, 0x7fff, v2
+; GFX11-TRUE16-NEXT: v_cndmask_b32_e32 v1, v2, v3, vcc_lo
+; GFX11-TRUE16-NEXT: s_delay_alu instid0(VALU_DEP_1) | instskip(SKIP_3) | instid1(VALU_DEP_1)
+; GFX11-TRUE16-NEXT: v_lshrrev_b32_e32 v80, 16, v1
+; GFX11-TRUE16-NEXT: v_mov_b16_e32 v80.h, v0.l
+; GFX11-TRUE16-NEXT: v_add_f32_e64 v0, 0x40c00000, s4
+; GFX11-TRUE16-NEXT: s_lshl_b32 s4, s21, 16
+; GFX11-TRUE16-NEXT: v_bfe_u32 v1, v0, 16, 1
+; GFX11-TRUE16-NEXT: v_or_b32_e32 v2, 0x400000, v0
+; GFX11-TRUE16-NEXT: v_cmp_u_f32_e32 vcc_lo, v0, v0
+; GFX11-TRUE16-NEXT: s_delay_alu instid0(VALU_DEP_3) | instskip(NEXT) | instid1(VALU_DEP_1)
+; GFX11-TRUE16-NEXT: v_add_nc_u32_e32 v1, v1, v0
+; GFX11-TRUE16-NEXT: v_add_nc_u32_e32 v1, 0x7fff, v1
+; GFX11-TRUE16-NEXT: s_delay_alu instid0(VALU_DEP_1) | instskip(SKIP_2) | instid1(VALU_DEP_1)
+; GFX11-TRUE16-NEXT: v_cndmask_b32_e32 v0, v1, v2, vcc_lo
+; GFX11-TRUE16-NEXT: v_add_f32_e64 v1, 0x40c00000, s4
+; GFX11-TRUE16-NEXT: s_and_b32 s4, s20, 0xffff0000
+; GFX11-TRUE16-NEXT: v_bfe_u32 v2, v1, 16, 1
+; GFX11-TRUE16-NEXT: v_or_b32_e32 v3, 0x400000, v1
+; GFX11-TRUE16-NEXT: v_cmp_u_f32_e32 vcc_lo, v1, v1
+; GFX11-TRUE16-NEXT: s_delay_alu instid0(VALU_DEP_3) | instskip(NEXT) | instid1(VALU_DEP_1)
+; GFX11-TRUE16-NEXT: v_add_nc_u32_e32 v2, v2, v1
+; GFX11-TRUE16-NEXT: v_add_nc_u32_e32 v2, 0x7fff, v2
+; GFX11-TRUE16-NEXT: s_delay_alu instid0(VALU_DEP_1) | instskip(SKIP_1) | instid1(VALU_DEP_2)
+; GFX11-TRUE16-NEXT: v_cndmask_b32_e32 v1, v2, v3, vcc_lo
+; GFX11-TRUE16-NEXT: v_lshrrev_b32_e32 v0, 16, v0
+; GFX11-TRUE16-NEXT: v_lshrrev_b32_e32 v90, 16, v1
+; GFX11-TRUE16-NEXT: s_delay_alu instid0(VALU_DEP_2) | instskip(SKIP_2) | instid1(VALU_DEP_1)
+; GFX11-TRUE16-NEXT: v_mov_b16_e32 v90.h, v0.l
+; GFX11-TRUE16-NEXT: v_add_f32_e64 v0, 0x40c00000, s4
+; GFX11-TRUE16-NEXT: s_lshl_b32 s4, s20, 16
+; GFX11-TRUE16-NEXT: v_bfe_u32 v1, v0, 16, 1
+; GFX11-TRUE16-NEXT: v_or_b32_e32 v2, 0x400000, v0
+; GFX11-TRUE16-NEXT: v_cmp_u_f32_e32 vcc_lo, v0, v0
+; GFX11-TRUE16-NEXT: s_delay_alu instid0(VALU_DEP_3) | instskip(NEXT) | instid1(VALU_DEP_1)
+; GFX11-TRUE16-NEXT: v_add_nc_u32_e32 v1, v1, v0
+; GFX11-TRUE16-NEXT: v_add_nc_u32_e32 v1, 0x7fff, v1
+; GFX11-TRUE16-NEXT: s_delay_alu instid0(VALU_DEP_1) | instskip(SKIP_2) | instid1(VALU_DEP_1)
+; GFX11-TRUE16-NEXT: v_cndmask_b32_e32 v0, v1, v2, vcc_lo
+; GFX11-TRUE16-NEXT: v_add_f32_e64 v1, 0x40c00000, s4
+; GFX11-TRUE16-NEXT: s_and_b32 s4, s19, 0xffff0000
+; GFX11-TRUE16-NEXT: v_bfe_u32 v2, v1, 16, 1
+; GFX11-TRUE16-NEXT: v_or_b32_e32 v3, 0x400000, v1
+; GFX11-TRUE16-NEXT: v_cmp_u_f32_e32 vcc_lo, v1, v1
+; GFX11-TRUE16-NEXT: s_delay_alu instid0(VALU_DEP_3) | instskip(NEXT) | instid1(VALU_DEP_1)
+; GFX11-TRUE16-NEXT: v_add_nc_u32_e32 v2, v2, v1
+; GFX11-TRUE16-NEXT: v_add_nc_u32_e32 v2, 0x7fff, v2
+; GFX11-TRUE16-NEXT: s_delay_alu instid0(VALU_DEP_1) | instskip(SKIP_1) | instid1(VALU_DEP_2)
+; GFX11-TRUE16-NEXT: v_cndmask_b32_e32 v1, v2, v3, vcc_lo
+; GFX11-TRUE16-NEXT: v_lshrrev_b32_e32 v0, 16, v0
+; GFX11-TRUE16-NEXT: v_lshrrev_b32_e32 v99, 16, v1
+; GFX11-TRUE16-NEXT: s_delay_alu instid0(VALU_DEP_2) | instskip(SKIP_2) | instid1(VALU_DEP_1)
+; GFX11-TRUE16-NEXT: v_mov_b16_e32 v99.h, v0.l
+; GFX11-TRUE16-NEXT: v_add_f32_e64 v0, 0x40c00000, s4
+; GFX11-TRUE16-NEXT: s_lshl_b32 s4, s19, 16
+; GFX11-TRUE16-NEXT: v_bfe_u32 v1, v0, 16, 1
+; GFX11-TRUE16-NEXT: v_or_b32_e32 v2, 0x400000, v0
+; GFX11-TRUE16-NEXT: v_cmp_u_f32_e32 vcc_lo, v0, v0
+; GFX11-TRUE16-NEXT: s_delay_alu instid0(VALU_DEP_3) | instskip(NEXT) | instid1(VALU_DEP_1)
+; GFX11-TRUE16-NEXT: v_add_nc_u32_e32 v1, v1, v0
+; GFX11-TRUE16-NEXT: v_add_nc_u32_e32 v1, 0x7fff, v1
+; GFX11-TRUE16-NEXT: s_delay_alu instid0(VALU_DEP_1) | instskip(SKIP_2) | instid1(VALU_DEP_1)
+; GFX11-TRUE16-NEXT: v_cndmask_b32_e32 v0, v1, v2, vcc_lo
+; GFX11-TRUE16-NEXT: v_add_f32_e64 v1, 0x40c00000, s4
+; GFX11-TRUE16-NEXT: s_and_b32 s4, s18, 0xffff0000
+; GFX11-TRUE16-NEXT: v_bfe_u32 v2, v1, 16, 1
+; GFX11-TRUE16-NEXT: v_or_b32_e32 v3, 0x400000, v1
+; GFX11-TRUE16-NEXT: v_cmp_u_f32_e32 vcc_lo, v1, v1
+; GFX11-TRUE16-NEXT: s_delay_alu instid0(VALU_DEP_3) | instskip(NEXT) | instid1(VALU_DEP_1)
+; GFX11-TRUE16-NEXT: v_add_nc_u32_e32 v2, v2, v1
+; GFX11-TRUE16-NEXT: v_add_nc_u32_e32 v2, 0x7fff, v2
+; GFX11-TRUE16-NEXT: s_delay_alu instid0(VALU_DEP_1) | instskip(SKIP_1) | instid1(VALU_DEP_2)
+; GFX11-TRUE16-NEXT: v_cndmask_b32_e32 v1, v2, v3, vcc_lo
+; GFX11-TRUE16-NEXT: v_lshrrev_b32_e32 v0, 16, v0
+; GFX11-TRUE16-NEXT: v_lshrrev_b32_e32 v107, 16, v1
+; GFX11-TRUE16-NEXT: s_delay_alu instid0(VALU_DEP_2) | instskip(SKIP_2) | instid1(VALU_DEP_1)
+; GFX11-TRUE16-NEXT: v_mov_b16_e32 v107.h, v0.l
+; GFX11-TRUE16-NEXT: v_add_f32_e64 v0, 0x40c00000, s4
+; GFX11-TRUE16-NEXT: s_lshl_b32 s4, s18, 16
+; GFX11-TRUE16-NEXT: v_bfe_u32 v1, v0, 16, 1
+; GFX11-TRUE16-NEXT: v_or_b32_e32 v2, 0x400000, v0
+; GFX11-TRUE16-NEXT: v_cmp_u_f32_e32 vcc_lo, v0, v0
+; GFX11-TRUE16-NEXT: s_delay_alu instid0(VALU_DEP_3) | instskip(NEXT) | instid1(VALU_DEP_1)
+; GFX11-TRUE16-NEXT: v_add_nc_u32_e32 v1, v1, v0
+; GFX11-TRUE16-NEXT: v_add_nc_u32_e32 v1, 0x7fff, v1
+; GFX11-TRUE16-NEXT: s_delay_alu instid0(VALU_DEP_1) | instskip(SKIP_2) | instid1(VALU_DEP_1)
+; GFX11-TRUE16-NEXT: v_cndmask_b32_e32 v0, v1, v2, vcc_lo
+; GFX11-TRUE16-NEXT: v_add_f32_e64 v1, 0x40c00000, s4
+; GFX11-TRUE16-NEXT: s_and_b32 s4, s17, 0xffff0000
+; GFX11-TRUE16-NEXT: v_bfe_u32 v2, v1, 16, 1
+; GFX11-TRUE16-NEXT: v_or_b32_e32 v3, 0x400000, v1
+; GFX11-TRUE16-NEXT: v_cmp_u_f32_e32 vcc_lo, v1, v1
+; GFX11-TRUE16-NEXT: s_delay_alu instid0(VALU_DEP_3) | instskip(NEXT) | instid1(VALU_DEP_1)
+; GFX11-TRUE16-NEXT: v_add_nc_u32_e32 v2, v2, v1
+; GFX11-TRUE16-NEXT: v_add_nc_u32_e32 v2, 0x7fff, v2
+; GFX11-TRUE16-NEXT: s_delay_alu instid0(VALU_DEP_1) | instskip(SKIP_1) | instid1(VALU_DEP_2)
+; GFX11-TRUE16-NEXT: v_cndmask_b32_e32 v1, v2, v3, vcc_lo
+; GFX11-TRUE16-NEXT: v_lshrrev_b32_e32 v0, 16, v0
+; GFX11-TRUE16-NEXT: v_lshrrev_b32_e32 v114, 16, v1
+; GFX11-TRUE16-NEXT: s_delay_alu instid0(VALU_DEP_2) | instskip(SKIP_2) | instid1(VALU_DEP_1)
+; GFX11-TRUE16-NEXT: v_mov_b16_e32 v114.h, v0.l
+; GFX11-TRUE16-NEXT: v_add_f32_e64 v0, 0x40c00000, s4
+; GFX11-TRUE16-NEXT: s_lshl_b32 s4, s17, 16
+; GFX11-TRUE16-NEXT: v_bfe_u32 v1, v0, 16, 1
+; GFX11-TRUE16-NEXT: v_or_b32_e32 v2, 0x400000, v0
+; GFX11-TRUE16-NEXT: v_cmp_u_f32_e32 vcc_lo, v0, v0
+; GFX11-TRUE16-NEXT: s_delay_alu instid0(VALU_DEP_3) | instskip(NEXT) | instid1(VALU_DEP_1)
+; GFX11-TRUE16-NEXT: v_add_nc_u32_e32 v1, v1, v0
+; GFX11-TRUE16-NEXT: v_add_nc_u32_e32 v1, 0x7fff, v1
+; GFX11-TRUE16-NEXT: s_delay_alu instid0(VALU_DEP_1) | instskip(SKIP_2) | instid1(VALU_DEP_1)
+; GFX11-TRUE16-NEXT: v_cndmask_b32_e32 v0, v1, v2, vcc_lo
+; GFX11-TRUE16-NEXT: v_add_f32_e64 v1, 0x40c00000, s4
+; GFX11-TRUE16-NEXT: s_and_b32 s4, s16, 0xffff0000
+; GFX11-TRUE16-NEXT: v_bfe_u32 v2, v1, 16, 1
+; GFX11-TRUE16-NEXT: v_or_b32_e32 v3, 0x400000, v1
+; GFX11-TRUE16-NEXT: v_cmp_u_f32_e32 vcc_lo, v1, v1
+; GFX11-TRUE16-NEXT: s_delay_alu instid0(VALU_DEP_3) | instskip(NEXT) | instid1(VALU_DEP_1)
+; GFX11-TRUE16-NEXT: v_add_nc_u32_e32 v2, v2, v1
+; GFX11-TRUE16-NEXT: v_add_nc_u32_e32 v2, 0x7fff, v2
+; GFX11-TRUE16-NEXT: s_delay_alu instid0(VALU_DEP_1) | instskip(SKIP_1) | instid1(VALU_DEP_2)
+; GFX11-TRUE16-NEXT: v_cndmask_b32_e32 v1, v2, v3, vcc_lo
+; GFX11-TRUE16-NEXT: v_lshrrev_b32_e32 v0, 16, v0
+; GFX11-TRUE16-NEXT: v_lshrrev_b32_e32 v120, 16, v1
+; GFX11-TRUE16-NEXT: s_delay_alu instid0(VALU_DEP_2) | instskip(SKIP_2) | instid1(VALU_DEP_1)
+; GFX11-TRUE16-NEXT: v_mov_b16_e32 v120.h, v0.l
+; GFX11-TRUE16-NEXT: v_add_f32_e64 v0, 0x40c00000, s4
+; GFX11-TRUE16-NEXT: s_lshl_b32 s4, s16, 16
+; GFX11-TRUE16-NEXT: v_bfe_u32 v1, v0, 16, 1
+; GFX11-TRUE16-NEXT: v_or_b32_e32 v2, 0x400000, v0
+; GFX11-TRUE16-NEXT: v_cmp_u_f32_e32 vcc_lo, v0, v0
+; GFX11-TRUE16-NEXT: s_delay_alu instid0(VALU_DEP_3) | instskip(NEXT) | instid1(VALU_DEP_1)
+; GFX11-TRUE16-NEXT: v_add_nc_u32_e32 v1, v1, v0
+; GFX11-TRUE16-NEXT: v_add_nc_u32_e32 v1, 0x7fff, v1
+; GFX11-TRUE16-NEXT: s_delay_alu instid0(VALU_DEP_1) | instskip(SKIP_3) | instid1(VALU_DEP_1)
+; GFX11-TRUE16-NEXT: v_cndmask_b32_e32 v0, v1, v2, vcc_lo
+; GFX11-TRUE16-NEXT: v_add_f32_e64 v1, 0x40c00000, s4
+; GFX11-TRUE16-NEXT: s_and_b32 s4, s3, 0xffff0000
+; GFX11-TRUE16-NEXT: s_lshl_b32 s3, s3, 16
+; GFX11-TRUE16-NEXT: v_bfe_u32 v2, v1, 16, 1
+; GFX11-TRUE16-NEXT: v_or_b32_e32 v3, 0x400000, v1
+; GFX11-TRUE16-NEXT: v_cmp_u_f32_e32 vcc_lo, v1, v1
+; GFX11-TRUE16-NEXT: s_delay_alu instid0(VALU_DEP_3) | instskip(NEXT) | instid1(VALU_DEP_1)
+; GFX11-TRUE16-NEXT: v_add_nc_u32_e32 v2, v2, v1
+; GFX11-TRUE16-NEXT: v_add_nc_u32_e32 v2, 0x7fff, v2
+; GFX11-TRUE16-NEXT: s_delay_alu instid0(VALU_DEP_1) | instskip(SKIP_1) | instid1(VALU_DEP_2)
+; GFX11-TRUE16-NEXT: v_cndmask_b32_e32 v1, v2, v3, vcc_lo
+; GFX11-TRUE16-NEXT: v_lshrrev_b32_e32 v0, 16, v0
+; GFX11-TRUE16-NEXT: v_lshrrev_b32_e32 v125, 16, v1
+; GFX11-TRUE16-NEXT: s_delay_alu instid0(VALU_DEP_2) | instskip(SKIP_1) | instid1(VALU_DEP_1)
+; GFX11-TRUE16-NEXT: v_mov_b16_e32 v125.h, v0.l
+; GFX11-TRUE16-NEXT: v_add_f32_e64 v0, 0x40c00000, s4
+; GFX11-TRUE16-NEXT: v_bfe_u32 v1, v0, 16, 1
+; GFX11-TRUE16-NEXT: v_or_b32_e32 v2, 0x400000, v0
+; GFX11-TRUE16-NEXT: v_cmp_u_f32_e32 vcc_lo, v0, v0
+; GFX11-TRUE16-NEXT: s_delay_alu instid0(VALU_DEP_3) | instskip(NEXT) | instid1(VALU_DEP_1)
+; GFX11-TRUE16-NEXT: v_add_nc_u32_e32 v1, v1, v0
+; GFX11-TRUE16-NEXT: v_add_nc_u32_e32 v1, 0x7fff, v1
+; GFX11-TRUE16-NEXT: s_delay_alu instid0(VALU_DEP_1) | instskip(SKIP_3) | instid1(VALU_DEP_1)
+; GFX11-TRUE16-NEXT: v_cndmask_b32_e32 v0, v1, v2, vcc_lo
+; GFX11-TRUE16-NEXT: v_add_f32_e64 v1, 0x40c00000, s3
+; GFX11-TRUE16-NEXT: s_and_b32 s3, s2, 0xffff0000
+; GFX11-TRUE16-NEXT: s_lshl_b32 s2, s2, 16
+; GFX11-TRUE16-NEXT: v_bfe_u32 v2, v1, 16, 1
+; GFX11-TRUE16-NEXT: v_or_b32_e32 v3, 0x400000, v1
+; GFX11-TRUE16-NEXT: v_cmp_u_f32_e32 vcc_lo, v1, v1
+; GFX11-TRUE16-NEXT: s_delay_alu instid0(VALU_DEP_3) | instskip(NEXT) | instid1(VALU_DEP_1)
+; GFX11-TRUE16-NEXT: v_add_nc_u32_e32 v2, v2, v1
+; GFX11-TRUE16-NEXT: v_add_nc_u32_e32 v2, 0x7fff, v2
+; GFX11-TRUE16-NEXT: s_delay_alu instid0(VALU_DEP_1) | instskip(SKIP_1) | instid1(VALU_DEP_2)
+; GFX11-TRUE16-NEXT: v_cndmask_b32_e32 v1, v2, v3, vcc_lo
+; GFX11-TRUE16-NEXT: v_lshrrev_b32_e32 v0, 16, v0
+; GFX11-TRUE16-NEXT: v_lshrrev_b32_e32 v129, 16, v1
+; GFX11-TRUE16-NEXT: s_delay_alu instid0(VALU_DEP_2) | instskip(SKIP_1) | instid1(VALU_DEP_1)
+; GFX11-TRUE16-NEXT: v_mov_b16_e64 v129.h, v0.l
+; GFX11-TRUE16-NEXT: v_add_f32_e64 v0, 0x40c00000, s3
+; GFX11-TRUE16-NEXT: v_bfe_u32 v1, v0, 16, 1
+; GFX11-TRUE16-NEXT: v_or_b32_e32 v2, 0x400000, v0
+; GFX11-TRUE16-NEXT: v_cmp_u_f32_e32 vcc_lo, v0, v0
+; GFX11-TRUE16-NEXT: s_delay_alu instid0(VALU_DEP_3) | instskip(NEXT) | instid1(VALU_DEP_1)
+; GFX11-TRUE16-NEXT: v_add_nc_u32_e32 v1, v1, v0
+; GFX11-TRUE16-NEXT: v_add_nc_u32_e32 v1, 0x7fff, v1
+; GFX11-TRUE16-NEXT: s_delay_alu instid0(VALU_DEP_1) | instskip(SKIP_3) | instid1(VALU_DEP_1)
+; GFX11-TRUE16-NEXT: v_cndmask_b32_e32 v0, v1, v2, vcc_lo
+; GFX11-TRUE16-NEXT: v_add_f32_e64 v1, 0x40c00000, s2
+; GFX11-TRUE16-NEXT: s_and_b32 s2, s1, 0xffff0000
+; GFX11-TRUE16-NEXT: s_lshl_b32 s1, s1, 16
+; GFX11-TRUE16-NEXT: v_bfe_u32 v2, v1, 16, 1
+; GFX11-TRUE16-NEXT: v_or_b32_e32 v3, 0x400000, v1
+; GFX11-TRUE16-NEXT: v_cmp_u_f32_e32 vcc_lo, v1, v1
+; GFX11-TRUE16-NEXT: s_delay_alu instid0(VALU_DEP_3) | instskip(NEXT) | instid1(VALU_DEP_1)
+; GFX11-TRUE16-NEXT: v_add_nc_u32_e32 v2, v2, v1
+; GFX11-TRUE16-NEXT: v_add_nc_u32_e32 v2, 0x7fff, v2
+; GFX11-TRUE16-NEXT: s_delay_alu instid0(VALU_DEP_1) | instskip(SKIP_1) | instid1(VALU_DEP_2)
+; GFX11-TRUE16-NEXT: v_cndmask_b32_e32 v1, v2, v3, vcc_lo
+; GFX11-TRUE16-NEXT: v_lshrrev_b32_e32 v0, 16, v0
+; GFX11-TRUE16-NEXT: v_lshrrev_b32_e32 v132, 16, v1
+; GFX11-TRUE16-NEXT: s_delay_alu instid0(VALU_DEP_2) | instskip(SKIP_1) | instid1(VALU_DEP_1)
+; GFX11-TRUE16-NEXT: v_mov_b16_e64 v132.h, v0.l
+; GFX11-TRUE16-NEXT: v_add_f32_e64 v0, 0x40c00000, s2
+; GFX11-TRUE16-NEXT: v_bfe_u32 v1, v0, 16, 1
+; GFX11-TRUE16-NEXT: v_or_b32_e32 v2, 0x400000, v0
+; GFX11-TRUE16-NEXT: v_cmp_u_f32_e32 vcc_lo, v0, v0
+; GFX11-TRUE16-NEXT: s_delay_alu instid0(VALU_DEP_3) | instskip(NEXT) | instid1(VALU_DEP_1)
+; GFX11-TRUE16-NEXT: v_add_nc_u32_e32 v1, v1, v0
+; GFX11-TRUE16-NEXT: v_add_nc_u32_e32 v1, 0x7fff, v1
+; GFX11-TRUE16-NEXT: s_delay_alu instid0(VALU_DEP_1) | instskip(SKIP_3) | instid1(VALU_DEP_1)
+; GFX11-TRUE16-NEXT: v_cndmask_b32_e32 v0, v1, v2, vcc_lo
+; GFX11-TRUE16-NEXT: v_add_f32_e64 v1, 0x40c00000, s1
+; GFX11-TRUE16-NEXT: s_and_b32 s1, s0, 0xffff0000
+; GFX11-TRUE16-NEXT: s_lshl_b32 s0, s0, 16
+; GFX11-TRUE16-NEXT: v_bfe_u32 v2, v1, 16, 1
+; GFX11-TRUE16-NEXT: v_or_b32_e32 v3, 0x400000, v1
+; GFX11-TRUE16-NEXT: v_cmp_u_f32_e32 vcc_lo, v1, v1
+; GFX11-TRUE16-NEXT: s_delay_alu instid0(VALU_DEP_3) | instskip(NEXT) | instid1(VALU_DEP_1)
+; GFX11-TRUE16-NEXT: v_add_nc_u32_e32 v2, v2, v1
+; GFX11-TRUE16-NEXT: v_add_nc_u32_e32 v2, 0x7fff, v2
+; GFX11-TRUE16-NEXT: s_delay_alu instid0(VALU_DEP_1) | instskip(SKIP_1) | instid1(VALU_DEP_2)
+; GFX11-TRUE16-NEXT: v_cndmask_b32_e32 v1, v2, v3, vcc_lo
+; GFX11-TRUE16-NEXT: v_lshrrev_b32_e32 v0, 16, v0
+; GFX11-TRUE16-NEXT: v_lshrrev_b32_e32 v134, 16, v1
+; GFX11-TRUE16-NEXT: s_delay_alu instid0(VALU_DEP_2) | instskip(SKIP_1) | instid1(VALU_DEP_1)
+; GFX11-TRUE16-NEXT: v_mov_b16_e64 v134.h, v0.l
+; GFX11-TRUE16-NEXT: v_add_f32_e64 v0, 0x40c00000, s1
+; GFX11-TRUE16-NEXT: v_bfe_u32 v1, v0, 16, 1
+; GFX11-TRUE16-NEXT: v_or_b32_e32 v2, 0x400000, v0
+; GFX11-TRUE16-NEXT: v_cmp_u_f32_e32 vcc_lo, v0, v0
+; GFX11-TRUE16-NEXT: s_delay_alu instid0(VALU_DEP_3) | instskip(NEXT) | instid1(VALU_DEP_1)
+; GFX11-TRUE16-NEXT: v_add_nc_u32_e32 v1, v1, v0
+; GFX11-TRUE16-NEXT: v_add_nc_u32_e32 v1, 0x7fff, v1
+; GFX11-TRUE16-NEXT: s_delay_alu instid0(VALU_DEP_1) | instskip(SKIP_1) | instid1(VALU_DEP_1)
+; GFX11-TRUE16-NEXT: v_cndmask_b32_e32 v0, v1, v2, vcc_lo
+; GFX11-TRUE16-NEXT: v_add_f32_e64 v1, 0x40c00000, s0
+; GFX11-TRUE16-NEXT: v_bfe_u32 v2, v1, 16, 1
+; GFX11-TRUE16-NEXT: v_or_b32_e32 v3, 0x400000, v1
+; GFX11-TRUE16-NEXT: v_cmp_u_f32_e32 vcc_lo, v1, v1
+; GFX11-TRUE16-NEXT: s_delay_alu instid0(VALU_DEP_3) | instskip(NEXT) | instid1(VALU_DEP_1)
+; GFX11-TRUE16-NEXT: v_add_nc_u32_e32 v2, v2, v1
+; GFX11-TRUE16-NEXT: v_add_nc_u32_e32 v2, 0x7fff, v2
+; GFX11-TRUE16-NEXT: s_delay_alu instid0(VALU_DEP_1) | instskip(SKIP_1) | instid1(VALU_DEP_2)
+; GFX11-TRUE16-NEXT: v_cndmask_b32_e32 v1, v2, v3, vcc_lo
+; GFX11-TRUE16-NEXT: v_lshrrev_b32_e32 v0, 16, v0
+; GFX11-TRUE16-NEXT: v_lshrrev_b32_e32 v135, 16, v1
+; GFX11-TRUE16-NEXT: s_delay_alu instid0(VALU_DEP_2) | instskip(SKIP_1) | instid1(VALU_DEP_1)
+; GFX11-TRUE16-NEXT: v_mov_b16_e64 v135.h, v0.l
+; GFX11-TRUE16-NEXT: v_and_b32_e32 v0, 0xffff0000, v167
+; GFX11-TRUE16-NEXT: v_add_f32_e32 v0, 0x40c00000, v0
+; GFX11-TRUE16-NEXT: s_delay_alu instid0(VALU_DEP_1) | instskip(SKIP_2) | instid1(VALU_DEP_3)
+; GFX11-TRUE16-NEXT: v_bfe_u32 v1, v0, 16, 1
+; GFX11-TRUE16-NEXT: v_or_b32_e32 v2, 0x400000, v0
+; GFX11-TRUE16-NEXT: v_cmp_u_f32_e32 vcc_lo, v0, v0
+; GFX11-TRUE16-NEXT: v_add_nc_u32_e32 v1, v1, v0
+; GFX11-TRUE16-NEXT: s_delay_alu instid0(VALU_DEP_1) | instskip(NEXT) | instid1(VALU_DEP_1)
+; GFX11-TRUE16-NEXT: v_add_nc_u32_e32 v1, 0x7fff, v1
+; GFX11-TRUE16-NEXT: v_dual_cndmask_b32 v0, v1, v2 :: v_dual_lshlrev_b32 v1, 16, v167
+; GFX11-TRUE16-NEXT: s_delay_alu instid0(VALU_DEP_1) | instskip(NEXT) | instid1(VALU_DEP_1)
+; GFX11-TRUE16-NEXT: v_add_f32_e32 v1, 0x40c00000, v1
+; GFX11-TRUE16-NEXT: v_bfe_u32 v2, v1, 16, 1
+; GFX11-TRUE16-NEXT: v_or_b32_e32 v3, 0x400000, v1
+; GFX11-TRUE16-NEXT: v_cmp_u_f32_e32 vcc_lo, v1, v1
+; GFX11-TRUE16-NEXT: s_delay_alu instid0(VALU_DEP_3) | instskip(NEXT) | instid1(VALU_DEP_1)
+; GFX11-TRUE16-NEXT: v_add_nc_u32_e32 v2, v2, v1
+; GFX11-TRUE16-NEXT: v_add_nc_u32_e32 v2, 0x7fff, v2
+; GFX11-TRUE16-NEXT: s_delay_alu instid0(VALU_DEP_1) | instskip(SKIP_1) | instid1(VALU_DEP_2)
+; GFX11-TRUE16-NEXT: v_cndmask_b32_e32 v1, v2, v3, vcc_lo
+; GFX11-TRUE16-NEXT: v_lshrrev_b32_e32 v0, 16, v0
+; GFX11-TRUE16-NEXT: v_lshrrev_b32_e32 v167, 16, v1
+; GFX11-TRUE16-NEXT: s_delay_alu instid0(VALU_DEP_2) | instskip(SKIP_1) | instid1(VALU_DEP_1)
+; GFX11-TRUE16-NEXT: v_mov_b16_e64 v167.h, v0.l
+; GFX11-TRUE16-NEXT: v_and_b32_e32 v0, 0xffff0000, v176
+; GFX11-TRUE16-NEXT: v_add_f32_e32 v0, 0x40c00000, v0
+; GFX11-TRUE16-NEXT: s_delay_alu instid0(VALU_DEP_1) | instskip(SKIP_2) | instid1(VALU_DEP_3)
+; GFX11-TRUE16-NEXT: v_bfe_u32 v1, v0, 16, 1
+; GFX11-TRUE16-NEXT: v_or_b32_e32 v2, 0x400000, v0
+; GFX11-TRUE16-NEXT: v_cmp_u_f32_e32 vcc_lo, v0, v0
+; GFX11-TRUE16-NEXT: v_add_nc_u32_e32 v1, v1, v0
+; GFX11-TRUE16-NEXT: s_delay_alu instid0(VALU_DEP_1) | instskip(NEXT) | instid1(VALU_DEP_1)
+; GFX11-TRUE16-NEXT: v_add_nc_u32_e32 v1, 0x7fff, v1
+; GFX11-TRUE16-NEXT: v_dual_cndmask_b32 v0, v1, v2 :: v_dual_lshlrev_b32 v1, 16, v176
+; GFX11-TRUE16-NEXT: s_delay_alu instid0(VALU_DEP_1) | instskip(NEXT) | instid1(VALU_DEP_2)
+; GFX11-TRUE16-NEXT: v_lshrrev_b32_e32 v0, 16, v0
+; GFX11-TRUE16-NEXT: v_add_f32_e32 v1, 0x40c00000, v1
+; GFX11-TRUE16-NEXT: s_delay_alu instid0(VALU_DEP_1) | instskip(SKIP_2) | instid1(VALU_DEP_3)
+; GFX11-TRUE16-NEXT: v_bfe_u32 v2, v1, 16, 1
+; GFX11-TRUE16-NEXT: v_or_b32_e32 v3, 0x400000, v1
+; GFX11-TRUE16-NEXT: v_cmp_u_f32_e32 vcc_lo, v1, v1
+; GFX11-TRUE16-NEXT: v_add_nc_u32_e32 v2, v2, v1
+; GFX11-TRUE16-NEXT: s_delay_alu instid0(VALU_DEP_1) | instskip(NEXT) | instid1(VALU_DEP_1)
+; GFX11-TRUE16-NEXT: v_add_nc_u32_e32 v2, 0x7fff, v2
+; GFX11-TRUE16-NEXT: v_cndmask_b32_e32 v1, v2, v3, vcc_lo
+; GFX11-TRUE16-NEXT: s_delay_alu instid0(VALU_DEP_1) | instskip(SKIP_2) | instid1(VALU_DEP_1)
+; GFX11-TRUE16-NEXT: v_lshrrev_b32_e32 v176, 16, v1
+; GFX11-TRUE16-NEXT: v_mov_b16_e64 v176.h, v0.l
+; GFX11-TRUE16-NEXT: v_and_b32_e32 v0, 0xffff0000, v177
+; GFX11-TRUE16-NEXT: v_add_f32_e32 v0, 0x40c00000, v0
+; GFX11-TRUE16-NEXT: s_delay_alu instid0(VALU_DEP_1) | instskip(SKIP_2) | instid1(VALU_DEP_3)
+; GFX11-TRUE16-NEXT: v_bfe_u32 v1, v0, 16, 1
+; GFX11-TRUE16-NEXT: v_or_b32_e32 v2, 0x400000, v0
+; GFX11-TRUE16-NEXT: v_cmp_u_f32_e32 vcc_lo, v0, v0
+; GFX11-TRUE16-NEXT: v_add_nc_u32_e32 v1, v1, v0
+; GFX11-TRUE16-NEXT: s_delay_alu instid0(VALU_DEP_1) | instskip(NEXT) | instid1(VALU_DEP_1)
+; GFX11-TRUE16-NEXT: v_add_nc_u32_e32 v1, 0x7fff, v1
+; GFX11-TRUE16-NEXT: v_dual_cndmask_b32 v0, v1, v2 :: v_dual_lshlrev_b32 v1, 16, v177
+; GFX11-TRUE16-NEXT: s_delay_alu instid0(VALU_DEP_1) | instskip(NEXT) | instid1(VALU_DEP_2)
+; GFX11-TRUE16-NEXT: v_add_f32_e32 v1, 0x40c00000, v1
+; GFX11-TRUE16-NEXT: v_lshrrev_b32_e32 v0, 16, v0
+; GFX11-TRUE16-NEXT: s_delay_alu instid0(VALU_DEP_2) | instskip(SKIP_2) | instid1(VALU_DEP_3)
+; GFX11-TRUE16-NEXT: v_bfe_u32 v2, v1, 16, 1
+; GFX11-TRUE16-NEXT: v_or_b32_e32 v3, 0x400000, v1
+; GFX11-TRUE16-NEXT: v_cmp_u_f32_e32 vcc_lo, v1, v1
+; GFX11-TRUE16-NEXT: v_add_nc_u32_e32 v2, v2, v1
+; GFX11-TRUE16-NEXT: s_delay_alu instid0(VALU_DEP_1) | instskip(NEXT) | instid1(VALU_DEP_1)
+; GFX11-TRUE16-NEXT: v_add_nc_u32_e32 v2, 0x7fff, v2
+; GFX11-TRUE16-NEXT: v_cndmask_b32_e32 v1, v2, v3, vcc_lo
+; GFX11-TRUE16-NEXT: s_delay_alu instid0(VALU_DEP_1) | instskip(SKIP_2) | instid1(VALU_DEP_1)
+; GFX11-TRUE16-NEXT: v_lshrrev_b32_e32 v177, 16, v1
+; GFX11-TRUE16-NEXT: v_mov_b16_e64 v177.h, v0.l
+; GFX11-TRUE16-NEXT: v_and_b32_e32 v0, 0xffff0000, v178
+; GFX11-TRUE16-NEXT: v_add_f32_e32 v0, 0x40c00000, v0
+; GFX11-TRUE16-NEXT: s_delay_alu instid0(VALU_DEP_1) | instskip(SKIP_2) | instid1(VALU_DEP_3)
+; GFX11-TRUE16-NEXT: v_bfe_u32 v1, v0, 16, 1
+; GFX11-TRUE16-NEXT: v_or_b32_e32 v2, 0x400000, v0
+; GFX11-TRUE16-NEXT: v_cmp_u_f32_e32 vcc_lo, v0, v0
+; GFX11-TRUE16-NEXT: v_add_nc_u32_e32 v1, v1, v0
+; GFX11-TRUE16-NEXT: s_delay_alu instid0(VALU_DEP_1) | instskip(NEXT) | instid1(VALU_DEP_1)
+; GFX11-TRUE16-NEXT: v_add_nc_u32_e32 v1, 0x7fff, v1
+; GFX11-TRUE16-NEXT: v_cndmask_b32_e32 v0, v1, v2, vcc_lo
+; GFX11-TRUE16-NEXT: v_lshlrev_b32_e32 v1, 16, v178
+; GFX11-TRUE16-NEXT: s_delay_alu instid0(VALU_DEP_1) | instskip(NEXT) | instid1(VALU_DEP_3)
+; GFX11-TRUE16-NEXT: v_add_f32_e32 v1, 0x40c00000, v1
+; GFX11-TRUE16-NEXT: v_lshrrev_b32_e32 v0, 16, v0
+; GFX11-TRUE16-NEXT: s_delay_alu instid0(VALU_DEP_2) | instskip(SKIP_2) | instid1(VALU_DEP_3)
+; GFX11-TRUE16-NEXT: v_bfe_u32 v2, v1, 16, 1
+; GFX11-TRUE16-NEXT: v_or_b32_e32 v3, 0x400000, v1
+; GFX11-TRUE16-NEXT: v_cmp_u_f32_e32 vcc_lo, v1, v1
+; GFX11-TRUE16-NEXT: v_add_nc_u32_e32 v2, v2, v1
+; GFX11-TRUE16-NEXT: s_delay_alu instid0(VALU_DEP_1) | instskip(NEXT) | instid1(VALU_DEP_1)
+; GFX11-TRUE16-NEXT: v_add_nc_u32_e32 v2, 0x7fff, v2
+; GFX11-TRUE16-NEXT: v_cndmask_b32_e32 v1, v2, v3, vcc_lo
+; GFX11-TRUE16-NEXT: s_delay_alu instid0(VALU_DEP_1) | instskip(SKIP_2) | instid1(VALU_DEP_1)
+; GFX11-TRUE16-NEXT: v_lshrrev_b32_e32 v178, 16, v1
+; GFX11-TRUE16-NEXT: v_mov_b16_e64 v178.h, v0.l
+; GFX11-TRUE16-NEXT: v_and_b32_e32 v0, 0xffff0000, v179
+; GFX11-TRUE16-NEXT: v_add_f32_e32 v0, 0x40c00000, v0
+; GFX11-TRUE16-NEXT: s_delay_alu instid0(VALU_DEP_1) | instskip(SKIP_2) | instid1(VALU_DEP_3)
+; GFX11-TRUE16-NEXT: v_bfe_u32 v1, v0, 16, 1
+; GFX11-TRUE16-NEXT: v_or_b32_e32 v2, 0x400000, v0
+; GFX11-TRUE16-NEXT: v_cmp_u_f32_e32 vcc_lo, v0, v0
+; GFX11-TRUE16-NEXT: v_add_nc_u32_e32 v1, v1, v0
+; GFX11-TRUE16-NEXT: s_delay_alu instid0(VALU_DEP_1) | instskip(NEXT) | instid1(VALU_DEP_1)
+; GFX11-TRUE16-NEXT: v_add_nc_u32_e32 v1, 0x7fff, v1
+; GFX11-TRUE16-NEXT: v_dual_cndmask_b32 v0, v1, v2 :: v_dual_lshlrev_b32 v1, 16, v179
+; GFX11-TRUE16-NEXT: s_delay_alu instid0(VALU_DEP_1) | instskip(NEXT) | instid1(VALU_DEP_2)
+; GFX11-TRUE16-NEXT: v_add_f32_e32 v1, 0x40c00000, v1
+; GFX11-TRUE16-NEXT: v_lshrrev_b32_e32 v0, 16, v0
+; GFX11-TRUE16-NEXT: s_delay_alu instid0(VALU_DEP_2) | instskip(SKIP_2) | instid1(VALU_DEP_3)
+; GFX11-TRUE16-NEXT: v_bfe_u32 v2, v1, 16, 1
+; GFX11-TRUE16-NEXT: v_or_b32_e32 v3, 0x400000, v1
+; GFX11-TRUE16-NEXT: v_cmp_u_f32_e32 vcc_lo, v1, v1
+; GFX11-TRUE16-NEXT: v_add_nc_u32_e32 v2, v2, v1
+; GFX11-TRUE16-NEXT: s_delay_alu instid0(VALU_DEP_1) | instskip(NEXT) | instid1(VALU_DEP_1)
+; GFX11-TRUE16-NEXT: v_add_nc_u32_e32 v2, 0x7fff, v2
+; GFX11-TRUE16-NEXT: v_cndmask_b32_e32 v1, v2, v3, vcc_lo
+; GFX11-TRUE16-NEXT: s_delay_alu instid0(VALU_DEP_1) | instskip(SKIP_2) | instid1(VALU_DEP_1)
+; GFX11-TRUE16-NEXT: v_lshrrev_b32_e32 v179, 16, v1
+; GFX11-TRUE16-NEXT: v_mov_b16_e64 v179.h, v0.l
+; GFX11-TRUE16-NEXT: v_and_b32_e32 v0, 0xffff0000, v180
+; GFX11-TRUE16-NEXT: v_add_f32_e32 v0, 0x40c00000, v0
+; GFX11-TRUE16-NEXT: s_delay_alu instid0(VALU_DEP_1) | instskip(SKIP_2) | instid1(VALU_DEP_3)
+; GFX11-TRUE16-NEXT: v_bfe_u32 v1, v0, 16, 1
+; GFX11-TRUE16-NEXT: v_or_b32_e32 v2, 0x400000, v0
+; GFX11-TRUE16-NEXT: v_cmp_u_f32_e32 vcc_lo, v0, v0
+; GFX11-TRUE16-NEXT: v_add_nc_u32_e32 v1, v1, v0
+; GFX11-TRUE16-NEXT: s_delay_alu instid0(VALU_DEP_1) | instskip(NEXT) | instid1(VALU_DEP_1)
+; GFX11-TRUE16-NEXT: v_add_nc_u32_e32 v1, 0x7fff, v1
+; GFX11-TRUE16-NEXT: v_dual_cndmask_b32 v0, v1, v2 :: v_dual_lshlrev_b32 v1, 16, v180
+; GFX11-TRUE16-NEXT: s_delay_alu instid0(VALU_DEP_1) | instskip(NEXT) | instid1(VALU_DEP_2)
+; GFX11-TRUE16-NEXT: v_lshrrev_b32_e32 v0, 16, v0
+; GFX11-TRUE16-NEXT: v_add_f32_e32 v1, 0x40c00000, v1
+; GFX11-TRUE16-NEXT: s_delay_alu instid0(VALU_DEP_1) | instskip(SKIP_2) | instid1(VALU_DEP_3)
+; GFX11-TRUE16-NEXT: v_bfe_u32 v2, v1, 16, 1
+; GFX11-TRUE16-NEXT: v_or_b32_e32 v3, 0x400000, v1
+; GFX11-TRUE16-NEXT: v_cmp_u_f32_e32 vcc_lo, v1, v1
+; GFX11-TRUE16-NEXT: v_add_nc_u32_e32 v2, v2, v1
+; GFX11-TRUE16-NEXT: s_delay_alu instid0(VALU_DEP_1) | instskip(NEXT) | instid1(VALU_DEP_1)
+; GFX11-TRUE16-NEXT: v_add_nc_u32_e32 v2, 0x7fff, v2
+; GFX11-TRUE16-NEXT: v_cndmask_b32_e32 v1, v2, v3, vcc_lo
+; GFX11-TRUE16-NEXT: s_delay_alu instid0(VALU_DEP_1) | instskip(SKIP_2) | instid1(VALU_DEP_1)
+; GFX11-TRUE16-NEXT: v_lshrrev_b32_e32 v180, 16, v1
+; GFX11-TRUE16-NEXT: v_mov_b16_e64 v180.h, v0.l
+; GFX11-TRUE16-NEXT: v_and_b32_e32 v0, 0xffff0000, v181
+; GFX11-TRUE16-NEXT: v_add_f32_e32 v0, 0x40c00000, v0
+; GFX11-TRUE16-NEXT: s_delay_alu instid0(VALU_DEP_1) | instskip(SKIP_2) | instid1(VALU_DEP_3)
+; GFX11-TRUE16-NEXT: v_bfe_u32 v1, v0, 16, 1
+; GFX11-TRUE16-NEXT: v_or_b32_e32 v2, 0x400000, v0
+; GFX11-TRUE16-NEXT: v_cmp_u_f32_e32 vcc_lo, v0, v0
+; GFX11-TRUE16-NEXT: v_add_nc_u32_e32 v1, v1, v0
+; GFX11-TRUE16-NEXT: s_delay_alu instid0(VALU_DEP_1) | instskip(NEXT) | instid1(VALU_DEP_1)
+; GFX11-TRUE16-NEXT: v_add_nc_u32_e32 v1, 0x7fff, v1
+; GFX11-TRUE16-NEXT: v_dual_cndmask_b32 v0, v1, v2 :: v_dual_lshlrev_b32 v1, 16, v181
+; GFX11-TRUE16-NEXT: s_delay_alu instid0(VALU_DEP_1) | instskip(NEXT) | instid1(VALU_DEP_2)
+; GFX11-TRUE16-NEXT: v_add_f32_e32 v1, 0x40c00000, v1
+; GFX11-TRUE16-NEXT: v_lshrrev_b32_e32 v0, 16, v0
+; GFX11-TRUE16-NEXT: s_delay_alu instid0(VALU_DEP_2) | instskip(SKIP_2) | instid1(VALU_DEP_3)
+; GFX11-TRUE16-NEXT: v_bfe_u32 v2, v1, 16, 1
+; GFX11-TRUE16-NEXT: v_or_b32_e32 v3, 0x400000, v1
+; GFX11-TRUE16-NEXT: v_cmp_u_f32_e32 vcc_lo, v1, v1
+; GFX11-TRUE16-NEXT: v_add_nc_u32_e32 v2, v2, v1
+; GFX11-TRUE16-NEXT: s_delay_alu instid0(VALU_DEP_1) | instskip(NEXT) | instid1(VALU_DEP_1)
+; GFX11-TRUE16-NEXT: v_add_nc_u32_e32 v2, 0x7fff, v2
+; GFX11-TRUE16-NEXT: v_cndmask_b32_e32 v1, v2, v3, vcc_lo
+; GFX11-TRUE16-NEXT: s_delay_alu instid0(VALU_DEP_1) | instskip(SKIP_2) | instid1(VALU_DEP_1)
+; GFX11-TRUE16-NEXT: v_lshrrev_b32_e32 v181, 16, v1
+; GFX11-TRUE16-NEXT: v_mov_b16_e64 v181.h, v0.l
+; GFX11-TRUE16-NEXT: v_and_b32_e32 v0, 0xffff0000, v182
+; GFX11-TRUE16-NEXT: v_add_f32_e32 v0, 0x40c00000, v0
+; GFX11-TRUE16-NEXT: s_delay_alu instid0(VALU_DEP_1) | instskip(SKIP_2) | instid1(VALU_DEP_3)
+; GFX11-TRUE16-NEXT: v_bfe_u32 v1, v0, 16, 1
+; GFX11-TRUE16-NEXT: v_or_b32_e32 v2, 0x400000, v0
+; GFX11-TRUE16-NEXT: v_cmp_u_f32_e32 vcc_lo, v0, v0
+; GFX11-TRUE16-NEXT: v_add_nc_u32_e32 v1, v1, v0
+; GFX11-TRUE16-NEXT: s_delay_alu instid0(VALU_DEP_1) | instskip(NEXT) | instid1(VALU_DEP_1)
+; GFX11-TRUE16-NEXT: v_add_nc_u32_e32 v1, 0x7fff, v1
+; GFX11-TRUE16-NEXT: v_cndmask_b32_e32 v0, v1, v2, vcc_lo
+; GFX11-TRUE16-NEXT: v_lshlrev_b32_e32 v1, 16, v182
+; GFX11-TRUE16-NEXT: s_delay_alu instid0(VALU_DEP_1) | instskip(NEXT) | instid1(VALU_DEP_3)
+; GFX11-TRUE16-NEXT: v_add_f32_e32 v1, 0x40c00000, v1
+; GFX11-TRUE16-NEXT: v_lshrrev_b32_e32 v0, 16, v0
+; GFX11-TRUE16-NEXT: s_delay_alu instid0(VALU_DEP_2) | instskip(SKIP_2) | instid1(VALU_DEP_3)
+; GFX11-TRUE16-NEXT: v_bfe_u32 v2, v1, 16, 1
+; GFX11-TRUE16-NEXT: v_or_b32_e32 v3, 0x400000, v1
+; GFX11-TRUE16-NEXT: v_cmp_u_f32_e32 vcc_lo, v1, v1
+; GFX11-TRUE16-NEXT: v_add_nc_u32_e32 v2, v2, v1
+; GFX11-TRUE16-NEXT: s_delay_alu instid0(VALU_DEP_1) | instskip(NEXT) | instid1(VALU_DEP_1)
+; GFX11-TRUE16-NEXT: v_add_nc_u32_e32 v2, 0x7fff, v2
+; GFX11-TRUE16-NEXT: v_cndmask_b32_e32 v1, v2, v3, vcc_lo
+; GFX11-TRUE16-NEXT: s_delay_alu instid0(VALU_DEP_1) | instskip(SKIP_2) | instid1(VALU_DEP_1)
+; GFX11-TRUE16-NEXT: v_lshrrev_b32_e32 v182, 16, v1
+; GFX11-TRUE16-NEXT: v_mov_b16_e64 v182.h, v0.l
+; GFX11-TRUE16-NEXT: v_and_b32_e32 v0, 0xffff0000, v183
+; GFX11-TRUE16-NEXT: v_add_f32_e32 v0, 0x40c00000, v0
+; GFX11-TRUE16-NEXT: s_delay_alu instid0(VALU_DEP_1) | instskip(SKIP_2) | instid1(VALU_DEP_3)
+; GFX11-TRUE16-NEXT: v_bfe_u32 v1, v0, 16, 1
+; GFX11-TRUE16-NEXT: v_or_b32_e32 v2, 0x400000, v0
+; GFX11-TRUE16-NEXT: v_cmp_u_f32_e32 vcc_lo, v0, v0
+; GFX11-TRUE16-NEXT: v_add_nc_u32_e32 v1, v1, v0
+; GFX11-TRUE16-NEXT: s_delay_alu instid0(VALU_DEP_1) | instskip(NEXT) | instid1(VALU_DEP_1)
+; GFX11-TRUE16-NEXT: v_add_nc_u32_e32 v1, 0x7fff, v1
+; GFX11-TRUE16-NEXT: v_dual_cndmask_b32 v0, v1, v2 :: v_dual_lshlrev_b32 v1, 16, v183
+; GFX11-TRUE16-NEXT: s_delay_alu instid0(VALU_DEP_1) | instskip(NEXT) | instid1(VALU_DEP_2)
+; GFX11-TRUE16-NEXT: v_lshrrev_b32_e32 v0, 16, v0
+; GFX11-TRUE16-NEXT: v_add_f32_e32 v1, 0x40c00000, v1
+; GFX11-TRUE16-NEXT: s_delay_alu instid0(VALU_DEP_1) | instskip(SKIP_2) | instid1(VALU_DEP_3)
+; GFX11-TRUE16-NEXT: v_bfe_u32 v2, v1, 16, 1
+; GFX11-TRUE16-NEXT: v_or_b32_e32 v3, 0x400000, v1
+; GFX11-TRUE16-NEXT: v_cmp_u_f32_e32 vcc_lo, v1, v1
+; GFX11-TRUE16-NEXT: v_add_nc_u32_e32 v2, v2, v1
+; GFX11-TRUE16-NEXT: s_delay_alu instid0(VALU_DEP_1) | instskip(NEXT) | instid1(VALU_DEP_1)
+; GFX11-TRUE16-NEXT: v_add_nc_u32_e32 v2, 0x7fff, v2
+; GFX11-TRUE16-NEXT: v_cndmask_b32_e32 v1, v2, v3, vcc_lo
+; GFX11-TRUE16-NEXT: s_delay_alu instid0(VALU_DEP_1) | instskip(SKIP_2) | instid1(VALU_DEP_1)
+; GFX11-TRUE16-NEXT: v_lshrrev_b32_e32 v183, 16, v1
+; GFX11-TRUE16-NEXT: v_mov_b16_e64 v183.h, v0.l
+; GFX11-TRUE16-NEXT: v_and_b32_e32 v0, 0xffff0000, v168
+; GFX11-TRUE16-NEXT: v_add_f32_e32 v0, 0x40c00000, v0
+; GFX11-TRUE16-NEXT: s_delay_alu instid0(VALU_DEP_1) | instskip(SKIP_2) | instid1(VALU_DEP_3)
+; GFX11-TRUE16-NEXT: v_bfe_u32 v1, v0, 16, 1
+; GFX11-TRUE16-NEXT: v_or_b32_e32 v2, 0x400000, v0
+; GFX11-TRUE16-NEXT: v_cmp_u_f32_e32 vcc_lo, v0, v0
+; GFX11-TRUE16-NEXT: v_add_nc_u32_e32 v1, v1, v0
+; GFX11-TRUE16-NEXT: s_delay_alu instid0(VALU_DEP_1) | instskip(NEXT) | instid1(VALU_DEP_1)
+; GFX11-TRUE16-NEXT: v_add_nc_u32_e32 v1, 0x7fff, v1
+; GFX11-TRUE16-NEXT: v_dual_cndmask_b32 v0, v1, v2 :: v_dual_lshlrev_b32 v1, 16, v168
+; GFX11-TRUE16-NEXT: s_delay_alu instid0(VALU_DEP_1) | instskip(NEXT) | instid1(VALU_DEP_2)
+; GFX11-TRUE16-NEXT: v_lshrrev_b32_e32 v0, 16, v0
+; GFX11-TRUE16-NEXT: v_add_f32_e32 v1, 0x40c00000, v1
+; GFX11-TRUE16-NEXT: s_delay_alu instid0(VALU_DEP_1) | instskip(SKIP_2) | instid1(VALU_DEP_3)
+; GFX11-TRUE16-NEXT: v_bfe_u32 v2, v1, 16, 1
+; GFX11-TRUE16-NEXT: v_or_b32_e32 v3, 0x400000, v1
+; GFX11-TRUE16-NEXT: v_cmp_u_f32_e32 vcc_lo, v1, v1
+; GFX11-TRUE16-NEXT: v_add_nc_u32_e32 v2, v2, v1
+; GFX11-TRUE16-NEXT: s_delay_alu instid0(VALU_DEP_1) | instskip(NEXT) | instid1(VALU_DEP_1)
+; GFX11-TRUE16-NEXT: v_add_nc_u32_e32 v2, 0x7fff, v2
+; GFX11-TRUE16-NEXT: v_cndmask_b32_e32 v1, v2, v3, vcc_lo
+; GFX11-TRUE16-NEXT: s_delay_alu instid0(VALU_DEP_1) | instskip(SKIP_2) | instid1(VALU_DEP_1)
+; GFX11-TRUE16-NEXT: v_lshrrev_b32_e32 v168, 16, v1
+; GFX11-TRUE16-NEXT: v_mov_b16_e64 v168.h, v0.l
+; GFX11-TRUE16-NEXT: v_and_b32_e32 v0, 0xffff0000, v169
+; GFX11-TRUE16-NEXT: v_add_f32_e32 v0, 0x40c00000, v0
+; GFX11-TRUE16-NEXT: s_delay_alu instid0(VALU_DEP_1) | instskip(SKIP_2) | instid1(VALU_DEP_3)
+; GFX11-TRUE16-NEXT: v_bfe_u32 v1, v0, 16, 1
+; GFX11-TRUE16-NEXT: v_or_b32_e32 v2, 0x400000, v0
+; GFX11-TRUE16-NEXT: v_cmp_u_f32_e32 vcc_lo, v0, v0
+; GFX11-TRUE16-NEXT: v_add_nc_u32_e32 v1, v1, v0
+; GFX11-TRUE16-NEXT: s_delay_alu instid0(VALU_DEP_1) | instskip(NEXT) | instid1(VALU_DEP_1)
+; GFX11-TRUE16-NEXT: v_add_nc_u32_e32 v1, 0x7fff, v1
+; GFX11-TRUE16-NEXT: v_dual_cndmask_b32 v0, v1, v2 :: v_dual_lshlrev_b32 v1, 16, v169
+; GFX11-TRUE16-NEXT: s_delay_alu instid0(VALU_DEP_1) | instskip(NEXT) | instid1(VALU_DEP_2)
+; GFX11-TRUE16-NEXT: v_lshrrev_b32_e32 v0, 16, v0
+; GFX11-TRUE16-NEXT: v_add_f32_e32 v1, 0x40c00000, v1
+; GFX11-TRUE16-NEXT: s_delay_alu instid0(VALU_DEP_1) | instskip(SKIP_2) | instid1(VALU_DEP_3)
+; GFX11-TRUE16-NEXT: v_bfe_u32 v2, v1, 16, 1
+; GFX11-TRUE16-NEXT: v_or_b32_e32 v3, 0x400000, v1
+; GFX11-TRUE16-NEXT: v_cmp_u_f32_e32 vcc_lo, v1, v1
+; GFX11-TRUE16-NEXT: v_add_nc_u32_e32 v2, v2, v1
+; GFX11-TRUE16-NEXT: s_delay_alu instid0(VALU_DEP_1) | instskip(NEXT) | instid1(VALU_DEP_1)
+; GFX11-TRUE16-NEXT: v_add_nc_u32_e32 v2, 0x7fff, v2
+; GFX11-TRUE16-NEXT: v_cndmask_b32_e32 v1, v2, v3, vcc_lo
+; GFX11-TRUE16-NEXT: s_delay_alu instid0(VALU_DEP_1) | instskip(SKIP_2) | instid1(VALU_DEP_1)
+; GFX11-TRUE16-NEXT: v_lshrrev_b32_e32 v169, 16, v1
+; GFX11-TRUE16-NEXT: v_mov_b16_e64 v169.h, v0.l
+; GFX11-TRUE16-NEXT: v_and_b32_e32 v0, 0xffff0000, v170
+; GFX11-TRUE16-NEXT: v_add_f32_e32 v0, 0x40c00000, v0
+; GFX11-TRUE16-NEXT: s_delay_alu instid0(VALU_DEP_1) | instskip(SKIP_2) | instid1(VALU_DEP_3)
+; GFX11-TRUE16-NEXT: v_bfe_u32 v1, v0, 16, 1
+; GFX11-TRUE16-NEXT: v_or_b32_e32 v2, 0x400000, v0
+; GFX11-TRUE16-NEXT: v_cmp_u_f32_e32 vcc_lo, v0, v0
+; GFX11-TRUE16-NEXT: v_add_nc_u32_e32 v1, v1, v0
+; GFX11-TRUE16-NEXT: s_delay_alu instid0(VALU_DEP_1) | instskip(NEXT) | instid1(VALU_DEP_1)
+; GFX11-TRUE16-NEXT: v_add_nc_u32_e32 v1, 0x7fff, v1
+; GFX11-TRUE16-NEXT: v_cndmask_b32_e32 v0, v1, v2, vcc_lo
+; GFX11-TRUE16-NEXT: v_lshlrev_b32_e32 v1, 16, v170
+; GFX11-TRUE16-NEXT: s_delay_alu instid0(VALU_DEP_1) | instskip(NEXT) | instid1(VALU_DEP_3)
+; GFX11-TRUE16-NEXT: v_add_f32_e32 v1, 0x40c00000, v1
+; GFX11-TRUE16-NEXT: v_lshrrev_b32_e32 v0, 16, v0
+; GFX11-TRUE16-NEXT: s_delay_alu instid0(VALU_DEP_2) | instskip(SKIP_2) | instid1(VALU_DEP_3)
+; GFX11-TRUE16-NEXT: v_bfe_u32 v2, v1, 16, 1
+; GFX11-TRUE16-NEXT: v_or_b32_e32 v3, 0x400000, v1
+; GFX11-TRUE16-NEXT: v_cmp_u_f32_e32 vcc_lo, v1, v1
+; GFX11-TRUE16-NEXT: v_add_nc_u32_e32 v2, v2, v1
+; GFX11-TRUE16-NEXT: s_delay_alu instid0(VALU_DEP_1) | instskip(NEXT) | instid1(VALU_DEP_1)
+; GFX11-TRUE16-NEXT: v_add_nc_u32_e32 v2, 0x7fff, v2
+; GFX11-TRUE16-NEXT: v_cndmask_b32_e32 v1, v2, v3, vcc_lo
+; GFX11-TRUE16-NEXT: s_delay_alu instid0(VALU_DEP_1) | instskip(SKIP_2) | instid1(VALU_DEP_1)
+; GFX11-TRUE16-NEXT: v_lshrrev_b32_e32 v170, 16, v1
+; GFX11-TRUE16-NEXT: v_mov_b16_e64 v170.h, v0.l
+; GFX11-TRUE16-NEXT: v_and_b32_e32 v0, 0xffff0000, v171
+; GFX11-TRUE16-NEXT: v_add_f32_e32 v0, 0x40c00000, v0
+; GFX11-TRUE16-NEXT: s_delay_alu instid0(VALU_DEP_1) | instskip(SKIP_2) | instid1(VALU_DEP_3)
+; GFX11-TRUE16-NEXT: v_bfe_u32 v1, v0, 16, 1
+; GFX11-TRUE16-NEXT: v_or_b32_e32 v2, 0x400000, v0
+; GFX11-TRUE16-NEXT: v_cmp_u_f32_e32 vcc_lo, v0, v0
+; GFX11-TRUE16-NEXT: v_add_nc_u32_e32 v1, v1, v0
+; GFX11-TRUE16-NEXT: s_delay_alu instid0(VALU_DEP_1) | instskip(NEXT) | instid1(VALU_DEP_1)
+; GFX11-TRUE16-NEXT: v_add_nc_u32_e32 v1, 0x7fff, v1
+; GFX11-TRUE16-NEXT: v_dual_cndmask_b32 v0, v1, v2 :: v_dual_lshlrev_b32 v1, 16, v171
+; GFX11-TRUE16-NEXT: s_delay_alu instid0(VALU_DEP_1) | instskip(NEXT) | instid1(VALU_DEP_2)
+; GFX11-TRUE16-NEXT: v_lshrrev_b32_e32 v0, 16, v0
+; GFX11-TRUE16-NEXT: v_add_f32_e32 v1, 0x40c00000, v1
+; GFX11-TRUE16-NEXT: s_delay_alu instid0(VALU_DEP_1) | instskip(SKIP_2) | instid1(VALU_DEP_3)
+; GFX11-TRUE16-NEXT: v_bfe_u32 v2, v1, 16, 1
+; GFX11-TRUE16-NEXT: v_or_b32_e32 v3, 0x400000, v1
+; GFX11-TRUE16-NEXT: v_cmp_u_f32_e32 vcc_lo, v1, v1
+; GFX11-TRUE16-NEXT: v_add_nc_u32_e32 v2, v2, v1
+; GFX11-TRUE16-NEXT: s_delay_alu instid0(VALU_DEP_1) | instskip(NEXT) | instid1(VALU_DEP_1)
+; GFX11-TRUE16-NEXT: v_add_nc_u32_e32 v2, 0x7fff, v2
+; GFX11-TRUE16-NEXT: v_cndmask_b32_e32 v1, v2, v3, vcc_lo
+; GFX11-TRUE16-NEXT: s_delay_alu instid0(VALU_DEP_1) | instskip(SKIP_2) | instid1(VALU_DEP_1)
+; GFX11-TRUE16-NEXT: v_lshrrev_b32_e32 v171, 16, v1
+; GFX11-TRUE16-NEXT: v_mov_b16_e64 v171.h, v0.l
+; GFX11-TRUE16-NEXT: v_and_b32_e32 v0, 0xffff0000, v172
+; GFX11-TRUE16-NEXT: v_add_f32_e32 v0, 0x40c00000, v0
+; GFX11-TRUE16-NEXT: s_delay_alu instid0(VALU_DEP_1) | instskip(SKIP_2) | instid1(VALU_DEP_3)
+; GFX11-TRUE16-NEXT: v_bfe_u32 v1, v0, 16, 1
+; GFX11-TRUE16-NEXT: v_or_b32_e32 v2, 0x400000, v0
+; GFX11-TRUE16-NEXT: v_cmp_u_f32_e32 vcc_lo, v0, v0
+; GFX11-TRUE16-NEXT: v_add_nc_u32_e32 v1, v1, v0
+; GFX11-TRUE16-NEXT: s_delay_alu instid0(VALU_DEP_1) | instskip(NEXT) | instid1(VALU_DEP_1)
+; GFX11-TRUE16-NEXT: v_add_nc_u32_e32 v1, 0x7fff, v1
+; GFX11-TRUE16-NEXT: v_dual_cndmask_b32 v0, v1, v2 :: v_dual_lshlrev_b32 v1, 16, v172
+; GFX11-TRUE16-NEXT: s_delay_alu instid0(VALU_DEP_1) | instskip(NEXT) | instid1(VALU_DEP_2)
+; GFX11-TRUE16-NEXT: v_lshrrev_b32_e32 v0, 16, v0
+; GFX11-TRUE16-NEXT: v_add_f32_e32 v1, 0x40c00000, v1
+; GFX11-TRUE16-NEXT: s_delay_alu instid0(VALU_DEP_1) | instskip(SKIP_2) | instid1(VALU_DEP_3)
+; GFX11-TRUE16-NEXT: v_bfe_u32 v2, v1, 16, 1
+; GFX11-TRUE16-NEXT: v_or_b32_e32 v3, 0x400000, v1
+; GFX11-TRUE16-NEXT: v_cmp_u_f32_e32 vcc_lo, v1, v1
+; GFX11-TRUE16-NEXT: v_add_nc_u32_e32 v2, v2, v1
+; GFX11-TRUE16-NEXT: s_delay_alu instid0(VALU_DEP_1) | instskip(NEXT) | instid1(VALU_DEP_1)
+; GFX11-TRUE16-NEXT: v_add_nc_u32_e32 v2, 0x7fff, v2
+; GFX11-TRUE16-NEXT: v_cndmask_b32_e32 v1, v2, v3, vcc_lo
+; GFX11-TRUE16-NEXT: s_delay_alu instid0(VALU_DEP_1) | instskip(SKIP_2) | instid1(VALU_DEP_1)
+; GFX11-TRUE16-NEXT: v_lshrrev_b32_e32 v172, 16, v1
+; GFX11-TRUE16-NEXT: v_mov_b16_e64 v172.h, v0.l
+; GFX11-TRUE16-NEXT: v_and_b32_e32 v0, 0xffff0000, v173
+; GFX11-TRUE16-NEXT: v_add_f32_e32 v0, 0x40c00000, v0
+; GFX11-TRUE16-NEXT: s_delay_alu instid0(VALU_DEP_1) | instskip(SKIP_2) | instid1(VALU_DEP_3)
+; GFX11-TRUE16-NEXT: v_bfe_u32 v1, v0, 16, 1
+; GFX11-TRUE16-NEXT: v_or_b32_e32 v2, 0x400000, v0
+; GFX11-TRUE16-NEXT: v_cmp_u_f32_e32 vcc_lo, v0, v0
+; GFX11-TRUE16-NEXT: v_add_nc_u32_e32 v1, v1, v0
+; GFX11-TRUE16-NEXT: s_delay_alu instid0(VALU_DEP_1) | instskip(NEXT) | instid1(VALU_DEP_1)
+; GFX11-TRUE16-NEXT: v_add_nc_u32_e32 v1, 0x7fff, v1
+; GFX11-TRUE16-NEXT: v_dual_cndmask_b32 v0, v1, v2 :: v_dual_lshlrev_b32 v1, 16, v173
+; GFX11-TRUE16-NEXT: s_delay_alu instid0(VALU_DEP_1) | instskip(NEXT) | instid1(VALU_DEP_2)
+; GFX11-TRUE16-NEXT: v_lshrrev_b32_e32 v0, 16, v0
+; GFX11-TRUE16-NEXT: v_add_f32_e32 v1, 0x40c00000, v1
+; GFX11-TRUE16-NEXT: s_delay_alu instid0(VALU_DEP_1) | instskip(SKIP_2) | instid1(VALU_DEP_3)
+; GFX11-TRUE16-NEXT: v_bfe_u32 v2, v1, 16, 1
+; GFX11-TRUE16-NEXT: v_or_b32_e32 v3, 0x400000, v1
+; GFX11-TRUE16-NEXT: v_cmp_u_f32_e32 vcc_lo, v1, v1
+; GFX11-TRUE16-NEXT: v_add_nc_u32_e32 v2, v2, v1
+; GFX11-TRUE16-NEXT: s_delay_alu instid0(VALU_DEP_1) | instskip(NEXT) | instid1(VALU_DEP_1)
+; GFX11-TRUE16-NEXT: v_add_nc_u32_e32 v2, 0x7fff, v2
+; GFX11-TRUE16-NEXT: v_cndmask_b32_e32 v1, v2, v3, vcc_lo
+; GFX11-TRUE16-NEXT: s_delay_alu instid0(VALU_DEP_1) | instskip(SKIP_2) | instid1(VALU_DEP_1)
+; GFX11-TRUE16-NEXT: v_lshrrev_b32_e32 v173, 16, v1
+; GFX11-TRUE16-NEXT: v_mov_b16_e64 v173.h, v0.l
+; GFX11-TRUE16-NEXT: v_and_b32_e32 v0, 0xffff0000, v174
+; GFX11-TRUE16-NEXT: v_add_f32_e32 v0, 0x40c00000, v0
+; GFX11-TRUE16-NEXT: s_delay_alu instid0(VALU_DEP_1) | instskip(SKIP_2) | instid1(VALU_DEP_3)
+; GFX11-TRUE16-NEXT: v_bfe_u32 v1, v0, 16, 1
+; GFX11-TRUE16-NEXT: v_or_b32_e32 v2, 0x400000, v0
+; GFX11-TRUE16-NEXT: v_cmp_u_f32_e32 vcc_lo, v0, v0
+; GFX11-TRUE16-NEXT: v_add_nc_u32_e32 v1, v1, v0
+; GFX11-TRUE16-NEXT: s_delay_alu instid0(VALU_DEP_1) | instskip(NEXT) | instid1(VALU_DEP_1)
+; GFX11-TRUE16-NEXT: v_add_nc_u32_e32 v1, 0x7fff, v1
+; GFX11-TRUE16-NEXT: v_cndmask_b32_e32 v0, v1, v2, vcc_lo
+; GFX11-TRUE16-NEXT: v_lshlrev_b32_e32 v1, 16, v174
+; GFX11-TRUE16-NEXT: s_delay_alu instid0(VALU_DEP_1) | instskip(NEXT) | instid1(VALU_DEP_3)
+; GFX11-TRUE16-NEXT: v_add_f32_e32 v1, 0x40c00000, v1
+; GFX11-TRUE16-NEXT: v_lshrrev_b32_e32 v0, 16, v0
+; GFX11-TRUE16-NEXT: s_delay_alu instid0(VALU_DEP_2) | instskip(SKIP_2) | instid1(VALU_DEP_3)
+; GFX11-TRUE16-NEXT: v_bfe_u32 v2, v1, 16, 1
+; GFX11-TRUE16-NEXT: v_or_b32_e32 v3, 0x400000, v1
+; GFX11-TRUE16-NEXT: v_cmp_u_f32_e32 vcc_lo, v1, v1
+; GFX11-TRUE16-NEXT: v_add_nc_u32_e32 v2, v2, v1
+; GFX11-TRUE16-NEXT: s_delay_alu instid0(VALU_DEP_1) | instskip(NEXT) | instid1(VALU_DEP_1)
+; GFX11-TRUE16-NEXT: v_add_nc_u32_e32 v2, 0x7fff, v2
+; GFX11-TRUE16-NEXT: v_cndmask_b32_e32 v1, v2, v3, vcc_lo
+; GFX11-TRUE16-NEXT: s_delay_alu instid0(VALU_DEP_1)
+; GFX11-TRUE16-NEXT: v_lshrrev_b32_e32 v174, 16, v1
+; GFX11-TRUE16-NEXT: v_mov_b16_e64 v174.h, v0.l
+; GFX11-TRUE16-NEXT: .LBB63_3: ; %end
+; GFX11-TRUE16-NEXT: v_dual_mov_b32 v4, v125 :: v_dual_mov_b32 v5, v120
+; GFX11-TRUE16-NEXT: v_dual_mov_b32 v6, v114 :: v_dual_mov_b32 v7, v107
+; GFX11-TRUE16-NEXT: v_dual_mov_b32 v8, v99 :: v_dual_mov_b32 v9, v90
+; GFX11-TRUE16-NEXT: v_dual_mov_b32 v12, v57 :: v_dual_mov_b32 v13, v44
+; GFX11-TRUE16-NEXT: v_dual_mov_b32 v14, v30 :: v_dual_mov_b32 v17, v173
+; GFX11-TRUE16-NEXT: v_dual_mov_b32 v16, v174 :: v_dual_mov_b32 v19, v171
+; GFX11-TRUE16-NEXT: v_dual_mov_b32 v18, v172 :: v_dual_mov_b32 v21, v169
+; GFX11-TRUE16-NEXT: v_dual_mov_b32 v20, v170 :: v_dual_mov_b32 v23, v183
+; GFX11-TRUE16-NEXT: v_dual_mov_b32 v22, v168 :: v_dual_mov_b32 v25, v181
+; GFX11-TRUE16-NEXT: s_clause 0x1f
+; GFX11-TRUE16-NEXT: scratch_load_b32 v174, off, s32
+; GFX11-TRUE16-NEXT: scratch_load_b32 v173, off, s32 offset:4
+; GFX11-TRUE16-NEXT: scratch_load_b32 v172, off, s32 offset:8
+; GFX11-TRUE16-NEXT: scratch_load_b32 v171, off, s32 offset:12
+; GFX11-TRUE16-NEXT: scratch_load_b32 v170, off, s32 offset:16
+; GFX11-TRUE16-NEXT: scratch_load_b32 v169, off, s32 offset:20
+; GFX11-TRUE16-NEXT: scratch_load_b32 v168, off, s32 offset:24
+; GFX11-TRUE16-NEXT: scratch_load_b32 v159, off, s32 offset:28
+; GFX11-TRUE16-NEXT: scratch_load_b32 v158, off, s32 offset:32
+; GFX11-TRUE16-NEXT: scratch_load_b32 v157, off, s32 offset:36
+; GFX11-TRUE16-NEXT: scratch_load_b32 v156, off, s32 offset:40
+; GFX11-TRUE16-NEXT: scratch_load_b32 v155, off, s32 offset:44
+; GFX11-TRUE16-NEXT: scratch_load_b32 v154, off, s32 offset:48
+; GFX11-TRUE16-NEXT: scratch_load_b32 v153, off, s32 offset:52
+; GFX11-TRUE16-NEXT: scratch_load_b32 v152, off, s32 offset:56
+; GFX11-TRUE16-NEXT: scratch_load_b32 v143, off, s32 offset:60
+; GFX11-TRUE16-NEXT: scratch_load_b32 v142, off, s32 offset:64
+; GFX11-TRUE16-NEXT: scratch_load_b32 v141, off, s32 offset:68
+; GFX11-TRUE16-NEXT: scratch_load_b32 v140, off, s32 offset:72
+; GFX11-TRUE16-NEXT: scratch_load_b32 v139, off, s32 offset:76
+; GFX11-TRUE16-NEXT: scratch_load_b32 v138, off, s32 offset:80
+; GFX11-TRUE16-NEXT: scratch_load_b32 v137, off, s32 offset:84
+; GFX11-TRUE16-NEXT: scratch_load_b32 v136, off, s32 offset:88
+; GFX11-TRUE16-NEXT: scratch_load_b32 v127, off, s32 offset:92
+; GFX11-TRUE16-NEXT: scratch_load_b32 v126, off, s32 offset:96
+; GFX11-TRUE16-NEXT: scratch_load_b32 v125, off, s32 offset:100
+; GFX11-TRUE16-NEXT: scratch_load_b32 v124, off, s32 offset:104
+; GFX11-TRUE16-NEXT: scratch_load_b32 v123, off, s32 offset:108
+; GFX11-TRUE16-NEXT: scratch_load_b32 v122, off, s32 offset:112
+; GFX11-TRUE16-NEXT: scratch_load_b32 v121, off, s32 offset:116
+; GFX11-TRUE16-NEXT: scratch_load_b32 v120, off, s32 offset:120
+; GFX11-TRUE16-NEXT: scratch_load_b32 v111, off, s32 offset:124
+; GFX11-TRUE16-NEXT: s_clause 0x1f
+; GFX11-TRUE16-NEXT: scratch_load_b32 v110, off, s32 offset:128
+; GFX11-TRUE16-NEXT: scratch_load_b32 v109, off, s32 offset:132
+; GFX11-TRUE16-NEXT: scratch_load_b32 v108, off, s32 offset:136
+; GFX11-TRUE16-NEXT: scratch_load_b32 v107, off, s32 offset:140
+; GFX11-TRUE16-NEXT: scratch_load_b32 v106, off, s32 offset:144
+; GFX11-TRUE16-NEXT: scratch_load_b32 v105, off, s32 offset:148
+; GFX11-TRUE16-NEXT: scratch_load_b32 v104, off, s32 offset:152
+; GFX11-TRUE16-NEXT: scratch_load_b32 v95, off, s32 offset:156
+; GFX11-TRUE16-NEXT: scratch_load_b32 v94, off, s32 offset:160
+; GFX11-TRUE16-NEXT: scratch_load_b32 v93, off, s32 offset:164
+; GFX11-TRUE16-NEXT: scratch_load_b32 v92, off, s32 offset:168
+; GFX11-TRUE16-NEXT: scratch_load_b32 v91, off, s32 offset:172
+; GFX11-TRUE16-NEXT: scratch_load_b32 v90, off, s32 offset:176
+; GFX11-TRUE16-NEXT: scratch_load_b32 v89, off, s32 offset:180
+; GFX11-TRUE16-NEXT: scratch_load_b32 v88, off, s32 offset:184
+; GFX11-TRUE16-NEXT: scratch_load_b32 v79, off, s32 offset:188
+; GFX11-TRUE16-NEXT: scratch_load_b32 v78, off, s32 offset:192
+; GFX11-TRUE16-NEXT: scratch_load_b32 v77, off, s32 offset:196
+; GFX11-TRUE16-NEXT: scratch_load_b32 v76, off, s32 offset:200
+; GFX11-TRUE16-NEXT: scratch_load_b32 v75, off, s32 offset:204
+; GFX11-TRUE16-NEXT: scratch_load_b32 v74, off, s32 offset:208
+; GFX11-TRUE16-NEXT: scratch_load_b32 v73, off, s32 offset:212
+; GFX11-TRUE16-NEXT: scratch_load_b32 v72, off, s32 offset:216
+; GFX11-TRUE16-NEXT: scratch_load_b32 v63, off, s32 offset:220
+; GFX11-TRUE16-NEXT: scratch_load_b32 v62, off, s32 offset:224
+; GFX11-TRUE16-NEXT: scratch_load_b32 v61, off, s32 offset:228
+; GFX11-TRUE16-NEXT: scratch_load_b32 v60, off, s32 offset:232
+; GFX11-TRUE16-NEXT: scratch_load_b32 v59, off, s32 offset:236
+; GFX11-TRUE16-NEXT: scratch_load_b32 v58, off, s32 offset:240
+; GFX11-TRUE16-NEXT: scratch_load_b32 v57, off, s32 offset:244
+; GFX11-TRUE16-NEXT: scratch_load_b32 v56, off, s32 offset:248
+; GFX11-TRUE16-NEXT: scratch_load_b32 v47, off, s32 offset:252
+; GFX11-TRUE16-NEXT: s_clause 0x6
+; GFX11-TRUE16-NEXT: scratch_load_b32 v46, off, s32 offset:256
+; GFX11-TRUE16-NEXT: scratch_load_b32 v45, off, s32 offset:260
+; GFX11-TRUE16-NEXT: scratch_load_b32 v44, off, s32 offset:264
+; GFX11-TRUE16-NEXT: scratch_load_b32 v43, off, s32 offset:268
+; GFX11-TRUE16-NEXT: scratch_load_b32 v42, off, s32 offset:272
+; GFX11-TRUE16-NEXT: scratch_load_b32 v41, off, s32 offset:276
+; GFX11-TRUE16-NEXT: scratch_load_b32 v40, off, s32 offset:280
+; GFX11-TRUE16-NEXT: v_dual_mov_b32 v0, v135 :: v_dual_mov_b32 v1, v134
+; GFX11-TRUE16-NEXT: v_dual_mov_b32 v2, v132 :: v_dual_mov_b32 v3, v129
+; GFX11-TRUE16-NEXT: v_dual_mov_b32 v10, v80 :: v_dual_mov_b32 v11, v69
+; GFX11-TRUE16-NEXT: v_dual_mov_b32 v24, v182 :: v_dual_mov_b32 v27, v179
+; GFX11-TRUE16-NEXT: v_dual_mov_b32 v26, v180 :: v_dual_mov_b32 v29, v177
+; GFX11-TRUE16-NEXT: v_dual_mov_b32 v28, v178 :: v_dual_mov_b32 v31, v167
+; GFX11-TRUE16-NEXT: v_mov_b32_e32 v30, v176
+; GFX11-TRUE16-NEXT: s_waitcnt vmcnt(0)
+; GFX11-TRUE16-NEXT: s_setpc_b64 s[30:31]
+; GFX11-TRUE16-NEXT: .LBB63_4:
+; GFX11-TRUE16-NEXT: ; implicit-def: $vgpr0_vgpr1_vgpr2_vgpr3_vgpr4_vgpr5_vgpr6_vgpr7_vgpr8_vgpr9_vgpr10_vgpr11_vgpr12_vgpr13_vgpr14_vgpr15_vgpr16_vgpr17_vgpr18_vgpr19_vgpr20_vgpr21_vgpr22_vgpr23_vgpr24_vgpr25_vgpr26_vgpr27_vgpr28_vgpr29_vgpr30_vgpr31
+; GFX11-TRUE16-NEXT: ; implicit-def: $vgpr16_vgpr17_vgpr18_vgpr19_vgpr20_vgpr21_vgpr22_vgpr23_vgpr24_vgpr25_vgpr26_vgpr27_vgpr28_vgpr29_vgpr30_vgpr31_vgpr32_vgpr33_vgpr34_vgpr35_vgpr36_vgpr37_vgpr38_vgpr39_vgpr40_vgpr41_vgpr42_vgpr43_vgpr44_vgpr45_vgpr46_vgpr47
+; GFX11-TRUE16-NEXT: ; implicit-def: $vgpr31_vgpr32_vgpr33_vgpr34_vgpr35_vgpr36_vgpr37_vgpr38_vgpr39_vgpr40_vgpr41_vgpr42_vgpr43_vgpr44_vgpr45_vgpr46_vgpr47_vgpr48_vgpr49_vgpr50_vgpr51_vgpr52_vgpr53_vgpr54_vgpr55_vgpr56_vgpr57_vgpr58_vgpr59_vgpr60_vgpr61_vgpr62
+; GFX11-TRUE16-NEXT: ; implicit-def: $vgpr45_vgpr46_vgpr47_vgpr48_vgpr49_vgpr50_vgpr51_vgpr52_vgpr53_vgpr54_vgpr55_vgpr56_vgpr57_vgpr58_vgpr59_vgpr60_vgpr61_vgpr62_vgpr63_vgpr64_vgpr65_vgpr66_vgpr67_vgpr68_vgpr69_vgpr70_vgpr71_vgpr72_vgpr73_vgpr74_vgpr75_vgpr76
+; GFX11-TRUE16-NEXT: ; implicit-def: $vgpr58_vgpr59_vgpr60_vgpr61_vgpr62_vgpr63_vgpr64_vgpr65_vgpr66_vgpr67_vgpr68_vgpr69_vgpr70_vgpr71_vgpr72_vgpr73_vgpr74_vgpr75_vgpr76_vgpr77_vgpr78_vgpr79_vgpr80_vgpr81_vgpr82_vgpr83_vgpr84_vgpr85_vgpr86_vgpr87_vgpr88_vgpr89
+; GFX11-TRUE16-NEXT: ; implicit-def: $vgpr70_vgpr71_vgpr72_vgpr73_vgpr74_vgpr75_vgpr76_vgpr77_vgpr78_vgpr79_vgpr80_vgpr81_vgpr82_vgpr83_vgpr84_vgpr85_vgpr86_vgpr87_vgpr88_vgpr89_vgpr90_vgpr91_vgpr92_vgpr93_vgpr94_vgpr95_vgpr96_vgpr97_vgpr98_vgpr99_vgpr100_vgpr101
+; GFX11-TRUE16-NEXT: ; implicit-def: $vgpr81_vgpr82_vgpr83_vgpr84_vgpr85_vgpr86_vgpr87_vgpr88_vgpr89_vgpr90_vgpr91_vgpr92_vgpr93_vgpr94_vgpr95_vgpr96_vgpr97_vgpr98_vgpr99_vgpr100_vgpr101_vgpr102_vgpr103_vgpr104_vgpr105_vgpr106_vgpr107_vgpr108_vgpr109_vgpr110_vgpr111_vgpr112
+; GFX11-TRUE16-NEXT: ; implicit-def: $vgpr91_vgpr92_vgpr93_vgpr94_vgpr95_vgpr96_vgpr97_vgpr98_vgpr99_vgpr100_vgpr101_vgpr102_vgpr103_vgpr104_vgpr105_vgpr106_vgpr107_vgpr108_vgpr109_vgpr110_vgpr111_vgpr112_vgpr113_vgpr114_vgpr115_vgpr116_vgpr117_vgpr118_vgpr119_vgpr120_vgpr121_vgpr122
+; GFX11-TRUE16-NEXT: ; implicit-def: $vgpr100_vgpr101_vgpr102_vgpr103_vgpr104_vgpr105_vgpr106_vgpr107_vgpr108_vgpr109_vgpr110_vgpr111_vgpr112_vgpr113_vgpr114_vgpr115_vgpr116_vgpr117_vgpr118_vgpr119_vgpr120_vgpr121_vgpr122_vgpr123_vgpr124_vgpr125_vgpr126_vgpr127_vgpr128_vgpr129_vgpr130_vgpr131
+; GFX11-TRUE16-NEXT: ; implicit-def: $vgpr108_vgpr109_vgpr110_vgpr111_vgpr112_vgpr113_vgpr114_vgpr115_vgpr116_vgpr117_vgpr118_vgpr119_vgpr120_vgpr121_vgpr122_vgpr123_vgpr124_vgpr125_vgpr126_vgpr127_vgpr128_vgpr129_vgpr130_vgpr131_vgpr132_vgpr133_vgpr134_vgpr135_vgpr136_vgpr137_vgpr138_vgpr139
+; GFX11-TRUE16-NEXT: ; implicit-def: $vgpr115_vgpr116_vgpr117_vgpr118_vgpr119_vgpr120_vgpr121_vgpr122_vgpr123_vgpr124_vgpr125_vgpr126_vgpr127_vgpr128_vgpr129_vgpr130_vgpr131_vgpr132_vgpr133_vgpr134_vgpr135_vgpr136_vgpr137_vgpr138_vgpr139_vgpr140_vgpr141_vgpr142_vgpr143_vgpr144_vgpr145_vgpr146
+; GFX11-TRUE16-NEXT: ; implicit-def: $vgpr121_vgpr122_vgpr123_vgpr124_vgpr125_vgpr126_vgpr127_vgpr128_vgpr129_vgpr130_vgpr131_vgpr132_vgpr133_vgpr134_vgpr135_vgpr136_vgpr137_vgpr138_vgpr139_vgpr140_vgpr141_vgpr142_vgpr143_vgpr144_vgpr145_vgpr146_vgpr147_vgpr148_vgpr149_vgpr150_vgpr151_vgpr152
+; GFX11-TRUE16-NEXT: ; implicit-def: $vgpr126_vgpr127_vgpr128_vgpr129_vgpr130_vgpr131_vgpr132_vgpr133_vgpr134_vgpr135_vgpr136_vgpr137_vgpr138_vgpr139_vgpr140_vgpr141_vgpr142_vgpr143_vgpr144_vgpr145_vgpr146_vgpr147_vgpr148_vgpr149_vgpr150_vgpr151_vgpr152_vgpr153_vgpr154_vgpr155_vgpr156_vgpr157
+; GFX11-TRUE16-NEXT: ; implicit-def: $vgpr130_vgpr131_vgpr132_vgpr133_vgpr134_vgpr135_vgpr136_vgpr137_vgpr138_vgpr139_vgpr140_vgpr141_vgpr142_vgpr143_vgpr144_vgpr145_vgpr146_vgpr147_vgpr148_vgpr149_vgpr150_vgpr151_vgpr152_vgpr153_vgpr154_vgpr155_vgpr156_vgpr157_vgpr158_vgpr159_vgpr160_vgpr161
+; GFX11-TRUE16-NEXT: ; implicit-def: $vgpr133_vgpr134_vgpr135_vgpr136_vgpr137_vgpr138_vgpr139_vgpr140_vgpr141_vgpr142_vgpr143_vgpr144_vgpr145_vgpr146_vgpr147_vgpr148_vgpr149_vgpr150_vgpr151_vgpr152_vgpr153_vgpr154_vgpr155_vgpr156_vgpr157_vgpr158_vgpr159_vgpr160_vgpr161_vgpr162_vgpr163_vgpr164
+; GFX11-TRUE16-NEXT: ; implicit-def: $vgpr135_vgpr136_vgpr137_vgpr138_vgpr139_vgpr140_vgpr141_vgpr142_vgpr143_vgpr144_vgpr145_vgpr146_vgpr147_vgpr148_vgpr149_vgpr150_vgpr151_vgpr152_vgpr153_vgpr154_vgpr155_vgpr156_vgpr157_vgpr158_vgpr159_vgpr160_vgpr161_vgpr162_vgpr163_vgpr164_vgpr165_vgpr166
+; GFX11-TRUE16-NEXT: s_branch .LBB63_2
+;
+; GFX11-FAKE16-LABEL: bitcast_v64bf16_to_v16i64_scalar:
+; GFX11-FAKE16: ; %bb.0:
+; GFX11-FAKE16-NEXT: s_waitcnt vmcnt(0) expcnt(0) lgkmcnt(0)
+; GFX11-FAKE16-NEXT: v_cmp_ne_u32_e32 vcc_lo, 0, v14
+; GFX11-FAKE16-NEXT: s_clause 0x1f
+; GFX11-FAKE16-NEXT: scratch_store_b32 off, v40, s32 offset:288
+; GFX11-FAKE16-NEXT: scratch_store_b32 off, v41, s32 offset:284
+; GFX11-FAKE16-NEXT: scratch_store_b32 off, v42, s32 offset:280
+; GFX11-FAKE16-NEXT: scratch_store_b32 off, v43, s32 offset:276
+; GFX11-FAKE16-NEXT: scratch_store_b32 off, v44, s32 offset:272
+; GFX11-FAKE16-NEXT: scratch_store_b32 off, v45, s32 offset:268
+; GFX11-FAKE16-NEXT: scratch_store_b32 off, v46, s32 offset:264
+; GFX11-FAKE16-NEXT: scratch_store_b32 off, v47, s32 offset:260
+; GFX11-FAKE16-NEXT: scratch_store_b32 off, v56, s32 offset:256
+; GFX11-FAKE16-NEXT: scratch_store_b32 off, v57, s32 offset:252
+; GFX11-FAKE16-NEXT: scratch_store_b32 off, v58, s32 offset:248
+; GFX11-FAKE16-NEXT: scratch_store_b32 off, v59, s32 offset:244
+; GFX11-FAKE16-NEXT: scratch_store_b32 off, v60, s32 offset:240
+; GFX11-FAKE16-NEXT: scratch_store_b32 off, v61, s32 offset:236
+; GFX11-FAKE16-NEXT: scratch_store_b32 off, v62, s32 offset:232
+; GFX11-FAKE16-NEXT: scratch_store_b32 off, v63, s32 offset:228
+; GFX11-FAKE16-NEXT: scratch_store_b32 off, v72, s32 offset:224
+; GFX11-FAKE16-NEXT: scratch_store_b32 off, v73, s32 offset:220
+; GFX11-FAKE16-NEXT: scratch_store_b32 off, v74, s32 offset:216
+; GFX11-FAKE16-NEXT: scratch_store_b32 off, v75, s32 offset:212
+; GFX11-FAKE16-NEXT: scratch_store_b32 off, v76, s32 offset:208
+; GFX11-FAKE16-NEXT: scratch_store_b32 off, v77, s32 offset:204
+; GFX11-FAKE16-NEXT: scratch_store_b32 off, v78, s32 offset:200
+; GFX11-FAKE16-NEXT: scratch_store_b32 off, v79, s32 offset:196
+; GFX11-FAKE16-NEXT: scratch_store_b32 off, v88, s32 offset:192
+; GFX11-FAKE16-NEXT: scratch_store_b32 off, v89, s32 offset:188
+; GFX11-FAKE16-NEXT: scratch_store_b32 off, v90, s32 offset:184
+; GFX11-FAKE16-NEXT: scratch_store_b32 off, v91, s32 offset:180
+; GFX11-FAKE16-NEXT: scratch_store_b32 off, v92, s32 offset:176
+; GFX11-FAKE16-NEXT: scratch_store_b32 off, v93, s32 offset:172
+; GFX11-FAKE16-NEXT: scratch_store_b32 off, v94, s32 offset:168
+; GFX11-FAKE16-NEXT: scratch_store_b32 off, v95, s32 offset:164
+; GFX11-FAKE16-NEXT: s_clause 0x1f
+; GFX11-FAKE16-NEXT: scratch_store_b32 off, v104, s32 offset:160
+; GFX11-FAKE16-NEXT: scratch_store_b32 off, v105, s32 offset:156
+; GFX11-FAKE16-NEXT: scratch_store_b32 off, v106, s32 offset:152
+; GFX11-FAKE16-NEXT: scratch_store_b32 off, v107, s32 offset:148
+; GFX11-FAKE16-NEXT: scratch_store_b32 off, v108, s32 offset:144
+; GFX11-FAKE16-NEXT: scratch_store_b32 off, v109, s32 offset:140
+; GFX11-FAKE16-NEXT: scratch_store_b32 off, v110, s32 offset:136
+; GFX11-FAKE16-NEXT: scratch_store_b32 off, v111, s32 offset:132
+; GFX11-FAKE16-NEXT: scratch_store_b32 off, v120, s32 offset:128
+; GFX11-FAKE16-NEXT: scratch_store_b32 off, v121, s32 offset:124
+; GFX11-FAKE16-NEXT: scratch_store_b32 off, v122, s32 offset:120
+; GFX11-FAKE16-NEXT: scratch_store_b32 off, v123, s32 offset:116
+; GFX11-FAKE16-NEXT: scratch_store_b32 off, v124, s32 offset:112
+; GFX11-FAKE16-NEXT: scratch_store_b32 off, v125, s32 offset:108
+; GFX11-FAKE16-NEXT: scratch_store_b32 off, v126, s32 offset:104
+; GFX11-FAKE16-NEXT: scratch_store_b32 off, v127, s32 offset:100
+; GFX11-FAKE16-NEXT: scratch_store_b32 off, v136, s32 offset:96
+; GFX11-FAKE16-NEXT: scratch_store_b32 off, v137, s32 offset:92
+; GFX11-FAKE16-NEXT: scratch_store_b32 off, v138, s32 offset:88
+; GFX11-FAKE16-NEXT: scratch_store_b32 off, v139, s32 offset:84
+; GFX11-FAKE16-NEXT: scratch_store_b32 off, v140, s32 offset:80
+; GFX11-FAKE16-NEXT: scratch_store_b32 off, v141, s32 offset:76
+; GFX11-FAKE16-NEXT: scratch_store_b32 off, v142, s32 offset:72
+; GFX11-FAKE16-NEXT: scratch_store_b32 off, v143, s32 offset:68
+; GFX11-FAKE16-NEXT: scratch_store_b32 off, v152, s32 offset:64
+; GFX11-FAKE16-NEXT: scratch_store_b32 off, v153, s32 offset:60
+; GFX11-FAKE16-NEXT: scratch_store_b32 off, v154, s32 offset:56
+; GFX11-FAKE16-NEXT: scratch_store_b32 off, v155, s32 offset:52
+; GFX11-FAKE16-NEXT: scratch_store_b32 off, v156, s32 offset:48
+; GFX11-FAKE16-NEXT: scratch_store_b32 off, v157, s32 offset:44
+; GFX11-FAKE16-NEXT: scratch_store_b32 off, v158, s32 offset:40
+; GFX11-FAKE16-NEXT: scratch_store_b32 off, v159, s32 offset:36
+; GFX11-FAKE16-NEXT: s_clause 0x8
+; GFX11-FAKE16-NEXT: scratch_store_b32 off, v168, s32 offset:32
+; GFX11-FAKE16-NEXT: scratch_store_b32 off, v169, s32 offset:28
+; GFX11-FAKE16-NEXT: scratch_store_b32 off, v170, s32 offset:24
+; GFX11-FAKE16-NEXT: scratch_store_b32 off, v171, s32 offset:20
+; GFX11-FAKE16-NEXT: scratch_store_b32 off, v172, s32 offset:16
+; GFX11-FAKE16-NEXT: scratch_store_b32 off, v173, s32 offset:12
+; GFX11-FAKE16-NEXT: scratch_store_b32 off, v174, s32 offset:8
+; GFX11-FAKE16-NEXT: scratch_store_b32 off, v175, s32 offset:4
+; GFX11-FAKE16-NEXT: scratch_store_b32 off, v184, s32
+; GFX11-FAKE16-NEXT: v_dual_mov_b32 v178, v13 :: v_dual_mov_b32 v179, v12
+; GFX11-FAKE16-NEXT: v_dual_mov_b32 v180, v11 :: v_dual_mov_b32 v181, v9
+; GFX11-FAKE16-NEXT: v_dual_mov_b32 v182, v10 :: v_dual_mov_b32 v169, v7
+; GFX11-FAKE16-NEXT: v_dual_mov_b32 v170, v8 :: v_dual_mov_b32 v177, v3
+; GFX11-FAKE16-NEXT: v_dual_mov_b32 v176, v6 :: v_dual_mov_b32 v171, v4
+; GFX11-FAKE16-NEXT: v_dual_mov_b32 v174, v5 :: v_dual_mov_b32 v173, v0
+; GFX11-FAKE16-NEXT: v_dual_mov_b32 v184, v2 :: v_dual_mov_b32 v175, v1
+; GFX11-FAKE16-NEXT: v_dual_mov_b32 v183, s28 :: v_dual_mov_b32 v172, s29
+; GFX11-FAKE16-NEXT: s_mov_b32 s4, 0
+; GFX11-FAKE16-NEXT: s_and_b32 s5, vcc_lo, exec_lo
+; GFX11-FAKE16-NEXT: s_cbranch_scc0 .LBB63_4
+; GFX11-FAKE16-NEXT: ; %bb.1: ; %cmp.false
+; GFX11-FAKE16-NEXT: v_dual_mov_b32 v32, s0 :: v_dual_mov_b32 v37, s2
+; GFX11-FAKE16-NEXT: v_dual_mov_b32 v34, s1 :: v_dual_mov_b32 v41, s3
+; GFX11-FAKE16-NEXT: v_dual_mov_b32 v46, s16 :: v_dual_mov_b32 v59, s18
+; GFX11-FAKE16-NEXT: v_dual_mov_b32 v52, s17 :: v_dual_mov_b32 v67, s19
+; GFX11-FAKE16-NEXT: v_dual_mov_b32 v76, s20 :: v_dual_mov_b32 v97, s22
+; GFX11-FAKE16-NEXT: v_dual_mov_b32 v86, s21 :: v_dual_mov_b32 v109, s23
+; GFX11-FAKE16-NEXT: v_dual_mov_b32 v122, s24 :: v_dual_mov_b32 v151, s26
+; GFX11-FAKE16-NEXT: v_dual_mov_b32 v136, s25 :: v_dual_mov_b32 v15, s27
+; GFX11-FAKE16-NEXT: s_and_not1_b32 vcc_lo, exec_lo, s4
+; GFX11-FAKE16-NEXT: s_cbranch_vccnz .LBB63_3
+; GFX11-FAKE16-NEXT: .LBB63_2: ; %cmp.true
+; GFX11-FAKE16-NEXT: s_and_b32 s5, s27, 0xffff0000
+; GFX11-FAKE16-NEXT: s_lshl_b32 s4, s27, 16
+; GFX11-FAKE16-NEXT: v_add_f32_e64 v1, 0x40c00000, s5
+; GFX11-FAKE16-NEXT: v_add_f32_e64 v0, 0x40c00000, s4
+; GFX11-FAKE16-NEXT: s_lshl_b32 s6, s26, 16
+; GFX11-FAKE16-NEXT: s_and_b32 s4, s26, 0xffff0000
+; GFX11-FAKE16-NEXT: v_add_f32_e64 v5, 0x40c00000, s6
+; GFX11-FAKE16-NEXT: v_bfe_u32 v4, v1, 16, 1
+; GFX11-FAKE16-NEXT: v_bfe_u32 v2, v0, 16, 1
+; GFX11-FAKE16-NEXT: v_or_b32_e32 v7, 0x400000, v1
+; GFX11-FAKE16-NEXT: v_add_f32_e64 v3, 0x40c00000, s4
+; GFX11-FAKE16-NEXT: v_or_b32_e32 v8, 0x400000, v0
+; GFX11-FAKE16-NEXT: v_add_nc_u32_e32 v4, v4, v1
+; GFX11-FAKE16-NEXT: v_bfe_u32 v10, v5, 16, 1
+; GFX11-FAKE16-NEXT: v_cmp_u_f32_e32 vcc_lo, v0, v0
+; GFX11-FAKE16-NEXT: v_bfe_u32 v9, v3, 16, 1
+; GFX11-FAKE16-NEXT: s_lshl_b32 s7, s25, 16
+; GFX11-FAKE16-NEXT: v_add_nc_u32_e32 v4, 0x7fff, v4
+; GFX11-FAKE16-NEXT: v_add_nc_u32_e32 v2, v2, v0
+; GFX11-FAKE16-NEXT: s_and_b32 s5, s25, 0xffff0000
+; GFX11-FAKE16-NEXT: s_and_b32 s4, s24, 0xffff0000
+; GFX11-FAKE16-NEXT: v_add_f32_e64 v6, 0x40c00000, s5
+; GFX11-FAKE16-NEXT: v_and_b32_e32 v51, 0xffff0000, v183
+; GFX11-FAKE16-NEXT: v_add_nc_u32_e32 v2, 0x7fff, v2
+; GFX11-FAKE16-NEXT: s_delay_alu instid0(VALU_DEP_1)
+; GFX11-FAKE16-NEXT: v_cndmask_b32_e32 v0, v2, v8, vcc_lo
+; GFX11-FAKE16-NEXT: v_add_nc_u32_e32 v8, v10, v5
+; GFX11-FAKE16-NEXT: v_cmp_u_f32_e32 vcc_lo, v1, v1
+; GFX11-FAKE16-NEXT: v_add_nc_u32_e32 v2, v9, v3
+; GFX11-FAKE16-NEXT: v_or_b32_e32 v9, 0x400000, v5
+; GFX11-FAKE16-NEXT: v_lshrrev_b32_e32 v0, 16, v0
+; GFX11-FAKE16-NEXT: v_bfe_u32 v10, v6, 16, 1
+; GFX11-FAKE16-NEXT: v_cndmask_b32_e32 v1, v4, v7, vcc_lo
+; GFX11-FAKE16-NEXT: v_add_nc_u32_e32 v7, 0x7fff, v8
+; GFX11-FAKE16-NEXT: v_add_f32_e64 v8, 0x40c00000, s7
+; GFX11-FAKE16-NEXT: v_or_b32_e32 v4, 0x400000, v3
+; GFX11-FAKE16-NEXT: v_add_nc_u32_e32 v2, 0x7fff, v2
+; GFX11-FAKE16-NEXT: v_lshrrev_b32_e32 v1, 16, v1
+; GFX11-FAKE16-NEXT: v_and_b32_e32 v0, 0xffff, v0
+; GFX11-FAKE16-NEXT: v_cmp_u_f32_e32 vcc_lo, v3, v3
+; GFX11-FAKE16-NEXT: v_bfe_u32 v3, v8, 16, 1
+; GFX11-FAKE16-NEXT: s_delay_alu instid0(VALU_DEP_3) | instskip(NEXT) | instid1(VALU_DEP_2)
+; GFX11-FAKE16-NEXT: v_lshl_or_b32 v15, v1, 16, v0
+; GFX11-FAKE16-NEXT: v_add_nc_u32_e32 v1, v3, v8
+; GFX11-FAKE16-NEXT: v_cndmask_b32_e32 v2, v2, v4, vcc_lo
+; GFX11-FAKE16-NEXT: v_cmp_u_f32_e32 vcc_lo, v5, v5
+; GFX11-FAKE16-NEXT: v_add_nc_u32_e32 v5, v10, v6
+; GFX11-FAKE16-NEXT: s_delay_alu instid0(VALU_DEP_4) | instskip(NEXT) | instid1(VALU_DEP_4)
+; GFX11-FAKE16-NEXT: v_add_nc_u32_e32 v1, 0x7fff, v1
+; GFX11-FAKE16-NEXT: v_lshrrev_b32_e32 v0, 16, v2
+; GFX11-FAKE16-NEXT: v_cndmask_b32_e32 v4, v7, v9, vcc_lo
+; GFX11-FAKE16-NEXT: s_delay_alu instid0(VALU_DEP_4)
+; GFX11-FAKE16-NEXT: v_add_nc_u32_e32 v3, 0x7fff, v5
+; GFX11-FAKE16-NEXT: v_add_f32_e64 v5, 0x40c00000, s4
+; GFX11-FAKE16-NEXT: v_cmp_u_f32_e32 vcc_lo, v6, v6
+; GFX11-FAKE16-NEXT: s_lshl_b32 s4, s24, 16
+; GFX11-FAKE16-NEXT: v_lshrrev_b32_e32 v2, 16, v4
+; GFX11-FAKE16-NEXT: v_or_b32_e32 v4, 0x400000, v6
+; GFX11-FAKE16-NEXT: v_or_b32_e32 v7, 0x400000, v8
+; GFX11-FAKE16-NEXT: v_add_f32_e64 v9, 0x40c00000, s4
+; GFX11-FAKE16-NEXT: s_and_b32 s4, s23, 0xffff0000
+; GFX11-FAKE16-NEXT: s_delay_alu instid0(VALU_DEP_3) | instskip(SKIP_4) | instid1(VALU_DEP_4)
+; GFX11-FAKE16-NEXT: v_cndmask_b32_e32 v3, v3, v4, vcc_lo
+; GFX11-FAKE16-NEXT: v_bfe_u32 v4, v5, 16, 1
+; GFX11-FAKE16-NEXT: v_cmp_u_f32_e32 vcc_lo, v8, v8
+; GFX11-FAKE16-NEXT: v_or_b32_e32 v8, 0x400000, v5
+; GFX11-FAKE16-NEXT: v_or_b32_e32 v10, 0x400000, v9
+; GFX11-FAKE16-NEXT: v_add_nc_u32_e32 v4, v4, v5
+; GFX11-FAKE16-NEXT: v_dual_cndmask_b32 v6, v1, v7 :: v_dual_and_b32 v1, 0xffff, v2
+; GFX11-FAKE16-NEXT: v_bfe_u32 v7, v9, 16, 1
+; GFX11-FAKE16-NEXT: v_lshrrev_b32_e32 v2, 16, v3
+; GFX11-FAKE16-NEXT: s_delay_alu instid0(VALU_DEP_4) | instskip(NEXT) | instid1(VALU_DEP_4)
+; GFX11-FAKE16-NEXT: v_add_nc_u32_e32 v4, 0x7fff, v4
+; GFX11-FAKE16-NEXT: v_lshrrev_b32_e32 v3, 16, v6
+; GFX11-FAKE16-NEXT: v_cmp_u_f32_e32 vcc_lo, v5, v5
+; GFX11-FAKE16-NEXT: v_add_nc_u32_e32 v6, v7, v9
+; GFX11-FAKE16-NEXT: v_add_f32_e64 v7, 0x40c00000, s4
+; GFX11-FAKE16-NEXT: s_lshl_b32 s4, s23, 16
+; GFX11-FAKE16-NEXT: v_lshl_or_b32 v151, v0, 16, v1
+; GFX11-FAKE16-NEXT: v_add_f32_e64 v12, 0x40c00000, s4
+; GFX11-FAKE16-NEXT: v_add_nc_u32_e32 v6, 0x7fff, v6
+; GFX11-FAKE16-NEXT: v_bfe_u32 v11, v7, 16, 1
+; GFX11-FAKE16-NEXT: v_cndmask_b32_e32 v5, v4, v8, vcc_lo
+; GFX11-FAKE16-NEXT: v_cmp_u_f32_e32 vcc_lo, v9, v9
+; GFX11-FAKE16-NEXT: s_and_b32 s4, s22, 0xffff0000
+; GFX11-FAKE16-NEXT: v_bfe_u32 v9, v12, 16, 1
+; GFX11-FAKE16-NEXT: v_add_nc_u32_e32 v8, v11, v7
+; GFX11-FAKE16-NEXT: v_and_b32_e32 v4, 0xffff, v3
+; GFX11-FAKE16-NEXT: v_cndmask_b32_e32 v6, v6, v10, vcc_lo
+; GFX11-FAKE16-NEXT: v_add_f32_e64 v10, 0x40c00000, s4
+; GFX11-FAKE16-NEXT: s_lshl_b32 s4, s22, 16
+; GFX11-FAKE16-NEXT: v_lshrrev_b32_e32 v3, 16, v5
+; GFX11-FAKE16-NEXT: v_add_f32_e64 v11, 0x40c00000, s4
+; GFX11-FAKE16-NEXT: v_lshrrev_b32_e32 v5, 16, v6
+; GFX11-FAKE16-NEXT: v_add_nc_u32_e32 v6, 0x7fff, v8
+; GFX11-FAKE16-NEXT: v_add_nc_u32_e32 v8, v9, v12
+; GFX11-FAKE16-NEXT: v_or_b32_e32 v9, 0x400000, v7
+; GFX11-FAKE16-NEXT: v_bfe_u32 v14, v10, 16, 1
+; GFX11-FAKE16-NEXT: v_cmp_u_f32_e32 vcc_lo, v7, v7
+; GFX11-FAKE16-NEXT: v_or_b32_e32 v13, 0x400000, v12
+; GFX11-FAKE16-NEXT: v_add_nc_u32_e32 v8, 0x7fff, v8
+; GFX11-FAKE16-NEXT: s_and_b32 s4, s21, 0xffff0000
+; GFX11-FAKE16-NEXT: v_cndmask_b32_e32 v7, v6, v9, vcc_lo
+; GFX11-FAKE16-NEXT: v_bfe_u32 v9, v11, 16, 1
+; GFX11-FAKE16-NEXT: v_cmp_u_f32_e32 vcc_lo, v12, v12
+; GFX11-FAKE16-NEXT: v_add_nc_u32_e32 v12, v14, v10
+; GFX11-FAKE16-NEXT: v_and_b32_e32 v6, 0xffff, v5
+; GFX11-FAKE16-NEXT: v_lshrrev_b32_e32 v5, 16, v7
+; GFX11-FAKE16-NEXT: v_dual_cndmask_b32 v8, v8, v13 :: v_dual_add_nc_u32 v7, v9, v11
+; GFX11-FAKE16-NEXT: s_delay_alu instid0(VALU_DEP_4)
+; GFX11-FAKE16-NEXT: v_add_nc_u32_e32 v9, 0x7fff, v12
+; GFX11-FAKE16-NEXT: v_or_b32_e32 v12, 0x400000, v10
+; GFX11-FAKE16-NEXT: v_add_f32_e64 v13, 0x40c00000, s4
+; GFX11-FAKE16-NEXT: v_cmp_u_f32_e32 vcc_lo, v10, v10
+; GFX11-FAKE16-NEXT: s_lshl_b32 s4, s21, 16
+; GFX11-FAKE16-NEXT: v_add_nc_u32_e32 v7, 0x7fff, v7
+; GFX11-FAKE16-NEXT: v_or_b32_e32 v14, 0x400000, v11
+; GFX11-FAKE16-NEXT: v_add_f32_e64 v16, 0x40c00000, s4
+; GFX11-FAKE16-NEXT: v_cndmask_b32_e32 v9, v9, v12, vcc_lo
+; GFX11-FAKE16-NEXT: v_bfe_u32 v10, v13, 16, 1
+; GFX11-FAKE16-NEXT: v_cmp_u_f32_e32 vcc_lo, v11, v11
+; GFX11-FAKE16-NEXT: v_lshrrev_b32_e32 v8, 16, v8
+; GFX11-FAKE16-NEXT: v_bfe_u32 v12, v16, 16, 1
+; GFX11-FAKE16-NEXT: s_and_b32 s4, s20, 0xffff0000
+; GFX11-FAKE16-NEXT: v_dual_cndmask_b32 v11, v7, v14 :: v_dual_add_nc_u32 v10, v10, v13
+; GFX11-FAKE16-NEXT: s_delay_alu instid0(VALU_DEP_3) | instskip(SKIP_2) | instid1(VALU_DEP_4)
+; GFX11-FAKE16-NEXT: v_and_b32_e32 v7, 0xffff, v8
+; GFX11-FAKE16-NEXT: v_lshrrev_b32_e32 v8, 16, v9
+; GFX11-FAKE16-NEXT: v_or_b32_e32 v14, 0x400000, v13
+; GFX11-FAKE16-NEXT: v_add_nc_u32_e32 v10, 0x7fff, v10
+; GFX11-FAKE16-NEXT: v_lshrrev_b32_e32 v9, 16, v11
+; GFX11-FAKE16-NEXT: v_add_nc_u32_e32 v11, v12, v16
+; GFX11-FAKE16-NEXT: v_add_f32_e64 v12, 0x40c00000, s4
+; GFX11-FAKE16-NEXT: v_cmp_u_f32_e32 vcc_lo, v13, v13
+; GFX11-FAKE16-NEXT: s_lshl_b32 s4, s20, 16
+; GFX11-FAKE16-NEXT: v_or_b32_e32 v17, 0x400000, v16
+; GFX11-FAKE16-NEXT: v_add_nc_u32_e32 v11, 0x7fff, v11
+; GFX11-FAKE16-NEXT: v_bfe_u32 v18, v12, 16, 1
+; GFX11-FAKE16-NEXT: v_add_f32_e64 v19, 0x40c00000, s4
+; GFX11-FAKE16-NEXT: v_cndmask_b32_e32 v13, v10, v14, vcc_lo
+; GFX11-FAKE16-NEXT: v_cmp_u_f32_e32 vcc_lo, v16, v16
+; GFX11-FAKE16-NEXT: s_and_b32 s4, s19, 0xffff0000
+; GFX11-FAKE16-NEXT: v_add_nc_u32_e32 v14, v18, v12
+; GFX11-FAKE16-NEXT: v_bfe_u32 v16, v19, 16, 1
+; GFX11-FAKE16-NEXT: v_and_b32_e32 v10, 0xffff, v9
+; GFX11-FAKE16-NEXT: v_cndmask_b32_e32 v11, v11, v17, vcc_lo
+; GFX11-FAKE16-NEXT: v_add_f32_e64 v17, 0x40c00000, s4
+; GFX11-FAKE16-NEXT: s_lshl_b32 s4, s19, 16
+; GFX11-FAKE16-NEXT: v_lshrrev_b32_e32 v9, 16, v13
+; GFX11-FAKE16-NEXT: v_add_nc_u32_e32 v13, 0x7fff, v14
+; GFX11-FAKE16-NEXT: v_add_nc_u32_e32 v14, v16, v19
+; GFX11-FAKE16-NEXT: v_or_b32_e32 v16, 0x400000, v12
+; GFX11-FAKE16-NEXT: v_add_f32_e64 v18, 0x40c00000, s4
+; GFX11-FAKE16-NEXT: v_bfe_u32 v21, v17, 16, 1
+; GFX11-FAKE16-NEXT: v_cmp_u_f32_e32 vcc_lo, v12, v12
+; GFX11-FAKE16-NEXT: v_lshrrev_b32_e32 v11, 16, v11
+; GFX11-FAKE16-NEXT: v_add_nc_u32_e32 v14, 0x7fff, v14
+; GFX11-FAKE16-NEXT: v_or_b32_e32 v20, 0x400000, v19
+; GFX11-FAKE16-NEXT: s_and_b32 s4, s18, 0xffff0000
+; GFX11-FAKE16-NEXT: v_cndmask_b32_e32 v13, v13, v16, vcc_lo
+; GFX11-FAKE16-NEXT: v_bfe_u32 v16, v18, 16, 1
+; GFX11-FAKE16-NEXT: v_cmp_u_f32_e32 vcc_lo, v19, v19
+; GFX11-FAKE16-NEXT: v_add_nc_u32_e32 v19, v21, v17
+; GFX11-FAKE16-NEXT: v_and_b32_e32 v12, 0xffff, v11
+; GFX11-FAKE16-NEXT: v_lshrrev_b32_e32 v11, 16, v13
+; GFX11-FAKE16-NEXT: v_dual_cndmask_b32 v14, v14, v20 :: v_dual_add_nc_u32 v13, v16, v18
+; GFX11-FAKE16-NEXT: s_delay_alu instid0(VALU_DEP_4)
+; GFX11-FAKE16-NEXT: v_add_nc_u32_e32 v16, 0x7fff, v19
+; GFX11-FAKE16-NEXT: v_or_b32_e32 v19, 0x400000, v17
+; GFX11-FAKE16-NEXT: v_add_f32_e64 v20, 0x40c00000, s4
+; GFX11-FAKE16-NEXT: v_cmp_u_f32_e32 vcc_lo, v17, v17
+; GFX11-FAKE16-NEXT: s_lshl_b32 s4, s18, 16
+; GFX11-FAKE16-NEXT: v_add_nc_u32_e32 v13, 0x7fff, v13
+; GFX11-FAKE16-NEXT: v_or_b32_e32 v21, 0x400000, v18
+; GFX11-FAKE16-NEXT: v_add_f32_e64 v22, 0x40c00000, s4
+; GFX11-FAKE16-NEXT: v_cndmask_b32_e32 v16, v16, v19, vcc_lo
+; GFX11-FAKE16-NEXT: v_bfe_u32 v17, v20, 16, 1
+; GFX11-FAKE16-NEXT: v_cmp_u_f32_e32 vcc_lo, v18, v18
+; GFX11-FAKE16-NEXT: v_lshrrev_b32_e32 v14, 16, v14
+; GFX11-FAKE16-NEXT: v_bfe_u32 v19, v22, 16, 1
+; GFX11-FAKE16-NEXT: s_and_b32 s4, s17, 0xffff0000
+; GFX11-FAKE16-NEXT: v_add_nc_u32_e32 v17, v17, v20
+; GFX11-FAKE16-NEXT: s_delay_alu instid0(VALU_DEP_3) | instskip(SKIP_2) | instid1(VALU_DEP_4)
+; GFX11-FAKE16-NEXT: v_dual_cndmask_b32 v18, v13, v21 :: v_dual_and_b32 v13, 0xffff, v14
+; GFX11-FAKE16-NEXT: v_lshrrev_b32_e32 v14, 16, v16
+; GFX11-FAKE16-NEXT: v_or_b32_e32 v21, 0x400000, v20
+; GFX11-FAKE16-NEXT: v_add_nc_u32_e32 v17, 0x7fff, v17
+; GFX11-FAKE16-NEXT: s_delay_alu instid0(VALU_DEP_4)
+; GFX11-FAKE16-NEXT: v_lshrrev_b32_e32 v16, 16, v18
+; GFX11-FAKE16-NEXT: v_add_nc_u32_e32 v18, v19, v22
+; GFX11-FAKE16-NEXT: v_add_f32_e64 v19, 0x40c00000, s4
+; GFX11-FAKE16-NEXT: v_cmp_u_f32_e32 vcc_lo, v20, v20
+; GFX11-FAKE16-NEXT: s_lshl_b32 s4, s17, 16
+; GFX11-FAKE16-NEXT: v_or_b32_e32 v23, 0x400000, v22
+; GFX11-FAKE16-NEXT: v_add_nc_u32_e32 v18, 0x7fff, v18
+; GFX11-FAKE16-NEXT: v_bfe_u32 v24, v19, 16, 1
+; GFX11-FAKE16-NEXT: v_add_f32_e64 v25, 0x40c00000, s4
+; GFX11-FAKE16-NEXT: v_cndmask_b32_e32 v20, v17, v21, vcc_lo
+; GFX11-FAKE16-NEXT: v_cmp_u_f32_e32 vcc_lo, v22, v22
+; GFX11-FAKE16-NEXT: s_and_b32 s4, s16, 0xffff0000
+; GFX11-FAKE16-NEXT: v_add_nc_u32_e32 v21, v24, v19
+; GFX11-FAKE16-NEXT: v_bfe_u32 v22, v25, 16, 1
+; GFX11-FAKE16-NEXT: v_dual_cndmask_b32 v18, v18, v23 :: v_dual_and_b32 v17, 0xffff, v16
+; GFX11-FAKE16-NEXT: v_add_f32_e64 v23, 0x40c00000, s4
+; GFX11-FAKE16-NEXT: s_lshl_b32 s4, s16, 16
+; GFX11-FAKE16-NEXT: v_lshrrev_b32_e32 v16, 16, v20
+; GFX11-FAKE16-NEXT: v_add_nc_u32_e32 v20, 0x7fff, v21
+; GFX11-FAKE16-NEXT: v_add_nc_u32_e32 v21, v22, v25
+; GFX11-FAKE16-NEXT: v_or_b32_e32 v22, 0x400000, v19
+; GFX11-FAKE16-NEXT: v_add_f32_e64 v24, 0x40c00000, s4
+; GFX11-FAKE16-NEXT: v_bfe_u32 v27, v23, 16, 1
+; GFX11-FAKE16-NEXT: v_cmp_u_f32_e32 vcc_lo, v19, v19
+; GFX11-FAKE16-NEXT: v_lshrrev_b32_e32 v18, 16, v18
+; GFX11-FAKE16-NEXT: v_add_nc_u32_e32 v21, 0x7fff, v21
+; GFX11-FAKE16-NEXT: v_or_b32_e32 v26, 0x400000, v25
+; GFX11-FAKE16-NEXT: s_and_b32 s4, s3, 0xffff0000
+; GFX11-FAKE16-NEXT: v_cndmask_b32_e32 v20, v20, v22, vcc_lo
+; GFX11-FAKE16-NEXT: v_bfe_u32 v22, v24, 16, 1
+; GFX11-FAKE16-NEXT: v_cmp_u_f32_e32 vcc_lo, v25, v25
+; GFX11-FAKE16-NEXT: v_add_nc_u32_e32 v25, v27, v23
+; GFX11-FAKE16-NEXT: v_and_b32_e32 v19, 0xffff, v18
+; GFX11-FAKE16-NEXT: v_lshrrev_b32_e32 v18, 16, v20
+; GFX11-FAKE16-NEXT: v_dual_cndmask_b32 v21, v21, v26 :: v_dual_add_nc_u32 v20, v22, v24
+; GFX11-FAKE16-NEXT: s_delay_alu instid0(VALU_DEP_4)
+; GFX11-FAKE16-NEXT: v_add_nc_u32_e32 v22, 0x7fff, v25
+; GFX11-FAKE16-NEXT: v_or_b32_e32 v25, 0x400000, v23
+; GFX11-FAKE16-NEXT: v_add_f32_e64 v26, 0x40c00000, s4
+; GFX11-FAKE16-NEXT: v_cmp_u_f32_e32 vcc_lo, v23, v23
+; GFX11-FAKE16-NEXT: s_lshl_b32 s3, s3, 16
+; GFX11-FAKE16-NEXT: v_add_nc_u32_e32 v20, 0x7fff, v20
+; GFX11-FAKE16-NEXT: v_or_b32_e32 v27, 0x400000, v24
+; GFX11-FAKE16-NEXT: v_add_f32_e64 v28, 0x40c00000, s3
+; GFX11-FAKE16-NEXT: v_cndmask_b32_e32 v22, v22, v25, vcc_lo
+; GFX11-FAKE16-NEXT: v_bfe_u32 v23, v26, 16, 1
+; GFX11-FAKE16-NEXT: v_cmp_u_f32_e32 vcc_lo, v24, v24
+; GFX11-FAKE16-NEXT: v_lshrrev_b32_e32 v21, 16, v21
+; GFX11-FAKE16-NEXT: v_bfe_u32 v25, v28, 16, 1
+; GFX11-FAKE16-NEXT: s_and_b32 s3, s2, 0xffff0000
+; GFX11-FAKE16-NEXT: v_dual_cndmask_b32 v24, v20, v27 :: v_dual_add_nc_u32 v23, v23, v26
+; GFX11-FAKE16-NEXT: s_delay_alu instid0(VALU_DEP_3) | instskip(SKIP_2) | instid1(VALU_DEP_4)
+; GFX11-FAKE16-NEXT: v_and_b32_e32 v20, 0xffff, v21
+; GFX11-FAKE16-NEXT: v_lshrrev_b32_e32 v21, 16, v22
+; GFX11-FAKE16-NEXT: v_or_b32_e32 v27, 0x400000, v26
+; GFX11-FAKE16-NEXT: v_add_nc_u32_e32 v23, 0x7fff, v23
+; GFX11-FAKE16-NEXT: v_lshrrev_b32_e32 v22, 16, v24
+; GFX11-FAKE16-NEXT: v_add_nc_u32_e32 v24, v25, v28
+; GFX11-FAKE16-NEXT: v_add_f32_e64 v25, 0x40c00000, s3
+; GFX11-FAKE16-NEXT: v_cmp_u_f32_e32 vcc_lo, v26, v26
+; GFX11-FAKE16-NEXT: s_lshl_b32 s2, s2, 16
+; GFX11-FAKE16-NEXT: v_or_b32_e32 v29, 0x400000, v28
+; GFX11-FAKE16-NEXT: v_add_nc_u32_e32 v24, 0x7fff, v24
+; GFX11-FAKE16-NEXT: v_bfe_u32 v30, v25, 16, 1
+; GFX11-FAKE16-NEXT: v_add_f32_e64 v31, 0x40c00000, s2
+; GFX11-FAKE16-NEXT: v_cndmask_b32_e32 v26, v23, v27, vcc_lo
+; GFX11-FAKE16-NEXT: v_cmp_u_f32_e32 vcc_lo, v28, v28
+; GFX11-FAKE16-NEXT: s_and_b32 s2, s1, 0xffff0000
+; GFX11-FAKE16-NEXT: v_add_nc_u32_e32 v27, v30, v25
+; GFX11-FAKE16-NEXT: v_bfe_u32 v28, v31, 16, 1
+; GFX11-FAKE16-NEXT: s_lshl_b32 s1, s1, 16
+; GFX11-FAKE16-NEXT: v_cndmask_b32_e32 v24, v24, v29, vcc_lo
+; GFX11-FAKE16-NEXT: v_add_f32_e64 v29, 0x40c00000, s2
+; GFX11-FAKE16-NEXT: v_and_b32_e32 v23, 0xffff, v22
+; GFX11-FAKE16-NEXT: v_lshrrev_b32_e32 v22, 16, v26
+; GFX11-FAKE16-NEXT: v_add_nc_u32_e32 v26, 0x7fff, v27
+; GFX11-FAKE16-NEXT: v_add_nc_u32_e32 v27, v28, v31
+; GFX11-FAKE16-NEXT: v_or_b32_e32 v28, 0x400000, v25
+; GFX11-FAKE16-NEXT: v_add_f32_e64 v30, 0x40c00000, s1
+; GFX11-FAKE16-NEXT: v_bfe_u32 v33, v29, 16, 1
+; GFX11-FAKE16-NEXT: v_cmp_u_f32_e32 vcc_lo, v25, v25
+; GFX11-FAKE16-NEXT: v_lshrrev_b32_e32 v24, 16, v24
+; GFX11-FAKE16-NEXT: v_add_nc_u32_e32 v27, 0x7fff, v27
+; GFX11-FAKE16-NEXT: v_or_b32_e32 v32, 0x400000, v31
+; GFX11-FAKE16-NEXT: s_and_b32 s1, s0, 0xffff0000
+; GFX11-FAKE16-NEXT: v_cndmask_b32_e32 v26, v26, v28, vcc_lo
+; GFX11-FAKE16-NEXT: v_bfe_u32 v28, v30, 16, 1
+; GFX11-FAKE16-NEXT: v_cmp_u_f32_e32 vcc_lo, v31, v31
+; GFX11-FAKE16-NEXT: v_add_nc_u32_e32 v31, v33, v29
+; GFX11-FAKE16-NEXT: v_and_b32_e32 v25, 0xffff, v24
+; GFX11-FAKE16-NEXT: v_lshrrev_b32_e32 v24, 16, v26
+; GFX11-FAKE16-NEXT: v_dual_cndmask_b32 v27, v27, v32 :: v_dual_add_nc_u32 v26, v28, v30
+; GFX11-FAKE16-NEXT: s_delay_alu instid0(VALU_DEP_4)
+; GFX11-FAKE16-NEXT: v_add_nc_u32_e32 v28, 0x7fff, v31
+; GFX11-FAKE16-NEXT: v_or_b32_e32 v31, 0x400000, v29
+; GFX11-FAKE16-NEXT: v_add_f32_e64 v32, 0x40c00000, s1
+; GFX11-FAKE16-NEXT: v_cmp_u_f32_e32 vcc_lo, v29, v29
+; GFX11-FAKE16-NEXT: s_lshl_b32 s0, s0, 16
+; GFX11-FAKE16-NEXT: v_add_nc_u32_e32 v26, 0x7fff, v26
+; GFX11-FAKE16-NEXT: v_or_b32_e32 v33, 0x400000, v30
+; GFX11-FAKE16-NEXT: v_add_f32_e64 v34, 0x40c00000, s0
+; GFX11-FAKE16-NEXT: v_cndmask_b32_e32 v28, v28, v31, vcc_lo
+; GFX11-FAKE16-NEXT: v_bfe_u32 v29, v32, 16, 1
+; GFX11-FAKE16-NEXT: v_cmp_u_f32_e32 vcc_lo, v30, v30
+; GFX11-FAKE16-NEXT: v_or_b32_e32 v35, 0x400000, v32
+; GFX11-FAKE16-NEXT: v_bfe_u32 v31, v34, 16, 1
+; GFX11-FAKE16-NEXT: v_or_b32_e32 v36, 0x400000, v34
+; GFX11-FAKE16-NEXT: v_lshrrev_b32_e32 v27, 16, v27
+; GFX11-FAKE16-NEXT: v_cndmask_b32_e32 v30, v26, v33, vcc_lo
+; GFX11-FAKE16-NEXT: v_lshrrev_b32_e32 v26, 16, v28
+; GFX11-FAKE16-NEXT: v_add_nc_u32_e32 v28, v29, v32
+; GFX11-FAKE16-NEXT: v_lshlrev_b32_e32 v33, 16, v178
+; GFX11-FAKE16-NEXT: v_cmp_u_f32_e32 vcc_lo, v32, v32
+; GFX11-FAKE16-NEXT: v_lshrrev_b32_e32 v29, 16, v30
+; GFX11-FAKE16-NEXT: v_add_nc_u32_e32 v30, v31, v34
+; GFX11-FAKE16-NEXT: v_and_b32_e32 v31, 0xffff0000, v178
+; GFX11-FAKE16-NEXT: v_add_nc_u32_e32 v28, 0x7fff, v28
+; GFX11-FAKE16-NEXT: v_add_f32_e32 v33, 0x40c00000, v33
+; GFX11-FAKE16-NEXT: v_lshl_or_b32 v109, v5, 16, v7
+; GFX11-FAKE16-NEXT: v_add_nc_u32_e32 v30, 0x7fff, v30
+; GFX11-FAKE16-NEXT: v_add_f32_e32 v31, 0x40c00000, v31
+; GFX11-FAKE16-NEXT: v_cndmask_b32_e32 v32, v28, v35, vcc_lo
+; GFX11-FAKE16-NEXT: v_bfe_u32 v37, v33, 16, 1
+; GFX11-FAKE16-NEXT: v_cmp_u_f32_e32 vcc_lo, v34, v34
+; GFX11-FAKE16-NEXT: v_and_b32_e32 v28, 0xffff, v29
+; GFX11-FAKE16-NEXT: v_bfe_u32 v35, v31, 16, 1
+; GFX11-FAKE16-NEXT: v_lshrrev_b32_e32 v29, 16, v32
+; GFX11-FAKE16-NEXT: v_add_nc_u32_e32 v32, v37, v33
+; GFX11-FAKE16-NEXT: v_cndmask_b32_e32 v30, v30, v36, vcc_lo
+; GFX11-FAKE16-NEXT: v_lshlrev_b32_e32 v36, 16, v179
+; GFX11-FAKE16-NEXT: v_add_nc_u32_e32 v34, v35, v31
+; GFX11-FAKE16-NEXT: v_or_b32_e32 v37, 0x400000, v33
+; GFX11-FAKE16-NEXT: v_add_nc_u32_e32 v32, 0x7fff, v32
+; GFX11-FAKE16-NEXT: v_cmp_u_f32_e32 vcc_lo, v33, v33
+; GFX11-FAKE16-NEXT: v_and_b32_e32 v35, 0xffff0000, v179
+; GFX11-FAKE16-NEXT: v_add_nc_u32_e32 v34, 0x7fff, v34
+; GFX11-FAKE16-NEXT: v_or_b32_e32 v38, 0x400000, v31
+; GFX11-FAKE16-NEXT: v_add_f32_e32 v36, 0x40c00000, v36
+; GFX11-FAKE16-NEXT: v_cndmask_b32_e32 v32, v32, v37, vcc_lo
+; GFX11-FAKE16-NEXT: v_cmp_u_f32_e32 vcc_lo, v31, v31
+; GFX11-FAKE16-NEXT: v_add_f32_e32 v35, 0x40c00000, v35
+; GFX11-FAKE16-NEXT: v_and_b32_e32 v37, 0xffff0000, v180
+; GFX11-FAKE16-NEXT: v_or_b32_e32 v39, 0x400000, v36
+; GFX11-FAKE16-NEXT: v_lshrrev_b32_e32 v32, 16, v32
+; GFX11-FAKE16-NEXT: v_cndmask_b32_e32 v31, v34, v38, vcc_lo
+; GFX11-FAKE16-NEXT: v_bfe_u32 v34, v36, 16, 1
+; GFX11-FAKE16-NEXT: v_bfe_u32 v33, v35, 16, 1
+; GFX11-FAKE16-NEXT: v_lshlrev_b32_e32 v38, 16, v180
+; GFX11-FAKE16-NEXT: v_cmp_u_f32_e32 vcc_lo, v36, v36
+; GFX11-FAKE16-NEXT: s_delay_alu instid0(VALU_DEP_4) | instskip(NEXT) | instid1(VALU_DEP_4)
+; GFX11-FAKE16-NEXT: v_dual_add_f32 v37, 0x40c00000, v37 :: v_dual_add_nc_u32 v34, v34, v36
+; GFX11-FAKE16-NEXT: v_add_nc_u32_e32 v33, v33, v35
+; GFX11-FAKE16-NEXT: v_or_b32_e32 v48, 0x400000, v35
+; GFX11-FAKE16-NEXT: v_add_f32_e32 v38, 0x40c00000, v38
+; GFX11-FAKE16-NEXT: v_lshrrev_b32_e32 v31, 16, v31
+; GFX11-FAKE16-NEXT: v_add_nc_u32_e32 v34, 0x7fff, v34
+; GFX11-FAKE16-NEXT: v_add_nc_u32_e32 v33, 0x7fff, v33
+; GFX11-FAKE16-NEXT: v_and_b32_e32 v32, 0xffff, v32
+; GFX11-FAKE16-NEXT: v_bfe_u32 v36, v37, 16, 1
+; GFX11-FAKE16-NEXT: v_lshrrev_b32_e32 v30, 16, v30
+; GFX11-FAKE16-NEXT: v_cndmask_b32_e32 v34, v34, v39, vcc_lo
+; GFX11-FAKE16-NEXT: v_cmp_u_f32_e32 vcc_lo, v35, v35
+; GFX11-FAKE16-NEXT: v_bfe_u32 v35, v38, 16, 1
+; GFX11-FAKE16-NEXT: v_lshl_or_b32 v178, v31, 16, v32
+; GFX11-FAKE16-NEXT: v_add_nc_u32_e32 v31, v36, v37
+; GFX11-FAKE16-NEXT: v_dual_cndmask_b32 v33, v33, v48 :: v_dual_lshlrev_b32 v36, 16, v182
+; GFX11-FAKE16-NEXT: v_lshrrev_b32_e32 v34, 16, v34
+; GFX11-FAKE16-NEXT: v_or_b32_e32 v39, 0x400000, v38
+; GFX11-FAKE16-NEXT: v_cmp_u_f32_e32 vcc_lo, v38, v38
+; GFX11-FAKE16-NEXT: s_delay_alu instid0(VALU_DEP_4)
+; GFX11-FAKE16-NEXT: v_add_f32_e32 v36, 0x40c00000, v36
+; GFX11-FAKE16-NEXT: v_lshrrev_b32_e32 v32, 16, v33
+; GFX11-FAKE16-NEXT: v_add_nc_u32_e32 v33, v35, v38
+; GFX11-FAKE16-NEXT: v_and_b32_e32 v35, 0xffff0000, v182
+; GFX11-FAKE16-NEXT: v_and_b32_e32 v34, 0xffff, v34
+; GFX11-FAKE16-NEXT: v_or_b32_e32 v48, 0x400000, v37
+; GFX11-FAKE16-NEXT: v_add_nc_u32_e32 v31, 0x7fff, v31
+; GFX11-FAKE16-NEXT: v_add_nc_u32_e32 v33, 0x7fff, v33
+; GFX11-FAKE16-NEXT: v_add_f32_e32 v35, 0x40c00000, v35
+; GFX11-FAKE16-NEXT: v_lshl_or_b32 v179, v32, 16, v34
+; GFX11-FAKE16-NEXT: v_and_b32_e32 v30, 0xffff, v30
+; GFX11-FAKE16-NEXT: v_lshl_or_b32 v136, v2, 16, v4
+; GFX11-FAKE16-NEXT: v_cndmask_b32_e32 v33, v33, v39, vcc_lo
+; GFX11-FAKE16-NEXT: v_bfe_u32 v38, v35, 16, 1
+; GFX11-FAKE16-NEXT: v_cmp_u_f32_e32 vcc_lo, v37, v37
+; GFX11-FAKE16-NEXT: v_bfe_u32 v37, v36, 16, 1
+; GFX11-FAKE16-NEXT: v_or_b32_e32 v39, 0x400000, v36
+; GFX11-FAKE16-NEXT: v_lshrrev_b32_e32 v33, 16, v33
+; GFX11-FAKE16-NEXT: v_dual_cndmask_b32 v31, v31, v48 :: v_dual_add_nc_u32 v38, v38, v35
+; GFX11-FAKE16-NEXT: s_delay_alu instid0(VALU_DEP_4) | instskip(SKIP_2) | instid1(VALU_DEP_4)
+; GFX11-FAKE16-NEXT: v_add_nc_u32_e32 v32, v37, v36
+; GFX11-FAKE16-NEXT: v_and_b32_e32 v37, 0xffff0000, v181
+; GFX11-FAKE16-NEXT: v_cmp_u_f32_e32 vcc_lo, v36, v36
+; GFX11-FAKE16-NEXT: v_add_nc_u32_e32 v34, 0x7fff, v38
+; GFX11-FAKE16-NEXT: v_lshlrev_b32_e32 v38, 16, v181
+; GFX11-FAKE16-NEXT: v_add_nc_u32_e32 v32, 0x7fff, v32
+; GFX11-FAKE16-NEXT: v_or_b32_e32 v48, 0x400000, v35
+; GFX11-FAKE16-NEXT: v_add_f32_e32 v37, 0x40c00000, v37
+; GFX11-FAKE16-NEXT: v_lshrrev_b32_e32 v31, 16, v31
+; GFX11-FAKE16-NEXT: v_add_f32_e32 v38, 0x40c00000, v38
+; GFX11-FAKE16-NEXT: v_cndmask_b32_e32 v32, v32, v39, vcc_lo
+; GFX11-FAKE16-NEXT: v_cmp_u_f32_e32 vcc_lo, v35, v35
+; GFX11-FAKE16-NEXT: v_and_b32_e32 v33, 0xffff, v33
+; GFX11-FAKE16-NEXT: v_bfe_u32 v35, v37, 16, 1
+; GFX11-FAKE16-NEXT: v_bfe_u32 v36, v38, 16, 1
+; GFX11-FAKE16-NEXT: v_lshrrev_b32_e32 v32, 16, v32
+; GFX11-FAKE16-NEXT: v_cndmask_b32_e32 v34, v34, v48, vcc_lo
+; GFX11-FAKE16-NEXT: v_lshl_or_b32 v180, v31, 16, v33
+; GFX11-FAKE16-NEXT: v_add_nc_u32_e32 v33, v35, v37
+; GFX11-FAKE16-NEXT: v_and_b32_e32 v35, 0xffff0000, v170
+; GFX11-FAKE16-NEXT: v_or_b32_e32 v39, 0x400000, v38
+; GFX11-FAKE16-NEXT: v_lshrrev_b32_e32 v31, 16, v34
+; GFX11-FAKE16-NEXT: v_add_nc_u32_e32 v34, v36, v38
+; GFX11-FAKE16-NEXT: s_delay_alu instid0(VALU_DEP_4) | instskip(SKIP_2) | instid1(VALU_DEP_4)
+; GFX11-FAKE16-NEXT: v_dual_add_f32 v35, 0x40c00000, v35 :: v_dual_lshlrev_b32 v36, 16, v170
+; GFX11-FAKE16-NEXT: v_cmp_u_f32_e32 vcc_lo, v38, v38
+; GFX11-FAKE16-NEXT: v_and_b32_e32 v32, 0xffff, v32
+; GFX11-FAKE16-NEXT: v_add_nc_u32_e32 v34, 0x7fff, v34
+; GFX11-FAKE16-NEXT: s_delay_alu instid0(VALU_DEP_4)
+; GFX11-FAKE16-NEXT: v_add_f32_e32 v36, 0x40c00000, v36
+; GFX11-FAKE16-NEXT: v_add_nc_u32_e32 v33, 0x7fff, v33
+; GFX11-FAKE16-NEXT: v_or_b32_e32 v48, 0x400000, v37
+; GFX11-FAKE16-NEXT: v_bfe_u32 v38, v35, 16, 1
+; GFX11-FAKE16-NEXT: v_cndmask_b32_e32 v34, v34, v39, vcc_lo
+; GFX11-FAKE16-NEXT: v_bfe_u32 v39, v36, 16, 1
+; GFX11-FAKE16-NEXT: v_cmp_u_f32_e32 vcc_lo, v37, v37
+; GFX11-FAKE16-NEXT: v_lshl_or_b32 v182, v31, 16, v32
+; GFX11-FAKE16-NEXT: v_add_nc_u32_e32 v37, v38, v35
+; GFX11-FAKE16-NEXT: v_lshrrev_b32_e32 v34, 16, v34
+; GFX11-FAKE16-NEXT: v_add_nc_u32_e32 v31, v39, v36
+; GFX11-FAKE16-NEXT: v_cndmask_b32_e32 v33, v33, v48, vcc_lo
+; GFX11-FAKE16-NEXT: v_or_b32_e32 v38, 0x400000, v36
+; GFX11-FAKE16-NEXT: v_cmp_u_f32_e32 vcc_lo, v36, v36
+; GFX11-FAKE16-NEXT: v_or_b32_e32 v48, 0x400000, v35
+; GFX11-FAKE16-NEXT: v_add_nc_u32_e32 v31, 0x7fff, v31
+; GFX11-FAKE16-NEXT: v_lshrrev_b32_e32 v32, 16, v33
+; GFX11-FAKE16-NEXT: v_and_b32_e32 v33, 0xffff, v34
+; GFX11-FAKE16-NEXT: v_add_nc_u32_e32 v34, 0x7fff, v37
+; GFX11-FAKE16-NEXT: v_and_b32_e32 v37, 0xffff0000, v169
+; GFX11-FAKE16-NEXT: v_cndmask_b32_e32 v31, v31, v38, vcc_lo
+; GFX11-FAKE16-NEXT: v_cmp_u_f32_e32 vcc_lo, v35, v35
+; GFX11-FAKE16-NEXT: v_lshlrev_b32_e32 v39, 16, v169
+; GFX11-FAKE16-NEXT: v_lshl_or_b32 v181, v32, 16, v33
+; GFX11-FAKE16-NEXT: v_add_f32_e32 v37, 0x40c00000, v37
+; GFX11-FAKE16-NEXT: v_and_b32_e32 v38, 0xffff0000, v176
+; GFX11-FAKE16-NEXT: v_cndmask_b32_e32 v34, v34, v48, vcc_lo
+; GFX11-FAKE16-NEXT: v_add_f32_e32 v36, 0x40c00000, v39
+; GFX11-FAKE16-NEXT: v_lshrrev_b32_e32 v31, 16, v31
+; GFX11-FAKE16-NEXT: v_bfe_u32 v35, v37, 16, 1
+; GFX11-FAKE16-NEXT: v_or_b32_e32 v39, 0x400000, v37
+; GFX11-FAKE16-NEXT: v_lshrrev_b32_e32 v33, 16, v34
+; GFX11-FAKE16-NEXT: v_bfe_u32 v32, v36, 16, 1
+; GFX11-FAKE16-NEXT: v_add_f32_e32 v38, 0x40c00000, v38
+; GFX11-FAKE16-NEXT: v_add_nc_u32_e32 v34, v35, v37
+; GFX11-FAKE16-NEXT: v_lshlrev_b32_e32 v35, 16, v176
+; GFX11-FAKE16-NEXT: v_cmp_u_f32_e32 vcc_lo, v37, v37
+; GFX11-FAKE16-NEXT: v_add_nc_u32_e32 v32, v32, v36
+; GFX11-FAKE16-NEXT: v_and_b32_e32 v31, 0xffff, v31
+; GFX11-FAKE16-NEXT: v_add_nc_u32_e32 v34, 0x7fff, v34
+; GFX11-FAKE16-NEXT: v_add_f32_e32 v35, 0x40c00000, v35
+; GFX11-FAKE16-NEXT: v_or_b32_e32 v48, 0x400000, v36
+; GFX11-FAKE16-NEXT: v_add_nc_u32_e32 v32, 0x7fff, v32
+; GFX11-FAKE16-NEXT: v_bfe_u32 v37, v38, 16, 1
+; GFX11-FAKE16-NEXT: v_cndmask_b32_e32 v34, v34, v39, vcc_lo
+; GFX11-FAKE16-NEXT: v_bfe_u32 v49, v35, 16, 1
+; GFX11-FAKE16-NEXT: v_cmp_u_f32_e32 vcc_lo, v36, v36
+; GFX11-FAKE16-NEXT: v_and_b32_e32 v27, 0xffff, v27
+; GFX11-FAKE16-NEXT: v_lshl_or_b32 v170, v33, 16, v31
+; GFX11-FAKE16-NEXT: v_lshrrev_b32_e32 v31, 16, v34
+; GFX11-FAKE16-NEXT: v_add_nc_u32_e32 v36, v49, v35
+; GFX11-FAKE16-NEXT: v_dual_cndmask_b32 v32, v32, v48 :: v_dual_add_nc_u32 v33, v37, v38
+; GFX11-FAKE16-NEXT: v_and_b32_e32 v37, 0xffff0000, v174
+; GFX11-FAKE16-NEXT: v_cmp_u_f32_e32 vcc_lo, v35, v35
+; GFX11-FAKE16-NEXT: s_delay_alu instid0(VALU_DEP_4) | instskip(SKIP_4) | instid1(VALU_DEP_4)
+; GFX11-FAKE16-NEXT: v_add_nc_u32_e32 v34, 0x7fff, v36
+; GFX11-FAKE16-NEXT: v_or_b32_e32 v36, 0x400000, v35
+; GFX11-FAKE16-NEXT: v_lshrrev_b32_e32 v32, 16, v32
+; GFX11-FAKE16-NEXT: v_add_nc_u32_e32 v33, 0x7fff, v33
+; GFX11-FAKE16-NEXT: v_or_b32_e32 v39, 0x400000, v38
+; GFX11-FAKE16-NEXT: v_dual_add_f32 v35, 0x40c00000, v37 :: v_dual_cndmask_b32 v34, v34, v36
+; GFX11-FAKE16-NEXT: v_lshlrev_b32_e32 v36, 16, v174
+; GFX11-FAKE16-NEXT: v_cmp_u_f32_e32 vcc_lo, v38, v38
+; GFX11-FAKE16-NEXT: v_and_b32_e32 v32, 0xffff, v32
+; GFX11-FAKE16-NEXT: s_delay_alu instid0(VALU_DEP_4) | instskip(SKIP_2) | instid1(VALU_DEP_4)
+; GFX11-FAKE16-NEXT: v_bfe_u32 v37, v35, 16, 1
+; GFX11-FAKE16-NEXT: v_lshrrev_b32_e32 v34, 16, v34
+; GFX11-FAKE16-NEXT: v_dual_add_f32 v36, 0x40c00000, v36 :: v_dual_cndmask_b32 v33, v33, v39
+; GFX11-FAKE16-NEXT: v_lshl_or_b32 v169, v31, 16, v32
+; GFX11-FAKE16-NEXT: s_delay_alu instid0(VALU_DEP_4) | instskip(NEXT) | instid1(VALU_DEP_4)
+; GFX11-FAKE16-NEXT: v_add_nc_u32_e32 v37, v37, v35
+; GFX11-FAKE16-NEXT: v_and_b32_e32 v34, 0xffff, v34
+; GFX11-FAKE16-NEXT: s_delay_alu instid0(VALU_DEP_4)
+; GFX11-FAKE16-NEXT: v_bfe_u32 v31, v36, 16, 1
+; GFX11-FAKE16-NEXT: v_lshrrev_b32_e32 v33, 16, v33
+; GFX11-FAKE16-NEXT: v_and_b32_e32 v32, 0xffff0000, v171
+; GFX11-FAKE16-NEXT: v_cmp_u_f32_e32 vcc_lo, v35, v35
+; GFX11-FAKE16-NEXT: v_lshlrev_b32_e32 v38, 16, v177
+; GFX11-FAKE16-NEXT: v_add_nc_u32_e32 v31, v31, v36
+; GFX11-FAKE16-NEXT: v_lshl_or_b32 v176, v33, 16, v34
+; GFX11-FAKE16-NEXT: v_add_nc_u32_e32 v33, 0x7fff, v37
+; GFX11-FAKE16-NEXT: v_or_b32_e32 v34, 0x400000, v35
+; GFX11-FAKE16-NEXT: v_dual_add_f32 v32, 0x40c00000, v32 :: v_dual_lshlrev_b32 v37, 16, v171
+; GFX11-FAKE16-NEXT: v_add_nc_u32_e32 v31, 0x7fff, v31
+; GFX11-FAKE16-NEXT: v_add_f32_e32 v38, 0x40c00000, v38
+; GFX11-FAKE16-NEXT: s_delay_alu instid0(VALU_DEP_4)
+; GFX11-FAKE16-NEXT: v_cndmask_b32_e32 v33, v33, v34, vcc_lo
+; GFX11-FAKE16-NEXT: v_or_b32_e32 v34, 0x400000, v36
+; GFX11-FAKE16-NEXT: v_add_f32_e32 v35, 0x40c00000, v37
+; GFX11-FAKE16-NEXT: v_bfe_u32 v37, v32, 16, 1
+; GFX11-FAKE16-NEXT: v_cmp_u_f32_e32 vcc_lo, v36, v36
+; GFX11-FAKE16-NEXT: v_or_b32_e32 v39, 0x400000, v32
+; GFX11-FAKE16-NEXT: v_bfe_u32 v50, v38, 16, 1
+; GFX11-FAKE16-NEXT: v_or_b32_e32 v48, 0x400000, v35
+; GFX11-FAKE16-NEXT: v_dual_cndmask_b32 v31, v31, v34 :: v_dual_add_nc_u32 v36, v37, v32
+; GFX11-FAKE16-NEXT: v_bfe_u32 v34, v35, 16, 1
+; GFX11-FAKE16-NEXT: v_and_b32_e32 v37, 0xffff0000, v177
+; GFX11-FAKE16-NEXT: v_cmp_u_f32_e32 vcc_lo, v32, v32
+; GFX11-FAKE16-NEXT: s_delay_alu instid0(VALU_DEP_4) | instskip(SKIP_1) | instid1(VALU_DEP_4)
+; GFX11-FAKE16-NEXT: v_add_nc_u32_e32 v36, 0x7fff, v36
+; GFX11-FAKE16-NEXT: v_lshrrev_b32_e32 v31, 16, v31
+; GFX11-FAKE16-NEXT: v_dual_add_f32 v37, 0x40c00000, v37 :: v_dual_add_nc_u32 v34, v34, v35
+; GFX11-FAKE16-NEXT: v_lshrrev_b32_e32 v33, 16, v33
+; GFX11-FAKE16-NEXT: s_delay_alu instid0(VALU_DEP_4) | instskip(SKIP_1) | instid1(VALU_DEP_4)
+; GFX11-FAKE16-NEXT: v_cndmask_b32_e32 v32, v36, v39, vcc_lo
+; GFX11-FAKE16-NEXT: v_cmp_u_f32_e32 vcc_lo, v35, v35
+; GFX11-FAKE16-NEXT: v_add_nc_u32_e32 v34, 0x7fff, v34
+; GFX11-FAKE16-NEXT: v_bfe_u32 v49, v37, 16, 1
+; GFX11-FAKE16-NEXT: v_add_nc_u32_e32 v36, v50, v38
+; GFX11-FAKE16-NEXT: v_or_b32_e32 v39, 0x400000, v38
+; GFX11-FAKE16-NEXT: v_lshlrev_b32_e32 v50, 16, v184
+; GFX11-FAKE16-NEXT: s_delay_alu instid0(VALU_DEP_4) | instskip(NEXT) | instid1(VALU_DEP_4)
+; GFX11-FAKE16-NEXT: v_dual_cndmask_b32 v34, v34, v48 :: v_dual_add_nc_u32 v35, v49, v37
+; GFX11-FAKE16-NEXT: v_add_nc_u32_e32 v36, 0x7fff, v36
+; GFX11-FAKE16-NEXT: v_cmp_u_f32_e32 vcc_lo, v38, v38
+; GFX11-FAKE16-NEXT: v_and_b32_e32 v48, 0xffff0000, v184
+; GFX11-FAKE16-NEXT: s_delay_alu instid0(VALU_DEP_4)
+; GFX11-FAKE16-NEXT: v_lshrrev_b32_e32 v34, 16, v34
+; GFX11-FAKE16-NEXT: v_add_nc_u32_e32 v35, 0x7fff, v35
+; GFX11-FAKE16-NEXT: v_or_b32_e32 v49, 0x400000, v37
+; GFX11-FAKE16-NEXT: v_cndmask_b32_e32 v36, v36, v39, vcc_lo
+; GFX11-FAKE16-NEXT: v_cmp_u_f32_e32 vcc_lo, v37, v37
+; GFX11-FAKE16-NEXT: v_add_f32_e32 v37, 0x40c00000, v50
+; GFX11-FAKE16-NEXT: v_and_b32_e32 v31, 0xffff, v31
+; GFX11-FAKE16-NEXT: v_lshrrev_b32_e32 v32, 16, v32
+; GFX11-FAKE16-NEXT: v_dual_add_f32 v38, 0x40c00000, v48 :: v_dual_cndmask_b32 v35, v35, v49
+; GFX11-FAKE16-NEXT: v_lshrrev_b32_e32 v36, 16, v36
+; GFX11-FAKE16-NEXT: v_and_b32_e32 v34, 0xffff, v34
+; GFX11-FAKE16-NEXT: v_bfe_u32 v48, v37, 16, 1
+; GFX11-FAKE16-NEXT: s_delay_alu instid0(VALU_DEP_4)
+; GFX11-FAKE16-NEXT: v_bfe_u32 v39, v38, 16, 1
+; GFX11-FAKE16-NEXT: v_lshrrev_b32_e32 v35, 16, v35
+; GFX11-FAKE16-NEXT: v_and_b32_e32 v36, 0xffff, v36
+; GFX11-FAKE16-NEXT: v_lshl_or_b32 v174, v33, 16, v31
+; GFX11-FAKE16-NEXT: v_lshl_or_b32 v171, v32, 16, v34
+; GFX11-FAKE16-NEXT: v_add_nc_u32_e32 v31, v48, v37
+; GFX11-FAKE16-NEXT: v_and_b32_e32 v33, 0xffff0000, v175
+; GFX11-FAKE16-NEXT: v_lshlrev_b32_e32 v34, 16, v175
+; GFX11-FAKE16-NEXT: v_add_nc_u32_e32 v39, v39, v38
+; GFX11-FAKE16-NEXT: v_lshl_or_b32 v177, v35, 16, v36
+; GFX11-FAKE16-NEXT: v_add_nc_u32_e32 v31, 0x7fff, v31
+; GFX11-FAKE16-NEXT: v_or_b32_e32 v35, 0x400000, v37
+; GFX11-FAKE16-NEXT: v_dual_add_f32 v33, 0x40c00000, v33 :: v_dual_add_f32 v34, 0x40c00000, v34
+; GFX11-FAKE16-NEXT: v_cmp_u_f32_e32 vcc_lo, v37, v37
+; GFX11-FAKE16-NEXT: v_add_nc_u32_e32 v32, 0x7fff, v39
+; GFX11-FAKE16-NEXT: v_or_b32_e32 v36, 0x400000, v38
+; GFX11-FAKE16-NEXT: s_delay_alu instid0(VALU_DEP_4)
+; GFX11-FAKE16-NEXT: v_bfe_u32 v37, v33, 16, 1
+; GFX11-FAKE16-NEXT: v_bfe_u32 v39, v34, 16, 1
+; GFX11-FAKE16-NEXT: v_cndmask_b32_e32 v31, v31, v35, vcc_lo
+; GFX11-FAKE16-NEXT: v_and_b32_e32 v35, 0xffff0000, v173
+; GFX11-FAKE16-NEXT: v_cmp_u_f32_e32 vcc_lo, v38, v38
+; GFX11-FAKE16-NEXT: v_lshlrev_b32_e32 v48, 16, v173
+; GFX11-FAKE16-NEXT: v_or_b32_e32 v49, 0x400000, v33
+; GFX11-FAKE16-NEXT: v_lshrrev_b32_e32 v31, 16, v31
+; GFX11-FAKE16-NEXT: v_dual_add_f32 v35, 0x40c00000, v35 :: v_dual_cndmask_b32 v32, v32, v36
+; GFX11-FAKE16-NEXT: v_add_nc_u32_e32 v36, v37, v33
+; GFX11-FAKE16-NEXT: v_add_nc_u32_e32 v37, v39, v34
+; GFX11-FAKE16-NEXT: v_or_b32_e32 v39, 0x400000, v34
+; GFX11-FAKE16-NEXT: s_delay_alu instid0(VALU_DEP_4)
+; GFX11-FAKE16-NEXT: v_bfe_u32 v38, v35, 16, 1
+; GFX11-FAKE16-NEXT: v_cmp_u_f32_e32 vcc_lo, v34, v34
+; GFX11-FAKE16-NEXT: v_add_nc_u32_e32 v36, 0x7fff, v36
+; GFX11-FAKE16-NEXT: v_add_nc_u32_e32 v37, 0x7fff, v37
+; GFX11-FAKE16-NEXT: v_lshrrev_b32_e32 v32, 16, v32
+; GFX11-FAKE16-NEXT: v_add_nc_u32_e32 v38, v38, v35
+; GFX11-FAKE16-NEXT: v_and_b32_e32 v31, 0xffff, v31
+; GFX11-FAKE16-NEXT: v_lshl_or_b32 v122, v3, 16, v6
+; GFX11-FAKE16-NEXT: v_cndmask_b32_e32 v34, v37, v39, vcc_lo
+; GFX11-FAKE16-NEXT: v_cmp_u_f32_e32 vcc_lo, v33, v33
+; GFX11-FAKE16-NEXT: v_add_nc_u32_e32 v37, 0x7fff, v38
+; GFX11-FAKE16-NEXT: v_add_f32_e32 v38, 0x40c00000, v48
+; GFX11-FAKE16-NEXT: v_or_b32_e32 v39, 0x400000, v35
+; GFX11-FAKE16-NEXT: v_dual_cndmask_b32 v33, v36, v49 :: v_dual_lshlrev_b32 v48, 16, v183
+; GFX11-FAKE16-NEXT: v_cmp_u_f32_e32 vcc_lo, v35, v35
+; GFX11-FAKE16-NEXT: s_delay_alu instid0(VALU_DEP_4) | instskip(SKIP_1) | instid1(VALU_DEP_4)
+; GFX11-FAKE16-NEXT: v_bfe_u32 v36, v38, 16, 1
+; GFX11-FAKE16-NEXT: v_or_b32_e32 v49, 0x400000, v38
+; GFX11-FAKE16-NEXT: v_add_f32_e32 v48, 0x40c00000, v48
+; GFX11-FAKE16-NEXT: v_lshrrev_b32_e32 v34, 16, v34
+; GFX11-FAKE16-NEXT: v_cndmask_b32_e32 v35, v37, v39, vcc_lo
+; GFX11-FAKE16-NEXT: v_and_b32_e32 v37, 0xffff0000, v172
+; GFX11-FAKE16-NEXT: v_lshlrev_b32_e32 v39, 16, v172
+; GFX11-FAKE16-NEXT: v_add_nc_u32_e32 v36, v36, v38
+; GFX11-FAKE16-NEXT: v_cmp_u_f32_e32 vcc_lo, v38, v38
+; GFX11-FAKE16-NEXT: v_or_b32_e32 v55, 0x400000, v48
+; GFX11-FAKE16-NEXT: v_add_f32_e32 v37, 0x40c00000, v37
+; GFX11-FAKE16-NEXT: v_add_f32_e32 v39, 0x40c00000, v39
+; GFX11-FAKE16-NEXT: v_add_nc_u32_e32 v36, 0x7fff, v36
+; GFX11-FAKE16-NEXT: v_lshrrev_b32_e32 v33, 16, v33
+; GFX11-FAKE16-NEXT: v_and_b32_e32 v34, 0xffff, v34
+; GFX11-FAKE16-NEXT: v_bfe_u32 v50, v37, 16, 1
+; GFX11-FAKE16-NEXT: v_bfe_u32 v38, v39, 16, 1
+; GFX11-FAKE16-NEXT: v_cndmask_b32_e32 v36, v36, v49, vcc_lo
+; GFX11-FAKE16-NEXT: v_or_b32_e32 v54, 0x400000, v39
+; GFX11-FAKE16-NEXT: v_cmp_u_f32_e32 vcc_lo, v39, v39
+; GFX11-FAKE16-NEXT: v_dual_add_f32 v50, 0x40c00000, v51 :: v_dual_add_nc_u32 v49, v50, v37
+; GFX11-FAKE16-NEXT: v_bfe_u32 v51, v48, 16, 1
+; GFX11-FAKE16-NEXT: v_add_nc_u32_e32 v38, v38, v39
+; GFX11-FAKE16-NEXT: v_or_b32_e32 v53, 0x400000, v37
+; GFX11-FAKE16-NEXT: s_delay_alu instid0(VALU_DEP_4)
+; GFX11-FAKE16-NEXT: v_add_nc_u32_e32 v49, 0x7fff, v49
+; GFX11-FAKE16-NEXT: v_bfe_u32 v52, v50, 16, 1
+; GFX11-FAKE16-NEXT: v_add_nc_u32_e32 v51, v51, v48
+; GFX11-FAKE16-NEXT: v_add_nc_u32_e32 v38, 0x7fff, v38
+; GFX11-FAKE16-NEXT: v_lshrrev_b32_e32 v36, 16, v36
+; GFX11-FAKE16-NEXT: v_lshrrev_b32_e32 v35, 16, v35
+; GFX11-FAKE16-NEXT: v_add_nc_u32_e32 v52, v52, v50
+; GFX11-FAKE16-NEXT: s_delay_alu instid0(VALU_DEP_4) | instskip(SKIP_2) | instid1(VALU_DEP_4)
+; GFX11-FAKE16-NEXT: v_dual_cndmask_b32 v38, v38, v54 :: v_dual_add_nc_u32 v51, 0x7fff, v51
+; GFX11-FAKE16-NEXT: v_cmp_u_f32_e32 vcc_lo, v48, v48
+; GFX11-FAKE16-NEXT: v_and_b32_e32 v36, 0xffff, v36
+; GFX11-FAKE16-NEXT: v_add_nc_u32_e32 v39, 0x7fff, v52
+; GFX11-FAKE16-NEXT: v_or_b32_e32 v52, 0x400000, v50
+; GFX11-FAKE16-NEXT: v_lshrrev_b32_e32 v38, 16, v38
+; GFX11-FAKE16-NEXT: v_cndmask_b32_e32 v48, v51, v55, vcc_lo
+; GFX11-FAKE16-NEXT: v_cmp_u_f32_e32 vcc_lo, v37, v37
+; GFX11-FAKE16-NEXT: v_lshl_or_b32 v184, v32, 16, v31
+; GFX11-FAKE16-NEXT: v_lshl_or_b32 v175, v33, 16, v34
+; GFX11-FAKE16-NEXT: v_and_b32_e32 v38, 0xffff, v38
+; GFX11-FAKE16-NEXT: v_lshrrev_b32_e32 v48, 16, v48
+; GFX11-FAKE16-NEXT: v_cndmask_b32_e32 v37, v49, v53, vcc_lo
+; GFX11-FAKE16-NEXT: v_cmp_u_f32_e32 vcc_lo, v50, v50
+; GFX11-FAKE16-NEXT: v_lshl_or_b32 v173, v35, 16, v36
+; GFX11-FAKE16-NEXT: v_lshl_or_b32 v97, v8, 16, v10
+; GFX11-FAKE16-NEXT: v_and_b32_e32 v48, 0xffff, v48
+; GFX11-FAKE16-NEXT: v_lshrrev_b32_e32 v37, 16, v37
+; GFX11-FAKE16-NEXT: v_cndmask_b32_e32 v39, v39, v52, vcc_lo
+; GFX11-FAKE16-NEXT: v_lshl_or_b32 v86, v9, 16, v12
+; GFX11-FAKE16-NEXT: v_lshl_or_b32 v76, v11, 16, v13
+; GFX11-FAKE16-NEXT: v_lshl_or_b32 v67, v14, 16, v17
+; GFX11-FAKE16-NEXT: v_lshl_or_b32 v172, v37, 16, v38
+; GFX11-FAKE16-NEXT: v_lshrrev_b32_e32 v39, 16, v39
+; GFX11-FAKE16-NEXT: v_lshl_or_b32 v59, v16, 16, v19
+; GFX11-FAKE16-NEXT: v_lshl_or_b32 v52, v18, 16, v20
+; GFX11-FAKE16-NEXT: v_lshl_or_b32 v46, v21, 16, v23
+; GFX11-FAKE16-NEXT: v_lshl_or_b32 v41, v22, 16, v25
+; GFX11-FAKE16-NEXT: v_lshl_or_b32 v183, v39, 16, v48
+; GFX11-FAKE16-NEXT: v_lshl_or_b32 v37, v24, 16, v27
+; GFX11-FAKE16-NEXT: v_lshl_or_b32 v34, v26, 16, v28
+; GFX11-FAKE16-NEXT: v_lshl_or_b32 v32, v29, 16, v30
+; GFX11-FAKE16-NEXT: .LBB63_3: ; %end
+; GFX11-FAKE16-NEXT: v_dual_mov_b32 v3, v41 :: v_dual_mov_b32 v4, v46
+; GFX11-FAKE16-NEXT: v_dual_mov_b32 v6, v59 :: v_dual_mov_b32 v9, v86
+; GFX11-FAKE16-NEXT: v_dual_mov_b32 v7, v67 :: v_dual_mov_b32 v8, v76
+; GFX11-FAKE16-NEXT: v_dual_mov_b32 v10, v97 :: v_dual_mov_b32 v13, v136
+; GFX11-FAKE16-NEXT: v_dual_mov_b32 v11, v109 :: v_dual_mov_b32 v12, v122
+; GFX11-FAKE16-NEXT: v_dual_mov_b32 v14, v151 :: v_dual_mov_b32 v17, v172
+; GFX11-FAKE16-NEXT: v_dual_mov_b32 v18, v173 :: v_dual_mov_b32 v19, v175
+; GFX11-FAKE16-NEXT: v_dual_mov_b32 v20, v184 :: v_dual_mov_b32 v23, v174
+; GFX11-FAKE16-NEXT: v_dual_mov_b32 v22, v171 :: v_dual_mov_b32 v25, v169
+; GFX11-FAKE16-NEXT: v_dual_mov_b32 v26, v170 :: v_dual_mov_b32 v29, v180
+; GFX11-FAKE16-NEXT: s_clause 0x1f
+; GFX11-FAKE16-NEXT: scratch_load_b32 v184, off, s32
+; GFX11-FAKE16-NEXT: scratch_load_b32 v175, off, s32 offset:4
+; GFX11-FAKE16-NEXT: scratch_load_b32 v174, off, s32 offset:8
+; GFX11-FAKE16-NEXT: scratch_load_b32 v173, off, s32 offset:12
+; GFX11-FAKE16-NEXT: scratch_load_b32 v172, off, s32 offset:16
+; GFX11-FAKE16-NEXT: scratch_load_b32 v171, off, s32 offset:20
+; GFX11-FAKE16-NEXT: scratch_load_b32 v170, off, s32 offset:24
+; GFX11-FAKE16-NEXT: scratch_load_b32 v169, off, s32 offset:28
+; GFX11-FAKE16-NEXT: scratch_load_b32 v168, off, s32 offset:32
+; GFX11-FAKE16-NEXT: scratch_load_b32 v159, off, s32 offset:36
+; GFX11-FAKE16-NEXT: scratch_load_b32 v158, off, s32 offset:40
+; GFX11-FAKE16-NEXT: scratch_load_b32 v157, off, s32 offset:44
+; GFX11-FAKE16-NEXT: scratch_load_b32 v156, off, s32 offset:48
+; GFX11-FAKE16-NEXT: scratch_load_b32 v155, off, s32 offset:52
+; GFX11-FAKE16-NEXT: scratch_load_b32 v154, off, s32 offset:56
+; GFX11-FAKE16-NEXT: scratch_load_b32 v153, off, s32 offset:60
+; GFX11-FAKE16-NEXT: scratch_load_b32 v152, off, s32 offset:64
+; GFX11-FAKE16-NEXT: scratch_load_b32 v143, off, s32 offset:68
+; GFX11-FAKE16-NEXT: scratch_load_b32 v142, off, s32 offset:72
+; GFX11-FAKE16-NEXT: scratch_load_b32 v141, off, s32 offset:76
+; GFX11-FAKE16-NEXT: scratch_load_b32 v140, off, s32 offset:80
+; GFX11-FAKE16-NEXT: scratch_load_b32 v139, off, s32 offset:84
+; GFX11-FAKE16-NEXT: scratch_load_b32 v138, off, s32 offset:88
+; GFX11-FAKE16-NEXT: scratch_load_b32 v137, off, s32 offset:92
+; GFX11-FAKE16-NEXT: scratch_load_b32 v136, off, s32 offset:96
+; GFX11-FAKE16-NEXT: scratch_load_b32 v127, off, s32 offset:100
+; GFX11-FAKE16-NEXT: scratch_load_b32 v126, off, s32 offset:104
+; GFX11-FAKE16-NEXT: scratch_load_b32 v125, off, s32 offset:108
+; GFX11-FAKE16-NEXT: scratch_load_b32 v124, off, s32 offset:112
+; GFX11-FAKE16-NEXT: scratch_load_b32 v123, off, s32 offset:116
+; GFX11-FAKE16-NEXT: scratch_load_b32 v122, off, s32 offset:120
+; GFX11-FAKE16-NEXT: scratch_load_b32 v121, off, s32 offset:124
+; GFX11-FAKE16-NEXT: s_clause 0x1f
+; GFX11-FAKE16-NEXT: scratch_load_b32 v120, off, s32 offset:128
+; GFX11-FAKE16-NEXT: scratch_load_b32 v111, off, s32 offset:132
+; GFX11-FAKE16-NEXT: scratch_load_b32 v110, off, s32 offset:136
+; GFX11-FAKE16-NEXT: scratch_load_b32 v109, off, s32 offset:140
+; GFX11-FAKE16-NEXT: scratch_load_b32 v108, off, s32 offset:144
+; GFX11-FAKE16-NEXT: scratch_load_b32 v107, off, s32 offset:148
+; GFX11-FAKE16-NEXT: scratch_load_b32 v106, off, s32 offset:152
+; GFX11-FAKE16-NEXT: scratch_load_b32 v105, off, s32 offset:156
+; GFX11-FAKE16-NEXT: scratch_load_b32 v104, off, s32 offset:160
+; GFX11-FAKE16-NEXT: scratch_load_b32 v95, off, s32 offset:164
+; GFX11-FAKE16-NEXT: scratch_load_b32 v94, off, s32 offset:168
+; GFX11-FAKE16-NEXT: scratch_load_b32 v93, off, s32 offset:172
+; GFX11-FAKE16-NEXT: scratch_load_b32 v92, off, s32 offset:176
+; GFX11-FAKE16-NEXT: scratch_load_b32 v91, off, s32 offset:180
+; GFX11-FAKE16-NEXT: scratch_load_b32 v90, off, s32 offset:184
+; GFX11-FAKE16-NEXT: scratch_load_b32 v89, off, s32 offset:188
+; GFX11-FAKE16-NEXT: scratch_load_b32 v88, off, s32 offset:192
+; GFX11-FAKE16-NEXT: scratch_load_b32 v79, off, s32 offset:196
+; GFX11-FAKE16-NEXT: scratch_load_b32 v78, off, s32 offset:200
+; GFX11-FAKE16-NEXT: scratch_load_b32 v77, off, s32 offset:204
+; GFX11-FAKE16-NEXT: scratch_load_b32 v76, off, s32 offset:208
+; GFX11-FAKE16-NEXT: scratch_load_b32 v75, off, s32 offset:212
+; GFX11-FAKE16-NEXT: scratch_load_b32 v74, off, s32 offset:216
+; GFX11-FAKE16-NEXT: scratch_load_b32 v73, off, s32 offset:220
+; GFX11-FAKE16-NEXT: scratch_load_b32 v72, off, s32 offset:224
+; GFX11-FAKE16-NEXT: scratch_load_b32 v63, off, s32 offset:228
+; GFX11-FAKE16-NEXT: scratch_load_b32 v62, off, s32 offset:232
+; GFX11-FAKE16-NEXT: scratch_load_b32 v61, off, s32 offset:236
+; GFX11-FAKE16-NEXT: scratch_load_b32 v60, off, s32 offset:240
+; GFX11-FAKE16-NEXT: scratch_load_b32 v59, off, s32 offset:244
+; GFX11-FAKE16-NEXT: scratch_load_b32 v58, off, s32 offset:248
+; GFX11-FAKE16-NEXT: scratch_load_b32 v57, off, s32 offset:252
+; GFX11-FAKE16-NEXT: s_clause 0x8
+; GFX11-FAKE16-NEXT: scratch_load_b32 v56, off, s32 offset:256
+; GFX11-FAKE16-NEXT: scratch_load_b32 v47, off, s32 offset:260
+; GFX11-FAKE16-NEXT: scratch_load_b32 v46, off, s32 offset:264
+; GFX11-FAKE16-NEXT: scratch_load_b32 v45, off, s32 offset:268
+; GFX11-FAKE16-NEXT: scratch_load_b32 v44, off, s32 offset:272
+; GFX11-FAKE16-NEXT: scratch_load_b32 v43, off, s32 offset:276
+; GFX11-FAKE16-NEXT: scratch_load_b32 v42, off, s32 offset:280
+; GFX11-FAKE16-NEXT: scratch_load_b32 v41, off, s32 offset:284
+; GFX11-FAKE16-NEXT: scratch_load_b32 v40, off, s32 offset:288
+; GFX11-FAKE16-NEXT: v_dual_mov_b32 v0, v32 :: v_dual_mov_b32 v1, v34
+; GFX11-FAKE16-NEXT: v_dual_mov_b32 v2, v37 :: v_dual_mov_b32 v5, v52
+; GFX11-FAKE16-NEXT: v_dual_mov_b32 v16, v183 :: v_dual_mov_b32 v21, v177
+; GFX11-FAKE16-NEXT: v_dual_mov_b32 v24, v176 :: v_dual_mov_b32 v27, v181
+; GFX11-FAKE16-NEXT: v_mov_b32_e32 v28, v182
+; GFX11-FAKE16-NEXT: v_dual_mov_b32 v30, v179 :: v_dual_mov_b32 v31, v178
+; GFX11-FAKE16-NEXT: s_waitcnt vmcnt(0)
+; GFX11-FAKE16-NEXT: s_setpc_b64 s[30:31]
+; GFX11-FAKE16-NEXT: .LBB63_4:
+; GFX11-FAKE16-NEXT: ; implicit-def: $vgpr32_vgpr33_vgpr34_vgpr35_vgpr36_vgpr37_vgpr38_vgpr39_vgpr40_vgpr41_vgpr42_vgpr43_vgpr44_vgpr45_vgpr46_vgpr47_vgpr48_vgpr49_vgpr50_vgpr51_vgpr52_vgpr53_vgpr54_vgpr55_vgpr56_vgpr57_vgpr58_vgpr59_vgpr60_vgpr61_vgpr62_vgpr63
+; GFX11-FAKE16-NEXT: ; implicit-def: $vgpr33_vgpr34_vgpr35_vgpr36_vgpr37_vgpr38_vgpr39_vgpr40_vgpr41_vgpr42_vgpr43_vgpr44_vgpr45_vgpr46_vgpr47_vgpr48_vgpr49_vgpr50_vgpr51_vgpr52_vgpr53_vgpr54_vgpr55_vgpr56_vgpr57_vgpr58_vgpr59_vgpr60_vgpr61_vgpr62_vgpr63_vgpr64
+; GFX11-FAKE16-NEXT: ; implicit-def: $vgpr0_vgpr1_vgpr2_vgpr3_vgpr4_vgpr5_vgpr6_vgpr7_vgpr8_vgpr9_vgpr10_vgpr11_vgpr12_vgpr13_vgpr14_vgpr15_vgpr16_vgpr17_vgpr18_vgpr19_vgpr20_vgpr21_vgpr22_vgpr23_vgpr24_vgpr25_vgpr26_vgpr27_vgpr28_vgpr29_vgpr30_vgpr31
+; GFX11-FAKE16-NEXT: ; implicit-def: $vgpr35_vgpr36_vgpr37_vgpr38_vgpr39_vgpr40_vgpr41_vgpr42_vgpr43_vgpr44_vgpr45_vgpr46_vgpr47_vgpr48_vgpr49_vgpr50_vgpr51_vgpr52_vgpr53_vgpr54_vgpr55_vgpr56_vgpr57_vgpr58_vgpr59_vgpr60_vgpr61_vgpr62_vgpr63_vgpr64_vgpr65_vgpr66
+; GFX11-FAKE16-NEXT: ; implicit-def: $vgpr38_vgpr39_vgpr40_vgpr41_vgpr42_vgpr43_vgpr44_vgpr45_vgpr46_vgpr47_vgpr48_vgpr49_vgpr50_vgpr51_vgpr52_vgpr53_vgpr54_vgpr55_vgpr56_vgpr57_vgpr58_vgpr59_vgpr60_vgpr61_vgpr62_vgpr63_vgpr64_vgpr65_vgpr66_vgpr67_vgpr68_vgpr69
+; GFX11-FAKE16-NEXT: ; implicit-def: $vgpr42_vgpr43_vgpr44_vgpr45_vgpr46_vgpr47_vgpr48_vgpr49_vgpr50_vgpr51_vgpr52_vgpr53_vgpr54_vgpr55_vgpr56_vgpr57_vgpr58_vgpr59_vgpr60_vgpr61_vgpr62_vgpr63_vgpr64_vgpr65_vgpr66_vgpr67_vgpr68_vgpr69_vgpr70_vgpr71_vgpr72_vgpr73
+; GFX11-FAKE16-NEXT: ; implicit-def: $vgpr47_vgpr48_vgpr49_vgpr50_vgpr51_vgpr52_vgpr53_vgpr54_vgpr55_vgpr56_vgpr57_vgpr58_vgpr59_vgpr60_vgpr61_vgpr62_vgpr63_vgpr64_vgpr65_vgpr66_vgpr67_vgpr68_vgpr69_vgpr70_vgpr71_vgpr72_vgpr73_vgpr74_vgpr75_vgpr76_vgpr77_vgpr78
+; GFX11-FAKE16-NEXT: ; implicit-def: $vgpr53_vgpr54_vgpr55_vgpr56_vgpr57_vgpr58_vgpr59_vgpr60_vgpr61_vgpr62_vgpr63_vgpr64_vgpr65_vgpr66_vgpr67_vgpr68_vgpr69_vgpr70_vgpr71_vgpr72_vgpr73_vgpr74_vgpr75_vgpr76_vgpr77_vgpr78_vgpr79_vgpr80_vgpr81_vgpr82_vgpr83_vgpr84
+; GFX11-FAKE16-NEXT: ; implicit-def: $vgpr60_vgpr61_vgpr62_vgpr63_vgpr64_vgpr65_vgpr66_vgpr67_vgpr68_vgpr69_vgpr70_vgpr71_vgpr72_vgpr73_vgpr74_vgpr75_vgpr76_vgpr77_vgpr78_vgpr79_vgpr80_vgpr81_vgpr82_vgpr83_vgpr84_vgpr85_vgpr86_vgpr87_vgpr88_vgpr89_vgpr90_vgpr91
+; GFX11-FAKE16-NEXT: ; implicit-def: $vgpr68_vgpr69_vgpr70_vgpr71_vgpr72_vgpr73_vgpr74_vgpr75_vgpr76_vgpr77_vgpr78_vgpr79_vgpr80_vgpr81_vgpr82_vgpr83_vgpr84_vgpr85_vgpr86_vgpr87_vgpr88_vgpr89_vgpr90_vgpr91_vgpr92_vgpr93_vgpr94_vgpr95_vgpr96_vgpr97_vgpr98_vgpr99
+; GFX11-FAKE16-NEXT: ; implicit-def: $vgpr77_vgpr78_vgpr79_vgpr80_vgpr81_vgpr82_vgpr83_vgpr84_vgpr85_vgpr86_vgpr87_vgpr88_vgpr89_vgpr90_vgpr91_vgpr92_vgpr93_vgpr94_vgpr95_vgpr96_vgpr97_vgpr98_vgpr99_vgpr100_vgpr101_vgpr102_vgpr103_vgpr104_vgpr105_vgpr106_vgpr107_vgpr108
+; GFX11-FAKE16-NEXT: ; implicit-def: $vgpr87_vgpr88_vgpr89_vgpr90_vgpr91_vgpr92_vgpr93_vgpr94_vgpr95_vgpr96_vgpr97_vgpr98_vgpr99_vgpr100_vgpr101_vgpr102_vgpr103_vgpr104_vgpr105_vgpr106_vgpr107_vgpr108_vgpr109_vgpr110_vgpr111_vgpr112_vgpr113_vgpr114_vgpr115_vgpr116_vgpr117_vgpr118
+; GFX11-FAKE16-NEXT: ; implicit-def: $vgpr98_vgpr99_vgpr100_vgpr101_vgpr102_vgpr103_vgpr104_vgpr105_vgpr106_vgpr107_vgpr108_vgpr109_vgpr110_vgpr111_vgpr112_vgpr113_vgpr114_vgpr115_vgpr116_vgpr117_vgpr118_vgpr119_vgpr120_vgpr121_vgpr122_vgpr123_vgpr124_vgpr125_vgpr126_vgpr127_vgpr128_vgpr129
+; GFX11-FAKE16-NEXT: ; implicit-def: $vgpr110_vgpr111_vgpr112_vgpr113_vgpr114_vgpr115_vgpr116_vgpr117_vgpr118_vgpr119_vgpr120_vgpr121_vgpr122_vgpr123_vgpr124_vgpr125_vgpr126_vgpr127_vgpr128_vgpr129_vgpr130_vgpr131_vgpr132_vgpr133_vgpr134_vgpr135_vgpr136_vgpr137_vgpr138_vgpr139_vgpr140_vgpr141
+; GFX11-FAKE16-NEXT: ; implicit-def: $vgpr123_vgpr124_vgpr125_vgpr126_vgpr127_vgpr128_vgpr129_vgpr130_vgpr131_vgpr132_vgpr133_vgpr134_vgpr135_vgpr136_vgpr137_vgpr138_vgpr139_vgpr140_vgpr141_vgpr142_vgpr143_vgpr144_vgpr145_vgpr146_vgpr147_vgpr148_vgpr149_vgpr150_vgpr151_vgpr152_vgpr153_vgpr154
+; GFX11-FAKE16-NEXT: ; implicit-def: $vgpr137_vgpr138_vgpr139_vgpr140_vgpr141_vgpr142_vgpr143_vgpr144_vgpr145_vgpr146_vgpr147_vgpr148_vgpr149_vgpr150_vgpr151_vgpr152_vgpr153_vgpr154_vgpr155_vgpr156_vgpr157_vgpr158_vgpr159_vgpr160_vgpr161_vgpr162_vgpr163_vgpr164_vgpr165_vgpr166_vgpr167_vgpr168
+; GFX11-FAKE16-NEXT: s_branch .LBB63_2
%cmp = icmp eq i32 %b, 0
br i1 %cmp, label %cmp.true, label %cmp.false
@@ -136197,870 +139119,1844 @@ define inreg <16 x double> @bitcast_v64bf16_to_v16f64_scalar(<64 x bfloat> inreg
; GFX9-NEXT: .LBB79_4:
; GFX9-NEXT: s_branch .LBB79_2
;
-; GFX11-LABEL: bitcast_v64bf16_to_v16f64_scalar:
-; GFX11: ; %bb.0:
-; GFX11-NEXT: s_waitcnt vmcnt(0) expcnt(0) lgkmcnt(0)
-; GFX11-NEXT: v_cmp_ne_u32_e32 vcc_lo, 0, v14
-; GFX11-NEXT: s_clause 0x1f
-; GFX11-NEXT: scratch_store_b32 off, v40, s32 offset:288
-; GFX11-NEXT: scratch_store_b32 off, v41, s32 offset:284
-; GFX11-NEXT: scratch_store_b32 off, v42, s32 offset:280
-; GFX11-NEXT: scratch_store_b32 off, v43, s32 offset:276
-; GFX11-NEXT: scratch_store_b32 off, v44, s32 offset:272
-; GFX11-NEXT: scratch_store_b32 off, v45, s32 offset:268
-; GFX11-NEXT: scratch_store_b32 off, v46, s32 offset:264
-; GFX11-NEXT: scratch_store_b32 off, v47, s32 offset:260
-; GFX11-NEXT: scratch_store_b32 off, v56, s32 offset:256
-; GFX11-NEXT: scratch_store_b32 off, v57, s32 offset:252
-; GFX11-NEXT: scratch_store_b32 off, v58, s32 offset:248
-; GFX11-NEXT: scratch_store_b32 off, v59, s32 offset:244
-; GFX11-NEXT: scratch_store_b32 off, v60, s32 offset:240
-; GFX11-NEXT: scratch_store_b32 off, v61, s32 offset:236
-; GFX11-NEXT: scratch_store_b32 off, v62, s32 offset:232
-; GFX11-NEXT: scratch_store_b32 off, v63, s32 offset:228
-; GFX11-NEXT: scratch_store_b32 off, v72, s32 offset:224
-; GFX11-NEXT: scratch_store_b32 off, v73, s32 offset:220
-; GFX11-NEXT: scratch_store_b32 off, v74, s32 offset:216
-; GFX11-NEXT: scratch_store_b32 off, v75, s32 offset:212
-; GFX11-NEXT: scratch_store_b32 off, v76, s32 offset:208
-; GFX11-NEXT: scratch_store_b32 off, v77, s32 offset:204
-; GFX11-NEXT: scratch_store_b32 off, v78, s32 offset:200
-; GFX11-NEXT: scratch_store_b32 off, v79, s32 offset:196
-; GFX11-NEXT: scratch_store_b32 off, v88, s32 offset:192
-; GFX11-NEXT: scratch_store_b32 off, v89, s32 offset:188
-; GFX11-NEXT: scratch_store_b32 off, v90, s32 offset:184
-; GFX11-NEXT: scratch_store_b32 off, v91, s32 offset:180
-; GFX11-NEXT: scratch_store_b32 off, v92, s32 offset:176
-; GFX11-NEXT: scratch_store_b32 off, v93, s32 offset:172
-; GFX11-NEXT: scratch_store_b32 off, v94, s32 offset:168
-; GFX11-NEXT: scratch_store_b32 off, v95, s32 offset:164
-; GFX11-NEXT: s_clause 0x1f
-; GFX11-NEXT: scratch_store_b32 off, v104, s32 offset:160
-; GFX11-NEXT: scratch_store_b32 off, v105, s32 offset:156
-; GFX11-NEXT: scratch_store_b32 off, v106, s32 offset:152
-; GFX11-NEXT: scratch_store_b32 off, v107, s32 offset:148
-; GFX11-NEXT: scratch_store_b32 off, v108, s32 offset:144
-; GFX11-NEXT: scratch_store_b32 off, v109, s32 offset:140
-; GFX11-NEXT: scratch_store_b32 off, v110, s32 offset:136
-; GFX11-NEXT: scratch_store_b32 off, v111, s32 offset:132
-; GFX11-NEXT: scratch_store_b32 off, v120, s32 offset:128
-; GFX11-NEXT: scratch_store_b32 off, v121, s32 offset:124
-; GFX11-NEXT: scratch_store_b32 off, v122, s32 offset:120
-; GFX11-NEXT: scratch_store_b32 off, v123, s32 offset:116
-; GFX11-NEXT: scratch_store_b32 off, v124, s32 offset:112
-; GFX11-NEXT: scratch_store_b32 off, v125, s32 offset:108
-; GFX11-NEXT: scratch_store_b32 off, v126, s32 offset:104
-; GFX11-NEXT: scratch_store_b32 off, v127, s32 offset:100
-; GFX11-NEXT: scratch_store_b32 off, v136, s32 offset:96
-; GFX11-NEXT: scratch_store_b32 off, v137, s32 offset:92
-; GFX11-NEXT: scratch_store_b32 off, v138, s32 offset:88
-; GFX11-NEXT: scratch_store_b32 off, v139, s32 offset:84
-; GFX11-NEXT: scratch_store_b32 off, v140, s32 offset:80
-; GFX11-NEXT: scratch_store_b32 off, v141, s32 offset:76
-; GFX11-NEXT: scratch_store_b32 off, v142, s32 offset:72
-; GFX11-NEXT: scratch_store_b32 off, v143, s32 offset:68
-; GFX11-NEXT: scratch_store_b32 off, v152, s32 offset:64
-; GFX11-NEXT: scratch_store_b32 off, v153, s32 offset:60
-; GFX11-NEXT: scratch_store_b32 off, v154, s32 offset:56
-; GFX11-NEXT: scratch_store_b32 off, v155, s32 offset:52
-; GFX11-NEXT: scratch_store_b32 off, v156, s32 offset:48
-; GFX11-NEXT: scratch_store_b32 off, v157, s32 offset:44
-; GFX11-NEXT: scratch_store_b32 off, v158, s32 offset:40
-; GFX11-NEXT: scratch_store_b32 off, v159, s32 offset:36
-; GFX11-NEXT: s_clause 0x8
-; GFX11-NEXT: scratch_store_b32 off, v168, s32 offset:32
-; GFX11-NEXT: scratch_store_b32 off, v169, s32 offset:28
-; GFX11-NEXT: scratch_store_b32 off, v170, s32 offset:24
-; GFX11-NEXT: scratch_store_b32 off, v171, s32 offset:20
-; GFX11-NEXT: scratch_store_b32 off, v172, s32 offset:16
-; GFX11-NEXT: scratch_store_b32 off, v173, s32 offset:12
-; GFX11-NEXT: scratch_store_b32 off, v174, s32 offset:8
-; GFX11-NEXT: scratch_store_b32 off, v175, s32 offset:4
-; GFX11-NEXT: scratch_store_b32 off, v184, s32
-; GFX11-NEXT: v_dual_mov_b32 v178, v13 :: v_dual_mov_b32 v179, v12
-; GFX11-NEXT: v_dual_mov_b32 v180, v11 :: v_dual_mov_b32 v181, v9
-; GFX11-NEXT: v_dual_mov_b32 v182, v10 :: v_dual_mov_b32 v169, v7
-; GFX11-NEXT: v_dual_mov_b32 v170, v8 :: v_dual_mov_b32 v177, v3
-; GFX11-NEXT: v_dual_mov_b32 v176, v6 :: v_dual_mov_b32 v171, v4
-; GFX11-NEXT: v_dual_mov_b32 v174, v5 :: v_dual_mov_b32 v173, v0
-; GFX11-NEXT: v_dual_mov_b32 v184, v2 :: v_dual_mov_b32 v175, v1
-; GFX11-NEXT: v_dual_mov_b32 v183, s28 :: v_dual_mov_b32 v172, s29
-; GFX11-NEXT: s_mov_b32 s4, 0
-; GFX11-NEXT: s_and_b32 s5, vcc_lo, exec_lo
-; GFX11-NEXT: s_cbranch_scc0 .LBB79_4
-; GFX11-NEXT: ; %bb.1: ; %cmp.false
-; GFX11-NEXT: v_dual_mov_b32 v32, s0 :: v_dual_mov_b32 v37, s2
-; GFX11-NEXT: v_dual_mov_b32 v34, s1 :: v_dual_mov_b32 v41, s3
-; GFX11-NEXT: v_dual_mov_b32 v46, s16 :: v_dual_mov_b32 v59, s18
-; GFX11-NEXT: v_dual_mov_b32 v52, s17 :: v_dual_mov_b32 v67, s19
-; GFX11-NEXT: v_dual_mov_b32 v76, s20 :: v_dual_mov_b32 v97, s22
-; GFX11-NEXT: v_dual_mov_b32 v86, s21 :: v_dual_mov_b32 v109, s23
-; GFX11-NEXT: v_dual_mov_b32 v122, s24 :: v_dual_mov_b32 v151, s26
-; GFX11-NEXT: v_dual_mov_b32 v136, s25 :: v_dual_mov_b32 v15, s27
-; GFX11-NEXT: s_and_not1_b32 vcc_lo, exec_lo, s4
-; GFX11-NEXT: s_cbranch_vccnz .LBB79_3
-; GFX11-NEXT: .LBB79_2: ; %cmp.true
-; GFX11-NEXT: s_and_b32 s5, s27, 0xffff0000
-; GFX11-NEXT: s_lshl_b32 s4, s27, 16
-; GFX11-NEXT: v_add_f32_e64 v1, 0x40c00000, s5
-; GFX11-NEXT: v_add_f32_e64 v0, 0x40c00000, s4
-; GFX11-NEXT: s_lshl_b32 s6, s26, 16
-; GFX11-NEXT: s_and_b32 s4, s26, 0xffff0000
-; GFX11-NEXT: v_add_f32_e64 v5, 0x40c00000, s6
-; GFX11-NEXT: v_bfe_u32 v4, v1, 16, 1
-; GFX11-NEXT: v_bfe_u32 v2, v0, 16, 1
-; GFX11-NEXT: v_or_b32_e32 v7, 0x400000, v1
-; GFX11-NEXT: v_add_f32_e64 v3, 0x40c00000, s4
-; GFX11-NEXT: v_or_b32_e32 v8, 0x400000, v0
-; GFX11-NEXT: v_add_nc_u32_e32 v4, v4, v1
-; GFX11-NEXT: v_bfe_u32 v10, v5, 16, 1
-; GFX11-NEXT: v_cmp_u_f32_e32 vcc_lo, v0, v0
-; GFX11-NEXT: v_bfe_u32 v9, v3, 16, 1
-; GFX11-NEXT: s_lshl_b32 s7, s25, 16
-; GFX11-NEXT: v_add_nc_u32_e32 v4, 0x7fff, v4
-; GFX11-NEXT: v_add_nc_u32_e32 v2, v2, v0
-; GFX11-NEXT: s_and_b32 s5, s25, 0xffff0000
-; GFX11-NEXT: s_and_b32 s4, s24, 0xffff0000
-; GFX11-NEXT: v_add_f32_e64 v6, 0x40c00000, s5
-; GFX11-NEXT: v_and_b32_e32 v51, 0xffff0000, v183
-; GFX11-NEXT: v_add_nc_u32_e32 v2, 0x7fff, v2
-; GFX11-NEXT: s_delay_alu instid0(VALU_DEP_1)
-; GFX11-NEXT: v_cndmask_b32_e32 v0, v2, v8, vcc_lo
-; GFX11-NEXT: v_add_nc_u32_e32 v8, v10, v5
-; GFX11-NEXT: v_cmp_u_f32_e32 vcc_lo, v1, v1
-; GFX11-NEXT: v_add_nc_u32_e32 v2, v9, v3
-; GFX11-NEXT: v_or_b32_e32 v9, 0x400000, v5
-; GFX11-NEXT: v_lshrrev_b32_e32 v0, 16, v0
-; GFX11-NEXT: v_bfe_u32 v10, v6, 16, 1
-; GFX11-NEXT: v_cndmask_b32_e32 v1, v4, v7, vcc_lo
-; GFX11-NEXT: v_add_nc_u32_e32 v7, 0x7fff, v8
-; GFX11-NEXT: v_add_f32_e64 v8, 0x40c00000, s7
-; GFX11-NEXT: v_or_b32_e32 v4, 0x400000, v3
-; GFX11-NEXT: v_add_nc_u32_e32 v2, 0x7fff, v2
-; GFX11-NEXT: v_lshrrev_b32_e32 v1, 16, v1
-; GFX11-NEXT: v_and_b32_e32 v0, 0xffff, v0
-; GFX11-NEXT: v_cmp_u_f32_e32 vcc_lo, v3, v3
-; GFX11-NEXT: v_bfe_u32 v3, v8, 16, 1
-; GFX11-NEXT: s_delay_alu instid0(VALU_DEP_3) | instskip(NEXT) | instid1(VALU_DEP_2)
-; GFX11-NEXT: v_lshl_or_b32 v15, v1, 16, v0
-; GFX11-NEXT: v_add_nc_u32_e32 v1, v3, v8
-; GFX11-NEXT: v_cndmask_b32_e32 v2, v2, v4, vcc_lo
-; GFX11-NEXT: v_cmp_u_f32_e32 vcc_lo, v5, v5
-; GFX11-NEXT: v_add_nc_u32_e32 v5, v10, v6
-; GFX11-NEXT: s_delay_alu instid0(VALU_DEP_4) | instskip(NEXT) | instid1(VALU_DEP_4)
-; GFX11-NEXT: v_add_nc_u32_e32 v1, 0x7fff, v1
-; GFX11-NEXT: v_lshrrev_b32_e32 v0, 16, v2
-; GFX11-NEXT: v_cndmask_b32_e32 v4, v7, v9, vcc_lo
-; GFX11-NEXT: s_delay_alu instid0(VALU_DEP_4)
-; GFX11-NEXT: v_add_nc_u32_e32 v3, 0x7fff, v5
-; GFX11-NEXT: v_add_f32_e64 v5, 0x40c00000, s4
-; GFX11-NEXT: v_cmp_u_f32_e32 vcc_lo, v6, v6
-; GFX11-NEXT: s_lshl_b32 s4, s24, 16
-; GFX11-NEXT: v_lshrrev_b32_e32 v2, 16, v4
-; GFX11-NEXT: v_or_b32_e32 v4, 0x400000, v6
-; GFX11-NEXT: v_or_b32_e32 v7, 0x400000, v8
-; GFX11-NEXT: v_add_f32_e64 v9, 0x40c00000, s4
-; GFX11-NEXT: s_and_b32 s4, s23, 0xffff0000
-; GFX11-NEXT: s_delay_alu instid0(VALU_DEP_3) | instskip(SKIP_4) | instid1(VALU_DEP_4)
-; GFX11-NEXT: v_cndmask_b32_e32 v3, v3, v4, vcc_lo
-; GFX11-NEXT: v_bfe_u32 v4, v5, 16, 1
-; GFX11-NEXT: v_cmp_u_f32_e32 vcc_lo, v8, v8
-; GFX11-NEXT: v_or_b32_e32 v8, 0x400000, v5
-; GFX11-NEXT: v_or_b32_e32 v10, 0x400000, v9
-; GFX11-NEXT: v_add_nc_u32_e32 v4, v4, v5
-; GFX11-NEXT: v_dual_cndmask_b32 v6, v1, v7 :: v_dual_and_b32 v1, 0xffff, v2
-; GFX11-NEXT: v_bfe_u32 v7, v9, 16, 1
-; GFX11-NEXT: v_lshrrev_b32_e32 v2, 16, v3
-; GFX11-NEXT: s_delay_alu instid0(VALU_DEP_4) | instskip(NEXT) | instid1(VALU_DEP_4)
-; GFX11-NEXT: v_add_nc_u32_e32 v4, 0x7fff, v4
-; GFX11-NEXT: v_lshrrev_b32_e32 v3, 16, v6
-; GFX11-NEXT: v_cmp_u_f32_e32 vcc_lo, v5, v5
-; GFX11-NEXT: v_add_nc_u32_e32 v6, v7, v9
-; GFX11-NEXT: v_add_f32_e64 v7, 0x40c00000, s4
-; GFX11-NEXT: s_lshl_b32 s4, s23, 16
-; GFX11-NEXT: v_lshl_or_b32 v151, v0, 16, v1
-; GFX11-NEXT: v_add_f32_e64 v12, 0x40c00000, s4
-; GFX11-NEXT: v_add_nc_u32_e32 v6, 0x7fff, v6
-; GFX11-NEXT: v_bfe_u32 v11, v7, 16, 1
-; GFX11-NEXT: v_cndmask_b32_e32 v5, v4, v8, vcc_lo
-; GFX11-NEXT: v_cmp_u_f32_e32 vcc_lo, v9, v9
-; GFX11-NEXT: s_and_b32 s4, s22, 0xffff0000
-; GFX11-NEXT: v_bfe_u32 v9, v12, 16, 1
-; GFX11-NEXT: v_add_nc_u32_e32 v8, v11, v7
-; GFX11-NEXT: v_and_b32_e32 v4, 0xffff, v3
-; GFX11-NEXT: v_cndmask_b32_e32 v6, v6, v10, vcc_lo
-; GFX11-NEXT: v_add_f32_e64 v10, 0x40c00000, s4
-; GFX11-NEXT: s_lshl_b32 s4, s22, 16
-; GFX11-NEXT: v_lshrrev_b32_e32 v3, 16, v5
-; GFX11-NEXT: v_add_f32_e64 v11, 0x40c00000, s4
-; GFX11-NEXT: v_lshrrev_b32_e32 v5, 16, v6
-; GFX11-NEXT: v_add_nc_u32_e32 v6, 0x7fff, v8
-; GFX11-NEXT: v_add_nc_u32_e32 v8, v9, v12
-; GFX11-NEXT: v_or_b32_e32 v9, 0x400000, v7
-; GFX11-NEXT: v_bfe_u32 v14, v10, 16, 1
-; GFX11-NEXT: v_cmp_u_f32_e32 vcc_lo, v7, v7
-; GFX11-NEXT: v_or_b32_e32 v13, 0x400000, v12
-; GFX11-NEXT: v_add_nc_u32_e32 v8, 0x7fff, v8
-; GFX11-NEXT: s_and_b32 s4, s21, 0xffff0000
-; GFX11-NEXT: v_cndmask_b32_e32 v7, v6, v9, vcc_lo
-; GFX11-NEXT: v_bfe_u32 v9, v11, 16, 1
-; GFX11-NEXT: v_cmp_u_f32_e32 vcc_lo, v12, v12
-; GFX11-NEXT: v_add_nc_u32_e32 v12, v14, v10
-; GFX11-NEXT: v_and_b32_e32 v6, 0xffff, v5
-; GFX11-NEXT: v_lshrrev_b32_e32 v5, 16, v7
-; GFX11-NEXT: v_dual_cndmask_b32 v8, v8, v13 :: v_dual_add_nc_u32 v7, v9, v11
-; GFX11-NEXT: s_delay_alu instid0(VALU_DEP_4)
-; GFX11-NEXT: v_add_nc_u32_e32 v9, 0x7fff, v12
-; GFX11-NEXT: v_or_b32_e32 v12, 0x400000, v10
-; GFX11-NEXT: v_add_f32_e64 v13, 0x40c00000, s4
-; GFX11-NEXT: v_cmp_u_f32_e32 vcc_lo, v10, v10
-; GFX11-NEXT: s_lshl_b32 s4, s21, 16
-; GFX11-NEXT: v_add_nc_u32_e32 v7, 0x7fff, v7
-; GFX11-NEXT: v_or_b32_e32 v14, 0x400000, v11
-; GFX11-NEXT: v_add_f32_e64 v16, 0x40c00000, s4
-; GFX11-NEXT: v_cndmask_b32_e32 v9, v9, v12, vcc_lo
-; GFX11-NEXT: v_bfe_u32 v10, v13, 16, 1
-; GFX11-NEXT: v_cmp_u_f32_e32 vcc_lo, v11, v11
-; GFX11-NEXT: v_lshrrev_b32_e32 v8, 16, v8
-; GFX11-NEXT: v_bfe_u32 v12, v16, 16, 1
-; GFX11-NEXT: s_and_b32 s4, s20, 0xffff0000
-; GFX11-NEXT: v_dual_cndmask_b32 v11, v7, v14 :: v_dual_add_nc_u32 v10, v10, v13
-; GFX11-NEXT: s_delay_alu instid0(VALU_DEP_3) | instskip(SKIP_2) | instid1(VALU_DEP_4)
-; GFX11-NEXT: v_and_b32_e32 v7, 0xffff, v8
-; GFX11-NEXT: v_lshrrev_b32_e32 v8, 16, v9
-; GFX11-NEXT: v_or_b32_e32 v14, 0x400000, v13
-; GFX11-NEXT: v_add_nc_u32_e32 v10, 0x7fff, v10
-; GFX11-NEXT: v_lshrrev_b32_e32 v9, 16, v11
-; GFX11-NEXT: v_add_nc_u32_e32 v11, v12, v16
-; GFX11-NEXT: v_add_f32_e64 v12, 0x40c00000, s4
-; GFX11-NEXT: v_cmp_u_f32_e32 vcc_lo, v13, v13
-; GFX11-NEXT: s_lshl_b32 s4, s20, 16
-; GFX11-NEXT: v_or_b32_e32 v17, 0x400000, v16
-; GFX11-NEXT: v_add_nc_u32_e32 v11, 0x7fff, v11
-; GFX11-NEXT: v_bfe_u32 v18, v12, 16, 1
-; GFX11-NEXT: v_add_f32_e64 v19, 0x40c00000, s4
-; GFX11-NEXT: v_cndmask_b32_e32 v13, v10, v14, vcc_lo
-; GFX11-NEXT: v_cmp_u_f32_e32 vcc_lo, v16, v16
-; GFX11-NEXT: s_and_b32 s4, s19, 0xffff0000
-; GFX11-NEXT: v_add_nc_u32_e32 v14, v18, v12
-; GFX11-NEXT: v_bfe_u32 v16, v19, 16, 1
-; GFX11-NEXT: v_and_b32_e32 v10, 0xffff, v9
-; GFX11-NEXT: v_cndmask_b32_e32 v11, v11, v17, vcc_lo
-; GFX11-NEXT: v_add_f32_e64 v17, 0x40c00000, s4
-; GFX11-NEXT: s_lshl_b32 s4, s19, 16
-; GFX11-NEXT: v_lshrrev_b32_e32 v9, 16, v13
-; GFX11-NEXT: v_add_nc_u32_e32 v13, 0x7fff, v14
-; GFX11-NEXT: v_add_nc_u32_e32 v14, v16, v19
-; GFX11-NEXT: v_or_b32_e32 v16, 0x400000, v12
-; GFX11-NEXT: v_add_f32_e64 v18, 0x40c00000, s4
-; GFX11-NEXT: v_bfe_u32 v21, v17, 16, 1
-; GFX11-NEXT: v_cmp_u_f32_e32 vcc_lo, v12, v12
-; GFX11-NEXT: v_lshrrev_b32_e32 v11, 16, v11
-; GFX11-NEXT: v_add_nc_u32_e32 v14, 0x7fff, v14
-; GFX11-NEXT: v_or_b32_e32 v20, 0x400000, v19
-; GFX11-NEXT: s_and_b32 s4, s18, 0xffff0000
-; GFX11-NEXT: v_cndmask_b32_e32 v13, v13, v16, vcc_lo
-; GFX11-NEXT: v_bfe_u32 v16, v18, 16, 1
-; GFX11-NEXT: v_cmp_u_f32_e32 vcc_lo, v19, v19
-; GFX11-NEXT: v_add_nc_u32_e32 v19, v21, v17
-; GFX11-NEXT: v_and_b32_e32 v12, 0xffff, v11
-; GFX11-NEXT: v_lshrrev_b32_e32 v11, 16, v13
-; GFX11-NEXT: v_dual_cndmask_b32 v14, v14, v20 :: v_dual_add_nc_u32 v13, v16, v18
-; GFX11-NEXT: s_delay_alu instid0(VALU_DEP_4)
-; GFX11-NEXT: v_add_nc_u32_e32 v16, 0x7fff, v19
-; GFX11-NEXT: v_or_b32_e32 v19, 0x400000, v17
-; GFX11-NEXT: v_add_f32_e64 v20, 0x40c00000, s4
-; GFX11-NEXT: v_cmp_u_f32_e32 vcc_lo, v17, v17
-; GFX11-NEXT: s_lshl_b32 s4, s18, 16
-; GFX11-NEXT: v_add_nc_u32_e32 v13, 0x7fff, v13
-; GFX11-NEXT: v_or_b32_e32 v21, 0x400000, v18
-; GFX11-NEXT: v_add_f32_e64 v22, 0x40c00000, s4
-; GFX11-NEXT: v_cndmask_b32_e32 v16, v16, v19, vcc_lo
-; GFX11-NEXT: v_bfe_u32 v17, v20, 16, 1
-; GFX11-NEXT: v_cmp_u_f32_e32 vcc_lo, v18, v18
-; GFX11-NEXT: v_lshrrev_b32_e32 v14, 16, v14
-; GFX11-NEXT: v_bfe_u32 v19, v22, 16, 1
-; GFX11-NEXT: s_and_b32 s4, s17, 0xffff0000
-; GFX11-NEXT: v_add_nc_u32_e32 v17, v17, v20
-; GFX11-NEXT: s_delay_alu instid0(VALU_DEP_3) | instskip(SKIP_2) | instid1(VALU_DEP_4)
-; GFX11-NEXT: v_dual_cndmask_b32 v18, v13, v21 :: v_dual_and_b32 v13, 0xffff, v14
-; GFX11-NEXT: v_lshrrev_b32_e32 v14, 16, v16
-; GFX11-NEXT: v_or_b32_e32 v21, 0x400000, v20
-; GFX11-NEXT: v_add_nc_u32_e32 v17, 0x7fff, v17
-; GFX11-NEXT: s_delay_alu instid0(VALU_DEP_4)
-; GFX11-NEXT: v_lshrrev_b32_e32 v16, 16, v18
-; GFX11-NEXT: v_add_nc_u32_e32 v18, v19, v22
-; GFX11-NEXT: v_add_f32_e64 v19, 0x40c00000, s4
-; GFX11-NEXT: v_cmp_u_f32_e32 vcc_lo, v20, v20
-; GFX11-NEXT: s_lshl_b32 s4, s17, 16
-; GFX11-NEXT: v_or_b32_e32 v23, 0x400000, v22
-; GFX11-NEXT: v_add_nc_u32_e32 v18, 0x7fff, v18
-; GFX11-NEXT: v_bfe_u32 v24, v19, 16, 1
-; GFX11-NEXT: v_add_f32_e64 v25, 0x40c00000, s4
-; GFX11-NEXT: v_cndmask_b32_e32 v20, v17, v21, vcc_lo
-; GFX11-NEXT: v_cmp_u_f32_e32 vcc_lo, v22, v22
-; GFX11-NEXT: s_and_b32 s4, s16, 0xffff0000
-; GFX11-NEXT: v_add_nc_u32_e32 v21, v24, v19
-; GFX11-NEXT: v_bfe_u32 v22, v25, 16, 1
-; GFX11-NEXT: v_dual_cndmask_b32 v18, v18, v23 :: v_dual_and_b32 v17, 0xffff, v16
-; GFX11-NEXT: v_add_f32_e64 v23, 0x40c00000, s4
-; GFX11-NEXT: s_lshl_b32 s4, s16, 16
-; GFX11-NEXT: v_lshrrev_b32_e32 v16, 16, v20
-; GFX11-NEXT: v_add_nc_u32_e32 v20, 0x7fff, v21
-; GFX11-NEXT: v_add_nc_u32_e32 v21, v22, v25
-; GFX11-NEXT: v_or_b32_e32 v22, 0x400000, v19
-; GFX11-NEXT: v_add_f32_e64 v24, 0x40c00000, s4
-; GFX11-NEXT: v_bfe_u32 v27, v23, 16, 1
-; GFX11-NEXT: v_cmp_u_f32_e32 vcc_lo, v19, v19
-; GFX11-NEXT: v_lshrrev_b32_e32 v18, 16, v18
-; GFX11-NEXT: v_add_nc_u32_e32 v21, 0x7fff, v21
-; GFX11-NEXT: v_or_b32_e32 v26, 0x400000, v25
-; GFX11-NEXT: s_and_b32 s4, s3, 0xffff0000
-; GFX11-NEXT: v_cndmask_b32_e32 v20, v20, v22, vcc_lo
-; GFX11-NEXT: v_bfe_u32 v22, v24, 16, 1
-; GFX11-NEXT: v_cmp_u_f32_e32 vcc_lo, v25, v25
-; GFX11-NEXT: v_add_nc_u32_e32 v25, v27, v23
-; GFX11-NEXT: v_and_b32_e32 v19, 0xffff, v18
-; GFX11-NEXT: v_lshrrev_b32_e32 v18, 16, v20
-; GFX11-NEXT: v_dual_cndmask_b32 v21, v21, v26 :: v_dual_add_nc_u32 v20, v22, v24
-; GFX11-NEXT: s_delay_alu instid0(VALU_DEP_4)
-; GFX11-NEXT: v_add_nc_u32_e32 v22, 0x7fff, v25
-; GFX11-NEXT: v_or_b32_e32 v25, 0x400000, v23
-; GFX11-NEXT: v_add_f32_e64 v26, 0x40c00000, s4
-; GFX11-NEXT: v_cmp_u_f32_e32 vcc_lo, v23, v23
-; GFX11-NEXT: s_lshl_b32 s3, s3, 16
-; GFX11-NEXT: v_add_nc_u32_e32 v20, 0x7fff, v20
-; GFX11-NEXT: v_or_b32_e32 v27, 0x400000, v24
-; GFX11-NEXT: v_add_f32_e64 v28, 0x40c00000, s3
-; GFX11-NEXT: v_cndmask_b32_e32 v22, v22, v25, vcc_lo
-; GFX11-NEXT: v_bfe_u32 v23, v26, 16, 1
-; GFX11-NEXT: v_cmp_u_f32_e32 vcc_lo, v24, v24
-; GFX11-NEXT: v_lshrrev_b32_e32 v21, 16, v21
-; GFX11-NEXT: v_bfe_u32 v25, v28, 16, 1
-; GFX11-NEXT: s_and_b32 s3, s2, 0xffff0000
-; GFX11-NEXT: v_dual_cndmask_b32 v24, v20, v27 :: v_dual_add_nc_u32 v23, v23, v26
-; GFX11-NEXT: s_delay_alu instid0(VALU_DEP_3) | instskip(SKIP_2) | instid1(VALU_DEP_4)
-; GFX11-NEXT: v_and_b32_e32 v20, 0xffff, v21
-; GFX11-NEXT: v_lshrrev_b32_e32 v21, 16, v22
-; GFX11-NEXT: v_or_b32_e32 v27, 0x400000, v26
-; GFX11-NEXT: v_add_nc_u32_e32 v23, 0x7fff, v23
-; GFX11-NEXT: v_lshrrev_b32_e32 v22, 16, v24
-; GFX11-NEXT: v_add_nc_u32_e32 v24, v25, v28
-; GFX11-NEXT: v_add_f32_e64 v25, 0x40c00000, s3
-; GFX11-NEXT: v_cmp_u_f32_e32 vcc_lo, v26, v26
-; GFX11-NEXT: s_lshl_b32 s2, s2, 16
-; GFX11-NEXT: v_or_b32_e32 v29, 0x400000, v28
-; GFX11-NEXT: v_add_nc_u32_e32 v24, 0x7fff, v24
-; GFX11-NEXT: v_bfe_u32 v30, v25, 16, 1
-; GFX11-NEXT: v_add_f32_e64 v31, 0x40c00000, s2
-; GFX11-NEXT: v_cndmask_b32_e32 v26, v23, v27, vcc_lo
-; GFX11-NEXT: v_cmp_u_f32_e32 vcc_lo, v28, v28
-; GFX11-NEXT: s_and_b32 s2, s1, 0xffff0000
-; GFX11-NEXT: v_add_nc_u32_e32 v27, v30, v25
-; GFX11-NEXT: v_bfe_u32 v28, v31, 16, 1
-; GFX11-NEXT: s_lshl_b32 s1, s1, 16
-; GFX11-NEXT: v_cndmask_b32_e32 v24, v24, v29, vcc_lo
-; GFX11-NEXT: v_add_f32_e64 v29, 0x40c00000, s2
-; GFX11-NEXT: v_and_b32_e32 v23, 0xffff, v22
-; GFX11-NEXT: v_lshrrev_b32_e32 v22, 16, v26
-; GFX11-NEXT: v_add_nc_u32_e32 v26, 0x7fff, v27
-; GFX11-NEXT: v_add_nc_u32_e32 v27, v28, v31
-; GFX11-NEXT: v_or_b32_e32 v28, 0x400000, v25
-; GFX11-NEXT: v_add_f32_e64 v30, 0x40c00000, s1
-; GFX11-NEXT: v_bfe_u32 v33, v29, 16, 1
-; GFX11-NEXT: v_cmp_u_f32_e32 vcc_lo, v25, v25
-; GFX11-NEXT: v_lshrrev_b32_e32 v24, 16, v24
-; GFX11-NEXT: v_add_nc_u32_e32 v27, 0x7fff, v27
-; GFX11-NEXT: v_or_b32_e32 v32, 0x400000, v31
-; GFX11-NEXT: s_and_b32 s1, s0, 0xffff0000
-; GFX11-NEXT: v_cndmask_b32_e32 v26, v26, v28, vcc_lo
-; GFX11-NEXT: v_bfe_u32 v28, v30, 16, 1
-; GFX11-NEXT: v_cmp_u_f32_e32 vcc_lo, v31, v31
-; GFX11-NEXT: v_add_nc_u32_e32 v31, v33, v29
-; GFX11-NEXT: v_and_b32_e32 v25, 0xffff, v24
-; GFX11-NEXT: v_lshrrev_b32_e32 v24, 16, v26
-; GFX11-NEXT: v_dual_cndmask_b32 v27, v27, v32 :: v_dual_add_nc_u32 v26, v28, v30
-; GFX11-NEXT: s_delay_alu instid0(VALU_DEP_4)
-; GFX11-NEXT: v_add_nc_u32_e32 v28, 0x7fff, v31
-; GFX11-NEXT: v_or_b32_e32 v31, 0x400000, v29
-; GFX11-NEXT: v_add_f32_e64 v32, 0x40c00000, s1
-; GFX11-NEXT: v_cmp_u_f32_e32 vcc_lo, v29, v29
-; GFX11-NEXT: s_lshl_b32 s0, s0, 16
-; GFX11-NEXT: v_add_nc_u32_e32 v26, 0x7fff, v26
-; GFX11-NEXT: v_or_b32_e32 v33, 0x400000, v30
-; GFX11-NEXT: v_add_f32_e64 v34, 0x40c00000, s0
-; GFX11-NEXT: v_cndmask_b32_e32 v28, v28, v31, vcc_lo
-; GFX11-NEXT: v_bfe_u32 v29, v32, 16, 1
-; GFX11-NEXT: v_cmp_u_f32_e32 vcc_lo, v30, v30
-; GFX11-NEXT: v_or_b32_e32 v35, 0x400000, v32
-; GFX11-NEXT: v_bfe_u32 v31, v34, 16, 1
-; GFX11-NEXT: v_or_b32_e32 v36, 0x400000, v34
-; GFX11-NEXT: v_lshrrev_b32_e32 v27, 16, v27
-; GFX11-NEXT: v_cndmask_b32_e32 v30, v26, v33, vcc_lo
-; GFX11-NEXT: v_lshrrev_b32_e32 v26, 16, v28
-; GFX11-NEXT: v_add_nc_u32_e32 v28, v29, v32
-; GFX11-NEXT: v_lshlrev_b32_e32 v33, 16, v178
-; GFX11-NEXT: v_cmp_u_f32_e32 vcc_lo, v32, v32
-; GFX11-NEXT: v_lshrrev_b32_e32 v29, 16, v30
-; GFX11-NEXT: v_add_nc_u32_e32 v30, v31, v34
-; GFX11-NEXT: v_and_b32_e32 v31, 0xffff0000, v178
-; GFX11-NEXT: v_add_nc_u32_e32 v28, 0x7fff, v28
-; GFX11-NEXT: v_add_f32_e32 v33, 0x40c00000, v33
-; GFX11-NEXT: v_lshl_or_b32 v109, v5, 16, v7
-; GFX11-NEXT: v_add_nc_u32_e32 v30, 0x7fff, v30
-; GFX11-NEXT: v_add_f32_e32 v31, 0x40c00000, v31
-; GFX11-NEXT: v_cndmask_b32_e32 v32, v28, v35, vcc_lo
-; GFX11-NEXT: v_bfe_u32 v37, v33, 16, 1
-; GFX11-NEXT: v_cmp_u_f32_e32 vcc_lo, v34, v34
-; GFX11-NEXT: v_and_b32_e32 v28, 0xffff, v29
-; GFX11-NEXT: v_bfe_u32 v35, v31, 16, 1
-; GFX11-NEXT: v_lshrrev_b32_e32 v29, 16, v32
-; GFX11-NEXT: v_add_nc_u32_e32 v32, v37, v33
-; GFX11-NEXT: v_cndmask_b32_e32 v30, v30, v36, vcc_lo
-; GFX11-NEXT: v_lshlrev_b32_e32 v36, 16, v179
-; GFX11-NEXT: v_add_nc_u32_e32 v34, v35, v31
-; GFX11-NEXT: v_or_b32_e32 v37, 0x400000, v33
-; GFX11-NEXT: v_add_nc_u32_e32 v32, 0x7fff, v32
-; GFX11-NEXT: v_cmp_u_f32_e32 vcc_lo, v33, v33
-; GFX11-NEXT: v_and_b32_e32 v35, 0xffff0000, v179
-; GFX11-NEXT: v_add_nc_u32_e32 v34, 0x7fff, v34
-; GFX11-NEXT: v_or_b32_e32 v38, 0x400000, v31
-; GFX11-NEXT: v_add_f32_e32 v36, 0x40c00000, v36
-; GFX11-NEXT: v_cndmask_b32_e32 v32, v32, v37, vcc_lo
-; GFX11-NEXT: v_cmp_u_f32_e32 vcc_lo, v31, v31
-; GFX11-NEXT: v_add_f32_e32 v35, 0x40c00000, v35
-; GFX11-NEXT: v_and_b32_e32 v37, 0xffff0000, v180
-; GFX11-NEXT: v_or_b32_e32 v39, 0x400000, v36
-; GFX11-NEXT: v_lshrrev_b32_e32 v32, 16, v32
-; GFX11-NEXT: v_cndmask_b32_e32 v31, v34, v38, vcc_lo
-; GFX11-NEXT: v_bfe_u32 v34, v36, 16, 1
-; GFX11-NEXT: v_bfe_u32 v33, v35, 16, 1
-; GFX11-NEXT: v_lshlrev_b32_e32 v38, 16, v180
-; GFX11-NEXT: v_cmp_u_f32_e32 vcc_lo, v36, v36
-; GFX11-NEXT: s_delay_alu instid0(VALU_DEP_4) | instskip(NEXT) | instid1(VALU_DEP_4)
-; GFX11-NEXT: v_dual_add_f32 v37, 0x40c00000, v37 :: v_dual_add_nc_u32 v34, v34, v36
-; GFX11-NEXT: v_add_nc_u32_e32 v33, v33, v35
-; GFX11-NEXT: v_or_b32_e32 v48, 0x400000, v35
-; GFX11-NEXT: v_add_f32_e32 v38, 0x40c00000, v38
-; GFX11-NEXT: v_lshrrev_b32_e32 v31, 16, v31
-; GFX11-NEXT: v_add_nc_u32_e32 v34, 0x7fff, v34
-; GFX11-NEXT: v_add_nc_u32_e32 v33, 0x7fff, v33
-; GFX11-NEXT: v_and_b32_e32 v32, 0xffff, v32
-; GFX11-NEXT: v_bfe_u32 v36, v37, 16, 1
-; GFX11-NEXT: v_lshrrev_b32_e32 v30, 16, v30
-; GFX11-NEXT: v_cndmask_b32_e32 v34, v34, v39, vcc_lo
-; GFX11-NEXT: v_cmp_u_f32_e32 vcc_lo, v35, v35
-; GFX11-NEXT: v_bfe_u32 v35, v38, 16, 1
-; GFX11-NEXT: v_lshl_or_b32 v178, v31, 16, v32
-; GFX11-NEXT: v_add_nc_u32_e32 v31, v36, v37
-; GFX11-NEXT: v_dual_cndmask_b32 v33, v33, v48 :: v_dual_lshlrev_b32 v36, 16, v182
-; GFX11-NEXT: v_lshrrev_b32_e32 v34, 16, v34
-; GFX11-NEXT: v_or_b32_e32 v39, 0x400000, v38
-; GFX11-NEXT: v_cmp_u_f32_e32 vcc_lo, v38, v38
-; GFX11-NEXT: s_delay_alu instid0(VALU_DEP_4)
-; GFX11-NEXT: v_add_f32_e32 v36, 0x40c00000, v36
-; GFX11-NEXT: v_lshrrev_b32_e32 v32, 16, v33
-; GFX11-NEXT: v_add_nc_u32_e32 v33, v35, v38
-; GFX11-NEXT: v_and_b32_e32 v35, 0xffff0000, v182
-; GFX11-NEXT: v_and_b32_e32 v34, 0xffff, v34
-; GFX11-NEXT: v_or_b32_e32 v48, 0x400000, v37
-; GFX11-NEXT: v_add_nc_u32_e32 v31, 0x7fff, v31
-; GFX11-NEXT: v_add_nc_u32_e32 v33, 0x7fff, v33
-; GFX11-NEXT: v_add_f32_e32 v35, 0x40c00000, v35
-; GFX11-NEXT: v_lshl_or_b32 v179, v32, 16, v34
-; GFX11-NEXT: v_and_b32_e32 v30, 0xffff, v30
-; GFX11-NEXT: v_lshl_or_b32 v136, v2, 16, v4
-; GFX11-NEXT: v_cndmask_b32_e32 v33, v33, v39, vcc_lo
-; GFX11-NEXT: v_bfe_u32 v38, v35, 16, 1
-; GFX11-NEXT: v_cmp_u_f32_e32 vcc_lo, v37, v37
-; GFX11-NEXT: v_bfe_u32 v37, v36, 16, 1
-; GFX11-NEXT: v_or_b32_e32 v39, 0x400000, v36
-; GFX11-NEXT: v_lshrrev_b32_e32 v33, 16, v33
-; GFX11-NEXT: v_dual_cndmask_b32 v31, v31, v48 :: v_dual_add_nc_u32 v38, v38, v35
-; GFX11-NEXT: s_delay_alu instid0(VALU_DEP_4) | instskip(SKIP_2) | instid1(VALU_DEP_4)
-; GFX11-NEXT: v_add_nc_u32_e32 v32, v37, v36
-; GFX11-NEXT: v_and_b32_e32 v37, 0xffff0000, v181
-; GFX11-NEXT: v_cmp_u_f32_e32 vcc_lo, v36, v36
-; GFX11-NEXT: v_add_nc_u32_e32 v34, 0x7fff, v38
-; GFX11-NEXT: v_lshlrev_b32_e32 v38, 16, v181
-; GFX11-NEXT: v_add_nc_u32_e32 v32, 0x7fff, v32
-; GFX11-NEXT: v_or_b32_e32 v48, 0x400000, v35
-; GFX11-NEXT: v_add_f32_e32 v37, 0x40c00000, v37
-; GFX11-NEXT: v_lshrrev_b32_e32 v31, 16, v31
-; GFX11-NEXT: v_add_f32_e32 v38, 0x40c00000, v38
-; GFX11-NEXT: v_cndmask_b32_e32 v32, v32, v39, vcc_lo
-; GFX11-NEXT: v_cmp_u_f32_e32 vcc_lo, v35, v35
-; GFX11-NEXT: v_and_b32_e32 v33, 0xffff, v33
-; GFX11-NEXT: v_bfe_u32 v35, v37, 16, 1
-; GFX11-NEXT: v_bfe_u32 v36, v38, 16, 1
-; GFX11-NEXT: v_lshrrev_b32_e32 v32, 16, v32
-; GFX11-NEXT: v_cndmask_b32_e32 v34, v34, v48, vcc_lo
-; GFX11-NEXT: v_lshl_or_b32 v180, v31, 16, v33
-; GFX11-NEXT: v_add_nc_u32_e32 v33, v35, v37
-; GFX11-NEXT: v_and_b32_e32 v35, 0xffff0000, v170
-; GFX11-NEXT: v_or_b32_e32 v39, 0x400000, v38
-; GFX11-NEXT: v_lshrrev_b32_e32 v31, 16, v34
-; GFX11-NEXT: v_add_nc_u32_e32 v34, v36, v38
-; GFX11-NEXT: s_delay_alu instid0(VALU_DEP_4) | instskip(SKIP_2) | instid1(VALU_DEP_4)
-; GFX11-NEXT: v_dual_add_f32 v35, 0x40c00000, v35 :: v_dual_lshlrev_b32 v36, 16, v170
-; GFX11-NEXT: v_cmp_u_f32_e32 vcc_lo, v38, v38
-; GFX11-NEXT: v_and_b32_e32 v32, 0xffff, v32
-; GFX11-NEXT: v_add_nc_u32_e32 v34, 0x7fff, v34
-; GFX11-NEXT: s_delay_alu instid0(VALU_DEP_4)
-; GFX11-NEXT: v_add_f32_e32 v36, 0x40c00000, v36
-; GFX11-NEXT: v_add_nc_u32_e32 v33, 0x7fff, v33
-; GFX11-NEXT: v_or_b32_e32 v48, 0x400000, v37
-; GFX11-NEXT: v_bfe_u32 v38, v35, 16, 1
-; GFX11-NEXT: v_cndmask_b32_e32 v34, v34, v39, vcc_lo
-; GFX11-NEXT: v_bfe_u32 v39, v36, 16, 1
-; GFX11-NEXT: v_cmp_u_f32_e32 vcc_lo, v37, v37
-; GFX11-NEXT: v_lshl_or_b32 v182, v31, 16, v32
-; GFX11-NEXT: v_add_nc_u32_e32 v37, v38, v35
-; GFX11-NEXT: v_lshrrev_b32_e32 v34, 16, v34
-; GFX11-NEXT: v_add_nc_u32_e32 v31, v39, v36
-; GFX11-NEXT: v_cndmask_b32_e32 v33, v33, v48, vcc_lo
-; GFX11-NEXT: v_or_b32_e32 v38, 0x400000, v36
-; GFX11-NEXT: v_cmp_u_f32_e32 vcc_lo, v36, v36
-; GFX11-NEXT: v_or_b32_e32 v48, 0x400000, v35
-; GFX11-NEXT: v_add_nc_u32_e32 v31, 0x7fff, v31
-; GFX11-NEXT: v_lshrrev_b32_e32 v32, 16, v33
-; GFX11-NEXT: v_and_b32_e32 v33, 0xffff, v34
-; GFX11-NEXT: v_add_nc_u32_e32 v34, 0x7fff, v37
-; GFX11-NEXT: v_and_b32_e32 v37, 0xffff0000, v169
-; GFX11-NEXT: v_cndmask_b32_e32 v31, v31, v38, vcc_lo
-; GFX11-NEXT: v_cmp_u_f32_e32 vcc_lo, v35, v35
-; GFX11-NEXT: v_lshlrev_b32_e32 v39, 16, v169
-; GFX11-NEXT: v_lshl_or_b32 v181, v32, 16, v33
-; GFX11-NEXT: v_add_f32_e32 v37, 0x40c00000, v37
-; GFX11-NEXT: v_and_b32_e32 v38, 0xffff0000, v176
-; GFX11-NEXT: v_cndmask_b32_e32 v34, v34, v48, vcc_lo
-; GFX11-NEXT: v_add_f32_e32 v36, 0x40c00000, v39
-; GFX11-NEXT: v_lshrrev_b32_e32 v31, 16, v31
-; GFX11-NEXT: v_bfe_u32 v35, v37, 16, 1
-; GFX11-NEXT: v_or_b32_e32 v39, 0x400000, v37
-; GFX11-NEXT: v_lshrrev_b32_e32 v33, 16, v34
-; GFX11-NEXT: v_bfe_u32 v32, v36, 16, 1
-; GFX11-NEXT: v_add_f32_e32 v38, 0x40c00000, v38
-; GFX11-NEXT: v_add_nc_u32_e32 v34, v35, v37
-; GFX11-NEXT: v_lshlrev_b32_e32 v35, 16, v176
-; GFX11-NEXT: v_cmp_u_f32_e32 vcc_lo, v37, v37
-; GFX11-NEXT: v_add_nc_u32_e32 v32, v32, v36
-; GFX11-NEXT: v_and_b32_e32 v31, 0xffff, v31
-; GFX11-NEXT: v_add_nc_u32_e32 v34, 0x7fff, v34
-; GFX11-NEXT: v_add_f32_e32 v35, 0x40c00000, v35
-; GFX11-NEXT: v_or_b32_e32 v48, 0x400000, v36
-; GFX11-NEXT: v_add_nc_u32_e32 v32, 0x7fff, v32
-; GFX11-NEXT: v_bfe_u32 v37, v38, 16, 1
-; GFX11-NEXT: v_cndmask_b32_e32 v34, v34, v39, vcc_lo
-; GFX11-NEXT: v_bfe_u32 v49, v35, 16, 1
-; GFX11-NEXT: v_cmp_u_f32_e32 vcc_lo, v36, v36
-; GFX11-NEXT: v_and_b32_e32 v27, 0xffff, v27
-; GFX11-NEXT: v_lshl_or_b32 v170, v33, 16, v31
-; GFX11-NEXT: v_lshrrev_b32_e32 v31, 16, v34
-; GFX11-NEXT: v_add_nc_u32_e32 v36, v49, v35
-; GFX11-NEXT: v_dual_cndmask_b32 v32, v32, v48 :: v_dual_add_nc_u32 v33, v37, v38
-; GFX11-NEXT: v_and_b32_e32 v37, 0xffff0000, v174
-; GFX11-NEXT: v_cmp_u_f32_e32 vcc_lo, v35, v35
-; GFX11-NEXT: s_delay_alu instid0(VALU_DEP_4) | instskip(SKIP_4) | instid1(VALU_DEP_4)
-; GFX11-NEXT: v_add_nc_u32_e32 v34, 0x7fff, v36
-; GFX11-NEXT: v_or_b32_e32 v36, 0x400000, v35
-; GFX11-NEXT: v_lshrrev_b32_e32 v32, 16, v32
-; GFX11-NEXT: v_add_nc_u32_e32 v33, 0x7fff, v33
-; GFX11-NEXT: v_or_b32_e32 v39, 0x400000, v38
-; GFX11-NEXT: v_dual_add_f32 v35, 0x40c00000, v37 :: v_dual_cndmask_b32 v34, v34, v36
-; GFX11-NEXT: v_lshlrev_b32_e32 v36, 16, v174
-; GFX11-NEXT: v_cmp_u_f32_e32 vcc_lo, v38, v38
-; GFX11-NEXT: v_and_b32_e32 v32, 0xffff, v32
-; GFX11-NEXT: s_delay_alu instid0(VALU_DEP_4) | instskip(SKIP_2) | instid1(VALU_DEP_4)
-; GFX11-NEXT: v_bfe_u32 v37, v35, 16, 1
-; GFX11-NEXT: v_lshrrev_b32_e32 v34, 16, v34
-; GFX11-NEXT: v_dual_add_f32 v36, 0x40c00000, v36 :: v_dual_cndmask_b32 v33, v33, v39
-; GFX11-NEXT: v_lshl_or_b32 v169, v31, 16, v32
-; GFX11-NEXT: s_delay_alu instid0(VALU_DEP_4) | instskip(NEXT) | instid1(VALU_DEP_4)
-; GFX11-NEXT: v_add_nc_u32_e32 v37, v37, v35
-; GFX11-NEXT: v_and_b32_e32 v34, 0xffff, v34
-; GFX11-NEXT: s_delay_alu instid0(VALU_DEP_4)
-; GFX11-NEXT: v_bfe_u32 v31, v36, 16, 1
-; GFX11-NEXT: v_lshrrev_b32_e32 v33, 16, v33
-; GFX11-NEXT: v_and_b32_e32 v32, 0xffff0000, v171
-; GFX11-NEXT: v_cmp_u_f32_e32 vcc_lo, v35, v35
-; GFX11-NEXT: v_lshlrev_b32_e32 v38, 16, v177
-; GFX11-NEXT: v_add_nc_u32_e32 v31, v31, v36
-; GFX11-NEXT: v_lshl_or_b32 v176, v33, 16, v34
-; GFX11-NEXT: v_add_nc_u32_e32 v33, 0x7fff, v37
-; GFX11-NEXT: v_or_b32_e32 v34, 0x400000, v35
-; GFX11-NEXT: v_dual_add_f32 v32, 0x40c00000, v32 :: v_dual_lshlrev_b32 v37, 16, v171
-; GFX11-NEXT: v_add_nc_u32_e32 v31, 0x7fff, v31
-; GFX11-NEXT: v_add_f32_e32 v38, 0x40c00000, v38
-; GFX11-NEXT: s_delay_alu instid0(VALU_DEP_4)
-; GFX11-NEXT: v_cndmask_b32_e32 v33, v33, v34, vcc_lo
-; GFX11-NEXT: v_or_b32_e32 v34, 0x400000, v36
-; GFX11-NEXT: v_add_f32_e32 v35, 0x40c00000, v37
-; GFX11-NEXT: v_bfe_u32 v37, v32, 16, 1
-; GFX11-NEXT: v_cmp_u_f32_e32 vcc_lo, v36, v36
-; GFX11-NEXT: v_or_b32_e32 v39, 0x400000, v32
-; GFX11-NEXT: v_bfe_u32 v50, v38, 16, 1
-; GFX11-NEXT: v_or_b32_e32 v48, 0x400000, v35
-; GFX11-NEXT: v_dual_cndmask_b32 v31, v31, v34 :: v_dual_add_nc_u32 v36, v37, v32
-; GFX11-NEXT: v_bfe_u32 v34, v35, 16, 1
-; GFX11-NEXT: v_and_b32_e32 v37, 0xffff0000, v177
-; GFX11-NEXT: v_cmp_u_f32_e32 vcc_lo, v32, v32
-; GFX11-NEXT: s_delay_alu instid0(VALU_DEP_4) | instskip(SKIP_1) | instid1(VALU_DEP_4)
-; GFX11-NEXT: v_add_nc_u32_e32 v36, 0x7fff, v36
-; GFX11-NEXT: v_lshrrev_b32_e32 v31, 16, v31
-; GFX11-NEXT: v_dual_add_f32 v37, 0x40c00000, v37 :: v_dual_add_nc_u32 v34, v34, v35
-; GFX11-NEXT: v_lshrrev_b32_e32 v33, 16, v33
-; GFX11-NEXT: s_delay_alu instid0(VALU_DEP_4) | instskip(SKIP_1) | instid1(VALU_DEP_4)
-; GFX11-NEXT: v_cndmask_b32_e32 v32, v36, v39, vcc_lo
-; GFX11-NEXT: v_cmp_u_f32_e32 vcc_lo, v35, v35
-; GFX11-NEXT: v_add_nc_u32_e32 v34, 0x7fff, v34
-; GFX11-NEXT: v_bfe_u32 v49, v37, 16, 1
-; GFX11-NEXT: v_add_nc_u32_e32 v36, v50, v38
-; GFX11-NEXT: v_or_b32_e32 v39, 0x400000, v38
-; GFX11-NEXT: v_lshlrev_b32_e32 v50, 16, v184
-; GFX11-NEXT: s_delay_alu instid0(VALU_DEP_4) | instskip(NEXT) | instid1(VALU_DEP_4)
-; GFX11-NEXT: v_dual_cndmask_b32 v34, v34, v48 :: v_dual_add_nc_u32 v35, v49, v37
-; GFX11-NEXT: v_add_nc_u32_e32 v36, 0x7fff, v36
-; GFX11-NEXT: v_cmp_u_f32_e32 vcc_lo, v38, v38
-; GFX11-NEXT: v_and_b32_e32 v48, 0xffff0000, v184
-; GFX11-NEXT: s_delay_alu instid0(VALU_DEP_4)
-; GFX11-NEXT: v_lshrrev_b32_e32 v34, 16, v34
-; GFX11-NEXT: v_add_nc_u32_e32 v35, 0x7fff, v35
-; GFX11-NEXT: v_or_b32_e32 v49, 0x400000, v37
-; GFX11-NEXT: v_cndmask_b32_e32 v36, v36, v39, vcc_lo
-; GFX11-NEXT: v_cmp_u_f32_e32 vcc_lo, v37, v37
-; GFX11-NEXT: v_add_f32_e32 v37, 0x40c00000, v50
-; GFX11-NEXT: v_and_b32_e32 v31, 0xffff, v31
-; GFX11-NEXT: v_lshrrev_b32_e32 v32, 16, v32
-; GFX11-NEXT: v_dual_add_f32 v38, 0x40c00000, v48 :: v_dual_cndmask_b32 v35, v35, v49
-; GFX11-NEXT: v_lshrrev_b32_e32 v36, 16, v36
-; GFX11-NEXT: v_and_b32_e32 v34, 0xffff, v34
-; GFX11-NEXT: v_bfe_u32 v48, v37, 16, 1
-; GFX11-NEXT: s_delay_alu instid0(VALU_DEP_4)
-; GFX11-NEXT: v_bfe_u32 v39, v38, 16, 1
-; GFX11-NEXT: v_lshrrev_b32_e32 v35, 16, v35
-; GFX11-NEXT: v_and_b32_e32 v36, 0xffff, v36
-; GFX11-NEXT: v_lshl_or_b32 v174, v33, 16, v31
-; GFX11-NEXT: v_lshl_or_b32 v171, v32, 16, v34
-; GFX11-NEXT: v_add_nc_u32_e32 v31, v48, v37
-; GFX11-NEXT: v_and_b32_e32 v33, 0xffff0000, v175
-; GFX11-NEXT: v_lshlrev_b32_e32 v34, 16, v175
-; GFX11-NEXT: v_add_nc_u32_e32 v39, v39, v38
-; GFX11-NEXT: v_lshl_or_b32 v177, v35, 16, v36
-; GFX11-NEXT: v_add_nc_u32_e32 v31, 0x7fff, v31
-; GFX11-NEXT: v_or_b32_e32 v35, 0x400000, v37
-; GFX11-NEXT: v_dual_add_f32 v33, 0x40c00000, v33 :: v_dual_add_f32 v34, 0x40c00000, v34
-; GFX11-NEXT: v_cmp_u_f32_e32 vcc_lo, v37, v37
-; GFX11-NEXT: v_add_nc_u32_e32 v32, 0x7fff, v39
-; GFX11-NEXT: v_or_b32_e32 v36, 0x400000, v38
-; GFX11-NEXT: s_delay_alu instid0(VALU_DEP_4)
-; GFX11-NEXT: v_bfe_u32 v37, v33, 16, 1
-; GFX11-NEXT: v_bfe_u32 v39, v34, 16, 1
-; GFX11-NEXT: v_cndmask_b32_e32 v31, v31, v35, vcc_lo
-; GFX11-NEXT: v_and_b32_e32 v35, 0xffff0000, v173
-; GFX11-NEXT: v_cmp_u_f32_e32 vcc_lo, v38, v38
-; GFX11-NEXT: v_lshlrev_b32_e32 v48, 16, v173
-; GFX11-NEXT: v_or_b32_e32 v49, 0x400000, v33
-; GFX11-NEXT: v_lshrrev_b32_e32 v31, 16, v31
-; GFX11-NEXT: v_dual_add_f32 v35, 0x40c00000, v35 :: v_dual_cndmask_b32 v32, v32, v36
-; GFX11-NEXT: v_add_nc_u32_e32 v36, v37, v33
-; GFX11-NEXT: v_add_nc_u32_e32 v37, v39, v34
-; GFX11-NEXT: v_or_b32_e32 v39, 0x400000, v34
-; GFX11-NEXT: s_delay_alu instid0(VALU_DEP_4)
-; GFX11-NEXT: v_bfe_u32 v38, v35, 16, 1
-; GFX11-NEXT: v_cmp_u_f32_e32 vcc_lo, v34, v34
-; GFX11-NEXT: v_add_nc_u32_e32 v36, 0x7fff, v36
-; GFX11-NEXT: v_add_nc_u32_e32 v37, 0x7fff, v37
-; GFX11-NEXT: v_lshrrev_b32_e32 v32, 16, v32
-; GFX11-NEXT: v_add_nc_u32_e32 v38, v38, v35
-; GFX11-NEXT: v_and_b32_e32 v31, 0xffff, v31
-; GFX11-NEXT: v_lshl_or_b32 v122, v3, 16, v6
-; GFX11-NEXT: v_cndmask_b32_e32 v34, v37, v39, vcc_lo
-; GFX11-NEXT: v_cmp_u_f32_e32 vcc_lo, v33, v33
-; GFX11-NEXT: v_add_nc_u32_e32 v37, 0x7fff, v38
-; GFX11-NEXT: v_add_f32_e32 v38, 0x40c00000, v48
-; GFX11-NEXT: v_or_b32_e32 v39, 0x400000, v35
-; GFX11-NEXT: v_dual_cndmask_b32 v33, v36, v49 :: v_dual_lshlrev_b32 v48, 16, v183
-; GFX11-NEXT: v_cmp_u_f32_e32 vcc_lo, v35, v35
-; GFX11-NEXT: s_delay_alu instid0(VALU_DEP_4) | instskip(SKIP_1) | instid1(VALU_DEP_4)
-; GFX11-NEXT: v_bfe_u32 v36, v38, 16, 1
-; GFX11-NEXT: v_or_b32_e32 v49, 0x400000, v38
-; GFX11-NEXT: v_add_f32_e32 v48, 0x40c00000, v48
-; GFX11-NEXT: v_lshrrev_b32_e32 v34, 16, v34
-; GFX11-NEXT: v_cndmask_b32_e32 v35, v37, v39, vcc_lo
-; GFX11-NEXT: v_and_b32_e32 v37, 0xffff0000, v172
-; GFX11-NEXT: v_lshlrev_b32_e32 v39, 16, v172
-; GFX11-NEXT: v_add_nc_u32_e32 v36, v36, v38
-; GFX11-NEXT: v_cmp_u_f32_e32 vcc_lo, v38, v38
-; GFX11-NEXT: v_or_b32_e32 v55, 0x400000, v48
-; GFX11-NEXT: v_add_f32_e32 v37, 0x40c00000, v37
-; GFX11-NEXT: v_add_f32_e32 v39, 0x40c00000, v39
-; GFX11-NEXT: v_add_nc_u32_e32 v36, 0x7fff, v36
-; GFX11-NEXT: v_lshrrev_b32_e32 v33, 16, v33
-; GFX11-NEXT: v_and_b32_e32 v34, 0xffff, v34
-; GFX11-NEXT: v_bfe_u32 v50, v37, 16, 1
-; GFX11-NEXT: v_bfe_u32 v38, v39, 16, 1
-; GFX11-NEXT: v_cndmask_b32_e32 v36, v36, v49, vcc_lo
-; GFX11-NEXT: v_or_b32_e32 v54, 0x400000, v39
-; GFX11-NEXT: v_cmp_u_f32_e32 vcc_lo, v39, v39
-; GFX11-NEXT: v_dual_add_f32 v50, 0x40c00000, v51 :: v_dual_add_nc_u32 v49, v50, v37
-; GFX11-NEXT: v_bfe_u32 v51, v48, 16, 1
-; GFX11-NEXT: v_add_nc_u32_e32 v38, v38, v39
-; GFX11-NEXT: v_or_b32_e32 v53, 0x400000, v37
-; GFX11-NEXT: s_delay_alu instid0(VALU_DEP_4)
-; GFX11-NEXT: v_add_nc_u32_e32 v49, 0x7fff, v49
-; GFX11-NEXT: v_bfe_u32 v52, v50, 16, 1
-; GFX11-NEXT: v_add_nc_u32_e32 v51, v51, v48
-; GFX11-NEXT: v_add_nc_u32_e32 v38, 0x7fff, v38
-; GFX11-NEXT: v_lshrrev_b32_e32 v36, 16, v36
-; GFX11-NEXT: v_lshrrev_b32_e32 v35, 16, v35
-; GFX11-NEXT: v_add_nc_u32_e32 v52, v52, v50
-; GFX11-NEXT: s_delay_alu instid0(VALU_DEP_4) | instskip(SKIP_2) | instid1(VALU_DEP_4)
-; GFX11-NEXT: v_dual_cndmask_b32 v38, v38, v54 :: v_dual_add_nc_u32 v51, 0x7fff, v51
-; GFX11-NEXT: v_cmp_u_f32_e32 vcc_lo, v48, v48
-; GFX11-NEXT: v_and_b32_e32 v36, 0xffff, v36
-; GFX11-NEXT: v_add_nc_u32_e32 v39, 0x7fff, v52
-; GFX11-NEXT: v_or_b32_e32 v52, 0x400000, v50
-; GFX11-NEXT: v_lshrrev_b32_e32 v38, 16, v38
-; GFX11-NEXT: v_cndmask_b32_e32 v48, v51, v55, vcc_lo
-; GFX11-NEXT: v_cmp_u_f32_e32 vcc_lo, v37, v37
-; GFX11-NEXT: v_lshl_or_b32 v184, v32, 16, v31
-; GFX11-NEXT: v_lshl_or_b32 v175, v33, 16, v34
-; GFX11-NEXT: v_and_b32_e32 v38, 0xffff, v38
-; GFX11-NEXT: v_lshrrev_b32_e32 v48, 16, v48
-; GFX11-NEXT: v_cndmask_b32_e32 v37, v49, v53, vcc_lo
-; GFX11-NEXT: v_cmp_u_f32_e32 vcc_lo, v50, v50
-; GFX11-NEXT: v_lshl_or_b32 v173, v35, 16, v36
-; GFX11-NEXT: v_lshl_or_b32 v97, v8, 16, v10
-; GFX11-NEXT: v_and_b32_e32 v48, 0xffff, v48
-; GFX11-NEXT: v_lshrrev_b32_e32 v37, 16, v37
-; GFX11-NEXT: v_cndmask_b32_e32 v39, v39, v52, vcc_lo
-; GFX11-NEXT: v_lshl_or_b32 v86, v9, 16, v12
-; GFX11-NEXT: v_lshl_or_b32 v76, v11, 16, v13
-; GFX11-NEXT: v_lshl_or_b32 v67, v14, 16, v17
-; GFX11-NEXT: v_lshl_or_b32 v172, v37, 16, v38
-; GFX11-NEXT: v_lshrrev_b32_e32 v39, 16, v39
-; GFX11-NEXT: v_lshl_or_b32 v59, v16, 16, v19
-; GFX11-NEXT: v_lshl_or_b32 v52, v18, 16, v20
-; GFX11-NEXT: v_lshl_or_b32 v46, v21, 16, v23
-; GFX11-NEXT: v_lshl_or_b32 v41, v22, 16, v25
-; GFX11-NEXT: v_lshl_or_b32 v183, v39, 16, v48
-; GFX11-NEXT: v_lshl_or_b32 v37, v24, 16, v27
-; GFX11-NEXT: v_lshl_or_b32 v34, v26, 16, v28
-; GFX11-NEXT: v_lshl_or_b32 v32, v29, 16, v30
-; GFX11-NEXT: .LBB79_3: ; %end
-; GFX11-NEXT: v_dual_mov_b32 v3, v41 :: v_dual_mov_b32 v4, v46
-; GFX11-NEXT: v_dual_mov_b32 v6, v59 :: v_dual_mov_b32 v9, v86
-; GFX11-NEXT: v_dual_mov_b32 v7, v67 :: v_dual_mov_b32 v8, v76
-; GFX11-NEXT: v_dual_mov_b32 v10, v97 :: v_dual_mov_b32 v13, v136
-; GFX11-NEXT: v_dual_mov_b32 v11, v109 :: v_dual_mov_b32 v12, v122
-; GFX11-NEXT: v_dual_mov_b32 v14, v151 :: v_dual_mov_b32 v17, v172
-; GFX11-NEXT: v_dual_mov_b32 v18, v173 :: v_dual_mov_b32 v19, v175
-; GFX11-NEXT: v_dual_mov_b32 v20, v184 :: v_dual_mov_b32 v23, v174
-; GFX11-NEXT: v_dual_mov_b32 v22, v171 :: v_dual_mov_b32 v25, v169
-; GFX11-NEXT: v_dual_mov_b32 v26, v170 :: v_dual_mov_b32 v29, v180
-; GFX11-NEXT: s_clause 0x1f
-; GFX11-NEXT: scratch_load_b32 v184, off, s32
-; GFX11-NEXT: scratch_load_b32 v175, off, s32 offset:4
-; GFX11-NEXT: scratch_load_b32 v174, off, s32 offset:8
-; GFX11-NEXT: scratch_load_b32 v173, off, s32 offset:12
-; GFX11-NEXT: scratch_load_b32 v172, off, s32 offset:16
-; GFX11-NEXT: scratch_load_b32 v171, off, s32 offset:20
-; GFX11-NEXT: scratch_load_b32 v170, off, s32 offset:24
-; GFX11-NEXT: scratch_load_b32 v169, off, s32 offset:28
-; GFX11-NEXT: scratch_load_b32 v168, off, s32 offset:32
-; GFX11-NEXT: scratch_load_b32 v159, off, s32 offset:36
-; GFX11-NEXT: scratch_load_b32 v158, off, s32 offset:40
-; GFX11-NEXT: scratch_load_b32 v157, off, s32 offset:44
-; GFX11-NEXT: scratch_load_b32 v156, off, s32 offset:48
-; GFX11-NEXT: scratch_load_b32 v155, off, s32 offset:52
-; GFX11-NEXT: scratch_load_b32 v154, off, s32 offset:56
-; GFX11-NEXT: scratch_load_b32 v153, off, s32 offset:60
-; GFX11-NEXT: scratch_load_b32 v152, off, s32 offset:64
-; GFX11-NEXT: scratch_load_b32 v143, off, s32 offset:68
-; GFX11-NEXT: scratch_load_b32 v142, off, s32 offset:72
-; GFX11-NEXT: scratch_load_b32 v141, off, s32 offset:76
-; GFX11-NEXT: scratch_load_b32 v140, off, s32 offset:80
-; GFX11-NEXT: scratch_load_b32 v139, off, s32 offset:84
-; GFX11-NEXT: scratch_load_b32 v138, off, s32 offset:88
-; GFX11-NEXT: scratch_load_b32 v137, off, s32 offset:92
-; GFX11-NEXT: scratch_load_b32 v136, off, s32 offset:96
-; GFX11-NEXT: scratch_load_b32 v127, off, s32 offset:100
-; GFX11-NEXT: scratch_load_b32 v126, off, s32 offset:104
-; GFX11-NEXT: scratch_load_b32 v125, off, s32 offset:108
-; GFX11-NEXT: scratch_load_b32 v124, off, s32 offset:112
-; GFX11-NEXT: scratch_load_b32 v123, off, s32 offset:116
-; GFX11-NEXT: scratch_load_b32 v122, off, s32 offset:120
-; GFX11-NEXT: scratch_load_b32 v121, off, s32 offset:124
-; GFX11-NEXT: s_clause 0x1f
-; GFX11-NEXT: scratch_load_b32 v120, off, s32 offset:128
-; GFX11-NEXT: scratch_load_b32 v111, off, s32 offset:132
-; GFX11-NEXT: scratch_load_b32 v110, off, s32 offset:136
-; GFX11-NEXT: scratch_load_b32 v109, off, s32 offset:140
-; GFX11-NEXT: scratch_load_b32 v108, off, s32 offset:144
-; GFX11-NEXT: scratch_load_b32 v107, off, s32 offset:148
-; GFX11-NEXT: scratch_load_b32 v106, off, s32 offset:152
-; GFX11-NEXT: scratch_load_b32 v105, off, s32 offset:156
-; GFX11-NEXT: scratch_load_b32 v104, off, s32 offset:160
-; GFX11-NEXT: scratch_load_b32 v95, off, s32 offset:164
-; GFX11-NEXT: scratch_load_b32 v94, off, s32 offset:168
-; GFX11-NEXT: scratch_load_b32 v93, off, s32 offset:172
-; GFX11-NEXT: scratch_load_b32 v92, off, s32 offset:176
-; GFX11-NEXT: scratch_load_b32 v91, off, s32 offset:180
-; GFX11-NEXT: scratch_load_b32 v90, off, s32 offset:184
-; GFX11-NEXT: scratch_load_b32 v89, off, s32 offset:188
-; GFX11-NEXT: scratch_load_b32 v88, off, s32 offset:192
-; GFX11-NEXT: scratch_load_b32 v79, off, s32 offset:196
-; GFX11-NEXT: scratch_load_b32 v78, off, s32 offset:200
-; GFX11-NEXT: scratch_load_b32 v77, off, s32 offset:204
-; GFX11-NEXT: scratch_load_b32 v76, off, s32 offset:208
-; GFX11-NEXT: scratch_load_b32 v75, off, s32 offset:212
-; GFX11-NEXT: scratch_load_b32 v74, off, s32 offset:216
-; GFX11-NEXT: scratch_load_b32 v73, off, s32 offset:220
-; GFX11-NEXT: scratch_load_b32 v72, off, s32 offset:224
-; GFX11-NEXT: scratch_load_b32 v63, off, s32 offset:228
-; GFX11-NEXT: scratch_load_b32 v62, off, s32 offset:232
-; GFX11-NEXT: scratch_load_b32 v61, off, s32 offset:236
-; GFX11-NEXT: scratch_load_b32 v60, off, s32 offset:240
-; GFX11-NEXT: scratch_load_b32 v59, off, s32 offset:244
-; GFX11-NEXT: scratch_load_b32 v58, off, s32 offset:248
-; GFX11-NEXT: scratch_load_b32 v57, off, s32 offset:252
-; GFX11-NEXT: s_clause 0x8
-; GFX11-NEXT: scratch_load_b32 v56, off, s32 offset:256
-; GFX11-NEXT: scratch_load_b32 v47, off, s32 offset:260
-; GFX11-NEXT: scratch_load_b32 v46, off, s32 offset:264
-; GFX11-NEXT: scratch_load_b32 v45, off, s32 offset:268
-; GFX11-NEXT: scratch_load_b32 v44, off, s32 offset:272
-; GFX11-NEXT: scratch_load_b32 v43, off, s32 offset:276
-; GFX11-NEXT: scratch_load_b32 v42, off, s32 offset:280
-; GFX11-NEXT: scratch_load_b32 v41, off, s32 offset:284
-; GFX11-NEXT: scratch_load_b32 v40, off, s32 offset:288
-; GFX11-NEXT: v_dual_mov_b32 v0, v32 :: v_dual_mov_b32 v1, v34
-; GFX11-NEXT: v_dual_mov_b32 v2, v37 :: v_dual_mov_b32 v5, v52
-; GFX11-NEXT: v_dual_mov_b32 v16, v183 :: v_dual_mov_b32 v21, v177
-; GFX11-NEXT: v_dual_mov_b32 v24, v176 :: v_dual_mov_b32 v27, v181
-; GFX11-NEXT: v_mov_b32_e32 v28, v182
-; GFX11-NEXT: v_dual_mov_b32 v30, v179 :: v_dual_mov_b32 v31, v178
-; GFX11-NEXT: s_waitcnt vmcnt(0)
-; GFX11-NEXT: s_setpc_b64 s[30:31]
-; GFX11-NEXT: .LBB79_4:
-; GFX11-NEXT: ; implicit-def: $vgpr32_vgpr33_vgpr34_vgpr35_vgpr36_vgpr37_vgpr38_vgpr39_vgpr40_vgpr41_vgpr42_vgpr43_vgpr44_vgpr45_vgpr46_vgpr47_vgpr48_vgpr49_vgpr50_vgpr51_vgpr52_vgpr53_vgpr54_vgpr55_vgpr56_vgpr57_vgpr58_vgpr59_vgpr60_vgpr61_vgpr62_vgpr63
-; GFX11-NEXT: ; implicit-def: $vgpr33_vgpr34_vgpr35_vgpr36_vgpr37_vgpr38_vgpr39_vgpr40_vgpr41_vgpr42_vgpr43_vgpr44_vgpr45_vgpr46_vgpr47_vgpr48_vgpr49_vgpr50_vgpr51_vgpr52_vgpr53_vgpr54_vgpr55_vgpr56_vgpr57_vgpr58_vgpr59_vgpr60_vgpr61_vgpr62_vgpr63_vgpr64
-; GFX11-NEXT: ; implicit-def: $vgpr0_vgpr1_vgpr2_vgpr3_vgpr4_vgpr5_vgpr6_vgpr7_vgpr8_vgpr9_vgpr10_vgpr11_vgpr12_vgpr13_vgpr14_vgpr15_vgpr16_vgpr17_vgpr18_vgpr19_vgpr20_vgpr21_vgpr22_vgpr23_vgpr24_vgpr25_vgpr26_vgpr27_vgpr28_vgpr29_vgpr30_vgpr31
-; GFX11-NEXT: ; implicit-def: $vgpr35_vgpr36_vgpr37_vgpr38_vgpr39_vgpr40_vgpr41_vgpr42_vgpr43_vgpr44_vgpr45_vgpr46_vgpr47_vgpr48_vgpr49_vgpr50_vgpr51_vgpr52_vgpr53_vgpr54_vgpr55_vgpr56_vgpr57_vgpr58_vgpr59_vgpr60_vgpr61_vgpr62_vgpr63_vgpr64_vgpr65_vgpr66
-; GFX11-NEXT: ; implicit-def: $vgpr38_vgpr39_vgpr40_vgpr41_vgpr42_vgpr43_vgpr44_vgpr45_vgpr46_vgpr47_vgpr48_vgpr49_vgpr50_vgpr51_vgpr52_vgpr53_vgpr54_vgpr55_vgpr56_vgpr57_vgpr58_vgpr59_vgpr60_vgpr61_vgpr62_vgpr63_vgpr64_vgpr65_vgpr66_vgpr67_vgpr68_vgpr69
-; GFX11-NEXT: ; implicit-def: $vgpr42_vgpr43_vgpr44_vgpr45_vgpr46_vgpr47_vgpr48_vgpr49_vgpr50_vgpr51_vgpr52_vgpr53_vgpr54_vgpr55_vgpr56_vgpr57_vgpr58_vgpr59_vgpr60_vgpr61_vgpr62_vgpr63_vgpr64_vgpr65_vgpr66_vgpr67_vgpr68_vgpr69_vgpr70_vgpr71_vgpr72_vgpr73
-; GFX11-NEXT: ; implicit-def: $vgpr47_vgpr48_vgpr49_vgpr50_vgpr51_vgpr52_vgpr53_vgpr54_vgpr55_vgpr56_vgpr57_vgpr58_vgpr59_vgpr60_vgpr61_vgpr62_vgpr63_vgpr64_vgpr65_vgpr66_vgpr67_vgpr68_vgpr69_vgpr70_vgpr71_vgpr72_vgpr73_vgpr74_vgpr75_vgpr76_vgpr77_vgpr78
-; GFX11-NEXT: ; implicit-def: $vgpr53_vgpr54_vgpr55_vgpr56_vgpr57_vgpr58_vgpr59_vgpr60_vgpr61_vgpr62_vgpr63_vgpr64_vgpr65_vgpr66_vgpr67_vgpr68_vgpr69_vgpr70_vgpr71_vgpr72_vgpr73_vgpr74_vgpr75_vgpr76_vgpr77_vgpr78_vgpr79_vgpr80_vgpr81_vgpr82_vgpr83_vgpr84
-; GFX11-NEXT: ; implicit-def: $vgpr60_vgpr61_vgpr62_vgpr63_vgpr64_vgpr65_vgpr66_vgpr67_vgpr68_vgpr69_vgpr70_vgpr71_vgpr72_vgpr73_vgpr74_vgpr75_vgpr76_vgpr77_vgpr78_vgpr79_vgpr80_vgpr81_vgpr82_vgpr83_vgpr84_vgpr85_vgpr86_vgpr87_vgpr88_vgpr89_vgpr90_vgpr91
-; GFX11-NEXT: ; implicit-def: $vgpr68_vgpr69_vgpr70_vgpr71_vgpr72_vgpr73_vgpr74_vgpr75_vgpr76_vgpr77_vgpr78_vgpr79_vgpr80_vgpr81_vgpr82_vgpr83_vgpr84_vgpr85_vgpr86_vgpr87_vgpr88_vgpr89_vgpr90_vgpr91_vgpr92_vgpr93_vgpr94_vgpr95_vgpr96_vgpr97_vgpr98_vgpr99
-; GFX11-NEXT: ; implicit-def: $vgpr77_vgpr78_vgpr79_vgpr80_vgpr81_vgpr82_vgpr83_vgpr84_vgpr85_vgpr86_vgpr87_vgpr88_vgpr89_vgpr90_vgpr91_vgpr92_vgpr93_vgpr94_vgpr95_vgpr96_vgpr97_vgpr98_vgpr99_vgpr100_vgpr101_vgpr102_vgpr103_vgpr104_vgpr105_vgpr106_vgpr107_vgpr108
-; GFX11-NEXT: ; implicit-def: $vgpr87_vgpr88_vgpr89_vgpr90_vgpr91_vgpr92_vgpr93_vgpr94_vgpr95_vgpr96_vgpr97_vgpr98_vgpr99_vgpr100_vgpr101_vgpr102_vgpr103_vgpr104_vgpr105_vgpr106_vgpr107_vgpr108_vgpr109_vgpr110_vgpr111_vgpr112_vgpr113_vgpr114_vgpr115_vgpr116_vgpr117_vgpr118
-; GFX11-NEXT: ; implicit-def: $vgpr98_vgpr99_vgpr100_vgpr101_vgpr102_vgpr103_vgpr104_vgpr105_vgpr106_vgpr107_vgpr108_vgpr109_vgpr110_vgpr111_vgpr112_vgpr113_vgpr114_vgpr115_vgpr116_vgpr117_vgpr118_vgpr119_vgpr120_vgpr121_vgpr122_vgpr123_vgpr124_vgpr125_vgpr126_vgpr127_vgpr128_vgpr129
-; GFX11-NEXT: ; implicit-def: $vgpr110_vgpr111_vgpr112_vgpr113_vgpr114_vgpr115_vgpr116_vgpr117_vgpr118_vgpr119_vgpr120_vgpr121_vgpr122_vgpr123_vgpr124_vgpr125_vgpr126_vgpr127_vgpr128_vgpr129_vgpr130_vgpr131_vgpr132_vgpr133_vgpr134_vgpr135_vgpr136_vgpr137_vgpr138_vgpr139_vgpr140_vgpr141
-; GFX11-NEXT: ; implicit-def: $vgpr123_vgpr124_vgpr125_vgpr126_vgpr127_vgpr128_vgpr129_vgpr130_vgpr131_vgpr132_vgpr133_vgpr134_vgpr135_vgpr136_vgpr137_vgpr138_vgpr139_vgpr140_vgpr141_vgpr142_vgpr143_vgpr144_vgpr145_vgpr146_vgpr147_vgpr148_vgpr149_vgpr150_vgpr151_vgpr152_vgpr153_vgpr154
-; GFX11-NEXT: ; implicit-def: $vgpr137_vgpr138_vgpr139_vgpr140_vgpr141_vgpr142_vgpr143_vgpr144_vgpr145_vgpr146_vgpr147_vgpr148_vgpr149_vgpr150_vgpr151_vgpr152_vgpr153_vgpr154_vgpr155_vgpr156_vgpr157_vgpr158_vgpr159_vgpr160_vgpr161_vgpr162_vgpr163_vgpr164_vgpr165_vgpr166_vgpr167_vgpr168
-; GFX11-NEXT: s_branch .LBB79_2
+; GFX11-TRUE16-LABEL: bitcast_v64bf16_to_v16f64_scalar:
+; GFX11-TRUE16: ; %bb.0:
+; GFX11-TRUE16-NEXT: s_waitcnt vmcnt(0) expcnt(0) lgkmcnt(0)
+; GFX11-TRUE16-NEXT: v_cmp_ne_u32_e32 vcc_lo, 0, v14
+; GFX11-TRUE16-NEXT: s_clause 0x1f
+; GFX11-TRUE16-NEXT: scratch_store_b32 off, v40, s32 offset:280
+; GFX11-TRUE16-NEXT: scratch_store_b32 off, v41, s32 offset:276
+; GFX11-TRUE16-NEXT: scratch_store_b32 off, v42, s32 offset:272
+; GFX11-TRUE16-NEXT: scratch_store_b32 off, v43, s32 offset:268
+; GFX11-TRUE16-NEXT: scratch_store_b32 off, v44, s32 offset:264
+; GFX11-TRUE16-NEXT: scratch_store_b32 off, v45, s32 offset:260
+; GFX11-TRUE16-NEXT: scratch_store_b32 off, v46, s32 offset:256
+; GFX11-TRUE16-NEXT: scratch_store_b32 off, v47, s32 offset:252
+; GFX11-TRUE16-NEXT: scratch_store_b32 off, v56, s32 offset:248
+; GFX11-TRUE16-NEXT: scratch_store_b32 off, v57, s32 offset:244
+; GFX11-TRUE16-NEXT: scratch_store_b32 off, v58, s32 offset:240
+; GFX11-TRUE16-NEXT: scratch_store_b32 off, v59, s32 offset:236
+; GFX11-TRUE16-NEXT: scratch_store_b32 off, v60, s32 offset:232
+; GFX11-TRUE16-NEXT: scratch_store_b32 off, v61, s32 offset:228
+; GFX11-TRUE16-NEXT: scratch_store_b32 off, v62, s32 offset:224
+; GFX11-TRUE16-NEXT: scratch_store_b32 off, v63, s32 offset:220
+; GFX11-TRUE16-NEXT: scratch_store_b32 off, v72, s32 offset:216
+; GFX11-TRUE16-NEXT: scratch_store_b32 off, v73, s32 offset:212
+; GFX11-TRUE16-NEXT: scratch_store_b32 off, v74, s32 offset:208
+; GFX11-TRUE16-NEXT: scratch_store_b32 off, v75, s32 offset:204
+; GFX11-TRUE16-NEXT: scratch_store_b32 off, v76, s32 offset:200
+; GFX11-TRUE16-NEXT: scratch_store_b32 off, v77, s32 offset:196
+; GFX11-TRUE16-NEXT: scratch_store_b32 off, v78, s32 offset:192
+; GFX11-TRUE16-NEXT: scratch_store_b32 off, v79, s32 offset:188
+; GFX11-TRUE16-NEXT: scratch_store_b32 off, v88, s32 offset:184
+; GFX11-TRUE16-NEXT: scratch_store_b32 off, v89, s32 offset:180
+; GFX11-TRUE16-NEXT: scratch_store_b32 off, v90, s32 offset:176
+; GFX11-TRUE16-NEXT: scratch_store_b32 off, v91, s32 offset:172
+; GFX11-TRUE16-NEXT: scratch_store_b32 off, v92, s32 offset:168
+; GFX11-TRUE16-NEXT: scratch_store_b32 off, v93, s32 offset:164
+; GFX11-TRUE16-NEXT: scratch_store_b32 off, v94, s32 offset:160
+; GFX11-TRUE16-NEXT: scratch_store_b32 off, v95, s32 offset:156
+; GFX11-TRUE16-NEXT: s_clause 0x1f
+; GFX11-TRUE16-NEXT: scratch_store_b32 off, v104, s32 offset:152
+; GFX11-TRUE16-NEXT: scratch_store_b32 off, v105, s32 offset:148
+; GFX11-TRUE16-NEXT: scratch_store_b32 off, v106, s32 offset:144
+; GFX11-TRUE16-NEXT: scratch_store_b32 off, v107, s32 offset:140
+; GFX11-TRUE16-NEXT: scratch_store_b32 off, v108, s32 offset:136
+; GFX11-TRUE16-NEXT: scratch_store_b32 off, v109, s32 offset:132
+; GFX11-TRUE16-NEXT: scratch_store_b32 off, v110, s32 offset:128
+; GFX11-TRUE16-NEXT: scratch_store_b32 off, v111, s32 offset:124
+; GFX11-TRUE16-NEXT: scratch_store_b32 off, v120, s32 offset:120
+; GFX11-TRUE16-NEXT: scratch_store_b32 off, v121, s32 offset:116
+; GFX11-TRUE16-NEXT: scratch_store_b32 off, v122, s32 offset:112
+; GFX11-TRUE16-NEXT: scratch_store_b32 off, v123, s32 offset:108
+; GFX11-TRUE16-NEXT: scratch_store_b32 off, v124, s32 offset:104
+; GFX11-TRUE16-NEXT: scratch_store_b32 off, v125, s32 offset:100
+; GFX11-TRUE16-NEXT: scratch_store_b32 off, v126, s32 offset:96
+; GFX11-TRUE16-NEXT: scratch_store_b32 off, v127, s32 offset:92
+; GFX11-TRUE16-NEXT: scratch_store_b32 off, v136, s32 offset:88
+; GFX11-TRUE16-NEXT: scratch_store_b32 off, v137, s32 offset:84
+; GFX11-TRUE16-NEXT: scratch_store_b32 off, v138, s32 offset:80
+; GFX11-TRUE16-NEXT: scratch_store_b32 off, v139, s32 offset:76
+; GFX11-TRUE16-NEXT: scratch_store_b32 off, v140, s32 offset:72
+; GFX11-TRUE16-NEXT: scratch_store_b32 off, v141, s32 offset:68
+; GFX11-TRUE16-NEXT: scratch_store_b32 off, v142, s32 offset:64
+; GFX11-TRUE16-NEXT: scratch_store_b32 off, v143, s32 offset:60
+; GFX11-TRUE16-NEXT: scratch_store_b32 off, v152, s32 offset:56
+; GFX11-TRUE16-NEXT: scratch_store_b32 off, v153, s32 offset:52
+; GFX11-TRUE16-NEXT: scratch_store_b32 off, v154, s32 offset:48
+; GFX11-TRUE16-NEXT: scratch_store_b32 off, v155, s32 offset:44
+; GFX11-TRUE16-NEXT: scratch_store_b32 off, v156, s32 offset:40
+; GFX11-TRUE16-NEXT: scratch_store_b32 off, v157, s32 offset:36
+; GFX11-TRUE16-NEXT: scratch_store_b32 off, v158, s32 offset:32
+; GFX11-TRUE16-NEXT: scratch_store_b32 off, v159, s32 offset:28
+; GFX11-TRUE16-NEXT: s_clause 0x6
+; GFX11-TRUE16-NEXT: scratch_store_b32 off, v168, s32 offset:24
+; GFX11-TRUE16-NEXT: scratch_store_b32 off, v169, s32 offset:20
+; GFX11-TRUE16-NEXT: scratch_store_b32 off, v170, s32 offset:16
+; GFX11-TRUE16-NEXT: scratch_store_b32 off, v171, s32 offset:12
+; GFX11-TRUE16-NEXT: scratch_store_b32 off, v172, s32 offset:8
+; GFX11-TRUE16-NEXT: scratch_store_b32 off, v173, s32 offset:4
+; GFX11-TRUE16-NEXT: scratch_store_b32 off, v174, s32
+; GFX11-TRUE16-NEXT: v_dual_mov_b32 v167, v13 :: v_dual_mov_b32 v176, v12
+; GFX11-TRUE16-NEXT: v_dual_mov_b32 v177, v11 :: v_dual_mov_b32 v178, v10
+; GFX11-TRUE16-NEXT: v_dual_mov_b32 v179, v9 :: v_dual_mov_b32 v180, v8
+; GFX11-TRUE16-NEXT: v_dual_mov_b32 v181, v7 :: v_dual_mov_b32 v182, v6
+; GFX11-TRUE16-NEXT: v_dual_mov_b32 v183, v5 :: v_dual_mov_b32 v168, v4
+; GFX11-TRUE16-NEXT: v_dual_mov_b32 v169, v3 :: v_dual_mov_b32 v170, v2
+; GFX11-TRUE16-NEXT: v_dual_mov_b32 v171, v1 :: v_dual_mov_b32 v172, v0
+; GFX11-TRUE16-NEXT: v_dual_mov_b32 v174, s28 :: v_dual_mov_b32 v173, s29
+; GFX11-TRUE16-NEXT: s_mov_b32 s4, 0
+; GFX11-TRUE16-NEXT: s_and_b32 s5, vcc_lo, exec_lo
+; GFX11-TRUE16-NEXT: s_cbranch_scc0 .LBB79_4
+; GFX11-TRUE16-NEXT: ; %bb.1: ; %cmp.false
+; GFX11-TRUE16-NEXT: v_dual_mov_b32 v135, s0 :: v_dual_mov_b32 v134, s1
+; GFX11-TRUE16-NEXT: v_dual_mov_b32 v132, s2 :: v_dual_mov_b32 v129, s3
+; GFX11-TRUE16-NEXT: v_dual_mov_b32 v125, s16 :: v_dual_mov_b32 v120, s17
+; GFX11-TRUE16-NEXT: v_dual_mov_b32 v114, s18 :: v_dual_mov_b32 v107, s19
+; GFX11-TRUE16-NEXT: v_dual_mov_b32 v99, s20 :: v_dual_mov_b32 v90, s21
+; GFX11-TRUE16-NEXT: v_dual_mov_b32 v80, s22 :: v_dual_mov_b32 v69, s23
+; GFX11-TRUE16-NEXT: v_dual_mov_b32 v57, s24 :: v_dual_mov_b32 v44, s25
+; GFX11-TRUE16-NEXT: v_dual_mov_b32 v30, s26 :: v_dual_mov_b32 v15, s27
+; GFX11-TRUE16-NEXT: s_and_not1_b32 vcc_lo, exec_lo, s4
+; GFX11-TRUE16-NEXT: s_cbranch_vccnz .LBB79_3
+; GFX11-TRUE16-NEXT: .LBB79_2: ; %cmp.true
+; GFX11-TRUE16-NEXT: s_lshl_b32 s5, s27, 16
+; GFX11-TRUE16-NEXT: s_and_b32 s4, s27, 0xffff0000
+; GFX11-TRUE16-NEXT: v_add_f32_e64 v1, 0x40c00000, s5
+; GFX11-TRUE16-NEXT: v_add_f32_e64 v0, 0x40c00000, s4
+; GFX11-TRUE16-NEXT: s_and_b32 s4, s26, 0xffff0000
+; GFX11-TRUE16-NEXT: s_lshl_b32 s6, s26, 16
+; GFX11-TRUE16-NEXT: v_add_f32_e64 v3, 0x40c00000, s4
+; GFX11-TRUE16-NEXT: v_bfe_u32 v4, v1, 16, 1
+; GFX11-TRUE16-NEXT: v_bfe_u32 v2, v0, 16, 1
+; GFX11-TRUE16-NEXT: v_or_b32_e32 v9, 0x400000, v1
+; GFX11-TRUE16-NEXT: v_or_b32_e32 v8, 0x400000, v0
+; GFX11-TRUE16-NEXT: v_bfe_u32 v7, v3, 16, 1
+; GFX11-TRUE16-NEXT: v_add_nc_u32_e32 v4, v4, v1
+; GFX11-TRUE16-NEXT: v_cmp_u_f32_e32 vcc_lo, v0, v0
+; GFX11-TRUE16-NEXT: s_and_b32 s5, s25, 0xffff0000
+; GFX11-TRUE16-NEXT: s_lshl_b32 s7, s25, 16
+; GFX11-TRUE16-NEXT: v_add_f32_e64 v5, 0x40c00000, s6
+; GFX11-TRUE16-NEXT: v_add_nc_u32_e32 v4, 0x7fff, v4
+; GFX11-TRUE16-NEXT: v_add_nc_u32_e32 v2, v2, v0
+; GFX11-TRUE16-NEXT: v_add_f32_e64 v6, 0x40c00000, s5
+; GFX11-TRUE16-NEXT: s_and_b32 s4, s24, 0xffff0000
+; GFX11-TRUE16-NEXT: s_delay_alu instid0(VALU_DEP_2) | instskip(NEXT) | instid1(VALU_DEP_1)
+; GFX11-TRUE16-NEXT: v_add_nc_u32_e32 v2, 0x7fff, v2
+; GFX11-TRUE16-NEXT: v_dual_cndmask_b32 v0, v2, v8 :: v_dual_add_nc_u32 v7, v7, v3
+; GFX11-TRUE16-NEXT: v_cmp_u_f32_e32 vcc_lo, v1, v1
+; GFX11-TRUE16-NEXT: v_or_b32_e32 v2, 0x400000, v3
+; GFX11-TRUE16-NEXT: v_bfe_u32 v8, v5, 16, 1
+; GFX11-TRUE16-NEXT: s_delay_alu instid0(VALU_DEP_4) | instskip(SKIP_4) | instid1(VALU_DEP_4)
+; GFX11-TRUE16-NEXT: v_lshrrev_b32_e32 v0, 16, v0
+; GFX11-TRUE16-NEXT: v_cndmask_b32_e32 v1, v4, v9, vcc_lo
+; GFX11-TRUE16-NEXT: v_add_f32_e64 v4, 0x40c00000, s7
+; GFX11-TRUE16-NEXT: v_bfe_u32 v9, v6, 16, 1
+; GFX11-TRUE16-NEXT: v_cmp_u_f32_e32 vcc_lo, v3, v3
+; GFX11-TRUE16-NEXT: v_lshrrev_b32_e32 v15, 16, v1
+; GFX11-TRUE16-NEXT: s_delay_alu instid0(VALU_DEP_4)
+; GFX11-TRUE16-NEXT: v_or_b32_e32 v10, 0x400000, v4
+; GFX11-TRUE16-NEXT: v_add_nc_u32_e32 v7, 0x7fff, v7
+; GFX11-TRUE16-NEXT: v_bfe_u32 v3, v4, 16, 1
+; GFX11-TRUE16-NEXT: v_mov_b16_e32 v15.h, v0.l
+; GFX11-TRUE16-NEXT: v_add_f32_e64 v0, 0x40c00000, s4
+; GFX11-TRUE16-NEXT: s_lshl_b32 s4, s24, 16
+; GFX11-TRUE16-NEXT: v_dual_cndmask_b32 v2, v7, v2 :: v_dual_add_nc_u32 v7, v8, v5
+; GFX11-TRUE16-NEXT: v_add_nc_u32_e32 v8, v9, v6
+; GFX11-TRUE16-NEXT: v_cmp_u_f32_e32 vcc_lo, v6, v6
+; GFX11-TRUE16-NEXT: v_or_b32_e32 v9, 0x400000, v5
+; GFX11-TRUE16-NEXT: s_delay_alu instid0(VALU_DEP_4) | instskip(SKIP_4) | instid1(VALU_DEP_4)
+; GFX11-TRUE16-NEXT: v_lshrrev_b32_e32 v1, 16, v2
+; GFX11-TRUE16-NEXT: v_add_nc_u32_e32 v2, v3, v4
+; GFX11-TRUE16-NEXT: v_add_nc_u32_e32 v3, 0x7fff, v7
+; GFX11-TRUE16-NEXT: v_add_nc_u32_e32 v7, 0x7fff, v8
+; GFX11-TRUE16-NEXT: v_or_b32_e32 v8, 0x400000, v6
+; GFX11-TRUE16-NEXT: v_add_nc_u32_e32 v2, 0x7fff, v2
+; GFX11-TRUE16-NEXT: s_delay_alu instid0(VALU_DEP_2) | instskip(SKIP_3) | instid1(VALU_DEP_4)
+; GFX11-TRUE16-NEXT: v_cndmask_b32_e32 v6, v7, v8, vcc_lo
+; GFX11-TRUE16-NEXT: v_cmp_u_f32_e32 vcc_lo, v5, v5
+; GFX11-TRUE16-NEXT: v_cndmask_b32_e32 v3, v3, v9, vcc_lo
+; GFX11-TRUE16-NEXT: v_cmp_u_f32_e32 vcc_lo, v4, v4
+; GFX11-TRUE16-NEXT: v_lshrrev_b32_e32 v4, 16, v6
+; GFX11-TRUE16-NEXT: s_delay_alu instid0(VALU_DEP_3) | instskip(SKIP_4) | instid1(VALU_DEP_3)
+; GFX11-TRUE16-NEXT: v_lshrrev_b32_e32 v30, 16, v3
+; GFX11-TRUE16-NEXT: v_mov_b16_e32 v30.h, v1.l
+; GFX11-TRUE16-NEXT: v_bfe_u32 v1, v0, 16, 1
+; GFX11-TRUE16-NEXT: v_cndmask_b32_e32 v2, v2, v10, vcc_lo
+; GFX11-TRUE16-NEXT: v_cmp_u_f32_e32 vcc_lo, v0, v0
+; GFX11-TRUE16-NEXT: v_add_nc_u32_e32 v1, v1, v0
+; GFX11-TRUE16-NEXT: s_delay_alu instid0(VALU_DEP_3) | instskip(SKIP_2) | instid1(VALU_DEP_4)
+; GFX11-TRUE16-NEXT: v_lshrrev_b32_e32 v44, 16, v2
+; GFX11-TRUE16-NEXT: v_or_b32_e32 v2, 0x400000, v0
+; GFX11-TRUE16-NEXT: v_mov_b16_e32 v44.h, v4.l
+; GFX11-TRUE16-NEXT: v_add_nc_u32_e32 v1, 0x7fff, v1
+; GFX11-TRUE16-NEXT: s_delay_alu instid0(VALU_DEP_1) | instskip(SKIP_2) | instid1(VALU_DEP_2)
+; GFX11-TRUE16-NEXT: v_cndmask_b32_e32 v0, v1, v2, vcc_lo
+; GFX11-TRUE16-NEXT: v_add_f32_e64 v1, 0x40c00000, s4
+; GFX11-TRUE16-NEXT: s_and_b32 s4, s23, 0xffff0000
+; GFX11-TRUE16-NEXT: v_lshrrev_b32_e32 v0, 16, v0
+; GFX11-TRUE16-NEXT: s_delay_alu instid0(VALU_DEP_2) | instskip(SKIP_2) | instid1(VALU_DEP_3)
+; GFX11-TRUE16-NEXT: v_bfe_u32 v2, v1, 16, 1
+; GFX11-TRUE16-NEXT: v_or_b32_e32 v3, 0x400000, v1
+; GFX11-TRUE16-NEXT: v_cmp_u_f32_e32 vcc_lo, v1, v1
+; GFX11-TRUE16-NEXT: v_add_nc_u32_e32 v2, v2, v1
+; GFX11-TRUE16-NEXT: s_delay_alu instid0(VALU_DEP_1) | instskip(NEXT) | instid1(VALU_DEP_1)
+; GFX11-TRUE16-NEXT: v_add_nc_u32_e32 v2, 0x7fff, v2
+; GFX11-TRUE16-NEXT: v_cndmask_b32_e32 v1, v2, v3, vcc_lo
+; GFX11-TRUE16-NEXT: s_delay_alu instid0(VALU_DEP_1) | instskip(SKIP_3) | instid1(VALU_DEP_1)
+; GFX11-TRUE16-NEXT: v_lshrrev_b32_e32 v57, 16, v1
+; GFX11-TRUE16-NEXT: v_mov_b16_e32 v57.h, v0.l
+; GFX11-TRUE16-NEXT: v_add_f32_e64 v0, 0x40c00000, s4
+; GFX11-TRUE16-NEXT: s_lshl_b32 s4, s23, 16
+; GFX11-TRUE16-NEXT: v_bfe_u32 v1, v0, 16, 1
+; GFX11-TRUE16-NEXT: v_or_b32_e32 v2, 0x400000, v0
+; GFX11-TRUE16-NEXT: v_cmp_u_f32_e32 vcc_lo, v0, v0
+; GFX11-TRUE16-NEXT: s_delay_alu instid0(VALU_DEP_3) | instskip(NEXT) | instid1(VALU_DEP_1)
+; GFX11-TRUE16-NEXT: v_add_nc_u32_e32 v1, v1, v0
+; GFX11-TRUE16-NEXT: v_add_nc_u32_e32 v1, 0x7fff, v1
+; GFX11-TRUE16-NEXT: s_delay_alu instid0(VALU_DEP_1) | instskip(SKIP_2) | instid1(VALU_DEP_2)
+; GFX11-TRUE16-NEXT: v_cndmask_b32_e32 v0, v1, v2, vcc_lo
+; GFX11-TRUE16-NEXT: v_add_f32_e64 v1, 0x40c00000, s4
+; GFX11-TRUE16-NEXT: s_and_b32 s4, s22, 0xffff0000
+; GFX11-TRUE16-NEXT: v_lshrrev_b32_e32 v0, 16, v0
+; GFX11-TRUE16-NEXT: s_delay_alu instid0(VALU_DEP_2) | instskip(SKIP_2) | instid1(VALU_DEP_3)
+; GFX11-TRUE16-NEXT: v_bfe_u32 v2, v1, 16, 1
+; GFX11-TRUE16-NEXT: v_or_b32_e32 v3, 0x400000, v1
+; GFX11-TRUE16-NEXT: v_cmp_u_f32_e32 vcc_lo, v1, v1
+; GFX11-TRUE16-NEXT: v_add_nc_u32_e32 v2, v2, v1
+; GFX11-TRUE16-NEXT: s_delay_alu instid0(VALU_DEP_1) | instskip(NEXT) | instid1(VALU_DEP_1)
+; GFX11-TRUE16-NEXT: v_add_nc_u32_e32 v2, 0x7fff, v2
+; GFX11-TRUE16-NEXT: v_cndmask_b32_e32 v1, v2, v3, vcc_lo
+; GFX11-TRUE16-NEXT: s_delay_alu instid0(VALU_DEP_1) | instskip(SKIP_3) | instid1(VALU_DEP_1)
+; GFX11-TRUE16-NEXT: v_lshrrev_b32_e32 v69, 16, v1
+; GFX11-TRUE16-NEXT: v_mov_b16_e32 v69.h, v0.l
+; GFX11-TRUE16-NEXT: v_add_f32_e64 v0, 0x40c00000, s4
+; GFX11-TRUE16-NEXT: s_lshl_b32 s4, s22, 16
+; GFX11-TRUE16-NEXT: v_bfe_u32 v1, v0, 16, 1
+; GFX11-TRUE16-NEXT: v_or_b32_e32 v2, 0x400000, v0
+; GFX11-TRUE16-NEXT: v_cmp_u_f32_e32 vcc_lo, v0, v0
+; GFX11-TRUE16-NEXT: s_delay_alu instid0(VALU_DEP_3) | instskip(NEXT) | instid1(VALU_DEP_1)
+; GFX11-TRUE16-NEXT: v_add_nc_u32_e32 v1, v1, v0
+; GFX11-TRUE16-NEXT: v_add_nc_u32_e32 v1, 0x7fff, v1
+; GFX11-TRUE16-NEXT: s_delay_alu instid0(VALU_DEP_1) | instskip(SKIP_2) | instid1(VALU_DEP_2)
+; GFX11-TRUE16-NEXT: v_cndmask_b32_e32 v0, v1, v2, vcc_lo
+; GFX11-TRUE16-NEXT: v_add_f32_e64 v1, 0x40c00000, s4
+; GFX11-TRUE16-NEXT: s_and_b32 s4, s21, 0xffff0000
+; GFX11-TRUE16-NEXT: v_lshrrev_b32_e32 v0, 16, v0
+; GFX11-TRUE16-NEXT: s_delay_alu instid0(VALU_DEP_2) | instskip(SKIP_2) | instid1(VALU_DEP_3)
+; GFX11-TRUE16-NEXT: v_bfe_u32 v2, v1, 16, 1
+; GFX11-TRUE16-NEXT: v_or_b32_e32 v3, 0x400000, v1
+; GFX11-TRUE16-NEXT: v_cmp_u_f32_e32 vcc_lo, v1, v1
+; GFX11-TRUE16-NEXT: v_add_nc_u32_e32 v2, v2, v1
+; GFX11-TRUE16-NEXT: s_delay_alu instid0(VALU_DEP_1) | instskip(NEXT) | instid1(VALU_DEP_1)
+; GFX11-TRUE16-NEXT: v_add_nc_u32_e32 v2, 0x7fff, v2
+; GFX11-TRUE16-NEXT: v_cndmask_b32_e32 v1, v2, v3, vcc_lo
+; GFX11-TRUE16-NEXT: s_delay_alu instid0(VALU_DEP_1) | instskip(SKIP_3) | instid1(VALU_DEP_1)
+; GFX11-TRUE16-NEXT: v_lshrrev_b32_e32 v80, 16, v1
+; GFX11-TRUE16-NEXT: v_mov_b16_e32 v80.h, v0.l
+; GFX11-TRUE16-NEXT: v_add_f32_e64 v0, 0x40c00000, s4
+; GFX11-TRUE16-NEXT: s_lshl_b32 s4, s21, 16
+; GFX11-TRUE16-NEXT: v_bfe_u32 v1, v0, 16, 1
+; GFX11-TRUE16-NEXT: v_or_b32_e32 v2, 0x400000, v0
+; GFX11-TRUE16-NEXT: v_cmp_u_f32_e32 vcc_lo, v0, v0
+; GFX11-TRUE16-NEXT: s_delay_alu instid0(VALU_DEP_3) | instskip(NEXT) | instid1(VALU_DEP_1)
+; GFX11-TRUE16-NEXT: v_add_nc_u32_e32 v1, v1, v0
+; GFX11-TRUE16-NEXT: v_add_nc_u32_e32 v1, 0x7fff, v1
+; GFX11-TRUE16-NEXT: s_delay_alu instid0(VALU_DEP_1) | instskip(SKIP_2) | instid1(VALU_DEP_1)
+; GFX11-TRUE16-NEXT: v_cndmask_b32_e32 v0, v1, v2, vcc_lo
+; GFX11-TRUE16-NEXT: v_add_f32_e64 v1, 0x40c00000, s4
+; GFX11-TRUE16-NEXT: s_and_b32 s4, s20, 0xffff0000
+; GFX11-TRUE16-NEXT: v_bfe_u32 v2, v1, 16, 1
+; GFX11-TRUE16-NEXT: v_or_b32_e32 v3, 0x400000, v1
+; GFX11-TRUE16-NEXT: v_cmp_u_f32_e32 vcc_lo, v1, v1
+; GFX11-TRUE16-NEXT: s_delay_alu instid0(VALU_DEP_3) | instskip(NEXT) | instid1(VALU_DEP_1)
+; GFX11-TRUE16-NEXT: v_add_nc_u32_e32 v2, v2, v1
+; GFX11-TRUE16-NEXT: v_add_nc_u32_e32 v2, 0x7fff, v2
+; GFX11-TRUE16-NEXT: s_delay_alu instid0(VALU_DEP_1) | instskip(SKIP_1) | instid1(VALU_DEP_2)
+; GFX11-TRUE16-NEXT: v_cndmask_b32_e32 v1, v2, v3, vcc_lo
+; GFX11-TRUE16-NEXT: v_lshrrev_b32_e32 v0, 16, v0
+; GFX11-TRUE16-NEXT: v_lshrrev_b32_e32 v90, 16, v1
+; GFX11-TRUE16-NEXT: s_delay_alu instid0(VALU_DEP_2) | instskip(SKIP_2) | instid1(VALU_DEP_1)
+; GFX11-TRUE16-NEXT: v_mov_b16_e32 v90.h, v0.l
+; GFX11-TRUE16-NEXT: v_add_f32_e64 v0, 0x40c00000, s4
+; GFX11-TRUE16-NEXT: s_lshl_b32 s4, s20, 16
+; GFX11-TRUE16-NEXT: v_bfe_u32 v1, v0, 16, 1
+; GFX11-TRUE16-NEXT: v_or_b32_e32 v2, 0x400000, v0
+; GFX11-TRUE16-NEXT: v_cmp_u_f32_e32 vcc_lo, v0, v0
+; GFX11-TRUE16-NEXT: s_delay_alu instid0(VALU_DEP_3) | instskip(NEXT) | instid1(VALU_DEP_1)
+; GFX11-TRUE16-NEXT: v_add_nc_u32_e32 v1, v1, v0
+; GFX11-TRUE16-NEXT: v_add_nc_u32_e32 v1, 0x7fff, v1
+; GFX11-TRUE16-NEXT: s_delay_alu instid0(VALU_DEP_1) | instskip(SKIP_2) | instid1(VALU_DEP_1)
+; GFX11-TRUE16-NEXT: v_cndmask_b32_e32 v0, v1, v2, vcc_lo
+; GFX11-TRUE16-NEXT: v_add_f32_e64 v1, 0x40c00000, s4
+; GFX11-TRUE16-NEXT: s_and_b32 s4, s19, 0xffff0000
+; GFX11-TRUE16-NEXT: v_bfe_u32 v2, v1, 16, 1
+; GFX11-TRUE16-NEXT: v_or_b32_e32 v3, 0x400000, v1
+; GFX11-TRUE16-NEXT: v_cmp_u_f32_e32 vcc_lo, v1, v1
+; GFX11-TRUE16-NEXT: s_delay_alu instid0(VALU_DEP_3) | instskip(NEXT) | instid1(VALU_DEP_1)
+; GFX11-TRUE16-NEXT: v_add_nc_u32_e32 v2, v2, v1
+; GFX11-TRUE16-NEXT: v_add_nc_u32_e32 v2, 0x7fff, v2
+; GFX11-TRUE16-NEXT: s_delay_alu instid0(VALU_DEP_1) | instskip(SKIP_1) | instid1(VALU_DEP_2)
+; GFX11-TRUE16-NEXT: v_cndmask_b32_e32 v1, v2, v3, vcc_lo
+; GFX11-TRUE16-NEXT: v_lshrrev_b32_e32 v0, 16, v0
+; GFX11-TRUE16-NEXT: v_lshrrev_b32_e32 v99, 16, v1
+; GFX11-TRUE16-NEXT: s_delay_alu instid0(VALU_DEP_2) | instskip(SKIP_2) | instid1(VALU_DEP_1)
+; GFX11-TRUE16-NEXT: v_mov_b16_e32 v99.h, v0.l
+; GFX11-TRUE16-NEXT: v_add_f32_e64 v0, 0x40c00000, s4
+; GFX11-TRUE16-NEXT: s_lshl_b32 s4, s19, 16
+; GFX11-TRUE16-NEXT: v_bfe_u32 v1, v0, 16, 1
+; GFX11-TRUE16-NEXT: v_or_b32_e32 v2, 0x400000, v0
+; GFX11-TRUE16-NEXT: v_cmp_u_f32_e32 vcc_lo, v0, v0
+; GFX11-TRUE16-NEXT: s_delay_alu instid0(VALU_DEP_3) | instskip(NEXT) | instid1(VALU_DEP_1)
+; GFX11-TRUE16-NEXT: v_add_nc_u32_e32 v1, v1, v0
+; GFX11-TRUE16-NEXT: v_add_nc_u32_e32 v1, 0x7fff, v1
+; GFX11-TRUE16-NEXT: s_delay_alu instid0(VALU_DEP_1) | instskip(SKIP_2) | instid1(VALU_DEP_1)
+; GFX11-TRUE16-NEXT: v_cndmask_b32_e32 v0, v1, v2, vcc_lo
+; GFX11-TRUE16-NEXT: v_add_f32_e64 v1, 0x40c00000, s4
+; GFX11-TRUE16-NEXT: s_and_b32 s4, s18, 0xffff0000
+; GFX11-TRUE16-NEXT: v_bfe_u32 v2, v1, 16, 1
+; GFX11-TRUE16-NEXT: v_or_b32_e32 v3, 0x400000, v1
+; GFX11-TRUE16-NEXT: v_cmp_u_f32_e32 vcc_lo, v1, v1
+; GFX11-TRUE16-NEXT: s_delay_alu instid0(VALU_DEP_3) | instskip(NEXT) | instid1(VALU_DEP_1)
+; GFX11-TRUE16-NEXT: v_add_nc_u32_e32 v2, v2, v1
+; GFX11-TRUE16-NEXT: v_add_nc_u32_e32 v2, 0x7fff, v2
+; GFX11-TRUE16-NEXT: s_delay_alu instid0(VALU_DEP_1) | instskip(SKIP_1) | instid1(VALU_DEP_2)
+; GFX11-TRUE16-NEXT: v_cndmask_b32_e32 v1, v2, v3, vcc_lo
+; GFX11-TRUE16-NEXT: v_lshrrev_b32_e32 v0, 16, v0
+; GFX11-TRUE16-NEXT: v_lshrrev_b32_e32 v107, 16, v1
+; GFX11-TRUE16-NEXT: s_delay_alu instid0(VALU_DEP_2) | instskip(SKIP_2) | instid1(VALU_DEP_1)
+; GFX11-TRUE16-NEXT: v_mov_b16_e32 v107.h, v0.l
+; GFX11-TRUE16-NEXT: v_add_f32_e64 v0, 0x40c00000, s4
+; GFX11-TRUE16-NEXT: s_lshl_b32 s4, s18, 16
+; GFX11-TRUE16-NEXT: v_bfe_u32 v1, v0, 16, 1
+; GFX11-TRUE16-NEXT: v_or_b32_e32 v2, 0x400000, v0
+; GFX11-TRUE16-NEXT: v_cmp_u_f32_e32 vcc_lo, v0, v0
+; GFX11-TRUE16-NEXT: s_delay_alu instid0(VALU_DEP_3) | instskip(NEXT) | instid1(VALU_DEP_1)
+; GFX11-TRUE16-NEXT: v_add_nc_u32_e32 v1, v1, v0
+; GFX11-TRUE16-NEXT: v_add_nc_u32_e32 v1, 0x7fff, v1
+; GFX11-TRUE16-NEXT: s_delay_alu instid0(VALU_DEP_1) | instskip(SKIP_2) | instid1(VALU_DEP_1)
+; GFX11-TRUE16-NEXT: v_cndmask_b32_e32 v0, v1, v2, vcc_lo
+; GFX11-TRUE16-NEXT: v_add_f32_e64 v1, 0x40c00000, s4
+; GFX11-TRUE16-NEXT: s_and_b32 s4, s17, 0xffff0000
+; GFX11-TRUE16-NEXT: v_bfe_u32 v2, v1, 16, 1
+; GFX11-TRUE16-NEXT: v_or_b32_e32 v3, 0x400000, v1
+; GFX11-TRUE16-NEXT: v_cmp_u_f32_e32 vcc_lo, v1, v1
+; GFX11-TRUE16-NEXT: s_delay_alu instid0(VALU_DEP_3) | instskip(NEXT) | instid1(VALU_DEP_1)
+; GFX11-TRUE16-NEXT: v_add_nc_u32_e32 v2, v2, v1
+; GFX11-TRUE16-NEXT: v_add_nc_u32_e32 v2, 0x7fff, v2
+; GFX11-TRUE16-NEXT: s_delay_alu instid0(VALU_DEP_1) | instskip(SKIP_1) | instid1(VALU_DEP_2)
+; GFX11-TRUE16-NEXT: v_cndmask_b32_e32 v1, v2, v3, vcc_lo
+; GFX11-TRUE16-NEXT: v_lshrrev_b32_e32 v0, 16, v0
+; GFX11-TRUE16-NEXT: v_lshrrev_b32_e32 v114, 16, v1
+; GFX11-TRUE16-NEXT: s_delay_alu instid0(VALU_DEP_2) | instskip(SKIP_2) | instid1(VALU_DEP_1)
+; GFX11-TRUE16-NEXT: v_mov_b16_e32 v114.h, v0.l
+; GFX11-TRUE16-NEXT: v_add_f32_e64 v0, 0x40c00000, s4
+; GFX11-TRUE16-NEXT: s_lshl_b32 s4, s17, 16
+; GFX11-TRUE16-NEXT: v_bfe_u32 v1, v0, 16, 1
+; GFX11-TRUE16-NEXT: v_or_b32_e32 v2, 0x400000, v0
+; GFX11-TRUE16-NEXT: v_cmp_u_f32_e32 vcc_lo, v0, v0
+; GFX11-TRUE16-NEXT: s_delay_alu instid0(VALU_DEP_3) | instskip(NEXT) | instid1(VALU_DEP_1)
+; GFX11-TRUE16-NEXT: v_add_nc_u32_e32 v1, v1, v0
+; GFX11-TRUE16-NEXT: v_add_nc_u32_e32 v1, 0x7fff, v1
+; GFX11-TRUE16-NEXT: s_delay_alu instid0(VALU_DEP_1) | instskip(SKIP_2) | instid1(VALU_DEP_1)
+; GFX11-TRUE16-NEXT: v_cndmask_b32_e32 v0, v1, v2, vcc_lo
+; GFX11-TRUE16-NEXT: v_add_f32_e64 v1, 0x40c00000, s4
+; GFX11-TRUE16-NEXT: s_and_b32 s4, s16, 0xffff0000
+; GFX11-TRUE16-NEXT: v_bfe_u32 v2, v1, 16, 1
+; GFX11-TRUE16-NEXT: v_or_b32_e32 v3, 0x400000, v1
+; GFX11-TRUE16-NEXT: v_cmp_u_f32_e32 vcc_lo, v1, v1
+; GFX11-TRUE16-NEXT: s_delay_alu instid0(VALU_DEP_3) | instskip(NEXT) | instid1(VALU_DEP_1)
+; GFX11-TRUE16-NEXT: v_add_nc_u32_e32 v2, v2, v1
+; GFX11-TRUE16-NEXT: v_add_nc_u32_e32 v2, 0x7fff, v2
+; GFX11-TRUE16-NEXT: s_delay_alu instid0(VALU_DEP_1) | instskip(SKIP_1) | instid1(VALU_DEP_2)
+; GFX11-TRUE16-NEXT: v_cndmask_b32_e32 v1, v2, v3, vcc_lo
+; GFX11-TRUE16-NEXT: v_lshrrev_b32_e32 v0, 16, v0
+; GFX11-TRUE16-NEXT: v_lshrrev_b32_e32 v120, 16, v1
+; GFX11-TRUE16-NEXT: s_delay_alu instid0(VALU_DEP_2) | instskip(SKIP_2) | instid1(VALU_DEP_1)
+; GFX11-TRUE16-NEXT: v_mov_b16_e32 v120.h, v0.l
+; GFX11-TRUE16-NEXT: v_add_f32_e64 v0, 0x40c00000, s4
+; GFX11-TRUE16-NEXT: s_lshl_b32 s4, s16, 16
+; GFX11-TRUE16-NEXT: v_bfe_u32 v1, v0, 16, 1
+; GFX11-TRUE16-NEXT: v_or_b32_e32 v2, 0x400000, v0
+; GFX11-TRUE16-NEXT: v_cmp_u_f32_e32 vcc_lo, v0, v0
+; GFX11-TRUE16-NEXT: s_delay_alu instid0(VALU_DEP_3) | instskip(NEXT) | instid1(VALU_DEP_1)
+; GFX11-TRUE16-NEXT: v_add_nc_u32_e32 v1, v1, v0
+; GFX11-TRUE16-NEXT: v_add_nc_u32_e32 v1, 0x7fff, v1
+; GFX11-TRUE16-NEXT: s_delay_alu instid0(VALU_DEP_1) | instskip(SKIP_3) | instid1(VALU_DEP_1)
+; GFX11-TRUE16-NEXT: v_cndmask_b32_e32 v0, v1, v2, vcc_lo
+; GFX11-TRUE16-NEXT: v_add_f32_e64 v1, 0x40c00000, s4
+; GFX11-TRUE16-NEXT: s_and_b32 s4, s3, 0xffff0000
+; GFX11-TRUE16-NEXT: s_lshl_b32 s3, s3, 16
+; GFX11-TRUE16-NEXT: v_bfe_u32 v2, v1, 16, 1
+; GFX11-TRUE16-NEXT: v_or_b32_e32 v3, 0x400000, v1
+; GFX11-TRUE16-NEXT: v_cmp_u_f32_e32 vcc_lo, v1, v1
+; GFX11-TRUE16-NEXT: s_delay_alu instid0(VALU_DEP_3) | instskip(NEXT) | instid1(VALU_DEP_1)
+; GFX11-TRUE16-NEXT: v_add_nc_u32_e32 v2, v2, v1
+; GFX11-TRUE16-NEXT: v_add_nc_u32_e32 v2, 0x7fff, v2
+; GFX11-TRUE16-NEXT: s_delay_alu instid0(VALU_DEP_1) | instskip(SKIP_1) | instid1(VALU_DEP_2)
+; GFX11-TRUE16-NEXT: v_cndmask_b32_e32 v1, v2, v3, vcc_lo
+; GFX11-TRUE16-NEXT: v_lshrrev_b32_e32 v0, 16, v0
+; GFX11-TRUE16-NEXT: v_lshrrev_b32_e32 v125, 16, v1
+; GFX11-TRUE16-NEXT: s_delay_alu instid0(VALU_DEP_2) | instskip(SKIP_1) | instid1(VALU_DEP_1)
+; GFX11-TRUE16-NEXT: v_mov_b16_e32 v125.h, v0.l
+; GFX11-TRUE16-NEXT: v_add_f32_e64 v0, 0x40c00000, s4
+; GFX11-TRUE16-NEXT: v_bfe_u32 v1, v0, 16, 1
+; GFX11-TRUE16-NEXT: v_or_b32_e32 v2, 0x400000, v0
+; GFX11-TRUE16-NEXT: v_cmp_u_f32_e32 vcc_lo, v0, v0
+; GFX11-TRUE16-NEXT: s_delay_alu instid0(VALU_DEP_3) | instskip(NEXT) | instid1(VALU_DEP_1)
+; GFX11-TRUE16-NEXT: v_add_nc_u32_e32 v1, v1, v0
+; GFX11-TRUE16-NEXT: v_add_nc_u32_e32 v1, 0x7fff, v1
+; GFX11-TRUE16-NEXT: s_delay_alu instid0(VALU_DEP_1) | instskip(SKIP_3) | instid1(VALU_DEP_1)
+; GFX11-TRUE16-NEXT: v_cndmask_b32_e32 v0, v1, v2, vcc_lo
+; GFX11-TRUE16-NEXT: v_add_f32_e64 v1, 0x40c00000, s3
+; GFX11-TRUE16-NEXT: s_and_b32 s3, s2, 0xffff0000
+; GFX11-TRUE16-NEXT: s_lshl_b32 s2, s2, 16
+; GFX11-TRUE16-NEXT: v_bfe_u32 v2, v1, 16, 1
+; GFX11-TRUE16-NEXT: v_or_b32_e32 v3, 0x400000, v1
+; GFX11-TRUE16-NEXT: v_cmp_u_f32_e32 vcc_lo, v1, v1
+; GFX11-TRUE16-NEXT: s_delay_alu instid0(VALU_DEP_3) | instskip(NEXT) | instid1(VALU_DEP_1)
+; GFX11-TRUE16-NEXT: v_add_nc_u32_e32 v2, v2, v1
+; GFX11-TRUE16-NEXT: v_add_nc_u32_e32 v2, 0x7fff, v2
+; GFX11-TRUE16-NEXT: s_delay_alu instid0(VALU_DEP_1) | instskip(SKIP_1) | instid1(VALU_DEP_2)
+; GFX11-TRUE16-NEXT: v_cndmask_b32_e32 v1, v2, v3, vcc_lo
+; GFX11-TRUE16-NEXT: v_lshrrev_b32_e32 v0, 16, v0
+; GFX11-TRUE16-NEXT: v_lshrrev_b32_e32 v129, 16, v1
+; GFX11-TRUE16-NEXT: s_delay_alu instid0(VALU_DEP_2) | instskip(SKIP_1) | instid1(VALU_DEP_1)
+; GFX11-TRUE16-NEXT: v_mov_b16_e64 v129.h, v0.l
+; GFX11-TRUE16-NEXT: v_add_f32_e64 v0, 0x40c00000, s3
+; GFX11-TRUE16-NEXT: v_bfe_u32 v1, v0, 16, 1
+; GFX11-TRUE16-NEXT: v_or_b32_e32 v2, 0x400000, v0
+; GFX11-TRUE16-NEXT: v_cmp_u_f32_e32 vcc_lo, v0, v0
+; GFX11-TRUE16-NEXT: s_delay_alu instid0(VALU_DEP_3) | instskip(NEXT) | instid1(VALU_DEP_1)
+; GFX11-TRUE16-NEXT: v_add_nc_u32_e32 v1, v1, v0
+; GFX11-TRUE16-NEXT: v_add_nc_u32_e32 v1, 0x7fff, v1
+; GFX11-TRUE16-NEXT: s_delay_alu instid0(VALU_DEP_1) | instskip(SKIP_3) | instid1(VALU_DEP_1)
+; GFX11-TRUE16-NEXT: v_cndmask_b32_e32 v0, v1, v2, vcc_lo
+; GFX11-TRUE16-NEXT: v_add_f32_e64 v1, 0x40c00000, s2
+; GFX11-TRUE16-NEXT: s_and_b32 s2, s1, 0xffff0000
+; GFX11-TRUE16-NEXT: s_lshl_b32 s1, s1, 16
+; GFX11-TRUE16-NEXT: v_bfe_u32 v2, v1, 16, 1
+; GFX11-TRUE16-NEXT: v_or_b32_e32 v3, 0x400000, v1
+; GFX11-TRUE16-NEXT: v_cmp_u_f32_e32 vcc_lo, v1, v1
+; GFX11-TRUE16-NEXT: s_delay_alu instid0(VALU_DEP_3) | instskip(NEXT) | instid1(VALU_DEP_1)
+; GFX11-TRUE16-NEXT: v_add_nc_u32_e32 v2, v2, v1
+; GFX11-TRUE16-NEXT: v_add_nc_u32_e32 v2, 0x7fff, v2
+; GFX11-TRUE16-NEXT: s_delay_alu instid0(VALU_DEP_1) | instskip(SKIP_1) | instid1(VALU_DEP_2)
+; GFX11-TRUE16-NEXT: v_cndmask_b32_e32 v1, v2, v3, vcc_lo
+; GFX11-TRUE16-NEXT: v_lshrrev_b32_e32 v0, 16, v0
+; GFX11-TRUE16-NEXT: v_lshrrev_b32_e32 v132, 16, v1
+; GFX11-TRUE16-NEXT: s_delay_alu instid0(VALU_DEP_2) | instskip(SKIP_1) | instid1(VALU_DEP_1)
+; GFX11-TRUE16-NEXT: v_mov_b16_e64 v132.h, v0.l
+; GFX11-TRUE16-NEXT: v_add_f32_e64 v0, 0x40c00000, s2
+; GFX11-TRUE16-NEXT: v_bfe_u32 v1, v0, 16, 1
+; GFX11-TRUE16-NEXT: v_or_b32_e32 v2, 0x400000, v0
+; GFX11-TRUE16-NEXT: v_cmp_u_f32_e32 vcc_lo, v0, v0
+; GFX11-TRUE16-NEXT: s_delay_alu instid0(VALU_DEP_3) | instskip(NEXT) | instid1(VALU_DEP_1)
+; GFX11-TRUE16-NEXT: v_add_nc_u32_e32 v1, v1, v0
+; GFX11-TRUE16-NEXT: v_add_nc_u32_e32 v1, 0x7fff, v1
+; GFX11-TRUE16-NEXT: s_delay_alu instid0(VALU_DEP_1) | instskip(SKIP_3) | instid1(VALU_DEP_1)
+; GFX11-TRUE16-NEXT: v_cndmask_b32_e32 v0, v1, v2, vcc_lo
+; GFX11-TRUE16-NEXT: v_add_f32_e64 v1, 0x40c00000, s1
+; GFX11-TRUE16-NEXT: s_and_b32 s1, s0, 0xffff0000
+; GFX11-TRUE16-NEXT: s_lshl_b32 s0, s0, 16
+; GFX11-TRUE16-NEXT: v_bfe_u32 v2, v1, 16, 1
+; GFX11-TRUE16-NEXT: v_or_b32_e32 v3, 0x400000, v1
+; GFX11-TRUE16-NEXT: v_cmp_u_f32_e32 vcc_lo, v1, v1
+; GFX11-TRUE16-NEXT: s_delay_alu instid0(VALU_DEP_3) | instskip(NEXT) | instid1(VALU_DEP_1)
+; GFX11-TRUE16-NEXT: v_add_nc_u32_e32 v2, v2, v1
+; GFX11-TRUE16-NEXT: v_add_nc_u32_e32 v2, 0x7fff, v2
+; GFX11-TRUE16-NEXT: s_delay_alu instid0(VALU_DEP_1) | instskip(SKIP_1) | instid1(VALU_DEP_2)
+; GFX11-TRUE16-NEXT: v_cndmask_b32_e32 v1, v2, v3, vcc_lo
+; GFX11-TRUE16-NEXT: v_lshrrev_b32_e32 v0, 16, v0
+; GFX11-TRUE16-NEXT: v_lshrrev_b32_e32 v134, 16, v1
+; GFX11-TRUE16-NEXT: s_delay_alu instid0(VALU_DEP_2) | instskip(SKIP_1) | instid1(VALU_DEP_1)
+; GFX11-TRUE16-NEXT: v_mov_b16_e64 v134.h, v0.l
+; GFX11-TRUE16-NEXT: v_add_f32_e64 v0, 0x40c00000, s1
+; GFX11-TRUE16-NEXT: v_bfe_u32 v1, v0, 16, 1
+; GFX11-TRUE16-NEXT: v_or_b32_e32 v2, 0x400000, v0
+; GFX11-TRUE16-NEXT: v_cmp_u_f32_e32 vcc_lo, v0, v0
+; GFX11-TRUE16-NEXT: s_delay_alu instid0(VALU_DEP_3) | instskip(NEXT) | instid1(VALU_DEP_1)
+; GFX11-TRUE16-NEXT: v_add_nc_u32_e32 v1, v1, v0
+; GFX11-TRUE16-NEXT: v_add_nc_u32_e32 v1, 0x7fff, v1
+; GFX11-TRUE16-NEXT: s_delay_alu instid0(VALU_DEP_1) | instskip(SKIP_1) | instid1(VALU_DEP_1)
+; GFX11-TRUE16-NEXT: v_cndmask_b32_e32 v0, v1, v2, vcc_lo
+; GFX11-TRUE16-NEXT: v_add_f32_e64 v1, 0x40c00000, s0
+; GFX11-TRUE16-NEXT: v_bfe_u32 v2, v1, 16, 1
+; GFX11-TRUE16-NEXT: v_or_b32_e32 v3, 0x400000, v1
+; GFX11-TRUE16-NEXT: v_cmp_u_f32_e32 vcc_lo, v1, v1
+; GFX11-TRUE16-NEXT: s_delay_alu instid0(VALU_DEP_3) | instskip(NEXT) | instid1(VALU_DEP_1)
+; GFX11-TRUE16-NEXT: v_add_nc_u32_e32 v2, v2, v1
+; GFX11-TRUE16-NEXT: v_add_nc_u32_e32 v2, 0x7fff, v2
+; GFX11-TRUE16-NEXT: s_delay_alu instid0(VALU_DEP_1) | instskip(SKIP_1) | instid1(VALU_DEP_2)
+; GFX11-TRUE16-NEXT: v_cndmask_b32_e32 v1, v2, v3, vcc_lo
+; GFX11-TRUE16-NEXT: v_lshrrev_b32_e32 v0, 16, v0
+; GFX11-TRUE16-NEXT: v_lshrrev_b32_e32 v135, 16, v1
+; GFX11-TRUE16-NEXT: s_delay_alu instid0(VALU_DEP_2) | instskip(SKIP_1) | instid1(VALU_DEP_1)
+; GFX11-TRUE16-NEXT: v_mov_b16_e64 v135.h, v0.l
+; GFX11-TRUE16-NEXT: v_and_b32_e32 v0, 0xffff0000, v167
+; GFX11-TRUE16-NEXT: v_add_f32_e32 v0, 0x40c00000, v0
+; GFX11-TRUE16-NEXT: s_delay_alu instid0(VALU_DEP_1) | instskip(SKIP_2) | instid1(VALU_DEP_3)
+; GFX11-TRUE16-NEXT: v_bfe_u32 v1, v0, 16, 1
+; GFX11-TRUE16-NEXT: v_or_b32_e32 v2, 0x400000, v0
+; GFX11-TRUE16-NEXT: v_cmp_u_f32_e32 vcc_lo, v0, v0
+; GFX11-TRUE16-NEXT: v_add_nc_u32_e32 v1, v1, v0
+; GFX11-TRUE16-NEXT: s_delay_alu instid0(VALU_DEP_1) | instskip(NEXT) | instid1(VALU_DEP_1)
+; GFX11-TRUE16-NEXT: v_add_nc_u32_e32 v1, 0x7fff, v1
+; GFX11-TRUE16-NEXT: v_dual_cndmask_b32 v0, v1, v2 :: v_dual_lshlrev_b32 v1, 16, v167
+; GFX11-TRUE16-NEXT: s_delay_alu instid0(VALU_DEP_1) | instskip(NEXT) | instid1(VALU_DEP_1)
+; GFX11-TRUE16-NEXT: v_add_f32_e32 v1, 0x40c00000, v1
+; GFX11-TRUE16-NEXT: v_bfe_u32 v2, v1, 16, 1
+; GFX11-TRUE16-NEXT: v_or_b32_e32 v3, 0x400000, v1
+; GFX11-TRUE16-NEXT: v_cmp_u_f32_e32 vcc_lo, v1, v1
+; GFX11-TRUE16-NEXT: s_delay_alu instid0(VALU_DEP_3) | instskip(NEXT) | instid1(VALU_DEP_1)
+; GFX11-TRUE16-NEXT: v_add_nc_u32_e32 v2, v2, v1
+; GFX11-TRUE16-NEXT: v_add_nc_u32_e32 v2, 0x7fff, v2
+; GFX11-TRUE16-NEXT: s_delay_alu instid0(VALU_DEP_1) | instskip(SKIP_1) | instid1(VALU_DEP_2)
+; GFX11-TRUE16-NEXT: v_cndmask_b32_e32 v1, v2, v3, vcc_lo
+; GFX11-TRUE16-NEXT: v_lshrrev_b32_e32 v0, 16, v0
+; GFX11-TRUE16-NEXT: v_lshrrev_b32_e32 v167, 16, v1
+; GFX11-TRUE16-NEXT: s_delay_alu instid0(VALU_DEP_2) | instskip(SKIP_1) | instid1(VALU_DEP_1)
+; GFX11-TRUE16-NEXT: v_mov_b16_e64 v167.h, v0.l
+; GFX11-TRUE16-NEXT: v_and_b32_e32 v0, 0xffff0000, v176
+; GFX11-TRUE16-NEXT: v_add_f32_e32 v0, 0x40c00000, v0
+; GFX11-TRUE16-NEXT: s_delay_alu instid0(VALU_DEP_1) | instskip(SKIP_2) | instid1(VALU_DEP_3)
+; GFX11-TRUE16-NEXT: v_bfe_u32 v1, v0, 16, 1
+; GFX11-TRUE16-NEXT: v_or_b32_e32 v2, 0x400000, v0
+; GFX11-TRUE16-NEXT: v_cmp_u_f32_e32 vcc_lo, v0, v0
+; GFX11-TRUE16-NEXT: v_add_nc_u32_e32 v1, v1, v0
+; GFX11-TRUE16-NEXT: s_delay_alu instid0(VALU_DEP_1) | instskip(NEXT) | instid1(VALU_DEP_1)
+; GFX11-TRUE16-NEXT: v_add_nc_u32_e32 v1, 0x7fff, v1
+; GFX11-TRUE16-NEXT: v_dual_cndmask_b32 v0, v1, v2 :: v_dual_lshlrev_b32 v1, 16, v176
+; GFX11-TRUE16-NEXT: s_delay_alu instid0(VALU_DEP_1) | instskip(NEXT) | instid1(VALU_DEP_2)
+; GFX11-TRUE16-NEXT: v_lshrrev_b32_e32 v0, 16, v0
+; GFX11-TRUE16-NEXT: v_add_f32_e32 v1, 0x40c00000, v1
+; GFX11-TRUE16-NEXT: s_delay_alu instid0(VALU_DEP_1) | instskip(SKIP_2) | instid1(VALU_DEP_3)
+; GFX11-TRUE16-NEXT: v_bfe_u32 v2, v1, 16, 1
+; GFX11-TRUE16-NEXT: v_or_b32_e32 v3, 0x400000, v1
+; GFX11-TRUE16-NEXT: v_cmp_u_f32_e32 vcc_lo, v1, v1
+; GFX11-TRUE16-NEXT: v_add_nc_u32_e32 v2, v2, v1
+; GFX11-TRUE16-NEXT: s_delay_alu instid0(VALU_DEP_1) | instskip(NEXT) | instid1(VALU_DEP_1)
+; GFX11-TRUE16-NEXT: v_add_nc_u32_e32 v2, 0x7fff, v2
+; GFX11-TRUE16-NEXT: v_cndmask_b32_e32 v1, v2, v3, vcc_lo
+; GFX11-TRUE16-NEXT: s_delay_alu instid0(VALU_DEP_1) | instskip(SKIP_2) | instid1(VALU_DEP_1)
+; GFX11-TRUE16-NEXT: v_lshrrev_b32_e32 v176, 16, v1
+; GFX11-TRUE16-NEXT: v_mov_b16_e64 v176.h, v0.l
+; GFX11-TRUE16-NEXT: v_and_b32_e32 v0, 0xffff0000, v177
+; GFX11-TRUE16-NEXT: v_add_f32_e32 v0, 0x40c00000, v0
+; GFX11-TRUE16-NEXT: s_delay_alu instid0(VALU_DEP_1) | instskip(SKIP_2) | instid1(VALU_DEP_3)
+; GFX11-TRUE16-NEXT: v_bfe_u32 v1, v0, 16, 1
+; GFX11-TRUE16-NEXT: v_or_b32_e32 v2, 0x400000, v0
+; GFX11-TRUE16-NEXT: v_cmp_u_f32_e32 vcc_lo, v0, v0
+; GFX11-TRUE16-NEXT: v_add_nc_u32_e32 v1, v1, v0
+; GFX11-TRUE16-NEXT: s_delay_alu instid0(VALU_DEP_1) | instskip(NEXT) | instid1(VALU_DEP_1)
+; GFX11-TRUE16-NEXT: v_add_nc_u32_e32 v1, 0x7fff, v1
+; GFX11-TRUE16-NEXT: v_dual_cndmask_b32 v0, v1, v2 :: v_dual_lshlrev_b32 v1, 16, v177
+; GFX11-TRUE16-NEXT: s_delay_alu instid0(VALU_DEP_1) | instskip(NEXT) | instid1(VALU_DEP_2)
+; GFX11-TRUE16-NEXT: v_add_f32_e32 v1, 0x40c00000, v1
+; GFX11-TRUE16-NEXT: v_lshrrev_b32_e32 v0, 16, v0
+; GFX11-TRUE16-NEXT: s_delay_alu instid0(VALU_DEP_2) | instskip(SKIP_2) | instid1(VALU_DEP_3)
+; GFX11-TRUE16-NEXT: v_bfe_u32 v2, v1, 16, 1
+; GFX11-TRUE16-NEXT: v_or_b32_e32 v3, 0x400000, v1
+; GFX11-TRUE16-NEXT: v_cmp_u_f32_e32 vcc_lo, v1, v1
+; GFX11-TRUE16-NEXT: v_add_nc_u32_e32 v2, v2, v1
+; GFX11-TRUE16-NEXT: s_delay_alu instid0(VALU_DEP_1) | instskip(NEXT) | instid1(VALU_DEP_1)
+; GFX11-TRUE16-NEXT: v_add_nc_u32_e32 v2, 0x7fff, v2
+; GFX11-TRUE16-NEXT: v_cndmask_b32_e32 v1, v2, v3, vcc_lo
+; GFX11-TRUE16-NEXT: s_delay_alu instid0(VALU_DEP_1) | instskip(SKIP_2) | instid1(VALU_DEP_1)
+; GFX11-TRUE16-NEXT: v_lshrrev_b32_e32 v177, 16, v1
+; GFX11-TRUE16-NEXT: v_mov_b16_e64 v177.h, v0.l
+; GFX11-TRUE16-NEXT: v_and_b32_e32 v0, 0xffff0000, v178
+; GFX11-TRUE16-NEXT: v_add_f32_e32 v0, 0x40c00000, v0
+; GFX11-TRUE16-NEXT: s_delay_alu instid0(VALU_DEP_1) | instskip(SKIP_2) | instid1(VALU_DEP_3)
+; GFX11-TRUE16-NEXT: v_bfe_u32 v1, v0, 16, 1
+; GFX11-TRUE16-NEXT: v_or_b32_e32 v2, 0x400000, v0
+; GFX11-TRUE16-NEXT: v_cmp_u_f32_e32 vcc_lo, v0, v0
+; GFX11-TRUE16-NEXT: v_add_nc_u32_e32 v1, v1, v0
+; GFX11-TRUE16-NEXT: s_delay_alu instid0(VALU_DEP_1) | instskip(NEXT) | instid1(VALU_DEP_1)
+; GFX11-TRUE16-NEXT: v_add_nc_u32_e32 v1, 0x7fff, v1
+; GFX11-TRUE16-NEXT: v_cndmask_b32_e32 v0, v1, v2, vcc_lo
+; GFX11-TRUE16-NEXT: v_lshlrev_b32_e32 v1, 16, v178
+; GFX11-TRUE16-NEXT: s_delay_alu instid0(VALU_DEP_1) | instskip(NEXT) | instid1(VALU_DEP_3)
+; GFX11-TRUE16-NEXT: v_add_f32_e32 v1, 0x40c00000, v1
+; GFX11-TRUE16-NEXT: v_lshrrev_b32_e32 v0, 16, v0
+; GFX11-TRUE16-NEXT: s_delay_alu instid0(VALU_DEP_2) | instskip(SKIP_2) | instid1(VALU_DEP_3)
+; GFX11-TRUE16-NEXT: v_bfe_u32 v2, v1, 16, 1
+; GFX11-TRUE16-NEXT: v_or_b32_e32 v3, 0x400000, v1
+; GFX11-TRUE16-NEXT: v_cmp_u_f32_e32 vcc_lo, v1, v1
+; GFX11-TRUE16-NEXT: v_add_nc_u32_e32 v2, v2, v1
+; GFX11-TRUE16-NEXT: s_delay_alu instid0(VALU_DEP_1) | instskip(NEXT) | instid1(VALU_DEP_1)
+; GFX11-TRUE16-NEXT: v_add_nc_u32_e32 v2, 0x7fff, v2
+; GFX11-TRUE16-NEXT: v_cndmask_b32_e32 v1, v2, v3, vcc_lo
+; GFX11-TRUE16-NEXT: s_delay_alu instid0(VALU_DEP_1) | instskip(SKIP_2) | instid1(VALU_DEP_1)
+; GFX11-TRUE16-NEXT: v_lshrrev_b32_e32 v178, 16, v1
+; GFX11-TRUE16-NEXT: v_mov_b16_e64 v178.h, v0.l
+; GFX11-TRUE16-NEXT: v_and_b32_e32 v0, 0xffff0000, v179
+; GFX11-TRUE16-NEXT: v_add_f32_e32 v0, 0x40c00000, v0
+; GFX11-TRUE16-NEXT: s_delay_alu instid0(VALU_DEP_1) | instskip(SKIP_2) | instid1(VALU_DEP_3)
+; GFX11-TRUE16-NEXT: v_bfe_u32 v1, v0, 16, 1
+; GFX11-TRUE16-NEXT: v_or_b32_e32 v2, 0x400000, v0
+; GFX11-TRUE16-NEXT: v_cmp_u_f32_e32 vcc_lo, v0, v0
+; GFX11-TRUE16-NEXT: v_add_nc_u32_e32 v1, v1, v0
+; GFX11-TRUE16-NEXT: s_delay_alu instid0(VALU_DEP_1) | instskip(NEXT) | instid1(VALU_DEP_1)
+; GFX11-TRUE16-NEXT: v_add_nc_u32_e32 v1, 0x7fff, v1
+; GFX11-TRUE16-NEXT: v_dual_cndmask_b32 v0, v1, v2 :: v_dual_lshlrev_b32 v1, 16, v179
+; GFX11-TRUE16-NEXT: s_delay_alu instid0(VALU_DEP_1) | instskip(NEXT) | instid1(VALU_DEP_2)
+; GFX11-TRUE16-NEXT: v_add_f32_e32 v1, 0x40c00000, v1
+; GFX11-TRUE16-NEXT: v_lshrrev_b32_e32 v0, 16, v0
+; GFX11-TRUE16-NEXT: s_delay_alu instid0(VALU_DEP_2) | instskip(SKIP_2) | instid1(VALU_DEP_3)
+; GFX11-TRUE16-NEXT: v_bfe_u32 v2, v1, 16, 1
+; GFX11-TRUE16-NEXT: v_or_b32_e32 v3, 0x400000, v1
+; GFX11-TRUE16-NEXT: v_cmp_u_f32_e32 vcc_lo, v1, v1
+; GFX11-TRUE16-NEXT: v_add_nc_u32_e32 v2, v2, v1
+; GFX11-TRUE16-NEXT: s_delay_alu instid0(VALU_DEP_1) | instskip(NEXT) | instid1(VALU_DEP_1)
+; GFX11-TRUE16-NEXT: v_add_nc_u32_e32 v2, 0x7fff, v2
+; GFX11-TRUE16-NEXT: v_cndmask_b32_e32 v1, v2, v3, vcc_lo
+; GFX11-TRUE16-NEXT: s_delay_alu instid0(VALU_DEP_1) | instskip(SKIP_2) | instid1(VALU_DEP_1)
+; GFX11-TRUE16-NEXT: v_lshrrev_b32_e32 v179, 16, v1
+; GFX11-TRUE16-NEXT: v_mov_b16_e64 v179.h, v0.l
+; GFX11-TRUE16-NEXT: v_and_b32_e32 v0, 0xffff0000, v180
+; GFX11-TRUE16-NEXT: v_add_f32_e32 v0, 0x40c00000, v0
+; GFX11-TRUE16-NEXT: s_delay_alu instid0(VALU_DEP_1) | instskip(SKIP_2) | instid1(VALU_DEP_3)
+; GFX11-TRUE16-NEXT: v_bfe_u32 v1, v0, 16, 1
+; GFX11-TRUE16-NEXT: v_or_b32_e32 v2, 0x400000, v0
+; GFX11-TRUE16-NEXT: v_cmp_u_f32_e32 vcc_lo, v0, v0
+; GFX11-TRUE16-NEXT: v_add_nc_u32_e32 v1, v1, v0
+; GFX11-TRUE16-NEXT: s_delay_alu instid0(VALU_DEP_1) | instskip(NEXT) | instid1(VALU_DEP_1)
+; GFX11-TRUE16-NEXT: v_add_nc_u32_e32 v1, 0x7fff, v1
+; GFX11-TRUE16-NEXT: v_dual_cndmask_b32 v0, v1, v2 :: v_dual_lshlrev_b32 v1, 16, v180
+; GFX11-TRUE16-NEXT: s_delay_alu instid0(VALU_DEP_1) | instskip(NEXT) | instid1(VALU_DEP_2)
+; GFX11-TRUE16-NEXT: v_lshrrev_b32_e32 v0, 16, v0
+; GFX11-TRUE16-NEXT: v_add_f32_e32 v1, 0x40c00000, v1
+; GFX11-TRUE16-NEXT: s_delay_alu instid0(VALU_DEP_1) | instskip(SKIP_2) | instid1(VALU_DEP_3)
+; GFX11-TRUE16-NEXT: v_bfe_u32 v2, v1, 16, 1
+; GFX11-TRUE16-NEXT: v_or_b32_e32 v3, 0x400000, v1
+; GFX11-TRUE16-NEXT: v_cmp_u_f32_e32 vcc_lo, v1, v1
+; GFX11-TRUE16-NEXT: v_add_nc_u32_e32 v2, v2, v1
+; GFX11-TRUE16-NEXT: s_delay_alu instid0(VALU_DEP_1) | instskip(NEXT) | instid1(VALU_DEP_1)
+; GFX11-TRUE16-NEXT: v_add_nc_u32_e32 v2, 0x7fff, v2
+; GFX11-TRUE16-NEXT: v_cndmask_b32_e32 v1, v2, v3, vcc_lo
+; GFX11-TRUE16-NEXT: s_delay_alu instid0(VALU_DEP_1) | instskip(SKIP_2) | instid1(VALU_DEP_1)
+; GFX11-TRUE16-NEXT: v_lshrrev_b32_e32 v180, 16, v1
+; GFX11-TRUE16-NEXT: v_mov_b16_e64 v180.h, v0.l
+; GFX11-TRUE16-NEXT: v_and_b32_e32 v0, 0xffff0000, v181
+; GFX11-TRUE16-NEXT: v_add_f32_e32 v0, 0x40c00000, v0
+; GFX11-TRUE16-NEXT: s_delay_alu instid0(VALU_DEP_1) | instskip(SKIP_2) | instid1(VALU_DEP_3)
+; GFX11-TRUE16-NEXT: v_bfe_u32 v1, v0, 16, 1
+; GFX11-TRUE16-NEXT: v_or_b32_e32 v2, 0x400000, v0
+; GFX11-TRUE16-NEXT: v_cmp_u_f32_e32 vcc_lo, v0, v0
+; GFX11-TRUE16-NEXT: v_add_nc_u32_e32 v1, v1, v0
+; GFX11-TRUE16-NEXT: s_delay_alu instid0(VALU_DEP_1) | instskip(NEXT) | instid1(VALU_DEP_1)
+; GFX11-TRUE16-NEXT: v_add_nc_u32_e32 v1, 0x7fff, v1
+; GFX11-TRUE16-NEXT: v_dual_cndmask_b32 v0, v1, v2 :: v_dual_lshlrev_b32 v1, 16, v181
+; GFX11-TRUE16-NEXT: s_delay_alu instid0(VALU_DEP_1) | instskip(NEXT) | instid1(VALU_DEP_2)
+; GFX11-TRUE16-NEXT: v_add_f32_e32 v1, 0x40c00000, v1
+; GFX11-TRUE16-NEXT: v_lshrrev_b32_e32 v0, 16, v0
+; GFX11-TRUE16-NEXT: s_delay_alu instid0(VALU_DEP_2) | instskip(SKIP_2) | instid1(VALU_DEP_3)
+; GFX11-TRUE16-NEXT: v_bfe_u32 v2, v1, 16, 1
+; GFX11-TRUE16-NEXT: v_or_b32_e32 v3, 0x400000, v1
+; GFX11-TRUE16-NEXT: v_cmp_u_f32_e32 vcc_lo, v1, v1
+; GFX11-TRUE16-NEXT: v_add_nc_u32_e32 v2, v2, v1
+; GFX11-TRUE16-NEXT: s_delay_alu instid0(VALU_DEP_1) | instskip(NEXT) | instid1(VALU_DEP_1)
+; GFX11-TRUE16-NEXT: v_add_nc_u32_e32 v2, 0x7fff, v2
+; GFX11-TRUE16-NEXT: v_cndmask_b32_e32 v1, v2, v3, vcc_lo
+; GFX11-TRUE16-NEXT: s_delay_alu instid0(VALU_DEP_1) | instskip(SKIP_2) | instid1(VALU_DEP_1)
+; GFX11-TRUE16-NEXT: v_lshrrev_b32_e32 v181, 16, v1
+; GFX11-TRUE16-NEXT: v_mov_b16_e64 v181.h, v0.l
+; GFX11-TRUE16-NEXT: v_and_b32_e32 v0, 0xffff0000, v182
+; GFX11-TRUE16-NEXT: v_add_f32_e32 v0, 0x40c00000, v0
+; GFX11-TRUE16-NEXT: s_delay_alu instid0(VALU_DEP_1) | instskip(SKIP_2) | instid1(VALU_DEP_3)
+; GFX11-TRUE16-NEXT: v_bfe_u32 v1, v0, 16, 1
+; GFX11-TRUE16-NEXT: v_or_b32_e32 v2, 0x400000, v0
+; GFX11-TRUE16-NEXT: v_cmp_u_f32_e32 vcc_lo, v0, v0
+; GFX11-TRUE16-NEXT: v_add_nc_u32_e32 v1, v1, v0
+; GFX11-TRUE16-NEXT: s_delay_alu instid0(VALU_DEP_1) | instskip(NEXT) | instid1(VALU_DEP_1)
+; GFX11-TRUE16-NEXT: v_add_nc_u32_e32 v1, 0x7fff, v1
+; GFX11-TRUE16-NEXT: v_cndmask_b32_e32 v0, v1, v2, vcc_lo
+; GFX11-TRUE16-NEXT: v_lshlrev_b32_e32 v1, 16, v182
+; GFX11-TRUE16-NEXT: s_delay_alu instid0(VALU_DEP_1) | instskip(NEXT) | instid1(VALU_DEP_3)
+; GFX11-TRUE16-NEXT: v_add_f32_e32 v1, 0x40c00000, v1
+; GFX11-TRUE16-NEXT: v_lshrrev_b32_e32 v0, 16, v0
+; GFX11-TRUE16-NEXT: s_delay_alu instid0(VALU_DEP_2) | instskip(SKIP_2) | instid1(VALU_DEP_3)
+; GFX11-TRUE16-NEXT: v_bfe_u32 v2, v1, 16, 1
+; GFX11-TRUE16-NEXT: v_or_b32_e32 v3, 0x400000, v1
+; GFX11-TRUE16-NEXT: v_cmp_u_f32_e32 vcc_lo, v1, v1
+; GFX11-TRUE16-NEXT: v_add_nc_u32_e32 v2, v2, v1
+; GFX11-TRUE16-NEXT: s_delay_alu instid0(VALU_DEP_1) | instskip(NEXT) | instid1(VALU_DEP_1)
+; GFX11-TRUE16-NEXT: v_add_nc_u32_e32 v2, 0x7fff, v2
+; GFX11-TRUE16-NEXT: v_cndmask_b32_e32 v1, v2, v3, vcc_lo
+; GFX11-TRUE16-NEXT: s_delay_alu instid0(VALU_DEP_1) | instskip(SKIP_2) | instid1(VALU_DEP_1)
+; GFX11-TRUE16-NEXT: v_lshrrev_b32_e32 v182, 16, v1
+; GFX11-TRUE16-NEXT: v_mov_b16_e64 v182.h, v0.l
+; GFX11-TRUE16-NEXT: v_and_b32_e32 v0, 0xffff0000, v183
+; GFX11-TRUE16-NEXT: v_add_f32_e32 v0, 0x40c00000, v0
+; GFX11-TRUE16-NEXT: s_delay_alu instid0(VALU_DEP_1) | instskip(SKIP_2) | instid1(VALU_DEP_3)
+; GFX11-TRUE16-NEXT: v_bfe_u32 v1, v0, 16, 1
+; GFX11-TRUE16-NEXT: v_or_b32_e32 v2, 0x400000, v0
+; GFX11-TRUE16-NEXT: v_cmp_u_f32_e32 vcc_lo, v0, v0
+; GFX11-TRUE16-NEXT: v_add_nc_u32_e32 v1, v1, v0
+; GFX11-TRUE16-NEXT: s_delay_alu instid0(VALU_DEP_1) | instskip(NEXT) | instid1(VALU_DEP_1)
+; GFX11-TRUE16-NEXT: v_add_nc_u32_e32 v1, 0x7fff, v1
+; GFX11-TRUE16-NEXT: v_dual_cndmask_b32 v0, v1, v2 :: v_dual_lshlrev_b32 v1, 16, v183
+; GFX11-TRUE16-NEXT: s_delay_alu instid0(VALU_DEP_1) | instskip(NEXT) | instid1(VALU_DEP_2)
+; GFX11-TRUE16-NEXT: v_lshrrev_b32_e32 v0, 16, v0
+; GFX11-TRUE16-NEXT: v_add_f32_e32 v1, 0x40c00000, v1
+; GFX11-TRUE16-NEXT: s_delay_alu instid0(VALU_DEP_1) | instskip(SKIP_2) | instid1(VALU_DEP_3)
+; GFX11-TRUE16-NEXT: v_bfe_u32 v2, v1, 16, 1
+; GFX11-TRUE16-NEXT: v_or_b32_e32 v3, 0x400000, v1
+; GFX11-TRUE16-NEXT: v_cmp_u_f32_e32 vcc_lo, v1, v1
+; GFX11-TRUE16-NEXT: v_add_nc_u32_e32 v2, v2, v1
+; GFX11-TRUE16-NEXT: s_delay_alu instid0(VALU_DEP_1) | instskip(NEXT) | instid1(VALU_DEP_1)
+; GFX11-TRUE16-NEXT: v_add_nc_u32_e32 v2, 0x7fff, v2
+; GFX11-TRUE16-NEXT: v_cndmask_b32_e32 v1, v2, v3, vcc_lo
+; GFX11-TRUE16-NEXT: s_delay_alu instid0(VALU_DEP_1) | instskip(SKIP_2) | instid1(VALU_DEP_1)
+; GFX11-TRUE16-NEXT: v_lshrrev_b32_e32 v183, 16, v1
+; GFX11-TRUE16-NEXT: v_mov_b16_e64 v183.h, v0.l
+; GFX11-TRUE16-NEXT: v_and_b32_e32 v0, 0xffff0000, v168
+; GFX11-TRUE16-NEXT: v_add_f32_e32 v0, 0x40c00000, v0
+; GFX11-TRUE16-NEXT: s_delay_alu instid0(VALU_DEP_1) | instskip(SKIP_2) | instid1(VALU_DEP_3)
+; GFX11-TRUE16-NEXT: v_bfe_u32 v1, v0, 16, 1
+; GFX11-TRUE16-NEXT: v_or_b32_e32 v2, 0x400000, v0
+; GFX11-TRUE16-NEXT: v_cmp_u_f32_e32 vcc_lo, v0, v0
+; GFX11-TRUE16-NEXT: v_add_nc_u32_e32 v1, v1, v0
+; GFX11-TRUE16-NEXT: s_delay_alu instid0(VALU_DEP_1) | instskip(NEXT) | instid1(VALU_DEP_1)
+; GFX11-TRUE16-NEXT: v_add_nc_u32_e32 v1, 0x7fff, v1
+; GFX11-TRUE16-NEXT: v_dual_cndmask_b32 v0, v1, v2 :: v_dual_lshlrev_b32 v1, 16, v168
+; GFX11-TRUE16-NEXT: s_delay_alu instid0(VALU_DEP_1) | instskip(NEXT) | instid1(VALU_DEP_2)
+; GFX11-TRUE16-NEXT: v_lshrrev_b32_e32 v0, 16, v0
+; GFX11-TRUE16-NEXT: v_add_f32_e32 v1, 0x40c00000, v1
+; GFX11-TRUE16-NEXT: s_delay_alu instid0(VALU_DEP_1) | instskip(SKIP_2) | instid1(VALU_DEP_3)
+; GFX11-TRUE16-NEXT: v_bfe_u32 v2, v1, 16, 1
+; GFX11-TRUE16-NEXT: v_or_b32_e32 v3, 0x400000, v1
+; GFX11-TRUE16-NEXT: v_cmp_u_f32_e32 vcc_lo, v1, v1
+; GFX11-TRUE16-NEXT: v_add_nc_u32_e32 v2, v2, v1
+; GFX11-TRUE16-NEXT: s_delay_alu instid0(VALU_DEP_1) | instskip(NEXT) | instid1(VALU_DEP_1)
+; GFX11-TRUE16-NEXT: v_add_nc_u32_e32 v2, 0x7fff, v2
+; GFX11-TRUE16-NEXT: v_cndmask_b32_e32 v1, v2, v3, vcc_lo
+; GFX11-TRUE16-NEXT: s_delay_alu instid0(VALU_DEP_1) | instskip(SKIP_2) | instid1(VALU_DEP_1)
+; GFX11-TRUE16-NEXT: v_lshrrev_b32_e32 v168, 16, v1
+; GFX11-TRUE16-NEXT: v_mov_b16_e64 v168.h, v0.l
+; GFX11-TRUE16-NEXT: v_and_b32_e32 v0, 0xffff0000, v169
+; GFX11-TRUE16-NEXT: v_add_f32_e32 v0, 0x40c00000, v0
+; GFX11-TRUE16-NEXT: s_delay_alu instid0(VALU_DEP_1) | instskip(SKIP_2) | instid1(VALU_DEP_3)
+; GFX11-TRUE16-NEXT: v_bfe_u32 v1, v0, 16, 1
+; GFX11-TRUE16-NEXT: v_or_b32_e32 v2, 0x400000, v0
+; GFX11-TRUE16-NEXT: v_cmp_u_f32_e32 vcc_lo, v0, v0
+; GFX11-TRUE16-NEXT: v_add_nc_u32_e32 v1, v1, v0
+; GFX11-TRUE16-NEXT: s_delay_alu instid0(VALU_DEP_1) | instskip(NEXT) | instid1(VALU_DEP_1)
+; GFX11-TRUE16-NEXT: v_add_nc_u32_e32 v1, 0x7fff, v1
+; GFX11-TRUE16-NEXT: v_dual_cndmask_b32 v0, v1, v2 :: v_dual_lshlrev_b32 v1, 16, v169
+; GFX11-TRUE16-NEXT: s_delay_alu instid0(VALU_DEP_1) | instskip(NEXT) | instid1(VALU_DEP_2)
+; GFX11-TRUE16-NEXT: v_lshrrev_b32_e32 v0, 16, v0
+; GFX11-TRUE16-NEXT: v_add_f32_e32 v1, 0x40c00000, v1
+; GFX11-TRUE16-NEXT: s_delay_alu instid0(VALU_DEP_1) | instskip(SKIP_2) | instid1(VALU_DEP_3)
+; GFX11-TRUE16-NEXT: v_bfe_u32 v2, v1, 16, 1
+; GFX11-TRUE16-NEXT: v_or_b32_e32 v3, 0x400000, v1
+; GFX11-TRUE16-NEXT: v_cmp_u_f32_e32 vcc_lo, v1, v1
+; GFX11-TRUE16-NEXT: v_add_nc_u32_e32 v2, v2, v1
+; GFX11-TRUE16-NEXT: s_delay_alu instid0(VALU_DEP_1) | instskip(NEXT) | instid1(VALU_DEP_1)
+; GFX11-TRUE16-NEXT: v_add_nc_u32_e32 v2, 0x7fff, v2
+; GFX11-TRUE16-NEXT: v_cndmask_b32_e32 v1, v2, v3, vcc_lo
+; GFX11-TRUE16-NEXT: s_delay_alu instid0(VALU_DEP_1) | instskip(SKIP_2) | instid1(VALU_DEP_1)
+; GFX11-TRUE16-NEXT: v_lshrrev_b32_e32 v169, 16, v1
+; GFX11-TRUE16-NEXT: v_mov_b16_e64 v169.h, v0.l
+; GFX11-TRUE16-NEXT: v_and_b32_e32 v0, 0xffff0000, v170
+; GFX11-TRUE16-NEXT: v_add_f32_e32 v0, 0x40c00000, v0
+; GFX11-TRUE16-NEXT: s_delay_alu instid0(VALU_DEP_1) | instskip(SKIP_2) | instid1(VALU_DEP_3)
+; GFX11-TRUE16-NEXT: v_bfe_u32 v1, v0, 16, 1
+; GFX11-TRUE16-NEXT: v_or_b32_e32 v2, 0x400000, v0
+; GFX11-TRUE16-NEXT: v_cmp_u_f32_e32 vcc_lo, v0, v0
+; GFX11-TRUE16-NEXT: v_add_nc_u32_e32 v1, v1, v0
+; GFX11-TRUE16-NEXT: s_delay_alu instid0(VALU_DEP_1) | instskip(NEXT) | instid1(VALU_DEP_1)
+; GFX11-TRUE16-NEXT: v_add_nc_u32_e32 v1, 0x7fff, v1
+; GFX11-TRUE16-NEXT: v_cndmask_b32_e32 v0, v1, v2, vcc_lo
+; GFX11-TRUE16-NEXT: v_lshlrev_b32_e32 v1, 16, v170
+; GFX11-TRUE16-NEXT: s_delay_alu instid0(VALU_DEP_1) | instskip(NEXT) | instid1(VALU_DEP_3)
+; GFX11-TRUE16-NEXT: v_add_f32_e32 v1, 0x40c00000, v1
+; GFX11-TRUE16-NEXT: v_lshrrev_b32_e32 v0, 16, v0
+; GFX11-TRUE16-NEXT: s_delay_alu instid0(VALU_DEP_2) | instskip(SKIP_2) | instid1(VALU_DEP_3)
+; GFX11-TRUE16-NEXT: v_bfe_u32 v2, v1, 16, 1
+; GFX11-TRUE16-NEXT: v_or_b32_e32 v3, 0x400000, v1
+; GFX11-TRUE16-NEXT: v_cmp_u_f32_e32 vcc_lo, v1, v1
+; GFX11-TRUE16-NEXT: v_add_nc_u32_e32 v2, v2, v1
+; GFX11-TRUE16-NEXT: s_delay_alu instid0(VALU_DEP_1) | instskip(NEXT) | instid1(VALU_DEP_1)
+; GFX11-TRUE16-NEXT: v_add_nc_u32_e32 v2, 0x7fff, v2
+; GFX11-TRUE16-NEXT: v_cndmask_b32_e32 v1, v2, v3, vcc_lo
+; GFX11-TRUE16-NEXT: s_delay_alu instid0(VALU_DEP_1) | instskip(SKIP_2) | instid1(VALU_DEP_1)
+; GFX11-TRUE16-NEXT: v_lshrrev_b32_e32 v170, 16, v1
+; GFX11-TRUE16-NEXT: v_mov_b16_e64 v170.h, v0.l
+; GFX11-TRUE16-NEXT: v_and_b32_e32 v0, 0xffff0000, v171
+; GFX11-TRUE16-NEXT: v_add_f32_e32 v0, 0x40c00000, v0
+; GFX11-TRUE16-NEXT: s_delay_alu instid0(VALU_DEP_1) | instskip(SKIP_2) | instid1(VALU_DEP_3)
+; GFX11-TRUE16-NEXT: v_bfe_u32 v1, v0, 16, 1
+; GFX11-TRUE16-NEXT: v_or_b32_e32 v2, 0x400000, v0
+; GFX11-TRUE16-NEXT: v_cmp_u_f32_e32 vcc_lo, v0, v0
+; GFX11-TRUE16-NEXT: v_add_nc_u32_e32 v1, v1, v0
+; GFX11-TRUE16-NEXT: s_delay_alu instid0(VALU_DEP_1) | instskip(NEXT) | instid1(VALU_DEP_1)
+; GFX11-TRUE16-NEXT: v_add_nc_u32_e32 v1, 0x7fff, v1
+; GFX11-TRUE16-NEXT: v_dual_cndmask_b32 v0, v1, v2 :: v_dual_lshlrev_b32 v1, 16, v171
+; GFX11-TRUE16-NEXT: s_delay_alu instid0(VALU_DEP_1) | instskip(NEXT) | instid1(VALU_DEP_2)
+; GFX11-TRUE16-NEXT: v_lshrrev_b32_e32 v0, 16, v0
+; GFX11-TRUE16-NEXT: v_add_f32_e32 v1, 0x40c00000, v1
+; GFX11-TRUE16-NEXT: s_delay_alu instid0(VALU_DEP_1) | instskip(SKIP_2) | instid1(VALU_DEP_3)
+; GFX11-TRUE16-NEXT: v_bfe_u32 v2, v1, 16, 1
+; GFX11-TRUE16-NEXT: v_or_b32_e32 v3, 0x400000, v1
+; GFX11-TRUE16-NEXT: v_cmp_u_f32_e32 vcc_lo, v1, v1
+; GFX11-TRUE16-NEXT: v_add_nc_u32_e32 v2, v2, v1
+; GFX11-TRUE16-NEXT: s_delay_alu instid0(VALU_DEP_1) | instskip(NEXT) | instid1(VALU_DEP_1)
+; GFX11-TRUE16-NEXT: v_add_nc_u32_e32 v2, 0x7fff, v2
+; GFX11-TRUE16-NEXT: v_cndmask_b32_e32 v1, v2, v3, vcc_lo
+; GFX11-TRUE16-NEXT: s_delay_alu instid0(VALU_DEP_1) | instskip(SKIP_2) | instid1(VALU_DEP_1)
+; GFX11-TRUE16-NEXT: v_lshrrev_b32_e32 v171, 16, v1
+; GFX11-TRUE16-NEXT: v_mov_b16_e64 v171.h, v0.l
+; GFX11-TRUE16-NEXT: v_and_b32_e32 v0, 0xffff0000, v172
+; GFX11-TRUE16-NEXT: v_add_f32_e32 v0, 0x40c00000, v0
+; GFX11-TRUE16-NEXT: s_delay_alu instid0(VALU_DEP_1) | instskip(SKIP_2) | instid1(VALU_DEP_3)
+; GFX11-TRUE16-NEXT: v_bfe_u32 v1, v0, 16, 1
+; GFX11-TRUE16-NEXT: v_or_b32_e32 v2, 0x400000, v0
+; GFX11-TRUE16-NEXT: v_cmp_u_f32_e32 vcc_lo, v0, v0
+; GFX11-TRUE16-NEXT: v_add_nc_u32_e32 v1, v1, v0
+; GFX11-TRUE16-NEXT: s_delay_alu instid0(VALU_DEP_1) | instskip(NEXT) | instid1(VALU_DEP_1)
+; GFX11-TRUE16-NEXT: v_add_nc_u32_e32 v1, 0x7fff, v1
+; GFX11-TRUE16-NEXT: v_dual_cndmask_b32 v0, v1, v2 :: v_dual_lshlrev_b32 v1, 16, v172
+; GFX11-TRUE16-NEXT: s_delay_alu instid0(VALU_DEP_1) | instskip(NEXT) | instid1(VALU_DEP_2)
+; GFX11-TRUE16-NEXT: v_lshrrev_b32_e32 v0, 16, v0
+; GFX11-TRUE16-NEXT: v_add_f32_e32 v1, 0x40c00000, v1
+; GFX11-TRUE16-NEXT: s_delay_alu instid0(VALU_DEP_1) | instskip(SKIP_2) | instid1(VALU_DEP_3)
+; GFX11-TRUE16-NEXT: v_bfe_u32 v2, v1, 16, 1
+; GFX11-TRUE16-NEXT: v_or_b32_e32 v3, 0x400000, v1
+; GFX11-TRUE16-NEXT: v_cmp_u_f32_e32 vcc_lo, v1, v1
+; GFX11-TRUE16-NEXT: v_add_nc_u32_e32 v2, v2, v1
+; GFX11-TRUE16-NEXT: s_delay_alu instid0(VALU_DEP_1) | instskip(NEXT) | instid1(VALU_DEP_1)
+; GFX11-TRUE16-NEXT: v_add_nc_u32_e32 v2, 0x7fff, v2
+; GFX11-TRUE16-NEXT: v_cndmask_b32_e32 v1, v2, v3, vcc_lo
+; GFX11-TRUE16-NEXT: s_delay_alu instid0(VALU_DEP_1) | instskip(SKIP_2) | instid1(VALU_DEP_1)
+; GFX11-TRUE16-NEXT: v_lshrrev_b32_e32 v172, 16, v1
+; GFX11-TRUE16-NEXT: v_mov_b16_e64 v172.h, v0.l
+; GFX11-TRUE16-NEXT: v_and_b32_e32 v0, 0xffff0000, v173
+; GFX11-TRUE16-NEXT: v_add_f32_e32 v0, 0x40c00000, v0
+; GFX11-TRUE16-NEXT: s_delay_alu instid0(VALU_DEP_1) | instskip(SKIP_2) | instid1(VALU_DEP_3)
+; GFX11-TRUE16-NEXT: v_bfe_u32 v1, v0, 16, 1
+; GFX11-TRUE16-NEXT: v_or_b32_e32 v2, 0x400000, v0
+; GFX11-TRUE16-NEXT: v_cmp_u_f32_e32 vcc_lo, v0, v0
+; GFX11-TRUE16-NEXT: v_add_nc_u32_e32 v1, v1, v0
+; GFX11-TRUE16-NEXT: s_delay_alu instid0(VALU_DEP_1) | instskip(NEXT) | instid1(VALU_DEP_1)
+; GFX11-TRUE16-NEXT: v_add_nc_u32_e32 v1, 0x7fff, v1
+; GFX11-TRUE16-NEXT: v_dual_cndmask_b32 v0, v1, v2 :: v_dual_lshlrev_b32 v1, 16, v173
+; GFX11-TRUE16-NEXT: s_delay_alu instid0(VALU_DEP_1) | instskip(NEXT) | instid1(VALU_DEP_2)
+; GFX11-TRUE16-NEXT: v_lshrrev_b32_e32 v0, 16, v0
+; GFX11-TRUE16-NEXT: v_add_f32_e32 v1, 0x40c00000, v1
+; GFX11-TRUE16-NEXT: s_delay_alu instid0(VALU_DEP_1) | instskip(SKIP_2) | instid1(VALU_DEP_3)
+; GFX11-TRUE16-NEXT: v_bfe_u32 v2, v1, 16, 1
+; GFX11-TRUE16-NEXT: v_or_b32_e32 v3, 0x400000, v1
+; GFX11-TRUE16-NEXT: v_cmp_u_f32_e32 vcc_lo, v1, v1
+; GFX11-TRUE16-NEXT: v_add_nc_u32_e32 v2, v2, v1
+; GFX11-TRUE16-NEXT: s_delay_alu instid0(VALU_DEP_1) | instskip(NEXT) | instid1(VALU_DEP_1)
+; GFX11-TRUE16-NEXT: v_add_nc_u32_e32 v2, 0x7fff, v2
+; GFX11-TRUE16-NEXT: v_cndmask_b32_e32 v1, v2, v3, vcc_lo
+; GFX11-TRUE16-NEXT: s_delay_alu instid0(VALU_DEP_1) | instskip(SKIP_2) | instid1(VALU_DEP_1)
+; GFX11-TRUE16-NEXT: v_lshrrev_b32_e32 v173, 16, v1
+; GFX11-TRUE16-NEXT: v_mov_b16_e64 v173.h, v0.l
+; GFX11-TRUE16-NEXT: v_and_b32_e32 v0, 0xffff0000, v174
+; GFX11-TRUE16-NEXT: v_add_f32_e32 v0, 0x40c00000, v0
+; GFX11-TRUE16-NEXT: s_delay_alu instid0(VALU_DEP_1) | instskip(SKIP_2) | instid1(VALU_DEP_3)
+; GFX11-TRUE16-NEXT: v_bfe_u32 v1, v0, 16, 1
+; GFX11-TRUE16-NEXT: v_or_b32_e32 v2, 0x400000, v0
+; GFX11-TRUE16-NEXT: v_cmp_u_f32_e32 vcc_lo, v0, v0
+; GFX11-TRUE16-NEXT: v_add_nc_u32_e32 v1, v1, v0
+; GFX11-TRUE16-NEXT: s_delay_alu instid0(VALU_DEP_1) | instskip(NEXT) | instid1(VALU_DEP_1)
+; GFX11-TRUE16-NEXT: v_add_nc_u32_e32 v1, 0x7fff, v1
+; GFX11-TRUE16-NEXT: v_cndmask_b32_e32 v0, v1, v2, vcc_lo
+; GFX11-TRUE16-NEXT: v_lshlrev_b32_e32 v1, 16, v174
+; GFX11-TRUE16-NEXT: s_delay_alu instid0(VALU_DEP_1) | instskip(NEXT) | instid1(VALU_DEP_3)
+; GFX11-TRUE16-NEXT: v_add_f32_e32 v1, 0x40c00000, v1
+; GFX11-TRUE16-NEXT: v_lshrrev_b32_e32 v0, 16, v0
+; GFX11-TRUE16-NEXT: s_delay_alu instid0(VALU_DEP_2) | instskip(SKIP_2) | instid1(VALU_DEP_3)
+; GFX11-TRUE16-NEXT: v_bfe_u32 v2, v1, 16, 1
+; GFX11-TRUE16-NEXT: v_or_b32_e32 v3, 0x400000, v1
+; GFX11-TRUE16-NEXT: v_cmp_u_f32_e32 vcc_lo, v1, v1
+; GFX11-TRUE16-NEXT: v_add_nc_u32_e32 v2, v2, v1
+; GFX11-TRUE16-NEXT: s_delay_alu instid0(VALU_DEP_1) | instskip(NEXT) | instid1(VALU_DEP_1)
+; GFX11-TRUE16-NEXT: v_add_nc_u32_e32 v2, 0x7fff, v2
+; GFX11-TRUE16-NEXT: v_cndmask_b32_e32 v1, v2, v3, vcc_lo
+; GFX11-TRUE16-NEXT: s_delay_alu instid0(VALU_DEP_1)
+; GFX11-TRUE16-NEXT: v_lshrrev_b32_e32 v174, 16, v1
+; GFX11-TRUE16-NEXT: v_mov_b16_e64 v174.h, v0.l
+; GFX11-TRUE16-NEXT: .LBB79_3: ; %end
+; GFX11-TRUE16-NEXT: v_dual_mov_b32 v4, v125 :: v_dual_mov_b32 v5, v120
+; GFX11-TRUE16-NEXT: v_dual_mov_b32 v6, v114 :: v_dual_mov_b32 v7, v107
+; GFX11-TRUE16-NEXT: v_dual_mov_b32 v8, v99 :: v_dual_mov_b32 v9, v90
+; GFX11-TRUE16-NEXT: v_dual_mov_b32 v12, v57 :: v_dual_mov_b32 v13, v44
+; GFX11-TRUE16-NEXT: v_dual_mov_b32 v14, v30 :: v_dual_mov_b32 v17, v173
+; GFX11-TRUE16-NEXT: v_dual_mov_b32 v16, v174 :: v_dual_mov_b32 v19, v171
+; GFX11-TRUE16-NEXT: v_dual_mov_b32 v18, v172 :: v_dual_mov_b32 v21, v169
+; GFX11-TRUE16-NEXT: v_dual_mov_b32 v20, v170 :: v_dual_mov_b32 v23, v183
+; GFX11-TRUE16-NEXT: v_dual_mov_b32 v22, v168 :: v_dual_mov_b32 v25, v181
+; GFX11-TRUE16-NEXT: s_clause 0x1f
+; GFX11-TRUE16-NEXT: scratch_load_b32 v174, off, s32
+; GFX11-TRUE16-NEXT: scratch_load_b32 v173, off, s32 offset:4
+; GFX11-TRUE16-NEXT: scratch_load_b32 v172, off, s32 offset:8
+; GFX11-TRUE16-NEXT: scratch_load_b32 v171, off, s32 offset:12
+; GFX11-TRUE16-NEXT: scratch_load_b32 v170, off, s32 offset:16
+; GFX11-TRUE16-NEXT: scratch_load_b32 v169, off, s32 offset:20
+; GFX11-TRUE16-NEXT: scratch_load_b32 v168, off, s32 offset:24
+; GFX11-TRUE16-NEXT: scratch_load_b32 v159, off, s32 offset:28
+; GFX11-TRUE16-NEXT: scratch_load_b32 v158, off, s32 offset:32
+; GFX11-TRUE16-NEXT: scratch_load_b32 v157, off, s32 offset:36
+; GFX11-TRUE16-NEXT: scratch_load_b32 v156, off, s32 offset:40
+; GFX11-TRUE16-NEXT: scratch_load_b32 v155, off, s32 offset:44
+; GFX11-TRUE16-NEXT: scratch_load_b32 v154, off, s32 offset:48
+; GFX11-TRUE16-NEXT: scratch_load_b32 v153, off, s32 offset:52
+; GFX11-TRUE16-NEXT: scratch_load_b32 v152, off, s32 offset:56
+; GFX11-TRUE16-NEXT: scratch_load_b32 v143, off, s32 offset:60
+; GFX11-TRUE16-NEXT: scratch_load_b32 v142, off, s32 offset:64
+; GFX11-TRUE16-NEXT: scratch_load_b32 v141, off, s32 offset:68
+; GFX11-TRUE16-NEXT: scratch_load_b32 v140, off, s32 offset:72
+; GFX11-TRUE16-NEXT: scratch_load_b32 v139, off, s32 offset:76
+; GFX11-TRUE16-NEXT: scratch_load_b32 v138, off, s32 offset:80
+; GFX11-TRUE16-NEXT: scratch_load_b32 v137, off, s32 offset:84
+; GFX11-TRUE16-NEXT: scratch_load_b32 v136, off, s32 offset:88
+; GFX11-TRUE16-NEXT: scratch_load_b32 v127, off, s32 offset:92
+; GFX11-TRUE16-NEXT: scratch_load_b32 v126, off, s32 offset:96
+; GFX11-TRUE16-NEXT: scratch_load_b32 v125, off, s32 offset:100
+; GFX11-TRUE16-NEXT: scratch_load_b32 v124, off, s32 offset:104
+; GFX11-TRUE16-NEXT: scratch_load_b32 v123, off, s32 offset:108
+; GFX11-TRUE16-NEXT: scratch_load_b32 v122, off, s32 offset:112
+; GFX11-TRUE16-NEXT: scratch_load_b32 v121, off, s32 offset:116
+; GFX11-TRUE16-NEXT: scratch_load_b32 v120, off, s32 offset:120
+; GFX11-TRUE16-NEXT: scratch_load_b32 v111, off, s32 offset:124
+; GFX11-TRUE16-NEXT: s_clause 0x1f
+; GFX11-TRUE16-NEXT: scratch_load_b32 v110, off, s32 offset:128
+; GFX11-TRUE16-NEXT: scratch_load_b32 v109, off, s32 offset:132
+; GFX11-TRUE16-NEXT: scratch_load_b32 v108, off, s32 offset:136
+; GFX11-TRUE16-NEXT: scratch_load_b32 v107, off, s32 offset:140
+; GFX11-TRUE16-NEXT: scratch_load_b32 v106, off, s32 offset:144
+; GFX11-TRUE16-NEXT: scratch_load_b32 v105, off, s32 offset:148
+; GFX11-TRUE16-NEXT: scratch_load_b32 v104, off, s32 offset:152
+; GFX11-TRUE16-NEXT: scratch_load_b32 v95, off, s32 offset:156
+; GFX11-TRUE16-NEXT: scratch_load_b32 v94, off, s32 offset:160
+; GFX11-TRUE16-NEXT: scratch_load_b32 v93, off, s32 offset:164
+; GFX11-TRUE16-NEXT: scratch_load_b32 v92, off, s32 offset:168
+; GFX11-TRUE16-NEXT: scratch_load_b32 v91, off, s32 offset:172
+; GFX11-TRUE16-NEXT: scratch_load_b32 v90, off, s32 offset:176
+; GFX11-TRUE16-NEXT: scratch_load_b32 v89, off, s32 offset:180
+; GFX11-TRUE16-NEXT: scratch_load_b32 v88, off, s32 offset:184
+; GFX11-TRUE16-NEXT: scratch_load_b32 v79, off, s32 offset:188
+; GFX11-TRUE16-NEXT: scratch_load_b32 v78, off, s32 offset:192
+; GFX11-TRUE16-NEXT: scratch_load_b32 v77, off, s32 offset:196
+; GFX11-TRUE16-NEXT: scratch_load_b32 v76, off, s32 offset:200
+; GFX11-TRUE16-NEXT: scratch_load_b32 v75, off, s32 offset:204
+; GFX11-TRUE16-NEXT: scratch_load_b32 v74, off, s32 offset:208
+; GFX11-TRUE16-NEXT: scratch_load_b32 v73, off, s32 offset:212
+; GFX11-TRUE16-NEXT: scratch_load_b32 v72, off, s32 offset:216
+; GFX11-TRUE16-NEXT: scratch_load_b32 v63, off, s32 offset:220
+; GFX11-TRUE16-NEXT: scratch_load_b32 v62, off, s32 offset:224
+; GFX11-TRUE16-NEXT: scratch_load_b32 v61, off, s32 offset:228
+; GFX11-TRUE16-NEXT: scratch_load_b32 v60, off, s32 offset:232
+; GFX11-TRUE16-NEXT: scratch_load_b32 v59, off, s32 offset:236
+; GFX11-TRUE16-NEXT: scratch_load_b32 v58, off, s32 offset:240
+; GFX11-TRUE16-NEXT: scratch_load_b32 v57, off, s32 offset:244
+; GFX11-TRUE16-NEXT: scratch_load_b32 v56, off, s32 offset:248
+; GFX11-TRUE16-NEXT: scratch_load_b32 v47, off, s32 offset:252
+; GFX11-TRUE16-NEXT: s_clause 0x6
+; GFX11-TRUE16-NEXT: scratch_load_b32 v46, off, s32 offset:256
+; GFX11-TRUE16-NEXT: scratch_load_b32 v45, off, s32 offset:260
+; GFX11-TRUE16-NEXT: scratch_load_b32 v44, off, s32 offset:264
+; GFX11-TRUE16-NEXT: scratch_load_b32 v43, off, s32 offset:268
+; GFX11-TRUE16-NEXT: scratch_load_b32 v42, off, s32 offset:272
+; GFX11-TRUE16-NEXT: scratch_load_b32 v41, off, s32 offset:276
+; GFX11-TRUE16-NEXT: scratch_load_b32 v40, off, s32 offset:280
+; GFX11-TRUE16-NEXT: v_dual_mov_b32 v0, v135 :: v_dual_mov_b32 v1, v134
+; GFX11-TRUE16-NEXT: v_dual_mov_b32 v2, v132 :: v_dual_mov_b32 v3, v129
+; GFX11-TRUE16-NEXT: v_dual_mov_b32 v10, v80 :: v_dual_mov_b32 v11, v69
+; GFX11-TRUE16-NEXT: v_dual_mov_b32 v24, v182 :: v_dual_mov_b32 v27, v179
+; GFX11-TRUE16-NEXT: v_dual_mov_b32 v26, v180 :: v_dual_mov_b32 v29, v177
+; GFX11-TRUE16-NEXT: v_dual_mov_b32 v28, v178 :: v_dual_mov_b32 v31, v167
+; GFX11-TRUE16-NEXT: v_mov_b32_e32 v30, v176
+; GFX11-TRUE16-NEXT: s_waitcnt vmcnt(0)
+; GFX11-TRUE16-NEXT: s_setpc_b64 s[30:31]
+; GFX11-TRUE16-NEXT: .LBB79_4:
+; GFX11-TRUE16-NEXT: ; implicit-def: $vgpr0_vgpr1_vgpr2_vgpr3_vgpr4_vgpr5_vgpr6_vgpr7_vgpr8_vgpr9_vgpr10_vgpr11_vgpr12_vgpr13_vgpr14_vgpr15_vgpr16_vgpr17_vgpr18_vgpr19_vgpr20_vgpr21_vgpr22_vgpr23_vgpr24_vgpr25_vgpr26_vgpr27_vgpr28_vgpr29_vgpr30_vgpr31
+; GFX11-TRUE16-NEXT: ; implicit-def: $vgpr16_vgpr17_vgpr18_vgpr19_vgpr20_vgpr21_vgpr22_vgpr23_vgpr24_vgpr25_vgpr26_vgpr27_vgpr28_vgpr29_vgpr30_vgpr31_vgpr32_vgpr33_vgpr34_vgpr35_vgpr36_vgpr37_vgpr38_vgpr39_vgpr40_vgpr41_vgpr42_vgpr43_vgpr44_vgpr45_vgpr46_vgpr47
+; GFX11-TRUE16-NEXT: ; implicit-def: $vgpr31_vgpr32_vgpr33_vgpr34_vgpr35_vgpr36_vgpr37_vgpr38_vgpr39_vgpr40_vgpr41_vgpr42_vgpr43_vgpr44_vgpr45_vgpr46_vgpr47_vgpr48_vgpr49_vgpr50_vgpr51_vgpr52_vgpr53_vgpr54_vgpr55_vgpr56_vgpr57_vgpr58_vgpr59_vgpr60_vgpr61_vgpr62
+; GFX11-TRUE16-NEXT: ; implicit-def: $vgpr45_vgpr46_vgpr47_vgpr48_vgpr49_vgpr50_vgpr51_vgpr52_vgpr53_vgpr54_vgpr55_vgpr56_vgpr57_vgpr58_vgpr59_vgpr60_vgpr61_vgpr62_vgpr63_vgpr64_vgpr65_vgpr66_vgpr67_vgpr68_vgpr69_vgpr70_vgpr71_vgpr72_vgpr73_vgpr74_vgpr75_vgpr76
+; GFX11-TRUE16-NEXT: ; implicit-def: $vgpr58_vgpr59_vgpr60_vgpr61_vgpr62_vgpr63_vgpr64_vgpr65_vgpr66_vgpr67_vgpr68_vgpr69_vgpr70_vgpr71_vgpr72_vgpr73_vgpr74_vgpr75_vgpr76_vgpr77_vgpr78_vgpr79_vgpr80_vgpr81_vgpr82_vgpr83_vgpr84_vgpr85_vgpr86_vgpr87_vgpr88_vgpr89
+; GFX11-TRUE16-NEXT: ; implicit-def: $vgpr70_vgpr71_vgpr72_vgpr73_vgpr74_vgpr75_vgpr76_vgpr77_vgpr78_vgpr79_vgpr80_vgpr81_vgpr82_vgpr83_vgpr84_vgpr85_vgpr86_vgpr87_vgpr88_vgpr89_vgpr90_vgpr91_vgpr92_vgpr93_vgpr94_vgpr95_vgpr96_vgpr97_vgpr98_vgpr99_vgpr100_vgpr101
+; GFX11-TRUE16-NEXT: ; implicit-def: $vgpr81_vgpr82_vgpr83_vgpr84_vgpr85_vgpr86_vgpr87_vgpr88_vgpr89_vgpr90_vgpr91_vgpr92_vgpr93_vgpr94_vgpr95_vgpr96_vgpr97_vgpr98_vgpr99_vgpr100_vgpr101_vgpr102_vgpr103_vgpr104_vgpr105_vgpr106_vgpr107_vgpr108_vgpr109_vgpr110_vgpr111_vgpr112
+; GFX11-TRUE16-NEXT: ; implicit-def: $vgpr91_vgpr92_vgpr93_vgpr94_vgpr95_vgpr96_vgpr97_vgpr98_vgpr99_vgpr100_vgpr101_vgpr102_vgpr103_vgpr104_vgpr105_vgpr106_vgpr107_vgpr108_vgpr109_vgpr110_vgpr111_vgpr112_vgpr113_vgpr114_vgpr115_vgpr116_vgpr117_vgpr118_vgpr119_vgpr120_vgpr121_vgpr122
+; GFX11-TRUE16-NEXT: ; implicit-def: $vgpr100_vgpr101_vgpr102_vgpr103_vgpr104_vgpr105_vgpr106_vgpr107_vgpr108_vgpr109_vgpr110_vgpr111_vgpr112_vgpr113_vgpr114_vgpr115_vgpr116_vgpr117_vgpr118_vgpr119_vgpr120_vgpr121_vgpr122_vgpr123_vgpr124_vgpr125_vgpr126_vgpr127_vgpr128_vgpr129_vgpr130_vgpr131
+; GFX11-TRUE16-NEXT: ; implicit-def: $vgpr108_vgpr109_vgpr110_vgpr111_vgpr112_vgpr113_vgpr114_vgpr115_vgpr116_vgpr117_vgpr118_vgpr119_vgpr120_vgpr121_vgpr122_vgpr123_vgpr124_vgpr125_vgpr126_vgpr127_vgpr128_vgpr129_vgpr130_vgpr131_vgpr132_vgpr133_vgpr134_vgpr135_vgpr136_vgpr137_vgpr138_vgpr139
+; GFX11-TRUE16-NEXT: ; implicit-def: $vgpr115_vgpr116_vgpr117_vgpr118_vgpr119_vgpr120_vgpr121_vgpr122_vgpr123_vgpr124_vgpr125_vgpr126_vgpr127_vgpr128_vgpr129_vgpr130_vgpr131_vgpr132_vgpr133_vgpr134_vgpr135_vgpr136_vgpr137_vgpr138_vgpr139_vgpr140_vgpr141_vgpr142_vgpr143_vgpr144_vgpr145_vgpr146
+; GFX11-TRUE16-NEXT: ; implicit-def: $vgpr121_vgpr122_vgpr123_vgpr124_vgpr125_vgpr126_vgpr127_vgpr128_vgpr129_vgpr130_vgpr131_vgpr132_vgpr133_vgpr134_vgpr135_vgpr136_vgpr137_vgpr138_vgpr139_vgpr140_vgpr141_vgpr142_vgpr143_vgpr144_vgpr145_vgpr146_vgpr147_vgpr148_vgpr149_vgpr150_vgpr151_vgpr152
+; GFX11-TRUE16-NEXT: ; implicit-def: $vgpr126_vgpr127_vgpr128_vgpr129_vgpr130_vgpr131_vgpr132_vgpr133_vgpr134_vgpr135_vgpr136_vgpr137_vgpr138_vgpr139_vgpr140_vgpr141_vgpr142_vgpr143_vgpr144_vgpr145_vgpr146_vgpr147_vgpr148_vgpr149_vgpr150_vgpr151_vgpr152_vgpr153_vgpr154_vgpr155_vgpr156_vgpr157
+; GFX11-TRUE16-NEXT: ; implicit-def: $vgpr130_vgpr131_vgpr132_vgpr133_vgpr134_vgpr135_vgpr136_vgpr137_vgpr138_vgpr139_vgpr140_vgpr141_vgpr142_vgpr143_vgpr144_vgpr145_vgpr146_vgpr147_vgpr148_vgpr149_vgpr150_vgpr151_vgpr152_vgpr153_vgpr154_vgpr155_vgpr156_vgpr157_vgpr158_vgpr159_vgpr160_vgpr161
+; GFX11-TRUE16-NEXT: ; implicit-def: $vgpr133_vgpr134_vgpr135_vgpr136_vgpr137_vgpr138_vgpr139_vgpr140_vgpr141_vgpr142_vgpr143_vgpr144_vgpr145_vgpr146_vgpr147_vgpr148_vgpr149_vgpr150_vgpr151_vgpr152_vgpr153_vgpr154_vgpr155_vgpr156_vgpr157_vgpr158_vgpr159_vgpr160_vgpr161_vgpr162_vgpr163_vgpr164
+; GFX11-TRUE16-NEXT: ; implicit-def: $vgpr135_vgpr136_vgpr137_vgpr138_vgpr139_vgpr140_vgpr141_vgpr142_vgpr143_vgpr144_vgpr145_vgpr146_vgpr147_vgpr148_vgpr149_vgpr150_vgpr151_vgpr152_vgpr153_vgpr154_vgpr155_vgpr156_vgpr157_vgpr158_vgpr159_vgpr160_vgpr161_vgpr162_vgpr163_vgpr164_vgpr165_vgpr166
+; GFX11-TRUE16-NEXT: s_branch .LBB79_2
+;
+; GFX11-FAKE16-LABEL: bitcast_v64bf16_to_v16f64_scalar:
+; GFX11-FAKE16: ; %bb.0:
+; GFX11-FAKE16-NEXT: s_waitcnt vmcnt(0) expcnt(0) lgkmcnt(0)
+; GFX11-FAKE16-NEXT: v_cmp_ne_u32_e32 vcc_lo, 0, v14
+; GFX11-FAKE16-NEXT: s_clause 0x1f
+; GFX11-FAKE16-NEXT: scratch_store_b32 off, v40, s32 offset:288
+; GFX11-FAKE16-NEXT: scratch_store_b32 off, v41, s32 offset:284
+; GFX11-FAKE16-NEXT: scratch_store_b32 off, v42, s32 offset:280
+; GFX11-FAKE16-NEXT: scratch_store_b32 off, v43, s32 offset:276
+; GFX11-FAKE16-NEXT: scratch_store_b32 off, v44, s32 offset:272
+; GFX11-FAKE16-NEXT: scratch_store_b32 off, v45, s32 offset:268
+; GFX11-FAKE16-NEXT: scratch_store_b32 off, v46, s32 offset:264
+; GFX11-FAKE16-NEXT: scratch_store_b32 off, v47, s32 offset:260
+; GFX11-FAKE16-NEXT: scratch_store_b32 off, v56, s32 offset:256
+; GFX11-FAKE16-NEXT: scratch_store_b32 off, v57, s32 offset:252
+; GFX11-FAKE16-NEXT: scratch_store_b32 off, v58, s32 offset:248
+; GFX11-FAKE16-NEXT: scratch_store_b32 off, v59, s32 offset:244
+; GFX11-FAKE16-NEXT: scratch_store_b32 off, v60, s32 offset:240
+; GFX11-FAKE16-NEXT: scratch_store_b32 off, v61, s32 offset:236
+; GFX11-FAKE16-NEXT: scratch_store_b32 off, v62, s32 offset:232
+; GFX11-FAKE16-NEXT: scratch_store_b32 off, v63, s32 offset:228
+; GFX11-FAKE16-NEXT: scratch_store_b32 off, v72, s32 offset:224
+; GFX11-FAKE16-NEXT: scratch_store_b32 off, v73, s32 offset:220
+; GFX11-FAKE16-NEXT: scratch_store_b32 off, v74, s32 offset:216
+; GFX11-FAKE16-NEXT: scratch_store_b32 off, v75, s32 offset:212
+; GFX11-FAKE16-NEXT: scratch_store_b32 off, v76, s32 offset:208
+; GFX11-FAKE16-NEXT: scratch_store_b32 off, v77, s32 offset:204
+; GFX11-FAKE16-NEXT: scratch_store_b32 off, v78, s32 offset:200
+; GFX11-FAKE16-NEXT: scratch_store_b32 off, v79, s32 offset:196
+; GFX11-FAKE16-NEXT: scratch_store_b32 off, v88, s32 offset:192
+; GFX11-FAKE16-NEXT: scratch_store_b32 off, v89, s32 offset:188
+; GFX11-FAKE16-NEXT: scratch_store_b32 off, v90, s32 offset:184
+; GFX11-FAKE16-NEXT: scratch_store_b32 off, v91, s32 offset:180
+; GFX11-FAKE16-NEXT: scratch_store_b32 off, v92, s32 offset:176
+; GFX11-FAKE16-NEXT: scratch_store_b32 off, v93, s32 offset:172
+; GFX11-FAKE16-NEXT: scratch_store_b32 off, v94, s32 offset:168
+; GFX11-FAKE16-NEXT: scratch_store_b32 off, v95, s32 offset:164
+; GFX11-FAKE16-NEXT: s_clause 0x1f
+; GFX11-FAKE16-NEXT: scratch_store_b32 off, v104, s32 offset:160
+; GFX11-FAKE16-NEXT: scratch_store_b32 off, v105, s32 offset:156
+; GFX11-FAKE16-NEXT: scratch_store_b32 off, v106, s32 offset:152
+; GFX11-FAKE16-NEXT: scratch_store_b32 off, v107, s32 offset:148
+; GFX11-FAKE16-NEXT: scratch_store_b32 off, v108, s32 offset:144
+; GFX11-FAKE16-NEXT: scratch_store_b32 off, v109, s32 offset:140
+; GFX11-FAKE16-NEXT: scratch_store_b32 off, v110, s32 offset:136
+; GFX11-FAKE16-NEXT: scratch_store_b32 off, v111, s32 offset:132
+; GFX11-FAKE16-NEXT: scratch_store_b32 off, v120, s32 offset:128
+; GFX11-FAKE16-NEXT: scratch_store_b32 off, v121, s32 offset:124
+; GFX11-FAKE16-NEXT: scratch_store_b32 off, v122, s32 offset:120
+; GFX11-FAKE16-NEXT: scratch_store_b32 off, v123, s32 offset:116
+; GFX11-FAKE16-NEXT: scratch_store_b32 off, v124, s32 offset:112
+; GFX11-FAKE16-NEXT: scratch_store_b32 off, v125, s32 offset:108
+; GFX11-FAKE16-NEXT: scratch_store_b32 off, v126, s32 offset:104
+; GFX11-FAKE16-NEXT: scratch_store_b32 off, v127, s32 offset:100
+; GFX11-FAKE16-NEXT: scratch_store_b32 off, v136, s32 offset:96
+; GFX11-FAKE16-NEXT: scratch_store_b32 off, v137, s32 offset:92
+; GFX11-FAKE16-NEXT: scratch_store_b32 off, v138, s32 offset:88
+; GFX11-FAKE16-NEXT: scratch_store_b32 off, v139, s32 offset:84
+; GFX11-FAKE16-NEXT: scratch_store_b32 off, v140, s32 offset:80
+; GFX11-FAKE16-NEXT: scratch_store_b32 off, v141, s32 offset:76
+; GFX11-FAKE16-NEXT: scratch_store_b32 off, v142, s32 offset:72
+; GFX11-FAKE16-NEXT: scratch_store_b32 off, v143, s32 offset:68
+; GFX11-FAKE16-NEXT: scratch_store_b32 off, v152, s32 offset:64
+; GFX11-FAKE16-NEXT: scratch_store_b32 off, v153, s32 offset:60
+; GFX11-FAKE16-NEXT: scratch_store_b32 off, v154, s32 offset:56
+; GFX11-FAKE16-NEXT: scratch_store_b32 off, v155, s32 offset:52
+; GFX11-FAKE16-NEXT: scratch_store_b32 off, v156, s32 offset:48
+; GFX11-FAKE16-NEXT: scratch_store_b32 off, v157, s32 offset:44
+; GFX11-FAKE16-NEXT: scratch_store_b32 off, v158, s32 offset:40
+; GFX11-FAKE16-NEXT: scratch_store_b32 off, v159, s32 offset:36
+; GFX11-FAKE16-NEXT: s_clause 0x8
+; GFX11-FAKE16-NEXT: scratch_store_b32 off, v168, s32 offset:32
+; GFX11-FAKE16-NEXT: scratch_store_b32 off, v169, s32 offset:28
+; GFX11-FAKE16-NEXT: scratch_store_b32 off, v170, s32 offset:24
+; GFX11-FAKE16-NEXT: scratch_store_b32 off, v171, s32 offset:20
+; GFX11-FAKE16-NEXT: scratch_store_b32 off, v172, s32 offset:16
+; GFX11-FAKE16-NEXT: scratch_store_b32 off, v173, s32 offset:12
+; GFX11-FAKE16-NEXT: scratch_store_b32 off, v174, s32 offset:8
+; GFX11-FAKE16-NEXT: scratch_store_b32 off, v175, s32 offset:4
+; GFX11-FAKE16-NEXT: scratch_store_b32 off, v184, s32
+; GFX11-FAKE16-NEXT: v_dual_mov_b32 v178, v13 :: v_dual_mov_b32 v179, v12
+; GFX11-FAKE16-NEXT: v_dual_mov_b32 v180, v11 :: v_dual_mov_b32 v181, v9
+; GFX11-FAKE16-NEXT: v_dual_mov_b32 v182, v10 :: v_dual_mov_b32 v169, v7
+; GFX11-FAKE16-NEXT: v_dual_mov_b32 v170, v8 :: v_dual_mov_b32 v177, v3
+; GFX11-FAKE16-NEXT: v_dual_mov_b32 v176, v6 :: v_dual_mov_b32 v171, v4
+; GFX11-FAKE16-NEXT: v_dual_mov_b32 v174, v5 :: v_dual_mov_b32 v173, v0
+; GFX11-FAKE16-NEXT: v_dual_mov_b32 v184, v2 :: v_dual_mov_b32 v175, v1
+; GFX11-FAKE16-NEXT: v_dual_mov_b32 v183, s28 :: v_dual_mov_b32 v172, s29
+; GFX11-FAKE16-NEXT: s_mov_b32 s4, 0
+; GFX11-FAKE16-NEXT: s_and_b32 s5, vcc_lo, exec_lo
+; GFX11-FAKE16-NEXT: s_cbranch_scc0 .LBB79_4
+; GFX11-FAKE16-NEXT: ; %bb.1: ; %cmp.false
+; GFX11-FAKE16-NEXT: v_dual_mov_b32 v32, s0 :: v_dual_mov_b32 v37, s2
+; GFX11-FAKE16-NEXT: v_dual_mov_b32 v34, s1 :: v_dual_mov_b32 v41, s3
+; GFX11-FAKE16-NEXT: v_dual_mov_b32 v46, s16 :: v_dual_mov_b32 v59, s18
+; GFX11-FAKE16-NEXT: v_dual_mov_b32 v52, s17 :: v_dual_mov_b32 v67, s19
+; GFX11-FAKE16-NEXT: v_dual_mov_b32 v76, s20 :: v_dual_mov_b32 v97, s22
+; GFX11-FAKE16-NEXT: v_dual_mov_b32 v86, s21 :: v_dual_mov_b32 v109, s23
+; GFX11-FAKE16-NEXT: v_dual_mov_b32 v122, s24 :: v_dual_mov_b32 v151, s26
+; GFX11-FAKE16-NEXT: v_dual_mov_b32 v136, s25 :: v_dual_mov_b32 v15, s27
+; GFX11-FAKE16-NEXT: s_and_not1_b32 vcc_lo, exec_lo, s4
+; GFX11-FAKE16-NEXT: s_cbranch_vccnz .LBB79_3
+; GFX11-FAKE16-NEXT: .LBB79_2: ; %cmp.true
+; GFX11-FAKE16-NEXT: s_and_b32 s5, s27, 0xffff0000
+; GFX11-FAKE16-NEXT: s_lshl_b32 s4, s27, 16
+; GFX11-FAKE16-NEXT: v_add_f32_e64 v1, 0x40c00000, s5
+; GFX11-FAKE16-NEXT: v_add_f32_e64 v0, 0x40c00000, s4
+; GFX11-FAKE16-NEXT: s_lshl_b32 s6, s26, 16
+; GFX11-FAKE16-NEXT: s_and_b32 s4, s26, 0xffff0000
+; GFX11-FAKE16-NEXT: v_add_f32_e64 v5, 0x40c00000, s6
+; GFX11-FAKE16-NEXT: v_bfe_u32 v4, v1, 16, 1
+; GFX11-FAKE16-NEXT: v_bfe_u32 v2, v0, 16, 1
+; GFX11-FAKE16-NEXT: v_or_b32_e32 v7, 0x400000, v1
+; GFX11-FAKE16-NEXT: v_add_f32_e64 v3, 0x40c00000, s4
+; GFX11-FAKE16-NEXT: v_or_b32_e32 v8, 0x400000, v0
+; GFX11-FAKE16-NEXT: v_add_nc_u32_e32 v4, v4, v1
+; GFX11-FAKE16-NEXT: v_bfe_u32 v10, v5, 16, 1
+; GFX11-FAKE16-NEXT: v_cmp_u_f32_e32 vcc_lo, v0, v0
+; GFX11-FAKE16-NEXT: v_bfe_u32 v9, v3, 16, 1
+; GFX11-FAKE16-NEXT: s_lshl_b32 s7, s25, 16
+; GFX11-FAKE16-NEXT: v_add_nc_u32_e32 v4, 0x7fff, v4
+; GFX11-FAKE16-NEXT: v_add_nc_u32_e32 v2, v2, v0
+; GFX11-FAKE16-NEXT: s_and_b32 s5, s25, 0xffff0000
+; GFX11-FAKE16-NEXT: s_and_b32 s4, s24, 0xffff0000
+; GFX11-FAKE16-NEXT: v_add_f32_e64 v6, 0x40c00000, s5
+; GFX11-FAKE16-NEXT: v_and_b32_e32 v51, 0xffff0000, v183
+; GFX11-FAKE16-NEXT: v_add_nc_u32_e32 v2, 0x7fff, v2
+; GFX11-FAKE16-NEXT: s_delay_alu instid0(VALU_DEP_1)
+; GFX11-FAKE16-NEXT: v_cndmask_b32_e32 v0, v2, v8, vcc_lo
+; GFX11-FAKE16-NEXT: v_add_nc_u32_e32 v8, v10, v5
+; GFX11-FAKE16-NEXT: v_cmp_u_f32_e32 vcc_lo, v1, v1
+; GFX11-FAKE16-NEXT: v_add_nc_u32_e32 v2, v9, v3
+; GFX11-FAKE16-NEXT: v_or_b32_e32 v9, 0x400000, v5
+; GFX11-FAKE16-NEXT: v_lshrrev_b32_e32 v0, 16, v0
+; GFX11-FAKE16-NEXT: v_bfe_u32 v10, v6, 16, 1
+; GFX11-FAKE16-NEXT: v_cndmask_b32_e32 v1, v4, v7, vcc_lo
+; GFX11-FAKE16-NEXT: v_add_nc_u32_e32 v7, 0x7fff, v8
+; GFX11-FAKE16-NEXT: v_add_f32_e64 v8, 0x40c00000, s7
+; GFX11-FAKE16-NEXT: v_or_b32_e32 v4, 0x400000, v3
+; GFX11-FAKE16-NEXT: v_add_nc_u32_e32 v2, 0x7fff, v2
+; GFX11-FAKE16-NEXT: v_lshrrev_b32_e32 v1, 16, v1
+; GFX11-FAKE16-NEXT: v_and_b32_e32 v0, 0xffff, v0
+; GFX11-FAKE16-NEXT: v_cmp_u_f32_e32 vcc_lo, v3, v3
+; GFX11-FAKE16-NEXT: v_bfe_u32 v3, v8, 16, 1
+; GFX11-FAKE16-NEXT: s_delay_alu instid0(VALU_DEP_3) | instskip(NEXT) | instid1(VALU_DEP_2)
+; GFX11-FAKE16-NEXT: v_lshl_or_b32 v15, v1, 16, v0
+; GFX11-FAKE16-NEXT: v_add_nc_u32_e32 v1, v3, v8
+; GFX11-FAKE16-NEXT: v_cndmask_b32_e32 v2, v2, v4, vcc_lo
+; GFX11-FAKE16-NEXT: v_cmp_u_f32_e32 vcc_lo, v5, v5
+; GFX11-FAKE16-NEXT: v_add_nc_u32_e32 v5, v10, v6
+; GFX11-FAKE16-NEXT: s_delay_alu instid0(VALU_DEP_4) | instskip(NEXT) | instid1(VALU_DEP_4)
+; GFX11-FAKE16-NEXT: v_add_nc_u32_e32 v1, 0x7fff, v1
+; GFX11-FAKE16-NEXT: v_lshrrev_b32_e32 v0, 16, v2
+; GFX11-FAKE16-NEXT: v_cndmask_b32_e32 v4, v7, v9, vcc_lo
+; GFX11-FAKE16-NEXT: s_delay_alu instid0(VALU_DEP_4)
+; GFX11-FAKE16-NEXT: v_add_nc_u32_e32 v3, 0x7fff, v5
+; GFX11-FAKE16-NEXT: v_add_f32_e64 v5, 0x40c00000, s4
+; GFX11-FAKE16-NEXT: v_cmp_u_f32_e32 vcc_lo, v6, v6
+; GFX11-FAKE16-NEXT: s_lshl_b32 s4, s24, 16
+; GFX11-FAKE16-NEXT: v_lshrrev_b32_e32 v2, 16, v4
+; GFX11-FAKE16-NEXT: v_or_b32_e32 v4, 0x400000, v6
+; GFX11-FAKE16-NEXT: v_or_b32_e32 v7, 0x400000, v8
+; GFX11-FAKE16-NEXT: v_add_f32_e64 v9, 0x40c00000, s4
+; GFX11-FAKE16-NEXT: s_and_b32 s4, s23, 0xffff0000
+; GFX11-FAKE16-NEXT: s_delay_alu instid0(VALU_DEP_3) | instskip(SKIP_4) | instid1(VALU_DEP_4)
+; GFX11-FAKE16-NEXT: v_cndmask_b32_e32 v3, v3, v4, vcc_lo
+; GFX11-FAKE16-NEXT: v_bfe_u32 v4, v5, 16, 1
+; GFX11-FAKE16-NEXT: v_cmp_u_f32_e32 vcc_lo, v8, v8
+; GFX11-FAKE16-NEXT: v_or_b32_e32 v8, 0x400000, v5
+; GFX11-FAKE16-NEXT: v_or_b32_e32 v10, 0x400000, v9
+; GFX11-FAKE16-NEXT: v_add_nc_u32_e32 v4, v4, v5
+; GFX11-FAKE16-NEXT: v_dual_cndmask_b32 v6, v1, v7 :: v_dual_and_b32 v1, 0xffff, v2
+; GFX11-FAKE16-NEXT: v_bfe_u32 v7, v9, 16, 1
+; GFX11-FAKE16-NEXT: v_lshrrev_b32_e32 v2, 16, v3
+; GFX11-FAKE16-NEXT: s_delay_alu instid0(VALU_DEP_4) | instskip(NEXT) | instid1(VALU_DEP_4)
+; GFX11-FAKE16-NEXT: v_add_nc_u32_e32 v4, 0x7fff, v4
+; GFX11-FAKE16-NEXT: v_lshrrev_b32_e32 v3, 16, v6
+; GFX11-FAKE16-NEXT: v_cmp_u_f32_e32 vcc_lo, v5, v5
+; GFX11-FAKE16-NEXT: v_add_nc_u32_e32 v6, v7, v9
+; GFX11-FAKE16-NEXT: v_add_f32_e64 v7, 0x40c00000, s4
+; GFX11-FAKE16-NEXT: s_lshl_b32 s4, s23, 16
+; GFX11-FAKE16-NEXT: v_lshl_or_b32 v151, v0, 16, v1
+; GFX11-FAKE16-NEXT: v_add_f32_e64 v12, 0x40c00000, s4
+; GFX11-FAKE16-NEXT: v_add_nc_u32_e32 v6, 0x7fff, v6
+; GFX11-FAKE16-NEXT: v_bfe_u32 v11, v7, 16, 1
+; GFX11-FAKE16-NEXT: v_cndmask_b32_e32 v5, v4, v8, vcc_lo
+; GFX11-FAKE16-NEXT: v_cmp_u_f32_e32 vcc_lo, v9, v9
+; GFX11-FAKE16-NEXT: s_and_b32 s4, s22, 0xffff0000
+; GFX11-FAKE16-NEXT: v_bfe_u32 v9, v12, 16, 1
+; GFX11-FAKE16-NEXT: v_add_nc_u32_e32 v8, v11, v7
+; GFX11-FAKE16-NEXT: v_and_b32_e32 v4, 0xffff, v3
+; GFX11-FAKE16-NEXT: v_cndmask_b32_e32 v6, v6, v10, vcc_lo
+; GFX11-FAKE16-NEXT: v_add_f32_e64 v10, 0x40c00000, s4
+; GFX11-FAKE16-NEXT: s_lshl_b32 s4, s22, 16
+; GFX11-FAKE16-NEXT: v_lshrrev_b32_e32 v3, 16, v5
+; GFX11-FAKE16-NEXT: v_add_f32_e64 v11, 0x40c00000, s4
+; GFX11-FAKE16-NEXT: v_lshrrev_b32_e32 v5, 16, v6
+; GFX11-FAKE16-NEXT: v_add_nc_u32_e32 v6, 0x7fff, v8
+; GFX11-FAKE16-NEXT: v_add_nc_u32_e32 v8, v9, v12
+; GFX11-FAKE16-NEXT: v_or_b32_e32 v9, 0x400000, v7
+; GFX11-FAKE16-NEXT: v_bfe_u32 v14, v10, 16, 1
+; GFX11-FAKE16-NEXT: v_cmp_u_f32_e32 vcc_lo, v7, v7
+; GFX11-FAKE16-NEXT: v_or_b32_e32 v13, 0x400000, v12
+; GFX11-FAKE16-NEXT: v_add_nc_u32_e32 v8, 0x7fff, v8
+; GFX11-FAKE16-NEXT: s_and_b32 s4, s21, 0xffff0000
+; GFX11-FAKE16-NEXT: v_cndmask_b32_e32 v7, v6, v9, vcc_lo
+; GFX11-FAKE16-NEXT: v_bfe_u32 v9, v11, 16, 1
+; GFX11-FAKE16-NEXT: v_cmp_u_f32_e32 vcc_lo, v12, v12
+; GFX11-FAKE16-NEXT: v_add_nc_u32_e32 v12, v14, v10
+; GFX11-FAKE16-NEXT: v_and_b32_e32 v6, 0xffff, v5
+; GFX11-FAKE16-NEXT: v_lshrrev_b32_e32 v5, 16, v7
+; GFX11-FAKE16-NEXT: v_dual_cndmask_b32 v8, v8, v13 :: v_dual_add_nc_u32 v7, v9, v11
+; GFX11-FAKE16-NEXT: s_delay_alu instid0(VALU_DEP_4)
+; GFX11-FAKE16-NEXT: v_add_nc_u32_e32 v9, 0x7fff, v12
+; GFX11-FAKE16-NEXT: v_or_b32_e32 v12, 0x400000, v10
+; GFX11-FAKE16-NEXT: v_add_f32_e64 v13, 0x40c00000, s4
+; GFX11-FAKE16-NEXT: v_cmp_u_f32_e32 vcc_lo, v10, v10
+; GFX11-FAKE16-NEXT: s_lshl_b32 s4, s21, 16
+; GFX11-FAKE16-NEXT: v_add_nc_u32_e32 v7, 0x7fff, v7
+; GFX11-FAKE16-NEXT: v_or_b32_e32 v14, 0x400000, v11
+; GFX11-FAKE16-NEXT: v_add_f32_e64 v16, 0x40c00000, s4
+; GFX11-FAKE16-NEXT: v_cndmask_b32_e32 v9, v9, v12, vcc_lo
+; GFX11-FAKE16-NEXT: v_bfe_u32 v10, v13, 16, 1
+; GFX11-FAKE16-NEXT: v_cmp_u_f32_e32 vcc_lo, v11, v11
+; GFX11-FAKE16-NEXT: v_lshrrev_b32_e32 v8, 16, v8
+; GFX11-FAKE16-NEXT: v_bfe_u32 v12, v16, 16, 1
+; GFX11-FAKE16-NEXT: s_and_b32 s4, s20, 0xffff0000
+; GFX11-FAKE16-NEXT: v_dual_cndmask_b32 v11, v7, v14 :: v_dual_add_nc_u32 v10, v10, v13
+; GFX11-FAKE16-NEXT: s_delay_alu instid0(VALU_DEP_3) | instskip(SKIP_2) | instid1(VALU_DEP_4)
+; GFX11-FAKE16-NEXT: v_and_b32_e32 v7, 0xffff, v8
+; GFX11-FAKE16-NEXT: v_lshrrev_b32_e32 v8, 16, v9
+; GFX11-FAKE16-NEXT: v_or_b32_e32 v14, 0x400000, v13
+; GFX11-FAKE16-NEXT: v_add_nc_u32_e32 v10, 0x7fff, v10
+; GFX11-FAKE16-NEXT: v_lshrrev_b32_e32 v9, 16, v11
+; GFX11-FAKE16-NEXT: v_add_nc_u32_e32 v11, v12, v16
+; GFX11-FAKE16-NEXT: v_add_f32_e64 v12, 0x40c00000, s4
+; GFX11-FAKE16-NEXT: v_cmp_u_f32_e32 vcc_lo, v13, v13
+; GFX11-FAKE16-NEXT: s_lshl_b32 s4, s20, 16
+; GFX11-FAKE16-NEXT: v_or_b32_e32 v17, 0x400000, v16
+; GFX11-FAKE16-NEXT: v_add_nc_u32_e32 v11, 0x7fff, v11
+; GFX11-FAKE16-NEXT: v_bfe_u32 v18, v12, 16, 1
+; GFX11-FAKE16-NEXT: v_add_f32_e64 v19, 0x40c00000, s4
+; GFX11-FAKE16-NEXT: v_cndmask_b32_e32 v13, v10, v14, vcc_lo
+; GFX11-FAKE16-NEXT: v_cmp_u_f32_e32 vcc_lo, v16, v16
+; GFX11-FAKE16-NEXT: s_and_b32 s4, s19, 0xffff0000
+; GFX11-FAKE16-NEXT: v_add_nc_u32_e32 v14, v18, v12
+; GFX11-FAKE16-NEXT: v_bfe_u32 v16, v19, 16, 1
+; GFX11-FAKE16-NEXT: v_and_b32_e32 v10, 0xffff, v9
+; GFX11-FAKE16-NEXT: v_cndmask_b32_e32 v11, v11, v17, vcc_lo
+; GFX11-FAKE16-NEXT: v_add_f32_e64 v17, 0x40c00000, s4
+; GFX11-FAKE16-NEXT: s_lshl_b32 s4, s19, 16
+; GFX11-FAKE16-NEXT: v_lshrrev_b32_e32 v9, 16, v13
+; GFX11-FAKE16-NEXT: v_add_nc_u32_e32 v13, 0x7fff, v14
+; GFX11-FAKE16-NEXT: v_add_nc_u32_e32 v14, v16, v19
+; GFX11-FAKE16-NEXT: v_or_b32_e32 v16, 0x400000, v12
+; GFX11-FAKE16-NEXT: v_add_f32_e64 v18, 0x40c00000, s4
+; GFX11-FAKE16-NEXT: v_bfe_u32 v21, v17, 16, 1
+; GFX11-FAKE16-NEXT: v_cmp_u_f32_e32 vcc_lo, v12, v12
+; GFX11-FAKE16-NEXT: v_lshrrev_b32_e32 v11, 16, v11
+; GFX11-FAKE16-NEXT: v_add_nc_u32_e32 v14, 0x7fff, v14
+; GFX11-FAKE16-NEXT: v_or_b32_e32 v20, 0x400000, v19
+; GFX11-FAKE16-NEXT: s_and_b32 s4, s18, 0xffff0000
+; GFX11-FAKE16-NEXT: v_cndmask_b32_e32 v13, v13, v16, vcc_lo
+; GFX11-FAKE16-NEXT: v_bfe_u32 v16, v18, 16, 1
+; GFX11-FAKE16-NEXT: v_cmp_u_f32_e32 vcc_lo, v19, v19
+; GFX11-FAKE16-NEXT: v_add_nc_u32_e32 v19, v21, v17
+; GFX11-FAKE16-NEXT: v_and_b32_e32 v12, 0xffff, v11
+; GFX11-FAKE16-NEXT: v_lshrrev_b32_e32 v11, 16, v13
+; GFX11-FAKE16-NEXT: v_dual_cndmask_b32 v14, v14, v20 :: v_dual_add_nc_u32 v13, v16, v18
+; GFX11-FAKE16-NEXT: s_delay_alu instid0(VALU_DEP_4)
+; GFX11-FAKE16-NEXT: v_add_nc_u32_e32 v16, 0x7fff, v19
+; GFX11-FAKE16-NEXT: v_or_b32_e32 v19, 0x400000, v17
+; GFX11-FAKE16-NEXT: v_add_f32_e64 v20, 0x40c00000, s4
+; GFX11-FAKE16-NEXT: v_cmp_u_f32_e32 vcc_lo, v17, v17
+; GFX11-FAKE16-NEXT: s_lshl_b32 s4, s18, 16
+; GFX11-FAKE16-NEXT: v_add_nc_u32_e32 v13, 0x7fff, v13
+; GFX11-FAKE16-NEXT: v_or_b32_e32 v21, 0x400000, v18
+; GFX11-FAKE16-NEXT: v_add_f32_e64 v22, 0x40c00000, s4
+; GFX11-FAKE16-NEXT: v_cndmask_b32_e32 v16, v16, v19, vcc_lo
+; GFX11-FAKE16-NEXT: v_bfe_u32 v17, v20, 16, 1
+; GFX11-FAKE16-NEXT: v_cmp_u_f32_e32 vcc_lo, v18, v18
+; GFX11-FAKE16-NEXT: v_lshrrev_b32_e32 v14, 16, v14
+; GFX11-FAKE16-NEXT: v_bfe_u32 v19, v22, 16, 1
+; GFX11-FAKE16-NEXT: s_and_b32 s4, s17, 0xffff0000
+; GFX11-FAKE16-NEXT: v_add_nc_u32_e32 v17, v17, v20
+; GFX11-FAKE16-NEXT: s_delay_alu instid0(VALU_DEP_3) | instskip(SKIP_2) | instid1(VALU_DEP_4)
+; GFX11-FAKE16-NEXT: v_dual_cndmask_b32 v18, v13, v21 :: v_dual_and_b32 v13, 0xffff, v14
+; GFX11-FAKE16-NEXT: v_lshrrev_b32_e32 v14, 16, v16
+; GFX11-FAKE16-NEXT: v_or_b32_e32 v21, 0x400000, v20
+; GFX11-FAKE16-NEXT: v_add_nc_u32_e32 v17, 0x7fff, v17
+; GFX11-FAKE16-NEXT: s_delay_alu instid0(VALU_DEP_4)
+; GFX11-FAKE16-NEXT: v_lshrrev_b32_e32 v16, 16, v18
+; GFX11-FAKE16-NEXT: v_add_nc_u32_e32 v18, v19, v22
+; GFX11-FAKE16-NEXT: v_add_f32_e64 v19, 0x40c00000, s4
+; GFX11-FAKE16-NEXT: v_cmp_u_f32_e32 vcc_lo, v20, v20
+; GFX11-FAKE16-NEXT: s_lshl_b32 s4, s17, 16
+; GFX11-FAKE16-NEXT: v_or_b32_e32 v23, 0x400000, v22
+; GFX11-FAKE16-NEXT: v_add_nc_u32_e32 v18, 0x7fff, v18
+; GFX11-FAKE16-NEXT: v_bfe_u32 v24, v19, 16, 1
+; GFX11-FAKE16-NEXT: v_add_f32_e64 v25, 0x40c00000, s4
+; GFX11-FAKE16-NEXT: v_cndmask_b32_e32 v20, v17, v21, vcc_lo
+; GFX11-FAKE16-NEXT: v_cmp_u_f32_e32 vcc_lo, v22, v22
+; GFX11-FAKE16-NEXT: s_and_b32 s4, s16, 0xffff0000
+; GFX11-FAKE16-NEXT: v_add_nc_u32_e32 v21, v24, v19
+; GFX11-FAKE16-NEXT: v_bfe_u32 v22, v25, 16, 1
+; GFX11-FAKE16-NEXT: v_dual_cndmask_b32 v18, v18, v23 :: v_dual_and_b32 v17, 0xffff, v16
+; GFX11-FAKE16-NEXT: v_add_f32_e64 v23, 0x40c00000, s4
+; GFX11-FAKE16-NEXT: s_lshl_b32 s4, s16, 16
+; GFX11-FAKE16-NEXT: v_lshrrev_b32_e32 v16, 16, v20
+; GFX11-FAKE16-NEXT: v_add_nc_u32_e32 v20, 0x7fff, v21
+; GFX11-FAKE16-NEXT: v_add_nc_u32_e32 v21, v22, v25
+; GFX11-FAKE16-NEXT: v_or_b32_e32 v22, 0x400000, v19
+; GFX11-FAKE16-NEXT: v_add_f32_e64 v24, 0x40c00000, s4
+; GFX11-FAKE16-NEXT: v_bfe_u32 v27, v23, 16, 1
+; GFX11-FAKE16-NEXT: v_cmp_u_f32_e32 vcc_lo, v19, v19
+; GFX11-FAKE16-NEXT: v_lshrrev_b32_e32 v18, 16, v18
+; GFX11-FAKE16-NEXT: v_add_nc_u32_e32 v21, 0x7fff, v21
+; GFX11-FAKE16-NEXT: v_or_b32_e32 v26, 0x400000, v25
+; GFX11-FAKE16-NEXT: s_and_b32 s4, s3, 0xffff0000
+; GFX11-FAKE16-NEXT: v_cndmask_b32_e32 v20, v20, v22, vcc_lo
+; GFX11-FAKE16-NEXT: v_bfe_u32 v22, v24, 16, 1
+; GFX11-FAKE16-NEXT: v_cmp_u_f32_e32 vcc_lo, v25, v25
+; GFX11-FAKE16-NEXT: v_add_nc_u32_e32 v25, v27, v23
+; GFX11-FAKE16-NEXT: v_and_b32_e32 v19, 0xffff, v18
+; GFX11-FAKE16-NEXT: v_lshrrev_b32_e32 v18, 16, v20
+; GFX11-FAKE16-NEXT: v_dual_cndmask_b32 v21, v21, v26 :: v_dual_add_nc_u32 v20, v22, v24
+; GFX11-FAKE16-NEXT: s_delay_alu instid0(VALU_DEP_4)
+; GFX11-FAKE16-NEXT: v_add_nc_u32_e32 v22, 0x7fff, v25
+; GFX11-FAKE16-NEXT: v_or_b32_e32 v25, 0x400000, v23
+; GFX11-FAKE16-NEXT: v_add_f32_e64 v26, 0x40c00000, s4
+; GFX11-FAKE16-NEXT: v_cmp_u_f32_e32 vcc_lo, v23, v23
+; GFX11-FAKE16-NEXT: s_lshl_b32 s3, s3, 16
+; GFX11-FAKE16-NEXT: v_add_nc_u32_e32 v20, 0x7fff, v20
+; GFX11-FAKE16-NEXT: v_or_b32_e32 v27, 0x400000, v24
+; GFX11-FAKE16-NEXT: v_add_f32_e64 v28, 0x40c00000, s3
+; GFX11-FAKE16-NEXT: v_cndmask_b32_e32 v22, v22, v25, vcc_lo
+; GFX11-FAKE16-NEXT: v_bfe_u32 v23, v26, 16, 1
+; GFX11-FAKE16-NEXT: v_cmp_u_f32_e32 vcc_lo, v24, v24
+; GFX11-FAKE16-NEXT: v_lshrrev_b32_e32 v21, 16, v21
+; GFX11-FAKE16-NEXT: v_bfe_u32 v25, v28, 16, 1
+; GFX11-FAKE16-NEXT: s_and_b32 s3, s2, 0xffff0000
+; GFX11-FAKE16-NEXT: v_dual_cndmask_b32 v24, v20, v27 :: v_dual_add_nc_u32 v23, v23, v26
+; GFX11-FAKE16-NEXT: s_delay_alu instid0(VALU_DEP_3) | instskip(SKIP_2) | instid1(VALU_DEP_4)
+; GFX11-FAKE16-NEXT: v_and_b32_e32 v20, 0xffff, v21
+; GFX11-FAKE16-NEXT: v_lshrrev_b32_e32 v21, 16, v22
+; GFX11-FAKE16-NEXT: v_or_b32_e32 v27, 0x400000, v26
+; GFX11-FAKE16-NEXT: v_add_nc_u32_e32 v23, 0x7fff, v23
+; GFX11-FAKE16-NEXT: v_lshrrev_b32_e32 v22, 16, v24
+; GFX11-FAKE16-NEXT: v_add_nc_u32_e32 v24, v25, v28
+; GFX11-FAKE16-NEXT: v_add_f32_e64 v25, 0x40c00000, s3
+; GFX11-FAKE16-NEXT: v_cmp_u_f32_e32 vcc_lo, v26, v26
+; GFX11-FAKE16-NEXT: s_lshl_b32 s2, s2, 16
+; GFX11-FAKE16-NEXT: v_or_b32_e32 v29, 0x400000, v28
+; GFX11-FAKE16-NEXT: v_add_nc_u32_e32 v24, 0x7fff, v24
+; GFX11-FAKE16-NEXT: v_bfe_u32 v30, v25, 16, 1
+; GFX11-FAKE16-NEXT: v_add_f32_e64 v31, 0x40c00000, s2
+; GFX11-FAKE16-NEXT: v_cndmask_b32_e32 v26, v23, v27, vcc_lo
+; GFX11-FAKE16-NEXT: v_cmp_u_f32_e32 vcc_lo, v28, v28
+; GFX11-FAKE16-NEXT: s_and_b32 s2, s1, 0xffff0000
+; GFX11-FAKE16-NEXT: v_add_nc_u32_e32 v27, v30, v25
+; GFX11-FAKE16-NEXT: v_bfe_u32 v28, v31, 16, 1
+; GFX11-FAKE16-NEXT: s_lshl_b32 s1, s1, 16
+; GFX11-FAKE16-NEXT: v_cndmask_b32_e32 v24, v24, v29, vcc_lo
+; GFX11-FAKE16-NEXT: v_add_f32_e64 v29, 0x40c00000, s2
+; GFX11-FAKE16-NEXT: v_and_b32_e32 v23, 0xffff, v22
+; GFX11-FAKE16-NEXT: v_lshrrev_b32_e32 v22, 16, v26
+; GFX11-FAKE16-NEXT: v_add_nc_u32_e32 v26, 0x7fff, v27
+; GFX11-FAKE16-NEXT: v_add_nc_u32_e32 v27, v28, v31
+; GFX11-FAKE16-NEXT: v_or_b32_e32 v28, 0x400000, v25
+; GFX11-FAKE16-NEXT: v_add_f32_e64 v30, 0x40c00000, s1
+; GFX11-FAKE16-NEXT: v_bfe_u32 v33, v29, 16, 1
+; GFX11-FAKE16-NEXT: v_cmp_u_f32_e32 vcc_lo, v25, v25
+; GFX11-FAKE16-NEXT: v_lshrrev_b32_e32 v24, 16, v24
+; GFX11-FAKE16-NEXT: v_add_nc_u32_e32 v27, 0x7fff, v27
+; GFX11-FAKE16-NEXT: v_or_b32_e32 v32, 0x400000, v31
+; GFX11-FAKE16-NEXT: s_and_b32 s1, s0, 0xffff0000
+; GFX11-FAKE16-NEXT: v_cndmask_b32_e32 v26, v26, v28, vcc_lo
+; GFX11-FAKE16-NEXT: v_bfe_u32 v28, v30, 16, 1
+; GFX11-FAKE16-NEXT: v_cmp_u_f32_e32 vcc_lo, v31, v31
+; GFX11-FAKE16-NEXT: v_add_nc_u32_e32 v31, v33, v29
+; GFX11-FAKE16-NEXT: v_and_b32_e32 v25, 0xffff, v24
+; GFX11-FAKE16-NEXT: v_lshrrev_b32_e32 v24, 16, v26
+; GFX11-FAKE16-NEXT: v_dual_cndmask_b32 v27, v27, v32 :: v_dual_add_nc_u32 v26, v28, v30
+; GFX11-FAKE16-NEXT: s_delay_alu instid0(VALU_DEP_4)
+; GFX11-FAKE16-NEXT: v_add_nc_u32_e32 v28, 0x7fff, v31
+; GFX11-FAKE16-NEXT: v_or_b32_e32 v31, 0x400000, v29
+; GFX11-FAKE16-NEXT: v_add_f32_e64 v32, 0x40c00000, s1
+; GFX11-FAKE16-NEXT: v_cmp_u_f32_e32 vcc_lo, v29, v29
+; GFX11-FAKE16-NEXT: s_lshl_b32 s0, s0, 16
+; GFX11-FAKE16-NEXT: v_add_nc_u32_e32 v26, 0x7fff, v26
+; GFX11-FAKE16-NEXT: v_or_b32_e32 v33, 0x400000, v30
+; GFX11-FAKE16-NEXT: v_add_f32_e64 v34, 0x40c00000, s0
+; GFX11-FAKE16-NEXT: v_cndmask_b32_e32 v28, v28, v31, vcc_lo
+; GFX11-FAKE16-NEXT: v_bfe_u32 v29, v32, 16, 1
+; GFX11-FAKE16-NEXT: v_cmp_u_f32_e32 vcc_lo, v30, v30
+; GFX11-FAKE16-NEXT: v_or_b32_e32 v35, 0x400000, v32
+; GFX11-FAKE16-NEXT: v_bfe_u32 v31, v34, 16, 1
+; GFX11-FAKE16-NEXT: v_or_b32_e32 v36, 0x400000, v34
+; GFX11-FAKE16-NEXT: v_lshrrev_b32_e32 v27, 16, v27
+; GFX11-FAKE16-NEXT: v_cndmask_b32_e32 v30, v26, v33, vcc_lo
+; GFX11-FAKE16-NEXT: v_lshrrev_b32_e32 v26, 16, v28
+; GFX11-FAKE16-NEXT: v_add_nc_u32_e32 v28, v29, v32
+; GFX11-FAKE16-NEXT: v_lshlrev_b32_e32 v33, 16, v178
+; GFX11-FAKE16-NEXT: v_cmp_u_f32_e32 vcc_lo, v32, v32
+; GFX11-FAKE16-NEXT: v_lshrrev_b32_e32 v29, 16, v30
+; GFX11-FAKE16-NEXT: v_add_nc_u32_e32 v30, v31, v34
+; GFX11-FAKE16-NEXT: v_and_b32_e32 v31, 0xffff0000, v178
+; GFX11-FAKE16-NEXT: v_add_nc_u32_e32 v28, 0x7fff, v28
+; GFX11-FAKE16-NEXT: v_add_f32_e32 v33, 0x40c00000, v33
+; GFX11-FAKE16-NEXT: v_lshl_or_b32 v109, v5, 16, v7
+; GFX11-FAKE16-NEXT: v_add_nc_u32_e32 v30, 0x7fff, v30
+; GFX11-FAKE16-NEXT: v_add_f32_e32 v31, 0x40c00000, v31
+; GFX11-FAKE16-NEXT: v_cndmask_b32_e32 v32, v28, v35, vcc_lo
+; GFX11-FAKE16-NEXT: v_bfe_u32 v37, v33, 16, 1
+; GFX11-FAKE16-NEXT: v_cmp_u_f32_e32 vcc_lo, v34, v34
+; GFX11-FAKE16-NEXT: v_and_b32_e32 v28, 0xffff, v29
+; GFX11-FAKE16-NEXT: v_bfe_u32 v35, v31, 16, 1
+; GFX11-FAKE16-NEXT: v_lshrrev_b32_e32 v29, 16, v32
+; GFX11-FAKE16-NEXT: v_add_nc_u32_e32 v32, v37, v33
+; GFX11-FAKE16-NEXT: v_cndmask_b32_e32 v30, v30, v36, vcc_lo
+; GFX11-FAKE16-NEXT: v_lshlrev_b32_e32 v36, 16, v179
+; GFX11-FAKE16-NEXT: v_add_nc_u32_e32 v34, v35, v31
+; GFX11-FAKE16-NEXT: v_or_b32_e32 v37, 0x400000, v33
+; GFX11-FAKE16-NEXT: v_add_nc_u32_e32 v32, 0x7fff, v32
+; GFX11-FAKE16-NEXT: v_cmp_u_f32_e32 vcc_lo, v33, v33
+; GFX11-FAKE16-NEXT: v_and_b32_e32 v35, 0xffff0000, v179
+; GFX11-FAKE16-NEXT: v_add_nc_u32_e32 v34, 0x7fff, v34
+; GFX11-FAKE16-NEXT: v_or_b32_e32 v38, 0x400000, v31
+; GFX11-FAKE16-NEXT: v_add_f32_e32 v36, 0x40c00000, v36
+; GFX11-FAKE16-NEXT: v_cndmask_b32_e32 v32, v32, v37, vcc_lo
+; GFX11-FAKE16-NEXT: v_cmp_u_f32_e32 vcc_lo, v31, v31
+; GFX11-FAKE16-NEXT: v_add_f32_e32 v35, 0x40c00000, v35
+; GFX11-FAKE16-NEXT: v_and_b32_e32 v37, 0xffff0000, v180
+; GFX11-FAKE16-NEXT: v_or_b32_e32 v39, 0x400000, v36
+; GFX11-FAKE16-NEXT: v_lshrrev_b32_e32 v32, 16, v32
+; GFX11-FAKE16-NEXT: v_cndmask_b32_e32 v31, v34, v38, vcc_lo
+; GFX11-FAKE16-NEXT: v_bfe_u32 v34, v36, 16, 1
+; GFX11-FAKE16-NEXT: v_bfe_u32 v33, v35, 16, 1
+; GFX11-FAKE16-NEXT: v_lshlrev_b32_e32 v38, 16, v180
+; GFX11-FAKE16-NEXT: v_cmp_u_f32_e32 vcc_lo, v36, v36
+; GFX11-FAKE16-NEXT: s_delay_alu instid0(VALU_DEP_4) | instskip(NEXT) | instid1(VALU_DEP_4)
+; GFX11-FAKE16-NEXT: v_dual_add_f32 v37, 0x40c00000, v37 :: v_dual_add_nc_u32 v34, v34, v36
+; GFX11-FAKE16-NEXT: v_add_nc_u32_e32 v33, v33, v35
+; GFX11-FAKE16-NEXT: v_or_b32_e32 v48, 0x400000, v35
+; GFX11-FAKE16-NEXT: v_add_f32_e32 v38, 0x40c00000, v38
+; GFX11-FAKE16-NEXT: v_lshrrev_b32_e32 v31, 16, v31
+; GFX11-FAKE16-NEXT: v_add_nc_u32_e32 v34, 0x7fff, v34
+; GFX11-FAKE16-NEXT: v_add_nc_u32_e32 v33, 0x7fff, v33
+; GFX11-FAKE16-NEXT: v_and_b32_e32 v32, 0xffff, v32
+; GFX11-FAKE16-NEXT: v_bfe_u32 v36, v37, 16, 1
+; GFX11-FAKE16-NEXT: v_lshrrev_b32_e32 v30, 16, v30
+; GFX11-FAKE16-NEXT: v_cndmask_b32_e32 v34, v34, v39, vcc_lo
+; GFX11-FAKE16-NEXT: v_cmp_u_f32_e32 vcc_lo, v35, v35
+; GFX11-FAKE16-NEXT: v_bfe_u32 v35, v38, 16, 1
+; GFX11-FAKE16-NEXT: v_lshl_or_b32 v178, v31, 16, v32
+; GFX11-FAKE16-NEXT: v_add_nc_u32_e32 v31, v36, v37
+; GFX11-FAKE16-NEXT: v_dual_cndmask_b32 v33, v33, v48 :: v_dual_lshlrev_b32 v36, 16, v182
+; GFX11-FAKE16-NEXT: v_lshrrev_b32_e32 v34, 16, v34
+; GFX11-FAKE16-NEXT: v_or_b32_e32 v39, 0x400000, v38
+; GFX11-FAKE16-NEXT: v_cmp_u_f32_e32 vcc_lo, v38, v38
+; GFX11-FAKE16-NEXT: s_delay_alu instid0(VALU_DEP_4)
+; GFX11-FAKE16-NEXT: v_add_f32_e32 v36, 0x40c00000, v36
+; GFX11-FAKE16-NEXT: v_lshrrev_b32_e32 v32, 16, v33
+; GFX11-FAKE16-NEXT: v_add_nc_u32_e32 v33, v35, v38
+; GFX11-FAKE16-NEXT: v_and_b32_e32 v35, 0xffff0000, v182
+; GFX11-FAKE16-NEXT: v_and_b32_e32 v34, 0xffff, v34
+; GFX11-FAKE16-NEXT: v_or_b32_e32 v48, 0x400000, v37
+; GFX11-FAKE16-NEXT: v_add_nc_u32_e32 v31, 0x7fff, v31
+; GFX11-FAKE16-NEXT: v_add_nc_u32_e32 v33, 0x7fff, v33
+; GFX11-FAKE16-NEXT: v_add_f32_e32 v35, 0x40c00000, v35
+; GFX11-FAKE16-NEXT: v_lshl_or_b32 v179, v32, 16, v34
+; GFX11-FAKE16-NEXT: v_and_b32_e32 v30, 0xffff, v30
+; GFX11-FAKE16-NEXT: v_lshl_or_b32 v136, v2, 16, v4
+; GFX11-FAKE16-NEXT: v_cndmask_b32_e32 v33, v33, v39, vcc_lo
+; GFX11-FAKE16-NEXT: v_bfe_u32 v38, v35, 16, 1
+; GFX11-FAKE16-NEXT: v_cmp_u_f32_e32 vcc_lo, v37, v37
+; GFX11-FAKE16-NEXT: v_bfe_u32 v37, v36, 16, 1
+; GFX11-FAKE16-NEXT: v_or_b32_e32 v39, 0x400000, v36
+; GFX11-FAKE16-NEXT: v_lshrrev_b32_e32 v33, 16, v33
+; GFX11-FAKE16-NEXT: v_dual_cndmask_b32 v31, v31, v48 :: v_dual_add_nc_u32 v38, v38, v35
+; GFX11-FAKE16-NEXT: s_delay_alu instid0(VALU_DEP_4) | instskip(SKIP_2) | instid1(VALU_DEP_4)
+; GFX11-FAKE16-NEXT: v_add_nc_u32_e32 v32, v37, v36
+; GFX11-FAKE16-NEXT: v_and_b32_e32 v37, 0xffff0000, v181
+; GFX11-FAKE16-NEXT: v_cmp_u_f32_e32 vcc_lo, v36, v36
+; GFX11-FAKE16-NEXT: v_add_nc_u32_e32 v34, 0x7fff, v38
+; GFX11-FAKE16-NEXT: v_lshlrev_b32_e32 v38, 16, v181
+; GFX11-FAKE16-NEXT: v_add_nc_u32_e32 v32, 0x7fff, v32
+; GFX11-FAKE16-NEXT: v_or_b32_e32 v48, 0x400000, v35
+; GFX11-FAKE16-NEXT: v_add_f32_e32 v37, 0x40c00000, v37
+; GFX11-FAKE16-NEXT: v_lshrrev_b32_e32 v31, 16, v31
+; GFX11-FAKE16-NEXT: v_add_f32_e32 v38, 0x40c00000, v38
+; GFX11-FAKE16-NEXT: v_cndmask_b32_e32 v32, v32, v39, vcc_lo
+; GFX11-FAKE16-NEXT: v_cmp_u_f32_e32 vcc_lo, v35, v35
+; GFX11-FAKE16-NEXT: v_and_b32_e32 v33, 0xffff, v33
+; GFX11-FAKE16-NEXT: v_bfe_u32 v35, v37, 16, 1
+; GFX11-FAKE16-NEXT: v_bfe_u32 v36, v38, 16, 1
+; GFX11-FAKE16-NEXT: v_lshrrev_b32_e32 v32, 16, v32
+; GFX11-FAKE16-NEXT: v_cndmask_b32_e32 v34, v34, v48, vcc_lo
+; GFX11-FAKE16-NEXT: v_lshl_or_b32 v180, v31, 16, v33
+; GFX11-FAKE16-NEXT: v_add_nc_u32_e32 v33, v35, v37
+; GFX11-FAKE16-NEXT: v_and_b32_e32 v35, 0xffff0000, v170
+; GFX11-FAKE16-NEXT: v_or_b32_e32 v39, 0x400000, v38
+; GFX11-FAKE16-NEXT: v_lshrrev_b32_e32 v31, 16, v34
+; GFX11-FAKE16-NEXT: v_add_nc_u32_e32 v34, v36, v38
+; GFX11-FAKE16-NEXT: s_delay_alu instid0(VALU_DEP_4) | instskip(SKIP_2) | instid1(VALU_DEP_4)
+; GFX11-FAKE16-NEXT: v_dual_add_f32 v35, 0x40c00000, v35 :: v_dual_lshlrev_b32 v36, 16, v170
+; GFX11-FAKE16-NEXT: v_cmp_u_f32_e32 vcc_lo, v38, v38
+; GFX11-FAKE16-NEXT: v_and_b32_e32 v32, 0xffff, v32
+; GFX11-FAKE16-NEXT: v_add_nc_u32_e32 v34, 0x7fff, v34
+; GFX11-FAKE16-NEXT: s_delay_alu instid0(VALU_DEP_4)
+; GFX11-FAKE16-NEXT: v_add_f32_e32 v36, 0x40c00000, v36
+; GFX11-FAKE16-NEXT: v_add_nc_u32_e32 v33, 0x7fff, v33
+; GFX11-FAKE16-NEXT: v_or_b32_e32 v48, 0x400000, v37
+; GFX11-FAKE16-NEXT: v_bfe_u32 v38, v35, 16, 1
+; GFX11-FAKE16-NEXT: v_cndmask_b32_e32 v34, v34, v39, vcc_lo
+; GFX11-FAKE16-NEXT: v_bfe_u32 v39, v36, 16, 1
+; GFX11-FAKE16-NEXT: v_cmp_u_f32_e32 vcc_lo, v37, v37
+; GFX11-FAKE16-NEXT: v_lshl_or_b32 v182, v31, 16, v32
+; GFX11-FAKE16-NEXT: v_add_nc_u32_e32 v37, v38, v35
+; GFX11-FAKE16-NEXT: v_lshrrev_b32_e32 v34, 16, v34
+; GFX11-FAKE16-NEXT: v_add_nc_u32_e32 v31, v39, v36
+; GFX11-FAKE16-NEXT: v_cndmask_b32_e32 v33, v33, v48, vcc_lo
+; GFX11-FAKE16-NEXT: v_or_b32_e32 v38, 0x400000, v36
+; GFX11-FAKE16-NEXT: v_cmp_u_f32_e32 vcc_lo, v36, v36
+; GFX11-FAKE16-NEXT: v_or_b32_e32 v48, 0x400000, v35
+; GFX11-FAKE16-NEXT: v_add_nc_u32_e32 v31, 0x7fff, v31
+; GFX11-FAKE16-NEXT: v_lshrrev_b32_e32 v32, 16, v33
+; GFX11-FAKE16-NEXT: v_and_b32_e32 v33, 0xffff, v34
+; GFX11-FAKE16-NEXT: v_add_nc_u32_e32 v34, 0x7fff, v37
+; GFX11-FAKE16-NEXT: v_and_b32_e32 v37, 0xffff0000, v169
+; GFX11-FAKE16-NEXT: v_cndmask_b32_e32 v31, v31, v38, vcc_lo
+; GFX11-FAKE16-NEXT: v_cmp_u_f32_e32 vcc_lo, v35, v35
+; GFX11-FAKE16-NEXT: v_lshlrev_b32_e32 v39, 16, v169
+; GFX11-FAKE16-NEXT: v_lshl_or_b32 v181, v32, 16, v33
+; GFX11-FAKE16-NEXT: v_add_f32_e32 v37, 0x40c00000, v37
+; GFX11-FAKE16-NEXT: v_and_b32_e32 v38, 0xffff0000, v176
+; GFX11-FAKE16-NEXT: v_cndmask_b32_e32 v34, v34, v48, vcc_lo
+; GFX11-FAKE16-NEXT: v_add_f32_e32 v36, 0x40c00000, v39
+; GFX11-FAKE16-NEXT: v_lshrrev_b32_e32 v31, 16, v31
+; GFX11-FAKE16-NEXT: v_bfe_u32 v35, v37, 16, 1
+; GFX11-FAKE16-NEXT: v_or_b32_e32 v39, 0x400000, v37
+; GFX11-FAKE16-NEXT: v_lshrrev_b32_e32 v33, 16, v34
+; GFX11-FAKE16-NEXT: v_bfe_u32 v32, v36, 16, 1
+; GFX11-FAKE16-NEXT: v_add_f32_e32 v38, 0x40c00000, v38
+; GFX11-FAKE16-NEXT: v_add_nc_u32_e32 v34, v35, v37
+; GFX11-FAKE16-NEXT: v_lshlrev_b32_e32 v35, 16, v176
+; GFX11-FAKE16-NEXT: v_cmp_u_f32_e32 vcc_lo, v37, v37
+; GFX11-FAKE16-NEXT: v_add_nc_u32_e32 v32, v32, v36
+; GFX11-FAKE16-NEXT: v_and_b32_e32 v31, 0xffff, v31
+; GFX11-FAKE16-NEXT: v_add_nc_u32_e32 v34, 0x7fff, v34
+; GFX11-FAKE16-NEXT: v_add_f32_e32 v35, 0x40c00000, v35
+; GFX11-FAKE16-NEXT: v_or_b32_e32 v48, 0x400000, v36
+; GFX11-FAKE16-NEXT: v_add_nc_u32_e32 v32, 0x7fff, v32
+; GFX11-FAKE16-NEXT: v_bfe_u32 v37, v38, 16, 1
+; GFX11-FAKE16-NEXT: v_cndmask_b32_e32 v34, v34, v39, vcc_lo
+; GFX11-FAKE16-NEXT: v_bfe_u32 v49, v35, 16, 1
+; GFX11-FAKE16-NEXT: v_cmp_u_f32_e32 vcc_lo, v36, v36
+; GFX11-FAKE16-NEXT: v_and_b32_e32 v27, 0xffff, v27
+; GFX11-FAKE16-NEXT: v_lshl_or_b32 v170, v33, 16, v31
+; GFX11-FAKE16-NEXT: v_lshrrev_b32_e32 v31, 16, v34
+; GFX11-FAKE16-NEXT: v_add_nc_u32_e32 v36, v49, v35
+; GFX11-FAKE16-NEXT: v_dual_cndmask_b32 v32, v32, v48 :: v_dual_add_nc_u32 v33, v37, v38
+; GFX11-FAKE16-NEXT: v_and_b32_e32 v37, 0xffff0000, v174
+; GFX11-FAKE16-NEXT: v_cmp_u_f32_e32 vcc_lo, v35, v35
+; GFX11-FAKE16-NEXT: s_delay_alu instid0(VALU_DEP_4) | instskip(SKIP_4) | instid1(VALU_DEP_4)
+; GFX11-FAKE16-NEXT: v_add_nc_u32_e32 v34, 0x7fff, v36
+; GFX11-FAKE16-NEXT: v_or_b32_e32 v36, 0x400000, v35
+; GFX11-FAKE16-NEXT: v_lshrrev_b32_e32 v32, 16, v32
+; GFX11-FAKE16-NEXT: v_add_nc_u32_e32 v33, 0x7fff, v33
+; GFX11-FAKE16-NEXT: v_or_b32_e32 v39, 0x400000, v38
+; GFX11-FAKE16-NEXT: v_dual_add_f32 v35, 0x40c00000, v37 :: v_dual_cndmask_b32 v34, v34, v36
+; GFX11-FAKE16-NEXT: v_lshlrev_b32_e32 v36, 16, v174
+; GFX11-FAKE16-NEXT: v_cmp_u_f32_e32 vcc_lo, v38, v38
+; GFX11-FAKE16-NEXT: v_and_b32_e32 v32, 0xffff, v32
+; GFX11-FAKE16-NEXT: s_delay_alu instid0(VALU_DEP_4) | instskip(SKIP_2) | instid1(VALU_DEP_4)
+; GFX11-FAKE16-NEXT: v_bfe_u32 v37, v35, 16, 1
+; GFX11-FAKE16-NEXT: v_lshrrev_b32_e32 v34, 16, v34
+; GFX11-FAKE16-NEXT: v_dual_add_f32 v36, 0x40c00000, v36 :: v_dual_cndmask_b32 v33, v33, v39
+; GFX11-FAKE16-NEXT: v_lshl_or_b32 v169, v31, 16, v32
+; GFX11-FAKE16-NEXT: s_delay_alu instid0(VALU_DEP_4) | instskip(NEXT) | instid1(VALU_DEP_4)
+; GFX11-FAKE16-NEXT: v_add_nc_u32_e32 v37, v37, v35
+; GFX11-FAKE16-NEXT: v_and_b32_e32 v34, 0xffff, v34
+; GFX11-FAKE16-NEXT: s_delay_alu instid0(VALU_DEP_4)
+; GFX11-FAKE16-NEXT: v_bfe_u32 v31, v36, 16, 1
+; GFX11-FAKE16-NEXT: v_lshrrev_b32_e32 v33, 16, v33
+; GFX11-FAKE16-NEXT: v_and_b32_e32 v32, 0xffff0000, v171
+; GFX11-FAKE16-NEXT: v_cmp_u_f32_e32 vcc_lo, v35, v35
+; GFX11-FAKE16-NEXT: v_lshlrev_b32_e32 v38, 16, v177
+; GFX11-FAKE16-NEXT: v_add_nc_u32_e32 v31, v31, v36
+; GFX11-FAKE16-NEXT: v_lshl_or_b32 v176, v33, 16, v34
+; GFX11-FAKE16-NEXT: v_add_nc_u32_e32 v33, 0x7fff, v37
+; GFX11-FAKE16-NEXT: v_or_b32_e32 v34, 0x400000, v35
+; GFX11-FAKE16-NEXT: v_dual_add_f32 v32, 0x40c00000, v32 :: v_dual_lshlrev_b32 v37, 16, v171
+; GFX11-FAKE16-NEXT: v_add_nc_u32_e32 v31, 0x7fff, v31
+; GFX11-FAKE16-NEXT: v_add_f32_e32 v38, 0x40c00000, v38
+; GFX11-FAKE16-NEXT: s_delay_alu instid0(VALU_DEP_4)
+; GFX11-FAKE16-NEXT: v_cndmask_b32_e32 v33, v33, v34, vcc_lo
+; GFX11-FAKE16-NEXT: v_or_b32_e32 v34, 0x400000, v36
+; GFX11-FAKE16-NEXT: v_add_f32_e32 v35, 0x40c00000, v37
+; GFX11-FAKE16-NEXT: v_bfe_u32 v37, v32, 16, 1
+; GFX11-FAKE16-NEXT: v_cmp_u_f32_e32 vcc_lo, v36, v36
+; GFX11-FAKE16-NEXT: v_or_b32_e32 v39, 0x400000, v32
+; GFX11-FAKE16-NEXT: v_bfe_u32 v50, v38, 16, 1
+; GFX11-FAKE16-NEXT: v_or_b32_e32 v48, 0x400000, v35
+; GFX11-FAKE16-NEXT: v_dual_cndmask_b32 v31, v31, v34 :: v_dual_add_nc_u32 v36, v37, v32
+; GFX11-FAKE16-NEXT: v_bfe_u32 v34, v35, 16, 1
+; GFX11-FAKE16-NEXT: v_and_b32_e32 v37, 0xffff0000, v177
+; GFX11-FAKE16-NEXT: v_cmp_u_f32_e32 vcc_lo, v32, v32
+; GFX11-FAKE16-NEXT: s_delay_alu instid0(VALU_DEP_4) | instskip(SKIP_1) | instid1(VALU_DEP_4)
+; GFX11-FAKE16-NEXT: v_add_nc_u32_e32 v36, 0x7fff, v36
+; GFX11-FAKE16-NEXT: v_lshrrev_b32_e32 v31, 16, v31
+; GFX11-FAKE16-NEXT: v_dual_add_f32 v37, 0x40c00000, v37 :: v_dual_add_nc_u32 v34, v34, v35
+; GFX11-FAKE16-NEXT: v_lshrrev_b32_e32 v33, 16, v33
+; GFX11-FAKE16-NEXT: s_delay_alu instid0(VALU_DEP_4) | instskip(SKIP_1) | instid1(VALU_DEP_4)
+; GFX11-FAKE16-NEXT: v_cndmask_b32_e32 v32, v36, v39, vcc_lo
+; GFX11-FAKE16-NEXT: v_cmp_u_f32_e32 vcc_lo, v35, v35
+; GFX11-FAKE16-NEXT: v_add_nc_u32_e32 v34, 0x7fff, v34
+; GFX11-FAKE16-NEXT: v_bfe_u32 v49, v37, 16, 1
+; GFX11-FAKE16-NEXT: v_add_nc_u32_e32 v36, v50, v38
+; GFX11-FAKE16-NEXT: v_or_b32_e32 v39, 0x400000, v38
+; GFX11-FAKE16-NEXT: v_lshlrev_b32_e32 v50, 16, v184
+; GFX11-FAKE16-NEXT: s_delay_alu instid0(VALU_DEP_4) | instskip(NEXT) | instid1(VALU_DEP_4)
+; GFX11-FAKE16-NEXT: v_dual_cndmask_b32 v34, v34, v48 :: v_dual_add_nc_u32 v35, v49, v37
+; GFX11-FAKE16-NEXT: v_add_nc_u32_e32 v36, 0x7fff, v36
+; GFX11-FAKE16-NEXT: v_cmp_u_f32_e32 vcc_lo, v38, v38
+; GFX11-FAKE16-NEXT: v_and_b32_e32 v48, 0xffff0000, v184
+; GFX11-FAKE16-NEXT: s_delay_alu instid0(VALU_DEP_4)
+; GFX11-FAKE16-NEXT: v_lshrrev_b32_e32 v34, 16, v34
+; GFX11-FAKE16-NEXT: v_add_nc_u32_e32 v35, 0x7fff, v35
+; GFX11-FAKE16-NEXT: v_or_b32_e32 v49, 0x400000, v37
+; GFX11-FAKE16-NEXT: v_cndmask_b32_e32 v36, v36, v39, vcc_lo
+; GFX11-FAKE16-NEXT: v_cmp_u_f32_e32 vcc_lo, v37, v37
+; GFX11-FAKE16-NEXT: v_add_f32_e32 v37, 0x40c00000, v50
+; GFX11-FAKE16-NEXT: v_and_b32_e32 v31, 0xffff, v31
+; GFX11-FAKE16-NEXT: v_lshrrev_b32_e32 v32, 16, v32
+; GFX11-FAKE16-NEXT: v_dual_add_f32 v38, 0x40c00000, v48 :: v_dual_cndmask_b32 v35, v35, v49
+; GFX11-FAKE16-NEXT: v_lshrrev_b32_e32 v36, 16, v36
+; GFX11-FAKE16-NEXT: v_and_b32_e32 v34, 0xffff, v34
+; GFX11-FAKE16-NEXT: v_bfe_u32 v48, v37, 16, 1
+; GFX11-FAKE16-NEXT: s_delay_alu instid0(VALU_DEP_4)
+; GFX11-FAKE16-NEXT: v_bfe_u32 v39, v38, 16, 1
+; GFX11-FAKE16-NEXT: v_lshrrev_b32_e32 v35, 16, v35
+; GFX11-FAKE16-NEXT: v_and_b32_e32 v36, 0xffff, v36
+; GFX11-FAKE16-NEXT: v_lshl_or_b32 v174, v33, 16, v31
+; GFX11-FAKE16-NEXT: v_lshl_or_b32 v171, v32, 16, v34
+; GFX11-FAKE16-NEXT: v_add_nc_u32_e32 v31, v48, v37
+; GFX11-FAKE16-NEXT: v_and_b32_e32 v33, 0xffff0000, v175
+; GFX11-FAKE16-NEXT: v_lshlrev_b32_e32 v34, 16, v175
+; GFX11-FAKE16-NEXT: v_add_nc_u32_e32 v39, v39, v38
+; GFX11-FAKE16-NEXT: v_lshl_or_b32 v177, v35, 16, v36
+; GFX11-FAKE16-NEXT: v_add_nc_u32_e32 v31, 0x7fff, v31
+; GFX11-FAKE16-NEXT: v_or_b32_e32 v35, 0x400000, v37
+; GFX11-FAKE16-NEXT: v_dual_add_f32 v33, 0x40c00000, v33 :: v_dual_add_f32 v34, 0x40c00000, v34
+; GFX11-FAKE16-NEXT: v_cmp_u_f32_e32 vcc_lo, v37, v37
+; GFX11-FAKE16-NEXT: v_add_nc_u32_e32 v32, 0x7fff, v39
+; GFX11-FAKE16-NEXT: v_or_b32_e32 v36, 0x400000, v38
+; GFX11-FAKE16-NEXT: s_delay_alu instid0(VALU_DEP_4)
+; GFX11-FAKE16-NEXT: v_bfe_u32 v37, v33, 16, 1
+; GFX11-FAKE16-NEXT: v_bfe_u32 v39, v34, 16, 1
+; GFX11-FAKE16-NEXT: v_cndmask_b32_e32 v31, v31, v35, vcc_lo
+; GFX11-FAKE16-NEXT: v_and_b32_e32 v35, 0xffff0000, v173
+; GFX11-FAKE16-NEXT: v_cmp_u_f32_e32 vcc_lo, v38, v38
+; GFX11-FAKE16-NEXT: v_lshlrev_b32_e32 v48, 16, v173
+; GFX11-FAKE16-NEXT: v_or_b32_e32 v49, 0x400000, v33
+; GFX11-FAKE16-NEXT: v_lshrrev_b32_e32 v31, 16, v31
+; GFX11-FAKE16-NEXT: v_dual_add_f32 v35, 0x40c00000, v35 :: v_dual_cndmask_b32 v32, v32, v36
+; GFX11-FAKE16-NEXT: v_add_nc_u32_e32 v36, v37, v33
+; GFX11-FAKE16-NEXT: v_add_nc_u32_e32 v37, v39, v34
+; GFX11-FAKE16-NEXT: v_or_b32_e32 v39, 0x400000, v34
+; GFX11-FAKE16-NEXT: s_delay_alu instid0(VALU_DEP_4)
+; GFX11-FAKE16-NEXT: v_bfe_u32 v38, v35, 16, 1
+; GFX11-FAKE16-NEXT: v_cmp_u_f32_e32 vcc_lo, v34, v34
+; GFX11-FAKE16-NEXT: v_add_nc_u32_e32 v36, 0x7fff, v36
+; GFX11-FAKE16-NEXT: v_add_nc_u32_e32 v37, 0x7fff, v37
+; GFX11-FAKE16-NEXT: v_lshrrev_b32_e32 v32, 16, v32
+; GFX11-FAKE16-NEXT: v_add_nc_u32_e32 v38, v38, v35
+; GFX11-FAKE16-NEXT: v_and_b32_e32 v31, 0xffff, v31
+; GFX11-FAKE16-NEXT: v_lshl_or_b32 v122, v3, 16, v6
+; GFX11-FAKE16-NEXT: v_cndmask_b32_e32 v34, v37, v39, vcc_lo
+; GFX11-FAKE16-NEXT: v_cmp_u_f32_e32 vcc_lo, v33, v33
+; GFX11-FAKE16-NEXT: v_add_nc_u32_e32 v37, 0x7fff, v38
+; GFX11-FAKE16-NEXT: v_add_f32_e32 v38, 0x40c00000, v48
+; GFX11-FAKE16-NEXT: v_or_b32_e32 v39, 0x400000, v35
+; GFX11-FAKE16-NEXT: v_dual_cndmask_b32 v33, v36, v49 :: v_dual_lshlrev_b32 v48, 16, v183
+; GFX11-FAKE16-NEXT: v_cmp_u_f32_e32 vcc_lo, v35, v35
+; GFX11-FAKE16-NEXT: s_delay_alu instid0(VALU_DEP_4) | instskip(SKIP_1) | instid1(VALU_DEP_4)
+; GFX11-FAKE16-NEXT: v_bfe_u32 v36, v38, 16, 1
+; GFX11-FAKE16-NEXT: v_or_b32_e32 v49, 0x400000, v38
+; GFX11-FAKE16-NEXT: v_add_f32_e32 v48, 0x40c00000, v48
+; GFX11-FAKE16-NEXT: v_lshrrev_b32_e32 v34, 16, v34
+; GFX11-FAKE16-NEXT: v_cndmask_b32_e32 v35, v37, v39, vcc_lo
+; GFX11-FAKE16-NEXT: v_and_b32_e32 v37, 0xffff0000, v172
+; GFX11-FAKE16-NEXT: v_lshlrev_b32_e32 v39, 16, v172
+; GFX11-FAKE16-NEXT: v_add_nc_u32_e32 v36, v36, v38
+; GFX11-FAKE16-NEXT: v_cmp_u_f32_e32 vcc_lo, v38, v38
+; GFX11-FAKE16-NEXT: v_or_b32_e32 v55, 0x400000, v48
+; GFX11-FAKE16-NEXT: v_add_f32_e32 v37, 0x40c00000, v37
+; GFX11-FAKE16-NEXT: v_add_f32_e32 v39, 0x40c00000, v39
+; GFX11-FAKE16-NEXT: v_add_nc_u32_e32 v36, 0x7fff, v36
+; GFX11-FAKE16-NEXT: v_lshrrev_b32_e32 v33, 16, v33
+; GFX11-FAKE16-NEXT: v_and_b32_e32 v34, 0xffff, v34
+; GFX11-FAKE16-NEXT: v_bfe_u32 v50, v37, 16, 1
+; GFX11-FAKE16-NEXT: v_bfe_u32 v38, v39, 16, 1
+; GFX11-FAKE16-NEXT: v_cndmask_b32_e32 v36, v36, v49, vcc_lo
+; GFX11-FAKE16-NEXT: v_or_b32_e32 v54, 0x400000, v39
+; GFX11-FAKE16-NEXT: v_cmp_u_f32_e32 vcc_lo, v39, v39
+; GFX11-FAKE16-NEXT: v_dual_add_f32 v50, 0x40c00000, v51 :: v_dual_add_nc_u32 v49, v50, v37
+; GFX11-FAKE16-NEXT: v_bfe_u32 v51, v48, 16, 1
+; GFX11-FAKE16-NEXT: v_add_nc_u32_e32 v38, v38, v39
+; GFX11-FAKE16-NEXT: v_or_b32_e32 v53, 0x400000, v37
+; GFX11-FAKE16-NEXT: s_delay_alu instid0(VALU_DEP_4)
+; GFX11-FAKE16-NEXT: v_add_nc_u32_e32 v49, 0x7fff, v49
+; GFX11-FAKE16-NEXT: v_bfe_u32 v52, v50, 16, 1
+; GFX11-FAKE16-NEXT: v_add_nc_u32_e32 v51, v51, v48
+; GFX11-FAKE16-NEXT: v_add_nc_u32_e32 v38, 0x7fff, v38
+; GFX11-FAKE16-NEXT: v_lshrrev_b32_e32 v36, 16, v36
+; GFX11-FAKE16-NEXT: v_lshrrev_b32_e32 v35, 16, v35
+; GFX11-FAKE16-NEXT: v_add_nc_u32_e32 v52, v52, v50
+; GFX11-FAKE16-NEXT: s_delay_alu instid0(VALU_DEP_4) | instskip(SKIP_2) | instid1(VALU_DEP_4)
+; GFX11-FAKE16-NEXT: v_dual_cndmask_b32 v38, v38, v54 :: v_dual_add_nc_u32 v51, 0x7fff, v51
+; GFX11-FAKE16-NEXT: v_cmp_u_f32_e32 vcc_lo, v48, v48
+; GFX11-FAKE16-NEXT: v_and_b32_e32 v36, 0xffff, v36
+; GFX11-FAKE16-NEXT: v_add_nc_u32_e32 v39, 0x7fff, v52
+; GFX11-FAKE16-NEXT: v_or_b32_e32 v52, 0x400000, v50
+; GFX11-FAKE16-NEXT: v_lshrrev_b32_e32 v38, 16, v38
+; GFX11-FAKE16-NEXT: v_cndmask_b32_e32 v48, v51, v55, vcc_lo
+; GFX11-FAKE16-NEXT: v_cmp_u_f32_e32 vcc_lo, v37, v37
+; GFX11-FAKE16-NEXT: v_lshl_or_b32 v184, v32, 16, v31
+; GFX11-FAKE16-NEXT: v_lshl_or_b32 v175, v33, 16, v34
+; GFX11-FAKE16-NEXT: v_and_b32_e32 v38, 0xffff, v38
+; GFX11-FAKE16-NEXT: v_lshrrev_b32_e32 v48, 16, v48
+; GFX11-FAKE16-NEXT: v_cndmask_b32_e32 v37, v49, v53, vcc_lo
+; GFX11-FAKE16-NEXT: v_cmp_u_f32_e32 vcc_lo, v50, v50
+; GFX11-FAKE16-NEXT: v_lshl_or_b32 v173, v35, 16, v36
+; GFX11-FAKE16-NEXT: v_lshl_or_b32 v97, v8, 16, v10
+; GFX11-FAKE16-NEXT: v_and_b32_e32 v48, 0xffff, v48
+; GFX11-FAKE16-NEXT: v_lshrrev_b32_e32 v37, 16, v37
+; GFX11-FAKE16-NEXT: v_cndmask_b32_e32 v39, v39, v52, vcc_lo
+; GFX11-FAKE16-NEXT: v_lshl_or_b32 v86, v9, 16, v12
+; GFX11-FAKE16-NEXT: v_lshl_or_b32 v76, v11, 16, v13
+; GFX11-FAKE16-NEXT: v_lshl_or_b32 v67, v14, 16, v17
+; GFX11-FAKE16-NEXT: v_lshl_or_b32 v172, v37, 16, v38
+; GFX11-FAKE16-NEXT: v_lshrrev_b32_e32 v39, 16, v39
+; GFX11-FAKE16-NEXT: v_lshl_or_b32 v59, v16, 16, v19
+; GFX11-FAKE16-NEXT: v_lshl_or_b32 v52, v18, 16, v20
+; GFX11-FAKE16-NEXT: v_lshl_or_b32 v46, v21, 16, v23
+; GFX11-FAKE16-NEXT: v_lshl_or_b32 v41, v22, 16, v25
+; GFX11-FAKE16-NEXT: v_lshl_or_b32 v183, v39, 16, v48
+; GFX11-FAKE16-NEXT: v_lshl_or_b32 v37, v24, 16, v27
+; GFX11-FAKE16-NEXT: v_lshl_or_b32 v34, v26, 16, v28
+; GFX11-FAKE16-NEXT: v_lshl_or_b32 v32, v29, 16, v30
+; GFX11-FAKE16-NEXT: .LBB79_3: ; %end
+; GFX11-FAKE16-NEXT: v_dual_mov_b32 v3, v41 :: v_dual_mov_b32 v4, v46
+; GFX11-FAKE16-NEXT: v_dual_mov_b32 v6, v59 :: v_dual_mov_b32 v9, v86
+; GFX11-FAKE16-NEXT: v_dual_mov_b32 v7, v67 :: v_dual_mov_b32 v8, v76
+; GFX11-FAKE16-NEXT: v_dual_mov_b32 v10, v97 :: v_dual_mov_b32 v13, v136
+; GFX11-FAKE16-NEXT: v_dual_mov_b32 v11, v109 :: v_dual_mov_b32 v12, v122
+; GFX11-FAKE16-NEXT: v_dual_mov_b32 v14, v151 :: v_dual_mov_b32 v17, v172
+; GFX11-FAKE16-NEXT: v_dual_mov_b32 v18, v173 :: v_dual_mov_b32 v19, v175
+; GFX11-FAKE16-NEXT: v_dual_mov_b32 v20, v184 :: v_dual_mov_b32 v23, v174
+; GFX11-FAKE16-NEXT: v_dual_mov_b32 v22, v171 :: v_dual_mov_b32 v25, v169
+; GFX11-FAKE16-NEXT: v_dual_mov_b32 v26, v170 :: v_dual_mov_b32 v29, v180
+; GFX11-FAKE16-NEXT: s_clause 0x1f
+; GFX11-FAKE16-NEXT: scratch_load_b32 v184, off, s32
+; GFX11-FAKE16-NEXT: scratch_load_b32 v175, off, s32 offset:4
+; GFX11-FAKE16-NEXT: scratch_load_b32 v174, off, s32 offset:8
+; GFX11-FAKE16-NEXT: scratch_load_b32 v173, off, s32 offset:12
+; GFX11-FAKE16-NEXT: scratch_load_b32 v172, off, s32 offset:16
+; GFX11-FAKE16-NEXT: scratch_load_b32 v171, off, s32 offset:20
+; GFX11-FAKE16-NEXT: scratch_load_b32 v170, off, s32 offset:24
+; GFX11-FAKE16-NEXT: scratch_load_b32 v169, off, s32 offset:28
+; GFX11-FAKE16-NEXT: scratch_load_b32 v168, off, s32 offset:32
+; GFX11-FAKE16-NEXT: scratch_load_b32 v159, off, s32 offset:36
+; GFX11-FAKE16-NEXT: scratch_load_b32 v158, off, s32 offset:40
+; GFX11-FAKE16-NEXT: scratch_load_b32 v157, off, s32 offset:44
+; GFX11-FAKE16-NEXT: scratch_load_b32 v156, off, s32 offset:48
+; GFX11-FAKE16-NEXT: scratch_load_b32 v155, off, s32 offset:52
+; GFX11-FAKE16-NEXT: scratch_load_b32 v154, off, s32 offset:56
+; GFX11-FAKE16-NEXT: scratch_load_b32 v153, off, s32 offset:60
+; GFX11-FAKE16-NEXT: scratch_load_b32 v152, off, s32 offset:64
+; GFX11-FAKE16-NEXT: scratch_load_b32 v143, off, s32 offset:68
+; GFX11-FAKE16-NEXT: scratch_load_b32 v142, off, s32 offset:72
+; GFX11-FAKE16-NEXT: scratch_load_b32 v141, off, s32 offset:76
+; GFX11-FAKE16-NEXT: scratch_load_b32 v140, off, s32 offset:80
+; GFX11-FAKE16-NEXT: scratch_load_b32 v139, off, s32 offset:84
+; GFX11-FAKE16-NEXT: scratch_load_b32 v138, off, s32 offset:88
+; GFX11-FAKE16-NEXT: scratch_load_b32 v137, off, s32 offset:92
+; GFX11-FAKE16-NEXT: scratch_load_b32 v136, off, s32 offset:96
+; GFX11-FAKE16-NEXT: scratch_load_b32 v127, off, s32 offset:100
+; GFX11-FAKE16-NEXT: scratch_load_b32 v126, off, s32 offset:104
+; GFX11-FAKE16-NEXT: scratch_load_b32 v125, off, s32 offset:108
+; GFX11-FAKE16-NEXT: scratch_load_b32 v124, off, s32 offset:112
+; GFX11-FAKE16-NEXT: scratch_load_b32 v123, off, s32 offset:116
+; GFX11-FAKE16-NEXT: scratch_load_b32 v122, off, s32 offset:120
+; GFX11-FAKE16-NEXT: scratch_load_b32 v121, off, s32 offset:124
+; GFX11-FAKE16-NEXT: s_clause 0x1f
+; GFX11-FAKE16-NEXT: scratch_load_b32 v120, off, s32 offset:128
+; GFX11-FAKE16-NEXT: scratch_load_b32 v111, off, s32 offset:132
+; GFX11-FAKE16-NEXT: scratch_load_b32 v110, off, s32 offset:136
+; GFX11-FAKE16-NEXT: scratch_load_b32 v109, off, s32 offset:140
+; GFX11-FAKE16-NEXT: scratch_load_b32 v108, off, s32 offset:144
+; GFX11-FAKE16-NEXT: scratch_load_b32 v107, off, s32 offset:148
+; GFX11-FAKE16-NEXT: scratch_load_b32 v106, off, s32 offset:152
+; GFX11-FAKE16-NEXT: scratch_load_b32 v105, off, s32 offset:156
+; GFX11-FAKE16-NEXT: scratch_load_b32 v104, off, s32 offset:160
+; GFX11-FAKE16-NEXT: scratch_load_b32 v95, off, s32 offset:164
+; GFX11-FAKE16-NEXT: scratch_load_b32 v94, off, s32 offset:168
+; GFX11-FAKE16-NEXT: scratch_load_b32 v93, off, s32 offset:172
+; GFX11-FAKE16-NEXT: scratch_load_b32 v92, off, s32 offset:176
+; GFX11-FAKE16-NEXT: scratch_load_b32 v91, off, s32 offset:180
+; GFX11-FAKE16-NEXT: scratch_load_b32 v90, off, s32 offset:184
+; GFX11-FAKE16-NEXT: scratch_load_b32 v89, off, s32 offset:188
+; GFX11-FAKE16-NEXT: scratch_load_b32 v88, off, s32 offset:192
+; GFX11-FAKE16-NEXT: scratch_load_b32 v79, off, s32 offset:196
+; GFX11-FAKE16-NEXT: scratch_load_b32 v78, off, s32 offset:200
+; GFX11-FAKE16-NEXT: scratch_load_b32 v77, off, s32 offset:204
+; GFX11-FAKE16-NEXT: scratch_load_b32 v76, off, s32 offset:208
+; GFX11-FAKE16-NEXT: scratch_load_b32 v75, off, s32 offset:212
+; GFX11-FAKE16-NEXT: scratch_load_b32 v74, off, s32 offset:216
+; GFX11-FAKE16-NEXT: scratch_load_b32 v73, off, s32 offset:220
+; GFX11-FAKE16-NEXT: scratch_load_b32 v72, off, s32 offset:224
+; GFX11-FAKE16-NEXT: scratch_load_b32 v63, off, s32 offset:228
+; GFX11-FAKE16-NEXT: scratch_load_b32 v62, off, s32 offset:232
+; GFX11-FAKE16-NEXT: scratch_load_b32 v61, off, s32 offset:236
+; GFX11-FAKE16-NEXT: scratch_load_b32 v60, off, s32 offset:240
+; GFX11-FAKE16-NEXT: scratch_load_b32 v59, off, s32 offset:244
+; GFX11-FAKE16-NEXT: scratch_load_b32 v58, off, s32 offset:248
+; GFX11-FAKE16-NEXT: scratch_load_b32 v57, off, s32 offset:252
+; GFX11-FAKE16-NEXT: s_clause 0x8
+; GFX11-FAKE16-NEXT: scratch_load_b32 v56, off, s32 offset:256
+; GFX11-FAKE16-NEXT: scratch_load_b32 v47, off, s32 offset:260
+; GFX11-FAKE16-NEXT: scratch_load_b32 v46, off, s32 offset:264
+; GFX11-FAKE16-NEXT: scratch_load_b32 v45, off, s32 offset:268
+; GFX11-FAKE16-NEXT: scratch_load_b32 v44, off, s32 offset:272
+; GFX11-FAKE16-NEXT: scratch_load_b32 v43, off, s32 offset:276
+; GFX11-FAKE16-NEXT: scratch_load_b32 v42, off, s32 offset:280
+; GFX11-FAKE16-NEXT: scratch_load_b32 v41, off, s32 offset:284
+; GFX11-FAKE16-NEXT: scratch_load_b32 v40, off, s32 offset:288
+; GFX11-FAKE16-NEXT: v_dual_mov_b32 v0, v32 :: v_dual_mov_b32 v1, v34
+; GFX11-FAKE16-NEXT: v_dual_mov_b32 v2, v37 :: v_dual_mov_b32 v5, v52
+; GFX11-FAKE16-NEXT: v_dual_mov_b32 v16, v183 :: v_dual_mov_b32 v21, v177
+; GFX11-FAKE16-NEXT: v_dual_mov_b32 v24, v176 :: v_dual_mov_b32 v27, v181
+; GFX11-FAKE16-NEXT: v_mov_b32_e32 v28, v182
+; GFX11-FAKE16-NEXT: v_dual_mov_b32 v30, v179 :: v_dual_mov_b32 v31, v178
+; GFX11-FAKE16-NEXT: s_waitcnt vmcnt(0)
+; GFX11-FAKE16-NEXT: s_setpc_b64 s[30:31]
+; GFX11-FAKE16-NEXT: .LBB79_4:
+; GFX11-FAKE16-NEXT: ; implicit-def: $vgpr32_vgpr33_vgpr34_vgpr35_vgpr36_vgpr37_vgpr38_vgpr39_vgpr40_vgpr41_vgpr42_vgpr43_vgpr44_vgpr45_vgpr46_vgpr47_vgpr48_vgpr49_vgpr50_vgpr51_vgpr52_vgpr53_vgpr54_vgpr55_vgpr56_vgpr57_vgpr58_vgpr59_vgpr60_vgpr61_vgpr62_vgpr63
+; GFX11-FAKE16-NEXT: ; implicit-def: $vgpr33_vgpr34_vgpr35_vgpr36_vgpr37_vgpr38_vgpr39_vgpr40_vgpr41_vgpr42_vgpr43_vgpr44_vgpr45_vgpr46_vgpr47_vgpr48_vgpr49_vgpr50_vgpr51_vgpr52_vgpr53_vgpr54_vgpr55_vgpr56_vgpr57_vgpr58_vgpr59_vgpr60_vgpr61_vgpr62_vgpr63_vgpr64
+; GFX11-FAKE16-NEXT: ; implicit-def: $vgpr0_vgpr1_vgpr2_vgpr3_vgpr4_vgpr5_vgpr6_vgpr7_vgpr8_vgpr9_vgpr10_vgpr11_vgpr12_vgpr13_vgpr14_vgpr15_vgpr16_vgpr17_vgpr18_vgpr19_vgpr20_vgpr21_vgpr22_vgpr23_vgpr24_vgpr25_vgpr26_vgpr27_vgpr28_vgpr29_vgpr30_vgpr31
+; GFX11-FAKE16-NEXT: ; implicit-def: $vgpr35_vgpr36_vgpr37_vgpr38_vgpr39_vgpr40_vgpr41_vgpr42_vgpr43_vgpr44_vgpr45_vgpr46_vgpr47_vgpr48_vgpr49_vgpr50_vgpr51_vgpr52_vgpr53_vgpr54_vgpr55_vgpr56_vgpr57_vgpr58_vgpr59_vgpr60_vgpr61_vgpr62_vgpr63_vgpr64_vgpr65_vgpr66
+; GFX11-FAKE16-NEXT: ; implicit-def: $vgpr38_vgpr39_vgpr40_vgpr41_vgpr42_vgpr43_vgpr44_vgpr45_vgpr46_vgpr47_vgpr48_vgpr49_vgpr50_vgpr51_vgpr52_vgpr53_vgpr54_vgpr55_vgpr56_vgpr57_vgpr58_vgpr59_vgpr60_vgpr61_vgpr62_vgpr63_vgpr64_vgpr65_vgpr66_vgpr67_vgpr68_vgpr69
+; GFX11-FAKE16-NEXT: ; implicit-def: $vgpr42_vgpr43_vgpr44_vgpr45_vgpr46_vgpr47_vgpr48_vgpr49_vgpr50_vgpr51_vgpr52_vgpr53_vgpr54_vgpr55_vgpr56_vgpr57_vgpr58_vgpr59_vgpr60_vgpr61_vgpr62_vgpr63_vgpr64_vgpr65_vgpr66_vgpr67_vgpr68_vgpr69_vgpr70_vgpr71_vgpr72_vgpr73
+; GFX11-FAKE16-NEXT: ; implicit-def: $vgpr47_vgpr48_vgpr49_vgpr50_vgpr51_vgpr52_vgpr53_vgpr54_vgpr55_vgpr56_vgpr57_vgpr58_vgpr59_vgpr60_vgpr61_vgpr62_vgpr63_vgpr64_vgpr65_vgpr66_vgpr67_vgpr68_vgpr69_vgpr70_vgpr71_vgpr72_vgpr73_vgpr74_vgpr75_vgpr76_vgpr77_vgpr78
+; GFX11-FAKE16-NEXT: ; implicit-def: $vgpr53_vgpr54_vgpr55_vgpr56_vgpr57_vgpr58_vgpr59_vgpr60_vgpr61_vgpr62_vgpr63_vgpr64_vgpr65_vgpr66_vgpr67_vgpr68_vgpr69_vgpr70_vgpr71_vgpr72_vgpr73_vgpr74_vgpr75_vgpr76_vgpr77_vgpr78_vgpr79_vgpr80_vgpr81_vgpr82_vgpr83_vgpr84
+; GFX11-FAKE16-NEXT: ; implicit-def: $vgpr60_vgpr61_vgpr62_vgpr63_vgpr64_vgpr65_vgpr66_vgpr67_vgpr68_vgpr69_vgpr70_vgpr71_vgpr72_vgpr73_vgpr74_vgpr75_vgpr76_vgpr77_vgpr78_vgpr79_vgpr80_vgpr81_vgpr82_vgpr83_vgpr84_vgpr85_vgpr86_vgpr87_vgpr88_vgpr89_vgpr90_vgpr91
+; GFX11-FAKE16-NEXT: ; implicit-def: $vgpr68_vgpr69_vgpr70_vgpr71_vgpr72_vgpr73_vgpr74_vgpr75_vgpr76_vgpr77_vgpr78_vgpr79_vgpr80_vgpr81_vgpr82_vgpr83_vgpr84_vgpr85_vgpr86_vgpr87_vgpr88_vgpr89_vgpr90_vgpr91_vgpr92_vgpr93_vgpr94_vgpr95_vgpr96_vgpr97_vgpr98_vgpr99
+; GFX11-FAKE16-NEXT: ; implicit-def: $vgpr77_vgpr78_vgpr79_vgpr80_vgpr81_vgpr82_vgpr83_vgpr84_vgpr85_vgpr86_vgpr87_vgpr88_vgpr89_vgpr90_vgpr91_vgpr92_vgpr93_vgpr94_vgpr95_vgpr96_vgpr97_vgpr98_vgpr99_vgpr100_vgpr101_vgpr102_vgpr103_vgpr104_vgpr105_vgpr106_vgpr107_vgpr108
+; GFX11-FAKE16-NEXT: ; implicit-def: $vgpr87_vgpr88_vgpr89_vgpr90_vgpr91_vgpr92_vgpr93_vgpr94_vgpr95_vgpr96_vgpr97_vgpr98_vgpr99_vgpr100_vgpr101_vgpr102_vgpr103_vgpr104_vgpr105_vgpr106_vgpr107_vgpr108_vgpr109_vgpr110_vgpr111_vgpr112_vgpr113_vgpr114_vgpr115_vgpr116_vgpr117_vgpr118
+; GFX11-FAKE16-NEXT: ; implicit-def: $vgpr98_vgpr99_vgpr100_vgpr101_vgpr102_vgpr103_vgpr104_vgpr105_vgpr106_vgpr107_vgpr108_vgpr109_vgpr110_vgpr111_vgpr112_vgpr113_vgpr114_vgpr115_vgpr116_vgpr117_vgpr118_vgpr119_vgpr120_vgpr121_vgpr122_vgpr123_vgpr124_vgpr125_vgpr126_vgpr127_vgpr128_vgpr129
+; GFX11-FAKE16-NEXT: ; implicit-def: $vgpr110_vgpr111_vgpr112_vgpr113_vgpr114_vgpr115_vgpr116_vgpr117_vgpr118_vgpr119_vgpr120_vgpr121_vgpr122_vgpr123_vgpr124_vgpr125_vgpr126_vgpr127_vgpr128_vgpr129_vgpr130_vgpr131_vgpr132_vgpr133_vgpr134_vgpr135_vgpr136_vgpr137_vgpr138_vgpr139_vgpr140_vgpr141
+; GFX11-FAKE16-NEXT: ; implicit-def: $vgpr123_vgpr124_vgpr125_vgpr126_vgpr127_vgpr128_vgpr129_vgpr130_vgpr131_vgpr132_vgpr133_vgpr134_vgpr135_vgpr136_vgpr137_vgpr138_vgpr139_vgpr140_vgpr141_vgpr142_vgpr143_vgpr144_vgpr145_vgpr146_vgpr147_vgpr148_vgpr149_vgpr150_vgpr151_vgpr152_vgpr153_vgpr154
+; GFX11-FAKE16-NEXT: ; implicit-def: $vgpr137_vgpr138_vgpr139_vgpr140_vgpr141_vgpr142_vgpr143_vgpr144_vgpr145_vgpr146_vgpr147_vgpr148_vgpr149_vgpr150_vgpr151_vgpr152_vgpr153_vgpr154_vgpr155_vgpr156_vgpr157_vgpr158_vgpr159_vgpr160_vgpr161_vgpr162_vgpr163_vgpr164_vgpr165_vgpr166_vgpr167_vgpr168
+; GFX11-FAKE16-NEXT: s_branch .LBB79_2
%cmp = icmp eq i32 %b, 0
br i1 %cmp, label %cmp.true, label %cmp.false
@@ -154174,9 +158070,10 @@ define inreg <64 x bfloat> @bitcast_v128i8_to_v64bf16_scalar(<128 x i8> inreg %a
; GFX11-TRUE16-NEXT: s_lshl_b32 s6, s29, 8
; GFX11-TRUE16-NEXT: s_and_b32 s7, s2, 0xff
; GFX11-TRUE16-NEXT: s_or_b32 s5, s5, s6
-; GFX11-TRUE16-NEXT: s_lshl_b32 s6, s1, 8
-; GFX11-TRUE16-NEXT: v_and_b32_e64 v5, 0xffff, s5
+; GFX11-TRUE16-NEXT: s_delay_alu instid0(SALU_CYCLE_1)
+; GFX11-TRUE16-NEXT: v_dual_mov_b32 v4, s5 :: v_dual_and_b32 v1, 0xff, v35
; GFX11-TRUE16-NEXT: s_and_b32 s5, s0, 0xff
+; GFX11-TRUE16-NEXT: s_lshl_b32 s6, s1, 8
; GFX11-TRUE16-NEXT: s_lshl_b32 s8, s3, 8
; GFX11-TRUE16-NEXT: s_or_b32 s5, s5, s6
; GFX11-TRUE16-NEXT: s_or_b32 s6, s7, s8
@@ -154192,6 +158089,7 @@ define inreg <64 x bfloat> @bitcast_v128i8_to_v64bf16_scalar(<128 x i8> inreg %a
; GFX11-TRUE16-NEXT: s_lshl_b32 s8, s21, 8
; GFX11-TRUE16-NEXT: s_and_b32 s9, s22, 0xff
; GFX11-TRUE16-NEXT: s_lshl_b32 s10, s23, 8
+; GFX11-TRUE16-NEXT: v_and_b32_e32 v0, 0xff, v32
; GFX11-TRUE16-NEXT: s_or_b32 s7, s7, s8
; GFX11-TRUE16-NEXT: s_or_b32 s8, s9, s10
; GFX11-TRUE16-NEXT: s_and_b32 s9, s24, 0xff
@@ -154202,201 +158100,169 @@ define inreg <64 x bfloat> @bitcast_v128i8_to_v64bf16_scalar(<128 x i8> inreg %a
; GFX11-TRUE16-NEXT: s_or_b32 s10, s11, s12
; GFX11-TRUE16-NEXT: s_pack_ll_b32_b16 s7, s7, s8
; GFX11-TRUE16-NEXT: s_pack_ll_b32_b16 s8, s9, s10
-; GFX11-TRUE16-NEXT: v_and_b32_e32 v2, 0xff, v36
-; GFX11-TRUE16-NEXT: v_and_b32_e32 v0, 0xff, v32
-; GFX11-TRUE16-NEXT: v_and_b32_e32 v1, 0xff, v35
-; GFX11-TRUE16-NEXT: v_and_b32_e32 v4, 0xff, v34
-; GFX11-TRUE16-NEXT: v_and_b32_e32 v3, 0xff, v33
-; GFX11-TRUE16-NEXT: v_or_b32_e32 v2, v2, v68
+; GFX11-TRUE16-NEXT: v_and_b32_e32 v2, 0xff, v33
; GFX11-TRUE16-NEXT: v_or_b32_e32 v0, v0, v64
-; GFX11-TRUE16-NEXT: v_or_b32_e32 v1, v1, v66
-; GFX11-TRUE16-NEXT: v_or_b32_e32 v6, v4, v67
-; GFX11-TRUE16-NEXT: v_or_b32_e32 v3, v3, v65
-; GFX11-TRUE16-NEXT: v_and_b32_e32 v2, 0xffff, v2
-; GFX11-TRUE16-NEXT: v_lshl_or_b32 v4, v0, 16, v5
-; GFX11-TRUE16-NEXT: v_and_b32_e32 v1, 0xffff, v1
-; GFX11-TRUE16-NEXT: v_and_b32_e32 v0, 0xff, v38
-; GFX11-TRUE16-NEXT: v_and_b32_e32 v7, 0xff, v39
-; GFX11-TRUE16-NEXT: v_lshl_or_b32 v6, v6, 16, v2
-; GFX11-TRUE16-NEXT: v_and_b32_e32 v2, 0xff, v49
-; GFX11-TRUE16-NEXT: v_lshl_or_b32 v5, v3, 16, v1
+; GFX11-TRUE16-NEXT: v_or_b32_e32 v5, v1, v66
; GFX11-TRUE16-NEXT: v_and_b32_e32 v1, 0xff, v37
-; GFX11-TRUE16-NEXT: v_or_b32_e32 v0, v0, v70
+; GFX11-TRUE16-NEXT: v_and_b32_e32 v3, 0xff, v34
+; GFX11-TRUE16-NEXT: v_or_b32_e32 v2, v2, v65
+; GFX11-TRUE16-NEXT: v_mov_b16_e32 v4.h, v0.l
+; GFX11-TRUE16-NEXT: v_and_b32_e32 v0, 0xff, v36
+; GFX11-TRUE16-NEXT: s_waitcnt vmcnt(7)
+; GFX11-TRUE16-NEXT: v_and_b32_e32 v16, 0xff, v118
+; GFX11-TRUE16-NEXT: v_or_b32_e32 v3, v3, v67
+; GFX11-TRUE16-NEXT: v_mov_b16_e32 v5.h, v2.l
+; GFX11-TRUE16-NEXT: v_and_b32_e32 v2, 0xff, v38
+; GFX11-TRUE16-NEXT: v_or_b32_e32 v6, v0, v68
+; GFX11-TRUE16-NEXT: v_or_b32_e32 v0, v1, v69
+; GFX11-TRUE16-NEXT: v_mov_b16_e32 v6.h, v3.l
+; GFX11-TRUE16-NEXT: v_and_b32_e32 v1, 0xff, v39
+; GFX11-TRUE16-NEXT: v_or_b32_e32 v7, v2, v70
; GFX11-TRUE16-NEXT: v_and_b32_e32 v3, 0xff, v50
-; GFX11-TRUE16-NEXT: v_or_b32_e32 v2, v2, v71
-; GFX11-TRUE16-NEXT: v_and_b32_e32 v8, 0xff, v48
-; GFX11-TRUE16-NEXT: v_or_b32_e32 v1, v1, v69
-; GFX11-TRUE16-NEXT: v_and_b32_e32 v0, 0xffff, v0
-; GFX11-TRUE16-NEXT: v_or_b32_e32 v3, v3, v82
-; GFX11-TRUE16-NEXT: v_or_b32_e32 v9, v7, v80
-; GFX11-TRUE16-NEXT: v_and_b32_e32 v2, 0xffff, v2
-; GFX11-TRUE16-NEXT: v_or_b32_e32 v10, v8, v81
-; GFX11-TRUE16-NEXT: v_lshl_or_b32 v7, v1, 16, v0
-; GFX11-TRUE16-NEXT: v_and_b32_e32 v3, 0xffff, v3
-; GFX11-TRUE16-NEXT: v_and_b32_e32 v0, 0xff, v53
-; GFX11-TRUE16-NEXT: v_lshl_or_b32 v8, v9, 16, v2
-; GFX11-TRUE16-NEXT: v_and_b32_e32 v2, 0xff, v55
-; GFX11-TRUE16-NEXT: v_and_b32_e32 v1, 0xff, v51
-; GFX11-TRUE16-NEXT: v_lshl_or_b32 v9, v10, 16, v3
-; GFX11-TRUE16-NEXT: v_or_b32_e32 v0, v0, v84
-; GFX11-TRUE16-NEXT: v_and_b32_e32 v3, 0xff, v52
-; GFX11-TRUE16-NEXT: v_and_b32_e32 v10, 0xff, v54
-; GFX11-TRUE16-NEXT: v_or_b32_e32 v2, v2, v86
-; GFX11-TRUE16-NEXT: v_or_b32_e32 v1, v1, v83
-; GFX11-TRUE16-NEXT: v_and_b32_e32 v0, 0xffff, v0
+; GFX11-TRUE16-NEXT: v_mov_b16_e32 v7.h, v0.l
+; GFX11-TRUE16-NEXT: v_and_b32_e32 v0, 0xff, v48
+; GFX11-TRUE16-NEXT: v_and_b32_e32 v2, 0xff, v49
+; GFX11-TRUE16-NEXT: v_or_b32_e32 v1, v1, v80
+; GFX11-TRUE16-NEXT: v_or_b32_e32 v9, v3, v82
+; GFX11-TRUE16-NEXT: v_and_b32_e32 v3, 0xff, v55
+; GFX11-TRUE16-NEXT: v_or_b32_e32 v0, v0, v81
+; GFX11-TRUE16-NEXT: v_or_b32_e32 v8, v2, v71
+; GFX11-TRUE16-NEXT: v_and_b32_e32 v2, 0xff, v51
+; GFX11-TRUE16-NEXT: v_mov_b16_e32 v8.h, v1.l
+; GFX11-TRUE16-NEXT: v_and_b32_e32 v1, 0xff, v53
+; GFX11-TRUE16-NEXT: v_mov_b16_e32 v9.h, v0.l
+; GFX11-TRUE16-NEXT: v_and_b32_e32 v0, 0xff, v52
+; GFX11-TRUE16-NEXT: v_or_b32_e32 v2, v2, v83
+; GFX11-TRUE16-NEXT: v_or_b32_e32 v11, v3, v86
+; GFX11-TRUE16-NEXT: v_or_b32_e32 v10, v1, v84
; GFX11-TRUE16-NEXT: s_waitcnt vmcnt(0)
-; GFX11-TRUE16-NEXT: v_and_b32_e32 v11, 0xff, v96
-; GFX11-TRUE16-NEXT: v_or_b32_e32 v3, v3, v85
-; GFX11-TRUE16-NEXT: v_or_b32_e32 v12, v10, v97
-; GFX11-TRUE16-NEXT: v_and_b32_e32 v2, 0xffff, v2
-; GFX11-TRUE16-NEXT: v_lshl_or_b32 v10, v1, 16, v0
-; GFX11-TRUE16-NEXT: v_or_b32_e32 v0, v11, v87
-; GFX11-TRUE16-NEXT: v_and_b32_e32 v1, 0xff, v99
-; GFX11-TRUE16-NEXT: v_and_b32_e32 v12, 0xffff, v12
-; GFX11-TRUE16-NEXT: v_lshl_or_b32 v11, v3, 16, v2
-; GFX11-TRUE16-NEXT: v_and_b32_e32 v3, 0xff, v103
-; GFX11-TRUE16-NEXT: v_and_b32_e32 v14, 0xff, v114
-; GFX11-TRUE16-NEXT: v_and_b32_e32 v2, 0xff, v98
-; GFX11-TRUE16-NEXT: v_lshl_or_b32 v12, v0, 16, v12
+; GFX11-TRUE16-NEXT: v_and_b32_e32 v1, 0xff, v96
+; GFX11-TRUE16-NEXT: v_or_b32_e32 v0, v0, v85
+; GFX11-TRUE16-NEXT: v_mov_b16_e32 v10.h, v2.l
+; GFX11-TRUE16-NEXT: v_and_b32_e32 v2, 0xff, v54
+; GFX11-TRUE16-NEXT: v_and_b32_e32 v3, 0xff, v98
+; GFX11-TRUE16-NEXT: v_or_b32_e32 v1, v1, v87
+; GFX11-TRUE16-NEXT: v_mov_b16_e32 v11.h, v0.l
+; GFX11-TRUE16-NEXT: v_and_b32_e32 v0, 0xff, v99
+; GFX11-TRUE16-NEXT: v_or_b32_e32 v12, v2, v97
+; GFX11-TRUE16-NEXT: v_or_b32_e32 v2, v3, v102
+; GFX11-TRUE16-NEXT: v_mov_b16_e32 v12.h, v1.l
+; GFX11-TRUE16-NEXT: v_and_b32_e32 v1, 0xff, v103
+; GFX11-TRUE16-NEXT: v_or_b32_e32 v13, v0, v101
; GFX11-TRUE16-NEXT: v_and_b32_e32 v0, 0xff, v100
-; GFX11-TRUE16-NEXT: v_or_b32_e32 v3, v3, v113
-; GFX11-TRUE16-NEXT: v_or_b32_e32 v1, v1, v101
-; GFX11-TRUE16-NEXT: v_and_b32_e32 v13, 0xff, v116
-; GFX11-TRUE16-NEXT: v_or_b32_e32 v17, v14, v128
+; GFX11-TRUE16-NEXT: v_mov_b16_e32 v13.h, v2.l
+; GFX11-TRUE16-NEXT: v_and_b32_e32 v2, 0xff, v114
+; GFX11-TRUE16-NEXT: v_or_b32_e32 v14, v1, v113
+; GFX11-TRUE16-NEXT: v_and_b32_e32 v1, 0xff, v117
; GFX11-TRUE16-NEXT: v_or_b32_e32 v0, v0, v112
-; GFX11-TRUE16-NEXT: v_and_b32_e32 v3, 0xffff, v3
-; GFX11-TRUE16-NEXT: v_and_b32_e32 v14, 0xff, v117
-; GFX11-TRUE16-NEXT: v_or_b32_e32 v2, v2, v102
-; GFX11-TRUE16-NEXT: v_and_b32_e32 v1, 0xffff, v1
-; GFX11-TRUE16-NEXT: v_or_b32_e32 v13, v13, v130
-; GFX11-TRUE16-NEXT: v_and_b32_e32 v16, 0xff, v133
-; GFX11-TRUE16-NEXT: v_or_b32_e32 v20, v14, v132
-; GFX11-TRUE16-NEXT: v_lshl_or_b32 v14, v0, 16, v3
+; GFX11-TRUE16-NEXT: v_and_b32_e32 v3, 0xff, v116
+; GFX11-TRUE16-NEXT: v_or_b32_e32 v2, v2, v128
+; GFX11-TRUE16-NEXT: v_or_b32_e32 v16, v16, v134
+; GFX11-TRUE16-NEXT: v_or_b32_e32 v1, v1, v132
+; GFX11-TRUE16-NEXT: v_mov_b16_e32 v14.h, v0.l
+; GFX11-TRUE16-NEXT: v_and_b32_e32 v0, 0xff, v133
+; GFX11-TRUE16-NEXT: v_or_b32_e32 v15, v3, v130
+; GFX11-TRUE16-NEXT: v_mov_b16_e32 v15.h, v2.l
+; GFX11-TRUE16-NEXT: v_mov_b16_e32 v16.h, v1.l
+; GFX11-TRUE16-NEXT: v_mov_b32_e32 v1, s6
+; GFX11-TRUE16-NEXT: v_or_b32_e32 v17, v0, v161
+; GFX11-TRUE16-NEXT: v_and_b32_e32 v0, 0xff, v129
+; GFX11-TRUE16-NEXT: v_dual_mov_b32 v2, s7 :: v_dual_mov_b32 v3, s8
+; GFX11-TRUE16-NEXT: s_delay_alu instid0(VALU_DEP_2) | instskip(NEXT) | instid1(VALU_DEP_1)
+; GFX11-TRUE16-NEXT: v_or_b32_e32 v0, v0, v147
+; GFX11-TRUE16-NEXT: v_mov_b16_e32 v17.h, v0.l
; GFX11-TRUE16-NEXT: v_and_b32_e32 v0, 0xff, v148
-; GFX11-TRUE16-NEXT: v_and_b32_e32 v15, 0xff, v118
-; GFX11-TRUE16-NEXT: v_and_b32_e32 v18, 0xff, v129
-; GFX11-TRUE16-NEXT: v_or_b32_e32 v16, v16, v161
-; GFX11-TRUE16-NEXT: v_and_b32_e32 v19, 0xffff, v13
-; GFX11-TRUE16-NEXT: v_lshl_or_b32 v13, v2, 16, v1
-; GFX11-TRUE16-NEXT: v_or_b32_e32 v0, v0, v166
-; GFX11-TRUE16-NEXT: v_and_b32_e32 v1, 0xff, v144
-; GFX11-TRUE16-NEXT: v_or_b32_e32 v15, v15, v134
-; GFX11-TRUE16-NEXT: v_or_b32_e32 v18, v18, v147
-; GFX11-TRUE16-NEXT: v_and_b32_e32 v22, 0xffff, v16
-; GFX11-TRUE16-NEXT: v_and_b32_e32 v0, 0xffff, v0
-; GFX11-TRUE16-NEXT: v_or_b32_e32 v1, v1, v167
-; GFX11-TRUE16-NEXT: v_and_b32_e32 v21, 0xffff, v15
-; GFX11-TRUE16-NEXT: v_lshl_or_b32 v15, v17, 16, v19
-; GFX11-TRUE16-NEXT: v_lshl_or_b32 v17, v18, 16, v22
-; GFX11-TRUE16-NEXT: v_mov_b32_e32 v2, s7
-; GFX11-TRUE16-NEXT: v_lshl_or_b32 v18, v1, 16, v0
+; GFX11-TRUE16-NEXT: s_delay_alu instid0(VALU_DEP_1) | instskip(SKIP_1) | instid1(VALU_DEP_1)
+; GFX11-TRUE16-NEXT: v_or_b32_e32 v18, v0, v166
+; GFX11-TRUE16-NEXT: v_and_b32_e32 v0, 0xff, v144
+; GFX11-TRUE16-NEXT: v_or_b32_e32 v0, v0, v167
+; GFX11-TRUE16-NEXT: s_delay_alu instid0(VALU_DEP_1) | instskip(SKIP_1) | instid1(VALU_DEP_1)
+; GFX11-TRUE16-NEXT: v_mov_b16_e32 v18.h, v0.l
; GFX11-TRUE16-NEXT: v_and_b32_e32 v0, 0xff, v151
-; GFX11-TRUE16-NEXT: v_and_b32_e32 v1, 0xff, v149
-; GFX11-TRUE16-NEXT: v_lshl_or_b32 v16, v20, 16, v21
-; GFX11-TRUE16-NEXT: v_mov_b32_e32 v3, s8
-; GFX11-TRUE16-NEXT: s_delay_alu instid0(VALU_DEP_4) | instskip(NEXT) | instid1(VALU_DEP_4)
-; GFX11-TRUE16-NEXT: v_or_b32_e32 v0, v0, v180
-; GFX11-TRUE16-NEXT: v_or_b32_e32 v1, v1, v177
-; GFX11-TRUE16-NEXT: s_delay_alu instid0(VALU_DEP_2) | instskip(NEXT) | instid1(VALU_DEP_1)
-; GFX11-TRUE16-NEXT: v_and_b32_e32 v0, 0xffff, v0
-; GFX11-TRUE16-NEXT: v_lshl_or_b32 v19, v1, 16, v0
+; GFX11-TRUE16-NEXT: v_or_b32_e32 v19, v0, v180
+; GFX11-TRUE16-NEXT: v_and_b32_e32 v0, 0xff, v149
+; GFX11-TRUE16-NEXT: s_delay_alu instid0(VALU_DEP_1) | instskip(NEXT) | instid1(VALU_DEP_1)
+; GFX11-TRUE16-NEXT: v_or_b32_e32 v0, v0, v177
+; GFX11-TRUE16-NEXT: v_mov_b16_e32 v19.h, v0.l
; GFX11-TRUE16-NEXT: v_and_b32_e32 v0, 0xff, v165
-; GFX11-TRUE16-NEXT: v_and_b32_e32 v1, 0xff, v162
-; GFX11-TRUE16-NEXT: s_delay_alu instid0(VALU_DEP_2) | instskip(NEXT) | instid1(VALU_DEP_2)
-; GFX11-TRUE16-NEXT: v_or_b32_e32 v0, v0, v42
-; GFX11-TRUE16-NEXT: v_or_b32_e32 v1, v1, v41
-; GFX11-TRUE16-NEXT: s_delay_alu instid0(VALU_DEP_2) | instskip(NEXT) | instid1(VALU_DEP_1)
-; GFX11-TRUE16-NEXT: v_and_b32_e32 v0, 0xffff, v0
-; GFX11-TRUE16-NEXT: v_lshl_or_b32 v20, v1, 16, v0
+; GFX11-TRUE16-NEXT: s_delay_alu instid0(VALU_DEP_1) | instskip(SKIP_1) | instid1(VALU_DEP_1)
+; GFX11-TRUE16-NEXT: v_or_b32_e32 v20, v0, v42
+; GFX11-TRUE16-NEXT: v_and_b32_e32 v0, 0xff, v162
+; GFX11-TRUE16-NEXT: v_or_b32_e32 v0, v0, v41
+; GFX11-TRUE16-NEXT: s_delay_alu instid0(VALU_DEP_1) | instskip(SKIP_1) | instid1(VALU_DEP_1)
+; GFX11-TRUE16-NEXT: v_mov_b16_e32 v20.h, v0.l
; GFX11-TRUE16-NEXT: v_and_b32_e32 v0, 0xff, v178
-; GFX11-TRUE16-NEXT: v_and_b32_e32 v1, 0xff, v115
-; GFX11-TRUE16-NEXT: s_delay_alu instid0(VALU_DEP_2) | instskip(NEXT) | instid1(VALU_DEP_2)
-; GFX11-TRUE16-NEXT: v_or_b32_e32 v0, v0, v45
-; GFX11-TRUE16-NEXT: v_or_b32_e32 v1, v1, v44
-; GFX11-TRUE16-NEXT: s_delay_alu instid0(VALU_DEP_2) | instskip(NEXT) | instid1(VALU_DEP_1)
-; GFX11-TRUE16-NEXT: v_and_b32_e32 v0, 0xffff, v0
-; GFX11-TRUE16-NEXT: v_lshl_or_b32 v21, v1, 16, v0
+; GFX11-TRUE16-NEXT: v_or_b32_e32 v21, v0, v45
+; GFX11-TRUE16-NEXT: v_and_b32_e32 v0, 0xff, v115
+; GFX11-TRUE16-NEXT: s_delay_alu instid0(VALU_DEP_1) | instskip(NEXT) | instid1(VALU_DEP_1)
+; GFX11-TRUE16-NEXT: v_or_b32_e32 v0, v0, v44
+; GFX11-TRUE16-NEXT: v_mov_b16_e32 v21.h, v0.l
; GFX11-TRUE16-NEXT: v_and_b32_e32 v0, 0xff, v131
-; GFX11-TRUE16-NEXT: v_and_b32_e32 v1, 0xff, v119
-; GFX11-TRUE16-NEXT: s_delay_alu instid0(VALU_DEP_2) | instskip(NEXT) | instid1(VALU_DEP_2)
-; GFX11-TRUE16-NEXT: v_or_b32_e32 v0, v0, v59
-; GFX11-TRUE16-NEXT: v_or_b32_e32 v1, v1, v56
-; GFX11-TRUE16-NEXT: s_delay_alu instid0(VALU_DEP_2) | instskip(NEXT) | instid1(VALU_DEP_1)
-; GFX11-TRUE16-NEXT: v_and_b32_e32 v0, 0xffff, v0
-; GFX11-TRUE16-NEXT: v_lshl_or_b32 v22, v1, 16, v0
+; GFX11-TRUE16-NEXT: s_delay_alu instid0(VALU_DEP_1) | instskip(SKIP_1) | instid1(VALU_DEP_1)
+; GFX11-TRUE16-NEXT: v_or_b32_e32 v22, v0, v59
+; GFX11-TRUE16-NEXT: v_and_b32_e32 v0, 0xff, v119
+; GFX11-TRUE16-NEXT: v_or_b32_e32 v0, v0, v56
+; GFX11-TRUE16-NEXT: s_delay_alu instid0(VALU_DEP_1) | instskip(SKIP_1) | instid1(VALU_DEP_1)
+; GFX11-TRUE16-NEXT: v_mov_b16_e32 v22.h, v0.l
; GFX11-TRUE16-NEXT: v_and_b32_e32 v0, 0xff, v145
-; GFX11-TRUE16-NEXT: v_and_b32_e32 v1, 0xff, v135
-; GFX11-TRUE16-NEXT: s_delay_alu instid0(VALU_DEP_2) | instskip(NEXT) | instid1(VALU_DEP_2)
-; GFX11-TRUE16-NEXT: v_or_b32_e32 v0, v0, v60
-; GFX11-TRUE16-NEXT: v_or_b32_e32 v1, v1, v61
-; GFX11-TRUE16-NEXT: s_delay_alu instid0(VALU_DEP_2) | instskip(NEXT) | instid1(VALU_DEP_1)
-; GFX11-TRUE16-NEXT: v_and_b32_e32 v0, 0xffff, v0
-; GFX11-TRUE16-NEXT: v_lshl_or_b32 v23, v1, 16, v0
+; GFX11-TRUE16-NEXT: v_or_b32_e32 v23, v0, v60
+; GFX11-TRUE16-NEXT: v_and_b32_e32 v0, 0xff, v135
+; GFX11-TRUE16-NEXT: s_delay_alu instid0(VALU_DEP_1) | instskip(NEXT) | instid1(VALU_DEP_1)
+; GFX11-TRUE16-NEXT: v_or_b32_e32 v0, v0, v61
+; GFX11-TRUE16-NEXT: v_mov_b16_e32 v23.h, v0.l
; GFX11-TRUE16-NEXT: v_and_b32_e32 v0, 0xff, v150
-; GFX11-TRUE16-NEXT: v_and_b32_e32 v1, 0xff, v146
-; GFX11-TRUE16-NEXT: s_delay_alu instid0(VALU_DEP_2) | instskip(NEXT) | instid1(VALU_DEP_2)
-; GFX11-TRUE16-NEXT: v_or_b32_e32 v0, v0, v63
-; GFX11-TRUE16-NEXT: v_or_b32_e32 v1, v1, v62
-; GFX11-TRUE16-NEXT: s_delay_alu instid0(VALU_DEP_2) | instskip(NEXT) | instid1(VALU_DEP_1)
-; GFX11-TRUE16-NEXT: v_and_b32_e32 v0, 0xffff, v0
-; GFX11-TRUE16-NEXT: v_lshl_or_b32 v24, v1, 16, v0
+; GFX11-TRUE16-NEXT: s_delay_alu instid0(VALU_DEP_1) | instskip(SKIP_1) | instid1(VALU_DEP_1)
+; GFX11-TRUE16-NEXT: v_or_b32_e32 v24, v0, v63
+; GFX11-TRUE16-NEXT: v_and_b32_e32 v0, 0xff, v146
+; GFX11-TRUE16-NEXT: v_or_b32_e32 v0, v0, v62
+; GFX11-TRUE16-NEXT: s_delay_alu instid0(VALU_DEP_1) | instskip(SKIP_1) | instid1(VALU_DEP_1)
+; GFX11-TRUE16-NEXT: v_mov_b16_e32 v24.h, v0.l
; GFX11-TRUE16-NEXT: v_and_b32_e32 v0, 0xff, v163
-; GFX11-TRUE16-NEXT: v_and_b32_e32 v1, 0xff, v160
-; GFX11-TRUE16-NEXT: s_delay_alu instid0(VALU_DEP_2) | instskip(NEXT) | instid1(VALU_DEP_2)
-; GFX11-TRUE16-NEXT: v_or_b32_e32 v0, v0, v73
-; GFX11-TRUE16-NEXT: v_or_b32_e32 v1, v1, v72
-; GFX11-TRUE16-NEXT: s_delay_alu instid0(VALU_DEP_2) | instskip(NEXT) | instid1(VALU_DEP_1)
-; GFX11-TRUE16-NEXT: v_and_b32_e32 v0, 0xffff, v0
-; GFX11-TRUE16-NEXT: v_lshl_or_b32 v25, v1, 16, v0
+; GFX11-TRUE16-NEXT: v_or_b32_e32 v25, v0, v73
+; GFX11-TRUE16-NEXT: v_and_b32_e32 v0, 0xff, v160
+; GFX11-TRUE16-NEXT: s_delay_alu instid0(VALU_DEP_1) | instskip(NEXT) | instid1(VALU_DEP_1)
+; GFX11-TRUE16-NEXT: v_or_b32_e32 v0, v0, v72
+; GFX11-TRUE16-NEXT: v_mov_b16_e32 v25.h, v0.l
; GFX11-TRUE16-NEXT: v_and_b32_e32 v0, 0xff, v176
-; GFX11-TRUE16-NEXT: v_and_b32_e32 v1, 0xff, v164
-; GFX11-TRUE16-NEXT: s_delay_alu instid0(VALU_DEP_2) | instskip(NEXT) | instid1(VALU_DEP_2)
-; GFX11-TRUE16-NEXT: v_or_b32_e32 v0, v0, v75
-; GFX11-TRUE16-NEXT: v_or_b32_e32 v1, v1, v74
-; GFX11-TRUE16-NEXT: s_delay_alu instid0(VALU_DEP_2) | instskip(NEXT) | instid1(VALU_DEP_1)
-; GFX11-TRUE16-NEXT: v_and_b32_e32 v0, 0xffff, v0
-; GFX11-TRUE16-NEXT: v_lshl_or_b32 v26, v1, 16, v0
+; GFX11-TRUE16-NEXT: s_delay_alu instid0(VALU_DEP_1) | instskip(SKIP_1) | instid1(VALU_DEP_1)
+; GFX11-TRUE16-NEXT: v_or_b32_e32 v26, v0, v75
+; GFX11-TRUE16-NEXT: v_and_b32_e32 v0, 0xff, v164
+; GFX11-TRUE16-NEXT: v_or_b32_e32 v0, v0, v74
+; GFX11-TRUE16-NEXT: s_delay_alu instid0(VALU_DEP_1) | instskip(SKIP_1) | instid1(VALU_DEP_1)
+; GFX11-TRUE16-NEXT: v_mov_b16_e32 v26.h, v0.l
; GFX11-TRUE16-NEXT: v_and_b32_e32 v0, 0xff, v181
-; GFX11-TRUE16-NEXT: v_and_b32_e32 v1, 0xff, v179
-; GFX11-TRUE16-NEXT: s_delay_alu instid0(VALU_DEP_2) | instskip(NEXT) | instid1(VALU_DEP_2)
-; GFX11-TRUE16-NEXT: v_or_b32_e32 v0, v0, v77
-; GFX11-TRUE16-NEXT: v_or_b32_e32 v1, v1, v76
-; GFX11-TRUE16-NEXT: s_delay_alu instid0(VALU_DEP_2) | instskip(NEXT) | instid1(VALU_DEP_1)
-; GFX11-TRUE16-NEXT: v_and_b32_e32 v0, 0xffff, v0
-; GFX11-TRUE16-NEXT: v_lshl_or_b32 v27, v1, 16, v0
+; GFX11-TRUE16-NEXT: v_or_b32_e32 v27, v0, v77
+; GFX11-TRUE16-NEXT: v_and_b32_e32 v0, 0xff, v179
+; GFX11-TRUE16-NEXT: s_delay_alu instid0(VALU_DEP_1) | instskip(NEXT) | instid1(VALU_DEP_1)
+; GFX11-TRUE16-NEXT: v_or_b32_e32 v0, v0, v76
+; GFX11-TRUE16-NEXT: v_mov_b16_e32 v27.h, v0.l
; GFX11-TRUE16-NEXT: v_and_b32_e32 v0, 0xff, v183
-; GFX11-TRUE16-NEXT: v_and_b32_e32 v1, 0xff, v182
-; GFX11-TRUE16-NEXT: s_delay_alu instid0(VALU_DEP_2) | instskip(NEXT) | instid1(VALU_DEP_2)
-; GFX11-TRUE16-NEXT: v_or_b32_e32 v0, v0, v78
-; GFX11-TRUE16-NEXT: v_or_b32_e32 v1, v1, v79
-; GFX11-TRUE16-NEXT: s_delay_alu instid0(VALU_DEP_2) | instskip(NEXT) | instid1(VALU_DEP_1)
-; GFX11-TRUE16-NEXT: v_and_b32_e32 v0, 0xffff, v0
-; GFX11-TRUE16-NEXT: v_lshl_or_b32 v28, v1, 16, v0
+; GFX11-TRUE16-NEXT: s_delay_alu instid0(VALU_DEP_1) | instskip(SKIP_1) | instid1(VALU_DEP_1)
+; GFX11-TRUE16-NEXT: v_or_b32_e32 v28, v0, v78
+; GFX11-TRUE16-NEXT: v_and_b32_e32 v0, 0xff, v182
+; GFX11-TRUE16-NEXT: v_or_b32_e32 v0, v0, v79
+; GFX11-TRUE16-NEXT: s_delay_alu instid0(VALU_DEP_1) | instskip(SKIP_1) | instid1(VALU_DEP_1)
+; GFX11-TRUE16-NEXT: v_mov_b16_e32 v28.h, v0.l
; GFX11-TRUE16-NEXT: v_and_b32_e32 v0, 0xff, v43
-; GFX11-TRUE16-NEXT: v_and_b32_e32 v1, 0xff, v40
-; GFX11-TRUE16-NEXT: s_delay_alu instid0(VALU_DEP_2) | instskip(NEXT) | instid1(VALU_DEP_2)
-; GFX11-TRUE16-NEXT: v_or_b32_e32 v0, v0, v89
-; GFX11-TRUE16-NEXT: v_or_b32_e32 v1, v1, v88
-; GFX11-TRUE16-NEXT: s_delay_alu instid0(VALU_DEP_2) | instskip(NEXT) | instid1(VALU_DEP_1)
-; GFX11-TRUE16-NEXT: v_and_b32_e32 v0, 0xffff, v0
-; GFX11-TRUE16-NEXT: v_lshl_or_b32 v29, v1, 16, v0
+; GFX11-TRUE16-NEXT: v_or_b32_e32 v29, v0, v89
+; GFX11-TRUE16-NEXT: v_and_b32_e32 v0, 0xff, v40
+; GFX11-TRUE16-NEXT: s_delay_alu instid0(VALU_DEP_1) | instskip(NEXT) | instid1(VALU_DEP_1)
+; GFX11-TRUE16-NEXT: v_or_b32_e32 v0, v0, v88
+; GFX11-TRUE16-NEXT: v_mov_b16_e32 v29.h, v0.l
; GFX11-TRUE16-NEXT: v_and_b32_e32 v0, 0xff, v47
-; GFX11-TRUE16-NEXT: v_and_b32_e32 v1, 0xff, v46
-; GFX11-TRUE16-NEXT: s_delay_alu instid0(VALU_DEP_2) | instskip(NEXT) | instid1(VALU_DEP_2)
-; GFX11-TRUE16-NEXT: v_or_b32_e32 v0, v0, v91
-; GFX11-TRUE16-NEXT: v_or_b32_e32 v1, v1, v90
-; GFX11-TRUE16-NEXT: s_delay_alu instid0(VALU_DEP_2) | instskip(NEXT) | instid1(VALU_DEP_1)
-; GFX11-TRUE16-NEXT: v_and_b32_e32 v0, 0xffff, v0
-; GFX11-TRUE16-NEXT: v_lshl_or_b32 v30, v1, 16, v0
+; GFX11-TRUE16-NEXT: s_delay_alu instid0(VALU_DEP_1) | instskip(SKIP_1) | instid1(VALU_DEP_1)
+; GFX11-TRUE16-NEXT: v_or_b32_e32 v30, v0, v91
+; GFX11-TRUE16-NEXT: v_and_b32_e32 v0, 0xff, v46
+; GFX11-TRUE16-NEXT: v_or_b32_e32 v0, v0, v90
+; GFX11-TRUE16-NEXT: s_delay_alu instid0(VALU_DEP_1) | instskip(SKIP_1) | instid1(VALU_DEP_1)
+; GFX11-TRUE16-NEXT: v_mov_b16_e32 v30.h, v0.l
; GFX11-TRUE16-NEXT: v_and_b32_e32 v0, 0xff, v58
-; GFX11-TRUE16-NEXT: v_and_b32_e32 v1, 0xff, v57
-; GFX11-TRUE16-NEXT: s_delay_alu instid0(VALU_DEP_2) | instskip(NEXT) | instid1(VALU_DEP_2)
-; GFX11-TRUE16-NEXT: v_or_b32_e32 v0, v0, v92
-; GFX11-TRUE16-NEXT: v_or_b32_e32 v1, v1, v93
-; GFX11-TRUE16-NEXT: s_delay_alu instid0(VALU_DEP_2) | instskip(NEXT) | instid1(VALU_DEP_1)
-; GFX11-TRUE16-NEXT: v_and_b32_e32 v0, 0xffff, v0
-; GFX11-TRUE16-NEXT: v_lshl_or_b32 v31, v1, 16, v0
-; GFX11-TRUE16-NEXT: v_dual_mov_b32 v0, s5 :: v_dual_mov_b32 v1, s6
+; GFX11-TRUE16-NEXT: v_or_b32_e32 v31, v0, v92
+; GFX11-TRUE16-NEXT: v_and_b32_e32 v0, 0xff, v57
+; GFX11-TRUE16-NEXT: s_delay_alu instid0(VALU_DEP_1) | instskip(NEXT) | instid1(VALU_DEP_1)
+; GFX11-TRUE16-NEXT: v_or_b32_e32 v0, v0, v93
+; GFX11-TRUE16-NEXT: v_mov_b16_e32 v31.h, v0.l
+; GFX11-TRUE16-NEXT: v_mov_b32_e32 v0, s5
; GFX11-TRUE16-NEXT: s_and_not1_b32 vcc_lo, exec_lo, s4
; GFX11-TRUE16-NEXT: s_cbranch_vccnz .LBB89_3
; GFX11-TRUE16-NEXT: .LBB89_2: ; %cmp.true
@@ -154436,57 +158302,59 @@ define inreg <64 x bfloat> @bitcast_v128i8_to_v64bf16_scalar(<128 x i8> inreg %a
; GFX11-TRUE16-NEXT: s_or_b32 s10, s11, s10
; GFX11-TRUE16-NEXT: s_or_b32 s0, s1, s0
; GFX11-TRUE16-NEXT: s_or_b32 s1, s3, s2
+; GFX11-TRUE16-NEXT: s_addk_i32 s5, 0x300
+; GFX11-TRUE16-NEXT: s_addk_i32 s6, 0x300
; GFX11-TRUE16-NEXT: s_addk_i32 s9, 0x300
; GFX11-TRUE16-NEXT: s_addk_i32 s0, 0x300
; GFX11-TRUE16-NEXT: s_addk_i32 s1, 0x300
; GFX11-TRUE16-NEXT: s_addk_i32 s10, 0x300
+; GFX11-TRUE16-NEXT: s_addk_i32 s4, 0x300
+; GFX11-TRUE16-NEXT: s_waitcnt vmcnt(38)
+; GFX11-TRUE16-NEXT: v_add_nc_u32_e32 v1, 3, v57
; GFX11-TRUE16-NEXT: s_pack_ll_b32_b16 s0, s0, s1
; GFX11-TRUE16-NEXT: s_pack_ll_b32_b16 s1, s9, s10
; GFX11-TRUE16-NEXT: s_waitcnt vmcnt(37)
; GFX11-TRUE16-NEXT: v_add_nc_u32_e32 v0, 3, v58
-; GFX11-TRUE16-NEXT: s_addk_i32 s5, 0x300
-; GFX11-TRUE16-NEXT: s_addk_i32 s6, 0x300
-; GFX11-TRUE16-NEXT: v_add_nc_u32_e32 v1, 3, v57
; GFX11-TRUE16-NEXT: s_pack_ll_b32_b16 s3, s5, s6
; GFX11-TRUE16-NEXT: s_waitcnt vmcnt(35)
; GFX11-TRUE16-NEXT: v_add_nc_u32_e32 v2, 3, v47
-; GFX11-TRUE16-NEXT: v_and_b32_e32 v0, 0xff, v0
; GFX11-TRUE16-NEXT: s_addk_i32 s7, 0x300
; GFX11-TRUE16-NEXT: s_addk_i32 s8, 0x300
-; GFX11-TRUE16-NEXT: v_and_b32_e32 v1, 0xff, v1
+; GFX11-TRUE16-NEXT: v_add_nc_u32_e32 v3, 3, v46
+; GFX11-TRUE16-NEXT: v_and_b32_e32 v0, 0xff, v0
; GFX11-TRUE16-NEXT: v_and_b32_e32 v2, 0xff, v2
-; GFX11-TRUE16-NEXT: v_or_b32_e32 v0, v92, v0
; GFX11-TRUE16-NEXT: s_pack_ll_b32_b16 s2, s7, s8
-; GFX11-TRUE16-NEXT: v_add_nc_u32_e32 v3, 3, v46
-; GFX11-TRUE16-NEXT: v_or_b32_e32 v1, v93, v1
+; GFX11-TRUE16-NEXT: v_and_b32_e32 v1, 0xff, v1
+; GFX11-TRUE16-NEXT: v_and_b32_e32 v3, 0xff, v3
+; GFX11-TRUE16-NEXT: v_or_b32_e32 v0, v92, v0
; GFX11-TRUE16-NEXT: v_or_b32_e32 v2, v91, v2
; GFX11-TRUE16-NEXT: s_waitcnt vmcnt(33)
; GFX11-TRUE16-NEXT: v_add_nc_u32_e32 v4, 3, v43
-; GFX11-TRUE16-NEXT: v_add_nc_u32_e32 v27, 0x300, v0
-; GFX11-TRUE16-NEXT: v_and_b32_e32 v3, 0xff, v3
+; GFX11-TRUE16-NEXT: v_or_b32_e32 v1, v93, v1
+; GFX11-TRUE16-NEXT: v_or_b32_e32 v3, v90, v3
+; GFX11-TRUE16-NEXT: v_add_nc_u32_e32 v31, 0x300, v0
+; GFX11-TRUE16-NEXT: v_add_nc_u32_e32 v30, 0x300, v2
; GFX11-TRUE16-NEXT: v_add_nc_u32_e32 v0, 3, v40
-; GFX11-TRUE16-NEXT: v_add_nc_u32_e32 v31, 0x300, v1
-; GFX11-TRUE16-NEXT: v_add_nc_u32_e32 v28, 0x300, v2
+; GFX11-TRUE16-NEXT: v_add_nc_u32_e32 v43, 0x300, v1
; GFX11-TRUE16-NEXT: v_and_b32_e32 v1, 0xff, v4
-; GFX11-TRUE16-NEXT: v_or_b32_e32 v3, v90, v3
; GFX11-TRUE16-NEXT: s_waitcnt vmcnt(31)
; GFX11-TRUE16-NEXT: v_add_nc_u32_e32 v2, 3, v183
; GFX11-TRUE16-NEXT: v_add_nc_u32_e32 v4, 3, v182
+; GFX11-TRUE16-NEXT: v_add_nc_u32_e32 v182, 0x300, v3
; GFX11-TRUE16-NEXT: v_and_b32_e32 v0, 0xff, v0
; GFX11-TRUE16-NEXT: v_or_b32_e32 v1, v89, v1
-; GFX11-TRUE16-NEXT: v_add_nc_u32_e32 v30, 0x300, v3
; GFX11-TRUE16-NEXT: v_and_b32_e32 v2, 0xff, v2
; GFX11-TRUE16-NEXT: v_and_b32_e32 v3, 0xff, v4
; GFX11-TRUE16-NEXT: s_waitcnt vmcnt(29)
; GFX11-TRUE16-NEXT: v_add_nc_u32_e32 v4, 3, v181
; GFX11-TRUE16-NEXT: v_or_b32_e32 v0, v88, v0
-; GFX11-TRUE16-NEXT: v_add_nc_u32_e32 v181, 0x300, v1
+; GFX11-TRUE16-NEXT: v_add_nc_u32_e32 v29, 0x300, v1
; GFX11-TRUE16-NEXT: v_or_b32_e32 v1, v78, v2
; GFX11-TRUE16-NEXT: v_or_b32_e32 v2, v79, v3
; GFX11-TRUE16-NEXT: v_and_b32_e32 v3, 0xff, v4
-; GFX11-TRUE16-NEXT: v_add_nc_u32_e32 v29, 0x300, v0
+; GFX11-TRUE16-NEXT: v_add_nc_u32_e32 v181, 0x300, v0
; GFX11-TRUE16-NEXT: v_add_nc_u32_e32 v0, 3, v179
-; GFX11-TRUE16-NEXT: v_add_nc_u32_e32 v182, 0x300, v1
+; GFX11-TRUE16-NEXT: v_add_nc_u32_e32 v28, 0x300, v1
; GFX11-TRUE16-NEXT: v_add_nc_u32_e32 v179, 0x300, v2
; GFX11-TRUE16-NEXT: v_or_b32_e32 v1, v77, v3
; GFX11-TRUE16-NEXT: s_waitcnt vmcnt(27)
@@ -154495,7 +158363,7 @@ define inreg <64 x bfloat> @bitcast_v128i8_to_v64bf16_scalar(<128 x i8> inreg %a
; GFX11-TRUE16-NEXT: v_add_nc_u32_e32 v3, 3, v164
; GFX11-TRUE16-NEXT: s_waitcnt vmcnt(25)
; GFX11-TRUE16-NEXT: v_add_nc_u32_e32 v4, 3, v163
-; GFX11-TRUE16-NEXT: v_add_nc_u32_e32 v163, 0x300, v1
+; GFX11-TRUE16-NEXT: v_add_nc_u32_e32 v27, 0x300, v1
; GFX11-TRUE16-NEXT: v_and_b32_e32 v1, 0xff, v2
; GFX11-TRUE16-NEXT: v_or_b32_e32 v0, v76, v0
; GFX11-TRUE16-NEXT: v_and_b32_e32 v2, 0xff, v3
@@ -154506,18 +158374,18 @@ define inreg <64 x bfloat> @bitcast_v128i8_to_v64bf16_scalar(<128 x i8> inreg %a
; GFX11-TRUE16-NEXT: v_or_b32_e32 v0, v74, v2
; GFX11-TRUE16-NEXT: v_or_b32_e32 v2, v73, v3
; GFX11-TRUE16-NEXT: v_and_b32_e32 v3, 0xff, v4
-; GFX11-TRUE16-NEXT: v_add_nc_u32_e32 v22, 0x300, v1
+; GFX11-TRUE16-NEXT: v_add_nc_u32_e32 v26, 0x300, v1
; GFX11-TRUE16-NEXT: s_waitcnt vmcnt(23)
; GFX11-TRUE16-NEXT: v_add_nc_u32_e32 v1, 3, v150
-; GFX11-TRUE16-NEXT: v_add_nc_u32_e32 v26, 0x300, v0
-; GFX11-TRUE16-NEXT: v_add_nc_u32_e32 v23, 0x300, v2
+; GFX11-TRUE16-NEXT: v_add_nc_u32_e32 v150, 0x300, v0
+; GFX11-TRUE16-NEXT: v_add_nc_u32_e32 v25, 0x300, v2
; GFX11-TRUE16-NEXT: v_or_b32_e32 v0, v72, v3
; GFX11-TRUE16-NEXT: v_add_nc_u32_e32 v2, 3, v146
; GFX11-TRUE16-NEXT: v_and_b32_e32 v1, 0xff, v1
; GFX11-TRUE16-NEXT: s_waitcnt vmcnt(21)
; GFX11-TRUE16-NEXT: v_add_nc_u32_e32 v3, 3, v145
; GFX11-TRUE16-NEXT: v_add_nc_u32_e32 v4, 3, v135
-; GFX11-TRUE16-NEXT: v_add_nc_u32_e32 v25, 0x300, v0
+; GFX11-TRUE16-NEXT: v_add_nc_u32_e32 v135, 0x300, v0
; GFX11-TRUE16-NEXT: v_and_b32_e32 v0, 0xff, v2
; GFX11-TRUE16-NEXT: v_or_b32_e32 v1, v63, v1
; GFX11-TRUE16-NEXT: v_and_b32_e32 v2, 0xff, v3
@@ -154525,13 +158393,13 @@ define inreg <64 x bfloat> @bitcast_v128i8_to_v64bf16_scalar(<128 x i8> inreg %a
; GFX11-TRUE16-NEXT: s_waitcnt vmcnt(19)
; GFX11-TRUE16-NEXT: v_add_nc_u32_e32 v4, 3, v131
; GFX11-TRUE16-NEXT: v_or_b32_e32 v0, v62, v0
-; GFX11-TRUE16-NEXT: v_add_nc_u32_e32 v131, 0x300, v1
+; GFX11-TRUE16-NEXT: v_add_nc_u32_e32 v24, 0x300, v1
; GFX11-TRUE16-NEXT: v_or_b32_e32 v1, v60, v2
; GFX11-TRUE16-NEXT: v_or_b32_e32 v2, v61, v3
; GFX11-TRUE16-NEXT: v_and_b32_e32 v3, 0xff, v4
-; GFX11-TRUE16-NEXT: v_add_nc_u32_e32 v24, 0x300, v0
+; GFX11-TRUE16-NEXT: v_add_nc_u32_e32 v131, 0x300, v0
; GFX11-TRUE16-NEXT: v_add_nc_u32_e32 v0, 3, v119
-; GFX11-TRUE16-NEXT: v_add_nc_u32_e32 v135, 0x300, v1
+; GFX11-TRUE16-NEXT: v_add_nc_u32_e32 v23, 0x300, v1
; GFX11-TRUE16-NEXT: v_add_nc_u32_e32 v119, 0x300, v2
; GFX11-TRUE16-NEXT: v_or_b32_e32 v1, v59, v3
; GFX11-TRUE16-NEXT: s_waitcnt vmcnt(17)
@@ -154540,29 +158408,29 @@ define inreg <64 x bfloat> @bitcast_v128i8_to_v64bf16_scalar(<128 x i8> inreg %a
; GFX11-TRUE16-NEXT: v_add_nc_u32_e32 v3, 3, v115
; GFX11-TRUE16-NEXT: s_waitcnt vmcnt(15)
; GFX11-TRUE16-NEXT: v_add_nc_u32_e32 v4, 3, v165
-; GFX11-TRUE16-NEXT: v_add_nc_u32_e32 v115, 0x300, v1
+; GFX11-TRUE16-NEXT: v_add_nc_u32_e32 v22, 0x300, v1
; GFX11-TRUE16-NEXT: v_and_b32_e32 v1, 0xff, v2
; GFX11-TRUE16-NEXT: v_or_b32_e32 v0, v56, v0
; GFX11-TRUE16-NEXT: v_and_b32_e32 v2, 0xff, v3
; GFX11-TRUE16-NEXT: v_and_b32_e32 v3, 0xff, v4
; GFX11-TRUE16-NEXT: v_add_nc_u32_e32 v4, 3, v162
; GFX11-TRUE16-NEXT: v_or_b32_e32 v1, v45, v1
-; GFX11-TRUE16-NEXT: v_add_nc_u32_e32 v145, 0x300, v0
+; GFX11-TRUE16-NEXT: v_add_nc_u32_e32 v115, 0x300, v0
; GFX11-TRUE16-NEXT: v_or_b32_e32 v0, v44, v2
; GFX11-TRUE16-NEXT: v_or_b32_e32 v2, v42, v3
; GFX11-TRUE16-NEXT: v_and_b32_e32 v3, 0xff, v4
-; GFX11-TRUE16-NEXT: v_add_nc_u32_e32 v17, 0x300, v1
+; GFX11-TRUE16-NEXT: v_add_nc_u32_e32 v21, 0x300, v1
; GFX11-TRUE16-NEXT: s_waitcnt vmcnt(13)
; GFX11-TRUE16-NEXT: v_add_nc_u32_e32 v1, 3, v151
-; GFX11-TRUE16-NEXT: v_add_nc_u32_e32 v21, 0x300, v0
-; GFX11-TRUE16-NEXT: v_add_nc_u32_e32 v18, 0x300, v2
+; GFX11-TRUE16-NEXT: v_add_nc_u32_e32 v145, 0x300, v0
+; GFX11-TRUE16-NEXT: v_add_nc_u32_e32 v20, 0x300, v2
; GFX11-TRUE16-NEXT: v_or_b32_e32 v0, v41, v3
; GFX11-TRUE16-NEXT: v_add_nc_u32_e32 v2, 3, v149
; GFX11-TRUE16-NEXT: v_and_b32_e32 v1, 0xff, v1
; GFX11-TRUE16-NEXT: s_waitcnt vmcnt(11)
; GFX11-TRUE16-NEXT: v_add_nc_u32_e32 v3, 3, v148
; GFX11-TRUE16-NEXT: v_add_nc_u32_e32 v4, 3, v144
-; GFX11-TRUE16-NEXT: v_add_nc_u32_e32 v20, 0x300, v0
+; GFX11-TRUE16-NEXT: v_add_nc_u32_e32 v144, 0x300, v0
; GFX11-TRUE16-NEXT: v_and_b32_e32 v0, 0xff, v2
; GFX11-TRUE16-NEXT: v_or_b32_e32 v1, v180, v1
; GFX11-TRUE16-NEXT: v_and_b32_e32 v2, 0xff, v3
@@ -154576,8 +158444,8 @@ define inreg <64 x bfloat> @bitcast_v128i8_to_v64bf16_scalar(<128 x i8> inreg %a
; GFX11-TRUE16-NEXT: v_and_b32_e32 v3, 0xff, v4
; GFX11-TRUE16-NEXT: v_add_nc_u32_e32 v133, 0x300, v0
; GFX11-TRUE16-NEXT: v_add_nc_u32_e32 v0, 3, v129
-; GFX11-TRUE16-NEXT: v_add_nc_u32_e32 v129, 0x300, v1
-; GFX11-TRUE16-NEXT: v_add_nc_u32_e32 v144, 0x300, v2
+; GFX11-TRUE16-NEXT: v_add_nc_u32_e32 v18, 0x300, v1
+; GFX11-TRUE16-NEXT: v_add_nc_u32_e32 v129, 0x300, v2
; GFX11-TRUE16-NEXT: v_or_b32_e32 v1, v161, v3
; GFX11-TRUE16-NEXT: s_waitcnt vmcnt(7)
; GFX11-TRUE16-NEXT: v_add_nc_u32_e32 v2, 3, v118
@@ -154585,167 +158453,141 @@ define inreg <64 x bfloat> @bitcast_v128i8_to_v64bf16_scalar(<128 x i8> inreg %a
; GFX11-TRUE16-NEXT: v_add_nc_u32_e32 v3, 3, v117
; GFX11-TRUE16-NEXT: s_waitcnt vmcnt(5)
; GFX11-TRUE16-NEXT: v_add_nc_u32_e32 v4, 3, v116
-; GFX11-TRUE16-NEXT: v_add_nc_u32_e32 v116, 0x300, v1
+; GFX11-TRUE16-NEXT: v_add_nc_u32_e32 v17, 0x300, v1
; GFX11-TRUE16-NEXT: v_and_b32_e32 v1, 0xff, v2
; GFX11-TRUE16-NEXT: v_or_b32_e32 v0, v147, v0
; GFX11-TRUE16-NEXT: v_and_b32_e32 v2, 0xff, v3
; GFX11-TRUE16-NEXT: v_and_b32_e32 v3, 0xff, v4
; GFX11-TRUE16-NEXT: v_add_nc_u32_e32 v4, 3, v114
-; GFX11-TRUE16-NEXT: s_waitcnt vmcnt(1)
-; GFX11-TRUE16-NEXT: v_add_nc_u32_e32 v5, 3, v99
+; GFX11-TRUE16-NEXT: v_or_b32_e32 v1, v134, v1
; GFX11-TRUE16-NEXT: v_add_nc_u32_e32 v114, 0x300, v0
; GFX11-TRUE16-NEXT: v_or_b32_e32 v0, v132, v2
; GFX11-TRUE16-NEXT: v_or_b32_e32 v2, v130, v3
; GFX11-TRUE16-NEXT: v_and_b32_e32 v3, 0xff, v4
-; GFX11-TRUE16-NEXT: v_add_nc_u32_e32 v4, 3, v103
-; GFX11-TRUE16-NEXT: v_add_nc_u32_e32 v6, 3, v98
-; GFX11-TRUE16-NEXT: v_and_b32_e32 v5, 0xff, v5
-; GFX11-TRUE16-NEXT: v_add_nc_u32_e32 v7, 3, v54
-; GFX11-TRUE16-NEXT: v_add_nc_u32_e32 v8, 3, v53
+; GFX11-TRUE16-NEXT: v_add_nc_u32_e32 v16, 0x300, v1
+; GFX11-TRUE16-NEXT: s_waitcnt vmcnt(3)
+; GFX11-TRUE16-NEXT: v_add_nc_u32_e32 v1, 3, v103
+; GFX11-TRUE16-NEXT: v_add_nc_u32_e32 v103, 0x300, v0
+; GFX11-TRUE16-NEXT: s_waitcnt vmcnt(2)
+; GFX11-TRUE16-NEXT: v_add_nc_u32_e32 v4, 3, v98
+; GFX11-TRUE16-NEXT: v_or_b32_e32 v0, v128, v3
+; GFX11-TRUE16-NEXT: s_waitcnt vmcnt(1)
+; GFX11-TRUE16-NEXT: v_add_nc_u32_e32 v3, 3, v99
+; GFX11-TRUE16-NEXT: v_and_b32_e32 v1, 0xff, v1
+; GFX11-TRUE16-NEXT: v_add_nc_u32_e32 v5, 3, v54
; GFX11-TRUE16-NEXT: v_and_b32_e32 v4, 0xff, v4
-; GFX11-TRUE16-NEXT: v_and_b32_e32 v6, 0xff, v6
-; GFX11-TRUE16-NEXT: v_add_nc_u32_e32 v9, 3, v39
+; GFX11-TRUE16-NEXT: v_add_nc_u32_e32 v6, 3, v52
+; GFX11-TRUE16-NEXT: v_and_b32_e32 v3, 0xff, v3
+; GFX11-TRUE16-NEXT: v_or_b32_e32 v1, v113, v1
+; GFX11-TRUE16-NEXT: v_add_nc_u32_e32 v7, 3, v53
+; GFX11-TRUE16-NEXT: v_add_nc_u32_e32 v34, 3, v34
+; GFX11-TRUE16-NEXT: v_add_nc_u32_e32 v35, 3, v35
; GFX11-TRUE16-NEXT: v_add_nc_u32_e32 v33, 3, v33
-; GFX11-TRUE16-NEXT: v_add_nc_u32_e32 v16, 0x300, v0
-; GFX11-TRUE16-NEXT: v_or_b32_e32 v4, v113, v4
-; GFX11-TRUE16-NEXT: v_add_nc_u32_e32 v0, 0x300, v2
-; GFX11-TRUE16-NEXT: v_or_b32_e32 v2, v128, v3
-; GFX11-TRUE16-NEXT: v_add_nc_u32_e32 v3, 3, v100
-; GFX11-TRUE16-NEXT: s_addk_i32 s4, 0x300
-; GFX11-TRUE16-NEXT: v_add_nc_u32_e32 v11, 0x300, v4
-; GFX11-TRUE16-NEXT: v_or_b32_e32 v4, v101, v5
-; GFX11-TRUE16-NEXT: v_or_b32_e32 v5, v102, v6
-; GFX11-TRUE16-NEXT: v_and_b32_e32 v6, 0xff, v7
+; GFX11-TRUE16-NEXT: v_add_nc_u32_e32 v14, 0x300, v1
+; GFX11-TRUE16-NEXT: v_or_b32_e32 v1, v101, v3
+; GFX11-TRUE16-NEXT: v_or_b32_e32 v3, v102, v4
+; GFX11-TRUE16-NEXT: v_and_b32_e32 v4, 0xff, v5
; GFX11-TRUE16-NEXT: s_waitcnt vmcnt(0)
-; GFX11-TRUE16-NEXT: v_add_nc_u32_e32 v7, 3, v96
-; GFX11-TRUE16-NEXT: v_or_b32_e32 v1, v134, v1
-; GFX11-TRUE16-NEXT: v_add_nc_u32_e32 v12, 0x300, v4
-; GFX11-TRUE16-NEXT: v_add_nc_u32_e32 v13, 0x300, v5
-; GFX11-TRUE16-NEXT: v_or_b32_e32 v4, v97, v6
-; GFX11-TRUE16-NEXT: v_add_nc_u32_e32 v5, 3, v55
+; GFX11-TRUE16-NEXT: v_add_nc_u32_e32 v5, 3, v96
+; GFX11-TRUE16-NEXT: v_add_nc_u32_e32 v15, 0x300, v2
+; GFX11-TRUE16-NEXT: v_add_nc_u32_e32 v13, 0x300, v1
+; GFX11-TRUE16-NEXT: v_add_nc_u32_e32 v1, 0x300, v3
+; GFX11-TRUE16-NEXT: v_or_b32_e32 v3, v97, v4
+; GFX11-TRUE16-NEXT: v_add_nc_u32_e32 v4, 3, v55
+; GFX11-TRUE16-NEXT: v_and_b32_e32 v5, 0xff, v5
+; GFX11-TRUE16-NEXT: v_add_nc_u32_e32 v2, 3, v100
+; GFX11-TRUE16-NEXT: v_and_b32_e32 v33, 0xff, v33
+; GFX11-TRUE16-NEXT: v_add_nc_u32_e32 v12, 0x300, v3
+; GFX11-TRUE16-NEXT: v_and_b32_e32 v3, 0xff, v4
+; GFX11-TRUE16-NEXT: v_or_b32_e32 v4, v87, v5
+; GFX11-TRUE16-NEXT: v_and_b32_e32 v5, 0xff, v6
+; GFX11-TRUE16-NEXT: v_and_b32_e32 v6, 0xff, v7
+; GFX11-TRUE16-NEXT: v_add_nc_u32_e32 v7, 3, v51
+; GFX11-TRUE16-NEXT: v_or_b32_e32 v3, v86, v3
+; GFX11-TRUE16-NEXT: v_add_nc_u32_e32 v51, 0x300, v4
+; GFX11-TRUE16-NEXT: v_or_b32_e32 v4, v85, v5
+; GFX11-TRUE16-NEXT: v_or_b32_e32 v5, v84, v6
; GFX11-TRUE16-NEXT: v_and_b32_e32 v6, 0xff, v7
-; GFX11-TRUE16-NEXT: v_add_nc_u32_e32 v7, 3, v52
+; GFX11-TRUE16-NEXT: v_add_nc_u32_e32 v11, 0x300, v3
+; GFX11-TRUE16-NEXT: v_add_nc_u32_e32 v3, 3, v50
+; GFX11-TRUE16-NEXT: v_add_nc_u32_e32 v50, 0x300, v4
+; GFX11-TRUE16-NEXT: v_add_nc_u32_e32 v10, 0x300, v5
+; GFX11-TRUE16-NEXT: v_or_b32_e32 v4, v83, v6
+; GFX11-TRUE16-NEXT: v_add_nc_u32_e32 v5, 3, v48
; GFX11-TRUE16-NEXT: v_and_b32_e32 v3, 0xff, v3
-; GFX11-TRUE16-NEXT: v_add_nc_u32_e32 v14, 0x300, v4
+; GFX11-TRUE16-NEXT: v_add_nc_u32_e32 v6, 3, v49
+; GFX11-TRUE16-NEXT: v_add_nc_u32_e32 v7, 3, v39
+; GFX11-TRUE16-NEXT: v_add_nc_u32_e32 v39, 0x300, v4
; GFX11-TRUE16-NEXT: v_and_b32_e32 v4, 0xff, v5
-; GFX11-TRUE16-NEXT: v_or_b32_e32 v5, v87, v6
+; GFX11-TRUE16-NEXT: v_or_b32_e32 v3, v82, v3
+; GFX11-TRUE16-NEXT: v_and_b32_e32 v5, 0xff, v6
+; GFX11-TRUE16-NEXT: v_and_b32_e32 v6, 0xff, v7
+; GFX11-TRUE16-NEXT: v_add_nc_u32_e32 v7, 3, v38
+; GFX11-TRUE16-NEXT: v_or_b32_e32 v4, v81, v4
+; GFX11-TRUE16-NEXT: v_add_nc_u32_e32 v9, 0x300, v3
+; GFX11-TRUE16-NEXT: v_or_b32_e32 v3, v71, v5
+; GFX11-TRUE16-NEXT: v_or_b32_e32 v5, v80, v6
; GFX11-TRUE16-NEXT: v_and_b32_e32 v6, 0xff, v7
-; GFX11-TRUE16-NEXT: v_and_b32_e32 v7, 0xff, v8
-; GFX11-TRUE16-NEXT: v_add_nc_u32_e32 v8, 3, v51
-; GFX11-TRUE16-NEXT: v_or_b32_e32 v4, v86, v4
-; GFX11-TRUE16-NEXT: v_add_nc_u32_e32 v15, 0x300, v5
-; GFX11-TRUE16-NEXT: v_or_b32_e32 v5, v85, v6
-; GFX11-TRUE16-NEXT: v_or_b32_e32 v6, v84, v7
-; GFX11-TRUE16-NEXT: v_and_b32_e32 v7, 0xff, v8
-; GFX11-TRUE16-NEXT: v_add_nc_u32_e32 v51, 0x300, v4
-; GFX11-TRUE16-NEXT: v_add_nc_u32_e32 v4, 3, v50
-; GFX11-TRUE16-NEXT: v_add_nc_u32_e32 v50, 0x300, v5
-; GFX11-TRUE16-NEXT: v_add_nc_u32_e32 v8, 3, v49
-; GFX11-TRUE16-NEXT: v_or_b32_e32 v5, v83, v7
-; GFX11-TRUE16-NEXT: v_add_nc_u32_e32 v7, 3, v48
-; GFX11-TRUE16-NEXT: v_and_b32_e32 v4, 0xff, v4
-; GFX11-TRUE16-NEXT: v_add_nc_u32_e32 v6, 0x300, v6
-; GFX11-TRUE16-NEXT: v_add_nc_u32_e32 v1, 0x300, v1
-; GFX11-TRUE16-NEXT: v_add_nc_u32_e32 v10, 0x300, v5
-; GFX11-TRUE16-NEXT: v_and_b32_e32 v5, 0xff, v7
-; GFX11-TRUE16-NEXT: v_or_b32_e32 v4, v82, v4
-; GFX11-TRUE16-NEXT: v_and_b32_e32 v7, 0xff, v8
-; GFX11-TRUE16-NEXT: v_and_b32_e32 v8, 0xff, v9
-; GFX11-TRUE16-NEXT: v_add_nc_u32_e32 v9, 3, v38
-; GFX11-TRUE16-NEXT: v_or_b32_e32 v5, v81, v5
; GFX11-TRUE16-NEXT: v_add_nc_u32_e32 v38, 0x300, v4
-; GFX11-TRUE16-NEXT: v_or_b32_e32 v4, v71, v7
-; GFX11-TRUE16-NEXT: v_or_b32_e32 v7, v80, v8
-; GFX11-TRUE16-NEXT: v_and_b32_e32 v8, 0xff, v9
-; GFX11-TRUE16-NEXT: v_add_nc_u32_e32 v9, 0x300, v5
-; GFX11-TRUE16-NEXT: v_add_nc_u32_e32 v5, 3, v37
-; GFX11-TRUE16-NEXT: v_add_nc_u32_e32 v37, 0x300, v4
-; GFX11-TRUE16-NEXT: v_add_nc_u32_e32 v39, 0x300, v7
-; GFX11-TRUE16-NEXT: v_or_b32_e32 v4, v70, v8
-; GFX11-TRUE16-NEXT: v_add_nc_u32_e32 v7, 3, v36
-; GFX11-TRUE16-NEXT: v_and_b32_e32 v5, 0xff, v5
-; GFX11-TRUE16-NEXT: v_add_nc_u32_e32 v8, 3, v34
-; GFX11-TRUE16-NEXT: v_add_nc_u32_e32 v34, 3, v35
-; GFX11-TRUE16-NEXT: v_add_nc_u32_e32 v35, 0x300, v4
-; GFX11-TRUE16-NEXT: v_and_b32_e32 v4, 0xff, v7
-; GFX11-TRUE16-NEXT: v_or_b32_e32 v5, v69, v5
-; GFX11-TRUE16-NEXT: v_and_b32_e32 v7, 0xff, v8
-; GFX11-TRUE16-NEXT: v_and_b32_e32 v8, 0xff, v34
-; GFX11-TRUE16-NEXT: v_or_b32_e32 v3, v112, v3
-; GFX11-TRUE16-NEXT: v_or_b32_e32 v4, v68, v4
-; GFX11-TRUE16-NEXT: v_add_nc_u32_e32 v34, 0x300, v5
-; GFX11-TRUE16-NEXT: v_or_b32_e32 v5, v67, v7
-; GFX11-TRUE16-NEXT: v_or_b32_e32 v7, v66, v8
-; GFX11-TRUE16-NEXT: v_and_b32_e32 v8, 0xff, v33
-; GFX11-TRUE16-NEXT: v_add_nc_u32_e32 v33, 0x300, v4
-; GFX11-TRUE16-NEXT: v_add_nc_u32_e32 v4, 3, v32
-; GFX11-TRUE16-NEXT: v_add_nc_u32_e32 v32, 0x300, v5
-; GFX11-TRUE16-NEXT: v_add_nc_u32_e32 v5, 0x300, v7
-; GFX11-TRUE16-NEXT: v_or_b32_e32 v7, v65, v8
-; GFX11-TRUE16-NEXT: v_and_b32_e64 v8, 0xffff, s4
+; GFX11-TRUE16-NEXT: v_add_nc_u32_e32 v4, 3, v37
+; GFX11-TRUE16-NEXT: v_add_nc_u32_e32 v8, 0x300, v3
+; GFX11-TRUE16-NEXT: v_add_nc_u32_e32 v3, 0x300, v5
+; GFX11-TRUE16-NEXT: v_or_b32_e32 v5, v70, v6
+; GFX11-TRUE16-NEXT: v_add_nc_u32_e32 v6, 3, v36
; GFX11-TRUE16-NEXT: v_and_b32_e32 v4, 0xff, v4
-; GFX11-TRUE16-NEXT: v_and_b32_e32 v36, 0xffff, v6
-; GFX11-TRUE16-NEXT: v_and_b32_e32 v5, 0xffff, v5
-; GFX11-TRUE16-NEXT: v_add_nc_u32_e32 v7, 0x300, v7
-; GFX11-TRUE16-NEXT: v_add_nc_u32_e32 v3, 0x300, v3
-; GFX11-TRUE16-NEXT: v_or_b32_e32 v4, v64, v4
-; GFX11-TRUE16-NEXT: v_and_b32_e32 v14, 0xffff, v14
-; GFX11-TRUE16-NEXT: v_lshl_or_b32 v10, v10, 16, v36
-; GFX11-TRUE16-NEXT: v_lshl_or_b32 v5, v7, 16, v5
-; GFX11-TRUE16-NEXT: v_and_b32_e32 v7, 0xffff, v33
-; GFX11-TRUE16-NEXT: v_add_nc_u32_e32 v4, 0x300, v4
-; GFX11-TRUE16-NEXT: v_and_b32_e32 v33, 0xffff, v37
-; GFX11-TRUE16-NEXT: v_and_b32_e32 v36, 0xffff, v22
+; GFX11-TRUE16-NEXT: v_add_nc_u32_e32 v32, 3, v32
+; GFX11-TRUE16-NEXT: v_and_b32_e32 v2, 0xff, v2
+; GFX11-TRUE16-NEXT: v_add_nc_u32_e32 v7, 0x300, v5
+; GFX11-TRUE16-NEXT: v_and_b32_e32 v5, 0xff, v6
+; GFX11-TRUE16-NEXT: v_or_b32_e32 v4, v69, v4
+; GFX11-TRUE16-NEXT: v_and_b32_e32 v6, 0xff, v34
+; GFX11-TRUE16-NEXT: v_and_b32_e32 v34, 0xff, v35
+; GFX11-TRUE16-NEXT: v_and_b32_e32 v32, 0xff, v32
+; GFX11-TRUE16-NEXT: v_or_b32_e32 v2, v112, v2
+; GFX11-TRUE16-NEXT: v_add_nc_u32_e32 v35, 0x300, v4
+; GFX11-TRUE16-NEXT: v_or_b32_e32 v4, v67, v6
+; GFX11-TRUE16-NEXT: v_or_b32_e32 v5, v68, v5
+; GFX11-TRUE16-NEXT: v_or_b32_e32 v34, v66, v34
+; GFX11-TRUE16-NEXT: v_add_nc_u32_e32 v0, 0x300, v0
; GFX11-TRUE16-NEXT: v_add_nc_u32_e32 v2, 0x300, v2
-; GFX11-TRUE16-NEXT: v_lshl_or_b32 v6, v32, 16, v7
-; GFX11-TRUE16-NEXT: v_lshl_or_b32 v4, v4, 16, v8
-; GFX11-TRUE16-NEXT: v_and_b32_e32 v8, 0xffff, v35
-; GFX11-TRUE16-NEXT: v_and_b32_e32 v32, 0xffff, v51
-; GFX11-TRUE16-NEXT: v_and_b32_e32 v35, 0xffff, v38
-; GFX11-TRUE16-NEXT: v_and_b32_e32 v0, 0xffff, v0
-; GFX11-TRUE16-NEXT: v_and_b32_e32 v19, 0xffff, v19
-; GFX11-TRUE16-NEXT: v_lshl_or_b32 v7, v34, 16, v8
-; GFX11-TRUE16-NEXT: v_lshl_or_b32 v8, v39, 16, v33
-; GFX11-TRUE16-NEXT: v_and_b32_e32 v33, 0xffff, v12
-; GFX11-TRUE16-NEXT: v_and_b32_e32 v34, 0xffff, v11
-; GFX11-TRUE16-NEXT: v_lshl_or_b32 v11, v50, 16, v32
-; GFX11-TRUE16-NEXT: v_and_b32_e32 v32, 0xffff, v1
-; GFX11-TRUE16-NEXT: v_lshl_or_b32 v9, v9, 16, v35
-; GFX11-TRUE16-NEXT: v_lshl_or_b32 v12, v15, 16, v14
-; GFX11-TRUE16-NEXT: v_lshl_or_b32 v13, v13, 16, v33
-; GFX11-TRUE16-NEXT: v_lshl_or_b32 v14, v3, 16, v34
-; GFX11-TRUE16-NEXT: v_lshl_or_b32 v16, v16, 16, v32
-; GFX11-TRUE16-NEXT: v_and_b32_e32 v32, 0xffff, v116
-; GFX11-TRUE16-NEXT: v_and_b32_e32 v33, 0xffff, v129
-; GFX11-TRUE16-NEXT: v_and_b32_e32 v34, 0xffff, v18
-; GFX11-TRUE16-NEXT: v_and_b32_e32 v35, 0xffff, v17
-; GFX11-TRUE16-NEXT: v_lshl_or_b32 v26, v26, 16, v36
-; GFX11-TRUE16-NEXT: v_lshl_or_b32 v17, v114, 16, v32
-; GFX11-TRUE16-NEXT: v_lshl_or_b32 v18, v144, 16, v33
-; GFX11-TRUE16-NEXT: v_lshl_or_b32 v20, v20, 16, v34
-; GFX11-TRUE16-NEXT: v_lshl_or_b32 v21, v21, 16, v35
-; GFX11-TRUE16-NEXT: v_and_b32_e32 v32, 0xffff, v115
-; GFX11-TRUE16-NEXT: v_and_b32_e32 v33, 0xffff, v135
-; GFX11-TRUE16-NEXT: v_and_b32_e32 v34, 0xffff, v131
-; GFX11-TRUE16-NEXT: v_and_b32_e32 v35, 0xffff, v23
-; GFX11-TRUE16-NEXT: v_and_b32_e32 v36, 0xffff, v27
-; GFX11-TRUE16-NEXT: v_lshl_or_b32 v22, v145, 16, v32
-; GFX11-TRUE16-NEXT: v_lshl_or_b32 v23, v119, 16, v33
-; GFX11-TRUE16-NEXT: v_lshl_or_b32 v24, v24, 16, v34
-; GFX11-TRUE16-NEXT: v_lshl_or_b32 v25, v25, 16, v35
-; GFX11-TRUE16-NEXT: v_and_b32_e32 v32, 0xffff, v163
-; GFX11-TRUE16-NEXT: v_and_b32_e32 v33, 0xffff, v182
-; GFX11-TRUE16-NEXT: v_and_b32_e32 v34, 0xffff, v181
-; GFX11-TRUE16-NEXT: v_and_b32_e32 v35, 0xffff, v28
-; GFX11-TRUE16-NEXT: v_lshl_or_b32 v15, v2, 16, v0
+; GFX11-TRUE16-NEXT: v_add_nc_u32_e32 v36, 0x300, v4
+; GFX11-TRUE16-NEXT: v_or_b32_e32 v4, v65, v33
+; GFX11-TRUE16-NEXT: v_add_nc_u32_e32 v6, 0x300, v5
+; GFX11-TRUE16-NEXT: v_add_nc_u32_e32 v5, 0x300, v34
+; GFX11-TRUE16-NEXT: v_mov_b16_e32 v7.h, v35.l
+; GFX11-TRUE16-NEXT: v_mov_b16_e32 v6.h, v36.l
+; GFX11-TRUE16-NEXT: v_add_nc_u32_e32 v33, 0x300, v4
+; GFX11-TRUE16-NEXT: v_or_b32_e32 v4, v64, v32
+; GFX11-TRUE16-NEXT: v_mov_b16_e32 v8.h, v3.l
+; GFX11-TRUE16-NEXT: v_mov_b16_e32 v9.h, v38.l
+; GFX11-TRUE16-NEXT: v_mov_b16_e32 v10.h, v39.l
+; GFX11-TRUE16-NEXT: v_mov_b16_e32 v5.h, v33.l
+; GFX11-TRUE16-NEXT: v_add_nc_u32_e32 v32, 0x300, v4
+; GFX11-TRUE16-NEXT: v_mov_b32_e32 v4, s4
+; GFX11-TRUE16-NEXT: v_mov_b16_e32 v11.h, v50.l
+; GFX11-TRUE16-NEXT: v_mov_b16_e32 v12.h, v51.l
+; GFX11-TRUE16-NEXT: v_mov_b16_e32 v13.h, v1.l
+; GFX11-TRUE16-NEXT: v_mov_b16_e32 v4.h, v32.l
+; GFX11-TRUE16-NEXT: v_mov_b16_e32 v14.h, v2.l
+; GFX11-TRUE16-NEXT: v_mov_b16_e32 v15.h, v0.l
; GFX11-TRUE16-NEXT: v_dual_mov_b32 v0, s0 :: v_dual_mov_b32 v1, s1
; GFX11-TRUE16-NEXT: v_dual_mov_b32 v2, s2 :: v_dual_mov_b32 v3, s3
-; GFX11-TRUE16-NEXT: v_lshl_or_b32 v19, v133, 16, v19
-; GFX11-TRUE16-NEXT: v_lshl_or_b32 v27, v160, 16, v32
-; GFX11-TRUE16-NEXT: v_lshl_or_b32 v28, v179, 16, v33
-; GFX11-TRUE16-NEXT: v_lshl_or_b32 v29, v29, 16, v34
-; GFX11-TRUE16-NEXT: v_lshl_or_b32 v30, v30, 16, v35
-; GFX11-TRUE16-NEXT: v_lshl_or_b32 v31, v31, 16, v36
+; GFX11-TRUE16-NEXT: v_mov_b16_e32 v16.h, v103.l
+; GFX11-TRUE16-NEXT: v_mov_b16_e32 v17.h, v114.l
+; GFX11-TRUE16-NEXT: v_mov_b16_e64 v18.h, v129.l
+; GFX11-TRUE16-NEXT: v_mov_b16_e64 v19.h, v133.l
+; GFX11-TRUE16-NEXT: v_mov_b16_e64 v20.h, v144.l
+; GFX11-TRUE16-NEXT: v_mov_b16_e64 v21.h, v145.l
+; GFX11-TRUE16-NEXT: v_mov_b16_e32 v22.h, v115.l
+; GFX11-TRUE16-NEXT: v_mov_b16_e32 v23.h, v119.l
+; GFX11-TRUE16-NEXT: v_mov_b16_e64 v24.h, v131.l
+; GFX11-TRUE16-NEXT: v_mov_b16_e64 v25.h, v135.l
+; GFX11-TRUE16-NEXT: v_mov_b16_e64 v26.h, v150.l
+; GFX11-TRUE16-NEXT: v_mov_b16_e64 v27.h, v160.l
+; GFX11-TRUE16-NEXT: v_mov_b16_e64 v28.h, v179.l
+; GFX11-TRUE16-NEXT: v_mov_b16_e64 v29.h, v181.l
+; GFX11-TRUE16-NEXT: v_mov_b16_e64 v30.h, v182.l
+; GFX11-TRUE16-NEXT: v_mov_b16_e32 v31.h, v43.l
; GFX11-TRUE16-NEXT: .LBB89_3: ; %end
; GFX11-TRUE16-NEXT: s_clause 0x1e
; GFX11-TRUE16-NEXT: scratch_load_b32 v94, off, s32 offset:320
@@ -168348,1575 +172190,3138 @@ define inreg <128 x i8> @bitcast_v64bf16_to_v128i8_scalar(<64 x bfloat> inreg %a
; GFX9-NEXT: s_waitcnt vmcnt(0)
; GFX9-NEXT: s_setpc_b64 s[30:31]
;
-; GFX11-LABEL: bitcast_v64bf16_to_v128i8_scalar:
-; GFX11: ; %bb.0:
-; GFX11-NEXT: s_waitcnt vmcnt(0) expcnt(0) lgkmcnt(0)
-; GFX11-NEXT: s_or_saveexec_b32 s4, -1
-; GFX11-NEXT: s_clause 0x3
-; GFX11-NEXT: scratch_store_b32 off, v40, s32
-; GFX11-NEXT: scratch_store_b32 off, v41, s32 offset:4
-; GFX11-NEXT: scratch_store_b32 off, v42, s32 offset:8
-; GFX11-NEXT: scratch_store_b32 off, v43, s32 offset:12
-; GFX11-NEXT: s_mov_b32 exec_lo, s4
-; GFX11-NEXT: v_writelane_b32 v40, s30, 0
-; GFX11-NEXT: v_writelane_b32 v41, s96, 0
-; GFX11-NEXT: v_cmp_ne_u32_e32 vcc_lo, 0, v15
-; GFX11-NEXT: v_readfirstlane_b32 s72, v1
-; GFX11-NEXT: v_readfirstlane_b32 s73, v2
-; GFX11-NEXT: v_writelane_b32 v40, s31, 1
-; GFX11-NEXT: v_writelane_b32 v41, s97, 1
-; GFX11-NEXT: v_readfirstlane_b32 s62, v3
-; GFX11-NEXT: v_readfirstlane_b32 s63, v4
-; GFX11-NEXT: v_readfirstlane_b32 s60, v5
-; GFX11-NEXT: v_writelane_b32 v40, s34, 2
-; GFX11-NEXT: v_writelane_b32 v41, s98, 2
-; GFX11-NEXT: v_readfirstlane_b32 s61, v6
-; GFX11-NEXT: v_readfirstlane_b32 s58, v7
-; GFX11-NEXT: v_readfirstlane_b32 s59, v8
-; GFX11-NEXT: v_writelane_b32 v40, s35, 3
-; GFX11-NEXT: v_writelane_b32 v41, s99, 3
-; GFX11-NEXT: v_readfirstlane_b32 s56, v9
-; GFX11-NEXT: v_readfirstlane_b32 s57, v10
-; GFX11-NEXT: v_readfirstlane_b32 s46, v11
-; GFX11-NEXT: v_writelane_b32 v40, s36, 4
-; GFX11-NEXT: v_writelane_b32 v41, s100, 4
-; GFX11-NEXT: v_readfirstlane_b32 s47, v12
-; GFX11-NEXT: v_readfirstlane_b32 s44, v13
-; GFX11-NEXT: v_readfirstlane_b32 s45, v14
-; GFX11-NEXT: v_writelane_b32 v40, s37, 5
-; GFX11-NEXT: v_writelane_b32 v41, s101, 5
-; GFX11-NEXT: s_mov_b32 vcc_hi, 0
-; GFX11-NEXT: s_and_b32 s4, vcc_lo, exec_lo
-; GFX11-NEXT: ; implicit-def: $vgpr43 : SGPR spill to VGPR lane
-; GFX11-NEXT: ; implicit-def: $vgpr42 : SGPR spill to VGPR lane
-; GFX11-NEXT: v_writelane_b32 v40, s38, 6
-; GFX11-NEXT: v_writelane_b32 v41, s102, 6
-; GFX11-NEXT: v_writelane_b32 v40, s39, 7
-; GFX11-NEXT: v_writelane_b32 v41, s103, 7
-; GFX11-NEXT: v_writelane_b32 v40, s48, 8
-; GFX11-NEXT: v_writelane_b32 v41, s104, 8
-; GFX11-NEXT: v_writelane_b32 v40, s49, 9
-; GFX11-NEXT: v_writelane_b32 v40, s50, 10
-; GFX11-NEXT: v_writelane_b32 v40, s51, 11
-; GFX11-NEXT: v_writelane_b32 v40, s52, 12
-; GFX11-NEXT: v_writelane_b32 v40, s53, 13
-; GFX11-NEXT: v_writelane_b32 v40, s54, 14
-; GFX11-NEXT: v_writelane_b32 v40, s55, 15
-; GFX11-NEXT: v_writelane_b32 v40, s64, 16
-; GFX11-NEXT: v_writelane_b32 v40, s65, 17
-; GFX11-NEXT: v_writelane_b32 v40, s66, 18
-; GFX11-NEXT: v_writelane_b32 v40, s67, 19
-; GFX11-NEXT: v_writelane_b32 v40, s68, 20
-; GFX11-NEXT: v_writelane_b32 v40, s69, 21
-; GFX11-NEXT: v_writelane_b32 v40, s70, 22
-; GFX11-NEXT: v_writelane_b32 v40, s71, 23
-; GFX11-NEXT: v_writelane_b32 v40, s80, 24
-; GFX11-NEXT: v_writelane_b32 v40, s81, 25
-; GFX11-NEXT: v_writelane_b32 v40, s82, 26
-; GFX11-NEXT: v_writelane_b32 v40, s83, 27
-; GFX11-NEXT: v_writelane_b32 v40, s84, 28
-; GFX11-NEXT: v_writelane_b32 v40, s85, 29
-; GFX11-NEXT: v_writelane_b32 v40, s86, 30
-; GFX11-NEXT: v_writelane_b32 v40, s87, 31
-; GFX11-NEXT: s_cbranch_scc0 .LBB91_3
-; GFX11-NEXT: ; %bb.1: ; %cmp.false
-; GFX11-NEXT: s_lshr_b32 s4, s27, 24
-; GFX11-NEXT: s_lshr_b64 s[12:13], s[26:27], 24
-; GFX11-NEXT: v_writelane_b32 v43, s4, 15
-; GFX11-NEXT: s_lshr_b32 s4, s27, 16
-; GFX11-NEXT: s_lshr_b32 s99, s2, 16
-; GFX11-NEXT: s_lshr_b32 s100, s2, 8
-; GFX11-NEXT: s_lshr_b32 s101, s1, 24
-; GFX11-NEXT: v_writelane_b32 v43, s4, 14
-; GFX11-NEXT: s_lshr_b32 s4, s27, 8
-; GFX11-NEXT: s_lshr_b32 s11, s1, 16
-; GFX11-NEXT: s_lshr_b32 s102, s1, 8
-; GFX11-NEXT: s_lshr_b32 s103, s0, 16
-; GFX11-NEXT: v_writelane_b32 v43, s4, 16
-; GFX11-NEXT: s_lshr_b32 s4, s26, 16
-; GFX11-NEXT: s_lshr_b32 s104, s0, 8
-; GFX11-NEXT: s_lshr_b32 s85, s45, 24
-; GFX11-NEXT: s_lshr_b32 s10, s45, 16
-; GFX11-NEXT: v_writelane_b32 v43, s4, 17
-; GFX11-NEXT: s_lshr_b32 s4, s26, 8
-; GFX11-NEXT: s_lshr_b32 s5, s45, 8
-; GFX11-NEXT: s_lshr_b32 s87, s44, 16
-; GFX11-NEXT: s_lshr_b32 s86, s44, 8
-; GFX11-NEXT: v_writelane_b32 v43, s4, 18
-; GFX11-NEXT: s_lshr_b32 s4, s25, 24
-; GFX11-NEXT: s_lshr_b32 s81, s47, 24
-; GFX11-NEXT: s_lshr_b32 s98, s47, 16
-; GFX11-NEXT: s_lshr_b32 s84, s47, 8
-; GFX11-NEXT: v_writelane_b32 v43, s4, 19
-; GFX11-NEXT: s_lshr_b32 s4, s25, 16
-; GFX11-NEXT: s_lshr_b32 s48, s46, 8
-; GFX11-NEXT: s_lshr_b32 s70, s57, 24
-; GFX11-NEXT: s_lshr_b32 s97, s57, 16
-; GFX11-NEXT: v_writelane_b32 v43, s4, 13
-; GFX11-NEXT: s_lshr_b32 s4, s25, 8
-; GFX11-NEXT: s_lshr_b32 s80, s57, 8
-; GFX11-NEXT: s_lshr_b32 s83, s56, 16
-; GFX11-NEXT: s_lshr_b32 s82, s56, 8
-; GFX11-NEXT: v_writelane_b32 v43, s4, 20
-; GFX11-NEXT: s_lshr_b32 s4, s24, 16
-; GFX11-NEXT: s_lshr_b32 s66, s59, 24
-; GFX11-NEXT: s_lshr_b32 s9, s59, 16
-; GFX11-NEXT: s_lshr_b32 s69, s59, 8
-; GFX11-NEXT: v_writelane_b32 v43, s4, 21
-; GFX11-NEXT: s_lshr_b32 s4, s24, 8
-; GFX11-NEXT: s_lshr_b32 s71, s58, 16
-; GFX11-NEXT: s_lshr_b32 s39, s58, 8
-; GFX11-NEXT: s_lshr_b32 s55, s61, 24
-; GFX11-NEXT: v_writelane_b32 v43, s4, 22
-; GFX11-NEXT: s_lshr_b32 s4, s23, 24
-; GFX11-NEXT: s_lshr_b32 s8, s61, 16
-; GFX11-NEXT: s_lshr_b32 s65, s61, 8
-; GFX11-NEXT: s_lshr_b32 s68, s60, 16
-; GFX11-NEXT: v_writelane_b32 v43, s4, 23
-; GFX11-NEXT: s_lshr_b32 s4, s23, 16
-; GFX11-NEXT: s_lshr_b32 s67, s60, 8
-; GFX11-NEXT: s_lshr_b32 s51, s63, 24
-; GFX11-NEXT: s_lshr_b32 s96, s63, 16
-; GFX11-NEXT: v_writelane_b32 v43, s4, 12
-; GFX11-NEXT: s_lshr_b32 s4, s23, 8
-; GFX11-NEXT: s_lshr_b32 s54, s63, 8
-; GFX11-NEXT: s_lshr_b32 s38, s62, 16
-; GFX11-NEXT: s_lshr_b32 s64, s62, 8
-; GFX11-NEXT: v_writelane_b32 v43, s4, 24
-; GFX11-NEXT: s_lshr_b32 s4, s22, 16
-; GFX11-NEXT: s_lshr_b32 s36, s73, 24
-; GFX11-NEXT: s_lshr_b32 s7, s73, 16
-; GFX11-NEXT: s_lshr_b32 s50, s73, 8
-; GFX11-NEXT: v_writelane_b32 v43, s4, 25
-; GFX11-NEXT: s_lshr_b32 s4, s22, 8
-; GFX11-NEXT: s_lshr_b32 s53, s72, 16
-; GFX11-NEXT: s_lshr_b32 s52, s72, 8
-; GFX11-NEXT: s_lshr_b32 s34, s29, 24
-; GFX11-NEXT: v_writelane_b32 v43, s4, 26
-; GFX11-NEXT: s_lshr_b32 s4, s21, 24
-; GFX11-NEXT: s_lshr_b32 s6, s29, 16
-; GFX11-NEXT: s_lshr_b32 s35, s29, 8
-; GFX11-NEXT: s_lshr_b32 s37, s28, 16
-; GFX11-NEXT: v_writelane_b32 v43, s4, 27
-; GFX11-NEXT: s_lshr_b32 s4, s21, 16
-; GFX11-NEXT: s_lshr_b32 s49, s28, 8
-; GFX11-NEXT: s_lshr_b64 s[14:15], s[16:17], 24
-; GFX11-NEXT: s_lshr_b64 s[40:41], s[2:3], 24
-; GFX11-NEXT: v_writelane_b32 v43, s4, 11
-; GFX11-NEXT: s_lshr_b32 s4, s21, 8
-; GFX11-NEXT: s_lshr_b64 s[42:43], s[0:1], 24
-; GFX11-NEXT: s_lshr_b64 s[74:75], s[44:45], 24
-; GFX11-NEXT: s_lshr_b64 s[76:77], s[46:47], 24
-; GFX11-NEXT: v_writelane_b32 v43, s4, 28
-; GFX11-NEXT: s_lshr_b32 s4, s20, 16
-; GFX11-NEXT: s_lshr_b64 s[78:79], s[56:57], 24
-; GFX11-NEXT: s_lshr_b64 s[88:89], s[58:59], 24
-; GFX11-NEXT: s_lshr_b64 s[90:91], s[60:61], 24
-; GFX11-NEXT: v_writelane_b32 v43, s4, 29
-; GFX11-NEXT: s_lshr_b32 s4, s20, 8
-; GFX11-NEXT: s_lshr_b64 s[92:93], s[62:63], 24
-; GFX11-NEXT: s_lshr_b64 s[94:95], s[72:73], 24
-; GFX11-NEXT: s_lshr_b64 s[30:31], s[28:29], 24
-; GFX11-NEXT: v_writelane_b32 v43, s4, 30
-; GFX11-NEXT: s_lshr_b32 s4, s19, 24
-; GFX11-NEXT: s_delay_alu instid0(SALU_CYCLE_1) | instskip(SKIP_1) | instid1(SALU_CYCLE_1)
-; GFX11-NEXT: v_writelane_b32 v43, s4, 31
-; GFX11-NEXT: s_lshr_b32 s4, s19, 16
-; GFX11-NEXT: v_writelane_b32 v43, s4, 10
-; GFX11-NEXT: s_lshr_b32 s4, s19, 8
-; GFX11-NEXT: s_delay_alu instid0(SALU_CYCLE_1) | instskip(SKIP_1) | instid1(SALU_CYCLE_1)
-; GFX11-NEXT: v_writelane_b32 v42, s4, 0
-; GFX11-NEXT: s_lshr_b32 s4, s18, 16
-; GFX11-NEXT: v_writelane_b32 v42, s4, 1
-; GFX11-NEXT: s_lshr_b32 s4, s18, 8
-; GFX11-NEXT: s_delay_alu instid0(SALU_CYCLE_1) | instskip(SKIP_1) | instid1(SALU_CYCLE_1)
-; GFX11-NEXT: v_writelane_b32 v42, s4, 2
-; GFX11-NEXT: s_lshr_b32 s4, s17, 24
-; GFX11-NEXT: v_writelane_b32 v42, s4, 3
-; GFX11-NEXT: s_lshr_b32 s4, s17, 16
-; GFX11-NEXT: s_delay_alu instid0(SALU_CYCLE_1) | instskip(SKIP_1) | instid1(SALU_CYCLE_1)
-; GFX11-NEXT: v_writelane_b32 v43, s4, 9
-; GFX11-NEXT: s_lshr_b32 s4, s17, 8
-; GFX11-NEXT: v_writelane_b32 v42, s4, 4
-; GFX11-NEXT: s_lshr_b32 s4, s16, 16
-; GFX11-NEXT: s_delay_alu instid0(SALU_CYCLE_1) | instskip(SKIP_1) | instid1(SALU_CYCLE_1)
-; GFX11-NEXT: v_writelane_b32 v42, s4, 5
-; GFX11-NEXT: s_lshr_b32 s4, s16, 8
-; GFX11-NEXT: v_writelane_b32 v42, s4, 6
-; GFX11-NEXT: s_lshr_b32 s4, s3, 24
-; GFX11-NEXT: s_delay_alu instid0(SALU_CYCLE_1) | instskip(SKIP_1) | instid1(SALU_CYCLE_1)
-; GFX11-NEXT: v_writelane_b32 v42, s4, 7
-; GFX11-NEXT: s_lshr_b32 s4, s3, 16
-; GFX11-NEXT: v_writelane_b32 v43, s4, 8
-; GFX11-NEXT: s_lshr_b32 s4, s3, 8
-; GFX11-NEXT: s_delay_alu instid0(SALU_CYCLE_1) | instskip(SKIP_4) | instid1(SALU_CYCLE_1)
-; GFX11-NEXT: v_writelane_b32 v42, s4, 8
-; GFX11-NEXT: s_lshr_b32 s4, s46, 16
-; GFX11-NEXT: v_writelane_b32 v43, s12, 6
-; GFX11-NEXT: v_writelane_b32 v43, s13, 7
-; GFX11-NEXT: s_lshr_b64 s[12:13], s[24:25], 24
-; GFX11-NEXT: v_writelane_b32 v43, s12, 4
-; GFX11-NEXT: v_writelane_b32 v43, s13, 5
-; GFX11-NEXT: s_lshr_b64 s[12:13], s[22:23], 24
-; GFX11-NEXT: s_delay_alu instid0(SALU_CYCLE_1) | instskip(SKIP_2) | instid1(SALU_CYCLE_1)
-; GFX11-NEXT: v_writelane_b32 v43, s12, 2
-; GFX11-NEXT: v_writelane_b32 v43, s13, 3
-; GFX11-NEXT: s_lshr_b64 s[12:13], s[20:21], 24
-; GFX11-NEXT: v_writelane_b32 v43, s12, 0
-; GFX11-NEXT: v_writelane_b32 v43, s13, 1
-; GFX11-NEXT: s_lshr_b64 s[12:13], s[18:19], 24
-; GFX11-NEXT: s_and_not1_b32 vcc_lo, exec_lo, vcc_hi
-; GFX11-NEXT: s_cbranch_vccnz .LBB91_4
-; GFX11-NEXT: .LBB91_2: ; %cmp.true
-; GFX11-NEXT: s_and_b32 s4, s29, 0xffff0000
-; GFX11-NEXT: s_and_b32 s14, s47, 0xffff0000
-; GFX11-NEXT: v_add_f32_e64 v1, 0x40c00000, s4
-; GFX11-NEXT: s_and_b32 s4, s1, 0xffff0000
-; GFX11-NEXT: s_lshl_b32 s15, s47, 16
-; GFX11-NEXT: v_add_f32_e64 v6, 0x40c00000, s4
-; GFX11-NEXT: s_lshl_b32 s6, s29, 16
-; GFX11-NEXT: v_bfe_u32 v3, v1, 16, 1
-; GFX11-NEXT: v_add_f32_e64 v2, 0x40c00000, s6
-; GFX11-NEXT: s_and_b32 s8, s45, 0xffff0000
-; GFX11-NEXT: v_readfirstlane_b32 s47, v6
-; GFX11-NEXT: v_cmp_u_f32_e32 vcc_lo, v6, v6
-; GFX11-NEXT: v_add_nc_u32_e32 v3, v3, v1
-; GFX11-NEXT: s_lshl_b32 s7, s45, 16
-; GFX11-NEXT: s_and_b32 s78, s28, 0xffff0000
-; GFX11-NEXT: s_bfe_u32 s6, s47, 0x10010
-; GFX11-NEXT: s_lshl_b32 s79, s28, 16
-; GFX11-NEXT: s_add_i32 s45, s6, s47
-; GFX11-NEXT: s_and_b32 s5, s73, 0xffff0000
-; GFX11-NEXT: s_lshl_b32 s77, s73, 16
-; GFX11-NEXT: s_and_b32 s75, s72, 0xffff0000
-; GFX11-NEXT: s_lshl_b32 s76, s72, 16
-; GFX11-NEXT: s_and_b32 s11, s63, 0xffff0000
-; GFX11-NEXT: s_lshl_b32 s74, s63, 16
-; GFX11-NEXT: s_and_b32 s72, s62, 0xffff0000
-; GFX11-NEXT: s_lshl_b32 s73, s62, 16
-; GFX11-NEXT: s_and_b32 s63, s61, 0xffff0000
-; GFX11-NEXT: s_lshl_b32 s62, s61, 16
-; GFX11-NEXT: s_and_b32 s61, s60, 0xffff0000
-; GFX11-NEXT: s_lshl_b32 s60, s60, 16
-; GFX11-NEXT: s_and_b32 s41, s59, 0xffff0000
-; GFX11-NEXT: s_lshl_b32 s40, s59, 16
-; GFX11-NEXT: s_and_b32 s28, s58, 0xffff0000
-; GFX11-NEXT: s_lshl_b32 s29, s58, 16
-; GFX11-NEXT: s_and_b32 s13, s57, 0xffff0000
-; GFX11-NEXT: s_lshl_b32 s10, s57, 16
-; GFX11-NEXT: s_and_b32 s42, s56, 0xffff0000
-; GFX11-NEXT: s_lshl_b32 s43, s56, 16
-; GFX11-NEXT: s_and_b32 s12, s46, 0xffff0000
-; GFX11-NEXT: s_lshl_b32 s9, s46, 16
-; GFX11-NEXT: s_and_b32 s4, s44, 0xffff0000
-; GFX11-NEXT: s_lshl_b32 s6, s44, 16
-; GFX11-NEXT: s_addk_i32 s45, 0x7fff
-; GFX11-NEXT: s_bitset1_b32 s47, 22
-; GFX11-NEXT: v_bfe_u32 v4, v2, 16, 1
-; GFX11-NEXT: v_or_b32_e32 v5, 0x400000, v1
-; GFX11-NEXT: v_add_nc_u32_e32 v3, 0x7fff, v3
-; GFX11-NEXT: s_and_b32 s44, vcc_lo, exec_lo
-; GFX11-NEXT: v_cmp_u_f32_e32 vcc_lo, v1, v1
-; GFX11-NEXT: s_cselect_b32 s44, s47, s45
-; GFX11-NEXT: s_lshl_b32 s1, s1, 16
-; GFX11-NEXT: v_add_nc_u32_e32 v4, v4, v2
-; GFX11-NEXT: s_lshr_b32 s58, s44, 16
-; GFX11-NEXT: v_cndmask_b32_e32 v1, v3, v5, vcc_lo
-; GFX11-NEXT: v_add_f32_e64 v3, 0x40c00000, s1
-; GFX11-NEXT: v_or_b32_e32 v5, 0x400000, v2
-; GFX11-NEXT: v_add_nc_u32_e32 v4, 0x7fff, v4
-; GFX11-NEXT: v_cmp_u_f32_e32 vcc_lo, v2, v2
-; GFX11-NEXT: v_add_f32_e64 v6, 0x40c00000, s78
-; GFX11-NEXT: v_readfirstlane_b32 s1, v3
-; GFX11-NEXT: v_add_f32_e64 v7, 0x40c00000, s79
-; GFX11-NEXT: v_lshrrev_b32_e32 v22, 16, v1
-; GFX11-NEXT: v_cndmask_b32_e32 v2, v4, v5, vcc_lo
-; GFX11-NEXT: v_cmp_u_f32_e32 vcc_lo, v3, v3
-; GFX11-NEXT: s_bfe_u32 s45, s1, 0x10010
-; GFX11-NEXT: v_bfe_u32 v4, v6, 16, 1
-; GFX11-NEXT: s_add_i32 s45, s45, s1
-; GFX11-NEXT: s_bitset1_b32 s1, 22
-; GFX11-NEXT: s_addk_i32 s45, 0x7fff
-; GFX11-NEXT: s_and_b32 s44, vcc_lo, exec_lo
-; GFX11-NEXT: s_cselect_b32 s1, s1, s45
-; GFX11-NEXT: s_and_b32 s44, s0, 0xffff0000
-; GFX11-NEXT: v_lshrrev_b32_e32 v21, 16, v2
-; GFX11-NEXT: v_add_f32_e64 v2, 0x40c00000, s44
-; GFX11-NEXT: v_bfe_u32 v5, v7, 16, 1
-; GFX11-NEXT: v_add_nc_u32_e32 v3, v4, v6
-; GFX11-NEXT: s_lshr_b32 s1, s1, 16
-; GFX11-NEXT: v_or_b32_e32 v8, 0x400000, v7
-; GFX11-NEXT: v_readfirstlane_b32 s44, v2
-; GFX11-NEXT: v_cmp_u_f32_e32 vcc_lo, v2, v2
-; GFX11-NEXT: v_add_nc_u32_e32 v3, 0x7fff, v3
-; GFX11-NEXT: s_bfe_u32 s45, s44, 0x10010
-; GFX11-NEXT: s_delay_alu instid0(SALU_CYCLE_1)
-; GFX11-NEXT: s_add_i32 s45, s45, s44
-; GFX11-NEXT: s_bitset1_b32 s44, 22
-; GFX11-NEXT: s_addk_i32 s45, 0x7fff
-; GFX11-NEXT: s_and_b32 s46, vcc_lo, exec_lo
-; GFX11-NEXT: v_cmp_u_f32_e32 vcc_lo, v6, v6
-; GFX11-NEXT: v_add_nc_u32_e32 v1, v5, v7
-; GFX11-NEXT: v_or_b32_e32 v5, 0x400000, v6
-; GFX11-NEXT: s_cselect_b32 s44, s44, s45
-; GFX11-NEXT: s_lshl_b32 s0, s0, 16
-; GFX11-NEXT: s_delay_alu instid0(VALU_DEP_1)
-; GFX11-NEXT: v_cndmask_b32_e32 v2, v3, v5, vcc_lo
-; GFX11-NEXT: v_add_f32_e64 v3, 0x40c00000, s0
-; GFX11-NEXT: v_add_nc_u32_e32 v1, 0x7fff, v1
-; GFX11-NEXT: v_cmp_u_f32_e32 vcc_lo, v7, v7
-; GFX11-NEXT: v_and_b32_e32 v4, 0xffff, v21
-; GFX11-NEXT: v_add_f32_e64 v5, 0x40c00000, s5
-; GFX11-NEXT: v_readfirstlane_b32 s0, v3
-; GFX11-NEXT: v_lshrrev_b32_e32 v2, 16, v2
-; GFX11-NEXT: v_cndmask_b32_e32 v1, v1, v8, vcc_lo
-; GFX11-NEXT: v_cmp_u_f32_e32 vcc_lo, v3, v3
-; GFX11-NEXT: v_add_f32_e64 v8, 0x40c00000, s77
-; GFX11-NEXT: s_bfe_u32 s5, s0, 0x10010
-; GFX11-NEXT: v_lshl_or_b32 v7, v22, 16, v4
-; GFX11-NEXT: s_add_i32 s45, s5, s0
-; GFX11-NEXT: s_lshr_b32 s5, s44, 16
-; GFX11-NEXT: s_addk_i32 s45, 0x7fff
-; GFX11-NEXT: s_bitset1_b32 s0, 22
-; GFX11-NEXT: s_and_b32 s44, vcc_lo, exec_lo
-; GFX11-NEXT: s_cselect_b32 s0, s0, s45
-; GFX11-NEXT: s_and_b32 s44, s3, 0xffff0000
-; GFX11-NEXT: v_lshrrev_b32_e32 v23, 16, v1
-; GFX11-NEXT: v_add_f32_e64 v9, 0x40c00000, s44
-; GFX11-NEXT: v_bfe_u32 v6, v8, 16, 1
-; GFX11-NEXT: v_bfe_u32 v1, v5, 16, 1
-; GFX11-NEXT: s_lshr_b32 s0, s0, 16
-; GFX11-NEXT: v_and_b32_e32 v3, 0xffff, v23
-; GFX11-NEXT: v_readfirstlane_b32 s44, v9
-; GFX11-NEXT: v_cmp_u_f32_e32 vcc_lo, v9, v9
-; GFX11-NEXT: v_add_nc_u32_e32 v4, v6, v8
-; GFX11-NEXT: v_add_nc_u32_e32 v1, v1, v5
-; GFX11-NEXT: v_lshl_or_b32 v6, v2, 16, v3
-; GFX11-NEXT: s_bfe_u32 s45, s44, 0x10010
-; GFX11-NEXT: v_or_b32_e32 v2, 0x400000, v5
-; GFX11-NEXT: s_add_i32 s45, s45, s44
-; GFX11-NEXT: s_bitset1_b32 s44, 22
-; GFX11-NEXT: s_addk_i32 s45, 0x7fff
-; GFX11-NEXT: s_and_b32 s46, vcc_lo, exec_lo
-; GFX11-NEXT: v_add_nc_u32_e32 v3, 0x7fff, v4
-; GFX11-NEXT: v_or_b32_e32 v4, 0x400000, v8
-; GFX11-NEXT: s_cselect_b32 s44, s44, s45
-; GFX11-NEXT: s_lshl_b32 s3, s3, 16
-; GFX11-NEXT: v_cmp_u_f32_e32 vcc_lo, v8, v8
-; GFX11-NEXT: v_add_f32_e64 v10, 0x40c00000, s3
-; GFX11-NEXT: v_add_nc_u32_e32 v1, 0x7fff, v1
-; GFX11-NEXT: v_add_f32_e64 v9, 0x40c00000, s76
-; GFX11-NEXT: s_lshr_b32 s59, s44, 16
-; GFX11-NEXT: v_cndmask_b32_e32 v3, v3, v4, vcc_lo
-; GFX11-NEXT: v_add_f32_e64 v4, 0x40c00000, s75
-; GFX11-NEXT: v_cmp_u_f32_e32 vcc_lo, v5, v5
-; GFX11-NEXT: v_readfirstlane_b32 s3, v10
-; GFX11-NEXT: v_bfe_u32 v8, v9, 16, 1
-; GFX11-NEXT: v_lshrrev_b32_e32 v87, 24, v7
-; GFX11-NEXT: v_lshrrev_b32_e32 v96, 16, v6
-; GFX11-NEXT: v_cndmask_b32_e32 v1, v1, v2, vcc_lo
-; GFX11-NEXT: v_bfe_u32 v2, v4, 16, 1
-; GFX11-NEXT: s_bfe_u32 s45, s3, 0x10010
-; GFX11-NEXT: v_cmp_u_f32_e32 vcc_lo, v10, v10
-; GFX11-NEXT: s_add_i32 s45, s45, s3
-; GFX11-NEXT: s_bitset1_b32 s3, 22
-; GFX11-NEXT: v_add_nc_u32_e32 v2, v2, v4
-; GFX11-NEXT: s_addk_i32 s45, 0x7fff
-; GFX11-NEXT: s_and_b32 s44, vcc_lo, exec_lo
-; GFX11-NEXT: s_cselect_b32 s3, s3, s45
-; GFX11-NEXT: s_and_b32 s44, s2, 0xffff0000
-; GFX11-NEXT: v_lshrrev_b32_e32 v25, 16, v1
-; GFX11-NEXT: v_add_f32_e64 v1, 0x40c00000, s44
-; GFX11-NEXT: v_add_nc_u32_e32 v2, 0x7fff, v2
-; GFX11-NEXT: v_lshrrev_b32_e32 v24, 16, v3
-; GFX11-NEXT: v_add_nc_u32_e32 v3, v8, v9
-; GFX11-NEXT: v_or_b32_e32 v8, 0x400000, v9
-; GFX11-NEXT: v_readfirstlane_b32 s44, v1
-; GFX11-NEXT: v_cmp_u_f32_e32 vcc_lo, v9, v9
-; GFX11-NEXT: s_lshr_b32 s3, s3, 16
-; GFX11-NEXT: v_add_nc_u32_e32 v3, 0x7fff, v3
-; GFX11-NEXT: v_or_b32_e32 v10, 0x400000, v4
-; GFX11-NEXT: s_bfe_u32 s45, s44, 0x10010
-; GFX11-NEXT: v_and_b32_e32 v5, 0xffff, v24
-; GFX11-NEXT: s_add_i32 s45, s45, s44
-; GFX11-NEXT: v_cndmask_b32_e32 v3, v3, v8, vcc_lo
-; GFX11-NEXT: v_cmp_u_f32_e32 vcc_lo, v1, v1
-; GFX11-NEXT: s_addk_i32 s45, 0x7fff
-; GFX11-NEXT: s_bitset1_b32 s44, 22
-; GFX11-NEXT: v_add_f32_e64 v1, 0x40c00000, s74
-; GFX11-NEXT: v_lshl_or_b32 v14, v25, 16, v5
-; GFX11-NEXT: s_and_b32 s46, vcc_lo, exec_lo
-; GFX11-NEXT: s_cselect_b32 s44, s44, s45
-; GFX11-NEXT: s_lshl_b32 s2, s2, 16
-; GFX11-NEXT: v_cmp_u_f32_e32 vcc_lo, v4, v4
-; GFX11-NEXT: v_add_f32_e64 v8, 0x40c00000, s2
-; GFX11-NEXT: v_bfe_u32 v4, v1, 16, 1
-; GFX11-NEXT: v_lshrrev_b32_e32 v85, 24, v14
-; GFX11-NEXT: v_cndmask_b32_e32 v2, v2, v10, vcc_lo
-; GFX11-NEXT: s_delay_alu instid0(VALU_DEP_4) | instskip(NEXT) | instid1(VALU_DEP_4)
-; GFX11-NEXT: v_readfirstlane_b32 s2, v8
-; GFX11-NEXT: v_add_nc_u32_e32 v4, v4, v1
-; GFX11-NEXT: v_lshrrev_b32_e32 v26, 16, v3
-; GFX11-NEXT: v_add_f32_e64 v3, 0x40c00000, s11
-; GFX11-NEXT: v_cmp_u_f32_e32 vcc_lo, v8, v8
-; GFX11-NEXT: s_bfe_u32 s11, s2, 0x10010
-; GFX11-NEXT: v_lshrrev_b32_e32 v2, 16, v2
-; GFX11-NEXT: s_add_i32 s45, s11, s2
-; GFX11-NEXT: s_lshr_b32 s11, s44, 16
-; GFX11-NEXT: s_addk_i32 s45, 0x7fff
-; GFX11-NEXT: s_bitset1_b32 s2, 22
-; GFX11-NEXT: s_and_b32 s44, vcc_lo, exec_lo
-; GFX11-NEXT: s_cselect_b32 s2, s2, s45
-; GFX11-NEXT: s_and_b32 s44, s17, 0xffff0000
-; GFX11-NEXT: v_and_b32_e32 v9, 0xffff, v26
-; GFX11-NEXT: v_add_f32_e64 v5, 0x40c00000, s44
-; GFX11-NEXT: v_cmp_u_f32_e32 vcc_lo, v1, v1
-; GFX11-NEXT: v_bfe_u32 v10, v3, 16, 1
-; GFX11-NEXT: s_lshr_b32 s2, s2, 16
-; GFX11-NEXT: v_lshl_or_b32 v13, v2, 16, v9
-; GFX11-NEXT: v_add_nc_u32_e32 v2, 0x7fff, v4
-; GFX11-NEXT: v_or_b32_e32 v4, 0x400000, v1
-; GFX11-NEXT: v_readfirstlane_b32 s44, v5
-; GFX11-NEXT: v_add_nc_u32_e32 v8, v10, v3
-; GFX11-NEXT: v_or_b32_e32 v9, 0x400000, v3
-; GFX11-NEXT: v_lshrrev_b32_e32 v86, 16, v13
-; GFX11-NEXT: v_cndmask_b32_e32 v1, v2, v4, vcc_lo
-; GFX11-NEXT: s_bfe_u32 s45, s44, 0x10010
-; GFX11-NEXT: v_cmp_u_f32_e32 vcc_lo, v5, v5
-; GFX11-NEXT: s_add_i32 s45, s45, s44
-; GFX11-NEXT: s_bitset1_b32 s44, 22
-; GFX11-NEXT: s_addk_i32 s45, 0x7fff
-; GFX11-NEXT: v_add_nc_u32_e32 v8, 0x7fff, v8
-; GFX11-NEXT: s_and_b32 s46, vcc_lo, exec_lo
-; GFX11-NEXT: s_cselect_b32 s44, s44, s45
-; GFX11-NEXT: s_lshl_b32 s17, s17, 16
-; GFX11-NEXT: v_add_f32_e64 v2, 0x40c00000, s73
-; GFX11-NEXT: v_add_f32_e64 v4, 0x40c00000, s17
-; GFX11-NEXT: v_cmp_u_f32_e32 vcc_lo, v3, v3
-; GFX11-NEXT: v_lshrrev_b32_e32 v27, 16, v1
-; GFX11-NEXT: v_add_f32_e64 v1, 0x40c00000, s72
-; GFX11-NEXT: v_bfe_u32 v5, v2, 16, 1
-; GFX11-NEXT: v_readfirstlane_b32 s17, v4
-; GFX11-NEXT: v_cndmask_b32_e32 v3, v8, v9, vcc_lo
-; GFX11-NEXT: v_cmp_u_f32_e32 vcc_lo, v4, v4
-; GFX11-NEXT: s_lshr_b32 s72, s44, 16
-; GFX11-NEXT: v_add_nc_u32_e32 v5, v5, v2
-; GFX11-NEXT: s_bfe_u32 s45, s17, 0x10010
-; GFX11-NEXT: v_lshrrev_b32_e32 v28, 16, v3
-; GFX11-NEXT: v_and_b32_e32 v3, 0xffff, v27
-; GFX11-NEXT: s_add_i32 s45, s45, s17
-; GFX11-NEXT: s_bitset1_b32 s17, 22
-; GFX11-NEXT: s_addk_i32 s45, 0x7fff
-; GFX11-NEXT: s_and_b32 s44, vcc_lo, exec_lo
-; GFX11-NEXT: v_lshl_or_b32 v16, v28, 16, v3
-; GFX11-NEXT: v_add_nc_u32_e32 v3, 0x7fff, v5
-; GFX11-NEXT: v_or_b32_e32 v5, 0x400000, v2
-; GFX11-NEXT: v_cmp_u_f32_e32 vcc_lo, v2, v2
-; GFX11-NEXT: v_bfe_u32 v8, v1, 16, 1
-; GFX11-NEXT: s_cselect_b32 s17, s17, s45
-; GFX11-NEXT: s_and_b32 s44, s16, 0xffff0000
-; GFX11-NEXT: s_lshr_b32 s17, s17, 16
-; GFX11-NEXT: v_cndmask_b32_e32 v2, v3, v5, vcc_lo
-; GFX11-NEXT: v_add_f32_e64 v3, 0x40c00000, s63
-; GFX11-NEXT: v_or_b32_e32 v9, 0x400000, v1
-; GFX11-NEXT: v_lshrrev_b32_e32 v83, 24, v16
-; GFX11-NEXT: s_delay_alu instid0(VALU_DEP_4) | instskip(NEXT) | instid1(VALU_DEP_4)
-; GFX11-NEXT: v_lshrrev_b32_e32 v29, 16, v2
-; GFX11-NEXT: v_bfe_u32 v5, v3, 16, 1
-; GFX11-NEXT: s_delay_alu instid0(VALU_DEP_1) | instskip(NEXT) | instid1(VALU_DEP_3)
-; GFX11-NEXT: v_add_nc_u32_e32 v2, v5, v3
-; GFX11-NEXT: v_and_b32_e32 v5, 0xffff, v29
-; GFX11-NEXT: s_delay_alu instid0(VALU_DEP_2) | instskip(SKIP_2) | instid1(VALU_DEP_2)
-; GFX11-NEXT: v_add_nc_u32_e32 v2, 0x7fff, v2
-; GFX11-NEXT: v_add_nc_u32_e32 v4, v8, v1
-; GFX11-NEXT: v_add_f32_e64 v8, 0x40c00000, s44
-; GFX11-NEXT: v_add_nc_u32_e32 v4, 0x7fff, v4
-; GFX11-NEXT: s_delay_alu instid0(VALU_DEP_2) | instskip(SKIP_2) | instid1(SALU_CYCLE_1)
-; GFX11-NEXT: v_readfirstlane_b32 s44, v8
-; GFX11-NEXT: v_cmp_u_f32_e32 vcc_lo, v8, v8
-; GFX11-NEXT: s_bfe_u32 s45, s44, 0x10010
-; GFX11-NEXT: s_add_i32 s45, s45, s44
-; GFX11-NEXT: s_bitset1_b32 s44, 22
-; GFX11-NEXT: s_addk_i32 s45, 0x7fff
-; GFX11-NEXT: s_and_b32 s46, vcc_lo, exec_lo
-; GFX11-NEXT: s_cselect_b32 s44, s44, s45
-; GFX11-NEXT: s_lshl_b32 s16, s16, 16
-; GFX11-NEXT: v_cmp_u_f32_e32 vcc_lo, v1, v1
-; GFX11-NEXT: v_add_f32_e64 v8, 0x40c00000, s16
-; GFX11-NEXT: s_lshr_b32 s46, s44, 16
-; GFX11-NEXT: v_cndmask_b32_e32 v1, v4, v9, vcc_lo
-; GFX11-NEXT: s_delay_alu instid0(VALU_DEP_2)
-; GFX11-NEXT: v_readfirstlane_b32 s16, v8
-; GFX11-NEXT: v_cmp_u_f32_e32 vcc_lo, v8, v8
-; GFX11-NEXT: v_or_b32_e32 v8, 0x400000, v3
-; GFX11-NEXT: v_add_f32_e64 v4, 0x40c00000, s62
-; GFX11-NEXT: v_lshrrev_b32_e32 v1, 16, v1
-; GFX11-NEXT: s_bfe_u32 s45, s16, 0x10010
-; GFX11-NEXT: s_delay_alu instid0(SALU_CYCLE_1)
-; GFX11-NEXT: s_add_i32 s45, s45, s16
-; GFX11-NEXT: s_bitset1_b32 s16, 22
-; GFX11-NEXT: s_addk_i32 s45, 0x7fff
-; GFX11-NEXT: s_and_b32 s44, vcc_lo, exec_lo
-; GFX11-NEXT: s_cselect_b32 s16, s16, s45
-; GFX11-NEXT: s_and_b32 s44, s19, 0xffff0000
-; GFX11-NEXT: v_cmp_u_f32_e32 vcc_lo, v3, v3
-; GFX11-NEXT: v_add_f32_e64 v10, 0x40c00000, s44
-; GFX11-NEXT: v_lshl_or_b32 v15, v1, 16, v5
-; GFX11-NEXT: v_bfe_u32 v9, v4, 16, 1
-; GFX11-NEXT: s_lshr_b32 s16, s16, 16
-; GFX11-NEXT: v_cndmask_b32_e32 v1, v2, v8, vcc_lo
-; GFX11-NEXT: v_readfirstlane_b32 s44, v10
-; GFX11-NEXT: v_cmp_u_f32_e32 vcc_lo, v10, v10
-; GFX11-NEXT: v_add_nc_u32_e32 v9, v9, v4
-; GFX11-NEXT: v_add_f32_e64 v8, 0x40c00000, s60
-; GFX11-NEXT: v_or_b32_e32 v3, 0x400000, v4
-; GFX11-NEXT: s_bfe_u32 s45, s44, 0x10010
-; GFX11-NEXT: v_add_f32_e64 v5, 0x40c00000, s61
-; GFX11-NEXT: s_add_i32 s45, s45, s44
-; GFX11-NEXT: s_bitset1_b32 s44, 22
-; GFX11-NEXT: s_addk_i32 s45, 0x7fff
-; GFX11-NEXT: s_and_b32 s47, vcc_lo, exec_lo
-; GFX11-NEXT: s_cselect_b32 s44, s44, s45
-; GFX11-NEXT: s_lshl_b32 s19, s19, 16
-; GFX11-NEXT: v_add_nc_u32_e32 v2, 0x7fff, v9
-; GFX11-NEXT: v_add_f32_e64 v10, 0x40c00000, s19
-; GFX11-NEXT: v_cmp_u_f32_e32 vcc_lo, v4, v4
-; GFX11-NEXT: v_bfe_u32 v9, v8, 16, 1
-; GFX11-NEXT: s_lshr_b32 s60, s44, 16
-; GFX11-NEXT: v_lshrrev_b32_e32 v30, 16, v1
-; GFX11-NEXT: v_readfirstlane_b32 s19, v10
-; GFX11-NEXT: v_cndmask_b32_e32 v2, v2, v3, vcc_lo
-; GFX11-NEXT: v_cmp_u_f32_e32 vcc_lo, v10, v10
-; GFX11-NEXT: v_bfe_u32 v3, v5, 16, 1
-; GFX11-NEXT: v_add_nc_u32_e32 v4, v9, v8
-; GFX11-NEXT: s_bfe_u32 s45, s19, 0x10010
-; GFX11-NEXT: v_lshrrev_b32_e32 v31, 16, v2
-; GFX11-NEXT: s_add_i32 s45, s45, s19
-; GFX11-NEXT: s_bitset1_b32 s19, 22
-; GFX11-NEXT: s_addk_i32 s45, 0x7fff
-; GFX11-NEXT: s_and_b32 s44, vcc_lo, exec_lo
-; GFX11-NEXT: s_cselect_b32 s19, s19, s45
-; GFX11-NEXT: s_and_b32 s44, s18, 0xffff0000
-; GFX11-NEXT: v_add_nc_u32_e32 v1, v3, v5
-; GFX11-NEXT: v_add_nc_u32_e32 v2, 0x7fff, v4
-; GFX11-NEXT: v_or_b32_e32 v3, 0x400000, v8
-; GFX11-NEXT: v_add_f32_e64 v4, 0x40c00000, s44
-; GFX11-NEXT: v_cmp_u_f32_e32 vcc_lo, v8, v8
-; GFX11-NEXT: v_add_nc_u32_e32 v1, 0x7fff, v1
-; GFX11-NEXT: v_or_b32_e32 v9, 0x400000, v5
-; GFX11-NEXT: s_lshr_b32 s19, s19, 16
-; GFX11-NEXT: v_add_f32_e64 v10, 0x40c00000, s29
-; GFX11-NEXT: v_cndmask_b32_e32 v2, v2, v3, vcc_lo
-; GFX11-NEXT: v_add_f32_e64 v3, 0x40c00000, s41
-; GFX11-NEXT: v_readfirstlane_b32 s41, v4
-; GFX11-NEXT: v_cmp_u_f32_e32 vcc_lo, v5, v5
-; GFX11-NEXT: s_pack_ll_b32_b16 s47, s17, s72
-; GFX11-NEXT: v_lshrrev_b32_e32 v32, 16, v2
-; GFX11-NEXT: v_bfe_u32 v2, v3, 16, 1
-; GFX11-NEXT: s_bfe_u32 s44, s41, 0x10010
-; GFX11-NEXT: v_cndmask_b32_e32 v1, v1, v9, vcc_lo
-; GFX11-NEXT: v_cmp_u_f32_e32 vcc_lo, v4, v4
-; GFX11-NEXT: s_add_i32 s44, s44, s41
-; GFX11-NEXT: s_bitset1_b32 s41, 22
-; GFX11-NEXT: s_addk_i32 s44, 0x7fff
-; GFX11-NEXT: v_add_f32_e64 v9, 0x40c00000, s40
-; GFX11-NEXT: s_and_b32 s45, vcc_lo, exec_lo
-; GFX11-NEXT: s_cselect_b32 s41, s41, s44
-; GFX11-NEXT: s_lshl_b32 s18, s18, 16
-; GFX11-NEXT: v_and_b32_e32 v4, 0xffff, v31
-; GFX11-NEXT: v_add_f32_e64 v5, 0x40c00000, s18
-; GFX11-NEXT: v_lshrrev_b32_e32 v1, 16, v1
-; GFX11-NEXT: v_and_b32_e32 v8, 0xffff, v32
-; GFX11-NEXT: v_add_nc_u32_e32 v2, v2, v3
-; GFX11-NEXT: v_lshl_or_b32 v18, v30, 16, v4
-; GFX11-NEXT: v_readfirstlane_b32 s18, v5
-; GFX11-NEXT: v_cmp_u_f32_e32 vcc_lo, v5, v5
-; GFX11-NEXT: v_lshl_or_b32 v17, v1, 16, v8
-; GFX11-NEXT: v_add_nc_u32_e32 v1, 0x7fff, v2
-; GFX11-NEXT: v_or_b32_e32 v4, 0x400000, v3
-; GFX11-NEXT: s_bfe_u32 s40, s18, 0x10010
-; GFX11-NEXT: v_add_f32_e64 v8, 0x40c00000, s28
-; GFX11-NEXT: s_add_i32 s44, s40, s18
-; GFX11-NEXT: s_lshr_b32 s40, s41, 16
-; GFX11-NEXT: s_addk_i32 s44, 0x7fff
-; GFX11-NEXT: s_bitset1_b32 s18, 22
-; GFX11-NEXT: s_and_b32 s41, vcc_lo, exec_lo
-; GFX11-NEXT: s_cselect_b32 s18, s18, s44
-; GFX11-NEXT: s_and_b32 s41, s21, 0xffff0000
-; GFX11-NEXT: v_cmp_u_f32_e32 vcc_lo, v3, v3
-; GFX11-NEXT: v_add_f32_e64 v5, 0x40c00000, s41
-; GFX11-NEXT: v_bfe_u32 v2, v9, 16, 1
-; GFX11-NEXT: s_lshr_b32 s18, s18, 16
-; GFX11-NEXT: v_or_b32_e32 v3, 0x400000, v9
-; GFX11-NEXT: v_cndmask_b32_e32 v1, v1, v4, vcc_lo
-; GFX11-NEXT: v_readfirstlane_b32 s28, v5
-; GFX11-NEXT: v_cmp_u_f32_e32 vcc_lo, v5, v5
-; GFX11-NEXT: v_add_nc_u32_e32 v2, v2, v9
-; GFX11-NEXT: v_bfe_u32 v4, v8, 16, 1
-; GFX11-NEXT: v_bfe_u32 v5, v10, 16, 1
-; GFX11-NEXT: s_bfe_u32 s29, s28, 0x10010
-; GFX11-NEXT: v_lshrrev_b32_e32 v33, 16, v1
-; GFX11-NEXT: s_add_i32 s29, s29, s28
-; GFX11-NEXT: s_bitset1_b32 s28, 22
-; GFX11-NEXT: s_addk_i32 s29, 0x7fff
-; GFX11-NEXT: s_and_b32 s41, vcc_lo, exec_lo
-; GFX11-NEXT: s_cselect_b32 s28, s28, s29
-; GFX11-NEXT: s_lshl_b32 s21, s21, 16
-; GFX11-NEXT: v_add_nc_u32_e32 v2, 0x7fff, v2
-; GFX11-NEXT: v_add_f32_e64 v11, 0x40c00000, s21
-; GFX11-NEXT: v_cmp_u_f32_e32 vcc_lo, v9, v9
-; GFX11-NEXT: s_lshr_b32 s61, s28, 16
-; GFX11-NEXT: v_add_nc_u32_e32 v1, v5, v10
-; GFX11-NEXT: s_pack_ll_b32_b16 s44, s2, s11
-; GFX11-NEXT: v_readfirstlane_b32 s21, v11
-; GFX11-NEXT: v_cndmask_b32_e32 v2, v2, v3, vcc_lo
-; GFX11-NEXT: v_cmp_u_f32_e32 vcc_lo, v11, v11
-; GFX11-NEXT: v_add_nc_u32_e32 v3, v4, v8
-; GFX11-NEXT: v_add_nc_u32_e32 v1, 0x7fff, v1
-; GFX11-NEXT: s_bfe_u32 s29, s21, 0x10010
-; GFX11-NEXT: v_lshrrev_b32_e32 v34, 16, v2
-; GFX11-NEXT: s_add_i32 s29, s29, s21
-; GFX11-NEXT: s_bitset1_b32 s21, 22
-; GFX11-NEXT: s_addk_i32 s29, 0x7fff
-; GFX11-NEXT: s_and_b32 s28, vcc_lo, exec_lo
-; GFX11-NEXT: s_cselect_b32 s21, s21, s29
-; GFX11-NEXT: s_and_b32 s28, s20, 0xffff0000
-; GFX11-NEXT: v_add_nc_u32_e32 v2, 0x7fff, v3
-; GFX11-NEXT: v_or_b32_e32 v3, 0x400000, v8
-; GFX11-NEXT: v_add_f32_e64 v5, 0x40c00000, s28
-; GFX11-NEXT: v_cmp_u_f32_e32 vcc_lo, v8, v8
-; GFX11-NEXT: v_or_b32_e32 v4, 0x400000, v10
-; GFX11-NEXT: s_lshr_b32 s21, s21, 16
-; GFX11-NEXT: s_pack_ll_b32_b16 s45, s3, s59
-; GFX11-NEXT: s_pack_ll_b32_b16 s46, s16, s46
-; GFX11-NEXT: v_cndmask_b32_e32 v2, v2, v3, vcc_lo
-; GFX11-NEXT: v_cmp_u_f32_e32 vcc_lo, v10, v10
-; GFX11-NEXT: v_add_f32_e64 v3, 0x40c00000, s13
-; GFX11-NEXT: v_readfirstlane_b32 s13, v5
-; GFX11-NEXT: v_lshrrev_b32_e32 v81, 24, v18
-; GFX11-NEXT: v_lshrrev_b32_e32 v2, 16, v2
-; GFX11-NEXT: v_cndmask_b32_e32 v1, v1, v4, vcc_lo
-; GFX11-NEXT: v_cmp_u_f32_e32 vcc_lo, v5, v5
-; GFX11-NEXT: s_bfe_u32 s28, s13, 0x10010
-; GFX11-NEXT: v_and_b32_e32 v4, 0xffff, v34
-; GFX11-NEXT: s_add_i32 s28, s28, s13
-; GFX11-NEXT: s_bitset1_b32 s13, 22
-; GFX11-NEXT: s_addk_i32 s28, 0x7fff
-; GFX11-NEXT: s_and_b32 s29, vcc_lo, exec_lo
-; GFX11-NEXT: s_cselect_b32 s13, s13, s28
-; GFX11-NEXT: s_lshl_b32 s20, s20, 16
-; GFX11-NEXT: v_lshrrev_b32_e32 v35, 16, v1
-; GFX11-NEXT: v_add_f32_e64 v8, 0x40c00000, s20
-; GFX11-NEXT: v_bfe_u32 v1, v3, 16, 1
-; GFX11-NEXT: v_add_f32_e64 v5, 0x40c00000, s10
-; GFX11-NEXT: v_lshl_or_b32 v20, v33, 16, v4
-; GFX11-NEXT: v_and_b32_e32 v9, 0xffff, v35
-; GFX11-NEXT: v_readfirstlane_b32 s20, v8
-; GFX11-NEXT: v_add_nc_u32_e32 v1, v1, v3
-; GFX11-NEXT: v_cmp_u_f32_e32 vcc_lo, v8, v8
-; GFX11-NEXT: v_bfe_u32 v4, v5, 16, 1
-; GFX11-NEXT: v_lshl_or_b32 v19, v2, 16, v9
-; GFX11-NEXT: s_bfe_u32 s10, s20, 0x10010
-; GFX11-NEXT: v_add_nc_u32_e32 v1, 0x7fff, v1
-; GFX11-NEXT: s_add_i32 s28, s10, s20
-; GFX11-NEXT: s_lshr_b32 s10, s13, 16
-; GFX11-NEXT: s_addk_i32 s28, 0x7fff
-; GFX11-NEXT: s_bitset1_b32 s20, 22
-; GFX11-NEXT: v_or_b32_e32 v2, 0x400000, v3
-; GFX11-NEXT: s_and_b32 s13, vcc_lo, exec_lo
-; GFX11-NEXT: v_cmp_u_f32_e32 vcc_lo, v3, v3
-; GFX11-NEXT: s_cselect_b32 s13, s20, s28
-; GFX11-NEXT: s_and_b32 s20, s23, 0xffff0000
-; GFX11-NEXT: v_add_nc_u32_e32 v4, v4, v5
-; GFX11-NEXT: v_add_f32_e64 v8, 0x40c00000, s42
-; GFX11-NEXT: v_cndmask_b32_e32 v1, v1, v2, vcc_lo
-; GFX11-NEXT: v_add_f32_e64 v2, 0x40c00000, s20
-; GFX11-NEXT: v_cmp_u_f32_e32 vcc_lo, v5, v5
-; GFX11-NEXT: v_add_nc_u32_e32 v3, 0x7fff, v4
-; GFX11-NEXT: v_or_b32_e32 v4, 0x400000, v5
-; GFX11-NEXT: v_add_f32_e64 v9, 0x40c00000, s43
-; GFX11-NEXT: v_readfirstlane_b32 s28, v2
-; GFX11-NEXT: v_lshrrev_b32_e32 v37, 16, v1
-; GFX11-NEXT: v_lshrrev_b32_e32 v80, 16, v19
-; GFX11-NEXT: v_cndmask_b32_e32 v3, v3, v4, vcc_lo
-; GFX11-NEXT: v_cmp_u_f32_e32 vcc_lo, v2, v2
-; GFX11-NEXT: s_bfe_u32 s20, s28, 0x10010
-; GFX11-NEXT: v_bfe_u32 v4, v8, 16, 1
-; GFX11-NEXT: s_add_i32 s29, s20, s28
-; GFX11-NEXT: s_lshr_b32 s20, s13, 16
-; GFX11-NEXT: s_addk_i32 s29, 0x7fff
-; GFX11-NEXT: s_bitset1_b32 s28, 22
-; GFX11-NEXT: s_and_b32 s13, vcc_lo, exec_lo
-; GFX11-NEXT: s_cselect_b32 s13, s28, s29
-; GFX11-NEXT: s_lshl_b32 s23, s23, 16
-; GFX11-NEXT: v_bfe_u32 v5, v9, 16, 1
-; GFX11-NEXT: v_add_f32_e64 v2, 0x40c00000, s23
-; GFX11-NEXT: v_lshrrev_b32_e32 v36, 16, v3
-; GFX11-NEXT: v_add_nc_u32_e32 v3, v4, v8
-; GFX11-NEXT: s_lshr_b32 s62, s13, 16
-; GFX11-NEXT: v_add_nc_u32_e32 v1, v5, v9
-; GFX11-NEXT: v_readfirstlane_b32 s23, v2
-; GFX11-NEXT: v_cmp_u_f32_e32 vcc_lo, v2, v2
-; GFX11-NEXT: v_add_nc_u32_e32 v3, 0x7fff, v3
-; GFX11-NEXT: v_or_b32_e32 v5, 0x400000, v8
-; GFX11-NEXT: v_add_nc_u32_e32 v1, 0x7fff, v1
-; GFX11-NEXT: s_bfe_u32 s28, s23, 0x10010
-; GFX11-NEXT: v_or_b32_e32 v10, 0x400000, v9
-; GFX11-NEXT: s_add_i32 s28, s28, s23
-; GFX11-NEXT: s_bitset1_b32 s23, 22
-; GFX11-NEXT: s_addk_i32 s28, 0x7fff
-; GFX11-NEXT: s_and_b32 s13, vcc_lo, exec_lo
-; GFX11-NEXT: v_cmp_u_f32_e32 vcc_lo, v8, v8
-; GFX11-NEXT: s_cselect_b32 s13, s23, s28
-; GFX11-NEXT: s_and_b32 s23, s22, 0xffff0000
-; GFX11-NEXT: v_add_f32_e64 v8, 0x40c00000, s15
-; GFX11-NEXT: v_and_b32_e32 v4, 0xffff, v36
-; GFX11-NEXT: v_cndmask_b32_e32 v2, v3, v5, vcc_lo
-; GFX11-NEXT: v_add_f32_e64 v3, 0x40c00000, s23
-; GFX11-NEXT: v_cmp_u_f32_e32 vcc_lo, v9, v9
-; GFX11-NEXT: v_add_f32_e64 v5, 0x40c00000, s14
-; GFX11-NEXT: s_lshr_b32 s23, s13, 16
-; GFX11-NEXT: v_bfe_u32 v9, v8, 16, 1
-; GFX11-NEXT: v_readfirstlane_b32 s14, v3
-; GFX11-NEXT: v_cndmask_b32_e32 v1, v1, v10, vcc_lo
-; GFX11-NEXT: v_cmp_u_f32_e32 vcc_lo, v3, v3
-; GFX11-NEXT: v_lshl_or_b32 v71, v37, 16, v4
-; GFX11-NEXT: v_add_f32_e64 v4, 0x40c00000, s12
-; GFX11-NEXT: s_bfe_u32 s15, s14, 0x10010
-; GFX11-NEXT: v_lshrrev_b32_e32 v38, 16, v1
-; GFX11-NEXT: s_add_i32 s15, s15, s14
-; GFX11-NEXT: s_bitset1_b32 s14, 22
-; GFX11-NEXT: s_addk_i32 s15, 0x7fff
-; GFX11-NEXT: s_and_b32 s13, vcc_lo, exec_lo
-; GFX11-NEXT: s_cselect_b32 s13, s14, s15
-; GFX11-NEXT: s_lshl_b32 s14, s22, 16
-; GFX11-NEXT: v_lshrrev_b32_e32 v2, 16, v2
-; GFX11-NEXT: v_add_f32_e64 v10, 0x40c00000, s14
-; GFX11-NEXT: v_bfe_u32 v1, v5, 16, 1
-; GFX11-NEXT: v_and_b32_e32 v3, 0xffff, v38
-; GFX11-NEXT: v_add_nc_u32_e32 v9, v9, v8
-; GFX11-NEXT: s_lshr_b32 s13, s13, 16
-; GFX11-NEXT: v_readfirstlane_b32 s14, v10
-; GFX11-NEXT: v_cmp_u_f32_e32 vcc_lo, v10, v10
-; GFX11-NEXT: v_add_nc_u32_e32 v1, v1, v5
-; GFX11-NEXT: v_lshl_or_b32 v70, v2, 16, v3
-; GFX11-NEXT: v_add_nc_u32_e32 v2, 0x7fff, v9
-; GFX11-NEXT: s_bfe_u32 s12, s14, 0x10010
-; GFX11-NEXT: v_or_b32_e32 v3, 0x400000, v8
-; GFX11-NEXT: s_add_i32 s12, s12, s14
-; GFX11-NEXT: s_bitset1_b32 s14, 22
-; GFX11-NEXT: s_addk_i32 s12, 0x7fff
-; GFX11-NEXT: s_and_b32 s15, vcc_lo, exec_lo
-; GFX11-NEXT: s_cselect_b32 s12, s14, s12
-; GFX11-NEXT: s_and_b32 s14, s25, 0xffff0000
-; GFX11-NEXT: v_cmp_u_f32_e32 vcc_lo, v8, v8
-; GFX11-NEXT: v_add_f32_e64 v10, 0x40c00000, s14
-; GFX11-NEXT: v_add_nc_u32_e32 v1, 0x7fff, v1
-; GFX11-NEXT: v_or_b32_e32 v9, 0x400000, v5
-; GFX11-NEXT: v_add_f32_e64 v8, 0x40c00000, s9
-; GFX11-NEXT: v_cndmask_b32_e32 v2, v2, v3, vcc_lo
-; GFX11-NEXT: v_readfirstlane_b32 s9, v10
-; GFX11-NEXT: v_cmp_u_f32_e32 vcc_lo, v5, v5
-; GFX11-NEXT: s_lshr_b32 s22, s12, 16
-; GFX11-NEXT: v_bfe_u32 v3, v4, 16, 1
-; GFX11-NEXT: v_lshrrev_b32_e32 v39, 16, v2
-; GFX11-NEXT: s_bfe_u32 s14, s9, 0x10010
-; GFX11-NEXT: v_cndmask_b32_e32 v1, v1, v9, vcc_lo
-; GFX11-NEXT: v_cmp_u_f32_e32 vcc_lo, v10, v10
-; GFX11-NEXT: s_add_i32 s14, s14, s9
-; GFX11-NEXT: s_bitset1_b32 s9, 22
-; GFX11-NEXT: s_addk_i32 s14, 0x7fff
-; GFX11-NEXT: v_lshrrev_b32_e32 v48, 16, v1
-; GFX11-NEXT: s_and_b32 s12, vcc_lo, exec_lo
-; GFX11-NEXT: s_cselect_b32 s9, s9, s14
-; GFX11-NEXT: s_lshl_b32 s12, s25, 16
-; GFX11-NEXT: v_add_f32_e64 v9, 0x40c00000, s8
-; GFX11-NEXT: v_add_f32_e64 v1, 0x40c00000, s12
-; GFX11-NEXT: v_add_nc_u32_e32 v2, v3, v4
-; GFX11-NEXT: s_lshr_b32 s63, s9, 16
-; GFX11-NEXT: v_bfe_u32 v3, v8, 16, 1
-; GFX11-NEXT: v_or_b32_e32 v10, 0x400000, v4
-; GFX11-NEXT: v_readfirstlane_b32 s8, v1
-; GFX11-NEXT: v_cmp_u_f32_e32 vcc_lo, v1, v1
-; GFX11-NEXT: v_add_nc_u32_e32 v2, 0x7fff, v2
-; GFX11-NEXT: v_add_nc_u32_e32 v3, v3, v8
-; GFX11-NEXT: v_or_b32_e32 v11, 0x400000, v8
-; GFX11-NEXT: s_bfe_u32 s12, s8, 0x10010
-; GFX11-NEXT: v_bfe_u32 v12, v9, 16, 1
-; GFX11-NEXT: s_add_i32 s12, s12, s8
-; GFX11-NEXT: s_bitset1_b32 s8, 22
-; GFX11-NEXT: s_addk_i32 s12, 0x7fff
-; GFX11-NEXT: s_and_b32 s9, vcc_lo, exec_lo
-; GFX11-NEXT: v_cmp_u_f32_e32 vcc_lo, v4, v4
-; GFX11-NEXT: s_cselect_b32 s8, s8, s12
-; GFX11-NEXT: s_and_b32 s9, s24, 0xffff0000
-; GFX11-NEXT: v_add_nc_u32_e32 v3, 0x7fff, v3
-; GFX11-NEXT: s_lshr_b32 s25, s8, 16
-; GFX11-NEXT: v_cndmask_b32_e32 v1, v2, v10, vcc_lo
-; GFX11-NEXT: v_add_f32_e64 v2, 0x40c00000, s9
-; GFX11-NEXT: v_cmp_u_f32_e32 vcc_lo, v8, v8
-; GFX11-NEXT: v_add_f32_e64 v8, 0x40c00000, s7
-; GFX11-NEXT: v_add_nc_u32_e32 v4, v12, v9
-; GFX11-NEXT: v_add_f32_e64 v12, 0x40c00000, s6
-; GFX11-NEXT: v_readfirstlane_b32 s7, v2
-; GFX11-NEXT: v_cndmask_b32_e32 v3, v3, v11, vcc_lo
-; GFX11-NEXT: v_cmp_u_f32_e32 vcc_lo, v2, v2
-; GFX11-NEXT: v_or_b32_e32 v2, 0x400000, v9
-; GFX11-NEXT: s_pack_ll_b32_b16 s28, s0, s5
-; GFX11-NEXT: s_bfe_u32 s9, s7, 0x10010
-; GFX11-NEXT: v_lshrrev_b32_e32 v49, 16, v3
-; GFX11-NEXT: s_add_i32 s9, s9, s7
-; GFX11-NEXT: s_bitset1_b32 s7, 22
-; GFX11-NEXT: s_addk_i32 s9, 0x7fff
-; GFX11-NEXT: s_and_b32 s8, vcc_lo, exec_lo
-; GFX11-NEXT: s_cselect_b32 s7, s7, s9
-; GFX11-NEXT: s_lshl_b32 s8, s24, 16
-; GFX11-NEXT: v_add_nc_u32_e32 v3, 0x7fff, v4
-; GFX11-NEXT: v_add_f32_e64 v10, 0x40c00000, s8
-; GFX11-NEXT: v_cmp_u_f32_e32 vcc_lo, v9, v9
-; GFX11-NEXT: v_add_f32_e64 v9, 0x40c00000, s4
-; GFX11-NEXT: v_bfe_u32 v4, v8, 16, 1
-; GFX11-NEXT: s_lshr_b32 s12, s7, 16
-; GFX11-NEXT: v_readfirstlane_b32 s8, v10
-; GFX11-NEXT: v_cndmask_b32_e32 v2, v3, v2, vcc_lo
-; GFX11-NEXT: v_cmp_u_f32_e32 vcc_lo, v10, v10
-; GFX11-NEXT: v_add_nc_u32_e32 v4, v4, v8
-; GFX11-NEXT: v_bfe_u32 v10, v12, 16, 1
-; GFX11-NEXT: s_bfe_u32 s4, s8, 0x10010
-; GFX11-NEXT: v_lshrrev_b32_e32 v50, 16, v2
-; GFX11-NEXT: s_add_i32 s4, s4, s8
-; GFX11-NEXT: s_bitset1_b32 s8, 22
-; GFX11-NEXT: s_addk_i32 s4, 0x7fff
-; GFX11-NEXT: s_and_b32 s6, vcc_lo, exec_lo
-; GFX11-NEXT: s_cselect_b32 s4, s8, s4
-; GFX11-NEXT: s_and_b32 s6, s27, 0xffff0000
-; GFX11-NEXT: v_add_nc_u32_e32 v3, 0x7fff, v4
-; GFX11-NEXT: v_add_f32_e64 v52, 0x40c00000, s6
-; GFX11-NEXT: v_or_b32_e32 v4, 0x400000, v8
-; GFX11-NEXT: v_cmp_u_f32_e32 vcc_lo, v8, v8
-; GFX11-NEXT: v_add_nc_u32_e32 v8, v10, v12
-; GFX11-NEXT: s_lshr_b32 s24, s4, 16
-; GFX11-NEXT: v_readfirstlane_b32 s6, v52
-; GFX11-NEXT: v_or_b32_e32 v10, 0x400000, v9
-; GFX11-NEXT: v_cndmask_b32_e32 v3, v3, v4, vcc_lo
-; GFX11-NEXT: v_cmp_u_f32_e32 vcc_lo, v52, v52
-; GFX11-NEXT: v_bfe_u32 v4, v9, 16, 1
-; GFX11-NEXT: s_bfe_u32 s7, s6, 0x10010
-; GFX11-NEXT: v_lshrrev_b32_e32 v1, 16, v1
-; GFX11-NEXT: s_add_i32 s7, s7, s6
-; GFX11-NEXT: s_bitset1_b32 s6, 22
-; GFX11-NEXT: s_addk_i32 s7, 0x7fff
-; GFX11-NEXT: s_and_b32 s4, vcc_lo, exec_lo
-; GFX11-NEXT: s_cselect_b32 s4, s6, s7
-; GFX11-NEXT: s_lshl_b32 s6, s27, 16
-; GFX11-NEXT: v_lshrrev_b32_e32 v51, 16, v3
-; GFX11-NEXT: v_add_nc_u32_e32 v2, v4, v9
-; GFX11-NEXT: v_add_nc_u32_e32 v3, 0x7fff, v8
-; GFX11-NEXT: v_or_b32_e32 v4, 0x400000, v12
-; GFX11-NEXT: v_add_f32_e64 v8, 0x40c00000, s6
-; GFX11-NEXT: v_cmp_u_f32_e32 vcc_lo, v12, v12
-; GFX11-NEXT: v_add_nc_u32_e32 v2, 0x7fff, v2
-; GFX11-NEXT: s_lshr_b32 s73, s4, 16
-; GFX11-NEXT: v_and_b32_e32 v11, 0xffff, v49
-; GFX11-NEXT: v_readfirstlane_b32 s6, v8
-; GFX11-NEXT: v_cndmask_b32_e32 v3, v3, v4, vcc_lo
-; GFX11-NEXT: v_cmp_u_f32_e32 vcc_lo, v9, v9
-; GFX11-NEXT: v_and_b32_e32 v4, 0xffff, v51
-; GFX11-NEXT: v_lshl_or_b32 v66, v1, 16, v11
-; GFX11-NEXT: s_bfe_u32 s7, s6, 0x10010
-; GFX11-NEXT: v_lshrrev_b32_e32 v52, 16, v3
-; GFX11-NEXT: v_cndmask_b32_e32 v2, v2, v10, vcc_lo
-; GFX11-NEXT: v_cmp_u_f32_e32 vcc_lo, v8, v8
-; GFX11-NEXT: s_add_i32 s7, s7, s6
-; GFX11-NEXT: s_bitset1_b32 s6, 22
-; GFX11-NEXT: s_addk_i32 s7, 0x7fff
-; GFX11-NEXT: v_lshrrev_b32_e32 v2, 16, v2
-; GFX11-NEXT: s_and_b32 s4, vcc_lo, exec_lo
-; GFX11-NEXT: s_cselect_b32 s4, s6, s7
-; GFX11-NEXT: s_and_b32 s6, s26, 0xffff0000
-; GFX11-NEXT: s_lshr_b32 s27, s4, 16
-; GFX11-NEXT: v_add_f32_e64 v3, 0x40c00000, s6
-; GFX11-NEXT: v_and_b32_e32 v8, 0xffff, v52
-; GFX11-NEXT: v_and_b32_e32 v5, 0xffff, v39
-; GFX11-NEXT: v_lshl_or_b32 v55, v50, 16, v4
-; GFX11-NEXT: s_pack_ll_b32_b16 s8, s22, s13
-; GFX11-NEXT: v_readfirstlane_b32 s6, v3
-; GFX11-NEXT: v_cmp_u_f32_e32 vcc_lo, v3, v3
-; GFX11-NEXT: v_lshl_or_b32 v54, v2, 16, v8
-; GFX11-NEXT: v_lshl_or_b32 v67, v48, 16, v5
-; GFX11-NEXT: v_lshrrev_b64 v[8:9], 24, v[17:18]
-; GFX11-NEXT: s_bfe_u32 s5, s6, 0x10010
-; GFX11-NEXT: v_lshrrev_b64 v[9:10], 24, v[15:16]
-; GFX11-NEXT: s_add_i32 s5, s5, s6
-; GFX11-NEXT: s_bitset1_b32 s6, 22
-; GFX11-NEXT: s_addk_i32 s5, 0x7fff
-; GFX11-NEXT: s_and_b32 s4, vcc_lo, exec_lo
-; GFX11-NEXT: s_cselect_b32 s14, s6, s5
-; GFX11-NEXT: s_lshl_b32 s4, s26, 16
-; GFX11-NEXT: s_pack_ll_b32_b16 s6, s20, s10
-; GFX11-NEXT: v_add_f32_e64 v1, 0x40c00000, s4
-; GFX11-NEXT: s_lshr_b32 s13, s14, 16
-; GFX11-NEXT: v_lshrrev_b64 v[10:11], 24, v[13:14]
-; GFX11-NEXT: v_lshrrev_b64 v[11:12], 24, v[6:7]
-; GFX11-NEXT: s_pack_ll_b32_b16 s29, s1, s58
-; GFX11-NEXT: v_readfirstlane_b32 s11, v1
-; GFX11-NEXT: v_cmp_u_f32_e32 vcc_lo, v1, v1
-; GFX11-NEXT: v_lshrrev_b64 v[1:2], 24, v[54:55]
-; GFX11-NEXT: v_lshrrev_b64 v[2:3], 24, v[66:67]
-; GFX11-NEXT: v_lshrrev_b64 v[3:4], 24, v[70:71]
-; GFX11-NEXT: s_bfe_u32 s10, s11, 0x10010
-; GFX11-NEXT: v_lshrrev_b64 v[4:5], 24, v[19:20]
-; GFX11-NEXT: s_add_i32 s10, s10, s11
-; GFX11-NEXT: s_bitset1_b32 s11, 22
-; GFX11-NEXT: s_addk_i32 s10, 0x7fff
-; GFX11-NEXT: s_and_b32 s14, vcc_lo, exec_lo
-; GFX11-NEXT: s_cselect_b32 s10, s11, s10
-; GFX11-NEXT: s_pack_ll_b32_b16 s5, s19, s60
-; GFX11-NEXT: s_lshr_b32 s26, s10, 16
-; GFX11-NEXT: s_pack_ll_b32_b16 s4, s18, s40
-; GFX11-NEXT: s_pack_ll_b32_b16 s9, s23, s62
-; GFX11-NEXT: v_lshrrev_b32_e32 v5, 24, v55
-; GFX11-NEXT: v_lshrrev_b32_e32 v12, 8, v55
-; GFX11-NEXT: v_lshrrev_b32_e32 v53, 16, v54
-; GFX11-NEXT: v_lshrrev_b32_e32 v54, 8, v54
-; GFX11-NEXT: v_lshrrev_b32_e32 v55, 24, v67
-; GFX11-NEXT: v_lshrrev_b32_e32 v64, 8, v67
-; GFX11-NEXT: v_lshrrev_b32_e32 v65, 16, v66
-; GFX11-NEXT: v_lshrrev_b32_e32 v66, 8, v66
-; GFX11-NEXT: v_lshrrev_b32_e32 v67, 24, v71
-; GFX11-NEXT: v_lshrrev_b32_e32 v68, 8, v71
-; GFX11-NEXT: v_lshrrev_b32_e32 v69, 16, v70
-; GFX11-NEXT: v_lshrrev_b32_e32 v70, 8, v70
-; GFX11-NEXT: v_lshrrev_b32_e32 v71, 24, v20
-; GFX11-NEXT: v_lshrrev_b32_e32 v20, 8, v20
-; GFX11-NEXT: v_lshrrev_b32_e32 v19, 8, v19
-; GFX11-NEXT: v_lshrrev_b32_e32 v18, 8, v18
-; GFX11-NEXT: v_lshrrev_b32_e32 v82, 16, v17
-; GFX11-NEXT: v_lshrrev_b32_e32 v17, 8, v17
-; GFX11-NEXT: v_lshrrev_b32_e32 v16, 8, v16
-; GFX11-NEXT: v_lshrrev_b32_e32 v84, 16, v15
-; GFX11-NEXT: v_lshrrev_b32_e32 v15, 8, v15
-; GFX11-NEXT: v_lshrrev_b32_e32 v14, 8, v14
-; GFX11-NEXT: v_lshrrev_b32_e32 v13, 8, v13
-; GFX11-NEXT: v_lshrrev_b32_e32 v7, 8, v7
-; GFX11-NEXT: v_lshrrev_b32_e32 v6, 8, v6
-; GFX11-NEXT: s_pack_ll_b32_b16 s7, s21, s61
-; GFX11-NEXT: s_pack_ll_b32_b16 s11, s25, s63
-; GFX11-NEXT: s_pack_ll_b32_b16 s57, s27, s73
-; GFX11-NEXT: s_pack_ll_b32_b16 s56, s26, s13
-; GFX11-NEXT: s_pack_ll_b32_b16 s10, s24, s12
-; GFX11-NEXT: s_lshr_b64 s[94:95], s[8:9], 24
-; GFX11-NEXT: s_lshr_b64 s[12:13], s[4:5], 24
-; GFX11-NEXT: s_lshr_b64 s[14:15], s[46:47], 24
-; GFX11-NEXT: s_lshr_b64 s[40:41], s[44:45], 24
-; GFX11-NEXT: s_lshr_b64 s[42:43], s[28:29], 24
-; GFX11-NEXT: s_lshr_b64 vcc, s[56:57], 24
-; GFX11-NEXT: s_lshr_b64 s[34:35], s[10:11], 24
-; GFX11-NEXT: s_lshr_b64 s[30:31], s[6:7], 24
-; GFX11-NEXT: s_lshr_b32 s13, s57, 24
-; GFX11-NEXT: s_lshr_b32 s15, s57, 8
-; GFX11-NEXT: s_lshr_b32 s41, s56, 16
-; GFX11-NEXT: s_lshr_b32 s43, s56, 8
-; GFX11-NEXT: s_lshr_b32 s56, s11, 24
-; GFX11-NEXT: s_lshr_b32 s11, s11, 8
-; GFX11-NEXT: s_lshr_b32 s57, s10, 16
-; GFX11-NEXT: s_lshr_b32 s10, s10, 8
-; GFX11-NEXT: s_lshr_b32 s74, s9, 24
-; GFX11-NEXT: s_lshr_b32 s9, s9, 8
-; GFX11-NEXT: s_lshr_b32 s75, s8, 16
-; GFX11-NEXT: s_lshr_b32 s8, s8, 8
-; GFX11-NEXT: s_lshr_b32 s76, s7, 24
-; GFX11-NEXT: s_lshr_b32 s77, s7, 8
-; GFX11-NEXT: s_lshr_b32 s78, s6, 16
-; GFX11-NEXT: s_lshr_b32 s79, s6, 8
-; GFX11-NEXT: s_lshr_b32 s88, s5, 24
-; GFX11-NEXT: s_lshr_b32 s89, s5, 8
-; GFX11-NEXT: s_lshr_b32 s90, s4, 16
-; GFX11-NEXT: s_lshr_b32 s91, s4, 8
-; GFX11-NEXT: s_lshr_b32 s92, s47, 24
-; GFX11-NEXT: s_lshr_b32 s47, s47, 8
-; GFX11-NEXT: s_lshr_b32 s93, s46, 16
-; GFX11-NEXT: s_lshr_b32 s46, s46, 8
-; GFX11-NEXT: s_lshr_b32 s95, s45, 24
-; GFX11-NEXT: s_lshr_b32 s45, s45, 8
-; GFX11-NEXT: s_lshr_b32 s99, s44, 16
-; GFX11-NEXT: s_lshr_b32 s100, s44, 8
-; GFX11-NEXT: s_lshr_b32 s101, s29, 24
-; GFX11-NEXT: s_lshr_b32 s102, s29, 8
-; GFX11-NEXT: s_lshr_b32 s103, s28, 16
-; GFX11-NEXT: s_lshr_b32 s104, s28, 8
-; GFX11-NEXT: s_branch .LBB91_5
-; GFX11-NEXT: .LBB91_3:
-; GFX11-NEXT: ; implicit-def: $sgpr4
-; GFX11-NEXT: ; kill: killed $sgpr4
-; GFX11-NEXT: ; implicit-def: $sgpr74
-; GFX11-NEXT: ; implicit-def: $sgpr4
-; GFX11-NEXT: ; kill: killed $sgpr4
-; GFX11-NEXT: ; implicit-def: $sgpr104
-; GFX11-NEXT: ; implicit-def: $sgpr103
-; GFX11-NEXT: ; implicit-def: $sgpr42
-; GFX11-NEXT: ; implicit-def: $sgpr102
-; GFX11-NEXT: ; implicit-def: $sgpr11
-; GFX11-NEXT: ; implicit-def: $sgpr101
-; GFX11-NEXT: ; implicit-def: $sgpr100
-; GFX11-NEXT: ; implicit-def: $sgpr99
-; GFX11-NEXT: ; implicit-def: $sgpr40
-; GFX11-NEXT: ; implicit-def: $sgpr14
-; GFX11-NEXT: ; implicit-def: $sgpr12
-; GFX11-NEXT: ; implicit-def: $sgpr49
-; GFX11-NEXT: ; implicit-def: $sgpr37
-; GFX11-NEXT: ; implicit-def: $sgpr35
-; GFX11-NEXT: ; implicit-def: $sgpr6
-; GFX11-NEXT: ; implicit-def: $sgpr34
-; GFX11-NEXT: ; implicit-def: $sgpr52
-; GFX11-NEXT: ; implicit-def: $sgpr53
-; GFX11-NEXT: ; implicit-def: $sgpr50
-; GFX11-NEXT: ; implicit-def: $sgpr7
-; GFX11-NEXT: ; implicit-def: $sgpr36
-; GFX11-NEXT: ; implicit-def: $sgpr64
-; GFX11-NEXT: ; implicit-def: $sgpr38
-; GFX11-NEXT: ; implicit-def: $sgpr54
-; GFX11-NEXT: ; implicit-def: $sgpr96
-; GFX11-NEXT: ; implicit-def: $sgpr51
-; GFX11-NEXT: ; implicit-def: $sgpr67
-; GFX11-NEXT: ; implicit-def: $sgpr68
-; GFX11-NEXT: ; implicit-def: $sgpr65
-; GFX11-NEXT: ; implicit-def: $sgpr8
-; GFX11-NEXT: ; implicit-def: $sgpr55
-; GFX11-NEXT: ; implicit-def: $sgpr39
-; GFX11-NEXT: ; implicit-def: $sgpr71
-; GFX11-NEXT: ; implicit-def: $sgpr69
-; GFX11-NEXT: ; implicit-def: $sgpr9
-; GFX11-NEXT: ; implicit-def: $sgpr66
-; GFX11-NEXT: ; implicit-def: $sgpr82
-; GFX11-NEXT: ; implicit-def: $sgpr83
-; GFX11-NEXT: ; implicit-def: $sgpr80
-; GFX11-NEXT: ; implicit-def: $sgpr97
-; GFX11-NEXT: ; implicit-def: $sgpr70
-; GFX11-NEXT: ; implicit-def: $sgpr48
-; GFX11-NEXT: ; implicit-def: $sgpr84
-; GFX11-NEXT: ; implicit-def: $sgpr98
-; GFX11-NEXT: ; implicit-def: $sgpr81
-; GFX11-NEXT: ; implicit-def: $sgpr86
-; GFX11-NEXT: ; implicit-def: $sgpr87
-; GFX11-NEXT: ; implicit-def: $sgpr10
-; GFX11-NEXT: ; implicit-def: $sgpr85
-; GFX11-NEXT: ; implicit-def: $sgpr30
-; GFX11-NEXT: ; implicit-def: $sgpr94
-; GFX11-NEXT: ; implicit-def: $sgpr92
-; GFX11-NEXT: ; implicit-def: $sgpr90
-; GFX11-NEXT: ; implicit-def: $sgpr88
-; GFX11-NEXT: ; implicit-def: $sgpr78
-; GFX11-NEXT: ; implicit-def: $sgpr76
-; GFX11-NEXT: ; implicit-def: $sgpr4
-; GFX11-NEXT: ; kill: killed $sgpr4
-; GFX11-NEXT: ; implicit-def: $sgpr4
-; GFX11-NEXT: ; kill: killed $sgpr4
-; GFX11-NEXT: ; implicit-def: $sgpr4
-; GFX11-NEXT: ; kill: killed $sgpr4
-; GFX11-NEXT: ; implicit-def: $sgpr4
-; GFX11-NEXT: ; kill: killed $sgpr4
-; GFX11-NEXT: ; implicit-def: $sgpr4
-; GFX11-NEXT: ; kill: killed $sgpr4
-; GFX11-NEXT: ; implicit-def: $sgpr4
-; GFX11-NEXT: ; kill: killed $sgpr4
-; GFX11-NEXT: ; implicit-def: $sgpr4
-; GFX11-NEXT: ; kill: killed $sgpr4
-; GFX11-NEXT: ; implicit-def: $sgpr4
-; GFX11-NEXT: ; kill: killed $sgpr4
-; GFX11-NEXT: ; implicit-def: $sgpr4
-; GFX11-NEXT: ; kill: killed $sgpr4
-; GFX11-NEXT: ; implicit-def: $sgpr4
-; GFX11-NEXT: ; kill: killed $sgpr4
-; GFX11-NEXT: ; implicit-def: $sgpr4
-; GFX11-NEXT: ; kill: killed $sgpr4
-; GFX11-NEXT: ; implicit-def: $sgpr4
-; GFX11-NEXT: ; kill: killed $sgpr4
-; GFX11-NEXT: ; implicit-def: $sgpr4
-; GFX11-NEXT: ; kill: killed $sgpr4
-; GFX11-NEXT: ; implicit-def: $sgpr4
-; GFX11-NEXT: v_writelane_b32 v43, s4, 0
-; GFX11-NEXT: v_writelane_b32 v43, s5, 1
-; GFX11-NEXT: ; implicit-def: $sgpr4
-; GFX11-NEXT: ; kill: killed $sgpr4
-; GFX11-NEXT: ; implicit-def: $sgpr4
-; GFX11-NEXT: ; kill: killed $sgpr4
-; GFX11-NEXT: ; implicit-def: $sgpr4
-; GFX11-NEXT: ; kill: killed $sgpr4
-; GFX11-NEXT: ; implicit-def: $sgpr4
-; GFX11-NEXT: ; kill: killed $sgpr4
-; GFX11-NEXT: ; implicit-def: $sgpr4
-; GFX11-NEXT: ; kill: killed $sgpr4
-; GFX11-NEXT: ; implicit-def: $sgpr4
-; GFX11-NEXT: v_writelane_b32 v43, s4, 2
-; GFX11-NEXT: v_writelane_b32 v43, s5, 3
-; GFX11-NEXT: ; implicit-def: $sgpr4
-; GFX11-NEXT: ; kill: killed $sgpr4
-; GFX11-NEXT: ; implicit-def: $sgpr4
-; GFX11-NEXT: ; kill: killed $sgpr4
-; GFX11-NEXT: ; implicit-def: $sgpr4
-; GFX11-NEXT: ; kill: killed $sgpr4
-; GFX11-NEXT: ; implicit-def: $sgpr4
-; GFX11-NEXT: ; kill: killed $sgpr4
-; GFX11-NEXT: ; implicit-def: $sgpr4
-; GFX11-NEXT: v_writelane_b32 v43, s74, 4
-; GFX11-NEXT: ; kill: killed $sgpr4
-; GFX11-NEXT: ; implicit-def: $sgpr4
-; GFX11-NEXT: ; kill: killed $sgpr4
-; GFX11-NEXT: ; implicit-def: $sgpr4
-; GFX11-NEXT: ; kill: killed $sgpr4
-; GFX11-NEXT: ; implicit-def: $sgpr4
-; GFX11-NEXT: ; kill: killed $sgpr4
-; GFX11-NEXT: ; implicit-def: $sgpr4
-; GFX11-NEXT: ; kill: killed $sgpr4
-; GFX11-NEXT: ; implicit-def: $sgpr4
-; GFX11-NEXT: v_writelane_b32 v43, s75, 5
-; GFX11-NEXT: ; implicit-def: $sgpr74
-; GFX11-NEXT: ; kill: killed $sgpr4
-; GFX11-NEXT: ; implicit-def: $sgpr4
-; GFX11-NEXT: ; kill: killed $sgpr4
-; GFX11-NEXT: ; implicit-def: $sgpr4
-; GFX11-NEXT: ; kill: killed $sgpr4
-; GFX11-NEXT: ; implicit-def: $sgpr4
-; GFX11-NEXT: ; kill: killed $sgpr4
-; GFX11-NEXT: ; implicit-def: $sgpr4
-; GFX11-NEXT: ; implicit-def: $sgpr5
-; GFX11-NEXT: v_writelane_b32 v43, s74, 6
-; GFX11-NEXT: v_writelane_b32 v43, s75, 7
-; GFX11-NEXT: ; implicit-def: $sgpr74
-; GFX11-NEXT: s_branch .LBB91_2
-; GFX11-NEXT: .LBB91_4:
-; GFX11-NEXT: v_dual_mov_b32 v10, s94 :: v_dual_mov_b32 v11, s30
-; GFX11-NEXT: v_readlane_b32 s94, v43, 2
-; GFX11-NEXT: v_dual_mov_b32 v96, s37 :: v_dual_mov_b32 v87, s34
-; GFX11-NEXT: v_dual_mov_b32 v6, s49 :: v_dual_mov_b32 v7, s35
-; GFX11-NEXT: v_readlane_b32 s95, v43, 3
-; GFX11-NEXT: v_readlane_b32 vcc_lo, v43, 6
-; GFX11-NEXT: v_readlane_b32 s30, v43, 0
-; GFX11-NEXT: v_readlane_b32 s34, v43, 4
-; GFX11-NEXT: v_dual_mov_b32 v52, s44 :: v_dual_mov_b32 v51, s45
-; GFX11-NEXT: v_dual_mov_b32 v50, s10 :: v_dual_mov_b32 v49, s46
-; GFX11-NEXT: v_dual_mov_b32 v39, s47 :: v_dual_mov_b32 v48, s98
-; GFX11-NEXT: v_dual_mov_b32 v38, s56 :: v_dual_mov_b32 v37, s97
-; GFX11-NEXT: v_dual_mov_b32 v36, s57 :: v_dual_mov_b32 v35, s58
-; GFX11-NEXT: v_dual_mov_b32 v34, s59 :: v_dual_mov_b32 v33, s9
-; GFX11-NEXT: v_dual_mov_b32 v32, s60 :: v_dual_mov_b32 v31, s61
-; GFX11-NEXT: v_dual_mov_b32 v30, s8 :: v_dual_mov_b32 v29, s62
-; GFX11-NEXT: v_dual_mov_b32 v27, s63 :: v_dual_mov_b32 v28, s96
-; GFX11-NEXT: v_dual_mov_b32 v26, s72 :: v_dual_mov_b32 v25, s7
-; GFX11-NEXT: v_dual_mov_b32 v24, s73 :: v_dual_mov_b32 v23, s28
-; GFX11-NEXT: v_dual_mov_b32 v21, s29 :: v_dual_mov_b32 v22, s6
-; GFX11-NEXT: v_dual_mov_b32 v53, s87 :: v_dual_mov_b32 v54, s86
-; GFX11-NEXT: v_dual_mov_b32 v5, s85 :: v_dual_mov_b32 v12, s5
-; GFX11-NEXT: v_dual_mov_b32 v65, s4 :: v_dual_mov_b32 v66, s48
-; GFX11-NEXT: v_dual_mov_b32 v55, s81 :: v_dual_mov_b32 v64, s84
-; GFX11-NEXT: v_dual_mov_b32 v69, s83 :: v_dual_mov_b32 v70, s82
-; GFX11-NEXT: v_dual_mov_b32 v67, s70 :: v_dual_mov_b32 v68, s80
-; GFX11-NEXT: v_dual_mov_b32 v80, s71 :: v_dual_mov_b32 v19, s39
-; GFX11-NEXT: v_dual_mov_b32 v71, s66 :: v_dual_mov_b32 v20, s69
-; GFX11-NEXT: v_dual_mov_b32 v82, s68 :: v_dual_mov_b32 v17, s67
-; GFX11-NEXT: v_dual_mov_b32 v81, s55 :: v_dual_mov_b32 v18, s65
-; GFX11-NEXT: v_dual_mov_b32 v84, s38 :: v_dual_mov_b32 v15, s64
-; GFX11-NEXT: v_dual_mov_b32 v83, s51 :: v_dual_mov_b32 v16, s54
-; GFX11-NEXT: v_dual_mov_b32 v86, s53 :: v_dual_mov_b32 v13, s52
-; GFX11-NEXT: v_dual_mov_b32 v85, s36 :: v_dual_mov_b32 v14, s50
-; GFX11-NEXT: v_dual_mov_b32 v1, s74 :: v_dual_mov_b32 v2, s76
-; GFX11-NEXT: v_dual_mov_b32 v3, s78 :: v_dual_mov_b32 v4, s88
-; GFX11-NEXT: v_dual_mov_b32 v8, s90 :: v_dual_mov_b32 v9, s92
-; GFX11-NEXT: s_mov_b32 s58, s11
-; GFX11-NEXT: v_readlane_b32 s59, v43, 8
-; GFX11-NEXT: v_readlane_b32 s72, v43, 9
-; GFX11-NEXT: v_readlane_b32 s60, v43, 10
-; GFX11-NEXT: v_readlane_b32 s61, v43, 11
-; GFX11-NEXT: v_readlane_b32 s62, v43, 12
-; GFX11-NEXT: v_readlane_b32 s63, v43, 13
-; GFX11-NEXT: v_readlane_b32 s73, v43, 14
-; GFX11-NEXT: v_readlane_b32 s13, v43, 15
-; GFX11-NEXT: v_readlane_b32 s15, v43, 16
-; GFX11-NEXT: v_readlane_b32 s41, v43, 17
-; GFX11-NEXT: v_readlane_b32 s43, v43, 18
-; GFX11-NEXT: v_readlane_b32 s56, v43, 19
-; GFX11-NEXT: v_readlane_b32 s11, v43, 20
-; GFX11-NEXT: v_readlane_b32 s57, v43, 21
-; GFX11-NEXT: v_readlane_b32 s10, v43, 22
-; GFX11-NEXT: v_readlane_b32 s74, v43, 23
-; GFX11-NEXT: v_readlane_b32 s9, v43, 24
-; GFX11-NEXT: v_readlane_b32 s75, v43, 25
-; GFX11-NEXT: v_readlane_b32 s8, v43, 26
-; GFX11-NEXT: v_readlane_b32 s76, v43, 27
-; GFX11-NEXT: v_readlane_b32 s77, v43, 28
-; GFX11-NEXT: v_readlane_b32 s78, v43, 29
-; GFX11-NEXT: v_readlane_b32 s79, v43, 30
-; GFX11-NEXT: v_readlane_b32 s88, v43, 31
-; GFX11-NEXT: v_readlane_b32 s89, v42, 0
-; GFX11-NEXT: v_readlane_b32 s90, v42, 1
-; GFX11-NEXT: v_readlane_b32 s91, v42, 2
-; GFX11-NEXT: v_readlane_b32 s92, v42, 3
-; GFX11-NEXT: v_readlane_b32 s47, v42, 4
-; GFX11-NEXT: v_readlane_b32 s93, v42, 5
-; GFX11-NEXT: v_readlane_b32 vcc_hi, v43, 7
-; GFX11-NEXT: v_readlane_b32 s46, v42, 6
-; GFX11-NEXT: v_readlane_b32 s31, v43, 1
-; GFX11-NEXT: v_readlane_b32 s95, v42, 7
-; GFX11-NEXT: v_readlane_b32 s45, v42, 8
-; GFX11-NEXT: v_readlane_b32 s35, v43, 5
-; GFX11-NEXT: .LBB91_5: ; %end
-; GFX11-NEXT: s_and_b32 s0, s0, 0xff
-; GFX11-NEXT: s_lshl_b32 s4, s104, 8
-; GFX11-NEXT: s_and_b32 s5, s103, 0xff
-; GFX11-NEXT: s_lshl_b32 s6, s42, 8
-; GFX11-NEXT: s_or_b32 s0, s0, s4
-; GFX11-NEXT: s_or_b32 s4, s5, s6
-; GFX11-NEXT: s_and_b32 s1, s1, 0xff
-; GFX11-NEXT: s_lshl_b32 s5, s102, 8
-; GFX11-NEXT: s_and_b32 s6, s58, 0xff
-; GFX11-NEXT: s_lshl_b32 s7, s101, 8
-; GFX11-NEXT: s_or_b32 s1, s1, s5
-; GFX11-NEXT: s_or_b32 s5, s6, s7
-; GFX11-NEXT: s_and_b32 s0, s0, 0xffff
-; GFX11-NEXT: s_lshl_b32 s4, s4, 16
-; GFX11-NEXT: s_and_b32 s1, s1, 0xffff
-; GFX11-NEXT: s_lshl_b32 s5, s5, 16
-; GFX11-NEXT: s_or_b32 s0, s0, s4
-; GFX11-NEXT: s_or_b32 s1, s1, s5
-; GFX11-NEXT: s_and_b32 s2, s2, 0xff
-; GFX11-NEXT: s_lshl_b32 s4, s100, 8
-; GFX11-NEXT: s_and_b32 s5, s99, 0xff
-; GFX11-NEXT: s_lshl_b32 s6, s40, 8
-; GFX11-NEXT: s_or_b32 s2, s2, s4
-; GFX11-NEXT: s_or_b32 s4, s5, s6
-; GFX11-NEXT: s_and_b32 s3, s3, 0xff
-; GFX11-NEXT: s_lshl_b32 s5, s45, 8
-; GFX11-NEXT: s_and_b32 s6, s59, 0xff
-; GFX11-NEXT: s_lshl_b32 s7, s95, 8
-; GFX11-NEXT: s_or_b32 s3, s3, s5
-; GFX11-NEXT: s_or_b32 s5, s6, s7
-; GFX11-NEXT: s_and_b32 s2, s2, 0xffff
-; GFX11-NEXT: s_lshl_b32 s4, s4, 16
-; GFX11-NEXT: s_and_b32 s3, s3, 0xffff
-; GFX11-NEXT: s_lshl_b32 s5, s5, 16
-; GFX11-NEXT: s_or_b32 s2, s2, s4
-; GFX11-NEXT: s_or_b32 s3, s3, s5
-; GFX11-NEXT: v_dual_mov_b32 v97, s0 :: v_dual_mov_b32 v98, s1
-; GFX11-NEXT: v_dual_mov_b32 v99, s2 :: v_dual_mov_b32 v100, s3
-; GFX11-NEXT: s_and_b32 s0, s16, 0xff
-; GFX11-NEXT: s_lshl_b32 s1, s46, 8
-; GFX11-NEXT: s_and_b32 s2, s93, 0xff
-; GFX11-NEXT: s_lshl_b32 s3, s14, 8
-; GFX11-NEXT: s_or_b32 s0, s0, s1
-; GFX11-NEXT: s_or_b32 s1, s2, s3
-; GFX11-NEXT: s_and_b32 s2, s17, 0xff
-; GFX11-NEXT: s_lshl_b32 s3, s47, 8
-; GFX11-NEXT: s_and_b32 s4, s72, 0xff
-; GFX11-NEXT: s_lshl_b32 s5, s92, 8
-; GFX11-NEXT: s_or_b32 s2, s2, s3
-; GFX11-NEXT: s_or_b32 s3, s4, s5
-; GFX11-NEXT: s_and_b32 s0, s0, 0xffff
-; GFX11-NEXT: s_lshl_b32 s1, s1, 16
-; GFX11-NEXT: s_and_b32 s2, s2, 0xffff
-; GFX11-NEXT: s_lshl_b32 s3, s3, 16
-; GFX11-NEXT: s_or_b32 s0, s0, s1
-; GFX11-NEXT: s_or_b32 s1, s2, s3
-; GFX11-NEXT: s_and_b32 s2, s18, 0xff
-; GFX11-NEXT: s_lshl_b32 s3, s91, 8
-; GFX11-NEXT: s_and_b32 s4, s90, 0xff
-; GFX11-NEXT: s_lshl_b32 s5, s12, 8
-; GFX11-NEXT: s_or_b32 s2, s2, s3
-; GFX11-NEXT: s_or_b32 s3, s4, s5
-; GFX11-NEXT: s_and_b32 s4, s19, 0xff
-; GFX11-NEXT: s_lshl_b32 s5, s89, 8
-; GFX11-NEXT: s_and_b32 s6, s60, 0xff
-; GFX11-NEXT: s_lshl_b32 s7, s88, 8
-; GFX11-NEXT: s_or_b32 s4, s4, s5
-; GFX11-NEXT: s_or_b32 s5, s6, s7
-; GFX11-NEXT: s_and_b32 s2, s2, 0xffff
-; GFX11-NEXT: s_lshl_b32 s3, s3, 16
-; GFX11-NEXT: s_and_b32 s4, s4, 0xffff
-; GFX11-NEXT: s_lshl_b32 s5, s5, 16
-; GFX11-NEXT: s_or_b32 s2, s2, s3
-; GFX11-NEXT: s_or_b32 s3, s4, s5
-; GFX11-NEXT: v_dual_mov_b32 v112, s0 :: v_dual_mov_b32 v113, s1
-; GFX11-NEXT: v_dual_mov_b32 v114, s2 :: v_dual_mov_b32 v115, s3
-; GFX11-NEXT: s_and_b32 s0, s20, 0xff
-; GFX11-NEXT: s_lshl_b32 s1, s79, 8
-; GFX11-NEXT: s_and_b32 s2, s78, 0xff
-; GFX11-NEXT: s_lshl_b32 s3, s30, 8
-; GFX11-NEXT: s_or_b32 s0, s0, s1
-; GFX11-NEXT: s_or_b32 s1, s2, s3
-; GFX11-NEXT: s_and_b32 s2, s21, 0xff
-; GFX11-NEXT: s_lshl_b32 s3, s77, 8
-; GFX11-NEXT: s_and_b32 s4, s61, 0xff
-; GFX11-NEXT: s_lshl_b32 s5, s76, 8
-; GFX11-NEXT: s_or_b32 s2, s2, s3
-; GFX11-NEXT: s_or_b32 s3, s4, s5
-; GFX11-NEXT: s_and_b32 s0, s0, 0xffff
-; GFX11-NEXT: s_lshl_b32 s1, s1, 16
-; GFX11-NEXT: s_and_b32 s2, s2, 0xffff
-; GFX11-NEXT: s_lshl_b32 s3, s3, 16
-; GFX11-NEXT: s_or_b32 s0, s0, s1
-; GFX11-NEXT: s_or_b32 s1, s2, s3
-; GFX11-NEXT: s_and_b32 s2, s22, 0xff
-; GFX11-NEXT: s_lshl_b32 s3, s8, 8
-; GFX11-NEXT: s_and_b32 s4, s75, 0xff
-; GFX11-NEXT: s_lshl_b32 s5, s94, 8
-; GFX11-NEXT: s_or_b32 s2, s2, s3
-; GFX11-NEXT: s_or_b32 s3, s4, s5
-; GFX11-NEXT: s_and_b32 s4, s23, 0xff
-; GFX11-NEXT: s_lshl_b32 s5, s9, 8
-; GFX11-NEXT: s_and_b32 s6, s62, 0xff
-; GFX11-NEXT: s_lshl_b32 s7, s74, 8
-; GFX11-NEXT: s_and_b32 s2, s2, 0xffff
-; GFX11-NEXT: s_lshl_b32 s3, s3, 16
-; GFX11-NEXT: s_or_b32 s4, s4, s5
-; GFX11-NEXT: s_or_b32 s5, s6, s7
-; GFX11-NEXT: s_and_b32 s4, s4, 0xffff
-; GFX11-NEXT: s_lshl_b32 s5, s5, 16
-; GFX11-NEXT: s_or_b32 s2, s2, s3
-; GFX11-NEXT: s_clause 0x1
-; GFX11-NEXT: scratch_store_b128 v0, v[97:100], off
-; GFX11-NEXT: scratch_store_b128 v0, v[112:115], off offset:16
-; GFX11-NEXT: s_or_b32 s3, s4, s5
-; GFX11-NEXT: v_dual_mov_b32 v97, s0 :: v_dual_mov_b32 v98, s1
-; GFX11-NEXT: v_dual_mov_b32 v99, s2 :: v_dual_mov_b32 v100, s3
-; GFX11-NEXT: s_and_b32 s0, s24, 0xff
-; GFX11-NEXT: s_lshl_b32 s1, s10, 8
-; GFX11-NEXT: s_and_b32 s2, s57, 0xff
-; GFX11-NEXT: s_lshl_b32 s4, s34, 8
-; GFX11-NEXT: s_or_b32 s0, s0, s1
-; GFX11-NEXT: s_or_b32 s1, s2, s4
-; GFX11-NEXT: s_and_b32 s0, s0, 0xffff
-; GFX11-NEXT: s_lshl_b32 s1, s1, 16
-; GFX11-NEXT: s_lshl_b32 s2, s11, 8
-; GFX11-NEXT: s_or_b32 s0, s0, s1
-; GFX11-NEXT: s_and_b32 s1, s25, 0xff
-; GFX11-NEXT: s_and_b32 s3, s63, 0xff
-; GFX11-NEXT: s_lshl_b32 s4, s56, 8
-; GFX11-NEXT: s_or_b32 s1, s1, s2
-; GFX11-NEXT: s_or_b32 s2, s3, s4
-; GFX11-NEXT: s_and_b32 s1, s1, 0xffff
-; GFX11-NEXT: s_lshl_b32 s2, s2, 16
-; GFX11-NEXT: s_and_b32 s3, s26, 0xff
-; GFX11-NEXT: s_lshl_b32 s4, s43, 8
-; GFX11-NEXT: s_or_b32 s1, s1, s2
-; GFX11-NEXT: s_or_b32 s2, s3, s4
-; GFX11-NEXT: s_and_b32 s3, s41, 0xff
-; GFX11-NEXT: s_lshl_b32 s4, vcc_lo, 8
-; GFX11-NEXT: s_lshl_b32 s5, s15, 8
-; GFX11-NEXT: s_or_b32 s3, s3, s4
-; GFX11-NEXT: s_and_b32 s4, s27, 0xff
-; GFX11-NEXT: s_lshl_b32 s6, s13, 8
-; GFX11-NEXT: s_or_b32 s4, s4, s5
-; GFX11-NEXT: s_and_b32 s5, s73, 0xff
-; GFX11-NEXT: s_and_b32 s2, s2, 0xffff
-; GFX11-NEXT: s_or_b32 s5, s5, s6
-; GFX11-NEXT: s_lshl_b32 s3, s3, 16
-; GFX11-NEXT: s_and_b32 s4, s4, 0xffff
-; GFX11-NEXT: s_lshl_b32 s5, s5, 16
-; GFX11-NEXT: v_dual_mov_b32 v112, s0 :: v_dual_and_b32 v23, 0xff, v23
-; GFX11-NEXT: v_dual_mov_b32 v113, s1 :: v_dual_lshlrev_b32 v6, 8, v6
-; GFX11-NEXT: s_or_b32 s2, s2, s3
-; GFX11-NEXT: s_delay_alu instid0(SALU_CYCLE_1) | instskip(SKIP_1) | instid1(SALU_CYCLE_1)
-; GFX11-NEXT: v_dual_mov_b32 v114, s2 :: v_dual_lshlrev_b32 v11, 8, v11
-; GFX11-NEXT: s_or_b32 s3, s4, s5
-; GFX11-NEXT: v_dual_mov_b32 v115, s3 :: v_dual_and_b32 v96, 0xff, v96
-; GFX11-NEXT: v_or_b32_e32 v6, v23, v6
-; GFX11-NEXT: v_lshlrev_b32_e32 v7, 8, v7
-; GFX11-NEXT: v_lshlrev_b32_e32 v13, 8, v13
-; GFX11-NEXT: s_delay_alu instid0(VALU_DEP_4)
-; GFX11-NEXT: v_or_b32_e32 v11, v96, v11
-; GFX11-NEXT: v_lshlrev_b32_e32 v10, 8, v10
-; GFX11-NEXT: v_and_b32_e32 v6, 0xffff, v6
-; GFX11-NEXT: v_and_b32_e32 v24, 0xff, v24
-; GFX11-NEXT: v_lshlrev_b32_e32 v14, 8, v14
-; GFX11-NEXT: v_lshlrev_b32_e32 v11, 16, v11
-; GFX11-NEXT: v_lshlrev_b32_e32 v15, 8, v15
-; GFX11-NEXT: v_lshlrev_b32_e32 v9, 8, v9
-; GFX11-NEXT: v_lshlrev_b32_e32 v16, 8, v16
-; GFX11-NEXT: v_lshlrev_b32_e32 v8, 8, v8
-; GFX11-NEXT: v_or_b32_e32 v23, v6, v11
-; GFX11-NEXT: v_and_b32_e32 v6, 0xff, v21
-; GFX11-NEXT: v_and_b32_e32 v11, 0xff, v22
-; GFX11-NEXT: v_lshlrev_b32_e32 v21, 8, v87
-; GFX11-NEXT: v_and_b32_e32 v22, 0xff, v26
-; GFX11-NEXT: v_and_b32_e32 v26, 0xff, v86
-; GFX11-NEXT: v_or_b32_e32 v6, v6, v7
-; GFX11-NEXT: v_lshlrev_b32_e32 v4, 8, v4
-; GFX11-NEXT: v_or_b32_e32 v7, v11, v21
-; GFX11-NEXT: v_or_b32_e32 v11, v22, v13
-; GFX11-NEXT: v_or_b32_e32 v10, v26, v10
-; GFX11-NEXT: v_or_b32_e32 v13, v24, v14
-; GFX11-NEXT: v_and_b32_e32 v14, 0xff, v25
-; GFX11-NEXT: v_lshlrev_b32_e32 v21, 8, v85
-; GFX11-NEXT: v_and_b32_e32 v22, 0xff, v29
-; GFX11-NEXT: v_and_b32_e32 v24, 0xff, v84
-; GFX11-NEXT: v_and_b32_e32 v25, 0xff, v27
-; GFX11-NEXT: v_and_b32_e32 v26, 0xff, v28
-; GFX11-NEXT: v_lshlrev_b32_e32 v27, 8, v83
-; GFX11-NEXT: v_or_b32_e32 v14, v14, v21
-; GFX11-NEXT: v_or_b32_e32 v15, v22, v15
-; GFX11-NEXT: v_or_b32_e32 v9, v24, v9
-; GFX11-NEXT: v_or_b32_e32 v16, v25, v16
-; GFX11-NEXT: v_or_b32_e32 v21, v26, v27
-; GFX11-NEXT: v_and_b32_e32 v6, 0xffff, v6
-; GFX11-NEXT: v_lshlrev_b32_e32 v7, 16, v7
-; GFX11-NEXT: v_and_b32_e32 v11, 0xffff, v11
-; GFX11-NEXT: v_lshlrev_b32_e32 v10, 16, v10
-; GFX11-NEXT: v_and_b32_e32 v13, 0xffff, v13
-; GFX11-NEXT: v_lshlrev_b32_e32 v14, 16, v14
-; GFX11-NEXT: v_and_b32_e32 v15, 0xffff, v15
-; GFX11-NEXT: v_lshlrev_b32_e32 v9, 16, v9
-; GFX11-NEXT: v_and_b32_e32 v16, 0xffff, v16
-; GFX11-NEXT: v_lshlrev_b32_e32 v21, 16, v21
-; GFX11-NEXT: v_or_b32_e32 v24, v6, v7
-; GFX11-NEXT: v_or_b32_e32 v25, v11, v10
-; GFX11-NEXT: v_or_b32_e32 v26, v13, v14
-; GFX11-NEXT: v_or_b32_e32 v6, v15, v9
-; GFX11-NEXT: v_or_b32_e32 v7, v16, v21
-; GFX11-NEXT: v_and_b32_e32 v9, 0xff, v32
-; GFX11-NEXT: v_lshlrev_b32_e32 v10, 8, v17
-; GFX11-NEXT: v_and_b32_e32 v11, 0xff, v82
-; GFX11-NEXT: v_and_b32_e32 v13, 0xff, v31
-; GFX11-NEXT: v_lshlrev_b32_e32 v14, 8, v18
-; GFX11-NEXT: v_and_b32_e32 v15, 0xff, v30
-; GFX11-NEXT: v_lshlrev_b32_e32 v16, 8, v81
-; GFX11-NEXT: v_and_b32_e32 v17, 0xff, v35
-; GFX11-NEXT: v_lshlrev_b32_e32 v18, 8, v19
-; GFX11-NEXT: v_or_b32_e32 v9, v9, v10
-; GFX11-NEXT: v_or_b32_e32 v8, v11, v8
-; GFX11-NEXT: v_or_b32_e32 v10, v13, v14
-; GFX11-NEXT: v_or_b32_e32 v11, v15, v16
-; GFX11-NEXT: v_or_b32_e32 v13, v17, v18
-; GFX11-NEXT: v_and_b32_e32 v14, 0xff, v80
-; GFX11-NEXT: v_and_b32_e32 v15, 0xff, v34
-; GFX11-NEXT: v_lshlrev_b32_e32 v16, 8, v20
-; GFX11-NEXT: v_and_b32_e32 v17, 0xff, v33
-; GFX11-NEXT: v_lshlrev_b32_e32 v18, 8, v71
-; GFX11-NEXT: v_and_b32_e32 v19, 0xff, v38
-; GFX11-NEXT: v_lshlrev_b32_e32 v20, 8, v70
-; GFX11-NEXT: v_and_b32_e32 v21, 0xff, v69
-; GFX11-NEXT: v_lshlrev_b32_e32 v3, 8, v3
-; GFX11-NEXT: v_or_b32_e32 v4, v14, v4
-; GFX11-NEXT: v_or_b32_e32 v14, v15, v16
-; GFX11-NEXT: v_or_b32_e32 v15, v17, v18
-; GFX11-NEXT: v_or_b32_e32 v16, v19, v20
-; GFX11-NEXT: v_or_b32_e32 v3, v21, v3
-; GFX11-NEXT: v_and_b32_e32 v9, 0xffff, v9
-; GFX11-NEXT: v_lshlrev_b32_e32 v8, 16, v8
-; GFX11-NEXT: v_and_b32_e32 v10, 0xffff, v10
-; GFX11-NEXT: v_lshlrev_b32_e32 v11, 16, v11
-; GFX11-NEXT: v_and_b32_e32 v13, 0xffff, v13
-; GFX11-NEXT: v_lshlrev_b32_e32 v4, 16, v4
-; GFX11-NEXT: v_and_b32_e32 v14, 0xffff, v14
-; GFX11-NEXT: v_lshlrev_b32_e32 v15, 16, v15
-; GFX11-NEXT: v_and_b32_e32 v16, 0xffff, v16
-; GFX11-NEXT: v_lshlrev_b32_e32 v3, 16, v3
-; GFX11-NEXT: v_or_b32_e32 v8, v9, v8
-; GFX11-NEXT: v_or_b32_e32 v9, v10, v11
-; GFX11-NEXT: v_or_b32_e32 v13, v13, v4
-; GFX11-NEXT: v_or_b32_e32 v14, v14, v15
-; GFX11-NEXT: v_or_b32_e32 v15, v16, v3
-; GFX11-NEXT: v_and_b32_e32 v3, 0xff, v36
-; GFX11-NEXT: v_lshlrev_b32_e32 v4, 8, v68
-; GFX11-NEXT: v_and_b32_e32 v10, 0xff, v37
-; GFX11-NEXT: v_lshlrev_b32_e32 v11, 8, v67
-; GFX11-NEXT: v_and_b32_e32 v16, 0xff, v49
-; GFX11-NEXT: v_lshlrev_b32_e32 v17, 8, v66
-; GFX11-NEXT: v_and_b32_e32 v18, 0xff, v65
-; GFX11-NEXT: v_lshlrev_b32_e32 v2, 8, v2
-; GFX11-NEXT: v_and_b32_e32 v19, 0xff, v39
-; GFX11-NEXT: v_lshlrev_b32_e32 v20, 8, v64
-; GFX11-NEXT: v_or_b32_e32 v3, v3, v4
-; GFX11-NEXT: v_or_b32_e32 v4, v10, v11
-; GFX11-NEXT: v_or_b32_e32 v10, v16, v17
-; GFX11-NEXT: v_or_b32_e32 v2, v18, v2
-; GFX11-NEXT: v_or_b32_e32 v11, v19, v20
-; GFX11-NEXT: v_and_b32_e32 v16, 0xff, v48
-; GFX11-NEXT: v_lshlrev_b32_e32 v17, 8, v55
-; GFX11-NEXT: v_and_b32_e32 v18, 0xff, v52
-; GFX11-NEXT: v_lshlrev_b32_e32 v19, 8, v54
-; GFX11-NEXT: v_and_b32_e32 v20, 0xff, v53
-; GFX11-NEXT: v_lshlrev_b32_e32 v1, 8, v1
-; GFX11-NEXT: v_and_b32_e32 v21, 0xff, v51
-; GFX11-NEXT: v_lshlrev_b32_e32 v12, 8, v12
-; GFX11-NEXT: v_and_b32_e32 v22, 0xff, v50
-; GFX11-NEXT: v_lshlrev_b32_e32 v5, 8, v5
-; GFX11-NEXT: v_or_b32_e32 v16, v16, v17
-; GFX11-NEXT: v_or_b32_e32 v17, v18, v19
-; GFX11-NEXT: v_or_b32_e32 v1, v20, v1
-; GFX11-NEXT: v_or_b32_e32 v12, v21, v12
-; GFX11-NEXT: v_or_b32_e32 v5, v22, v5
-; GFX11-NEXT: v_and_b32_e32 v3, 0xffff, v3
-; GFX11-NEXT: v_lshlrev_b32_e32 v4, 16, v4
-; GFX11-NEXT: v_and_b32_e32 v10, 0xffff, v10
-; GFX11-NEXT: v_lshlrev_b32_e32 v2, 16, v2
-; GFX11-NEXT: v_and_b32_e32 v11, 0xffff, v11
-; GFX11-NEXT: v_lshlrev_b32_e32 v18, 16, v16
-; GFX11-NEXT: v_and_b32_e32 v17, 0xffff, v17
-; GFX11-NEXT: v_lshlrev_b32_e32 v19, 16, v1
-; GFX11-NEXT: v_and_b32_e32 v12, 0xffff, v12
-; GFX11-NEXT: v_lshlrev_b32_e32 v5, 16, v5
-; GFX11-NEXT: v_or_b32_e32 v16, v3, v4
-; GFX11-NEXT: v_or_b32_e32 v1, v10, v2
-; GFX11-NEXT: v_or_b32_e32 v2, v11, v18
-; GFX11-NEXT: v_or_b32_e32 v3, v17, v19
-; GFX11-NEXT: v_or_b32_e32 v4, v12, v5
-; GFX11-NEXT: s_clause 0x5
-; GFX11-NEXT: scratch_store_b128 v0, v[97:100], off offset:32
-; GFX11-NEXT: scratch_store_b128 v0, v[112:115], off offset:48
-; GFX11-NEXT: scratch_store_b128 v0, v[23:26], off offset:64
-; GFX11-NEXT: scratch_store_b128 v0, v[6:9], off offset:80
-; GFX11-NEXT: scratch_store_b128 v0, v[13:16], off offset:96
-; GFX11-NEXT: scratch_store_b128 v0, v[1:4], off offset:112
-; GFX11-NEXT: v_readlane_b32 s104, v41, 8
-; GFX11-NEXT: v_readlane_b32 s103, v41, 7
-; GFX11-NEXT: v_readlane_b32 s102, v41, 6
-; GFX11-NEXT: v_readlane_b32 s101, v41, 5
-; GFX11-NEXT: v_readlane_b32 s100, v41, 4
-; GFX11-NEXT: v_readlane_b32 s99, v41, 3
-; GFX11-NEXT: v_readlane_b32 s98, v41, 2
-; GFX11-NEXT: v_readlane_b32 s97, v41, 1
-; GFX11-NEXT: v_readlane_b32 s96, v41, 0
-; GFX11-NEXT: v_readlane_b32 s87, v40, 31
-; GFX11-NEXT: v_readlane_b32 s86, v40, 30
-; GFX11-NEXT: v_readlane_b32 s85, v40, 29
-; GFX11-NEXT: v_readlane_b32 s84, v40, 28
-; GFX11-NEXT: v_readlane_b32 s83, v40, 27
-; GFX11-NEXT: v_readlane_b32 s82, v40, 26
-; GFX11-NEXT: v_readlane_b32 s81, v40, 25
-; GFX11-NEXT: v_readlane_b32 s80, v40, 24
-; GFX11-NEXT: v_readlane_b32 s71, v40, 23
-; GFX11-NEXT: v_readlane_b32 s70, v40, 22
-; GFX11-NEXT: v_readlane_b32 s69, v40, 21
-; GFX11-NEXT: v_readlane_b32 s68, v40, 20
-; GFX11-NEXT: v_readlane_b32 s67, v40, 19
-; GFX11-NEXT: v_readlane_b32 s66, v40, 18
-; GFX11-NEXT: v_readlane_b32 s65, v40, 17
-; GFX11-NEXT: v_readlane_b32 s64, v40, 16
-; GFX11-NEXT: v_readlane_b32 s55, v40, 15
-; GFX11-NEXT: v_readlane_b32 s54, v40, 14
-; GFX11-NEXT: v_readlane_b32 s53, v40, 13
-; GFX11-NEXT: v_readlane_b32 s52, v40, 12
-; GFX11-NEXT: v_readlane_b32 s51, v40, 11
-; GFX11-NEXT: v_readlane_b32 s50, v40, 10
-; GFX11-NEXT: v_readlane_b32 s49, v40, 9
-; GFX11-NEXT: v_readlane_b32 s48, v40, 8
-; GFX11-NEXT: v_readlane_b32 s39, v40, 7
-; GFX11-NEXT: v_readlane_b32 s38, v40, 6
-; GFX11-NEXT: v_readlane_b32 s37, v40, 5
-; GFX11-NEXT: v_readlane_b32 s36, v40, 4
-; GFX11-NEXT: v_readlane_b32 s35, v40, 3
-; GFX11-NEXT: v_readlane_b32 s34, v40, 2
-; GFX11-NEXT: v_readlane_b32 s31, v40, 1
-; GFX11-NEXT: v_readlane_b32 s30, v40, 0
-; GFX11-NEXT: s_or_saveexec_b32 s0, -1
-; GFX11-NEXT: s_clause 0x3
-; GFX11-NEXT: scratch_load_b32 v40, off, s32
-; GFX11-NEXT: scratch_load_b32 v41, off, s32 offset:4
-; GFX11-NEXT: scratch_load_b32 v42, off, s32 offset:8
-; GFX11-NEXT: scratch_load_b32 v43, off, s32 offset:12
-; GFX11-NEXT: s_mov_b32 exec_lo, s0
-; GFX11-NEXT: s_waitcnt vmcnt(0)
-; GFX11-NEXT: s_setpc_b64 s[30:31]
+; GFX11-TRUE16-LABEL: bitcast_v64bf16_to_v128i8_scalar:
+; GFX11-TRUE16: ; %bb.0:
+; GFX11-TRUE16-NEXT: s_waitcnt vmcnt(0) expcnt(0) lgkmcnt(0)
+; GFX11-TRUE16-NEXT: s_or_saveexec_b32 s4, -1
+; GFX11-TRUE16-NEXT: s_clause 0x3
+; GFX11-TRUE16-NEXT: scratch_store_b32 off, v40, s32
+; GFX11-TRUE16-NEXT: scratch_store_b32 off, v41, s32 offset:4
+; GFX11-TRUE16-NEXT: scratch_store_b32 off, v42, s32 offset:8
+; GFX11-TRUE16-NEXT: scratch_store_b32 off, v43, s32 offset:12
+; GFX11-TRUE16-NEXT: s_mov_b32 exec_lo, s4
+; GFX11-TRUE16-NEXT: v_writelane_b32 v40, s30, 0
+; GFX11-TRUE16-NEXT: v_writelane_b32 v41, s96, 0
+; GFX11-TRUE16-NEXT: v_cmp_ne_u32_e32 vcc_lo, 0, v15
+; GFX11-TRUE16-NEXT: v_readfirstlane_b32 s72, v1
+; GFX11-TRUE16-NEXT: v_readfirstlane_b32 s73, v2
+; GFX11-TRUE16-NEXT: v_writelane_b32 v40, s31, 1
+; GFX11-TRUE16-NEXT: v_writelane_b32 v41, s97, 1
+; GFX11-TRUE16-NEXT: v_readfirstlane_b32 s62, v3
+; GFX11-TRUE16-NEXT: v_readfirstlane_b32 s63, v4
+; GFX11-TRUE16-NEXT: v_readfirstlane_b32 s60, v5
+; GFX11-TRUE16-NEXT: v_writelane_b32 v40, s34, 2
+; GFX11-TRUE16-NEXT: v_writelane_b32 v41, s98, 2
+; GFX11-TRUE16-NEXT: v_readfirstlane_b32 s61, v6
+; GFX11-TRUE16-NEXT: v_readfirstlane_b32 s58, v7
+; GFX11-TRUE16-NEXT: v_readfirstlane_b32 s59, v8
+; GFX11-TRUE16-NEXT: v_writelane_b32 v40, s35, 3
+; GFX11-TRUE16-NEXT: v_writelane_b32 v41, s99, 3
+; GFX11-TRUE16-NEXT: v_readfirstlane_b32 s46, v9
+; GFX11-TRUE16-NEXT: v_readfirstlane_b32 s47, v10
+; GFX11-TRUE16-NEXT: v_readfirstlane_b32 s44, v11
+; GFX11-TRUE16-NEXT: v_writelane_b32 v40, s36, 4
+; GFX11-TRUE16-NEXT: v_writelane_b32 v41, s100, 4
+; GFX11-TRUE16-NEXT: v_readfirstlane_b32 s45, v12
+; GFX11-TRUE16-NEXT: v_readfirstlane_b32 s42, v13
+; GFX11-TRUE16-NEXT: v_readfirstlane_b32 s43, v14
+; GFX11-TRUE16-NEXT: v_writelane_b32 v40, s37, 5
+; GFX11-TRUE16-NEXT: v_writelane_b32 v41, s101, 5
+; GFX11-TRUE16-NEXT: s_mov_b32 vcc_hi, 0
+; GFX11-TRUE16-NEXT: s_and_b32 s4, vcc_lo, exec_lo
+; GFX11-TRUE16-NEXT: ; implicit-def: $vgpr43 : SGPR spill to VGPR lane
+; GFX11-TRUE16-NEXT: ; implicit-def: $vgpr42 : SGPR spill to VGPR lane
+; GFX11-TRUE16-NEXT: v_writelane_b32 v40, s38, 6
+; GFX11-TRUE16-NEXT: v_writelane_b32 v41, s102, 6
+; GFX11-TRUE16-NEXT: v_writelane_b32 v40, s39, 7
+; GFX11-TRUE16-NEXT: v_writelane_b32 v41, s103, 7
+; GFX11-TRUE16-NEXT: v_writelane_b32 v40, s48, 8
+; GFX11-TRUE16-NEXT: v_writelane_b32 v41, s104, 8
+; GFX11-TRUE16-NEXT: v_writelane_b32 v40, s49, 9
+; GFX11-TRUE16-NEXT: v_writelane_b32 v40, s50, 10
+; GFX11-TRUE16-NEXT: v_writelane_b32 v40, s51, 11
+; GFX11-TRUE16-NEXT: v_writelane_b32 v40, s52, 12
+; GFX11-TRUE16-NEXT: v_writelane_b32 v40, s53, 13
+; GFX11-TRUE16-NEXT: v_writelane_b32 v40, s54, 14
+; GFX11-TRUE16-NEXT: v_writelane_b32 v40, s55, 15
+; GFX11-TRUE16-NEXT: v_writelane_b32 v40, s64, 16
+; GFX11-TRUE16-NEXT: v_writelane_b32 v40, s65, 17
+; GFX11-TRUE16-NEXT: v_writelane_b32 v40, s66, 18
+; GFX11-TRUE16-NEXT: v_writelane_b32 v40, s67, 19
+; GFX11-TRUE16-NEXT: v_writelane_b32 v40, s68, 20
+; GFX11-TRUE16-NEXT: v_writelane_b32 v40, s69, 21
+; GFX11-TRUE16-NEXT: v_writelane_b32 v40, s70, 22
+; GFX11-TRUE16-NEXT: v_writelane_b32 v40, s71, 23
+; GFX11-TRUE16-NEXT: v_writelane_b32 v40, s80, 24
+; GFX11-TRUE16-NEXT: v_writelane_b32 v40, s81, 25
+; GFX11-TRUE16-NEXT: v_writelane_b32 v40, s82, 26
+; GFX11-TRUE16-NEXT: v_writelane_b32 v40, s83, 27
+; GFX11-TRUE16-NEXT: v_writelane_b32 v40, s84, 28
+; GFX11-TRUE16-NEXT: v_writelane_b32 v40, s85, 29
+; GFX11-TRUE16-NEXT: v_writelane_b32 v40, s86, 30
+; GFX11-TRUE16-NEXT: v_writelane_b32 v40, s87, 31
+; GFX11-TRUE16-NEXT: s_cbranch_scc0 .LBB91_3
+; GFX11-TRUE16-NEXT: ; %bb.1: ; %cmp.false
+; GFX11-TRUE16-NEXT: s_lshr_b32 s4, s27, 24
+; GFX11-TRUE16-NEXT: s_lshr_b64 s[12:13], s[26:27], 24
+; GFX11-TRUE16-NEXT: v_writelane_b32 v43, s4, 15
+; GFX11-TRUE16-NEXT: s_lshr_b32 s4, s27, 16
+; GFX11-TRUE16-NEXT: s_lshr_b32 s99, s2, 16
+; GFX11-TRUE16-NEXT: s_lshr_b32 s100, s2, 8
+; GFX11-TRUE16-NEXT: s_lshr_b32 s101, s1, 24
+; GFX11-TRUE16-NEXT: v_writelane_b32 v43, s4, 14
+; GFX11-TRUE16-NEXT: s_lshr_b32 s4, s27, 8
+; GFX11-TRUE16-NEXT: s_lshr_b32 s11, s1, 16
+; GFX11-TRUE16-NEXT: s_lshr_b32 s102, s1, 8
+; GFX11-TRUE16-NEXT: s_lshr_b32 s103, s0, 16
+; GFX11-TRUE16-NEXT: v_writelane_b32 v43, s4, 16
+; GFX11-TRUE16-NEXT: s_lshr_b32 s4, s26, 16
+; GFX11-TRUE16-NEXT: s_lshr_b32 s104, s0, 8
+; GFX11-TRUE16-NEXT: s_lshr_b32 s85, s43, 24
+; GFX11-TRUE16-NEXT: s_lshr_b32 s10, s43, 16
+; GFX11-TRUE16-NEXT: v_writelane_b32 v43, s4, 17
+; GFX11-TRUE16-NEXT: s_lshr_b32 s4, s26, 8
+; GFX11-TRUE16-NEXT: s_lshr_b32 s5, s43, 8
+; GFX11-TRUE16-NEXT: s_lshr_b32 s87, s42, 16
+; GFX11-TRUE16-NEXT: s_lshr_b32 s86, s42, 8
+; GFX11-TRUE16-NEXT: v_writelane_b32 v43, s4, 18
+; GFX11-TRUE16-NEXT: s_lshr_b32 s4, s25, 24
+; GFX11-TRUE16-NEXT: s_lshr_b32 s81, s45, 24
+; GFX11-TRUE16-NEXT: s_lshr_b32 s98, s45, 16
+; GFX11-TRUE16-NEXT: s_lshr_b32 s84, s45, 8
+; GFX11-TRUE16-NEXT: v_writelane_b32 v43, s4, 19
+; GFX11-TRUE16-NEXT: s_lshr_b32 s4, s25, 16
+; GFX11-TRUE16-NEXT: s_lshr_b32 s48, s44, 8
+; GFX11-TRUE16-NEXT: s_lshr_b32 s70, s47, 24
+; GFX11-TRUE16-NEXT: s_lshr_b32 s97, s47, 16
+; GFX11-TRUE16-NEXT: v_writelane_b32 v43, s4, 13
+; GFX11-TRUE16-NEXT: s_lshr_b32 s4, s25, 8
+; GFX11-TRUE16-NEXT: s_lshr_b32 s80, s47, 8
+; GFX11-TRUE16-NEXT: s_lshr_b32 s83, s46, 16
+; GFX11-TRUE16-NEXT: s_lshr_b32 s82, s46, 8
+; GFX11-TRUE16-NEXT: v_writelane_b32 v43, s4, 20
+; GFX11-TRUE16-NEXT: s_lshr_b32 s4, s24, 16
+; GFX11-TRUE16-NEXT: s_lshr_b32 s66, s59, 24
+; GFX11-TRUE16-NEXT: s_lshr_b32 s9, s59, 16
+; GFX11-TRUE16-NEXT: s_lshr_b32 s69, s59, 8
+; GFX11-TRUE16-NEXT: v_writelane_b32 v43, s4, 21
+; GFX11-TRUE16-NEXT: s_lshr_b32 s4, s24, 8
+; GFX11-TRUE16-NEXT: s_lshr_b32 s71, s58, 16
+; GFX11-TRUE16-NEXT: s_lshr_b32 s39, s58, 8
+; GFX11-TRUE16-NEXT: s_lshr_b32 s55, s61, 24
+; GFX11-TRUE16-NEXT: v_writelane_b32 v43, s4, 22
+; GFX11-TRUE16-NEXT: s_lshr_b32 s4, s23, 24
+; GFX11-TRUE16-NEXT: s_lshr_b32 s8, s61, 16
+; GFX11-TRUE16-NEXT: s_lshr_b32 s65, s61, 8
+; GFX11-TRUE16-NEXT: s_lshr_b32 s68, s60, 16
+; GFX11-TRUE16-NEXT: v_writelane_b32 v43, s4, 23
+; GFX11-TRUE16-NEXT: s_lshr_b32 s4, s23, 16
+; GFX11-TRUE16-NEXT: s_lshr_b32 s67, s60, 8
+; GFX11-TRUE16-NEXT: s_lshr_b32 s51, s63, 24
+; GFX11-TRUE16-NEXT: s_lshr_b32 s96, s63, 16
+; GFX11-TRUE16-NEXT: v_writelane_b32 v43, s4, 12
+; GFX11-TRUE16-NEXT: s_lshr_b32 s4, s23, 8
+; GFX11-TRUE16-NEXT: s_lshr_b32 s54, s63, 8
+; GFX11-TRUE16-NEXT: s_lshr_b32 s38, s62, 16
+; GFX11-TRUE16-NEXT: s_lshr_b32 s64, s62, 8
+; GFX11-TRUE16-NEXT: v_writelane_b32 v43, s4, 24
+; GFX11-TRUE16-NEXT: s_lshr_b32 s4, s22, 16
+; GFX11-TRUE16-NEXT: s_lshr_b32 s36, s73, 24
+; GFX11-TRUE16-NEXT: s_lshr_b32 s7, s73, 16
+; GFX11-TRUE16-NEXT: s_lshr_b32 s50, s73, 8
+; GFX11-TRUE16-NEXT: v_writelane_b32 v43, s4, 25
+; GFX11-TRUE16-NEXT: s_lshr_b32 s4, s22, 8
+; GFX11-TRUE16-NEXT: s_lshr_b32 s53, s72, 16
+; GFX11-TRUE16-NEXT: s_lshr_b32 s52, s72, 8
+; GFX11-TRUE16-NEXT: s_lshr_b32 s34, s29, 24
+; GFX11-TRUE16-NEXT: v_writelane_b32 v43, s4, 26
+; GFX11-TRUE16-NEXT: s_lshr_b32 s4, s21, 24
+; GFX11-TRUE16-NEXT: s_lshr_b32 s6, s29, 16
+; GFX11-TRUE16-NEXT: s_lshr_b32 s35, s29, 8
+; GFX11-TRUE16-NEXT: s_lshr_b32 s37, s28, 16
+; GFX11-TRUE16-NEXT: v_writelane_b32 v43, s4, 27
+; GFX11-TRUE16-NEXT: s_lshr_b32 s4, s21, 16
+; GFX11-TRUE16-NEXT: s_lshr_b32 s49, s28, 8
+; GFX11-TRUE16-NEXT: s_lshr_b64 s[14:15], s[16:17], 24
+; GFX11-TRUE16-NEXT: s_lshr_b64 s[40:41], s[2:3], 24
+; GFX11-TRUE16-NEXT: v_writelane_b32 v43, s4, 11
+; GFX11-TRUE16-NEXT: s_lshr_b32 s4, s21, 8
+; GFX11-TRUE16-NEXT: s_lshr_b64 s[56:57], s[0:1], 24
+; GFX11-TRUE16-NEXT: s_lshr_b64 s[74:75], s[42:43], 24
+; GFX11-TRUE16-NEXT: s_lshr_b64 s[76:77], s[44:45], 24
+; GFX11-TRUE16-NEXT: v_writelane_b32 v43, s4, 28
+; GFX11-TRUE16-NEXT: s_lshr_b32 s4, s20, 16
+; GFX11-TRUE16-NEXT: s_lshr_b64 s[78:79], s[46:47], 24
+; GFX11-TRUE16-NEXT: s_lshr_b64 s[88:89], s[58:59], 24
+; GFX11-TRUE16-NEXT: s_lshr_b64 s[90:91], s[60:61], 24
+; GFX11-TRUE16-NEXT: v_writelane_b32 v43, s4, 29
+; GFX11-TRUE16-NEXT: s_lshr_b32 s4, s20, 8
+; GFX11-TRUE16-NEXT: s_lshr_b64 s[92:93], s[62:63], 24
+; GFX11-TRUE16-NEXT: s_lshr_b64 s[94:95], s[72:73], 24
+; GFX11-TRUE16-NEXT: s_lshr_b64 s[30:31], s[28:29], 24
+; GFX11-TRUE16-NEXT: v_writelane_b32 v43, s4, 30
+; GFX11-TRUE16-NEXT: s_lshr_b32 s4, s19, 24
+; GFX11-TRUE16-NEXT: s_delay_alu instid0(SALU_CYCLE_1) | instskip(SKIP_1) | instid1(SALU_CYCLE_1)
+; GFX11-TRUE16-NEXT: v_writelane_b32 v43, s4, 31
+; GFX11-TRUE16-NEXT: s_lshr_b32 s4, s19, 16
+; GFX11-TRUE16-NEXT: v_writelane_b32 v43, s4, 10
+; GFX11-TRUE16-NEXT: s_lshr_b32 s4, s19, 8
+; GFX11-TRUE16-NEXT: s_delay_alu instid0(SALU_CYCLE_1) | instskip(SKIP_1) | instid1(SALU_CYCLE_1)
+; GFX11-TRUE16-NEXT: v_writelane_b32 v42, s4, 0
+; GFX11-TRUE16-NEXT: s_lshr_b32 s4, s18, 16
+; GFX11-TRUE16-NEXT: v_writelane_b32 v42, s4, 1
+; GFX11-TRUE16-NEXT: s_lshr_b32 s4, s18, 8
+; GFX11-TRUE16-NEXT: s_delay_alu instid0(SALU_CYCLE_1) | instskip(SKIP_1) | instid1(SALU_CYCLE_1)
+; GFX11-TRUE16-NEXT: v_writelane_b32 v42, s4, 2
+; GFX11-TRUE16-NEXT: s_lshr_b32 s4, s17, 24
+; GFX11-TRUE16-NEXT: v_writelane_b32 v42, s4, 3
+; GFX11-TRUE16-NEXT: s_lshr_b32 s4, s17, 16
+; GFX11-TRUE16-NEXT: s_delay_alu instid0(SALU_CYCLE_1) | instskip(SKIP_1) | instid1(SALU_CYCLE_1)
+; GFX11-TRUE16-NEXT: v_writelane_b32 v43, s4, 9
+; GFX11-TRUE16-NEXT: s_lshr_b32 s4, s17, 8
+; GFX11-TRUE16-NEXT: v_writelane_b32 v42, s4, 4
+; GFX11-TRUE16-NEXT: s_lshr_b32 s4, s16, 16
+; GFX11-TRUE16-NEXT: s_delay_alu instid0(SALU_CYCLE_1) | instskip(SKIP_1) | instid1(SALU_CYCLE_1)
+; GFX11-TRUE16-NEXT: v_writelane_b32 v42, s4, 5
+; GFX11-TRUE16-NEXT: s_lshr_b32 s4, s16, 8
+; GFX11-TRUE16-NEXT: v_writelane_b32 v42, s4, 6
+; GFX11-TRUE16-NEXT: s_lshr_b32 s4, s3, 24
+; GFX11-TRUE16-NEXT: s_delay_alu instid0(SALU_CYCLE_1) | instskip(SKIP_1) | instid1(SALU_CYCLE_1)
+; GFX11-TRUE16-NEXT: v_writelane_b32 v42, s4, 7
+; GFX11-TRUE16-NEXT: s_lshr_b32 s4, s3, 16
+; GFX11-TRUE16-NEXT: v_writelane_b32 v43, s4, 8
+; GFX11-TRUE16-NEXT: s_lshr_b32 s4, s3, 8
+; GFX11-TRUE16-NEXT: s_delay_alu instid0(SALU_CYCLE_1) | instskip(SKIP_4) | instid1(SALU_CYCLE_1)
+; GFX11-TRUE16-NEXT: v_writelane_b32 v42, s4, 8
+; GFX11-TRUE16-NEXT: s_lshr_b32 s4, s44, 16
+; GFX11-TRUE16-NEXT: v_writelane_b32 v43, s12, 6
+; GFX11-TRUE16-NEXT: v_writelane_b32 v43, s13, 7
+; GFX11-TRUE16-NEXT: s_lshr_b64 s[12:13], s[24:25], 24
+; GFX11-TRUE16-NEXT: v_writelane_b32 v43, s12, 4
+; GFX11-TRUE16-NEXT: v_writelane_b32 v43, s13, 5
+; GFX11-TRUE16-NEXT: s_lshr_b64 s[12:13], s[22:23], 24
+; GFX11-TRUE16-NEXT: s_delay_alu instid0(SALU_CYCLE_1) | instskip(SKIP_2) | instid1(SALU_CYCLE_1)
+; GFX11-TRUE16-NEXT: v_writelane_b32 v43, s12, 2
+; GFX11-TRUE16-NEXT: v_writelane_b32 v43, s13, 3
+; GFX11-TRUE16-NEXT: s_lshr_b64 s[12:13], s[20:21], 24
+; GFX11-TRUE16-NEXT: v_writelane_b32 v43, s12, 0
+; GFX11-TRUE16-NEXT: v_writelane_b32 v43, s13, 1
+; GFX11-TRUE16-NEXT: s_lshr_b64 s[12:13], s[18:19], 24
+; GFX11-TRUE16-NEXT: s_and_not1_b32 vcc_lo, exec_lo, vcc_hi
+; GFX11-TRUE16-NEXT: s_cbranch_vccnz .LBB91_4
+; GFX11-TRUE16-NEXT: .LBB91_2: ; %cmp.true
+; GFX11-TRUE16-NEXT: s_and_b32 s5, s29, 0xffff0000
+; GFX11-TRUE16-NEXT: s_lshl_b32 s4, s29, 16
+; GFX11-TRUE16-NEXT: v_add_f32_e64 v2, 0x40c00000, s5
+; GFX11-TRUE16-NEXT: v_add_f32_e64 v1, 0x40c00000, s4
+; GFX11-TRUE16-NEXT: s_and_b32 s4, s28, 0xffff0000
+; GFX11-TRUE16-NEXT: s_lshl_b32 s78, s28, 16
+; GFX11-TRUE16-NEXT: v_add_f32_e64 v6, 0x40c00000, s4
+; GFX11-TRUE16-NEXT: v_bfe_u32 v4, v2, 16, 1
+; GFX11-TRUE16-NEXT: v_bfe_u32 v3, v1, 16, 1
+; GFX11-TRUE16-NEXT: v_or_b32_e32 v5, 0x400000, v1
+; GFX11-TRUE16-NEXT: v_cmp_u_f32_e32 vcc_lo, v1, v1
+; GFX11-TRUE16-NEXT: v_or_b32_e32 v7, 0x400000, v2
+; GFX11-TRUE16-NEXT: v_add_nc_u32_e32 v4, v4, v2
+; GFX11-TRUE16-NEXT: s_and_b32 s4, s1, 0xffff0000
+; GFX11-TRUE16-NEXT: s_and_b32 s15, s45, 0xffff0000
+; GFX11-TRUE16-NEXT: s_lshl_b32 s28, s45, 16
+; GFX11-TRUE16-NEXT: s_and_b32 s8, s43, 0xffff0000
+; GFX11-TRUE16-NEXT: v_add_nc_u32_e32 v4, 0x7fff, v4
+; GFX11-TRUE16-NEXT: v_add_nc_u32_e32 v3, v3, v1
+; GFX11-TRUE16-NEXT: s_lshl_b32 s7, s43, 16
+; GFX11-TRUE16-NEXT: s_and_b32 s6, s73, 0xffff0000
+; GFX11-TRUE16-NEXT: s_lshl_b32 s77, s73, 16
+; GFX11-TRUE16-NEXT: s_and_b32 s76, s72, 0xffff0000
+; GFX11-TRUE16-NEXT: v_add_nc_u32_e32 v3, 0x7fff, v3
+; GFX11-TRUE16-NEXT: s_lshl_b32 s75, s72, 16
+; GFX11-TRUE16-NEXT: s_and_b32 s11, s63, 0xffff0000
+; GFX11-TRUE16-NEXT: s_lshl_b32 s74, s63, 16
+; GFX11-TRUE16-NEXT: s_and_b32 s73, s62, 0xffff0000
+; GFX11-TRUE16-NEXT: v_cndmask_b32_e32 v1, v3, v5, vcc_lo
+; GFX11-TRUE16-NEXT: v_cmp_u_f32_e32 vcc_lo, v2, v2
+; GFX11-TRUE16-NEXT: s_lshl_b32 s72, s62, 16
+; GFX11-TRUE16-NEXT: s_and_b32 s62, s61, 0xffff0000
+; GFX11-TRUE16-NEXT: s_lshl_b32 s63, s61, 16
+; GFX11-TRUE16-NEXT: v_lshrrev_b32_e32 v24, 16, v1
+; GFX11-TRUE16-NEXT: v_add_f32_e64 v1, 0x40c00000, s4
+; GFX11-TRUE16-NEXT: v_cndmask_b32_e32 v2, v4, v7, vcc_lo
+; GFX11-TRUE16-NEXT: s_and_b32 s61, s60, 0xffff0000
+; GFX11-TRUE16-NEXT: s_lshl_b32 s57, s60, 16
+; GFX11-TRUE16-NEXT: s_and_b32 s40, s59, 0xffff0000
+; GFX11-TRUE16-NEXT: v_readfirstlane_b32 s45, v1
+; GFX11-TRUE16-NEXT: v_cmp_u_f32_e32 vcc_lo, v1, v1
+; GFX11-TRUE16-NEXT: s_lshl_b32 s56, s59, 16
+; GFX11-TRUE16-NEXT: s_and_b32 s29, s58, 0xffff0000
+; GFX11-TRUE16-NEXT: s_lshl_b32 s14, s58, 16
+; GFX11-TRUE16-NEXT: s_bfe_u32 s4, s45, 0x10010
+; GFX11-TRUE16-NEXT: s_and_b32 s12, s47, 0xffff0000
+; GFX11-TRUE16-NEXT: s_add_i32 s43, s4, s45
+; GFX11-TRUE16-NEXT: s_lshl_b32 s13, s47, 16
+; GFX11-TRUE16-NEXT: s_and_b32 s47, s46, 0xffff0000
+; GFX11-TRUE16-NEXT: s_lshl_b32 s41, s46, 16
+; GFX11-TRUE16-NEXT: s_and_b32 s10, s44, 0xffff0000
+; GFX11-TRUE16-NEXT: s_lshl_b32 s9, s44, 16
+; GFX11-TRUE16-NEXT: s_and_b32 s5, s42, 0xffff0000
+; GFX11-TRUE16-NEXT: s_lshl_b32 s4, s42, 16
+; GFX11-TRUE16-NEXT: s_addk_i32 s43, 0x7fff
+; GFX11-TRUE16-NEXT: s_bitset1_b32 s45, 22
+; GFX11-TRUE16-NEXT: s_and_b32 s42, vcc_lo, exec_lo
+; GFX11-TRUE16-NEXT: s_cselect_b32 s42, s45, s43
+; GFX11-TRUE16-NEXT: s_lshl_b32 s1, s1, 16
+; GFX11-TRUE16-NEXT: v_bfe_u32 v3, v6, 16, 1
+; GFX11-TRUE16-NEXT: v_add_f32_e64 v4, 0x40c00000, s1
+; GFX11-TRUE16-NEXT: v_add_f32_e64 v1, 0x40c00000, s78
+; GFX11-TRUE16-NEXT: v_lshrrev_b32_e32 v25, 16, v2
+; GFX11-TRUE16-NEXT: s_lshr_b32 s58, s42, 16
+; GFX11-TRUE16-NEXT: v_add_nc_u32_e32 v2, v3, v6
+; GFX11-TRUE16-NEXT: v_readfirstlane_b32 s1, v4
+; GFX11-TRUE16-NEXT: v_bfe_u32 v3, v1, 16, 1
+; GFX11-TRUE16-NEXT: v_cmp_u_f32_e32 vcc_lo, v4, v4
+; GFX11-TRUE16-NEXT: v_or_b32_e32 v7, 0x400000, v6
+; GFX11-TRUE16-NEXT: v_or_b32_e32 v8, 0x400000, v1
+; GFX11-TRUE16-NEXT: s_bfe_u32 s43, s1, 0x10010
+; GFX11-TRUE16-NEXT: v_mov_b16_e32 v5.l, v24.l
+; GFX11-TRUE16-NEXT: s_add_i32 s43, s43, s1
+; GFX11-TRUE16-NEXT: s_bitset1_b32 s1, 22
+; GFX11-TRUE16-NEXT: s_addk_i32 s43, 0x7fff
+; GFX11-TRUE16-NEXT: s_and_b32 s42, vcc_lo, exec_lo
+; GFX11-TRUE16-NEXT: v_cmp_u_f32_e32 vcc_lo, v6, v6
+; GFX11-TRUE16-NEXT: v_add_nc_u32_e32 v3, v3, v1
+; GFX11-TRUE16-NEXT: s_cselect_b32 s1, s1, s43
+; GFX11-TRUE16-NEXT: s_and_b32 s42, s0, 0xffff0000
+; GFX11-TRUE16-NEXT: s_lshr_b32 s1, s1, 16
+; GFX11-TRUE16-NEXT: v_add_f32_e64 v4, 0x40c00000, s42
+; GFX11-TRUE16-NEXT: v_add_nc_u32_e32 v3, 0x7fff, v3
+; GFX11-TRUE16-NEXT: v_add_nc_u32_e32 v2, 0x7fff, v2
+; GFX11-TRUE16-NEXT: v_add_f32_e64 v6, 0x40c00000, s77
+; GFX11-TRUE16-NEXT: v_mov_b16_e32 v5.h, v25.l
+; GFX11-TRUE16-NEXT: v_readfirstlane_b32 s42, v4
+; GFX11-TRUE16-NEXT: s_delay_alu instid0(VALU_DEP_4) | instskip(SKIP_3) | instid1(VALU_DEP_4)
+; GFX11-TRUE16-NEXT: v_cndmask_b32_e32 v2, v2, v7, vcc_lo
+; GFX11-TRUE16-NEXT: v_cmp_u_f32_e32 vcc_lo, v1, v1
+; GFX11-TRUE16-NEXT: v_bfe_u32 v7, v6, 16, 1
+; GFX11-TRUE16-NEXT: v_lshrrev_b32_e32 v87, 24, v5
+; GFX11-TRUE16-NEXT: v_lshrrev_b32_e32 v2, 16, v2
+; GFX11-TRUE16-NEXT: v_cndmask_b32_e32 v1, v3, v8, vcc_lo
+; GFX11-TRUE16-NEXT: v_add_f32_e64 v3, 0x40c00000, s6
+; GFX11-TRUE16-NEXT: s_bfe_u32 s6, s42, 0x10010
+; GFX11-TRUE16-NEXT: v_cmp_u_f32_e32 vcc_lo, v4, v4
+; GFX11-TRUE16-NEXT: s_add_i32 s6, s6, s42
+; GFX11-TRUE16-NEXT: s_bitset1_b32 s42, 22
+; GFX11-TRUE16-NEXT: s_addk_i32 s6, 0x7fff
+; GFX11-TRUE16-NEXT: v_lshrrev_b32_e32 v27, 16, v1
+; GFX11-TRUE16-NEXT: s_and_b32 s43, vcc_lo, exec_lo
+; GFX11-TRUE16-NEXT: s_cselect_b32 s6, s42, s6
+; GFX11-TRUE16-NEXT: s_lshl_b32 s0, s0, 16
+; GFX11-TRUE16-NEXT: v_bfe_u32 v8, v3, 16, 1
+; GFX11-TRUE16-NEXT: v_add_f32_e64 v1, 0x40c00000, s0
+; GFX11-TRUE16-NEXT: v_add_nc_u32_e32 v7, v7, v6
+; GFX11-TRUE16-NEXT: v_mov_b16_e32 v4.h, v2.l
+; GFX11-TRUE16-NEXT: s_lshr_b32 s6, s6, 16
+; GFX11-TRUE16-NEXT: v_add_nc_u32_e32 v2, v8, v3
+; GFX11-TRUE16-NEXT: v_readfirstlane_b32 s0, v1
+; GFX11-TRUE16-NEXT: v_cmp_u_f32_e32 vcc_lo, v1, v1
+; GFX11-TRUE16-NEXT: v_add_nc_u32_e32 v7, 0x7fff, v7
+; GFX11-TRUE16-NEXT: v_or_b32_e32 v8, 0x400000, v6
+; GFX11-TRUE16-NEXT: v_add_nc_u32_e32 v1, 0x7fff, v2
+; GFX11-TRUE16-NEXT: s_bfe_u32 s42, s0, 0x10010
+; GFX11-TRUE16-NEXT: v_or_b32_e32 v9, 0x400000, v3
+; GFX11-TRUE16-NEXT: s_add_i32 s42, s42, s0
+; GFX11-TRUE16-NEXT: s_bitset1_b32 s0, 22
+; GFX11-TRUE16-NEXT: s_addk_i32 s42, 0x7fff
+; GFX11-TRUE16-NEXT: s_and_b32 s43, vcc_lo, exec_lo
+; GFX11-TRUE16-NEXT: s_cselect_b32 s0, s0, s42
+; GFX11-TRUE16-NEXT: s_and_b32 s42, s3, 0xffff0000
+; GFX11-TRUE16-NEXT: v_cmp_u_f32_e32 vcc_lo, v6, v6
+; GFX11-TRUE16-NEXT: v_add_f32_e64 v2, 0x40c00000, s42
+; GFX11-TRUE16-NEXT: s_lshr_b32 s0, s0, 16
+; GFX11-TRUE16-NEXT: v_mov_b16_e32 v4.l, v27.l
+; GFX11-TRUE16-NEXT: v_cndmask_b32_e32 v6, v7, v8, vcc_lo
+; GFX11-TRUE16-NEXT: s_delay_alu instid0(VALU_DEP_3)
+; GFX11-TRUE16-NEXT: v_readfirstlane_b32 s42, v2
+; GFX11-TRUE16-NEXT: v_cmp_u_f32_e32 vcc_lo, v3, v3
+; GFX11-TRUE16-NEXT: v_add_f32_e64 v7, 0x40c00000, s76
+; GFX11-TRUE16-NEXT: v_lshrrev_b32_e32 v96, 16, v4
+; GFX11-TRUE16-NEXT: v_lshrrev_b32_e32 v26, 16, v6
+; GFX11-TRUE16-NEXT: s_bfe_u32 s43, s42, 0x10010
+; GFX11-TRUE16-NEXT: v_cndmask_b32_e32 v1, v1, v9, vcc_lo
+; GFX11-TRUE16-NEXT: v_cmp_u_f32_e32 vcc_lo, v2, v2
+; GFX11-TRUE16-NEXT: s_add_i32 s43, s43, s42
+; GFX11-TRUE16-NEXT: s_bitset1_b32 s42, 22
+; GFX11-TRUE16-NEXT: s_addk_i32 s43, 0x7fff
+; GFX11-TRUE16-NEXT: v_bfe_u32 v3, v7, 16, 1
+; GFX11-TRUE16-NEXT: s_and_b32 s44, vcc_lo, exec_lo
+; GFX11-TRUE16-NEXT: s_cselect_b32 s42, s42, s43
+; GFX11-TRUE16-NEXT: s_lshl_b32 s3, s3, 16
+; GFX11-TRUE16-NEXT: v_add_f32_e64 v2, 0x40c00000, s75
+; GFX11-TRUE16-NEXT: v_add_f32_e64 v6, 0x40c00000, s3
+; GFX11-TRUE16-NEXT: v_lshrrev_b32_e32 v28, 16, v1
+; GFX11-TRUE16-NEXT: v_add_nc_u32_e32 v1, v3, v7
+; GFX11-TRUE16-NEXT: s_lshr_b32 s59, s42, 16
+; GFX11-TRUE16-NEXT: v_bfe_u32 v3, v2, 16, 1
+; GFX11-TRUE16-NEXT: v_readfirstlane_b32 s3, v6
+; GFX11-TRUE16-NEXT: v_cmp_u_f32_e32 vcc_lo, v6, v6
+; GFX11-TRUE16-NEXT: v_add_nc_u32_e32 v1, 0x7fff, v1
+; GFX11-TRUE16-NEXT: v_or_b32_e32 v8, 0x400000, v7
+; GFX11-TRUE16-NEXT: v_add_nc_u32_e32 v3, v3, v2
+; GFX11-TRUE16-NEXT: s_bfe_u32 s43, s3, 0x10010
+; GFX11-TRUE16-NEXT: v_or_b32_e32 v9, 0x400000, v2
+; GFX11-TRUE16-NEXT: s_add_i32 s43, s43, s3
+; GFX11-TRUE16-NEXT: s_bitset1_b32 s3, 22
+; GFX11-TRUE16-NEXT: s_addk_i32 s43, 0x7fff
+; GFX11-TRUE16-NEXT: s_and_b32 s42, vcc_lo, exec_lo
+; GFX11-TRUE16-NEXT: s_cselect_b32 s3, s3, s43
+; GFX11-TRUE16-NEXT: s_and_b32 s42, s2, 0xffff0000
+; GFX11-TRUE16-NEXT: v_cmp_u_f32_e32 vcc_lo, v7, v7
+; GFX11-TRUE16-NEXT: v_add_f32_e64 v6, 0x40c00000, s42
+; GFX11-TRUE16-NEXT: v_add_nc_u32_e32 v3, 0x7fff, v3
+; GFX11-TRUE16-NEXT: s_lshr_b32 s3, s3, 16
+; GFX11-TRUE16-NEXT: v_add_f32_e64 v7, 0x40c00000, s74
+; GFX11-TRUE16-NEXT: v_cndmask_b32_e32 v1, v1, v8, vcc_lo
+; GFX11-TRUE16-NEXT: v_readfirstlane_b32 s42, v6
+; GFX11-TRUE16-NEXT: v_cmp_u_f32_e32 vcc_lo, v2, v2
+; GFX11-TRUE16-NEXT: v_mov_b16_e32 v12.l, v26.l
+; GFX11-TRUE16-NEXT: v_bfe_u32 v8, v7, 16, 1
+; GFX11-TRUE16-NEXT: v_lshrrev_b32_e32 v1, 16, v1
+; GFX11-TRUE16-NEXT: v_mov_b16_e32 v12.h, v28.l
+; GFX11-TRUE16-NEXT: v_cndmask_b32_e32 v2, v3, v9, vcc_lo
+; GFX11-TRUE16-NEXT: v_add_f32_e64 v3, 0x40c00000, s11
+; GFX11-TRUE16-NEXT: s_bfe_u32 s11, s42, 0x10010
+; GFX11-TRUE16-NEXT: v_cmp_u_f32_e32 vcc_lo, v6, v6
+; GFX11-TRUE16-NEXT: s_add_i32 s11, s11, s42
+; GFX11-TRUE16-NEXT: s_bitset1_b32 s42, 22
+; GFX11-TRUE16-NEXT: s_addk_i32 s11, 0x7fff
+; GFX11-TRUE16-NEXT: v_lshrrev_b32_e32 v30, 16, v2
+; GFX11-TRUE16-NEXT: s_and_b32 s43, vcc_lo, exec_lo
+; GFX11-TRUE16-NEXT: s_cselect_b32 s11, s42, s11
+; GFX11-TRUE16-NEXT: s_lshl_b32 s2, s2, 16
+; GFX11-TRUE16-NEXT: v_bfe_u32 v6, v3, 16, 1
+; GFX11-TRUE16-NEXT: v_add_f32_e64 v2, 0x40c00000, s2
+; GFX11-TRUE16-NEXT: v_add_nc_u32_e32 v8, v8, v7
+; GFX11-TRUE16-NEXT: s_lshr_b32 s11, s11, 16
+; GFX11-TRUE16-NEXT: v_mov_b16_e32 v11.h, v1.l
+; GFX11-TRUE16-NEXT: v_add_nc_u32_e32 v1, v6, v3
+; GFX11-TRUE16-NEXT: v_readfirstlane_b32 s2, v2
+; GFX11-TRUE16-NEXT: v_cmp_u_f32_e32 vcc_lo, v2, v2
+; GFX11-TRUE16-NEXT: v_add_nc_u32_e32 v6, 0x7fff, v8
+; GFX11-TRUE16-NEXT: v_or_b32_e32 v8, 0x400000, v7
+; GFX11-TRUE16-NEXT: v_add_nc_u32_e32 v1, 0x7fff, v1
+; GFX11-TRUE16-NEXT: s_bfe_u32 s42, s2, 0x10010
+; GFX11-TRUE16-NEXT: v_or_b32_e32 v9, 0x400000, v3
+; GFX11-TRUE16-NEXT: s_add_i32 s42, s42, s2
+; GFX11-TRUE16-NEXT: s_bitset1_b32 s2, 22
+; GFX11-TRUE16-NEXT: s_addk_i32 s42, 0x7fff
+; GFX11-TRUE16-NEXT: s_and_b32 s43, vcc_lo, exec_lo
+; GFX11-TRUE16-NEXT: s_cselect_b32 s2, s2, s42
+; GFX11-TRUE16-NEXT: s_and_b32 s42, s17, 0xffff0000
+; GFX11-TRUE16-NEXT: v_cmp_u_f32_e32 vcc_lo, v7, v7
+; GFX11-TRUE16-NEXT: v_add_f32_e64 v2, 0x40c00000, s42
+; GFX11-TRUE16-NEXT: s_lshr_b32 s2, s2, 16
+; GFX11-TRUE16-NEXT: v_add_f32_e64 v7, 0x40c00000, s73
+; GFX11-TRUE16-NEXT: v_mov_b16_e32 v11.l, v30.l
+; GFX11-TRUE16-NEXT: v_cndmask_b32_e32 v6, v6, v8, vcc_lo
+; GFX11-TRUE16-NEXT: v_readfirstlane_b32 s42, v2
+; GFX11-TRUE16-NEXT: v_cmp_u_f32_e32 vcc_lo, v3, v3
+; GFX11-TRUE16-NEXT: v_bfe_u32 v3, v7, 16, 1
+; GFX11-TRUE16-NEXT: v_or_b32_e32 v8, 0x400000, v7
+; GFX11-TRUE16-NEXT: v_lshrrev_b32_e32 v29, 16, v6
+; GFX11-TRUE16-NEXT: s_bfe_u32 s43, s42, 0x10010
+; GFX11-TRUE16-NEXT: v_cndmask_b32_e32 v1, v1, v9, vcc_lo
+; GFX11-TRUE16-NEXT: v_cmp_u_f32_e32 vcc_lo, v2, v2
+; GFX11-TRUE16-NEXT: s_add_i32 s43, s43, s42
+; GFX11-TRUE16-NEXT: s_bitset1_b32 s42, 22
+; GFX11-TRUE16-NEXT: s_addk_i32 s43, 0x7fff
+; GFX11-TRUE16-NEXT: v_add_f32_e64 v2, 0x40c00000, s72
+; GFX11-TRUE16-NEXT: s_and_b32 s44, vcc_lo, exec_lo
+; GFX11-TRUE16-NEXT: s_cselect_b32 s42, s42, s43
+; GFX11-TRUE16-NEXT: s_lshl_b32 s17, s17, 16
+; GFX11-TRUE16-NEXT: v_lshrrev_b32_e32 v31, 16, v1
+; GFX11-TRUE16-NEXT: v_add_f32_e64 v6, 0x40c00000, s17
+; GFX11-TRUE16-NEXT: v_add_nc_u32_e32 v1, v3, v7
+; GFX11-TRUE16-NEXT: v_bfe_u32 v3, v2, 16, 1
+; GFX11-TRUE16-NEXT: s_lshr_b32 s60, s42, 16
+; GFX11-TRUE16-NEXT: v_or_b32_e32 v9, 0x400000, v2
+; GFX11-TRUE16-NEXT: v_readfirstlane_b32 s17, v6
+; GFX11-TRUE16-NEXT: v_cmp_u_f32_e32 vcc_lo, v6, v6
+; GFX11-TRUE16-NEXT: v_add_nc_u32_e32 v1, 0x7fff, v1
+; GFX11-TRUE16-NEXT: v_add_nc_u32_e32 v3, v3, v2
+; GFX11-TRUE16-NEXT: v_mov_b16_e32 v17.l, v29.l
+; GFX11-TRUE16-NEXT: s_bfe_u32 s43, s17, 0x10010
+; GFX11-TRUE16-NEXT: v_mov_b16_e32 v17.h, v31.l
+; GFX11-TRUE16-NEXT: s_add_i32 s43, s43, s17
+; GFX11-TRUE16-NEXT: s_bitset1_b32 s17, 22
+; GFX11-TRUE16-NEXT: s_addk_i32 s43, 0x7fff
+; GFX11-TRUE16-NEXT: s_and_b32 s42, vcc_lo, exec_lo
+; GFX11-TRUE16-NEXT: s_cselect_b32 s17, s17, s43
+; GFX11-TRUE16-NEXT: s_and_b32 s42, s16, 0xffff0000
+; GFX11-TRUE16-NEXT: v_cmp_u_f32_e32 vcc_lo, v7, v7
+; GFX11-TRUE16-NEXT: v_add_f32_e64 v6, 0x40c00000, s42
+; GFX11-TRUE16-NEXT: v_add_nc_u32_e32 v3, 0x7fff, v3
+; GFX11-TRUE16-NEXT: v_add_f32_e64 v7, 0x40c00000, s63
+; GFX11-TRUE16-NEXT: s_lshr_b32 s17, s17, 16
+; GFX11-TRUE16-NEXT: v_cndmask_b32_e32 v1, v1, v8, vcc_lo
+; GFX11-TRUE16-NEXT: v_readfirstlane_b32 s42, v6
+; GFX11-TRUE16-NEXT: v_cmp_u_f32_e32 vcc_lo, v2, v2
+; GFX11-TRUE16-NEXT: v_bfe_u32 v8, v7, 16, 1
+; GFX11-TRUE16-NEXT: v_lshrrev_b64 v[13:14], 24, v[11:12]
+; GFX11-TRUE16-NEXT: v_lshrrev_b64 v[14:15], 24, v[4:5]
+; GFX11-TRUE16-NEXT: s_bfe_u32 s43, s42, 0x10010
+; GFX11-TRUE16-NEXT: v_cndmask_b32_e32 v2, v3, v9, vcc_lo
+; GFX11-TRUE16-NEXT: v_cmp_u_f32_e32 vcc_lo, v6, v6
+; GFX11-TRUE16-NEXT: s_add_i32 s43, s43, s42
+; GFX11-TRUE16-NEXT: s_bitset1_b32 s42, 22
+; GFX11-TRUE16-NEXT: s_addk_i32 s43, 0x7fff
+; GFX11-TRUE16-NEXT: v_lshrrev_b32_e32 v33, 16, v2
+; GFX11-TRUE16-NEXT: s_and_b32 s44, vcc_lo, exec_lo
+; GFX11-TRUE16-NEXT: s_cselect_b32 s42, s42, s43
+; GFX11-TRUE16-NEXT: s_lshl_b32 s16, s16, 16
+; GFX11-TRUE16-NEXT: v_add_f32_e64 v3, 0x40c00000, s62
+; GFX11-TRUE16-NEXT: v_add_f32_e64 v2, 0x40c00000, s16
+; GFX11-TRUE16-NEXT: v_add_nc_u32_e32 v8, v8, v7
+; GFX11-TRUE16-NEXT: v_lshrrev_b32_e32 v1, 16, v1
+; GFX11-TRUE16-NEXT: s_lshr_b32 s44, s42, 16
+; GFX11-TRUE16-NEXT: v_bfe_u32 v6, v3, 16, 1
+; GFX11-TRUE16-NEXT: v_readfirstlane_b32 s16, v2
+; GFX11-TRUE16-NEXT: v_cmp_u_f32_e32 vcc_lo, v2, v2
+; GFX11-TRUE16-NEXT: v_mov_b16_e32 v16.h, v1.l
+; GFX11-TRUE16-NEXT: v_or_b32_e32 v9, 0x400000, v3
+; GFX11-TRUE16-NEXT: v_add_nc_u32_e32 v1, v6, v3
+; GFX11-TRUE16-NEXT: s_bfe_u32 s43, s16, 0x10010
+; GFX11-TRUE16-NEXT: v_add_nc_u32_e32 v6, 0x7fff, v8
+; GFX11-TRUE16-NEXT: s_add_i32 s43, s43, s16
+; GFX11-TRUE16-NEXT: s_bitset1_b32 s16, 22
+; GFX11-TRUE16-NEXT: s_addk_i32 s43, 0x7fff
+; GFX11-TRUE16-NEXT: s_and_b32 s42, vcc_lo, exec_lo
+; GFX11-TRUE16-NEXT: s_cselect_b32 s16, s16, s43
+; GFX11-TRUE16-NEXT: s_and_b32 s42, s19, 0xffff0000
+; GFX11-TRUE16-NEXT: v_or_b32_e32 v8, 0x400000, v7
+; GFX11-TRUE16-NEXT: v_add_f32_e64 v2, 0x40c00000, s42
+; GFX11-TRUE16-NEXT: v_cmp_u_f32_e32 vcc_lo, v7, v7
+; GFX11-TRUE16-NEXT: v_add_nc_u32_e32 v1, 0x7fff, v1
+; GFX11-TRUE16-NEXT: s_lshr_b32 s16, s16, 16
+; GFX11-TRUE16-NEXT: v_add_f32_e64 v7, 0x40c00000, s61
+; GFX11-TRUE16-NEXT: v_readfirstlane_b32 s42, v2
+; GFX11-TRUE16-NEXT: v_cndmask_b32_e32 v6, v6, v8, vcc_lo
+; GFX11-TRUE16-NEXT: v_cmp_u_f32_e32 vcc_lo, v3, v3
+; GFX11-TRUE16-NEXT: v_mov_b16_e32 v16.l, v33.l
+; GFX11-TRUE16-NEXT: v_bfe_u32 v3, v7, 16, 1
+; GFX11-TRUE16-NEXT: s_bfe_u32 s43, s42, 0x10010
+; GFX11-TRUE16-NEXT: v_lshrrev_b32_e32 v32, 16, v6
+; GFX11-TRUE16-NEXT: v_cndmask_b32_e32 v1, v1, v9, vcc_lo
+; GFX11-TRUE16-NEXT: v_cmp_u_f32_e32 vcc_lo, v2, v2
+; GFX11-TRUE16-NEXT: s_add_i32 s43, s43, s42
+; GFX11-TRUE16-NEXT: s_bitset1_b32 s42, 22
+; GFX11-TRUE16-NEXT: s_addk_i32 s43, 0x7fff
+; GFX11-TRUE16-NEXT: v_add_f32_e64 v2, 0x40c00000, s57
+; GFX11-TRUE16-NEXT: s_and_b32 s45, vcc_lo, exec_lo
+; GFX11-TRUE16-NEXT: s_cselect_b32 s42, s42, s43
+; GFX11-TRUE16-NEXT: s_lshl_b32 s19, s19, 16
+; GFX11-TRUE16-NEXT: v_lshrrev_b32_e32 v34, 16, v1
+; GFX11-TRUE16-NEXT: v_add_f32_e64 v6, 0x40c00000, s19
+; GFX11-TRUE16-NEXT: v_add_nc_u32_e32 v1, v3, v7
+; GFX11-TRUE16-NEXT: v_bfe_u32 v3, v2, 16, 1
+; GFX11-TRUE16-NEXT: s_lshr_b32 s61, s42, 16
+; GFX11-TRUE16-NEXT: v_or_b32_e32 v8, 0x400000, v7
+; GFX11-TRUE16-NEXT: v_readfirstlane_b32 s19, v6
+; GFX11-TRUE16-NEXT: v_cmp_u_f32_e32 vcc_lo, v6, v6
+; GFX11-TRUE16-NEXT: v_add_nc_u32_e32 v1, 0x7fff, v1
+; GFX11-TRUE16-NEXT: v_add_nc_u32_e32 v3, v3, v2
+; GFX11-TRUE16-NEXT: v_or_b32_e32 v9, 0x400000, v2
+; GFX11-TRUE16-NEXT: s_bfe_u32 s43, s19, 0x10010
+; GFX11-TRUE16-NEXT: v_mov_b16_e32 v19.l, v32.l
+; GFX11-TRUE16-NEXT: s_add_i32 s43, s43, s19
+; GFX11-TRUE16-NEXT: s_bitset1_b32 s19, 22
+; GFX11-TRUE16-NEXT: s_addk_i32 s43, 0x7fff
+; GFX11-TRUE16-NEXT: s_and_b32 s42, vcc_lo, exec_lo
+; GFX11-TRUE16-NEXT: s_cselect_b32 s19, s19, s43
+; GFX11-TRUE16-NEXT: s_and_b32 s42, s18, 0xffff0000
+; GFX11-TRUE16-NEXT: v_cmp_u_f32_e32 vcc_lo, v7, v7
+; GFX11-TRUE16-NEXT: v_add_f32_e64 v6, 0x40c00000, s42
+; GFX11-TRUE16-NEXT: v_add_nc_u32_e32 v3, 0x7fff, v3
+; GFX11-TRUE16-NEXT: v_add_f32_e64 v7, 0x40c00000, s56
+; GFX11-TRUE16-NEXT: s_lshr_b32 s19, s19, 16
+; GFX11-TRUE16-NEXT: v_cndmask_b32_e32 v1, v1, v8, vcc_lo
+; GFX11-TRUE16-NEXT: v_readfirstlane_b32 s42, v6
+; GFX11-TRUE16-NEXT: v_cmp_u_f32_e32 vcc_lo, v2, v2
+; GFX11-TRUE16-NEXT: v_bfe_u32 v8, v7, 16, 1
+; GFX11-TRUE16-NEXT: v_mov_b16_e32 v19.h, v34.l
+; GFX11-TRUE16-NEXT: s_pack_ll_b32_b16 s45, s17, s60
+; GFX11-TRUE16-NEXT: s_pack_ll_b32_b16 s44, s16, s44
+; GFX11-TRUE16-NEXT: v_cndmask_b32_e32 v2, v3, v9, vcc_lo
+; GFX11-TRUE16-NEXT: v_add_f32_e64 v3, 0x40c00000, s40
+; GFX11-TRUE16-NEXT: s_bfe_u32 s40, s42, 0x10010
+; GFX11-TRUE16-NEXT: v_cmp_u_f32_e32 vcc_lo, v6, v6
+; GFX11-TRUE16-NEXT: s_add_i32 s40, s40, s42
+; GFX11-TRUE16-NEXT: s_bitset1_b32 s42, 22
+; GFX11-TRUE16-NEXT: s_addk_i32 s40, 0x7fff
+; GFX11-TRUE16-NEXT: v_lshrrev_b32_e32 v36, 16, v2
+; GFX11-TRUE16-NEXT: s_and_b32 s43, vcc_lo, exec_lo
+; GFX11-TRUE16-NEXT: s_cselect_b32 s40, s42, s40
+; GFX11-TRUE16-NEXT: s_lshl_b32 s18, s18, 16
+; GFX11-TRUE16-NEXT: v_bfe_u32 v6, v3, 16, 1
+; GFX11-TRUE16-NEXT: v_add_f32_e64 v2, 0x40c00000, s18
+; GFX11-TRUE16-NEXT: v_add_nc_u32_e32 v8, v8, v7
+; GFX11-TRUE16-NEXT: v_lshrrev_b32_e32 v1, 16, v1
+; GFX11-TRUE16-NEXT: s_lshr_b32 s40, s40, 16
+; GFX11-TRUE16-NEXT: v_or_b32_e32 v9, 0x400000, v3
+; GFX11-TRUE16-NEXT: v_readfirstlane_b32 s18, v2
+; GFX11-TRUE16-NEXT: v_cmp_u_f32_e32 vcc_lo, v2, v2
+; GFX11-TRUE16-NEXT: v_mov_b16_e32 v18.h, v1.l
+; GFX11-TRUE16-NEXT: v_add_nc_u32_e32 v1, v6, v3
+; GFX11-TRUE16-NEXT: v_add_nc_u32_e32 v6, 0x7fff, v8
+; GFX11-TRUE16-NEXT: s_bfe_u32 s42, s18, 0x10010
+; GFX11-TRUE16-NEXT: v_or_b32_e32 v8, 0x400000, v7
+; GFX11-TRUE16-NEXT: s_add_i32 s42, s42, s18
+; GFX11-TRUE16-NEXT: s_bitset1_b32 s18, 22
+; GFX11-TRUE16-NEXT: s_addk_i32 s42, 0x7fff
+; GFX11-TRUE16-NEXT: s_and_b32 s43, vcc_lo, exec_lo
+; GFX11-TRUE16-NEXT: s_cselect_b32 s18, s18, s42
+; GFX11-TRUE16-NEXT: s_and_b32 s42, s21, 0xffff0000
+; GFX11-TRUE16-NEXT: v_cmp_u_f32_e32 vcc_lo, v7, v7
+; GFX11-TRUE16-NEXT: v_add_f32_e64 v2, 0x40c00000, s42
+; GFX11-TRUE16-NEXT: v_add_nc_u32_e32 v1, 0x7fff, v1
+; GFX11-TRUE16-NEXT: v_add_f32_e64 v7, 0x40c00000, s29
+; GFX11-TRUE16-NEXT: s_lshr_b32 s18, s18, 16
+; GFX11-TRUE16-NEXT: v_cndmask_b32_e32 v6, v6, v8, vcc_lo
+; GFX11-TRUE16-NEXT: v_readfirstlane_b32 s29, v2
+; GFX11-TRUE16-NEXT: v_cmp_u_f32_e32 vcc_lo, v3, v3
+; GFX11-TRUE16-NEXT: v_bfe_u32 v3, v7, 16, 1
+; GFX11-TRUE16-NEXT: v_or_b32_e32 v8, 0x400000, v7
+; GFX11-TRUE16-NEXT: v_lshrrev_b32_e32 v35, 16, v6
+; GFX11-TRUE16-NEXT: s_bfe_u32 s42, s29, 0x10010
+; GFX11-TRUE16-NEXT: v_cndmask_b32_e32 v1, v1, v9, vcc_lo
+; GFX11-TRUE16-NEXT: v_cmp_u_f32_e32 vcc_lo, v2, v2
+; GFX11-TRUE16-NEXT: s_add_i32 s42, s42, s29
+; GFX11-TRUE16-NEXT: s_bitset1_b32 s29, 22
+; GFX11-TRUE16-NEXT: s_addk_i32 s42, 0x7fff
+; GFX11-TRUE16-NEXT: v_add_f32_e64 v2, 0x40c00000, s14
+; GFX11-TRUE16-NEXT: s_and_b32 s43, vcc_lo, exec_lo
+; GFX11-TRUE16-NEXT: s_cselect_b32 s29, s29, s42
+; GFX11-TRUE16-NEXT: s_lshl_b32 s21, s21, 16
+; GFX11-TRUE16-NEXT: v_lshrrev_b32_e32 v37, 16, v1
+; GFX11-TRUE16-NEXT: v_add_f32_e64 v6, 0x40c00000, s21
+; GFX11-TRUE16-NEXT: v_add_nc_u32_e32 v1, v3, v7
+; GFX11-TRUE16-NEXT: v_bfe_u32 v3, v2, 16, 1
+; GFX11-TRUE16-NEXT: s_lshr_b32 s62, s29, 16
+; GFX11-TRUE16-NEXT: v_or_b32_e32 v9, 0x400000, v2
+; GFX11-TRUE16-NEXT: v_readfirstlane_b32 s14, v6
+; GFX11-TRUE16-NEXT: v_cmp_u_f32_e32 vcc_lo, v6, v6
+; GFX11-TRUE16-NEXT: v_add_nc_u32_e32 v1, 0x7fff, v1
+; GFX11-TRUE16-NEXT: v_add_nc_u32_e32 v3, v3, v2
+; GFX11-TRUE16-NEXT: s_pack_ll_b32_b16 s42, s2, s11
+; GFX11-TRUE16-NEXT: s_bfe_u32 s21, s14, 0x10010
+; GFX11-TRUE16-NEXT: v_mov_b16_e32 v21.l, v35.l
+; GFX11-TRUE16-NEXT: s_add_i32 s21, s21, s14
+; GFX11-TRUE16-NEXT: s_bitset1_b32 s14, 22
+; GFX11-TRUE16-NEXT: s_addk_i32 s21, 0x7fff
+; GFX11-TRUE16-NEXT: s_and_b32 s29, vcc_lo, exec_lo
+; GFX11-TRUE16-NEXT: s_cselect_b32 s14, s14, s21
+; GFX11-TRUE16-NEXT: s_and_b32 s21, s20, 0xffff0000
+; GFX11-TRUE16-NEXT: v_cmp_u_f32_e32 vcc_lo, v7, v7
+; GFX11-TRUE16-NEXT: v_add_f32_e64 v6, 0x40c00000, s21
+; GFX11-TRUE16-NEXT: v_add_nc_u32_e32 v3, 0x7fff, v3
+; GFX11-TRUE16-NEXT: v_add_f32_e64 v7, 0x40c00000, s13
+; GFX11-TRUE16-NEXT: s_lshr_b32 s21, s14, 16
+; GFX11-TRUE16-NEXT: v_cndmask_b32_e32 v1, v1, v8, vcc_lo
+; GFX11-TRUE16-NEXT: v_readfirstlane_b32 s13, v6
+; GFX11-TRUE16-NEXT: v_cmp_u_f32_e32 vcc_lo, v2, v2
+; GFX11-TRUE16-NEXT: v_bfe_u32 v8, v7, 16, 1
+; GFX11-TRUE16-NEXT: v_mov_b16_e32 v21.h, v37.l
+; GFX11-TRUE16-NEXT: v_mov_b16_e32 v18.l, v36.l
+; GFX11-TRUE16-NEXT: s_pack_ll_b32_b16 s43, s3, s59
+; GFX11-TRUE16-NEXT: v_cndmask_b32_e32 v2, v3, v9, vcc_lo
+; GFX11-TRUE16-NEXT: v_add_f32_e64 v3, 0x40c00000, s12
+; GFX11-TRUE16-NEXT: s_bfe_u32 s12, s13, 0x10010
+; GFX11-TRUE16-NEXT: v_cmp_u_f32_e32 vcc_lo, v6, v6
+; GFX11-TRUE16-NEXT: s_add_i32 s12, s12, s13
+; GFX11-TRUE16-NEXT: s_bitset1_b32 s13, 22
+; GFX11-TRUE16-NEXT: s_addk_i32 s12, 0x7fff
+; GFX11-TRUE16-NEXT: v_lshrrev_b32_e32 v39, 16, v2
+; GFX11-TRUE16-NEXT: s_and_b32 s14, vcc_lo, exec_lo
+; GFX11-TRUE16-NEXT: s_cselect_b32 s12, s13, s12
+; GFX11-TRUE16-NEXT: s_lshl_b32 s13, s20, 16
+; GFX11-TRUE16-NEXT: v_bfe_u32 v6, v3, 16, 1
+; GFX11-TRUE16-NEXT: v_add_f32_e64 v2, 0x40c00000, s13
+; GFX11-TRUE16-NEXT: v_add_nc_u32_e32 v8, v8, v7
+; GFX11-TRUE16-NEXT: v_lshrrev_b32_e32 v1, 16, v1
+; GFX11-TRUE16-NEXT: s_lshr_b32 s12, s12, 16
+; GFX11-TRUE16-NEXT: v_or_b32_e32 v9, 0x400000, v3
+; GFX11-TRUE16-NEXT: v_readfirstlane_b32 s13, v2
+; GFX11-TRUE16-NEXT: v_cmp_u_f32_e32 vcc_lo, v2, v2
+; GFX11-TRUE16-NEXT: v_mov_b16_e32 v20.h, v1.l
+; GFX11-TRUE16-NEXT: v_add_nc_u32_e32 v1, v6, v3
+; GFX11-TRUE16-NEXT: v_add_nc_u32_e32 v6, 0x7fff, v8
+; GFX11-TRUE16-NEXT: s_bfe_u32 s14, s13, 0x10010
+; GFX11-TRUE16-NEXT: v_or_b32_e32 v8, 0x400000, v7
+; GFX11-TRUE16-NEXT: s_add_i32 s14, s14, s13
+; GFX11-TRUE16-NEXT: s_bitset1_b32 s13, 22
+; GFX11-TRUE16-NEXT: s_addk_i32 s14, 0x7fff
+; GFX11-TRUE16-NEXT: s_and_b32 s20, vcc_lo, exec_lo
+; GFX11-TRUE16-NEXT: s_cselect_b32 s13, s13, s14
+; GFX11-TRUE16-NEXT: s_and_b32 s14, s23, 0xffff0000
+; GFX11-TRUE16-NEXT: v_cmp_u_f32_e32 vcc_lo, v7, v7
+; GFX11-TRUE16-NEXT: v_add_f32_e64 v2, 0x40c00000, s14
+; GFX11-TRUE16-NEXT: v_add_nc_u32_e32 v1, 0x7fff, v1
+; GFX11-TRUE16-NEXT: v_add_f32_e64 v7, 0x40c00000, s47
+; GFX11-TRUE16-NEXT: v_mov_b16_e32 v20.l, v39.l
+; GFX11-TRUE16-NEXT: v_cndmask_b32_e32 v6, v6, v8, vcc_lo
+; GFX11-TRUE16-NEXT: v_readfirstlane_b32 s14, v2
+; GFX11-TRUE16-NEXT: v_cmp_u_f32_e32 vcc_lo, v3, v3
+; GFX11-TRUE16-NEXT: v_bfe_u32 v3, v7, 16, 1
+; GFX11-TRUE16-NEXT: v_or_b32_e32 v8, 0x400000, v7
+; GFX11-TRUE16-NEXT: v_lshrrev_b32_e32 v38, 16, v6
+; GFX11-TRUE16-NEXT: s_bfe_u32 s20, s14, 0x10010
+; GFX11-TRUE16-NEXT: v_cndmask_b32_e32 v1, v1, v9, vcc_lo
+; GFX11-TRUE16-NEXT: v_cmp_u_f32_e32 vcc_lo, v2, v2
+; GFX11-TRUE16-NEXT: s_add_i32 s29, s20, s14
+; GFX11-TRUE16-NEXT: s_lshr_b32 s20, s13, 16
+; GFX11-TRUE16-NEXT: s_addk_i32 s29, 0x7fff
+; GFX11-TRUE16-NEXT: s_bitset1_b32 s14, 22
+; GFX11-TRUE16-NEXT: s_and_b32 s13, vcc_lo, exec_lo
+; GFX11-TRUE16-NEXT: s_cselect_b32 s13, s14, s29
+; GFX11-TRUE16-NEXT: s_lshl_b32 s14, s23, 16
+; GFX11-TRUE16-NEXT: v_add_f32_e64 v2, 0x40c00000, s41
+; GFX11-TRUE16-NEXT: v_add_f32_e64 v6, 0x40c00000, s14
+; GFX11-TRUE16-NEXT: v_lshrrev_b32_e32 v48, 16, v1
+; GFX11-TRUE16-NEXT: v_add_nc_u32_e32 v1, v3, v7
+; GFX11-TRUE16-NEXT: s_lshr_b32 s63, s13, 16
+; GFX11-TRUE16-NEXT: v_bfe_u32 v3, v2, 16, 1
+; GFX11-TRUE16-NEXT: v_readfirstlane_b32 s14, v6
+; GFX11-TRUE16-NEXT: v_cmp_u_f32_e32 vcc_lo, v6, v6
+; GFX11-TRUE16-NEXT: v_add_nc_u32_e32 v1, 0x7fff, v1
+; GFX11-TRUE16-NEXT: v_add_f32_e64 v9, 0x40c00000, s28
+; GFX11-TRUE16-NEXT: v_add_nc_u32_e32 v3, v3, v2
+; GFX11-TRUE16-NEXT: s_bfe_u32 s23, s14, 0x10010
+; GFX11-TRUE16-NEXT: s_pack_ll_b32_b16 s28, s0, s6
+; GFX11-TRUE16-NEXT: s_add_i32 s23, s23, s14
+; GFX11-TRUE16-NEXT: s_bitset1_b32 s14, 22
+; GFX11-TRUE16-NEXT: s_addk_i32 s23, 0x7fff
+; GFX11-TRUE16-NEXT: s_and_b32 s13, vcc_lo, exec_lo
+; GFX11-TRUE16-NEXT: s_cselect_b32 s13, s14, s23
+; GFX11-TRUE16-NEXT: s_and_b32 s14, s22, 0xffff0000
+; GFX11-TRUE16-NEXT: v_cmp_u_f32_e32 vcc_lo, v7, v7
+; GFX11-TRUE16-NEXT: v_add_f32_e64 v6, 0x40c00000, s14
+; GFX11-TRUE16-NEXT: v_add_nc_u32_e32 v3, 0x7fff, v3
+; GFX11-TRUE16-NEXT: v_or_b32_e32 v7, 0x400000, v2
+; GFX11-TRUE16-NEXT: s_lshr_b32 s23, s13, 16
+; GFX11-TRUE16-NEXT: v_cndmask_b32_e32 v1, v1, v8, vcc_lo
+; GFX11-TRUE16-NEXT: v_readfirstlane_b32 s14, v6
+; GFX11-TRUE16-NEXT: v_cmp_u_f32_e32 vcc_lo, v2, v2
+; GFX11-TRUE16-NEXT: v_add_f32_e64 v8, 0x40c00000, s15
+; GFX11-TRUE16-NEXT: v_or_b32_e32 v10, 0x400000, v9
+; GFX11-TRUE16-NEXT: v_mov_b16_e32 v23.l, v38.l
+; GFX11-TRUE16-NEXT: s_bfe_u32 s15, s14, 0x10010
+; GFX11-TRUE16-NEXT: v_cndmask_b32_e32 v2, v3, v7, vcc_lo
+; GFX11-TRUE16-NEXT: v_cmp_u_f32_e32 vcc_lo, v6, v6
+; GFX11-TRUE16-NEXT: s_add_i32 s15, s15, s14
+; GFX11-TRUE16-NEXT: s_bitset1_b32 s14, 22
+; GFX11-TRUE16-NEXT: s_addk_i32 s15, 0x7fff
+; GFX11-TRUE16-NEXT: v_lshrrev_b32_e32 v51, 16, v2
+; GFX11-TRUE16-NEXT: s_and_b32 s13, vcc_lo, exec_lo
+; GFX11-TRUE16-NEXT: s_cselect_b32 s13, s14, s15
+; GFX11-TRUE16-NEXT: s_lshl_b32 s14, s22, 16
+; GFX11-TRUE16-NEXT: v_bfe_u32 v3, v8, 16, 1
+; GFX11-TRUE16-NEXT: v_add_f32_e64 v2, 0x40c00000, s14
+; GFX11-TRUE16-NEXT: v_bfe_u32 v7, v9, 16, 1
+; GFX11-TRUE16-NEXT: s_lshr_b32 s13, s13, 16
+; GFX11-TRUE16-NEXT: v_mov_b16_e32 v23.h, v48.l
+; GFX11-TRUE16-NEXT: v_add_nc_u32_e32 v3, v3, v8
+; GFX11-TRUE16-NEXT: v_readfirstlane_b32 s14, v2
+; GFX11-TRUE16-NEXT: v_cmp_u_f32_e32 vcc_lo, v2, v2
+; GFX11-TRUE16-NEXT: v_add_nc_u32_e32 v6, v7, v9
+; GFX11-TRUE16-NEXT: v_or_b32_e32 v7, 0x400000, v8
+; GFX11-TRUE16-NEXT: v_add_nc_u32_e32 v3, 0x7fff, v3
+; GFX11-TRUE16-NEXT: s_bfe_u32 s15, s14, 0x10010
+; GFX11-TRUE16-NEXT: v_lshrrev_b32_e32 v1, 16, v1
+; GFX11-TRUE16-NEXT: s_add_i32 s15, s15, s14
+; GFX11-TRUE16-NEXT: s_bitset1_b32 s14, 22
+; GFX11-TRUE16-NEXT: s_addk_i32 s15, 0x7fff
+; GFX11-TRUE16-NEXT: s_and_b32 s22, vcc_lo, exec_lo
+; GFX11-TRUE16-NEXT: v_cmp_u_f32_e32 vcc_lo, v8, v8
+; GFX11-TRUE16-NEXT: s_cselect_b32 s14, s14, s15
+; GFX11-TRUE16-NEXT: s_and_b32 s15, s25, 0xffff0000
+; GFX11-TRUE16-NEXT: v_add_nc_u32_e32 v6, 0x7fff, v6
+; GFX11-TRUE16-NEXT: s_lshr_b32 s22, s14, 16
+; GFX11-TRUE16-NEXT: v_cndmask_b32_e32 v2, v3, v7, vcc_lo
+; GFX11-TRUE16-NEXT: v_add_f32_e64 v3, 0x40c00000, s15
+; GFX11-TRUE16-NEXT: v_cmp_u_f32_e32 vcc_lo, v9, v9
+; GFX11-TRUE16-NEXT: v_add_f32_e64 v7, 0x40c00000, s10
+; GFX11-TRUE16-NEXT: v_add_f32_e64 v8, 0x40c00000, s8
+; GFX11-TRUE16-NEXT: v_lshrrev_b32_e32 v49, 16, v2
+; GFX11-TRUE16-NEXT: v_readfirstlane_b32 s10, v3
+; GFX11-TRUE16-NEXT: v_cndmask_b32_e32 v6, v6, v10, vcc_lo
+; GFX11-TRUE16-NEXT: v_add_f32_e64 v2, 0x40c00000, s9
+; GFX11-TRUE16-NEXT: v_cmp_u_f32_e32 vcc_lo, v3, v3
+; GFX11-TRUE16-NEXT: v_mov_b16_e32 v22.h, v1.l
+; GFX11-TRUE16-NEXT: s_bfe_u32 s9, s10, 0x10010
+; GFX11-TRUE16-NEXT: v_bfe_u32 v1, v7, 16, 1
+; GFX11-TRUE16-NEXT: s_add_i32 s9, s9, s10
+; GFX11-TRUE16-NEXT: s_bitset1_b32 s10, 22
+; GFX11-TRUE16-NEXT: s_addk_i32 s9, 0x7fff
+; GFX11-TRUE16-NEXT: s_and_b32 s14, vcc_lo, exec_lo
+; GFX11-TRUE16-NEXT: s_cselect_b32 s9, s10, s9
+; GFX11-TRUE16-NEXT: s_lshl_b32 s10, s25, 16
+; GFX11-TRUE16-NEXT: v_lshrrev_b32_e32 v50, 16, v6
+; GFX11-TRUE16-NEXT: v_add_f32_e64 v3, 0x40c00000, s10
+; GFX11-TRUE16-NEXT: v_add_nc_u32_e32 v1, v1, v7
+; GFX11-TRUE16-NEXT: v_bfe_u32 v6, v2, 16, 1
+; GFX11-TRUE16-NEXT: s_lshr_b32 s72, s9, 16
+; GFX11-TRUE16-NEXT: v_or_b32_e32 v9, 0x400000, v7
+; GFX11-TRUE16-NEXT: v_readfirstlane_b32 s8, v3
+; GFX11-TRUE16-NEXT: v_cmp_u_f32_e32 vcc_lo, v3, v3
+; GFX11-TRUE16-NEXT: v_add_nc_u32_e32 v1, 0x7fff, v1
+; GFX11-TRUE16-NEXT: v_add_nc_u32_e32 v6, v6, v2
+; GFX11-TRUE16-NEXT: v_bfe_u32 v10, v8, 16, 1
+; GFX11-TRUE16-NEXT: s_bfe_u32 s10, s8, 0x10010
+; GFX11-TRUE16-NEXT: v_mov_b16_e32 v22.l, v51.l
+; GFX11-TRUE16-NEXT: s_add_i32 s10, s10, s8
+; GFX11-TRUE16-NEXT: s_bitset1_b32 s8, 22
+; GFX11-TRUE16-NEXT: s_addk_i32 s10, 0x7fff
+; GFX11-TRUE16-NEXT: s_and_b32 s9, vcc_lo, exec_lo
+; GFX11-TRUE16-NEXT: s_cselect_b32 s8, s8, s10
+; GFX11-TRUE16-NEXT: s_and_b32 s9, s24, 0xffff0000
+; GFX11-TRUE16-NEXT: v_cmp_u_f32_e32 vcc_lo, v7, v7
+; GFX11-TRUE16-NEXT: v_add_f32_e64 v3, 0x40c00000, s9
+; GFX11-TRUE16-NEXT: v_add_nc_u32_e32 v6, 0x7fff, v6
+; GFX11-TRUE16-NEXT: v_or_b32_e32 v7, 0x400000, v2
+; GFX11-TRUE16-NEXT: s_lshr_b32 s25, s8, 16
+; GFX11-TRUE16-NEXT: v_cndmask_b32_e32 v1, v1, v9, vcc_lo
+; GFX11-TRUE16-NEXT: v_add_nc_u32_e32 v9, v10, v8
+; GFX11-TRUE16-NEXT: v_readfirstlane_b32 s9, v3
+; GFX11-TRUE16-NEXT: v_cmp_u_f32_e32 vcc_lo, v2, v2
+; GFX11-TRUE16-NEXT: v_mov_b16_e32 v69.l, v50.l
+; GFX11-TRUE16-NEXT: v_lshrrev_b32_e32 v1, 16, v1
+; GFX11-TRUE16-NEXT: v_mov_b16_e32 v69.h, v49.l
+; GFX11-TRUE16-NEXT: s_pack_ll_b32_b16 s29, s1, s58
+; GFX11-TRUE16-NEXT: v_cndmask_b32_e32 v2, v6, v7, vcc_lo
+; GFX11-TRUE16-NEXT: v_add_nc_u32_e32 v6, 0x7fff, v9
+; GFX11-TRUE16-NEXT: v_add_f32_e64 v9, 0x40c00000, s7
+; GFX11-TRUE16-NEXT: s_bfe_u32 s7, s9, 0x10010
+; GFX11-TRUE16-NEXT: v_cmp_u_f32_e32 vcc_lo, v3, v3
+; GFX11-TRUE16-NEXT: s_add_i32 s7, s7, s9
+; GFX11-TRUE16-NEXT: s_bitset1_b32 s9, 22
+; GFX11-TRUE16-NEXT: s_addk_i32 s7, 0x7fff
+; GFX11-TRUE16-NEXT: v_or_b32_e32 v7, 0x400000, v8
+; GFX11-TRUE16-NEXT: s_and_b32 s8, vcc_lo, exec_lo
+; GFX11-TRUE16-NEXT: s_cselect_b32 s7, s9, s7
+; GFX11-TRUE16-NEXT: s_lshl_b32 s8, s24, 16
+; GFX11-TRUE16-NEXT: v_lshrrev_b32_e32 v53, 16, v2
+; GFX11-TRUE16-NEXT: v_add_f32_e64 v2, 0x40c00000, s8
+; GFX11-TRUE16-NEXT: v_cmp_u_f32_e32 vcc_lo, v8, v8
+; GFX11-TRUE16-NEXT: s_lshr_b32 s10, s7, 16
+; GFX11-TRUE16-NEXT: v_mov_b16_e32 v68.h, v1.l
+; GFX11-TRUE16-NEXT: v_mov_b16_e32 v68.l, v53.l
+; GFX11-TRUE16-NEXT: v_readfirstlane_b32 s8, v2
+; GFX11-TRUE16-NEXT: v_cndmask_b32_e32 v3, v6, v7, vcc_lo
+; GFX11-TRUE16-NEXT: v_cmp_u_f32_e32 vcc_lo, v2, v2
+; GFX11-TRUE16-NEXT: v_bfe_u32 v6, v9, 16, 1
+; GFX11-TRUE16-NEXT: v_add_f32_e64 v2, 0x40c00000, s4
+; GFX11-TRUE16-NEXT: v_or_b32_e32 v7, 0x400000, v9
+; GFX11-TRUE16-NEXT: v_lshrrev_b32_e32 v52, 16, v3
+; GFX11-TRUE16-NEXT: v_add_f32_e64 v3, 0x40c00000, s5
+; GFX11-TRUE16-NEXT: s_bfe_u32 s5, s8, 0x10010
+; GFX11-TRUE16-NEXT: v_add_nc_u32_e32 v1, v6, v9
+; GFX11-TRUE16-NEXT: s_add_i32 s5, s5, s8
+; GFX11-TRUE16-NEXT: s_bitset1_b32 s8, 22
+; GFX11-TRUE16-NEXT: s_addk_i32 s5, 0x7fff
+; GFX11-TRUE16-NEXT: s_and_b32 s7, vcc_lo, exec_lo
+; GFX11-TRUE16-NEXT: s_cselect_b32 s5, s8, s5
+; GFX11-TRUE16-NEXT: s_and_b32 s7, s27, 0xffff0000
+; GFX11-TRUE16-NEXT: v_add_nc_u32_e32 v1, 0x7fff, v1
+; GFX11-TRUE16-NEXT: v_add_f32_e64 v6, 0x40c00000, s7
+; GFX11-TRUE16-NEXT: v_cmp_u_f32_e32 vcc_lo, v9, v9
+; GFX11-TRUE16-NEXT: v_bfe_u32 v8, v3, 16, 1
+; GFX11-TRUE16-NEXT: v_bfe_u32 v10, v2, 16, 1
+; GFX11-TRUE16-NEXT: s_lshr_b32 s24, s5, 16
+; GFX11-TRUE16-NEXT: v_readfirstlane_b32 s4, v6
+; GFX11-TRUE16-NEXT: v_cndmask_b32_e32 v1, v1, v7, vcc_lo
+; GFX11-TRUE16-NEXT: v_cmp_u_f32_e32 vcc_lo, v6, v6
+; GFX11-TRUE16-NEXT: v_add_nc_u32_e32 v7, v8, v3
+; GFX11-TRUE16-NEXT: v_add_nc_u32_e32 v8, v10, v2
+; GFX11-TRUE16-NEXT: s_bfe_u32 s7, s4, 0x10010
+; GFX11-TRUE16-NEXT: v_lshrrev_b32_e32 v54, 16, v1
+; GFX11-TRUE16-NEXT: s_add_i32 s7, s7, s4
+; GFX11-TRUE16-NEXT: s_bitset1_b32 s4, 22
+; GFX11-TRUE16-NEXT: s_addk_i32 s7, 0x7fff
+; GFX11-TRUE16-NEXT: s_and_b32 s5, vcc_lo, exec_lo
+; GFX11-TRUE16-NEXT: s_cselect_b32 s4, s4, s7
+; GFX11-TRUE16-NEXT: s_lshl_b32 s5, s27, 16
+; GFX11-TRUE16-NEXT: v_add_nc_u32_e32 v6, 0x7fff, v7
+; GFX11-TRUE16-NEXT: v_add_f32_e64 v1, 0x40c00000, s5
+; GFX11-TRUE16-NEXT: v_or_b32_e32 v7, 0x400000, v3
+; GFX11-TRUE16-NEXT: v_cmp_u_f32_e32 vcc_lo, v3, v3
+; GFX11-TRUE16-NEXT: v_add_nc_u32_e32 v8, 0x7fff, v8
+; GFX11-TRUE16-NEXT: v_or_b32_e32 v9, 0x400000, v2
+; GFX11-TRUE16-NEXT: v_readfirstlane_b32 s5, v1
+; GFX11-TRUE16-NEXT: s_lshr_b32 s73, s4, 16
+; GFX11-TRUE16-NEXT: v_cndmask_b32_e32 v3, v6, v7, vcc_lo
+; GFX11-TRUE16-NEXT: v_cmp_u_f32_e32 vcc_lo, v2, v2
+; GFX11-TRUE16-NEXT: v_mov_b16_e32 v65.l, v54.l
+; GFX11-TRUE16-NEXT: s_bfe_u32 s7, s5, 0x10010
+; GFX11-TRUE16-NEXT: v_mov_b16_e32 v65.h, v52.l
+; GFX11-TRUE16-NEXT: s_add_i32 s7, s7, s5
+; GFX11-TRUE16-NEXT: v_cndmask_b32_e32 v2, v8, v9, vcc_lo
+; GFX11-TRUE16-NEXT: v_cmp_u_f32_e32 vcc_lo, v1, v1
+; GFX11-TRUE16-NEXT: s_addk_i32 s7, 0x7fff
+; GFX11-TRUE16-NEXT: s_bitset1_b32 s5, 22
+; GFX11-TRUE16-NEXT: s_pack_ll_b32_b16 s8, s22, s13
+; GFX11-TRUE16-NEXT: v_lshrrev_b32_e32 v55, 16, v2
+; GFX11-TRUE16-NEXT: s_and_b32 s4, vcc_lo, exec_lo
+; GFX11-TRUE16-NEXT: s_cselect_b32 s4, s5, s7
+; GFX11-TRUE16-NEXT: s_and_b32 s5, s26, 0xffff0000
+; GFX11-TRUE16-NEXT: s_lshr_b32 s27, s4, 16
+; GFX11-TRUE16-NEXT: v_add_f32_e64 v1, 0x40c00000, s5
+; GFX11-TRUE16-NEXT: v_lshrrev_b32_e32 v2, 16, v3
+; GFX11-TRUE16-NEXT: v_mov_b16_e32 v64.l, v55.l
+; GFX11-TRUE16-NEXT: v_lshrrev_b64 v[6:7], 24, v[22:23]
+; GFX11-TRUE16-NEXT: v_lshrrev_b64 v[7:8], 24, v[20:21]
+; GFX11-TRUE16-NEXT: v_readfirstlane_b32 s5, v1
+; GFX11-TRUE16-NEXT: v_cmp_u_f32_e32 vcc_lo, v1, v1
+; GFX11-TRUE16-NEXT: v_mov_b16_e32 v64.h, v2.l
+; GFX11-TRUE16-NEXT: v_lshrrev_b64 v[8:9], 24, v[18:19]
+; GFX11-TRUE16-NEXT: v_lshrrev_b64 v[9:10], 24, v[16:17]
+; GFX11-TRUE16-NEXT: s_bfe_u32 s6, s5, 0x10010
+; GFX11-TRUE16-NEXT: s_pack_ll_b32_b16 s9, s23, s63
+; GFX11-TRUE16-NEXT: s_add_i32 s6, s6, s5
+; GFX11-TRUE16-NEXT: s_bitset1_b32 s5, 22
+; GFX11-TRUE16-NEXT: s_addk_i32 s6, 0x7fff
+; GFX11-TRUE16-NEXT: s_and_b32 s4, vcc_lo, exec_lo
+; GFX11-TRUE16-NEXT: s_cselect_b32 s14, s5, s6
+; GFX11-TRUE16-NEXT: s_lshl_b32 s4, s26, 16
+; GFX11-TRUE16-NEXT: s_pack_ll_b32_b16 s6, s20, s12
+; GFX11-TRUE16-NEXT: v_add_f32_e64 v1, 0x40c00000, s4
+; GFX11-TRUE16-NEXT: s_lshr_b32 s13, s14, 16
+; GFX11-TRUE16-NEXT: s_pack_ll_b32_b16 s5, s19, s61
+; GFX11-TRUE16-NEXT: s_pack_ll_b32_b16 s4, s18, s40
+; GFX11-TRUE16-NEXT: v_lshrrev_b32_e32 v10, 8, v65
+; GFX11-TRUE16-NEXT: v_readfirstlane_b32 s11, v1
+; GFX11-TRUE16-NEXT: v_cmp_u_f32_e32 vcc_lo, v1, v1
+; GFX11-TRUE16-NEXT: v_lshrrev_b64 v[1:2], 24, v[64:65]
+; GFX11-TRUE16-NEXT: v_lshrrev_b64 v[2:3], 24, v[68:69]
+; GFX11-TRUE16-NEXT: v_lshrrev_b32_e32 v3, 24, v65
+; GFX11-TRUE16-NEXT: s_bfe_u32 s12, s11, 0x10010
+; GFX11-TRUE16-NEXT: v_lshrrev_b32_e32 v15, 16, v64
+; GFX11-TRUE16-NEXT: s_add_i32 s12, s12, s11
+; GFX11-TRUE16-NEXT: s_bitset1_b32 s11, 22
+; GFX11-TRUE16-NEXT: s_addk_i32 s12, 0x7fff
+; GFX11-TRUE16-NEXT: s_and_b32 s14, vcc_lo, exec_lo
+; GFX11-TRUE16-NEXT: s_cselect_b32 s12, s11, s12
+; GFX11-TRUE16-NEXT: v_lshrrev_b32_e32 v64, 8, v64
+; GFX11-TRUE16-NEXT: s_lshr_b32 s26, s12, 16
+; GFX11-TRUE16-NEXT: v_lshrrev_b32_e32 v65, 24, v69
+; GFX11-TRUE16-NEXT: v_lshrrev_b32_e32 v66, 8, v69
+; GFX11-TRUE16-NEXT: v_lshrrev_b32_e32 v67, 16, v68
+; GFX11-TRUE16-NEXT: v_lshrrev_b32_e32 v68, 8, v68
+; GFX11-TRUE16-NEXT: v_lshrrev_b32_e32 v69, 24, v23
+; GFX11-TRUE16-NEXT: v_lshrrev_b32_e32 v23, 8, v23
+; GFX11-TRUE16-NEXT: v_lshrrev_b32_e32 v70, 16, v22
+; GFX11-TRUE16-NEXT: v_lshrrev_b32_e32 v22, 8, v22
+; GFX11-TRUE16-NEXT: v_lshrrev_b32_e32 v71, 24, v21
+; GFX11-TRUE16-NEXT: v_lshrrev_b32_e32 v21, 8, v21
+; GFX11-TRUE16-NEXT: v_lshrrev_b32_e32 v80, 16, v20
+; GFX11-TRUE16-NEXT: v_lshrrev_b32_e32 v20, 8, v20
+; GFX11-TRUE16-NEXT: v_lshrrev_b32_e32 v81, 24, v19
+; GFX11-TRUE16-NEXT: v_lshrrev_b32_e32 v19, 8, v19
+; GFX11-TRUE16-NEXT: v_lshrrev_b32_e32 v82, 16, v18
+; GFX11-TRUE16-NEXT: v_lshrrev_b32_e32 v18, 8, v18
+; GFX11-TRUE16-NEXT: v_lshrrev_b32_e32 v83, 24, v17
+; GFX11-TRUE16-NEXT: v_lshrrev_b32_e32 v17, 8, v17
+; GFX11-TRUE16-NEXT: v_lshrrev_b32_e32 v84, 16, v16
+; GFX11-TRUE16-NEXT: v_lshrrev_b32_e32 v16, 8, v16
+; GFX11-TRUE16-NEXT: v_lshrrev_b32_e32 v85, 24, v12
+; GFX11-TRUE16-NEXT: v_lshrrev_b32_e32 v12, 8, v12
+; GFX11-TRUE16-NEXT: v_lshrrev_b32_e32 v86, 16, v11
+; GFX11-TRUE16-NEXT: v_lshrrev_b32_e32 v11, 8, v11
+; GFX11-TRUE16-NEXT: v_lshrrev_b32_e32 v5, 8, v5
+; GFX11-TRUE16-NEXT: v_lshrrev_b32_e32 v4, 8, v4
+; GFX11-TRUE16-NEXT: s_pack_ll_b32_b16 s7, s21, s62
+; GFX11-TRUE16-NEXT: s_pack_ll_b32_b16 s11, s25, s72
+; GFX11-TRUE16-NEXT: s_pack_ll_b32_b16 s47, s27, s73
+; GFX11-TRUE16-NEXT: s_pack_ll_b32_b16 s46, s26, s13
+; GFX11-TRUE16-NEXT: s_pack_ll_b32_b16 s10, s24, s10
+; GFX11-TRUE16-NEXT: s_lshr_b64 s[94:95], s[8:9], 24
+; GFX11-TRUE16-NEXT: s_lshr_b64 s[12:13], s[4:5], 24
+; GFX11-TRUE16-NEXT: s_lshr_b64 s[14:15], s[44:45], 24
+; GFX11-TRUE16-NEXT: s_lshr_b64 s[40:41], s[42:43], 24
+; GFX11-TRUE16-NEXT: s_lshr_b64 s[56:57], s[28:29], 24
+; GFX11-TRUE16-NEXT: s_lshr_b64 vcc, s[46:47], 24
+; GFX11-TRUE16-NEXT: s_lshr_b64 s[34:35], s[10:11], 24
+; GFX11-TRUE16-NEXT: s_lshr_b64 s[30:31], s[6:7], 24
+; GFX11-TRUE16-NEXT: s_lshr_b32 s13, s47, 24
+; GFX11-TRUE16-NEXT: s_lshr_b32 s15, s47, 8
+; GFX11-TRUE16-NEXT: s_lshr_b32 s41, s46, 16
+; GFX11-TRUE16-NEXT: s_lshr_b32 s46, s46, 8
+; GFX11-TRUE16-NEXT: s_lshr_b32 s47, s11, 24
+; GFX11-TRUE16-NEXT: s_lshr_b32 s11, s11, 8
+; GFX11-TRUE16-NEXT: s_lshr_b32 s57, s10, 16
+; GFX11-TRUE16-NEXT: s_lshr_b32 s10, s10, 8
+; GFX11-TRUE16-NEXT: s_lshr_b32 s74, s9, 24
+; GFX11-TRUE16-NEXT: s_lshr_b32 s9, s9, 8
+; GFX11-TRUE16-NEXT: s_lshr_b32 s75, s8, 16
+; GFX11-TRUE16-NEXT: s_lshr_b32 s8, s8, 8
+; GFX11-TRUE16-NEXT: s_lshr_b32 s76, s7, 24
+; GFX11-TRUE16-NEXT: s_lshr_b32 s77, s7, 8
+; GFX11-TRUE16-NEXT: s_lshr_b32 s78, s6, 16
+; GFX11-TRUE16-NEXT: s_lshr_b32 s79, s6, 8
+; GFX11-TRUE16-NEXT: s_lshr_b32 s88, s5, 24
+; GFX11-TRUE16-NEXT: s_lshr_b32 s89, s5, 8
+; GFX11-TRUE16-NEXT: s_lshr_b32 s90, s4, 16
+; GFX11-TRUE16-NEXT: s_lshr_b32 s91, s4, 8
+; GFX11-TRUE16-NEXT: s_lshr_b32 s92, s45, 24
+; GFX11-TRUE16-NEXT: s_lshr_b32 s45, s45, 8
+; GFX11-TRUE16-NEXT: s_lshr_b32 s93, s44, 16
+; GFX11-TRUE16-NEXT: s_lshr_b32 s44, s44, 8
+; GFX11-TRUE16-NEXT: s_lshr_b32 s95, s43, 24
+; GFX11-TRUE16-NEXT: s_lshr_b32 s43, s43, 8
+; GFX11-TRUE16-NEXT: s_lshr_b32 s99, s42, 16
+; GFX11-TRUE16-NEXT: s_lshr_b32 s100, s42, 8
+; GFX11-TRUE16-NEXT: s_lshr_b32 s101, s29, 24
+; GFX11-TRUE16-NEXT: s_lshr_b32 s102, s29, 8
+; GFX11-TRUE16-NEXT: s_lshr_b32 s103, s28, 16
+; GFX11-TRUE16-NEXT: s_lshr_b32 s104, s28, 8
+; GFX11-TRUE16-NEXT: s_branch .LBB91_5
+; GFX11-TRUE16-NEXT: .LBB91_3:
+; GFX11-TRUE16-NEXT: ; implicit-def: $sgpr4
+; GFX11-TRUE16-NEXT: ; kill: killed $sgpr4
+; GFX11-TRUE16-NEXT: ; implicit-def: $sgpr74
+; GFX11-TRUE16-NEXT: ; implicit-def: $sgpr4
+; GFX11-TRUE16-NEXT: ; kill: killed $sgpr4
+; GFX11-TRUE16-NEXT: ; implicit-def: $sgpr104
+; GFX11-TRUE16-NEXT: ; implicit-def: $sgpr103
+; GFX11-TRUE16-NEXT: ; implicit-def: $sgpr56
+; GFX11-TRUE16-NEXT: ; implicit-def: $sgpr102
+; GFX11-TRUE16-NEXT: ; implicit-def: $sgpr11
+; GFX11-TRUE16-NEXT: ; implicit-def: $sgpr101
+; GFX11-TRUE16-NEXT: ; implicit-def: $sgpr100
+; GFX11-TRUE16-NEXT: ; implicit-def: $sgpr99
+; GFX11-TRUE16-NEXT: ; implicit-def: $sgpr40
+; GFX11-TRUE16-NEXT: ; implicit-def: $sgpr14
+; GFX11-TRUE16-NEXT: ; implicit-def: $sgpr12
+; GFX11-TRUE16-NEXT: ; implicit-def: $sgpr49
+; GFX11-TRUE16-NEXT: ; implicit-def: $sgpr37
+; GFX11-TRUE16-NEXT: ; implicit-def: $sgpr35
+; GFX11-TRUE16-NEXT: ; implicit-def: $sgpr6
+; GFX11-TRUE16-NEXT: ; implicit-def: $sgpr34
+; GFX11-TRUE16-NEXT: ; implicit-def: $sgpr52
+; GFX11-TRUE16-NEXT: ; implicit-def: $sgpr53
+; GFX11-TRUE16-NEXT: ; implicit-def: $sgpr50
+; GFX11-TRUE16-NEXT: ; implicit-def: $sgpr7
+; GFX11-TRUE16-NEXT: ; implicit-def: $sgpr36
+; GFX11-TRUE16-NEXT: ; implicit-def: $sgpr64
+; GFX11-TRUE16-NEXT: ; implicit-def: $sgpr38
+; GFX11-TRUE16-NEXT: ; implicit-def: $sgpr54
+; GFX11-TRUE16-NEXT: ; implicit-def: $sgpr96
+; GFX11-TRUE16-NEXT: ; implicit-def: $sgpr51
+; GFX11-TRUE16-NEXT: ; implicit-def: $sgpr67
+; GFX11-TRUE16-NEXT: ; implicit-def: $sgpr68
+; GFX11-TRUE16-NEXT: ; implicit-def: $sgpr65
+; GFX11-TRUE16-NEXT: ; implicit-def: $sgpr8
+; GFX11-TRUE16-NEXT: ; implicit-def: $sgpr55
+; GFX11-TRUE16-NEXT: ; implicit-def: $sgpr39
+; GFX11-TRUE16-NEXT: ; implicit-def: $sgpr71
+; GFX11-TRUE16-NEXT: ; implicit-def: $sgpr69
+; GFX11-TRUE16-NEXT: ; implicit-def: $sgpr9
+; GFX11-TRUE16-NEXT: ; implicit-def: $sgpr66
+; GFX11-TRUE16-NEXT: ; implicit-def: $sgpr82
+; GFX11-TRUE16-NEXT: ; implicit-def: $sgpr83
+; GFX11-TRUE16-NEXT: ; implicit-def: $sgpr80
+; GFX11-TRUE16-NEXT: ; implicit-def: $sgpr97
+; GFX11-TRUE16-NEXT: ; implicit-def: $sgpr70
+; GFX11-TRUE16-NEXT: ; implicit-def: $sgpr48
+; GFX11-TRUE16-NEXT: ; implicit-def: $sgpr84
+; GFX11-TRUE16-NEXT: ; implicit-def: $sgpr98
+; GFX11-TRUE16-NEXT: ; implicit-def: $sgpr81
+; GFX11-TRUE16-NEXT: ; implicit-def: $sgpr86
+; GFX11-TRUE16-NEXT: ; implicit-def: $sgpr87
+; GFX11-TRUE16-NEXT: ; implicit-def: $sgpr10
+; GFX11-TRUE16-NEXT: ; implicit-def: $sgpr85
+; GFX11-TRUE16-NEXT: ; implicit-def: $sgpr30
+; GFX11-TRUE16-NEXT: ; implicit-def: $sgpr94
+; GFX11-TRUE16-NEXT: ; implicit-def: $sgpr92
+; GFX11-TRUE16-NEXT: ; implicit-def: $sgpr90
+; GFX11-TRUE16-NEXT: ; implicit-def: $sgpr88
+; GFX11-TRUE16-NEXT: ; implicit-def: $sgpr78
+; GFX11-TRUE16-NEXT: ; implicit-def: $sgpr76
+; GFX11-TRUE16-NEXT: ; implicit-def: $sgpr4
+; GFX11-TRUE16-NEXT: ; kill: killed $sgpr4
+; GFX11-TRUE16-NEXT: ; implicit-def: $sgpr4
+; GFX11-TRUE16-NEXT: ; kill: killed $sgpr4
+; GFX11-TRUE16-NEXT: ; implicit-def: $sgpr4
+; GFX11-TRUE16-NEXT: ; kill: killed $sgpr4
+; GFX11-TRUE16-NEXT: ; implicit-def: $sgpr4
+; GFX11-TRUE16-NEXT: ; kill: killed $sgpr4
+; GFX11-TRUE16-NEXT: ; implicit-def: $sgpr4
+; GFX11-TRUE16-NEXT: ; kill: killed $sgpr4
+; GFX11-TRUE16-NEXT: ; implicit-def: $sgpr4
+; GFX11-TRUE16-NEXT: ; kill: killed $sgpr4
+; GFX11-TRUE16-NEXT: ; implicit-def: $sgpr4
+; GFX11-TRUE16-NEXT: ; kill: killed $sgpr4
+; GFX11-TRUE16-NEXT: ; implicit-def: $sgpr4
+; GFX11-TRUE16-NEXT: ; kill: killed $sgpr4
+; GFX11-TRUE16-NEXT: ; implicit-def: $sgpr4
+; GFX11-TRUE16-NEXT: ; kill: killed $sgpr4
+; GFX11-TRUE16-NEXT: ; implicit-def: $sgpr4
+; GFX11-TRUE16-NEXT: ; kill: killed $sgpr4
+; GFX11-TRUE16-NEXT: ; implicit-def: $sgpr4
+; GFX11-TRUE16-NEXT: ; kill: killed $sgpr4
+; GFX11-TRUE16-NEXT: ; implicit-def: $sgpr4
+; GFX11-TRUE16-NEXT: ; kill: killed $sgpr4
+; GFX11-TRUE16-NEXT: ; implicit-def: $sgpr4
+; GFX11-TRUE16-NEXT: ; kill: killed $sgpr4
+; GFX11-TRUE16-NEXT: ; implicit-def: $sgpr4
+; GFX11-TRUE16-NEXT: v_writelane_b32 v43, s4, 0
+; GFX11-TRUE16-NEXT: v_writelane_b32 v43, s5, 1
+; GFX11-TRUE16-NEXT: ; implicit-def: $sgpr4
+; GFX11-TRUE16-NEXT: ; kill: killed $sgpr4
+; GFX11-TRUE16-NEXT: ; implicit-def: $sgpr4
+; GFX11-TRUE16-NEXT: ; kill: killed $sgpr4
+; GFX11-TRUE16-NEXT: ; implicit-def: $sgpr4
+; GFX11-TRUE16-NEXT: ; kill: killed $sgpr4
+; GFX11-TRUE16-NEXT: ; implicit-def: $sgpr4
+; GFX11-TRUE16-NEXT: ; kill: killed $sgpr4
+; GFX11-TRUE16-NEXT: ; implicit-def: $sgpr4
+; GFX11-TRUE16-NEXT: ; kill: killed $sgpr4
+; GFX11-TRUE16-NEXT: ; implicit-def: $sgpr4
+; GFX11-TRUE16-NEXT: v_writelane_b32 v43, s4, 2
+; GFX11-TRUE16-NEXT: v_writelane_b32 v43, s5, 3
+; GFX11-TRUE16-NEXT: ; implicit-def: $sgpr4
+; GFX11-TRUE16-NEXT: ; kill: killed $sgpr4
+; GFX11-TRUE16-NEXT: ; implicit-def: $sgpr4
+; GFX11-TRUE16-NEXT: ; kill: killed $sgpr4
+; GFX11-TRUE16-NEXT: ; implicit-def: $sgpr4
+; GFX11-TRUE16-NEXT: ; kill: killed $sgpr4
+; GFX11-TRUE16-NEXT: ; implicit-def: $sgpr4
+; GFX11-TRUE16-NEXT: ; kill: killed $sgpr4
+; GFX11-TRUE16-NEXT: ; implicit-def: $sgpr4
+; GFX11-TRUE16-NEXT: v_writelane_b32 v43, s74, 4
+; GFX11-TRUE16-NEXT: ; kill: killed $sgpr4
+; GFX11-TRUE16-NEXT: ; implicit-def: $sgpr4
+; GFX11-TRUE16-NEXT: ; kill: killed $sgpr4
+; GFX11-TRUE16-NEXT: ; implicit-def: $sgpr4
+; GFX11-TRUE16-NEXT: ; kill: killed $sgpr4
+; GFX11-TRUE16-NEXT: ; implicit-def: $sgpr4
+; GFX11-TRUE16-NEXT: ; kill: killed $sgpr4
+; GFX11-TRUE16-NEXT: ; implicit-def: $sgpr4
+; GFX11-TRUE16-NEXT: ; kill: killed $sgpr4
+; GFX11-TRUE16-NEXT: ; implicit-def: $sgpr4
+; GFX11-TRUE16-NEXT: v_writelane_b32 v43, s75, 5
+; GFX11-TRUE16-NEXT: ; implicit-def: $sgpr74
+; GFX11-TRUE16-NEXT: ; kill: killed $sgpr4
+; GFX11-TRUE16-NEXT: ; implicit-def: $sgpr4
+; GFX11-TRUE16-NEXT: ; kill: killed $sgpr4
+; GFX11-TRUE16-NEXT: ; implicit-def: $sgpr4
+; GFX11-TRUE16-NEXT: ; kill: killed $sgpr4
+; GFX11-TRUE16-NEXT: ; implicit-def: $sgpr4
+; GFX11-TRUE16-NEXT: ; kill: killed $sgpr4
+; GFX11-TRUE16-NEXT: ; implicit-def: $sgpr4
+; GFX11-TRUE16-NEXT: ; implicit-def: $sgpr5
+; GFX11-TRUE16-NEXT: v_writelane_b32 v43, s74, 6
+; GFX11-TRUE16-NEXT: v_writelane_b32 v43, s75, 7
+; GFX11-TRUE16-NEXT: ; implicit-def: $sgpr74
+; GFX11-TRUE16-NEXT: s_branch .LBB91_2
+; GFX11-TRUE16-NEXT: .LBB91_4:
+; GFX11-TRUE16-NEXT: v_dual_mov_b32 v13, s94 :: v_dual_mov_b32 v14, s30
+; GFX11-TRUE16-NEXT: v_readlane_b32 s94, v43, 2
+; GFX11-TRUE16-NEXT: v_dual_mov_b32 v96, s37 :: v_dual_mov_b32 v87, s34
+; GFX11-TRUE16-NEXT: v_dual_mov_b32 v4, s49 :: v_dual_mov_b32 v5, s35
+; GFX11-TRUE16-NEXT: v_readlane_b32 s95, v43, 3
+; GFX11-TRUE16-NEXT: v_readlane_b32 vcc_lo, v43, 6
+; GFX11-TRUE16-NEXT: v_readlane_b32 s30, v43, 0
+; GFX11-TRUE16-NEXT: v_readlane_b32 s34, v43, 4
+; GFX11-TRUE16-NEXT: v_dual_mov_b32 v55, s42 :: v_dual_mov_b32 v54, s43
+; GFX11-TRUE16-NEXT: v_dual_mov_b32 v52, s10 :: v_dual_mov_b32 v53, s44
+; GFX11-TRUE16-NEXT: v_dual_mov_b32 v50, s45 :: v_dual_mov_b32 v49, s98
+; GFX11-TRUE16-NEXT: v_dual_mov_b32 v51, s46 :: v_dual_mov_b32 v38, s47
+; GFX11-TRUE16-NEXT: v_dual_mov_b32 v48, s97 :: v_dual_mov_b32 v39, s58
+; GFX11-TRUE16-NEXT: v_dual_mov_b32 v35, s59 :: v_dual_mov_b32 v36, s60
+; GFX11-TRUE16-NEXT: v_dual_mov_b32 v37, s9 :: v_dual_mov_b32 v32, s61
+; GFX11-TRUE16-NEXT: v_dual_mov_b32 v34, s8 :: v_dual_mov_b32 v33, s62
+; GFX11-TRUE16-NEXT: v_dual_mov_b32 v29, s63 :: v_dual_mov_b32 v30, s72
+; GFX11-TRUE16-NEXT: v_dual_mov_b32 v31, s96 :: v_dual_mov_b32 v26, s73
+; GFX11-TRUE16-NEXT: v_dual_mov_b32 v28, s7 :: v_dual_mov_b32 v27, s28
+; GFX11-TRUE16-NEXT: v_dual_mov_b32 v24, s29 :: v_dual_mov_b32 v25, s6
+; GFX11-TRUE16-NEXT: v_dual_mov_b32 v15, s87 :: v_dual_mov_b32 v64, s86
+; GFX11-TRUE16-NEXT: v_dual_mov_b32 v3, s85 :: v_dual_mov_b32 v10, s5
+; GFX11-TRUE16-NEXT: v_dual_mov_b32 v67, s4 :: v_dual_mov_b32 v68, s48
+; GFX11-TRUE16-NEXT: v_dual_mov_b32 v65, s81 :: v_dual_mov_b32 v66, s84
+; GFX11-TRUE16-NEXT: v_dual_mov_b32 v70, s83 :: v_dual_mov_b32 v69, s70
+; GFX11-TRUE16-NEXT: v_dual_mov_b32 v22, s82 :: v_dual_mov_b32 v23, s80
+; GFX11-TRUE16-NEXT: v_dual_mov_b32 v80, s71 :: v_dual_mov_b32 v71, s66
+; GFX11-TRUE16-NEXT: v_dual_mov_b32 v20, s39 :: v_dual_mov_b32 v21, s69
+; GFX11-TRUE16-NEXT: v_dual_mov_b32 v82, s68 :: v_dual_mov_b32 v81, s55
+; GFX11-TRUE16-NEXT: v_dual_mov_b32 v18, s67 :: v_dual_mov_b32 v19, s65
+; GFX11-TRUE16-NEXT: v_dual_mov_b32 v84, s38 :: v_dual_mov_b32 v83, s51
+; GFX11-TRUE16-NEXT: v_dual_mov_b32 v16, s64 :: v_dual_mov_b32 v17, s54
+; GFX11-TRUE16-NEXT: v_dual_mov_b32 v86, s53 :: v_dual_mov_b32 v11, s52
+; GFX11-TRUE16-NEXT: v_dual_mov_b32 v85, s36 :: v_dual_mov_b32 v12, s50
+; GFX11-TRUE16-NEXT: v_dual_mov_b32 v1, s74 :: v_dual_mov_b32 v2, s76
+; GFX11-TRUE16-NEXT: v_dual_mov_b32 v6, s78 :: v_dual_mov_b32 v7, s88
+; GFX11-TRUE16-NEXT: v_dual_mov_b32 v8, s90 :: v_dual_mov_b32 v9, s92
+; GFX11-TRUE16-NEXT: s_mov_b32 s58, s11
+; GFX11-TRUE16-NEXT: v_readlane_b32 s59, v43, 8
+; GFX11-TRUE16-NEXT: v_readlane_b32 s60, v43, 9
+; GFX11-TRUE16-NEXT: v_readlane_b32 s61, v43, 10
+; GFX11-TRUE16-NEXT: v_readlane_b32 s62, v43, 11
+; GFX11-TRUE16-NEXT: v_readlane_b32 s63, v43, 12
+; GFX11-TRUE16-NEXT: v_readlane_b32 s72, v43, 13
+; GFX11-TRUE16-NEXT: v_readlane_b32 s73, v43, 14
+; GFX11-TRUE16-NEXT: v_readlane_b32 s13, v43, 15
+; GFX11-TRUE16-NEXT: v_readlane_b32 s15, v43, 16
+; GFX11-TRUE16-NEXT: v_readlane_b32 s41, v43, 17
+; GFX11-TRUE16-NEXT: v_readlane_b32 s46, v43, 18
+; GFX11-TRUE16-NEXT: v_readlane_b32 s47, v43, 19
+; GFX11-TRUE16-NEXT: v_readlane_b32 s11, v43, 20
+; GFX11-TRUE16-NEXT: v_readlane_b32 s57, v43, 21
+; GFX11-TRUE16-NEXT: v_readlane_b32 s10, v43, 22
+; GFX11-TRUE16-NEXT: v_readlane_b32 s74, v43, 23
+; GFX11-TRUE16-NEXT: v_readlane_b32 s9, v43, 24
+; GFX11-TRUE16-NEXT: v_readlane_b32 s75, v43, 25
+; GFX11-TRUE16-NEXT: v_readlane_b32 s8, v43, 26
+; GFX11-TRUE16-NEXT: v_readlane_b32 s76, v43, 27
+; GFX11-TRUE16-NEXT: v_readlane_b32 s77, v43, 28
+; GFX11-TRUE16-NEXT: v_readlane_b32 s78, v43, 29
+; GFX11-TRUE16-NEXT: v_readlane_b32 s79, v43, 30
+; GFX11-TRUE16-NEXT: v_readlane_b32 s88, v43, 31
+; GFX11-TRUE16-NEXT: v_readlane_b32 s89, v42, 0
+; GFX11-TRUE16-NEXT: v_readlane_b32 s90, v42, 1
+; GFX11-TRUE16-NEXT: v_readlane_b32 s91, v42, 2
+; GFX11-TRUE16-NEXT: v_readlane_b32 s92, v42, 3
+; GFX11-TRUE16-NEXT: v_readlane_b32 s45, v42, 4
+; GFX11-TRUE16-NEXT: v_readlane_b32 s93, v42, 5
+; GFX11-TRUE16-NEXT: v_readlane_b32 vcc_hi, v43, 7
+; GFX11-TRUE16-NEXT: v_readlane_b32 s44, v42, 6
+; GFX11-TRUE16-NEXT: v_readlane_b32 s31, v43, 1
+; GFX11-TRUE16-NEXT: v_readlane_b32 s95, v42, 7
+; GFX11-TRUE16-NEXT: v_readlane_b32 s43, v42, 8
+; GFX11-TRUE16-NEXT: v_readlane_b32 s35, v43, 5
+; GFX11-TRUE16-NEXT: .LBB91_5: ; %end
+; GFX11-TRUE16-NEXT: s_and_b32 s0, s0, 0xff
+; GFX11-TRUE16-NEXT: s_lshl_b32 s4, s104, 8
+; GFX11-TRUE16-NEXT: s_and_b32 s5, s103, 0xff
+; GFX11-TRUE16-NEXT: s_lshl_b32 s6, s56, 8
+; GFX11-TRUE16-NEXT: s_or_b32 s0, s0, s4
+; GFX11-TRUE16-NEXT: s_or_b32 s4, s5, s6
+; GFX11-TRUE16-NEXT: s_and_b32 s1, s1, 0xff
+; GFX11-TRUE16-NEXT: s_lshl_b32 s5, s102, 8
+; GFX11-TRUE16-NEXT: s_and_b32 s6, s58, 0xff
+; GFX11-TRUE16-NEXT: s_lshl_b32 s7, s101, 8
+; GFX11-TRUE16-NEXT: s_or_b32 s1, s1, s5
+; GFX11-TRUE16-NEXT: s_or_b32 s5, s6, s7
+; GFX11-TRUE16-NEXT: s_and_b32 s0, s0, 0xffff
+; GFX11-TRUE16-NEXT: s_lshl_b32 s4, s4, 16
+; GFX11-TRUE16-NEXT: s_and_b32 s1, s1, 0xffff
+; GFX11-TRUE16-NEXT: s_lshl_b32 s5, s5, 16
+; GFX11-TRUE16-NEXT: s_or_b32 s0, s0, s4
+; GFX11-TRUE16-NEXT: s_or_b32 s1, s1, s5
+; GFX11-TRUE16-NEXT: s_and_b32 s2, s2, 0xff
+; GFX11-TRUE16-NEXT: s_lshl_b32 s4, s100, 8
+; GFX11-TRUE16-NEXT: s_and_b32 s5, s99, 0xff
+; GFX11-TRUE16-NEXT: s_lshl_b32 s6, s40, 8
+; GFX11-TRUE16-NEXT: s_or_b32 s2, s2, s4
+; GFX11-TRUE16-NEXT: s_or_b32 s4, s5, s6
+; GFX11-TRUE16-NEXT: s_and_b32 s3, s3, 0xff
+; GFX11-TRUE16-NEXT: s_lshl_b32 s5, s43, 8
+; GFX11-TRUE16-NEXT: s_and_b32 s6, s59, 0xff
+; GFX11-TRUE16-NEXT: s_lshl_b32 s7, s95, 8
+; GFX11-TRUE16-NEXT: s_or_b32 s3, s3, s5
+; GFX11-TRUE16-NEXT: s_or_b32 s5, s6, s7
+; GFX11-TRUE16-NEXT: s_and_b32 s2, s2, 0xffff
+; GFX11-TRUE16-NEXT: s_lshl_b32 s4, s4, 16
+; GFX11-TRUE16-NEXT: s_and_b32 s3, s3, 0xffff
+; GFX11-TRUE16-NEXT: s_lshl_b32 s5, s5, 16
+; GFX11-TRUE16-NEXT: s_or_b32 s2, s2, s4
+; GFX11-TRUE16-NEXT: s_or_b32 s3, s3, s5
+; GFX11-TRUE16-NEXT: v_dual_mov_b32 v97, s0 :: v_dual_mov_b32 v98, s1
+; GFX11-TRUE16-NEXT: v_dual_mov_b32 v99, s2 :: v_dual_mov_b32 v100, s3
+; GFX11-TRUE16-NEXT: s_and_b32 s0, s16, 0xff
+; GFX11-TRUE16-NEXT: s_lshl_b32 s1, s44, 8
+; GFX11-TRUE16-NEXT: s_and_b32 s2, s93, 0xff
+; GFX11-TRUE16-NEXT: s_lshl_b32 s3, s14, 8
+; GFX11-TRUE16-NEXT: s_or_b32 s0, s0, s1
+; GFX11-TRUE16-NEXT: s_or_b32 s1, s2, s3
+; GFX11-TRUE16-NEXT: s_and_b32 s2, s17, 0xff
+; GFX11-TRUE16-NEXT: s_lshl_b32 s3, s45, 8
+; GFX11-TRUE16-NEXT: s_and_b32 s4, s60, 0xff
+; GFX11-TRUE16-NEXT: s_lshl_b32 s5, s92, 8
+; GFX11-TRUE16-NEXT: s_or_b32 s2, s2, s3
+; GFX11-TRUE16-NEXT: s_or_b32 s3, s4, s5
+; GFX11-TRUE16-NEXT: s_and_b32 s0, s0, 0xffff
+; GFX11-TRUE16-NEXT: s_lshl_b32 s1, s1, 16
+; GFX11-TRUE16-NEXT: s_and_b32 s2, s2, 0xffff
+; GFX11-TRUE16-NEXT: s_lshl_b32 s3, s3, 16
+; GFX11-TRUE16-NEXT: s_or_b32 s0, s0, s1
+; GFX11-TRUE16-NEXT: s_or_b32 s1, s2, s3
+; GFX11-TRUE16-NEXT: s_and_b32 s2, s18, 0xff
+; GFX11-TRUE16-NEXT: s_lshl_b32 s3, s91, 8
+; GFX11-TRUE16-NEXT: s_and_b32 s4, s90, 0xff
+; GFX11-TRUE16-NEXT: s_lshl_b32 s5, s12, 8
+; GFX11-TRUE16-NEXT: s_or_b32 s2, s2, s3
+; GFX11-TRUE16-NEXT: s_or_b32 s3, s4, s5
+; GFX11-TRUE16-NEXT: s_and_b32 s4, s19, 0xff
+; GFX11-TRUE16-NEXT: s_lshl_b32 s5, s89, 8
+; GFX11-TRUE16-NEXT: s_and_b32 s6, s61, 0xff
+; GFX11-TRUE16-NEXT: s_lshl_b32 s7, s88, 8
+; GFX11-TRUE16-NEXT: s_or_b32 s4, s4, s5
+; GFX11-TRUE16-NEXT: s_or_b32 s5, s6, s7
+; GFX11-TRUE16-NEXT: s_and_b32 s2, s2, 0xffff
+; GFX11-TRUE16-NEXT: s_lshl_b32 s3, s3, 16
+; GFX11-TRUE16-NEXT: s_and_b32 s4, s4, 0xffff
+; GFX11-TRUE16-NEXT: s_lshl_b32 s5, s5, 16
+; GFX11-TRUE16-NEXT: s_or_b32 s2, s2, s3
+; GFX11-TRUE16-NEXT: s_or_b32 s3, s4, s5
+; GFX11-TRUE16-NEXT: v_dual_mov_b32 v112, s0 :: v_dual_mov_b32 v113, s1
+; GFX11-TRUE16-NEXT: v_dual_mov_b32 v114, s2 :: v_dual_mov_b32 v115, s3
+; GFX11-TRUE16-NEXT: s_and_b32 s0, s20, 0xff
+; GFX11-TRUE16-NEXT: s_lshl_b32 s1, s79, 8
+; GFX11-TRUE16-NEXT: s_and_b32 s2, s78, 0xff
+; GFX11-TRUE16-NEXT: s_lshl_b32 s3, s30, 8
+; GFX11-TRUE16-NEXT: s_or_b32 s0, s0, s1
+; GFX11-TRUE16-NEXT: s_or_b32 s1, s2, s3
+; GFX11-TRUE16-NEXT: s_and_b32 s2, s21, 0xff
+; GFX11-TRUE16-NEXT: s_lshl_b32 s3, s77, 8
+; GFX11-TRUE16-NEXT: s_and_b32 s4, s62, 0xff
+; GFX11-TRUE16-NEXT: s_lshl_b32 s5, s76, 8
+; GFX11-TRUE16-NEXT: s_or_b32 s2, s2, s3
+; GFX11-TRUE16-NEXT: s_or_b32 s3, s4, s5
+; GFX11-TRUE16-NEXT: s_and_b32 s0, s0, 0xffff
+; GFX11-TRUE16-NEXT: s_lshl_b32 s1, s1, 16
+; GFX11-TRUE16-NEXT: s_and_b32 s2, s2, 0xffff
+; GFX11-TRUE16-NEXT: s_lshl_b32 s3, s3, 16
+; GFX11-TRUE16-NEXT: s_or_b32 s0, s0, s1
+; GFX11-TRUE16-NEXT: s_or_b32 s1, s2, s3
+; GFX11-TRUE16-NEXT: s_and_b32 s2, s22, 0xff
+; GFX11-TRUE16-NEXT: s_lshl_b32 s3, s8, 8
+; GFX11-TRUE16-NEXT: s_and_b32 s4, s75, 0xff
+; GFX11-TRUE16-NEXT: s_lshl_b32 s5, s94, 8
+; GFX11-TRUE16-NEXT: s_or_b32 s2, s2, s3
+; GFX11-TRUE16-NEXT: s_or_b32 s3, s4, s5
+; GFX11-TRUE16-NEXT: s_and_b32 s4, s23, 0xff
+; GFX11-TRUE16-NEXT: s_lshl_b32 s5, s9, 8
+; GFX11-TRUE16-NEXT: s_and_b32 s6, s63, 0xff
+; GFX11-TRUE16-NEXT: s_lshl_b32 s7, s74, 8
+; GFX11-TRUE16-NEXT: s_and_b32 s2, s2, 0xffff
+; GFX11-TRUE16-NEXT: s_lshl_b32 s3, s3, 16
+; GFX11-TRUE16-NEXT: s_or_b32 s4, s4, s5
+; GFX11-TRUE16-NEXT: s_or_b32 s5, s6, s7
+; GFX11-TRUE16-NEXT: s_and_b32 s4, s4, 0xffff
+; GFX11-TRUE16-NEXT: s_lshl_b32 s5, s5, 16
+; GFX11-TRUE16-NEXT: s_or_b32 s2, s2, s3
+; GFX11-TRUE16-NEXT: s_clause 0x1
+; GFX11-TRUE16-NEXT: scratch_store_b128 v0, v[97:100], off
+; GFX11-TRUE16-NEXT: scratch_store_b128 v0, v[112:115], off offset:16
+; GFX11-TRUE16-NEXT: s_or_b32 s3, s4, s5
+; GFX11-TRUE16-NEXT: v_dual_mov_b32 v97, s0 :: v_dual_mov_b32 v98, s1
+; GFX11-TRUE16-NEXT: v_dual_mov_b32 v99, s2 :: v_dual_mov_b32 v100, s3
+; GFX11-TRUE16-NEXT: s_and_b32 s0, s24, 0xff
+; GFX11-TRUE16-NEXT: s_lshl_b32 s1, s10, 8
+; GFX11-TRUE16-NEXT: s_and_b32 s2, s57, 0xff
+; GFX11-TRUE16-NEXT: s_lshl_b32 s4, s34, 8
+; GFX11-TRUE16-NEXT: s_or_b32 s0, s0, s1
+; GFX11-TRUE16-NEXT: s_or_b32 s1, s2, s4
+; GFX11-TRUE16-NEXT: s_and_b32 s0, s0, 0xffff
+; GFX11-TRUE16-NEXT: s_lshl_b32 s1, s1, 16
+; GFX11-TRUE16-NEXT: s_lshl_b32 s2, s11, 8
+; GFX11-TRUE16-NEXT: s_or_b32 s0, s0, s1
+; GFX11-TRUE16-NEXT: s_and_b32 s1, s25, 0xff
+; GFX11-TRUE16-NEXT: s_and_b32 s3, s72, 0xff
+; GFX11-TRUE16-NEXT: s_lshl_b32 s4, s47, 8
+; GFX11-TRUE16-NEXT: s_or_b32 s1, s1, s2
+; GFX11-TRUE16-NEXT: s_or_b32 s2, s3, s4
+; GFX11-TRUE16-NEXT: s_and_b32 s1, s1, 0xffff
+; GFX11-TRUE16-NEXT: s_lshl_b32 s2, s2, 16
+; GFX11-TRUE16-NEXT: s_and_b32 s3, s26, 0xff
+; GFX11-TRUE16-NEXT: s_lshl_b32 s4, s46, 8
+; GFX11-TRUE16-NEXT: s_or_b32 s1, s1, s2
+; GFX11-TRUE16-NEXT: s_or_b32 s2, s3, s4
+; GFX11-TRUE16-NEXT: s_and_b32 s3, s41, 0xff
+; GFX11-TRUE16-NEXT: s_lshl_b32 s4, vcc_lo, 8
+; GFX11-TRUE16-NEXT: s_lshl_b32 s5, s15, 8
+; GFX11-TRUE16-NEXT: s_or_b32 s3, s3, s4
+; GFX11-TRUE16-NEXT: s_and_b32 s4, s27, 0xff
+; GFX11-TRUE16-NEXT: s_lshl_b32 s6, s13, 8
+; GFX11-TRUE16-NEXT: s_or_b32 s4, s4, s5
+; GFX11-TRUE16-NEXT: s_and_b32 s5, s73, 0xff
+; GFX11-TRUE16-NEXT: s_and_b32 s2, s2, 0xffff
+; GFX11-TRUE16-NEXT: s_or_b32 s5, s5, s6
+; GFX11-TRUE16-NEXT: s_lshl_b32 s3, s3, 16
+; GFX11-TRUE16-NEXT: s_and_b32 s4, s4, 0xffff
+; GFX11-TRUE16-NEXT: s_lshl_b32 s5, s5, 16
+; GFX11-TRUE16-NEXT: v_dual_mov_b32 v112, s0 :: v_dual_and_b32 v27, 0xff, v27
+; GFX11-TRUE16-NEXT: v_dual_mov_b32 v113, s1 :: v_dual_lshlrev_b32 v4, 8, v4
+; GFX11-TRUE16-NEXT: v_lshlrev_b32_e32 v14, 8, v14
+; GFX11-TRUE16-NEXT: s_or_b32 s2, s2, s3
+; GFX11-TRUE16-NEXT: s_or_b32 s3, s4, s5
+; GFX11-TRUE16-NEXT: s_delay_alu instid0(SALU_CYCLE_1) | instskip(SKIP_2) | instid1(VALU_DEP_3)
+; GFX11-TRUE16-NEXT: v_dual_mov_b32 v115, s3 :: v_dual_and_b32 v96, 0xff, v96
+; GFX11-TRUE16-NEXT: v_or_b32_e32 v4, v27, v4
+; GFX11-TRUE16-NEXT: v_dual_mov_b32 v114, s2 :: v_dual_lshlrev_b32 v5, 8, v5
+; GFX11-TRUE16-NEXT: v_or_b32_e32 v14, v96, v14
+; GFX11-TRUE16-NEXT: v_lshlrev_b32_e32 v11, 8, v11
+; GFX11-TRUE16-NEXT: s_delay_alu instid0(VALU_DEP_4)
+; GFX11-TRUE16-NEXT: v_and_b32_e32 v4, 0xffff, v4
+; GFX11-TRUE16-NEXT: v_lshlrev_b32_e32 v13, 8, v13
+; GFX11-TRUE16-NEXT: v_and_b32_e32 v26, 0xff, v26
+; GFX11-TRUE16-NEXT: v_lshlrev_b32_e32 v14, 16, v14
+; GFX11-TRUE16-NEXT: v_lshlrev_b32_e32 v12, 8, v12
+; GFX11-TRUE16-NEXT: v_lshlrev_b32_e32 v16, 8, v16
+; GFX11-TRUE16-NEXT: v_lshlrev_b32_e32 v9, 8, v9
+; GFX11-TRUE16-NEXT: v_lshlrev_b32_e32 v17, 8, v17
+; GFX11-TRUE16-NEXT: v_or_b32_e32 v27, v4, v14
+; GFX11-TRUE16-NEXT: v_and_b32_e32 v4, 0xff, v24
+; GFX11-TRUE16-NEXT: v_and_b32_e32 v14, 0xff, v25
+; GFX11-TRUE16-NEXT: v_lshlrev_b32_e32 v24, 8, v87
+; GFX11-TRUE16-NEXT: v_and_b32_e32 v25, 0xff, v30
+; GFX11-TRUE16-NEXT: v_and_b32_e32 v30, 0xff, v86
+; GFX11-TRUE16-NEXT: v_or_b32_e32 v4, v4, v5
+; GFX11-TRUE16-NEXT: v_or_b32_e32 v12, v26, v12
+; GFX11-TRUE16-NEXT: v_or_b32_e32 v5, v14, v24
+; GFX11-TRUE16-NEXT: v_or_b32_e32 v11, v25, v11
+; GFX11-TRUE16-NEXT: v_or_b32_e32 v13, v30, v13
+; GFX11-TRUE16-NEXT: v_and_b32_e32 v14, 0xff, v28
+; GFX11-TRUE16-NEXT: v_lshlrev_b32_e32 v24, 8, v85
+; GFX11-TRUE16-NEXT: v_and_b32_e32 v25, 0xff, v33
+; GFX11-TRUE16-NEXT: v_and_b32_e32 v26, 0xff, v84
+; GFX11-TRUE16-NEXT: v_and_b32_e32 v28, 0xff, v29
+; GFX11-TRUE16-NEXT: v_and_b32_e32 v29, 0xff, v31
+; GFX11-TRUE16-NEXT: v_lshlrev_b32_e32 v30, 8, v83
+; GFX11-TRUE16-NEXT: v_or_b32_e32 v14, v14, v24
+; GFX11-TRUE16-NEXT: v_or_b32_e32 v16, v25, v16
+; GFX11-TRUE16-NEXT: v_or_b32_e32 v9, v26, v9
+; GFX11-TRUE16-NEXT: v_or_b32_e32 v17, v28, v17
+; GFX11-TRUE16-NEXT: v_or_b32_e32 v24, v29, v30
+; GFX11-TRUE16-NEXT: v_and_b32_e32 v4, 0xffff, v4
+; GFX11-TRUE16-NEXT: v_lshlrev_b32_e32 v5, 16, v5
+; GFX11-TRUE16-NEXT: v_and_b32_e32 v11, 0xffff, v11
+; GFX11-TRUE16-NEXT: v_lshlrev_b32_e32 v13, 16, v13
+; GFX11-TRUE16-NEXT: v_and_b32_e32 v12, 0xffff, v12
+; GFX11-TRUE16-NEXT: v_lshlrev_b32_e32 v14, 16, v14
+; GFX11-TRUE16-NEXT: v_and_b32_e32 v16, 0xffff, v16
+; GFX11-TRUE16-NEXT: v_lshlrev_b32_e32 v9, 16, v9
+; GFX11-TRUE16-NEXT: v_and_b32_e32 v17, 0xffff, v17
+; GFX11-TRUE16-NEXT: v_lshlrev_b32_e32 v24, 16, v24
+; GFX11-TRUE16-NEXT: v_or_b32_e32 v28, v4, v5
+; GFX11-TRUE16-NEXT: v_or_b32_e32 v29, v11, v13
+; GFX11-TRUE16-NEXT: v_or_b32_e32 v30, v12, v14
+; GFX11-TRUE16-NEXT: v_or_b32_e32 v4, v16, v9
+; GFX11-TRUE16-NEXT: v_or_b32_e32 v5, v17, v24
+; GFX11-TRUE16-NEXT: v_and_b32_e32 v9, 0xff, v36
+; GFX11-TRUE16-NEXT: v_lshlrev_b32_e32 v11, 8, v18
+; GFX11-TRUE16-NEXT: v_and_b32_e32 v12, 0xff, v82
+; GFX11-TRUE16-NEXT: v_lshlrev_b32_e32 v8, 8, v8
+; GFX11-TRUE16-NEXT: v_and_b32_e32 v13, 0xff, v32
+; GFX11-TRUE16-NEXT: v_lshlrev_b32_e32 v14, 8, v19
+; GFX11-TRUE16-NEXT: v_and_b32_e32 v16, 0xff, v34
+; GFX11-TRUE16-NEXT: v_lshlrev_b32_e32 v17, 8, v81
+; GFX11-TRUE16-NEXT: v_and_b32_e32 v18, 0xff, v39
+; GFX11-TRUE16-NEXT: v_lshlrev_b32_e32 v19, 8, v20
+; GFX11-TRUE16-NEXT: v_or_b32_e32 v9, v9, v11
+; GFX11-TRUE16-NEXT: v_or_b32_e32 v8, v12, v8
+; GFX11-TRUE16-NEXT: v_or_b32_e32 v11, v13, v14
+; GFX11-TRUE16-NEXT: v_or_b32_e32 v12, v16, v17
+; GFX11-TRUE16-NEXT: v_or_b32_e32 v13, v18, v19
+; GFX11-TRUE16-NEXT: v_and_b32_e32 v14, 0xff, v80
+; GFX11-TRUE16-NEXT: v_lshlrev_b32_e32 v7, 8, v7
+; GFX11-TRUE16-NEXT: v_and_b32_e32 v16, 0xff, v35
+; GFX11-TRUE16-NEXT: v_lshlrev_b32_e32 v17, 8, v21
+; GFX11-TRUE16-NEXT: v_and_b32_e32 v18, 0xff, v37
+; GFX11-TRUE16-NEXT: v_lshlrev_b32_e32 v19, 8, v71
+; GFX11-TRUE16-NEXT: v_and_b32_e32 v20, 0xff, v51
+; GFX11-TRUE16-NEXT: v_lshlrev_b32_e32 v21, 8, v22
+; GFX11-TRUE16-NEXT: v_and_b32_e32 v22, 0xff, v70
+; GFX11-TRUE16-NEXT: v_lshlrev_b32_e32 v6, 8, v6
+; GFX11-TRUE16-NEXT: v_or_b32_e32 v7, v14, v7
+; GFX11-TRUE16-NEXT: v_or_b32_e32 v14, v16, v17
+; GFX11-TRUE16-NEXT: v_or_b32_e32 v16, v18, v19
+; GFX11-TRUE16-NEXT: v_or_b32_e32 v17, v20, v21
+; GFX11-TRUE16-NEXT: v_or_b32_e32 v6, v22, v6
+; GFX11-TRUE16-NEXT: v_and_b32_e32 v9, 0xffff, v9
+; GFX11-TRUE16-NEXT: v_lshlrev_b32_e32 v8, 16, v8
+; GFX11-TRUE16-NEXT: v_and_b32_e32 v11, 0xffff, v11
+; GFX11-TRUE16-NEXT: v_lshlrev_b32_e32 v12, 16, v12
+; GFX11-TRUE16-NEXT: v_and_b32_e32 v13, 0xffff, v13
+; GFX11-TRUE16-NEXT: v_lshlrev_b32_e32 v18, 16, v7
+; GFX11-TRUE16-NEXT: v_and_b32_e32 v14, 0xffff, v14
+; GFX11-TRUE16-NEXT: v_lshlrev_b32_e32 v16, 16, v16
+; GFX11-TRUE16-NEXT: v_and_b32_e32 v17, 0xffff, v17
+; GFX11-TRUE16-NEXT: v_lshlrev_b32_e32 v19, 16, v6
+; GFX11-TRUE16-NEXT: v_or_b32_e32 v6, v9, v8
+; GFX11-TRUE16-NEXT: v_or_b32_e32 v7, v11, v12
+; GFX11-TRUE16-NEXT: v_or_b32_e32 v11, v13, v18
+; GFX11-TRUE16-NEXT: v_or_b32_e32 v12, v14, v16
+; GFX11-TRUE16-NEXT: v_or_b32_e32 v13, v17, v19
+; GFX11-TRUE16-NEXT: v_and_b32_e32 v8, 0xff, v38
+; GFX11-TRUE16-NEXT: v_lshlrev_b32_e32 v9, 8, v23
+; GFX11-TRUE16-NEXT: v_and_b32_e32 v14, 0xff, v48
+; GFX11-TRUE16-NEXT: v_lshlrev_b32_e32 v16, 8, v69
+; GFX11-TRUE16-NEXT: v_and_b32_e32 v17, 0xff, v53
+; GFX11-TRUE16-NEXT: v_lshlrev_b32_e32 v18, 8, v68
+; GFX11-TRUE16-NEXT: v_and_b32_e32 v19, 0xff, v67
+; GFX11-TRUE16-NEXT: v_lshlrev_b32_e32 v2, 8, v2
+; GFX11-TRUE16-NEXT: v_and_b32_e32 v20, 0xff, v50
+; GFX11-TRUE16-NEXT: v_lshlrev_b32_e32 v21, 8, v66
+; GFX11-TRUE16-NEXT: v_or_b32_e32 v8, v8, v9
+; GFX11-TRUE16-NEXT: v_or_b32_e32 v9, v14, v16
+; GFX11-TRUE16-NEXT: v_or_b32_e32 v14, v17, v18
+; GFX11-TRUE16-NEXT: v_or_b32_e32 v2, v19, v2
+; GFX11-TRUE16-NEXT: v_or_b32_e32 v16, v20, v21
+; GFX11-TRUE16-NEXT: v_lshlrev_b32_e32 v18, 8, v65
+; GFX11-TRUE16-NEXT: v_and_b32_e32 v19, 0xff, v55
+; GFX11-TRUE16-NEXT: v_and_b32_e32 v17, 0xffff, v14
+; GFX11-TRUE16-NEXT: v_and_b32_e32 v14, 0xff, v49
+; GFX11-TRUE16-NEXT: v_lshlrev_b32_e32 v20, 8, v64
+; GFX11-TRUE16-NEXT: v_and_b32_e32 v15, 0xff, v15
+; GFX11-TRUE16-NEXT: v_lshlrev_b32_e32 v1, 8, v1
+; GFX11-TRUE16-NEXT: v_and_b32_e32 v21, 0xff, v54
+; GFX11-TRUE16-NEXT: v_lshlrev_b32_e32 v10, 8, v10
+; GFX11-TRUE16-NEXT: v_and_b32_e32 v22, 0xff, v52
+; GFX11-TRUE16-NEXT: v_lshlrev_b32_e32 v3, 8, v3
+; GFX11-TRUE16-NEXT: v_or_b32_e32 v14, v14, v18
+; GFX11-TRUE16-NEXT: v_or_b32_e32 v18, v19, v20
+; GFX11-TRUE16-NEXT: v_or_b32_e32 v1, v15, v1
+; GFX11-TRUE16-NEXT: v_or_b32_e32 v10, v21, v10
+; GFX11-TRUE16-NEXT: v_or_b32_e32 v3, v22, v3
+; GFX11-TRUE16-NEXT: v_and_b32_e32 v8, 0xffff, v8
+; GFX11-TRUE16-NEXT: v_lshlrev_b32_e32 v9, 16, v9
+; GFX11-TRUE16-NEXT: v_lshlrev_b32_e32 v2, 16, v2
+; GFX11-TRUE16-NEXT: v_and_b32_e32 v16, 0xffff, v16
+; GFX11-TRUE16-NEXT: v_lshlrev_b32_e32 v19, 16, v14
+; GFX11-TRUE16-NEXT: v_and_b32_e32 v18, 0xffff, v18
+; GFX11-TRUE16-NEXT: v_lshlrev_b32_e32 v1, 16, v1
+; GFX11-TRUE16-NEXT: v_and_b32_e32 v10, 0xffff, v10
+; GFX11-TRUE16-NEXT: v_lshlrev_b32_e32 v3, 16, v3
+; GFX11-TRUE16-NEXT: v_or_b32_e32 v14, v8, v9
+; GFX11-TRUE16-NEXT: v_or_b32_e32 v15, v17, v2
+; GFX11-TRUE16-NEXT: v_or_b32_e32 v16, v16, v19
+; GFX11-TRUE16-NEXT: v_or_b32_e32 v17, v18, v1
+; GFX11-TRUE16-NEXT: v_or_b32_e32 v18, v10, v3
+; GFX11-TRUE16-NEXT: s_clause 0x5
+; GFX11-TRUE16-NEXT: scratch_store_b128 v0, v[97:100], off offset:32
+; GFX11-TRUE16-NEXT: scratch_store_b128 v0, v[112:115], off offset:48
+; GFX11-TRUE16-NEXT: scratch_store_b128 v0, v[27:30], off offset:64
+; GFX11-TRUE16-NEXT: scratch_store_b128 v0, v[4:7], off offset:80
+; GFX11-TRUE16-NEXT: scratch_store_b128 v0, v[11:14], off offset:96
+; GFX11-TRUE16-NEXT: scratch_store_b128 v0, v[15:18], off offset:112
+; GFX11-TRUE16-NEXT: v_readlane_b32 s104, v41, 8
+; GFX11-TRUE16-NEXT: v_readlane_b32 s103, v41, 7
+; GFX11-TRUE16-NEXT: v_readlane_b32 s102, v41, 6
+; GFX11-TRUE16-NEXT: v_readlane_b32 s101, v41, 5
+; GFX11-TRUE16-NEXT: v_readlane_b32 s100, v41, 4
+; GFX11-TRUE16-NEXT: v_readlane_b32 s99, v41, 3
+; GFX11-TRUE16-NEXT: v_readlane_b32 s98, v41, 2
+; GFX11-TRUE16-NEXT: v_readlane_b32 s97, v41, 1
+; GFX11-TRUE16-NEXT: v_readlane_b32 s96, v41, 0
+; GFX11-TRUE16-NEXT: v_readlane_b32 s87, v40, 31
+; GFX11-TRUE16-NEXT: v_readlane_b32 s86, v40, 30
+; GFX11-TRUE16-NEXT: v_readlane_b32 s85, v40, 29
+; GFX11-TRUE16-NEXT: v_readlane_b32 s84, v40, 28
+; GFX11-TRUE16-NEXT: v_readlane_b32 s83, v40, 27
+; GFX11-TRUE16-NEXT: v_readlane_b32 s82, v40, 26
+; GFX11-TRUE16-NEXT: v_readlane_b32 s81, v40, 25
+; GFX11-TRUE16-NEXT: v_readlane_b32 s80, v40, 24
+; GFX11-TRUE16-NEXT: v_readlane_b32 s71, v40, 23
+; GFX11-TRUE16-NEXT: v_readlane_b32 s70, v40, 22
+; GFX11-TRUE16-NEXT: v_readlane_b32 s69, v40, 21
+; GFX11-TRUE16-NEXT: v_readlane_b32 s68, v40, 20
+; GFX11-TRUE16-NEXT: v_readlane_b32 s67, v40, 19
+; GFX11-TRUE16-NEXT: v_readlane_b32 s66, v40, 18
+; GFX11-TRUE16-NEXT: v_readlane_b32 s65, v40, 17
+; GFX11-TRUE16-NEXT: v_readlane_b32 s64, v40, 16
+; GFX11-TRUE16-NEXT: v_readlane_b32 s55, v40, 15
+; GFX11-TRUE16-NEXT: v_readlane_b32 s54, v40, 14
+; GFX11-TRUE16-NEXT: v_readlane_b32 s53, v40, 13
+; GFX11-TRUE16-NEXT: v_readlane_b32 s52, v40, 12
+; GFX11-TRUE16-NEXT: v_readlane_b32 s51, v40, 11
+; GFX11-TRUE16-NEXT: v_readlane_b32 s50, v40, 10
+; GFX11-TRUE16-NEXT: v_readlane_b32 s49, v40, 9
+; GFX11-TRUE16-NEXT: v_readlane_b32 s48, v40, 8
+; GFX11-TRUE16-NEXT: v_readlane_b32 s39, v40, 7
+; GFX11-TRUE16-NEXT: v_readlane_b32 s38, v40, 6
+; GFX11-TRUE16-NEXT: v_readlane_b32 s37, v40, 5
+; GFX11-TRUE16-NEXT: v_readlane_b32 s36, v40, 4
+; GFX11-TRUE16-NEXT: v_readlane_b32 s35, v40, 3
+; GFX11-TRUE16-NEXT: v_readlane_b32 s34, v40, 2
+; GFX11-TRUE16-NEXT: v_readlane_b32 s31, v40, 1
+; GFX11-TRUE16-NEXT: v_readlane_b32 s30, v40, 0
+; GFX11-TRUE16-NEXT: s_or_saveexec_b32 s0, -1
+; GFX11-TRUE16-NEXT: s_clause 0x3
+; GFX11-TRUE16-NEXT: scratch_load_b32 v40, off, s32
+; GFX11-TRUE16-NEXT: scratch_load_b32 v41, off, s32 offset:4
+; GFX11-TRUE16-NEXT: scratch_load_b32 v42, off, s32 offset:8
+; GFX11-TRUE16-NEXT: scratch_load_b32 v43, off, s32 offset:12
+; GFX11-TRUE16-NEXT: s_mov_b32 exec_lo, s0
+; GFX11-TRUE16-NEXT: s_waitcnt vmcnt(0)
+; GFX11-TRUE16-NEXT: s_setpc_b64 s[30:31]
+;
+; GFX11-FAKE16-LABEL: bitcast_v64bf16_to_v128i8_scalar:
+; GFX11-FAKE16: ; %bb.0:
+; GFX11-FAKE16-NEXT: s_waitcnt vmcnt(0) expcnt(0) lgkmcnt(0)
+; GFX11-FAKE16-NEXT: s_or_saveexec_b32 s4, -1
+; GFX11-FAKE16-NEXT: s_clause 0x3
+; GFX11-FAKE16-NEXT: scratch_store_b32 off, v40, s32
+; GFX11-FAKE16-NEXT: scratch_store_b32 off, v41, s32 offset:4
+; GFX11-FAKE16-NEXT: scratch_store_b32 off, v42, s32 offset:8
+; GFX11-FAKE16-NEXT: scratch_store_b32 off, v43, s32 offset:12
+; GFX11-FAKE16-NEXT: s_mov_b32 exec_lo, s4
+; GFX11-FAKE16-NEXT: v_writelane_b32 v40, s30, 0
+; GFX11-FAKE16-NEXT: v_writelane_b32 v41, s96, 0
+; GFX11-FAKE16-NEXT: v_cmp_ne_u32_e32 vcc_lo, 0, v15
+; GFX11-FAKE16-NEXT: v_readfirstlane_b32 s72, v1
+; GFX11-FAKE16-NEXT: v_readfirstlane_b32 s73, v2
+; GFX11-FAKE16-NEXT: v_writelane_b32 v40, s31, 1
+; GFX11-FAKE16-NEXT: v_writelane_b32 v41, s97, 1
+; GFX11-FAKE16-NEXT: v_readfirstlane_b32 s62, v3
+; GFX11-FAKE16-NEXT: v_readfirstlane_b32 s63, v4
+; GFX11-FAKE16-NEXT: v_readfirstlane_b32 s60, v5
+; GFX11-FAKE16-NEXT: v_writelane_b32 v40, s34, 2
+; GFX11-FAKE16-NEXT: v_writelane_b32 v41, s98, 2
+; GFX11-FAKE16-NEXT: v_readfirstlane_b32 s61, v6
+; GFX11-FAKE16-NEXT: v_readfirstlane_b32 s58, v7
+; GFX11-FAKE16-NEXT: v_readfirstlane_b32 s59, v8
+; GFX11-FAKE16-NEXT: v_writelane_b32 v40, s35, 3
+; GFX11-FAKE16-NEXT: v_writelane_b32 v41, s99, 3
+; GFX11-FAKE16-NEXT: v_readfirstlane_b32 s56, v9
+; GFX11-FAKE16-NEXT: v_readfirstlane_b32 s57, v10
+; GFX11-FAKE16-NEXT: v_readfirstlane_b32 s46, v11
+; GFX11-FAKE16-NEXT: v_writelane_b32 v40, s36, 4
+; GFX11-FAKE16-NEXT: v_writelane_b32 v41, s100, 4
+; GFX11-FAKE16-NEXT: v_readfirstlane_b32 s47, v12
+; GFX11-FAKE16-NEXT: v_readfirstlane_b32 s44, v13
+; GFX11-FAKE16-NEXT: v_readfirstlane_b32 s45, v14
+; GFX11-FAKE16-NEXT: v_writelane_b32 v40, s37, 5
+; GFX11-FAKE16-NEXT: v_writelane_b32 v41, s101, 5
+; GFX11-FAKE16-NEXT: s_mov_b32 vcc_hi, 0
+; GFX11-FAKE16-NEXT: s_and_b32 s4, vcc_lo, exec_lo
+; GFX11-FAKE16-NEXT: ; implicit-def: $vgpr43 : SGPR spill to VGPR lane
+; GFX11-FAKE16-NEXT: ; implicit-def: $vgpr42 : SGPR spill to VGPR lane
+; GFX11-FAKE16-NEXT: v_writelane_b32 v40, s38, 6
+; GFX11-FAKE16-NEXT: v_writelane_b32 v41, s102, 6
+; GFX11-FAKE16-NEXT: v_writelane_b32 v40, s39, 7
+; GFX11-FAKE16-NEXT: v_writelane_b32 v41, s103, 7
+; GFX11-FAKE16-NEXT: v_writelane_b32 v40, s48, 8
+; GFX11-FAKE16-NEXT: v_writelane_b32 v41, s104, 8
+; GFX11-FAKE16-NEXT: v_writelane_b32 v40, s49, 9
+; GFX11-FAKE16-NEXT: v_writelane_b32 v40, s50, 10
+; GFX11-FAKE16-NEXT: v_writelane_b32 v40, s51, 11
+; GFX11-FAKE16-NEXT: v_writelane_b32 v40, s52, 12
+; GFX11-FAKE16-NEXT: v_writelane_b32 v40, s53, 13
+; GFX11-FAKE16-NEXT: v_writelane_b32 v40, s54, 14
+; GFX11-FAKE16-NEXT: v_writelane_b32 v40, s55, 15
+; GFX11-FAKE16-NEXT: v_writelane_b32 v40, s64, 16
+; GFX11-FAKE16-NEXT: v_writelane_b32 v40, s65, 17
+; GFX11-FAKE16-NEXT: v_writelane_b32 v40, s66, 18
+; GFX11-FAKE16-NEXT: v_writelane_b32 v40, s67, 19
+; GFX11-FAKE16-NEXT: v_writelane_b32 v40, s68, 20
+; GFX11-FAKE16-NEXT: v_writelane_b32 v40, s69, 21
+; GFX11-FAKE16-NEXT: v_writelane_b32 v40, s70, 22
+; GFX11-FAKE16-NEXT: v_writelane_b32 v40, s71, 23
+; GFX11-FAKE16-NEXT: v_writelane_b32 v40, s80, 24
+; GFX11-FAKE16-NEXT: v_writelane_b32 v40, s81, 25
+; GFX11-FAKE16-NEXT: v_writelane_b32 v40, s82, 26
+; GFX11-FAKE16-NEXT: v_writelane_b32 v40, s83, 27
+; GFX11-FAKE16-NEXT: v_writelane_b32 v40, s84, 28
+; GFX11-FAKE16-NEXT: v_writelane_b32 v40, s85, 29
+; GFX11-FAKE16-NEXT: v_writelane_b32 v40, s86, 30
+; GFX11-FAKE16-NEXT: v_writelane_b32 v40, s87, 31
+; GFX11-FAKE16-NEXT: s_cbranch_scc0 .LBB91_3
+; GFX11-FAKE16-NEXT: ; %bb.1: ; %cmp.false
+; GFX11-FAKE16-NEXT: s_lshr_b32 s4, s27, 24
+; GFX11-FAKE16-NEXT: s_lshr_b64 s[12:13], s[26:27], 24
+; GFX11-FAKE16-NEXT: v_writelane_b32 v43, s4, 15
+; GFX11-FAKE16-NEXT: s_lshr_b32 s4, s27, 16
+; GFX11-FAKE16-NEXT: s_lshr_b32 s99, s2, 16
+; GFX11-FAKE16-NEXT: s_lshr_b32 s100, s2, 8
+; GFX11-FAKE16-NEXT: s_lshr_b32 s101, s1, 24
+; GFX11-FAKE16-NEXT: v_writelane_b32 v43, s4, 14
+; GFX11-FAKE16-NEXT: s_lshr_b32 s4, s27, 8
+; GFX11-FAKE16-NEXT: s_lshr_b32 s11, s1, 16
+; GFX11-FAKE16-NEXT: s_lshr_b32 s102, s1, 8
+; GFX11-FAKE16-NEXT: s_lshr_b32 s103, s0, 16
+; GFX11-FAKE16-NEXT: v_writelane_b32 v43, s4, 16
+; GFX11-FAKE16-NEXT: s_lshr_b32 s4, s26, 16
+; GFX11-FAKE16-NEXT: s_lshr_b32 s104, s0, 8
+; GFX11-FAKE16-NEXT: s_lshr_b32 s85, s45, 24
+; GFX11-FAKE16-NEXT: s_lshr_b32 s10, s45, 16
+; GFX11-FAKE16-NEXT: v_writelane_b32 v43, s4, 17
+; GFX11-FAKE16-NEXT: s_lshr_b32 s4, s26, 8
+; GFX11-FAKE16-NEXT: s_lshr_b32 s5, s45, 8
+; GFX11-FAKE16-NEXT: s_lshr_b32 s87, s44, 16
+; GFX11-FAKE16-NEXT: s_lshr_b32 s86, s44, 8
+; GFX11-FAKE16-NEXT: v_writelane_b32 v43, s4, 18
+; GFX11-FAKE16-NEXT: s_lshr_b32 s4, s25, 24
+; GFX11-FAKE16-NEXT: s_lshr_b32 s81, s47, 24
+; GFX11-FAKE16-NEXT: s_lshr_b32 s98, s47, 16
+; GFX11-FAKE16-NEXT: s_lshr_b32 s84, s47, 8
+; GFX11-FAKE16-NEXT: v_writelane_b32 v43, s4, 19
+; GFX11-FAKE16-NEXT: s_lshr_b32 s4, s25, 16
+; GFX11-FAKE16-NEXT: s_lshr_b32 s48, s46, 8
+; GFX11-FAKE16-NEXT: s_lshr_b32 s70, s57, 24
+; GFX11-FAKE16-NEXT: s_lshr_b32 s97, s57, 16
+; GFX11-FAKE16-NEXT: v_writelane_b32 v43, s4, 13
+; GFX11-FAKE16-NEXT: s_lshr_b32 s4, s25, 8
+; GFX11-FAKE16-NEXT: s_lshr_b32 s80, s57, 8
+; GFX11-FAKE16-NEXT: s_lshr_b32 s83, s56, 16
+; GFX11-FAKE16-NEXT: s_lshr_b32 s82, s56, 8
+; GFX11-FAKE16-NEXT: v_writelane_b32 v43, s4, 20
+; GFX11-FAKE16-NEXT: s_lshr_b32 s4, s24, 16
+; GFX11-FAKE16-NEXT: s_lshr_b32 s66, s59, 24
+; GFX11-FAKE16-NEXT: s_lshr_b32 s9, s59, 16
+; GFX11-FAKE16-NEXT: s_lshr_b32 s69, s59, 8
+; GFX11-FAKE16-NEXT: v_writelane_b32 v43, s4, 21
+; GFX11-FAKE16-NEXT: s_lshr_b32 s4, s24, 8
+; GFX11-FAKE16-NEXT: s_lshr_b32 s71, s58, 16
+; GFX11-FAKE16-NEXT: s_lshr_b32 s39, s58, 8
+; GFX11-FAKE16-NEXT: s_lshr_b32 s55, s61, 24
+; GFX11-FAKE16-NEXT: v_writelane_b32 v43, s4, 22
+; GFX11-FAKE16-NEXT: s_lshr_b32 s4, s23, 24
+; GFX11-FAKE16-NEXT: s_lshr_b32 s8, s61, 16
+; GFX11-FAKE16-NEXT: s_lshr_b32 s65, s61, 8
+; GFX11-FAKE16-NEXT: s_lshr_b32 s68, s60, 16
+; GFX11-FAKE16-NEXT: v_writelane_b32 v43, s4, 23
+; GFX11-FAKE16-NEXT: s_lshr_b32 s4, s23, 16
+; GFX11-FAKE16-NEXT: s_lshr_b32 s67, s60, 8
+; GFX11-FAKE16-NEXT: s_lshr_b32 s51, s63, 24
+; GFX11-FAKE16-NEXT: s_lshr_b32 s96, s63, 16
+; GFX11-FAKE16-NEXT: v_writelane_b32 v43, s4, 12
+; GFX11-FAKE16-NEXT: s_lshr_b32 s4, s23, 8
+; GFX11-FAKE16-NEXT: s_lshr_b32 s54, s63, 8
+; GFX11-FAKE16-NEXT: s_lshr_b32 s38, s62, 16
+; GFX11-FAKE16-NEXT: s_lshr_b32 s64, s62, 8
+; GFX11-FAKE16-NEXT: v_writelane_b32 v43, s4, 24
+; GFX11-FAKE16-NEXT: s_lshr_b32 s4, s22, 16
+; GFX11-FAKE16-NEXT: s_lshr_b32 s36, s73, 24
+; GFX11-FAKE16-NEXT: s_lshr_b32 s7, s73, 16
+; GFX11-FAKE16-NEXT: s_lshr_b32 s50, s73, 8
+; GFX11-FAKE16-NEXT: v_writelane_b32 v43, s4, 25
+; GFX11-FAKE16-NEXT: s_lshr_b32 s4, s22, 8
+; GFX11-FAKE16-NEXT: s_lshr_b32 s53, s72, 16
+; GFX11-FAKE16-NEXT: s_lshr_b32 s52, s72, 8
+; GFX11-FAKE16-NEXT: s_lshr_b32 s34, s29, 24
+; GFX11-FAKE16-NEXT: v_writelane_b32 v43, s4, 26
+; GFX11-FAKE16-NEXT: s_lshr_b32 s4, s21, 24
+; GFX11-FAKE16-NEXT: s_lshr_b32 s6, s29, 16
+; GFX11-FAKE16-NEXT: s_lshr_b32 s35, s29, 8
+; GFX11-FAKE16-NEXT: s_lshr_b32 s37, s28, 16
+; GFX11-FAKE16-NEXT: v_writelane_b32 v43, s4, 27
+; GFX11-FAKE16-NEXT: s_lshr_b32 s4, s21, 16
+; GFX11-FAKE16-NEXT: s_lshr_b32 s49, s28, 8
+; GFX11-FAKE16-NEXT: s_lshr_b64 s[14:15], s[16:17], 24
+; GFX11-FAKE16-NEXT: s_lshr_b64 s[40:41], s[2:3], 24
+; GFX11-FAKE16-NEXT: v_writelane_b32 v43, s4, 11
+; GFX11-FAKE16-NEXT: s_lshr_b32 s4, s21, 8
+; GFX11-FAKE16-NEXT: s_lshr_b64 s[42:43], s[0:1], 24
+; GFX11-FAKE16-NEXT: s_lshr_b64 s[74:75], s[44:45], 24
+; GFX11-FAKE16-NEXT: s_lshr_b64 s[76:77], s[46:47], 24
+; GFX11-FAKE16-NEXT: v_writelane_b32 v43, s4, 28
+; GFX11-FAKE16-NEXT: s_lshr_b32 s4, s20, 16
+; GFX11-FAKE16-NEXT: s_lshr_b64 s[78:79], s[56:57], 24
+; GFX11-FAKE16-NEXT: s_lshr_b64 s[88:89], s[58:59], 24
+; GFX11-FAKE16-NEXT: s_lshr_b64 s[90:91], s[60:61], 24
+; GFX11-FAKE16-NEXT: v_writelane_b32 v43, s4, 29
+; GFX11-FAKE16-NEXT: s_lshr_b32 s4, s20, 8
+; GFX11-FAKE16-NEXT: s_lshr_b64 s[92:93], s[62:63], 24
+; GFX11-FAKE16-NEXT: s_lshr_b64 s[94:95], s[72:73], 24
+; GFX11-FAKE16-NEXT: s_lshr_b64 s[30:31], s[28:29], 24
+; GFX11-FAKE16-NEXT: v_writelane_b32 v43, s4, 30
+; GFX11-FAKE16-NEXT: s_lshr_b32 s4, s19, 24
+; GFX11-FAKE16-NEXT: s_delay_alu instid0(SALU_CYCLE_1) | instskip(SKIP_1) | instid1(SALU_CYCLE_1)
+; GFX11-FAKE16-NEXT: v_writelane_b32 v43, s4, 31
+; GFX11-FAKE16-NEXT: s_lshr_b32 s4, s19, 16
+; GFX11-FAKE16-NEXT: v_writelane_b32 v43, s4, 10
+; GFX11-FAKE16-NEXT: s_lshr_b32 s4, s19, 8
+; GFX11-FAKE16-NEXT: s_delay_alu instid0(SALU_CYCLE_1) | instskip(SKIP_1) | instid1(SALU_CYCLE_1)
+; GFX11-FAKE16-NEXT: v_writelane_b32 v42, s4, 0
+; GFX11-FAKE16-NEXT: s_lshr_b32 s4, s18, 16
+; GFX11-FAKE16-NEXT: v_writelane_b32 v42, s4, 1
+; GFX11-FAKE16-NEXT: s_lshr_b32 s4, s18, 8
+; GFX11-FAKE16-NEXT: s_delay_alu instid0(SALU_CYCLE_1) | instskip(SKIP_1) | instid1(SALU_CYCLE_1)
+; GFX11-FAKE16-NEXT: v_writelane_b32 v42, s4, 2
+; GFX11-FAKE16-NEXT: s_lshr_b32 s4, s17, 24
+; GFX11-FAKE16-NEXT: v_writelane_b32 v42, s4, 3
+; GFX11-FAKE16-NEXT: s_lshr_b32 s4, s17, 16
+; GFX11-FAKE16-NEXT: s_delay_alu instid0(SALU_CYCLE_1) | instskip(SKIP_1) | instid1(SALU_CYCLE_1)
+; GFX11-FAKE16-NEXT: v_writelane_b32 v43, s4, 9
+; GFX11-FAKE16-NEXT: s_lshr_b32 s4, s17, 8
+; GFX11-FAKE16-NEXT: v_writelane_b32 v42, s4, 4
+; GFX11-FAKE16-NEXT: s_lshr_b32 s4, s16, 16
+; GFX11-FAKE16-NEXT: s_delay_alu instid0(SALU_CYCLE_1) | instskip(SKIP_1) | instid1(SALU_CYCLE_1)
+; GFX11-FAKE16-NEXT: v_writelane_b32 v42, s4, 5
+; GFX11-FAKE16-NEXT: s_lshr_b32 s4, s16, 8
+; GFX11-FAKE16-NEXT: v_writelane_b32 v42, s4, 6
+; GFX11-FAKE16-NEXT: s_lshr_b32 s4, s3, 24
+; GFX11-FAKE16-NEXT: s_delay_alu instid0(SALU_CYCLE_1) | instskip(SKIP_1) | instid1(SALU_CYCLE_1)
+; GFX11-FAKE16-NEXT: v_writelane_b32 v42, s4, 7
+; GFX11-FAKE16-NEXT: s_lshr_b32 s4, s3, 16
+; GFX11-FAKE16-NEXT: v_writelane_b32 v43, s4, 8
+; GFX11-FAKE16-NEXT: s_lshr_b32 s4, s3, 8
+; GFX11-FAKE16-NEXT: s_delay_alu instid0(SALU_CYCLE_1) | instskip(SKIP_4) | instid1(SALU_CYCLE_1)
+; GFX11-FAKE16-NEXT: v_writelane_b32 v42, s4, 8
+; GFX11-FAKE16-NEXT: s_lshr_b32 s4, s46, 16
+; GFX11-FAKE16-NEXT: v_writelane_b32 v43, s12, 6
+; GFX11-FAKE16-NEXT: v_writelane_b32 v43, s13, 7
+; GFX11-FAKE16-NEXT: s_lshr_b64 s[12:13], s[24:25], 24
+; GFX11-FAKE16-NEXT: v_writelane_b32 v43, s12, 4
+; GFX11-FAKE16-NEXT: v_writelane_b32 v43, s13, 5
+; GFX11-FAKE16-NEXT: s_lshr_b64 s[12:13], s[22:23], 24
+; GFX11-FAKE16-NEXT: s_delay_alu instid0(SALU_CYCLE_1) | instskip(SKIP_2) | instid1(SALU_CYCLE_1)
+; GFX11-FAKE16-NEXT: v_writelane_b32 v43, s12, 2
+; GFX11-FAKE16-NEXT: v_writelane_b32 v43, s13, 3
+; GFX11-FAKE16-NEXT: s_lshr_b64 s[12:13], s[20:21], 24
+; GFX11-FAKE16-NEXT: v_writelane_b32 v43, s12, 0
+; GFX11-FAKE16-NEXT: v_writelane_b32 v43, s13, 1
+; GFX11-FAKE16-NEXT: s_lshr_b64 s[12:13], s[18:19], 24
+; GFX11-FAKE16-NEXT: s_and_not1_b32 vcc_lo, exec_lo, vcc_hi
+; GFX11-FAKE16-NEXT: s_cbranch_vccnz .LBB91_4
+; GFX11-FAKE16-NEXT: .LBB91_2: ; %cmp.true
+; GFX11-FAKE16-NEXT: s_and_b32 s4, s29, 0xffff0000
+; GFX11-FAKE16-NEXT: s_and_b32 s14, s47, 0xffff0000
+; GFX11-FAKE16-NEXT: v_add_f32_e64 v1, 0x40c00000, s4
+; GFX11-FAKE16-NEXT: s_and_b32 s4, s1, 0xffff0000
+; GFX11-FAKE16-NEXT: s_lshl_b32 s15, s47, 16
+; GFX11-FAKE16-NEXT: v_add_f32_e64 v6, 0x40c00000, s4
+; GFX11-FAKE16-NEXT: s_lshl_b32 s6, s29, 16
+; GFX11-FAKE16-NEXT: v_bfe_u32 v3, v1, 16, 1
+; GFX11-FAKE16-NEXT: v_add_f32_e64 v2, 0x40c00000, s6
+; GFX11-FAKE16-NEXT: s_and_b32 s8, s45, 0xffff0000
+; GFX11-FAKE16-NEXT: v_readfirstlane_b32 s47, v6
+; GFX11-FAKE16-NEXT: v_cmp_u_f32_e32 vcc_lo, v6, v6
+; GFX11-FAKE16-NEXT: v_add_nc_u32_e32 v3, v3, v1
+; GFX11-FAKE16-NEXT: s_lshl_b32 s7, s45, 16
+; GFX11-FAKE16-NEXT: s_and_b32 s78, s28, 0xffff0000
+; GFX11-FAKE16-NEXT: s_bfe_u32 s6, s47, 0x10010
+; GFX11-FAKE16-NEXT: s_lshl_b32 s79, s28, 16
+; GFX11-FAKE16-NEXT: s_add_i32 s45, s6, s47
+; GFX11-FAKE16-NEXT: s_and_b32 s5, s73, 0xffff0000
+; GFX11-FAKE16-NEXT: s_lshl_b32 s77, s73, 16
+; GFX11-FAKE16-NEXT: s_and_b32 s75, s72, 0xffff0000
+; GFX11-FAKE16-NEXT: s_lshl_b32 s76, s72, 16
+; GFX11-FAKE16-NEXT: s_and_b32 s11, s63, 0xffff0000
+; GFX11-FAKE16-NEXT: s_lshl_b32 s74, s63, 16
+; GFX11-FAKE16-NEXT: s_and_b32 s72, s62, 0xffff0000
+; GFX11-FAKE16-NEXT: s_lshl_b32 s73, s62, 16
+; GFX11-FAKE16-NEXT: s_and_b32 s63, s61, 0xffff0000
+; GFX11-FAKE16-NEXT: s_lshl_b32 s62, s61, 16
+; GFX11-FAKE16-NEXT: s_and_b32 s61, s60, 0xffff0000
+; GFX11-FAKE16-NEXT: s_lshl_b32 s60, s60, 16
+; GFX11-FAKE16-NEXT: s_and_b32 s41, s59, 0xffff0000
+; GFX11-FAKE16-NEXT: s_lshl_b32 s40, s59, 16
+; GFX11-FAKE16-NEXT: s_and_b32 s28, s58, 0xffff0000
+; GFX11-FAKE16-NEXT: s_lshl_b32 s29, s58, 16
+; GFX11-FAKE16-NEXT: s_and_b32 s13, s57, 0xffff0000
+; GFX11-FAKE16-NEXT: s_lshl_b32 s10, s57, 16
+; GFX11-FAKE16-NEXT: s_and_b32 s42, s56, 0xffff0000
+; GFX11-FAKE16-NEXT: s_lshl_b32 s43, s56, 16
+; GFX11-FAKE16-NEXT: s_and_b32 s12, s46, 0xffff0000
+; GFX11-FAKE16-NEXT: s_lshl_b32 s9, s46, 16
+; GFX11-FAKE16-NEXT: s_and_b32 s4, s44, 0xffff0000
+; GFX11-FAKE16-NEXT: s_lshl_b32 s6, s44, 16
+; GFX11-FAKE16-NEXT: s_addk_i32 s45, 0x7fff
+; GFX11-FAKE16-NEXT: s_bitset1_b32 s47, 22
+; GFX11-FAKE16-NEXT: v_bfe_u32 v4, v2, 16, 1
+; GFX11-FAKE16-NEXT: v_or_b32_e32 v5, 0x400000, v1
+; GFX11-FAKE16-NEXT: v_add_nc_u32_e32 v3, 0x7fff, v3
+; GFX11-FAKE16-NEXT: s_and_b32 s44, vcc_lo, exec_lo
+; GFX11-FAKE16-NEXT: v_cmp_u_f32_e32 vcc_lo, v1, v1
+; GFX11-FAKE16-NEXT: s_cselect_b32 s44, s47, s45
+; GFX11-FAKE16-NEXT: s_lshl_b32 s1, s1, 16
+; GFX11-FAKE16-NEXT: v_add_nc_u32_e32 v4, v4, v2
+; GFX11-FAKE16-NEXT: s_lshr_b32 s58, s44, 16
+; GFX11-FAKE16-NEXT: v_cndmask_b32_e32 v1, v3, v5, vcc_lo
+; GFX11-FAKE16-NEXT: v_add_f32_e64 v3, 0x40c00000, s1
+; GFX11-FAKE16-NEXT: v_or_b32_e32 v5, 0x400000, v2
+; GFX11-FAKE16-NEXT: v_add_nc_u32_e32 v4, 0x7fff, v4
+; GFX11-FAKE16-NEXT: v_cmp_u_f32_e32 vcc_lo, v2, v2
+; GFX11-FAKE16-NEXT: v_add_f32_e64 v6, 0x40c00000, s78
+; GFX11-FAKE16-NEXT: v_readfirstlane_b32 s1, v3
+; GFX11-FAKE16-NEXT: v_add_f32_e64 v7, 0x40c00000, s79
+; GFX11-FAKE16-NEXT: v_lshrrev_b32_e32 v22, 16, v1
+; GFX11-FAKE16-NEXT: v_cndmask_b32_e32 v2, v4, v5, vcc_lo
+; GFX11-FAKE16-NEXT: v_cmp_u_f32_e32 vcc_lo, v3, v3
+; GFX11-FAKE16-NEXT: s_bfe_u32 s45, s1, 0x10010
+; GFX11-FAKE16-NEXT: v_bfe_u32 v4, v6, 16, 1
+; GFX11-FAKE16-NEXT: s_add_i32 s45, s45, s1
+; GFX11-FAKE16-NEXT: s_bitset1_b32 s1, 22
+; GFX11-FAKE16-NEXT: s_addk_i32 s45, 0x7fff
+; GFX11-FAKE16-NEXT: s_and_b32 s44, vcc_lo, exec_lo
+; GFX11-FAKE16-NEXT: s_cselect_b32 s1, s1, s45
+; GFX11-FAKE16-NEXT: s_and_b32 s44, s0, 0xffff0000
+; GFX11-FAKE16-NEXT: v_lshrrev_b32_e32 v21, 16, v2
+; GFX11-FAKE16-NEXT: v_add_f32_e64 v2, 0x40c00000, s44
+; GFX11-FAKE16-NEXT: v_bfe_u32 v5, v7, 16, 1
+; GFX11-FAKE16-NEXT: v_add_nc_u32_e32 v3, v4, v6
+; GFX11-FAKE16-NEXT: s_lshr_b32 s1, s1, 16
+; GFX11-FAKE16-NEXT: v_or_b32_e32 v8, 0x400000, v7
+; GFX11-FAKE16-NEXT: v_readfirstlane_b32 s44, v2
+; GFX11-FAKE16-NEXT: v_cmp_u_f32_e32 vcc_lo, v2, v2
+; GFX11-FAKE16-NEXT: v_add_nc_u32_e32 v3, 0x7fff, v3
+; GFX11-FAKE16-NEXT: s_bfe_u32 s45, s44, 0x10010
+; GFX11-FAKE16-NEXT: s_delay_alu instid0(SALU_CYCLE_1)
+; GFX11-FAKE16-NEXT: s_add_i32 s45, s45, s44
+; GFX11-FAKE16-NEXT: s_bitset1_b32 s44, 22
+; GFX11-FAKE16-NEXT: s_addk_i32 s45, 0x7fff
+; GFX11-FAKE16-NEXT: s_and_b32 s46, vcc_lo, exec_lo
+; GFX11-FAKE16-NEXT: v_cmp_u_f32_e32 vcc_lo, v6, v6
+; GFX11-FAKE16-NEXT: v_add_nc_u32_e32 v1, v5, v7
+; GFX11-FAKE16-NEXT: v_or_b32_e32 v5, 0x400000, v6
+; GFX11-FAKE16-NEXT: s_cselect_b32 s44, s44, s45
+; GFX11-FAKE16-NEXT: s_lshl_b32 s0, s0, 16
+; GFX11-FAKE16-NEXT: s_delay_alu instid0(VALU_DEP_1)
+; GFX11-FAKE16-NEXT: v_cndmask_b32_e32 v2, v3, v5, vcc_lo
+; GFX11-FAKE16-NEXT: v_add_f32_e64 v3, 0x40c00000, s0
+; GFX11-FAKE16-NEXT: v_add_nc_u32_e32 v1, 0x7fff, v1
+; GFX11-FAKE16-NEXT: v_cmp_u_f32_e32 vcc_lo, v7, v7
+; GFX11-FAKE16-NEXT: v_and_b32_e32 v4, 0xffff, v21
+; GFX11-FAKE16-NEXT: v_add_f32_e64 v5, 0x40c00000, s5
+; GFX11-FAKE16-NEXT: v_readfirstlane_b32 s0, v3
+; GFX11-FAKE16-NEXT: v_lshrrev_b32_e32 v2, 16, v2
+; GFX11-FAKE16-NEXT: v_cndmask_b32_e32 v1, v1, v8, vcc_lo
+; GFX11-FAKE16-NEXT: v_cmp_u_f32_e32 vcc_lo, v3, v3
+; GFX11-FAKE16-NEXT: v_add_f32_e64 v8, 0x40c00000, s77
+; GFX11-FAKE16-NEXT: s_bfe_u32 s5, s0, 0x10010
+; GFX11-FAKE16-NEXT: v_lshl_or_b32 v7, v22, 16, v4
+; GFX11-FAKE16-NEXT: s_add_i32 s45, s5, s0
+; GFX11-FAKE16-NEXT: s_lshr_b32 s5, s44, 16
+; GFX11-FAKE16-NEXT: s_addk_i32 s45, 0x7fff
+; GFX11-FAKE16-NEXT: s_bitset1_b32 s0, 22
+; GFX11-FAKE16-NEXT: s_and_b32 s44, vcc_lo, exec_lo
+; GFX11-FAKE16-NEXT: s_cselect_b32 s0, s0, s45
+; GFX11-FAKE16-NEXT: s_and_b32 s44, s3, 0xffff0000
+; GFX11-FAKE16-NEXT: v_lshrrev_b32_e32 v23, 16, v1
+; GFX11-FAKE16-NEXT: v_add_f32_e64 v9, 0x40c00000, s44
+; GFX11-FAKE16-NEXT: v_bfe_u32 v6, v8, 16, 1
+; GFX11-FAKE16-NEXT: v_bfe_u32 v1, v5, 16, 1
+; GFX11-FAKE16-NEXT: s_lshr_b32 s0, s0, 16
+; GFX11-FAKE16-NEXT: v_and_b32_e32 v3, 0xffff, v23
+; GFX11-FAKE16-NEXT: v_readfirstlane_b32 s44, v9
+; GFX11-FAKE16-NEXT: v_cmp_u_f32_e32 vcc_lo, v9, v9
+; GFX11-FAKE16-NEXT: v_add_nc_u32_e32 v4, v6, v8
+; GFX11-FAKE16-NEXT: v_add_nc_u32_e32 v1, v1, v5
+; GFX11-FAKE16-NEXT: v_lshl_or_b32 v6, v2, 16, v3
+; GFX11-FAKE16-NEXT: s_bfe_u32 s45, s44, 0x10010
+; GFX11-FAKE16-NEXT: v_or_b32_e32 v2, 0x400000, v5
+; GFX11-FAKE16-NEXT: s_add_i32 s45, s45, s44
+; GFX11-FAKE16-NEXT: s_bitset1_b32 s44, 22
+; GFX11-FAKE16-NEXT: s_addk_i32 s45, 0x7fff
+; GFX11-FAKE16-NEXT: s_and_b32 s46, vcc_lo, exec_lo
+; GFX11-FAKE16-NEXT: v_add_nc_u32_e32 v3, 0x7fff, v4
+; GFX11-FAKE16-NEXT: v_or_b32_e32 v4, 0x400000, v8
+; GFX11-FAKE16-NEXT: s_cselect_b32 s44, s44, s45
+; GFX11-FAKE16-NEXT: s_lshl_b32 s3, s3, 16
+; GFX11-FAKE16-NEXT: v_cmp_u_f32_e32 vcc_lo, v8, v8
+; GFX11-FAKE16-NEXT: v_add_f32_e64 v10, 0x40c00000, s3
+; GFX11-FAKE16-NEXT: v_add_nc_u32_e32 v1, 0x7fff, v1
+; GFX11-FAKE16-NEXT: v_add_f32_e64 v9, 0x40c00000, s76
+; GFX11-FAKE16-NEXT: s_lshr_b32 s59, s44, 16
+; GFX11-FAKE16-NEXT: v_cndmask_b32_e32 v3, v3, v4, vcc_lo
+; GFX11-FAKE16-NEXT: v_add_f32_e64 v4, 0x40c00000, s75
+; GFX11-FAKE16-NEXT: v_cmp_u_f32_e32 vcc_lo, v5, v5
+; GFX11-FAKE16-NEXT: v_readfirstlane_b32 s3, v10
+; GFX11-FAKE16-NEXT: v_bfe_u32 v8, v9, 16, 1
+; GFX11-FAKE16-NEXT: v_lshrrev_b32_e32 v87, 24, v7
+; GFX11-FAKE16-NEXT: v_lshrrev_b32_e32 v96, 16, v6
+; GFX11-FAKE16-NEXT: v_cndmask_b32_e32 v1, v1, v2, vcc_lo
+; GFX11-FAKE16-NEXT: v_bfe_u32 v2, v4, 16, 1
+; GFX11-FAKE16-NEXT: s_bfe_u32 s45, s3, 0x10010
+; GFX11-FAKE16-NEXT: v_cmp_u_f32_e32 vcc_lo, v10, v10
+; GFX11-FAKE16-NEXT: s_add_i32 s45, s45, s3
+; GFX11-FAKE16-NEXT: s_bitset1_b32 s3, 22
+; GFX11-FAKE16-NEXT: v_add_nc_u32_e32 v2, v2, v4
+; GFX11-FAKE16-NEXT: s_addk_i32 s45, 0x7fff
+; GFX11-FAKE16-NEXT: s_and_b32 s44, vcc_lo, exec_lo
+; GFX11-FAKE16-NEXT: s_cselect_b32 s3, s3, s45
+; GFX11-FAKE16-NEXT: s_and_b32 s44, s2, 0xffff0000
+; GFX11-FAKE16-NEXT: v_lshrrev_b32_e32 v25, 16, v1
+; GFX11-FAKE16-NEXT: v_add_f32_e64 v1, 0x40c00000, s44
+; GFX11-FAKE16-NEXT: v_add_nc_u32_e32 v2, 0x7fff, v2
+; GFX11-FAKE16-NEXT: v_lshrrev_b32_e32 v24, 16, v3
+; GFX11-FAKE16-NEXT: v_add_nc_u32_e32 v3, v8, v9
+; GFX11-FAKE16-NEXT: v_or_b32_e32 v8, 0x400000, v9
+; GFX11-FAKE16-NEXT: v_readfirstlane_b32 s44, v1
+; GFX11-FAKE16-NEXT: v_cmp_u_f32_e32 vcc_lo, v9, v9
+; GFX11-FAKE16-NEXT: s_lshr_b32 s3, s3, 16
+; GFX11-FAKE16-NEXT: v_add_nc_u32_e32 v3, 0x7fff, v3
+; GFX11-FAKE16-NEXT: v_or_b32_e32 v10, 0x400000, v4
+; GFX11-FAKE16-NEXT: s_bfe_u32 s45, s44, 0x10010
+; GFX11-FAKE16-NEXT: v_and_b32_e32 v5, 0xffff, v24
+; GFX11-FAKE16-NEXT: s_add_i32 s45, s45, s44
+; GFX11-FAKE16-NEXT: v_cndmask_b32_e32 v3, v3, v8, vcc_lo
+; GFX11-FAKE16-NEXT: v_cmp_u_f32_e32 vcc_lo, v1, v1
+; GFX11-FAKE16-NEXT: s_addk_i32 s45, 0x7fff
+; GFX11-FAKE16-NEXT: s_bitset1_b32 s44, 22
+; GFX11-FAKE16-NEXT: v_add_f32_e64 v1, 0x40c00000, s74
+; GFX11-FAKE16-NEXT: v_lshl_or_b32 v14, v25, 16, v5
+; GFX11-FAKE16-NEXT: s_and_b32 s46, vcc_lo, exec_lo
+; GFX11-FAKE16-NEXT: s_cselect_b32 s44, s44, s45
+; GFX11-FAKE16-NEXT: s_lshl_b32 s2, s2, 16
+; GFX11-FAKE16-NEXT: v_cmp_u_f32_e32 vcc_lo, v4, v4
+; GFX11-FAKE16-NEXT: v_add_f32_e64 v8, 0x40c00000, s2
+; GFX11-FAKE16-NEXT: v_bfe_u32 v4, v1, 16, 1
+; GFX11-FAKE16-NEXT: v_lshrrev_b32_e32 v85, 24, v14
+; GFX11-FAKE16-NEXT: v_cndmask_b32_e32 v2, v2, v10, vcc_lo
+; GFX11-FAKE16-NEXT: s_delay_alu instid0(VALU_DEP_4) | instskip(NEXT) | instid1(VALU_DEP_4)
+; GFX11-FAKE16-NEXT: v_readfirstlane_b32 s2, v8
+; GFX11-FAKE16-NEXT: v_add_nc_u32_e32 v4, v4, v1
+; GFX11-FAKE16-NEXT: v_lshrrev_b32_e32 v26, 16, v3
+; GFX11-FAKE16-NEXT: v_add_f32_e64 v3, 0x40c00000, s11
+; GFX11-FAKE16-NEXT: v_cmp_u_f32_e32 vcc_lo, v8, v8
+; GFX11-FAKE16-NEXT: s_bfe_u32 s11, s2, 0x10010
+; GFX11-FAKE16-NEXT: v_lshrrev_b32_e32 v2, 16, v2
+; GFX11-FAKE16-NEXT: s_add_i32 s45, s11, s2
+; GFX11-FAKE16-NEXT: s_lshr_b32 s11, s44, 16
+; GFX11-FAKE16-NEXT: s_addk_i32 s45, 0x7fff
+; GFX11-FAKE16-NEXT: s_bitset1_b32 s2, 22
+; GFX11-FAKE16-NEXT: s_and_b32 s44, vcc_lo, exec_lo
+; GFX11-FAKE16-NEXT: s_cselect_b32 s2, s2, s45
+; GFX11-FAKE16-NEXT: s_and_b32 s44, s17, 0xffff0000
+; GFX11-FAKE16-NEXT: v_and_b32_e32 v9, 0xffff, v26
+; GFX11-FAKE16-NEXT: v_add_f32_e64 v5, 0x40c00000, s44
+; GFX11-FAKE16-NEXT: v_cmp_u_f32_e32 vcc_lo, v1, v1
+; GFX11-FAKE16-NEXT: v_bfe_u32 v10, v3, 16, 1
+; GFX11-FAKE16-NEXT: s_lshr_b32 s2, s2, 16
+; GFX11-FAKE16-NEXT: v_lshl_or_b32 v13, v2, 16, v9
+; GFX11-FAKE16-NEXT: v_add_nc_u32_e32 v2, 0x7fff, v4
+; GFX11-FAKE16-NEXT: v_or_b32_e32 v4, 0x400000, v1
+; GFX11-FAKE16-NEXT: v_readfirstlane_b32 s44, v5
+; GFX11-FAKE16-NEXT: v_add_nc_u32_e32 v8, v10, v3
+; GFX11-FAKE16-NEXT: v_or_b32_e32 v9, 0x400000, v3
+; GFX11-FAKE16-NEXT: v_lshrrev_b32_e32 v86, 16, v13
+; GFX11-FAKE16-NEXT: v_cndmask_b32_e32 v1, v2, v4, vcc_lo
+; GFX11-FAKE16-NEXT: s_bfe_u32 s45, s44, 0x10010
+; GFX11-FAKE16-NEXT: v_cmp_u_f32_e32 vcc_lo, v5, v5
+; GFX11-FAKE16-NEXT: s_add_i32 s45, s45, s44
+; GFX11-FAKE16-NEXT: s_bitset1_b32 s44, 22
+; GFX11-FAKE16-NEXT: s_addk_i32 s45, 0x7fff
+; GFX11-FAKE16-NEXT: v_add_nc_u32_e32 v8, 0x7fff, v8
+; GFX11-FAKE16-NEXT: s_and_b32 s46, vcc_lo, exec_lo
+; GFX11-FAKE16-NEXT: s_cselect_b32 s44, s44, s45
+; GFX11-FAKE16-NEXT: s_lshl_b32 s17, s17, 16
+; GFX11-FAKE16-NEXT: v_add_f32_e64 v2, 0x40c00000, s73
+; GFX11-FAKE16-NEXT: v_add_f32_e64 v4, 0x40c00000, s17
+; GFX11-FAKE16-NEXT: v_cmp_u_f32_e32 vcc_lo, v3, v3
+; GFX11-FAKE16-NEXT: v_lshrrev_b32_e32 v27, 16, v1
+; GFX11-FAKE16-NEXT: v_add_f32_e64 v1, 0x40c00000, s72
+; GFX11-FAKE16-NEXT: v_bfe_u32 v5, v2, 16, 1
+; GFX11-FAKE16-NEXT: v_readfirstlane_b32 s17, v4
+; GFX11-FAKE16-NEXT: v_cndmask_b32_e32 v3, v8, v9, vcc_lo
+; GFX11-FAKE16-NEXT: v_cmp_u_f32_e32 vcc_lo, v4, v4
+; GFX11-FAKE16-NEXT: s_lshr_b32 s72, s44, 16
+; GFX11-FAKE16-NEXT: v_add_nc_u32_e32 v5, v5, v2
+; GFX11-FAKE16-NEXT: s_bfe_u32 s45, s17, 0x10010
+; GFX11-FAKE16-NEXT: v_lshrrev_b32_e32 v28, 16, v3
+; GFX11-FAKE16-NEXT: v_and_b32_e32 v3, 0xffff, v27
+; GFX11-FAKE16-NEXT: s_add_i32 s45, s45, s17
+; GFX11-FAKE16-NEXT: s_bitset1_b32 s17, 22
+; GFX11-FAKE16-NEXT: s_addk_i32 s45, 0x7fff
+; GFX11-FAKE16-NEXT: s_and_b32 s44, vcc_lo, exec_lo
+; GFX11-FAKE16-NEXT: v_lshl_or_b32 v16, v28, 16, v3
+; GFX11-FAKE16-NEXT: v_add_nc_u32_e32 v3, 0x7fff, v5
+; GFX11-FAKE16-NEXT: v_or_b32_e32 v5, 0x400000, v2
+; GFX11-FAKE16-NEXT: v_cmp_u_f32_e32 vcc_lo, v2, v2
+; GFX11-FAKE16-NEXT: v_bfe_u32 v8, v1, 16, 1
+; GFX11-FAKE16-NEXT: s_cselect_b32 s17, s17, s45
+; GFX11-FAKE16-NEXT: s_and_b32 s44, s16, 0xffff0000
+; GFX11-FAKE16-NEXT: s_lshr_b32 s17, s17, 16
+; GFX11-FAKE16-NEXT: v_cndmask_b32_e32 v2, v3, v5, vcc_lo
+; GFX11-FAKE16-NEXT: v_add_f32_e64 v3, 0x40c00000, s63
+; GFX11-FAKE16-NEXT: v_or_b32_e32 v9, 0x400000, v1
+; GFX11-FAKE16-NEXT: v_lshrrev_b32_e32 v83, 24, v16
+; GFX11-FAKE16-NEXT: s_delay_alu instid0(VALU_DEP_4) | instskip(NEXT) | instid1(VALU_DEP_4)
+; GFX11-FAKE16-NEXT: v_lshrrev_b32_e32 v29, 16, v2
+; GFX11-FAKE16-NEXT: v_bfe_u32 v5, v3, 16, 1
+; GFX11-FAKE16-NEXT: s_delay_alu instid0(VALU_DEP_1) | instskip(NEXT) | instid1(VALU_DEP_3)
+; GFX11-FAKE16-NEXT: v_add_nc_u32_e32 v2, v5, v3
+; GFX11-FAKE16-NEXT: v_and_b32_e32 v5, 0xffff, v29
+; GFX11-FAKE16-NEXT: s_delay_alu instid0(VALU_DEP_2) | instskip(SKIP_2) | instid1(VALU_DEP_2)
+; GFX11-FAKE16-NEXT: v_add_nc_u32_e32 v2, 0x7fff, v2
+; GFX11-FAKE16-NEXT: v_add_nc_u32_e32 v4, v8, v1
+; GFX11-FAKE16-NEXT: v_add_f32_e64 v8, 0x40c00000, s44
+; GFX11-FAKE16-NEXT: v_add_nc_u32_e32 v4, 0x7fff, v4
+; GFX11-FAKE16-NEXT: s_delay_alu instid0(VALU_DEP_2) | instskip(SKIP_2) | instid1(SALU_CYCLE_1)
+; GFX11-FAKE16-NEXT: v_readfirstlane_b32 s44, v8
+; GFX11-FAKE16-NEXT: v_cmp_u_f32_e32 vcc_lo, v8, v8
+; GFX11-FAKE16-NEXT: s_bfe_u32 s45, s44, 0x10010
+; GFX11-FAKE16-NEXT: s_add_i32 s45, s45, s44
+; GFX11-FAKE16-NEXT: s_bitset1_b32 s44, 22
+; GFX11-FAKE16-NEXT: s_addk_i32 s45, 0x7fff
+; GFX11-FAKE16-NEXT: s_and_b32 s46, vcc_lo, exec_lo
+; GFX11-FAKE16-NEXT: s_cselect_b32 s44, s44, s45
+; GFX11-FAKE16-NEXT: s_lshl_b32 s16, s16, 16
+; GFX11-FAKE16-NEXT: v_cmp_u_f32_e32 vcc_lo, v1, v1
+; GFX11-FAKE16-NEXT: v_add_f32_e64 v8, 0x40c00000, s16
+; GFX11-FAKE16-NEXT: s_lshr_b32 s46, s44, 16
+; GFX11-FAKE16-NEXT: v_cndmask_b32_e32 v1, v4, v9, vcc_lo
+; GFX11-FAKE16-NEXT: s_delay_alu instid0(VALU_DEP_2)
+; GFX11-FAKE16-NEXT: v_readfirstlane_b32 s16, v8
+; GFX11-FAKE16-NEXT: v_cmp_u_f32_e32 vcc_lo, v8, v8
+; GFX11-FAKE16-NEXT: v_or_b32_e32 v8, 0x400000, v3
+; GFX11-FAKE16-NEXT: v_add_f32_e64 v4, 0x40c00000, s62
+; GFX11-FAKE16-NEXT: v_lshrrev_b32_e32 v1, 16, v1
+; GFX11-FAKE16-NEXT: s_bfe_u32 s45, s16, 0x10010
+; GFX11-FAKE16-NEXT: s_delay_alu instid0(SALU_CYCLE_1)
+; GFX11-FAKE16-NEXT: s_add_i32 s45, s45, s16
+; GFX11-FAKE16-NEXT: s_bitset1_b32 s16, 22
+; GFX11-FAKE16-NEXT: s_addk_i32 s45, 0x7fff
+; GFX11-FAKE16-NEXT: s_and_b32 s44, vcc_lo, exec_lo
+; GFX11-FAKE16-NEXT: s_cselect_b32 s16, s16, s45
+; GFX11-FAKE16-NEXT: s_and_b32 s44, s19, 0xffff0000
+; GFX11-FAKE16-NEXT: v_cmp_u_f32_e32 vcc_lo, v3, v3
+; GFX11-FAKE16-NEXT: v_add_f32_e64 v10, 0x40c00000, s44
+; GFX11-FAKE16-NEXT: v_lshl_or_b32 v15, v1, 16, v5
+; GFX11-FAKE16-NEXT: v_bfe_u32 v9, v4, 16, 1
+; GFX11-FAKE16-NEXT: s_lshr_b32 s16, s16, 16
+; GFX11-FAKE16-NEXT: v_cndmask_b32_e32 v1, v2, v8, vcc_lo
+; GFX11-FAKE16-NEXT: v_readfirstlane_b32 s44, v10
+; GFX11-FAKE16-NEXT: v_cmp_u_f32_e32 vcc_lo, v10, v10
+; GFX11-FAKE16-NEXT: v_add_nc_u32_e32 v9, v9, v4
+; GFX11-FAKE16-NEXT: v_add_f32_e64 v8, 0x40c00000, s60
+; GFX11-FAKE16-NEXT: v_or_b32_e32 v3, 0x400000, v4
+; GFX11-FAKE16-NEXT: s_bfe_u32 s45, s44, 0x10010
+; GFX11-FAKE16-NEXT: v_add_f32_e64 v5, 0x40c00000, s61
+; GFX11-FAKE16-NEXT: s_add_i32 s45, s45, s44
+; GFX11-FAKE16-NEXT: s_bitset1_b32 s44, 22
+; GFX11-FAKE16-NEXT: s_addk_i32 s45, 0x7fff
+; GFX11-FAKE16-NEXT: s_and_b32 s47, vcc_lo, exec_lo
+; GFX11-FAKE16-NEXT: s_cselect_b32 s44, s44, s45
+; GFX11-FAKE16-NEXT: s_lshl_b32 s19, s19, 16
+; GFX11-FAKE16-NEXT: v_add_nc_u32_e32 v2, 0x7fff, v9
+; GFX11-FAKE16-NEXT: v_add_f32_e64 v10, 0x40c00000, s19
+; GFX11-FAKE16-NEXT: v_cmp_u_f32_e32 vcc_lo, v4, v4
+; GFX11-FAKE16-NEXT: v_bfe_u32 v9, v8, 16, 1
+; GFX11-FAKE16-NEXT: s_lshr_b32 s60, s44, 16
+; GFX11-FAKE16-NEXT: v_lshrrev_b32_e32 v30, 16, v1
+; GFX11-FAKE16-NEXT: v_readfirstlane_b32 s19, v10
+; GFX11-FAKE16-NEXT: v_cndmask_b32_e32 v2, v2, v3, vcc_lo
+; GFX11-FAKE16-NEXT: v_cmp_u_f32_e32 vcc_lo, v10, v10
+; GFX11-FAKE16-NEXT: v_bfe_u32 v3, v5, 16, 1
+; GFX11-FAKE16-NEXT: v_add_nc_u32_e32 v4, v9, v8
+; GFX11-FAKE16-NEXT: s_bfe_u32 s45, s19, 0x10010
+; GFX11-FAKE16-NEXT: v_lshrrev_b32_e32 v31, 16, v2
+; GFX11-FAKE16-NEXT: s_add_i32 s45, s45, s19
+; GFX11-FAKE16-NEXT: s_bitset1_b32 s19, 22
+; GFX11-FAKE16-NEXT: s_addk_i32 s45, 0x7fff
+; GFX11-FAKE16-NEXT: s_and_b32 s44, vcc_lo, exec_lo
+; GFX11-FAKE16-NEXT: s_cselect_b32 s19, s19, s45
+; GFX11-FAKE16-NEXT: s_and_b32 s44, s18, 0xffff0000
+; GFX11-FAKE16-NEXT: v_add_nc_u32_e32 v1, v3, v5
+; GFX11-FAKE16-NEXT: v_add_nc_u32_e32 v2, 0x7fff, v4
+; GFX11-FAKE16-NEXT: v_or_b32_e32 v3, 0x400000, v8
+; GFX11-FAKE16-NEXT: v_add_f32_e64 v4, 0x40c00000, s44
+; GFX11-FAKE16-NEXT: v_cmp_u_f32_e32 vcc_lo, v8, v8
+; GFX11-FAKE16-NEXT: v_add_nc_u32_e32 v1, 0x7fff, v1
+; GFX11-FAKE16-NEXT: v_or_b32_e32 v9, 0x400000, v5
+; GFX11-FAKE16-NEXT: s_lshr_b32 s19, s19, 16
+; GFX11-FAKE16-NEXT: v_add_f32_e64 v10, 0x40c00000, s29
+; GFX11-FAKE16-NEXT: v_cndmask_b32_e32 v2, v2, v3, vcc_lo
+; GFX11-FAKE16-NEXT: v_add_f32_e64 v3, 0x40c00000, s41
+; GFX11-FAKE16-NEXT: v_readfirstlane_b32 s41, v4
+; GFX11-FAKE16-NEXT: v_cmp_u_f32_e32 vcc_lo, v5, v5
+; GFX11-FAKE16-NEXT: s_pack_ll_b32_b16 s47, s17, s72
+; GFX11-FAKE16-NEXT: v_lshrrev_b32_e32 v32, 16, v2
+; GFX11-FAKE16-NEXT: v_bfe_u32 v2, v3, 16, 1
+; GFX11-FAKE16-NEXT: s_bfe_u32 s44, s41, 0x10010
+; GFX11-FAKE16-NEXT: v_cndmask_b32_e32 v1, v1, v9, vcc_lo
+; GFX11-FAKE16-NEXT: v_cmp_u_f32_e32 vcc_lo, v4, v4
+; GFX11-FAKE16-NEXT: s_add_i32 s44, s44, s41
+; GFX11-FAKE16-NEXT: s_bitset1_b32 s41, 22
+; GFX11-FAKE16-NEXT: s_addk_i32 s44, 0x7fff
+; GFX11-FAKE16-NEXT: v_add_f32_e64 v9, 0x40c00000, s40
+; GFX11-FAKE16-NEXT: s_and_b32 s45, vcc_lo, exec_lo
+; GFX11-FAKE16-NEXT: s_cselect_b32 s41, s41, s44
+; GFX11-FAKE16-NEXT: s_lshl_b32 s18, s18, 16
+; GFX11-FAKE16-NEXT: v_and_b32_e32 v4, 0xffff, v31
+; GFX11-FAKE16-NEXT: v_add_f32_e64 v5, 0x40c00000, s18
+; GFX11-FAKE16-NEXT: v_lshrrev_b32_e32 v1, 16, v1
+; GFX11-FAKE16-NEXT: v_and_b32_e32 v8, 0xffff, v32
+; GFX11-FAKE16-NEXT: v_add_nc_u32_e32 v2, v2, v3
+; GFX11-FAKE16-NEXT: v_lshl_or_b32 v18, v30, 16, v4
+; GFX11-FAKE16-NEXT: v_readfirstlane_b32 s18, v5
+; GFX11-FAKE16-NEXT: v_cmp_u_f32_e32 vcc_lo, v5, v5
+; GFX11-FAKE16-NEXT: v_lshl_or_b32 v17, v1, 16, v8
+; GFX11-FAKE16-NEXT: v_add_nc_u32_e32 v1, 0x7fff, v2
+; GFX11-FAKE16-NEXT: v_or_b32_e32 v4, 0x400000, v3
+; GFX11-FAKE16-NEXT: s_bfe_u32 s40, s18, 0x10010
+; GFX11-FAKE16-NEXT: v_add_f32_e64 v8, 0x40c00000, s28
+; GFX11-FAKE16-NEXT: s_add_i32 s44, s40, s18
+; GFX11-FAKE16-NEXT: s_lshr_b32 s40, s41, 16
+; GFX11-FAKE16-NEXT: s_addk_i32 s44, 0x7fff
+; GFX11-FAKE16-NEXT: s_bitset1_b32 s18, 22
+; GFX11-FAKE16-NEXT: s_and_b32 s41, vcc_lo, exec_lo
+; GFX11-FAKE16-NEXT: s_cselect_b32 s18, s18, s44
+; GFX11-FAKE16-NEXT: s_and_b32 s41, s21, 0xffff0000
+; GFX11-FAKE16-NEXT: v_cmp_u_f32_e32 vcc_lo, v3, v3
+; GFX11-FAKE16-NEXT: v_add_f32_e64 v5, 0x40c00000, s41
+; GFX11-FAKE16-NEXT: v_bfe_u32 v2, v9, 16, 1
+; GFX11-FAKE16-NEXT: s_lshr_b32 s18, s18, 16
+; GFX11-FAKE16-NEXT: v_or_b32_e32 v3, 0x400000, v9
+; GFX11-FAKE16-NEXT: v_cndmask_b32_e32 v1, v1, v4, vcc_lo
+; GFX11-FAKE16-NEXT: v_readfirstlane_b32 s28, v5
+; GFX11-FAKE16-NEXT: v_cmp_u_f32_e32 vcc_lo, v5, v5
+; GFX11-FAKE16-NEXT: v_add_nc_u32_e32 v2, v2, v9
+; GFX11-FAKE16-NEXT: v_bfe_u32 v4, v8, 16, 1
+; GFX11-FAKE16-NEXT: v_bfe_u32 v5, v10, 16, 1
+; GFX11-FAKE16-NEXT: s_bfe_u32 s29, s28, 0x10010
+; GFX11-FAKE16-NEXT: v_lshrrev_b32_e32 v33, 16, v1
+; GFX11-FAKE16-NEXT: s_add_i32 s29, s29, s28
+; GFX11-FAKE16-NEXT: s_bitset1_b32 s28, 22
+; GFX11-FAKE16-NEXT: s_addk_i32 s29, 0x7fff
+; GFX11-FAKE16-NEXT: s_and_b32 s41, vcc_lo, exec_lo
+; GFX11-FAKE16-NEXT: s_cselect_b32 s28, s28, s29
+; GFX11-FAKE16-NEXT: s_lshl_b32 s21, s21, 16
+; GFX11-FAKE16-NEXT: v_add_nc_u32_e32 v2, 0x7fff, v2
+; GFX11-FAKE16-NEXT: v_add_f32_e64 v11, 0x40c00000, s21
+; GFX11-FAKE16-NEXT: v_cmp_u_f32_e32 vcc_lo, v9, v9
+; GFX11-FAKE16-NEXT: s_lshr_b32 s61, s28, 16
+; GFX11-FAKE16-NEXT: v_add_nc_u32_e32 v1, v5, v10
+; GFX11-FAKE16-NEXT: s_pack_ll_b32_b16 s44, s2, s11
+; GFX11-FAKE16-NEXT: v_readfirstlane_b32 s21, v11
+; GFX11-FAKE16-NEXT: v_cndmask_b32_e32 v2, v2, v3, vcc_lo
+; GFX11-FAKE16-NEXT: v_cmp_u_f32_e32 vcc_lo, v11, v11
+; GFX11-FAKE16-NEXT: v_add_nc_u32_e32 v3, v4, v8
+; GFX11-FAKE16-NEXT: v_add_nc_u32_e32 v1, 0x7fff, v1
+; GFX11-FAKE16-NEXT: s_bfe_u32 s29, s21, 0x10010
+; GFX11-FAKE16-NEXT: v_lshrrev_b32_e32 v34, 16, v2
+; GFX11-FAKE16-NEXT: s_add_i32 s29, s29, s21
+; GFX11-FAKE16-NEXT: s_bitset1_b32 s21, 22
+; GFX11-FAKE16-NEXT: s_addk_i32 s29, 0x7fff
+; GFX11-FAKE16-NEXT: s_and_b32 s28, vcc_lo, exec_lo
+; GFX11-FAKE16-NEXT: s_cselect_b32 s21, s21, s29
+; GFX11-FAKE16-NEXT: s_and_b32 s28, s20, 0xffff0000
+; GFX11-FAKE16-NEXT: v_add_nc_u32_e32 v2, 0x7fff, v3
+; GFX11-FAKE16-NEXT: v_or_b32_e32 v3, 0x400000, v8
+; GFX11-FAKE16-NEXT: v_add_f32_e64 v5, 0x40c00000, s28
+; GFX11-FAKE16-NEXT: v_cmp_u_f32_e32 vcc_lo, v8, v8
+; GFX11-FAKE16-NEXT: v_or_b32_e32 v4, 0x400000, v10
+; GFX11-FAKE16-NEXT: s_lshr_b32 s21, s21, 16
+; GFX11-FAKE16-NEXT: s_pack_ll_b32_b16 s45, s3, s59
+; GFX11-FAKE16-NEXT: s_pack_ll_b32_b16 s46, s16, s46
+; GFX11-FAKE16-NEXT: v_cndmask_b32_e32 v2, v2, v3, vcc_lo
+; GFX11-FAKE16-NEXT: v_cmp_u_f32_e32 vcc_lo, v10, v10
+; GFX11-FAKE16-NEXT: v_add_f32_e64 v3, 0x40c00000, s13
+; GFX11-FAKE16-NEXT: v_readfirstlane_b32 s13, v5
+; GFX11-FAKE16-NEXT: v_lshrrev_b32_e32 v81, 24, v18
+; GFX11-FAKE16-NEXT: v_lshrrev_b32_e32 v2, 16, v2
+; GFX11-FAKE16-NEXT: v_cndmask_b32_e32 v1, v1, v4, vcc_lo
+; GFX11-FAKE16-NEXT: v_cmp_u_f32_e32 vcc_lo, v5, v5
+; GFX11-FAKE16-NEXT: s_bfe_u32 s28, s13, 0x10010
+; GFX11-FAKE16-NEXT: v_and_b32_e32 v4, 0xffff, v34
+; GFX11-FAKE16-NEXT: s_add_i32 s28, s28, s13
+; GFX11-FAKE16-NEXT: s_bitset1_b32 s13, 22
+; GFX11-FAKE16-NEXT: s_addk_i32 s28, 0x7fff
+; GFX11-FAKE16-NEXT: s_and_b32 s29, vcc_lo, exec_lo
+; GFX11-FAKE16-NEXT: s_cselect_b32 s13, s13, s28
+; GFX11-FAKE16-NEXT: s_lshl_b32 s20, s20, 16
+; GFX11-FAKE16-NEXT: v_lshrrev_b32_e32 v35, 16, v1
+; GFX11-FAKE16-NEXT: v_add_f32_e64 v8, 0x40c00000, s20
+; GFX11-FAKE16-NEXT: v_bfe_u32 v1, v3, 16, 1
+; GFX11-FAKE16-NEXT: v_add_f32_e64 v5, 0x40c00000, s10
+; GFX11-FAKE16-NEXT: v_lshl_or_b32 v20, v33, 16, v4
+; GFX11-FAKE16-NEXT: v_and_b32_e32 v9, 0xffff, v35
+; GFX11-FAKE16-NEXT: v_readfirstlane_b32 s20, v8
+; GFX11-FAKE16-NEXT: v_add_nc_u32_e32 v1, v1, v3
+; GFX11-FAKE16-NEXT: v_cmp_u_f32_e32 vcc_lo, v8, v8
+; GFX11-FAKE16-NEXT: v_bfe_u32 v4, v5, 16, 1
+; GFX11-FAKE16-NEXT: v_lshl_or_b32 v19, v2, 16, v9
+; GFX11-FAKE16-NEXT: s_bfe_u32 s10, s20, 0x10010
+; GFX11-FAKE16-NEXT: v_add_nc_u32_e32 v1, 0x7fff, v1
+; GFX11-FAKE16-NEXT: s_add_i32 s28, s10, s20
+; GFX11-FAKE16-NEXT: s_lshr_b32 s10, s13, 16
+; GFX11-FAKE16-NEXT: s_addk_i32 s28, 0x7fff
+; GFX11-FAKE16-NEXT: s_bitset1_b32 s20, 22
+; GFX11-FAKE16-NEXT: v_or_b32_e32 v2, 0x400000, v3
+; GFX11-FAKE16-NEXT: s_and_b32 s13, vcc_lo, exec_lo
+; GFX11-FAKE16-NEXT: v_cmp_u_f32_e32 vcc_lo, v3, v3
+; GFX11-FAKE16-NEXT: s_cselect_b32 s13, s20, s28
+; GFX11-FAKE16-NEXT: s_and_b32 s20, s23, 0xffff0000
+; GFX11-FAKE16-NEXT: v_add_nc_u32_e32 v4, v4, v5
+; GFX11-FAKE16-NEXT: v_add_f32_e64 v8, 0x40c00000, s42
+; GFX11-FAKE16-NEXT: v_cndmask_b32_e32 v1, v1, v2, vcc_lo
+; GFX11-FAKE16-NEXT: v_add_f32_e64 v2, 0x40c00000, s20
+; GFX11-FAKE16-NEXT: v_cmp_u_f32_e32 vcc_lo, v5, v5
+; GFX11-FAKE16-NEXT: v_add_nc_u32_e32 v3, 0x7fff, v4
+; GFX11-FAKE16-NEXT: v_or_b32_e32 v4, 0x400000, v5
+; GFX11-FAKE16-NEXT: v_add_f32_e64 v9, 0x40c00000, s43
+; GFX11-FAKE16-NEXT: v_readfirstlane_b32 s28, v2
+; GFX11-FAKE16-NEXT: v_lshrrev_b32_e32 v37, 16, v1
+; GFX11-FAKE16-NEXT: v_lshrrev_b32_e32 v80, 16, v19
+; GFX11-FAKE16-NEXT: v_cndmask_b32_e32 v3, v3, v4, vcc_lo
+; GFX11-FAKE16-NEXT: v_cmp_u_f32_e32 vcc_lo, v2, v2
+; GFX11-FAKE16-NEXT: s_bfe_u32 s20, s28, 0x10010
+; GFX11-FAKE16-NEXT: v_bfe_u32 v4, v8, 16, 1
+; GFX11-FAKE16-NEXT: s_add_i32 s29, s20, s28
+; GFX11-FAKE16-NEXT: s_lshr_b32 s20, s13, 16
+; GFX11-FAKE16-NEXT: s_addk_i32 s29, 0x7fff
+; GFX11-FAKE16-NEXT: s_bitset1_b32 s28, 22
+; GFX11-FAKE16-NEXT: s_and_b32 s13, vcc_lo, exec_lo
+; GFX11-FAKE16-NEXT: s_cselect_b32 s13, s28, s29
+; GFX11-FAKE16-NEXT: s_lshl_b32 s23, s23, 16
+; GFX11-FAKE16-NEXT: v_bfe_u32 v5, v9, 16, 1
+; GFX11-FAKE16-NEXT: v_add_f32_e64 v2, 0x40c00000, s23
+; GFX11-FAKE16-NEXT: v_lshrrev_b32_e32 v36, 16, v3
+; GFX11-FAKE16-NEXT: v_add_nc_u32_e32 v3, v4, v8
+; GFX11-FAKE16-NEXT: s_lshr_b32 s62, s13, 16
+; GFX11-FAKE16-NEXT: v_add_nc_u32_e32 v1, v5, v9
+; GFX11-FAKE16-NEXT: v_readfirstlane_b32 s23, v2
+; GFX11-FAKE16-NEXT: v_cmp_u_f32_e32 vcc_lo, v2, v2
+; GFX11-FAKE16-NEXT: v_add_nc_u32_e32 v3, 0x7fff, v3
+; GFX11-FAKE16-NEXT: v_or_b32_e32 v5, 0x400000, v8
+; GFX11-FAKE16-NEXT: v_add_nc_u32_e32 v1, 0x7fff, v1
+; GFX11-FAKE16-NEXT: s_bfe_u32 s28, s23, 0x10010
+; GFX11-FAKE16-NEXT: v_or_b32_e32 v10, 0x400000, v9
+; GFX11-FAKE16-NEXT: s_add_i32 s28, s28, s23
+; GFX11-FAKE16-NEXT: s_bitset1_b32 s23, 22
+; GFX11-FAKE16-NEXT: s_addk_i32 s28, 0x7fff
+; GFX11-FAKE16-NEXT: s_and_b32 s13, vcc_lo, exec_lo
+; GFX11-FAKE16-NEXT: v_cmp_u_f32_e32 vcc_lo, v8, v8
+; GFX11-FAKE16-NEXT: s_cselect_b32 s13, s23, s28
+; GFX11-FAKE16-NEXT: s_and_b32 s23, s22, 0xffff0000
+; GFX11-FAKE16-NEXT: v_add_f32_e64 v8, 0x40c00000, s15
+; GFX11-FAKE16-NEXT: v_and_b32_e32 v4, 0xffff, v36
+; GFX11-FAKE16-NEXT: v_cndmask_b32_e32 v2, v3, v5, vcc_lo
+; GFX11-FAKE16-NEXT: v_add_f32_e64 v3, 0x40c00000, s23
+; GFX11-FAKE16-NEXT: v_cmp_u_f32_e32 vcc_lo, v9, v9
+; GFX11-FAKE16-NEXT: v_add_f32_e64 v5, 0x40c00000, s14
+; GFX11-FAKE16-NEXT: s_lshr_b32 s23, s13, 16
+; GFX11-FAKE16-NEXT: v_bfe_u32 v9, v8, 16, 1
+; GFX11-FAKE16-NEXT: v_readfirstlane_b32 s14, v3
+; GFX11-FAKE16-NEXT: v_cndmask_b32_e32 v1, v1, v10, vcc_lo
+; GFX11-FAKE16-NEXT: v_cmp_u_f32_e32 vcc_lo, v3, v3
+; GFX11-FAKE16-NEXT: v_lshl_or_b32 v71, v37, 16, v4
+; GFX11-FAKE16-NEXT: v_add_f32_e64 v4, 0x40c00000, s12
+; GFX11-FAKE16-NEXT: s_bfe_u32 s15, s14, 0x10010
+; GFX11-FAKE16-NEXT: v_lshrrev_b32_e32 v38, 16, v1
+; GFX11-FAKE16-NEXT: s_add_i32 s15, s15, s14
+; GFX11-FAKE16-NEXT: s_bitset1_b32 s14, 22
+; GFX11-FAKE16-NEXT: s_addk_i32 s15, 0x7fff
+; GFX11-FAKE16-NEXT: s_and_b32 s13, vcc_lo, exec_lo
+; GFX11-FAKE16-NEXT: s_cselect_b32 s13, s14, s15
+; GFX11-FAKE16-NEXT: s_lshl_b32 s14, s22, 16
+; GFX11-FAKE16-NEXT: v_lshrrev_b32_e32 v2, 16, v2
+; GFX11-FAKE16-NEXT: v_add_f32_e64 v10, 0x40c00000, s14
+; GFX11-FAKE16-NEXT: v_bfe_u32 v1, v5, 16, 1
+; GFX11-FAKE16-NEXT: v_and_b32_e32 v3, 0xffff, v38
+; GFX11-FAKE16-NEXT: v_add_nc_u32_e32 v9, v9, v8
+; GFX11-FAKE16-NEXT: s_lshr_b32 s13, s13, 16
+; GFX11-FAKE16-NEXT: v_readfirstlane_b32 s14, v10
+; GFX11-FAKE16-NEXT: v_cmp_u_f32_e32 vcc_lo, v10, v10
+; GFX11-FAKE16-NEXT: v_add_nc_u32_e32 v1, v1, v5
+; GFX11-FAKE16-NEXT: v_lshl_or_b32 v70, v2, 16, v3
+; GFX11-FAKE16-NEXT: v_add_nc_u32_e32 v2, 0x7fff, v9
+; GFX11-FAKE16-NEXT: s_bfe_u32 s12, s14, 0x10010
+; GFX11-FAKE16-NEXT: v_or_b32_e32 v3, 0x400000, v8
+; GFX11-FAKE16-NEXT: s_add_i32 s12, s12, s14
+; GFX11-FAKE16-NEXT: s_bitset1_b32 s14, 22
+; GFX11-FAKE16-NEXT: s_addk_i32 s12, 0x7fff
+; GFX11-FAKE16-NEXT: s_and_b32 s15, vcc_lo, exec_lo
+; GFX11-FAKE16-NEXT: s_cselect_b32 s12, s14, s12
+; GFX11-FAKE16-NEXT: s_and_b32 s14, s25, 0xffff0000
+; GFX11-FAKE16-NEXT: v_cmp_u_f32_e32 vcc_lo, v8, v8
+; GFX11-FAKE16-NEXT: v_add_f32_e64 v10, 0x40c00000, s14
+; GFX11-FAKE16-NEXT: v_add_nc_u32_e32 v1, 0x7fff, v1
+; GFX11-FAKE16-NEXT: v_or_b32_e32 v9, 0x400000, v5
+; GFX11-FAKE16-NEXT: v_add_f32_e64 v8, 0x40c00000, s9
+; GFX11-FAKE16-NEXT: v_cndmask_b32_e32 v2, v2, v3, vcc_lo
+; GFX11-FAKE16-NEXT: v_readfirstlane_b32 s9, v10
+; GFX11-FAKE16-NEXT: v_cmp_u_f32_e32 vcc_lo, v5, v5
+; GFX11-FAKE16-NEXT: s_lshr_b32 s22, s12, 16
+; GFX11-FAKE16-NEXT: v_bfe_u32 v3, v4, 16, 1
+; GFX11-FAKE16-NEXT: v_lshrrev_b32_e32 v39, 16, v2
+; GFX11-FAKE16-NEXT: s_bfe_u32 s14, s9, 0x10010
+; GFX11-FAKE16-NEXT: v_cndmask_b32_e32 v1, v1, v9, vcc_lo
+; GFX11-FAKE16-NEXT: v_cmp_u_f32_e32 vcc_lo, v10, v10
+; GFX11-FAKE16-NEXT: s_add_i32 s14, s14, s9
+; GFX11-FAKE16-NEXT: s_bitset1_b32 s9, 22
+; GFX11-FAKE16-NEXT: s_addk_i32 s14, 0x7fff
+; GFX11-FAKE16-NEXT: v_lshrrev_b32_e32 v48, 16, v1
+; GFX11-FAKE16-NEXT: s_and_b32 s12, vcc_lo, exec_lo
+; GFX11-FAKE16-NEXT: s_cselect_b32 s9, s9, s14
+; GFX11-FAKE16-NEXT: s_lshl_b32 s12, s25, 16
+; GFX11-FAKE16-NEXT: v_add_f32_e64 v9, 0x40c00000, s8
+; GFX11-FAKE16-NEXT: v_add_f32_e64 v1, 0x40c00000, s12
+; GFX11-FAKE16-NEXT: v_add_nc_u32_e32 v2, v3, v4
+; GFX11-FAKE16-NEXT: s_lshr_b32 s63, s9, 16
+; GFX11-FAKE16-NEXT: v_bfe_u32 v3, v8, 16, 1
+; GFX11-FAKE16-NEXT: v_or_b32_e32 v10, 0x400000, v4
+; GFX11-FAKE16-NEXT: v_readfirstlane_b32 s8, v1
+; GFX11-FAKE16-NEXT: v_cmp_u_f32_e32 vcc_lo, v1, v1
+; GFX11-FAKE16-NEXT: v_add_nc_u32_e32 v2, 0x7fff, v2
+; GFX11-FAKE16-NEXT: v_add_nc_u32_e32 v3, v3, v8
+; GFX11-FAKE16-NEXT: v_or_b32_e32 v11, 0x400000, v8
+; GFX11-FAKE16-NEXT: s_bfe_u32 s12, s8, 0x10010
+; GFX11-FAKE16-NEXT: v_bfe_u32 v12, v9, 16, 1
+; GFX11-FAKE16-NEXT: s_add_i32 s12, s12, s8
+; GFX11-FAKE16-NEXT: s_bitset1_b32 s8, 22
+; GFX11-FAKE16-NEXT: s_addk_i32 s12, 0x7fff
+; GFX11-FAKE16-NEXT: s_and_b32 s9, vcc_lo, exec_lo
+; GFX11-FAKE16-NEXT: v_cmp_u_f32_e32 vcc_lo, v4, v4
+; GFX11-FAKE16-NEXT: s_cselect_b32 s8, s8, s12
+; GFX11-FAKE16-NEXT: s_and_b32 s9, s24, 0xffff0000
+; GFX11-FAKE16-NEXT: v_add_nc_u32_e32 v3, 0x7fff, v3
+; GFX11-FAKE16-NEXT: s_lshr_b32 s25, s8, 16
+; GFX11-FAKE16-NEXT: v_cndmask_b32_e32 v1, v2, v10, vcc_lo
+; GFX11-FAKE16-NEXT: v_add_f32_e64 v2, 0x40c00000, s9
+; GFX11-FAKE16-NEXT: v_cmp_u_f32_e32 vcc_lo, v8, v8
+; GFX11-FAKE16-NEXT: v_add_f32_e64 v8, 0x40c00000, s7
+; GFX11-FAKE16-NEXT: v_add_nc_u32_e32 v4, v12, v9
+; GFX11-FAKE16-NEXT: v_add_f32_e64 v12, 0x40c00000, s6
+; GFX11-FAKE16-NEXT: v_readfirstlane_b32 s7, v2
+; GFX11-FAKE16-NEXT: v_cndmask_b32_e32 v3, v3, v11, vcc_lo
+; GFX11-FAKE16-NEXT: v_cmp_u_f32_e32 vcc_lo, v2, v2
+; GFX11-FAKE16-NEXT: v_or_b32_e32 v2, 0x400000, v9
+; GFX11-FAKE16-NEXT: s_pack_ll_b32_b16 s28, s0, s5
+; GFX11-FAKE16-NEXT: s_bfe_u32 s9, s7, 0x10010
+; GFX11-FAKE16-NEXT: v_lshrrev_b32_e32 v49, 16, v3
+; GFX11-FAKE16-NEXT: s_add_i32 s9, s9, s7
+; GFX11-FAKE16-NEXT: s_bitset1_b32 s7, 22
+; GFX11-FAKE16-NEXT: s_addk_i32 s9, 0x7fff
+; GFX11-FAKE16-NEXT: s_and_b32 s8, vcc_lo, exec_lo
+; GFX11-FAKE16-NEXT: s_cselect_b32 s7, s7, s9
+; GFX11-FAKE16-NEXT: s_lshl_b32 s8, s24, 16
+; GFX11-FAKE16-NEXT: v_add_nc_u32_e32 v3, 0x7fff, v4
+; GFX11-FAKE16-NEXT: v_add_f32_e64 v10, 0x40c00000, s8
+; GFX11-FAKE16-NEXT: v_cmp_u_f32_e32 vcc_lo, v9, v9
+; GFX11-FAKE16-NEXT: v_add_f32_e64 v9, 0x40c00000, s4
+; GFX11-FAKE16-NEXT: v_bfe_u32 v4, v8, 16, 1
+; GFX11-FAKE16-NEXT: s_lshr_b32 s12, s7, 16
+; GFX11-FAKE16-NEXT: v_readfirstlane_b32 s8, v10
+; GFX11-FAKE16-NEXT: v_cndmask_b32_e32 v2, v3, v2, vcc_lo
+; GFX11-FAKE16-NEXT: v_cmp_u_f32_e32 vcc_lo, v10, v10
+; GFX11-FAKE16-NEXT: v_add_nc_u32_e32 v4, v4, v8
+; GFX11-FAKE16-NEXT: v_bfe_u32 v10, v12, 16, 1
+; GFX11-FAKE16-NEXT: s_bfe_u32 s4, s8, 0x10010
+; GFX11-FAKE16-NEXT: v_lshrrev_b32_e32 v50, 16, v2
+; GFX11-FAKE16-NEXT: s_add_i32 s4, s4, s8
+; GFX11-FAKE16-NEXT: s_bitset1_b32 s8, 22
+; GFX11-FAKE16-NEXT: s_addk_i32 s4, 0x7fff
+; GFX11-FAKE16-NEXT: s_and_b32 s6, vcc_lo, exec_lo
+; GFX11-FAKE16-NEXT: s_cselect_b32 s4, s8, s4
+; GFX11-FAKE16-NEXT: s_and_b32 s6, s27, 0xffff0000
+; GFX11-FAKE16-NEXT: v_add_nc_u32_e32 v3, 0x7fff, v4
+; GFX11-FAKE16-NEXT: v_add_f32_e64 v52, 0x40c00000, s6
+; GFX11-FAKE16-NEXT: v_or_b32_e32 v4, 0x400000, v8
+; GFX11-FAKE16-NEXT: v_cmp_u_f32_e32 vcc_lo, v8, v8
+; GFX11-FAKE16-NEXT: v_add_nc_u32_e32 v8, v10, v12
+; GFX11-FAKE16-NEXT: s_lshr_b32 s24, s4, 16
+; GFX11-FAKE16-NEXT: v_readfirstlane_b32 s6, v52
+; GFX11-FAKE16-NEXT: v_or_b32_e32 v10, 0x400000, v9
+; GFX11-FAKE16-NEXT: v_cndmask_b32_e32 v3, v3, v4, vcc_lo
+; GFX11-FAKE16-NEXT: v_cmp_u_f32_e32 vcc_lo, v52, v52
+; GFX11-FAKE16-NEXT: v_bfe_u32 v4, v9, 16, 1
+; GFX11-FAKE16-NEXT: s_bfe_u32 s7, s6, 0x10010
+; GFX11-FAKE16-NEXT: v_lshrrev_b32_e32 v1, 16, v1
+; GFX11-FAKE16-NEXT: s_add_i32 s7, s7, s6
+; GFX11-FAKE16-NEXT: s_bitset1_b32 s6, 22
+; GFX11-FAKE16-NEXT: s_addk_i32 s7, 0x7fff
+; GFX11-FAKE16-NEXT: s_and_b32 s4, vcc_lo, exec_lo
+; GFX11-FAKE16-NEXT: s_cselect_b32 s4, s6, s7
+; GFX11-FAKE16-NEXT: s_lshl_b32 s6, s27, 16
+; GFX11-FAKE16-NEXT: v_lshrrev_b32_e32 v51, 16, v3
+; GFX11-FAKE16-NEXT: v_add_nc_u32_e32 v2, v4, v9
+; GFX11-FAKE16-NEXT: v_add_nc_u32_e32 v3, 0x7fff, v8
+; GFX11-FAKE16-NEXT: v_or_b32_e32 v4, 0x400000, v12
+; GFX11-FAKE16-NEXT: v_add_f32_e64 v8, 0x40c00000, s6
+; GFX11-FAKE16-NEXT: v_cmp_u_f32_e32 vcc_lo, v12, v12
+; GFX11-FAKE16-NEXT: v_add_nc_u32_e32 v2, 0x7fff, v2
+; GFX11-FAKE16-NEXT: s_lshr_b32 s73, s4, 16
+; GFX11-FAKE16-NEXT: v_and_b32_e32 v11, 0xffff, v49
+; GFX11-FAKE16-NEXT: v_readfirstlane_b32 s6, v8
+; GFX11-FAKE16-NEXT: v_cndmask_b32_e32 v3, v3, v4, vcc_lo
+; GFX11-FAKE16-NEXT: v_cmp_u_f32_e32 vcc_lo, v9, v9
+; GFX11-FAKE16-NEXT: v_and_b32_e32 v4, 0xffff, v51
+; GFX11-FAKE16-NEXT: v_lshl_or_b32 v66, v1, 16, v11
+; GFX11-FAKE16-NEXT: s_bfe_u32 s7, s6, 0x10010
+; GFX11-FAKE16-NEXT: v_lshrrev_b32_e32 v52, 16, v3
+; GFX11-FAKE16-NEXT: v_cndmask_b32_e32 v2, v2, v10, vcc_lo
+; GFX11-FAKE16-NEXT: v_cmp_u_f32_e32 vcc_lo, v8, v8
+; GFX11-FAKE16-NEXT: s_add_i32 s7, s7, s6
+; GFX11-FAKE16-NEXT: s_bitset1_b32 s6, 22
+; GFX11-FAKE16-NEXT: s_addk_i32 s7, 0x7fff
+; GFX11-FAKE16-NEXT: v_lshrrev_b32_e32 v2, 16, v2
+; GFX11-FAKE16-NEXT: s_and_b32 s4, vcc_lo, exec_lo
+; GFX11-FAKE16-NEXT: s_cselect_b32 s4, s6, s7
+; GFX11-FAKE16-NEXT: s_and_b32 s6, s26, 0xffff0000
+; GFX11-FAKE16-NEXT: s_lshr_b32 s27, s4, 16
+; GFX11-FAKE16-NEXT: v_add_f32_e64 v3, 0x40c00000, s6
+; GFX11-FAKE16-NEXT: v_and_b32_e32 v8, 0xffff, v52
+; GFX11-FAKE16-NEXT: v_and_b32_e32 v5, 0xffff, v39
+; GFX11-FAKE16-NEXT: v_lshl_or_b32 v55, v50, 16, v4
+; GFX11-FAKE16-NEXT: s_pack_ll_b32_b16 s8, s22, s13
+; GFX11-FAKE16-NEXT: v_readfirstlane_b32 s6, v3
+; GFX11-FAKE16-NEXT: v_cmp_u_f32_e32 vcc_lo, v3, v3
+; GFX11-FAKE16-NEXT: v_lshl_or_b32 v54, v2, 16, v8
+; GFX11-FAKE16-NEXT: v_lshl_or_b32 v67, v48, 16, v5
+; GFX11-FAKE16-NEXT: v_lshrrev_b64 v[8:9], 24, v[17:18]
+; GFX11-FAKE16-NEXT: s_bfe_u32 s5, s6, 0x10010
+; GFX11-FAKE16-NEXT: v_lshrrev_b64 v[9:10], 24, v[15:16]
+; GFX11-FAKE16-NEXT: s_add_i32 s5, s5, s6
+; GFX11-FAKE16-NEXT: s_bitset1_b32 s6, 22
+; GFX11-FAKE16-NEXT: s_addk_i32 s5, 0x7fff
+; GFX11-FAKE16-NEXT: s_and_b32 s4, vcc_lo, exec_lo
+; GFX11-FAKE16-NEXT: s_cselect_b32 s14, s6, s5
+; GFX11-FAKE16-NEXT: s_lshl_b32 s4, s26, 16
+; GFX11-FAKE16-NEXT: s_pack_ll_b32_b16 s6, s20, s10
+; GFX11-FAKE16-NEXT: v_add_f32_e64 v1, 0x40c00000, s4
+; GFX11-FAKE16-NEXT: s_lshr_b32 s13, s14, 16
+; GFX11-FAKE16-NEXT: v_lshrrev_b64 v[10:11], 24, v[13:14]
+; GFX11-FAKE16-NEXT: v_lshrrev_b64 v[11:12], 24, v[6:7]
+; GFX11-FAKE16-NEXT: s_pack_ll_b32_b16 s29, s1, s58
+; GFX11-FAKE16-NEXT: v_readfirstlane_b32 s11, v1
+; GFX11-FAKE16-NEXT: v_cmp_u_f32_e32 vcc_lo, v1, v1
+; GFX11-FAKE16-NEXT: v_lshrrev_b64 v[1:2], 24, v[54:55]
+; GFX11-FAKE16-NEXT: v_lshrrev_b64 v[2:3], 24, v[66:67]
+; GFX11-FAKE16-NEXT: v_lshrrev_b64 v[3:4], 24, v[70:71]
+; GFX11-FAKE16-NEXT: s_bfe_u32 s10, s11, 0x10010
+; GFX11-FAKE16-NEXT: v_lshrrev_b64 v[4:5], 24, v[19:20]
+; GFX11-FAKE16-NEXT: s_add_i32 s10, s10, s11
+; GFX11-FAKE16-NEXT: s_bitset1_b32 s11, 22
+; GFX11-FAKE16-NEXT: s_addk_i32 s10, 0x7fff
+; GFX11-FAKE16-NEXT: s_and_b32 s14, vcc_lo, exec_lo
+; GFX11-FAKE16-NEXT: s_cselect_b32 s10, s11, s10
+; GFX11-FAKE16-NEXT: s_pack_ll_b32_b16 s5, s19, s60
+; GFX11-FAKE16-NEXT: s_lshr_b32 s26, s10, 16
+; GFX11-FAKE16-NEXT: s_pack_ll_b32_b16 s4, s18, s40
+; GFX11-FAKE16-NEXT: s_pack_ll_b32_b16 s9, s23, s62
+; GFX11-FAKE16-NEXT: v_lshrrev_b32_e32 v5, 24, v55
+; GFX11-FAKE16-NEXT: v_lshrrev_b32_e32 v12, 8, v55
+; GFX11-FAKE16-NEXT: v_lshrrev_b32_e32 v53, 16, v54
+; GFX11-FAKE16-NEXT: v_lshrrev_b32_e32 v54, 8, v54
+; GFX11-FAKE16-NEXT: v_lshrrev_b32_e32 v55, 24, v67
+; GFX11-FAKE16-NEXT: v_lshrrev_b32_e32 v64, 8, v67
+; GFX11-FAKE16-NEXT: v_lshrrev_b32_e32 v65, 16, v66
+; GFX11-FAKE16-NEXT: v_lshrrev_b32_e32 v66, 8, v66
+; GFX11-FAKE16-NEXT: v_lshrrev_b32_e32 v67, 24, v71
+; GFX11-FAKE16-NEXT: v_lshrrev_b32_e32 v68, 8, v71
+; GFX11-FAKE16-NEXT: v_lshrrev_b32_e32 v69, 16, v70
+; GFX11-FAKE16-NEXT: v_lshrrev_b32_e32 v70, 8, v70
+; GFX11-FAKE16-NEXT: v_lshrrev_b32_e32 v71, 24, v20
+; GFX11-FAKE16-NEXT: v_lshrrev_b32_e32 v20, 8, v20
+; GFX11-FAKE16-NEXT: v_lshrrev_b32_e32 v19, 8, v19
+; GFX11-FAKE16-NEXT: v_lshrrev_b32_e32 v18, 8, v18
+; GFX11-FAKE16-NEXT: v_lshrrev_b32_e32 v82, 16, v17
+; GFX11-FAKE16-NEXT: v_lshrrev_b32_e32 v17, 8, v17
+; GFX11-FAKE16-NEXT: v_lshrrev_b32_e32 v16, 8, v16
+; GFX11-FAKE16-NEXT: v_lshrrev_b32_e32 v84, 16, v15
+; GFX11-FAKE16-NEXT: v_lshrrev_b32_e32 v15, 8, v15
+; GFX11-FAKE16-NEXT: v_lshrrev_b32_e32 v14, 8, v14
+; GFX11-FAKE16-NEXT: v_lshrrev_b32_e32 v13, 8, v13
+; GFX11-FAKE16-NEXT: v_lshrrev_b32_e32 v7, 8, v7
+; GFX11-FAKE16-NEXT: v_lshrrev_b32_e32 v6, 8, v6
+; GFX11-FAKE16-NEXT: s_pack_ll_b32_b16 s7, s21, s61
+; GFX11-FAKE16-NEXT: s_pack_ll_b32_b16 s11, s25, s63
+; GFX11-FAKE16-NEXT: s_pack_ll_b32_b16 s57, s27, s73
+; GFX11-FAKE16-NEXT: s_pack_ll_b32_b16 s56, s26, s13
+; GFX11-FAKE16-NEXT: s_pack_ll_b32_b16 s10, s24, s12
+; GFX11-FAKE16-NEXT: s_lshr_b64 s[94:95], s[8:9], 24
+; GFX11-FAKE16-NEXT: s_lshr_b64 s[12:13], s[4:5], 24
+; GFX11-FAKE16-NEXT: s_lshr_b64 s[14:15], s[46:47], 24
+; GFX11-FAKE16-NEXT: s_lshr_b64 s[40:41], s[44:45], 24
+; GFX11-FAKE16-NEXT: s_lshr_b64 s[42:43], s[28:29], 24
+; GFX11-FAKE16-NEXT: s_lshr_b64 vcc, s[56:57], 24
+; GFX11-FAKE16-NEXT: s_lshr_b64 s[34:35], s[10:11], 24
+; GFX11-FAKE16-NEXT: s_lshr_b64 s[30:31], s[6:7], 24
+; GFX11-FAKE16-NEXT: s_lshr_b32 s13, s57, 24
+; GFX11-FAKE16-NEXT: s_lshr_b32 s15, s57, 8
+; GFX11-FAKE16-NEXT: s_lshr_b32 s41, s56, 16
+; GFX11-FAKE16-NEXT: s_lshr_b32 s43, s56, 8
+; GFX11-FAKE16-NEXT: s_lshr_b32 s56, s11, 24
+; GFX11-FAKE16-NEXT: s_lshr_b32 s11, s11, 8
+; GFX11-FAKE16-NEXT: s_lshr_b32 s57, s10, 16
+; GFX11-FAKE16-NEXT: s_lshr_b32 s10, s10, 8
+; GFX11-FAKE16-NEXT: s_lshr_b32 s74, s9, 24
+; GFX11-FAKE16-NEXT: s_lshr_b32 s9, s9, 8
+; GFX11-FAKE16-NEXT: s_lshr_b32 s75, s8, 16
+; GFX11-FAKE16-NEXT: s_lshr_b32 s8, s8, 8
+; GFX11-FAKE16-NEXT: s_lshr_b32 s76, s7, 24
+; GFX11-FAKE16-NEXT: s_lshr_b32 s77, s7, 8
+; GFX11-FAKE16-NEXT: s_lshr_b32 s78, s6, 16
+; GFX11-FAKE16-NEXT: s_lshr_b32 s79, s6, 8
+; GFX11-FAKE16-NEXT: s_lshr_b32 s88, s5, 24
+; GFX11-FAKE16-NEXT: s_lshr_b32 s89, s5, 8
+; GFX11-FAKE16-NEXT: s_lshr_b32 s90, s4, 16
+; GFX11-FAKE16-NEXT: s_lshr_b32 s91, s4, 8
+; GFX11-FAKE16-NEXT: s_lshr_b32 s92, s47, 24
+; GFX11-FAKE16-NEXT: s_lshr_b32 s47, s47, 8
+; GFX11-FAKE16-NEXT: s_lshr_b32 s93, s46, 16
+; GFX11-FAKE16-NEXT: s_lshr_b32 s46, s46, 8
+; GFX11-FAKE16-NEXT: s_lshr_b32 s95, s45, 24
+; GFX11-FAKE16-NEXT: s_lshr_b32 s45, s45, 8
+; GFX11-FAKE16-NEXT: s_lshr_b32 s99, s44, 16
+; GFX11-FAKE16-NEXT: s_lshr_b32 s100, s44, 8
+; GFX11-FAKE16-NEXT: s_lshr_b32 s101, s29, 24
+; GFX11-FAKE16-NEXT: s_lshr_b32 s102, s29, 8
+; GFX11-FAKE16-NEXT: s_lshr_b32 s103, s28, 16
+; GFX11-FAKE16-NEXT: s_lshr_b32 s104, s28, 8
+; GFX11-FAKE16-NEXT: s_branch .LBB91_5
+; GFX11-FAKE16-NEXT: .LBB91_3:
+; GFX11-FAKE16-NEXT: ; implicit-def: $sgpr4
+; GFX11-FAKE16-NEXT: ; kill: killed $sgpr4
+; GFX11-FAKE16-NEXT: ; implicit-def: $sgpr74
+; GFX11-FAKE16-NEXT: ; implicit-def: $sgpr4
+; GFX11-FAKE16-NEXT: ; kill: killed $sgpr4
+; GFX11-FAKE16-NEXT: ; implicit-def: $sgpr104
+; GFX11-FAKE16-NEXT: ; implicit-def: $sgpr103
+; GFX11-FAKE16-NEXT: ; implicit-def: $sgpr42
+; GFX11-FAKE16-NEXT: ; implicit-def: $sgpr102
+; GFX11-FAKE16-NEXT: ; implicit-def: $sgpr11
+; GFX11-FAKE16-NEXT: ; implicit-def: $sgpr101
+; GFX11-FAKE16-NEXT: ; implicit-def: $sgpr100
+; GFX11-FAKE16-NEXT: ; implicit-def: $sgpr99
+; GFX11-FAKE16-NEXT: ; implicit-def: $sgpr40
+; GFX11-FAKE16-NEXT: ; implicit-def: $sgpr14
+; GFX11-FAKE16-NEXT: ; implicit-def: $sgpr12
+; GFX11-FAKE16-NEXT: ; implicit-def: $sgpr49
+; GFX11-FAKE16-NEXT: ; implicit-def: $sgpr37
+; GFX11-FAKE16-NEXT: ; implicit-def: $sgpr35
+; GFX11-FAKE16-NEXT: ; implicit-def: $sgpr6
+; GFX11-FAKE16-NEXT: ; implicit-def: $sgpr34
+; GFX11-FAKE16-NEXT: ; implicit-def: $sgpr52
+; GFX11-FAKE16-NEXT: ; implicit-def: $sgpr53
+; GFX11-FAKE16-NEXT: ; implicit-def: $sgpr50
+; GFX11-FAKE16-NEXT: ; implicit-def: $sgpr7
+; GFX11-FAKE16-NEXT: ; implicit-def: $sgpr36
+; GFX11-FAKE16-NEXT: ; implicit-def: $sgpr64
+; GFX11-FAKE16-NEXT: ; implicit-def: $sgpr38
+; GFX11-FAKE16-NEXT: ; implicit-def: $sgpr54
+; GFX11-FAKE16-NEXT: ; implicit-def: $sgpr96
+; GFX11-FAKE16-NEXT: ; implicit-def: $sgpr51
+; GFX11-FAKE16-NEXT: ; implicit-def: $sgpr67
+; GFX11-FAKE16-NEXT: ; implicit-def: $sgpr68
+; GFX11-FAKE16-NEXT: ; implicit-def: $sgpr65
+; GFX11-FAKE16-NEXT: ; implicit-def: $sgpr8
+; GFX11-FAKE16-NEXT: ; implicit-def: $sgpr55
+; GFX11-FAKE16-NEXT: ; implicit-def: $sgpr39
+; GFX11-FAKE16-NEXT: ; implicit-def: $sgpr71
+; GFX11-FAKE16-NEXT: ; implicit-def: $sgpr69
+; GFX11-FAKE16-NEXT: ; implicit-def: $sgpr9
+; GFX11-FAKE16-NEXT: ; implicit-def: $sgpr66
+; GFX11-FAKE16-NEXT: ; implicit-def: $sgpr82
+; GFX11-FAKE16-NEXT: ; implicit-def: $sgpr83
+; GFX11-FAKE16-NEXT: ; implicit-def: $sgpr80
+; GFX11-FAKE16-NEXT: ; implicit-def: $sgpr97
+; GFX11-FAKE16-NEXT: ; implicit-def: $sgpr70
+; GFX11-FAKE16-NEXT: ; implicit-def: $sgpr48
+; GFX11-FAKE16-NEXT: ; implicit-def: $sgpr84
+; GFX11-FAKE16-NEXT: ; implicit-def: $sgpr98
+; GFX11-FAKE16-NEXT: ; implicit-def: $sgpr81
+; GFX11-FAKE16-NEXT: ; implicit-def: $sgpr86
+; GFX11-FAKE16-NEXT: ; implicit-def: $sgpr87
+; GFX11-FAKE16-NEXT: ; implicit-def: $sgpr10
+; GFX11-FAKE16-NEXT: ; implicit-def: $sgpr85
+; GFX11-FAKE16-NEXT: ; implicit-def: $sgpr30
+; GFX11-FAKE16-NEXT: ; implicit-def: $sgpr94
+; GFX11-FAKE16-NEXT: ; implicit-def: $sgpr92
+; GFX11-FAKE16-NEXT: ; implicit-def: $sgpr90
+; GFX11-FAKE16-NEXT: ; implicit-def: $sgpr88
+; GFX11-FAKE16-NEXT: ; implicit-def: $sgpr78
+; GFX11-FAKE16-NEXT: ; implicit-def: $sgpr76
+; GFX11-FAKE16-NEXT: ; implicit-def: $sgpr4
+; GFX11-FAKE16-NEXT: ; kill: killed $sgpr4
+; GFX11-FAKE16-NEXT: ; implicit-def: $sgpr4
+; GFX11-FAKE16-NEXT: ; kill: killed $sgpr4
+; GFX11-FAKE16-NEXT: ; implicit-def: $sgpr4
+; GFX11-FAKE16-NEXT: ; kill: killed $sgpr4
+; GFX11-FAKE16-NEXT: ; implicit-def: $sgpr4
+; GFX11-FAKE16-NEXT: ; kill: killed $sgpr4
+; GFX11-FAKE16-NEXT: ; implicit-def: $sgpr4
+; GFX11-FAKE16-NEXT: ; kill: killed $sgpr4
+; GFX11-FAKE16-NEXT: ; implicit-def: $sgpr4
+; GFX11-FAKE16-NEXT: ; kill: killed $sgpr4
+; GFX11-FAKE16-NEXT: ; implicit-def: $sgpr4
+; GFX11-FAKE16-NEXT: ; kill: killed $sgpr4
+; GFX11-FAKE16-NEXT: ; implicit-def: $sgpr4
+; GFX11-FAKE16-NEXT: ; kill: killed $sgpr4
+; GFX11-FAKE16-NEXT: ; implicit-def: $sgpr4
+; GFX11-FAKE16-NEXT: ; kill: killed $sgpr4
+; GFX11-FAKE16-NEXT: ; implicit-def: $sgpr4
+; GFX11-FAKE16-NEXT: ; kill: killed $sgpr4
+; GFX11-FAKE16-NEXT: ; implicit-def: $sgpr4
+; GFX11-FAKE16-NEXT: ; kill: killed $sgpr4
+; GFX11-FAKE16-NEXT: ; implicit-def: $sgpr4
+; GFX11-FAKE16-NEXT: ; kill: killed $sgpr4
+; GFX11-FAKE16-NEXT: ; implicit-def: $sgpr4
+; GFX11-FAKE16-NEXT: ; kill: killed $sgpr4
+; GFX11-FAKE16-NEXT: ; implicit-def: $sgpr4
+; GFX11-FAKE16-NEXT: v_writelane_b32 v43, s4, 0
+; GFX11-FAKE16-NEXT: v_writelane_b32 v43, s5, 1
+; GFX11-FAKE16-NEXT: ; implicit-def: $sgpr4
+; GFX11-FAKE16-NEXT: ; kill: killed $sgpr4
+; GFX11-FAKE16-NEXT: ; implicit-def: $sgpr4
+; GFX11-FAKE16-NEXT: ; kill: killed $sgpr4
+; GFX11-FAKE16-NEXT: ; implicit-def: $sgpr4
+; GFX11-FAKE16-NEXT: ; kill: killed $sgpr4
+; GFX11-FAKE16-NEXT: ; implicit-def: $sgpr4
+; GFX11-FAKE16-NEXT: ; kill: killed $sgpr4
+; GFX11-FAKE16-NEXT: ; implicit-def: $sgpr4
+; GFX11-FAKE16-NEXT: ; kill: killed $sgpr4
+; GFX11-FAKE16-NEXT: ; implicit-def: $sgpr4
+; GFX11-FAKE16-NEXT: v_writelane_b32 v43, s4, 2
+; GFX11-FAKE16-NEXT: v_writelane_b32 v43, s5, 3
+; GFX11-FAKE16-NEXT: ; implicit-def: $sgpr4
+; GFX11-FAKE16-NEXT: ; kill: killed $sgpr4
+; GFX11-FAKE16-NEXT: ; implicit-def: $sgpr4
+; GFX11-FAKE16-NEXT: ; kill: killed $sgpr4
+; GFX11-FAKE16-NEXT: ; implicit-def: $sgpr4
+; GFX11-FAKE16-NEXT: ; kill: killed $sgpr4
+; GFX11-FAKE16-NEXT: ; implicit-def: $sgpr4
+; GFX11-FAKE16-NEXT: ; kill: killed $sgpr4
+; GFX11-FAKE16-NEXT: ; implicit-def: $sgpr4
+; GFX11-FAKE16-NEXT: v_writelane_b32 v43, s74, 4
+; GFX11-FAKE16-NEXT: ; kill: killed $sgpr4
+; GFX11-FAKE16-NEXT: ; implicit-def: $sgpr4
+; GFX11-FAKE16-NEXT: ; kill: killed $sgpr4
+; GFX11-FAKE16-NEXT: ; implicit-def: $sgpr4
+; GFX11-FAKE16-NEXT: ; kill: killed $sgpr4
+; GFX11-FAKE16-NEXT: ; implicit-def: $sgpr4
+; GFX11-FAKE16-NEXT: ; kill: killed $sgpr4
+; GFX11-FAKE16-NEXT: ; implicit-def: $sgpr4
+; GFX11-FAKE16-NEXT: ; kill: killed $sgpr4
+; GFX11-FAKE16-NEXT: ; implicit-def: $sgpr4
+; GFX11-FAKE16-NEXT: v_writelane_b32 v43, s75, 5
+; GFX11-FAKE16-NEXT: ; implicit-def: $sgpr74
+; GFX11-FAKE16-NEXT: ; kill: killed $sgpr4
+; GFX11-FAKE16-NEXT: ; implicit-def: $sgpr4
+; GFX11-FAKE16-NEXT: ; kill: killed $sgpr4
+; GFX11-FAKE16-NEXT: ; implicit-def: $sgpr4
+; GFX11-FAKE16-NEXT: ; kill: killed $sgpr4
+; GFX11-FAKE16-NEXT: ; implicit-def: $sgpr4
+; GFX11-FAKE16-NEXT: ; kill: killed $sgpr4
+; GFX11-FAKE16-NEXT: ; implicit-def: $sgpr4
+; GFX11-FAKE16-NEXT: ; implicit-def: $sgpr5
+; GFX11-FAKE16-NEXT: v_writelane_b32 v43, s74, 6
+; GFX11-FAKE16-NEXT: v_writelane_b32 v43, s75, 7
+; GFX11-FAKE16-NEXT: ; implicit-def: $sgpr74
+; GFX11-FAKE16-NEXT: s_branch .LBB91_2
+; GFX11-FAKE16-NEXT: .LBB91_4:
+; GFX11-FAKE16-NEXT: v_dual_mov_b32 v10, s94 :: v_dual_mov_b32 v11, s30
+; GFX11-FAKE16-NEXT: v_readlane_b32 s94, v43, 2
+; GFX11-FAKE16-NEXT: v_dual_mov_b32 v96, s37 :: v_dual_mov_b32 v87, s34
+; GFX11-FAKE16-NEXT: v_dual_mov_b32 v6, s49 :: v_dual_mov_b32 v7, s35
+; GFX11-FAKE16-NEXT: v_readlane_b32 s95, v43, 3
+; GFX11-FAKE16-NEXT: v_readlane_b32 vcc_lo, v43, 6
+; GFX11-FAKE16-NEXT: v_readlane_b32 s30, v43, 0
+; GFX11-FAKE16-NEXT: v_readlane_b32 s34, v43, 4
+; GFX11-FAKE16-NEXT: v_dual_mov_b32 v52, s44 :: v_dual_mov_b32 v51, s45
+; GFX11-FAKE16-NEXT: v_dual_mov_b32 v50, s10 :: v_dual_mov_b32 v49, s46
+; GFX11-FAKE16-NEXT: v_dual_mov_b32 v39, s47 :: v_dual_mov_b32 v48, s98
+; GFX11-FAKE16-NEXT: v_dual_mov_b32 v38, s56 :: v_dual_mov_b32 v37, s97
+; GFX11-FAKE16-NEXT: v_dual_mov_b32 v36, s57 :: v_dual_mov_b32 v35, s58
+; GFX11-FAKE16-NEXT: v_dual_mov_b32 v34, s59 :: v_dual_mov_b32 v33, s9
+; GFX11-FAKE16-NEXT: v_dual_mov_b32 v32, s60 :: v_dual_mov_b32 v31, s61
+; GFX11-FAKE16-NEXT: v_dual_mov_b32 v30, s8 :: v_dual_mov_b32 v29, s62
+; GFX11-FAKE16-NEXT: v_dual_mov_b32 v27, s63 :: v_dual_mov_b32 v28, s96
+; GFX11-FAKE16-NEXT: v_dual_mov_b32 v26, s72 :: v_dual_mov_b32 v25, s7
+; GFX11-FAKE16-NEXT: v_dual_mov_b32 v24, s73 :: v_dual_mov_b32 v23, s28
+; GFX11-FAKE16-NEXT: v_dual_mov_b32 v21, s29 :: v_dual_mov_b32 v22, s6
+; GFX11-FAKE16-NEXT: v_dual_mov_b32 v53, s87 :: v_dual_mov_b32 v54, s86
+; GFX11-FAKE16-NEXT: v_dual_mov_b32 v5, s85 :: v_dual_mov_b32 v12, s5
+; GFX11-FAKE16-NEXT: v_dual_mov_b32 v65, s4 :: v_dual_mov_b32 v66, s48
+; GFX11-FAKE16-NEXT: v_dual_mov_b32 v55, s81 :: v_dual_mov_b32 v64, s84
+; GFX11-FAKE16-NEXT: v_dual_mov_b32 v69, s83 :: v_dual_mov_b32 v70, s82
+; GFX11-FAKE16-NEXT: v_dual_mov_b32 v67, s70 :: v_dual_mov_b32 v68, s80
+; GFX11-FAKE16-NEXT: v_dual_mov_b32 v80, s71 :: v_dual_mov_b32 v19, s39
+; GFX11-FAKE16-NEXT: v_dual_mov_b32 v71, s66 :: v_dual_mov_b32 v20, s69
+; GFX11-FAKE16-NEXT: v_dual_mov_b32 v82, s68 :: v_dual_mov_b32 v17, s67
+; GFX11-FAKE16-NEXT: v_dual_mov_b32 v81, s55 :: v_dual_mov_b32 v18, s65
+; GFX11-FAKE16-NEXT: v_dual_mov_b32 v84, s38 :: v_dual_mov_b32 v15, s64
+; GFX11-FAKE16-NEXT: v_dual_mov_b32 v83, s51 :: v_dual_mov_b32 v16, s54
+; GFX11-FAKE16-NEXT: v_dual_mov_b32 v86, s53 :: v_dual_mov_b32 v13, s52
+; GFX11-FAKE16-NEXT: v_dual_mov_b32 v85, s36 :: v_dual_mov_b32 v14, s50
+; GFX11-FAKE16-NEXT: v_dual_mov_b32 v1, s74 :: v_dual_mov_b32 v2, s76
+; GFX11-FAKE16-NEXT: v_dual_mov_b32 v3, s78 :: v_dual_mov_b32 v4, s88
+; GFX11-FAKE16-NEXT: v_dual_mov_b32 v8, s90 :: v_dual_mov_b32 v9, s92
+; GFX11-FAKE16-NEXT: s_mov_b32 s58, s11
+; GFX11-FAKE16-NEXT: v_readlane_b32 s59, v43, 8
+; GFX11-FAKE16-NEXT: v_readlane_b32 s72, v43, 9
+; GFX11-FAKE16-NEXT: v_readlane_b32 s60, v43, 10
+; GFX11-FAKE16-NEXT: v_readlane_b32 s61, v43, 11
+; GFX11-FAKE16-NEXT: v_readlane_b32 s62, v43, 12
+; GFX11-FAKE16-NEXT: v_readlane_b32 s63, v43, 13
+; GFX11-FAKE16-NEXT: v_readlane_b32 s73, v43, 14
+; GFX11-FAKE16-NEXT: v_readlane_b32 s13, v43, 15
+; GFX11-FAKE16-NEXT: v_readlane_b32 s15, v43, 16
+; GFX11-FAKE16-NEXT: v_readlane_b32 s41, v43, 17
+; GFX11-FAKE16-NEXT: v_readlane_b32 s43, v43, 18
+; GFX11-FAKE16-NEXT: v_readlane_b32 s56, v43, 19
+; GFX11-FAKE16-NEXT: v_readlane_b32 s11, v43, 20
+; GFX11-FAKE16-NEXT: v_readlane_b32 s57, v43, 21
+; GFX11-FAKE16-NEXT: v_readlane_b32 s10, v43, 22
+; GFX11-FAKE16-NEXT: v_readlane_b32 s74, v43, 23
+; GFX11-FAKE16-NEXT: v_readlane_b32 s9, v43, 24
+; GFX11-FAKE16-NEXT: v_readlane_b32 s75, v43, 25
+; GFX11-FAKE16-NEXT: v_readlane_b32 s8, v43, 26
+; GFX11-FAKE16-NEXT: v_readlane_b32 s76, v43, 27
+; GFX11-FAKE16-NEXT: v_readlane_b32 s77, v43, 28
+; GFX11-FAKE16-NEXT: v_readlane_b32 s78, v43, 29
+; GFX11-FAKE16-NEXT: v_readlane_b32 s79, v43, 30
+; GFX11-FAKE16-NEXT: v_readlane_b32 s88, v43, 31
+; GFX11-FAKE16-NEXT: v_readlane_b32 s89, v42, 0
+; GFX11-FAKE16-NEXT: v_readlane_b32 s90, v42, 1
+; GFX11-FAKE16-NEXT: v_readlane_b32 s91, v42, 2
+; GFX11-FAKE16-NEXT: v_readlane_b32 s92, v42, 3
+; GFX11-FAKE16-NEXT: v_readlane_b32 s47, v42, 4
+; GFX11-FAKE16-NEXT: v_readlane_b32 s93, v42, 5
+; GFX11-FAKE16-NEXT: v_readlane_b32 vcc_hi, v43, 7
+; GFX11-FAKE16-NEXT: v_readlane_b32 s46, v42, 6
+; GFX11-FAKE16-NEXT: v_readlane_b32 s31, v43, 1
+; GFX11-FAKE16-NEXT: v_readlane_b32 s95, v42, 7
+; GFX11-FAKE16-NEXT: v_readlane_b32 s45, v42, 8
+; GFX11-FAKE16-NEXT: v_readlane_b32 s35, v43, 5
+; GFX11-FAKE16-NEXT: .LBB91_5: ; %end
+; GFX11-FAKE16-NEXT: s_and_b32 s0, s0, 0xff
+; GFX11-FAKE16-NEXT: s_lshl_b32 s4, s104, 8
+; GFX11-FAKE16-NEXT: s_and_b32 s5, s103, 0xff
+; GFX11-FAKE16-NEXT: s_lshl_b32 s6, s42, 8
+; GFX11-FAKE16-NEXT: s_or_b32 s0, s0, s4
+; GFX11-FAKE16-NEXT: s_or_b32 s4, s5, s6
+; GFX11-FAKE16-NEXT: s_and_b32 s1, s1, 0xff
+; GFX11-FAKE16-NEXT: s_lshl_b32 s5, s102, 8
+; GFX11-FAKE16-NEXT: s_and_b32 s6, s58, 0xff
+; GFX11-FAKE16-NEXT: s_lshl_b32 s7, s101, 8
+; GFX11-FAKE16-NEXT: s_or_b32 s1, s1, s5
+; GFX11-FAKE16-NEXT: s_or_b32 s5, s6, s7
+; GFX11-FAKE16-NEXT: s_and_b32 s0, s0, 0xffff
+; GFX11-FAKE16-NEXT: s_lshl_b32 s4, s4, 16
+; GFX11-FAKE16-NEXT: s_and_b32 s1, s1, 0xffff
+; GFX11-FAKE16-NEXT: s_lshl_b32 s5, s5, 16
+; GFX11-FAKE16-NEXT: s_or_b32 s0, s0, s4
+; GFX11-FAKE16-NEXT: s_or_b32 s1, s1, s5
+; GFX11-FAKE16-NEXT: s_and_b32 s2, s2, 0xff
+; GFX11-FAKE16-NEXT: s_lshl_b32 s4, s100, 8
+; GFX11-FAKE16-NEXT: s_and_b32 s5, s99, 0xff
+; GFX11-FAKE16-NEXT: s_lshl_b32 s6, s40, 8
+; GFX11-FAKE16-NEXT: s_or_b32 s2, s2, s4
+; GFX11-FAKE16-NEXT: s_or_b32 s4, s5, s6
+; GFX11-FAKE16-NEXT: s_and_b32 s3, s3, 0xff
+; GFX11-FAKE16-NEXT: s_lshl_b32 s5, s45, 8
+; GFX11-FAKE16-NEXT: s_and_b32 s6, s59, 0xff
+; GFX11-FAKE16-NEXT: s_lshl_b32 s7, s95, 8
+; GFX11-FAKE16-NEXT: s_or_b32 s3, s3, s5
+; GFX11-FAKE16-NEXT: s_or_b32 s5, s6, s7
+; GFX11-FAKE16-NEXT: s_and_b32 s2, s2, 0xffff
+; GFX11-FAKE16-NEXT: s_lshl_b32 s4, s4, 16
+; GFX11-FAKE16-NEXT: s_and_b32 s3, s3, 0xffff
+; GFX11-FAKE16-NEXT: s_lshl_b32 s5, s5, 16
+; GFX11-FAKE16-NEXT: s_or_b32 s2, s2, s4
+; GFX11-FAKE16-NEXT: s_or_b32 s3, s3, s5
+; GFX11-FAKE16-NEXT: v_dual_mov_b32 v97, s0 :: v_dual_mov_b32 v98, s1
+; GFX11-FAKE16-NEXT: v_dual_mov_b32 v99, s2 :: v_dual_mov_b32 v100, s3
+; GFX11-FAKE16-NEXT: s_and_b32 s0, s16, 0xff
+; GFX11-FAKE16-NEXT: s_lshl_b32 s1, s46, 8
+; GFX11-FAKE16-NEXT: s_and_b32 s2, s93, 0xff
+; GFX11-FAKE16-NEXT: s_lshl_b32 s3, s14, 8
+; GFX11-FAKE16-NEXT: s_or_b32 s0, s0, s1
+; GFX11-FAKE16-NEXT: s_or_b32 s1, s2, s3
+; GFX11-FAKE16-NEXT: s_and_b32 s2, s17, 0xff
+; GFX11-FAKE16-NEXT: s_lshl_b32 s3, s47, 8
+; GFX11-FAKE16-NEXT: s_and_b32 s4, s72, 0xff
+; GFX11-FAKE16-NEXT: s_lshl_b32 s5, s92, 8
+; GFX11-FAKE16-NEXT: s_or_b32 s2, s2, s3
+; GFX11-FAKE16-NEXT: s_or_b32 s3, s4, s5
+; GFX11-FAKE16-NEXT: s_and_b32 s0, s0, 0xffff
+; GFX11-FAKE16-NEXT: s_lshl_b32 s1, s1, 16
+; GFX11-FAKE16-NEXT: s_and_b32 s2, s2, 0xffff
+; GFX11-FAKE16-NEXT: s_lshl_b32 s3, s3, 16
+; GFX11-FAKE16-NEXT: s_or_b32 s0, s0, s1
+; GFX11-FAKE16-NEXT: s_or_b32 s1, s2, s3
+; GFX11-FAKE16-NEXT: s_and_b32 s2, s18, 0xff
+; GFX11-FAKE16-NEXT: s_lshl_b32 s3, s91, 8
+; GFX11-FAKE16-NEXT: s_and_b32 s4, s90, 0xff
+; GFX11-FAKE16-NEXT: s_lshl_b32 s5, s12, 8
+; GFX11-FAKE16-NEXT: s_or_b32 s2, s2, s3
+; GFX11-FAKE16-NEXT: s_or_b32 s3, s4, s5
+; GFX11-FAKE16-NEXT: s_and_b32 s4, s19, 0xff
+; GFX11-FAKE16-NEXT: s_lshl_b32 s5, s89, 8
+; GFX11-FAKE16-NEXT: s_and_b32 s6, s60, 0xff
+; GFX11-FAKE16-NEXT: s_lshl_b32 s7, s88, 8
+; GFX11-FAKE16-NEXT: s_or_b32 s4, s4, s5
+; GFX11-FAKE16-NEXT: s_or_b32 s5, s6, s7
+; GFX11-FAKE16-NEXT: s_and_b32 s2, s2, 0xffff
+; GFX11-FAKE16-NEXT: s_lshl_b32 s3, s3, 16
+; GFX11-FAKE16-NEXT: s_and_b32 s4, s4, 0xffff
+; GFX11-FAKE16-NEXT: s_lshl_b32 s5, s5, 16
+; GFX11-FAKE16-NEXT: s_or_b32 s2, s2, s3
+; GFX11-FAKE16-NEXT: s_or_b32 s3, s4, s5
+; GFX11-FAKE16-NEXT: v_dual_mov_b32 v112, s0 :: v_dual_mov_b32 v113, s1
+; GFX11-FAKE16-NEXT: v_dual_mov_b32 v114, s2 :: v_dual_mov_b32 v115, s3
+; GFX11-FAKE16-NEXT: s_and_b32 s0, s20, 0xff
+; GFX11-FAKE16-NEXT: s_lshl_b32 s1, s79, 8
+; GFX11-FAKE16-NEXT: s_and_b32 s2, s78, 0xff
+; GFX11-FAKE16-NEXT: s_lshl_b32 s3, s30, 8
+; GFX11-FAKE16-NEXT: s_or_b32 s0, s0, s1
+; GFX11-FAKE16-NEXT: s_or_b32 s1, s2, s3
+; GFX11-FAKE16-NEXT: s_and_b32 s2, s21, 0xff
+; GFX11-FAKE16-NEXT: s_lshl_b32 s3, s77, 8
+; GFX11-FAKE16-NEXT: s_and_b32 s4, s61, 0xff
+; GFX11-FAKE16-NEXT: s_lshl_b32 s5, s76, 8
+; GFX11-FAKE16-NEXT: s_or_b32 s2, s2, s3
+; GFX11-FAKE16-NEXT: s_or_b32 s3, s4, s5
+; GFX11-FAKE16-NEXT: s_and_b32 s0, s0, 0xffff
+; GFX11-FAKE16-NEXT: s_lshl_b32 s1, s1, 16
+; GFX11-FAKE16-NEXT: s_and_b32 s2, s2, 0xffff
+; GFX11-FAKE16-NEXT: s_lshl_b32 s3, s3, 16
+; GFX11-FAKE16-NEXT: s_or_b32 s0, s0, s1
+; GFX11-FAKE16-NEXT: s_or_b32 s1, s2, s3
+; GFX11-FAKE16-NEXT: s_and_b32 s2, s22, 0xff
+; GFX11-FAKE16-NEXT: s_lshl_b32 s3, s8, 8
+; GFX11-FAKE16-NEXT: s_and_b32 s4, s75, 0xff
+; GFX11-FAKE16-NEXT: s_lshl_b32 s5, s94, 8
+; GFX11-FAKE16-NEXT: s_or_b32 s2, s2, s3
+; GFX11-FAKE16-NEXT: s_or_b32 s3, s4, s5
+; GFX11-FAKE16-NEXT: s_and_b32 s4, s23, 0xff
+; GFX11-FAKE16-NEXT: s_lshl_b32 s5, s9, 8
+; GFX11-FAKE16-NEXT: s_and_b32 s6, s62, 0xff
+; GFX11-FAKE16-NEXT: s_lshl_b32 s7, s74, 8
+; GFX11-FAKE16-NEXT: s_and_b32 s2, s2, 0xffff
+; GFX11-FAKE16-NEXT: s_lshl_b32 s3, s3, 16
+; GFX11-FAKE16-NEXT: s_or_b32 s4, s4, s5
+; GFX11-FAKE16-NEXT: s_or_b32 s5, s6, s7
+; GFX11-FAKE16-NEXT: s_and_b32 s4, s4, 0xffff
+; GFX11-FAKE16-NEXT: s_lshl_b32 s5, s5, 16
+; GFX11-FAKE16-NEXT: s_or_b32 s2, s2, s3
+; GFX11-FAKE16-NEXT: s_clause 0x1
+; GFX11-FAKE16-NEXT: scratch_store_b128 v0, v[97:100], off
+; GFX11-FAKE16-NEXT: scratch_store_b128 v0, v[112:115], off offset:16
+; GFX11-FAKE16-NEXT: s_or_b32 s3, s4, s5
+; GFX11-FAKE16-NEXT: v_dual_mov_b32 v97, s0 :: v_dual_mov_b32 v98, s1
+; GFX11-FAKE16-NEXT: v_dual_mov_b32 v99, s2 :: v_dual_mov_b32 v100, s3
+; GFX11-FAKE16-NEXT: s_and_b32 s0, s24, 0xff
+; GFX11-FAKE16-NEXT: s_lshl_b32 s1, s10, 8
+; GFX11-FAKE16-NEXT: s_and_b32 s2, s57, 0xff
+; GFX11-FAKE16-NEXT: s_lshl_b32 s4, s34, 8
+; GFX11-FAKE16-NEXT: s_or_b32 s0, s0, s1
+; GFX11-FAKE16-NEXT: s_or_b32 s1, s2, s4
+; GFX11-FAKE16-NEXT: s_and_b32 s0, s0, 0xffff
+; GFX11-FAKE16-NEXT: s_lshl_b32 s1, s1, 16
+; GFX11-FAKE16-NEXT: s_lshl_b32 s2, s11, 8
+; GFX11-FAKE16-NEXT: s_or_b32 s0, s0, s1
+; GFX11-FAKE16-NEXT: s_and_b32 s1, s25, 0xff
+; GFX11-FAKE16-NEXT: s_and_b32 s3, s63, 0xff
+; GFX11-FAKE16-NEXT: s_lshl_b32 s4, s56, 8
+; GFX11-FAKE16-NEXT: s_or_b32 s1, s1, s2
+; GFX11-FAKE16-NEXT: s_or_b32 s2, s3, s4
+; GFX11-FAKE16-NEXT: s_and_b32 s1, s1, 0xffff
+; GFX11-FAKE16-NEXT: s_lshl_b32 s2, s2, 16
+; GFX11-FAKE16-NEXT: s_and_b32 s3, s26, 0xff
+; GFX11-FAKE16-NEXT: s_lshl_b32 s4, s43, 8
+; GFX11-FAKE16-NEXT: s_or_b32 s1, s1, s2
+; GFX11-FAKE16-NEXT: s_or_b32 s2, s3, s4
+; GFX11-FAKE16-NEXT: s_and_b32 s3, s41, 0xff
+; GFX11-FAKE16-NEXT: s_lshl_b32 s4, vcc_lo, 8
+; GFX11-FAKE16-NEXT: s_lshl_b32 s5, s15, 8
+; GFX11-FAKE16-NEXT: s_or_b32 s3, s3, s4
+; GFX11-FAKE16-NEXT: s_and_b32 s4, s27, 0xff
+; GFX11-FAKE16-NEXT: s_lshl_b32 s6, s13, 8
+; GFX11-FAKE16-NEXT: s_or_b32 s4, s4, s5
+; GFX11-FAKE16-NEXT: s_and_b32 s5, s73, 0xff
+; GFX11-FAKE16-NEXT: s_and_b32 s2, s2, 0xffff
+; GFX11-FAKE16-NEXT: s_or_b32 s5, s5, s6
+; GFX11-FAKE16-NEXT: s_lshl_b32 s3, s3, 16
+; GFX11-FAKE16-NEXT: s_and_b32 s4, s4, 0xffff
+; GFX11-FAKE16-NEXT: s_lshl_b32 s5, s5, 16
+; GFX11-FAKE16-NEXT: v_dual_mov_b32 v112, s0 :: v_dual_and_b32 v23, 0xff, v23
+; GFX11-FAKE16-NEXT: v_dual_mov_b32 v113, s1 :: v_dual_lshlrev_b32 v6, 8, v6
+; GFX11-FAKE16-NEXT: s_or_b32 s2, s2, s3
+; GFX11-FAKE16-NEXT: s_delay_alu instid0(SALU_CYCLE_1) | instskip(SKIP_1) | instid1(SALU_CYCLE_1)
+; GFX11-FAKE16-NEXT: v_dual_mov_b32 v114, s2 :: v_dual_lshlrev_b32 v11, 8, v11
+; GFX11-FAKE16-NEXT: s_or_b32 s3, s4, s5
+; GFX11-FAKE16-NEXT: v_dual_mov_b32 v115, s3 :: v_dual_and_b32 v96, 0xff, v96
+; GFX11-FAKE16-NEXT: v_or_b32_e32 v6, v23, v6
+; GFX11-FAKE16-NEXT: v_lshlrev_b32_e32 v7, 8, v7
+; GFX11-FAKE16-NEXT: v_lshlrev_b32_e32 v13, 8, v13
+; GFX11-FAKE16-NEXT: s_delay_alu instid0(VALU_DEP_4)
+; GFX11-FAKE16-NEXT: v_or_b32_e32 v11, v96, v11
+; GFX11-FAKE16-NEXT: v_lshlrev_b32_e32 v10, 8, v10
+; GFX11-FAKE16-NEXT: v_and_b32_e32 v6, 0xffff, v6
+; GFX11-FAKE16-NEXT: v_and_b32_e32 v24, 0xff, v24
+; GFX11-FAKE16-NEXT: v_lshlrev_b32_e32 v14, 8, v14
+; GFX11-FAKE16-NEXT: v_lshlrev_b32_e32 v11, 16, v11
+; GFX11-FAKE16-NEXT: v_lshlrev_b32_e32 v15, 8, v15
+; GFX11-FAKE16-NEXT: v_lshlrev_b32_e32 v9, 8, v9
+; GFX11-FAKE16-NEXT: v_lshlrev_b32_e32 v16, 8, v16
+; GFX11-FAKE16-NEXT: v_lshlrev_b32_e32 v8, 8, v8
+; GFX11-FAKE16-NEXT: v_or_b32_e32 v23, v6, v11
+; GFX11-FAKE16-NEXT: v_and_b32_e32 v6, 0xff, v21
+; GFX11-FAKE16-NEXT: v_and_b32_e32 v11, 0xff, v22
+; GFX11-FAKE16-NEXT: v_lshlrev_b32_e32 v21, 8, v87
+; GFX11-FAKE16-NEXT: v_and_b32_e32 v22, 0xff, v26
+; GFX11-FAKE16-NEXT: v_and_b32_e32 v26, 0xff, v86
+; GFX11-FAKE16-NEXT: v_or_b32_e32 v6, v6, v7
+; GFX11-FAKE16-NEXT: v_lshlrev_b32_e32 v4, 8, v4
+; GFX11-FAKE16-NEXT: v_or_b32_e32 v7, v11, v21
+; GFX11-FAKE16-NEXT: v_or_b32_e32 v11, v22, v13
+; GFX11-FAKE16-NEXT: v_or_b32_e32 v10, v26, v10
+; GFX11-FAKE16-NEXT: v_or_b32_e32 v13, v24, v14
+; GFX11-FAKE16-NEXT: v_and_b32_e32 v14, 0xff, v25
+; GFX11-FAKE16-NEXT: v_lshlrev_b32_e32 v21, 8, v85
+; GFX11-FAKE16-NEXT: v_and_b32_e32 v22, 0xff, v29
+; GFX11-FAKE16-NEXT: v_and_b32_e32 v24, 0xff, v84
+; GFX11-FAKE16-NEXT: v_and_b32_e32 v25, 0xff, v27
+; GFX11-FAKE16-NEXT: v_and_b32_e32 v26, 0xff, v28
+; GFX11-FAKE16-NEXT: v_lshlrev_b32_e32 v27, 8, v83
+; GFX11-FAKE16-NEXT: v_or_b32_e32 v14, v14, v21
+; GFX11-FAKE16-NEXT: v_or_b32_e32 v15, v22, v15
+; GFX11-FAKE16-NEXT: v_or_b32_e32 v9, v24, v9
+; GFX11-FAKE16-NEXT: v_or_b32_e32 v16, v25, v16
+; GFX11-FAKE16-NEXT: v_or_b32_e32 v21, v26, v27
+; GFX11-FAKE16-NEXT: v_and_b32_e32 v6, 0xffff, v6
+; GFX11-FAKE16-NEXT: v_lshlrev_b32_e32 v7, 16, v7
+; GFX11-FAKE16-NEXT: v_and_b32_e32 v11, 0xffff, v11
+; GFX11-FAKE16-NEXT: v_lshlrev_b32_e32 v10, 16, v10
+; GFX11-FAKE16-NEXT: v_and_b32_e32 v13, 0xffff, v13
+; GFX11-FAKE16-NEXT: v_lshlrev_b32_e32 v14, 16, v14
+; GFX11-FAKE16-NEXT: v_and_b32_e32 v15, 0xffff, v15
+; GFX11-FAKE16-NEXT: v_lshlrev_b32_e32 v9, 16, v9
+; GFX11-FAKE16-NEXT: v_and_b32_e32 v16, 0xffff, v16
+; GFX11-FAKE16-NEXT: v_lshlrev_b32_e32 v21, 16, v21
+; GFX11-FAKE16-NEXT: v_or_b32_e32 v24, v6, v7
+; GFX11-FAKE16-NEXT: v_or_b32_e32 v25, v11, v10
+; GFX11-FAKE16-NEXT: v_or_b32_e32 v26, v13, v14
+; GFX11-FAKE16-NEXT: v_or_b32_e32 v6, v15, v9
+; GFX11-FAKE16-NEXT: v_or_b32_e32 v7, v16, v21
+; GFX11-FAKE16-NEXT: v_and_b32_e32 v9, 0xff, v32
+; GFX11-FAKE16-NEXT: v_lshlrev_b32_e32 v10, 8, v17
+; GFX11-FAKE16-NEXT: v_and_b32_e32 v11, 0xff, v82
+; GFX11-FAKE16-NEXT: v_and_b32_e32 v13, 0xff, v31
+; GFX11-FAKE16-NEXT: v_lshlrev_b32_e32 v14, 8, v18
+; GFX11-FAKE16-NEXT: v_and_b32_e32 v15, 0xff, v30
+; GFX11-FAKE16-NEXT: v_lshlrev_b32_e32 v16, 8, v81
+; GFX11-FAKE16-NEXT: v_and_b32_e32 v17, 0xff, v35
+; GFX11-FAKE16-NEXT: v_lshlrev_b32_e32 v18, 8, v19
+; GFX11-FAKE16-NEXT: v_or_b32_e32 v9, v9, v10
+; GFX11-FAKE16-NEXT: v_or_b32_e32 v8, v11, v8
+; GFX11-FAKE16-NEXT: v_or_b32_e32 v10, v13, v14
+; GFX11-FAKE16-NEXT: v_or_b32_e32 v11, v15, v16
+; GFX11-FAKE16-NEXT: v_or_b32_e32 v13, v17, v18
+; GFX11-FAKE16-NEXT: v_and_b32_e32 v14, 0xff, v80
+; GFX11-FAKE16-NEXT: v_and_b32_e32 v15, 0xff, v34
+; GFX11-FAKE16-NEXT: v_lshlrev_b32_e32 v16, 8, v20
+; GFX11-FAKE16-NEXT: v_and_b32_e32 v17, 0xff, v33
+; GFX11-FAKE16-NEXT: v_lshlrev_b32_e32 v18, 8, v71
+; GFX11-FAKE16-NEXT: v_and_b32_e32 v19, 0xff, v38
+; GFX11-FAKE16-NEXT: v_lshlrev_b32_e32 v20, 8, v70
+; GFX11-FAKE16-NEXT: v_and_b32_e32 v21, 0xff, v69
+; GFX11-FAKE16-NEXT: v_lshlrev_b32_e32 v3, 8, v3
+; GFX11-FAKE16-NEXT: v_or_b32_e32 v4, v14, v4
+; GFX11-FAKE16-NEXT: v_or_b32_e32 v14, v15, v16
+; GFX11-FAKE16-NEXT: v_or_b32_e32 v15, v17, v18
+; GFX11-FAKE16-NEXT: v_or_b32_e32 v16, v19, v20
+; GFX11-FAKE16-NEXT: v_or_b32_e32 v3, v21, v3
+; GFX11-FAKE16-NEXT: v_and_b32_e32 v9, 0xffff, v9
+; GFX11-FAKE16-NEXT: v_lshlrev_b32_e32 v8, 16, v8
+; GFX11-FAKE16-NEXT: v_and_b32_e32 v10, 0xffff, v10
+; GFX11-FAKE16-NEXT: v_lshlrev_b32_e32 v11, 16, v11
+; GFX11-FAKE16-NEXT: v_and_b32_e32 v13, 0xffff, v13
+; GFX11-FAKE16-NEXT: v_lshlrev_b32_e32 v4, 16, v4
+; GFX11-FAKE16-NEXT: v_and_b32_e32 v14, 0xffff, v14
+; GFX11-FAKE16-NEXT: v_lshlrev_b32_e32 v15, 16, v15
+; GFX11-FAKE16-NEXT: v_and_b32_e32 v16, 0xffff, v16
+; GFX11-FAKE16-NEXT: v_lshlrev_b32_e32 v3, 16, v3
+; GFX11-FAKE16-NEXT: v_or_b32_e32 v8, v9, v8
+; GFX11-FAKE16-NEXT: v_or_b32_e32 v9, v10, v11
+; GFX11-FAKE16-NEXT: v_or_b32_e32 v13, v13, v4
+; GFX11-FAKE16-NEXT: v_or_b32_e32 v14, v14, v15
+; GFX11-FAKE16-NEXT: v_or_b32_e32 v15, v16, v3
+; GFX11-FAKE16-NEXT: v_and_b32_e32 v3, 0xff, v36
+; GFX11-FAKE16-NEXT: v_lshlrev_b32_e32 v4, 8, v68
+; GFX11-FAKE16-NEXT: v_and_b32_e32 v10, 0xff, v37
+; GFX11-FAKE16-NEXT: v_lshlrev_b32_e32 v11, 8, v67
+; GFX11-FAKE16-NEXT: v_and_b32_e32 v16, 0xff, v49
+; GFX11-FAKE16-NEXT: v_lshlrev_b32_e32 v17, 8, v66
+; GFX11-FAKE16-NEXT: v_and_b32_e32 v18, 0xff, v65
+; GFX11-FAKE16-NEXT: v_lshlrev_b32_e32 v2, 8, v2
+; GFX11-FAKE16-NEXT: v_and_b32_e32 v19, 0xff, v39
+; GFX11-FAKE16-NEXT: v_lshlrev_b32_e32 v20, 8, v64
+; GFX11-FAKE16-NEXT: v_or_b32_e32 v3, v3, v4
+; GFX11-FAKE16-NEXT: v_or_b32_e32 v4, v10, v11
+; GFX11-FAKE16-NEXT: v_or_b32_e32 v10, v16, v17
+; GFX11-FAKE16-NEXT: v_or_b32_e32 v2, v18, v2
+; GFX11-FAKE16-NEXT: v_or_b32_e32 v11, v19, v20
+; GFX11-FAKE16-NEXT: v_and_b32_e32 v16, 0xff, v48
+; GFX11-FAKE16-NEXT: v_lshlrev_b32_e32 v17, 8, v55
+; GFX11-FAKE16-NEXT: v_and_b32_e32 v18, 0xff, v52
+; GFX11-FAKE16-NEXT: v_lshlrev_b32_e32 v19, 8, v54
+; GFX11-FAKE16-NEXT: v_and_b32_e32 v20, 0xff, v53
+; GFX11-FAKE16-NEXT: v_lshlrev_b32_e32 v1, 8, v1
+; GFX11-FAKE16-NEXT: v_and_b32_e32 v21, 0xff, v51
+; GFX11-FAKE16-NEXT: v_lshlrev_b32_e32 v12, 8, v12
+; GFX11-FAKE16-NEXT: v_and_b32_e32 v22, 0xff, v50
+; GFX11-FAKE16-NEXT: v_lshlrev_b32_e32 v5, 8, v5
+; GFX11-FAKE16-NEXT: v_or_b32_e32 v16, v16, v17
+; GFX11-FAKE16-NEXT: v_or_b32_e32 v17, v18, v19
+; GFX11-FAKE16-NEXT: v_or_b32_e32 v1, v20, v1
+; GFX11-FAKE16-NEXT: v_or_b32_e32 v12, v21, v12
+; GFX11-FAKE16-NEXT: v_or_b32_e32 v5, v22, v5
+; GFX11-FAKE16-NEXT: v_and_b32_e32 v3, 0xffff, v3
+; GFX11-FAKE16-NEXT: v_lshlrev_b32_e32 v4, 16, v4
+; GFX11-FAKE16-NEXT: v_and_b32_e32 v10, 0xffff, v10
+; GFX11-FAKE16-NEXT: v_lshlrev_b32_e32 v2, 16, v2
+; GFX11-FAKE16-NEXT: v_and_b32_e32 v11, 0xffff, v11
+; GFX11-FAKE16-NEXT: v_lshlrev_b32_e32 v18, 16, v16
+; GFX11-FAKE16-NEXT: v_and_b32_e32 v17, 0xffff, v17
+; GFX11-FAKE16-NEXT: v_lshlrev_b32_e32 v19, 16, v1
+; GFX11-FAKE16-NEXT: v_and_b32_e32 v12, 0xffff, v12
+; GFX11-FAKE16-NEXT: v_lshlrev_b32_e32 v5, 16, v5
+; GFX11-FAKE16-NEXT: v_or_b32_e32 v16, v3, v4
+; GFX11-FAKE16-NEXT: v_or_b32_e32 v1, v10, v2
+; GFX11-FAKE16-NEXT: v_or_b32_e32 v2, v11, v18
+; GFX11-FAKE16-NEXT: v_or_b32_e32 v3, v17, v19
+; GFX11-FAKE16-NEXT: v_or_b32_e32 v4, v12, v5
+; GFX11-FAKE16-NEXT: s_clause 0x5
+; GFX11-FAKE16-NEXT: scratch_store_b128 v0, v[97:100], off offset:32
+; GFX11-FAKE16-NEXT: scratch_store_b128 v0, v[112:115], off offset:48
+; GFX11-FAKE16-NEXT: scratch_store_b128 v0, v[23:26], off offset:64
+; GFX11-FAKE16-NEXT: scratch_store_b128 v0, v[6:9], off offset:80
+; GFX11-FAKE16-NEXT: scratch_store_b128 v0, v[13:16], off offset:96
+; GFX11-FAKE16-NEXT: scratch_store_b128 v0, v[1:4], off offset:112
+; GFX11-FAKE16-NEXT: v_readlane_b32 s104, v41, 8
+; GFX11-FAKE16-NEXT: v_readlane_b32 s103, v41, 7
+; GFX11-FAKE16-NEXT: v_readlane_b32 s102, v41, 6
+; GFX11-FAKE16-NEXT: v_readlane_b32 s101, v41, 5
+; GFX11-FAKE16-NEXT: v_readlane_b32 s100, v41, 4
+; GFX11-FAKE16-NEXT: v_readlane_b32 s99, v41, 3
+; GFX11-FAKE16-NEXT: v_readlane_b32 s98, v41, 2
+; GFX11-FAKE16-NEXT: v_readlane_b32 s97, v41, 1
+; GFX11-FAKE16-NEXT: v_readlane_b32 s96, v41, 0
+; GFX11-FAKE16-NEXT: v_readlane_b32 s87, v40, 31
+; GFX11-FAKE16-NEXT: v_readlane_b32 s86, v40, 30
+; GFX11-FAKE16-NEXT: v_readlane_b32 s85, v40, 29
+; GFX11-FAKE16-NEXT: v_readlane_b32 s84, v40, 28
+; GFX11-FAKE16-NEXT: v_readlane_b32 s83, v40, 27
+; GFX11-FAKE16-NEXT: v_readlane_b32 s82, v40, 26
+; GFX11-FAKE16-NEXT: v_readlane_b32 s81, v40, 25
+; GFX11-FAKE16-NEXT: v_readlane_b32 s80, v40, 24
+; GFX11-FAKE16-NEXT: v_readlane_b32 s71, v40, 23
+; GFX11-FAKE16-NEXT: v_readlane_b32 s70, v40, 22
+; GFX11-FAKE16-NEXT: v_readlane_b32 s69, v40, 21
+; GFX11-FAKE16-NEXT: v_readlane_b32 s68, v40, 20
+; GFX11-FAKE16-NEXT: v_readlane_b32 s67, v40, 19
+; GFX11-FAKE16-NEXT: v_readlane_b32 s66, v40, 18
+; GFX11-FAKE16-NEXT: v_readlane_b32 s65, v40, 17
+; GFX11-FAKE16-NEXT: v_readlane_b32 s64, v40, 16
+; GFX11-FAKE16-NEXT: v_readlane_b32 s55, v40, 15
+; GFX11-FAKE16-NEXT: v_readlane_b32 s54, v40, 14
+; GFX11-FAKE16-NEXT: v_readlane_b32 s53, v40, 13
+; GFX11-FAKE16-NEXT: v_readlane_b32 s52, v40, 12
+; GFX11-FAKE16-NEXT: v_readlane_b32 s51, v40, 11
+; GFX11-FAKE16-NEXT: v_readlane_b32 s50, v40, 10
+; GFX11-FAKE16-NEXT: v_readlane_b32 s49, v40, 9
+; GFX11-FAKE16-NEXT: v_readlane_b32 s48, v40, 8
+; GFX11-FAKE16-NEXT: v_readlane_b32 s39, v40, 7
+; GFX11-FAKE16-NEXT: v_readlane_b32 s38, v40, 6
+; GFX11-FAKE16-NEXT: v_readlane_b32 s37, v40, 5
+; GFX11-FAKE16-NEXT: v_readlane_b32 s36, v40, 4
+; GFX11-FAKE16-NEXT: v_readlane_b32 s35, v40, 3
+; GFX11-FAKE16-NEXT: v_readlane_b32 s34, v40, 2
+; GFX11-FAKE16-NEXT: v_readlane_b32 s31, v40, 1
+; GFX11-FAKE16-NEXT: v_readlane_b32 s30, v40, 0
+; GFX11-FAKE16-NEXT: s_or_saveexec_b32 s0, -1
+; GFX11-FAKE16-NEXT: s_clause 0x3
+; GFX11-FAKE16-NEXT: scratch_load_b32 v40, off, s32
+; GFX11-FAKE16-NEXT: scratch_load_b32 v41, off, s32 offset:4
+; GFX11-FAKE16-NEXT: scratch_load_b32 v42, off, s32 offset:8
+; GFX11-FAKE16-NEXT: scratch_load_b32 v43, off, s32 offset:12
+; GFX11-FAKE16-NEXT: s_mov_b32 exec_lo, s0
+; GFX11-FAKE16-NEXT: s_waitcnt vmcnt(0)
+; GFX11-FAKE16-NEXT: s_setpc_b64 s[30:31]
%cmp = icmp eq i32 %b, 0
br i1 %cmp, label %cmp.true, label %cmp.false
@@ -180286,9 +185691,10 @@ define inreg <64 x half> @bitcast_v128i8_to_v64f16_scalar(<128 x i8> inreg %a, i
; GFX11-TRUE16-NEXT: s_lshl_b32 s6, s29, 8
; GFX11-TRUE16-NEXT: s_and_b32 s7, s2, 0xff
; GFX11-TRUE16-NEXT: s_or_b32 s5, s5, s6
-; GFX11-TRUE16-NEXT: s_lshl_b32 s6, s1, 8
-; GFX11-TRUE16-NEXT: v_and_b32_e64 v5, 0xffff, s5
+; GFX11-TRUE16-NEXT: s_delay_alu instid0(SALU_CYCLE_1)
+; GFX11-TRUE16-NEXT: v_dual_mov_b32 v4, s5 :: v_dual_and_b32 v1, 0xff, v35
; GFX11-TRUE16-NEXT: s_and_b32 s5, s0, 0xff
+; GFX11-TRUE16-NEXT: s_lshl_b32 s6, s1, 8
; GFX11-TRUE16-NEXT: s_lshl_b32 s8, s3, 8
; GFX11-TRUE16-NEXT: s_or_b32 s5, s5, s6
; GFX11-TRUE16-NEXT: s_or_b32 s6, s7, s8
@@ -180304,6 +185710,7 @@ define inreg <64 x half> @bitcast_v128i8_to_v64f16_scalar(<128 x i8> inreg %a, i
; GFX11-TRUE16-NEXT: s_lshl_b32 s8, s21, 8
; GFX11-TRUE16-NEXT: s_and_b32 s9, s22, 0xff
; GFX11-TRUE16-NEXT: s_lshl_b32 s10, s23, 8
+; GFX11-TRUE16-NEXT: v_and_b32_e32 v0, 0xff, v32
; GFX11-TRUE16-NEXT: s_or_b32 s7, s7, s8
; GFX11-TRUE16-NEXT: s_or_b32 s8, s9, s10
; GFX11-TRUE16-NEXT: s_and_b32 s9, s24, 0xff
@@ -180314,201 +185721,169 @@ define inreg <64 x half> @bitcast_v128i8_to_v64f16_scalar(<128 x i8> inreg %a, i
; GFX11-TRUE16-NEXT: s_or_b32 s10, s11, s12
; GFX11-TRUE16-NEXT: s_pack_ll_b32_b16 s7, s7, s8
; GFX11-TRUE16-NEXT: s_pack_ll_b32_b16 s8, s9, s10
-; GFX11-TRUE16-NEXT: v_and_b32_e32 v2, 0xff, v36
-; GFX11-TRUE16-NEXT: v_and_b32_e32 v0, 0xff, v32
-; GFX11-TRUE16-NEXT: v_and_b32_e32 v1, 0xff, v35
-; GFX11-TRUE16-NEXT: v_and_b32_e32 v4, 0xff, v34
-; GFX11-TRUE16-NEXT: v_and_b32_e32 v3, 0xff, v33
-; GFX11-TRUE16-NEXT: v_or_b32_e32 v2, v2, v68
+; GFX11-TRUE16-NEXT: v_and_b32_e32 v2, 0xff, v33
; GFX11-TRUE16-NEXT: v_or_b32_e32 v0, v0, v64
-; GFX11-TRUE16-NEXT: v_or_b32_e32 v1, v1, v66
-; GFX11-TRUE16-NEXT: v_or_b32_e32 v6, v4, v67
-; GFX11-TRUE16-NEXT: v_or_b32_e32 v3, v3, v65
-; GFX11-TRUE16-NEXT: v_and_b32_e32 v2, 0xffff, v2
-; GFX11-TRUE16-NEXT: v_lshl_or_b32 v4, v0, 16, v5
-; GFX11-TRUE16-NEXT: v_and_b32_e32 v1, 0xffff, v1
-; GFX11-TRUE16-NEXT: v_and_b32_e32 v0, 0xff, v38
-; GFX11-TRUE16-NEXT: v_and_b32_e32 v7, 0xff, v39
-; GFX11-TRUE16-NEXT: v_lshl_or_b32 v6, v6, 16, v2
-; GFX11-TRUE16-NEXT: v_and_b32_e32 v2, 0xff, v49
-; GFX11-TRUE16-NEXT: v_lshl_or_b32 v5, v3, 16, v1
+; GFX11-TRUE16-NEXT: v_or_b32_e32 v5, v1, v66
; GFX11-TRUE16-NEXT: v_and_b32_e32 v1, 0xff, v37
-; GFX11-TRUE16-NEXT: v_or_b32_e32 v0, v0, v70
+; GFX11-TRUE16-NEXT: v_and_b32_e32 v3, 0xff, v34
+; GFX11-TRUE16-NEXT: v_or_b32_e32 v2, v2, v65
+; GFX11-TRUE16-NEXT: v_mov_b16_e32 v4.h, v0.l
+; GFX11-TRUE16-NEXT: v_and_b32_e32 v0, 0xff, v36
+; GFX11-TRUE16-NEXT: s_waitcnt vmcnt(7)
+; GFX11-TRUE16-NEXT: v_and_b32_e32 v16, 0xff, v118
+; GFX11-TRUE16-NEXT: v_or_b32_e32 v3, v3, v67
+; GFX11-TRUE16-NEXT: v_mov_b16_e32 v5.h, v2.l
+; GFX11-TRUE16-NEXT: v_and_b32_e32 v2, 0xff, v38
+; GFX11-TRUE16-NEXT: v_or_b32_e32 v6, v0, v68
+; GFX11-TRUE16-NEXT: v_or_b32_e32 v0, v1, v69
+; GFX11-TRUE16-NEXT: v_mov_b16_e32 v6.h, v3.l
+; GFX11-TRUE16-NEXT: v_and_b32_e32 v1, 0xff, v39
+; GFX11-TRUE16-NEXT: v_or_b32_e32 v7, v2, v70
; GFX11-TRUE16-NEXT: v_and_b32_e32 v3, 0xff, v50
-; GFX11-TRUE16-NEXT: v_or_b32_e32 v2, v2, v71
-; GFX11-TRUE16-NEXT: v_and_b32_e32 v8, 0xff, v48
-; GFX11-TRUE16-NEXT: v_or_b32_e32 v1, v1, v69
-; GFX11-TRUE16-NEXT: v_and_b32_e32 v0, 0xffff, v0
-; GFX11-TRUE16-NEXT: v_or_b32_e32 v3, v3, v82
-; GFX11-TRUE16-NEXT: v_or_b32_e32 v9, v7, v80
-; GFX11-TRUE16-NEXT: v_and_b32_e32 v2, 0xffff, v2
-; GFX11-TRUE16-NEXT: v_or_b32_e32 v10, v8, v81
-; GFX11-TRUE16-NEXT: v_lshl_or_b32 v7, v1, 16, v0
-; GFX11-TRUE16-NEXT: v_and_b32_e32 v3, 0xffff, v3
-; GFX11-TRUE16-NEXT: v_and_b32_e32 v0, 0xff, v53
-; GFX11-TRUE16-NEXT: v_lshl_or_b32 v8, v9, 16, v2
-; GFX11-TRUE16-NEXT: v_and_b32_e32 v2, 0xff, v55
-; GFX11-TRUE16-NEXT: v_and_b32_e32 v1, 0xff, v51
-; GFX11-TRUE16-NEXT: v_lshl_or_b32 v9, v10, 16, v3
-; GFX11-TRUE16-NEXT: v_or_b32_e32 v0, v0, v84
-; GFX11-TRUE16-NEXT: v_and_b32_e32 v3, 0xff, v52
-; GFX11-TRUE16-NEXT: v_and_b32_e32 v10, 0xff, v54
-; GFX11-TRUE16-NEXT: v_or_b32_e32 v2, v2, v86
-; GFX11-TRUE16-NEXT: v_or_b32_e32 v1, v1, v83
-; GFX11-TRUE16-NEXT: v_and_b32_e32 v0, 0xffff, v0
+; GFX11-TRUE16-NEXT: v_mov_b16_e32 v7.h, v0.l
+; GFX11-TRUE16-NEXT: v_and_b32_e32 v0, 0xff, v48
+; GFX11-TRUE16-NEXT: v_and_b32_e32 v2, 0xff, v49
+; GFX11-TRUE16-NEXT: v_or_b32_e32 v1, v1, v80
+; GFX11-TRUE16-NEXT: v_or_b32_e32 v9, v3, v82
+; GFX11-TRUE16-NEXT: v_and_b32_e32 v3, 0xff, v55
+; GFX11-TRUE16-NEXT: v_or_b32_e32 v0, v0, v81
+; GFX11-TRUE16-NEXT: v_or_b32_e32 v8, v2, v71
+; GFX11-TRUE16-NEXT: v_and_b32_e32 v2, 0xff, v51
+; GFX11-TRUE16-NEXT: v_mov_b16_e32 v8.h, v1.l
+; GFX11-TRUE16-NEXT: v_and_b32_e32 v1, 0xff, v53
+; GFX11-TRUE16-NEXT: v_mov_b16_e32 v9.h, v0.l
+; GFX11-TRUE16-NEXT: v_and_b32_e32 v0, 0xff, v52
+; GFX11-TRUE16-NEXT: v_or_b32_e32 v2, v2, v83
+; GFX11-TRUE16-NEXT: v_or_b32_e32 v11, v3, v86
+; GFX11-TRUE16-NEXT: v_or_b32_e32 v10, v1, v84
; GFX11-TRUE16-NEXT: s_waitcnt vmcnt(0)
-; GFX11-TRUE16-NEXT: v_and_b32_e32 v11, 0xff, v96
-; GFX11-TRUE16-NEXT: v_or_b32_e32 v3, v3, v85
-; GFX11-TRUE16-NEXT: v_or_b32_e32 v12, v10, v97
-; GFX11-TRUE16-NEXT: v_and_b32_e32 v2, 0xffff, v2
-; GFX11-TRUE16-NEXT: v_lshl_or_b32 v10, v1, 16, v0
-; GFX11-TRUE16-NEXT: v_or_b32_e32 v0, v11, v87
-; GFX11-TRUE16-NEXT: v_and_b32_e32 v1, 0xff, v99
-; GFX11-TRUE16-NEXT: v_and_b32_e32 v12, 0xffff, v12
-; GFX11-TRUE16-NEXT: v_lshl_or_b32 v11, v3, 16, v2
-; GFX11-TRUE16-NEXT: v_and_b32_e32 v3, 0xff, v103
-; GFX11-TRUE16-NEXT: v_and_b32_e32 v14, 0xff, v114
-; GFX11-TRUE16-NEXT: v_and_b32_e32 v2, 0xff, v98
-; GFX11-TRUE16-NEXT: v_lshl_or_b32 v12, v0, 16, v12
+; GFX11-TRUE16-NEXT: v_and_b32_e32 v1, 0xff, v96
+; GFX11-TRUE16-NEXT: v_or_b32_e32 v0, v0, v85
+; GFX11-TRUE16-NEXT: v_mov_b16_e32 v10.h, v2.l
+; GFX11-TRUE16-NEXT: v_and_b32_e32 v2, 0xff, v54
+; GFX11-TRUE16-NEXT: v_and_b32_e32 v3, 0xff, v98
+; GFX11-TRUE16-NEXT: v_or_b32_e32 v1, v1, v87
+; GFX11-TRUE16-NEXT: v_mov_b16_e32 v11.h, v0.l
+; GFX11-TRUE16-NEXT: v_and_b32_e32 v0, 0xff, v99
+; GFX11-TRUE16-NEXT: v_or_b32_e32 v12, v2, v97
+; GFX11-TRUE16-NEXT: v_or_b32_e32 v2, v3, v102
+; GFX11-TRUE16-NEXT: v_mov_b16_e32 v12.h, v1.l
+; GFX11-TRUE16-NEXT: v_and_b32_e32 v1, 0xff, v103
+; GFX11-TRUE16-NEXT: v_or_b32_e32 v13, v0, v101
; GFX11-TRUE16-NEXT: v_and_b32_e32 v0, 0xff, v100
-; GFX11-TRUE16-NEXT: v_or_b32_e32 v3, v3, v113
-; GFX11-TRUE16-NEXT: v_or_b32_e32 v1, v1, v101
-; GFX11-TRUE16-NEXT: v_and_b32_e32 v13, 0xff, v116
-; GFX11-TRUE16-NEXT: v_or_b32_e32 v17, v14, v128
+; GFX11-TRUE16-NEXT: v_mov_b16_e32 v13.h, v2.l
+; GFX11-TRUE16-NEXT: v_and_b32_e32 v2, 0xff, v114
+; GFX11-TRUE16-NEXT: v_or_b32_e32 v14, v1, v113
+; GFX11-TRUE16-NEXT: v_and_b32_e32 v1, 0xff, v117
; GFX11-TRUE16-NEXT: v_or_b32_e32 v0, v0, v112
-; GFX11-TRUE16-NEXT: v_and_b32_e32 v3, 0xffff, v3
-; GFX11-TRUE16-NEXT: v_and_b32_e32 v14, 0xff, v117
-; GFX11-TRUE16-NEXT: v_or_b32_e32 v2, v2, v102
-; GFX11-TRUE16-NEXT: v_and_b32_e32 v1, 0xffff, v1
-; GFX11-TRUE16-NEXT: v_or_b32_e32 v13, v13, v130
-; GFX11-TRUE16-NEXT: v_and_b32_e32 v16, 0xff, v133
-; GFX11-TRUE16-NEXT: v_or_b32_e32 v20, v14, v132
-; GFX11-TRUE16-NEXT: v_lshl_or_b32 v14, v0, 16, v3
+; GFX11-TRUE16-NEXT: v_and_b32_e32 v3, 0xff, v116
+; GFX11-TRUE16-NEXT: v_or_b32_e32 v2, v2, v128
+; GFX11-TRUE16-NEXT: v_or_b32_e32 v16, v16, v134
+; GFX11-TRUE16-NEXT: v_or_b32_e32 v1, v1, v132
+; GFX11-TRUE16-NEXT: v_mov_b16_e32 v14.h, v0.l
+; GFX11-TRUE16-NEXT: v_and_b32_e32 v0, 0xff, v133
+; GFX11-TRUE16-NEXT: v_or_b32_e32 v15, v3, v130
+; GFX11-TRUE16-NEXT: v_mov_b16_e32 v15.h, v2.l
+; GFX11-TRUE16-NEXT: v_mov_b16_e32 v16.h, v1.l
+; GFX11-TRUE16-NEXT: v_mov_b32_e32 v1, s6
+; GFX11-TRUE16-NEXT: v_or_b32_e32 v17, v0, v161
+; GFX11-TRUE16-NEXT: v_and_b32_e32 v0, 0xff, v129
+; GFX11-TRUE16-NEXT: v_dual_mov_b32 v2, s7 :: v_dual_mov_b32 v3, s8
+; GFX11-TRUE16-NEXT: s_delay_alu instid0(VALU_DEP_2) | instskip(NEXT) | instid1(VALU_DEP_1)
+; GFX11-TRUE16-NEXT: v_or_b32_e32 v0, v0, v147
+; GFX11-TRUE16-NEXT: v_mov_b16_e32 v17.h, v0.l
; GFX11-TRUE16-NEXT: v_and_b32_e32 v0, 0xff, v148
-; GFX11-TRUE16-NEXT: v_and_b32_e32 v15, 0xff, v118
-; GFX11-TRUE16-NEXT: v_and_b32_e32 v18, 0xff, v129
-; GFX11-TRUE16-NEXT: v_or_b32_e32 v16, v16, v161
-; GFX11-TRUE16-NEXT: v_and_b32_e32 v19, 0xffff, v13
-; GFX11-TRUE16-NEXT: v_lshl_or_b32 v13, v2, 16, v1
-; GFX11-TRUE16-NEXT: v_or_b32_e32 v0, v0, v166
-; GFX11-TRUE16-NEXT: v_and_b32_e32 v1, 0xff, v144
-; GFX11-TRUE16-NEXT: v_or_b32_e32 v15, v15, v134
-; GFX11-TRUE16-NEXT: v_or_b32_e32 v18, v18, v147
-; GFX11-TRUE16-NEXT: v_and_b32_e32 v22, 0xffff, v16
-; GFX11-TRUE16-NEXT: v_and_b32_e32 v0, 0xffff, v0
-; GFX11-TRUE16-NEXT: v_or_b32_e32 v1, v1, v167
-; GFX11-TRUE16-NEXT: v_and_b32_e32 v21, 0xffff, v15
-; GFX11-TRUE16-NEXT: v_lshl_or_b32 v15, v17, 16, v19
-; GFX11-TRUE16-NEXT: v_lshl_or_b32 v17, v18, 16, v22
-; GFX11-TRUE16-NEXT: v_mov_b32_e32 v2, s7
-; GFX11-TRUE16-NEXT: v_lshl_or_b32 v18, v1, 16, v0
+; GFX11-TRUE16-NEXT: s_delay_alu instid0(VALU_DEP_1) | instskip(SKIP_1) | instid1(VALU_DEP_1)
+; GFX11-TRUE16-NEXT: v_or_b32_e32 v18, v0, v166
+; GFX11-TRUE16-NEXT: v_and_b32_e32 v0, 0xff, v144
+; GFX11-TRUE16-NEXT: v_or_b32_e32 v0, v0, v167
+; GFX11-TRUE16-NEXT: s_delay_alu instid0(VALU_DEP_1) | instskip(SKIP_1) | instid1(VALU_DEP_1)
+; GFX11-TRUE16-NEXT: v_mov_b16_e32 v18.h, v0.l
; GFX11-TRUE16-NEXT: v_and_b32_e32 v0, 0xff, v151
-; GFX11-TRUE16-NEXT: v_and_b32_e32 v1, 0xff, v149
-; GFX11-TRUE16-NEXT: v_lshl_or_b32 v16, v20, 16, v21
-; GFX11-TRUE16-NEXT: v_mov_b32_e32 v3, s8
-; GFX11-TRUE16-NEXT: s_delay_alu instid0(VALU_DEP_4) | instskip(NEXT) | instid1(VALU_DEP_4)
-; GFX11-TRUE16-NEXT: v_or_b32_e32 v0, v0, v180
-; GFX11-TRUE16-NEXT: v_or_b32_e32 v1, v1, v177
-; GFX11-TRUE16-NEXT: s_delay_alu instid0(VALU_DEP_2) | instskip(NEXT) | instid1(VALU_DEP_1)
-; GFX11-TRUE16-NEXT: v_and_b32_e32 v0, 0xffff, v0
-; GFX11-TRUE16-NEXT: v_lshl_or_b32 v19, v1, 16, v0
+; GFX11-TRUE16-NEXT: v_or_b32_e32 v19, v0, v180
+; GFX11-TRUE16-NEXT: v_and_b32_e32 v0, 0xff, v149
+; GFX11-TRUE16-NEXT: s_delay_alu instid0(VALU_DEP_1) | instskip(NEXT) | instid1(VALU_DEP_1)
+; GFX11-TRUE16-NEXT: v_or_b32_e32 v0, v0, v177
+; GFX11-TRUE16-NEXT: v_mov_b16_e32 v19.h, v0.l
; GFX11-TRUE16-NEXT: v_and_b32_e32 v0, 0xff, v165
-; GFX11-TRUE16-NEXT: v_and_b32_e32 v1, 0xff, v162
-; GFX11-TRUE16-NEXT: s_delay_alu instid0(VALU_DEP_2) | instskip(NEXT) | instid1(VALU_DEP_2)
-; GFX11-TRUE16-NEXT: v_or_b32_e32 v0, v0, v42
-; GFX11-TRUE16-NEXT: v_or_b32_e32 v1, v1, v41
-; GFX11-TRUE16-NEXT: s_delay_alu instid0(VALU_DEP_2) | instskip(NEXT) | instid1(VALU_DEP_1)
-; GFX11-TRUE16-NEXT: v_and_b32_e32 v0, 0xffff, v0
-; GFX11-TRUE16-NEXT: v_lshl_or_b32 v20, v1, 16, v0
+; GFX11-TRUE16-NEXT: s_delay_alu instid0(VALU_DEP_1) | instskip(SKIP_1) | instid1(VALU_DEP_1)
+; GFX11-TRUE16-NEXT: v_or_b32_e32 v20, v0, v42
+; GFX11-TRUE16-NEXT: v_and_b32_e32 v0, 0xff, v162
+; GFX11-TRUE16-NEXT: v_or_b32_e32 v0, v0, v41
+; GFX11-TRUE16-NEXT: s_delay_alu instid0(VALU_DEP_1) | instskip(SKIP_1) | instid1(VALU_DEP_1)
+; GFX11-TRUE16-NEXT: v_mov_b16_e32 v20.h, v0.l
; GFX11-TRUE16-NEXT: v_and_b32_e32 v0, 0xff, v178
-; GFX11-TRUE16-NEXT: v_and_b32_e32 v1, 0xff, v115
-; GFX11-TRUE16-NEXT: s_delay_alu instid0(VALU_DEP_2) | instskip(NEXT) | instid1(VALU_DEP_2)
-; GFX11-TRUE16-NEXT: v_or_b32_e32 v0, v0, v45
-; GFX11-TRUE16-NEXT: v_or_b32_e32 v1, v1, v44
-; GFX11-TRUE16-NEXT: s_delay_alu instid0(VALU_DEP_2) | instskip(NEXT) | instid1(VALU_DEP_1)
-; GFX11-TRUE16-NEXT: v_and_b32_e32 v0, 0xffff, v0
-; GFX11-TRUE16-NEXT: v_lshl_or_b32 v21, v1, 16, v0
+; GFX11-TRUE16-NEXT: v_or_b32_e32 v21, v0, v45
+; GFX11-TRUE16-NEXT: v_and_b32_e32 v0, 0xff, v115
+; GFX11-TRUE16-NEXT: s_delay_alu instid0(VALU_DEP_1) | instskip(NEXT) | instid1(VALU_DEP_1)
+; GFX11-TRUE16-NEXT: v_or_b32_e32 v0, v0, v44
+; GFX11-TRUE16-NEXT: v_mov_b16_e32 v21.h, v0.l
; GFX11-TRUE16-NEXT: v_and_b32_e32 v0, 0xff, v131
-; GFX11-TRUE16-NEXT: v_and_b32_e32 v1, 0xff, v119
-; GFX11-TRUE16-NEXT: s_delay_alu instid0(VALU_DEP_2) | instskip(NEXT) | instid1(VALU_DEP_2)
-; GFX11-TRUE16-NEXT: v_or_b32_e32 v0, v0, v59
-; GFX11-TRUE16-NEXT: v_or_b32_e32 v1, v1, v56
-; GFX11-TRUE16-NEXT: s_delay_alu instid0(VALU_DEP_2) | instskip(NEXT) | instid1(VALU_DEP_1)
-; GFX11-TRUE16-NEXT: v_and_b32_e32 v0, 0xffff, v0
-; GFX11-TRUE16-NEXT: v_lshl_or_b32 v22, v1, 16, v0
+; GFX11-TRUE16-NEXT: s_delay_alu instid0(VALU_DEP_1) | instskip(SKIP_1) | instid1(VALU_DEP_1)
+; GFX11-TRUE16-NEXT: v_or_b32_e32 v22, v0, v59
+; GFX11-TRUE16-NEXT: v_and_b32_e32 v0, 0xff, v119
+; GFX11-TRUE16-NEXT: v_or_b32_e32 v0, v0, v56
+; GFX11-TRUE16-NEXT: s_delay_alu instid0(VALU_DEP_1) | instskip(SKIP_1) | instid1(VALU_DEP_1)
+; GFX11-TRUE16-NEXT: v_mov_b16_e32 v22.h, v0.l
; GFX11-TRUE16-NEXT: v_and_b32_e32 v0, 0xff, v145
-; GFX11-TRUE16-NEXT: v_and_b32_e32 v1, 0xff, v135
-; GFX11-TRUE16-NEXT: s_delay_alu instid0(VALU_DEP_2) | instskip(NEXT) | instid1(VALU_DEP_2)
-; GFX11-TRUE16-NEXT: v_or_b32_e32 v0, v0, v60
-; GFX11-TRUE16-NEXT: v_or_b32_e32 v1, v1, v61
-; GFX11-TRUE16-NEXT: s_delay_alu instid0(VALU_DEP_2) | instskip(NEXT) | instid1(VALU_DEP_1)
-; GFX11-TRUE16-NEXT: v_and_b32_e32 v0, 0xffff, v0
-; GFX11-TRUE16-NEXT: v_lshl_or_b32 v23, v1, 16, v0
+; GFX11-TRUE16-NEXT: v_or_b32_e32 v23, v0, v60
+; GFX11-TRUE16-NEXT: v_and_b32_e32 v0, 0xff, v135
+; GFX11-TRUE16-NEXT: s_delay_alu instid0(VALU_DEP_1) | instskip(NEXT) | instid1(VALU_DEP_1)
+; GFX11-TRUE16-NEXT: v_or_b32_e32 v0, v0, v61
+; GFX11-TRUE16-NEXT: v_mov_b16_e32 v23.h, v0.l
; GFX11-TRUE16-NEXT: v_and_b32_e32 v0, 0xff, v150
-; GFX11-TRUE16-NEXT: v_and_b32_e32 v1, 0xff, v146
-; GFX11-TRUE16-NEXT: s_delay_alu instid0(VALU_DEP_2) | instskip(NEXT) | instid1(VALU_DEP_2)
-; GFX11-TRUE16-NEXT: v_or_b32_e32 v0, v0, v63
-; GFX11-TRUE16-NEXT: v_or_b32_e32 v1, v1, v62
-; GFX11-TRUE16-NEXT: s_delay_alu instid0(VALU_DEP_2) | instskip(NEXT) | instid1(VALU_DEP_1)
-; GFX11-TRUE16-NEXT: v_and_b32_e32 v0, 0xffff, v0
-; GFX11-TRUE16-NEXT: v_lshl_or_b32 v24, v1, 16, v0
+; GFX11-TRUE16-NEXT: s_delay_alu instid0(VALU_DEP_1) | instskip(SKIP_1) | instid1(VALU_DEP_1)
+; GFX11-TRUE16-NEXT: v_or_b32_e32 v24, v0, v63
+; GFX11-TRUE16-NEXT: v_and_b32_e32 v0, 0xff, v146
+; GFX11-TRUE16-NEXT: v_or_b32_e32 v0, v0, v62
+; GFX11-TRUE16-NEXT: s_delay_alu instid0(VALU_DEP_1) | instskip(SKIP_1) | instid1(VALU_DEP_1)
+; GFX11-TRUE16-NEXT: v_mov_b16_e32 v24.h, v0.l
; GFX11-TRUE16-NEXT: v_and_b32_e32 v0, 0xff, v163
-; GFX11-TRUE16-NEXT: v_and_b32_e32 v1, 0xff, v160
-; GFX11-TRUE16-NEXT: s_delay_alu instid0(VALU_DEP_2) | instskip(NEXT) | instid1(VALU_DEP_2)
-; GFX11-TRUE16-NEXT: v_or_b32_e32 v0, v0, v73
-; GFX11-TRUE16-NEXT: v_or_b32_e32 v1, v1, v72
-; GFX11-TRUE16-NEXT: s_delay_alu instid0(VALU_DEP_2) | instskip(NEXT) | instid1(VALU_DEP_1)
-; GFX11-TRUE16-NEXT: v_and_b32_e32 v0, 0xffff, v0
-; GFX11-TRUE16-NEXT: v_lshl_or_b32 v25, v1, 16, v0
+; GFX11-TRUE16-NEXT: v_or_b32_e32 v25, v0, v73
+; GFX11-TRUE16-NEXT: v_and_b32_e32 v0, 0xff, v160
+; GFX11-TRUE16-NEXT: s_delay_alu instid0(VALU_DEP_1) | instskip(NEXT) | instid1(VALU_DEP_1)
+; GFX11-TRUE16-NEXT: v_or_b32_e32 v0, v0, v72
+; GFX11-TRUE16-NEXT: v_mov_b16_e32 v25.h, v0.l
; GFX11-TRUE16-NEXT: v_and_b32_e32 v0, 0xff, v176
-; GFX11-TRUE16-NEXT: v_and_b32_e32 v1, 0xff, v164
-; GFX11-TRUE16-NEXT: s_delay_alu instid0(VALU_DEP_2) | instskip(NEXT) | instid1(VALU_DEP_2)
-; GFX11-TRUE16-NEXT: v_or_b32_e32 v0, v0, v75
-; GFX11-TRUE16-NEXT: v_or_b32_e32 v1, v1, v74
-; GFX11-TRUE16-NEXT: s_delay_alu instid0(VALU_DEP_2) | instskip(NEXT) | instid1(VALU_DEP_1)
-; GFX11-TRUE16-NEXT: v_and_b32_e32 v0, 0xffff, v0
-; GFX11-TRUE16-NEXT: v_lshl_or_b32 v26, v1, 16, v0
+; GFX11-TRUE16-NEXT: s_delay_alu instid0(VALU_DEP_1) | instskip(SKIP_1) | instid1(VALU_DEP_1)
+; GFX11-TRUE16-NEXT: v_or_b32_e32 v26, v0, v75
+; GFX11-TRUE16-NEXT: v_and_b32_e32 v0, 0xff, v164
+; GFX11-TRUE16-NEXT: v_or_b32_e32 v0, v0, v74
+; GFX11-TRUE16-NEXT: s_delay_alu instid0(VALU_DEP_1) | instskip(SKIP_1) | instid1(VALU_DEP_1)
+; GFX11-TRUE16-NEXT: v_mov_b16_e32 v26.h, v0.l
; GFX11-TRUE16-NEXT: v_and_b32_e32 v0, 0xff, v181
-; GFX11-TRUE16-NEXT: v_and_b32_e32 v1, 0xff, v179
-; GFX11-TRUE16-NEXT: s_delay_alu instid0(VALU_DEP_2) | instskip(NEXT) | instid1(VALU_DEP_2)
-; GFX11-TRUE16-NEXT: v_or_b32_e32 v0, v0, v77
-; GFX11-TRUE16-NEXT: v_or_b32_e32 v1, v1, v76
-; GFX11-TRUE16-NEXT: s_delay_alu instid0(VALU_DEP_2) | instskip(NEXT) | instid1(VALU_DEP_1)
-; GFX11-TRUE16-NEXT: v_and_b32_e32 v0, 0xffff, v0
-; GFX11-TRUE16-NEXT: v_lshl_or_b32 v27, v1, 16, v0
+; GFX11-TRUE16-NEXT: v_or_b32_e32 v27, v0, v77
+; GFX11-TRUE16-NEXT: v_and_b32_e32 v0, 0xff, v179
+; GFX11-TRUE16-NEXT: s_delay_alu instid0(VALU_DEP_1) | instskip(NEXT) | instid1(VALU_DEP_1)
+; GFX11-TRUE16-NEXT: v_or_b32_e32 v0, v0, v76
+; GFX11-TRUE16-NEXT: v_mov_b16_e32 v27.h, v0.l
; GFX11-TRUE16-NEXT: v_and_b32_e32 v0, 0xff, v183
-; GFX11-TRUE16-NEXT: v_and_b32_e32 v1, 0xff, v182
-; GFX11-TRUE16-NEXT: s_delay_alu instid0(VALU_DEP_2) | instskip(NEXT) | instid1(VALU_DEP_2)
-; GFX11-TRUE16-NEXT: v_or_b32_e32 v0, v0, v78
-; GFX11-TRUE16-NEXT: v_or_b32_e32 v1, v1, v79
-; GFX11-TRUE16-NEXT: s_delay_alu instid0(VALU_DEP_2) | instskip(NEXT) | instid1(VALU_DEP_1)
-; GFX11-TRUE16-NEXT: v_and_b32_e32 v0, 0xffff, v0
-; GFX11-TRUE16-NEXT: v_lshl_or_b32 v28, v1, 16, v0
+; GFX11-TRUE16-NEXT: s_delay_alu instid0(VALU_DEP_1) | instskip(SKIP_1) | instid1(VALU_DEP_1)
+; GFX11-TRUE16-NEXT: v_or_b32_e32 v28, v0, v78
+; GFX11-TRUE16-NEXT: v_and_b32_e32 v0, 0xff, v182
+; GFX11-TRUE16-NEXT: v_or_b32_e32 v0, v0, v79
+; GFX11-TRUE16-NEXT: s_delay_alu instid0(VALU_DEP_1) | instskip(SKIP_1) | instid1(VALU_DEP_1)
+; GFX11-TRUE16-NEXT: v_mov_b16_e32 v28.h, v0.l
; GFX11-TRUE16-NEXT: v_and_b32_e32 v0, 0xff, v43
-; GFX11-TRUE16-NEXT: v_and_b32_e32 v1, 0xff, v40
-; GFX11-TRUE16-NEXT: s_delay_alu instid0(VALU_DEP_2) | instskip(NEXT) | instid1(VALU_DEP_2)
-; GFX11-TRUE16-NEXT: v_or_b32_e32 v0, v0, v89
-; GFX11-TRUE16-NEXT: v_or_b32_e32 v1, v1, v88
-; GFX11-TRUE16-NEXT: s_delay_alu instid0(VALU_DEP_2) | instskip(NEXT) | instid1(VALU_DEP_1)
-; GFX11-TRUE16-NEXT: v_and_b32_e32 v0, 0xffff, v0
-; GFX11-TRUE16-NEXT: v_lshl_or_b32 v29, v1, 16, v0
+; GFX11-TRUE16-NEXT: v_or_b32_e32 v29, v0, v89
+; GFX11-TRUE16-NEXT: v_and_b32_e32 v0, 0xff, v40
+; GFX11-TRUE16-NEXT: s_delay_alu instid0(VALU_DEP_1) | instskip(NEXT) | instid1(VALU_DEP_1)
+; GFX11-TRUE16-NEXT: v_or_b32_e32 v0, v0, v88
+; GFX11-TRUE16-NEXT: v_mov_b16_e32 v29.h, v0.l
; GFX11-TRUE16-NEXT: v_and_b32_e32 v0, 0xff, v47
-; GFX11-TRUE16-NEXT: v_and_b32_e32 v1, 0xff, v46
-; GFX11-TRUE16-NEXT: s_delay_alu instid0(VALU_DEP_2) | instskip(NEXT) | instid1(VALU_DEP_2)
-; GFX11-TRUE16-NEXT: v_or_b32_e32 v0, v0, v91
-; GFX11-TRUE16-NEXT: v_or_b32_e32 v1, v1, v90
-; GFX11-TRUE16-NEXT: s_delay_alu instid0(VALU_DEP_2) | instskip(NEXT) | instid1(VALU_DEP_1)
-; GFX11-TRUE16-NEXT: v_and_b32_e32 v0, 0xffff, v0
-; GFX11-TRUE16-NEXT: v_lshl_or_b32 v30, v1, 16, v0
+; GFX11-TRUE16-NEXT: s_delay_alu instid0(VALU_DEP_1) | instskip(SKIP_1) | instid1(VALU_DEP_1)
+; GFX11-TRUE16-NEXT: v_or_b32_e32 v30, v0, v91
+; GFX11-TRUE16-NEXT: v_and_b32_e32 v0, 0xff, v46
+; GFX11-TRUE16-NEXT: v_or_b32_e32 v0, v0, v90
+; GFX11-TRUE16-NEXT: s_delay_alu instid0(VALU_DEP_1) | instskip(SKIP_1) | instid1(VALU_DEP_1)
+; GFX11-TRUE16-NEXT: v_mov_b16_e32 v30.h, v0.l
; GFX11-TRUE16-NEXT: v_and_b32_e32 v0, 0xff, v58
-; GFX11-TRUE16-NEXT: v_and_b32_e32 v1, 0xff, v57
-; GFX11-TRUE16-NEXT: s_delay_alu instid0(VALU_DEP_2) | instskip(NEXT) | instid1(VALU_DEP_2)
-; GFX11-TRUE16-NEXT: v_or_b32_e32 v0, v0, v92
-; GFX11-TRUE16-NEXT: v_or_b32_e32 v1, v1, v93
-; GFX11-TRUE16-NEXT: s_delay_alu instid0(VALU_DEP_2) | instskip(NEXT) | instid1(VALU_DEP_1)
-; GFX11-TRUE16-NEXT: v_and_b32_e32 v0, 0xffff, v0
-; GFX11-TRUE16-NEXT: v_lshl_or_b32 v31, v1, 16, v0
-; GFX11-TRUE16-NEXT: v_dual_mov_b32 v0, s5 :: v_dual_mov_b32 v1, s6
+; GFX11-TRUE16-NEXT: v_or_b32_e32 v31, v0, v92
+; GFX11-TRUE16-NEXT: v_and_b32_e32 v0, 0xff, v57
+; GFX11-TRUE16-NEXT: s_delay_alu instid0(VALU_DEP_1) | instskip(NEXT) | instid1(VALU_DEP_1)
+; GFX11-TRUE16-NEXT: v_or_b32_e32 v0, v0, v93
+; GFX11-TRUE16-NEXT: v_mov_b16_e32 v31.h, v0.l
+; GFX11-TRUE16-NEXT: v_mov_b32_e32 v0, s5
; GFX11-TRUE16-NEXT: s_and_not1_b32 vcc_lo, exec_lo, s4
; GFX11-TRUE16-NEXT: s_cbranch_vccnz .LBB93_3
; GFX11-TRUE16-NEXT: .LBB93_2: ; %cmp.true
@@ -180548,57 +185923,59 @@ define inreg <64 x half> @bitcast_v128i8_to_v64f16_scalar(<128 x i8> inreg %a, i
; GFX11-TRUE16-NEXT: s_or_b32 s10, s11, s10
; GFX11-TRUE16-NEXT: s_or_b32 s0, s1, s0
; GFX11-TRUE16-NEXT: s_or_b32 s1, s3, s2
+; GFX11-TRUE16-NEXT: s_addk_i32 s5, 0x300
+; GFX11-TRUE16-NEXT: s_addk_i32 s6, 0x300
; GFX11-TRUE16-NEXT: s_addk_i32 s9, 0x300
; GFX11-TRUE16-NEXT: s_addk_i32 s0, 0x300
; GFX11-TRUE16-NEXT: s_addk_i32 s1, 0x300
; GFX11-TRUE16-NEXT: s_addk_i32 s10, 0x300
+; GFX11-TRUE16-NEXT: s_addk_i32 s4, 0x300
+; GFX11-TRUE16-NEXT: s_waitcnt vmcnt(38)
+; GFX11-TRUE16-NEXT: v_add_nc_u32_e32 v1, 3, v57
; GFX11-TRUE16-NEXT: s_pack_ll_b32_b16 s0, s0, s1
; GFX11-TRUE16-NEXT: s_pack_ll_b32_b16 s1, s9, s10
; GFX11-TRUE16-NEXT: s_waitcnt vmcnt(37)
; GFX11-TRUE16-NEXT: v_add_nc_u32_e32 v0, 3, v58
-; GFX11-TRUE16-NEXT: s_addk_i32 s5, 0x300
-; GFX11-TRUE16-NEXT: s_addk_i32 s6, 0x300
-; GFX11-TRUE16-NEXT: v_add_nc_u32_e32 v1, 3, v57
; GFX11-TRUE16-NEXT: s_pack_ll_b32_b16 s3, s5, s6
; GFX11-TRUE16-NEXT: s_waitcnt vmcnt(35)
; GFX11-TRUE16-NEXT: v_add_nc_u32_e32 v2, 3, v47
-; GFX11-TRUE16-NEXT: v_and_b32_e32 v0, 0xff, v0
; GFX11-TRUE16-NEXT: s_addk_i32 s7, 0x300
; GFX11-TRUE16-NEXT: s_addk_i32 s8, 0x300
-; GFX11-TRUE16-NEXT: v_and_b32_e32 v1, 0xff, v1
+; GFX11-TRUE16-NEXT: v_add_nc_u32_e32 v3, 3, v46
+; GFX11-TRUE16-NEXT: v_and_b32_e32 v0, 0xff, v0
; GFX11-TRUE16-NEXT: v_and_b32_e32 v2, 0xff, v2
-; GFX11-TRUE16-NEXT: v_or_b32_e32 v0, v92, v0
; GFX11-TRUE16-NEXT: s_pack_ll_b32_b16 s2, s7, s8
-; GFX11-TRUE16-NEXT: v_add_nc_u32_e32 v3, 3, v46
-; GFX11-TRUE16-NEXT: v_or_b32_e32 v1, v93, v1
+; GFX11-TRUE16-NEXT: v_and_b32_e32 v1, 0xff, v1
+; GFX11-TRUE16-NEXT: v_and_b32_e32 v3, 0xff, v3
+; GFX11-TRUE16-NEXT: v_or_b32_e32 v0, v92, v0
; GFX11-TRUE16-NEXT: v_or_b32_e32 v2, v91, v2
; GFX11-TRUE16-NEXT: s_waitcnt vmcnt(33)
; GFX11-TRUE16-NEXT: v_add_nc_u32_e32 v4, 3, v43
-; GFX11-TRUE16-NEXT: v_add_nc_u32_e32 v27, 0x300, v0
-; GFX11-TRUE16-NEXT: v_and_b32_e32 v3, 0xff, v3
+; GFX11-TRUE16-NEXT: v_or_b32_e32 v1, v93, v1
+; GFX11-TRUE16-NEXT: v_or_b32_e32 v3, v90, v3
+; GFX11-TRUE16-NEXT: v_add_nc_u32_e32 v31, 0x300, v0
+; GFX11-TRUE16-NEXT: v_add_nc_u32_e32 v30, 0x300, v2
; GFX11-TRUE16-NEXT: v_add_nc_u32_e32 v0, 3, v40
-; GFX11-TRUE16-NEXT: v_add_nc_u32_e32 v31, 0x300, v1
-; GFX11-TRUE16-NEXT: v_add_nc_u32_e32 v28, 0x300, v2
+; GFX11-TRUE16-NEXT: v_add_nc_u32_e32 v43, 0x300, v1
; GFX11-TRUE16-NEXT: v_and_b32_e32 v1, 0xff, v4
-; GFX11-TRUE16-NEXT: v_or_b32_e32 v3, v90, v3
; GFX11-TRUE16-NEXT: s_waitcnt vmcnt(31)
; GFX11-TRUE16-NEXT: v_add_nc_u32_e32 v2, 3, v183
; GFX11-TRUE16-NEXT: v_add_nc_u32_e32 v4, 3, v182
+; GFX11-TRUE16-NEXT: v_add_nc_u32_e32 v182, 0x300, v3
; GFX11-TRUE16-NEXT: v_and_b32_e32 v0, 0xff, v0
; GFX11-TRUE16-NEXT: v_or_b32_e32 v1, v89, v1
-; GFX11-TRUE16-NEXT: v_add_nc_u32_e32 v30, 0x300, v3
; GFX11-TRUE16-NEXT: v_and_b32_e32 v2, 0xff, v2
; GFX11-TRUE16-NEXT: v_and_b32_e32 v3, 0xff, v4
; GFX11-TRUE16-NEXT: s_waitcnt vmcnt(29)
; GFX11-TRUE16-NEXT: v_add_nc_u32_e32 v4, 3, v181
; GFX11-TRUE16-NEXT: v_or_b32_e32 v0, v88, v0
-; GFX11-TRUE16-NEXT: v_add_nc_u32_e32 v181, 0x300, v1
+; GFX11-TRUE16-NEXT: v_add_nc_u32_e32 v29, 0x300, v1
; GFX11-TRUE16-NEXT: v_or_b32_e32 v1, v78, v2
; GFX11-TRUE16-NEXT: v_or_b32_e32 v2, v79, v3
; GFX11-TRUE16-NEXT: v_and_b32_e32 v3, 0xff, v4
-; GFX11-TRUE16-NEXT: v_add_nc_u32_e32 v29, 0x300, v0
+; GFX11-TRUE16-NEXT: v_add_nc_u32_e32 v181, 0x300, v0
; GFX11-TRUE16-NEXT: v_add_nc_u32_e32 v0, 3, v179
-; GFX11-TRUE16-NEXT: v_add_nc_u32_e32 v182, 0x300, v1
+; GFX11-TRUE16-NEXT: v_add_nc_u32_e32 v28, 0x300, v1
; GFX11-TRUE16-NEXT: v_add_nc_u32_e32 v179, 0x300, v2
; GFX11-TRUE16-NEXT: v_or_b32_e32 v1, v77, v3
; GFX11-TRUE16-NEXT: s_waitcnt vmcnt(27)
@@ -180607,7 +185984,7 @@ define inreg <64 x half> @bitcast_v128i8_to_v64f16_scalar(<128 x i8> inreg %a, i
; GFX11-TRUE16-NEXT: v_add_nc_u32_e32 v3, 3, v164
; GFX11-TRUE16-NEXT: s_waitcnt vmcnt(25)
; GFX11-TRUE16-NEXT: v_add_nc_u32_e32 v4, 3, v163
-; GFX11-TRUE16-NEXT: v_add_nc_u32_e32 v163, 0x300, v1
+; GFX11-TRUE16-NEXT: v_add_nc_u32_e32 v27, 0x300, v1
; GFX11-TRUE16-NEXT: v_and_b32_e32 v1, 0xff, v2
; GFX11-TRUE16-NEXT: v_or_b32_e32 v0, v76, v0
; GFX11-TRUE16-NEXT: v_and_b32_e32 v2, 0xff, v3
@@ -180618,18 +185995,18 @@ define inreg <64 x half> @bitcast_v128i8_to_v64f16_scalar(<128 x i8> inreg %a, i
; GFX11-TRUE16-NEXT: v_or_b32_e32 v0, v74, v2
; GFX11-TRUE16-NEXT: v_or_b32_e32 v2, v73, v3
; GFX11-TRUE16-NEXT: v_and_b32_e32 v3, 0xff, v4
-; GFX11-TRUE16-NEXT: v_add_nc_u32_e32 v22, 0x300, v1
+; GFX11-TRUE16-NEXT: v_add_nc_u32_e32 v26, 0x300, v1
; GFX11-TRUE16-NEXT: s_waitcnt vmcnt(23)
; GFX11-TRUE16-NEXT: v_add_nc_u32_e32 v1, 3, v150
-; GFX11-TRUE16-NEXT: v_add_nc_u32_e32 v26, 0x300, v0
-; GFX11-TRUE16-NEXT: v_add_nc_u32_e32 v23, 0x300, v2
+; GFX11-TRUE16-NEXT: v_add_nc_u32_e32 v150, 0x300, v0
+; GFX11-TRUE16-NEXT: v_add_nc_u32_e32 v25, 0x300, v2
; GFX11-TRUE16-NEXT: v_or_b32_e32 v0, v72, v3
; GFX11-TRUE16-NEXT: v_add_nc_u32_e32 v2, 3, v146
; GFX11-TRUE16-NEXT: v_and_b32_e32 v1, 0xff, v1
; GFX11-TRUE16-NEXT: s_waitcnt vmcnt(21)
; GFX11-TRUE16-NEXT: v_add_nc_u32_e32 v3, 3, v145
; GFX11-TRUE16-NEXT: v_add_nc_u32_e32 v4, 3, v135
-; GFX11-TRUE16-NEXT: v_add_nc_u32_e32 v25, 0x300, v0
+; GFX11-TRUE16-NEXT: v_add_nc_u32_e32 v135, 0x300, v0
; GFX11-TRUE16-NEXT: v_and_b32_e32 v0, 0xff, v2
; GFX11-TRUE16-NEXT: v_or_b32_e32 v1, v63, v1
; GFX11-TRUE16-NEXT: v_and_b32_e32 v2, 0xff, v3
@@ -180637,13 +186014,13 @@ define inreg <64 x half> @bitcast_v128i8_to_v64f16_scalar(<128 x i8> inreg %a, i
; GFX11-TRUE16-NEXT: s_waitcnt vmcnt(19)
; GFX11-TRUE16-NEXT: v_add_nc_u32_e32 v4, 3, v131
; GFX11-TRUE16-NEXT: v_or_b32_e32 v0, v62, v0
-; GFX11-TRUE16-NEXT: v_add_nc_u32_e32 v131, 0x300, v1
+; GFX11-TRUE16-NEXT: v_add_nc_u32_e32 v24, 0x300, v1
; GFX11-TRUE16-NEXT: v_or_b32_e32 v1, v60, v2
; GFX11-TRUE16-NEXT: v_or_b32_e32 v2, v61, v3
; GFX11-TRUE16-NEXT: v_and_b32_e32 v3, 0xff, v4
-; GFX11-TRUE16-NEXT: v_add_nc_u32_e32 v24, 0x300, v0
+; GFX11-TRUE16-NEXT: v_add_nc_u32_e32 v131, 0x300, v0
; GFX11-TRUE16-NEXT: v_add_nc_u32_e32 v0, 3, v119
-; GFX11-TRUE16-NEXT: v_add_nc_u32_e32 v135, 0x300, v1
+; GFX11-TRUE16-NEXT: v_add_nc_u32_e32 v23, 0x300, v1
; GFX11-TRUE16-NEXT: v_add_nc_u32_e32 v119, 0x300, v2
; GFX11-TRUE16-NEXT: v_or_b32_e32 v1, v59, v3
; GFX11-TRUE16-NEXT: s_waitcnt vmcnt(17)
@@ -180652,29 +186029,29 @@ define inreg <64 x half> @bitcast_v128i8_to_v64f16_scalar(<128 x i8> inreg %a, i
; GFX11-TRUE16-NEXT: v_add_nc_u32_e32 v3, 3, v115
; GFX11-TRUE16-NEXT: s_waitcnt vmcnt(15)
; GFX11-TRUE16-NEXT: v_add_nc_u32_e32 v4, 3, v165
-; GFX11-TRUE16-NEXT: v_add_nc_u32_e32 v115, 0x300, v1
+; GFX11-TRUE16-NEXT: v_add_nc_u32_e32 v22, 0x300, v1
; GFX11-TRUE16-NEXT: v_and_b32_e32 v1, 0xff, v2
; GFX11-TRUE16-NEXT: v_or_b32_e32 v0, v56, v0
; GFX11-TRUE16-NEXT: v_and_b32_e32 v2, 0xff, v3
; GFX11-TRUE16-NEXT: v_and_b32_e32 v3, 0xff, v4
; GFX11-TRUE16-NEXT: v_add_nc_u32_e32 v4, 3, v162
; GFX11-TRUE16-NEXT: v_or_b32_e32 v1, v45, v1
-; GFX11-TRUE16-NEXT: v_add_nc_u32_e32 v145, 0x300, v0
+; GFX11-TRUE16-NEXT: v_add_nc_u32_e32 v115, 0x300, v0
; GFX11-TRUE16-NEXT: v_or_b32_e32 v0, v44, v2
; GFX11-TRUE16-NEXT: v_or_b32_e32 v2, v42, v3
; GFX11-TRUE16-NEXT: v_and_b32_e32 v3, 0xff, v4
-; GFX11-TRUE16-NEXT: v_add_nc_u32_e32 v17, 0x300, v1
+; GFX11-TRUE16-NEXT: v_add_nc_u32_e32 v21, 0x300, v1
; GFX11-TRUE16-NEXT: s_waitcnt vmcnt(13)
; GFX11-TRUE16-NEXT: v_add_nc_u32_e32 v1, 3, v151
-; GFX11-TRUE16-NEXT: v_add_nc_u32_e32 v21, 0x300, v0
-; GFX11-TRUE16-NEXT: v_add_nc_u32_e32 v18, 0x300, v2
+; GFX11-TRUE16-NEXT: v_add_nc_u32_e32 v145, 0x300, v0
+; GFX11-TRUE16-NEXT: v_add_nc_u32_e32 v20, 0x300, v2
; GFX11-TRUE16-NEXT: v_or_b32_e32 v0, v41, v3
; GFX11-TRUE16-NEXT: v_add_nc_u32_e32 v2, 3, v149
; GFX11-TRUE16-NEXT: v_and_b32_e32 v1, 0xff, v1
; GFX11-TRUE16-NEXT: s_waitcnt vmcnt(11)
; GFX11-TRUE16-NEXT: v_add_nc_u32_e32 v3, 3, v148
; GFX11-TRUE16-NEXT: v_add_nc_u32_e32 v4, 3, v144
-; GFX11-TRUE16-NEXT: v_add_nc_u32_e32 v20, 0x300, v0
+; GFX11-TRUE16-NEXT: v_add_nc_u32_e32 v144, 0x300, v0
; GFX11-TRUE16-NEXT: v_and_b32_e32 v0, 0xff, v2
; GFX11-TRUE16-NEXT: v_or_b32_e32 v1, v180, v1
; GFX11-TRUE16-NEXT: v_and_b32_e32 v2, 0xff, v3
@@ -180688,8 +186065,8 @@ define inreg <64 x half> @bitcast_v128i8_to_v64f16_scalar(<128 x i8> inreg %a, i
; GFX11-TRUE16-NEXT: v_and_b32_e32 v3, 0xff, v4
; GFX11-TRUE16-NEXT: v_add_nc_u32_e32 v133, 0x300, v0
; GFX11-TRUE16-NEXT: v_add_nc_u32_e32 v0, 3, v129
-; GFX11-TRUE16-NEXT: v_add_nc_u32_e32 v129, 0x300, v1
-; GFX11-TRUE16-NEXT: v_add_nc_u32_e32 v144, 0x300, v2
+; GFX11-TRUE16-NEXT: v_add_nc_u32_e32 v18, 0x300, v1
+; GFX11-TRUE16-NEXT: v_add_nc_u32_e32 v129, 0x300, v2
; GFX11-TRUE16-NEXT: v_or_b32_e32 v1, v161, v3
; GFX11-TRUE16-NEXT: s_waitcnt vmcnt(7)
; GFX11-TRUE16-NEXT: v_add_nc_u32_e32 v2, 3, v118
@@ -180697,167 +186074,141 @@ define inreg <64 x half> @bitcast_v128i8_to_v64f16_scalar(<128 x i8> inreg %a, i
; GFX11-TRUE16-NEXT: v_add_nc_u32_e32 v3, 3, v117
; GFX11-TRUE16-NEXT: s_waitcnt vmcnt(5)
; GFX11-TRUE16-NEXT: v_add_nc_u32_e32 v4, 3, v116
-; GFX11-TRUE16-NEXT: v_add_nc_u32_e32 v116, 0x300, v1
+; GFX11-TRUE16-NEXT: v_add_nc_u32_e32 v17, 0x300, v1
; GFX11-TRUE16-NEXT: v_and_b32_e32 v1, 0xff, v2
; GFX11-TRUE16-NEXT: v_or_b32_e32 v0, v147, v0
; GFX11-TRUE16-NEXT: v_and_b32_e32 v2, 0xff, v3
; GFX11-TRUE16-NEXT: v_and_b32_e32 v3, 0xff, v4
; GFX11-TRUE16-NEXT: v_add_nc_u32_e32 v4, 3, v114
-; GFX11-TRUE16-NEXT: s_waitcnt vmcnt(1)
-; GFX11-TRUE16-NEXT: v_add_nc_u32_e32 v5, 3, v99
+; GFX11-TRUE16-NEXT: v_or_b32_e32 v1, v134, v1
; GFX11-TRUE16-NEXT: v_add_nc_u32_e32 v114, 0x300, v0
; GFX11-TRUE16-NEXT: v_or_b32_e32 v0, v132, v2
; GFX11-TRUE16-NEXT: v_or_b32_e32 v2, v130, v3
; GFX11-TRUE16-NEXT: v_and_b32_e32 v3, 0xff, v4
-; GFX11-TRUE16-NEXT: v_add_nc_u32_e32 v4, 3, v103
-; GFX11-TRUE16-NEXT: v_add_nc_u32_e32 v6, 3, v98
-; GFX11-TRUE16-NEXT: v_and_b32_e32 v5, 0xff, v5
-; GFX11-TRUE16-NEXT: v_add_nc_u32_e32 v7, 3, v54
-; GFX11-TRUE16-NEXT: v_add_nc_u32_e32 v8, 3, v53
+; GFX11-TRUE16-NEXT: v_add_nc_u32_e32 v16, 0x300, v1
+; GFX11-TRUE16-NEXT: s_waitcnt vmcnt(3)
+; GFX11-TRUE16-NEXT: v_add_nc_u32_e32 v1, 3, v103
+; GFX11-TRUE16-NEXT: v_add_nc_u32_e32 v103, 0x300, v0
+; GFX11-TRUE16-NEXT: s_waitcnt vmcnt(2)
+; GFX11-TRUE16-NEXT: v_add_nc_u32_e32 v4, 3, v98
+; GFX11-TRUE16-NEXT: v_or_b32_e32 v0, v128, v3
+; GFX11-TRUE16-NEXT: s_waitcnt vmcnt(1)
+; GFX11-TRUE16-NEXT: v_add_nc_u32_e32 v3, 3, v99
+; GFX11-TRUE16-NEXT: v_and_b32_e32 v1, 0xff, v1
+; GFX11-TRUE16-NEXT: v_add_nc_u32_e32 v5, 3, v54
; GFX11-TRUE16-NEXT: v_and_b32_e32 v4, 0xff, v4
-; GFX11-TRUE16-NEXT: v_and_b32_e32 v6, 0xff, v6
-; GFX11-TRUE16-NEXT: v_add_nc_u32_e32 v9, 3, v39
+; GFX11-TRUE16-NEXT: v_add_nc_u32_e32 v6, 3, v52
+; GFX11-TRUE16-NEXT: v_and_b32_e32 v3, 0xff, v3
+; GFX11-TRUE16-NEXT: v_or_b32_e32 v1, v113, v1
+; GFX11-TRUE16-NEXT: v_add_nc_u32_e32 v7, 3, v53
+; GFX11-TRUE16-NEXT: v_add_nc_u32_e32 v34, 3, v34
+; GFX11-TRUE16-NEXT: v_add_nc_u32_e32 v35, 3, v35
; GFX11-TRUE16-NEXT: v_add_nc_u32_e32 v33, 3, v33
-; GFX11-TRUE16-NEXT: v_add_nc_u32_e32 v16, 0x300, v0
-; GFX11-TRUE16-NEXT: v_or_b32_e32 v4, v113, v4
-; GFX11-TRUE16-NEXT: v_add_nc_u32_e32 v0, 0x300, v2
-; GFX11-TRUE16-NEXT: v_or_b32_e32 v2, v128, v3
-; GFX11-TRUE16-NEXT: v_add_nc_u32_e32 v3, 3, v100
-; GFX11-TRUE16-NEXT: s_addk_i32 s4, 0x300
-; GFX11-TRUE16-NEXT: v_add_nc_u32_e32 v11, 0x300, v4
-; GFX11-TRUE16-NEXT: v_or_b32_e32 v4, v101, v5
-; GFX11-TRUE16-NEXT: v_or_b32_e32 v5, v102, v6
-; GFX11-TRUE16-NEXT: v_and_b32_e32 v6, 0xff, v7
+; GFX11-TRUE16-NEXT: v_add_nc_u32_e32 v14, 0x300, v1
+; GFX11-TRUE16-NEXT: v_or_b32_e32 v1, v101, v3
+; GFX11-TRUE16-NEXT: v_or_b32_e32 v3, v102, v4
+; GFX11-TRUE16-NEXT: v_and_b32_e32 v4, 0xff, v5
; GFX11-TRUE16-NEXT: s_waitcnt vmcnt(0)
-; GFX11-TRUE16-NEXT: v_add_nc_u32_e32 v7, 3, v96
-; GFX11-TRUE16-NEXT: v_or_b32_e32 v1, v134, v1
-; GFX11-TRUE16-NEXT: v_add_nc_u32_e32 v12, 0x300, v4
-; GFX11-TRUE16-NEXT: v_add_nc_u32_e32 v13, 0x300, v5
-; GFX11-TRUE16-NEXT: v_or_b32_e32 v4, v97, v6
-; GFX11-TRUE16-NEXT: v_add_nc_u32_e32 v5, 3, v55
+; GFX11-TRUE16-NEXT: v_add_nc_u32_e32 v5, 3, v96
+; GFX11-TRUE16-NEXT: v_add_nc_u32_e32 v15, 0x300, v2
+; GFX11-TRUE16-NEXT: v_add_nc_u32_e32 v13, 0x300, v1
+; GFX11-TRUE16-NEXT: v_add_nc_u32_e32 v1, 0x300, v3
+; GFX11-TRUE16-NEXT: v_or_b32_e32 v3, v97, v4
+; GFX11-TRUE16-NEXT: v_add_nc_u32_e32 v4, 3, v55
+; GFX11-TRUE16-NEXT: v_and_b32_e32 v5, 0xff, v5
+; GFX11-TRUE16-NEXT: v_add_nc_u32_e32 v2, 3, v100
+; GFX11-TRUE16-NEXT: v_and_b32_e32 v33, 0xff, v33
+; GFX11-TRUE16-NEXT: v_add_nc_u32_e32 v12, 0x300, v3
+; GFX11-TRUE16-NEXT: v_and_b32_e32 v3, 0xff, v4
+; GFX11-TRUE16-NEXT: v_or_b32_e32 v4, v87, v5
+; GFX11-TRUE16-NEXT: v_and_b32_e32 v5, 0xff, v6
+; GFX11-TRUE16-NEXT: v_and_b32_e32 v6, 0xff, v7
+; GFX11-TRUE16-NEXT: v_add_nc_u32_e32 v7, 3, v51
+; GFX11-TRUE16-NEXT: v_or_b32_e32 v3, v86, v3
+; GFX11-TRUE16-NEXT: v_add_nc_u32_e32 v51, 0x300, v4
+; GFX11-TRUE16-NEXT: v_or_b32_e32 v4, v85, v5
+; GFX11-TRUE16-NEXT: v_or_b32_e32 v5, v84, v6
; GFX11-TRUE16-NEXT: v_and_b32_e32 v6, 0xff, v7
-; GFX11-TRUE16-NEXT: v_add_nc_u32_e32 v7, 3, v52
+; GFX11-TRUE16-NEXT: v_add_nc_u32_e32 v11, 0x300, v3
+; GFX11-TRUE16-NEXT: v_add_nc_u32_e32 v3, 3, v50
+; GFX11-TRUE16-NEXT: v_add_nc_u32_e32 v50, 0x300, v4
+; GFX11-TRUE16-NEXT: v_add_nc_u32_e32 v10, 0x300, v5
+; GFX11-TRUE16-NEXT: v_or_b32_e32 v4, v83, v6
+; GFX11-TRUE16-NEXT: v_add_nc_u32_e32 v5, 3, v48
; GFX11-TRUE16-NEXT: v_and_b32_e32 v3, 0xff, v3
-; GFX11-TRUE16-NEXT: v_add_nc_u32_e32 v14, 0x300, v4
+; GFX11-TRUE16-NEXT: v_add_nc_u32_e32 v6, 3, v49
+; GFX11-TRUE16-NEXT: v_add_nc_u32_e32 v7, 3, v39
+; GFX11-TRUE16-NEXT: v_add_nc_u32_e32 v39, 0x300, v4
; GFX11-TRUE16-NEXT: v_and_b32_e32 v4, 0xff, v5
-; GFX11-TRUE16-NEXT: v_or_b32_e32 v5, v87, v6
+; GFX11-TRUE16-NEXT: v_or_b32_e32 v3, v82, v3
+; GFX11-TRUE16-NEXT: v_and_b32_e32 v5, 0xff, v6
+; GFX11-TRUE16-NEXT: v_and_b32_e32 v6, 0xff, v7
+; GFX11-TRUE16-NEXT: v_add_nc_u32_e32 v7, 3, v38
+; GFX11-TRUE16-NEXT: v_or_b32_e32 v4, v81, v4
+; GFX11-TRUE16-NEXT: v_add_nc_u32_e32 v9, 0x300, v3
+; GFX11-TRUE16-NEXT: v_or_b32_e32 v3, v71, v5
+; GFX11-TRUE16-NEXT: v_or_b32_e32 v5, v80, v6
; GFX11-TRUE16-NEXT: v_and_b32_e32 v6, 0xff, v7
-; GFX11-TRUE16-NEXT: v_and_b32_e32 v7, 0xff, v8
-; GFX11-TRUE16-NEXT: v_add_nc_u32_e32 v8, 3, v51
-; GFX11-TRUE16-NEXT: v_or_b32_e32 v4, v86, v4
-; GFX11-TRUE16-NEXT: v_add_nc_u32_e32 v15, 0x300, v5
-; GFX11-TRUE16-NEXT: v_or_b32_e32 v5, v85, v6
-; GFX11-TRUE16-NEXT: v_or_b32_e32 v6, v84, v7
-; GFX11-TRUE16-NEXT: v_and_b32_e32 v7, 0xff, v8
-; GFX11-TRUE16-NEXT: v_add_nc_u32_e32 v51, 0x300, v4
-; GFX11-TRUE16-NEXT: v_add_nc_u32_e32 v4, 3, v50
-; GFX11-TRUE16-NEXT: v_add_nc_u32_e32 v50, 0x300, v5
-; GFX11-TRUE16-NEXT: v_add_nc_u32_e32 v8, 3, v49
-; GFX11-TRUE16-NEXT: v_or_b32_e32 v5, v83, v7
-; GFX11-TRUE16-NEXT: v_add_nc_u32_e32 v7, 3, v48
-; GFX11-TRUE16-NEXT: v_and_b32_e32 v4, 0xff, v4
-; GFX11-TRUE16-NEXT: v_add_nc_u32_e32 v6, 0x300, v6
-; GFX11-TRUE16-NEXT: v_add_nc_u32_e32 v1, 0x300, v1
-; GFX11-TRUE16-NEXT: v_add_nc_u32_e32 v10, 0x300, v5
-; GFX11-TRUE16-NEXT: v_and_b32_e32 v5, 0xff, v7
-; GFX11-TRUE16-NEXT: v_or_b32_e32 v4, v82, v4
-; GFX11-TRUE16-NEXT: v_and_b32_e32 v7, 0xff, v8
-; GFX11-TRUE16-NEXT: v_and_b32_e32 v8, 0xff, v9
-; GFX11-TRUE16-NEXT: v_add_nc_u32_e32 v9, 3, v38
-; GFX11-TRUE16-NEXT: v_or_b32_e32 v5, v81, v5
; GFX11-TRUE16-NEXT: v_add_nc_u32_e32 v38, 0x300, v4
-; GFX11-TRUE16-NEXT: v_or_b32_e32 v4, v71, v7
-; GFX11-TRUE16-NEXT: v_or_b32_e32 v7, v80, v8
-; GFX11-TRUE16-NEXT: v_and_b32_e32 v8, 0xff, v9
-; GFX11-TRUE16-NEXT: v_add_nc_u32_e32 v9, 0x300, v5
-; GFX11-TRUE16-NEXT: v_add_nc_u32_e32 v5, 3, v37
-; GFX11-TRUE16-NEXT: v_add_nc_u32_e32 v37, 0x300, v4
-; GFX11-TRUE16-NEXT: v_add_nc_u32_e32 v39, 0x300, v7
-; GFX11-TRUE16-NEXT: v_or_b32_e32 v4, v70, v8
-; GFX11-TRUE16-NEXT: v_add_nc_u32_e32 v7, 3, v36
-; GFX11-TRUE16-NEXT: v_and_b32_e32 v5, 0xff, v5
-; GFX11-TRUE16-NEXT: v_add_nc_u32_e32 v8, 3, v34
-; GFX11-TRUE16-NEXT: v_add_nc_u32_e32 v34, 3, v35
-; GFX11-TRUE16-NEXT: v_add_nc_u32_e32 v35, 0x300, v4
-; GFX11-TRUE16-NEXT: v_and_b32_e32 v4, 0xff, v7
-; GFX11-TRUE16-NEXT: v_or_b32_e32 v5, v69, v5
-; GFX11-TRUE16-NEXT: v_and_b32_e32 v7, 0xff, v8
-; GFX11-TRUE16-NEXT: v_and_b32_e32 v8, 0xff, v34
-; GFX11-TRUE16-NEXT: v_or_b32_e32 v3, v112, v3
-; GFX11-TRUE16-NEXT: v_or_b32_e32 v4, v68, v4
-; GFX11-TRUE16-NEXT: v_add_nc_u32_e32 v34, 0x300, v5
-; GFX11-TRUE16-NEXT: v_or_b32_e32 v5, v67, v7
-; GFX11-TRUE16-NEXT: v_or_b32_e32 v7, v66, v8
-; GFX11-TRUE16-NEXT: v_and_b32_e32 v8, 0xff, v33
-; GFX11-TRUE16-NEXT: v_add_nc_u32_e32 v33, 0x300, v4
-; GFX11-TRUE16-NEXT: v_add_nc_u32_e32 v4, 3, v32
-; GFX11-TRUE16-NEXT: v_add_nc_u32_e32 v32, 0x300, v5
-; GFX11-TRUE16-NEXT: v_add_nc_u32_e32 v5, 0x300, v7
-; GFX11-TRUE16-NEXT: v_or_b32_e32 v7, v65, v8
-; GFX11-TRUE16-NEXT: v_and_b32_e64 v8, 0xffff, s4
+; GFX11-TRUE16-NEXT: v_add_nc_u32_e32 v4, 3, v37
+; GFX11-TRUE16-NEXT: v_add_nc_u32_e32 v8, 0x300, v3
+; GFX11-TRUE16-NEXT: v_add_nc_u32_e32 v3, 0x300, v5
+; GFX11-TRUE16-NEXT: v_or_b32_e32 v5, v70, v6
+; GFX11-TRUE16-NEXT: v_add_nc_u32_e32 v6, 3, v36
; GFX11-TRUE16-NEXT: v_and_b32_e32 v4, 0xff, v4
-; GFX11-TRUE16-NEXT: v_and_b32_e32 v36, 0xffff, v6
-; GFX11-TRUE16-NEXT: v_and_b32_e32 v5, 0xffff, v5
-; GFX11-TRUE16-NEXT: v_add_nc_u32_e32 v7, 0x300, v7
-; GFX11-TRUE16-NEXT: v_add_nc_u32_e32 v3, 0x300, v3
-; GFX11-TRUE16-NEXT: v_or_b32_e32 v4, v64, v4
-; GFX11-TRUE16-NEXT: v_and_b32_e32 v14, 0xffff, v14
-; GFX11-TRUE16-NEXT: v_lshl_or_b32 v10, v10, 16, v36
-; GFX11-TRUE16-NEXT: v_lshl_or_b32 v5, v7, 16, v5
-; GFX11-TRUE16-NEXT: v_and_b32_e32 v7, 0xffff, v33
-; GFX11-TRUE16-NEXT: v_add_nc_u32_e32 v4, 0x300, v4
-; GFX11-TRUE16-NEXT: v_and_b32_e32 v33, 0xffff, v37
-; GFX11-TRUE16-NEXT: v_and_b32_e32 v36, 0xffff, v22
+; GFX11-TRUE16-NEXT: v_add_nc_u32_e32 v32, 3, v32
+; GFX11-TRUE16-NEXT: v_and_b32_e32 v2, 0xff, v2
+; GFX11-TRUE16-NEXT: v_add_nc_u32_e32 v7, 0x300, v5
+; GFX11-TRUE16-NEXT: v_and_b32_e32 v5, 0xff, v6
+; GFX11-TRUE16-NEXT: v_or_b32_e32 v4, v69, v4
+; GFX11-TRUE16-NEXT: v_and_b32_e32 v6, 0xff, v34
+; GFX11-TRUE16-NEXT: v_and_b32_e32 v34, 0xff, v35
+; GFX11-TRUE16-NEXT: v_and_b32_e32 v32, 0xff, v32
+; GFX11-TRUE16-NEXT: v_or_b32_e32 v2, v112, v2
+; GFX11-TRUE16-NEXT: v_add_nc_u32_e32 v35, 0x300, v4
+; GFX11-TRUE16-NEXT: v_or_b32_e32 v4, v67, v6
+; GFX11-TRUE16-NEXT: v_or_b32_e32 v5, v68, v5
+; GFX11-TRUE16-NEXT: v_or_b32_e32 v34, v66, v34
+; GFX11-TRUE16-NEXT: v_add_nc_u32_e32 v0, 0x300, v0
; GFX11-TRUE16-NEXT: v_add_nc_u32_e32 v2, 0x300, v2
-; GFX11-TRUE16-NEXT: v_lshl_or_b32 v6, v32, 16, v7
-; GFX11-TRUE16-NEXT: v_lshl_or_b32 v4, v4, 16, v8
-; GFX11-TRUE16-NEXT: v_and_b32_e32 v8, 0xffff, v35
-; GFX11-TRUE16-NEXT: v_and_b32_e32 v32, 0xffff, v51
-; GFX11-TRUE16-NEXT: v_and_b32_e32 v35, 0xffff, v38
-; GFX11-TRUE16-NEXT: v_and_b32_e32 v0, 0xffff, v0
-; GFX11-TRUE16-NEXT: v_and_b32_e32 v19, 0xffff, v19
-; GFX11-TRUE16-NEXT: v_lshl_or_b32 v7, v34, 16, v8
-; GFX11-TRUE16-NEXT: v_lshl_or_b32 v8, v39, 16, v33
-; GFX11-TRUE16-NEXT: v_and_b32_e32 v33, 0xffff, v12
-; GFX11-TRUE16-NEXT: v_and_b32_e32 v34, 0xffff, v11
-; GFX11-TRUE16-NEXT: v_lshl_or_b32 v11, v50, 16, v32
-; GFX11-TRUE16-NEXT: v_and_b32_e32 v32, 0xffff, v1
-; GFX11-TRUE16-NEXT: v_lshl_or_b32 v9, v9, 16, v35
-; GFX11-TRUE16-NEXT: v_lshl_or_b32 v12, v15, 16, v14
-; GFX11-TRUE16-NEXT: v_lshl_or_b32 v13, v13, 16, v33
-; GFX11-TRUE16-NEXT: v_lshl_or_b32 v14, v3, 16, v34
-; GFX11-TRUE16-NEXT: v_lshl_or_b32 v16, v16, 16, v32
-; GFX11-TRUE16-NEXT: v_and_b32_e32 v32, 0xffff, v116
-; GFX11-TRUE16-NEXT: v_and_b32_e32 v33, 0xffff, v129
-; GFX11-TRUE16-NEXT: v_and_b32_e32 v34, 0xffff, v18
-; GFX11-TRUE16-NEXT: v_and_b32_e32 v35, 0xffff, v17
-; GFX11-TRUE16-NEXT: v_lshl_or_b32 v26, v26, 16, v36
-; GFX11-TRUE16-NEXT: v_lshl_or_b32 v17, v114, 16, v32
-; GFX11-TRUE16-NEXT: v_lshl_or_b32 v18, v144, 16, v33
-; GFX11-TRUE16-NEXT: v_lshl_or_b32 v20, v20, 16, v34
-; GFX11-TRUE16-NEXT: v_lshl_or_b32 v21, v21, 16, v35
-; GFX11-TRUE16-NEXT: v_and_b32_e32 v32, 0xffff, v115
-; GFX11-TRUE16-NEXT: v_and_b32_e32 v33, 0xffff, v135
-; GFX11-TRUE16-NEXT: v_and_b32_e32 v34, 0xffff, v131
-; GFX11-TRUE16-NEXT: v_and_b32_e32 v35, 0xffff, v23
-; GFX11-TRUE16-NEXT: v_and_b32_e32 v36, 0xffff, v27
-; GFX11-TRUE16-NEXT: v_lshl_or_b32 v22, v145, 16, v32
-; GFX11-TRUE16-NEXT: v_lshl_or_b32 v23, v119, 16, v33
-; GFX11-TRUE16-NEXT: v_lshl_or_b32 v24, v24, 16, v34
-; GFX11-TRUE16-NEXT: v_lshl_or_b32 v25, v25, 16, v35
-; GFX11-TRUE16-NEXT: v_and_b32_e32 v32, 0xffff, v163
-; GFX11-TRUE16-NEXT: v_and_b32_e32 v33, 0xffff, v182
-; GFX11-TRUE16-NEXT: v_and_b32_e32 v34, 0xffff, v181
-; GFX11-TRUE16-NEXT: v_and_b32_e32 v35, 0xffff, v28
-; GFX11-TRUE16-NEXT: v_lshl_or_b32 v15, v2, 16, v0
+; GFX11-TRUE16-NEXT: v_add_nc_u32_e32 v36, 0x300, v4
+; GFX11-TRUE16-NEXT: v_or_b32_e32 v4, v65, v33
+; GFX11-TRUE16-NEXT: v_add_nc_u32_e32 v6, 0x300, v5
+; GFX11-TRUE16-NEXT: v_add_nc_u32_e32 v5, 0x300, v34
+; GFX11-TRUE16-NEXT: v_mov_b16_e32 v7.h, v35.l
+; GFX11-TRUE16-NEXT: v_mov_b16_e32 v6.h, v36.l
+; GFX11-TRUE16-NEXT: v_add_nc_u32_e32 v33, 0x300, v4
+; GFX11-TRUE16-NEXT: v_or_b32_e32 v4, v64, v32
+; GFX11-TRUE16-NEXT: v_mov_b16_e32 v8.h, v3.l
+; GFX11-TRUE16-NEXT: v_mov_b16_e32 v9.h, v38.l
+; GFX11-TRUE16-NEXT: v_mov_b16_e32 v10.h, v39.l
+; GFX11-TRUE16-NEXT: v_mov_b16_e32 v5.h, v33.l
+; GFX11-TRUE16-NEXT: v_add_nc_u32_e32 v32, 0x300, v4
+; GFX11-TRUE16-NEXT: v_mov_b32_e32 v4, s4
+; GFX11-TRUE16-NEXT: v_mov_b16_e32 v11.h, v50.l
+; GFX11-TRUE16-NEXT: v_mov_b16_e32 v12.h, v51.l
+; GFX11-TRUE16-NEXT: v_mov_b16_e32 v13.h, v1.l
+; GFX11-TRUE16-NEXT: v_mov_b16_e32 v4.h, v32.l
+; GFX11-TRUE16-NEXT: v_mov_b16_e32 v14.h, v2.l
+; GFX11-TRUE16-NEXT: v_mov_b16_e32 v15.h, v0.l
; GFX11-TRUE16-NEXT: v_dual_mov_b32 v0, s0 :: v_dual_mov_b32 v1, s1
; GFX11-TRUE16-NEXT: v_dual_mov_b32 v2, s2 :: v_dual_mov_b32 v3, s3
-; GFX11-TRUE16-NEXT: v_lshl_or_b32 v19, v133, 16, v19
-; GFX11-TRUE16-NEXT: v_lshl_or_b32 v27, v160, 16, v32
-; GFX11-TRUE16-NEXT: v_lshl_or_b32 v28, v179, 16, v33
-; GFX11-TRUE16-NEXT: v_lshl_or_b32 v29, v29, 16, v34
-; GFX11-TRUE16-NEXT: v_lshl_or_b32 v30, v30, 16, v35
-; GFX11-TRUE16-NEXT: v_lshl_or_b32 v31, v31, 16, v36
+; GFX11-TRUE16-NEXT: v_mov_b16_e32 v16.h, v103.l
+; GFX11-TRUE16-NEXT: v_mov_b16_e32 v17.h, v114.l
+; GFX11-TRUE16-NEXT: v_mov_b16_e64 v18.h, v129.l
+; GFX11-TRUE16-NEXT: v_mov_b16_e64 v19.h, v133.l
+; GFX11-TRUE16-NEXT: v_mov_b16_e64 v20.h, v144.l
+; GFX11-TRUE16-NEXT: v_mov_b16_e64 v21.h, v145.l
+; GFX11-TRUE16-NEXT: v_mov_b16_e32 v22.h, v115.l
+; GFX11-TRUE16-NEXT: v_mov_b16_e32 v23.h, v119.l
+; GFX11-TRUE16-NEXT: v_mov_b16_e64 v24.h, v131.l
+; GFX11-TRUE16-NEXT: v_mov_b16_e64 v25.h, v135.l
+; GFX11-TRUE16-NEXT: v_mov_b16_e64 v26.h, v150.l
+; GFX11-TRUE16-NEXT: v_mov_b16_e64 v27.h, v160.l
+; GFX11-TRUE16-NEXT: v_mov_b16_e64 v28.h, v179.l
+; GFX11-TRUE16-NEXT: v_mov_b16_e64 v29.h, v181.l
+; GFX11-TRUE16-NEXT: v_mov_b16_e64 v30.h, v182.l
+; GFX11-TRUE16-NEXT: v_mov_b16_e32 v31.h, v43.l
; GFX11-TRUE16-NEXT: .LBB93_3: ; %end
; GFX11-TRUE16-NEXT: s_clause 0x1e
; GFX11-TRUE16-NEXT: scratch_load_b32 v94, off, s32 offset:320
@@ -202763,9 +208114,10 @@ define inreg <64 x i16> @bitcast_v128i8_to_v64i16_scalar(<128 x i8> inreg %a, i3
; GFX11-TRUE16-NEXT: s_lshl_b32 s6, s29, 8
; GFX11-TRUE16-NEXT: s_and_b32 s7, s2, 0xff
; GFX11-TRUE16-NEXT: s_or_b32 s5, s5, s6
-; GFX11-TRUE16-NEXT: s_lshl_b32 s6, s1, 8
-; GFX11-TRUE16-NEXT: v_and_b32_e64 v5, 0xffff, s5
+; GFX11-TRUE16-NEXT: s_delay_alu instid0(SALU_CYCLE_1)
+; GFX11-TRUE16-NEXT: v_dual_mov_b32 v4, s5 :: v_dual_and_b32 v1, 0xff, v35
; GFX11-TRUE16-NEXT: s_and_b32 s5, s0, 0xff
+; GFX11-TRUE16-NEXT: s_lshl_b32 s6, s1, 8
; GFX11-TRUE16-NEXT: s_lshl_b32 s8, s3, 8
; GFX11-TRUE16-NEXT: s_or_b32 s5, s5, s6
; GFX11-TRUE16-NEXT: s_or_b32 s6, s7, s8
@@ -202781,6 +208133,7 @@ define inreg <64 x i16> @bitcast_v128i8_to_v64i16_scalar(<128 x i8> inreg %a, i3
; GFX11-TRUE16-NEXT: s_lshl_b32 s8, s21, 8
; GFX11-TRUE16-NEXT: s_and_b32 s9, s22, 0xff
; GFX11-TRUE16-NEXT: s_lshl_b32 s10, s23, 8
+; GFX11-TRUE16-NEXT: v_and_b32_e32 v0, 0xff, v32
; GFX11-TRUE16-NEXT: s_or_b32 s7, s7, s8
; GFX11-TRUE16-NEXT: s_or_b32 s8, s9, s10
; GFX11-TRUE16-NEXT: s_and_b32 s9, s24, 0xff
@@ -202791,201 +208144,169 @@ define inreg <64 x i16> @bitcast_v128i8_to_v64i16_scalar(<128 x i8> inreg %a, i3
; GFX11-TRUE16-NEXT: s_or_b32 s10, s11, s12
; GFX11-TRUE16-NEXT: s_pack_ll_b32_b16 s7, s7, s8
; GFX11-TRUE16-NEXT: s_pack_ll_b32_b16 s8, s9, s10
-; GFX11-TRUE16-NEXT: v_and_b32_e32 v2, 0xff, v36
-; GFX11-TRUE16-NEXT: v_and_b32_e32 v0, 0xff, v32
-; GFX11-TRUE16-NEXT: v_and_b32_e32 v1, 0xff, v35
-; GFX11-TRUE16-NEXT: v_and_b32_e32 v4, 0xff, v34
-; GFX11-TRUE16-NEXT: v_and_b32_e32 v3, 0xff, v33
-; GFX11-TRUE16-NEXT: v_or_b32_e32 v2, v2, v68
+; GFX11-TRUE16-NEXT: v_and_b32_e32 v2, 0xff, v33
; GFX11-TRUE16-NEXT: v_or_b32_e32 v0, v0, v64
-; GFX11-TRUE16-NEXT: v_or_b32_e32 v1, v1, v66
-; GFX11-TRUE16-NEXT: v_or_b32_e32 v6, v4, v67
-; GFX11-TRUE16-NEXT: v_or_b32_e32 v3, v3, v65
-; GFX11-TRUE16-NEXT: v_and_b32_e32 v2, 0xffff, v2
-; GFX11-TRUE16-NEXT: v_lshl_or_b32 v4, v0, 16, v5
-; GFX11-TRUE16-NEXT: v_and_b32_e32 v1, 0xffff, v1
-; GFX11-TRUE16-NEXT: v_and_b32_e32 v0, 0xff, v38
-; GFX11-TRUE16-NEXT: v_and_b32_e32 v7, 0xff, v39
-; GFX11-TRUE16-NEXT: v_lshl_or_b32 v6, v6, 16, v2
-; GFX11-TRUE16-NEXT: v_and_b32_e32 v2, 0xff, v49
-; GFX11-TRUE16-NEXT: v_lshl_or_b32 v5, v3, 16, v1
+; GFX11-TRUE16-NEXT: v_or_b32_e32 v5, v1, v66
; GFX11-TRUE16-NEXT: v_and_b32_e32 v1, 0xff, v37
-; GFX11-TRUE16-NEXT: v_or_b32_e32 v0, v0, v70
+; GFX11-TRUE16-NEXT: v_and_b32_e32 v3, 0xff, v34
+; GFX11-TRUE16-NEXT: v_or_b32_e32 v2, v2, v65
+; GFX11-TRUE16-NEXT: v_mov_b16_e32 v4.h, v0.l
+; GFX11-TRUE16-NEXT: v_and_b32_e32 v0, 0xff, v36
+; GFX11-TRUE16-NEXT: s_waitcnt vmcnt(7)
+; GFX11-TRUE16-NEXT: v_and_b32_e32 v16, 0xff, v118
+; GFX11-TRUE16-NEXT: v_or_b32_e32 v3, v3, v67
+; GFX11-TRUE16-NEXT: v_mov_b16_e32 v5.h, v2.l
+; GFX11-TRUE16-NEXT: v_and_b32_e32 v2, 0xff, v38
+; GFX11-TRUE16-NEXT: v_or_b32_e32 v6, v0, v68
+; GFX11-TRUE16-NEXT: v_or_b32_e32 v0, v1, v69
+; GFX11-TRUE16-NEXT: v_mov_b16_e32 v6.h, v3.l
+; GFX11-TRUE16-NEXT: v_and_b32_e32 v1, 0xff, v39
+; GFX11-TRUE16-NEXT: v_or_b32_e32 v7, v2, v70
; GFX11-TRUE16-NEXT: v_and_b32_e32 v3, 0xff, v50
-; GFX11-TRUE16-NEXT: v_or_b32_e32 v2, v2, v71
-; GFX11-TRUE16-NEXT: v_and_b32_e32 v8, 0xff, v48
-; GFX11-TRUE16-NEXT: v_or_b32_e32 v1, v1, v69
-; GFX11-TRUE16-NEXT: v_and_b32_e32 v0, 0xffff, v0
-; GFX11-TRUE16-NEXT: v_or_b32_e32 v3, v3, v82
-; GFX11-TRUE16-NEXT: v_or_b32_e32 v9, v7, v80
-; GFX11-TRUE16-NEXT: v_and_b32_e32 v2, 0xffff, v2
-; GFX11-TRUE16-NEXT: v_or_b32_e32 v10, v8, v81
-; GFX11-TRUE16-NEXT: v_lshl_or_b32 v7, v1, 16, v0
-; GFX11-TRUE16-NEXT: v_and_b32_e32 v3, 0xffff, v3
-; GFX11-TRUE16-NEXT: v_and_b32_e32 v0, 0xff, v53
-; GFX11-TRUE16-NEXT: v_lshl_or_b32 v8, v9, 16, v2
-; GFX11-TRUE16-NEXT: v_and_b32_e32 v2, 0xff, v55
-; GFX11-TRUE16-NEXT: v_and_b32_e32 v1, 0xff, v51
-; GFX11-TRUE16-NEXT: v_lshl_or_b32 v9, v10, 16, v3
-; GFX11-TRUE16-NEXT: v_or_b32_e32 v0, v0, v84
-; GFX11-TRUE16-NEXT: v_and_b32_e32 v3, 0xff, v52
-; GFX11-TRUE16-NEXT: v_and_b32_e32 v10, 0xff, v54
-; GFX11-TRUE16-NEXT: v_or_b32_e32 v2, v2, v86
-; GFX11-TRUE16-NEXT: v_or_b32_e32 v1, v1, v83
-; GFX11-TRUE16-NEXT: v_and_b32_e32 v0, 0xffff, v0
+; GFX11-TRUE16-NEXT: v_mov_b16_e32 v7.h, v0.l
+; GFX11-TRUE16-NEXT: v_and_b32_e32 v0, 0xff, v48
+; GFX11-TRUE16-NEXT: v_and_b32_e32 v2, 0xff, v49
+; GFX11-TRUE16-NEXT: v_or_b32_e32 v1, v1, v80
+; GFX11-TRUE16-NEXT: v_or_b32_e32 v9, v3, v82
+; GFX11-TRUE16-NEXT: v_and_b32_e32 v3, 0xff, v55
+; GFX11-TRUE16-NEXT: v_or_b32_e32 v0, v0, v81
+; GFX11-TRUE16-NEXT: v_or_b32_e32 v8, v2, v71
+; GFX11-TRUE16-NEXT: v_and_b32_e32 v2, 0xff, v51
+; GFX11-TRUE16-NEXT: v_mov_b16_e32 v8.h, v1.l
+; GFX11-TRUE16-NEXT: v_and_b32_e32 v1, 0xff, v53
+; GFX11-TRUE16-NEXT: v_mov_b16_e32 v9.h, v0.l
+; GFX11-TRUE16-NEXT: v_and_b32_e32 v0, 0xff, v52
+; GFX11-TRUE16-NEXT: v_or_b32_e32 v2, v2, v83
+; GFX11-TRUE16-NEXT: v_or_b32_e32 v11, v3, v86
+; GFX11-TRUE16-NEXT: v_or_b32_e32 v10, v1, v84
; GFX11-TRUE16-NEXT: s_waitcnt vmcnt(0)
-; GFX11-TRUE16-NEXT: v_and_b32_e32 v11, 0xff, v96
-; GFX11-TRUE16-NEXT: v_or_b32_e32 v3, v3, v85
-; GFX11-TRUE16-NEXT: v_or_b32_e32 v12, v10, v97
-; GFX11-TRUE16-NEXT: v_and_b32_e32 v2, 0xffff, v2
-; GFX11-TRUE16-NEXT: v_lshl_or_b32 v10, v1, 16, v0
-; GFX11-TRUE16-NEXT: v_or_b32_e32 v0, v11, v87
-; GFX11-TRUE16-NEXT: v_and_b32_e32 v1, 0xff, v99
-; GFX11-TRUE16-NEXT: v_and_b32_e32 v12, 0xffff, v12
-; GFX11-TRUE16-NEXT: v_lshl_or_b32 v11, v3, 16, v2
-; GFX11-TRUE16-NEXT: v_and_b32_e32 v3, 0xff, v103
-; GFX11-TRUE16-NEXT: v_and_b32_e32 v14, 0xff, v114
-; GFX11-TRUE16-NEXT: v_and_b32_e32 v2, 0xff, v98
-; GFX11-TRUE16-NEXT: v_lshl_or_b32 v12, v0, 16, v12
+; GFX11-TRUE16-NEXT: v_and_b32_e32 v1, 0xff, v96
+; GFX11-TRUE16-NEXT: v_or_b32_e32 v0, v0, v85
+; GFX11-TRUE16-NEXT: v_mov_b16_e32 v10.h, v2.l
+; GFX11-TRUE16-NEXT: v_and_b32_e32 v2, 0xff, v54
+; GFX11-TRUE16-NEXT: v_and_b32_e32 v3, 0xff, v98
+; GFX11-TRUE16-NEXT: v_or_b32_e32 v1, v1, v87
+; GFX11-TRUE16-NEXT: v_mov_b16_e32 v11.h, v0.l
+; GFX11-TRUE16-NEXT: v_and_b32_e32 v0, 0xff, v99
+; GFX11-TRUE16-NEXT: v_or_b32_e32 v12, v2, v97
+; GFX11-TRUE16-NEXT: v_or_b32_e32 v2, v3, v102
+; GFX11-TRUE16-NEXT: v_mov_b16_e32 v12.h, v1.l
+; GFX11-TRUE16-NEXT: v_and_b32_e32 v1, 0xff, v103
+; GFX11-TRUE16-NEXT: v_or_b32_e32 v13, v0, v101
; GFX11-TRUE16-NEXT: v_and_b32_e32 v0, 0xff, v100
-; GFX11-TRUE16-NEXT: v_or_b32_e32 v3, v3, v113
-; GFX11-TRUE16-NEXT: v_or_b32_e32 v1, v1, v101
-; GFX11-TRUE16-NEXT: v_and_b32_e32 v13, 0xff, v116
-; GFX11-TRUE16-NEXT: v_or_b32_e32 v17, v14, v128
+; GFX11-TRUE16-NEXT: v_mov_b16_e32 v13.h, v2.l
+; GFX11-TRUE16-NEXT: v_and_b32_e32 v2, 0xff, v114
+; GFX11-TRUE16-NEXT: v_or_b32_e32 v14, v1, v113
+; GFX11-TRUE16-NEXT: v_and_b32_e32 v1, 0xff, v117
; GFX11-TRUE16-NEXT: v_or_b32_e32 v0, v0, v112
-; GFX11-TRUE16-NEXT: v_and_b32_e32 v3, 0xffff, v3
-; GFX11-TRUE16-NEXT: v_and_b32_e32 v14, 0xff, v117
-; GFX11-TRUE16-NEXT: v_or_b32_e32 v2, v2, v102
-; GFX11-TRUE16-NEXT: v_and_b32_e32 v1, 0xffff, v1
-; GFX11-TRUE16-NEXT: v_or_b32_e32 v13, v13, v130
-; GFX11-TRUE16-NEXT: v_and_b32_e32 v16, 0xff, v133
-; GFX11-TRUE16-NEXT: v_or_b32_e32 v20, v14, v132
-; GFX11-TRUE16-NEXT: v_lshl_or_b32 v14, v0, 16, v3
+; GFX11-TRUE16-NEXT: v_and_b32_e32 v3, 0xff, v116
+; GFX11-TRUE16-NEXT: v_or_b32_e32 v2, v2, v128
+; GFX11-TRUE16-NEXT: v_or_b32_e32 v16, v16, v134
+; GFX11-TRUE16-NEXT: v_or_b32_e32 v1, v1, v132
+; GFX11-TRUE16-NEXT: v_mov_b16_e32 v14.h, v0.l
+; GFX11-TRUE16-NEXT: v_and_b32_e32 v0, 0xff, v133
+; GFX11-TRUE16-NEXT: v_or_b32_e32 v15, v3, v130
+; GFX11-TRUE16-NEXT: v_mov_b16_e32 v15.h, v2.l
+; GFX11-TRUE16-NEXT: v_mov_b16_e32 v16.h, v1.l
+; GFX11-TRUE16-NEXT: v_mov_b32_e32 v1, s6
+; GFX11-TRUE16-NEXT: v_or_b32_e32 v17, v0, v161
+; GFX11-TRUE16-NEXT: v_and_b32_e32 v0, 0xff, v129
+; GFX11-TRUE16-NEXT: v_dual_mov_b32 v2, s7 :: v_dual_mov_b32 v3, s8
+; GFX11-TRUE16-NEXT: s_delay_alu instid0(VALU_DEP_2) | instskip(NEXT) | instid1(VALU_DEP_1)
+; GFX11-TRUE16-NEXT: v_or_b32_e32 v0, v0, v147
+; GFX11-TRUE16-NEXT: v_mov_b16_e32 v17.h, v0.l
; GFX11-TRUE16-NEXT: v_and_b32_e32 v0, 0xff, v148
-; GFX11-TRUE16-NEXT: v_and_b32_e32 v15, 0xff, v118
-; GFX11-TRUE16-NEXT: v_and_b32_e32 v18, 0xff, v129
-; GFX11-TRUE16-NEXT: v_or_b32_e32 v16, v16, v161
-; GFX11-TRUE16-NEXT: v_and_b32_e32 v19, 0xffff, v13
-; GFX11-TRUE16-NEXT: v_lshl_or_b32 v13, v2, 16, v1
-; GFX11-TRUE16-NEXT: v_or_b32_e32 v0, v0, v166
-; GFX11-TRUE16-NEXT: v_and_b32_e32 v1, 0xff, v144
-; GFX11-TRUE16-NEXT: v_or_b32_e32 v15, v15, v134
-; GFX11-TRUE16-NEXT: v_or_b32_e32 v18, v18, v147
-; GFX11-TRUE16-NEXT: v_and_b32_e32 v22, 0xffff, v16
-; GFX11-TRUE16-NEXT: v_and_b32_e32 v0, 0xffff, v0
-; GFX11-TRUE16-NEXT: v_or_b32_e32 v1, v1, v167
-; GFX11-TRUE16-NEXT: v_and_b32_e32 v21, 0xffff, v15
-; GFX11-TRUE16-NEXT: v_lshl_or_b32 v15, v17, 16, v19
-; GFX11-TRUE16-NEXT: v_lshl_or_b32 v17, v18, 16, v22
-; GFX11-TRUE16-NEXT: v_mov_b32_e32 v2, s7
-; GFX11-TRUE16-NEXT: v_lshl_or_b32 v18, v1, 16, v0
+; GFX11-TRUE16-NEXT: s_delay_alu instid0(VALU_DEP_1) | instskip(SKIP_1) | instid1(VALU_DEP_1)
+; GFX11-TRUE16-NEXT: v_or_b32_e32 v18, v0, v166
+; GFX11-TRUE16-NEXT: v_and_b32_e32 v0, 0xff, v144
+; GFX11-TRUE16-NEXT: v_or_b32_e32 v0, v0, v167
+; GFX11-TRUE16-NEXT: s_delay_alu instid0(VALU_DEP_1) | instskip(SKIP_1) | instid1(VALU_DEP_1)
+; GFX11-TRUE16-NEXT: v_mov_b16_e32 v18.h, v0.l
; GFX11-TRUE16-NEXT: v_and_b32_e32 v0, 0xff, v151
-; GFX11-TRUE16-NEXT: v_and_b32_e32 v1, 0xff, v149
-; GFX11-TRUE16-NEXT: v_lshl_or_b32 v16, v20, 16, v21
-; GFX11-TRUE16-NEXT: v_mov_b32_e32 v3, s8
-; GFX11-TRUE16-NEXT: s_delay_alu instid0(VALU_DEP_4) | instskip(NEXT) | instid1(VALU_DEP_4)
-; GFX11-TRUE16-NEXT: v_or_b32_e32 v0, v0, v180
-; GFX11-TRUE16-NEXT: v_or_b32_e32 v1, v1, v177
-; GFX11-TRUE16-NEXT: s_delay_alu instid0(VALU_DEP_2) | instskip(NEXT) | instid1(VALU_DEP_1)
-; GFX11-TRUE16-NEXT: v_and_b32_e32 v0, 0xffff, v0
-; GFX11-TRUE16-NEXT: v_lshl_or_b32 v19, v1, 16, v0
+; GFX11-TRUE16-NEXT: v_or_b32_e32 v19, v0, v180
+; GFX11-TRUE16-NEXT: v_and_b32_e32 v0, 0xff, v149
+; GFX11-TRUE16-NEXT: s_delay_alu instid0(VALU_DEP_1) | instskip(NEXT) | instid1(VALU_DEP_1)
+; GFX11-TRUE16-NEXT: v_or_b32_e32 v0, v0, v177
+; GFX11-TRUE16-NEXT: v_mov_b16_e32 v19.h, v0.l
; GFX11-TRUE16-NEXT: v_and_b32_e32 v0, 0xff, v165
-; GFX11-TRUE16-NEXT: v_and_b32_e32 v1, 0xff, v162
-; GFX11-TRUE16-NEXT: s_delay_alu instid0(VALU_DEP_2) | instskip(NEXT) | instid1(VALU_DEP_2)
-; GFX11-TRUE16-NEXT: v_or_b32_e32 v0, v0, v42
-; GFX11-TRUE16-NEXT: v_or_b32_e32 v1, v1, v41
-; GFX11-TRUE16-NEXT: s_delay_alu instid0(VALU_DEP_2) | instskip(NEXT) | instid1(VALU_DEP_1)
-; GFX11-TRUE16-NEXT: v_and_b32_e32 v0, 0xffff, v0
-; GFX11-TRUE16-NEXT: v_lshl_or_b32 v20, v1, 16, v0
+; GFX11-TRUE16-NEXT: s_delay_alu instid0(VALU_DEP_1) | instskip(SKIP_1) | instid1(VALU_DEP_1)
+; GFX11-TRUE16-NEXT: v_or_b32_e32 v20, v0, v42
+; GFX11-TRUE16-NEXT: v_and_b32_e32 v0, 0xff, v162
+; GFX11-TRUE16-NEXT: v_or_b32_e32 v0, v0, v41
+; GFX11-TRUE16-NEXT: s_delay_alu instid0(VALU_DEP_1) | instskip(SKIP_1) | instid1(VALU_DEP_1)
+; GFX11-TRUE16-NEXT: v_mov_b16_e32 v20.h, v0.l
; GFX11-TRUE16-NEXT: v_and_b32_e32 v0, 0xff, v178
-; GFX11-TRUE16-NEXT: v_and_b32_e32 v1, 0xff, v115
-; GFX11-TRUE16-NEXT: s_delay_alu instid0(VALU_DEP_2) | instskip(NEXT) | instid1(VALU_DEP_2)
-; GFX11-TRUE16-NEXT: v_or_b32_e32 v0, v0, v45
-; GFX11-TRUE16-NEXT: v_or_b32_e32 v1, v1, v44
-; GFX11-TRUE16-NEXT: s_delay_alu instid0(VALU_DEP_2) | instskip(NEXT) | instid1(VALU_DEP_1)
-; GFX11-TRUE16-NEXT: v_and_b32_e32 v0, 0xffff, v0
-; GFX11-TRUE16-NEXT: v_lshl_or_b32 v21, v1, 16, v0
+; GFX11-TRUE16-NEXT: v_or_b32_e32 v21, v0, v45
+; GFX11-TRUE16-NEXT: v_and_b32_e32 v0, 0xff, v115
+; GFX11-TRUE16-NEXT: s_delay_alu instid0(VALU_DEP_1) | instskip(NEXT) | instid1(VALU_DEP_1)
+; GFX11-TRUE16-NEXT: v_or_b32_e32 v0, v0, v44
+; GFX11-TRUE16-NEXT: v_mov_b16_e32 v21.h, v0.l
; GFX11-TRUE16-NEXT: v_and_b32_e32 v0, 0xff, v131
-; GFX11-TRUE16-NEXT: v_and_b32_e32 v1, 0xff, v119
-; GFX11-TRUE16-NEXT: s_delay_alu instid0(VALU_DEP_2) | instskip(NEXT) | instid1(VALU_DEP_2)
-; GFX11-TRUE16-NEXT: v_or_b32_e32 v0, v0, v59
-; GFX11-TRUE16-NEXT: v_or_b32_e32 v1, v1, v56
-; GFX11-TRUE16-NEXT: s_delay_alu instid0(VALU_DEP_2) | instskip(NEXT) | instid1(VALU_DEP_1)
-; GFX11-TRUE16-NEXT: v_and_b32_e32 v0, 0xffff, v0
-; GFX11-TRUE16-NEXT: v_lshl_or_b32 v22, v1, 16, v0
+; GFX11-TRUE16-NEXT: s_delay_alu instid0(VALU_DEP_1) | instskip(SKIP_1) | instid1(VALU_DEP_1)
+; GFX11-TRUE16-NEXT: v_or_b32_e32 v22, v0, v59
+; GFX11-TRUE16-NEXT: v_and_b32_e32 v0, 0xff, v119
+; GFX11-TRUE16-NEXT: v_or_b32_e32 v0, v0, v56
+; GFX11-TRUE16-NEXT: s_delay_alu instid0(VALU_DEP_1) | instskip(SKIP_1) | instid1(VALU_DEP_1)
+; GFX11-TRUE16-NEXT: v_mov_b16_e32 v22.h, v0.l
; GFX11-TRUE16-NEXT: v_and_b32_e32 v0, 0xff, v145
-; GFX11-TRUE16-NEXT: v_and_b32_e32 v1, 0xff, v135
-; GFX11-TRUE16-NEXT: s_delay_alu instid0(VALU_DEP_2) | instskip(NEXT) | instid1(VALU_DEP_2)
-; GFX11-TRUE16-NEXT: v_or_b32_e32 v0, v0, v60
-; GFX11-TRUE16-NEXT: v_or_b32_e32 v1, v1, v61
-; GFX11-TRUE16-NEXT: s_delay_alu instid0(VALU_DEP_2) | instskip(NEXT) | instid1(VALU_DEP_1)
-; GFX11-TRUE16-NEXT: v_and_b32_e32 v0, 0xffff, v0
-; GFX11-TRUE16-NEXT: v_lshl_or_b32 v23, v1, 16, v0
+; GFX11-TRUE16-NEXT: v_or_b32_e32 v23, v0, v60
+; GFX11-TRUE16-NEXT: v_and_b32_e32 v0, 0xff, v135
+; GFX11-TRUE16-NEXT: s_delay_alu instid0(VALU_DEP_1) | instskip(NEXT) | instid1(VALU_DEP_1)
+; GFX11-TRUE16-NEXT: v_or_b32_e32 v0, v0, v61
+; GFX11-TRUE16-NEXT: v_mov_b16_e32 v23.h, v0.l
; GFX11-TRUE16-NEXT: v_and_b32_e32 v0, 0xff, v150
-; GFX11-TRUE16-NEXT: v_and_b32_e32 v1, 0xff, v146
-; GFX11-TRUE16-NEXT: s_delay_alu instid0(VALU_DEP_2) | instskip(NEXT) | instid1(VALU_DEP_2)
-; GFX11-TRUE16-NEXT: v_or_b32_e32 v0, v0, v63
-; GFX11-TRUE16-NEXT: v_or_b32_e32 v1, v1, v62
-; GFX11-TRUE16-NEXT: s_delay_alu instid0(VALU_DEP_2) | instskip(NEXT) | instid1(VALU_DEP_1)
-; GFX11-TRUE16-NEXT: v_and_b32_e32 v0, 0xffff, v0
-; GFX11-TRUE16-NEXT: v_lshl_or_b32 v24, v1, 16, v0
+; GFX11-TRUE16-NEXT: s_delay_alu instid0(VALU_DEP_1) | instskip(SKIP_1) | instid1(VALU_DEP_1)
+; GFX11-TRUE16-NEXT: v_or_b32_e32 v24, v0, v63
+; GFX11-TRUE16-NEXT: v_and_b32_e32 v0, 0xff, v146
+; GFX11-TRUE16-NEXT: v_or_b32_e32 v0, v0, v62
+; GFX11-TRUE16-NEXT: s_delay_alu instid0(VALU_DEP_1) | instskip(SKIP_1) | instid1(VALU_DEP_1)
+; GFX11-TRUE16-NEXT: v_mov_b16_e32 v24.h, v0.l
; GFX11-TRUE16-NEXT: v_and_b32_e32 v0, 0xff, v163
-; GFX11-TRUE16-NEXT: v_and_b32_e32 v1, 0xff, v160
-; GFX11-TRUE16-NEXT: s_delay_alu instid0(VALU_DEP_2) | instskip(NEXT) | instid1(VALU_DEP_2)
-; GFX11-TRUE16-NEXT: v_or_b32_e32 v0, v0, v73
-; GFX11-TRUE16-NEXT: v_or_b32_e32 v1, v1, v72
-; GFX11-TRUE16-NEXT: s_delay_alu instid0(VALU_DEP_2) | instskip(NEXT) | instid1(VALU_DEP_1)
-; GFX11-TRUE16-NEXT: v_and_b32_e32 v0, 0xffff, v0
-; GFX11-TRUE16-NEXT: v_lshl_or_b32 v25, v1, 16, v0
+; GFX11-TRUE16-NEXT: v_or_b32_e32 v25, v0, v73
+; GFX11-TRUE16-NEXT: v_and_b32_e32 v0, 0xff, v160
+; GFX11-TRUE16-NEXT: s_delay_alu instid0(VALU_DEP_1) | instskip(NEXT) | instid1(VALU_DEP_1)
+; GFX11-TRUE16-NEXT: v_or_b32_e32 v0, v0, v72
+; GFX11-TRUE16-NEXT: v_mov_b16_e32 v25.h, v0.l
; GFX11-TRUE16-NEXT: v_and_b32_e32 v0, 0xff, v176
-; GFX11-TRUE16-NEXT: v_and_b32_e32 v1, 0xff, v164
-; GFX11-TRUE16-NEXT: s_delay_alu instid0(VALU_DEP_2) | instskip(NEXT) | instid1(VALU_DEP_2)
-; GFX11-TRUE16-NEXT: v_or_b32_e32 v0, v0, v75
-; GFX11-TRUE16-NEXT: v_or_b32_e32 v1, v1, v74
-; GFX11-TRUE16-NEXT: s_delay_alu instid0(VALU_DEP_2) | instskip(NEXT) | instid1(VALU_DEP_1)
-; GFX11-TRUE16-NEXT: v_and_b32_e32 v0, 0xffff, v0
-; GFX11-TRUE16-NEXT: v_lshl_or_b32 v26, v1, 16, v0
+; GFX11-TRUE16-NEXT: s_delay_alu instid0(VALU_DEP_1) | instskip(SKIP_1) | instid1(VALU_DEP_1)
+; GFX11-TRUE16-NEXT: v_or_b32_e32 v26, v0, v75
+; GFX11-TRUE16-NEXT: v_and_b32_e32 v0, 0xff, v164
+; GFX11-TRUE16-NEXT: v_or_b32_e32 v0, v0, v74
+; GFX11-TRUE16-NEXT: s_delay_alu instid0(VALU_DEP_1) | instskip(SKIP_1) | instid1(VALU_DEP_1)
+; GFX11-TRUE16-NEXT: v_mov_b16_e32 v26.h, v0.l
; GFX11-TRUE16-NEXT: v_and_b32_e32 v0, 0xff, v181
-; GFX11-TRUE16-NEXT: v_and_b32_e32 v1, 0xff, v179
-; GFX11-TRUE16-NEXT: s_delay_alu instid0(VALU_DEP_2) | instskip(NEXT) | instid1(VALU_DEP_2)
-; GFX11-TRUE16-NEXT: v_or_b32_e32 v0, v0, v77
-; GFX11-TRUE16-NEXT: v_or_b32_e32 v1, v1, v76
-; GFX11-TRUE16-NEXT: s_delay_alu instid0(VALU_DEP_2) | instskip(NEXT) | instid1(VALU_DEP_1)
-; GFX11-TRUE16-NEXT: v_and_b32_e32 v0, 0xffff, v0
-; GFX11-TRUE16-NEXT: v_lshl_or_b32 v27, v1, 16, v0
+; GFX11-TRUE16-NEXT: v_or_b32_e32 v27, v0, v77
+; GFX11-TRUE16-NEXT: v_and_b32_e32 v0, 0xff, v179
+; GFX11-TRUE16-NEXT: s_delay_alu instid0(VALU_DEP_1) | instskip(NEXT) | instid1(VALU_DEP_1)
+; GFX11-TRUE16-NEXT: v_or_b32_e32 v0, v0, v76
+; GFX11-TRUE16-NEXT: v_mov_b16_e32 v27.h, v0.l
; GFX11-TRUE16-NEXT: v_and_b32_e32 v0, 0xff, v183
-; GFX11-TRUE16-NEXT: v_and_b32_e32 v1, 0xff, v182
-; GFX11-TRUE16-NEXT: s_delay_alu instid0(VALU_DEP_2) | instskip(NEXT) | instid1(VALU_DEP_2)
-; GFX11-TRUE16-NEXT: v_or_b32_e32 v0, v0, v78
-; GFX11-TRUE16-NEXT: v_or_b32_e32 v1, v1, v79
-; GFX11-TRUE16-NEXT: s_delay_alu instid0(VALU_DEP_2) | instskip(NEXT) | instid1(VALU_DEP_1)
-; GFX11-TRUE16-NEXT: v_and_b32_e32 v0, 0xffff, v0
-; GFX11-TRUE16-NEXT: v_lshl_or_b32 v28, v1, 16, v0
+; GFX11-TRUE16-NEXT: s_delay_alu instid0(VALU_DEP_1) | instskip(SKIP_1) | instid1(VALU_DEP_1)
+; GFX11-TRUE16-NEXT: v_or_b32_e32 v28, v0, v78
+; GFX11-TRUE16-NEXT: v_and_b32_e32 v0, 0xff, v182
+; GFX11-TRUE16-NEXT: v_or_b32_e32 v0, v0, v79
+; GFX11-TRUE16-NEXT: s_delay_alu instid0(VALU_DEP_1) | instskip(SKIP_1) | instid1(VALU_DEP_1)
+; GFX11-TRUE16-NEXT: v_mov_b16_e32 v28.h, v0.l
; GFX11-TRUE16-NEXT: v_and_b32_e32 v0, 0xff, v43
-; GFX11-TRUE16-NEXT: v_and_b32_e32 v1, 0xff, v40
-; GFX11-TRUE16-NEXT: s_delay_alu instid0(VALU_DEP_2) | instskip(NEXT) | instid1(VALU_DEP_2)
-; GFX11-TRUE16-NEXT: v_or_b32_e32 v0, v0, v89
-; GFX11-TRUE16-NEXT: v_or_b32_e32 v1, v1, v88
-; GFX11-TRUE16-NEXT: s_delay_alu instid0(VALU_DEP_2) | instskip(NEXT) | instid1(VALU_DEP_1)
-; GFX11-TRUE16-NEXT: v_and_b32_e32 v0, 0xffff, v0
-; GFX11-TRUE16-NEXT: v_lshl_or_b32 v29, v1, 16, v0
+; GFX11-TRUE16-NEXT: v_or_b32_e32 v29, v0, v89
+; GFX11-TRUE16-NEXT: v_and_b32_e32 v0, 0xff, v40
+; GFX11-TRUE16-NEXT: s_delay_alu instid0(VALU_DEP_1) | instskip(NEXT) | instid1(VALU_DEP_1)
+; GFX11-TRUE16-NEXT: v_or_b32_e32 v0, v0, v88
+; GFX11-TRUE16-NEXT: v_mov_b16_e32 v29.h, v0.l
; GFX11-TRUE16-NEXT: v_and_b32_e32 v0, 0xff, v47
-; GFX11-TRUE16-NEXT: v_and_b32_e32 v1, 0xff, v46
-; GFX11-TRUE16-NEXT: s_delay_alu instid0(VALU_DEP_2) | instskip(NEXT) | instid1(VALU_DEP_2)
-; GFX11-TRUE16-NEXT: v_or_b32_e32 v0, v0, v91
-; GFX11-TRUE16-NEXT: v_or_b32_e32 v1, v1, v90
-; GFX11-TRUE16-NEXT: s_delay_alu instid0(VALU_DEP_2) | instskip(NEXT) | instid1(VALU_DEP_1)
-; GFX11-TRUE16-NEXT: v_and_b32_e32 v0, 0xffff, v0
-; GFX11-TRUE16-NEXT: v_lshl_or_b32 v30, v1, 16, v0
+; GFX11-TRUE16-NEXT: s_delay_alu instid0(VALU_DEP_1) | instskip(SKIP_1) | instid1(VALU_DEP_1)
+; GFX11-TRUE16-NEXT: v_or_b32_e32 v30, v0, v91
+; GFX11-TRUE16-NEXT: v_and_b32_e32 v0, 0xff, v46
+; GFX11-TRUE16-NEXT: v_or_b32_e32 v0, v0, v90
+; GFX11-TRUE16-NEXT: s_delay_alu instid0(VALU_DEP_1) | instskip(SKIP_1) | instid1(VALU_DEP_1)
+; GFX11-TRUE16-NEXT: v_mov_b16_e32 v30.h, v0.l
; GFX11-TRUE16-NEXT: v_and_b32_e32 v0, 0xff, v58
-; GFX11-TRUE16-NEXT: v_and_b32_e32 v1, 0xff, v57
-; GFX11-TRUE16-NEXT: s_delay_alu instid0(VALU_DEP_2) | instskip(NEXT) | instid1(VALU_DEP_2)
-; GFX11-TRUE16-NEXT: v_or_b32_e32 v0, v0, v92
-; GFX11-TRUE16-NEXT: v_or_b32_e32 v1, v1, v93
-; GFX11-TRUE16-NEXT: s_delay_alu instid0(VALU_DEP_2) | instskip(NEXT) | instid1(VALU_DEP_1)
-; GFX11-TRUE16-NEXT: v_and_b32_e32 v0, 0xffff, v0
-; GFX11-TRUE16-NEXT: v_lshl_or_b32 v31, v1, 16, v0
-; GFX11-TRUE16-NEXT: v_dual_mov_b32 v0, s5 :: v_dual_mov_b32 v1, s6
+; GFX11-TRUE16-NEXT: v_or_b32_e32 v31, v0, v92
+; GFX11-TRUE16-NEXT: v_and_b32_e32 v0, 0xff, v57
+; GFX11-TRUE16-NEXT: s_delay_alu instid0(VALU_DEP_1) | instskip(NEXT) | instid1(VALU_DEP_1)
+; GFX11-TRUE16-NEXT: v_or_b32_e32 v0, v0, v93
+; GFX11-TRUE16-NEXT: v_mov_b16_e32 v31.h, v0.l
+; GFX11-TRUE16-NEXT: v_mov_b32_e32 v0, s5
; GFX11-TRUE16-NEXT: s_and_not1_b32 vcc_lo, exec_lo, s4
; GFX11-TRUE16-NEXT: s_cbranch_vccnz .LBB97_3
; GFX11-TRUE16-NEXT: .LBB97_2: ; %cmp.true
@@ -203025,57 +208346,59 @@ define inreg <64 x i16> @bitcast_v128i8_to_v64i16_scalar(<128 x i8> inreg %a, i3
; GFX11-TRUE16-NEXT: s_or_b32 s10, s11, s10
; GFX11-TRUE16-NEXT: s_or_b32 s0, s1, s0
; GFX11-TRUE16-NEXT: s_or_b32 s1, s3, s2
+; GFX11-TRUE16-NEXT: s_addk_i32 s5, 0x300
+; GFX11-TRUE16-NEXT: s_addk_i32 s6, 0x300
; GFX11-TRUE16-NEXT: s_addk_i32 s9, 0x300
; GFX11-TRUE16-NEXT: s_addk_i32 s0, 0x300
; GFX11-TRUE16-NEXT: s_addk_i32 s1, 0x300
; GFX11-TRUE16-NEXT: s_addk_i32 s10, 0x300
+; GFX11-TRUE16-NEXT: s_addk_i32 s4, 0x300
+; GFX11-TRUE16-NEXT: s_waitcnt vmcnt(38)
+; GFX11-TRUE16-NEXT: v_add_nc_u32_e32 v1, 3, v57
; GFX11-TRUE16-NEXT: s_pack_ll_b32_b16 s0, s0, s1
; GFX11-TRUE16-NEXT: s_pack_ll_b32_b16 s1, s9, s10
; GFX11-TRUE16-NEXT: s_waitcnt vmcnt(37)
; GFX11-TRUE16-NEXT: v_add_nc_u32_e32 v0, 3, v58
-; GFX11-TRUE16-NEXT: s_addk_i32 s5, 0x300
-; GFX11-TRUE16-NEXT: s_addk_i32 s6, 0x300
-; GFX11-TRUE16-NEXT: v_add_nc_u32_e32 v1, 3, v57
; GFX11-TRUE16-NEXT: s_pack_ll_b32_b16 s3, s5, s6
; GFX11-TRUE16-NEXT: s_waitcnt vmcnt(35)
; GFX11-TRUE16-NEXT: v_add_nc_u32_e32 v2, 3, v47
-; GFX11-TRUE16-NEXT: v_and_b32_e32 v0, 0xff, v0
; GFX11-TRUE16-NEXT: s_addk_i32 s7, 0x300
; GFX11-TRUE16-NEXT: s_addk_i32 s8, 0x300
-; GFX11-TRUE16-NEXT: v_and_b32_e32 v1, 0xff, v1
+; GFX11-TRUE16-NEXT: v_add_nc_u32_e32 v3, 3, v46
+; GFX11-TRUE16-NEXT: v_and_b32_e32 v0, 0xff, v0
; GFX11-TRUE16-NEXT: v_and_b32_e32 v2, 0xff, v2
-; GFX11-TRUE16-NEXT: v_or_b32_e32 v0, v92, v0
; GFX11-TRUE16-NEXT: s_pack_ll_b32_b16 s2, s7, s8
-; GFX11-TRUE16-NEXT: v_add_nc_u32_e32 v3, 3, v46
-; GFX11-TRUE16-NEXT: v_or_b32_e32 v1, v93, v1
+; GFX11-TRUE16-NEXT: v_and_b32_e32 v1, 0xff, v1
+; GFX11-TRUE16-NEXT: v_and_b32_e32 v3, 0xff, v3
+; GFX11-TRUE16-NEXT: v_or_b32_e32 v0, v92, v0
; GFX11-TRUE16-NEXT: v_or_b32_e32 v2, v91, v2
; GFX11-TRUE16-NEXT: s_waitcnt vmcnt(33)
; GFX11-TRUE16-NEXT: v_add_nc_u32_e32 v4, 3, v43
-; GFX11-TRUE16-NEXT: v_add_nc_u32_e32 v27, 0x300, v0
-; GFX11-TRUE16-NEXT: v_and_b32_e32 v3, 0xff, v3
+; GFX11-TRUE16-NEXT: v_or_b32_e32 v1, v93, v1
+; GFX11-TRUE16-NEXT: v_or_b32_e32 v3, v90, v3
+; GFX11-TRUE16-NEXT: v_add_nc_u32_e32 v31, 0x300, v0
+; GFX11-TRUE16-NEXT: v_add_nc_u32_e32 v30, 0x300, v2
; GFX11-TRUE16-NEXT: v_add_nc_u32_e32 v0, 3, v40
-; GFX11-TRUE16-NEXT: v_add_nc_u32_e32 v31, 0x300, v1
-; GFX11-TRUE16-NEXT: v_add_nc_u32_e32 v28, 0x300, v2
+; GFX11-TRUE16-NEXT: v_add_nc_u32_e32 v43, 0x300, v1
; GFX11-TRUE16-NEXT: v_and_b32_e32 v1, 0xff, v4
-; GFX11-TRUE16-NEXT: v_or_b32_e32 v3, v90, v3
; GFX11-TRUE16-NEXT: s_waitcnt vmcnt(31)
; GFX11-TRUE16-NEXT: v_add_nc_u32_e32 v2, 3, v183
; GFX11-TRUE16-NEXT: v_add_nc_u32_e32 v4, 3, v182
+; GFX11-TRUE16-NEXT: v_add_nc_u32_e32 v182, 0x300, v3
; GFX11-TRUE16-NEXT: v_and_b32_e32 v0, 0xff, v0
; GFX11-TRUE16-NEXT: v_or_b32_e32 v1, v89, v1
-; GFX11-TRUE16-NEXT: v_add_nc_u32_e32 v30, 0x300, v3
; GFX11-TRUE16-NEXT: v_and_b32_e32 v2, 0xff, v2
; GFX11-TRUE16-NEXT: v_and_b32_e32 v3, 0xff, v4
; GFX11-TRUE16-NEXT: s_waitcnt vmcnt(29)
; GFX11-TRUE16-NEXT: v_add_nc_u32_e32 v4, 3, v181
; GFX11-TRUE16-NEXT: v_or_b32_e32 v0, v88, v0
-; GFX11-TRUE16-NEXT: v_add_nc_u32_e32 v181, 0x300, v1
+; GFX11-TRUE16-NEXT: v_add_nc_u32_e32 v29, 0x300, v1
; GFX11-TRUE16-NEXT: v_or_b32_e32 v1, v78, v2
; GFX11-TRUE16-NEXT: v_or_b32_e32 v2, v79, v3
; GFX11-TRUE16-NEXT: v_and_b32_e32 v3, 0xff, v4
-; GFX11-TRUE16-NEXT: v_add_nc_u32_e32 v29, 0x300, v0
+; GFX11-TRUE16-NEXT: v_add_nc_u32_e32 v181, 0x300, v0
; GFX11-TRUE16-NEXT: v_add_nc_u32_e32 v0, 3, v179
-; GFX11-TRUE16-NEXT: v_add_nc_u32_e32 v182, 0x300, v1
+; GFX11-TRUE16-NEXT: v_add_nc_u32_e32 v28, 0x300, v1
; GFX11-TRUE16-NEXT: v_add_nc_u32_e32 v179, 0x300, v2
; GFX11-TRUE16-NEXT: v_or_b32_e32 v1, v77, v3
; GFX11-TRUE16-NEXT: s_waitcnt vmcnt(27)
@@ -203084,7 +208407,7 @@ define inreg <64 x i16> @bitcast_v128i8_to_v64i16_scalar(<128 x i8> inreg %a, i3
; GFX11-TRUE16-NEXT: v_add_nc_u32_e32 v3, 3, v164
; GFX11-TRUE16-NEXT: s_waitcnt vmcnt(25)
; GFX11-TRUE16-NEXT: v_add_nc_u32_e32 v4, 3, v163
-; GFX11-TRUE16-NEXT: v_add_nc_u32_e32 v163, 0x300, v1
+; GFX11-TRUE16-NEXT: v_add_nc_u32_e32 v27, 0x300, v1
; GFX11-TRUE16-NEXT: v_and_b32_e32 v1, 0xff, v2
; GFX11-TRUE16-NEXT: v_or_b32_e32 v0, v76, v0
; GFX11-TRUE16-NEXT: v_and_b32_e32 v2, 0xff, v3
@@ -203095,18 +208418,18 @@ define inreg <64 x i16> @bitcast_v128i8_to_v64i16_scalar(<128 x i8> inreg %a, i3
; GFX11-TRUE16-NEXT: v_or_b32_e32 v0, v74, v2
; GFX11-TRUE16-NEXT: v_or_b32_e32 v2, v73, v3
; GFX11-TRUE16-NEXT: v_and_b32_e32 v3, 0xff, v4
-; GFX11-TRUE16-NEXT: v_add_nc_u32_e32 v22, 0x300, v1
+; GFX11-TRUE16-NEXT: v_add_nc_u32_e32 v26, 0x300, v1
; GFX11-TRUE16-NEXT: s_waitcnt vmcnt(23)
; GFX11-TRUE16-NEXT: v_add_nc_u32_e32 v1, 3, v150
-; GFX11-TRUE16-NEXT: v_add_nc_u32_e32 v26, 0x300, v0
-; GFX11-TRUE16-NEXT: v_add_nc_u32_e32 v23, 0x300, v2
+; GFX11-TRUE16-NEXT: v_add_nc_u32_e32 v150, 0x300, v0
+; GFX11-TRUE16-NEXT: v_add_nc_u32_e32 v25, 0x300, v2
; GFX11-TRUE16-NEXT: v_or_b32_e32 v0, v72, v3
; GFX11-TRUE16-NEXT: v_add_nc_u32_e32 v2, 3, v146
; GFX11-TRUE16-NEXT: v_and_b32_e32 v1, 0xff, v1
; GFX11-TRUE16-NEXT: s_waitcnt vmcnt(21)
; GFX11-TRUE16-NEXT: v_add_nc_u32_e32 v3, 3, v145
; GFX11-TRUE16-NEXT: v_add_nc_u32_e32 v4, 3, v135
-; GFX11-TRUE16-NEXT: v_add_nc_u32_e32 v25, 0x300, v0
+; GFX11-TRUE16-NEXT: v_add_nc_u32_e32 v135, 0x300, v0
; GFX11-TRUE16-NEXT: v_and_b32_e32 v0, 0xff, v2
; GFX11-TRUE16-NEXT: v_or_b32_e32 v1, v63, v1
; GFX11-TRUE16-NEXT: v_and_b32_e32 v2, 0xff, v3
@@ -203114,13 +208437,13 @@ define inreg <64 x i16> @bitcast_v128i8_to_v64i16_scalar(<128 x i8> inreg %a, i3
; GFX11-TRUE16-NEXT: s_waitcnt vmcnt(19)
; GFX11-TRUE16-NEXT: v_add_nc_u32_e32 v4, 3, v131
; GFX11-TRUE16-NEXT: v_or_b32_e32 v0, v62, v0
-; GFX11-TRUE16-NEXT: v_add_nc_u32_e32 v131, 0x300, v1
+; GFX11-TRUE16-NEXT: v_add_nc_u32_e32 v24, 0x300, v1
; GFX11-TRUE16-NEXT: v_or_b32_e32 v1, v60, v2
; GFX11-TRUE16-NEXT: v_or_b32_e32 v2, v61, v3
; GFX11-TRUE16-NEXT: v_and_b32_e32 v3, 0xff, v4
-; GFX11-TRUE16-NEXT: v_add_nc_u32_e32 v24, 0x300, v0
+; GFX11-TRUE16-NEXT: v_add_nc_u32_e32 v131, 0x300, v0
; GFX11-TRUE16-NEXT: v_add_nc_u32_e32 v0, 3, v119
-; GFX11-TRUE16-NEXT: v_add_nc_u32_e32 v135, 0x300, v1
+; GFX11-TRUE16-NEXT: v_add_nc_u32_e32 v23, 0x300, v1
; GFX11-TRUE16-NEXT: v_add_nc_u32_e32 v119, 0x300, v2
; GFX11-TRUE16-NEXT: v_or_b32_e32 v1, v59, v3
; GFX11-TRUE16-NEXT: s_waitcnt vmcnt(17)
@@ -203129,29 +208452,29 @@ define inreg <64 x i16> @bitcast_v128i8_to_v64i16_scalar(<128 x i8> inreg %a, i3
; GFX11-TRUE16-NEXT: v_add_nc_u32_e32 v3, 3, v115
; GFX11-TRUE16-NEXT: s_waitcnt vmcnt(15)
; GFX11-TRUE16-NEXT: v_add_nc_u32_e32 v4, 3, v165
-; GFX11-TRUE16-NEXT: v_add_nc_u32_e32 v115, 0x300, v1
+; GFX11-TRUE16-NEXT: v_add_nc_u32_e32 v22, 0x300, v1
; GFX11-TRUE16-NEXT: v_and_b32_e32 v1, 0xff, v2
; GFX11-TRUE16-NEXT: v_or_b32_e32 v0, v56, v0
; GFX11-TRUE16-NEXT: v_and_b32_e32 v2, 0xff, v3
; GFX11-TRUE16-NEXT: v_and_b32_e32 v3, 0xff, v4
; GFX11-TRUE16-NEXT: v_add_nc_u32_e32 v4, 3, v162
; GFX11-TRUE16-NEXT: v_or_b32_e32 v1, v45, v1
-; GFX11-TRUE16-NEXT: v_add_nc_u32_e32 v145, 0x300, v0
+; GFX11-TRUE16-NEXT: v_add_nc_u32_e32 v115, 0x300, v0
; GFX11-TRUE16-NEXT: v_or_b32_e32 v0, v44, v2
; GFX11-TRUE16-NEXT: v_or_b32_e32 v2, v42, v3
; GFX11-TRUE16-NEXT: v_and_b32_e32 v3, 0xff, v4
-; GFX11-TRUE16-NEXT: v_add_nc_u32_e32 v17, 0x300, v1
+; GFX11-TRUE16-NEXT: v_add_nc_u32_e32 v21, 0x300, v1
; GFX11-TRUE16-NEXT: s_waitcnt vmcnt(13)
; GFX11-TRUE16-NEXT: v_add_nc_u32_e32 v1, 3, v151
-; GFX11-TRUE16-NEXT: v_add_nc_u32_e32 v21, 0x300, v0
-; GFX11-TRUE16-NEXT: v_add_nc_u32_e32 v18, 0x300, v2
+; GFX11-TRUE16-NEXT: v_add_nc_u32_e32 v145, 0x300, v0
+; GFX11-TRUE16-NEXT: v_add_nc_u32_e32 v20, 0x300, v2
; GFX11-TRUE16-NEXT: v_or_b32_e32 v0, v41, v3
; GFX11-TRUE16-NEXT: v_add_nc_u32_e32 v2, 3, v149
; GFX11-TRUE16-NEXT: v_and_b32_e32 v1, 0xff, v1
; GFX11-TRUE16-NEXT: s_waitcnt vmcnt(11)
; GFX11-TRUE16-NEXT: v_add_nc_u32_e32 v3, 3, v148
; GFX11-TRUE16-NEXT: v_add_nc_u32_e32 v4, 3, v144
-; GFX11-TRUE16-NEXT: v_add_nc_u32_e32 v20, 0x300, v0
+; GFX11-TRUE16-NEXT: v_add_nc_u32_e32 v144, 0x300, v0
; GFX11-TRUE16-NEXT: v_and_b32_e32 v0, 0xff, v2
; GFX11-TRUE16-NEXT: v_or_b32_e32 v1, v180, v1
; GFX11-TRUE16-NEXT: v_and_b32_e32 v2, 0xff, v3
@@ -203165,8 +208488,8 @@ define inreg <64 x i16> @bitcast_v128i8_to_v64i16_scalar(<128 x i8> inreg %a, i3
; GFX11-TRUE16-NEXT: v_and_b32_e32 v3, 0xff, v4
; GFX11-TRUE16-NEXT: v_add_nc_u32_e32 v133, 0x300, v0
; GFX11-TRUE16-NEXT: v_add_nc_u32_e32 v0, 3, v129
-; GFX11-TRUE16-NEXT: v_add_nc_u32_e32 v129, 0x300, v1
-; GFX11-TRUE16-NEXT: v_add_nc_u32_e32 v144, 0x300, v2
+; GFX11-TRUE16-NEXT: v_add_nc_u32_e32 v18, 0x300, v1
+; GFX11-TRUE16-NEXT: v_add_nc_u32_e32 v129, 0x300, v2
; GFX11-TRUE16-NEXT: v_or_b32_e32 v1, v161, v3
; GFX11-TRUE16-NEXT: s_waitcnt vmcnt(7)
; GFX11-TRUE16-NEXT: v_add_nc_u32_e32 v2, 3, v118
@@ -203174,167 +208497,141 @@ define inreg <64 x i16> @bitcast_v128i8_to_v64i16_scalar(<128 x i8> inreg %a, i3
; GFX11-TRUE16-NEXT: v_add_nc_u32_e32 v3, 3, v117
; GFX11-TRUE16-NEXT: s_waitcnt vmcnt(5)
; GFX11-TRUE16-NEXT: v_add_nc_u32_e32 v4, 3, v116
-; GFX11-TRUE16-NEXT: v_add_nc_u32_e32 v116, 0x300, v1
+; GFX11-TRUE16-NEXT: v_add_nc_u32_e32 v17, 0x300, v1
; GFX11-TRUE16-NEXT: v_and_b32_e32 v1, 0xff, v2
; GFX11-TRUE16-NEXT: v_or_b32_e32 v0, v147, v0
; GFX11-TRUE16-NEXT: v_and_b32_e32 v2, 0xff, v3
; GFX11-TRUE16-NEXT: v_and_b32_e32 v3, 0xff, v4
; GFX11-TRUE16-NEXT: v_add_nc_u32_e32 v4, 3, v114
-; GFX11-TRUE16-NEXT: s_waitcnt vmcnt(1)
-; GFX11-TRUE16-NEXT: v_add_nc_u32_e32 v5, 3, v99
+; GFX11-TRUE16-NEXT: v_or_b32_e32 v1, v134, v1
; GFX11-TRUE16-NEXT: v_add_nc_u32_e32 v114, 0x300, v0
; GFX11-TRUE16-NEXT: v_or_b32_e32 v0, v132, v2
; GFX11-TRUE16-NEXT: v_or_b32_e32 v2, v130, v3
; GFX11-TRUE16-NEXT: v_and_b32_e32 v3, 0xff, v4
-; GFX11-TRUE16-NEXT: v_add_nc_u32_e32 v4, 3, v103
-; GFX11-TRUE16-NEXT: v_add_nc_u32_e32 v6, 3, v98
-; GFX11-TRUE16-NEXT: v_and_b32_e32 v5, 0xff, v5
-; GFX11-TRUE16-NEXT: v_add_nc_u32_e32 v7, 3, v54
-; GFX11-TRUE16-NEXT: v_add_nc_u32_e32 v8, 3, v53
+; GFX11-TRUE16-NEXT: v_add_nc_u32_e32 v16, 0x300, v1
+; GFX11-TRUE16-NEXT: s_waitcnt vmcnt(3)
+; GFX11-TRUE16-NEXT: v_add_nc_u32_e32 v1, 3, v103
+; GFX11-TRUE16-NEXT: v_add_nc_u32_e32 v103, 0x300, v0
+; GFX11-TRUE16-NEXT: s_waitcnt vmcnt(2)
+; GFX11-TRUE16-NEXT: v_add_nc_u32_e32 v4, 3, v98
+; GFX11-TRUE16-NEXT: v_or_b32_e32 v0, v128, v3
+; GFX11-TRUE16-NEXT: s_waitcnt vmcnt(1)
+; GFX11-TRUE16-NEXT: v_add_nc_u32_e32 v3, 3, v99
+; GFX11-TRUE16-NEXT: v_and_b32_e32 v1, 0xff, v1
+; GFX11-TRUE16-NEXT: v_add_nc_u32_e32 v5, 3, v54
; GFX11-TRUE16-NEXT: v_and_b32_e32 v4, 0xff, v4
-; GFX11-TRUE16-NEXT: v_and_b32_e32 v6, 0xff, v6
-; GFX11-TRUE16-NEXT: v_add_nc_u32_e32 v9, 3, v39
+; GFX11-TRUE16-NEXT: v_add_nc_u32_e32 v6, 3, v52
+; GFX11-TRUE16-NEXT: v_and_b32_e32 v3, 0xff, v3
+; GFX11-TRUE16-NEXT: v_or_b32_e32 v1, v113, v1
+; GFX11-TRUE16-NEXT: v_add_nc_u32_e32 v7, 3, v53
+; GFX11-TRUE16-NEXT: v_add_nc_u32_e32 v34, 3, v34
+; GFX11-TRUE16-NEXT: v_add_nc_u32_e32 v35, 3, v35
; GFX11-TRUE16-NEXT: v_add_nc_u32_e32 v33, 3, v33
-; GFX11-TRUE16-NEXT: v_add_nc_u32_e32 v16, 0x300, v0
-; GFX11-TRUE16-NEXT: v_or_b32_e32 v4, v113, v4
-; GFX11-TRUE16-NEXT: v_add_nc_u32_e32 v0, 0x300, v2
-; GFX11-TRUE16-NEXT: v_or_b32_e32 v2, v128, v3
-; GFX11-TRUE16-NEXT: v_add_nc_u32_e32 v3, 3, v100
-; GFX11-TRUE16-NEXT: s_addk_i32 s4, 0x300
-; GFX11-TRUE16-NEXT: v_add_nc_u32_e32 v11, 0x300, v4
-; GFX11-TRUE16-NEXT: v_or_b32_e32 v4, v101, v5
-; GFX11-TRUE16-NEXT: v_or_b32_e32 v5, v102, v6
-; GFX11-TRUE16-NEXT: v_and_b32_e32 v6, 0xff, v7
+; GFX11-TRUE16-NEXT: v_add_nc_u32_e32 v14, 0x300, v1
+; GFX11-TRUE16-NEXT: v_or_b32_e32 v1, v101, v3
+; GFX11-TRUE16-NEXT: v_or_b32_e32 v3, v102, v4
+; GFX11-TRUE16-NEXT: v_and_b32_e32 v4, 0xff, v5
; GFX11-TRUE16-NEXT: s_waitcnt vmcnt(0)
-; GFX11-TRUE16-NEXT: v_add_nc_u32_e32 v7, 3, v96
-; GFX11-TRUE16-NEXT: v_or_b32_e32 v1, v134, v1
-; GFX11-TRUE16-NEXT: v_add_nc_u32_e32 v12, 0x300, v4
-; GFX11-TRUE16-NEXT: v_add_nc_u32_e32 v13, 0x300, v5
-; GFX11-TRUE16-NEXT: v_or_b32_e32 v4, v97, v6
-; GFX11-TRUE16-NEXT: v_add_nc_u32_e32 v5, 3, v55
+; GFX11-TRUE16-NEXT: v_add_nc_u32_e32 v5, 3, v96
+; GFX11-TRUE16-NEXT: v_add_nc_u32_e32 v15, 0x300, v2
+; GFX11-TRUE16-NEXT: v_add_nc_u32_e32 v13, 0x300, v1
+; GFX11-TRUE16-NEXT: v_add_nc_u32_e32 v1, 0x300, v3
+; GFX11-TRUE16-NEXT: v_or_b32_e32 v3, v97, v4
+; GFX11-TRUE16-NEXT: v_add_nc_u32_e32 v4, 3, v55
+; GFX11-TRUE16-NEXT: v_and_b32_e32 v5, 0xff, v5
+; GFX11-TRUE16-NEXT: v_add_nc_u32_e32 v2, 3, v100
+; GFX11-TRUE16-NEXT: v_and_b32_e32 v33, 0xff, v33
+; GFX11-TRUE16-NEXT: v_add_nc_u32_e32 v12, 0x300, v3
+; GFX11-TRUE16-NEXT: v_and_b32_e32 v3, 0xff, v4
+; GFX11-TRUE16-NEXT: v_or_b32_e32 v4, v87, v5
+; GFX11-TRUE16-NEXT: v_and_b32_e32 v5, 0xff, v6
+; GFX11-TRUE16-NEXT: v_and_b32_e32 v6, 0xff, v7
+; GFX11-TRUE16-NEXT: v_add_nc_u32_e32 v7, 3, v51
+; GFX11-TRUE16-NEXT: v_or_b32_e32 v3, v86, v3
+; GFX11-TRUE16-NEXT: v_add_nc_u32_e32 v51, 0x300, v4
+; GFX11-TRUE16-NEXT: v_or_b32_e32 v4, v85, v5
+; GFX11-TRUE16-NEXT: v_or_b32_e32 v5, v84, v6
; GFX11-TRUE16-NEXT: v_and_b32_e32 v6, 0xff, v7
-; GFX11-TRUE16-NEXT: v_add_nc_u32_e32 v7, 3, v52
+; GFX11-TRUE16-NEXT: v_add_nc_u32_e32 v11, 0x300, v3
+; GFX11-TRUE16-NEXT: v_add_nc_u32_e32 v3, 3, v50
+; GFX11-TRUE16-NEXT: v_add_nc_u32_e32 v50, 0x300, v4
+; GFX11-TRUE16-NEXT: v_add_nc_u32_e32 v10, 0x300, v5
+; GFX11-TRUE16-NEXT: v_or_b32_e32 v4, v83, v6
+; GFX11-TRUE16-NEXT: v_add_nc_u32_e32 v5, 3, v48
; GFX11-TRUE16-NEXT: v_and_b32_e32 v3, 0xff, v3
-; GFX11-TRUE16-NEXT: v_add_nc_u32_e32 v14, 0x300, v4
+; GFX11-TRUE16-NEXT: v_add_nc_u32_e32 v6, 3, v49
+; GFX11-TRUE16-NEXT: v_add_nc_u32_e32 v7, 3, v39
+; GFX11-TRUE16-NEXT: v_add_nc_u32_e32 v39, 0x300, v4
; GFX11-TRUE16-NEXT: v_and_b32_e32 v4, 0xff, v5
-; GFX11-TRUE16-NEXT: v_or_b32_e32 v5, v87, v6
+; GFX11-TRUE16-NEXT: v_or_b32_e32 v3, v82, v3
+; GFX11-TRUE16-NEXT: v_and_b32_e32 v5, 0xff, v6
+; GFX11-TRUE16-NEXT: v_and_b32_e32 v6, 0xff, v7
+; GFX11-TRUE16-NEXT: v_add_nc_u32_e32 v7, 3, v38
+; GFX11-TRUE16-NEXT: v_or_b32_e32 v4, v81, v4
+; GFX11-TRUE16-NEXT: v_add_nc_u32_e32 v9, 0x300, v3
+; GFX11-TRUE16-NEXT: v_or_b32_e32 v3, v71, v5
+; GFX11-TRUE16-NEXT: v_or_b32_e32 v5, v80, v6
; GFX11-TRUE16-NEXT: v_and_b32_e32 v6, 0xff, v7
-; GFX11-TRUE16-NEXT: v_and_b32_e32 v7, 0xff, v8
-; GFX11-TRUE16-NEXT: v_add_nc_u32_e32 v8, 3, v51
-; GFX11-TRUE16-NEXT: v_or_b32_e32 v4, v86, v4
-; GFX11-TRUE16-NEXT: v_add_nc_u32_e32 v15, 0x300, v5
-; GFX11-TRUE16-NEXT: v_or_b32_e32 v5, v85, v6
-; GFX11-TRUE16-NEXT: v_or_b32_e32 v6, v84, v7
-; GFX11-TRUE16-NEXT: v_and_b32_e32 v7, 0xff, v8
-; GFX11-TRUE16-NEXT: v_add_nc_u32_e32 v51, 0x300, v4
-; GFX11-TRUE16-NEXT: v_add_nc_u32_e32 v4, 3, v50
-; GFX11-TRUE16-NEXT: v_add_nc_u32_e32 v50, 0x300, v5
-; GFX11-TRUE16-NEXT: v_add_nc_u32_e32 v8, 3, v49
-; GFX11-TRUE16-NEXT: v_or_b32_e32 v5, v83, v7
-; GFX11-TRUE16-NEXT: v_add_nc_u32_e32 v7, 3, v48
-; GFX11-TRUE16-NEXT: v_and_b32_e32 v4, 0xff, v4
-; GFX11-TRUE16-NEXT: v_add_nc_u32_e32 v6, 0x300, v6
-; GFX11-TRUE16-NEXT: v_add_nc_u32_e32 v1, 0x300, v1
-; GFX11-TRUE16-NEXT: v_add_nc_u32_e32 v10, 0x300, v5
-; GFX11-TRUE16-NEXT: v_and_b32_e32 v5, 0xff, v7
-; GFX11-TRUE16-NEXT: v_or_b32_e32 v4, v82, v4
-; GFX11-TRUE16-NEXT: v_and_b32_e32 v7, 0xff, v8
-; GFX11-TRUE16-NEXT: v_and_b32_e32 v8, 0xff, v9
-; GFX11-TRUE16-NEXT: v_add_nc_u32_e32 v9, 3, v38
-; GFX11-TRUE16-NEXT: v_or_b32_e32 v5, v81, v5
; GFX11-TRUE16-NEXT: v_add_nc_u32_e32 v38, 0x300, v4
-; GFX11-TRUE16-NEXT: v_or_b32_e32 v4, v71, v7
-; GFX11-TRUE16-NEXT: v_or_b32_e32 v7, v80, v8
-; GFX11-TRUE16-NEXT: v_and_b32_e32 v8, 0xff, v9
-; GFX11-TRUE16-NEXT: v_add_nc_u32_e32 v9, 0x300, v5
-; GFX11-TRUE16-NEXT: v_add_nc_u32_e32 v5, 3, v37
-; GFX11-TRUE16-NEXT: v_add_nc_u32_e32 v37, 0x300, v4
-; GFX11-TRUE16-NEXT: v_add_nc_u32_e32 v39, 0x300, v7
-; GFX11-TRUE16-NEXT: v_or_b32_e32 v4, v70, v8
-; GFX11-TRUE16-NEXT: v_add_nc_u32_e32 v7, 3, v36
-; GFX11-TRUE16-NEXT: v_and_b32_e32 v5, 0xff, v5
-; GFX11-TRUE16-NEXT: v_add_nc_u32_e32 v8, 3, v34
-; GFX11-TRUE16-NEXT: v_add_nc_u32_e32 v34, 3, v35
-; GFX11-TRUE16-NEXT: v_add_nc_u32_e32 v35, 0x300, v4
-; GFX11-TRUE16-NEXT: v_and_b32_e32 v4, 0xff, v7
-; GFX11-TRUE16-NEXT: v_or_b32_e32 v5, v69, v5
-; GFX11-TRUE16-NEXT: v_and_b32_e32 v7, 0xff, v8
-; GFX11-TRUE16-NEXT: v_and_b32_e32 v8, 0xff, v34
-; GFX11-TRUE16-NEXT: v_or_b32_e32 v3, v112, v3
-; GFX11-TRUE16-NEXT: v_or_b32_e32 v4, v68, v4
-; GFX11-TRUE16-NEXT: v_add_nc_u32_e32 v34, 0x300, v5
-; GFX11-TRUE16-NEXT: v_or_b32_e32 v5, v67, v7
-; GFX11-TRUE16-NEXT: v_or_b32_e32 v7, v66, v8
-; GFX11-TRUE16-NEXT: v_and_b32_e32 v8, 0xff, v33
-; GFX11-TRUE16-NEXT: v_add_nc_u32_e32 v33, 0x300, v4
-; GFX11-TRUE16-NEXT: v_add_nc_u32_e32 v4, 3, v32
-; GFX11-TRUE16-NEXT: v_add_nc_u32_e32 v32, 0x300, v5
-; GFX11-TRUE16-NEXT: v_add_nc_u32_e32 v5, 0x300, v7
-; GFX11-TRUE16-NEXT: v_or_b32_e32 v7, v65, v8
-; GFX11-TRUE16-NEXT: v_and_b32_e64 v8, 0xffff, s4
+; GFX11-TRUE16-NEXT: v_add_nc_u32_e32 v4, 3, v37
+; GFX11-TRUE16-NEXT: v_add_nc_u32_e32 v8, 0x300, v3
+; GFX11-TRUE16-NEXT: v_add_nc_u32_e32 v3, 0x300, v5
+; GFX11-TRUE16-NEXT: v_or_b32_e32 v5, v70, v6
+; GFX11-TRUE16-NEXT: v_add_nc_u32_e32 v6, 3, v36
; GFX11-TRUE16-NEXT: v_and_b32_e32 v4, 0xff, v4
-; GFX11-TRUE16-NEXT: v_and_b32_e32 v36, 0xffff, v6
-; GFX11-TRUE16-NEXT: v_and_b32_e32 v5, 0xffff, v5
-; GFX11-TRUE16-NEXT: v_add_nc_u32_e32 v7, 0x300, v7
-; GFX11-TRUE16-NEXT: v_add_nc_u32_e32 v3, 0x300, v3
-; GFX11-TRUE16-NEXT: v_or_b32_e32 v4, v64, v4
-; GFX11-TRUE16-NEXT: v_and_b32_e32 v14, 0xffff, v14
-; GFX11-TRUE16-NEXT: v_lshl_or_b32 v10, v10, 16, v36
-; GFX11-TRUE16-NEXT: v_lshl_or_b32 v5, v7, 16, v5
-; GFX11-TRUE16-NEXT: v_and_b32_e32 v7, 0xffff, v33
-; GFX11-TRUE16-NEXT: v_add_nc_u32_e32 v4, 0x300, v4
-; GFX11-TRUE16-NEXT: v_and_b32_e32 v33, 0xffff, v37
-; GFX11-TRUE16-NEXT: v_and_b32_e32 v36, 0xffff, v22
+; GFX11-TRUE16-NEXT: v_add_nc_u32_e32 v32, 3, v32
+; GFX11-TRUE16-NEXT: v_and_b32_e32 v2, 0xff, v2
+; GFX11-TRUE16-NEXT: v_add_nc_u32_e32 v7, 0x300, v5
+; GFX11-TRUE16-NEXT: v_and_b32_e32 v5, 0xff, v6
+; GFX11-TRUE16-NEXT: v_or_b32_e32 v4, v69, v4
+; GFX11-TRUE16-NEXT: v_and_b32_e32 v6, 0xff, v34
+; GFX11-TRUE16-NEXT: v_and_b32_e32 v34, 0xff, v35
+; GFX11-TRUE16-NEXT: v_and_b32_e32 v32, 0xff, v32
+; GFX11-TRUE16-NEXT: v_or_b32_e32 v2, v112, v2
+; GFX11-TRUE16-NEXT: v_add_nc_u32_e32 v35, 0x300, v4
+; GFX11-TRUE16-NEXT: v_or_b32_e32 v4, v67, v6
+; GFX11-TRUE16-NEXT: v_or_b32_e32 v5, v68, v5
+; GFX11-TRUE16-NEXT: v_or_b32_e32 v34, v66, v34
+; GFX11-TRUE16-NEXT: v_add_nc_u32_e32 v0, 0x300, v0
; GFX11-TRUE16-NEXT: v_add_nc_u32_e32 v2, 0x300, v2
-; GFX11-TRUE16-NEXT: v_lshl_or_b32 v6, v32, 16, v7
-; GFX11-TRUE16-NEXT: v_lshl_or_b32 v4, v4, 16, v8
-; GFX11-TRUE16-NEXT: v_and_b32_e32 v8, 0xffff, v35
-; GFX11-TRUE16-NEXT: v_and_b32_e32 v32, 0xffff, v51
-; GFX11-TRUE16-NEXT: v_and_b32_e32 v35, 0xffff, v38
-; GFX11-TRUE16-NEXT: v_and_b32_e32 v0, 0xffff, v0
-; GFX11-TRUE16-NEXT: v_and_b32_e32 v19, 0xffff, v19
-; GFX11-TRUE16-NEXT: v_lshl_or_b32 v7, v34, 16, v8
-; GFX11-TRUE16-NEXT: v_lshl_or_b32 v8, v39, 16, v33
-; GFX11-TRUE16-NEXT: v_and_b32_e32 v33, 0xffff, v12
-; GFX11-TRUE16-NEXT: v_and_b32_e32 v34, 0xffff, v11
-; GFX11-TRUE16-NEXT: v_lshl_or_b32 v11, v50, 16, v32
-; GFX11-TRUE16-NEXT: v_and_b32_e32 v32, 0xffff, v1
-; GFX11-TRUE16-NEXT: v_lshl_or_b32 v9, v9, 16, v35
-; GFX11-TRUE16-NEXT: v_lshl_or_b32 v12, v15, 16, v14
-; GFX11-TRUE16-NEXT: v_lshl_or_b32 v13, v13, 16, v33
-; GFX11-TRUE16-NEXT: v_lshl_or_b32 v14, v3, 16, v34
-; GFX11-TRUE16-NEXT: v_lshl_or_b32 v16, v16, 16, v32
-; GFX11-TRUE16-NEXT: v_and_b32_e32 v32, 0xffff, v116
-; GFX11-TRUE16-NEXT: v_and_b32_e32 v33, 0xffff, v129
-; GFX11-TRUE16-NEXT: v_and_b32_e32 v34, 0xffff, v18
-; GFX11-TRUE16-NEXT: v_and_b32_e32 v35, 0xffff, v17
-; GFX11-TRUE16-NEXT: v_lshl_or_b32 v26, v26, 16, v36
-; GFX11-TRUE16-NEXT: v_lshl_or_b32 v17, v114, 16, v32
-; GFX11-TRUE16-NEXT: v_lshl_or_b32 v18, v144, 16, v33
-; GFX11-TRUE16-NEXT: v_lshl_or_b32 v20, v20, 16, v34
-; GFX11-TRUE16-NEXT: v_lshl_or_b32 v21, v21, 16, v35
-; GFX11-TRUE16-NEXT: v_and_b32_e32 v32, 0xffff, v115
-; GFX11-TRUE16-NEXT: v_and_b32_e32 v33, 0xffff, v135
-; GFX11-TRUE16-NEXT: v_and_b32_e32 v34, 0xffff, v131
-; GFX11-TRUE16-NEXT: v_and_b32_e32 v35, 0xffff, v23
-; GFX11-TRUE16-NEXT: v_and_b32_e32 v36, 0xffff, v27
-; GFX11-TRUE16-NEXT: v_lshl_or_b32 v22, v145, 16, v32
-; GFX11-TRUE16-NEXT: v_lshl_or_b32 v23, v119, 16, v33
-; GFX11-TRUE16-NEXT: v_lshl_or_b32 v24, v24, 16, v34
-; GFX11-TRUE16-NEXT: v_lshl_or_b32 v25, v25, 16, v35
-; GFX11-TRUE16-NEXT: v_and_b32_e32 v32, 0xffff, v163
-; GFX11-TRUE16-NEXT: v_and_b32_e32 v33, 0xffff, v182
-; GFX11-TRUE16-NEXT: v_and_b32_e32 v34, 0xffff, v181
-; GFX11-TRUE16-NEXT: v_and_b32_e32 v35, 0xffff, v28
-; GFX11-TRUE16-NEXT: v_lshl_or_b32 v15, v2, 16, v0
+; GFX11-TRUE16-NEXT: v_add_nc_u32_e32 v36, 0x300, v4
+; GFX11-TRUE16-NEXT: v_or_b32_e32 v4, v65, v33
+; GFX11-TRUE16-NEXT: v_add_nc_u32_e32 v6, 0x300, v5
+; GFX11-TRUE16-NEXT: v_add_nc_u32_e32 v5, 0x300, v34
+; GFX11-TRUE16-NEXT: v_mov_b16_e32 v7.h, v35.l
+; GFX11-TRUE16-NEXT: v_mov_b16_e32 v6.h, v36.l
+; GFX11-TRUE16-NEXT: v_add_nc_u32_e32 v33, 0x300, v4
+; GFX11-TRUE16-NEXT: v_or_b32_e32 v4, v64, v32
+; GFX11-TRUE16-NEXT: v_mov_b16_e32 v8.h, v3.l
+; GFX11-TRUE16-NEXT: v_mov_b16_e32 v9.h, v38.l
+; GFX11-TRUE16-NEXT: v_mov_b16_e32 v10.h, v39.l
+; GFX11-TRUE16-NEXT: v_mov_b16_e32 v5.h, v33.l
+; GFX11-TRUE16-NEXT: v_add_nc_u32_e32 v32, 0x300, v4
+; GFX11-TRUE16-NEXT: v_mov_b32_e32 v4, s4
+; GFX11-TRUE16-NEXT: v_mov_b16_e32 v11.h, v50.l
+; GFX11-TRUE16-NEXT: v_mov_b16_e32 v12.h, v51.l
+; GFX11-TRUE16-NEXT: v_mov_b16_e32 v13.h, v1.l
+; GFX11-TRUE16-NEXT: v_mov_b16_e32 v4.h, v32.l
+; GFX11-TRUE16-NEXT: v_mov_b16_e32 v14.h, v2.l
+; GFX11-TRUE16-NEXT: v_mov_b16_e32 v15.h, v0.l
; GFX11-TRUE16-NEXT: v_dual_mov_b32 v0, s0 :: v_dual_mov_b32 v1, s1
; GFX11-TRUE16-NEXT: v_dual_mov_b32 v2, s2 :: v_dual_mov_b32 v3, s3
-; GFX11-TRUE16-NEXT: v_lshl_or_b32 v19, v133, 16, v19
-; GFX11-TRUE16-NEXT: v_lshl_or_b32 v27, v160, 16, v32
-; GFX11-TRUE16-NEXT: v_lshl_or_b32 v28, v179, 16, v33
-; GFX11-TRUE16-NEXT: v_lshl_or_b32 v29, v29, 16, v34
-; GFX11-TRUE16-NEXT: v_lshl_or_b32 v30, v30, 16, v35
-; GFX11-TRUE16-NEXT: v_lshl_or_b32 v31, v31, 16, v36
+; GFX11-TRUE16-NEXT: v_mov_b16_e32 v16.h, v103.l
+; GFX11-TRUE16-NEXT: v_mov_b16_e32 v17.h, v114.l
+; GFX11-TRUE16-NEXT: v_mov_b16_e64 v18.h, v129.l
+; GFX11-TRUE16-NEXT: v_mov_b16_e64 v19.h, v133.l
+; GFX11-TRUE16-NEXT: v_mov_b16_e64 v20.h, v144.l
+; GFX11-TRUE16-NEXT: v_mov_b16_e64 v21.h, v145.l
+; GFX11-TRUE16-NEXT: v_mov_b16_e32 v22.h, v115.l
+; GFX11-TRUE16-NEXT: v_mov_b16_e32 v23.h, v119.l
+; GFX11-TRUE16-NEXT: v_mov_b16_e64 v24.h, v131.l
+; GFX11-TRUE16-NEXT: v_mov_b16_e64 v25.h, v135.l
+; GFX11-TRUE16-NEXT: v_mov_b16_e64 v26.h, v150.l
+; GFX11-TRUE16-NEXT: v_mov_b16_e64 v27.h, v160.l
+; GFX11-TRUE16-NEXT: v_mov_b16_e64 v28.h, v179.l
+; GFX11-TRUE16-NEXT: v_mov_b16_e64 v29.h, v181.l
+; GFX11-TRUE16-NEXT: v_mov_b16_e64 v30.h, v182.l
+; GFX11-TRUE16-NEXT: v_mov_b16_e32 v31.h, v43.l
; GFX11-TRUE16-NEXT: .LBB97_3: ; %end
; GFX11-TRUE16-NEXT: s_clause 0x1e
; GFX11-TRUE16-NEXT: scratch_load_b32 v94, off, s32 offset:320
@@ -221009,700 +226306,1362 @@ define inreg <64 x half> @bitcast_v64bf16_to_v64f16_scalar(<64 x bfloat> inreg %
; GFX9-NEXT: s_waitcnt vmcnt(0)
; GFX9-NEXT: s_setpc_b64 s[30:31]
;
-; GFX11-LABEL: bitcast_v64bf16_to_v64f16_scalar:
-; GFX11: ; %bb.0:
-; GFX11-NEXT: s_waitcnt vmcnt(0) expcnt(0) lgkmcnt(0)
-; GFX11-NEXT: v_cmp_ne_u32_e32 vcc_lo, 0, v14
-; GFX11-NEXT: v_dual_mov_b32 v31, v13 :: v_dual_mov_b32 v30, v12
-; GFX11-NEXT: v_dual_mov_b32 v29, v11 :: v_dual_mov_b32 v28, v10
-; GFX11-NEXT: v_dual_mov_b32 v27, v9 :: v_dual_mov_b32 v26, v8
-; GFX11-NEXT: v_dual_mov_b32 v25, v7 :: v_dual_mov_b32 v24, v6
-; GFX11-NEXT: v_dual_mov_b32 v23, v5 :: v_dual_mov_b32 v22, v4
-; GFX11-NEXT: v_dual_mov_b32 v21, v3 :: v_dual_mov_b32 v20, v2
-; GFX11-NEXT: v_dual_mov_b32 v19, v1 :: v_dual_mov_b32 v18, v0
-; GFX11-NEXT: v_dual_mov_b32 v16, s28 :: v_dual_mov_b32 v17, s29
-; GFX11-NEXT: s_mov_b32 s15, s3
-; GFX11-NEXT: s_mov_b32 s14, s2
-; GFX11-NEXT: s_mov_b32 s13, s1
-; GFX11-NEXT: s_mov_b32 s12, s0
-; GFX11-NEXT: s_mov_b32 s0, 0
-; GFX11-NEXT: s_and_b32 s1, vcc_lo, exec_lo
-; GFX11-NEXT: s_cbranch_scc0 .LBB101_3
-; GFX11-NEXT: ; %bb.1: ; %Flow
-; GFX11-NEXT: s_and_not1_b32 vcc_lo, exec_lo, s0
-; GFX11-NEXT: s_cbranch_vccnz .LBB101_4
-; GFX11-NEXT: .LBB101_2: ; %cmp.true
-; GFX11-NEXT: v_and_b32_e32 v2, 0xffff0000, v17
-; GFX11-NEXT: v_lshlrev_b32_e32 v1, 16, v16
-; GFX11-NEXT: v_and_b32_e32 v4, 0xffff0000, v18
-; GFX11-NEXT: s_and_b32 s0, s12, 0xffff0000
-; GFX11-NEXT: s_delay_alu instid0(VALU_DEP_2) | instskip(NEXT) | instid1(VALU_DEP_2)
-; GFX11-NEXT: v_dual_add_f32 v2, 0x40c00000, v2 :: v_dual_add_f32 v1, 0x40c00000, v1
-; GFX11-NEXT: v_add_f32_e32 v4, 0x40c00000, v4
-; GFX11-NEXT: s_delay_alu instid0(VALU_DEP_2) | instskip(NEXT) | instid1(VALU_DEP_3)
-; GFX11-NEXT: v_bfe_u32 v7, v2, 16, 1
-; GFX11-NEXT: v_bfe_u32 v6, v1, 16, 1
-; GFX11-NEXT: v_or_b32_e32 v9, 0x400000, v1
-; GFX11-NEXT: v_or_b32_e32 v10, 0x400000, v2
-; GFX11-NEXT: s_delay_alu instid0(VALU_DEP_4) | instskip(NEXT) | instid1(VALU_DEP_4)
-; GFX11-NEXT: v_add_nc_u32_e32 v7, v7, v2
-; GFX11-NEXT: v_add_nc_u32_e32 v6, v6, v1
-; GFX11-NEXT: s_delay_alu instid0(VALU_DEP_2) | instskip(NEXT) | instid1(VALU_DEP_2)
-; GFX11-NEXT: v_add_nc_u32_e32 v7, 0x7fff, v7
-; GFX11-NEXT: v_add_nc_u32_e32 v6, 0x7fff, v6
-; GFX11-NEXT: v_and_b32_e32 v0, 0xffff0000, v16
-; GFX11-NEXT: s_delay_alu instid0(VALU_DEP_1) | instskip(NEXT) | instid1(VALU_DEP_1)
-; GFX11-NEXT: v_dual_add_f32 v0, 0x40c00000, v0 :: v_dual_lshlrev_b32 v3, 16, v17
-; GFX11-NEXT: v_bfe_u32 v5, v0, 16, 1
-; GFX11-NEXT: v_or_b32_e32 v8, 0x400000, v0
-; GFX11-NEXT: v_cmp_u_f32_e32 vcc_lo, v0, v0
-; GFX11-NEXT: s_delay_alu instid0(VALU_DEP_3) | instskip(NEXT) | instid1(VALU_DEP_1)
-; GFX11-NEXT: v_add_nc_u32_e32 v5, v5, v0
-; GFX11-NEXT: v_add_nc_u32_e32 v5, 0x7fff, v5
-; GFX11-NEXT: s_delay_alu instid0(VALU_DEP_1) | instskip(SKIP_1) | instid1(VALU_DEP_2)
-; GFX11-NEXT: v_dual_add_f32 v3, 0x40c00000, v3 :: v_dual_cndmask_b32 v0, v5, v8
-; GFX11-NEXT: v_cmp_u_f32_e32 vcc_lo, v1, v1
-; GFX11-NEXT: v_bfe_u32 v11, v3, 16, 1
-; GFX11-NEXT: v_bfe_u32 v5, v4, 16, 1
-; GFX11-NEXT: s_delay_alu instid0(VALU_DEP_4) | instskip(SKIP_3) | instid1(VALU_DEP_3)
-; GFX11-NEXT: v_lshrrev_b32_e32 v16, 16, v0
-; GFX11-NEXT: v_cndmask_b32_e32 v1, v6, v9, vcc_lo
-; GFX11-NEXT: v_cmp_u_f32_e32 vcc_lo, v2, v2
-; GFX11-NEXT: v_or_b32_e32 v6, 0x400000, v3
-; GFX11-NEXT: v_lshrrev_b32_e32 v32, 16, v1
-; GFX11-NEXT: v_cndmask_b32_e32 v2, v7, v10, vcc_lo
-; GFX11-NEXT: v_cmp_u_f32_e32 vcc_lo, v3, v3
-; GFX11-NEXT: v_or_b32_e32 v7, 0x400000, v4
-; GFX11-NEXT: s_delay_alu instid0(VALU_DEP_4) | instskip(NEXT) | instid1(VALU_DEP_4)
-; GFX11-NEXT: v_and_b32_e32 v32, 0xffff, v32
-; GFX11-NEXT: v_lshrrev_b32_e32 v17, 16, v2
-; GFX11-NEXT: v_lshlrev_b32_e32 v2, 16, v18
-; GFX11-NEXT: v_add_nc_u32_e32 v1, v5, v4
-; GFX11-NEXT: v_and_b32_e32 v5, 0xffff0000, v19
-; GFX11-NEXT: v_add_nc_u32_e32 v0, v11, v3
-; GFX11-NEXT: v_lshl_or_b32 v16, v16, 16, v32
-; GFX11-NEXT: v_add_f32_e32 v2, 0x40c00000, v2
-; GFX11-NEXT: v_add_nc_u32_e32 v1, 0x7fff, v1
-; GFX11-NEXT: v_add_f32_e32 v5, 0x40c00000, v5
-; GFX11-NEXT: v_add_nc_u32_e32 v0, 0x7fff, v0
-; GFX11-NEXT: s_delay_alu instid0(VALU_DEP_4) | instskip(NEXT) | instid1(VALU_DEP_2)
-; GFX11-NEXT: v_bfe_u32 v3, v2, 16, 1
-; GFX11-NEXT: v_cndmask_b32_e32 v0, v0, v6, vcc_lo
-; GFX11-NEXT: v_cmp_u_f32_e32 vcc_lo, v4, v4
-; GFX11-NEXT: v_bfe_u32 v4, v5, 16, 1
-; GFX11-NEXT: v_or_b32_e32 v6, 0x400000, v2
-; GFX11-NEXT: s_delay_alu instid0(VALU_DEP_4) | instskip(SKIP_3) | instid1(VALU_DEP_3)
-; GFX11-NEXT: v_lshrrev_b32_e32 v18, 16, v0
-; GFX11-NEXT: v_dual_cndmask_b32 v1, v1, v7 :: v_dual_add_nc_u32 v0, v3, v2
-; GFX11-NEXT: v_cmp_u_f32_e32 vcc_lo, v2, v2
-; GFX11-NEXT: v_or_b32_e32 v7, 0x400000, v5
-; GFX11-NEXT: v_lshrrev_b32_e32 v33, 16, v1
-; GFX11-NEXT: v_add_nc_u32_e32 v1, v4, v5
-; GFX11-NEXT: v_and_b32_e32 v4, 0xffff0000, v20
-; GFX11-NEXT: v_add_nc_u32_e32 v0, 0x7fff, v0
-; GFX11-NEXT: s_delay_alu instid0(VALU_DEP_3) | instskip(NEXT) | instid1(VALU_DEP_3)
-; GFX11-NEXT: v_add_nc_u32_e32 v1, 0x7fff, v1
-; GFX11-NEXT: v_add_f32_e32 v4, 0x40c00000, v4
-; GFX11-NEXT: s_delay_alu instid0(VALU_DEP_3)
-; GFX11-NEXT: v_cndmask_b32_e32 v0, v0, v6, vcc_lo
-; GFX11-NEXT: v_lshlrev_b32_e32 v6, 16, v20
-; GFX11-NEXT: v_lshlrev_b32_e32 v3, 16, v19
-; GFX11-NEXT: v_cmp_u_f32_e32 vcc_lo, v5, v5
-; GFX11-NEXT: v_bfe_u32 v2, v4, 16, 1
-; GFX11-NEXT: v_lshrrev_b32_e32 v20, 16, v0
-; GFX11-NEXT: s_delay_alu instid0(VALU_DEP_2) | instskip(SKIP_2) | instid1(VALU_DEP_3)
-; GFX11-NEXT: v_dual_cndmask_b32 v1, v1, v7 :: v_dual_add_nc_u32 v0, v2, v4
-; GFX11-NEXT: v_dual_add_f32 v2, 0x40c00000, v6 :: v_dual_add_f32 v3, 0x40c00000, v3
-; GFX11-NEXT: v_and_b32_e32 v6, 0xffff0000, v21
-; GFX11-NEXT: v_lshrrev_b32_e32 v19, 16, v1
-; GFX11-NEXT: v_or_b32_e32 v7, 0x400000, v4
-; GFX11-NEXT: s_delay_alu instid0(VALU_DEP_4) | instskip(SKIP_1) | instid1(VALU_DEP_2)
-; GFX11-NEXT: v_bfe_u32 v8, v3, 16, 1
-; GFX11-NEXT: v_cmp_u_f32_e32 vcc_lo, v3, v3
-; GFX11-NEXT: v_add_nc_u32_e32 v5, v8, v3
-; GFX11-NEXT: v_bfe_u32 v8, v2, 16, 1
-; GFX11-NEXT: s_delay_alu instid0(VALU_DEP_2) | instskip(SKIP_3) | instid1(VALU_DEP_3)
-; GFX11-NEXT: v_add_nc_u32_e32 v1, 0x7fff, v5
-; GFX11-NEXT: v_or_b32_e32 v5, 0x400000, v3
-; GFX11-NEXT: v_add_f32_e32 v3, 0x40c00000, v6
-; GFX11-NEXT: v_add_nc_u32_e32 v0, 0x7fff, v0
-; GFX11-NEXT: v_dual_cndmask_b32 v1, v1, v5 :: v_dual_and_b32 v6, 0xffff0000, v22
-; GFX11-NEXT: v_cmp_u_f32_e32 vcc_lo, v4, v4
-; GFX11-NEXT: v_add_nc_u32_e32 v4, v8, v2
-; GFX11-NEXT: s_delay_alu instid0(VALU_DEP_3) | instskip(NEXT) | instid1(VALU_DEP_4)
-; GFX11-NEXT: v_dual_add_f32 v6, 0x40c00000, v6 :: v_dual_lshlrev_b32 v5, 16, v21
-; GFX11-NEXT: v_lshrrev_b32_e32 v34, 16, v1
-; GFX11-NEXT: v_cndmask_b32_e32 v0, v0, v7, vcc_lo
-; GFX11-NEXT: v_bfe_u32 v1, v3, 16, 1
-; GFX11-NEXT: s_delay_alu instid0(VALU_DEP_4)
-; GFX11-NEXT: v_add_f32_e32 v5, 0x40c00000, v5
-; GFX11-NEXT: v_cmp_u_f32_e32 vcc_lo, v2, v2
-; GFX11-NEXT: v_lshlrev_b32_e32 v7, 16, v22
-; GFX11-NEXT: v_lshrrev_b32_e32 v35, 16, v0
-; GFX11-NEXT: v_add_nc_u32_e32 v0, 0x7fff, v4
-; GFX11-NEXT: v_or_b32_e32 v4, 0x400000, v2
-; GFX11-NEXT: v_add_nc_u32_e32 v1, v1, v3
-; GFX11-NEXT: v_or_b32_e32 v2, 0x400000, v3
-; GFX11-NEXT: v_and_b32_e32 v34, 0xffff, v34
-; GFX11-NEXT: s_delay_alu instid0(VALU_DEP_3) | instskip(SKIP_3) | instid1(VALU_DEP_4)
-; GFX11-NEXT: v_dual_cndmask_b32 v0, v0, v4 :: v_dual_add_nc_u32 v1, 0x7fff, v1
-; GFX11-NEXT: v_bfe_u32 v4, v5, 16, 1
-; GFX11-NEXT: v_cmp_u_f32_e32 vcc_lo, v3, v3
-; GFX11-NEXT: v_add_f32_e32 v3, 0x40c00000, v7
-; GFX11-NEXT: v_lshrrev_b32_e32 v36, 16, v0
-; GFX11-NEXT: v_bfe_u32 v0, v6, 16, 1
-; GFX11-NEXT: v_and_b32_e32 v7, 0xffff0000, v23
-; GFX11-NEXT: v_dual_cndmask_b32 v1, v1, v2 :: v_dual_add_nc_u32 v2, v4, v5
-; GFX11-NEXT: v_bfe_u32 v4, v3, 16, 1
-; GFX11-NEXT: s_delay_alu instid0(VALU_DEP_4) | instskip(SKIP_1) | instid1(VALU_DEP_4)
-; GFX11-NEXT: v_add_nc_u32_e32 v0, v0, v6
-; GFX11-NEXT: v_cmp_u_f32_e32 vcc_lo, v5, v5
-; GFX11-NEXT: v_lshrrev_b32_e32 v21, 16, v1
-; GFX11-NEXT: v_add_nc_u32_e32 v1, 0x7fff, v2
-; GFX11-NEXT: v_or_b32_e32 v2, 0x400000, v5
-; GFX11-NEXT: v_add_nc_u32_e32 v0, 0x7fff, v0
-; GFX11-NEXT: v_add_f32_e32 v5, 0x40c00000, v7
-; GFX11-NEXT: v_add_nc_u32_e32 v4, v4, v3
-; GFX11-NEXT: s_delay_alu instid0(VALU_DEP_4)
-; GFX11-NEXT: v_dual_cndmask_b32 v1, v1, v2 :: v_dual_and_b32 v36, 0xffff, v36
-; GFX11-NEXT: v_or_b32_e32 v2, 0x400000, v6
-; GFX11-NEXT: v_cmp_u_f32_e32 vcc_lo, v6, v6
-; GFX11-NEXT: v_lshlrev_b32_e32 v6, 16, v23
-; GFX11-NEXT: v_lshl_or_b32 v19, v19, 16, v34
-; GFX11-NEXT: v_lshrrev_b32_e32 v22, 16, v1
-; GFX11-NEXT: v_bfe_u32 v1, v5, 16, 1
-; GFX11-NEXT: v_cndmask_b32_e32 v0, v0, v2, vcc_lo
-; GFX11-NEXT: v_add_nc_u32_e32 v2, 0x7fff, v4
-; GFX11-NEXT: v_or_b32_e32 v4, 0x400000, v3
-; GFX11-NEXT: v_cmp_u_f32_e32 vcc_lo, v3, v3
-; GFX11-NEXT: v_and_b32_e32 v3, 0xffff0000, v24
-; GFX11-NEXT: v_lshrrev_b32_e32 v37, 16, v0
-; GFX11-NEXT: s_delay_alu instid0(VALU_DEP_4) | instskip(SKIP_2) | instid1(VALU_DEP_3)
-; GFX11-NEXT: v_dual_cndmask_b32 v1, v2, v4 :: v_dual_add_nc_u32 v0, v1, v5
-; GFX11-NEXT: v_add_f32_e32 v2, 0x40c00000, v6
-; GFX11-NEXT: v_lshlrev_b32_e32 v4, 16, v24
-; GFX11-NEXT: v_add_nc_u32_e32 v0, 0x7fff, v0
-; GFX11-NEXT: v_or_b32_e32 v6, 0x400000, v5
-; GFX11-NEXT: v_add_f32_e32 v3, 0x40c00000, v3
-; GFX11-NEXT: v_bfe_u32 v7, v2, 16, 1
-; GFX11-NEXT: v_add_f32_e32 v4, 0x40c00000, v4
-; GFX11-NEXT: v_cmp_u_f32_e32 vcc_lo, v5, v5
-; GFX11-NEXT: v_lshrrev_b32_e32 v24, 16, v1
-; GFX11-NEXT: v_bfe_u32 v1, v3, 16, 1
-; GFX11-NEXT: v_add_nc_u32_e32 v5, v7, v2
-; GFX11-NEXT: v_dual_cndmask_b32 v0, v0, v6 :: v_dual_and_b32 v7, 0xffff0000, v25
-; GFX11-NEXT: v_bfe_u32 v6, v4, 16, 1
-; GFX11-NEXT: s_delay_alu instid0(VALU_DEP_4) | instskip(SKIP_1) | instid1(VALU_DEP_4)
-; GFX11-NEXT: v_add_nc_u32_e32 v1, v1, v3
-; GFX11-NEXT: v_cmp_u_f32_e32 vcc_lo, v2, v2
-; GFX11-NEXT: v_lshrrev_b32_e32 v23, 16, v0
-; GFX11-NEXT: v_add_nc_u32_e32 v0, 0x7fff, v5
-; GFX11-NEXT: v_or_b32_e32 v5, 0x400000, v2
-; GFX11-NEXT: v_add_nc_u32_e32 v6, v6, v4
-; GFX11-NEXT: v_add_nc_u32_e32 v1, 0x7fff, v1
-; GFX11-NEXT: v_or_b32_e32 v2, 0x400000, v3
-; GFX11-NEXT: s_delay_alu instid0(VALU_DEP_3) | instskip(SKIP_3) | instid1(VALU_DEP_3)
-; GFX11-NEXT: v_dual_cndmask_b32 v0, v0, v5 :: v_dual_add_nc_u32 v5, 0x7fff, v6
-; GFX11-NEXT: v_add_f32_e32 v6, 0x40c00000, v7
-; GFX11-NEXT: v_cmp_u_f32_e32 vcc_lo, v3, v3
-; GFX11-NEXT: v_or_b32_e32 v7, 0x400000, v4
-; GFX11-NEXT: v_bfe_u32 v3, v6, 16, 1
-; GFX11-NEXT: v_dual_cndmask_b32 v1, v1, v2 :: v_dual_lshlrev_b32 v2, 16, v25
-; GFX11-NEXT: v_cmp_u_f32_e32 vcc_lo, v4, v4
-; GFX11-NEXT: v_lshrrev_b32_e32 v25, 16, v0
-; GFX11-NEXT: s_delay_alu instid0(VALU_DEP_3) | instskip(NEXT) | instid1(VALU_DEP_4)
-; GFX11-NEXT: v_lshrrev_b32_e32 v38, 16, v1
-; GFX11-NEXT: v_add_f32_e32 v0, 0x40c00000, v2
-; GFX11-NEXT: v_dual_cndmask_b32 v4, v5, v7 :: v_dual_add_nc_u32 v1, v3, v6
-; GFX11-NEXT: v_and_b32_e32 v2, 0xffff0000, v26
-; GFX11-NEXT: v_or_b32_e32 v5, 0x400000, v6
-; GFX11-NEXT: s_delay_alu instid0(VALU_DEP_4) | instskip(NEXT) | instid1(VALU_DEP_4)
-; GFX11-NEXT: v_bfe_u32 v3, v0, 16, 1
-; GFX11-NEXT: v_lshrrev_b32_e32 v39, 16, v4
-; GFX11-NEXT: v_lshlrev_b32_e32 v4, 16, v26
-; GFX11-NEXT: v_add_nc_u32_e32 v1, 0x7fff, v1
-; GFX11-NEXT: s_delay_alu instid0(VALU_DEP_4) | instskip(SKIP_1) | instid1(VALU_DEP_4)
-; GFX11-NEXT: v_dual_add_f32 v2, 0x40c00000, v2 :: v_dual_add_nc_u32 v3, v3, v0
-; GFX11-NEXT: v_cmp_u_f32_e32 vcc_lo, v6, v6
-; GFX11-NEXT: v_add_f32_e32 v4, 0x40c00000, v4
-; GFX11-NEXT: v_or_b32_e32 v6, 0x400000, v0
-; GFX11-NEXT: v_and_b32_e32 v39, 0xffff, v39
-; GFX11-NEXT: v_add_nc_u32_e32 v3, 0x7fff, v3
-; GFX11-NEXT: v_cndmask_b32_e32 v1, v1, v5, vcc_lo
-; GFX11-NEXT: v_bfe_u32 v5, v2, 16, 1
-; GFX11-NEXT: v_bfe_u32 v7, v4, 16, 1
-; GFX11-NEXT: v_cmp_u_f32_e32 vcc_lo, v0, v0
-; GFX11-NEXT: v_or_b32_e32 v8, 0x400000, v4
-; GFX11-NEXT: v_lshrrev_b32_e32 v48, 16, v1
-; GFX11-NEXT: v_add_nc_u32_e32 v1, v5, v2
-; GFX11-NEXT: v_dual_cndmask_b32 v0, v3, v6 :: v_dual_and_b32 v5, 0xffff0000, v27
-; GFX11-NEXT: v_lshlrev_b32_e32 v6, 16, v27
-; GFX11-NEXT: v_add_nc_u32_e32 v3, v7, v4
-; GFX11-NEXT: s_delay_alu instid0(VALU_DEP_4)
-; GFX11-NEXT: v_add_nc_u32_e32 v1, 0x7fff, v1
-; GFX11-NEXT: v_or_b32_e32 v7, 0x400000, v2
-; GFX11-NEXT: v_add_f32_e32 v5, 0x40c00000, v5
-; GFX11-NEXT: v_cmp_u_f32_e32 vcc_lo, v2, v2
-; GFX11-NEXT: v_add_f32_e32 v6, 0x40c00000, v6
-; GFX11-NEXT: v_add_nc_u32_e32 v3, 0x7fff, v3
-; GFX11-NEXT: v_lshrrev_b32_e32 v49, 16, v0
-; GFX11-NEXT: v_bfe_u32 v2, v5, 16, 1
-; GFX11-NEXT: v_cndmask_b32_e32 v1, v1, v7, vcc_lo
-; GFX11-NEXT: v_cmp_u_f32_e32 vcc_lo, v4, v4
-; GFX11-NEXT: v_bfe_u32 v4, v6, 16, 1
-; GFX11-NEXT: v_or_b32_e32 v7, 0x400000, v6
-; GFX11-NEXT: v_add_nc_u32_e32 v0, v2, v5
-; GFX11-NEXT: v_lshrrev_b32_e32 v26, 16, v1
-; GFX11-NEXT: v_cndmask_b32_e32 v3, v3, v8, vcc_lo
-; GFX11-NEXT: v_add_nc_u32_e32 v1, v4, v6
-; GFX11-NEXT: v_and_b32_e32 v2, 0xffff0000, v28
-; GFX11-NEXT: v_add_nc_u32_e32 v0, 0x7fff, v0
-; GFX11-NEXT: v_or_b32_e32 v4, 0x400000, v5
-; GFX11-NEXT: v_lshrrev_b32_e32 v27, 16, v3
-; GFX11-NEXT: s_delay_alu instid0(VALU_DEP_4) | instskip(SKIP_3) | instid1(VALU_DEP_4)
-; GFX11-NEXT: v_dual_add_f32 v2, 0x40c00000, v2 :: v_dual_lshlrev_b32 v3, 16, v28
-; GFX11-NEXT: v_cmp_u_f32_e32 vcc_lo, v5, v5
-; GFX11-NEXT: v_add_nc_u32_e32 v1, 0x7fff, v1
-; GFX11-NEXT: v_and_b32_e32 v49, 0xffff, v49
-; GFX11-NEXT: v_dual_add_f32 v3, 0x40c00000, v3 :: v_dual_cndmask_b32 v0, v0, v4
-; GFX11-NEXT: v_cmp_u_f32_e32 vcc_lo, v6, v6
-; GFX11-NEXT: v_bfe_u32 v4, v2, 16, 1
-; GFX11-NEXT: s_delay_alu instid0(VALU_DEP_3) | instskip(SKIP_2) | instid1(VALU_DEP_4)
-; GFX11-NEXT: v_bfe_u32 v5, v3, 16, 1
-; GFX11-NEXT: v_or_b32_e32 v6, 0x400000, v2
-; GFX11-NEXT: v_lshrrev_b32_e32 v50, 16, v0
-; GFX11-NEXT: v_dual_cndmask_b32 v1, v1, v7 :: v_dual_add_nc_u32 v0, v4, v2
-; GFX11-NEXT: v_cmp_u_f32_e32 vcc_lo, v2, v2
-; GFX11-NEXT: v_and_b32_e32 v4, 0xffff0000, v29
-; GFX11-NEXT: v_or_b32_e32 v7, 0x400000, v3
-; GFX11-NEXT: s_delay_alu instid0(VALU_DEP_4) | instskip(SKIP_3) | instid1(VALU_DEP_2)
-; GFX11-NEXT: v_lshrrev_b32_e32 v28, 16, v1
-; GFX11-NEXT: v_add_nc_u32_e32 v1, v5, v3
-; GFX11-NEXT: v_lshlrev_b32_e32 v5, 16, v29
-; GFX11-NEXT: v_add_nc_u32_e32 v0, 0x7fff, v0
-; GFX11-NEXT: v_dual_add_f32 v4, 0x40c00000, v4 :: v_dual_add_f32 v5, 0x40c00000, v5
-; GFX11-NEXT: s_delay_alu instid0(VALU_DEP_2) | instskip(NEXT) | instid1(VALU_DEP_2)
-; GFX11-NEXT: v_cndmask_b32_e32 v0, v0, v6, vcc_lo
-; GFX11-NEXT: v_bfe_u32 v8, v4, 16, 1
-; GFX11-NEXT: v_cmp_u_f32_e32 vcc_lo, v3, v3
-; GFX11-NEXT: s_delay_alu instid0(VALU_DEP_4) | instskip(NEXT) | instid1(VALU_DEP_4)
-; GFX11-NEXT: v_bfe_u32 v2, v5, 16, 1
-; GFX11-NEXT: v_lshrrev_b32_e32 v51, 16, v0
-; GFX11-NEXT: s_delay_alu instid0(VALU_DEP_4) | instskip(SKIP_1) | instid1(VALU_DEP_4)
-; GFX11-NEXT: v_add_nc_u32_e32 v3, v8, v4
-; GFX11-NEXT: v_or_b32_e32 v6, 0x400000, v5
-; GFX11-NEXT: v_add_nc_u32_e32 v0, v2, v5
-; GFX11-NEXT: v_and_b32_e32 v2, 0xffff0000, v30
-; GFX11-NEXT: s_delay_alu instid0(VALU_DEP_2) | instskip(NEXT) | instid1(VALU_DEP_2)
-; GFX11-NEXT: v_add_nc_u32_e32 v0, 0x7fff, v0
-; GFX11-NEXT: v_add_f32_e32 v2, 0x40c00000, v2
-; GFX11-NEXT: v_add_nc_u32_e32 v1, 0x7fff, v1
-; GFX11-NEXT: s_delay_alu instid0(VALU_DEP_1) | instskip(SKIP_2) | instid1(VALU_DEP_3)
-; GFX11-NEXT: v_cndmask_b32_e32 v1, v1, v7, vcc_lo
-; GFX11-NEXT: v_cmp_u_f32_e32 vcc_lo, v4, v4
-; GFX11-NEXT: v_lshlrev_b32_e32 v7, 16, v30
-; GFX11-NEXT: v_lshrrev_b32_e32 v29, 16, v1
-; GFX11-NEXT: v_add_nc_u32_e32 v1, 0x7fff, v3
-; GFX11-NEXT: v_or_b32_e32 v3, 0x400000, v4
-; GFX11-NEXT: s_delay_alu instid0(VALU_DEP_4) | instskip(NEXT) | instid1(VALU_DEP_2)
-; GFX11-NEXT: v_add_f32_e32 v4, 0x40c00000, v7
-; GFX11-NEXT: v_cndmask_b32_e32 v1, v1, v3, vcc_lo
-; GFX11-NEXT: v_bfe_u32 v3, v2, 16, 1
-; GFX11-NEXT: v_cmp_u_f32_e32 vcc_lo, v5, v5
-; GFX11-NEXT: v_and_b32_e32 v5, 0xffff0000, v31
-; GFX11-NEXT: s_delay_alu instid0(VALU_DEP_4) | instskip(NEXT) | instid1(VALU_DEP_4)
-; GFX11-NEXT: v_lshrrev_b32_e32 v52, 16, v1
-; GFX11-NEXT: v_add_nc_u32_e32 v3, v3, v2
-; GFX11-NEXT: s_delay_alu instid0(VALU_DEP_3) | instskip(SKIP_2) | instid1(VALU_DEP_4)
-; GFX11-NEXT: v_dual_cndmask_b32 v0, v0, v6 :: v_dual_add_f32 v1, 0x40c00000, v5
-; GFX11-NEXT: v_or_b32_e32 v5, 0x400000, v2
-; GFX11-NEXT: v_lshlrev_b32_e32 v6, 16, v31
-; GFX11-NEXT: v_add_nc_u32_e32 v3, 0x7fff, v3
-; GFX11-NEXT: s_delay_alu instid0(VALU_DEP_4) | instskip(SKIP_3) | instid1(VALU_DEP_3)
-; GFX11-NEXT: v_lshrrev_b32_e32 v30, 16, v0
-; GFX11-NEXT: v_bfe_u32 v0, v4, 16, 1
-; GFX11-NEXT: v_cmp_u_f32_e32 vcc_lo, v2, v2
-; GFX11-NEXT: v_bfe_u32 v7, v1, 16, 1
-; GFX11-NEXT: v_add_nc_u32_e32 v0, v0, v4
-; GFX11-NEXT: v_dual_cndmask_b32 v2, v3, v5 :: v_dual_add_f32 v3, 0x40c00000, v6
-; GFX11-NEXT: v_or_b32_e32 v5, 0x400000, v4
-; GFX11-NEXT: s_delay_alu instid0(VALU_DEP_4) | instskip(NEXT) | instid1(VALU_DEP_4)
-; GFX11-NEXT: v_add_nc_u32_e32 v6, v7, v1
-; GFX11-NEXT: v_add_nc_u32_e32 v0, 0x7fff, v0
-; GFX11-NEXT: s_delay_alu instid0(VALU_DEP_4)
-; GFX11-NEXT: v_lshrrev_b32_e32 v53, 16, v2
-; GFX11-NEXT: v_bfe_u32 v2, v3, 16, 1
-; GFX11-NEXT: v_cmp_u_f32_e32 vcc_lo, v4, v4
-; GFX11-NEXT: v_add_nc_u32_e32 v4, 0x7fff, v6
-; GFX11-NEXT: v_add_f32_e64 v6, 0x40c00000, s0
-; GFX11-NEXT: s_lshl_b32 s0, s12, 16
-; GFX11-NEXT: v_add_nc_u32_e32 v2, v2, v3
-; GFX11-NEXT: v_cndmask_b32_e32 v0, v0, v5, vcc_lo
-; GFX11-NEXT: v_or_b32_e32 v5, 0x400000, v1
-; GFX11-NEXT: v_cmp_u_f32_e32 vcc_lo, v1, v1
-; GFX11-NEXT: s_delay_alu instid0(VALU_DEP_4) | instskip(NEXT) | instid1(VALU_DEP_4)
-; GFX11-NEXT: v_add_nc_u32_e32 v1, 0x7fff, v2
-; GFX11-NEXT: v_lshrrev_b32_e32 v54, 16, v0
-; GFX11-NEXT: v_or_b32_e32 v2, 0x400000, v3
-; GFX11-NEXT: v_cndmask_b32_e32 v0, v4, v5, vcc_lo
-; GFX11-NEXT: v_bfe_u32 v4, v6, 16, 1
-; GFX11-NEXT: v_cmp_u_f32_e32 vcc_lo, v3, v3
-; GFX11-NEXT: v_add_f32_e64 v5, 0x40c00000, s0
-; GFX11-NEXT: s_and_b32 s0, s13, 0xffff0000
-; GFX11-NEXT: v_lshrrev_b32_e32 v31, 16, v0
-; GFX11-NEXT: v_add_f32_e64 v3, 0x40c00000, s0
-; GFX11-NEXT: v_cndmask_b32_e32 v0, v1, v2, vcc_lo
-; GFX11-NEXT: v_add_nc_u32_e32 v1, v4, v6
-; GFX11-NEXT: v_bfe_u32 v2, v5, 16, 1
-; GFX11-NEXT: v_or_b32_e32 v4, 0x400000, v6
-; GFX11-NEXT: v_cmp_u_f32_e32 vcc_lo, v6, v6
-; GFX11-NEXT: s_lshl_b32 s0, s13, 16
-; GFX11-NEXT: v_add_nc_u32_e32 v1, 0x7fff, v1
-; GFX11-NEXT: v_add_nc_u32_e32 v2, v2, v5
-; GFX11-NEXT: v_bfe_u32 v7, v3, 16, 1
-; GFX11-NEXT: v_or_b32_e32 v6, 0x400000, v5
-; GFX11-NEXT: v_lshrrev_b32_e32 v0, 16, v0
-; GFX11-NEXT: v_cndmask_b32_e32 v1, v1, v4, vcc_lo
-; GFX11-NEXT: v_add_f32_e64 v4, 0x40c00000, s0
-; GFX11-NEXT: v_add_nc_u32_e32 v2, 0x7fff, v2
-; GFX11-NEXT: v_add_nc_u32_e32 v7, v7, v3
-; GFX11-NEXT: v_cmp_u_f32_e32 vcc_lo, v5, v5
-; GFX11-NEXT: v_lshrrev_b32_e32 v55, 16, v1
-; GFX11-NEXT: v_bfe_u32 v1, v4, 16, 1
-; GFX11-NEXT: s_and_b32 s0, s14, 0xffff0000
-; GFX11-NEXT: v_dual_cndmask_b32 v2, v2, v6 :: v_dual_add_nc_u32 v5, 0x7fff, v7
-; GFX11-NEXT: v_or_b32_e32 v6, 0x400000, v3
-; GFX11-NEXT: s_delay_alu instid0(VALU_DEP_3)
-; GFX11-NEXT: v_add_nc_u32_e32 v1, v1, v4
-; GFX11-NEXT: v_add_f32_e64 v7, 0x40c00000, s0
-; GFX11-NEXT: v_cmp_u_f32_e32 vcc_lo, v3, v3
-; GFX11-NEXT: s_lshl_b32 s0, s14, 16
-; GFX11-NEXT: v_lshrrev_b32_e32 v2, 16, v2
-; GFX11-NEXT: v_add_f32_e64 v9, 0x40c00000, s0
-; GFX11-NEXT: v_bfe_u32 v8, v7, 16, 1
-; GFX11-NEXT: v_cndmask_b32_e32 v3, v5, v6, vcc_lo
-; GFX11-NEXT: v_add_nc_u32_e32 v5, 0x7fff, v1
-; GFX11-NEXT: v_or_b32_e32 v6, 0x400000, v4
-; GFX11-NEXT: v_cmp_u_f32_e32 vcc_lo, v4, v4
-; GFX11-NEXT: s_and_b32 s0, s15, 0xffff0000
-; GFX11-NEXT: v_lshrrev_b32_e32 v1, 16, v3
-; GFX11-NEXT: v_add_nc_u32_e32 v4, v8, v7
-; GFX11-NEXT: v_or_b32_e32 v8, 0x400000, v7
-; GFX11-NEXT: v_cndmask_b32_e32 v3, v5, v6, vcc_lo
-; GFX11-NEXT: v_bfe_u32 v5, v9, 16, 1
-; GFX11-NEXT: v_add_f32_e64 v6, 0x40c00000, s0
-; GFX11-NEXT: v_add_nc_u32_e32 v4, 0x7fff, v4
-; GFX11-NEXT: v_cmp_u_f32_e32 vcc_lo, v7, v7
-; GFX11-NEXT: s_lshl_b32 s0, s15, 16
-; GFX11-NEXT: v_add_nc_u32_e32 v5, v5, v9
-; GFX11-NEXT: v_bfe_u32 v10, v6, 16, 1
-; GFX11-NEXT: v_add_f32_e64 v7, 0x40c00000, s0
-; GFX11-NEXT: v_cndmask_b32_e32 v4, v4, v8, vcc_lo
-; GFX11-NEXT: v_or_b32_e32 v8, 0x400000, v9
-; GFX11-NEXT: v_add_nc_u32_e32 v5, 0x7fff, v5
-; GFX11-NEXT: v_add_nc_u32_e32 v10, v10, v6
-; GFX11-NEXT: v_cmp_u_f32_e32 vcc_lo, v9, v9
-; GFX11-NEXT: v_lshrrev_b32_e32 v64, 16, v4
-; GFX11-NEXT: v_bfe_u32 v4, v7, 16, 1
-; GFX11-NEXT: v_or_b32_e32 v9, 0x400000, v6
-; GFX11-NEXT: s_and_b32 s0, s16, 0xffff0000
-; GFX11-NEXT: v_dual_cndmask_b32 v5, v5, v8 :: v_dual_add_nc_u32 v8, 0x7fff, v10
-; GFX11-NEXT: v_cmp_u_f32_e32 vcc_lo, v6, v6
-; GFX11-NEXT: v_add_nc_u32_e32 v4, v4, v7
-; GFX11-NEXT: v_lshrrev_b32_e32 v3, 16, v3
-; GFX11-NEXT: s_delay_alu instid0(VALU_DEP_4)
-; GFX11-NEXT: v_lshrrev_b32_e32 v66, 16, v5
-; GFX11-NEXT: v_add_f32_e64 v5, 0x40c00000, s0
-; GFX11-NEXT: v_cndmask_b32_e32 v6, v8, v9, vcc_lo
-; GFX11-NEXT: s_lshl_b32 s0, s16, 16
-; GFX11-NEXT: v_add_nc_u32_e32 v4, 0x7fff, v4
-; GFX11-NEXT: v_or_b32_e32 v8, 0x400000, v7
-; GFX11-NEXT: v_bfe_u32 v9, v5, 16, 1
-; GFX11-NEXT: v_lshrrev_b32_e32 v65, 16, v6
-; GFX11-NEXT: v_add_f32_e64 v6, 0x40c00000, s0
-; GFX11-NEXT: v_cmp_u_f32_e32 vcc_lo, v7, v7
-; GFX11-NEXT: s_and_b32 s0, s17, 0xffff0000
-; GFX11-NEXT: v_add_nc_u32_e32 v7, v9, v5
-; GFX11-NEXT: v_add_f32_e64 v9, 0x40c00000, s0
-; GFX11-NEXT: s_lshl_b32 s0, s17, 16
-; GFX11-NEXT: v_cndmask_b32_e32 v4, v4, v8, vcc_lo
-; GFX11-NEXT: v_bfe_u32 v8, v6, 16, 1
-; GFX11-NEXT: v_cmp_u_f32_e32 vcc_lo, v5, v5
-; GFX11-NEXT: v_bfe_u32 v10, v9, 16, 1
-; GFX11-NEXT: v_and_b32_e32 v66, 0xffff, v66
-; GFX11-NEXT: v_lshrrev_b32_e32 v67, 16, v4
-; GFX11-NEXT: v_add_nc_u32_e32 v4, 0x7fff, v7
-; GFX11-NEXT: v_or_b32_e32 v7, 0x400000, v5
-; GFX11-NEXT: v_add_nc_u32_e32 v8, v8, v6
-; GFX11-NEXT: v_add_f32_e64 v5, 0x40c00000, s0
-; GFX11-NEXT: v_add_nc_u32_e32 v10, v10, v9
-; GFX11-NEXT: s_and_b32 s0, s18, 0xffff0000
-; GFX11-NEXT: s_delay_alu instid0(VALU_DEP_3)
-; GFX11-NEXT: v_dual_cndmask_b32 v4, v4, v7 :: v_dual_add_nc_u32 v7, 0x7fff, v8
-; GFX11-NEXT: v_or_b32_e32 v8, 0x400000, v6
-; GFX11-NEXT: v_cmp_u_f32_e32 vcc_lo, v6, v6
-; GFX11-NEXT: v_bfe_u32 v11, v5, 16, 1
-; GFX11-NEXT: v_and_b32_e32 v67, 0xffff, v67
-; GFX11-NEXT: v_lshrrev_b32_e32 v4, 16, v4
-; GFX11-NEXT: v_and_b32_e32 v54, 0xffff, v54
-; GFX11-NEXT: v_dual_cndmask_b32 v6, v7, v8 :: v_dual_add_nc_u32 v7, 0x7fff, v10
-; GFX11-NEXT: v_or_b32_e32 v8, 0x400000, v9
-; GFX11-NEXT: v_cmp_u_f32_e32 vcc_lo, v9, v9
-; GFX11-NEXT: v_add_nc_u32_e32 v10, v11, v5
-; GFX11-NEXT: v_add_f32_e64 v11, 0x40c00000, s0
-; GFX11-NEXT: s_lshl_b32 s0, s18, 16
-; GFX11-NEXT: v_or_b32_e32 v9, 0x400000, v5
-; GFX11-NEXT: s_delay_alu instid0(VALU_DEP_3) | instskip(NEXT) | instid1(VALU_DEP_3)
-; GFX11-NEXT: v_dual_cndmask_b32 v7, v7, v8 :: v_dual_add_nc_u32 v8, 0x7fff, v10
-; GFX11-NEXT: v_bfe_u32 v10, v11, 16, 1
-; GFX11-NEXT: v_cmp_u_f32_e32 vcc_lo, v5, v5
-; GFX11-NEXT: v_or_b32_e32 v12, 0x400000, v11
-; GFX11-NEXT: s_delay_alu instid0(VALU_DEP_4) | instskip(SKIP_4) | instid1(VALU_DEP_3)
-; GFX11-NEXT: v_lshrrev_b32_e32 v68, 16, v7
-; GFX11-NEXT: v_add_f32_e64 v7, 0x40c00000, s0
-; GFX11-NEXT: s_and_b32 s0, s19, 0xffff0000
-; GFX11-NEXT: v_dual_cndmask_b32 v5, v8, v9 :: v_dual_add_nc_u32 v8, v10, v11
-; GFX11-NEXT: v_add_f32_e64 v10, 0x40c00000, s0
-; GFX11-NEXT: v_bfe_u32 v9, v7, 16, 1
-; GFX11-NEXT: v_cmp_u_f32_e32 vcc_lo, v11, v11
-; GFX11-NEXT: s_lshl_b32 s0, s19, 16
-; GFX11-NEXT: v_add_nc_u32_e32 v8, 0x7fff, v8
-; GFX11-NEXT: v_bfe_u32 v13, v10, 16, 1
-; GFX11-NEXT: v_add_nc_u32_e32 v9, v9, v7
-; GFX11-NEXT: v_add_f32_e64 v11, 0x40c00000, s0
-; GFX11-NEXT: s_and_b32 s0, s20, 0xffff0000
-; GFX11-NEXT: v_cndmask_b32_e32 v8, v8, v12, vcc_lo
-; GFX11-NEXT: v_or_b32_e32 v12, 0x400000, v7
-; GFX11-NEXT: v_add_nc_u32_e32 v9, 0x7fff, v9
-; GFX11-NEXT: v_add_nc_u32_e32 v13, v13, v10
-; GFX11-NEXT: v_cmp_u_f32_e32 vcc_lo, v7, v7
-; GFX11-NEXT: v_lshrrev_b32_e32 v69, 16, v8
-; GFX11-NEXT: v_bfe_u32 v8, v11, 16, 1
-; GFX11-NEXT: v_lshrrev_b32_e32 v6, 16, v6
-; GFX11-NEXT: v_lshrrev_b32_e32 v5, 16, v5
-; GFX11-NEXT: v_cndmask_b32_e32 v7, v9, v12, vcc_lo
-; GFX11-NEXT: v_add_nc_u32_e32 v9, 0x7fff, v13
-; GFX11-NEXT: v_or_b32_e32 v12, 0x400000, v10
-; GFX11-NEXT: v_cmp_u_f32_e32 vcc_lo, v10, v10
-; GFX11-NEXT: v_add_nc_u32_e32 v8, v8, v11
-; GFX11-NEXT: v_add_f32_e64 v13, 0x40c00000, s0
-; GFX11-NEXT: s_lshl_b32 s0, s20, 16
-; GFX11-NEXT: v_or_b32_e32 v10, 0x400000, v11
-; GFX11-NEXT: v_cndmask_b32_e32 v9, v9, v12, vcc_lo
-; GFX11-NEXT: v_add_nc_u32_e32 v8, 0x7fff, v8
-; GFX11-NEXT: v_bfe_u32 v12, v13, 16, 1
-; GFX11-NEXT: v_cmp_u_f32_e32 vcc_lo, v11, v11
-; GFX11-NEXT: v_or_b32_e32 v14, 0x400000, v13
-; GFX11-NEXT: v_lshrrev_b32_e32 v70, 16, v9
-; GFX11-NEXT: v_add_f32_e64 v9, 0x40c00000, s0
-; GFX11-NEXT: s_and_b32 s0, s21, 0xffff0000
-; GFX11-NEXT: v_cndmask_b32_e32 v8, v8, v10, vcc_lo
-; GFX11-NEXT: v_add_nc_u32_e32 v10, v12, v13
-; GFX11-NEXT: v_add_f32_e64 v12, 0x40c00000, s0
-; GFX11-NEXT: v_bfe_u32 v11, v9, 16, 1
-; GFX11-NEXT: v_cmp_u_f32_e32 vcc_lo, v13, v13
-; GFX11-NEXT: s_lshl_b32 s0, s21, 16
-; GFX11-NEXT: v_add_nc_u32_e32 v10, 0x7fff, v10
-; GFX11-NEXT: v_bfe_u32 v15, v12, 16, 1
-; GFX11-NEXT: v_add_nc_u32_e32 v11, v11, v9
-; GFX11-NEXT: v_add_f32_e64 v13, 0x40c00000, s0
-; GFX11-NEXT: s_and_b32 s0, s22, 0xffff0000
-; GFX11-NEXT: v_cndmask_b32_e32 v10, v10, v14, vcc_lo
-; GFX11-NEXT: v_or_b32_e32 v14, 0x400000, v9
-; GFX11-NEXT: v_add_nc_u32_e32 v11, 0x7fff, v11
-; GFX11-NEXT: v_add_nc_u32_e32 v15, v15, v12
-; GFX11-NEXT: v_cmp_u_f32_e32 vcc_lo, v9, v9
-; GFX11-NEXT: v_lshrrev_b32_e32 v71, 16, v10
-; GFX11-NEXT: v_bfe_u32 v10, v13, 16, 1
-; GFX11-NEXT: v_lshrrev_b32_e32 v7, 16, v7
-; GFX11-NEXT: v_lshrrev_b32_e32 v8, 16, v8
-; GFX11-NEXT: v_cndmask_b32_e32 v9, v11, v14, vcc_lo
-; GFX11-NEXT: v_add_nc_u32_e32 v11, 0x7fff, v15
-; GFX11-NEXT: v_or_b32_e32 v14, 0x400000, v12
-; GFX11-NEXT: v_cmp_u_f32_e32 vcc_lo, v12, v12
-; GFX11-NEXT: v_add_nc_u32_e32 v10, v10, v13
-; GFX11-NEXT: v_lshrrev_b32_e32 v80, 16, v9
-; GFX11-NEXT: v_add_f32_e64 v9, 0x40c00000, s0
-; GFX11-NEXT: s_lshl_b32 s0, s22, 16
-; GFX11-NEXT: v_cndmask_b32_e32 v11, v11, v14, vcc_lo
-; GFX11-NEXT: v_add_nc_u32_e32 v10, 0x7fff, v10
-; GFX11-NEXT: v_or_b32_e32 v12, 0x400000, v13
-; GFX11-NEXT: v_bfe_u32 v14, v9, 16, 1
-; GFX11-NEXT: v_cmp_u_f32_e32 vcc_lo, v13, v13
-; GFX11-NEXT: v_lshrrev_b32_e32 v81, 16, v11
-; GFX11-NEXT: v_add_f32_e64 v11, 0x40c00000, s0
-; GFX11-NEXT: s_and_b32 s0, s23, 0xffff0000
-; GFX11-NEXT: v_or_b32_e32 v15, 0x400000, v9
-; GFX11-NEXT: v_cndmask_b32_e32 v10, v10, v12, vcc_lo
-; GFX11-NEXT: v_add_nc_u32_e32 v12, v14, v9
-; GFX11-NEXT: v_bfe_u32 v13, v11, 16, 1
-; GFX11-NEXT: v_add_f32_e64 v14, 0x40c00000, s0
-; GFX11-NEXT: v_cmp_u_f32_e32 vcc_lo, v9, v9
-; GFX11-NEXT: s_lshl_b32 s0, s23, 16
-; GFX11-NEXT: v_add_nc_u32_e32 v12, 0x7fff, v12
-; GFX11-NEXT: v_add_nc_u32_e32 v13, v13, v11
-; GFX11-NEXT: v_bfe_u32 v82, v14, 16, 1
-; GFX11-NEXT: v_lshrrev_b32_e32 v10, 16, v10
-; GFX11-NEXT: v_and_b32_e32 v5, 0xffff, v5
-; GFX11-NEXT: v_cndmask_b32_e32 v9, v12, v15, vcc_lo
-; GFX11-NEXT: v_add_f32_e64 v12, 0x40c00000, s0
-; GFX11-NEXT: v_add_nc_u32_e32 v13, 0x7fff, v13
-; GFX11-NEXT: v_or_b32_e32 v15, 0x400000, v11
-; GFX11-NEXT: v_add_nc_u32_e32 v82, v82, v14
-; GFX11-NEXT: v_cmp_u_f32_e32 vcc_lo, v11, v11
-; GFX11-NEXT: v_bfe_u32 v83, v12, 16, 1
-; GFX11-NEXT: s_and_b32 s0, s24, 0xffff0000
-; GFX11-NEXT: v_lshrrev_b32_e32 v9, 16, v9
-; GFX11-NEXT: v_lshl_or_b32 v5, v68, 16, v5
-; GFX11-NEXT: v_cndmask_b32_e32 v11, v13, v15, vcc_lo
-; GFX11-NEXT: v_add_nc_u32_e32 v13, 0x7fff, v82
-; GFX11-NEXT: v_or_b32_e32 v15, 0x400000, v14
-; GFX11-NEXT: v_cmp_u_f32_e32 vcc_lo, v14, v14
-; GFX11-NEXT: v_add_nc_u32_e32 v82, v83, v12
-; GFX11-NEXT: v_add_f32_e64 v83, 0x40c00000, s0
-; GFX11-NEXT: s_lshl_b32 s0, s24, 16
-; GFX11-NEXT: v_lshrrev_b32_e32 v11, 16, v11
-; GFX11-NEXT: s_delay_alu instid0(VALU_DEP_3) | instskip(SKIP_3) | instid1(VALU_DEP_4)
-; GFX11-NEXT: v_dual_cndmask_b32 v13, v13, v15 :: v_dual_add_nc_u32 v14, 0x7fff, v82
-; GFX11-NEXT: v_or_b32_e32 v15, 0x400000, v12
-; GFX11-NEXT: v_bfe_u32 v82, v83, 16, 1
-; GFX11-NEXT: v_cmp_u_f32_e32 vcc_lo, v12, v12
-; GFX11-NEXT: v_lshrrev_b32_e32 v84, 16, v13
-; GFX11-NEXT: v_add_f32_e64 v13, 0x40c00000, s0
-; GFX11-NEXT: s_and_b32 s0, s25, 0xffff0000
-; GFX11-NEXT: v_or_b32_e32 v85, 0x400000, v83
-; GFX11-NEXT: v_cndmask_b32_e32 v12, v14, v15, vcc_lo
-; GFX11-NEXT: v_add_nc_u32_e32 v14, v82, v83
-; GFX11-NEXT: v_bfe_u32 v15, v13, 16, 1
-; GFX11-NEXT: v_add_f32_e64 v82, 0x40c00000, s0
-; GFX11-NEXT: v_cmp_u_f32_e32 vcc_lo, v83, v83
-; GFX11-NEXT: s_lshl_b32 s0, s25, 16
-; GFX11-NEXT: v_add_nc_u32_e32 v14, 0x7fff, v14
-; GFX11-NEXT: v_add_nc_u32_e32 v15, v15, v13
-; GFX11-NEXT: v_bfe_u32 v86, v82, 16, 1
-; GFX11-NEXT: v_add_f32_e64 v83, 0x40c00000, s0
-; GFX11-NEXT: s_and_b32 s0, s26, 0xffff0000
-; GFX11-NEXT: s_delay_alu instid0(VALU_DEP_3) | instskip(SKIP_3) | instid1(VALU_DEP_4)
-; GFX11-NEXT: v_dual_cndmask_b32 v14, v14, v85 :: v_dual_add_nc_u32 v15, 0x7fff, v15
-; GFX11-NEXT: v_or_b32_e32 v85, 0x400000, v13
-; GFX11-NEXT: v_add_nc_u32_e32 v86, v86, v82
-; GFX11-NEXT: v_cmp_u_f32_e32 vcc_lo, v13, v13
-; GFX11-NEXT: v_lshrrev_b32_e32 v87, 16, v14
-; GFX11-NEXT: v_bfe_u32 v14, v83, 16, 1
-; GFX11-NEXT: v_or_b32_e32 v100, 0x400000, v83
-; GFX11-NEXT: v_lshrrev_b32_e32 v12, 16, v12
-; GFX11-NEXT: v_cndmask_b32_e32 v13, v15, v85, vcc_lo
-; GFX11-NEXT: v_add_nc_u32_e32 v15, 0x7fff, v86
-; GFX11-NEXT: v_add_f32_e64 v86, 0x40c00000, s0
-; GFX11-NEXT: s_lshl_b32 s0, s26, 16
-; GFX11-NEXT: v_or_b32_e32 v85, 0x400000, v82
-; GFX11-NEXT: v_cmp_u_f32_e32 vcc_lo, v82, v82
-; GFX11-NEXT: v_add_f32_e64 v82, 0x40c00000, s0
-; GFX11-NEXT: s_lshl_b32 s0, s27, 16
-; GFX11-NEXT: v_or_b32_e32 v102, 0x400000, v86
-; GFX11-NEXT: v_add_f32_e64 v96, 0x40c00000, s0
-; GFX11-NEXT: s_and_b32 s0, s27, 0xffff0000
-; GFX11-NEXT: v_bfe_u32 v97, v82, 16, 1
-; GFX11-NEXT: v_cndmask_b32_e32 v15, v15, v85, vcc_lo
-; GFX11-NEXT: v_bfe_u32 v85, v86, 16, 1
-; GFX11-NEXT: v_bfe_u32 v99, v96, 16, 1
-; GFX11-NEXT: v_add_f32_e64 v98, 0x40c00000, s0
-; GFX11-NEXT: v_add_nc_u32_e32 v97, v97, v82
-; GFX11-NEXT: v_or_b32_e32 v103, 0x400000, v82
-; GFX11-NEXT: v_cmp_u_f32_e32 vcc_lo, v82, v82
-; GFX11-NEXT: v_add_nc_u32_e32 v99, v99, v96
-; GFX11-NEXT: v_add_nc_u32_e32 v85, v85, v86
-; GFX11-NEXT: v_add_nc_u32_e32 v97, 0x7fff, v97
-; GFX11-NEXT: v_bfe_u32 v101, v98, 16, 1
-; GFX11-NEXT: v_or_b32_e32 v112, 0x400000, v96
-; GFX11-NEXT: v_add_nc_u32_e32 v99, 0x7fff, v99
-; GFX11-NEXT: s_delay_alu instid0(VALU_DEP_4)
-; GFX11-NEXT: v_dual_cndmask_b32 v82, v97, v103 :: v_dual_add_nc_u32 v85, 0x7fff, v85
-; GFX11-NEXT: v_cmp_u_f32_e32 vcc_lo, v96, v96
-; GFX11-NEXT: v_add_nc_u32_e32 v101, v101, v98
-; GFX11-NEXT: v_add_nc_u32_e32 v14, v14, v83
-; GFX11-NEXT: v_lshrrev_b32_e32 v13, 16, v13
-; GFX11-NEXT: v_lshrrev_b32_e32 v82, 16, v82
-; GFX11-NEXT: v_cndmask_b32_e32 v96, v99, v112, vcc_lo
-; GFX11-NEXT: v_cmp_u_f32_e32 vcc_lo, v86, v86
-; GFX11-NEXT: v_add_nc_u32_e32 v97, 0x7fff, v101
-; GFX11-NEXT: v_or_b32_e32 v101, 0x400000, v98
-; GFX11-NEXT: v_add_nc_u32_e32 v14, 0x7fff, v14
-; GFX11-NEXT: v_lshrrev_b32_e32 v96, 16, v96
-; GFX11-NEXT: v_cndmask_b32_e32 v85, v85, v102, vcc_lo
-; GFX11-NEXT: v_cmp_u_f32_e32 vcc_lo, v98, v98
-; GFX11-NEXT: v_and_b32_e32 v82, 0xffff, v82
-; GFX11-NEXT: v_and_b32_e32 v68, 0xffff, v3
-; GFX11-NEXT: v_lshl_or_b32 v3, v65, 16, v67
-; GFX11-NEXT: v_dual_cndmask_b32 v86, v97, v101 :: v_dual_and_b32 v65, 0xffff, v28
-; GFX11-NEXT: v_cmp_u_f32_e32 vcc_lo, v83, v83
-; GFX11-NEXT: v_lshrrev_b32_e32 v83, 16, v85
-; GFX11-NEXT: v_lshrrev_b32_e32 v97, 16, v15
-; GFX11-NEXT: v_and_b32_e32 v80, 0xffff, v80
-; GFX11-NEXT: v_lshrrev_b32_e32 v85, 16, v86
-; GFX11-NEXT: v_cndmask_b32_e32 v14, v14, v100, vcc_lo
-; GFX11-NEXT: v_and_b32_e32 v86, 0xffff, v96
-; GFX11-NEXT: v_lshl_or_b32 v1, v1, 16, v68
-; GFX11-NEXT: s_delay_alu instid0(VALU_DEP_3) | instskip(SKIP_1) | instid1(VALU_DEP_4)
-; GFX11-NEXT: v_lshrrev_b32_e32 v96, 16, v14
-; GFX11-NEXT: v_lshl_or_b32 v14, v83, 16, v82
-; GFX11-NEXT: v_lshl_or_b32 v15, v85, 16, v86
-; GFX11-NEXT: v_and_b32_e32 v83, 0xffff, v13
-; GFX11-NEXT: v_and_b32_e32 v86, 0xffff, v11
-; GFX11-NEXT: v_and_b32_e32 v82, 0xffff, v96
-; GFX11-NEXT: v_and_b32_e32 v96, 0xffff, v10
-; GFX11-NEXT: v_and_b32_e32 v85, 0xffff, v12
-; GFX11-NEXT: v_lshl_or_b32 v12, v87, 16, v83
-; GFX11-NEXT: v_lshl_or_b32 v10, v9, 16, v86
-; GFX11-NEXT: v_lshl_or_b32 v13, v97, 16, v82
-; GFX11-NEXT: v_and_b32_e32 v82, 0xffff, v7
-; GFX11-NEXT: v_lshl_or_b32 v9, v81, 16, v96
-; GFX11-NEXT: v_and_b32_e32 v81, 0xffff, v8
-; GFX11-NEXT: v_and_b32_e32 v83, 0xffff, v6
-; GFX11-NEXT: v_lshl_or_b32 v11, v84, 16, v85
-; GFX11-NEXT: v_lshl_or_b32 v6, v69, 16, v82
-; GFX11-NEXT: v_and_b32_e32 v69, 0xffff, v2
-; GFX11-NEXT: v_lshl_or_b32 v2, v64, 16, v66
-; GFX11-NEXT: v_and_b32_e32 v64, 0xffff, v29
-; GFX11-NEXT: v_lshl_or_b32 v7, v70, 16, v81
-; GFX11-NEXT: v_and_b32_e32 v70, 0xffff, v0
-; GFX11-NEXT: v_lshl_or_b32 v0, v55, 16, v69
-; GFX11-NEXT: v_and_b32_e32 v55, 0xffff, v30
-; GFX11-NEXT: v_lshl_or_b32 v28, v51, 16, v64
-; GFX11-NEXT: v_and_b32_e32 v51, 0xffff, v24
-; GFX11-NEXT: v_and_b32_e32 v66, 0xffff, v27
-; GFX11-NEXT: v_lshl_or_b32 v27, v50, 16, v65
-; GFX11-NEXT: v_lshl_or_b32 v29, v52, 16, v55
-; GFX11-NEXT: v_and_b32_e32 v50, 0xffff, v25
-; GFX11-NEXT: v_and_b32_e32 v52, 0xffff, v22
-; GFX11-NEXT: v_lshl_or_b32 v24, v38, 16, v39
-; GFX11-NEXT: v_lshl_or_b32 v22, v37, 16, v51
-; GFX11-NEXT: v_and_b32_e32 v37, 0xffff, v20
-; GFX11-NEXT: v_and_b32_e32 v38, 0xffff, v18
-; GFX11-NEXT: v_lshl_or_b32 v8, v71, 16, v80
-; GFX11-NEXT: v_lshl_or_b32 v4, v4, 16, v83
-; GFX11-NEXT: v_lshl_or_b32 v31, v31, 16, v70
-; GFX11-NEXT: v_lshl_or_b32 v30, v53, 16, v54
-; GFX11-NEXT: v_lshl_or_b32 v26, v26, 16, v66
-; GFX11-NEXT: v_lshl_or_b32 v25, v48, 16, v49
-; GFX11-NEXT: v_lshl_or_b32 v23, v23, 16, v50
-; GFX11-NEXT: v_lshl_or_b32 v21, v21, 16, v52
-; GFX11-NEXT: v_lshl_or_b32 v20, v35, 16, v36
-; GFX11-NEXT: v_lshl_or_b32 v18, v33, 16, v37
-; GFX11-NEXT: v_lshl_or_b32 v17, v17, 16, v38
-; GFX11-NEXT: s_setpc_b64 s[30:31]
-; GFX11-NEXT: .LBB101_3:
-; GFX11-NEXT: s_branch .LBB101_2
-; GFX11-NEXT: .LBB101_4:
-; GFX11-NEXT: v_dual_mov_b32 v0, s12 :: v_dual_mov_b32 v1, s13
-; GFX11-NEXT: v_dual_mov_b32 v2, s14 :: v_dual_mov_b32 v3, s15
-; GFX11-NEXT: v_dual_mov_b32 v4, s16 :: v_dual_mov_b32 v5, s17
-; GFX11-NEXT: v_dual_mov_b32 v6, s18 :: v_dual_mov_b32 v7, s19
-; GFX11-NEXT: v_dual_mov_b32 v8, s20 :: v_dual_mov_b32 v9, s21
-; GFX11-NEXT: v_dual_mov_b32 v10, s22 :: v_dual_mov_b32 v11, s23
-; GFX11-NEXT: v_dual_mov_b32 v12, s24 :: v_dual_mov_b32 v13, s25
-; GFX11-NEXT: v_dual_mov_b32 v14, s26 :: v_dual_mov_b32 v15, s27
-; GFX11-NEXT: s_setpc_b64 s[30:31]
+; GFX11-TRUE16-LABEL: bitcast_v64bf16_to_v64f16_scalar:
+; GFX11-TRUE16: ; %bb.0:
+; GFX11-TRUE16-NEXT: s_waitcnt vmcnt(0) expcnt(0) lgkmcnt(0)
+; GFX11-TRUE16-NEXT: v_cmp_ne_u32_e32 vcc_lo, 0, v14
+; GFX11-TRUE16-NEXT: v_dual_mov_b32 v31, v13 :: v_dual_mov_b32 v30, v12
+; GFX11-TRUE16-NEXT: v_dual_mov_b32 v29, v11 :: v_dual_mov_b32 v28, v10
+; GFX11-TRUE16-NEXT: v_dual_mov_b32 v27, v9 :: v_dual_mov_b32 v26, v8
+; GFX11-TRUE16-NEXT: v_dual_mov_b32 v25, v7 :: v_dual_mov_b32 v24, v6
+; GFX11-TRUE16-NEXT: v_dual_mov_b32 v23, v5 :: v_dual_mov_b32 v22, v4
+; GFX11-TRUE16-NEXT: v_dual_mov_b32 v21, v3 :: v_dual_mov_b32 v20, v2
+; GFX11-TRUE16-NEXT: v_dual_mov_b32 v19, v1 :: v_dual_mov_b32 v18, v0
+; GFX11-TRUE16-NEXT: v_dual_mov_b32 v16, s28 :: v_dual_mov_b32 v17, s29
+; GFX11-TRUE16-NEXT: s_mov_b32 s15, s3
+; GFX11-TRUE16-NEXT: s_mov_b32 s14, s2
+; GFX11-TRUE16-NEXT: s_mov_b32 s13, s1
+; GFX11-TRUE16-NEXT: s_mov_b32 s12, s0
+; GFX11-TRUE16-NEXT: s_mov_b32 s0, 0
+; GFX11-TRUE16-NEXT: s_and_b32 s1, vcc_lo, exec_lo
+; GFX11-TRUE16-NEXT: s_cbranch_scc0 .LBB101_3
+; GFX11-TRUE16-NEXT: ; %bb.1: ; %Flow
+; GFX11-TRUE16-NEXT: s_and_not1_b32 vcc_lo, exec_lo, s0
+; GFX11-TRUE16-NEXT: s_cbranch_vccnz .LBB101_4
+; GFX11-TRUE16-NEXT: .LBB101_2: ; %cmp.true
+; GFX11-TRUE16-NEXT: v_and_b32_e32 v2, 0xffff0000, v17
+; GFX11-TRUE16-NEXT: v_lshlrev_b32_e32 v1, 16, v16
+; GFX11-TRUE16-NEXT: v_and_b32_e32 v4, 0xffff0000, v18
+; GFX11-TRUE16-NEXT: s_and_b32 s1, s25, 0xffff0000
+; GFX11-TRUE16-NEXT: s_and_b32 s0, s12, 0xffff0000
+; GFX11-TRUE16-NEXT: s_delay_alu instid0(VALU_DEP_2) | instskip(NEXT) | instid1(VALU_DEP_2)
+; GFX11-TRUE16-NEXT: v_dual_add_f32 v2, 0x40c00000, v2 :: v_dual_add_f32 v1, 0x40c00000, v1
+; GFX11-TRUE16-NEXT: v_add_f32_e32 v4, 0x40c00000, v4
+; GFX11-TRUE16-NEXT: v_add_f32_e64 v85, 0x40c00000, s1
+; GFX11-TRUE16-NEXT: s_and_b32 s1, s27, 0xffff0000
+; GFX11-TRUE16-NEXT: s_delay_alu instid0(VALU_DEP_3)
+; GFX11-TRUE16-NEXT: v_bfe_u32 v7, v2, 16, 1
+; GFX11-TRUE16-NEXT: v_bfe_u32 v6, v1, 16, 1
+; GFX11-TRUE16-NEXT: v_or_b32_e32 v9, 0x400000, v1
+; GFX11-TRUE16-NEXT: v_or_b32_e32 v10, 0x400000, v2
+; GFX11-TRUE16-NEXT: v_add_f32_e64 v97, 0x40c00000, s1
+; GFX11-TRUE16-NEXT: v_add_nc_u32_e32 v7, v7, v2
+; GFX11-TRUE16-NEXT: v_add_nc_u32_e32 v6, v6, v1
+; GFX11-TRUE16-NEXT: v_or_b32_e32 v87, 0x400000, v85
+; GFX11-TRUE16-NEXT: s_delay_alu instid0(VALU_DEP_4) | instskip(NEXT) | instid1(VALU_DEP_4)
+; GFX11-TRUE16-NEXT: v_bfe_u32 v98, v97, 16, 1
+; GFX11-TRUE16-NEXT: v_add_nc_u32_e32 v7, 0x7fff, v7
+; GFX11-TRUE16-NEXT: s_delay_alu instid0(VALU_DEP_4) | instskip(SKIP_3) | instid1(VALU_DEP_3)
+; GFX11-TRUE16-NEXT: v_add_nc_u32_e32 v6, 0x7fff, v6
+; GFX11-TRUE16-NEXT: v_and_b32_e32 v0, 0xffff0000, v16
+; GFX11-TRUE16-NEXT: v_lshlrev_b32_e32 v3, 16, v17
+; GFX11-TRUE16-NEXT: v_add_nc_u32_e32 v98, v98, v97
+; GFX11-TRUE16-NEXT: v_add_f32_e32 v0, 0x40c00000, v0
+; GFX11-TRUE16-NEXT: s_delay_alu instid0(VALU_DEP_2) | instskip(NEXT) | instid1(VALU_DEP_2)
+; GFX11-TRUE16-NEXT: v_add_nc_u32_e32 v98, 0x7fff, v98
+; GFX11-TRUE16-NEXT: v_bfe_u32 v5, v0, 16, 1
+; GFX11-TRUE16-NEXT: v_or_b32_e32 v8, 0x400000, v0
+; GFX11-TRUE16-NEXT: v_cmp_u_f32_e32 vcc_lo, v0, v0
+; GFX11-TRUE16-NEXT: s_delay_alu instid0(VALU_DEP_3) | instskip(NEXT) | instid1(VALU_DEP_1)
+; GFX11-TRUE16-NEXT: v_add_nc_u32_e32 v5, v5, v0
+; GFX11-TRUE16-NEXT: v_add_nc_u32_e32 v5, 0x7fff, v5
+; GFX11-TRUE16-NEXT: s_delay_alu instid0(VALU_DEP_1) | instskip(SKIP_1) | instid1(VALU_DEP_2)
+; GFX11-TRUE16-NEXT: v_dual_add_f32 v3, 0x40c00000, v3 :: v_dual_cndmask_b32 v0, v5, v8
+; GFX11-TRUE16-NEXT: v_cmp_u_f32_e32 vcc_lo, v1, v1
+; GFX11-TRUE16-NEXT: v_bfe_u32 v11, v3, 16, 1
+; GFX11-TRUE16-NEXT: v_bfe_u32 v5, v4, 16, 1
+; GFX11-TRUE16-NEXT: s_delay_alu instid0(VALU_DEP_4) | instskip(SKIP_3) | instid1(VALU_DEP_3)
+; GFX11-TRUE16-NEXT: v_lshrrev_b32_e32 v32, 16, v0
+; GFX11-TRUE16-NEXT: v_cndmask_b32_e32 v1, v6, v9, vcc_lo
+; GFX11-TRUE16-NEXT: v_cmp_u_f32_e32 vcc_lo, v2, v2
+; GFX11-TRUE16-NEXT: v_or_b32_e32 v6, 0x400000, v3
+; GFX11-TRUE16-NEXT: v_lshrrev_b32_e32 v16, 16, v1
+; GFX11-TRUE16-NEXT: v_cndmask_b32_e32 v2, v7, v10, vcc_lo
+; GFX11-TRUE16-NEXT: v_cmp_u_f32_e32 vcc_lo, v3, v3
+; GFX11-TRUE16-NEXT: v_or_b32_e32 v7, 0x400000, v4
+; GFX11-TRUE16-NEXT: v_mov_b16_e32 v16.h, v32.l
+; GFX11-TRUE16-NEXT: s_delay_alu instid0(VALU_DEP_4) | instskip(SKIP_4) | instid1(VALU_DEP_4)
+; GFX11-TRUE16-NEXT: v_lshrrev_b32_e32 v33, 16, v2
+; GFX11-TRUE16-NEXT: v_lshlrev_b32_e32 v2, 16, v18
+; GFX11-TRUE16-NEXT: v_add_nc_u32_e32 v1, v5, v4
+; GFX11-TRUE16-NEXT: v_and_b32_e32 v5, 0xffff0000, v19
+; GFX11-TRUE16-NEXT: v_add_nc_u32_e32 v0, v11, v3
+; GFX11-TRUE16-NEXT: v_add_f32_e32 v2, 0x40c00000, v2
+; GFX11-TRUE16-NEXT: s_delay_alu instid0(VALU_DEP_4) | instskip(NEXT) | instid1(VALU_DEP_4)
+; GFX11-TRUE16-NEXT: v_add_nc_u32_e32 v1, 0x7fff, v1
+; GFX11-TRUE16-NEXT: v_add_f32_e32 v5, 0x40c00000, v5
+; GFX11-TRUE16-NEXT: s_delay_alu instid0(VALU_DEP_4) | instskip(NEXT) | instid1(VALU_DEP_4)
+; GFX11-TRUE16-NEXT: v_add_nc_u32_e32 v0, 0x7fff, v0
+; GFX11-TRUE16-NEXT: v_bfe_u32 v3, v2, 16, 1
+; GFX11-TRUE16-NEXT: s_delay_alu instid0(VALU_DEP_2) | instskip(SKIP_3) | instid1(VALU_DEP_4)
+; GFX11-TRUE16-NEXT: v_cndmask_b32_e32 v0, v0, v6, vcc_lo
+; GFX11-TRUE16-NEXT: v_cmp_u_f32_e32 vcc_lo, v4, v4
+; GFX11-TRUE16-NEXT: v_bfe_u32 v4, v5, 16, 1
+; GFX11-TRUE16-NEXT: v_or_b32_e32 v6, 0x400000, v2
+; GFX11-TRUE16-NEXT: v_lshrrev_b32_e32 v17, 16, v0
+; GFX11-TRUE16-NEXT: v_dual_cndmask_b32 v1, v1, v7 :: v_dual_add_nc_u32 v0, v3, v2
+; GFX11-TRUE16-NEXT: v_cmp_u_f32_e32 vcc_lo, v2, v2
+; GFX11-TRUE16-NEXT: v_or_b32_e32 v7, 0x400000, v5
+; GFX11-TRUE16-NEXT: v_mov_b16_e32 v17.h, v33.l
+; GFX11-TRUE16-NEXT: s_delay_alu instid0(VALU_DEP_4) | instskip(SKIP_3) | instid1(VALU_DEP_3)
+; GFX11-TRUE16-NEXT: v_lshrrev_b32_e32 v34, 16, v1
+; GFX11-TRUE16-NEXT: v_add_nc_u32_e32 v1, v4, v5
+; GFX11-TRUE16-NEXT: v_and_b32_e32 v4, 0xffff0000, v20
+; GFX11-TRUE16-NEXT: v_add_nc_u32_e32 v0, 0x7fff, v0
+; GFX11-TRUE16-NEXT: v_add_nc_u32_e32 v1, 0x7fff, v1
+; GFX11-TRUE16-NEXT: s_delay_alu instid0(VALU_DEP_3) | instskip(NEXT) | instid1(VALU_DEP_3)
+; GFX11-TRUE16-NEXT: v_add_f32_e32 v4, 0x40c00000, v4
+; GFX11-TRUE16-NEXT: v_cndmask_b32_e32 v0, v0, v6, vcc_lo
+; GFX11-TRUE16-NEXT: v_lshlrev_b32_e32 v6, 16, v20
+; GFX11-TRUE16-NEXT: v_lshlrev_b32_e32 v3, 16, v19
+; GFX11-TRUE16-NEXT: v_cmp_u_f32_e32 vcc_lo, v5, v5
+; GFX11-TRUE16-NEXT: v_bfe_u32 v2, v4, 16, 1
+; GFX11-TRUE16-NEXT: v_lshrrev_b32_e32 v18, 16, v0
+; GFX11-TRUE16-NEXT: v_mov_b16_e32 v18.h, v34.l
+; GFX11-TRUE16-NEXT: s_delay_alu instid0(VALU_DEP_3) | instskip(SKIP_2) | instid1(VALU_DEP_3)
+; GFX11-TRUE16-NEXT: v_dual_cndmask_b32 v1, v1, v7 :: v_dual_add_nc_u32 v0, v2, v4
+; GFX11-TRUE16-NEXT: v_dual_add_f32 v2, 0x40c00000, v6 :: v_dual_add_f32 v3, 0x40c00000, v3
+; GFX11-TRUE16-NEXT: v_and_b32_e32 v6, 0xffff0000, v21
+; GFX11-TRUE16-NEXT: v_lshrrev_b32_e32 v35, 16, v1
+; GFX11-TRUE16-NEXT: v_or_b32_e32 v7, 0x400000, v4
+; GFX11-TRUE16-NEXT: s_delay_alu instid0(VALU_DEP_4) | instskip(SKIP_1) | instid1(VALU_DEP_2)
+; GFX11-TRUE16-NEXT: v_bfe_u32 v8, v3, 16, 1
+; GFX11-TRUE16-NEXT: v_cmp_u_f32_e32 vcc_lo, v3, v3
+; GFX11-TRUE16-NEXT: v_add_nc_u32_e32 v5, v8, v3
+; GFX11-TRUE16-NEXT: v_bfe_u32 v8, v2, 16, 1
+; GFX11-TRUE16-NEXT: s_delay_alu instid0(VALU_DEP_2) | instskip(SKIP_3) | instid1(VALU_DEP_3)
+; GFX11-TRUE16-NEXT: v_add_nc_u32_e32 v1, 0x7fff, v5
+; GFX11-TRUE16-NEXT: v_or_b32_e32 v5, 0x400000, v3
+; GFX11-TRUE16-NEXT: v_add_f32_e32 v3, 0x40c00000, v6
+; GFX11-TRUE16-NEXT: v_add_nc_u32_e32 v0, 0x7fff, v0
+; GFX11-TRUE16-NEXT: v_dual_cndmask_b32 v1, v1, v5 :: v_dual_and_b32 v6, 0xffff0000, v22
+; GFX11-TRUE16-NEXT: v_cmp_u_f32_e32 vcc_lo, v4, v4
+; GFX11-TRUE16-NEXT: v_add_nc_u32_e32 v4, v8, v2
+; GFX11-TRUE16-NEXT: s_delay_alu instid0(VALU_DEP_3) | instskip(NEXT) | instid1(VALU_DEP_4)
+; GFX11-TRUE16-NEXT: v_dual_add_f32 v6, 0x40c00000, v6 :: v_dual_lshlrev_b32 v5, 16, v21
+; GFX11-TRUE16-NEXT: v_lshrrev_b32_e32 v19, 16, v1
+; GFX11-TRUE16-NEXT: v_cndmask_b32_e32 v0, v0, v7, vcc_lo
+; GFX11-TRUE16-NEXT: v_bfe_u32 v1, v3, 16, 1
+; GFX11-TRUE16-NEXT: s_delay_alu instid0(VALU_DEP_4)
+; GFX11-TRUE16-NEXT: v_add_f32_e32 v5, 0x40c00000, v5
+; GFX11-TRUE16-NEXT: v_cmp_u_f32_e32 vcc_lo, v2, v2
+; GFX11-TRUE16-NEXT: v_lshlrev_b32_e32 v7, 16, v22
+; GFX11-TRUE16-NEXT: v_lshrrev_b32_e32 v36, 16, v0
+; GFX11-TRUE16-NEXT: v_add_nc_u32_e32 v0, 0x7fff, v4
+; GFX11-TRUE16-NEXT: v_or_b32_e32 v4, 0x400000, v2
+; GFX11-TRUE16-NEXT: v_add_nc_u32_e32 v1, v1, v3
+; GFX11-TRUE16-NEXT: v_or_b32_e32 v2, 0x400000, v3
+; GFX11-TRUE16-NEXT: v_mov_b16_e32 v19.h, v35.l
+; GFX11-TRUE16-NEXT: s_delay_alu instid0(VALU_DEP_3) | instskip(SKIP_3) | instid1(VALU_DEP_4)
+; GFX11-TRUE16-NEXT: v_dual_cndmask_b32 v0, v0, v4 :: v_dual_add_nc_u32 v1, 0x7fff, v1
+; GFX11-TRUE16-NEXT: v_bfe_u32 v4, v5, 16, 1
+; GFX11-TRUE16-NEXT: v_cmp_u_f32_e32 vcc_lo, v3, v3
+; GFX11-TRUE16-NEXT: v_add_f32_e32 v3, 0x40c00000, v7
+; GFX11-TRUE16-NEXT: v_lshrrev_b32_e32 v20, 16, v0
+; GFX11-TRUE16-NEXT: v_bfe_u32 v0, v6, 16, 1
+; GFX11-TRUE16-NEXT: v_and_b32_e32 v7, 0xffff0000, v23
+; GFX11-TRUE16-NEXT: v_dual_cndmask_b32 v1, v1, v2 :: v_dual_add_nc_u32 v2, v4, v5
+; GFX11-TRUE16-NEXT: v_bfe_u32 v4, v3, 16, 1
+; GFX11-TRUE16-NEXT: s_delay_alu instid0(VALU_DEP_4) | instskip(SKIP_1) | instid1(VALU_DEP_4)
+; GFX11-TRUE16-NEXT: v_add_nc_u32_e32 v0, v0, v6
+; GFX11-TRUE16-NEXT: v_cmp_u_f32_e32 vcc_lo, v5, v5
+; GFX11-TRUE16-NEXT: v_lshrrev_b32_e32 v37, 16, v1
+; GFX11-TRUE16-NEXT: v_add_nc_u32_e32 v1, 0x7fff, v2
+; GFX11-TRUE16-NEXT: v_or_b32_e32 v2, 0x400000, v5
+; GFX11-TRUE16-NEXT: v_add_nc_u32_e32 v0, 0x7fff, v0
+; GFX11-TRUE16-NEXT: v_add_f32_e32 v5, 0x40c00000, v7
+; GFX11-TRUE16-NEXT: v_add_nc_u32_e32 v4, v4, v3
+; GFX11-TRUE16-NEXT: v_mov_b16_e32 v20.h, v36.l
+; GFX11-TRUE16-NEXT: v_cndmask_b32_e32 v1, v1, v2, vcc_lo
+; GFX11-TRUE16-NEXT: v_or_b32_e32 v2, 0x400000, v6
+; GFX11-TRUE16-NEXT: v_cmp_u_f32_e32 vcc_lo, v6, v6
+; GFX11-TRUE16-NEXT: v_lshlrev_b32_e32 v6, 16, v23
+; GFX11-TRUE16-NEXT: s_delay_alu instid0(VALU_DEP_4)
+; GFX11-TRUE16-NEXT: v_lshrrev_b32_e32 v21, 16, v1
+; GFX11-TRUE16-NEXT: v_bfe_u32 v1, v5, 16, 1
+; GFX11-TRUE16-NEXT: v_cndmask_b32_e32 v0, v0, v2, vcc_lo
+; GFX11-TRUE16-NEXT: v_add_nc_u32_e32 v2, 0x7fff, v4
+; GFX11-TRUE16-NEXT: v_or_b32_e32 v4, 0x400000, v3
+; GFX11-TRUE16-NEXT: v_cmp_u_f32_e32 vcc_lo, v3, v3
+; GFX11-TRUE16-NEXT: v_and_b32_e32 v3, 0xffff0000, v24
+; GFX11-TRUE16-NEXT: v_lshrrev_b32_e32 v38, 16, v0
+; GFX11-TRUE16-NEXT: v_add_nc_u32_e32 v0, v1, v5
+; GFX11-TRUE16-NEXT: v_mov_b16_e32 v21.h, v37.l
+; GFX11-TRUE16-NEXT: v_dual_cndmask_b32 v1, v2, v4 :: v_dual_add_f32 v2, 0x40c00000, v6
+; GFX11-TRUE16-NEXT: v_lshlrev_b32_e32 v4, 16, v24
+; GFX11-TRUE16-NEXT: s_delay_alu instid0(VALU_DEP_4)
+; GFX11-TRUE16-NEXT: v_add_nc_u32_e32 v0, 0x7fff, v0
+; GFX11-TRUE16-NEXT: v_or_b32_e32 v6, 0x400000, v5
+; GFX11-TRUE16-NEXT: v_add_f32_e32 v3, 0x40c00000, v3
+; GFX11-TRUE16-NEXT: v_bfe_u32 v7, v2, 16, 1
+; GFX11-TRUE16-NEXT: v_add_f32_e32 v4, 0x40c00000, v4
+; GFX11-TRUE16-NEXT: v_cmp_u_f32_e32 vcc_lo, v5, v5
+; GFX11-TRUE16-NEXT: v_lshrrev_b32_e32 v22, 16, v1
+; GFX11-TRUE16-NEXT: v_bfe_u32 v1, v3, 16, 1
+; GFX11-TRUE16-NEXT: v_add_nc_u32_e32 v5, v7, v2
+; GFX11-TRUE16-NEXT: v_dual_cndmask_b32 v0, v0, v6 :: v_dual_and_b32 v7, 0xffff0000, v25
+; GFX11-TRUE16-NEXT: v_bfe_u32 v6, v4, 16, 1
+; GFX11-TRUE16-NEXT: s_delay_alu instid0(VALU_DEP_4)
+; GFX11-TRUE16-NEXT: v_add_nc_u32_e32 v1, v1, v3
+; GFX11-TRUE16-NEXT: v_cmp_u_f32_e32 vcc_lo, v2, v2
+; GFX11-TRUE16-NEXT: v_mov_b16_e32 v22.h, v38.l
+; GFX11-TRUE16-NEXT: v_lshrrev_b32_e32 v39, 16, v0
+; GFX11-TRUE16-NEXT: v_add_nc_u32_e32 v0, 0x7fff, v5
+; GFX11-TRUE16-NEXT: v_or_b32_e32 v5, 0x400000, v2
+; GFX11-TRUE16-NEXT: v_add_nc_u32_e32 v6, v6, v4
+; GFX11-TRUE16-NEXT: v_add_nc_u32_e32 v1, 0x7fff, v1
+; GFX11-TRUE16-NEXT: v_or_b32_e32 v2, 0x400000, v3
+; GFX11-TRUE16-NEXT: s_delay_alu instid0(VALU_DEP_3) | instskip(SKIP_3) | instid1(VALU_DEP_4)
+; GFX11-TRUE16-NEXT: v_dual_cndmask_b32 v0, v0, v5 :: v_dual_add_nc_u32 v5, 0x7fff, v6
+; GFX11-TRUE16-NEXT: v_add_f32_e32 v6, 0x40c00000, v7
+; GFX11-TRUE16-NEXT: v_cmp_u_f32_e32 vcc_lo, v3, v3
+; GFX11-TRUE16-NEXT: v_or_b32_e32 v7, 0x400000, v4
+; GFX11-TRUE16-NEXT: v_lshrrev_b32_e32 v23, 16, v0
+; GFX11-TRUE16-NEXT: v_mov_b16_e32 v23.h, v39.l
+; GFX11-TRUE16-NEXT: v_bfe_u32 v3, v6, 16, 1
+; GFX11-TRUE16-NEXT: v_dual_cndmask_b32 v1, v1, v2 :: v_dual_lshlrev_b32 v2, 16, v25
+; GFX11-TRUE16-NEXT: v_cmp_u_f32_e32 vcc_lo, v4, v4
+; GFX11-TRUE16-NEXT: s_delay_alu instid0(VALU_DEP_2) | instskip(NEXT) | instid1(VALU_DEP_3)
+; GFX11-TRUE16-NEXT: v_lshrrev_b32_e32 v48, 16, v1
+; GFX11-TRUE16-NEXT: v_add_f32_e32 v0, 0x40c00000, v2
+; GFX11-TRUE16-NEXT: v_dual_cndmask_b32 v4, v5, v7 :: v_dual_add_nc_u32 v1, v3, v6
+; GFX11-TRUE16-NEXT: v_and_b32_e32 v2, 0xffff0000, v26
+; GFX11-TRUE16-NEXT: v_or_b32_e32 v5, 0x400000, v6
+; GFX11-TRUE16-NEXT: s_delay_alu instid0(VALU_DEP_4) | instskip(NEXT) | instid1(VALU_DEP_4)
+; GFX11-TRUE16-NEXT: v_bfe_u32 v3, v0, 16, 1
+; GFX11-TRUE16-NEXT: v_lshrrev_b32_e32 v24, 16, v4
+; GFX11-TRUE16-NEXT: v_lshlrev_b32_e32 v4, 16, v26
+; GFX11-TRUE16-NEXT: v_add_nc_u32_e32 v1, 0x7fff, v1
+; GFX11-TRUE16-NEXT: s_delay_alu instid0(VALU_DEP_4) | instskip(SKIP_1) | instid1(VALU_DEP_4)
+; GFX11-TRUE16-NEXT: v_dual_add_f32 v2, 0x40c00000, v2 :: v_dual_add_nc_u32 v3, v3, v0
+; GFX11-TRUE16-NEXT: v_cmp_u_f32_e32 vcc_lo, v6, v6
+; GFX11-TRUE16-NEXT: v_add_f32_e32 v4, 0x40c00000, v4
+; GFX11-TRUE16-NEXT: v_or_b32_e32 v6, 0x400000, v0
+; GFX11-TRUE16-NEXT: v_mov_b16_e32 v24.h, v48.l
+; GFX11-TRUE16-NEXT: v_add_nc_u32_e32 v3, 0x7fff, v3
+; GFX11-TRUE16-NEXT: v_cndmask_b32_e32 v1, v1, v5, vcc_lo
+; GFX11-TRUE16-NEXT: v_bfe_u32 v5, v2, 16, 1
+; GFX11-TRUE16-NEXT: v_bfe_u32 v7, v4, 16, 1
+; GFX11-TRUE16-NEXT: v_cmp_u_f32_e32 vcc_lo, v0, v0
+; GFX11-TRUE16-NEXT: v_or_b32_e32 v8, 0x400000, v4
+; GFX11-TRUE16-NEXT: v_lshrrev_b32_e32 v49, 16, v1
+; GFX11-TRUE16-NEXT: v_add_nc_u32_e32 v1, v5, v2
+; GFX11-TRUE16-NEXT: v_dual_cndmask_b32 v0, v3, v6 :: v_dual_and_b32 v5, 0xffff0000, v27
+; GFX11-TRUE16-NEXT: v_lshlrev_b32_e32 v6, 16, v27
+; GFX11-TRUE16-NEXT: v_add_nc_u32_e32 v3, v7, v4
+; GFX11-TRUE16-NEXT: s_delay_alu instid0(VALU_DEP_4)
+; GFX11-TRUE16-NEXT: v_add_nc_u32_e32 v1, 0x7fff, v1
+; GFX11-TRUE16-NEXT: v_or_b32_e32 v7, 0x400000, v2
+; GFX11-TRUE16-NEXT: v_add_f32_e32 v5, 0x40c00000, v5
+; GFX11-TRUE16-NEXT: v_cmp_u_f32_e32 vcc_lo, v2, v2
+; GFX11-TRUE16-NEXT: v_add_f32_e32 v6, 0x40c00000, v6
+; GFX11-TRUE16-NEXT: v_add_nc_u32_e32 v3, 0x7fff, v3
+; GFX11-TRUE16-NEXT: v_lshrrev_b32_e32 v25, 16, v0
+; GFX11-TRUE16-NEXT: v_bfe_u32 v2, v5, 16, 1
+; GFX11-TRUE16-NEXT: v_cndmask_b32_e32 v1, v1, v7, vcc_lo
+; GFX11-TRUE16-NEXT: v_cmp_u_f32_e32 vcc_lo, v4, v4
+; GFX11-TRUE16-NEXT: v_bfe_u32 v4, v6, 16, 1
+; GFX11-TRUE16-NEXT: v_or_b32_e32 v7, 0x400000, v6
+; GFX11-TRUE16-NEXT: v_add_nc_u32_e32 v0, v2, v5
+; GFX11-TRUE16-NEXT: v_lshrrev_b32_e32 v50, 16, v1
+; GFX11-TRUE16-NEXT: v_cndmask_b32_e32 v3, v3, v8, vcc_lo
+; GFX11-TRUE16-NEXT: v_add_nc_u32_e32 v1, v4, v6
+; GFX11-TRUE16-NEXT: v_and_b32_e32 v2, 0xffff0000, v28
+; GFX11-TRUE16-NEXT: v_add_nc_u32_e32 v0, 0x7fff, v0
+; GFX11-TRUE16-NEXT: v_or_b32_e32 v4, 0x400000, v5
+; GFX11-TRUE16-NEXT: v_lshrrev_b32_e32 v26, 16, v3
+; GFX11-TRUE16-NEXT: s_delay_alu instid0(VALU_DEP_4) | instskip(SKIP_3) | instid1(VALU_DEP_4)
+; GFX11-TRUE16-NEXT: v_dual_add_f32 v2, 0x40c00000, v2 :: v_dual_lshlrev_b32 v3, 16, v28
+; GFX11-TRUE16-NEXT: v_cmp_u_f32_e32 vcc_lo, v5, v5
+; GFX11-TRUE16-NEXT: v_add_nc_u32_e32 v1, 0x7fff, v1
+; GFX11-TRUE16-NEXT: v_mov_b16_e32 v26.h, v50.l
+; GFX11-TRUE16-NEXT: v_add_f32_e32 v3, 0x40c00000, v3
+; GFX11-TRUE16-NEXT: v_mov_b16_e32 v25.h, v49.l
+; GFX11-TRUE16-NEXT: v_cndmask_b32_e32 v0, v0, v4, vcc_lo
+; GFX11-TRUE16-NEXT: v_cmp_u_f32_e32 vcc_lo, v6, v6
+; GFX11-TRUE16-NEXT: v_bfe_u32 v4, v2, 16, 1
+; GFX11-TRUE16-NEXT: v_bfe_u32 v5, v3, 16, 1
+; GFX11-TRUE16-NEXT: v_or_b32_e32 v6, 0x400000, v2
+; GFX11-TRUE16-NEXT: v_lshrrev_b32_e32 v51, 16, v0
+; GFX11-TRUE16-NEXT: s_delay_alu instid0(VALU_DEP_4) | instskip(SKIP_3) | instid1(VALU_DEP_4)
+; GFX11-TRUE16-NEXT: v_dual_cndmask_b32 v1, v1, v7 :: v_dual_add_nc_u32 v0, v4, v2
+; GFX11-TRUE16-NEXT: v_cmp_u_f32_e32 vcc_lo, v2, v2
+; GFX11-TRUE16-NEXT: v_and_b32_e32 v4, 0xffff0000, v29
+; GFX11-TRUE16-NEXT: v_or_b32_e32 v7, 0x400000, v3
+; GFX11-TRUE16-NEXT: v_lshrrev_b32_e32 v27, 16, v1
+; GFX11-TRUE16-NEXT: v_add_nc_u32_e32 v1, v5, v3
+; GFX11-TRUE16-NEXT: v_lshlrev_b32_e32 v5, 16, v29
+; GFX11-TRUE16-NEXT: v_add_nc_u32_e32 v0, 0x7fff, v0
+; GFX11-TRUE16-NEXT: v_add_f32_e32 v4, 0x40c00000, v4
+; GFX11-TRUE16-NEXT: v_mov_b16_e32 v27.h, v51.l
+; GFX11-TRUE16-NEXT: s_delay_alu instid0(VALU_DEP_3) | instskip(NEXT) | instid1(VALU_DEP_3)
+; GFX11-TRUE16-NEXT: v_dual_add_f32 v5, 0x40c00000, v5 :: v_dual_cndmask_b32 v0, v0, v6
+; GFX11-TRUE16-NEXT: v_bfe_u32 v8, v4, 16, 1
+; GFX11-TRUE16-NEXT: v_cmp_u_f32_e32 vcc_lo, v3, v3
+; GFX11-TRUE16-NEXT: s_delay_alu instid0(VALU_DEP_3) | instskip(NEXT) | instid1(VALU_DEP_4)
+; GFX11-TRUE16-NEXT: v_bfe_u32 v2, v5, 16, 1
+; GFX11-TRUE16-NEXT: v_lshrrev_b32_e32 v52, 16, v0
+; GFX11-TRUE16-NEXT: s_delay_alu instid0(VALU_DEP_4) | instskip(SKIP_1) | instid1(VALU_DEP_4)
+; GFX11-TRUE16-NEXT: v_add_nc_u32_e32 v3, v8, v4
+; GFX11-TRUE16-NEXT: v_or_b32_e32 v6, 0x400000, v5
+; GFX11-TRUE16-NEXT: v_add_nc_u32_e32 v0, v2, v5
+; GFX11-TRUE16-NEXT: v_and_b32_e32 v2, 0xffff0000, v30
+; GFX11-TRUE16-NEXT: s_delay_alu instid0(VALU_DEP_2) | instskip(NEXT) | instid1(VALU_DEP_2)
+; GFX11-TRUE16-NEXT: v_add_nc_u32_e32 v0, 0x7fff, v0
+; GFX11-TRUE16-NEXT: v_add_f32_e32 v2, 0x40c00000, v2
+; GFX11-TRUE16-NEXT: v_add_nc_u32_e32 v1, 0x7fff, v1
+; GFX11-TRUE16-NEXT: s_delay_alu instid0(VALU_DEP_1) | instskip(SKIP_2) | instid1(VALU_DEP_3)
+; GFX11-TRUE16-NEXT: v_cndmask_b32_e32 v1, v1, v7, vcc_lo
+; GFX11-TRUE16-NEXT: v_cmp_u_f32_e32 vcc_lo, v4, v4
+; GFX11-TRUE16-NEXT: v_lshlrev_b32_e32 v7, 16, v30
+; GFX11-TRUE16-NEXT: v_lshrrev_b32_e32 v28, 16, v1
+; GFX11-TRUE16-NEXT: v_add_nc_u32_e32 v1, 0x7fff, v3
+; GFX11-TRUE16-NEXT: v_or_b32_e32 v3, 0x400000, v4
+; GFX11-TRUE16-NEXT: s_delay_alu instid0(VALU_DEP_4) | instskip(SKIP_1) | instid1(VALU_DEP_3)
+; GFX11-TRUE16-NEXT: v_add_f32_e32 v4, 0x40c00000, v7
+; GFX11-TRUE16-NEXT: v_mov_b16_e32 v28.h, v52.l
+; GFX11-TRUE16-NEXT: v_cndmask_b32_e32 v1, v1, v3, vcc_lo
+; GFX11-TRUE16-NEXT: v_cmp_u_f32_e32 vcc_lo, v5, v5
+; GFX11-TRUE16-NEXT: v_bfe_u32 v3, v2, 16, 1
+; GFX11-TRUE16-NEXT: v_and_b32_e32 v5, 0xffff0000, v31
+; GFX11-TRUE16-NEXT: s_delay_alu instid0(VALU_DEP_4) | instskip(SKIP_1) | instid1(VALU_DEP_4)
+; GFX11-TRUE16-NEXT: v_lshrrev_b32_e32 v53, 16, v1
+; GFX11-TRUE16-NEXT: v_cndmask_b32_e32 v0, v0, v6, vcc_lo
+; GFX11-TRUE16-NEXT: v_add_nc_u32_e32 v3, v3, v2
+; GFX11-TRUE16-NEXT: s_delay_alu instid0(VALU_DEP_4)
+; GFX11-TRUE16-NEXT: v_add_f32_e32 v1, 0x40c00000, v5
+; GFX11-TRUE16-NEXT: v_or_b32_e32 v5, 0x400000, v2
+; GFX11-TRUE16-NEXT: v_lshlrev_b32_e32 v6, 16, v31
+; GFX11-TRUE16-NEXT: v_lshrrev_b32_e32 v29, 16, v0
+; GFX11-TRUE16-NEXT: v_bfe_u32 v0, v4, 16, 1
+; GFX11-TRUE16-NEXT: v_add_nc_u32_e32 v3, 0x7fff, v3
+; GFX11-TRUE16-NEXT: v_cmp_u_f32_e32 vcc_lo, v2, v2
+; GFX11-TRUE16-NEXT: v_bfe_u32 v7, v1, 16, 1
+; GFX11-TRUE16-NEXT: v_mov_b16_e32 v29.h, v53.l
+; GFX11-TRUE16-NEXT: v_add_nc_u32_e32 v0, v0, v4
+; GFX11-TRUE16-NEXT: v_dual_cndmask_b32 v2, v3, v5 :: v_dual_add_f32 v3, 0x40c00000, v6
+; GFX11-TRUE16-NEXT: v_or_b32_e32 v5, 0x400000, v4
+; GFX11-TRUE16-NEXT: s_delay_alu instid0(VALU_DEP_3)
+; GFX11-TRUE16-NEXT: v_add_nc_u32_e32 v0, 0x7fff, v0
+; GFX11-TRUE16-NEXT: v_cmp_u_f32_e32 vcc_lo, v4, v4
+; GFX11-TRUE16-NEXT: v_add_nc_u32_e32 v6, v7, v1
+; GFX11-TRUE16-NEXT: v_lshrrev_b32_e32 v54, 16, v2
+; GFX11-TRUE16-NEXT: v_bfe_u32 v2, v3, 16, 1
+; GFX11-TRUE16-NEXT: v_cndmask_b32_e32 v0, v0, v5, vcc_lo
+; GFX11-TRUE16-NEXT: s_delay_alu instid0(VALU_DEP_4) | instskip(SKIP_1) | instid1(VALU_DEP_4)
+; GFX11-TRUE16-NEXT: v_add_nc_u32_e32 v4, 0x7fff, v6
+; GFX11-TRUE16-NEXT: v_or_b32_e32 v5, 0x400000, v1
+; GFX11-TRUE16-NEXT: v_add_nc_u32_e32 v2, v2, v3
+; GFX11-TRUE16-NEXT: v_cmp_u_f32_e32 vcc_lo, v1, v1
+; GFX11-TRUE16-NEXT: v_lshrrev_b32_e32 v30, 16, v0
+; GFX11-TRUE16-NEXT: v_add_f32_e64 v0, 0x40c00000, s0
+; GFX11-TRUE16-NEXT: s_lshl_b32 s0, s12, 16
+; GFX11-TRUE16-NEXT: v_dual_cndmask_b32 v1, v4, v5 :: v_dual_add_nc_u32 v2, 0x7fff, v2
+; GFX11-TRUE16-NEXT: v_or_b32_e32 v4, 0x400000, v3
+; GFX11-TRUE16-NEXT: s_delay_alu instid0(VALU_DEP_3)
+; GFX11-TRUE16-NEXT: v_bfe_u32 v5, v0, 16, 1
+; GFX11-TRUE16-NEXT: v_cmp_u_f32_e32 vcc_lo, v3, v3
+; GFX11-TRUE16-NEXT: v_mov_b16_e32 v30.h, v54.l
+; GFX11-TRUE16-NEXT: v_lshrrev_b32_e32 v55, 16, v1
+; GFX11-TRUE16-NEXT: v_add_f32_e64 v1, 0x40c00000, s0
+; GFX11-TRUE16-NEXT: v_add_nc_u32_e32 v3, v5, v0
+; GFX11-TRUE16-NEXT: v_cndmask_b32_e32 v2, v2, v4, vcc_lo
+; GFX11-TRUE16-NEXT: s_and_b32 s0, s13, 0xffff0000
+; GFX11-TRUE16-NEXT: v_cmp_u_f32_e32 vcc_lo, v0, v0
+; GFX11-TRUE16-NEXT: v_bfe_u32 v4, v1, 16, 1
+; GFX11-TRUE16-NEXT: v_add_f32_e64 v5, 0x40c00000, s0
+; GFX11-TRUE16-NEXT: v_lshrrev_b32_e32 v31, 16, v2
+; GFX11-TRUE16-NEXT: v_add_nc_u32_e32 v2, 0x7fff, v3
+; GFX11-TRUE16-NEXT: v_or_b32_e32 v3, 0x400000, v0
+; GFX11-TRUE16-NEXT: s_lshl_b32 s0, s13, 16
+; GFX11-TRUE16-NEXT: v_add_nc_u32_e32 v4, v4, v1
+; GFX11-TRUE16-NEXT: v_bfe_u32 v6, v5, 16, 1
+; GFX11-TRUE16-NEXT: v_mov_b16_e32 v31.h, v55.l
+; GFX11-TRUE16-NEXT: v_cndmask_b32_e32 v0, v2, v3, vcc_lo
+; GFX11-TRUE16-NEXT: v_add_f32_e64 v2, 0x40c00000, s0
+; GFX11-TRUE16-NEXT: s_and_b32 s0, s14, 0xffff0000
+; GFX11-TRUE16-NEXT: v_add_nc_u32_e32 v3, 0x7fff, v4
+; GFX11-TRUE16-NEXT: v_or_b32_e32 v4, 0x400000, v1
+; GFX11-TRUE16-NEXT: v_add_nc_u32_e32 v6, v6, v5
+; GFX11-TRUE16-NEXT: v_lshrrev_b32_e32 v64, 16, v0
+; GFX11-TRUE16-NEXT: v_bfe_u32 v0, v2, 16, 1
+; GFX11-TRUE16-NEXT: v_cmp_u_f32_e32 vcc_lo, v1, v1
+; GFX11-TRUE16-NEXT: v_add_f32_e64 v7, 0x40c00000, s0
+; GFX11-TRUE16-NEXT: s_lshl_b32 s0, s14, 16
+; GFX11-TRUE16-NEXT: v_cndmask_b32_e32 v1, v3, v4, vcc_lo
+; GFX11-TRUE16-NEXT: v_add_nc_u32_e32 v3, 0x7fff, v6
+; GFX11-TRUE16-NEXT: v_or_b32_e32 v4, 0x400000, v5
+; GFX11-TRUE16-NEXT: v_add_nc_u32_e32 v6, v0, v2
+; GFX11-TRUE16-NEXT: v_cmp_u_f32_e32 vcc_lo, v5, v5
+; GFX11-TRUE16-NEXT: v_bfe_u32 v5, v7, 16, 1
+; GFX11-TRUE16-NEXT: v_lshrrev_b32_e32 v0, 16, v1
+; GFX11-TRUE16-NEXT: v_mov_b16_e32 v0.h, v64.l
+; GFX11-TRUE16-NEXT: v_cndmask_b32_e32 v1, v3, v4, vcc_lo
+; GFX11-TRUE16-NEXT: v_add_nc_u32_e32 v3, 0x7fff, v6
+; GFX11-TRUE16-NEXT: v_or_b32_e32 v4, 0x400000, v2
+; GFX11-TRUE16-NEXT: v_add_f32_e64 v6, 0x40c00000, s0
+; GFX11-TRUE16-NEXT: v_cmp_u_f32_e32 vcc_lo, v2, v2
+; GFX11-TRUE16-NEXT: v_add_nc_u32_e32 v2, v5, v7
+; GFX11-TRUE16-NEXT: s_and_b32 s0, s15, 0xffff0000
+; GFX11-TRUE16-NEXT: v_lshrrev_b32_e32 v65, 16, v1
+; GFX11-TRUE16-NEXT: v_or_b32_e32 v5, 0x400000, v7
+; GFX11-TRUE16-NEXT: v_cndmask_b32_e32 v1, v3, v4, vcc_lo
+; GFX11-TRUE16-NEXT: v_bfe_u32 v3, v6, 16, 1
+; GFX11-TRUE16-NEXT: v_add_f32_e64 v4, 0x40c00000, s0
+; GFX11-TRUE16-NEXT: v_add_nc_u32_e32 v2, 0x7fff, v2
+; GFX11-TRUE16-NEXT: v_cmp_u_f32_e32 vcc_lo, v7, v7
+; GFX11-TRUE16-NEXT: s_lshl_b32 s0, s15, 16
+; GFX11-TRUE16-NEXT: v_add_nc_u32_e32 v3, v3, v6
+; GFX11-TRUE16-NEXT: v_bfe_u32 v8, v4, 16, 1
+; GFX11-TRUE16-NEXT: v_or_b32_e32 v7, 0x400000, v6
+; GFX11-TRUE16-NEXT: v_cndmask_b32_e32 v2, v2, v5, vcc_lo
+; GFX11-TRUE16-NEXT: v_add_f32_e64 v5, 0x40c00000, s0
+; GFX11-TRUE16-NEXT: v_add_nc_u32_e32 v3, 0x7fff, v3
+; GFX11-TRUE16-NEXT: v_add_nc_u32_e32 v8, v8, v4
+; GFX11-TRUE16-NEXT: v_cmp_u_f32_e32 vcc_lo, v6, v6
+; GFX11-TRUE16-NEXT: v_lshrrev_b32_e32 v66, 16, v2
+; GFX11-TRUE16-NEXT: v_bfe_u32 v2, v5, 16, 1
+; GFX11-TRUE16-NEXT: s_and_b32 s0, s16, 0xffff0000
+; GFX11-TRUE16-NEXT: v_dual_cndmask_b32 v3, v3, v7 :: v_dual_add_nc_u32 v6, 0x7fff, v8
+; GFX11-TRUE16-NEXT: v_or_b32_e32 v7, 0x400000, v4
+; GFX11-TRUE16-NEXT: s_delay_alu instid0(VALU_DEP_3)
+; GFX11-TRUE16-NEXT: v_add_nc_u32_e32 v8, v2, v5
+; GFX11-TRUE16-NEXT: v_add_f32_e64 v9, 0x40c00000, s0
+; GFX11-TRUE16-NEXT: v_cmp_u_f32_e32 vcc_lo, v4, v4
+; GFX11-TRUE16-NEXT: v_lshrrev_b32_e32 v2, 16, v3
+; GFX11-TRUE16-NEXT: s_lshl_b32 s0, s16, 16
+; GFX11-TRUE16-NEXT: v_add_nc_u32_e32 v4, 0x7fff, v8
+; GFX11-TRUE16-NEXT: v_add_f32_e64 v8, 0x40c00000, s0
+; GFX11-TRUE16-NEXT: v_cndmask_b32_e32 v3, v6, v7, vcc_lo
+; GFX11-TRUE16-NEXT: v_or_b32_e32 v6, 0x400000, v5
+; GFX11-TRUE16-NEXT: v_bfe_u32 v7, v9, 16, 1
+; GFX11-TRUE16-NEXT: v_cmp_u_f32_e32 vcc_lo, v5, v5
+; GFX11-TRUE16-NEXT: s_and_b32 s0, s17, 0xffff0000
+; GFX11-TRUE16-NEXT: v_lshrrev_b32_e32 v67, 16, v3
+; GFX11-TRUE16-NEXT: v_bfe_u32 v5, v8, 16, 1
+; GFX11-TRUE16-NEXT: v_lshrrev_b32_e32 v1, 16, v1
+; GFX11-TRUE16-NEXT: v_dual_cndmask_b32 v3, v4, v6 :: v_dual_add_nc_u32 v4, v7, v9
+; GFX11-TRUE16-NEXT: v_add_f32_e64 v6, 0x40c00000, s0
+; GFX11-TRUE16-NEXT: v_or_b32_e32 v7, 0x400000, v9
+; GFX11-TRUE16-NEXT: v_cmp_u_f32_e32 vcc_lo, v9, v9
+; GFX11-TRUE16-NEXT: s_lshl_b32 s0, s17, 16
+; GFX11-TRUE16-NEXT: v_add_nc_u32_e32 v4, 0x7fff, v4
+; GFX11-TRUE16-NEXT: v_add_nc_u32_e32 v5, v5, v8
+; GFX11-TRUE16-NEXT: v_bfe_u32 v10, v6, 16, 1
+; GFX11-TRUE16-NEXT: v_or_b32_e32 v9, 0x400000, v8
+; GFX11-TRUE16-NEXT: v_lshrrev_b32_e32 v3, 16, v3
+; GFX11-TRUE16-NEXT: v_cndmask_b32_e32 v4, v4, v7, vcc_lo
+; GFX11-TRUE16-NEXT: v_add_f32_e64 v7, 0x40c00000, s0
+; GFX11-TRUE16-NEXT: v_add_nc_u32_e32 v5, 0x7fff, v5
+; GFX11-TRUE16-NEXT: v_add_nc_u32_e32 v10, v10, v6
+; GFX11-TRUE16-NEXT: v_cmp_u_f32_e32 vcc_lo, v8, v8
+; GFX11-TRUE16-NEXT: v_lshrrev_b32_e32 v68, 16, v4
+; GFX11-TRUE16-NEXT: v_bfe_u32 v4, v7, 16, 1
+; GFX11-TRUE16-NEXT: s_and_b32 s0, s18, 0xffff0000
+; GFX11-TRUE16-NEXT: v_dual_cndmask_b32 v5, v5, v9 :: v_dual_add_nc_u32 v8, 0x7fff, v10
+; GFX11-TRUE16-NEXT: v_or_b32_e32 v9, 0x400000, v6
+; GFX11-TRUE16-NEXT: s_delay_alu instid0(VALU_DEP_3)
+; GFX11-TRUE16-NEXT: v_add_nc_u32_e32 v10, v4, v7
+; GFX11-TRUE16-NEXT: v_add_f32_e64 v11, 0x40c00000, s0
+; GFX11-TRUE16-NEXT: v_cmp_u_f32_e32 vcc_lo, v6, v6
+; GFX11-TRUE16-NEXT: v_lshrrev_b32_e32 v4, 16, v5
+; GFX11-TRUE16-NEXT: s_lshl_b32 s0, s18, 16
+; GFX11-TRUE16-NEXT: v_add_nc_u32_e32 v6, 0x7fff, v10
+; GFX11-TRUE16-NEXT: v_add_f32_e64 v10, 0x40c00000, s0
+; GFX11-TRUE16-NEXT: v_cndmask_b32_e32 v5, v8, v9, vcc_lo
+; GFX11-TRUE16-NEXT: v_or_b32_e32 v8, 0x400000, v7
+; GFX11-TRUE16-NEXT: v_bfe_u32 v9, v11, 16, 1
+; GFX11-TRUE16-NEXT: v_cmp_u_f32_e32 vcc_lo, v7, v7
+; GFX11-TRUE16-NEXT: s_and_b32 s0, s19, 0xffff0000
+; GFX11-TRUE16-NEXT: v_lshrrev_b32_e32 v69, 16, v5
+; GFX11-TRUE16-NEXT: v_bfe_u32 v7, v10, 16, 1
+; GFX11-TRUE16-NEXT: v_mov_b16_e32 v4.h, v68.l
+; GFX11-TRUE16-NEXT: v_dual_cndmask_b32 v5, v6, v8 :: v_dual_add_nc_u32 v6, v9, v11
+; GFX11-TRUE16-NEXT: v_add_f32_e64 v8, 0x40c00000, s0
+; GFX11-TRUE16-NEXT: v_or_b32_e32 v9, 0x400000, v11
+; GFX11-TRUE16-NEXT: v_cmp_u_f32_e32 vcc_lo, v11, v11
+; GFX11-TRUE16-NEXT: s_lshl_b32 s0, s19, 16
+; GFX11-TRUE16-NEXT: v_add_nc_u32_e32 v6, 0x7fff, v6
+; GFX11-TRUE16-NEXT: v_add_nc_u32_e32 v7, v7, v10
+; GFX11-TRUE16-NEXT: v_bfe_u32 v12, v8, 16, 1
+; GFX11-TRUE16-NEXT: v_or_b32_e32 v11, 0x400000, v10
+; GFX11-TRUE16-NEXT: v_lshrrev_b32_e32 v5, 16, v5
+; GFX11-TRUE16-NEXT: v_cndmask_b32_e32 v6, v6, v9, vcc_lo
+; GFX11-TRUE16-NEXT: v_add_f32_e64 v9, 0x40c00000, s0
+; GFX11-TRUE16-NEXT: v_add_nc_u32_e32 v7, 0x7fff, v7
+; GFX11-TRUE16-NEXT: v_add_nc_u32_e32 v12, v12, v8
+; GFX11-TRUE16-NEXT: v_cmp_u_f32_e32 vcc_lo, v10, v10
+; GFX11-TRUE16-NEXT: v_lshrrev_b32_e32 v70, 16, v6
+; GFX11-TRUE16-NEXT: v_bfe_u32 v6, v9, 16, 1
+; GFX11-TRUE16-NEXT: s_and_b32 s0, s20, 0xffff0000
+; GFX11-TRUE16-NEXT: v_dual_cndmask_b32 v7, v7, v11 :: v_dual_add_nc_u32 v10, 0x7fff, v12
+; GFX11-TRUE16-NEXT: v_or_b32_e32 v11, 0x400000, v8
+; GFX11-TRUE16-NEXT: s_delay_alu instid0(VALU_DEP_3)
+; GFX11-TRUE16-NEXT: v_add_nc_u32_e32 v12, v6, v9
+; GFX11-TRUE16-NEXT: v_add_f32_e64 v13, 0x40c00000, s0
+; GFX11-TRUE16-NEXT: v_cmp_u_f32_e32 vcc_lo, v8, v8
+; GFX11-TRUE16-NEXT: v_lshrrev_b32_e32 v6, 16, v7
+; GFX11-TRUE16-NEXT: s_lshl_b32 s0, s20, 16
+; GFX11-TRUE16-NEXT: v_add_nc_u32_e32 v8, 0x7fff, v12
+; GFX11-TRUE16-NEXT: v_add_f32_e64 v12, 0x40c00000, s0
+; GFX11-TRUE16-NEXT: v_cndmask_b32_e32 v7, v10, v11, vcc_lo
+; GFX11-TRUE16-NEXT: v_or_b32_e32 v10, 0x400000, v9
+; GFX11-TRUE16-NEXT: v_bfe_u32 v11, v13, 16, 1
+; GFX11-TRUE16-NEXT: v_cmp_u_f32_e32 vcc_lo, v9, v9
+; GFX11-TRUE16-NEXT: s_and_b32 s0, s21, 0xffff0000
+; GFX11-TRUE16-NEXT: v_lshrrev_b32_e32 v71, 16, v7
+; GFX11-TRUE16-NEXT: v_bfe_u32 v9, v12, 16, 1
+; GFX11-TRUE16-NEXT: v_mov_b16_e32 v6.h, v70.l
+; GFX11-TRUE16-NEXT: v_dual_cndmask_b32 v7, v8, v10 :: v_dual_add_nc_u32 v8, v11, v13
+; GFX11-TRUE16-NEXT: v_add_f32_e64 v10, 0x40c00000, s0
+; GFX11-TRUE16-NEXT: v_or_b32_e32 v11, 0x400000, v13
+; GFX11-TRUE16-NEXT: v_cmp_u_f32_e32 vcc_lo, v13, v13
+; GFX11-TRUE16-NEXT: s_lshl_b32 s0, s21, 16
+; GFX11-TRUE16-NEXT: v_add_nc_u32_e32 v8, 0x7fff, v8
+; GFX11-TRUE16-NEXT: v_add_nc_u32_e32 v9, v9, v12
+; GFX11-TRUE16-NEXT: v_bfe_u32 v14, v10, 16, 1
+; GFX11-TRUE16-NEXT: v_or_b32_e32 v13, 0x400000, v12
+; GFX11-TRUE16-NEXT: v_lshrrev_b32_e32 v7, 16, v7
+; GFX11-TRUE16-NEXT: v_cndmask_b32_e32 v8, v8, v11, vcc_lo
+; GFX11-TRUE16-NEXT: v_add_f32_e64 v11, 0x40c00000, s0
+; GFX11-TRUE16-NEXT: v_add_nc_u32_e32 v9, 0x7fff, v9
+; GFX11-TRUE16-NEXT: v_add_nc_u32_e32 v14, v14, v10
+; GFX11-TRUE16-NEXT: v_cmp_u_f32_e32 vcc_lo, v12, v12
+; GFX11-TRUE16-NEXT: v_lshrrev_b32_e32 v80, 16, v8
+; GFX11-TRUE16-NEXT: v_bfe_u32 v8, v11, 16, 1
+; GFX11-TRUE16-NEXT: s_and_b32 s0, s22, 0xffff0000
+; GFX11-TRUE16-NEXT: v_dual_cndmask_b32 v9, v9, v13 :: v_dual_add_nc_u32 v12, 0x7fff, v14
+; GFX11-TRUE16-NEXT: v_or_b32_e32 v13, 0x400000, v10
+; GFX11-TRUE16-NEXT: s_delay_alu instid0(VALU_DEP_3)
+; GFX11-TRUE16-NEXT: v_add_nc_u32_e32 v14, v8, v11
+; GFX11-TRUE16-NEXT: v_add_f32_e64 v15, 0x40c00000, s0
+; GFX11-TRUE16-NEXT: v_cmp_u_f32_e32 vcc_lo, v10, v10
+; GFX11-TRUE16-NEXT: v_lshrrev_b32_e32 v8, 16, v9
+; GFX11-TRUE16-NEXT: s_lshl_b32 s0, s22, 16
+; GFX11-TRUE16-NEXT: v_add_nc_u32_e32 v10, 0x7fff, v14
+; GFX11-TRUE16-NEXT: v_add_f32_e64 v14, 0x40c00000, s0
+; GFX11-TRUE16-NEXT: v_cndmask_b32_e32 v9, v12, v13, vcc_lo
+; GFX11-TRUE16-NEXT: v_or_b32_e32 v12, 0x400000, v11
+; GFX11-TRUE16-NEXT: v_bfe_u32 v13, v15, 16, 1
+; GFX11-TRUE16-NEXT: v_cmp_u_f32_e32 vcc_lo, v11, v11
+; GFX11-TRUE16-NEXT: s_and_b32 s0, s23, 0xffff0000
+; GFX11-TRUE16-NEXT: v_lshrrev_b32_e32 v81, 16, v9
+; GFX11-TRUE16-NEXT: v_bfe_u32 v11, v14, 16, 1
+; GFX11-TRUE16-NEXT: v_mov_b16_e32 v8.h, v80.l
+; GFX11-TRUE16-NEXT: v_dual_cndmask_b32 v9, v10, v12 :: v_dual_add_nc_u32 v10, v13, v15
+; GFX11-TRUE16-NEXT: v_add_f32_e64 v12, 0x40c00000, s0
+; GFX11-TRUE16-NEXT: v_or_b32_e32 v13, 0x400000, v15
+; GFX11-TRUE16-NEXT: v_cmp_u_f32_e32 vcc_lo, v15, v15
+; GFX11-TRUE16-NEXT: s_lshl_b32 s0, s23, 16
+; GFX11-TRUE16-NEXT: v_add_nc_u32_e32 v10, 0x7fff, v10
+; GFX11-TRUE16-NEXT: v_add_nc_u32_e32 v11, v11, v14
+; GFX11-TRUE16-NEXT: v_bfe_u32 v82, v12, 16, 1
+; GFX11-TRUE16-NEXT: v_or_b32_e32 v15, 0x400000, v14
+; GFX11-TRUE16-NEXT: v_lshrrev_b32_e32 v9, 16, v9
+; GFX11-TRUE16-NEXT: v_cndmask_b32_e32 v10, v10, v13, vcc_lo
+; GFX11-TRUE16-NEXT: v_add_f32_e64 v13, 0x40c00000, s0
+; GFX11-TRUE16-NEXT: v_add_nc_u32_e32 v11, 0x7fff, v11
+; GFX11-TRUE16-NEXT: v_add_nc_u32_e32 v82, v82, v12
+; GFX11-TRUE16-NEXT: v_cmp_u_f32_e32 vcc_lo, v14, v14
+; GFX11-TRUE16-NEXT: v_lshrrev_b32_e32 v83, 16, v10
+; GFX11-TRUE16-NEXT: v_bfe_u32 v10, v13, 16, 1
+; GFX11-TRUE16-NEXT: s_and_b32 s0, s24, 0xffff0000
+; GFX11-TRUE16-NEXT: v_dual_cndmask_b32 v11, v11, v15 :: v_dual_add_nc_u32 v14, 0x7fff, v82
+; GFX11-TRUE16-NEXT: v_or_b32_e32 v15, 0x400000, v12
+; GFX11-TRUE16-NEXT: s_delay_alu instid0(VALU_DEP_3)
+; GFX11-TRUE16-NEXT: v_add_nc_u32_e32 v82, v10, v13
+; GFX11-TRUE16-NEXT: v_add_f32_e64 v84, 0x40c00000, s0
+; GFX11-TRUE16-NEXT: v_cmp_u_f32_e32 vcc_lo, v12, v12
+; GFX11-TRUE16-NEXT: s_lshl_b32 s0, s24, 16
+; GFX11-TRUE16-NEXT: v_lshrrev_b32_e32 v10, 16, v11
+; GFX11-TRUE16-NEXT: v_add_nc_u32_e32 v12, 0x7fff, v82
+; GFX11-TRUE16-NEXT: v_add_f32_e64 v82, 0x40c00000, s0
+; GFX11-TRUE16-NEXT: v_cndmask_b32_e32 v11, v14, v15, vcc_lo
+; GFX11-TRUE16-NEXT: v_or_b32_e32 v14, 0x400000, v13
+; GFX11-TRUE16-NEXT: v_bfe_u32 v15, v84, 16, 1
+; GFX11-TRUE16-NEXT: v_cmp_u_f32_e32 vcc_lo, v13, v13
+; GFX11-TRUE16-NEXT: s_lshl_b32 s0, s25, 16
+; GFX11-TRUE16-NEXT: v_lshrrev_b32_e32 v86, 16, v11
+; GFX11-TRUE16-NEXT: v_mov_b16_e32 v10.h, v83.l
+; GFX11-TRUE16-NEXT: v_dual_cndmask_b32 v12, v12, v14 :: v_dual_add_nc_u32 v13, v15, v84
+; GFX11-TRUE16-NEXT: v_bfe_u32 v14, v82, 16, 1
+; GFX11-TRUE16-NEXT: v_bfe_u32 v15, v85, 16, 1
+; GFX11-TRUE16-NEXT: v_cmp_u_f32_e32 vcc_lo, v84, v84
+; GFX11-TRUE16-NEXT: v_mov_b16_e32 v9.h, v81.l
+; GFX11-TRUE16-NEXT: v_lshrrev_b32_e32 v11, 16, v12
+; GFX11-TRUE16-NEXT: v_add_nc_u32_e32 v12, 0x7fff, v13
+; GFX11-TRUE16-NEXT: v_or_b32_e32 v13, 0x400000, v84
+; GFX11-TRUE16-NEXT: v_add_nc_u32_e32 v14, v14, v82
+; GFX11-TRUE16-NEXT: v_add_nc_u32_e32 v15, v15, v85
+; GFX11-TRUE16-NEXT: v_or_b32_e32 v84, 0x400000, v82
+; GFX11-TRUE16-NEXT: v_mov_b16_e32 v11.h, v86.l
+; GFX11-TRUE16-NEXT: v_cndmask_b32_e32 v12, v12, v13, vcc_lo
+; GFX11-TRUE16-NEXT: v_add_f32_e64 v13, 0x40c00000, s0
+; GFX11-TRUE16-NEXT: v_add_nc_u32_e32 v14, 0x7fff, v14
+; GFX11-TRUE16-NEXT: v_cmp_u_f32_e32 vcc_lo, v82, v82
+; GFX11-TRUE16-NEXT: s_and_b32 s0, s26, 0xffff0000
+; GFX11-TRUE16-NEXT: v_add_nc_u32_e32 v15, 0x7fff, v15
+; GFX11-TRUE16-NEXT: v_bfe_u32 v96, v13, 16, 1
+; GFX11-TRUE16-NEXT: v_add_f32_e64 v82, 0x40c00000, s0
+; GFX11-TRUE16-NEXT: v_cndmask_b32_e32 v14, v14, v84, vcc_lo
+; GFX11-TRUE16-NEXT: v_cmp_u_f32_e32 vcc_lo, v85, v85
+; GFX11-TRUE16-NEXT: s_lshl_b32 s0, s26, 16
+; GFX11-TRUE16-NEXT: v_add_nc_u32_e32 v84, v96, v13
+; GFX11-TRUE16-NEXT: v_lshrrev_b32_e32 v85, 16, v12
+; GFX11-TRUE16-NEXT: v_lshrrev_b32_e32 v12, 16, v14
+; GFX11-TRUE16-NEXT: v_cndmask_b32_e32 v15, v15, v87, vcc_lo
+; GFX11-TRUE16-NEXT: v_bfe_u32 v87, v82, 16, 1
+; GFX11-TRUE16-NEXT: v_add_nc_u32_e32 v14, 0x7fff, v84
+; GFX11-TRUE16-NEXT: v_cmp_u_f32_e32 vcc_lo, v13, v13
+; GFX11-TRUE16-NEXT: v_or_b32_e32 v99, 0x400000, v82
+; GFX11-TRUE16-NEXT: v_lshrrev_b32_e32 v96, 16, v15
+; GFX11-TRUE16-NEXT: v_or_b32_e32 v15, 0x400000, v13
+; GFX11-TRUE16-NEXT: v_add_nc_u32_e32 v84, v87, v82
+; GFX11-TRUE16-NEXT: v_add_f32_e64 v87, 0x40c00000, s0
+; GFX11-TRUE16-NEXT: s_lshl_b32 s0, s27, 16
+; GFX11-TRUE16-NEXT: v_mov_b16_e32 v12.h, v85.l
+; GFX11-TRUE16-NEXT: v_cndmask_b32_e32 v13, v14, v15, vcc_lo
+; GFX11-TRUE16-NEXT: v_add_f32_e64 v14, 0x40c00000, s0
+; GFX11-TRUE16-NEXT: v_bfe_u32 v15, v87, 16, 1
+; GFX11-TRUE16-NEXT: v_add_nc_u32_e32 v84, 0x7fff, v84
+; GFX11-TRUE16-NEXT: v_cmp_u_f32_e32 vcc_lo, v82, v82
+; GFX11-TRUE16-NEXT: v_lshrrev_b32_e32 v13, 16, v13
+; GFX11-TRUE16-NEXT: v_bfe_u32 v100, v14, 16, 1
+; GFX11-TRUE16-NEXT: v_add_nc_u32_e32 v15, v15, v87
+; GFX11-TRUE16-NEXT: v_or_b32_e32 v101, 0x400000, v14
+; GFX11-TRUE16-NEXT: v_cndmask_b32_e32 v82, v84, v99, vcc_lo
+; GFX11-TRUE16-NEXT: v_or_b32_e32 v99, 0x400000, v97
+; GFX11-TRUE16-NEXT: v_cmp_u_f32_e32 vcc_lo, v97, v97
+; GFX11-TRUE16-NEXT: v_add_nc_u32_e32 v84, v100, v14
+; GFX11-TRUE16-NEXT: v_add_nc_u32_e32 v15, 0x7fff, v15
+; GFX11-TRUE16-NEXT: v_or_b32_e32 v100, 0x400000, v87
+; GFX11-TRUE16-NEXT: v_lshrrev_b32_e32 v82, 16, v82
+; GFX11-TRUE16-NEXT: v_cndmask_b32_e32 v97, v98, v99, vcc_lo
+; GFX11-TRUE16-NEXT: v_cmp_u_f32_e32 vcc_lo, v87, v87
+; GFX11-TRUE16-NEXT: v_add_nc_u32_e32 v84, 0x7fff, v84
+; GFX11-TRUE16-NEXT: v_mov_b16_e32 v13.h, v96.l
+; GFX11-TRUE16-NEXT: v_mov_b16_e32 v7.h, v71.l
+; GFX11-TRUE16-NEXT: v_lshrrev_b32_e32 v87, 16, v97
+; GFX11-TRUE16-NEXT: v_cndmask_b32_e32 v15, v15, v100, vcc_lo
+; GFX11-TRUE16-NEXT: v_cmp_u_f32_e32 vcc_lo, v14, v14
+; GFX11-TRUE16-NEXT: v_mov_b16_e32 v5.h, v69.l
+; GFX11-TRUE16-NEXT: v_mov_b16_e32 v3.h, v67.l
+; GFX11-TRUE16-NEXT: v_mov_b16_e32 v2.h, v66.l
+; GFX11-TRUE16-NEXT: v_lshrrev_b32_e32 v14, 16, v15
+; GFX11-TRUE16-NEXT: v_cndmask_b32_e32 v84, v84, v101, vcc_lo
+; GFX11-TRUE16-NEXT: v_mov_b16_e32 v14.h, v82.l
+; GFX11-TRUE16-NEXT: v_mov_b16_e32 v1.h, v65.l
+; GFX11-TRUE16-NEXT: s_delay_alu instid0(VALU_DEP_3)
+; GFX11-TRUE16-NEXT: v_lshrrev_b32_e32 v15, 16, v84
+; GFX11-TRUE16-NEXT: v_mov_b16_e32 v15.h, v87.l
+; GFX11-TRUE16-NEXT: s_setpc_b64 s[30:31]
+; GFX11-TRUE16-NEXT: .LBB101_3:
+; GFX11-TRUE16-NEXT: s_branch .LBB101_2
+; GFX11-TRUE16-NEXT: .LBB101_4:
+; GFX11-TRUE16-NEXT: v_dual_mov_b32 v0, s12 :: v_dual_mov_b32 v1, s13
+; GFX11-TRUE16-NEXT: v_dual_mov_b32 v2, s14 :: v_dual_mov_b32 v3, s15
+; GFX11-TRUE16-NEXT: v_dual_mov_b32 v4, s16 :: v_dual_mov_b32 v5, s17
+; GFX11-TRUE16-NEXT: v_dual_mov_b32 v6, s18 :: v_dual_mov_b32 v7, s19
+; GFX11-TRUE16-NEXT: v_dual_mov_b32 v8, s20 :: v_dual_mov_b32 v9, s21
+; GFX11-TRUE16-NEXT: v_dual_mov_b32 v10, s22 :: v_dual_mov_b32 v11, s23
+; GFX11-TRUE16-NEXT: v_dual_mov_b32 v12, s24 :: v_dual_mov_b32 v13, s25
+; GFX11-TRUE16-NEXT: v_dual_mov_b32 v14, s26 :: v_dual_mov_b32 v15, s27
+; GFX11-TRUE16-NEXT: s_setpc_b64 s[30:31]
+;
+; GFX11-FAKE16-LABEL: bitcast_v64bf16_to_v64f16_scalar:
+; GFX11-FAKE16: ; %bb.0:
+; GFX11-FAKE16-NEXT: s_waitcnt vmcnt(0) expcnt(0) lgkmcnt(0)
+; GFX11-FAKE16-NEXT: v_cmp_ne_u32_e32 vcc_lo, 0, v14
+; GFX11-FAKE16-NEXT: v_dual_mov_b32 v31, v13 :: v_dual_mov_b32 v30, v12
+; GFX11-FAKE16-NEXT: v_dual_mov_b32 v29, v11 :: v_dual_mov_b32 v28, v10
+; GFX11-FAKE16-NEXT: v_dual_mov_b32 v27, v9 :: v_dual_mov_b32 v26, v8
+; GFX11-FAKE16-NEXT: v_dual_mov_b32 v25, v7 :: v_dual_mov_b32 v24, v6
+; GFX11-FAKE16-NEXT: v_dual_mov_b32 v23, v5 :: v_dual_mov_b32 v22, v4
+; GFX11-FAKE16-NEXT: v_dual_mov_b32 v21, v3 :: v_dual_mov_b32 v20, v2
+; GFX11-FAKE16-NEXT: v_dual_mov_b32 v19, v1 :: v_dual_mov_b32 v18, v0
+; GFX11-FAKE16-NEXT: v_dual_mov_b32 v16, s28 :: v_dual_mov_b32 v17, s29
+; GFX11-FAKE16-NEXT: s_mov_b32 s15, s3
+; GFX11-FAKE16-NEXT: s_mov_b32 s14, s2
+; GFX11-FAKE16-NEXT: s_mov_b32 s13, s1
+; GFX11-FAKE16-NEXT: s_mov_b32 s12, s0
+; GFX11-FAKE16-NEXT: s_mov_b32 s0, 0
+; GFX11-FAKE16-NEXT: s_and_b32 s1, vcc_lo, exec_lo
+; GFX11-FAKE16-NEXT: s_cbranch_scc0 .LBB101_3
+; GFX11-FAKE16-NEXT: ; %bb.1: ; %Flow
+; GFX11-FAKE16-NEXT: s_and_not1_b32 vcc_lo, exec_lo, s0
+; GFX11-FAKE16-NEXT: s_cbranch_vccnz .LBB101_4
+; GFX11-FAKE16-NEXT: .LBB101_2: ; %cmp.true
+; GFX11-FAKE16-NEXT: v_and_b32_e32 v2, 0xffff0000, v17
+; GFX11-FAKE16-NEXT: v_lshlrev_b32_e32 v1, 16, v16
+; GFX11-FAKE16-NEXT: v_and_b32_e32 v4, 0xffff0000, v18
+; GFX11-FAKE16-NEXT: s_and_b32 s0, s12, 0xffff0000
+; GFX11-FAKE16-NEXT: s_delay_alu instid0(VALU_DEP_2) | instskip(NEXT) | instid1(VALU_DEP_2)
+; GFX11-FAKE16-NEXT: v_dual_add_f32 v2, 0x40c00000, v2 :: v_dual_add_f32 v1, 0x40c00000, v1
+; GFX11-FAKE16-NEXT: v_add_f32_e32 v4, 0x40c00000, v4
+; GFX11-FAKE16-NEXT: s_delay_alu instid0(VALU_DEP_2) | instskip(NEXT) | instid1(VALU_DEP_3)
+; GFX11-FAKE16-NEXT: v_bfe_u32 v7, v2, 16, 1
+; GFX11-FAKE16-NEXT: v_bfe_u32 v6, v1, 16, 1
+; GFX11-FAKE16-NEXT: v_or_b32_e32 v9, 0x400000, v1
+; GFX11-FAKE16-NEXT: v_or_b32_e32 v10, 0x400000, v2
+; GFX11-FAKE16-NEXT: s_delay_alu instid0(VALU_DEP_4) | instskip(NEXT) | instid1(VALU_DEP_4)
+; GFX11-FAKE16-NEXT: v_add_nc_u32_e32 v7, v7, v2
+; GFX11-FAKE16-NEXT: v_add_nc_u32_e32 v6, v6, v1
+; GFX11-FAKE16-NEXT: s_delay_alu instid0(VALU_DEP_2) | instskip(NEXT) | instid1(VALU_DEP_2)
+; GFX11-FAKE16-NEXT: v_add_nc_u32_e32 v7, 0x7fff, v7
+; GFX11-FAKE16-NEXT: v_add_nc_u32_e32 v6, 0x7fff, v6
+; GFX11-FAKE16-NEXT: v_and_b32_e32 v0, 0xffff0000, v16
+; GFX11-FAKE16-NEXT: s_delay_alu instid0(VALU_DEP_1) | instskip(NEXT) | instid1(VALU_DEP_1)
+; GFX11-FAKE16-NEXT: v_dual_add_f32 v0, 0x40c00000, v0 :: v_dual_lshlrev_b32 v3, 16, v17
+; GFX11-FAKE16-NEXT: v_bfe_u32 v5, v0, 16, 1
+; GFX11-FAKE16-NEXT: v_or_b32_e32 v8, 0x400000, v0
+; GFX11-FAKE16-NEXT: v_cmp_u_f32_e32 vcc_lo, v0, v0
+; GFX11-FAKE16-NEXT: s_delay_alu instid0(VALU_DEP_3) | instskip(NEXT) | instid1(VALU_DEP_1)
+; GFX11-FAKE16-NEXT: v_add_nc_u32_e32 v5, v5, v0
+; GFX11-FAKE16-NEXT: v_add_nc_u32_e32 v5, 0x7fff, v5
+; GFX11-FAKE16-NEXT: s_delay_alu instid0(VALU_DEP_1) | instskip(SKIP_1) | instid1(VALU_DEP_2)
+; GFX11-FAKE16-NEXT: v_dual_add_f32 v3, 0x40c00000, v3 :: v_dual_cndmask_b32 v0, v5, v8
+; GFX11-FAKE16-NEXT: v_cmp_u_f32_e32 vcc_lo, v1, v1
+; GFX11-FAKE16-NEXT: v_bfe_u32 v11, v3, 16, 1
+; GFX11-FAKE16-NEXT: v_bfe_u32 v5, v4, 16, 1
+; GFX11-FAKE16-NEXT: s_delay_alu instid0(VALU_DEP_4) | instskip(SKIP_3) | instid1(VALU_DEP_3)
+; GFX11-FAKE16-NEXT: v_lshrrev_b32_e32 v16, 16, v0
+; GFX11-FAKE16-NEXT: v_cndmask_b32_e32 v1, v6, v9, vcc_lo
+; GFX11-FAKE16-NEXT: v_cmp_u_f32_e32 vcc_lo, v2, v2
+; GFX11-FAKE16-NEXT: v_or_b32_e32 v6, 0x400000, v3
+; GFX11-FAKE16-NEXT: v_lshrrev_b32_e32 v32, 16, v1
+; GFX11-FAKE16-NEXT: v_cndmask_b32_e32 v2, v7, v10, vcc_lo
+; GFX11-FAKE16-NEXT: v_cmp_u_f32_e32 vcc_lo, v3, v3
+; GFX11-FAKE16-NEXT: v_or_b32_e32 v7, 0x400000, v4
+; GFX11-FAKE16-NEXT: s_delay_alu instid0(VALU_DEP_4) | instskip(NEXT) | instid1(VALU_DEP_4)
+; GFX11-FAKE16-NEXT: v_and_b32_e32 v32, 0xffff, v32
+; GFX11-FAKE16-NEXT: v_lshrrev_b32_e32 v17, 16, v2
+; GFX11-FAKE16-NEXT: v_lshlrev_b32_e32 v2, 16, v18
+; GFX11-FAKE16-NEXT: v_add_nc_u32_e32 v1, v5, v4
+; GFX11-FAKE16-NEXT: v_and_b32_e32 v5, 0xffff0000, v19
+; GFX11-FAKE16-NEXT: v_add_nc_u32_e32 v0, v11, v3
+; GFX11-FAKE16-NEXT: v_lshl_or_b32 v16, v16, 16, v32
+; GFX11-FAKE16-NEXT: v_add_f32_e32 v2, 0x40c00000, v2
+; GFX11-FAKE16-NEXT: v_add_nc_u32_e32 v1, 0x7fff, v1
+; GFX11-FAKE16-NEXT: v_add_f32_e32 v5, 0x40c00000, v5
+; GFX11-FAKE16-NEXT: v_add_nc_u32_e32 v0, 0x7fff, v0
+; GFX11-FAKE16-NEXT: s_delay_alu instid0(VALU_DEP_4) | instskip(NEXT) | instid1(VALU_DEP_2)
+; GFX11-FAKE16-NEXT: v_bfe_u32 v3, v2, 16, 1
+; GFX11-FAKE16-NEXT: v_cndmask_b32_e32 v0, v0, v6, vcc_lo
+; GFX11-FAKE16-NEXT: v_cmp_u_f32_e32 vcc_lo, v4, v4
+; GFX11-FAKE16-NEXT: v_bfe_u32 v4, v5, 16, 1
+; GFX11-FAKE16-NEXT: v_or_b32_e32 v6, 0x400000, v2
+; GFX11-FAKE16-NEXT: s_delay_alu instid0(VALU_DEP_4) | instskip(SKIP_3) | instid1(VALU_DEP_3)
+; GFX11-FAKE16-NEXT: v_lshrrev_b32_e32 v18, 16, v0
+; GFX11-FAKE16-NEXT: v_dual_cndmask_b32 v1, v1, v7 :: v_dual_add_nc_u32 v0, v3, v2
+; GFX11-FAKE16-NEXT: v_cmp_u_f32_e32 vcc_lo, v2, v2
+; GFX11-FAKE16-NEXT: v_or_b32_e32 v7, 0x400000, v5
+; GFX11-FAKE16-NEXT: v_lshrrev_b32_e32 v33, 16, v1
+; GFX11-FAKE16-NEXT: v_add_nc_u32_e32 v1, v4, v5
+; GFX11-FAKE16-NEXT: v_and_b32_e32 v4, 0xffff0000, v20
+; GFX11-FAKE16-NEXT: v_add_nc_u32_e32 v0, 0x7fff, v0
+; GFX11-FAKE16-NEXT: s_delay_alu instid0(VALU_DEP_3) | instskip(NEXT) | instid1(VALU_DEP_3)
+; GFX11-FAKE16-NEXT: v_add_nc_u32_e32 v1, 0x7fff, v1
+; GFX11-FAKE16-NEXT: v_add_f32_e32 v4, 0x40c00000, v4
+; GFX11-FAKE16-NEXT: s_delay_alu instid0(VALU_DEP_3)
+; GFX11-FAKE16-NEXT: v_cndmask_b32_e32 v0, v0, v6, vcc_lo
+; GFX11-FAKE16-NEXT: v_lshlrev_b32_e32 v6, 16, v20
+; GFX11-FAKE16-NEXT: v_lshlrev_b32_e32 v3, 16, v19
+; GFX11-FAKE16-NEXT: v_cmp_u_f32_e32 vcc_lo, v5, v5
+; GFX11-FAKE16-NEXT: v_bfe_u32 v2, v4, 16, 1
+; GFX11-FAKE16-NEXT: v_lshrrev_b32_e32 v20, 16, v0
+; GFX11-FAKE16-NEXT: s_delay_alu instid0(VALU_DEP_2) | instskip(SKIP_2) | instid1(VALU_DEP_3)
+; GFX11-FAKE16-NEXT: v_dual_cndmask_b32 v1, v1, v7 :: v_dual_add_nc_u32 v0, v2, v4
+; GFX11-FAKE16-NEXT: v_dual_add_f32 v2, 0x40c00000, v6 :: v_dual_add_f32 v3, 0x40c00000, v3
+; GFX11-FAKE16-NEXT: v_and_b32_e32 v6, 0xffff0000, v21
+; GFX11-FAKE16-NEXT: v_lshrrev_b32_e32 v19, 16, v1
+; GFX11-FAKE16-NEXT: v_or_b32_e32 v7, 0x400000, v4
+; GFX11-FAKE16-NEXT: s_delay_alu instid0(VALU_DEP_4) | instskip(SKIP_1) | instid1(VALU_DEP_2)
+; GFX11-FAKE16-NEXT: v_bfe_u32 v8, v3, 16, 1
+; GFX11-FAKE16-NEXT: v_cmp_u_f32_e32 vcc_lo, v3, v3
+; GFX11-FAKE16-NEXT: v_add_nc_u32_e32 v5, v8, v3
+; GFX11-FAKE16-NEXT: v_bfe_u32 v8, v2, 16, 1
+; GFX11-FAKE16-NEXT: s_delay_alu instid0(VALU_DEP_2) | instskip(SKIP_3) | instid1(VALU_DEP_3)
+; GFX11-FAKE16-NEXT: v_add_nc_u32_e32 v1, 0x7fff, v5
+; GFX11-FAKE16-NEXT: v_or_b32_e32 v5, 0x400000, v3
+; GFX11-FAKE16-NEXT: v_add_f32_e32 v3, 0x40c00000, v6
+; GFX11-FAKE16-NEXT: v_add_nc_u32_e32 v0, 0x7fff, v0
+; GFX11-FAKE16-NEXT: v_dual_cndmask_b32 v1, v1, v5 :: v_dual_and_b32 v6, 0xffff0000, v22
+; GFX11-FAKE16-NEXT: v_cmp_u_f32_e32 vcc_lo, v4, v4
+; GFX11-FAKE16-NEXT: v_add_nc_u32_e32 v4, v8, v2
+; GFX11-FAKE16-NEXT: s_delay_alu instid0(VALU_DEP_3) | instskip(NEXT) | instid1(VALU_DEP_4)
+; GFX11-FAKE16-NEXT: v_dual_add_f32 v6, 0x40c00000, v6 :: v_dual_lshlrev_b32 v5, 16, v21
+; GFX11-FAKE16-NEXT: v_lshrrev_b32_e32 v34, 16, v1
+; GFX11-FAKE16-NEXT: v_cndmask_b32_e32 v0, v0, v7, vcc_lo
+; GFX11-FAKE16-NEXT: v_bfe_u32 v1, v3, 16, 1
+; GFX11-FAKE16-NEXT: s_delay_alu instid0(VALU_DEP_4)
+; GFX11-FAKE16-NEXT: v_add_f32_e32 v5, 0x40c00000, v5
+; GFX11-FAKE16-NEXT: v_cmp_u_f32_e32 vcc_lo, v2, v2
+; GFX11-FAKE16-NEXT: v_lshlrev_b32_e32 v7, 16, v22
+; GFX11-FAKE16-NEXT: v_lshrrev_b32_e32 v35, 16, v0
+; GFX11-FAKE16-NEXT: v_add_nc_u32_e32 v0, 0x7fff, v4
+; GFX11-FAKE16-NEXT: v_or_b32_e32 v4, 0x400000, v2
+; GFX11-FAKE16-NEXT: v_add_nc_u32_e32 v1, v1, v3
+; GFX11-FAKE16-NEXT: v_or_b32_e32 v2, 0x400000, v3
+; GFX11-FAKE16-NEXT: v_and_b32_e32 v34, 0xffff, v34
+; GFX11-FAKE16-NEXT: s_delay_alu instid0(VALU_DEP_3) | instskip(SKIP_3) | instid1(VALU_DEP_4)
+; GFX11-FAKE16-NEXT: v_dual_cndmask_b32 v0, v0, v4 :: v_dual_add_nc_u32 v1, 0x7fff, v1
+; GFX11-FAKE16-NEXT: v_bfe_u32 v4, v5, 16, 1
+; GFX11-FAKE16-NEXT: v_cmp_u_f32_e32 vcc_lo, v3, v3
+; GFX11-FAKE16-NEXT: v_add_f32_e32 v3, 0x40c00000, v7
+; GFX11-FAKE16-NEXT: v_lshrrev_b32_e32 v36, 16, v0
+; GFX11-FAKE16-NEXT: v_bfe_u32 v0, v6, 16, 1
+; GFX11-FAKE16-NEXT: v_and_b32_e32 v7, 0xffff0000, v23
+; GFX11-FAKE16-NEXT: v_dual_cndmask_b32 v1, v1, v2 :: v_dual_add_nc_u32 v2, v4, v5
+; GFX11-FAKE16-NEXT: v_bfe_u32 v4, v3, 16, 1
+; GFX11-FAKE16-NEXT: s_delay_alu instid0(VALU_DEP_4) | instskip(SKIP_1) | instid1(VALU_DEP_4)
+; GFX11-FAKE16-NEXT: v_add_nc_u32_e32 v0, v0, v6
+; GFX11-FAKE16-NEXT: v_cmp_u_f32_e32 vcc_lo, v5, v5
+; GFX11-FAKE16-NEXT: v_lshrrev_b32_e32 v21, 16, v1
+; GFX11-FAKE16-NEXT: v_add_nc_u32_e32 v1, 0x7fff, v2
+; GFX11-FAKE16-NEXT: v_or_b32_e32 v2, 0x400000, v5
+; GFX11-FAKE16-NEXT: v_add_nc_u32_e32 v0, 0x7fff, v0
+; GFX11-FAKE16-NEXT: v_add_f32_e32 v5, 0x40c00000, v7
+; GFX11-FAKE16-NEXT: v_add_nc_u32_e32 v4, v4, v3
+; GFX11-FAKE16-NEXT: s_delay_alu instid0(VALU_DEP_4)
+; GFX11-FAKE16-NEXT: v_dual_cndmask_b32 v1, v1, v2 :: v_dual_and_b32 v36, 0xffff, v36
+; GFX11-FAKE16-NEXT: v_or_b32_e32 v2, 0x400000, v6
+; GFX11-FAKE16-NEXT: v_cmp_u_f32_e32 vcc_lo, v6, v6
+; GFX11-FAKE16-NEXT: v_lshlrev_b32_e32 v6, 16, v23
+; GFX11-FAKE16-NEXT: v_lshl_or_b32 v19, v19, 16, v34
+; GFX11-FAKE16-NEXT: v_lshrrev_b32_e32 v22, 16, v1
+; GFX11-FAKE16-NEXT: v_bfe_u32 v1, v5, 16, 1
+; GFX11-FAKE16-NEXT: v_cndmask_b32_e32 v0, v0, v2, vcc_lo
+; GFX11-FAKE16-NEXT: v_add_nc_u32_e32 v2, 0x7fff, v4
+; GFX11-FAKE16-NEXT: v_or_b32_e32 v4, 0x400000, v3
+; GFX11-FAKE16-NEXT: v_cmp_u_f32_e32 vcc_lo, v3, v3
+; GFX11-FAKE16-NEXT: v_and_b32_e32 v3, 0xffff0000, v24
+; GFX11-FAKE16-NEXT: v_lshrrev_b32_e32 v37, 16, v0
+; GFX11-FAKE16-NEXT: s_delay_alu instid0(VALU_DEP_4) | instskip(SKIP_2) | instid1(VALU_DEP_3)
+; GFX11-FAKE16-NEXT: v_dual_cndmask_b32 v1, v2, v4 :: v_dual_add_nc_u32 v0, v1, v5
+; GFX11-FAKE16-NEXT: v_add_f32_e32 v2, 0x40c00000, v6
+; GFX11-FAKE16-NEXT: v_lshlrev_b32_e32 v4, 16, v24
+; GFX11-FAKE16-NEXT: v_add_nc_u32_e32 v0, 0x7fff, v0
+; GFX11-FAKE16-NEXT: v_or_b32_e32 v6, 0x400000, v5
+; GFX11-FAKE16-NEXT: v_add_f32_e32 v3, 0x40c00000, v3
+; GFX11-FAKE16-NEXT: v_bfe_u32 v7, v2, 16, 1
+; GFX11-FAKE16-NEXT: v_add_f32_e32 v4, 0x40c00000, v4
+; GFX11-FAKE16-NEXT: v_cmp_u_f32_e32 vcc_lo, v5, v5
+; GFX11-FAKE16-NEXT: v_lshrrev_b32_e32 v24, 16, v1
+; GFX11-FAKE16-NEXT: v_bfe_u32 v1, v3, 16, 1
+; GFX11-FAKE16-NEXT: v_add_nc_u32_e32 v5, v7, v2
+; GFX11-FAKE16-NEXT: v_dual_cndmask_b32 v0, v0, v6 :: v_dual_and_b32 v7, 0xffff0000, v25
+; GFX11-FAKE16-NEXT: v_bfe_u32 v6, v4, 16, 1
+; GFX11-FAKE16-NEXT: s_delay_alu instid0(VALU_DEP_4) | instskip(SKIP_1) | instid1(VALU_DEP_4)
+; GFX11-FAKE16-NEXT: v_add_nc_u32_e32 v1, v1, v3
+; GFX11-FAKE16-NEXT: v_cmp_u_f32_e32 vcc_lo, v2, v2
+; GFX11-FAKE16-NEXT: v_lshrrev_b32_e32 v23, 16, v0
+; GFX11-FAKE16-NEXT: v_add_nc_u32_e32 v0, 0x7fff, v5
+; GFX11-FAKE16-NEXT: v_or_b32_e32 v5, 0x400000, v2
+; GFX11-FAKE16-NEXT: v_add_nc_u32_e32 v6, v6, v4
+; GFX11-FAKE16-NEXT: v_add_nc_u32_e32 v1, 0x7fff, v1
+; GFX11-FAKE16-NEXT: v_or_b32_e32 v2, 0x400000, v3
+; GFX11-FAKE16-NEXT: s_delay_alu instid0(VALU_DEP_3) | instskip(SKIP_3) | instid1(VALU_DEP_3)
+; GFX11-FAKE16-NEXT: v_dual_cndmask_b32 v0, v0, v5 :: v_dual_add_nc_u32 v5, 0x7fff, v6
+; GFX11-FAKE16-NEXT: v_add_f32_e32 v6, 0x40c00000, v7
+; GFX11-FAKE16-NEXT: v_cmp_u_f32_e32 vcc_lo, v3, v3
+; GFX11-FAKE16-NEXT: v_or_b32_e32 v7, 0x400000, v4
+; GFX11-FAKE16-NEXT: v_bfe_u32 v3, v6, 16, 1
+; GFX11-FAKE16-NEXT: v_dual_cndmask_b32 v1, v1, v2 :: v_dual_lshlrev_b32 v2, 16, v25
+; GFX11-FAKE16-NEXT: v_cmp_u_f32_e32 vcc_lo, v4, v4
+; GFX11-FAKE16-NEXT: v_lshrrev_b32_e32 v25, 16, v0
+; GFX11-FAKE16-NEXT: s_delay_alu instid0(VALU_DEP_3) | instskip(NEXT) | instid1(VALU_DEP_4)
+; GFX11-FAKE16-NEXT: v_lshrrev_b32_e32 v38, 16, v1
+; GFX11-FAKE16-NEXT: v_add_f32_e32 v0, 0x40c00000, v2
+; GFX11-FAKE16-NEXT: v_dual_cndmask_b32 v4, v5, v7 :: v_dual_add_nc_u32 v1, v3, v6
+; GFX11-FAKE16-NEXT: v_and_b32_e32 v2, 0xffff0000, v26
+; GFX11-FAKE16-NEXT: v_or_b32_e32 v5, 0x400000, v6
+; GFX11-FAKE16-NEXT: s_delay_alu instid0(VALU_DEP_4) | instskip(NEXT) | instid1(VALU_DEP_4)
+; GFX11-FAKE16-NEXT: v_bfe_u32 v3, v0, 16, 1
+; GFX11-FAKE16-NEXT: v_lshrrev_b32_e32 v39, 16, v4
+; GFX11-FAKE16-NEXT: v_lshlrev_b32_e32 v4, 16, v26
+; GFX11-FAKE16-NEXT: v_add_nc_u32_e32 v1, 0x7fff, v1
+; GFX11-FAKE16-NEXT: s_delay_alu instid0(VALU_DEP_4) | instskip(SKIP_1) | instid1(VALU_DEP_4)
+; GFX11-FAKE16-NEXT: v_dual_add_f32 v2, 0x40c00000, v2 :: v_dual_add_nc_u32 v3, v3, v0
+; GFX11-FAKE16-NEXT: v_cmp_u_f32_e32 vcc_lo, v6, v6
+; GFX11-FAKE16-NEXT: v_add_f32_e32 v4, 0x40c00000, v4
+; GFX11-FAKE16-NEXT: v_or_b32_e32 v6, 0x400000, v0
+; GFX11-FAKE16-NEXT: v_and_b32_e32 v39, 0xffff, v39
+; GFX11-FAKE16-NEXT: v_add_nc_u32_e32 v3, 0x7fff, v3
+; GFX11-FAKE16-NEXT: v_cndmask_b32_e32 v1, v1, v5, vcc_lo
+; GFX11-FAKE16-NEXT: v_bfe_u32 v5, v2, 16, 1
+; GFX11-FAKE16-NEXT: v_bfe_u32 v7, v4, 16, 1
+; GFX11-FAKE16-NEXT: v_cmp_u_f32_e32 vcc_lo, v0, v0
+; GFX11-FAKE16-NEXT: v_or_b32_e32 v8, 0x400000, v4
+; GFX11-FAKE16-NEXT: v_lshrrev_b32_e32 v48, 16, v1
+; GFX11-FAKE16-NEXT: v_add_nc_u32_e32 v1, v5, v2
+; GFX11-FAKE16-NEXT: v_dual_cndmask_b32 v0, v3, v6 :: v_dual_and_b32 v5, 0xffff0000, v27
+; GFX11-FAKE16-NEXT: v_lshlrev_b32_e32 v6, 16, v27
+; GFX11-FAKE16-NEXT: v_add_nc_u32_e32 v3, v7, v4
+; GFX11-FAKE16-NEXT: s_delay_alu instid0(VALU_DEP_4)
+; GFX11-FAKE16-NEXT: v_add_nc_u32_e32 v1, 0x7fff, v1
+; GFX11-FAKE16-NEXT: v_or_b32_e32 v7, 0x400000, v2
+; GFX11-FAKE16-NEXT: v_add_f32_e32 v5, 0x40c00000, v5
+; GFX11-FAKE16-NEXT: v_cmp_u_f32_e32 vcc_lo, v2, v2
+; GFX11-FAKE16-NEXT: v_add_f32_e32 v6, 0x40c00000, v6
+; GFX11-FAKE16-NEXT: v_add_nc_u32_e32 v3, 0x7fff, v3
+; GFX11-FAKE16-NEXT: v_lshrrev_b32_e32 v49, 16, v0
+; GFX11-FAKE16-NEXT: v_bfe_u32 v2, v5, 16, 1
+; GFX11-FAKE16-NEXT: v_cndmask_b32_e32 v1, v1, v7, vcc_lo
+; GFX11-FAKE16-NEXT: v_cmp_u_f32_e32 vcc_lo, v4, v4
+; GFX11-FAKE16-NEXT: v_bfe_u32 v4, v6, 16, 1
+; GFX11-FAKE16-NEXT: v_or_b32_e32 v7, 0x400000, v6
+; GFX11-FAKE16-NEXT: v_add_nc_u32_e32 v0, v2, v5
+; GFX11-FAKE16-NEXT: v_lshrrev_b32_e32 v26, 16, v1
+; GFX11-FAKE16-NEXT: v_cndmask_b32_e32 v3, v3, v8, vcc_lo
+; GFX11-FAKE16-NEXT: v_add_nc_u32_e32 v1, v4, v6
+; GFX11-FAKE16-NEXT: v_and_b32_e32 v2, 0xffff0000, v28
+; GFX11-FAKE16-NEXT: v_add_nc_u32_e32 v0, 0x7fff, v0
+; GFX11-FAKE16-NEXT: v_or_b32_e32 v4, 0x400000, v5
+; GFX11-FAKE16-NEXT: v_lshrrev_b32_e32 v27, 16, v3
+; GFX11-FAKE16-NEXT: s_delay_alu instid0(VALU_DEP_4) | instskip(SKIP_3) | instid1(VALU_DEP_4)
+; GFX11-FAKE16-NEXT: v_dual_add_f32 v2, 0x40c00000, v2 :: v_dual_lshlrev_b32 v3, 16, v28
+; GFX11-FAKE16-NEXT: v_cmp_u_f32_e32 vcc_lo, v5, v5
+; GFX11-FAKE16-NEXT: v_add_nc_u32_e32 v1, 0x7fff, v1
+; GFX11-FAKE16-NEXT: v_and_b32_e32 v49, 0xffff, v49
+; GFX11-FAKE16-NEXT: v_dual_add_f32 v3, 0x40c00000, v3 :: v_dual_cndmask_b32 v0, v0, v4
+; GFX11-FAKE16-NEXT: v_cmp_u_f32_e32 vcc_lo, v6, v6
+; GFX11-FAKE16-NEXT: v_bfe_u32 v4, v2, 16, 1
+; GFX11-FAKE16-NEXT: s_delay_alu instid0(VALU_DEP_3) | instskip(SKIP_2) | instid1(VALU_DEP_4)
+; GFX11-FAKE16-NEXT: v_bfe_u32 v5, v3, 16, 1
+; GFX11-FAKE16-NEXT: v_or_b32_e32 v6, 0x400000, v2
+; GFX11-FAKE16-NEXT: v_lshrrev_b32_e32 v50, 16, v0
+; GFX11-FAKE16-NEXT: v_dual_cndmask_b32 v1, v1, v7 :: v_dual_add_nc_u32 v0, v4, v2
+; GFX11-FAKE16-NEXT: v_cmp_u_f32_e32 vcc_lo, v2, v2
+; GFX11-FAKE16-NEXT: v_and_b32_e32 v4, 0xffff0000, v29
+; GFX11-FAKE16-NEXT: v_or_b32_e32 v7, 0x400000, v3
+; GFX11-FAKE16-NEXT: s_delay_alu instid0(VALU_DEP_4) | instskip(SKIP_3) | instid1(VALU_DEP_2)
+; GFX11-FAKE16-NEXT: v_lshrrev_b32_e32 v28, 16, v1
+; GFX11-FAKE16-NEXT: v_add_nc_u32_e32 v1, v5, v3
+; GFX11-FAKE16-NEXT: v_lshlrev_b32_e32 v5, 16, v29
+; GFX11-FAKE16-NEXT: v_add_nc_u32_e32 v0, 0x7fff, v0
+; GFX11-FAKE16-NEXT: v_dual_add_f32 v4, 0x40c00000, v4 :: v_dual_add_f32 v5, 0x40c00000, v5
+; GFX11-FAKE16-NEXT: s_delay_alu instid0(VALU_DEP_2) | instskip(NEXT) | instid1(VALU_DEP_2)
+; GFX11-FAKE16-NEXT: v_cndmask_b32_e32 v0, v0, v6, vcc_lo
+; GFX11-FAKE16-NEXT: v_bfe_u32 v8, v4, 16, 1
+; GFX11-FAKE16-NEXT: v_cmp_u_f32_e32 vcc_lo, v3, v3
+; GFX11-FAKE16-NEXT: s_delay_alu instid0(VALU_DEP_4) | instskip(NEXT) | instid1(VALU_DEP_4)
+; GFX11-FAKE16-NEXT: v_bfe_u32 v2, v5, 16, 1
+; GFX11-FAKE16-NEXT: v_lshrrev_b32_e32 v51, 16, v0
+; GFX11-FAKE16-NEXT: s_delay_alu instid0(VALU_DEP_4) | instskip(SKIP_1) | instid1(VALU_DEP_4)
+; GFX11-FAKE16-NEXT: v_add_nc_u32_e32 v3, v8, v4
+; GFX11-FAKE16-NEXT: v_or_b32_e32 v6, 0x400000, v5
+; GFX11-FAKE16-NEXT: v_add_nc_u32_e32 v0, v2, v5
+; GFX11-FAKE16-NEXT: v_and_b32_e32 v2, 0xffff0000, v30
+; GFX11-FAKE16-NEXT: s_delay_alu instid0(VALU_DEP_2) | instskip(NEXT) | instid1(VALU_DEP_2)
+; GFX11-FAKE16-NEXT: v_add_nc_u32_e32 v0, 0x7fff, v0
+; GFX11-FAKE16-NEXT: v_add_f32_e32 v2, 0x40c00000, v2
+; GFX11-FAKE16-NEXT: v_add_nc_u32_e32 v1, 0x7fff, v1
+; GFX11-FAKE16-NEXT: s_delay_alu instid0(VALU_DEP_1) | instskip(SKIP_2) | instid1(VALU_DEP_3)
+; GFX11-FAKE16-NEXT: v_cndmask_b32_e32 v1, v1, v7, vcc_lo
+; GFX11-FAKE16-NEXT: v_cmp_u_f32_e32 vcc_lo, v4, v4
+; GFX11-FAKE16-NEXT: v_lshlrev_b32_e32 v7, 16, v30
+; GFX11-FAKE16-NEXT: v_lshrrev_b32_e32 v29, 16, v1
+; GFX11-FAKE16-NEXT: v_add_nc_u32_e32 v1, 0x7fff, v3
+; GFX11-FAKE16-NEXT: v_or_b32_e32 v3, 0x400000, v4
+; GFX11-FAKE16-NEXT: s_delay_alu instid0(VALU_DEP_4) | instskip(NEXT) | instid1(VALU_DEP_2)
+; GFX11-FAKE16-NEXT: v_add_f32_e32 v4, 0x40c00000, v7
+; GFX11-FAKE16-NEXT: v_cndmask_b32_e32 v1, v1, v3, vcc_lo
+; GFX11-FAKE16-NEXT: v_bfe_u32 v3, v2, 16, 1
+; GFX11-FAKE16-NEXT: v_cmp_u_f32_e32 vcc_lo, v5, v5
+; GFX11-FAKE16-NEXT: v_and_b32_e32 v5, 0xffff0000, v31
+; GFX11-FAKE16-NEXT: s_delay_alu instid0(VALU_DEP_4) | instskip(NEXT) | instid1(VALU_DEP_4)
+; GFX11-FAKE16-NEXT: v_lshrrev_b32_e32 v52, 16, v1
+; GFX11-FAKE16-NEXT: v_add_nc_u32_e32 v3, v3, v2
+; GFX11-FAKE16-NEXT: s_delay_alu instid0(VALU_DEP_3) | instskip(SKIP_2) | instid1(VALU_DEP_4)
+; GFX11-FAKE16-NEXT: v_dual_cndmask_b32 v0, v0, v6 :: v_dual_add_f32 v1, 0x40c00000, v5
+; GFX11-FAKE16-NEXT: v_or_b32_e32 v5, 0x400000, v2
+; GFX11-FAKE16-NEXT: v_lshlrev_b32_e32 v6, 16, v31
+; GFX11-FAKE16-NEXT: v_add_nc_u32_e32 v3, 0x7fff, v3
+; GFX11-FAKE16-NEXT: s_delay_alu instid0(VALU_DEP_4) | instskip(SKIP_3) | instid1(VALU_DEP_3)
+; GFX11-FAKE16-NEXT: v_lshrrev_b32_e32 v30, 16, v0
+; GFX11-FAKE16-NEXT: v_bfe_u32 v0, v4, 16, 1
+; GFX11-FAKE16-NEXT: v_cmp_u_f32_e32 vcc_lo, v2, v2
+; GFX11-FAKE16-NEXT: v_bfe_u32 v7, v1, 16, 1
+; GFX11-FAKE16-NEXT: v_add_nc_u32_e32 v0, v0, v4
+; GFX11-FAKE16-NEXT: v_dual_cndmask_b32 v2, v3, v5 :: v_dual_add_f32 v3, 0x40c00000, v6
+; GFX11-FAKE16-NEXT: v_or_b32_e32 v5, 0x400000, v4
+; GFX11-FAKE16-NEXT: s_delay_alu instid0(VALU_DEP_4) | instskip(NEXT) | instid1(VALU_DEP_4)
+; GFX11-FAKE16-NEXT: v_add_nc_u32_e32 v6, v7, v1
+; GFX11-FAKE16-NEXT: v_add_nc_u32_e32 v0, 0x7fff, v0
+; GFX11-FAKE16-NEXT: s_delay_alu instid0(VALU_DEP_4)
+; GFX11-FAKE16-NEXT: v_lshrrev_b32_e32 v53, 16, v2
+; GFX11-FAKE16-NEXT: v_bfe_u32 v2, v3, 16, 1
+; GFX11-FAKE16-NEXT: v_cmp_u_f32_e32 vcc_lo, v4, v4
+; GFX11-FAKE16-NEXT: v_add_nc_u32_e32 v4, 0x7fff, v6
+; GFX11-FAKE16-NEXT: v_add_f32_e64 v6, 0x40c00000, s0
+; GFX11-FAKE16-NEXT: s_lshl_b32 s0, s12, 16
+; GFX11-FAKE16-NEXT: v_add_nc_u32_e32 v2, v2, v3
+; GFX11-FAKE16-NEXT: v_cndmask_b32_e32 v0, v0, v5, vcc_lo
+; GFX11-FAKE16-NEXT: v_or_b32_e32 v5, 0x400000, v1
+; GFX11-FAKE16-NEXT: v_cmp_u_f32_e32 vcc_lo, v1, v1
+; GFX11-FAKE16-NEXT: s_delay_alu instid0(VALU_DEP_4) | instskip(NEXT) | instid1(VALU_DEP_4)
+; GFX11-FAKE16-NEXT: v_add_nc_u32_e32 v1, 0x7fff, v2
+; GFX11-FAKE16-NEXT: v_lshrrev_b32_e32 v54, 16, v0
+; GFX11-FAKE16-NEXT: v_or_b32_e32 v2, 0x400000, v3
+; GFX11-FAKE16-NEXT: v_cndmask_b32_e32 v0, v4, v5, vcc_lo
+; GFX11-FAKE16-NEXT: v_bfe_u32 v4, v6, 16, 1
+; GFX11-FAKE16-NEXT: v_cmp_u_f32_e32 vcc_lo, v3, v3
+; GFX11-FAKE16-NEXT: v_add_f32_e64 v5, 0x40c00000, s0
+; GFX11-FAKE16-NEXT: s_and_b32 s0, s13, 0xffff0000
+; GFX11-FAKE16-NEXT: v_lshrrev_b32_e32 v31, 16, v0
+; GFX11-FAKE16-NEXT: v_add_f32_e64 v3, 0x40c00000, s0
+; GFX11-FAKE16-NEXT: v_cndmask_b32_e32 v0, v1, v2, vcc_lo
+; GFX11-FAKE16-NEXT: v_add_nc_u32_e32 v1, v4, v6
+; GFX11-FAKE16-NEXT: v_bfe_u32 v2, v5, 16, 1
+; GFX11-FAKE16-NEXT: v_or_b32_e32 v4, 0x400000, v6
+; GFX11-FAKE16-NEXT: v_cmp_u_f32_e32 vcc_lo, v6, v6
+; GFX11-FAKE16-NEXT: s_lshl_b32 s0, s13, 16
+; GFX11-FAKE16-NEXT: v_add_nc_u32_e32 v1, 0x7fff, v1
+; GFX11-FAKE16-NEXT: v_add_nc_u32_e32 v2, v2, v5
+; GFX11-FAKE16-NEXT: v_bfe_u32 v7, v3, 16, 1
+; GFX11-FAKE16-NEXT: v_or_b32_e32 v6, 0x400000, v5
+; GFX11-FAKE16-NEXT: v_lshrrev_b32_e32 v0, 16, v0
+; GFX11-FAKE16-NEXT: v_cndmask_b32_e32 v1, v1, v4, vcc_lo
+; GFX11-FAKE16-NEXT: v_add_f32_e64 v4, 0x40c00000, s0
+; GFX11-FAKE16-NEXT: v_add_nc_u32_e32 v2, 0x7fff, v2
+; GFX11-FAKE16-NEXT: v_add_nc_u32_e32 v7, v7, v3
+; GFX11-FAKE16-NEXT: v_cmp_u_f32_e32 vcc_lo, v5, v5
+; GFX11-FAKE16-NEXT: v_lshrrev_b32_e32 v55, 16, v1
+; GFX11-FAKE16-NEXT: v_bfe_u32 v1, v4, 16, 1
+; GFX11-FAKE16-NEXT: s_and_b32 s0, s14, 0xffff0000
+; GFX11-FAKE16-NEXT: v_dual_cndmask_b32 v2, v2, v6 :: v_dual_add_nc_u32 v5, 0x7fff, v7
+; GFX11-FAKE16-NEXT: v_or_b32_e32 v6, 0x400000, v3
+; GFX11-FAKE16-NEXT: s_delay_alu instid0(VALU_DEP_3)
+; GFX11-FAKE16-NEXT: v_add_nc_u32_e32 v1, v1, v4
+; GFX11-FAKE16-NEXT: v_add_f32_e64 v7, 0x40c00000, s0
+; GFX11-FAKE16-NEXT: v_cmp_u_f32_e32 vcc_lo, v3, v3
+; GFX11-FAKE16-NEXT: s_lshl_b32 s0, s14, 16
+; GFX11-FAKE16-NEXT: v_lshrrev_b32_e32 v2, 16, v2
+; GFX11-FAKE16-NEXT: v_add_f32_e64 v9, 0x40c00000, s0
+; GFX11-FAKE16-NEXT: v_bfe_u32 v8, v7, 16, 1
+; GFX11-FAKE16-NEXT: v_cndmask_b32_e32 v3, v5, v6, vcc_lo
+; GFX11-FAKE16-NEXT: v_add_nc_u32_e32 v5, 0x7fff, v1
+; GFX11-FAKE16-NEXT: v_or_b32_e32 v6, 0x400000, v4
+; GFX11-FAKE16-NEXT: v_cmp_u_f32_e32 vcc_lo, v4, v4
+; GFX11-FAKE16-NEXT: s_and_b32 s0, s15, 0xffff0000
+; GFX11-FAKE16-NEXT: v_lshrrev_b32_e32 v1, 16, v3
+; GFX11-FAKE16-NEXT: v_add_nc_u32_e32 v4, v8, v7
+; GFX11-FAKE16-NEXT: v_or_b32_e32 v8, 0x400000, v7
+; GFX11-FAKE16-NEXT: v_cndmask_b32_e32 v3, v5, v6, vcc_lo
+; GFX11-FAKE16-NEXT: v_bfe_u32 v5, v9, 16, 1
+; GFX11-FAKE16-NEXT: v_add_f32_e64 v6, 0x40c00000, s0
+; GFX11-FAKE16-NEXT: v_add_nc_u32_e32 v4, 0x7fff, v4
+; GFX11-FAKE16-NEXT: v_cmp_u_f32_e32 vcc_lo, v7, v7
+; GFX11-FAKE16-NEXT: s_lshl_b32 s0, s15, 16
+; GFX11-FAKE16-NEXT: v_add_nc_u32_e32 v5, v5, v9
+; GFX11-FAKE16-NEXT: v_bfe_u32 v10, v6, 16, 1
+; GFX11-FAKE16-NEXT: v_add_f32_e64 v7, 0x40c00000, s0
+; GFX11-FAKE16-NEXT: v_cndmask_b32_e32 v4, v4, v8, vcc_lo
+; GFX11-FAKE16-NEXT: v_or_b32_e32 v8, 0x400000, v9
+; GFX11-FAKE16-NEXT: v_add_nc_u32_e32 v5, 0x7fff, v5
+; GFX11-FAKE16-NEXT: v_add_nc_u32_e32 v10, v10, v6
+; GFX11-FAKE16-NEXT: v_cmp_u_f32_e32 vcc_lo, v9, v9
+; GFX11-FAKE16-NEXT: v_lshrrev_b32_e32 v64, 16, v4
+; GFX11-FAKE16-NEXT: v_bfe_u32 v4, v7, 16, 1
+; GFX11-FAKE16-NEXT: v_or_b32_e32 v9, 0x400000, v6
+; GFX11-FAKE16-NEXT: s_and_b32 s0, s16, 0xffff0000
+; GFX11-FAKE16-NEXT: v_dual_cndmask_b32 v5, v5, v8 :: v_dual_add_nc_u32 v8, 0x7fff, v10
+; GFX11-FAKE16-NEXT: v_cmp_u_f32_e32 vcc_lo, v6, v6
+; GFX11-FAKE16-NEXT: v_add_nc_u32_e32 v4, v4, v7
+; GFX11-FAKE16-NEXT: v_lshrrev_b32_e32 v3, 16, v3
+; GFX11-FAKE16-NEXT: s_delay_alu instid0(VALU_DEP_4)
+; GFX11-FAKE16-NEXT: v_lshrrev_b32_e32 v66, 16, v5
+; GFX11-FAKE16-NEXT: v_add_f32_e64 v5, 0x40c00000, s0
+; GFX11-FAKE16-NEXT: v_cndmask_b32_e32 v6, v8, v9, vcc_lo
+; GFX11-FAKE16-NEXT: s_lshl_b32 s0, s16, 16
+; GFX11-FAKE16-NEXT: v_add_nc_u32_e32 v4, 0x7fff, v4
+; GFX11-FAKE16-NEXT: v_or_b32_e32 v8, 0x400000, v7
+; GFX11-FAKE16-NEXT: v_bfe_u32 v9, v5, 16, 1
+; GFX11-FAKE16-NEXT: v_lshrrev_b32_e32 v65, 16, v6
+; GFX11-FAKE16-NEXT: v_add_f32_e64 v6, 0x40c00000, s0
+; GFX11-FAKE16-NEXT: v_cmp_u_f32_e32 vcc_lo, v7, v7
+; GFX11-FAKE16-NEXT: s_and_b32 s0, s17, 0xffff0000
+; GFX11-FAKE16-NEXT: v_add_nc_u32_e32 v7, v9, v5
+; GFX11-FAKE16-NEXT: v_add_f32_e64 v9, 0x40c00000, s0
+; GFX11-FAKE16-NEXT: s_lshl_b32 s0, s17, 16
+; GFX11-FAKE16-NEXT: v_cndmask_b32_e32 v4, v4, v8, vcc_lo
+; GFX11-FAKE16-NEXT: v_bfe_u32 v8, v6, 16, 1
+; GFX11-FAKE16-NEXT: v_cmp_u_f32_e32 vcc_lo, v5, v5
+; GFX11-FAKE16-NEXT: v_bfe_u32 v10, v9, 16, 1
+; GFX11-FAKE16-NEXT: v_and_b32_e32 v66, 0xffff, v66
+; GFX11-FAKE16-NEXT: v_lshrrev_b32_e32 v67, 16, v4
+; GFX11-FAKE16-NEXT: v_add_nc_u32_e32 v4, 0x7fff, v7
+; GFX11-FAKE16-NEXT: v_or_b32_e32 v7, 0x400000, v5
+; GFX11-FAKE16-NEXT: v_add_nc_u32_e32 v8, v8, v6
+; GFX11-FAKE16-NEXT: v_add_f32_e64 v5, 0x40c00000, s0
+; GFX11-FAKE16-NEXT: v_add_nc_u32_e32 v10, v10, v9
+; GFX11-FAKE16-NEXT: s_and_b32 s0, s18, 0xffff0000
+; GFX11-FAKE16-NEXT: s_delay_alu instid0(VALU_DEP_3)
+; GFX11-FAKE16-NEXT: v_dual_cndmask_b32 v4, v4, v7 :: v_dual_add_nc_u32 v7, 0x7fff, v8
+; GFX11-FAKE16-NEXT: v_or_b32_e32 v8, 0x400000, v6
+; GFX11-FAKE16-NEXT: v_cmp_u_f32_e32 vcc_lo, v6, v6
+; GFX11-FAKE16-NEXT: v_bfe_u32 v11, v5, 16, 1
+; GFX11-FAKE16-NEXT: v_and_b32_e32 v67, 0xffff, v67
+; GFX11-FAKE16-NEXT: v_lshrrev_b32_e32 v4, 16, v4
+; GFX11-FAKE16-NEXT: v_and_b32_e32 v54, 0xffff, v54
+; GFX11-FAKE16-NEXT: v_dual_cndmask_b32 v6, v7, v8 :: v_dual_add_nc_u32 v7, 0x7fff, v10
+; GFX11-FAKE16-NEXT: v_or_b32_e32 v8, 0x400000, v9
+; GFX11-FAKE16-NEXT: v_cmp_u_f32_e32 vcc_lo, v9, v9
+; GFX11-FAKE16-NEXT: v_add_nc_u32_e32 v10, v11, v5
+; GFX11-FAKE16-NEXT: v_add_f32_e64 v11, 0x40c00000, s0
+; GFX11-FAKE16-NEXT: s_lshl_b32 s0, s18, 16
+; GFX11-FAKE16-NEXT: v_or_b32_e32 v9, 0x400000, v5
+; GFX11-FAKE16-NEXT: s_delay_alu instid0(VALU_DEP_3) | instskip(NEXT) | instid1(VALU_DEP_3)
+; GFX11-FAKE16-NEXT: v_dual_cndmask_b32 v7, v7, v8 :: v_dual_add_nc_u32 v8, 0x7fff, v10
+; GFX11-FAKE16-NEXT: v_bfe_u32 v10, v11, 16, 1
+; GFX11-FAKE16-NEXT: v_cmp_u_f32_e32 vcc_lo, v5, v5
+; GFX11-FAKE16-NEXT: v_or_b32_e32 v12, 0x400000, v11
+; GFX11-FAKE16-NEXT: s_delay_alu instid0(VALU_DEP_4) | instskip(SKIP_4) | instid1(VALU_DEP_3)
+; GFX11-FAKE16-NEXT: v_lshrrev_b32_e32 v68, 16, v7
+; GFX11-FAKE16-NEXT: v_add_f32_e64 v7, 0x40c00000, s0
+; GFX11-FAKE16-NEXT: s_and_b32 s0, s19, 0xffff0000
+; GFX11-FAKE16-NEXT: v_dual_cndmask_b32 v5, v8, v9 :: v_dual_add_nc_u32 v8, v10, v11
+; GFX11-FAKE16-NEXT: v_add_f32_e64 v10, 0x40c00000, s0
+; GFX11-FAKE16-NEXT: v_bfe_u32 v9, v7, 16, 1
+; GFX11-FAKE16-NEXT: v_cmp_u_f32_e32 vcc_lo, v11, v11
+; GFX11-FAKE16-NEXT: s_lshl_b32 s0, s19, 16
+; GFX11-FAKE16-NEXT: v_add_nc_u32_e32 v8, 0x7fff, v8
+; GFX11-FAKE16-NEXT: v_bfe_u32 v13, v10, 16, 1
+; GFX11-FAKE16-NEXT: v_add_nc_u32_e32 v9, v9, v7
+; GFX11-FAKE16-NEXT: v_add_f32_e64 v11, 0x40c00000, s0
+; GFX11-FAKE16-NEXT: s_and_b32 s0, s20, 0xffff0000
+; GFX11-FAKE16-NEXT: v_cndmask_b32_e32 v8, v8, v12, vcc_lo
+; GFX11-FAKE16-NEXT: v_or_b32_e32 v12, 0x400000, v7
+; GFX11-FAKE16-NEXT: v_add_nc_u32_e32 v9, 0x7fff, v9
+; GFX11-FAKE16-NEXT: v_add_nc_u32_e32 v13, v13, v10
+; GFX11-FAKE16-NEXT: v_cmp_u_f32_e32 vcc_lo, v7, v7
+; GFX11-FAKE16-NEXT: v_lshrrev_b32_e32 v69, 16, v8
+; GFX11-FAKE16-NEXT: v_bfe_u32 v8, v11, 16, 1
+; GFX11-FAKE16-NEXT: v_lshrrev_b32_e32 v6, 16, v6
+; GFX11-FAKE16-NEXT: v_lshrrev_b32_e32 v5, 16, v5
+; GFX11-FAKE16-NEXT: v_cndmask_b32_e32 v7, v9, v12, vcc_lo
+; GFX11-FAKE16-NEXT: v_add_nc_u32_e32 v9, 0x7fff, v13
+; GFX11-FAKE16-NEXT: v_or_b32_e32 v12, 0x400000, v10
+; GFX11-FAKE16-NEXT: v_cmp_u_f32_e32 vcc_lo, v10, v10
+; GFX11-FAKE16-NEXT: v_add_nc_u32_e32 v8, v8, v11
+; GFX11-FAKE16-NEXT: v_add_f32_e64 v13, 0x40c00000, s0
+; GFX11-FAKE16-NEXT: s_lshl_b32 s0, s20, 16
+; GFX11-FAKE16-NEXT: v_or_b32_e32 v10, 0x400000, v11
+; GFX11-FAKE16-NEXT: v_cndmask_b32_e32 v9, v9, v12, vcc_lo
+; GFX11-FAKE16-NEXT: v_add_nc_u32_e32 v8, 0x7fff, v8
+; GFX11-FAKE16-NEXT: v_bfe_u32 v12, v13, 16, 1
+; GFX11-FAKE16-NEXT: v_cmp_u_f32_e32 vcc_lo, v11, v11
+; GFX11-FAKE16-NEXT: v_or_b32_e32 v14, 0x400000, v13
+; GFX11-FAKE16-NEXT: v_lshrrev_b32_e32 v70, 16, v9
+; GFX11-FAKE16-NEXT: v_add_f32_e64 v9, 0x40c00000, s0
+; GFX11-FAKE16-NEXT: s_and_b32 s0, s21, 0xffff0000
+; GFX11-FAKE16-NEXT: v_cndmask_b32_e32 v8, v8, v10, vcc_lo
+; GFX11-FAKE16-NEXT: v_add_nc_u32_e32 v10, v12, v13
+; GFX11-FAKE16-NEXT: v_add_f32_e64 v12, 0x40c00000, s0
+; GFX11-FAKE16-NEXT: v_bfe_u32 v11, v9, 16, 1
+; GFX11-FAKE16-NEXT: v_cmp_u_f32_e32 vcc_lo, v13, v13
+; GFX11-FAKE16-NEXT: s_lshl_b32 s0, s21, 16
+; GFX11-FAKE16-NEXT: v_add_nc_u32_e32 v10, 0x7fff, v10
+; GFX11-FAKE16-NEXT: v_bfe_u32 v15, v12, 16, 1
+; GFX11-FAKE16-NEXT: v_add_nc_u32_e32 v11, v11, v9
+; GFX11-FAKE16-NEXT: v_add_f32_e64 v13, 0x40c00000, s0
+; GFX11-FAKE16-NEXT: s_and_b32 s0, s22, 0xffff0000
+; GFX11-FAKE16-NEXT: v_cndmask_b32_e32 v10, v10, v14, vcc_lo
+; GFX11-FAKE16-NEXT: v_or_b32_e32 v14, 0x400000, v9
+; GFX11-FAKE16-NEXT: v_add_nc_u32_e32 v11, 0x7fff, v11
+; GFX11-FAKE16-NEXT: v_add_nc_u32_e32 v15, v15, v12
+; GFX11-FAKE16-NEXT: v_cmp_u_f32_e32 vcc_lo, v9, v9
+; GFX11-FAKE16-NEXT: v_lshrrev_b32_e32 v71, 16, v10
+; GFX11-FAKE16-NEXT: v_bfe_u32 v10, v13, 16, 1
+; GFX11-FAKE16-NEXT: v_lshrrev_b32_e32 v7, 16, v7
+; GFX11-FAKE16-NEXT: v_lshrrev_b32_e32 v8, 16, v8
+; GFX11-FAKE16-NEXT: v_cndmask_b32_e32 v9, v11, v14, vcc_lo
+; GFX11-FAKE16-NEXT: v_add_nc_u32_e32 v11, 0x7fff, v15
+; GFX11-FAKE16-NEXT: v_or_b32_e32 v14, 0x400000, v12
+; GFX11-FAKE16-NEXT: v_cmp_u_f32_e32 vcc_lo, v12, v12
+; GFX11-FAKE16-NEXT: v_add_nc_u32_e32 v10, v10, v13
+; GFX11-FAKE16-NEXT: v_lshrrev_b32_e32 v80, 16, v9
+; GFX11-FAKE16-NEXT: v_add_f32_e64 v9, 0x40c00000, s0
+; GFX11-FAKE16-NEXT: s_lshl_b32 s0, s22, 16
+; GFX11-FAKE16-NEXT: v_cndmask_b32_e32 v11, v11, v14, vcc_lo
+; GFX11-FAKE16-NEXT: v_add_nc_u32_e32 v10, 0x7fff, v10
+; GFX11-FAKE16-NEXT: v_or_b32_e32 v12, 0x400000, v13
+; GFX11-FAKE16-NEXT: v_bfe_u32 v14, v9, 16, 1
+; GFX11-FAKE16-NEXT: v_cmp_u_f32_e32 vcc_lo, v13, v13
+; GFX11-FAKE16-NEXT: v_lshrrev_b32_e32 v81, 16, v11
+; GFX11-FAKE16-NEXT: v_add_f32_e64 v11, 0x40c00000, s0
+; GFX11-FAKE16-NEXT: s_and_b32 s0, s23, 0xffff0000
+; GFX11-FAKE16-NEXT: v_or_b32_e32 v15, 0x400000, v9
+; GFX11-FAKE16-NEXT: v_cndmask_b32_e32 v10, v10, v12, vcc_lo
+; GFX11-FAKE16-NEXT: v_add_nc_u32_e32 v12, v14, v9
+; GFX11-FAKE16-NEXT: v_bfe_u32 v13, v11, 16, 1
+; GFX11-FAKE16-NEXT: v_add_f32_e64 v14, 0x40c00000, s0
+; GFX11-FAKE16-NEXT: v_cmp_u_f32_e32 vcc_lo, v9, v9
+; GFX11-FAKE16-NEXT: s_lshl_b32 s0, s23, 16
+; GFX11-FAKE16-NEXT: v_add_nc_u32_e32 v12, 0x7fff, v12
+; GFX11-FAKE16-NEXT: v_add_nc_u32_e32 v13, v13, v11
+; GFX11-FAKE16-NEXT: v_bfe_u32 v82, v14, 16, 1
+; GFX11-FAKE16-NEXT: v_lshrrev_b32_e32 v10, 16, v10
+; GFX11-FAKE16-NEXT: v_and_b32_e32 v5, 0xffff, v5
+; GFX11-FAKE16-NEXT: v_cndmask_b32_e32 v9, v12, v15, vcc_lo
+; GFX11-FAKE16-NEXT: v_add_f32_e64 v12, 0x40c00000, s0
+; GFX11-FAKE16-NEXT: v_add_nc_u32_e32 v13, 0x7fff, v13
+; GFX11-FAKE16-NEXT: v_or_b32_e32 v15, 0x400000, v11
+; GFX11-FAKE16-NEXT: v_add_nc_u32_e32 v82, v82, v14
+; GFX11-FAKE16-NEXT: v_cmp_u_f32_e32 vcc_lo, v11, v11
+; GFX11-FAKE16-NEXT: v_bfe_u32 v83, v12, 16, 1
+; GFX11-FAKE16-NEXT: s_and_b32 s0, s24, 0xffff0000
+; GFX11-FAKE16-NEXT: v_lshrrev_b32_e32 v9, 16, v9
+; GFX11-FAKE16-NEXT: v_lshl_or_b32 v5, v68, 16, v5
+; GFX11-FAKE16-NEXT: v_cndmask_b32_e32 v11, v13, v15, vcc_lo
+; GFX11-FAKE16-NEXT: v_add_nc_u32_e32 v13, 0x7fff, v82
+; GFX11-FAKE16-NEXT: v_or_b32_e32 v15, 0x400000, v14
+; GFX11-FAKE16-NEXT: v_cmp_u_f32_e32 vcc_lo, v14, v14
+; GFX11-FAKE16-NEXT: v_add_nc_u32_e32 v82, v83, v12
+; GFX11-FAKE16-NEXT: v_add_f32_e64 v83, 0x40c00000, s0
+; GFX11-FAKE16-NEXT: s_lshl_b32 s0, s24, 16
+; GFX11-FAKE16-NEXT: v_lshrrev_b32_e32 v11, 16, v11
+; GFX11-FAKE16-NEXT: s_delay_alu instid0(VALU_DEP_3) | instskip(SKIP_3) | instid1(VALU_DEP_4)
+; GFX11-FAKE16-NEXT: v_dual_cndmask_b32 v13, v13, v15 :: v_dual_add_nc_u32 v14, 0x7fff, v82
+; GFX11-FAKE16-NEXT: v_or_b32_e32 v15, 0x400000, v12
+; GFX11-FAKE16-NEXT: v_bfe_u32 v82, v83, 16, 1
+; GFX11-FAKE16-NEXT: v_cmp_u_f32_e32 vcc_lo, v12, v12
+; GFX11-FAKE16-NEXT: v_lshrrev_b32_e32 v84, 16, v13
+; GFX11-FAKE16-NEXT: v_add_f32_e64 v13, 0x40c00000, s0
+; GFX11-FAKE16-NEXT: s_and_b32 s0, s25, 0xffff0000
+; GFX11-FAKE16-NEXT: v_or_b32_e32 v85, 0x400000, v83
+; GFX11-FAKE16-NEXT: v_cndmask_b32_e32 v12, v14, v15, vcc_lo
+; GFX11-FAKE16-NEXT: v_add_nc_u32_e32 v14, v82, v83
+; GFX11-FAKE16-NEXT: v_bfe_u32 v15, v13, 16, 1
+; GFX11-FAKE16-NEXT: v_add_f32_e64 v82, 0x40c00000, s0
+; GFX11-FAKE16-NEXT: v_cmp_u_f32_e32 vcc_lo, v83, v83
+; GFX11-FAKE16-NEXT: s_lshl_b32 s0, s25, 16
+; GFX11-FAKE16-NEXT: v_add_nc_u32_e32 v14, 0x7fff, v14
+; GFX11-FAKE16-NEXT: v_add_nc_u32_e32 v15, v15, v13
+; GFX11-FAKE16-NEXT: v_bfe_u32 v86, v82, 16, 1
+; GFX11-FAKE16-NEXT: v_add_f32_e64 v83, 0x40c00000, s0
+; GFX11-FAKE16-NEXT: s_and_b32 s0, s26, 0xffff0000
+; GFX11-FAKE16-NEXT: s_delay_alu instid0(VALU_DEP_3) | instskip(SKIP_3) | instid1(VALU_DEP_4)
+; GFX11-FAKE16-NEXT: v_dual_cndmask_b32 v14, v14, v85 :: v_dual_add_nc_u32 v15, 0x7fff, v15
+; GFX11-FAKE16-NEXT: v_or_b32_e32 v85, 0x400000, v13
+; GFX11-FAKE16-NEXT: v_add_nc_u32_e32 v86, v86, v82
+; GFX11-FAKE16-NEXT: v_cmp_u_f32_e32 vcc_lo, v13, v13
+; GFX11-FAKE16-NEXT: v_lshrrev_b32_e32 v87, 16, v14
+; GFX11-FAKE16-NEXT: v_bfe_u32 v14, v83, 16, 1
+; GFX11-FAKE16-NEXT: v_or_b32_e32 v100, 0x400000, v83
+; GFX11-FAKE16-NEXT: v_lshrrev_b32_e32 v12, 16, v12
+; GFX11-FAKE16-NEXT: v_cndmask_b32_e32 v13, v15, v85, vcc_lo
+; GFX11-FAKE16-NEXT: v_add_nc_u32_e32 v15, 0x7fff, v86
+; GFX11-FAKE16-NEXT: v_add_f32_e64 v86, 0x40c00000, s0
+; GFX11-FAKE16-NEXT: s_lshl_b32 s0, s26, 16
+; GFX11-FAKE16-NEXT: v_or_b32_e32 v85, 0x400000, v82
+; GFX11-FAKE16-NEXT: v_cmp_u_f32_e32 vcc_lo, v82, v82
+; GFX11-FAKE16-NEXT: v_add_f32_e64 v82, 0x40c00000, s0
+; GFX11-FAKE16-NEXT: s_lshl_b32 s0, s27, 16
+; GFX11-FAKE16-NEXT: v_or_b32_e32 v102, 0x400000, v86
+; GFX11-FAKE16-NEXT: v_add_f32_e64 v96, 0x40c00000, s0
+; GFX11-FAKE16-NEXT: s_and_b32 s0, s27, 0xffff0000
+; GFX11-FAKE16-NEXT: v_bfe_u32 v97, v82, 16, 1
+; GFX11-FAKE16-NEXT: v_cndmask_b32_e32 v15, v15, v85, vcc_lo
+; GFX11-FAKE16-NEXT: v_bfe_u32 v85, v86, 16, 1
+; GFX11-FAKE16-NEXT: v_bfe_u32 v99, v96, 16, 1
+; GFX11-FAKE16-NEXT: v_add_f32_e64 v98, 0x40c00000, s0
+; GFX11-FAKE16-NEXT: v_add_nc_u32_e32 v97, v97, v82
+; GFX11-FAKE16-NEXT: v_or_b32_e32 v103, 0x400000, v82
+; GFX11-FAKE16-NEXT: v_cmp_u_f32_e32 vcc_lo, v82, v82
+; GFX11-FAKE16-NEXT: v_add_nc_u32_e32 v99, v99, v96
+; GFX11-FAKE16-NEXT: v_add_nc_u32_e32 v85, v85, v86
+; GFX11-FAKE16-NEXT: v_add_nc_u32_e32 v97, 0x7fff, v97
+; GFX11-FAKE16-NEXT: v_bfe_u32 v101, v98, 16, 1
+; GFX11-FAKE16-NEXT: v_or_b32_e32 v112, 0x400000, v96
+; GFX11-FAKE16-NEXT: v_add_nc_u32_e32 v99, 0x7fff, v99
+; GFX11-FAKE16-NEXT: s_delay_alu instid0(VALU_DEP_4)
+; GFX11-FAKE16-NEXT: v_dual_cndmask_b32 v82, v97, v103 :: v_dual_add_nc_u32 v85, 0x7fff, v85
+; GFX11-FAKE16-NEXT: v_cmp_u_f32_e32 vcc_lo, v96, v96
+; GFX11-FAKE16-NEXT: v_add_nc_u32_e32 v101, v101, v98
+; GFX11-FAKE16-NEXT: v_add_nc_u32_e32 v14, v14, v83
+; GFX11-FAKE16-NEXT: v_lshrrev_b32_e32 v13, 16, v13
+; GFX11-FAKE16-NEXT: v_lshrrev_b32_e32 v82, 16, v82
+; GFX11-FAKE16-NEXT: v_cndmask_b32_e32 v96, v99, v112, vcc_lo
+; GFX11-FAKE16-NEXT: v_cmp_u_f32_e32 vcc_lo, v86, v86
+; GFX11-FAKE16-NEXT: v_add_nc_u32_e32 v97, 0x7fff, v101
+; GFX11-FAKE16-NEXT: v_or_b32_e32 v101, 0x400000, v98
+; GFX11-FAKE16-NEXT: v_add_nc_u32_e32 v14, 0x7fff, v14
+; GFX11-FAKE16-NEXT: v_lshrrev_b32_e32 v96, 16, v96
+; GFX11-FAKE16-NEXT: v_cndmask_b32_e32 v85, v85, v102, vcc_lo
+; GFX11-FAKE16-NEXT: v_cmp_u_f32_e32 vcc_lo, v98, v98
+; GFX11-FAKE16-NEXT: v_and_b32_e32 v82, 0xffff, v82
+; GFX11-FAKE16-NEXT: v_and_b32_e32 v68, 0xffff, v3
+; GFX11-FAKE16-NEXT: v_lshl_or_b32 v3, v65, 16, v67
+; GFX11-FAKE16-NEXT: v_dual_cndmask_b32 v86, v97, v101 :: v_dual_and_b32 v65, 0xffff, v28
+; GFX11-FAKE16-NEXT: v_cmp_u_f32_e32 vcc_lo, v83, v83
+; GFX11-FAKE16-NEXT: v_lshrrev_b32_e32 v83, 16, v85
+; GFX11-FAKE16-NEXT: v_lshrrev_b32_e32 v97, 16, v15
+; GFX11-FAKE16-NEXT: v_and_b32_e32 v80, 0xffff, v80
+; GFX11-FAKE16-NEXT: v_lshrrev_b32_e32 v85, 16, v86
+; GFX11-FAKE16-NEXT: v_cndmask_b32_e32 v14, v14, v100, vcc_lo
+; GFX11-FAKE16-NEXT: v_and_b32_e32 v86, 0xffff, v96
+; GFX11-FAKE16-NEXT: v_lshl_or_b32 v1, v1, 16, v68
+; GFX11-FAKE16-NEXT: s_delay_alu instid0(VALU_DEP_3) | instskip(SKIP_1) | instid1(VALU_DEP_4)
+; GFX11-FAKE16-NEXT: v_lshrrev_b32_e32 v96, 16, v14
+; GFX11-FAKE16-NEXT: v_lshl_or_b32 v14, v83, 16, v82
+; GFX11-FAKE16-NEXT: v_lshl_or_b32 v15, v85, 16, v86
+; GFX11-FAKE16-NEXT: v_and_b32_e32 v83, 0xffff, v13
+; GFX11-FAKE16-NEXT: v_and_b32_e32 v86, 0xffff, v11
+; GFX11-FAKE16-NEXT: v_and_b32_e32 v82, 0xffff, v96
+; GFX11-FAKE16-NEXT: v_and_b32_e32 v96, 0xffff, v10
+; GFX11-FAKE16-NEXT: v_and_b32_e32 v85, 0xffff, v12
+; GFX11-FAKE16-NEXT: v_lshl_or_b32 v12, v87, 16, v83
+; GFX11-FAKE16-NEXT: v_lshl_or_b32 v10, v9, 16, v86
+; GFX11-FAKE16-NEXT: v_lshl_or_b32 v13, v97, 16, v82
+; GFX11-FAKE16-NEXT: v_and_b32_e32 v82, 0xffff, v7
+; GFX11-FAKE16-NEXT: v_lshl_or_b32 v9, v81, 16, v96
+; GFX11-FAKE16-NEXT: v_and_b32_e32 v81, 0xffff, v8
+; GFX11-FAKE16-NEXT: v_and_b32_e32 v83, 0xffff, v6
+; GFX11-FAKE16-NEXT: v_lshl_or_b32 v11, v84, 16, v85
+; GFX11-FAKE16-NEXT: v_lshl_or_b32 v6, v69, 16, v82
+; GFX11-FAKE16-NEXT: v_and_b32_e32 v69, 0xffff, v2
+; GFX11-FAKE16-NEXT: v_lshl_or_b32 v2, v64, 16, v66
+; GFX11-FAKE16-NEXT: v_and_b32_e32 v64, 0xffff, v29
+; GFX11-FAKE16-NEXT: v_lshl_or_b32 v7, v70, 16, v81
+; GFX11-FAKE16-NEXT: v_and_b32_e32 v70, 0xffff, v0
+; GFX11-FAKE16-NEXT: v_lshl_or_b32 v0, v55, 16, v69
+; GFX11-FAKE16-NEXT: v_and_b32_e32 v55, 0xffff, v30
+; GFX11-FAKE16-NEXT: v_lshl_or_b32 v28, v51, 16, v64
+; GFX11-FAKE16-NEXT: v_and_b32_e32 v51, 0xffff, v24
+; GFX11-FAKE16-NEXT: v_and_b32_e32 v66, 0xffff, v27
+; GFX11-FAKE16-NEXT: v_lshl_or_b32 v27, v50, 16, v65
+; GFX11-FAKE16-NEXT: v_lshl_or_b32 v29, v52, 16, v55
+; GFX11-FAKE16-NEXT: v_and_b32_e32 v50, 0xffff, v25
+; GFX11-FAKE16-NEXT: v_and_b32_e32 v52, 0xffff, v22
+; GFX11-FAKE16-NEXT: v_lshl_or_b32 v24, v38, 16, v39
+; GFX11-FAKE16-NEXT: v_lshl_or_b32 v22, v37, 16, v51
+; GFX11-FAKE16-NEXT: v_and_b32_e32 v37, 0xffff, v20
+; GFX11-FAKE16-NEXT: v_and_b32_e32 v38, 0xffff, v18
+; GFX11-FAKE16-NEXT: v_lshl_or_b32 v8, v71, 16, v80
+; GFX11-FAKE16-NEXT: v_lshl_or_b32 v4, v4, 16, v83
+; GFX11-FAKE16-NEXT: v_lshl_or_b32 v31, v31, 16, v70
+; GFX11-FAKE16-NEXT: v_lshl_or_b32 v30, v53, 16, v54
+; GFX11-FAKE16-NEXT: v_lshl_or_b32 v26, v26, 16, v66
+; GFX11-FAKE16-NEXT: v_lshl_or_b32 v25, v48, 16, v49
+; GFX11-FAKE16-NEXT: v_lshl_or_b32 v23, v23, 16, v50
+; GFX11-FAKE16-NEXT: v_lshl_or_b32 v21, v21, 16, v52
+; GFX11-FAKE16-NEXT: v_lshl_or_b32 v20, v35, 16, v36
+; GFX11-FAKE16-NEXT: v_lshl_or_b32 v18, v33, 16, v37
+; GFX11-FAKE16-NEXT: v_lshl_or_b32 v17, v17, 16, v38
+; GFX11-FAKE16-NEXT: s_setpc_b64 s[30:31]
+; GFX11-FAKE16-NEXT: .LBB101_3:
+; GFX11-FAKE16-NEXT: s_branch .LBB101_2
+; GFX11-FAKE16-NEXT: .LBB101_4:
+; GFX11-FAKE16-NEXT: v_dual_mov_b32 v0, s12 :: v_dual_mov_b32 v1, s13
+; GFX11-FAKE16-NEXT: v_dual_mov_b32 v2, s14 :: v_dual_mov_b32 v3, s15
+; GFX11-FAKE16-NEXT: v_dual_mov_b32 v4, s16 :: v_dual_mov_b32 v5, s17
+; GFX11-FAKE16-NEXT: v_dual_mov_b32 v6, s18 :: v_dual_mov_b32 v7, s19
+; GFX11-FAKE16-NEXT: v_dual_mov_b32 v8, s20 :: v_dual_mov_b32 v9, s21
+; GFX11-FAKE16-NEXT: v_dual_mov_b32 v10, s22 :: v_dual_mov_b32 v11, s23
+; GFX11-FAKE16-NEXT: v_dual_mov_b32 v12, s24 :: v_dual_mov_b32 v13, s25
+; GFX11-FAKE16-NEXT: v_dual_mov_b32 v14, s26 :: v_dual_mov_b32 v15, s27
+; GFX11-FAKE16-NEXT: s_setpc_b64 s[30:31]
%cmp = icmp eq i32 %b, 0
br i1 %cmp, label %cmp.true, label %cmp.false
@@ -227079,568 +233038,496 @@ define <64 x i16> @bitcast_v64bf16_to_v64i16(<64 x bfloat> %a, i32 %b) {
; GFX11-TRUE16-NEXT: ; %bb.1: ; %cmp.true
; GFX11-TRUE16-NEXT: v_lshlrev_b32_e32 v33, 16, v17
; GFX11-TRUE16-NEXT: v_lshlrev_b32_e32 v32, 16, v16
-; GFX11-TRUE16-NEXT: v_and_b32_e32 v16, 0xffff0000, v16
-; GFX11-TRUE16-NEXT: v_and_b32_e32 v48, 0xffff0000, v20
-; GFX11-TRUE16-NEXT: v_lshlrev_b32_e32 v51, 16, v23
+; GFX11-TRUE16-NEXT: v_lshlrev_b32_e32 v50, 16, v22
+; GFX11-TRUE16-NEXT: v_and_b32_e32 v22, 0xffff0000, v22
; GFX11-TRUE16-NEXT: s_delay_alu instid0(VALU_DEP_4) | instskip(NEXT) | instid1(VALU_DEP_4)
-; GFX11-TRUE16-NEXT: v_dual_add_f32 v33, 0x40c00000, v33 :: v_dual_add_f32 v32, 0x40c00000, v32
-; GFX11-TRUE16-NEXT: v_dual_add_f32 v34, 0x40c00000, v16 :: v_dual_lshlrev_b32 v35, 16, v18
-; GFX11-TRUE16-NEXT: v_and_b32_e32 v52, 0xffff0000, v24
-; GFX11-TRUE16-NEXT: s_delay_alu instid0(VALU_DEP_3) | instskip(NEXT) | instid1(VALU_DEP_4)
-; GFX11-TRUE16-NEXT: v_bfe_u32 v37, v33, 16, 1
-; GFX11-TRUE16-NEXT: v_bfe_u32 v16, v32, 16, 1
-; GFX11-TRUE16-NEXT: v_or_b32_e32 v38, 0x400000, v32
+; GFX11-TRUE16-NEXT: v_dual_add_f32 v33, 0x40c00000, v33 :: v_dual_lshlrev_b32 v52, 16, v24
+; GFX11-TRUE16-NEXT: v_add_f32_e32 v32, 0x40c00000, v32
+; GFX11-TRUE16-NEXT: v_and_b32_e32 v24, 0xffff0000, v24
+; GFX11-TRUE16-NEXT: s_delay_alu instid0(VALU_DEP_4)
+; GFX11-TRUE16-NEXT: v_add_f32_e32 v22, 0x40c00000, v22
+; GFX11-TRUE16-NEXT: v_lshlrev_b32_e32 v70, 16, v2
+; GFX11-TRUE16-NEXT: v_bfe_u32 v38, v33, 16, 1
+; GFX11-TRUE16-NEXT: v_bfe_u32 v35, v32, 16, 1
+; GFX11-TRUE16-NEXT: v_or_b32_e32 v36, 0x400000, v32
; GFX11-TRUE16-NEXT: v_cmp_u_f32_e32 vcc_lo, v32, v32
-; GFX11-TRUE16-NEXT: v_or_b32_e32 v39, 0x400000, v34
-; GFX11-TRUE16-NEXT: v_add3_u32 v37, v37, v33, 0x7fff
-; GFX11-TRUE16-NEXT: v_add3_u32 v16, v16, v32, 0x7fff
+; GFX11-TRUE16-NEXT: v_or_b32_e32 v48, 0x400000, v33
+; GFX11-TRUE16-NEXT: v_add3_u32 v38, v38, v33, 0x7fff
+; GFX11-TRUE16-NEXT: v_add3_u32 v35, v35, v32, 0x7fff
; GFX11-TRUE16-NEXT: v_and_b32_e32 v17, 0xffff0000, v17
-; GFX11-TRUE16-NEXT: v_or_b32_e32 v32, 0x400000, v33
-; GFX11-TRUE16-NEXT: v_and_b32_e32 v66, 0xffff0000, v30
-; GFX11-TRUE16-NEXT: s_delay_alu instid0(VALU_DEP_4) | instskip(NEXT) | instid1(VALU_DEP_4)
-; GFX11-TRUE16-NEXT: v_dual_add_f32 v35, 0x40c00000, v35 :: v_dual_cndmask_b32 v16, v16, v38
-; GFX11-TRUE16-NEXT: v_add_f32_e32 v36, 0x40c00000, v17
-; GFX11-TRUE16-NEXT: v_bfe_u32 v17, v34, 16, 1
-; GFX11-TRUE16-NEXT: v_cmp_u_f32_e32 vcc_lo, v34, v34
-; GFX11-TRUE16-NEXT: v_and_b32_e32 v38, 0xffff0000, v18
-; GFX11-TRUE16-NEXT: v_and_b32_e32 v68, 0xffff0000, v0
-; GFX11-TRUE16-NEXT: v_and_b32_e32 v70, 0xffff0000, v2
-; GFX11-TRUE16-NEXT: v_add3_u32 v17, v17, v34, 0x7fff
-; GFX11-TRUE16-NEXT: v_bfe_u32 v34, v36, 16, 1
-; GFX11-TRUE16-NEXT: v_lshlrev_b32_e32 v80, 16, v5
-; GFX11-TRUE16-NEXT: v_lshlrev_b32_e32 v82, 16, v7
-; GFX11-TRUE16-NEXT: v_and_b32_e32 v7, 0xffff0000, v7
-; GFX11-TRUE16-NEXT: v_cndmask_b32_e32 v17, v17, v39, vcc_lo
+; GFX11-TRUE16-NEXT: v_add_f32_e32 v24, 0x40c00000, v24
+; GFX11-TRUE16-NEXT: v_lshlrev_b32_e32 v54, 16, v26
+; GFX11-TRUE16-NEXT: v_and_b32_e32 v26, 0xffff0000, v26
+; GFX11-TRUE16-NEXT: v_cndmask_b32_e32 v32, v35, v36, vcc_lo
+; GFX11-TRUE16-NEXT: v_dual_add_f32 v17, 0x40c00000, v17 :: v_dual_lshlrev_b32 v34, 16, v18
+; GFX11-TRUE16-NEXT: v_lshlrev_b32_e32 v36, 16, v19
+; GFX11-TRUE16-NEXT: v_and_b32_e32 v19, 0xffff0000, v19
+; GFX11-TRUE16-NEXT: v_and_b32_e32 v18, 0xffff0000, v18
+; GFX11-TRUE16-NEXT: s_delay_alu instid0(VALU_DEP_4)
+; GFX11-TRUE16-NEXT: v_add_f32_e32 v34, 0x40c00000, v34
+; GFX11-TRUE16-NEXT: v_bfe_u32 v35, v17, 16, 1
+; GFX11-TRUE16-NEXT: v_add_f32_e32 v36, 0x40c00000, v36
+; GFX11-TRUE16-NEXT: v_add_f32_e32 v26, 0x40c00000, v26
+; GFX11-TRUE16-NEXT: v_add_f32_e32 v18, 0x40c00000, v18
+; GFX11-TRUE16-NEXT: v_lshlrev_b32_e32 v80, 16, v4
+; GFX11-TRUE16-NEXT: v_add3_u32 v35, v35, v17, 0x7fff
+; GFX11-TRUE16-NEXT: v_and_b32_e32 v16, 0xffff0000, v16
+; GFX11-TRUE16-NEXT: v_dual_add_f32 v19, 0x40c00000, v19 :: v_dual_lshlrev_b32 v82, 16, v6
+; GFX11-TRUE16-NEXT: v_lshlrev_b32_e32 v64, 16, v28
+; GFX11-TRUE16-NEXT: v_and_b32_e32 v28, 0xffff0000, v28
+; GFX11-TRUE16-NEXT: s_delay_alu instid0(VALU_DEP_4)
+; GFX11-TRUE16-NEXT: v_add_f32_e32 v16, 0x40c00000, v16
+; GFX11-TRUE16-NEXT: v_lshlrev_b32_e32 v84, 16, v8
+; GFX11-TRUE16-NEXT: v_lshlrev_b32_e32 v66, 16, v30
+; GFX11-TRUE16-NEXT: v_and_b32_e32 v30, 0xffff0000, v30
+; GFX11-TRUE16-NEXT: v_add_f32_e32 v28, 0x40c00000, v28
+; GFX11-TRUE16-NEXT: v_bfe_u32 v37, v16, 16, 1
+; GFX11-TRUE16-NEXT: v_or_b32_e32 v39, 0x400000, v16
+; GFX11-TRUE16-NEXT: v_cmp_u_f32_e32 vcc_lo, v16, v16
+; GFX11-TRUE16-NEXT: v_add_f32_e32 v30, 0x40c00000, v30
+; GFX11-TRUE16-NEXT: v_lshlrev_b32_e32 v86, 16, v10
+; GFX11-TRUE16-NEXT: v_add3_u32 v37, v37, v16, 0x7fff
+; GFX11-TRUE16-NEXT: v_lshlrev_b32_e32 v68, 16, v0
+; GFX11-TRUE16-NEXT: v_and_b32_e32 v0, 0xffff0000, v0
+; GFX11-TRUE16-NEXT: v_lshlrev_b32_e32 v96, 16, v12
+; GFX11-TRUE16-NEXT: v_and_b32_e32 v2, 0xffff0000, v2
+; GFX11-TRUE16-NEXT: v_cndmask_b32_e32 v16, v37, v39, vcc_lo
; GFX11-TRUE16-NEXT: v_cmp_u_f32_e32 vcc_lo, v33, v33
-; GFX11-TRUE16-NEXT: v_or_b32_e32 v33, 0x400000, v36
-; GFX11-TRUE16-NEXT: v_dual_add_f32 v82, 0x40c00000, v82 :: v_dual_lshlrev_b32 v83, 16, v8
+; GFX11-TRUE16-NEXT: v_or_b32_e32 v37, 0x400000, v17
+; GFX11-TRUE16-NEXT: v_bfe_u32 v39, v36, 16, 1
+; GFX11-TRUE16-NEXT: v_add_f32_e32 v0, 0x40c00000, v0
+; GFX11-TRUE16-NEXT: v_dual_add_f32 v2, 0x40c00000, v2 :: v_dual_cndmask_b32 v33, v38, v48
+; GFX11-TRUE16-NEXT: v_bfe_u32 v38, v34, 16, 1
+; GFX11-TRUE16-NEXT: v_cmp_u_f32_e32 vcc_lo, v17, v17
+; GFX11-TRUE16-NEXT: v_lshlrev_b32_e32 v48, 16, v20
+; GFX11-TRUE16-NEXT: v_and_b32_e32 v20, 0xffff0000, v20
+; GFX11-TRUE16-NEXT: v_and_b32_e32 v4, 0xffff0000, v4
+; GFX11-TRUE16-NEXT: v_dual_cndmask_b32 v17, v35, v37 :: v_dual_and_b32 v6, 0xffff0000, v6
+; GFX11-TRUE16-NEXT: v_add3_u32 v35, v38, v34, 0x7fff
+; GFX11-TRUE16-NEXT: v_or_b32_e32 v37, 0x400000, v34
+; GFX11-TRUE16-NEXT: v_bfe_u32 v38, v18, 16, 1
+; GFX11-TRUE16-NEXT: v_cmp_u_f32_e32 vcc_lo, v34, v34
+; GFX11-TRUE16-NEXT: v_add_f32_e32 v20, 0x40c00000, v20
+; GFX11-TRUE16-NEXT: v_add_f32_e32 v4, 0x40c00000, v4
+; GFX11-TRUE16-NEXT: v_add_f32_e32 v6, 0x40c00000, v6
; GFX11-TRUE16-NEXT: v_and_b32_e32 v8, 0xffff0000, v8
-; GFX11-TRUE16-NEXT: v_dual_cndmask_b32 v18, v37, v32 :: v_dual_add_f32 v37, 0x40c00000, v38
-; GFX11-TRUE16-NEXT: v_add3_u32 v32, v34, v36, 0x7fff
-; GFX11-TRUE16-NEXT: v_bfe_u32 v34, v35, 16, 1
+; GFX11-TRUE16-NEXT: v_cndmask_b32_e32 v34, v35, v37, vcc_lo
+; GFX11-TRUE16-NEXT: v_add3_u32 v35, v38, v18, 0x7fff
+; GFX11-TRUE16-NEXT: v_or_b32_e32 v37, 0x400000, v18
+; GFX11-TRUE16-NEXT: v_cmp_u_f32_e32 vcc_lo, v18, v18
+; GFX11-TRUE16-NEXT: v_add3_u32 v38, v39, v36, 0x7fff
+; GFX11-TRUE16-NEXT: v_or_b32_e32 v39, 0x400000, v36
+; GFX11-TRUE16-NEXT: v_add_f32_e32 v8, 0x40c00000, v8
+; GFX11-TRUE16-NEXT: v_and_b32_e32 v10, 0xffff0000, v10
+; GFX11-TRUE16-NEXT: v_cndmask_b32_e32 v18, v35, v37, vcc_lo
+; GFX11-TRUE16-NEXT: v_bfe_u32 v37, v19, 16, 1
; GFX11-TRUE16-NEXT: v_cmp_u_f32_e32 vcc_lo, v36, v36
-; GFX11-TRUE16-NEXT: v_and_b32_e32 v39, 0xffff0000, v19
-; GFX11-TRUE16-NEXT: v_bfe_u32 v36, v37, 16, 1
-; GFX11-TRUE16-NEXT: v_dual_add_f32 v83, 0x40c00000, v83 :: v_dual_add_f32 v8, 0x40c00000, v8
-; GFX11-TRUE16-NEXT: v_cndmask_b32_e32 v32, v32, v33, vcc_lo
-; GFX11-TRUE16-NEXT: v_add3_u32 v33, v34, v35, 0x7fff
-; GFX11-TRUE16-NEXT: v_or_b32_e32 v34, 0x400000, v35
-; GFX11-TRUE16-NEXT: v_cmp_u_f32_e32 vcc_lo, v35, v35
-; GFX11-TRUE16-NEXT: v_lshlrev_b32_e32 v38, 16, v19
-; GFX11-TRUE16-NEXT: v_lshlrev_b32_e32 v84, 16, v9
-; GFX11-TRUE16-NEXT: v_lshlrev_b32_e32 v86, 16, v11
-; GFX11-TRUE16-NEXT: v_and_b32_e32 v11, 0xffff0000, v11
-; GFX11-TRUE16-NEXT: v_cndmask_b32_e32 v19, v33, v34, vcc_lo
+; GFX11-TRUE16-NEXT: v_add_f32_e32 v36, 0x40c00000, v48
+; GFX11-TRUE16-NEXT: v_add_f32_e32 v10, 0x40c00000, v10
+; GFX11-TRUE16-NEXT: v_and_b32_e32 v12, 0xffff0000, v12
+; GFX11-TRUE16-NEXT: v_add3_u32 v37, v37, v19, 0x7fff
+; GFX11-TRUE16-NEXT: v_dual_cndmask_b32 v35, v38, v39 :: v_dual_lshlrev_b32 v38, 16, v21
+; GFX11-TRUE16-NEXT: v_or_b32_e32 v39, 0x400000, v19
+; GFX11-TRUE16-NEXT: v_bfe_u32 v48, v36, 16, 1
+; GFX11-TRUE16-NEXT: v_cmp_u_f32_e32 vcc_lo, v19, v19
+; GFX11-TRUE16-NEXT: v_add_f32_e32 v12, 0x40c00000, v12
; GFX11-TRUE16-NEXT: v_add_f32_e32 v38, 0x40c00000, v38
-; GFX11-TRUE16-NEXT: v_add3_u32 v33, v36, v37, 0x7fff
-; GFX11-TRUE16-NEXT: v_or_b32_e32 v34, 0x400000, v37
-; GFX11-TRUE16-NEXT: v_cmp_u_f32_e32 vcc_lo, v37, v37
-; GFX11-TRUE16-NEXT: v_dual_add_f32 v36, 0x40c00000, v39 :: v_dual_lshlrev_b32 v39, 16, v20
-; GFX11-TRUE16-NEXT: v_bfe_u32 v35, v38, 16, 1
-; GFX11-TRUE16-NEXT: v_dual_add_f32 v86, 0x40c00000, v86 :: v_dual_lshlrev_b32 v87, 16, v12
-; GFX11-TRUE16-NEXT: v_cndmask_b32_e32 v33, v33, v34, vcc_lo
-; GFX11-TRUE16-NEXT: s_delay_alu instid0(VALU_DEP_4) | instskip(NEXT) | instid1(VALU_DEP_4)
-; GFX11-TRUE16-NEXT: v_bfe_u32 v37, v36, 16, 1
-; GFX11-TRUE16-NEXT: v_add3_u32 v34, v35, v38, 0x7fff
-; GFX11-TRUE16-NEXT: v_or_b32_e32 v35, 0x400000, v38
-; GFX11-TRUE16-NEXT: v_add_f32_e32 v39, 0x40c00000, v39
-; GFX11-TRUE16-NEXT: v_cmp_u_f32_e32 vcc_lo, v38, v38
-; GFX11-TRUE16-NEXT: v_add_f32_e32 v38, 0x40c00000, v48
-; GFX11-TRUE16-NEXT: v_lshlrev_b32_e32 v48, 16, v21
-; GFX11-TRUE16-NEXT: v_and_b32_e32 v21, 0xffff0000, v21
-; GFX11-TRUE16-NEXT: v_dual_add_f32 v87, 0x40c00000, v87 :: v_dual_lshlrev_b32 v96, 16, v13
-; GFX11-TRUE16-NEXT: v_cndmask_b32_e32 v20, v34, v35, vcc_lo
-; GFX11-TRUE16-NEXT: v_add3_u32 v34, v37, v36, 0x7fff
-; GFX11-TRUE16-NEXT: v_or_b32_e32 v35, 0x400000, v36
-; GFX11-TRUE16-NEXT: v_bfe_u32 v37, v39, 16, 1
+; GFX11-TRUE16-NEXT: v_mov_b16_e32 v18.l, v34.h
+; GFX11-TRUE16-NEXT: v_mov_b16_e32 v17.l, v33.h
+; GFX11-TRUE16-NEXT: v_cndmask_b32_e32 v19, v37, v39, vcc_lo
+; GFX11-TRUE16-NEXT: v_add3_u32 v37, v48, v36, 0x7fff
+; GFX11-TRUE16-NEXT: v_or_b32_e32 v39, 0x400000, v36
+; GFX11-TRUE16-NEXT: v_bfe_u32 v48, v20, 16, 1
; GFX11-TRUE16-NEXT: v_cmp_u_f32_e32 vcc_lo, v36, v36
-; GFX11-TRUE16-NEXT: v_or_b32_e32 v36, 0x400000, v39
-; GFX11-TRUE16-NEXT: v_dual_add_f32 v48, 0x40c00000, v48 :: v_dual_add_f32 v49, 0x40c00000, v21
-; GFX11-TRUE16-NEXT: v_dual_cndmask_b32 v34, v34, v35 :: v_dual_lshlrev_b32 v21, 16, v22
-; GFX11-TRUE16-NEXT: v_add3_u32 v35, v37, v39, 0x7fff
-; GFX11-TRUE16-NEXT: v_bfe_u32 v37, v38, 16, 1
-; GFX11-TRUE16-NEXT: v_cmp_u_f32_e32 vcc_lo, v39, v39
-; GFX11-TRUE16-NEXT: v_bfe_u32 v39, v48, 16, 1
-; GFX11-TRUE16-NEXT: v_add_f32_e32 v50, 0x40c00000, v21
-; GFX11-TRUE16-NEXT: v_and_b32_e32 v22, 0xffff0000, v22
-; GFX11-TRUE16-NEXT: v_or_b32_e32 v97, 0x400000, v87
-; GFX11-TRUE16-NEXT: v_cndmask_b32_e32 v35, v35, v36, vcc_lo
-; GFX11-TRUE16-NEXT: v_add3_u32 v36, v37, v38, 0x7fff
-; GFX11-TRUE16-NEXT: v_or_b32_e32 v37, 0x400000, v38
+; GFX11-TRUE16-NEXT: v_and_b32_e32 v21, 0xffff0000, v21
+; GFX11-TRUE16-NEXT: v_bfe_u32 v49, v38, 16, 1
+; GFX11-TRUE16-NEXT: v_mov_b16_e32 v19.l, v35.h
+; GFX11-TRUE16-NEXT: v_mov_b16_e32 v16.l, v32.h
+; GFX11-TRUE16-NEXT: v_cndmask_b32_e32 v36, v37, v39, vcc_lo
+; GFX11-TRUE16-NEXT: v_add3_u32 v37, v48, v20, 0x7fff
+; GFX11-TRUE16-NEXT: v_or_b32_e32 v39, 0x400000, v20
+; GFX11-TRUE16-NEXT: v_add_f32_e32 v21, 0x40c00000, v21
+; GFX11-TRUE16-NEXT: v_cmp_u_f32_e32 vcc_lo, v20, v20
+; GFX11-TRUE16-NEXT: v_add3_u32 v48, v49, v38, 0x7fff
+; GFX11-TRUE16-NEXT: v_or_b32_e32 v49, 0x400000, v38
+; GFX11-TRUE16-NEXT: v_cndmask_b32_e32 v20, v37, v39, vcc_lo
+; GFX11-TRUE16-NEXT: v_bfe_u32 v39, v21, 16, 1
; GFX11-TRUE16-NEXT: v_cmp_u_f32_e32 vcc_lo, v38, v38
-; GFX11-TRUE16-NEXT: v_or_b32_e32 v38, 0x400000, v48
-; GFX11-TRUE16-NEXT: v_lshrrev_b32_e32 v35, 16, v35
-; GFX11-TRUE16-NEXT: v_lshrrev_b32_e32 v16, 16, v16
-; GFX11-TRUE16-NEXT: v_and_b32_e32 v53, 0xffff0000, v25
-; GFX11-TRUE16-NEXT: v_cndmask_b32_e32 v36, v36, v37, vcc_lo
-; GFX11-TRUE16-NEXT: v_add3_u32 v37, v39, v48, 0x7fff
-; GFX11-TRUE16-NEXT: v_bfe_u32 v39, v49, 16, 1
+; GFX11-TRUE16-NEXT: v_add_f32_e32 v38, 0x40c00000, v50
+; GFX11-TRUE16-NEXT: v_mov_b16_e32 v20.l, v36.h
+; GFX11-TRUE16-NEXT: s_delay_alu instid0(VALU_DEP_4)
+; GFX11-TRUE16-NEXT: v_add3_u32 v39, v39, v21, 0x7fff
+; GFX11-TRUE16-NEXT: v_dual_cndmask_b32 v37, v48, v49 :: v_dual_lshlrev_b32 v48, 16, v23
+; GFX11-TRUE16-NEXT: v_or_b32_e32 v49, 0x400000, v21
+; GFX11-TRUE16-NEXT: v_bfe_u32 v50, v38, 16, 1
+; GFX11-TRUE16-NEXT: v_cmp_u_f32_e32 vcc_lo, v21, v21
+; GFX11-TRUE16-NEXT: v_and_b32_e32 v23, 0xffff0000, v23
+; GFX11-TRUE16-NEXT: s_delay_alu instid0(VALU_DEP_4) | instskip(NEXT) | instid1(VALU_DEP_4)
+; GFX11-TRUE16-NEXT: v_dual_add_f32 v48, 0x40c00000, v48 :: v_dual_cndmask_b32 v21, v39, v49
+; GFX11-TRUE16-NEXT: v_add3_u32 v39, v50, v38, 0x7fff
+; GFX11-TRUE16-NEXT: v_or_b32_e32 v49, 0x400000, v38
+; GFX11-TRUE16-NEXT: v_bfe_u32 v50, v22, 16, 1
+; GFX11-TRUE16-NEXT: v_cmp_u_f32_e32 vcc_lo, v38, v38
+; GFX11-TRUE16-NEXT: v_bfe_u32 v51, v48, 16, 1
+; GFX11-TRUE16-NEXT: v_add_f32_e32 v23, 0x40c00000, v23
+; GFX11-TRUE16-NEXT: v_mov_b16_e32 v21.l, v37.h
+; GFX11-TRUE16-NEXT: v_cndmask_b32_e32 v38, v39, v49, vcc_lo
+; GFX11-TRUE16-NEXT: v_add3_u32 v39, v50, v22, 0x7fff
+; GFX11-TRUE16-NEXT: v_or_b32_e32 v49, 0x400000, v22
+; GFX11-TRUE16-NEXT: v_cmp_u_f32_e32 vcc_lo, v22, v22
+; GFX11-TRUE16-NEXT: v_add3_u32 v50, v51, v48, 0x7fff
+; GFX11-TRUE16-NEXT: v_or_b32_e32 v51, 0x400000, v48
+; GFX11-TRUE16-NEXT: s_delay_alu instid0(VALU_DEP_4) | instskip(SKIP_3) | instid1(VALU_DEP_3)
+; GFX11-TRUE16-NEXT: v_cndmask_b32_e32 v22, v39, v49, vcc_lo
+; GFX11-TRUE16-NEXT: v_bfe_u32 v49, v23, 16, 1
; GFX11-TRUE16-NEXT: v_cmp_u_f32_e32 vcc_lo, v48, v48
-; GFX11-TRUE16-NEXT: v_add_f32_e32 v48, 0x40c00000, v22
-; GFX11-TRUE16-NEXT: v_lshlrev_b32_e32 v64, 16, v28
-; GFX11-TRUE16-NEXT: v_and_b32_e32 v65, 0xffff0000, v29
-; GFX11-TRUE16-NEXT: v_lshlrev_b32_e32 v67, 16, v0
-; GFX11-TRUE16-NEXT: v_cndmask_b32_e32 v21, v37, v38, vcc_lo
-; GFX11-TRUE16-NEXT: v_add3_u32 v37, v39, v49, 0x7fff
-; GFX11-TRUE16-NEXT: v_or_b32_e32 v38, 0x400000, v49
-; GFX11-TRUE16-NEXT: v_cmp_u_f32_e32 vcc_lo, v49, v49
-; GFX11-TRUE16-NEXT: v_bfe_u32 v39, v50, 16, 1
-; GFX11-TRUE16-NEXT: v_lshrrev_b32_e32 v21, 16, v21
-; GFX11-TRUE16-NEXT: v_and_b32_e32 v69, 0xffff0000, v1
-; GFX11-TRUE16-NEXT: v_dual_cndmask_b32 v22, v37, v38 :: v_dual_lshlrev_b32 v71, 16, v4
-; GFX11-TRUE16-NEXT: v_add_f32_e32 v49, 0x40c00000, v51
-; GFX11-TRUE16-NEXT: v_and_b32_e32 v51, 0xffff0000, v23
-; GFX11-TRUE16-NEXT: v_add3_u32 v37, v39, v50, 0x7fff
-; GFX11-TRUE16-NEXT: v_or_b32_e32 v38, 0x400000, v50
-; GFX11-TRUE16-NEXT: v_cmp_u_f32_e32 vcc_lo, v50, v50
-; GFX11-TRUE16-NEXT: v_and_b32_e32 v4, 0xffff0000, v4
-; GFX11-TRUE16-NEXT: v_dual_add_f32 v50, 0x40c00000, v51 :: v_dual_lshlrev_b32 v51, 16, v24
-; GFX11-TRUE16-NEXT: v_bfe_u32 v39, v48, 16, 1
-; GFX11-TRUE16-NEXT: v_cndmask_b32_e32 v23, v37, v38, vcc_lo
-; GFX11-TRUE16-NEXT: v_or_b32_e32 v38, 0x400000, v48
+; GFX11-TRUE16-NEXT: v_add_f32_e32 v48, 0x40c00000, v52
+; GFX11-TRUE16-NEXT: v_add3_u32 v49, v49, v23, 0x7fff
+; GFX11-TRUE16-NEXT: v_dual_cndmask_b32 v39, v50, v51 :: v_dual_lshlrev_b32 v50, 16, v25
+; GFX11-TRUE16-NEXT: v_or_b32_e32 v51, 0x400000, v23
+; GFX11-TRUE16-NEXT: s_delay_alu instid0(VALU_DEP_4) | instskip(SKIP_2) | instid1(VALU_DEP_4)
+; GFX11-TRUE16-NEXT: v_bfe_u32 v52, v48, 16, 1
+; GFX11-TRUE16-NEXT: v_cmp_u_f32_e32 vcc_lo, v23, v23
+; GFX11-TRUE16-NEXT: v_and_b32_e32 v25, 0xffff0000, v25
+; GFX11-TRUE16-NEXT: v_dual_add_f32 v50, 0x40c00000, v50 :: v_dual_cndmask_b32 v23, v49, v51
+; GFX11-TRUE16-NEXT: s_delay_alu instid0(VALU_DEP_4)
+; GFX11-TRUE16-NEXT: v_add3_u32 v49, v52, v48, 0x7fff
+; GFX11-TRUE16-NEXT: v_or_b32_e32 v51, 0x400000, v48
+; GFX11-TRUE16-NEXT: v_bfe_u32 v52, v24, 16, 1
; GFX11-TRUE16-NEXT: v_cmp_u_f32_e32 vcc_lo, v48, v48
-; GFX11-TRUE16-NEXT: v_add_f32_e32 v51, 0x40c00000, v51
-; GFX11-TRUE16-NEXT: v_add3_u32 v37, v39, v48, 0x7fff
-; GFX11-TRUE16-NEXT: v_bfe_u32 v39, v49, 16, 1
-; GFX11-TRUE16-NEXT: v_mov_b16_e32 v34.l, v22.h
-; GFX11-TRUE16-NEXT: v_dual_add_f32 v71, 0x40c00000, v71 :: v_dual_add_f32 v4, 0x40c00000, v4
-; GFX11-TRUE16-NEXT: s_delay_alu instid0(VALU_DEP_4) | instskip(NEXT) | instid1(VALU_DEP_4)
-; GFX11-TRUE16-NEXT: v_cndmask_b32_e32 v37, v37, v38, vcc_lo
-; GFX11-TRUE16-NEXT: v_add3_u32 v38, v39, v49, 0x7fff
-; GFX11-TRUE16-NEXT: v_or_b32_e32 v39, 0x400000, v49
-; GFX11-TRUE16-NEXT: v_bfe_u32 v48, v50, 16, 1
-; GFX11-TRUE16-NEXT: v_cmp_u_f32_e32 vcc_lo, v49, v49
-; GFX11-TRUE16-NEXT: v_dual_add_f32 v49, 0x40c00000, v52 :: v_dual_lshlrev_b32 v52, 16, v25
-; GFX11-TRUE16-NEXT: v_lshl_or_b32 v21, v34, 16, v21
-; GFX11-TRUE16-NEXT: v_dual_cndmask_b32 v24, v38, v39 :: v_dual_and_b32 v5, 0xffff0000, v5
-; GFX11-TRUE16-NEXT: v_add3_u32 v38, v48, v50, 0x7fff
-; GFX11-TRUE16-NEXT: v_or_b32_e32 v39, 0x400000, v50
-; GFX11-TRUE16-NEXT: v_bfe_u32 v48, v51, 16, 1
+; GFX11-TRUE16-NEXT: v_bfe_u32 v53, v50, 16, 1
+; GFX11-TRUE16-NEXT: v_add_f32_e32 v25, 0x40c00000, v25
+; GFX11-TRUE16-NEXT: v_mov_b16_e32 v23.l, v39.h
+; GFX11-TRUE16-NEXT: v_cndmask_b32_e32 v48, v49, v51, vcc_lo
+; GFX11-TRUE16-NEXT: v_add3_u32 v49, v52, v24, 0x7fff
+; GFX11-TRUE16-NEXT: v_or_b32_e32 v51, 0x400000, v24
+; GFX11-TRUE16-NEXT: v_cmp_u_f32_e32 vcc_lo, v24, v24
+; GFX11-TRUE16-NEXT: v_add3_u32 v52, v53, v50, 0x7fff
+; GFX11-TRUE16-NEXT: v_or_b32_e32 v53, 0x400000, v50
+; GFX11-TRUE16-NEXT: s_delay_alu instid0(VALU_DEP_4) | instskip(SKIP_3) | instid1(VALU_DEP_3)
+; GFX11-TRUE16-NEXT: v_cndmask_b32_e32 v24, v49, v51, vcc_lo
+; GFX11-TRUE16-NEXT: v_bfe_u32 v51, v25, 16, 1
; GFX11-TRUE16-NEXT: v_cmp_u_f32_e32 vcc_lo, v50, v50
-; GFX11-TRUE16-NEXT: v_bfe_u32 v50, v49, 16, 1
+; GFX11-TRUE16-NEXT: v_add_f32_e32 v50, 0x40c00000, v54
+; GFX11-TRUE16-NEXT: v_add3_u32 v51, v51, v25, 0x7fff
+; GFX11-TRUE16-NEXT: v_dual_cndmask_b32 v49, v52, v53 :: v_dual_lshlrev_b32 v52, 16, v27
+; GFX11-TRUE16-NEXT: v_or_b32_e32 v53, 0x400000, v25
+; GFX11-TRUE16-NEXT: s_delay_alu instid0(VALU_DEP_4)
+; GFX11-TRUE16-NEXT: v_bfe_u32 v54, v50, 16, 1
+; GFX11-TRUE16-NEXT: v_cmp_u_f32_e32 vcc_lo, v25, v25
+; GFX11-TRUE16-NEXT: v_and_b32_e32 v27, 0xffff0000, v27
; GFX11-TRUE16-NEXT: v_add_f32_e32 v52, 0x40c00000, v52
-; GFX11-TRUE16-NEXT: v_dual_add_f32 v80, 0x40c00000, v80 :: v_dual_add_f32 v5, 0x40c00000, v5
-; GFX11-TRUE16-NEXT: v_cndmask_b32_e32 v38, v38, v39, vcc_lo
-; GFX11-TRUE16-NEXT: v_add3_u32 v39, v48, v51, 0x7fff
-; GFX11-TRUE16-NEXT: v_or_b32_e32 v48, 0x400000, v51
-; GFX11-TRUE16-NEXT: v_cmp_u_f32_e32 vcc_lo, v51, v51
-; GFX11-TRUE16-NEXT: v_add_f32_e32 v51, 0x40c00000, v53
-; GFX11-TRUE16-NEXT: v_lshlrev_b32_e32 v81, 16, v6
-; GFX11-TRUE16-NEXT: v_and_b32_e32 v6, 0xffff0000, v6
-; GFX11-TRUE16-NEXT: v_add_f32_e32 v7, 0x40c00000, v7
-; GFX11-TRUE16-NEXT: v_cndmask_b32_e32 v25, v39, v48, vcc_lo
-; GFX11-TRUE16-NEXT: v_add3_u32 v39, v50, v49, 0x7fff
-; GFX11-TRUE16-NEXT: v_or_b32_e32 v48, 0x400000, v49
-; GFX11-TRUE16-NEXT: v_bfe_u32 v50, v52, 16, 1
-; GFX11-TRUE16-NEXT: v_cmp_u_f32_e32 vcc_lo, v49, v49
-; GFX11-TRUE16-NEXT: v_or_b32_e32 v49, 0x400000, v52
-; GFX11-TRUE16-NEXT: v_dual_add_f32 v81, 0x40c00000, v81 :: v_dual_add_f32 v6, 0x40c00000, v6
-; GFX11-TRUE16-NEXT: v_and_b32_e32 v9, 0xffff0000, v9
-; GFX11-TRUE16-NEXT: v_cndmask_b32_e32 v39, v39, v48, vcc_lo
-; GFX11-TRUE16-NEXT: v_add3_u32 v48, v50, v52, 0x7fff
+; GFX11-TRUE16-NEXT: v_mov_b16_e32 v22.l, v38.h
+; GFX11-TRUE16-NEXT: v_cndmask_b32_e32 v25, v51, v53, vcc_lo
+; GFX11-TRUE16-NEXT: v_add3_u32 v51, v54, v50, 0x7fff
+; GFX11-TRUE16-NEXT: v_or_b32_e32 v53, 0x400000, v50
+; GFX11-TRUE16-NEXT: v_bfe_u32 v54, v26, 16, 1
+; GFX11-TRUE16-NEXT: v_cmp_u_f32_e32 vcc_lo, v50, v50
+; GFX11-TRUE16-NEXT: v_bfe_u32 v55, v52, 16, 1
+; GFX11-TRUE16-NEXT: v_add_f32_e32 v27, 0x40c00000, v27
+; GFX11-TRUE16-NEXT: v_mov_b16_e32 v25.l, v49.h
+; GFX11-TRUE16-NEXT: v_cndmask_b32_e32 v50, v51, v53, vcc_lo
+; GFX11-TRUE16-NEXT: v_add3_u32 v51, v54, v26, 0x7fff
+; GFX11-TRUE16-NEXT: v_or_b32_e32 v53, 0x400000, v26
+; GFX11-TRUE16-NEXT: v_cmp_u_f32_e32 vcc_lo, v26, v26
+; GFX11-TRUE16-NEXT: v_add3_u32 v54, v55, v52, 0x7fff
+; GFX11-TRUE16-NEXT: v_or_b32_e32 v55, 0x400000, v52
+; GFX11-TRUE16-NEXT: s_delay_alu instid0(VALU_DEP_4) | instskip(SKIP_3) | instid1(VALU_DEP_3)
+; GFX11-TRUE16-NEXT: v_cndmask_b32_e32 v26, v51, v53, vcc_lo
+; GFX11-TRUE16-NEXT: v_bfe_u32 v53, v27, 16, 1
; GFX11-TRUE16-NEXT: v_cmp_u_f32_e32 vcc_lo, v52, v52
-; GFX11-TRUE16-NEXT: v_lshlrev_b32_e32 v53, 16, v26
-; GFX11-TRUE16-NEXT: v_bfe_u32 v50, v51, 16, 1
-; GFX11-TRUE16-NEXT: v_and_b32_e32 v26, 0xffff0000, v26
-; GFX11-TRUE16-NEXT: v_dual_add_f32 v84, 0x40c00000, v84 :: v_dual_add_f32 v9, 0x40c00000, v9
-; GFX11-TRUE16-NEXT: v_cndmask_b32_e32 v48, v48, v49, vcc_lo
-; GFX11-TRUE16-NEXT: s_delay_alu instid0(VALU_DEP_3) | instskip(SKIP_3) | instid1(VALU_DEP_4)
-; GFX11-TRUE16-NEXT: v_dual_add_f32 v53, 0x40c00000, v53 :: v_dual_add_f32 v54, 0x40c00000, v26
-; GFX11-TRUE16-NEXT: v_add3_u32 v49, v50, v51, 0x7fff
-; GFX11-TRUE16-NEXT: v_or_b32_e32 v50, 0x400000, v51
-; GFX11-TRUE16-NEXT: v_lshlrev_b32_e32 v26, 16, v27
-; GFX11-TRUE16-NEXT: v_bfe_u32 v52, v53, 16, 1
-; GFX11-TRUE16-NEXT: v_cmp_u_f32_e32 vcc_lo, v51, v51
-; GFX11-TRUE16-NEXT: v_or_b32_e32 v51, 0x400000, v53
-; GFX11-TRUE16-NEXT: v_and_b32_e32 v27, 0xffff0000, v27
-; GFX11-TRUE16-NEXT: v_lshrrev_b32_e32 v48, 16, v48
-; GFX11-TRUE16-NEXT: v_lshlrev_b32_e32 v85, 16, v10
-; GFX11-TRUE16-NEXT: v_cndmask_b32_e32 v49, v49, v50, vcc_lo
-; GFX11-TRUE16-NEXT: v_add3_u32 v50, v52, v53, 0x7fff
-; GFX11-TRUE16-NEXT: v_bfe_u32 v52, v54, 16, 1
-; GFX11-TRUE16-NEXT: v_cmp_u_f32_e32 vcc_lo, v53, v53
-; GFX11-TRUE16-NEXT: v_add_f32_e32 v55, 0x40c00000, v26
-; GFX11-TRUE16-NEXT: v_add_f32_e32 v53, 0x40c00000, v27
-; GFX11-TRUE16-NEXT: v_and_b32_e32 v10, 0xffff0000, v10
-; GFX11-TRUE16-NEXT: v_dual_add_f32 v85, 0x40c00000, v85 :: v_dual_cndmask_b32 v26, v50, v51
-; GFX11-TRUE16-NEXT: v_add3_u32 v50, v52, v54, 0x7fff
-; GFX11-TRUE16-NEXT: v_or_b32_e32 v51, 0x400000, v54
-; GFX11-TRUE16-NEXT: v_bfe_u32 v52, v55, 16, 1
-; GFX11-TRUE16-NEXT: v_cmp_u_f32_e32 vcc_lo, v54, v54
-; GFX11-TRUE16-NEXT: v_add_f32_e32 v54, 0x40c00000, v64
-; GFX11-TRUE16-NEXT: v_and_b32_e32 v64, 0xffff0000, v28
-; GFX11-TRUE16-NEXT: v_dual_add_f32 v10, 0x40c00000, v10 :: v_dual_add_f32 v11, 0x40c00000, v11
-; GFX11-TRUE16-NEXT: v_cndmask_b32_e32 v27, v50, v51, vcc_lo
-; GFX11-TRUE16-NEXT: v_add3_u32 v50, v52, v55, 0x7fff
-; GFX11-TRUE16-NEXT: v_or_b32_e32 v51, 0x400000, v55
-; GFX11-TRUE16-NEXT: v_bfe_u32 v52, v53, 16, 1
-; GFX11-TRUE16-NEXT: v_cmp_u_f32_e32 vcc_lo, v55, v55
-; GFX11-TRUE16-NEXT: v_add_f32_e32 v55, 0x40c00000, v64
-; GFX11-TRUE16-NEXT: v_mov_b16_e32 v33.l, v27.h
-; GFX11-TRUE16-NEXT: v_lshrrev_b32_e32 v26, 16, v26
-; GFX11-TRUE16-NEXT: v_dual_cndmask_b32 v28, v50, v51 :: v_dual_and_b32 v13, 0xffff0000, v13
-; GFX11-TRUE16-NEXT: v_add3_u32 v50, v52, v53, 0x7fff
-; GFX11-TRUE16-NEXT: v_or_b32_e32 v51, 0x400000, v53
-; GFX11-TRUE16-NEXT: v_bfe_u32 v52, v54, 16, 1
-; GFX11-TRUE16-NEXT: v_cmp_u_f32_e32 vcc_lo, v53, v53
-; GFX11-TRUE16-NEXT: v_bfe_u32 v53, v55, 16, 1
-; GFX11-TRUE16-NEXT: v_lshl_or_b32 v26, v33, 16, v26
-; GFX11-TRUE16-NEXT: v_mov_b16_e32 v33.l, v37.h
-; GFX11-TRUE16-NEXT: v_lshrrev_b32_e32 v37, 16, v23
-; GFX11-TRUE16-NEXT: v_cndmask_b32_e32 v50, v50, v51, vcc_lo
-; GFX11-TRUE16-NEXT: v_add3_u32 v51, v52, v54, 0x7fff
-; GFX11-TRUE16-NEXT: v_or_b32_e32 v52, 0x400000, v54
-; GFX11-TRUE16-NEXT: v_cmp_u_f32_e32 vcc_lo, v54, v54
-; GFX11-TRUE16-NEXT: v_lshlrev_b32_e32 v64, 16, v29
-; GFX11-TRUE16-NEXT: v_add_f32_e32 v13, 0x40c00000, v13
+; GFX11-TRUE16-NEXT: v_add_f32_e32 v52, 0x40c00000, v64
+; GFX11-TRUE16-NEXT: v_add3_u32 v53, v53, v27, 0x7fff
+; GFX11-TRUE16-NEXT: v_dual_cndmask_b32 v51, v54, v55 :: v_dual_lshlrev_b32 v54, 16, v29
+; GFX11-TRUE16-NEXT: v_or_b32_e32 v55, 0x400000, v27
; GFX11-TRUE16-NEXT: s_delay_alu instid0(VALU_DEP_4)
-; GFX11-TRUE16-NEXT: v_cndmask_b32_e32 v29, v51, v52, vcc_lo
-; GFX11-TRUE16-NEXT: v_add3_u32 v51, v53, v55, 0x7fff
-; GFX11-TRUE16-NEXT: v_or_b32_e32 v52, 0x400000, v55
-; GFX11-TRUE16-NEXT: v_cmp_u_f32_e32 vcc_lo, v55, v55
-; GFX11-TRUE16-NEXT: v_dual_add_f32 v54, 0x40c00000, v65 :: v_dual_lshlrev_b32 v65, 16, v30
-; GFX11-TRUE16-NEXT: v_add_f32_e32 v64, 0x40c00000, v64
-; GFX11-TRUE16-NEXT: v_mov_b16_e32 v32.l, v50.h
-; GFX11-TRUE16-NEXT: v_cndmask_b32_e32 v51, v51, v52, vcc_lo
-; GFX11-TRUE16-NEXT: s_delay_alu instid0(VALU_DEP_4) | instskip(SKIP_4) | instid1(VALU_DEP_3)
-; GFX11-TRUE16-NEXT: v_bfe_u32 v55, v54, 16, 1
-; GFX11-TRUE16-NEXT: v_add_f32_e32 v65, 0x40c00000, v65
-; GFX11-TRUE16-NEXT: v_bfe_u32 v53, v64, 16, 1
-; GFX11-TRUE16-NEXT: v_cmp_u_f32_e32 vcc_lo, v64, v64
-; GFX11-TRUE16-NEXT: v_lshrrev_b32_e32 v50, 16, v28
-; GFX11-TRUE16-NEXT: v_add3_u32 v52, v53, v64, 0x7fff
-; GFX11-TRUE16-NEXT: v_or_b32_e32 v53, 0x400000, v64
-; GFX11-TRUE16-NEXT: v_add_f32_e32 v64, 0x40c00000, v66
+; GFX11-TRUE16-NEXT: v_bfe_u32 v64, v52, 16, 1
+; GFX11-TRUE16-NEXT: v_cmp_u_f32_e32 vcc_lo, v27, v27
+; GFX11-TRUE16-NEXT: v_and_b32_e32 v29, 0xffff0000, v29
+; GFX11-TRUE16-NEXT: v_add_f32_e32 v54, 0x40c00000, v54
+; GFX11-TRUE16-NEXT: v_mov_b16_e32 v24.l, v48.h
+; GFX11-TRUE16-NEXT: v_cndmask_b32_e32 v27, v53, v55, vcc_lo
+; GFX11-TRUE16-NEXT: v_add3_u32 v53, v64, v52, 0x7fff
+; GFX11-TRUE16-NEXT: v_or_b32_e32 v55, 0x400000, v52
+; GFX11-TRUE16-NEXT: v_bfe_u32 v64, v28, 16, 1
+; GFX11-TRUE16-NEXT: v_cmp_u_f32_e32 vcc_lo, v52, v52
+; GFX11-TRUE16-NEXT: v_bfe_u32 v65, v54, 16, 1
+; GFX11-TRUE16-NEXT: v_add_f32_e32 v29, 0x40c00000, v29
+; GFX11-TRUE16-NEXT: v_mov_b16_e32 v27.l, v51.h
+; GFX11-TRUE16-NEXT: v_cndmask_b32_e32 v52, v53, v55, vcc_lo
+; GFX11-TRUE16-NEXT: v_add3_u32 v53, v64, v28, 0x7fff
+; GFX11-TRUE16-NEXT: v_or_b32_e32 v55, 0x400000, v28
+; GFX11-TRUE16-NEXT: v_cmp_u_f32_e32 vcc_lo, v28, v28
+; GFX11-TRUE16-NEXT: v_add3_u32 v64, v65, v54, 0x7fff
+; GFX11-TRUE16-NEXT: v_or_b32_e32 v65, 0x400000, v54
+; GFX11-TRUE16-NEXT: s_delay_alu instid0(VALU_DEP_4) | instskip(SKIP_3) | instid1(VALU_DEP_3)
+; GFX11-TRUE16-NEXT: v_cndmask_b32_e32 v28, v53, v55, vcc_lo
+; GFX11-TRUE16-NEXT: v_bfe_u32 v55, v29, 16, 1
+; GFX11-TRUE16-NEXT: v_cmp_u_f32_e32 vcc_lo, v54, v54
+; GFX11-TRUE16-NEXT: v_add_f32_e32 v54, 0x40c00000, v66
+; GFX11-TRUE16-NEXT: v_add3_u32 v55, v55, v29, 0x7fff
; GFX11-TRUE16-NEXT: s_waitcnt vmcnt(0)
-; GFX11-TRUE16-NEXT: v_lshlrev_b32_e32 v66, 16, v31
-; GFX11-TRUE16-NEXT: s_delay_alu instid0(VALU_DEP_3)
-; GFX11-TRUE16-NEXT: v_dual_cndmask_b32 v30, v52, v53 :: v_dual_and_b32 v31, 0xffff0000, v31
-; GFX11-TRUE16-NEXT: v_add3_u32 v52, v55, v54, 0x7fff
-; GFX11-TRUE16-NEXT: v_or_b32_e32 v53, 0x400000, v54
+; GFX11-TRUE16-NEXT: v_dual_cndmask_b32 v53, v64, v65 :: v_dual_lshlrev_b32 v64, 16, v31
+; GFX11-TRUE16-NEXT: v_or_b32_e32 v65, 0x400000, v29
+; GFX11-TRUE16-NEXT: v_bfe_u32 v66, v54, 16, 1
+; GFX11-TRUE16-NEXT: v_cmp_u_f32_e32 vcc_lo, v29, v29
+; GFX11-TRUE16-NEXT: v_and_b32_e32 v31, 0xffff0000, v31
+; GFX11-TRUE16-NEXT: s_delay_alu instid0(VALU_DEP_4) | instskip(NEXT) | instid1(VALU_DEP_4)
+; GFX11-TRUE16-NEXT: v_dual_add_f32 v64, 0x40c00000, v64 :: v_dual_cndmask_b32 v29, v55, v65
+; GFX11-TRUE16-NEXT: v_add3_u32 v55, v66, v54, 0x7fff
+; GFX11-TRUE16-NEXT: v_or_b32_e32 v65, 0x400000, v54
+; GFX11-TRUE16-NEXT: v_bfe_u32 v66, v30, 16, 1
; GFX11-TRUE16-NEXT: v_cmp_u_f32_e32 vcc_lo, v54, v54
-; GFX11-TRUE16-NEXT: v_bfe_u32 v55, v65, 16, 1
-; GFX11-TRUE16-NEXT: v_or_b32_e32 v54, 0x400000, v65
-; GFX11-TRUE16-NEXT: v_dual_add_f32 v66, 0x40c00000, v66 :: v_dual_add_f32 v31, 0x40c00000, v31
-; GFX11-TRUE16-NEXT: v_cndmask_b32_e32 v52, v52, v53, vcc_lo
-; GFX11-TRUE16-NEXT: s_delay_alu instid0(VALU_DEP_4) | instskip(SKIP_3) | instid1(VALU_DEP_4)
-; GFX11-TRUE16-NEXT: v_add3_u32 v53, v55, v65, 0x7fff
-; GFX11-TRUE16-NEXT: v_bfe_u32 v55, v64, 16, 1
-; GFX11-TRUE16-NEXT: v_cmp_u_f32_e32 vcc_lo, v65, v65
-; GFX11-TRUE16-NEXT: v_bfe_u32 v65, v66, 16, 1
-; GFX11-TRUE16-NEXT: v_cndmask_b32_e32 v53, v53, v54, vcc_lo
-; GFX11-TRUE16-NEXT: s_delay_alu instid0(VALU_DEP_4) | instskip(SKIP_4) | instid1(VALU_DEP_4)
-; GFX11-TRUE16-NEXT: v_add3_u32 v54, v55, v64, 0x7fff
-; GFX11-TRUE16-NEXT: v_or_b32_e32 v55, 0x400000, v64
-; GFX11-TRUE16-NEXT: v_cmp_u_f32_e32 vcc_lo, v64, v64
-; GFX11-TRUE16-NEXT: v_or_b32_e32 v64, 0x400000, v66
-; GFX11-TRUE16-NEXT: v_lshrrev_b32_e32 v53, 16, v53
-; GFX11-TRUE16-NEXT: v_cndmask_b32_e32 v54, v54, v55, vcc_lo
-; GFX11-TRUE16-NEXT: v_add3_u32 v55, v65, v66, 0x7fff
+; GFX11-TRUE16-NEXT: v_bfe_u32 v67, v64, 16, 1
+; GFX11-TRUE16-NEXT: v_add_f32_e32 v31, 0x40c00000, v31
+; GFX11-TRUE16-NEXT: v_mov_b16_e32 v29.l, v53.h
+; GFX11-TRUE16-NEXT: v_cndmask_b32_e32 v54, v55, v65, vcc_lo
+; GFX11-TRUE16-NEXT: v_add3_u32 v55, v66, v30, 0x7fff
+; GFX11-TRUE16-NEXT: v_or_b32_e32 v65, 0x400000, v30
+; GFX11-TRUE16-NEXT: v_cmp_u_f32_e32 vcc_lo, v30, v30
+; GFX11-TRUE16-NEXT: v_add3_u32 v66, v67, v64, 0x7fff
+; GFX11-TRUE16-NEXT: v_or_b32_e32 v67, 0x400000, v64
+; GFX11-TRUE16-NEXT: s_delay_alu instid0(VALU_DEP_4) | instskip(SKIP_3) | instid1(VALU_DEP_3)
+; GFX11-TRUE16-NEXT: v_cndmask_b32_e32 v30, v55, v65, vcc_lo
; GFX11-TRUE16-NEXT: v_bfe_u32 v65, v31, 16, 1
-; GFX11-TRUE16-NEXT: v_cmp_u_f32_e32 vcc_lo, v66, v66
-; GFX11-TRUE16-NEXT: v_dual_add_f32 v67, 0x40c00000, v67 :: v_dual_add_f32 v66, 0x40c00000, v68
-; GFX11-TRUE16-NEXT: v_lshlrev_b32_e32 v68, 16, v1
-; GFX11-TRUE16-NEXT: v_cndmask_b32_e32 v0, v55, v64, vcc_lo
-; GFX11-TRUE16-NEXT: v_add3_u32 v55, v65, v31, 0x7fff
-; GFX11-TRUE16-NEXT: v_or_b32_e32 v64, 0x400000, v31
-; GFX11-TRUE16-NEXT: v_bfe_u32 v65, v67, 16, 1
+; GFX11-TRUE16-NEXT: v_cmp_u_f32_e32 vcc_lo, v64, v64
+; GFX11-TRUE16-NEXT: v_add_f32_e32 v64, 0x40c00000, v68
+; GFX11-TRUE16-NEXT: v_add3_u32 v65, v65, v31, 0x7fff
+; GFX11-TRUE16-NEXT: v_dual_cndmask_b32 v55, v66, v67 :: v_dual_lshlrev_b32 v66, 16, v1
+; GFX11-TRUE16-NEXT: v_or_b32_e32 v67, 0x400000, v31
+; GFX11-TRUE16-NEXT: s_delay_alu instid0(VALU_DEP_4) | instskip(SKIP_2) | instid1(VALU_DEP_4)
+; GFX11-TRUE16-NEXT: v_bfe_u32 v68, v64, 16, 1
; GFX11-TRUE16-NEXT: v_cmp_u_f32_e32 vcc_lo, v31, v31
-; GFX11-TRUE16-NEXT: s_delay_alu instid0(VALU_DEP_3) | instskip(NEXT) | instid1(VALU_DEP_3)
-; GFX11-TRUE16-NEXT: v_cndmask_b32_e32 v31, v55, v64, vcc_lo
-; GFX11-TRUE16-NEXT: v_add3_u32 v55, v65, v67, 0x7fff
-; GFX11-TRUE16-NEXT: v_or_b32_e32 v64, 0x400000, v67
-; GFX11-TRUE16-NEXT: v_cmp_u_f32_e32 vcc_lo, v67, v67
-; GFX11-TRUE16-NEXT: v_add_f32_e32 v67, 0x40c00000, v69
-; GFX11-TRUE16-NEXT: v_bfe_u32 v65, v66, 16, 1
-; GFX11-TRUE16-NEXT: v_lshlrev_b32_e32 v69, 16, v2
-; GFX11-TRUE16-NEXT: v_mov_b16_e32 v31.l, v31.h
-; GFX11-TRUE16-NEXT: v_cndmask_b32_e32 v1, v55, v64, vcc_lo
-; GFX11-TRUE16-NEXT: v_or_b32_e32 v64, 0x400000, v66
-; GFX11-TRUE16-NEXT: v_add3_u32 v55, v65, v66, 0x7fff
+; GFX11-TRUE16-NEXT: v_and_b32_e32 v1, 0xffff0000, v1
+; GFX11-TRUE16-NEXT: v_dual_add_f32 v66, 0x40c00000, v66 :: v_dual_cndmask_b32 v31, v65, v67
+; GFX11-TRUE16-NEXT: s_delay_alu instid0(VALU_DEP_4)
+; GFX11-TRUE16-NEXT: v_add3_u32 v65, v68, v64, 0x7fff
+; GFX11-TRUE16-NEXT: v_or_b32_e32 v67, 0x400000, v64
+; GFX11-TRUE16-NEXT: v_bfe_u32 v68, v0, 16, 1
+; GFX11-TRUE16-NEXT: v_cmp_u_f32_e32 vcc_lo, v64, v64
+; GFX11-TRUE16-NEXT: v_bfe_u32 v69, v66, 16, 1
+; GFX11-TRUE16-NEXT: v_add_f32_e32 v1, 0x40c00000, v1
+; GFX11-TRUE16-NEXT: v_mov_b16_e32 v31.l, v55.h
+; GFX11-TRUE16-NEXT: v_cndmask_b32_e32 v64, v65, v67, vcc_lo
+; GFX11-TRUE16-NEXT: v_add3_u32 v65, v68, v0, 0x7fff
+; GFX11-TRUE16-NEXT: v_or_b32_e32 v67, 0x400000, v0
+; GFX11-TRUE16-NEXT: v_cmp_u_f32_e32 vcc_lo, v0, v0
+; GFX11-TRUE16-NEXT: v_add3_u32 v68, v69, v66, 0x7fff
+; GFX11-TRUE16-NEXT: v_or_b32_e32 v69, 0x400000, v66
+; GFX11-TRUE16-NEXT: s_delay_alu instid0(VALU_DEP_4) | instskip(SKIP_3) | instid1(VALU_DEP_3)
+; GFX11-TRUE16-NEXT: v_cndmask_b32_e32 v0, v65, v67, vcc_lo
+; GFX11-TRUE16-NEXT: v_bfe_u32 v65, v1, 16, 1
; GFX11-TRUE16-NEXT: v_cmp_u_f32_e32 vcc_lo, v66, v66
-; GFX11-TRUE16-NEXT: v_bfe_u32 v66, v67, 16, 1
-; GFX11-TRUE16-NEXT: v_add_f32_e32 v69, 0x40c00000, v69
-; GFX11-TRUE16-NEXT: s_delay_alu instid0(VALU_DEP_4) | instskip(SKIP_1) | instid1(VALU_DEP_2)
-; GFX11-TRUE16-NEXT: v_cndmask_b32_e32 v55, v55, v64, vcc_lo
+; GFX11-TRUE16-NEXT: v_add_f32_e32 v67, 0x40c00000, v70
+; GFX11-TRUE16-NEXT: v_add3_u32 v65, v65, v1, 0x7fff
+; GFX11-TRUE16-NEXT: v_cndmask_b32_e32 v66, v68, v69, vcc_lo
+; GFX11-TRUE16-NEXT: v_lshlrev_b32_e32 v68, 16, v3
+; GFX11-TRUE16-NEXT: v_or_b32_e32 v69, 0x400000, v1
+; GFX11-TRUE16-NEXT: v_bfe_u32 v70, v67, 16, 1
+; GFX11-TRUE16-NEXT: v_cmp_u_f32_e32 vcc_lo, v1, v1
+; GFX11-TRUE16-NEXT: v_and_b32_e32 v3, 0xffff0000, v3
; GFX11-TRUE16-NEXT: v_add_f32_e32 v68, 0x40c00000, v68
-; GFX11-TRUE16-NEXT: v_mov_b16_e32 v27.l, v55.h
-; GFX11-TRUE16-NEXT: s_delay_alu instid0(VALU_DEP_2) | instskip(SKIP_2) | instid1(VALU_DEP_3)
-; GFX11-TRUE16-NEXT: v_bfe_u32 v65, v68, 16, 1
-; GFX11-TRUE16-NEXT: v_cmp_u_f32_e32 vcc_lo, v68, v68
-; GFX11-TRUE16-NEXT: v_lshrrev_b32_e32 v55, 16, v1
-; GFX11-TRUE16-NEXT: v_add3_u32 v64, v65, v68, 0x7fff
-; GFX11-TRUE16-NEXT: v_or_b32_e32 v65, 0x400000, v68
-; GFX11-TRUE16-NEXT: v_add_f32_e32 v68, 0x40c00000, v70
-; GFX11-TRUE16-NEXT: v_lshlrev_b32_e32 v70, 16, v3
-; GFX11-TRUE16-NEXT: s_delay_alu instid0(VALU_DEP_3)
-; GFX11-TRUE16-NEXT: v_dual_cndmask_b32 v2, v64, v65 :: v_dual_and_b32 v3, 0xffff0000, v3
-; GFX11-TRUE16-NEXT: v_add3_u32 v64, v66, v67, 0x7fff
-; GFX11-TRUE16-NEXT: v_or_b32_e32 v65, 0x400000, v67
-; GFX11-TRUE16-NEXT: v_bfe_u32 v66, v69, 16, 1
+; GFX11-TRUE16-NEXT: v_mov_b16_e32 v26.l, v50.h
+; GFX11-TRUE16-NEXT: v_cndmask_b32_e32 v1, v65, v69, vcc_lo
+; GFX11-TRUE16-NEXT: v_add3_u32 v65, v70, v67, 0x7fff
+; GFX11-TRUE16-NEXT: v_or_b32_e32 v69, 0x400000, v67
+; GFX11-TRUE16-NEXT: v_bfe_u32 v70, v2, 16, 1
; GFX11-TRUE16-NEXT: v_cmp_u_f32_e32 vcc_lo, v67, v67
-; GFX11-TRUE16-NEXT: v_bfe_u32 v67, v68, 16, 1
-; GFX11-TRUE16-NEXT: v_dual_add_f32 v70, 0x40c00000, v70 :: v_dual_add_f32 v3, 0x40c00000, v3
-; GFX11-TRUE16-NEXT: v_cndmask_b32_e32 v64, v64, v65, vcc_lo
-; GFX11-TRUE16-NEXT: v_add3_u32 v65, v66, v69, 0x7fff
-; GFX11-TRUE16-NEXT: v_or_b32_e32 v66, 0x400000, v69
-; GFX11-TRUE16-NEXT: v_cmp_u_f32_e32 vcc_lo, v69, v69
-; GFX11-TRUE16-NEXT: v_bfe_u32 v69, v70, 16, 1
-; GFX11-TRUE16-NEXT: s_delay_alu instid0(VALU_DEP_3)
-; GFX11-TRUE16-NEXT: v_cndmask_b32_e32 v65, v65, v66, vcc_lo
-; GFX11-TRUE16-NEXT: v_add3_u32 v66, v67, v68, 0x7fff
-; GFX11-TRUE16-NEXT: v_or_b32_e32 v67, 0x400000, v68
-; GFX11-TRUE16-NEXT: v_cmp_u_f32_e32 vcc_lo, v68, v68
-; GFX11-TRUE16-NEXT: v_or_b32_e32 v68, 0x400000, v70
-; GFX11-TRUE16-NEXT: v_lshrrev_b32_e32 v65, 16, v65
+; GFX11-TRUE16-NEXT: v_bfe_u32 v71, v68, 16, 1
+; GFX11-TRUE16-NEXT: v_add_f32_e32 v3, 0x40c00000, v3
+; GFX11-TRUE16-NEXT: s_delay_alu instid0(VALU_DEP_4)
+; GFX11-TRUE16-NEXT: v_add3_u32 v67, v70, v2, 0x7fff
+; GFX11-TRUE16-NEXT: v_cndmask_b32_e32 v65, v65, v69, vcc_lo
+; GFX11-TRUE16-NEXT: v_or_b32_e32 v69, 0x400000, v2
+; GFX11-TRUE16-NEXT: v_cmp_u_f32_e32 vcc_lo, v2, v2
+; GFX11-TRUE16-NEXT: v_add3_u32 v70, v71, v68, 0x7fff
+; GFX11-TRUE16-NEXT: v_or_b32_e32 v71, 0x400000, v68
; GFX11-TRUE16-NEXT: s_delay_alu instid0(VALU_DEP_4) | instskip(SKIP_4) | instid1(VALU_DEP_4)
-; GFX11-TRUE16-NEXT: v_cndmask_b32_e32 v66, v66, v67, vcc_lo
-; GFX11-TRUE16-NEXT: v_add3_u32 v67, v69, v70, 0x7fff
-; GFX11-TRUE16-NEXT: v_bfe_u32 v69, v3, 16, 1
-; GFX11-TRUE16-NEXT: v_cmp_u_f32_e32 vcc_lo, v70, v70
-; GFX11-TRUE16-NEXT: v_bfe_u32 v70, v71, 16, 1
-; GFX11-TRUE16-NEXT: v_cndmask_b32_e32 v67, v67, v68, vcc_lo
-; GFX11-TRUE16-NEXT: s_delay_alu instid0(VALU_DEP_4) | instskip(SKIP_2) | instid1(VALU_DEP_4)
-; GFX11-TRUE16-NEXT: v_add3_u32 v68, v69, v3, 0x7fff
-; GFX11-TRUE16-NEXT: v_or_b32_e32 v69, 0x400000, v3
+; GFX11-TRUE16-NEXT: v_cndmask_b32_e32 v2, v67, v69, vcc_lo
+; GFX11-TRUE16-NEXT: v_bfe_u32 v67, v3, 16, 1
+; GFX11-TRUE16-NEXT: v_cmp_u_f32_e32 vcc_lo, v68, v68
+; GFX11-TRUE16-NEXT: v_add_f32_e32 v69, 0x40c00000, v80
+; GFX11-TRUE16-NEXT: v_mov_b16_e32 v2.l, v65.h
+; GFX11-TRUE16-NEXT: v_add3_u32 v67, v67, v3, 0x7fff
+; GFX11-TRUE16-NEXT: v_cndmask_b32_e32 v68, v70, v71, vcc_lo
+; GFX11-TRUE16-NEXT: v_lshlrev_b32_e32 v70, 16, v5
+; GFX11-TRUE16-NEXT: v_or_b32_e32 v71, 0x400000, v3
+; GFX11-TRUE16-NEXT: v_bfe_u32 v80, v69, 16, 1
; GFX11-TRUE16-NEXT: v_cmp_u_f32_e32 vcc_lo, v3, v3
-; GFX11-TRUE16-NEXT: v_lshrrev_b32_e32 v67, 16, v67
-; GFX11-TRUE16-NEXT: s_delay_alu instid0(VALU_DEP_3)
-; GFX11-TRUE16-NEXT: v_cndmask_b32_e32 v3, v68, v69, vcc_lo
-; GFX11-TRUE16-NEXT: v_add3_u32 v68, v70, v71, 0x7fff
-; GFX11-TRUE16-NEXT: v_or_b32_e32 v69, 0x400000, v71
-; GFX11-TRUE16-NEXT: v_bfe_u32 v70, v4, 16, 1
-; GFX11-TRUE16-NEXT: v_cmp_u_f32_e32 vcc_lo, v71, v71
-; GFX11-TRUE16-NEXT: v_bfe_u32 v71, v80, 16, 1
-; GFX11-TRUE16-NEXT: s_delay_alu instid0(VALU_DEP_4) | instskip(NEXT) | instid1(VALU_DEP_4)
-; GFX11-TRUE16-NEXT: v_cndmask_b32_e32 v68, v68, v69, vcc_lo
-; GFX11-TRUE16-NEXT: v_add3_u32 v69, v70, v4, 0x7fff
-; GFX11-TRUE16-NEXT: v_or_b32_e32 v70, 0x400000, v4
+; GFX11-TRUE16-NEXT: v_and_b32_e32 v5, 0xffff0000, v5
+; GFX11-TRUE16-NEXT: v_add_f32_e32 v70, 0x40c00000, v70
+; GFX11-TRUE16-NEXT: v_mov_b16_e32 v28.l, v52.h
+; GFX11-TRUE16-NEXT: v_cndmask_b32_e32 v3, v67, v71, vcc_lo
+; GFX11-TRUE16-NEXT: v_add3_u32 v67, v80, v69, 0x7fff
+; GFX11-TRUE16-NEXT: v_or_b32_e32 v71, 0x400000, v69
+; GFX11-TRUE16-NEXT: v_bfe_u32 v80, v4, 16, 1
+; GFX11-TRUE16-NEXT: v_cmp_u_f32_e32 vcc_lo, v69, v69
+; GFX11-TRUE16-NEXT: v_bfe_u32 v81, v70, 16, 1
+; GFX11-TRUE16-NEXT: v_add_f32_e32 v5, 0x40c00000, v5
+; GFX11-TRUE16-NEXT: v_mov_b16_e32 v3.l, v68.h
+; GFX11-TRUE16-NEXT: v_add3_u32 v69, v80, v4, 0x7fff
+; GFX11-TRUE16-NEXT: v_cndmask_b32_e32 v67, v67, v71, vcc_lo
+; GFX11-TRUE16-NEXT: v_or_b32_e32 v71, 0x400000, v4
; GFX11-TRUE16-NEXT: v_cmp_u_f32_e32 vcc_lo, v4, v4
-; GFX11-TRUE16-NEXT: s_delay_alu instid0(VALU_DEP_4) | instskip(NEXT) | instid1(VALU_DEP_3)
-; GFX11-TRUE16-NEXT: v_lshrrev_b32_e32 v68, 16, v68
-; GFX11-TRUE16-NEXT: v_cndmask_b32_e32 v4, v69, v70, vcc_lo
-; GFX11-TRUE16-NEXT: v_add3_u32 v69, v71, v80, 0x7fff
-; GFX11-TRUE16-NEXT: v_or_b32_e32 v70, 0x400000, v80
-; GFX11-TRUE16-NEXT: v_bfe_u32 v71, v5, 16, 1
-; GFX11-TRUE16-NEXT: v_cmp_u_f32_e32 vcc_lo, v80, v80
-; GFX11-TRUE16-NEXT: v_bfe_u32 v80, v81, 16, 1
-; GFX11-TRUE16-NEXT: v_mov_b16_e32 v22.l, v4.h
-; GFX11-TRUE16-NEXT: v_cndmask_b32_e32 v69, v69, v70, vcc_lo
-; GFX11-TRUE16-NEXT: v_add3_u32 v70, v71, v5, 0x7fff
-; GFX11-TRUE16-NEXT: v_or_b32_e32 v71, 0x400000, v5
+; GFX11-TRUE16-NEXT: v_add3_u32 v80, v81, v70, 0x7fff
+; GFX11-TRUE16-NEXT: v_or_b32_e32 v81, 0x400000, v70
+; GFX11-TRUE16-NEXT: s_delay_alu instid0(VALU_DEP_4) | instskip(SKIP_4) | instid1(VALU_DEP_4)
+; GFX11-TRUE16-NEXT: v_cndmask_b32_e32 v4, v69, v71, vcc_lo
+; GFX11-TRUE16-NEXT: v_bfe_u32 v69, v5, 16, 1
+; GFX11-TRUE16-NEXT: v_cmp_u_f32_e32 vcc_lo, v70, v70
+; GFX11-TRUE16-NEXT: v_add_f32_e32 v71, 0x40c00000, v82
+; GFX11-TRUE16-NEXT: v_mov_b16_e32 v4.l, v67.h
+; GFX11-TRUE16-NEXT: v_add3_u32 v69, v69, v5, 0x7fff
+; GFX11-TRUE16-NEXT: v_cndmask_b32_e32 v70, v80, v81, vcc_lo
+; GFX11-TRUE16-NEXT: v_lshlrev_b32_e32 v80, 16, v7
+; GFX11-TRUE16-NEXT: v_or_b32_e32 v81, 0x400000, v5
+; GFX11-TRUE16-NEXT: v_bfe_u32 v82, v71, 16, 1
; GFX11-TRUE16-NEXT: v_cmp_u_f32_e32 vcc_lo, v5, v5
-; GFX11-TRUE16-NEXT: s_delay_alu instid0(VALU_DEP_4) | instskip(NEXT) | instid1(VALU_DEP_3)
-; GFX11-TRUE16-NEXT: v_lshrrev_b32_e32 v69, 16, v69
-; GFX11-TRUE16-NEXT: v_cndmask_b32_e32 v5, v70, v71, vcc_lo
-; GFX11-TRUE16-NEXT: v_add3_u32 v70, v80, v81, 0x7fff
-; GFX11-TRUE16-NEXT: v_or_b32_e32 v71, 0x400000, v81
-; GFX11-TRUE16-NEXT: v_bfe_u32 v80, v6, 16, 1
-; GFX11-TRUE16-NEXT: v_cmp_u_f32_e32 vcc_lo, v81, v81
-; GFX11-TRUE16-NEXT: v_bfe_u32 v81, v82, 16, 1
-; GFX11-TRUE16-NEXT: v_mov_b16_e32 v17.l, v5.h
-; GFX11-TRUE16-NEXT: v_cndmask_b32_e32 v70, v70, v71, vcc_lo
-; GFX11-TRUE16-NEXT: v_add3_u32 v71, v80, v6, 0x7fff
-; GFX11-TRUE16-NEXT: v_or_b32_e32 v80, 0x400000, v6
-; GFX11-TRUE16-NEXT: v_cmp_u_f32_e32 vcc_lo, v6, v6
-; GFX11-TRUE16-NEXT: s_delay_alu instid0(VALU_DEP_2)
-; GFX11-TRUE16-NEXT: v_cndmask_b32_e32 v6, v71, v80, vcc_lo
-; GFX11-TRUE16-NEXT: v_add3_u32 v71, v81, v82, 0x7fff
-; GFX11-TRUE16-NEXT: v_or_b32_e32 v80, 0x400000, v82
-; GFX11-TRUE16-NEXT: v_bfe_u32 v81, v7, 16, 1
-; GFX11-TRUE16-NEXT: v_cmp_u_f32_e32 vcc_lo, v82, v82
-; GFX11-TRUE16-NEXT: v_bfe_u32 v82, v83, 16, 1
+; GFX11-TRUE16-NEXT: v_and_b32_e32 v7, 0xffff0000, v7
; GFX11-TRUE16-NEXT: s_delay_alu instid0(VALU_DEP_4) | instskip(NEXT) | instid1(VALU_DEP_4)
-; GFX11-TRUE16-NEXT: v_cndmask_b32_e32 v71, v71, v80, vcc_lo
-; GFX11-TRUE16-NEXT: v_add3_u32 v80, v81, v7, 0x7fff
-; GFX11-TRUE16-NEXT: v_or_b32_e32 v81, 0x400000, v7
+; GFX11-TRUE16-NEXT: v_dual_add_f32 v80, 0x40c00000, v80 :: v_dual_cndmask_b32 v5, v69, v81
+; GFX11-TRUE16-NEXT: v_add3_u32 v69, v82, v71, 0x7fff
+; GFX11-TRUE16-NEXT: v_or_b32_e32 v81, 0x400000, v71
+; GFX11-TRUE16-NEXT: v_bfe_u32 v82, v6, 16, 1
+; GFX11-TRUE16-NEXT: v_cmp_u_f32_e32 vcc_lo, v71, v71
+; GFX11-TRUE16-NEXT: v_bfe_u32 v83, v80, 16, 1
+; GFX11-TRUE16-NEXT: v_add_f32_e32 v7, 0x40c00000, v7
+; GFX11-TRUE16-NEXT: v_mov_b16_e32 v5.l, v70.h
+; GFX11-TRUE16-NEXT: v_add3_u32 v71, v82, v6, 0x7fff
+; GFX11-TRUE16-NEXT: v_cndmask_b32_e32 v69, v69, v81, vcc_lo
+; GFX11-TRUE16-NEXT: v_or_b32_e32 v81, 0x400000, v6
+; GFX11-TRUE16-NEXT: v_cmp_u_f32_e32 vcc_lo, v6, v6
+; GFX11-TRUE16-NEXT: v_add3_u32 v82, v83, v80, 0x7fff
+; GFX11-TRUE16-NEXT: v_or_b32_e32 v83, 0x400000, v80
+; GFX11-TRUE16-NEXT: s_delay_alu instid0(VALU_DEP_4) | instskip(SKIP_4) | instid1(VALU_DEP_4)
+; GFX11-TRUE16-NEXT: v_cndmask_b32_e32 v6, v71, v81, vcc_lo
+; GFX11-TRUE16-NEXT: v_bfe_u32 v71, v7, 16, 1
+; GFX11-TRUE16-NEXT: v_cmp_u_f32_e32 vcc_lo, v80, v80
+; GFX11-TRUE16-NEXT: v_add_f32_e32 v81, 0x40c00000, v84
+; GFX11-TRUE16-NEXT: v_mov_b16_e32 v6.l, v69.h
+; GFX11-TRUE16-NEXT: v_add3_u32 v71, v71, v7, 0x7fff
+; GFX11-TRUE16-NEXT: v_cndmask_b32_e32 v80, v82, v83, vcc_lo
+; GFX11-TRUE16-NEXT: v_lshlrev_b32_e32 v82, 16, v9
+; GFX11-TRUE16-NEXT: v_or_b32_e32 v83, 0x400000, v7
+; GFX11-TRUE16-NEXT: v_bfe_u32 v84, v81, 16, 1
; GFX11-TRUE16-NEXT: v_cmp_u_f32_e32 vcc_lo, v7, v7
-; GFX11-TRUE16-NEXT: s_delay_alu instid0(VALU_DEP_2)
-; GFX11-TRUE16-NEXT: v_cndmask_b32_e32 v7, v80, v81, vcc_lo
-; GFX11-TRUE16-NEXT: v_add3_u32 v80, v82, v83, 0x7fff
-; GFX11-TRUE16-NEXT: v_or_b32_e32 v81, 0x400000, v83
-; GFX11-TRUE16-NEXT: v_bfe_u32 v82, v8, 16, 1
-; GFX11-TRUE16-NEXT: v_cmp_u_f32_e32 vcc_lo, v83, v83
-; GFX11-TRUE16-NEXT: v_bfe_u32 v83, v84, 16, 1
+; GFX11-TRUE16-NEXT: v_and_b32_e32 v9, 0xffff0000, v9
; GFX11-TRUE16-NEXT: s_delay_alu instid0(VALU_DEP_4) | instskip(NEXT) | instid1(VALU_DEP_4)
-; GFX11-TRUE16-NEXT: v_cndmask_b32_e32 v80, v80, v81, vcc_lo
-; GFX11-TRUE16-NEXT: v_add3_u32 v81, v82, v8, 0x7fff
-; GFX11-TRUE16-NEXT: v_or_b32_e32 v82, 0x400000, v8
+; GFX11-TRUE16-NEXT: v_dual_add_f32 v82, 0x40c00000, v82 :: v_dual_cndmask_b32 v7, v71, v83
+; GFX11-TRUE16-NEXT: v_add3_u32 v71, v84, v81, 0x7fff
+; GFX11-TRUE16-NEXT: v_or_b32_e32 v83, 0x400000, v81
+; GFX11-TRUE16-NEXT: v_bfe_u32 v84, v8, 16, 1
+; GFX11-TRUE16-NEXT: v_cmp_u_f32_e32 vcc_lo, v81, v81
+; GFX11-TRUE16-NEXT: v_bfe_u32 v85, v82, 16, 1
+; GFX11-TRUE16-NEXT: v_add_f32_e32 v9, 0x40c00000, v9
+; GFX11-TRUE16-NEXT: v_mov_b16_e32 v7.l, v80.h
+; GFX11-TRUE16-NEXT: v_add3_u32 v81, v84, v8, 0x7fff
+; GFX11-TRUE16-NEXT: v_cndmask_b32_e32 v71, v71, v83, vcc_lo
+; GFX11-TRUE16-NEXT: v_or_b32_e32 v83, 0x400000, v8
; GFX11-TRUE16-NEXT: v_cmp_u_f32_e32 vcc_lo, v8, v8
-; GFX11-TRUE16-NEXT: s_delay_alu instid0(VALU_DEP_2)
-; GFX11-TRUE16-NEXT: v_cndmask_b32_e32 v8, v81, v82, vcc_lo
-; GFX11-TRUE16-NEXT: v_add3_u32 v81, v83, v84, 0x7fff
-; GFX11-TRUE16-NEXT: v_or_b32_e32 v82, 0x400000, v84
-; GFX11-TRUE16-NEXT: v_bfe_u32 v83, v9, 16, 1
-; GFX11-TRUE16-NEXT: v_cmp_u_f32_e32 vcc_lo, v84, v84
-; GFX11-TRUE16-NEXT: v_bfe_u32 v84, v85, 16, 1
-; GFX11-TRUE16-NEXT: s_delay_alu instid0(VALU_DEP_4) | instskip(NEXT) | instid1(VALU_DEP_4)
-; GFX11-TRUE16-NEXT: v_cndmask_b32_e32 v81, v81, v82, vcc_lo
-; GFX11-TRUE16-NEXT: v_add3_u32 v82, v83, v9, 0x7fff
-; GFX11-TRUE16-NEXT: v_or_b32_e32 v83, 0x400000, v9
+; GFX11-TRUE16-NEXT: v_add3_u32 v84, v85, v82, 0x7fff
+; GFX11-TRUE16-NEXT: v_or_b32_e32 v85, 0x400000, v82
+; GFX11-TRUE16-NEXT: s_delay_alu instid0(VALU_DEP_4) | instskip(SKIP_4) | instid1(VALU_DEP_4)
+; GFX11-TRUE16-NEXT: v_cndmask_b32_e32 v8, v81, v83, vcc_lo
+; GFX11-TRUE16-NEXT: v_bfe_u32 v81, v9, 16, 1
+; GFX11-TRUE16-NEXT: v_cmp_u_f32_e32 vcc_lo, v82, v82
+; GFX11-TRUE16-NEXT: v_add_f32_e32 v83, 0x40c00000, v86
+; GFX11-TRUE16-NEXT: v_mov_b16_e32 v8.l, v71.h
+; GFX11-TRUE16-NEXT: v_add3_u32 v81, v81, v9, 0x7fff
+; GFX11-TRUE16-NEXT: v_cndmask_b32_e32 v82, v84, v85, vcc_lo
+; GFX11-TRUE16-NEXT: v_lshlrev_b32_e32 v84, 16, v11
+; GFX11-TRUE16-NEXT: v_or_b32_e32 v85, 0x400000, v9
+; GFX11-TRUE16-NEXT: v_bfe_u32 v86, v83, 16, 1
; GFX11-TRUE16-NEXT: v_cmp_u_f32_e32 vcc_lo, v9, v9
-; GFX11-TRUE16-NEXT: s_delay_alu instid0(VALU_DEP_2)
-; GFX11-TRUE16-NEXT: v_cndmask_b32_e32 v9, v82, v83, vcc_lo
-; GFX11-TRUE16-NEXT: v_add3_u32 v82, v84, v85, 0x7fff
-; GFX11-TRUE16-NEXT: v_or_b32_e32 v83, 0x400000, v85
-; GFX11-TRUE16-NEXT: v_bfe_u32 v84, v10, 16, 1
-; GFX11-TRUE16-NEXT: v_cmp_u_f32_e32 vcc_lo, v85, v85
-; GFX11-TRUE16-NEXT: v_bfe_u32 v85, v86, 16, 1
-; GFX11-TRUE16-NEXT: v_mov_b16_e32 v7.l, v9.h
-; GFX11-TRUE16-NEXT: v_lshrrev_b32_e32 v9, 16, v81
-; GFX11-TRUE16-NEXT: v_cndmask_b32_e32 v82, v82, v83, vcc_lo
-; GFX11-TRUE16-NEXT: v_add3_u32 v83, v84, v10, 0x7fff
-; GFX11-TRUE16-NEXT: v_or_b32_e32 v84, 0x400000, v10
+; GFX11-TRUE16-NEXT: v_and_b32_e32 v11, 0xffff0000, v11
+; GFX11-TRUE16-NEXT: v_add_f32_e32 v84, 0x40c00000, v84
+; GFX11-TRUE16-NEXT: v_mov_b16_e32 v30.l, v54.h
+; GFX11-TRUE16-NEXT: v_cndmask_b32_e32 v9, v81, v85, vcc_lo
+; GFX11-TRUE16-NEXT: v_add3_u32 v81, v86, v83, 0x7fff
+; GFX11-TRUE16-NEXT: v_or_b32_e32 v85, 0x400000, v83
+; GFX11-TRUE16-NEXT: v_bfe_u32 v86, v10, 16, 1
+; GFX11-TRUE16-NEXT: v_cmp_u_f32_e32 vcc_lo, v83, v83
+; GFX11-TRUE16-NEXT: v_bfe_u32 v87, v84, 16, 1
+; GFX11-TRUE16-NEXT: v_add_f32_e32 v11, 0x40c00000, v11
+; GFX11-TRUE16-NEXT: v_mov_b16_e32 v9.l, v82.h
+; GFX11-TRUE16-NEXT: v_add3_u32 v83, v86, v10, 0x7fff
+; GFX11-TRUE16-NEXT: v_cndmask_b32_e32 v81, v81, v85, vcc_lo
+; GFX11-TRUE16-NEXT: v_or_b32_e32 v85, 0x400000, v10
; GFX11-TRUE16-NEXT: v_cmp_u_f32_e32 vcc_lo, v10, v10
-; GFX11-TRUE16-NEXT: v_lshl_or_b32 v9, v7, 16, v9
-; GFX11-TRUE16-NEXT: s_delay_alu instid0(VALU_DEP_3)
-; GFX11-TRUE16-NEXT: v_cndmask_b32_e32 v10, v83, v84, vcc_lo
-; GFX11-TRUE16-NEXT: v_add3_u32 v83, v85, v86, 0x7fff
-; GFX11-TRUE16-NEXT: v_or_b32_e32 v84, 0x400000, v86
-; GFX11-TRUE16-NEXT: v_bfe_u32 v85, v11, 16, 1
-; GFX11-TRUE16-NEXT: v_cmp_u_f32_e32 vcc_lo, v86, v86
-; GFX11-TRUE16-NEXT: v_add_f32_e32 v86, 0x40c00000, v96
-; GFX11-TRUE16-NEXT: v_or_b32_e32 v96, 0x400000, v11
-; GFX11-TRUE16-NEXT: v_mov_b16_e32 v6.l, v10.h
-; GFX11-TRUE16-NEXT: v_add3_u32 v85, v85, v11, 0x7fff
-; GFX11-TRUE16-NEXT: v_cndmask_b32_e32 v83, v83, v84, vcc_lo
-; GFX11-TRUE16-NEXT: v_bfe_u32 v84, v87, 16, 1
+; GFX11-TRUE16-NEXT: v_add3_u32 v86, v87, v84, 0x7fff
+; GFX11-TRUE16-NEXT: v_or_b32_e32 v87, 0x400000, v84
+; GFX11-TRUE16-NEXT: s_delay_alu instid0(VALU_DEP_4) | instskip(SKIP_4) | instid1(VALU_DEP_4)
+; GFX11-TRUE16-NEXT: v_cndmask_b32_e32 v10, v83, v85, vcc_lo
+; GFX11-TRUE16-NEXT: v_bfe_u32 v83, v11, 16, 1
+; GFX11-TRUE16-NEXT: v_cmp_u_f32_e32 vcc_lo, v84, v84
+; GFX11-TRUE16-NEXT: v_add_f32_e32 v85, 0x40c00000, v96
+; GFX11-TRUE16-NEXT: v_mov_b16_e32 v10.l, v81.h
+; GFX11-TRUE16-NEXT: v_add3_u32 v83, v83, v11, 0x7fff
+; GFX11-TRUE16-NEXT: v_cndmask_b32_e32 v84, v86, v87, vcc_lo
+; GFX11-TRUE16-NEXT: v_or_b32_e32 v86, 0x400000, v11
+; GFX11-TRUE16-NEXT: v_lshlrev_b32_e32 v87, 16, v13
+; GFX11-TRUE16-NEXT: v_bfe_u32 v96, v85, 16, 1
; GFX11-TRUE16-NEXT: v_cmp_u_f32_e32 vcc_lo, v11, v11
-; GFX11-TRUE16-NEXT: v_bfe_u32 v99, v86, 16, 1
-; GFX11-TRUE16-NEXT: v_lshrrev_b32_e32 v10, 16, v82
-; GFX11-TRUE16-NEXT: s_delay_alu instid0(VALU_DEP_4)
-; GFX11-TRUE16-NEXT: v_add3_u32 v84, v84, v87, 0x7fff
-; GFX11-TRUE16-NEXT: v_cndmask_b32_e32 v11, v85, v96, vcc_lo
-; GFX11-TRUE16-NEXT: v_cmp_u_f32_e32 vcc_lo, v87, v87
-; GFX11-TRUE16-NEXT: v_add3_u32 v87, v99, v86, 0x7fff
-; GFX11-TRUE16-NEXT: v_or_b32_e32 v96, 0x400000, v86
-; GFX11-TRUE16-NEXT: v_lshl_or_b32 v10, v6, 16, v10
-; GFX11-TRUE16-NEXT: v_mov_b16_e32 v5.l, v11.h
-; GFX11-TRUE16-NEXT: v_cndmask_b32_e32 v84, v84, v97, vcc_lo
-; GFX11-TRUE16-NEXT: v_cmp_u_f32_e32 vcc_lo, v86, v86
-; GFX11-TRUE16-NEXT: v_lshrrev_b32_e32 v11, 16, v83
-; GFX11-TRUE16-NEXT: v_cndmask_b32_e32 v86, v87, v96, vcc_lo
-; GFX11-TRUE16-NEXT: v_lshlrev_b32_e32 v96, 16, v15
+; GFX11-TRUE16-NEXT: v_and_b32_e32 v13, 0xffff0000, v13
+; GFX11-TRUE16-NEXT: v_mov_b16_e32 v0.l, v64.h
+; GFX11-TRUE16-NEXT: v_cndmask_b32_e32 v11, v83, v86, vcc_lo
+; GFX11-TRUE16-NEXT: v_add_f32_e32 v83, 0x40c00000, v87
+; GFX11-TRUE16-NEXT: v_add3_u32 v86, v96, v85, 0x7fff
+; GFX11-TRUE16-NEXT: v_or_b32_e32 v87, 0x400000, v85
+; GFX11-TRUE16-NEXT: v_bfe_u32 v96, v12, 16, 1
+; GFX11-TRUE16-NEXT: v_cmp_u_f32_e32 vcc_lo, v85, v85
+; GFX11-TRUE16-NEXT: v_bfe_u32 v97, v83, 16, 1
+; GFX11-TRUE16-NEXT: v_add_f32_e32 v13, 0x40c00000, v13
+; GFX11-TRUE16-NEXT: v_or_b32_e32 v98, 0x400000, v83
+; GFX11-TRUE16-NEXT: v_mov_b16_e32 v11.l, v84.h
+; GFX11-TRUE16-NEXT: v_cndmask_b32_e32 v85, v86, v87, vcc_lo
+; GFX11-TRUE16-NEXT: v_add3_u32 v86, v96, v12, 0x7fff
+; GFX11-TRUE16-NEXT: v_or_b32_e32 v87, 0x400000, v12
+; GFX11-TRUE16-NEXT: v_cmp_u_f32_e32 vcc_lo, v12, v12
+; GFX11-TRUE16-NEXT: v_add3_u32 v96, v97, v83, 0x7fff
+; GFX11-TRUE16-NEXT: v_lshlrev_b32_e32 v97, 16, v14
+; GFX11-TRUE16-NEXT: v_bfe_u32 v99, v13, 16, 1
+; GFX11-TRUE16-NEXT: v_and_b32_e32 v14, 0xffff0000, v14
+; GFX11-TRUE16-NEXT: v_cndmask_b32_e32 v12, v86, v87, vcc_lo
+; GFX11-TRUE16-NEXT: v_cmp_u_f32_e32 vcc_lo, v83, v83
+; GFX11-TRUE16-NEXT: v_add_f32_e32 v86, 0x40c00000, v97
+; GFX11-TRUE16-NEXT: v_add3_u32 v87, v99, v13, 0x7fff
+; GFX11-TRUE16-NEXT: v_add_f32_e32 v14, 0x40c00000, v14
+; GFX11-TRUE16-NEXT: v_mov_b16_e32 v12.l, v85.h
+; GFX11-TRUE16-NEXT: v_cndmask_b32_e32 v83, v96, v98, vcc_lo
+; GFX11-TRUE16-NEXT: v_or_b32_e32 v96, 0x400000, v13
+; GFX11-TRUE16-NEXT: v_lshlrev_b32_e32 v98, 16, v15
+; GFX11-TRUE16-NEXT: v_cmp_u_f32_e32 vcc_lo, v13, v13
+; GFX11-TRUE16-NEXT: v_bfe_u32 v97, v86, 16, 1
; GFX11-TRUE16-NEXT: v_and_b32_e32 v15, 0xffff0000, v15
-; GFX11-TRUE16-NEXT: s_delay_alu instid0(VALU_DEP_4) | instskip(SKIP_2) | instid1(VALU_DEP_4)
-; GFX11-TRUE16-NEXT: v_lshl_or_b32 v11, v5, 16, v11
-; GFX11-TRUE16-NEXT: v_mov_b16_e32 v5.l, v6.h
-; GFX11-TRUE16-NEXT: v_lshrrev_b32_e32 v6, 16, v70
-; GFX11-TRUE16-NEXT: v_dual_add_f32 v96, 0x40c00000, v96 :: v_dual_add_f32 v15, 0x40c00000, v15
-; GFX11-TRUE16-NEXT: s_delay_alu instid0(VALU_DEP_2) | instskip(NEXT) | instid1(VALU_DEP_2)
-; GFX11-TRUE16-NEXT: v_lshl_or_b32 v6, v5, 16, v6
-; GFX11-TRUE16-NEXT: v_bfe_u32 v101, v96, 16, 1
-; GFX11-TRUE16-NEXT: s_delay_alu instid0(VALU_DEP_3)
-; GFX11-TRUE16-NEXT: v_bfe_u32 v102, v15, 16, 1
-; GFX11-TRUE16-NEXT: v_or_b32_e32 v113, 0x400000, v15
-; GFX11-TRUE16-NEXT: v_or_b32_e32 v114, 0x400000, v96
-; GFX11-TRUE16-NEXT: v_lshl_or_b32 v5, v17, 16, v69
-; GFX11-TRUE16-NEXT: v_add3_u32 v101, v101, v96, 0x7fff
-; GFX11-TRUE16-NEXT: v_add3_u32 v102, v102, v15, 0x7fff
-; GFX11-TRUE16-NEXT: v_and_b32_e32 v12, 0xffff0000, v12
-; GFX11-TRUE16-NEXT: v_mov_b16_e32 v17.l, v66.h
-; GFX11-TRUE16-NEXT: v_lshrrev_b32_e32 v66, 16, v0
-; GFX11-TRUE16-NEXT: v_lshl_or_b32 v0, v27, 16, v55
-; GFX11-TRUE16-NEXT: v_mov_b16_e32 v27.l, v51.h
-; GFX11-TRUE16-NEXT: v_add_f32_e32 v12, 0x40c00000, v12
-; GFX11-TRUE16-NEXT: v_lshrrev_b32_e32 v51, 16, v29
-; GFX11-TRUE16-NEXT: v_lshl_or_b32 v31, v31, 16, v66
-; GFX11-TRUE16-NEXT: s_delay_alu instid0(VALU_DEP_3) | instskip(SKIP_1) | instid1(VALU_DEP_4)
-; GFX11-TRUE16-NEXT: v_bfe_u32 v98, v12, 16, 1
-; GFX11-TRUE16-NEXT: v_or_b32_e32 v97, 0x400000, v12
-; GFX11-TRUE16-NEXT: v_lshl_or_b32 v28, v27, 16, v51
-; GFX11-TRUE16-NEXT: v_lshl_or_b32 v27, v32, 16, v50
-; GFX11-TRUE16-NEXT: v_mov_b16_e32 v32.l, v38.h
-; GFX11-TRUE16-NEXT: v_add3_u32 v85, v98, v12, 0x7fff
-; GFX11-TRUE16-NEXT: v_lshlrev_b32_e32 v98, 16, v14
-; GFX11-TRUE16-NEXT: v_lshrrev_b32_e32 v38, 16, v24
-; GFX11-TRUE16-NEXT: s_delay_alu instid0(VALU_DEP_2) | instskip(SKIP_1) | instid1(VALU_DEP_3)
+; GFX11-TRUE16-NEXT: v_or_b32_e32 v100, 0x400000, v14
+; GFX11-TRUE16-NEXT: v_mov_b16_e32 v1.l, v66.h
+; GFX11-TRUE16-NEXT: v_cndmask_b32_e32 v13, v87, v96, vcc_lo
; GFX11-TRUE16-NEXT: v_add_f32_e32 v87, 0x40c00000, v98
-; GFX11-TRUE16-NEXT: v_bfe_u32 v98, v13, 16, 1
-; GFX11-TRUE16-NEXT: v_lshl_or_b32 v23, v32, 16, v38
-; GFX11-TRUE16-NEXT: v_mov_b16_e32 v32.l, v34.h
-; GFX11-TRUE16-NEXT: v_lshrrev_b32_e32 v34, 16, v20
+; GFX11-TRUE16-NEXT: v_add3_u32 v96, v97, v86, 0x7fff
+; GFX11-TRUE16-NEXT: v_or_b32_e32 v97, 0x400000, v86
+; GFX11-TRUE16-NEXT: v_bfe_u32 v98, v14, 16, 1
+; GFX11-TRUE16-NEXT: v_cmp_u_f32_e32 vcc_lo, v86, v86
; GFX11-TRUE16-NEXT: v_bfe_u32 v99, v87, 16, 1
-; GFX11-TRUE16-NEXT: v_add3_u32 v98, v98, v13, 0x7fff
-; GFX11-TRUE16-NEXT: v_and_b32_e32 v14, 0xffff0000, v14
-; GFX11-TRUE16-NEXT: v_or_b32_e32 v103, 0x400000, v87
-; GFX11-TRUE16-NEXT: v_lshrrev_b32_e32 v38, 16, v18
-; GFX11-TRUE16-NEXT: v_add3_u32 v99, v99, v87, 0x7fff
-; GFX11-TRUE16-NEXT: s_delay_alu instid0(VALU_DEP_4) | instskip(NEXT) | instid1(VALU_DEP_1)
-; GFX11-TRUE16-NEXT: v_add_f32_e32 v14, 0x40c00000, v14
-; GFX11-TRUE16-NEXT: v_bfe_u32 v100, v14, 16, 1
-; GFX11-TRUE16-NEXT: v_or_b32_e32 v112, 0x400000, v14
+; GFX11-TRUE16-NEXT: v_add_f32_e32 v15, 0x40c00000, v15
+; GFX11-TRUE16-NEXT: v_mov_b16_e32 v13.l, v83.h
+; GFX11-TRUE16-NEXT: v_cndmask_b32_e32 v86, v96, v97, vcc_lo
+; GFX11-TRUE16-NEXT: v_add3_u32 v97, v98, v14, 0x7fff
+; GFX11-TRUE16-NEXT: v_add3_u32 v98, v99, v87, 0x7fff
+; GFX11-TRUE16-NEXT: v_or_b32_e32 v99, 0x400000, v87
+; GFX11-TRUE16-NEXT: v_cmp_u_f32_e32 vcc_lo, v87, v87
+; GFX11-TRUE16-NEXT: v_bfe_u32 v96, v15, 16, 1
+; GFX11-TRUE16-NEXT: v_or_b32_e32 v101, 0x400000, v15
+; GFX11-TRUE16-NEXT: s_delay_alu instid0(VALU_DEP_4) | instskip(SKIP_1) | instid1(VALU_DEP_4)
+; GFX11-TRUE16-NEXT: v_cndmask_b32_e32 v87, v98, v99, vcc_lo
; GFX11-TRUE16-NEXT: v_cmp_u_f32_e32 vcc_lo, v14, v14
-; GFX11-TRUE16-NEXT: s_delay_alu instid0(VALU_DEP_3) | instskip(NEXT) | instid1(VALU_DEP_1)
-; GFX11-TRUE16-NEXT: v_add3_u32 v100, v100, v14, 0x7fff
-; GFX11-TRUE16-NEXT: v_cndmask_b32_e32 v14, v100, v112, vcc_lo
+; GFX11-TRUE16-NEXT: v_add3_u32 v96, v96, v15, 0x7fff
+; GFX11-TRUE16-NEXT: v_cndmask_b32_e32 v14, v97, v100, vcc_lo
; GFX11-TRUE16-NEXT: v_cmp_u_f32_e32 vcc_lo, v15, v15
-; GFX11-TRUE16-NEXT: s_delay_alu instid0(VALU_DEP_2) | instskip(SKIP_2) | instid1(VALU_DEP_2)
-; GFX11-TRUE16-NEXT: v_mov_b16_e32 v4.l, v14.h
-; GFX11-TRUE16-NEXT: v_cndmask_b32_e32 v15, v102, v113, vcc_lo
-; GFX11-TRUE16-NEXT: v_cmp_u_f32_e32 vcc_lo, v96, v96
-; GFX11-TRUE16-NEXT: v_mov_b16_e32 v3.l, v15.h
-; GFX11-TRUE16-NEXT: v_cndmask_b32_e32 v96, v101, v114, vcc_lo
-; GFX11-TRUE16-NEXT: v_cmp_u_f32_e32 vcc_lo, v87, v87
-; GFX11-TRUE16-NEXT: s_delay_alu instid0(VALU_DEP_2) | instskip(SKIP_3) | instid1(VALU_DEP_4)
-; GFX11-TRUE16-NEXT: v_lshrrev_b32_e32 v15, 16, v96
-; GFX11-TRUE16-NEXT: v_cndmask_b32_e32 v87, v99, v103, vcc_lo
-; GFX11-TRUE16-NEXT: v_or_b32_e32 v99, 0x400000, v13
-; GFX11-TRUE16-NEXT: v_cmp_u_f32_e32 vcc_lo, v13, v13
-; GFX11-TRUE16-NEXT: v_lshl_or_b32 v15, v3, 16, v15
-; GFX11-TRUE16-NEXT: s_delay_alu instid0(VALU_DEP_4) | instskip(NEXT) | instid1(VALU_DEP_4)
-; GFX11-TRUE16-NEXT: v_lshrrev_b32_e32 v14, 16, v87
-; GFX11-TRUE16-NEXT: v_cndmask_b32_e32 v13, v98, v99, vcc_lo
-; GFX11-TRUE16-NEXT: v_cmp_u_f32_e32 vcc_lo, v12, v12
-; GFX11-TRUE16-NEXT: s_delay_alu instid0(VALU_DEP_3) | instskip(NEXT) | instid1(VALU_DEP_3)
-; GFX11-TRUE16-NEXT: v_lshl_or_b32 v14, v4, 16, v14
-; GFX11-TRUE16-NEXT: v_mov_b16_e32 v3.l, v13.h
-; GFX11-TRUE16-NEXT: v_cndmask_b32_e32 v12, v85, v97, vcc_lo
-; GFX11-TRUE16-NEXT: v_lshrrev_b32_e32 v13, 16, v86
-; GFX11-TRUE16-NEXT: s_delay_alu instid0(VALU_DEP_2) | instskip(SKIP_1) | instid1(VALU_DEP_3)
-; GFX11-TRUE16-NEXT: v_mov_b16_e32 v4.l, v12.h
-; GFX11-TRUE16-NEXT: v_lshrrev_b32_e32 v12, 16, v84
-; GFX11-TRUE16-NEXT: v_lshl_or_b32 v13, v3, 16, v13
-; GFX11-TRUE16-NEXT: v_mov_b16_e32 v3.l, v8.h
-; GFX11-TRUE16-NEXT: v_lshrrev_b32_e32 v8, 16, v80
-; GFX11-TRUE16-NEXT: s_delay_alu instid0(VALU_DEP_4) | instskip(SKIP_2) | instid1(VALU_DEP_4)
-; GFX11-TRUE16-NEXT: v_lshl_or_b32 v12, v4, 16, v12
-; GFX11-TRUE16-NEXT: v_mov_b16_e32 v4.l, v7.h
-; GFX11-TRUE16-NEXT: v_lshrrev_b32_e32 v7, 16, v71
-; GFX11-TRUE16-NEXT: v_lshl_or_b32 v8, v3, 16, v8
-; GFX11-TRUE16-NEXT: v_mov_b16_e32 v3.l, v3.h
-; GFX11-TRUE16-NEXT: s_delay_alu instid0(VALU_DEP_3)
-; GFX11-TRUE16-NEXT: v_lshl_or_b32 v7, v4, 16, v7
-; GFX11-TRUE16-NEXT: v_lshl_or_b32 v4, v22, 16, v68
-; GFX11-TRUE16-NEXT: v_mov_b16_e32 v22.l, v64.h
-; GFX11-TRUE16-NEXT: v_lshrrev_b32_e32 v64, 16, v2
-; GFX11-TRUE16-NEXT: v_lshl_or_b32 v2, v17, 16, v65
-; GFX11-TRUE16-NEXT: v_mov_b16_e32 v17.l, v54.h
-; GFX11-TRUE16-NEXT: v_lshl_or_b32 v3, v3, 16, v67
-; GFX11-TRUE16-NEXT: s_delay_alu instid0(VALU_DEP_4) | instskip(SKIP_4) | instid1(VALU_DEP_3)
-; GFX11-TRUE16-NEXT: v_lshl_or_b32 v1, v22, 16, v64
-; GFX11-TRUE16-NEXT: v_mov_b16_e32 v22.l, v52.h
-; GFX11-TRUE16-NEXT: v_lshrrev_b32_e32 v52, 16, v30
-; GFX11-TRUE16-NEXT: v_lshl_or_b32 v30, v17, 16, v53
-; GFX11-TRUE16-NEXT: v_mov_b16_e32 v17.l, v49.h
-; GFX11-TRUE16-NEXT: v_lshl_or_b32 v29, v22, 16, v52
-; GFX11-TRUE16-NEXT: v_mov_b16_e32 v22.l, v39.h
-; GFX11-TRUE16-NEXT: v_lshrrev_b32_e32 v39, 16, v25
-; GFX11-TRUE16-NEXT: s_delay_alu instid0(VALU_DEP_4)
-; GFX11-TRUE16-NEXT: v_lshl_or_b32 v25, v17, 16, v48
-; GFX11-TRUE16-NEXT: v_mov_b16_e32 v17.l, v36.h
-; GFX11-TRUE16-NEXT: v_lshrrev_b32_e32 v36, 16, v19
-; GFX11-TRUE16-NEXT: v_lshl_or_b32 v19, v32, 16, v34
-; GFX11-TRUE16-NEXT: v_lshl_or_b32 v24, v22, 16, v39
-; GFX11-TRUE16-NEXT: v_lshl_or_b32 v22, v33, 16, v37
-; GFX11-TRUE16-NEXT: v_mov_b16_e32 v33.l, v33.h
-; GFX11-TRUE16-NEXT: v_mov_b16_e32 v37.l, v32.h
-; GFX11-TRUE16-NEXT: v_mov_b16_e32 v39.l, v17.h
-; GFX11-TRUE16-NEXT: v_lshl_or_b32 v20, v17, 16, v35
-; GFX11-TRUE16-NEXT: s_delay_alu instid0(VALU_DEP_4) | instskip(NEXT) | instid1(VALU_DEP_4)
-; GFX11-TRUE16-NEXT: v_lshl_or_b32 v18, v33, 16, v36
-; GFX11-TRUE16-NEXT: v_lshl_or_b32 v17, v37, 16, v38
+; GFX11-TRUE16-NEXT: v_mov_b16_e32 v14.l, v86.h
; GFX11-TRUE16-NEXT: s_delay_alu instid0(VALU_DEP_4)
-; GFX11-TRUE16-NEXT: v_lshl_or_b32 v16, v39, 16, v16
+; GFX11-TRUE16-NEXT: v_cndmask_b32_e32 v15, v96, v101, vcc_lo
+; GFX11-TRUE16-NEXT: v_mov_b16_e32 v15.l, v87.h
; GFX11-TRUE16-NEXT: .LBB104_2: ; %end
; GFX11-TRUE16-NEXT: s_or_b32 exec_lo, exec_lo, s0
; GFX11-TRUE16-NEXT: s_waitcnt vmcnt(0)
@@ -230606,641 +236493,1242 @@ define inreg <64 x i16> @bitcast_v64bf16_to_v64i16_scalar(<64 x bfloat> inreg %a
; GFX9-NEXT: s_waitcnt vmcnt(0)
; GFX9-NEXT: s_setpc_b64 s[30:31]
;
-; GFX11-LABEL: bitcast_v64bf16_to_v64i16_scalar:
-; GFX11: ; %bb.0:
-; GFX11-NEXT: s_waitcnt vmcnt(0) expcnt(0) lgkmcnt(0)
-; GFX11-NEXT: v_cmp_ne_u32_e32 vcc_lo, 0, v14
-; GFX11-NEXT: v_dual_mov_b32 v31, v13 :: v_dual_mov_b32 v30, v12
-; GFX11-NEXT: v_dual_mov_b32 v29, v11 :: v_dual_mov_b32 v28, v10
-; GFX11-NEXT: v_dual_mov_b32 v27, v9 :: v_dual_mov_b32 v26, v8
-; GFX11-NEXT: v_dual_mov_b32 v25, v7 :: v_dual_mov_b32 v24, v6
-; GFX11-NEXT: v_dual_mov_b32 v23, v5 :: v_dual_mov_b32 v22, v4
-; GFX11-NEXT: v_dual_mov_b32 v21, v3 :: v_dual_mov_b32 v20, v2
-; GFX11-NEXT: v_dual_mov_b32 v19, v1 :: v_dual_mov_b32 v18, v0
-; GFX11-NEXT: v_dual_mov_b32 v16, s28 :: v_dual_mov_b32 v17, s29
-; GFX11-NEXT: s_mov_b32 s15, s3
-; GFX11-NEXT: s_mov_b32 s14, s2
-; GFX11-NEXT: s_mov_b32 s13, s1
-; GFX11-NEXT: s_mov_b32 s12, s0
-; GFX11-NEXT: s_mov_b32 s0, 0
-; GFX11-NEXT: s_and_b32 s1, vcc_lo, exec_lo
-; GFX11-NEXT: s_cbranch_scc0 .LBB105_3
-; GFX11-NEXT: ; %bb.1: ; %Flow
-; GFX11-NEXT: s_and_not1_b32 vcc_lo, exec_lo, s0
-; GFX11-NEXT: s_cbranch_vccnz .LBB105_4
-; GFX11-NEXT: .LBB105_2: ; %cmp.true
-; GFX11-NEXT: v_and_b32_e32 v2, 0xffff0000, v17
-; GFX11-NEXT: v_lshlrev_b32_e32 v1, 16, v16
-; GFX11-NEXT: v_and_b32_e32 v4, 0xffff0000, v18
-; GFX11-NEXT: s_and_b32 s0, s12, 0xffff0000
-; GFX11-NEXT: s_lshl_b32 s1, s24, 16
-; GFX11-NEXT: s_delay_alu instid0(VALU_DEP_2) | instskip(SKIP_1) | instid1(VALU_DEP_2)
-; GFX11-NEXT: v_dual_add_f32 v2, 0x40c00000, v2 :: v_dual_add_f32 v1, 0x40c00000, v1
-; GFX11-NEXT: v_lshlrev_b32_e32 v3, 16, v17
-; GFX11-NEXT: v_bfe_u32 v7, v2, 16, 1
-; GFX11-NEXT: s_delay_alu instid0(VALU_DEP_3)
-; GFX11-NEXT: v_bfe_u32 v6, v1, 16, 1
-; GFX11-NEXT: v_or_b32_e32 v9, 0x400000, v1
-; GFX11-NEXT: v_or_b32_e32 v10, 0x400000, v2
-; GFX11-NEXT: v_add_f32_e32 v3, 0x40c00000, v3
-; GFX11-NEXT: v_add_nc_u32_e32 v7, v7, v2
-; GFX11-NEXT: v_add_nc_u32_e32 v6, v6, v1
-; GFX11-NEXT: s_delay_alu instid0(VALU_DEP_2) | instskip(NEXT) | instid1(VALU_DEP_2)
-; GFX11-NEXT: v_add_nc_u32_e32 v7, 0x7fff, v7
-; GFX11-NEXT: v_add_nc_u32_e32 v6, 0x7fff, v6
-; GFX11-NEXT: v_and_b32_e32 v0, 0xffff0000, v16
-; GFX11-NEXT: s_delay_alu instid0(VALU_DEP_1) | instskip(NEXT) | instid1(VALU_DEP_1)
-; GFX11-NEXT: v_add_f32_e32 v0, 0x40c00000, v0
-; GFX11-NEXT: v_bfe_u32 v5, v0, 16, 1
-; GFX11-NEXT: v_or_b32_e32 v8, 0x400000, v0
-; GFX11-NEXT: v_cmp_u_f32_e32 vcc_lo, v0, v0
-; GFX11-NEXT: s_delay_alu instid0(VALU_DEP_3) | instskip(SKIP_3) | instid1(VALU_DEP_4)
-; GFX11-NEXT: v_add_nc_u32_e32 v5, v5, v0
-; GFX11-NEXT: v_add_f32_e32 v0, 0x40c00000, v4
-; GFX11-NEXT: v_bfe_u32 v11, v3, 16, 1
-; GFX11-NEXT: v_lshlrev_b32_e32 v4, 16, v18
-; GFX11-NEXT: v_add_nc_u32_e32 v5, 0x7fff, v5
-; GFX11-NEXT: s_delay_alu instid0(VALU_DEP_1)
-; GFX11-NEXT: v_cndmask_b32_e32 v16, v5, v8, vcc_lo
-; GFX11-NEXT: v_cmp_u_f32_e32 vcc_lo, v1, v1
-; GFX11-NEXT: v_add_nc_u32_e32 v1, v11, v3
-; GFX11-NEXT: v_or_b32_e32 v5, 0x400000, v3
-; GFX11-NEXT: v_cndmask_b32_e32 v17, v6, v9, vcc_lo
-; GFX11-NEXT: v_cmp_u_f32_e32 vcc_lo, v2, v2
-; GFX11-NEXT: s_delay_alu instid0(VALU_DEP_4) | instskip(SKIP_4) | instid1(VALU_DEP_3)
-; GFX11-NEXT: v_add_nc_u32_e32 v1, 0x7fff, v1
-; GFX11-NEXT: v_bfe_u32 v2, v0, 16, 1
-; GFX11-NEXT: v_and_b32_e32 v6, 0xffff0000, v19
-; GFX11-NEXT: v_cndmask_b32_e32 v32, v7, v10, vcc_lo
-; GFX11-NEXT: v_cmp_u_f32_e32 vcc_lo, v3, v3
-; GFX11-NEXT: v_dual_add_f32 v3, 0x40c00000, v6 :: v_dual_add_nc_u32 v2, v2, v0
-; GFX11-NEXT: v_or_b32_e32 v6, 0x400000, v0
-; GFX11-NEXT: v_dual_cndmask_b32 v18, v1, v5 :: v_dual_lshlrev_b32 v5, 16, v19
-; GFX11-NEXT: s_delay_alu instid0(VALU_DEP_3) | instskip(NEXT) | instid1(VALU_DEP_4)
-; GFX11-NEXT: v_add_nc_u32_e32 v2, 0x7fff, v2
-; GFX11-NEXT: v_bfe_u32 v7, v3, 16, 1
-; GFX11-NEXT: v_cmp_u_f32_e32 vcc_lo, v0, v0
-; GFX11-NEXT: s_delay_alu instid0(VALU_DEP_4) | instskip(NEXT) | instid1(VALU_DEP_3)
-; GFX11-NEXT: v_dual_add_f32 v5, 0x40c00000, v5 :: v_dual_add_f32 v4, 0x40c00000, v4
-; GFX11-NEXT: v_dual_cndmask_b32 v33, v2, v6 :: v_dual_add_nc_u32 v2, v7, v3
-; GFX11-NEXT: v_and_b32_e32 v7, 0xffff0000, v20
-; GFX11-NEXT: s_delay_alu instid0(VALU_DEP_3) | instskip(SKIP_2) | instid1(VALU_DEP_3)
-; GFX11-NEXT: v_bfe_u32 v1, v4, 16, 1
-; GFX11-NEXT: v_cmp_u_f32_e32 vcc_lo, v4, v4
-; GFX11-NEXT: v_bfe_u32 v6, v5, 16, 1
-; GFX11-NEXT: v_add_nc_u32_e32 v1, v1, v4
-; GFX11-NEXT: s_delay_alu instid0(VALU_DEP_1) | instskip(SKIP_1) | instid1(VALU_DEP_4)
-; GFX11-NEXT: v_add_nc_u32_e32 v0, 0x7fff, v1
-; GFX11-NEXT: v_or_b32_e32 v1, 0x400000, v4
-; GFX11-NEXT: v_add_nc_u32_e32 v4, v6, v5
-; GFX11-NEXT: s_delay_alu instid0(VALU_DEP_2) | instskip(SKIP_4) | instid1(VALU_DEP_4)
-; GFX11-NEXT: v_dual_cndmask_b32 v19, v0, v1 :: v_dual_add_nc_u32 v0, 0x7fff, v2
-; GFX11-NEXT: v_or_b32_e32 v1, 0x400000, v3
-; GFX11-NEXT: v_cmp_u_f32_e32 vcc_lo, v3, v3
-; GFX11-NEXT: v_add_f32_e32 v2, 0x40c00000, v7
-; GFX11-NEXT: v_or_b32_e32 v3, 0x400000, v5
-; GFX11-NEXT: v_cndmask_b32_e32 v34, v0, v1, vcc_lo
-; GFX11-NEXT: v_cmp_u_f32_e32 vcc_lo, v5, v5
-; GFX11-NEXT: v_lshlrev_b32_e32 v5, 16, v21
-; GFX11-NEXT: v_lshlrev_b32_e32 v6, 16, v20
-; GFX11-NEXT: v_bfe_u32 v0, v2, 16, 1
-; GFX11-NEXT: v_add_nc_u32_e32 v1, 0x7fff, v4
-; GFX11-NEXT: s_delay_alu instid0(VALU_DEP_3) | instskip(SKIP_1) | instid1(VALU_DEP_4)
-; GFX11-NEXT: v_dual_add_f32 v5, 0x40c00000, v5 :: v_dual_add_f32 v4, 0x40c00000, v6
-; GFX11-NEXT: v_and_b32_e32 v6, 0xffff0000, v21
-; GFX11-NEXT: v_add_nc_u32_e32 v0, v0, v2
-; GFX11-NEXT: s_delay_alu instid0(VALU_DEP_4)
-; GFX11-NEXT: v_cndmask_b32_e32 v20, v1, v3, vcc_lo
-; GFX11-NEXT: v_cmp_u_f32_e32 vcc_lo, v2, v2
-; GFX11-NEXT: v_bfe_u32 v1, v4, 16, 1
-; GFX11-NEXT: v_add_f32_e32 v3, 0x40c00000, v6
-; GFX11-NEXT: v_add_nc_u32_e32 v0, 0x7fff, v0
-; GFX11-NEXT: v_or_b32_e32 v6, 0x400000, v2
-; GFX11-NEXT: s_delay_alu instid0(VALU_DEP_4) | instskip(NEXT) | instid1(VALU_DEP_4)
-; GFX11-NEXT: v_add_nc_u32_e32 v1, v1, v4
-; GFX11-NEXT: v_bfe_u32 v7, v3, 16, 1
-; GFX11-NEXT: s_delay_alu instid0(VALU_DEP_3) | instskip(SKIP_1) | instid1(VALU_DEP_4)
-; GFX11-NEXT: v_cndmask_b32_e32 v35, v0, v6, vcc_lo
-; GFX11-NEXT: v_bfe_u32 v6, v5, 16, 1
-; GFX11-NEXT: v_add_nc_u32_e32 v0, 0x7fff, v1
-; GFX11-NEXT: v_or_b32_e32 v1, 0x400000, v4
-; GFX11-NEXT: v_add_nc_u32_e32 v2, v7, v3
-; GFX11-NEXT: v_cmp_u_f32_e32 vcc_lo, v4, v4
-; GFX11-NEXT: v_add_nc_u32_e32 v4, v6, v5
-; GFX11-NEXT: v_lshlrev_b32_e32 v6, 16, v22
-; GFX11-NEXT: v_cndmask_b32_e32 v36, v0, v1, vcc_lo
-; GFX11-NEXT: v_add_nc_u32_e32 v0, 0x7fff, v2
-; GFX11-NEXT: v_or_b32_e32 v1, 0x400000, v3
-; GFX11-NEXT: v_cmp_u_f32_e32 vcc_lo, v3, v3
-; GFX11-NEXT: v_or_b32_e32 v3, 0x400000, v5
-; GFX11-NEXT: v_lshrrev_b32_e32 v36, 16, v36
-; GFX11-NEXT: s_delay_alu instid0(VALU_DEP_4)
-; GFX11-NEXT: v_cndmask_b32_e32 v21, v0, v1, vcc_lo
-; GFX11-NEXT: v_add_nc_u32_e32 v1, 0x7fff, v4
-; GFX11-NEXT: v_cmp_u_f32_e32 vcc_lo, v5, v5
-; GFX11-NEXT: v_and_b32_e32 v7, 0xffff0000, v22
-; GFX11-NEXT: v_add_f32_e32 v4, 0x40c00000, v6
-; GFX11-NEXT: v_and_b32_e32 v6, 0xffff0000, v23
-; GFX11-NEXT: v_lshlrev_b32_e32 v5, 16, v23
-; GFX11-NEXT: v_cndmask_b32_e32 v22, v1, v3, vcc_lo
-; GFX11-NEXT: v_add_f32_e32 v2, 0x40c00000, v7
-; GFX11-NEXT: v_bfe_u32 v1, v4, 16, 1
-; GFX11-NEXT: v_add_f32_e32 v3, 0x40c00000, v6
-; GFX11-NEXT: s_delay_alu instid0(VALU_DEP_3) | instskip(SKIP_1) | instid1(VALU_DEP_3)
-; GFX11-NEXT: v_bfe_u32 v0, v2, 16, 1
-; GFX11-NEXT: v_or_b32_e32 v6, 0x400000, v2
-; GFX11-NEXT: v_bfe_u32 v7, v3, 16, 1
-; GFX11-NEXT: v_cmp_u_f32_e32 vcc_lo, v2, v2
-; GFX11-NEXT: s_delay_alu instid0(VALU_DEP_4) | instskip(NEXT) | instid1(VALU_DEP_3)
-; GFX11-NEXT: v_add_nc_u32_e32 v0, v0, v2
-; GFX11-NEXT: v_add_nc_u32_e32 v2, v7, v3
-; GFX11-NEXT: v_and_b32_e32 v7, 0xffff0000, v24
-; GFX11-NEXT: v_add_nc_u32_e32 v1, v1, v4
-; GFX11-NEXT: s_delay_alu instid0(VALU_DEP_4) | instskip(NEXT) | instid1(VALU_DEP_1)
-; GFX11-NEXT: v_add_nc_u32_e32 v0, 0x7fff, v0
-; GFX11-NEXT: v_dual_cndmask_b32 v37, v0, v6 :: v_dual_add_nc_u32 v0, 0x7fff, v1
-; GFX11-NEXT: v_or_b32_e32 v1, 0x400000, v4
-; GFX11-NEXT: v_cmp_u_f32_e32 vcc_lo, v4, v4
-; GFX11-NEXT: s_delay_alu instid0(VALU_DEP_2) | instskip(SKIP_2) | instid1(VALU_DEP_2)
-; GFX11-NEXT: v_dual_cndmask_b32 v23, v0, v1 :: v_dual_add_nc_u32 v0, 0x7fff, v2
-; GFX11-NEXT: v_or_b32_e32 v1, 0x400000, v3
-; GFX11-NEXT: v_cmp_u_f32_e32 vcc_lo, v3, v3
-; GFX11-NEXT: v_cndmask_b32_e32 v38, v0, v1, vcc_lo
-; GFX11-NEXT: v_add_f32_e32 v5, 0x40c00000, v5
-; GFX11-NEXT: s_delay_alu instid0(VALU_DEP_1) | instskip(SKIP_2) | instid1(VALU_DEP_3)
-; GFX11-NEXT: v_bfe_u32 v6, v5, 16, 1
-; GFX11-NEXT: v_or_b32_e32 v3, 0x400000, v5
-; GFX11-NEXT: v_cmp_u_f32_e32 vcc_lo, v5, v5
-; GFX11-NEXT: v_add_nc_u32_e32 v4, v6, v5
-; GFX11-NEXT: v_lshlrev_b32_e32 v6, 16, v24
-; GFX11-NEXT: v_lshlrev_b32_e32 v5, 16, v25
-; GFX11-NEXT: s_delay_alu instid0(VALU_DEP_3) | instskip(NEXT) | instid1(VALU_DEP_3)
-; GFX11-NEXT: v_add_nc_u32_e32 v1, 0x7fff, v4
-; GFX11-NEXT: v_add_f32_e32 v4, 0x40c00000, v6
-; GFX11-NEXT: v_and_b32_e32 v6, 0xffff0000, v25
-; GFX11-NEXT: s_delay_alu instid0(VALU_DEP_3) | instskip(NEXT) | instid1(VALU_DEP_2)
-; GFX11-NEXT: v_dual_add_f32 v5, 0x40c00000, v5 :: v_dual_cndmask_b32 v24, v1, v3
-; GFX11-NEXT: v_dual_add_f32 v2, 0x40c00000, v7 :: v_dual_add_f32 v3, 0x40c00000, v6
-; GFX11-NEXT: s_delay_alu instid0(VALU_DEP_4) | instskip(NEXT) | instid1(VALU_DEP_2)
-; GFX11-NEXT: v_bfe_u32 v1, v4, 16, 1
-; GFX11-NEXT: v_bfe_u32 v0, v2, 16, 1
-; GFX11-NEXT: s_delay_alu instid0(VALU_DEP_3) | instskip(SKIP_2) | instid1(VALU_DEP_4)
-; GFX11-NEXT: v_bfe_u32 v7, v3, 16, 1
-; GFX11-NEXT: v_or_b32_e32 v6, 0x400000, v2
-; GFX11-NEXT: v_cmp_u_f32_e32 vcc_lo, v2, v2
-; GFX11-NEXT: v_add_nc_u32_e32 v0, v0, v2
-; GFX11-NEXT: s_delay_alu instid0(VALU_DEP_4) | instskip(SKIP_2) | instid1(VALU_DEP_4)
-; GFX11-NEXT: v_add_nc_u32_e32 v2, v7, v3
-; GFX11-NEXT: v_and_b32_e32 v7, 0xffff0000, v26
-; GFX11-NEXT: v_add_nc_u32_e32 v1, v1, v4
-; GFX11-NEXT: v_add_nc_u32_e32 v0, 0x7fff, v0
-; GFX11-NEXT: s_delay_alu instid0(VALU_DEP_1) | instskip(SKIP_3) | instid1(VALU_DEP_2)
-; GFX11-NEXT: v_dual_cndmask_b32 v39, v0, v6 :: v_dual_add_nc_u32 v0, 0x7fff, v1
-; GFX11-NEXT: v_or_b32_e32 v1, 0x400000, v4
-; GFX11-NEXT: v_bfe_u32 v6, v5, 16, 1
-; GFX11-NEXT: v_cmp_u_f32_e32 vcc_lo, v4, v4
-; GFX11-NEXT: v_add_nc_u32_e32 v4, v6, v5
-; GFX11-NEXT: s_delay_alu instid0(VALU_DEP_4) | instskip(SKIP_4) | instid1(VALU_DEP_4)
-; GFX11-NEXT: v_dual_cndmask_b32 v25, v0, v1 :: v_dual_add_nc_u32 v0, 0x7fff, v2
-; GFX11-NEXT: v_or_b32_e32 v1, 0x400000, v3
-; GFX11-NEXT: v_lshlrev_b32_e32 v6, 16, v26
-; GFX11-NEXT: v_cmp_u_f32_e32 vcc_lo, v3, v3
-; GFX11-NEXT: v_or_b32_e32 v3, 0x400000, v5
-; GFX11-NEXT: v_cndmask_b32_e32 v48, v0, v1, vcc_lo
-; GFX11-NEXT: v_add_f32_e32 v2, 0x40c00000, v7
-; GFX11-NEXT: v_add_nc_u32_e32 v1, 0x7fff, v4
-; GFX11-NEXT: v_add_f32_e32 v4, 0x40c00000, v6
-; GFX11-NEXT: v_and_b32_e32 v6, 0xffff0000, v27
-; GFX11-NEXT: v_cmp_u_f32_e32 vcc_lo, v5, v5
-; GFX11-NEXT: v_bfe_u32 v0, v2, 16, 1
-; GFX11-NEXT: v_lshlrev_b32_e32 v5, 16, v27
-; GFX11-NEXT: v_cndmask_b32_e32 v49, v1, v3, vcc_lo
-; GFX11-NEXT: v_add_f32_e32 v3, 0x40c00000, v6
-; GFX11-NEXT: s_delay_alu instid0(VALU_DEP_4)
-; GFX11-NEXT: v_add_nc_u32_e32 v0, v0, v2
-; GFX11-NEXT: v_bfe_u32 v1, v4, 16, 1
-; GFX11-NEXT: v_or_b32_e32 v6, 0x400000, v2
-; GFX11-NEXT: v_add_f32_e32 v5, 0x40c00000, v5
-; GFX11-NEXT: v_bfe_u32 v7, v3, 16, 1
-; GFX11-NEXT: v_add_nc_u32_e32 v0, 0x7fff, v0
-; GFX11-NEXT: v_cmp_u_f32_e32 vcc_lo, v2, v2
-; GFX11-NEXT: v_lshrrev_b32_e32 v49, 16, v49
-; GFX11-NEXT: s_delay_alu instid0(VALU_DEP_4) | instskip(SKIP_4) | instid1(VALU_DEP_3)
-; GFX11-NEXT: v_add_nc_u32_e32 v2, v7, v3
-; GFX11-NEXT: v_and_b32_e32 v7, 0xffff0000, v28
-; GFX11-NEXT: v_dual_cndmask_b32 v26, v0, v6 :: v_dual_add_nc_u32 v1, v1, v4
-; GFX11-NEXT: v_bfe_u32 v6, v5, 16, 1
-; GFX11-NEXT: v_cmp_u_f32_e32 vcc_lo, v4, v4
-; GFX11-NEXT: v_add_nc_u32_e32 v0, 0x7fff, v1
-; GFX11-NEXT: v_or_b32_e32 v1, 0x400000, v4
-; GFX11-NEXT: s_delay_alu instid0(VALU_DEP_4) | instskip(NEXT) | instid1(VALU_DEP_2)
-; GFX11-NEXT: v_add_nc_u32_e32 v4, v6, v5
-; GFX11-NEXT: v_dual_cndmask_b32 v27, v0, v1 :: v_dual_lshlrev_b32 v6, 16, v28
-; GFX11-NEXT: v_add_nc_u32_e32 v0, 0x7fff, v2
-; GFX11-NEXT: v_or_b32_e32 v1, 0x400000, v3
-; GFX11-NEXT: v_cmp_u_f32_e32 vcc_lo, v3, v3
-; GFX11-NEXT: v_or_b32_e32 v3, 0x400000, v5
-; GFX11-NEXT: s_delay_alu instid0(VALU_DEP_3)
-; GFX11-NEXT: v_cndmask_b32_e32 v50, v0, v1, vcc_lo
-; GFX11-NEXT: v_add_f32_e32 v2, 0x40c00000, v7
-; GFX11-NEXT: v_add_nc_u32_e32 v1, 0x7fff, v4
-; GFX11-NEXT: v_add_f32_e32 v4, 0x40c00000, v6
-; GFX11-NEXT: v_and_b32_e32 v6, 0xffff0000, v29
-; GFX11-NEXT: v_cmp_u_f32_e32 vcc_lo, v5, v5
-; GFX11-NEXT: v_bfe_u32 v0, v2, 16, 1
-; GFX11-NEXT: v_dual_cndmask_b32 v28, v1, v3 :: v_dual_lshlrev_b32 v5, 16, v29
-; GFX11-NEXT: s_delay_alu instid0(VALU_DEP_4) | instskip(NEXT) | instid1(VALU_DEP_3)
-; GFX11-NEXT: v_add_f32_e32 v3, 0x40c00000, v6
-; GFX11-NEXT: v_add_nc_u32_e32 v0, v0, v2
-; GFX11-NEXT: v_bfe_u32 v1, v4, 16, 1
-; GFX11-NEXT: v_or_b32_e32 v6, 0x400000, v2
-; GFX11-NEXT: v_add_f32_e32 v5, 0x40c00000, v5
-; GFX11-NEXT: v_bfe_u32 v7, v3, 16, 1
-; GFX11-NEXT: v_add_nc_u32_e32 v0, 0x7fff, v0
-; GFX11-NEXT: v_cmp_u_f32_e32 vcc_lo, v2, v2
-; GFX11-NEXT: s_delay_alu instid0(VALU_DEP_3)
-; GFX11-NEXT: v_add_nc_u32_e32 v2, v7, v3
-; GFX11-NEXT: v_and_b32_e32 v7, 0xffff0000, v30
-; GFX11-NEXT: v_add_nc_u32_e32 v1, v1, v4
-; GFX11-NEXT: v_cndmask_b32_e32 v51, v0, v6, vcc_lo
-; GFX11-NEXT: v_bfe_u32 v6, v5, 16, 1
-; GFX11-NEXT: v_cmp_u_f32_e32 vcc_lo, v4, v4
-; GFX11-NEXT: s_delay_alu instid0(VALU_DEP_4) | instskip(SKIP_1) | instid1(VALU_DEP_4)
-; GFX11-NEXT: v_add_nc_u32_e32 v0, 0x7fff, v1
-; GFX11-NEXT: v_or_b32_e32 v1, 0x400000, v4
-; GFX11-NEXT: v_add_nc_u32_e32 v4, v6, v5
-; GFX11-NEXT: s_delay_alu instid0(VALU_DEP_2)
-; GFX11-NEXT: v_dual_cndmask_b32 v29, v0, v1 :: v_dual_lshlrev_b32 v6, 16, v30
-; GFX11-NEXT: v_add_nc_u32_e32 v0, 0x7fff, v2
-; GFX11-NEXT: v_or_b32_e32 v1, 0x400000, v3
-; GFX11-NEXT: v_add_f32_e32 v2, 0x40c00000, v7
-; GFX11-NEXT: v_cmp_u_f32_e32 vcc_lo, v3, v3
-; GFX11-NEXT: v_or_b32_e32 v3, 0x400000, v5
-; GFX11-NEXT: s_delay_alu instid0(VALU_DEP_4) | instskip(NEXT) | instid1(VALU_DEP_4)
-; GFX11-NEXT: v_dual_cndmask_b32 v52, v0, v1 :: v_dual_add_nc_u32 v1, 0x7fff, v4
-; GFX11-NEXT: v_bfe_u32 v0, v2, 16, 1
-; GFX11-NEXT: v_add_f32_e32 v4, 0x40c00000, v6
-; GFX11-NEXT: v_and_b32_e32 v6, 0xffff0000, v31
-; GFX11-NEXT: v_cmp_u_f32_e32 vcc_lo, v5, v5
-; GFX11-NEXT: v_or_b32_e32 v5, 0x400000, v2
-; GFX11-NEXT: v_add_nc_u32_e32 v0, v0, v2
-; GFX11-NEXT: s_delay_alu instid0(VALU_DEP_4) | instskip(NEXT) | instid1(VALU_DEP_2)
-; GFX11-NEXT: v_dual_cndmask_b32 v30, v1, v3 :: v_dual_add_f32 v3, 0x40c00000, v6
-; GFX11-NEXT: v_add_nc_u32_e32 v0, 0x7fff, v0
-; GFX11-NEXT: v_cmp_u_f32_e32 vcc_lo, v2, v2
-; GFX11-NEXT: v_lshlrev_b32_e32 v6, 16, v31
-; GFX11-NEXT: s_delay_alu instid0(VALU_DEP_4) | instskip(NEXT) | instid1(VALU_DEP_2)
-; GFX11-NEXT: v_bfe_u32 v7, v3, 16, 1
-; GFX11-NEXT: v_dual_cndmask_b32 v53, v0, v5 :: v_dual_add_f32 v0, 0x40c00000, v6
-; GFX11-NEXT: s_delay_alu instid0(VALU_DEP_2) | instskip(SKIP_2) | instid1(VALU_DEP_4)
-; GFX11-NEXT: v_add_nc_u32_e32 v5, v7, v3
-; GFX11-NEXT: v_bfe_u32 v1, v4, 16, 1
-; GFX11-NEXT: v_or_b32_e32 v2, 0x400000, v4
-; GFX11-NEXT: v_bfe_u32 v6, v0, 16, 1
-; GFX11-NEXT: v_add_f32_e64 v7, 0x40c00000, s0
-; GFX11-NEXT: v_cmp_u_f32_e32 vcc_lo, v4, v4
-; GFX11-NEXT: v_add_nc_u32_e32 v1, v1, v4
-; GFX11-NEXT: s_lshl_b32 s0, s12, 16
-; GFX11-NEXT: v_add_nc_u32_e32 v4, v6, v0
-; GFX11-NEXT: s_delay_alu instid0(VALU_DEP_2) | instskip(NEXT) | instid1(VALU_DEP_1)
-; GFX11-NEXT: v_add_nc_u32_e32 v1, 0x7fff, v1
-; GFX11-NEXT: v_dual_cndmask_b32 v54, v1, v2 :: v_dual_add_nc_u32 v1, 0x7fff, v5
-; GFX11-NEXT: v_or_b32_e32 v2, 0x400000, v3
-; GFX11-NEXT: v_bfe_u32 v5, v7, 16, 1
-; GFX11-NEXT: v_cmp_u_f32_e32 vcc_lo, v3, v3
-; GFX11-NEXT: v_or_b32_e32 v3, 0x400000, v0
-; GFX11-NEXT: v_lshrrev_b32_e32 v54, 16, v54
-; GFX11-NEXT: v_cndmask_b32_e32 v31, v1, v2, vcc_lo
-; GFX11-NEXT: v_add_f32_e64 v1, 0x40c00000, s0
-; GFX11-NEXT: v_add_nc_u32_e32 v2, 0x7fff, v4
-; GFX11-NEXT: v_add_nc_u32_e32 v4, v5, v7
-; GFX11-NEXT: v_cmp_u_f32_e32 vcc_lo, v0, v0
-; GFX11-NEXT: s_and_b32 s0, s13, 0xffff0000
-; GFX11-NEXT: v_bfe_u32 v5, v1, 16, 1
-; GFX11-NEXT: v_add_f32_e64 v6, 0x40c00000, s0
-; GFX11-NEXT: s_lshl_b32 s0, s13, 16
-; GFX11-NEXT: v_cndmask_b32_e32 v0, v2, v3, vcc_lo
-; GFX11-NEXT: v_add_nc_u32_e32 v2, 0x7fff, v4
-; GFX11-NEXT: v_or_b32_e32 v3, 0x400000, v7
-; GFX11-NEXT: v_cmp_u_f32_e32 vcc_lo, v7, v7
-; GFX11-NEXT: v_add_nc_u32_e32 v4, v5, v1
-; GFX11-NEXT: v_bfe_u32 v5, v6, 16, 1
-; GFX11-NEXT: s_delay_alu instid0(VALU_DEP_4)
-; GFX11-NEXT: v_cndmask_b32_e32 v55, v2, v3, vcc_lo
-; GFX11-NEXT: v_add_f32_e64 v3, 0x40c00000, s0
-; GFX11-NEXT: s_and_b32 s0, s14, 0xffff0000
-; GFX11-NEXT: v_add_nc_u32_e32 v2, 0x7fff, v4
-; GFX11-NEXT: v_or_b32_e32 v4, 0x400000, v1
-; GFX11-NEXT: v_add_f32_e64 v8, 0x40c00000, s0
-; GFX11-NEXT: v_bfe_u32 v7, v3, 16, 1
-; GFX11-NEXT: v_cmp_u_f32_e32 vcc_lo, v1, v1
-; GFX11-NEXT: v_add_nc_u32_e32 v5, v5, v6
-; GFX11-NEXT: s_lshl_b32 s0, s14, 16
-; GFX11-NEXT: s_delay_alu instid0(VALU_DEP_1)
-; GFX11-NEXT: v_dual_cndmask_b32 v2, v2, v4 :: v_dual_add_nc_u32 v1, 0x7fff, v5
-; GFX11-NEXT: v_or_b32_e32 v4, 0x400000, v6
-; GFX11-NEXT: v_add_nc_u32_e32 v5, v7, v3
-; GFX11-NEXT: v_bfe_u32 v7, v8, 16, 1
-; GFX11-NEXT: v_cmp_u_f32_e32 vcc_lo, v6, v6
-; GFX11-NEXT: v_or_b32_e32 v6, 0x400000, v3
-; GFX11-NEXT: s_delay_alu instid0(VALU_DEP_4) | instskip(NEXT) | instid1(VALU_DEP_4)
-; GFX11-NEXT: v_add_nc_u32_e32 v5, 0x7fff, v5
-; GFX11-NEXT: v_add_nc_u32_e32 v7, v7, v8
-; GFX11-NEXT: v_cndmask_b32_e32 v1, v1, v4, vcc_lo
-; GFX11-NEXT: v_add_f32_e64 v4, 0x40c00000, s0
-; GFX11-NEXT: v_cmp_u_f32_e32 vcc_lo, v3, v3
-; GFX11-NEXT: s_and_b32 s0, s15, 0xffff0000
-; GFX11-NEXT: s_delay_alu instid0(SALU_CYCLE_1) | instskip(NEXT) | instid1(VALU_DEP_3)
-; GFX11-NEXT: v_add_f32_e64 v10, 0x40c00000, s0
-; GFX11-NEXT: v_bfe_u32 v9, v4, 16, 1
-; GFX11-NEXT: v_cndmask_b32_e32 v3, v5, v6, vcc_lo
-; GFX11-NEXT: v_add_nc_u32_e32 v5, 0x7fff, v7
-; GFX11-NEXT: v_or_b32_e32 v6, 0x400000, v8
-; GFX11-NEXT: v_cmp_u_f32_e32 vcc_lo, v8, v8
-; GFX11-NEXT: s_lshl_b32 s0, s15, 16
-; GFX11-NEXT: v_add_nc_u32_e32 v7, v9, v4
-; GFX11-NEXT: v_bfe_u32 v9, v10, 16, 1
-; GFX11-NEXT: v_cndmask_b32_e32 v64, v5, v6, vcc_lo
-; GFX11-NEXT: v_add_f32_e64 v5, 0x40c00000, s0
-; GFX11-NEXT: s_and_b32 s0, s16, 0xffff0000
-; GFX11-NEXT: v_add_nc_u32_e32 v6, 0x7fff, v7
-; GFX11-NEXT: v_or_b32_e32 v7, 0x400000, v4
-; GFX11-NEXT: v_add_nc_u32_e32 v8, v9, v10
-; GFX11-NEXT: v_bfe_u32 v9, v5, 16, 1
-; GFX11-NEXT: v_add_f32_e64 v11, 0x40c00000, s0
-; GFX11-NEXT: v_cmp_u_f32_e32 vcc_lo, v4, v4
-; GFX11-NEXT: s_lshl_b32 s0, s16, 16
-; GFX11-NEXT: v_add_nc_u32_e32 v4, 0x7fff, v8
-; GFX11-NEXT: s_delay_alu instid0(VALU_DEP_3)
-; GFX11-NEXT: v_bfe_u32 v8, v11, 16, 1
-; GFX11-NEXT: v_cndmask_b32_e32 v65, v6, v7, vcc_lo
-; GFX11-NEXT: v_or_b32_e32 v6, 0x400000, v10
-; GFX11-NEXT: v_add_nc_u32_e32 v7, v9, v5
-; GFX11-NEXT: v_cmp_u_f32_e32 vcc_lo, v10, v10
-; GFX11-NEXT: v_add_nc_u32_e32 v8, v8, v11
-; GFX11-NEXT: v_lshrrev_b32_e32 v65, 16, v65
-; GFX11-NEXT: v_cndmask_b32_e32 v66, v4, v6, vcc_lo
-; GFX11-NEXT: v_add_f32_e64 v4, 0x40c00000, s0
-; GFX11-NEXT: v_add_nc_u32_e32 v6, 0x7fff, v7
-; GFX11-NEXT: v_or_b32_e32 v7, 0x400000, v5
-; GFX11-NEXT: v_cmp_u_f32_e32 vcc_lo, v5, v5
-; GFX11-NEXT: s_and_b32 s0, s17, 0xffff0000
-; GFX11-NEXT: v_bfe_u32 v9, v4, 16, 1
-; GFX11-NEXT: v_add_f32_e64 v10, 0x40c00000, s0
-; GFX11-NEXT: v_add_nc_u32_e32 v5, 0x7fff, v8
-; GFX11-NEXT: v_cndmask_b32_e32 v67, v6, v7, vcc_lo
-; GFX11-NEXT: v_or_b32_e32 v6, 0x400000, v11
-; GFX11-NEXT: v_cmp_u_f32_e32 vcc_lo, v11, v11
-; GFX11-NEXT: s_lshl_b32 s0, s17, 16
-; GFX11-NEXT: v_add_nc_u32_e32 v7, v9, v4
-; GFX11-NEXT: v_bfe_u32 v8, v10, 16, 1
-; GFX11-NEXT: v_lshrrev_b32_e32 v67, 16, v67
-; GFX11-NEXT: v_cndmask_b32_e32 v68, v5, v6, vcc_lo
-; GFX11-NEXT: v_add_f32_e64 v5, 0x40c00000, s0
-; GFX11-NEXT: s_and_b32 s0, s18, 0xffff0000
-; GFX11-NEXT: v_add_nc_u32_e32 v6, 0x7fff, v7
-; GFX11-NEXT: v_or_b32_e32 v7, 0x400000, v4
-; GFX11-NEXT: v_add_nc_u32_e32 v8, v8, v10
-; GFX11-NEXT: v_bfe_u32 v9, v5, 16, 1
-; GFX11-NEXT: v_add_f32_e64 v11, 0x40c00000, s0
-; GFX11-NEXT: v_cmp_u_f32_e32 vcc_lo, v4, v4
-; GFX11-NEXT: s_lshl_b32 s0, s18, 16
-; GFX11-NEXT: v_cndmask_b32_e32 v4, v6, v7, vcc_lo
-; GFX11-NEXT: v_add_nc_u32_e32 v6, 0x7fff, v8
-; GFX11-NEXT: v_or_b32_e32 v7, 0x400000, v10
-; GFX11-NEXT: v_add_nc_u32_e32 v8, v9, v5
-; GFX11-NEXT: v_bfe_u32 v9, v11, 16, 1
-; GFX11-NEXT: v_cmp_u_f32_e32 vcc_lo, v10, v10
-; GFX11-NEXT: v_lshrrev_b32_e32 v4, 16, v4
-; GFX11-NEXT: s_delay_alu instid0(VALU_DEP_3)
-; GFX11-NEXT: v_add_nc_u32_e32 v9, v9, v11
-; GFX11-NEXT: v_cndmask_b32_e32 v69, v6, v7, vcc_lo
-; GFX11-NEXT: v_add_f32_e64 v6, 0x40c00000, s0
-; GFX11-NEXT: v_add_nc_u32_e32 v7, 0x7fff, v8
-; GFX11-NEXT: v_or_b32_e32 v8, 0x400000, v5
-; GFX11-NEXT: v_cmp_u_f32_e32 vcc_lo, v5, v5
-; GFX11-NEXT: s_and_b32 s0, s19, 0xffff0000
-; GFX11-NEXT: v_bfe_u32 v10, v6, 16, 1
-; GFX11-NEXT: v_add_f32_e64 v12, 0x40c00000, s0
-; GFX11-NEXT: s_lshl_b32 s0, s19, 16
-; GFX11-NEXT: v_cndmask_b32_e32 v5, v7, v8, vcc_lo
-; GFX11-NEXT: v_add_nc_u32_e32 v7, 0x7fff, v9
-; GFX11-NEXT: v_or_b32_e32 v8, 0x400000, v11
-; GFX11-NEXT: v_cmp_u_f32_e32 vcc_lo, v11, v11
-; GFX11-NEXT: v_add_nc_u32_e32 v9, v10, v6
-; GFX11-NEXT: v_bfe_u32 v10, v12, 16, 1
-; GFX11-NEXT: v_or_b32_e32 v14, 0x400000, v12
-; GFX11-NEXT: v_lshrrev_b32_e32 v5, 16, v5
-; GFX11-NEXT: v_cndmask_b32_e32 v70, v7, v8, vcc_lo
-; GFX11-NEXT: v_add_f32_e64 v7, 0x40c00000, s0
-; GFX11-NEXT: s_and_b32 s0, s20, 0xffff0000
-; GFX11-NEXT: v_add_nc_u32_e32 v8, 0x7fff, v9
-; GFX11-NEXT: v_or_b32_e32 v9, 0x400000, v6
-; GFX11-NEXT: v_add_nc_u32_e32 v10, v10, v12
-; GFX11-NEXT: v_bfe_u32 v11, v7, 16, 1
-; GFX11-NEXT: v_add_f32_e64 v13, 0x40c00000, s0
-; GFX11-NEXT: v_cmp_u_f32_e32 vcc_lo, v6, v6
-; GFX11-NEXT: s_lshl_b32 s0, s20, 16
-; GFX11-NEXT: v_or_b32_e32 v15, 0x400000, v7
-; GFX11-NEXT: v_and_or_b32 v5, 0xffff0000, v69, v5
-; GFX11-NEXT: v_lshrrev_b32_e32 v69, 16, v2
-; GFX11-NEXT: v_cndmask_b32_e32 v6, v8, v9, vcc_lo
-; GFX11-NEXT: v_add_nc_u32_e32 v8, 0x7fff, v10
-; GFX11-NEXT: v_add_nc_u32_e32 v9, v11, v7
-; GFX11-NEXT: v_bfe_u32 v10, v13, 16, 1
-; GFX11-NEXT: v_add_f32_e64 v11, 0x40c00000, s0
-; GFX11-NEXT: v_cmp_u_f32_e32 vcc_lo, v12, v12
-; GFX11-NEXT: s_and_b32 s0, s21, 0xffff0000
-; GFX11-NEXT: v_add_nc_u32_e32 v9, 0x7fff, v9
-; GFX11-NEXT: v_add_nc_u32_e32 v10, v10, v13
-; GFX11-NEXT: v_bfe_u32 v71, v11, 16, 1
-; GFX11-NEXT: v_cndmask_b32_e32 v80, v8, v14, vcc_lo
-; GFX11-NEXT: v_cmp_u_f32_e32 vcc_lo, v7, v7
-; GFX11-NEXT: v_add_f32_e64 v12, 0x40c00000, s0
-; GFX11-NEXT: v_add_nc_u32_e32 v8, 0x7fff, v10
-; GFX11-NEXT: v_or_b32_e32 v10, 0x400000, v13
-; GFX11-NEXT: s_lshl_b32 s0, s21, 16
-; GFX11-NEXT: v_cndmask_b32_e32 v7, v9, v15, vcc_lo
-; GFX11-NEXT: v_add_nc_u32_e32 v9, v71, v11
-; GFX11-NEXT: v_cmp_u_f32_e32 vcc_lo, v13, v13
-; GFX11-NEXT: v_add_f32_e64 v14, 0x40c00000, s0
-; GFX11-NEXT: v_or_b32_e32 v15, 0x400000, v11
-; GFX11-NEXT: v_bfe_u32 v71, v12, 16, 1
-; GFX11-NEXT: v_dual_cndmask_b32 v8, v8, v10 :: v_dual_add_nc_u32 v9, 0x7fff, v9
-; GFX11-NEXT: v_cmp_u_f32_e32 vcc_lo, v11, v11
-; GFX11-NEXT: s_and_b32 s0, s22, 0xffff0000
-; GFX11-NEXT: v_bfe_u32 v10, v14, 16, 1
-; GFX11-NEXT: v_add_f32_e64 v11, 0x40c00000, s0
-; GFX11-NEXT: s_lshl_b32 s0, s22, 16
-; GFX11-NEXT: v_cndmask_b32_e32 v81, v9, v15, vcc_lo
-; GFX11-NEXT: v_add_nc_u32_e32 v9, v71, v12
-; GFX11-NEXT: v_add_nc_u32_e32 v10, v10, v14
-; GFX11-NEXT: v_add_f32_e64 v13, 0x40c00000, s0
-; GFX11-NEXT: v_or_b32_e32 v15, 0x400000, v12
-; GFX11-NEXT: v_bfe_u32 v71, v11, 16, 1
-; GFX11-NEXT: v_add_nc_u32_e32 v9, 0x7fff, v9
-; GFX11-NEXT: v_cmp_u_f32_e32 vcc_lo, v12, v12
-; GFX11-NEXT: v_add_nc_u32_e32 v10, 0x7fff, v10
-; GFX11-NEXT: v_or_b32_e32 v82, 0x400000, v14
-; GFX11-NEXT: v_bfe_u32 v83, v13, 16, 1
-; GFX11-NEXT: v_add_nc_u32_e32 v12, v71, v11
-; GFX11-NEXT: v_cndmask_b32_e32 v9, v9, v15, vcc_lo
-; GFX11-NEXT: v_cmp_u_f32_e32 vcc_lo, v14, v14
-; GFX11-NEXT: s_and_b32 s0, s23, 0xffff0000
-; GFX11-NEXT: v_add_nc_u32_e32 v14, v83, v13
-; GFX11-NEXT: v_add_nc_u32_e32 v12, 0x7fff, v12
-; GFX11-NEXT: v_add_f32_e64 v15, 0x40c00000, s0
-; GFX11-NEXT: v_cndmask_b32_e32 v10, v10, v82, vcc_lo
-; GFX11-NEXT: v_or_b32_e32 v71, 0x400000, v11
-; GFX11-NEXT: v_cmp_u_f32_e32 vcc_lo, v11, v11
-; GFX11-NEXT: s_lshl_b32 s0, s23, 16
-; GFX11-NEXT: v_add_nc_u32_e32 v14, 0x7fff, v14
-; GFX11-NEXT: v_or_b32_e32 v82, 0x400000, v13
-; GFX11-NEXT: v_bfe_u32 v83, v15, 16, 1
-; GFX11-NEXT: v_add_f32_e64 v84, 0x40c00000, s0
-; GFX11-NEXT: v_cndmask_b32_e32 v71, v12, v71, vcc_lo
-; GFX11-NEXT: v_cmp_u_f32_e32 vcc_lo, v13, v13
-; GFX11-NEXT: s_and_b32 s0, s24, 0xffff0000
-; GFX11-NEXT: v_add_nc_u32_e32 v12, v83, v15
-; GFX11-NEXT: v_bfe_u32 v13, v84, 16, 1
-; GFX11-NEXT: v_or_b32_e32 v83, 0x400000, v15
-; GFX11-NEXT: v_cndmask_b32_e32 v11, v14, v82, vcc_lo
-; GFX11-NEXT: v_add_f32_e64 v14, 0x40c00000, s0
-; GFX11-NEXT: v_add_f32_e64 v82, 0x40c00000, s1
-; GFX11-NEXT: v_add_nc_u32_e32 v12, 0x7fff, v12
-; GFX11-NEXT: v_add_nc_u32_e32 v13, v13, v84
-; GFX11-NEXT: v_cmp_u_f32_e32 vcc_lo, v15, v15
-; GFX11-NEXT: v_bfe_u32 v85, v14, 16, 1
-; GFX11-NEXT: v_bfe_u32 v86, v82, 16, 1
-; GFX11-NEXT: s_and_b32 s0, s25, 0xffff0000
-; GFX11-NEXT: v_or_b32_e32 v96, 0x400000, v82
-; GFX11-NEXT: v_dual_cndmask_b32 v83, v12, v83 :: v_dual_add_nc_u32 v12, 0x7fff, v13
-; GFX11-NEXT: v_or_b32_e32 v13, 0x400000, v84
-; GFX11-NEXT: v_add_nc_u32_e32 v15, v85, v14
-; GFX11-NEXT: v_cmp_u_f32_e32 vcc_lo, v84, v84
-; GFX11-NEXT: v_add_nc_u32_e32 v85, v86, v82
-; GFX11-NEXT: v_or_b32_e32 v84, 0x400000, v14
-; GFX11-NEXT: v_lshrrev_b32_e32 v6, 16, v6
-; GFX11-NEXT: v_dual_cndmask_b32 v12, v12, v13 :: v_dual_add_nc_u32 v15, 0x7fff, v15
-; GFX11-NEXT: v_cmp_u_f32_e32 vcc_lo, v14, v14
-; GFX11-NEXT: v_add_nc_u32_e32 v85, 0x7fff, v85
-; GFX11-NEXT: v_add_f32_e64 v13, 0x40c00000, s0
-; GFX11-NEXT: s_lshl_b32 s0, s25, 16
-; GFX11-NEXT: v_and_or_b32 v6, 0xffff0000, v70, v6
-; GFX11-NEXT: v_cndmask_b32_e32 v84, v15, v84, vcc_lo
-; GFX11-NEXT: v_cmp_u_f32_e32 vcc_lo, v82, v82
-; GFX11-NEXT: v_add_f32_e64 v87, 0x40c00000, s0
-; GFX11-NEXT: s_and_b32 s0, s26, 0xffff0000
-; GFX11-NEXT: v_bfe_u32 v86, v13, 16, 1
-; GFX11-NEXT: v_lshrrev_b32_e32 v70, 16, v0
-; GFX11-NEXT: v_cndmask_b32_e32 v82, v85, v96, vcc_lo
-; GFX11-NEXT: v_add_f32_e64 v85, 0x40c00000, s0
-; GFX11-NEXT: s_lshl_b32 s0, s26, 16
-; GFX11-NEXT: v_bfe_u32 v15, v87, 16, 1
-; GFX11-NEXT: v_add_f32_e64 v96, 0x40c00000, s0
-; GFX11-NEXT: s_lshl_b32 s0, s27, 16
-; GFX11-NEXT: v_bfe_u32 v97, v85, 16, 1
-; GFX11-NEXT: v_add_f32_e64 v98, 0x40c00000, s0
-; GFX11-NEXT: s_and_b32 s0, s27, 0xffff0000
-; GFX11-NEXT: v_bfe_u32 v99, v96, 16, 1
-; GFX11-NEXT: v_add_f32_e64 v100, 0x40c00000, s0
-; GFX11-NEXT: v_or_b32_e32 v113, 0x400000, v96
-; GFX11-NEXT: v_bfe_u32 v101, v98, 16, 1
-; GFX11-NEXT: v_cmp_u_f32_e32 vcc_lo, v96, v96
-; GFX11-NEXT: v_add_nc_u32_e32 v99, v99, v96
-; GFX11-NEXT: v_add_nc_u32_e32 v97, v97, v85
-; GFX11-NEXT: v_bfe_u32 v103, v100, 16, 1
-; GFX11-NEXT: v_add_nc_u32_e32 v101, v101, v98
-; GFX11-NEXT: v_or_b32_e32 v114, 0x400000, v98
-; GFX11-NEXT: v_add_nc_u32_e32 v99, 0x7fff, v99
-; GFX11-NEXT: v_add_nc_u32_e32 v97, 0x7fff, v97
-; GFX11-NEXT: v_or_b32_e32 v112, 0x400000, v85
-; GFX11-NEXT: v_add_nc_u32_e32 v101, 0x7fff, v101
-; GFX11-NEXT: v_add_nc_u32_e32 v103, v103, v100
-; GFX11-NEXT: v_cndmask_b32_e32 v96, v99, v113, vcc_lo
-; GFX11-NEXT: v_cmp_u_f32_e32 vcc_lo, v98, v98
-; GFX11-NEXT: v_add_nc_u32_e32 v15, v15, v87
-; GFX11-NEXT: v_add_nc_u32_e32 v14, v86, v13
-; GFX11-NEXT: v_add_nc_u32_e32 v99, 0x7fff, v103
-; GFX11-NEXT: v_or_b32_e32 v103, 0x400000, v100
-; GFX11-NEXT: v_cndmask_b32_e32 v98, v101, v114, vcc_lo
-; GFX11-NEXT: v_cmp_u_f32_e32 vcc_lo, v85, v85
-; GFX11-NEXT: v_add_nc_u32_e32 v15, 0x7fff, v15
-; GFX11-NEXT: v_or_b32_e32 v102, 0x400000, v87
-; GFX11-NEXT: v_add_nc_u32_e32 v14, 0x7fff, v14
-; GFX11-NEXT: v_or_b32_e32 v86, 0x400000, v13
-; GFX11-NEXT: v_cndmask_b32_e32 v85, v97, v112, vcc_lo
-; GFX11-NEXT: v_cmp_u_f32_e32 vcc_lo, v100, v100
-; GFX11-NEXT: v_lshrrev_b32_e32 v96, 16, v96
-; GFX11-NEXT: v_and_or_b32 v2, 0xffff0000, v64, v65
-; GFX11-NEXT: v_and_or_b32 v0, 0xffff0000, v55, v69
-; GFX11-NEXT: v_lshrrev_b32_e32 v55, 16, v30
-; GFX11-NEXT: v_cndmask_b32_e32 v97, v99, v103, vcc_lo
-; GFX11-NEXT: v_cmp_u_f32_e32 vcc_lo, v87, v87
-; GFX11-NEXT: v_lshrrev_b32_e32 v64, 16, v29
-; GFX11-NEXT: v_lshrrev_b32_e32 v65, 16, v28
-; GFX11-NEXT: v_and_or_b32 v4, 0xffff0000, v68, v4
-; GFX11-NEXT: v_lshrrev_b32_e32 v68, 16, v3
-; GFX11-NEXT: v_cndmask_b32_e32 v87, v15, v102, vcc_lo
-; GFX11-NEXT: v_cmp_u_f32_e32 vcc_lo, v13, v13
-; GFX11-NEXT: v_and_or_b32 v3, 0xffff0000, v66, v67
-; GFX11-NEXT: v_lshrrev_b32_e32 v66, 16, v27
-; GFX11-NEXT: v_and_or_b32 v29, 0xffff0000, v52, v55
-; GFX11-NEXT: v_and_or_b32 v28, 0xffff0000, v51, v64
-; GFX11-NEXT: v_cndmask_b32_e32 v13, v14, v86, vcc_lo
-; GFX11-NEXT: v_and_or_b32 v14, 0xffff0000, v85, v96
-; GFX11-NEXT: v_lshrrev_b32_e32 v85, 16, v87
-; GFX11-NEXT: v_lshrrev_b32_e32 v87, 16, v11
-; GFX11-NEXT: v_and_or_b32 v27, 0xffff0000, v50, v65
-; GFX11-NEXT: v_lshrrev_b32_e32 v50, 16, v25
-; GFX11-NEXT: v_lshrrev_b32_e32 v51, 16, v24
-; GFX11-NEXT: v_lshrrev_b32_e32 v52, 16, v23
-; GFX11-NEXT: v_lshrrev_b32_e32 v98, 16, v98
-; GFX11-NEXT: v_lshrrev_b32_e32 v82, 16, v82
-; GFX11-NEXT: v_lshrrev_b32_e32 v86, 16, v12
-; GFX11-NEXT: v_lshrrev_b32_e32 v96, 16, v10
-; GFX11-NEXT: v_and_or_b32 v10, 0xffff0000, v71, v87
-; GFX11-NEXT: v_lshrrev_b32_e32 v71, 16, v81
-; GFX11-NEXT: v_lshrrev_b32_e32 v7, 16, v7
-; GFX11-NEXT: v_and_or_b32 v30, 0xffff0000, v53, v54
-; GFX11-NEXT: v_lshrrev_b32_e32 v53, 16, v22
-; GFX11-NEXT: v_and_or_b32 v25, 0xffff0000, v48, v49
-; GFX11-NEXT: v_and_or_b32 v24, 0xffff0000, v39, v50
-; GFX11-NEXT: v_and_or_b32 v23, 0xffff0000, v38, v51
-; GFX11-NEXT: v_and_or_b32 v22, 0xffff0000, v37, v52
-; GFX11-NEXT: v_lshrrev_b32_e32 v37, 16, v20
-; GFX11-NEXT: v_lshrrev_b32_e32 v38, 16, v19
-; GFX11-NEXT: v_lshrrev_b32_e32 v39, 16, v18
-; GFX11-NEXT: v_lshrrev_b32_e32 v48, 16, v17
-; GFX11-NEXT: v_and_or_b32 v15, 0xffff0000, v97, v98
-; GFX11-NEXT: v_and_or_b32 v13, 0xffff0000, v13, v85
-; GFX11-NEXT: v_and_or_b32 v12, 0xffff0000, v84, v82
-; GFX11-NEXT: v_and_or_b32 v11, 0xffff0000, v83, v86
-; GFX11-NEXT: v_and_or_b32 v9, 0xffff0000, v9, v96
-; GFX11-NEXT: v_and_or_b32 v8, 0xffff0000, v8, v71
-; GFX11-NEXT: v_and_or_b32 v7, 0xffff0000, v80, v7
-; GFX11-NEXT: v_and_or_b32 v1, 0xffff0000, v1, v68
-; GFX11-NEXT: v_and_or_b32 v31, 0xffff0000, v31, v70
-; GFX11-NEXT: v_and_or_b32 v26, 0xffff0000, v26, v66
-; GFX11-NEXT: v_and_or_b32 v21, 0xffff0000, v21, v53
-; GFX11-NEXT: v_and_or_b32 v20, 0xffff0000, v35, v36
-; GFX11-NEXT: v_and_or_b32 v19, 0xffff0000, v34, v37
-; GFX11-NEXT: v_and_or_b32 v18, 0xffff0000, v33, v38
-; GFX11-NEXT: v_and_or_b32 v17, 0xffff0000, v32, v39
-; GFX11-NEXT: v_and_or_b32 v16, 0xffff0000, v16, v48
-; GFX11-NEXT: s_setpc_b64 s[30:31]
-; GFX11-NEXT: .LBB105_3:
-; GFX11-NEXT: s_branch .LBB105_2
-; GFX11-NEXT: .LBB105_4:
-; GFX11-NEXT: v_dual_mov_b32 v0, s12 :: v_dual_mov_b32 v1, s13
-; GFX11-NEXT: v_dual_mov_b32 v2, s14 :: v_dual_mov_b32 v3, s15
-; GFX11-NEXT: v_dual_mov_b32 v4, s16 :: v_dual_mov_b32 v5, s17
-; GFX11-NEXT: v_dual_mov_b32 v6, s18 :: v_dual_mov_b32 v7, s19
-; GFX11-NEXT: v_dual_mov_b32 v8, s20 :: v_dual_mov_b32 v9, s21
-; GFX11-NEXT: v_dual_mov_b32 v10, s22 :: v_dual_mov_b32 v11, s23
-; GFX11-NEXT: v_dual_mov_b32 v12, s24 :: v_dual_mov_b32 v13, s25
-; GFX11-NEXT: v_dual_mov_b32 v14, s26 :: v_dual_mov_b32 v15, s27
-; GFX11-NEXT: s_setpc_b64 s[30:31]
+; GFX11-TRUE16-LABEL: bitcast_v64bf16_to_v64i16_scalar:
+; GFX11-TRUE16: ; %bb.0:
+; GFX11-TRUE16-NEXT: s_waitcnt vmcnt(0) expcnt(0) lgkmcnt(0)
+; GFX11-TRUE16-NEXT: v_cmp_ne_u32_e32 vcc_lo, 0, v14
+; GFX11-TRUE16-NEXT: v_dual_mov_b32 v31, v13 :: v_dual_mov_b32 v30, v12
+; GFX11-TRUE16-NEXT: v_dual_mov_b32 v29, v11 :: v_dual_mov_b32 v28, v10
+; GFX11-TRUE16-NEXT: v_dual_mov_b32 v27, v9 :: v_dual_mov_b32 v26, v8
+; GFX11-TRUE16-NEXT: v_dual_mov_b32 v25, v7 :: v_dual_mov_b32 v24, v6
+; GFX11-TRUE16-NEXT: v_dual_mov_b32 v23, v5 :: v_dual_mov_b32 v22, v4
+; GFX11-TRUE16-NEXT: v_dual_mov_b32 v21, v3 :: v_dual_mov_b32 v20, v2
+; GFX11-TRUE16-NEXT: v_dual_mov_b32 v19, v1 :: v_dual_mov_b32 v18, v0
+; GFX11-TRUE16-NEXT: v_dual_mov_b32 v16, s28 :: v_dual_mov_b32 v17, s29
+; GFX11-TRUE16-NEXT: s_mov_b32 s15, s3
+; GFX11-TRUE16-NEXT: s_mov_b32 s14, s2
+; GFX11-TRUE16-NEXT: s_mov_b32 s13, s1
+; GFX11-TRUE16-NEXT: s_mov_b32 s12, s0
+; GFX11-TRUE16-NEXT: s_mov_b32 s0, 0
+; GFX11-TRUE16-NEXT: s_and_b32 s1, vcc_lo, exec_lo
+; GFX11-TRUE16-NEXT: s_cbranch_scc0 .LBB105_3
+; GFX11-TRUE16-NEXT: ; %bb.1: ; %Flow
+; GFX11-TRUE16-NEXT: s_and_not1_b32 vcc_lo, exec_lo, s0
+; GFX11-TRUE16-NEXT: s_cbranch_vccnz .LBB105_4
+; GFX11-TRUE16-NEXT: .LBB105_2: ; %cmp.true
+; GFX11-TRUE16-NEXT: v_and_b32_e32 v2, 0xffff0000, v17
+; GFX11-TRUE16-NEXT: v_lshlrev_b32_e32 v1, 16, v16
+; GFX11-TRUE16-NEXT: v_and_b32_e32 v4, 0xffff0000, v18
+; GFX11-TRUE16-NEXT: s_and_b32 s0, s12, 0xffff0000
+; GFX11-TRUE16-NEXT: s_and_b32 s1, s25, 0xffff0000
+; GFX11-TRUE16-NEXT: v_dual_add_f32 v2, 0x40c00000, v2 :: v_dual_lshlrev_b32 v3, 16, v17
+; GFX11-TRUE16-NEXT: s_delay_alu instid0(VALU_DEP_1) | instskip(SKIP_1) | instid1(VALU_DEP_2)
+; GFX11-TRUE16-NEXT: v_bfe_u32 v7, v2, 16, 1
+; GFX11-TRUE16-NEXT: v_or_b32_e32 v10, 0x400000, v2
+; GFX11-TRUE16-NEXT: v_add_nc_u32_e32 v7, v7, v2
+; GFX11-TRUE16-NEXT: s_delay_alu instid0(VALU_DEP_1) | instskip(SKIP_1) | instid1(VALU_DEP_1)
+; GFX11-TRUE16-NEXT: v_add_nc_u32_e32 v7, 0x7fff, v7
+; GFX11-TRUE16-NEXT: v_and_b32_e32 v0, 0xffff0000, v16
+; GFX11-TRUE16-NEXT: v_add_f32_e32 v0, 0x40c00000, v0
+; GFX11-TRUE16-NEXT: s_delay_alu instid0(VALU_DEP_1) | instskip(SKIP_2) | instid1(VALU_DEP_3)
+; GFX11-TRUE16-NEXT: v_bfe_u32 v5, v0, 16, 1
+; GFX11-TRUE16-NEXT: v_or_b32_e32 v8, 0x400000, v0
+; GFX11-TRUE16-NEXT: v_cmp_u_f32_e32 vcc_lo, v0, v0
+; GFX11-TRUE16-NEXT: v_add_nc_u32_e32 v5, v5, v0
+; GFX11-TRUE16-NEXT: s_delay_alu instid0(VALU_DEP_1) | instskip(NEXT) | instid1(VALU_DEP_1)
+; GFX11-TRUE16-NEXT: v_add_nc_u32_e32 v5, 0x7fff, v5
+; GFX11-TRUE16-NEXT: v_dual_add_f32 v1, 0x40c00000, v1 :: v_dual_cndmask_b32 v16, v5, v8
+; GFX11-TRUE16-NEXT: s_delay_alu instid0(VALU_DEP_1) | instskip(SKIP_3) | instid1(VALU_DEP_4)
+; GFX11-TRUE16-NEXT: v_bfe_u32 v6, v1, 16, 1
+; GFX11-TRUE16-NEXT: v_add_f32_e32 v3, 0x40c00000, v3
+; GFX11-TRUE16-NEXT: v_or_b32_e32 v9, 0x400000, v1
+; GFX11-TRUE16-NEXT: v_cmp_u_f32_e32 vcc_lo, v1, v1
+; GFX11-TRUE16-NEXT: v_add_nc_u32_e32 v6, v6, v1
+; GFX11-TRUE16-NEXT: s_delay_alu instid0(VALU_DEP_4) | instskip(SKIP_2) | instid1(VALU_DEP_4)
+; GFX11-TRUE16-NEXT: v_bfe_u32 v11, v3, 16, 1
+; GFX11-TRUE16-NEXT: v_add_f32_e32 v1, 0x40c00000, v4
+; GFX11-TRUE16-NEXT: v_and_b32_e32 v4, 0xffff0000, v19
+; GFX11-TRUE16-NEXT: v_add_nc_u32_e32 v6, 0x7fff, v6
+; GFX11-TRUE16-NEXT: s_delay_alu instid0(VALU_DEP_4) | instskip(NEXT) | instid1(VALU_DEP_4)
+; GFX11-TRUE16-NEXT: v_add_nc_u32_e32 v0, v11, v3
+; GFX11-TRUE16-NEXT: v_bfe_u32 v5, v1, 16, 1
+; GFX11-TRUE16-NEXT: s_delay_alu instid0(VALU_DEP_4) | instskip(NEXT) | instid1(VALU_DEP_4)
+; GFX11-TRUE16-NEXT: v_add_f32_e32 v4, 0x40c00000, v4
+; GFX11-TRUE16-NEXT: v_cndmask_b32_e32 v32, v6, v9, vcc_lo
+; GFX11-TRUE16-NEXT: v_cmp_u_f32_e32 vcc_lo, v2, v2
+; GFX11-TRUE16-NEXT: v_lshlrev_b32_e32 v2, 16, v18
+; GFX11-TRUE16-NEXT: v_add_nc_u32_e32 v0, 0x7fff, v0
+; GFX11-TRUE16-NEXT: v_or_b32_e32 v6, 0x400000, v3
+; GFX11-TRUE16-NEXT: v_add_nc_u32_e32 v5, v5, v1
+; GFX11-TRUE16-NEXT: v_cndmask_b32_e32 v17, v7, v10, vcc_lo
+; GFX11-TRUE16-NEXT: v_add_f32_e32 v2, 0x40c00000, v2
+; GFX11-TRUE16-NEXT: v_cmp_u_f32_e32 vcc_lo, v3, v3
+; GFX11-TRUE16-NEXT: v_bfe_u32 v3, v4, 16, 1
+; GFX11-TRUE16-NEXT: v_add_nc_u32_e32 v5, 0x7fff, v5
+; GFX11-TRUE16-NEXT: v_or_b32_e32 v7, 0x400000, v1
+; GFX11-TRUE16-NEXT: v_or_b32_e32 v8, 0x400000, v4
+; GFX11-TRUE16-NEXT: v_cndmask_b32_e32 v33, v0, v6, vcc_lo
+; GFX11-TRUE16-NEXT: v_bfe_u32 v0, v2, 16, 1
+; GFX11-TRUE16-NEXT: v_lshlrev_b32_e32 v6, 16, v19
+; GFX11-TRUE16-NEXT: v_cmp_u_f32_e32 vcc_lo, v1, v1
+; GFX11-TRUE16-NEXT: v_add_nc_u32_e32 v3, v3, v4
+; GFX11-TRUE16-NEXT: v_or_b32_e32 v1, 0x400000, v2
+; GFX11-TRUE16-NEXT: v_add_nc_u32_e32 v0, v0, v2
+; GFX11-TRUE16-NEXT: v_add_f32_e32 v6, 0x40c00000, v6
+; GFX11-TRUE16-NEXT: v_dual_cndmask_b32 v18, v5, v7 :: v_dual_and_b32 v7, 0xffff0000, v20
+; GFX11-TRUE16-NEXT: v_cmp_u_f32_e32 vcc_lo, v2, v2
+; GFX11-TRUE16-NEXT: s_delay_alu instid0(VALU_DEP_4)
+; GFX11-TRUE16-NEXT: v_add_nc_u32_e32 v0, 0x7fff, v0
+; GFX11-TRUE16-NEXT: v_add_nc_u32_e32 v3, 0x7fff, v3
+; GFX11-TRUE16-NEXT: v_bfe_u32 v5, v6, 16, 1
+; GFX11-TRUE16-NEXT: v_lshlrev_b32_e32 v2, 16, v20
+; GFX11-TRUE16-NEXT: v_mov_b16_e32 v17.l, v33.h
+; GFX11-TRUE16-NEXT: v_cndmask_b32_e32 v34, v0, v1, vcc_lo
+; GFX11-TRUE16-NEXT: v_cmp_u_f32_e32 vcc_lo, v4, v4
+; GFX11-TRUE16-NEXT: v_dual_add_f32 v1, 0x40c00000, v7 :: v_dual_add_nc_u32 v0, v5, v6
+; GFX11-TRUE16-NEXT: v_or_b32_e32 v5, 0x400000, v6
+; GFX11-TRUE16-NEXT: s_delay_alu instid0(VALU_DEP_4)
+; GFX11-TRUE16-NEXT: v_mov_b16_e32 v18.l, v34.h
+; GFX11-TRUE16-NEXT: v_cndmask_b32_e32 v19, v3, v8, vcc_lo
+; GFX11-TRUE16-NEXT: v_and_b32_e32 v3, 0xffff0000, v21
+; GFX11-TRUE16-NEXT: v_add_nc_u32_e32 v0, 0x7fff, v0
+; GFX11-TRUE16-NEXT: v_cmp_u_f32_e32 vcc_lo, v6, v6
+; GFX11-TRUE16-NEXT: v_bfe_u32 v4, v1, 16, 1
+; GFX11-TRUE16-NEXT: v_or_b32_e32 v7, 0x400000, v1
+; GFX11-TRUE16-NEXT: v_dual_add_f32 v3, 0x40c00000, v3 :: v_dual_lshlrev_b32 v6, 16, v21
+; GFX11-TRUE16-NEXT: v_cndmask_b32_e32 v35, v0, v5, vcc_lo
+; GFX11-TRUE16-NEXT: s_delay_alu instid0(VALU_DEP_4) | instskip(SKIP_1) | instid1(VALU_DEP_4)
+; GFX11-TRUE16-NEXT: v_add_nc_u32_e32 v4, v4, v1
+; GFX11-TRUE16-NEXT: v_cmp_u_f32_e32 vcc_lo, v1, v1
+; GFX11-TRUE16-NEXT: v_bfe_u32 v5, v3, 16, 1
+; GFX11-TRUE16-NEXT: v_add_f32_e32 v2, 0x40c00000, v2
+; GFX11-TRUE16-NEXT: v_or_b32_e32 v8, 0x400000, v3
+; GFX11-TRUE16-NEXT: v_add_nc_u32_e32 v4, 0x7fff, v4
+; GFX11-TRUE16-NEXT: v_mov_b16_e32 v19.l, v35.h
+; GFX11-TRUE16-NEXT: v_add_nc_u32_e32 v5, v5, v3
+; GFX11-TRUE16-NEXT: v_bfe_u32 v0, v2, 16, 1
+; GFX11-TRUE16-NEXT: v_or_b32_e32 v1, 0x400000, v2
+; GFX11-TRUE16-NEXT: v_cndmask_b32_e32 v20, v4, v7, vcc_lo
+; GFX11-TRUE16-NEXT: v_cmp_u_f32_e32 vcc_lo, v2, v2
+; GFX11-TRUE16-NEXT: v_add_nc_u32_e32 v4, 0x7fff, v5
+; GFX11-TRUE16-NEXT: v_add_nc_u32_e32 v0, v0, v2
+; GFX11-TRUE16-NEXT: v_lshlrev_b32_e32 v2, 16, v22
+; GFX11-TRUE16-NEXT: v_mov_b16_e32 v16.l, v32.h
+; GFX11-TRUE16-NEXT: s_delay_alu instid0(VALU_DEP_3) | instskip(NEXT) | instid1(VALU_DEP_3)
+; GFX11-TRUE16-NEXT: v_add_nc_u32_e32 v0, 0x7fff, v0
+; GFX11-TRUE16-NEXT: v_add_f32_e32 v2, 0x40c00000, v2
+; GFX11-TRUE16-NEXT: s_delay_alu instid0(VALU_DEP_2) | instskip(SKIP_2) | instid1(VALU_DEP_3)
+; GFX11-TRUE16-NEXT: v_cndmask_b32_e32 v36, v0, v1, vcc_lo
+; GFX11-TRUE16-NEXT: v_cmp_u_f32_e32 vcc_lo, v3, v3
+; GFX11-TRUE16-NEXT: v_and_b32_e32 v3, 0xffff0000, v23
+; GFX11-TRUE16-NEXT: v_mov_b16_e32 v20.l, v36.h
+; GFX11-TRUE16-NEXT: v_cndmask_b32_e32 v21, v4, v8, vcc_lo
+; GFX11-TRUE16-NEXT: s_delay_alu instid0(VALU_DEP_3) | instskip(NEXT) | instid1(VALU_DEP_1)
+; GFX11-TRUE16-NEXT: v_dual_add_f32 v3, 0x40c00000, v3 :: v_dual_add_f32 v6, 0x40c00000, v6
+; GFX11-TRUE16-NEXT: v_or_b32_e32 v8, 0x400000, v3
+; GFX11-TRUE16-NEXT: s_delay_alu instid0(VALU_DEP_2) | instskip(SKIP_1) | instid1(VALU_DEP_2)
+; GFX11-TRUE16-NEXT: v_bfe_u32 v5, v6, 16, 1
+; GFX11-TRUE16-NEXT: v_cmp_u_f32_e32 vcc_lo, v6, v6
+; GFX11-TRUE16-NEXT: v_add_nc_u32_e32 v0, v5, v6
+; GFX11-TRUE16-NEXT: v_or_b32_e32 v5, 0x400000, v6
+; GFX11-TRUE16-NEXT: v_lshlrev_b32_e32 v6, 16, v23
+; GFX11-TRUE16-NEXT: s_delay_alu instid0(VALU_DEP_3) | instskip(NEXT) | instid1(VALU_DEP_1)
+; GFX11-TRUE16-NEXT: v_add_nc_u32_e32 v0, 0x7fff, v0
+; GFX11-TRUE16-NEXT: v_dual_add_f32 v6, 0x40c00000, v6 :: v_dual_cndmask_b32 v37, v0, v5
+; GFX11-TRUE16-NEXT: v_bfe_u32 v0, v2, 16, 1
+; GFX11-TRUE16-NEXT: v_bfe_u32 v5, v3, 16, 1
+; GFX11-TRUE16-NEXT: s_delay_alu instid0(VALU_DEP_3) | instskip(NEXT) | instid1(VALU_DEP_3)
+; GFX11-TRUE16-NEXT: v_mov_b16_e32 v21.l, v37.h
+; GFX11-TRUE16-NEXT: v_add_nc_u32_e32 v0, v0, v2
+; GFX11-TRUE16-NEXT: s_delay_alu instid0(VALU_DEP_3) | instskip(NEXT) | instid1(VALU_DEP_2)
+; GFX11-TRUE16-NEXT: v_add_nc_u32_e32 v5, v5, v3
+; GFX11-TRUE16-NEXT: v_add_nc_u32_e32 v0, 0x7fff, v0
+; GFX11-TRUE16-NEXT: v_and_b32_e32 v7, 0xffff0000, v22
+; GFX11-TRUE16-NEXT: s_delay_alu instid0(VALU_DEP_1) | instskip(NEXT) | instid1(VALU_DEP_1)
+; GFX11-TRUE16-NEXT: v_add_f32_e32 v1, 0x40c00000, v7
+; GFX11-TRUE16-NEXT: v_bfe_u32 v4, v1, 16, 1
+; GFX11-TRUE16-NEXT: v_or_b32_e32 v7, 0x400000, v1
+; GFX11-TRUE16-NEXT: v_cmp_u_f32_e32 vcc_lo, v1, v1
+; GFX11-TRUE16-NEXT: s_delay_alu instid0(VALU_DEP_3) | instskip(SKIP_1) | instid1(VALU_DEP_2)
+; GFX11-TRUE16-NEXT: v_add_nc_u32_e32 v4, v4, v1
+; GFX11-TRUE16-NEXT: v_or_b32_e32 v1, 0x400000, v2
+; GFX11-TRUE16-NEXT: v_add_nc_u32_e32 v4, 0x7fff, v4
+; GFX11-TRUE16-NEXT: s_delay_alu instid0(VALU_DEP_1)
+; GFX11-TRUE16-NEXT: v_cndmask_b32_e32 v22, v4, v7, vcc_lo
+; GFX11-TRUE16-NEXT: v_add_nc_u32_e32 v4, 0x7fff, v5
+; GFX11-TRUE16-NEXT: v_bfe_u32 v5, v6, 16, 1
+; GFX11-TRUE16-NEXT: v_and_b32_e32 v7, 0xffff0000, v24
+; GFX11-TRUE16-NEXT: v_cmp_u_f32_e32 vcc_lo, v2, v2
+; GFX11-TRUE16-NEXT: v_lshlrev_b32_e32 v2, 16, v24
+; GFX11-TRUE16-NEXT: v_cndmask_b32_e32 v38, v0, v1, vcc_lo
+; GFX11-TRUE16-NEXT: s_delay_alu instid0(VALU_DEP_4) | instskip(SKIP_3) | instid1(VALU_DEP_4)
+; GFX11-TRUE16-NEXT: v_dual_add_f32 v1, 0x40c00000, v7 :: v_dual_add_nc_u32 v0, v5, v6
+; GFX11-TRUE16-NEXT: v_cmp_u_f32_e32 vcc_lo, v3, v3
+; GFX11-TRUE16-NEXT: v_or_b32_e32 v5, 0x400000, v6
+; GFX11-TRUE16-NEXT: v_add_f32_e32 v2, 0x40c00000, v2
+; GFX11-TRUE16-NEXT: v_add_nc_u32_e32 v0, 0x7fff, v0
+; GFX11-TRUE16-NEXT: v_or_b32_e32 v7, 0x400000, v1
+; GFX11-TRUE16-NEXT: v_cndmask_b32_e32 v23, v4, v8, vcc_lo
+; GFX11-TRUE16-NEXT: v_bfe_u32 v4, v1, 16, 1
+; GFX11-TRUE16-NEXT: v_cmp_u_f32_e32 vcc_lo, v6, v6
+; GFX11-TRUE16-NEXT: v_lshlrev_b32_e32 v6, 16, v25
+; GFX11-TRUE16-NEXT: v_mov_b16_e32 v22.l, v38.h
+; GFX11-TRUE16-NEXT: s_delay_alu instid0(VALU_DEP_4)
+; GFX11-TRUE16-NEXT: v_add_nc_u32_e32 v4, v4, v1
+; GFX11-TRUE16-NEXT: v_cndmask_b32_e32 v39, v0, v5, vcc_lo
+; GFX11-TRUE16-NEXT: v_bfe_u32 v0, v2, 16, 1
+; GFX11-TRUE16-NEXT: v_cmp_u_f32_e32 vcc_lo, v1, v1
+; GFX11-TRUE16-NEXT: v_or_b32_e32 v1, 0x400000, v2
+; GFX11-TRUE16-NEXT: v_add_nc_u32_e32 v4, 0x7fff, v4
+; GFX11-TRUE16-NEXT: v_add_f32_e32 v6, 0x40c00000, v6
+; GFX11-TRUE16-NEXT: v_add_nc_u32_e32 v0, v0, v2
+; GFX11-TRUE16-NEXT: v_mov_b16_e32 v23.l, v39.h
+; GFX11-TRUE16-NEXT: s_delay_alu instid0(VALU_DEP_4) | instskip(NEXT) | instid1(VALU_DEP_3)
+; GFX11-TRUE16-NEXT: v_dual_cndmask_b32 v24, v4, v7 :: v_dual_and_b32 v7, 0xffff0000, v26
+; GFX11-TRUE16-NEXT: v_add_nc_u32_e32 v0, 0x7fff, v0
+; GFX11-TRUE16-NEXT: v_cmp_u_f32_e32 vcc_lo, v2, v2
+; GFX11-TRUE16-NEXT: v_lshlrev_b32_e32 v2, 16, v26
+; GFX11-TRUE16-NEXT: s_delay_alu instid0(VALU_DEP_3) | instskip(NEXT) | instid1(VALU_DEP_2)
+; GFX11-TRUE16-NEXT: v_dual_cndmask_b32 v48, v0, v1 :: v_dual_add_f32 v1, 0x40c00000, v7
+; GFX11-TRUE16-NEXT: v_add_f32_e32 v2, 0x40c00000, v2
+; GFX11-TRUE16-NEXT: s_delay_alu instid0(VALU_DEP_2) | instskip(NEXT) | instid1(VALU_DEP_3)
+; GFX11-TRUE16-NEXT: v_mov_b16_e32 v24.l, v48.h
+; GFX11-TRUE16-NEXT: v_or_b32_e32 v7, 0x400000, v1
+; GFX11-TRUE16-NEXT: v_and_b32_e32 v3, 0xffff0000, v25
+; GFX11-TRUE16-NEXT: s_delay_alu instid0(VALU_DEP_1) | instskip(NEXT) | instid1(VALU_DEP_1)
+; GFX11-TRUE16-NEXT: v_add_f32_e32 v3, 0x40c00000, v3
+; GFX11-TRUE16-NEXT: v_bfe_u32 v5, v3, 16, 1
+; GFX11-TRUE16-NEXT: v_or_b32_e32 v8, 0x400000, v3
+; GFX11-TRUE16-NEXT: v_cmp_u_f32_e32 vcc_lo, v3, v3
+; GFX11-TRUE16-NEXT: s_delay_alu instid0(VALU_DEP_3) | instskip(SKIP_1) | instid1(VALU_DEP_2)
+; GFX11-TRUE16-NEXT: v_add_nc_u32_e32 v5, v5, v3
+; GFX11-TRUE16-NEXT: v_and_b32_e32 v3, 0xffff0000, v27
+; GFX11-TRUE16-NEXT: v_add_nc_u32_e32 v4, 0x7fff, v5
+; GFX11-TRUE16-NEXT: v_bfe_u32 v5, v6, 16, 1
+; GFX11-TRUE16-NEXT: s_delay_alu instid0(VALU_DEP_3) | instskip(NEXT) | instid1(VALU_DEP_2)
+; GFX11-TRUE16-NEXT: v_add_f32_e32 v3, 0x40c00000, v3
+; GFX11-TRUE16-NEXT: v_dual_cndmask_b32 v25, v4, v8 :: v_dual_add_nc_u32 v0, v5, v6
+; GFX11-TRUE16-NEXT: v_bfe_u32 v4, v1, 16, 1
+; GFX11-TRUE16-NEXT: v_or_b32_e32 v5, 0x400000, v6
+; GFX11-TRUE16-NEXT: v_cmp_u_f32_e32 vcc_lo, v6, v6
+; GFX11-TRUE16-NEXT: v_lshlrev_b32_e32 v6, 16, v27
+; GFX11-TRUE16-NEXT: v_add_nc_u32_e32 v0, 0x7fff, v0
+; GFX11-TRUE16-NEXT: v_add_nc_u32_e32 v4, v4, v1
+; GFX11-TRUE16-NEXT: v_or_b32_e32 v8, 0x400000, v3
+; GFX11-TRUE16-NEXT: s_delay_alu instid0(VALU_DEP_3)
+; GFX11-TRUE16-NEXT: v_dual_add_f32 v6, 0x40c00000, v6 :: v_dual_cndmask_b32 v49, v0, v5
+; GFX11-TRUE16-NEXT: v_bfe_u32 v0, v2, 16, 1
+; GFX11-TRUE16-NEXT: v_bfe_u32 v5, v3, 16, 1
+; GFX11-TRUE16-NEXT: v_add_nc_u32_e32 v4, 0x7fff, v4
+; GFX11-TRUE16-NEXT: v_cmp_u_f32_e32 vcc_lo, v1, v1
+; GFX11-TRUE16-NEXT: v_or_b32_e32 v1, 0x400000, v2
+; GFX11-TRUE16-NEXT: v_add_nc_u32_e32 v0, v0, v2
+; GFX11-TRUE16-NEXT: v_add_nc_u32_e32 v5, v5, v3
+; GFX11-TRUE16-NEXT: v_mov_b16_e32 v25.l, v49.h
+; GFX11-TRUE16-NEXT: v_dual_cndmask_b32 v26, v4, v7 :: v_dual_and_b32 v7, 0xffff0000, v28
+; GFX11-TRUE16-NEXT: s_delay_alu instid0(VALU_DEP_4) | instskip(NEXT) | instid1(VALU_DEP_4)
+; GFX11-TRUE16-NEXT: v_add_nc_u32_e32 v0, 0x7fff, v0
+; GFX11-TRUE16-NEXT: v_add_nc_u32_e32 v4, 0x7fff, v5
+; GFX11-TRUE16-NEXT: v_bfe_u32 v5, v6, 16, 1
+; GFX11-TRUE16-NEXT: v_cmp_u_f32_e32 vcc_lo, v2, v2
+; GFX11-TRUE16-NEXT: v_lshlrev_b32_e32 v2, 16, v28
+; GFX11-TRUE16-NEXT: v_cndmask_b32_e32 v50, v0, v1, vcc_lo
+; GFX11-TRUE16-NEXT: s_delay_alu instid0(VALU_DEP_4) | instskip(SKIP_3) | instid1(VALU_DEP_4)
+; GFX11-TRUE16-NEXT: v_dual_add_f32 v1, 0x40c00000, v7 :: v_dual_add_nc_u32 v0, v5, v6
+; GFX11-TRUE16-NEXT: v_cmp_u_f32_e32 vcc_lo, v3, v3
+; GFX11-TRUE16-NEXT: v_or_b32_e32 v5, 0x400000, v6
+; GFX11-TRUE16-NEXT: v_add_f32_e32 v2, 0x40c00000, v2
+; GFX11-TRUE16-NEXT: v_add_nc_u32_e32 v0, 0x7fff, v0
+; GFX11-TRUE16-NEXT: v_lshlrev_b32_e32 v7, 16, v29
+; GFX11-TRUE16-NEXT: v_cndmask_b32_e32 v27, v4, v8, vcc_lo
+; GFX11-TRUE16-NEXT: v_bfe_u32 v4, v1, 16, 1
+; GFX11-TRUE16-NEXT: v_cmp_u_f32_e32 vcc_lo, v6, v6
+; GFX11-TRUE16-NEXT: v_or_b32_e32 v6, 0x400000, v1
+; GFX11-TRUE16-NEXT: v_mov_b16_e32 v26.l, v50.h
+; GFX11-TRUE16-NEXT: s_delay_alu instid0(VALU_DEP_4)
+; GFX11-TRUE16-NEXT: v_add_nc_u32_e32 v4, v4, v1
+; GFX11-TRUE16-NEXT: v_cndmask_b32_e32 v51, v0, v5, vcc_lo
+; GFX11-TRUE16-NEXT: v_bfe_u32 v0, v2, 16, 1
+; GFX11-TRUE16-NEXT: v_cmp_u_f32_e32 vcc_lo, v1, v1
+; GFX11-TRUE16-NEXT: v_add_f32_e32 v1, 0x40c00000, v7
+; GFX11-TRUE16-NEXT: v_add_nc_u32_e32 v4, 0x7fff, v4
+; GFX11-TRUE16-NEXT: v_mov_b16_e32 v27.l, v51.h
+; GFX11-TRUE16-NEXT: v_add_nc_u32_e32 v0, v0, v2
+; GFX11-TRUE16-NEXT: s_delay_alu instid0(VALU_DEP_4) | instskip(NEXT) | instid1(VALU_DEP_4)
+; GFX11-TRUE16-NEXT: v_bfe_u32 v7, v1, 16, 1
+; GFX11-TRUE16-NEXT: v_cndmask_b32_e32 v28, v4, v6, vcc_lo
+; GFX11-TRUE16-NEXT: v_or_b32_e32 v4, 0x400000, v2
+; GFX11-TRUE16-NEXT: s_delay_alu instid0(VALU_DEP_4) | instskip(SKIP_3) | instid1(VALU_DEP_4)
+; GFX11-TRUE16-NEXT: v_add_nc_u32_e32 v0, 0x7fff, v0
+; GFX11-TRUE16-NEXT: v_and_b32_e32 v6, 0xffff0000, v30
+; GFX11-TRUE16-NEXT: v_cmp_u_f32_e32 vcc_lo, v2, v2
+; GFX11-TRUE16-NEXT: v_add_nc_u32_e32 v2, v7, v1
+; GFX11-TRUE16-NEXT: v_cndmask_b32_e32 v52, v0, v4, vcc_lo
+; GFX11-TRUE16-NEXT: s_delay_alu instid0(VALU_DEP_4) | instskip(NEXT) | instid1(VALU_DEP_3)
+; GFX11-TRUE16-NEXT: v_add_f32_e32 v0, 0x40c00000, v6
+; GFX11-TRUE16-NEXT: v_add_nc_u32_e32 v2, 0x7fff, v2
+; GFX11-TRUE16-NEXT: v_or_b32_e32 v6, 0x400000, v1
+; GFX11-TRUE16-NEXT: s_delay_alu instid0(VALU_DEP_4) | instskip(NEXT) | instid1(VALU_DEP_4)
+; GFX11-TRUE16-NEXT: v_mov_b16_e32 v28.l, v52.h
+; GFX11-TRUE16-NEXT: v_bfe_u32 v4, v0, 16, 1
+; GFX11-TRUE16-NEXT: v_or_b32_e32 v7, 0x400000, v0
+; GFX11-TRUE16-NEXT: s_delay_alu instid0(VALU_DEP_2) | instskip(NEXT) | instid1(VALU_DEP_1)
+; GFX11-TRUE16-NEXT: v_add_nc_u32_e32 v4, v4, v0
+; GFX11-TRUE16-NEXT: v_add_nc_u32_e32 v4, 0x7fff, v4
+; GFX11-TRUE16-NEXT: v_and_b32_e32 v3, 0xffff0000, v29
+; GFX11-TRUE16-NEXT: s_delay_alu instid0(VALU_DEP_1) | instskip(NEXT) | instid1(VALU_DEP_1)
+; GFX11-TRUE16-NEXT: v_add_f32_e32 v3, 0x40c00000, v3
+; GFX11-TRUE16-NEXT: v_bfe_u32 v5, v3, 16, 1
+; GFX11-TRUE16-NEXT: v_or_b32_e32 v8, 0x400000, v3
+; GFX11-TRUE16-NEXT: v_cmp_u_f32_e32 vcc_lo, v3, v3
+; GFX11-TRUE16-NEXT: s_delay_alu instid0(VALU_DEP_3) | instskip(SKIP_1) | instid1(VALU_DEP_2)
+; GFX11-TRUE16-NEXT: v_add_nc_u32_e32 v5, v5, v3
+; GFX11-TRUE16-NEXT: v_lshlrev_b32_e32 v3, 16, v30
+; GFX11-TRUE16-NEXT: v_add_nc_u32_e32 v5, 0x7fff, v5
+; GFX11-TRUE16-NEXT: s_delay_alu instid0(VALU_DEP_2) | instskip(NEXT) | instid1(VALU_DEP_2)
+; GFX11-TRUE16-NEXT: v_add_f32_e32 v3, 0x40c00000, v3
+; GFX11-TRUE16-NEXT: v_cndmask_b32_e32 v29, v5, v8, vcc_lo
+; GFX11-TRUE16-NEXT: v_and_b32_e32 v5, 0xffff0000, v31
+; GFX11-TRUE16-NEXT: v_cmp_u_f32_e32 vcc_lo, v1, v1
+; GFX11-TRUE16-NEXT: s_delay_alu instid0(VALU_DEP_4)
+; GFX11-TRUE16-NEXT: v_bfe_u32 v1, v3, 16, 1
+; GFX11-TRUE16-NEXT: v_add_f32_e64 v8, 0x40c00000, s0
+; GFX11-TRUE16-NEXT: s_lshl_b32 s0, s12, 16
+; GFX11-TRUE16-NEXT: v_add_f32_e32 v5, 0x40c00000, v5
+; GFX11-TRUE16-NEXT: v_dual_cndmask_b32 v53, v2, v6 :: v_dual_lshlrev_b32 v2, 16, v31
+; GFX11-TRUE16-NEXT: v_add_nc_u32_e32 v1, v1, v3
+; GFX11-TRUE16-NEXT: v_cmp_u_f32_e32 vcc_lo, v0, v0
+; GFX11-TRUE16-NEXT: s_delay_alu instid0(VALU_DEP_4) | instskip(NEXT) | instid1(VALU_DEP_4)
+; GFX11-TRUE16-NEXT: v_bfe_u32 v6, v5, 16, 1
+; GFX11-TRUE16-NEXT: v_mov_b16_e32 v29.l, v53.h
+; GFX11-TRUE16-NEXT: v_add_f32_e32 v2, 0x40c00000, v2
+; GFX11-TRUE16-NEXT: v_add_nc_u32_e32 v0, 0x7fff, v1
+; GFX11-TRUE16-NEXT: v_cndmask_b32_e32 v30, v4, v7, vcc_lo
+; GFX11-TRUE16-NEXT: v_add_nc_u32_e32 v6, v6, v5
+; GFX11-TRUE16-NEXT: v_or_b32_e32 v4, 0x400000, v3
+; GFX11-TRUE16-NEXT: v_bfe_u32 v1, v2, 16, 1
+; GFX11-TRUE16-NEXT: v_cmp_u_f32_e32 vcc_lo, v3, v3
+; GFX11-TRUE16-NEXT: v_or_b32_e32 v7, 0x400000, v5
+; GFX11-TRUE16-NEXT: v_add_nc_u32_e32 v6, 0x7fff, v6
+; GFX11-TRUE16-NEXT: v_or_b32_e32 v3, 0x400000, v2
+; GFX11-TRUE16-NEXT: v_dual_cndmask_b32 v54, v0, v4 :: v_dual_add_nc_u32 v1, v1, v2
+; GFX11-TRUE16-NEXT: v_cmp_u_f32_e32 vcc_lo, v5, v5
+; GFX11-TRUE16-NEXT: v_add_f32_e64 v4, 0x40c00000, s0
+; GFX11-TRUE16-NEXT: s_and_b32 s0, s13, 0xffff0000
+; GFX11-TRUE16-NEXT: s_delay_alu instid0(VALU_DEP_3)
+; GFX11-TRUE16-NEXT: v_add_nc_u32_e32 v0, 0x7fff, v1
+; GFX11-TRUE16-NEXT: v_bfe_u32 v1, v8, 16, 1
+; GFX11-TRUE16-NEXT: v_cndmask_b32_e32 v31, v6, v7, vcc_lo
+; GFX11-TRUE16-NEXT: v_cmp_u_f32_e32 vcc_lo, v2, v2
+; GFX11-TRUE16-NEXT: v_add_f32_e64 v5, 0x40c00000, s0
+; GFX11-TRUE16-NEXT: s_lshl_b32 s0, s13, 16
+; GFX11-TRUE16-NEXT: v_add_nc_u32_e32 v1, v1, v8
+; GFX11-TRUE16-NEXT: v_add_f32_e64 v7, 0x40c00000, s0
+; GFX11-TRUE16-NEXT: v_cndmask_b32_e32 v64, v0, v3, vcc_lo
+; GFX11-TRUE16-NEXT: v_bfe_u32 v0, v4, 16, 1
+; GFX11-TRUE16-NEXT: v_bfe_u32 v2, v5, 16, 1
+; GFX11-TRUE16-NEXT: v_add_nc_u32_e32 v1, 0x7fff, v1
+; GFX11-TRUE16-NEXT: v_or_b32_e32 v3, 0x400000, v8
+; GFX11-TRUE16-NEXT: v_cmp_u_f32_e32 vcc_lo, v8, v8
+; GFX11-TRUE16-NEXT: v_add_nc_u32_e32 v6, v0, v4
+; GFX11-TRUE16-NEXT: v_add_nc_u32_e32 v2, v2, v5
+; GFX11-TRUE16-NEXT: s_and_b32 s0, s14, 0xffff0000
+; GFX11-TRUE16-NEXT: v_or_b32_e32 v8, 0x400000, v5
+; GFX11-TRUE16-NEXT: s_delay_alu instid0(VALU_DEP_3)
+; GFX11-TRUE16-NEXT: v_dual_cndmask_b32 v0, v1, v3 :: v_dual_add_nc_u32 v1, 0x7fff, v6
+; GFX11-TRUE16-NEXT: v_bfe_u32 v3, v7, 16, 1
+; GFX11-TRUE16-NEXT: v_or_b32_e32 v6, 0x400000, v4
+; GFX11-TRUE16-NEXT: v_cmp_u_f32_e32 vcc_lo, v4, v4
+; GFX11-TRUE16-NEXT: v_add_nc_u32_e32 v2, 0x7fff, v2
+; GFX11-TRUE16-NEXT: v_add_f32_e64 v9, 0x40c00000, s0
+; GFX11-TRUE16-NEXT: v_add_nc_u32_e32 v3, v3, v7
+; GFX11-TRUE16-NEXT: s_lshl_b32 s0, s14, 16
+; GFX11-TRUE16-NEXT: v_cndmask_b32_e32 v55, v1, v6, vcc_lo
+; GFX11-TRUE16-NEXT: v_cmp_u_f32_e32 vcc_lo, v5, v5
+; GFX11-TRUE16-NEXT: v_or_b32_e32 v4, 0x400000, v7
+; GFX11-TRUE16-NEXT: v_add_f32_e64 v5, 0x40c00000, s0
+; GFX11-TRUE16-NEXT: s_and_b32 s0, s15, 0xffff0000
+; GFX11-TRUE16-NEXT: v_mov_b16_e32 v31.l, v64.h
+; GFX11-TRUE16-NEXT: v_dual_cndmask_b32 v1, v2, v8 :: v_dual_add_nc_u32 v2, 0x7fff, v3
+; GFX11-TRUE16-NEXT: v_bfe_u32 v3, v9, 16, 1
+; GFX11-TRUE16-NEXT: v_cmp_u_f32_e32 vcc_lo, v7, v7
+; GFX11-TRUE16-NEXT: v_add_f32_e64 v6, 0x40c00000, s0
+; GFX11-TRUE16-NEXT: s_lshl_b32 s0, s15, 16
+; GFX11-TRUE16-NEXT: v_or_b32_e32 v7, 0x400000, v9
+; GFX11-TRUE16-NEXT: v_add_nc_u32_e32 v3, v3, v9
+; GFX11-TRUE16-NEXT: v_cndmask_b32_e32 v65, v2, v4, vcc_lo
+; GFX11-TRUE16-NEXT: v_bfe_u32 v2, v5, 16, 1
+; GFX11-TRUE16-NEXT: v_bfe_u32 v4, v6, 16, 1
+; GFX11-TRUE16-NEXT: v_add_f32_e64 v10, 0x40c00000, s0
+; GFX11-TRUE16-NEXT: v_add_nc_u32_e32 v3, 0x7fff, v3
+; GFX11-TRUE16-NEXT: v_cmp_u_f32_e32 vcc_lo, v9, v9
+; GFX11-TRUE16-NEXT: v_add_nc_u32_e32 v8, v2, v5
+; GFX11-TRUE16-NEXT: v_add_nc_u32_e32 v4, v4, v6
+; GFX11-TRUE16-NEXT: s_and_b32 s0, s16, 0xffff0000
+; GFX11-TRUE16-NEXT: v_or_b32_e32 v9, 0x400000, v6
+; GFX11-TRUE16-NEXT: s_delay_alu instid0(VALU_DEP_3)
+; GFX11-TRUE16-NEXT: v_dual_cndmask_b32 v2, v3, v7 :: v_dual_add_nc_u32 v3, 0x7fff, v8
+; GFX11-TRUE16-NEXT: v_bfe_u32 v7, v10, 16, 1
+; GFX11-TRUE16-NEXT: v_or_b32_e32 v8, 0x400000, v5
+; GFX11-TRUE16-NEXT: v_cmp_u_f32_e32 vcc_lo, v5, v5
+; GFX11-TRUE16-NEXT: v_add_nc_u32_e32 v4, 0x7fff, v4
+; GFX11-TRUE16-NEXT: v_add_f32_e64 v11, 0x40c00000, s0
+; GFX11-TRUE16-NEXT: v_add_nc_u32_e32 v7, v7, v10
+; GFX11-TRUE16-NEXT: s_lshl_b32 s0, s16, 16
+; GFX11-TRUE16-NEXT: v_cndmask_b32_e32 v66, v3, v8, vcc_lo
+; GFX11-TRUE16-NEXT: v_cmp_u_f32_e32 vcc_lo, v6, v6
+; GFX11-TRUE16-NEXT: v_bfe_u32 v5, v11, 16, 1
+; GFX11-TRUE16-NEXT: v_or_b32_e32 v6, 0x400000, v10
+; GFX11-TRUE16-NEXT: v_mov_b16_e32 v30.l, v54.h
+; GFX11-TRUE16-NEXT: v_mov_b16_e32 v2.l, v66.h
+; GFX11-TRUE16-NEXT: v_dual_cndmask_b32 v3, v4, v9 :: v_dual_add_nc_u32 v4, 0x7fff, v7
+; GFX11-TRUE16-NEXT: v_add_f32_e64 v7, 0x40c00000, s0
+; GFX11-TRUE16-NEXT: v_cmp_u_f32_e32 vcc_lo, v10, v10
+; GFX11-TRUE16-NEXT: s_and_b32 s0, s17, 0xffff0000
+; GFX11-TRUE16-NEXT: v_add_nc_u32_e32 v5, v5, v11
+; GFX11-TRUE16-NEXT: v_add_f32_e64 v8, 0x40c00000, s0
+; GFX11-TRUE16-NEXT: s_lshl_b32 s0, s17, 16
+; GFX11-TRUE16-NEXT: v_cndmask_b32_e32 v67, v4, v6, vcc_lo
+; GFX11-TRUE16-NEXT: v_bfe_u32 v4, v7, 16, 1
+; GFX11-TRUE16-NEXT: v_add_nc_u32_e32 v5, 0x7fff, v5
+; GFX11-TRUE16-NEXT: v_bfe_u32 v6, v8, 16, 1
+; GFX11-TRUE16-NEXT: v_or_b32_e32 v9, 0x400000, v11
+; GFX11-TRUE16-NEXT: v_add_f32_e64 v12, 0x40c00000, s0
+; GFX11-TRUE16-NEXT: v_add_nc_u32_e32 v10, v4, v7
+; GFX11-TRUE16-NEXT: v_cmp_u_f32_e32 vcc_lo, v11, v11
+; GFX11-TRUE16-NEXT: v_add_nc_u32_e32 v6, v6, v8
+; GFX11-TRUE16-NEXT: s_and_b32 s0, s18, 0xffff0000
+; GFX11-TRUE16-NEXT: v_or_b32_e32 v11, 0x400000, v8
+; GFX11-TRUE16-NEXT: v_add_f32_e64 v13, 0x40c00000, s0
+; GFX11-TRUE16-NEXT: v_dual_cndmask_b32 v4, v5, v9 :: v_dual_add_nc_u32 v5, 0x7fff, v10
+; GFX11-TRUE16-NEXT: v_bfe_u32 v9, v12, 16, 1
+; GFX11-TRUE16-NEXT: v_or_b32_e32 v10, 0x400000, v7
+; GFX11-TRUE16-NEXT: v_cmp_u_f32_e32 vcc_lo, v7, v7
+; GFX11-TRUE16-NEXT: v_add_nc_u32_e32 v6, 0x7fff, v6
+; GFX11-TRUE16-NEXT: s_lshl_b32 s0, s18, 16
+; GFX11-TRUE16-NEXT: v_add_nc_u32_e32 v9, v9, v12
+; GFX11-TRUE16-NEXT: v_bfe_u32 v7, v13, 16, 1
+; GFX11-TRUE16-NEXT: v_cndmask_b32_e32 v68, v5, v10, vcc_lo
+; GFX11-TRUE16-NEXT: v_cmp_u_f32_e32 vcc_lo, v8, v8
+; GFX11-TRUE16-NEXT: v_or_b32_e32 v8, 0x400000, v12
+; GFX11-TRUE16-NEXT: v_mov_b16_e32 v3.l, v67.h
+; GFX11-TRUE16-NEXT: v_add_nc_u32_e32 v7, v7, v13
+; GFX11-TRUE16-NEXT: v_mov_b16_e32 v4.l, v68.h
+; GFX11-TRUE16-NEXT: v_dual_cndmask_b32 v5, v6, v11 :: v_dual_add_nc_u32 v6, 0x7fff, v9
+; GFX11-TRUE16-NEXT: v_add_f32_e64 v9, 0x40c00000, s0
+; GFX11-TRUE16-NEXT: v_cmp_u_f32_e32 vcc_lo, v12, v12
+; GFX11-TRUE16-NEXT: s_and_b32 s0, s19, 0xffff0000
+; GFX11-TRUE16-NEXT: v_add_nc_u32_e32 v7, 0x7fff, v7
+; GFX11-TRUE16-NEXT: v_add_f32_e64 v10, 0x40c00000, s0
+; GFX11-TRUE16-NEXT: s_lshl_b32 s0, s19, 16
+; GFX11-TRUE16-NEXT: v_cndmask_b32_e32 v69, v6, v8, vcc_lo
+; GFX11-TRUE16-NEXT: v_bfe_u32 v6, v9, 16, 1
+; GFX11-TRUE16-NEXT: v_or_b32_e32 v11, 0x400000, v13
+; GFX11-TRUE16-NEXT: v_bfe_u32 v8, v10, 16, 1
+; GFX11-TRUE16-NEXT: v_add_f32_e64 v14, 0x40c00000, s0
+; GFX11-TRUE16-NEXT: v_cmp_u_f32_e32 vcc_lo, v13, v13
+; GFX11-TRUE16-NEXT: v_add_nc_u32_e32 v12, v6, v9
+; GFX11-TRUE16-NEXT: s_and_b32 s0, s20, 0xffff0000
+; GFX11-TRUE16-NEXT: v_add_nc_u32_e32 v8, v8, v10
+; GFX11-TRUE16-NEXT: v_or_b32_e32 v13, 0x400000, v10
+; GFX11-TRUE16-NEXT: s_delay_alu instid0(VALU_DEP_3)
+; GFX11-TRUE16-NEXT: v_dual_cndmask_b32 v6, v7, v11 :: v_dual_add_nc_u32 v7, 0x7fff, v12
+; GFX11-TRUE16-NEXT: v_bfe_u32 v11, v14, 16, 1
+; GFX11-TRUE16-NEXT: v_or_b32_e32 v12, 0x400000, v9
+; GFX11-TRUE16-NEXT: v_cmp_u_f32_e32 vcc_lo, v9, v9
+; GFX11-TRUE16-NEXT: v_add_nc_u32_e32 v8, 0x7fff, v8
+; GFX11-TRUE16-NEXT: v_add_f32_e64 v15, 0x40c00000, s0
+; GFX11-TRUE16-NEXT: v_add_nc_u32_e32 v11, v11, v14
+; GFX11-TRUE16-NEXT: s_lshl_b32 s0, s20, 16
+; GFX11-TRUE16-NEXT: v_cndmask_b32_e32 v70, v7, v12, vcc_lo
+; GFX11-TRUE16-NEXT: v_cmp_u_f32_e32 vcc_lo, v10, v10
+; GFX11-TRUE16-NEXT: v_bfe_u32 v9, v15, 16, 1
+; GFX11-TRUE16-NEXT: v_or_b32_e32 v10, 0x400000, v14
+; GFX11-TRUE16-NEXT: v_mov_b16_e32 v5.l, v69.h
+; GFX11-TRUE16-NEXT: v_mov_b16_e32 v6.l, v70.h
+; GFX11-TRUE16-NEXT: v_dual_cndmask_b32 v7, v8, v13 :: v_dual_add_nc_u32 v8, 0x7fff, v11
+; GFX11-TRUE16-NEXT: v_add_f32_e64 v11, 0x40c00000, s0
+; GFX11-TRUE16-NEXT: v_cmp_u_f32_e32 vcc_lo, v14, v14
+; GFX11-TRUE16-NEXT: s_and_b32 s0, s21, 0xffff0000
+; GFX11-TRUE16-NEXT: v_add_nc_u32_e32 v9, v9, v15
+; GFX11-TRUE16-NEXT: v_add_f32_e64 v12, 0x40c00000, s0
+; GFX11-TRUE16-NEXT: s_lshl_b32 s0, s21, 16
+; GFX11-TRUE16-NEXT: v_cndmask_b32_e32 v71, v8, v10, vcc_lo
+; GFX11-TRUE16-NEXT: v_bfe_u32 v8, v11, 16, 1
+; GFX11-TRUE16-NEXT: v_add_nc_u32_e32 v9, 0x7fff, v9
+; GFX11-TRUE16-NEXT: v_bfe_u32 v10, v12, 16, 1
+; GFX11-TRUE16-NEXT: v_or_b32_e32 v13, 0x400000, v15
+; GFX11-TRUE16-NEXT: v_add_f32_e64 v80, 0x40c00000, s0
+; GFX11-TRUE16-NEXT: v_add_nc_u32_e32 v14, v8, v11
+; GFX11-TRUE16-NEXT: v_cmp_u_f32_e32 vcc_lo, v15, v15
+; GFX11-TRUE16-NEXT: v_add_nc_u32_e32 v10, v10, v12
+; GFX11-TRUE16-NEXT: s_and_b32 s0, s22, 0xffff0000
+; GFX11-TRUE16-NEXT: v_or_b32_e32 v15, 0x400000, v12
+; GFX11-TRUE16-NEXT: v_add_f32_e64 v81, 0x40c00000, s0
+; GFX11-TRUE16-NEXT: v_dual_cndmask_b32 v8, v9, v13 :: v_dual_add_nc_u32 v9, 0x7fff, v14
+; GFX11-TRUE16-NEXT: v_bfe_u32 v13, v80, 16, 1
+; GFX11-TRUE16-NEXT: v_or_b32_e32 v14, 0x400000, v11
+; GFX11-TRUE16-NEXT: v_cmp_u_f32_e32 vcc_lo, v11, v11
+; GFX11-TRUE16-NEXT: v_add_nc_u32_e32 v10, 0x7fff, v10
+; GFX11-TRUE16-NEXT: s_lshl_b32 s0, s22, 16
+; GFX11-TRUE16-NEXT: v_add_nc_u32_e32 v13, v13, v80
+; GFX11-TRUE16-NEXT: v_bfe_u32 v11, v81, 16, 1
+; GFX11-TRUE16-NEXT: v_cndmask_b32_e32 v82, v9, v14, vcc_lo
+; GFX11-TRUE16-NEXT: v_cmp_u_f32_e32 vcc_lo, v12, v12
+; GFX11-TRUE16-NEXT: v_or_b32_e32 v12, 0x400000, v80
+; GFX11-TRUE16-NEXT: v_mov_b16_e32 v7.l, v71.h
+; GFX11-TRUE16-NEXT: v_add_nc_u32_e32 v11, v11, v81
+; GFX11-TRUE16-NEXT: v_mov_b16_e32 v8.l, v82.h
+; GFX11-TRUE16-NEXT: v_dual_cndmask_b32 v9, v10, v15 :: v_dual_add_nc_u32 v10, 0x7fff, v13
+; GFX11-TRUE16-NEXT: v_add_f32_e64 v13, 0x40c00000, s0
+; GFX11-TRUE16-NEXT: v_cmp_u_f32_e32 vcc_lo, v80, v80
+; GFX11-TRUE16-NEXT: s_and_b32 s0, s23, 0xffff0000
+; GFX11-TRUE16-NEXT: v_add_nc_u32_e32 v11, 0x7fff, v11
+; GFX11-TRUE16-NEXT: v_add_f32_e64 v14, 0x40c00000, s0
+; GFX11-TRUE16-NEXT: s_lshl_b32 s0, s23, 16
+; GFX11-TRUE16-NEXT: v_cndmask_b32_e32 v80, v10, v12, vcc_lo
+; GFX11-TRUE16-NEXT: v_bfe_u32 v10, v13, 16, 1
+; GFX11-TRUE16-NEXT: v_or_b32_e32 v12, 0x400000, v81
+; GFX11-TRUE16-NEXT: v_bfe_u32 v15, v14, 16, 1
+; GFX11-TRUE16-NEXT: v_add_f32_e64 v84, 0x40c00000, s0
+; GFX11-TRUE16-NEXT: v_cmp_u_f32_e32 vcc_lo, v81, v81
+; GFX11-TRUE16-NEXT: v_add_nc_u32_e32 v83, v10, v13
+; GFX11-TRUE16-NEXT: s_and_b32 s0, s24, 0xffff0000
+; GFX11-TRUE16-NEXT: v_or_b32_e32 v85, 0x400000, v14
+; GFX11-TRUE16-NEXT: v_bfe_u32 v81, v84, 16, 1
+; GFX11-TRUE16-NEXT: v_cndmask_b32_e32 v10, v11, v12, vcc_lo
+; GFX11-TRUE16-NEXT: v_add_nc_u32_e32 v11, v15, v14
+; GFX11-TRUE16-NEXT: v_add_nc_u32_e32 v12, 0x7fff, v83
+; GFX11-TRUE16-NEXT: v_or_b32_e32 v15, 0x400000, v13
+; GFX11-TRUE16-NEXT: v_cmp_u_f32_e32 vcc_lo, v13, v13
+; GFX11-TRUE16-NEXT: v_add_f32_e64 v83, 0x40c00000, s0
+; GFX11-TRUE16-NEXT: v_add_nc_u32_e32 v11, 0x7fff, v11
+; GFX11-TRUE16-NEXT: v_add_nc_u32_e32 v81, v81, v84
+; GFX11-TRUE16-NEXT: s_lshl_b32 s0, s24, 16
+; GFX11-TRUE16-NEXT: v_cndmask_b32_e32 v86, v12, v15, vcc_lo
+; GFX11-TRUE16-NEXT: v_cmp_u_f32_e32 vcc_lo, v14, v14
+; GFX11-TRUE16-NEXT: v_bfe_u32 v12, v83, 16, 1
+; GFX11-TRUE16-NEXT: v_add_nc_u32_e32 v13, 0x7fff, v81
+; GFX11-TRUE16-NEXT: v_or_b32_e32 v14, 0x400000, v84
+; GFX11-TRUE16-NEXT: v_add_f32_e64 v15, 0x40c00000, s0
+; GFX11-TRUE16-NEXT: v_cndmask_b32_e32 v11, v11, v85, vcc_lo
+; GFX11-TRUE16-NEXT: v_cmp_u_f32_e32 vcc_lo, v84, v84
+; GFX11-TRUE16-NEXT: v_add_nc_u32_e32 v12, v12, v83
+; GFX11-TRUE16-NEXT: v_add_f32_e64 v81, 0x40c00000, s1
+; GFX11-TRUE16-NEXT: s_lshl_b32 s0, s25, 16
+; GFX11-TRUE16-NEXT: v_or_b32_e32 v85, 0x400000, v83
+; GFX11-TRUE16-NEXT: v_cndmask_b32_e32 v84, v13, v14, vcc_lo
+; GFX11-TRUE16-NEXT: v_bfe_u32 v13, v15, 16, 1
+; GFX11-TRUE16-NEXT: v_add_nc_u32_e32 v12, 0x7fff, v12
+; GFX11-TRUE16-NEXT: v_bfe_u32 v14, v81, 16, 1
+; GFX11-TRUE16-NEXT: v_add_f32_e64 v87, 0x40c00000, s0
+; GFX11-TRUE16-NEXT: v_cmp_u_f32_e32 vcc_lo, v83, v83
+; GFX11-TRUE16-NEXT: v_add_nc_u32_e32 v13, v13, v15
+; GFX11-TRUE16-NEXT: v_or_b32_e32 v96, 0x400000, v81
+; GFX11-TRUE16-NEXT: v_add_nc_u32_e32 v14, v14, v81
+; GFX11-TRUE16-NEXT: v_bfe_u32 v83, v87, 16, 1
+; GFX11-TRUE16-NEXT: v_cndmask_b32_e32 v12, v12, v85, vcc_lo
+; GFX11-TRUE16-NEXT: v_add_nc_u32_e32 v13, 0x7fff, v13
+; GFX11-TRUE16-NEXT: v_or_b32_e32 v85, 0x400000, v15
+; GFX11-TRUE16-NEXT: v_cmp_u_f32_e32 vcc_lo, v15, v15
+; GFX11-TRUE16-NEXT: v_add_nc_u32_e32 v14, 0x7fff, v14
+; GFX11-TRUE16-NEXT: v_add_nc_u32_e32 v83, v83, v87
+; GFX11-TRUE16-NEXT: s_and_b32 s0, s26, 0xffff0000
+; GFX11-TRUE16-NEXT: v_mov_b16_e32 v11.l, v84.h
+; GFX11-TRUE16-NEXT: v_cndmask_b32_e32 v85, v13, v85, vcc_lo
+; GFX11-TRUE16-NEXT: v_cmp_u_f32_e32 vcc_lo, v81, v81
+; GFX11-TRUE16-NEXT: v_add_f32_e64 v15, 0x40c00000, s0
+; GFX11-TRUE16-NEXT: s_lshl_b32 s0, s26, 16
+; GFX11-TRUE16-NEXT: v_mov_b16_e32 v10.l, v86.h
+; GFX11-TRUE16-NEXT: v_mov_b16_e32 v12.l, v85.h
+; GFX11-TRUE16-NEXT: v_dual_cndmask_b32 v13, v14, v96 :: v_dual_add_nc_u32 v14, 0x7fff, v83
+; GFX11-TRUE16-NEXT: v_add_f32_e64 v83, 0x40c00000, s0
+; GFX11-TRUE16-NEXT: s_lshl_b32 s0, s27, 16
+; GFX11-TRUE16-NEXT: v_or_b32_e32 v96, 0x400000, v87
+; GFX11-TRUE16-NEXT: v_add_f32_e64 v97, 0x40c00000, s0
+; GFX11-TRUE16-NEXT: v_cmp_u_f32_e32 vcc_lo, v87, v87
+; GFX11-TRUE16-NEXT: v_bfe_u32 v98, v83, 16, 1
+; GFX11-TRUE16-NEXT: s_and_b32 s0, s27, 0xffff0000
+; GFX11-TRUE16-NEXT: v_bfe_u32 v81, v15, 16, 1
+; GFX11-TRUE16-NEXT: v_bfe_u32 v100, v97, 16, 1
+; GFX11-TRUE16-NEXT: v_cndmask_b32_e32 v87, v14, v96, vcc_lo
+; GFX11-TRUE16-NEXT: v_add_nc_u32_e32 v14, v98, v83
+; GFX11-TRUE16-NEXT: v_add_f32_e64 v99, 0x40c00000, s0
+; GFX11-TRUE16-NEXT: v_or_b32_e32 v101, 0x400000, v83
+; GFX11-TRUE16-NEXT: v_add_nc_u32_e32 v98, v100, v97
+; GFX11-TRUE16-NEXT: v_cmp_u_f32_e32 vcc_lo, v83, v83
+; GFX11-TRUE16-NEXT: v_add_nc_u32_e32 v14, 0x7fff, v14
+; GFX11-TRUE16-NEXT: v_add_nc_u32_e32 v81, v81, v15
+; GFX11-TRUE16-NEXT: v_bfe_u32 v96, v99, 16, 1
+; GFX11-TRUE16-NEXT: v_add_nc_u32_e32 v98, 0x7fff, v98
+; GFX11-TRUE16-NEXT: v_or_b32_e32 v102, 0x400000, v97
+; GFX11-TRUE16-NEXT: v_cndmask_b32_e32 v83, v14, v101, vcc_lo
+; GFX11-TRUE16-NEXT: v_cmp_u_f32_e32 vcc_lo, v97, v97
+; GFX11-TRUE16-NEXT: v_add_nc_u32_e32 v81, 0x7fff, v81
+; GFX11-TRUE16-NEXT: v_or_b32_e32 v100, 0x400000, v15
+; GFX11-TRUE16-NEXT: v_add_nc_u32_e32 v96, v96, v99
+; GFX11-TRUE16-NEXT: v_or_b32_e32 v101, 0x400000, v99
+; GFX11-TRUE16-NEXT: v_cndmask_b32_e32 v97, v98, v102, vcc_lo
+; GFX11-TRUE16-NEXT: v_cmp_u_f32_e32 vcc_lo, v15, v15
+; GFX11-TRUE16-NEXT: v_mov_b16_e32 v13.l, v87.h
+; GFX11-TRUE16-NEXT: v_add_nc_u32_e32 v96, 0x7fff, v96
+; GFX11-TRUE16-NEXT: v_mov_b16_e32 v9.l, v80.h
+; GFX11-TRUE16-NEXT: v_mov_b16_e32 v1.l, v65.h
+; GFX11-TRUE16-NEXT: v_cndmask_b32_e32 v14, v81, v100, vcc_lo
+; GFX11-TRUE16-NEXT: v_cmp_u_f32_e32 vcc_lo, v99, v99
+; GFX11-TRUE16-NEXT: v_mov_b16_e32 v14.l, v83.h
+; GFX11-TRUE16-NEXT: v_mov_b16_e32 v0.l, v55.h
+; GFX11-TRUE16-NEXT: v_cndmask_b32_e32 v15, v96, v101, vcc_lo
+; GFX11-TRUE16-NEXT: v_mov_b16_e32 v15.l, v97.h
+; GFX11-TRUE16-NEXT: s_setpc_b64 s[30:31]
+; GFX11-TRUE16-NEXT: .LBB105_3:
+; GFX11-TRUE16-NEXT: s_branch .LBB105_2
+; GFX11-TRUE16-NEXT: .LBB105_4:
+; GFX11-TRUE16-NEXT: v_dual_mov_b32 v0, s12 :: v_dual_mov_b32 v1, s13
+; GFX11-TRUE16-NEXT: v_dual_mov_b32 v2, s14 :: v_dual_mov_b32 v3, s15
+; GFX11-TRUE16-NEXT: v_dual_mov_b32 v4, s16 :: v_dual_mov_b32 v5, s17
+; GFX11-TRUE16-NEXT: v_dual_mov_b32 v6, s18 :: v_dual_mov_b32 v7, s19
+; GFX11-TRUE16-NEXT: v_dual_mov_b32 v8, s20 :: v_dual_mov_b32 v9, s21
+; GFX11-TRUE16-NEXT: v_dual_mov_b32 v10, s22 :: v_dual_mov_b32 v11, s23
+; GFX11-TRUE16-NEXT: v_dual_mov_b32 v12, s24 :: v_dual_mov_b32 v13, s25
+; GFX11-TRUE16-NEXT: v_dual_mov_b32 v14, s26 :: v_dual_mov_b32 v15, s27
+; GFX11-TRUE16-NEXT: s_setpc_b64 s[30:31]
+;
+; GFX11-FAKE16-LABEL: bitcast_v64bf16_to_v64i16_scalar:
+; GFX11-FAKE16: ; %bb.0:
+; GFX11-FAKE16-NEXT: s_waitcnt vmcnt(0) expcnt(0) lgkmcnt(0)
+; GFX11-FAKE16-NEXT: v_cmp_ne_u32_e32 vcc_lo, 0, v14
+; GFX11-FAKE16-NEXT: v_dual_mov_b32 v31, v13 :: v_dual_mov_b32 v30, v12
+; GFX11-FAKE16-NEXT: v_dual_mov_b32 v29, v11 :: v_dual_mov_b32 v28, v10
+; GFX11-FAKE16-NEXT: v_dual_mov_b32 v27, v9 :: v_dual_mov_b32 v26, v8
+; GFX11-FAKE16-NEXT: v_dual_mov_b32 v25, v7 :: v_dual_mov_b32 v24, v6
+; GFX11-FAKE16-NEXT: v_dual_mov_b32 v23, v5 :: v_dual_mov_b32 v22, v4
+; GFX11-FAKE16-NEXT: v_dual_mov_b32 v21, v3 :: v_dual_mov_b32 v20, v2
+; GFX11-FAKE16-NEXT: v_dual_mov_b32 v19, v1 :: v_dual_mov_b32 v18, v0
+; GFX11-FAKE16-NEXT: v_dual_mov_b32 v16, s28 :: v_dual_mov_b32 v17, s29
+; GFX11-FAKE16-NEXT: s_mov_b32 s15, s3
+; GFX11-FAKE16-NEXT: s_mov_b32 s14, s2
+; GFX11-FAKE16-NEXT: s_mov_b32 s13, s1
+; GFX11-FAKE16-NEXT: s_mov_b32 s12, s0
+; GFX11-FAKE16-NEXT: s_mov_b32 s0, 0
+; GFX11-FAKE16-NEXT: s_and_b32 s1, vcc_lo, exec_lo
+; GFX11-FAKE16-NEXT: s_cbranch_scc0 .LBB105_3
+; GFX11-FAKE16-NEXT: ; %bb.1: ; %Flow
+; GFX11-FAKE16-NEXT: s_and_not1_b32 vcc_lo, exec_lo, s0
+; GFX11-FAKE16-NEXT: s_cbranch_vccnz .LBB105_4
+; GFX11-FAKE16-NEXT: .LBB105_2: ; %cmp.true
+; GFX11-FAKE16-NEXT: v_and_b32_e32 v2, 0xffff0000, v17
+; GFX11-FAKE16-NEXT: v_lshlrev_b32_e32 v1, 16, v16
+; GFX11-FAKE16-NEXT: v_and_b32_e32 v4, 0xffff0000, v18
+; GFX11-FAKE16-NEXT: s_and_b32 s0, s12, 0xffff0000
+; GFX11-FAKE16-NEXT: s_lshl_b32 s1, s24, 16
+; GFX11-FAKE16-NEXT: s_delay_alu instid0(VALU_DEP_2) | instskip(SKIP_1) | instid1(VALU_DEP_2)
+; GFX11-FAKE16-NEXT: v_dual_add_f32 v2, 0x40c00000, v2 :: v_dual_add_f32 v1, 0x40c00000, v1
+; GFX11-FAKE16-NEXT: v_lshlrev_b32_e32 v3, 16, v17
+; GFX11-FAKE16-NEXT: v_bfe_u32 v7, v2, 16, 1
+; GFX11-FAKE16-NEXT: s_delay_alu instid0(VALU_DEP_3)
+; GFX11-FAKE16-NEXT: v_bfe_u32 v6, v1, 16, 1
+; GFX11-FAKE16-NEXT: v_or_b32_e32 v9, 0x400000, v1
+; GFX11-FAKE16-NEXT: v_or_b32_e32 v10, 0x400000, v2
+; GFX11-FAKE16-NEXT: v_add_f32_e32 v3, 0x40c00000, v3
+; GFX11-FAKE16-NEXT: v_add_nc_u32_e32 v7, v7, v2
+; GFX11-FAKE16-NEXT: v_add_nc_u32_e32 v6, v6, v1
+; GFX11-FAKE16-NEXT: s_delay_alu instid0(VALU_DEP_2) | instskip(NEXT) | instid1(VALU_DEP_2)
+; GFX11-FAKE16-NEXT: v_add_nc_u32_e32 v7, 0x7fff, v7
+; GFX11-FAKE16-NEXT: v_add_nc_u32_e32 v6, 0x7fff, v6
+; GFX11-FAKE16-NEXT: v_and_b32_e32 v0, 0xffff0000, v16
+; GFX11-FAKE16-NEXT: s_delay_alu instid0(VALU_DEP_1) | instskip(NEXT) | instid1(VALU_DEP_1)
+; GFX11-FAKE16-NEXT: v_add_f32_e32 v0, 0x40c00000, v0
+; GFX11-FAKE16-NEXT: v_bfe_u32 v5, v0, 16, 1
+; GFX11-FAKE16-NEXT: v_or_b32_e32 v8, 0x400000, v0
+; GFX11-FAKE16-NEXT: v_cmp_u_f32_e32 vcc_lo, v0, v0
+; GFX11-FAKE16-NEXT: s_delay_alu instid0(VALU_DEP_3) | instskip(SKIP_3) | instid1(VALU_DEP_4)
+; GFX11-FAKE16-NEXT: v_add_nc_u32_e32 v5, v5, v0
+; GFX11-FAKE16-NEXT: v_add_f32_e32 v0, 0x40c00000, v4
+; GFX11-FAKE16-NEXT: v_bfe_u32 v11, v3, 16, 1
+; GFX11-FAKE16-NEXT: v_lshlrev_b32_e32 v4, 16, v18
+; GFX11-FAKE16-NEXT: v_add_nc_u32_e32 v5, 0x7fff, v5
+; GFX11-FAKE16-NEXT: s_delay_alu instid0(VALU_DEP_1)
+; GFX11-FAKE16-NEXT: v_cndmask_b32_e32 v16, v5, v8, vcc_lo
+; GFX11-FAKE16-NEXT: v_cmp_u_f32_e32 vcc_lo, v1, v1
+; GFX11-FAKE16-NEXT: v_add_nc_u32_e32 v1, v11, v3
+; GFX11-FAKE16-NEXT: v_or_b32_e32 v5, 0x400000, v3
+; GFX11-FAKE16-NEXT: v_cndmask_b32_e32 v17, v6, v9, vcc_lo
+; GFX11-FAKE16-NEXT: v_cmp_u_f32_e32 vcc_lo, v2, v2
+; GFX11-FAKE16-NEXT: s_delay_alu instid0(VALU_DEP_4) | instskip(SKIP_4) | instid1(VALU_DEP_3)
+; GFX11-FAKE16-NEXT: v_add_nc_u32_e32 v1, 0x7fff, v1
+; GFX11-FAKE16-NEXT: v_bfe_u32 v2, v0, 16, 1
+; GFX11-FAKE16-NEXT: v_and_b32_e32 v6, 0xffff0000, v19
+; GFX11-FAKE16-NEXT: v_cndmask_b32_e32 v32, v7, v10, vcc_lo
+; GFX11-FAKE16-NEXT: v_cmp_u_f32_e32 vcc_lo, v3, v3
+; GFX11-FAKE16-NEXT: v_dual_add_f32 v3, 0x40c00000, v6 :: v_dual_add_nc_u32 v2, v2, v0
+; GFX11-FAKE16-NEXT: v_or_b32_e32 v6, 0x400000, v0
+; GFX11-FAKE16-NEXT: v_dual_cndmask_b32 v18, v1, v5 :: v_dual_lshlrev_b32 v5, 16, v19
+; GFX11-FAKE16-NEXT: s_delay_alu instid0(VALU_DEP_3) | instskip(NEXT) | instid1(VALU_DEP_4)
+; GFX11-FAKE16-NEXT: v_add_nc_u32_e32 v2, 0x7fff, v2
+; GFX11-FAKE16-NEXT: v_bfe_u32 v7, v3, 16, 1
+; GFX11-FAKE16-NEXT: v_cmp_u_f32_e32 vcc_lo, v0, v0
+; GFX11-FAKE16-NEXT: s_delay_alu instid0(VALU_DEP_4) | instskip(NEXT) | instid1(VALU_DEP_3)
+; GFX11-FAKE16-NEXT: v_dual_add_f32 v5, 0x40c00000, v5 :: v_dual_add_f32 v4, 0x40c00000, v4
+; GFX11-FAKE16-NEXT: v_dual_cndmask_b32 v33, v2, v6 :: v_dual_add_nc_u32 v2, v7, v3
+; GFX11-FAKE16-NEXT: v_and_b32_e32 v7, 0xffff0000, v20
+; GFX11-FAKE16-NEXT: s_delay_alu instid0(VALU_DEP_3) | instskip(SKIP_2) | instid1(VALU_DEP_3)
+; GFX11-FAKE16-NEXT: v_bfe_u32 v1, v4, 16, 1
+; GFX11-FAKE16-NEXT: v_cmp_u_f32_e32 vcc_lo, v4, v4
+; GFX11-FAKE16-NEXT: v_bfe_u32 v6, v5, 16, 1
+; GFX11-FAKE16-NEXT: v_add_nc_u32_e32 v1, v1, v4
+; GFX11-FAKE16-NEXT: s_delay_alu instid0(VALU_DEP_1) | instskip(SKIP_1) | instid1(VALU_DEP_4)
+; GFX11-FAKE16-NEXT: v_add_nc_u32_e32 v0, 0x7fff, v1
+; GFX11-FAKE16-NEXT: v_or_b32_e32 v1, 0x400000, v4
+; GFX11-FAKE16-NEXT: v_add_nc_u32_e32 v4, v6, v5
+; GFX11-FAKE16-NEXT: s_delay_alu instid0(VALU_DEP_2) | instskip(SKIP_4) | instid1(VALU_DEP_4)
+; GFX11-FAKE16-NEXT: v_dual_cndmask_b32 v19, v0, v1 :: v_dual_add_nc_u32 v0, 0x7fff, v2
+; GFX11-FAKE16-NEXT: v_or_b32_e32 v1, 0x400000, v3
+; GFX11-FAKE16-NEXT: v_cmp_u_f32_e32 vcc_lo, v3, v3
+; GFX11-FAKE16-NEXT: v_add_f32_e32 v2, 0x40c00000, v7
+; GFX11-FAKE16-NEXT: v_or_b32_e32 v3, 0x400000, v5
+; GFX11-FAKE16-NEXT: v_cndmask_b32_e32 v34, v0, v1, vcc_lo
+; GFX11-FAKE16-NEXT: v_cmp_u_f32_e32 vcc_lo, v5, v5
+; GFX11-FAKE16-NEXT: v_lshlrev_b32_e32 v5, 16, v21
+; GFX11-FAKE16-NEXT: v_lshlrev_b32_e32 v6, 16, v20
+; GFX11-FAKE16-NEXT: v_bfe_u32 v0, v2, 16, 1
+; GFX11-FAKE16-NEXT: v_add_nc_u32_e32 v1, 0x7fff, v4
+; GFX11-FAKE16-NEXT: s_delay_alu instid0(VALU_DEP_3) | instskip(SKIP_1) | instid1(VALU_DEP_4)
+; GFX11-FAKE16-NEXT: v_dual_add_f32 v5, 0x40c00000, v5 :: v_dual_add_f32 v4, 0x40c00000, v6
+; GFX11-FAKE16-NEXT: v_and_b32_e32 v6, 0xffff0000, v21
+; GFX11-FAKE16-NEXT: v_add_nc_u32_e32 v0, v0, v2
+; GFX11-FAKE16-NEXT: s_delay_alu instid0(VALU_DEP_4)
+; GFX11-FAKE16-NEXT: v_cndmask_b32_e32 v20, v1, v3, vcc_lo
+; GFX11-FAKE16-NEXT: v_cmp_u_f32_e32 vcc_lo, v2, v2
+; GFX11-FAKE16-NEXT: v_bfe_u32 v1, v4, 16, 1
+; GFX11-FAKE16-NEXT: v_add_f32_e32 v3, 0x40c00000, v6
+; GFX11-FAKE16-NEXT: v_add_nc_u32_e32 v0, 0x7fff, v0
+; GFX11-FAKE16-NEXT: v_or_b32_e32 v6, 0x400000, v2
+; GFX11-FAKE16-NEXT: s_delay_alu instid0(VALU_DEP_4) | instskip(NEXT) | instid1(VALU_DEP_4)
+; GFX11-FAKE16-NEXT: v_add_nc_u32_e32 v1, v1, v4
+; GFX11-FAKE16-NEXT: v_bfe_u32 v7, v3, 16, 1
+; GFX11-FAKE16-NEXT: s_delay_alu instid0(VALU_DEP_3) | instskip(SKIP_1) | instid1(VALU_DEP_4)
+; GFX11-FAKE16-NEXT: v_cndmask_b32_e32 v35, v0, v6, vcc_lo
+; GFX11-FAKE16-NEXT: v_bfe_u32 v6, v5, 16, 1
+; GFX11-FAKE16-NEXT: v_add_nc_u32_e32 v0, 0x7fff, v1
+; GFX11-FAKE16-NEXT: v_or_b32_e32 v1, 0x400000, v4
+; GFX11-FAKE16-NEXT: v_add_nc_u32_e32 v2, v7, v3
+; GFX11-FAKE16-NEXT: v_cmp_u_f32_e32 vcc_lo, v4, v4
+; GFX11-FAKE16-NEXT: v_add_nc_u32_e32 v4, v6, v5
+; GFX11-FAKE16-NEXT: v_lshlrev_b32_e32 v6, 16, v22
+; GFX11-FAKE16-NEXT: v_cndmask_b32_e32 v36, v0, v1, vcc_lo
+; GFX11-FAKE16-NEXT: v_add_nc_u32_e32 v0, 0x7fff, v2
+; GFX11-FAKE16-NEXT: v_or_b32_e32 v1, 0x400000, v3
+; GFX11-FAKE16-NEXT: v_cmp_u_f32_e32 vcc_lo, v3, v3
+; GFX11-FAKE16-NEXT: v_or_b32_e32 v3, 0x400000, v5
+; GFX11-FAKE16-NEXT: v_lshrrev_b32_e32 v36, 16, v36
+; GFX11-FAKE16-NEXT: s_delay_alu instid0(VALU_DEP_4)
+; GFX11-FAKE16-NEXT: v_cndmask_b32_e32 v21, v0, v1, vcc_lo
+; GFX11-FAKE16-NEXT: v_add_nc_u32_e32 v1, 0x7fff, v4
+; GFX11-FAKE16-NEXT: v_cmp_u_f32_e32 vcc_lo, v5, v5
+; GFX11-FAKE16-NEXT: v_and_b32_e32 v7, 0xffff0000, v22
+; GFX11-FAKE16-NEXT: v_add_f32_e32 v4, 0x40c00000, v6
+; GFX11-FAKE16-NEXT: v_and_b32_e32 v6, 0xffff0000, v23
+; GFX11-FAKE16-NEXT: v_lshlrev_b32_e32 v5, 16, v23
+; GFX11-FAKE16-NEXT: v_cndmask_b32_e32 v22, v1, v3, vcc_lo
+; GFX11-FAKE16-NEXT: v_add_f32_e32 v2, 0x40c00000, v7
+; GFX11-FAKE16-NEXT: v_bfe_u32 v1, v4, 16, 1
+; GFX11-FAKE16-NEXT: v_add_f32_e32 v3, 0x40c00000, v6
+; GFX11-FAKE16-NEXT: s_delay_alu instid0(VALU_DEP_3) | instskip(SKIP_1) | instid1(VALU_DEP_3)
+; GFX11-FAKE16-NEXT: v_bfe_u32 v0, v2, 16, 1
+; GFX11-FAKE16-NEXT: v_or_b32_e32 v6, 0x400000, v2
+; GFX11-FAKE16-NEXT: v_bfe_u32 v7, v3, 16, 1
+; GFX11-FAKE16-NEXT: v_cmp_u_f32_e32 vcc_lo, v2, v2
+; GFX11-FAKE16-NEXT: s_delay_alu instid0(VALU_DEP_4) | instskip(NEXT) | instid1(VALU_DEP_3)
+; GFX11-FAKE16-NEXT: v_add_nc_u32_e32 v0, v0, v2
+; GFX11-FAKE16-NEXT: v_add_nc_u32_e32 v2, v7, v3
+; GFX11-FAKE16-NEXT: v_and_b32_e32 v7, 0xffff0000, v24
+; GFX11-FAKE16-NEXT: v_add_nc_u32_e32 v1, v1, v4
+; GFX11-FAKE16-NEXT: s_delay_alu instid0(VALU_DEP_4) | instskip(NEXT) | instid1(VALU_DEP_1)
+; GFX11-FAKE16-NEXT: v_add_nc_u32_e32 v0, 0x7fff, v0
+; GFX11-FAKE16-NEXT: v_dual_cndmask_b32 v37, v0, v6 :: v_dual_add_nc_u32 v0, 0x7fff, v1
+; GFX11-FAKE16-NEXT: v_or_b32_e32 v1, 0x400000, v4
+; GFX11-FAKE16-NEXT: v_cmp_u_f32_e32 vcc_lo, v4, v4
+; GFX11-FAKE16-NEXT: s_delay_alu instid0(VALU_DEP_2) | instskip(SKIP_2) | instid1(VALU_DEP_2)
+; GFX11-FAKE16-NEXT: v_dual_cndmask_b32 v23, v0, v1 :: v_dual_add_nc_u32 v0, 0x7fff, v2
+; GFX11-FAKE16-NEXT: v_or_b32_e32 v1, 0x400000, v3
+; GFX11-FAKE16-NEXT: v_cmp_u_f32_e32 vcc_lo, v3, v3
+; GFX11-FAKE16-NEXT: v_cndmask_b32_e32 v38, v0, v1, vcc_lo
+; GFX11-FAKE16-NEXT: v_add_f32_e32 v5, 0x40c00000, v5
+; GFX11-FAKE16-NEXT: s_delay_alu instid0(VALU_DEP_1) | instskip(SKIP_2) | instid1(VALU_DEP_3)
+; GFX11-FAKE16-NEXT: v_bfe_u32 v6, v5, 16, 1
+; GFX11-FAKE16-NEXT: v_or_b32_e32 v3, 0x400000, v5
+; GFX11-FAKE16-NEXT: v_cmp_u_f32_e32 vcc_lo, v5, v5
+; GFX11-FAKE16-NEXT: v_add_nc_u32_e32 v4, v6, v5
+; GFX11-FAKE16-NEXT: v_lshlrev_b32_e32 v6, 16, v24
+; GFX11-FAKE16-NEXT: v_lshlrev_b32_e32 v5, 16, v25
+; GFX11-FAKE16-NEXT: s_delay_alu instid0(VALU_DEP_3) | instskip(NEXT) | instid1(VALU_DEP_3)
+; GFX11-FAKE16-NEXT: v_add_nc_u32_e32 v1, 0x7fff, v4
+; GFX11-FAKE16-NEXT: v_add_f32_e32 v4, 0x40c00000, v6
+; GFX11-FAKE16-NEXT: v_and_b32_e32 v6, 0xffff0000, v25
+; GFX11-FAKE16-NEXT: s_delay_alu instid0(VALU_DEP_3) | instskip(NEXT) | instid1(VALU_DEP_2)
+; GFX11-FAKE16-NEXT: v_dual_add_f32 v5, 0x40c00000, v5 :: v_dual_cndmask_b32 v24, v1, v3
+; GFX11-FAKE16-NEXT: v_dual_add_f32 v2, 0x40c00000, v7 :: v_dual_add_f32 v3, 0x40c00000, v6
+; GFX11-FAKE16-NEXT: s_delay_alu instid0(VALU_DEP_4) | instskip(NEXT) | instid1(VALU_DEP_2)
+; GFX11-FAKE16-NEXT: v_bfe_u32 v1, v4, 16, 1
+; GFX11-FAKE16-NEXT: v_bfe_u32 v0, v2, 16, 1
+; GFX11-FAKE16-NEXT: s_delay_alu instid0(VALU_DEP_3) | instskip(SKIP_2) | instid1(VALU_DEP_4)
+; GFX11-FAKE16-NEXT: v_bfe_u32 v7, v3, 16, 1
+; GFX11-FAKE16-NEXT: v_or_b32_e32 v6, 0x400000, v2
+; GFX11-FAKE16-NEXT: v_cmp_u_f32_e32 vcc_lo, v2, v2
+; GFX11-FAKE16-NEXT: v_add_nc_u32_e32 v0, v0, v2
+; GFX11-FAKE16-NEXT: s_delay_alu instid0(VALU_DEP_4) | instskip(SKIP_2) | instid1(VALU_DEP_4)
+; GFX11-FAKE16-NEXT: v_add_nc_u32_e32 v2, v7, v3
+; GFX11-FAKE16-NEXT: v_and_b32_e32 v7, 0xffff0000, v26
+; GFX11-FAKE16-NEXT: v_add_nc_u32_e32 v1, v1, v4
+; GFX11-FAKE16-NEXT: v_add_nc_u32_e32 v0, 0x7fff, v0
+; GFX11-FAKE16-NEXT: s_delay_alu instid0(VALU_DEP_1) | instskip(SKIP_3) | instid1(VALU_DEP_2)
+; GFX11-FAKE16-NEXT: v_dual_cndmask_b32 v39, v0, v6 :: v_dual_add_nc_u32 v0, 0x7fff, v1
+; GFX11-FAKE16-NEXT: v_or_b32_e32 v1, 0x400000, v4
+; GFX11-FAKE16-NEXT: v_bfe_u32 v6, v5, 16, 1
+; GFX11-FAKE16-NEXT: v_cmp_u_f32_e32 vcc_lo, v4, v4
+; GFX11-FAKE16-NEXT: v_add_nc_u32_e32 v4, v6, v5
+; GFX11-FAKE16-NEXT: s_delay_alu instid0(VALU_DEP_4) | instskip(SKIP_4) | instid1(VALU_DEP_4)
+; GFX11-FAKE16-NEXT: v_dual_cndmask_b32 v25, v0, v1 :: v_dual_add_nc_u32 v0, 0x7fff, v2
+; GFX11-FAKE16-NEXT: v_or_b32_e32 v1, 0x400000, v3
+; GFX11-FAKE16-NEXT: v_lshlrev_b32_e32 v6, 16, v26
+; GFX11-FAKE16-NEXT: v_cmp_u_f32_e32 vcc_lo, v3, v3
+; GFX11-FAKE16-NEXT: v_or_b32_e32 v3, 0x400000, v5
+; GFX11-FAKE16-NEXT: v_cndmask_b32_e32 v48, v0, v1, vcc_lo
+; GFX11-FAKE16-NEXT: v_add_f32_e32 v2, 0x40c00000, v7
+; GFX11-FAKE16-NEXT: v_add_nc_u32_e32 v1, 0x7fff, v4
+; GFX11-FAKE16-NEXT: v_add_f32_e32 v4, 0x40c00000, v6
+; GFX11-FAKE16-NEXT: v_and_b32_e32 v6, 0xffff0000, v27
+; GFX11-FAKE16-NEXT: v_cmp_u_f32_e32 vcc_lo, v5, v5
+; GFX11-FAKE16-NEXT: v_bfe_u32 v0, v2, 16, 1
+; GFX11-FAKE16-NEXT: v_lshlrev_b32_e32 v5, 16, v27
+; GFX11-FAKE16-NEXT: v_cndmask_b32_e32 v49, v1, v3, vcc_lo
+; GFX11-FAKE16-NEXT: v_add_f32_e32 v3, 0x40c00000, v6
+; GFX11-FAKE16-NEXT: s_delay_alu instid0(VALU_DEP_4)
+; GFX11-FAKE16-NEXT: v_add_nc_u32_e32 v0, v0, v2
+; GFX11-FAKE16-NEXT: v_bfe_u32 v1, v4, 16, 1
+; GFX11-FAKE16-NEXT: v_or_b32_e32 v6, 0x400000, v2
+; GFX11-FAKE16-NEXT: v_add_f32_e32 v5, 0x40c00000, v5
+; GFX11-FAKE16-NEXT: v_bfe_u32 v7, v3, 16, 1
+; GFX11-FAKE16-NEXT: v_add_nc_u32_e32 v0, 0x7fff, v0
+; GFX11-FAKE16-NEXT: v_cmp_u_f32_e32 vcc_lo, v2, v2
+; GFX11-FAKE16-NEXT: v_lshrrev_b32_e32 v49, 16, v49
+; GFX11-FAKE16-NEXT: s_delay_alu instid0(VALU_DEP_4) | instskip(SKIP_4) | instid1(VALU_DEP_3)
+; GFX11-FAKE16-NEXT: v_add_nc_u32_e32 v2, v7, v3
+; GFX11-FAKE16-NEXT: v_and_b32_e32 v7, 0xffff0000, v28
+; GFX11-FAKE16-NEXT: v_dual_cndmask_b32 v26, v0, v6 :: v_dual_add_nc_u32 v1, v1, v4
+; GFX11-FAKE16-NEXT: v_bfe_u32 v6, v5, 16, 1
+; GFX11-FAKE16-NEXT: v_cmp_u_f32_e32 vcc_lo, v4, v4
+; GFX11-FAKE16-NEXT: v_add_nc_u32_e32 v0, 0x7fff, v1
+; GFX11-FAKE16-NEXT: v_or_b32_e32 v1, 0x400000, v4
+; GFX11-FAKE16-NEXT: s_delay_alu instid0(VALU_DEP_4) | instskip(NEXT) | instid1(VALU_DEP_2)
+; GFX11-FAKE16-NEXT: v_add_nc_u32_e32 v4, v6, v5
+; GFX11-FAKE16-NEXT: v_dual_cndmask_b32 v27, v0, v1 :: v_dual_lshlrev_b32 v6, 16, v28
+; GFX11-FAKE16-NEXT: v_add_nc_u32_e32 v0, 0x7fff, v2
+; GFX11-FAKE16-NEXT: v_or_b32_e32 v1, 0x400000, v3
+; GFX11-FAKE16-NEXT: v_cmp_u_f32_e32 vcc_lo, v3, v3
+; GFX11-FAKE16-NEXT: v_or_b32_e32 v3, 0x400000, v5
+; GFX11-FAKE16-NEXT: s_delay_alu instid0(VALU_DEP_3)
+; GFX11-FAKE16-NEXT: v_cndmask_b32_e32 v50, v0, v1, vcc_lo
+; GFX11-FAKE16-NEXT: v_add_f32_e32 v2, 0x40c00000, v7
+; GFX11-FAKE16-NEXT: v_add_nc_u32_e32 v1, 0x7fff, v4
+; GFX11-FAKE16-NEXT: v_add_f32_e32 v4, 0x40c00000, v6
+; GFX11-FAKE16-NEXT: v_and_b32_e32 v6, 0xffff0000, v29
+; GFX11-FAKE16-NEXT: v_cmp_u_f32_e32 vcc_lo, v5, v5
+; GFX11-FAKE16-NEXT: v_bfe_u32 v0, v2, 16, 1
+; GFX11-FAKE16-NEXT: v_dual_cndmask_b32 v28, v1, v3 :: v_dual_lshlrev_b32 v5, 16, v29
+; GFX11-FAKE16-NEXT: s_delay_alu instid0(VALU_DEP_4) | instskip(NEXT) | instid1(VALU_DEP_3)
+; GFX11-FAKE16-NEXT: v_add_f32_e32 v3, 0x40c00000, v6
+; GFX11-FAKE16-NEXT: v_add_nc_u32_e32 v0, v0, v2
+; GFX11-FAKE16-NEXT: v_bfe_u32 v1, v4, 16, 1
+; GFX11-FAKE16-NEXT: v_or_b32_e32 v6, 0x400000, v2
+; GFX11-FAKE16-NEXT: v_add_f32_e32 v5, 0x40c00000, v5
+; GFX11-FAKE16-NEXT: v_bfe_u32 v7, v3, 16, 1
+; GFX11-FAKE16-NEXT: v_add_nc_u32_e32 v0, 0x7fff, v0
+; GFX11-FAKE16-NEXT: v_cmp_u_f32_e32 vcc_lo, v2, v2
+; GFX11-FAKE16-NEXT: s_delay_alu instid0(VALU_DEP_3)
+; GFX11-FAKE16-NEXT: v_add_nc_u32_e32 v2, v7, v3
+; GFX11-FAKE16-NEXT: v_and_b32_e32 v7, 0xffff0000, v30
+; GFX11-FAKE16-NEXT: v_add_nc_u32_e32 v1, v1, v4
+; GFX11-FAKE16-NEXT: v_cndmask_b32_e32 v51, v0, v6, vcc_lo
+; GFX11-FAKE16-NEXT: v_bfe_u32 v6, v5, 16, 1
+; GFX11-FAKE16-NEXT: v_cmp_u_f32_e32 vcc_lo, v4, v4
+; GFX11-FAKE16-NEXT: s_delay_alu instid0(VALU_DEP_4) | instskip(SKIP_1) | instid1(VALU_DEP_4)
+; GFX11-FAKE16-NEXT: v_add_nc_u32_e32 v0, 0x7fff, v1
+; GFX11-FAKE16-NEXT: v_or_b32_e32 v1, 0x400000, v4
+; GFX11-FAKE16-NEXT: v_add_nc_u32_e32 v4, v6, v5
+; GFX11-FAKE16-NEXT: s_delay_alu instid0(VALU_DEP_2)
+; GFX11-FAKE16-NEXT: v_dual_cndmask_b32 v29, v0, v1 :: v_dual_lshlrev_b32 v6, 16, v30
+; GFX11-FAKE16-NEXT: v_add_nc_u32_e32 v0, 0x7fff, v2
+; GFX11-FAKE16-NEXT: v_or_b32_e32 v1, 0x400000, v3
+; GFX11-FAKE16-NEXT: v_add_f32_e32 v2, 0x40c00000, v7
+; GFX11-FAKE16-NEXT: v_cmp_u_f32_e32 vcc_lo, v3, v3
+; GFX11-FAKE16-NEXT: v_or_b32_e32 v3, 0x400000, v5
+; GFX11-FAKE16-NEXT: s_delay_alu instid0(VALU_DEP_4) | instskip(NEXT) | instid1(VALU_DEP_4)
+; GFX11-FAKE16-NEXT: v_dual_cndmask_b32 v52, v0, v1 :: v_dual_add_nc_u32 v1, 0x7fff, v4
+; GFX11-FAKE16-NEXT: v_bfe_u32 v0, v2, 16, 1
+; GFX11-FAKE16-NEXT: v_add_f32_e32 v4, 0x40c00000, v6
+; GFX11-FAKE16-NEXT: v_and_b32_e32 v6, 0xffff0000, v31
+; GFX11-FAKE16-NEXT: v_cmp_u_f32_e32 vcc_lo, v5, v5
+; GFX11-FAKE16-NEXT: v_or_b32_e32 v5, 0x400000, v2
+; GFX11-FAKE16-NEXT: v_add_nc_u32_e32 v0, v0, v2
+; GFX11-FAKE16-NEXT: s_delay_alu instid0(VALU_DEP_4) | instskip(NEXT) | instid1(VALU_DEP_2)
+; GFX11-FAKE16-NEXT: v_dual_cndmask_b32 v30, v1, v3 :: v_dual_add_f32 v3, 0x40c00000, v6
+; GFX11-FAKE16-NEXT: v_add_nc_u32_e32 v0, 0x7fff, v0
+; GFX11-FAKE16-NEXT: v_cmp_u_f32_e32 vcc_lo, v2, v2
+; GFX11-FAKE16-NEXT: v_lshlrev_b32_e32 v6, 16, v31
+; GFX11-FAKE16-NEXT: s_delay_alu instid0(VALU_DEP_4) | instskip(NEXT) | instid1(VALU_DEP_2)
+; GFX11-FAKE16-NEXT: v_bfe_u32 v7, v3, 16, 1
+; GFX11-FAKE16-NEXT: v_dual_cndmask_b32 v53, v0, v5 :: v_dual_add_f32 v0, 0x40c00000, v6
+; GFX11-FAKE16-NEXT: s_delay_alu instid0(VALU_DEP_2) | instskip(SKIP_2) | instid1(VALU_DEP_4)
+; GFX11-FAKE16-NEXT: v_add_nc_u32_e32 v5, v7, v3
+; GFX11-FAKE16-NEXT: v_bfe_u32 v1, v4, 16, 1
+; GFX11-FAKE16-NEXT: v_or_b32_e32 v2, 0x400000, v4
+; GFX11-FAKE16-NEXT: v_bfe_u32 v6, v0, 16, 1
+; GFX11-FAKE16-NEXT: v_add_f32_e64 v7, 0x40c00000, s0
+; GFX11-FAKE16-NEXT: v_cmp_u_f32_e32 vcc_lo, v4, v4
+; GFX11-FAKE16-NEXT: v_add_nc_u32_e32 v1, v1, v4
+; GFX11-FAKE16-NEXT: s_lshl_b32 s0, s12, 16
+; GFX11-FAKE16-NEXT: v_add_nc_u32_e32 v4, v6, v0
+; GFX11-FAKE16-NEXT: s_delay_alu instid0(VALU_DEP_2) | instskip(NEXT) | instid1(VALU_DEP_1)
+; GFX11-FAKE16-NEXT: v_add_nc_u32_e32 v1, 0x7fff, v1
+; GFX11-FAKE16-NEXT: v_dual_cndmask_b32 v54, v1, v2 :: v_dual_add_nc_u32 v1, 0x7fff, v5
+; GFX11-FAKE16-NEXT: v_or_b32_e32 v2, 0x400000, v3
+; GFX11-FAKE16-NEXT: v_bfe_u32 v5, v7, 16, 1
+; GFX11-FAKE16-NEXT: v_cmp_u_f32_e32 vcc_lo, v3, v3
+; GFX11-FAKE16-NEXT: v_or_b32_e32 v3, 0x400000, v0
+; GFX11-FAKE16-NEXT: v_lshrrev_b32_e32 v54, 16, v54
+; GFX11-FAKE16-NEXT: v_cndmask_b32_e32 v31, v1, v2, vcc_lo
+; GFX11-FAKE16-NEXT: v_add_f32_e64 v1, 0x40c00000, s0
+; GFX11-FAKE16-NEXT: v_add_nc_u32_e32 v2, 0x7fff, v4
+; GFX11-FAKE16-NEXT: v_add_nc_u32_e32 v4, v5, v7
+; GFX11-FAKE16-NEXT: v_cmp_u_f32_e32 vcc_lo, v0, v0
+; GFX11-FAKE16-NEXT: s_and_b32 s0, s13, 0xffff0000
+; GFX11-FAKE16-NEXT: v_bfe_u32 v5, v1, 16, 1
+; GFX11-FAKE16-NEXT: v_add_f32_e64 v6, 0x40c00000, s0
+; GFX11-FAKE16-NEXT: s_lshl_b32 s0, s13, 16
+; GFX11-FAKE16-NEXT: v_cndmask_b32_e32 v0, v2, v3, vcc_lo
+; GFX11-FAKE16-NEXT: v_add_nc_u32_e32 v2, 0x7fff, v4
+; GFX11-FAKE16-NEXT: v_or_b32_e32 v3, 0x400000, v7
+; GFX11-FAKE16-NEXT: v_cmp_u_f32_e32 vcc_lo, v7, v7
+; GFX11-FAKE16-NEXT: v_add_nc_u32_e32 v4, v5, v1
+; GFX11-FAKE16-NEXT: v_bfe_u32 v5, v6, 16, 1
+; GFX11-FAKE16-NEXT: s_delay_alu instid0(VALU_DEP_4)
+; GFX11-FAKE16-NEXT: v_cndmask_b32_e32 v55, v2, v3, vcc_lo
+; GFX11-FAKE16-NEXT: v_add_f32_e64 v3, 0x40c00000, s0
+; GFX11-FAKE16-NEXT: s_and_b32 s0, s14, 0xffff0000
+; GFX11-FAKE16-NEXT: v_add_nc_u32_e32 v2, 0x7fff, v4
+; GFX11-FAKE16-NEXT: v_or_b32_e32 v4, 0x400000, v1
+; GFX11-FAKE16-NEXT: v_add_f32_e64 v8, 0x40c00000, s0
+; GFX11-FAKE16-NEXT: v_bfe_u32 v7, v3, 16, 1
+; GFX11-FAKE16-NEXT: v_cmp_u_f32_e32 vcc_lo, v1, v1
+; GFX11-FAKE16-NEXT: v_add_nc_u32_e32 v5, v5, v6
+; GFX11-FAKE16-NEXT: s_lshl_b32 s0, s14, 16
+; GFX11-FAKE16-NEXT: s_delay_alu instid0(VALU_DEP_1)
+; GFX11-FAKE16-NEXT: v_dual_cndmask_b32 v2, v2, v4 :: v_dual_add_nc_u32 v1, 0x7fff, v5
+; GFX11-FAKE16-NEXT: v_or_b32_e32 v4, 0x400000, v6
+; GFX11-FAKE16-NEXT: v_add_nc_u32_e32 v5, v7, v3
+; GFX11-FAKE16-NEXT: v_bfe_u32 v7, v8, 16, 1
+; GFX11-FAKE16-NEXT: v_cmp_u_f32_e32 vcc_lo, v6, v6
+; GFX11-FAKE16-NEXT: v_or_b32_e32 v6, 0x400000, v3
+; GFX11-FAKE16-NEXT: s_delay_alu instid0(VALU_DEP_4) | instskip(NEXT) | instid1(VALU_DEP_4)
+; GFX11-FAKE16-NEXT: v_add_nc_u32_e32 v5, 0x7fff, v5
+; GFX11-FAKE16-NEXT: v_add_nc_u32_e32 v7, v7, v8
+; GFX11-FAKE16-NEXT: v_cndmask_b32_e32 v1, v1, v4, vcc_lo
+; GFX11-FAKE16-NEXT: v_add_f32_e64 v4, 0x40c00000, s0
+; GFX11-FAKE16-NEXT: v_cmp_u_f32_e32 vcc_lo, v3, v3
+; GFX11-FAKE16-NEXT: s_and_b32 s0, s15, 0xffff0000
+; GFX11-FAKE16-NEXT: s_delay_alu instid0(SALU_CYCLE_1) | instskip(NEXT) | instid1(VALU_DEP_3)
+; GFX11-FAKE16-NEXT: v_add_f32_e64 v10, 0x40c00000, s0
+; GFX11-FAKE16-NEXT: v_bfe_u32 v9, v4, 16, 1
+; GFX11-FAKE16-NEXT: v_cndmask_b32_e32 v3, v5, v6, vcc_lo
+; GFX11-FAKE16-NEXT: v_add_nc_u32_e32 v5, 0x7fff, v7
+; GFX11-FAKE16-NEXT: v_or_b32_e32 v6, 0x400000, v8
+; GFX11-FAKE16-NEXT: v_cmp_u_f32_e32 vcc_lo, v8, v8
+; GFX11-FAKE16-NEXT: s_lshl_b32 s0, s15, 16
+; GFX11-FAKE16-NEXT: v_add_nc_u32_e32 v7, v9, v4
+; GFX11-FAKE16-NEXT: v_bfe_u32 v9, v10, 16, 1
+; GFX11-FAKE16-NEXT: v_cndmask_b32_e32 v64, v5, v6, vcc_lo
+; GFX11-FAKE16-NEXT: v_add_f32_e64 v5, 0x40c00000, s0
+; GFX11-FAKE16-NEXT: s_and_b32 s0, s16, 0xffff0000
+; GFX11-FAKE16-NEXT: v_add_nc_u32_e32 v6, 0x7fff, v7
+; GFX11-FAKE16-NEXT: v_or_b32_e32 v7, 0x400000, v4
+; GFX11-FAKE16-NEXT: v_add_nc_u32_e32 v8, v9, v10
+; GFX11-FAKE16-NEXT: v_bfe_u32 v9, v5, 16, 1
+; GFX11-FAKE16-NEXT: v_add_f32_e64 v11, 0x40c00000, s0
+; GFX11-FAKE16-NEXT: v_cmp_u_f32_e32 vcc_lo, v4, v4
+; GFX11-FAKE16-NEXT: s_lshl_b32 s0, s16, 16
+; GFX11-FAKE16-NEXT: v_add_nc_u32_e32 v4, 0x7fff, v8
+; GFX11-FAKE16-NEXT: s_delay_alu instid0(VALU_DEP_3)
+; GFX11-FAKE16-NEXT: v_bfe_u32 v8, v11, 16, 1
+; GFX11-FAKE16-NEXT: v_cndmask_b32_e32 v65, v6, v7, vcc_lo
+; GFX11-FAKE16-NEXT: v_or_b32_e32 v6, 0x400000, v10
+; GFX11-FAKE16-NEXT: v_add_nc_u32_e32 v7, v9, v5
+; GFX11-FAKE16-NEXT: v_cmp_u_f32_e32 vcc_lo, v10, v10
+; GFX11-FAKE16-NEXT: v_add_nc_u32_e32 v8, v8, v11
+; GFX11-FAKE16-NEXT: v_lshrrev_b32_e32 v65, 16, v65
+; GFX11-FAKE16-NEXT: v_cndmask_b32_e32 v66, v4, v6, vcc_lo
+; GFX11-FAKE16-NEXT: v_add_f32_e64 v4, 0x40c00000, s0
+; GFX11-FAKE16-NEXT: v_add_nc_u32_e32 v6, 0x7fff, v7
+; GFX11-FAKE16-NEXT: v_or_b32_e32 v7, 0x400000, v5
+; GFX11-FAKE16-NEXT: v_cmp_u_f32_e32 vcc_lo, v5, v5
+; GFX11-FAKE16-NEXT: s_and_b32 s0, s17, 0xffff0000
+; GFX11-FAKE16-NEXT: v_bfe_u32 v9, v4, 16, 1
+; GFX11-FAKE16-NEXT: v_add_f32_e64 v10, 0x40c00000, s0
+; GFX11-FAKE16-NEXT: v_add_nc_u32_e32 v5, 0x7fff, v8
+; GFX11-FAKE16-NEXT: v_cndmask_b32_e32 v67, v6, v7, vcc_lo
+; GFX11-FAKE16-NEXT: v_or_b32_e32 v6, 0x400000, v11
+; GFX11-FAKE16-NEXT: v_cmp_u_f32_e32 vcc_lo, v11, v11
+; GFX11-FAKE16-NEXT: s_lshl_b32 s0, s17, 16
+; GFX11-FAKE16-NEXT: v_add_nc_u32_e32 v7, v9, v4
+; GFX11-FAKE16-NEXT: v_bfe_u32 v8, v10, 16, 1
+; GFX11-FAKE16-NEXT: v_lshrrev_b32_e32 v67, 16, v67
+; GFX11-FAKE16-NEXT: v_cndmask_b32_e32 v68, v5, v6, vcc_lo
+; GFX11-FAKE16-NEXT: v_add_f32_e64 v5, 0x40c00000, s0
+; GFX11-FAKE16-NEXT: s_and_b32 s0, s18, 0xffff0000
+; GFX11-FAKE16-NEXT: v_add_nc_u32_e32 v6, 0x7fff, v7
+; GFX11-FAKE16-NEXT: v_or_b32_e32 v7, 0x400000, v4
+; GFX11-FAKE16-NEXT: v_add_nc_u32_e32 v8, v8, v10
+; GFX11-FAKE16-NEXT: v_bfe_u32 v9, v5, 16, 1
+; GFX11-FAKE16-NEXT: v_add_f32_e64 v11, 0x40c00000, s0
+; GFX11-FAKE16-NEXT: v_cmp_u_f32_e32 vcc_lo, v4, v4
+; GFX11-FAKE16-NEXT: s_lshl_b32 s0, s18, 16
+; GFX11-FAKE16-NEXT: v_cndmask_b32_e32 v4, v6, v7, vcc_lo
+; GFX11-FAKE16-NEXT: v_add_nc_u32_e32 v6, 0x7fff, v8
+; GFX11-FAKE16-NEXT: v_or_b32_e32 v7, 0x400000, v10
+; GFX11-FAKE16-NEXT: v_add_nc_u32_e32 v8, v9, v5
+; GFX11-FAKE16-NEXT: v_bfe_u32 v9, v11, 16, 1
+; GFX11-FAKE16-NEXT: v_cmp_u_f32_e32 vcc_lo, v10, v10
+; GFX11-FAKE16-NEXT: v_lshrrev_b32_e32 v4, 16, v4
+; GFX11-FAKE16-NEXT: s_delay_alu instid0(VALU_DEP_3)
+; GFX11-FAKE16-NEXT: v_add_nc_u32_e32 v9, v9, v11
+; GFX11-FAKE16-NEXT: v_cndmask_b32_e32 v69, v6, v7, vcc_lo
+; GFX11-FAKE16-NEXT: v_add_f32_e64 v6, 0x40c00000, s0
+; GFX11-FAKE16-NEXT: v_add_nc_u32_e32 v7, 0x7fff, v8
+; GFX11-FAKE16-NEXT: v_or_b32_e32 v8, 0x400000, v5
+; GFX11-FAKE16-NEXT: v_cmp_u_f32_e32 vcc_lo, v5, v5
+; GFX11-FAKE16-NEXT: s_and_b32 s0, s19, 0xffff0000
+; GFX11-FAKE16-NEXT: v_bfe_u32 v10, v6, 16, 1
+; GFX11-FAKE16-NEXT: v_add_f32_e64 v12, 0x40c00000, s0
+; GFX11-FAKE16-NEXT: s_lshl_b32 s0, s19, 16
+; GFX11-FAKE16-NEXT: v_cndmask_b32_e32 v5, v7, v8, vcc_lo
+; GFX11-FAKE16-NEXT: v_add_nc_u32_e32 v7, 0x7fff, v9
+; GFX11-FAKE16-NEXT: v_or_b32_e32 v8, 0x400000, v11
+; GFX11-FAKE16-NEXT: v_cmp_u_f32_e32 vcc_lo, v11, v11
+; GFX11-FAKE16-NEXT: v_add_nc_u32_e32 v9, v10, v6
+; GFX11-FAKE16-NEXT: v_bfe_u32 v10, v12, 16, 1
+; GFX11-FAKE16-NEXT: v_or_b32_e32 v14, 0x400000, v12
+; GFX11-FAKE16-NEXT: v_lshrrev_b32_e32 v5, 16, v5
+; GFX11-FAKE16-NEXT: v_cndmask_b32_e32 v70, v7, v8, vcc_lo
+; GFX11-FAKE16-NEXT: v_add_f32_e64 v7, 0x40c00000, s0
+; GFX11-FAKE16-NEXT: s_and_b32 s0, s20, 0xffff0000
+; GFX11-FAKE16-NEXT: v_add_nc_u32_e32 v8, 0x7fff, v9
+; GFX11-FAKE16-NEXT: v_or_b32_e32 v9, 0x400000, v6
+; GFX11-FAKE16-NEXT: v_add_nc_u32_e32 v10, v10, v12
+; GFX11-FAKE16-NEXT: v_bfe_u32 v11, v7, 16, 1
+; GFX11-FAKE16-NEXT: v_add_f32_e64 v13, 0x40c00000, s0
+; GFX11-FAKE16-NEXT: v_cmp_u_f32_e32 vcc_lo, v6, v6
+; GFX11-FAKE16-NEXT: s_lshl_b32 s0, s20, 16
+; GFX11-FAKE16-NEXT: v_or_b32_e32 v15, 0x400000, v7
+; GFX11-FAKE16-NEXT: v_and_or_b32 v5, 0xffff0000, v69, v5
+; GFX11-FAKE16-NEXT: v_lshrrev_b32_e32 v69, 16, v2
+; GFX11-FAKE16-NEXT: v_cndmask_b32_e32 v6, v8, v9, vcc_lo
+; GFX11-FAKE16-NEXT: v_add_nc_u32_e32 v8, 0x7fff, v10
+; GFX11-FAKE16-NEXT: v_add_nc_u32_e32 v9, v11, v7
+; GFX11-FAKE16-NEXT: v_bfe_u32 v10, v13, 16, 1
+; GFX11-FAKE16-NEXT: v_add_f32_e64 v11, 0x40c00000, s0
+; GFX11-FAKE16-NEXT: v_cmp_u_f32_e32 vcc_lo, v12, v12
+; GFX11-FAKE16-NEXT: s_and_b32 s0, s21, 0xffff0000
+; GFX11-FAKE16-NEXT: v_add_nc_u32_e32 v9, 0x7fff, v9
+; GFX11-FAKE16-NEXT: v_add_nc_u32_e32 v10, v10, v13
+; GFX11-FAKE16-NEXT: v_bfe_u32 v71, v11, 16, 1
+; GFX11-FAKE16-NEXT: v_cndmask_b32_e32 v80, v8, v14, vcc_lo
+; GFX11-FAKE16-NEXT: v_cmp_u_f32_e32 vcc_lo, v7, v7
+; GFX11-FAKE16-NEXT: v_add_f32_e64 v12, 0x40c00000, s0
+; GFX11-FAKE16-NEXT: v_add_nc_u32_e32 v8, 0x7fff, v10
+; GFX11-FAKE16-NEXT: v_or_b32_e32 v10, 0x400000, v13
+; GFX11-FAKE16-NEXT: s_lshl_b32 s0, s21, 16
+; GFX11-FAKE16-NEXT: v_cndmask_b32_e32 v7, v9, v15, vcc_lo
+; GFX11-FAKE16-NEXT: v_add_nc_u32_e32 v9, v71, v11
+; GFX11-FAKE16-NEXT: v_cmp_u_f32_e32 vcc_lo, v13, v13
+; GFX11-FAKE16-NEXT: v_add_f32_e64 v14, 0x40c00000, s0
+; GFX11-FAKE16-NEXT: v_or_b32_e32 v15, 0x400000, v11
+; GFX11-FAKE16-NEXT: v_bfe_u32 v71, v12, 16, 1
+; GFX11-FAKE16-NEXT: v_dual_cndmask_b32 v8, v8, v10 :: v_dual_add_nc_u32 v9, 0x7fff, v9
+; GFX11-FAKE16-NEXT: v_cmp_u_f32_e32 vcc_lo, v11, v11
+; GFX11-FAKE16-NEXT: s_and_b32 s0, s22, 0xffff0000
+; GFX11-FAKE16-NEXT: v_bfe_u32 v10, v14, 16, 1
+; GFX11-FAKE16-NEXT: v_add_f32_e64 v11, 0x40c00000, s0
+; GFX11-FAKE16-NEXT: s_lshl_b32 s0, s22, 16
+; GFX11-FAKE16-NEXT: v_cndmask_b32_e32 v81, v9, v15, vcc_lo
+; GFX11-FAKE16-NEXT: v_add_nc_u32_e32 v9, v71, v12
+; GFX11-FAKE16-NEXT: v_add_nc_u32_e32 v10, v10, v14
+; GFX11-FAKE16-NEXT: v_add_f32_e64 v13, 0x40c00000, s0
+; GFX11-FAKE16-NEXT: v_or_b32_e32 v15, 0x400000, v12
+; GFX11-FAKE16-NEXT: v_bfe_u32 v71, v11, 16, 1
+; GFX11-FAKE16-NEXT: v_add_nc_u32_e32 v9, 0x7fff, v9
+; GFX11-FAKE16-NEXT: v_cmp_u_f32_e32 vcc_lo, v12, v12
+; GFX11-FAKE16-NEXT: v_add_nc_u32_e32 v10, 0x7fff, v10
+; GFX11-FAKE16-NEXT: v_or_b32_e32 v82, 0x400000, v14
+; GFX11-FAKE16-NEXT: v_bfe_u32 v83, v13, 16, 1
+; GFX11-FAKE16-NEXT: v_add_nc_u32_e32 v12, v71, v11
+; GFX11-FAKE16-NEXT: v_cndmask_b32_e32 v9, v9, v15, vcc_lo
+; GFX11-FAKE16-NEXT: v_cmp_u_f32_e32 vcc_lo, v14, v14
+; GFX11-FAKE16-NEXT: s_and_b32 s0, s23, 0xffff0000
+; GFX11-FAKE16-NEXT: v_add_nc_u32_e32 v14, v83, v13
+; GFX11-FAKE16-NEXT: v_add_nc_u32_e32 v12, 0x7fff, v12
+; GFX11-FAKE16-NEXT: v_add_f32_e64 v15, 0x40c00000, s0
+; GFX11-FAKE16-NEXT: v_cndmask_b32_e32 v10, v10, v82, vcc_lo
+; GFX11-FAKE16-NEXT: v_or_b32_e32 v71, 0x400000, v11
+; GFX11-FAKE16-NEXT: v_cmp_u_f32_e32 vcc_lo, v11, v11
+; GFX11-FAKE16-NEXT: s_lshl_b32 s0, s23, 16
+; GFX11-FAKE16-NEXT: v_add_nc_u32_e32 v14, 0x7fff, v14
+; GFX11-FAKE16-NEXT: v_or_b32_e32 v82, 0x400000, v13
+; GFX11-FAKE16-NEXT: v_bfe_u32 v83, v15, 16, 1
+; GFX11-FAKE16-NEXT: v_add_f32_e64 v84, 0x40c00000, s0
+; GFX11-FAKE16-NEXT: v_cndmask_b32_e32 v71, v12, v71, vcc_lo
+; GFX11-FAKE16-NEXT: v_cmp_u_f32_e32 vcc_lo, v13, v13
+; GFX11-FAKE16-NEXT: s_and_b32 s0, s24, 0xffff0000
+; GFX11-FAKE16-NEXT: v_add_nc_u32_e32 v12, v83, v15
+; GFX11-FAKE16-NEXT: v_bfe_u32 v13, v84, 16, 1
+; GFX11-FAKE16-NEXT: v_or_b32_e32 v83, 0x400000, v15
+; GFX11-FAKE16-NEXT: v_cndmask_b32_e32 v11, v14, v82, vcc_lo
+; GFX11-FAKE16-NEXT: v_add_f32_e64 v14, 0x40c00000, s0
+; GFX11-FAKE16-NEXT: v_add_f32_e64 v82, 0x40c00000, s1
+; GFX11-FAKE16-NEXT: v_add_nc_u32_e32 v12, 0x7fff, v12
+; GFX11-FAKE16-NEXT: v_add_nc_u32_e32 v13, v13, v84
+; GFX11-FAKE16-NEXT: v_cmp_u_f32_e32 vcc_lo, v15, v15
+; GFX11-FAKE16-NEXT: v_bfe_u32 v85, v14, 16, 1
+; GFX11-FAKE16-NEXT: v_bfe_u32 v86, v82, 16, 1
+; GFX11-FAKE16-NEXT: s_and_b32 s0, s25, 0xffff0000
+; GFX11-FAKE16-NEXT: v_or_b32_e32 v96, 0x400000, v82
+; GFX11-FAKE16-NEXT: v_dual_cndmask_b32 v83, v12, v83 :: v_dual_add_nc_u32 v12, 0x7fff, v13
+; GFX11-FAKE16-NEXT: v_or_b32_e32 v13, 0x400000, v84
+; GFX11-FAKE16-NEXT: v_add_nc_u32_e32 v15, v85, v14
+; GFX11-FAKE16-NEXT: v_cmp_u_f32_e32 vcc_lo, v84, v84
+; GFX11-FAKE16-NEXT: v_add_nc_u32_e32 v85, v86, v82
+; GFX11-FAKE16-NEXT: v_or_b32_e32 v84, 0x400000, v14
+; GFX11-FAKE16-NEXT: v_lshrrev_b32_e32 v6, 16, v6
+; GFX11-FAKE16-NEXT: v_dual_cndmask_b32 v12, v12, v13 :: v_dual_add_nc_u32 v15, 0x7fff, v15
+; GFX11-FAKE16-NEXT: v_cmp_u_f32_e32 vcc_lo, v14, v14
+; GFX11-FAKE16-NEXT: v_add_nc_u32_e32 v85, 0x7fff, v85
+; GFX11-FAKE16-NEXT: v_add_f32_e64 v13, 0x40c00000, s0
+; GFX11-FAKE16-NEXT: s_lshl_b32 s0, s25, 16
+; GFX11-FAKE16-NEXT: v_and_or_b32 v6, 0xffff0000, v70, v6
+; GFX11-FAKE16-NEXT: v_cndmask_b32_e32 v84, v15, v84, vcc_lo
+; GFX11-FAKE16-NEXT: v_cmp_u_f32_e32 vcc_lo, v82, v82
+; GFX11-FAKE16-NEXT: v_add_f32_e64 v87, 0x40c00000, s0
+; GFX11-FAKE16-NEXT: s_and_b32 s0, s26, 0xffff0000
+; GFX11-FAKE16-NEXT: v_bfe_u32 v86, v13, 16, 1
+; GFX11-FAKE16-NEXT: v_lshrrev_b32_e32 v70, 16, v0
+; GFX11-FAKE16-NEXT: v_cndmask_b32_e32 v82, v85, v96, vcc_lo
+; GFX11-FAKE16-NEXT: v_add_f32_e64 v85, 0x40c00000, s0
+; GFX11-FAKE16-NEXT: s_lshl_b32 s0, s26, 16
+; GFX11-FAKE16-NEXT: v_bfe_u32 v15, v87, 16, 1
+; GFX11-FAKE16-NEXT: v_add_f32_e64 v96, 0x40c00000, s0
+; GFX11-FAKE16-NEXT: s_lshl_b32 s0, s27, 16
+; GFX11-FAKE16-NEXT: v_bfe_u32 v97, v85, 16, 1
+; GFX11-FAKE16-NEXT: v_add_f32_e64 v98, 0x40c00000, s0
+; GFX11-FAKE16-NEXT: s_and_b32 s0, s27, 0xffff0000
+; GFX11-FAKE16-NEXT: v_bfe_u32 v99, v96, 16, 1
+; GFX11-FAKE16-NEXT: v_add_f32_e64 v100, 0x40c00000, s0
+; GFX11-FAKE16-NEXT: v_or_b32_e32 v113, 0x400000, v96
+; GFX11-FAKE16-NEXT: v_bfe_u32 v101, v98, 16, 1
+; GFX11-FAKE16-NEXT: v_cmp_u_f32_e32 vcc_lo, v96, v96
+; GFX11-FAKE16-NEXT: v_add_nc_u32_e32 v99, v99, v96
+; GFX11-FAKE16-NEXT: v_add_nc_u32_e32 v97, v97, v85
+; GFX11-FAKE16-NEXT: v_bfe_u32 v103, v100, 16, 1
+; GFX11-FAKE16-NEXT: v_add_nc_u32_e32 v101, v101, v98
+; GFX11-FAKE16-NEXT: v_or_b32_e32 v114, 0x400000, v98
+; GFX11-FAKE16-NEXT: v_add_nc_u32_e32 v99, 0x7fff, v99
+; GFX11-FAKE16-NEXT: v_add_nc_u32_e32 v97, 0x7fff, v97
+; GFX11-FAKE16-NEXT: v_or_b32_e32 v112, 0x400000, v85
+; GFX11-FAKE16-NEXT: v_add_nc_u32_e32 v101, 0x7fff, v101
+; GFX11-FAKE16-NEXT: v_add_nc_u32_e32 v103, v103, v100
+; GFX11-FAKE16-NEXT: v_cndmask_b32_e32 v96, v99, v113, vcc_lo
+; GFX11-FAKE16-NEXT: v_cmp_u_f32_e32 vcc_lo, v98, v98
+; GFX11-FAKE16-NEXT: v_add_nc_u32_e32 v15, v15, v87
+; GFX11-FAKE16-NEXT: v_add_nc_u32_e32 v14, v86, v13
+; GFX11-FAKE16-NEXT: v_add_nc_u32_e32 v99, 0x7fff, v103
+; GFX11-FAKE16-NEXT: v_or_b32_e32 v103, 0x400000, v100
+; GFX11-FAKE16-NEXT: v_cndmask_b32_e32 v98, v101, v114, vcc_lo
+; GFX11-FAKE16-NEXT: v_cmp_u_f32_e32 vcc_lo, v85, v85
+; GFX11-FAKE16-NEXT: v_add_nc_u32_e32 v15, 0x7fff, v15
+; GFX11-FAKE16-NEXT: v_or_b32_e32 v102, 0x400000, v87
+; GFX11-FAKE16-NEXT: v_add_nc_u32_e32 v14, 0x7fff, v14
+; GFX11-FAKE16-NEXT: v_or_b32_e32 v86, 0x400000, v13
+; GFX11-FAKE16-NEXT: v_cndmask_b32_e32 v85, v97, v112, vcc_lo
+; GFX11-FAKE16-NEXT: v_cmp_u_f32_e32 vcc_lo, v100, v100
+; GFX11-FAKE16-NEXT: v_lshrrev_b32_e32 v96, 16, v96
+; GFX11-FAKE16-NEXT: v_and_or_b32 v2, 0xffff0000, v64, v65
+; GFX11-FAKE16-NEXT: v_and_or_b32 v0, 0xffff0000, v55, v69
+; GFX11-FAKE16-NEXT: v_lshrrev_b32_e32 v55, 16, v30
+; GFX11-FAKE16-NEXT: v_cndmask_b32_e32 v97, v99, v103, vcc_lo
+; GFX11-FAKE16-NEXT: v_cmp_u_f32_e32 vcc_lo, v87, v87
+; GFX11-FAKE16-NEXT: v_lshrrev_b32_e32 v64, 16, v29
+; GFX11-FAKE16-NEXT: v_lshrrev_b32_e32 v65, 16, v28
+; GFX11-FAKE16-NEXT: v_and_or_b32 v4, 0xffff0000, v68, v4
+; GFX11-FAKE16-NEXT: v_lshrrev_b32_e32 v68, 16, v3
+; GFX11-FAKE16-NEXT: v_cndmask_b32_e32 v87, v15, v102, vcc_lo
+; GFX11-FAKE16-NEXT: v_cmp_u_f32_e32 vcc_lo, v13, v13
+; GFX11-FAKE16-NEXT: v_and_or_b32 v3, 0xffff0000, v66, v67
+; GFX11-FAKE16-NEXT: v_lshrrev_b32_e32 v66, 16, v27
+; GFX11-FAKE16-NEXT: v_and_or_b32 v29, 0xffff0000, v52, v55
+; GFX11-FAKE16-NEXT: v_and_or_b32 v28, 0xffff0000, v51, v64
+; GFX11-FAKE16-NEXT: v_cndmask_b32_e32 v13, v14, v86, vcc_lo
+; GFX11-FAKE16-NEXT: v_and_or_b32 v14, 0xffff0000, v85, v96
+; GFX11-FAKE16-NEXT: v_lshrrev_b32_e32 v85, 16, v87
+; GFX11-FAKE16-NEXT: v_lshrrev_b32_e32 v87, 16, v11
+; GFX11-FAKE16-NEXT: v_and_or_b32 v27, 0xffff0000, v50, v65
+; GFX11-FAKE16-NEXT: v_lshrrev_b32_e32 v50, 16, v25
+; GFX11-FAKE16-NEXT: v_lshrrev_b32_e32 v51, 16, v24
+; GFX11-FAKE16-NEXT: v_lshrrev_b32_e32 v52, 16, v23
+; GFX11-FAKE16-NEXT: v_lshrrev_b32_e32 v98, 16, v98
+; GFX11-FAKE16-NEXT: v_lshrrev_b32_e32 v82, 16, v82
+; GFX11-FAKE16-NEXT: v_lshrrev_b32_e32 v86, 16, v12
+; GFX11-FAKE16-NEXT: v_lshrrev_b32_e32 v96, 16, v10
+; GFX11-FAKE16-NEXT: v_and_or_b32 v10, 0xffff0000, v71, v87
+; GFX11-FAKE16-NEXT: v_lshrrev_b32_e32 v71, 16, v81
+; GFX11-FAKE16-NEXT: v_lshrrev_b32_e32 v7, 16, v7
+; GFX11-FAKE16-NEXT: v_and_or_b32 v30, 0xffff0000, v53, v54
+; GFX11-FAKE16-NEXT: v_lshrrev_b32_e32 v53, 16, v22
+; GFX11-FAKE16-NEXT: v_and_or_b32 v25, 0xffff0000, v48, v49
+; GFX11-FAKE16-NEXT: v_and_or_b32 v24, 0xffff0000, v39, v50
+; GFX11-FAKE16-NEXT: v_and_or_b32 v23, 0xffff0000, v38, v51
+; GFX11-FAKE16-NEXT: v_and_or_b32 v22, 0xffff0000, v37, v52
+; GFX11-FAKE16-NEXT: v_lshrrev_b32_e32 v37, 16, v20
+; GFX11-FAKE16-NEXT: v_lshrrev_b32_e32 v38, 16, v19
+; GFX11-FAKE16-NEXT: v_lshrrev_b32_e32 v39, 16, v18
+; GFX11-FAKE16-NEXT: v_lshrrev_b32_e32 v48, 16, v17
+; GFX11-FAKE16-NEXT: v_and_or_b32 v15, 0xffff0000, v97, v98
+; GFX11-FAKE16-NEXT: v_and_or_b32 v13, 0xffff0000, v13, v85
+; GFX11-FAKE16-NEXT: v_and_or_b32 v12, 0xffff0000, v84, v82
+; GFX11-FAKE16-NEXT: v_and_or_b32 v11, 0xffff0000, v83, v86
+; GFX11-FAKE16-NEXT: v_and_or_b32 v9, 0xffff0000, v9, v96
+; GFX11-FAKE16-NEXT: v_and_or_b32 v8, 0xffff0000, v8, v71
+; GFX11-FAKE16-NEXT: v_and_or_b32 v7, 0xffff0000, v80, v7
+; GFX11-FAKE16-NEXT: v_and_or_b32 v1, 0xffff0000, v1, v68
+; GFX11-FAKE16-NEXT: v_and_or_b32 v31, 0xffff0000, v31, v70
+; GFX11-FAKE16-NEXT: v_and_or_b32 v26, 0xffff0000, v26, v66
+; GFX11-FAKE16-NEXT: v_and_or_b32 v21, 0xffff0000, v21, v53
+; GFX11-FAKE16-NEXT: v_and_or_b32 v20, 0xffff0000, v35, v36
+; GFX11-FAKE16-NEXT: v_and_or_b32 v19, 0xffff0000, v34, v37
+; GFX11-FAKE16-NEXT: v_and_or_b32 v18, 0xffff0000, v33, v38
+; GFX11-FAKE16-NEXT: v_and_or_b32 v17, 0xffff0000, v32, v39
+; GFX11-FAKE16-NEXT: v_and_or_b32 v16, 0xffff0000, v16, v48
+; GFX11-FAKE16-NEXT: s_setpc_b64 s[30:31]
+; GFX11-FAKE16-NEXT: .LBB105_3:
+; GFX11-FAKE16-NEXT: s_branch .LBB105_2
+; GFX11-FAKE16-NEXT: .LBB105_4:
+; GFX11-FAKE16-NEXT: v_dual_mov_b32 v0, s12 :: v_dual_mov_b32 v1, s13
+; GFX11-FAKE16-NEXT: v_dual_mov_b32 v2, s14 :: v_dual_mov_b32 v3, s15
+; GFX11-FAKE16-NEXT: v_dual_mov_b32 v4, s16 :: v_dual_mov_b32 v5, s17
+; GFX11-FAKE16-NEXT: v_dual_mov_b32 v6, s18 :: v_dual_mov_b32 v7, s19
+; GFX11-FAKE16-NEXT: v_dual_mov_b32 v8, s20 :: v_dual_mov_b32 v9, s21
+; GFX11-FAKE16-NEXT: v_dual_mov_b32 v10, s22 :: v_dual_mov_b32 v11, s23
+; GFX11-FAKE16-NEXT: v_dual_mov_b32 v12, s24 :: v_dual_mov_b32 v13, s25
+; GFX11-FAKE16-NEXT: v_dual_mov_b32 v14, s26 :: v_dual_mov_b32 v15, s27
+; GFX11-FAKE16-NEXT: s_setpc_b64 s[30:31]
%cmp = icmp eq i32 %b, 0
br i1 %cmp, label %cmp.true, label %cmp.false
diff --git a/llvm/test/CodeGen/AMDGPU/amdgcn.bitcast.128bit.ll b/llvm/test/CodeGen/AMDGPU/amdgcn.bitcast.128bit.ll
index 582f31b..c6211aa 100644
--- a/llvm/test/CodeGen/AMDGPU/amdgcn.bitcast.128bit.ll
+++ b/llvm/test/CodeGen/AMDGPU/amdgcn.bitcast.128bit.ll
@@ -3090,108 +3090,206 @@ define inreg <4 x i32> @bitcast_v8bf16_to_v4i32_scalar(<8 x bfloat> inreg %a, i3
; GFX9-NEXT: v_mov_b32_e32 v3, s19
; GFX9-NEXT: s_setpc_b64 s[30:31]
;
-; GFX11-LABEL: bitcast_v8bf16_to_v4i32_scalar:
-; GFX11: ; %bb.0:
-; GFX11-NEXT: s_waitcnt vmcnt(0) expcnt(0) lgkmcnt(0)
-; GFX11-NEXT: s_cmp_lg_u32 s16, 0
-; GFX11-NEXT: s_mov_b32 s4, 0
-; GFX11-NEXT: s_cbranch_scc0 .LBB23_3
-; GFX11-NEXT: ; %bb.1: ; %Flow
-; GFX11-NEXT: s_and_not1_b32 vcc_lo, exec_lo, s4
-; GFX11-NEXT: s_cbranch_vccnz .LBB23_4
-; GFX11-NEXT: .LBB23_2: ; %cmp.true
-; GFX11-NEXT: s_lshl_b32 s4, s3, 16
-; GFX11-NEXT: s_pack_lh_b32_b16 s3, 0, s3
-; GFX11-NEXT: v_add_f32_e64 v0, 0x40c00000, s4
-; GFX11-NEXT: v_add_f32_e64 v1, 0x40c00000, s3
-; GFX11-NEXT: s_pack_lh_b32_b16 s4, 0, s2
-; GFX11-NEXT: s_lshl_b32 s2, s2, 16
-; GFX11-NEXT: v_add_f32_e64 v3, 0x40c00000, s4
-; GFX11-NEXT: v_bfe_u32 v2, v0, 16, 1
-; GFX11-NEXT: v_bfe_u32 v4, v1, 16, 1
-; GFX11-NEXT: v_or_b32_e32 v6, 0x400000, v1
-; GFX11-NEXT: v_or_b32_e32 v8, 0x400000, v0
-; GFX11-NEXT: v_bfe_u32 v7, v3, 16, 1
-; GFX11-NEXT: v_cmp_u_f32_e32 vcc_lo, v0, v0
-; GFX11-NEXT: v_add_nc_u32_e32 v4, v4, v1
-; GFX11-NEXT: v_or_b32_e32 v9, 0x400000, v3
-; GFX11-NEXT: v_add_f32_e64 v5, 0x40c00000, s2
-; GFX11-NEXT: v_add_nc_u32_e32 v7, v7, v3
-; GFX11-NEXT: s_pack_lh_b32_b16 s3, 0, s1
-; GFX11-NEXT: v_add_nc_u32_e32 v4, 0x7fff, v4
-; GFX11-NEXT: v_add_nc_u32_e32 v2, v2, v0
-; GFX11-NEXT: s_lshl_b32 s1, s1, 16
-; GFX11-NEXT: v_add_nc_u32_e32 v7, 0x7fff, v7
-; GFX11-NEXT: s_delay_alu instid0(VALU_DEP_2) | instskip(NEXT) | instid1(VALU_DEP_1)
-; GFX11-NEXT: v_add_nc_u32_e32 v2, 0x7fff, v2
-; GFX11-NEXT: v_cndmask_b32_e32 v0, v2, v8, vcc_lo
-; GFX11-NEXT: v_cmp_u_f32_e32 vcc_lo, v1, v1
-; GFX11-NEXT: v_bfe_u32 v2, v5, 16, 1
-; GFX11-NEXT: v_add_f32_e64 v8, 0x40c00000, s1
-; GFX11-NEXT: s_lshl_b32 s1, s0, 16
-; GFX11-NEXT: s_pack_lh_b32_b16 s0, 0, s0
-; GFX11-NEXT: v_cndmask_b32_e32 v1, v4, v6, vcc_lo
-; GFX11-NEXT: v_cmp_u_f32_e32 vcc_lo, v3, v3
-; GFX11-NEXT: v_add_nc_u32_e32 v2, v2, v5
-; GFX11-NEXT: v_add_f32_e64 v4, 0x40c00000, s3
-; GFX11-NEXT: v_or_b32_e32 v13, 0x400000, v8
-; GFX11-NEXT: v_lshrrev_b32_e32 v0, 16, v0
-; GFX11-NEXT: s_delay_alu instid0(VALU_DEP_4) | instskip(SKIP_3) | instid1(VALU_DEP_4)
-; GFX11-NEXT: v_dual_cndmask_b32 v3, v7, v9 :: v_dual_add_nc_u32 v2, 0x7fff, v2
-; GFX11-NEXT: v_cmp_u_f32_e32 vcc_lo, v5, v5
-; GFX11-NEXT: v_bfe_u32 v7, v4, 16, 1
-; GFX11-NEXT: v_add_f32_e64 v9, 0x40c00000, s0
-; GFX11-NEXT: v_lshrrev_b32_e32 v6, 16, v3
-; GFX11-NEXT: v_or_b32_e32 v3, 0x400000, v5
-; GFX11-NEXT: v_bfe_u32 v5, v8, 16, 1
-; GFX11-NEXT: v_add_nc_u32_e32 v7, v7, v4
-; GFX11-NEXT: v_or_b32_e32 v12, 0x400000, v4
-; GFX11-NEXT: v_bfe_u32 v11, v9, 16, 1
-; GFX11-NEXT: v_cndmask_b32_e32 v2, v2, v3, vcc_lo
-; GFX11-NEXT: v_add_f32_e64 v3, 0x40c00000, s1
-; GFX11-NEXT: v_add_nc_u32_e32 v5, v5, v8
-; GFX11-NEXT: v_add_nc_u32_e32 v7, 0x7fff, v7
-; GFX11-NEXT: v_cmp_u_f32_e32 vcc_lo, v8, v8
-; GFX11-NEXT: v_add_nc_u32_e32 v11, v11, v9
-; GFX11-NEXT: v_bfe_u32 v10, v3, 16, 1
-; GFX11-NEXT: v_add_nc_u32_e32 v5, 0x7fff, v5
-; GFX11-NEXT: v_or_b32_e32 v14, 0x400000, v3
-; GFX11-NEXT: v_lshrrev_b32_e32 v2, 16, v2
-; GFX11-NEXT: v_add_nc_u32_e32 v8, 0x7fff, v11
-; GFX11-NEXT: s_delay_alu instid0(VALU_DEP_4) | instskip(SKIP_3) | instid1(VALU_DEP_4)
-; GFX11-NEXT: v_dual_cndmask_b32 v5, v5, v13 :: v_dual_add_nc_u32 v10, v10, v3
-; GFX11-NEXT: v_cmp_u_f32_e32 vcc_lo, v3, v3
-; GFX11-NEXT: v_or_b32_e32 v11, 0x400000, v9
-; GFX11-NEXT: v_lshrrev_b32_e32 v1, 16, v1
-; GFX11-NEXT: v_add_nc_u32_e32 v10, 0x7fff, v10
-; GFX11-NEXT: v_lshrrev_b32_e32 v5, 16, v5
-; GFX11-NEXT: v_and_b32_e32 v0, 0xffff, v0
-; GFX11-NEXT: v_and_b32_e32 v2, 0xffff, v2
-; GFX11-NEXT: s_delay_alu instid0(VALU_DEP_4) | instskip(SKIP_2) | instid1(VALU_DEP_4)
-; GFX11-NEXT: v_cndmask_b32_e32 v3, v10, v14, vcc_lo
-; GFX11-NEXT: v_cmp_u_f32_e32 vcc_lo, v4, v4
-; GFX11-NEXT: v_and_b32_e32 v5, 0xffff, v5
-; GFX11-NEXT: v_lshl_or_b32 v2, v6, 16, v2
-; GFX11-NEXT: s_delay_alu instid0(VALU_DEP_4) | instskip(SKIP_2) | instid1(VALU_DEP_2)
-; GFX11-NEXT: v_lshrrev_b32_e32 v3, 16, v3
-; GFX11-NEXT: v_cndmask_b32_e32 v4, v7, v12, vcc_lo
-; GFX11-NEXT: v_cmp_u_f32_e32 vcc_lo, v9, v9
-; GFX11-NEXT: v_lshrrev_b32_e32 v4, 16, v4
-; GFX11-NEXT: v_cndmask_b32_e32 v7, v8, v11, vcc_lo
-; GFX11-NEXT: v_and_b32_e32 v8, 0xffff, v3
-; GFX11-NEXT: v_lshl_or_b32 v3, v1, 16, v0
-; GFX11-NEXT: s_delay_alu instid0(VALU_DEP_4) | instskip(NEXT) | instid1(VALU_DEP_4)
-; GFX11-NEXT: v_lshl_or_b32 v1, v4, 16, v5
-; GFX11-NEXT: v_lshrrev_b32_e32 v7, 16, v7
-; GFX11-NEXT: s_delay_alu instid0(VALU_DEP_1)
-; GFX11-NEXT: v_lshl_or_b32 v0, v7, 16, v8
-; GFX11-NEXT: s_setpc_b64 s[30:31]
-; GFX11-NEXT: .LBB23_3:
-; GFX11-NEXT: s_branch .LBB23_2
-; GFX11-NEXT: .LBB23_4:
-; GFX11-NEXT: v_dual_mov_b32 v0, s0 :: v_dual_mov_b32 v1, s1
-; GFX11-NEXT: v_dual_mov_b32 v2, s2 :: v_dual_mov_b32 v3, s3
-; GFX11-NEXT: s_setpc_b64 s[30:31]
+; GFX11-TRUE16-LABEL: bitcast_v8bf16_to_v4i32_scalar:
+; GFX11-TRUE16: ; %bb.0:
+; GFX11-TRUE16-NEXT: s_waitcnt vmcnt(0) expcnt(0) lgkmcnt(0)
+; GFX11-TRUE16-NEXT: s_cmp_lg_u32 s16, 0
+; GFX11-TRUE16-NEXT: s_mov_b32 s4, 0
+; GFX11-TRUE16-NEXT: s_cbranch_scc0 .LBB23_3
+; GFX11-TRUE16-NEXT: ; %bb.1: ; %Flow
+; GFX11-TRUE16-NEXT: s_and_not1_b32 vcc_lo, exec_lo, s4
+; GFX11-TRUE16-NEXT: s_cbranch_vccnz .LBB23_4
+; GFX11-TRUE16-NEXT: .LBB23_2: ; %cmp.true
+; GFX11-TRUE16-NEXT: s_pack_lh_b32_b16 s4, 0, s3
+; GFX11-TRUE16-NEXT: s_lshl_b32 s3, s3, 16
+; GFX11-TRUE16-NEXT: v_add_f32_e64 v0, 0x40c00000, s4
+; GFX11-TRUE16-NEXT: v_add_f32_e64 v1, 0x40c00000, s3
+; GFX11-TRUE16-NEXT: s_pack_lh_b32_b16 s4, 0, s2
+; GFX11-TRUE16-NEXT: s_lshl_b32 s2, s2, 16
+; GFX11-TRUE16-NEXT: v_add_f32_e64 v4, 0x40c00000, s4
+; GFX11-TRUE16-NEXT: v_bfe_u32 v2, v0, 16, 1
+; GFX11-TRUE16-NEXT: v_bfe_u32 v3, v1, 16, 1
+; GFX11-TRUE16-NEXT: v_or_b32_e32 v7, 0x400000, v0
+; GFX11-TRUE16-NEXT: v_cmp_u_f32_e32 vcc_lo, v0, v0
+; GFX11-TRUE16-NEXT: v_or_b32_e32 v9, 0x400000, v1
+; GFX11-TRUE16-NEXT: v_add_f32_e64 v5, 0x40c00000, s2
+; GFX11-TRUE16-NEXT: v_add_nc_u32_e32 v3, v3, v1
+; GFX11-TRUE16-NEXT: v_bfe_u32 v8, v4, 16, 1
+; GFX11-TRUE16-NEXT: s_pack_lh_b32_b16 s3, 0, s1
+; GFX11-TRUE16-NEXT: s_lshl_b32 s1, s1, 16
+; GFX11-TRUE16-NEXT: v_bfe_u32 v10, v5, 16, 1
+; GFX11-TRUE16-NEXT: v_add_nc_u32_e32 v3, 0x7fff, v3
+; GFX11-TRUE16-NEXT: v_add_nc_u32_e32 v2, v2, v0
+; GFX11-TRUE16-NEXT: v_add_nc_u32_e32 v8, v8, v4
+; GFX11-TRUE16-NEXT: v_add_f32_e64 v6, 0x40c00000, s3
+; GFX11-TRUE16-NEXT: s_pack_lh_b32_b16 s2, 0, s0
+; GFX11-TRUE16-NEXT: s_lshl_b32 s0, s0, 16
+; GFX11-TRUE16-NEXT: v_add_nc_u32_e32 v2, 0x7fff, v2
+; GFX11-TRUE16-NEXT: s_delay_alu instid0(VALU_DEP_1)
+; GFX11-TRUE16-NEXT: v_cndmask_b32_e32 v0, v2, v7, vcc_lo
+; GFX11-TRUE16-NEXT: v_cmp_u_f32_e32 vcc_lo, v1, v1
+; GFX11-TRUE16-NEXT: v_or_b32_e32 v2, 0x400000, v4
+; GFX11-TRUE16-NEXT: v_add_nc_u32_e32 v7, 0x7fff, v8
+; GFX11-TRUE16-NEXT: v_add_nc_u32_e32 v8, v10, v5
+; GFX11-TRUE16-NEXT: v_lshrrev_b32_e32 v0, 16, v0
+; GFX11-TRUE16-NEXT: v_cndmask_b32_e32 v1, v3, v9, vcc_lo
+; GFX11-TRUE16-NEXT: v_cmp_u_f32_e32 vcc_lo, v4, v4
+; GFX11-TRUE16-NEXT: s_delay_alu instid0(VALU_DEP_4) | instskip(SKIP_1) | instid1(VALU_DEP_4)
+; GFX11-TRUE16-NEXT: v_add_nc_u32_e32 v4, 0x7fff, v8
+; GFX11-TRUE16-NEXT: v_add_f32_e64 v8, 0x40c00000, s0
+; GFX11-TRUE16-NEXT: v_lshrrev_b32_e32 v3, 16, v1
+; GFX11-TRUE16-NEXT: v_bfe_u32 v1, v6, 16, 1
+; GFX11-TRUE16-NEXT: v_cndmask_b32_e32 v2, v7, v2, vcc_lo
+; GFX11-TRUE16-NEXT: v_or_b32_e32 v7, 0x400000, v5
+; GFX11-TRUE16-NEXT: v_cmp_u_f32_e32 vcc_lo, v5, v5
+; GFX11-TRUE16-NEXT: v_mov_b16_e32 v3.h, v0.l
+; GFX11-TRUE16-NEXT: v_add_nc_u32_e32 v1, v1, v6
+; GFX11-TRUE16-NEXT: v_lshrrev_b32_e32 v0, 16, v2
+; GFX11-TRUE16-NEXT: v_add_f32_e64 v5, 0x40c00000, s2
+; GFX11-TRUE16-NEXT: v_cndmask_b32_e32 v2, v4, v7, vcc_lo
+; GFX11-TRUE16-NEXT: v_add_f32_e64 v4, 0x40c00000, s1
+; GFX11-TRUE16-NEXT: v_add_nc_u32_e32 v1, 0x7fff, v1
+; GFX11-TRUE16-NEXT: v_or_b32_e32 v7, 0x400000, v6
+; GFX11-TRUE16-NEXT: v_bfe_u32 v10, v5, 16, 1
+; GFX11-TRUE16-NEXT: v_cmp_u_f32_e32 vcc_lo, v6, v6
+; GFX11-TRUE16-NEXT: v_bfe_u32 v9, v4, 16, 1
+; GFX11-TRUE16-NEXT: v_bfe_u32 v6, v8, 16, 1
+; GFX11-TRUE16-NEXT: v_or_b32_e32 v12, 0x400000, v8
+; GFX11-TRUE16-NEXT: v_or_b32_e32 v11, 0x400000, v4
+; GFX11-TRUE16-NEXT: v_cndmask_b32_e32 v1, v1, v7, vcc_lo
+; GFX11-TRUE16-NEXT: v_add_nc_u32_e32 v7, v9, v4
+; GFX11-TRUE16-NEXT: v_add_nc_u32_e32 v9, v10, v5
+; GFX11-TRUE16-NEXT: v_cmp_u_f32_e32 vcc_lo, v5, v5
+; GFX11-TRUE16-NEXT: v_lshrrev_b32_e32 v2, 16, v2
+; GFX11-TRUE16-NEXT: v_lshrrev_b32_e32 v10, 16, v1
+; GFX11-TRUE16-NEXT: v_add_nc_u32_e32 v1, v6, v8
+; GFX11-TRUE16-NEXT: v_add_nc_u32_e32 v6, 0x7fff, v7
+; GFX11-TRUE16-NEXT: v_add_nc_u32_e32 v7, 0x7fff, v9
+; GFX11-TRUE16-NEXT: v_or_b32_e32 v9, 0x400000, v5
+; GFX11-TRUE16-NEXT: v_mov_b16_e32 v2.h, v0.l
+; GFX11-TRUE16-NEXT: v_add_nc_u32_e32 v1, 0x7fff, v1
+; GFX11-TRUE16-NEXT: s_delay_alu instid0(VALU_DEP_3) | instskip(SKIP_1) | instid1(VALU_DEP_2)
+; GFX11-TRUE16-NEXT: v_cndmask_b32_e32 v5, v7, v9, vcc_lo
+; GFX11-TRUE16-NEXT: v_cmp_u_f32_e32 vcc_lo, v4, v4
+; GFX11-TRUE16-NEXT: v_lshrrev_b32_e32 v5, 16, v5
+; GFX11-TRUE16-NEXT: v_cndmask_b32_e32 v4, v6, v11, vcc_lo
+; GFX11-TRUE16-NEXT: v_cmp_u_f32_e32 vcc_lo, v8, v8
+; GFX11-TRUE16-NEXT: v_cndmask_b32_e32 v6, v1, v12, vcc_lo
+; GFX11-TRUE16-NEXT: s_delay_alu instid0(VALU_DEP_3) | instskip(SKIP_1) | instid1(VALU_DEP_3)
+; GFX11-TRUE16-NEXT: v_lshrrev_b32_e32 v1, 16, v4
+; GFX11-TRUE16-NEXT: v_mov_b16_e32 v1.h, v10.l
+; GFX11-TRUE16-NEXT: v_lshrrev_b32_e32 v0, 16, v6
+; GFX11-TRUE16-NEXT: v_mov_b16_e32 v0.h, v5.l
+; GFX11-TRUE16-NEXT: s_setpc_b64 s[30:31]
+; GFX11-TRUE16-NEXT: .LBB23_3:
+; GFX11-TRUE16-NEXT: s_branch .LBB23_2
+; GFX11-TRUE16-NEXT: .LBB23_4:
+; GFX11-TRUE16-NEXT: v_dual_mov_b32 v0, s0 :: v_dual_mov_b32 v1, s1
+; GFX11-TRUE16-NEXT: v_dual_mov_b32 v2, s2 :: v_dual_mov_b32 v3, s3
+; GFX11-TRUE16-NEXT: s_setpc_b64 s[30:31]
+;
+; GFX11-FAKE16-LABEL: bitcast_v8bf16_to_v4i32_scalar:
+; GFX11-FAKE16: ; %bb.0:
+; GFX11-FAKE16-NEXT: s_waitcnt vmcnt(0) expcnt(0) lgkmcnt(0)
+; GFX11-FAKE16-NEXT: s_cmp_lg_u32 s16, 0
+; GFX11-FAKE16-NEXT: s_mov_b32 s4, 0
+; GFX11-FAKE16-NEXT: s_cbranch_scc0 .LBB23_3
+; GFX11-FAKE16-NEXT: ; %bb.1: ; %Flow
+; GFX11-FAKE16-NEXT: s_and_not1_b32 vcc_lo, exec_lo, s4
+; GFX11-FAKE16-NEXT: s_cbranch_vccnz .LBB23_4
+; GFX11-FAKE16-NEXT: .LBB23_2: ; %cmp.true
+; GFX11-FAKE16-NEXT: s_lshl_b32 s4, s3, 16
+; GFX11-FAKE16-NEXT: s_pack_lh_b32_b16 s3, 0, s3
+; GFX11-FAKE16-NEXT: v_add_f32_e64 v0, 0x40c00000, s4
+; GFX11-FAKE16-NEXT: v_add_f32_e64 v1, 0x40c00000, s3
+; GFX11-FAKE16-NEXT: s_pack_lh_b32_b16 s4, 0, s2
+; GFX11-FAKE16-NEXT: s_lshl_b32 s2, s2, 16
+; GFX11-FAKE16-NEXT: v_add_f32_e64 v3, 0x40c00000, s4
+; GFX11-FAKE16-NEXT: v_bfe_u32 v2, v0, 16, 1
+; GFX11-FAKE16-NEXT: v_bfe_u32 v4, v1, 16, 1
+; GFX11-FAKE16-NEXT: v_or_b32_e32 v6, 0x400000, v1
+; GFX11-FAKE16-NEXT: v_or_b32_e32 v8, 0x400000, v0
+; GFX11-FAKE16-NEXT: v_bfe_u32 v7, v3, 16, 1
+; GFX11-FAKE16-NEXT: v_cmp_u_f32_e32 vcc_lo, v0, v0
+; GFX11-FAKE16-NEXT: v_add_nc_u32_e32 v4, v4, v1
+; GFX11-FAKE16-NEXT: v_or_b32_e32 v9, 0x400000, v3
+; GFX11-FAKE16-NEXT: v_add_f32_e64 v5, 0x40c00000, s2
+; GFX11-FAKE16-NEXT: v_add_nc_u32_e32 v7, v7, v3
+; GFX11-FAKE16-NEXT: s_pack_lh_b32_b16 s3, 0, s1
+; GFX11-FAKE16-NEXT: v_add_nc_u32_e32 v4, 0x7fff, v4
+; GFX11-FAKE16-NEXT: v_add_nc_u32_e32 v2, v2, v0
+; GFX11-FAKE16-NEXT: s_lshl_b32 s1, s1, 16
+; GFX11-FAKE16-NEXT: v_add_nc_u32_e32 v7, 0x7fff, v7
+; GFX11-FAKE16-NEXT: s_delay_alu instid0(VALU_DEP_2) | instskip(NEXT) | instid1(VALU_DEP_1)
+; GFX11-FAKE16-NEXT: v_add_nc_u32_e32 v2, 0x7fff, v2
+; GFX11-FAKE16-NEXT: v_cndmask_b32_e32 v0, v2, v8, vcc_lo
+; GFX11-FAKE16-NEXT: v_cmp_u_f32_e32 vcc_lo, v1, v1
+; GFX11-FAKE16-NEXT: v_bfe_u32 v2, v5, 16, 1
+; GFX11-FAKE16-NEXT: v_add_f32_e64 v8, 0x40c00000, s1
+; GFX11-FAKE16-NEXT: s_lshl_b32 s1, s0, 16
+; GFX11-FAKE16-NEXT: s_pack_lh_b32_b16 s0, 0, s0
+; GFX11-FAKE16-NEXT: v_cndmask_b32_e32 v1, v4, v6, vcc_lo
+; GFX11-FAKE16-NEXT: v_cmp_u_f32_e32 vcc_lo, v3, v3
+; GFX11-FAKE16-NEXT: v_add_nc_u32_e32 v2, v2, v5
+; GFX11-FAKE16-NEXT: v_add_f32_e64 v4, 0x40c00000, s3
+; GFX11-FAKE16-NEXT: v_or_b32_e32 v13, 0x400000, v8
+; GFX11-FAKE16-NEXT: v_lshrrev_b32_e32 v0, 16, v0
+; GFX11-FAKE16-NEXT: s_delay_alu instid0(VALU_DEP_4) | instskip(SKIP_3) | instid1(VALU_DEP_4)
+; GFX11-FAKE16-NEXT: v_dual_cndmask_b32 v3, v7, v9 :: v_dual_add_nc_u32 v2, 0x7fff, v2
+; GFX11-FAKE16-NEXT: v_cmp_u_f32_e32 vcc_lo, v5, v5
+; GFX11-FAKE16-NEXT: v_bfe_u32 v7, v4, 16, 1
+; GFX11-FAKE16-NEXT: v_add_f32_e64 v9, 0x40c00000, s0
+; GFX11-FAKE16-NEXT: v_lshrrev_b32_e32 v6, 16, v3
+; GFX11-FAKE16-NEXT: v_or_b32_e32 v3, 0x400000, v5
+; GFX11-FAKE16-NEXT: v_bfe_u32 v5, v8, 16, 1
+; GFX11-FAKE16-NEXT: v_add_nc_u32_e32 v7, v7, v4
+; GFX11-FAKE16-NEXT: v_or_b32_e32 v12, 0x400000, v4
+; GFX11-FAKE16-NEXT: v_bfe_u32 v11, v9, 16, 1
+; GFX11-FAKE16-NEXT: v_cndmask_b32_e32 v2, v2, v3, vcc_lo
+; GFX11-FAKE16-NEXT: v_add_f32_e64 v3, 0x40c00000, s1
+; GFX11-FAKE16-NEXT: v_add_nc_u32_e32 v5, v5, v8
+; GFX11-FAKE16-NEXT: v_add_nc_u32_e32 v7, 0x7fff, v7
+; GFX11-FAKE16-NEXT: v_cmp_u_f32_e32 vcc_lo, v8, v8
+; GFX11-FAKE16-NEXT: v_add_nc_u32_e32 v11, v11, v9
+; GFX11-FAKE16-NEXT: v_bfe_u32 v10, v3, 16, 1
+; GFX11-FAKE16-NEXT: v_add_nc_u32_e32 v5, 0x7fff, v5
+; GFX11-FAKE16-NEXT: v_or_b32_e32 v14, 0x400000, v3
+; GFX11-FAKE16-NEXT: v_lshrrev_b32_e32 v2, 16, v2
+; GFX11-FAKE16-NEXT: v_add_nc_u32_e32 v8, 0x7fff, v11
+; GFX11-FAKE16-NEXT: s_delay_alu instid0(VALU_DEP_4) | instskip(SKIP_3) | instid1(VALU_DEP_4)
+; GFX11-FAKE16-NEXT: v_dual_cndmask_b32 v5, v5, v13 :: v_dual_add_nc_u32 v10, v10, v3
+; GFX11-FAKE16-NEXT: v_cmp_u_f32_e32 vcc_lo, v3, v3
+; GFX11-FAKE16-NEXT: v_or_b32_e32 v11, 0x400000, v9
+; GFX11-FAKE16-NEXT: v_lshrrev_b32_e32 v1, 16, v1
+; GFX11-FAKE16-NEXT: v_add_nc_u32_e32 v10, 0x7fff, v10
+; GFX11-FAKE16-NEXT: v_lshrrev_b32_e32 v5, 16, v5
+; GFX11-FAKE16-NEXT: v_and_b32_e32 v0, 0xffff, v0
+; GFX11-FAKE16-NEXT: v_and_b32_e32 v2, 0xffff, v2
+; GFX11-FAKE16-NEXT: s_delay_alu instid0(VALU_DEP_4) | instskip(SKIP_2) | instid1(VALU_DEP_4)
+; GFX11-FAKE16-NEXT: v_cndmask_b32_e32 v3, v10, v14, vcc_lo
+; GFX11-FAKE16-NEXT: v_cmp_u_f32_e32 vcc_lo, v4, v4
+; GFX11-FAKE16-NEXT: v_and_b32_e32 v5, 0xffff, v5
+; GFX11-FAKE16-NEXT: v_lshl_or_b32 v2, v6, 16, v2
+; GFX11-FAKE16-NEXT: s_delay_alu instid0(VALU_DEP_4) | instskip(SKIP_2) | instid1(VALU_DEP_2)
+; GFX11-FAKE16-NEXT: v_lshrrev_b32_e32 v3, 16, v3
+; GFX11-FAKE16-NEXT: v_cndmask_b32_e32 v4, v7, v12, vcc_lo
+; GFX11-FAKE16-NEXT: v_cmp_u_f32_e32 vcc_lo, v9, v9
+; GFX11-FAKE16-NEXT: v_lshrrev_b32_e32 v4, 16, v4
+; GFX11-FAKE16-NEXT: v_cndmask_b32_e32 v7, v8, v11, vcc_lo
+; GFX11-FAKE16-NEXT: v_and_b32_e32 v8, 0xffff, v3
+; GFX11-FAKE16-NEXT: v_lshl_or_b32 v3, v1, 16, v0
+; GFX11-FAKE16-NEXT: s_delay_alu instid0(VALU_DEP_4) | instskip(NEXT) | instid1(VALU_DEP_4)
+; GFX11-FAKE16-NEXT: v_lshl_or_b32 v1, v4, 16, v5
+; GFX11-FAKE16-NEXT: v_lshrrev_b32_e32 v7, 16, v7
+; GFX11-FAKE16-NEXT: s_delay_alu instid0(VALU_DEP_1)
+; GFX11-FAKE16-NEXT: v_lshl_or_b32 v0, v7, 16, v8
+; GFX11-FAKE16-NEXT: s_setpc_b64 s[30:31]
+; GFX11-FAKE16-NEXT: .LBB23_3:
+; GFX11-FAKE16-NEXT: s_branch .LBB23_2
+; GFX11-FAKE16-NEXT: .LBB23_4:
+; GFX11-FAKE16-NEXT: v_dual_mov_b32 v0, s0 :: v_dual_mov_b32 v1, s1
+; GFX11-FAKE16-NEXT: v_dual_mov_b32 v2, s2 :: v_dual_mov_b32 v3, s3
+; GFX11-FAKE16-NEXT: s_setpc_b64 s[30:31]
%cmp = icmp eq i32 %b, 0
br i1 %cmp, label %cmp.true, label %cmp.false
@@ -7531,108 +7629,206 @@ define inreg <4 x float> @bitcast_v8bf16_to_v4f32_scalar(<8 x bfloat> inreg %a,
; GFX9-NEXT: v_mov_b32_e32 v3, s19
; GFX9-NEXT: s_setpc_b64 s[30:31]
;
-; GFX11-LABEL: bitcast_v8bf16_to_v4f32_scalar:
-; GFX11: ; %bb.0:
-; GFX11-NEXT: s_waitcnt vmcnt(0) expcnt(0) lgkmcnt(0)
-; GFX11-NEXT: s_cmp_lg_u32 s16, 0
-; GFX11-NEXT: s_mov_b32 s4, 0
-; GFX11-NEXT: s_cbranch_scc0 .LBB47_3
-; GFX11-NEXT: ; %bb.1: ; %Flow
-; GFX11-NEXT: s_and_not1_b32 vcc_lo, exec_lo, s4
-; GFX11-NEXT: s_cbranch_vccnz .LBB47_4
-; GFX11-NEXT: .LBB47_2: ; %cmp.true
-; GFX11-NEXT: s_lshl_b32 s4, s3, 16
-; GFX11-NEXT: s_pack_lh_b32_b16 s3, 0, s3
-; GFX11-NEXT: v_add_f32_e64 v0, 0x40c00000, s4
-; GFX11-NEXT: v_add_f32_e64 v1, 0x40c00000, s3
-; GFX11-NEXT: s_pack_lh_b32_b16 s4, 0, s2
-; GFX11-NEXT: s_lshl_b32 s2, s2, 16
-; GFX11-NEXT: v_add_f32_e64 v3, 0x40c00000, s4
-; GFX11-NEXT: v_bfe_u32 v2, v0, 16, 1
-; GFX11-NEXT: v_bfe_u32 v4, v1, 16, 1
-; GFX11-NEXT: v_or_b32_e32 v6, 0x400000, v1
-; GFX11-NEXT: v_or_b32_e32 v8, 0x400000, v0
-; GFX11-NEXT: v_bfe_u32 v7, v3, 16, 1
-; GFX11-NEXT: v_cmp_u_f32_e32 vcc_lo, v0, v0
-; GFX11-NEXT: v_add_nc_u32_e32 v4, v4, v1
-; GFX11-NEXT: v_or_b32_e32 v9, 0x400000, v3
-; GFX11-NEXT: v_add_f32_e64 v5, 0x40c00000, s2
-; GFX11-NEXT: v_add_nc_u32_e32 v7, v7, v3
-; GFX11-NEXT: s_pack_lh_b32_b16 s3, 0, s1
-; GFX11-NEXT: v_add_nc_u32_e32 v4, 0x7fff, v4
-; GFX11-NEXT: v_add_nc_u32_e32 v2, v2, v0
-; GFX11-NEXT: s_lshl_b32 s1, s1, 16
-; GFX11-NEXT: v_add_nc_u32_e32 v7, 0x7fff, v7
-; GFX11-NEXT: s_delay_alu instid0(VALU_DEP_2) | instskip(NEXT) | instid1(VALU_DEP_1)
-; GFX11-NEXT: v_add_nc_u32_e32 v2, 0x7fff, v2
-; GFX11-NEXT: v_cndmask_b32_e32 v0, v2, v8, vcc_lo
-; GFX11-NEXT: v_cmp_u_f32_e32 vcc_lo, v1, v1
-; GFX11-NEXT: v_bfe_u32 v2, v5, 16, 1
-; GFX11-NEXT: v_add_f32_e64 v8, 0x40c00000, s1
-; GFX11-NEXT: s_lshl_b32 s1, s0, 16
-; GFX11-NEXT: s_pack_lh_b32_b16 s0, 0, s0
-; GFX11-NEXT: v_cndmask_b32_e32 v1, v4, v6, vcc_lo
-; GFX11-NEXT: v_cmp_u_f32_e32 vcc_lo, v3, v3
-; GFX11-NEXT: v_add_nc_u32_e32 v2, v2, v5
-; GFX11-NEXT: v_add_f32_e64 v4, 0x40c00000, s3
-; GFX11-NEXT: v_or_b32_e32 v13, 0x400000, v8
-; GFX11-NEXT: v_lshrrev_b32_e32 v0, 16, v0
-; GFX11-NEXT: s_delay_alu instid0(VALU_DEP_4) | instskip(SKIP_3) | instid1(VALU_DEP_4)
-; GFX11-NEXT: v_dual_cndmask_b32 v3, v7, v9 :: v_dual_add_nc_u32 v2, 0x7fff, v2
-; GFX11-NEXT: v_cmp_u_f32_e32 vcc_lo, v5, v5
-; GFX11-NEXT: v_bfe_u32 v7, v4, 16, 1
-; GFX11-NEXT: v_add_f32_e64 v9, 0x40c00000, s0
-; GFX11-NEXT: v_lshrrev_b32_e32 v6, 16, v3
-; GFX11-NEXT: v_or_b32_e32 v3, 0x400000, v5
-; GFX11-NEXT: v_bfe_u32 v5, v8, 16, 1
-; GFX11-NEXT: v_add_nc_u32_e32 v7, v7, v4
-; GFX11-NEXT: v_or_b32_e32 v12, 0x400000, v4
-; GFX11-NEXT: v_bfe_u32 v11, v9, 16, 1
-; GFX11-NEXT: v_cndmask_b32_e32 v2, v2, v3, vcc_lo
-; GFX11-NEXT: v_add_f32_e64 v3, 0x40c00000, s1
-; GFX11-NEXT: v_add_nc_u32_e32 v5, v5, v8
-; GFX11-NEXT: v_add_nc_u32_e32 v7, 0x7fff, v7
-; GFX11-NEXT: v_cmp_u_f32_e32 vcc_lo, v8, v8
-; GFX11-NEXT: v_add_nc_u32_e32 v11, v11, v9
-; GFX11-NEXT: v_bfe_u32 v10, v3, 16, 1
-; GFX11-NEXT: v_add_nc_u32_e32 v5, 0x7fff, v5
-; GFX11-NEXT: v_or_b32_e32 v14, 0x400000, v3
-; GFX11-NEXT: v_lshrrev_b32_e32 v2, 16, v2
-; GFX11-NEXT: v_add_nc_u32_e32 v8, 0x7fff, v11
-; GFX11-NEXT: s_delay_alu instid0(VALU_DEP_4) | instskip(SKIP_3) | instid1(VALU_DEP_4)
-; GFX11-NEXT: v_dual_cndmask_b32 v5, v5, v13 :: v_dual_add_nc_u32 v10, v10, v3
-; GFX11-NEXT: v_cmp_u_f32_e32 vcc_lo, v3, v3
-; GFX11-NEXT: v_or_b32_e32 v11, 0x400000, v9
-; GFX11-NEXT: v_lshrrev_b32_e32 v1, 16, v1
-; GFX11-NEXT: v_add_nc_u32_e32 v10, 0x7fff, v10
-; GFX11-NEXT: v_lshrrev_b32_e32 v5, 16, v5
-; GFX11-NEXT: v_and_b32_e32 v0, 0xffff, v0
-; GFX11-NEXT: v_and_b32_e32 v2, 0xffff, v2
-; GFX11-NEXT: s_delay_alu instid0(VALU_DEP_4) | instskip(SKIP_2) | instid1(VALU_DEP_4)
-; GFX11-NEXT: v_cndmask_b32_e32 v3, v10, v14, vcc_lo
-; GFX11-NEXT: v_cmp_u_f32_e32 vcc_lo, v4, v4
-; GFX11-NEXT: v_and_b32_e32 v5, 0xffff, v5
-; GFX11-NEXT: v_lshl_or_b32 v2, v6, 16, v2
-; GFX11-NEXT: s_delay_alu instid0(VALU_DEP_4) | instskip(SKIP_2) | instid1(VALU_DEP_2)
-; GFX11-NEXT: v_lshrrev_b32_e32 v3, 16, v3
-; GFX11-NEXT: v_cndmask_b32_e32 v4, v7, v12, vcc_lo
-; GFX11-NEXT: v_cmp_u_f32_e32 vcc_lo, v9, v9
-; GFX11-NEXT: v_lshrrev_b32_e32 v4, 16, v4
-; GFX11-NEXT: v_cndmask_b32_e32 v7, v8, v11, vcc_lo
-; GFX11-NEXT: v_and_b32_e32 v8, 0xffff, v3
-; GFX11-NEXT: v_lshl_or_b32 v3, v1, 16, v0
-; GFX11-NEXT: s_delay_alu instid0(VALU_DEP_4) | instskip(NEXT) | instid1(VALU_DEP_4)
-; GFX11-NEXT: v_lshl_or_b32 v1, v4, 16, v5
-; GFX11-NEXT: v_lshrrev_b32_e32 v7, 16, v7
-; GFX11-NEXT: s_delay_alu instid0(VALU_DEP_1)
-; GFX11-NEXT: v_lshl_or_b32 v0, v7, 16, v8
-; GFX11-NEXT: s_setpc_b64 s[30:31]
-; GFX11-NEXT: .LBB47_3:
-; GFX11-NEXT: s_branch .LBB47_2
-; GFX11-NEXT: .LBB47_4:
-; GFX11-NEXT: v_dual_mov_b32 v0, s0 :: v_dual_mov_b32 v1, s1
-; GFX11-NEXT: v_dual_mov_b32 v2, s2 :: v_dual_mov_b32 v3, s3
-; GFX11-NEXT: s_setpc_b64 s[30:31]
+; GFX11-TRUE16-LABEL: bitcast_v8bf16_to_v4f32_scalar:
+; GFX11-TRUE16: ; %bb.0:
+; GFX11-TRUE16-NEXT: s_waitcnt vmcnt(0) expcnt(0) lgkmcnt(0)
+; GFX11-TRUE16-NEXT: s_cmp_lg_u32 s16, 0
+; GFX11-TRUE16-NEXT: s_mov_b32 s4, 0
+; GFX11-TRUE16-NEXT: s_cbranch_scc0 .LBB47_3
+; GFX11-TRUE16-NEXT: ; %bb.1: ; %Flow
+; GFX11-TRUE16-NEXT: s_and_not1_b32 vcc_lo, exec_lo, s4
+; GFX11-TRUE16-NEXT: s_cbranch_vccnz .LBB47_4
+; GFX11-TRUE16-NEXT: .LBB47_2: ; %cmp.true
+; GFX11-TRUE16-NEXT: s_pack_lh_b32_b16 s4, 0, s3
+; GFX11-TRUE16-NEXT: s_lshl_b32 s3, s3, 16
+; GFX11-TRUE16-NEXT: v_add_f32_e64 v0, 0x40c00000, s4
+; GFX11-TRUE16-NEXT: v_add_f32_e64 v1, 0x40c00000, s3
+; GFX11-TRUE16-NEXT: s_pack_lh_b32_b16 s4, 0, s2
+; GFX11-TRUE16-NEXT: s_lshl_b32 s2, s2, 16
+; GFX11-TRUE16-NEXT: v_add_f32_e64 v4, 0x40c00000, s4
+; GFX11-TRUE16-NEXT: v_bfe_u32 v2, v0, 16, 1
+; GFX11-TRUE16-NEXT: v_bfe_u32 v3, v1, 16, 1
+; GFX11-TRUE16-NEXT: v_or_b32_e32 v7, 0x400000, v0
+; GFX11-TRUE16-NEXT: v_cmp_u_f32_e32 vcc_lo, v0, v0
+; GFX11-TRUE16-NEXT: v_or_b32_e32 v9, 0x400000, v1
+; GFX11-TRUE16-NEXT: v_add_f32_e64 v5, 0x40c00000, s2
+; GFX11-TRUE16-NEXT: v_add_nc_u32_e32 v3, v3, v1
+; GFX11-TRUE16-NEXT: v_bfe_u32 v8, v4, 16, 1
+; GFX11-TRUE16-NEXT: s_pack_lh_b32_b16 s3, 0, s1
+; GFX11-TRUE16-NEXT: s_lshl_b32 s1, s1, 16
+; GFX11-TRUE16-NEXT: v_bfe_u32 v10, v5, 16, 1
+; GFX11-TRUE16-NEXT: v_add_nc_u32_e32 v3, 0x7fff, v3
+; GFX11-TRUE16-NEXT: v_add_nc_u32_e32 v2, v2, v0
+; GFX11-TRUE16-NEXT: v_add_nc_u32_e32 v8, v8, v4
+; GFX11-TRUE16-NEXT: v_add_f32_e64 v6, 0x40c00000, s3
+; GFX11-TRUE16-NEXT: s_pack_lh_b32_b16 s2, 0, s0
+; GFX11-TRUE16-NEXT: s_lshl_b32 s0, s0, 16
+; GFX11-TRUE16-NEXT: v_add_nc_u32_e32 v2, 0x7fff, v2
+; GFX11-TRUE16-NEXT: s_delay_alu instid0(VALU_DEP_1)
+; GFX11-TRUE16-NEXT: v_cndmask_b32_e32 v0, v2, v7, vcc_lo
+; GFX11-TRUE16-NEXT: v_cmp_u_f32_e32 vcc_lo, v1, v1
+; GFX11-TRUE16-NEXT: v_or_b32_e32 v2, 0x400000, v4
+; GFX11-TRUE16-NEXT: v_add_nc_u32_e32 v7, 0x7fff, v8
+; GFX11-TRUE16-NEXT: v_add_nc_u32_e32 v8, v10, v5
+; GFX11-TRUE16-NEXT: v_lshrrev_b32_e32 v0, 16, v0
+; GFX11-TRUE16-NEXT: v_cndmask_b32_e32 v1, v3, v9, vcc_lo
+; GFX11-TRUE16-NEXT: v_cmp_u_f32_e32 vcc_lo, v4, v4
+; GFX11-TRUE16-NEXT: s_delay_alu instid0(VALU_DEP_4) | instskip(SKIP_1) | instid1(VALU_DEP_4)
+; GFX11-TRUE16-NEXT: v_add_nc_u32_e32 v4, 0x7fff, v8
+; GFX11-TRUE16-NEXT: v_add_f32_e64 v8, 0x40c00000, s0
+; GFX11-TRUE16-NEXT: v_lshrrev_b32_e32 v3, 16, v1
+; GFX11-TRUE16-NEXT: v_bfe_u32 v1, v6, 16, 1
+; GFX11-TRUE16-NEXT: v_cndmask_b32_e32 v2, v7, v2, vcc_lo
+; GFX11-TRUE16-NEXT: v_or_b32_e32 v7, 0x400000, v5
+; GFX11-TRUE16-NEXT: v_cmp_u_f32_e32 vcc_lo, v5, v5
+; GFX11-TRUE16-NEXT: v_mov_b16_e32 v3.h, v0.l
+; GFX11-TRUE16-NEXT: v_add_nc_u32_e32 v1, v1, v6
+; GFX11-TRUE16-NEXT: v_lshrrev_b32_e32 v0, 16, v2
+; GFX11-TRUE16-NEXT: v_add_f32_e64 v5, 0x40c00000, s2
+; GFX11-TRUE16-NEXT: v_cndmask_b32_e32 v2, v4, v7, vcc_lo
+; GFX11-TRUE16-NEXT: v_add_f32_e64 v4, 0x40c00000, s1
+; GFX11-TRUE16-NEXT: v_add_nc_u32_e32 v1, 0x7fff, v1
+; GFX11-TRUE16-NEXT: v_or_b32_e32 v7, 0x400000, v6
+; GFX11-TRUE16-NEXT: v_bfe_u32 v10, v5, 16, 1
+; GFX11-TRUE16-NEXT: v_cmp_u_f32_e32 vcc_lo, v6, v6
+; GFX11-TRUE16-NEXT: v_bfe_u32 v9, v4, 16, 1
+; GFX11-TRUE16-NEXT: v_bfe_u32 v6, v8, 16, 1
+; GFX11-TRUE16-NEXT: v_or_b32_e32 v12, 0x400000, v8
+; GFX11-TRUE16-NEXT: v_or_b32_e32 v11, 0x400000, v4
+; GFX11-TRUE16-NEXT: v_cndmask_b32_e32 v1, v1, v7, vcc_lo
+; GFX11-TRUE16-NEXT: v_add_nc_u32_e32 v7, v9, v4
+; GFX11-TRUE16-NEXT: v_add_nc_u32_e32 v9, v10, v5
+; GFX11-TRUE16-NEXT: v_cmp_u_f32_e32 vcc_lo, v5, v5
+; GFX11-TRUE16-NEXT: v_lshrrev_b32_e32 v2, 16, v2
+; GFX11-TRUE16-NEXT: v_lshrrev_b32_e32 v10, 16, v1
+; GFX11-TRUE16-NEXT: v_add_nc_u32_e32 v1, v6, v8
+; GFX11-TRUE16-NEXT: v_add_nc_u32_e32 v6, 0x7fff, v7
+; GFX11-TRUE16-NEXT: v_add_nc_u32_e32 v7, 0x7fff, v9
+; GFX11-TRUE16-NEXT: v_or_b32_e32 v9, 0x400000, v5
+; GFX11-TRUE16-NEXT: v_mov_b16_e32 v2.h, v0.l
+; GFX11-TRUE16-NEXT: v_add_nc_u32_e32 v1, 0x7fff, v1
+; GFX11-TRUE16-NEXT: s_delay_alu instid0(VALU_DEP_3) | instskip(SKIP_1) | instid1(VALU_DEP_2)
+; GFX11-TRUE16-NEXT: v_cndmask_b32_e32 v5, v7, v9, vcc_lo
+; GFX11-TRUE16-NEXT: v_cmp_u_f32_e32 vcc_lo, v4, v4
+; GFX11-TRUE16-NEXT: v_lshrrev_b32_e32 v5, 16, v5
+; GFX11-TRUE16-NEXT: v_cndmask_b32_e32 v4, v6, v11, vcc_lo
+; GFX11-TRUE16-NEXT: v_cmp_u_f32_e32 vcc_lo, v8, v8
+; GFX11-TRUE16-NEXT: v_cndmask_b32_e32 v6, v1, v12, vcc_lo
+; GFX11-TRUE16-NEXT: s_delay_alu instid0(VALU_DEP_3) | instskip(SKIP_1) | instid1(VALU_DEP_3)
+; GFX11-TRUE16-NEXT: v_lshrrev_b32_e32 v1, 16, v4
+; GFX11-TRUE16-NEXT: v_mov_b16_e32 v1.h, v10.l
+; GFX11-TRUE16-NEXT: v_lshrrev_b32_e32 v0, 16, v6
+; GFX11-TRUE16-NEXT: v_mov_b16_e32 v0.h, v5.l
+; GFX11-TRUE16-NEXT: s_setpc_b64 s[30:31]
+; GFX11-TRUE16-NEXT: .LBB47_3:
+; GFX11-TRUE16-NEXT: s_branch .LBB47_2
+; GFX11-TRUE16-NEXT: .LBB47_4:
+; GFX11-TRUE16-NEXT: v_dual_mov_b32 v0, s0 :: v_dual_mov_b32 v1, s1
+; GFX11-TRUE16-NEXT: v_dual_mov_b32 v2, s2 :: v_dual_mov_b32 v3, s3
+; GFX11-TRUE16-NEXT: s_setpc_b64 s[30:31]
+;
+; GFX11-FAKE16-LABEL: bitcast_v8bf16_to_v4f32_scalar:
+; GFX11-FAKE16: ; %bb.0:
+; GFX11-FAKE16-NEXT: s_waitcnt vmcnt(0) expcnt(0) lgkmcnt(0)
+; GFX11-FAKE16-NEXT: s_cmp_lg_u32 s16, 0
+; GFX11-FAKE16-NEXT: s_mov_b32 s4, 0
+; GFX11-FAKE16-NEXT: s_cbranch_scc0 .LBB47_3
+; GFX11-FAKE16-NEXT: ; %bb.1: ; %Flow
+; GFX11-FAKE16-NEXT: s_and_not1_b32 vcc_lo, exec_lo, s4
+; GFX11-FAKE16-NEXT: s_cbranch_vccnz .LBB47_4
+; GFX11-FAKE16-NEXT: .LBB47_2: ; %cmp.true
+; GFX11-FAKE16-NEXT: s_lshl_b32 s4, s3, 16
+; GFX11-FAKE16-NEXT: s_pack_lh_b32_b16 s3, 0, s3
+; GFX11-FAKE16-NEXT: v_add_f32_e64 v0, 0x40c00000, s4
+; GFX11-FAKE16-NEXT: v_add_f32_e64 v1, 0x40c00000, s3
+; GFX11-FAKE16-NEXT: s_pack_lh_b32_b16 s4, 0, s2
+; GFX11-FAKE16-NEXT: s_lshl_b32 s2, s2, 16
+; GFX11-FAKE16-NEXT: v_add_f32_e64 v3, 0x40c00000, s4
+; GFX11-FAKE16-NEXT: v_bfe_u32 v2, v0, 16, 1
+; GFX11-FAKE16-NEXT: v_bfe_u32 v4, v1, 16, 1
+; GFX11-FAKE16-NEXT: v_or_b32_e32 v6, 0x400000, v1
+; GFX11-FAKE16-NEXT: v_or_b32_e32 v8, 0x400000, v0
+; GFX11-FAKE16-NEXT: v_bfe_u32 v7, v3, 16, 1
+; GFX11-FAKE16-NEXT: v_cmp_u_f32_e32 vcc_lo, v0, v0
+; GFX11-FAKE16-NEXT: v_add_nc_u32_e32 v4, v4, v1
+; GFX11-FAKE16-NEXT: v_or_b32_e32 v9, 0x400000, v3
+; GFX11-FAKE16-NEXT: v_add_f32_e64 v5, 0x40c00000, s2
+; GFX11-FAKE16-NEXT: v_add_nc_u32_e32 v7, v7, v3
+; GFX11-FAKE16-NEXT: s_pack_lh_b32_b16 s3, 0, s1
+; GFX11-FAKE16-NEXT: v_add_nc_u32_e32 v4, 0x7fff, v4
+; GFX11-FAKE16-NEXT: v_add_nc_u32_e32 v2, v2, v0
+; GFX11-FAKE16-NEXT: s_lshl_b32 s1, s1, 16
+; GFX11-FAKE16-NEXT: v_add_nc_u32_e32 v7, 0x7fff, v7
+; GFX11-FAKE16-NEXT: s_delay_alu instid0(VALU_DEP_2) | instskip(NEXT) | instid1(VALU_DEP_1)
+; GFX11-FAKE16-NEXT: v_add_nc_u32_e32 v2, 0x7fff, v2
+; GFX11-FAKE16-NEXT: v_cndmask_b32_e32 v0, v2, v8, vcc_lo
+; GFX11-FAKE16-NEXT: v_cmp_u_f32_e32 vcc_lo, v1, v1
+; GFX11-FAKE16-NEXT: v_bfe_u32 v2, v5, 16, 1
+; GFX11-FAKE16-NEXT: v_add_f32_e64 v8, 0x40c00000, s1
+; GFX11-FAKE16-NEXT: s_lshl_b32 s1, s0, 16
+; GFX11-FAKE16-NEXT: s_pack_lh_b32_b16 s0, 0, s0
+; GFX11-FAKE16-NEXT: v_cndmask_b32_e32 v1, v4, v6, vcc_lo
+; GFX11-FAKE16-NEXT: v_cmp_u_f32_e32 vcc_lo, v3, v3
+; GFX11-FAKE16-NEXT: v_add_nc_u32_e32 v2, v2, v5
+; GFX11-FAKE16-NEXT: v_add_f32_e64 v4, 0x40c00000, s3
+; GFX11-FAKE16-NEXT: v_or_b32_e32 v13, 0x400000, v8
+; GFX11-FAKE16-NEXT: v_lshrrev_b32_e32 v0, 16, v0
+; GFX11-FAKE16-NEXT: s_delay_alu instid0(VALU_DEP_4) | instskip(SKIP_3) | instid1(VALU_DEP_4)
+; GFX11-FAKE16-NEXT: v_dual_cndmask_b32 v3, v7, v9 :: v_dual_add_nc_u32 v2, 0x7fff, v2
+; GFX11-FAKE16-NEXT: v_cmp_u_f32_e32 vcc_lo, v5, v5
+; GFX11-FAKE16-NEXT: v_bfe_u32 v7, v4, 16, 1
+; GFX11-FAKE16-NEXT: v_add_f32_e64 v9, 0x40c00000, s0
+; GFX11-FAKE16-NEXT: v_lshrrev_b32_e32 v6, 16, v3
+; GFX11-FAKE16-NEXT: v_or_b32_e32 v3, 0x400000, v5
+; GFX11-FAKE16-NEXT: v_bfe_u32 v5, v8, 16, 1
+; GFX11-FAKE16-NEXT: v_add_nc_u32_e32 v7, v7, v4
+; GFX11-FAKE16-NEXT: v_or_b32_e32 v12, 0x400000, v4
+; GFX11-FAKE16-NEXT: v_bfe_u32 v11, v9, 16, 1
+; GFX11-FAKE16-NEXT: v_cndmask_b32_e32 v2, v2, v3, vcc_lo
+; GFX11-FAKE16-NEXT: v_add_f32_e64 v3, 0x40c00000, s1
+; GFX11-FAKE16-NEXT: v_add_nc_u32_e32 v5, v5, v8
+; GFX11-FAKE16-NEXT: v_add_nc_u32_e32 v7, 0x7fff, v7
+; GFX11-FAKE16-NEXT: v_cmp_u_f32_e32 vcc_lo, v8, v8
+; GFX11-FAKE16-NEXT: v_add_nc_u32_e32 v11, v11, v9
+; GFX11-FAKE16-NEXT: v_bfe_u32 v10, v3, 16, 1
+; GFX11-FAKE16-NEXT: v_add_nc_u32_e32 v5, 0x7fff, v5
+; GFX11-FAKE16-NEXT: v_or_b32_e32 v14, 0x400000, v3
+; GFX11-FAKE16-NEXT: v_lshrrev_b32_e32 v2, 16, v2
+; GFX11-FAKE16-NEXT: v_add_nc_u32_e32 v8, 0x7fff, v11
+; GFX11-FAKE16-NEXT: s_delay_alu instid0(VALU_DEP_4) | instskip(SKIP_3) | instid1(VALU_DEP_4)
+; GFX11-FAKE16-NEXT: v_dual_cndmask_b32 v5, v5, v13 :: v_dual_add_nc_u32 v10, v10, v3
+; GFX11-FAKE16-NEXT: v_cmp_u_f32_e32 vcc_lo, v3, v3
+; GFX11-FAKE16-NEXT: v_or_b32_e32 v11, 0x400000, v9
+; GFX11-FAKE16-NEXT: v_lshrrev_b32_e32 v1, 16, v1
+; GFX11-FAKE16-NEXT: v_add_nc_u32_e32 v10, 0x7fff, v10
+; GFX11-FAKE16-NEXT: v_lshrrev_b32_e32 v5, 16, v5
+; GFX11-FAKE16-NEXT: v_and_b32_e32 v0, 0xffff, v0
+; GFX11-FAKE16-NEXT: v_and_b32_e32 v2, 0xffff, v2
+; GFX11-FAKE16-NEXT: s_delay_alu instid0(VALU_DEP_4) | instskip(SKIP_2) | instid1(VALU_DEP_4)
+; GFX11-FAKE16-NEXT: v_cndmask_b32_e32 v3, v10, v14, vcc_lo
+; GFX11-FAKE16-NEXT: v_cmp_u_f32_e32 vcc_lo, v4, v4
+; GFX11-FAKE16-NEXT: v_and_b32_e32 v5, 0xffff, v5
+; GFX11-FAKE16-NEXT: v_lshl_or_b32 v2, v6, 16, v2
+; GFX11-FAKE16-NEXT: s_delay_alu instid0(VALU_DEP_4) | instskip(SKIP_2) | instid1(VALU_DEP_2)
+; GFX11-FAKE16-NEXT: v_lshrrev_b32_e32 v3, 16, v3
+; GFX11-FAKE16-NEXT: v_cndmask_b32_e32 v4, v7, v12, vcc_lo
+; GFX11-FAKE16-NEXT: v_cmp_u_f32_e32 vcc_lo, v9, v9
+; GFX11-FAKE16-NEXT: v_lshrrev_b32_e32 v4, 16, v4
+; GFX11-FAKE16-NEXT: v_cndmask_b32_e32 v7, v8, v11, vcc_lo
+; GFX11-FAKE16-NEXT: v_and_b32_e32 v8, 0xffff, v3
+; GFX11-FAKE16-NEXT: v_lshl_or_b32 v3, v1, 16, v0
+; GFX11-FAKE16-NEXT: s_delay_alu instid0(VALU_DEP_4) | instskip(NEXT) | instid1(VALU_DEP_4)
+; GFX11-FAKE16-NEXT: v_lshl_or_b32 v1, v4, 16, v5
+; GFX11-FAKE16-NEXT: v_lshrrev_b32_e32 v7, 16, v7
+; GFX11-FAKE16-NEXT: s_delay_alu instid0(VALU_DEP_1)
+; GFX11-FAKE16-NEXT: v_lshl_or_b32 v0, v7, 16, v8
+; GFX11-FAKE16-NEXT: s_setpc_b64 s[30:31]
+; GFX11-FAKE16-NEXT: .LBB47_3:
+; GFX11-FAKE16-NEXT: s_branch .LBB47_2
+; GFX11-FAKE16-NEXT: .LBB47_4:
+; GFX11-FAKE16-NEXT: v_dual_mov_b32 v0, s0 :: v_dual_mov_b32 v1, s1
+; GFX11-FAKE16-NEXT: v_dual_mov_b32 v2, s2 :: v_dual_mov_b32 v3, s3
+; GFX11-FAKE16-NEXT: s_setpc_b64 s[30:31]
%cmp = icmp eq i32 %b, 0
br i1 %cmp, label %cmp.true, label %cmp.false
@@ -11622,108 +11818,206 @@ define inreg <2 x i64> @bitcast_v8bf16_to_v2i64_scalar(<8 x bfloat> inreg %a, i3
; GFX9-NEXT: v_mov_b32_e32 v3, s19
; GFX9-NEXT: s_setpc_b64 s[30:31]
;
-; GFX11-LABEL: bitcast_v8bf16_to_v2i64_scalar:
-; GFX11: ; %bb.0:
-; GFX11-NEXT: s_waitcnt vmcnt(0) expcnt(0) lgkmcnt(0)
-; GFX11-NEXT: s_cmp_lg_u32 s16, 0
-; GFX11-NEXT: s_mov_b32 s4, 0
-; GFX11-NEXT: s_cbranch_scc0 .LBB67_3
-; GFX11-NEXT: ; %bb.1: ; %Flow
-; GFX11-NEXT: s_and_not1_b32 vcc_lo, exec_lo, s4
-; GFX11-NEXT: s_cbranch_vccnz .LBB67_4
-; GFX11-NEXT: .LBB67_2: ; %cmp.true
-; GFX11-NEXT: s_lshl_b32 s4, s3, 16
-; GFX11-NEXT: s_pack_lh_b32_b16 s3, 0, s3
-; GFX11-NEXT: v_add_f32_e64 v0, 0x40c00000, s4
-; GFX11-NEXT: v_add_f32_e64 v1, 0x40c00000, s3
-; GFX11-NEXT: s_pack_lh_b32_b16 s4, 0, s2
-; GFX11-NEXT: s_lshl_b32 s2, s2, 16
-; GFX11-NEXT: v_add_f32_e64 v3, 0x40c00000, s4
-; GFX11-NEXT: v_bfe_u32 v2, v0, 16, 1
-; GFX11-NEXT: v_bfe_u32 v4, v1, 16, 1
-; GFX11-NEXT: v_or_b32_e32 v6, 0x400000, v1
-; GFX11-NEXT: v_or_b32_e32 v8, 0x400000, v0
-; GFX11-NEXT: v_bfe_u32 v7, v3, 16, 1
-; GFX11-NEXT: v_cmp_u_f32_e32 vcc_lo, v0, v0
-; GFX11-NEXT: v_add_nc_u32_e32 v4, v4, v1
-; GFX11-NEXT: v_or_b32_e32 v9, 0x400000, v3
-; GFX11-NEXT: v_add_f32_e64 v5, 0x40c00000, s2
-; GFX11-NEXT: v_add_nc_u32_e32 v7, v7, v3
-; GFX11-NEXT: s_pack_lh_b32_b16 s3, 0, s1
-; GFX11-NEXT: v_add_nc_u32_e32 v4, 0x7fff, v4
-; GFX11-NEXT: v_add_nc_u32_e32 v2, v2, v0
-; GFX11-NEXT: s_lshl_b32 s1, s1, 16
-; GFX11-NEXT: v_add_nc_u32_e32 v7, 0x7fff, v7
-; GFX11-NEXT: s_delay_alu instid0(VALU_DEP_2) | instskip(NEXT) | instid1(VALU_DEP_1)
-; GFX11-NEXT: v_add_nc_u32_e32 v2, 0x7fff, v2
-; GFX11-NEXT: v_cndmask_b32_e32 v0, v2, v8, vcc_lo
-; GFX11-NEXT: v_cmp_u_f32_e32 vcc_lo, v1, v1
-; GFX11-NEXT: v_bfe_u32 v2, v5, 16, 1
-; GFX11-NEXT: v_add_f32_e64 v8, 0x40c00000, s1
-; GFX11-NEXT: s_lshl_b32 s1, s0, 16
-; GFX11-NEXT: s_pack_lh_b32_b16 s0, 0, s0
-; GFX11-NEXT: v_cndmask_b32_e32 v1, v4, v6, vcc_lo
-; GFX11-NEXT: v_cmp_u_f32_e32 vcc_lo, v3, v3
-; GFX11-NEXT: v_add_nc_u32_e32 v2, v2, v5
-; GFX11-NEXT: v_add_f32_e64 v4, 0x40c00000, s3
-; GFX11-NEXT: v_or_b32_e32 v13, 0x400000, v8
-; GFX11-NEXT: v_lshrrev_b32_e32 v0, 16, v0
-; GFX11-NEXT: s_delay_alu instid0(VALU_DEP_4) | instskip(SKIP_3) | instid1(VALU_DEP_4)
-; GFX11-NEXT: v_dual_cndmask_b32 v3, v7, v9 :: v_dual_add_nc_u32 v2, 0x7fff, v2
-; GFX11-NEXT: v_cmp_u_f32_e32 vcc_lo, v5, v5
-; GFX11-NEXT: v_bfe_u32 v7, v4, 16, 1
-; GFX11-NEXT: v_add_f32_e64 v9, 0x40c00000, s0
-; GFX11-NEXT: v_lshrrev_b32_e32 v6, 16, v3
-; GFX11-NEXT: v_or_b32_e32 v3, 0x400000, v5
-; GFX11-NEXT: v_bfe_u32 v5, v8, 16, 1
-; GFX11-NEXT: v_add_nc_u32_e32 v7, v7, v4
-; GFX11-NEXT: v_or_b32_e32 v12, 0x400000, v4
-; GFX11-NEXT: v_bfe_u32 v11, v9, 16, 1
-; GFX11-NEXT: v_cndmask_b32_e32 v2, v2, v3, vcc_lo
-; GFX11-NEXT: v_add_f32_e64 v3, 0x40c00000, s1
-; GFX11-NEXT: v_add_nc_u32_e32 v5, v5, v8
-; GFX11-NEXT: v_add_nc_u32_e32 v7, 0x7fff, v7
-; GFX11-NEXT: v_cmp_u_f32_e32 vcc_lo, v8, v8
-; GFX11-NEXT: v_add_nc_u32_e32 v11, v11, v9
-; GFX11-NEXT: v_bfe_u32 v10, v3, 16, 1
-; GFX11-NEXT: v_add_nc_u32_e32 v5, 0x7fff, v5
-; GFX11-NEXT: v_or_b32_e32 v14, 0x400000, v3
-; GFX11-NEXT: v_lshrrev_b32_e32 v2, 16, v2
-; GFX11-NEXT: v_add_nc_u32_e32 v8, 0x7fff, v11
-; GFX11-NEXT: s_delay_alu instid0(VALU_DEP_4) | instskip(SKIP_3) | instid1(VALU_DEP_4)
-; GFX11-NEXT: v_dual_cndmask_b32 v5, v5, v13 :: v_dual_add_nc_u32 v10, v10, v3
-; GFX11-NEXT: v_cmp_u_f32_e32 vcc_lo, v3, v3
-; GFX11-NEXT: v_or_b32_e32 v11, 0x400000, v9
-; GFX11-NEXT: v_lshrrev_b32_e32 v1, 16, v1
-; GFX11-NEXT: v_add_nc_u32_e32 v10, 0x7fff, v10
-; GFX11-NEXT: v_lshrrev_b32_e32 v5, 16, v5
-; GFX11-NEXT: v_and_b32_e32 v0, 0xffff, v0
-; GFX11-NEXT: v_and_b32_e32 v2, 0xffff, v2
-; GFX11-NEXT: s_delay_alu instid0(VALU_DEP_4) | instskip(SKIP_2) | instid1(VALU_DEP_4)
-; GFX11-NEXT: v_cndmask_b32_e32 v3, v10, v14, vcc_lo
-; GFX11-NEXT: v_cmp_u_f32_e32 vcc_lo, v4, v4
-; GFX11-NEXT: v_and_b32_e32 v5, 0xffff, v5
-; GFX11-NEXT: v_lshl_or_b32 v2, v6, 16, v2
-; GFX11-NEXT: s_delay_alu instid0(VALU_DEP_4) | instskip(SKIP_2) | instid1(VALU_DEP_2)
-; GFX11-NEXT: v_lshrrev_b32_e32 v3, 16, v3
-; GFX11-NEXT: v_cndmask_b32_e32 v4, v7, v12, vcc_lo
-; GFX11-NEXT: v_cmp_u_f32_e32 vcc_lo, v9, v9
-; GFX11-NEXT: v_lshrrev_b32_e32 v4, 16, v4
-; GFX11-NEXT: v_cndmask_b32_e32 v7, v8, v11, vcc_lo
-; GFX11-NEXT: v_and_b32_e32 v8, 0xffff, v3
-; GFX11-NEXT: v_lshl_or_b32 v3, v1, 16, v0
-; GFX11-NEXT: s_delay_alu instid0(VALU_DEP_4) | instskip(NEXT) | instid1(VALU_DEP_4)
-; GFX11-NEXT: v_lshl_or_b32 v1, v4, 16, v5
-; GFX11-NEXT: v_lshrrev_b32_e32 v7, 16, v7
-; GFX11-NEXT: s_delay_alu instid0(VALU_DEP_1)
-; GFX11-NEXT: v_lshl_or_b32 v0, v7, 16, v8
-; GFX11-NEXT: s_setpc_b64 s[30:31]
-; GFX11-NEXT: .LBB67_3:
-; GFX11-NEXT: s_branch .LBB67_2
-; GFX11-NEXT: .LBB67_4:
-; GFX11-NEXT: v_dual_mov_b32 v0, s0 :: v_dual_mov_b32 v1, s1
-; GFX11-NEXT: v_dual_mov_b32 v2, s2 :: v_dual_mov_b32 v3, s3
-; GFX11-NEXT: s_setpc_b64 s[30:31]
+; GFX11-TRUE16-LABEL: bitcast_v8bf16_to_v2i64_scalar:
+; GFX11-TRUE16: ; %bb.0:
+; GFX11-TRUE16-NEXT: s_waitcnt vmcnt(0) expcnt(0) lgkmcnt(0)
+; GFX11-TRUE16-NEXT: s_cmp_lg_u32 s16, 0
+; GFX11-TRUE16-NEXT: s_mov_b32 s4, 0
+; GFX11-TRUE16-NEXT: s_cbranch_scc0 .LBB67_3
+; GFX11-TRUE16-NEXT: ; %bb.1: ; %Flow
+; GFX11-TRUE16-NEXT: s_and_not1_b32 vcc_lo, exec_lo, s4
+; GFX11-TRUE16-NEXT: s_cbranch_vccnz .LBB67_4
+; GFX11-TRUE16-NEXT: .LBB67_2: ; %cmp.true
+; GFX11-TRUE16-NEXT: s_pack_lh_b32_b16 s4, 0, s3
+; GFX11-TRUE16-NEXT: s_lshl_b32 s3, s3, 16
+; GFX11-TRUE16-NEXT: v_add_f32_e64 v0, 0x40c00000, s4
+; GFX11-TRUE16-NEXT: v_add_f32_e64 v1, 0x40c00000, s3
+; GFX11-TRUE16-NEXT: s_pack_lh_b32_b16 s4, 0, s2
+; GFX11-TRUE16-NEXT: s_lshl_b32 s2, s2, 16
+; GFX11-TRUE16-NEXT: v_add_f32_e64 v4, 0x40c00000, s4
+; GFX11-TRUE16-NEXT: v_bfe_u32 v2, v0, 16, 1
+; GFX11-TRUE16-NEXT: v_bfe_u32 v3, v1, 16, 1
+; GFX11-TRUE16-NEXT: v_or_b32_e32 v7, 0x400000, v0
+; GFX11-TRUE16-NEXT: v_cmp_u_f32_e32 vcc_lo, v0, v0
+; GFX11-TRUE16-NEXT: v_or_b32_e32 v9, 0x400000, v1
+; GFX11-TRUE16-NEXT: v_add_f32_e64 v5, 0x40c00000, s2
+; GFX11-TRUE16-NEXT: v_add_nc_u32_e32 v3, v3, v1
+; GFX11-TRUE16-NEXT: v_bfe_u32 v8, v4, 16, 1
+; GFX11-TRUE16-NEXT: s_pack_lh_b32_b16 s3, 0, s1
+; GFX11-TRUE16-NEXT: s_lshl_b32 s1, s1, 16
+; GFX11-TRUE16-NEXT: v_bfe_u32 v10, v5, 16, 1
+; GFX11-TRUE16-NEXT: v_add_nc_u32_e32 v3, 0x7fff, v3
+; GFX11-TRUE16-NEXT: v_add_nc_u32_e32 v2, v2, v0
+; GFX11-TRUE16-NEXT: v_add_nc_u32_e32 v8, v8, v4
+; GFX11-TRUE16-NEXT: v_add_f32_e64 v6, 0x40c00000, s3
+; GFX11-TRUE16-NEXT: s_pack_lh_b32_b16 s2, 0, s0
+; GFX11-TRUE16-NEXT: s_lshl_b32 s0, s0, 16
+; GFX11-TRUE16-NEXT: v_add_nc_u32_e32 v2, 0x7fff, v2
+; GFX11-TRUE16-NEXT: s_delay_alu instid0(VALU_DEP_1)
+; GFX11-TRUE16-NEXT: v_cndmask_b32_e32 v0, v2, v7, vcc_lo
+; GFX11-TRUE16-NEXT: v_cmp_u_f32_e32 vcc_lo, v1, v1
+; GFX11-TRUE16-NEXT: v_or_b32_e32 v2, 0x400000, v4
+; GFX11-TRUE16-NEXT: v_add_nc_u32_e32 v7, 0x7fff, v8
+; GFX11-TRUE16-NEXT: v_add_nc_u32_e32 v8, v10, v5
+; GFX11-TRUE16-NEXT: v_lshrrev_b32_e32 v0, 16, v0
+; GFX11-TRUE16-NEXT: v_cndmask_b32_e32 v1, v3, v9, vcc_lo
+; GFX11-TRUE16-NEXT: v_cmp_u_f32_e32 vcc_lo, v4, v4
+; GFX11-TRUE16-NEXT: s_delay_alu instid0(VALU_DEP_4) | instskip(SKIP_1) | instid1(VALU_DEP_4)
+; GFX11-TRUE16-NEXT: v_add_nc_u32_e32 v4, 0x7fff, v8
+; GFX11-TRUE16-NEXT: v_add_f32_e64 v8, 0x40c00000, s0
+; GFX11-TRUE16-NEXT: v_lshrrev_b32_e32 v3, 16, v1
+; GFX11-TRUE16-NEXT: v_bfe_u32 v1, v6, 16, 1
+; GFX11-TRUE16-NEXT: v_cndmask_b32_e32 v2, v7, v2, vcc_lo
+; GFX11-TRUE16-NEXT: v_or_b32_e32 v7, 0x400000, v5
+; GFX11-TRUE16-NEXT: v_cmp_u_f32_e32 vcc_lo, v5, v5
+; GFX11-TRUE16-NEXT: v_mov_b16_e32 v3.h, v0.l
+; GFX11-TRUE16-NEXT: v_add_nc_u32_e32 v1, v1, v6
+; GFX11-TRUE16-NEXT: v_lshrrev_b32_e32 v0, 16, v2
+; GFX11-TRUE16-NEXT: v_add_f32_e64 v5, 0x40c00000, s2
+; GFX11-TRUE16-NEXT: v_cndmask_b32_e32 v2, v4, v7, vcc_lo
+; GFX11-TRUE16-NEXT: v_add_f32_e64 v4, 0x40c00000, s1
+; GFX11-TRUE16-NEXT: v_add_nc_u32_e32 v1, 0x7fff, v1
+; GFX11-TRUE16-NEXT: v_or_b32_e32 v7, 0x400000, v6
+; GFX11-TRUE16-NEXT: v_bfe_u32 v10, v5, 16, 1
+; GFX11-TRUE16-NEXT: v_cmp_u_f32_e32 vcc_lo, v6, v6
+; GFX11-TRUE16-NEXT: v_bfe_u32 v9, v4, 16, 1
+; GFX11-TRUE16-NEXT: v_bfe_u32 v6, v8, 16, 1
+; GFX11-TRUE16-NEXT: v_or_b32_e32 v12, 0x400000, v8
+; GFX11-TRUE16-NEXT: v_or_b32_e32 v11, 0x400000, v4
+; GFX11-TRUE16-NEXT: v_cndmask_b32_e32 v1, v1, v7, vcc_lo
+; GFX11-TRUE16-NEXT: v_add_nc_u32_e32 v7, v9, v4
+; GFX11-TRUE16-NEXT: v_add_nc_u32_e32 v9, v10, v5
+; GFX11-TRUE16-NEXT: v_cmp_u_f32_e32 vcc_lo, v5, v5
+; GFX11-TRUE16-NEXT: v_lshrrev_b32_e32 v2, 16, v2
+; GFX11-TRUE16-NEXT: v_lshrrev_b32_e32 v10, 16, v1
+; GFX11-TRUE16-NEXT: v_add_nc_u32_e32 v1, v6, v8
+; GFX11-TRUE16-NEXT: v_add_nc_u32_e32 v6, 0x7fff, v7
+; GFX11-TRUE16-NEXT: v_add_nc_u32_e32 v7, 0x7fff, v9
+; GFX11-TRUE16-NEXT: v_or_b32_e32 v9, 0x400000, v5
+; GFX11-TRUE16-NEXT: v_mov_b16_e32 v2.h, v0.l
+; GFX11-TRUE16-NEXT: v_add_nc_u32_e32 v1, 0x7fff, v1
+; GFX11-TRUE16-NEXT: s_delay_alu instid0(VALU_DEP_3) | instskip(SKIP_1) | instid1(VALU_DEP_2)
+; GFX11-TRUE16-NEXT: v_cndmask_b32_e32 v5, v7, v9, vcc_lo
+; GFX11-TRUE16-NEXT: v_cmp_u_f32_e32 vcc_lo, v4, v4
+; GFX11-TRUE16-NEXT: v_lshrrev_b32_e32 v5, 16, v5
+; GFX11-TRUE16-NEXT: v_cndmask_b32_e32 v4, v6, v11, vcc_lo
+; GFX11-TRUE16-NEXT: v_cmp_u_f32_e32 vcc_lo, v8, v8
+; GFX11-TRUE16-NEXT: v_cndmask_b32_e32 v6, v1, v12, vcc_lo
+; GFX11-TRUE16-NEXT: s_delay_alu instid0(VALU_DEP_3) | instskip(SKIP_1) | instid1(VALU_DEP_3)
+; GFX11-TRUE16-NEXT: v_lshrrev_b32_e32 v1, 16, v4
+; GFX11-TRUE16-NEXT: v_mov_b16_e32 v1.h, v10.l
+; GFX11-TRUE16-NEXT: v_lshrrev_b32_e32 v0, 16, v6
+; GFX11-TRUE16-NEXT: v_mov_b16_e32 v0.h, v5.l
+; GFX11-TRUE16-NEXT: s_setpc_b64 s[30:31]
+; GFX11-TRUE16-NEXT: .LBB67_3:
+; GFX11-TRUE16-NEXT: s_branch .LBB67_2
+; GFX11-TRUE16-NEXT: .LBB67_4:
+; GFX11-TRUE16-NEXT: v_dual_mov_b32 v0, s0 :: v_dual_mov_b32 v1, s1
+; GFX11-TRUE16-NEXT: v_dual_mov_b32 v2, s2 :: v_dual_mov_b32 v3, s3
+; GFX11-TRUE16-NEXT: s_setpc_b64 s[30:31]
+;
+; GFX11-FAKE16-LABEL: bitcast_v8bf16_to_v2i64_scalar:
+; GFX11-FAKE16: ; %bb.0:
+; GFX11-FAKE16-NEXT: s_waitcnt vmcnt(0) expcnt(0) lgkmcnt(0)
+; GFX11-FAKE16-NEXT: s_cmp_lg_u32 s16, 0
+; GFX11-FAKE16-NEXT: s_mov_b32 s4, 0
+; GFX11-FAKE16-NEXT: s_cbranch_scc0 .LBB67_3
+; GFX11-FAKE16-NEXT: ; %bb.1: ; %Flow
+; GFX11-FAKE16-NEXT: s_and_not1_b32 vcc_lo, exec_lo, s4
+; GFX11-FAKE16-NEXT: s_cbranch_vccnz .LBB67_4
+; GFX11-FAKE16-NEXT: .LBB67_2: ; %cmp.true
+; GFX11-FAKE16-NEXT: s_lshl_b32 s4, s3, 16
+; GFX11-FAKE16-NEXT: s_pack_lh_b32_b16 s3, 0, s3
+; GFX11-FAKE16-NEXT: v_add_f32_e64 v0, 0x40c00000, s4
+; GFX11-FAKE16-NEXT: v_add_f32_e64 v1, 0x40c00000, s3
+; GFX11-FAKE16-NEXT: s_pack_lh_b32_b16 s4, 0, s2
+; GFX11-FAKE16-NEXT: s_lshl_b32 s2, s2, 16
+; GFX11-FAKE16-NEXT: v_add_f32_e64 v3, 0x40c00000, s4
+; GFX11-FAKE16-NEXT: v_bfe_u32 v2, v0, 16, 1
+; GFX11-FAKE16-NEXT: v_bfe_u32 v4, v1, 16, 1
+; GFX11-FAKE16-NEXT: v_or_b32_e32 v6, 0x400000, v1
+; GFX11-FAKE16-NEXT: v_or_b32_e32 v8, 0x400000, v0
+; GFX11-FAKE16-NEXT: v_bfe_u32 v7, v3, 16, 1
+; GFX11-FAKE16-NEXT: v_cmp_u_f32_e32 vcc_lo, v0, v0
+; GFX11-FAKE16-NEXT: v_add_nc_u32_e32 v4, v4, v1
+; GFX11-FAKE16-NEXT: v_or_b32_e32 v9, 0x400000, v3
+; GFX11-FAKE16-NEXT: v_add_f32_e64 v5, 0x40c00000, s2
+; GFX11-FAKE16-NEXT: v_add_nc_u32_e32 v7, v7, v3
+; GFX11-FAKE16-NEXT: s_pack_lh_b32_b16 s3, 0, s1
+; GFX11-FAKE16-NEXT: v_add_nc_u32_e32 v4, 0x7fff, v4
+; GFX11-FAKE16-NEXT: v_add_nc_u32_e32 v2, v2, v0
+; GFX11-FAKE16-NEXT: s_lshl_b32 s1, s1, 16
+; GFX11-FAKE16-NEXT: v_add_nc_u32_e32 v7, 0x7fff, v7
+; GFX11-FAKE16-NEXT: s_delay_alu instid0(VALU_DEP_2) | instskip(NEXT) | instid1(VALU_DEP_1)
+; GFX11-FAKE16-NEXT: v_add_nc_u32_e32 v2, 0x7fff, v2
+; GFX11-FAKE16-NEXT: v_cndmask_b32_e32 v0, v2, v8, vcc_lo
+; GFX11-FAKE16-NEXT: v_cmp_u_f32_e32 vcc_lo, v1, v1
+; GFX11-FAKE16-NEXT: v_bfe_u32 v2, v5, 16, 1
+; GFX11-FAKE16-NEXT: v_add_f32_e64 v8, 0x40c00000, s1
+; GFX11-FAKE16-NEXT: s_lshl_b32 s1, s0, 16
+; GFX11-FAKE16-NEXT: s_pack_lh_b32_b16 s0, 0, s0
+; GFX11-FAKE16-NEXT: v_cndmask_b32_e32 v1, v4, v6, vcc_lo
+; GFX11-FAKE16-NEXT: v_cmp_u_f32_e32 vcc_lo, v3, v3
+; GFX11-FAKE16-NEXT: v_add_nc_u32_e32 v2, v2, v5
+; GFX11-FAKE16-NEXT: v_add_f32_e64 v4, 0x40c00000, s3
+; GFX11-FAKE16-NEXT: v_or_b32_e32 v13, 0x400000, v8
+; GFX11-FAKE16-NEXT: v_lshrrev_b32_e32 v0, 16, v0
+; GFX11-FAKE16-NEXT: s_delay_alu instid0(VALU_DEP_4) | instskip(SKIP_3) | instid1(VALU_DEP_4)
+; GFX11-FAKE16-NEXT: v_dual_cndmask_b32 v3, v7, v9 :: v_dual_add_nc_u32 v2, 0x7fff, v2
+; GFX11-FAKE16-NEXT: v_cmp_u_f32_e32 vcc_lo, v5, v5
+; GFX11-FAKE16-NEXT: v_bfe_u32 v7, v4, 16, 1
+; GFX11-FAKE16-NEXT: v_add_f32_e64 v9, 0x40c00000, s0
+; GFX11-FAKE16-NEXT: v_lshrrev_b32_e32 v6, 16, v3
+; GFX11-FAKE16-NEXT: v_or_b32_e32 v3, 0x400000, v5
+; GFX11-FAKE16-NEXT: v_bfe_u32 v5, v8, 16, 1
+; GFX11-FAKE16-NEXT: v_add_nc_u32_e32 v7, v7, v4
+; GFX11-FAKE16-NEXT: v_or_b32_e32 v12, 0x400000, v4
+; GFX11-FAKE16-NEXT: v_bfe_u32 v11, v9, 16, 1
+; GFX11-FAKE16-NEXT: v_cndmask_b32_e32 v2, v2, v3, vcc_lo
+; GFX11-FAKE16-NEXT: v_add_f32_e64 v3, 0x40c00000, s1
+; GFX11-FAKE16-NEXT: v_add_nc_u32_e32 v5, v5, v8
+; GFX11-FAKE16-NEXT: v_add_nc_u32_e32 v7, 0x7fff, v7
+; GFX11-FAKE16-NEXT: v_cmp_u_f32_e32 vcc_lo, v8, v8
+; GFX11-FAKE16-NEXT: v_add_nc_u32_e32 v11, v11, v9
+; GFX11-FAKE16-NEXT: v_bfe_u32 v10, v3, 16, 1
+; GFX11-FAKE16-NEXT: v_add_nc_u32_e32 v5, 0x7fff, v5
+; GFX11-FAKE16-NEXT: v_or_b32_e32 v14, 0x400000, v3
+; GFX11-FAKE16-NEXT: v_lshrrev_b32_e32 v2, 16, v2
+; GFX11-FAKE16-NEXT: v_add_nc_u32_e32 v8, 0x7fff, v11
+; GFX11-FAKE16-NEXT: s_delay_alu instid0(VALU_DEP_4) | instskip(SKIP_3) | instid1(VALU_DEP_4)
+; GFX11-FAKE16-NEXT: v_dual_cndmask_b32 v5, v5, v13 :: v_dual_add_nc_u32 v10, v10, v3
+; GFX11-FAKE16-NEXT: v_cmp_u_f32_e32 vcc_lo, v3, v3
+; GFX11-FAKE16-NEXT: v_or_b32_e32 v11, 0x400000, v9
+; GFX11-FAKE16-NEXT: v_lshrrev_b32_e32 v1, 16, v1
+; GFX11-FAKE16-NEXT: v_add_nc_u32_e32 v10, 0x7fff, v10
+; GFX11-FAKE16-NEXT: v_lshrrev_b32_e32 v5, 16, v5
+; GFX11-FAKE16-NEXT: v_and_b32_e32 v0, 0xffff, v0
+; GFX11-FAKE16-NEXT: v_and_b32_e32 v2, 0xffff, v2
+; GFX11-FAKE16-NEXT: s_delay_alu instid0(VALU_DEP_4) | instskip(SKIP_2) | instid1(VALU_DEP_4)
+; GFX11-FAKE16-NEXT: v_cndmask_b32_e32 v3, v10, v14, vcc_lo
+; GFX11-FAKE16-NEXT: v_cmp_u_f32_e32 vcc_lo, v4, v4
+; GFX11-FAKE16-NEXT: v_and_b32_e32 v5, 0xffff, v5
+; GFX11-FAKE16-NEXT: v_lshl_or_b32 v2, v6, 16, v2
+; GFX11-FAKE16-NEXT: s_delay_alu instid0(VALU_DEP_4) | instskip(SKIP_2) | instid1(VALU_DEP_2)
+; GFX11-FAKE16-NEXT: v_lshrrev_b32_e32 v3, 16, v3
+; GFX11-FAKE16-NEXT: v_cndmask_b32_e32 v4, v7, v12, vcc_lo
+; GFX11-FAKE16-NEXT: v_cmp_u_f32_e32 vcc_lo, v9, v9
+; GFX11-FAKE16-NEXT: v_lshrrev_b32_e32 v4, 16, v4
+; GFX11-FAKE16-NEXT: v_cndmask_b32_e32 v7, v8, v11, vcc_lo
+; GFX11-FAKE16-NEXT: v_and_b32_e32 v8, 0xffff, v3
+; GFX11-FAKE16-NEXT: v_lshl_or_b32 v3, v1, 16, v0
+; GFX11-FAKE16-NEXT: s_delay_alu instid0(VALU_DEP_4) | instskip(NEXT) | instid1(VALU_DEP_4)
+; GFX11-FAKE16-NEXT: v_lshl_or_b32 v1, v4, 16, v5
+; GFX11-FAKE16-NEXT: v_lshrrev_b32_e32 v7, 16, v7
+; GFX11-FAKE16-NEXT: s_delay_alu instid0(VALU_DEP_1)
+; GFX11-FAKE16-NEXT: v_lshl_or_b32 v0, v7, 16, v8
+; GFX11-FAKE16-NEXT: s_setpc_b64 s[30:31]
+; GFX11-FAKE16-NEXT: .LBB67_3:
+; GFX11-FAKE16-NEXT: s_branch .LBB67_2
+; GFX11-FAKE16-NEXT: .LBB67_4:
+; GFX11-FAKE16-NEXT: v_dual_mov_b32 v0, s0 :: v_dual_mov_b32 v1, s1
+; GFX11-FAKE16-NEXT: v_dual_mov_b32 v2, s2 :: v_dual_mov_b32 v3, s3
+; GFX11-FAKE16-NEXT: s_setpc_b64 s[30:31]
%cmp = icmp eq i32 %b, 0
br i1 %cmp, label %cmp.true, label %cmp.false
@@ -15292,108 +15586,206 @@ define inreg <2 x double> @bitcast_v8bf16_to_v2f64_scalar(<8 x bfloat> inreg %a,
; GFX9-NEXT: v_mov_b32_e32 v3, s19
; GFX9-NEXT: s_setpc_b64 s[30:31]
;
-; GFX11-LABEL: bitcast_v8bf16_to_v2f64_scalar:
-; GFX11: ; %bb.0:
-; GFX11-NEXT: s_waitcnt vmcnt(0) expcnt(0) lgkmcnt(0)
-; GFX11-NEXT: s_cmp_lg_u32 s16, 0
-; GFX11-NEXT: s_mov_b32 s4, 0
-; GFX11-NEXT: s_cbranch_scc0 .LBB83_3
-; GFX11-NEXT: ; %bb.1: ; %Flow
-; GFX11-NEXT: s_and_not1_b32 vcc_lo, exec_lo, s4
-; GFX11-NEXT: s_cbranch_vccnz .LBB83_4
-; GFX11-NEXT: .LBB83_2: ; %cmp.true
-; GFX11-NEXT: s_lshl_b32 s4, s3, 16
-; GFX11-NEXT: s_pack_lh_b32_b16 s3, 0, s3
-; GFX11-NEXT: v_add_f32_e64 v0, 0x40c00000, s4
-; GFX11-NEXT: v_add_f32_e64 v1, 0x40c00000, s3
-; GFX11-NEXT: s_pack_lh_b32_b16 s4, 0, s2
-; GFX11-NEXT: s_lshl_b32 s2, s2, 16
-; GFX11-NEXT: v_add_f32_e64 v3, 0x40c00000, s4
-; GFX11-NEXT: v_bfe_u32 v2, v0, 16, 1
-; GFX11-NEXT: v_bfe_u32 v4, v1, 16, 1
-; GFX11-NEXT: v_or_b32_e32 v6, 0x400000, v1
-; GFX11-NEXT: v_or_b32_e32 v8, 0x400000, v0
-; GFX11-NEXT: v_bfe_u32 v7, v3, 16, 1
-; GFX11-NEXT: v_cmp_u_f32_e32 vcc_lo, v0, v0
-; GFX11-NEXT: v_add_nc_u32_e32 v4, v4, v1
-; GFX11-NEXT: v_or_b32_e32 v9, 0x400000, v3
-; GFX11-NEXT: v_add_f32_e64 v5, 0x40c00000, s2
-; GFX11-NEXT: v_add_nc_u32_e32 v7, v7, v3
-; GFX11-NEXT: s_pack_lh_b32_b16 s3, 0, s1
-; GFX11-NEXT: v_add_nc_u32_e32 v4, 0x7fff, v4
-; GFX11-NEXT: v_add_nc_u32_e32 v2, v2, v0
-; GFX11-NEXT: s_lshl_b32 s1, s1, 16
-; GFX11-NEXT: v_add_nc_u32_e32 v7, 0x7fff, v7
-; GFX11-NEXT: s_delay_alu instid0(VALU_DEP_2) | instskip(NEXT) | instid1(VALU_DEP_1)
-; GFX11-NEXT: v_add_nc_u32_e32 v2, 0x7fff, v2
-; GFX11-NEXT: v_cndmask_b32_e32 v0, v2, v8, vcc_lo
-; GFX11-NEXT: v_cmp_u_f32_e32 vcc_lo, v1, v1
-; GFX11-NEXT: v_bfe_u32 v2, v5, 16, 1
-; GFX11-NEXT: v_add_f32_e64 v8, 0x40c00000, s1
-; GFX11-NEXT: s_lshl_b32 s1, s0, 16
-; GFX11-NEXT: s_pack_lh_b32_b16 s0, 0, s0
-; GFX11-NEXT: v_cndmask_b32_e32 v1, v4, v6, vcc_lo
-; GFX11-NEXT: v_cmp_u_f32_e32 vcc_lo, v3, v3
-; GFX11-NEXT: v_add_nc_u32_e32 v2, v2, v5
-; GFX11-NEXT: v_add_f32_e64 v4, 0x40c00000, s3
-; GFX11-NEXT: v_or_b32_e32 v13, 0x400000, v8
-; GFX11-NEXT: v_lshrrev_b32_e32 v0, 16, v0
-; GFX11-NEXT: s_delay_alu instid0(VALU_DEP_4) | instskip(SKIP_3) | instid1(VALU_DEP_4)
-; GFX11-NEXT: v_dual_cndmask_b32 v3, v7, v9 :: v_dual_add_nc_u32 v2, 0x7fff, v2
-; GFX11-NEXT: v_cmp_u_f32_e32 vcc_lo, v5, v5
-; GFX11-NEXT: v_bfe_u32 v7, v4, 16, 1
-; GFX11-NEXT: v_add_f32_e64 v9, 0x40c00000, s0
-; GFX11-NEXT: v_lshrrev_b32_e32 v6, 16, v3
-; GFX11-NEXT: v_or_b32_e32 v3, 0x400000, v5
-; GFX11-NEXT: v_bfe_u32 v5, v8, 16, 1
-; GFX11-NEXT: v_add_nc_u32_e32 v7, v7, v4
-; GFX11-NEXT: v_or_b32_e32 v12, 0x400000, v4
-; GFX11-NEXT: v_bfe_u32 v11, v9, 16, 1
-; GFX11-NEXT: v_cndmask_b32_e32 v2, v2, v3, vcc_lo
-; GFX11-NEXT: v_add_f32_e64 v3, 0x40c00000, s1
-; GFX11-NEXT: v_add_nc_u32_e32 v5, v5, v8
-; GFX11-NEXT: v_add_nc_u32_e32 v7, 0x7fff, v7
-; GFX11-NEXT: v_cmp_u_f32_e32 vcc_lo, v8, v8
-; GFX11-NEXT: v_add_nc_u32_e32 v11, v11, v9
-; GFX11-NEXT: v_bfe_u32 v10, v3, 16, 1
-; GFX11-NEXT: v_add_nc_u32_e32 v5, 0x7fff, v5
-; GFX11-NEXT: v_or_b32_e32 v14, 0x400000, v3
-; GFX11-NEXT: v_lshrrev_b32_e32 v2, 16, v2
-; GFX11-NEXT: v_add_nc_u32_e32 v8, 0x7fff, v11
-; GFX11-NEXT: s_delay_alu instid0(VALU_DEP_4) | instskip(SKIP_3) | instid1(VALU_DEP_4)
-; GFX11-NEXT: v_dual_cndmask_b32 v5, v5, v13 :: v_dual_add_nc_u32 v10, v10, v3
-; GFX11-NEXT: v_cmp_u_f32_e32 vcc_lo, v3, v3
-; GFX11-NEXT: v_or_b32_e32 v11, 0x400000, v9
-; GFX11-NEXT: v_lshrrev_b32_e32 v1, 16, v1
-; GFX11-NEXT: v_add_nc_u32_e32 v10, 0x7fff, v10
-; GFX11-NEXT: v_lshrrev_b32_e32 v5, 16, v5
-; GFX11-NEXT: v_and_b32_e32 v0, 0xffff, v0
-; GFX11-NEXT: v_and_b32_e32 v2, 0xffff, v2
-; GFX11-NEXT: s_delay_alu instid0(VALU_DEP_4) | instskip(SKIP_2) | instid1(VALU_DEP_4)
-; GFX11-NEXT: v_cndmask_b32_e32 v3, v10, v14, vcc_lo
-; GFX11-NEXT: v_cmp_u_f32_e32 vcc_lo, v4, v4
-; GFX11-NEXT: v_and_b32_e32 v5, 0xffff, v5
-; GFX11-NEXT: v_lshl_or_b32 v2, v6, 16, v2
-; GFX11-NEXT: s_delay_alu instid0(VALU_DEP_4) | instskip(SKIP_2) | instid1(VALU_DEP_2)
-; GFX11-NEXT: v_lshrrev_b32_e32 v3, 16, v3
-; GFX11-NEXT: v_cndmask_b32_e32 v4, v7, v12, vcc_lo
-; GFX11-NEXT: v_cmp_u_f32_e32 vcc_lo, v9, v9
-; GFX11-NEXT: v_lshrrev_b32_e32 v4, 16, v4
-; GFX11-NEXT: v_cndmask_b32_e32 v7, v8, v11, vcc_lo
-; GFX11-NEXT: v_and_b32_e32 v8, 0xffff, v3
-; GFX11-NEXT: v_lshl_or_b32 v3, v1, 16, v0
-; GFX11-NEXT: s_delay_alu instid0(VALU_DEP_4) | instskip(NEXT) | instid1(VALU_DEP_4)
-; GFX11-NEXT: v_lshl_or_b32 v1, v4, 16, v5
-; GFX11-NEXT: v_lshrrev_b32_e32 v7, 16, v7
-; GFX11-NEXT: s_delay_alu instid0(VALU_DEP_1)
-; GFX11-NEXT: v_lshl_or_b32 v0, v7, 16, v8
-; GFX11-NEXT: s_setpc_b64 s[30:31]
-; GFX11-NEXT: .LBB83_3:
-; GFX11-NEXT: s_branch .LBB83_2
-; GFX11-NEXT: .LBB83_4:
-; GFX11-NEXT: v_dual_mov_b32 v0, s0 :: v_dual_mov_b32 v1, s1
-; GFX11-NEXT: v_dual_mov_b32 v2, s2 :: v_dual_mov_b32 v3, s3
-; GFX11-NEXT: s_setpc_b64 s[30:31]
+; GFX11-TRUE16-LABEL: bitcast_v8bf16_to_v2f64_scalar:
+; GFX11-TRUE16: ; %bb.0:
+; GFX11-TRUE16-NEXT: s_waitcnt vmcnt(0) expcnt(0) lgkmcnt(0)
+; GFX11-TRUE16-NEXT: s_cmp_lg_u32 s16, 0
+; GFX11-TRUE16-NEXT: s_mov_b32 s4, 0
+; GFX11-TRUE16-NEXT: s_cbranch_scc0 .LBB83_3
+; GFX11-TRUE16-NEXT: ; %bb.1: ; %Flow
+; GFX11-TRUE16-NEXT: s_and_not1_b32 vcc_lo, exec_lo, s4
+; GFX11-TRUE16-NEXT: s_cbranch_vccnz .LBB83_4
+; GFX11-TRUE16-NEXT: .LBB83_2: ; %cmp.true
+; GFX11-TRUE16-NEXT: s_pack_lh_b32_b16 s4, 0, s3
+; GFX11-TRUE16-NEXT: s_lshl_b32 s3, s3, 16
+; GFX11-TRUE16-NEXT: v_add_f32_e64 v0, 0x40c00000, s4
+; GFX11-TRUE16-NEXT: v_add_f32_e64 v1, 0x40c00000, s3
+; GFX11-TRUE16-NEXT: s_pack_lh_b32_b16 s4, 0, s2
+; GFX11-TRUE16-NEXT: s_lshl_b32 s2, s2, 16
+; GFX11-TRUE16-NEXT: v_add_f32_e64 v4, 0x40c00000, s4
+; GFX11-TRUE16-NEXT: v_bfe_u32 v2, v0, 16, 1
+; GFX11-TRUE16-NEXT: v_bfe_u32 v3, v1, 16, 1
+; GFX11-TRUE16-NEXT: v_or_b32_e32 v7, 0x400000, v0
+; GFX11-TRUE16-NEXT: v_cmp_u_f32_e32 vcc_lo, v0, v0
+; GFX11-TRUE16-NEXT: v_or_b32_e32 v9, 0x400000, v1
+; GFX11-TRUE16-NEXT: v_add_f32_e64 v5, 0x40c00000, s2
+; GFX11-TRUE16-NEXT: v_add_nc_u32_e32 v3, v3, v1
+; GFX11-TRUE16-NEXT: v_bfe_u32 v8, v4, 16, 1
+; GFX11-TRUE16-NEXT: s_pack_lh_b32_b16 s3, 0, s1
+; GFX11-TRUE16-NEXT: s_lshl_b32 s1, s1, 16
+; GFX11-TRUE16-NEXT: v_bfe_u32 v10, v5, 16, 1
+; GFX11-TRUE16-NEXT: v_add_nc_u32_e32 v3, 0x7fff, v3
+; GFX11-TRUE16-NEXT: v_add_nc_u32_e32 v2, v2, v0
+; GFX11-TRUE16-NEXT: v_add_nc_u32_e32 v8, v8, v4
+; GFX11-TRUE16-NEXT: v_add_f32_e64 v6, 0x40c00000, s3
+; GFX11-TRUE16-NEXT: s_pack_lh_b32_b16 s2, 0, s0
+; GFX11-TRUE16-NEXT: s_lshl_b32 s0, s0, 16
+; GFX11-TRUE16-NEXT: v_add_nc_u32_e32 v2, 0x7fff, v2
+; GFX11-TRUE16-NEXT: s_delay_alu instid0(VALU_DEP_1)
+; GFX11-TRUE16-NEXT: v_cndmask_b32_e32 v0, v2, v7, vcc_lo
+; GFX11-TRUE16-NEXT: v_cmp_u_f32_e32 vcc_lo, v1, v1
+; GFX11-TRUE16-NEXT: v_or_b32_e32 v2, 0x400000, v4
+; GFX11-TRUE16-NEXT: v_add_nc_u32_e32 v7, 0x7fff, v8
+; GFX11-TRUE16-NEXT: v_add_nc_u32_e32 v8, v10, v5
+; GFX11-TRUE16-NEXT: v_lshrrev_b32_e32 v0, 16, v0
+; GFX11-TRUE16-NEXT: v_cndmask_b32_e32 v1, v3, v9, vcc_lo
+; GFX11-TRUE16-NEXT: v_cmp_u_f32_e32 vcc_lo, v4, v4
+; GFX11-TRUE16-NEXT: s_delay_alu instid0(VALU_DEP_4) | instskip(SKIP_1) | instid1(VALU_DEP_4)
+; GFX11-TRUE16-NEXT: v_add_nc_u32_e32 v4, 0x7fff, v8
+; GFX11-TRUE16-NEXT: v_add_f32_e64 v8, 0x40c00000, s0
+; GFX11-TRUE16-NEXT: v_lshrrev_b32_e32 v3, 16, v1
+; GFX11-TRUE16-NEXT: v_bfe_u32 v1, v6, 16, 1
+; GFX11-TRUE16-NEXT: v_cndmask_b32_e32 v2, v7, v2, vcc_lo
+; GFX11-TRUE16-NEXT: v_or_b32_e32 v7, 0x400000, v5
+; GFX11-TRUE16-NEXT: v_cmp_u_f32_e32 vcc_lo, v5, v5
+; GFX11-TRUE16-NEXT: v_mov_b16_e32 v3.h, v0.l
+; GFX11-TRUE16-NEXT: v_add_nc_u32_e32 v1, v1, v6
+; GFX11-TRUE16-NEXT: v_lshrrev_b32_e32 v0, 16, v2
+; GFX11-TRUE16-NEXT: v_add_f32_e64 v5, 0x40c00000, s2
+; GFX11-TRUE16-NEXT: v_cndmask_b32_e32 v2, v4, v7, vcc_lo
+; GFX11-TRUE16-NEXT: v_add_f32_e64 v4, 0x40c00000, s1
+; GFX11-TRUE16-NEXT: v_add_nc_u32_e32 v1, 0x7fff, v1
+; GFX11-TRUE16-NEXT: v_or_b32_e32 v7, 0x400000, v6
+; GFX11-TRUE16-NEXT: v_bfe_u32 v10, v5, 16, 1
+; GFX11-TRUE16-NEXT: v_cmp_u_f32_e32 vcc_lo, v6, v6
+; GFX11-TRUE16-NEXT: v_bfe_u32 v9, v4, 16, 1
+; GFX11-TRUE16-NEXT: v_bfe_u32 v6, v8, 16, 1
+; GFX11-TRUE16-NEXT: v_or_b32_e32 v12, 0x400000, v8
+; GFX11-TRUE16-NEXT: v_or_b32_e32 v11, 0x400000, v4
+; GFX11-TRUE16-NEXT: v_cndmask_b32_e32 v1, v1, v7, vcc_lo
+; GFX11-TRUE16-NEXT: v_add_nc_u32_e32 v7, v9, v4
+; GFX11-TRUE16-NEXT: v_add_nc_u32_e32 v9, v10, v5
+; GFX11-TRUE16-NEXT: v_cmp_u_f32_e32 vcc_lo, v5, v5
+; GFX11-TRUE16-NEXT: v_lshrrev_b32_e32 v2, 16, v2
+; GFX11-TRUE16-NEXT: v_lshrrev_b32_e32 v10, 16, v1
+; GFX11-TRUE16-NEXT: v_add_nc_u32_e32 v1, v6, v8
+; GFX11-TRUE16-NEXT: v_add_nc_u32_e32 v6, 0x7fff, v7
+; GFX11-TRUE16-NEXT: v_add_nc_u32_e32 v7, 0x7fff, v9
+; GFX11-TRUE16-NEXT: v_or_b32_e32 v9, 0x400000, v5
+; GFX11-TRUE16-NEXT: v_mov_b16_e32 v2.h, v0.l
+; GFX11-TRUE16-NEXT: v_add_nc_u32_e32 v1, 0x7fff, v1
+; GFX11-TRUE16-NEXT: s_delay_alu instid0(VALU_DEP_3) | instskip(SKIP_1) | instid1(VALU_DEP_2)
+; GFX11-TRUE16-NEXT: v_cndmask_b32_e32 v5, v7, v9, vcc_lo
+; GFX11-TRUE16-NEXT: v_cmp_u_f32_e32 vcc_lo, v4, v4
+; GFX11-TRUE16-NEXT: v_lshrrev_b32_e32 v5, 16, v5
+; GFX11-TRUE16-NEXT: v_cndmask_b32_e32 v4, v6, v11, vcc_lo
+; GFX11-TRUE16-NEXT: v_cmp_u_f32_e32 vcc_lo, v8, v8
+; GFX11-TRUE16-NEXT: v_cndmask_b32_e32 v6, v1, v12, vcc_lo
+; GFX11-TRUE16-NEXT: s_delay_alu instid0(VALU_DEP_3) | instskip(SKIP_1) | instid1(VALU_DEP_3)
+; GFX11-TRUE16-NEXT: v_lshrrev_b32_e32 v1, 16, v4
+; GFX11-TRUE16-NEXT: v_mov_b16_e32 v1.h, v10.l
+; GFX11-TRUE16-NEXT: v_lshrrev_b32_e32 v0, 16, v6
+; GFX11-TRUE16-NEXT: v_mov_b16_e32 v0.h, v5.l
+; GFX11-TRUE16-NEXT: s_setpc_b64 s[30:31]
+; GFX11-TRUE16-NEXT: .LBB83_3:
+; GFX11-TRUE16-NEXT: s_branch .LBB83_2
+; GFX11-TRUE16-NEXT: .LBB83_4:
+; GFX11-TRUE16-NEXT: v_dual_mov_b32 v0, s0 :: v_dual_mov_b32 v1, s1
+; GFX11-TRUE16-NEXT: v_dual_mov_b32 v2, s2 :: v_dual_mov_b32 v3, s3
+; GFX11-TRUE16-NEXT: s_setpc_b64 s[30:31]
+;
+; GFX11-FAKE16-LABEL: bitcast_v8bf16_to_v2f64_scalar:
+; GFX11-FAKE16: ; %bb.0:
+; GFX11-FAKE16-NEXT: s_waitcnt vmcnt(0) expcnt(0) lgkmcnt(0)
+; GFX11-FAKE16-NEXT: s_cmp_lg_u32 s16, 0
+; GFX11-FAKE16-NEXT: s_mov_b32 s4, 0
+; GFX11-FAKE16-NEXT: s_cbranch_scc0 .LBB83_3
+; GFX11-FAKE16-NEXT: ; %bb.1: ; %Flow
+; GFX11-FAKE16-NEXT: s_and_not1_b32 vcc_lo, exec_lo, s4
+; GFX11-FAKE16-NEXT: s_cbranch_vccnz .LBB83_4
+; GFX11-FAKE16-NEXT: .LBB83_2: ; %cmp.true
+; GFX11-FAKE16-NEXT: s_lshl_b32 s4, s3, 16
+; GFX11-FAKE16-NEXT: s_pack_lh_b32_b16 s3, 0, s3
+; GFX11-FAKE16-NEXT: v_add_f32_e64 v0, 0x40c00000, s4
+; GFX11-FAKE16-NEXT: v_add_f32_e64 v1, 0x40c00000, s3
+; GFX11-FAKE16-NEXT: s_pack_lh_b32_b16 s4, 0, s2
+; GFX11-FAKE16-NEXT: s_lshl_b32 s2, s2, 16
+; GFX11-FAKE16-NEXT: v_add_f32_e64 v3, 0x40c00000, s4
+; GFX11-FAKE16-NEXT: v_bfe_u32 v2, v0, 16, 1
+; GFX11-FAKE16-NEXT: v_bfe_u32 v4, v1, 16, 1
+; GFX11-FAKE16-NEXT: v_or_b32_e32 v6, 0x400000, v1
+; GFX11-FAKE16-NEXT: v_or_b32_e32 v8, 0x400000, v0
+; GFX11-FAKE16-NEXT: v_bfe_u32 v7, v3, 16, 1
+; GFX11-FAKE16-NEXT: v_cmp_u_f32_e32 vcc_lo, v0, v0
+; GFX11-FAKE16-NEXT: v_add_nc_u32_e32 v4, v4, v1
+; GFX11-FAKE16-NEXT: v_or_b32_e32 v9, 0x400000, v3
+; GFX11-FAKE16-NEXT: v_add_f32_e64 v5, 0x40c00000, s2
+; GFX11-FAKE16-NEXT: v_add_nc_u32_e32 v7, v7, v3
+; GFX11-FAKE16-NEXT: s_pack_lh_b32_b16 s3, 0, s1
+; GFX11-FAKE16-NEXT: v_add_nc_u32_e32 v4, 0x7fff, v4
+; GFX11-FAKE16-NEXT: v_add_nc_u32_e32 v2, v2, v0
+; GFX11-FAKE16-NEXT: s_lshl_b32 s1, s1, 16
+; GFX11-FAKE16-NEXT: v_add_nc_u32_e32 v7, 0x7fff, v7
+; GFX11-FAKE16-NEXT: s_delay_alu instid0(VALU_DEP_2) | instskip(NEXT) | instid1(VALU_DEP_1)
+; GFX11-FAKE16-NEXT: v_add_nc_u32_e32 v2, 0x7fff, v2
+; GFX11-FAKE16-NEXT: v_cndmask_b32_e32 v0, v2, v8, vcc_lo
+; GFX11-FAKE16-NEXT: v_cmp_u_f32_e32 vcc_lo, v1, v1
+; GFX11-FAKE16-NEXT: v_bfe_u32 v2, v5, 16, 1
+; GFX11-FAKE16-NEXT: v_add_f32_e64 v8, 0x40c00000, s1
+; GFX11-FAKE16-NEXT: s_lshl_b32 s1, s0, 16
+; GFX11-FAKE16-NEXT: s_pack_lh_b32_b16 s0, 0, s0
+; GFX11-FAKE16-NEXT: v_cndmask_b32_e32 v1, v4, v6, vcc_lo
+; GFX11-FAKE16-NEXT: v_cmp_u_f32_e32 vcc_lo, v3, v3
+; GFX11-FAKE16-NEXT: v_add_nc_u32_e32 v2, v2, v5
+; GFX11-FAKE16-NEXT: v_add_f32_e64 v4, 0x40c00000, s3
+; GFX11-FAKE16-NEXT: v_or_b32_e32 v13, 0x400000, v8
+; GFX11-FAKE16-NEXT: v_lshrrev_b32_e32 v0, 16, v0
+; GFX11-FAKE16-NEXT: s_delay_alu instid0(VALU_DEP_4) | instskip(SKIP_3) | instid1(VALU_DEP_4)
+; GFX11-FAKE16-NEXT: v_dual_cndmask_b32 v3, v7, v9 :: v_dual_add_nc_u32 v2, 0x7fff, v2
+; GFX11-FAKE16-NEXT: v_cmp_u_f32_e32 vcc_lo, v5, v5
+; GFX11-FAKE16-NEXT: v_bfe_u32 v7, v4, 16, 1
+; GFX11-FAKE16-NEXT: v_add_f32_e64 v9, 0x40c00000, s0
+; GFX11-FAKE16-NEXT: v_lshrrev_b32_e32 v6, 16, v3
+; GFX11-FAKE16-NEXT: v_or_b32_e32 v3, 0x400000, v5
+; GFX11-FAKE16-NEXT: v_bfe_u32 v5, v8, 16, 1
+; GFX11-FAKE16-NEXT: v_add_nc_u32_e32 v7, v7, v4
+; GFX11-FAKE16-NEXT: v_or_b32_e32 v12, 0x400000, v4
+; GFX11-FAKE16-NEXT: v_bfe_u32 v11, v9, 16, 1
+; GFX11-FAKE16-NEXT: v_cndmask_b32_e32 v2, v2, v3, vcc_lo
+; GFX11-FAKE16-NEXT: v_add_f32_e64 v3, 0x40c00000, s1
+; GFX11-FAKE16-NEXT: v_add_nc_u32_e32 v5, v5, v8
+; GFX11-FAKE16-NEXT: v_add_nc_u32_e32 v7, 0x7fff, v7
+; GFX11-FAKE16-NEXT: v_cmp_u_f32_e32 vcc_lo, v8, v8
+; GFX11-FAKE16-NEXT: v_add_nc_u32_e32 v11, v11, v9
+; GFX11-FAKE16-NEXT: v_bfe_u32 v10, v3, 16, 1
+; GFX11-FAKE16-NEXT: v_add_nc_u32_e32 v5, 0x7fff, v5
+; GFX11-FAKE16-NEXT: v_or_b32_e32 v14, 0x400000, v3
+; GFX11-FAKE16-NEXT: v_lshrrev_b32_e32 v2, 16, v2
+; GFX11-FAKE16-NEXT: v_add_nc_u32_e32 v8, 0x7fff, v11
+; GFX11-FAKE16-NEXT: s_delay_alu instid0(VALU_DEP_4) | instskip(SKIP_3) | instid1(VALU_DEP_4)
+; GFX11-FAKE16-NEXT: v_dual_cndmask_b32 v5, v5, v13 :: v_dual_add_nc_u32 v10, v10, v3
+; GFX11-FAKE16-NEXT: v_cmp_u_f32_e32 vcc_lo, v3, v3
+; GFX11-FAKE16-NEXT: v_or_b32_e32 v11, 0x400000, v9
+; GFX11-FAKE16-NEXT: v_lshrrev_b32_e32 v1, 16, v1
+; GFX11-FAKE16-NEXT: v_add_nc_u32_e32 v10, 0x7fff, v10
+; GFX11-FAKE16-NEXT: v_lshrrev_b32_e32 v5, 16, v5
+; GFX11-FAKE16-NEXT: v_and_b32_e32 v0, 0xffff, v0
+; GFX11-FAKE16-NEXT: v_and_b32_e32 v2, 0xffff, v2
+; GFX11-FAKE16-NEXT: s_delay_alu instid0(VALU_DEP_4) | instskip(SKIP_2) | instid1(VALU_DEP_4)
+; GFX11-FAKE16-NEXT: v_cndmask_b32_e32 v3, v10, v14, vcc_lo
+; GFX11-FAKE16-NEXT: v_cmp_u_f32_e32 vcc_lo, v4, v4
+; GFX11-FAKE16-NEXT: v_and_b32_e32 v5, 0xffff, v5
+; GFX11-FAKE16-NEXT: v_lshl_or_b32 v2, v6, 16, v2
+; GFX11-FAKE16-NEXT: s_delay_alu instid0(VALU_DEP_4) | instskip(SKIP_2) | instid1(VALU_DEP_2)
+; GFX11-FAKE16-NEXT: v_lshrrev_b32_e32 v3, 16, v3
+; GFX11-FAKE16-NEXT: v_cndmask_b32_e32 v4, v7, v12, vcc_lo
+; GFX11-FAKE16-NEXT: v_cmp_u_f32_e32 vcc_lo, v9, v9
+; GFX11-FAKE16-NEXT: v_lshrrev_b32_e32 v4, 16, v4
+; GFX11-FAKE16-NEXT: v_cndmask_b32_e32 v7, v8, v11, vcc_lo
+; GFX11-FAKE16-NEXT: v_and_b32_e32 v8, 0xffff, v3
+; GFX11-FAKE16-NEXT: v_lshl_or_b32 v3, v1, 16, v0
+; GFX11-FAKE16-NEXT: s_delay_alu instid0(VALU_DEP_4) | instskip(NEXT) | instid1(VALU_DEP_4)
+; GFX11-FAKE16-NEXT: v_lshl_or_b32 v1, v4, 16, v5
+; GFX11-FAKE16-NEXT: v_lshrrev_b32_e32 v7, 16, v7
+; GFX11-FAKE16-NEXT: s_delay_alu instid0(VALU_DEP_1)
+; GFX11-FAKE16-NEXT: v_lshl_or_b32 v0, v7, 16, v8
+; GFX11-FAKE16-NEXT: s_setpc_b64 s[30:31]
+; GFX11-FAKE16-NEXT: .LBB83_3:
+; GFX11-FAKE16-NEXT: s_branch .LBB83_2
+; GFX11-FAKE16-NEXT: .LBB83_4:
+; GFX11-FAKE16-NEXT: v_dual_mov_b32 v0, s0 :: v_dual_mov_b32 v1, s1
+; GFX11-FAKE16-NEXT: v_dual_mov_b32 v2, s2 :: v_dual_mov_b32 v3, s3
+; GFX11-FAKE16-NEXT: s_setpc_b64 s[30:31]
%cmp = icmp eq i32 %b, 0
br i1 %cmp, label %cmp.true, label %cmp.false
@@ -18154,83 +18546,75 @@ define <8 x i16> @bitcast_v8bf16_to_v8i16(<8 x bfloat> %a, i32 %b) {
; GFX11-TRUE16-NEXT: s_and_not1_saveexec_b32 s0, s0
; GFX11-TRUE16-NEXT: s_cbranch_execz .LBB94_2
; GFX11-TRUE16-NEXT: ; %bb.1: ; %cmp.true
-; GFX11-TRUE16-NEXT: v_mov_b16_e32 v4.h, v0.l
-; GFX11-TRUE16-NEXT: v_and_b32_e32 v0, 0xffff0000, v0
; GFX11-TRUE16-NEXT: v_mov_b16_e32 v4.l, 0
-; GFX11-TRUE16-NEXT: s_delay_alu instid0(VALU_DEP_2) | instskip(NEXT) | instid1(VALU_DEP_1)
-; GFX11-TRUE16-NEXT: v_add_f32_e32 v0, 0x40c00000, v0
-; GFX11-TRUE16-NEXT: v_bfe_u32 v9, v0, 16, 1
-; GFX11-TRUE16-NEXT: v_or_b32_e32 v10, 0x400000, v0
-; GFX11-TRUE16-NEXT: s_delay_alu instid0(VALU_DEP_2) | instskip(SKIP_3) | instid1(VALU_DEP_3)
-; GFX11-TRUE16-NEXT: v_add3_u32 v9, v9, v0, 0x7fff
+; GFX11-TRUE16-NEXT: v_mov_b16_e32 v4.h, v0.l
; GFX11-TRUE16-NEXT: v_and_b32_e32 v5, 0xffff0000, v1
+; GFX11-TRUE16-NEXT: s_delay_alu instid0(VALU_DEP_2) | instskip(SKIP_1) | instid1(VALU_DEP_3)
; GFX11-TRUE16-NEXT: v_add_f32_e32 v6, 0x40c00000, v4
; GFX11-TRUE16-NEXT: v_mov_b16_e32 v4.h, v1.l
; GFX11-TRUE16-NEXT: v_add_f32_e32 v1, 0x40c00000, v5
-; GFX11-TRUE16-NEXT: s_delay_alu instid0(VALU_DEP_3) | instskip(NEXT) | instid1(VALU_DEP_3)
+; GFX11-TRUE16-NEXT: s_delay_alu instid0(VALU_DEP_3)
; GFX11-TRUE16-NEXT: v_bfe_u32 v5, v6, 16, 1
-; GFX11-TRUE16-NEXT: v_add_f32_e32 v7, 0x40c00000, v4
-; GFX11-TRUE16-NEXT: v_or_b32_e32 v8, 0x400000, v6
+; GFX11-TRUE16-NEXT: v_or_b32_e32 v7, 0x400000, v6
; GFX11-TRUE16-NEXT: v_cmp_u_f32_e32 vcc_lo, v6, v6
+; GFX11-TRUE16-NEXT: v_add_f32_e32 v8, 0x40c00000, v4
; GFX11-TRUE16-NEXT: v_mov_b16_e32 v4.h, v2.l
; GFX11-TRUE16-NEXT: v_add3_u32 v5, v5, v6, 0x7fff
-; GFX11-TRUE16-NEXT: v_bfe_u32 v12, v7, 16, 1
-; GFX11-TRUE16-NEXT: v_or_b32_e32 v13, 0x400000, v7
-; GFX11-TRUE16-NEXT: v_and_b32_e32 v2, 0xffff0000, v2
-; GFX11-TRUE16-NEXT: v_bfe_u32 v11, v1, 16, 1
-; GFX11-TRUE16-NEXT: v_cndmask_b32_e32 v5, v5, v8, vcc_lo
+; GFX11-TRUE16-NEXT: v_and_b32_e32 v0, 0xffff0000, v0
+; GFX11-TRUE16-NEXT: s_delay_alu instid0(VALU_DEP_4) | instskip(SKIP_1) | instid1(VALU_DEP_3)
+; GFX11-TRUE16-NEXT: v_bfe_u32 v11, v8, 16, 1
+; GFX11-TRUE16-NEXT: v_or_b32_e32 v12, 0x400000, v8
+; GFX11-TRUE16-NEXT: v_dual_cndmask_b32 v5, v5, v7 :: v_dual_add_f32 v0, 0x40c00000, v0
+; GFX11-TRUE16-NEXT: v_bfe_u32 v7, v1, 16, 1
+; GFX11-TRUE16-NEXT: s_delay_alu instid0(VALU_DEP_4) | instskip(NEXT) | instid1(VALU_DEP_3)
+; GFX11-TRUE16-NEXT: v_add3_u32 v6, v11, v8, 0x7fff
+; GFX11-TRUE16-NEXT: v_bfe_u32 v9, v0, 16, 1
+; GFX11-TRUE16-NEXT: v_or_b32_e32 v10, 0x400000, v0
; GFX11-TRUE16-NEXT: v_cmp_u_f32_e32 vcc_lo, v0, v0
-; GFX11-TRUE16-NEXT: v_add3_u32 v12, v12, v7, 0x7fff
-; GFX11-TRUE16-NEXT: v_add_f32_e32 v2, 0x40c00000, v2
-; GFX11-TRUE16-NEXT: v_add3_u32 v8, v11, v1, 0x7fff
-; GFX11-TRUE16-NEXT: v_lshrrev_b32_e32 v5, 16, v5
+; GFX11-TRUE16-NEXT: v_add3_u32 v7, v7, v1, 0x7fff
+; GFX11-TRUE16-NEXT: s_delay_alu instid0(VALU_DEP_4) | instskip(NEXT) | instid1(VALU_DEP_1)
+; GFX11-TRUE16-NEXT: v_add3_u32 v9, v9, v0, 0x7fff
; GFX11-TRUE16-NEXT: v_cndmask_b32_e32 v0, v9, v10, vcc_lo
-; GFX11-TRUE16-NEXT: v_cmp_u_f32_e32 vcc_lo, v7, v7
-; GFX11-TRUE16-NEXT: v_add_f32_e32 v7, 0x40c00000, v4
+; GFX11-TRUE16-NEXT: v_cmp_u_f32_e32 vcc_lo, v8, v8
+; GFX11-TRUE16-NEXT: v_or_b32_e32 v8, 0x400000, v1
+; GFX11-TRUE16-NEXT: v_mov_b16_e32 v0.l, v5.h
+; GFX11-TRUE16-NEXT: v_cndmask_b32_e32 v6, v6, v12, vcc_lo
+; GFX11-TRUE16-NEXT: v_cmp_u_f32_e32 vcc_lo, v1, v1
+; GFX11-TRUE16-NEXT: s_delay_alu instid0(VALU_DEP_4) | instskip(NEXT) | instid1(VALU_DEP_1)
+; GFX11-TRUE16-NEXT: v_dual_cndmask_b32 v1, v7, v8 :: v_dual_and_b32 v2, 0xffff0000, v2
+; GFX11-TRUE16-NEXT: v_dual_add_f32 v9, 0x40c00000, v4 :: v_dual_add_f32 v2, 0x40c00000, v2
; GFX11-TRUE16-NEXT: v_mov_b16_e32 v4.h, v3.l
-; GFX11-TRUE16-NEXT: v_or_b32_e32 v15, 0x400000, v2
-; GFX11-TRUE16-NEXT: v_or_b32_e32 v10, 0x400000, v1
-; GFX11-TRUE16-NEXT: v_cndmask_b32_e32 v6, v12, v13, vcc_lo
-; GFX11-TRUE16-NEXT: v_bfe_u32 v9, v7, 16, 1
-; GFX11-TRUE16-NEXT: v_add_f32_e32 v4, 0x40c00000, v4
-; GFX11-TRUE16-NEXT: v_or_b32_e32 v11, 0x400000, v7
-; GFX11-TRUE16-NEXT: v_bfe_u32 v12, v2, 16, 1
-; GFX11-TRUE16-NEXT: v_cmp_u_f32_e32 vcc_lo, v7, v7
-; GFX11-TRUE16-NEXT: v_add3_u32 v9, v9, v7, 0x7fff
-; GFX11-TRUE16-NEXT: v_bfe_u32 v13, v4, 16, 1
-; GFX11-TRUE16-NEXT: v_lshrrev_b32_e32 v6, 16, v6
-; GFX11-TRUE16-NEXT: s_delay_alu instid0(VALU_DEP_3) | instskip(SKIP_1) | instid1(VALU_DEP_4)
-; GFX11-TRUE16-NEXT: v_cndmask_b32_e32 v7, v9, v11, vcc_lo
-; GFX11-TRUE16-NEXT: v_add3_u32 v9, v12, v2, 0x7fff
-; GFX11-TRUE16-NEXT: v_add3_u32 v11, v13, v4, 0x7fff
-; GFX11-TRUE16-NEXT: v_or_b32_e32 v12, 0x400000, v4
-; GFX11-TRUE16-NEXT: v_cmp_u_f32_e32 vcc_lo, v4, v4
-; GFX11-TRUE16-NEXT: s_delay_alu instid0(VALU_DEP_2) | instskip(NEXT) | instid1(VALU_DEP_1)
-; GFX11-TRUE16-NEXT: v_dual_cndmask_b32 v4, v11, v12 :: v_dual_and_b32 v3, 0xffff0000, v3
+; GFX11-TRUE16-NEXT: v_and_b32_e32 v3, 0xffff0000, v3
+; GFX11-TRUE16-NEXT: v_mov_b16_e32 v1.l, v6.h
+; GFX11-TRUE16-NEXT: s_delay_alu instid0(VALU_DEP_4)
+; GFX11-TRUE16-NEXT: v_bfe_u32 v10, v9, 16, 1
+; GFX11-TRUE16-NEXT: v_or_b32_e32 v8, 0x400000, v9
+; GFX11-TRUE16-NEXT: v_cmp_u_f32_e32 vcc_lo, v9, v9
; GFX11-TRUE16-NEXT: v_add_f32_e32 v3, 0x40c00000, v3
+; GFX11-TRUE16-NEXT: v_or_b32_e32 v12, 0x400000, v2
+; GFX11-TRUE16-NEXT: v_add3_u32 v7, v10, v9, 0x7fff
+; GFX11-TRUE16-NEXT: v_bfe_u32 v10, v2, 16, 1
+; GFX11-TRUE16-NEXT: s_delay_alu instid0(VALU_DEP_4) | instskip(NEXT) | instid1(VALU_DEP_3)
+; GFX11-TRUE16-NEXT: v_or_b32_e32 v13, 0x400000, v3
+; GFX11-TRUE16-NEXT: v_cndmask_b32_e32 v7, v7, v8, vcc_lo
+; GFX11-TRUE16-NEXT: v_bfe_u32 v8, v3, 16, 1
+; GFX11-TRUE16-NEXT: s_delay_alu instid0(VALU_DEP_4) | instskip(NEXT) | instid1(VALU_DEP_2)
+; GFX11-TRUE16-NEXT: v_add3_u32 v9, v10, v2, 0x7fff
+; GFX11-TRUE16-NEXT: v_add3_u32 v8, v8, v3, 0x7fff
+; GFX11-TRUE16-NEXT: v_add_f32_e32 v4, 0x40c00000, v4
; GFX11-TRUE16-NEXT: s_delay_alu instid0(VALU_DEP_1) | instskip(SKIP_1) | instid1(VALU_DEP_2)
-; GFX11-TRUE16-NEXT: v_bfe_u32 v14, v3, 16, 1
-; GFX11-TRUE16-NEXT: v_cmp_u_f32_e32 vcc_lo, v3, v3
-; GFX11-TRUE16-NEXT: v_add3_u32 v13, v14, v3, 0x7fff
-; GFX11-TRUE16-NEXT: v_or_b32_e32 v14, 0x400000, v3
-; GFX11-TRUE16-NEXT: s_delay_alu instid0(VALU_DEP_1) | instskip(SKIP_1) | instid1(VALU_DEP_2)
-; GFX11-TRUE16-NEXT: v_cndmask_b32_e32 v3, v13, v14, vcc_lo
+; GFX11-TRUE16-NEXT: v_bfe_u32 v11, v4, 16, 1
+; GFX11-TRUE16-NEXT: v_cmp_u_f32_e32 vcc_lo, v4, v4
+; GFX11-TRUE16-NEXT: v_add3_u32 v10, v11, v4, 0x7fff
+; GFX11-TRUE16-NEXT: v_or_b32_e32 v11, 0x400000, v4
+; GFX11-TRUE16-NEXT: s_delay_alu instid0(VALU_DEP_1)
+; GFX11-TRUE16-NEXT: v_cndmask_b32_e32 v4, v10, v11, vcc_lo
; GFX11-TRUE16-NEXT: v_cmp_u_f32_e32 vcc_lo, v2, v2
-; GFX11-TRUE16-NEXT: v_mov_b16_e32 v0.l, v3.h
-; GFX11-TRUE16-NEXT: v_cndmask_b32_e32 v2, v9, v15, vcc_lo
-; GFX11-TRUE16-NEXT: v_cmp_u_f32_e32 vcc_lo, v1, v1
-; GFX11-TRUE16-NEXT: v_lshrrev_b32_e32 v3, 16, v4
-; GFX11-TRUE16-NEXT: v_cndmask_b32_e32 v1, v8, v10, vcc_lo
-; GFX11-TRUE16-NEXT: s_delay_alu instid0(VALU_DEP_4) | instskip(SKIP_4) | instid1(VALU_DEP_4)
-; GFX11-TRUE16-NEXT: v_mov_b16_e32 v1.l, v2.h
-; GFX11-TRUE16-NEXT: v_lshrrev_b32_e32 v2, 16, v7
-; GFX11-TRUE16-NEXT: v_mov_b16_e32 v7.l, v0.h
-; GFX11-TRUE16-NEXT: v_lshl_or_b32 v3, v0, 16, v3
-; GFX11-TRUE16-NEXT: v_mov_b16_e32 v4.l, v1.h
-; GFX11-TRUE16-NEXT: v_lshl_or_b32 v2, v1, 16, v2
-; GFX11-TRUE16-NEXT: s_delay_alu instid0(VALU_DEP_4) | instskip(NEXT) | instid1(VALU_DEP_3)
-; GFX11-TRUE16-NEXT: v_lshl_or_b32 v0, v7, 16, v5
-; GFX11-TRUE16-NEXT: v_lshl_or_b32 v1, v4, 16, v6
+; GFX11-TRUE16-NEXT: v_cndmask_b32_e32 v2, v9, v12, vcc_lo
+; GFX11-TRUE16-NEXT: v_cmp_u_f32_e32 vcc_lo, v3, v3
+; GFX11-TRUE16-NEXT: v_mov_b16_e32 v2.l, v7.h
+; GFX11-TRUE16-NEXT: v_cndmask_b32_e32 v3, v8, v13, vcc_lo
+; GFX11-TRUE16-NEXT: v_mov_b16_e32 v3.l, v4.h
; GFX11-TRUE16-NEXT: .LBB94_2: ; %end
; GFX11-TRUE16-NEXT: s_or_b32 exec_lo, exec_lo, s0
; GFX11-TRUE16-NEXT: s_setpc_b64 s[30:31]
@@ -18580,104 +18964,191 @@ define inreg <8 x i16> @bitcast_v8bf16_to_v8i16_scalar(<8 x bfloat> inreg %a, i3
; GFX9-NEXT: v_mov_b32_e32 v3, s19
; GFX9-NEXT: s_setpc_b64 s[30:31]
;
-; GFX11-LABEL: bitcast_v8bf16_to_v8i16_scalar:
-; GFX11: ; %bb.0:
-; GFX11-NEXT: s_waitcnt vmcnt(0) expcnt(0) lgkmcnt(0)
-; GFX11-NEXT: s_cmp_lg_u32 s16, 0
-; GFX11-NEXT: s_mov_b32 s4, 0
-; GFX11-NEXT: s_cbranch_scc0 .LBB95_3
-; GFX11-NEXT: ; %bb.1: ; %Flow
-; GFX11-NEXT: s_and_not1_b32 vcc_lo, exec_lo, s4
-; GFX11-NEXT: s_cbranch_vccnz .LBB95_4
-; GFX11-NEXT: .LBB95_2: ; %cmp.true
-; GFX11-NEXT: s_pack_lh_b32_b16 s4, 0, s0
-; GFX11-NEXT: s_lshl_b32 s0, s0, 16
-; GFX11-NEXT: v_add_f32_e64 v0, 0x40c00000, s4
-; GFX11-NEXT: s_pack_lh_b32_b16 s4, 0, s1
-; GFX11-NEXT: v_add_f32_e64 v1, 0x40c00000, s0
-; GFX11-NEXT: v_add_f32_e64 v2, 0x40c00000, s4
-; GFX11-NEXT: s_lshl_b32 s0, s1, 16
-; GFX11-NEXT: v_bfe_u32 v3, v0, 16, 1
-; GFX11-NEXT: v_or_b32_e32 v7, 0x400000, v0
-; GFX11-NEXT: v_or_b32_e32 v8, 0x400000, v1
-; GFX11-NEXT: v_bfe_u32 v6, v2, 16, 1
-; GFX11-NEXT: v_bfe_u32 v5, v1, 16, 1
-; GFX11-NEXT: v_or_b32_e32 v9, 0x400000, v2
-; GFX11-NEXT: v_cmp_u_f32_e32 vcc_lo, v0, v0
-; GFX11-NEXT: v_add_f32_e64 v4, 0x40c00000, s0
-; GFX11-NEXT: v_add_nc_u32_e32 v6, v6, v2
-; GFX11-NEXT: s_pack_lh_b32_b16 s0, 0, s2
-; GFX11-NEXT: s_delay_alu instid0(VALU_DEP_2) | instskip(NEXT) | instid1(VALU_DEP_2)
-; GFX11-NEXT: v_bfe_u32 v10, v4, 16, 1
-; GFX11-NEXT: v_add_nc_u32_e32 v6, 0x7fff, v6
-; GFX11-NEXT: v_add_nc_u32_e32 v3, v3, v0
-; GFX11-NEXT: s_delay_alu instid0(VALU_DEP_1) | instskip(NEXT) | instid1(VALU_DEP_1)
-; GFX11-NEXT: v_add_nc_u32_e32 v3, 0x7fff, v3
-; GFX11-NEXT: v_dual_cndmask_b32 v0, v3, v7 :: v_dual_add_nc_u32 v5, v5, v1
-; GFX11-NEXT: s_delay_alu instid0(VALU_DEP_1) | instskip(SKIP_1) | instid1(VALU_DEP_2)
-; GFX11-NEXT: v_add_nc_u32_e32 v5, 0x7fff, v5
-; GFX11-NEXT: v_cmp_u_f32_e32 vcc_lo, v1, v1
-; GFX11-NEXT: v_cndmask_b32_e32 v1, v5, v8, vcc_lo
-; GFX11-NEXT: v_cmp_u_f32_e32 vcc_lo, v2, v2
-; GFX11-NEXT: v_add_f32_e64 v5, 0x40c00000, s0
-; GFX11-NEXT: s_lshl_b32 s0, s2, 16
-; GFX11-NEXT: v_or_b32_e32 v8, 0x400000, v4
-; GFX11-NEXT: v_add_f32_e64 v7, 0x40c00000, s0
-; GFX11-NEXT: s_lshl_b32 s0, s3, 16
-; GFX11-NEXT: v_cndmask_b32_e32 v6, v6, v9, vcc_lo
-; GFX11-NEXT: v_add_f32_e64 v9, 0x40c00000, s0
-; GFX11-NEXT: v_cmp_u_f32_e32 vcc_lo, v4, v4
-; GFX11-NEXT: v_or_b32_e32 v13, 0x400000, v7
-; GFX11-NEXT: v_add_nc_u32_e32 v3, v10, v4
-; GFX11-NEXT: v_bfe_u32 v10, v7, 16, 1
-; GFX11-NEXT: v_bfe_u32 v12, v9, 16, 1
-; GFX11-NEXT: v_or_b32_e32 v14, 0x400000, v9
-; GFX11-NEXT: s_pack_lh_b32_b16 s0, 0, s3
-; GFX11-NEXT: s_delay_alu instid0(VALU_DEP_3) | instskip(NEXT) | instid1(VALU_DEP_3)
-; GFX11-NEXT: v_add_nc_u32_e32 v4, v10, v7
-; GFX11-NEXT: v_add_nc_u32_e32 v10, v12, v9
-; GFX11-NEXT: v_add_f32_e64 v11, 0x40c00000, s0
-; GFX11-NEXT: v_or_b32_e32 v12, 0x400000, v5
-; GFX11-NEXT: s_delay_alu instid0(VALU_DEP_4) | instskip(NEXT) | instid1(VALU_DEP_4)
-; GFX11-NEXT: v_add_nc_u32_e32 v4, 0x7fff, v4
-; GFX11-NEXT: v_add_nc_u32_e32 v10, 0x7fff, v10
-; GFX11-NEXT: v_add_nc_u32_e32 v2, 0x7fff, v3
-; GFX11-NEXT: v_bfe_u32 v3, v5, 16, 1
-; GFX11-NEXT: s_delay_alu instid0(VALU_DEP_2) | instskip(SKIP_1) | instid1(VALU_DEP_3)
-; GFX11-NEXT: v_cndmask_b32_e32 v2, v2, v8, vcc_lo
-; GFX11-NEXT: v_cmp_u_f32_e32 vcc_lo, v7, v7
-; GFX11-NEXT: v_add_nc_u32_e32 v3, v3, v5
-; GFX11-NEXT: v_bfe_u32 v8, v11, 16, 1
-; GFX11-NEXT: v_cndmask_b32_e32 v4, v4, v13, vcc_lo
-; GFX11-NEXT: v_cmp_u_f32_e32 vcc_lo, v9, v9
-; GFX11-NEXT: s_delay_alu instid0(VALU_DEP_4) | instskip(NEXT) | instid1(VALU_DEP_4)
-; GFX11-NEXT: v_add_nc_u32_e32 v3, 0x7fff, v3
-; GFX11-NEXT: v_add_nc_u32_e32 v8, v8, v11
-; GFX11-NEXT: s_delay_alu instid0(VALU_DEP_4) | instskip(SKIP_2) | instid1(VALU_DEP_4)
-; GFX11-NEXT: v_lshrrev_b32_e32 v4, 16, v4
-; GFX11-NEXT: v_cndmask_b32_e32 v9, v10, v14, vcc_lo
-; GFX11-NEXT: v_cmp_u_f32_e32 vcc_lo, v5, v5
-; GFX11-NEXT: v_add_nc_u32_e32 v7, 0x7fff, v8
-; GFX11-NEXT: v_or_b32_e32 v8, 0x400000, v11
-; GFX11-NEXT: v_cndmask_b32_e32 v5, v3, v12, vcc_lo
-; GFX11-NEXT: v_cmp_u_f32_e32 vcc_lo, v11, v11
-; GFX11-NEXT: s_delay_alu instid0(VALU_DEP_3) | instskip(SKIP_4) | instid1(VALU_DEP_4)
-; GFX11-NEXT: v_cndmask_b32_e32 v3, v7, v8, vcc_lo
-; GFX11-NEXT: v_lshrrev_b32_e32 v7, 16, v9
-; GFX11-NEXT: v_lshrrev_b32_e32 v8, 16, v2
-; GFX11-NEXT: v_lshrrev_b32_e32 v9, 16, v1
-; GFX11-NEXT: v_and_or_b32 v2, 0xffff0000, v5, v4
-; GFX11-NEXT: v_and_or_b32 v3, 0xffff0000, v3, v7
-; GFX11-NEXT: s_delay_alu instid0(VALU_DEP_4) | instskip(NEXT) | instid1(VALU_DEP_4)
-; GFX11-NEXT: v_and_or_b32 v1, 0xffff0000, v6, v8
-; GFX11-NEXT: v_and_or_b32 v0, 0xffff0000, v0, v9
-; GFX11-NEXT: s_setpc_b64 s[30:31]
-; GFX11-NEXT: .LBB95_3:
-; GFX11-NEXT: s_branch .LBB95_2
-; GFX11-NEXT: .LBB95_4:
-; GFX11-NEXT: v_dual_mov_b32 v0, s0 :: v_dual_mov_b32 v1, s1
-; GFX11-NEXT: v_dual_mov_b32 v2, s2 :: v_dual_mov_b32 v3, s3
-; GFX11-NEXT: s_setpc_b64 s[30:31]
+; GFX11-TRUE16-LABEL: bitcast_v8bf16_to_v8i16_scalar:
+; GFX11-TRUE16: ; %bb.0:
+; GFX11-TRUE16-NEXT: s_waitcnt vmcnt(0) expcnt(0) lgkmcnt(0)
+; GFX11-TRUE16-NEXT: s_cmp_lg_u32 s16, 0
+; GFX11-TRUE16-NEXT: s_mov_b32 s4, 0
+; GFX11-TRUE16-NEXT: s_cbranch_scc0 .LBB95_3
+; GFX11-TRUE16-NEXT: ; %bb.1: ; %Flow
+; GFX11-TRUE16-NEXT: s_and_not1_b32 vcc_lo, exec_lo, s4
+; GFX11-TRUE16-NEXT: s_cbranch_vccnz .LBB95_4
+; GFX11-TRUE16-NEXT: .LBB95_2: ; %cmp.true
+; GFX11-TRUE16-NEXT: s_pack_lh_b32_b16 s4, 0, s0
+; GFX11-TRUE16-NEXT: s_lshl_b32 s0, s0, 16
+; GFX11-TRUE16-NEXT: v_add_f32_e64 v0, 0x40c00000, s4
+; GFX11-TRUE16-NEXT: v_add_f32_e64 v1, 0x40c00000, s0
+; GFX11-TRUE16-NEXT: s_pack_lh_b32_b16 s4, 0, s1
+; GFX11-TRUE16-NEXT: s_lshl_b32 s0, s1, 16
+; GFX11-TRUE16-NEXT: v_add_f32_e64 v2, 0x40c00000, s4
+; GFX11-TRUE16-NEXT: v_bfe_u32 v3, v0, 16, 1
+; GFX11-TRUE16-NEXT: v_bfe_u32 v5, v1, 16, 1
+; GFX11-TRUE16-NEXT: v_or_b32_e32 v7, 0x400000, v0
+; GFX11-TRUE16-NEXT: v_cmp_u_f32_e32 vcc_lo, v0, v0
+; GFX11-TRUE16-NEXT: v_bfe_u32 v6, v2, 16, 1
+; GFX11-TRUE16-NEXT: v_add_nc_u32_e32 v3, v3, v0
+; GFX11-TRUE16-NEXT: v_add_f32_e64 v4, 0x40c00000, s0
+; GFX11-TRUE16-NEXT: v_or_b32_e32 v8, 0x400000, v1
+; GFX11-TRUE16-NEXT: v_or_b32_e32 v9, 0x400000, v2
+; GFX11-TRUE16-NEXT: s_pack_lh_b32_b16 s0, 0, s2
+; GFX11-TRUE16-NEXT: v_add_nc_u32_e32 v3, 0x7fff, v3
+; GFX11-TRUE16-NEXT: v_add_nc_u32_e32 v5, v5, v1
+; GFX11-TRUE16-NEXT: v_bfe_u32 v10, v4, 16, 1
+; GFX11-TRUE16-NEXT: s_delay_alu instid0(VALU_DEP_2)
+; GFX11-TRUE16-NEXT: v_dual_cndmask_b32 v0, v3, v7 :: v_dual_add_nc_u32 v5, 0x7fff, v5
+; GFX11-TRUE16-NEXT: v_cmp_u_f32_e32 vcc_lo, v1, v1
+; GFX11-TRUE16-NEXT: v_add_nc_u32_e32 v6, v6, v2
+; GFX11-TRUE16-NEXT: v_add_f32_e64 v7, 0x40c00000, s0
+; GFX11-TRUE16-NEXT: s_lshl_b32 s0, s2, 16
+; GFX11-TRUE16-NEXT: v_add_nc_u32_e32 v3, v10, v4
+; GFX11-TRUE16-NEXT: s_delay_alu instid0(VALU_DEP_3) | instskip(SKIP_2) | instid1(VALU_DEP_4)
+; GFX11-TRUE16-NEXT: v_dual_cndmask_b32 v5, v5, v8 :: v_dual_add_nc_u32 v6, 0x7fff, v6
+; GFX11-TRUE16-NEXT: v_cmp_u_f32_e32 vcc_lo, v2, v2
+; GFX11-TRUE16-NEXT: v_or_b32_e32 v8, 0x400000, v4
+; GFX11-TRUE16-NEXT: v_add_nc_u32_e32 v2, 0x7fff, v3
+; GFX11-TRUE16-NEXT: v_bfe_u32 v3, v7, 16, 1
+; GFX11-TRUE16-NEXT: v_mov_b16_e32 v0.l, v5.h
+; GFX11-TRUE16-NEXT: v_cndmask_b32_e32 v1, v6, v9, vcc_lo
+; GFX11-TRUE16-NEXT: v_add_f32_e64 v6, 0x40c00000, s0
+; GFX11-TRUE16-NEXT: s_lshl_b32 s0, s3, 16
+; GFX11-TRUE16-NEXT: v_cmp_u_f32_e32 vcc_lo, v4, v4
+; GFX11-TRUE16-NEXT: v_add_f32_e64 v9, 0x40c00000, s0
+; GFX11-TRUE16-NEXT: s_pack_lh_b32_b16 s0, 0, s3
+; GFX11-TRUE16-NEXT: v_bfe_u32 v10, v6, 16, 1
+; GFX11-TRUE16-NEXT: v_add_f32_e64 v11, 0x40c00000, s0
+; GFX11-TRUE16-NEXT: v_cndmask_b32_e32 v4, v2, v8, vcc_lo
+; GFX11-TRUE16-NEXT: v_bfe_u32 v12, v9, 16, 1
+; GFX11-TRUE16-NEXT: v_or_b32_e32 v13, 0x400000, v6
+; GFX11-TRUE16-NEXT: v_add_nc_u32_e32 v2, v10, v6
+; GFX11-TRUE16-NEXT: v_cmp_u_f32_e32 vcc_lo, v6, v6
+; GFX11-TRUE16-NEXT: v_add_nc_u32_e32 v3, v3, v7
+; GFX11-TRUE16-NEXT: v_add_nc_u32_e32 v10, v12, v9
+; GFX11-TRUE16-NEXT: v_bfe_u32 v8, v11, 16, 1
+; GFX11-TRUE16-NEXT: v_add_nc_u32_e32 v2, 0x7fff, v2
+; GFX11-TRUE16-NEXT: v_or_b32_e32 v14, 0x400000, v9
+; GFX11-TRUE16-NEXT: v_add_nc_u32_e32 v3, 0x7fff, v3
+; GFX11-TRUE16-NEXT: v_add_nc_u32_e32 v10, 0x7fff, v10
+; GFX11-TRUE16-NEXT: v_or_b32_e32 v12, 0x400000, v7
+; GFX11-TRUE16-NEXT: v_cndmask_b32_e32 v6, v2, v13, vcc_lo
+; GFX11-TRUE16-NEXT: v_cmp_u_f32_e32 vcc_lo, v9, v9
+; GFX11-TRUE16-NEXT: v_add_nc_u32_e32 v8, v8, v11
+; GFX11-TRUE16-NEXT: v_or_b32_e32 v13, 0x400000, v11
+; GFX11-TRUE16-NEXT: v_mov_b16_e32 v1.l, v4.h
+; GFX11-TRUE16-NEXT: v_cndmask_b32_e32 v9, v10, v14, vcc_lo
+; GFX11-TRUE16-NEXT: v_cmp_u_f32_e32 vcc_lo, v7, v7
+; GFX11-TRUE16-NEXT: v_add_nc_u32_e32 v8, 0x7fff, v8
+; GFX11-TRUE16-NEXT: v_cndmask_b32_e32 v2, v3, v12, vcc_lo
+; GFX11-TRUE16-NEXT: v_cmp_u_f32_e32 vcc_lo, v11, v11
+; GFX11-TRUE16-NEXT: v_mov_b16_e32 v2.l, v6.h
+; GFX11-TRUE16-NEXT: s_delay_alu instid0(VALU_DEP_4)
+; GFX11-TRUE16-NEXT: v_cndmask_b32_e32 v3, v8, v13, vcc_lo
+; GFX11-TRUE16-NEXT: v_mov_b16_e32 v3.l, v9.h
+; GFX11-TRUE16-NEXT: s_setpc_b64 s[30:31]
+; GFX11-TRUE16-NEXT: .LBB95_3:
+; GFX11-TRUE16-NEXT: s_branch .LBB95_2
+; GFX11-TRUE16-NEXT: .LBB95_4:
+; GFX11-TRUE16-NEXT: v_dual_mov_b32 v0, s0 :: v_dual_mov_b32 v1, s1
+; GFX11-TRUE16-NEXT: v_dual_mov_b32 v2, s2 :: v_dual_mov_b32 v3, s3
+; GFX11-TRUE16-NEXT: s_setpc_b64 s[30:31]
+;
+; GFX11-FAKE16-LABEL: bitcast_v8bf16_to_v8i16_scalar:
+; GFX11-FAKE16: ; %bb.0:
+; GFX11-FAKE16-NEXT: s_waitcnt vmcnt(0) expcnt(0) lgkmcnt(0)
+; GFX11-FAKE16-NEXT: s_cmp_lg_u32 s16, 0
+; GFX11-FAKE16-NEXT: s_mov_b32 s4, 0
+; GFX11-FAKE16-NEXT: s_cbranch_scc0 .LBB95_3
+; GFX11-FAKE16-NEXT: ; %bb.1: ; %Flow
+; GFX11-FAKE16-NEXT: s_and_not1_b32 vcc_lo, exec_lo, s4
+; GFX11-FAKE16-NEXT: s_cbranch_vccnz .LBB95_4
+; GFX11-FAKE16-NEXT: .LBB95_2: ; %cmp.true
+; GFX11-FAKE16-NEXT: s_pack_lh_b32_b16 s4, 0, s0
+; GFX11-FAKE16-NEXT: s_lshl_b32 s0, s0, 16
+; GFX11-FAKE16-NEXT: v_add_f32_e64 v0, 0x40c00000, s4
+; GFX11-FAKE16-NEXT: s_pack_lh_b32_b16 s4, 0, s1
+; GFX11-FAKE16-NEXT: v_add_f32_e64 v1, 0x40c00000, s0
+; GFX11-FAKE16-NEXT: v_add_f32_e64 v2, 0x40c00000, s4
+; GFX11-FAKE16-NEXT: s_lshl_b32 s0, s1, 16
+; GFX11-FAKE16-NEXT: v_bfe_u32 v3, v0, 16, 1
+; GFX11-FAKE16-NEXT: v_or_b32_e32 v7, 0x400000, v0
+; GFX11-FAKE16-NEXT: v_or_b32_e32 v8, 0x400000, v1
+; GFX11-FAKE16-NEXT: v_bfe_u32 v6, v2, 16, 1
+; GFX11-FAKE16-NEXT: v_bfe_u32 v5, v1, 16, 1
+; GFX11-FAKE16-NEXT: v_or_b32_e32 v9, 0x400000, v2
+; GFX11-FAKE16-NEXT: v_cmp_u_f32_e32 vcc_lo, v0, v0
+; GFX11-FAKE16-NEXT: v_add_f32_e64 v4, 0x40c00000, s0
+; GFX11-FAKE16-NEXT: v_add_nc_u32_e32 v6, v6, v2
+; GFX11-FAKE16-NEXT: s_pack_lh_b32_b16 s0, 0, s2
+; GFX11-FAKE16-NEXT: s_delay_alu instid0(VALU_DEP_2) | instskip(NEXT) | instid1(VALU_DEP_2)
+; GFX11-FAKE16-NEXT: v_bfe_u32 v10, v4, 16, 1
+; GFX11-FAKE16-NEXT: v_add_nc_u32_e32 v6, 0x7fff, v6
+; GFX11-FAKE16-NEXT: v_add_nc_u32_e32 v3, v3, v0
+; GFX11-FAKE16-NEXT: s_delay_alu instid0(VALU_DEP_1) | instskip(NEXT) | instid1(VALU_DEP_1)
+; GFX11-FAKE16-NEXT: v_add_nc_u32_e32 v3, 0x7fff, v3
+; GFX11-FAKE16-NEXT: v_dual_cndmask_b32 v0, v3, v7 :: v_dual_add_nc_u32 v5, v5, v1
+; GFX11-FAKE16-NEXT: s_delay_alu instid0(VALU_DEP_1) | instskip(SKIP_1) | instid1(VALU_DEP_2)
+; GFX11-FAKE16-NEXT: v_add_nc_u32_e32 v5, 0x7fff, v5
+; GFX11-FAKE16-NEXT: v_cmp_u_f32_e32 vcc_lo, v1, v1
+; GFX11-FAKE16-NEXT: v_cndmask_b32_e32 v1, v5, v8, vcc_lo
+; GFX11-FAKE16-NEXT: v_cmp_u_f32_e32 vcc_lo, v2, v2
+; GFX11-FAKE16-NEXT: v_add_f32_e64 v5, 0x40c00000, s0
+; GFX11-FAKE16-NEXT: s_lshl_b32 s0, s2, 16
+; GFX11-FAKE16-NEXT: v_or_b32_e32 v8, 0x400000, v4
+; GFX11-FAKE16-NEXT: v_add_f32_e64 v7, 0x40c00000, s0
+; GFX11-FAKE16-NEXT: s_lshl_b32 s0, s3, 16
+; GFX11-FAKE16-NEXT: v_cndmask_b32_e32 v6, v6, v9, vcc_lo
+; GFX11-FAKE16-NEXT: v_add_f32_e64 v9, 0x40c00000, s0
+; GFX11-FAKE16-NEXT: v_cmp_u_f32_e32 vcc_lo, v4, v4
+; GFX11-FAKE16-NEXT: v_or_b32_e32 v13, 0x400000, v7
+; GFX11-FAKE16-NEXT: v_add_nc_u32_e32 v3, v10, v4
+; GFX11-FAKE16-NEXT: v_bfe_u32 v10, v7, 16, 1
+; GFX11-FAKE16-NEXT: v_bfe_u32 v12, v9, 16, 1
+; GFX11-FAKE16-NEXT: v_or_b32_e32 v14, 0x400000, v9
+; GFX11-FAKE16-NEXT: s_pack_lh_b32_b16 s0, 0, s3
+; GFX11-FAKE16-NEXT: s_delay_alu instid0(VALU_DEP_3) | instskip(NEXT) | instid1(VALU_DEP_3)
+; GFX11-FAKE16-NEXT: v_add_nc_u32_e32 v4, v10, v7
+; GFX11-FAKE16-NEXT: v_add_nc_u32_e32 v10, v12, v9
+; GFX11-FAKE16-NEXT: v_add_f32_e64 v11, 0x40c00000, s0
+; GFX11-FAKE16-NEXT: v_or_b32_e32 v12, 0x400000, v5
+; GFX11-FAKE16-NEXT: s_delay_alu instid0(VALU_DEP_4) | instskip(NEXT) | instid1(VALU_DEP_4)
+; GFX11-FAKE16-NEXT: v_add_nc_u32_e32 v4, 0x7fff, v4
+; GFX11-FAKE16-NEXT: v_add_nc_u32_e32 v10, 0x7fff, v10
+; GFX11-FAKE16-NEXT: v_add_nc_u32_e32 v2, 0x7fff, v3
+; GFX11-FAKE16-NEXT: v_bfe_u32 v3, v5, 16, 1
+; GFX11-FAKE16-NEXT: s_delay_alu instid0(VALU_DEP_2) | instskip(SKIP_1) | instid1(VALU_DEP_3)
+; GFX11-FAKE16-NEXT: v_cndmask_b32_e32 v2, v2, v8, vcc_lo
+; GFX11-FAKE16-NEXT: v_cmp_u_f32_e32 vcc_lo, v7, v7
+; GFX11-FAKE16-NEXT: v_add_nc_u32_e32 v3, v3, v5
+; GFX11-FAKE16-NEXT: v_bfe_u32 v8, v11, 16, 1
+; GFX11-FAKE16-NEXT: v_cndmask_b32_e32 v4, v4, v13, vcc_lo
+; GFX11-FAKE16-NEXT: v_cmp_u_f32_e32 vcc_lo, v9, v9
+; GFX11-FAKE16-NEXT: s_delay_alu instid0(VALU_DEP_4) | instskip(NEXT) | instid1(VALU_DEP_4)
+; GFX11-FAKE16-NEXT: v_add_nc_u32_e32 v3, 0x7fff, v3
+; GFX11-FAKE16-NEXT: v_add_nc_u32_e32 v8, v8, v11
+; GFX11-FAKE16-NEXT: s_delay_alu instid0(VALU_DEP_4) | instskip(SKIP_2) | instid1(VALU_DEP_4)
+; GFX11-FAKE16-NEXT: v_lshrrev_b32_e32 v4, 16, v4
+; GFX11-FAKE16-NEXT: v_cndmask_b32_e32 v9, v10, v14, vcc_lo
+; GFX11-FAKE16-NEXT: v_cmp_u_f32_e32 vcc_lo, v5, v5
+; GFX11-FAKE16-NEXT: v_add_nc_u32_e32 v7, 0x7fff, v8
+; GFX11-FAKE16-NEXT: v_or_b32_e32 v8, 0x400000, v11
+; GFX11-FAKE16-NEXT: v_cndmask_b32_e32 v5, v3, v12, vcc_lo
+; GFX11-FAKE16-NEXT: v_cmp_u_f32_e32 vcc_lo, v11, v11
+; GFX11-FAKE16-NEXT: s_delay_alu instid0(VALU_DEP_3) | instskip(SKIP_4) | instid1(VALU_DEP_4)
+; GFX11-FAKE16-NEXT: v_cndmask_b32_e32 v3, v7, v8, vcc_lo
+; GFX11-FAKE16-NEXT: v_lshrrev_b32_e32 v7, 16, v9
+; GFX11-FAKE16-NEXT: v_lshrrev_b32_e32 v8, 16, v2
+; GFX11-FAKE16-NEXT: v_lshrrev_b32_e32 v9, 16, v1
+; GFX11-FAKE16-NEXT: v_and_or_b32 v2, 0xffff0000, v5, v4
+; GFX11-FAKE16-NEXT: v_and_or_b32 v3, 0xffff0000, v3, v7
+; GFX11-FAKE16-NEXT: s_delay_alu instid0(VALU_DEP_4) | instskip(NEXT) | instid1(VALU_DEP_4)
+; GFX11-FAKE16-NEXT: v_and_or_b32 v1, 0xffff0000, v6, v8
+; GFX11-FAKE16-NEXT: v_and_or_b32 v0, 0xffff0000, v0, v9
+; GFX11-FAKE16-NEXT: s_setpc_b64 s[30:31]
+; GFX11-FAKE16-NEXT: .LBB95_3:
+; GFX11-FAKE16-NEXT: s_branch .LBB95_2
+; GFX11-FAKE16-NEXT: .LBB95_4:
+; GFX11-FAKE16-NEXT: v_dual_mov_b32 v0, s0 :: v_dual_mov_b32 v1, s1
+; GFX11-FAKE16-NEXT: v_dual_mov_b32 v2, s2 :: v_dual_mov_b32 v3, s3
+; GFX11-FAKE16-NEXT: s_setpc_b64 s[30:31]
%cmp = icmp eq i32 %b, 0
br i1 %cmp, label %cmp.true, label %cmp.false
@@ -21477,112 +21948,210 @@ define inreg <8 x half> @bitcast_v8bf16_to_v8f16_scalar(<8 x bfloat> inreg %a, i
; GFX9-NEXT: v_mov_b32_e32 v3, s19
; GFX9-NEXT: s_setpc_b64 s[30:31]
;
-; GFX11-LABEL: bitcast_v8bf16_to_v8f16_scalar:
-; GFX11: ; %bb.0:
-; GFX11-NEXT: s_waitcnt vmcnt(0) expcnt(0) lgkmcnt(0)
-; GFX11-NEXT: s_cmp_lg_u32 s16, 0
-; GFX11-NEXT: s_mov_b32 s4, 0
-; GFX11-NEXT: s_cbranch_scc0 .LBB103_3
-; GFX11-NEXT: ; %bb.1: ; %Flow
-; GFX11-NEXT: s_and_not1_b32 vcc_lo, exec_lo, s4
-; GFX11-NEXT: s_cbranch_vccnz .LBB103_4
-; GFX11-NEXT: .LBB103_2: ; %cmp.true
-; GFX11-NEXT: s_pack_lh_b32_b16 s4, 0, s0
-; GFX11-NEXT: s_lshl_b32 s0, s0, 16
-; GFX11-NEXT: v_add_f32_e64 v0, 0x40c00000, s4
-; GFX11-NEXT: s_pack_lh_b32_b16 s4, 0, s1
-; GFX11-NEXT: v_add_f32_e64 v1, 0x40c00000, s0
-; GFX11-NEXT: v_add_f32_e64 v2, 0x40c00000, s4
-; GFX11-NEXT: s_lshl_b32 s0, s1, 16
-; GFX11-NEXT: v_bfe_u32 v3, v0, 16, 1
-; GFX11-NEXT: v_or_b32_e32 v7, 0x400000, v0
-; GFX11-NEXT: v_or_b32_e32 v8, 0x400000, v1
-; GFX11-NEXT: v_bfe_u32 v6, v2, 16, 1
-; GFX11-NEXT: v_bfe_u32 v5, v1, 16, 1
-; GFX11-NEXT: v_or_b32_e32 v9, 0x400000, v2
-; GFX11-NEXT: v_cmp_u_f32_e32 vcc_lo, v0, v0
-; GFX11-NEXT: v_add_f32_e64 v4, 0x40c00000, s0
-; GFX11-NEXT: v_add_nc_u32_e32 v6, v6, v2
-; GFX11-NEXT: s_pack_lh_b32_b16 s0, 0, s2
-; GFX11-NEXT: s_delay_alu instid0(VALU_DEP_2) | instskip(NEXT) | instid1(VALU_DEP_2)
-; GFX11-NEXT: v_bfe_u32 v10, v4, 16, 1
-; GFX11-NEXT: v_add_nc_u32_e32 v6, 0x7fff, v6
-; GFX11-NEXT: v_add_nc_u32_e32 v3, v3, v0
-; GFX11-NEXT: s_delay_alu instid0(VALU_DEP_1) | instskip(NEXT) | instid1(VALU_DEP_1)
-; GFX11-NEXT: v_add_nc_u32_e32 v3, 0x7fff, v3
-; GFX11-NEXT: v_dual_cndmask_b32 v0, v3, v7 :: v_dual_add_nc_u32 v5, v5, v1
-; GFX11-NEXT: s_delay_alu instid0(VALU_DEP_1) | instskip(SKIP_1) | instid1(VALU_DEP_3)
-; GFX11-NEXT: v_add_nc_u32_e32 v5, 0x7fff, v5
-; GFX11-NEXT: v_cmp_u_f32_e32 vcc_lo, v1, v1
-; GFX11-NEXT: v_lshrrev_b32_e32 v0, 16, v0
-; GFX11-NEXT: s_delay_alu instid0(VALU_DEP_3)
-; GFX11-NEXT: v_cndmask_b32_e32 v1, v5, v8, vcc_lo
-; GFX11-NEXT: v_cmp_u_f32_e32 vcc_lo, v2, v2
-; GFX11-NEXT: v_add_f32_e64 v5, 0x40c00000, s0
-; GFX11-NEXT: s_lshl_b32 s0, s2, 16
-; GFX11-NEXT: v_or_b32_e32 v8, 0x400000, v4
-; GFX11-NEXT: v_add_f32_e64 v7, 0x40c00000, s0
-; GFX11-NEXT: s_lshl_b32 s0, s3, 16
-; GFX11-NEXT: v_cndmask_b32_e32 v2, v6, v9, vcc_lo
-; GFX11-NEXT: v_add_f32_e64 v9, 0x40c00000, s0
-; GFX11-NEXT: v_cmp_u_f32_e32 vcc_lo, v4, v4
-; GFX11-NEXT: v_or_b32_e32 v13, 0x400000, v7
-; GFX11-NEXT: v_add_nc_u32_e32 v3, v10, v4
-; GFX11-NEXT: v_bfe_u32 v10, v7, 16, 1
-; GFX11-NEXT: v_bfe_u32 v12, v9, 16, 1
-; GFX11-NEXT: v_lshrrev_b32_e32 v6, 16, v2
-; GFX11-NEXT: v_or_b32_e32 v14, 0x400000, v9
-; GFX11-NEXT: s_pack_lh_b32_b16 s0, 0, s3
-; GFX11-NEXT: v_add_nc_u32_e32 v4, v10, v7
-; GFX11-NEXT: v_add_nc_u32_e32 v10, v12, v9
-; GFX11-NEXT: v_add_f32_e64 v11, 0x40c00000, s0
-; GFX11-NEXT: v_or_b32_e32 v12, 0x400000, v5
-; GFX11-NEXT: v_lshrrev_b32_e32 v1, 16, v1
-; GFX11-NEXT: v_add_nc_u32_e32 v4, 0x7fff, v4
-; GFX11-NEXT: v_add_nc_u32_e32 v10, 0x7fff, v10
-; GFX11-NEXT: v_add_nc_u32_e32 v2, 0x7fff, v3
-; GFX11-NEXT: v_bfe_u32 v3, v5, 16, 1
-; GFX11-NEXT: s_delay_alu instid0(VALU_DEP_2) | instskip(SKIP_1) | instid1(VALU_DEP_3)
-; GFX11-NEXT: v_cndmask_b32_e32 v2, v2, v8, vcc_lo
-; GFX11-NEXT: v_cmp_u_f32_e32 vcc_lo, v7, v7
-; GFX11-NEXT: v_add_nc_u32_e32 v3, v3, v5
-; GFX11-NEXT: v_bfe_u32 v8, v11, 16, 1
-; GFX11-NEXT: s_delay_alu instid0(VALU_DEP_4) | instskip(SKIP_4) | instid1(VALU_DEP_4)
-; GFX11-NEXT: v_lshrrev_b32_e32 v2, 16, v2
-; GFX11-NEXT: v_cndmask_b32_e32 v4, v4, v13, vcc_lo
-; GFX11-NEXT: v_cmp_u_f32_e32 vcc_lo, v9, v9
-; GFX11-NEXT: v_add_nc_u32_e32 v3, 0x7fff, v3
-; GFX11-NEXT: v_add_nc_u32_e32 v8, v8, v11
-; GFX11-NEXT: v_lshrrev_b32_e32 v4, 16, v4
-; GFX11-NEXT: v_cndmask_b32_e32 v9, v10, v14, vcc_lo
-; GFX11-NEXT: v_cmp_u_f32_e32 vcc_lo, v5, v5
-; GFX11-NEXT: s_delay_alu instid0(VALU_DEP_4) | instskip(SKIP_4) | instid1(VALU_DEP_4)
-; GFX11-NEXT: v_add_nc_u32_e32 v7, 0x7fff, v8
-; GFX11-NEXT: v_or_b32_e32 v8, 0x400000, v11
-; GFX11-NEXT: v_and_b32_e32 v4, 0xffff, v4
-; GFX11-NEXT: v_cndmask_b32_e32 v3, v3, v12, vcc_lo
-; GFX11-NEXT: v_cmp_u_f32_e32 vcc_lo, v11, v11
-; GFX11-NEXT: v_cndmask_b32_e32 v5, v7, v8, vcc_lo
-; GFX11-NEXT: v_lshrrev_b32_e32 v7, 16, v9
-; GFX11-NEXT: s_delay_alu instid0(VALU_DEP_4) | instskip(SKIP_1) | instid1(VALU_DEP_4)
-; GFX11-NEXT: v_lshrrev_b32_e32 v8, 16, v3
-; GFX11-NEXT: v_and_b32_e32 v9, 0xffff, v1
-; GFX11-NEXT: v_lshrrev_b32_e32 v3, 16, v5
-; GFX11-NEXT: s_delay_alu instid0(VALU_DEP_4) | instskip(SKIP_3) | instid1(VALU_DEP_4)
-; GFX11-NEXT: v_and_b32_e32 v5, 0xffff, v7
-; GFX11-NEXT: v_and_b32_e32 v7, 0xffff, v2
-; GFX11-NEXT: v_lshl_or_b32 v2, v8, 16, v4
-; GFX11-NEXT: v_lshl_or_b32 v0, v0, 16, v9
-; GFX11-NEXT: v_lshl_or_b32 v3, v3, 16, v5
-; GFX11-NEXT: s_delay_alu instid0(VALU_DEP_4)
-; GFX11-NEXT: v_lshl_or_b32 v1, v6, 16, v7
-; GFX11-NEXT: s_setpc_b64 s[30:31]
-; GFX11-NEXT: .LBB103_3:
-; GFX11-NEXT: s_branch .LBB103_2
-; GFX11-NEXT: .LBB103_4:
-; GFX11-NEXT: v_dual_mov_b32 v0, s0 :: v_dual_mov_b32 v1, s1
-; GFX11-NEXT: v_dual_mov_b32 v2, s2 :: v_dual_mov_b32 v3, s3
-; GFX11-NEXT: s_setpc_b64 s[30:31]
+; GFX11-TRUE16-LABEL: bitcast_v8bf16_to_v8f16_scalar:
+; GFX11-TRUE16: ; %bb.0:
+; GFX11-TRUE16-NEXT: s_waitcnt vmcnt(0) expcnt(0) lgkmcnt(0)
+; GFX11-TRUE16-NEXT: s_cmp_lg_u32 s16, 0
+; GFX11-TRUE16-NEXT: s_mov_b32 s4, 0
+; GFX11-TRUE16-NEXT: s_cbranch_scc0 .LBB103_3
+; GFX11-TRUE16-NEXT: ; %bb.1: ; %Flow
+; GFX11-TRUE16-NEXT: s_and_not1_b32 vcc_lo, exec_lo, s4
+; GFX11-TRUE16-NEXT: s_cbranch_vccnz .LBB103_4
+; GFX11-TRUE16-NEXT: .LBB103_2: ; %cmp.true
+; GFX11-TRUE16-NEXT: s_pack_lh_b32_b16 s4, 0, s0
+; GFX11-TRUE16-NEXT: s_lshl_b32 s0, s0, 16
+; GFX11-TRUE16-NEXT: v_add_f32_e64 v0, 0x40c00000, s4
+; GFX11-TRUE16-NEXT: s_pack_lh_b32_b16 s4, 0, s1
+; GFX11-TRUE16-NEXT: v_add_f32_e64 v1, 0x40c00000, s0
+; GFX11-TRUE16-NEXT: v_add_f32_e64 v2, 0x40c00000, s4
+; GFX11-TRUE16-NEXT: s_lshl_b32 s0, s1, 16
+; GFX11-TRUE16-NEXT: v_bfe_u32 v3, v0, 16, 1
+; GFX11-TRUE16-NEXT: v_or_b32_e32 v7, 0x400000, v0
+; GFX11-TRUE16-NEXT: v_or_b32_e32 v8, 0x400000, v1
+; GFX11-TRUE16-NEXT: v_bfe_u32 v6, v2, 16, 1
+; GFX11-TRUE16-NEXT: v_bfe_u32 v5, v1, 16, 1
+; GFX11-TRUE16-NEXT: v_or_b32_e32 v9, 0x400000, v2
+; GFX11-TRUE16-NEXT: v_cmp_u_f32_e32 vcc_lo, v0, v0
+; GFX11-TRUE16-NEXT: v_add_f32_e64 v4, 0x40c00000, s0
+; GFX11-TRUE16-NEXT: v_add_nc_u32_e32 v6, v6, v2
+; GFX11-TRUE16-NEXT: s_pack_lh_b32_b16 s0, 0, s2
+; GFX11-TRUE16-NEXT: s_pack_lh_b32_b16 s1, 0, s3
+; GFX11-TRUE16-NEXT: s_delay_alu instid0(VALU_DEP_2) | instskip(NEXT) | instid1(VALU_DEP_2)
+; GFX11-TRUE16-NEXT: v_bfe_u32 v10, v4, 16, 1
+; GFX11-TRUE16-NEXT: v_add_nc_u32_e32 v6, 0x7fff, v6
+; GFX11-TRUE16-NEXT: v_add_nc_u32_e32 v3, v3, v0
+; GFX11-TRUE16-NEXT: s_delay_alu instid0(VALU_DEP_1) | instskip(NEXT) | instid1(VALU_DEP_1)
+; GFX11-TRUE16-NEXT: v_add_nc_u32_e32 v3, 0x7fff, v3
+; GFX11-TRUE16-NEXT: v_dual_cndmask_b32 v0, v3, v7 :: v_dual_add_nc_u32 v5, v5, v1
+; GFX11-TRUE16-NEXT: s_delay_alu instid0(VALU_DEP_1) | instskip(SKIP_3) | instid1(VALU_DEP_3)
+; GFX11-TRUE16-NEXT: v_add_nc_u32_e32 v5, 0x7fff, v5
+; GFX11-TRUE16-NEXT: v_cmp_u_f32_e32 vcc_lo, v1, v1
+; GFX11-TRUE16-NEXT: v_add_f32_e64 v3, 0x40c00000, s0
+; GFX11-TRUE16-NEXT: s_lshl_b32 s0, s2, 16
+; GFX11-TRUE16-NEXT: v_cndmask_b32_e32 v1, v5, v8, vcc_lo
+; GFX11-TRUE16-NEXT: v_cmp_u_f32_e32 vcc_lo, v2, v2
+; GFX11-TRUE16-NEXT: v_add_nc_u32_e32 v5, v10, v4
+; GFX11-TRUE16-NEXT: v_bfe_u32 v7, v3, 16, 1
+; GFX11-TRUE16-NEXT: v_or_b32_e32 v11, 0x400000, v3
+; GFX11-TRUE16-NEXT: v_cndmask_b32_e32 v2, v6, v9, vcc_lo
+; GFX11-TRUE16-NEXT: v_lshrrev_b32_e32 v6, 16, v0
+; GFX11-TRUE16-NEXT: v_lshrrev_b32_e32 v0, 16, v1
+; GFX11-TRUE16-NEXT: v_add_nc_u32_e32 v1, 0x7fff, v5
+; GFX11-TRUE16-NEXT: v_add_nc_u32_e32 v5, v7, v3
+; GFX11-TRUE16-NEXT: v_lshrrev_b32_e32 v8, 16, v2
+; GFX11-TRUE16-NEXT: v_or_b32_e32 v2, 0x400000, v4
+; GFX11-TRUE16-NEXT: v_add_f32_e64 v7, 0x40c00000, s0
+; GFX11-TRUE16-NEXT: v_cmp_u_f32_e32 vcc_lo, v4, v4
+; GFX11-TRUE16-NEXT: s_lshl_b32 s0, s3, 16
+; GFX11-TRUE16-NEXT: v_add_f32_e64 v9, 0x40c00000, s1
+; GFX11-TRUE16-NEXT: v_add_nc_u32_e32 v5, 0x7fff, v5
+; GFX11-TRUE16-NEXT: v_bfe_u32 v4, v7, 16, 1
+; GFX11-TRUE16-NEXT: v_cndmask_b32_e32 v1, v1, v2, vcc_lo
+; GFX11-TRUE16-NEXT: v_add_f32_e64 v2, 0x40c00000, s0
+; GFX11-TRUE16-NEXT: v_bfe_u32 v10, v9, 16, 1
+; GFX11-TRUE16-NEXT: v_cmp_u_f32_e32 vcc_lo, v3, v3
+; GFX11-TRUE16-NEXT: v_add_nc_u32_e32 v4, v4, v7
+; GFX11-TRUE16-NEXT: v_lshrrev_b32_e32 v1, 16, v1
+; GFX11-TRUE16-NEXT: v_bfe_u32 v12, v2, 16, 1
+; GFX11-TRUE16-NEXT: v_dual_cndmask_b32 v3, v5, v11 :: v_dual_add_nc_u32 v10, v10, v9
+; GFX11-TRUE16-NEXT: v_or_b32_e32 v11, 0x400000, v9
+; GFX11-TRUE16-NEXT: v_or_b32_e32 v13, 0x400000, v2
+; GFX11-TRUE16-NEXT: s_delay_alu instid0(VALU_DEP_4)
+; GFX11-TRUE16-NEXT: v_add_nc_u32_e32 v5, v12, v2
+; GFX11-TRUE16-NEXT: v_cmp_u_f32_e32 vcc_lo, v9, v9
+; GFX11-TRUE16-NEXT: v_or_b32_e32 v12, 0x400000, v7
+; GFX11-TRUE16-NEXT: v_mov_b16_e32 v1.h, v8.l
+; GFX11-TRUE16-NEXT: v_mov_b16_e32 v0.h, v6.l
+; GFX11-TRUE16-NEXT: v_add_nc_u32_e32 v5, 0x7fff, v5
+; GFX11-TRUE16-NEXT: v_add_nc_u32_e32 v10, 0x7fff, v10
+; GFX11-TRUE16-NEXT: s_delay_alu instid0(VALU_DEP_1) | instskip(SKIP_1) | instid1(VALU_DEP_2)
+; GFX11-TRUE16-NEXT: v_dual_cndmask_b32 v9, v10, v11 :: v_dual_add_nc_u32 v4, 0x7fff, v4
+; GFX11-TRUE16-NEXT: v_cmp_u_f32_e32 vcc_lo, v7, v7
+; GFX11-TRUE16-NEXT: v_lshrrev_b32_e32 v7, 16, v9
+; GFX11-TRUE16-NEXT: s_delay_alu instid0(VALU_DEP_3) | instskip(SKIP_2) | instid1(VALU_DEP_3)
+; GFX11-TRUE16-NEXT: v_cndmask_b32_e32 v4, v4, v12, vcc_lo
+; GFX11-TRUE16-NEXT: v_cmp_u_f32_e32 vcc_lo, v2, v2
+; GFX11-TRUE16-NEXT: v_lshrrev_b32_e32 v9, 16, v3
+; GFX11-TRUE16-NEXT: v_lshrrev_b32_e32 v2, 16, v4
+; GFX11-TRUE16-NEXT: v_cndmask_b32_e32 v5, v5, v13, vcc_lo
+; GFX11-TRUE16-NEXT: s_delay_alu instid0(VALU_DEP_3) | instskip(NEXT) | instid1(VALU_DEP_2)
+; GFX11-TRUE16-NEXT: v_mov_b16_e32 v2.h, v9.l
+; GFX11-TRUE16-NEXT: v_lshrrev_b32_e32 v3, 16, v5
+; GFX11-TRUE16-NEXT: v_mov_b16_e32 v3.h, v7.l
+; GFX11-TRUE16-NEXT: s_setpc_b64 s[30:31]
+; GFX11-TRUE16-NEXT: .LBB103_3:
+; GFX11-TRUE16-NEXT: s_branch .LBB103_2
+; GFX11-TRUE16-NEXT: .LBB103_4:
+; GFX11-TRUE16-NEXT: v_dual_mov_b32 v0, s0 :: v_dual_mov_b32 v1, s1
+; GFX11-TRUE16-NEXT: v_dual_mov_b32 v2, s2 :: v_dual_mov_b32 v3, s3
+; GFX11-TRUE16-NEXT: s_setpc_b64 s[30:31]
+;
+; GFX11-FAKE16-LABEL: bitcast_v8bf16_to_v8f16_scalar:
+; GFX11-FAKE16: ; %bb.0:
+; GFX11-FAKE16-NEXT: s_waitcnt vmcnt(0) expcnt(0) lgkmcnt(0)
+; GFX11-FAKE16-NEXT: s_cmp_lg_u32 s16, 0
+; GFX11-FAKE16-NEXT: s_mov_b32 s4, 0
+; GFX11-FAKE16-NEXT: s_cbranch_scc0 .LBB103_3
+; GFX11-FAKE16-NEXT: ; %bb.1: ; %Flow
+; GFX11-FAKE16-NEXT: s_and_not1_b32 vcc_lo, exec_lo, s4
+; GFX11-FAKE16-NEXT: s_cbranch_vccnz .LBB103_4
+; GFX11-FAKE16-NEXT: .LBB103_2: ; %cmp.true
+; GFX11-FAKE16-NEXT: s_pack_lh_b32_b16 s4, 0, s0
+; GFX11-FAKE16-NEXT: s_lshl_b32 s0, s0, 16
+; GFX11-FAKE16-NEXT: v_add_f32_e64 v0, 0x40c00000, s4
+; GFX11-FAKE16-NEXT: s_pack_lh_b32_b16 s4, 0, s1
+; GFX11-FAKE16-NEXT: v_add_f32_e64 v1, 0x40c00000, s0
+; GFX11-FAKE16-NEXT: v_add_f32_e64 v2, 0x40c00000, s4
+; GFX11-FAKE16-NEXT: s_lshl_b32 s0, s1, 16
+; GFX11-FAKE16-NEXT: v_bfe_u32 v3, v0, 16, 1
+; GFX11-FAKE16-NEXT: v_or_b32_e32 v7, 0x400000, v0
+; GFX11-FAKE16-NEXT: v_or_b32_e32 v8, 0x400000, v1
+; GFX11-FAKE16-NEXT: v_bfe_u32 v6, v2, 16, 1
+; GFX11-FAKE16-NEXT: v_bfe_u32 v5, v1, 16, 1
+; GFX11-FAKE16-NEXT: v_or_b32_e32 v9, 0x400000, v2
+; GFX11-FAKE16-NEXT: v_cmp_u_f32_e32 vcc_lo, v0, v0
+; GFX11-FAKE16-NEXT: v_add_f32_e64 v4, 0x40c00000, s0
+; GFX11-FAKE16-NEXT: v_add_nc_u32_e32 v6, v6, v2
+; GFX11-FAKE16-NEXT: s_pack_lh_b32_b16 s0, 0, s2
+; GFX11-FAKE16-NEXT: s_delay_alu instid0(VALU_DEP_2) | instskip(NEXT) | instid1(VALU_DEP_2)
+; GFX11-FAKE16-NEXT: v_bfe_u32 v10, v4, 16, 1
+; GFX11-FAKE16-NEXT: v_add_nc_u32_e32 v6, 0x7fff, v6
+; GFX11-FAKE16-NEXT: v_add_nc_u32_e32 v3, v3, v0
+; GFX11-FAKE16-NEXT: s_delay_alu instid0(VALU_DEP_1) | instskip(NEXT) | instid1(VALU_DEP_1)
+; GFX11-FAKE16-NEXT: v_add_nc_u32_e32 v3, 0x7fff, v3
+; GFX11-FAKE16-NEXT: v_dual_cndmask_b32 v0, v3, v7 :: v_dual_add_nc_u32 v5, v5, v1
+; GFX11-FAKE16-NEXT: s_delay_alu instid0(VALU_DEP_1) | instskip(SKIP_1) | instid1(VALU_DEP_3)
+; GFX11-FAKE16-NEXT: v_add_nc_u32_e32 v5, 0x7fff, v5
+; GFX11-FAKE16-NEXT: v_cmp_u_f32_e32 vcc_lo, v1, v1
+; GFX11-FAKE16-NEXT: v_lshrrev_b32_e32 v0, 16, v0
+; GFX11-FAKE16-NEXT: s_delay_alu instid0(VALU_DEP_3)
+; GFX11-FAKE16-NEXT: v_cndmask_b32_e32 v1, v5, v8, vcc_lo
+; GFX11-FAKE16-NEXT: v_cmp_u_f32_e32 vcc_lo, v2, v2
+; GFX11-FAKE16-NEXT: v_add_f32_e64 v5, 0x40c00000, s0
+; GFX11-FAKE16-NEXT: s_lshl_b32 s0, s2, 16
+; GFX11-FAKE16-NEXT: v_or_b32_e32 v8, 0x400000, v4
+; GFX11-FAKE16-NEXT: v_add_f32_e64 v7, 0x40c00000, s0
+; GFX11-FAKE16-NEXT: s_lshl_b32 s0, s3, 16
+; GFX11-FAKE16-NEXT: v_cndmask_b32_e32 v2, v6, v9, vcc_lo
+; GFX11-FAKE16-NEXT: v_add_f32_e64 v9, 0x40c00000, s0
+; GFX11-FAKE16-NEXT: v_cmp_u_f32_e32 vcc_lo, v4, v4
+; GFX11-FAKE16-NEXT: v_or_b32_e32 v13, 0x400000, v7
+; GFX11-FAKE16-NEXT: v_add_nc_u32_e32 v3, v10, v4
+; GFX11-FAKE16-NEXT: v_bfe_u32 v10, v7, 16, 1
+; GFX11-FAKE16-NEXT: v_bfe_u32 v12, v9, 16, 1
+; GFX11-FAKE16-NEXT: v_lshrrev_b32_e32 v6, 16, v2
+; GFX11-FAKE16-NEXT: v_or_b32_e32 v14, 0x400000, v9
+; GFX11-FAKE16-NEXT: s_pack_lh_b32_b16 s0, 0, s3
+; GFX11-FAKE16-NEXT: v_add_nc_u32_e32 v4, v10, v7
+; GFX11-FAKE16-NEXT: v_add_nc_u32_e32 v10, v12, v9
+; GFX11-FAKE16-NEXT: v_add_f32_e64 v11, 0x40c00000, s0
+; GFX11-FAKE16-NEXT: v_or_b32_e32 v12, 0x400000, v5
+; GFX11-FAKE16-NEXT: v_lshrrev_b32_e32 v1, 16, v1
+; GFX11-FAKE16-NEXT: v_add_nc_u32_e32 v4, 0x7fff, v4
+; GFX11-FAKE16-NEXT: v_add_nc_u32_e32 v10, 0x7fff, v10
+; GFX11-FAKE16-NEXT: v_add_nc_u32_e32 v2, 0x7fff, v3
+; GFX11-FAKE16-NEXT: v_bfe_u32 v3, v5, 16, 1
+; GFX11-FAKE16-NEXT: s_delay_alu instid0(VALU_DEP_2) | instskip(SKIP_1) | instid1(VALU_DEP_3)
+; GFX11-FAKE16-NEXT: v_cndmask_b32_e32 v2, v2, v8, vcc_lo
+; GFX11-FAKE16-NEXT: v_cmp_u_f32_e32 vcc_lo, v7, v7
+; GFX11-FAKE16-NEXT: v_add_nc_u32_e32 v3, v3, v5
+; GFX11-FAKE16-NEXT: v_bfe_u32 v8, v11, 16, 1
+; GFX11-FAKE16-NEXT: s_delay_alu instid0(VALU_DEP_4) | instskip(SKIP_4) | instid1(VALU_DEP_4)
+; GFX11-FAKE16-NEXT: v_lshrrev_b32_e32 v2, 16, v2
+; GFX11-FAKE16-NEXT: v_cndmask_b32_e32 v4, v4, v13, vcc_lo
+; GFX11-FAKE16-NEXT: v_cmp_u_f32_e32 vcc_lo, v9, v9
+; GFX11-FAKE16-NEXT: v_add_nc_u32_e32 v3, 0x7fff, v3
+; GFX11-FAKE16-NEXT: v_add_nc_u32_e32 v8, v8, v11
+; GFX11-FAKE16-NEXT: v_lshrrev_b32_e32 v4, 16, v4
+; GFX11-FAKE16-NEXT: v_cndmask_b32_e32 v9, v10, v14, vcc_lo
+; GFX11-FAKE16-NEXT: v_cmp_u_f32_e32 vcc_lo, v5, v5
+; GFX11-FAKE16-NEXT: s_delay_alu instid0(VALU_DEP_4) | instskip(SKIP_4) | instid1(VALU_DEP_4)
+; GFX11-FAKE16-NEXT: v_add_nc_u32_e32 v7, 0x7fff, v8
+; GFX11-FAKE16-NEXT: v_or_b32_e32 v8, 0x400000, v11
+; GFX11-FAKE16-NEXT: v_and_b32_e32 v4, 0xffff, v4
+; GFX11-FAKE16-NEXT: v_cndmask_b32_e32 v3, v3, v12, vcc_lo
+; GFX11-FAKE16-NEXT: v_cmp_u_f32_e32 vcc_lo, v11, v11
+; GFX11-FAKE16-NEXT: v_cndmask_b32_e32 v5, v7, v8, vcc_lo
+; GFX11-FAKE16-NEXT: v_lshrrev_b32_e32 v7, 16, v9
+; GFX11-FAKE16-NEXT: s_delay_alu instid0(VALU_DEP_4) | instskip(SKIP_1) | instid1(VALU_DEP_4)
+; GFX11-FAKE16-NEXT: v_lshrrev_b32_e32 v8, 16, v3
+; GFX11-FAKE16-NEXT: v_and_b32_e32 v9, 0xffff, v1
+; GFX11-FAKE16-NEXT: v_lshrrev_b32_e32 v3, 16, v5
+; GFX11-FAKE16-NEXT: s_delay_alu instid0(VALU_DEP_4) | instskip(SKIP_3) | instid1(VALU_DEP_4)
+; GFX11-FAKE16-NEXT: v_and_b32_e32 v5, 0xffff, v7
+; GFX11-FAKE16-NEXT: v_and_b32_e32 v7, 0xffff, v2
+; GFX11-FAKE16-NEXT: v_lshl_or_b32 v2, v8, 16, v4
+; GFX11-FAKE16-NEXT: v_lshl_or_b32 v0, v0, 16, v9
+; GFX11-FAKE16-NEXT: v_lshl_or_b32 v3, v3, 16, v5
+; GFX11-FAKE16-NEXT: s_delay_alu instid0(VALU_DEP_4)
+; GFX11-FAKE16-NEXT: v_lshl_or_b32 v1, v6, 16, v7
+; GFX11-FAKE16-NEXT: s_setpc_b64 s[30:31]
+; GFX11-FAKE16-NEXT: .LBB103_3:
+; GFX11-FAKE16-NEXT: s_branch .LBB103_2
+; GFX11-FAKE16-NEXT: .LBB103_4:
+; GFX11-FAKE16-NEXT: v_dual_mov_b32 v0, s0 :: v_dual_mov_b32 v1, s1
+; GFX11-FAKE16-NEXT: v_dual_mov_b32 v2, s2 :: v_dual_mov_b32 v3, s3
+; GFX11-FAKE16-NEXT: s_setpc_b64 s[30:31]
%cmp = icmp eq i32 %b, 0
br i1 %cmp, label %cmp.true, label %cmp.false
@@ -24345,152 +24914,299 @@ define inreg <16 x i8> @bitcast_v8bf16_to_v16i8_scalar(<8 x bfloat> inreg %a, i3
; GFX9-NEXT: v_mov_b32_e32 v12, v16
; GFX9-NEXT: s_setpc_b64 s[30:31]
;
-; GFX11-LABEL: bitcast_v8bf16_to_v16i8_scalar:
-; GFX11: ; %bb.0:
-; GFX11-NEXT: s_waitcnt vmcnt(0) expcnt(0) lgkmcnt(0)
-; GFX11-NEXT: s_cmp_lg_u32 s16, 0
-; GFX11-NEXT: s_mov_b32 s8, 0
-; GFX11-NEXT: s_cbranch_scc0 .LBB109_3
-; GFX11-NEXT: ; %bb.1: ; %cmp.false
-; GFX11-NEXT: s_lshr_b32 s11, s3, 24
-; GFX11-NEXT: s_lshr_b32 s18, s3, 16
-; GFX11-NEXT: s_lshr_b32 s14, s3, 8
-; GFX11-NEXT: s_lshr_b32 s16, s2, 16
-; GFX11-NEXT: s_lshr_b32 s15, s2, 8
-; GFX11-NEXT: s_lshr_b32 s9, s1, 24
-; GFX11-NEXT: s_lshr_b32 s17, s1, 16
-; GFX11-NEXT: s_lshr_b32 s10, s1, 8
-; GFX11-NEXT: s_lshr_b32 s13, s0, 16
-; GFX11-NEXT: s_lshr_b32 s12, s0, 8
-; GFX11-NEXT: s_lshr_b64 s[6:7], s[2:3], 24
-; GFX11-NEXT: s_lshr_b64 s[4:5], s[0:1], 24
-; GFX11-NEXT: s_and_not1_b32 vcc_lo, exec_lo, s8
-; GFX11-NEXT: s_cbranch_vccnz .LBB109_4
-; GFX11-NEXT: .LBB109_2: ; %cmp.true
-; GFX11-NEXT: s_lshl_b32 s4, s1, 16
-; GFX11-NEXT: s_pack_lh_b32_b16 s1, 0, s1
-; GFX11-NEXT: v_add_f32_e64 v0, 0x40c00000, s4
-; GFX11-NEXT: v_add_f32_e64 v1, 0x40c00000, s1
-; GFX11-NEXT: s_pack_lh_b32_b16 s4, 0, s0
-; GFX11-NEXT: s_lshl_b32 s0, s0, 16
-; GFX11-NEXT: v_add_f32_e64 v3, 0x40c00000, s4
-; GFX11-NEXT: v_bfe_u32 v2, v0, 16, 1
-; GFX11-NEXT: v_bfe_u32 v4, v1, 16, 1
-; GFX11-NEXT: v_or_b32_e32 v6, 0x400000, v1
-; GFX11-NEXT: v_or_b32_e32 v8, 0x400000, v0
-; GFX11-NEXT: v_bfe_u32 v7, v3, 16, 1
-; GFX11-NEXT: v_cmp_u_f32_e32 vcc_lo, v0, v0
-; GFX11-NEXT: v_add_nc_u32_e32 v4, v4, v1
-; GFX11-NEXT: v_add_f32_e64 v5, 0x40c00000, s0
-; GFX11-NEXT: v_or_b32_e32 v9, 0x400000, v3
-; GFX11-NEXT: s_lshl_b32 s0, s3, 16
-; GFX11-NEXT: s_pack_lh_b32_b16 s1, 0, s3
-; GFX11-NEXT: v_add_nc_u32_e32 v4, 0x7fff, v4
-; GFX11-NEXT: v_add_nc_u32_e32 v2, v2, v0
-; GFX11-NEXT: s_delay_alu instid0(VALU_DEP_1) | instskip(NEXT) | instid1(VALU_DEP_1)
-; GFX11-NEXT: v_add_nc_u32_e32 v2, 0x7fff, v2
-; GFX11-NEXT: v_dual_cndmask_b32 v0, v2, v8 :: v_dual_add_nc_u32 v7, v7, v3
-; GFX11-NEXT: v_cmp_u_f32_e32 vcc_lo, v1, v1
-; GFX11-NEXT: v_or_b32_e32 v8, 0x400000, v5
-; GFX11-NEXT: s_delay_alu instid0(VALU_DEP_3)
-; GFX11-NEXT: v_add_nc_u32_e32 v7, 0x7fff, v7
-; GFX11-NEXT: v_bfe_u32 v2, v5, 16, 1
-; GFX11-NEXT: v_lshrrev_b32_e32 v17, 16, v0
-; GFX11-NEXT: v_cndmask_b32_e32 v1, v4, v6, vcc_lo
-; GFX11-NEXT: v_cmp_u_f32_e32 vcc_lo, v3, v3
-; GFX11-NEXT: v_add_f32_e64 v3, 0x40c00000, s1
-; GFX11-NEXT: v_add_nc_u32_e32 v2, v2, v5
-; GFX11-NEXT: s_delay_alu instid0(VALU_DEP_4)
-; GFX11-NEXT: v_lshrrev_b32_e32 v6, 16, v1
-; GFX11-NEXT: v_cndmask_b32_e32 v0, v7, v9, vcc_lo
-; GFX11-NEXT: v_add_f32_e64 v7, 0x40c00000, s0
-; GFX11-NEXT: s_lshl_b32 s0, s2, 16
-; GFX11-NEXT: v_cmp_u_f32_e32 vcc_lo, v5, v5
-; GFX11-NEXT: v_add_f32_e64 v9, 0x40c00000, s0
-; GFX11-NEXT: s_pack_lh_b32_b16 s0, 0, s2
-; GFX11-NEXT: v_bfe_u32 v10, v7, 16, 1
-; GFX11-NEXT: v_add_f32_e64 v11, 0x40c00000, s0
-; GFX11-NEXT: v_or_b32_e32 v13, 0x400000, v7
-; GFX11-NEXT: v_bfe_u32 v12, v9, 16, 1
-; GFX11-NEXT: v_or_b32_e32 v14, 0x400000, v9
-; GFX11-NEXT: v_add_nc_u32_e32 v5, v10, v7
-; GFX11-NEXT: v_lshrrev_b32_e32 v4, 16, v0
-; GFX11-NEXT: v_add_nc_u32_e32 v0, 0x7fff, v2
-; GFX11-NEXT: v_bfe_u32 v2, v3, 16, 1
-; GFX11-NEXT: v_add_nc_u32_e32 v10, v12, v9
-; GFX11-NEXT: v_add_nc_u32_e32 v5, 0x7fff, v5
-; GFX11-NEXT: v_or_b32_e32 v12, 0x400000, v3
-; GFX11-NEXT: v_cndmask_b32_e32 v0, v0, v8, vcc_lo
-; GFX11-NEXT: v_cmp_u_f32_e32 vcc_lo, v7, v7
-; GFX11-NEXT: v_add_nc_u32_e32 v2, v2, v3
-; GFX11-NEXT: v_bfe_u32 v8, v11, 16, 1
-; GFX11-NEXT: v_add_nc_u32_e32 v10, 0x7fff, v10
-; GFX11-NEXT: v_lshrrev_b32_e32 v0, 16, v0
-; GFX11-NEXT: v_cndmask_b32_e32 v5, v5, v13, vcc_lo
-; GFX11-NEXT: v_cmp_u_f32_e32 vcc_lo, v9, v9
-; GFX11-NEXT: v_add_nc_u32_e32 v2, 0x7fff, v2
-; GFX11-NEXT: v_add_nc_u32_e32 v8, v8, v11
-; GFX11-NEXT: s_delay_alu instid0(VALU_DEP_4)
-; GFX11-NEXT: v_lshrrev_b32_e32 v16, 16, v5
-; GFX11-NEXT: v_cndmask_b32_e32 v9, v10, v14, vcc_lo
-; GFX11-NEXT: v_cmp_u_f32_e32 vcc_lo, v3, v3
-; GFX11-NEXT: v_and_b32_e32 v1, 0xffff, v17
-; GFX11-NEXT: v_add_nc_u32_e32 v7, 0x7fff, v8
-; GFX11-NEXT: v_or_b32_e32 v8, 0x400000, v11
-; GFX11-NEXT: v_and_b32_e32 v5, 0xffff, v16
-; GFX11-NEXT: v_cndmask_b32_e32 v2, v2, v12, vcc_lo
-; GFX11-NEXT: v_cmp_u_f32_e32 vcc_lo, v11, v11
-; GFX11-NEXT: v_and_b32_e32 v11, 0xffff, v0
-; GFX11-NEXT: s_delay_alu instid0(VALU_DEP_3)
-; GFX11-NEXT: v_lshrrev_b32_e32 v14, 16, v2
-; GFX11-NEXT: v_cndmask_b32_e32 v3, v7, v8, vcc_lo
-; GFX11-NEXT: v_lshrrev_b32_e32 v8, 16, v9
-; GFX11-NEXT: v_lshl_or_b32 v2, v6, 16, v1
-; GFX11-NEXT: v_lshl_or_b32 v1, v4, 16, v11
-; GFX11-NEXT: v_lshl_or_b32 v10, v14, 16, v5
-; GFX11-NEXT: v_lshrrev_b32_e32 v3, 16, v3
-; GFX11-NEXT: v_and_b32_e32 v7, 0xffff, v8
-; GFX11-NEXT: v_lshrrev_b32_e32 v5, 8, v2
-; GFX11-NEXT: s_delay_alu instid0(VALU_DEP_4) | instskip(SKIP_1) | instid1(VALU_DEP_4)
-; GFX11-NEXT: v_lshrrev_b32_e32 v15, 24, v10
-; GFX11-NEXT: v_lshrrev_b32_e32 v13, 8, v10
-; GFX11-NEXT: v_lshl_or_b32 v9, v3, 16, v7
-; GFX11-NEXT: v_lshrrev_b64 v[3:4], 24, v[1:2]
-; GFX11-NEXT: v_lshrrev_b32_e32 v7, 24, v2
-; GFX11-NEXT: v_lshrrev_b32_e32 v2, 16, v1
-; GFX11-NEXT: v_lshrrev_b32_e32 v1, 8, v1
-; GFX11-NEXT: v_lshrrev_b64 v[11:12], 24, v[9:10]
-; GFX11-NEXT: v_lshrrev_b32_e32 v10, 16, v9
-; GFX11-NEXT: v_lshrrev_b32_e32 v9, 8, v9
-; GFX11-NEXT: s_branch .LBB109_5
-; GFX11-NEXT: .LBB109_3:
-; GFX11-NEXT: ; implicit-def: $sgpr12
-; GFX11-NEXT: ; implicit-def: $sgpr13
-; GFX11-NEXT: ; implicit-def: $sgpr4
-; GFX11-NEXT: ; implicit-def: $sgpr10
-; GFX11-NEXT: ; implicit-def: $sgpr17
-; GFX11-NEXT: ; implicit-def: $sgpr9
-; GFX11-NEXT: ; implicit-def: $sgpr15
-; GFX11-NEXT: ; implicit-def: $sgpr16
-; GFX11-NEXT: ; implicit-def: $sgpr6
-; GFX11-NEXT: ; implicit-def: $sgpr14
-; GFX11-NEXT: ; implicit-def: $sgpr18
-; GFX11-NEXT: ; implicit-def: $sgpr11
-; GFX11-NEXT: s_branch .LBB109_2
-; GFX11-NEXT: .LBB109_4:
-; GFX11-NEXT: v_dual_mov_b32 v8, s2 :: v_dual_mov_b32 v17, s1
-; GFX11-NEXT: v_dual_mov_b32 v16, s3 :: v_dual_mov_b32 v9, s15
-; GFX11-NEXT: v_dual_mov_b32 v14, s18 :: v_dual_mov_b32 v15, s11
-; GFX11-NEXT: v_dual_mov_b32 v0, s0 :: v_dual_mov_b32 v13, s14
-; GFX11-NEXT: v_dual_mov_b32 v6, s17 :: v_dual_mov_b32 v1, s12
-; GFX11-NEXT: v_dual_mov_b32 v10, s16 :: v_dual_mov_b32 v7, s9
-; GFX11-NEXT: v_dual_mov_b32 v2, s13 :: v_dual_mov_b32 v5, s10
-; GFX11-NEXT: v_mov_b32_e32 v11, s6
-; GFX11-NEXT: v_mov_b32_e32 v3, s4
-; GFX11-NEXT: .LBB109_5: ; %end
-; GFX11-NEXT: v_mov_b32_e32 v4, v17
-; GFX11-NEXT: v_mov_b32_e32 v12, v16
-; GFX11-NEXT: s_setpc_b64 s[30:31]
+; GFX11-TRUE16-LABEL: bitcast_v8bf16_to_v16i8_scalar:
+; GFX11-TRUE16: ; %bb.0:
+; GFX11-TRUE16-NEXT: s_waitcnt vmcnt(0) expcnt(0) lgkmcnt(0)
+; GFX11-TRUE16-NEXT: s_cmp_lg_u32 s16, 0
+; GFX11-TRUE16-NEXT: s_mov_b32 s8, 0
+; GFX11-TRUE16-NEXT: s_cbranch_scc0 .LBB109_3
+; GFX11-TRUE16-NEXT: ; %bb.1: ; %cmp.false
+; GFX11-TRUE16-NEXT: s_lshr_b32 s11, s3, 24
+; GFX11-TRUE16-NEXT: s_lshr_b32 s18, s3, 16
+; GFX11-TRUE16-NEXT: s_lshr_b32 s14, s3, 8
+; GFX11-TRUE16-NEXT: s_lshr_b32 s16, s2, 16
+; GFX11-TRUE16-NEXT: s_lshr_b32 s15, s2, 8
+; GFX11-TRUE16-NEXT: s_lshr_b32 s9, s1, 24
+; GFX11-TRUE16-NEXT: s_lshr_b32 s17, s1, 16
+; GFX11-TRUE16-NEXT: s_lshr_b32 s10, s1, 8
+; GFX11-TRUE16-NEXT: s_lshr_b32 s13, s0, 16
+; GFX11-TRUE16-NEXT: s_lshr_b32 s12, s0, 8
+; GFX11-TRUE16-NEXT: s_lshr_b64 s[6:7], s[2:3], 24
+; GFX11-TRUE16-NEXT: s_lshr_b64 s[4:5], s[0:1], 24
+; GFX11-TRUE16-NEXT: s_and_not1_b32 vcc_lo, exec_lo, s8
+; GFX11-TRUE16-NEXT: s_cbranch_vccnz .LBB109_4
+; GFX11-TRUE16-NEXT: .LBB109_2: ; %cmp.true
+; GFX11-TRUE16-NEXT: s_lshl_b32 s4, s1, 16
+; GFX11-TRUE16-NEXT: s_pack_lh_b32_b16 s1, 0, s1
+; GFX11-TRUE16-NEXT: v_add_f32_e64 v0, 0x40c00000, s4
+; GFX11-TRUE16-NEXT: v_add_f32_e64 v1, 0x40c00000, s1
+; GFX11-TRUE16-NEXT: s_pack_lh_b32_b16 s1, 0, s0
+; GFX11-TRUE16-NEXT: s_pack_lh_b32_b16 s4, 0, s3
+; GFX11-TRUE16-NEXT: v_add_f32_e64 v4, 0x40c00000, s1
+; GFX11-TRUE16-NEXT: v_bfe_u32 v2, v0, 16, 1
+; GFX11-TRUE16-NEXT: v_bfe_u32 v3, v1, 16, 1
+; GFX11-TRUE16-NEXT: v_or_b32_e32 v8, 0x400000, v0
+; GFX11-TRUE16-NEXT: v_cmp_u_f32_e32 vcc_lo, v0, v0
+; GFX11-TRUE16-NEXT: v_add_f32_e64 v7, 0x40c00000, s4
+; GFX11-TRUE16-NEXT: v_or_b32_e32 v9, 0x400000, v1
+; GFX11-TRUE16-NEXT: v_add_nc_u32_e32 v3, v3, v1
+; GFX11-TRUE16-NEXT: v_bfe_u32 v6, v4, 16, 1
+; GFX11-TRUE16-NEXT: s_lshl_b32 s0, s0, 16
+; GFX11-TRUE16-NEXT: v_or_b32_e32 v12, 0x400000, v7
+; GFX11-TRUE16-NEXT: s_lshl_b32 s1, s2, 16
+; GFX11-TRUE16-NEXT: v_add_nc_u32_e32 v3, 0x7fff, v3
+; GFX11-TRUE16-NEXT: v_add_nc_u32_e32 v2, v2, v0
+; GFX11-TRUE16-NEXT: v_add_f32_e64 v5, 0x40c00000, s0
+; GFX11-TRUE16-NEXT: v_add_nc_u32_e32 v6, v6, v4
+; GFX11-TRUE16-NEXT: v_add_f32_e64 v10, 0x40c00000, s1
+; GFX11-TRUE16-NEXT: s_lshl_b32 s0, s3, 16
+; GFX11-TRUE16-NEXT: v_add_nc_u32_e32 v2, 0x7fff, v2
+; GFX11-TRUE16-NEXT: s_delay_alu instid0(VALU_DEP_2) | instskip(NEXT) | instid1(VALU_DEP_2)
+; GFX11-TRUE16-NEXT: v_bfe_u32 v11, v10, 16, 1
+; GFX11-TRUE16-NEXT: v_cndmask_b32_e32 v0, v2, v8, vcc_lo
+; GFX11-TRUE16-NEXT: v_cmp_u_f32_e32 vcc_lo, v1, v1
+; GFX11-TRUE16-NEXT: v_or_b32_e32 v2, 0x400000, v4
+; GFX11-TRUE16-NEXT: v_bfe_u32 v8, v5, 16, 1
+; GFX11-TRUE16-NEXT: v_add_nc_u32_e32 v11, v11, v10
+; GFX11-TRUE16-NEXT: v_lshrrev_b32_e32 v17, 16, v0
+; GFX11-TRUE16-NEXT: v_cndmask_b32_e32 v1, v3, v9, vcc_lo
+; GFX11-TRUE16-NEXT: v_add_nc_u32_e32 v3, 0x7fff, v6
+; GFX11-TRUE16-NEXT: v_cmp_u_f32_e32 vcc_lo, v4, v4
+; GFX11-TRUE16-NEXT: v_add_nc_u32_e32 v11, 0x7fff, v11
+; GFX11-TRUE16-NEXT: s_delay_alu instid0(VALU_DEP_4) | instskip(NEXT) | instid1(VALU_DEP_4)
+; GFX11-TRUE16-NEXT: v_lshrrev_b32_e32 v6, 16, v1
+; GFX11-TRUE16-NEXT: v_dual_cndmask_b32 v0, v3, v2 :: v_dual_add_nc_u32 v1, v8, v5
+; GFX11-TRUE16-NEXT: v_bfe_u32 v3, v7, 16, 1
+; GFX11-TRUE16-NEXT: v_add_f32_e64 v8, 0x40c00000, s0
+; GFX11-TRUE16-NEXT: v_cmp_u_f32_e32 vcc_lo, v5, v5
+; GFX11-TRUE16-NEXT: s_pack_lh_b32_b16 s0, 0, s2
+; GFX11-TRUE16-NEXT: v_lshrrev_b32_e32 v4, 16, v0
+; GFX11-TRUE16-NEXT: v_add_nc_u32_e32 v0, 0x7fff, v1
+; GFX11-TRUE16-NEXT: v_or_b32_e32 v1, 0x400000, v5
+; GFX11-TRUE16-NEXT: v_add_nc_u32_e32 v3, v3, v7
+; GFX11-TRUE16-NEXT: v_add_f32_e64 v9, 0x40c00000, s0
+; GFX11-TRUE16-NEXT: v_mov_b16_e32 v2.l, v17.l
+; GFX11-TRUE16-NEXT: v_mov_b16_e32 v2.h, v6.l
+; GFX11-TRUE16-NEXT: v_cndmask_b32_e32 v0, v0, v1, vcc_lo
+; GFX11-TRUE16-NEXT: v_bfe_u32 v1, v8, 16, 1
+; GFX11-TRUE16-NEXT: v_add_nc_u32_e32 v3, 0x7fff, v3
+; GFX11-TRUE16-NEXT: v_cmp_u_f32_e32 vcc_lo, v7, v7
+; GFX11-TRUE16-NEXT: v_bfe_u32 v5, v9, 16, 1
+; GFX11-TRUE16-NEXT: v_or_b32_e32 v7, 0x400000, v8
+; GFX11-TRUE16-NEXT: v_add_nc_u32_e32 v1, v1, v8
+; GFX11-TRUE16-NEXT: v_or_b32_e32 v13, 0x400000, v9
+; GFX11-TRUE16-NEXT: v_cndmask_b32_e32 v3, v3, v12, vcc_lo
+; GFX11-TRUE16-NEXT: v_cmp_u_f32_e32 vcc_lo, v8, v8
+; GFX11-TRUE16-NEXT: v_add_nc_u32_e32 v5, v5, v9
+; GFX11-TRUE16-NEXT: v_add_nc_u32_e32 v1, 0x7fff, v1
+; GFX11-TRUE16-NEXT: v_or_b32_e32 v12, 0x400000, v10
+; GFX11-TRUE16-NEXT: v_lshrrev_b32_e32 v0, 16, v0
+; GFX11-TRUE16-NEXT: v_lshrrev_b32_e32 v14, 16, v3
+; GFX11-TRUE16-NEXT: v_add_nc_u32_e32 v5, 0x7fff, v5
+; GFX11-TRUE16-NEXT: v_cndmask_b32_e32 v1, v1, v7, vcc_lo
+; GFX11-TRUE16-NEXT: v_cmp_u_f32_e32 vcc_lo, v10, v10
+; GFX11-TRUE16-NEXT: s_delay_alu instid0(VALU_DEP_4) | instskip(NEXT) | instid1(VALU_DEP_3)
+; GFX11-TRUE16-NEXT: v_mov_b16_e32 v10.h, v14.l
+; GFX11-TRUE16-NEXT: v_lshrrev_b32_e32 v16, 16, v1
+; GFX11-TRUE16-NEXT: v_cndmask_b32_e32 v7, v11, v12, vcc_lo
+; GFX11-TRUE16-NEXT: v_cmp_u_f32_e32 vcc_lo, v9, v9
+; GFX11-TRUE16-NEXT: v_mov_b16_e32 v1.l, v0.l
+; GFX11-TRUE16-NEXT: v_mov_b16_e32 v1.h, v4.l
+; GFX11-TRUE16-NEXT: v_mov_b16_e32 v10.l, v16.l
+; GFX11-TRUE16-NEXT: v_lshrrev_b32_e32 v8, 16, v7
+; GFX11-TRUE16-NEXT: v_cndmask_b32_e32 v5, v5, v13, vcc_lo
+; GFX11-TRUE16-NEXT: v_lshrrev_b32_e32 v7, 24, v2
+; GFX11-TRUE16-NEXT: s_delay_alu instid0(VALU_DEP_4) | instskip(NEXT) | instid1(VALU_DEP_4)
+; GFX11-TRUE16-NEXT: v_lshrrev_b32_e32 v15, 24, v10
+; GFX11-TRUE16-NEXT: v_mov_b16_e32 v9.l, v8.l
+; GFX11-TRUE16-NEXT: s_delay_alu instid0(VALU_DEP_4) | instskip(SKIP_2) | instid1(VALU_DEP_3)
+; GFX11-TRUE16-NEXT: v_lshrrev_b32_e32 v3, 16, v5
+; GFX11-TRUE16-NEXT: v_lshrrev_b32_e32 v13, 8, v10
+; GFX11-TRUE16-NEXT: v_lshrrev_b32_e32 v5, 8, v2
+; GFX11-TRUE16-NEXT: v_mov_b16_e32 v9.h, v3.l
+; GFX11-TRUE16-NEXT: v_lshrrev_b64 v[3:4], 24, v[1:2]
+; GFX11-TRUE16-NEXT: v_lshrrev_b32_e32 v2, 16, v1
+; GFX11-TRUE16-NEXT: v_lshrrev_b32_e32 v1, 8, v1
+; GFX11-TRUE16-NEXT: s_delay_alu instid0(VALU_DEP_4)
+; GFX11-TRUE16-NEXT: v_lshrrev_b64 v[11:12], 24, v[9:10]
+; GFX11-TRUE16-NEXT: v_lshrrev_b32_e32 v10, 16, v9
+; GFX11-TRUE16-NEXT: v_lshrrev_b32_e32 v9, 8, v9
+; GFX11-TRUE16-NEXT: s_branch .LBB109_5
+; GFX11-TRUE16-NEXT: .LBB109_3:
+; GFX11-TRUE16-NEXT: ; implicit-def: $sgpr12
+; GFX11-TRUE16-NEXT: ; implicit-def: $sgpr13
+; GFX11-TRUE16-NEXT: ; implicit-def: $sgpr4
+; GFX11-TRUE16-NEXT: ; implicit-def: $sgpr10
+; GFX11-TRUE16-NEXT: ; implicit-def: $sgpr17
+; GFX11-TRUE16-NEXT: ; implicit-def: $sgpr9
+; GFX11-TRUE16-NEXT: ; implicit-def: $sgpr15
+; GFX11-TRUE16-NEXT: ; implicit-def: $sgpr16
+; GFX11-TRUE16-NEXT: ; implicit-def: $sgpr6
+; GFX11-TRUE16-NEXT: ; implicit-def: $sgpr14
+; GFX11-TRUE16-NEXT: ; implicit-def: $sgpr18
+; GFX11-TRUE16-NEXT: ; implicit-def: $sgpr11
+; GFX11-TRUE16-NEXT: s_branch .LBB109_2
+; GFX11-TRUE16-NEXT: .LBB109_4:
+; GFX11-TRUE16-NEXT: v_dual_mov_b32 v8, s2 :: v_dual_mov_b32 v17, s1
+; GFX11-TRUE16-NEXT: v_dual_mov_b32 v16, s3 :: v_dual_mov_b32 v9, s15
+; GFX11-TRUE16-NEXT: v_dual_mov_b32 v14, s18 :: v_dual_mov_b32 v15, s11
+; GFX11-TRUE16-NEXT: v_dual_mov_b32 v0, s0 :: v_dual_mov_b32 v13, s14
+; GFX11-TRUE16-NEXT: v_dual_mov_b32 v6, s17 :: v_dual_mov_b32 v1, s12
+; GFX11-TRUE16-NEXT: v_dual_mov_b32 v10, s16 :: v_dual_mov_b32 v7, s9
+; GFX11-TRUE16-NEXT: v_dual_mov_b32 v2, s13 :: v_dual_mov_b32 v5, s10
+; GFX11-TRUE16-NEXT: v_mov_b32_e32 v11, s6
+; GFX11-TRUE16-NEXT: v_mov_b32_e32 v3, s4
+; GFX11-TRUE16-NEXT: .LBB109_5: ; %end
+; GFX11-TRUE16-NEXT: v_mov_b32_e32 v4, v17
+; GFX11-TRUE16-NEXT: v_mov_b32_e32 v12, v16
+; GFX11-TRUE16-NEXT: s_setpc_b64 s[30:31]
+;
+; GFX11-FAKE16-LABEL: bitcast_v8bf16_to_v16i8_scalar:
+; GFX11-FAKE16: ; %bb.0:
+; GFX11-FAKE16-NEXT: s_waitcnt vmcnt(0) expcnt(0) lgkmcnt(0)
+; GFX11-FAKE16-NEXT: s_cmp_lg_u32 s16, 0
+; GFX11-FAKE16-NEXT: s_mov_b32 s8, 0
+; GFX11-FAKE16-NEXT: s_cbranch_scc0 .LBB109_3
+; GFX11-FAKE16-NEXT: ; %bb.1: ; %cmp.false
+; GFX11-FAKE16-NEXT: s_lshr_b32 s11, s3, 24
+; GFX11-FAKE16-NEXT: s_lshr_b32 s18, s3, 16
+; GFX11-FAKE16-NEXT: s_lshr_b32 s14, s3, 8
+; GFX11-FAKE16-NEXT: s_lshr_b32 s16, s2, 16
+; GFX11-FAKE16-NEXT: s_lshr_b32 s15, s2, 8
+; GFX11-FAKE16-NEXT: s_lshr_b32 s9, s1, 24
+; GFX11-FAKE16-NEXT: s_lshr_b32 s17, s1, 16
+; GFX11-FAKE16-NEXT: s_lshr_b32 s10, s1, 8
+; GFX11-FAKE16-NEXT: s_lshr_b32 s13, s0, 16
+; GFX11-FAKE16-NEXT: s_lshr_b32 s12, s0, 8
+; GFX11-FAKE16-NEXT: s_lshr_b64 s[6:7], s[2:3], 24
+; GFX11-FAKE16-NEXT: s_lshr_b64 s[4:5], s[0:1], 24
+; GFX11-FAKE16-NEXT: s_and_not1_b32 vcc_lo, exec_lo, s8
+; GFX11-FAKE16-NEXT: s_cbranch_vccnz .LBB109_4
+; GFX11-FAKE16-NEXT: .LBB109_2: ; %cmp.true
+; GFX11-FAKE16-NEXT: s_lshl_b32 s4, s1, 16
+; GFX11-FAKE16-NEXT: s_pack_lh_b32_b16 s1, 0, s1
+; GFX11-FAKE16-NEXT: v_add_f32_e64 v0, 0x40c00000, s4
+; GFX11-FAKE16-NEXT: v_add_f32_e64 v1, 0x40c00000, s1
+; GFX11-FAKE16-NEXT: s_pack_lh_b32_b16 s4, 0, s0
+; GFX11-FAKE16-NEXT: s_lshl_b32 s0, s0, 16
+; GFX11-FAKE16-NEXT: v_add_f32_e64 v3, 0x40c00000, s4
+; GFX11-FAKE16-NEXT: v_bfe_u32 v2, v0, 16, 1
+; GFX11-FAKE16-NEXT: v_bfe_u32 v4, v1, 16, 1
+; GFX11-FAKE16-NEXT: v_or_b32_e32 v6, 0x400000, v1
+; GFX11-FAKE16-NEXT: v_or_b32_e32 v8, 0x400000, v0
+; GFX11-FAKE16-NEXT: v_bfe_u32 v7, v3, 16, 1
+; GFX11-FAKE16-NEXT: v_cmp_u_f32_e32 vcc_lo, v0, v0
+; GFX11-FAKE16-NEXT: v_add_nc_u32_e32 v4, v4, v1
+; GFX11-FAKE16-NEXT: v_add_f32_e64 v5, 0x40c00000, s0
+; GFX11-FAKE16-NEXT: v_or_b32_e32 v9, 0x400000, v3
+; GFX11-FAKE16-NEXT: s_lshl_b32 s0, s3, 16
+; GFX11-FAKE16-NEXT: s_pack_lh_b32_b16 s1, 0, s3
+; GFX11-FAKE16-NEXT: v_add_nc_u32_e32 v4, 0x7fff, v4
+; GFX11-FAKE16-NEXT: v_add_nc_u32_e32 v2, v2, v0
+; GFX11-FAKE16-NEXT: s_delay_alu instid0(VALU_DEP_1) | instskip(NEXT) | instid1(VALU_DEP_1)
+; GFX11-FAKE16-NEXT: v_add_nc_u32_e32 v2, 0x7fff, v2
+; GFX11-FAKE16-NEXT: v_dual_cndmask_b32 v0, v2, v8 :: v_dual_add_nc_u32 v7, v7, v3
+; GFX11-FAKE16-NEXT: v_cmp_u_f32_e32 vcc_lo, v1, v1
+; GFX11-FAKE16-NEXT: v_or_b32_e32 v8, 0x400000, v5
+; GFX11-FAKE16-NEXT: s_delay_alu instid0(VALU_DEP_3)
+; GFX11-FAKE16-NEXT: v_add_nc_u32_e32 v7, 0x7fff, v7
+; GFX11-FAKE16-NEXT: v_bfe_u32 v2, v5, 16, 1
+; GFX11-FAKE16-NEXT: v_lshrrev_b32_e32 v17, 16, v0
+; GFX11-FAKE16-NEXT: v_cndmask_b32_e32 v1, v4, v6, vcc_lo
+; GFX11-FAKE16-NEXT: v_cmp_u_f32_e32 vcc_lo, v3, v3
+; GFX11-FAKE16-NEXT: v_add_f32_e64 v3, 0x40c00000, s1
+; GFX11-FAKE16-NEXT: v_add_nc_u32_e32 v2, v2, v5
+; GFX11-FAKE16-NEXT: s_delay_alu instid0(VALU_DEP_4)
+; GFX11-FAKE16-NEXT: v_lshrrev_b32_e32 v6, 16, v1
+; GFX11-FAKE16-NEXT: v_cndmask_b32_e32 v0, v7, v9, vcc_lo
+; GFX11-FAKE16-NEXT: v_add_f32_e64 v7, 0x40c00000, s0
+; GFX11-FAKE16-NEXT: s_lshl_b32 s0, s2, 16
+; GFX11-FAKE16-NEXT: v_cmp_u_f32_e32 vcc_lo, v5, v5
+; GFX11-FAKE16-NEXT: v_add_f32_e64 v9, 0x40c00000, s0
+; GFX11-FAKE16-NEXT: s_pack_lh_b32_b16 s0, 0, s2
+; GFX11-FAKE16-NEXT: v_bfe_u32 v10, v7, 16, 1
+; GFX11-FAKE16-NEXT: v_add_f32_e64 v11, 0x40c00000, s0
+; GFX11-FAKE16-NEXT: v_or_b32_e32 v13, 0x400000, v7
+; GFX11-FAKE16-NEXT: v_bfe_u32 v12, v9, 16, 1
+; GFX11-FAKE16-NEXT: v_or_b32_e32 v14, 0x400000, v9
+; GFX11-FAKE16-NEXT: v_add_nc_u32_e32 v5, v10, v7
+; GFX11-FAKE16-NEXT: v_lshrrev_b32_e32 v4, 16, v0
+; GFX11-FAKE16-NEXT: v_add_nc_u32_e32 v0, 0x7fff, v2
+; GFX11-FAKE16-NEXT: v_bfe_u32 v2, v3, 16, 1
+; GFX11-FAKE16-NEXT: v_add_nc_u32_e32 v10, v12, v9
+; GFX11-FAKE16-NEXT: v_add_nc_u32_e32 v5, 0x7fff, v5
+; GFX11-FAKE16-NEXT: v_or_b32_e32 v12, 0x400000, v3
+; GFX11-FAKE16-NEXT: v_cndmask_b32_e32 v0, v0, v8, vcc_lo
+; GFX11-FAKE16-NEXT: v_cmp_u_f32_e32 vcc_lo, v7, v7
+; GFX11-FAKE16-NEXT: v_add_nc_u32_e32 v2, v2, v3
+; GFX11-FAKE16-NEXT: v_bfe_u32 v8, v11, 16, 1
+; GFX11-FAKE16-NEXT: v_add_nc_u32_e32 v10, 0x7fff, v10
+; GFX11-FAKE16-NEXT: v_lshrrev_b32_e32 v0, 16, v0
+; GFX11-FAKE16-NEXT: v_cndmask_b32_e32 v5, v5, v13, vcc_lo
+; GFX11-FAKE16-NEXT: v_cmp_u_f32_e32 vcc_lo, v9, v9
+; GFX11-FAKE16-NEXT: v_add_nc_u32_e32 v2, 0x7fff, v2
+; GFX11-FAKE16-NEXT: v_add_nc_u32_e32 v8, v8, v11
+; GFX11-FAKE16-NEXT: s_delay_alu instid0(VALU_DEP_4)
+; GFX11-FAKE16-NEXT: v_lshrrev_b32_e32 v16, 16, v5
+; GFX11-FAKE16-NEXT: v_cndmask_b32_e32 v9, v10, v14, vcc_lo
+; GFX11-FAKE16-NEXT: v_cmp_u_f32_e32 vcc_lo, v3, v3
+; GFX11-FAKE16-NEXT: v_and_b32_e32 v1, 0xffff, v17
+; GFX11-FAKE16-NEXT: v_add_nc_u32_e32 v7, 0x7fff, v8
+; GFX11-FAKE16-NEXT: v_or_b32_e32 v8, 0x400000, v11
+; GFX11-FAKE16-NEXT: v_and_b32_e32 v5, 0xffff, v16
+; GFX11-FAKE16-NEXT: v_cndmask_b32_e32 v2, v2, v12, vcc_lo
+; GFX11-FAKE16-NEXT: v_cmp_u_f32_e32 vcc_lo, v11, v11
+; GFX11-FAKE16-NEXT: v_and_b32_e32 v11, 0xffff, v0
+; GFX11-FAKE16-NEXT: s_delay_alu instid0(VALU_DEP_3)
+; GFX11-FAKE16-NEXT: v_lshrrev_b32_e32 v14, 16, v2
+; GFX11-FAKE16-NEXT: v_cndmask_b32_e32 v3, v7, v8, vcc_lo
+; GFX11-FAKE16-NEXT: v_lshrrev_b32_e32 v8, 16, v9
+; GFX11-FAKE16-NEXT: v_lshl_or_b32 v2, v6, 16, v1
+; GFX11-FAKE16-NEXT: v_lshl_or_b32 v1, v4, 16, v11
+; GFX11-FAKE16-NEXT: v_lshl_or_b32 v10, v14, 16, v5
+; GFX11-FAKE16-NEXT: v_lshrrev_b32_e32 v3, 16, v3
+; GFX11-FAKE16-NEXT: v_and_b32_e32 v7, 0xffff, v8
+; GFX11-FAKE16-NEXT: v_lshrrev_b32_e32 v5, 8, v2
+; GFX11-FAKE16-NEXT: s_delay_alu instid0(VALU_DEP_4) | instskip(SKIP_1) | instid1(VALU_DEP_4)
+; GFX11-FAKE16-NEXT: v_lshrrev_b32_e32 v15, 24, v10
+; GFX11-FAKE16-NEXT: v_lshrrev_b32_e32 v13, 8, v10
+; GFX11-FAKE16-NEXT: v_lshl_or_b32 v9, v3, 16, v7
+; GFX11-FAKE16-NEXT: v_lshrrev_b64 v[3:4], 24, v[1:2]
+; GFX11-FAKE16-NEXT: v_lshrrev_b32_e32 v7, 24, v2
+; GFX11-FAKE16-NEXT: v_lshrrev_b32_e32 v2, 16, v1
+; GFX11-FAKE16-NEXT: v_lshrrev_b32_e32 v1, 8, v1
+; GFX11-FAKE16-NEXT: v_lshrrev_b64 v[11:12], 24, v[9:10]
+; GFX11-FAKE16-NEXT: v_lshrrev_b32_e32 v10, 16, v9
+; GFX11-FAKE16-NEXT: v_lshrrev_b32_e32 v9, 8, v9
+; GFX11-FAKE16-NEXT: s_branch .LBB109_5
+; GFX11-FAKE16-NEXT: .LBB109_3:
+; GFX11-FAKE16-NEXT: ; implicit-def: $sgpr12
+; GFX11-FAKE16-NEXT: ; implicit-def: $sgpr13
+; GFX11-FAKE16-NEXT: ; implicit-def: $sgpr4
+; GFX11-FAKE16-NEXT: ; implicit-def: $sgpr10
+; GFX11-FAKE16-NEXT: ; implicit-def: $sgpr17
+; GFX11-FAKE16-NEXT: ; implicit-def: $sgpr9
+; GFX11-FAKE16-NEXT: ; implicit-def: $sgpr15
+; GFX11-FAKE16-NEXT: ; implicit-def: $sgpr16
+; GFX11-FAKE16-NEXT: ; implicit-def: $sgpr6
+; GFX11-FAKE16-NEXT: ; implicit-def: $sgpr14
+; GFX11-FAKE16-NEXT: ; implicit-def: $sgpr18
+; GFX11-FAKE16-NEXT: ; implicit-def: $sgpr11
+; GFX11-FAKE16-NEXT: s_branch .LBB109_2
+; GFX11-FAKE16-NEXT: .LBB109_4:
+; GFX11-FAKE16-NEXT: v_dual_mov_b32 v8, s2 :: v_dual_mov_b32 v17, s1
+; GFX11-FAKE16-NEXT: v_dual_mov_b32 v16, s3 :: v_dual_mov_b32 v9, s15
+; GFX11-FAKE16-NEXT: v_dual_mov_b32 v14, s18 :: v_dual_mov_b32 v15, s11
+; GFX11-FAKE16-NEXT: v_dual_mov_b32 v0, s0 :: v_dual_mov_b32 v13, s14
+; GFX11-FAKE16-NEXT: v_dual_mov_b32 v6, s17 :: v_dual_mov_b32 v1, s12
+; GFX11-FAKE16-NEXT: v_dual_mov_b32 v10, s16 :: v_dual_mov_b32 v7, s9
+; GFX11-FAKE16-NEXT: v_dual_mov_b32 v2, s13 :: v_dual_mov_b32 v5, s10
+; GFX11-FAKE16-NEXT: v_mov_b32_e32 v11, s6
+; GFX11-FAKE16-NEXT: v_mov_b32_e32 v3, s4
+; GFX11-FAKE16-NEXT: .LBB109_5: ; %end
+; GFX11-FAKE16-NEXT: v_mov_b32_e32 v4, v17
+; GFX11-FAKE16-NEXT: v_mov_b32_e32 v12, v16
+; GFX11-FAKE16-NEXT: s_setpc_b64 s[30:31]
%cmp = icmp eq i32 %b, 0
br i1 %cmp, label %cmp.true, label %cmp.false
diff --git a/llvm/test/CodeGen/AMDGPU/amdgcn.bitcast.256bit.ll b/llvm/test/CodeGen/AMDGPU/amdgcn.bitcast.256bit.ll
index 0a73571..01e397d 100644
--- a/llvm/test/CodeGen/AMDGPU/amdgcn.bitcast.256bit.ll
+++ b/llvm/test/CodeGen/AMDGPU/amdgcn.bitcast.256bit.ll
@@ -4485,203 +4485,384 @@ define inreg <8 x i32> @bitcast_v16bf16_to_v8i32_scalar(<16 x bfloat> inreg %a,
; GFX9-NEXT: v_mov_b32_e32 v7, s23
; GFX9-NEXT: s_setpc_b64 s[30:31]
;
-; GFX11-LABEL: bitcast_v16bf16_to_v8i32_scalar:
-; GFX11: ; %bb.0:
-; GFX11-NEXT: s_waitcnt vmcnt(0) expcnt(0) lgkmcnt(0)
-; GFX11-NEXT: s_mov_b32 s7, s19
-; GFX11-NEXT: s_mov_b32 s6, s18
-; GFX11-NEXT: s_mov_b32 s5, s17
-; GFX11-NEXT: s_mov_b32 s4, s16
-; GFX11-NEXT: s_cmp_lg_u32 s20, 0
-; GFX11-NEXT: s_mov_b32 s8, 0
-; GFX11-NEXT: s_cbranch_scc0 .LBB23_3
-; GFX11-NEXT: ; %bb.1: ; %Flow
-; GFX11-NEXT: s_and_not1_b32 vcc_lo, exec_lo, s8
-; GFX11-NEXT: s_cbranch_vccnz .LBB23_4
-; GFX11-NEXT: .LBB23_2: ; %cmp.true
-; GFX11-NEXT: s_lshl_b32 s8, s7, 16
-; GFX11-NEXT: s_and_b32 s7, s7, 0xffff0000
-; GFX11-NEXT: v_add_f32_e64 v0, 0x40c00000, s8
-; GFX11-NEXT: v_add_f32_e64 v1, 0x40c00000, s7
-; GFX11-NEXT: s_and_b32 s8, s6, 0xffff0000
-; GFX11-NEXT: s_lshl_b32 s6, s6, 16
-; GFX11-NEXT: v_add_f32_e64 v3, 0x40c00000, s8
-; GFX11-NEXT: v_bfe_u32 v2, v0, 16, 1
-; GFX11-NEXT: v_bfe_u32 v4, v1, 16, 1
-; GFX11-NEXT: v_or_b32_e32 v9, 0x400000, v1
-; GFX11-NEXT: s_and_b32 s7, s5, 0xffff0000
-; GFX11-NEXT: v_add_f32_e64 v5, 0x40c00000, s6
-; GFX11-NEXT: v_add_f32_e64 v6, 0x40c00000, s7
-; GFX11-NEXT: v_add_nc_u32_e32 v4, v4, v1
-; GFX11-NEXT: v_or_b32_e32 v8, 0x400000, v0
-; GFX11-NEXT: v_bfe_u32 v10, v3, 16, 1
-; GFX11-NEXT: v_cmp_u_f32_e32 vcc_lo, v0, v0
-; GFX11-NEXT: s_lshl_b32 s5, s5, 16
-; GFX11-NEXT: v_add_nc_u32_e32 v4, 0x7fff, v4
-; GFX11-NEXT: v_add_nc_u32_e32 v2, v2, v0
-; GFX11-NEXT: v_add_f32_e64 v7, 0x40c00000, s5
-; GFX11-NEXT: v_bfe_u32 v11, v6, 16, 1
-; GFX11-NEXT: s_and_b32 s5, s4, 0xffff0000
-; GFX11-NEXT: s_lshl_b32 s4, s4, 16
-; GFX11-NEXT: v_add_nc_u32_e32 v2, 0x7fff, v2
-; GFX11-NEXT: s_delay_alu instid0(VALU_DEP_1) | instskip(SKIP_4) | instid1(VALU_DEP_4)
-; GFX11-NEXT: v_cndmask_b32_e32 v0, v2, v8, vcc_lo
-; GFX11-NEXT: v_bfe_u32 v2, v5, 16, 1
-; GFX11-NEXT: v_add_nc_u32_e32 v8, v10, v3
-; GFX11-NEXT: v_cmp_u_f32_e32 vcc_lo, v1, v1
-; GFX11-NEXT: v_or_b32_e32 v10, 0x400000, v5
-; GFX11-NEXT: v_add_nc_u32_e32 v2, v2, v5
-; GFX11-NEXT: s_delay_alu instid0(VALU_DEP_4)
-; GFX11-NEXT: v_dual_cndmask_b32 v1, v4, v9 :: v_dual_add_nc_u32 v8, 0x7fff, v8
-; GFX11-NEXT: v_or_b32_e32 v4, 0x400000, v3
-; GFX11-NEXT: v_bfe_u32 v9, v7, 16, 1
-; GFX11-NEXT: v_cmp_u_f32_e32 vcc_lo, v3, v3
-; GFX11-NEXT: v_add_nc_u32_e32 v2, 0x7fff, v2
-; GFX11-NEXT: v_lshrrev_b32_e32 v1, 16, v1
-; GFX11-NEXT: s_delay_alu instid0(VALU_DEP_4)
-; GFX11-NEXT: v_dual_cndmask_b32 v3, v8, v4 :: v_dual_add_nc_u32 v4, v9, v7
-; GFX11-NEXT: v_cmp_u_f32_e32 vcc_lo, v5, v5
-; GFX11-NEXT: v_add_nc_u32_e32 v5, v11, v6
-; GFX11-NEXT: v_or_b32_e32 v8, 0x400000, v7
-; GFX11-NEXT: v_lshrrev_b32_e32 v0, 16, v0
-; GFX11-NEXT: v_add_nc_u32_e32 v4, 0x7fff, v4
-; GFX11-NEXT: s_delay_alu instid0(VALU_DEP_4)
-; GFX11-NEXT: v_dual_cndmask_b32 v2, v2, v10 :: v_dual_add_nc_u32 v5, 0x7fff, v5
-; GFX11-NEXT: v_cmp_u_f32_e32 vcc_lo, v7, v7
-; GFX11-NEXT: v_or_b32_e32 v9, 0x400000, v6
-; GFX11-NEXT: v_add_f32_e64 v10, 0x40c00000, s4
-; GFX11-NEXT: v_and_b32_e32 v0, 0xffff, v0
-; GFX11-NEXT: v_lshrrev_b32_e32 v2, 16, v2
-; GFX11-NEXT: v_cndmask_b32_e32 v4, v4, v8, vcc_lo
-; GFX11-NEXT: v_cmp_u_f32_e32 vcc_lo, v6, v6
-; GFX11-NEXT: v_add_f32_e64 v8, 0x40c00000, s5
-; GFX11-NEXT: v_lshrrev_b32_e32 v3, 16, v3
-; GFX11-NEXT: v_and_b32_e32 v2, 0xffff, v2
-; GFX11-NEXT: v_lshl_or_b32 v7, v1, 16, v0
-; GFX11-NEXT: v_cndmask_b32_e32 v5, v5, v9, vcc_lo
-; GFX11-NEXT: v_bfe_u32 v9, v10, 16, 1
-; GFX11-NEXT: v_bfe_u32 v6, v8, 16, 1
-; GFX11-NEXT: v_cmp_u_f32_e32 vcc_lo, v10, v10
-; GFX11-NEXT: s_and_b32 s4, s3, 0xffff0000
-; GFX11-NEXT: v_lshrrev_b32_e32 v5, 16, v5
-; GFX11-NEXT: v_add_nc_u32_e32 v0, v9, v10
-; GFX11-NEXT: s_lshl_b32 s3, s3, 16
-; GFX11-NEXT: s_delay_alu instid0(SALU_CYCLE_1) | instskip(SKIP_1) | instid1(VALU_DEP_2)
-; GFX11-NEXT: v_add_f32_e64 v9, 0x40c00000, s3
-; GFX11-NEXT: s_and_b32 s3, s2, 0xffff0000
-; GFX11-NEXT: v_add_nc_u32_e32 v0, 0x7fff, v0
-; GFX11-NEXT: v_add_nc_u32_e32 v11, v6, v8
-; GFX11-NEXT: v_lshl_or_b32 v6, v3, 16, v2
-; GFX11-NEXT: v_or_b32_e32 v3, 0x400000, v10
-; GFX11-NEXT: v_or_b32_e32 v2, 0x400000, v8
-; GFX11-NEXT: s_lshl_b32 s2, s2, 16
-; GFX11-NEXT: v_or_b32_e32 v12, 0x400000, v9
-; GFX11-NEXT: s_delay_alu instid0(VALU_DEP_3)
-; GFX11-NEXT: v_cndmask_b32_e32 v0, v0, v3, vcc_lo
-; GFX11-NEXT: v_add_nc_u32_e32 v1, 0x7fff, v11
-; GFX11-NEXT: v_lshrrev_b32_e32 v4, 16, v4
-; GFX11-NEXT: v_cmp_u_f32_e32 vcc_lo, v8, v8
-; GFX11-NEXT: v_add_f32_e64 v8, 0x40c00000, s3
-; GFX11-NEXT: v_bfe_u32 v3, v9, 16, 1
-; GFX11-NEXT: v_lshrrev_b32_e32 v0, 16, v0
-; GFX11-NEXT: v_dual_cndmask_b32 v1, v1, v2 :: v_dual_and_b32 v4, 0xffff, v4
-; GFX11-NEXT: s_delay_alu instid0(VALU_DEP_4) | instskip(NEXT) | instid1(VALU_DEP_3)
-; GFX11-NEXT: v_bfe_u32 v10, v8, 16, 1
-; GFX11-NEXT: v_and_b32_e32 v0, 0xffff, v0
-; GFX11-NEXT: s_delay_alu instid0(VALU_DEP_3) | instskip(SKIP_1) | instid1(VALU_DEP_4)
-; GFX11-NEXT: v_lshl_or_b32 v5, v5, 16, v4
-; GFX11-NEXT: v_add_f32_e64 v4, 0x40c00000, s4
-; GFX11-NEXT: v_add_nc_u32_e32 v10, v10, v8
-; GFX11-NEXT: v_lshrrev_b32_e32 v1, 16, v1
-; GFX11-NEXT: s_delay_alu instid0(VALU_DEP_3) | instskip(SKIP_2) | instid1(VALU_DEP_3)
-; GFX11-NEXT: v_bfe_u32 v2, v4, 16, 1
-; GFX11-NEXT: v_or_b32_e32 v11, 0x400000, v4
-; GFX11-NEXT: v_cmp_u_f32_e32 vcc_lo, v4, v4
-; GFX11-NEXT: v_add_nc_u32_e32 v2, v2, v4
-; GFX11-NEXT: v_add_f32_e64 v4, 0x40c00000, s2
-; GFX11-NEXT: s_and_b32 s2, s1, 0xffff0000
-; GFX11-NEXT: s_lshl_b32 s1, s1, 16
-; GFX11-NEXT: s_delay_alu instid0(VALU_DEP_2)
-; GFX11-NEXT: v_add_nc_u32_e32 v2, 0x7fff, v2
-; GFX11-NEXT: v_add_nc_u32_e32 v3, v3, v9
-; GFX11-NEXT: v_add_f32_e64 v13, 0x40c00000, s1
-; GFX11-NEXT: s_lshl_b32 s1, s0, 16
-; GFX11-NEXT: s_and_b32 s0, s0, 0xffff0000
-; GFX11-NEXT: v_cndmask_b32_e32 v2, v2, v11, vcc_lo
-; GFX11-NEXT: v_cmp_u_f32_e32 vcc_lo, v9, v9
-; GFX11-NEXT: v_add_nc_u32_e32 v9, 0x7fff, v10
-; GFX11-NEXT: v_or_b32_e32 v10, 0x400000, v8
-; GFX11-NEXT: v_add_nc_u32_e32 v3, 0x7fff, v3
-; GFX11-NEXT: v_bfe_u32 v11, v4, 16, 1
-; GFX11-NEXT: v_add_f32_e64 v14, 0x40c00000, s0
-; GFX11-NEXT: v_or_b32_e32 v18, 0x400000, v13
-; GFX11-NEXT: v_lshrrev_b32_e32 v2, 16, v2
-; GFX11-NEXT: v_cndmask_b32_e32 v3, v3, v12, vcc_lo
-; GFX11-NEXT: v_cmp_u_f32_e32 vcc_lo, v8, v8
-; GFX11-NEXT: v_bfe_u32 v16, v14, 16, 1
-; GFX11-NEXT: s_delay_alu instid0(VALU_DEP_3)
-; GFX11-NEXT: v_lshrrev_b32_e32 v3, 16, v3
-; GFX11-NEXT: v_dual_cndmask_b32 v8, v9, v10 :: v_dual_add_nc_u32 v9, v11, v4
-; GFX11-NEXT: v_or_b32_e32 v11, 0x400000, v4
-; GFX11-NEXT: v_cmp_u_f32_e32 vcc_lo, v4, v4
-; GFX11-NEXT: v_add_f32_e64 v10, 0x40c00000, s2
-; GFX11-NEXT: v_add_nc_u32_e32 v16, v16, v14
-; GFX11-NEXT: v_add_nc_u32_e32 v9, 0x7fff, v9
-; GFX11-NEXT: v_lshrrev_b32_e32 v8, 16, v8
-; GFX11-NEXT: s_delay_alu instid0(VALU_DEP_4) | instskip(SKIP_1) | instid1(VALU_DEP_4)
-; GFX11-NEXT: v_bfe_u32 v12, v10, 16, 1
-; GFX11-NEXT: v_or_b32_e32 v17, 0x400000, v10
-; GFX11-NEXT: v_cndmask_b32_e32 v4, v9, v11, vcc_lo
-; GFX11-NEXT: v_add_f32_e64 v9, 0x40c00000, s1
-; GFX11-NEXT: v_bfe_u32 v11, v13, 16, 1
-; GFX11-NEXT: v_cmp_u_f32_e32 vcc_lo, v13, v13
-; GFX11-NEXT: v_add_nc_u32_e32 v12, v12, v10
-; GFX11-NEXT: s_delay_alu instid0(VALU_DEP_4) | instskip(NEXT) | instid1(VALU_DEP_4)
-; GFX11-NEXT: v_bfe_u32 v15, v9, 16, 1
-; GFX11-NEXT: v_add_nc_u32_e32 v11, v11, v13
-; GFX11-NEXT: v_or_b32_e32 v19, 0x400000, v9
-; GFX11-NEXT: s_delay_alu instid0(VALU_DEP_4)
-; GFX11-NEXT: v_add_nc_u32_e32 v12, 0x7fff, v12
-; GFX11-NEXT: v_add_nc_u32_e32 v13, 0x7fff, v16
-; GFX11-NEXT: v_add_nc_u32_e32 v15, v15, v9
-; GFX11-NEXT: v_add_nc_u32_e32 v11, 0x7fff, v11
-; GFX11-NEXT: v_or_b32_e32 v16, 0x400000, v14
-; GFX11-NEXT: v_lshrrev_b32_e32 v4, 16, v4
-; GFX11-NEXT: s_delay_alu instid0(VALU_DEP_4) | instskip(NEXT) | instid1(VALU_DEP_4)
-; GFX11-NEXT: v_add_nc_u32_e32 v15, 0x7fff, v15
-; GFX11-NEXT: v_cndmask_b32_e32 v11, v11, v18, vcc_lo
-; GFX11-NEXT: v_cmp_u_f32_e32 vcc_lo, v9, v9
-; GFX11-NEXT: s_delay_alu instid0(VALU_DEP_2) | instskip(NEXT) | instid1(VALU_DEP_4)
-; GFX11-NEXT: v_lshrrev_b32_e32 v11, 16, v11
-; GFX11-NEXT: v_cndmask_b32_e32 v9, v15, v19, vcc_lo
-; GFX11-NEXT: v_cmp_u_f32_e32 vcc_lo, v10, v10
-; GFX11-NEXT: s_delay_alu instid0(VALU_DEP_3) | instskip(NEXT) | instid1(VALU_DEP_3)
-; GFX11-NEXT: v_and_b32_e32 v11, 0xffff, v11
-; GFX11-NEXT: v_lshrrev_b32_e32 v9, 16, v9
-; GFX11-NEXT: v_cndmask_b32_e32 v10, v12, v17, vcc_lo
-; GFX11-NEXT: v_cmp_u_f32_e32 vcc_lo, v14, v14
-; GFX11-NEXT: v_and_b32_e32 v3, 0xffff, v3
-; GFX11-NEXT: s_delay_alu instid0(VALU_DEP_4) | instskip(NEXT) | instid1(VALU_DEP_4)
-; GFX11-NEXT: v_and_b32_e32 v9, 0xffff, v9
-; GFX11-NEXT: v_lshrrev_b32_e32 v10, 16, v10
-; GFX11-NEXT: v_cndmask_b32_e32 v12, v13, v16, vcc_lo
-; GFX11-NEXT: v_and_b32_e32 v13, 0xffff, v4
-; GFX11-NEXT: v_lshl_or_b32 v4, v1, 16, v0
-; GFX11-NEXT: v_lshl_or_b32 v3, v2, 16, v3
-; GFX11-NEXT: v_lshl_or_b32 v1, v10, 16, v11
-; GFX11-NEXT: v_lshrrev_b32_e32 v12, 16, v12
-; GFX11-NEXT: v_lshl_or_b32 v2, v8, 16, v13
-; GFX11-NEXT: s_delay_alu instid0(VALU_DEP_2)
-; GFX11-NEXT: v_lshl_or_b32 v0, v12, 16, v9
-; GFX11-NEXT: s_setpc_b64 s[30:31]
-; GFX11-NEXT: .LBB23_3:
-; GFX11-NEXT: s_branch .LBB23_2
-; GFX11-NEXT: .LBB23_4:
-; GFX11-NEXT: v_dual_mov_b32 v0, s0 :: v_dual_mov_b32 v1, s1
-; GFX11-NEXT: v_dual_mov_b32 v2, s2 :: v_dual_mov_b32 v3, s3
-; GFX11-NEXT: v_dual_mov_b32 v4, s4 :: v_dual_mov_b32 v5, s5
-; GFX11-NEXT: v_dual_mov_b32 v6, s6 :: v_dual_mov_b32 v7, s7
-; GFX11-NEXT: s_setpc_b64 s[30:31]
+; GFX11-TRUE16-LABEL: bitcast_v16bf16_to_v8i32_scalar:
+; GFX11-TRUE16: ; %bb.0:
+; GFX11-TRUE16-NEXT: s_waitcnt vmcnt(0) expcnt(0) lgkmcnt(0)
+; GFX11-TRUE16-NEXT: s_mov_b32 s7, s19
+; GFX11-TRUE16-NEXT: s_mov_b32 s6, s18
+; GFX11-TRUE16-NEXT: s_mov_b32 s5, s17
+; GFX11-TRUE16-NEXT: s_mov_b32 s4, s16
+; GFX11-TRUE16-NEXT: s_cmp_lg_u32 s20, 0
+; GFX11-TRUE16-NEXT: s_mov_b32 s8, 0
+; GFX11-TRUE16-NEXT: s_cbranch_scc0 .LBB23_3
+; GFX11-TRUE16-NEXT: ; %bb.1: ; %Flow
+; GFX11-TRUE16-NEXT: s_and_not1_b32 vcc_lo, exec_lo, s8
+; GFX11-TRUE16-NEXT: s_cbranch_vccnz .LBB23_4
+; GFX11-TRUE16-NEXT: .LBB23_2: ; %cmp.true
+; GFX11-TRUE16-NEXT: s_and_b32 s8, s7, 0xffff0000
+; GFX11-TRUE16-NEXT: s_lshl_b32 s7, s7, 16
+; GFX11-TRUE16-NEXT: v_add_f32_e64 v0, 0x40c00000, s8
+; GFX11-TRUE16-NEXT: v_add_f32_e64 v1, 0x40c00000, s7
+; GFX11-TRUE16-NEXT: s_and_b32 s7, s6, 0xffff0000
+; GFX11-TRUE16-NEXT: s_lshl_b32 s6, s6, 16
+; GFX11-TRUE16-NEXT: v_add_f32_e64 v4, 0x40c00000, s7
+; GFX11-TRUE16-NEXT: v_bfe_u32 v2, v0, 16, 1
+; GFX11-TRUE16-NEXT: v_bfe_u32 v3, v1, 16, 1
+; GFX11-TRUE16-NEXT: v_add_f32_e64 v5, 0x40c00000, s6
+; GFX11-TRUE16-NEXT: v_or_b32_e32 v6, 0x400000, v0
+; GFX11-TRUE16-NEXT: v_bfe_u32 v7, v4, 16, 1
+; GFX11-TRUE16-NEXT: v_add_nc_u32_e32 v2, v2, v0
+; GFX11-TRUE16-NEXT: v_or_b32_e32 v8, 0x400000, v1
+; GFX11-TRUE16-NEXT: v_cmp_u_f32_e32 vcc_lo, v0, v0
+; GFX11-TRUE16-NEXT: v_bfe_u32 v9, v5, 16, 1
+; GFX11-TRUE16-NEXT: v_add_nc_u32_e32 v7, v7, v4
+; GFX11-TRUE16-NEXT: v_add_nc_u32_e32 v2, 0x7fff, v2
+; GFX11-TRUE16-NEXT: v_add_nc_u32_e32 v3, v3, v1
+; GFX11-TRUE16-NEXT: s_and_b32 s6, s5, 0xffff0000
+; GFX11-TRUE16-NEXT: s_lshl_b32 s5, s5, 16
+; GFX11-TRUE16-NEXT: s_delay_alu instid0(VALU_DEP_1) | instskip(SKIP_2) | instid1(VALU_DEP_3)
+; GFX11-TRUE16-NEXT: v_dual_cndmask_b32 v0, v2, v6 :: v_dual_add_nc_u32 v3, 0x7fff, v3
+; GFX11-TRUE16-NEXT: v_cmp_u_f32_e32 vcc_lo, v1, v1
+; GFX11-TRUE16-NEXT: v_add_nc_u32_e32 v6, 0x7fff, v7
+; GFX11-TRUE16-NEXT: v_lshrrev_b32_e32 v0, 16, v0
+; GFX11-TRUE16-NEXT: s_delay_alu instid0(VALU_DEP_4)
+; GFX11-TRUE16-NEXT: v_cndmask_b32_e32 v1, v3, v8, vcc_lo
+; GFX11-TRUE16-NEXT: v_or_b32_e32 v3, 0x400000, v4
+; GFX11-TRUE16-NEXT: v_cmp_u_f32_e32 vcc_lo, v4, v4
+; GFX11-TRUE16-NEXT: v_add_nc_u32_e32 v2, v9, v5
+; GFX11-TRUE16-NEXT: v_add_f32_e64 v8, 0x40c00000, s5
+; GFX11-TRUE16-NEXT: v_lshrrev_b32_e32 v7, 16, v1
+; GFX11-TRUE16-NEXT: v_or_b32_e32 v9, 0x400000, v5
+; GFX11-TRUE16-NEXT: v_cndmask_b32_e32 v3, v6, v3, vcc_lo
+; GFX11-TRUE16-NEXT: v_add_nc_u32_e32 v1, 0x7fff, v2
+; GFX11-TRUE16-NEXT: v_cmp_u_f32_e32 vcc_lo, v5, v5
+; GFX11-TRUE16-NEXT: v_add_f32_e64 v2, 0x40c00000, s6
+; GFX11-TRUE16-NEXT: v_bfe_u32 v6, v8, 16, 1
+; GFX11-TRUE16-NEXT: v_lshrrev_b32_e32 v3, 16, v3
+; GFX11-TRUE16-NEXT: v_mov_b16_e32 v7.h, v0.l
+; GFX11-TRUE16-NEXT: v_cndmask_b32_e32 v1, v1, v9, vcc_lo
+; GFX11-TRUE16-NEXT: v_bfe_u32 v4, v2, 16, 1
+; GFX11-TRUE16-NEXT: v_add_nc_u32_e32 v0, v6, v8
+; GFX11-TRUE16-NEXT: v_cmp_u_f32_e32 vcc_lo, v2, v2
+; GFX11-TRUE16-NEXT: s_and_b32 s5, s4, 0xffff0000
+; GFX11-TRUE16-NEXT: v_lshrrev_b32_e32 v6, 16, v1
+; GFX11-TRUE16-NEXT: v_mov_b16_e32 v6.h, v3.l
+; GFX11-TRUE16-NEXT: v_or_b32_e32 v3, 0x400000, v2
+; GFX11-TRUE16-NEXT: v_add_nc_u32_e32 v4, v4, v2
+; GFX11-TRUE16-NEXT: s_lshl_b32 s4, s4, 16
+; GFX11-TRUE16-NEXT: v_add_nc_u32_e32 v0, 0x7fff, v0
+; GFX11-TRUE16-NEXT: v_add_f32_e64 v9, 0x40c00000, s4
+; GFX11-TRUE16-NEXT: v_or_b32_e32 v5, 0x400000, v8
+; GFX11-TRUE16-NEXT: v_add_nc_u32_e32 v1, 0x7fff, v4
+; GFX11-TRUE16-NEXT: v_add_f32_e64 v4, 0x40c00000, s5
+; GFX11-TRUE16-NEXT: s_and_b32 s4, s3, 0xffff0000
+; GFX11-TRUE16-NEXT: v_or_b32_e32 v10, 0x400000, v9
+; GFX11-TRUE16-NEXT: s_lshl_b32 s3, s3, 16
+; GFX11-TRUE16-NEXT: v_cndmask_b32_e32 v1, v1, v3, vcc_lo
+; GFX11-TRUE16-NEXT: v_bfe_u32 v2, v4, 16, 1
+; GFX11-TRUE16-NEXT: v_cmp_u_f32_e32 vcc_lo, v8, v8
+; GFX11-TRUE16-NEXT: v_bfe_u32 v3, v9, 16, 1
+; GFX11-TRUE16-NEXT: v_or_b32_e32 v8, 0x400000, v4
+; GFX11-TRUE16-NEXT: v_lshrrev_b32_e32 v1, 16, v1
+; GFX11-TRUE16-NEXT: v_add_nc_u32_e32 v2, v2, v4
+; GFX11-TRUE16-NEXT: v_cndmask_b32_e32 v0, v0, v5, vcc_lo
+; GFX11-TRUE16-NEXT: v_add_nc_u32_e32 v3, v3, v9
+; GFX11-TRUE16-NEXT: v_cmp_u_f32_e32 vcc_lo, v4, v4
+; GFX11-TRUE16-NEXT: s_delay_alu instid0(VALU_DEP_4) | instskip(NEXT) | instid1(VALU_DEP_4)
+; GFX11-TRUE16-NEXT: v_add_nc_u32_e32 v2, 0x7fff, v2
+; GFX11-TRUE16-NEXT: v_lshrrev_b32_e32 v5, 16, v0
+; GFX11-TRUE16-NEXT: v_add_f32_e64 v0, 0x40c00000, s4
+; GFX11-TRUE16-NEXT: v_add_nc_u32_e32 v3, 0x7fff, v3
+; GFX11-TRUE16-NEXT: v_mov_b16_e32 v5.h, v1.l
+; GFX11-TRUE16-NEXT: v_cndmask_b32_e32 v2, v2, v8, vcc_lo
+; GFX11-TRUE16-NEXT: v_cmp_u_f32_e32 vcc_lo, v9, v9
+; GFX11-TRUE16-NEXT: v_bfe_u32 v11, v0, 16, 1
+; GFX11-TRUE16-NEXT: v_add_f32_e64 v1, 0x40c00000, s3
+; GFX11-TRUE16-NEXT: s_and_b32 s3, s2, 0xffff0000
+; GFX11-TRUE16-NEXT: v_lshrrev_b32_e32 v2, 16, v2
+; GFX11-TRUE16-NEXT: v_cndmask_b32_e32 v3, v3, v10, vcc_lo
+; GFX11-TRUE16-NEXT: v_add_nc_u32_e32 v8, v11, v0
+; GFX11-TRUE16-NEXT: v_bfe_u32 v9, v1, 16, 1
+; GFX11-TRUE16-NEXT: v_cmp_u_f32_e32 vcc_lo, v0, v0
+; GFX11-TRUE16-NEXT: s_lshl_b32 s2, s2, 16
+; GFX11-TRUE16-NEXT: v_lshrrev_b32_e32 v4, 16, v3
+; GFX11-TRUE16-NEXT: v_add_nc_u32_e32 v3, 0x7fff, v8
+; GFX11-TRUE16-NEXT: v_or_b32_e32 v8, 0x400000, v0
+; GFX11-TRUE16-NEXT: v_mov_b16_e32 v4.h, v2.l
+; GFX11-TRUE16-NEXT: v_add_f32_e64 v2, 0x40c00000, s3
+; GFX11-TRUE16-NEXT: v_or_b32_e32 v10, 0x400000, v1
+; GFX11-TRUE16-NEXT: s_delay_alu instid0(VALU_DEP_4) | instskip(NEXT) | instid1(VALU_DEP_3)
+; GFX11-TRUE16-NEXT: v_dual_cndmask_b32 v0, v3, v8 :: v_dual_add_nc_u32 v3, v9, v1
+; GFX11-TRUE16-NEXT: v_bfe_u32 v8, v2, 16, 1
+; GFX11-TRUE16-NEXT: v_add_f32_e64 v9, 0x40c00000, s2
+; GFX11-TRUE16-NEXT: v_cmp_u_f32_e32 vcc_lo, v1, v1
+; GFX11-TRUE16-NEXT: s_and_b32 s2, s1, 0xffff0000
+; GFX11-TRUE16-NEXT: v_add_nc_u32_e32 v3, 0x7fff, v3
+; GFX11-TRUE16-NEXT: v_add_nc_u32_e32 v8, v8, v2
+; GFX11-TRUE16-NEXT: v_bfe_u32 v11, v9, 16, 1
+; GFX11-TRUE16-NEXT: v_or_b32_e32 v12, 0x400000, v2
+; GFX11-TRUE16-NEXT: v_lshrrev_b32_e32 v0, 16, v0
+; GFX11-TRUE16-NEXT: v_cndmask_b32_e32 v1, v3, v10, vcc_lo
+; GFX11-TRUE16-NEXT: v_add_f32_e64 v10, 0x40c00000, s2
+; GFX11-TRUE16-NEXT: v_add_nc_u32_e32 v8, 0x7fff, v8
+; GFX11-TRUE16-NEXT: v_add_nc_u32_e32 v11, v11, v9
+; GFX11-TRUE16-NEXT: v_cmp_u_f32_e32 vcc_lo, v2, v2
+; GFX11-TRUE16-NEXT: v_lshrrev_b32_e32 v3, 16, v1
+; GFX11-TRUE16-NEXT: v_bfe_u32 v1, v10, 16, 1
+; GFX11-TRUE16-NEXT: s_lshl_b32 s1, s1, 16
+; GFX11-TRUE16-NEXT: s_and_b32 s2, s0, 0xffff0000
+; GFX11-TRUE16-NEXT: v_cndmask_b32_e32 v2, v8, v12, vcc_lo
+; GFX11-TRUE16-NEXT: v_add_nc_u32_e32 v8, 0x7fff, v11
+; GFX11-TRUE16-NEXT: v_or_b32_e32 v11, 0x400000, v9
+; GFX11-TRUE16-NEXT: v_cmp_u_f32_e32 vcc_lo, v9, v9
+; GFX11-TRUE16-NEXT: v_add_nc_u32_e32 v1, v1, v10
+; GFX11-TRUE16-NEXT: v_mov_b16_e32 v3.h, v0.l
+; GFX11-TRUE16-NEXT: v_lshrrev_b32_e32 v0, 16, v2
+; GFX11-TRUE16-NEXT: v_add_f32_e64 v9, 0x40c00000, s2
+; GFX11-TRUE16-NEXT: v_cndmask_b32_e32 v2, v8, v11, vcc_lo
+; GFX11-TRUE16-NEXT: v_add_f32_e64 v8, 0x40c00000, s1
+; GFX11-TRUE16-NEXT: s_lshl_b32 s0, s0, 16
+; GFX11-TRUE16-NEXT: v_add_nc_u32_e32 v1, 0x7fff, v1
+; GFX11-TRUE16-NEXT: v_or_b32_e32 v11, 0x400000, v10
+; GFX11-TRUE16-NEXT: v_add_f32_e64 v12, 0x40c00000, s0
+; GFX11-TRUE16-NEXT: v_bfe_u32 v13, v8, 16, 1
+; GFX11-TRUE16-NEXT: v_bfe_u32 v14, v9, 16, 1
+; GFX11-TRUE16-NEXT: v_cmp_u_f32_e32 vcc_lo, v10, v10
+; GFX11-TRUE16-NEXT: v_or_b32_e32 v15, 0x400000, v8
+; GFX11-TRUE16-NEXT: v_bfe_u32 v10, v12, 16, 1
+; GFX11-TRUE16-NEXT: v_or_b32_e32 v16, 0x400000, v12
+; GFX11-TRUE16-NEXT: v_lshrrev_b32_e32 v2, 16, v2
+; GFX11-TRUE16-NEXT: v_cndmask_b32_e32 v1, v1, v11, vcc_lo
+; GFX11-TRUE16-NEXT: v_add_nc_u32_e32 v11, v13, v8
+; GFX11-TRUE16-NEXT: v_add_nc_u32_e32 v13, v14, v9
+; GFX11-TRUE16-NEXT: v_cmp_u_f32_e32 vcc_lo, v9, v9
+; GFX11-TRUE16-NEXT: v_mov_b16_e32 v2.h, v0.l
+; GFX11-TRUE16-NEXT: v_lshrrev_b32_e32 v14, 16, v1
+; GFX11-TRUE16-NEXT: v_add_nc_u32_e32 v1, v10, v12
+; GFX11-TRUE16-NEXT: v_add_nc_u32_e32 v10, 0x7fff, v11
+; GFX11-TRUE16-NEXT: v_add_nc_u32_e32 v11, 0x7fff, v13
+; GFX11-TRUE16-NEXT: v_or_b32_e32 v13, 0x400000, v9
+; GFX11-TRUE16-NEXT: s_delay_alu instid0(VALU_DEP_4) | instskip(NEXT) | instid1(VALU_DEP_2)
+; GFX11-TRUE16-NEXT: v_add_nc_u32_e32 v1, 0x7fff, v1
+; GFX11-TRUE16-NEXT: v_cndmask_b32_e32 v9, v11, v13, vcc_lo
+; GFX11-TRUE16-NEXT: v_cmp_u_f32_e32 vcc_lo, v8, v8
+; GFX11-TRUE16-NEXT: s_delay_alu instid0(VALU_DEP_2) | instskip(SKIP_3) | instid1(VALU_DEP_3)
+; GFX11-TRUE16-NEXT: v_lshrrev_b32_e32 v9, 16, v9
+; GFX11-TRUE16-NEXT: v_cndmask_b32_e32 v8, v10, v15, vcc_lo
+; GFX11-TRUE16-NEXT: v_cmp_u_f32_e32 vcc_lo, v12, v12
+; GFX11-TRUE16-NEXT: v_cndmask_b32_e32 v10, v1, v16, vcc_lo
+; GFX11-TRUE16-NEXT: v_lshrrev_b32_e32 v1, 16, v8
+; GFX11-TRUE16-NEXT: v_mov_b16_e32 v1.h, v14.l
+; GFX11-TRUE16-NEXT: s_delay_alu instid0(VALU_DEP_3)
+; GFX11-TRUE16-NEXT: v_lshrrev_b32_e32 v0, 16, v10
+; GFX11-TRUE16-NEXT: v_mov_b16_e32 v0.h, v9.l
+; GFX11-TRUE16-NEXT: s_setpc_b64 s[30:31]
+; GFX11-TRUE16-NEXT: .LBB23_3:
+; GFX11-TRUE16-NEXT: s_branch .LBB23_2
+; GFX11-TRUE16-NEXT: .LBB23_4:
+; GFX11-TRUE16-NEXT: v_dual_mov_b32 v0, s0 :: v_dual_mov_b32 v1, s1
+; GFX11-TRUE16-NEXT: v_dual_mov_b32 v2, s2 :: v_dual_mov_b32 v3, s3
+; GFX11-TRUE16-NEXT: v_dual_mov_b32 v4, s4 :: v_dual_mov_b32 v5, s5
+; GFX11-TRUE16-NEXT: v_dual_mov_b32 v6, s6 :: v_dual_mov_b32 v7, s7
+; GFX11-TRUE16-NEXT: s_setpc_b64 s[30:31]
+;
+; GFX11-FAKE16-LABEL: bitcast_v16bf16_to_v8i32_scalar:
+; GFX11-FAKE16: ; %bb.0:
+; GFX11-FAKE16-NEXT: s_waitcnt vmcnt(0) expcnt(0) lgkmcnt(0)
+; GFX11-FAKE16-NEXT: s_mov_b32 s7, s19
+; GFX11-FAKE16-NEXT: s_mov_b32 s6, s18
+; GFX11-FAKE16-NEXT: s_mov_b32 s5, s17
+; GFX11-FAKE16-NEXT: s_mov_b32 s4, s16
+; GFX11-FAKE16-NEXT: s_cmp_lg_u32 s20, 0
+; GFX11-FAKE16-NEXT: s_mov_b32 s8, 0
+; GFX11-FAKE16-NEXT: s_cbranch_scc0 .LBB23_3
+; GFX11-FAKE16-NEXT: ; %bb.1: ; %Flow
+; GFX11-FAKE16-NEXT: s_and_not1_b32 vcc_lo, exec_lo, s8
+; GFX11-FAKE16-NEXT: s_cbranch_vccnz .LBB23_4
+; GFX11-FAKE16-NEXT: .LBB23_2: ; %cmp.true
+; GFX11-FAKE16-NEXT: s_lshl_b32 s8, s7, 16
+; GFX11-FAKE16-NEXT: s_and_b32 s7, s7, 0xffff0000
+; GFX11-FAKE16-NEXT: v_add_f32_e64 v0, 0x40c00000, s8
+; GFX11-FAKE16-NEXT: v_add_f32_e64 v1, 0x40c00000, s7
+; GFX11-FAKE16-NEXT: s_and_b32 s8, s6, 0xffff0000
+; GFX11-FAKE16-NEXT: s_lshl_b32 s6, s6, 16
+; GFX11-FAKE16-NEXT: v_add_f32_e64 v3, 0x40c00000, s8
+; GFX11-FAKE16-NEXT: v_bfe_u32 v2, v0, 16, 1
+; GFX11-FAKE16-NEXT: v_bfe_u32 v4, v1, 16, 1
+; GFX11-FAKE16-NEXT: v_or_b32_e32 v9, 0x400000, v1
+; GFX11-FAKE16-NEXT: s_and_b32 s7, s5, 0xffff0000
+; GFX11-FAKE16-NEXT: v_add_f32_e64 v5, 0x40c00000, s6
+; GFX11-FAKE16-NEXT: v_add_f32_e64 v6, 0x40c00000, s7
+; GFX11-FAKE16-NEXT: v_add_nc_u32_e32 v4, v4, v1
+; GFX11-FAKE16-NEXT: v_or_b32_e32 v8, 0x400000, v0
+; GFX11-FAKE16-NEXT: v_bfe_u32 v10, v3, 16, 1
+; GFX11-FAKE16-NEXT: v_cmp_u_f32_e32 vcc_lo, v0, v0
+; GFX11-FAKE16-NEXT: s_lshl_b32 s5, s5, 16
+; GFX11-FAKE16-NEXT: v_add_nc_u32_e32 v4, 0x7fff, v4
+; GFX11-FAKE16-NEXT: v_add_nc_u32_e32 v2, v2, v0
+; GFX11-FAKE16-NEXT: v_add_f32_e64 v7, 0x40c00000, s5
+; GFX11-FAKE16-NEXT: v_bfe_u32 v11, v6, 16, 1
+; GFX11-FAKE16-NEXT: s_and_b32 s5, s4, 0xffff0000
+; GFX11-FAKE16-NEXT: s_lshl_b32 s4, s4, 16
+; GFX11-FAKE16-NEXT: v_add_nc_u32_e32 v2, 0x7fff, v2
+; GFX11-FAKE16-NEXT: s_delay_alu instid0(VALU_DEP_1) | instskip(SKIP_4) | instid1(VALU_DEP_4)
+; GFX11-FAKE16-NEXT: v_cndmask_b32_e32 v0, v2, v8, vcc_lo
+; GFX11-FAKE16-NEXT: v_bfe_u32 v2, v5, 16, 1
+; GFX11-FAKE16-NEXT: v_add_nc_u32_e32 v8, v10, v3
+; GFX11-FAKE16-NEXT: v_cmp_u_f32_e32 vcc_lo, v1, v1
+; GFX11-FAKE16-NEXT: v_or_b32_e32 v10, 0x400000, v5
+; GFX11-FAKE16-NEXT: v_add_nc_u32_e32 v2, v2, v5
+; GFX11-FAKE16-NEXT: s_delay_alu instid0(VALU_DEP_4)
+; GFX11-FAKE16-NEXT: v_dual_cndmask_b32 v1, v4, v9 :: v_dual_add_nc_u32 v8, 0x7fff, v8
+; GFX11-FAKE16-NEXT: v_or_b32_e32 v4, 0x400000, v3
+; GFX11-FAKE16-NEXT: v_bfe_u32 v9, v7, 16, 1
+; GFX11-FAKE16-NEXT: v_cmp_u_f32_e32 vcc_lo, v3, v3
+; GFX11-FAKE16-NEXT: v_add_nc_u32_e32 v2, 0x7fff, v2
+; GFX11-FAKE16-NEXT: v_lshrrev_b32_e32 v1, 16, v1
+; GFX11-FAKE16-NEXT: s_delay_alu instid0(VALU_DEP_4)
+; GFX11-FAKE16-NEXT: v_dual_cndmask_b32 v3, v8, v4 :: v_dual_add_nc_u32 v4, v9, v7
+; GFX11-FAKE16-NEXT: v_cmp_u_f32_e32 vcc_lo, v5, v5
+; GFX11-FAKE16-NEXT: v_add_nc_u32_e32 v5, v11, v6
+; GFX11-FAKE16-NEXT: v_or_b32_e32 v8, 0x400000, v7
+; GFX11-FAKE16-NEXT: v_lshrrev_b32_e32 v0, 16, v0
+; GFX11-FAKE16-NEXT: v_add_nc_u32_e32 v4, 0x7fff, v4
+; GFX11-FAKE16-NEXT: s_delay_alu instid0(VALU_DEP_4)
+; GFX11-FAKE16-NEXT: v_dual_cndmask_b32 v2, v2, v10 :: v_dual_add_nc_u32 v5, 0x7fff, v5
+; GFX11-FAKE16-NEXT: v_cmp_u_f32_e32 vcc_lo, v7, v7
+; GFX11-FAKE16-NEXT: v_or_b32_e32 v9, 0x400000, v6
+; GFX11-FAKE16-NEXT: v_add_f32_e64 v10, 0x40c00000, s4
+; GFX11-FAKE16-NEXT: v_and_b32_e32 v0, 0xffff, v0
+; GFX11-FAKE16-NEXT: v_lshrrev_b32_e32 v2, 16, v2
+; GFX11-FAKE16-NEXT: v_cndmask_b32_e32 v4, v4, v8, vcc_lo
+; GFX11-FAKE16-NEXT: v_cmp_u_f32_e32 vcc_lo, v6, v6
+; GFX11-FAKE16-NEXT: v_add_f32_e64 v8, 0x40c00000, s5
+; GFX11-FAKE16-NEXT: v_lshrrev_b32_e32 v3, 16, v3
+; GFX11-FAKE16-NEXT: v_and_b32_e32 v2, 0xffff, v2
+; GFX11-FAKE16-NEXT: v_lshl_or_b32 v7, v1, 16, v0
+; GFX11-FAKE16-NEXT: v_cndmask_b32_e32 v5, v5, v9, vcc_lo
+; GFX11-FAKE16-NEXT: v_bfe_u32 v9, v10, 16, 1
+; GFX11-FAKE16-NEXT: v_bfe_u32 v6, v8, 16, 1
+; GFX11-FAKE16-NEXT: v_cmp_u_f32_e32 vcc_lo, v10, v10
+; GFX11-FAKE16-NEXT: s_and_b32 s4, s3, 0xffff0000
+; GFX11-FAKE16-NEXT: v_lshrrev_b32_e32 v5, 16, v5
+; GFX11-FAKE16-NEXT: v_add_nc_u32_e32 v0, v9, v10
+; GFX11-FAKE16-NEXT: s_lshl_b32 s3, s3, 16
+; GFX11-FAKE16-NEXT: s_delay_alu instid0(SALU_CYCLE_1) | instskip(SKIP_1) | instid1(VALU_DEP_2)
+; GFX11-FAKE16-NEXT: v_add_f32_e64 v9, 0x40c00000, s3
+; GFX11-FAKE16-NEXT: s_and_b32 s3, s2, 0xffff0000
+; GFX11-FAKE16-NEXT: v_add_nc_u32_e32 v0, 0x7fff, v0
+; GFX11-FAKE16-NEXT: v_add_nc_u32_e32 v11, v6, v8
+; GFX11-FAKE16-NEXT: v_lshl_or_b32 v6, v3, 16, v2
+; GFX11-FAKE16-NEXT: v_or_b32_e32 v3, 0x400000, v10
+; GFX11-FAKE16-NEXT: v_or_b32_e32 v2, 0x400000, v8
+; GFX11-FAKE16-NEXT: s_lshl_b32 s2, s2, 16
+; GFX11-FAKE16-NEXT: v_or_b32_e32 v12, 0x400000, v9
+; GFX11-FAKE16-NEXT: s_delay_alu instid0(VALU_DEP_3)
+; GFX11-FAKE16-NEXT: v_cndmask_b32_e32 v0, v0, v3, vcc_lo
+; GFX11-FAKE16-NEXT: v_add_nc_u32_e32 v1, 0x7fff, v11
+; GFX11-FAKE16-NEXT: v_lshrrev_b32_e32 v4, 16, v4
+; GFX11-FAKE16-NEXT: v_cmp_u_f32_e32 vcc_lo, v8, v8
+; GFX11-FAKE16-NEXT: v_add_f32_e64 v8, 0x40c00000, s3
+; GFX11-FAKE16-NEXT: v_bfe_u32 v3, v9, 16, 1
+; GFX11-FAKE16-NEXT: v_lshrrev_b32_e32 v0, 16, v0
+; GFX11-FAKE16-NEXT: v_dual_cndmask_b32 v1, v1, v2 :: v_dual_and_b32 v4, 0xffff, v4
+; GFX11-FAKE16-NEXT: s_delay_alu instid0(VALU_DEP_4) | instskip(NEXT) | instid1(VALU_DEP_3)
+; GFX11-FAKE16-NEXT: v_bfe_u32 v10, v8, 16, 1
+; GFX11-FAKE16-NEXT: v_and_b32_e32 v0, 0xffff, v0
+; GFX11-FAKE16-NEXT: s_delay_alu instid0(VALU_DEP_3) | instskip(SKIP_1) | instid1(VALU_DEP_4)
+; GFX11-FAKE16-NEXT: v_lshl_or_b32 v5, v5, 16, v4
+; GFX11-FAKE16-NEXT: v_add_f32_e64 v4, 0x40c00000, s4
+; GFX11-FAKE16-NEXT: v_add_nc_u32_e32 v10, v10, v8
+; GFX11-FAKE16-NEXT: v_lshrrev_b32_e32 v1, 16, v1
+; GFX11-FAKE16-NEXT: s_delay_alu instid0(VALU_DEP_3) | instskip(SKIP_2) | instid1(VALU_DEP_3)
+; GFX11-FAKE16-NEXT: v_bfe_u32 v2, v4, 16, 1
+; GFX11-FAKE16-NEXT: v_or_b32_e32 v11, 0x400000, v4
+; GFX11-FAKE16-NEXT: v_cmp_u_f32_e32 vcc_lo, v4, v4
+; GFX11-FAKE16-NEXT: v_add_nc_u32_e32 v2, v2, v4
+; GFX11-FAKE16-NEXT: v_add_f32_e64 v4, 0x40c00000, s2
+; GFX11-FAKE16-NEXT: s_and_b32 s2, s1, 0xffff0000
+; GFX11-FAKE16-NEXT: s_lshl_b32 s1, s1, 16
+; GFX11-FAKE16-NEXT: s_delay_alu instid0(VALU_DEP_2)
+; GFX11-FAKE16-NEXT: v_add_nc_u32_e32 v2, 0x7fff, v2
+; GFX11-FAKE16-NEXT: v_add_nc_u32_e32 v3, v3, v9
+; GFX11-FAKE16-NEXT: v_add_f32_e64 v13, 0x40c00000, s1
+; GFX11-FAKE16-NEXT: s_lshl_b32 s1, s0, 16
+; GFX11-FAKE16-NEXT: s_and_b32 s0, s0, 0xffff0000
+; GFX11-FAKE16-NEXT: v_cndmask_b32_e32 v2, v2, v11, vcc_lo
+; GFX11-FAKE16-NEXT: v_cmp_u_f32_e32 vcc_lo, v9, v9
+; GFX11-FAKE16-NEXT: v_add_nc_u32_e32 v9, 0x7fff, v10
+; GFX11-FAKE16-NEXT: v_or_b32_e32 v10, 0x400000, v8
+; GFX11-FAKE16-NEXT: v_add_nc_u32_e32 v3, 0x7fff, v3
+; GFX11-FAKE16-NEXT: v_bfe_u32 v11, v4, 16, 1
+; GFX11-FAKE16-NEXT: v_add_f32_e64 v14, 0x40c00000, s0
+; GFX11-FAKE16-NEXT: v_or_b32_e32 v18, 0x400000, v13
+; GFX11-FAKE16-NEXT: v_lshrrev_b32_e32 v2, 16, v2
+; GFX11-FAKE16-NEXT: v_cndmask_b32_e32 v3, v3, v12, vcc_lo
+; GFX11-FAKE16-NEXT: v_cmp_u_f32_e32 vcc_lo, v8, v8
+; GFX11-FAKE16-NEXT: v_bfe_u32 v16, v14, 16, 1
+; GFX11-FAKE16-NEXT: s_delay_alu instid0(VALU_DEP_3)
+; GFX11-FAKE16-NEXT: v_lshrrev_b32_e32 v3, 16, v3
+; GFX11-FAKE16-NEXT: v_dual_cndmask_b32 v8, v9, v10 :: v_dual_add_nc_u32 v9, v11, v4
+; GFX11-FAKE16-NEXT: v_or_b32_e32 v11, 0x400000, v4
+; GFX11-FAKE16-NEXT: v_cmp_u_f32_e32 vcc_lo, v4, v4
+; GFX11-FAKE16-NEXT: v_add_f32_e64 v10, 0x40c00000, s2
+; GFX11-FAKE16-NEXT: v_add_nc_u32_e32 v16, v16, v14
+; GFX11-FAKE16-NEXT: v_add_nc_u32_e32 v9, 0x7fff, v9
+; GFX11-FAKE16-NEXT: v_lshrrev_b32_e32 v8, 16, v8
+; GFX11-FAKE16-NEXT: s_delay_alu instid0(VALU_DEP_4) | instskip(SKIP_1) | instid1(VALU_DEP_4)
+; GFX11-FAKE16-NEXT: v_bfe_u32 v12, v10, 16, 1
+; GFX11-FAKE16-NEXT: v_or_b32_e32 v17, 0x400000, v10
+; GFX11-FAKE16-NEXT: v_cndmask_b32_e32 v4, v9, v11, vcc_lo
+; GFX11-FAKE16-NEXT: v_add_f32_e64 v9, 0x40c00000, s1
+; GFX11-FAKE16-NEXT: v_bfe_u32 v11, v13, 16, 1
+; GFX11-FAKE16-NEXT: v_cmp_u_f32_e32 vcc_lo, v13, v13
+; GFX11-FAKE16-NEXT: v_add_nc_u32_e32 v12, v12, v10
+; GFX11-FAKE16-NEXT: s_delay_alu instid0(VALU_DEP_4) | instskip(NEXT) | instid1(VALU_DEP_4)
+; GFX11-FAKE16-NEXT: v_bfe_u32 v15, v9, 16, 1
+; GFX11-FAKE16-NEXT: v_add_nc_u32_e32 v11, v11, v13
+; GFX11-FAKE16-NEXT: v_or_b32_e32 v19, 0x400000, v9
+; GFX11-FAKE16-NEXT: s_delay_alu instid0(VALU_DEP_4)
+; GFX11-FAKE16-NEXT: v_add_nc_u32_e32 v12, 0x7fff, v12
+; GFX11-FAKE16-NEXT: v_add_nc_u32_e32 v13, 0x7fff, v16
+; GFX11-FAKE16-NEXT: v_add_nc_u32_e32 v15, v15, v9
+; GFX11-FAKE16-NEXT: v_add_nc_u32_e32 v11, 0x7fff, v11
+; GFX11-FAKE16-NEXT: v_or_b32_e32 v16, 0x400000, v14
+; GFX11-FAKE16-NEXT: v_lshrrev_b32_e32 v4, 16, v4
+; GFX11-FAKE16-NEXT: s_delay_alu instid0(VALU_DEP_4) | instskip(NEXT) | instid1(VALU_DEP_4)
+; GFX11-FAKE16-NEXT: v_add_nc_u32_e32 v15, 0x7fff, v15
+; GFX11-FAKE16-NEXT: v_cndmask_b32_e32 v11, v11, v18, vcc_lo
+; GFX11-FAKE16-NEXT: v_cmp_u_f32_e32 vcc_lo, v9, v9
+; GFX11-FAKE16-NEXT: s_delay_alu instid0(VALU_DEP_2) | instskip(NEXT) | instid1(VALU_DEP_4)
+; GFX11-FAKE16-NEXT: v_lshrrev_b32_e32 v11, 16, v11
+; GFX11-FAKE16-NEXT: v_cndmask_b32_e32 v9, v15, v19, vcc_lo
+; GFX11-FAKE16-NEXT: v_cmp_u_f32_e32 vcc_lo, v10, v10
+; GFX11-FAKE16-NEXT: s_delay_alu instid0(VALU_DEP_3) | instskip(NEXT) | instid1(VALU_DEP_3)
+; GFX11-FAKE16-NEXT: v_and_b32_e32 v11, 0xffff, v11
+; GFX11-FAKE16-NEXT: v_lshrrev_b32_e32 v9, 16, v9
+; GFX11-FAKE16-NEXT: v_cndmask_b32_e32 v10, v12, v17, vcc_lo
+; GFX11-FAKE16-NEXT: v_cmp_u_f32_e32 vcc_lo, v14, v14
+; GFX11-FAKE16-NEXT: v_and_b32_e32 v3, 0xffff, v3
+; GFX11-FAKE16-NEXT: s_delay_alu instid0(VALU_DEP_4) | instskip(NEXT) | instid1(VALU_DEP_4)
+; GFX11-FAKE16-NEXT: v_and_b32_e32 v9, 0xffff, v9
+; GFX11-FAKE16-NEXT: v_lshrrev_b32_e32 v10, 16, v10
+; GFX11-FAKE16-NEXT: v_cndmask_b32_e32 v12, v13, v16, vcc_lo
+; GFX11-FAKE16-NEXT: v_and_b32_e32 v13, 0xffff, v4
+; GFX11-FAKE16-NEXT: v_lshl_or_b32 v4, v1, 16, v0
+; GFX11-FAKE16-NEXT: v_lshl_or_b32 v3, v2, 16, v3
+; GFX11-FAKE16-NEXT: v_lshl_or_b32 v1, v10, 16, v11
+; GFX11-FAKE16-NEXT: v_lshrrev_b32_e32 v12, 16, v12
+; GFX11-FAKE16-NEXT: v_lshl_or_b32 v2, v8, 16, v13
+; GFX11-FAKE16-NEXT: s_delay_alu instid0(VALU_DEP_2)
+; GFX11-FAKE16-NEXT: v_lshl_or_b32 v0, v12, 16, v9
+; GFX11-FAKE16-NEXT: s_setpc_b64 s[30:31]
+; GFX11-FAKE16-NEXT: .LBB23_3:
+; GFX11-FAKE16-NEXT: s_branch .LBB23_2
+; GFX11-FAKE16-NEXT: .LBB23_4:
+; GFX11-FAKE16-NEXT: v_dual_mov_b32 v0, s0 :: v_dual_mov_b32 v1, s1
+; GFX11-FAKE16-NEXT: v_dual_mov_b32 v2, s2 :: v_dual_mov_b32 v3, s3
+; GFX11-FAKE16-NEXT: v_dual_mov_b32 v4, s4 :: v_dual_mov_b32 v5, s5
+; GFX11-FAKE16-NEXT: v_dual_mov_b32 v6, s6 :: v_dual_mov_b32 v7, s7
+; GFX11-FAKE16-NEXT: s_setpc_b64 s[30:31]
%cmp = icmp eq i32 %b, 0
br i1 %cmp, label %cmp.true, label %cmp.false
@@ -11456,203 +11637,384 @@ define inreg <8 x float> @bitcast_v16bf16_to_v8f32_scalar(<16 x bfloat> inreg %a
; GFX9-NEXT: v_mov_b32_e32 v7, s23
; GFX9-NEXT: s_setpc_b64 s[30:31]
;
-; GFX11-LABEL: bitcast_v16bf16_to_v8f32_scalar:
-; GFX11: ; %bb.0:
-; GFX11-NEXT: s_waitcnt vmcnt(0) expcnt(0) lgkmcnt(0)
-; GFX11-NEXT: s_mov_b32 s7, s19
-; GFX11-NEXT: s_mov_b32 s6, s18
-; GFX11-NEXT: s_mov_b32 s5, s17
-; GFX11-NEXT: s_mov_b32 s4, s16
-; GFX11-NEXT: s_cmp_lg_u32 s20, 0
-; GFX11-NEXT: s_mov_b32 s8, 0
-; GFX11-NEXT: s_cbranch_scc0 .LBB47_3
-; GFX11-NEXT: ; %bb.1: ; %Flow
-; GFX11-NEXT: s_and_not1_b32 vcc_lo, exec_lo, s8
-; GFX11-NEXT: s_cbranch_vccnz .LBB47_4
-; GFX11-NEXT: .LBB47_2: ; %cmp.true
-; GFX11-NEXT: s_lshl_b32 s8, s7, 16
-; GFX11-NEXT: s_and_b32 s7, s7, 0xffff0000
-; GFX11-NEXT: v_add_f32_e64 v0, 0x40c00000, s8
-; GFX11-NEXT: v_add_f32_e64 v1, 0x40c00000, s7
-; GFX11-NEXT: s_and_b32 s8, s6, 0xffff0000
-; GFX11-NEXT: s_lshl_b32 s6, s6, 16
-; GFX11-NEXT: v_add_f32_e64 v3, 0x40c00000, s8
-; GFX11-NEXT: v_bfe_u32 v2, v0, 16, 1
-; GFX11-NEXT: v_bfe_u32 v4, v1, 16, 1
-; GFX11-NEXT: v_or_b32_e32 v9, 0x400000, v1
-; GFX11-NEXT: s_and_b32 s7, s5, 0xffff0000
-; GFX11-NEXT: v_add_f32_e64 v5, 0x40c00000, s6
-; GFX11-NEXT: v_add_f32_e64 v6, 0x40c00000, s7
-; GFX11-NEXT: v_add_nc_u32_e32 v4, v4, v1
-; GFX11-NEXT: v_or_b32_e32 v8, 0x400000, v0
-; GFX11-NEXT: v_bfe_u32 v10, v3, 16, 1
-; GFX11-NEXT: v_cmp_u_f32_e32 vcc_lo, v0, v0
-; GFX11-NEXT: s_lshl_b32 s5, s5, 16
-; GFX11-NEXT: v_add_nc_u32_e32 v4, 0x7fff, v4
-; GFX11-NEXT: v_add_nc_u32_e32 v2, v2, v0
-; GFX11-NEXT: v_add_f32_e64 v7, 0x40c00000, s5
-; GFX11-NEXT: v_bfe_u32 v11, v6, 16, 1
-; GFX11-NEXT: s_and_b32 s5, s4, 0xffff0000
-; GFX11-NEXT: s_lshl_b32 s4, s4, 16
-; GFX11-NEXT: v_add_nc_u32_e32 v2, 0x7fff, v2
-; GFX11-NEXT: s_delay_alu instid0(VALU_DEP_1) | instskip(SKIP_4) | instid1(VALU_DEP_4)
-; GFX11-NEXT: v_cndmask_b32_e32 v0, v2, v8, vcc_lo
-; GFX11-NEXT: v_bfe_u32 v2, v5, 16, 1
-; GFX11-NEXT: v_add_nc_u32_e32 v8, v10, v3
-; GFX11-NEXT: v_cmp_u_f32_e32 vcc_lo, v1, v1
-; GFX11-NEXT: v_or_b32_e32 v10, 0x400000, v5
-; GFX11-NEXT: v_add_nc_u32_e32 v2, v2, v5
-; GFX11-NEXT: s_delay_alu instid0(VALU_DEP_4)
-; GFX11-NEXT: v_dual_cndmask_b32 v1, v4, v9 :: v_dual_add_nc_u32 v8, 0x7fff, v8
-; GFX11-NEXT: v_or_b32_e32 v4, 0x400000, v3
-; GFX11-NEXT: v_bfe_u32 v9, v7, 16, 1
-; GFX11-NEXT: v_cmp_u_f32_e32 vcc_lo, v3, v3
-; GFX11-NEXT: v_add_nc_u32_e32 v2, 0x7fff, v2
-; GFX11-NEXT: v_lshrrev_b32_e32 v1, 16, v1
-; GFX11-NEXT: s_delay_alu instid0(VALU_DEP_4)
-; GFX11-NEXT: v_dual_cndmask_b32 v3, v8, v4 :: v_dual_add_nc_u32 v4, v9, v7
-; GFX11-NEXT: v_cmp_u_f32_e32 vcc_lo, v5, v5
-; GFX11-NEXT: v_add_nc_u32_e32 v5, v11, v6
-; GFX11-NEXT: v_or_b32_e32 v8, 0x400000, v7
-; GFX11-NEXT: v_lshrrev_b32_e32 v0, 16, v0
-; GFX11-NEXT: v_add_nc_u32_e32 v4, 0x7fff, v4
-; GFX11-NEXT: s_delay_alu instid0(VALU_DEP_4)
-; GFX11-NEXT: v_dual_cndmask_b32 v2, v2, v10 :: v_dual_add_nc_u32 v5, 0x7fff, v5
-; GFX11-NEXT: v_cmp_u_f32_e32 vcc_lo, v7, v7
-; GFX11-NEXT: v_or_b32_e32 v9, 0x400000, v6
-; GFX11-NEXT: v_add_f32_e64 v10, 0x40c00000, s4
-; GFX11-NEXT: v_and_b32_e32 v0, 0xffff, v0
-; GFX11-NEXT: v_lshrrev_b32_e32 v2, 16, v2
-; GFX11-NEXT: v_cndmask_b32_e32 v4, v4, v8, vcc_lo
-; GFX11-NEXT: v_cmp_u_f32_e32 vcc_lo, v6, v6
-; GFX11-NEXT: v_add_f32_e64 v8, 0x40c00000, s5
-; GFX11-NEXT: v_lshrrev_b32_e32 v3, 16, v3
-; GFX11-NEXT: v_and_b32_e32 v2, 0xffff, v2
-; GFX11-NEXT: v_lshl_or_b32 v7, v1, 16, v0
-; GFX11-NEXT: v_cndmask_b32_e32 v5, v5, v9, vcc_lo
-; GFX11-NEXT: v_bfe_u32 v9, v10, 16, 1
-; GFX11-NEXT: v_bfe_u32 v6, v8, 16, 1
-; GFX11-NEXT: v_cmp_u_f32_e32 vcc_lo, v10, v10
-; GFX11-NEXT: s_and_b32 s4, s3, 0xffff0000
-; GFX11-NEXT: v_lshrrev_b32_e32 v5, 16, v5
-; GFX11-NEXT: v_add_nc_u32_e32 v0, v9, v10
-; GFX11-NEXT: s_lshl_b32 s3, s3, 16
-; GFX11-NEXT: s_delay_alu instid0(SALU_CYCLE_1) | instskip(SKIP_1) | instid1(VALU_DEP_2)
-; GFX11-NEXT: v_add_f32_e64 v9, 0x40c00000, s3
-; GFX11-NEXT: s_and_b32 s3, s2, 0xffff0000
-; GFX11-NEXT: v_add_nc_u32_e32 v0, 0x7fff, v0
-; GFX11-NEXT: v_add_nc_u32_e32 v11, v6, v8
-; GFX11-NEXT: v_lshl_or_b32 v6, v3, 16, v2
-; GFX11-NEXT: v_or_b32_e32 v3, 0x400000, v10
-; GFX11-NEXT: v_or_b32_e32 v2, 0x400000, v8
-; GFX11-NEXT: s_lshl_b32 s2, s2, 16
-; GFX11-NEXT: v_or_b32_e32 v12, 0x400000, v9
-; GFX11-NEXT: s_delay_alu instid0(VALU_DEP_3)
-; GFX11-NEXT: v_cndmask_b32_e32 v0, v0, v3, vcc_lo
-; GFX11-NEXT: v_add_nc_u32_e32 v1, 0x7fff, v11
-; GFX11-NEXT: v_lshrrev_b32_e32 v4, 16, v4
-; GFX11-NEXT: v_cmp_u_f32_e32 vcc_lo, v8, v8
-; GFX11-NEXT: v_add_f32_e64 v8, 0x40c00000, s3
-; GFX11-NEXT: v_bfe_u32 v3, v9, 16, 1
-; GFX11-NEXT: v_lshrrev_b32_e32 v0, 16, v0
-; GFX11-NEXT: v_dual_cndmask_b32 v1, v1, v2 :: v_dual_and_b32 v4, 0xffff, v4
-; GFX11-NEXT: s_delay_alu instid0(VALU_DEP_4) | instskip(NEXT) | instid1(VALU_DEP_3)
-; GFX11-NEXT: v_bfe_u32 v10, v8, 16, 1
-; GFX11-NEXT: v_and_b32_e32 v0, 0xffff, v0
-; GFX11-NEXT: s_delay_alu instid0(VALU_DEP_3) | instskip(SKIP_1) | instid1(VALU_DEP_4)
-; GFX11-NEXT: v_lshl_or_b32 v5, v5, 16, v4
-; GFX11-NEXT: v_add_f32_e64 v4, 0x40c00000, s4
-; GFX11-NEXT: v_add_nc_u32_e32 v10, v10, v8
-; GFX11-NEXT: v_lshrrev_b32_e32 v1, 16, v1
-; GFX11-NEXT: s_delay_alu instid0(VALU_DEP_3) | instskip(SKIP_2) | instid1(VALU_DEP_3)
-; GFX11-NEXT: v_bfe_u32 v2, v4, 16, 1
-; GFX11-NEXT: v_or_b32_e32 v11, 0x400000, v4
-; GFX11-NEXT: v_cmp_u_f32_e32 vcc_lo, v4, v4
-; GFX11-NEXT: v_add_nc_u32_e32 v2, v2, v4
-; GFX11-NEXT: v_add_f32_e64 v4, 0x40c00000, s2
-; GFX11-NEXT: s_and_b32 s2, s1, 0xffff0000
-; GFX11-NEXT: s_lshl_b32 s1, s1, 16
-; GFX11-NEXT: s_delay_alu instid0(VALU_DEP_2)
-; GFX11-NEXT: v_add_nc_u32_e32 v2, 0x7fff, v2
-; GFX11-NEXT: v_add_nc_u32_e32 v3, v3, v9
-; GFX11-NEXT: v_add_f32_e64 v13, 0x40c00000, s1
-; GFX11-NEXT: s_lshl_b32 s1, s0, 16
-; GFX11-NEXT: s_and_b32 s0, s0, 0xffff0000
-; GFX11-NEXT: v_cndmask_b32_e32 v2, v2, v11, vcc_lo
-; GFX11-NEXT: v_cmp_u_f32_e32 vcc_lo, v9, v9
-; GFX11-NEXT: v_add_nc_u32_e32 v9, 0x7fff, v10
-; GFX11-NEXT: v_or_b32_e32 v10, 0x400000, v8
-; GFX11-NEXT: v_add_nc_u32_e32 v3, 0x7fff, v3
-; GFX11-NEXT: v_bfe_u32 v11, v4, 16, 1
-; GFX11-NEXT: v_add_f32_e64 v14, 0x40c00000, s0
-; GFX11-NEXT: v_or_b32_e32 v18, 0x400000, v13
-; GFX11-NEXT: v_lshrrev_b32_e32 v2, 16, v2
-; GFX11-NEXT: v_cndmask_b32_e32 v3, v3, v12, vcc_lo
-; GFX11-NEXT: v_cmp_u_f32_e32 vcc_lo, v8, v8
-; GFX11-NEXT: v_bfe_u32 v16, v14, 16, 1
-; GFX11-NEXT: s_delay_alu instid0(VALU_DEP_3)
-; GFX11-NEXT: v_lshrrev_b32_e32 v3, 16, v3
-; GFX11-NEXT: v_dual_cndmask_b32 v8, v9, v10 :: v_dual_add_nc_u32 v9, v11, v4
-; GFX11-NEXT: v_or_b32_e32 v11, 0x400000, v4
-; GFX11-NEXT: v_cmp_u_f32_e32 vcc_lo, v4, v4
-; GFX11-NEXT: v_add_f32_e64 v10, 0x40c00000, s2
-; GFX11-NEXT: v_add_nc_u32_e32 v16, v16, v14
-; GFX11-NEXT: v_add_nc_u32_e32 v9, 0x7fff, v9
-; GFX11-NEXT: v_lshrrev_b32_e32 v8, 16, v8
-; GFX11-NEXT: s_delay_alu instid0(VALU_DEP_4) | instskip(SKIP_1) | instid1(VALU_DEP_4)
-; GFX11-NEXT: v_bfe_u32 v12, v10, 16, 1
-; GFX11-NEXT: v_or_b32_e32 v17, 0x400000, v10
-; GFX11-NEXT: v_cndmask_b32_e32 v4, v9, v11, vcc_lo
-; GFX11-NEXT: v_add_f32_e64 v9, 0x40c00000, s1
-; GFX11-NEXT: v_bfe_u32 v11, v13, 16, 1
-; GFX11-NEXT: v_cmp_u_f32_e32 vcc_lo, v13, v13
-; GFX11-NEXT: v_add_nc_u32_e32 v12, v12, v10
-; GFX11-NEXT: s_delay_alu instid0(VALU_DEP_4) | instskip(NEXT) | instid1(VALU_DEP_4)
-; GFX11-NEXT: v_bfe_u32 v15, v9, 16, 1
-; GFX11-NEXT: v_add_nc_u32_e32 v11, v11, v13
-; GFX11-NEXT: v_or_b32_e32 v19, 0x400000, v9
-; GFX11-NEXT: s_delay_alu instid0(VALU_DEP_4)
-; GFX11-NEXT: v_add_nc_u32_e32 v12, 0x7fff, v12
-; GFX11-NEXT: v_add_nc_u32_e32 v13, 0x7fff, v16
-; GFX11-NEXT: v_add_nc_u32_e32 v15, v15, v9
-; GFX11-NEXT: v_add_nc_u32_e32 v11, 0x7fff, v11
-; GFX11-NEXT: v_or_b32_e32 v16, 0x400000, v14
-; GFX11-NEXT: v_lshrrev_b32_e32 v4, 16, v4
-; GFX11-NEXT: s_delay_alu instid0(VALU_DEP_4) | instskip(NEXT) | instid1(VALU_DEP_4)
-; GFX11-NEXT: v_add_nc_u32_e32 v15, 0x7fff, v15
-; GFX11-NEXT: v_cndmask_b32_e32 v11, v11, v18, vcc_lo
-; GFX11-NEXT: v_cmp_u_f32_e32 vcc_lo, v9, v9
-; GFX11-NEXT: s_delay_alu instid0(VALU_DEP_2) | instskip(NEXT) | instid1(VALU_DEP_4)
-; GFX11-NEXT: v_lshrrev_b32_e32 v11, 16, v11
-; GFX11-NEXT: v_cndmask_b32_e32 v9, v15, v19, vcc_lo
-; GFX11-NEXT: v_cmp_u_f32_e32 vcc_lo, v10, v10
-; GFX11-NEXT: s_delay_alu instid0(VALU_DEP_3) | instskip(NEXT) | instid1(VALU_DEP_3)
-; GFX11-NEXT: v_and_b32_e32 v11, 0xffff, v11
-; GFX11-NEXT: v_lshrrev_b32_e32 v9, 16, v9
-; GFX11-NEXT: v_cndmask_b32_e32 v10, v12, v17, vcc_lo
-; GFX11-NEXT: v_cmp_u_f32_e32 vcc_lo, v14, v14
-; GFX11-NEXT: v_and_b32_e32 v3, 0xffff, v3
-; GFX11-NEXT: s_delay_alu instid0(VALU_DEP_4) | instskip(NEXT) | instid1(VALU_DEP_4)
-; GFX11-NEXT: v_and_b32_e32 v9, 0xffff, v9
-; GFX11-NEXT: v_lshrrev_b32_e32 v10, 16, v10
-; GFX11-NEXT: v_cndmask_b32_e32 v12, v13, v16, vcc_lo
-; GFX11-NEXT: v_and_b32_e32 v13, 0xffff, v4
-; GFX11-NEXT: v_lshl_or_b32 v4, v1, 16, v0
-; GFX11-NEXT: v_lshl_or_b32 v3, v2, 16, v3
-; GFX11-NEXT: v_lshl_or_b32 v1, v10, 16, v11
-; GFX11-NEXT: v_lshrrev_b32_e32 v12, 16, v12
-; GFX11-NEXT: v_lshl_or_b32 v2, v8, 16, v13
-; GFX11-NEXT: s_delay_alu instid0(VALU_DEP_2)
-; GFX11-NEXT: v_lshl_or_b32 v0, v12, 16, v9
-; GFX11-NEXT: s_setpc_b64 s[30:31]
-; GFX11-NEXT: .LBB47_3:
-; GFX11-NEXT: s_branch .LBB47_2
-; GFX11-NEXT: .LBB47_4:
-; GFX11-NEXT: v_dual_mov_b32 v0, s0 :: v_dual_mov_b32 v1, s1
-; GFX11-NEXT: v_dual_mov_b32 v2, s2 :: v_dual_mov_b32 v3, s3
-; GFX11-NEXT: v_dual_mov_b32 v4, s4 :: v_dual_mov_b32 v5, s5
-; GFX11-NEXT: v_dual_mov_b32 v6, s6 :: v_dual_mov_b32 v7, s7
-; GFX11-NEXT: s_setpc_b64 s[30:31]
+; GFX11-TRUE16-LABEL: bitcast_v16bf16_to_v8f32_scalar:
+; GFX11-TRUE16: ; %bb.0:
+; GFX11-TRUE16-NEXT: s_waitcnt vmcnt(0) expcnt(0) lgkmcnt(0)
+; GFX11-TRUE16-NEXT: s_mov_b32 s7, s19
+; GFX11-TRUE16-NEXT: s_mov_b32 s6, s18
+; GFX11-TRUE16-NEXT: s_mov_b32 s5, s17
+; GFX11-TRUE16-NEXT: s_mov_b32 s4, s16
+; GFX11-TRUE16-NEXT: s_cmp_lg_u32 s20, 0
+; GFX11-TRUE16-NEXT: s_mov_b32 s8, 0
+; GFX11-TRUE16-NEXT: s_cbranch_scc0 .LBB47_3
+; GFX11-TRUE16-NEXT: ; %bb.1: ; %Flow
+; GFX11-TRUE16-NEXT: s_and_not1_b32 vcc_lo, exec_lo, s8
+; GFX11-TRUE16-NEXT: s_cbranch_vccnz .LBB47_4
+; GFX11-TRUE16-NEXT: .LBB47_2: ; %cmp.true
+; GFX11-TRUE16-NEXT: s_and_b32 s8, s7, 0xffff0000
+; GFX11-TRUE16-NEXT: s_lshl_b32 s7, s7, 16
+; GFX11-TRUE16-NEXT: v_add_f32_e64 v0, 0x40c00000, s8
+; GFX11-TRUE16-NEXT: v_add_f32_e64 v1, 0x40c00000, s7
+; GFX11-TRUE16-NEXT: s_and_b32 s7, s6, 0xffff0000
+; GFX11-TRUE16-NEXT: s_lshl_b32 s6, s6, 16
+; GFX11-TRUE16-NEXT: v_add_f32_e64 v4, 0x40c00000, s7
+; GFX11-TRUE16-NEXT: v_bfe_u32 v2, v0, 16, 1
+; GFX11-TRUE16-NEXT: v_bfe_u32 v3, v1, 16, 1
+; GFX11-TRUE16-NEXT: v_add_f32_e64 v5, 0x40c00000, s6
+; GFX11-TRUE16-NEXT: v_or_b32_e32 v6, 0x400000, v0
+; GFX11-TRUE16-NEXT: v_bfe_u32 v7, v4, 16, 1
+; GFX11-TRUE16-NEXT: v_add_nc_u32_e32 v2, v2, v0
+; GFX11-TRUE16-NEXT: v_or_b32_e32 v8, 0x400000, v1
+; GFX11-TRUE16-NEXT: v_cmp_u_f32_e32 vcc_lo, v0, v0
+; GFX11-TRUE16-NEXT: v_bfe_u32 v9, v5, 16, 1
+; GFX11-TRUE16-NEXT: v_add_nc_u32_e32 v7, v7, v4
+; GFX11-TRUE16-NEXT: v_add_nc_u32_e32 v2, 0x7fff, v2
+; GFX11-TRUE16-NEXT: v_add_nc_u32_e32 v3, v3, v1
+; GFX11-TRUE16-NEXT: s_and_b32 s6, s5, 0xffff0000
+; GFX11-TRUE16-NEXT: s_lshl_b32 s5, s5, 16
+; GFX11-TRUE16-NEXT: s_delay_alu instid0(VALU_DEP_1) | instskip(SKIP_2) | instid1(VALU_DEP_3)
+; GFX11-TRUE16-NEXT: v_dual_cndmask_b32 v0, v2, v6 :: v_dual_add_nc_u32 v3, 0x7fff, v3
+; GFX11-TRUE16-NEXT: v_cmp_u_f32_e32 vcc_lo, v1, v1
+; GFX11-TRUE16-NEXT: v_add_nc_u32_e32 v6, 0x7fff, v7
+; GFX11-TRUE16-NEXT: v_lshrrev_b32_e32 v0, 16, v0
+; GFX11-TRUE16-NEXT: s_delay_alu instid0(VALU_DEP_4)
+; GFX11-TRUE16-NEXT: v_cndmask_b32_e32 v1, v3, v8, vcc_lo
+; GFX11-TRUE16-NEXT: v_or_b32_e32 v3, 0x400000, v4
+; GFX11-TRUE16-NEXT: v_cmp_u_f32_e32 vcc_lo, v4, v4
+; GFX11-TRUE16-NEXT: v_add_nc_u32_e32 v2, v9, v5
+; GFX11-TRUE16-NEXT: v_add_f32_e64 v8, 0x40c00000, s5
+; GFX11-TRUE16-NEXT: v_lshrrev_b32_e32 v7, 16, v1
+; GFX11-TRUE16-NEXT: v_or_b32_e32 v9, 0x400000, v5
+; GFX11-TRUE16-NEXT: v_cndmask_b32_e32 v3, v6, v3, vcc_lo
+; GFX11-TRUE16-NEXT: v_add_nc_u32_e32 v1, 0x7fff, v2
+; GFX11-TRUE16-NEXT: v_cmp_u_f32_e32 vcc_lo, v5, v5
+; GFX11-TRUE16-NEXT: v_add_f32_e64 v2, 0x40c00000, s6
+; GFX11-TRUE16-NEXT: v_bfe_u32 v6, v8, 16, 1
+; GFX11-TRUE16-NEXT: v_lshrrev_b32_e32 v3, 16, v3
+; GFX11-TRUE16-NEXT: v_mov_b16_e32 v7.h, v0.l
+; GFX11-TRUE16-NEXT: v_cndmask_b32_e32 v1, v1, v9, vcc_lo
+; GFX11-TRUE16-NEXT: v_bfe_u32 v4, v2, 16, 1
+; GFX11-TRUE16-NEXT: v_add_nc_u32_e32 v0, v6, v8
+; GFX11-TRUE16-NEXT: v_cmp_u_f32_e32 vcc_lo, v2, v2
+; GFX11-TRUE16-NEXT: s_and_b32 s5, s4, 0xffff0000
+; GFX11-TRUE16-NEXT: v_lshrrev_b32_e32 v6, 16, v1
+; GFX11-TRUE16-NEXT: v_mov_b16_e32 v6.h, v3.l
+; GFX11-TRUE16-NEXT: v_or_b32_e32 v3, 0x400000, v2
+; GFX11-TRUE16-NEXT: v_add_nc_u32_e32 v4, v4, v2
+; GFX11-TRUE16-NEXT: s_lshl_b32 s4, s4, 16
+; GFX11-TRUE16-NEXT: v_add_nc_u32_e32 v0, 0x7fff, v0
+; GFX11-TRUE16-NEXT: v_add_f32_e64 v9, 0x40c00000, s4
+; GFX11-TRUE16-NEXT: v_or_b32_e32 v5, 0x400000, v8
+; GFX11-TRUE16-NEXT: v_add_nc_u32_e32 v1, 0x7fff, v4
+; GFX11-TRUE16-NEXT: v_add_f32_e64 v4, 0x40c00000, s5
+; GFX11-TRUE16-NEXT: s_and_b32 s4, s3, 0xffff0000
+; GFX11-TRUE16-NEXT: v_or_b32_e32 v10, 0x400000, v9
+; GFX11-TRUE16-NEXT: s_lshl_b32 s3, s3, 16
+; GFX11-TRUE16-NEXT: v_cndmask_b32_e32 v1, v1, v3, vcc_lo
+; GFX11-TRUE16-NEXT: v_bfe_u32 v2, v4, 16, 1
+; GFX11-TRUE16-NEXT: v_cmp_u_f32_e32 vcc_lo, v8, v8
+; GFX11-TRUE16-NEXT: v_bfe_u32 v3, v9, 16, 1
+; GFX11-TRUE16-NEXT: v_or_b32_e32 v8, 0x400000, v4
+; GFX11-TRUE16-NEXT: v_lshrrev_b32_e32 v1, 16, v1
+; GFX11-TRUE16-NEXT: v_add_nc_u32_e32 v2, v2, v4
+; GFX11-TRUE16-NEXT: v_cndmask_b32_e32 v0, v0, v5, vcc_lo
+; GFX11-TRUE16-NEXT: v_add_nc_u32_e32 v3, v3, v9
+; GFX11-TRUE16-NEXT: v_cmp_u_f32_e32 vcc_lo, v4, v4
+; GFX11-TRUE16-NEXT: s_delay_alu instid0(VALU_DEP_4) | instskip(NEXT) | instid1(VALU_DEP_4)
+; GFX11-TRUE16-NEXT: v_add_nc_u32_e32 v2, 0x7fff, v2
+; GFX11-TRUE16-NEXT: v_lshrrev_b32_e32 v5, 16, v0
+; GFX11-TRUE16-NEXT: v_add_f32_e64 v0, 0x40c00000, s4
+; GFX11-TRUE16-NEXT: v_add_nc_u32_e32 v3, 0x7fff, v3
+; GFX11-TRUE16-NEXT: v_mov_b16_e32 v5.h, v1.l
+; GFX11-TRUE16-NEXT: v_cndmask_b32_e32 v2, v2, v8, vcc_lo
+; GFX11-TRUE16-NEXT: v_cmp_u_f32_e32 vcc_lo, v9, v9
+; GFX11-TRUE16-NEXT: v_bfe_u32 v11, v0, 16, 1
+; GFX11-TRUE16-NEXT: v_add_f32_e64 v1, 0x40c00000, s3
+; GFX11-TRUE16-NEXT: s_and_b32 s3, s2, 0xffff0000
+; GFX11-TRUE16-NEXT: v_lshrrev_b32_e32 v2, 16, v2
+; GFX11-TRUE16-NEXT: v_cndmask_b32_e32 v3, v3, v10, vcc_lo
+; GFX11-TRUE16-NEXT: v_add_nc_u32_e32 v8, v11, v0
+; GFX11-TRUE16-NEXT: v_bfe_u32 v9, v1, 16, 1
+; GFX11-TRUE16-NEXT: v_cmp_u_f32_e32 vcc_lo, v0, v0
+; GFX11-TRUE16-NEXT: s_lshl_b32 s2, s2, 16
+; GFX11-TRUE16-NEXT: v_lshrrev_b32_e32 v4, 16, v3
+; GFX11-TRUE16-NEXT: v_add_nc_u32_e32 v3, 0x7fff, v8
+; GFX11-TRUE16-NEXT: v_or_b32_e32 v8, 0x400000, v0
+; GFX11-TRUE16-NEXT: v_mov_b16_e32 v4.h, v2.l
+; GFX11-TRUE16-NEXT: v_add_f32_e64 v2, 0x40c00000, s3
+; GFX11-TRUE16-NEXT: v_or_b32_e32 v10, 0x400000, v1
+; GFX11-TRUE16-NEXT: s_delay_alu instid0(VALU_DEP_4) | instskip(NEXT) | instid1(VALU_DEP_3)
+; GFX11-TRUE16-NEXT: v_dual_cndmask_b32 v0, v3, v8 :: v_dual_add_nc_u32 v3, v9, v1
+; GFX11-TRUE16-NEXT: v_bfe_u32 v8, v2, 16, 1
+; GFX11-TRUE16-NEXT: v_add_f32_e64 v9, 0x40c00000, s2
+; GFX11-TRUE16-NEXT: v_cmp_u_f32_e32 vcc_lo, v1, v1
+; GFX11-TRUE16-NEXT: s_and_b32 s2, s1, 0xffff0000
+; GFX11-TRUE16-NEXT: v_add_nc_u32_e32 v3, 0x7fff, v3
+; GFX11-TRUE16-NEXT: v_add_nc_u32_e32 v8, v8, v2
+; GFX11-TRUE16-NEXT: v_bfe_u32 v11, v9, 16, 1
+; GFX11-TRUE16-NEXT: v_or_b32_e32 v12, 0x400000, v2
+; GFX11-TRUE16-NEXT: v_lshrrev_b32_e32 v0, 16, v0
+; GFX11-TRUE16-NEXT: v_cndmask_b32_e32 v1, v3, v10, vcc_lo
+; GFX11-TRUE16-NEXT: v_add_f32_e64 v10, 0x40c00000, s2
+; GFX11-TRUE16-NEXT: v_add_nc_u32_e32 v8, 0x7fff, v8
+; GFX11-TRUE16-NEXT: v_add_nc_u32_e32 v11, v11, v9
+; GFX11-TRUE16-NEXT: v_cmp_u_f32_e32 vcc_lo, v2, v2
+; GFX11-TRUE16-NEXT: v_lshrrev_b32_e32 v3, 16, v1
+; GFX11-TRUE16-NEXT: v_bfe_u32 v1, v10, 16, 1
+; GFX11-TRUE16-NEXT: s_lshl_b32 s1, s1, 16
+; GFX11-TRUE16-NEXT: s_and_b32 s2, s0, 0xffff0000
+; GFX11-TRUE16-NEXT: v_cndmask_b32_e32 v2, v8, v12, vcc_lo
+; GFX11-TRUE16-NEXT: v_add_nc_u32_e32 v8, 0x7fff, v11
+; GFX11-TRUE16-NEXT: v_or_b32_e32 v11, 0x400000, v9
+; GFX11-TRUE16-NEXT: v_cmp_u_f32_e32 vcc_lo, v9, v9
+; GFX11-TRUE16-NEXT: v_add_nc_u32_e32 v1, v1, v10
+; GFX11-TRUE16-NEXT: v_mov_b16_e32 v3.h, v0.l
+; GFX11-TRUE16-NEXT: v_lshrrev_b32_e32 v0, 16, v2
+; GFX11-TRUE16-NEXT: v_add_f32_e64 v9, 0x40c00000, s2
+; GFX11-TRUE16-NEXT: v_cndmask_b32_e32 v2, v8, v11, vcc_lo
+; GFX11-TRUE16-NEXT: v_add_f32_e64 v8, 0x40c00000, s1
+; GFX11-TRUE16-NEXT: s_lshl_b32 s0, s0, 16
+; GFX11-TRUE16-NEXT: v_add_nc_u32_e32 v1, 0x7fff, v1
+; GFX11-TRUE16-NEXT: v_or_b32_e32 v11, 0x400000, v10
+; GFX11-TRUE16-NEXT: v_add_f32_e64 v12, 0x40c00000, s0
+; GFX11-TRUE16-NEXT: v_bfe_u32 v13, v8, 16, 1
+; GFX11-TRUE16-NEXT: v_bfe_u32 v14, v9, 16, 1
+; GFX11-TRUE16-NEXT: v_cmp_u_f32_e32 vcc_lo, v10, v10
+; GFX11-TRUE16-NEXT: v_or_b32_e32 v15, 0x400000, v8
+; GFX11-TRUE16-NEXT: v_bfe_u32 v10, v12, 16, 1
+; GFX11-TRUE16-NEXT: v_or_b32_e32 v16, 0x400000, v12
+; GFX11-TRUE16-NEXT: v_lshrrev_b32_e32 v2, 16, v2
+; GFX11-TRUE16-NEXT: v_cndmask_b32_e32 v1, v1, v11, vcc_lo
+; GFX11-TRUE16-NEXT: v_add_nc_u32_e32 v11, v13, v8
+; GFX11-TRUE16-NEXT: v_add_nc_u32_e32 v13, v14, v9
+; GFX11-TRUE16-NEXT: v_cmp_u_f32_e32 vcc_lo, v9, v9
+; GFX11-TRUE16-NEXT: v_mov_b16_e32 v2.h, v0.l
+; GFX11-TRUE16-NEXT: v_lshrrev_b32_e32 v14, 16, v1
+; GFX11-TRUE16-NEXT: v_add_nc_u32_e32 v1, v10, v12
+; GFX11-TRUE16-NEXT: v_add_nc_u32_e32 v10, 0x7fff, v11
+; GFX11-TRUE16-NEXT: v_add_nc_u32_e32 v11, 0x7fff, v13
+; GFX11-TRUE16-NEXT: v_or_b32_e32 v13, 0x400000, v9
+; GFX11-TRUE16-NEXT: s_delay_alu instid0(VALU_DEP_4) | instskip(NEXT) | instid1(VALU_DEP_2)
+; GFX11-TRUE16-NEXT: v_add_nc_u32_e32 v1, 0x7fff, v1
+; GFX11-TRUE16-NEXT: v_cndmask_b32_e32 v9, v11, v13, vcc_lo
+; GFX11-TRUE16-NEXT: v_cmp_u_f32_e32 vcc_lo, v8, v8
+; GFX11-TRUE16-NEXT: s_delay_alu instid0(VALU_DEP_2) | instskip(SKIP_3) | instid1(VALU_DEP_3)
+; GFX11-TRUE16-NEXT: v_lshrrev_b32_e32 v9, 16, v9
+; GFX11-TRUE16-NEXT: v_cndmask_b32_e32 v8, v10, v15, vcc_lo
+; GFX11-TRUE16-NEXT: v_cmp_u_f32_e32 vcc_lo, v12, v12
+; GFX11-TRUE16-NEXT: v_cndmask_b32_e32 v10, v1, v16, vcc_lo
+; GFX11-TRUE16-NEXT: v_lshrrev_b32_e32 v1, 16, v8
+; GFX11-TRUE16-NEXT: v_mov_b16_e32 v1.h, v14.l
+; GFX11-TRUE16-NEXT: s_delay_alu instid0(VALU_DEP_3)
+; GFX11-TRUE16-NEXT: v_lshrrev_b32_e32 v0, 16, v10
+; GFX11-TRUE16-NEXT: v_mov_b16_e32 v0.h, v9.l
+; GFX11-TRUE16-NEXT: s_setpc_b64 s[30:31]
+; GFX11-TRUE16-NEXT: .LBB47_3:
+; GFX11-TRUE16-NEXT: s_branch .LBB47_2
+; GFX11-TRUE16-NEXT: .LBB47_4:
+; GFX11-TRUE16-NEXT: v_dual_mov_b32 v0, s0 :: v_dual_mov_b32 v1, s1
+; GFX11-TRUE16-NEXT: v_dual_mov_b32 v2, s2 :: v_dual_mov_b32 v3, s3
+; GFX11-TRUE16-NEXT: v_dual_mov_b32 v4, s4 :: v_dual_mov_b32 v5, s5
+; GFX11-TRUE16-NEXT: v_dual_mov_b32 v6, s6 :: v_dual_mov_b32 v7, s7
+; GFX11-TRUE16-NEXT: s_setpc_b64 s[30:31]
+;
+; GFX11-FAKE16-LABEL: bitcast_v16bf16_to_v8f32_scalar:
+; GFX11-FAKE16: ; %bb.0:
+; GFX11-FAKE16-NEXT: s_waitcnt vmcnt(0) expcnt(0) lgkmcnt(0)
+; GFX11-FAKE16-NEXT: s_mov_b32 s7, s19
+; GFX11-FAKE16-NEXT: s_mov_b32 s6, s18
+; GFX11-FAKE16-NEXT: s_mov_b32 s5, s17
+; GFX11-FAKE16-NEXT: s_mov_b32 s4, s16
+; GFX11-FAKE16-NEXT: s_cmp_lg_u32 s20, 0
+; GFX11-FAKE16-NEXT: s_mov_b32 s8, 0
+; GFX11-FAKE16-NEXT: s_cbranch_scc0 .LBB47_3
+; GFX11-FAKE16-NEXT: ; %bb.1: ; %Flow
+; GFX11-FAKE16-NEXT: s_and_not1_b32 vcc_lo, exec_lo, s8
+; GFX11-FAKE16-NEXT: s_cbranch_vccnz .LBB47_4
+; GFX11-FAKE16-NEXT: .LBB47_2: ; %cmp.true
+; GFX11-FAKE16-NEXT: s_lshl_b32 s8, s7, 16
+; GFX11-FAKE16-NEXT: s_and_b32 s7, s7, 0xffff0000
+; GFX11-FAKE16-NEXT: v_add_f32_e64 v0, 0x40c00000, s8
+; GFX11-FAKE16-NEXT: v_add_f32_e64 v1, 0x40c00000, s7
+; GFX11-FAKE16-NEXT: s_and_b32 s8, s6, 0xffff0000
+; GFX11-FAKE16-NEXT: s_lshl_b32 s6, s6, 16
+; GFX11-FAKE16-NEXT: v_add_f32_e64 v3, 0x40c00000, s8
+; GFX11-FAKE16-NEXT: v_bfe_u32 v2, v0, 16, 1
+; GFX11-FAKE16-NEXT: v_bfe_u32 v4, v1, 16, 1
+; GFX11-FAKE16-NEXT: v_or_b32_e32 v9, 0x400000, v1
+; GFX11-FAKE16-NEXT: s_and_b32 s7, s5, 0xffff0000
+; GFX11-FAKE16-NEXT: v_add_f32_e64 v5, 0x40c00000, s6
+; GFX11-FAKE16-NEXT: v_add_f32_e64 v6, 0x40c00000, s7
+; GFX11-FAKE16-NEXT: v_add_nc_u32_e32 v4, v4, v1
+; GFX11-FAKE16-NEXT: v_or_b32_e32 v8, 0x400000, v0
+; GFX11-FAKE16-NEXT: v_bfe_u32 v10, v3, 16, 1
+; GFX11-FAKE16-NEXT: v_cmp_u_f32_e32 vcc_lo, v0, v0
+; GFX11-FAKE16-NEXT: s_lshl_b32 s5, s5, 16
+; GFX11-FAKE16-NEXT: v_add_nc_u32_e32 v4, 0x7fff, v4
+; GFX11-FAKE16-NEXT: v_add_nc_u32_e32 v2, v2, v0
+; GFX11-FAKE16-NEXT: v_add_f32_e64 v7, 0x40c00000, s5
+; GFX11-FAKE16-NEXT: v_bfe_u32 v11, v6, 16, 1
+; GFX11-FAKE16-NEXT: s_and_b32 s5, s4, 0xffff0000
+; GFX11-FAKE16-NEXT: s_lshl_b32 s4, s4, 16
+; GFX11-FAKE16-NEXT: v_add_nc_u32_e32 v2, 0x7fff, v2
+; GFX11-FAKE16-NEXT: s_delay_alu instid0(VALU_DEP_1) | instskip(SKIP_4) | instid1(VALU_DEP_4)
+; GFX11-FAKE16-NEXT: v_cndmask_b32_e32 v0, v2, v8, vcc_lo
+; GFX11-FAKE16-NEXT: v_bfe_u32 v2, v5, 16, 1
+; GFX11-FAKE16-NEXT: v_add_nc_u32_e32 v8, v10, v3
+; GFX11-FAKE16-NEXT: v_cmp_u_f32_e32 vcc_lo, v1, v1
+; GFX11-FAKE16-NEXT: v_or_b32_e32 v10, 0x400000, v5
+; GFX11-FAKE16-NEXT: v_add_nc_u32_e32 v2, v2, v5
+; GFX11-FAKE16-NEXT: s_delay_alu instid0(VALU_DEP_4)
+; GFX11-FAKE16-NEXT: v_dual_cndmask_b32 v1, v4, v9 :: v_dual_add_nc_u32 v8, 0x7fff, v8
+; GFX11-FAKE16-NEXT: v_or_b32_e32 v4, 0x400000, v3
+; GFX11-FAKE16-NEXT: v_bfe_u32 v9, v7, 16, 1
+; GFX11-FAKE16-NEXT: v_cmp_u_f32_e32 vcc_lo, v3, v3
+; GFX11-FAKE16-NEXT: v_add_nc_u32_e32 v2, 0x7fff, v2
+; GFX11-FAKE16-NEXT: v_lshrrev_b32_e32 v1, 16, v1
+; GFX11-FAKE16-NEXT: s_delay_alu instid0(VALU_DEP_4)
+; GFX11-FAKE16-NEXT: v_dual_cndmask_b32 v3, v8, v4 :: v_dual_add_nc_u32 v4, v9, v7
+; GFX11-FAKE16-NEXT: v_cmp_u_f32_e32 vcc_lo, v5, v5
+; GFX11-FAKE16-NEXT: v_add_nc_u32_e32 v5, v11, v6
+; GFX11-FAKE16-NEXT: v_or_b32_e32 v8, 0x400000, v7
+; GFX11-FAKE16-NEXT: v_lshrrev_b32_e32 v0, 16, v0
+; GFX11-FAKE16-NEXT: v_add_nc_u32_e32 v4, 0x7fff, v4
+; GFX11-FAKE16-NEXT: s_delay_alu instid0(VALU_DEP_4)
+; GFX11-FAKE16-NEXT: v_dual_cndmask_b32 v2, v2, v10 :: v_dual_add_nc_u32 v5, 0x7fff, v5
+; GFX11-FAKE16-NEXT: v_cmp_u_f32_e32 vcc_lo, v7, v7
+; GFX11-FAKE16-NEXT: v_or_b32_e32 v9, 0x400000, v6
+; GFX11-FAKE16-NEXT: v_add_f32_e64 v10, 0x40c00000, s4
+; GFX11-FAKE16-NEXT: v_and_b32_e32 v0, 0xffff, v0
+; GFX11-FAKE16-NEXT: v_lshrrev_b32_e32 v2, 16, v2
+; GFX11-FAKE16-NEXT: v_cndmask_b32_e32 v4, v4, v8, vcc_lo
+; GFX11-FAKE16-NEXT: v_cmp_u_f32_e32 vcc_lo, v6, v6
+; GFX11-FAKE16-NEXT: v_add_f32_e64 v8, 0x40c00000, s5
+; GFX11-FAKE16-NEXT: v_lshrrev_b32_e32 v3, 16, v3
+; GFX11-FAKE16-NEXT: v_and_b32_e32 v2, 0xffff, v2
+; GFX11-FAKE16-NEXT: v_lshl_or_b32 v7, v1, 16, v0
+; GFX11-FAKE16-NEXT: v_cndmask_b32_e32 v5, v5, v9, vcc_lo
+; GFX11-FAKE16-NEXT: v_bfe_u32 v9, v10, 16, 1
+; GFX11-FAKE16-NEXT: v_bfe_u32 v6, v8, 16, 1
+; GFX11-FAKE16-NEXT: v_cmp_u_f32_e32 vcc_lo, v10, v10
+; GFX11-FAKE16-NEXT: s_and_b32 s4, s3, 0xffff0000
+; GFX11-FAKE16-NEXT: v_lshrrev_b32_e32 v5, 16, v5
+; GFX11-FAKE16-NEXT: v_add_nc_u32_e32 v0, v9, v10
+; GFX11-FAKE16-NEXT: s_lshl_b32 s3, s3, 16
+; GFX11-FAKE16-NEXT: s_delay_alu instid0(SALU_CYCLE_1) | instskip(SKIP_1) | instid1(VALU_DEP_2)
+; GFX11-FAKE16-NEXT: v_add_f32_e64 v9, 0x40c00000, s3
+; GFX11-FAKE16-NEXT: s_and_b32 s3, s2, 0xffff0000
+; GFX11-FAKE16-NEXT: v_add_nc_u32_e32 v0, 0x7fff, v0
+; GFX11-FAKE16-NEXT: v_add_nc_u32_e32 v11, v6, v8
+; GFX11-FAKE16-NEXT: v_lshl_or_b32 v6, v3, 16, v2
+; GFX11-FAKE16-NEXT: v_or_b32_e32 v3, 0x400000, v10
+; GFX11-FAKE16-NEXT: v_or_b32_e32 v2, 0x400000, v8
+; GFX11-FAKE16-NEXT: s_lshl_b32 s2, s2, 16
+; GFX11-FAKE16-NEXT: v_or_b32_e32 v12, 0x400000, v9
+; GFX11-FAKE16-NEXT: s_delay_alu instid0(VALU_DEP_3)
+; GFX11-FAKE16-NEXT: v_cndmask_b32_e32 v0, v0, v3, vcc_lo
+; GFX11-FAKE16-NEXT: v_add_nc_u32_e32 v1, 0x7fff, v11
+; GFX11-FAKE16-NEXT: v_lshrrev_b32_e32 v4, 16, v4
+; GFX11-FAKE16-NEXT: v_cmp_u_f32_e32 vcc_lo, v8, v8
+; GFX11-FAKE16-NEXT: v_add_f32_e64 v8, 0x40c00000, s3
+; GFX11-FAKE16-NEXT: v_bfe_u32 v3, v9, 16, 1
+; GFX11-FAKE16-NEXT: v_lshrrev_b32_e32 v0, 16, v0
+; GFX11-FAKE16-NEXT: v_dual_cndmask_b32 v1, v1, v2 :: v_dual_and_b32 v4, 0xffff, v4
+; GFX11-FAKE16-NEXT: s_delay_alu instid0(VALU_DEP_4) | instskip(NEXT) | instid1(VALU_DEP_3)
+; GFX11-FAKE16-NEXT: v_bfe_u32 v10, v8, 16, 1
+; GFX11-FAKE16-NEXT: v_and_b32_e32 v0, 0xffff, v0
+; GFX11-FAKE16-NEXT: s_delay_alu instid0(VALU_DEP_3) | instskip(SKIP_1) | instid1(VALU_DEP_4)
+; GFX11-FAKE16-NEXT: v_lshl_or_b32 v5, v5, 16, v4
+; GFX11-FAKE16-NEXT: v_add_f32_e64 v4, 0x40c00000, s4
+; GFX11-FAKE16-NEXT: v_add_nc_u32_e32 v10, v10, v8
+; GFX11-FAKE16-NEXT: v_lshrrev_b32_e32 v1, 16, v1
+; GFX11-FAKE16-NEXT: s_delay_alu instid0(VALU_DEP_3) | instskip(SKIP_2) | instid1(VALU_DEP_3)
+; GFX11-FAKE16-NEXT: v_bfe_u32 v2, v4, 16, 1
+; GFX11-FAKE16-NEXT: v_or_b32_e32 v11, 0x400000, v4
+; GFX11-FAKE16-NEXT: v_cmp_u_f32_e32 vcc_lo, v4, v4
+; GFX11-FAKE16-NEXT: v_add_nc_u32_e32 v2, v2, v4
+; GFX11-FAKE16-NEXT: v_add_f32_e64 v4, 0x40c00000, s2
+; GFX11-FAKE16-NEXT: s_and_b32 s2, s1, 0xffff0000
+; GFX11-FAKE16-NEXT: s_lshl_b32 s1, s1, 16
+; GFX11-FAKE16-NEXT: s_delay_alu instid0(VALU_DEP_2)
+; GFX11-FAKE16-NEXT: v_add_nc_u32_e32 v2, 0x7fff, v2
+; GFX11-FAKE16-NEXT: v_add_nc_u32_e32 v3, v3, v9
+; GFX11-FAKE16-NEXT: v_add_f32_e64 v13, 0x40c00000, s1
+; GFX11-FAKE16-NEXT: s_lshl_b32 s1, s0, 16
+; GFX11-FAKE16-NEXT: s_and_b32 s0, s0, 0xffff0000
+; GFX11-FAKE16-NEXT: v_cndmask_b32_e32 v2, v2, v11, vcc_lo
+; GFX11-FAKE16-NEXT: v_cmp_u_f32_e32 vcc_lo, v9, v9
+; GFX11-FAKE16-NEXT: v_add_nc_u32_e32 v9, 0x7fff, v10
+; GFX11-FAKE16-NEXT: v_or_b32_e32 v10, 0x400000, v8
+; GFX11-FAKE16-NEXT: v_add_nc_u32_e32 v3, 0x7fff, v3
+; GFX11-FAKE16-NEXT: v_bfe_u32 v11, v4, 16, 1
+; GFX11-FAKE16-NEXT: v_add_f32_e64 v14, 0x40c00000, s0
+; GFX11-FAKE16-NEXT: v_or_b32_e32 v18, 0x400000, v13
+; GFX11-FAKE16-NEXT: v_lshrrev_b32_e32 v2, 16, v2
+; GFX11-FAKE16-NEXT: v_cndmask_b32_e32 v3, v3, v12, vcc_lo
+; GFX11-FAKE16-NEXT: v_cmp_u_f32_e32 vcc_lo, v8, v8
+; GFX11-FAKE16-NEXT: v_bfe_u32 v16, v14, 16, 1
+; GFX11-FAKE16-NEXT: s_delay_alu instid0(VALU_DEP_3)
+; GFX11-FAKE16-NEXT: v_lshrrev_b32_e32 v3, 16, v3
+; GFX11-FAKE16-NEXT: v_dual_cndmask_b32 v8, v9, v10 :: v_dual_add_nc_u32 v9, v11, v4
+; GFX11-FAKE16-NEXT: v_or_b32_e32 v11, 0x400000, v4
+; GFX11-FAKE16-NEXT: v_cmp_u_f32_e32 vcc_lo, v4, v4
+; GFX11-FAKE16-NEXT: v_add_f32_e64 v10, 0x40c00000, s2
+; GFX11-FAKE16-NEXT: v_add_nc_u32_e32 v16, v16, v14
+; GFX11-FAKE16-NEXT: v_add_nc_u32_e32 v9, 0x7fff, v9
+; GFX11-FAKE16-NEXT: v_lshrrev_b32_e32 v8, 16, v8
+; GFX11-FAKE16-NEXT: s_delay_alu instid0(VALU_DEP_4) | instskip(SKIP_1) | instid1(VALU_DEP_4)
+; GFX11-FAKE16-NEXT: v_bfe_u32 v12, v10, 16, 1
+; GFX11-FAKE16-NEXT: v_or_b32_e32 v17, 0x400000, v10
+; GFX11-FAKE16-NEXT: v_cndmask_b32_e32 v4, v9, v11, vcc_lo
+; GFX11-FAKE16-NEXT: v_add_f32_e64 v9, 0x40c00000, s1
+; GFX11-FAKE16-NEXT: v_bfe_u32 v11, v13, 16, 1
+; GFX11-FAKE16-NEXT: v_cmp_u_f32_e32 vcc_lo, v13, v13
+; GFX11-FAKE16-NEXT: v_add_nc_u32_e32 v12, v12, v10
+; GFX11-FAKE16-NEXT: s_delay_alu instid0(VALU_DEP_4) | instskip(NEXT) | instid1(VALU_DEP_4)
+; GFX11-FAKE16-NEXT: v_bfe_u32 v15, v9, 16, 1
+; GFX11-FAKE16-NEXT: v_add_nc_u32_e32 v11, v11, v13
+; GFX11-FAKE16-NEXT: v_or_b32_e32 v19, 0x400000, v9
+; GFX11-FAKE16-NEXT: s_delay_alu instid0(VALU_DEP_4)
+; GFX11-FAKE16-NEXT: v_add_nc_u32_e32 v12, 0x7fff, v12
+; GFX11-FAKE16-NEXT: v_add_nc_u32_e32 v13, 0x7fff, v16
+; GFX11-FAKE16-NEXT: v_add_nc_u32_e32 v15, v15, v9
+; GFX11-FAKE16-NEXT: v_add_nc_u32_e32 v11, 0x7fff, v11
+; GFX11-FAKE16-NEXT: v_or_b32_e32 v16, 0x400000, v14
+; GFX11-FAKE16-NEXT: v_lshrrev_b32_e32 v4, 16, v4
+; GFX11-FAKE16-NEXT: s_delay_alu instid0(VALU_DEP_4) | instskip(NEXT) | instid1(VALU_DEP_4)
+; GFX11-FAKE16-NEXT: v_add_nc_u32_e32 v15, 0x7fff, v15
+; GFX11-FAKE16-NEXT: v_cndmask_b32_e32 v11, v11, v18, vcc_lo
+; GFX11-FAKE16-NEXT: v_cmp_u_f32_e32 vcc_lo, v9, v9
+; GFX11-FAKE16-NEXT: s_delay_alu instid0(VALU_DEP_2) | instskip(NEXT) | instid1(VALU_DEP_4)
+; GFX11-FAKE16-NEXT: v_lshrrev_b32_e32 v11, 16, v11
+; GFX11-FAKE16-NEXT: v_cndmask_b32_e32 v9, v15, v19, vcc_lo
+; GFX11-FAKE16-NEXT: v_cmp_u_f32_e32 vcc_lo, v10, v10
+; GFX11-FAKE16-NEXT: s_delay_alu instid0(VALU_DEP_3) | instskip(NEXT) | instid1(VALU_DEP_3)
+; GFX11-FAKE16-NEXT: v_and_b32_e32 v11, 0xffff, v11
+; GFX11-FAKE16-NEXT: v_lshrrev_b32_e32 v9, 16, v9
+; GFX11-FAKE16-NEXT: v_cndmask_b32_e32 v10, v12, v17, vcc_lo
+; GFX11-FAKE16-NEXT: v_cmp_u_f32_e32 vcc_lo, v14, v14
+; GFX11-FAKE16-NEXT: v_and_b32_e32 v3, 0xffff, v3
+; GFX11-FAKE16-NEXT: s_delay_alu instid0(VALU_DEP_4) | instskip(NEXT) | instid1(VALU_DEP_4)
+; GFX11-FAKE16-NEXT: v_and_b32_e32 v9, 0xffff, v9
+; GFX11-FAKE16-NEXT: v_lshrrev_b32_e32 v10, 16, v10
+; GFX11-FAKE16-NEXT: v_cndmask_b32_e32 v12, v13, v16, vcc_lo
+; GFX11-FAKE16-NEXT: v_and_b32_e32 v13, 0xffff, v4
+; GFX11-FAKE16-NEXT: v_lshl_or_b32 v4, v1, 16, v0
+; GFX11-FAKE16-NEXT: v_lshl_or_b32 v3, v2, 16, v3
+; GFX11-FAKE16-NEXT: v_lshl_or_b32 v1, v10, 16, v11
+; GFX11-FAKE16-NEXT: v_lshrrev_b32_e32 v12, 16, v12
+; GFX11-FAKE16-NEXT: v_lshl_or_b32 v2, v8, 16, v13
+; GFX11-FAKE16-NEXT: s_delay_alu instid0(VALU_DEP_2)
+; GFX11-FAKE16-NEXT: v_lshl_or_b32 v0, v12, 16, v9
+; GFX11-FAKE16-NEXT: s_setpc_b64 s[30:31]
+; GFX11-FAKE16-NEXT: .LBB47_3:
+; GFX11-FAKE16-NEXT: s_branch .LBB47_2
+; GFX11-FAKE16-NEXT: .LBB47_4:
+; GFX11-FAKE16-NEXT: v_dual_mov_b32 v0, s0 :: v_dual_mov_b32 v1, s1
+; GFX11-FAKE16-NEXT: v_dual_mov_b32 v2, s2 :: v_dual_mov_b32 v3, s3
+; GFX11-FAKE16-NEXT: v_dual_mov_b32 v4, s4 :: v_dual_mov_b32 v5, s5
+; GFX11-FAKE16-NEXT: v_dual_mov_b32 v6, s6 :: v_dual_mov_b32 v7, s7
+; GFX11-FAKE16-NEXT: s_setpc_b64 s[30:31]
%cmp = icmp eq i32 %b, 0
br i1 %cmp, label %cmp.true, label %cmp.false
@@ -17995,203 +18357,384 @@ define inreg <4 x i64> @bitcast_v16bf16_to_v4i64_scalar(<16 x bfloat> inreg %a,
; GFX9-NEXT: v_mov_b32_e32 v7, s23
; GFX9-NEXT: s_setpc_b64 s[30:31]
;
-; GFX11-LABEL: bitcast_v16bf16_to_v4i64_scalar:
-; GFX11: ; %bb.0:
-; GFX11-NEXT: s_waitcnt vmcnt(0) expcnt(0) lgkmcnt(0)
-; GFX11-NEXT: s_mov_b32 s7, s19
-; GFX11-NEXT: s_mov_b32 s6, s18
-; GFX11-NEXT: s_mov_b32 s5, s17
-; GFX11-NEXT: s_mov_b32 s4, s16
-; GFX11-NEXT: s_cmp_lg_u32 s20, 0
-; GFX11-NEXT: s_mov_b32 s8, 0
-; GFX11-NEXT: s_cbranch_scc0 .LBB67_3
-; GFX11-NEXT: ; %bb.1: ; %Flow
-; GFX11-NEXT: s_and_not1_b32 vcc_lo, exec_lo, s8
-; GFX11-NEXT: s_cbranch_vccnz .LBB67_4
-; GFX11-NEXT: .LBB67_2: ; %cmp.true
-; GFX11-NEXT: s_lshl_b32 s8, s7, 16
-; GFX11-NEXT: s_and_b32 s7, s7, 0xffff0000
-; GFX11-NEXT: v_add_f32_e64 v0, 0x40c00000, s8
-; GFX11-NEXT: v_add_f32_e64 v1, 0x40c00000, s7
-; GFX11-NEXT: s_and_b32 s8, s6, 0xffff0000
-; GFX11-NEXT: s_lshl_b32 s6, s6, 16
-; GFX11-NEXT: v_add_f32_e64 v3, 0x40c00000, s8
-; GFX11-NEXT: v_bfe_u32 v2, v0, 16, 1
-; GFX11-NEXT: v_bfe_u32 v4, v1, 16, 1
-; GFX11-NEXT: v_or_b32_e32 v9, 0x400000, v1
-; GFX11-NEXT: s_and_b32 s7, s5, 0xffff0000
-; GFX11-NEXT: v_add_f32_e64 v5, 0x40c00000, s6
-; GFX11-NEXT: v_add_f32_e64 v6, 0x40c00000, s7
-; GFX11-NEXT: v_add_nc_u32_e32 v4, v4, v1
-; GFX11-NEXT: v_or_b32_e32 v8, 0x400000, v0
-; GFX11-NEXT: v_bfe_u32 v10, v3, 16, 1
-; GFX11-NEXT: v_cmp_u_f32_e32 vcc_lo, v0, v0
-; GFX11-NEXT: s_lshl_b32 s5, s5, 16
-; GFX11-NEXT: v_add_nc_u32_e32 v4, 0x7fff, v4
-; GFX11-NEXT: v_add_nc_u32_e32 v2, v2, v0
-; GFX11-NEXT: v_add_f32_e64 v7, 0x40c00000, s5
-; GFX11-NEXT: v_bfe_u32 v11, v6, 16, 1
-; GFX11-NEXT: s_and_b32 s5, s4, 0xffff0000
-; GFX11-NEXT: s_lshl_b32 s4, s4, 16
-; GFX11-NEXT: v_add_nc_u32_e32 v2, 0x7fff, v2
-; GFX11-NEXT: s_delay_alu instid0(VALU_DEP_1) | instskip(SKIP_4) | instid1(VALU_DEP_4)
-; GFX11-NEXT: v_cndmask_b32_e32 v0, v2, v8, vcc_lo
-; GFX11-NEXT: v_bfe_u32 v2, v5, 16, 1
-; GFX11-NEXT: v_add_nc_u32_e32 v8, v10, v3
-; GFX11-NEXT: v_cmp_u_f32_e32 vcc_lo, v1, v1
-; GFX11-NEXT: v_or_b32_e32 v10, 0x400000, v5
-; GFX11-NEXT: v_add_nc_u32_e32 v2, v2, v5
-; GFX11-NEXT: s_delay_alu instid0(VALU_DEP_4)
-; GFX11-NEXT: v_dual_cndmask_b32 v1, v4, v9 :: v_dual_add_nc_u32 v8, 0x7fff, v8
-; GFX11-NEXT: v_or_b32_e32 v4, 0x400000, v3
-; GFX11-NEXT: v_bfe_u32 v9, v7, 16, 1
-; GFX11-NEXT: v_cmp_u_f32_e32 vcc_lo, v3, v3
-; GFX11-NEXT: v_add_nc_u32_e32 v2, 0x7fff, v2
-; GFX11-NEXT: v_lshrrev_b32_e32 v1, 16, v1
-; GFX11-NEXT: s_delay_alu instid0(VALU_DEP_4)
-; GFX11-NEXT: v_dual_cndmask_b32 v3, v8, v4 :: v_dual_add_nc_u32 v4, v9, v7
-; GFX11-NEXT: v_cmp_u_f32_e32 vcc_lo, v5, v5
-; GFX11-NEXT: v_add_nc_u32_e32 v5, v11, v6
-; GFX11-NEXT: v_or_b32_e32 v8, 0x400000, v7
-; GFX11-NEXT: v_lshrrev_b32_e32 v0, 16, v0
-; GFX11-NEXT: v_add_nc_u32_e32 v4, 0x7fff, v4
-; GFX11-NEXT: s_delay_alu instid0(VALU_DEP_4)
-; GFX11-NEXT: v_dual_cndmask_b32 v2, v2, v10 :: v_dual_add_nc_u32 v5, 0x7fff, v5
-; GFX11-NEXT: v_cmp_u_f32_e32 vcc_lo, v7, v7
-; GFX11-NEXT: v_or_b32_e32 v9, 0x400000, v6
-; GFX11-NEXT: v_add_f32_e64 v10, 0x40c00000, s4
-; GFX11-NEXT: v_and_b32_e32 v0, 0xffff, v0
-; GFX11-NEXT: v_lshrrev_b32_e32 v2, 16, v2
-; GFX11-NEXT: v_cndmask_b32_e32 v4, v4, v8, vcc_lo
-; GFX11-NEXT: v_cmp_u_f32_e32 vcc_lo, v6, v6
-; GFX11-NEXT: v_add_f32_e64 v8, 0x40c00000, s5
-; GFX11-NEXT: v_lshrrev_b32_e32 v3, 16, v3
-; GFX11-NEXT: v_and_b32_e32 v2, 0xffff, v2
-; GFX11-NEXT: v_lshl_or_b32 v7, v1, 16, v0
-; GFX11-NEXT: v_cndmask_b32_e32 v5, v5, v9, vcc_lo
-; GFX11-NEXT: v_bfe_u32 v9, v10, 16, 1
-; GFX11-NEXT: v_bfe_u32 v6, v8, 16, 1
-; GFX11-NEXT: v_cmp_u_f32_e32 vcc_lo, v10, v10
-; GFX11-NEXT: s_and_b32 s4, s3, 0xffff0000
-; GFX11-NEXT: v_lshrrev_b32_e32 v5, 16, v5
-; GFX11-NEXT: v_add_nc_u32_e32 v0, v9, v10
-; GFX11-NEXT: s_lshl_b32 s3, s3, 16
-; GFX11-NEXT: s_delay_alu instid0(SALU_CYCLE_1) | instskip(SKIP_1) | instid1(VALU_DEP_2)
-; GFX11-NEXT: v_add_f32_e64 v9, 0x40c00000, s3
-; GFX11-NEXT: s_and_b32 s3, s2, 0xffff0000
-; GFX11-NEXT: v_add_nc_u32_e32 v0, 0x7fff, v0
-; GFX11-NEXT: v_add_nc_u32_e32 v11, v6, v8
-; GFX11-NEXT: v_lshl_or_b32 v6, v3, 16, v2
-; GFX11-NEXT: v_or_b32_e32 v3, 0x400000, v10
-; GFX11-NEXT: v_or_b32_e32 v2, 0x400000, v8
-; GFX11-NEXT: s_lshl_b32 s2, s2, 16
-; GFX11-NEXT: v_or_b32_e32 v12, 0x400000, v9
-; GFX11-NEXT: s_delay_alu instid0(VALU_DEP_3)
-; GFX11-NEXT: v_cndmask_b32_e32 v0, v0, v3, vcc_lo
-; GFX11-NEXT: v_add_nc_u32_e32 v1, 0x7fff, v11
-; GFX11-NEXT: v_lshrrev_b32_e32 v4, 16, v4
-; GFX11-NEXT: v_cmp_u_f32_e32 vcc_lo, v8, v8
-; GFX11-NEXT: v_add_f32_e64 v8, 0x40c00000, s3
-; GFX11-NEXT: v_bfe_u32 v3, v9, 16, 1
-; GFX11-NEXT: v_lshrrev_b32_e32 v0, 16, v0
-; GFX11-NEXT: v_dual_cndmask_b32 v1, v1, v2 :: v_dual_and_b32 v4, 0xffff, v4
-; GFX11-NEXT: s_delay_alu instid0(VALU_DEP_4) | instskip(NEXT) | instid1(VALU_DEP_3)
-; GFX11-NEXT: v_bfe_u32 v10, v8, 16, 1
-; GFX11-NEXT: v_and_b32_e32 v0, 0xffff, v0
-; GFX11-NEXT: s_delay_alu instid0(VALU_DEP_3) | instskip(SKIP_1) | instid1(VALU_DEP_4)
-; GFX11-NEXT: v_lshl_or_b32 v5, v5, 16, v4
-; GFX11-NEXT: v_add_f32_e64 v4, 0x40c00000, s4
-; GFX11-NEXT: v_add_nc_u32_e32 v10, v10, v8
-; GFX11-NEXT: v_lshrrev_b32_e32 v1, 16, v1
-; GFX11-NEXT: s_delay_alu instid0(VALU_DEP_3) | instskip(SKIP_2) | instid1(VALU_DEP_3)
-; GFX11-NEXT: v_bfe_u32 v2, v4, 16, 1
-; GFX11-NEXT: v_or_b32_e32 v11, 0x400000, v4
-; GFX11-NEXT: v_cmp_u_f32_e32 vcc_lo, v4, v4
-; GFX11-NEXT: v_add_nc_u32_e32 v2, v2, v4
-; GFX11-NEXT: v_add_f32_e64 v4, 0x40c00000, s2
-; GFX11-NEXT: s_and_b32 s2, s1, 0xffff0000
-; GFX11-NEXT: s_lshl_b32 s1, s1, 16
-; GFX11-NEXT: s_delay_alu instid0(VALU_DEP_2)
-; GFX11-NEXT: v_add_nc_u32_e32 v2, 0x7fff, v2
-; GFX11-NEXT: v_add_nc_u32_e32 v3, v3, v9
-; GFX11-NEXT: v_add_f32_e64 v13, 0x40c00000, s1
-; GFX11-NEXT: s_lshl_b32 s1, s0, 16
-; GFX11-NEXT: s_and_b32 s0, s0, 0xffff0000
-; GFX11-NEXT: v_cndmask_b32_e32 v2, v2, v11, vcc_lo
-; GFX11-NEXT: v_cmp_u_f32_e32 vcc_lo, v9, v9
-; GFX11-NEXT: v_add_nc_u32_e32 v9, 0x7fff, v10
-; GFX11-NEXT: v_or_b32_e32 v10, 0x400000, v8
-; GFX11-NEXT: v_add_nc_u32_e32 v3, 0x7fff, v3
-; GFX11-NEXT: v_bfe_u32 v11, v4, 16, 1
-; GFX11-NEXT: v_add_f32_e64 v14, 0x40c00000, s0
-; GFX11-NEXT: v_or_b32_e32 v18, 0x400000, v13
-; GFX11-NEXT: v_lshrrev_b32_e32 v2, 16, v2
-; GFX11-NEXT: v_cndmask_b32_e32 v3, v3, v12, vcc_lo
-; GFX11-NEXT: v_cmp_u_f32_e32 vcc_lo, v8, v8
-; GFX11-NEXT: v_bfe_u32 v16, v14, 16, 1
-; GFX11-NEXT: s_delay_alu instid0(VALU_DEP_3)
-; GFX11-NEXT: v_lshrrev_b32_e32 v3, 16, v3
-; GFX11-NEXT: v_dual_cndmask_b32 v8, v9, v10 :: v_dual_add_nc_u32 v9, v11, v4
-; GFX11-NEXT: v_or_b32_e32 v11, 0x400000, v4
-; GFX11-NEXT: v_cmp_u_f32_e32 vcc_lo, v4, v4
-; GFX11-NEXT: v_add_f32_e64 v10, 0x40c00000, s2
-; GFX11-NEXT: v_add_nc_u32_e32 v16, v16, v14
-; GFX11-NEXT: v_add_nc_u32_e32 v9, 0x7fff, v9
-; GFX11-NEXT: v_lshrrev_b32_e32 v8, 16, v8
-; GFX11-NEXT: s_delay_alu instid0(VALU_DEP_4) | instskip(SKIP_1) | instid1(VALU_DEP_4)
-; GFX11-NEXT: v_bfe_u32 v12, v10, 16, 1
-; GFX11-NEXT: v_or_b32_e32 v17, 0x400000, v10
-; GFX11-NEXT: v_cndmask_b32_e32 v4, v9, v11, vcc_lo
-; GFX11-NEXT: v_add_f32_e64 v9, 0x40c00000, s1
-; GFX11-NEXT: v_bfe_u32 v11, v13, 16, 1
-; GFX11-NEXT: v_cmp_u_f32_e32 vcc_lo, v13, v13
-; GFX11-NEXT: v_add_nc_u32_e32 v12, v12, v10
-; GFX11-NEXT: s_delay_alu instid0(VALU_DEP_4) | instskip(NEXT) | instid1(VALU_DEP_4)
-; GFX11-NEXT: v_bfe_u32 v15, v9, 16, 1
-; GFX11-NEXT: v_add_nc_u32_e32 v11, v11, v13
-; GFX11-NEXT: v_or_b32_e32 v19, 0x400000, v9
-; GFX11-NEXT: s_delay_alu instid0(VALU_DEP_4)
-; GFX11-NEXT: v_add_nc_u32_e32 v12, 0x7fff, v12
-; GFX11-NEXT: v_add_nc_u32_e32 v13, 0x7fff, v16
-; GFX11-NEXT: v_add_nc_u32_e32 v15, v15, v9
-; GFX11-NEXT: v_add_nc_u32_e32 v11, 0x7fff, v11
-; GFX11-NEXT: v_or_b32_e32 v16, 0x400000, v14
-; GFX11-NEXT: v_lshrrev_b32_e32 v4, 16, v4
-; GFX11-NEXT: s_delay_alu instid0(VALU_DEP_4) | instskip(NEXT) | instid1(VALU_DEP_4)
-; GFX11-NEXT: v_add_nc_u32_e32 v15, 0x7fff, v15
-; GFX11-NEXT: v_cndmask_b32_e32 v11, v11, v18, vcc_lo
-; GFX11-NEXT: v_cmp_u_f32_e32 vcc_lo, v9, v9
-; GFX11-NEXT: s_delay_alu instid0(VALU_DEP_2) | instskip(NEXT) | instid1(VALU_DEP_4)
-; GFX11-NEXT: v_lshrrev_b32_e32 v11, 16, v11
-; GFX11-NEXT: v_cndmask_b32_e32 v9, v15, v19, vcc_lo
-; GFX11-NEXT: v_cmp_u_f32_e32 vcc_lo, v10, v10
-; GFX11-NEXT: s_delay_alu instid0(VALU_DEP_3) | instskip(NEXT) | instid1(VALU_DEP_3)
-; GFX11-NEXT: v_and_b32_e32 v11, 0xffff, v11
-; GFX11-NEXT: v_lshrrev_b32_e32 v9, 16, v9
-; GFX11-NEXT: v_cndmask_b32_e32 v10, v12, v17, vcc_lo
-; GFX11-NEXT: v_cmp_u_f32_e32 vcc_lo, v14, v14
-; GFX11-NEXT: v_and_b32_e32 v3, 0xffff, v3
-; GFX11-NEXT: s_delay_alu instid0(VALU_DEP_4) | instskip(NEXT) | instid1(VALU_DEP_4)
-; GFX11-NEXT: v_and_b32_e32 v9, 0xffff, v9
-; GFX11-NEXT: v_lshrrev_b32_e32 v10, 16, v10
-; GFX11-NEXT: v_cndmask_b32_e32 v12, v13, v16, vcc_lo
-; GFX11-NEXT: v_and_b32_e32 v13, 0xffff, v4
-; GFX11-NEXT: v_lshl_or_b32 v4, v1, 16, v0
-; GFX11-NEXT: v_lshl_or_b32 v3, v2, 16, v3
-; GFX11-NEXT: v_lshl_or_b32 v1, v10, 16, v11
-; GFX11-NEXT: v_lshrrev_b32_e32 v12, 16, v12
-; GFX11-NEXT: v_lshl_or_b32 v2, v8, 16, v13
-; GFX11-NEXT: s_delay_alu instid0(VALU_DEP_2)
-; GFX11-NEXT: v_lshl_or_b32 v0, v12, 16, v9
-; GFX11-NEXT: s_setpc_b64 s[30:31]
-; GFX11-NEXT: .LBB67_3:
-; GFX11-NEXT: s_branch .LBB67_2
-; GFX11-NEXT: .LBB67_4:
-; GFX11-NEXT: v_dual_mov_b32 v0, s0 :: v_dual_mov_b32 v1, s1
-; GFX11-NEXT: v_dual_mov_b32 v2, s2 :: v_dual_mov_b32 v3, s3
-; GFX11-NEXT: v_dual_mov_b32 v4, s4 :: v_dual_mov_b32 v5, s5
-; GFX11-NEXT: v_dual_mov_b32 v6, s6 :: v_dual_mov_b32 v7, s7
-; GFX11-NEXT: s_setpc_b64 s[30:31]
+; GFX11-TRUE16-LABEL: bitcast_v16bf16_to_v4i64_scalar:
+; GFX11-TRUE16: ; %bb.0:
+; GFX11-TRUE16-NEXT: s_waitcnt vmcnt(0) expcnt(0) lgkmcnt(0)
+; GFX11-TRUE16-NEXT: s_mov_b32 s7, s19
+; GFX11-TRUE16-NEXT: s_mov_b32 s6, s18
+; GFX11-TRUE16-NEXT: s_mov_b32 s5, s17
+; GFX11-TRUE16-NEXT: s_mov_b32 s4, s16
+; GFX11-TRUE16-NEXT: s_cmp_lg_u32 s20, 0
+; GFX11-TRUE16-NEXT: s_mov_b32 s8, 0
+; GFX11-TRUE16-NEXT: s_cbranch_scc0 .LBB67_3
+; GFX11-TRUE16-NEXT: ; %bb.1: ; %Flow
+; GFX11-TRUE16-NEXT: s_and_not1_b32 vcc_lo, exec_lo, s8
+; GFX11-TRUE16-NEXT: s_cbranch_vccnz .LBB67_4
+; GFX11-TRUE16-NEXT: .LBB67_2: ; %cmp.true
+; GFX11-TRUE16-NEXT: s_and_b32 s8, s7, 0xffff0000
+; GFX11-TRUE16-NEXT: s_lshl_b32 s7, s7, 16
+; GFX11-TRUE16-NEXT: v_add_f32_e64 v0, 0x40c00000, s8
+; GFX11-TRUE16-NEXT: v_add_f32_e64 v1, 0x40c00000, s7
+; GFX11-TRUE16-NEXT: s_and_b32 s7, s6, 0xffff0000
+; GFX11-TRUE16-NEXT: s_lshl_b32 s6, s6, 16
+; GFX11-TRUE16-NEXT: v_add_f32_e64 v4, 0x40c00000, s7
+; GFX11-TRUE16-NEXT: v_bfe_u32 v2, v0, 16, 1
+; GFX11-TRUE16-NEXT: v_bfe_u32 v3, v1, 16, 1
+; GFX11-TRUE16-NEXT: v_add_f32_e64 v5, 0x40c00000, s6
+; GFX11-TRUE16-NEXT: v_or_b32_e32 v6, 0x400000, v0
+; GFX11-TRUE16-NEXT: v_bfe_u32 v7, v4, 16, 1
+; GFX11-TRUE16-NEXT: v_add_nc_u32_e32 v2, v2, v0
+; GFX11-TRUE16-NEXT: v_or_b32_e32 v8, 0x400000, v1
+; GFX11-TRUE16-NEXT: v_cmp_u_f32_e32 vcc_lo, v0, v0
+; GFX11-TRUE16-NEXT: v_bfe_u32 v9, v5, 16, 1
+; GFX11-TRUE16-NEXT: v_add_nc_u32_e32 v7, v7, v4
+; GFX11-TRUE16-NEXT: v_add_nc_u32_e32 v2, 0x7fff, v2
+; GFX11-TRUE16-NEXT: v_add_nc_u32_e32 v3, v3, v1
+; GFX11-TRUE16-NEXT: s_and_b32 s6, s5, 0xffff0000
+; GFX11-TRUE16-NEXT: s_lshl_b32 s5, s5, 16
+; GFX11-TRUE16-NEXT: s_delay_alu instid0(VALU_DEP_1) | instskip(SKIP_2) | instid1(VALU_DEP_3)
+; GFX11-TRUE16-NEXT: v_dual_cndmask_b32 v0, v2, v6 :: v_dual_add_nc_u32 v3, 0x7fff, v3
+; GFX11-TRUE16-NEXT: v_cmp_u_f32_e32 vcc_lo, v1, v1
+; GFX11-TRUE16-NEXT: v_add_nc_u32_e32 v6, 0x7fff, v7
+; GFX11-TRUE16-NEXT: v_lshrrev_b32_e32 v0, 16, v0
+; GFX11-TRUE16-NEXT: s_delay_alu instid0(VALU_DEP_4)
+; GFX11-TRUE16-NEXT: v_cndmask_b32_e32 v1, v3, v8, vcc_lo
+; GFX11-TRUE16-NEXT: v_or_b32_e32 v3, 0x400000, v4
+; GFX11-TRUE16-NEXT: v_cmp_u_f32_e32 vcc_lo, v4, v4
+; GFX11-TRUE16-NEXT: v_add_nc_u32_e32 v2, v9, v5
+; GFX11-TRUE16-NEXT: v_add_f32_e64 v8, 0x40c00000, s5
+; GFX11-TRUE16-NEXT: v_lshrrev_b32_e32 v7, 16, v1
+; GFX11-TRUE16-NEXT: v_or_b32_e32 v9, 0x400000, v5
+; GFX11-TRUE16-NEXT: v_cndmask_b32_e32 v3, v6, v3, vcc_lo
+; GFX11-TRUE16-NEXT: v_add_nc_u32_e32 v1, 0x7fff, v2
+; GFX11-TRUE16-NEXT: v_cmp_u_f32_e32 vcc_lo, v5, v5
+; GFX11-TRUE16-NEXT: v_add_f32_e64 v2, 0x40c00000, s6
+; GFX11-TRUE16-NEXT: v_bfe_u32 v6, v8, 16, 1
+; GFX11-TRUE16-NEXT: v_lshrrev_b32_e32 v3, 16, v3
+; GFX11-TRUE16-NEXT: v_mov_b16_e32 v7.h, v0.l
+; GFX11-TRUE16-NEXT: v_cndmask_b32_e32 v1, v1, v9, vcc_lo
+; GFX11-TRUE16-NEXT: v_bfe_u32 v4, v2, 16, 1
+; GFX11-TRUE16-NEXT: v_add_nc_u32_e32 v0, v6, v8
+; GFX11-TRUE16-NEXT: v_cmp_u_f32_e32 vcc_lo, v2, v2
+; GFX11-TRUE16-NEXT: s_and_b32 s5, s4, 0xffff0000
+; GFX11-TRUE16-NEXT: v_lshrrev_b32_e32 v6, 16, v1
+; GFX11-TRUE16-NEXT: v_mov_b16_e32 v6.h, v3.l
+; GFX11-TRUE16-NEXT: v_or_b32_e32 v3, 0x400000, v2
+; GFX11-TRUE16-NEXT: v_add_nc_u32_e32 v4, v4, v2
+; GFX11-TRUE16-NEXT: s_lshl_b32 s4, s4, 16
+; GFX11-TRUE16-NEXT: v_add_nc_u32_e32 v0, 0x7fff, v0
+; GFX11-TRUE16-NEXT: v_add_f32_e64 v9, 0x40c00000, s4
+; GFX11-TRUE16-NEXT: v_or_b32_e32 v5, 0x400000, v8
+; GFX11-TRUE16-NEXT: v_add_nc_u32_e32 v1, 0x7fff, v4
+; GFX11-TRUE16-NEXT: v_add_f32_e64 v4, 0x40c00000, s5
+; GFX11-TRUE16-NEXT: s_and_b32 s4, s3, 0xffff0000
+; GFX11-TRUE16-NEXT: v_or_b32_e32 v10, 0x400000, v9
+; GFX11-TRUE16-NEXT: s_lshl_b32 s3, s3, 16
+; GFX11-TRUE16-NEXT: v_cndmask_b32_e32 v1, v1, v3, vcc_lo
+; GFX11-TRUE16-NEXT: v_bfe_u32 v2, v4, 16, 1
+; GFX11-TRUE16-NEXT: v_cmp_u_f32_e32 vcc_lo, v8, v8
+; GFX11-TRUE16-NEXT: v_bfe_u32 v3, v9, 16, 1
+; GFX11-TRUE16-NEXT: v_or_b32_e32 v8, 0x400000, v4
+; GFX11-TRUE16-NEXT: v_lshrrev_b32_e32 v1, 16, v1
+; GFX11-TRUE16-NEXT: v_add_nc_u32_e32 v2, v2, v4
+; GFX11-TRUE16-NEXT: v_cndmask_b32_e32 v0, v0, v5, vcc_lo
+; GFX11-TRUE16-NEXT: v_add_nc_u32_e32 v3, v3, v9
+; GFX11-TRUE16-NEXT: v_cmp_u_f32_e32 vcc_lo, v4, v4
+; GFX11-TRUE16-NEXT: s_delay_alu instid0(VALU_DEP_4) | instskip(NEXT) | instid1(VALU_DEP_4)
+; GFX11-TRUE16-NEXT: v_add_nc_u32_e32 v2, 0x7fff, v2
+; GFX11-TRUE16-NEXT: v_lshrrev_b32_e32 v5, 16, v0
+; GFX11-TRUE16-NEXT: v_add_f32_e64 v0, 0x40c00000, s4
+; GFX11-TRUE16-NEXT: v_add_nc_u32_e32 v3, 0x7fff, v3
+; GFX11-TRUE16-NEXT: v_mov_b16_e32 v5.h, v1.l
+; GFX11-TRUE16-NEXT: v_cndmask_b32_e32 v2, v2, v8, vcc_lo
+; GFX11-TRUE16-NEXT: v_cmp_u_f32_e32 vcc_lo, v9, v9
+; GFX11-TRUE16-NEXT: v_bfe_u32 v11, v0, 16, 1
+; GFX11-TRUE16-NEXT: v_add_f32_e64 v1, 0x40c00000, s3
+; GFX11-TRUE16-NEXT: s_and_b32 s3, s2, 0xffff0000
+; GFX11-TRUE16-NEXT: v_lshrrev_b32_e32 v2, 16, v2
+; GFX11-TRUE16-NEXT: v_cndmask_b32_e32 v3, v3, v10, vcc_lo
+; GFX11-TRUE16-NEXT: v_add_nc_u32_e32 v8, v11, v0
+; GFX11-TRUE16-NEXT: v_bfe_u32 v9, v1, 16, 1
+; GFX11-TRUE16-NEXT: v_cmp_u_f32_e32 vcc_lo, v0, v0
+; GFX11-TRUE16-NEXT: s_lshl_b32 s2, s2, 16
+; GFX11-TRUE16-NEXT: v_lshrrev_b32_e32 v4, 16, v3
+; GFX11-TRUE16-NEXT: v_add_nc_u32_e32 v3, 0x7fff, v8
+; GFX11-TRUE16-NEXT: v_or_b32_e32 v8, 0x400000, v0
+; GFX11-TRUE16-NEXT: v_mov_b16_e32 v4.h, v2.l
+; GFX11-TRUE16-NEXT: v_add_f32_e64 v2, 0x40c00000, s3
+; GFX11-TRUE16-NEXT: v_or_b32_e32 v10, 0x400000, v1
+; GFX11-TRUE16-NEXT: s_delay_alu instid0(VALU_DEP_4) | instskip(NEXT) | instid1(VALU_DEP_3)
+; GFX11-TRUE16-NEXT: v_dual_cndmask_b32 v0, v3, v8 :: v_dual_add_nc_u32 v3, v9, v1
+; GFX11-TRUE16-NEXT: v_bfe_u32 v8, v2, 16, 1
+; GFX11-TRUE16-NEXT: v_add_f32_e64 v9, 0x40c00000, s2
+; GFX11-TRUE16-NEXT: v_cmp_u_f32_e32 vcc_lo, v1, v1
+; GFX11-TRUE16-NEXT: s_and_b32 s2, s1, 0xffff0000
+; GFX11-TRUE16-NEXT: v_add_nc_u32_e32 v3, 0x7fff, v3
+; GFX11-TRUE16-NEXT: v_add_nc_u32_e32 v8, v8, v2
+; GFX11-TRUE16-NEXT: v_bfe_u32 v11, v9, 16, 1
+; GFX11-TRUE16-NEXT: v_or_b32_e32 v12, 0x400000, v2
+; GFX11-TRUE16-NEXT: v_lshrrev_b32_e32 v0, 16, v0
+; GFX11-TRUE16-NEXT: v_cndmask_b32_e32 v1, v3, v10, vcc_lo
+; GFX11-TRUE16-NEXT: v_add_f32_e64 v10, 0x40c00000, s2
+; GFX11-TRUE16-NEXT: v_add_nc_u32_e32 v8, 0x7fff, v8
+; GFX11-TRUE16-NEXT: v_add_nc_u32_e32 v11, v11, v9
+; GFX11-TRUE16-NEXT: v_cmp_u_f32_e32 vcc_lo, v2, v2
+; GFX11-TRUE16-NEXT: v_lshrrev_b32_e32 v3, 16, v1
+; GFX11-TRUE16-NEXT: v_bfe_u32 v1, v10, 16, 1
+; GFX11-TRUE16-NEXT: s_lshl_b32 s1, s1, 16
+; GFX11-TRUE16-NEXT: s_and_b32 s2, s0, 0xffff0000
+; GFX11-TRUE16-NEXT: v_cndmask_b32_e32 v2, v8, v12, vcc_lo
+; GFX11-TRUE16-NEXT: v_add_nc_u32_e32 v8, 0x7fff, v11
+; GFX11-TRUE16-NEXT: v_or_b32_e32 v11, 0x400000, v9
+; GFX11-TRUE16-NEXT: v_cmp_u_f32_e32 vcc_lo, v9, v9
+; GFX11-TRUE16-NEXT: v_add_nc_u32_e32 v1, v1, v10
+; GFX11-TRUE16-NEXT: v_mov_b16_e32 v3.h, v0.l
+; GFX11-TRUE16-NEXT: v_lshrrev_b32_e32 v0, 16, v2
+; GFX11-TRUE16-NEXT: v_add_f32_e64 v9, 0x40c00000, s2
+; GFX11-TRUE16-NEXT: v_cndmask_b32_e32 v2, v8, v11, vcc_lo
+; GFX11-TRUE16-NEXT: v_add_f32_e64 v8, 0x40c00000, s1
+; GFX11-TRUE16-NEXT: s_lshl_b32 s0, s0, 16
+; GFX11-TRUE16-NEXT: v_add_nc_u32_e32 v1, 0x7fff, v1
+; GFX11-TRUE16-NEXT: v_or_b32_e32 v11, 0x400000, v10
+; GFX11-TRUE16-NEXT: v_add_f32_e64 v12, 0x40c00000, s0
+; GFX11-TRUE16-NEXT: v_bfe_u32 v13, v8, 16, 1
+; GFX11-TRUE16-NEXT: v_bfe_u32 v14, v9, 16, 1
+; GFX11-TRUE16-NEXT: v_cmp_u_f32_e32 vcc_lo, v10, v10
+; GFX11-TRUE16-NEXT: v_or_b32_e32 v15, 0x400000, v8
+; GFX11-TRUE16-NEXT: v_bfe_u32 v10, v12, 16, 1
+; GFX11-TRUE16-NEXT: v_or_b32_e32 v16, 0x400000, v12
+; GFX11-TRUE16-NEXT: v_lshrrev_b32_e32 v2, 16, v2
+; GFX11-TRUE16-NEXT: v_cndmask_b32_e32 v1, v1, v11, vcc_lo
+; GFX11-TRUE16-NEXT: v_add_nc_u32_e32 v11, v13, v8
+; GFX11-TRUE16-NEXT: v_add_nc_u32_e32 v13, v14, v9
+; GFX11-TRUE16-NEXT: v_cmp_u_f32_e32 vcc_lo, v9, v9
+; GFX11-TRUE16-NEXT: v_mov_b16_e32 v2.h, v0.l
+; GFX11-TRUE16-NEXT: v_lshrrev_b32_e32 v14, 16, v1
+; GFX11-TRUE16-NEXT: v_add_nc_u32_e32 v1, v10, v12
+; GFX11-TRUE16-NEXT: v_add_nc_u32_e32 v10, 0x7fff, v11
+; GFX11-TRUE16-NEXT: v_add_nc_u32_e32 v11, 0x7fff, v13
+; GFX11-TRUE16-NEXT: v_or_b32_e32 v13, 0x400000, v9
+; GFX11-TRUE16-NEXT: s_delay_alu instid0(VALU_DEP_4) | instskip(NEXT) | instid1(VALU_DEP_2)
+; GFX11-TRUE16-NEXT: v_add_nc_u32_e32 v1, 0x7fff, v1
+; GFX11-TRUE16-NEXT: v_cndmask_b32_e32 v9, v11, v13, vcc_lo
+; GFX11-TRUE16-NEXT: v_cmp_u_f32_e32 vcc_lo, v8, v8
+; GFX11-TRUE16-NEXT: s_delay_alu instid0(VALU_DEP_2) | instskip(SKIP_3) | instid1(VALU_DEP_3)
+; GFX11-TRUE16-NEXT: v_lshrrev_b32_e32 v9, 16, v9
+; GFX11-TRUE16-NEXT: v_cndmask_b32_e32 v8, v10, v15, vcc_lo
+; GFX11-TRUE16-NEXT: v_cmp_u_f32_e32 vcc_lo, v12, v12
+; GFX11-TRUE16-NEXT: v_cndmask_b32_e32 v10, v1, v16, vcc_lo
+; GFX11-TRUE16-NEXT: v_lshrrev_b32_e32 v1, 16, v8
+; GFX11-TRUE16-NEXT: v_mov_b16_e32 v1.h, v14.l
+; GFX11-TRUE16-NEXT: s_delay_alu instid0(VALU_DEP_3)
+; GFX11-TRUE16-NEXT: v_lshrrev_b32_e32 v0, 16, v10
+; GFX11-TRUE16-NEXT: v_mov_b16_e32 v0.h, v9.l
+; GFX11-TRUE16-NEXT: s_setpc_b64 s[30:31]
+; GFX11-TRUE16-NEXT: .LBB67_3:
+; GFX11-TRUE16-NEXT: s_branch .LBB67_2
+; GFX11-TRUE16-NEXT: .LBB67_4:
+; GFX11-TRUE16-NEXT: v_dual_mov_b32 v0, s0 :: v_dual_mov_b32 v1, s1
+; GFX11-TRUE16-NEXT: v_dual_mov_b32 v2, s2 :: v_dual_mov_b32 v3, s3
+; GFX11-TRUE16-NEXT: v_dual_mov_b32 v4, s4 :: v_dual_mov_b32 v5, s5
+; GFX11-TRUE16-NEXT: v_dual_mov_b32 v6, s6 :: v_dual_mov_b32 v7, s7
+; GFX11-TRUE16-NEXT: s_setpc_b64 s[30:31]
+;
+; GFX11-FAKE16-LABEL: bitcast_v16bf16_to_v4i64_scalar:
+; GFX11-FAKE16: ; %bb.0:
+; GFX11-FAKE16-NEXT: s_waitcnt vmcnt(0) expcnt(0) lgkmcnt(0)
+; GFX11-FAKE16-NEXT: s_mov_b32 s7, s19
+; GFX11-FAKE16-NEXT: s_mov_b32 s6, s18
+; GFX11-FAKE16-NEXT: s_mov_b32 s5, s17
+; GFX11-FAKE16-NEXT: s_mov_b32 s4, s16
+; GFX11-FAKE16-NEXT: s_cmp_lg_u32 s20, 0
+; GFX11-FAKE16-NEXT: s_mov_b32 s8, 0
+; GFX11-FAKE16-NEXT: s_cbranch_scc0 .LBB67_3
+; GFX11-FAKE16-NEXT: ; %bb.1: ; %Flow
+; GFX11-FAKE16-NEXT: s_and_not1_b32 vcc_lo, exec_lo, s8
+; GFX11-FAKE16-NEXT: s_cbranch_vccnz .LBB67_4
+; GFX11-FAKE16-NEXT: .LBB67_2: ; %cmp.true
+; GFX11-FAKE16-NEXT: s_lshl_b32 s8, s7, 16
+; GFX11-FAKE16-NEXT: s_and_b32 s7, s7, 0xffff0000
+; GFX11-FAKE16-NEXT: v_add_f32_e64 v0, 0x40c00000, s8
+; GFX11-FAKE16-NEXT: v_add_f32_e64 v1, 0x40c00000, s7
+; GFX11-FAKE16-NEXT: s_and_b32 s8, s6, 0xffff0000
+; GFX11-FAKE16-NEXT: s_lshl_b32 s6, s6, 16
+; GFX11-FAKE16-NEXT: v_add_f32_e64 v3, 0x40c00000, s8
+; GFX11-FAKE16-NEXT: v_bfe_u32 v2, v0, 16, 1
+; GFX11-FAKE16-NEXT: v_bfe_u32 v4, v1, 16, 1
+; GFX11-FAKE16-NEXT: v_or_b32_e32 v9, 0x400000, v1
+; GFX11-FAKE16-NEXT: s_and_b32 s7, s5, 0xffff0000
+; GFX11-FAKE16-NEXT: v_add_f32_e64 v5, 0x40c00000, s6
+; GFX11-FAKE16-NEXT: v_add_f32_e64 v6, 0x40c00000, s7
+; GFX11-FAKE16-NEXT: v_add_nc_u32_e32 v4, v4, v1
+; GFX11-FAKE16-NEXT: v_or_b32_e32 v8, 0x400000, v0
+; GFX11-FAKE16-NEXT: v_bfe_u32 v10, v3, 16, 1
+; GFX11-FAKE16-NEXT: v_cmp_u_f32_e32 vcc_lo, v0, v0
+; GFX11-FAKE16-NEXT: s_lshl_b32 s5, s5, 16
+; GFX11-FAKE16-NEXT: v_add_nc_u32_e32 v4, 0x7fff, v4
+; GFX11-FAKE16-NEXT: v_add_nc_u32_e32 v2, v2, v0
+; GFX11-FAKE16-NEXT: v_add_f32_e64 v7, 0x40c00000, s5
+; GFX11-FAKE16-NEXT: v_bfe_u32 v11, v6, 16, 1
+; GFX11-FAKE16-NEXT: s_and_b32 s5, s4, 0xffff0000
+; GFX11-FAKE16-NEXT: s_lshl_b32 s4, s4, 16
+; GFX11-FAKE16-NEXT: v_add_nc_u32_e32 v2, 0x7fff, v2
+; GFX11-FAKE16-NEXT: s_delay_alu instid0(VALU_DEP_1) | instskip(SKIP_4) | instid1(VALU_DEP_4)
+; GFX11-FAKE16-NEXT: v_cndmask_b32_e32 v0, v2, v8, vcc_lo
+; GFX11-FAKE16-NEXT: v_bfe_u32 v2, v5, 16, 1
+; GFX11-FAKE16-NEXT: v_add_nc_u32_e32 v8, v10, v3
+; GFX11-FAKE16-NEXT: v_cmp_u_f32_e32 vcc_lo, v1, v1
+; GFX11-FAKE16-NEXT: v_or_b32_e32 v10, 0x400000, v5
+; GFX11-FAKE16-NEXT: v_add_nc_u32_e32 v2, v2, v5
+; GFX11-FAKE16-NEXT: s_delay_alu instid0(VALU_DEP_4)
+; GFX11-FAKE16-NEXT: v_dual_cndmask_b32 v1, v4, v9 :: v_dual_add_nc_u32 v8, 0x7fff, v8
+; GFX11-FAKE16-NEXT: v_or_b32_e32 v4, 0x400000, v3
+; GFX11-FAKE16-NEXT: v_bfe_u32 v9, v7, 16, 1
+; GFX11-FAKE16-NEXT: v_cmp_u_f32_e32 vcc_lo, v3, v3
+; GFX11-FAKE16-NEXT: v_add_nc_u32_e32 v2, 0x7fff, v2
+; GFX11-FAKE16-NEXT: v_lshrrev_b32_e32 v1, 16, v1
+; GFX11-FAKE16-NEXT: s_delay_alu instid0(VALU_DEP_4)
+; GFX11-FAKE16-NEXT: v_dual_cndmask_b32 v3, v8, v4 :: v_dual_add_nc_u32 v4, v9, v7
+; GFX11-FAKE16-NEXT: v_cmp_u_f32_e32 vcc_lo, v5, v5
+; GFX11-FAKE16-NEXT: v_add_nc_u32_e32 v5, v11, v6
+; GFX11-FAKE16-NEXT: v_or_b32_e32 v8, 0x400000, v7
+; GFX11-FAKE16-NEXT: v_lshrrev_b32_e32 v0, 16, v0
+; GFX11-FAKE16-NEXT: v_add_nc_u32_e32 v4, 0x7fff, v4
+; GFX11-FAKE16-NEXT: s_delay_alu instid0(VALU_DEP_4)
+; GFX11-FAKE16-NEXT: v_dual_cndmask_b32 v2, v2, v10 :: v_dual_add_nc_u32 v5, 0x7fff, v5
+; GFX11-FAKE16-NEXT: v_cmp_u_f32_e32 vcc_lo, v7, v7
+; GFX11-FAKE16-NEXT: v_or_b32_e32 v9, 0x400000, v6
+; GFX11-FAKE16-NEXT: v_add_f32_e64 v10, 0x40c00000, s4
+; GFX11-FAKE16-NEXT: v_and_b32_e32 v0, 0xffff, v0
+; GFX11-FAKE16-NEXT: v_lshrrev_b32_e32 v2, 16, v2
+; GFX11-FAKE16-NEXT: v_cndmask_b32_e32 v4, v4, v8, vcc_lo
+; GFX11-FAKE16-NEXT: v_cmp_u_f32_e32 vcc_lo, v6, v6
+; GFX11-FAKE16-NEXT: v_add_f32_e64 v8, 0x40c00000, s5
+; GFX11-FAKE16-NEXT: v_lshrrev_b32_e32 v3, 16, v3
+; GFX11-FAKE16-NEXT: v_and_b32_e32 v2, 0xffff, v2
+; GFX11-FAKE16-NEXT: v_lshl_or_b32 v7, v1, 16, v0
+; GFX11-FAKE16-NEXT: v_cndmask_b32_e32 v5, v5, v9, vcc_lo
+; GFX11-FAKE16-NEXT: v_bfe_u32 v9, v10, 16, 1
+; GFX11-FAKE16-NEXT: v_bfe_u32 v6, v8, 16, 1
+; GFX11-FAKE16-NEXT: v_cmp_u_f32_e32 vcc_lo, v10, v10
+; GFX11-FAKE16-NEXT: s_and_b32 s4, s3, 0xffff0000
+; GFX11-FAKE16-NEXT: v_lshrrev_b32_e32 v5, 16, v5
+; GFX11-FAKE16-NEXT: v_add_nc_u32_e32 v0, v9, v10
+; GFX11-FAKE16-NEXT: s_lshl_b32 s3, s3, 16
+; GFX11-FAKE16-NEXT: s_delay_alu instid0(SALU_CYCLE_1) | instskip(SKIP_1) | instid1(VALU_DEP_2)
+; GFX11-FAKE16-NEXT: v_add_f32_e64 v9, 0x40c00000, s3
+; GFX11-FAKE16-NEXT: s_and_b32 s3, s2, 0xffff0000
+; GFX11-FAKE16-NEXT: v_add_nc_u32_e32 v0, 0x7fff, v0
+; GFX11-FAKE16-NEXT: v_add_nc_u32_e32 v11, v6, v8
+; GFX11-FAKE16-NEXT: v_lshl_or_b32 v6, v3, 16, v2
+; GFX11-FAKE16-NEXT: v_or_b32_e32 v3, 0x400000, v10
+; GFX11-FAKE16-NEXT: v_or_b32_e32 v2, 0x400000, v8
+; GFX11-FAKE16-NEXT: s_lshl_b32 s2, s2, 16
+; GFX11-FAKE16-NEXT: v_or_b32_e32 v12, 0x400000, v9
+; GFX11-FAKE16-NEXT: s_delay_alu instid0(VALU_DEP_3)
+; GFX11-FAKE16-NEXT: v_cndmask_b32_e32 v0, v0, v3, vcc_lo
+; GFX11-FAKE16-NEXT: v_add_nc_u32_e32 v1, 0x7fff, v11
+; GFX11-FAKE16-NEXT: v_lshrrev_b32_e32 v4, 16, v4
+; GFX11-FAKE16-NEXT: v_cmp_u_f32_e32 vcc_lo, v8, v8
+; GFX11-FAKE16-NEXT: v_add_f32_e64 v8, 0x40c00000, s3
+; GFX11-FAKE16-NEXT: v_bfe_u32 v3, v9, 16, 1
+; GFX11-FAKE16-NEXT: v_lshrrev_b32_e32 v0, 16, v0
+; GFX11-FAKE16-NEXT: v_dual_cndmask_b32 v1, v1, v2 :: v_dual_and_b32 v4, 0xffff, v4
+; GFX11-FAKE16-NEXT: s_delay_alu instid0(VALU_DEP_4) | instskip(NEXT) | instid1(VALU_DEP_3)
+; GFX11-FAKE16-NEXT: v_bfe_u32 v10, v8, 16, 1
+; GFX11-FAKE16-NEXT: v_and_b32_e32 v0, 0xffff, v0
+; GFX11-FAKE16-NEXT: s_delay_alu instid0(VALU_DEP_3) | instskip(SKIP_1) | instid1(VALU_DEP_4)
+; GFX11-FAKE16-NEXT: v_lshl_or_b32 v5, v5, 16, v4
+; GFX11-FAKE16-NEXT: v_add_f32_e64 v4, 0x40c00000, s4
+; GFX11-FAKE16-NEXT: v_add_nc_u32_e32 v10, v10, v8
+; GFX11-FAKE16-NEXT: v_lshrrev_b32_e32 v1, 16, v1
+; GFX11-FAKE16-NEXT: s_delay_alu instid0(VALU_DEP_3) | instskip(SKIP_2) | instid1(VALU_DEP_3)
+; GFX11-FAKE16-NEXT: v_bfe_u32 v2, v4, 16, 1
+; GFX11-FAKE16-NEXT: v_or_b32_e32 v11, 0x400000, v4
+; GFX11-FAKE16-NEXT: v_cmp_u_f32_e32 vcc_lo, v4, v4
+; GFX11-FAKE16-NEXT: v_add_nc_u32_e32 v2, v2, v4
+; GFX11-FAKE16-NEXT: v_add_f32_e64 v4, 0x40c00000, s2
+; GFX11-FAKE16-NEXT: s_and_b32 s2, s1, 0xffff0000
+; GFX11-FAKE16-NEXT: s_lshl_b32 s1, s1, 16
+; GFX11-FAKE16-NEXT: s_delay_alu instid0(VALU_DEP_2)
+; GFX11-FAKE16-NEXT: v_add_nc_u32_e32 v2, 0x7fff, v2
+; GFX11-FAKE16-NEXT: v_add_nc_u32_e32 v3, v3, v9
+; GFX11-FAKE16-NEXT: v_add_f32_e64 v13, 0x40c00000, s1
+; GFX11-FAKE16-NEXT: s_lshl_b32 s1, s0, 16
+; GFX11-FAKE16-NEXT: s_and_b32 s0, s0, 0xffff0000
+; GFX11-FAKE16-NEXT: v_cndmask_b32_e32 v2, v2, v11, vcc_lo
+; GFX11-FAKE16-NEXT: v_cmp_u_f32_e32 vcc_lo, v9, v9
+; GFX11-FAKE16-NEXT: v_add_nc_u32_e32 v9, 0x7fff, v10
+; GFX11-FAKE16-NEXT: v_or_b32_e32 v10, 0x400000, v8
+; GFX11-FAKE16-NEXT: v_add_nc_u32_e32 v3, 0x7fff, v3
+; GFX11-FAKE16-NEXT: v_bfe_u32 v11, v4, 16, 1
+; GFX11-FAKE16-NEXT: v_add_f32_e64 v14, 0x40c00000, s0
+; GFX11-FAKE16-NEXT: v_or_b32_e32 v18, 0x400000, v13
+; GFX11-FAKE16-NEXT: v_lshrrev_b32_e32 v2, 16, v2
+; GFX11-FAKE16-NEXT: v_cndmask_b32_e32 v3, v3, v12, vcc_lo
+; GFX11-FAKE16-NEXT: v_cmp_u_f32_e32 vcc_lo, v8, v8
+; GFX11-FAKE16-NEXT: v_bfe_u32 v16, v14, 16, 1
+; GFX11-FAKE16-NEXT: s_delay_alu instid0(VALU_DEP_3)
+; GFX11-FAKE16-NEXT: v_lshrrev_b32_e32 v3, 16, v3
+; GFX11-FAKE16-NEXT: v_dual_cndmask_b32 v8, v9, v10 :: v_dual_add_nc_u32 v9, v11, v4
+; GFX11-FAKE16-NEXT: v_or_b32_e32 v11, 0x400000, v4
+; GFX11-FAKE16-NEXT: v_cmp_u_f32_e32 vcc_lo, v4, v4
+; GFX11-FAKE16-NEXT: v_add_f32_e64 v10, 0x40c00000, s2
+; GFX11-FAKE16-NEXT: v_add_nc_u32_e32 v16, v16, v14
+; GFX11-FAKE16-NEXT: v_add_nc_u32_e32 v9, 0x7fff, v9
+; GFX11-FAKE16-NEXT: v_lshrrev_b32_e32 v8, 16, v8
+; GFX11-FAKE16-NEXT: s_delay_alu instid0(VALU_DEP_4) | instskip(SKIP_1) | instid1(VALU_DEP_4)
+; GFX11-FAKE16-NEXT: v_bfe_u32 v12, v10, 16, 1
+; GFX11-FAKE16-NEXT: v_or_b32_e32 v17, 0x400000, v10
+; GFX11-FAKE16-NEXT: v_cndmask_b32_e32 v4, v9, v11, vcc_lo
+; GFX11-FAKE16-NEXT: v_add_f32_e64 v9, 0x40c00000, s1
+; GFX11-FAKE16-NEXT: v_bfe_u32 v11, v13, 16, 1
+; GFX11-FAKE16-NEXT: v_cmp_u_f32_e32 vcc_lo, v13, v13
+; GFX11-FAKE16-NEXT: v_add_nc_u32_e32 v12, v12, v10
+; GFX11-FAKE16-NEXT: s_delay_alu instid0(VALU_DEP_4) | instskip(NEXT) | instid1(VALU_DEP_4)
+; GFX11-FAKE16-NEXT: v_bfe_u32 v15, v9, 16, 1
+; GFX11-FAKE16-NEXT: v_add_nc_u32_e32 v11, v11, v13
+; GFX11-FAKE16-NEXT: v_or_b32_e32 v19, 0x400000, v9
+; GFX11-FAKE16-NEXT: s_delay_alu instid0(VALU_DEP_4)
+; GFX11-FAKE16-NEXT: v_add_nc_u32_e32 v12, 0x7fff, v12
+; GFX11-FAKE16-NEXT: v_add_nc_u32_e32 v13, 0x7fff, v16
+; GFX11-FAKE16-NEXT: v_add_nc_u32_e32 v15, v15, v9
+; GFX11-FAKE16-NEXT: v_add_nc_u32_e32 v11, 0x7fff, v11
+; GFX11-FAKE16-NEXT: v_or_b32_e32 v16, 0x400000, v14
+; GFX11-FAKE16-NEXT: v_lshrrev_b32_e32 v4, 16, v4
+; GFX11-FAKE16-NEXT: s_delay_alu instid0(VALU_DEP_4) | instskip(NEXT) | instid1(VALU_DEP_4)
+; GFX11-FAKE16-NEXT: v_add_nc_u32_e32 v15, 0x7fff, v15
+; GFX11-FAKE16-NEXT: v_cndmask_b32_e32 v11, v11, v18, vcc_lo
+; GFX11-FAKE16-NEXT: v_cmp_u_f32_e32 vcc_lo, v9, v9
+; GFX11-FAKE16-NEXT: s_delay_alu instid0(VALU_DEP_2) | instskip(NEXT) | instid1(VALU_DEP_4)
+; GFX11-FAKE16-NEXT: v_lshrrev_b32_e32 v11, 16, v11
+; GFX11-FAKE16-NEXT: v_cndmask_b32_e32 v9, v15, v19, vcc_lo
+; GFX11-FAKE16-NEXT: v_cmp_u_f32_e32 vcc_lo, v10, v10
+; GFX11-FAKE16-NEXT: s_delay_alu instid0(VALU_DEP_3) | instskip(NEXT) | instid1(VALU_DEP_3)
+; GFX11-FAKE16-NEXT: v_and_b32_e32 v11, 0xffff, v11
+; GFX11-FAKE16-NEXT: v_lshrrev_b32_e32 v9, 16, v9
+; GFX11-FAKE16-NEXT: v_cndmask_b32_e32 v10, v12, v17, vcc_lo
+; GFX11-FAKE16-NEXT: v_cmp_u_f32_e32 vcc_lo, v14, v14
+; GFX11-FAKE16-NEXT: v_and_b32_e32 v3, 0xffff, v3
+; GFX11-FAKE16-NEXT: s_delay_alu instid0(VALU_DEP_4) | instskip(NEXT) | instid1(VALU_DEP_4)
+; GFX11-FAKE16-NEXT: v_and_b32_e32 v9, 0xffff, v9
+; GFX11-FAKE16-NEXT: v_lshrrev_b32_e32 v10, 16, v10
+; GFX11-FAKE16-NEXT: v_cndmask_b32_e32 v12, v13, v16, vcc_lo
+; GFX11-FAKE16-NEXT: v_and_b32_e32 v13, 0xffff, v4
+; GFX11-FAKE16-NEXT: v_lshl_or_b32 v4, v1, 16, v0
+; GFX11-FAKE16-NEXT: v_lshl_or_b32 v3, v2, 16, v3
+; GFX11-FAKE16-NEXT: v_lshl_or_b32 v1, v10, 16, v11
+; GFX11-FAKE16-NEXT: v_lshrrev_b32_e32 v12, 16, v12
+; GFX11-FAKE16-NEXT: v_lshl_or_b32 v2, v8, 16, v13
+; GFX11-FAKE16-NEXT: s_delay_alu instid0(VALU_DEP_2)
+; GFX11-FAKE16-NEXT: v_lshl_or_b32 v0, v12, 16, v9
+; GFX11-FAKE16-NEXT: s_setpc_b64 s[30:31]
+; GFX11-FAKE16-NEXT: .LBB67_3:
+; GFX11-FAKE16-NEXT: s_branch .LBB67_2
+; GFX11-FAKE16-NEXT: .LBB67_4:
+; GFX11-FAKE16-NEXT: v_dual_mov_b32 v0, s0 :: v_dual_mov_b32 v1, s1
+; GFX11-FAKE16-NEXT: v_dual_mov_b32 v2, s2 :: v_dual_mov_b32 v3, s3
+; GFX11-FAKE16-NEXT: v_dual_mov_b32 v4, s4 :: v_dual_mov_b32 v5, s5
+; GFX11-FAKE16-NEXT: v_dual_mov_b32 v6, s6 :: v_dual_mov_b32 v7, s7
+; GFX11-FAKE16-NEXT: s_setpc_b64 s[30:31]
%cmp = icmp eq i32 %b, 0
br i1 %cmp, label %cmp.true, label %cmp.false
@@ -23982,203 +24525,384 @@ define inreg <4 x double> @bitcast_v16bf16_to_v4f64_scalar(<16 x bfloat> inreg %
; GFX9-NEXT: v_mov_b32_e32 v7, s23
; GFX9-NEXT: s_setpc_b64 s[30:31]
;
-; GFX11-LABEL: bitcast_v16bf16_to_v4f64_scalar:
-; GFX11: ; %bb.0:
-; GFX11-NEXT: s_waitcnt vmcnt(0) expcnt(0) lgkmcnt(0)
-; GFX11-NEXT: s_mov_b32 s7, s19
-; GFX11-NEXT: s_mov_b32 s6, s18
-; GFX11-NEXT: s_mov_b32 s5, s17
-; GFX11-NEXT: s_mov_b32 s4, s16
-; GFX11-NEXT: s_cmp_lg_u32 s20, 0
-; GFX11-NEXT: s_mov_b32 s8, 0
-; GFX11-NEXT: s_cbranch_scc0 .LBB83_3
-; GFX11-NEXT: ; %bb.1: ; %Flow
-; GFX11-NEXT: s_and_not1_b32 vcc_lo, exec_lo, s8
-; GFX11-NEXT: s_cbranch_vccnz .LBB83_4
-; GFX11-NEXT: .LBB83_2: ; %cmp.true
-; GFX11-NEXT: s_lshl_b32 s8, s7, 16
-; GFX11-NEXT: s_and_b32 s7, s7, 0xffff0000
-; GFX11-NEXT: v_add_f32_e64 v0, 0x40c00000, s8
-; GFX11-NEXT: v_add_f32_e64 v1, 0x40c00000, s7
-; GFX11-NEXT: s_and_b32 s8, s6, 0xffff0000
-; GFX11-NEXT: s_lshl_b32 s6, s6, 16
-; GFX11-NEXT: v_add_f32_e64 v3, 0x40c00000, s8
-; GFX11-NEXT: v_bfe_u32 v2, v0, 16, 1
-; GFX11-NEXT: v_bfe_u32 v4, v1, 16, 1
-; GFX11-NEXT: v_or_b32_e32 v9, 0x400000, v1
-; GFX11-NEXT: s_and_b32 s7, s5, 0xffff0000
-; GFX11-NEXT: v_add_f32_e64 v5, 0x40c00000, s6
-; GFX11-NEXT: v_add_f32_e64 v6, 0x40c00000, s7
-; GFX11-NEXT: v_add_nc_u32_e32 v4, v4, v1
-; GFX11-NEXT: v_or_b32_e32 v8, 0x400000, v0
-; GFX11-NEXT: v_bfe_u32 v10, v3, 16, 1
-; GFX11-NEXT: v_cmp_u_f32_e32 vcc_lo, v0, v0
-; GFX11-NEXT: s_lshl_b32 s5, s5, 16
-; GFX11-NEXT: v_add_nc_u32_e32 v4, 0x7fff, v4
-; GFX11-NEXT: v_add_nc_u32_e32 v2, v2, v0
-; GFX11-NEXT: v_add_f32_e64 v7, 0x40c00000, s5
-; GFX11-NEXT: v_bfe_u32 v11, v6, 16, 1
-; GFX11-NEXT: s_and_b32 s5, s4, 0xffff0000
-; GFX11-NEXT: s_lshl_b32 s4, s4, 16
-; GFX11-NEXT: v_add_nc_u32_e32 v2, 0x7fff, v2
-; GFX11-NEXT: s_delay_alu instid0(VALU_DEP_1) | instskip(SKIP_4) | instid1(VALU_DEP_4)
-; GFX11-NEXT: v_cndmask_b32_e32 v0, v2, v8, vcc_lo
-; GFX11-NEXT: v_bfe_u32 v2, v5, 16, 1
-; GFX11-NEXT: v_add_nc_u32_e32 v8, v10, v3
-; GFX11-NEXT: v_cmp_u_f32_e32 vcc_lo, v1, v1
-; GFX11-NEXT: v_or_b32_e32 v10, 0x400000, v5
-; GFX11-NEXT: v_add_nc_u32_e32 v2, v2, v5
-; GFX11-NEXT: s_delay_alu instid0(VALU_DEP_4)
-; GFX11-NEXT: v_dual_cndmask_b32 v1, v4, v9 :: v_dual_add_nc_u32 v8, 0x7fff, v8
-; GFX11-NEXT: v_or_b32_e32 v4, 0x400000, v3
-; GFX11-NEXT: v_bfe_u32 v9, v7, 16, 1
-; GFX11-NEXT: v_cmp_u_f32_e32 vcc_lo, v3, v3
-; GFX11-NEXT: v_add_nc_u32_e32 v2, 0x7fff, v2
-; GFX11-NEXT: v_lshrrev_b32_e32 v1, 16, v1
-; GFX11-NEXT: s_delay_alu instid0(VALU_DEP_4)
-; GFX11-NEXT: v_dual_cndmask_b32 v3, v8, v4 :: v_dual_add_nc_u32 v4, v9, v7
-; GFX11-NEXT: v_cmp_u_f32_e32 vcc_lo, v5, v5
-; GFX11-NEXT: v_add_nc_u32_e32 v5, v11, v6
-; GFX11-NEXT: v_or_b32_e32 v8, 0x400000, v7
-; GFX11-NEXT: v_lshrrev_b32_e32 v0, 16, v0
-; GFX11-NEXT: v_add_nc_u32_e32 v4, 0x7fff, v4
-; GFX11-NEXT: s_delay_alu instid0(VALU_DEP_4)
-; GFX11-NEXT: v_dual_cndmask_b32 v2, v2, v10 :: v_dual_add_nc_u32 v5, 0x7fff, v5
-; GFX11-NEXT: v_cmp_u_f32_e32 vcc_lo, v7, v7
-; GFX11-NEXT: v_or_b32_e32 v9, 0x400000, v6
-; GFX11-NEXT: v_add_f32_e64 v10, 0x40c00000, s4
-; GFX11-NEXT: v_and_b32_e32 v0, 0xffff, v0
-; GFX11-NEXT: v_lshrrev_b32_e32 v2, 16, v2
-; GFX11-NEXT: v_cndmask_b32_e32 v4, v4, v8, vcc_lo
-; GFX11-NEXT: v_cmp_u_f32_e32 vcc_lo, v6, v6
-; GFX11-NEXT: v_add_f32_e64 v8, 0x40c00000, s5
-; GFX11-NEXT: v_lshrrev_b32_e32 v3, 16, v3
-; GFX11-NEXT: v_and_b32_e32 v2, 0xffff, v2
-; GFX11-NEXT: v_lshl_or_b32 v7, v1, 16, v0
-; GFX11-NEXT: v_cndmask_b32_e32 v5, v5, v9, vcc_lo
-; GFX11-NEXT: v_bfe_u32 v9, v10, 16, 1
-; GFX11-NEXT: v_bfe_u32 v6, v8, 16, 1
-; GFX11-NEXT: v_cmp_u_f32_e32 vcc_lo, v10, v10
-; GFX11-NEXT: s_and_b32 s4, s3, 0xffff0000
-; GFX11-NEXT: v_lshrrev_b32_e32 v5, 16, v5
-; GFX11-NEXT: v_add_nc_u32_e32 v0, v9, v10
-; GFX11-NEXT: s_lshl_b32 s3, s3, 16
-; GFX11-NEXT: s_delay_alu instid0(SALU_CYCLE_1) | instskip(SKIP_1) | instid1(VALU_DEP_2)
-; GFX11-NEXT: v_add_f32_e64 v9, 0x40c00000, s3
-; GFX11-NEXT: s_and_b32 s3, s2, 0xffff0000
-; GFX11-NEXT: v_add_nc_u32_e32 v0, 0x7fff, v0
-; GFX11-NEXT: v_add_nc_u32_e32 v11, v6, v8
-; GFX11-NEXT: v_lshl_or_b32 v6, v3, 16, v2
-; GFX11-NEXT: v_or_b32_e32 v3, 0x400000, v10
-; GFX11-NEXT: v_or_b32_e32 v2, 0x400000, v8
-; GFX11-NEXT: s_lshl_b32 s2, s2, 16
-; GFX11-NEXT: v_or_b32_e32 v12, 0x400000, v9
-; GFX11-NEXT: s_delay_alu instid0(VALU_DEP_3)
-; GFX11-NEXT: v_cndmask_b32_e32 v0, v0, v3, vcc_lo
-; GFX11-NEXT: v_add_nc_u32_e32 v1, 0x7fff, v11
-; GFX11-NEXT: v_lshrrev_b32_e32 v4, 16, v4
-; GFX11-NEXT: v_cmp_u_f32_e32 vcc_lo, v8, v8
-; GFX11-NEXT: v_add_f32_e64 v8, 0x40c00000, s3
-; GFX11-NEXT: v_bfe_u32 v3, v9, 16, 1
-; GFX11-NEXT: v_lshrrev_b32_e32 v0, 16, v0
-; GFX11-NEXT: v_dual_cndmask_b32 v1, v1, v2 :: v_dual_and_b32 v4, 0xffff, v4
-; GFX11-NEXT: s_delay_alu instid0(VALU_DEP_4) | instskip(NEXT) | instid1(VALU_DEP_3)
-; GFX11-NEXT: v_bfe_u32 v10, v8, 16, 1
-; GFX11-NEXT: v_and_b32_e32 v0, 0xffff, v0
-; GFX11-NEXT: s_delay_alu instid0(VALU_DEP_3) | instskip(SKIP_1) | instid1(VALU_DEP_4)
-; GFX11-NEXT: v_lshl_or_b32 v5, v5, 16, v4
-; GFX11-NEXT: v_add_f32_e64 v4, 0x40c00000, s4
-; GFX11-NEXT: v_add_nc_u32_e32 v10, v10, v8
-; GFX11-NEXT: v_lshrrev_b32_e32 v1, 16, v1
-; GFX11-NEXT: s_delay_alu instid0(VALU_DEP_3) | instskip(SKIP_2) | instid1(VALU_DEP_3)
-; GFX11-NEXT: v_bfe_u32 v2, v4, 16, 1
-; GFX11-NEXT: v_or_b32_e32 v11, 0x400000, v4
-; GFX11-NEXT: v_cmp_u_f32_e32 vcc_lo, v4, v4
-; GFX11-NEXT: v_add_nc_u32_e32 v2, v2, v4
-; GFX11-NEXT: v_add_f32_e64 v4, 0x40c00000, s2
-; GFX11-NEXT: s_and_b32 s2, s1, 0xffff0000
-; GFX11-NEXT: s_lshl_b32 s1, s1, 16
-; GFX11-NEXT: s_delay_alu instid0(VALU_DEP_2)
-; GFX11-NEXT: v_add_nc_u32_e32 v2, 0x7fff, v2
-; GFX11-NEXT: v_add_nc_u32_e32 v3, v3, v9
-; GFX11-NEXT: v_add_f32_e64 v13, 0x40c00000, s1
-; GFX11-NEXT: s_lshl_b32 s1, s0, 16
-; GFX11-NEXT: s_and_b32 s0, s0, 0xffff0000
-; GFX11-NEXT: v_cndmask_b32_e32 v2, v2, v11, vcc_lo
-; GFX11-NEXT: v_cmp_u_f32_e32 vcc_lo, v9, v9
-; GFX11-NEXT: v_add_nc_u32_e32 v9, 0x7fff, v10
-; GFX11-NEXT: v_or_b32_e32 v10, 0x400000, v8
-; GFX11-NEXT: v_add_nc_u32_e32 v3, 0x7fff, v3
-; GFX11-NEXT: v_bfe_u32 v11, v4, 16, 1
-; GFX11-NEXT: v_add_f32_e64 v14, 0x40c00000, s0
-; GFX11-NEXT: v_or_b32_e32 v18, 0x400000, v13
-; GFX11-NEXT: v_lshrrev_b32_e32 v2, 16, v2
-; GFX11-NEXT: v_cndmask_b32_e32 v3, v3, v12, vcc_lo
-; GFX11-NEXT: v_cmp_u_f32_e32 vcc_lo, v8, v8
-; GFX11-NEXT: v_bfe_u32 v16, v14, 16, 1
-; GFX11-NEXT: s_delay_alu instid0(VALU_DEP_3)
-; GFX11-NEXT: v_lshrrev_b32_e32 v3, 16, v3
-; GFX11-NEXT: v_dual_cndmask_b32 v8, v9, v10 :: v_dual_add_nc_u32 v9, v11, v4
-; GFX11-NEXT: v_or_b32_e32 v11, 0x400000, v4
-; GFX11-NEXT: v_cmp_u_f32_e32 vcc_lo, v4, v4
-; GFX11-NEXT: v_add_f32_e64 v10, 0x40c00000, s2
-; GFX11-NEXT: v_add_nc_u32_e32 v16, v16, v14
-; GFX11-NEXT: v_add_nc_u32_e32 v9, 0x7fff, v9
-; GFX11-NEXT: v_lshrrev_b32_e32 v8, 16, v8
-; GFX11-NEXT: s_delay_alu instid0(VALU_DEP_4) | instskip(SKIP_1) | instid1(VALU_DEP_4)
-; GFX11-NEXT: v_bfe_u32 v12, v10, 16, 1
-; GFX11-NEXT: v_or_b32_e32 v17, 0x400000, v10
-; GFX11-NEXT: v_cndmask_b32_e32 v4, v9, v11, vcc_lo
-; GFX11-NEXT: v_add_f32_e64 v9, 0x40c00000, s1
-; GFX11-NEXT: v_bfe_u32 v11, v13, 16, 1
-; GFX11-NEXT: v_cmp_u_f32_e32 vcc_lo, v13, v13
-; GFX11-NEXT: v_add_nc_u32_e32 v12, v12, v10
-; GFX11-NEXT: s_delay_alu instid0(VALU_DEP_4) | instskip(NEXT) | instid1(VALU_DEP_4)
-; GFX11-NEXT: v_bfe_u32 v15, v9, 16, 1
-; GFX11-NEXT: v_add_nc_u32_e32 v11, v11, v13
-; GFX11-NEXT: v_or_b32_e32 v19, 0x400000, v9
-; GFX11-NEXT: s_delay_alu instid0(VALU_DEP_4)
-; GFX11-NEXT: v_add_nc_u32_e32 v12, 0x7fff, v12
-; GFX11-NEXT: v_add_nc_u32_e32 v13, 0x7fff, v16
-; GFX11-NEXT: v_add_nc_u32_e32 v15, v15, v9
-; GFX11-NEXT: v_add_nc_u32_e32 v11, 0x7fff, v11
-; GFX11-NEXT: v_or_b32_e32 v16, 0x400000, v14
-; GFX11-NEXT: v_lshrrev_b32_e32 v4, 16, v4
-; GFX11-NEXT: s_delay_alu instid0(VALU_DEP_4) | instskip(NEXT) | instid1(VALU_DEP_4)
-; GFX11-NEXT: v_add_nc_u32_e32 v15, 0x7fff, v15
-; GFX11-NEXT: v_cndmask_b32_e32 v11, v11, v18, vcc_lo
-; GFX11-NEXT: v_cmp_u_f32_e32 vcc_lo, v9, v9
-; GFX11-NEXT: s_delay_alu instid0(VALU_DEP_2) | instskip(NEXT) | instid1(VALU_DEP_4)
-; GFX11-NEXT: v_lshrrev_b32_e32 v11, 16, v11
-; GFX11-NEXT: v_cndmask_b32_e32 v9, v15, v19, vcc_lo
-; GFX11-NEXT: v_cmp_u_f32_e32 vcc_lo, v10, v10
-; GFX11-NEXT: s_delay_alu instid0(VALU_DEP_3) | instskip(NEXT) | instid1(VALU_DEP_3)
-; GFX11-NEXT: v_and_b32_e32 v11, 0xffff, v11
-; GFX11-NEXT: v_lshrrev_b32_e32 v9, 16, v9
-; GFX11-NEXT: v_cndmask_b32_e32 v10, v12, v17, vcc_lo
-; GFX11-NEXT: v_cmp_u_f32_e32 vcc_lo, v14, v14
-; GFX11-NEXT: v_and_b32_e32 v3, 0xffff, v3
-; GFX11-NEXT: s_delay_alu instid0(VALU_DEP_4) | instskip(NEXT) | instid1(VALU_DEP_4)
-; GFX11-NEXT: v_and_b32_e32 v9, 0xffff, v9
-; GFX11-NEXT: v_lshrrev_b32_e32 v10, 16, v10
-; GFX11-NEXT: v_cndmask_b32_e32 v12, v13, v16, vcc_lo
-; GFX11-NEXT: v_and_b32_e32 v13, 0xffff, v4
-; GFX11-NEXT: v_lshl_or_b32 v4, v1, 16, v0
-; GFX11-NEXT: v_lshl_or_b32 v3, v2, 16, v3
-; GFX11-NEXT: v_lshl_or_b32 v1, v10, 16, v11
-; GFX11-NEXT: v_lshrrev_b32_e32 v12, 16, v12
-; GFX11-NEXT: v_lshl_or_b32 v2, v8, 16, v13
-; GFX11-NEXT: s_delay_alu instid0(VALU_DEP_2)
-; GFX11-NEXT: v_lshl_or_b32 v0, v12, 16, v9
-; GFX11-NEXT: s_setpc_b64 s[30:31]
-; GFX11-NEXT: .LBB83_3:
-; GFX11-NEXT: s_branch .LBB83_2
-; GFX11-NEXT: .LBB83_4:
-; GFX11-NEXT: v_dual_mov_b32 v0, s0 :: v_dual_mov_b32 v1, s1
-; GFX11-NEXT: v_dual_mov_b32 v2, s2 :: v_dual_mov_b32 v3, s3
-; GFX11-NEXT: v_dual_mov_b32 v4, s4 :: v_dual_mov_b32 v5, s5
-; GFX11-NEXT: v_dual_mov_b32 v6, s6 :: v_dual_mov_b32 v7, s7
-; GFX11-NEXT: s_setpc_b64 s[30:31]
+; GFX11-TRUE16-LABEL: bitcast_v16bf16_to_v4f64_scalar:
+; GFX11-TRUE16: ; %bb.0:
+; GFX11-TRUE16-NEXT: s_waitcnt vmcnt(0) expcnt(0) lgkmcnt(0)
+; GFX11-TRUE16-NEXT: s_mov_b32 s7, s19
+; GFX11-TRUE16-NEXT: s_mov_b32 s6, s18
+; GFX11-TRUE16-NEXT: s_mov_b32 s5, s17
+; GFX11-TRUE16-NEXT: s_mov_b32 s4, s16
+; GFX11-TRUE16-NEXT: s_cmp_lg_u32 s20, 0
+; GFX11-TRUE16-NEXT: s_mov_b32 s8, 0
+; GFX11-TRUE16-NEXT: s_cbranch_scc0 .LBB83_3
+; GFX11-TRUE16-NEXT: ; %bb.1: ; %Flow
+; GFX11-TRUE16-NEXT: s_and_not1_b32 vcc_lo, exec_lo, s8
+; GFX11-TRUE16-NEXT: s_cbranch_vccnz .LBB83_4
+; GFX11-TRUE16-NEXT: .LBB83_2: ; %cmp.true
+; GFX11-TRUE16-NEXT: s_and_b32 s8, s7, 0xffff0000
+; GFX11-TRUE16-NEXT: s_lshl_b32 s7, s7, 16
+; GFX11-TRUE16-NEXT: v_add_f32_e64 v0, 0x40c00000, s8
+; GFX11-TRUE16-NEXT: v_add_f32_e64 v1, 0x40c00000, s7
+; GFX11-TRUE16-NEXT: s_and_b32 s7, s6, 0xffff0000
+; GFX11-TRUE16-NEXT: s_lshl_b32 s6, s6, 16
+; GFX11-TRUE16-NEXT: v_add_f32_e64 v4, 0x40c00000, s7
+; GFX11-TRUE16-NEXT: v_bfe_u32 v2, v0, 16, 1
+; GFX11-TRUE16-NEXT: v_bfe_u32 v3, v1, 16, 1
+; GFX11-TRUE16-NEXT: v_add_f32_e64 v5, 0x40c00000, s6
+; GFX11-TRUE16-NEXT: v_or_b32_e32 v6, 0x400000, v0
+; GFX11-TRUE16-NEXT: v_bfe_u32 v7, v4, 16, 1
+; GFX11-TRUE16-NEXT: v_add_nc_u32_e32 v2, v2, v0
+; GFX11-TRUE16-NEXT: v_or_b32_e32 v8, 0x400000, v1
+; GFX11-TRUE16-NEXT: v_cmp_u_f32_e32 vcc_lo, v0, v0
+; GFX11-TRUE16-NEXT: v_bfe_u32 v9, v5, 16, 1
+; GFX11-TRUE16-NEXT: v_add_nc_u32_e32 v7, v7, v4
+; GFX11-TRUE16-NEXT: v_add_nc_u32_e32 v2, 0x7fff, v2
+; GFX11-TRUE16-NEXT: v_add_nc_u32_e32 v3, v3, v1
+; GFX11-TRUE16-NEXT: s_and_b32 s6, s5, 0xffff0000
+; GFX11-TRUE16-NEXT: s_lshl_b32 s5, s5, 16
+; GFX11-TRUE16-NEXT: s_delay_alu instid0(VALU_DEP_1) | instskip(SKIP_2) | instid1(VALU_DEP_3)
+; GFX11-TRUE16-NEXT: v_dual_cndmask_b32 v0, v2, v6 :: v_dual_add_nc_u32 v3, 0x7fff, v3
+; GFX11-TRUE16-NEXT: v_cmp_u_f32_e32 vcc_lo, v1, v1
+; GFX11-TRUE16-NEXT: v_add_nc_u32_e32 v6, 0x7fff, v7
+; GFX11-TRUE16-NEXT: v_lshrrev_b32_e32 v0, 16, v0
+; GFX11-TRUE16-NEXT: s_delay_alu instid0(VALU_DEP_4)
+; GFX11-TRUE16-NEXT: v_cndmask_b32_e32 v1, v3, v8, vcc_lo
+; GFX11-TRUE16-NEXT: v_or_b32_e32 v3, 0x400000, v4
+; GFX11-TRUE16-NEXT: v_cmp_u_f32_e32 vcc_lo, v4, v4
+; GFX11-TRUE16-NEXT: v_add_nc_u32_e32 v2, v9, v5
+; GFX11-TRUE16-NEXT: v_add_f32_e64 v8, 0x40c00000, s5
+; GFX11-TRUE16-NEXT: v_lshrrev_b32_e32 v7, 16, v1
+; GFX11-TRUE16-NEXT: v_or_b32_e32 v9, 0x400000, v5
+; GFX11-TRUE16-NEXT: v_cndmask_b32_e32 v3, v6, v3, vcc_lo
+; GFX11-TRUE16-NEXT: v_add_nc_u32_e32 v1, 0x7fff, v2
+; GFX11-TRUE16-NEXT: v_cmp_u_f32_e32 vcc_lo, v5, v5
+; GFX11-TRUE16-NEXT: v_add_f32_e64 v2, 0x40c00000, s6
+; GFX11-TRUE16-NEXT: v_bfe_u32 v6, v8, 16, 1
+; GFX11-TRUE16-NEXT: v_lshrrev_b32_e32 v3, 16, v3
+; GFX11-TRUE16-NEXT: v_mov_b16_e32 v7.h, v0.l
+; GFX11-TRUE16-NEXT: v_cndmask_b32_e32 v1, v1, v9, vcc_lo
+; GFX11-TRUE16-NEXT: v_bfe_u32 v4, v2, 16, 1
+; GFX11-TRUE16-NEXT: v_add_nc_u32_e32 v0, v6, v8
+; GFX11-TRUE16-NEXT: v_cmp_u_f32_e32 vcc_lo, v2, v2
+; GFX11-TRUE16-NEXT: s_and_b32 s5, s4, 0xffff0000
+; GFX11-TRUE16-NEXT: v_lshrrev_b32_e32 v6, 16, v1
+; GFX11-TRUE16-NEXT: v_mov_b16_e32 v6.h, v3.l
+; GFX11-TRUE16-NEXT: v_or_b32_e32 v3, 0x400000, v2
+; GFX11-TRUE16-NEXT: v_add_nc_u32_e32 v4, v4, v2
+; GFX11-TRUE16-NEXT: s_lshl_b32 s4, s4, 16
+; GFX11-TRUE16-NEXT: v_add_nc_u32_e32 v0, 0x7fff, v0
+; GFX11-TRUE16-NEXT: v_add_f32_e64 v9, 0x40c00000, s4
+; GFX11-TRUE16-NEXT: v_or_b32_e32 v5, 0x400000, v8
+; GFX11-TRUE16-NEXT: v_add_nc_u32_e32 v1, 0x7fff, v4
+; GFX11-TRUE16-NEXT: v_add_f32_e64 v4, 0x40c00000, s5
+; GFX11-TRUE16-NEXT: s_and_b32 s4, s3, 0xffff0000
+; GFX11-TRUE16-NEXT: v_or_b32_e32 v10, 0x400000, v9
+; GFX11-TRUE16-NEXT: s_lshl_b32 s3, s3, 16
+; GFX11-TRUE16-NEXT: v_cndmask_b32_e32 v1, v1, v3, vcc_lo
+; GFX11-TRUE16-NEXT: v_bfe_u32 v2, v4, 16, 1
+; GFX11-TRUE16-NEXT: v_cmp_u_f32_e32 vcc_lo, v8, v8
+; GFX11-TRUE16-NEXT: v_bfe_u32 v3, v9, 16, 1
+; GFX11-TRUE16-NEXT: v_or_b32_e32 v8, 0x400000, v4
+; GFX11-TRUE16-NEXT: v_lshrrev_b32_e32 v1, 16, v1
+; GFX11-TRUE16-NEXT: v_add_nc_u32_e32 v2, v2, v4
+; GFX11-TRUE16-NEXT: v_cndmask_b32_e32 v0, v0, v5, vcc_lo
+; GFX11-TRUE16-NEXT: v_add_nc_u32_e32 v3, v3, v9
+; GFX11-TRUE16-NEXT: v_cmp_u_f32_e32 vcc_lo, v4, v4
+; GFX11-TRUE16-NEXT: s_delay_alu instid0(VALU_DEP_4) | instskip(NEXT) | instid1(VALU_DEP_4)
+; GFX11-TRUE16-NEXT: v_add_nc_u32_e32 v2, 0x7fff, v2
+; GFX11-TRUE16-NEXT: v_lshrrev_b32_e32 v5, 16, v0
+; GFX11-TRUE16-NEXT: v_add_f32_e64 v0, 0x40c00000, s4
+; GFX11-TRUE16-NEXT: v_add_nc_u32_e32 v3, 0x7fff, v3
+; GFX11-TRUE16-NEXT: v_mov_b16_e32 v5.h, v1.l
+; GFX11-TRUE16-NEXT: v_cndmask_b32_e32 v2, v2, v8, vcc_lo
+; GFX11-TRUE16-NEXT: v_cmp_u_f32_e32 vcc_lo, v9, v9
+; GFX11-TRUE16-NEXT: v_bfe_u32 v11, v0, 16, 1
+; GFX11-TRUE16-NEXT: v_add_f32_e64 v1, 0x40c00000, s3
+; GFX11-TRUE16-NEXT: s_and_b32 s3, s2, 0xffff0000
+; GFX11-TRUE16-NEXT: v_lshrrev_b32_e32 v2, 16, v2
+; GFX11-TRUE16-NEXT: v_cndmask_b32_e32 v3, v3, v10, vcc_lo
+; GFX11-TRUE16-NEXT: v_add_nc_u32_e32 v8, v11, v0
+; GFX11-TRUE16-NEXT: v_bfe_u32 v9, v1, 16, 1
+; GFX11-TRUE16-NEXT: v_cmp_u_f32_e32 vcc_lo, v0, v0
+; GFX11-TRUE16-NEXT: s_lshl_b32 s2, s2, 16
+; GFX11-TRUE16-NEXT: v_lshrrev_b32_e32 v4, 16, v3
+; GFX11-TRUE16-NEXT: v_add_nc_u32_e32 v3, 0x7fff, v8
+; GFX11-TRUE16-NEXT: v_or_b32_e32 v8, 0x400000, v0
+; GFX11-TRUE16-NEXT: v_mov_b16_e32 v4.h, v2.l
+; GFX11-TRUE16-NEXT: v_add_f32_e64 v2, 0x40c00000, s3
+; GFX11-TRUE16-NEXT: v_or_b32_e32 v10, 0x400000, v1
+; GFX11-TRUE16-NEXT: s_delay_alu instid0(VALU_DEP_4) | instskip(NEXT) | instid1(VALU_DEP_3)
+; GFX11-TRUE16-NEXT: v_dual_cndmask_b32 v0, v3, v8 :: v_dual_add_nc_u32 v3, v9, v1
+; GFX11-TRUE16-NEXT: v_bfe_u32 v8, v2, 16, 1
+; GFX11-TRUE16-NEXT: v_add_f32_e64 v9, 0x40c00000, s2
+; GFX11-TRUE16-NEXT: v_cmp_u_f32_e32 vcc_lo, v1, v1
+; GFX11-TRUE16-NEXT: s_and_b32 s2, s1, 0xffff0000
+; GFX11-TRUE16-NEXT: v_add_nc_u32_e32 v3, 0x7fff, v3
+; GFX11-TRUE16-NEXT: v_add_nc_u32_e32 v8, v8, v2
+; GFX11-TRUE16-NEXT: v_bfe_u32 v11, v9, 16, 1
+; GFX11-TRUE16-NEXT: v_or_b32_e32 v12, 0x400000, v2
+; GFX11-TRUE16-NEXT: v_lshrrev_b32_e32 v0, 16, v0
+; GFX11-TRUE16-NEXT: v_cndmask_b32_e32 v1, v3, v10, vcc_lo
+; GFX11-TRUE16-NEXT: v_add_f32_e64 v10, 0x40c00000, s2
+; GFX11-TRUE16-NEXT: v_add_nc_u32_e32 v8, 0x7fff, v8
+; GFX11-TRUE16-NEXT: v_add_nc_u32_e32 v11, v11, v9
+; GFX11-TRUE16-NEXT: v_cmp_u_f32_e32 vcc_lo, v2, v2
+; GFX11-TRUE16-NEXT: v_lshrrev_b32_e32 v3, 16, v1
+; GFX11-TRUE16-NEXT: v_bfe_u32 v1, v10, 16, 1
+; GFX11-TRUE16-NEXT: s_lshl_b32 s1, s1, 16
+; GFX11-TRUE16-NEXT: s_and_b32 s2, s0, 0xffff0000
+; GFX11-TRUE16-NEXT: v_cndmask_b32_e32 v2, v8, v12, vcc_lo
+; GFX11-TRUE16-NEXT: v_add_nc_u32_e32 v8, 0x7fff, v11
+; GFX11-TRUE16-NEXT: v_or_b32_e32 v11, 0x400000, v9
+; GFX11-TRUE16-NEXT: v_cmp_u_f32_e32 vcc_lo, v9, v9
+; GFX11-TRUE16-NEXT: v_add_nc_u32_e32 v1, v1, v10
+; GFX11-TRUE16-NEXT: v_mov_b16_e32 v3.h, v0.l
+; GFX11-TRUE16-NEXT: v_lshrrev_b32_e32 v0, 16, v2
+; GFX11-TRUE16-NEXT: v_add_f32_e64 v9, 0x40c00000, s2
+; GFX11-TRUE16-NEXT: v_cndmask_b32_e32 v2, v8, v11, vcc_lo
+; GFX11-TRUE16-NEXT: v_add_f32_e64 v8, 0x40c00000, s1
+; GFX11-TRUE16-NEXT: s_lshl_b32 s0, s0, 16
+; GFX11-TRUE16-NEXT: v_add_nc_u32_e32 v1, 0x7fff, v1
+; GFX11-TRUE16-NEXT: v_or_b32_e32 v11, 0x400000, v10
+; GFX11-TRUE16-NEXT: v_add_f32_e64 v12, 0x40c00000, s0
+; GFX11-TRUE16-NEXT: v_bfe_u32 v13, v8, 16, 1
+; GFX11-TRUE16-NEXT: v_bfe_u32 v14, v9, 16, 1
+; GFX11-TRUE16-NEXT: v_cmp_u_f32_e32 vcc_lo, v10, v10
+; GFX11-TRUE16-NEXT: v_or_b32_e32 v15, 0x400000, v8
+; GFX11-TRUE16-NEXT: v_bfe_u32 v10, v12, 16, 1
+; GFX11-TRUE16-NEXT: v_or_b32_e32 v16, 0x400000, v12
+; GFX11-TRUE16-NEXT: v_lshrrev_b32_e32 v2, 16, v2
+; GFX11-TRUE16-NEXT: v_cndmask_b32_e32 v1, v1, v11, vcc_lo
+; GFX11-TRUE16-NEXT: v_add_nc_u32_e32 v11, v13, v8
+; GFX11-TRUE16-NEXT: v_add_nc_u32_e32 v13, v14, v9
+; GFX11-TRUE16-NEXT: v_cmp_u_f32_e32 vcc_lo, v9, v9
+; GFX11-TRUE16-NEXT: v_mov_b16_e32 v2.h, v0.l
+; GFX11-TRUE16-NEXT: v_lshrrev_b32_e32 v14, 16, v1
+; GFX11-TRUE16-NEXT: v_add_nc_u32_e32 v1, v10, v12
+; GFX11-TRUE16-NEXT: v_add_nc_u32_e32 v10, 0x7fff, v11
+; GFX11-TRUE16-NEXT: v_add_nc_u32_e32 v11, 0x7fff, v13
+; GFX11-TRUE16-NEXT: v_or_b32_e32 v13, 0x400000, v9
+; GFX11-TRUE16-NEXT: s_delay_alu instid0(VALU_DEP_4) | instskip(NEXT) | instid1(VALU_DEP_2)
+; GFX11-TRUE16-NEXT: v_add_nc_u32_e32 v1, 0x7fff, v1
+; GFX11-TRUE16-NEXT: v_cndmask_b32_e32 v9, v11, v13, vcc_lo
+; GFX11-TRUE16-NEXT: v_cmp_u_f32_e32 vcc_lo, v8, v8
+; GFX11-TRUE16-NEXT: s_delay_alu instid0(VALU_DEP_2) | instskip(SKIP_3) | instid1(VALU_DEP_3)
+; GFX11-TRUE16-NEXT: v_lshrrev_b32_e32 v9, 16, v9
+; GFX11-TRUE16-NEXT: v_cndmask_b32_e32 v8, v10, v15, vcc_lo
+; GFX11-TRUE16-NEXT: v_cmp_u_f32_e32 vcc_lo, v12, v12
+; GFX11-TRUE16-NEXT: v_cndmask_b32_e32 v10, v1, v16, vcc_lo
+; GFX11-TRUE16-NEXT: v_lshrrev_b32_e32 v1, 16, v8
+; GFX11-TRUE16-NEXT: v_mov_b16_e32 v1.h, v14.l
+; GFX11-TRUE16-NEXT: s_delay_alu instid0(VALU_DEP_3)
+; GFX11-TRUE16-NEXT: v_lshrrev_b32_e32 v0, 16, v10
+; GFX11-TRUE16-NEXT: v_mov_b16_e32 v0.h, v9.l
+; GFX11-TRUE16-NEXT: s_setpc_b64 s[30:31]
+; GFX11-TRUE16-NEXT: .LBB83_3:
+; GFX11-TRUE16-NEXT: s_branch .LBB83_2
+; GFX11-TRUE16-NEXT: .LBB83_4:
+; GFX11-TRUE16-NEXT: v_dual_mov_b32 v0, s0 :: v_dual_mov_b32 v1, s1
+; GFX11-TRUE16-NEXT: v_dual_mov_b32 v2, s2 :: v_dual_mov_b32 v3, s3
+; GFX11-TRUE16-NEXT: v_dual_mov_b32 v4, s4 :: v_dual_mov_b32 v5, s5
+; GFX11-TRUE16-NEXT: v_dual_mov_b32 v6, s6 :: v_dual_mov_b32 v7, s7
+; GFX11-TRUE16-NEXT: s_setpc_b64 s[30:31]
+;
+; GFX11-FAKE16-LABEL: bitcast_v16bf16_to_v4f64_scalar:
+; GFX11-FAKE16: ; %bb.0:
+; GFX11-FAKE16-NEXT: s_waitcnt vmcnt(0) expcnt(0) lgkmcnt(0)
+; GFX11-FAKE16-NEXT: s_mov_b32 s7, s19
+; GFX11-FAKE16-NEXT: s_mov_b32 s6, s18
+; GFX11-FAKE16-NEXT: s_mov_b32 s5, s17
+; GFX11-FAKE16-NEXT: s_mov_b32 s4, s16
+; GFX11-FAKE16-NEXT: s_cmp_lg_u32 s20, 0
+; GFX11-FAKE16-NEXT: s_mov_b32 s8, 0
+; GFX11-FAKE16-NEXT: s_cbranch_scc0 .LBB83_3
+; GFX11-FAKE16-NEXT: ; %bb.1: ; %Flow
+; GFX11-FAKE16-NEXT: s_and_not1_b32 vcc_lo, exec_lo, s8
+; GFX11-FAKE16-NEXT: s_cbranch_vccnz .LBB83_4
+; GFX11-FAKE16-NEXT: .LBB83_2: ; %cmp.true
+; GFX11-FAKE16-NEXT: s_lshl_b32 s8, s7, 16
+; GFX11-FAKE16-NEXT: s_and_b32 s7, s7, 0xffff0000
+; GFX11-FAKE16-NEXT: v_add_f32_e64 v0, 0x40c00000, s8
+; GFX11-FAKE16-NEXT: v_add_f32_e64 v1, 0x40c00000, s7
+; GFX11-FAKE16-NEXT: s_and_b32 s8, s6, 0xffff0000
+; GFX11-FAKE16-NEXT: s_lshl_b32 s6, s6, 16
+; GFX11-FAKE16-NEXT: v_add_f32_e64 v3, 0x40c00000, s8
+; GFX11-FAKE16-NEXT: v_bfe_u32 v2, v0, 16, 1
+; GFX11-FAKE16-NEXT: v_bfe_u32 v4, v1, 16, 1
+; GFX11-FAKE16-NEXT: v_or_b32_e32 v9, 0x400000, v1
+; GFX11-FAKE16-NEXT: s_and_b32 s7, s5, 0xffff0000
+; GFX11-FAKE16-NEXT: v_add_f32_e64 v5, 0x40c00000, s6
+; GFX11-FAKE16-NEXT: v_add_f32_e64 v6, 0x40c00000, s7
+; GFX11-FAKE16-NEXT: v_add_nc_u32_e32 v4, v4, v1
+; GFX11-FAKE16-NEXT: v_or_b32_e32 v8, 0x400000, v0
+; GFX11-FAKE16-NEXT: v_bfe_u32 v10, v3, 16, 1
+; GFX11-FAKE16-NEXT: v_cmp_u_f32_e32 vcc_lo, v0, v0
+; GFX11-FAKE16-NEXT: s_lshl_b32 s5, s5, 16
+; GFX11-FAKE16-NEXT: v_add_nc_u32_e32 v4, 0x7fff, v4
+; GFX11-FAKE16-NEXT: v_add_nc_u32_e32 v2, v2, v0
+; GFX11-FAKE16-NEXT: v_add_f32_e64 v7, 0x40c00000, s5
+; GFX11-FAKE16-NEXT: v_bfe_u32 v11, v6, 16, 1
+; GFX11-FAKE16-NEXT: s_and_b32 s5, s4, 0xffff0000
+; GFX11-FAKE16-NEXT: s_lshl_b32 s4, s4, 16
+; GFX11-FAKE16-NEXT: v_add_nc_u32_e32 v2, 0x7fff, v2
+; GFX11-FAKE16-NEXT: s_delay_alu instid0(VALU_DEP_1) | instskip(SKIP_4) | instid1(VALU_DEP_4)
+; GFX11-FAKE16-NEXT: v_cndmask_b32_e32 v0, v2, v8, vcc_lo
+; GFX11-FAKE16-NEXT: v_bfe_u32 v2, v5, 16, 1
+; GFX11-FAKE16-NEXT: v_add_nc_u32_e32 v8, v10, v3
+; GFX11-FAKE16-NEXT: v_cmp_u_f32_e32 vcc_lo, v1, v1
+; GFX11-FAKE16-NEXT: v_or_b32_e32 v10, 0x400000, v5
+; GFX11-FAKE16-NEXT: v_add_nc_u32_e32 v2, v2, v5
+; GFX11-FAKE16-NEXT: s_delay_alu instid0(VALU_DEP_4)
+; GFX11-FAKE16-NEXT: v_dual_cndmask_b32 v1, v4, v9 :: v_dual_add_nc_u32 v8, 0x7fff, v8
+; GFX11-FAKE16-NEXT: v_or_b32_e32 v4, 0x400000, v3
+; GFX11-FAKE16-NEXT: v_bfe_u32 v9, v7, 16, 1
+; GFX11-FAKE16-NEXT: v_cmp_u_f32_e32 vcc_lo, v3, v3
+; GFX11-FAKE16-NEXT: v_add_nc_u32_e32 v2, 0x7fff, v2
+; GFX11-FAKE16-NEXT: v_lshrrev_b32_e32 v1, 16, v1
+; GFX11-FAKE16-NEXT: s_delay_alu instid0(VALU_DEP_4)
+; GFX11-FAKE16-NEXT: v_dual_cndmask_b32 v3, v8, v4 :: v_dual_add_nc_u32 v4, v9, v7
+; GFX11-FAKE16-NEXT: v_cmp_u_f32_e32 vcc_lo, v5, v5
+; GFX11-FAKE16-NEXT: v_add_nc_u32_e32 v5, v11, v6
+; GFX11-FAKE16-NEXT: v_or_b32_e32 v8, 0x400000, v7
+; GFX11-FAKE16-NEXT: v_lshrrev_b32_e32 v0, 16, v0
+; GFX11-FAKE16-NEXT: v_add_nc_u32_e32 v4, 0x7fff, v4
+; GFX11-FAKE16-NEXT: s_delay_alu instid0(VALU_DEP_4)
+; GFX11-FAKE16-NEXT: v_dual_cndmask_b32 v2, v2, v10 :: v_dual_add_nc_u32 v5, 0x7fff, v5
+; GFX11-FAKE16-NEXT: v_cmp_u_f32_e32 vcc_lo, v7, v7
+; GFX11-FAKE16-NEXT: v_or_b32_e32 v9, 0x400000, v6
+; GFX11-FAKE16-NEXT: v_add_f32_e64 v10, 0x40c00000, s4
+; GFX11-FAKE16-NEXT: v_and_b32_e32 v0, 0xffff, v0
+; GFX11-FAKE16-NEXT: v_lshrrev_b32_e32 v2, 16, v2
+; GFX11-FAKE16-NEXT: v_cndmask_b32_e32 v4, v4, v8, vcc_lo
+; GFX11-FAKE16-NEXT: v_cmp_u_f32_e32 vcc_lo, v6, v6
+; GFX11-FAKE16-NEXT: v_add_f32_e64 v8, 0x40c00000, s5
+; GFX11-FAKE16-NEXT: v_lshrrev_b32_e32 v3, 16, v3
+; GFX11-FAKE16-NEXT: v_and_b32_e32 v2, 0xffff, v2
+; GFX11-FAKE16-NEXT: v_lshl_or_b32 v7, v1, 16, v0
+; GFX11-FAKE16-NEXT: v_cndmask_b32_e32 v5, v5, v9, vcc_lo
+; GFX11-FAKE16-NEXT: v_bfe_u32 v9, v10, 16, 1
+; GFX11-FAKE16-NEXT: v_bfe_u32 v6, v8, 16, 1
+; GFX11-FAKE16-NEXT: v_cmp_u_f32_e32 vcc_lo, v10, v10
+; GFX11-FAKE16-NEXT: s_and_b32 s4, s3, 0xffff0000
+; GFX11-FAKE16-NEXT: v_lshrrev_b32_e32 v5, 16, v5
+; GFX11-FAKE16-NEXT: v_add_nc_u32_e32 v0, v9, v10
+; GFX11-FAKE16-NEXT: s_lshl_b32 s3, s3, 16
+; GFX11-FAKE16-NEXT: s_delay_alu instid0(SALU_CYCLE_1) | instskip(SKIP_1) | instid1(VALU_DEP_2)
+; GFX11-FAKE16-NEXT: v_add_f32_e64 v9, 0x40c00000, s3
+; GFX11-FAKE16-NEXT: s_and_b32 s3, s2, 0xffff0000
+; GFX11-FAKE16-NEXT: v_add_nc_u32_e32 v0, 0x7fff, v0
+; GFX11-FAKE16-NEXT: v_add_nc_u32_e32 v11, v6, v8
+; GFX11-FAKE16-NEXT: v_lshl_or_b32 v6, v3, 16, v2
+; GFX11-FAKE16-NEXT: v_or_b32_e32 v3, 0x400000, v10
+; GFX11-FAKE16-NEXT: v_or_b32_e32 v2, 0x400000, v8
+; GFX11-FAKE16-NEXT: s_lshl_b32 s2, s2, 16
+; GFX11-FAKE16-NEXT: v_or_b32_e32 v12, 0x400000, v9
+; GFX11-FAKE16-NEXT: s_delay_alu instid0(VALU_DEP_3)
+; GFX11-FAKE16-NEXT: v_cndmask_b32_e32 v0, v0, v3, vcc_lo
+; GFX11-FAKE16-NEXT: v_add_nc_u32_e32 v1, 0x7fff, v11
+; GFX11-FAKE16-NEXT: v_lshrrev_b32_e32 v4, 16, v4
+; GFX11-FAKE16-NEXT: v_cmp_u_f32_e32 vcc_lo, v8, v8
+; GFX11-FAKE16-NEXT: v_add_f32_e64 v8, 0x40c00000, s3
+; GFX11-FAKE16-NEXT: v_bfe_u32 v3, v9, 16, 1
+; GFX11-FAKE16-NEXT: v_lshrrev_b32_e32 v0, 16, v0
+; GFX11-FAKE16-NEXT: v_dual_cndmask_b32 v1, v1, v2 :: v_dual_and_b32 v4, 0xffff, v4
+; GFX11-FAKE16-NEXT: s_delay_alu instid0(VALU_DEP_4) | instskip(NEXT) | instid1(VALU_DEP_3)
+; GFX11-FAKE16-NEXT: v_bfe_u32 v10, v8, 16, 1
+; GFX11-FAKE16-NEXT: v_and_b32_e32 v0, 0xffff, v0
+; GFX11-FAKE16-NEXT: s_delay_alu instid0(VALU_DEP_3) | instskip(SKIP_1) | instid1(VALU_DEP_4)
+; GFX11-FAKE16-NEXT: v_lshl_or_b32 v5, v5, 16, v4
+; GFX11-FAKE16-NEXT: v_add_f32_e64 v4, 0x40c00000, s4
+; GFX11-FAKE16-NEXT: v_add_nc_u32_e32 v10, v10, v8
+; GFX11-FAKE16-NEXT: v_lshrrev_b32_e32 v1, 16, v1
+; GFX11-FAKE16-NEXT: s_delay_alu instid0(VALU_DEP_3) | instskip(SKIP_2) | instid1(VALU_DEP_3)
+; GFX11-FAKE16-NEXT: v_bfe_u32 v2, v4, 16, 1
+; GFX11-FAKE16-NEXT: v_or_b32_e32 v11, 0x400000, v4
+; GFX11-FAKE16-NEXT: v_cmp_u_f32_e32 vcc_lo, v4, v4
+; GFX11-FAKE16-NEXT: v_add_nc_u32_e32 v2, v2, v4
+; GFX11-FAKE16-NEXT: v_add_f32_e64 v4, 0x40c00000, s2
+; GFX11-FAKE16-NEXT: s_and_b32 s2, s1, 0xffff0000
+; GFX11-FAKE16-NEXT: s_lshl_b32 s1, s1, 16
+; GFX11-FAKE16-NEXT: s_delay_alu instid0(VALU_DEP_2)
+; GFX11-FAKE16-NEXT: v_add_nc_u32_e32 v2, 0x7fff, v2
+; GFX11-FAKE16-NEXT: v_add_nc_u32_e32 v3, v3, v9
+; GFX11-FAKE16-NEXT: v_add_f32_e64 v13, 0x40c00000, s1
+; GFX11-FAKE16-NEXT: s_lshl_b32 s1, s0, 16
+; GFX11-FAKE16-NEXT: s_and_b32 s0, s0, 0xffff0000
+; GFX11-FAKE16-NEXT: v_cndmask_b32_e32 v2, v2, v11, vcc_lo
+; GFX11-FAKE16-NEXT: v_cmp_u_f32_e32 vcc_lo, v9, v9
+; GFX11-FAKE16-NEXT: v_add_nc_u32_e32 v9, 0x7fff, v10
+; GFX11-FAKE16-NEXT: v_or_b32_e32 v10, 0x400000, v8
+; GFX11-FAKE16-NEXT: v_add_nc_u32_e32 v3, 0x7fff, v3
+; GFX11-FAKE16-NEXT: v_bfe_u32 v11, v4, 16, 1
+; GFX11-FAKE16-NEXT: v_add_f32_e64 v14, 0x40c00000, s0
+; GFX11-FAKE16-NEXT: v_or_b32_e32 v18, 0x400000, v13
+; GFX11-FAKE16-NEXT: v_lshrrev_b32_e32 v2, 16, v2
+; GFX11-FAKE16-NEXT: v_cndmask_b32_e32 v3, v3, v12, vcc_lo
+; GFX11-FAKE16-NEXT: v_cmp_u_f32_e32 vcc_lo, v8, v8
+; GFX11-FAKE16-NEXT: v_bfe_u32 v16, v14, 16, 1
+; GFX11-FAKE16-NEXT: s_delay_alu instid0(VALU_DEP_3)
+; GFX11-FAKE16-NEXT: v_lshrrev_b32_e32 v3, 16, v3
+; GFX11-FAKE16-NEXT: v_dual_cndmask_b32 v8, v9, v10 :: v_dual_add_nc_u32 v9, v11, v4
+; GFX11-FAKE16-NEXT: v_or_b32_e32 v11, 0x400000, v4
+; GFX11-FAKE16-NEXT: v_cmp_u_f32_e32 vcc_lo, v4, v4
+; GFX11-FAKE16-NEXT: v_add_f32_e64 v10, 0x40c00000, s2
+; GFX11-FAKE16-NEXT: v_add_nc_u32_e32 v16, v16, v14
+; GFX11-FAKE16-NEXT: v_add_nc_u32_e32 v9, 0x7fff, v9
+; GFX11-FAKE16-NEXT: v_lshrrev_b32_e32 v8, 16, v8
+; GFX11-FAKE16-NEXT: s_delay_alu instid0(VALU_DEP_4) | instskip(SKIP_1) | instid1(VALU_DEP_4)
+; GFX11-FAKE16-NEXT: v_bfe_u32 v12, v10, 16, 1
+; GFX11-FAKE16-NEXT: v_or_b32_e32 v17, 0x400000, v10
+; GFX11-FAKE16-NEXT: v_cndmask_b32_e32 v4, v9, v11, vcc_lo
+; GFX11-FAKE16-NEXT: v_add_f32_e64 v9, 0x40c00000, s1
+; GFX11-FAKE16-NEXT: v_bfe_u32 v11, v13, 16, 1
+; GFX11-FAKE16-NEXT: v_cmp_u_f32_e32 vcc_lo, v13, v13
+; GFX11-FAKE16-NEXT: v_add_nc_u32_e32 v12, v12, v10
+; GFX11-FAKE16-NEXT: s_delay_alu instid0(VALU_DEP_4) | instskip(NEXT) | instid1(VALU_DEP_4)
+; GFX11-FAKE16-NEXT: v_bfe_u32 v15, v9, 16, 1
+; GFX11-FAKE16-NEXT: v_add_nc_u32_e32 v11, v11, v13
+; GFX11-FAKE16-NEXT: v_or_b32_e32 v19, 0x400000, v9
+; GFX11-FAKE16-NEXT: s_delay_alu instid0(VALU_DEP_4)
+; GFX11-FAKE16-NEXT: v_add_nc_u32_e32 v12, 0x7fff, v12
+; GFX11-FAKE16-NEXT: v_add_nc_u32_e32 v13, 0x7fff, v16
+; GFX11-FAKE16-NEXT: v_add_nc_u32_e32 v15, v15, v9
+; GFX11-FAKE16-NEXT: v_add_nc_u32_e32 v11, 0x7fff, v11
+; GFX11-FAKE16-NEXT: v_or_b32_e32 v16, 0x400000, v14
+; GFX11-FAKE16-NEXT: v_lshrrev_b32_e32 v4, 16, v4
+; GFX11-FAKE16-NEXT: s_delay_alu instid0(VALU_DEP_4) | instskip(NEXT) | instid1(VALU_DEP_4)
+; GFX11-FAKE16-NEXT: v_add_nc_u32_e32 v15, 0x7fff, v15
+; GFX11-FAKE16-NEXT: v_cndmask_b32_e32 v11, v11, v18, vcc_lo
+; GFX11-FAKE16-NEXT: v_cmp_u_f32_e32 vcc_lo, v9, v9
+; GFX11-FAKE16-NEXT: s_delay_alu instid0(VALU_DEP_2) | instskip(NEXT) | instid1(VALU_DEP_4)
+; GFX11-FAKE16-NEXT: v_lshrrev_b32_e32 v11, 16, v11
+; GFX11-FAKE16-NEXT: v_cndmask_b32_e32 v9, v15, v19, vcc_lo
+; GFX11-FAKE16-NEXT: v_cmp_u_f32_e32 vcc_lo, v10, v10
+; GFX11-FAKE16-NEXT: s_delay_alu instid0(VALU_DEP_3) | instskip(NEXT) | instid1(VALU_DEP_3)
+; GFX11-FAKE16-NEXT: v_and_b32_e32 v11, 0xffff, v11
+; GFX11-FAKE16-NEXT: v_lshrrev_b32_e32 v9, 16, v9
+; GFX11-FAKE16-NEXT: v_cndmask_b32_e32 v10, v12, v17, vcc_lo
+; GFX11-FAKE16-NEXT: v_cmp_u_f32_e32 vcc_lo, v14, v14
+; GFX11-FAKE16-NEXT: v_and_b32_e32 v3, 0xffff, v3
+; GFX11-FAKE16-NEXT: s_delay_alu instid0(VALU_DEP_4) | instskip(NEXT) | instid1(VALU_DEP_4)
+; GFX11-FAKE16-NEXT: v_and_b32_e32 v9, 0xffff, v9
+; GFX11-FAKE16-NEXT: v_lshrrev_b32_e32 v10, 16, v10
+; GFX11-FAKE16-NEXT: v_cndmask_b32_e32 v12, v13, v16, vcc_lo
+; GFX11-FAKE16-NEXT: v_and_b32_e32 v13, 0xffff, v4
+; GFX11-FAKE16-NEXT: v_lshl_or_b32 v4, v1, 16, v0
+; GFX11-FAKE16-NEXT: v_lshl_or_b32 v3, v2, 16, v3
+; GFX11-FAKE16-NEXT: v_lshl_or_b32 v1, v10, 16, v11
+; GFX11-FAKE16-NEXT: v_lshrrev_b32_e32 v12, 16, v12
+; GFX11-FAKE16-NEXT: v_lshl_or_b32 v2, v8, 16, v13
+; GFX11-FAKE16-NEXT: s_delay_alu instid0(VALU_DEP_2)
+; GFX11-FAKE16-NEXT: v_lshl_or_b32 v0, v12, 16, v9
+; GFX11-FAKE16-NEXT: s_setpc_b64 s[30:31]
+; GFX11-FAKE16-NEXT: .LBB83_3:
+; GFX11-FAKE16-NEXT: s_branch .LBB83_2
+; GFX11-FAKE16-NEXT: .LBB83_4:
+; GFX11-FAKE16-NEXT: v_dual_mov_b32 v0, s0 :: v_dual_mov_b32 v1, s1
+; GFX11-FAKE16-NEXT: v_dual_mov_b32 v2, s2 :: v_dual_mov_b32 v3, s3
+; GFX11-FAKE16-NEXT: v_dual_mov_b32 v4, s4 :: v_dual_mov_b32 v5, s5
+; GFX11-FAKE16-NEXT: v_dual_mov_b32 v6, s6 :: v_dual_mov_b32 v7, s7
+; GFX11-FAKE16-NEXT: s_setpc_b64 s[30:31]
%cmp = icmp eq i32 %b, 0
br i1 %cmp, label %cmp.true, label %cmp.false
@@ -28722,13 +29446,10 @@ define <16 x i16> @bitcast_v16bf16_to_v16i16(<16 x bfloat> %a, i32 %b) {
; GFX11-TRUE16-NEXT: s_cbranch_execz .LBB94_2
; GFX11-TRUE16-NEXT: ; %bb.1: ; %cmp.true
; GFX11-TRUE16-NEXT: v_lshlrev_b32_e32 v9, 16, v1
-; GFX11-TRUE16-NEXT: v_lshlrev_b32_e32 v8, 16, v0
-; GFX11-TRUE16-NEXT: v_lshlrev_b32_e32 v21, 16, v6
-; GFX11-TRUE16-NEXT: v_and_b32_e32 v6, 0xffff0000, v6
-; GFX11-TRUE16-NEXT: s_delay_alu instid0(VALU_DEP_3) | instskip(NEXT) | instid1(VALU_DEP_2)
-; GFX11-TRUE16-NEXT: v_dual_add_f32 v9, 0x40c00000, v9 :: v_dual_add_f32 v8, 0x40c00000, v8
-; GFX11-TRUE16-NEXT: v_add_f32_e32 v6, 0x40c00000, v6
-; GFX11-TRUE16-NEXT: s_delay_alu instid0(VALU_DEP_2) | instskip(NEXT) | instid1(VALU_DEP_3)
+; GFX11-TRUE16-NEXT: s_delay_alu instid0(VALU_DEP_1) | instskip(NEXT) | instid1(VALU_DEP_1)
+; GFX11-TRUE16-NEXT: v_dual_add_f32 v9, 0x40c00000, v9 :: v_dual_lshlrev_b32 v8, 16, v0
+; GFX11-TRUE16-NEXT: v_add_f32_e32 v8, 0x40c00000, v8
+; GFX11-TRUE16-NEXT: s_delay_alu instid0(VALU_DEP_2) | instskip(NEXT) | instid1(VALU_DEP_2)
; GFX11-TRUE16-NEXT: v_bfe_u32 v14, v9, 16, 1
; GFX11-TRUE16-NEXT: v_bfe_u32 v11, v8, 16, 1
; GFX11-TRUE16-NEXT: v_or_b32_e32 v12, 0x400000, v8
@@ -28736,142 +29457,128 @@ define <16 x i16> @bitcast_v16bf16_to_v16i16(<16 x bfloat> %a, i32 %b) {
; GFX11-TRUE16-NEXT: v_or_b32_e32 v16, 0x400000, v9
; GFX11-TRUE16-NEXT: v_add3_u32 v14, v14, v9, 0x7fff
; GFX11-TRUE16-NEXT: v_add3_u32 v11, v11, v8, 0x7fff
-; GFX11-TRUE16-NEXT: v_and_b32_e32 v1, 0xffff0000, v1
-; GFX11-TRUE16-NEXT: v_bfe_u32 v22, v6, 16, 1
-; GFX11-TRUE16-NEXT: v_or_b32_e32 v25, 0x400000, v6
-; GFX11-TRUE16-NEXT: s_delay_alu instid0(VALU_DEP_4) | instskip(NEXT) | instid1(VALU_DEP_4)
-; GFX11-TRUE16-NEXT: v_cndmask_b32_e32 v8, v11, v12, vcc_lo
+; GFX11-TRUE16-NEXT: s_delay_alu instid0(VALU_DEP_1) | instskip(NEXT) | instid1(VALU_DEP_1)
+; GFX11-TRUE16-NEXT: v_dual_cndmask_b32 v8, v11, v12 :: v_dual_and_b32 v1, 0xffff0000, v1
; GFX11-TRUE16-NEXT: v_dual_add_f32 v1, 0x40c00000, v1 :: v_dual_lshlrev_b32 v10, 16, v2
; GFX11-TRUE16-NEXT: v_lshlrev_b32_e32 v12, 16, v3
; GFX11-TRUE16-NEXT: v_and_b32_e32 v3, 0xffff0000, v3
; GFX11-TRUE16-NEXT: v_and_b32_e32 v2, 0xffff0000, v2
-; GFX11-TRUE16-NEXT: v_lshrrev_b32_e32 v8, 16, v8
+; GFX11-TRUE16-NEXT: s_delay_alu instid0(VALU_DEP_4) | instskip(NEXT) | instid1(VALU_DEP_3)
; GFX11-TRUE16-NEXT: v_bfe_u32 v11, v1, 16, 1
-; GFX11-TRUE16-NEXT: s_delay_alu instid0(VALU_DEP_4) | instskip(NEXT) | instid1(VALU_DEP_4)
; GFX11-TRUE16-NEXT: v_dual_add_f32 v12, 0x40c00000, v12 :: v_dual_add_f32 v3, 0x40c00000, v3
+; GFX11-TRUE16-NEXT: s_delay_alu instid0(VALU_DEP_3) | instskip(NEXT) | instid1(VALU_DEP_3)
; GFX11-TRUE16-NEXT: v_add_f32_e32 v2, 0x40c00000, v2
-; GFX11-TRUE16-NEXT: s_delay_alu instid0(VALU_DEP_3) | instskip(SKIP_1) | instid1(VALU_DEP_1)
; GFX11-TRUE16-NEXT: v_add3_u32 v11, v11, v1, 0x7fff
; GFX11-TRUE16-NEXT: v_and_b32_e32 v0, 0xffff0000, v0
+; GFX11-TRUE16-NEXT: s_delay_alu instid0(VALU_DEP_1) | instskip(NEXT) | instid1(VALU_DEP_1)
; GFX11-TRUE16-NEXT: v_add_f32_e32 v0, 0x40c00000, v0
-; GFX11-TRUE16-NEXT: s_delay_alu instid0(VALU_DEP_1) | instskip(SKIP_2) | instid1(VALU_DEP_3)
; GFX11-TRUE16-NEXT: v_bfe_u32 v13, v0, 16, 1
; GFX11-TRUE16-NEXT: v_or_b32_e32 v15, 0x400000, v0
; GFX11-TRUE16-NEXT: v_cmp_u_f32_e32 vcc_lo, v0, v0
+; GFX11-TRUE16-NEXT: s_delay_alu instid0(VALU_DEP_3) | instskip(NEXT) | instid1(VALU_DEP_1)
; GFX11-TRUE16-NEXT: v_add3_u32 v13, v13, v0, 0x7fff
-; GFX11-TRUE16-NEXT: s_delay_alu instid0(VALU_DEP_1)
; GFX11-TRUE16-NEXT: v_cndmask_b32_e32 v0, v13, v15, vcc_lo
; GFX11-TRUE16-NEXT: v_cmp_u_f32_e32 vcc_lo, v9, v9
; GFX11-TRUE16-NEXT: v_or_b32_e32 v13, 0x400000, v1
; GFX11-TRUE16-NEXT: v_bfe_u32 v15, v12, 16, 1
+; GFX11-TRUE16-NEXT: v_mov_b16_e32 v0.l, v8.h
; GFX11-TRUE16-NEXT: v_cndmask_b32_e32 v9, v14, v16, vcc_lo
; GFX11-TRUE16-NEXT: v_lshlrev_b32_e32 v16, 16, v4
; GFX11-TRUE16-NEXT: v_add_f32_e32 v10, 0x40c00000, v10
; GFX11-TRUE16-NEXT: v_cmp_u_f32_e32 vcc_lo, v1, v1
-; GFX11-TRUE16-NEXT: v_and_b32_e32 v4, 0xffff0000, v4
-; GFX11-TRUE16-NEXT: v_lshrrev_b32_e32 v9, 16, v9
-; GFX11-TRUE16-NEXT: s_delay_alu instid0(VALU_DEP_4)
+; GFX11-TRUE16-NEXT: s_delay_alu instid0(VALU_DEP_2)
; GFX11-TRUE16-NEXT: v_bfe_u32 v14, v10, 16, 1
; GFX11-TRUE16-NEXT: v_cndmask_b32_e32 v1, v11, v13, vcc_lo
; GFX11-TRUE16-NEXT: v_or_b32_e32 v13, 0x400000, v10
; GFX11-TRUE16-NEXT: v_cmp_u_f32_e32 vcc_lo, v10, v10
-; GFX11-TRUE16-NEXT: v_add_f32_e32 v4, 0x40c00000, v4
+; GFX11-TRUE16-NEXT: v_mov_b16_e32 v1.l, v9.h
; GFX11-TRUE16-NEXT: v_add3_u32 v11, v14, v10, 0x7fff
; GFX11-TRUE16-NEXT: v_bfe_u32 v14, v2, 16, 1
-; GFX11-TRUE16-NEXT: s_delay_alu instid0(VALU_DEP_3) | instskip(NEXT) | instid1(VALU_DEP_3)
-; GFX11-TRUE16-NEXT: v_or_b32_e32 v18, 0x400000, v4
+; GFX11-TRUE16-NEXT: s_delay_alu instid0(VALU_DEP_2) | instskip(NEXT) | instid1(VALU_DEP_2)
; GFX11-TRUE16-NEXT: v_cndmask_b32_e32 v10, v11, v13, vcc_lo
-; GFX11-TRUE16-NEXT: s_delay_alu instid0(VALU_DEP_3)
; GFX11-TRUE16-NEXT: v_add3_u32 v11, v14, v2, 0x7fff
; GFX11-TRUE16-NEXT: v_or_b32_e32 v13, 0x400000, v2
; GFX11-TRUE16-NEXT: v_cmp_u_f32_e32 vcc_lo, v2, v2
; GFX11-TRUE16-NEXT: v_add3_u32 v14, v15, v12, 0x7fff
; GFX11-TRUE16-NEXT: v_or_b32_e32 v15, 0x400000, v12
-; GFX11-TRUE16-NEXT: v_lshrrev_b32_e32 v10, 16, v10
+; GFX11-TRUE16-NEXT: s_delay_alu instid0(VALU_DEP_4) | instskip(SKIP_3) | instid1(VALU_DEP_3)
; GFX11-TRUE16-NEXT: v_cndmask_b32_e32 v2, v11, v13, vcc_lo
; GFX11-TRUE16-NEXT: v_bfe_u32 v11, v3, 16, 1
; GFX11-TRUE16-NEXT: v_cmp_u_f32_e32 vcc_lo, v12, v12
-; GFX11-TRUE16-NEXT: s_delay_alu instid0(VALU_DEP_2)
+; GFX11-TRUE16-NEXT: v_mov_b16_e32 v2.l, v10.h
; GFX11-TRUE16-NEXT: v_add3_u32 v11, v11, v3, 0x7fff
; GFX11-TRUE16-NEXT: v_cndmask_b32_e32 v12, v14, v15, vcc_lo
-; GFX11-TRUE16-NEXT: v_or_b32_e32 v15, 0x400000, v3
+; GFX11-TRUE16-NEXT: v_or_b32_e32 v14, 0x400000, v3
+; GFX11-TRUE16-NEXT: v_lshlrev_b32_e32 v15, 16, v5
; GFX11-TRUE16-NEXT: v_cmp_u_f32_e32 vcc_lo, v3, v3
-; GFX11-TRUE16-NEXT: v_lshlrev_b32_e32 v14, 16, v5
; GFX11-TRUE16-NEXT: v_and_b32_e32 v5, 0xffff0000, v5
-; GFX11-TRUE16-NEXT: s_delay_alu instid0(VALU_DEP_4) | instskip(SKIP_2) | instid1(VALU_DEP_4)
-; GFX11-TRUE16-NEXT: v_cndmask_b32_e32 v3, v11, v15, vcc_lo
-; GFX11-TRUE16-NEXT: v_bfe_u32 v11, v4, 16, 1
-; GFX11-TRUE16-NEXT: v_cmp_u_f32_e32 vcc_lo, v4, v4
-; GFX11-TRUE16-NEXT: v_dual_add_f32 v14, 0x40c00000, v14 :: v_dual_add_f32 v5, 0x40c00000, v5
-; GFX11-TRUE16-NEXT: s_delay_alu instid0(VALU_DEP_3) | instskip(NEXT) | instid1(VALU_DEP_2)
-; GFX11-TRUE16-NEXT: v_add3_u32 v11, v11, v4, 0x7fff
-; GFX11-TRUE16-NEXT: v_or_b32_e32 v19, 0x400000, v14
-; GFX11-TRUE16-NEXT: s_delay_alu instid0(VALU_DEP_3) | instskip(NEXT) | instid1(VALU_DEP_3)
-; GFX11-TRUE16-NEXT: v_bfe_u32 v20, v5, 16, 1
-; GFX11-TRUE16-NEXT: v_cndmask_b32_e32 v4, v11, v18, vcc_lo
-; GFX11-TRUE16-NEXT: v_lshlrev_b32_e32 v18, 16, v7
-; GFX11-TRUE16-NEXT: v_and_b32_e32 v7, 0xffff0000, v7
-; GFX11-TRUE16-NEXT: v_cmp_u_f32_e32 vcc_lo, v14, v14
-; GFX11-TRUE16-NEXT: s_delay_alu instid0(VALU_DEP_2) | instskip(SKIP_1) | instid1(VALU_DEP_2)
-; GFX11-TRUE16-NEXT: v_dual_add_f32 v18, 0x40c00000, v18 :: v_dual_add_f32 v7, 0x40c00000, v7
+; GFX11-TRUE16-NEXT: s_delay_alu instid0(VALU_DEP_4) | instskip(NEXT) | instid1(VALU_DEP_4)
+; GFX11-TRUE16-NEXT: v_cndmask_b32_e32 v3, v11, v14, vcc_lo
+; GFX11-TRUE16-NEXT: v_add_f32_e32 v11, 0x40c00000, v15
+; GFX11-TRUE16-NEXT: s_delay_alu instid0(VALU_DEP_3) | instskip(SKIP_2) | instid1(VALU_DEP_4)
+; GFX11-TRUE16-NEXT: v_add_f32_e32 v5, 0x40c00000, v5
; GFX11-TRUE16-NEXT: v_add_f32_e32 v13, 0x40c00000, v16
-; GFX11-TRUE16-NEXT: v_bfe_u32 v23, v18, 16, 1
-; GFX11-TRUE16-NEXT: s_delay_alu instid0(VALU_DEP_3) | instskip(NEXT) | instid1(VALU_DEP_3)
-; GFX11-TRUE16-NEXT: v_bfe_u32 v24, v7, 16, 1
+; GFX11-TRUE16-NEXT: v_mov_b16_e32 v3.l, v12.h
+; GFX11-TRUE16-NEXT: v_or_b32_e32 v18, 0x400000, v11
+; GFX11-TRUE16-NEXT: v_and_b32_e32 v4, 0xffff0000, v4
+; GFX11-TRUE16-NEXT: s_delay_alu instid0(VALU_DEP_4)
; GFX11-TRUE16-NEXT: v_bfe_u32 v16, v13, 16, 1
-; GFX11-TRUE16-NEXT: v_or_b32_e32 v17, 0x400000, v13
-; GFX11-TRUE16-NEXT: s_delay_alu instid0(VALU_DEP_2) | instskip(SKIP_1) | instid1(VALU_DEP_1)
-; GFX11-TRUE16-NEXT: v_add3_u32 v15, v16, v13, 0x7fff
-; GFX11-TRUE16-NEXT: v_bfe_u32 v16, v14, 16, 1
-; GFX11-TRUE16-NEXT: v_add3_u32 v16, v16, v14, 0x7fff
-; GFX11-TRUE16-NEXT: s_delay_alu instid0(VALU_DEP_1) | instskip(NEXT) | instid1(VALU_DEP_1)
-; GFX11-TRUE16-NEXT: v_dual_add_f32 v14, 0x40c00000, v21 :: v_dual_cndmask_b32 v11, v16, v19
-; GFX11-TRUE16-NEXT: v_bfe_u32 v19, v14, 16, 1
-; GFX11-TRUE16-NEXT: v_or_b32_e32 v21, 0x400000, v14
+; GFX11-TRUE16-NEXT: v_or_b32_e32 v15, 0x400000, v13
+; GFX11-TRUE16-NEXT: v_cmp_u_f32_e32 vcc_lo, v13, v13
+; GFX11-TRUE16-NEXT: v_bfe_u32 v17, v11, 16, 1
+; GFX11-TRUE16-NEXT: v_add_f32_e32 v4, 0x40c00000, v4
+; GFX11-TRUE16-NEXT: v_add3_u32 v14, v16, v13, 0x7fff
+; GFX11-TRUE16-NEXT: v_bfe_u32 v19, v5, 16, 1
+; GFX11-TRUE16-NEXT: s_delay_alu instid0(VALU_DEP_3) | instskip(NEXT) | instid1(VALU_DEP_3)
+; GFX11-TRUE16-NEXT: v_bfe_u32 v16, v4, 16, 1
+; GFX11-TRUE16-NEXT: v_cndmask_b32_e32 v13, v14, v15, vcc_lo
+; GFX11-TRUE16-NEXT: v_or_b32_e32 v15, 0x400000, v4
+; GFX11-TRUE16-NEXT: v_cmp_u_f32_e32 vcc_lo, v4, v4
+; GFX11-TRUE16-NEXT: s_delay_alu instid0(VALU_DEP_4) | instskip(SKIP_1) | instid1(VALU_DEP_2)
+; GFX11-TRUE16-NEXT: v_add3_u32 v14, v16, v4, 0x7fff
+; GFX11-TRUE16-NEXT: v_add3_u32 v16, v17, v11, 0x7fff
+; GFX11-TRUE16-NEXT: v_dual_cndmask_b32 v4, v14, v15 :: v_dual_lshlrev_b32 v17, 16, v6
+; GFX11-TRUE16-NEXT: v_cmp_u_f32_e32 vcc_lo, v11, v11
+; GFX11-TRUE16-NEXT: v_add3_u32 v15, v19, v5, 0x7fff
+; GFX11-TRUE16-NEXT: v_mov_b16_e32 v4.l, v13.h
+; GFX11-TRUE16-NEXT: v_dual_cndmask_b32 v11, v16, v18 :: v_dual_lshlrev_b32 v18, 16, v7
+; GFX11-TRUE16-NEXT: v_and_b32_e32 v7, 0xffff0000, v7
+; GFX11-TRUE16-NEXT: v_add_f32_e32 v14, 0x40c00000, v17
+; GFX11-TRUE16-NEXT: v_or_b32_e32 v16, 0x400000, v5
+; GFX11-TRUE16-NEXT: v_cmp_u_f32_e32 vcc_lo, v5, v5
+; GFX11-TRUE16-NEXT: s_delay_alu instid0(VALU_DEP_4) | instskip(NEXT) | instid1(VALU_DEP_4)
+; GFX11-TRUE16-NEXT: v_add_f32_e32 v7, 0x40c00000, v7
+; GFX11-TRUE16-NEXT: v_bfe_u32 v17, v14, 16, 1
+; GFX11-TRUE16-NEXT: s_delay_alu instid0(VALU_DEP_4) | instskip(SKIP_2) | instid1(VALU_DEP_4)
+; GFX11-TRUE16-NEXT: v_cndmask_b32_e32 v5, v15, v16, vcc_lo
+; GFX11-TRUE16-NEXT: v_add_f32_e32 v15, 0x40c00000, v18
; GFX11-TRUE16-NEXT: v_cmp_u_f32_e32 vcc_lo, v14, v14
-; GFX11-TRUE16-NEXT: v_add3_u32 v16, v20, v5, 0x7fff
-; GFX11-TRUE16-NEXT: v_or_b32_e32 v20, 0x400000, v5
-; GFX11-TRUE16-NEXT: v_add3_u32 v19, v19, v14, 0x7fff
-; GFX11-TRUE16-NEXT: s_delay_alu instid0(VALU_DEP_1)
-; GFX11-TRUE16-NEXT: v_cndmask_b32_e32 v14, v19, v21, vcc_lo
-; GFX11-TRUE16-NEXT: v_add3_u32 v19, v22, v6, 0x7fff
-; GFX11-TRUE16-NEXT: v_add3_u32 v21, v23, v18, 0x7fff
-; GFX11-TRUE16-NEXT: v_or_b32_e32 v22, 0x400000, v18
-; GFX11-TRUE16-NEXT: v_cmp_u_f32_e32 vcc_lo, v18, v18
-; GFX11-TRUE16-NEXT: v_add3_u32 v23, v24, v7, 0x7fff
-; GFX11-TRUE16-NEXT: v_or_b32_e32 v24, 0x400000, v7
-; GFX11-TRUE16-NEXT: s_delay_alu instid0(VALU_DEP_4) | instskip(SKIP_1) | instid1(VALU_DEP_3)
-; GFX11-TRUE16-NEXT: v_cndmask_b32_e32 v18, v21, v22, vcc_lo
-; GFX11-TRUE16-NEXT: v_cmp_u_f32_e32 vcc_lo, v7, v7
-; GFX11-TRUE16-NEXT: v_cndmask_b32_e32 v7, v23, v24, vcc_lo
+; GFX11-TRUE16-NEXT: v_add3_u32 v16, v17, v14, 0x7fff
+; GFX11-TRUE16-NEXT: v_or_b32_e32 v17, 0x400000, v14
+; GFX11-TRUE16-NEXT: v_or_b32_e32 v21, 0x400000, v7
+; GFX11-TRUE16-NEXT: v_bfe_u32 v19, v15, 16, 1
+; GFX11-TRUE16-NEXT: v_mov_b16_e32 v5.l, v11.h
+; GFX11-TRUE16-NEXT: s_delay_alu instid0(VALU_DEP_4) | instskip(SKIP_3) | instid1(VALU_DEP_2)
+; GFX11-TRUE16-NEXT: v_cndmask_b32_e32 v14, v16, v17, vcc_lo
+; GFX11-TRUE16-NEXT: v_cmp_u_f32_e32 vcc_lo, v15, v15
+; GFX11-TRUE16-NEXT: v_and_b32_e32 v6, 0xffff0000, v6
+; GFX11-TRUE16-NEXT: v_bfe_u32 v16, v7, 16, 1
+; GFX11-TRUE16-NEXT: v_add_f32_e32 v6, 0x40c00000, v6
+; GFX11-TRUE16-NEXT: s_delay_alu instid0(VALU_DEP_2) | instskip(NEXT) | instid1(VALU_DEP_2)
+; GFX11-TRUE16-NEXT: v_add3_u32 v16, v16, v7, 0x7fff
+; GFX11-TRUE16-NEXT: v_bfe_u32 v18, v6, 16, 1
+; GFX11-TRUE16-NEXT: v_or_b32_e32 v20, 0x400000, v6
+; GFX11-TRUE16-NEXT: s_delay_alu instid0(VALU_DEP_2) | instskip(SKIP_2) | instid1(VALU_DEP_1)
+; GFX11-TRUE16-NEXT: v_add3_u32 v17, v18, v6, 0x7fff
+; GFX11-TRUE16-NEXT: v_add3_u32 v18, v19, v15, 0x7fff
+; GFX11-TRUE16-NEXT: v_or_b32_e32 v19, 0x400000, v15
+; GFX11-TRUE16-NEXT: v_cndmask_b32_e32 v15, v18, v19, vcc_lo
; GFX11-TRUE16-NEXT: v_cmp_u_f32_e32 vcc_lo, v6, v6
-; GFX11-TRUE16-NEXT: s_delay_alu instid0(VALU_DEP_2) | instskip(SKIP_3) | instid1(VALU_DEP_3)
-; GFX11-TRUE16-NEXT: v_mov_b16_e32 v0.l, v7.h
-; GFX11-TRUE16-NEXT: v_cndmask_b32_e32 v6, v19, v25, vcc_lo
-; GFX11-TRUE16-NEXT: v_cmp_u_f32_e32 vcc_lo, v5, v5
-; GFX11-TRUE16-NEXT: v_lshrrev_b32_e32 v7, 16, v18
-; GFX11-TRUE16-NEXT: v_mov_b16_e32 v1.l, v6.h
-; GFX11-TRUE16-NEXT: v_cndmask_b32_e32 v5, v16, v20, vcc_lo
-; GFX11-TRUE16-NEXT: v_cmp_u_f32_e32 vcc_lo, v13, v13
-; GFX11-TRUE16-NEXT: v_lshrrev_b32_e32 v6, 16, v14
-; GFX11-TRUE16-NEXT: v_lshl_or_b32 v7, v0, 16, v7
-; GFX11-TRUE16-NEXT: v_mov_b16_e32 v0.l, v4.h
-; GFX11-TRUE16-NEXT: v_mov_b16_e32 v2.l, v5.h
-; GFX11-TRUE16-NEXT: v_lshrrev_b32_e32 v5, 16, v11
-; GFX11-TRUE16-NEXT: v_cndmask_b32_e32 v11, v15, v17, vcc_lo
-; GFX11-TRUE16-NEXT: v_lshl_or_b32 v6, v1, 16, v6
-; GFX11-TRUE16-NEXT: v_mov_b16_e32 v1.l, v3.h
-; GFX11-TRUE16-NEXT: v_lshrrev_b32_e32 v3, 16, v12
-; GFX11-TRUE16-NEXT: v_lshl_or_b32 v5, v2, 16, v5
-; GFX11-TRUE16-NEXT: v_lshrrev_b32_e32 v4, 16, v11
-; GFX11-TRUE16-NEXT: v_mov_b16_e32 v2.l, v2.h
-; GFX11-TRUE16-NEXT: v_mov_b16_e32 v11.l, v1.h
-; GFX11-TRUE16-NEXT: v_mov_b16_e32 v12.l, v0.h
-; GFX11-TRUE16-NEXT: v_lshl_or_b32 v3, v1, 16, v3
-; GFX11-TRUE16-NEXT: v_lshl_or_b32 v4, v0, 16, v4
-; GFX11-TRUE16-NEXT: v_lshl_or_b32 v2, v2, 16, v10
-; GFX11-TRUE16-NEXT: v_lshl_or_b32 v1, v11, 16, v9
-; GFX11-TRUE16-NEXT: v_lshl_or_b32 v0, v12, 16, v8
+; GFX11-TRUE16-NEXT: v_cndmask_b32_e32 v6, v17, v20, vcc_lo
+; GFX11-TRUE16-NEXT: v_cmp_u_f32_e32 vcc_lo, v7, v7
+; GFX11-TRUE16-NEXT: v_mov_b16_e32 v6.l, v14.h
+; GFX11-TRUE16-NEXT: v_cndmask_b32_e32 v7, v16, v21, vcc_lo
+; GFX11-TRUE16-NEXT: v_mov_b16_e32 v7.l, v15.h
; GFX11-TRUE16-NEXT: .LBB94_2: ; %end
; GFX11-TRUE16-NEXT: s_or_b32 exec_lo, exec_lo, s0
; GFX11-TRUE16-NEXT: s_setpc_b64 s[30:31]
@@ -29497,175 +30204,334 @@ define inreg <16 x i16> @bitcast_v16bf16_to_v16i16_scalar(<16 x bfloat> inreg %a
; GFX9-NEXT: v_mov_b32_e32 v7, s23
; GFX9-NEXT: s_setpc_b64 s[30:31]
;
-; GFX11-LABEL: bitcast_v16bf16_to_v16i16_scalar:
-; GFX11: ; %bb.0:
-; GFX11-NEXT: s_waitcnt vmcnt(0) expcnt(0) lgkmcnt(0)
-; GFX11-NEXT: s_mov_b32 s7, s19
-; GFX11-NEXT: s_mov_b32 s6, s18
-; GFX11-NEXT: s_mov_b32 s5, s17
-; GFX11-NEXT: s_mov_b32 s4, s16
-; GFX11-NEXT: s_cmp_lg_u32 s20, 0
-; GFX11-NEXT: s_mov_b32 s8, 0
-; GFX11-NEXT: s_cbranch_scc0 .LBB95_3
-; GFX11-NEXT: ; %bb.1: ; %Flow
-; GFX11-NEXT: s_and_not1_b32 vcc_lo, exec_lo, s8
-; GFX11-NEXT: s_cbranch_vccnz .LBB95_4
-; GFX11-NEXT: .LBB95_2: ; %cmp.true
-; GFX11-NEXT: s_and_b32 s8, s0, 0xffff0000
-; GFX11-NEXT: s_lshl_b32 s0, s0, 16
-; GFX11-NEXT: v_add_f32_e64 v0, 0x40c00000, s8
-; GFX11-NEXT: s_and_b32 s8, s1, 0xffff0000
-; GFX11-NEXT: v_add_f32_e64 v1, 0x40c00000, s0
-; GFX11-NEXT: v_add_f32_e64 v2, 0x40c00000, s8
-; GFX11-NEXT: s_lshl_b32 s0, s1, 16
-; GFX11-NEXT: v_bfe_u32 v3, v0, 16, 1
-; GFX11-NEXT: s_and_b32 s1, s2, 0xffff0000
-; GFX11-NEXT: v_bfe_u32 v4, v1, 16, 1
-; GFX11-NEXT: v_bfe_u32 v5, v2, 16, 1
-; GFX11-NEXT: v_add_f32_e64 v7, 0x40c00000, s1
-; GFX11-NEXT: v_add_nc_u32_e32 v3, v3, v0
-; GFX11-NEXT: v_or_b32_e32 v8, 0x400000, v0
-; GFX11-NEXT: v_cmp_u_f32_e32 vcc_lo, v0, v0
-; GFX11-NEXT: v_add_f32_e64 v6, 0x40c00000, s0
-; GFX11-NEXT: v_or_b32_e32 v11, 0x400000, v7
-; GFX11-NEXT: v_add_nc_u32_e32 v3, 0x7fff, v3
-; GFX11-NEXT: v_add_nc_u32_e32 v5, v5, v2
-; GFX11-NEXT: v_or_b32_e32 v9, 0x400000, v1
-; GFX11-NEXT: s_lshl_b32 s0, s2, 16
-; GFX11-NEXT: s_and_b32 s1, s5, 0xffff0000
-; GFX11-NEXT: s_delay_alu instid0(VALU_DEP_2) | instskip(SKIP_4) | instid1(VALU_DEP_3)
-; GFX11-NEXT: v_dual_cndmask_b32 v0, v3, v8 :: v_dual_add_nc_u32 v5, 0x7fff, v5
-; GFX11-NEXT: v_or_b32_e32 v3, 0x400000, v2
-; GFX11-NEXT: v_add_nc_u32_e32 v4, v4, v1
-; GFX11-NEXT: v_cmp_u_f32_e32 vcc_lo, v1, v1
-; GFX11-NEXT: v_bfe_u32 v8, v6, 16, 1
-; GFX11-NEXT: v_add_nc_u32_e32 v4, 0x7fff, v4
-; GFX11-NEXT: s_delay_alu instid0(VALU_DEP_1)
-; GFX11-NEXT: v_cndmask_b32_e32 v1, v4, v9, vcc_lo
-; GFX11-NEXT: v_bfe_u32 v4, v7, 16, 1
-; GFX11-NEXT: v_cmp_u_f32_e32 vcc_lo, v2, v2
-; GFX11-NEXT: v_add_nc_u32_e32 v2, v8, v6
-; GFX11-NEXT: v_or_b32_e32 v8, 0x400000, v6
-; GFX11-NEXT: v_cndmask_b32_e32 v9, v5, v3, vcc_lo
-; GFX11-NEXT: v_add_nc_u32_e32 v3, v4, v7
-; GFX11-NEXT: v_add_f32_e64 v4, 0x40c00000, s0
-; GFX11-NEXT: s_and_b32 s0, s3, 0xffff0000
-; GFX11-NEXT: v_add_nc_u32_e32 v2, 0x7fff, v2
-; GFX11-NEXT: v_cmp_u_f32_e32 vcc_lo, v6, v6
-; GFX11-NEXT: v_add_f32_e64 v5, 0x40c00000, s0
-; GFX11-NEXT: v_bfe_u32 v10, v4, 16, 1
-; GFX11-NEXT: v_add_nc_u32_e32 v3, 0x7fff, v3
-; GFX11-NEXT: s_lshl_b32 s0, s3, 16
-; GFX11-NEXT: v_cndmask_b32_e32 v2, v2, v8, vcc_lo
-; GFX11-NEXT: v_cmp_u_f32_e32 vcc_lo, v7, v7
-; GFX11-NEXT: v_add_nc_u32_e32 v6, v10, v4
-; GFX11-NEXT: v_bfe_u32 v12, v5, 16, 1
-; GFX11-NEXT: v_add_f32_e64 v7, 0x40c00000, s0
-; GFX11-NEXT: v_or_b32_e32 v10, 0x400000, v4
-; GFX11-NEXT: v_cndmask_b32_e32 v8, v3, v11, vcc_lo
-; GFX11-NEXT: v_add_nc_u32_e32 v6, 0x7fff, v6
-; GFX11-NEXT: v_cmp_u_f32_e32 vcc_lo, v4, v4
-; GFX11-NEXT: s_and_b32 s0, s4, 0xffff0000
-; GFX11-NEXT: v_add_nc_u32_e32 v3, v12, v5
-; GFX11-NEXT: v_bfe_u32 v12, v7, 16, 1
-; GFX11-NEXT: v_add_f32_e64 v13, 0x40c00000, s0
-; GFX11-NEXT: s_lshl_b32 s0, s4, 16
-; GFX11-NEXT: v_cndmask_b32_e32 v4, v6, v10, vcc_lo
-; GFX11-NEXT: v_add_f32_e64 v10, 0x40c00000, s0
-; GFX11-NEXT: v_add_nc_u32_e32 v3, 0x7fff, v3
-; GFX11-NEXT: v_or_b32_e32 v11, 0x400000, v5
-; GFX11-NEXT: v_cmp_u_f32_e32 vcc_lo, v5, v5
-; GFX11-NEXT: v_add_nc_u32_e32 v5, v12, v7
-; GFX11-NEXT: v_bfe_u32 v14, v10, 16, 1
-; GFX11-NEXT: v_or_b32_e32 v12, 0x400000, v7
-; GFX11-NEXT: s_lshl_b32 s0, s5, 16
-; GFX11-NEXT: v_cndmask_b32_e32 v3, v3, v11, vcc_lo
-; GFX11-NEXT: v_add_f32_e64 v11, 0x40c00000, s1
-; GFX11-NEXT: v_add_nc_u32_e32 v5, 0x7fff, v5
-; GFX11-NEXT: v_cmp_u_f32_e32 vcc_lo, v7, v7
-; GFX11-NEXT: v_add_nc_u32_e32 v7, v14, v10
-; GFX11-NEXT: v_or_b32_e32 v17, 0x400000, v10
-; GFX11-NEXT: v_bfe_u32 v15, v11, 16, 1
-; GFX11-NEXT: v_or_b32_e32 v18, 0x400000, v11
-; GFX11-NEXT: v_cndmask_b32_e32 v12, v5, v12, vcc_lo
-; GFX11-NEXT: v_add_f32_e64 v5, 0x40c00000, s0
-; GFX11-NEXT: v_add_nc_u32_e32 v7, 0x7fff, v7
-; GFX11-NEXT: v_cmp_u_f32_e32 vcc_lo, v10, v10
-; GFX11-NEXT: v_add_nc_u32_e32 v14, v15, v11
-; GFX11-NEXT: s_and_b32 s0, s6, 0xffff0000
-; GFX11-NEXT: v_bfe_u32 v16, v5, 16, 1
-; GFX11-NEXT: v_bfe_u32 v6, v13, 16, 1
-; GFX11-NEXT: v_cndmask_b32_e32 v10, v7, v17, vcc_lo
-; GFX11-NEXT: v_add_f32_e64 v7, 0x40c00000, s0
-; GFX11-NEXT: s_lshl_b32 s0, s6, 16
-; GFX11-NEXT: v_add_nc_u32_e32 v14, 0x7fff, v14
-; GFX11-NEXT: v_cmp_u_f32_e32 vcc_lo, v11, v11
-; GFX11-NEXT: v_add_nc_u32_e32 v16, v16, v5
-; GFX11-NEXT: v_add_f32_e64 v17, 0x40c00000, s0
-; GFX11-NEXT: s_lshl_b32 s0, s7, 16
-; GFX11-NEXT: v_add_nc_u32_e32 v6, v6, v13
-; GFX11-NEXT: s_delay_alu instid0(VALU_DEP_3)
-; GFX11-NEXT: v_dual_cndmask_b32 v11, v14, v18 :: v_dual_add_nc_u32 v14, 0x7fff, v16
-; GFX11-NEXT: v_bfe_u32 v16, v7, 16, 1
-; GFX11-NEXT: v_or_b32_e32 v18, 0x400000, v5
-; GFX11-NEXT: v_add_f32_e64 v19, 0x40c00000, s0
-; GFX11-NEXT: v_bfe_u32 v20, v17, 16, 1
-; GFX11-NEXT: v_cmp_u_f32_e32 vcc_lo, v5, v5
-; GFX11-NEXT: v_add_nc_u32_e32 v16, v16, v7
-; GFX11-NEXT: s_and_b32 s0, s7, 0xffff0000
-; GFX11-NEXT: v_bfe_u32 v22, v19, 16, 1
-; GFX11-NEXT: v_add_f32_e64 v21, 0x40c00000, s0
-; GFX11-NEXT: v_dual_cndmask_b32 v5, v14, v18 :: v_dual_add_nc_u32 v14, v20, v17
-; GFX11-NEXT: v_or_b32_e32 v23, 0x400000, v17
-; GFX11-NEXT: s_delay_alu instid0(VALU_DEP_4)
-; GFX11-NEXT: v_add_nc_u32_e32 v20, v22, v19
-; GFX11-NEXT: v_cmp_u_f32_e32 vcc_lo, v17, v17
-; GFX11-NEXT: v_bfe_u32 v18, v21, 16, 1
-; GFX11-NEXT: v_add_nc_u32_e32 v14, 0x7fff, v14
-; GFX11-NEXT: v_or_b32_e32 v24, 0x400000, v19
-; GFX11-NEXT: v_add_nc_u32_e32 v20, 0x7fff, v20
-; GFX11-NEXT: v_add_nc_u32_e32 v16, 0x7fff, v16
-; GFX11-NEXT: v_or_b32_e32 v22, 0x400000, v7
-; GFX11-NEXT: v_cndmask_b32_e32 v14, v14, v23, vcc_lo
-; GFX11-NEXT: v_cmp_u_f32_e32 vcc_lo, v19, v19
-; GFX11-NEXT: v_add_nc_u32_e32 v18, v18, v21
-; GFX11-NEXT: v_add_nc_u32_e32 v6, 0x7fff, v6
-; GFX11-NEXT: v_or_b32_e32 v15, 0x400000, v13
-; GFX11-NEXT: v_lshrrev_b32_e32 v14, 16, v14
-; GFX11-NEXT: v_cndmask_b32_e32 v19, v20, v24, vcc_lo
-; GFX11-NEXT: v_cmp_u_f32_e32 vcc_lo, v7, v7
-; GFX11-NEXT: v_add_nc_u32_e32 v17, 0x7fff, v18
-; GFX11-NEXT: v_or_b32_e32 v18, 0x400000, v21
-; GFX11-NEXT: v_lshrrev_b32_e32 v5, 16, v5
-; GFX11-NEXT: v_lshrrev_b32_e32 v10, 16, v10
-; GFX11-NEXT: v_cndmask_b32_e32 v16, v16, v22, vcc_lo
-; GFX11-NEXT: v_cmp_u_f32_e32 vcc_lo, v21, v21
-; GFX11-NEXT: s_delay_alu instid0(VALU_DEP_4)
-; GFX11-NEXT: v_and_or_b32 v5, 0xffff0000, v11, v5
-; GFX11-NEXT: v_lshrrev_b32_e32 v11, 16, v12
-; GFX11-NEXT: v_lshrrev_b32_e32 v12, 16, v4
-; GFX11-NEXT: v_cndmask_b32_e32 v7, v17, v18, vcc_lo
-; GFX11-NEXT: v_cmp_u_f32_e32 vcc_lo, v13, v13
-; GFX11-NEXT: v_lshrrev_b32_e32 v17, 16, v19
-; GFX11-NEXT: v_and_or_b32 v3, 0xffff0000, v3, v11
-; GFX11-NEXT: v_cndmask_b32_e32 v13, v6, v15, vcc_lo
-; GFX11-NEXT: v_and_or_b32 v6, 0xffff0000, v16, v14
-; GFX11-NEXT: v_lshrrev_b32_e32 v14, 16, v2
-; GFX11-NEXT: v_lshrrev_b32_e32 v15, 16, v1
-; GFX11-NEXT: v_and_or_b32 v7, 0xffff0000, v7, v17
-; GFX11-NEXT: v_and_or_b32 v4, 0xffff0000, v13, v10
-; GFX11-NEXT: v_and_or_b32 v2, 0xffff0000, v8, v12
-; GFX11-NEXT: v_and_or_b32 v1, 0xffff0000, v9, v14
-; GFX11-NEXT: v_and_or_b32 v0, 0xffff0000, v0, v15
-; GFX11-NEXT: s_setpc_b64 s[30:31]
-; GFX11-NEXT: .LBB95_3:
-; GFX11-NEXT: s_branch .LBB95_2
-; GFX11-NEXT: .LBB95_4:
-; GFX11-NEXT: v_dual_mov_b32 v0, s0 :: v_dual_mov_b32 v1, s1
-; GFX11-NEXT: v_dual_mov_b32 v2, s2 :: v_dual_mov_b32 v3, s3
-; GFX11-NEXT: v_dual_mov_b32 v4, s4 :: v_dual_mov_b32 v5, s5
-; GFX11-NEXT: v_dual_mov_b32 v6, s6 :: v_dual_mov_b32 v7, s7
-; GFX11-NEXT: s_setpc_b64 s[30:31]
+; GFX11-TRUE16-LABEL: bitcast_v16bf16_to_v16i16_scalar:
+; GFX11-TRUE16: ; %bb.0:
+; GFX11-TRUE16-NEXT: s_waitcnt vmcnt(0) expcnt(0) lgkmcnt(0)
+; GFX11-TRUE16-NEXT: s_mov_b32 s7, s19
+; GFX11-TRUE16-NEXT: s_mov_b32 s6, s18
+; GFX11-TRUE16-NEXT: s_mov_b32 s5, s17
+; GFX11-TRUE16-NEXT: s_mov_b32 s4, s16
+; GFX11-TRUE16-NEXT: s_cmp_lg_u32 s20, 0
+; GFX11-TRUE16-NEXT: s_mov_b32 s8, 0
+; GFX11-TRUE16-NEXT: s_cbranch_scc0 .LBB95_3
+; GFX11-TRUE16-NEXT: ; %bb.1: ; %Flow
+; GFX11-TRUE16-NEXT: s_and_not1_b32 vcc_lo, exec_lo, s8
+; GFX11-TRUE16-NEXT: s_cbranch_vccnz .LBB95_4
+; GFX11-TRUE16-NEXT: .LBB95_2: ; %cmp.true
+; GFX11-TRUE16-NEXT: s_and_b32 s8, s0, 0xffff0000
+; GFX11-TRUE16-NEXT: s_lshl_b32 s0, s0, 16
+; GFX11-TRUE16-NEXT: v_add_f32_e64 v0, 0x40c00000, s8
+; GFX11-TRUE16-NEXT: v_add_f32_e64 v1, 0x40c00000, s0
+; GFX11-TRUE16-NEXT: s_and_b32 s8, s1, 0xffff0000
+; GFX11-TRUE16-NEXT: s_lshl_b32 s0, s1, 16
+; GFX11-TRUE16-NEXT: v_add_f32_e64 v2, 0x40c00000, s8
+; GFX11-TRUE16-NEXT: v_bfe_u32 v3, v0, 16, 1
+; GFX11-TRUE16-NEXT: v_bfe_u32 v5, v1, 16, 1
+; GFX11-TRUE16-NEXT: v_add_f32_e64 v4, 0x40c00000, s0
+; GFX11-TRUE16-NEXT: v_or_b32_e32 v7, 0x400000, v0
+; GFX11-TRUE16-NEXT: v_bfe_u32 v6, v2, 16, 1
+; GFX11-TRUE16-NEXT: v_add_nc_u32_e32 v3, v3, v0
+; GFX11-TRUE16-NEXT: v_or_b32_e32 v8, 0x400000, v1
+; GFX11-TRUE16-NEXT: v_cmp_u_f32_e32 vcc_lo, v0, v0
+; GFX11-TRUE16-NEXT: v_bfe_u32 v10, v4, 16, 1
+; GFX11-TRUE16-NEXT: v_add_nc_u32_e32 v6, v6, v2
+; GFX11-TRUE16-NEXT: v_add_nc_u32_e32 v3, 0x7fff, v3
+; GFX11-TRUE16-NEXT: v_add_nc_u32_e32 v5, v5, v1
+; GFX11-TRUE16-NEXT: s_and_b32 s0, s2, 0xffff0000
+; GFX11-TRUE16-NEXT: v_or_b32_e32 v9, 0x400000, v2
+; GFX11-TRUE16-NEXT: v_add_nc_u32_e32 v6, 0x7fff, v6
+; GFX11-TRUE16-NEXT: s_delay_alu instid0(VALU_DEP_3)
+; GFX11-TRUE16-NEXT: v_dual_cndmask_b32 v0, v3, v7 :: v_dual_add_nc_u32 v5, 0x7fff, v5
+; GFX11-TRUE16-NEXT: v_cmp_u_f32_e32 vcc_lo, v1, v1
+; GFX11-TRUE16-NEXT: v_add_nc_u32_e32 v3, v10, v4
+; GFX11-TRUE16-NEXT: v_add_f32_e64 v7, 0x40c00000, s0
+; GFX11-TRUE16-NEXT: s_lshl_b32 s0, s2, 16
+; GFX11-TRUE16-NEXT: s_and_b32 s1, s5, 0xffff0000
+; GFX11-TRUE16-NEXT: v_cndmask_b32_e32 v8, v5, v8, vcc_lo
+; GFX11-TRUE16-NEXT: v_cmp_u_f32_e32 vcc_lo, v2, v2
+; GFX11-TRUE16-NEXT: v_or_b32_e32 v5, 0x400000, v4
+; GFX11-TRUE16-NEXT: v_add_nc_u32_e32 v2, 0x7fff, v3
+; GFX11-TRUE16-NEXT: v_bfe_u32 v3, v7, 16, 1
+; GFX11-TRUE16-NEXT: v_mov_b16_e32 v0.l, v8.h
+; GFX11-TRUE16-NEXT: v_cndmask_b32_e32 v1, v6, v9, vcc_lo
+; GFX11-TRUE16-NEXT: v_add_f32_e64 v6, 0x40c00000, s0
+; GFX11-TRUE16-NEXT: v_cmp_u_f32_e32 vcc_lo, v4, v4
+; GFX11-TRUE16-NEXT: s_and_b32 s0, s3, 0xffff0000
+; GFX11-TRUE16-NEXT: v_add_nc_u32_e32 v3, v3, v7
+; GFX11-TRUE16-NEXT: v_add_f32_e64 v9, 0x40c00000, s0
+; GFX11-TRUE16-NEXT: s_lshl_b32 s0, s3, 16
+; GFX11-TRUE16-NEXT: v_cndmask_b32_e32 v10, v2, v5, vcc_lo
+; GFX11-TRUE16-NEXT: v_bfe_u32 v2, v6, 16, 1
+; GFX11-TRUE16-NEXT: v_add_nc_u32_e32 v3, 0x7fff, v3
+; GFX11-TRUE16-NEXT: v_or_b32_e32 v4, 0x400000, v7
+; GFX11-TRUE16-NEXT: v_bfe_u32 v5, v9, 16, 1
+; GFX11-TRUE16-NEXT: v_add_f32_e64 v12, 0x40c00000, s0
+; GFX11-TRUE16-NEXT: v_add_nc_u32_e32 v11, v2, v6
+; GFX11-TRUE16-NEXT: v_cmp_u_f32_e32 vcc_lo, v7, v7
+; GFX11-TRUE16-NEXT: s_and_b32 s0, s4, 0xffff0000
+; GFX11-TRUE16-NEXT: v_or_b32_e32 v13, 0x400000, v9
+; GFX11-TRUE16-NEXT: v_bfe_u32 v7, v12, 16, 1
+; GFX11-TRUE16-NEXT: v_mov_b16_e32 v1.l, v10.h
+; GFX11-TRUE16-NEXT: v_dual_cndmask_b32 v2, v3, v4 :: v_dual_add_nc_u32 v3, v5, v9
+; GFX11-TRUE16-NEXT: v_add_nc_u32_e32 v4, 0x7fff, v11
+; GFX11-TRUE16-NEXT: v_or_b32_e32 v5, 0x400000, v6
+; GFX11-TRUE16-NEXT: v_cmp_u_f32_e32 vcc_lo, v6, v6
+; GFX11-TRUE16-NEXT: v_add_f32_e64 v11, 0x40c00000, s0
+; GFX11-TRUE16-NEXT: v_add_nc_u32_e32 v3, 0x7fff, v3
+; GFX11-TRUE16-NEXT: v_add_nc_u32_e32 v7, v7, v12
+; GFX11-TRUE16-NEXT: s_lshl_b32 s0, s4, 16
+; GFX11-TRUE16-NEXT: v_cndmask_b32_e32 v14, v4, v5, vcc_lo
+; GFX11-TRUE16-NEXT: v_cmp_u_f32_e32 vcc_lo, v9, v9
+; GFX11-TRUE16-NEXT: v_bfe_u32 v4, v11, 16, 1
+; GFX11-TRUE16-NEXT: v_add_nc_u32_e32 v5, 0x7fff, v7
+; GFX11-TRUE16-NEXT: v_or_b32_e32 v6, 0x400000, v12
+; GFX11-TRUE16-NEXT: v_add_f32_e64 v7, 0x40c00000, s0
+; GFX11-TRUE16-NEXT: v_cndmask_b32_e32 v3, v3, v13, vcc_lo
+; GFX11-TRUE16-NEXT: v_cmp_u_f32_e32 vcc_lo, v12, v12
+; GFX11-TRUE16-NEXT: v_add_nc_u32_e32 v4, v4, v11
+; GFX11-TRUE16-NEXT: v_add_f32_e64 v9, 0x40c00000, s1
+; GFX11-TRUE16-NEXT: s_lshl_b32 s0, s5, 16
+; GFX11-TRUE16-NEXT: v_or_b32_e32 v13, 0x400000, v11
+; GFX11-TRUE16-NEXT: v_cndmask_b32_e32 v12, v5, v6, vcc_lo
+; GFX11-TRUE16-NEXT: v_bfe_u32 v5, v7, 16, 1
+; GFX11-TRUE16-NEXT: v_add_nc_u32_e32 v4, 0x7fff, v4
+; GFX11-TRUE16-NEXT: v_bfe_u32 v6, v9, 16, 1
+; GFX11-TRUE16-NEXT: v_add_f32_e64 v15, 0x40c00000, s0
+; GFX11-TRUE16-NEXT: v_cmp_u_f32_e32 vcc_lo, v11, v11
+; GFX11-TRUE16-NEXT: v_add_nc_u32_e32 v5, v5, v7
+; GFX11-TRUE16-NEXT: v_or_b32_e32 v16, 0x400000, v9
+; GFX11-TRUE16-NEXT: v_add_nc_u32_e32 v6, v6, v9
+; GFX11-TRUE16-NEXT: v_bfe_u32 v11, v15, 16, 1
+; GFX11-TRUE16-NEXT: v_cndmask_b32_e32 v4, v4, v13, vcc_lo
+; GFX11-TRUE16-NEXT: v_add_nc_u32_e32 v5, 0x7fff, v5
+; GFX11-TRUE16-NEXT: v_or_b32_e32 v13, 0x400000, v7
+; GFX11-TRUE16-NEXT: v_cmp_u_f32_e32 vcc_lo, v7, v7
+; GFX11-TRUE16-NEXT: v_add_nc_u32_e32 v6, 0x7fff, v6
+; GFX11-TRUE16-NEXT: v_add_nc_u32_e32 v11, v11, v15
+; GFX11-TRUE16-NEXT: s_and_b32 s0, s6, 0xffff0000
+; GFX11-TRUE16-NEXT: v_mov_b16_e32 v3.l, v12.h
+; GFX11-TRUE16-NEXT: v_cndmask_b32_e32 v13, v5, v13, vcc_lo
+; GFX11-TRUE16-NEXT: v_cmp_u_f32_e32 vcc_lo, v9, v9
+; GFX11-TRUE16-NEXT: v_add_f32_e64 v7, 0x40c00000, s0
+; GFX11-TRUE16-NEXT: s_lshl_b32 s0, s6, 16
+; GFX11-TRUE16-NEXT: v_mov_b16_e32 v2.l, v14.h
+; GFX11-TRUE16-NEXT: v_mov_b16_e32 v4.l, v13.h
+; GFX11-TRUE16-NEXT: v_dual_cndmask_b32 v5, v6, v16 :: v_dual_add_nc_u32 v6, 0x7fff, v11
+; GFX11-TRUE16-NEXT: v_add_f32_e64 v11, 0x40c00000, s0
+; GFX11-TRUE16-NEXT: s_lshl_b32 s0, s7, 16
+; GFX11-TRUE16-NEXT: v_or_b32_e32 v16, 0x400000, v15
+; GFX11-TRUE16-NEXT: v_add_f32_e64 v17, 0x40c00000, s0
+; GFX11-TRUE16-NEXT: v_cmp_u_f32_e32 vcc_lo, v15, v15
+; GFX11-TRUE16-NEXT: v_bfe_u32 v18, v11, 16, 1
+; GFX11-TRUE16-NEXT: s_and_b32 s0, s7, 0xffff0000
+; GFX11-TRUE16-NEXT: v_bfe_u32 v9, v7, 16, 1
+; GFX11-TRUE16-NEXT: v_bfe_u32 v20, v17, 16, 1
+; GFX11-TRUE16-NEXT: v_cndmask_b32_e32 v15, v6, v16, vcc_lo
+; GFX11-TRUE16-NEXT: v_add_nc_u32_e32 v6, v18, v11
+; GFX11-TRUE16-NEXT: v_add_f32_e64 v19, 0x40c00000, s0
+; GFX11-TRUE16-NEXT: v_or_b32_e32 v21, 0x400000, v11
+; GFX11-TRUE16-NEXT: v_add_nc_u32_e32 v18, v20, v17
+; GFX11-TRUE16-NEXT: v_cmp_u_f32_e32 vcc_lo, v11, v11
+; GFX11-TRUE16-NEXT: v_add_nc_u32_e32 v6, 0x7fff, v6
+; GFX11-TRUE16-NEXT: v_add_nc_u32_e32 v9, v9, v7
+; GFX11-TRUE16-NEXT: v_bfe_u32 v16, v19, 16, 1
+; GFX11-TRUE16-NEXT: v_add_nc_u32_e32 v18, 0x7fff, v18
+; GFX11-TRUE16-NEXT: v_or_b32_e32 v22, 0x400000, v17
+; GFX11-TRUE16-NEXT: v_cndmask_b32_e32 v11, v6, v21, vcc_lo
+; GFX11-TRUE16-NEXT: v_cmp_u_f32_e32 vcc_lo, v17, v17
+; GFX11-TRUE16-NEXT: v_add_nc_u32_e32 v9, 0x7fff, v9
+; GFX11-TRUE16-NEXT: v_or_b32_e32 v20, 0x400000, v7
+; GFX11-TRUE16-NEXT: v_add_nc_u32_e32 v16, v16, v19
+; GFX11-TRUE16-NEXT: v_or_b32_e32 v21, 0x400000, v19
+; GFX11-TRUE16-NEXT: v_cndmask_b32_e32 v17, v18, v22, vcc_lo
+; GFX11-TRUE16-NEXT: v_cmp_u_f32_e32 vcc_lo, v7, v7
+; GFX11-TRUE16-NEXT: v_mov_b16_e32 v5.l, v15.h
+; GFX11-TRUE16-NEXT: v_add_nc_u32_e32 v16, 0x7fff, v16
+; GFX11-TRUE16-NEXT: v_cndmask_b32_e32 v6, v9, v20, vcc_lo
+; GFX11-TRUE16-NEXT: v_cmp_u_f32_e32 vcc_lo, v19, v19
+; GFX11-TRUE16-NEXT: v_mov_b16_e32 v6.l, v11.h
+; GFX11-TRUE16-NEXT: s_delay_alu instid0(VALU_DEP_4)
+; GFX11-TRUE16-NEXT: v_cndmask_b32_e32 v7, v16, v21, vcc_lo
+; GFX11-TRUE16-NEXT: v_mov_b16_e32 v7.l, v17.h
+; GFX11-TRUE16-NEXT: s_setpc_b64 s[30:31]
+; GFX11-TRUE16-NEXT: .LBB95_3:
+; GFX11-TRUE16-NEXT: s_branch .LBB95_2
+; GFX11-TRUE16-NEXT: .LBB95_4:
+; GFX11-TRUE16-NEXT: v_dual_mov_b32 v0, s0 :: v_dual_mov_b32 v1, s1
+; GFX11-TRUE16-NEXT: v_dual_mov_b32 v2, s2 :: v_dual_mov_b32 v3, s3
+; GFX11-TRUE16-NEXT: v_dual_mov_b32 v4, s4 :: v_dual_mov_b32 v5, s5
+; GFX11-TRUE16-NEXT: v_dual_mov_b32 v6, s6 :: v_dual_mov_b32 v7, s7
+; GFX11-TRUE16-NEXT: s_setpc_b64 s[30:31]
+;
+; GFX11-FAKE16-LABEL: bitcast_v16bf16_to_v16i16_scalar:
+; GFX11-FAKE16: ; %bb.0:
+; GFX11-FAKE16-NEXT: s_waitcnt vmcnt(0) expcnt(0) lgkmcnt(0)
+; GFX11-FAKE16-NEXT: s_mov_b32 s7, s19
+; GFX11-FAKE16-NEXT: s_mov_b32 s6, s18
+; GFX11-FAKE16-NEXT: s_mov_b32 s5, s17
+; GFX11-FAKE16-NEXT: s_mov_b32 s4, s16
+; GFX11-FAKE16-NEXT: s_cmp_lg_u32 s20, 0
+; GFX11-FAKE16-NEXT: s_mov_b32 s8, 0
+; GFX11-FAKE16-NEXT: s_cbranch_scc0 .LBB95_3
+; GFX11-FAKE16-NEXT: ; %bb.1: ; %Flow
+; GFX11-FAKE16-NEXT: s_and_not1_b32 vcc_lo, exec_lo, s8
+; GFX11-FAKE16-NEXT: s_cbranch_vccnz .LBB95_4
+; GFX11-FAKE16-NEXT: .LBB95_2: ; %cmp.true
+; GFX11-FAKE16-NEXT: s_and_b32 s8, s0, 0xffff0000
+; GFX11-FAKE16-NEXT: s_lshl_b32 s0, s0, 16
+; GFX11-FAKE16-NEXT: v_add_f32_e64 v0, 0x40c00000, s8
+; GFX11-FAKE16-NEXT: s_and_b32 s8, s1, 0xffff0000
+; GFX11-FAKE16-NEXT: v_add_f32_e64 v1, 0x40c00000, s0
+; GFX11-FAKE16-NEXT: v_add_f32_e64 v2, 0x40c00000, s8
+; GFX11-FAKE16-NEXT: s_lshl_b32 s0, s1, 16
+; GFX11-FAKE16-NEXT: v_bfe_u32 v3, v0, 16, 1
+; GFX11-FAKE16-NEXT: s_and_b32 s1, s2, 0xffff0000
+; GFX11-FAKE16-NEXT: v_bfe_u32 v4, v1, 16, 1
+; GFX11-FAKE16-NEXT: v_bfe_u32 v5, v2, 16, 1
+; GFX11-FAKE16-NEXT: v_add_f32_e64 v7, 0x40c00000, s1
+; GFX11-FAKE16-NEXT: v_add_nc_u32_e32 v3, v3, v0
+; GFX11-FAKE16-NEXT: v_or_b32_e32 v8, 0x400000, v0
+; GFX11-FAKE16-NEXT: v_cmp_u_f32_e32 vcc_lo, v0, v0
+; GFX11-FAKE16-NEXT: v_add_f32_e64 v6, 0x40c00000, s0
+; GFX11-FAKE16-NEXT: v_or_b32_e32 v11, 0x400000, v7
+; GFX11-FAKE16-NEXT: v_add_nc_u32_e32 v3, 0x7fff, v3
+; GFX11-FAKE16-NEXT: v_add_nc_u32_e32 v5, v5, v2
+; GFX11-FAKE16-NEXT: v_or_b32_e32 v9, 0x400000, v1
+; GFX11-FAKE16-NEXT: s_lshl_b32 s0, s2, 16
+; GFX11-FAKE16-NEXT: s_and_b32 s1, s5, 0xffff0000
+; GFX11-FAKE16-NEXT: s_delay_alu instid0(VALU_DEP_2) | instskip(SKIP_4) | instid1(VALU_DEP_3)
+; GFX11-FAKE16-NEXT: v_dual_cndmask_b32 v0, v3, v8 :: v_dual_add_nc_u32 v5, 0x7fff, v5
+; GFX11-FAKE16-NEXT: v_or_b32_e32 v3, 0x400000, v2
+; GFX11-FAKE16-NEXT: v_add_nc_u32_e32 v4, v4, v1
+; GFX11-FAKE16-NEXT: v_cmp_u_f32_e32 vcc_lo, v1, v1
+; GFX11-FAKE16-NEXT: v_bfe_u32 v8, v6, 16, 1
+; GFX11-FAKE16-NEXT: v_add_nc_u32_e32 v4, 0x7fff, v4
+; GFX11-FAKE16-NEXT: s_delay_alu instid0(VALU_DEP_1)
+; GFX11-FAKE16-NEXT: v_cndmask_b32_e32 v1, v4, v9, vcc_lo
+; GFX11-FAKE16-NEXT: v_bfe_u32 v4, v7, 16, 1
+; GFX11-FAKE16-NEXT: v_cmp_u_f32_e32 vcc_lo, v2, v2
+; GFX11-FAKE16-NEXT: v_add_nc_u32_e32 v2, v8, v6
+; GFX11-FAKE16-NEXT: v_or_b32_e32 v8, 0x400000, v6
+; GFX11-FAKE16-NEXT: v_cndmask_b32_e32 v9, v5, v3, vcc_lo
+; GFX11-FAKE16-NEXT: v_add_nc_u32_e32 v3, v4, v7
+; GFX11-FAKE16-NEXT: v_add_f32_e64 v4, 0x40c00000, s0
+; GFX11-FAKE16-NEXT: s_and_b32 s0, s3, 0xffff0000
+; GFX11-FAKE16-NEXT: v_add_nc_u32_e32 v2, 0x7fff, v2
+; GFX11-FAKE16-NEXT: v_cmp_u_f32_e32 vcc_lo, v6, v6
+; GFX11-FAKE16-NEXT: v_add_f32_e64 v5, 0x40c00000, s0
+; GFX11-FAKE16-NEXT: v_bfe_u32 v10, v4, 16, 1
+; GFX11-FAKE16-NEXT: v_add_nc_u32_e32 v3, 0x7fff, v3
+; GFX11-FAKE16-NEXT: s_lshl_b32 s0, s3, 16
+; GFX11-FAKE16-NEXT: v_cndmask_b32_e32 v2, v2, v8, vcc_lo
+; GFX11-FAKE16-NEXT: v_cmp_u_f32_e32 vcc_lo, v7, v7
+; GFX11-FAKE16-NEXT: v_add_nc_u32_e32 v6, v10, v4
+; GFX11-FAKE16-NEXT: v_bfe_u32 v12, v5, 16, 1
+; GFX11-FAKE16-NEXT: v_add_f32_e64 v7, 0x40c00000, s0
+; GFX11-FAKE16-NEXT: v_or_b32_e32 v10, 0x400000, v4
+; GFX11-FAKE16-NEXT: v_cndmask_b32_e32 v8, v3, v11, vcc_lo
+; GFX11-FAKE16-NEXT: v_add_nc_u32_e32 v6, 0x7fff, v6
+; GFX11-FAKE16-NEXT: v_cmp_u_f32_e32 vcc_lo, v4, v4
+; GFX11-FAKE16-NEXT: s_and_b32 s0, s4, 0xffff0000
+; GFX11-FAKE16-NEXT: v_add_nc_u32_e32 v3, v12, v5
+; GFX11-FAKE16-NEXT: v_bfe_u32 v12, v7, 16, 1
+; GFX11-FAKE16-NEXT: v_add_f32_e64 v13, 0x40c00000, s0
+; GFX11-FAKE16-NEXT: s_lshl_b32 s0, s4, 16
+; GFX11-FAKE16-NEXT: v_cndmask_b32_e32 v4, v6, v10, vcc_lo
+; GFX11-FAKE16-NEXT: v_add_f32_e64 v10, 0x40c00000, s0
+; GFX11-FAKE16-NEXT: v_add_nc_u32_e32 v3, 0x7fff, v3
+; GFX11-FAKE16-NEXT: v_or_b32_e32 v11, 0x400000, v5
+; GFX11-FAKE16-NEXT: v_cmp_u_f32_e32 vcc_lo, v5, v5
+; GFX11-FAKE16-NEXT: v_add_nc_u32_e32 v5, v12, v7
+; GFX11-FAKE16-NEXT: v_bfe_u32 v14, v10, 16, 1
+; GFX11-FAKE16-NEXT: v_or_b32_e32 v12, 0x400000, v7
+; GFX11-FAKE16-NEXT: s_lshl_b32 s0, s5, 16
+; GFX11-FAKE16-NEXT: v_cndmask_b32_e32 v3, v3, v11, vcc_lo
+; GFX11-FAKE16-NEXT: v_add_f32_e64 v11, 0x40c00000, s1
+; GFX11-FAKE16-NEXT: v_add_nc_u32_e32 v5, 0x7fff, v5
+; GFX11-FAKE16-NEXT: v_cmp_u_f32_e32 vcc_lo, v7, v7
+; GFX11-FAKE16-NEXT: v_add_nc_u32_e32 v7, v14, v10
+; GFX11-FAKE16-NEXT: v_or_b32_e32 v17, 0x400000, v10
+; GFX11-FAKE16-NEXT: v_bfe_u32 v15, v11, 16, 1
+; GFX11-FAKE16-NEXT: v_or_b32_e32 v18, 0x400000, v11
+; GFX11-FAKE16-NEXT: v_cndmask_b32_e32 v12, v5, v12, vcc_lo
+; GFX11-FAKE16-NEXT: v_add_f32_e64 v5, 0x40c00000, s0
+; GFX11-FAKE16-NEXT: v_add_nc_u32_e32 v7, 0x7fff, v7
+; GFX11-FAKE16-NEXT: v_cmp_u_f32_e32 vcc_lo, v10, v10
+; GFX11-FAKE16-NEXT: v_add_nc_u32_e32 v14, v15, v11
+; GFX11-FAKE16-NEXT: s_and_b32 s0, s6, 0xffff0000
+; GFX11-FAKE16-NEXT: v_bfe_u32 v16, v5, 16, 1
+; GFX11-FAKE16-NEXT: v_bfe_u32 v6, v13, 16, 1
+; GFX11-FAKE16-NEXT: v_cndmask_b32_e32 v10, v7, v17, vcc_lo
+; GFX11-FAKE16-NEXT: v_add_f32_e64 v7, 0x40c00000, s0
+; GFX11-FAKE16-NEXT: s_lshl_b32 s0, s6, 16
+; GFX11-FAKE16-NEXT: v_add_nc_u32_e32 v14, 0x7fff, v14
+; GFX11-FAKE16-NEXT: v_cmp_u_f32_e32 vcc_lo, v11, v11
+; GFX11-FAKE16-NEXT: v_add_nc_u32_e32 v16, v16, v5
+; GFX11-FAKE16-NEXT: v_add_f32_e64 v17, 0x40c00000, s0
+; GFX11-FAKE16-NEXT: s_lshl_b32 s0, s7, 16
+; GFX11-FAKE16-NEXT: v_add_nc_u32_e32 v6, v6, v13
+; GFX11-FAKE16-NEXT: s_delay_alu instid0(VALU_DEP_3)
+; GFX11-FAKE16-NEXT: v_dual_cndmask_b32 v11, v14, v18 :: v_dual_add_nc_u32 v14, 0x7fff, v16
+; GFX11-FAKE16-NEXT: v_bfe_u32 v16, v7, 16, 1
+; GFX11-FAKE16-NEXT: v_or_b32_e32 v18, 0x400000, v5
+; GFX11-FAKE16-NEXT: v_add_f32_e64 v19, 0x40c00000, s0
+; GFX11-FAKE16-NEXT: v_bfe_u32 v20, v17, 16, 1
+; GFX11-FAKE16-NEXT: v_cmp_u_f32_e32 vcc_lo, v5, v5
+; GFX11-FAKE16-NEXT: v_add_nc_u32_e32 v16, v16, v7
+; GFX11-FAKE16-NEXT: s_and_b32 s0, s7, 0xffff0000
+; GFX11-FAKE16-NEXT: v_bfe_u32 v22, v19, 16, 1
+; GFX11-FAKE16-NEXT: v_add_f32_e64 v21, 0x40c00000, s0
+; GFX11-FAKE16-NEXT: v_dual_cndmask_b32 v5, v14, v18 :: v_dual_add_nc_u32 v14, v20, v17
+; GFX11-FAKE16-NEXT: v_or_b32_e32 v23, 0x400000, v17
+; GFX11-FAKE16-NEXT: s_delay_alu instid0(VALU_DEP_4)
+; GFX11-FAKE16-NEXT: v_add_nc_u32_e32 v20, v22, v19
+; GFX11-FAKE16-NEXT: v_cmp_u_f32_e32 vcc_lo, v17, v17
+; GFX11-FAKE16-NEXT: v_bfe_u32 v18, v21, 16, 1
+; GFX11-FAKE16-NEXT: v_add_nc_u32_e32 v14, 0x7fff, v14
+; GFX11-FAKE16-NEXT: v_or_b32_e32 v24, 0x400000, v19
+; GFX11-FAKE16-NEXT: v_add_nc_u32_e32 v20, 0x7fff, v20
+; GFX11-FAKE16-NEXT: v_add_nc_u32_e32 v16, 0x7fff, v16
+; GFX11-FAKE16-NEXT: v_or_b32_e32 v22, 0x400000, v7
+; GFX11-FAKE16-NEXT: v_cndmask_b32_e32 v14, v14, v23, vcc_lo
+; GFX11-FAKE16-NEXT: v_cmp_u_f32_e32 vcc_lo, v19, v19
+; GFX11-FAKE16-NEXT: v_add_nc_u32_e32 v18, v18, v21
+; GFX11-FAKE16-NEXT: v_add_nc_u32_e32 v6, 0x7fff, v6
+; GFX11-FAKE16-NEXT: v_or_b32_e32 v15, 0x400000, v13
+; GFX11-FAKE16-NEXT: v_lshrrev_b32_e32 v14, 16, v14
+; GFX11-FAKE16-NEXT: v_cndmask_b32_e32 v19, v20, v24, vcc_lo
+; GFX11-FAKE16-NEXT: v_cmp_u_f32_e32 vcc_lo, v7, v7
+; GFX11-FAKE16-NEXT: v_add_nc_u32_e32 v17, 0x7fff, v18
+; GFX11-FAKE16-NEXT: v_or_b32_e32 v18, 0x400000, v21
+; GFX11-FAKE16-NEXT: v_lshrrev_b32_e32 v5, 16, v5
+; GFX11-FAKE16-NEXT: v_lshrrev_b32_e32 v10, 16, v10
+; GFX11-FAKE16-NEXT: v_cndmask_b32_e32 v16, v16, v22, vcc_lo
+; GFX11-FAKE16-NEXT: v_cmp_u_f32_e32 vcc_lo, v21, v21
+; GFX11-FAKE16-NEXT: s_delay_alu instid0(VALU_DEP_4)
+; GFX11-FAKE16-NEXT: v_and_or_b32 v5, 0xffff0000, v11, v5
+; GFX11-FAKE16-NEXT: v_lshrrev_b32_e32 v11, 16, v12
+; GFX11-FAKE16-NEXT: v_lshrrev_b32_e32 v12, 16, v4
+; GFX11-FAKE16-NEXT: v_cndmask_b32_e32 v7, v17, v18, vcc_lo
+; GFX11-FAKE16-NEXT: v_cmp_u_f32_e32 vcc_lo, v13, v13
+; GFX11-FAKE16-NEXT: v_lshrrev_b32_e32 v17, 16, v19
+; GFX11-FAKE16-NEXT: v_and_or_b32 v3, 0xffff0000, v3, v11
+; GFX11-FAKE16-NEXT: v_cndmask_b32_e32 v13, v6, v15, vcc_lo
+; GFX11-FAKE16-NEXT: v_and_or_b32 v6, 0xffff0000, v16, v14
+; GFX11-FAKE16-NEXT: v_lshrrev_b32_e32 v14, 16, v2
+; GFX11-FAKE16-NEXT: v_lshrrev_b32_e32 v15, 16, v1
+; GFX11-FAKE16-NEXT: v_and_or_b32 v7, 0xffff0000, v7, v17
+; GFX11-FAKE16-NEXT: v_and_or_b32 v4, 0xffff0000, v13, v10
+; GFX11-FAKE16-NEXT: v_and_or_b32 v2, 0xffff0000, v8, v12
+; GFX11-FAKE16-NEXT: v_and_or_b32 v1, 0xffff0000, v9, v14
+; GFX11-FAKE16-NEXT: v_and_or_b32 v0, 0xffff0000, v0, v15
+; GFX11-FAKE16-NEXT: s_setpc_b64 s[30:31]
+; GFX11-FAKE16-NEXT: .LBB95_3:
+; GFX11-FAKE16-NEXT: s_branch .LBB95_2
+; GFX11-FAKE16-NEXT: .LBB95_4:
+; GFX11-FAKE16-NEXT: v_dual_mov_b32 v0, s0 :: v_dual_mov_b32 v1, s1
+; GFX11-FAKE16-NEXT: v_dual_mov_b32 v2, s2 :: v_dual_mov_b32 v3, s3
+; GFX11-FAKE16-NEXT: v_dual_mov_b32 v4, s4 :: v_dual_mov_b32 v5, s5
+; GFX11-FAKE16-NEXT: v_dual_mov_b32 v6, s6 :: v_dual_mov_b32 v7, s7
+; GFX11-FAKE16-NEXT: s_setpc_b64 s[30:31]
%cmp = icmp eq i32 %b, 0
br i1 %cmp, label %cmp.true, label %cmp.false
@@ -32462,177 +33328,351 @@ define inreg <16 x i16> @bitcast_v32i8_to_v16i16_scalar(<32 x i8> inreg %a, i32
; GFX9-NEXT: ; implicit-def: $vgpr0_vgpr1_vgpr2_vgpr3_vgpr4_vgpr5_vgpr6_vgpr7
; GFX9-NEXT: s_branch .LBB99_2
;
-; GFX11-LABEL: bitcast_v32i8_to_v16i16_scalar:
-; GFX11: ; %bb.0:
-; GFX11-NEXT: s_waitcnt vmcnt(0) expcnt(0) lgkmcnt(0)
-; GFX11-NEXT: v_cmp_ne_u32_e32 vcc_lo, 0, v14
-; GFX11-NEXT: v_dual_mov_b32 v16, v6 :: v_dual_mov_b32 v17, v0
-; GFX11-NEXT: v_dual_mov_b32 v18, v4 :: v_dual_mov_b32 v15, v2
-; GFX11-NEXT: v_lshlrev_b32_e32 v14, 8, v1
-; GFX11-NEXT: v_lshlrev_b32_e32 v20, 8, v3
-; GFX11-NEXT: v_lshlrev_b32_e32 v19, 8, v5
-; GFX11-NEXT: v_lshlrev_b32_e32 v21, 8, v7
-; GFX11-NEXT: v_lshlrev_b32_e32 v9, 8, v9
-; GFX11-NEXT: v_lshlrev_b32_e32 v11, 8, v11
-; GFX11-NEXT: v_lshlrev_b32_e32 v13, 8, v13
-; GFX11-NEXT: s_mov_b32 s4, 0
-; GFX11-NEXT: s_and_b32 s5, vcc_lo, exec_lo
-; GFX11-NEXT: s_cbranch_scc0 .LBB99_4
-; GFX11-NEXT: ; %bb.1: ; %cmp.false
-; GFX11-NEXT: s_and_b32 s5, s0, 0xff
-; GFX11-NEXT: s_lshl_b32 s6, s1, 8
-; GFX11-NEXT: s_and_b32 s7, s2, 0xff
-; GFX11-NEXT: s_lshl_b32 s8, s3, 8
-; GFX11-NEXT: s_or_b32 s5, s5, s6
-; GFX11-NEXT: s_or_b32 s6, s7, s8
-; GFX11-NEXT: s_and_b32 s7, s16, 0xff
-; GFX11-NEXT: s_lshl_b32 s8, s17, 8
-; GFX11-NEXT: s_and_b32 s9, s18, 0xff
-; GFX11-NEXT: s_lshl_b32 s10, s19, 8
-; GFX11-NEXT: s_or_b32 s7, s7, s8
-; GFX11-NEXT: s_or_b32 s8, s9, s10
-; GFX11-NEXT: s_pack_ll_b32_b16 s5, s5, s6
-; GFX11-NEXT: s_pack_ll_b32_b16 s6, s7, s8
-; GFX11-NEXT: s_and_b32 s7, s20, 0xff
-; GFX11-NEXT: s_lshl_b32 s8, s21, 8
-; GFX11-NEXT: s_and_b32 s9, s22, 0xff
-; GFX11-NEXT: s_lshl_b32 s10, s23, 8
-; GFX11-NEXT: s_or_b32 s7, s7, s8
-; GFX11-NEXT: s_or_b32 s8, s9, s10
-; GFX11-NEXT: s_and_b32 s9, s24, 0xff
-; GFX11-NEXT: s_lshl_b32 s10, s25, 8
-; GFX11-NEXT: v_and_b32_e32 v1, 0xff, v15
-; GFX11-NEXT: s_pack_ll_b32_b16 s7, s7, s8
-; GFX11-NEXT: s_or_b32 s8, s9, s10
-; GFX11-NEXT: s_and_b32 s9, s26, 0xff
-; GFX11-NEXT: s_lshl_b32 s10, s27, 8
-; GFX11-NEXT: v_and_b32_e32 v4, 0xff, v16
-; GFX11-NEXT: s_or_b32 s9, s9, s10
-; GFX11-NEXT: v_or_b32_e32 v1, v1, v20
-; GFX11-NEXT: v_and_b32_e32 v5, 0xff, v10
-; GFX11-NEXT: s_pack_ll_b32_b16 s8, s8, s9
-; GFX11-NEXT: v_and_b32_e32 v2, 0xff, v18
-; GFX11-NEXT: v_and_b32_e32 v0, 0xff, v17
-; GFX11-NEXT: v_and_b32_e32 v6, 0xff, v8
-; GFX11-NEXT: v_or_b32_e32 v4, v4, v21
-; GFX11-NEXT: v_and_b32_e32 v7, 0xff, v12
-; GFX11-NEXT: v_or_b32_e32 v2, v2, v19
-; GFX11-NEXT: v_or_b32_e32 v5, v5, v11
-; GFX11-NEXT: v_and_b32_e32 v1, 0xffff, v1
-; GFX11-NEXT: s_and_b32 s11, s28, 0xff
-; GFX11-NEXT: s_lshl_b32 s12, s29, 8
-; GFX11-NEXT: v_or_b32_e32 v6, v6, v9
-; GFX11-NEXT: s_or_b32 s10, s11, s12
-; GFX11-NEXT: v_and_b32_e32 v22, 0xffff, v4
-; GFX11-NEXT: v_and_b32_e64 v3, 0xffff, s10
-; GFX11-NEXT: v_or_b32_e32 v7, v7, v13
-; GFX11-NEXT: v_and_b32_e32 v23, 0xffff, v5
-; GFX11-NEXT: v_lshl_or_b32 v5, v2, 16, v1
-; GFX11-NEXT: v_mov_b32_e32 v1, s6
-; GFX11-NEXT: v_or_b32_e32 v0, v0, v14
-; GFX11-NEXT: v_lshl_or_b32 v6, v6, 16, v22
-; GFX11-NEXT: v_lshl_or_b32 v7, v7, 16, v23
-; GFX11-NEXT: v_mov_b32_e32 v2, s7
-; GFX11-NEXT: s_delay_alu instid0(VALU_DEP_4)
-; GFX11-NEXT: v_lshl_or_b32 v4, v0, 16, v3
-; GFX11-NEXT: v_dual_mov_b32 v0, s5 :: v_dual_mov_b32 v3, s8
-; GFX11-NEXT: s_and_not1_b32 vcc_lo, exec_lo, s4
-; GFX11-NEXT: s_cbranch_vccnz .LBB99_3
-; GFX11-NEXT: .LBB99_2: ; %cmp.true
-; GFX11-NEXT: s_add_i32 s28, s28, 3
-; GFX11-NEXT: s_lshl_b32 s5, s29, 8
-; GFX11-NEXT: s_and_b32 s4, s28, 0xff
-; GFX11-NEXT: s_add_i32 s24, s24, 3
-; GFX11-NEXT: s_or_b32 s4, s5, s4
-; GFX11-NEXT: s_and_b32 s5, s24, 0xff
-; GFX11-NEXT: s_lshl_b32 s6, s25, 8
-; GFX11-NEXT: s_add_i32 s26, s26, 3
-; GFX11-NEXT: s_or_b32 s5, s6, s5
-; GFX11-NEXT: s_and_b32 s6, s26, 0xff
-; GFX11-NEXT: s_lshl_b32 s7, s27, 8
-; GFX11-NEXT: s_add_i32 s20, s20, 3
-; GFX11-NEXT: s_or_b32 s6, s7, s6
-; GFX11-NEXT: s_and_b32 s7, s20, 0xff
-; GFX11-NEXT: s_lshl_b32 s8, s21, 8
-; GFX11-NEXT: s_add_i32 s22, s22, 3
-; GFX11-NEXT: s_or_b32 s7, s8, s7
-; GFX11-NEXT: s_and_b32 s8, s22, 0xff
-; GFX11-NEXT: s_lshl_b32 s9, s23, 8
-; GFX11-NEXT: s_add_i32 s16, s16, 3
-; GFX11-NEXT: s_or_b32 s8, s9, s8
-; GFX11-NEXT: s_and_b32 s9, s16, 0xff
-; GFX11-NEXT: s_lshl_b32 s10, s17, 8
-; GFX11-NEXT: s_add_i32 s18, s18, 3
-; GFX11-NEXT: s_add_i32 s0, s0, 3
-; GFX11-NEXT: s_add_i32 s2, s2, 3
-; GFX11-NEXT: s_or_b32 s9, s10, s9
-; GFX11-NEXT: s_and_b32 s10, s18, 0xff
-; GFX11-NEXT: s_lshl_b32 s11, s19, 8
-; GFX11-NEXT: s_and_b32 s0, s0, 0xff
-; GFX11-NEXT: s_lshl_b32 s1, s1, 8
-; GFX11-NEXT: s_and_b32 s2, s2, 0xff
-; GFX11-NEXT: s_lshl_b32 s3, s3, 8
-; GFX11-NEXT: s_or_b32 s10, s11, s10
-; GFX11-NEXT: s_or_b32 s0, s1, s0
-; GFX11-NEXT: s_or_b32 s1, s3, s2
-; GFX11-NEXT: s_addk_i32 s5, 0x300
-; GFX11-NEXT: s_addk_i32 s6, 0x300
-; GFX11-NEXT: s_addk_i32 s9, 0x300
-; GFX11-NEXT: s_addk_i32 s0, 0x300
-; GFX11-NEXT: s_addk_i32 s1, 0x300
-; GFX11-NEXT: s_addk_i32 s10, 0x300
-; GFX11-NEXT: v_add_nc_u32_e32 v4, 3, v15
-; GFX11-NEXT: s_pack_ll_b32_b16 s0, s0, s1
-; GFX11-NEXT: s_pack_ll_b32_b16 s1, s9, s10
-; GFX11-NEXT: v_add_nc_u32_e32 v0, 3, v10
-; GFX11-NEXT: s_pack_ll_b32_b16 s3, s5, s6
-; GFX11-NEXT: v_add_nc_u32_e32 v2, 3, v16
-; GFX11-NEXT: s_addk_i32 s7, 0x300
-; GFX11-NEXT: s_addk_i32 s8, 0x300
-; GFX11-NEXT: v_add_nc_u32_e32 v1, 3, v12
-; GFX11-NEXT: v_and_b32_e32 v0, 0xff, v0
-; GFX11-NEXT: v_add_nc_u32_e32 v5, 3, v18
-; GFX11-NEXT: v_and_b32_e32 v2, 0xff, v2
-; GFX11-NEXT: v_and_b32_e32 v4, 0xff, v4
-; GFX11-NEXT: v_add_nc_u32_e32 v6, 3, v17
-; GFX11-NEXT: s_pack_ll_b32_b16 s2, s7, s8
-; GFX11-NEXT: v_add_nc_u32_e32 v3, 3, v8
-; GFX11-NEXT: v_and_b32_e32 v1, 0xff, v1
-; GFX11-NEXT: v_or_b32_e32 v0, v11, v0
-; GFX11-NEXT: v_or_b32_e32 v2, v21, v2
-; GFX11-NEXT: v_or_b32_e32 v4, v20, v4
-; GFX11-NEXT: v_and_b32_e32 v3, 0xff, v3
-; GFX11-NEXT: v_and_b32_e32 v5, 0xff, v5
-; GFX11-NEXT: v_and_b32_e32 v6, 0xff, v6
-; GFX11-NEXT: v_or_b32_e32 v1, v13, v1
-; GFX11-NEXT: v_add_nc_u32_e32 v0, 0x300, v0
-; GFX11-NEXT: v_or_b32_e32 v3, v9, v3
-; GFX11-NEXT: v_add_nc_u32_e32 v2, 0x300, v2
-; GFX11-NEXT: v_add_nc_u32_e32 v4, 0x300, v4
-; GFX11-NEXT: v_or_b32_e32 v5, v19, v5
-; GFX11-NEXT: v_or_b32_e32 v6, v14, v6
-; GFX11-NEXT: s_addk_i32 s4, 0x300
-; GFX11-NEXT: v_add_nc_u32_e32 v1, 0x300, v1
-; GFX11-NEXT: v_add_nc_u32_e32 v3, 0x300, v3
-; GFX11-NEXT: v_add_nc_u32_e32 v5, 0x300, v5
-; GFX11-NEXT: v_add_nc_u32_e32 v6, 0x300, v6
-; GFX11-NEXT: v_and_b32_e64 v7, 0xffff, s4
-; GFX11-NEXT: v_and_b32_e32 v8, 0xffff, v4
-; GFX11-NEXT: v_and_b32_e32 v2, 0xffff, v2
-; GFX11-NEXT: v_and_b32_e32 v0, 0xffff, v0
-; GFX11-NEXT: s_delay_alu instid0(VALU_DEP_4) | instskip(NEXT) | instid1(VALU_DEP_4)
-; GFX11-NEXT: v_lshl_or_b32 v4, v6, 16, v7
-; GFX11-NEXT: v_lshl_or_b32 v5, v5, 16, v8
-; GFX11-NEXT: s_delay_alu instid0(VALU_DEP_4) | instskip(NEXT) | instid1(VALU_DEP_4)
-; GFX11-NEXT: v_lshl_or_b32 v6, v3, 16, v2
-; GFX11-NEXT: v_lshl_or_b32 v7, v1, 16, v0
-; GFX11-NEXT: v_dual_mov_b32 v0, s0 :: v_dual_mov_b32 v1, s1
-; GFX11-NEXT: v_dual_mov_b32 v2, s2 :: v_dual_mov_b32 v3, s3
-; GFX11-NEXT: .LBB99_3: ; %end
-; GFX11-NEXT: s_setpc_b64 s[30:31]
-; GFX11-NEXT: .LBB99_4:
-; GFX11-NEXT: ; implicit-def: $vgpr0_vgpr1_vgpr2_vgpr3_vgpr4_vgpr5_vgpr6_vgpr7
-; GFX11-NEXT: s_branch .LBB99_2
+; GFX11-TRUE16-LABEL: bitcast_v32i8_to_v16i16_scalar:
+; GFX11-TRUE16: ; %bb.0:
+; GFX11-TRUE16-NEXT: s_waitcnt vmcnt(0) expcnt(0) lgkmcnt(0)
+; GFX11-TRUE16-NEXT: v_cmp_ne_u32_e32 vcc_lo, 0, v14
+; GFX11-TRUE16-NEXT: v_dual_mov_b32 v16, v6 :: v_dual_mov_b32 v17, v0
+; GFX11-TRUE16-NEXT: v_dual_mov_b32 v18, v4 :: v_dual_mov_b32 v15, v2
+; GFX11-TRUE16-NEXT: v_lshlrev_b32_e32 v14, 8, v1
+; GFX11-TRUE16-NEXT: v_lshlrev_b32_e32 v20, 8, v3
+; GFX11-TRUE16-NEXT: v_lshlrev_b32_e32 v19, 8, v5
+; GFX11-TRUE16-NEXT: v_lshlrev_b32_e32 v21, 8, v7
+; GFX11-TRUE16-NEXT: v_lshlrev_b32_e32 v9, 8, v9
+; GFX11-TRUE16-NEXT: v_lshlrev_b32_e32 v11, 8, v11
+; GFX11-TRUE16-NEXT: v_lshlrev_b32_e32 v13, 8, v13
+; GFX11-TRUE16-NEXT: s_mov_b32 s4, 0
+; GFX11-TRUE16-NEXT: s_and_b32 s5, vcc_lo, exec_lo
+; GFX11-TRUE16-NEXT: s_cbranch_scc0 .LBB99_4
+; GFX11-TRUE16-NEXT: ; %bb.1: ; %cmp.false
+; GFX11-TRUE16-NEXT: s_and_b32 s5, s0, 0xff
+; GFX11-TRUE16-NEXT: s_lshl_b32 s6, s1, 8
+; GFX11-TRUE16-NEXT: s_and_b32 s7, s2, 0xff
+; GFX11-TRUE16-NEXT: s_lshl_b32 s8, s3, 8
+; GFX11-TRUE16-NEXT: s_or_b32 s5, s5, s6
+; GFX11-TRUE16-NEXT: s_or_b32 s6, s7, s8
+; GFX11-TRUE16-NEXT: s_and_b32 s7, s16, 0xff
+; GFX11-TRUE16-NEXT: s_lshl_b32 s8, s17, 8
+; GFX11-TRUE16-NEXT: s_and_b32 s9, s18, 0xff
+; GFX11-TRUE16-NEXT: s_lshl_b32 s10, s19, 8
+; GFX11-TRUE16-NEXT: s_or_b32 s7, s7, s8
+; GFX11-TRUE16-NEXT: s_or_b32 s8, s9, s10
+; GFX11-TRUE16-NEXT: s_pack_ll_b32_b16 s5, s5, s6
+; GFX11-TRUE16-NEXT: s_pack_ll_b32_b16 s6, s7, s8
+; GFX11-TRUE16-NEXT: s_and_b32 s7, s20, 0xff
+; GFX11-TRUE16-NEXT: s_lshl_b32 s8, s21, 8
+; GFX11-TRUE16-NEXT: s_and_b32 s9, s22, 0xff
+; GFX11-TRUE16-NEXT: s_lshl_b32 s10, s23, 8
+; GFX11-TRUE16-NEXT: s_or_b32 s7, s7, s8
+; GFX11-TRUE16-NEXT: s_or_b32 s8, s9, s10
+; GFX11-TRUE16-NEXT: s_and_b32 s9, s24, 0xff
+; GFX11-TRUE16-NEXT: s_lshl_b32 s10, s25, 8
+; GFX11-TRUE16-NEXT: s_and_b32 s11, s26, 0xff
+; GFX11-TRUE16-NEXT: s_lshl_b32 s12, s27, 8
+; GFX11-TRUE16-NEXT: s_or_b32 s9, s9, s10
+; GFX11-TRUE16-NEXT: s_or_b32 s10, s11, s12
+; GFX11-TRUE16-NEXT: s_pack_ll_b32_b16 s7, s7, s8
+; GFX11-TRUE16-NEXT: s_pack_ll_b32_b16 s8, s9, s10
+; GFX11-TRUE16-NEXT: s_and_b32 s9, s28, 0xff
+; GFX11-TRUE16-NEXT: s_lshl_b32 s10, s29, 8
+; GFX11-TRUE16-NEXT: v_and_b32_e32 v7, 0xff, v10
+; GFX11-TRUE16-NEXT: s_or_b32 s9, s9, s10
+; GFX11-TRUE16-NEXT: s_delay_alu instid0(SALU_CYCLE_1) | instskip(SKIP_1) | instid1(VALU_DEP_3)
+; GFX11-TRUE16-NEXT: v_dual_mov_b32 v4, s9 :: v_dual_and_b32 v1, 0xff, v15
+; GFX11-TRUE16-NEXT: v_and_b32_e32 v0, 0xff, v17
+; GFX11-TRUE16-NEXT: v_or_b32_e32 v7, v7, v11
+; GFX11-TRUE16-NEXT: v_and_b32_e32 v2, 0xff, v18
+; GFX11-TRUE16-NEXT: s_delay_alu instid0(VALU_DEP_4) | instskip(SKIP_2) | instid1(VALU_DEP_2)
+; GFX11-TRUE16-NEXT: v_or_b32_e32 v5, v1, v20
+; GFX11-TRUE16-NEXT: v_and_b32_e32 v1, 0xff, v12
+; GFX11-TRUE16-NEXT: v_and_b32_e32 v3, 0xff, v16
+; GFX11-TRUE16-NEXT: v_or_b32_e32 v1, v1, v13
+; GFX11-TRUE16-NEXT: s_delay_alu instid0(VALU_DEP_2) | instskip(SKIP_2) | instid1(VALU_DEP_4)
+; GFX11-TRUE16-NEXT: v_or_b32_e32 v6, v3, v21
+; GFX11-TRUE16-NEXT: v_mov_b32_e32 v3, s8
+; GFX11-TRUE16-NEXT: v_or_b32_e32 v2, v2, v19
+; GFX11-TRUE16-NEXT: v_mov_b16_e32 v7.h, v1.l
+; GFX11-TRUE16-NEXT: v_mov_b32_e32 v1, s6
+; GFX11-TRUE16-NEXT: v_or_b32_e32 v0, v0, v14
+; GFX11-TRUE16-NEXT: s_delay_alu instid0(VALU_DEP_4) | instskip(SKIP_1) | instid1(VALU_DEP_3)
+; GFX11-TRUE16-NEXT: v_mov_b16_e32 v5.h, v2.l
+; GFX11-TRUE16-NEXT: v_mov_b32_e32 v2, s7
+; GFX11-TRUE16-NEXT: v_mov_b16_e32 v4.h, v0.l
+; GFX11-TRUE16-NEXT: v_and_b32_e32 v0, 0xff, v8
+; GFX11-TRUE16-NEXT: s_delay_alu instid0(VALU_DEP_1) | instskip(NEXT) | instid1(VALU_DEP_1)
+; GFX11-TRUE16-NEXT: v_or_b32_e32 v0, v0, v9
+; GFX11-TRUE16-NEXT: v_mov_b16_e32 v6.h, v0.l
+; GFX11-TRUE16-NEXT: v_mov_b32_e32 v0, s5
+; GFX11-TRUE16-NEXT: s_and_not1_b32 vcc_lo, exec_lo, s4
+; GFX11-TRUE16-NEXT: s_cbranch_vccnz .LBB99_3
+; GFX11-TRUE16-NEXT: .LBB99_2: ; %cmp.true
+; GFX11-TRUE16-NEXT: s_add_i32 s28, s28, 3
+; GFX11-TRUE16-NEXT: s_lshl_b32 s5, s29, 8
+; GFX11-TRUE16-NEXT: s_and_b32 s4, s28, 0xff
+; GFX11-TRUE16-NEXT: s_add_i32 s24, s24, 3
+; GFX11-TRUE16-NEXT: s_or_b32 s4, s5, s4
+; GFX11-TRUE16-NEXT: s_and_b32 s5, s24, 0xff
+; GFX11-TRUE16-NEXT: s_lshl_b32 s6, s25, 8
+; GFX11-TRUE16-NEXT: s_add_i32 s26, s26, 3
+; GFX11-TRUE16-NEXT: s_or_b32 s5, s6, s5
+; GFX11-TRUE16-NEXT: s_and_b32 s6, s26, 0xff
+; GFX11-TRUE16-NEXT: s_lshl_b32 s7, s27, 8
+; GFX11-TRUE16-NEXT: s_add_i32 s20, s20, 3
+; GFX11-TRUE16-NEXT: s_or_b32 s6, s7, s6
+; GFX11-TRUE16-NEXT: s_and_b32 s7, s20, 0xff
+; GFX11-TRUE16-NEXT: s_lshl_b32 s8, s21, 8
+; GFX11-TRUE16-NEXT: s_add_i32 s22, s22, 3
+; GFX11-TRUE16-NEXT: s_or_b32 s7, s8, s7
+; GFX11-TRUE16-NEXT: s_and_b32 s8, s22, 0xff
+; GFX11-TRUE16-NEXT: s_lshl_b32 s9, s23, 8
+; GFX11-TRUE16-NEXT: s_add_i32 s16, s16, 3
+; GFX11-TRUE16-NEXT: s_or_b32 s8, s9, s8
+; GFX11-TRUE16-NEXT: s_and_b32 s9, s16, 0xff
+; GFX11-TRUE16-NEXT: s_lshl_b32 s10, s17, 8
+; GFX11-TRUE16-NEXT: s_add_i32 s18, s18, 3
+; GFX11-TRUE16-NEXT: s_add_i32 s0, s0, 3
+; GFX11-TRUE16-NEXT: s_add_i32 s2, s2, 3
+; GFX11-TRUE16-NEXT: s_or_b32 s9, s10, s9
+; GFX11-TRUE16-NEXT: s_and_b32 s10, s18, 0xff
+; GFX11-TRUE16-NEXT: s_lshl_b32 s11, s19, 8
+; GFX11-TRUE16-NEXT: s_and_b32 s0, s0, 0xff
+; GFX11-TRUE16-NEXT: s_lshl_b32 s1, s1, 8
+; GFX11-TRUE16-NEXT: s_and_b32 s2, s2, 0xff
+; GFX11-TRUE16-NEXT: s_lshl_b32 s3, s3, 8
+; GFX11-TRUE16-NEXT: s_or_b32 s10, s11, s10
+; GFX11-TRUE16-NEXT: s_or_b32 s0, s1, s0
+; GFX11-TRUE16-NEXT: s_or_b32 s1, s3, s2
+; GFX11-TRUE16-NEXT: s_addk_i32 s9, 0x300
+; GFX11-TRUE16-NEXT: s_addk_i32 s0, 0x300
+; GFX11-TRUE16-NEXT: s_addk_i32 s1, 0x300
+; GFX11-TRUE16-NEXT: s_addk_i32 s10, 0x300
+; GFX11-TRUE16-NEXT: s_addk_i32 s4, 0x300
+; GFX11-TRUE16-NEXT: v_add_nc_u32_e32 v1, 3, v12
+; GFX11-TRUE16-NEXT: s_pack_ll_b32_b16 s0, s0, s1
+; GFX11-TRUE16-NEXT: s_pack_ll_b32_b16 s1, s9, s10
+; GFX11-TRUE16-NEXT: v_add_nc_u32_e32 v0, 3, v10
+; GFX11-TRUE16-NEXT: s_addk_i32 s7, 0x300
+; GFX11-TRUE16-NEXT: s_addk_i32 s8, 0x300
+; GFX11-TRUE16-NEXT: v_add_nc_u32_e32 v3, 3, v8
+; GFX11-TRUE16-NEXT: s_pack_ll_b32_b16 s2, s7, s8
+; GFX11-TRUE16-NEXT: v_and_b32_e32 v0, 0xff, v0
+; GFX11-TRUE16-NEXT: v_and_b32_e32 v1, 0xff, v1
+; GFX11-TRUE16-NEXT: v_add_nc_u32_e32 v4, 3, v15
+; GFX11-TRUE16-NEXT: v_add_nc_u32_e32 v5, 3, v18
+; GFX11-TRUE16-NEXT: s_addk_i32 s5, 0x300
+; GFX11-TRUE16-NEXT: v_or_b32_e32 v0, v11, v0
+; GFX11-TRUE16-NEXT: v_or_b32_e32 v1, v13, v1
+; GFX11-TRUE16-NEXT: v_and_b32_e32 v4, 0xff, v4
+; GFX11-TRUE16-NEXT: s_addk_i32 s6, 0x300
+; GFX11-TRUE16-NEXT: s_delay_alu instid0(VALU_DEP_3) | instskip(NEXT) | instid1(VALU_DEP_3)
+; GFX11-TRUE16-NEXT: v_add_nc_u32_e32 v7, 0x300, v0
+; GFX11-TRUE16-NEXT: v_add_nc_u32_e32 v0, 0x300, v1
+; GFX11-TRUE16-NEXT: s_pack_ll_b32_b16 s3, s5, s6
+; GFX11-TRUE16-NEXT: v_add_nc_u32_e32 v2, 3, v16
+; GFX11-TRUE16-NEXT: s_delay_alu instid0(VALU_DEP_2) | instskip(SKIP_1) | instid1(VALU_DEP_1)
+; GFX11-TRUE16-NEXT: v_mov_b16_e32 v7.h, v0.l
+; GFX11-TRUE16-NEXT: v_dual_mov_b32 v0, s0 :: v_dual_and_b32 v3, 0xff, v3
+; GFX11-TRUE16-NEXT: v_or_b32_e32 v1, v9, v3
+; GFX11-TRUE16-NEXT: v_or_b32_e32 v3, v20, v4
+; GFX11-TRUE16-NEXT: v_and_b32_e32 v4, 0xff, v5
+; GFX11-TRUE16-NEXT: s_delay_alu instid0(VALU_DEP_3) | instskip(NEXT) | instid1(VALU_DEP_3)
+; GFX11-TRUE16-NEXT: v_add_nc_u32_e32 v1, 0x300, v1
+; GFX11-TRUE16-NEXT: v_add_nc_u32_e32 v5, 0x300, v3
+; GFX11-TRUE16-NEXT: s_delay_alu instid0(VALU_DEP_3) | instskip(NEXT) | instid1(VALU_DEP_1)
+; GFX11-TRUE16-NEXT: v_or_b32_e32 v3, v19, v4
+; GFX11-TRUE16-NEXT: v_dual_mov_b32 v4, s4 :: v_dual_add_nc_u32 v3, 0x300, v3
+; GFX11-TRUE16-NEXT: s_delay_alu instid0(VALU_DEP_1) | instskip(SKIP_1) | instid1(VALU_DEP_1)
+; GFX11-TRUE16-NEXT: v_mov_b16_e32 v5.h, v3.l
+; GFX11-TRUE16-NEXT: v_dual_mov_b32 v3, s3 :: v_dual_and_b32 v2, 0xff, v2
+; GFX11-TRUE16-NEXT: v_or_b32_e32 v2, v21, v2
+; GFX11-TRUE16-NEXT: s_delay_alu instid0(VALU_DEP_1) | instskip(SKIP_2) | instid1(VALU_DEP_2)
+; GFX11-TRUE16-NEXT: v_add_nc_u32_e32 v6, 0x300, v2
+; GFX11-TRUE16-NEXT: v_add_nc_u32_e32 v2, 3, v17
+; GFX11-TRUE16-NEXT: v_mov_b16_e32 v6.h, v1.l
+; GFX11-TRUE16-NEXT: v_dual_mov_b32 v1, s1 :: v_dual_and_b32 v2, 0xff, v2
+; GFX11-TRUE16-NEXT: s_delay_alu instid0(VALU_DEP_1) | instskip(NEXT) | instid1(VALU_DEP_1)
+; GFX11-TRUE16-NEXT: v_or_b32_e32 v2, v14, v2
+; GFX11-TRUE16-NEXT: v_add_nc_u32_e32 v2, 0x300, v2
+; GFX11-TRUE16-NEXT: s_delay_alu instid0(VALU_DEP_1)
+; GFX11-TRUE16-NEXT: v_mov_b16_e32 v4.h, v2.l
+; GFX11-TRUE16-NEXT: v_mov_b32_e32 v2, s2
+; GFX11-TRUE16-NEXT: .LBB99_3: ; %end
+; GFX11-TRUE16-NEXT: s_setpc_b64 s[30:31]
+; GFX11-TRUE16-NEXT: .LBB99_4:
+; GFX11-TRUE16-NEXT: ; implicit-def: $vgpr0_vgpr1_vgpr2_vgpr3_vgpr4_vgpr5_vgpr6_vgpr7
+; GFX11-TRUE16-NEXT: s_branch .LBB99_2
+;
+; GFX11-FAKE16-LABEL: bitcast_v32i8_to_v16i16_scalar:
+; GFX11-FAKE16: ; %bb.0:
+; GFX11-FAKE16-NEXT: s_waitcnt vmcnt(0) expcnt(0) lgkmcnt(0)
+; GFX11-FAKE16-NEXT: v_cmp_ne_u32_e32 vcc_lo, 0, v14
+; GFX11-FAKE16-NEXT: v_dual_mov_b32 v16, v6 :: v_dual_mov_b32 v17, v0
+; GFX11-FAKE16-NEXT: v_dual_mov_b32 v18, v4 :: v_dual_mov_b32 v15, v2
+; GFX11-FAKE16-NEXT: v_lshlrev_b32_e32 v14, 8, v1
+; GFX11-FAKE16-NEXT: v_lshlrev_b32_e32 v20, 8, v3
+; GFX11-FAKE16-NEXT: v_lshlrev_b32_e32 v19, 8, v5
+; GFX11-FAKE16-NEXT: v_lshlrev_b32_e32 v21, 8, v7
+; GFX11-FAKE16-NEXT: v_lshlrev_b32_e32 v9, 8, v9
+; GFX11-FAKE16-NEXT: v_lshlrev_b32_e32 v11, 8, v11
+; GFX11-FAKE16-NEXT: v_lshlrev_b32_e32 v13, 8, v13
+; GFX11-FAKE16-NEXT: s_mov_b32 s4, 0
+; GFX11-FAKE16-NEXT: s_and_b32 s5, vcc_lo, exec_lo
+; GFX11-FAKE16-NEXT: s_cbranch_scc0 .LBB99_4
+; GFX11-FAKE16-NEXT: ; %bb.1: ; %cmp.false
+; GFX11-FAKE16-NEXT: s_and_b32 s5, s0, 0xff
+; GFX11-FAKE16-NEXT: s_lshl_b32 s6, s1, 8
+; GFX11-FAKE16-NEXT: s_and_b32 s7, s2, 0xff
+; GFX11-FAKE16-NEXT: s_lshl_b32 s8, s3, 8
+; GFX11-FAKE16-NEXT: s_or_b32 s5, s5, s6
+; GFX11-FAKE16-NEXT: s_or_b32 s6, s7, s8
+; GFX11-FAKE16-NEXT: s_and_b32 s7, s16, 0xff
+; GFX11-FAKE16-NEXT: s_lshl_b32 s8, s17, 8
+; GFX11-FAKE16-NEXT: s_and_b32 s9, s18, 0xff
+; GFX11-FAKE16-NEXT: s_lshl_b32 s10, s19, 8
+; GFX11-FAKE16-NEXT: s_or_b32 s7, s7, s8
+; GFX11-FAKE16-NEXT: s_or_b32 s8, s9, s10
+; GFX11-FAKE16-NEXT: s_pack_ll_b32_b16 s5, s5, s6
+; GFX11-FAKE16-NEXT: s_pack_ll_b32_b16 s6, s7, s8
+; GFX11-FAKE16-NEXT: s_and_b32 s7, s20, 0xff
+; GFX11-FAKE16-NEXT: s_lshl_b32 s8, s21, 8
+; GFX11-FAKE16-NEXT: s_and_b32 s9, s22, 0xff
+; GFX11-FAKE16-NEXT: s_lshl_b32 s10, s23, 8
+; GFX11-FAKE16-NEXT: s_or_b32 s7, s7, s8
+; GFX11-FAKE16-NEXT: s_or_b32 s8, s9, s10
+; GFX11-FAKE16-NEXT: s_and_b32 s9, s24, 0xff
+; GFX11-FAKE16-NEXT: s_lshl_b32 s10, s25, 8
+; GFX11-FAKE16-NEXT: v_and_b32_e32 v1, 0xff, v15
+; GFX11-FAKE16-NEXT: s_pack_ll_b32_b16 s7, s7, s8
+; GFX11-FAKE16-NEXT: s_or_b32 s8, s9, s10
+; GFX11-FAKE16-NEXT: s_and_b32 s9, s26, 0xff
+; GFX11-FAKE16-NEXT: s_lshl_b32 s10, s27, 8
+; GFX11-FAKE16-NEXT: v_and_b32_e32 v4, 0xff, v16
+; GFX11-FAKE16-NEXT: s_or_b32 s9, s9, s10
+; GFX11-FAKE16-NEXT: v_or_b32_e32 v1, v1, v20
+; GFX11-FAKE16-NEXT: v_and_b32_e32 v5, 0xff, v10
+; GFX11-FAKE16-NEXT: s_pack_ll_b32_b16 s8, s8, s9
+; GFX11-FAKE16-NEXT: v_and_b32_e32 v2, 0xff, v18
+; GFX11-FAKE16-NEXT: v_and_b32_e32 v0, 0xff, v17
+; GFX11-FAKE16-NEXT: v_and_b32_e32 v6, 0xff, v8
+; GFX11-FAKE16-NEXT: v_or_b32_e32 v4, v4, v21
+; GFX11-FAKE16-NEXT: v_and_b32_e32 v7, 0xff, v12
+; GFX11-FAKE16-NEXT: v_or_b32_e32 v2, v2, v19
+; GFX11-FAKE16-NEXT: v_or_b32_e32 v5, v5, v11
+; GFX11-FAKE16-NEXT: v_and_b32_e32 v1, 0xffff, v1
+; GFX11-FAKE16-NEXT: s_and_b32 s11, s28, 0xff
+; GFX11-FAKE16-NEXT: s_lshl_b32 s12, s29, 8
+; GFX11-FAKE16-NEXT: v_or_b32_e32 v6, v6, v9
+; GFX11-FAKE16-NEXT: s_or_b32 s10, s11, s12
+; GFX11-FAKE16-NEXT: v_and_b32_e32 v22, 0xffff, v4
+; GFX11-FAKE16-NEXT: v_and_b32_e64 v3, 0xffff, s10
+; GFX11-FAKE16-NEXT: v_or_b32_e32 v7, v7, v13
+; GFX11-FAKE16-NEXT: v_and_b32_e32 v23, 0xffff, v5
+; GFX11-FAKE16-NEXT: v_lshl_or_b32 v5, v2, 16, v1
+; GFX11-FAKE16-NEXT: v_mov_b32_e32 v1, s6
+; GFX11-FAKE16-NEXT: v_or_b32_e32 v0, v0, v14
+; GFX11-FAKE16-NEXT: v_lshl_or_b32 v6, v6, 16, v22
+; GFX11-FAKE16-NEXT: v_lshl_or_b32 v7, v7, 16, v23
+; GFX11-FAKE16-NEXT: v_mov_b32_e32 v2, s7
+; GFX11-FAKE16-NEXT: s_delay_alu instid0(VALU_DEP_4)
+; GFX11-FAKE16-NEXT: v_lshl_or_b32 v4, v0, 16, v3
+; GFX11-FAKE16-NEXT: v_dual_mov_b32 v0, s5 :: v_dual_mov_b32 v3, s8
+; GFX11-FAKE16-NEXT: s_and_not1_b32 vcc_lo, exec_lo, s4
+; GFX11-FAKE16-NEXT: s_cbranch_vccnz .LBB99_3
+; GFX11-FAKE16-NEXT: .LBB99_2: ; %cmp.true
+; GFX11-FAKE16-NEXT: s_add_i32 s28, s28, 3
+; GFX11-FAKE16-NEXT: s_lshl_b32 s5, s29, 8
+; GFX11-FAKE16-NEXT: s_and_b32 s4, s28, 0xff
+; GFX11-FAKE16-NEXT: s_add_i32 s24, s24, 3
+; GFX11-FAKE16-NEXT: s_or_b32 s4, s5, s4
+; GFX11-FAKE16-NEXT: s_and_b32 s5, s24, 0xff
+; GFX11-FAKE16-NEXT: s_lshl_b32 s6, s25, 8
+; GFX11-FAKE16-NEXT: s_add_i32 s26, s26, 3
+; GFX11-FAKE16-NEXT: s_or_b32 s5, s6, s5
+; GFX11-FAKE16-NEXT: s_and_b32 s6, s26, 0xff
+; GFX11-FAKE16-NEXT: s_lshl_b32 s7, s27, 8
+; GFX11-FAKE16-NEXT: s_add_i32 s20, s20, 3
+; GFX11-FAKE16-NEXT: s_or_b32 s6, s7, s6
+; GFX11-FAKE16-NEXT: s_and_b32 s7, s20, 0xff
+; GFX11-FAKE16-NEXT: s_lshl_b32 s8, s21, 8
+; GFX11-FAKE16-NEXT: s_add_i32 s22, s22, 3
+; GFX11-FAKE16-NEXT: s_or_b32 s7, s8, s7
+; GFX11-FAKE16-NEXT: s_and_b32 s8, s22, 0xff
+; GFX11-FAKE16-NEXT: s_lshl_b32 s9, s23, 8
+; GFX11-FAKE16-NEXT: s_add_i32 s16, s16, 3
+; GFX11-FAKE16-NEXT: s_or_b32 s8, s9, s8
+; GFX11-FAKE16-NEXT: s_and_b32 s9, s16, 0xff
+; GFX11-FAKE16-NEXT: s_lshl_b32 s10, s17, 8
+; GFX11-FAKE16-NEXT: s_add_i32 s18, s18, 3
+; GFX11-FAKE16-NEXT: s_add_i32 s0, s0, 3
+; GFX11-FAKE16-NEXT: s_add_i32 s2, s2, 3
+; GFX11-FAKE16-NEXT: s_or_b32 s9, s10, s9
+; GFX11-FAKE16-NEXT: s_and_b32 s10, s18, 0xff
+; GFX11-FAKE16-NEXT: s_lshl_b32 s11, s19, 8
+; GFX11-FAKE16-NEXT: s_and_b32 s0, s0, 0xff
+; GFX11-FAKE16-NEXT: s_lshl_b32 s1, s1, 8
+; GFX11-FAKE16-NEXT: s_and_b32 s2, s2, 0xff
+; GFX11-FAKE16-NEXT: s_lshl_b32 s3, s3, 8
+; GFX11-FAKE16-NEXT: s_or_b32 s10, s11, s10
+; GFX11-FAKE16-NEXT: s_or_b32 s0, s1, s0
+; GFX11-FAKE16-NEXT: s_or_b32 s1, s3, s2
+; GFX11-FAKE16-NEXT: s_addk_i32 s5, 0x300
+; GFX11-FAKE16-NEXT: s_addk_i32 s6, 0x300
+; GFX11-FAKE16-NEXT: s_addk_i32 s9, 0x300
+; GFX11-FAKE16-NEXT: s_addk_i32 s0, 0x300
+; GFX11-FAKE16-NEXT: s_addk_i32 s1, 0x300
+; GFX11-FAKE16-NEXT: s_addk_i32 s10, 0x300
+; GFX11-FAKE16-NEXT: v_add_nc_u32_e32 v4, 3, v15
+; GFX11-FAKE16-NEXT: s_pack_ll_b32_b16 s0, s0, s1
+; GFX11-FAKE16-NEXT: s_pack_ll_b32_b16 s1, s9, s10
+; GFX11-FAKE16-NEXT: v_add_nc_u32_e32 v0, 3, v10
+; GFX11-FAKE16-NEXT: s_pack_ll_b32_b16 s3, s5, s6
+; GFX11-FAKE16-NEXT: v_add_nc_u32_e32 v2, 3, v16
+; GFX11-FAKE16-NEXT: s_addk_i32 s7, 0x300
+; GFX11-FAKE16-NEXT: s_addk_i32 s8, 0x300
+; GFX11-FAKE16-NEXT: v_add_nc_u32_e32 v1, 3, v12
+; GFX11-FAKE16-NEXT: v_and_b32_e32 v0, 0xff, v0
+; GFX11-FAKE16-NEXT: v_add_nc_u32_e32 v5, 3, v18
+; GFX11-FAKE16-NEXT: v_and_b32_e32 v2, 0xff, v2
+; GFX11-FAKE16-NEXT: v_and_b32_e32 v4, 0xff, v4
+; GFX11-FAKE16-NEXT: v_add_nc_u32_e32 v6, 3, v17
+; GFX11-FAKE16-NEXT: s_pack_ll_b32_b16 s2, s7, s8
+; GFX11-FAKE16-NEXT: v_add_nc_u32_e32 v3, 3, v8
+; GFX11-FAKE16-NEXT: v_and_b32_e32 v1, 0xff, v1
+; GFX11-FAKE16-NEXT: v_or_b32_e32 v0, v11, v0
+; GFX11-FAKE16-NEXT: v_or_b32_e32 v2, v21, v2
+; GFX11-FAKE16-NEXT: v_or_b32_e32 v4, v20, v4
+; GFX11-FAKE16-NEXT: v_and_b32_e32 v3, 0xff, v3
+; GFX11-FAKE16-NEXT: v_and_b32_e32 v5, 0xff, v5
+; GFX11-FAKE16-NEXT: v_and_b32_e32 v6, 0xff, v6
+; GFX11-FAKE16-NEXT: v_or_b32_e32 v1, v13, v1
+; GFX11-FAKE16-NEXT: v_add_nc_u32_e32 v0, 0x300, v0
+; GFX11-FAKE16-NEXT: v_or_b32_e32 v3, v9, v3
+; GFX11-FAKE16-NEXT: v_add_nc_u32_e32 v2, 0x300, v2
+; GFX11-FAKE16-NEXT: v_add_nc_u32_e32 v4, 0x300, v4
+; GFX11-FAKE16-NEXT: v_or_b32_e32 v5, v19, v5
+; GFX11-FAKE16-NEXT: v_or_b32_e32 v6, v14, v6
+; GFX11-FAKE16-NEXT: s_addk_i32 s4, 0x300
+; GFX11-FAKE16-NEXT: v_add_nc_u32_e32 v1, 0x300, v1
+; GFX11-FAKE16-NEXT: v_add_nc_u32_e32 v3, 0x300, v3
+; GFX11-FAKE16-NEXT: v_add_nc_u32_e32 v5, 0x300, v5
+; GFX11-FAKE16-NEXT: v_add_nc_u32_e32 v6, 0x300, v6
+; GFX11-FAKE16-NEXT: v_and_b32_e64 v7, 0xffff, s4
+; GFX11-FAKE16-NEXT: v_and_b32_e32 v8, 0xffff, v4
+; GFX11-FAKE16-NEXT: v_and_b32_e32 v2, 0xffff, v2
+; GFX11-FAKE16-NEXT: v_and_b32_e32 v0, 0xffff, v0
+; GFX11-FAKE16-NEXT: s_delay_alu instid0(VALU_DEP_4) | instskip(NEXT) | instid1(VALU_DEP_4)
+; GFX11-FAKE16-NEXT: v_lshl_or_b32 v4, v6, 16, v7
+; GFX11-FAKE16-NEXT: v_lshl_or_b32 v5, v5, 16, v8
+; GFX11-FAKE16-NEXT: s_delay_alu instid0(VALU_DEP_4) | instskip(NEXT) | instid1(VALU_DEP_4)
+; GFX11-FAKE16-NEXT: v_lshl_or_b32 v6, v3, 16, v2
+; GFX11-FAKE16-NEXT: v_lshl_or_b32 v7, v1, 16, v0
+; GFX11-FAKE16-NEXT: v_dual_mov_b32 v0, s0 :: v_dual_mov_b32 v1, s1
+; GFX11-FAKE16-NEXT: v_dual_mov_b32 v2, s2 :: v_dual_mov_b32 v3, s3
+; GFX11-FAKE16-NEXT: .LBB99_3: ; %end
+; GFX11-FAKE16-NEXT: s_setpc_b64 s[30:31]
+; GFX11-FAKE16-NEXT: .LBB99_4:
+; GFX11-FAKE16-NEXT: ; implicit-def: $vgpr0_vgpr1_vgpr2_vgpr3_vgpr4_vgpr5_vgpr6_vgpr7
+; GFX11-FAKE16-NEXT: s_branch .LBB99_2
%cmp = icmp eq i32 %b, 0
br i1 %cmp, label %cmp.true, label %cmp.false
@@ -34430,192 +35470,369 @@ define inreg <16 x half> @bitcast_v16bf16_to_v16f16_scalar(<16 x bfloat> inreg %
; GFX9-NEXT: v_mov_b32_e32 v7, s23
; GFX9-NEXT: s_setpc_b64 s[30:31]
;
-; GFX11-LABEL: bitcast_v16bf16_to_v16f16_scalar:
-; GFX11: ; %bb.0:
-; GFX11-NEXT: s_waitcnt vmcnt(0) expcnt(0) lgkmcnt(0)
-; GFX11-NEXT: s_mov_b32 s7, s19
-; GFX11-NEXT: s_mov_b32 s6, s18
-; GFX11-NEXT: s_mov_b32 s5, s17
-; GFX11-NEXT: s_mov_b32 s4, s16
-; GFX11-NEXT: s_cmp_lg_u32 s20, 0
-; GFX11-NEXT: s_mov_b32 s8, 0
-; GFX11-NEXT: s_cbranch_scc0 .LBB103_3
-; GFX11-NEXT: ; %bb.1: ; %Flow
-; GFX11-NEXT: s_and_not1_b32 vcc_lo, exec_lo, s8
-; GFX11-NEXT: s_cbranch_vccnz .LBB103_4
-; GFX11-NEXT: .LBB103_2: ; %cmp.true
-; GFX11-NEXT: s_and_b32 s8, s0, 0xffff0000
-; GFX11-NEXT: s_lshl_b32 s0, s0, 16
-; GFX11-NEXT: v_add_f32_e64 v0, 0x40c00000, s8
-; GFX11-NEXT: v_add_f32_e64 v1, 0x40c00000, s0
-; GFX11-NEXT: s_and_b32 s0, s1, 0xffff0000
-; GFX11-NEXT: s_lshl_b32 s1, s1, 16
-; GFX11-NEXT: v_add_f32_e64 v4, 0x40c00000, s0
-; GFX11-NEXT: v_bfe_u32 v2, v0, 16, 1
-; GFX11-NEXT: v_bfe_u32 v3, v1, 16, 1
-; GFX11-NEXT: v_or_b32_e32 v6, 0x400000, v0
-; GFX11-NEXT: v_or_b32_e32 v8, 0x400000, v1
-; GFX11-NEXT: v_bfe_u32 v7, v4, 16, 1
-; GFX11-NEXT: v_add_nc_u32_e32 v2, v2, v0
-; GFX11-NEXT: v_cmp_u_f32_e32 vcc_lo, v0, v0
-; GFX11-NEXT: v_add_f32_e64 v5, 0x40c00000, s1
-; GFX11-NEXT: s_lshl_b32 s0, s2, 16
-; GFX11-NEXT: v_add_nc_u32_e32 v7, v7, v4
-; GFX11-NEXT: v_add_nc_u32_e32 v2, 0x7fff, v2
-; GFX11-NEXT: v_add_nc_u32_e32 v3, v3, v1
-; GFX11-NEXT: v_bfe_u32 v9, v5, 16, 1
-; GFX11-NEXT: s_and_b32 s8, s2, 0xffff0000
-; GFX11-NEXT: s_lshl_b32 s1, s4, 16
-; GFX11-NEXT: v_cndmask_b32_e32 v0, v2, v6, vcc_lo
-; GFX11-NEXT: v_cmp_u_f32_e32 vcc_lo, v1, v1
-; GFX11-NEXT: v_or_b32_e32 v2, 0x400000, v4
-; GFX11-NEXT: v_add_nc_u32_e32 v6, 0x7fff, v7
-; GFX11-NEXT: v_add_nc_u32_e32 v3, 0x7fff, v3
-; GFX11-NEXT: v_add_f32_e64 v7, 0x40c00000, s8
-; GFX11-NEXT: v_lshrrev_b32_e32 v0, 16, v0
-; GFX11-NEXT: s_delay_alu instid0(VALU_DEP_3)
-; GFX11-NEXT: v_cndmask_b32_e32 v1, v3, v8, vcc_lo
-; GFX11-NEXT: v_cmp_u_f32_e32 vcc_lo, v4, v4
-; GFX11-NEXT: v_add_nc_u32_e32 v3, v9, v5
-; GFX11-NEXT: v_or_b32_e32 v4, 0x400000, v5
-; GFX11-NEXT: v_or_b32_e32 v9, 0x400000, v7
-; GFX11-NEXT: v_lshrrev_b32_e32 v1, 16, v1
-; GFX11-NEXT: v_cndmask_b32_e32 v2, v6, v2, vcc_lo
-; GFX11-NEXT: v_cmp_u_f32_e32 vcc_lo, v5, v5
-; GFX11-NEXT: v_bfe_u32 v6, v7, 16, 1
-; GFX11-NEXT: s_delay_alu instid0(VALU_DEP_3) | instskip(SKIP_2) | instid1(VALU_DEP_1)
-; GFX11-NEXT: v_lshrrev_b32_e32 v8, 16, v2
-; GFX11-NEXT: v_add_f32_e64 v2, 0x40c00000, s0
-; GFX11-NEXT: s_and_b32 s0, s3, 0xffff0000
-; GFX11-NEXT: v_bfe_u32 v5, v2, 16, 1
-; GFX11-NEXT: s_delay_alu instid0(VALU_DEP_1) | instskip(NEXT) | instid1(VALU_DEP_1)
-; GFX11-NEXT: v_add_nc_u32_e32 v5, v5, v2
-; GFX11-NEXT: v_add_nc_u32_e32 v5, 0x7fff, v5
-; GFX11-NEXT: v_add_nc_u32_e32 v3, 0x7fff, v3
-; GFX11-NEXT: s_delay_alu instid0(VALU_DEP_1) | instskip(SKIP_3) | instid1(VALU_DEP_3)
-; GFX11-NEXT: v_dual_cndmask_b32 v3, v3, v4 :: v_dual_add_nc_u32 v4, v6, v7
-; GFX11-NEXT: v_add_f32_e64 v6, 0x40c00000, s0
-; GFX11-NEXT: v_cmp_u_f32_e32 vcc_lo, v7, v7
-; GFX11-NEXT: s_lshl_b32 s0, s3, 16
-; GFX11-NEXT: v_lshrrev_b32_e32 v3, 16, v3
-; GFX11-NEXT: v_add_nc_u32_e32 v4, 0x7fff, v4
-; GFX11-NEXT: v_bfe_u32 v10, v6, 16, 1
-; GFX11-NEXT: v_add_f32_e64 v7, 0x40c00000, s0
-; GFX11-NEXT: s_and_b32 s0, s4, 0xffff0000
-; GFX11-NEXT: s_delay_alu instid0(VALU_DEP_3) | instskip(SKIP_3) | instid1(VALU_DEP_4)
-; GFX11-NEXT: v_cndmask_b32_e32 v4, v4, v9, vcc_lo
-; GFX11-NEXT: v_or_b32_e32 v9, 0x400000, v2
-; GFX11-NEXT: v_add_nc_u32_e32 v10, v10, v6
-; GFX11-NEXT: v_cmp_u_f32_e32 vcc_lo, v2, v2
-; GFX11-NEXT: v_lshrrev_b32_e32 v11, 16, v4
-; GFX11-NEXT: v_bfe_u32 v4, v7, 16, 1
-; GFX11-NEXT: s_delay_alu instid0(VALU_DEP_4)
-; GFX11-NEXT: v_dual_cndmask_b32 v2, v5, v9 :: v_dual_add_nc_u32 v5, 0x7fff, v10
-; GFX11-NEXT: v_or_b32_e32 v9, 0x400000, v6
-; GFX11-NEXT: v_add_f32_e64 v10, 0x40c00000, s0
-; GFX11-NEXT: v_cmp_u_f32_e32 vcc_lo, v6, v6
-; GFX11-NEXT: s_and_b32 s0, s5, 0xffff0000
-; GFX11-NEXT: v_add_nc_u32_e32 v4, v4, v7
-; GFX11-NEXT: v_add_f32_e64 v12, 0x40c00000, s0
-; GFX11-NEXT: v_or_b32_e32 v6, 0x400000, v7
-; GFX11-NEXT: v_cndmask_b32_e32 v5, v5, v9, vcc_lo
-; GFX11-NEXT: v_bfe_u32 v9, v10, 16, 1
-; GFX11-NEXT: v_add_nc_u32_e32 v4, 0x7fff, v4
-; GFX11-NEXT: v_cmp_u_f32_e32 vcc_lo, v7, v7
-; GFX11-NEXT: s_lshl_b32 s0, s5, 16
-; GFX11-NEXT: v_or_b32_e32 v14, 0x400000, v10
-; GFX11-NEXT: v_add_nc_u32_e32 v7, v9, v10
-; GFX11-NEXT: v_lshrrev_b32_e32 v9, 16, v5
-; GFX11-NEXT: v_bfe_u32 v5, v12, 16, 1
-; GFX11-NEXT: v_cndmask_b32_e32 v4, v4, v6, vcc_lo
-; GFX11-NEXT: v_add_f32_e64 v6, 0x40c00000, s1
-; GFX11-NEXT: v_add_nc_u32_e32 v7, 0x7fff, v7
-; GFX11-NEXT: v_add_f32_e64 v15, 0x40c00000, s0
-; GFX11-NEXT: v_add_nc_u32_e32 v5, v5, v12
-; GFX11-NEXT: v_cmp_u_f32_e32 vcc_lo, v10, v10
-; GFX11-NEXT: v_bfe_u32 v13, v6, 16, 1
-; GFX11-NEXT: v_or_b32_e32 v10, 0x400000, v12
-; GFX11-NEXT: s_and_b32 s0, s6, 0xffff0000
-; GFX11-NEXT: v_add_nc_u32_e32 v5, 0x7fff, v5
-; GFX11-NEXT: v_cndmask_b32_e32 v7, v7, v14, vcc_lo
-; GFX11-NEXT: v_bfe_u32 v14, v15, 16, 1
-; GFX11-NEXT: v_cmp_u_f32_e32 vcc_lo, v12, v12
-; GFX11-NEXT: v_add_f32_e64 v12, 0x40c00000, s0
-; GFX11-NEXT: s_lshl_b32 s0, s6, 16
-; GFX11-NEXT: v_add_nc_u32_e32 v13, v13, v6
-; GFX11-NEXT: v_or_b32_e32 v16, 0x400000, v6
-; GFX11-NEXT: v_dual_cndmask_b32 v5, v5, v10 :: v_dual_add_nc_u32 v10, v14, v15
-; GFX11-NEXT: v_add_f32_e64 v14, 0x40c00000, s0
-; GFX11-NEXT: s_lshl_b32 s0, s7, 16
-; GFX11-NEXT: v_add_nc_u32_e32 v13, 0x7fff, v13
-; GFX11-NEXT: v_cmp_u_f32_e32 vcc_lo, v6, v6
-; GFX11-NEXT: v_add_f32_e64 v17, 0x40c00000, s0
-; GFX11-NEXT: v_bfe_u32 v18, v14, 16, 1
-; GFX11-NEXT: v_add_nc_u32_e32 v10, 0x7fff, v10
-; GFX11-NEXT: s_and_b32 s0, s7, 0xffff0000
-; GFX11-NEXT: v_cndmask_b32_e32 v6, v13, v16, vcc_lo
-; GFX11-NEXT: v_or_b32_e32 v16, 0x400000, v15
-; GFX11-NEXT: v_bfe_u32 v20, v17, 16, 1
-; GFX11-NEXT: v_cmp_u_f32_e32 vcc_lo, v15, v15
-; GFX11-NEXT: v_add_nc_u32_e32 v15, v18, v14
-; GFX11-NEXT: v_bfe_u32 v13, v12, 16, 1
-; GFX11-NEXT: v_add_f32_e64 v19, 0x40c00000, s0
-; GFX11-NEXT: v_add_nc_u32_e32 v18, v20, v17
-; GFX11-NEXT: s_delay_alu instid0(VALU_DEP_4)
-; GFX11-NEXT: v_dual_cndmask_b32 v10, v10, v16 :: v_dual_add_nc_u32 v15, 0x7fff, v15
-; GFX11-NEXT: v_or_b32_e32 v21, 0x400000, v14
-; GFX11-NEXT: v_cmp_u_f32_e32 vcc_lo, v14, v14
-; GFX11-NEXT: v_add_nc_u32_e32 v13, v13, v12
-; GFX11-NEXT: v_bfe_u32 v16, v19, 16, 1
-; GFX11-NEXT: v_add_nc_u32_e32 v18, 0x7fff, v18
-; GFX11-NEXT: v_or_b32_e32 v22, 0x400000, v17
-; GFX11-NEXT: v_cndmask_b32_e32 v14, v15, v21, vcc_lo
-; GFX11-NEXT: v_cmp_u_f32_e32 vcc_lo, v17, v17
-; GFX11-NEXT: v_add_nc_u32_e32 v13, 0x7fff, v13
-; GFX11-NEXT: v_or_b32_e32 v20, 0x400000, v12
-; GFX11-NEXT: v_add_nc_u32_e32 v16, v16, v19
-; GFX11-NEXT: v_lshrrev_b32_e32 v14, 16, v14
-; GFX11-NEXT: v_cndmask_b32_e32 v17, v18, v22, vcc_lo
-; GFX11-NEXT: v_cmp_u_f32_e32 vcc_lo, v12, v12
-; GFX11-NEXT: v_lshrrev_b32_e32 v10, 16, v10
-; GFX11-NEXT: v_add_nc_u32_e32 v15, 0x7fff, v16
-; GFX11-NEXT: v_or_b32_e32 v16, 0x400000, v19
-; GFX11-NEXT: v_lshrrev_b32_e32 v2, 16, v2
-; GFX11-NEXT: v_cndmask_b32_e32 v12, v13, v20, vcc_lo
-; GFX11-NEXT: v_cmp_u_f32_e32 vcc_lo, v19, v19
-; GFX11-NEXT: v_lshrrev_b32_e32 v4, 16, v4
-; GFX11-NEXT: v_lshrrev_b32_e32 v5, 16, v5
-; GFX11-NEXT: v_and_b32_e32 v14, 0xffff, v14
-; GFX11-NEXT: v_lshrrev_b32_e32 v12, 16, v12
-; GFX11-NEXT: v_cndmask_b32_e32 v13, v15, v16, vcc_lo
-; GFX11-NEXT: v_lshrrev_b32_e32 v15, 16, v17
-; GFX11-NEXT: v_and_b32_e32 v10, 0xffff, v10
-; GFX11-NEXT: v_lshrrev_b32_e32 v16, 16, v6
-; GFX11-NEXT: v_lshrrev_b32_e32 v17, 16, v7
-; GFX11-NEXT: v_lshrrev_b32_e32 v13, 16, v13
-; GFX11-NEXT: v_and_b32_e32 v15, 0xffff, v15
-; GFX11-NEXT: v_lshl_or_b32 v6, v12, 16, v14
-; GFX11-NEXT: v_lshl_or_b32 v5, v5, 16, v10
-; GFX11-NEXT: v_and_b32_e32 v10, 0xffff, v16
-; GFX11-NEXT: v_and_b32_e32 v12, 0xffff, v4
-; GFX11-NEXT: v_lshl_or_b32 v7, v13, 16, v15
-; GFX11-NEXT: v_and_b32_e32 v2, 0xffff, v2
-; GFX11-NEXT: v_and_b32_e32 v13, 0xffff, v3
-; GFX11-NEXT: v_and_b32_e32 v14, 0xffff, v1
-; GFX11-NEXT: v_lshl_or_b32 v4, v17, 16, v10
-; GFX11-NEXT: v_lshl_or_b32 v3, v9, 16, v12
-; GFX11-NEXT: v_lshl_or_b32 v2, v11, 16, v2
-; GFX11-NEXT: v_lshl_or_b32 v1, v8, 16, v13
-; GFX11-NEXT: v_lshl_or_b32 v0, v0, 16, v14
-; GFX11-NEXT: s_setpc_b64 s[30:31]
-; GFX11-NEXT: .LBB103_3:
-; GFX11-NEXT: s_branch .LBB103_2
-; GFX11-NEXT: .LBB103_4:
-; GFX11-NEXT: v_dual_mov_b32 v0, s0 :: v_dual_mov_b32 v1, s1
-; GFX11-NEXT: v_dual_mov_b32 v2, s2 :: v_dual_mov_b32 v3, s3
-; GFX11-NEXT: v_dual_mov_b32 v4, s4 :: v_dual_mov_b32 v5, s5
-; GFX11-NEXT: v_dual_mov_b32 v6, s6 :: v_dual_mov_b32 v7, s7
-; GFX11-NEXT: s_setpc_b64 s[30:31]
+; GFX11-TRUE16-LABEL: bitcast_v16bf16_to_v16f16_scalar:
+; GFX11-TRUE16: ; %bb.0:
+; GFX11-TRUE16-NEXT: s_waitcnt vmcnt(0) expcnt(0) lgkmcnt(0)
+; GFX11-TRUE16-NEXT: s_mov_b32 s7, s19
+; GFX11-TRUE16-NEXT: s_mov_b32 s6, s18
+; GFX11-TRUE16-NEXT: s_mov_b32 s5, s17
+; GFX11-TRUE16-NEXT: s_mov_b32 s4, s16
+; GFX11-TRUE16-NEXT: s_cmp_lg_u32 s20, 0
+; GFX11-TRUE16-NEXT: s_mov_b32 s8, 0
+; GFX11-TRUE16-NEXT: s_cbranch_scc0 .LBB103_3
+; GFX11-TRUE16-NEXT: ; %bb.1: ; %Flow
+; GFX11-TRUE16-NEXT: s_and_not1_b32 vcc_lo, exec_lo, s8
+; GFX11-TRUE16-NEXT: s_cbranch_vccnz .LBB103_4
+; GFX11-TRUE16-NEXT: .LBB103_2: ; %cmp.true
+; GFX11-TRUE16-NEXT: s_and_b32 s8, s0, 0xffff0000
+; GFX11-TRUE16-NEXT: s_lshl_b32 s0, s0, 16
+; GFX11-TRUE16-NEXT: v_add_f32_e64 v0, 0x40c00000, s8
+; GFX11-TRUE16-NEXT: v_add_f32_e64 v1, 0x40c00000, s0
+; GFX11-TRUE16-NEXT: s_and_b32 s0, s1, 0xffff0000
+; GFX11-TRUE16-NEXT: s_lshl_b32 s1, s1, 16
+; GFX11-TRUE16-NEXT: v_add_f32_e64 v4, 0x40c00000, s0
+; GFX11-TRUE16-NEXT: v_bfe_u32 v2, v0, 16, 1
+; GFX11-TRUE16-NEXT: v_bfe_u32 v3, v1, 16, 1
+; GFX11-TRUE16-NEXT: v_add_f32_e64 v5, 0x40c00000, s1
+; GFX11-TRUE16-NEXT: v_or_b32_e32 v6, 0x400000, v0
+; GFX11-TRUE16-NEXT: v_bfe_u32 v7, v4, 16, 1
+; GFX11-TRUE16-NEXT: v_add_nc_u32_e32 v2, v2, v0
+; GFX11-TRUE16-NEXT: v_or_b32_e32 v8, 0x400000, v1
+; GFX11-TRUE16-NEXT: v_cmp_u_f32_e32 vcc_lo, v0, v0
+; GFX11-TRUE16-NEXT: v_bfe_u32 v9, v5, 16, 1
+; GFX11-TRUE16-NEXT: v_add_nc_u32_e32 v7, v7, v4
+; GFX11-TRUE16-NEXT: v_add_nc_u32_e32 v2, 0x7fff, v2
+; GFX11-TRUE16-NEXT: v_add_nc_u32_e32 v3, v3, v1
+; GFX11-TRUE16-NEXT: s_and_b32 s8, s2, 0xffff0000
+; GFX11-TRUE16-NEXT: s_lshl_b32 s0, s2, 16
+; GFX11-TRUE16-NEXT: s_and_b32 s1, s5, 0xffff0000
+; GFX11-TRUE16-NEXT: s_delay_alu instid0(VALU_DEP_1)
+; GFX11-TRUE16-NEXT: v_dual_cndmask_b32 v0, v2, v6 :: v_dual_add_nc_u32 v3, 0x7fff, v3
+; GFX11-TRUE16-NEXT: v_cmp_u_f32_e32 vcc_lo, v1, v1
+; GFX11-TRUE16-NEXT: v_or_b32_e32 v2, 0x400000, v4
+; GFX11-TRUE16-NEXT: v_add_nc_u32_e32 v6, 0x7fff, v7
+; GFX11-TRUE16-NEXT: v_add_f32_e64 v7, 0x40c00000, s8
+; GFX11-TRUE16-NEXT: v_add_f32_e64 v13, 0x40c00000, s1
+; GFX11-TRUE16-NEXT: v_cndmask_b32_e32 v1, v3, v8, vcc_lo
+; GFX11-TRUE16-NEXT: v_add_nc_u32_e32 v3, v9, v5
+; GFX11-TRUE16-NEXT: v_cmp_u_f32_e32 vcc_lo, v4, v4
+; GFX11-TRUE16-NEXT: v_lshrrev_b32_e32 v8, 16, v0
+; GFX11-TRUE16-NEXT: v_bfe_u32 v4, v7, 16, 1
+; GFX11-TRUE16-NEXT: v_lshrrev_b32_e32 v0, 16, v1
+; GFX11-TRUE16-NEXT: s_and_b32 s1, s7, 0xffff0000
+; GFX11-TRUE16-NEXT: v_dual_cndmask_b32 v1, v6, v2 :: v_dual_add_nc_u32 v2, 0x7fff, v3
+; GFX11-TRUE16-NEXT: v_or_b32_e32 v3, 0x400000, v5
+; GFX11-TRUE16-NEXT: v_cmp_u_f32_e32 vcc_lo, v5, v5
+; GFX11-TRUE16-NEXT: v_add_f32_e64 v6, 0x40c00000, s0
+; GFX11-TRUE16-NEXT: s_delay_alu instid0(VALU_DEP_4)
+; GFX11-TRUE16-NEXT: v_lshrrev_b32_e32 v9, 16, v1
+; GFX11-TRUE16-NEXT: s_and_b32 s0, s3, 0xffff0000
+; GFX11-TRUE16-NEXT: v_or_b32_e32 v5, 0x400000, v7
+; GFX11-TRUE16-NEXT: v_cndmask_b32_e32 v1, v2, v3, vcc_lo
+; GFX11-TRUE16-NEXT: v_add_nc_u32_e32 v2, v4, v7
+; GFX11-TRUE16-NEXT: v_bfe_u32 v3, v6, 16, 1
+; GFX11-TRUE16-NEXT: v_add_f32_e64 v4, 0x40c00000, s0
+; GFX11-TRUE16-NEXT: v_cmp_u_f32_e32 vcc_lo, v7, v7
+; GFX11-TRUE16-NEXT: v_add_f32_e64 v17, 0x40c00000, s1
+; GFX11-TRUE16-NEXT: v_add_nc_u32_e32 v2, 0x7fff, v2
+; GFX11-TRUE16-NEXT: s_lshl_b32 s0, s3, 16
+; GFX11-TRUE16-NEXT: v_add_nc_u32_e32 v3, v3, v6
+; GFX11-TRUE16-NEXT: v_bfe_u32 v10, v4, 16, 1
+; GFX11-TRUE16-NEXT: v_bfe_u32 v18, v17, 16, 1
+; GFX11-TRUE16-NEXT: v_cndmask_b32_e32 v2, v2, v5, vcc_lo
+; GFX11-TRUE16-NEXT: v_add_f32_e64 v5, 0x40c00000, s0
+; GFX11-TRUE16-NEXT: v_add_nc_u32_e32 v3, 0x7fff, v3
+; GFX11-TRUE16-NEXT: v_or_b32_e32 v7, 0x400000, v6
+; GFX11-TRUE16-NEXT: v_cmp_u_f32_e32 vcc_lo, v6, v6
+; GFX11-TRUE16-NEXT: v_lshrrev_b32_e32 v11, 16, v2
+; GFX11-TRUE16-NEXT: v_bfe_u32 v2, v5, 16, 1
+; GFX11-TRUE16-NEXT: v_or_b32_e32 v15, 0x400000, v13
+; GFX11-TRUE16-NEXT: v_add_nc_u32_e32 v10, v10, v4
+; GFX11-TRUE16-NEXT: v_add_nc_u32_e32 v18, v18, v17
+; GFX11-TRUE16-NEXT: s_and_b32 s0, s4, 0xffff0000
+; GFX11-TRUE16-NEXT: v_cndmask_b32_e32 v3, v3, v7, vcc_lo
+; GFX11-TRUE16-NEXT: v_or_b32_e32 v7, 0x400000, v4
+; GFX11-TRUE16-NEXT: v_add_f32_e64 v12, 0x40c00000, s0
+; GFX11-TRUE16-NEXT: v_cmp_u_f32_e32 vcc_lo, v4, v4
+; GFX11-TRUE16-NEXT: v_add_nc_u32_e32 v18, 0x7fff, v18
+; GFX11-TRUE16-NEXT: v_add_nc_u32_e32 v6, 0x7fff, v10
+; GFX11-TRUE16-NEXT: v_add_nc_u32_e32 v10, v2, v5
+; GFX11-TRUE16-NEXT: s_lshl_b32 s0, s4, 16
+; GFX11-TRUE16-NEXT: v_lshrrev_b32_e32 v2, 16, v3
+; GFX11-TRUE16-NEXT: v_lshrrev_b32_e32 v1, 16, v1
+; GFX11-TRUE16-NEXT: s_delay_alu instid0(VALU_DEP_3)
+; GFX11-TRUE16-NEXT: v_dual_cndmask_b32 v3, v6, v7 :: v_dual_add_nc_u32 v4, 0x7fff, v10
+; GFX11-TRUE16-NEXT: v_or_b32_e32 v6, 0x400000, v5
+; GFX11-TRUE16-NEXT: v_bfe_u32 v7, v12, 16, 1
+; GFX11-TRUE16-NEXT: v_add_f32_e64 v10, 0x40c00000, s0
+; GFX11-TRUE16-NEXT: v_cmp_u_f32_e32 vcc_lo, v5, v5
+; GFX11-TRUE16-NEXT: v_lshrrev_b32_e32 v14, 16, v3
+; GFX11-TRUE16-NEXT: s_lshl_b32 s0, s5, 16
+; GFX11-TRUE16-NEXT: v_add_nc_u32_e32 v5, v7, v12
+; GFX11-TRUE16-NEXT: v_bfe_u32 v7, v13, 16, 1
+; GFX11-TRUE16-NEXT: v_cndmask_b32_e32 v4, v4, v6, vcc_lo
+; GFX11-TRUE16-NEXT: v_bfe_u32 v6, v10, 16, 1
+; GFX11-TRUE16-NEXT: v_cmp_u_f32_e32 vcc_lo, v12, v12
+; GFX11-TRUE16-NEXT: v_mov_b16_e32 v2.h, v11.l
+; GFX11-TRUE16-NEXT: v_add_nc_u32_e32 v7, v7, v13
+; GFX11-TRUE16-NEXT: v_lshrrev_b32_e32 v3, 16, v4
+; GFX11-TRUE16-NEXT: v_add_nc_u32_e32 v4, 0x7fff, v5
+; GFX11-TRUE16-NEXT: v_or_b32_e32 v5, 0x400000, v12
+; GFX11-TRUE16-NEXT: v_add_nc_u32_e32 v6, v6, v10
+; GFX11-TRUE16-NEXT: v_or_b32_e32 v12, 0x400000, v10
+; GFX11-TRUE16-NEXT: v_add_nc_u32_e32 v7, 0x7fff, v7
+; GFX11-TRUE16-NEXT: v_mov_b16_e32 v3.h, v14.l
+; GFX11-TRUE16-NEXT: v_cndmask_b32_e32 v4, v4, v5, vcc_lo
+; GFX11-TRUE16-NEXT: v_add_f32_e64 v5, 0x40c00000, s0
+; GFX11-TRUE16-NEXT: v_add_nc_u32_e32 v6, 0x7fff, v6
+; GFX11-TRUE16-NEXT: v_cmp_u_f32_e32 vcc_lo, v10, v10
+; GFX11-TRUE16-NEXT: s_and_b32 s0, s6, 0xffff0000
+; GFX11-TRUE16-NEXT: v_mov_b16_e32 v1.h, v9.l
+; GFX11-TRUE16-NEXT: v_bfe_u32 v16, v5, 16, 1
+; GFX11-TRUE16-NEXT: v_add_f32_e64 v10, 0x40c00000, s0
+; GFX11-TRUE16-NEXT: v_cndmask_b32_e32 v6, v6, v12, vcc_lo
+; GFX11-TRUE16-NEXT: v_cmp_u_f32_e32 vcc_lo, v13, v13
+; GFX11-TRUE16-NEXT: s_lshl_b32 s0, s6, 16
+; GFX11-TRUE16-NEXT: v_add_nc_u32_e32 v12, v16, v5
+; GFX11-TRUE16-NEXT: v_lshrrev_b32_e32 v13, 16, v4
+; GFX11-TRUE16-NEXT: v_lshrrev_b32_e32 v4, 16, v6
+; GFX11-TRUE16-NEXT: v_cndmask_b32_e32 v7, v7, v15, vcc_lo
+; GFX11-TRUE16-NEXT: v_bfe_u32 v15, v10, 16, 1
+; GFX11-TRUE16-NEXT: v_add_nc_u32_e32 v6, 0x7fff, v12
+; GFX11-TRUE16-NEXT: v_cmp_u_f32_e32 vcc_lo, v5, v5
+; GFX11-TRUE16-NEXT: v_or_b32_e32 v19, 0x400000, v10
+; GFX11-TRUE16-NEXT: v_lshrrev_b32_e32 v16, 16, v7
+; GFX11-TRUE16-NEXT: v_or_b32_e32 v7, 0x400000, v5
+; GFX11-TRUE16-NEXT: v_add_nc_u32_e32 v12, v15, v10
+; GFX11-TRUE16-NEXT: v_add_f32_e64 v15, 0x40c00000, s0
+; GFX11-TRUE16-NEXT: s_lshl_b32 s0, s7, 16
+; GFX11-TRUE16-NEXT: v_mov_b16_e32 v4.h, v13.l
+; GFX11-TRUE16-NEXT: v_cndmask_b32_e32 v5, v6, v7, vcc_lo
+; GFX11-TRUE16-NEXT: v_add_f32_e64 v6, 0x40c00000, s0
+; GFX11-TRUE16-NEXT: v_bfe_u32 v7, v15, 16, 1
+; GFX11-TRUE16-NEXT: v_add_nc_u32_e32 v12, 0x7fff, v12
+; GFX11-TRUE16-NEXT: v_cmp_u_f32_e32 vcc_lo, v10, v10
+; GFX11-TRUE16-NEXT: v_lshrrev_b32_e32 v5, 16, v5
+; GFX11-TRUE16-NEXT: v_bfe_u32 v20, v6, 16, 1
+; GFX11-TRUE16-NEXT: v_add_nc_u32_e32 v7, v7, v15
+; GFX11-TRUE16-NEXT: v_or_b32_e32 v21, 0x400000, v6
+; GFX11-TRUE16-NEXT: v_cndmask_b32_e32 v10, v12, v19, vcc_lo
+; GFX11-TRUE16-NEXT: v_or_b32_e32 v19, 0x400000, v17
+; GFX11-TRUE16-NEXT: v_cmp_u_f32_e32 vcc_lo, v17, v17
+; GFX11-TRUE16-NEXT: v_add_nc_u32_e32 v12, v20, v6
+; GFX11-TRUE16-NEXT: v_add_nc_u32_e32 v7, 0x7fff, v7
+; GFX11-TRUE16-NEXT: v_or_b32_e32 v20, 0x400000, v15
+; GFX11-TRUE16-NEXT: v_lshrrev_b32_e32 v10, 16, v10
+; GFX11-TRUE16-NEXT: v_cndmask_b32_e32 v17, v18, v19, vcc_lo
+; GFX11-TRUE16-NEXT: v_cmp_u_f32_e32 vcc_lo, v15, v15
+; GFX11-TRUE16-NEXT: v_add_nc_u32_e32 v12, 0x7fff, v12
+; GFX11-TRUE16-NEXT: v_mov_b16_e32 v5.h, v16.l
+; GFX11-TRUE16-NEXT: v_mov_b16_e32 v0.h, v8.l
+; GFX11-TRUE16-NEXT: v_lshrrev_b32_e32 v15, 16, v17
+; GFX11-TRUE16-NEXT: v_cndmask_b32_e32 v7, v7, v20, vcc_lo
+; GFX11-TRUE16-NEXT: v_cmp_u_f32_e32 vcc_lo, v6, v6
+; GFX11-TRUE16-NEXT: s_delay_alu instid0(VALU_DEP_2) | instskip(SKIP_2) | instid1(VALU_DEP_2)
+; GFX11-TRUE16-NEXT: v_lshrrev_b32_e32 v6, 16, v7
+; GFX11-TRUE16-NEXT: v_cndmask_b32_e32 v12, v12, v21, vcc_lo
+; GFX11-TRUE16-NEXT: v_mov_b16_e32 v6.h, v10.l
+; GFX11-TRUE16-NEXT: v_lshrrev_b32_e32 v7, 16, v12
+; GFX11-TRUE16-NEXT: v_mov_b16_e32 v7.h, v15.l
+; GFX11-TRUE16-NEXT: s_setpc_b64 s[30:31]
+; GFX11-TRUE16-NEXT: .LBB103_3:
+; GFX11-TRUE16-NEXT: s_branch .LBB103_2
+; GFX11-TRUE16-NEXT: .LBB103_4:
+; GFX11-TRUE16-NEXT: v_dual_mov_b32 v0, s0 :: v_dual_mov_b32 v1, s1
+; GFX11-TRUE16-NEXT: v_dual_mov_b32 v2, s2 :: v_dual_mov_b32 v3, s3
+; GFX11-TRUE16-NEXT: v_dual_mov_b32 v4, s4 :: v_dual_mov_b32 v5, s5
+; GFX11-TRUE16-NEXT: v_dual_mov_b32 v6, s6 :: v_dual_mov_b32 v7, s7
+; GFX11-TRUE16-NEXT: s_setpc_b64 s[30:31]
+;
+; GFX11-FAKE16-LABEL: bitcast_v16bf16_to_v16f16_scalar:
+; GFX11-FAKE16: ; %bb.0:
+; GFX11-FAKE16-NEXT: s_waitcnt vmcnt(0) expcnt(0) lgkmcnt(0)
+; GFX11-FAKE16-NEXT: s_mov_b32 s7, s19
+; GFX11-FAKE16-NEXT: s_mov_b32 s6, s18
+; GFX11-FAKE16-NEXT: s_mov_b32 s5, s17
+; GFX11-FAKE16-NEXT: s_mov_b32 s4, s16
+; GFX11-FAKE16-NEXT: s_cmp_lg_u32 s20, 0
+; GFX11-FAKE16-NEXT: s_mov_b32 s8, 0
+; GFX11-FAKE16-NEXT: s_cbranch_scc0 .LBB103_3
+; GFX11-FAKE16-NEXT: ; %bb.1: ; %Flow
+; GFX11-FAKE16-NEXT: s_and_not1_b32 vcc_lo, exec_lo, s8
+; GFX11-FAKE16-NEXT: s_cbranch_vccnz .LBB103_4
+; GFX11-FAKE16-NEXT: .LBB103_2: ; %cmp.true
+; GFX11-FAKE16-NEXT: s_and_b32 s8, s0, 0xffff0000
+; GFX11-FAKE16-NEXT: s_lshl_b32 s0, s0, 16
+; GFX11-FAKE16-NEXT: v_add_f32_e64 v0, 0x40c00000, s8
+; GFX11-FAKE16-NEXT: v_add_f32_e64 v1, 0x40c00000, s0
+; GFX11-FAKE16-NEXT: s_and_b32 s0, s1, 0xffff0000
+; GFX11-FAKE16-NEXT: s_lshl_b32 s1, s1, 16
+; GFX11-FAKE16-NEXT: v_add_f32_e64 v4, 0x40c00000, s0
+; GFX11-FAKE16-NEXT: v_bfe_u32 v2, v0, 16, 1
+; GFX11-FAKE16-NEXT: v_bfe_u32 v3, v1, 16, 1
+; GFX11-FAKE16-NEXT: v_or_b32_e32 v6, 0x400000, v0
+; GFX11-FAKE16-NEXT: v_or_b32_e32 v8, 0x400000, v1
+; GFX11-FAKE16-NEXT: v_bfe_u32 v7, v4, 16, 1
+; GFX11-FAKE16-NEXT: v_add_nc_u32_e32 v2, v2, v0
+; GFX11-FAKE16-NEXT: v_cmp_u_f32_e32 vcc_lo, v0, v0
+; GFX11-FAKE16-NEXT: v_add_f32_e64 v5, 0x40c00000, s1
+; GFX11-FAKE16-NEXT: s_lshl_b32 s0, s2, 16
+; GFX11-FAKE16-NEXT: v_add_nc_u32_e32 v7, v7, v4
+; GFX11-FAKE16-NEXT: v_add_nc_u32_e32 v2, 0x7fff, v2
+; GFX11-FAKE16-NEXT: v_add_nc_u32_e32 v3, v3, v1
+; GFX11-FAKE16-NEXT: v_bfe_u32 v9, v5, 16, 1
+; GFX11-FAKE16-NEXT: s_and_b32 s8, s2, 0xffff0000
+; GFX11-FAKE16-NEXT: s_lshl_b32 s1, s4, 16
+; GFX11-FAKE16-NEXT: v_cndmask_b32_e32 v0, v2, v6, vcc_lo
+; GFX11-FAKE16-NEXT: v_cmp_u_f32_e32 vcc_lo, v1, v1
+; GFX11-FAKE16-NEXT: v_or_b32_e32 v2, 0x400000, v4
+; GFX11-FAKE16-NEXT: v_add_nc_u32_e32 v6, 0x7fff, v7
+; GFX11-FAKE16-NEXT: v_add_nc_u32_e32 v3, 0x7fff, v3
+; GFX11-FAKE16-NEXT: v_add_f32_e64 v7, 0x40c00000, s8
+; GFX11-FAKE16-NEXT: v_lshrrev_b32_e32 v0, 16, v0
+; GFX11-FAKE16-NEXT: s_delay_alu instid0(VALU_DEP_3)
+; GFX11-FAKE16-NEXT: v_cndmask_b32_e32 v1, v3, v8, vcc_lo
+; GFX11-FAKE16-NEXT: v_cmp_u_f32_e32 vcc_lo, v4, v4
+; GFX11-FAKE16-NEXT: v_add_nc_u32_e32 v3, v9, v5
+; GFX11-FAKE16-NEXT: v_or_b32_e32 v4, 0x400000, v5
+; GFX11-FAKE16-NEXT: v_or_b32_e32 v9, 0x400000, v7
+; GFX11-FAKE16-NEXT: v_lshrrev_b32_e32 v1, 16, v1
+; GFX11-FAKE16-NEXT: v_cndmask_b32_e32 v2, v6, v2, vcc_lo
+; GFX11-FAKE16-NEXT: v_cmp_u_f32_e32 vcc_lo, v5, v5
+; GFX11-FAKE16-NEXT: v_bfe_u32 v6, v7, 16, 1
+; GFX11-FAKE16-NEXT: s_delay_alu instid0(VALU_DEP_3) | instskip(SKIP_2) | instid1(VALU_DEP_1)
+; GFX11-FAKE16-NEXT: v_lshrrev_b32_e32 v8, 16, v2
+; GFX11-FAKE16-NEXT: v_add_f32_e64 v2, 0x40c00000, s0
+; GFX11-FAKE16-NEXT: s_and_b32 s0, s3, 0xffff0000
+; GFX11-FAKE16-NEXT: v_bfe_u32 v5, v2, 16, 1
+; GFX11-FAKE16-NEXT: s_delay_alu instid0(VALU_DEP_1) | instskip(NEXT) | instid1(VALU_DEP_1)
+; GFX11-FAKE16-NEXT: v_add_nc_u32_e32 v5, v5, v2
+; GFX11-FAKE16-NEXT: v_add_nc_u32_e32 v5, 0x7fff, v5
+; GFX11-FAKE16-NEXT: v_add_nc_u32_e32 v3, 0x7fff, v3
+; GFX11-FAKE16-NEXT: s_delay_alu instid0(VALU_DEP_1) | instskip(SKIP_3) | instid1(VALU_DEP_3)
+; GFX11-FAKE16-NEXT: v_dual_cndmask_b32 v3, v3, v4 :: v_dual_add_nc_u32 v4, v6, v7
+; GFX11-FAKE16-NEXT: v_add_f32_e64 v6, 0x40c00000, s0
+; GFX11-FAKE16-NEXT: v_cmp_u_f32_e32 vcc_lo, v7, v7
+; GFX11-FAKE16-NEXT: s_lshl_b32 s0, s3, 16
+; GFX11-FAKE16-NEXT: v_lshrrev_b32_e32 v3, 16, v3
+; GFX11-FAKE16-NEXT: v_add_nc_u32_e32 v4, 0x7fff, v4
+; GFX11-FAKE16-NEXT: v_bfe_u32 v10, v6, 16, 1
+; GFX11-FAKE16-NEXT: v_add_f32_e64 v7, 0x40c00000, s0
+; GFX11-FAKE16-NEXT: s_and_b32 s0, s4, 0xffff0000
+; GFX11-FAKE16-NEXT: s_delay_alu instid0(VALU_DEP_3) | instskip(SKIP_3) | instid1(VALU_DEP_4)
+; GFX11-FAKE16-NEXT: v_cndmask_b32_e32 v4, v4, v9, vcc_lo
+; GFX11-FAKE16-NEXT: v_or_b32_e32 v9, 0x400000, v2
+; GFX11-FAKE16-NEXT: v_add_nc_u32_e32 v10, v10, v6
+; GFX11-FAKE16-NEXT: v_cmp_u_f32_e32 vcc_lo, v2, v2
+; GFX11-FAKE16-NEXT: v_lshrrev_b32_e32 v11, 16, v4
+; GFX11-FAKE16-NEXT: v_bfe_u32 v4, v7, 16, 1
+; GFX11-FAKE16-NEXT: s_delay_alu instid0(VALU_DEP_4)
+; GFX11-FAKE16-NEXT: v_dual_cndmask_b32 v2, v5, v9 :: v_dual_add_nc_u32 v5, 0x7fff, v10
+; GFX11-FAKE16-NEXT: v_or_b32_e32 v9, 0x400000, v6
+; GFX11-FAKE16-NEXT: v_add_f32_e64 v10, 0x40c00000, s0
+; GFX11-FAKE16-NEXT: v_cmp_u_f32_e32 vcc_lo, v6, v6
+; GFX11-FAKE16-NEXT: s_and_b32 s0, s5, 0xffff0000
+; GFX11-FAKE16-NEXT: v_add_nc_u32_e32 v4, v4, v7
+; GFX11-FAKE16-NEXT: v_add_f32_e64 v12, 0x40c00000, s0
+; GFX11-FAKE16-NEXT: v_or_b32_e32 v6, 0x400000, v7
+; GFX11-FAKE16-NEXT: v_cndmask_b32_e32 v5, v5, v9, vcc_lo
+; GFX11-FAKE16-NEXT: v_bfe_u32 v9, v10, 16, 1
+; GFX11-FAKE16-NEXT: v_add_nc_u32_e32 v4, 0x7fff, v4
+; GFX11-FAKE16-NEXT: v_cmp_u_f32_e32 vcc_lo, v7, v7
+; GFX11-FAKE16-NEXT: s_lshl_b32 s0, s5, 16
+; GFX11-FAKE16-NEXT: v_or_b32_e32 v14, 0x400000, v10
+; GFX11-FAKE16-NEXT: v_add_nc_u32_e32 v7, v9, v10
+; GFX11-FAKE16-NEXT: v_lshrrev_b32_e32 v9, 16, v5
+; GFX11-FAKE16-NEXT: v_bfe_u32 v5, v12, 16, 1
+; GFX11-FAKE16-NEXT: v_cndmask_b32_e32 v4, v4, v6, vcc_lo
+; GFX11-FAKE16-NEXT: v_add_f32_e64 v6, 0x40c00000, s1
+; GFX11-FAKE16-NEXT: v_add_nc_u32_e32 v7, 0x7fff, v7
+; GFX11-FAKE16-NEXT: v_add_f32_e64 v15, 0x40c00000, s0
+; GFX11-FAKE16-NEXT: v_add_nc_u32_e32 v5, v5, v12
+; GFX11-FAKE16-NEXT: v_cmp_u_f32_e32 vcc_lo, v10, v10
+; GFX11-FAKE16-NEXT: v_bfe_u32 v13, v6, 16, 1
+; GFX11-FAKE16-NEXT: v_or_b32_e32 v10, 0x400000, v12
+; GFX11-FAKE16-NEXT: s_and_b32 s0, s6, 0xffff0000
+; GFX11-FAKE16-NEXT: v_add_nc_u32_e32 v5, 0x7fff, v5
+; GFX11-FAKE16-NEXT: v_cndmask_b32_e32 v7, v7, v14, vcc_lo
+; GFX11-FAKE16-NEXT: v_bfe_u32 v14, v15, 16, 1
+; GFX11-FAKE16-NEXT: v_cmp_u_f32_e32 vcc_lo, v12, v12
+; GFX11-FAKE16-NEXT: v_add_f32_e64 v12, 0x40c00000, s0
+; GFX11-FAKE16-NEXT: s_lshl_b32 s0, s6, 16
+; GFX11-FAKE16-NEXT: v_add_nc_u32_e32 v13, v13, v6
+; GFX11-FAKE16-NEXT: v_or_b32_e32 v16, 0x400000, v6
+; GFX11-FAKE16-NEXT: v_dual_cndmask_b32 v5, v5, v10 :: v_dual_add_nc_u32 v10, v14, v15
+; GFX11-FAKE16-NEXT: v_add_f32_e64 v14, 0x40c00000, s0
+; GFX11-FAKE16-NEXT: s_lshl_b32 s0, s7, 16
+; GFX11-FAKE16-NEXT: v_add_nc_u32_e32 v13, 0x7fff, v13
+; GFX11-FAKE16-NEXT: v_cmp_u_f32_e32 vcc_lo, v6, v6
+; GFX11-FAKE16-NEXT: v_add_f32_e64 v17, 0x40c00000, s0
+; GFX11-FAKE16-NEXT: v_bfe_u32 v18, v14, 16, 1
+; GFX11-FAKE16-NEXT: v_add_nc_u32_e32 v10, 0x7fff, v10
+; GFX11-FAKE16-NEXT: s_and_b32 s0, s7, 0xffff0000
+; GFX11-FAKE16-NEXT: v_cndmask_b32_e32 v6, v13, v16, vcc_lo
+; GFX11-FAKE16-NEXT: v_or_b32_e32 v16, 0x400000, v15
+; GFX11-FAKE16-NEXT: v_bfe_u32 v20, v17, 16, 1
+; GFX11-FAKE16-NEXT: v_cmp_u_f32_e32 vcc_lo, v15, v15
+; GFX11-FAKE16-NEXT: v_add_nc_u32_e32 v15, v18, v14
+; GFX11-FAKE16-NEXT: v_bfe_u32 v13, v12, 16, 1
+; GFX11-FAKE16-NEXT: v_add_f32_e64 v19, 0x40c00000, s0
+; GFX11-FAKE16-NEXT: v_add_nc_u32_e32 v18, v20, v17
+; GFX11-FAKE16-NEXT: s_delay_alu instid0(VALU_DEP_4)
+; GFX11-FAKE16-NEXT: v_dual_cndmask_b32 v10, v10, v16 :: v_dual_add_nc_u32 v15, 0x7fff, v15
+; GFX11-FAKE16-NEXT: v_or_b32_e32 v21, 0x400000, v14
+; GFX11-FAKE16-NEXT: v_cmp_u_f32_e32 vcc_lo, v14, v14
+; GFX11-FAKE16-NEXT: v_add_nc_u32_e32 v13, v13, v12
+; GFX11-FAKE16-NEXT: v_bfe_u32 v16, v19, 16, 1
+; GFX11-FAKE16-NEXT: v_add_nc_u32_e32 v18, 0x7fff, v18
+; GFX11-FAKE16-NEXT: v_or_b32_e32 v22, 0x400000, v17
+; GFX11-FAKE16-NEXT: v_cndmask_b32_e32 v14, v15, v21, vcc_lo
+; GFX11-FAKE16-NEXT: v_cmp_u_f32_e32 vcc_lo, v17, v17
+; GFX11-FAKE16-NEXT: v_add_nc_u32_e32 v13, 0x7fff, v13
+; GFX11-FAKE16-NEXT: v_or_b32_e32 v20, 0x400000, v12
+; GFX11-FAKE16-NEXT: v_add_nc_u32_e32 v16, v16, v19
+; GFX11-FAKE16-NEXT: v_lshrrev_b32_e32 v14, 16, v14
+; GFX11-FAKE16-NEXT: v_cndmask_b32_e32 v17, v18, v22, vcc_lo
+; GFX11-FAKE16-NEXT: v_cmp_u_f32_e32 vcc_lo, v12, v12
+; GFX11-FAKE16-NEXT: v_lshrrev_b32_e32 v10, 16, v10
+; GFX11-FAKE16-NEXT: v_add_nc_u32_e32 v15, 0x7fff, v16
+; GFX11-FAKE16-NEXT: v_or_b32_e32 v16, 0x400000, v19
+; GFX11-FAKE16-NEXT: v_lshrrev_b32_e32 v2, 16, v2
+; GFX11-FAKE16-NEXT: v_cndmask_b32_e32 v12, v13, v20, vcc_lo
+; GFX11-FAKE16-NEXT: v_cmp_u_f32_e32 vcc_lo, v19, v19
+; GFX11-FAKE16-NEXT: v_lshrrev_b32_e32 v4, 16, v4
+; GFX11-FAKE16-NEXT: v_lshrrev_b32_e32 v5, 16, v5
+; GFX11-FAKE16-NEXT: v_and_b32_e32 v14, 0xffff, v14
+; GFX11-FAKE16-NEXT: v_lshrrev_b32_e32 v12, 16, v12
+; GFX11-FAKE16-NEXT: v_cndmask_b32_e32 v13, v15, v16, vcc_lo
+; GFX11-FAKE16-NEXT: v_lshrrev_b32_e32 v15, 16, v17
+; GFX11-FAKE16-NEXT: v_and_b32_e32 v10, 0xffff, v10
+; GFX11-FAKE16-NEXT: v_lshrrev_b32_e32 v16, 16, v6
+; GFX11-FAKE16-NEXT: v_lshrrev_b32_e32 v17, 16, v7
+; GFX11-FAKE16-NEXT: v_lshrrev_b32_e32 v13, 16, v13
+; GFX11-FAKE16-NEXT: v_and_b32_e32 v15, 0xffff, v15
+; GFX11-FAKE16-NEXT: v_lshl_or_b32 v6, v12, 16, v14
+; GFX11-FAKE16-NEXT: v_lshl_or_b32 v5, v5, 16, v10
+; GFX11-FAKE16-NEXT: v_and_b32_e32 v10, 0xffff, v16
+; GFX11-FAKE16-NEXT: v_and_b32_e32 v12, 0xffff, v4
+; GFX11-FAKE16-NEXT: v_lshl_or_b32 v7, v13, 16, v15
+; GFX11-FAKE16-NEXT: v_and_b32_e32 v2, 0xffff, v2
+; GFX11-FAKE16-NEXT: v_and_b32_e32 v13, 0xffff, v3
+; GFX11-FAKE16-NEXT: v_and_b32_e32 v14, 0xffff, v1
+; GFX11-FAKE16-NEXT: v_lshl_or_b32 v4, v17, 16, v10
+; GFX11-FAKE16-NEXT: v_lshl_or_b32 v3, v9, 16, v12
+; GFX11-FAKE16-NEXT: v_lshl_or_b32 v2, v11, 16, v2
+; GFX11-FAKE16-NEXT: v_lshl_or_b32 v1, v8, 16, v13
+; GFX11-FAKE16-NEXT: v_lshl_or_b32 v0, v0, 16, v14
+; GFX11-FAKE16-NEXT: s_setpc_b64 s[30:31]
+; GFX11-FAKE16-NEXT: .LBB103_3:
+; GFX11-FAKE16-NEXT: s_branch .LBB103_2
+; GFX11-FAKE16-NEXT: .LBB103_4:
+; GFX11-FAKE16-NEXT: v_dual_mov_b32 v0, s0 :: v_dual_mov_b32 v1, s1
+; GFX11-FAKE16-NEXT: v_dual_mov_b32 v2, s2 :: v_dual_mov_b32 v3, s3
+; GFX11-FAKE16-NEXT: v_dual_mov_b32 v4, s4 :: v_dual_mov_b32 v5, s5
+; GFX11-FAKE16-NEXT: v_dual_mov_b32 v6, s6 :: v_dual_mov_b32 v7, s7
+; GFX11-FAKE16-NEXT: s_setpc_b64 s[30:31]
%cmp = icmp eq i32 %b, 0
br i1 %cmp, label %cmp.true, label %cmp.false
@@ -37354,177 +38571,351 @@ define inreg <16 x half> @bitcast_v32i8_to_v16f16_scalar(<32 x i8> inreg %a, i32
; GFX9-NEXT: ; implicit-def: $vgpr0_vgpr1_vgpr2_vgpr3_vgpr4_vgpr5_vgpr6_vgpr7
; GFX9-NEXT: s_branch .LBB107_2
;
-; GFX11-LABEL: bitcast_v32i8_to_v16f16_scalar:
-; GFX11: ; %bb.0:
-; GFX11-NEXT: s_waitcnt vmcnt(0) expcnt(0) lgkmcnt(0)
-; GFX11-NEXT: v_cmp_ne_u32_e32 vcc_lo, 0, v14
-; GFX11-NEXT: v_dual_mov_b32 v16, v6 :: v_dual_mov_b32 v17, v0
-; GFX11-NEXT: v_dual_mov_b32 v18, v4 :: v_dual_mov_b32 v15, v2
-; GFX11-NEXT: v_lshlrev_b32_e32 v14, 8, v1
-; GFX11-NEXT: v_lshlrev_b32_e32 v20, 8, v3
-; GFX11-NEXT: v_lshlrev_b32_e32 v19, 8, v5
-; GFX11-NEXT: v_lshlrev_b32_e32 v21, 8, v7
-; GFX11-NEXT: v_lshlrev_b32_e32 v9, 8, v9
-; GFX11-NEXT: v_lshlrev_b32_e32 v11, 8, v11
-; GFX11-NEXT: v_lshlrev_b32_e32 v13, 8, v13
-; GFX11-NEXT: s_mov_b32 s4, 0
-; GFX11-NEXT: s_and_b32 s5, vcc_lo, exec_lo
-; GFX11-NEXT: s_cbranch_scc0 .LBB107_4
-; GFX11-NEXT: ; %bb.1: ; %cmp.false
-; GFX11-NEXT: s_and_b32 s5, s0, 0xff
-; GFX11-NEXT: s_lshl_b32 s6, s1, 8
-; GFX11-NEXT: s_and_b32 s7, s2, 0xff
-; GFX11-NEXT: s_lshl_b32 s8, s3, 8
-; GFX11-NEXT: s_or_b32 s5, s5, s6
-; GFX11-NEXT: s_or_b32 s6, s7, s8
-; GFX11-NEXT: s_and_b32 s7, s16, 0xff
-; GFX11-NEXT: s_lshl_b32 s8, s17, 8
-; GFX11-NEXT: s_and_b32 s9, s18, 0xff
-; GFX11-NEXT: s_lshl_b32 s10, s19, 8
-; GFX11-NEXT: s_or_b32 s7, s7, s8
-; GFX11-NEXT: s_or_b32 s8, s9, s10
-; GFX11-NEXT: s_pack_ll_b32_b16 s5, s5, s6
-; GFX11-NEXT: s_pack_ll_b32_b16 s6, s7, s8
-; GFX11-NEXT: s_and_b32 s7, s20, 0xff
-; GFX11-NEXT: s_lshl_b32 s8, s21, 8
-; GFX11-NEXT: s_and_b32 s9, s22, 0xff
-; GFX11-NEXT: s_lshl_b32 s10, s23, 8
-; GFX11-NEXT: s_or_b32 s7, s7, s8
-; GFX11-NEXT: s_or_b32 s8, s9, s10
-; GFX11-NEXT: s_and_b32 s9, s24, 0xff
-; GFX11-NEXT: s_lshl_b32 s10, s25, 8
-; GFX11-NEXT: v_and_b32_e32 v1, 0xff, v15
-; GFX11-NEXT: s_pack_ll_b32_b16 s7, s7, s8
-; GFX11-NEXT: s_or_b32 s8, s9, s10
-; GFX11-NEXT: s_and_b32 s9, s26, 0xff
-; GFX11-NEXT: s_lshl_b32 s10, s27, 8
-; GFX11-NEXT: v_and_b32_e32 v4, 0xff, v16
-; GFX11-NEXT: s_or_b32 s9, s9, s10
-; GFX11-NEXT: v_or_b32_e32 v1, v1, v20
-; GFX11-NEXT: v_and_b32_e32 v5, 0xff, v10
-; GFX11-NEXT: s_pack_ll_b32_b16 s8, s8, s9
-; GFX11-NEXT: v_and_b32_e32 v2, 0xff, v18
-; GFX11-NEXT: v_and_b32_e32 v0, 0xff, v17
-; GFX11-NEXT: v_and_b32_e32 v6, 0xff, v8
-; GFX11-NEXT: v_or_b32_e32 v4, v4, v21
-; GFX11-NEXT: v_and_b32_e32 v7, 0xff, v12
-; GFX11-NEXT: v_or_b32_e32 v2, v2, v19
-; GFX11-NEXT: v_or_b32_e32 v5, v5, v11
-; GFX11-NEXT: v_and_b32_e32 v1, 0xffff, v1
-; GFX11-NEXT: s_and_b32 s11, s28, 0xff
-; GFX11-NEXT: s_lshl_b32 s12, s29, 8
-; GFX11-NEXT: v_or_b32_e32 v6, v6, v9
-; GFX11-NEXT: s_or_b32 s10, s11, s12
-; GFX11-NEXT: v_and_b32_e32 v22, 0xffff, v4
-; GFX11-NEXT: v_and_b32_e64 v3, 0xffff, s10
-; GFX11-NEXT: v_or_b32_e32 v7, v7, v13
-; GFX11-NEXT: v_and_b32_e32 v23, 0xffff, v5
-; GFX11-NEXT: v_lshl_or_b32 v5, v2, 16, v1
-; GFX11-NEXT: v_mov_b32_e32 v1, s6
-; GFX11-NEXT: v_or_b32_e32 v0, v0, v14
-; GFX11-NEXT: v_lshl_or_b32 v6, v6, 16, v22
-; GFX11-NEXT: v_lshl_or_b32 v7, v7, 16, v23
-; GFX11-NEXT: v_mov_b32_e32 v2, s7
-; GFX11-NEXT: s_delay_alu instid0(VALU_DEP_4)
-; GFX11-NEXT: v_lshl_or_b32 v4, v0, 16, v3
-; GFX11-NEXT: v_dual_mov_b32 v0, s5 :: v_dual_mov_b32 v3, s8
-; GFX11-NEXT: s_and_not1_b32 vcc_lo, exec_lo, s4
-; GFX11-NEXT: s_cbranch_vccnz .LBB107_3
-; GFX11-NEXT: .LBB107_2: ; %cmp.true
-; GFX11-NEXT: s_add_i32 s28, s28, 3
-; GFX11-NEXT: s_lshl_b32 s5, s29, 8
-; GFX11-NEXT: s_and_b32 s4, s28, 0xff
-; GFX11-NEXT: s_add_i32 s24, s24, 3
-; GFX11-NEXT: s_or_b32 s4, s5, s4
-; GFX11-NEXT: s_and_b32 s5, s24, 0xff
-; GFX11-NEXT: s_lshl_b32 s6, s25, 8
-; GFX11-NEXT: s_add_i32 s26, s26, 3
-; GFX11-NEXT: s_or_b32 s5, s6, s5
-; GFX11-NEXT: s_and_b32 s6, s26, 0xff
-; GFX11-NEXT: s_lshl_b32 s7, s27, 8
-; GFX11-NEXT: s_add_i32 s20, s20, 3
-; GFX11-NEXT: s_or_b32 s6, s7, s6
-; GFX11-NEXT: s_and_b32 s7, s20, 0xff
-; GFX11-NEXT: s_lshl_b32 s8, s21, 8
-; GFX11-NEXT: s_add_i32 s22, s22, 3
-; GFX11-NEXT: s_or_b32 s7, s8, s7
-; GFX11-NEXT: s_and_b32 s8, s22, 0xff
-; GFX11-NEXT: s_lshl_b32 s9, s23, 8
-; GFX11-NEXT: s_add_i32 s16, s16, 3
-; GFX11-NEXT: s_or_b32 s8, s9, s8
-; GFX11-NEXT: s_and_b32 s9, s16, 0xff
-; GFX11-NEXT: s_lshl_b32 s10, s17, 8
-; GFX11-NEXT: s_add_i32 s18, s18, 3
-; GFX11-NEXT: s_add_i32 s0, s0, 3
-; GFX11-NEXT: s_add_i32 s2, s2, 3
-; GFX11-NEXT: s_or_b32 s9, s10, s9
-; GFX11-NEXT: s_and_b32 s10, s18, 0xff
-; GFX11-NEXT: s_lshl_b32 s11, s19, 8
-; GFX11-NEXT: s_and_b32 s0, s0, 0xff
-; GFX11-NEXT: s_lshl_b32 s1, s1, 8
-; GFX11-NEXT: s_and_b32 s2, s2, 0xff
-; GFX11-NEXT: s_lshl_b32 s3, s3, 8
-; GFX11-NEXT: s_or_b32 s10, s11, s10
-; GFX11-NEXT: s_or_b32 s0, s1, s0
-; GFX11-NEXT: s_or_b32 s1, s3, s2
-; GFX11-NEXT: s_addk_i32 s5, 0x300
-; GFX11-NEXT: s_addk_i32 s6, 0x300
-; GFX11-NEXT: s_addk_i32 s9, 0x300
-; GFX11-NEXT: s_addk_i32 s0, 0x300
-; GFX11-NEXT: s_addk_i32 s1, 0x300
-; GFX11-NEXT: s_addk_i32 s10, 0x300
-; GFX11-NEXT: v_add_nc_u32_e32 v4, 3, v15
-; GFX11-NEXT: s_pack_ll_b32_b16 s0, s0, s1
-; GFX11-NEXT: s_pack_ll_b32_b16 s1, s9, s10
-; GFX11-NEXT: v_add_nc_u32_e32 v0, 3, v10
-; GFX11-NEXT: s_pack_ll_b32_b16 s3, s5, s6
-; GFX11-NEXT: v_add_nc_u32_e32 v2, 3, v16
-; GFX11-NEXT: s_addk_i32 s7, 0x300
-; GFX11-NEXT: s_addk_i32 s8, 0x300
-; GFX11-NEXT: v_add_nc_u32_e32 v1, 3, v12
-; GFX11-NEXT: v_and_b32_e32 v0, 0xff, v0
-; GFX11-NEXT: v_add_nc_u32_e32 v5, 3, v18
-; GFX11-NEXT: v_and_b32_e32 v2, 0xff, v2
-; GFX11-NEXT: v_and_b32_e32 v4, 0xff, v4
-; GFX11-NEXT: v_add_nc_u32_e32 v6, 3, v17
-; GFX11-NEXT: s_pack_ll_b32_b16 s2, s7, s8
-; GFX11-NEXT: v_add_nc_u32_e32 v3, 3, v8
-; GFX11-NEXT: v_and_b32_e32 v1, 0xff, v1
-; GFX11-NEXT: v_or_b32_e32 v0, v11, v0
-; GFX11-NEXT: v_or_b32_e32 v2, v21, v2
-; GFX11-NEXT: v_or_b32_e32 v4, v20, v4
-; GFX11-NEXT: v_and_b32_e32 v3, 0xff, v3
-; GFX11-NEXT: v_and_b32_e32 v5, 0xff, v5
-; GFX11-NEXT: v_and_b32_e32 v6, 0xff, v6
-; GFX11-NEXT: v_or_b32_e32 v1, v13, v1
-; GFX11-NEXT: v_add_nc_u32_e32 v0, 0x300, v0
-; GFX11-NEXT: v_or_b32_e32 v3, v9, v3
-; GFX11-NEXT: v_add_nc_u32_e32 v2, 0x300, v2
-; GFX11-NEXT: v_add_nc_u32_e32 v4, 0x300, v4
-; GFX11-NEXT: v_or_b32_e32 v5, v19, v5
-; GFX11-NEXT: v_or_b32_e32 v6, v14, v6
-; GFX11-NEXT: s_addk_i32 s4, 0x300
-; GFX11-NEXT: v_add_nc_u32_e32 v1, 0x300, v1
-; GFX11-NEXT: v_add_nc_u32_e32 v3, 0x300, v3
-; GFX11-NEXT: v_add_nc_u32_e32 v5, 0x300, v5
-; GFX11-NEXT: v_add_nc_u32_e32 v6, 0x300, v6
-; GFX11-NEXT: v_and_b32_e64 v7, 0xffff, s4
-; GFX11-NEXT: v_and_b32_e32 v8, 0xffff, v4
-; GFX11-NEXT: v_and_b32_e32 v2, 0xffff, v2
-; GFX11-NEXT: v_and_b32_e32 v0, 0xffff, v0
-; GFX11-NEXT: s_delay_alu instid0(VALU_DEP_4) | instskip(NEXT) | instid1(VALU_DEP_4)
-; GFX11-NEXT: v_lshl_or_b32 v4, v6, 16, v7
-; GFX11-NEXT: v_lshl_or_b32 v5, v5, 16, v8
-; GFX11-NEXT: s_delay_alu instid0(VALU_DEP_4) | instskip(NEXT) | instid1(VALU_DEP_4)
-; GFX11-NEXT: v_lshl_or_b32 v6, v3, 16, v2
-; GFX11-NEXT: v_lshl_or_b32 v7, v1, 16, v0
-; GFX11-NEXT: v_dual_mov_b32 v0, s0 :: v_dual_mov_b32 v1, s1
-; GFX11-NEXT: v_dual_mov_b32 v2, s2 :: v_dual_mov_b32 v3, s3
-; GFX11-NEXT: .LBB107_3: ; %end
-; GFX11-NEXT: s_setpc_b64 s[30:31]
-; GFX11-NEXT: .LBB107_4:
-; GFX11-NEXT: ; implicit-def: $vgpr0_vgpr1_vgpr2_vgpr3_vgpr4_vgpr5_vgpr6_vgpr7
-; GFX11-NEXT: s_branch .LBB107_2
+; GFX11-TRUE16-LABEL: bitcast_v32i8_to_v16f16_scalar:
+; GFX11-TRUE16: ; %bb.0:
+; GFX11-TRUE16-NEXT: s_waitcnt vmcnt(0) expcnt(0) lgkmcnt(0)
+; GFX11-TRUE16-NEXT: v_cmp_ne_u32_e32 vcc_lo, 0, v14
+; GFX11-TRUE16-NEXT: v_dual_mov_b32 v16, v6 :: v_dual_mov_b32 v17, v0
+; GFX11-TRUE16-NEXT: v_dual_mov_b32 v18, v4 :: v_dual_mov_b32 v15, v2
+; GFX11-TRUE16-NEXT: v_lshlrev_b32_e32 v14, 8, v1
+; GFX11-TRUE16-NEXT: v_lshlrev_b32_e32 v20, 8, v3
+; GFX11-TRUE16-NEXT: v_lshlrev_b32_e32 v19, 8, v5
+; GFX11-TRUE16-NEXT: v_lshlrev_b32_e32 v21, 8, v7
+; GFX11-TRUE16-NEXT: v_lshlrev_b32_e32 v9, 8, v9
+; GFX11-TRUE16-NEXT: v_lshlrev_b32_e32 v11, 8, v11
+; GFX11-TRUE16-NEXT: v_lshlrev_b32_e32 v13, 8, v13
+; GFX11-TRUE16-NEXT: s_mov_b32 s4, 0
+; GFX11-TRUE16-NEXT: s_and_b32 s5, vcc_lo, exec_lo
+; GFX11-TRUE16-NEXT: s_cbranch_scc0 .LBB107_4
+; GFX11-TRUE16-NEXT: ; %bb.1: ; %cmp.false
+; GFX11-TRUE16-NEXT: s_and_b32 s5, s0, 0xff
+; GFX11-TRUE16-NEXT: s_lshl_b32 s6, s1, 8
+; GFX11-TRUE16-NEXT: s_and_b32 s7, s2, 0xff
+; GFX11-TRUE16-NEXT: s_lshl_b32 s8, s3, 8
+; GFX11-TRUE16-NEXT: s_or_b32 s5, s5, s6
+; GFX11-TRUE16-NEXT: s_or_b32 s6, s7, s8
+; GFX11-TRUE16-NEXT: s_and_b32 s7, s16, 0xff
+; GFX11-TRUE16-NEXT: s_lshl_b32 s8, s17, 8
+; GFX11-TRUE16-NEXT: s_and_b32 s9, s18, 0xff
+; GFX11-TRUE16-NEXT: s_lshl_b32 s10, s19, 8
+; GFX11-TRUE16-NEXT: s_or_b32 s7, s7, s8
+; GFX11-TRUE16-NEXT: s_or_b32 s8, s9, s10
+; GFX11-TRUE16-NEXT: s_pack_ll_b32_b16 s5, s5, s6
+; GFX11-TRUE16-NEXT: s_pack_ll_b32_b16 s6, s7, s8
+; GFX11-TRUE16-NEXT: s_and_b32 s7, s20, 0xff
+; GFX11-TRUE16-NEXT: s_lshl_b32 s8, s21, 8
+; GFX11-TRUE16-NEXT: s_and_b32 s9, s22, 0xff
+; GFX11-TRUE16-NEXT: s_lshl_b32 s10, s23, 8
+; GFX11-TRUE16-NEXT: s_or_b32 s7, s7, s8
+; GFX11-TRUE16-NEXT: s_or_b32 s8, s9, s10
+; GFX11-TRUE16-NEXT: s_and_b32 s9, s24, 0xff
+; GFX11-TRUE16-NEXT: s_lshl_b32 s10, s25, 8
+; GFX11-TRUE16-NEXT: s_and_b32 s11, s26, 0xff
+; GFX11-TRUE16-NEXT: s_lshl_b32 s12, s27, 8
+; GFX11-TRUE16-NEXT: s_or_b32 s9, s9, s10
+; GFX11-TRUE16-NEXT: s_or_b32 s10, s11, s12
+; GFX11-TRUE16-NEXT: s_pack_ll_b32_b16 s7, s7, s8
+; GFX11-TRUE16-NEXT: s_pack_ll_b32_b16 s8, s9, s10
+; GFX11-TRUE16-NEXT: s_and_b32 s9, s28, 0xff
+; GFX11-TRUE16-NEXT: s_lshl_b32 s10, s29, 8
+; GFX11-TRUE16-NEXT: v_and_b32_e32 v7, 0xff, v10
+; GFX11-TRUE16-NEXT: s_or_b32 s9, s9, s10
+; GFX11-TRUE16-NEXT: s_delay_alu instid0(SALU_CYCLE_1) | instskip(SKIP_1) | instid1(VALU_DEP_3)
+; GFX11-TRUE16-NEXT: v_dual_mov_b32 v4, s9 :: v_dual_and_b32 v1, 0xff, v15
+; GFX11-TRUE16-NEXT: v_and_b32_e32 v0, 0xff, v17
+; GFX11-TRUE16-NEXT: v_or_b32_e32 v7, v7, v11
+; GFX11-TRUE16-NEXT: v_and_b32_e32 v2, 0xff, v18
+; GFX11-TRUE16-NEXT: s_delay_alu instid0(VALU_DEP_4) | instskip(SKIP_2) | instid1(VALU_DEP_2)
+; GFX11-TRUE16-NEXT: v_or_b32_e32 v5, v1, v20
+; GFX11-TRUE16-NEXT: v_and_b32_e32 v1, 0xff, v12
+; GFX11-TRUE16-NEXT: v_and_b32_e32 v3, 0xff, v16
+; GFX11-TRUE16-NEXT: v_or_b32_e32 v1, v1, v13
+; GFX11-TRUE16-NEXT: s_delay_alu instid0(VALU_DEP_2) | instskip(SKIP_2) | instid1(VALU_DEP_4)
+; GFX11-TRUE16-NEXT: v_or_b32_e32 v6, v3, v21
+; GFX11-TRUE16-NEXT: v_mov_b32_e32 v3, s8
+; GFX11-TRUE16-NEXT: v_or_b32_e32 v2, v2, v19
+; GFX11-TRUE16-NEXT: v_mov_b16_e32 v7.h, v1.l
+; GFX11-TRUE16-NEXT: v_mov_b32_e32 v1, s6
+; GFX11-TRUE16-NEXT: v_or_b32_e32 v0, v0, v14
+; GFX11-TRUE16-NEXT: s_delay_alu instid0(VALU_DEP_4) | instskip(SKIP_1) | instid1(VALU_DEP_3)
+; GFX11-TRUE16-NEXT: v_mov_b16_e32 v5.h, v2.l
+; GFX11-TRUE16-NEXT: v_mov_b32_e32 v2, s7
+; GFX11-TRUE16-NEXT: v_mov_b16_e32 v4.h, v0.l
+; GFX11-TRUE16-NEXT: v_and_b32_e32 v0, 0xff, v8
+; GFX11-TRUE16-NEXT: s_delay_alu instid0(VALU_DEP_1) | instskip(NEXT) | instid1(VALU_DEP_1)
+; GFX11-TRUE16-NEXT: v_or_b32_e32 v0, v0, v9
+; GFX11-TRUE16-NEXT: v_mov_b16_e32 v6.h, v0.l
+; GFX11-TRUE16-NEXT: v_mov_b32_e32 v0, s5
+; GFX11-TRUE16-NEXT: s_and_not1_b32 vcc_lo, exec_lo, s4
+; GFX11-TRUE16-NEXT: s_cbranch_vccnz .LBB107_3
+; GFX11-TRUE16-NEXT: .LBB107_2: ; %cmp.true
+; GFX11-TRUE16-NEXT: s_add_i32 s28, s28, 3
+; GFX11-TRUE16-NEXT: s_lshl_b32 s5, s29, 8
+; GFX11-TRUE16-NEXT: s_and_b32 s4, s28, 0xff
+; GFX11-TRUE16-NEXT: s_add_i32 s24, s24, 3
+; GFX11-TRUE16-NEXT: s_or_b32 s4, s5, s4
+; GFX11-TRUE16-NEXT: s_and_b32 s5, s24, 0xff
+; GFX11-TRUE16-NEXT: s_lshl_b32 s6, s25, 8
+; GFX11-TRUE16-NEXT: s_add_i32 s26, s26, 3
+; GFX11-TRUE16-NEXT: s_or_b32 s5, s6, s5
+; GFX11-TRUE16-NEXT: s_and_b32 s6, s26, 0xff
+; GFX11-TRUE16-NEXT: s_lshl_b32 s7, s27, 8
+; GFX11-TRUE16-NEXT: s_add_i32 s20, s20, 3
+; GFX11-TRUE16-NEXT: s_or_b32 s6, s7, s6
+; GFX11-TRUE16-NEXT: s_and_b32 s7, s20, 0xff
+; GFX11-TRUE16-NEXT: s_lshl_b32 s8, s21, 8
+; GFX11-TRUE16-NEXT: s_add_i32 s22, s22, 3
+; GFX11-TRUE16-NEXT: s_or_b32 s7, s8, s7
+; GFX11-TRUE16-NEXT: s_and_b32 s8, s22, 0xff
+; GFX11-TRUE16-NEXT: s_lshl_b32 s9, s23, 8
+; GFX11-TRUE16-NEXT: s_add_i32 s16, s16, 3
+; GFX11-TRUE16-NEXT: s_or_b32 s8, s9, s8
+; GFX11-TRUE16-NEXT: s_and_b32 s9, s16, 0xff
+; GFX11-TRUE16-NEXT: s_lshl_b32 s10, s17, 8
+; GFX11-TRUE16-NEXT: s_add_i32 s18, s18, 3
+; GFX11-TRUE16-NEXT: s_add_i32 s0, s0, 3
+; GFX11-TRUE16-NEXT: s_add_i32 s2, s2, 3
+; GFX11-TRUE16-NEXT: s_or_b32 s9, s10, s9
+; GFX11-TRUE16-NEXT: s_and_b32 s10, s18, 0xff
+; GFX11-TRUE16-NEXT: s_lshl_b32 s11, s19, 8
+; GFX11-TRUE16-NEXT: s_and_b32 s0, s0, 0xff
+; GFX11-TRUE16-NEXT: s_lshl_b32 s1, s1, 8
+; GFX11-TRUE16-NEXT: s_and_b32 s2, s2, 0xff
+; GFX11-TRUE16-NEXT: s_lshl_b32 s3, s3, 8
+; GFX11-TRUE16-NEXT: s_or_b32 s10, s11, s10
+; GFX11-TRUE16-NEXT: s_or_b32 s0, s1, s0
+; GFX11-TRUE16-NEXT: s_or_b32 s1, s3, s2
+; GFX11-TRUE16-NEXT: s_addk_i32 s9, 0x300
+; GFX11-TRUE16-NEXT: s_addk_i32 s0, 0x300
+; GFX11-TRUE16-NEXT: s_addk_i32 s1, 0x300
+; GFX11-TRUE16-NEXT: s_addk_i32 s10, 0x300
+; GFX11-TRUE16-NEXT: s_addk_i32 s4, 0x300
+; GFX11-TRUE16-NEXT: v_add_nc_u32_e32 v1, 3, v12
+; GFX11-TRUE16-NEXT: s_pack_ll_b32_b16 s0, s0, s1
+; GFX11-TRUE16-NEXT: s_pack_ll_b32_b16 s1, s9, s10
+; GFX11-TRUE16-NEXT: v_add_nc_u32_e32 v0, 3, v10
+; GFX11-TRUE16-NEXT: s_addk_i32 s7, 0x300
+; GFX11-TRUE16-NEXT: s_addk_i32 s8, 0x300
+; GFX11-TRUE16-NEXT: v_add_nc_u32_e32 v3, 3, v8
+; GFX11-TRUE16-NEXT: s_pack_ll_b32_b16 s2, s7, s8
+; GFX11-TRUE16-NEXT: v_and_b32_e32 v0, 0xff, v0
+; GFX11-TRUE16-NEXT: v_and_b32_e32 v1, 0xff, v1
+; GFX11-TRUE16-NEXT: v_add_nc_u32_e32 v4, 3, v15
+; GFX11-TRUE16-NEXT: v_add_nc_u32_e32 v5, 3, v18
+; GFX11-TRUE16-NEXT: s_addk_i32 s5, 0x300
+; GFX11-TRUE16-NEXT: v_or_b32_e32 v0, v11, v0
+; GFX11-TRUE16-NEXT: v_or_b32_e32 v1, v13, v1
+; GFX11-TRUE16-NEXT: v_and_b32_e32 v4, 0xff, v4
+; GFX11-TRUE16-NEXT: s_addk_i32 s6, 0x300
+; GFX11-TRUE16-NEXT: s_delay_alu instid0(VALU_DEP_3) | instskip(NEXT) | instid1(VALU_DEP_3)
+; GFX11-TRUE16-NEXT: v_add_nc_u32_e32 v7, 0x300, v0
+; GFX11-TRUE16-NEXT: v_add_nc_u32_e32 v0, 0x300, v1
+; GFX11-TRUE16-NEXT: s_pack_ll_b32_b16 s3, s5, s6
+; GFX11-TRUE16-NEXT: v_add_nc_u32_e32 v2, 3, v16
+; GFX11-TRUE16-NEXT: s_delay_alu instid0(VALU_DEP_2) | instskip(SKIP_1) | instid1(VALU_DEP_1)
+; GFX11-TRUE16-NEXT: v_mov_b16_e32 v7.h, v0.l
+; GFX11-TRUE16-NEXT: v_dual_mov_b32 v0, s0 :: v_dual_and_b32 v3, 0xff, v3
+; GFX11-TRUE16-NEXT: v_or_b32_e32 v1, v9, v3
+; GFX11-TRUE16-NEXT: v_or_b32_e32 v3, v20, v4
+; GFX11-TRUE16-NEXT: v_and_b32_e32 v4, 0xff, v5
+; GFX11-TRUE16-NEXT: s_delay_alu instid0(VALU_DEP_3) | instskip(NEXT) | instid1(VALU_DEP_3)
+; GFX11-TRUE16-NEXT: v_add_nc_u32_e32 v1, 0x300, v1
+; GFX11-TRUE16-NEXT: v_add_nc_u32_e32 v5, 0x300, v3
+; GFX11-TRUE16-NEXT: s_delay_alu instid0(VALU_DEP_3) | instskip(NEXT) | instid1(VALU_DEP_1)
+; GFX11-TRUE16-NEXT: v_or_b32_e32 v3, v19, v4
+; GFX11-TRUE16-NEXT: v_dual_mov_b32 v4, s4 :: v_dual_add_nc_u32 v3, 0x300, v3
+; GFX11-TRUE16-NEXT: s_delay_alu instid0(VALU_DEP_1) | instskip(SKIP_1) | instid1(VALU_DEP_1)
+; GFX11-TRUE16-NEXT: v_mov_b16_e32 v5.h, v3.l
+; GFX11-TRUE16-NEXT: v_dual_mov_b32 v3, s3 :: v_dual_and_b32 v2, 0xff, v2
+; GFX11-TRUE16-NEXT: v_or_b32_e32 v2, v21, v2
+; GFX11-TRUE16-NEXT: s_delay_alu instid0(VALU_DEP_1) | instskip(SKIP_2) | instid1(VALU_DEP_2)
+; GFX11-TRUE16-NEXT: v_add_nc_u32_e32 v6, 0x300, v2
+; GFX11-TRUE16-NEXT: v_add_nc_u32_e32 v2, 3, v17
+; GFX11-TRUE16-NEXT: v_mov_b16_e32 v6.h, v1.l
+; GFX11-TRUE16-NEXT: v_dual_mov_b32 v1, s1 :: v_dual_and_b32 v2, 0xff, v2
+; GFX11-TRUE16-NEXT: s_delay_alu instid0(VALU_DEP_1) | instskip(NEXT) | instid1(VALU_DEP_1)
+; GFX11-TRUE16-NEXT: v_or_b32_e32 v2, v14, v2
+; GFX11-TRUE16-NEXT: v_add_nc_u32_e32 v2, 0x300, v2
+; GFX11-TRUE16-NEXT: s_delay_alu instid0(VALU_DEP_1)
+; GFX11-TRUE16-NEXT: v_mov_b16_e32 v4.h, v2.l
+; GFX11-TRUE16-NEXT: v_mov_b32_e32 v2, s2
+; GFX11-TRUE16-NEXT: .LBB107_3: ; %end
+; GFX11-TRUE16-NEXT: s_setpc_b64 s[30:31]
+; GFX11-TRUE16-NEXT: .LBB107_4:
+; GFX11-TRUE16-NEXT: ; implicit-def: $vgpr0_vgpr1_vgpr2_vgpr3_vgpr4_vgpr5_vgpr6_vgpr7
+; GFX11-TRUE16-NEXT: s_branch .LBB107_2
+;
+; GFX11-FAKE16-LABEL: bitcast_v32i8_to_v16f16_scalar:
+; GFX11-FAKE16: ; %bb.0:
+; GFX11-FAKE16-NEXT: s_waitcnt vmcnt(0) expcnt(0) lgkmcnt(0)
+; GFX11-FAKE16-NEXT: v_cmp_ne_u32_e32 vcc_lo, 0, v14
+; GFX11-FAKE16-NEXT: v_dual_mov_b32 v16, v6 :: v_dual_mov_b32 v17, v0
+; GFX11-FAKE16-NEXT: v_dual_mov_b32 v18, v4 :: v_dual_mov_b32 v15, v2
+; GFX11-FAKE16-NEXT: v_lshlrev_b32_e32 v14, 8, v1
+; GFX11-FAKE16-NEXT: v_lshlrev_b32_e32 v20, 8, v3
+; GFX11-FAKE16-NEXT: v_lshlrev_b32_e32 v19, 8, v5
+; GFX11-FAKE16-NEXT: v_lshlrev_b32_e32 v21, 8, v7
+; GFX11-FAKE16-NEXT: v_lshlrev_b32_e32 v9, 8, v9
+; GFX11-FAKE16-NEXT: v_lshlrev_b32_e32 v11, 8, v11
+; GFX11-FAKE16-NEXT: v_lshlrev_b32_e32 v13, 8, v13
+; GFX11-FAKE16-NEXT: s_mov_b32 s4, 0
+; GFX11-FAKE16-NEXT: s_and_b32 s5, vcc_lo, exec_lo
+; GFX11-FAKE16-NEXT: s_cbranch_scc0 .LBB107_4
+; GFX11-FAKE16-NEXT: ; %bb.1: ; %cmp.false
+; GFX11-FAKE16-NEXT: s_and_b32 s5, s0, 0xff
+; GFX11-FAKE16-NEXT: s_lshl_b32 s6, s1, 8
+; GFX11-FAKE16-NEXT: s_and_b32 s7, s2, 0xff
+; GFX11-FAKE16-NEXT: s_lshl_b32 s8, s3, 8
+; GFX11-FAKE16-NEXT: s_or_b32 s5, s5, s6
+; GFX11-FAKE16-NEXT: s_or_b32 s6, s7, s8
+; GFX11-FAKE16-NEXT: s_and_b32 s7, s16, 0xff
+; GFX11-FAKE16-NEXT: s_lshl_b32 s8, s17, 8
+; GFX11-FAKE16-NEXT: s_and_b32 s9, s18, 0xff
+; GFX11-FAKE16-NEXT: s_lshl_b32 s10, s19, 8
+; GFX11-FAKE16-NEXT: s_or_b32 s7, s7, s8
+; GFX11-FAKE16-NEXT: s_or_b32 s8, s9, s10
+; GFX11-FAKE16-NEXT: s_pack_ll_b32_b16 s5, s5, s6
+; GFX11-FAKE16-NEXT: s_pack_ll_b32_b16 s6, s7, s8
+; GFX11-FAKE16-NEXT: s_and_b32 s7, s20, 0xff
+; GFX11-FAKE16-NEXT: s_lshl_b32 s8, s21, 8
+; GFX11-FAKE16-NEXT: s_and_b32 s9, s22, 0xff
+; GFX11-FAKE16-NEXT: s_lshl_b32 s10, s23, 8
+; GFX11-FAKE16-NEXT: s_or_b32 s7, s7, s8
+; GFX11-FAKE16-NEXT: s_or_b32 s8, s9, s10
+; GFX11-FAKE16-NEXT: s_and_b32 s9, s24, 0xff
+; GFX11-FAKE16-NEXT: s_lshl_b32 s10, s25, 8
+; GFX11-FAKE16-NEXT: v_and_b32_e32 v1, 0xff, v15
+; GFX11-FAKE16-NEXT: s_pack_ll_b32_b16 s7, s7, s8
+; GFX11-FAKE16-NEXT: s_or_b32 s8, s9, s10
+; GFX11-FAKE16-NEXT: s_and_b32 s9, s26, 0xff
+; GFX11-FAKE16-NEXT: s_lshl_b32 s10, s27, 8
+; GFX11-FAKE16-NEXT: v_and_b32_e32 v4, 0xff, v16
+; GFX11-FAKE16-NEXT: s_or_b32 s9, s9, s10
+; GFX11-FAKE16-NEXT: v_or_b32_e32 v1, v1, v20
+; GFX11-FAKE16-NEXT: v_and_b32_e32 v5, 0xff, v10
+; GFX11-FAKE16-NEXT: s_pack_ll_b32_b16 s8, s8, s9
+; GFX11-FAKE16-NEXT: v_and_b32_e32 v2, 0xff, v18
+; GFX11-FAKE16-NEXT: v_and_b32_e32 v0, 0xff, v17
+; GFX11-FAKE16-NEXT: v_and_b32_e32 v6, 0xff, v8
+; GFX11-FAKE16-NEXT: v_or_b32_e32 v4, v4, v21
+; GFX11-FAKE16-NEXT: v_and_b32_e32 v7, 0xff, v12
+; GFX11-FAKE16-NEXT: v_or_b32_e32 v2, v2, v19
+; GFX11-FAKE16-NEXT: v_or_b32_e32 v5, v5, v11
+; GFX11-FAKE16-NEXT: v_and_b32_e32 v1, 0xffff, v1
+; GFX11-FAKE16-NEXT: s_and_b32 s11, s28, 0xff
+; GFX11-FAKE16-NEXT: s_lshl_b32 s12, s29, 8
+; GFX11-FAKE16-NEXT: v_or_b32_e32 v6, v6, v9
+; GFX11-FAKE16-NEXT: s_or_b32 s10, s11, s12
+; GFX11-FAKE16-NEXT: v_and_b32_e32 v22, 0xffff, v4
+; GFX11-FAKE16-NEXT: v_and_b32_e64 v3, 0xffff, s10
+; GFX11-FAKE16-NEXT: v_or_b32_e32 v7, v7, v13
+; GFX11-FAKE16-NEXT: v_and_b32_e32 v23, 0xffff, v5
+; GFX11-FAKE16-NEXT: v_lshl_or_b32 v5, v2, 16, v1
+; GFX11-FAKE16-NEXT: v_mov_b32_e32 v1, s6
+; GFX11-FAKE16-NEXT: v_or_b32_e32 v0, v0, v14
+; GFX11-FAKE16-NEXT: v_lshl_or_b32 v6, v6, 16, v22
+; GFX11-FAKE16-NEXT: v_lshl_or_b32 v7, v7, 16, v23
+; GFX11-FAKE16-NEXT: v_mov_b32_e32 v2, s7
+; GFX11-FAKE16-NEXT: s_delay_alu instid0(VALU_DEP_4)
+; GFX11-FAKE16-NEXT: v_lshl_or_b32 v4, v0, 16, v3
+; GFX11-FAKE16-NEXT: v_dual_mov_b32 v0, s5 :: v_dual_mov_b32 v3, s8
+; GFX11-FAKE16-NEXT: s_and_not1_b32 vcc_lo, exec_lo, s4
+; GFX11-FAKE16-NEXT: s_cbranch_vccnz .LBB107_3
+; GFX11-FAKE16-NEXT: .LBB107_2: ; %cmp.true
+; GFX11-FAKE16-NEXT: s_add_i32 s28, s28, 3
+; GFX11-FAKE16-NEXT: s_lshl_b32 s5, s29, 8
+; GFX11-FAKE16-NEXT: s_and_b32 s4, s28, 0xff
+; GFX11-FAKE16-NEXT: s_add_i32 s24, s24, 3
+; GFX11-FAKE16-NEXT: s_or_b32 s4, s5, s4
+; GFX11-FAKE16-NEXT: s_and_b32 s5, s24, 0xff
+; GFX11-FAKE16-NEXT: s_lshl_b32 s6, s25, 8
+; GFX11-FAKE16-NEXT: s_add_i32 s26, s26, 3
+; GFX11-FAKE16-NEXT: s_or_b32 s5, s6, s5
+; GFX11-FAKE16-NEXT: s_and_b32 s6, s26, 0xff
+; GFX11-FAKE16-NEXT: s_lshl_b32 s7, s27, 8
+; GFX11-FAKE16-NEXT: s_add_i32 s20, s20, 3
+; GFX11-FAKE16-NEXT: s_or_b32 s6, s7, s6
+; GFX11-FAKE16-NEXT: s_and_b32 s7, s20, 0xff
+; GFX11-FAKE16-NEXT: s_lshl_b32 s8, s21, 8
+; GFX11-FAKE16-NEXT: s_add_i32 s22, s22, 3
+; GFX11-FAKE16-NEXT: s_or_b32 s7, s8, s7
+; GFX11-FAKE16-NEXT: s_and_b32 s8, s22, 0xff
+; GFX11-FAKE16-NEXT: s_lshl_b32 s9, s23, 8
+; GFX11-FAKE16-NEXT: s_add_i32 s16, s16, 3
+; GFX11-FAKE16-NEXT: s_or_b32 s8, s9, s8
+; GFX11-FAKE16-NEXT: s_and_b32 s9, s16, 0xff
+; GFX11-FAKE16-NEXT: s_lshl_b32 s10, s17, 8
+; GFX11-FAKE16-NEXT: s_add_i32 s18, s18, 3
+; GFX11-FAKE16-NEXT: s_add_i32 s0, s0, 3
+; GFX11-FAKE16-NEXT: s_add_i32 s2, s2, 3
+; GFX11-FAKE16-NEXT: s_or_b32 s9, s10, s9
+; GFX11-FAKE16-NEXT: s_and_b32 s10, s18, 0xff
+; GFX11-FAKE16-NEXT: s_lshl_b32 s11, s19, 8
+; GFX11-FAKE16-NEXT: s_and_b32 s0, s0, 0xff
+; GFX11-FAKE16-NEXT: s_lshl_b32 s1, s1, 8
+; GFX11-FAKE16-NEXT: s_and_b32 s2, s2, 0xff
+; GFX11-FAKE16-NEXT: s_lshl_b32 s3, s3, 8
+; GFX11-FAKE16-NEXT: s_or_b32 s10, s11, s10
+; GFX11-FAKE16-NEXT: s_or_b32 s0, s1, s0
+; GFX11-FAKE16-NEXT: s_or_b32 s1, s3, s2
+; GFX11-FAKE16-NEXT: s_addk_i32 s5, 0x300
+; GFX11-FAKE16-NEXT: s_addk_i32 s6, 0x300
+; GFX11-FAKE16-NEXT: s_addk_i32 s9, 0x300
+; GFX11-FAKE16-NEXT: s_addk_i32 s0, 0x300
+; GFX11-FAKE16-NEXT: s_addk_i32 s1, 0x300
+; GFX11-FAKE16-NEXT: s_addk_i32 s10, 0x300
+; GFX11-FAKE16-NEXT: v_add_nc_u32_e32 v4, 3, v15
+; GFX11-FAKE16-NEXT: s_pack_ll_b32_b16 s0, s0, s1
+; GFX11-FAKE16-NEXT: s_pack_ll_b32_b16 s1, s9, s10
+; GFX11-FAKE16-NEXT: v_add_nc_u32_e32 v0, 3, v10
+; GFX11-FAKE16-NEXT: s_pack_ll_b32_b16 s3, s5, s6
+; GFX11-FAKE16-NEXT: v_add_nc_u32_e32 v2, 3, v16
+; GFX11-FAKE16-NEXT: s_addk_i32 s7, 0x300
+; GFX11-FAKE16-NEXT: s_addk_i32 s8, 0x300
+; GFX11-FAKE16-NEXT: v_add_nc_u32_e32 v1, 3, v12
+; GFX11-FAKE16-NEXT: v_and_b32_e32 v0, 0xff, v0
+; GFX11-FAKE16-NEXT: v_add_nc_u32_e32 v5, 3, v18
+; GFX11-FAKE16-NEXT: v_and_b32_e32 v2, 0xff, v2
+; GFX11-FAKE16-NEXT: v_and_b32_e32 v4, 0xff, v4
+; GFX11-FAKE16-NEXT: v_add_nc_u32_e32 v6, 3, v17
+; GFX11-FAKE16-NEXT: s_pack_ll_b32_b16 s2, s7, s8
+; GFX11-FAKE16-NEXT: v_add_nc_u32_e32 v3, 3, v8
+; GFX11-FAKE16-NEXT: v_and_b32_e32 v1, 0xff, v1
+; GFX11-FAKE16-NEXT: v_or_b32_e32 v0, v11, v0
+; GFX11-FAKE16-NEXT: v_or_b32_e32 v2, v21, v2
+; GFX11-FAKE16-NEXT: v_or_b32_e32 v4, v20, v4
+; GFX11-FAKE16-NEXT: v_and_b32_e32 v3, 0xff, v3
+; GFX11-FAKE16-NEXT: v_and_b32_e32 v5, 0xff, v5
+; GFX11-FAKE16-NEXT: v_and_b32_e32 v6, 0xff, v6
+; GFX11-FAKE16-NEXT: v_or_b32_e32 v1, v13, v1
+; GFX11-FAKE16-NEXT: v_add_nc_u32_e32 v0, 0x300, v0
+; GFX11-FAKE16-NEXT: v_or_b32_e32 v3, v9, v3
+; GFX11-FAKE16-NEXT: v_add_nc_u32_e32 v2, 0x300, v2
+; GFX11-FAKE16-NEXT: v_add_nc_u32_e32 v4, 0x300, v4
+; GFX11-FAKE16-NEXT: v_or_b32_e32 v5, v19, v5
+; GFX11-FAKE16-NEXT: v_or_b32_e32 v6, v14, v6
+; GFX11-FAKE16-NEXT: s_addk_i32 s4, 0x300
+; GFX11-FAKE16-NEXT: v_add_nc_u32_e32 v1, 0x300, v1
+; GFX11-FAKE16-NEXT: v_add_nc_u32_e32 v3, 0x300, v3
+; GFX11-FAKE16-NEXT: v_add_nc_u32_e32 v5, 0x300, v5
+; GFX11-FAKE16-NEXT: v_add_nc_u32_e32 v6, 0x300, v6
+; GFX11-FAKE16-NEXT: v_and_b32_e64 v7, 0xffff, s4
+; GFX11-FAKE16-NEXT: v_and_b32_e32 v8, 0xffff, v4
+; GFX11-FAKE16-NEXT: v_and_b32_e32 v2, 0xffff, v2
+; GFX11-FAKE16-NEXT: v_and_b32_e32 v0, 0xffff, v0
+; GFX11-FAKE16-NEXT: s_delay_alu instid0(VALU_DEP_4) | instskip(NEXT) | instid1(VALU_DEP_4)
+; GFX11-FAKE16-NEXT: v_lshl_or_b32 v4, v6, 16, v7
+; GFX11-FAKE16-NEXT: v_lshl_or_b32 v5, v5, 16, v8
+; GFX11-FAKE16-NEXT: s_delay_alu instid0(VALU_DEP_4) | instskip(NEXT) | instid1(VALU_DEP_4)
+; GFX11-FAKE16-NEXT: v_lshl_or_b32 v6, v3, 16, v2
+; GFX11-FAKE16-NEXT: v_lshl_or_b32 v7, v1, 16, v0
+; GFX11-FAKE16-NEXT: v_dual_mov_b32 v0, s0 :: v_dual_mov_b32 v1, s1
+; GFX11-FAKE16-NEXT: v_dual_mov_b32 v2, s2 :: v_dual_mov_b32 v3, s3
+; GFX11-FAKE16-NEXT: .LBB107_3: ; %end
+; GFX11-FAKE16-NEXT: s_setpc_b64 s[30:31]
+; GFX11-FAKE16-NEXT: .LBB107_4:
+; GFX11-FAKE16-NEXT: ; implicit-def: $vgpr0_vgpr1_vgpr2_vgpr3_vgpr4_vgpr5_vgpr6_vgpr7
+; GFX11-FAKE16-NEXT: s_branch .LBB107_2
%cmp = icmp eq i32 %b, 0
br i1 %cmp, label %cmp.true, label %cmp.false
@@ -39446,281 +40837,552 @@ define inreg <32 x i8> @bitcast_v16bf16_to_v32i8_scalar(<16 x bfloat> inreg %a,
; GFX9-NEXT: v_mov_b32_e32 v28, v32
; GFX9-NEXT: s_setpc_b64 s[30:31]
;
-; GFX11-LABEL: bitcast_v16bf16_to_v32i8_scalar:
-; GFX11: ; %bb.0:
-; GFX11-NEXT: s_waitcnt vmcnt(0) expcnt(0) lgkmcnt(0)
-; GFX11-NEXT: s_cmp_lg_u32 s20, 0
-; GFX11-NEXT: s_mov_b32 s12, 0
-; GFX11-NEXT: s_cbranch_scc0 .LBB109_3
-; GFX11-NEXT: ; %bb.1: ; %cmp.false
-; GFX11-NEXT: s_lshr_b32 s27, s19, 24
-; GFX11-NEXT: s_lshr_b32 s46, s19, 16
-; GFX11-NEXT: s_lshr_b32 s40, s19, 8
-; GFX11-NEXT: s_lshr_b32 s42, s18, 16
-; GFX11-NEXT: s_lshr_b32 s41, s18, 8
-; GFX11-NEXT: s_lshr_b32 s23, s17, 24
-; GFX11-NEXT: s_lshr_b32 s45, s17, 16
-; GFX11-NEXT: s_lshr_b32 s26, s17, 8
-; GFX11-NEXT: s_lshr_b32 s29, s16, 16
-; GFX11-NEXT: s_lshr_b32 s28, s16, 8
-; GFX11-NEXT: s_lshr_b32 s15, s3, 24
-; GFX11-NEXT: s_lshr_b32 s44, s3, 16
-; GFX11-NEXT: s_lshr_b32 s22, s3, 8
-; GFX11-NEXT: s_lshr_b32 s25, s2, 16
-; GFX11-NEXT: s_lshr_b32 s24, s2, 8
-; GFX11-NEXT: s_lshr_b32 s13, s1, 24
-; GFX11-NEXT: s_lshr_b32 s43, s1, 16
-; GFX11-NEXT: s_lshr_b32 s14, s1, 8
-; GFX11-NEXT: s_lshr_b32 s21, s0, 16
-; GFX11-NEXT: s_lshr_b32 s20, s0, 8
-; GFX11-NEXT: s_lshr_b64 s[10:11], s[18:19], 24
-; GFX11-NEXT: s_lshr_b64 s[8:9], s[16:17], 24
-; GFX11-NEXT: s_lshr_b64 s[6:7], s[2:3], 24
-; GFX11-NEXT: s_lshr_b64 s[4:5], s[0:1], 24
-; GFX11-NEXT: s_and_not1_b32 vcc_lo, exec_lo, s12
-; GFX11-NEXT: s_cbranch_vccnz .LBB109_4
-; GFX11-NEXT: .LBB109_2: ; %cmp.true
-; GFX11-NEXT: s_lshl_b32 s4, s1, 16
-; GFX11-NEXT: s_and_b32 s1, s1, 0xffff0000
-; GFX11-NEXT: v_add_f32_e64 v0, 0x40c00000, s4
-; GFX11-NEXT: v_add_f32_e64 v1, 0x40c00000, s1
-; GFX11-NEXT: s_and_b32 s4, s0, 0xffff0000
-; GFX11-NEXT: s_lshl_b32 s0, s0, 16
-; GFX11-NEXT: v_add_f32_e64 v3, 0x40c00000, s4
-; GFX11-NEXT: v_bfe_u32 v2, v0, 16, 1
-; GFX11-NEXT: v_bfe_u32 v4, v1, 16, 1
-; GFX11-NEXT: v_or_b32_e32 v9, 0x400000, v1
-; GFX11-NEXT: v_or_b32_e32 v6, 0x400000, v0
-; GFX11-NEXT: v_cmp_u_f32_e32 vcc_lo, v0, v0
-; GFX11-NEXT: v_or_b32_e32 v11, 0x400000, v3
-; GFX11-NEXT: v_add_nc_u32_e32 v4, v4, v1
-; GFX11-NEXT: v_bfe_u32 v10, v3, 16, 1
-; GFX11-NEXT: s_and_b32 s1, s3, 0xffff0000
-; GFX11-NEXT: v_add_f32_e64 v5, 0x40c00000, s0
-; GFX11-NEXT: v_add_f32_e64 v7, 0x40c00000, s1
-; GFX11-NEXT: v_add_nc_u32_e32 v4, 0x7fff, v4
-; GFX11-NEXT: v_add_nc_u32_e32 v2, v2, v0
-; GFX11-NEXT: s_lshl_b32 s3, s3, 16
-; GFX11-NEXT: s_and_b32 s0, s2, 0xffff0000
-; GFX11-NEXT: v_add_f32_e64 v8, 0x40c00000, s3
-; GFX11-NEXT: s_delay_alu instid0(VALU_DEP_2) | instskip(NEXT) | instid1(VALU_DEP_1)
-; GFX11-NEXT: v_add_nc_u32_e32 v2, 0x7fff, v2
-; GFX11-NEXT: v_cndmask_b32_e32 v0, v2, v6, vcc_lo
-; GFX11-NEXT: v_cmp_u_f32_e32 vcc_lo, v1, v1
-; GFX11-NEXT: v_bfe_u32 v2, v5, 16, 1
-; GFX11-NEXT: s_delay_alu instid0(VALU_DEP_3) | instskip(SKIP_4) | instid1(VALU_DEP_4)
-; GFX11-NEXT: v_lshrrev_b32_e32 v35, 16, v0
-; GFX11-NEXT: v_dual_cndmask_b32 v1, v4, v9 :: v_dual_add_nc_u32 v4, v10, v3
-; GFX11-NEXT: v_bfe_u32 v10, v7, 16, 1
-; GFX11-NEXT: v_cmp_u_f32_e32 vcc_lo, v3, v3
-; GFX11-NEXT: v_add_nc_u32_e32 v2, v2, v5
-; GFX11-NEXT: v_lshrrev_b32_e32 v6, 16, v1
-; GFX11-NEXT: v_add_nc_u32_e32 v4, 0x7fff, v4
-; GFX11-NEXT: v_and_b32_e32 v1, 0xffff, v35
-; GFX11-NEXT: v_bfe_u32 v9, v8, 16, 1
-; GFX11-NEXT: v_or_b32_e32 v0, 0x400000, v5
-; GFX11-NEXT: s_delay_alu instid0(VALU_DEP_4)
-; GFX11-NEXT: v_dual_cndmask_b32 v3, v4, v11 :: v_dual_add_nc_u32 v12, 0x7fff, v2
-; GFX11-NEXT: v_add_nc_u32_e32 v4, v10, v7
-; GFX11-NEXT: v_lshl_or_b32 v2, v6, 16, v1
-; GFX11-NEXT: v_cmp_u_f32_e32 vcc_lo, v5, v5
-; GFX11-NEXT: v_or_b32_e32 v5, 0x400000, v8
-; GFX11-NEXT: v_lshrrev_b32_e32 v3, 16, v3
-; GFX11-NEXT: v_add_nc_u32_e32 v4, 0x7fff, v4
-; GFX11-NEXT: v_add_nc_u32_e32 v1, v9, v8
-; GFX11-NEXT: v_cndmask_b32_e32 v0, v12, v0, vcc_lo
-; GFX11-NEXT: v_cmp_u_f32_e32 vcc_lo, v8, v8
-; GFX11-NEXT: v_or_b32_e32 v9, 0x400000, v7
-; GFX11-NEXT: s_delay_alu instid0(VALU_DEP_4) | instskip(NEXT) | instid1(VALU_DEP_4)
-; GFX11-NEXT: v_add_nc_u32_e32 v1, 0x7fff, v1
-; GFX11-NEXT: v_lshrrev_b32_e32 v0, 16, v0
-; GFX11-NEXT: s_delay_alu instid0(VALU_DEP_2) | instskip(SKIP_3) | instid1(VALU_DEP_3)
-; GFX11-NEXT: v_cndmask_b32_e32 v1, v1, v5, vcc_lo
-; GFX11-NEXT: v_cmp_u_f32_e32 vcc_lo, v7, v7
-; GFX11-NEXT: v_add_f32_e64 v5, 0x40c00000, s0
-; GFX11-NEXT: s_lshl_b32 s0, s2, 16
-; GFX11-NEXT: v_lshrrev_b32_e32 v34, 16, v1
-; GFX11-NEXT: v_cndmask_b32_e32 v4, v4, v9, vcc_lo
-; GFX11-NEXT: s_delay_alu instid0(VALU_DEP_3) | instskip(SKIP_1) | instid1(VALU_DEP_3)
-; GFX11-NEXT: v_bfe_u32 v1, v5, 16, 1
-; GFX11-NEXT: v_cmp_u_f32_e32 vcc_lo, v5, v5
-; GFX11-NEXT: v_lshrrev_b32_e32 v14, 16, v4
-; GFX11-NEXT: v_add_f32_e64 v4, 0x40c00000, s0
-; GFX11-NEXT: s_and_b32 s0, s17, 0xffff0000
-; GFX11-NEXT: v_add_nc_u32_e32 v1, v1, v5
-; GFX11-NEXT: v_add_f32_e64 v9, 0x40c00000, s0
-; GFX11-NEXT: s_lshl_b32 s0, s17, 16
-; GFX11-NEXT: v_bfe_u32 v8, v4, 16, 1
-; GFX11-NEXT: s_delay_alu instid0(VALU_DEP_3) | instskip(NEXT) | instid1(VALU_DEP_3)
-; GFX11-NEXT: v_add_nc_u32_e32 v1, 0x7fff, v1
-; GFX11-NEXT: v_or_b32_e32 v16, 0x400000, v9
-; GFX11-NEXT: v_and_b32_e32 v7, 0xffff, v34
-; GFX11-NEXT: s_delay_alu instid0(VALU_DEP_4) | instskip(SKIP_1) | instid1(VALU_DEP_3)
-; GFX11-NEXT: v_add_nc_u32_e32 v8, v8, v4
-; GFX11-NEXT: v_bfe_u32 v12, v9, 16, 1
-; GFX11-NEXT: v_lshl_or_b32 v10, v14, 16, v7
-; GFX11-NEXT: v_or_b32_e32 v7, 0x400000, v5
-; GFX11-NEXT: v_add_f32_e64 v5, 0x40c00000, s0
-; GFX11-NEXT: s_and_b32 s0, s16, 0xffff0000
-; GFX11-NEXT: s_delay_alu instid0(SALU_CYCLE_1) | instskip(NEXT) | instid1(VALU_DEP_3)
-; GFX11-NEXT: v_add_f32_e64 v15, 0x40c00000, s0
-; GFX11-NEXT: v_cndmask_b32_e32 v1, v1, v7, vcc_lo
-; GFX11-NEXT: v_add_nc_u32_e32 v7, 0x7fff, v8
-; GFX11-NEXT: v_or_b32_e32 v8, 0x400000, v4
-; GFX11-NEXT: v_bfe_u32 v13, v5, 16, 1
-; GFX11-NEXT: v_cmp_u_f32_e32 vcc_lo, v4, v4
-; GFX11-NEXT: s_lshl_b32 s0, s16, 16
-; GFX11-NEXT: v_lshrrev_b32_e32 v1, 16, v1
-; GFX11-NEXT: v_add_f32_e64 v17, 0x40c00000, s0
-; GFX11-NEXT: s_and_b32 s0, s19, 0xffff0000
-; GFX11-NEXT: v_dual_cndmask_b32 v4, v7, v8 :: v_dual_add_nc_u32 v7, v12, v9
-; GFX11-NEXT: v_add_nc_u32_e32 v12, v13, v5
-; GFX11-NEXT: v_cmp_u_f32_e32 vcc_lo, v5, v5
-; GFX11-NEXT: v_bfe_u32 v13, v15, 16, 1
-; GFX11-NEXT: s_delay_alu instid0(VALU_DEP_4) | instskip(SKIP_4) | instid1(VALU_DEP_2)
-; GFX11-NEXT: v_lshrrev_b32_e32 v8, 16, v4
-; GFX11-NEXT: v_add_nc_u32_e32 v4, 0x7fff, v7
-; GFX11-NEXT: v_add_nc_u32_e32 v7, 0x7fff, v12
-; GFX11-NEXT: v_or_b32_e32 v12, 0x400000, v5
-; GFX11-NEXT: v_add_nc_u32_e32 v13, v13, v15
-; GFX11-NEXT: v_cndmask_b32_e32 v5, v7, v12, vcc_lo
-; GFX11-NEXT: v_cmp_u_f32_e32 vcc_lo, v9, v9
-; GFX11-NEXT: s_delay_alu instid0(VALU_DEP_3)
-; GFX11-NEXT: v_add_nc_u32_e32 v7, 0x7fff, v13
-; GFX11-NEXT: v_or_b32_e32 v12, 0x400000, v15
-; GFX11-NEXT: v_bfe_u32 v13, v17, 16, 1
-; GFX11-NEXT: v_lshrrev_b32_e32 v33, 16, v5
-; GFX11-NEXT: v_cndmask_b32_e32 v4, v4, v16, vcc_lo
-; GFX11-NEXT: v_cmp_u_f32_e32 vcc_lo, v15, v15
-; GFX11-NEXT: v_or_b32_e32 v15, 0x400000, v17
-; GFX11-NEXT: s_delay_alu instid0(VALU_DEP_4) | instskip(NEXT) | instid1(VALU_DEP_4)
-; GFX11-NEXT: v_and_b32_e32 v9, 0xffff, v33
-; GFX11-NEXT: v_lshrrev_b32_e32 v22, 16, v4
-; GFX11-NEXT: v_add_f32_e64 v4, 0x40c00000, s0
-; GFX11-NEXT: s_lshl_b32 s0, s19, 16
-; GFX11-NEXT: v_cndmask_b32_e32 v5, v7, v12, vcc_lo
-; GFX11-NEXT: v_add_nc_u32_e32 v7, v13, v17
-; GFX11-NEXT: v_add_f32_e64 v13, 0x40c00000, s0
-; GFX11-NEXT: s_lshl_b32 s0, s18, 16
-; GFX11-NEXT: v_cmp_u_f32_e32 vcc_lo, v17, v17
-; GFX11-NEXT: v_add_f32_e64 v16, 0x40c00000, s0
-; GFX11-NEXT: v_add_nc_u32_e32 v7, 0x7fff, v7
-; GFX11-NEXT: v_bfe_u32 v18, v13, 16, 1
-; GFX11-NEXT: s_and_b32 s0, s18, 0xffff0000
-; GFX11-NEXT: v_bfe_u32 v12, v4, 16, 1
-; GFX11-NEXT: v_bfe_u32 v20, v16, 16, 1
-; GFX11-NEXT: v_cndmask_b32_e32 v7, v7, v15, vcc_lo
-; GFX11-NEXT: v_add_nc_u32_e32 v15, v18, v13
-; GFX11-NEXT: v_add_f32_e64 v19, 0x40c00000, s0
-; GFX11-NEXT: v_or_b32_e32 v21, 0x400000, v13
-; GFX11-NEXT: v_add_nc_u32_e32 v18, v20, v16
-; GFX11-NEXT: v_cmp_u_f32_e32 vcc_lo, v13, v13
-; GFX11-NEXT: v_add_nc_u32_e32 v15, 0x7fff, v15
-; GFX11-NEXT: v_add_nc_u32_e32 v12, v12, v4
-; GFX11-NEXT: v_bfe_u32 v17, v19, 16, 1
-; GFX11-NEXT: v_add_nc_u32_e32 v18, 0x7fff, v18
-; GFX11-NEXT: v_or_b32_e32 v23, 0x400000, v16
-; GFX11-NEXT: v_cndmask_b32_e32 v13, v15, v21, vcc_lo
-; GFX11-NEXT: v_cmp_u_f32_e32 vcc_lo, v16, v16
-; GFX11-NEXT: v_and_b32_e32 v11, 0xffff, v0
-; GFX11-NEXT: v_add_nc_u32_e32 v12, 0x7fff, v12
-; GFX11-NEXT: v_or_b32_e32 v20, 0x400000, v4
-; GFX11-NEXT: v_add_nc_u32_e32 v17, v17, v19
-; GFX11-NEXT: v_cndmask_b32_e32 v16, v18, v23, vcc_lo
-; GFX11-NEXT: v_cmp_u_f32_e32 vcc_lo, v4, v4
-; GFX11-NEXT: v_lshrrev_b32_e32 v32, 16, v13
-; GFX11-NEXT: v_lshrrev_b32_e32 v5, 16, v5
-; GFX11-NEXT: v_add_nc_u32_e32 v15, 0x7fff, v17
-; GFX11-NEXT: v_or_b32_e32 v17, 0x400000, v19
-; GFX11-NEXT: v_cndmask_b32_e32 v4, v12, v20, vcc_lo
-; GFX11-NEXT: v_cmp_u_f32_e32 vcc_lo, v19, v19
-; GFX11-NEXT: v_lshrrev_b32_e32 v24, 16, v16
-; GFX11-NEXT: v_lshrrev_b32_e32 v16, 16, v7
-; GFX11-NEXT: v_lshl_or_b32 v18, v22, 16, v9
-; GFX11-NEXT: v_lshrrev_b32_e32 v30, 16, v4
-; GFX11-NEXT: v_cndmask_b32_e32 v12, v15, v17, vcc_lo
-; GFX11-NEXT: v_and_b32_e32 v4, 0xffff, v32
-; GFX11-NEXT: v_and_b32_e32 v13, 0xffff, v16
-; GFX11-NEXT: v_and_b32_e32 v15, 0xffff, v8
-; GFX11-NEXT: v_lshrrev_b32_e32 v23, 24, v18
-; GFX11-NEXT: v_lshrrev_b32_e32 v7, 16, v12
-; GFX11-NEXT: v_and_b32_e32 v12, 0xffff, v24
-; GFX11-NEXT: v_lshl_or_b32 v26, v30, 16, v4
-; GFX11-NEXT: v_lshl_or_b32 v17, v5, 16, v13
-; GFX11-NEXT: v_lshl_or_b32 v9, v1, 16, v15
-; GFX11-NEXT: v_lshl_or_b32 v1, v3, 16, v11
-; GFX11-NEXT: v_lshl_or_b32 v25, v7, 16, v12
-; GFX11-NEXT: v_lshrrev_b32_e32 v31, 24, v26
-; GFX11-NEXT: v_lshrrev_b64 v[19:20], 24, v[17:18]
-; GFX11-NEXT: v_lshrrev_b64 v[11:12], 24, v[9:10]
-; GFX11-NEXT: v_lshrrev_b64 v[3:4], 24, v[1:2]
-; GFX11-NEXT: v_lshrrev_b64 v[27:28], 24, v[25:26]
-; GFX11-NEXT: v_lshrrev_b32_e32 v29, 8, v26
-; GFX11-NEXT: v_lshrrev_b32_e32 v26, 16, v25
-; GFX11-NEXT: v_lshrrev_b32_e32 v25, 8, v25
-; GFX11-NEXT: v_lshrrev_b32_e32 v21, 8, v18
-; GFX11-NEXT: v_lshrrev_b32_e32 v18, 16, v17
-; GFX11-NEXT: v_lshrrev_b32_e32 v17, 8, v17
-; GFX11-NEXT: v_lshrrev_b32_e32 v15, 24, v10
-; GFX11-NEXT: v_lshrrev_b32_e32 v13, 8, v10
-; GFX11-NEXT: v_lshrrev_b32_e32 v10, 16, v9
-; GFX11-NEXT: v_lshrrev_b32_e32 v9, 8, v9
-; GFX11-NEXT: v_lshrrev_b32_e32 v7, 24, v2
-; GFX11-NEXT: v_lshrrev_b32_e32 v5, 8, v2
-; GFX11-NEXT: v_lshrrev_b32_e32 v2, 16, v1
-; GFX11-NEXT: v_lshrrev_b32_e32 v1, 8, v1
-; GFX11-NEXT: s_branch .LBB109_5
-; GFX11-NEXT: .LBB109_3:
-; GFX11-NEXT: ; implicit-def: $sgpr20
-; GFX11-NEXT: ; implicit-def: $sgpr21
-; GFX11-NEXT: ; implicit-def: $sgpr4
-; GFX11-NEXT: ; implicit-def: $sgpr14
-; GFX11-NEXT: ; implicit-def: $sgpr43
-; GFX11-NEXT: ; implicit-def: $sgpr13
-; GFX11-NEXT: ; implicit-def: $sgpr24
-; GFX11-NEXT: ; implicit-def: $sgpr25
-; GFX11-NEXT: ; implicit-def: $sgpr6
-; GFX11-NEXT: ; implicit-def: $sgpr22
-; GFX11-NEXT: ; implicit-def: $sgpr44
-; GFX11-NEXT: ; implicit-def: $sgpr15
-; GFX11-NEXT: ; implicit-def: $sgpr28
-; GFX11-NEXT: ; implicit-def: $sgpr29
-; GFX11-NEXT: ; implicit-def: $sgpr8
-; GFX11-NEXT: ; implicit-def: $sgpr26
-; GFX11-NEXT: ; implicit-def: $sgpr45
-; GFX11-NEXT: ; implicit-def: $sgpr23
-; GFX11-NEXT: ; implicit-def: $sgpr41
-; GFX11-NEXT: ; implicit-def: $sgpr42
-; GFX11-NEXT: ; implicit-def: $sgpr10
-; GFX11-NEXT: ; implicit-def: $sgpr40
-; GFX11-NEXT: ; implicit-def: $sgpr46
-; GFX11-NEXT: ; implicit-def: $sgpr27
-; GFX11-NEXT: s_branch .LBB109_2
-; GFX11-NEXT: .LBB109_4:
-; GFX11-NEXT: v_dual_mov_b32 v24, s18 :: v_dual_mov_b32 v33, s17
-; GFX11-NEXT: v_dual_mov_b32 v32, s19 :: v_dual_mov_b32 v35, s1
-; GFX11-NEXT: v_dual_mov_b32 v30, s46 :: v_dual_mov_b32 v25, s41
-; GFX11-NEXT: v_dual_mov_b32 v16, s16 :: v_dual_mov_b32 v31, s27
-; GFX11-NEXT: v_dual_mov_b32 v22, s45 :: v_dual_mov_b32 v29, s40
-; GFX11-NEXT: v_dual_mov_b32 v8, s2 :: v_dual_mov_b32 v17, s28
-; GFX11-NEXT: v_dual_mov_b32 v34, s3 :: v_dual_mov_b32 v23, s23
-; GFX11-NEXT: v_dual_mov_b32 v14, s44 :: v_dual_mov_b32 v21, s26
-; GFX11-NEXT: v_dual_mov_b32 v0, s0 :: v_dual_mov_b32 v9, s24
-; GFX11-NEXT: v_dual_mov_b32 v6, s43 :: v_dual_mov_b32 v15, s15
-; GFX11-NEXT: v_dual_mov_b32 v26, s42 :: v_dual_mov_b32 v13, s22
-; GFX11-NEXT: v_dual_mov_b32 v18, s29 :: v_dual_mov_b32 v1, s20
-; GFX11-NEXT: v_dual_mov_b32 v10, s25 :: v_dual_mov_b32 v7, s13
-; GFX11-NEXT: v_dual_mov_b32 v2, s21 :: v_dual_mov_b32 v5, s14
-; GFX11-NEXT: v_mov_b32_e32 v27, s10
-; GFX11-NEXT: v_mov_b32_e32 v19, s8
-; GFX11-NEXT: v_mov_b32_e32 v11, s6
-; GFX11-NEXT: v_mov_b32_e32 v3, s4
-; GFX11-NEXT: .LBB109_5: ; %end
-; GFX11-NEXT: v_mov_b32_e32 v4, v35
-; GFX11-NEXT: v_mov_b32_e32 v12, v34
-; GFX11-NEXT: v_mov_b32_e32 v20, v33
-; GFX11-NEXT: v_mov_b32_e32 v28, v32
-; GFX11-NEXT: s_setpc_b64 s[30:31]
+; GFX11-TRUE16-LABEL: bitcast_v16bf16_to_v32i8_scalar:
+; GFX11-TRUE16: ; %bb.0:
+; GFX11-TRUE16-NEXT: s_waitcnt vmcnt(0) expcnt(0) lgkmcnt(0)
+; GFX11-TRUE16-NEXT: s_cmp_lg_u32 s20, 0
+; GFX11-TRUE16-NEXT: s_mov_b32 s12, 0
+; GFX11-TRUE16-NEXT: s_cbranch_scc0 .LBB109_3
+; GFX11-TRUE16-NEXT: ; %bb.1: ; %cmp.false
+; GFX11-TRUE16-NEXT: s_lshr_b32 s27, s19, 24
+; GFX11-TRUE16-NEXT: s_lshr_b32 s46, s19, 16
+; GFX11-TRUE16-NEXT: s_lshr_b32 s40, s19, 8
+; GFX11-TRUE16-NEXT: s_lshr_b32 s42, s18, 16
+; GFX11-TRUE16-NEXT: s_lshr_b32 s41, s18, 8
+; GFX11-TRUE16-NEXT: s_lshr_b32 s23, s17, 24
+; GFX11-TRUE16-NEXT: s_lshr_b32 s45, s17, 16
+; GFX11-TRUE16-NEXT: s_lshr_b32 s26, s17, 8
+; GFX11-TRUE16-NEXT: s_lshr_b32 s29, s16, 16
+; GFX11-TRUE16-NEXT: s_lshr_b32 s28, s16, 8
+; GFX11-TRUE16-NEXT: s_lshr_b32 s15, s3, 24
+; GFX11-TRUE16-NEXT: s_lshr_b32 s44, s3, 16
+; GFX11-TRUE16-NEXT: s_lshr_b32 s22, s3, 8
+; GFX11-TRUE16-NEXT: s_lshr_b32 s25, s2, 16
+; GFX11-TRUE16-NEXT: s_lshr_b32 s24, s2, 8
+; GFX11-TRUE16-NEXT: s_lshr_b32 s13, s1, 24
+; GFX11-TRUE16-NEXT: s_lshr_b32 s43, s1, 16
+; GFX11-TRUE16-NEXT: s_lshr_b32 s14, s1, 8
+; GFX11-TRUE16-NEXT: s_lshr_b32 s21, s0, 16
+; GFX11-TRUE16-NEXT: s_lshr_b32 s20, s0, 8
+; GFX11-TRUE16-NEXT: s_lshr_b64 s[10:11], s[18:19], 24
+; GFX11-TRUE16-NEXT: s_lshr_b64 s[8:9], s[16:17], 24
+; GFX11-TRUE16-NEXT: s_lshr_b64 s[6:7], s[2:3], 24
+; GFX11-TRUE16-NEXT: s_lshr_b64 s[4:5], s[0:1], 24
+; GFX11-TRUE16-NEXT: s_and_not1_b32 vcc_lo, exec_lo, s12
+; GFX11-TRUE16-NEXT: s_cbranch_vccnz .LBB109_4
+; GFX11-TRUE16-NEXT: .LBB109_2: ; %cmp.true
+; GFX11-TRUE16-NEXT: s_lshl_b32 s4, s1, 16
+; GFX11-TRUE16-NEXT: s_and_b32 s1, s1, 0xffff0000
+; GFX11-TRUE16-NEXT: v_add_f32_e64 v0, 0x40c00000, s4
+; GFX11-TRUE16-NEXT: v_add_f32_e64 v1, 0x40c00000, s1
+; GFX11-TRUE16-NEXT: s_and_b32 s4, s0, 0xffff0000
+; GFX11-TRUE16-NEXT: s_and_b32 s1, s3, 0xffff0000
+; GFX11-TRUE16-NEXT: v_add_f32_e64 v3, 0x40c00000, s4
+; GFX11-TRUE16-NEXT: v_bfe_u32 v2, v0, 16, 1
+; GFX11-TRUE16-NEXT: v_bfe_u32 v4, v1, 16, 1
+; GFX11-TRUE16-NEXT: v_or_b32_e32 v6, 0x400000, v1
+; GFX11-TRUE16-NEXT: v_or_b32_e32 v8, 0x400000, v0
+; GFX11-TRUE16-NEXT: v_cmp_u_f32_e32 vcc_lo, v0, v0
+; GFX11-TRUE16-NEXT: s_lshl_b32 s0, s0, 16
+; GFX11-TRUE16-NEXT: v_add_nc_u32_e32 v4, v4, v1
+; GFX11-TRUE16-NEXT: v_add_f32_e64 v5, 0x40c00000, s0
+; GFX11-TRUE16-NEXT: v_bfe_u32 v9, v3, 16, 1
+; GFX11-TRUE16-NEXT: s_lshl_b32 s3, s3, 16
+; GFX11-TRUE16-NEXT: s_and_b32 s0, s2, 0xffff0000
+; GFX11-TRUE16-NEXT: v_add_nc_u32_e32 v4, 0x7fff, v4
+; GFX11-TRUE16-NEXT: v_add_nc_u32_e32 v2, v2, v0
+; GFX11-TRUE16-NEXT: v_bfe_u32 v10, v5, 16, 1
+; GFX11-TRUE16-NEXT: v_add_f32_e64 v7, 0x40c00000, s3
+; GFX11-TRUE16-NEXT: s_delay_alu instid0(VALU_DEP_3) | instskip(NEXT) | instid1(VALU_DEP_1)
+; GFX11-TRUE16-NEXT: v_add_nc_u32_e32 v2, 0x7fff, v2
+; GFX11-TRUE16-NEXT: v_cndmask_b32_e32 v0, v2, v8, vcc_lo
+; GFX11-TRUE16-NEXT: v_cmp_u_f32_e32 vcc_lo, v1, v1
+; GFX11-TRUE16-NEXT: v_add_nc_u32_e32 v2, v9, v3
+; GFX11-TRUE16-NEXT: v_add_nc_u32_e32 v8, v10, v5
+; GFX11-TRUE16-NEXT: v_or_b32_e32 v9, 0x400000, v5
+; GFX11-TRUE16-NEXT: v_lshrrev_b32_e32 v35, 16, v0
+; GFX11-TRUE16-NEXT: v_cndmask_b32_e32 v1, v4, v6, vcc_lo
+; GFX11-TRUE16-NEXT: v_or_b32_e32 v0, 0x400000, v3
+; GFX11-TRUE16-NEXT: v_add_nc_u32_e32 v4, 0x7fff, v2
+; GFX11-TRUE16-NEXT: v_cmp_u_f32_e32 vcc_lo, v3, v3
+; GFX11-TRUE16-NEXT: v_bfe_u32 v10, v7, 16, 1
+; GFX11-TRUE16-NEXT: v_lshrrev_b32_e32 v6, 16, v1
+; GFX11-TRUE16-NEXT: v_add_f32_e64 v1, 0x40c00000, s1
+; GFX11-TRUE16-NEXT: s_lshl_b32 s1, s18, 16
+; GFX11-TRUE16-NEXT: v_cndmask_b32_e32 v0, v4, v0, vcc_lo
+; GFX11-TRUE16-NEXT: v_add_f32_e64 v19, 0x40c00000, s1
+; GFX11-TRUE16-NEXT: v_cmp_u_f32_e32 vcc_lo, v5, v5
+; GFX11-TRUE16-NEXT: v_bfe_u32 v3, v1, 16, 1
+; GFX11-TRUE16-NEXT: v_add_nc_u32_e32 v5, v10, v7
+; GFX11-TRUE16-NEXT: v_lshrrev_b32_e32 v11, 16, v0
+; GFX11-TRUE16-NEXT: v_bfe_u32 v20, v19, 16, 1
+; GFX11-TRUE16-NEXT: v_mov_b16_e32 v2.l, v35.l
+; GFX11-TRUE16-NEXT: v_add_nc_u32_e32 v3, v3, v1
+; GFX11-TRUE16-NEXT: v_mov_b16_e32 v2.h, v6.l
+; GFX11-TRUE16-NEXT: s_delay_alu instid0(VALU_DEP_4) | instskip(NEXT) | instid1(VALU_DEP_3)
+; GFX11-TRUE16-NEXT: v_add_nc_u32_e32 v20, v20, v19
+; GFX11-TRUE16-NEXT: v_add_nc_u32_e32 v3, 0x7fff, v3
+; GFX11-TRUE16-NEXT: s_delay_alu instid0(VALU_DEP_2) | instskip(SKIP_1) | instid1(VALU_DEP_1)
+; GFX11-TRUE16-NEXT: v_add_nc_u32_e32 v20, 0x7fff, v20
+; GFX11-TRUE16-NEXT: v_add_nc_u32_e32 v8, 0x7fff, v8
+; GFX11-TRUE16-NEXT: v_cndmask_b32_e32 v4, v8, v9, vcc_lo
+; GFX11-TRUE16-NEXT: v_add_f32_e64 v8, 0x40c00000, s0
+; GFX11-TRUE16-NEXT: v_cmp_u_f32_e32 vcc_lo, v7, v7
+; GFX11-TRUE16-NEXT: s_lshl_b32 s0, s2, 16
+; GFX11-TRUE16-NEXT: v_or_b32_e32 v9, 0x400000, v1
+; GFX11-TRUE16-NEXT: v_lshrrev_b32_e32 v0, 16, v4
+; GFX11-TRUE16-NEXT: v_add_nc_u32_e32 v4, 0x7fff, v5
+; GFX11-TRUE16-NEXT: v_or_b32_e32 v5, 0x400000, v7
+; GFX11-TRUE16-NEXT: v_add_f32_e64 v7, 0x40c00000, s0
+; GFX11-TRUE16-NEXT: s_and_b32 s0, s17, 0xffff0000
+; GFX11-TRUE16-NEXT: s_delay_alu instid0(SALU_CYCLE_1) | instskip(NEXT) | instid1(VALU_DEP_3)
+; GFX11-TRUE16-NEXT: v_add_f32_e64 v12, 0x40c00000, s0
+; GFX11-TRUE16-NEXT: v_cndmask_b32_e32 v4, v4, v5, vcc_lo
+; GFX11-TRUE16-NEXT: v_bfe_u32 v5, v8, 16, 1
+; GFX11-TRUE16-NEXT: v_cmp_u_f32_e32 vcc_lo, v1, v1
+; GFX11-TRUE16-NEXT: s_lshl_b32 s0, s17, 16
+; GFX11-TRUE16-NEXT: v_or_b32_e32 v15, 0x400000, v12
+; GFX11-TRUE16-NEXT: v_lshrrev_b32_e32 v34, 16, v4
+; GFX11-TRUE16-NEXT: v_dual_cndmask_b32 v3, v3, v9 :: v_dual_add_nc_u32 v4, v5, v8
+; GFX11-TRUE16-NEXT: v_bfe_u32 v5, v7, 16, 1
+; GFX11-TRUE16-NEXT: v_cmp_u_f32_e32 vcc_lo, v8, v8
+; GFX11-TRUE16-NEXT: v_add_f32_e64 v13, 0x40c00000, s0
+; GFX11-TRUE16-NEXT: s_and_b32 s0, s16, 0xffff0000
+; GFX11-TRUE16-NEXT: v_lshrrev_b32_e32 v14, 16, v3
+; GFX11-TRUE16-NEXT: v_add_nc_u32_e32 v3, 0x7fff, v4
+; GFX11-TRUE16-NEXT: v_or_b32_e32 v4, 0x400000, v8
+; GFX11-TRUE16-NEXT: v_add_nc_u32_e32 v5, v5, v7
+; GFX11-TRUE16-NEXT: v_bfe_u32 v8, v12, 16, 1
+; GFX11-TRUE16-NEXT: v_mov_b16_e32 v1.l, v0.l
+; GFX11-TRUE16-NEXT: v_mov_b16_e32 v10.l, v34.l
+; GFX11-TRUE16-NEXT: s_delay_alu instid0(VALU_DEP_4) | instskip(SKIP_3) | instid1(VALU_DEP_4)
+; GFX11-TRUE16-NEXT: v_dual_cndmask_b32 v3, v3, v4 :: v_dual_add_nc_u32 v4, 0x7fff, v5
+; GFX11-TRUE16-NEXT: v_or_b32_e32 v5, 0x400000, v7
+; GFX11-TRUE16-NEXT: v_cmp_u_f32_e32 vcc_lo, v7, v7
+; GFX11-TRUE16-NEXT: v_bfe_u32 v7, v13, 16, 1
+; GFX11-TRUE16-NEXT: v_lshrrev_b32_e32 v3, 16, v3
+; GFX11-TRUE16-NEXT: v_mov_b16_e32 v10.h, v14.l
+; GFX11-TRUE16-NEXT: v_mov_b16_e32 v1.h, v11.l
+; GFX11-TRUE16-NEXT: v_cndmask_b32_e32 v4, v4, v5, vcc_lo
+; GFX11-TRUE16-NEXT: v_add_nc_u32_e32 v5, v8, v12
+; GFX11-TRUE16-NEXT: v_cmp_u_f32_e32 vcc_lo, v12, v12
+; GFX11-TRUE16-NEXT: v_add_nc_u32_e32 v7, v7, v13
+; GFX11-TRUE16-NEXT: v_or_b32_e32 v12, 0x400000, v13
+; GFX11-TRUE16-NEXT: v_lshrrev_b32_e32 v8, 16, v4
+; GFX11-TRUE16-NEXT: v_add_f32_e64 v4, 0x40c00000, s0
+; GFX11-TRUE16-NEXT: v_add_nc_u32_e32 v5, 0x7fff, v5
+; GFX11-TRUE16-NEXT: s_lshl_b32 s0, s16, 16
+; GFX11-TRUE16-NEXT: v_mov_b16_e32 v9.h, v3.l
+; GFX11-TRUE16-NEXT: v_mov_b16_e32 v9.l, v8.l
+; GFX11-TRUE16-NEXT: v_bfe_u32 v16, v4, 16, 1
+; GFX11-TRUE16-NEXT: v_cndmask_b32_e32 v5, v5, v15, vcc_lo
+; GFX11-TRUE16-NEXT: v_cmp_u_f32_e32 vcc_lo, v13, v13
+; GFX11-TRUE16-NEXT: v_or_b32_e32 v13, 0x400000, v4
+; GFX11-TRUE16-NEXT: v_add_nc_u32_e32 v7, 0x7fff, v7
+; GFX11-TRUE16-NEXT: v_add_nc_u32_e32 v15, v16, v4
+; GFX11-TRUE16-NEXT: v_lshrrev_b32_e32 v22, 16, v5
+; GFX11-TRUE16-NEXT: v_add_f32_e64 v5, 0x40c00000, s0
+; GFX11-TRUE16-NEXT: s_and_b32 s0, s19, 0xffff0000
+; GFX11-TRUE16-NEXT: s_delay_alu instid0(VALU_DEP_3) | instskip(SKIP_1) | instid1(VALU_DEP_3)
+; GFX11-TRUE16-NEXT: v_dual_cndmask_b32 v7, v7, v12 :: v_dual_add_nc_u32 v12, 0x7fff, v15
+; GFX11-TRUE16-NEXT: v_cmp_u_f32_e32 vcc_lo, v4, v4
+; GFX11-TRUE16-NEXT: v_bfe_u32 v15, v5, 16, 1
+; GFX11-TRUE16-NEXT: v_mov_b16_e32 v18.h, v22.l
+; GFX11-TRUE16-NEXT: s_delay_alu instid0(VALU_DEP_4)
+; GFX11-TRUE16-NEXT: v_lshrrev_b32_e32 v33, 16, v7
+; GFX11-TRUE16-NEXT: v_add_f32_e64 v7, 0x40c00000, s0
+; GFX11-TRUE16-NEXT: v_cndmask_b32_e32 v4, v12, v13, vcc_lo
+; GFX11-TRUE16-NEXT: v_add_nc_u32_e32 v12, v15, v5
+; GFX11-TRUE16-NEXT: s_lshl_b32 s0, s19, 16
+; GFX11-TRUE16-NEXT: v_or_b32_e32 v15, 0x400000, v5
+; GFX11-TRUE16-NEXT: v_bfe_u32 v13, v7, 16, 1
+; GFX11-TRUE16-NEXT: v_add_f32_e64 v16, 0x40c00000, s0
+; GFX11-TRUE16-NEXT: v_add_nc_u32_e32 v12, 0x7fff, v12
+; GFX11-TRUE16-NEXT: v_cmp_u_f32_e32 vcc_lo, v5, v5
+; GFX11-TRUE16-NEXT: s_and_b32 s0, s18, 0xffff0000
+; GFX11-TRUE16-NEXT: v_add_nc_u32_e32 v13, v13, v7
+; GFX11-TRUE16-NEXT: v_add_f32_e64 v17, 0x40c00000, s0
+; GFX11-TRUE16-NEXT: v_or_b32_e32 v21, 0x400000, v7
+; GFX11-TRUE16-NEXT: v_cndmask_b32_e32 v5, v12, v15, vcc_lo
+; GFX11-TRUE16-NEXT: v_bfe_u32 v12, v16, 16, 1
+; GFX11-TRUE16-NEXT: v_add_nc_u32_e32 v13, 0x7fff, v13
+; GFX11-TRUE16-NEXT: v_bfe_u32 v15, v17, 16, 1
+; GFX11-TRUE16-NEXT: v_cmp_u_f32_e32 vcc_lo, v7, v7
+; GFX11-TRUE16-NEXT: v_or_b32_e32 v23, 0x400000, v17
+; GFX11-TRUE16-NEXT: v_add_nc_u32_e32 v12, v12, v16
+; GFX11-TRUE16-NEXT: v_lshrrev_b32_e32 v4, 16, v4
+; GFX11-TRUE16-NEXT: v_add_nc_u32_e32 v15, v15, v17
+; GFX11-TRUE16-NEXT: v_cndmask_b32_e32 v7, v13, v21, vcc_lo
+; GFX11-TRUE16-NEXT: v_or_b32_e32 v13, 0x400000, v16
+; GFX11-TRUE16-NEXT: v_add_nc_u32_e32 v12, 0x7fff, v12
+; GFX11-TRUE16-NEXT: v_cmp_u_f32_e32 vcc_lo, v16, v16
+; GFX11-TRUE16-NEXT: v_add_nc_u32_e32 v15, 0x7fff, v15
+; GFX11-TRUE16-NEXT: v_or_b32_e32 v21, 0x400000, v19
+; GFX11-TRUE16-NEXT: v_lshrrev_b32_e32 v16, 16, v5
+; GFX11-TRUE16-NEXT: v_lshrrev_b32_e32 v30, 16, v7
+; GFX11-TRUE16-NEXT: v_cndmask_b32_e32 v12, v12, v13, vcc_lo
+; GFX11-TRUE16-NEXT: v_cmp_u_f32_e32 vcc_lo, v19, v19
+; GFX11-TRUE16-NEXT: v_mov_b16_e32 v18.l, v33.l
+; GFX11-TRUE16-NEXT: v_lshrrev_b32_e32 v7, 24, v2
+; GFX11-TRUE16-NEXT: v_mov_b16_e32 v26.h, v30.l
+; GFX11-TRUE16-NEXT: v_lshrrev_b32_e32 v32, 16, v12
+; GFX11-TRUE16-NEXT: v_cndmask_b32_e32 v13, v20, v21, vcc_lo
+; GFX11-TRUE16-NEXT: v_cmp_u_f32_e32 vcc_lo, v17, v17
+; GFX11-TRUE16-NEXT: v_mov_b16_e32 v17.l, v16.l
+; GFX11-TRUE16-NEXT: v_mov_b16_e32 v17.h, v4.l
+; GFX11-TRUE16-NEXT: v_mov_b16_e32 v26.l, v32.l
+; GFX11-TRUE16-NEXT: v_lshrrev_b32_e32 v24, 16, v13
+; GFX11-TRUE16-NEXT: v_cndmask_b32_e32 v15, v15, v23, vcc_lo
+; GFX11-TRUE16-NEXT: v_lshrrev_b64 v[11:12], 24, v[9:10]
+; GFX11-TRUE16-NEXT: v_lshrrev_b64 v[19:20], 24, v[17:18]
+; GFX11-TRUE16-NEXT: v_lshrrev_b64 v[3:4], 24, v[1:2]
+; GFX11-TRUE16-NEXT: v_mov_b16_e32 v25.l, v24.l
+; GFX11-TRUE16-NEXT: v_lshrrev_b32_e32 v5, 16, v15
+; GFX11-TRUE16-NEXT: v_lshrrev_b32_e32 v31, 24, v26
+; GFX11-TRUE16-NEXT: v_lshrrev_b32_e32 v29, 8, v26
+; GFX11-TRUE16-NEXT: v_lshrrev_b32_e32 v23, 24, v18
+; GFX11-TRUE16-NEXT: v_lshrrev_b32_e32 v21, 8, v18
+; GFX11-TRUE16-NEXT: v_mov_b16_e32 v25.h, v5.l
+; GFX11-TRUE16-NEXT: v_lshrrev_b32_e32 v18, 16, v17
+; GFX11-TRUE16-NEXT: v_lshrrev_b32_e32 v17, 8, v17
+; GFX11-TRUE16-NEXT: v_lshrrev_b32_e32 v15, 24, v10
+; GFX11-TRUE16-NEXT: v_lshrrev_b32_e32 v13, 8, v10
+; GFX11-TRUE16-NEXT: v_lshrrev_b64 v[27:28], 24, v[25:26]
+; GFX11-TRUE16-NEXT: v_lshrrev_b32_e32 v26, 16, v25
+; GFX11-TRUE16-NEXT: v_lshrrev_b32_e32 v25, 8, v25
+; GFX11-TRUE16-NEXT: v_lshrrev_b32_e32 v10, 16, v9
+; GFX11-TRUE16-NEXT: v_lshrrev_b32_e32 v9, 8, v9
+; GFX11-TRUE16-NEXT: v_lshrrev_b32_e32 v5, 8, v2
+; GFX11-TRUE16-NEXT: v_lshrrev_b32_e32 v2, 16, v1
+; GFX11-TRUE16-NEXT: v_lshrrev_b32_e32 v1, 8, v1
+; GFX11-TRUE16-NEXT: s_branch .LBB109_5
+; GFX11-TRUE16-NEXT: .LBB109_3:
+; GFX11-TRUE16-NEXT: ; implicit-def: $sgpr20
+; GFX11-TRUE16-NEXT: ; implicit-def: $sgpr21
+; GFX11-TRUE16-NEXT: ; implicit-def: $sgpr4
+; GFX11-TRUE16-NEXT: ; implicit-def: $sgpr14
+; GFX11-TRUE16-NEXT: ; implicit-def: $sgpr43
+; GFX11-TRUE16-NEXT: ; implicit-def: $sgpr13
+; GFX11-TRUE16-NEXT: ; implicit-def: $sgpr24
+; GFX11-TRUE16-NEXT: ; implicit-def: $sgpr25
+; GFX11-TRUE16-NEXT: ; implicit-def: $sgpr6
+; GFX11-TRUE16-NEXT: ; implicit-def: $sgpr22
+; GFX11-TRUE16-NEXT: ; implicit-def: $sgpr44
+; GFX11-TRUE16-NEXT: ; implicit-def: $sgpr15
+; GFX11-TRUE16-NEXT: ; implicit-def: $sgpr28
+; GFX11-TRUE16-NEXT: ; implicit-def: $sgpr29
+; GFX11-TRUE16-NEXT: ; implicit-def: $sgpr8
+; GFX11-TRUE16-NEXT: ; implicit-def: $sgpr26
+; GFX11-TRUE16-NEXT: ; implicit-def: $sgpr45
+; GFX11-TRUE16-NEXT: ; implicit-def: $sgpr23
+; GFX11-TRUE16-NEXT: ; implicit-def: $sgpr41
+; GFX11-TRUE16-NEXT: ; implicit-def: $sgpr42
+; GFX11-TRUE16-NEXT: ; implicit-def: $sgpr10
+; GFX11-TRUE16-NEXT: ; implicit-def: $sgpr40
+; GFX11-TRUE16-NEXT: ; implicit-def: $sgpr46
+; GFX11-TRUE16-NEXT: ; implicit-def: $sgpr27
+; GFX11-TRUE16-NEXT: s_branch .LBB109_2
+; GFX11-TRUE16-NEXT: .LBB109_4:
+; GFX11-TRUE16-NEXT: v_dual_mov_b32 v24, s18 :: v_dual_mov_b32 v33, s17
+; GFX11-TRUE16-NEXT: v_dual_mov_b32 v32, s19 :: v_dual_mov_b32 v35, s1
+; GFX11-TRUE16-NEXT: v_dual_mov_b32 v30, s46 :: v_dual_mov_b32 v25, s41
+; GFX11-TRUE16-NEXT: v_dual_mov_b32 v16, s16 :: v_dual_mov_b32 v31, s27
+; GFX11-TRUE16-NEXT: v_dual_mov_b32 v22, s45 :: v_dual_mov_b32 v29, s40
+; GFX11-TRUE16-NEXT: v_dual_mov_b32 v8, s2 :: v_dual_mov_b32 v17, s28
+; GFX11-TRUE16-NEXT: v_dual_mov_b32 v34, s3 :: v_dual_mov_b32 v23, s23
+; GFX11-TRUE16-NEXT: v_dual_mov_b32 v14, s44 :: v_dual_mov_b32 v21, s26
+; GFX11-TRUE16-NEXT: v_dual_mov_b32 v0, s0 :: v_dual_mov_b32 v9, s24
+; GFX11-TRUE16-NEXT: v_dual_mov_b32 v6, s43 :: v_dual_mov_b32 v15, s15
+; GFX11-TRUE16-NEXT: v_dual_mov_b32 v26, s42 :: v_dual_mov_b32 v13, s22
+; GFX11-TRUE16-NEXT: v_dual_mov_b32 v18, s29 :: v_dual_mov_b32 v1, s20
+; GFX11-TRUE16-NEXT: v_dual_mov_b32 v10, s25 :: v_dual_mov_b32 v7, s13
+; GFX11-TRUE16-NEXT: v_dual_mov_b32 v2, s21 :: v_dual_mov_b32 v5, s14
+; GFX11-TRUE16-NEXT: v_mov_b32_e32 v27, s10
+; GFX11-TRUE16-NEXT: v_mov_b32_e32 v19, s8
+; GFX11-TRUE16-NEXT: v_mov_b32_e32 v11, s6
+; GFX11-TRUE16-NEXT: v_mov_b32_e32 v3, s4
+; GFX11-TRUE16-NEXT: .LBB109_5: ; %end
+; GFX11-TRUE16-NEXT: v_mov_b32_e32 v4, v35
+; GFX11-TRUE16-NEXT: v_mov_b32_e32 v12, v34
+; GFX11-TRUE16-NEXT: v_mov_b32_e32 v20, v33
+; GFX11-TRUE16-NEXT: v_mov_b32_e32 v28, v32
+; GFX11-TRUE16-NEXT: s_setpc_b64 s[30:31]
+;
+; GFX11-FAKE16-LABEL: bitcast_v16bf16_to_v32i8_scalar:
+; GFX11-FAKE16: ; %bb.0:
+; GFX11-FAKE16-NEXT: s_waitcnt vmcnt(0) expcnt(0) lgkmcnt(0)
+; GFX11-FAKE16-NEXT: s_cmp_lg_u32 s20, 0
+; GFX11-FAKE16-NEXT: s_mov_b32 s12, 0
+; GFX11-FAKE16-NEXT: s_cbranch_scc0 .LBB109_3
+; GFX11-FAKE16-NEXT: ; %bb.1: ; %cmp.false
+; GFX11-FAKE16-NEXT: s_lshr_b32 s27, s19, 24
+; GFX11-FAKE16-NEXT: s_lshr_b32 s46, s19, 16
+; GFX11-FAKE16-NEXT: s_lshr_b32 s40, s19, 8
+; GFX11-FAKE16-NEXT: s_lshr_b32 s42, s18, 16
+; GFX11-FAKE16-NEXT: s_lshr_b32 s41, s18, 8
+; GFX11-FAKE16-NEXT: s_lshr_b32 s23, s17, 24
+; GFX11-FAKE16-NEXT: s_lshr_b32 s45, s17, 16
+; GFX11-FAKE16-NEXT: s_lshr_b32 s26, s17, 8
+; GFX11-FAKE16-NEXT: s_lshr_b32 s29, s16, 16
+; GFX11-FAKE16-NEXT: s_lshr_b32 s28, s16, 8
+; GFX11-FAKE16-NEXT: s_lshr_b32 s15, s3, 24
+; GFX11-FAKE16-NEXT: s_lshr_b32 s44, s3, 16
+; GFX11-FAKE16-NEXT: s_lshr_b32 s22, s3, 8
+; GFX11-FAKE16-NEXT: s_lshr_b32 s25, s2, 16
+; GFX11-FAKE16-NEXT: s_lshr_b32 s24, s2, 8
+; GFX11-FAKE16-NEXT: s_lshr_b32 s13, s1, 24
+; GFX11-FAKE16-NEXT: s_lshr_b32 s43, s1, 16
+; GFX11-FAKE16-NEXT: s_lshr_b32 s14, s1, 8
+; GFX11-FAKE16-NEXT: s_lshr_b32 s21, s0, 16
+; GFX11-FAKE16-NEXT: s_lshr_b32 s20, s0, 8
+; GFX11-FAKE16-NEXT: s_lshr_b64 s[10:11], s[18:19], 24
+; GFX11-FAKE16-NEXT: s_lshr_b64 s[8:9], s[16:17], 24
+; GFX11-FAKE16-NEXT: s_lshr_b64 s[6:7], s[2:3], 24
+; GFX11-FAKE16-NEXT: s_lshr_b64 s[4:5], s[0:1], 24
+; GFX11-FAKE16-NEXT: s_and_not1_b32 vcc_lo, exec_lo, s12
+; GFX11-FAKE16-NEXT: s_cbranch_vccnz .LBB109_4
+; GFX11-FAKE16-NEXT: .LBB109_2: ; %cmp.true
+; GFX11-FAKE16-NEXT: s_lshl_b32 s4, s1, 16
+; GFX11-FAKE16-NEXT: s_and_b32 s1, s1, 0xffff0000
+; GFX11-FAKE16-NEXT: v_add_f32_e64 v0, 0x40c00000, s4
+; GFX11-FAKE16-NEXT: v_add_f32_e64 v1, 0x40c00000, s1
+; GFX11-FAKE16-NEXT: s_and_b32 s4, s0, 0xffff0000
+; GFX11-FAKE16-NEXT: s_lshl_b32 s0, s0, 16
+; GFX11-FAKE16-NEXT: v_add_f32_e64 v3, 0x40c00000, s4
+; GFX11-FAKE16-NEXT: v_bfe_u32 v2, v0, 16, 1
+; GFX11-FAKE16-NEXT: v_bfe_u32 v4, v1, 16, 1
+; GFX11-FAKE16-NEXT: v_or_b32_e32 v9, 0x400000, v1
+; GFX11-FAKE16-NEXT: v_or_b32_e32 v6, 0x400000, v0
+; GFX11-FAKE16-NEXT: v_cmp_u_f32_e32 vcc_lo, v0, v0
+; GFX11-FAKE16-NEXT: v_or_b32_e32 v11, 0x400000, v3
+; GFX11-FAKE16-NEXT: v_add_nc_u32_e32 v4, v4, v1
+; GFX11-FAKE16-NEXT: v_bfe_u32 v10, v3, 16, 1
+; GFX11-FAKE16-NEXT: s_and_b32 s1, s3, 0xffff0000
+; GFX11-FAKE16-NEXT: v_add_f32_e64 v5, 0x40c00000, s0
+; GFX11-FAKE16-NEXT: v_add_f32_e64 v7, 0x40c00000, s1
+; GFX11-FAKE16-NEXT: v_add_nc_u32_e32 v4, 0x7fff, v4
+; GFX11-FAKE16-NEXT: v_add_nc_u32_e32 v2, v2, v0
+; GFX11-FAKE16-NEXT: s_lshl_b32 s3, s3, 16
+; GFX11-FAKE16-NEXT: s_and_b32 s0, s2, 0xffff0000
+; GFX11-FAKE16-NEXT: v_add_f32_e64 v8, 0x40c00000, s3
+; GFX11-FAKE16-NEXT: s_delay_alu instid0(VALU_DEP_2) | instskip(NEXT) | instid1(VALU_DEP_1)
+; GFX11-FAKE16-NEXT: v_add_nc_u32_e32 v2, 0x7fff, v2
+; GFX11-FAKE16-NEXT: v_cndmask_b32_e32 v0, v2, v6, vcc_lo
+; GFX11-FAKE16-NEXT: v_cmp_u_f32_e32 vcc_lo, v1, v1
+; GFX11-FAKE16-NEXT: v_bfe_u32 v2, v5, 16, 1
+; GFX11-FAKE16-NEXT: s_delay_alu instid0(VALU_DEP_3) | instskip(SKIP_4) | instid1(VALU_DEP_4)
+; GFX11-FAKE16-NEXT: v_lshrrev_b32_e32 v35, 16, v0
+; GFX11-FAKE16-NEXT: v_dual_cndmask_b32 v1, v4, v9 :: v_dual_add_nc_u32 v4, v10, v3
+; GFX11-FAKE16-NEXT: v_bfe_u32 v10, v7, 16, 1
+; GFX11-FAKE16-NEXT: v_cmp_u_f32_e32 vcc_lo, v3, v3
+; GFX11-FAKE16-NEXT: v_add_nc_u32_e32 v2, v2, v5
+; GFX11-FAKE16-NEXT: v_lshrrev_b32_e32 v6, 16, v1
+; GFX11-FAKE16-NEXT: v_add_nc_u32_e32 v4, 0x7fff, v4
+; GFX11-FAKE16-NEXT: v_and_b32_e32 v1, 0xffff, v35
+; GFX11-FAKE16-NEXT: v_bfe_u32 v9, v8, 16, 1
+; GFX11-FAKE16-NEXT: v_or_b32_e32 v0, 0x400000, v5
+; GFX11-FAKE16-NEXT: s_delay_alu instid0(VALU_DEP_4)
+; GFX11-FAKE16-NEXT: v_dual_cndmask_b32 v3, v4, v11 :: v_dual_add_nc_u32 v12, 0x7fff, v2
+; GFX11-FAKE16-NEXT: v_add_nc_u32_e32 v4, v10, v7
+; GFX11-FAKE16-NEXT: v_lshl_or_b32 v2, v6, 16, v1
+; GFX11-FAKE16-NEXT: v_cmp_u_f32_e32 vcc_lo, v5, v5
+; GFX11-FAKE16-NEXT: v_or_b32_e32 v5, 0x400000, v8
+; GFX11-FAKE16-NEXT: v_lshrrev_b32_e32 v3, 16, v3
+; GFX11-FAKE16-NEXT: v_add_nc_u32_e32 v4, 0x7fff, v4
+; GFX11-FAKE16-NEXT: v_add_nc_u32_e32 v1, v9, v8
+; GFX11-FAKE16-NEXT: v_cndmask_b32_e32 v0, v12, v0, vcc_lo
+; GFX11-FAKE16-NEXT: v_cmp_u_f32_e32 vcc_lo, v8, v8
+; GFX11-FAKE16-NEXT: v_or_b32_e32 v9, 0x400000, v7
+; GFX11-FAKE16-NEXT: s_delay_alu instid0(VALU_DEP_4) | instskip(NEXT) | instid1(VALU_DEP_4)
+; GFX11-FAKE16-NEXT: v_add_nc_u32_e32 v1, 0x7fff, v1
+; GFX11-FAKE16-NEXT: v_lshrrev_b32_e32 v0, 16, v0
+; GFX11-FAKE16-NEXT: s_delay_alu instid0(VALU_DEP_2) | instskip(SKIP_3) | instid1(VALU_DEP_3)
+; GFX11-FAKE16-NEXT: v_cndmask_b32_e32 v1, v1, v5, vcc_lo
+; GFX11-FAKE16-NEXT: v_cmp_u_f32_e32 vcc_lo, v7, v7
+; GFX11-FAKE16-NEXT: v_add_f32_e64 v5, 0x40c00000, s0
+; GFX11-FAKE16-NEXT: s_lshl_b32 s0, s2, 16
+; GFX11-FAKE16-NEXT: v_lshrrev_b32_e32 v34, 16, v1
+; GFX11-FAKE16-NEXT: v_cndmask_b32_e32 v4, v4, v9, vcc_lo
+; GFX11-FAKE16-NEXT: s_delay_alu instid0(VALU_DEP_3) | instskip(SKIP_1) | instid1(VALU_DEP_3)
+; GFX11-FAKE16-NEXT: v_bfe_u32 v1, v5, 16, 1
+; GFX11-FAKE16-NEXT: v_cmp_u_f32_e32 vcc_lo, v5, v5
+; GFX11-FAKE16-NEXT: v_lshrrev_b32_e32 v14, 16, v4
+; GFX11-FAKE16-NEXT: v_add_f32_e64 v4, 0x40c00000, s0
+; GFX11-FAKE16-NEXT: s_and_b32 s0, s17, 0xffff0000
+; GFX11-FAKE16-NEXT: v_add_nc_u32_e32 v1, v1, v5
+; GFX11-FAKE16-NEXT: v_add_f32_e64 v9, 0x40c00000, s0
+; GFX11-FAKE16-NEXT: s_lshl_b32 s0, s17, 16
+; GFX11-FAKE16-NEXT: v_bfe_u32 v8, v4, 16, 1
+; GFX11-FAKE16-NEXT: s_delay_alu instid0(VALU_DEP_3) | instskip(NEXT) | instid1(VALU_DEP_3)
+; GFX11-FAKE16-NEXT: v_add_nc_u32_e32 v1, 0x7fff, v1
+; GFX11-FAKE16-NEXT: v_or_b32_e32 v16, 0x400000, v9
+; GFX11-FAKE16-NEXT: v_and_b32_e32 v7, 0xffff, v34
+; GFX11-FAKE16-NEXT: s_delay_alu instid0(VALU_DEP_4) | instskip(SKIP_1) | instid1(VALU_DEP_3)
+; GFX11-FAKE16-NEXT: v_add_nc_u32_e32 v8, v8, v4
+; GFX11-FAKE16-NEXT: v_bfe_u32 v12, v9, 16, 1
+; GFX11-FAKE16-NEXT: v_lshl_or_b32 v10, v14, 16, v7
+; GFX11-FAKE16-NEXT: v_or_b32_e32 v7, 0x400000, v5
+; GFX11-FAKE16-NEXT: v_add_f32_e64 v5, 0x40c00000, s0
+; GFX11-FAKE16-NEXT: s_and_b32 s0, s16, 0xffff0000
+; GFX11-FAKE16-NEXT: s_delay_alu instid0(SALU_CYCLE_1) | instskip(NEXT) | instid1(VALU_DEP_3)
+; GFX11-FAKE16-NEXT: v_add_f32_e64 v15, 0x40c00000, s0
+; GFX11-FAKE16-NEXT: v_cndmask_b32_e32 v1, v1, v7, vcc_lo
+; GFX11-FAKE16-NEXT: v_add_nc_u32_e32 v7, 0x7fff, v8
+; GFX11-FAKE16-NEXT: v_or_b32_e32 v8, 0x400000, v4
+; GFX11-FAKE16-NEXT: v_bfe_u32 v13, v5, 16, 1
+; GFX11-FAKE16-NEXT: v_cmp_u_f32_e32 vcc_lo, v4, v4
+; GFX11-FAKE16-NEXT: s_lshl_b32 s0, s16, 16
+; GFX11-FAKE16-NEXT: v_lshrrev_b32_e32 v1, 16, v1
+; GFX11-FAKE16-NEXT: v_add_f32_e64 v17, 0x40c00000, s0
+; GFX11-FAKE16-NEXT: s_and_b32 s0, s19, 0xffff0000
+; GFX11-FAKE16-NEXT: v_dual_cndmask_b32 v4, v7, v8 :: v_dual_add_nc_u32 v7, v12, v9
+; GFX11-FAKE16-NEXT: v_add_nc_u32_e32 v12, v13, v5
+; GFX11-FAKE16-NEXT: v_cmp_u_f32_e32 vcc_lo, v5, v5
+; GFX11-FAKE16-NEXT: v_bfe_u32 v13, v15, 16, 1
+; GFX11-FAKE16-NEXT: s_delay_alu instid0(VALU_DEP_4) | instskip(SKIP_4) | instid1(VALU_DEP_2)
+; GFX11-FAKE16-NEXT: v_lshrrev_b32_e32 v8, 16, v4
+; GFX11-FAKE16-NEXT: v_add_nc_u32_e32 v4, 0x7fff, v7
+; GFX11-FAKE16-NEXT: v_add_nc_u32_e32 v7, 0x7fff, v12
+; GFX11-FAKE16-NEXT: v_or_b32_e32 v12, 0x400000, v5
+; GFX11-FAKE16-NEXT: v_add_nc_u32_e32 v13, v13, v15
+; GFX11-FAKE16-NEXT: v_cndmask_b32_e32 v5, v7, v12, vcc_lo
+; GFX11-FAKE16-NEXT: v_cmp_u_f32_e32 vcc_lo, v9, v9
+; GFX11-FAKE16-NEXT: s_delay_alu instid0(VALU_DEP_3)
+; GFX11-FAKE16-NEXT: v_add_nc_u32_e32 v7, 0x7fff, v13
+; GFX11-FAKE16-NEXT: v_or_b32_e32 v12, 0x400000, v15
+; GFX11-FAKE16-NEXT: v_bfe_u32 v13, v17, 16, 1
+; GFX11-FAKE16-NEXT: v_lshrrev_b32_e32 v33, 16, v5
+; GFX11-FAKE16-NEXT: v_cndmask_b32_e32 v4, v4, v16, vcc_lo
+; GFX11-FAKE16-NEXT: v_cmp_u_f32_e32 vcc_lo, v15, v15
+; GFX11-FAKE16-NEXT: v_or_b32_e32 v15, 0x400000, v17
+; GFX11-FAKE16-NEXT: s_delay_alu instid0(VALU_DEP_4) | instskip(NEXT) | instid1(VALU_DEP_4)
+; GFX11-FAKE16-NEXT: v_and_b32_e32 v9, 0xffff, v33
+; GFX11-FAKE16-NEXT: v_lshrrev_b32_e32 v22, 16, v4
+; GFX11-FAKE16-NEXT: v_add_f32_e64 v4, 0x40c00000, s0
+; GFX11-FAKE16-NEXT: s_lshl_b32 s0, s19, 16
+; GFX11-FAKE16-NEXT: v_cndmask_b32_e32 v5, v7, v12, vcc_lo
+; GFX11-FAKE16-NEXT: v_add_nc_u32_e32 v7, v13, v17
+; GFX11-FAKE16-NEXT: v_add_f32_e64 v13, 0x40c00000, s0
+; GFX11-FAKE16-NEXT: s_lshl_b32 s0, s18, 16
+; GFX11-FAKE16-NEXT: v_cmp_u_f32_e32 vcc_lo, v17, v17
+; GFX11-FAKE16-NEXT: v_add_f32_e64 v16, 0x40c00000, s0
+; GFX11-FAKE16-NEXT: v_add_nc_u32_e32 v7, 0x7fff, v7
+; GFX11-FAKE16-NEXT: v_bfe_u32 v18, v13, 16, 1
+; GFX11-FAKE16-NEXT: s_and_b32 s0, s18, 0xffff0000
+; GFX11-FAKE16-NEXT: v_bfe_u32 v12, v4, 16, 1
+; GFX11-FAKE16-NEXT: v_bfe_u32 v20, v16, 16, 1
+; GFX11-FAKE16-NEXT: v_cndmask_b32_e32 v7, v7, v15, vcc_lo
+; GFX11-FAKE16-NEXT: v_add_nc_u32_e32 v15, v18, v13
+; GFX11-FAKE16-NEXT: v_add_f32_e64 v19, 0x40c00000, s0
+; GFX11-FAKE16-NEXT: v_or_b32_e32 v21, 0x400000, v13
+; GFX11-FAKE16-NEXT: v_add_nc_u32_e32 v18, v20, v16
+; GFX11-FAKE16-NEXT: v_cmp_u_f32_e32 vcc_lo, v13, v13
+; GFX11-FAKE16-NEXT: v_add_nc_u32_e32 v15, 0x7fff, v15
+; GFX11-FAKE16-NEXT: v_add_nc_u32_e32 v12, v12, v4
+; GFX11-FAKE16-NEXT: v_bfe_u32 v17, v19, 16, 1
+; GFX11-FAKE16-NEXT: v_add_nc_u32_e32 v18, 0x7fff, v18
+; GFX11-FAKE16-NEXT: v_or_b32_e32 v23, 0x400000, v16
+; GFX11-FAKE16-NEXT: v_cndmask_b32_e32 v13, v15, v21, vcc_lo
+; GFX11-FAKE16-NEXT: v_cmp_u_f32_e32 vcc_lo, v16, v16
+; GFX11-FAKE16-NEXT: v_and_b32_e32 v11, 0xffff, v0
+; GFX11-FAKE16-NEXT: v_add_nc_u32_e32 v12, 0x7fff, v12
+; GFX11-FAKE16-NEXT: v_or_b32_e32 v20, 0x400000, v4
+; GFX11-FAKE16-NEXT: v_add_nc_u32_e32 v17, v17, v19
+; GFX11-FAKE16-NEXT: v_cndmask_b32_e32 v16, v18, v23, vcc_lo
+; GFX11-FAKE16-NEXT: v_cmp_u_f32_e32 vcc_lo, v4, v4
+; GFX11-FAKE16-NEXT: v_lshrrev_b32_e32 v32, 16, v13
+; GFX11-FAKE16-NEXT: v_lshrrev_b32_e32 v5, 16, v5
+; GFX11-FAKE16-NEXT: v_add_nc_u32_e32 v15, 0x7fff, v17
+; GFX11-FAKE16-NEXT: v_or_b32_e32 v17, 0x400000, v19
+; GFX11-FAKE16-NEXT: v_cndmask_b32_e32 v4, v12, v20, vcc_lo
+; GFX11-FAKE16-NEXT: v_cmp_u_f32_e32 vcc_lo, v19, v19
+; GFX11-FAKE16-NEXT: v_lshrrev_b32_e32 v24, 16, v16
+; GFX11-FAKE16-NEXT: v_lshrrev_b32_e32 v16, 16, v7
+; GFX11-FAKE16-NEXT: v_lshl_or_b32 v18, v22, 16, v9
+; GFX11-FAKE16-NEXT: v_lshrrev_b32_e32 v30, 16, v4
+; GFX11-FAKE16-NEXT: v_cndmask_b32_e32 v12, v15, v17, vcc_lo
+; GFX11-FAKE16-NEXT: v_and_b32_e32 v4, 0xffff, v32
+; GFX11-FAKE16-NEXT: v_and_b32_e32 v13, 0xffff, v16
+; GFX11-FAKE16-NEXT: v_and_b32_e32 v15, 0xffff, v8
+; GFX11-FAKE16-NEXT: v_lshrrev_b32_e32 v23, 24, v18
+; GFX11-FAKE16-NEXT: v_lshrrev_b32_e32 v7, 16, v12
+; GFX11-FAKE16-NEXT: v_and_b32_e32 v12, 0xffff, v24
+; GFX11-FAKE16-NEXT: v_lshl_or_b32 v26, v30, 16, v4
+; GFX11-FAKE16-NEXT: v_lshl_or_b32 v17, v5, 16, v13
+; GFX11-FAKE16-NEXT: v_lshl_or_b32 v9, v1, 16, v15
+; GFX11-FAKE16-NEXT: v_lshl_or_b32 v1, v3, 16, v11
+; GFX11-FAKE16-NEXT: v_lshl_or_b32 v25, v7, 16, v12
+; GFX11-FAKE16-NEXT: v_lshrrev_b32_e32 v31, 24, v26
+; GFX11-FAKE16-NEXT: v_lshrrev_b64 v[19:20], 24, v[17:18]
+; GFX11-FAKE16-NEXT: v_lshrrev_b64 v[11:12], 24, v[9:10]
+; GFX11-FAKE16-NEXT: v_lshrrev_b64 v[3:4], 24, v[1:2]
+; GFX11-FAKE16-NEXT: v_lshrrev_b64 v[27:28], 24, v[25:26]
+; GFX11-FAKE16-NEXT: v_lshrrev_b32_e32 v29, 8, v26
+; GFX11-FAKE16-NEXT: v_lshrrev_b32_e32 v26, 16, v25
+; GFX11-FAKE16-NEXT: v_lshrrev_b32_e32 v25, 8, v25
+; GFX11-FAKE16-NEXT: v_lshrrev_b32_e32 v21, 8, v18
+; GFX11-FAKE16-NEXT: v_lshrrev_b32_e32 v18, 16, v17
+; GFX11-FAKE16-NEXT: v_lshrrev_b32_e32 v17, 8, v17
+; GFX11-FAKE16-NEXT: v_lshrrev_b32_e32 v15, 24, v10
+; GFX11-FAKE16-NEXT: v_lshrrev_b32_e32 v13, 8, v10
+; GFX11-FAKE16-NEXT: v_lshrrev_b32_e32 v10, 16, v9
+; GFX11-FAKE16-NEXT: v_lshrrev_b32_e32 v9, 8, v9
+; GFX11-FAKE16-NEXT: v_lshrrev_b32_e32 v7, 24, v2
+; GFX11-FAKE16-NEXT: v_lshrrev_b32_e32 v5, 8, v2
+; GFX11-FAKE16-NEXT: v_lshrrev_b32_e32 v2, 16, v1
+; GFX11-FAKE16-NEXT: v_lshrrev_b32_e32 v1, 8, v1
+; GFX11-FAKE16-NEXT: s_branch .LBB109_5
+; GFX11-FAKE16-NEXT: .LBB109_3:
+; GFX11-FAKE16-NEXT: ; implicit-def: $sgpr20
+; GFX11-FAKE16-NEXT: ; implicit-def: $sgpr21
+; GFX11-FAKE16-NEXT: ; implicit-def: $sgpr4
+; GFX11-FAKE16-NEXT: ; implicit-def: $sgpr14
+; GFX11-FAKE16-NEXT: ; implicit-def: $sgpr43
+; GFX11-FAKE16-NEXT: ; implicit-def: $sgpr13
+; GFX11-FAKE16-NEXT: ; implicit-def: $sgpr24
+; GFX11-FAKE16-NEXT: ; implicit-def: $sgpr25
+; GFX11-FAKE16-NEXT: ; implicit-def: $sgpr6
+; GFX11-FAKE16-NEXT: ; implicit-def: $sgpr22
+; GFX11-FAKE16-NEXT: ; implicit-def: $sgpr44
+; GFX11-FAKE16-NEXT: ; implicit-def: $sgpr15
+; GFX11-FAKE16-NEXT: ; implicit-def: $sgpr28
+; GFX11-FAKE16-NEXT: ; implicit-def: $sgpr29
+; GFX11-FAKE16-NEXT: ; implicit-def: $sgpr8
+; GFX11-FAKE16-NEXT: ; implicit-def: $sgpr26
+; GFX11-FAKE16-NEXT: ; implicit-def: $sgpr45
+; GFX11-FAKE16-NEXT: ; implicit-def: $sgpr23
+; GFX11-FAKE16-NEXT: ; implicit-def: $sgpr41
+; GFX11-FAKE16-NEXT: ; implicit-def: $sgpr42
+; GFX11-FAKE16-NEXT: ; implicit-def: $sgpr10
+; GFX11-FAKE16-NEXT: ; implicit-def: $sgpr40
+; GFX11-FAKE16-NEXT: ; implicit-def: $sgpr46
+; GFX11-FAKE16-NEXT: ; implicit-def: $sgpr27
+; GFX11-FAKE16-NEXT: s_branch .LBB109_2
+; GFX11-FAKE16-NEXT: .LBB109_4:
+; GFX11-FAKE16-NEXT: v_dual_mov_b32 v24, s18 :: v_dual_mov_b32 v33, s17
+; GFX11-FAKE16-NEXT: v_dual_mov_b32 v32, s19 :: v_dual_mov_b32 v35, s1
+; GFX11-FAKE16-NEXT: v_dual_mov_b32 v30, s46 :: v_dual_mov_b32 v25, s41
+; GFX11-FAKE16-NEXT: v_dual_mov_b32 v16, s16 :: v_dual_mov_b32 v31, s27
+; GFX11-FAKE16-NEXT: v_dual_mov_b32 v22, s45 :: v_dual_mov_b32 v29, s40
+; GFX11-FAKE16-NEXT: v_dual_mov_b32 v8, s2 :: v_dual_mov_b32 v17, s28
+; GFX11-FAKE16-NEXT: v_dual_mov_b32 v34, s3 :: v_dual_mov_b32 v23, s23
+; GFX11-FAKE16-NEXT: v_dual_mov_b32 v14, s44 :: v_dual_mov_b32 v21, s26
+; GFX11-FAKE16-NEXT: v_dual_mov_b32 v0, s0 :: v_dual_mov_b32 v9, s24
+; GFX11-FAKE16-NEXT: v_dual_mov_b32 v6, s43 :: v_dual_mov_b32 v15, s15
+; GFX11-FAKE16-NEXT: v_dual_mov_b32 v26, s42 :: v_dual_mov_b32 v13, s22
+; GFX11-FAKE16-NEXT: v_dual_mov_b32 v18, s29 :: v_dual_mov_b32 v1, s20
+; GFX11-FAKE16-NEXT: v_dual_mov_b32 v10, s25 :: v_dual_mov_b32 v7, s13
+; GFX11-FAKE16-NEXT: v_dual_mov_b32 v2, s21 :: v_dual_mov_b32 v5, s14
+; GFX11-FAKE16-NEXT: v_mov_b32_e32 v27, s10
+; GFX11-FAKE16-NEXT: v_mov_b32_e32 v19, s8
+; GFX11-FAKE16-NEXT: v_mov_b32_e32 v11, s6
+; GFX11-FAKE16-NEXT: v_mov_b32_e32 v3, s4
+; GFX11-FAKE16-NEXT: .LBB109_5: ; %end
+; GFX11-FAKE16-NEXT: v_mov_b32_e32 v4, v35
+; GFX11-FAKE16-NEXT: v_mov_b32_e32 v12, v34
+; GFX11-FAKE16-NEXT: v_mov_b32_e32 v20, v33
+; GFX11-FAKE16-NEXT: v_mov_b32_e32 v28, v32
+; GFX11-FAKE16-NEXT: s_setpc_b64 s[30:31]
%cmp = icmp eq i32 %b, 0
br i1 %cmp, label %cmp.true, label %cmp.false
@@ -41235,177 +42897,351 @@ define inreg <16 x bfloat> @bitcast_v32i8_to_v16bf16_scalar(<32 x i8> inreg %a,
; GFX9-NEXT: ; implicit-def: $vgpr0_vgpr1_vgpr2_vgpr3_vgpr4_vgpr5_vgpr6_vgpr7
; GFX9-NEXT: s_branch .LBB111_2
;
-; GFX11-LABEL: bitcast_v32i8_to_v16bf16_scalar:
-; GFX11: ; %bb.0:
-; GFX11-NEXT: s_waitcnt vmcnt(0) expcnt(0) lgkmcnt(0)
-; GFX11-NEXT: v_cmp_ne_u32_e32 vcc_lo, 0, v14
-; GFX11-NEXT: v_dual_mov_b32 v16, v6 :: v_dual_mov_b32 v17, v0
-; GFX11-NEXT: v_dual_mov_b32 v18, v4 :: v_dual_mov_b32 v15, v2
-; GFX11-NEXT: v_lshlrev_b32_e32 v14, 8, v1
-; GFX11-NEXT: v_lshlrev_b32_e32 v20, 8, v3
-; GFX11-NEXT: v_lshlrev_b32_e32 v19, 8, v5
-; GFX11-NEXT: v_lshlrev_b32_e32 v21, 8, v7
-; GFX11-NEXT: v_lshlrev_b32_e32 v9, 8, v9
-; GFX11-NEXT: v_lshlrev_b32_e32 v11, 8, v11
-; GFX11-NEXT: v_lshlrev_b32_e32 v13, 8, v13
-; GFX11-NEXT: s_mov_b32 s4, 0
-; GFX11-NEXT: s_and_b32 s5, vcc_lo, exec_lo
-; GFX11-NEXT: s_cbranch_scc0 .LBB111_4
-; GFX11-NEXT: ; %bb.1: ; %cmp.false
-; GFX11-NEXT: s_and_b32 s5, s0, 0xff
-; GFX11-NEXT: s_lshl_b32 s6, s1, 8
-; GFX11-NEXT: s_and_b32 s7, s2, 0xff
-; GFX11-NEXT: s_lshl_b32 s8, s3, 8
-; GFX11-NEXT: s_or_b32 s5, s5, s6
-; GFX11-NEXT: s_or_b32 s6, s7, s8
-; GFX11-NEXT: s_and_b32 s7, s16, 0xff
-; GFX11-NEXT: s_lshl_b32 s8, s17, 8
-; GFX11-NEXT: s_and_b32 s9, s18, 0xff
-; GFX11-NEXT: s_lshl_b32 s10, s19, 8
-; GFX11-NEXT: s_or_b32 s7, s7, s8
-; GFX11-NEXT: s_or_b32 s8, s9, s10
-; GFX11-NEXT: s_pack_ll_b32_b16 s5, s5, s6
-; GFX11-NEXT: s_pack_ll_b32_b16 s6, s7, s8
-; GFX11-NEXT: s_and_b32 s7, s20, 0xff
-; GFX11-NEXT: s_lshl_b32 s8, s21, 8
-; GFX11-NEXT: s_and_b32 s9, s22, 0xff
-; GFX11-NEXT: s_lshl_b32 s10, s23, 8
-; GFX11-NEXT: s_or_b32 s7, s7, s8
-; GFX11-NEXT: s_or_b32 s8, s9, s10
-; GFX11-NEXT: s_and_b32 s9, s24, 0xff
-; GFX11-NEXT: s_lshl_b32 s10, s25, 8
-; GFX11-NEXT: v_and_b32_e32 v1, 0xff, v15
-; GFX11-NEXT: s_pack_ll_b32_b16 s7, s7, s8
-; GFX11-NEXT: s_or_b32 s8, s9, s10
-; GFX11-NEXT: s_and_b32 s9, s26, 0xff
-; GFX11-NEXT: s_lshl_b32 s10, s27, 8
-; GFX11-NEXT: v_and_b32_e32 v4, 0xff, v16
-; GFX11-NEXT: s_or_b32 s9, s9, s10
-; GFX11-NEXT: v_or_b32_e32 v1, v1, v20
-; GFX11-NEXT: v_and_b32_e32 v5, 0xff, v10
-; GFX11-NEXT: s_pack_ll_b32_b16 s8, s8, s9
-; GFX11-NEXT: v_and_b32_e32 v2, 0xff, v18
-; GFX11-NEXT: v_and_b32_e32 v0, 0xff, v17
-; GFX11-NEXT: v_and_b32_e32 v6, 0xff, v8
-; GFX11-NEXT: v_or_b32_e32 v4, v4, v21
-; GFX11-NEXT: v_and_b32_e32 v7, 0xff, v12
-; GFX11-NEXT: v_or_b32_e32 v2, v2, v19
-; GFX11-NEXT: v_or_b32_e32 v5, v5, v11
-; GFX11-NEXT: v_and_b32_e32 v1, 0xffff, v1
-; GFX11-NEXT: s_and_b32 s11, s28, 0xff
-; GFX11-NEXT: s_lshl_b32 s12, s29, 8
-; GFX11-NEXT: v_or_b32_e32 v6, v6, v9
-; GFX11-NEXT: s_or_b32 s10, s11, s12
-; GFX11-NEXT: v_and_b32_e32 v22, 0xffff, v4
-; GFX11-NEXT: v_and_b32_e64 v3, 0xffff, s10
-; GFX11-NEXT: v_or_b32_e32 v7, v7, v13
-; GFX11-NEXT: v_and_b32_e32 v23, 0xffff, v5
-; GFX11-NEXT: v_lshl_or_b32 v5, v2, 16, v1
-; GFX11-NEXT: v_mov_b32_e32 v1, s6
-; GFX11-NEXT: v_or_b32_e32 v0, v0, v14
-; GFX11-NEXT: v_lshl_or_b32 v6, v6, 16, v22
-; GFX11-NEXT: v_lshl_or_b32 v7, v7, 16, v23
-; GFX11-NEXT: v_mov_b32_e32 v2, s7
-; GFX11-NEXT: s_delay_alu instid0(VALU_DEP_4)
-; GFX11-NEXT: v_lshl_or_b32 v4, v0, 16, v3
-; GFX11-NEXT: v_dual_mov_b32 v0, s5 :: v_dual_mov_b32 v3, s8
-; GFX11-NEXT: s_and_not1_b32 vcc_lo, exec_lo, s4
-; GFX11-NEXT: s_cbranch_vccnz .LBB111_3
-; GFX11-NEXT: .LBB111_2: ; %cmp.true
-; GFX11-NEXT: s_add_i32 s28, s28, 3
-; GFX11-NEXT: s_lshl_b32 s5, s29, 8
-; GFX11-NEXT: s_and_b32 s4, s28, 0xff
-; GFX11-NEXT: s_add_i32 s24, s24, 3
-; GFX11-NEXT: s_or_b32 s4, s5, s4
-; GFX11-NEXT: s_and_b32 s5, s24, 0xff
-; GFX11-NEXT: s_lshl_b32 s6, s25, 8
-; GFX11-NEXT: s_add_i32 s26, s26, 3
-; GFX11-NEXT: s_or_b32 s5, s6, s5
-; GFX11-NEXT: s_and_b32 s6, s26, 0xff
-; GFX11-NEXT: s_lshl_b32 s7, s27, 8
-; GFX11-NEXT: s_add_i32 s20, s20, 3
-; GFX11-NEXT: s_or_b32 s6, s7, s6
-; GFX11-NEXT: s_and_b32 s7, s20, 0xff
-; GFX11-NEXT: s_lshl_b32 s8, s21, 8
-; GFX11-NEXT: s_add_i32 s22, s22, 3
-; GFX11-NEXT: s_or_b32 s7, s8, s7
-; GFX11-NEXT: s_and_b32 s8, s22, 0xff
-; GFX11-NEXT: s_lshl_b32 s9, s23, 8
-; GFX11-NEXT: s_add_i32 s16, s16, 3
-; GFX11-NEXT: s_or_b32 s8, s9, s8
-; GFX11-NEXT: s_and_b32 s9, s16, 0xff
-; GFX11-NEXT: s_lshl_b32 s10, s17, 8
-; GFX11-NEXT: s_add_i32 s18, s18, 3
-; GFX11-NEXT: s_add_i32 s0, s0, 3
-; GFX11-NEXT: s_add_i32 s2, s2, 3
-; GFX11-NEXT: s_or_b32 s9, s10, s9
-; GFX11-NEXT: s_and_b32 s10, s18, 0xff
-; GFX11-NEXT: s_lshl_b32 s11, s19, 8
-; GFX11-NEXT: s_and_b32 s0, s0, 0xff
-; GFX11-NEXT: s_lshl_b32 s1, s1, 8
-; GFX11-NEXT: s_and_b32 s2, s2, 0xff
-; GFX11-NEXT: s_lshl_b32 s3, s3, 8
-; GFX11-NEXT: s_or_b32 s10, s11, s10
-; GFX11-NEXT: s_or_b32 s0, s1, s0
-; GFX11-NEXT: s_or_b32 s1, s3, s2
-; GFX11-NEXT: s_addk_i32 s5, 0x300
-; GFX11-NEXT: s_addk_i32 s6, 0x300
-; GFX11-NEXT: s_addk_i32 s9, 0x300
-; GFX11-NEXT: s_addk_i32 s0, 0x300
-; GFX11-NEXT: s_addk_i32 s1, 0x300
-; GFX11-NEXT: s_addk_i32 s10, 0x300
-; GFX11-NEXT: v_add_nc_u32_e32 v4, 3, v15
-; GFX11-NEXT: s_pack_ll_b32_b16 s0, s0, s1
-; GFX11-NEXT: s_pack_ll_b32_b16 s1, s9, s10
-; GFX11-NEXT: v_add_nc_u32_e32 v0, 3, v10
-; GFX11-NEXT: s_pack_ll_b32_b16 s3, s5, s6
-; GFX11-NEXT: v_add_nc_u32_e32 v2, 3, v16
-; GFX11-NEXT: s_addk_i32 s7, 0x300
-; GFX11-NEXT: s_addk_i32 s8, 0x300
-; GFX11-NEXT: v_add_nc_u32_e32 v1, 3, v12
-; GFX11-NEXT: v_and_b32_e32 v0, 0xff, v0
-; GFX11-NEXT: v_add_nc_u32_e32 v5, 3, v18
-; GFX11-NEXT: v_and_b32_e32 v2, 0xff, v2
-; GFX11-NEXT: v_and_b32_e32 v4, 0xff, v4
-; GFX11-NEXT: v_add_nc_u32_e32 v6, 3, v17
-; GFX11-NEXT: s_pack_ll_b32_b16 s2, s7, s8
-; GFX11-NEXT: v_add_nc_u32_e32 v3, 3, v8
-; GFX11-NEXT: v_and_b32_e32 v1, 0xff, v1
-; GFX11-NEXT: v_or_b32_e32 v0, v11, v0
-; GFX11-NEXT: v_or_b32_e32 v2, v21, v2
-; GFX11-NEXT: v_or_b32_e32 v4, v20, v4
-; GFX11-NEXT: v_and_b32_e32 v3, 0xff, v3
-; GFX11-NEXT: v_and_b32_e32 v5, 0xff, v5
-; GFX11-NEXT: v_and_b32_e32 v6, 0xff, v6
-; GFX11-NEXT: v_or_b32_e32 v1, v13, v1
-; GFX11-NEXT: v_add_nc_u32_e32 v0, 0x300, v0
-; GFX11-NEXT: v_or_b32_e32 v3, v9, v3
-; GFX11-NEXT: v_add_nc_u32_e32 v2, 0x300, v2
-; GFX11-NEXT: v_add_nc_u32_e32 v4, 0x300, v4
-; GFX11-NEXT: v_or_b32_e32 v5, v19, v5
-; GFX11-NEXT: v_or_b32_e32 v6, v14, v6
-; GFX11-NEXT: s_addk_i32 s4, 0x300
-; GFX11-NEXT: v_add_nc_u32_e32 v1, 0x300, v1
-; GFX11-NEXT: v_add_nc_u32_e32 v3, 0x300, v3
-; GFX11-NEXT: v_add_nc_u32_e32 v5, 0x300, v5
-; GFX11-NEXT: v_add_nc_u32_e32 v6, 0x300, v6
-; GFX11-NEXT: v_and_b32_e64 v7, 0xffff, s4
-; GFX11-NEXT: v_and_b32_e32 v8, 0xffff, v4
-; GFX11-NEXT: v_and_b32_e32 v2, 0xffff, v2
-; GFX11-NEXT: v_and_b32_e32 v0, 0xffff, v0
-; GFX11-NEXT: s_delay_alu instid0(VALU_DEP_4) | instskip(NEXT) | instid1(VALU_DEP_4)
-; GFX11-NEXT: v_lshl_or_b32 v4, v6, 16, v7
-; GFX11-NEXT: v_lshl_or_b32 v5, v5, 16, v8
-; GFX11-NEXT: s_delay_alu instid0(VALU_DEP_4) | instskip(NEXT) | instid1(VALU_DEP_4)
-; GFX11-NEXT: v_lshl_or_b32 v6, v3, 16, v2
-; GFX11-NEXT: v_lshl_or_b32 v7, v1, 16, v0
-; GFX11-NEXT: v_dual_mov_b32 v0, s0 :: v_dual_mov_b32 v1, s1
-; GFX11-NEXT: v_dual_mov_b32 v2, s2 :: v_dual_mov_b32 v3, s3
-; GFX11-NEXT: .LBB111_3: ; %end
-; GFX11-NEXT: s_setpc_b64 s[30:31]
-; GFX11-NEXT: .LBB111_4:
-; GFX11-NEXT: ; implicit-def: $vgpr0_vgpr1_vgpr2_vgpr3_vgpr4_vgpr5_vgpr6_vgpr7
-; GFX11-NEXT: s_branch .LBB111_2
+; GFX11-TRUE16-LABEL: bitcast_v32i8_to_v16bf16_scalar:
+; GFX11-TRUE16: ; %bb.0:
+; GFX11-TRUE16-NEXT: s_waitcnt vmcnt(0) expcnt(0) lgkmcnt(0)
+; GFX11-TRUE16-NEXT: v_cmp_ne_u32_e32 vcc_lo, 0, v14
+; GFX11-TRUE16-NEXT: v_dual_mov_b32 v16, v6 :: v_dual_mov_b32 v17, v0
+; GFX11-TRUE16-NEXT: v_dual_mov_b32 v18, v4 :: v_dual_mov_b32 v15, v2
+; GFX11-TRUE16-NEXT: v_lshlrev_b32_e32 v14, 8, v1
+; GFX11-TRUE16-NEXT: v_lshlrev_b32_e32 v20, 8, v3
+; GFX11-TRUE16-NEXT: v_lshlrev_b32_e32 v19, 8, v5
+; GFX11-TRUE16-NEXT: v_lshlrev_b32_e32 v21, 8, v7
+; GFX11-TRUE16-NEXT: v_lshlrev_b32_e32 v9, 8, v9
+; GFX11-TRUE16-NEXT: v_lshlrev_b32_e32 v11, 8, v11
+; GFX11-TRUE16-NEXT: v_lshlrev_b32_e32 v13, 8, v13
+; GFX11-TRUE16-NEXT: s_mov_b32 s4, 0
+; GFX11-TRUE16-NEXT: s_and_b32 s5, vcc_lo, exec_lo
+; GFX11-TRUE16-NEXT: s_cbranch_scc0 .LBB111_4
+; GFX11-TRUE16-NEXT: ; %bb.1: ; %cmp.false
+; GFX11-TRUE16-NEXT: s_and_b32 s5, s0, 0xff
+; GFX11-TRUE16-NEXT: s_lshl_b32 s6, s1, 8
+; GFX11-TRUE16-NEXT: s_and_b32 s7, s2, 0xff
+; GFX11-TRUE16-NEXT: s_lshl_b32 s8, s3, 8
+; GFX11-TRUE16-NEXT: s_or_b32 s5, s5, s6
+; GFX11-TRUE16-NEXT: s_or_b32 s6, s7, s8
+; GFX11-TRUE16-NEXT: s_and_b32 s7, s16, 0xff
+; GFX11-TRUE16-NEXT: s_lshl_b32 s8, s17, 8
+; GFX11-TRUE16-NEXT: s_and_b32 s9, s18, 0xff
+; GFX11-TRUE16-NEXT: s_lshl_b32 s10, s19, 8
+; GFX11-TRUE16-NEXT: s_or_b32 s7, s7, s8
+; GFX11-TRUE16-NEXT: s_or_b32 s8, s9, s10
+; GFX11-TRUE16-NEXT: s_pack_ll_b32_b16 s5, s5, s6
+; GFX11-TRUE16-NEXT: s_pack_ll_b32_b16 s6, s7, s8
+; GFX11-TRUE16-NEXT: s_and_b32 s7, s20, 0xff
+; GFX11-TRUE16-NEXT: s_lshl_b32 s8, s21, 8
+; GFX11-TRUE16-NEXT: s_and_b32 s9, s22, 0xff
+; GFX11-TRUE16-NEXT: s_lshl_b32 s10, s23, 8
+; GFX11-TRUE16-NEXT: s_or_b32 s7, s7, s8
+; GFX11-TRUE16-NEXT: s_or_b32 s8, s9, s10
+; GFX11-TRUE16-NEXT: s_and_b32 s9, s24, 0xff
+; GFX11-TRUE16-NEXT: s_lshl_b32 s10, s25, 8
+; GFX11-TRUE16-NEXT: s_and_b32 s11, s26, 0xff
+; GFX11-TRUE16-NEXT: s_lshl_b32 s12, s27, 8
+; GFX11-TRUE16-NEXT: s_or_b32 s9, s9, s10
+; GFX11-TRUE16-NEXT: s_or_b32 s10, s11, s12
+; GFX11-TRUE16-NEXT: s_pack_ll_b32_b16 s7, s7, s8
+; GFX11-TRUE16-NEXT: s_pack_ll_b32_b16 s8, s9, s10
+; GFX11-TRUE16-NEXT: s_and_b32 s9, s28, 0xff
+; GFX11-TRUE16-NEXT: s_lshl_b32 s10, s29, 8
+; GFX11-TRUE16-NEXT: v_and_b32_e32 v7, 0xff, v10
+; GFX11-TRUE16-NEXT: s_or_b32 s9, s9, s10
+; GFX11-TRUE16-NEXT: s_delay_alu instid0(SALU_CYCLE_1) | instskip(SKIP_1) | instid1(VALU_DEP_3)
+; GFX11-TRUE16-NEXT: v_dual_mov_b32 v4, s9 :: v_dual_and_b32 v1, 0xff, v15
+; GFX11-TRUE16-NEXT: v_and_b32_e32 v0, 0xff, v17
+; GFX11-TRUE16-NEXT: v_or_b32_e32 v7, v7, v11
+; GFX11-TRUE16-NEXT: v_and_b32_e32 v2, 0xff, v18
+; GFX11-TRUE16-NEXT: s_delay_alu instid0(VALU_DEP_4) | instskip(SKIP_2) | instid1(VALU_DEP_2)
+; GFX11-TRUE16-NEXT: v_or_b32_e32 v5, v1, v20
+; GFX11-TRUE16-NEXT: v_and_b32_e32 v1, 0xff, v12
+; GFX11-TRUE16-NEXT: v_and_b32_e32 v3, 0xff, v16
+; GFX11-TRUE16-NEXT: v_or_b32_e32 v1, v1, v13
+; GFX11-TRUE16-NEXT: s_delay_alu instid0(VALU_DEP_2) | instskip(SKIP_2) | instid1(VALU_DEP_4)
+; GFX11-TRUE16-NEXT: v_or_b32_e32 v6, v3, v21
+; GFX11-TRUE16-NEXT: v_mov_b32_e32 v3, s8
+; GFX11-TRUE16-NEXT: v_or_b32_e32 v2, v2, v19
+; GFX11-TRUE16-NEXT: v_mov_b16_e32 v7.h, v1.l
+; GFX11-TRUE16-NEXT: v_mov_b32_e32 v1, s6
+; GFX11-TRUE16-NEXT: v_or_b32_e32 v0, v0, v14
+; GFX11-TRUE16-NEXT: s_delay_alu instid0(VALU_DEP_4) | instskip(SKIP_1) | instid1(VALU_DEP_3)
+; GFX11-TRUE16-NEXT: v_mov_b16_e32 v5.h, v2.l
+; GFX11-TRUE16-NEXT: v_mov_b32_e32 v2, s7
+; GFX11-TRUE16-NEXT: v_mov_b16_e32 v4.h, v0.l
+; GFX11-TRUE16-NEXT: v_and_b32_e32 v0, 0xff, v8
+; GFX11-TRUE16-NEXT: s_delay_alu instid0(VALU_DEP_1) | instskip(NEXT) | instid1(VALU_DEP_1)
+; GFX11-TRUE16-NEXT: v_or_b32_e32 v0, v0, v9
+; GFX11-TRUE16-NEXT: v_mov_b16_e32 v6.h, v0.l
+; GFX11-TRUE16-NEXT: v_mov_b32_e32 v0, s5
+; GFX11-TRUE16-NEXT: s_and_not1_b32 vcc_lo, exec_lo, s4
+; GFX11-TRUE16-NEXT: s_cbranch_vccnz .LBB111_3
+; GFX11-TRUE16-NEXT: .LBB111_2: ; %cmp.true
+; GFX11-TRUE16-NEXT: s_add_i32 s28, s28, 3
+; GFX11-TRUE16-NEXT: s_lshl_b32 s5, s29, 8
+; GFX11-TRUE16-NEXT: s_and_b32 s4, s28, 0xff
+; GFX11-TRUE16-NEXT: s_add_i32 s24, s24, 3
+; GFX11-TRUE16-NEXT: s_or_b32 s4, s5, s4
+; GFX11-TRUE16-NEXT: s_and_b32 s5, s24, 0xff
+; GFX11-TRUE16-NEXT: s_lshl_b32 s6, s25, 8
+; GFX11-TRUE16-NEXT: s_add_i32 s26, s26, 3
+; GFX11-TRUE16-NEXT: s_or_b32 s5, s6, s5
+; GFX11-TRUE16-NEXT: s_and_b32 s6, s26, 0xff
+; GFX11-TRUE16-NEXT: s_lshl_b32 s7, s27, 8
+; GFX11-TRUE16-NEXT: s_add_i32 s20, s20, 3
+; GFX11-TRUE16-NEXT: s_or_b32 s6, s7, s6
+; GFX11-TRUE16-NEXT: s_and_b32 s7, s20, 0xff
+; GFX11-TRUE16-NEXT: s_lshl_b32 s8, s21, 8
+; GFX11-TRUE16-NEXT: s_add_i32 s22, s22, 3
+; GFX11-TRUE16-NEXT: s_or_b32 s7, s8, s7
+; GFX11-TRUE16-NEXT: s_and_b32 s8, s22, 0xff
+; GFX11-TRUE16-NEXT: s_lshl_b32 s9, s23, 8
+; GFX11-TRUE16-NEXT: s_add_i32 s16, s16, 3
+; GFX11-TRUE16-NEXT: s_or_b32 s8, s9, s8
+; GFX11-TRUE16-NEXT: s_and_b32 s9, s16, 0xff
+; GFX11-TRUE16-NEXT: s_lshl_b32 s10, s17, 8
+; GFX11-TRUE16-NEXT: s_add_i32 s18, s18, 3
+; GFX11-TRUE16-NEXT: s_add_i32 s0, s0, 3
+; GFX11-TRUE16-NEXT: s_add_i32 s2, s2, 3
+; GFX11-TRUE16-NEXT: s_or_b32 s9, s10, s9
+; GFX11-TRUE16-NEXT: s_and_b32 s10, s18, 0xff
+; GFX11-TRUE16-NEXT: s_lshl_b32 s11, s19, 8
+; GFX11-TRUE16-NEXT: s_and_b32 s0, s0, 0xff
+; GFX11-TRUE16-NEXT: s_lshl_b32 s1, s1, 8
+; GFX11-TRUE16-NEXT: s_and_b32 s2, s2, 0xff
+; GFX11-TRUE16-NEXT: s_lshl_b32 s3, s3, 8
+; GFX11-TRUE16-NEXT: s_or_b32 s10, s11, s10
+; GFX11-TRUE16-NEXT: s_or_b32 s0, s1, s0
+; GFX11-TRUE16-NEXT: s_or_b32 s1, s3, s2
+; GFX11-TRUE16-NEXT: s_addk_i32 s9, 0x300
+; GFX11-TRUE16-NEXT: s_addk_i32 s0, 0x300
+; GFX11-TRUE16-NEXT: s_addk_i32 s1, 0x300
+; GFX11-TRUE16-NEXT: s_addk_i32 s10, 0x300
+; GFX11-TRUE16-NEXT: s_addk_i32 s4, 0x300
+; GFX11-TRUE16-NEXT: v_add_nc_u32_e32 v1, 3, v12
+; GFX11-TRUE16-NEXT: s_pack_ll_b32_b16 s0, s0, s1
+; GFX11-TRUE16-NEXT: s_pack_ll_b32_b16 s1, s9, s10
+; GFX11-TRUE16-NEXT: v_add_nc_u32_e32 v0, 3, v10
+; GFX11-TRUE16-NEXT: s_addk_i32 s7, 0x300
+; GFX11-TRUE16-NEXT: s_addk_i32 s8, 0x300
+; GFX11-TRUE16-NEXT: v_add_nc_u32_e32 v3, 3, v8
+; GFX11-TRUE16-NEXT: s_pack_ll_b32_b16 s2, s7, s8
+; GFX11-TRUE16-NEXT: v_and_b32_e32 v0, 0xff, v0
+; GFX11-TRUE16-NEXT: v_and_b32_e32 v1, 0xff, v1
+; GFX11-TRUE16-NEXT: v_add_nc_u32_e32 v4, 3, v15
+; GFX11-TRUE16-NEXT: v_add_nc_u32_e32 v5, 3, v18
+; GFX11-TRUE16-NEXT: s_addk_i32 s5, 0x300
+; GFX11-TRUE16-NEXT: v_or_b32_e32 v0, v11, v0
+; GFX11-TRUE16-NEXT: v_or_b32_e32 v1, v13, v1
+; GFX11-TRUE16-NEXT: v_and_b32_e32 v4, 0xff, v4
+; GFX11-TRUE16-NEXT: s_addk_i32 s6, 0x300
+; GFX11-TRUE16-NEXT: s_delay_alu instid0(VALU_DEP_3) | instskip(NEXT) | instid1(VALU_DEP_3)
+; GFX11-TRUE16-NEXT: v_add_nc_u32_e32 v7, 0x300, v0
+; GFX11-TRUE16-NEXT: v_add_nc_u32_e32 v0, 0x300, v1
+; GFX11-TRUE16-NEXT: s_pack_ll_b32_b16 s3, s5, s6
+; GFX11-TRUE16-NEXT: v_add_nc_u32_e32 v2, 3, v16
+; GFX11-TRUE16-NEXT: s_delay_alu instid0(VALU_DEP_2) | instskip(SKIP_1) | instid1(VALU_DEP_1)
+; GFX11-TRUE16-NEXT: v_mov_b16_e32 v7.h, v0.l
+; GFX11-TRUE16-NEXT: v_dual_mov_b32 v0, s0 :: v_dual_and_b32 v3, 0xff, v3
+; GFX11-TRUE16-NEXT: v_or_b32_e32 v1, v9, v3
+; GFX11-TRUE16-NEXT: v_or_b32_e32 v3, v20, v4
+; GFX11-TRUE16-NEXT: v_and_b32_e32 v4, 0xff, v5
+; GFX11-TRUE16-NEXT: s_delay_alu instid0(VALU_DEP_3) | instskip(NEXT) | instid1(VALU_DEP_3)
+; GFX11-TRUE16-NEXT: v_add_nc_u32_e32 v1, 0x300, v1
+; GFX11-TRUE16-NEXT: v_add_nc_u32_e32 v5, 0x300, v3
+; GFX11-TRUE16-NEXT: s_delay_alu instid0(VALU_DEP_3) | instskip(NEXT) | instid1(VALU_DEP_1)
+; GFX11-TRUE16-NEXT: v_or_b32_e32 v3, v19, v4
+; GFX11-TRUE16-NEXT: v_dual_mov_b32 v4, s4 :: v_dual_add_nc_u32 v3, 0x300, v3
+; GFX11-TRUE16-NEXT: s_delay_alu instid0(VALU_DEP_1) | instskip(SKIP_1) | instid1(VALU_DEP_1)
+; GFX11-TRUE16-NEXT: v_mov_b16_e32 v5.h, v3.l
+; GFX11-TRUE16-NEXT: v_dual_mov_b32 v3, s3 :: v_dual_and_b32 v2, 0xff, v2
+; GFX11-TRUE16-NEXT: v_or_b32_e32 v2, v21, v2
+; GFX11-TRUE16-NEXT: s_delay_alu instid0(VALU_DEP_1) | instskip(SKIP_2) | instid1(VALU_DEP_2)
+; GFX11-TRUE16-NEXT: v_add_nc_u32_e32 v6, 0x300, v2
+; GFX11-TRUE16-NEXT: v_add_nc_u32_e32 v2, 3, v17
+; GFX11-TRUE16-NEXT: v_mov_b16_e32 v6.h, v1.l
+; GFX11-TRUE16-NEXT: v_dual_mov_b32 v1, s1 :: v_dual_and_b32 v2, 0xff, v2
+; GFX11-TRUE16-NEXT: s_delay_alu instid0(VALU_DEP_1) | instskip(NEXT) | instid1(VALU_DEP_1)
+; GFX11-TRUE16-NEXT: v_or_b32_e32 v2, v14, v2
+; GFX11-TRUE16-NEXT: v_add_nc_u32_e32 v2, 0x300, v2
+; GFX11-TRUE16-NEXT: s_delay_alu instid0(VALU_DEP_1)
+; GFX11-TRUE16-NEXT: v_mov_b16_e32 v4.h, v2.l
+; GFX11-TRUE16-NEXT: v_mov_b32_e32 v2, s2
+; GFX11-TRUE16-NEXT: .LBB111_3: ; %end
+; GFX11-TRUE16-NEXT: s_setpc_b64 s[30:31]
+; GFX11-TRUE16-NEXT: .LBB111_4:
+; GFX11-TRUE16-NEXT: ; implicit-def: $vgpr0_vgpr1_vgpr2_vgpr3_vgpr4_vgpr5_vgpr6_vgpr7
+; GFX11-TRUE16-NEXT: s_branch .LBB111_2
+;
+; GFX11-FAKE16-LABEL: bitcast_v32i8_to_v16bf16_scalar:
+; GFX11-FAKE16: ; %bb.0:
+; GFX11-FAKE16-NEXT: s_waitcnt vmcnt(0) expcnt(0) lgkmcnt(0)
+; GFX11-FAKE16-NEXT: v_cmp_ne_u32_e32 vcc_lo, 0, v14
+; GFX11-FAKE16-NEXT: v_dual_mov_b32 v16, v6 :: v_dual_mov_b32 v17, v0
+; GFX11-FAKE16-NEXT: v_dual_mov_b32 v18, v4 :: v_dual_mov_b32 v15, v2
+; GFX11-FAKE16-NEXT: v_lshlrev_b32_e32 v14, 8, v1
+; GFX11-FAKE16-NEXT: v_lshlrev_b32_e32 v20, 8, v3
+; GFX11-FAKE16-NEXT: v_lshlrev_b32_e32 v19, 8, v5
+; GFX11-FAKE16-NEXT: v_lshlrev_b32_e32 v21, 8, v7
+; GFX11-FAKE16-NEXT: v_lshlrev_b32_e32 v9, 8, v9
+; GFX11-FAKE16-NEXT: v_lshlrev_b32_e32 v11, 8, v11
+; GFX11-FAKE16-NEXT: v_lshlrev_b32_e32 v13, 8, v13
+; GFX11-FAKE16-NEXT: s_mov_b32 s4, 0
+; GFX11-FAKE16-NEXT: s_and_b32 s5, vcc_lo, exec_lo
+; GFX11-FAKE16-NEXT: s_cbranch_scc0 .LBB111_4
+; GFX11-FAKE16-NEXT: ; %bb.1: ; %cmp.false
+; GFX11-FAKE16-NEXT: s_and_b32 s5, s0, 0xff
+; GFX11-FAKE16-NEXT: s_lshl_b32 s6, s1, 8
+; GFX11-FAKE16-NEXT: s_and_b32 s7, s2, 0xff
+; GFX11-FAKE16-NEXT: s_lshl_b32 s8, s3, 8
+; GFX11-FAKE16-NEXT: s_or_b32 s5, s5, s6
+; GFX11-FAKE16-NEXT: s_or_b32 s6, s7, s8
+; GFX11-FAKE16-NEXT: s_and_b32 s7, s16, 0xff
+; GFX11-FAKE16-NEXT: s_lshl_b32 s8, s17, 8
+; GFX11-FAKE16-NEXT: s_and_b32 s9, s18, 0xff
+; GFX11-FAKE16-NEXT: s_lshl_b32 s10, s19, 8
+; GFX11-FAKE16-NEXT: s_or_b32 s7, s7, s8
+; GFX11-FAKE16-NEXT: s_or_b32 s8, s9, s10
+; GFX11-FAKE16-NEXT: s_pack_ll_b32_b16 s5, s5, s6
+; GFX11-FAKE16-NEXT: s_pack_ll_b32_b16 s6, s7, s8
+; GFX11-FAKE16-NEXT: s_and_b32 s7, s20, 0xff
+; GFX11-FAKE16-NEXT: s_lshl_b32 s8, s21, 8
+; GFX11-FAKE16-NEXT: s_and_b32 s9, s22, 0xff
+; GFX11-FAKE16-NEXT: s_lshl_b32 s10, s23, 8
+; GFX11-FAKE16-NEXT: s_or_b32 s7, s7, s8
+; GFX11-FAKE16-NEXT: s_or_b32 s8, s9, s10
+; GFX11-FAKE16-NEXT: s_and_b32 s9, s24, 0xff
+; GFX11-FAKE16-NEXT: s_lshl_b32 s10, s25, 8
+; GFX11-FAKE16-NEXT: v_and_b32_e32 v1, 0xff, v15
+; GFX11-FAKE16-NEXT: s_pack_ll_b32_b16 s7, s7, s8
+; GFX11-FAKE16-NEXT: s_or_b32 s8, s9, s10
+; GFX11-FAKE16-NEXT: s_and_b32 s9, s26, 0xff
+; GFX11-FAKE16-NEXT: s_lshl_b32 s10, s27, 8
+; GFX11-FAKE16-NEXT: v_and_b32_e32 v4, 0xff, v16
+; GFX11-FAKE16-NEXT: s_or_b32 s9, s9, s10
+; GFX11-FAKE16-NEXT: v_or_b32_e32 v1, v1, v20
+; GFX11-FAKE16-NEXT: v_and_b32_e32 v5, 0xff, v10
+; GFX11-FAKE16-NEXT: s_pack_ll_b32_b16 s8, s8, s9
+; GFX11-FAKE16-NEXT: v_and_b32_e32 v2, 0xff, v18
+; GFX11-FAKE16-NEXT: v_and_b32_e32 v0, 0xff, v17
+; GFX11-FAKE16-NEXT: v_and_b32_e32 v6, 0xff, v8
+; GFX11-FAKE16-NEXT: v_or_b32_e32 v4, v4, v21
+; GFX11-FAKE16-NEXT: v_and_b32_e32 v7, 0xff, v12
+; GFX11-FAKE16-NEXT: v_or_b32_e32 v2, v2, v19
+; GFX11-FAKE16-NEXT: v_or_b32_e32 v5, v5, v11
+; GFX11-FAKE16-NEXT: v_and_b32_e32 v1, 0xffff, v1
+; GFX11-FAKE16-NEXT: s_and_b32 s11, s28, 0xff
+; GFX11-FAKE16-NEXT: s_lshl_b32 s12, s29, 8
+; GFX11-FAKE16-NEXT: v_or_b32_e32 v6, v6, v9
+; GFX11-FAKE16-NEXT: s_or_b32 s10, s11, s12
+; GFX11-FAKE16-NEXT: v_and_b32_e32 v22, 0xffff, v4
+; GFX11-FAKE16-NEXT: v_and_b32_e64 v3, 0xffff, s10
+; GFX11-FAKE16-NEXT: v_or_b32_e32 v7, v7, v13
+; GFX11-FAKE16-NEXT: v_and_b32_e32 v23, 0xffff, v5
+; GFX11-FAKE16-NEXT: v_lshl_or_b32 v5, v2, 16, v1
+; GFX11-FAKE16-NEXT: v_mov_b32_e32 v1, s6
+; GFX11-FAKE16-NEXT: v_or_b32_e32 v0, v0, v14
+; GFX11-FAKE16-NEXT: v_lshl_or_b32 v6, v6, 16, v22
+; GFX11-FAKE16-NEXT: v_lshl_or_b32 v7, v7, 16, v23
+; GFX11-FAKE16-NEXT: v_mov_b32_e32 v2, s7
+; GFX11-FAKE16-NEXT: s_delay_alu instid0(VALU_DEP_4)
+; GFX11-FAKE16-NEXT: v_lshl_or_b32 v4, v0, 16, v3
+; GFX11-FAKE16-NEXT: v_dual_mov_b32 v0, s5 :: v_dual_mov_b32 v3, s8
+; GFX11-FAKE16-NEXT: s_and_not1_b32 vcc_lo, exec_lo, s4
+; GFX11-FAKE16-NEXT: s_cbranch_vccnz .LBB111_3
+; GFX11-FAKE16-NEXT: .LBB111_2: ; %cmp.true
+; GFX11-FAKE16-NEXT: s_add_i32 s28, s28, 3
+; GFX11-FAKE16-NEXT: s_lshl_b32 s5, s29, 8
+; GFX11-FAKE16-NEXT: s_and_b32 s4, s28, 0xff
+; GFX11-FAKE16-NEXT: s_add_i32 s24, s24, 3
+; GFX11-FAKE16-NEXT: s_or_b32 s4, s5, s4
+; GFX11-FAKE16-NEXT: s_and_b32 s5, s24, 0xff
+; GFX11-FAKE16-NEXT: s_lshl_b32 s6, s25, 8
+; GFX11-FAKE16-NEXT: s_add_i32 s26, s26, 3
+; GFX11-FAKE16-NEXT: s_or_b32 s5, s6, s5
+; GFX11-FAKE16-NEXT: s_and_b32 s6, s26, 0xff
+; GFX11-FAKE16-NEXT: s_lshl_b32 s7, s27, 8
+; GFX11-FAKE16-NEXT: s_add_i32 s20, s20, 3
+; GFX11-FAKE16-NEXT: s_or_b32 s6, s7, s6
+; GFX11-FAKE16-NEXT: s_and_b32 s7, s20, 0xff
+; GFX11-FAKE16-NEXT: s_lshl_b32 s8, s21, 8
+; GFX11-FAKE16-NEXT: s_add_i32 s22, s22, 3
+; GFX11-FAKE16-NEXT: s_or_b32 s7, s8, s7
+; GFX11-FAKE16-NEXT: s_and_b32 s8, s22, 0xff
+; GFX11-FAKE16-NEXT: s_lshl_b32 s9, s23, 8
+; GFX11-FAKE16-NEXT: s_add_i32 s16, s16, 3
+; GFX11-FAKE16-NEXT: s_or_b32 s8, s9, s8
+; GFX11-FAKE16-NEXT: s_and_b32 s9, s16, 0xff
+; GFX11-FAKE16-NEXT: s_lshl_b32 s10, s17, 8
+; GFX11-FAKE16-NEXT: s_add_i32 s18, s18, 3
+; GFX11-FAKE16-NEXT: s_add_i32 s0, s0, 3
+; GFX11-FAKE16-NEXT: s_add_i32 s2, s2, 3
+; GFX11-FAKE16-NEXT: s_or_b32 s9, s10, s9
+; GFX11-FAKE16-NEXT: s_and_b32 s10, s18, 0xff
+; GFX11-FAKE16-NEXT: s_lshl_b32 s11, s19, 8
+; GFX11-FAKE16-NEXT: s_and_b32 s0, s0, 0xff
+; GFX11-FAKE16-NEXT: s_lshl_b32 s1, s1, 8
+; GFX11-FAKE16-NEXT: s_and_b32 s2, s2, 0xff
+; GFX11-FAKE16-NEXT: s_lshl_b32 s3, s3, 8
+; GFX11-FAKE16-NEXT: s_or_b32 s10, s11, s10
+; GFX11-FAKE16-NEXT: s_or_b32 s0, s1, s0
+; GFX11-FAKE16-NEXT: s_or_b32 s1, s3, s2
+; GFX11-FAKE16-NEXT: s_addk_i32 s5, 0x300
+; GFX11-FAKE16-NEXT: s_addk_i32 s6, 0x300
+; GFX11-FAKE16-NEXT: s_addk_i32 s9, 0x300
+; GFX11-FAKE16-NEXT: s_addk_i32 s0, 0x300
+; GFX11-FAKE16-NEXT: s_addk_i32 s1, 0x300
+; GFX11-FAKE16-NEXT: s_addk_i32 s10, 0x300
+; GFX11-FAKE16-NEXT: v_add_nc_u32_e32 v4, 3, v15
+; GFX11-FAKE16-NEXT: s_pack_ll_b32_b16 s0, s0, s1
+; GFX11-FAKE16-NEXT: s_pack_ll_b32_b16 s1, s9, s10
+; GFX11-FAKE16-NEXT: v_add_nc_u32_e32 v0, 3, v10
+; GFX11-FAKE16-NEXT: s_pack_ll_b32_b16 s3, s5, s6
+; GFX11-FAKE16-NEXT: v_add_nc_u32_e32 v2, 3, v16
+; GFX11-FAKE16-NEXT: s_addk_i32 s7, 0x300
+; GFX11-FAKE16-NEXT: s_addk_i32 s8, 0x300
+; GFX11-FAKE16-NEXT: v_add_nc_u32_e32 v1, 3, v12
+; GFX11-FAKE16-NEXT: v_and_b32_e32 v0, 0xff, v0
+; GFX11-FAKE16-NEXT: v_add_nc_u32_e32 v5, 3, v18
+; GFX11-FAKE16-NEXT: v_and_b32_e32 v2, 0xff, v2
+; GFX11-FAKE16-NEXT: v_and_b32_e32 v4, 0xff, v4
+; GFX11-FAKE16-NEXT: v_add_nc_u32_e32 v6, 3, v17
+; GFX11-FAKE16-NEXT: s_pack_ll_b32_b16 s2, s7, s8
+; GFX11-FAKE16-NEXT: v_add_nc_u32_e32 v3, 3, v8
+; GFX11-FAKE16-NEXT: v_and_b32_e32 v1, 0xff, v1
+; GFX11-FAKE16-NEXT: v_or_b32_e32 v0, v11, v0
+; GFX11-FAKE16-NEXT: v_or_b32_e32 v2, v21, v2
+; GFX11-FAKE16-NEXT: v_or_b32_e32 v4, v20, v4
+; GFX11-FAKE16-NEXT: v_and_b32_e32 v3, 0xff, v3
+; GFX11-FAKE16-NEXT: v_and_b32_e32 v5, 0xff, v5
+; GFX11-FAKE16-NEXT: v_and_b32_e32 v6, 0xff, v6
+; GFX11-FAKE16-NEXT: v_or_b32_e32 v1, v13, v1
+; GFX11-FAKE16-NEXT: v_add_nc_u32_e32 v0, 0x300, v0
+; GFX11-FAKE16-NEXT: v_or_b32_e32 v3, v9, v3
+; GFX11-FAKE16-NEXT: v_add_nc_u32_e32 v2, 0x300, v2
+; GFX11-FAKE16-NEXT: v_add_nc_u32_e32 v4, 0x300, v4
+; GFX11-FAKE16-NEXT: v_or_b32_e32 v5, v19, v5
+; GFX11-FAKE16-NEXT: v_or_b32_e32 v6, v14, v6
+; GFX11-FAKE16-NEXT: s_addk_i32 s4, 0x300
+; GFX11-FAKE16-NEXT: v_add_nc_u32_e32 v1, 0x300, v1
+; GFX11-FAKE16-NEXT: v_add_nc_u32_e32 v3, 0x300, v3
+; GFX11-FAKE16-NEXT: v_add_nc_u32_e32 v5, 0x300, v5
+; GFX11-FAKE16-NEXT: v_add_nc_u32_e32 v6, 0x300, v6
+; GFX11-FAKE16-NEXT: v_and_b32_e64 v7, 0xffff, s4
+; GFX11-FAKE16-NEXT: v_and_b32_e32 v8, 0xffff, v4
+; GFX11-FAKE16-NEXT: v_and_b32_e32 v2, 0xffff, v2
+; GFX11-FAKE16-NEXT: v_and_b32_e32 v0, 0xffff, v0
+; GFX11-FAKE16-NEXT: s_delay_alu instid0(VALU_DEP_4) | instskip(NEXT) | instid1(VALU_DEP_4)
+; GFX11-FAKE16-NEXT: v_lshl_or_b32 v4, v6, 16, v7
+; GFX11-FAKE16-NEXT: v_lshl_or_b32 v5, v5, 16, v8
+; GFX11-FAKE16-NEXT: s_delay_alu instid0(VALU_DEP_4) | instskip(NEXT) | instid1(VALU_DEP_4)
+; GFX11-FAKE16-NEXT: v_lshl_or_b32 v6, v3, 16, v2
+; GFX11-FAKE16-NEXT: v_lshl_or_b32 v7, v1, 16, v0
+; GFX11-FAKE16-NEXT: v_dual_mov_b32 v0, s0 :: v_dual_mov_b32 v1, s1
+; GFX11-FAKE16-NEXT: v_dual_mov_b32 v2, s2 :: v_dual_mov_b32 v3, s3
+; GFX11-FAKE16-NEXT: .LBB111_3: ; %end
+; GFX11-FAKE16-NEXT: s_setpc_b64 s[30:31]
+; GFX11-FAKE16-NEXT: .LBB111_4:
+; GFX11-FAKE16-NEXT: ; implicit-def: $vgpr0_vgpr1_vgpr2_vgpr3_vgpr4_vgpr5_vgpr6_vgpr7
+; GFX11-FAKE16-NEXT: s_branch .LBB111_2
%cmp = icmp eq i32 %b, 0
br i1 %cmp, label %cmp.true, label %cmp.false
diff --git a/llvm/test/CodeGen/AMDGPU/amdgcn.bitcast.320bit.ll b/llvm/test/CodeGen/AMDGPU/amdgcn.bitcast.320bit.ll
index b622e6e..9041f64 100644
--- a/llvm/test/CodeGen/AMDGPU/amdgcn.bitcast.320bit.ll
+++ b/llvm/test/CodeGen/AMDGPU/amdgcn.bitcast.320bit.ll
@@ -19562,212 +19562,421 @@ define inreg <20 x i16> @bitcast_v40i8_to_v20i16_scalar(<40 x i8> inreg %a, i32
; GFX9-NEXT: ; implicit-def: $vgpr0_vgpr1_vgpr2_vgpr3_vgpr4_vgpr5_vgpr6_vgpr7_vgpr8_vgpr9_vgpr10_vgpr11_vgpr12_vgpr13_vgpr14_vgpr15
; GFX9-NEXT: s_branch .LBB51_2
;
-; GFX11-LABEL: bitcast_v40i8_to_v20i16_scalar:
-; GFX11: ; %bb.0:
-; GFX11-NEXT: s_waitcnt vmcnt(0) expcnt(0) lgkmcnt(0)
-; GFX11-NEXT: v_cmp_ne_u32_e32 vcc_lo, 0, v22
-; GFX11-NEXT: v_dual_mov_b32 v25, v14 :: v_dual_mov_b32 v28, v12
-; GFX11-NEXT: v_dual_mov_b32 v27, v10 :: v_dual_mov_b32 v26, v8
-; GFX11-NEXT: v_dual_mov_b32 v24, v6 :: v_dual_mov_b32 v23, v0
-; GFX11-NEXT: v_dual_mov_b32 v30, v4 :: v_dual_mov_b32 v29, v2
-; GFX11-NEXT: v_lshlrev_b32_e32 v22, 8, v1
-; GFX11-NEXT: v_lshlrev_b32_e32 v32, 8, v3
-; GFX11-NEXT: v_lshlrev_b32_e32 v31, 8, v5
-; GFX11-NEXT: v_lshlrev_b32_e32 v34, 8, v7
-; GFX11-NEXT: v_lshlrev_b32_e32 v33, 8, v9
-; GFX11-NEXT: v_lshlrev_b32_e32 v36, 8, v11
-; GFX11-NEXT: v_lshlrev_b32_e32 v35, 8, v13
-; GFX11-NEXT: v_lshlrev_b32_e32 v37, 8, v15
-; GFX11-NEXT: v_lshlrev_b32_e32 v17, 8, v17
-; GFX11-NEXT: v_lshlrev_b32_e32 v19, 8, v19
-; GFX11-NEXT: v_lshlrev_b32_e32 v21, 8, v21
-; GFX11-NEXT: s_mov_b32 s4, 0
-; GFX11-NEXT: s_and_b32 s5, vcc_lo, exec_lo
-; GFX11-NEXT: s_cbranch_scc0 .LBB51_4
-; GFX11-NEXT: ; %bb.1: ; %cmp.false
-; GFX11-NEXT: s_and_b32 s5, s0, 0xff
-; GFX11-NEXT: s_lshl_b32 s6, s1, 8
-; GFX11-NEXT: s_and_b32 s7, s2, 0xff
-; GFX11-NEXT: s_lshl_b32 s8, s3, 8
-; GFX11-NEXT: s_or_b32 s5, s5, s6
-; GFX11-NEXT: s_or_b32 s6, s7, s8
-; GFX11-NEXT: s_and_b32 s7, s16, 0xff
-; GFX11-NEXT: s_lshl_b32 s8, s17, 8
-; GFX11-NEXT: s_and_b32 s9, s18, 0xff
-; GFX11-NEXT: s_lshl_b32 s10, s19, 8
-; GFX11-NEXT: s_or_b32 s7, s7, s8
-; GFX11-NEXT: s_or_b32 s8, s9, s10
-; GFX11-NEXT: s_pack_ll_b32_b16 s5, s5, s6
-; GFX11-NEXT: s_pack_ll_b32_b16 s6, s7, s8
-; GFX11-NEXT: s_and_b32 s7, s20, 0xff
-; GFX11-NEXT: s_lshl_b32 s8, s21, 8
-; GFX11-NEXT: s_and_b32 s9, s22, 0xff
-; GFX11-NEXT: s_lshl_b32 s10, s23, 8
-; GFX11-NEXT: s_or_b32 s7, s7, s8
-; GFX11-NEXT: s_or_b32 s8, s9, s10
-; GFX11-NEXT: s_and_b32 s9, s24, 0xff
-; GFX11-NEXT: s_lshl_b32 s10, s25, 8
-; GFX11-NEXT: v_and_b32_e32 v0, 0xff, v23
-; GFX11-NEXT: s_pack_ll_b32_b16 s7, s7, s8
-; GFX11-NEXT: s_or_b32 s8, s9, s10
-; GFX11-NEXT: s_and_b32 s9, s26, 0xff
-; GFX11-NEXT: s_lshl_b32 s10, s27, 8
-; GFX11-NEXT: s_and_b32 s11, s28, 0xff
-; GFX11-NEXT: s_lshl_b32 s12, s29, 8
-; GFX11-NEXT: s_or_b32 s9, s9, s10
-; GFX11-NEXT: s_or_b32 s10, s11, s12
-; GFX11-NEXT: v_or_b32_e32 v0, v0, v22
-; GFX11-NEXT: v_and_b32_e64 v2, 0xffff, s10
-; GFX11-NEXT: v_and_b32_e32 v3, 0xff, v30
-; GFX11-NEXT: v_and_b32_e32 v5, 0xff, v24
-; GFX11-NEXT: s_pack_ll_b32_b16 s8, s8, s9
-; GFX11-NEXT: v_and_b32_e32 v6, 0xff, v28
-; GFX11-NEXT: v_lshl_or_b32 v4, v0, 16, v2
-; GFX11-NEXT: v_and_b32_e32 v0, 0xff, v26
-; GFX11-NEXT: v_or_b32_e32 v2, v3, v31
-; GFX11-NEXT: v_or_b32_e32 v3, v5, v34
-; GFX11-NEXT: v_and_b32_e32 v1, 0xff, v29
-; GFX11-NEXT: v_and_b32_e32 v5, 0xff, v27
-; GFX11-NEXT: v_or_b32_e32 v0, v0, v33
-; GFX11-NEXT: v_and_b32_e32 v7, 0xff, v25
-; GFX11-NEXT: v_and_b32_e32 v3, 0xffff, v3
-; GFX11-NEXT: v_and_b32_e32 v8, 0xff, v18
-; GFX11-NEXT: v_or_b32_e32 v9, v6, v35
-; GFX11-NEXT: v_and_b32_e32 v6, 0xff, v16
-; GFX11-NEXT: v_or_b32_e32 v5, v5, v36
-; GFX11-NEXT: v_or_b32_e32 v7, v7, v37
-; GFX11-NEXT: v_and_b32_e32 v10, 0xff, v20
-; GFX11-NEXT: v_or_b32_e32 v8, v8, v19
-; GFX11-NEXT: v_or_b32_e32 v12, v6, v17
-; GFX11-NEXT: v_lshl_or_b32 v6, v0, 16, v3
-; GFX11-NEXT: v_mov_b32_e32 v0, s5
-; GFX11-NEXT: v_or_b32_e32 v1, v1, v32
-; GFX11-NEXT: v_and_b32_e32 v11, 0xffff, v5
-; GFX11-NEXT: v_and_b32_e32 v13, 0xffff, v7
-; GFX11-NEXT: v_or_b32_e32 v10, v10, v21
-; GFX11-NEXT: v_and_b32_e32 v14, 0xffff, v8
-; GFX11-NEXT: v_and_b32_e32 v1, 0xffff, v1
-; GFX11-NEXT: v_lshl_or_b32 v7, v9, 16, v11
-; GFX11-NEXT: v_lshl_or_b32 v8, v12, 16, v13
-; GFX11-NEXT: v_mov_b32_e32 v3, s8
-; GFX11-NEXT: v_lshl_or_b32 v9, v10, 16, v14
-; GFX11-NEXT: v_lshl_or_b32 v5, v2, 16, v1
-; GFX11-NEXT: v_dual_mov_b32 v1, s6 :: v_dual_mov_b32 v2, s7
-; GFX11-NEXT: s_and_not1_b32 vcc_lo, exec_lo, s4
-; GFX11-NEXT: s_cbranch_vccnz .LBB51_3
-; GFX11-NEXT: .LBB51_2: ; %cmp.true
-; GFX11-NEXT: v_add_nc_u32_e32 v4, 3, v27
-; GFX11-NEXT: s_add_i32 s28, s28, 3
-; GFX11-NEXT: s_lshl_b32 s5, s29, 8
-; GFX11-NEXT: s_and_b32 s4, s28, 0xff
-; GFX11-NEXT: s_add_i32 s24, s24, 3
-; GFX11-NEXT: s_or_b32 s4, s5, s4
-; GFX11-NEXT: s_and_b32 s5, s24, 0xff
-; GFX11-NEXT: s_lshl_b32 s6, s25, 8
-; GFX11-NEXT: s_add_i32 s26, s26, 3
-; GFX11-NEXT: v_and_b32_e32 v4, 0xff, v4
-; GFX11-NEXT: s_or_b32 s5, s6, s5
-; GFX11-NEXT: s_and_b32 s6, s26, 0xff
-; GFX11-NEXT: s_lshl_b32 s7, s27, 8
-; GFX11-NEXT: s_add_i32 s20, s20, 3
-; GFX11-NEXT: s_or_b32 s6, s7, s6
-; GFX11-NEXT: s_and_b32 s7, s20, 0xff
-; GFX11-NEXT: s_lshl_b32 s8, s21, 8
-; GFX11-NEXT: s_add_i32 s22, s22, 3
-; GFX11-NEXT: v_add_nc_u32_e32 v5, 3, v28
-; GFX11-NEXT: v_or_b32_e32 v4, v36, v4
-; GFX11-NEXT: v_add_nc_u32_e32 v7, 3, v26
-; GFX11-NEXT: s_or_b32 s7, s8, s7
-; GFX11-NEXT: s_and_b32 s8, s22, 0xff
-; GFX11-NEXT: s_lshl_b32 s9, s23, 8
-; GFX11-NEXT: s_add_i32 s16, s16, 3
-; GFX11-NEXT: s_or_b32 s8, s9, s8
-; GFX11-NEXT: s_and_b32 s9, s16, 0xff
-; GFX11-NEXT: s_lshl_b32 s10, s17, 8
-; GFX11-NEXT: s_add_i32 s18, s18, 3
-; GFX11-NEXT: s_add_i32 s0, s0, 3
-; GFX11-NEXT: s_add_i32 s2, s2, 3
-; GFX11-NEXT: s_or_b32 s9, s10, s9
-; GFX11-NEXT: s_and_b32 s10, s18, 0xff
-; GFX11-NEXT: s_lshl_b32 s11, s19, 8
-; GFX11-NEXT: s_and_b32 s0, s0, 0xff
-; GFX11-NEXT: s_lshl_b32 s1, s1, 8
-; GFX11-NEXT: s_and_b32 s2, s2, 0xff
-; GFX11-NEXT: s_lshl_b32 s3, s3, 8
-; GFX11-NEXT: v_and_b32_e32 v5, 0xff, v5
-; GFX11-NEXT: v_add_nc_u32_e32 v6, 3, v24
-; GFX11-NEXT: v_add_nc_u32_e32 v9, 0x300, v4
-; GFX11-NEXT: v_and_b32_e32 v4, 0xff, v7
-; GFX11-NEXT: s_or_b32 s10, s11, s10
-; GFX11-NEXT: s_or_b32 s0, s1, s0
-; GFX11-NEXT: s_or_b32 s1, s3, s2
-; GFX11-NEXT: s_addk_i32 s5, 0x300
-; GFX11-NEXT: s_addk_i32 s6, 0x300
-; GFX11-NEXT: s_addk_i32 s9, 0x300
-; GFX11-NEXT: s_addk_i32 s10, 0x300
-; GFX11-NEXT: s_addk_i32 s0, 0x300
-; GFX11-NEXT: s_addk_i32 s1, 0x300
-; GFX11-NEXT: v_add_nc_u32_e32 v8, 3, v29
-; GFX11-NEXT: v_or_b32_e32 v5, v35, v5
-; GFX11-NEXT: v_and_b32_e32 v6, 0xff, v6
-; GFX11-NEXT: v_or_b32_e32 v4, v33, v4
-; GFX11-NEXT: v_add_nc_u32_e32 v10, 3, v23
-; GFX11-NEXT: s_pack_ll_b32_b16 s0, s0, s1
-; GFX11-NEXT: s_pack_ll_b32_b16 s1, s9, s10
-; GFX11-NEXT: v_add_nc_u32_e32 v0, 3, v18
-; GFX11-NEXT: s_pack_ll_b32_b16 s3, s5, s6
-; GFX11-NEXT: v_add_nc_u32_e32 v2, 3, v25
-; GFX11-NEXT: s_addk_i32 s7, 0x300
-; GFX11-NEXT: s_addk_i32 s8, 0x300
-; GFX11-NEXT: v_add_nc_u32_e32 v1, 3, v20
-; GFX11-NEXT: v_and_b32_e32 v0, 0xff, v0
-; GFX11-NEXT: v_and_b32_e32 v2, 0xff, v2
-; GFX11-NEXT: v_and_b32_e32 v7, 0xff, v8
-; GFX11-NEXT: v_add_nc_u32_e32 v8, 0x300, v5
-; GFX11-NEXT: v_or_b32_e32 v5, v34, v6
-; GFX11-NEXT: v_add_nc_u32_e32 v6, 3, v30
-; GFX11-NEXT: v_add_nc_u32_e32 v11, 0x300, v4
-; GFX11-NEXT: v_and_b32_e32 v4, 0xff, v10
-; GFX11-NEXT: s_pack_ll_b32_b16 s2, s7, s8
-; GFX11-NEXT: v_add_nc_u32_e32 v3, 3, v16
-; GFX11-NEXT: v_and_b32_e32 v1, 0xff, v1
-; GFX11-NEXT: v_or_b32_e32 v0, v19, v0
-; GFX11-NEXT: v_or_b32_e32 v2, v37, v2
-; GFX11-NEXT: v_or_b32_e32 v7, v32, v7
-; GFX11-NEXT: v_and_b32_e32 v3, 0xff, v3
-; GFX11-NEXT: v_and_b32_e32 v6, 0xff, v6
-; GFX11-NEXT: v_or_b32_e32 v4, v22, v4
-; GFX11-NEXT: s_addk_i32 s4, 0x300
-; GFX11-NEXT: v_or_b32_e32 v1, v21, v1
-; GFX11-NEXT: v_or_b32_e32 v3, v17, v3
-; GFX11-NEXT: v_add_nc_u32_e32 v0, 0x300, v0
-; GFX11-NEXT: v_add_nc_u32_e32 v2, 0x300, v2
-; GFX11-NEXT: v_add_nc_u32_e32 v5, 0x300, v5
-; GFX11-NEXT: v_add_nc_u32_e32 v7, 0x300, v7
-; GFX11-NEXT: v_or_b32_e32 v6, v31, v6
-; GFX11-NEXT: v_add_nc_u32_e32 v4, 0x300, v4
-; GFX11-NEXT: v_and_b32_e64 v10, 0xffff, s4
-; GFX11-NEXT: v_add_nc_u32_e32 v1, 0x300, v1
-; GFX11-NEXT: v_add_nc_u32_e32 v3, 0x300, v3
-; GFX11-NEXT: v_add_nc_u32_e32 v6, 0x300, v6
-; GFX11-NEXT: v_and_b32_e32 v7, 0xffff, v7
-; GFX11-NEXT: v_lshl_or_b32 v4, v4, 16, v10
-; GFX11-NEXT: v_and_b32_e32 v10, 0xffff, v5
-; GFX11-NEXT: v_and_b32_e32 v9, 0xffff, v9
-; GFX11-NEXT: v_and_b32_e32 v2, 0xffff, v2
-; GFX11-NEXT: v_and_b32_e32 v0, 0xffff, v0
-; GFX11-NEXT: v_lshl_or_b32 v5, v6, 16, v7
-; GFX11-NEXT: v_lshl_or_b32 v6, v11, 16, v10
-; GFX11-NEXT: v_lshl_or_b32 v7, v8, 16, v9
-; GFX11-NEXT: v_lshl_or_b32 v8, v3, 16, v2
-; GFX11-NEXT: v_lshl_or_b32 v9, v1, 16, v0
-; GFX11-NEXT: v_dual_mov_b32 v0, s0 :: v_dual_mov_b32 v1, s1
-; GFX11-NEXT: v_dual_mov_b32 v2, s2 :: v_dual_mov_b32 v3, s3
-; GFX11-NEXT: .LBB51_3: ; %end
-; GFX11-NEXT: s_setpc_b64 s[30:31]
-; GFX11-NEXT: .LBB51_4:
-; GFX11-NEXT: ; implicit-def: $vgpr0_vgpr1_vgpr2_vgpr3_vgpr4_vgpr5_vgpr6_vgpr7_vgpr8_vgpr9_vgpr10_vgpr11_vgpr12_vgpr13_vgpr14_vgpr15
-; GFX11-NEXT: s_branch .LBB51_2
+; GFX11-TRUE16-LABEL: bitcast_v40i8_to_v20i16_scalar:
+; GFX11-TRUE16: ; %bb.0:
+; GFX11-TRUE16-NEXT: s_waitcnt vmcnt(0) expcnt(0) lgkmcnt(0)
+; GFX11-TRUE16-NEXT: v_cmp_ne_u32_e32 vcc_lo, 0, v22
+; GFX11-TRUE16-NEXT: v_dual_mov_b32 v25, v14 :: v_dual_mov_b32 v28, v12
+; GFX11-TRUE16-NEXT: v_dual_mov_b32 v27, v10 :: v_dual_mov_b32 v26, v8
+; GFX11-TRUE16-NEXT: v_dual_mov_b32 v24, v6 :: v_dual_mov_b32 v23, v0
+; GFX11-TRUE16-NEXT: v_dual_mov_b32 v30, v4 :: v_dual_mov_b32 v29, v2
+; GFX11-TRUE16-NEXT: v_lshlrev_b32_e32 v22, 8, v1
+; GFX11-TRUE16-NEXT: v_lshlrev_b32_e32 v32, 8, v3
+; GFX11-TRUE16-NEXT: v_lshlrev_b32_e32 v31, 8, v5
+; GFX11-TRUE16-NEXT: v_lshlrev_b32_e32 v34, 8, v7
+; GFX11-TRUE16-NEXT: v_lshlrev_b32_e32 v33, 8, v9
+; GFX11-TRUE16-NEXT: v_lshlrev_b32_e32 v36, 8, v11
+; GFX11-TRUE16-NEXT: v_lshlrev_b32_e32 v35, 8, v13
+; GFX11-TRUE16-NEXT: v_lshlrev_b32_e32 v37, 8, v15
+; GFX11-TRUE16-NEXT: v_lshlrev_b32_e32 v17, 8, v17
+; GFX11-TRUE16-NEXT: v_lshlrev_b32_e32 v19, 8, v19
+; GFX11-TRUE16-NEXT: v_lshlrev_b32_e32 v21, 8, v21
+; GFX11-TRUE16-NEXT: s_mov_b32 s4, 0
+; GFX11-TRUE16-NEXT: s_and_b32 s5, vcc_lo, exec_lo
+; GFX11-TRUE16-NEXT: s_cbranch_scc0 .LBB51_4
+; GFX11-TRUE16-NEXT: ; %bb.1: ; %cmp.false
+; GFX11-TRUE16-NEXT: s_and_b32 s5, s0, 0xff
+; GFX11-TRUE16-NEXT: s_lshl_b32 s6, s1, 8
+; GFX11-TRUE16-NEXT: s_and_b32 s7, s2, 0xff
+; GFX11-TRUE16-NEXT: s_lshl_b32 s8, s3, 8
+; GFX11-TRUE16-NEXT: s_or_b32 s5, s5, s6
+; GFX11-TRUE16-NEXT: s_or_b32 s6, s7, s8
+; GFX11-TRUE16-NEXT: s_lshl_b32 s7, s17, 8
+; GFX11-TRUE16-NEXT: s_pack_ll_b32_b16 s5, s5, s6
+; GFX11-TRUE16-NEXT: s_and_b32 s6, s16, 0xff
+; GFX11-TRUE16-NEXT: s_and_b32 s8, s18, 0xff
+; GFX11-TRUE16-NEXT: s_lshl_b32 s9, s19, 8
+; GFX11-TRUE16-NEXT: s_or_b32 s6, s6, s7
+; GFX11-TRUE16-NEXT: s_or_b32 s7, s8, s9
+; GFX11-TRUE16-NEXT: s_and_b32 s8, s20, 0xff
+; GFX11-TRUE16-NEXT: s_lshl_b32 s9, s21, 8
+; GFX11-TRUE16-NEXT: s_and_b32 s10, s22, 0xff
+; GFX11-TRUE16-NEXT: s_lshl_b32 s11, s23, 8
+; GFX11-TRUE16-NEXT: s_or_b32 s8, s8, s9
+; GFX11-TRUE16-NEXT: s_or_b32 s9, s10, s11
+; GFX11-TRUE16-NEXT: s_pack_ll_b32_b16 s6, s6, s7
+; GFX11-TRUE16-NEXT: s_pack_ll_b32_b16 s7, s8, s9
+; GFX11-TRUE16-NEXT: s_and_b32 s8, s24, 0xff
+; GFX11-TRUE16-NEXT: s_lshl_b32 s9, s25, 8
+; GFX11-TRUE16-NEXT: s_and_b32 s10, s26, 0xff
+; GFX11-TRUE16-NEXT: s_lshl_b32 s11, s27, 8
+; GFX11-TRUE16-NEXT: s_or_b32 s8, s8, s9
+; GFX11-TRUE16-NEXT: s_or_b32 s9, s10, s11
+; GFX11-TRUE16-NEXT: s_and_b32 s10, s28, 0xff
+; GFX11-TRUE16-NEXT: s_lshl_b32 s11, s29, 8
+; GFX11-TRUE16-NEXT: s_pack_ll_b32_b16 s8, s8, s9
+; GFX11-TRUE16-NEXT: s_or_b32 s9, s10, s11
+; GFX11-TRUE16-NEXT: s_delay_alu instid0(SALU_CYCLE_1) | instskip(SKIP_3) | instid1(VALU_DEP_4)
+; GFX11-TRUE16-NEXT: v_dual_mov_b32 v4, s9 :: v_dual_and_b32 v1, 0xff, v30
+; GFX11-TRUE16-NEXT: v_and_b32_e32 v2, 0xff, v29
+; GFX11-TRUE16-NEXT: v_and_b32_e32 v9, 0xff, v18
+; GFX11-TRUE16-NEXT: v_and_b32_e32 v0, 0xff, v23
+; GFX11-TRUE16-NEXT: v_or_b32_e32 v1, v1, v31
+; GFX11-TRUE16-NEXT: v_and_b32_e32 v3, 0xff, v26
+; GFX11-TRUE16-NEXT: v_or_b32_e32 v5, v2, v32
+; GFX11-TRUE16-NEXT: v_or_b32_e32 v9, v9, v19
+; GFX11-TRUE16-NEXT: s_delay_alu instid0(VALU_DEP_4) | instskip(SKIP_1) | instid1(VALU_DEP_1)
+; GFX11-TRUE16-NEXT: v_mov_b16_e32 v5.h, v1.l
+; GFX11-TRUE16-NEXT: v_and_b32_e32 v1, 0xff, v27
+; GFX11-TRUE16-NEXT: v_or_b32_e32 v7, v1, v36
+; GFX11-TRUE16-NEXT: v_and_b32_e32 v1, 0xff, v20
+; GFX11-TRUE16-NEXT: s_delay_alu instid0(VALU_DEP_1) | instskip(NEXT) | instid1(VALU_DEP_1)
+; GFX11-TRUE16-NEXT: v_or_b32_e32 v1, v1, v21
+; GFX11-TRUE16-NEXT: v_mov_b16_e32 v9.h, v1.l
+; GFX11-TRUE16-NEXT: v_mov_b32_e32 v1, s6
+; GFX11-TRUE16-NEXT: v_or_b32_e32 v0, v0, v22
+; GFX11-TRUE16-NEXT: s_delay_alu instid0(VALU_DEP_1) | instskip(SKIP_1) | instid1(VALU_DEP_1)
+; GFX11-TRUE16-NEXT: v_mov_b16_e32 v4.h, v0.l
+; GFX11-TRUE16-NEXT: v_and_b32_e32 v0, 0xff, v24
+; GFX11-TRUE16-NEXT: v_or_b32_e32 v6, v0, v34
+; GFX11-TRUE16-NEXT: v_and_b32_e32 v0, 0xff, v28
+; GFX11-TRUE16-NEXT: s_delay_alu instid0(VALU_DEP_1) | instskip(NEXT) | instid1(VALU_DEP_1)
+; GFX11-TRUE16-NEXT: v_or_b32_e32 v0, v0, v35
+; GFX11-TRUE16-NEXT: v_mov_b16_e32 v7.h, v0.l
+; GFX11-TRUE16-NEXT: v_mov_b32_e32 v0, s5
+; GFX11-TRUE16-NEXT: v_or_b32_e32 v2, v3, v33
+; GFX11-TRUE16-NEXT: v_and_b32_e32 v3, 0xff, v25
+; GFX11-TRUE16-NEXT: s_delay_alu instid0(VALU_DEP_2) | instskip(SKIP_1) | instid1(VALU_DEP_3)
+; GFX11-TRUE16-NEXT: v_mov_b16_e32 v6.h, v2.l
+; GFX11-TRUE16-NEXT: v_and_b32_e32 v2, 0xff, v16
+; GFX11-TRUE16-NEXT: v_or_b32_e32 v8, v3, v37
+; GFX11-TRUE16-NEXT: v_mov_b32_e32 v3, s8
+; GFX11-TRUE16-NEXT: s_delay_alu instid0(VALU_DEP_3) | instskip(NEXT) | instid1(VALU_DEP_1)
+; GFX11-TRUE16-NEXT: v_or_b32_e32 v2, v2, v17
+; GFX11-TRUE16-NEXT: v_mov_b16_e32 v8.h, v2.l
+; GFX11-TRUE16-NEXT: v_mov_b32_e32 v2, s7
+; GFX11-TRUE16-NEXT: s_and_not1_b32 vcc_lo, exec_lo, s4
+; GFX11-TRUE16-NEXT: s_cbranch_vccnz .LBB51_3
+; GFX11-TRUE16-NEXT: .LBB51_2: ; %cmp.true
+; GFX11-TRUE16-NEXT: s_add_i32 s28, s28, 3
+; GFX11-TRUE16-NEXT: s_lshl_b32 s5, s29, 8
+; GFX11-TRUE16-NEXT: s_and_b32 s4, s28, 0xff
+; GFX11-TRUE16-NEXT: s_add_i32 s24, s24, 3
+; GFX11-TRUE16-NEXT: s_or_b32 s4, s5, s4
+; GFX11-TRUE16-NEXT: s_and_b32 s5, s24, 0xff
+; GFX11-TRUE16-NEXT: s_lshl_b32 s6, s25, 8
+; GFX11-TRUE16-NEXT: s_add_i32 s26, s26, 3
+; GFX11-TRUE16-NEXT: s_add_i32 s0, s0, 3
+; GFX11-TRUE16-NEXT: s_add_i32 s2, s2, 3
+; GFX11-TRUE16-NEXT: s_or_b32 s5, s6, s5
+; GFX11-TRUE16-NEXT: s_and_b32 s6, s26, 0xff
+; GFX11-TRUE16-NEXT: s_lshl_b32 s7, s27, 8
+; GFX11-TRUE16-NEXT: s_and_b32 s0, s0, 0xff
+; GFX11-TRUE16-NEXT: s_lshl_b32 s1, s1, 8
+; GFX11-TRUE16-NEXT: s_and_b32 s2, s2, 0xff
+; GFX11-TRUE16-NEXT: s_lshl_b32 s3, s3, 8
+; GFX11-TRUE16-NEXT: s_or_b32 s6, s7, s6
+; GFX11-TRUE16-NEXT: s_or_b32 s0, s1, s0
+; GFX11-TRUE16-NEXT: s_or_b32 s1, s3, s2
+; GFX11-TRUE16-NEXT: s_addk_i32 s5, 0x300
+; GFX11-TRUE16-NEXT: s_addk_i32 s6, 0x300
+; GFX11-TRUE16-NEXT: s_addk_i32 s0, 0x300
+; GFX11-TRUE16-NEXT: s_addk_i32 s1, 0x300
+; GFX11-TRUE16-NEXT: s_pack_ll_b32_b16 s3, s5, s6
+; GFX11-TRUE16-NEXT: v_add_nc_u32_e32 v2, 3, v25
+; GFX11-TRUE16-NEXT: s_pack_ll_b32_b16 s0, s0, s1
+; GFX11-TRUE16-NEXT: v_add_nc_u32_e32 v3, 3, v16
+; GFX11-TRUE16-NEXT: v_add_nc_u32_e32 v4, 3, v27
+; GFX11-TRUE16-NEXT: v_add_nc_u32_e32 v5, 3, v26
+; GFX11-TRUE16-NEXT: v_and_b32_e32 v2, 0xff, v2
+; GFX11-TRUE16-NEXT: v_add_nc_u32_e32 v6, 3, v29
+; GFX11-TRUE16-NEXT: v_and_b32_e32 v3, 0xff, v3
+; GFX11-TRUE16-NEXT: v_and_b32_e32 v4, 0xff, v4
+; GFX11-TRUE16-NEXT: s_add_i32 s20, s20, 3
+; GFX11-TRUE16-NEXT: v_or_b32_e32 v2, v37, v2
+; GFX11-TRUE16-NEXT: s_and_b32 s7, s20, 0xff
+; GFX11-TRUE16-NEXT: v_or_b32_e32 v3, v17, v3
+; GFX11-TRUE16-NEXT: s_lshl_b32 s8, s21, 8
+; GFX11-TRUE16-NEXT: s_add_i32 s22, s22, 3
+; GFX11-TRUE16-NEXT: v_add_nc_u32_e32 v8, 0x300, v2
+; GFX11-TRUE16-NEXT: s_or_b32 s7, s8, s7
+; GFX11-TRUE16-NEXT: v_add_nc_u32_e32 v2, 0x300, v3
+; GFX11-TRUE16-NEXT: v_or_b32_e32 v3, v36, v4
+; GFX11-TRUE16-NEXT: v_add_nc_u32_e32 v4, 3, v24
+; GFX11-TRUE16-NEXT: s_and_b32 s8, s22, 0xff
+; GFX11-TRUE16-NEXT: s_lshl_b32 s9, s23, 8
+; GFX11-TRUE16-NEXT: s_add_i32 s16, s16, 3
+; GFX11-TRUE16-NEXT: v_add_nc_u32_e32 v7, 0x300, v3
+; GFX11-TRUE16-NEXT: v_and_b32_e32 v3, 0xff, v4
+; GFX11-TRUE16-NEXT: v_and_b32_e32 v4, 0xff, v5
+; GFX11-TRUE16-NEXT: v_and_b32_e32 v5, 0xff, v6
+; GFX11-TRUE16-NEXT: v_add_nc_u32_e32 v6, 3, v30
+; GFX11-TRUE16-NEXT: s_or_b32 s8, s9, s8
+; GFX11-TRUE16-NEXT: s_and_b32 s9, s16, 0xff
+; GFX11-TRUE16-NEXT: s_lshl_b32 s10, s17, 8
+; GFX11-TRUE16-NEXT: s_add_i32 s18, s18, 3
+; GFX11-TRUE16-NEXT: s_or_b32 s9, s10, s9
+; GFX11-TRUE16-NEXT: s_and_b32 s10, s18, 0xff
+; GFX11-TRUE16-NEXT: s_lshl_b32 s11, s19, 8
+; GFX11-TRUE16-NEXT: v_or_b32_e32 v4, v33, v4
+; GFX11-TRUE16-NEXT: v_and_b32_e32 v10, 0xff, v6
+; GFX11-TRUE16-NEXT: s_or_b32 s10, s11, s10
+; GFX11-TRUE16-NEXT: s_addk_i32 s9, 0x300
+; GFX11-TRUE16-NEXT: s_addk_i32 s10, 0x300
+; GFX11-TRUE16-NEXT: v_add_nc_u32_e32 v11, 0x300, v4
+; GFX11-TRUE16-NEXT: v_or_b32_e32 v4, v31, v10
+; GFX11-TRUE16-NEXT: s_addk_i32 s4, 0x300
+; GFX11-TRUE16-NEXT: v_add_nc_u32_e32 v1, 3, v20
+; GFX11-TRUE16-NEXT: s_pack_ll_b32_b16 s1, s9, s10
+; GFX11-TRUE16-NEXT: v_add_nc_u32_e32 v0, 3, v18
+; GFX11-TRUE16-NEXT: s_addk_i32 s7, 0x300
+; GFX11-TRUE16-NEXT: s_addk_i32 s8, 0x300
+; GFX11-TRUE16-NEXT: v_add_nc_u32_e32 v10, 0x300, v4
+; GFX11-TRUE16-NEXT: s_pack_ll_b32_b16 s2, s7, s8
+; GFX11-TRUE16-NEXT: v_and_b32_e32 v0, 0xff, v0
+; GFX11-TRUE16-NEXT: v_dual_mov_b32 v4, s4 :: v_dual_and_b32 v1, 0xff, v1
+; GFX11-TRUE16-NEXT: v_or_b32_e32 v3, v34, v3
+; GFX11-TRUE16-NEXT: v_mov_b16_e32 v8.h, v2.l
+; GFX11-TRUE16-NEXT: s_delay_alu instid0(VALU_DEP_4)
+; GFX11-TRUE16-NEXT: v_or_b32_e32 v0, v19, v0
+; GFX11-TRUE16-NEXT: v_mov_b32_e32 v2, s2
+; GFX11-TRUE16-NEXT: v_or_b32_e32 v1, v21, v1
+; GFX11-TRUE16-NEXT: v_add_nc_u32_e32 v6, 0x300, v3
+; GFX11-TRUE16-NEXT: v_add_nc_u32_e32 v3, 3, v23
+; GFX11-TRUE16-NEXT: v_add_nc_u32_e32 v9, 0x300, v0
+; GFX11-TRUE16-NEXT: v_or_b32_e32 v5, v32, v5
+; GFX11-TRUE16-NEXT: v_add_nc_u32_e32 v0, 0x300, v1
+; GFX11-TRUE16-NEXT: v_add_nc_u32_e32 v1, 3, v28
+; GFX11-TRUE16-NEXT: v_and_b32_e32 v3, 0xff, v3
+; GFX11-TRUE16-NEXT: v_mov_b16_e32 v6.h, v11.l
+; GFX11-TRUE16-NEXT: v_add_nc_u32_e32 v5, 0x300, v5
+; GFX11-TRUE16-NEXT: v_mov_b16_e32 v5.h, v10.l
+; GFX11-TRUE16-NEXT: v_and_b32_e32 v1, 0xff, v1
+; GFX11-TRUE16-NEXT: v_or_b32_e32 v3, v22, v3
+; GFX11-TRUE16-NEXT: v_mov_b16_e32 v9.h, v0.l
+; GFX11-TRUE16-NEXT: v_mov_b32_e32 v0, s0
+; GFX11-TRUE16-NEXT: s_delay_alu instid0(VALU_DEP_4) | instskip(NEXT) | instid1(VALU_DEP_4)
+; GFX11-TRUE16-NEXT: v_or_b32_e32 v1, v35, v1
+; GFX11-TRUE16-NEXT: v_add_nc_u32_e32 v3, 0x300, v3
+; GFX11-TRUE16-NEXT: s_delay_alu instid0(VALU_DEP_2) | instskip(NEXT) | instid1(VALU_DEP_2)
+; GFX11-TRUE16-NEXT: v_add_nc_u32_e32 v1, 0x300, v1
+; GFX11-TRUE16-NEXT: v_mov_b16_e32 v4.h, v3.l
+; GFX11-TRUE16-NEXT: v_mov_b32_e32 v3, s3
+; GFX11-TRUE16-NEXT: s_delay_alu instid0(VALU_DEP_3)
+; GFX11-TRUE16-NEXT: v_mov_b16_e32 v7.h, v1.l
+; GFX11-TRUE16-NEXT: v_mov_b32_e32 v1, s1
+; GFX11-TRUE16-NEXT: .LBB51_3: ; %end
+; GFX11-TRUE16-NEXT: s_setpc_b64 s[30:31]
+; GFX11-TRUE16-NEXT: .LBB51_4:
+; GFX11-TRUE16-NEXT: ; implicit-def: $vgpr0_vgpr1_vgpr2_vgpr3_vgpr4_vgpr5_vgpr6_vgpr7_vgpr8_vgpr9_vgpr10_vgpr11_vgpr12_vgpr13_vgpr14_vgpr15
+; GFX11-TRUE16-NEXT: s_branch .LBB51_2
+;
+; GFX11-FAKE16-LABEL: bitcast_v40i8_to_v20i16_scalar:
+; GFX11-FAKE16: ; %bb.0:
+; GFX11-FAKE16-NEXT: s_waitcnt vmcnt(0) expcnt(0) lgkmcnt(0)
+; GFX11-FAKE16-NEXT: v_cmp_ne_u32_e32 vcc_lo, 0, v22
+; GFX11-FAKE16-NEXT: v_dual_mov_b32 v25, v14 :: v_dual_mov_b32 v28, v12
+; GFX11-FAKE16-NEXT: v_dual_mov_b32 v27, v10 :: v_dual_mov_b32 v26, v8
+; GFX11-FAKE16-NEXT: v_dual_mov_b32 v24, v6 :: v_dual_mov_b32 v23, v0
+; GFX11-FAKE16-NEXT: v_dual_mov_b32 v30, v4 :: v_dual_mov_b32 v29, v2
+; GFX11-FAKE16-NEXT: v_lshlrev_b32_e32 v22, 8, v1
+; GFX11-FAKE16-NEXT: v_lshlrev_b32_e32 v32, 8, v3
+; GFX11-FAKE16-NEXT: v_lshlrev_b32_e32 v31, 8, v5
+; GFX11-FAKE16-NEXT: v_lshlrev_b32_e32 v34, 8, v7
+; GFX11-FAKE16-NEXT: v_lshlrev_b32_e32 v33, 8, v9
+; GFX11-FAKE16-NEXT: v_lshlrev_b32_e32 v36, 8, v11
+; GFX11-FAKE16-NEXT: v_lshlrev_b32_e32 v35, 8, v13
+; GFX11-FAKE16-NEXT: v_lshlrev_b32_e32 v37, 8, v15
+; GFX11-FAKE16-NEXT: v_lshlrev_b32_e32 v17, 8, v17
+; GFX11-FAKE16-NEXT: v_lshlrev_b32_e32 v19, 8, v19
+; GFX11-FAKE16-NEXT: v_lshlrev_b32_e32 v21, 8, v21
+; GFX11-FAKE16-NEXT: s_mov_b32 s4, 0
+; GFX11-FAKE16-NEXT: s_and_b32 s5, vcc_lo, exec_lo
+; GFX11-FAKE16-NEXT: s_cbranch_scc0 .LBB51_4
+; GFX11-FAKE16-NEXT: ; %bb.1: ; %cmp.false
+; GFX11-FAKE16-NEXT: s_and_b32 s5, s0, 0xff
+; GFX11-FAKE16-NEXT: s_lshl_b32 s6, s1, 8
+; GFX11-FAKE16-NEXT: s_and_b32 s7, s2, 0xff
+; GFX11-FAKE16-NEXT: s_lshl_b32 s8, s3, 8
+; GFX11-FAKE16-NEXT: s_or_b32 s5, s5, s6
+; GFX11-FAKE16-NEXT: s_or_b32 s6, s7, s8
+; GFX11-FAKE16-NEXT: s_and_b32 s7, s16, 0xff
+; GFX11-FAKE16-NEXT: s_lshl_b32 s8, s17, 8
+; GFX11-FAKE16-NEXT: s_and_b32 s9, s18, 0xff
+; GFX11-FAKE16-NEXT: s_lshl_b32 s10, s19, 8
+; GFX11-FAKE16-NEXT: s_or_b32 s7, s7, s8
+; GFX11-FAKE16-NEXT: s_or_b32 s8, s9, s10
+; GFX11-FAKE16-NEXT: s_pack_ll_b32_b16 s5, s5, s6
+; GFX11-FAKE16-NEXT: s_pack_ll_b32_b16 s6, s7, s8
+; GFX11-FAKE16-NEXT: s_and_b32 s7, s20, 0xff
+; GFX11-FAKE16-NEXT: s_lshl_b32 s8, s21, 8
+; GFX11-FAKE16-NEXT: s_and_b32 s9, s22, 0xff
+; GFX11-FAKE16-NEXT: s_lshl_b32 s10, s23, 8
+; GFX11-FAKE16-NEXT: s_or_b32 s7, s7, s8
+; GFX11-FAKE16-NEXT: s_or_b32 s8, s9, s10
+; GFX11-FAKE16-NEXT: s_and_b32 s9, s24, 0xff
+; GFX11-FAKE16-NEXT: s_lshl_b32 s10, s25, 8
+; GFX11-FAKE16-NEXT: v_and_b32_e32 v0, 0xff, v23
+; GFX11-FAKE16-NEXT: s_pack_ll_b32_b16 s7, s7, s8
+; GFX11-FAKE16-NEXT: s_or_b32 s8, s9, s10
+; GFX11-FAKE16-NEXT: s_and_b32 s9, s26, 0xff
+; GFX11-FAKE16-NEXT: s_lshl_b32 s10, s27, 8
+; GFX11-FAKE16-NEXT: s_and_b32 s11, s28, 0xff
+; GFX11-FAKE16-NEXT: s_lshl_b32 s12, s29, 8
+; GFX11-FAKE16-NEXT: s_or_b32 s9, s9, s10
+; GFX11-FAKE16-NEXT: s_or_b32 s10, s11, s12
+; GFX11-FAKE16-NEXT: v_or_b32_e32 v0, v0, v22
+; GFX11-FAKE16-NEXT: v_and_b32_e64 v2, 0xffff, s10
+; GFX11-FAKE16-NEXT: v_and_b32_e32 v3, 0xff, v30
+; GFX11-FAKE16-NEXT: v_and_b32_e32 v5, 0xff, v24
+; GFX11-FAKE16-NEXT: s_pack_ll_b32_b16 s8, s8, s9
+; GFX11-FAKE16-NEXT: v_and_b32_e32 v6, 0xff, v28
+; GFX11-FAKE16-NEXT: v_lshl_or_b32 v4, v0, 16, v2
+; GFX11-FAKE16-NEXT: v_and_b32_e32 v0, 0xff, v26
+; GFX11-FAKE16-NEXT: v_or_b32_e32 v2, v3, v31
+; GFX11-FAKE16-NEXT: v_or_b32_e32 v3, v5, v34
+; GFX11-FAKE16-NEXT: v_and_b32_e32 v1, 0xff, v29
+; GFX11-FAKE16-NEXT: v_and_b32_e32 v5, 0xff, v27
+; GFX11-FAKE16-NEXT: v_or_b32_e32 v0, v0, v33
+; GFX11-FAKE16-NEXT: v_and_b32_e32 v7, 0xff, v25
+; GFX11-FAKE16-NEXT: v_and_b32_e32 v3, 0xffff, v3
+; GFX11-FAKE16-NEXT: v_and_b32_e32 v8, 0xff, v18
+; GFX11-FAKE16-NEXT: v_or_b32_e32 v9, v6, v35
+; GFX11-FAKE16-NEXT: v_and_b32_e32 v6, 0xff, v16
+; GFX11-FAKE16-NEXT: v_or_b32_e32 v5, v5, v36
+; GFX11-FAKE16-NEXT: v_or_b32_e32 v7, v7, v37
+; GFX11-FAKE16-NEXT: v_and_b32_e32 v10, 0xff, v20
+; GFX11-FAKE16-NEXT: v_or_b32_e32 v8, v8, v19
+; GFX11-FAKE16-NEXT: v_or_b32_e32 v12, v6, v17
+; GFX11-FAKE16-NEXT: v_lshl_or_b32 v6, v0, 16, v3
+; GFX11-FAKE16-NEXT: v_mov_b32_e32 v0, s5
+; GFX11-FAKE16-NEXT: v_or_b32_e32 v1, v1, v32
+; GFX11-FAKE16-NEXT: v_and_b32_e32 v11, 0xffff, v5
+; GFX11-FAKE16-NEXT: v_and_b32_e32 v13, 0xffff, v7
+; GFX11-FAKE16-NEXT: v_or_b32_e32 v10, v10, v21
+; GFX11-FAKE16-NEXT: v_and_b32_e32 v14, 0xffff, v8
+; GFX11-FAKE16-NEXT: v_and_b32_e32 v1, 0xffff, v1
+; GFX11-FAKE16-NEXT: v_lshl_or_b32 v7, v9, 16, v11
+; GFX11-FAKE16-NEXT: v_lshl_or_b32 v8, v12, 16, v13
+; GFX11-FAKE16-NEXT: v_mov_b32_e32 v3, s8
+; GFX11-FAKE16-NEXT: v_lshl_or_b32 v9, v10, 16, v14
+; GFX11-FAKE16-NEXT: v_lshl_or_b32 v5, v2, 16, v1
+; GFX11-FAKE16-NEXT: v_dual_mov_b32 v1, s6 :: v_dual_mov_b32 v2, s7
+; GFX11-FAKE16-NEXT: s_and_not1_b32 vcc_lo, exec_lo, s4
+; GFX11-FAKE16-NEXT: s_cbranch_vccnz .LBB51_3
+; GFX11-FAKE16-NEXT: .LBB51_2: ; %cmp.true
+; GFX11-FAKE16-NEXT: v_add_nc_u32_e32 v4, 3, v27
+; GFX11-FAKE16-NEXT: s_add_i32 s28, s28, 3
+; GFX11-FAKE16-NEXT: s_lshl_b32 s5, s29, 8
+; GFX11-FAKE16-NEXT: s_and_b32 s4, s28, 0xff
+; GFX11-FAKE16-NEXT: s_add_i32 s24, s24, 3
+; GFX11-FAKE16-NEXT: s_or_b32 s4, s5, s4
+; GFX11-FAKE16-NEXT: s_and_b32 s5, s24, 0xff
+; GFX11-FAKE16-NEXT: s_lshl_b32 s6, s25, 8
+; GFX11-FAKE16-NEXT: s_add_i32 s26, s26, 3
+; GFX11-FAKE16-NEXT: v_and_b32_e32 v4, 0xff, v4
+; GFX11-FAKE16-NEXT: s_or_b32 s5, s6, s5
+; GFX11-FAKE16-NEXT: s_and_b32 s6, s26, 0xff
+; GFX11-FAKE16-NEXT: s_lshl_b32 s7, s27, 8
+; GFX11-FAKE16-NEXT: s_add_i32 s20, s20, 3
+; GFX11-FAKE16-NEXT: s_or_b32 s6, s7, s6
+; GFX11-FAKE16-NEXT: s_and_b32 s7, s20, 0xff
+; GFX11-FAKE16-NEXT: s_lshl_b32 s8, s21, 8
+; GFX11-FAKE16-NEXT: s_add_i32 s22, s22, 3
+; GFX11-FAKE16-NEXT: v_add_nc_u32_e32 v5, 3, v28
+; GFX11-FAKE16-NEXT: v_or_b32_e32 v4, v36, v4
+; GFX11-FAKE16-NEXT: v_add_nc_u32_e32 v7, 3, v26
+; GFX11-FAKE16-NEXT: s_or_b32 s7, s8, s7
+; GFX11-FAKE16-NEXT: s_and_b32 s8, s22, 0xff
+; GFX11-FAKE16-NEXT: s_lshl_b32 s9, s23, 8
+; GFX11-FAKE16-NEXT: s_add_i32 s16, s16, 3
+; GFX11-FAKE16-NEXT: s_or_b32 s8, s9, s8
+; GFX11-FAKE16-NEXT: s_and_b32 s9, s16, 0xff
+; GFX11-FAKE16-NEXT: s_lshl_b32 s10, s17, 8
+; GFX11-FAKE16-NEXT: s_add_i32 s18, s18, 3
+; GFX11-FAKE16-NEXT: s_add_i32 s0, s0, 3
+; GFX11-FAKE16-NEXT: s_add_i32 s2, s2, 3
+; GFX11-FAKE16-NEXT: s_or_b32 s9, s10, s9
+; GFX11-FAKE16-NEXT: s_and_b32 s10, s18, 0xff
+; GFX11-FAKE16-NEXT: s_lshl_b32 s11, s19, 8
+; GFX11-FAKE16-NEXT: s_and_b32 s0, s0, 0xff
+; GFX11-FAKE16-NEXT: s_lshl_b32 s1, s1, 8
+; GFX11-FAKE16-NEXT: s_and_b32 s2, s2, 0xff
+; GFX11-FAKE16-NEXT: s_lshl_b32 s3, s3, 8
+; GFX11-FAKE16-NEXT: v_and_b32_e32 v5, 0xff, v5
+; GFX11-FAKE16-NEXT: v_add_nc_u32_e32 v6, 3, v24
+; GFX11-FAKE16-NEXT: v_add_nc_u32_e32 v9, 0x300, v4
+; GFX11-FAKE16-NEXT: v_and_b32_e32 v4, 0xff, v7
+; GFX11-FAKE16-NEXT: s_or_b32 s10, s11, s10
+; GFX11-FAKE16-NEXT: s_or_b32 s0, s1, s0
+; GFX11-FAKE16-NEXT: s_or_b32 s1, s3, s2
+; GFX11-FAKE16-NEXT: s_addk_i32 s5, 0x300
+; GFX11-FAKE16-NEXT: s_addk_i32 s6, 0x300
+; GFX11-FAKE16-NEXT: s_addk_i32 s9, 0x300
+; GFX11-FAKE16-NEXT: s_addk_i32 s10, 0x300
+; GFX11-FAKE16-NEXT: s_addk_i32 s0, 0x300
+; GFX11-FAKE16-NEXT: s_addk_i32 s1, 0x300
+; GFX11-FAKE16-NEXT: v_add_nc_u32_e32 v8, 3, v29
+; GFX11-FAKE16-NEXT: v_or_b32_e32 v5, v35, v5
+; GFX11-FAKE16-NEXT: v_and_b32_e32 v6, 0xff, v6
+; GFX11-FAKE16-NEXT: v_or_b32_e32 v4, v33, v4
+; GFX11-FAKE16-NEXT: v_add_nc_u32_e32 v10, 3, v23
+; GFX11-FAKE16-NEXT: s_pack_ll_b32_b16 s0, s0, s1
+; GFX11-FAKE16-NEXT: s_pack_ll_b32_b16 s1, s9, s10
+; GFX11-FAKE16-NEXT: v_add_nc_u32_e32 v0, 3, v18
+; GFX11-FAKE16-NEXT: s_pack_ll_b32_b16 s3, s5, s6
+; GFX11-FAKE16-NEXT: v_add_nc_u32_e32 v2, 3, v25
+; GFX11-FAKE16-NEXT: s_addk_i32 s7, 0x300
+; GFX11-FAKE16-NEXT: s_addk_i32 s8, 0x300
+; GFX11-FAKE16-NEXT: v_add_nc_u32_e32 v1, 3, v20
+; GFX11-FAKE16-NEXT: v_and_b32_e32 v0, 0xff, v0
+; GFX11-FAKE16-NEXT: v_and_b32_e32 v2, 0xff, v2
+; GFX11-FAKE16-NEXT: v_and_b32_e32 v7, 0xff, v8
+; GFX11-FAKE16-NEXT: v_add_nc_u32_e32 v8, 0x300, v5
+; GFX11-FAKE16-NEXT: v_or_b32_e32 v5, v34, v6
+; GFX11-FAKE16-NEXT: v_add_nc_u32_e32 v6, 3, v30
+; GFX11-FAKE16-NEXT: v_add_nc_u32_e32 v11, 0x300, v4
+; GFX11-FAKE16-NEXT: v_and_b32_e32 v4, 0xff, v10
+; GFX11-FAKE16-NEXT: s_pack_ll_b32_b16 s2, s7, s8
+; GFX11-FAKE16-NEXT: v_add_nc_u32_e32 v3, 3, v16
+; GFX11-FAKE16-NEXT: v_and_b32_e32 v1, 0xff, v1
+; GFX11-FAKE16-NEXT: v_or_b32_e32 v0, v19, v0
+; GFX11-FAKE16-NEXT: v_or_b32_e32 v2, v37, v2
+; GFX11-FAKE16-NEXT: v_or_b32_e32 v7, v32, v7
+; GFX11-FAKE16-NEXT: v_and_b32_e32 v3, 0xff, v3
+; GFX11-FAKE16-NEXT: v_and_b32_e32 v6, 0xff, v6
+; GFX11-FAKE16-NEXT: v_or_b32_e32 v4, v22, v4
+; GFX11-FAKE16-NEXT: s_addk_i32 s4, 0x300
+; GFX11-FAKE16-NEXT: v_or_b32_e32 v1, v21, v1
+; GFX11-FAKE16-NEXT: v_or_b32_e32 v3, v17, v3
+; GFX11-FAKE16-NEXT: v_add_nc_u32_e32 v0, 0x300, v0
+; GFX11-FAKE16-NEXT: v_add_nc_u32_e32 v2, 0x300, v2
+; GFX11-FAKE16-NEXT: v_add_nc_u32_e32 v5, 0x300, v5
+; GFX11-FAKE16-NEXT: v_add_nc_u32_e32 v7, 0x300, v7
+; GFX11-FAKE16-NEXT: v_or_b32_e32 v6, v31, v6
+; GFX11-FAKE16-NEXT: v_add_nc_u32_e32 v4, 0x300, v4
+; GFX11-FAKE16-NEXT: v_and_b32_e64 v10, 0xffff, s4
+; GFX11-FAKE16-NEXT: v_add_nc_u32_e32 v1, 0x300, v1
+; GFX11-FAKE16-NEXT: v_add_nc_u32_e32 v3, 0x300, v3
+; GFX11-FAKE16-NEXT: v_add_nc_u32_e32 v6, 0x300, v6
+; GFX11-FAKE16-NEXT: v_and_b32_e32 v7, 0xffff, v7
+; GFX11-FAKE16-NEXT: v_lshl_or_b32 v4, v4, 16, v10
+; GFX11-FAKE16-NEXT: v_and_b32_e32 v10, 0xffff, v5
+; GFX11-FAKE16-NEXT: v_and_b32_e32 v9, 0xffff, v9
+; GFX11-FAKE16-NEXT: v_and_b32_e32 v2, 0xffff, v2
+; GFX11-FAKE16-NEXT: v_and_b32_e32 v0, 0xffff, v0
+; GFX11-FAKE16-NEXT: v_lshl_or_b32 v5, v6, 16, v7
+; GFX11-FAKE16-NEXT: v_lshl_or_b32 v6, v11, 16, v10
+; GFX11-FAKE16-NEXT: v_lshl_or_b32 v7, v8, 16, v9
+; GFX11-FAKE16-NEXT: v_lshl_or_b32 v8, v3, 16, v2
+; GFX11-FAKE16-NEXT: v_lshl_or_b32 v9, v1, 16, v0
+; GFX11-FAKE16-NEXT: v_dual_mov_b32 v0, s0 :: v_dual_mov_b32 v1, s1
+; GFX11-FAKE16-NEXT: v_dual_mov_b32 v2, s2 :: v_dual_mov_b32 v3, s3
+; GFX11-FAKE16-NEXT: .LBB51_3: ; %end
+; GFX11-FAKE16-NEXT: s_setpc_b64 s[30:31]
+; GFX11-FAKE16-NEXT: .LBB51_4:
+; GFX11-FAKE16-NEXT: ; implicit-def: $vgpr0_vgpr1_vgpr2_vgpr3_vgpr4_vgpr5_vgpr6_vgpr7_vgpr8_vgpr9_vgpr10_vgpr11_vgpr12_vgpr13_vgpr14_vgpr15
+; GFX11-FAKE16-NEXT: s_branch .LBB51_2
%cmp = icmp eq i32 %b, 0
br i1 %cmp, label %cmp.true, label %cmp.false
@@ -25600,212 +25809,421 @@ define inreg <20 x half> @bitcast_v40i8_to_v20f16_scalar(<40 x i8> inreg %a, i32
; GFX9-NEXT: ; implicit-def: $vgpr0_vgpr1_vgpr2_vgpr3_vgpr4_vgpr5_vgpr6_vgpr7_vgpr8_vgpr9_vgpr10_vgpr11_vgpr12_vgpr13_vgpr14_vgpr15
; GFX9-NEXT: s_branch .LBB63_2
;
-; GFX11-LABEL: bitcast_v40i8_to_v20f16_scalar:
-; GFX11: ; %bb.0:
-; GFX11-NEXT: s_waitcnt vmcnt(0) expcnt(0) lgkmcnt(0)
-; GFX11-NEXT: v_cmp_ne_u32_e32 vcc_lo, 0, v22
-; GFX11-NEXT: v_dual_mov_b32 v25, v14 :: v_dual_mov_b32 v28, v12
-; GFX11-NEXT: v_dual_mov_b32 v27, v10 :: v_dual_mov_b32 v26, v8
-; GFX11-NEXT: v_dual_mov_b32 v24, v6 :: v_dual_mov_b32 v23, v0
-; GFX11-NEXT: v_dual_mov_b32 v30, v4 :: v_dual_mov_b32 v29, v2
-; GFX11-NEXT: v_lshlrev_b32_e32 v22, 8, v1
-; GFX11-NEXT: v_lshlrev_b32_e32 v32, 8, v3
-; GFX11-NEXT: v_lshlrev_b32_e32 v31, 8, v5
-; GFX11-NEXT: v_lshlrev_b32_e32 v34, 8, v7
-; GFX11-NEXT: v_lshlrev_b32_e32 v33, 8, v9
-; GFX11-NEXT: v_lshlrev_b32_e32 v36, 8, v11
-; GFX11-NEXT: v_lshlrev_b32_e32 v35, 8, v13
-; GFX11-NEXT: v_lshlrev_b32_e32 v37, 8, v15
-; GFX11-NEXT: v_lshlrev_b32_e32 v17, 8, v17
-; GFX11-NEXT: v_lshlrev_b32_e32 v19, 8, v19
-; GFX11-NEXT: v_lshlrev_b32_e32 v21, 8, v21
-; GFX11-NEXT: s_mov_b32 s4, 0
-; GFX11-NEXT: s_and_b32 s5, vcc_lo, exec_lo
-; GFX11-NEXT: s_cbranch_scc0 .LBB63_4
-; GFX11-NEXT: ; %bb.1: ; %cmp.false
-; GFX11-NEXT: s_and_b32 s5, s0, 0xff
-; GFX11-NEXT: s_lshl_b32 s6, s1, 8
-; GFX11-NEXT: s_and_b32 s7, s2, 0xff
-; GFX11-NEXT: s_lshl_b32 s8, s3, 8
-; GFX11-NEXT: s_or_b32 s5, s5, s6
-; GFX11-NEXT: s_or_b32 s6, s7, s8
-; GFX11-NEXT: s_and_b32 s7, s16, 0xff
-; GFX11-NEXT: s_lshl_b32 s8, s17, 8
-; GFX11-NEXT: s_and_b32 s9, s18, 0xff
-; GFX11-NEXT: s_lshl_b32 s10, s19, 8
-; GFX11-NEXT: s_or_b32 s7, s7, s8
-; GFX11-NEXT: s_or_b32 s8, s9, s10
-; GFX11-NEXT: s_pack_ll_b32_b16 s5, s5, s6
-; GFX11-NEXT: s_pack_ll_b32_b16 s6, s7, s8
-; GFX11-NEXT: s_and_b32 s7, s20, 0xff
-; GFX11-NEXT: s_lshl_b32 s8, s21, 8
-; GFX11-NEXT: s_and_b32 s9, s22, 0xff
-; GFX11-NEXT: s_lshl_b32 s10, s23, 8
-; GFX11-NEXT: s_or_b32 s7, s7, s8
-; GFX11-NEXT: s_or_b32 s8, s9, s10
-; GFX11-NEXT: s_and_b32 s9, s24, 0xff
-; GFX11-NEXT: s_lshl_b32 s10, s25, 8
-; GFX11-NEXT: v_and_b32_e32 v0, 0xff, v23
-; GFX11-NEXT: s_pack_ll_b32_b16 s7, s7, s8
-; GFX11-NEXT: s_or_b32 s8, s9, s10
-; GFX11-NEXT: s_and_b32 s9, s26, 0xff
-; GFX11-NEXT: s_lshl_b32 s10, s27, 8
-; GFX11-NEXT: s_and_b32 s11, s28, 0xff
-; GFX11-NEXT: s_lshl_b32 s12, s29, 8
-; GFX11-NEXT: s_or_b32 s9, s9, s10
-; GFX11-NEXT: s_or_b32 s10, s11, s12
-; GFX11-NEXT: v_or_b32_e32 v0, v0, v22
-; GFX11-NEXT: v_and_b32_e64 v2, 0xffff, s10
-; GFX11-NEXT: v_and_b32_e32 v3, 0xff, v30
-; GFX11-NEXT: v_and_b32_e32 v5, 0xff, v24
-; GFX11-NEXT: s_pack_ll_b32_b16 s8, s8, s9
-; GFX11-NEXT: v_and_b32_e32 v6, 0xff, v28
-; GFX11-NEXT: v_lshl_or_b32 v4, v0, 16, v2
-; GFX11-NEXT: v_and_b32_e32 v0, 0xff, v26
-; GFX11-NEXT: v_or_b32_e32 v2, v3, v31
-; GFX11-NEXT: v_or_b32_e32 v3, v5, v34
-; GFX11-NEXT: v_and_b32_e32 v1, 0xff, v29
-; GFX11-NEXT: v_and_b32_e32 v5, 0xff, v27
-; GFX11-NEXT: v_or_b32_e32 v0, v0, v33
-; GFX11-NEXT: v_and_b32_e32 v7, 0xff, v25
-; GFX11-NEXT: v_and_b32_e32 v3, 0xffff, v3
-; GFX11-NEXT: v_and_b32_e32 v8, 0xff, v18
-; GFX11-NEXT: v_or_b32_e32 v9, v6, v35
-; GFX11-NEXT: v_and_b32_e32 v6, 0xff, v16
-; GFX11-NEXT: v_or_b32_e32 v5, v5, v36
-; GFX11-NEXT: v_or_b32_e32 v7, v7, v37
-; GFX11-NEXT: v_and_b32_e32 v10, 0xff, v20
-; GFX11-NEXT: v_or_b32_e32 v8, v8, v19
-; GFX11-NEXT: v_or_b32_e32 v12, v6, v17
-; GFX11-NEXT: v_lshl_or_b32 v6, v0, 16, v3
-; GFX11-NEXT: v_mov_b32_e32 v0, s5
-; GFX11-NEXT: v_or_b32_e32 v1, v1, v32
-; GFX11-NEXT: v_and_b32_e32 v11, 0xffff, v5
-; GFX11-NEXT: v_and_b32_e32 v13, 0xffff, v7
-; GFX11-NEXT: v_or_b32_e32 v10, v10, v21
-; GFX11-NEXT: v_and_b32_e32 v14, 0xffff, v8
-; GFX11-NEXT: v_and_b32_e32 v1, 0xffff, v1
-; GFX11-NEXT: v_lshl_or_b32 v7, v9, 16, v11
-; GFX11-NEXT: v_lshl_or_b32 v8, v12, 16, v13
-; GFX11-NEXT: v_mov_b32_e32 v3, s8
-; GFX11-NEXT: v_lshl_or_b32 v9, v10, 16, v14
-; GFX11-NEXT: v_lshl_or_b32 v5, v2, 16, v1
-; GFX11-NEXT: v_dual_mov_b32 v1, s6 :: v_dual_mov_b32 v2, s7
-; GFX11-NEXT: s_and_not1_b32 vcc_lo, exec_lo, s4
-; GFX11-NEXT: s_cbranch_vccnz .LBB63_3
-; GFX11-NEXT: .LBB63_2: ; %cmp.true
-; GFX11-NEXT: v_add_nc_u32_e32 v4, 3, v27
-; GFX11-NEXT: s_add_i32 s28, s28, 3
-; GFX11-NEXT: s_lshl_b32 s5, s29, 8
-; GFX11-NEXT: s_and_b32 s4, s28, 0xff
-; GFX11-NEXT: s_add_i32 s24, s24, 3
-; GFX11-NEXT: s_or_b32 s4, s5, s4
-; GFX11-NEXT: s_and_b32 s5, s24, 0xff
-; GFX11-NEXT: s_lshl_b32 s6, s25, 8
-; GFX11-NEXT: s_add_i32 s26, s26, 3
-; GFX11-NEXT: v_and_b32_e32 v4, 0xff, v4
-; GFX11-NEXT: s_or_b32 s5, s6, s5
-; GFX11-NEXT: s_and_b32 s6, s26, 0xff
-; GFX11-NEXT: s_lshl_b32 s7, s27, 8
-; GFX11-NEXT: s_add_i32 s20, s20, 3
-; GFX11-NEXT: s_or_b32 s6, s7, s6
-; GFX11-NEXT: s_and_b32 s7, s20, 0xff
-; GFX11-NEXT: s_lshl_b32 s8, s21, 8
-; GFX11-NEXT: s_add_i32 s22, s22, 3
-; GFX11-NEXT: v_add_nc_u32_e32 v5, 3, v28
-; GFX11-NEXT: v_or_b32_e32 v4, v36, v4
-; GFX11-NEXT: v_add_nc_u32_e32 v7, 3, v26
-; GFX11-NEXT: s_or_b32 s7, s8, s7
-; GFX11-NEXT: s_and_b32 s8, s22, 0xff
-; GFX11-NEXT: s_lshl_b32 s9, s23, 8
-; GFX11-NEXT: s_add_i32 s16, s16, 3
-; GFX11-NEXT: s_or_b32 s8, s9, s8
-; GFX11-NEXT: s_and_b32 s9, s16, 0xff
-; GFX11-NEXT: s_lshl_b32 s10, s17, 8
-; GFX11-NEXT: s_add_i32 s18, s18, 3
-; GFX11-NEXT: s_add_i32 s0, s0, 3
-; GFX11-NEXT: s_add_i32 s2, s2, 3
-; GFX11-NEXT: s_or_b32 s9, s10, s9
-; GFX11-NEXT: s_and_b32 s10, s18, 0xff
-; GFX11-NEXT: s_lshl_b32 s11, s19, 8
-; GFX11-NEXT: s_and_b32 s0, s0, 0xff
-; GFX11-NEXT: s_lshl_b32 s1, s1, 8
-; GFX11-NEXT: s_and_b32 s2, s2, 0xff
-; GFX11-NEXT: s_lshl_b32 s3, s3, 8
-; GFX11-NEXT: v_and_b32_e32 v5, 0xff, v5
-; GFX11-NEXT: v_add_nc_u32_e32 v6, 3, v24
-; GFX11-NEXT: v_add_nc_u32_e32 v9, 0x300, v4
-; GFX11-NEXT: v_and_b32_e32 v4, 0xff, v7
-; GFX11-NEXT: s_or_b32 s10, s11, s10
-; GFX11-NEXT: s_or_b32 s0, s1, s0
-; GFX11-NEXT: s_or_b32 s1, s3, s2
-; GFX11-NEXT: s_addk_i32 s5, 0x300
-; GFX11-NEXT: s_addk_i32 s6, 0x300
-; GFX11-NEXT: s_addk_i32 s9, 0x300
-; GFX11-NEXT: s_addk_i32 s10, 0x300
-; GFX11-NEXT: s_addk_i32 s0, 0x300
-; GFX11-NEXT: s_addk_i32 s1, 0x300
-; GFX11-NEXT: v_add_nc_u32_e32 v8, 3, v29
-; GFX11-NEXT: v_or_b32_e32 v5, v35, v5
-; GFX11-NEXT: v_and_b32_e32 v6, 0xff, v6
-; GFX11-NEXT: v_or_b32_e32 v4, v33, v4
-; GFX11-NEXT: v_add_nc_u32_e32 v10, 3, v23
-; GFX11-NEXT: s_pack_ll_b32_b16 s0, s0, s1
-; GFX11-NEXT: s_pack_ll_b32_b16 s1, s9, s10
-; GFX11-NEXT: v_add_nc_u32_e32 v0, 3, v18
-; GFX11-NEXT: s_pack_ll_b32_b16 s3, s5, s6
-; GFX11-NEXT: v_add_nc_u32_e32 v2, 3, v25
-; GFX11-NEXT: s_addk_i32 s7, 0x300
-; GFX11-NEXT: s_addk_i32 s8, 0x300
-; GFX11-NEXT: v_add_nc_u32_e32 v1, 3, v20
-; GFX11-NEXT: v_and_b32_e32 v0, 0xff, v0
-; GFX11-NEXT: v_and_b32_e32 v2, 0xff, v2
-; GFX11-NEXT: v_and_b32_e32 v7, 0xff, v8
-; GFX11-NEXT: v_add_nc_u32_e32 v8, 0x300, v5
-; GFX11-NEXT: v_or_b32_e32 v5, v34, v6
-; GFX11-NEXT: v_add_nc_u32_e32 v6, 3, v30
-; GFX11-NEXT: v_add_nc_u32_e32 v11, 0x300, v4
-; GFX11-NEXT: v_and_b32_e32 v4, 0xff, v10
-; GFX11-NEXT: s_pack_ll_b32_b16 s2, s7, s8
-; GFX11-NEXT: v_add_nc_u32_e32 v3, 3, v16
-; GFX11-NEXT: v_and_b32_e32 v1, 0xff, v1
-; GFX11-NEXT: v_or_b32_e32 v0, v19, v0
-; GFX11-NEXT: v_or_b32_e32 v2, v37, v2
-; GFX11-NEXT: v_or_b32_e32 v7, v32, v7
-; GFX11-NEXT: v_and_b32_e32 v3, 0xff, v3
-; GFX11-NEXT: v_and_b32_e32 v6, 0xff, v6
-; GFX11-NEXT: v_or_b32_e32 v4, v22, v4
-; GFX11-NEXT: s_addk_i32 s4, 0x300
-; GFX11-NEXT: v_or_b32_e32 v1, v21, v1
-; GFX11-NEXT: v_or_b32_e32 v3, v17, v3
-; GFX11-NEXT: v_add_nc_u32_e32 v0, 0x300, v0
-; GFX11-NEXT: v_add_nc_u32_e32 v2, 0x300, v2
-; GFX11-NEXT: v_add_nc_u32_e32 v5, 0x300, v5
-; GFX11-NEXT: v_add_nc_u32_e32 v7, 0x300, v7
-; GFX11-NEXT: v_or_b32_e32 v6, v31, v6
-; GFX11-NEXT: v_add_nc_u32_e32 v4, 0x300, v4
-; GFX11-NEXT: v_and_b32_e64 v10, 0xffff, s4
-; GFX11-NEXT: v_add_nc_u32_e32 v1, 0x300, v1
-; GFX11-NEXT: v_add_nc_u32_e32 v3, 0x300, v3
-; GFX11-NEXT: v_add_nc_u32_e32 v6, 0x300, v6
-; GFX11-NEXT: v_and_b32_e32 v7, 0xffff, v7
-; GFX11-NEXT: v_lshl_or_b32 v4, v4, 16, v10
-; GFX11-NEXT: v_and_b32_e32 v10, 0xffff, v5
-; GFX11-NEXT: v_and_b32_e32 v9, 0xffff, v9
-; GFX11-NEXT: v_and_b32_e32 v2, 0xffff, v2
-; GFX11-NEXT: v_and_b32_e32 v0, 0xffff, v0
-; GFX11-NEXT: v_lshl_or_b32 v5, v6, 16, v7
-; GFX11-NEXT: v_lshl_or_b32 v6, v11, 16, v10
-; GFX11-NEXT: v_lshl_or_b32 v7, v8, 16, v9
-; GFX11-NEXT: v_lshl_or_b32 v8, v3, 16, v2
-; GFX11-NEXT: v_lshl_or_b32 v9, v1, 16, v0
-; GFX11-NEXT: v_dual_mov_b32 v0, s0 :: v_dual_mov_b32 v1, s1
-; GFX11-NEXT: v_dual_mov_b32 v2, s2 :: v_dual_mov_b32 v3, s3
-; GFX11-NEXT: .LBB63_3: ; %end
-; GFX11-NEXT: s_setpc_b64 s[30:31]
-; GFX11-NEXT: .LBB63_4:
-; GFX11-NEXT: ; implicit-def: $vgpr0_vgpr1_vgpr2_vgpr3_vgpr4_vgpr5_vgpr6_vgpr7_vgpr8_vgpr9_vgpr10_vgpr11_vgpr12_vgpr13_vgpr14_vgpr15
-; GFX11-NEXT: s_branch .LBB63_2
+; GFX11-TRUE16-LABEL: bitcast_v40i8_to_v20f16_scalar:
+; GFX11-TRUE16: ; %bb.0:
+; GFX11-TRUE16-NEXT: s_waitcnt vmcnt(0) expcnt(0) lgkmcnt(0)
+; GFX11-TRUE16-NEXT: v_cmp_ne_u32_e32 vcc_lo, 0, v22
+; GFX11-TRUE16-NEXT: v_dual_mov_b32 v25, v14 :: v_dual_mov_b32 v28, v12
+; GFX11-TRUE16-NEXT: v_dual_mov_b32 v27, v10 :: v_dual_mov_b32 v26, v8
+; GFX11-TRUE16-NEXT: v_dual_mov_b32 v24, v6 :: v_dual_mov_b32 v23, v0
+; GFX11-TRUE16-NEXT: v_dual_mov_b32 v30, v4 :: v_dual_mov_b32 v29, v2
+; GFX11-TRUE16-NEXT: v_lshlrev_b32_e32 v22, 8, v1
+; GFX11-TRUE16-NEXT: v_lshlrev_b32_e32 v32, 8, v3
+; GFX11-TRUE16-NEXT: v_lshlrev_b32_e32 v31, 8, v5
+; GFX11-TRUE16-NEXT: v_lshlrev_b32_e32 v34, 8, v7
+; GFX11-TRUE16-NEXT: v_lshlrev_b32_e32 v33, 8, v9
+; GFX11-TRUE16-NEXT: v_lshlrev_b32_e32 v36, 8, v11
+; GFX11-TRUE16-NEXT: v_lshlrev_b32_e32 v35, 8, v13
+; GFX11-TRUE16-NEXT: v_lshlrev_b32_e32 v37, 8, v15
+; GFX11-TRUE16-NEXT: v_lshlrev_b32_e32 v17, 8, v17
+; GFX11-TRUE16-NEXT: v_lshlrev_b32_e32 v19, 8, v19
+; GFX11-TRUE16-NEXT: v_lshlrev_b32_e32 v21, 8, v21
+; GFX11-TRUE16-NEXT: s_mov_b32 s4, 0
+; GFX11-TRUE16-NEXT: s_and_b32 s5, vcc_lo, exec_lo
+; GFX11-TRUE16-NEXT: s_cbranch_scc0 .LBB63_4
+; GFX11-TRUE16-NEXT: ; %bb.1: ; %cmp.false
+; GFX11-TRUE16-NEXT: s_and_b32 s5, s0, 0xff
+; GFX11-TRUE16-NEXT: s_lshl_b32 s6, s1, 8
+; GFX11-TRUE16-NEXT: s_and_b32 s7, s2, 0xff
+; GFX11-TRUE16-NEXT: s_lshl_b32 s8, s3, 8
+; GFX11-TRUE16-NEXT: s_or_b32 s5, s5, s6
+; GFX11-TRUE16-NEXT: s_or_b32 s6, s7, s8
+; GFX11-TRUE16-NEXT: s_lshl_b32 s7, s17, 8
+; GFX11-TRUE16-NEXT: s_pack_ll_b32_b16 s5, s5, s6
+; GFX11-TRUE16-NEXT: s_and_b32 s6, s16, 0xff
+; GFX11-TRUE16-NEXT: s_and_b32 s8, s18, 0xff
+; GFX11-TRUE16-NEXT: s_lshl_b32 s9, s19, 8
+; GFX11-TRUE16-NEXT: s_or_b32 s6, s6, s7
+; GFX11-TRUE16-NEXT: s_or_b32 s7, s8, s9
+; GFX11-TRUE16-NEXT: s_and_b32 s8, s20, 0xff
+; GFX11-TRUE16-NEXT: s_lshl_b32 s9, s21, 8
+; GFX11-TRUE16-NEXT: s_and_b32 s10, s22, 0xff
+; GFX11-TRUE16-NEXT: s_lshl_b32 s11, s23, 8
+; GFX11-TRUE16-NEXT: s_or_b32 s8, s8, s9
+; GFX11-TRUE16-NEXT: s_or_b32 s9, s10, s11
+; GFX11-TRUE16-NEXT: s_pack_ll_b32_b16 s6, s6, s7
+; GFX11-TRUE16-NEXT: s_pack_ll_b32_b16 s7, s8, s9
+; GFX11-TRUE16-NEXT: s_and_b32 s8, s24, 0xff
+; GFX11-TRUE16-NEXT: s_lshl_b32 s9, s25, 8
+; GFX11-TRUE16-NEXT: s_and_b32 s10, s26, 0xff
+; GFX11-TRUE16-NEXT: s_lshl_b32 s11, s27, 8
+; GFX11-TRUE16-NEXT: s_or_b32 s8, s8, s9
+; GFX11-TRUE16-NEXT: s_or_b32 s9, s10, s11
+; GFX11-TRUE16-NEXT: s_and_b32 s10, s28, 0xff
+; GFX11-TRUE16-NEXT: s_lshl_b32 s11, s29, 8
+; GFX11-TRUE16-NEXT: s_pack_ll_b32_b16 s8, s8, s9
+; GFX11-TRUE16-NEXT: s_or_b32 s9, s10, s11
+; GFX11-TRUE16-NEXT: s_delay_alu instid0(SALU_CYCLE_1) | instskip(SKIP_3) | instid1(VALU_DEP_4)
+; GFX11-TRUE16-NEXT: v_dual_mov_b32 v4, s9 :: v_dual_and_b32 v1, 0xff, v30
+; GFX11-TRUE16-NEXT: v_and_b32_e32 v2, 0xff, v29
+; GFX11-TRUE16-NEXT: v_and_b32_e32 v9, 0xff, v18
+; GFX11-TRUE16-NEXT: v_and_b32_e32 v0, 0xff, v23
+; GFX11-TRUE16-NEXT: v_or_b32_e32 v1, v1, v31
+; GFX11-TRUE16-NEXT: v_and_b32_e32 v3, 0xff, v26
+; GFX11-TRUE16-NEXT: v_or_b32_e32 v5, v2, v32
+; GFX11-TRUE16-NEXT: v_or_b32_e32 v9, v9, v19
+; GFX11-TRUE16-NEXT: s_delay_alu instid0(VALU_DEP_4) | instskip(SKIP_1) | instid1(VALU_DEP_1)
+; GFX11-TRUE16-NEXT: v_mov_b16_e32 v5.h, v1.l
+; GFX11-TRUE16-NEXT: v_and_b32_e32 v1, 0xff, v27
+; GFX11-TRUE16-NEXT: v_or_b32_e32 v7, v1, v36
+; GFX11-TRUE16-NEXT: v_and_b32_e32 v1, 0xff, v20
+; GFX11-TRUE16-NEXT: s_delay_alu instid0(VALU_DEP_1) | instskip(NEXT) | instid1(VALU_DEP_1)
+; GFX11-TRUE16-NEXT: v_or_b32_e32 v1, v1, v21
+; GFX11-TRUE16-NEXT: v_mov_b16_e32 v9.h, v1.l
+; GFX11-TRUE16-NEXT: v_mov_b32_e32 v1, s6
+; GFX11-TRUE16-NEXT: v_or_b32_e32 v0, v0, v22
+; GFX11-TRUE16-NEXT: s_delay_alu instid0(VALU_DEP_1) | instskip(SKIP_1) | instid1(VALU_DEP_1)
+; GFX11-TRUE16-NEXT: v_mov_b16_e32 v4.h, v0.l
+; GFX11-TRUE16-NEXT: v_and_b32_e32 v0, 0xff, v24
+; GFX11-TRUE16-NEXT: v_or_b32_e32 v6, v0, v34
+; GFX11-TRUE16-NEXT: v_and_b32_e32 v0, 0xff, v28
+; GFX11-TRUE16-NEXT: s_delay_alu instid0(VALU_DEP_1) | instskip(NEXT) | instid1(VALU_DEP_1)
+; GFX11-TRUE16-NEXT: v_or_b32_e32 v0, v0, v35
+; GFX11-TRUE16-NEXT: v_mov_b16_e32 v7.h, v0.l
+; GFX11-TRUE16-NEXT: v_mov_b32_e32 v0, s5
+; GFX11-TRUE16-NEXT: v_or_b32_e32 v2, v3, v33
+; GFX11-TRUE16-NEXT: v_and_b32_e32 v3, 0xff, v25
+; GFX11-TRUE16-NEXT: s_delay_alu instid0(VALU_DEP_2) | instskip(SKIP_1) | instid1(VALU_DEP_3)
+; GFX11-TRUE16-NEXT: v_mov_b16_e32 v6.h, v2.l
+; GFX11-TRUE16-NEXT: v_and_b32_e32 v2, 0xff, v16
+; GFX11-TRUE16-NEXT: v_or_b32_e32 v8, v3, v37
+; GFX11-TRUE16-NEXT: v_mov_b32_e32 v3, s8
+; GFX11-TRUE16-NEXT: s_delay_alu instid0(VALU_DEP_3) | instskip(NEXT) | instid1(VALU_DEP_1)
+; GFX11-TRUE16-NEXT: v_or_b32_e32 v2, v2, v17
+; GFX11-TRUE16-NEXT: v_mov_b16_e32 v8.h, v2.l
+; GFX11-TRUE16-NEXT: v_mov_b32_e32 v2, s7
+; GFX11-TRUE16-NEXT: s_and_not1_b32 vcc_lo, exec_lo, s4
+; GFX11-TRUE16-NEXT: s_cbranch_vccnz .LBB63_3
+; GFX11-TRUE16-NEXT: .LBB63_2: ; %cmp.true
+; GFX11-TRUE16-NEXT: s_add_i32 s28, s28, 3
+; GFX11-TRUE16-NEXT: s_lshl_b32 s5, s29, 8
+; GFX11-TRUE16-NEXT: s_and_b32 s4, s28, 0xff
+; GFX11-TRUE16-NEXT: s_add_i32 s24, s24, 3
+; GFX11-TRUE16-NEXT: s_or_b32 s4, s5, s4
+; GFX11-TRUE16-NEXT: s_and_b32 s5, s24, 0xff
+; GFX11-TRUE16-NEXT: s_lshl_b32 s6, s25, 8
+; GFX11-TRUE16-NEXT: s_add_i32 s26, s26, 3
+; GFX11-TRUE16-NEXT: s_add_i32 s0, s0, 3
+; GFX11-TRUE16-NEXT: s_add_i32 s2, s2, 3
+; GFX11-TRUE16-NEXT: s_or_b32 s5, s6, s5
+; GFX11-TRUE16-NEXT: s_and_b32 s6, s26, 0xff
+; GFX11-TRUE16-NEXT: s_lshl_b32 s7, s27, 8
+; GFX11-TRUE16-NEXT: s_and_b32 s0, s0, 0xff
+; GFX11-TRUE16-NEXT: s_lshl_b32 s1, s1, 8
+; GFX11-TRUE16-NEXT: s_and_b32 s2, s2, 0xff
+; GFX11-TRUE16-NEXT: s_lshl_b32 s3, s3, 8
+; GFX11-TRUE16-NEXT: s_or_b32 s6, s7, s6
+; GFX11-TRUE16-NEXT: s_or_b32 s0, s1, s0
+; GFX11-TRUE16-NEXT: s_or_b32 s1, s3, s2
+; GFX11-TRUE16-NEXT: s_addk_i32 s5, 0x300
+; GFX11-TRUE16-NEXT: s_addk_i32 s6, 0x300
+; GFX11-TRUE16-NEXT: s_addk_i32 s0, 0x300
+; GFX11-TRUE16-NEXT: s_addk_i32 s1, 0x300
+; GFX11-TRUE16-NEXT: s_pack_ll_b32_b16 s3, s5, s6
+; GFX11-TRUE16-NEXT: v_add_nc_u32_e32 v2, 3, v25
+; GFX11-TRUE16-NEXT: s_pack_ll_b32_b16 s0, s0, s1
+; GFX11-TRUE16-NEXT: v_add_nc_u32_e32 v3, 3, v16
+; GFX11-TRUE16-NEXT: v_add_nc_u32_e32 v4, 3, v27
+; GFX11-TRUE16-NEXT: v_add_nc_u32_e32 v5, 3, v26
+; GFX11-TRUE16-NEXT: v_and_b32_e32 v2, 0xff, v2
+; GFX11-TRUE16-NEXT: v_add_nc_u32_e32 v6, 3, v29
+; GFX11-TRUE16-NEXT: v_and_b32_e32 v3, 0xff, v3
+; GFX11-TRUE16-NEXT: v_and_b32_e32 v4, 0xff, v4
+; GFX11-TRUE16-NEXT: s_add_i32 s20, s20, 3
+; GFX11-TRUE16-NEXT: v_or_b32_e32 v2, v37, v2
+; GFX11-TRUE16-NEXT: s_and_b32 s7, s20, 0xff
+; GFX11-TRUE16-NEXT: v_or_b32_e32 v3, v17, v3
+; GFX11-TRUE16-NEXT: s_lshl_b32 s8, s21, 8
+; GFX11-TRUE16-NEXT: s_add_i32 s22, s22, 3
+; GFX11-TRUE16-NEXT: v_add_nc_u32_e32 v8, 0x300, v2
+; GFX11-TRUE16-NEXT: s_or_b32 s7, s8, s7
+; GFX11-TRUE16-NEXT: v_add_nc_u32_e32 v2, 0x300, v3
+; GFX11-TRUE16-NEXT: v_or_b32_e32 v3, v36, v4
+; GFX11-TRUE16-NEXT: v_add_nc_u32_e32 v4, 3, v24
+; GFX11-TRUE16-NEXT: s_and_b32 s8, s22, 0xff
+; GFX11-TRUE16-NEXT: s_lshl_b32 s9, s23, 8
+; GFX11-TRUE16-NEXT: s_add_i32 s16, s16, 3
+; GFX11-TRUE16-NEXT: v_add_nc_u32_e32 v7, 0x300, v3
+; GFX11-TRUE16-NEXT: v_and_b32_e32 v3, 0xff, v4
+; GFX11-TRUE16-NEXT: v_and_b32_e32 v4, 0xff, v5
+; GFX11-TRUE16-NEXT: v_and_b32_e32 v5, 0xff, v6
+; GFX11-TRUE16-NEXT: v_add_nc_u32_e32 v6, 3, v30
+; GFX11-TRUE16-NEXT: s_or_b32 s8, s9, s8
+; GFX11-TRUE16-NEXT: s_and_b32 s9, s16, 0xff
+; GFX11-TRUE16-NEXT: s_lshl_b32 s10, s17, 8
+; GFX11-TRUE16-NEXT: s_add_i32 s18, s18, 3
+; GFX11-TRUE16-NEXT: s_or_b32 s9, s10, s9
+; GFX11-TRUE16-NEXT: s_and_b32 s10, s18, 0xff
+; GFX11-TRUE16-NEXT: s_lshl_b32 s11, s19, 8
+; GFX11-TRUE16-NEXT: v_or_b32_e32 v4, v33, v4
+; GFX11-TRUE16-NEXT: v_and_b32_e32 v10, 0xff, v6
+; GFX11-TRUE16-NEXT: s_or_b32 s10, s11, s10
+; GFX11-TRUE16-NEXT: s_addk_i32 s9, 0x300
+; GFX11-TRUE16-NEXT: s_addk_i32 s10, 0x300
+; GFX11-TRUE16-NEXT: v_add_nc_u32_e32 v11, 0x300, v4
+; GFX11-TRUE16-NEXT: v_or_b32_e32 v4, v31, v10
+; GFX11-TRUE16-NEXT: s_addk_i32 s4, 0x300
+; GFX11-TRUE16-NEXT: v_add_nc_u32_e32 v1, 3, v20
+; GFX11-TRUE16-NEXT: s_pack_ll_b32_b16 s1, s9, s10
+; GFX11-TRUE16-NEXT: v_add_nc_u32_e32 v0, 3, v18
+; GFX11-TRUE16-NEXT: s_addk_i32 s7, 0x300
+; GFX11-TRUE16-NEXT: s_addk_i32 s8, 0x300
+; GFX11-TRUE16-NEXT: v_add_nc_u32_e32 v10, 0x300, v4
+; GFX11-TRUE16-NEXT: s_pack_ll_b32_b16 s2, s7, s8
+; GFX11-TRUE16-NEXT: v_and_b32_e32 v0, 0xff, v0
+; GFX11-TRUE16-NEXT: v_dual_mov_b32 v4, s4 :: v_dual_and_b32 v1, 0xff, v1
+; GFX11-TRUE16-NEXT: v_or_b32_e32 v3, v34, v3
+; GFX11-TRUE16-NEXT: v_mov_b16_e32 v8.h, v2.l
+; GFX11-TRUE16-NEXT: s_delay_alu instid0(VALU_DEP_4)
+; GFX11-TRUE16-NEXT: v_or_b32_e32 v0, v19, v0
+; GFX11-TRUE16-NEXT: v_mov_b32_e32 v2, s2
+; GFX11-TRUE16-NEXT: v_or_b32_e32 v1, v21, v1
+; GFX11-TRUE16-NEXT: v_add_nc_u32_e32 v6, 0x300, v3
+; GFX11-TRUE16-NEXT: v_add_nc_u32_e32 v3, 3, v23
+; GFX11-TRUE16-NEXT: v_add_nc_u32_e32 v9, 0x300, v0
+; GFX11-TRUE16-NEXT: v_or_b32_e32 v5, v32, v5
+; GFX11-TRUE16-NEXT: v_add_nc_u32_e32 v0, 0x300, v1
+; GFX11-TRUE16-NEXT: v_add_nc_u32_e32 v1, 3, v28
+; GFX11-TRUE16-NEXT: v_and_b32_e32 v3, 0xff, v3
+; GFX11-TRUE16-NEXT: v_mov_b16_e32 v6.h, v11.l
+; GFX11-TRUE16-NEXT: v_add_nc_u32_e32 v5, 0x300, v5
+; GFX11-TRUE16-NEXT: v_mov_b16_e32 v5.h, v10.l
+; GFX11-TRUE16-NEXT: v_and_b32_e32 v1, 0xff, v1
+; GFX11-TRUE16-NEXT: v_or_b32_e32 v3, v22, v3
+; GFX11-TRUE16-NEXT: v_mov_b16_e32 v9.h, v0.l
+; GFX11-TRUE16-NEXT: v_mov_b32_e32 v0, s0
+; GFX11-TRUE16-NEXT: s_delay_alu instid0(VALU_DEP_4) | instskip(NEXT) | instid1(VALU_DEP_4)
+; GFX11-TRUE16-NEXT: v_or_b32_e32 v1, v35, v1
+; GFX11-TRUE16-NEXT: v_add_nc_u32_e32 v3, 0x300, v3
+; GFX11-TRUE16-NEXT: s_delay_alu instid0(VALU_DEP_2) | instskip(NEXT) | instid1(VALU_DEP_2)
+; GFX11-TRUE16-NEXT: v_add_nc_u32_e32 v1, 0x300, v1
+; GFX11-TRUE16-NEXT: v_mov_b16_e32 v4.h, v3.l
+; GFX11-TRUE16-NEXT: v_mov_b32_e32 v3, s3
+; GFX11-TRUE16-NEXT: s_delay_alu instid0(VALU_DEP_3)
+; GFX11-TRUE16-NEXT: v_mov_b16_e32 v7.h, v1.l
+; GFX11-TRUE16-NEXT: v_mov_b32_e32 v1, s1
+; GFX11-TRUE16-NEXT: .LBB63_3: ; %end
+; GFX11-TRUE16-NEXT: s_setpc_b64 s[30:31]
+; GFX11-TRUE16-NEXT: .LBB63_4:
+; GFX11-TRUE16-NEXT: ; implicit-def: $vgpr0_vgpr1_vgpr2_vgpr3_vgpr4_vgpr5_vgpr6_vgpr7_vgpr8_vgpr9_vgpr10_vgpr11_vgpr12_vgpr13_vgpr14_vgpr15
+; GFX11-TRUE16-NEXT: s_branch .LBB63_2
+;
+; GFX11-FAKE16-LABEL: bitcast_v40i8_to_v20f16_scalar:
+; GFX11-FAKE16: ; %bb.0:
+; GFX11-FAKE16-NEXT: s_waitcnt vmcnt(0) expcnt(0) lgkmcnt(0)
+; GFX11-FAKE16-NEXT: v_cmp_ne_u32_e32 vcc_lo, 0, v22
+; GFX11-FAKE16-NEXT: v_dual_mov_b32 v25, v14 :: v_dual_mov_b32 v28, v12
+; GFX11-FAKE16-NEXT: v_dual_mov_b32 v27, v10 :: v_dual_mov_b32 v26, v8
+; GFX11-FAKE16-NEXT: v_dual_mov_b32 v24, v6 :: v_dual_mov_b32 v23, v0
+; GFX11-FAKE16-NEXT: v_dual_mov_b32 v30, v4 :: v_dual_mov_b32 v29, v2
+; GFX11-FAKE16-NEXT: v_lshlrev_b32_e32 v22, 8, v1
+; GFX11-FAKE16-NEXT: v_lshlrev_b32_e32 v32, 8, v3
+; GFX11-FAKE16-NEXT: v_lshlrev_b32_e32 v31, 8, v5
+; GFX11-FAKE16-NEXT: v_lshlrev_b32_e32 v34, 8, v7
+; GFX11-FAKE16-NEXT: v_lshlrev_b32_e32 v33, 8, v9
+; GFX11-FAKE16-NEXT: v_lshlrev_b32_e32 v36, 8, v11
+; GFX11-FAKE16-NEXT: v_lshlrev_b32_e32 v35, 8, v13
+; GFX11-FAKE16-NEXT: v_lshlrev_b32_e32 v37, 8, v15
+; GFX11-FAKE16-NEXT: v_lshlrev_b32_e32 v17, 8, v17
+; GFX11-FAKE16-NEXT: v_lshlrev_b32_e32 v19, 8, v19
+; GFX11-FAKE16-NEXT: v_lshlrev_b32_e32 v21, 8, v21
+; GFX11-FAKE16-NEXT: s_mov_b32 s4, 0
+; GFX11-FAKE16-NEXT: s_and_b32 s5, vcc_lo, exec_lo
+; GFX11-FAKE16-NEXT: s_cbranch_scc0 .LBB63_4
+; GFX11-FAKE16-NEXT: ; %bb.1: ; %cmp.false
+; GFX11-FAKE16-NEXT: s_and_b32 s5, s0, 0xff
+; GFX11-FAKE16-NEXT: s_lshl_b32 s6, s1, 8
+; GFX11-FAKE16-NEXT: s_and_b32 s7, s2, 0xff
+; GFX11-FAKE16-NEXT: s_lshl_b32 s8, s3, 8
+; GFX11-FAKE16-NEXT: s_or_b32 s5, s5, s6
+; GFX11-FAKE16-NEXT: s_or_b32 s6, s7, s8
+; GFX11-FAKE16-NEXT: s_and_b32 s7, s16, 0xff
+; GFX11-FAKE16-NEXT: s_lshl_b32 s8, s17, 8
+; GFX11-FAKE16-NEXT: s_and_b32 s9, s18, 0xff
+; GFX11-FAKE16-NEXT: s_lshl_b32 s10, s19, 8
+; GFX11-FAKE16-NEXT: s_or_b32 s7, s7, s8
+; GFX11-FAKE16-NEXT: s_or_b32 s8, s9, s10
+; GFX11-FAKE16-NEXT: s_pack_ll_b32_b16 s5, s5, s6
+; GFX11-FAKE16-NEXT: s_pack_ll_b32_b16 s6, s7, s8
+; GFX11-FAKE16-NEXT: s_and_b32 s7, s20, 0xff
+; GFX11-FAKE16-NEXT: s_lshl_b32 s8, s21, 8
+; GFX11-FAKE16-NEXT: s_and_b32 s9, s22, 0xff
+; GFX11-FAKE16-NEXT: s_lshl_b32 s10, s23, 8
+; GFX11-FAKE16-NEXT: s_or_b32 s7, s7, s8
+; GFX11-FAKE16-NEXT: s_or_b32 s8, s9, s10
+; GFX11-FAKE16-NEXT: s_and_b32 s9, s24, 0xff
+; GFX11-FAKE16-NEXT: s_lshl_b32 s10, s25, 8
+; GFX11-FAKE16-NEXT: v_and_b32_e32 v0, 0xff, v23
+; GFX11-FAKE16-NEXT: s_pack_ll_b32_b16 s7, s7, s8
+; GFX11-FAKE16-NEXT: s_or_b32 s8, s9, s10
+; GFX11-FAKE16-NEXT: s_and_b32 s9, s26, 0xff
+; GFX11-FAKE16-NEXT: s_lshl_b32 s10, s27, 8
+; GFX11-FAKE16-NEXT: s_and_b32 s11, s28, 0xff
+; GFX11-FAKE16-NEXT: s_lshl_b32 s12, s29, 8
+; GFX11-FAKE16-NEXT: s_or_b32 s9, s9, s10
+; GFX11-FAKE16-NEXT: s_or_b32 s10, s11, s12
+; GFX11-FAKE16-NEXT: v_or_b32_e32 v0, v0, v22
+; GFX11-FAKE16-NEXT: v_and_b32_e64 v2, 0xffff, s10
+; GFX11-FAKE16-NEXT: v_and_b32_e32 v3, 0xff, v30
+; GFX11-FAKE16-NEXT: v_and_b32_e32 v5, 0xff, v24
+; GFX11-FAKE16-NEXT: s_pack_ll_b32_b16 s8, s8, s9
+; GFX11-FAKE16-NEXT: v_and_b32_e32 v6, 0xff, v28
+; GFX11-FAKE16-NEXT: v_lshl_or_b32 v4, v0, 16, v2
+; GFX11-FAKE16-NEXT: v_and_b32_e32 v0, 0xff, v26
+; GFX11-FAKE16-NEXT: v_or_b32_e32 v2, v3, v31
+; GFX11-FAKE16-NEXT: v_or_b32_e32 v3, v5, v34
+; GFX11-FAKE16-NEXT: v_and_b32_e32 v1, 0xff, v29
+; GFX11-FAKE16-NEXT: v_and_b32_e32 v5, 0xff, v27
+; GFX11-FAKE16-NEXT: v_or_b32_e32 v0, v0, v33
+; GFX11-FAKE16-NEXT: v_and_b32_e32 v7, 0xff, v25
+; GFX11-FAKE16-NEXT: v_and_b32_e32 v3, 0xffff, v3
+; GFX11-FAKE16-NEXT: v_and_b32_e32 v8, 0xff, v18
+; GFX11-FAKE16-NEXT: v_or_b32_e32 v9, v6, v35
+; GFX11-FAKE16-NEXT: v_and_b32_e32 v6, 0xff, v16
+; GFX11-FAKE16-NEXT: v_or_b32_e32 v5, v5, v36
+; GFX11-FAKE16-NEXT: v_or_b32_e32 v7, v7, v37
+; GFX11-FAKE16-NEXT: v_and_b32_e32 v10, 0xff, v20
+; GFX11-FAKE16-NEXT: v_or_b32_e32 v8, v8, v19
+; GFX11-FAKE16-NEXT: v_or_b32_e32 v12, v6, v17
+; GFX11-FAKE16-NEXT: v_lshl_or_b32 v6, v0, 16, v3
+; GFX11-FAKE16-NEXT: v_mov_b32_e32 v0, s5
+; GFX11-FAKE16-NEXT: v_or_b32_e32 v1, v1, v32
+; GFX11-FAKE16-NEXT: v_and_b32_e32 v11, 0xffff, v5
+; GFX11-FAKE16-NEXT: v_and_b32_e32 v13, 0xffff, v7
+; GFX11-FAKE16-NEXT: v_or_b32_e32 v10, v10, v21
+; GFX11-FAKE16-NEXT: v_and_b32_e32 v14, 0xffff, v8
+; GFX11-FAKE16-NEXT: v_and_b32_e32 v1, 0xffff, v1
+; GFX11-FAKE16-NEXT: v_lshl_or_b32 v7, v9, 16, v11
+; GFX11-FAKE16-NEXT: v_lshl_or_b32 v8, v12, 16, v13
+; GFX11-FAKE16-NEXT: v_mov_b32_e32 v3, s8
+; GFX11-FAKE16-NEXT: v_lshl_or_b32 v9, v10, 16, v14
+; GFX11-FAKE16-NEXT: v_lshl_or_b32 v5, v2, 16, v1
+; GFX11-FAKE16-NEXT: v_dual_mov_b32 v1, s6 :: v_dual_mov_b32 v2, s7
+; GFX11-FAKE16-NEXT: s_and_not1_b32 vcc_lo, exec_lo, s4
+; GFX11-FAKE16-NEXT: s_cbranch_vccnz .LBB63_3
+; GFX11-FAKE16-NEXT: .LBB63_2: ; %cmp.true
+; GFX11-FAKE16-NEXT: v_add_nc_u32_e32 v4, 3, v27
+; GFX11-FAKE16-NEXT: s_add_i32 s28, s28, 3
+; GFX11-FAKE16-NEXT: s_lshl_b32 s5, s29, 8
+; GFX11-FAKE16-NEXT: s_and_b32 s4, s28, 0xff
+; GFX11-FAKE16-NEXT: s_add_i32 s24, s24, 3
+; GFX11-FAKE16-NEXT: s_or_b32 s4, s5, s4
+; GFX11-FAKE16-NEXT: s_and_b32 s5, s24, 0xff
+; GFX11-FAKE16-NEXT: s_lshl_b32 s6, s25, 8
+; GFX11-FAKE16-NEXT: s_add_i32 s26, s26, 3
+; GFX11-FAKE16-NEXT: v_and_b32_e32 v4, 0xff, v4
+; GFX11-FAKE16-NEXT: s_or_b32 s5, s6, s5
+; GFX11-FAKE16-NEXT: s_and_b32 s6, s26, 0xff
+; GFX11-FAKE16-NEXT: s_lshl_b32 s7, s27, 8
+; GFX11-FAKE16-NEXT: s_add_i32 s20, s20, 3
+; GFX11-FAKE16-NEXT: s_or_b32 s6, s7, s6
+; GFX11-FAKE16-NEXT: s_and_b32 s7, s20, 0xff
+; GFX11-FAKE16-NEXT: s_lshl_b32 s8, s21, 8
+; GFX11-FAKE16-NEXT: s_add_i32 s22, s22, 3
+; GFX11-FAKE16-NEXT: v_add_nc_u32_e32 v5, 3, v28
+; GFX11-FAKE16-NEXT: v_or_b32_e32 v4, v36, v4
+; GFX11-FAKE16-NEXT: v_add_nc_u32_e32 v7, 3, v26
+; GFX11-FAKE16-NEXT: s_or_b32 s7, s8, s7
+; GFX11-FAKE16-NEXT: s_and_b32 s8, s22, 0xff
+; GFX11-FAKE16-NEXT: s_lshl_b32 s9, s23, 8
+; GFX11-FAKE16-NEXT: s_add_i32 s16, s16, 3
+; GFX11-FAKE16-NEXT: s_or_b32 s8, s9, s8
+; GFX11-FAKE16-NEXT: s_and_b32 s9, s16, 0xff
+; GFX11-FAKE16-NEXT: s_lshl_b32 s10, s17, 8
+; GFX11-FAKE16-NEXT: s_add_i32 s18, s18, 3
+; GFX11-FAKE16-NEXT: s_add_i32 s0, s0, 3
+; GFX11-FAKE16-NEXT: s_add_i32 s2, s2, 3
+; GFX11-FAKE16-NEXT: s_or_b32 s9, s10, s9
+; GFX11-FAKE16-NEXT: s_and_b32 s10, s18, 0xff
+; GFX11-FAKE16-NEXT: s_lshl_b32 s11, s19, 8
+; GFX11-FAKE16-NEXT: s_and_b32 s0, s0, 0xff
+; GFX11-FAKE16-NEXT: s_lshl_b32 s1, s1, 8
+; GFX11-FAKE16-NEXT: s_and_b32 s2, s2, 0xff
+; GFX11-FAKE16-NEXT: s_lshl_b32 s3, s3, 8
+; GFX11-FAKE16-NEXT: v_and_b32_e32 v5, 0xff, v5
+; GFX11-FAKE16-NEXT: v_add_nc_u32_e32 v6, 3, v24
+; GFX11-FAKE16-NEXT: v_add_nc_u32_e32 v9, 0x300, v4
+; GFX11-FAKE16-NEXT: v_and_b32_e32 v4, 0xff, v7
+; GFX11-FAKE16-NEXT: s_or_b32 s10, s11, s10
+; GFX11-FAKE16-NEXT: s_or_b32 s0, s1, s0
+; GFX11-FAKE16-NEXT: s_or_b32 s1, s3, s2
+; GFX11-FAKE16-NEXT: s_addk_i32 s5, 0x300
+; GFX11-FAKE16-NEXT: s_addk_i32 s6, 0x300
+; GFX11-FAKE16-NEXT: s_addk_i32 s9, 0x300
+; GFX11-FAKE16-NEXT: s_addk_i32 s10, 0x300
+; GFX11-FAKE16-NEXT: s_addk_i32 s0, 0x300
+; GFX11-FAKE16-NEXT: s_addk_i32 s1, 0x300
+; GFX11-FAKE16-NEXT: v_add_nc_u32_e32 v8, 3, v29
+; GFX11-FAKE16-NEXT: v_or_b32_e32 v5, v35, v5
+; GFX11-FAKE16-NEXT: v_and_b32_e32 v6, 0xff, v6
+; GFX11-FAKE16-NEXT: v_or_b32_e32 v4, v33, v4
+; GFX11-FAKE16-NEXT: v_add_nc_u32_e32 v10, 3, v23
+; GFX11-FAKE16-NEXT: s_pack_ll_b32_b16 s0, s0, s1
+; GFX11-FAKE16-NEXT: s_pack_ll_b32_b16 s1, s9, s10
+; GFX11-FAKE16-NEXT: v_add_nc_u32_e32 v0, 3, v18
+; GFX11-FAKE16-NEXT: s_pack_ll_b32_b16 s3, s5, s6
+; GFX11-FAKE16-NEXT: v_add_nc_u32_e32 v2, 3, v25
+; GFX11-FAKE16-NEXT: s_addk_i32 s7, 0x300
+; GFX11-FAKE16-NEXT: s_addk_i32 s8, 0x300
+; GFX11-FAKE16-NEXT: v_add_nc_u32_e32 v1, 3, v20
+; GFX11-FAKE16-NEXT: v_and_b32_e32 v0, 0xff, v0
+; GFX11-FAKE16-NEXT: v_and_b32_e32 v2, 0xff, v2
+; GFX11-FAKE16-NEXT: v_and_b32_e32 v7, 0xff, v8
+; GFX11-FAKE16-NEXT: v_add_nc_u32_e32 v8, 0x300, v5
+; GFX11-FAKE16-NEXT: v_or_b32_e32 v5, v34, v6
+; GFX11-FAKE16-NEXT: v_add_nc_u32_e32 v6, 3, v30
+; GFX11-FAKE16-NEXT: v_add_nc_u32_e32 v11, 0x300, v4
+; GFX11-FAKE16-NEXT: v_and_b32_e32 v4, 0xff, v10
+; GFX11-FAKE16-NEXT: s_pack_ll_b32_b16 s2, s7, s8
+; GFX11-FAKE16-NEXT: v_add_nc_u32_e32 v3, 3, v16
+; GFX11-FAKE16-NEXT: v_and_b32_e32 v1, 0xff, v1
+; GFX11-FAKE16-NEXT: v_or_b32_e32 v0, v19, v0
+; GFX11-FAKE16-NEXT: v_or_b32_e32 v2, v37, v2
+; GFX11-FAKE16-NEXT: v_or_b32_e32 v7, v32, v7
+; GFX11-FAKE16-NEXT: v_and_b32_e32 v3, 0xff, v3
+; GFX11-FAKE16-NEXT: v_and_b32_e32 v6, 0xff, v6
+; GFX11-FAKE16-NEXT: v_or_b32_e32 v4, v22, v4
+; GFX11-FAKE16-NEXT: s_addk_i32 s4, 0x300
+; GFX11-FAKE16-NEXT: v_or_b32_e32 v1, v21, v1
+; GFX11-FAKE16-NEXT: v_or_b32_e32 v3, v17, v3
+; GFX11-FAKE16-NEXT: v_add_nc_u32_e32 v0, 0x300, v0
+; GFX11-FAKE16-NEXT: v_add_nc_u32_e32 v2, 0x300, v2
+; GFX11-FAKE16-NEXT: v_add_nc_u32_e32 v5, 0x300, v5
+; GFX11-FAKE16-NEXT: v_add_nc_u32_e32 v7, 0x300, v7
+; GFX11-FAKE16-NEXT: v_or_b32_e32 v6, v31, v6
+; GFX11-FAKE16-NEXT: v_add_nc_u32_e32 v4, 0x300, v4
+; GFX11-FAKE16-NEXT: v_and_b32_e64 v10, 0xffff, s4
+; GFX11-FAKE16-NEXT: v_add_nc_u32_e32 v1, 0x300, v1
+; GFX11-FAKE16-NEXT: v_add_nc_u32_e32 v3, 0x300, v3
+; GFX11-FAKE16-NEXT: v_add_nc_u32_e32 v6, 0x300, v6
+; GFX11-FAKE16-NEXT: v_and_b32_e32 v7, 0xffff, v7
+; GFX11-FAKE16-NEXT: v_lshl_or_b32 v4, v4, 16, v10
+; GFX11-FAKE16-NEXT: v_and_b32_e32 v10, 0xffff, v5
+; GFX11-FAKE16-NEXT: v_and_b32_e32 v9, 0xffff, v9
+; GFX11-FAKE16-NEXT: v_and_b32_e32 v2, 0xffff, v2
+; GFX11-FAKE16-NEXT: v_and_b32_e32 v0, 0xffff, v0
+; GFX11-FAKE16-NEXT: v_lshl_or_b32 v5, v6, 16, v7
+; GFX11-FAKE16-NEXT: v_lshl_or_b32 v6, v11, 16, v10
+; GFX11-FAKE16-NEXT: v_lshl_or_b32 v7, v8, 16, v9
+; GFX11-FAKE16-NEXT: v_lshl_or_b32 v8, v3, 16, v2
+; GFX11-FAKE16-NEXT: v_lshl_or_b32 v9, v1, 16, v0
+; GFX11-FAKE16-NEXT: v_dual_mov_b32 v0, s0 :: v_dual_mov_b32 v1, s1
+; GFX11-FAKE16-NEXT: v_dual_mov_b32 v2, s2 :: v_dual_mov_b32 v3, s3
+; GFX11-FAKE16-NEXT: .LBB63_3: ; %end
+; GFX11-FAKE16-NEXT: s_setpc_b64 s[30:31]
+; GFX11-FAKE16-NEXT: .LBB63_4:
+; GFX11-FAKE16-NEXT: ; implicit-def: $vgpr0_vgpr1_vgpr2_vgpr3_vgpr4_vgpr5_vgpr6_vgpr7_vgpr8_vgpr9_vgpr10_vgpr11_vgpr12_vgpr13_vgpr14_vgpr15
+; GFX11-FAKE16-NEXT: s_branch .LBB63_2
%cmp = icmp eq i32 %b, 0
br i1 %cmp, label %cmp.true, label %cmp.false
diff --git a/llvm/test/CodeGen/AMDGPU/amdgcn.bitcast.32bit.ll b/llvm/test/CodeGen/AMDGPU/amdgcn.bitcast.32bit.ll
index e6c7b1a..73b57a5 100644
--- a/llvm/test/CodeGen/AMDGPU/amdgcn.bitcast.32bit.ll
+++ b/llvm/test/CodeGen/AMDGPU/amdgcn.bitcast.32bit.ll
@@ -1482,46 +1482,87 @@ define inreg i32 @bitcast_v2bf16_to_i32_scalar(<2 x bfloat> inreg %a, i32 inreg
; GFX9-NEXT: v_mov_b32_e32 v0, s16
; GFX9-NEXT: s_setpc_b64 s[30:31]
;
-; GFX11-LABEL: bitcast_v2bf16_to_i32_scalar:
-; GFX11: ; %bb.0:
-; GFX11-NEXT: s_waitcnt vmcnt(0) expcnt(0) lgkmcnt(0)
-; GFX11-NEXT: s_cmp_lg_u32 s1, 0
-; GFX11-NEXT: s_mov_b32 s1, 0
-; GFX11-NEXT: s_cbranch_scc0 .LBB15_3
-; GFX11-NEXT: ; %bb.1: ; %Flow
-; GFX11-NEXT: s_and_not1_b32 vcc_lo, exec_lo, s1
-; GFX11-NEXT: s_cbranch_vccnz .LBB15_4
-; GFX11-NEXT: .LBB15_2: ; %cmp.true
-; GFX11-NEXT: s_lshl_b32 s1, s0, 16
-; GFX11-NEXT: s_and_b32 s0, s0, 0xffff0000
-; GFX11-NEXT: v_add_f32_e64 v0, 0x40c00000, s1
-; GFX11-NEXT: v_add_f32_e64 v1, 0x40c00000, s0
-; GFX11-NEXT: s_delay_alu instid0(VALU_DEP_2) | instskip(NEXT) | instid1(VALU_DEP_2)
-; GFX11-NEXT: v_bfe_u32 v2, v0, 16, 1
-; GFX11-NEXT: v_bfe_u32 v3, v1, 16, 1
-; GFX11-NEXT: v_or_b32_e32 v4, 0x400000, v0
-; GFX11-NEXT: v_cmp_u_f32_e32 vcc_lo, v0, v0
-; GFX11-NEXT: v_or_b32_e32 v5, 0x400000, v1
-; GFX11-NEXT: s_delay_alu instid0(VALU_DEP_4) | instskip(NEXT) | instid1(VALU_DEP_1)
-; GFX11-NEXT: v_add_nc_u32_e32 v3, v3, v1
-; GFX11-NEXT: v_add_nc_u32_e32 v3, 0x7fff, v3
-; GFX11-NEXT: v_add_nc_u32_e32 v2, v2, v0
-; GFX11-NEXT: s_delay_alu instid0(VALU_DEP_1) | instskip(NEXT) | instid1(VALU_DEP_1)
-; GFX11-NEXT: v_add_nc_u32_e32 v2, 0x7fff, v2
-; GFX11-NEXT: v_cndmask_b32_e32 v0, v2, v4, vcc_lo
-; GFX11-NEXT: v_cmp_u_f32_e32 vcc_lo, v1, v1
-; GFX11-NEXT: s_delay_alu instid0(VALU_DEP_2) | instskip(NEXT) | instid1(VALU_DEP_1)
-; GFX11-NEXT: v_lshrrev_b32_e32 v0, 16, v0
-; GFX11-NEXT: v_dual_cndmask_b32 v1, v3, v5 :: v_dual_and_b32 v0, 0xffff, v0
-; GFX11-NEXT: s_delay_alu instid0(VALU_DEP_1) | instskip(NEXT) | instid1(VALU_DEP_1)
-; GFX11-NEXT: v_lshrrev_b32_e32 v1, 16, v1
-; GFX11-NEXT: v_lshl_or_b32 v0, v1, 16, v0
-; GFX11-NEXT: s_setpc_b64 s[30:31]
-; GFX11-NEXT: .LBB15_3:
-; GFX11-NEXT: s_branch .LBB15_2
-; GFX11-NEXT: .LBB15_4:
-; GFX11-NEXT: v_mov_b32_e32 v0, s0
-; GFX11-NEXT: s_setpc_b64 s[30:31]
+; GFX11-TRUE16-LABEL: bitcast_v2bf16_to_i32_scalar:
+; GFX11-TRUE16: ; %bb.0:
+; GFX11-TRUE16-NEXT: s_waitcnt vmcnt(0) expcnt(0) lgkmcnt(0)
+; GFX11-TRUE16-NEXT: s_cmp_lg_u32 s1, 0
+; GFX11-TRUE16-NEXT: s_mov_b32 s1, 0
+; GFX11-TRUE16-NEXT: s_cbranch_scc0 .LBB15_3
+; GFX11-TRUE16-NEXT: ; %bb.1: ; %Flow
+; GFX11-TRUE16-NEXT: s_and_not1_b32 vcc_lo, exec_lo, s1
+; GFX11-TRUE16-NEXT: s_cbranch_vccnz .LBB15_4
+; GFX11-TRUE16-NEXT: .LBB15_2: ; %cmp.true
+; GFX11-TRUE16-NEXT: s_and_b32 s1, s0, 0xffff0000
+; GFX11-TRUE16-NEXT: s_lshl_b32 s0, s0, 16
+; GFX11-TRUE16-NEXT: v_add_f32_e64 v0, 0x40c00000, s1
+; GFX11-TRUE16-NEXT: v_add_f32_e64 v1, 0x40c00000, s0
+; GFX11-TRUE16-NEXT: s_delay_alu instid0(VALU_DEP_2) | instskip(NEXT) | instid1(VALU_DEP_2)
+; GFX11-TRUE16-NEXT: v_bfe_u32 v2, v0, 16, 1
+; GFX11-TRUE16-NEXT: v_bfe_u32 v3, v1, 16, 1
+; GFX11-TRUE16-NEXT: v_or_b32_e32 v4, 0x400000, v0
+; GFX11-TRUE16-NEXT: v_cmp_u_f32_e32 vcc_lo, v0, v0
+; GFX11-TRUE16-NEXT: v_or_b32_e32 v5, 0x400000, v1
+; GFX11-TRUE16-NEXT: s_delay_alu instid0(VALU_DEP_4) | instskip(NEXT) | instid1(VALU_DEP_1)
+; GFX11-TRUE16-NEXT: v_add_nc_u32_e32 v3, v3, v1
+; GFX11-TRUE16-NEXT: v_add_nc_u32_e32 v3, 0x7fff, v3
+; GFX11-TRUE16-NEXT: v_add_nc_u32_e32 v2, v2, v0
+; GFX11-TRUE16-NEXT: s_delay_alu instid0(VALU_DEP_1) | instskip(NEXT) | instid1(VALU_DEP_1)
+; GFX11-TRUE16-NEXT: v_add_nc_u32_e32 v2, 0x7fff, v2
+; GFX11-TRUE16-NEXT: v_cndmask_b32_e32 v0, v2, v4, vcc_lo
+; GFX11-TRUE16-NEXT: v_cmp_u_f32_e32 vcc_lo, v1, v1
+; GFX11-TRUE16-NEXT: s_delay_alu instid0(VALU_DEP_2) | instskip(SKIP_1) | instid1(VALU_DEP_1)
+; GFX11-TRUE16-NEXT: v_lshrrev_b32_e32 v2, 16, v0
+; GFX11-TRUE16-NEXT: v_cndmask_b32_e32 v1, v3, v5, vcc_lo
+; GFX11-TRUE16-NEXT: v_lshrrev_b32_e32 v0, 16, v1
+; GFX11-TRUE16-NEXT: s_delay_alu instid0(VALU_DEP_3)
+; GFX11-TRUE16-NEXT: v_mov_b16_e32 v0.h, v2.l
+; GFX11-TRUE16-NEXT: s_setpc_b64 s[30:31]
+; GFX11-TRUE16-NEXT: .LBB15_3:
+; GFX11-TRUE16-NEXT: s_branch .LBB15_2
+; GFX11-TRUE16-NEXT: .LBB15_4:
+; GFX11-TRUE16-NEXT: v_mov_b32_e32 v0, s0
+; GFX11-TRUE16-NEXT: s_setpc_b64 s[30:31]
+;
+; GFX11-FAKE16-LABEL: bitcast_v2bf16_to_i32_scalar:
+; GFX11-FAKE16: ; %bb.0:
+; GFX11-FAKE16-NEXT: s_waitcnt vmcnt(0) expcnt(0) lgkmcnt(0)
+; GFX11-FAKE16-NEXT: s_cmp_lg_u32 s1, 0
+; GFX11-FAKE16-NEXT: s_mov_b32 s1, 0
+; GFX11-FAKE16-NEXT: s_cbranch_scc0 .LBB15_3
+; GFX11-FAKE16-NEXT: ; %bb.1: ; %Flow
+; GFX11-FAKE16-NEXT: s_and_not1_b32 vcc_lo, exec_lo, s1
+; GFX11-FAKE16-NEXT: s_cbranch_vccnz .LBB15_4
+; GFX11-FAKE16-NEXT: .LBB15_2: ; %cmp.true
+; GFX11-FAKE16-NEXT: s_lshl_b32 s1, s0, 16
+; GFX11-FAKE16-NEXT: s_and_b32 s0, s0, 0xffff0000
+; GFX11-FAKE16-NEXT: v_add_f32_e64 v0, 0x40c00000, s1
+; GFX11-FAKE16-NEXT: v_add_f32_e64 v1, 0x40c00000, s0
+; GFX11-FAKE16-NEXT: s_delay_alu instid0(VALU_DEP_2) | instskip(NEXT) | instid1(VALU_DEP_2)
+; GFX11-FAKE16-NEXT: v_bfe_u32 v2, v0, 16, 1
+; GFX11-FAKE16-NEXT: v_bfe_u32 v3, v1, 16, 1
+; GFX11-FAKE16-NEXT: v_or_b32_e32 v4, 0x400000, v0
+; GFX11-FAKE16-NEXT: v_cmp_u_f32_e32 vcc_lo, v0, v0
+; GFX11-FAKE16-NEXT: v_or_b32_e32 v5, 0x400000, v1
+; GFX11-FAKE16-NEXT: s_delay_alu instid0(VALU_DEP_4) | instskip(NEXT) | instid1(VALU_DEP_1)
+; GFX11-FAKE16-NEXT: v_add_nc_u32_e32 v3, v3, v1
+; GFX11-FAKE16-NEXT: v_add_nc_u32_e32 v3, 0x7fff, v3
+; GFX11-FAKE16-NEXT: v_add_nc_u32_e32 v2, v2, v0
+; GFX11-FAKE16-NEXT: s_delay_alu instid0(VALU_DEP_1) | instskip(NEXT) | instid1(VALU_DEP_1)
+; GFX11-FAKE16-NEXT: v_add_nc_u32_e32 v2, 0x7fff, v2
+; GFX11-FAKE16-NEXT: v_cndmask_b32_e32 v0, v2, v4, vcc_lo
+; GFX11-FAKE16-NEXT: v_cmp_u_f32_e32 vcc_lo, v1, v1
+; GFX11-FAKE16-NEXT: s_delay_alu instid0(VALU_DEP_2) | instskip(NEXT) | instid1(VALU_DEP_1)
+; GFX11-FAKE16-NEXT: v_lshrrev_b32_e32 v0, 16, v0
+; GFX11-FAKE16-NEXT: v_dual_cndmask_b32 v1, v3, v5 :: v_dual_and_b32 v0, 0xffff, v0
+; GFX11-FAKE16-NEXT: s_delay_alu instid0(VALU_DEP_1) | instskip(NEXT) | instid1(VALU_DEP_1)
+; GFX11-FAKE16-NEXT: v_lshrrev_b32_e32 v1, 16, v1
+; GFX11-FAKE16-NEXT: v_lshl_or_b32 v0, v1, 16, v0
+; GFX11-FAKE16-NEXT: s_setpc_b64 s[30:31]
+; GFX11-FAKE16-NEXT: .LBB15_3:
+; GFX11-FAKE16-NEXT: s_branch .LBB15_2
+; GFX11-FAKE16-NEXT: .LBB15_4:
+; GFX11-FAKE16-NEXT: v_mov_b32_e32 v0, s0
+; GFX11-FAKE16-NEXT: s_setpc_b64 s[30:31]
%cmp = icmp eq i32 %b, 0
br i1 %cmp, label %cmp.true, label %cmp.false
@@ -3720,46 +3761,87 @@ define inreg float @bitcast_v2bf16_to_f32_scalar(<2 x bfloat> inreg %a, i32 inre
; GFX9-NEXT: v_mov_b32_e32 v0, s16
; GFX9-NEXT: s_setpc_b64 s[30:31]
;
-; GFX11-LABEL: bitcast_v2bf16_to_f32_scalar:
-; GFX11: ; %bb.0:
-; GFX11-NEXT: s_waitcnt vmcnt(0) expcnt(0) lgkmcnt(0)
-; GFX11-NEXT: s_cmp_lg_u32 s1, 0
-; GFX11-NEXT: s_mov_b32 s1, 0
-; GFX11-NEXT: s_cbranch_scc0 .LBB35_3
-; GFX11-NEXT: ; %bb.1: ; %Flow
-; GFX11-NEXT: s_and_not1_b32 vcc_lo, exec_lo, s1
-; GFX11-NEXT: s_cbranch_vccnz .LBB35_4
-; GFX11-NEXT: .LBB35_2: ; %cmp.true
-; GFX11-NEXT: s_lshl_b32 s1, s0, 16
-; GFX11-NEXT: s_and_b32 s0, s0, 0xffff0000
-; GFX11-NEXT: v_add_f32_e64 v0, 0x40c00000, s1
-; GFX11-NEXT: v_add_f32_e64 v1, 0x40c00000, s0
-; GFX11-NEXT: s_delay_alu instid0(VALU_DEP_2) | instskip(NEXT) | instid1(VALU_DEP_2)
-; GFX11-NEXT: v_bfe_u32 v2, v0, 16, 1
-; GFX11-NEXT: v_bfe_u32 v3, v1, 16, 1
-; GFX11-NEXT: v_or_b32_e32 v4, 0x400000, v0
-; GFX11-NEXT: v_cmp_u_f32_e32 vcc_lo, v0, v0
-; GFX11-NEXT: v_or_b32_e32 v5, 0x400000, v1
-; GFX11-NEXT: s_delay_alu instid0(VALU_DEP_4) | instskip(NEXT) | instid1(VALU_DEP_1)
-; GFX11-NEXT: v_add_nc_u32_e32 v3, v3, v1
-; GFX11-NEXT: v_add_nc_u32_e32 v3, 0x7fff, v3
-; GFX11-NEXT: v_add_nc_u32_e32 v2, v2, v0
-; GFX11-NEXT: s_delay_alu instid0(VALU_DEP_1) | instskip(NEXT) | instid1(VALU_DEP_1)
-; GFX11-NEXT: v_add_nc_u32_e32 v2, 0x7fff, v2
-; GFX11-NEXT: v_cndmask_b32_e32 v0, v2, v4, vcc_lo
-; GFX11-NEXT: v_cmp_u_f32_e32 vcc_lo, v1, v1
-; GFX11-NEXT: s_delay_alu instid0(VALU_DEP_2) | instskip(NEXT) | instid1(VALU_DEP_1)
-; GFX11-NEXT: v_lshrrev_b32_e32 v0, 16, v0
-; GFX11-NEXT: v_dual_cndmask_b32 v1, v3, v5 :: v_dual_and_b32 v0, 0xffff, v0
-; GFX11-NEXT: s_delay_alu instid0(VALU_DEP_1) | instskip(NEXT) | instid1(VALU_DEP_1)
-; GFX11-NEXT: v_lshrrev_b32_e32 v1, 16, v1
-; GFX11-NEXT: v_lshl_or_b32 v0, v1, 16, v0
-; GFX11-NEXT: s_setpc_b64 s[30:31]
-; GFX11-NEXT: .LBB35_3:
-; GFX11-NEXT: s_branch .LBB35_2
-; GFX11-NEXT: .LBB35_4:
-; GFX11-NEXT: v_mov_b32_e32 v0, s0
-; GFX11-NEXT: s_setpc_b64 s[30:31]
+; GFX11-TRUE16-LABEL: bitcast_v2bf16_to_f32_scalar:
+; GFX11-TRUE16: ; %bb.0:
+; GFX11-TRUE16-NEXT: s_waitcnt vmcnt(0) expcnt(0) lgkmcnt(0)
+; GFX11-TRUE16-NEXT: s_cmp_lg_u32 s1, 0
+; GFX11-TRUE16-NEXT: s_mov_b32 s1, 0
+; GFX11-TRUE16-NEXT: s_cbranch_scc0 .LBB35_3
+; GFX11-TRUE16-NEXT: ; %bb.1: ; %Flow
+; GFX11-TRUE16-NEXT: s_and_not1_b32 vcc_lo, exec_lo, s1
+; GFX11-TRUE16-NEXT: s_cbranch_vccnz .LBB35_4
+; GFX11-TRUE16-NEXT: .LBB35_2: ; %cmp.true
+; GFX11-TRUE16-NEXT: s_and_b32 s1, s0, 0xffff0000
+; GFX11-TRUE16-NEXT: s_lshl_b32 s0, s0, 16
+; GFX11-TRUE16-NEXT: v_add_f32_e64 v0, 0x40c00000, s1
+; GFX11-TRUE16-NEXT: v_add_f32_e64 v1, 0x40c00000, s0
+; GFX11-TRUE16-NEXT: s_delay_alu instid0(VALU_DEP_2) | instskip(NEXT) | instid1(VALU_DEP_2)
+; GFX11-TRUE16-NEXT: v_bfe_u32 v2, v0, 16, 1
+; GFX11-TRUE16-NEXT: v_bfe_u32 v3, v1, 16, 1
+; GFX11-TRUE16-NEXT: v_or_b32_e32 v4, 0x400000, v0
+; GFX11-TRUE16-NEXT: v_cmp_u_f32_e32 vcc_lo, v0, v0
+; GFX11-TRUE16-NEXT: v_or_b32_e32 v5, 0x400000, v1
+; GFX11-TRUE16-NEXT: s_delay_alu instid0(VALU_DEP_4) | instskip(NEXT) | instid1(VALU_DEP_1)
+; GFX11-TRUE16-NEXT: v_add_nc_u32_e32 v3, v3, v1
+; GFX11-TRUE16-NEXT: v_add_nc_u32_e32 v3, 0x7fff, v3
+; GFX11-TRUE16-NEXT: v_add_nc_u32_e32 v2, v2, v0
+; GFX11-TRUE16-NEXT: s_delay_alu instid0(VALU_DEP_1) | instskip(NEXT) | instid1(VALU_DEP_1)
+; GFX11-TRUE16-NEXT: v_add_nc_u32_e32 v2, 0x7fff, v2
+; GFX11-TRUE16-NEXT: v_cndmask_b32_e32 v0, v2, v4, vcc_lo
+; GFX11-TRUE16-NEXT: v_cmp_u_f32_e32 vcc_lo, v1, v1
+; GFX11-TRUE16-NEXT: s_delay_alu instid0(VALU_DEP_2) | instskip(SKIP_1) | instid1(VALU_DEP_1)
+; GFX11-TRUE16-NEXT: v_lshrrev_b32_e32 v2, 16, v0
+; GFX11-TRUE16-NEXT: v_cndmask_b32_e32 v1, v3, v5, vcc_lo
+; GFX11-TRUE16-NEXT: v_lshrrev_b32_e32 v0, 16, v1
+; GFX11-TRUE16-NEXT: s_delay_alu instid0(VALU_DEP_3)
+; GFX11-TRUE16-NEXT: v_mov_b16_e32 v0.h, v2.l
+; GFX11-TRUE16-NEXT: s_setpc_b64 s[30:31]
+; GFX11-TRUE16-NEXT: .LBB35_3:
+; GFX11-TRUE16-NEXT: s_branch .LBB35_2
+; GFX11-TRUE16-NEXT: .LBB35_4:
+; GFX11-TRUE16-NEXT: v_mov_b32_e32 v0, s0
+; GFX11-TRUE16-NEXT: s_setpc_b64 s[30:31]
+;
+; GFX11-FAKE16-LABEL: bitcast_v2bf16_to_f32_scalar:
+; GFX11-FAKE16: ; %bb.0:
+; GFX11-FAKE16-NEXT: s_waitcnt vmcnt(0) expcnt(0) lgkmcnt(0)
+; GFX11-FAKE16-NEXT: s_cmp_lg_u32 s1, 0
+; GFX11-FAKE16-NEXT: s_mov_b32 s1, 0
+; GFX11-FAKE16-NEXT: s_cbranch_scc0 .LBB35_3
+; GFX11-FAKE16-NEXT: ; %bb.1: ; %Flow
+; GFX11-FAKE16-NEXT: s_and_not1_b32 vcc_lo, exec_lo, s1
+; GFX11-FAKE16-NEXT: s_cbranch_vccnz .LBB35_4
+; GFX11-FAKE16-NEXT: .LBB35_2: ; %cmp.true
+; GFX11-FAKE16-NEXT: s_lshl_b32 s1, s0, 16
+; GFX11-FAKE16-NEXT: s_and_b32 s0, s0, 0xffff0000
+; GFX11-FAKE16-NEXT: v_add_f32_e64 v0, 0x40c00000, s1
+; GFX11-FAKE16-NEXT: v_add_f32_e64 v1, 0x40c00000, s0
+; GFX11-FAKE16-NEXT: s_delay_alu instid0(VALU_DEP_2) | instskip(NEXT) | instid1(VALU_DEP_2)
+; GFX11-FAKE16-NEXT: v_bfe_u32 v2, v0, 16, 1
+; GFX11-FAKE16-NEXT: v_bfe_u32 v3, v1, 16, 1
+; GFX11-FAKE16-NEXT: v_or_b32_e32 v4, 0x400000, v0
+; GFX11-FAKE16-NEXT: v_cmp_u_f32_e32 vcc_lo, v0, v0
+; GFX11-FAKE16-NEXT: v_or_b32_e32 v5, 0x400000, v1
+; GFX11-FAKE16-NEXT: s_delay_alu instid0(VALU_DEP_4) | instskip(NEXT) | instid1(VALU_DEP_1)
+; GFX11-FAKE16-NEXT: v_add_nc_u32_e32 v3, v3, v1
+; GFX11-FAKE16-NEXT: v_add_nc_u32_e32 v3, 0x7fff, v3
+; GFX11-FAKE16-NEXT: v_add_nc_u32_e32 v2, v2, v0
+; GFX11-FAKE16-NEXT: s_delay_alu instid0(VALU_DEP_1) | instskip(NEXT) | instid1(VALU_DEP_1)
+; GFX11-FAKE16-NEXT: v_add_nc_u32_e32 v2, 0x7fff, v2
+; GFX11-FAKE16-NEXT: v_cndmask_b32_e32 v0, v2, v4, vcc_lo
+; GFX11-FAKE16-NEXT: v_cmp_u_f32_e32 vcc_lo, v1, v1
+; GFX11-FAKE16-NEXT: s_delay_alu instid0(VALU_DEP_2) | instskip(NEXT) | instid1(VALU_DEP_1)
+; GFX11-FAKE16-NEXT: v_lshrrev_b32_e32 v0, 16, v0
+; GFX11-FAKE16-NEXT: v_dual_cndmask_b32 v1, v3, v5 :: v_dual_and_b32 v0, 0xffff, v0
+; GFX11-FAKE16-NEXT: s_delay_alu instid0(VALU_DEP_1) | instskip(NEXT) | instid1(VALU_DEP_1)
+; GFX11-FAKE16-NEXT: v_lshrrev_b32_e32 v1, 16, v1
+; GFX11-FAKE16-NEXT: v_lshl_or_b32 v0, v1, 16, v0
+; GFX11-FAKE16-NEXT: s_setpc_b64 s[30:31]
+; GFX11-FAKE16-NEXT: .LBB35_3:
+; GFX11-FAKE16-NEXT: s_branch .LBB35_2
+; GFX11-FAKE16-NEXT: .LBB35_4:
+; GFX11-FAKE16-NEXT: v_mov_b32_e32 v0, s0
+; GFX11-FAKE16-NEXT: s_setpc_b64 s[30:31]
%cmp = icmp eq i32 %b, 0
br i1 %cmp, label %cmp.true, label %cmp.false
@@ -5424,27 +5506,24 @@ define <2 x i16> @bitcast_v2bf16_to_v2i16(<2 x bfloat> %a, i32 %b) {
; GFX11-TRUE16-NEXT: s_and_not1_saveexec_b32 s0, s0
; GFX11-TRUE16-NEXT: s_cbranch_execz .LBB50_2
; GFX11-TRUE16-NEXT: ; %bb.1: ; %cmp.true
-; GFX11-TRUE16-NEXT: v_and_b32_e32 v1, 0xffff0000, v0
+; GFX11-TRUE16-NEXT: v_lshlrev_b32_e32 v1, 16, v0
+; GFX11-TRUE16-NEXT: v_and_b32_e32 v0, 0xffff0000, v0
; GFX11-TRUE16-NEXT: s_delay_alu instid0(VALU_DEP_1) | instskip(NEXT) | instid1(VALU_DEP_1)
-; GFX11-TRUE16-NEXT: v_dual_add_f32 v1, 0x40c00000, v1 :: v_dual_lshlrev_b32 v0, 16, v0
-; GFX11-TRUE16-NEXT: v_add_f32_e32 v0, 0x40c00000, v0
-; GFX11-TRUE16-NEXT: s_delay_alu instid0(VALU_DEP_2) | instskip(NEXT) | instid1(VALU_DEP_2)
+; GFX11-TRUE16-NEXT: v_dual_add_f32 v1, 0x40c00000, v1 :: v_dual_add_f32 v0, 0x40c00000, v0
; GFX11-TRUE16-NEXT: v_bfe_u32 v2, v1, 16, 1
+; GFX11-TRUE16-NEXT: s_delay_alu instid0(VALU_DEP_2)
; GFX11-TRUE16-NEXT: v_bfe_u32 v3, v0, 16, 1
; GFX11-TRUE16-NEXT: v_or_b32_e32 v4, 0x400000, v1
; GFX11-TRUE16-NEXT: v_cmp_u_f32_e32 vcc_lo, v1, v1
; GFX11-TRUE16-NEXT: v_or_b32_e32 v5, 0x400000, v0
; GFX11-TRUE16-NEXT: v_add3_u32 v2, v2, v1, 0x7fff
; GFX11-TRUE16-NEXT: v_add3_u32 v3, v3, v0, 0x7fff
-; GFX11-TRUE16-NEXT: s_delay_alu instid0(VALU_DEP_2) | instskip(SKIP_1) | instid1(VALU_DEP_2)
+; GFX11-TRUE16-NEXT: s_delay_alu instid0(VALU_DEP_2) | instskip(SKIP_1) | instid1(VALU_DEP_3)
; GFX11-TRUE16-NEXT: v_cndmask_b32_e32 v1, v2, v4, vcc_lo
; GFX11-TRUE16-NEXT: v_cmp_u_f32_e32 vcc_lo, v0, v0
-; GFX11-TRUE16-NEXT: v_mov_b16_e32 v1.l, v1.h
-; GFX11-TRUE16-NEXT: s_delay_alu instid0(VALU_DEP_4) | instskip(NEXT) | instid1(VALU_DEP_1)
; GFX11-TRUE16-NEXT: v_cndmask_b32_e32 v0, v3, v5, vcc_lo
-; GFX11-TRUE16-NEXT: v_lshrrev_b32_e32 v0, 16, v0
-; GFX11-TRUE16-NEXT: s_delay_alu instid0(VALU_DEP_1)
-; GFX11-TRUE16-NEXT: v_lshl_or_b32 v0, v1, 16, v0
+; GFX11-TRUE16-NEXT: s_delay_alu instid0(VALU_DEP_3)
+; GFX11-TRUE16-NEXT: v_mov_b16_e32 v0.l, v1.h
; GFX11-TRUE16-NEXT: .LBB50_2: ; %end
; GFX11-TRUE16-NEXT: s_or_b32 exec_lo, exec_lo, s0
; GFX11-TRUE16-NEXT: s_setpc_b64 s[30:31]
@@ -5592,44 +5671,81 @@ define inreg <2 x i16> @bitcast_v2bf16_to_v2i16_scalar(<2 x bfloat> inreg %a, i3
; GFX9-NEXT: v_mov_b32_e32 v0, s16
; GFX9-NEXT: s_setpc_b64 s[30:31]
;
-; GFX11-LABEL: bitcast_v2bf16_to_v2i16_scalar:
-; GFX11: ; %bb.0:
-; GFX11-NEXT: s_waitcnt vmcnt(0) expcnt(0) lgkmcnt(0)
-; GFX11-NEXT: s_cmp_lg_u32 s1, 0
-; GFX11-NEXT: s_mov_b32 s1, 0
-; GFX11-NEXT: s_cbranch_scc0 .LBB51_3
-; GFX11-NEXT: ; %bb.1: ; %Flow
-; GFX11-NEXT: s_and_not1_b32 vcc_lo, exec_lo, s1
-; GFX11-NEXT: s_cbranch_vccnz .LBB51_4
-; GFX11-NEXT: .LBB51_2: ; %cmp.true
-; GFX11-NEXT: s_lshl_b32 s1, s0, 16
-; GFX11-NEXT: s_and_b32 s0, s0, 0xffff0000
-; GFX11-NEXT: v_add_f32_e64 v0, 0x40c00000, s1
-; GFX11-NEXT: v_add_f32_e64 v1, 0x40c00000, s0
-; GFX11-NEXT: s_delay_alu instid0(VALU_DEP_2) | instskip(NEXT) | instid1(VALU_DEP_2)
-; GFX11-NEXT: v_bfe_u32 v2, v0, 16, 1
-; GFX11-NEXT: v_bfe_u32 v3, v1, 16, 1
-; GFX11-NEXT: v_or_b32_e32 v4, 0x400000, v0
-; GFX11-NEXT: v_cmp_u_f32_e32 vcc_lo, v0, v0
-; GFX11-NEXT: v_or_b32_e32 v5, 0x400000, v1
-; GFX11-NEXT: s_delay_alu instid0(VALU_DEP_4) | instskip(NEXT) | instid1(VALU_DEP_1)
-; GFX11-NEXT: v_add_nc_u32_e32 v3, v3, v1
-; GFX11-NEXT: v_add_nc_u32_e32 v3, 0x7fff, v3
-; GFX11-NEXT: v_add_nc_u32_e32 v2, v2, v0
-; GFX11-NEXT: s_delay_alu instid0(VALU_DEP_1) | instskip(NEXT) | instid1(VALU_DEP_1)
-; GFX11-NEXT: v_add_nc_u32_e32 v2, 0x7fff, v2
-; GFX11-NEXT: v_cndmask_b32_e32 v0, v2, v4, vcc_lo
-; GFX11-NEXT: v_cmp_u_f32_e32 vcc_lo, v1, v1
-; GFX11-NEXT: s_delay_alu instid0(VALU_DEP_2) | instskip(SKIP_1) | instid1(VALU_DEP_1)
-; GFX11-NEXT: v_lshrrev_b32_e32 v0, 16, v0
-; GFX11-NEXT: v_cndmask_b32_e32 v1, v3, v5, vcc_lo
-; GFX11-NEXT: v_and_or_b32 v0, 0xffff0000, v1, v0
-; GFX11-NEXT: s_setpc_b64 s[30:31]
-; GFX11-NEXT: .LBB51_3:
-; GFX11-NEXT: s_branch .LBB51_2
-; GFX11-NEXT: .LBB51_4:
-; GFX11-NEXT: v_mov_b32_e32 v0, s0
-; GFX11-NEXT: s_setpc_b64 s[30:31]
+; GFX11-TRUE16-LABEL: bitcast_v2bf16_to_v2i16_scalar:
+; GFX11-TRUE16: ; %bb.0:
+; GFX11-TRUE16-NEXT: s_waitcnt vmcnt(0) expcnt(0) lgkmcnt(0)
+; GFX11-TRUE16-NEXT: s_cmp_lg_u32 s1, 0
+; GFX11-TRUE16-NEXT: s_mov_b32 s1, 0
+; GFX11-TRUE16-NEXT: s_cbranch_scc0 .LBB51_3
+; GFX11-TRUE16-NEXT: ; %bb.1: ; %Flow
+; GFX11-TRUE16-NEXT: s_and_not1_b32 vcc_lo, exec_lo, s1
+; GFX11-TRUE16-NEXT: s_cbranch_vccnz .LBB51_4
+; GFX11-TRUE16-NEXT: .LBB51_2: ; %cmp.true
+; GFX11-TRUE16-NEXT: s_lshl_b32 s1, s0, 16
+; GFX11-TRUE16-NEXT: s_and_b32 s0, s0, 0xffff0000
+; GFX11-TRUE16-NEXT: v_add_f32_e64 v0, 0x40c00000, s1
+; GFX11-TRUE16-NEXT: v_add_f32_e64 v1, 0x40c00000, s0
+; GFX11-TRUE16-NEXT: s_delay_alu instid0(VALU_DEP_2) | instskip(NEXT) | instid1(VALU_DEP_2)
+; GFX11-TRUE16-NEXT: v_bfe_u32 v2, v0, 16, 1
+; GFX11-TRUE16-NEXT: v_bfe_u32 v3, v1, 16, 1
+; GFX11-TRUE16-NEXT: v_or_b32_e32 v4, 0x400000, v0
+; GFX11-TRUE16-NEXT: v_cmp_u_f32_e32 vcc_lo, v0, v0
+; GFX11-TRUE16-NEXT: v_or_b32_e32 v5, 0x400000, v1
+; GFX11-TRUE16-NEXT: v_add_nc_u32_e32 v2, v2, v0
+; GFX11-TRUE16-NEXT: s_delay_alu instid0(VALU_DEP_1) | instskip(NEXT) | instid1(VALU_DEP_1)
+; GFX11-TRUE16-NEXT: v_add_nc_u32_e32 v2, 0x7fff, v2
+; GFX11-TRUE16-NEXT: v_dual_cndmask_b32 v2, v2, v4 :: v_dual_add_nc_u32 v3, v3, v1
+; GFX11-TRUE16-NEXT: s_delay_alu instid0(VALU_DEP_1) | instskip(SKIP_1) | instid1(VALU_DEP_2)
+; GFX11-TRUE16-NEXT: v_add_nc_u32_e32 v3, 0x7fff, v3
+; GFX11-TRUE16-NEXT: v_cmp_u_f32_e32 vcc_lo, v1, v1
+; GFX11-TRUE16-NEXT: v_cndmask_b32_e32 v0, v3, v5, vcc_lo
+; GFX11-TRUE16-NEXT: s_delay_alu instid0(VALU_DEP_4)
+; GFX11-TRUE16-NEXT: v_mov_b16_e32 v0.l, v2.h
+; GFX11-TRUE16-NEXT: s_setpc_b64 s[30:31]
+; GFX11-TRUE16-NEXT: .LBB51_3:
+; GFX11-TRUE16-NEXT: s_branch .LBB51_2
+; GFX11-TRUE16-NEXT: .LBB51_4:
+; GFX11-TRUE16-NEXT: v_mov_b32_e32 v0, s0
+; GFX11-TRUE16-NEXT: s_setpc_b64 s[30:31]
+;
+; GFX11-FAKE16-LABEL: bitcast_v2bf16_to_v2i16_scalar:
+; GFX11-FAKE16: ; %bb.0:
+; GFX11-FAKE16-NEXT: s_waitcnt vmcnt(0) expcnt(0) lgkmcnt(0)
+; GFX11-FAKE16-NEXT: s_cmp_lg_u32 s1, 0
+; GFX11-FAKE16-NEXT: s_mov_b32 s1, 0
+; GFX11-FAKE16-NEXT: s_cbranch_scc0 .LBB51_3
+; GFX11-FAKE16-NEXT: ; %bb.1: ; %Flow
+; GFX11-FAKE16-NEXT: s_and_not1_b32 vcc_lo, exec_lo, s1
+; GFX11-FAKE16-NEXT: s_cbranch_vccnz .LBB51_4
+; GFX11-FAKE16-NEXT: .LBB51_2: ; %cmp.true
+; GFX11-FAKE16-NEXT: s_lshl_b32 s1, s0, 16
+; GFX11-FAKE16-NEXT: s_and_b32 s0, s0, 0xffff0000
+; GFX11-FAKE16-NEXT: v_add_f32_e64 v0, 0x40c00000, s1
+; GFX11-FAKE16-NEXT: v_add_f32_e64 v1, 0x40c00000, s0
+; GFX11-FAKE16-NEXT: s_delay_alu instid0(VALU_DEP_2) | instskip(NEXT) | instid1(VALU_DEP_2)
+; GFX11-FAKE16-NEXT: v_bfe_u32 v2, v0, 16, 1
+; GFX11-FAKE16-NEXT: v_bfe_u32 v3, v1, 16, 1
+; GFX11-FAKE16-NEXT: v_or_b32_e32 v4, 0x400000, v0
+; GFX11-FAKE16-NEXT: v_cmp_u_f32_e32 vcc_lo, v0, v0
+; GFX11-FAKE16-NEXT: v_or_b32_e32 v5, 0x400000, v1
+; GFX11-FAKE16-NEXT: s_delay_alu instid0(VALU_DEP_4) | instskip(NEXT) | instid1(VALU_DEP_1)
+; GFX11-FAKE16-NEXT: v_add_nc_u32_e32 v3, v3, v1
+; GFX11-FAKE16-NEXT: v_add_nc_u32_e32 v3, 0x7fff, v3
+; GFX11-FAKE16-NEXT: v_add_nc_u32_e32 v2, v2, v0
+; GFX11-FAKE16-NEXT: s_delay_alu instid0(VALU_DEP_1) | instskip(NEXT) | instid1(VALU_DEP_1)
+; GFX11-FAKE16-NEXT: v_add_nc_u32_e32 v2, 0x7fff, v2
+; GFX11-FAKE16-NEXT: v_cndmask_b32_e32 v0, v2, v4, vcc_lo
+; GFX11-FAKE16-NEXT: v_cmp_u_f32_e32 vcc_lo, v1, v1
+; GFX11-FAKE16-NEXT: s_delay_alu instid0(VALU_DEP_2) | instskip(SKIP_1) | instid1(VALU_DEP_1)
+; GFX11-FAKE16-NEXT: v_lshrrev_b32_e32 v0, 16, v0
+; GFX11-FAKE16-NEXT: v_cndmask_b32_e32 v1, v3, v5, vcc_lo
+; GFX11-FAKE16-NEXT: v_and_or_b32 v0, 0xffff0000, v1, v0
+; GFX11-FAKE16-NEXT: s_setpc_b64 s[30:31]
+; GFX11-FAKE16-NEXT: .LBB51_3:
+; GFX11-FAKE16-NEXT: s_branch .LBB51_2
+; GFX11-FAKE16-NEXT: .LBB51_4:
+; GFX11-FAKE16-NEXT: v_mov_b32_e32 v0, s0
+; GFX11-FAKE16-NEXT: s_setpc_b64 s[30:31]
%cmp = icmp eq i32 %b, 0
br i1 %cmp, label %cmp.true, label %cmp.false
@@ -7223,46 +7339,87 @@ define inreg <2 x half> @bitcast_v2bf16_to_v2f16_scalar(<2 x bfloat> inreg %a, i
; GFX9-NEXT: v_mov_b32_e32 v0, s16
; GFX9-NEXT: s_setpc_b64 s[30:31]
;
-; GFX11-LABEL: bitcast_v2bf16_to_v2f16_scalar:
-; GFX11: ; %bb.0:
-; GFX11-NEXT: s_waitcnt vmcnt(0) expcnt(0) lgkmcnt(0)
-; GFX11-NEXT: s_cmp_lg_u32 s1, 0
-; GFX11-NEXT: s_mov_b32 s1, 0
-; GFX11-NEXT: s_cbranch_scc0 .LBB63_3
-; GFX11-NEXT: ; %bb.1: ; %Flow
-; GFX11-NEXT: s_and_not1_b32 vcc_lo, exec_lo, s1
-; GFX11-NEXT: s_cbranch_vccnz .LBB63_4
-; GFX11-NEXT: .LBB63_2: ; %cmp.true
-; GFX11-NEXT: s_lshl_b32 s1, s0, 16
-; GFX11-NEXT: s_and_b32 s0, s0, 0xffff0000
-; GFX11-NEXT: v_add_f32_e64 v0, 0x40c00000, s1
-; GFX11-NEXT: v_add_f32_e64 v1, 0x40c00000, s0
-; GFX11-NEXT: s_delay_alu instid0(VALU_DEP_2) | instskip(NEXT) | instid1(VALU_DEP_2)
-; GFX11-NEXT: v_bfe_u32 v2, v0, 16, 1
-; GFX11-NEXT: v_bfe_u32 v3, v1, 16, 1
-; GFX11-NEXT: v_or_b32_e32 v4, 0x400000, v0
-; GFX11-NEXT: v_cmp_u_f32_e32 vcc_lo, v0, v0
-; GFX11-NEXT: v_or_b32_e32 v5, 0x400000, v1
-; GFX11-NEXT: s_delay_alu instid0(VALU_DEP_4) | instskip(NEXT) | instid1(VALU_DEP_1)
-; GFX11-NEXT: v_add_nc_u32_e32 v3, v3, v1
-; GFX11-NEXT: v_add_nc_u32_e32 v3, 0x7fff, v3
-; GFX11-NEXT: v_add_nc_u32_e32 v2, v2, v0
-; GFX11-NEXT: s_delay_alu instid0(VALU_DEP_1) | instskip(NEXT) | instid1(VALU_DEP_1)
-; GFX11-NEXT: v_add_nc_u32_e32 v2, 0x7fff, v2
-; GFX11-NEXT: v_cndmask_b32_e32 v0, v2, v4, vcc_lo
-; GFX11-NEXT: v_cmp_u_f32_e32 vcc_lo, v1, v1
-; GFX11-NEXT: s_delay_alu instid0(VALU_DEP_2) | instskip(NEXT) | instid1(VALU_DEP_1)
-; GFX11-NEXT: v_lshrrev_b32_e32 v0, 16, v0
-; GFX11-NEXT: v_dual_cndmask_b32 v1, v3, v5 :: v_dual_and_b32 v0, 0xffff, v0
-; GFX11-NEXT: s_delay_alu instid0(VALU_DEP_1) | instskip(NEXT) | instid1(VALU_DEP_1)
-; GFX11-NEXT: v_lshrrev_b32_e32 v1, 16, v1
-; GFX11-NEXT: v_lshl_or_b32 v0, v1, 16, v0
-; GFX11-NEXT: s_setpc_b64 s[30:31]
-; GFX11-NEXT: .LBB63_3:
-; GFX11-NEXT: s_branch .LBB63_2
-; GFX11-NEXT: .LBB63_4:
-; GFX11-NEXT: v_mov_b32_e32 v0, s0
-; GFX11-NEXT: s_setpc_b64 s[30:31]
+; GFX11-TRUE16-LABEL: bitcast_v2bf16_to_v2f16_scalar:
+; GFX11-TRUE16: ; %bb.0:
+; GFX11-TRUE16-NEXT: s_waitcnt vmcnt(0) expcnt(0) lgkmcnt(0)
+; GFX11-TRUE16-NEXT: s_cmp_lg_u32 s1, 0
+; GFX11-TRUE16-NEXT: s_mov_b32 s1, 0
+; GFX11-TRUE16-NEXT: s_cbranch_scc0 .LBB63_3
+; GFX11-TRUE16-NEXT: ; %bb.1: ; %Flow
+; GFX11-TRUE16-NEXT: s_and_not1_b32 vcc_lo, exec_lo, s1
+; GFX11-TRUE16-NEXT: s_cbranch_vccnz .LBB63_4
+; GFX11-TRUE16-NEXT: .LBB63_2: ; %cmp.true
+; GFX11-TRUE16-NEXT: s_and_b32 s1, s0, 0xffff0000
+; GFX11-TRUE16-NEXT: s_lshl_b32 s0, s0, 16
+; GFX11-TRUE16-NEXT: v_add_f32_e64 v0, 0x40c00000, s1
+; GFX11-TRUE16-NEXT: v_add_f32_e64 v1, 0x40c00000, s0
+; GFX11-TRUE16-NEXT: s_delay_alu instid0(VALU_DEP_2) | instskip(NEXT) | instid1(VALU_DEP_2)
+; GFX11-TRUE16-NEXT: v_bfe_u32 v2, v0, 16, 1
+; GFX11-TRUE16-NEXT: v_bfe_u32 v3, v1, 16, 1
+; GFX11-TRUE16-NEXT: v_or_b32_e32 v4, 0x400000, v0
+; GFX11-TRUE16-NEXT: v_cmp_u_f32_e32 vcc_lo, v0, v0
+; GFX11-TRUE16-NEXT: v_or_b32_e32 v5, 0x400000, v1
+; GFX11-TRUE16-NEXT: s_delay_alu instid0(VALU_DEP_4) | instskip(NEXT) | instid1(VALU_DEP_1)
+; GFX11-TRUE16-NEXT: v_add_nc_u32_e32 v3, v3, v1
+; GFX11-TRUE16-NEXT: v_add_nc_u32_e32 v3, 0x7fff, v3
+; GFX11-TRUE16-NEXT: v_add_nc_u32_e32 v2, v2, v0
+; GFX11-TRUE16-NEXT: s_delay_alu instid0(VALU_DEP_1) | instskip(NEXT) | instid1(VALU_DEP_1)
+; GFX11-TRUE16-NEXT: v_add_nc_u32_e32 v2, 0x7fff, v2
+; GFX11-TRUE16-NEXT: v_cndmask_b32_e32 v0, v2, v4, vcc_lo
+; GFX11-TRUE16-NEXT: v_cmp_u_f32_e32 vcc_lo, v1, v1
+; GFX11-TRUE16-NEXT: s_delay_alu instid0(VALU_DEP_2) | instskip(SKIP_1) | instid1(VALU_DEP_1)
+; GFX11-TRUE16-NEXT: v_lshrrev_b32_e32 v2, 16, v0
+; GFX11-TRUE16-NEXT: v_cndmask_b32_e32 v1, v3, v5, vcc_lo
+; GFX11-TRUE16-NEXT: v_lshrrev_b32_e32 v0, 16, v1
+; GFX11-TRUE16-NEXT: s_delay_alu instid0(VALU_DEP_3)
+; GFX11-TRUE16-NEXT: v_mov_b16_e32 v0.h, v2.l
+; GFX11-TRUE16-NEXT: s_setpc_b64 s[30:31]
+; GFX11-TRUE16-NEXT: .LBB63_3:
+; GFX11-TRUE16-NEXT: s_branch .LBB63_2
+; GFX11-TRUE16-NEXT: .LBB63_4:
+; GFX11-TRUE16-NEXT: v_mov_b32_e32 v0, s0
+; GFX11-TRUE16-NEXT: s_setpc_b64 s[30:31]
+;
+; GFX11-FAKE16-LABEL: bitcast_v2bf16_to_v2f16_scalar:
+; GFX11-FAKE16: ; %bb.0:
+; GFX11-FAKE16-NEXT: s_waitcnt vmcnt(0) expcnt(0) lgkmcnt(0)
+; GFX11-FAKE16-NEXT: s_cmp_lg_u32 s1, 0
+; GFX11-FAKE16-NEXT: s_mov_b32 s1, 0
+; GFX11-FAKE16-NEXT: s_cbranch_scc0 .LBB63_3
+; GFX11-FAKE16-NEXT: ; %bb.1: ; %Flow
+; GFX11-FAKE16-NEXT: s_and_not1_b32 vcc_lo, exec_lo, s1
+; GFX11-FAKE16-NEXT: s_cbranch_vccnz .LBB63_4
+; GFX11-FAKE16-NEXT: .LBB63_2: ; %cmp.true
+; GFX11-FAKE16-NEXT: s_lshl_b32 s1, s0, 16
+; GFX11-FAKE16-NEXT: s_and_b32 s0, s0, 0xffff0000
+; GFX11-FAKE16-NEXT: v_add_f32_e64 v0, 0x40c00000, s1
+; GFX11-FAKE16-NEXT: v_add_f32_e64 v1, 0x40c00000, s0
+; GFX11-FAKE16-NEXT: s_delay_alu instid0(VALU_DEP_2) | instskip(NEXT) | instid1(VALU_DEP_2)
+; GFX11-FAKE16-NEXT: v_bfe_u32 v2, v0, 16, 1
+; GFX11-FAKE16-NEXT: v_bfe_u32 v3, v1, 16, 1
+; GFX11-FAKE16-NEXT: v_or_b32_e32 v4, 0x400000, v0
+; GFX11-FAKE16-NEXT: v_cmp_u_f32_e32 vcc_lo, v0, v0
+; GFX11-FAKE16-NEXT: v_or_b32_e32 v5, 0x400000, v1
+; GFX11-FAKE16-NEXT: s_delay_alu instid0(VALU_DEP_4) | instskip(NEXT) | instid1(VALU_DEP_1)
+; GFX11-FAKE16-NEXT: v_add_nc_u32_e32 v3, v3, v1
+; GFX11-FAKE16-NEXT: v_add_nc_u32_e32 v3, 0x7fff, v3
+; GFX11-FAKE16-NEXT: v_add_nc_u32_e32 v2, v2, v0
+; GFX11-FAKE16-NEXT: s_delay_alu instid0(VALU_DEP_1) | instskip(NEXT) | instid1(VALU_DEP_1)
+; GFX11-FAKE16-NEXT: v_add_nc_u32_e32 v2, 0x7fff, v2
+; GFX11-FAKE16-NEXT: v_cndmask_b32_e32 v0, v2, v4, vcc_lo
+; GFX11-FAKE16-NEXT: v_cmp_u_f32_e32 vcc_lo, v1, v1
+; GFX11-FAKE16-NEXT: s_delay_alu instid0(VALU_DEP_2) | instskip(NEXT) | instid1(VALU_DEP_1)
+; GFX11-FAKE16-NEXT: v_lshrrev_b32_e32 v0, 16, v0
+; GFX11-FAKE16-NEXT: v_dual_cndmask_b32 v1, v3, v5 :: v_dual_and_b32 v0, 0xffff, v0
+; GFX11-FAKE16-NEXT: s_delay_alu instid0(VALU_DEP_1) | instskip(NEXT) | instid1(VALU_DEP_1)
+; GFX11-FAKE16-NEXT: v_lshrrev_b32_e32 v1, 16, v1
+; GFX11-FAKE16-NEXT: v_lshl_or_b32 v0, v1, 16, v0
+; GFX11-FAKE16-NEXT: s_setpc_b64 s[30:31]
+; GFX11-FAKE16-NEXT: .LBB63_3:
+; GFX11-FAKE16-NEXT: s_branch .LBB63_2
+; GFX11-FAKE16-NEXT: .LBB63_4:
+; GFX11-FAKE16-NEXT: v_mov_b32_e32 v0, s0
+; GFX11-FAKE16-NEXT: s_setpc_b64 s[30:31]
%cmp = icmp eq i32 %b, 0
br i1 %cmp, label %cmp.true, label %cmp.false
@@ -8653,46 +8810,87 @@ define inreg <1 x i32> @bitcast_v2bf16_to_v1i32_scalar(<2 x bfloat> inreg %a, i3
; GFX9-NEXT: v_mov_b32_e32 v0, s16
; GFX9-NEXT: s_setpc_b64 s[30:31]
;
-; GFX11-LABEL: bitcast_v2bf16_to_v1i32_scalar:
-; GFX11: ; %bb.0:
-; GFX11-NEXT: s_waitcnt vmcnt(0) expcnt(0) lgkmcnt(0)
-; GFX11-NEXT: s_cmp_lg_u32 s1, 0
-; GFX11-NEXT: s_mov_b32 s1, 0
-; GFX11-NEXT: s_cbranch_scc0 .LBB73_3
-; GFX11-NEXT: ; %bb.1: ; %Flow
-; GFX11-NEXT: s_and_not1_b32 vcc_lo, exec_lo, s1
-; GFX11-NEXT: s_cbranch_vccnz .LBB73_4
-; GFX11-NEXT: .LBB73_2: ; %cmp.true
-; GFX11-NEXT: s_lshl_b32 s1, s0, 16
-; GFX11-NEXT: s_and_b32 s0, s0, 0xffff0000
-; GFX11-NEXT: v_add_f32_e64 v0, 0x40c00000, s1
-; GFX11-NEXT: v_add_f32_e64 v1, 0x40c00000, s0
-; GFX11-NEXT: s_delay_alu instid0(VALU_DEP_2) | instskip(NEXT) | instid1(VALU_DEP_2)
-; GFX11-NEXT: v_bfe_u32 v2, v0, 16, 1
-; GFX11-NEXT: v_bfe_u32 v3, v1, 16, 1
-; GFX11-NEXT: v_or_b32_e32 v4, 0x400000, v0
-; GFX11-NEXT: v_cmp_u_f32_e32 vcc_lo, v0, v0
-; GFX11-NEXT: v_or_b32_e32 v5, 0x400000, v1
-; GFX11-NEXT: s_delay_alu instid0(VALU_DEP_4) | instskip(NEXT) | instid1(VALU_DEP_1)
-; GFX11-NEXT: v_add_nc_u32_e32 v3, v3, v1
-; GFX11-NEXT: v_add_nc_u32_e32 v3, 0x7fff, v3
-; GFX11-NEXT: v_add_nc_u32_e32 v2, v2, v0
-; GFX11-NEXT: s_delay_alu instid0(VALU_DEP_1) | instskip(NEXT) | instid1(VALU_DEP_1)
-; GFX11-NEXT: v_add_nc_u32_e32 v2, 0x7fff, v2
-; GFX11-NEXT: v_cndmask_b32_e32 v0, v2, v4, vcc_lo
-; GFX11-NEXT: v_cmp_u_f32_e32 vcc_lo, v1, v1
-; GFX11-NEXT: s_delay_alu instid0(VALU_DEP_2) | instskip(NEXT) | instid1(VALU_DEP_1)
-; GFX11-NEXT: v_lshrrev_b32_e32 v0, 16, v0
-; GFX11-NEXT: v_dual_cndmask_b32 v1, v3, v5 :: v_dual_and_b32 v0, 0xffff, v0
-; GFX11-NEXT: s_delay_alu instid0(VALU_DEP_1) | instskip(NEXT) | instid1(VALU_DEP_1)
-; GFX11-NEXT: v_lshrrev_b32_e32 v1, 16, v1
-; GFX11-NEXT: v_lshl_or_b32 v0, v1, 16, v0
-; GFX11-NEXT: s_setpc_b64 s[30:31]
-; GFX11-NEXT: .LBB73_3:
-; GFX11-NEXT: s_branch .LBB73_2
-; GFX11-NEXT: .LBB73_4:
-; GFX11-NEXT: v_mov_b32_e32 v0, s0
-; GFX11-NEXT: s_setpc_b64 s[30:31]
+; GFX11-TRUE16-LABEL: bitcast_v2bf16_to_v1i32_scalar:
+; GFX11-TRUE16: ; %bb.0:
+; GFX11-TRUE16-NEXT: s_waitcnt vmcnt(0) expcnt(0) lgkmcnt(0)
+; GFX11-TRUE16-NEXT: s_cmp_lg_u32 s1, 0
+; GFX11-TRUE16-NEXT: s_mov_b32 s1, 0
+; GFX11-TRUE16-NEXT: s_cbranch_scc0 .LBB73_3
+; GFX11-TRUE16-NEXT: ; %bb.1: ; %Flow
+; GFX11-TRUE16-NEXT: s_and_not1_b32 vcc_lo, exec_lo, s1
+; GFX11-TRUE16-NEXT: s_cbranch_vccnz .LBB73_4
+; GFX11-TRUE16-NEXT: .LBB73_2: ; %cmp.true
+; GFX11-TRUE16-NEXT: s_and_b32 s1, s0, 0xffff0000
+; GFX11-TRUE16-NEXT: s_lshl_b32 s0, s0, 16
+; GFX11-TRUE16-NEXT: v_add_f32_e64 v0, 0x40c00000, s1
+; GFX11-TRUE16-NEXT: v_add_f32_e64 v1, 0x40c00000, s0
+; GFX11-TRUE16-NEXT: s_delay_alu instid0(VALU_DEP_2) | instskip(NEXT) | instid1(VALU_DEP_2)
+; GFX11-TRUE16-NEXT: v_bfe_u32 v2, v0, 16, 1
+; GFX11-TRUE16-NEXT: v_bfe_u32 v3, v1, 16, 1
+; GFX11-TRUE16-NEXT: v_or_b32_e32 v4, 0x400000, v0
+; GFX11-TRUE16-NEXT: v_cmp_u_f32_e32 vcc_lo, v0, v0
+; GFX11-TRUE16-NEXT: v_or_b32_e32 v5, 0x400000, v1
+; GFX11-TRUE16-NEXT: s_delay_alu instid0(VALU_DEP_4) | instskip(NEXT) | instid1(VALU_DEP_1)
+; GFX11-TRUE16-NEXT: v_add_nc_u32_e32 v3, v3, v1
+; GFX11-TRUE16-NEXT: v_add_nc_u32_e32 v3, 0x7fff, v3
+; GFX11-TRUE16-NEXT: v_add_nc_u32_e32 v2, v2, v0
+; GFX11-TRUE16-NEXT: s_delay_alu instid0(VALU_DEP_1) | instskip(NEXT) | instid1(VALU_DEP_1)
+; GFX11-TRUE16-NEXT: v_add_nc_u32_e32 v2, 0x7fff, v2
+; GFX11-TRUE16-NEXT: v_cndmask_b32_e32 v0, v2, v4, vcc_lo
+; GFX11-TRUE16-NEXT: v_cmp_u_f32_e32 vcc_lo, v1, v1
+; GFX11-TRUE16-NEXT: s_delay_alu instid0(VALU_DEP_2) | instskip(SKIP_1) | instid1(VALU_DEP_1)
+; GFX11-TRUE16-NEXT: v_lshrrev_b32_e32 v2, 16, v0
+; GFX11-TRUE16-NEXT: v_cndmask_b32_e32 v1, v3, v5, vcc_lo
+; GFX11-TRUE16-NEXT: v_lshrrev_b32_e32 v0, 16, v1
+; GFX11-TRUE16-NEXT: s_delay_alu instid0(VALU_DEP_3)
+; GFX11-TRUE16-NEXT: v_mov_b16_e32 v0.h, v2.l
+; GFX11-TRUE16-NEXT: s_setpc_b64 s[30:31]
+; GFX11-TRUE16-NEXT: .LBB73_3:
+; GFX11-TRUE16-NEXT: s_branch .LBB73_2
+; GFX11-TRUE16-NEXT: .LBB73_4:
+; GFX11-TRUE16-NEXT: v_mov_b32_e32 v0, s0
+; GFX11-TRUE16-NEXT: s_setpc_b64 s[30:31]
+;
+; GFX11-FAKE16-LABEL: bitcast_v2bf16_to_v1i32_scalar:
+; GFX11-FAKE16: ; %bb.0:
+; GFX11-FAKE16-NEXT: s_waitcnt vmcnt(0) expcnt(0) lgkmcnt(0)
+; GFX11-FAKE16-NEXT: s_cmp_lg_u32 s1, 0
+; GFX11-FAKE16-NEXT: s_mov_b32 s1, 0
+; GFX11-FAKE16-NEXT: s_cbranch_scc0 .LBB73_3
+; GFX11-FAKE16-NEXT: ; %bb.1: ; %Flow
+; GFX11-FAKE16-NEXT: s_and_not1_b32 vcc_lo, exec_lo, s1
+; GFX11-FAKE16-NEXT: s_cbranch_vccnz .LBB73_4
+; GFX11-FAKE16-NEXT: .LBB73_2: ; %cmp.true
+; GFX11-FAKE16-NEXT: s_lshl_b32 s1, s0, 16
+; GFX11-FAKE16-NEXT: s_and_b32 s0, s0, 0xffff0000
+; GFX11-FAKE16-NEXT: v_add_f32_e64 v0, 0x40c00000, s1
+; GFX11-FAKE16-NEXT: v_add_f32_e64 v1, 0x40c00000, s0
+; GFX11-FAKE16-NEXT: s_delay_alu instid0(VALU_DEP_2) | instskip(NEXT) | instid1(VALU_DEP_2)
+; GFX11-FAKE16-NEXT: v_bfe_u32 v2, v0, 16, 1
+; GFX11-FAKE16-NEXT: v_bfe_u32 v3, v1, 16, 1
+; GFX11-FAKE16-NEXT: v_or_b32_e32 v4, 0x400000, v0
+; GFX11-FAKE16-NEXT: v_cmp_u_f32_e32 vcc_lo, v0, v0
+; GFX11-FAKE16-NEXT: v_or_b32_e32 v5, 0x400000, v1
+; GFX11-FAKE16-NEXT: s_delay_alu instid0(VALU_DEP_4) | instskip(NEXT) | instid1(VALU_DEP_1)
+; GFX11-FAKE16-NEXT: v_add_nc_u32_e32 v3, v3, v1
+; GFX11-FAKE16-NEXT: v_add_nc_u32_e32 v3, 0x7fff, v3
+; GFX11-FAKE16-NEXT: v_add_nc_u32_e32 v2, v2, v0
+; GFX11-FAKE16-NEXT: s_delay_alu instid0(VALU_DEP_1) | instskip(NEXT) | instid1(VALU_DEP_1)
+; GFX11-FAKE16-NEXT: v_add_nc_u32_e32 v2, 0x7fff, v2
+; GFX11-FAKE16-NEXT: v_cndmask_b32_e32 v0, v2, v4, vcc_lo
+; GFX11-FAKE16-NEXT: v_cmp_u_f32_e32 vcc_lo, v1, v1
+; GFX11-FAKE16-NEXT: s_delay_alu instid0(VALU_DEP_2) | instskip(NEXT) | instid1(VALU_DEP_1)
+; GFX11-FAKE16-NEXT: v_lshrrev_b32_e32 v0, 16, v0
+; GFX11-FAKE16-NEXT: v_dual_cndmask_b32 v1, v3, v5 :: v_dual_and_b32 v0, 0xffff, v0
+; GFX11-FAKE16-NEXT: s_delay_alu instid0(VALU_DEP_1) | instskip(NEXT) | instid1(VALU_DEP_1)
+; GFX11-FAKE16-NEXT: v_lshrrev_b32_e32 v1, 16, v1
+; GFX11-FAKE16-NEXT: v_lshl_or_b32 v0, v1, 16, v0
+; GFX11-FAKE16-NEXT: s_setpc_b64 s[30:31]
+; GFX11-FAKE16-NEXT: .LBB73_3:
+; GFX11-FAKE16-NEXT: s_branch .LBB73_2
+; GFX11-FAKE16-NEXT: .LBB73_4:
+; GFX11-FAKE16-NEXT: v_mov_b32_e32 v0, s0
+; GFX11-FAKE16-NEXT: s_setpc_b64 s[30:31]
%cmp = icmp eq i32 %b, 0
br i1 %cmp, label %cmp.true, label %cmp.false
@@ -9258,57 +9456,109 @@ define inreg <4 x i8> @bitcast_v2bf16_to_v4i8_scalar(<2 x bfloat> inreg %a, i32
; GFX9-NEXT: v_mov_b32_e32 v1, s7
; GFX9-NEXT: s_setpc_b64 s[30:31]
;
-; GFX11-LABEL: bitcast_v2bf16_to_v4i8_scalar:
-; GFX11: ; %bb.0:
-; GFX11-NEXT: s_waitcnt vmcnt(0) expcnt(0) lgkmcnt(0)
-; GFX11-NEXT: s_cmp_lg_u32 s1, 0
-; GFX11-NEXT: s_mov_b32 s1, 0
-; GFX11-NEXT: s_cbranch_scc0 .LBB77_3
-; GFX11-NEXT: ; %bb.1: ; %cmp.false
-; GFX11-NEXT: s_lshr_b32 s2, s0, 24
-; GFX11-NEXT: s_lshr_b32 s4, s0, 16
-; GFX11-NEXT: s_lshr_b32 s3, s0, 8
-; GFX11-NEXT: s_and_not1_b32 vcc_lo, exec_lo, s1
-; GFX11-NEXT: s_cbranch_vccnz .LBB77_4
-; GFX11-NEXT: .LBB77_2: ; %cmp.true
-; GFX11-NEXT: s_lshl_b32 s1, s0, 16
-; GFX11-NEXT: s_and_b32 s0, s0, 0xffff0000
-; GFX11-NEXT: v_add_f32_e64 v0, 0x40c00000, s1
-; GFX11-NEXT: v_add_f32_e64 v1, 0x40c00000, s0
-; GFX11-NEXT: s_delay_alu instid0(VALU_DEP_2) | instskip(NEXT) | instid1(VALU_DEP_2)
-; GFX11-NEXT: v_bfe_u32 v2, v0, 16, 1
-; GFX11-NEXT: v_bfe_u32 v3, v1, 16, 1
-; GFX11-NEXT: v_or_b32_e32 v4, 0x400000, v0
-; GFX11-NEXT: v_cmp_u_f32_e32 vcc_lo, v0, v0
-; GFX11-NEXT: v_or_b32_e32 v5, 0x400000, v1
-; GFX11-NEXT: s_delay_alu instid0(VALU_DEP_4) | instskip(NEXT) | instid1(VALU_DEP_1)
-; GFX11-NEXT: v_add_nc_u32_e32 v3, v3, v1
-; GFX11-NEXT: v_add_nc_u32_e32 v3, 0x7fff, v3
-; GFX11-NEXT: v_add_nc_u32_e32 v2, v2, v0
-; GFX11-NEXT: s_delay_alu instid0(VALU_DEP_1) | instskip(NEXT) | instid1(VALU_DEP_1)
-; GFX11-NEXT: v_add_nc_u32_e32 v2, 0x7fff, v2
-; GFX11-NEXT: v_cndmask_b32_e32 v0, v2, v4, vcc_lo
-; GFX11-NEXT: v_cmp_u_f32_e32 vcc_lo, v1, v1
-; GFX11-NEXT: s_delay_alu instid0(VALU_DEP_2) | instskip(SKIP_1) | instid1(VALU_DEP_1)
-; GFX11-NEXT: v_lshrrev_b32_e32 v0, 16, v0
-; GFX11-NEXT: v_cndmask_b32_e32 v1, v3, v5, vcc_lo
-; GFX11-NEXT: v_lshrrev_b32_e32 v2, 16, v1
-; GFX11-NEXT: s_delay_alu instid0(VALU_DEP_3) | instskip(NEXT) | instid1(VALU_DEP_1)
-; GFX11-NEXT: v_and_b32_e32 v1, 0xffff, v0
-; GFX11-NEXT: v_lshl_or_b32 v1, v2, 16, v1
-; GFX11-NEXT: s_delay_alu instid0(VALU_DEP_1)
-; GFX11-NEXT: v_lshrrev_b32_e32 v3, 24, v1
-; GFX11-NEXT: v_lshrrev_b32_e32 v1, 8, v1
-; GFX11-NEXT: s_setpc_b64 s[30:31]
-; GFX11-NEXT: .LBB77_3:
-; GFX11-NEXT: ; implicit-def: $sgpr3
-; GFX11-NEXT: ; implicit-def: $sgpr4
-; GFX11-NEXT: ; implicit-def: $sgpr2
-; GFX11-NEXT: s_branch .LBB77_2
-; GFX11-NEXT: .LBB77_4:
-; GFX11-NEXT: v_dual_mov_b32 v0, s0 :: v_dual_mov_b32 v3, s2
-; GFX11-NEXT: v_dual_mov_b32 v2, s4 :: v_dual_mov_b32 v1, s3
-; GFX11-NEXT: s_setpc_b64 s[30:31]
+; GFX11-TRUE16-LABEL: bitcast_v2bf16_to_v4i8_scalar:
+; GFX11-TRUE16: ; %bb.0:
+; GFX11-TRUE16-NEXT: s_waitcnt vmcnt(0) expcnt(0) lgkmcnt(0)
+; GFX11-TRUE16-NEXT: s_cmp_lg_u32 s1, 0
+; GFX11-TRUE16-NEXT: s_mov_b32 s1, 0
+; GFX11-TRUE16-NEXT: s_cbranch_scc0 .LBB77_3
+; GFX11-TRUE16-NEXT: ; %bb.1: ; %cmp.false
+; GFX11-TRUE16-NEXT: s_lshr_b32 s2, s0, 24
+; GFX11-TRUE16-NEXT: s_lshr_b32 s4, s0, 16
+; GFX11-TRUE16-NEXT: s_lshr_b32 s3, s0, 8
+; GFX11-TRUE16-NEXT: s_and_not1_b32 vcc_lo, exec_lo, s1
+; GFX11-TRUE16-NEXT: s_cbranch_vccnz .LBB77_4
+; GFX11-TRUE16-NEXT: .LBB77_2: ; %cmp.true
+; GFX11-TRUE16-NEXT: s_lshl_b32 s1, s0, 16
+; GFX11-TRUE16-NEXT: s_and_b32 s0, s0, 0xffff0000
+; GFX11-TRUE16-NEXT: v_add_f32_e64 v0, 0x40c00000, s1
+; GFX11-TRUE16-NEXT: v_add_f32_e64 v1, 0x40c00000, s0
+; GFX11-TRUE16-NEXT: s_delay_alu instid0(VALU_DEP_2) | instskip(NEXT) | instid1(VALU_DEP_2)
+; GFX11-TRUE16-NEXT: v_bfe_u32 v2, v0, 16, 1
+; GFX11-TRUE16-NEXT: v_bfe_u32 v3, v1, 16, 1
+; GFX11-TRUE16-NEXT: v_or_b32_e32 v4, 0x400000, v0
+; GFX11-TRUE16-NEXT: v_cmp_u_f32_e32 vcc_lo, v0, v0
+; GFX11-TRUE16-NEXT: v_or_b32_e32 v5, 0x400000, v1
+; GFX11-TRUE16-NEXT: s_delay_alu instid0(VALU_DEP_4) | instskip(NEXT) | instid1(VALU_DEP_1)
+; GFX11-TRUE16-NEXT: v_add_nc_u32_e32 v3, v3, v1
+; GFX11-TRUE16-NEXT: v_add_nc_u32_e32 v3, 0x7fff, v3
+; GFX11-TRUE16-NEXT: v_add_nc_u32_e32 v2, v2, v0
+; GFX11-TRUE16-NEXT: s_delay_alu instid0(VALU_DEP_1) | instskip(NEXT) | instid1(VALU_DEP_1)
+; GFX11-TRUE16-NEXT: v_add_nc_u32_e32 v2, 0x7fff, v2
+; GFX11-TRUE16-NEXT: v_cndmask_b32_e32 v0, v2, v4, vcc_lo
+; GFX11-TRUE16-NEXT: v_cmp_u_f32_e32 vcc_lo, v1, v1
+; GFX11-TRUE16-NEXT: s_delay_alu instid0(VALU_DEP_2) | instskip(SKIP_1) | instid1(VALU_DEP_1)
+; GFX11-TRUE16-NEXT: v_lshrrev_b32_e32 v0, 16, v0
+; GFX11-TRUE16-NEXT: v_cndmask_b32_e32 v1, v3, v5, vcc_lo
+; GFX11-TRUE16-NEXT: v_lshrrev_b32_e32 v2, 16, v1
+; GFX11-TRUE16-NEXT: s_delay_alu instid0(VALU_DEP_3) | instskip(NEXT) | instid1(VALU_DEP_2)
+; GFX11-TRUE16-NEXT: v_mov_b16_e32 v1.l, v0.l
+; GFX11-TRUE16-NEXT: v_mov_b16_e32 v1.h, v2.l
+; GFX11-TRUE16-NEXT: s_delay_alu instid0(VALU_DEP_1)
+; GFX11-TRUE16-NEXT: v_lshrrev_b32_e32 v3, 24, v1
+; GFX11-TRUE16-NEXT: v_lshrrev_b32_e32 v1, 8, v1
+; GFX11-TRUE16-NEXT: s_setpc_b64 s[30:31]
+; GFX11-TRUE16-NEXT: .LBB77_3:
+; GFX11-TRUE16-NEXT: ; implicit-def: $sgpr3
+; GFX11-TRUE16-NEXT: ; implicit-def: $sgpr4
+; GFX11-TRUE16-NEXT: ; implicit-def: $sgpr2
+; GFX11-TRUE16-NEXT: s_branch .LBB77_2
+; GFX11-TRUE16-NEXT: .LBB77_4:
+; GFX11-TRUE16-NEXT: v_dual_mov_b32 v0, s0 :: v_dual_mov_b32 v3, s2
+; GFX11-TRUE16-NEXT: v_dual_mov_b32 v2, s4 :: v_dual_mov_b32 v1, s3
+; GFX11-TRUE16-NEXT: s_setpc_b64 s[30:31]
+;
+; GFX11-FAKE16-LABEL: bitcast_v2bf16_to_v4i8_scalar:
+; GFX11-FAKE16: ; %bb.0:
+; GFX11-FAKE16-NEXT: s_waitcnt vmcnt(0) expcnt(0) lgkmcnt(0)
+; GFX11-FAKE16-NEXT: s_cmp_lg_u32 s1, 0
+; GFX11-FAKE16-NEXT: s_mov_b32 s1, 0
+; GFX11-FAKE16-NEXT: s_cbranch_scc0 .LBB77_3
+; GFX11-FAKE16-NEXT: ; %bb.1: ; %cmp.false
+; GFX11-FAKE16-NEXT: s_lshr_b32 s2, s0, 24
+; GFX11-FAKE16-NEXT: s_lshr_b32 s4, s0, 16
+; GFX11-FAKE16-NEXT: s_lshr_b32 s3, s0, 8
+; GFX11-FAKE16-NEXT: s_and_not1_b32 vcc_lo, exec_lo, s1
+; GFX11-FAKE16-NEXT: s_cbranch_vccnz .LBB77_4
+; GFX11-FAKE16-NEXT: .LBB77_2: ; %cmp.true
+; GFX11-FAKE16-NEXT: s_lshl_b32 s1, s0, 16
+; GFX11-FAKE16-NEXT: s_and_b32 s0, s0, 0xffff0000
+; GFX11-FAKE16-NEXT: v_add_f32_e64 v0, 0x40c00000, s1
+; GFX11-FAKE16-NEXT: v_add_f32_e64 v1, 0x40c00000, s0
+; GFX11-FAKE16-NEXT: s_delay_alu instid0(VALU_DEP_2) | instskip(NEXT) | instid1(VALU_DEP_2)
+; GFX11-FAKE16-NEXT: v_bfe_u32 v2, v0, 16, 1
+; GFX11-FAKE16-NEXT: v_bfe_u32 v3, v1, 16, 1
+; GFX11-FAKE16-NEXT: v_or_b32_e32 v4, 0x400000, v0
+; GFX11-FAKE16-NEXT: v_cmp_u_f32_e32 vcc_lo, v0, v0
+; GFX11-FAKE16-NEXT: v_or_b32_e32 v5, 0x400000, v1
+; GFX11-FAKE16-NEXT: s_delay_alu instid0(VALU_DEP_4) | instskip(NEXT) | instid1(VALU_DEP_1)
+; GFX11-FAKE16-NEXT: v_add_nc_u32_e32 v3, v3, v1
+; GFX11-FAKE16-NEXT: v_add_nc_u32_e32 v3, 0x7fff, v3
+; GFX11-FAKE16-NEXT: v_add_nc_u32_e32 v2, v2, v0
+; GFX11-FAKE16-NEXT: s_delay_alu instid0(VALU_DEP_1) | instskip(NEXT) | instid1(VALU_DEP_1)
+; GFX11-FAKE16-NEXT: v_add_nc_u32_e32 v2, 0x7fff, v2
+; GFX11-FAKE16-NEXT: v_cndmask_b32_e32 v0, v2, v4, vcc_lo
+; GFX11-FAKE16-NEXT: v_cmp_u_f32_e32 vcc_lo, v1, v1
+; GFX11-FAKE16-NEXT: s_delay_alu instid0(VALU_DEP_2) | instskip(SKIP_1) | instid1(VALU_DEP_1)
+; GFX11-FAKE16-NEXT: v_lshrrev_b32_e32 v0, 16, v0
+; GFX11-FAKE16-NEXT: v_cndmask_b32_e32 v1, v3, v5, vcc_lo
+; GFX11-FAKE16-NEXT: v_lshrrev_b32_e32 v2, 16, v1
+; GFX11-FAKE16-NEXT: s_delay_alu instid0(VALU_DEP_3) | instskip(NEXT) | instid1(VALU_DEP_1)
+; GFX11-FAKE16-NEXT: v_and_b32_e32 v1, 0xffff, v0
+; GFX11-FAKE16-NEXT: v_lshl_or_b32 v1, v2, 16, v1
+; GFX11-FAKE16-NEXT: s_delay_alu instid0(VALU_DEP_1)
+; GFX11-FAKE16-NEXT: v_lshrrev_b32_e32 v3, 24, v1
+; GFX11-FAKE16-NEXT: v_lshrrev_b32_e32 v1, 8, v1
+; GFX11-FAKE16-NEXT: s_setpc_b64 s[30:31]
+; GFX11-FAKE16-NEXT: .LBB77_3:
+; GFX11-FAKE16-NEXT: ; implicit-def: $sgpr3
+; GFX11-FAKE16-NEXT: ; implicit-def: $sgpr4
+; GFX11-FAKE16-NEXT: ; implicit-def: $sgpr2
+; GFX11-FAKE16-NEXT: s_branch .LBB77_2
+; GFX11-FAKE16-NEXT: .LBB77_4:
+; GFX11-FAKE16-NEXT: v_dual_mov_b32 v0, s0 :: v_dual_mov_b32 v3, s2
+; GFX11-FAKE16-NEXT: v_dual_mov_b32 v2, s4 :: v_dual_mov_b32 v1, s3
+; GFX11-FAKE16-NEXT: s_setpc_b64 s[30:31]
%cmp = icmp eq i32 %b, 0
br i1 %cmp, label %cmp.true, label %cmp.false
diff --git a/llvm/test/CodeGen/AMDGPU/amdgcn.bitcast.48bit.ll b/llvm/test/CodeGen/AMDGPU/amdgcn.bitcast.48bit.ll
index acc0247..d5d2d4aa 100644
--- a/llvm/test/CodeGen/AMDGPU/amdgcn.bitcast.48bit.ll
+++ b/llvm/test/CodeGen/AMDGPU/amdgcn.bitcast.48bit.ll
@@ -374,59 +374,112 @@ define inreg <3 x half> @bitcast_v3bf16_to_v3f16_scalar(<3 x bfloat> inreg %a, i
; GFX9-NEXT: v_mov_b32_e32 v1, s17
; GFX9-NEXT: s_setpc_b64 s[30:31]
;
-; GFX11-LABEL: bitcast_v3bf16_to_v3f16_scalar:
-; GFX11: ; %bb.0:
-; GFX11-NEXT: s_waitcnt vmcnt(0) expcnt(0) lgkmcnt(0)
-; GFX11-NEXT: s_cmp_lg_u32 s2, 0
-; GFX11-NEXT: s_mov_b32 s2, 0
-; GFX11-NEXT: s_cbranch_scc0 .LBB1_3
-; GFX11-NEXT: ; %bb.1: ; %Flow
-; GFX11-NEXT: s_and_not1_b32 vcc_lo, exec_lo, s2
-; GFX11-NEXT: s_cbranch_vccnz .LBB1_4
-; GFX11-NEXT: .LBB1_2: ; %cmp.true
-; GFX11-NEXT: s_lshl_b32 s2, s0, 16
-; GFX11-NEXT: s_lshl_b32 s1, s1, 16
-; GFX11-NEXT: v_add_f32_e64 v0, 0x40c00000, s2
-; GFX11-NEXT: v_add_f32_e64 v1, 0x40c00000, s1
-; GFX11-NEXT: s_pack_lh_b32_b16 s0, 0, s0
-; GFX11-NEXT: s_delay_alu instid0(SALU_CYCLE_1) | instskip(NEXT) | instid1(VALU_DEP_3)
-; GFX11-NEXT: v_add_f32_e64 v2, 0x40c00000, s0
-; GFX11-NEXT: v_bfe_u32 v3, v0, 16, 1
-; GFX11-NEXT: s_delay_alu instid0(VALU_DEP_3)
-; GFX11-NEXT: v_bfe_u32 v4, v1, 16, 1
-; GFX11-NEXT: v_or_b32_e32 v7, 0x400000, v0
-; GFX11-NEXT: v_cmp_u_f32_e32 vcc_lo, v0, v0
-; GFX11-NEXT: v_bfe_u32 v5, v2, 16, 1
-; GFX11-NEXT: v_add_nc_u32_e32 v3, v3, v0
-; GFX11-NEXT: v_add_nc_u32_e32 v4, v4, v1
-; GFX11-NEXT: v_or_b32_e32 v8, 0x400000, v2
-; GFX11-NEXT: v_or_b32_e32 v6, 0x400000, v1
-; GFX11-NEXT: s_delay_alu instid0(VALU_DEP_4) | instskip(SKIP_2) | instid1(VALU_DEP_3)
-; GFX11-NEXT: v_add_nc_u32_e32 v3, 0x7fff, v3
-; GFX11-NEXT: v_add_nc_u32_e32 v5, v5, v2
-; GFX11-NEXT: v_add_nc_u32_e32 v4, 0x7fff, v4
-; GFX11-NEXT: v_cndmask_b32_e32 v0, v3, v7, vcc_lo
-; GFX11-NEXT: v_cmp_u_f32_e32 vcc_lo, v1, v1
-; GFX11-NEXT: s_delay_alu instid0(VALU_DEP_4) | instskip(NEXT) | instid1(VALU_DEP_3)
-; GFX11-NEXT: v_add_nc_u32_e32 v5, 0x7fff, v5
-; GFX11-NEXT: v_lshrrev_b32_e32 v0, 16, v0
-; GFX11-NEXT: v_cndmask_b32_e32 v1, v4, v6, vcc_lo
-; GFX11-NEXT: v_cmp_u_f32_e32 vcc_lo, v2, v2
-; GFX11-NEXT: s_delay_alu instid0(VALU_DEP_3) | instskip(NEXT) | instid1(VALU_DEP_3)
-; GFX11-NEXT: v_and_b32_e32 v0, 0xffff, v0
-; GFX11-NEXT: v_lshrrev_b32_e32 v1, 16, v1
-; GFX11-NEXT: s_delay_alu instid0(VALU_DEP_1) | instskip(NEXT) | instid1(VALU_DEP_1)
-; GFX11-NEXT: v_dual_cndmask_b32 v2, v5, v8 :: v_dual_and_b32 v1, 0xffff, v1
-; GFX11-NEXT: v_lshrrev_b32_e32 v2, 16, v2
-; GFX11-NEXT: s_delay_alu instid0(VALU_DEP_2) | instskip(NEXT) | instid1(VALU_DEP_2)
-; GFX11-NEXT: v_lshl_or_b32 v1, 0x7fc0, 16, v1
-; GFX11-NEXT: v_lshl_or_b32 v0, v2, 16, v0
-; GFX11-NEXT: s_setpc_b64 s[30:31]
-; GFX11-NEXT: .LBB1_3:
-; GFX11-NEXT: s_branch .LBB1_2
-; GFX11-NEXT: .LBB1_4:
-; GFX11-NEXT: v_dual_mov_b32 v0, s0 :: v_dual_mov_b32 v1, s1
-; GFX11-NEXT: s_setpc_b64 s[30:31]
+; GFX11-TRUE16-LABEL: bitcast_v3bf16_to_v3f16_scalar:
+; GFX11-TRUE16: ; %bb.0:
+; GFX11-TRUE16-NEXT: s_waitcnt vmcnt(0) expcnt(0) lgkmcnt(0)
+; GFX11-TRUE16-NEXT: s_cmp_lg_u32 s2, 0
+; GFX11-TRUE16-NEXT: s_mov_b32 s2, 0
+; GFX11-TRUE16-NEXT: s_cbranch_scc0 .LBB1_3
+; GFX11-TRUE16-NEXT: ; %bb.1: ; %Flow
+; GFX11-TRUE16-NEXT: s_and_not1_b32 vcc_lo, exec_lo, s2
+; GFX11-TRUE16-NEXT: s_cbranch_vccnz .LBB1_4
+; GFX11-TRUE16-NEXT: .LBB1_2: ; %cmp.true
+; GFX11-TRUE16-NEXT: s_pack_lh_b32_b16 s2, 0, s0
+; GFX11-TRUE16-NEXT: s_lshl_b32 s1, s1, 16
+; GFX11-TRUE16-NEXT: v_add_f32_e64 v0, 0x40c00000, s2
+; GFX11-TRUE16-NEXT: v_add_f32_e64 v1, 0x40c00000, s1
+; GFX11-TRUE16-NEXT: s_lshl_b32 s0, s0, 16
+; GFX11-TRUE16-NEXT: s_delay_alu instid0(SALU_CYCLE_1) | instskip(NEXT) | instid1(VALU_DEP_3)
+; GFX11-TRUE16-NEXT: v_add_f32_e64 v2, 0x40c00000, s0
+; GFX11-TRUE16-NEXT: v_bfe_u32 v3, v0, 16, 1
+; GFX11-TRUE16-NEXT: s_delay_alu instid0(VALU_DEP_3)
+; GFX11-TRUE16-NEXT: v_bfe_u32 v4, v1, 16, 1
+; GFX11-TRUE16-NEXT: v_or_b32_e32 v7, 0x400000, v0
+; GFX11-TRUE16-NEXT: v_cmp_u_f32_e32 vcc_lo, v0, v0
+; GFX11-TRUE16-NEXT: v_bfe_u32 v5, v2, 16, 1
+; GFX11-TRUE16-NEXT: v_add_nc_u32_e32 v3, v3, v0
+; GFX11-TRUE16-NEXT: v_add_nc_u32_e32 v4, v4, v1
+; GFX11-TRUE16-NEXT: v_or_b32_e32 v8, 0x400000, v2
+; GFX11-TRUE16-NEXT: v_or_b32_e32 v6, 0x400000, v1
+; GFX11-TRUE16-NEXT: s_delay_alu instid0(VALU_DEP_4) | instskip(SKIP_2) | instid1(VALU_DEP_3)
+; GFX11-TRUE16-NEXT: v_add_nc_u32_e32 v3, 0x7fff, v3
+; GFX11-TRUE16-NEXT: v_add_nc_u32_e32 v5, v5, v2
+; GFX11-TRUE16-NEXT: v_add_nc_u32_e32 v4, 0x7fff, v4
+; GFX11-TRUE16-NEXT: v_cndmask_b32_e32 v0, v3, v7, vcc_lo
+; GFX11-TRUE16-NEXT: v_cmp_u_f32_e32 vcc_lo, v1, v1
+; GFX11-TRUE16-NEXT: s_delay_alu instid0(VALU_DEP_4) | instskip(NEXT) | instid1(VALU_DEP_3)
+; GFX11-TRUE16-NEXT: v_add_nc_u32_e32 v5, 0x7fff, v5
+; GFX11-TRUE16-NEXT: v_lshrrev_b32_e32 v3, 16, v0
+; GFX11-TRUE16-NEXT: v_cndmask_b32_e32 v1, v4, v6, vcc_lo
+; GFX11-TRUE16-NEXT: v_cmp_u_f32_e32 vcc_lo, v2, v2
+; GFX11-TRUE16-NEXT: v_mov_b32_e32 v4, 0x7fc0
+; GFX11-TRUE16-NEXT: s_delay_alu instid0(VALU_DEP_3) | instskip(SKIP_1) | instid1(VALU_DEP_3)
+; GFX11-TRUE16-NEXT: v_lshrrev_b32_e32 v1, 16, v1
+; GFX11-TRUE16-NEXT: v_cndmask_b32_e32 v2, v5, v8, vcc_lo
+; GFX11-TRUE16-NEXT: v_mov_b16_e32 v1.h, v4.l
+; GFX11-TRUE16-NEXT: s_delay_alu instid0(VALU_DEP_2)
+; GFX11-TRUE16-NEXT: v_lshrrev_b32_e32 v0, 16, v2
+; GFX11-TRUE16-NEXT: v_mov_b16_e32 v0.h, v3.l
+; GFX11-TRUE16-NEXT: s_setpc_b64 s[30:31]
+; GFX11-TRUE16-NEXT: .LBB1_3:
+; GFX11-TRUE16-NEXT: s_branch .LBB1_2
+; GFX11-TRUE16-NEXT: .LBB1_4:
+; GFX11-TRUE16-NEXT: v_dual_mov_b32 v0, s0 :: v_dual_mov_b32 v1, s1
+; GFX11-TRUE16-NEXT: s_setpc_b64 s[30:31]
+;
+; GFX11-FAKE16-LABEL: bitcast_v3bf16_to_v3f16_scalar:
+; GFX11-FAKE16: ; %bb.0:
+; GFX11-FAKE16-NEXT: s_waitcnt vmcnt(0) expcnt(0) lgkmcnt(0)
+; GFX11-FAKE16-NEXT: s_cmp_lg_u32 s2, 0
+; GFX11-FAKE16-NEXT: s_mov_b32 s2, 0
+; GFX11-FAKE16-NEXT: s_cbranch_scc0 .LBB1_3
+; GFX11-FAKE16-NEXT: ; %bb.1: ; %Flow
+; GFX11-FAKE16-NEXT: s_and_not1_b32 vcc_lo, exec_lo, s2
+; GFX11-FAKE16-NEXT: s_cbranch_vccnz .LBB1_4
+; GFX11-FAKE16-NEXT: .LBB1_2: ; %cmp.true
+; GFX11-FAKE16-NEXT: s_lshl_b32 s2, s0, 16
+; GFX11-FAKE16-NEXT: s_lshl_b32 s1, s1, 16
+; GFX11-FAKE16-NEXT: v_add_f32_e64 v0, 0x40c00000, s2
+; GFX11-FAKE16-NEXT: v_add_f32_e64 v1, 0x40c00000, s1
+; GFX11-FAKE16-NEXT: s_pack_lh_b32_b16 s0, 0, s0
+; GFX11-FAKE16-NEXT: s_delay_alu instid0(SALU_CYCLE_1) | instskip(NEXT) | instid1(VALU_DEP_3)
+; GFX11-FAKE16-NEXT: v_add_f32_e64 v2, 0x40c00000, s0
+; GFX11-FAKE16-NEXT: v_bfe_u32 v3, v0, 16, 1
+; GFX11-FAKE16-NEXT: s_delay_alu instid0(VALU_DEP_3)
+; GFX11-FAKE16-NEXT: v_bfe_u32 v4, v1, 16, 1
+; GFX11-FAKE16-NEXT: v_or_b32_e32 v7, 0x400000, v0
+; GFX11-FAKE16-NEXT: v_cmp_u_f32_e32 vcc_lo, v0, v0
+; GFX11-FAKE16-NEXT: v_bfe_u32 v5, v2, 16, 1
+; GFX11-FAKE16-NEXT: v_add_nc_u32_e32 v3, v3, v0
+; GFX11-FAKE16-NEXT: v_add_nc_u32_e32 v4, v4, v1
+; GFX11-FAKE16-NEXT: v_or_b32_e32 v8, 0x400000, v2
+; GFX11-FAKE16-NEXT: v_or_b32_e32 v6, 0x400000, v1
+; GFX11-FAKE16-NEXT: s_delay_alu instid0(VALU_DEP_4) | instskip(SKIP_2) | instid1(VALU_DEP_3)
+; GFX11-FAKE16-NEXT: v_add_nc_u32_e32 v3, 0x7fff, v3
+; GFX11-FAKE16-NEXT: v_add_nc_u32_e32 v5, v5, v2
+; GFX11-FAKE16-NEXT: v_add_nc_u32_e32 v4, 0x7fff, v4
+; GFX11-FAKE16-NEXT: v_cndmask_b32_e32 v0, v3, v7, vcc_lo
+; GFX11-FAKE16-NEXT: v_cmp_u_f32_e32 vcc_lo, v1, v1
+; GFX11-FAKE16-NEXT: s_delay_alu instid0(VALU_DEP_4) | instskip(NEXT) | instid1(VALU_DEP_3)
+; GFX11-FAKE16-NEXT: v_add_nc_u32_e32 v5, 0x7fff, v5
+; GFX11-FAKE16-NEXT: v_lshrrev_b32_e32 v0, 16, v0
+; GFX11-FAKE16-NEXT: v_cndmask_b32_e32 v1, v4, v6, vcc_lo
+; GFX11-FAKE16-NEXT: v_cmp_u_f32_e32 vcc_lo, v2, v2
+; GFX11-FAKE16-NEXT: s_delay_alu instid0(VALU_DEP_3) | instskip(NEXT) | instid1(VALU_DEP_3)
+; GFX11-FAKE16-NEXT: v_and_b32_e32 v0, 0xffff, v0
+; GFX11-FAKE16-NEXT: v_lshrrev_b32_e32 v1, 16, v1
+; GFX11-FAKE16-NEXT: s_delay_alu instid0(VALU_DEP_1) | instskip(NEXT) | instid1(VALU_DEP_1)
+; GFX11-FAKE16-NEXT: v_dual_cndmask_b32 v2, v5, v8 :: v_dual_and_b32 v1, 0xffff, v1
+; GFX11-FAKE16-NEXT: v_lshrrev_b32_e32 v2, 16, v2
+; GFX11-FAKE16-NEXT: s_delay_alu instid0(VALU_DEP_2) | instskip(NEXT) | instid1(VALU_DEP_2)
+; GFX11-FAKE16-NEXT: v_lshl_or_b32 v1, 0x7fc0, 16, v1
+; GFX11-FAKE16-NEXT: v_lshl_or_b32 v0, v2, 16, v0
+; GFX11-FAKE16-NEXT: s_setpc_b64 s[30:31]
+; GFX11-FAKE16-NEXT: .LBB1_3:
+; GFX11-FAKE16-NEXT: s_branch .LBB1_2
+; GFX11-FAKE16-NEXT: .LBB1_4:
+; GFX11-FAKE16-NEXT: v_dual_mov_b32 v0, s0 :: v_dual_mov_b32 v1, s1
+; GFX11-FAKE16-NEXT: s_setpc_b64 s[30:31]
%cmp = icmp eq i32 %b, 0
br i1 %cmp, label %cmp.true, label %cmp.false
@@ -803,38 +856,36 @@ define <3 x i16> @bitcast_v3bf16_to_v3i16(<3 x bfloat> %a, i32 %b) {
; GFX11-TRUE16-NEXT: ; %bb.1: ; %cmp.true
; GFX11-TRUE16-NEXT: v_mov_b16_e32 v2.l, 0
; GFX11-TRUE16-NEXT: v_mov_b16_e32 v2.h, v1.l
-; GFX11-TRUE16-NEXT: v_and_b32_e32 v1, 0xffff0000, v0
-; GFX11-TRUE16-NEXT: s_delay_alu instid0(VALU_DEP_2) | instskip(SKIP_1) | instid1(VALU_DEP_3)
-; GFX11-TRUE16-NEXT: v_add_f32_e32 v3, 0x40c00000, v2
-; GFX11-TRUE16-NEXT: v_mov_b16_e32 v2.h, v0.l
-; GFX11-TRUE16-NEXT: v_add_f32_e32 v0, 0x40c00000, v1
-; GFX11-TRUE16-NEXT: s_delay_alu instid0(VALU_DEP_3) | instskip(NEXT) | instid1(VALU_DEP_3)
-; GFX11-TRUE16-NEXT: v_bfe_u32 v4, v3, 16, 1
+; GFX11-TRUE16-NEXT: s_delay_alu instid0(VALU_DEP_1) | instskip(SKIP_2) | instid1(VALU_DEP_1)
; GFX11-TRUE16-NEXT: v_add_f32_e32 v1, 0x40c00000, v2
-; GFX11-TRUE16-NEXT: s_delay_alu instid0(VALU_DEP_3)
-; GFX11-TRUE16-NEXT: v_bfe_u32 v2, v0, 16, 1
+; GFX11-TRUE16-NEXT: v_mov_b16_e32 v2.h, v0.l
+; GFX11-TRUE16-NEXT: v_and_b32_e32 v0, 0xffff0000, v0
+; GFX11-TRUE16-NEXT: v_add_f32_e32 v0, 0x40c00000, v0
+; GFX11-TRUE16-NEXT: s_delay_alu instid0(VALU_DEP_4) | instskip(SKIP_2) | instid1(VALU_DEP_4)
+; GFX11-TRUE16-NEXT: v_bfe_u32 v3, v1, 16, 1
+; GFX11-TRUE16-NEXT: v_or_b32_e32 v4, 0x400000, v1
+; GFX11-TRUE16-NEXT: v_cmp_u_f32_e32 vcc_lo, v1, v1
+; GFX11-TRUE16-NEXT: v_bfe_u32 v6, v0, 16, 1
+; GFX11-TRUE16-NEXT: s_delay_alu instid0(VALU_DEP_4) | instskip(SKIP_2) | instid1(VALU_DEP_4)
+; GFX11-TRUE16-NEXT: v_add3_u32 v3, v3, v1, 0x7fff
+; GFX11-TRUE16-NEXT: v_add_f32_e32 v2, 0x40c00000, v2
; GFX11-TRUE16-NEXT: v_or_b32_e32 v7, 0x400000, v0
-; GFX11-TRUE16-NEXT: v_cmp_u_f32_e32 vcc_lo, v0, v0
-; GFX11-TRUE16-NEXT: v_or_b32_e32 v6, 0x400000, v3
-; GFX11-TRUE16-NEXT: v_bfe_u32 v5, v1, 16, 1
-; GFX11-TRUE16-NEXT: v_add3_u32 v2, v2, v0, 0x7fff
-; GFX11-TRUE16-NEXT: v_or_b32_e32 v8, 0x400000, v1
-; GFX11-TRUE16-NEXT: v_add3_u32 v4, v4, v3, 0x7fff
+; GFX11-TRUE16-NEXT: v_add3_u32 v6, v6, v0, 0x7fff
; GFX11-TRUE16-NEXT: s_delay_alu instid0(VALU_DEP_4) | instskip(NEXT) | instid1(VALU_DEP_4)
-; GFX11-TRUE16-NEXT: v_add3_u32 v5, v5, v1, 0x7fff
-; GFX11-TRUE16-NEXT: v_cndmask_b32_e32 v0, v2, v7, vcc_lo
-; GFX11-TRUE16-NEXT: v_cmp_u_f32_e32 vcc_lo, v1, v1
-; GFX11-TRUE16-NEXT: s_delay_alu instid0(VALU_DEP_2) | instskip(NEXT) | instid1(VALU_DEP_4)
-; GFX11-TRUE16-NEXT: v_mov_b16_e32 v0.l, v0.h
-; GFX11-TRUE16-NEXT: v_cndmask_b32_e32 v1, v5, v8, vcc_lo
-; GFX11-TRUE16-NEXT: v_cmp_u_f32_e32 vcc_lo, v3, v3
-; GFX11-TRUE16-NEXT: v_cndmask_b32_e32 v2, v4, v6, vcc_lo
+; GFX11-TRUE16-NEXT: v_cndmask_b32_e32 v1, v3, v4, vcc_lo
+; GFX11-TRUE16-NEXT: v_bfe_u32 v5, v2, 16, 1
+; GFX11-TRUE16-NEXT: v_or_b32_e32 v8, 0x400000, v2
+; GFX11-TRUE16-NEXT: v_cmp_u_f32_e32 vcc_lo, v2, v2
+; GFX11-TRUE16-NEXT: v_mov_b32_e32 v3, 0x7fc0
+; GFX11-TRUE16-NEXT: v_mov_b16_e32 v1.l, v1.h
+; GFX11-TRUE16-NEXT: v_add3_u32 v5, v5, v2, 0x7fff
; GFX11-TRUE16-NEXT: s_delay_alu instid0(VALU_DEP_3) | instskip(NEXT) | instid1(VALU_DEP_2)
-; GFX11-TRUE16-NEXT: v_lshrrev_b32_e32 v1, 16, v1
-; GFX11-TRUE16-NEXT: v_lshrrev_b32_e32 v2, 16, v2
-; GFX11-TRUE16-NEXT: s_delay_alu instid0(VALU_DEP_2) | instskip(NEXT) | instid1(VALU_DEP_2)
-; GFX11-TRUE16-NEXT: v_lshl_or_b32 v0, v0, 16, v1
-; GFX11-TRUE16-NEXT: v_lshl_or_b32 v1, 0x7fc0, 16, v2
+; GFX11-TRUE16-NEXT: v_mov_b16_e32 v1.h, v3.l
+; GFX11-TRUE16-NEXT: v_cndmask_b32_e32 v2, v5, v8, vcc_lo
+; GFX11-TRUE16-NEXT: v_cmp_u_f32_e32 vcc_lo, v0, v0
+; GFX11-TRUE16-NEXT: v_cndmask_b32_e32 v0, v6, v7, vcc_lo
+; GFX11-TRUE16-NEXT: s_delay_alu instid0(VALU_DEP_3)
+; GFX11-TRUE16-NEXT: v_mov_b16_e32 v0.l, v2.h
; GFX11-TRUE16-NEXT: .LBB4_2: ; %end
; GFX11-TRUE16-NEXT: s_or_b32 exec_lo, exec_lo, s0
; GFX11-TRUE16-NEXT: s_setpc_b64 s[30:31]
@@ -1025,56 +1076,105 @@ define inreg <3 x i16> @bitcast_v3bf16_to_v3i16_scalar(<3 x bfloat> inreg %a, i3
; GFX9-NEXT: v_mov_b32_e32 v1, s17
; GFX9-NEXT: s_setpc_b64 s[30:31]
;
-; GFX11-LABEL: bitcast_v3bf16_to_v3i16_scalar:
-; GFX11: ; %bb.0:
-; GFX11-NEXT: s_waitcnt vmcnt(0) expcnt(0) lgkmcnt(0)
-; GFX11-NEXT: s_cmp_lg_u32 s2, 0
-; GFX11-NEXT: s_mov_b32 s2, 0
-; GFX11-NEXT: s_cbranch_scc0 .LBB5_3
-; GFX11-NEXT: ; %bb.1: ; %Flow
-; GFX11-NEXT: s_and_not1_b32 vcc_lo, exec_lo, s2
-; GFX11-NEXT: s_cbranch_vccnz .LBB5_4
-; GFX11-NEXT: .LBB5_2: ; %cmp.true
-; GFX11-NEXT: s_lshl_b32 s2, s0, 16
-; GFX11-NEXT: s_lshl_b32 s1, s1, 16
-; GFX11-NEXT: v_add_f32_e64 v0, 0x40c00000, s2
-; GFX11-NEXT: v_add_f32_e64 v1, 0x40c00000, s1
-; GFX11-NEXT: s_pack_lh_b32_b16 s0, 0, s0
-; GFX11-NEXT: s_delay_alu instid0(SALU_CYCLE_1) | instskip(NEXT) | instid1(VALU_DEP_3)
-; GFX11-NEXT: v_add_f32_e64 v2, 0x40c00000, s0
-; GFX11-NEXT: v_bfe_u32 v3, v0, 16, 1
-; GFX11-NEXT: s_delay_alu instid0(VALU_DEP_3)
-; GFX11-NEXT: v_bfe_u32 v4, v1, 16, 1
-; GFX11-NEXT: v_or_b32_e32 v7, 0x400000, v0
-; GFX11-NEXT: v_cmp_u_f32_e32 vcc_lo, v0, v0
-; GFX11-NEXT: v_bfe_u32 v5, v2, 16, 1
-; GFX11-NEXT: v_add_nc_u32_e32 v3, v3, v0
-; GFX11-NEXT: v_add_nc_u32_e32 v4, v4, v1
-; GFX11-NEXT: v_or_b32_e32 v8, 0x400000, v2
-; GFX11-NEXT: v_or_b32_e32 v6, 0x400000, v1
-; GFX11-NEXT: s_delay_alu instid0(VALU_DEP_4) | instskip(SKIP_2) | instid1(VALU_DEP_3)
-; GFX11-NEXT: v_add_nc_u32_e32 v3, 0x7fff, v3
-; GFX11-NEXT: v_add_nc_u32_e32 v5, v5, v2
-; GFX11-NEXT: v_add_nc_u32_e32 v4, 0x7fff, v4
-; GFX11-NEXT: v_cndmask_b32_e32 v0, v3, v7, vcc_lo
-; GFX11-NEXT: v_cmp_u_f32_e32 vcc_lo, v1, v1
-; GFX11-NEXT: s_delay_alu instid0(VALU_DEP_4) | instskip(NEXT) | instid1(VALU_DEP_3)
-; GFX11-NEXT: v_add_nc_u32_e32 v5, 0x7fff, v5
-; GFX11-NEXT: v_lshrrev_b32_e32 v0, 16, v0
-; GFX11-NEXT: v_cndmask_b32_e32 v1, v4, v6, vcc_lo
-; GFX11-NEXT: v_cmp_u_f32_e32 vcc_lo, v2, v2
-; GFX11-NEXT: s_delay_alu instid0(VALU_DEP_2) | instskip(SKIP_1) | instid1(VALU_DEP_2)
-; GFX11-NEXT: v_lshrrev_b32_e32 v1, 16, v1
-; GFX11-NEXT: v_cndmask_b32_e32 v2, v5, v8, vcc_lo
-; GFX11-NEXT: v_lshl_or_b32 v1, 0x7fc0, 16, v1
-; GFX11-NEXT: s_delay_alu instid0(VALU_DEP_2)
-; GFX11-NEXT: v_and_or_b32 v0, 0xffff0000, v2, v0
-; GFX11-NEXT: s_setpc_b64 s[30:31]
-; GFX11-NEXT: .LBB5_3:
-; GFX11-NEXT: s_branch .LBB5_2
-; GFX11-NEXT: .LBB5_4:
-; GFX11-NEXT: v_dual_mov_b32 v0, s0 :: v_dual_mov_b32 v1, s1
-; GFX11-NEXT: s_setpc_b64 s[30:31]
+; GFX11-TRUE16-LABEL: bitcast_v3bf16_to_v3i16_scalar:
+; GFX11-TRUE16: ; %bb.0:
+; GFX11-TRUE16-NEXT: s_waitcnt vmcnt(0) expcnt(0) lgkmcnt(0)
+; GFX11-TRUE16-NEXT: s_cmp_lg_u32 s2, 0
+; GFX11-TRUE16-NEXT: s_mov_b32 s2, 0
+; GFX11-TRUE16-NEXT: s_cbranch_scc0 .LBB5_3
+; GFX11-TRUE16-NEXT: ; %bb.1: ; %Flow
+; GFX11-TRUE16-NEXT: s_and_not1_b32 vcc_lo, exec_lo, s2
+; GFX11-TRUE16-NEXT: s_cbranch_vccnz .LBB5_4
+; GFX11-TRUE16-NEXT: .LBB5_2: ; %cmp.true
+; GFX11-TRUE16-NEXT: s_lshl_b32 s2, s0, 16
+; GFX11-TRUE16-NEXT: s_pack_lh_b32_b16 s0, 0, s0
+; GFX11-TRUE16-NEXT: v_add_f32_e64 v0, 0x40c00000, s2
+; GFX11-TRUE16-NEXT: v_add_f32_e64 v2, 0x40c00000, s0
+; GFX11-TRUE16-NEXT: s_lshl_b32 s1, s1, 16
+; GFX11-TRUE16-NEXT: s_delay_alu instid0(SALU_CYCLE_1) | instskip(NEXT) | instid1(VALU_DEP_3)
+; GFX11-TRUE16-NEXT: v_add_f32_e64 v1, 0x40c00000, s1
+; GFX11-TRUE16-NEXT: v_bfe_u32 v3, v0, 16, 1
+; GFX11-TRUE16-NEXT: s_delay_alu instid0(VALU_DEP_3)
+; GFX11-TRUE16-NEXT: v_bfe_u32 v5, v2, 16, 1
+; GFX11-TRUE16-NEXT: v_or_b32_e32 v7, 0x400000, v0
+; GFX11-TRUE16-NEXT: v_or_b32_e32 v8, 0x400000, v2
+; GFX11-TRUE16-NEXT: v_bfe_u32 v4, v1, 16, 1
+; GFX11-TRUE16-NEXT: v_add_nc_u32_e32 v3, v3, v0
+; GFX11-TRUE16-NEXT: v_add_nc_u32_e32 v5, v5, v2
+; GFX11-TRUE16-NEXT: v_cmp_u_f32_e32 vcc_lo, v0, v0
+; GFX11-TRUE16-NEXT: v_or_b32_e32 v6, 0x400000, v1
+; GFX11-TRUE16-NEXT: s_delay_alu instid0(VALU_DEP_3) | instskip(SKIP_1) | instid1(VALU_DEP_1)
+; GFX11-TRUE16-NEXT: v_add_nc_u32_e32 v5, 0x7fff, v5
+; GFX11-TRUE16-NEXT: v_add_nc_u32_e32 v3, 0x7fff, v3
+; GFX11-TRUE16-NEXT: v_dual_cndmask_b32 v3, v3, v7 :: v_dual_add_nc_u32 v4, v4, v1
+; GFX11-TRUE16-NEXT: s_delay_alu instid0(VALU_DEP_1) | instskip(SKIP_1) | instid1(VALU_DEP_2)
+; GFX11-TRUE16-NEXT: v_add_nc_u32_e32 v4, 0x7fff, v4
+; GFX11-TRUE16-NEXT: v_cmp_u_f32_e32 vcc_lo, v1, v1
+; GFX11-TRUE16-NEXT: v_cndmask_b32_e32 v1, v4, v6, vcc_lo
+; GFX11-TRUE16-NEXT: v_cmp_u_f32_e32 vcc_lo, v2, v2
+; GFX11-TRUE16-NEXT: v_mov_b32_e32 v4, 0x7fc0
+; GFX11-TRUE16-NEXT: s_delay_alu instid0(VALU_DEP_3) | instskip(SKIP_2) | instid1(VALU_DEP_4)
+; GFX11-TRUE16-NEXT: v_mov_b16_e32 v1.l, v1.h
+; GFX11-TRUE16-NEXT: v_cndmask_b32_e32 v0, v5, v8, vcc_lo
+; GFX11-TRUE16-NEXT: v_mov_b16_e32 v0.l, v3.h
+; GFX11-TRUE16-NEXT: v_mov_b16_e32 v1.h, v4.l
+; GFX11-TRUE16-NEXT: s_setpc_b64 s[30:31]
+; GFX11-TRUE16-NEXT: .LBB5_3:
+; GFX11-TRUE16-NEXT: s_branch .LBB5_2
+; GFX11-TRUE16-NEXT: .LBB5_4:
+; GFX11-TRUE16-NEXT: v_dual_mov_b32 v0, s0 :: v_dual_mov_b32 v1, s1
+; GFX11-TRUE16-NEXT: s_setpc_b64 s[30:31]
+;
+; GFX11-FAKE16-LABEL: bitcast_v3bf16_to_v3i16_scalar:
+; GFX11-FAKE16: ; %bb.0:
+; GFX11-FAKE16-NEXT: s_waitcnt vmcnt(0) expcnt(0) lgkmcnt(0)
+; GFX11-FAKE16-NEXT: s_cmp_lg_u32 s2, 0
+; GFX11-FAKE16-NEXT: s_mov_b32 s2, 0
+; GFX11-FAKE16-NEXT: s_cbranch_scc0 .LBB5_3
+; GFX11-FAKE16-NEXT: ; %bb.1: ; %Flow
+; GFX11-FAKE16-NEXT: s_and_not1_b32 vcc_lo, exec_lo, s2
+; GFX11-FAKE16-NEXT: s_cbranch_vccnz .LBB5_4
+; GFX11-FAKE16-NEXT: .LBB5_2: ; %cmp.true
+; GFX11-FAKE16-NEXT: s_lshl_b32 s2, s0, 16
+; GFX11-FAKE16-NEXT: s_lshl_b32 s1, s1, 16
+; GFX11-FAKE16-NEXT: v_add_f32_e64 v0, 0x40c00000, s2
+; GFX11-FAKE16-NEXT: v_add_f32_e64 v1, 0x40c00000, s1
+; GFX11-FAKE16-NEXT: s_pack_lh_b32_b16 s0, 0, s0
+; GFX11-FAKE16-NEXT: s_delay_alu instid0(SALU_CYCLE_1) | instskip(NEXT) | instid1(VALU_DEP_3)
+; GFX11-FAKE16-NEXT: v_add_f32_e64 v2, 0x40c00000, s0
+; GFX11-FAKE16-NEXT: v_bfe_u32 v3, v0, 16, 1
+; GFX11-FAKE16-NEXT: s_delay_alu instid0(VALU_DEP_3)
+; GFX11-FAKE16-NEXT: v_bfe_u32 v4, v1, 16, 1
+; GFX11-FAKE16-NEXT: v_or_b32_e32 v7, 0x400000, v0
+; GFX11-FAKE16-NEXT: v_cmp_u_f32_e32 vcc_lo, v0, v0
+; GFX11-FAKE16-NEXT: v_bfe_u32 v5, v2, 16, 1
+; GFX11-FAKE16-NEXT: v_add_nc_u32_e32 v3, v3, v0
+; GFX11-FAKE16-NEXT: v_add_nc_u32_e32 v4, v4, v1
+; GFX11-FAKE16-NEXT: v_or_b32_e32 v8, 0x400000, v2
+; GFX11-FAKE16-NEXT: v_or_b32_e32 v6, 0x400000, v1
+; GFX11-FAKE16-NEXT: s_delay_alu instid0(VALU_DEP_4) | instskip(SKIP_2) | instid1(VALU_DEP_3)
+; GFX11-FAKE16-NEXT: v_add_nc_u32_e32 v3, 0x7fff, v3
+; GFX11-FAKE16-NEXT: v_add_nc_u32_e32 v5, v5, v2
+; GFX11-FAKE16-NEXT: v_add_nc_u32_e32 v4, 0x7fff, v4
+; GFX11-FAKE16-NEXT: v_cndmask_b32_e32 v0, v3, v7, vcc_lo
+; GFX11-FAKE16-NEXT: v_cmp_u_f32_e32 vcc_lo, v1, v1
+; GFX11-FAKE16-NEXT: s_delay_alu instid0(VALU_DEP_4) | instskip(NEXT) | instid1(VALU_DEP_3)
+; GFX11-FAKE16-NEXT: v_add_nc_u32_e32 v5, 0x7fff, v5
+; GFX11-FAKE16-NEXT: v_lshrrev_b32_e32 v0, 16, v0
+; GFX11-FAKE16-NEXT: v_cndmask_b32_e32 v1, v4, v6, vcc_lo
+; GFX11-FAKE16-NEXT: v_cmp_u_f32_e32 vcc_lo, v2, v2
+; GFX11-FAKE16-NEXT: s_delay_alu instid0(VALU_DEP_2) | instskip(SKIP_1) | instid1(VALU_DEP_2)
+; GFX11-FAKE16-NEXT: v_lshrrev_b32_e32 v1, 16, v1
+; GFX11-FAKE16-NEXT: v_cndmask_b32_e32 v2, v5, v8, vcc_lo
+; GFX11-FAKE16-NEXT: v_lshl_or_b32 v1, 0x7fc0, 16, v1
+; GFX11-FAKE16-NEXT: s_delay_alu instid0(VALU_DEP_2)
+; GFX11-FAKE16-NEXT: v_and_or_b32 v0, 0xffff0000, v2, v0
+; GFX11-FAKE16-NEXT: s_setpc_b64 s[30:31]
+; GFX11-FAKE16-NEXT: .LBB5_3:
+; GFX11-FAKE16-NEXT: s_branch .LBB5_2
+; GFX11-FAKE16-NEXT: .LBB5_4:
+; GFX11-FAKE16-NEXT: v_dual_mov_b32 v0, s0 :: v_dual_mov_b32 v1, s1
+; GFX11-FAKE16-NEXT: s_setpc_b64 s[30:31]
%cmp = icmp eq i32 %b, 0
br i1 %cmp, label %cmp.true, label %cmp.false
diff --git a/llvm/test/CodeGen/AMDGPU/amdgcn.bitcast.512bit.ll b/llvm/test/CodeGen/AMDGPU/amdgcn.bitcast.512bit.ll
index bff054f..ee23420 100644
--- a/llvm/test/CodeGen/AMDGPU/amdgcn.bitcast.512bit.ll
+++ b/llvm/test/CodeGen/AMDGPU/amdgcn.bitcast.512bit.ll
@@ -7351,360 +7351,696 @@ define inreg <16 x i32> @bitcast_v32bf16_to_v16i32_scalar(<32 x bfloat> inreg %a
; GFX9-NEXT: s_waitcnt vmcnt(0)
; GFX9-NEXT: s_setpc_b64 s[30:31]
;
-; GFX11-LABEL: bitcast_v32bf16_to_v16i32_scalar:
-; GFX11: ; %bb.0:
-; GFX11-NEXT: s_waitcnt vmcnt(0) expcnt(0) lgkmcnt(0)
-; GFX11-NEXT: s_mov_b32 s15, s3
-; GFX11-NEXT: s_mov_b32 s14, s2
-; GFX11-NEXT: s_mov_b32 s13, s1
-; GFX11-NEXT: s_mov_b32 s12, s0
-; GFX11-NEXT: s_cmp_lg_u32 s28, 0
-; GFX11-NEXT: s_mov_b32 s0, 0
-; GFX11-NEXT: s_cbranch_scc0 .LBB23_3
-; GFX11-NEXT: ; %bb.1: ; %Flow
-; GFX11-NEXT: s_and_not1_b32 vcc_lo, exec_lo, s0
-; GFX11-NEXT: s_cbranch_vccnz .LBB23_4
-; GFX11-NEXT: .LBB23_2: ; %cmp.true
-; GFX11-NEXT: s_and_b32 s1, s27, 0xffff0000
-; GFX11-NEXT: s_lshl_b32 s0, s27, 16
-; GFX11-NEXT: v_add_f32_e64 v1, 0x40c00000, s1
-; GFX11-NEXT: v_add_f32_e64 v0, 0x40c00000, s0
-; GFX11-NEXT: s_and_b32 s0, s26, 0xffff0000
-; GFX11-NEXT: s_lshl_b32 s2, s26, 16
-; GFX11-NEXT: v_add_f32_e64 v3, 0x40c00000, s0
-; GFX11-NEXT: v_bfe_u32 v4, v1, 16, 1
-; GFX11-NEXT: v_bfe_u32 v2, v0, 16, 1
-; GFX11-NEXT: v_add_f32_e64 v5, 0x40c00000, s2
-; GFX11-NEXT: v_or_b32_e32 v7, 0x400000, v1
-; GFX11-NEXT: v_or_b32_e32 v8, 0x400000, v0
-; GFX11-NEXT: v_add_nc_u32_e32 v4, v4, v1
-; GFX11-NEXT: v_bfe_u32 v9, v3, 16, 1
-; GFX11-NEXT: v_bfe_u32 v10, v5, 16, 1
-; GFX11-NEXT: v_cmp_u_f32_e32 vcc_lo, v0, v0
-; GFX11-NEXT: s_and_b32 s1, s25, 0xffff0000
-; GFX11-NEXT: v_add_nc_u32_e32 v4, 0x7fff, v4
-; GFX11-NEXT: v_add_nc_u32_e32 v2, v2, v0
-; GFX11-NEXT: v_add_f32_e64 v6, 0x40c00000, s1
-; GFX11-NEXT: s_lshl_b32 s3, s25, 16
-; GFX11-NEXT: s_and_b32 s0, s24, 0xffff0000
-; GFX11-NEXT: s_lshl_b32 s1, s24, 16
-; GFX11-NEXT: v_add_nc_u32_e32 v2, 0x7fff, v2
-; GFX11-NEXT: s_delay_alu instid0(VALU_DEP_1)
-; GFX11-NEXT: v_cndmask_b32_e32 v0, v2, v8, vcc_lo
-; GFX11-NEXT: v_add_nc_u32_e32 v2, v9, v3
-; GFX11-NEXT: v_add_nc_u32_e32 v8, v10, v5
-; GFX11-NEXT: v_cmp_u_f32_e32 vcc_lo, v1, v1
-; GFX11-NEXT: v_or_b32_e32 v9, 0x400000, v3
-; GFX11-NEXT: v_lshrrev_b32_e32 v0, 16, v0
-; GFX11-NEXT: v_dual_cndmask_b32 v1, v4, v7 :: v_dual_add_nc_u32 v2, 0x7fff, v2
-; GFX11-NEXT: v_or_b32_e32 v4, 0x400000, v5
-; GFX11-NEXT: v_add_nc_u32_e32 v7, 0x7fff, v8
-; GFX11-NEXT: v_cmp_u_f32_e32 vcc_lo, v5, v5
-; GFX11-NEXT: v_add_f32_e64 v8, 0x40c00000, s3
-; GFX11-NEXT: v_bfe_u32 v5, v6, 16, 1
-; GFX11-NEXT: v_lshrrev_b32_e32 v1, 16, v1
-; GFX11-NEXT: v_and_b32_e32 v0, 0xffff, v0
-; GFX11-NEXT: v_cndmask_b32_e32 v4, v7, v4, vcc_lo
-; GFX11-NEXT: v_cmp_u_f32_e32 vcc_lo, v3, v3
-; GFX11-NEXT: v_bfe_u32 v7, v8, 16, 1
-; GFX11-NEXT: s_delay_alu instid0(VALU_DEP_4) | instskip(NEXT) | instid1(VALU_DEP_4)
-; GFX11-NEXT: v_lshl_or_b32 v15, v1, 16, v0
-; GFX11-NEXT: v_lshrrev_b32_e32 v3, 16, v4
-; GFX11-NEXT: v_cndmask_b32_e32 v2, v2, v9, vcc_lo
-; GFX11-NEXT: v_add_nc_u32_e32 v4, v5, v6
-; GFX11-NEXT: v_or_b32_e32 v9, 0x400000, v6
-; GFX11-NEXT: v_cmp_u_f32_e32 vcc_lo, v8, v8
-; GFX11-NEXT: v_and_b32_e32 v1, 0xffff, v3
-; GFX11-NEXT: v_lshrrev_b32_e32 v0, 16, v2
-; GFX11-NEXT: v_add_nc_u32_e32 v2, 0x7fff, v4
-; GFX11-NEXT: v_add_nc_u32_e32 v5, v7, v8
-; GFX11-NEXT: v_or_b32_e32 v4, 0x400000, v8
-; GFX11-NEXT: v_add_f32_e64 v7, 0x40c00000, s1
-; GFX11-NEXT: s_lshl_b32 s1, s23, 16
-; GFX11-NEXT: v_lshl_or_b32 v14, v0, 16, v1
-; GFX11-NEXT: v_add_nc_u32_e32 v3, 0x7fff, v5
-; GFX11-NEXT: v_add_f32_e64 v5, 0x40c00000, s0
-; GFX11-NEXT: v_bfe_u32 v8, v7, 16, 1
-; GFX11-NEXT: s_and_b32 s0, s23, 0xffff0000
-; GFX11-NEXT: s_delay_alu instid0(VALU_DEP_3) | instskip(NEXT) | instid1(VALU_DEP_3)
-; GFX11-NEXT: v_cndmask_b32_e32 v3, v3, v4, vcc_lo
-; GFX11-NEXT: v_bfe_u32 v4, v5, 16, 1
-; GFX11-NEXT: v_cmp_u_f32_e32 vcc_lo, v6, v6
-; GFX11-NEXT: v_add_nc_u32_e32 v6, v8, v7
-; GFX11-NEXT: v_add_f32_e64 v8, 0x40c00000, s1
-; GFX11-NEXT: v_lshrrev_b32_e32 v3, 16, v3
-; GFX11-NEXT: v_add_nc_u32_e32 v4, v4, v5
-; GFX11-NEXT: v_cndmask_b32_e32 v2, v2, v9, vcc_lo
-; GFX11-NEXT: v_or_b32_e32 v9, 0x400000, v5
-; GFX11-NEXT: v_cmp_u_f32_e32 vcc_lo, v7, v7
-; GFX11-NEXT: s_lshl_b32 s1, s22, 16
-; GFX11-NEXT: s_delay_alu instid0(VALU_DEP_3)
-; GFX11-NEXT: v_lshrrev_b32_e32 v0, 16, v2
-; GFX11-NEXT: v_add_nc_u32_e32 v2, 0x7fff, v4
-; GFX11-NEXT: v_and_b32_e32 v1, 0xffff, v3
-; GFX11-NEXT: v_add_nc_u32_e32 v3, 0x7fff, v6
-; GFX11-NEXT: v_or_b32_e32 v4, 0x400000, v7
-; GFX11-NEXT: v_add_f32_e64 v6, 0x40c00000, s0
-; GFX11-NEXT: v_bfe_u32 v7, v8, 16, 1
-; GFX11-NEXT: s_and_b32 s0, s22, 0xffff0000
-; GFX11-NEXT: s_delay_alu instid0(VALU_DEP_3) | instskip(NEXT) | instid1(VALU_DEP_3)
-; GFX11-NEXT: v_cndmask_b32_e32 v3, v3, v4, vcc_lo
-; GFX11-NEXT: v_bfe_u32 v4, v6, 16, 1
-; GFX11-NEXT: v_cmp_u_f32_e32 vcc_lo, v5, v5
-; GFX11-NEXT: v_add_nc_u32_e32 v5, v7, v8
-; GFX11-NEXT: v_add_f32_e64 v7, 0x40c00000, s1
-; GFX11-NEXT: v_lshrrev_b32_e32 v3, 16, v3
-; GFX11-NEXT: v_add_nc_u32_e32 v4, v4, v6
-; GFX11-NEXT: v_cndmask_b32_e32 v2, v2, v9, vcc_lo
-; GFX11-NEXT: v_lshl_or_b32 v13, v0, 16, v1
-; GFX11-NEXT: v_cmp_u_f32_e32 vcc_lo, v8, v8
-; GFX11-NEXT: v_and_b32_e32 v1, 0xffff, v3
-; GFX11-NEXT: v_add_nc_u32_e32 v3, 0x7fff, v5
-; GFX11-NEXT: v_lshrrev_b32_e32 v0, 16, v2
-; GFX11-NEXT: v_add_nc_u32_e32 v2, 0x7fff, v4
-; GFX11-NEXT: v_or_b32_e32 v4, 0x400000, v8
-; GFX11-NEXT: v_add_f32_e64 v5, 0x40c00000, s0
-; GFX11-NEXT: v_or_b32_e32 v9, 0x400000, v6
-; GFX11-NEXT: v_bfe_u32 v8, v7, 16, 1
-; GFX11-NEXT: s_and_b32 s0, s21, 0xffff0000
-; GFX11-NEXT: v_cndmask_b32_e32 v3, v3, v4, vcc_lo
-; GFX11-NEXT: v_bfe_u32 v4, v5, 16, 1
-; GFX11-NEXT: v_cmp_u_f32_e32 vcc_lo, v6, v6
-; GFX11-NEXT: v_add_nc_u32_e32 v6, v8, v7
-; GFX11-NEXT: s_lshl_b32 s1, s21, 16
-; GFX11-NEXT: v_lshrrev_b32_e32 v3, 16, v3
-; GFX11-NEXT: v_add_nc_u32_e32 v4, v4, v5
-; GFX11-NEXT: v_cndmask_b32_e32 v2, v2, v9, vcc_lo
-; GFX11-NEXT: v_lshl_or_b32 v12, v0, 16, v1
-; GFX11-NEXT: v_add_f32_e64 v8, 0x40c00000, s1
-; GFX11-NEXT: v_and_b32_e32 v1, 0xffff, v3
-; GFX11-NEXT: v_add_nc_u32_e32 v3, 0x7fff, v6
-; GFX11-NEXT: v_lshrrev_b32_e32 v0, 16, v2
-; GFX11-NEXT: v_add_nc_u32_e32 v2, 0x7fff, v4
-; GFX11-NEXT: v_or_b32_e32 v4, 0x400000, v7
-; GFX11-NEXT: v_add_f32_e64 v6, 0x40c00000, s0
-; GFX11-NEXT: v_cmp_u_f32_e32 vcc_lo, v7, v7
-; GFX11-NEXT: v_or_b32_e32 v9, 0x400000, v5
-; GFX11-NEXT: v_bfe_u32 v7, v8, 16, 1
-; GFX11-NEXT: s_and_b32 s0, s20, 0xffff0000
-; GFX11-NEXT: s_lshl_b32 s1, s20, 16
-; GFX11-NEXT: v_cndmask_b32_e32 v3, v3, v4, vcc_lo
-; GFX11-NEXT: v_bfe_u32 v4, v6, 16, 1
-; GFX11-NEXT: v_cmp_u_f32_e32 vcc_lo, v5, v5
-; GFX11-NEXT: v_add_nc_u32_e32 v5, v7, v8
-; GFX11-NEXT: v_lshl_or_b32 v11, v0, 16, v1
-; GFX11-NEXT: v_lshrrev_b32_e32 v3, 16, v3
-; GFX11-NEXT: v_add_nc_u32_e32 v4, v4, v6
-; GFX11-NEXT: v_cndmask_b32_e32 v2, v2, v9, vcc_lo
-; GFX11-NEXT: v_add_f32_e64 v7, 0x40c00000, s1
-; GFX11-NEXT: v_cmp_u_f32_e32 vcc_lo, v8, v8
-; GFX11-NEXT: v_and_b32_e32 v1, 0xffff, v3
-; GFX11-NEXT: v_add_nc_u32_e32 v3, 0x7fff, v5
-; GFX11-NEXT: v_lshrrev_b32_e32 v0, 16, v2
-; GFX11-NEXT: v_add_nc_u32_e32 v2, 0x7fff, v4
-; GFX11-NEXT: v_or_b32_e32 v4, 0x400000, v8
-; GFX11-NEXT: v_add_f32_e64 v5, 0x40c00000, s0
-; GFX11-NEXT: v_or_b32_e32 v9, 0x400000, v6
-; GFX11-NEXT: v_bfe_u32 v8, v7, 16, 1
-; GFX11-NEXT: v_lshl_or_b32 v10, v0, 16, v1
-; GFX11-NEXT: v_cndmask_b32_e32 v3, v3, v4, vcc_lo
-; GFX11-NEXT: v_bfe_u32 v4, v5, 16, 1
-; GFX11-NEXT: v_cmp_u_f32_e32 vcc_lo, v6, v6
-; GFX11-NEXT: v_add_nc_u32_e32 v0, v8, v7
-; GFX11-NEXT: s_lshl_b32 s0, s19, 16
-; GFX11-NEXT: v_lshrrev_b32_e32 v3, 16, v3
-; GFX11-NEXT: v_add_nc_u32_e32 v4, v4, v5
-; GFX11-NEXT: v_cndmask_b32_e32 v2, v2, v9, vcc_lo
-; GFX11-NEXT: v_add_nc_u32_e32 v0, 0x7fff, v0
-; GFX11-NEXT: v_or_b32_e32 v6, 0x400000, v7
-; GFX11-NEXT: v_cmp_u_f32_e32 vcc_lo, v7, v7
-; GFX11-NEXT: v_or_b32_e32 v9, 0x400000, v5
-; GFX11-NEXT: v_lshrrev_b32_e32 v1, 16, v2
-; GFX11-NEXT: v_and_b32_e32 v2, 0xffff, v3
-; GFX11-NEXT: v_add_nc_u32_e32 v3, 0x7fff, v4
-; GFX11-NEXT: v_add_f32_e64 v4, 0x40c00000, s0
-; GFX11-NEXT: s_and_b32 s0, s19, 0xffff0000
-; GFX11-NEXT: v_cndmask_b32_e32 v0, v0, v6, vcc_lo
-; GFX11-NEXT: v_add_f32_e64 v8, 0x40c00000, s0
-; GFX11-NEXT: v_cmp_u_f32_e32 vcc_lo, v5, v5
-; GFX11-NEXT: v_bfe_u32 v16, v4, 16, 1
-; GFX11-NEXT: s_and_b32 s0, s18, 0xffff0000
-; GFX11-NEXT: v_lshrrev_b32_e32 v0, 16, v0
-; GFX11-NEXT: v_bfe_u32 v6, v8, 16, 1
-; GFX11-NEXT: v_cndmask_b32_e32 v3, v3, v9, vcc_lo
-; GFX11-NEXT: v_add_nc_u32_e32 v5, v16, v4
-; GFX11-NEXT: v_lshl_or_b32 v9, v1, 16, v2
-; GFX11-NEXT: v_cmp_u_f32_e32 vcc_lo, v4, v4
-; GFX11-NEXT: v_add_nc_u32_e32 v1, v6, v8
-; GFX11-NEXT: v_lshrrev_b32_e32 v2, 16, v3
-; GFX11-NEXT: v_add_nc_u32_e32 v3, 0x7fff, v5
-; GFX11-NEXT: v_or_b32_e32 v5, 0x400000, v4
-; GFX11-NEXT: v_or_b32_e32 v6, 0x400000, v8
-; GFX11-NEXT: v_add_nc_u32_e32 v1, 0x7fff, v1
-; GFX11-NEXT: v_add_f32_e64 v4, 0x40c00000, s0
-; GFX11-NEXT: s_lshl_b32 s1, s18, 16
-; GFX11-NEXT: v_cndmask_b32_e32 v3, v3, v5, vcc_lo
-; GFX11-NEXT: v_cmp_u_f32_e32 vcc_lo, v8, v8
-; GFX11-NEXT: v_and_b32_e32 v0, 0xffff, v0
-; GFX11-NEXT: v_add_f32_e64 v5, 0x40c00000, s1
-; GFX11-NEXT: s_lshl_b32 s0, s17, 16
-; GFX11-NEXT: v_lshrrev_b32_e32 v3, 16, v3
-; GFX11-NEXT: v_cndmask_b32_e32 v1, v1, v6, vcc_lo
-; GFX11-NEXT: v_bfe_u32 v6, v4, 16, 1
-; GFX11-NEXT: v_bfe_u32 v7, v5, 16, 1
-; GFX11-NEXT: v_lshl_or_b32 v8, v2, 16, v0
-; GFX11-NEXT: s_and_b32 s1, s17, 0xffff0000
-; GFX11-NEXT: v_lshrrev_b32_e32 v0, 16, v1
-; GFX11-NEXT: v_add_nc_u32_e32 v2, v6, v4
-; GFX11-NEXT: v_add_f32_e64 v6, 0x40c00000, s0
-; GFX11-NEXT: v_and_b32_e32 v1, 0xffff, v3
-; GFX11-NEXT: v_add_nc_u32_e32 v3, v7, v5
-; GFX11-NEXT: v_add_f32_e64 v7, 0x40c00000, s1
-; GFX11-NEXT: v_add_nc_u32_e32 v2, 0x7fff, v2
-; GFX11-NEXT: v_or_b32_e32 v16, 0x400000, v4
-; GFX11-NEXT: v_bfe_u32 v17, v6, 16, 1
-; GFX11-NEXT: v_cmp_u_f32_e32 vcc_lo, v4, v4
-; GFX11-NEXT: v_add_nc_u32_e32 v3, 0x7fff, v3
-; GFX11-NEXT: v_or_b32_e32 v18, 0x400000, v5
-; GFX11-NEXT: v_bfe_u32 v19, v7, 16, 1
-; GFX11-NEXT: v_add_nc_u32_e32 v4, v17, v6
-; GFX11-NEXT: v_cndmask_b32_e32 v2, v2, v16, vcc_lo
-; GFX11-NEXT: v_cmp_u_f32_e32 vcc_lo, v5, v5
-; GFX11-NEXT: v_or_b32_e32 v16, 0x400000, v6
-; GFX11-NEXT: v_add_nc_u32_e32 v5, v19, v7
-; GFX11-NEXT: v_add_nc_u32_e32 v4, 0x7fff, v4
-; GFX11-NEXT: s_lshl_b32 s1, s16, 16
-; GFX11-NEXT: v_cndmask_b32_e32 v3, v3, v18, vcc_lo
-; GFX11-NEXT: v_cmp_u_f32_e32 vcc_lo, v6, v6
-; GFX11-NEXT: v_add_nc_u32_e32 v5, 0x7fff, v5
-; GFX11-NEXT: v_or_b32_e32 v17, 0x400000, v7
-; GFX11-NEXT: v_add_f32_e64 v18, 0x40c00000, s1
-; GFX11-NEXT: s_and_b32 s0, s16, 0xffff0000
-; GFX11-NEXT: v_cndmask_b32_e32 v4, v4, v16, vcc_lo
-; GFX11-NEXT: v_cmp_u_f32_e32 vcc_lo, v7, v7
-; GFX11-NEXT: v_lshrrev_b32_e32 v3, 16, v3
-; GFX11-NEXT: v_add_f32_e64 v16, 0x40c00000, s0
-; GFX11-NEXT: v_lshrrev_b32_e32 v2, 16, v2
-; GFX11-NEXT: v_lshrrev_b32_e32 v4, 16, v4
-; GFX11-NEXT: v_cndmask_b32_e32 v5, v5, v17, vcc_lo
-; GFX11-NEXT: v_bfe_u32 v17, v18, 16, 1
-; GFX11-NEXT: v_bfe_u32 v6, v16, 16, 1
-; GFX11-NEXT: v_and_b32_e32 v3, 0xffff, v3
-; GFX11-NEXT: v_lshl_or_b32 v7, v0, 16, v1
-; GFX11-NEXT: v_lshrrev_b32_e32 v5, 16, v5
-; GFX11-NEXT: v_add_nc_u32_e32 v0, v17, v18
-; GFX11-NEXT: v_and_b32_e32 v4, 0xffff, v4
-; GFX11-NEXT: v_add_nc_u32_e32 v19, v6, v16
-; GFX11-NEXT: v_lshl_or_b32 v6, v2, 16, v3
-; GFX11-NEXT: v_or_b32_e32 v3, 0x400000, v18
-; GFX11-NEXT: v_add_nc_u32_e32 v0, 0x7fff, v0
-; GFX11-NEXT: v_cmp_u_f32_e32 vcc_lo, v18, v18
-; GFX11-NEXT: s_and_b32 s0, s15, 0xffff0000
-; GFX11-NEXT: v_lshl_or_b32 v5, v5, 16, v4
-; GFX11-NEXT: v_add_nc_u32_e32 v1, 0x7fff, v19
-; GFX11-NEXT: v_or_b32_e32 v2, 0x400000, v16
-; GFX11-NEXT: v_add_f32_e64 v4, 0x40c00000, s0
-; GFX11-NEXT: v_cndmask_b32_e32 v0, v0, v3, vcc_lo
-; GFX11-NEXT: v_cmp_u_f32_e32 vcc_lo, v16, v16
-; GFX11-NEXT: s_lshl_b32 s1, s15, 16
-; GFX11-NEXT: s_and_b32 s0, s14, 0xffff0000
-; GFX11-NEXT: v_add_f32_e64 v17, 0x40c00000, s1
-; GFX11-NEXT: v_add_f32_e64 v16, 0x40c00000, s0
-; GFX11-NEXT: v_cndmask_b32_e32 v1, v1, v2, vcc_lo
-; GFX11-NEXT: v_bfe_u32 v2, v4, 16, 1
-; GFX11-NEXT: v_or_b32_e32 v19, 0x400000, v4
-; GFX11-NEXT: v_bfe_u32 v3, v17, 16, 1
-; GFX11-NEXT: v_bfe_u32 v18, v16, 16, 1
-; GFX11-NEXT: v_cmp_u_f32_e32 vcc_lo, v4, v4
-; GFX11-NEXT: v_add_nc_u32_e32 v2, v2, v4
-; GFX11-NEXT: s_lshl_b32 s0, s14, 16
-; GFX11-NEXT: v_add_nc_u32_e32 v3, v3, v17
-; GFX11-NEXT: v_or_b32_e32 v20, 0x400000, v17
-; GFX11-NEXT: v_add_nc_u32_e32 v18, v18, v16
-; GFX11-NEXT: v_add_nc_u32_e32 v2, 0x7fff, v2
-; GFX11-NEXT: v_add_f32_e64 v4, 0x40c00000, s0
-; GFX11-NEXT: v_add_nc_u32_e32 v3, 0x7fff, v3
-; GFX11-NEXT: s_and_b32 s0, s13, 0xffff0000
-; GFX11-NEXT: v_lshrrev_b32_e32 v0, 16, v0
-; GFX11-NEXT: v_cndmask_b32_e32 v2, v2, v19, vcc_lo
-; GFX11-NEXT: v_cmp_u_f32_e32 vcc_lo, v17, v17
-; GFX11-NEXT: v_add_nc_u32_e32 v17, 0x7fff, v18
-; GFX11-NEXT: v_or_b32_e32 v18, 0x400000, v16
-; GFX11-NEXT: v_bfe_u32 v19, v4, 16, 1
-; GFX11-NEXT: v_lshrrev_b32_e32 v1, 16, v1
-; GFX11-NEXT: v_cndmask_b32_e32 v3, v3, v20, vcc_lo
-; GFX11-NEXT: v_cmp_u_f32_e32 vcc_lo, v16, v16
-; GFX11-NEXT: v_and_b32_e32 v0, 0xffff, v0
-; GFX11-NEXT: v_lshrrev_b32_e32 v2, 16, v2
-; GFX11-NEXT: s_delay_alu instid0(VALU_DEP_4)
-; GFX11-NEXT: v_lshrrev_b32_e32 v3, 16, v3
-; GFX11-NEXT: v_dual_cndmask_b32 v16, v17, v18 :: v_dual_add_nc_u32 v17, v19, v4
-; GFX11-NEXT: v_add_f32_e64 v18, 0x40c00000, s0
-; GFX11-NEXT: s_lshl_b32 s0, s13, 16
-; GFX11-NEXT: v_or_b32_e32 v19, 0x400000, v4
-; GFX11-NEXT: v_add_f32_e64 v21, 0x40c00000, s0
-; GFX11-NEXT: v_add_nc_u32_e32 v17, 0x7fff, v17
-; GFX11-NEXT: v_cmp_u_f32_e32 vcc_lo, v4, v4
-; GFX11-NEXT: s_lshl_b32 s0, s12, 16
-; GFX11-NEXT: v_bfe_u32 v20, v18, 16, 1
-; GFX11-NEXT: v_or_b32_e32 v26, 0x400000, v21
-; GFX11-NEXT: v_or_b32_e32 v25, 0x400000, v18
-; GFX11-NEXT: v_cndmask_b32_e32 v4, v17, v19, vcc_lo
-; GFX11-NEXT: v_add_f32_e64 v17, 0x40c00000, s0
-; GFX11-NEXT: v_bfe_u32 v19, v21, 16, 1
-; GFX11-NEXT: s_and_b32 s0, s12, 0xffff0000
-; GFX11-NEXT: v_cmp_u_f32_e32 vcc_lo, v21, v21
-; GFX11-NEXT: v_add_f32_e64 v22, 0x40c00000, s0
-; GFX11-NEXT: v_bfe_u32 v23, v17, 16, 1
-; GFX11-NEXT: v_add_nc_u32_e32 v19, v19, v21
-; GFX11-NEXT: v_add_nc_u32_e32 v20, v20, v18
-; GFX11-NEXT: v_or_b32_e32 v27, 0x400000, v17
-; GFX11-NEXT: v_bfe_u32 v24, v22, 16, 1
-; GFX11-NEXT: v_add_nc_u32_e32 v23, v23, v17
-; GFX11-NEXT: v_add_nc_u32_e32 v19, 0x7fff, v19
-; GFX11-NEXT: v_add_nc_u32_e32 v20, 0x7fff, v20
-; GFX11-NEXT: v_lshrrev_b32_e32 v4, 16, v4
-; GFX11-NEXT: v_add_nc_u32_e32 v24, v24, v22
-; GFX11-NEXT: v_add_nc_u32_e32 v23, 0x7fff, v23
-; GFX11-NEXT: v_cndmask_b32_e32 v19, v19, v26, vcc_lo
-; GFX11-NEXT: v_cmp_u_f32_e32 vcc_lo, v17, v17
-; GFX11-NEXT: v_and_b32_e32 v3, 0xffff, v3
-; GFX11-NEXT: v_add_nc_u32_e32 v21, 0x7fff, v24
-; GFX11-NEXT: v_or_b32_e32 v24, 0x400000, v22
-; GFX11-NEXT: v_lshrrev_b32_e32 v19, 16, v19
-; GFX11-NEXT: v_cndmask_b32_e32 v17, v23, v27, vcc_lo
-; GFX11-NEXT: v_cmp_u_f32_e32 vcc_lo, v18, v18
-; GFX11-NEXT: v_lshrrev_b32_e32 v16, 16, v16
-; GFX11-NEXT: v_lshl_or_b32 v3, v2, 16, v3
-; GFX11-NEXT: v_and_b32_e32 v19, 0xffff, v19
-; GFX11-NEXT: v_lshrrev_b32_e32 v17, 16, v17
-; GFX11-NEXT: v_cndmask_b32_e32 v18, v20, v25, vcc_lo
-; GFX11-NEXT: v_cmp_u_f32_e32 vcc_lo, v22, v22
-; GFX11-NEXT: s_delay_alu instid0(VALU_DEP_3) | instskip(NEXT) | instid1(VALU_DEP_3)
-; GFX11-NEXT: v_and_b32_e32 v17, 0xffff, v17
-; GFX11-NEXT: v_lshrrev_b32_e32 v18, 16, v18
-; GFX11-NEXT: v_cndmask_b32_e32 v20, v21, v24, vcc_lo
-; GFX11-NEXT: v_and_b32_e32 v21, 0xffff, v4
-; GFX11-NEXT: v_lshl_or_b32 v4, v1, 16, v0
-; GFX11-NEXT: s_delay_alu instid0(VALU_DEP_4) | instskip(NEXT) | instid1(VALU_DEP_4)
-; GFX11-NEXT: v_lshl_or_b32 v1, v18, 16, v19
-; GFX11-NEXT: v_lshrrev_b32_e32 v20, 16, v20
-; GFX11-NEXT: s_delay_alu instid0(VALU_DEP_4) | instskip(NEXT) | instid1(VALU_DEP_2)
-; GFX11-NEXT: v_lshl_or_b32 v2, v16, 16, v21
-; GFX11-NEXT: v_lshl_or_b32 v0, v20, 16, v17
-; GFX11-NEXT: s_setpc_b64 s[30:31]
-; GFX11-NEXT: .LBB23_3:
-; GFX11-NEXT: s_branch .LBB23_2
-; GFX11-NEXT: .LBB23_4:
-; GFX11-NEXT: v_dual_mov_b32 v0, s12 :: v_dual_mov_b32 v1, s13
-; GFX11-NEXT: v_dual_mov_b32 v2, s14 :: v_dual_mov_b32 v3, s15
-; GFX11-NEXT: v_dual_mov_b32 v4, s16 :: v_dual_mov_b32 v5, s17
-; GFX11-NEXT: v_dual_mov_b32 v6, s18 :: v_dual_mov_b32 v7, s19
-; GFX11-NEXT: v_dual_mov_b32 v8, s20 :: v_dual_mov_b32 v9, s21
-; GFX11-NEXT: v_dual_mov_b32 v10, s22 :: v_dual_mov_b32 v11, s23
-; GFX11-NEXT: v_dual_mov_b32 v12, s24 :: v_dual_mov_b32 v13, s25
-; GFX11-NEXT: v_dual_mov_b32 v14, s26 :: v_dual_mov_b32 v15, s27
-; GFX11-NEXT: s_setpc_b64 s[30:31]
+; GFX11-TRUE16-LABEL: bitcast_v32bf16_to_v16i32_scalar:
+; GFX11-TRUE16: ; %bb.0:
+; GFX11-TRUE16-NEXT: s_waitcnt vmcnt(0) expcnt(0) lgkmcnt(0)
+; GFX11-TRUE16-NEXT: s_mov_b32 s15, s3
+; GFX11-TRUE16-NEXT: s_mov_b32 s14, s2
+; GFX11-TRUE16-NEXT: s_mov_b32 s13, s1
+; GFX11-TRUE16-NEXT: s_mov_b32 s12, s0
+; GFX11-TRUE16-NEXT: s_cmp_lg_u32 s28, 0
+; GFX11-TRUE16-NEXT: s_mov_b32 s0, 0
+; GFX11-TRUE16-NEXT: s_cbranch_scc0 .LBB23_3
+; GFX11-TRUE16-NEXT: ; %bb.1: ; %Flow
+; GFX11-TRUE16-NEXT: s_and_not1_b32 vcc_lo, exec_lo, s0
+; GFX11-TRUE16-NEXT: s_cbranch_vccnz .LBB23_4
+; GFX11-TRUE16-NEXT: .LBB23_2: ; %cmp.true
+; GFX11-TRUE16-NEXT: s_and_b32 s0, s27, 0xffff0000
+; GFX11-TRUE16-NEXT: s_lshl_b32 s1, s27, 16
+; GFX11-TRUE16-NEXT: v_add_f32_e64 v0, 0x40c00000, s0
+; GFX11-TRUE16-NEXT: v_add_f32_e64 v1, 0x40c00000, s1
+; GFX11-TRUE16-NEXT: s_and_b32 s0, s26, 0xffff0000
+; GFX11-TRUE16-NEXT: s_lshl_b32 s1, s26, 16
+; GFX11-TRUE16-NEXT: v_add_f32_e64 v4, 0x40c00000, s0
+; GFX11-TRUE16-NEXT: v_bfe_u32 v2, v0, 16, 1
+; GFX11-TRUE16-NEXT: v_bfe_u32 v3, v1, 16, 1
+; GFX11-TRUE16-NEXT: v_or_b32_e32 v6, 0x400000, v0
+; GFX11-TRUE16-NEXT: v_or_b32_e32 v8, 0x400000, v1
+; GFX11-TRUE16-NEXT: v_cmp_u_f32_e32 vcc_lo, v0, v0
+; GFX11-TRUE16-NEXT: v_add_nc_u32_e32 v2, v2, v0
+; GFX11-TRUE16-NEXT: v_add_f32_e64 v5, 0x40c00000, s1
+; GFX11-TRUE16-NEXT: v_bfe_u32 v7, v4, 16, 1
+; GFX11-TRUE16-NEXT: s_and_b32 s0, s25, 0xffff0000
+; GFX11-TRUE16-NEXT: s_lshl_b32 s1, s17, 16
+; GFX11-TRUE16-NEXT: v_add_nc_u32_e32 v2, 0x7fff, v2
+; GFX11-TRUE16-NEXT: v_add_nc_u32_e32 v3, v3, v1
+; GFX11-TRUE16-NEXT: v_bfe_u32 v9, v5, 16, 1
+; GFX11-TRUE16-NEXT: s_delay_alu instid0(VALU_DEP_3) | instskip(NEXT) | instid1(VALU_DEP_3)
+; GFX11-TRUE16-NEXT: v_dual_cndmask_b32 v0, v2, v6 :: v_dual_add_nc_u32 v7, v7, v4
+; GFX11-TRUE16-NEXT: v_add_nc_u32_e32 v3, 0x7fff, v3
+; GFX11-TRUE16-NEXT: v_cmp_u_f32_e32 vcc_lo, v1, v1
+; GFX11-TRUE16-NEXT: s_delay_alu instid0(VALU_DEP_4)
+; GFX11-TRUE16-NEXT: v_add_nc_u32_e32 v2, v9, v5
+; GFX11-TRUE16-NEXT: v_or_b32_e32 v6, 0x400000, v4
+; GFX11-TRUE16-NEXT: v_lshrrev_b32_e32 v0, 16, v0
+; GFX11-TRUE16-NEXT: v_cndmask_b32_e32 v1, v3, v8, vcc_lo
+; GFX11-TRUE16-NEXT: v_add_nc_u32_e32 v3, 0x7fff, v7
+; GFX11-TRUE16-NEXT: v_cmp_u_f32_e32 vcc_lo, v4, v4
+; GFX11-TRUE16-NEXT: v_add_nc_u32_e32 v2, 0x7fff, v2
+; GFX11-TRUE16-NEXT: v_or_b32_e32 v7, 0x400000, v5
+; GFX11-TRUE16-NEXT: v_lshrrev_b32_e32 v15, 16, v1
+; GFX11-TRUE16-NEXT: v_add_f32_e64 v1, 0x40c00000, s0
+; GFX11-TRUE16-NEXT: s_lshl_b32 s0, s25, 16
+; GFX11-TRUE16-NEXT: v_cndmask_b32_e32 v3, v3, v6, vcc_lo
+; GFX11-TRUE16-NEXT: v_add_f32_e64 v9, 0x40c00000, s0
+; GFX11-TRUE16-NEXT: v_cmp_u_f32_e32 vcc_lo, v5, v5
+; GFX11-TRUE16-NEXT: v_bfe_u32 v8, v1, 16, 1
+; GFX11-TRUE16-NEXT: s_and_b32 s0, s24, 0xffff0000
+; GFX11-TRUE16-NEXT: v_mov_b16_e32 v15.h, v0.l
+; GFX11-TRUE16-NEXT: v_bfe_u32 v5, v9, 16, 1
+; GFX11-TRUE16-NEXT: v_cndmask_b32_e32 v2, v2, v7, vcc_lo
+; GFX11-TRUE16-NEXT: v_add_nc_u32_e32 v4, v8, v1
+; GFX11-TRUE16-NEXT: v_lshrrev_b32_e32 v0, 16, v3
+; GFX11-TRUE16-NEXT: v_cmp_u_f32_e32 vcc_lo, v1, v1
+; GFX11-TRUE16-NEXT: v_add_nc_u32_e32 v3, v5, v9
+; GFX11-TRUE16-NEXT: v_lshrrev_b32_e32 v14, 16, v2
+; GFX11-TRUE16-NEXT: v_add_nc_u32_e32 v2, 0x7fff, v4
+; GFX11-TRUE16-NEXT: v_or_b32_e32 v4, 0x400000, v1
+; GFX11-TRUE16-NEXT: v_add_f32_e64 v5, 0x40c00000, s0
+; GFX11-TRUE16-NEXT: s_lshl_b32 s0, s24, 16
+; GFX11-TRUE16-NEXT: v_add_nc_u32_e32 v3, 0x7fff, v3
+; GFX11-TRUE16-NEXT: v_add_f32_e64 v6, 0x40c00000, s0
+; GFX11-TRUE16-NEXT: v_or_b32_e32 v7, 0x400000, v9
+; GFX11-TRUE16-NEXT: v_bfe_u32 v8, v5, 16, 1
+; GFX11-TRUE16-NEXT: v_cndmask_b32_e32 v1, v2, v4, vcc_lo
+; GFX11-TRUE16-NEXT: v_cmp_u_f32_e32 vcc_lo, v9, v9
+; GFX11-TRUE16-NEXT: v_bfe_u32 v2, v6, 16, 1
+; GFX11-TRUE16-NEXT: s_and_b32 s0, s23, 0xffff0000
+; GFX11-TRUE16-NEXT: v_add_nc_u32_e32 v4, v8, v5
+; GFX11-TRUE16-NEXT: v_mov_b16_e32 v14.h, v0.l
+; GFX11-TRUE16-NEXT: v_cndmask_b32_e32 v3, v3, v7, vcc_lo
+; GFX11-TRUE16-NEXT: v_lshrrev_b32_e32 v0, 16, v1
+; GFX11-TRUE16-NEXT: v_add_nc_u32_e32 v1, v2, v6
+; GFX11-TRUE16-NEXT: v_add_nc_u32_e32 v2, 0x7fff, v4
+; GFX11-TRUE16-NEXT: v_add_f32_e64 v4, 0x40c00000, s0
+; GFX11-TRUE16-NEXT: v_lshrrev_b32_e32 v13, 16, v3
+; GFX11-TRUE16-NEXT: v_or_b32_e32 v3, 0x400000, v5
+; GFX11-TRUE16-NEXT: v_cmp_u_f32_e32 vcc_lo, v5, v5
+; GFX11-TRUE16-NEXT: s_lshl_b32 s0, s23, 16
+; GFX11-TRUE16-NEXT: v_add_nc_u32_e32 v1, 0x7fff, v1
+; GFX11-TRUE16-NEXT: v_or_b32_e32 v7, 0x400000, v6
+; GFX11-TRUE16-NEXT: v_add_f32_e64 v8, 0x40c00000, s0
+; GFX11-TRUE16-NEXT: v_cndmask_b32_e32 v2, v2, v3, vcc_lo
+; GFX11-TRUE16-NEXT: v_bfe_u32 v3, v4, 16, 1
+; GFX11-TRUE16-NEXT: v_cmp_u_f32_e32 vcc_lo, v6, v6
+; GFX11-TRUE16-NEXT: v_mov_b16_e32 v13.h, v0.l
+; GFX11-TRUE16-NEXT: v_bfe_u32 v5, v8, 16, 1
+; GFX11-TRUE16-NEXT: s_and_b32 s0, s22, 0xffff0000
+; GFX11-TRUE16-NEXT: v_dual_cndmask_b32 v1, v1, v7 :: v_dual_add_nc_u32 v0, v3, v4
+; GFX11-TRUE16-NEXT: v_add_f32_e64 v3, 0x40c00000, s0
+; GFX11-TRUE16-NEXT: v_cmp_u_f32_e32 vcc_lo, v4, v4
+; GFX11-TRUE16-NEXT: s_lshl_b32 s0, s22, 16
+; GFX11-TRUE16-NEXT: s_delay_alu instid0(VALU_DEP_3)
+; GFX11-TRUE16-NEXT: v_add_nc_u32_e32 v0, 0x7fff, v0
+; GFX11-TRUE16-NEXT: v_lshrrev_b32_e32 v12, 16, v1
+; GFX11-TRUE16-NEXT: v_add_nc_u32_e32 v1, v5, v8
+; GFX11-TRUE16-NEXT: v_or_b32_e32 v5, 0x400000, v4
+; GFX11-TRUE16-NEXT: v_or_b32_e32 v6, 0x400000, v8
+; GFX11-TRUE16-NEXT: v_bfe_u32 v7, v3, 16, 1
+; GFX11-TRUE16-NEXT: v_add_f32_e64 v9, 0x40c00000, s0
+; GFX11-TRUE16-NEXT: v_add_nc_u32_e32 v1, 0x7fff, v1
+; GFX11-TRUE16-NEXT: v_cndmask_b32_e32 v0, v0, v5, vcc_lo
+; GFX11-TRUE16-NEXT: v_cmp_u_f32_e32 vcc_lo, v8, v8
+; GFX11-TRUE16-NEXT: v_lshrrev_b32_e32 v2, 16, v2
+; GFX11-TRUE16-NEXT: v_add_nc_u32_e32 v4, v7, v3
+; GFX11-TRUE16-NEXT: v_bfe_u32 v5, v9, 16, 1
+; GFX11-TRUE16-NEXT: s_and_b32 s0, s21, 0xffff0000
+; GFX11-TRUE16-NEXT: v_cndmask_b32_e32 v1, v1, v6, vcc_lo
+; GFX11-TRUE16-NEXT: v_mov_b16_e32 v12.h, v2.l
+; GFX11-TRUE16-NEXT: v_cmp_u_f32_e32 vcc_lo, v3, v3
+; GFX11-TRUE16-NEXT: v_add_nc_u32_e32 v2, v5, v9
+; GFX11-TRUE16-NEXT: v_add_f32_e64 v5, 0x40c00000, s0
+; GFX11-TRUE16-NEXT: v_lshrrev_b32_e32 v11, 16, v1
+; GFX11-TRUE16-NEXT: v_add_nc_u32_e32 v1, 0x7fff, v4
+; GFX11-TRUE16-NEXT: v_or_b32_e32 v4, 0x400000, v3
+; GFX11-TRUE16-NEXT: s_lshl_b32 s0, s21, 16
+; GFX11-TRUE16-NEXT: v_add_nc_u32_e32 v2, 0x7fff, v2
+; GFX11-TRUE16-NEXT: v_add_f32_e64 v6, 0x40c00000, s0
+; GFX11-TRUE16-NEXT: v_or_b32_e32 v7, 0x400000, v9
+; GFX11-TRUE16-NEXT: v_bfe_u32 v8, v5, 16, 1
+; GFX11-TRUE16-NEXT: v_cndmask_b32_e32 v1, v1, v4, vcc_lo
+; GFX11-TRUE16-NEXT: v_cmp_u_f32_e32 vcc_lo, v9, v9
+; GFX11-TRUE16-NEXT: v_lshrrev_b32_e32 v0, 16, v0
+; GFX11-TRUE16-NEXT: v_bfe_u32 v3, v6, 16, 1
+; GFX11-TRUE16-NEXT: v_add_nc_u32_e32 v4, v8, v5
+; GFX11-TRUE16-NEXT: s_and_b32 s0, s20, 0xffff0000
+; GFX11-TRUE16-NEXT: v_cndmask_b32_e32 v2, v2, v7, vcc_lo
+; GFX11-TRUE16-NEXT: v_mov_b16_e32 v11.h, v0.l
+; GFX11-TRUE16-NEXT: v_lshrrev_b32_e32 v0, 16, v1
+; GFX11-TRUE16-NEXT: v_add_nc_u32_e32 v1, v3, v6
+; GFX11-TRUE16-NEXT: v_or_b32_e32 v3, 0x400000, v5
+; GFX11-TRUE16-NEXT: v_lshrrev_b32_e32 v10, 16, v2
+; GFX11-TRUE16-NEXT: v_add_nc_u32_e32 v2, 0x7fff, v4
+; GFX11-TRUE16-NEXT: v_add_f32_e64 v4, 0x40c00000, s0
+; GFX11-TRUE16-NEXT: v_cmp_u_f32_e32 vcc_lo, v5, v5
+; GFX11-TRUE16-NEXT: s_lshl_b32 s0, s20, 16
+; GFX11-TRUE16-NEXT: v_add_nc_u32_e32 v1, 0x7fff, v1
+; GFX11-TRUE16-NEXT: v_or_b32_e32 v7, 0x400000, v6
+; GFX11-TRUE16-NEXT: v_add_f32_e64 v8, 0x40c00000, s0
+; GFX11-TRUE16-NEXT: v_cndmask_b32_e32 v2, v2, v3, vcc_lo
+; GFX11-TRUE16-NEXT: v_bfe_u32 v3, v4, 16, 1
+; GFX11-TRUE16-NEXT: v_cmp_u_f32_e32 vcc_lo, v6, v6
+; GFX11-TRUE16-NEXT: v_mov_b16_e32 v10.h, v0.l
+; GFX11-TRUE16-NEXT: v_bfe_u32 v5, v8, 16, 1
+; GFX11-TRUE16-NEXT: s_and_b32 s0, s19, 0xffff0000
+; GFX11-TRUE16-NEXT: v_dual_cndmask_b32 v1, v1, v7 :: v_dual_add_nc_u32 v0, v3, v4
+; GFX11-TRUE16-NEXT: v_add_f32_e64 v3, 0x40c00000, s0
+; GFX11-TRUE16-NEXT: v_cmp_u_f32_e32 vcc_lo, v4, v4
+; GFX11-TRUE16-NEXT: s_lshl_b32 s0, s19, 16
+; GFX11-TRUE16-NEXT: s_delay_alu instid0(VALU_DEP_3)
+; GFX11-TRUE16-NEXT: v_add_nc_u32_e32 v0, 0x7fff, v0
+; GFX11-TRUE16-NEXT: v_lshrrev_b32_e32 v9, 16, v1
+; GFX11-TRUE16-NEXT: v_add_nc_u32_e32 v1, v5, v8
+; GFX11-TRUE16-NEXT: v_or_b32_e32 v5, 0x400000, v4
+; GFX11-TRUE16-NEXT: v_or_b32_e32 v6, 0x400000, v8
+; GFX11-TRUE16-NEXT: v_bfe_u32 v7, v3, 16, 1
+; GFX11-TRUE16-NEXT: v_add_f32_e64 v16, 0x40c00000, s0
+; GFX11-TRUE16-NEXT: v_add_nc_u32_e32 v1, 0x7fff, v1
+; GFX11-TRUE16-NEXT: v_cndmask_b32_e32 v0, v0, v5, vcc_lo
+; GFX11-TRUE16-NEXT: v_cmp_u_f32_e32 vcc_lo, v8, v8
+; GFX11-TRUE16-NEXT: v_lshrrev_b32_e32 v2, 16, v2
+; GFX11-TRUE16-NEXT: v_add_nc_u32_e32 v4, v7, v3
+; GFX11-TRUE16-NEXT: v_bfe_u32 v5, v16, 16, 1
+; GFX11-TRUE16-NEXT: s_and_b32 s0, s18, 0xffff0000
+; GFX11-TRUE16-NEXT: v_cndmask_b32_e32 v1, v1, v6, vcc_lo
+; GFX11-TRUE16-NEXT: v_mov_b16_e32 v9.h, v2.l
+; GFX11-TRUE16-NEXT: v_cmp_u_f32_e32 vcc_lo, v3, v3
+; GFX11-TRUE16-NEXT: v_add_nc_u32_e32 v2, v5, v16
+; GFX11-TRUE16-NEXT: v_add_f32_e64 v5, 0x40c00000, s0
+; GFX11-TRUE16-NEXT: v_lshrrev_b32_e32 v8, 16, v1
+; GFX11-TRUE16-NEXT: v_add_nc_u32_e32 v1, 0x7fff, v4
+; GFX11-TRUE16-NEXT: v_or_b32_e32 v4, 0x400000, v3
+; GFX11-TRUE16-NEXT: s_lshl_b32 s0, s18, 16
+; GFX11-TRUE16-NEXT: v_add_nc_u32_e32 v2, 0x7fff, v2
+; GFX11-TRUE16-NEXT: v_add_f32_e64 v6, 0x40c00000, s0
+; GFX11-TRUE16-NEXT: v_or_b32_e32 v7, 0x400000, v16
+; GFX11-TRUE16-NEXT: v_bfe_u32 v17, v5, 16, 1
+; GFX11-TRUE16-NEXT: v_cndmask_b32_e32 v1, v1, v4, vcc_lo
+; GFX11-TRUE16-NEXT: v_cmp_u_f32_e32 vcc_lo, v16, v16
+; GFX11-TRUE16-NEXT: v_lshrrev_b32_e32 v0, 16, v0
+; GFX11-TRUE16-NEXT: v_bfe_u32 v3, v6, 16, 1
+; GFX11-TRUE16-NEXT: v_add_nc_u32_e32 v4, v17, v5
+; GFX11-TRUE16-NEXT: s_and_b32 s0, s17, 0xffff0000
+; GFX11-TRUE16-NEXT: v_cndmask_b32_e32 v2, v2, v7, vcc_lo
+; GFX11-TRUE16-NEXT: v_mov_b16_e32 v8.h, v0.l
+; GFX11-TRUE16-NEXT: v_add_nc_u32_e32 v0, v3, v6
+; GFX11-TRUE16-NEXT: v_or_b32_e32 v3, 0x400000, v5
+; GFX11-TRUE16-NEXT: v_cmp_u_f32_e32 vcc_lo, v5, v5
+; GFX11-TRUE16-NEXT: v_lshrrev_b32_e32 v7, 16, v2
+; GFX11-TRUE16-NEXT: v_add_nc_u32_e32 v2, 0x7fff, v4
+; GFX11-TRUE16-NEXT: v_add_f32_e64 v4, 0x40c00000, s0
+; GFX11-TRUE16-NEXT: v_add_nc_u32_e32 v0, 0x7fff, v0
+; GFX11-TRUE16-NEXT: v_or_b32_e32 v17, 0x400000, v6
+; GFX11-TRUE16-NEXT: v_add_f32_e64 v16, 0x40c00000, s1
+; GFX11-TRUE16-NEXT: v_cndmask_b32_e32 v2, v2, v3, vcc_lo
+; GFX11-TRUE16-NEXT: v_cmp_u_f32_e32 vcc_lo, v6, v6
+; GFX11-TRUE16-NEXT: v_bfe_u32 v3, v4, 16, 1
+; GFX11-TRUE16-NEXT: v_lshrrev_b32_e32 v1, 16, v1
+; GFX11-TRUE16-NEXT: v_bfe_u32 v5, v16, 16, 1
+; GFX11-TRUE16-NEXT: v_lshrrev_b32_e32 v2, 16, v2
+; GFX11-TRUE16-NEXT: s_delay_alu instid0(VALU_DEP_4) | instskip(SKIP_3) | instid1(VALU_DEP_3)
+; GFX11-TRUE16-NEXT: v_dual_cndmask_b32 v0, v0, v17 :: v_dual_add_nc_u32 v3, v3, v4
+; GFX11-TRUE16-NEXT: s_and_b32 s0, s16, 0xffff0000
+; GFX11-TRUE16-NEXT: v_cmp_u_f32_e32 vcc_lo, v4, v4
+; GFX11-TRUE16-NEXT: v_mov_b16_e32 v7.h, v1.l
+; GFX11-TRUE16-NEXT: v_lshrrev_b32_e32 v6, 16, v0
+; GFX11-TRUE16-NEXT: v_mov_b16_e32 v6.h, v2.l
+; GFX11-TRUE16-NEXT: v_add_nc_u32_e32 v0, 0x7fff, v3
+; GFX11-TRUE16-NEXT: v_or_b32_e32 v2, 0x400000, v4
+; GFX11-TRUE16-NEXT: v_add_f32_e64 v3, 0x40c00000, s0
+; GFX11-TRUE16-NEXT: v_add_nc_u32_e32 v1, v5, v16
+; GFX11-TRUE16-NEXT: s_lshl_b32 s1, s16, 16
+; GFX11-TRUE16-NEXT: v_or_b32_e32 v5, 0x400000, v16
+; GFX11-TRUE16-NEXT: v_add_f32_e64 v17, 0x40c00000, s1
+; GFX11-TRUE16-NEXT: v_cndmask_b32_e32 v0, v0, v2, vcc_lo
+; GFX11-TRUE16-NEXT: v_bfe_u32 v2, v3, 16, 1
+; GFX11-TRUE16-NEXT: v_add_nc_u32_e32 v1, 0x7fff, v1
+; GFX11-TRUE16-NEXT: v_cmp_u_f32_e32 vcc_lo, v16, v16
+; GFX11-TRUE16-NEXT: v_bfe_u32 v4, v17, 16, 1
+; GFX11-TRUE16-NEXT: s_and_b32 s0, s15, 0xffff0000
+; GFX11-TRUE16-NEXT: v_add_nc_u32_e32 v2, v2, v3
+; GFX11-TRUE16-NEXT: v_or_b32_e32 v16, 0x400000, v3
+; GFX11-TRUE16-NEXT: v_cndmask_b32_e32 v1, v1, v5, vcc_lo
+; GFX11-TRUE16-NEXT: v_add_nc_u32_e32 v4, v4, v17
+; GFX11-TRUE16-NEXT: v_cmp_u_f32_e32 vcc_lo, v3, v3
+; GFX11-TRUE16-NEXT: v_add_nc_u32_e32 v2, 0x7fff, v2
+; GFX11-TRUE16-NEXT: v_or_b32_e32 v18, 0x400000, v17
+; GFX11-TRUE16-NEXT: v_lshrrev_b32_e32 v5, 16, v1
+; GFX11-TRUE16-NEXT: v_add_f32_e64 v1, 0x40c00000, s0
+; GFX11-TRUE16-NEXT: v_add_nc_u32_e32 v4, 0x7fff, v4
+; GFX11-TRUE16-NEXT: v_cndmask_b32_e32 v2, v2, v16, vcc_lo
+; GFX11-TRUE16-NEXT: v_cmp_u_f32_e32 vcc_lo, v17, v17
+; GFX11-TRUE16-NEXT: v_lshrrev_b32_e32 v0, 16, v0
+; GFX11-TRUE16-NEXT: v_bfe_u32 v19, v1, 16, 1
+; GFX11-TRUE16-NEXT: s_lshl_b32 s0, s15, 16
+; GFX11-TRUE16-NEXT: v_lshrrev_b32_e32 v2, 16, v2
+; GFX11-TRUE16-NEXT: v_cndmask_b32_e32 v3, v4, v18, vcc_lo
+; GFX11-TRUE16-NEXT: v_mov_b16_e32 v5.h, v0.l
+; GFX11-TRUE16-NEXT: v_add_nc_u32_e32 v16, v19, v1
+; GFX11-TRUE16-NEXT: v_add_f32_e64 v0, 0x40c00000, s0
+; GFX11-TRUE16-NEXT: s_and_b32 s0, s14, 0xffff0000
+; GFX11-TRUE16-NEXT: v_lshrrev_b32_e32 v4, 16, v3
+; GFX11-TRUE16-NEXT: v_mov_b16_e32 v4.h, v2.l
+; GFX11-TRUE16-NEXT: v_add_nc_u32_e32 v3, 0x7fff, v16
+; GFX11-TRUE16-NEXT: v_or_b32_e32 v16, 0x400000, v1
+; GFX11-TRUE16-NEXT: v_bfe_u32 v17, v0, 16, 1
+; GFX11-TRUE16-NEXT: v_add_f32_e64 v2, 0x40c00000, s0
+; GFX11-TRUE16-NEXT: v_cmp_u_f32_e32 vcc_lo, v1, v1
+; GFX11-TRUE16-NEXT: s_lshl_b32 s0, s14, 16
+; GFX11-TRUE16-NEXT: v_or_b32_e32 v18, 0x400000, v0
+; GFX11-TRUE16-NEXT: s_and_b32 s1, s12, 0xffff0000
+; GFX11-TRUE16-NEXT: v_or_b32_e32 v20, 0x400000, v2
+; GFX11-TRUE16-NEXT: v_cndmask_b32_e32 v1, v3, v16, vcc_lo
+; GFX11-TRUE16-NEXT: v_add_nc_u32_e32 v3, v17, v0
+; GFX11-TRUE16-NEXT: v_bfe_u32 v16, v2, 16, 1
+; GFX11-TRUE16-NEXT: v_add_f32_e64 v17, 0x40c00000, s0
+; GFX11-TRUE16-NEXT: v_cmp_u_f32_e32 vcc_lo, v0, v0
+; GFX11-TRUE16-NEXT: s_and_b32 s0, s13, 0xffff0000
+; GFX11-TRUE16-NEXT: v_add_nc_u32_e32 v3, 0x7fff, v3
+; GFX11-TRUE16-NEXT: v_add_nc_u32_e32 v16, v16, v2
+; GFX11-TRUE16-NEXT: v_bfe_u32 v19, v17, 16, 1
+; GFX11-TRUE16-NEXT: v_lshrrev_b32_e32 v1, 16, v1
+; GFX11-TRUE16-NEXT: s_delay_alu instid0(VALU_DEP_4)
+; GFX11-TRUE16-NEXT: v_cndmask_b32_e32 v0, v3, v18, vcc_lo
+; GFX11-TRUE16-NEXT: v_add_f32_e64 v18, 0x40c00000, s0
+; GFX11-TRUE16-NEXT: v_add_nc_u32_e32 v16, 0x7fff, v16
+; GFX11-TRUE16-NEXT: v_add_nc_u32_e32 v19, v19, v17
+; GFX11-TRUE16-NEXT: v_cmp_u_f32_e32 vcc_lo, v2, v2
+; GFX11-TRUE16-NEXT: v_lshrrev_b32_e32 v3, 16, v0
+; GFX11-TRUE16-NEXT: v_bfe_u32 v0, v18, 16, 1
+; GFX11-TRUE16-NEXT: s_lshl_b32 s0, s13, 16
+; GFX11-TRUE16-NEXT: v_mov_b16_e32 v3.h, v1.l
+; GFX11-TRUE16-NEXT: v_cndmask_b32_e32 v2, v16, v20, vcc_lo
+; GFX11-TRUE16-NEXT: v_add_nc_u32_e32 v16, 0x7fff, v19
+; GFX11-TRUE16-NEXT: v_or_b32_e32 v19, 0x400000, v17
+; GFX11-TRUE16-NEXT: v_cmp_u_f32_e32 vcc_lo, v17, v17
+; GFX11-TRUE16-NEXT: v_add_nc_u32_e32 v0, v0, v18
+; GFX11-TRUE16-NEXT: v_lshrrev_b32_e32 v1, 16, v2
+; GFX11-TRUE16-NEXT: v_add_f32_e64 v17, 0x40c00000, s1
+; GFX11-TRUE16-NEXT: v_cndmask_b32_e32 v2, v16, v19, vcc_lo
+; GFX11-TRUE16-NEXT: v_add_f32_e64 v16, 0x40c00000, s0
+; GFX11-TRUE16-NEXT: v_add_nc_u32_e32 v0, 0x7fff, v0
+; GFX11-TRUE16-NEXT: v_or_b32_e32 v19, 0x400000, v18
+; GFX11-TRUE16-NEXT: v_bfe_u32 v22, v17, 16, 1
+; GFX11-TRUE16-NEXT: v_cmp_u_f32_e32 vcc_lo, v18, v18
+; GFX11-TRUE16-NEXT: v_bfe_u32 v21, v16, 16, 1
+; GFX11-TRUE16-NEXT: s_lshl_b32 s0, s12, 16
+; GFX11-TRUE16-NEXT: v_or_b32_e32 v23, 0x400000, v16
+; GFX11-TRUE16-NEXT: v_add_f32_e64 v20, 0x40c00000, s0
+; GFX11-TRUE16-NEXT: s_delay_alu instid0(VALU_DEP_3) | instskip(SKIP_2) | instid1(VALU_DEP_4)
+; GFX11-TRUE16-NEXT: v_dual_cndmask_b32 v0, v0, v19 :: v_dual_add_nc_u32 v19, v21, v16
+; GFX11-TRUE16-NEXT: v_add_nc_u32_e32 v21, v22, v17
+; GFX11-TRUE16-NEXT: v_or_b32_e32 v22, 0x400000, v17
+; GFX11-TRUE16-NEXT: v_bfe_u32 v18, v20, 16, 1
+; GFX11-TRUE16-NEXT: v_cmp_u_f32_e32 vcc_lo, v17, v17
+; GFX11-TRUE16-NEXT: v_add_nc_u32_e32 v19, 0x7fff, v19
+; GFX11-TRUE16-NEXT: v_add_nc_u32_e32 v21, 0x7fff, v21
+; GFX11-TRUE16-NEXT: v_or_b32_e32 v24, 0x400000, v20
+; GFX11-TRUE16-NEXT: v_add_nc_u32_e32 v18, v18, v20
+; GFX11-TRUE16-NEXT: v_lshrrev_b32_e32 v0, 16, v0
+; GFX11-TRUE16-NEXT: v_lshrrev_b32_e32 v2, 16, v2
+; GFX11-TRUE16-NEXT: v_cndmask_b32_e32 v17, v21, v22, vcc_lo
+; GFX11-TRUE16-NEXT: v_cmp_u_f32_e32 vcc_lo, v16, v16
+; GFX11-TRUE16-NEXT: v_add_nc_u32_e32 v18, 0x7fff, v18
+; GFX11-TRUE16-NEXT: v_mov_b16_e32 v2.h, v1.l
+; GFX11-TRUE16-NEXT: s_delay_alu instid0(VALU_DEP_4) | instskip(SKIP_2) | instid1(VALU_DEP_2)
+; GFX11-TRUE16-NEXT: v_lshrrev_b32_e32 v17, 16, v17
+; GFX11-TRUE16-NEXT: v_cndmask_b32_e32 v16, v19, v23, vcc_lo
+; GFX11-TRUE16-NEXT: v_cmp_u_f32_e32 vcc_lo, v20, v20
+; GFX11-TRUE16-NEXT: v_lshrrev_b32_e32 v1, 16, v16
+; GFX11-TRUE16-NEXT: v_cndmask_b32_e32 v18, v18, v24, vcc_lo
+; GFX11-TRUE16-NEXT: v_mov_b16_e32 v1.h, v0.l
+; GFX11-TRUE16-NEXT: s_delay_alu instid0(VALU_DEP_2)
+; GFX11-TRUE16-NEXT: v_lshrrev_b32_e32 v0, 16, v18
+; GFX11-TRUE16-NEXT: v_mov_b16_e32 v0.h, v17.l
+; GFX11-TRUE16-NEXT: s_setpc_b64 s[30:31]
+; GFX11-TRUE16-NEXT: .LBB23_3:
+; GFX11-TRUE16-NEXT: s_branch .LBB23_2
+; GFX11-TRUE16-NEXT: .LBB23_4:
+; GFX11-TRUE16-NEXT: v_dual_mov_b32 v0, s12 :: v_dual_mov_b32 v1, s13
+; GFX11-TRUE16-NEXT: v_dual_mov_b32 v2, s14 :: v_dual_mov_b32 v3, s15
+; GFX11-TRUE16-NEXT: v_dual_mov_b32 v4, s16 :: v_dual_mov_b32 v5, s17
+; GFX11-TRUE16-NEXT: v_dual_mov_b32 v6, s18 :: v_dual_mov_b32 v7, s19
+; GFX11-TRUE16-NEXT: v_dual_mov_b32 v8, s20 :: v_dual_mov_b32 v9, s21
+; GFX11-TRUE16-NEXT: v_dual_mov_b32 v10, s22 :: v_dual_mov_b32 v11, s23
+; GFX11-TRUE16-NEXT: v_dual_mov_b32 v12, s24 :: v_dual_mov_b32 v13, s25
+; GFX11-TRUE16-NEXT: v_dual_mov_b32 v14, s26 :: v_dual_mov_b32 v15, s27
+; GFX11-TRUE16-NEXT: s_setpc_b64 s[30:31]
+;
+; GFX11-FAKE16-LABEL: bitcast_v32bf16_to_v16i32_scalar:
+; GFX11-FAKE16: ; %bb.0:
+; GFX11-FAKE16-NEXT: s_waitcnt vmcnt(0) expcnt(0) lgkmcnt(0)
+; GFX11-FAKE16-NEXT: s_mov_b32 s15, s3
+; GFX11-FAKE16-NEXT: s_mov_b32 s14, s2
+; GFX11-FAKE16-NEXT: s_mov_b32 s13, s1
+; GFX11-FAKE16-NEXT: s_mov_b32 s12, s0
+; GFX11-FAKE16-NEXT: s_cmp_lg_u32 s28, 0
+; GFX11-FAKE16-NEXT: s_mov_b32 s0, 0
+; GFX11-FAKE16-NEXT: s_cbranch_scc0 .LBB23_3
+; GFX11-FAKE16-NEXT: ; %bb.1: ; %Flow
+; GFX11-FAKE16-NEXT: s_and_not1_b32 vcc_lo, exec_lo, s0
+; GFX11-FAKE16-NEXT: s_cbranch_vccnz .LBB23_4
+; GFX11-FAKE16-NEXT: .LBB23_2: ; %cmp.true
+; GFX11-FAKE16-NEXT: s_and_b32 s1, s27, 0xffff0000
+; GFX11-FAKE16-NEXT: s_lshl_b32 s0, s27, 16
+; GFX11-FAKE16-NEXT: v_add_f32_e64 v1, 0x40c00000, s1
+; GFX11-FAKE16-NEXT: v_add_f32_e64 v0, 0x40c00000, s0
+; GFX11-FAKE16-NEXT: s_and_b32 s0, s26, 0xffff0000
+; GFX11-FAKE16-NEXT: s_lshl_b32 s2, s26, 16
+; GFX11-FAKE16-NEXT: v_add_f32_e64 v3, 0x40c00000, s0
+; GFX11-FAKE16-NEXT: v_bfe_u32 v4, v1, 16, 1
+; GFX11-FAKE16-NEXT: v_bfe_u32 v2, v0, 16, 1
+; GFX11-FAKE16-NEXT: v_add_f32_e64 v5, 0x40c00000, s2
+; GFX11-FAKE16-NEXT: v_or_b32_e32 v7, 0x400000, v1
+; GFX11-FAKE16-NEXT: v_or_b32_e32 v8, 0x400000, v0
+; GFX11-FAKE16-NEXT: v_add_nc_u32_e32 v4, v4, v1
+; GFX11-FAKE16-NEXT: v_bfe_u32 v9, v3, 16, 1
+; GFX11-FAKE16-NEXT: v_bfe_u32 v10, v5, 16, 1
+; GFX11-FAKE16-NEXT: v_cmp_u_f32_e32 vcc_lo, v0, v0
+; GFX11-FAKE16-NEXT: s_and_b32 s1, s25, 0xffff0000
+; GFX11-FAKE16-NEXT: v_add_nc_u32_e32 v4, 0x7fff, v4
+; GFX11-FAKE16-NEXT: v_add_nc_u32_e32 v2, v2, v0
+; GFX11-FAKE16-NEXT: v_add_f32_e64 v6, 0x40c00000, s1
+; GFX11-FAKE16-NEXT: s_lshl_b32 s3, s25, 16
+; GFX11-FAKE16-NEXT: s_and_b32 s0, s24, 0xffff0000
+; GFX11-FAKE16-NEXT: s_lshl_b32 s1, s24, 16
+; GFX11-FAKE16-NEXT: v_add_nc_u32_e32 v2, 0x7fff, v2
+; GFX11-FAKE16-NEXT: s_delay_alu instid0(VALU_DEP_1)
+; GFX11-FAKE16-NEXT: v_cndmask_b32_e32 v0, v2, v8, vcc_lo
+; GFX11-FAKE16-NEXT: v_add_nc_u32_e32 v2, v9, v3
+; GFX11-FAKE16-NEXT: v_add_nc_u32_e32 v8, v10, v5
+; GFX11-FAKE16-NEXT: v_cmp_u_f32_e32 vcc_lo, v1, v1
+; GFX11-FAKE16-NEXT: v_or_b32_e32 v9, 0x400000, v3
+; GFX11-FAKE16-NEXT: v_lshrrev_b32_e32 v0, 16, v0
+; GFX11-FAKE16-NEXT: v_dual_cndmask_b32 v1, v4, v7 :: v_dual_add_nc_u32 v2, 0x7fff, v2
+; GFX11-FAKE16-NEXT: v_or_b32_e32 v4, 0x400000, v5
+; GFX11-FAKE16-NEXT: v_add_nc_u32_e32 v7, 0x7fff, v8
+; GFX11-FAKE16-NEXT: v_cmp_u_f32_e32 vcc_lo, v5, v5
+; GFX11-FAKE16-NEXT: v_add_f32_e64 v8, 0x40c00000, s3
+; GFX11-FAKE16-NEXT: v_bfe_u32 v5, v6, 16, 1
+; GFX11-FAKE16-NEXT: v_lshrrev_b32_e32 v1, 16, v1
+; GFX11-FAKE16-NEXT: v_and_b32_e32 v0, 0xffff, v0
+; GFX11-FAKE16-NEXT: v_cndmask_b32_e32 v4, v7, v4, vcc_lo
+; GFX11-FAKE16-NEXT: v_cmp_u_f32_e32 vcc_lo, v3, v3
+; GFX11-FAKE16-NEXT: v_bfe_u32 v7, v8, 16, 1
+; GFX11-FAKE16-NEXT: s_delay_alu instid0(VALU_DEP_4) | instskip(NEXT) | instid1(VALU_DEP_4)
+; GFX11-FAKE16-NEXT: v_lshl_or_b32 v15, v1, 16, v0
+; GFX11-FAKE16-NEXT: v_lshrrev_b32_e32 v3, 16, v4
+; GFX11-FAKE16-NEXT: v_cndmask_b32_e32 v2, v2, v9, vcc_lo
+; GFX11-FAKE16-NEXT: v_add_nc_u32_e32 v4, v5, v6
+; GFX11-FAKE16-NEXT: v_or_b32_e32 v9, 0x400000, v6
+; GFX11-FAKE16-NEXT: v_cmp_u_f32_e32 vcc_lo, v8, v8
+; GFX11-FAKE16-NEXT: v_and_b32_e32 v1, 0xffff, v3
+; GFX11-FAKE16-NEXT: v_lshrrev_b32_e32 v0, 16, v2
+; GFX11-FAKE16-NEXT: v_add_nc_u32_e32 v2, 0x7fff, v4
+; GFX11-FAKE16-NEXT: v_add_nc_u32_e32 v5, v7, v8
+; GFX11-FAKE16-NEXT: v_or_b32_e32 v4, 0x400000, v8
+; GFX11-FAKE16-NEXT: v_add_f32_e64 v7, 0x40c00000, s1
+; GFX11-FAKE16-NEXT: s_lshl_b32 s1, s23, 16
+; GFX11-FAKE16-NEXT: v_lshl_or_b32 v14, v0, 16, v1
+; GFX11-FAKE16-NEXT: v_add_nc_u32_e32 v3, 0x7fff, v5
+; GFX11-FAKE16-NEXT: v_add_f32_e64 v5, 0x40c00000, s0
+; GFX11-FAKE16-NEXT: v_bfe_u32 v8, v7, 16, 1
+; GFX11-FAKE16-NEXT: s_and_b32 s0, s23, 0xffff0000
+; GFX11-FAKE16-NEXT: s_delay_alu instid0(VALU_DEP_3) | instskip(NEXT) | instid1(VALU_DEP_3)
+; GFX11-FAKE16-NEXT: v_cndmask_b32_e32 v3, v3, v4, vcc_lo
+; GFX11-FAKE16-NEXT: v_bfe_u32 v4, v5, 16, 1
+; GFX11-FAKE16-NEXT: v_cmp_u_f32_e32 vcc_lo, v6, v6
+; GFX11-FAKE16-NEXT: v_add_nc_u32_e32 v6, v8, v7
+; GFX11-FAKE16-NEXT: v_add_f32_e64 v8, 0x40c00000, s1
+; GFX11-FAKE16-NEXT: v_lshrrev_b32_e32 v3, 16, v3
+; GFX11-FAKE16-NEXT: v_add_nc_u32_e32 v4, v4, v5
+; GFX11-FAKE16-NEXT: v_cndmask_b32_e32 v2, v2, v9, vcc_lo
+; GFX11-FAKE16-NEXT: v_or_b32_e32 v9, 0x400000, v5
+; GFX11-FAKE16-NEXT: v_cmp_u_f32_e32 vcc_lo, v7, v7
+; GFX11-FAKE16-NEXT: s_lshl_b32 s1, s22, 16
+; GFX11-FAKE16-NEXT: s_delay_alu instid0(VALU_DEP_3)
+; GFX11-FAKE16-NEXT: v_lshrrev_b32_e32 v0, 16, v2
+; GFX11-FAKE16-NEXT: v_add_nc_u32_e32 v2, 0x7fff, v4
+; GFX11-FAKE16-NEXT: v_and_b32_e32 v1, 0xffff, v3
+; GFX11-FAKE16-NEXT: v_add_nc_u32_e32 v3, 0x7fff, v6
+; GFX11-FAKE16-NEXT: v_or_b32_e32 v4, 0x400000, v7
+; GFX11-FAKE16-NEXT: v_add_f32_e64 v6, 0x40c00000, s0
+; GFX11-FAKE16-NEXT: v_bfe_u32 v7, v8, 16, 1
+; GFX11-FAKE16-NEXT: s_and_b32 s0, s22, 0xffff0000
+; GFX11-FAKE16-NEXT: s_delay_alu instid0(VALU_DEP_3) | instskip(NEXT) | instid1(VALU_DEP_3)
+; GFX11-FAKE16-NEXT: v_cndmask_b32_e32 v3, v3, v4, vcc_lo
+; GFX11-FAKE16-NEXT: v_bfe_u32 v4, v6, 16, 1
+; GFX11-FAKE16-NEXT: v_cmp_u_f32_e32 vcc_lo, v5, v5
+; GFX11-FAKE16-NEXT: v_add_nc_u32_e32 v5, v7, v8
+; GFX11-FAKE16-NEXT: v_add_f32_e64 v7, 0x40c00000, s1
+; GFX11-FAKE16-NEXT: v_lshrrev_b32_e32 v3, 16, v3
+; GFX11-FAKE16-NEXT: v_add_nc_u32_e32 v4, v4, v6
+; GFX11-FAKE16-NEXT: v_cndmask_b32_e32 v2, v2, v9, vcc_lo
+; GFX11-FAKE16-NEXT: v_lshl_or_b32 v13, v0, 16, v1
+; GFX11-FAKE16-NEXT: v_cmp_u_f32_e32 vcc_lo, v8, v8
+; GFX11-FAKE16-NEXT: v_and_b32_e32 v1, 0xffff, v3
+; GFX11-FAKE16-NEXT: v_add_nc_u32_e32 v3, 0x7fff, v5
+; GFX11-FAKE16-NEXT: v_lshrrev_b32_e32 v0, 16, v2
+; GFX11-FAKE16-NEXT: v_add_nc_u32_e32 v2, 0x7fff, v4
+; GFX11-FAKE16-NEXT: v_or_b32_e32 v4, 0x400000, v8
+; GFX11-FAKE16-NEXT: v_add_f32_e64 v5, 0x40c00000, s0
+; GFX11-FAKE16-NEXT: v_or_b32_e32 v9, 0x400000, v6
+; GFX11-FAKE16-NEXT: v_bfe_u32 v8, v7, 16, 1
+; GFX11-FAKE16-NEXT: s_and_b32 s0, s21, 0xffff0000
+; GFX11-FAKE16-NEXT: v_cndmask_b32_e32 v3, v3, v4, vcc_lo
+; GFX11-FAKE16-NEXT: v_bfe_u32 v4, v5, 16, 1
+; GFX11-FAKE16-NEXT: v_cmp_u_f32_e32 vcc_lo, v6, v6
+; GFX11-FAKE16-NEXT: v_add_nc_u32_e32 v6, v8, v7
+; GFX11-FAKE16-NEXT: s_lshl_b32 s1, s21, 16
+; GFX11-FAKE16-NEXT: v_lshrrev_b32_e32 v3, 16, v3
+; GFX11-FAKE16-NEXT: v_add_nc_u32_e32 v4, v4, v5
+; GFX11-FAKE16-NEXT: v_cndmask_b32_e32 v2, v2, v9, vcc_lo
+; GFX11-FAKE16-NEXT: v_lshl_or_b32 v12, v0, 16, v1
+; GFX11-FAKE16-NEXT: v_add_f32_e64 v8, 0x40c00000, s1
+; GFX11-FAKE16-NEXT: v_and_b32_e32 v1, 0xffff, v3
+; GFX11-FAKE16-NEXT: v_add_nc_u32_e32 v3, 0x7fff, v6
+; GFX11-FAKE16-NEXT: v_lshrrev_b32_e32 v0, 16, v2
+; GFX11-FAKE16-NEXT: v_add_nc_u32_e32 v2, 0x7fff, v4
+; GFX11-FAKE16-NEXT: v_or_b32_e32 v4, 0x400000, v7
+; GFX11-FAKE16-NEXT: v_add_f32_e64 v6, 0x40c00000, s0
+; GFX11-FAKE16-NEXT: v_cmp_u_f32_e32 vcc_lo, v7, v7
+; GFX11-FAKE16-NEXT: v_or_b32_e32 v9, 0x400000, v5
+; GFX11-FAKE16-NEXT: v_bfe_u32 v7, v8, 16, 1
+; GFX11-FAKE16-NEXT: s_and_b32 s0, s20, 0xffff0000
+; GFX11-FAKE16-NEXT: s_lshl_b32 s1, s20, 16
+; GFX11-FAKE16-NEXT: v_cndmask_b32_e32 v3, v3, v4, vcc_lo
+; GFX11-FAKE16-NEXT: v_bfe_u32 v4, v6, 16, 1
+; GFX11-FAKE16-NEXT: v_cmp_u_f32_e32 vcc_lo, v5, v5
+; GFX11-FAKE16-NEXT: v_add_nc_u32_e32 v5, v7, v8
+; GFX11-FAKE16-NEXT: v_lshl_or_b32 v11, v0, 16, v1
+; GFX11-FAKE16-NEXT: v_lshrrev_b32_e32 v3, 16, v3
+; GFX11-FAKE16-NEXT: v_add_nc_u32_e32 v4, v4, v6
+; GFX11-FAKE16-NEXT: v_cndmask_b32_e32 v2, v2, v9, vcc_lo
+; GFX11-FAKE16-NEXT: v_add_f32_e64 v7, 0x40c00000, s1
+; GFX11-FAKE16-NEXT: v_cmp_u_f32_e32 vcc_lo, v8, v8
+; GFX11-FAKE16-NEXT: v_and_b32_e32 v1, 0xffff, v3
+; GFX11-FAKE16-NEXT: v_add_nc_u32_e32 v3, 0x7fff, v5
+; GFX11-FAKE16-NEXT: v_lshrrev_b32_e32 v0, 16, v2
+; GFX11-FAKE16-NEXT: v_add_nc_u32_e32 v2, 0x7fff, v4
+; GFX11-FAKE16-NEXT: v_or_b32_e32 v4, 0x400000, v8
+; GFX11-FAKE16-NEXT: v_add_f32_e64 v5, 0x40c00000, s0
+; GFX11-FAKE16-NEXT: v_or_b32_e32 v9, 0x400000, v6
+; GFX11-FAKE16-NEXT: v_bfe_u32 v8, v7, 16, 1
+; GFX11-FAKE16-NEXT: v_lshl_or_b32 v10, v0, 16, v1
+; GFX11-FAKE16-NEXT: v_cndmask_b32_e32 v3, v3, v4, vcc_lo
+; GFX11-FAKE16-NEXT: v_bfe_u32 v4, v5, 16, 1
+; GFX11-FAKE16-NEXT: v_cmp_u_f32_e32 vcc_lo, v6, v6
+; GFX11-FAKE16-NEXT: v_add_nc_u32_e32 v0, v8, v7
+; GFX11-FAKE16-NEXT: s_lshl_b32 s0, s19, 16
+; GFX11-FAKE16-NEXT: v_lshrrev_b32_e32 v3, 16, v3
+; GFX11-FAKE16-NEXT: v_add_nc_u32_e32 v4, v4, v5
+; GFX11-FAKE16-NEXT: v_cndmask_b32_e32 v2, v2, v9, vcc_lo
+; GFX11-FAKE16-NEXT: v_add_nc_u32_e32 v0, 0x7fff, v0
+; GFX11-FAKE16-NEXT: v_or_b32_e32 v6, 0x400000, v7
+; GFX11-FAKE16-NEXT: v_cmp_u_f32_e32 vcc_lo, v7, v7
+; GFX11-FAKE16-NEXT: v_or_b32_e32 v9, 0x400000, v5
+; GFX11-FAKE16-NEXT: v_lshrrev_b32_e32 v1, 16, v2
+; GFX11-FAKE16-NEXT: v_and_b32_e32 v2, 0xffff, v3
+; GFX11-FAKE16-NEXT: v_add_nc_u32_e32 v3, 0x7fff, v4
+; GFX11-FAKE16-NEXT: v_add_f32_e64 v4, 0x40c00000, s0
+; GFX11-FAKE16-NEXT: s_and_b32 s0, s19, 0xffff0000
+; GFX11-FAKE16-NEXT: v_cndmask_b32_e32 v0, v0, v6, vcc_lo
+; GFX11-FAKE16-NEXT: v_add_f32_e64 v8, 0x40c00000, s0
+; GFX11-FAKE16-NEXT: v_cmp_u_f32_e32 vcc_lo, v5, v5
+; GFX11-FAKE16-NEXT: v_bfe_u32 v16, v4, 16, 1
+; GFX11-FAKE16-NEXT: s_and_b32 s0, s18, 0xffff0000
+; GFX11-FAKE16-NEXT: v_lshrrev_b32_e32 v0, 16, v0
+; GFX11-FAKE16-NEXT: v_bfe_u32 v6, v8, 16, 1
+; GFX11-FAKE16-NEXT: v_cndmask_b32_e32 v3, v3, v9, vcc_lo
+; GFX11-FAKE16-NEXT: v_add_nc_u32_e32 v5, v16, v4
+; GFX11-FAKE16-NEXT: v_lshl_or_b32 v9, v1, 16, v2
+; GFX11-FAKE16-NEXT: v_cmp_u_f32_e32 vcc_lo, v4, v4
+; GFX11-FAKE16-NEXT: v_add_nc_u32_e32 v1, v6, v8
+; GFX11-FAKE16-NEXT: v_lshrrev_b32_e32 v2, 16, v3
+; GFX11-FAKE16-NEXT: v_add_nc_u32_e32 v3, 0x7fff, v5
+; GFX11-FAKE16-NEXT: v_or_b32_e32 v5, 0x400000, v4
+; GFX11-FAKE16-NEXT: v_or_b32_e32 v6, 0x400000, v8
+; GFX11-FAKE16-NEXT: v_add_nc_u32_e32 v1, 0x7fff, v1
+; GFX11-FAKE16-NEXT: v_add_f32_e64 v4, 0x40c00000, s0
+; GFX11-FAKE16-NEXT: s_lshl_b32 s1, s18, 16
+; GFX11-FAKE16-NEXT: v_cndmask_b32_e32 v3, v3, v5, vcc_lo
+; GFX11-FAKE16-NEXT: v_cmp_u_f32_e32 vcc_lo, v8, v8
+; GFX11-FAKE16-NEXT: v_and_b32_e32 v0, 0xffff, v0
+; GFX11-FAKE16-NEXT: v_add_f32_e64 v5, 0x40c00000, s1
+; GFX11-FAKE16-NEXT: s_lshl_b32 s0, s17, 16
+; GFX11-FAKE16-NEXT: v_lshrrev_b32_e32 v3, 16, v3
+; GFX11-FAKE16-NEXT: v_cndmask_b32_e32 v1, v1, v6, vcc_lo
+; GFX11-FAKE16-NEXT: v_bfe_u32 v6, v4, 16, 1
+; GFX11-FAKE16-NEXT: v_bfe_u32 v7, v5, 16, 1
+; GFX11-FAKE16-NEXT: v_lshl_or_b32 v8, v2, 16, v0
+; GFX11-FAKE16-NEXT: s_and_b32 s1, s17, 0xffff0000
+; GFX11-FAKE16-NEXT: v_lshrrev_b32_e32 v0, 16, v1
+; GFX11-FAKE16-NEXT: v_add_nc_u32_e32 v2, v6, v4
+; GFX11-FAKE16-NEXT: v_add_f32_e64 v6, 0x40c00000, s0
+; GFX11-FAKE16-NEXT: v_and_b32_e32 v1, 0xffff, v3
+; GFX11-FAKE16-NEXT: v_add_nc_u32_e32 v3, v7, v5
+; GFX11-FAKE16-NEXT: v_add_f32_e64 v7, 0x40c00000, s1
+; GFX11-FAKE16-NEXT: v_add_nc_u32_e32 v2, 0x7fff, v2
+; GFX11-FAKE16-NEXT: v_or_b32_e32 v16, 0x400000, v4
+; GFX11-FAKE16-NEXT: v_bfe_u32 v17, v6, 16, 1
+; GFX11-FAKE16-NEXT: v_cmp_u_f32_e32 vcc_lo, v4, v4
+; GFX11-FAKE16-NEXT: v_add_nc_u32_e32 v3, 0x7fff, v3
+; GFX11-FAKE16-NEXT: v_or_b32_e32 v18, 0x400000, v5
+; GFX11-FAKE16-NEXT: v_bfe_u32 v19, v7, 16, 1
+; GFX11-FAKE16-NEXT: v_add_nc_u32_e32 v4, v17, v6
+; GFX11-FAKE16-NEXT: v_cndmask_b32_e32 v2, v2, v16, vcc_lo
+; GFX11-FAKE16-NEXT: v_cmp_u_f32_e32 vcc_lo, v5, v5
+; GFX11-FAKE16-NEXT: v_or_b32_e32 v16, 0x400000, v6
+; GFX11-FAKE16-NEXT: v_add_nc_u32_e32 v5, v19, v7
+; GFX11-FAKE16-NEXT: v_add_nc_u32_e32 v4, 0x7fff, v4
+; GFX11-FAKE16-NEXT: s_lshl_b32 s1, s16, 16
+; GFX11-FAKE16-NEXT: v_cndmask_b32_e32 v3, v3, v18, vcc_lo
+; GFX11-FAKE16-NEXT: v_cmp_u_f32_e32 vcc_lo, v6, v6
+; GFX11-FAKE16-NEXT: v_add_nc_u32_e32 v5, 0x7fff, v5
+; GFX11-FAKE16-NEXT: v_or_b32_e32 v17, 0x400000, v7
+; GFX11-FAKE16-NEXT: v_add_f32_e64 v18, 0x40c00000, s1
+; GFX11-FAKE16-NEXT: s_and_b32 s0, s16, 0xffff0000
+; GFX11-FAKE16-NEXT: v_cndmask_b32_e32 v4, v4, v16, vcc_lo
+; GFX11-FAKE16-NEXT: v_cmp_u_f32_e32 vcc_lo, v7, v7
+; GFX11-FAKE16-NEXT: v_lshrrev_b32_e32 v3, 16, v3
+; GFX11-FAKE16-NEXT: v_add_f32_e64 v16, 0x40c00000, s0
+; GFX11-FAKE16-NEXT: v_lshrrev_b32_e32 v2, 16, v2
+; GFX11-FAKE16-NEXT: v_lshrrev_b32_e32 v4, 16, v4
+; GFX11-FAKE16-NEXT: v_cndmask_b32_e32 v5, v5, v17, vcc_lo
+; GFX11-FAKE16-NEXT: v_bfe_u32 v17, v18, 16, 1
+; GFX11-FAKE16-NEXT: v_bfe_u32 v6, v16, 16, 1
+; GFX11-FAKE16-NEXT: v_and_b32_e32 v3, 0xffff, v3
+; GFX11-FAKE16-NEXT: v_lshl_or_b32 v7, v0, 16, v1
+; GFX11-FAKE16-NEXT: v_lshrrev_b32_e32 v5, 16, v5
+; GFX11-FAKE16-NEXT: v_add_nc_u32_e32 v0, v17, v18
+; GFX11-FAKE16-NEXT: v_and_b32_e32 v4, 0xffff, v4
+; GFX11-FAKE16-NEXT: v_add_nc_u32_e32 v19, v6, v16
+; GFX11-FAKE16-NEXT: v_lshl_or_b32 v6, v2, 16, v3
+; GFX11-FAKE16-NEXT: v_or_b32_e32 v3, 0x400000, v18
+; GFX11-FAKE16-NEXT: v_add_nc_u32_e32 v0, 0x7fff, v0
+; GFX11-FAKE16-NEXT: v_cmp_u_f32_e32 vcc_lo, v18, v18
+; GFX11-FAKE16-NEXT: s_and_b32 s0, s15, 0xffff0000
+; GFX11-FAKE16-NEXT: v_lshl_or_b32 v5, v5, 16, v4
+; GFX11-FAKE16-NEXT: v_add_nc_u32_e32 v1, 0x7fff, v19
+; GFX11-FAKE16-NEXT: v_or_b32_e32 v2, 0x400000, v16
+; GFX11-FAKE16-NEXT: v_add_f32_e64 v4, 0x40c00000, s0
+; GFX11-FAKE16-NEXT: v_cndmask_b32_e32 v0, v0, v3, vcc_lo
+; GFX11-FAKE16-NEXT: v_cmp_u_f32_e32 vcc_lo, v16, v16
+; GFX11-FAKE16-NEXT: s_lshl_b32 s1, s15, 16
+; GFX11-FAKE16-NEXT: s_and_b32 s0, s14, 0xffff0000
+; GFX11-FAKE16-NEXT: v_add_f32_e64 v17, 0x40c00000, s1
+; GFX11-FAKE16-NEXT: v_add_f32_e64 v16, 0x40c00000, s0
+; GFX11-FAKE16-NEXT: v_cndmask_b32_e32 v1, v1, v2, vcc_lo
+; GFX11-FAKE16-NEXT: v_bfe_u32 v2, v4, 16, 1
+; GFX11-FAKE16-NEXT: v_or_b32_e32 v19, 0x400000, v4
+; GFX11-FAKE16-NEXT: v_bfe_u32 v3, v17, 16, 1
+; GFX11-FAKE16-NEXT: v_bfe_u32 v18, v16, 16, 1
+; GFX11-FAKE16-NEXT: v_cmp_u_f32_e32 vcc_lo, v4, v4
+; GFX11-FAKE16-NEXT: v_add_nc_u32_e32 v2, v2, v4
+; GFX11-FAKE16-NEXT: s_lshl_b32 s0, s14, 16
+; GFX11-FAKE16-NEXT: v_add_nc_u32_e32 v3, v3, v17
+; GFX11-FAKE16-NEXT: v_or_b32_e32 v20, 0x400000, v17
+; GFX11-FAKE16-NEXT: v_add_nc_u32_e32 v18, v18, v16
+; GFX11-FAKE16-NEXT: v_add_nc_u32_e32 v2, 0x7fff, v2
+; GFX11-FAKE16-NEXT: v_add_f32_e64 v4, 0x40c00000, s0
+; GFX11-FAKE16-NEXT: v_add_nc_u32_e32 v3, 0x7fff, v3
+; GFX11-FAKE16-NEXT: s_and_b32 s0, s13, 0xffff0000
+; GFX11-FAKE16-NEXT: v_lshrrev_b32_e32 v0, 16, v0
+; GFX11-FAKE16-NEXT: v_cndmask_b32_e32 v2, v2, v19, vcc_lo
+; GFX11-FAKE16-NEXT: v_cmp_u_f32_e32 vcc_lo, v17, v17
+; GFX11-FAKE16-NEXT: v_add_nc_u32_e32 v17, 0x7fff, v18
+; GFX11-FAKE16-NEXT: v_or_b32_e32 v18, 0x400000, v16
+; GFX11-FAKE16-NEXT: v_bfe_u32 v19, v4, 16, 1
+; GFX11-FAKE16-NEXT: v_lshrrev_b32_e32 v1, 16, v1
+; GFX11-FAKE16-NEXT: v_cndmask_b32_e32 v3, v3, v20, vcc_lo
+; GFX11-FAKE16-NEXT: v_cmp_u_f32_e32 vcc_lo, v16, v16
+; GFX11-FAKE16-NEXT: v_and_b32_e32 v0, 0xffff, v0
+; GFX11-FAKE16-NEXT: v_lshrrev_b32_e32 v2, 16, v2
+; GFX11-FAKE16-NEXT: s_delay_alu instid0(VALU_DEP_4)
+; GFX11-FAKE16-NEXT: v_lshrrev_b32_e32 v3, 16, v3
+; GFX11-FAKE16-NEXT: v_dual_cndmask_b32 v16, v17, v18 :: v_dual_add_nc_u32 v17, v19, v4
+; GFX11-FAKE16-NEXT: v_add_f32_e64 v18, 0x40c00000, s0
+; GFX11-FAKE16-NEXT: s_lshl_b32 s0, s13, 16
+; GFX11-FAKE16-NEXT: v_or_b32_e32 v19, 0x400000, v4
+; GFX11-FAKE16-NEXT: v_add_f32_e64 v21, 0x40c00000, s0
+; GFX11-FAKE16-NEXT: v_add_nc_u32_e32 v17, 0x7fff, v17
+; GFX11-FAKE16-NEXT: v_cmp_u_f32_e32 vcc_lo, v4, v4
+; GFX11-FAKE16-NEXT: s_lshl_b32 s0, s12, 16
+; GFX11-FAKE16-NEXT: v_bfe_u32 v20, v18, 16, 1
+; GFX11-FAKE16-NEXT: v_or_b32_e32 v26, 0x400000, v21
+; GFX11-FAKE16-NEXT: v_or_b32_e32 v25, 0x400000, v18
+; GFX11-FAKE16-NEXT: v_cndmask_b32_e32 v4, v17, v19, vcc_lo
+; GFX11-FAKE16-NEXT: v_add_f32_e64 v17, 0x40c00000, s0
+; GFX11-FAKE16-NEXT: v_bfe_u32 v19, v21, 16, 1
+; GFX11-FAKE16-NEXT: s_and_b32 s0, s12, 0xffff0000
+; GFX11-FAKE16-NEXT: v_cmp_u_f32_e32 vcc_lo, v21, v21
+; GFX11-FAKE16-NEXT: v_add_f32_e64 v22, 0x40c00000, s0
+; GFX11-FAKE16-NEXT: v_bfe_u32 v23, v17, 16, 1
+; GFX11-FAKE16-NEXT: v_add_nc_u32_e32 v19, v19, v21
+; GFX11-FAKE16-NEXT: v_add_nc_u32_e32 v20, v20, v18
+; GFX11-FAKE16-NEXT: v_or_b32_e32 v27, 0x400000, v17
+; GFX11-FAKE16-NEXT: v_bfe_u32 v24, v22, 16, 1
+; GFX11-FAKE16-NEXT: v_add_nc_u32_e32 v23, v23, v17
+; GFX11-FAKE16-NEXT: v_add_nc_u32_e32 v19, 0x7fff, v19
+; GFX11-FAKE16-NEXT: v_add_nc_u32_e32 v20, 0x7fff, v20
+; GFX11-FAKE16-NEXT: v_lshrrev_b32_e32 v4, 16, v4
+; GFX11-FAKE16-NEXT: v_add_nc_u32_e32 v24, v24, v22
+; GFX11-FAKE16-NEXT: v_add_nc_u32_e32 v23, 0x7fff, v23
+; GFX11-FAKE16-NEXT: v_cndmask_b32_e32 v19, v19, v26, vcc_lo
+; GFX11-FAKE16-NEXT: v_cmp_u_f32_e32 vcc_lo, v17, v17
+; GFX11-FAKE16-NEXT: v_and_b32_e32 v3, 0xffff, v3
+; GFX11-FAKE16-NEXT: v_add_nc_u32_e32 v21, 0x7fff, v24
+; GFX11-FAKE16-NEXT: v_or_b32_e32 v24, 0x400000, v22
+; GFX11-FAKE16-NEXT: v_lshrrev_b32_e32 v19, 16, v19
+; GFX11-FAKE16-NEXT: v_cndmask_b32_e32 v17, v23, v27, vcc_lo
+; GFX11-FAKE16-NEXT: v_cmp_u_f32_e32 vcc_lo, v18, v18
+; GFX11-FAKE16-NEXT: v_lshrrev_b32_e32 v16, 16, v16
+; GFX11-FAKE16-NEXT: v_lshl_or_b32 v3, v2, 16, v3
+; GFX11-FAKE16-NEXT: v_and_b32_e32 v19, 0xffff, v19
+; GFX11-FAKE16-NEXT: v_lshrrev_b32_e32 v17, 16, v17
+; GFX11-FAKE16-NEXT: v_cndmask_b32_e32 v18, v20, v25, vcc_lo
+; GFX11-FAKE16-NEXT: v_cmp_u_f32_e32 vcc_lo, v22, v22
+; GFX11-FAKE16-NEXT: s_delay_alu instid0(VALU_DEP_3) | instskip(NEXT) | instid1(VALU_DEP_3)
+; GFX11-FAKE16-NEXT: v_and_b32_e32 v17, 0xffff, v17
+; GFX11-FAKE16-NEXT: v_lshrrev_b32_e32 v18, 16, v18
+; GFX11-FAKE16-NEXT: v_cndmask_b32_e32 v20, v21, v24, vcc_lo
+; GFX11-FAKE16-NEXT: v_and_b32_e32 v21, 0xffff, v4
+; GFX11-FAKE16-NEXT: v_lshl_or_b32 v4, v1, 16, v0
+; GFX11-FAKE16-NEXT: s_delay_alu instid0(VALU_DEP_4) | instskip(NEXT) | instid1(VALU_DEP_4)
+; GFX11-FAKE16-NEXT: v_lshl_or_b32 v1, v18, 16, v19
+; GFX11-FAKE16-NEXT: v_lshrrev_b32_e32 v20, 16, v20
+; GFX11-FAKE16-NEXT: s_delay_alu instid0(VALU_DEP_4) | instskip(NEXT) | instid1(VALU_DEP_2)
+; GFX11-FAKE16-NEXT: v_lshl_or_b32 v2, v16, 16, v21
+; GFX11-FAKE16-NEXT: v_lshl_or_b32 v0, v20, 16, v17
+; GFX11-FAKE16-NEXT: s_setpc_b64 s[30:31]
+; GFX11-FAKE16-NEXT: .LBB23_3:
+; GFX11-FAKE16-NEXT: s_branch .LBB23_2
+; GFX11-FAKE16-NEXT: .LBB23_4:
+; GFX11-FAKE16-NEXT: v_dual_mov_b32 v0, s12 :: v_dual_mov_b32 v1, s13
+; GFX11-FAKE16-NEXT: v_dual_mov_b32 v2, s14 :: v_dual_mov_b32 v3, s15
+; GFX11-FAKE16-NEXT: v_dual_mov_b32 v4, s16 :: v_dual_mov_b32 v5, s17
+; GFX11-FAKE16-NEXT: v_dual_mov_b32 v6, s18 :: v_dual_mov_b32 v7, s19
+; GFX11-FAKE16-NEXT: v_dual_mov_b32 v8, s20 :: v_dual_mov_b32 v9, s21
+; GFX11-FAKE16-NEXT: v_dual_mov_b32 v10, s22 :: v_dual_mov_b32 v11, s23
+; GFX11-FAKE16-NEXT: v_dual_mov_b32 v12, s24 :: v_dual_mov_b32 v13, s25
+; GFX11-FAKE16-NEXT: v_dual_mov_b32 v14, s26 :: v_dual_mov_b32 v15, s27
+; GFX11-FAKE16-NEXT: s_setpc_b64 s[30:31]
%cmp = icmp eq i32 %b, 0
br i1 %cmp, label %cmp.true, label %cmp.false
@@ -21906,360 +22242,696 @@ define inreg <16 x float> @bitcast_v32bf16_to_v16f32_scalar(<32 x bfloat> inreg
; GFX9-NEXT: s_waitcnt vmcnt(0)
; GFX9-NEXT: s_setpc_b64 s[30:31]
;
-; GFX11-LABEL: bitcast_v32bf16_to_v16f32_scalar:
-; GFX11: ; %bb.0:
-; GFX11-NEXT: s_waitcnt vmcnt(0) expcnt(0) lgkmcnt(0)
-; GFX11-NEXT: s_mov_b32 s15, s3
-; GFX11-NEXT: s_mov_b32 s14, s2
-; GFX11-NEXT: s_mov_b32 s13, s1
-; GFX11-NEXT: s_mov_b32 s12, s0
-; GFX11-NEXT: s_cmp_lg_u32 s28, 0
-; GFX11-NEXT: s_mov_b32 s0, 0
-; GFX11-NEXT: s_cbranch_scc0 .LBB47_3
-; GFX11-NEXT: ; %bb.1: ; %Flow
-; GFX11-NEXT: s_and_not1_b32 vcc_lo, exec_lo, s0
-; GFX11-NEXT: s_cbranch_vccnz .LBB47_4
-; GFX11-NEXT: .LBB47_2: ; %cmp.true
-; GFX11-NEXT: s_and_b32 s1, s27, 0xffff0000
-; GFX11-NEXT: s_lshl_b32 s0, s27, 16
-; GFX11-NEXT: v_add_f32_e64 v1, 0x40c00000, s1
-; GFX11-NEXT: v_add_f32_e64 v0, 0x40c00000, s0
-; GFX11-NEXT: s_and_b32 s0, s26, 0xffff0000
-; GFX11-NEXT: s_lshl_b32 s2, s26, 16
-; GFX11-NEXT: v_add_f32_e64 v3, 0x40c00000, s0
-; GFX11-NEXT: v_bfe_u32 v4, v1, 16, 1
-; GFX11-NEXT: v_bfe_u32 v2, v0, 16, 1
-; GFX11-NEXT: v_add_f32_e64 v5, 0x40c00000, s2
-; GFX11-NEXT: v_or_b32_e32 v7, 0x400000, v1
-; GFX11-NEXT: v_or_b32_e32 v8, 0x400000, v0
-; GFX11-NEXT: v_add_nc_u32_e32 v4, v4, v1
-; GFX11-NEXT: v_bfe_u32 v9, v3, 16, 1
-; GFX11-NEXT: v_bfe_u32 v10, v5, 16, 1
-; GFX11-NEXT: v_cmp_u_f32_e32 vcc_lo, v0, v0
-; GFX11-NEXT: s_and_b32 s1, s25, 0xffff0000
-; GFX11-NEXT: v_add_nc_u32_e32 v4, 0x7fff, v4
-; GFX11-NEXT: v_add_nc_u32_e32 v2, v2, v0
-; GFX11-NEXT: v_add_f32_e64 v6, 0x40c00000, s1
-; GFX11-NEXT: s_lshl_b32 s3, s25, 16
-; GFX11-NEXT: s_and_b32 s0, s24, 0xffff0000
-; GFX11-NEXT: s_lshl_b32 s1, s24, 16
-; GFX11-NEXT: v_add_nc_u32_e32 v2, 0x7fff, v2
-; GFX11-NEXT: s_delay_alu instid0(VALU_DEP_1)
-; GFX11-NEXT: v_cndmask_b32_e32 v0, v2, v8, vcc_lo
-; GFX11-NEXT: v_add_nc_u32_e32 v2, v9, v3
-; GFX11-NEXT: v_add_nc_u32_e32 v8, v10, v5
-; GFX11-NEXT: v_cmp_u_f32_e32 vcc_lo, v1, v1
-; GFX11-NEXT: v_or_b32_e32 v9, 0x400000, v3
-; GFX11-NEXT: v_lshrrev_b32_e32 v0, 16, v0
-; GFX11-NEXT: v_dual_cndmask_b32 v1, v4, v7 :: v_dual_add_nc_u32 v2, 0x7fff, v2
-; GFX11-NEXT: v_or_b32_e32 v4, 0x400000, v5
-; GFX11-NEXT: v_add_nc_u32_e32 v7, 0x7fff, v8
-; GFX11-NEXT: v_cmp_u_f32_e32 vcc_lo, v5, v5
-; GFX11-NEXT: v_add_f32_e64 v8, 0x40c00000, s3
-; GFX11-NEXT: v_bfe_u32 v5, v6, 16, 1
-; GFX11-NEXT: v_lshrrev_b32_e32 v1, 16, v1
-; GFX11-NEXT: v_and_b32_e32 v0, 0xffff, v0
-; GFX11-NEXT: v_cndmask_b32_e32 v4, v7, v4, vcc_lo
-; GFX11-NEXT: v_cmp_u_f32_e32 vcc_lo, v3, v3
-; GFX11-NEXT: v_bfe_u32 v7, v8, 16, 1
-; GFX11-NEXT: s_delay_alu instid0(VALU_DEP_4) | instskip(NEXT) | instid1(VALU_DEP_4)
-; GFX11-NEXT: v_lshl_or_b32 v15, v1, 16, v0
-; GFX11-NEXT: v_lshrrev_b32_e32 v3, 16, v4
-; GFX11-NEXT: v_cndmask_b32_e32 v2, v2, v9, vcc_lo
-; GFX11-NEXT: v_add_nc_u32_e32 v4, v5, v6
-; GFX11-NEXT: v_or_b32_e32 v9, 0x400000, v6
-; GFX11-NEXT: v_cmp_u_f32_e32 vcc_lo, v8, v8
-; GFX11-NEXT: v_and_b32_e32 v1, 0xffff, v3
-; GFX11-NEXT: v_lshrrev_b32_e32 v0, 16, v2
-; GFX11-NEXT: v_add_nc_u32_e32 v2, 0x7fff, v4
-; GFX11-NEXT: v_add_nc_u32_e32 v5, v7, v8
-; GFX11-NEXT: v_or_b32_e32 v4, 0x400000, v8
-; GFX11-NEXT: v_add_f32_e64 v7, 0x40c00000, s1
-; GFX11-NEXT: s_lshl_b32 s1, s23, 16
-; GFX11-NEXT: v_lshl_or_b32 v14, v0, 16, v1
-; GFX11-NEXT: v_add_nc_u32_e32 v3, 0x7fff, v5
-; GFX11-NEXT: v_add_f32_e64 v5, 0x40c00000, s0
-; GFX11-NEXT: v_bfe_u32 v8, v7, 16, 1
-; GFX11-NEXT: s_and_b32 s0, s23, 0xffff0000
-; GFX11-NEXT: s_delay_alu instid0(VALU_DEP_3) | instskip(NEXT) | instid1(VALU_DEP_3)
-; GFX11-NEXT: v_cndmask_b32_e32 v3, v3, v4, vcc_lo
-; GFX11-NEXT: v_bfe_u32 v4, v5, 16, 1
-; GFX11-NEXT: v_cmp_u_f32_e32 vcc_lo, v6, v6
-; GFX11-NEXT: v_add_nc_u32_e32 v6, v8, v7
-; GFX11-NEXT: v_add_f32_e64 v8, 0x40c00000, s1
-; GFX11-NEXT: v_lshrrev_b32_e32 v3, 16, v3
-; GFX11-NEXT: v_add_nc_u32_e32 v4, v4, v5
-; GFX11-NEXT: v_cndmask_b32_e32 v2, v2, v9, vcc_lo
-; GFX11-NEXT: v_or_b32_e32 v9, 0x400000, v5
-; GFX11-NEXT: v_cmp_u_f32_e32 vcc_lo, v7, v7
-; GFX11-NEXT: s_lshl_b32 s1, s22, 16
-; GFX11-NEXT: s_delay_alu instid0(VALU_DEP_3)
-; GFX11-NEXT: v_lshrrev_b32_e32 v0, 16, v2
-; GFX11-NEXT: v_add_nc_u32_e32 v2, 0x7fff, v4
-; GFX11-NEXT: v_and_b32_e32 v1, 0xffff, v3
-; GFX11-NEXT: v_add_nc_u32_e32 v3, 0x7fff, v6
-; GFX11-NEXT: v_or_b32_e32 v4, 0x400000, v7
-; GFX11-NEXT: v_add_f32_e64 v6, 0x40c00000, s0
-; GFX11-NEXT: v_bfe_u32 v7, v8, 16, 1
-; GFX11-NEXT: s_and_b32 s0, s22, 0xffff0000
-; GFX11-NEXT: s_delay_alu instid0(VALU_DEP_3) | instskip(NEXT) | instid1(VALU_DEP_3)
-; GFX11-NEXT: v_cndmask_b32_e32 v3, v3, v4, vcc_lo
-; GFX11-NEXT: v_bfe_u32 v4, v6, 16, 1
-; GFX11-NEXT: v_cmp_u_f32_e32 vcc_lo, v5, v5
-; GFX11-NEXT: v_add_nc_u32_e32 v5, v7, v8
-; GFX11-NEXT: v_add_f32_e64 v7, 0x40c00000, s1
-; GFX11-NEXT: v_lshrrev_b32_e32 v3, 16, v3
-; GFX11-NEXT: v_add_nc_u32_e32 v4, v4, v6
-; GFX11-NEXT: v_cndmask_b32_e32 v2, v2, v9, vcc_lo
-; GFX11-NEXT: v_lshl_or_b32 v13, v0, 16, v1
-; GFX11-NEXT: v_cmp_u_f32_e32 vcc_lo, v8, v8
-; GFX11-NEXT: v_and_b32_e32 v1, 0xffff, v3
-; GFX11-NEXT: v_add_nc_u32_e32 v3, 0x7fff, v5
-; GFX11-NEXT: v_lshrrev_b32_e32 v0, 16, v2
-; GFX11-NEXT: v_add_nc_u32_e32 v2, 0x7fff, v4
-; GFX11-NEXT: v_or_b32_e32 v4, 0x400000, v8
-; GFX11-NEXT: v_add_f32_e64 v5, 0x40c00000, s0
-; GFX11-NEXT: v_or_b32_e32 v9, 0x400000, v6
-; GFX11-NEXT: v_bfe_u32 v8, v7, 16, 1
-; GFX11-NEXT: s_and_b32 s0, s21, 0xffff0000
-; GFX11-NEXT: v_cndmask_b32_e32 v3, v3, v4, vcc_lo
-; GFX11-NEXT: v_bfe_u32 v4, v5, 16, 1
-; GFX11-NEXT: v_cmp_u_f32_e32 vcc_lo, v6, v6
-; GFX11-NEXT: v_add_nc_u32_e32 v6, v8, v7
-; GFX11-NEXT: s_lshl_b32 s1, s21, 16
-; GFX11-NEXT: v_lshrrev_b32_e32 v3, 16, v3
-; GFX11-NEXT: v_add_nc_u32_e32 v4, v4, v5
-; GFX11-NEXT: v_cndmask_b32_e32 v2, v2, v9, vcc_lo
-; GFX11-NEXT: v_lshl_or_b32 v12, v0, 16, v1
-; GFX11-NEXT: v_add_f32_e64 v8, 0x40c00000, s1
-; GFX11-NEXT: v_and_b32_e32 v1, 0xffff, v3
-; GFX11-NEXT: v_add_nc_u32_e32 v3, 0x7fff, v6
-; GFX11-NEXT: v_lshrrev_b32_e32 v0, 16, v2
-; GFX11-NEXT: v_add_nc_u32_e32 v2, 0x7fff, v4
-; GFX11-NEXT: v_or_b32_e32 v4, 0x400000, v7
-; GFX11-NEXT: v_add_f32_e64 v6, 0x40c00000, s0
-; GFX11-NEXT: v_cmp_u_f32_e32 vcc_lo, v7, v7
-; GFX11-NEXT: v_or_b32_e32 v9, 0x400000, v5
-; GFX11-NEXT: v_bfe_u32 v7, v8, 16, 1
-; GFX11-NEXT: s_and_b32 s0, s20, 0xffff0000
-; GFX11-NEXT: s_lshl_b32 s1, s20, 16
-; GFX11-NEXT: v_cndmask_b32_e32 v3, v3, v4, vcc_lo
-; GFX11-NEXT: v_bfe_u32 v4, v6, 16, 1
-; GFX11-NEXT: v_cmp_u_f32_e32 vcc_lo, v5, v5
-; GFX11-NEXT: v_add_nc_u32_e32 v5, v7, v8
-; GFX11-NEXT: v_lshl_or_b32 v11, v0, 16, v1
-; GFX11-NEXT: v_lshrrev_b32_e32 v3, 16, v3
-; GFX11-NEXT: v_add_nc_u32_e32 v4, v4, v6
-; GFX11-NEXT: v_cndmask_b32_e32 v2, v2, v9, vcc_lo
-; GFX11-NEXT: v_add_f32_e64 v7, 0x40c00000, s1
-; GFX11-NEXT: v_cmp_u_f32_e32 vcc_lo, v8, v8
-; GFX11-NEXT: v_and_b32_e32 v1, 0xffff, v3
-; GFX11-NEXT: v_add_nc_u32_e32 v3, 0x7fff, v5
-; GFX11-NEXT: v_lshrrev_b32_e32 v0, 16, v2
-; GFX11-NEXT: v_add_nc_u32_e32 v2, 0x7fff, v4
-; GFX11-NEXT: v_or_b32_e32 v4, 0x400000, v8
-; GFX11-NEXT: v_add_f32_e64 v5, 0x40c00000, s0
-; GFX11-NEXT: v_or_b32_e32 v9, 0x400000, v6
-; GFX11-NEXT: v_bfe_u32 v8, v7, 16, 1
-; GFX11-NEXT: v_lshl_or_b32 v10, v0, 16, v1
-; GFX11-NEXT: v_cndmask_b32_e32 v3, v3, v4, vcc_lo
-; GFX11-NEXT: v_bfe_u32 v4, v5, 16, 1
-; GFX11-NEXT: v_cmp_u_f32_e32 vcc_lo, v6, v6
-; GFX11-NEXT: v_add_nc_u32_e32 v0, v8, v7
-; GFX11-NEXT: s_lshl_b32 s0, s19, 16
-; GFX11-NEXT: v_lshrrev_b32_e32 v3, 16, v3
-; GFX11-NEXT: v_add_nc_u32_e32 v4, v4, v5
-; GFX11-NEXT: v_cndmask_b32_e32 v2, v2, v9, vcc_lo
-; GFX11-NEXT: v_add_nc_u32_e32 v0, 0x7fff, v0
-; GFX11-NEXT: v_or_b32_e32 v6, 0x400000, v7
-; GFX11-NEXT: v_cmp_u_f32_e32 vcc_lo, v7, v7
-; GFX11-NEXT: v_or_b32_e32 v9, 0x400000, v5
-; GFX11-NEXT: v_lshrrev_b32_e32 v1, 16, v2
-; GFX11-NEXT: v_and_b32_e32 v2, 0xffff, v3
-; GFX11-NEXT: v_add_nc_u32_e32 v3, 0x7fff, v4
-; GFX11-NEXT: v_add_f32_e64 v4, 0x40c00000, s0
-; GFX11-NEXT: s_and_b32 s0, s19, 0xffff0000
-; GFX11-NEXT: v_cndmask_b32_e32 v0, v0, v6, vcc_lo
-; GFX11-NEXT: v_add_f32_e64 v8, 0x40c00000, s0
-; GFX11-NEXT: v_cmp_u_f32_e32 vcc_lo, v5, v5
-; GFX11-NEXT: v_bfe_u32 v16, v4, 16, 1
-; GFX11-NEXT: s_and_b32 s0, s18, 0xffff0000
-; GFX11-NEXT: v_lshrrev_b32_e32 v0, 16, v0
-; GFX11-NEXT: v_bfe_u32 v6, v8, 16, 1
-; GFX11-NEXT: v_cndmask_b32_e32 v3, v3, v9, vcc_lo
-; GFX11-NEXT: v_add_nc_u32_e32 v5, v16, v4
-; GFX11-NEXT: v_lshl_or_b32 v9, v1, 16, v2
-; GFX11-NEXT: v_cmp_u_f32_e32 vcc_lo, v4, v4
-; GFX11-NEXT: v_add_nc_u32_e32 v1, v6, v8
-; GFX11-NEXT: v_lshrrev_b32_e32 v2, 16, v3
-; GFX11-NEXT: v_add_nc_u32_e32 v3, 0x7fff, v5
-; GFX11-NEXT: v_or_b32_e32 v5, 0x400000, v4
-; GFX11-NEXT: v_or_b32_e32 v6, 0x400000, v8
-; GFX11-NEXT: v_add_nc_u32_e32 v1, 0x7fff, v1
-; GFX11-NEXT: v_add_f32_e64 v4, 0x40c00000, s0
-; GFX11-NEXT: s_lshl_b32 s1, s18, 16
-; GFX11-NEXT: v_cndmask_b32_e32 v3, v3, v5, vcc_lo
-; GFX11-NEXT: v_cmp_u_f32_e32 vcc_lo, v8, v8
-; GFX11-NEXT: v_and_b32_e32 v0, 0xffff, v0
-; GFX11-NEXT: v_add_f32_e64 v5, 0x40c00000, s1
-; GFX11-NEXT: s_lshl_b32 s0, s17, 16
-; GFX11-NEXT: v_lshrrev_b32_e32 v3, 16, v3
-; GFX11-NEXT: v_cndmask_b32_e32 v1, v1, v6, vcc_lo
-; GFX11-NEXT: v_bfe_u32 v6, v4, 16, 1
-; GFX11-NEXT: v_bfe_u32 v7, v5, 16, 1
-; GFX11-NEXT: v_lshl_or_b32 v8, v2, 16, v0
-; GFX11-NEXT: s_and_b32 s1, s17, 0xffff0000
-; GFX11-NEXT: v_lshrrev_b32_e32 v0, 16, v1
-; GFX11-NEXT: v_add_nc_u32_e32 v2, v6, v4
-; GFX11-NEXT: v_add_f32_e64 v6, 0x40c00000, s0
-; GFX11-NEXT: v_and_b32_e32 v1, 0xffff, v3
-; GFX11-NEXT: v_add_nc_u32_e32 v3, v7, v5
-; GFX11-NEXT: v_add_f32_e64 v7, 0x40c00000, s1
-; GFX11-NEXT: v_add_nc_u32_e32 v2, 0x7fff, v2
-; GFX11-NEXT: v_or_b32_e32 v16, 0x400000, v4
-; GFX11-NEXT: v_bfe_u32 v17, v6, 16, 1
-; GFX11-NEXT: v_cmp_u_f32_e32 vcc_lo, v4, v4
-; GFX11-NEXT: v_add_nc_u32_e32 v3, 0x7fff, v3
-; GFX11-NEXT: v_or_b32_e32 v18, 0x400000, v5
-; GFX11-NEXT: v_bfe_u32 v19, v7, 16, 1
-; GFX11-NEXT: v_add_nc_u32_e32 v4, v17, v6
-; GFX11-NEXT: v_cndmask_b32_e32 v2, v2, v16, vcc_lo
-; GFX11-NEXT: v_cmp_u_f32_e32 vcc_lo, v5, v5
-; GFX11-NEXT: v_or_b32_e32 v16, 0x400000, v6
-; GFX11-NEXT: v_add_nc_u32_e32 v5, v19, v7
-; GFX11-NEXT: v_add_nc_u32_e32 v4, 0x7fff, v4
-; GFX11-NEXT: s_lshl_b32 s1, s16, 16
-; GFX11-NEXT: v_cndmask_b32_e32 v3, v3, v18, vcc_lo
-; GFX11-NEXT: v_cmp_u_f32_e32 vcc_lo, v6, v6
-; GFX11-NEXT: v_add_nc_u32_e32 v5, 0x7fff, v5
-; GFX11-NEXT: v_or_b32_e32 v17, 0x400000, v7
-; GFX11-NEXT: v_add_f32_e64 v18, 0x40c00000, s1
-; GFX11-NEXT: s_and_b32 s0, s16, 0xffff0000
-; GFX11-NEXT: v_cndmask_b32_e32 v4, v4, v16, vcc_lo
-; GFX11-NEXT: v_cmp_u_f32_e32 vcc_lo, v7, v7
-; GFX11-NEXT: v_lshrrev_b32_e32 v3, 16, v3
-; GFX11-NEXT: v_add_f32_e64 v16, 0x40c00000, s0
-; GFX11-NEXT: v_lshrrev_b32_e32 v2, 16, v2
-; GFX11-NEXT: v_lshrrev_b32_e32 v4, 16, v4
-; GFX11-NEXT: v_cndmask_b32_e32 v5, v5, v17, vcc_lo
-; GFX11-NEXT: v_bfe_u32 v17, v18, 16, 1
-; GFX11-NEXT: v_bfe_u32 v6, v16, 16, 1
-; GFX11-NEXT: v_and_b32_e32 v3, 0xffff, v3
-; GFX11-NEXT: v_lshl_or_b32 v7, v0, 16, v1
-; GFX11-NEXT: v_lshrrev_b32_e32 v5, 16, v5
-; GFX11-NEXT: v_add_nc_u32_e32 v0, v17, v18
-; GFX11-NEXT: v_and_b32_e32 v4, 0xffff, v4
-; GFX11-NEXT: v_add_nc_u32_e32 v19, v6, v16
-; GFX11-NEXT: v_lshl_or_b32 v6, v2, 16, v3
-; GFX11-NEXT: v_or_b32_e32 v3, 0x400000, v18
-; GFX11-NEXT: v_add_nc_u32_e32 v0, 0x7fff, v0
-; GFX11-NEXT: v_cmp_u_f32_e32 vcc_lo, v18, v18
-; GFX11-NEXT: s_and_b32 s0, s15, 0xffff0000
-; GFX11-NEXT: v_lshl_or_b32 v5, v5, 16, v4
-; GFX11-NEXT: v_add_nc_u32_e32 v1, 0x7fff, v19
-; GFX11-NEXT: v_or_b32_e32 v2, 0x400000, v16
-; GFX11-NEXT: v_add_f32_e64 v4, 0x40c00000, s0
-; GFX11-NEXT: v_cndmask_b32_e32 v0, v0, v3, vcc_lo
-; GFX11-NEXT: v_cmp_u_f32_e32 vcc_lo, v16, v16
-; GFX11-NEXT: s_lshl_b32 s1, s15, 16
-; GFX11-NEXT: s_and_b32 s0, s14, 0xffff0000
-; GFX11-NEXT: v_add_f32_e64 v17, 0x40c00000, s1
-; GFX11-NEXT: v_add_f32_e64 v16, 0x40c00000, s0
-; GFX11-NEXT: v_cndmask_b32_e32 v1, v1, v2, vcc_lo
-; GFX11-NEXT: v_bfe_u32 v2, v4, 16, 1
-; GFX11-NEXT: v_or_b32_e32 v19, 0x400000, v4
-; GFX11-NEXT: v_bfe_u32 v3, v17, 16, 1
-; GFX11-NEXT: v_bfe_u32 v18, v16, 16, 1
-; GFX11-NEXT: v_cmp_u_f32_e32 vcc_lo, v4, v4
-; GFX11-NEXT: v_add_nc_u32_e32 v2, v2, v4
-; GFX11-NEXT: s_lshl_b32 s0, s14, 16
-; GFX11-NEXT: v_add_nc_u32_e32 v3, v3, v17
-; GFX11-NEXT: v_or_b32_e32 v20, 0x400000, v17
-; GFX11-NEXT: v_add_nc_u32_e32 v18, v18, v16
-; GFX11-NEXT: v_add_nc_u32_e32 v2, 0x7fff, v2
-; GFX11-NEXT: v_add_f32_e64 v4, 0x40c00000, s0
-; GFX11-NEXT: v_add_nc_u32_e32 v3, 0x7fff, v3
-; GFX11-NEXT: s_and_b32 s0, s13, 0xffff0000
-; GFX11-NEXT: v_lshrrev_b32_e32 v0, 16, v0
-; GFX11-NEXT: v_cndmask_b32_e32 v2, v2, v19, vcc_lo
-; GFX11-NEXT: v_cmp_u_f32_e32 vcc_lo, v17, v17
-; GFX11-NEXT: v_add_nc_u32_e32 v17, 0x7fff, v18
-; GFX11-NEXT: v_or_b32_e32 v18, 0x400000, v16
-; GFX11-NEXT: v_bfe_u32 v19, v4, 16, 1
-; GFX11-NEXT: v_lshrrev_b32_e32 v1, 16, v1
-; GFX11-NEXT: v_cndmask_b32_e32 v3, v3, v20, vcc_lo
-; GFX11-NEXT: v_cmp_u_f32_e32 vcc_lo, v16, v16
-; GFX11-NEXT: v_and_b32_e32 v0, 0xffff, v0
-; GFX11-NEXT: v_lshrrev_b32_e32 v2, 16, v2
-; GFX11-NEXT: s_delay_alu instid0(VALU_DEP_4)
-; GFX11-NEXT: v_lshrrev_b32_e32 v3, 16, v3
-; GFX11-NEXT: v_dual_cndmask_b32 v16, v17, v18 :: v_dual_add_nc_u32 v17, v19, v4
-; GFX11-NEXT: v_add_f32_e64 v18, 0x40c00000, s0
-; GFX11-NEXT: s_lshl_b32 s0, s13, 16
-; GFX11-NEXT: v_or_b32_e32 v19, 0x400000, v4
-; GFX11-NEXT: v_add_f32_e64 v21, 0x40c00000, s0
-; GFX11-NEXT: v_add_nc_u32_e32 v17, 0x7fff, v17
-; GFX11-NEXT: v_cmp_u_f32_e32 vcc_lo, v4, v4
-; GFX11-NEXT: s_lshl_b32 s0, s12, 16
-; GFX11-NEXT: v_bfe_u32 v20, v18, 16, 1
-; GFX11-NEXT: v_or_b32_e32 v26, 0x400000, v21
-; GFX11-NEXT: v_or_b32_e32 v25, 0x400000, v18
-; GFX11-NEXT: v_cndmask_b32_e32 v4, v17, v19, vcc_lo
-; GFX11-NEXT: v_add_f32_e64 v17, 0x40c00000, s0
-; GFX11-NEXT: v_bfe_u32 v19, v21, 16, 1
-; GFX11-NEXT: s_and_b32 s0, s12, 0xffff0000
-; GFX11-NEXT: v_cmp_u_f32_e32 vcc_lo, v21, v21
-; GFX11-NEXT: v_add_f32_e64 v22, 0x40c00000, s0
-; GFX11-NEXT: v_bfe_u32 v23, v17, 16, 1
-; GFX11-NEXT: v_add_nc_u32_e32 v19, v19, v21
-; GFX11-NEXT: v_add_nc_u32_e32 v20, v20, v18
-; GFX11-NEXT: v_or_b32_e32 v27, 0x400000, v17
-; GFX11-NEXT: v_bfe_u32 v24, v22, 16, 1
-; GFX11-NEXT: v_add_nc_u32_e32 v23, v23, v17
-; GFX11-NEXT: v_add_nc_u32_e32 v19, 0x7fff, v19
-; GFX11-NEXT: v_add_nc_u32_e32 v20, 0x7fff, v20
-; GFX11-NEXT: v_lshrrev_b32_e32 v4, 16, v4
-; GFX11-NEXT: v_add_nc_u32_e32 v24, v24, v22
-; GFX11-NEXT: v_add_nc_u32_e32 v23, 0x7fff, v23
-; GFX11-NEXT: v_cndmask_b32_e32 v19, v19, v26, vcc_lo
-; GFX11-NEXT: v_cmp_u_f32_e32 vcc_lo, v17, v17
-; GFX11-NEXT: v_and_b32_e32 v3, 0xffff, v3
-; GFX11-NEXT: v_add_nc_u32_e32 v21, 0x7fff, v24
-; GFX11-NEXT: v_or_b32_e32 v24, 0x400000, v22
-; GFX11-NEXT: v_lshrrev_b32_e32 v19, 16, v19
-; GFX11-NEXT: v_cndmask_b32_e32 v17, v23, v27, vcc_lo
-; GFX11-NEXT: v_cmp_u_f32_e32 vcc_lo, v18, v18
-; GFX11-NEXT: v_lshrrev_b32_e32 v16, 16, v16
-; GFX11-NEXT: v_lshl_or_b32 v3, v2, 16, v3
-; GFX11-NEXT: v_and_b32_e32 v19, 0xffff, v19
-; GFX11-NEXT: v_lshrrev_b32_e32 v17, 16, v17
-; GFX11-NEXT: v_cndmask_b32_e32 v18, v20, v25, vcc_lo
-; GFX11-NEXT: v_cmp_u_f32_e32 vcc_lo, v22, v22
-; GFX11-NEXT: s_delay_alu instid0(VALU_DEP_3) | instskip(NEXT) | instid1(VALU_DEP_3)
-; GFX11-NEXT: v_and_b32_e32 v17, 0xffff, v17
-; GFX11-NEXT: v_lshrrev_b32_e32 v18, 16, v18
-; GFX11-NEXT: v_cndmask_b32_e32 v20, v21, v24, vcc_lo
-; GFX11-NEXT: v_and_b32_e32 v21, 0xffff, v4
-; GFX11-NEXT: v_lshl_or_b32 v4, v1, 16, v0
-; GFX11-NEXT: s_delay_alu instid0(VALU_DEP_4) | instskip(NEXT) | instid1(VALU_DEP_4)
-; GFX11-NEXT: v_lshl_or_b32 v1, v18, 16, v19
-; GFX11-NEXT: v_lshrrev_b32_e32 v20, 16, v20
-; GFX11-NEXT: s_delay_alu instid0(VALU_DEP_4) | instskip(NEXT) | instid1(VALU_DEP_2)
-; GFX11-NEXT: v_lshl_or_b32 v2, v16, 16, v21
-; GFX11-NEXT: v_lshl_or_b32 v0, v20, 16, v17
-; GFX11-NEXT: s_setpc_b64 s[30:31]
-; GFX11-NEXT: .LBB47_3:
-; GFX11-NEXT: s_branch .LBB47_2
-; GFX11-NEXT: .LBB47_4:
-; GFX11-NEXT: v_dual_mov_b32 v0, s12 :: v_dual_mov_b32 v1, s13
-; GFX11-NEXT: v_dual_mov_b32 v2, s14 :: v_dual_mov_b32 v3, s15
-; GFX11-NEXT: v_dual_mov_b32 v4, s16 :: v_dual_mov_b32 v5, s17
-; GFX11-NEXT: v_dual_mov_b32 v6, s18 :: v_dual_mov_b32 v7, s19
-; GFX11-NEXT: v_dual_mov_b32 v8, s20 :: v_dual_mov_b32 v9, s21
-; GFX11-NEXT: v_dual_mov_b32 v10, s22 :: v_dual_mov_b32 v11, s23
-; GFX11-NEXT: v_dual_mov_b32 v12, s24 :: v_dual_mov_b32 v13, s25
-; GFX11-NEXT: v_dual_mov_b32 v14, s26 :: v_dual_mov_b32 v15, s27
-; GFX11-NEXT: s_setpc_b64 s[30:31]
+; GFX11-TRUE16-LABEL: bitcast_v32bf16_to_v16f32_scalar:
+; GFX11-TRUE16: ; %bb.0:
+; GFX11-TRUE16-NEXT: s_waitcnt vmcnt(0) expcnt(0) lgkmcnt(0)
+; GFX11-TRUE16-NEXT: s_mov_b32 s15, s3
+; GFX11-TRUE16-NEXT: s_mov_b32 s14, s2
+; GFX11-TRUE16-NEXT: s_mov_b32 s13, s1
+; GFX11-TRUE16-NEXT: s_mov_b32 s12, s0
+; GFX11-TRUE16-NEXT: s_cmp_lg_u32 s28, 0
+; GFX11-TRUE16-NEXT: s_mov_b32 s0, 0
+; GFX11-TRUE16-NEXT: s_cbranch_scc0 .LBB47_3
+; GFX11-TRUE16-NEXT: ; %bb.1: ; %Flow
+; GFX11-TRUE16-NEXT: s_and_not1_b32 vcc_lo, exec_lo, s0
+; GFX11-TRUE16-NEXT: s_cbranch_vccnz .LBB47_4
+; GFX11-TRUE16-NEXT: .LBB47_2: ; %cmp.true
+; GFX11-TRUE16-NEXT: s_and_b32 s0, s27, 0xffff0000
+; GFX11-TRUE16-NEXT: s_lshl_b32 s1, s27, 16
+; GFX11-TRUE16-NEXT: v_add_f32_e64 v0, 0x40c00000, s0
+; GFX11-TRUE16-NEXT: v_add_f32_e64 v1, 0x40c00000, s1
+; GFX11-TRUE16-NEXT: s_and_b32 s0, s26, 0xffff0000
+; GFX11-TRUE16-NEXT: s_lshl_b32 s1, s26, 16
+; GFX11-TRUE16-NEXT: v_add_f32_e64 v4, 0x40c00000, s0
+; GFX11-TRUE16-NEXT: v_bfe_u32 v2, v0, 16, 1
+; GFX11-TRUE16-NEXT: v_bfe_u32 v3, v1, 16, 1
+; GFX11-TRUE16-NEXT: v_or_b32_e32 v6, 0x400000, v0
+; GFX11-TRUE16-NEXT: v_or_b32_e32 v8, 0x400000, v1
+; GFX11-TRUE16-NEXT: v_cmp_u_f32_e32 vcc_lo, v0, v0
+; GFX11-TRUE16-NEXT: v_add_nc_u32_e32 v2, v2, v0
+; GFX11-TRUE16-NEXT: v_add_f32_e64 v5, 0x40c00000, s1
+; GFX11-TRUE16-NEXT: v_bfe_u32 v7, v4, 16, 1
+; GFX11-TRUE16-NEXT: s_and_b32 s0, s25, 0xffff0000
+; GFX11-TRUE16-NEXT: s_lshl_b32 s1, s17, 16
+; GFX11-TRUE16-NEXT: v_add_nc_u32_e32 v2, 0x7fff, v2
+; GFX11-TRUE16-NEXT: v_add_nc_u32_e32 v3, v3, v1
+; GFX11-TRUE16-NEXT: v_bfe_u32 v9, v5, 16, 1
+; GFX11-TRUE16-NEXT: s_delay_alu instid0(VALU_DEP_3) | instskip(NEXT) | instid1(VALU_DEP_3)
+; GFX11-TRUE16-NEXT: v_dual_cndmask_b32 v0, v2, v6 :: v_dual_add_nc_u32 v7, v7, v4
+; GFX11-TRUE16-NEXT: v_add_nc_u32_e32 v3, 0x7fff, v3
+; GFX11-TRUE16-NEXT: v_cmp_u_f32_e32 vcc_lo, v1, v1
+; GFX11-TRUE16-NEXT: s_delay_alu instid0(VALU_DEP_4)
+; GFX11-TRUE16-NEXT: v_add_nc_u32_e32 v2, v9, v5
+; GFX11-TRUE16-NEXT: v_or_b32_e32 v6, 0x400000, v4
+; GFX11-TRUE16-NEXT: v_lshrrev_b32_e32 v0, 16, v0
+; GFX11-TRUE16-NEXT: v_cndmask_b32_e32 v1, v3, v8, vcc_lo
+; GFX11-TRUE16-NEXT: v_add_nc_u32_e32 v3, 0x7fff, v7
+; GFX11-TRUE16-NEXT: v_cmp_u_f32_e32 vcc_lo, v4, v4
+; GFX11-TRUE16-NEXT: v_add_nc_u32_e32 v2, 0x7fff, v2
+; GFX11-TRUE16-NEXT: v_or_b32_e32 v7, 0x400000, v5
+; GFX11-TRUE16-NEXT: v_lshrrev_b32_e32 v15, 16, v1
+; GFX11-TRUE16-NEXT: v_add_f32_e64 v1, 0x40c00000, s0
+; GFX11-TRUE16-NEXT: s_lshl_b32 s0, s25, 16
+; GFX11-TRUE16-NEXT: v_cndmask_b32_e32 v3, v3, v6, vcc_lo
+; GFX11-TRUE16-NEXT: v_add_f32_e64 v9, 0x40c00000, s0
+; GFX11-TRUE16-NEXT: v_cmp_u_f32_e32 vcc_lo, v5, v5
+; GFX11-TRUE16-NEXT: v_bfe_u32 v8, v1, 16, 1
+; GFX11-TRUE16-NEXT: s_and_b32 s0, s24, 0xffff0000
+; GFX11-TRUE16-NEXT: v_mov_b16_e32 v15.h, v0.l
+; GFX11-TRUE16-NEXT: v_bfe_u32 v5, v9, 16, 1
+; GFX11-TRUE16-NEXT: v_cndmask_b32_e32 v2, v2, v7, vcc_lo
+; GFX11-TRUE16-NEXT: v_add_nc_u32_e32 v4, v8, v1
+; GFX11-TRUE16-NEXT: v_lshrrev_b32_e32 v0, 16, v3
+; GFX11-TRUE16-NEXT: v_cmp_u_f32_e32 vcc_lo, v1, v1
+; GFX11-TRUE16-NEXT: v_add_nc_u32_e32 v3, v5, v9
+; GFX11-TRUE16-NEXT: v_lshrrev_b32_e32 v14, 16, v2
+; GFX11-TRUE16-NEXT: v_add_nc_u32_e32 v2, 0x7fff, v4
+; GFX11-TRUE16-NEXT: v_or_b32_e32 v4, 0x400000, v1
+; GFX11-TRUE16-NEXT: v_add_f32_e64 v5, 0x40c00000, s0
+; GFX11-TRUE16-NEXT: s_lshl_b32 s0, s24, 16
+; GFX11-TRUE16-NEXT: v_add_nc_u32_e32 v3, 0x7fff, v3
+; GFX11-TRUE16-NEXT: v_add_f32_e64 v6, 0x40c00000, s0
+; GFX11-TRUE16-NEXT: v_or_b32_e32 v7, 0x400000, v9
+; GFX11-TRUE16-NEXT: v_bfe_u32 v8, v5, 16, 1
+; GFX11-TRUE16-NEXT: v_cndmask_b32_e32 v1, v2, v4, vcc_lo
+; GFX11-TRUE16-NEXT: v_cmp_u_f32_e32 vcc_lo, v9, v9
+; GFX11-TRUE16-NEXT: v_bfe_u32 v2, v6, 16, 1
+; GFX11-TRUE16-NEXT: s_and_b32 s0, s23, 0xffff0000
+; GFX11-TRUE16-NEXT: v_add_nc_u32_e32 v4, v8, v5
+; GFX11-TRUE16-NEXT: v_mov_b16_e32 v14.h, v0.l
+; GFX11-TRUE16-NEXT: v_cndmask_b32_e32 v3, v3, v7, vcc_lo
+; GFX11-TRUE16-NEXT: v_lshrrev_b32_e32 v0, 16, v1
+; GFX11-TRUE16-NEXT: v_add_nc_u32_e32 v1, v2, v6
+; GFX11-TRUE16-NEXT: v_add_nc_u32_e32 v2, 0x7fff, v4
+; GFX11-TRUE16-NEXT: v_add_f32_e64 v4, 0x40c00000, s0
+; GFX11-TRUE16-NEXT: v_lshrrev_b32_e32 v13, 16, v3
+; GFX11-TRUE16-NEXT: v_or_b32_e32 v3, 0x400000, v5
+; GFX11-TRUE16-NEXT: v_cmp_u_f32_e32 vcc_lo, v5, v5
+; GFX11-TRUE16-NEXT: s_lshl_b32 s0, s23, 16
+; GFX11-TRUE16-NEXT: v_add_nc_u32_e32 v1, 0x7fff, v1
+; GFX11-TRUE16-NEXT: v_or_b32_e32 v7, 0x400000, v6
+; GFX11-TRUE16-NEXT: v_add_f32_e64 v8, 0x40c00000, s0
+; GFX11-TRUE16-NEXT: v_cndmask_b32_e32 v2, v2, v3, vcc_lo
+; GFX11-TRUE16-NEXT: v_bfe_u32 v3, v4, 16, 1
+; GFX11-TRUE16-NEXT: v_cmp_u_f32_e32 vcc_lo, v6, v6
+; GFX11-TRUE16-NEXT: v_mov_b16_e32 v13.h, v0.l
+; GFX11-TRUE16-NEXT: v_bfe_u32 v5, v8, 16, 1
+; GFX11-TRUE16-NEXT: s_and_b32 s0, s22, 0xffff0000
+; GFX11-TRUE16-NEXT: v_dual_cndmask_b32 v1, v1, v7 :: v_dual_add_nc_u32 v0, v3, v4
+; GFX11-TRUE16-NEXT: v_add_f32_e64 v3, 0x40c00000, s0
+; GFX11-TRUE16-NEXT: v_cmp_u_f32_e32 vcc_lo, v4, v4
+; GFX11-TRUE16-NEXT: s_lshl_b32 s0, s22, 16
+; GFX11-TRUE16-NEXT: s_delay_alu instid0(VALU_DEP_3)
+; GFX11-TRUE16-NEXT: v_add_nc_u32_e32 v0, 0x7fff, v0
+; GFX11-TRUE16-NEXT: v_lshrrev_b32_e32 v12, 16, v1
+; GFX11-TRUE16-NEXT: v_add_nc_u32_e32 v1, v5, v8
+; GFX11-TRUE16-NEXT: v_or_b32_e32 v5, 0x400000, v4
+; GFX11-TRUE16-NEXT: v_or_b32_e32 v6, 0x400000, v8
+; GFX11-TRUE16-NEXT: v_bfe_u32 v7, v3, 16, 1
+; GFX11-TRUE16-NEXT: v_add_f32_e64 v9, 0x40c00000, s0
+; GFX11-TRUE16-NEXT: v_add_nc_u32_e32 v1, 0x7fff, v1
+; GFX11-TRUE16-NEXT: v_cndmask_b32_e32 v0, v0, v5, vcc_lo
+; GFX11-TRUE16-NEXT: v_cmp_u_f32_e32 vcc_lo, v8, v8
+; GFX11-TRUE16-NEXT: v_lshrrev_b32_e32 v2, 16, v2
+; GFX11-TRUE16-NEXT: v_add_nc_u32_e32 v4, v7, v3
+; GFX11-TRUE16-NEXT: v_bfe_u32 v5, v9, 16, 1
+; GFX11-TRUE16-NEXT: s_and_b32 s0, s21, 0xffff0000
+; GFX11-TRUE16-NEXT: v_cndmask_b32_e32 v1, v1, v6, vcc_lo
+; GFX11-TRUE16-NEXT: v_mov_b16_e32 v12.h, v2.l
+; GFX11-TRUE16-NEXT: v_cmp_u_f32_e32 vcc_lo, v3, v3
+; GFX11-TRUE16-NEXT: v_add_nc_u32_e32 v2, v5, v9
+; GFX11-TRUE16-NEXT: v_add_f32_e64 v5, 0x40c00000, s0
+; GFX11-TRUE16-NEXT: v_lshrrev_b32_e32 v11, 16, v1
+; GFX11-TRUE16-NEXT: v_add_nc_u32_e32 v1, 0x7fff, v4
+; GFX11-TRUE16-NEXT: v_or_b32_e32 v4, 0x400000, v3
+; GFX11-TRUE16-NEXT: s_lshl_b32 s0, s21, 16
+; GFX11-TRUE16-NEXT: v_add_nc_u32_e32 v2, 0x7fff, v2
+; GFX11-TRUE16-NEXT: v_add_f32_e64 v6, 0x40c00000, s0
+; GFX11-TRUE16-NEXT: v_or_b32_e32 v7, 0x400000, v9
+; GFX11-TRUE16-NEXT: v_bfe_u32 v8, v5, 16, 1
+; GFX11-TRUE16-NEXT: v_cndmask_b32_e32 v1, v1, v4, vcc_lo
+; GFX11-TRUE16-NEXT: v_cmp_u_f32_e32 vcc_lo, v9, v9
+; GFX11-TRUE16-NEXT: v_lshrrev_b32_e32 v0, 16, v0
+; GFX11-TRUE16-NEXT: v_bfe_u32 v3, v6, 16, 1
+; GFX11-TRUE16-NEXT: v_add_nc_u32_e32 v4, v8, v5
+; GFX11-TRUE16-NEXT: s_and_b32 s0, s20, 0xffff0000
+; GFX11-TRUE16-NEXT: v_cndmask_b32_e32 v2, v2, v7, vcc_lo
+; GFX11-TRUE16-NEXT: v_mov_b16_e32 v11.h, v0.l
+; GFX11-TRUE16-NEXT: v_lshrrev_b32_e32 v0, 16, v1
+; GFX11-TRUE16-NEXT: v_add_nc_u32_e32 v1, v3, v6
+; GFX11-TRUE16-NEXT: v_or_b32_e32 v3, 0x400000, v5
+; GFX11-TRUE16-NEXT: v_lshrrev_b32_e32 v10, 16, v2
+; GFX11-TRUE16-NEXT: v_add_nc_u32_e32 v2, 0x7fff, v4
+; GFX11-TRUE16-NEXT: v_add_f32_e64 v4, 0x40c00000, s0
+; GFX11-TRUE16-NEXT: v_cmp_u_f32_e32 vcc_lo, v5, v5
+; GFX11-TRUE16-NEXT: s_lshl_b32 s0, s20, 16
+; GFX11-TRUE16-NEXT: v_add_nc_u32_e32 v1, 0x7fff, v1
+; GFX11-TRUE16-NEXT: v_or_b32_e32 v7, 0x400000, v6
+; GFX11-TRUE16-NEXT: v_add_f32_e64 v8, 0x40c00000, s0
+; GFX11-TRUE16-NEXT: v_cndmask_b32_e32 v2, v2, v3, vcc_lo
+; GFX11-TRUE16-NEXT: v_bfe_u32 v3, v4, 16, 1
+; GFX11-TRUE16-NEXT: v_cmp_u_f32_e32 vcc_lo, v6, v6
+; GFX11-TRUE16-NEXT: v_mov_b16_e32 v10.h, v0.l
+; GFX11-TRUE16-NEXT: v_bfe_u32 v5, v8, 16, 1
+; GFX11-TRUE16-NEXT: s_and_b32 s0, s19, 0xffff0000
+; GFX11-TRUE16-NEXT: v_dual_cndmask_b32 v1, v1, v7 :: v_dual_add_nc_u32 v0, v3, v4
+; GFX11-TRUE16-NEXT: v_add_f32_e64 v3, 0x40c00000, s0
+; GFX11-TRUE16-NEXT: v_cmp_u_f32_e32 vcc_lo, v4, v4
+; GFX11-TRUE16-NEXT: s_lshl_b32 s0, s19, 16
+; GFX11-TRUE16-NEXT: s_delay_alu instid0(VALU_DEP_3)
+; GFX11-TRUE16-NEXT: v_add_nc_u32_e32 v0, 0x7fff, v0
+; GFX11-TRUE16-NEXT: v_lshrrev_b32_e32 v9, 16, v1
+; GFX11-TRUE16-NEXT: v_add_nc_u32_e32 v1, v5, v8
+; GFX11-TRUE16-NEXT: v_or_b32_e32 v5, 0x400000, v4
+; GFX11-TRUE16-NEXT: v_or_b32_e32 v6, 0x400000, v8
+; GFX11-TRUE16-NEXT: v_bfe_u32 v7, v3, 16, 1
+; GFX11-TRUE16-NEXT: v_add_f32_e64 v16, 0x40c00000, s0
+; GFX11-TRUE16-NEXT: v_add_nc_u32_e32 v1, 0x7fff, v1
+; GFX11-TRUE16-NEXT: v_cndmask_b32_e32 v0, v0, v5, vcc_lo
+; GFX11-TRUE16-NEXT: v_cmp_u_f32_e32 vcc_lo, v8, v8
+; GFX11-TRUE16-NEXT: v_lshrrev_b32_e32 v2, 16, v2
+; GFX11-TRUE16-NEXT: v_add_nc_u32_e32 v4, v7, v3
+; GFX11-TRUE16-NEXT: v_bfe_u32 v5, v16, 16, 1
+; GFX11-TRUE16-NEXT: s_and_b32 s0, s18, 0xffff0000
+; GFX11-TRUE16-NEXT: v_cndmask_b32_e32 v1, v1, v6, vcc_lo
+; GFX11-TRUE16-NEXT: v_mov_b16_e32 v9.h, v2.l
+; GFX11-TRUE16-NEXT: v_cmp_u_f32_e32 vcc_lo, v3, v3
+; GFX11-TRUE16-NEXT: v_add_nc_u32_e32 v2, v5, v16
+; GFX11-TRUE16-NEXT: v_add_f32_e64 v5, 0x40c00000, s0
+; GFX11-TRUE16-NEXT: v_lshrrev_b32_e32 v8, 16, v1
+; GFX11-TRUE16-NEXT: v_add_nc_u32_e32 v1, 0x7fff, v4
+; GFX11-TRUE16-NEXT: v_or_b32_e32 v4, 0x400000, v3
+; GFX11-TRUE16-NEXT: s_lshl_b32 s0, s18, 16
+; GFX11-TRUE16-NEXT: v_add_nc_u32_e32 v2, 0x7fff, v2
+; GFX11-TRUE16-NEXT: v_add_f32_e64 v6, 0x40c00000, s0
+; GFX11-TRUE16-NEXT: v_or_b32_e32 v7, 0x400000, v16
+; GFX11-TRUE16-NEXT: v_bfe_u32 v17, v5, 16, 1
+; GFX11-TRUE16-NEXT: v_cndmask_b32_e32 v1, v1, v4, vcc_lo
+; GFX11-TRUE16-NEXT: v_cmp_u_f32_e32 vcc_lo, v16, v16
+; GFX11-TRUE16-NEXT: v_lshrrev_b32_e32 v0, 16, v0
+; GFX11-TRUE16-NEXT: v_bfe_u32 v3, v6, 16, 1
+; GFX11-TRUE16-NEXT: v_add_nc_u32_e32 v4, v17, v5
+; GFX11-TRUE16-NEXT: s_and_b32 s0, s17, 0xffff0000
+; GFX11-TRUE16-NEXT: v_cndmask_b32_e32 v2, v2, v7, vcc_lo
+; GFX11-TRUE16-NEXT: v_mov_b16_e32 v8.h, v0.l
+; GFX11-TRUE16-NEXT: v_add_nc_u32_e32 v0, v3, v6
+; GFX11-TRUE16-NEXT: v_or_b32_e32 v3, 0x400000, v5
+; GFX11-TRUE16-NEXT: v_cmp_u_f32_e32 vcc_lo, v5, v5
+; GFX11-TRUE16-NEXT: v_lshrrev_b32_e32 v7, 16, v2
+; GFX11-TRUE16-NEXT: v_add_nc_u32_e32 v2, 0x7fff, v4
+; GFX11-TRUE16-NEXT: v_add_f32_e64 v4, 0x40c00000, s0
+; GFX11-TRUE16-NEXT: v_add_nc_u32_e32 v0, 0x7fff, v0
+; GFX11-TRUE16-NEXT: v_or_b32_e32 v17, 0x400000, v6
+; GFX11-TRUE16-NEXT: v_add_f32_e64 v16, 0x40c00000, s1
+; GFX11-TRUE16-NEXT: v_cndmask_b32_e32 v2, v2, v3, vcc_lo
+; GFX11-TRUE16-NEXT: v_cmp_u_f32_e32 vcc_lo, v6, v6
+; GFX11-TRUE16-NEXT: v_bfe_u32 v3, v4, 16, 1
+; GFX11-TRUE16-NEXT: v_lshrrev_b32_e32 v1, 16, v1
+; GFX11-TRUE16-NEXT: v_bfe_u32 v5, v16, 16, 1
+; GFX11-TRUE16-NEXT: v_lshrrev_b32_e32 v2, 16, v2
+; GFX11-TRUE16-NEXT: s_delay_alu instid0(VALU_DEP_4) | instskip(SKIP_3) | instid1(VALU_DEP_3)
+; GFX11-TRUE16-NEXT: v_dual_cndmask_b32 v0, v0, v17 :: v_dual_add_nc_u32 v3, v3, v4
+; GFX11-TRUE16-NEXT: s_and_b32 s0, s16, 0xffff0000
+; GFX11-TRUE16-NEXT: v_cmp_u_f32_e32 vcc_lo, v4, v4
+; GFX11-TRUE16-NEXT: v_mov_b16_e32 v7.h, v1.l
+; GFX11-TRUE16-NEXT: v_lshrrev_b32_e32 v6, 16, v0
+; GFX11-TRUE16-NEXT: v_mov_b16_e32 v6.h, v2.l
+; GFX11-TRUE16-NEXT: v_add_nc_u32_e32 v0, 0x7fff, v3
+; GFX11-TRUE16-NEXT: v_or_b32_e32 v2, 0x400000, v4
+; GFX11-TRUE16-NEXT: v_add_f32_e64 v3, 0x40c00000, s0
+; GFX11-TRUE16-NEXT: v_add_nc_u32_e32 v1, v5, v16
+; GFX11-TRUE16-NEXT: s_lshl_b32 s1, s16, 16
+; GFX11-TRUE16-NEXT: v_or_b32_e32 v5, 0x400000, v16
+; GFX11-TRUE16-NEXT: v_add_f32_e64 v17, 0x40c00000, s1
+; GFX11-TRUE16-NEXT: v_cndmask_b32_e32 v0, v0, v2, vcc_lo
+; GFX11-TRUE16-NEXT: v_bfe_u32 v2, v3, 16, 1
+; GFX11-TRUE16-NEXT: v_add_nc_u32_e32 v1, 0x7fff, v1
+; GFX11-TRUE16-NEXT: v_cmp_u_f32_e32 vcc_lo, v16, v16
+; GFX11-TRUE16-NEXT: v_bfe_u32 v4, v17, 16, 1
+; GFX11-TRUE16-NEXT: s_and_b32 s0, s15, 0xffff0000
+; GFX11-TRUE16-NEXT: v_add_nc_u32_e32 v2, v2, v3
+; GFX11-TRUE16-NEXT: v_or_b32_e32 v16, 0x400000, v3
+; GFX11-TRUE16-NEXT: v_cndmask_b32_e32 v1, v1, v5, vcc_lo
+; GFX11-TRUE16-NEXT: v_add_nc_u32_e32 v4, v4, v17
+; GFX11-TRUE16-NEXT: v_cmp_u_f32_e32 vcc_lo, v3, v3
+; GFX11-TRUE16-NEXT: v_add_nc_u32_e32 v2, 0x7fff, v2
+; GFX11-TRUE16-NEXT: v_or_b32_e32 v18, 0x400000, v17
+; GFX11-TRUE16-NEXT: v_lshrrev_b32_e32 v5, 16, v1
+; GFX11-TRUE16-NEXT: v_add_f32_e64 v1, 0x40c00000, s0
+; GFX11-TRUE16-NEXT: v_add_nc_u32_e32 v4, 0x7fff, v4
+; GFX11-TRUE16-NEXT: v_cndmask_b32_e32 v2, v2, v16, vcc_lo
+; GFX11-TRUE16-NEXT: v_cmp_u_f32_e32 vcc_lo, v17, v17
+; GFX11-TRUE16-NEXT: v_lshrrev_b32_e32 v0, 16, v0
+; GFX11-TRUE16-NEXT: v_bfe_u32 v19, v1, 16, 1
+; GFX11-TRUE16-NEXT: s_lshl_b32 s0, s15, 16
+; GFX11-TRUE16-NEXT: v_lshrrev_b32_e32 v2, 16, v2
+; GFX11-TRUE16-NEXT: v_cndmask_b32_e32 v3, v4, v18, vcc_lo
+; GFX11-TRUE16-NEXT: v_mov_b16_e32 v5.h, v0.l
+; GFX11-TRUE16-NEXT: v_add_nc_u32_e32 v16, v19, v1
+; GFX11-TRUE16-NEXT: v_add_f32_e64 v0, 0x40c00000, s0
+; GFX11-TRUE16-NEXT: s_and_b32 s0, s14, 0xffff0000
+; GFX11-TRUE16-NEXT: v_lshrrev_b32_e32 v4, 16, v3
+; GFX11-TRUE16-NEXT: v_mov_b16_e32 v4.h, v2.l
+; GFX11-TRUE16-NEXT: v_add_nc_u32_e32 v3, 0x7fff, v16
+; GFX11-TRUE16-NEXT: v_or_b32_e32 v16, 0x400000, v1
+; GFX11-TRUE16-NEXT: v_bfe_u32 v17, v0, 16, 1
+; GFX11-TRUE16-NEXT: v_add_f32_e64 v2, 0x40c00000, s0
+; GFX11-TRUE16-NEXT: v_cmp_u_f32_e32 vcc_lo, v1, v1
+; GFX11-TRUE16-NEXT: s_lshl_b32 s0, s14, 16
+; GFX11-TRUE16-NEXT: v_or_b32_e32 v18, 0x400000, v0
+; GFX11-TRUE16-NEXT: s_and_b32 s1, s12, 0xffff0000
+; GFX11-TRUE16-NEXT: v_or_b32_e32 v20, 0x400000, v2
+; GFX11-TRUE16-NEXT: v_cndmask_b32_e32 v1, v3, v16, vcc_lo
+; GFX11-TRUE16-NEXT: v_add_nc_u32_e32 v3, v17, v0
+; GFX11-TRUE16-NEXT: v_bfe_u32 v16, v2, 16, 1
+; GFX11-TRUE16-NEXT: v_add_f32_e64 v17, 0x40c00000, s0
+; GFX11-TRUE16-NEXT: v_cmp_u_f32_e32 vcc_lo, v0, v0
+; GFX11-TRUE16-NEXT: s_and_b32 s0, s13, 0xffff0000
+; GFX11-TRUE16-NEXT: v_add_nc_u32_e32 v3, 0x7fff, v3
+; GFX11-TRUE16-NEXT: v_add_nc_u32_e32 v16, v16, v2
+; GFX11-TRUE16-NEXT: v_bfe_u32 v19, v17, 16, 1
+; GFX11-TRUE16-NEXT: v_lshrrev_b32_e32 v1, 16, v1
+; GFX11-TRUE16-NEXT: s_delay_alu instid0(VALU_DEP_4)
+; GFX11-TRUE16-NEXT: v_cndmask_b32_e32 v0, v3, v18, vcc_lo
+; GFX11-TRUE16-NEXT: v_add_f32_e64 v18, 0x40c00000, s0
+; GFX11-TRUE16-NEXT: v_add_nc_u32_e32 v16, 0x7fff, v16
+; GFX11-TRUE16-NEXT: v_add_nc_u32_e32 v19, v19, v17
+; GFX11-TRUE16-NEXT: v_cmp_u_f32_e32 vcc_lo, v2, v2
+; GFX11-TRUE16-NEXT: v_lshrrev_b32_e32 v3, 16, v0
+; GFX11-TRUE16-NEXT: v_bfe_u32 v0, v18, 16, 1
+; GFX11-TRUE16-NEXT: s_lshl_b32 s0, s13, 16
+; GFX11-TRUE16-NEXT: v_mov_b16_e32 v3.h, v1.l
+; GFX11-TRUE16-NEXT: v_cndmask_b32_e32 v2, v16, v20, vcc_lo
+; GFX11-TRUE16-NEXT: v_add_nc_u32_e32 v16, 0x7fff, v19
+; GFX11-TRUE16-NEXT: v_or_b32_e32 v19, 0x400000, v17
+; GFX11-TRUE16-NEXT: v_cmp_u_f32_e32 vcc_lo, v17, v17
+; GFX11-TRUE16-NEXT: v_add_nc_u32_e32 v0, v0, v18
+; GFX11-TRUE16-NEXT: v_lshrrev_b32_e32 v1, 16, v2
+; GFX11-TRUE16-NEXT: v_add_f32_e64 v17, 0x40c00000, s1
+; GFX11-TRUE16-NEXT: v_cndmask_b32_e32 v2, v16, v19, vcc_lo
+; GFX11-TRUE16-NEXT: v_add_f32_e64 v16, 0x40c00000, s0
+; GFX11-TRUE16-NEXT: v_add_nc_u32_e32 v0, 0x7fff, v0
+; GFX11-TRUE16-NEXT: v_or_b32_e32 v19, 0x400000, v18
+; GFX11-TRUE16-NEXT: v_bfe_u32 v22, v17, 16, 1
+; GFX11-TRUE16-NEXT: v_cmp_u_f32_e32 vcc_lo, v18, v18
+; GFX11-TRUE16-NEXT: v_bfe_u32 v21, v16, 16, 1
+; GFX11-TRUE16-NEXT: s_lshl_b32 s0, s12, 16
+; GFX11-TRUE16-NEXT: v_or_b32_e32 v23, 0x400000, v16
+; GFX11-TRUE16-NEXT: v_add_f32_e64 v20, 0x40c00000, s0
+; GFX11-TRUE16-NEXT: s_delay_alu instid0(VALU_DEP_3) | instskip(SKIP_2) | instid1(VALU_DEP_4)
+; GFX11-TRUE16-NEXT: v_dual_cndmask_b32 v0, v0, v19 :: v_dual_add_nc_u32 v19, v21, v16
+; GFX11-TRUE16-NEXT: v_add_nc_u32_e32 v21, v22, v17
+; GFX11-TRUE16-NEXT: v_or_b32_e32 v22, 0x400000, v17
+; GFX11-TRUE16-NEXT: v_bfe_u32 v18, v20, 16, 1
+; GFX11-TRUE16-NEXT: v_cmp_u_f32_e32 vcc_lo, v17, v17
+; GFX11-TRUE16-NEXT: v_add_nc_u32_e32 v19, 0x7fff, v19
+; GFX11-TRUE16-NEXT: v_add_nc_u32_e32 v21, 0x7fff, v21
+; GFX11-TRUE16-NEXT: v_or_b32_e32 v24, 0x400000, v20
+; GFX11-TRUE16-NEXT: v_add_nc_u32_e32 v18, v18, v20
+; GFX11-TRUE16-NEXT: v_lshrrev_b32_e32 v0, 16, v0
+; GFX11-TRUE16-NEXT: v_lshrrev_b32_e32 v2, 16, v2
+; GFX11-TRUE16-NEXT: v_cndmask_b32_e32 v17, v21, v22, vcc_lo
+; GFX11-TRUE16-NEXT: v_cmp_u_f32_e32 vcc_lo, v16, v16
+; GFX11-TRUE16-NEXT: v_add_nc_u32_e32 v18, 0x7fff, v18
+; GFX11-TRUE16-NEXT: v_mov_b16_e32 v2.h, v1.l
+; GFX11-TRUE16-NEXT: s_delay_alu instid0(VALU_DEP_4) | instskip(SKIP_2) | instid1(VALU_DEP_2)
+; GFX11-TRUE16-NEXT: v_lshrrev_b32_e32 v17, 16, v17
+; GFX11-TRUE16-NEXT: v_cndmask_b32_e32 v16, v19, v23, vcc_lo
+; GFX11-TRUE16-NEXT: v_cmp_u_f32_e32 vcc_lo, v20, v20
+; GFX11-TRUE16-NEXT: v_lshrrev_b32_e32 v1, 16, v16
+; GFX11-TRUE16-NEXT: v_cndmask_b32_e32 v18, v18, v24, vcc_lo
+; GFX11-TRUE16-NEXT: v_mov_b16_e32 v1.h, v0.l
+; GFX11-TRUE16-NEXT: s_delay_alu instid0(VALU_DEP_2)
+; GFX11-TRUE16-NEXT: v_lshrrev_b32_e32 v0, 16, v18
+; GFX11-TRUE16-NEXT: v_mov_b16_e32 v0.h, v17.l
+; GFX11-TRUE16-NEXT: s_setpc_b64 s[30:31]
+; GFX11-TRUE16-NEXT: .LBB47_3:
+; GFX11-TRUE16-NEXT: s_branch .LBB47_2
+; GFX11-TRUE16-NEXT: .LBB47_4:
+; GFX11-TRUE16-NEXT: v_dual_mov_b32 v0, s12 :: v_dual_mov_b32 v1, s13
+; GFX11-TRUE16-NEXT: v_dual_mov_b32 v2, s14 :: v_dual_mov_b32 v3, s15
+; GFX11-TRUE16-NEXT: v_dual_mov_b32 v4, s16 :: v_dual_mov_b32 v5, s17
+; GFX11-TRUE16-NEXT: v_dual_mov_b32 v6, s18 :: v_dual_mov_b32 v7, s19
+; GFX11-TRUE16-NEXT: v_dual_mov_b32 v8, s20 :: v_dual_mov_b32 v9, s21
+; GFX11-TRUE16-NEXT: v_dual_mov_b32 v10, s22 :: v_dual_mov_b32 v11, s23
+; GFX11-TRUE16-NEXT: v_dual_mov_b32 v12, s24 :: v_dual_mov_b32 v13, s25
+; GFX11-TRUE16-NEXT: v_dual_mov_b32 v14, s26 :: v_dual_mov_b32 v15, s27
+; GFX11-TRUE16-NEXT: s_setpc_b64 s[30:31]
+;
+; GFX11-FAKE16-LABEL: bitcast_v32bf16_to_v16f32_scalar:
+; GFX11-FAKE16: ; %bb.0:
+; GFX11-FAKE16-NEXT: s_waitcnt vmcnt(0) expcnt(0) lgkmcnt(0)
+; GFX11-FAKE16-NEXT: s_mov_b32 s15, s3
+; GFX11-FAKE16-NEXT: s_mov_b32 s14, s2
+; GFX11-FAKE16-NEXT: s_mov_b32 s13, s1
+; GFX11-FAKE16-NEXT: s_mov_b32 s12, s0
+; GFX11-FAKE16-NEXT: s_cmp_lg_u32 s28, 0
+; GFX11-FAKE16-NEXT: s_mov_b32 s0, 0
+; GFX11-FAKE16-NEXT: s_cbranch_scc0 .LBB47_3
+; GFX11-FAKE16-NEXT: ; %bb.1: ; %Flow
+; GFX11-FAKE16-NEXT: s_and_not1_b32 vcc_lo, exec_lo, s0
+; GFX11-FAKE16-NEXT: s_cbranch_vccnz .LBB47_4
+; GFX11-FAKE16-NEXT: .LBB47_2: ; %cmp.true
+; GFX11-FAKE16-NEXT: s_and_b32 s1, s27, 0xffff0000
+; GFX11-FAKE16-NEXT: s_lshl_b32 s0, s27, 16
+; GFX11-FAKE16-NEXT: v_add_f32_e64 v1, 0x40c00000, s1
+; GFX11-FAKE16-NEXT: v_add_f32_e64 v0, 0x40c00000, s0
+; GFX11-FAKE16-NEXT: s_and_b32 s0, s26, 0xffff0000
+; GFX11-FAKE16-NEXT: s_lshl_b32 s2, s26, 16
+; GFX11-FAKE16-NEXT: v_add_f32_e64 v3, 0x40c00000, s0
+; GFX11-FAKE16-NEXT: v_bfe_u32 v4, v1, 16, 1
+; GFX11-FAKE16-NEXT: v_bfe_u32 v2, v0, 16, 1
+; GFX11-FAKE16-NEXT: v_add_f32_e64 v5, 0x40c00000, s2
+; GFX11-FAKE16-NEXT: v_or_b32_e32 v7, 0x400000, v1
+; GFX11-FAKE16-NEXT: v_or_b32_e32 v8, 0x400000, v0
+; GFX11-FAKE16-NEXT: v_add_nc_u32_e32 v4, v4, v1
+; GFX11-FAKE16-NEXT: v_bfe_u32 v9, v3, 16, 1
+; GFX11-FAKE16-NEXT: v_bfe_u32 v10, v5, 16, 1
+; GFX11-FAKE16-NEXT: v_cmp_u_f32_e32 vcc_lo, v0, v0
+; GFX11-FAKE16-NEXT: s_and_b32 s1, s25, 0xffff0000
+; GFX11-FAKE16-NEXT: v_add_nc_u32_e32 v4, 0x7fff, v4
+; GFX11-FAKE16-NEXT: v_add_nc_u32_e32 v2, v2, v0
+; GFX11-FAKE16-NEXT: v_add_f32_e64 v6, 0x40c00000, s1
+; GFX11-FAKE16-NEXT: s_lshl_b32 s3, s25, 16
+; GFX11-FAKE16-NEXT: s_and_b32 s0, s24, 0xffff0000
+; GFX11-FAKE16-NEXT: s_lshl_b32 s1, s24, 16
+; GFX11-FAKE16-NEXT: v_add_nc_u32_e32 v2, 0x7fff, v2
+; GFX11-FAKE16-NEXT: s_delay_alu instid0(VALU_DEP_1)
+; GFX11-FAKE16-NEXT: v_cndmask_b32_e32 v0, v2, v8, vcc_lo
+; GFX11-FAKE16-NEXT: v_add_nc_u32_e32 v2, v9, v3
+; GFX11-FAKE16-NEXT: v_add_nc_u32_e32 v8, v10, v5
+; GFX11-FAKE16-NEXT: v_cmp_u_f32_e32 vcc_lo, v1, v1
+; GFX11-FAKE16-NEXT: v_or_b32_e32 v9, 0x400000, v3
+; GFX11-FAKE16-NEXT: v_lshrrev_b32_e32 v0, 16, v0
+; GFX11-FAKE16-NEXT: v_dual_cndmask_b32 v1, v4, v7 :: v_dual_add_nc_u32 v2, 0x7fff, v2
+; GFX11-FAKE16-NEXT: v_or_b32_e32 v4, 0x400000, v5
+; GFX11-FAKE16-NEXT: v_add_nc_u32_e32 v7, 0x7fff, v8
+; GFX11-FAKE16-NEXT: v_cmp_u_f32_e32 vcc_lo, v5, v5
+; GFX11-FAKE16-NEXT: v_add_f32_e64 v8, 0x40c00000, s3
+; GFX11-FAKE16-NEXT: v_bfe_u32 v5, v6, 16, 1
+; GFX11-FAKE16-NEXT: v_lshrrev_b32_e32 v1, 16, v1
+; GFX11-FAKE16-NEXT: v_and_b32_e32 v0, 0xffff, v0
+; GFX11-FAKE16-NEXT: v_cndmask_b32_e32 v4, v7, v4, vcc_lo
+; GFX11-FAKE16-NEXT: v_cmp_u_f32_e32 vcc_lo, v3, v3
+; GFX11-FAKE16-NEXT: v_bfe_u32 v7, v8, 16, 1
+; GFX11-FAKE16-NEXT: s_delay_alu instid0(VALU_DEP_4) | instskip(NEXT) | instid1(VALU_DEP_4)
+; GFX11-FAKE16-NEXT: v_lshl_or_b32 v15, v1, 16, v0
+; GFX11-FAKE16-NEXT: v_lshrrev_b32_e32 v3, 16, v4
+; GFX11-FAKE16-NEXT: v_cndmask_b32_e32 v2, v2, v9, vcc_lo
+; GFX11-FAKE16-NEXT: v_add_nc_u32_e32 v4, v5, v6
+; GFX11-FAKE16-NEXT: v_or_b32_e32 v9, 0x400000, v6
+; GFX11-FAKE16-NEXT: v_cmp_u_f32_e32 vcc_lo, v8, v8
+; GFX11-FAKE16-NEXT: v_and_b32_e32 v1, 0xffff, v3
+; GFX11-FAKE16-NEXT: v_lshrrev_b32_e32 v0, 16, v2
+; GFX11-FAKE16-NEXT: v_add_nc_u32_e32 v2, 0x7fff, v4
+; GFX11-FAKE16-NEXT: v_add_nc_u32_e32 v5, v7, v8
+; GFX11-FAKE16-NEXT: v_or_b32_e32 v4, 0x400000, v8
+; GFX11-FAKE16-NEXT: v_add_f32_e64 v7, 0x40c00000, s1
+; GFX11-FAKE16-NEXT: s_lshl_b32 s1, s23, 16
+; GFX11-FAKE16-NEXT: v_lshl_or_b32 v14, v0, 16, v1
+; GFX11-FAKE16-NEXT: v_add_nc_u32_e32 v3, 0x7fff, v5
+; GFX11-FAKE16-NEXT: v_add_f32_e64 v5, 0x40c00000, s0
+; GFX11-FAKE16-NEXT: v_bfe_u32 v8, v7, 16, 1
+; GFX11-FAKE16-NEXT: s_and_b32 s0, s23, 0xffff0000
+; GFX11-FAKE16-NEXT: s_delay_alu instid0(VALU_DEP_3) | instskip(NEXT) | instid1(VALU_DEP_3)
+; GFX11-FAKE16-NEXT: v_cndmask_b32_e32 v3, v3, v4, vcc_lo
+; GFX11-FAKE16-NEXT: v_bfe_u32 v4, v5, 16, 1
+; GFX11-FAKE16-NEXT: v_cmp_u_f32_e32 vcc_lo, v6, v6
+; GFX11-FAKE16-NEXT: v_add_nc_u32_e32 v6, v8, v7
+; GFX11-FAKE16-NEXT: v_add_f32_e64 v8, 0x40c00000, s1
+; GFX11-FAKE16-NEXT: v_lshrrev_b32_e32 v3, 16, v3
+; GFX11-FAKE16-NEXT: v_add_nc_u32_e32 v4, v4, v5
+; GFX11-FAKE16-NEXT: v_cndmask_b32_e32 v2, v2, v9, vcc_lo
+; GFX11-FAKE16-NEXT: v_or_b32_e32 v9, 0x400000, v5
+; GFX11-FAKE16-NEXT: v_cmp_u_f32_e32 vcc_lo, v7, v7
+; GFX11-FAKE16-NEXT: s_lshl_b32 s1, s22, 16
+; GFX11-FAKE16-NEXT: s_delay_alu instid0(VALU_DEP_3)
+; GFX11-FAKE16-NEXT: v_lshrrev_b32_e32 v0, 16, v2
+; GFX11-FAKE16-NEXT: v_add_nc_u32_e32 v2, 0x7fff, v4
+; GFX11-FAKE16-NEXT: v_and_b32_e32 v1, 0xffff, v3
+; GFX11-FAKE16-NEXT: v_add_nc_u32_e32 v3, 0x7fff, v6
+; GFX11-FAKE16-NEXT: v_or_b32_e32 v4, 0x400000, v7
+; GFX11-FAKE16-NEXT: v_add_f32_e64 v6, 0x40c00000, s0
+; GFX11-FAKE16-NEXT: v_bfe_u32 v7, v8, 16, 1
+; GFX11-FAKE16-NEXT: s_and_b32 s0, s22, 0xffff0000
+; GFX11-FAKE16-NEXT: s_delay_alu instid0(VALU_DEP_3) | instskip(NEXT) | instid1(VALU_DEP_3)
+; GFX11-FAKE16-NEXT: v_cndmask_b32_e32 v3, v3, v4, vcc_lo
+; GFX11-FAKE16-NEXT: v_bfe_u32 v4, v6, 16, 1
+; GFX11-FAKE16-NEXT: v_cmp_u_f32_e32 vcc_lo, v5, v5
+; GFX11-FAKE16-NEXT: v_add_nc_u32_e32 v5, v7, v8
+; GFX11-FAKE16-NEXT: v_add_f32_e64 v7, 0x40c00000, s1
+; GFX11-FAKE16-NEXT: v_lshrrev_b32_e32 v3, 16, v3
+; GFX11-FAKE16-NEXT: v_add_nc_u32_e32 v4, v4, v6
+; GFX11-FAKE16-NEXT: v_cndmask_b32_e32 v2, v2, v9, vcc_lo
+; GFX11-FAKE16-NEXT: v_lshl_or_b32 v13, v0, 16, v1
+; GFX11-FAKE16-NEXT: v_cmp_u_f32_e32 vcc_lo, v8, v8
+; GFX11-FAKE16-NEXT: v_and_b32_e32 v1, 0xffff, v3
+; GFX11-FAKE16-NEXT: v_add_nc_u32_e32 v3, 0x7fff, v5
+; GFX11-FAKE16-NEXT: v_lshrrev_b32_e32 v0, 16, v2
+; GFX11-FAKE16-NEXT: v_add_nc_u32_e32 v2, 0x7fff, v4
+; GFX11-FAKE16-NEXT: v_or_b32_e32 v4, 0x400000, v8
+; GFX11-FAKE16-NEXT: v_add_f32_e64 v5, 0x40c00000, s0
+; GFX11-FAKE16-NEXT: v_or_b32_e32 v9, 0x400000, v6
+; GFX11-FAKE16-NEXT: v_bfe_u32 v8, v7, 16, 1
+; GFX11-FAKE16-NEXT: s_and_b32 s0, s21, 0xffff0000
+; GFX11-FAKE16-NEXT: v_cndmask_b32_e32 v3, v3, v4, vcc_lo
+; GFX11-FAKE16-NEXT: v_bfe_u32 v4, v5, 16, 1
+; GFX11-FAKE16-NEXT: v_cmp_u_f32_e32 vcc_lo, v6, v6
+; GFX11-FAKE16-NEXT: v_add_nc_u32_e32 v6, v8, v7
+; GFX11-FAKE16-NEXT: s_lshl_b32 s1, s21, 16
+; GFX11-FAKE16-NEXT: v_lshrrev_b32_e32 v3, 16, v3
+; GFX11-FAKE16-NEXT: v_add_nc_u32_e32 v4, v4, v5
+; GFX11-FAKE16-NEXT: v_cndmask_b32_e32 v2, v2, v9, vcc_lo
+; GFX11-FAKE16-NEXT: v_lshl_or_b32 v12, v0, 16, v1
+; GFX11-FAKE16-NEXT: v_add_f32_e64 v8, 0x40c00000, s1
+; GFX11-FAKE16-NEXT: v_and_b32_e32 v1, 0xffff, v3
+; GFX11-FAKE16-NEXT: v_add_nc_u32_e32 v3, 0x7fff, v6
+; GFX11-FAKE16-NEXT: v_lshrrev_b32_e32 v0, 16, v2
+; GFX11-FAKE16-NEXT: v_add_nc_u32_e32 v2, 0x7fff, v4
+; GFX11-FAKE16-NEXT: v_or_b32_e32 v4, 0x400000, v7
+; GFX11-FAKE16-NEXT: v_add_f32_e64 v6, 0x40c00000, s0
+; GFX11-FAKE16-NEXT: v_cmp_u_f32_e32 vcc_lo, v7, v7
+; GFX11-FAKE16-NEXT: v_or_b32_e32 v9, 0x400000, v5
+; GFX11-FAKE16-NEXT: v_bfe_u32 v7, v8, 16, 1
+; GFX11-FAKE16-NEXT: s_and_b32 s0, s20, 0xffff0000
+; GFX11-FAKE16-NEXT: s_lshl_b32 s1, s20, 16
+; GFX11-FAKE16-NEXT: v_cndmask_b32_e32 v3, v3, v4, vcc_lo
+; GFX11-FAKE16-NEXT: v_bfe_u32 v4, v6, 16, 1
+; GFX11-FAKE16-NEXT: v_cmp_u_f32_e32 vcc_lo, v5, v5
+; GFX11-FAKE16-NEXT: v_add_nc_u32_e32 v5, v7, v8
+; GFX11-FAKE16-NEXT: v_lshl_or_b32 v11, v0, 16, v1
+; GFX11-FAKE16-NEXT: v_lshrrev_b32_e32 v3, 16, v3
+; GFX11-FAKE16-NEXT: v_add_nc_u32_e32 v4, v4, v6
+; GFX11-FAKE16-NEXT: v_cndmask_b32_e32 v2, v2, v9, vcc_lo
+; GFX11-FAKE16-NEXT: v_add_f32_e64 v7, 0x40c00000, s1
+; GFX11-FAKE16-NEXT: v_cmp_u_f32_e32 vcc_lo, v8, v8
+; GFX11-FAKE16-NEXT: v_and_b32_e32 v1, 0xffff, v3
+; GFX11-FAKE16-NEXT: v_add_nc_u32_e32 v3, 0x7fff, v5
+; GFX11-FAKE16-NEXT: v_lshrrev_b32_e32 v0, 16, v2
+; GFX11-FAKE16-NEXT: v_add_nc_u32_e32 v2, 0x7fff, v4
+; GFX11-FAKE16-NEXT: v_or_b32_e32 v4, 0x400000, v8
+; GFX11-FAKE16-NEXT: v_add_f32_e64 v5, 0x40c00000, s0
+; GFX11-FAKE16-NEXT: v_or_b32_e32 v9, 0x400000, v6
+; GFX11-FAKE16-NEXT: v_bfe_u32 v8, v7, 16, 1
+; GFX11-FAKE16-NEXT: v_lshl_or_b32 v10, v0, 16, v1
+; GFX11-FAKE16-NEXT: v_cndmask_b32_e32 v3, v3, v4, vcc_lo
+; GFX11-FAKE16-NEXT: v_bfe_u32 v4, v5, 16, 1
+; GFX11-FAKE16-NEXT: v_cmp_u_f32_e32 vcc_lo, v6, v6
+; GFX11-FAKE16-NEXT: v_add_nc_u32_e32 v0, v8, v7
+; GFX11-FAKE16-NEXT: s_lshl_b32 s0, s19, 16
+; GFX11-FAKE16-NEXT: v_lshrrev_b32_e32 v3, 16, v3
+; GFX11-FAKE16-NEXT: v_add_nc_u32_e32 v4, v4, v5
+; GFX11-FAKE16-NEXT: v_cndmask_b32_e32 v2, v2, v9, vcc_lo
+; GFX11-FAKE16-NEXT: v_add_nc_u32_e32 v0, 0x7fff, v0
+; GFX11-FAKE16-NEXT: v_or_b32_e32 v6, 0x400000, v7
+; GFX11-FAKE16-NEXT: v_cmp_u_f32_e32 vcc_lo, v7, v7
+; GFX11-FAKE16-NEXT: v_or_b32_e32 v9, 0x400000, v5
+; GFX11-FAKE16-NEXT: v_lshrrev_b32_e32 v1, 16, v2
+; GFX11-FAKE16-NEXT: v_and_b32_e32 v2, 0xffff, v3
+; GFX11-FAKE16-NEXT: v_add_nc_u32_e32 v3, 0x7fff, v4
+; GFX11-FAKE16-NEXT: v_add_f32_e64 v4, 0x40c00000, s0
+; GFX11-FAKE16-NEXT: s_and_b32 s0, s19, 0xffff0000
+; GFX11-FAKE16-NEXT: v_cndmask_b32_e32 v0, v0, v6, vcc_lo
+; GFX11-FAKE16-NEXT: v_add_f32_e64 v8, 0x40c00000, s0
+; GFX11-FAKE16-NEXT: v_cmp_u_f32_e32 vcc_lo, v5, v5
+; GFX11-FAKE16-NEXT: v_bfe_u32 v16, v4, 16, 1
+; GFX11-FAKE16-NEXT: s_and_b32 s0, s18, 0xffff0000
+; GFX11-FAKE16-NEXT: v_lshrrev_b32_e32 v0, 16, v0
+; GFX11-FAKE16-NEXT: v_bfe_u32 v6, v8, 16, 1
+; GFX11-FAKE16-NEXT: v_cndmask_b32_e32 v3, v3, v9, vcc_lo
+; GFX11-FAKE16-NEXT: v_add_nc_u32_e32 v5, v16, v4
+; GFX11-FAKE16-NEXT: v_lshl_or_b32 v9, v1, 16, v2
+; GFX11-FAKE16-NEXT: v_cmp_u_f32_e32 vcc_lo, v4, v4
+; GFX11-FAKE16-NEXT: v_add_nc_u32_e32 v1, v6, v8
+; GFX11-FAKE16-NEXT: v_lshrrev_b32_e32 v2, 16, v3
+; GFX11-FAKE16-NEXT: v_add_nc_u32_e32 v3, 0x7fff, v5
+; GFX11-FAKE16-NEXT: v_or_b32_e32 v5, 0x400000, v4
+; GFX11-FAKE16-NEXT: v_or_b32_e32 v6, 0x400000, v8
+; GFX11-FAKE16-NEXT: v_add_nc_u32_e32 v1, 0x7fff, v1
+; GFX11-FAKE16-NEXT: v_add_f32_e64 v4, 0x40c00000, s0
+; GFX11-FAKE16-NEXT: s_lshl_b32 s1, s18, 16
+; GFX11-FAKE16-NEXT: v_cndmask_b32_e32 v3, v3, v5, vcc_lo
+; GFX11-FAKE16-NEXT: v_cmp_u_f32_e32 vcc_lo, v8, v8
+; GFX11-FAKE16-NEXT: v_and_b32_e32 v0, 0xffff, v0
+; GFX11-FAKE16-NEXT: v_add_f32_e64 v5, 0x40c00000, s1
+; GFX11-FAKE16-NEXT: s_lshl_b32 s0, s17, 16
+; GFX11-FAKE16-NEXT: v_lshrrev_b32_e32 v3, 16, v3
+; GFX11-FAKE16-NEXT: v_cndmask_b32_e32 v1, v1, v6, vcc_lo
+; GFX11-FAKE16-NEXT: v_bfe_u32 v6, v4, 16, 1
+; GFX11-FAKE16-NEXT: v_bfe_u32 v7, v5, 16, 1
+; GFX11-FAKE16-NEXT: v_lshl_or_b32 v8, v2, 16, v0
+; GFX11-FAKE16-NEXT: s_and_b32 s1, s17, 0xffff0000
+; GFX11-FAKE16-NEXT: v_lshrrev_b32_e32 v0, 16, v1
+; GFX11-FAKE16-NEXT: v_add_nc_u32_e32 v2, v6, v4
+; GFX11-FAKE16-NEXT: v_add_f32_e64 v6, 0x40c00000, s0
+; GFX11-FAKE16-NEXT: v_and_b32_e32 v1, 0xffff, v3
+; GFX11-FAKE16-NEXT: v_add_nc_u32_e32 v3, v7, v5
+; GFX11-FAKE16-NEXT: v_add_f32_e64 v7, 0x40c00000, s1
+; GFX11-FAKE16-NEXT: v_add_nc_u32_e32 v2, 0x7fff, v2
+; GFX11-FAKE16-NEXT: v_or_b32_e32 v16, 0x400000, v4
+; GFX11-FAKE16-NEXT: v_bfe_u32 v17, v6, 16, 1
+; GFX11-FAKE16-NEXT: v_cmp_u_f32_e32 vcc_lo, v4, v4
+; GFX11-FAKE16-NEXT: v_add_nc_u32_e32 v3, 0x7fff, v3
+; GFX11-FAKE16-NEXT: v_or_b32_e32 v18, 0x400000, v5
+; GFX11-FAKE16-NEXT: v_bfe_u32 v19, v7, 16, 1
+; GFX11-FAKE16-NEXT: v_add_nc_u32_e32 v4, v17, v6
+; GFX11-FAKE16-NEXT: v_cndmask_b32_e32 v2, v2, v16, vcc_lo
+; GFX11-FAKE16-NEXT: v_cmp_u_f32_e32 vcc_lo, v5, v5
+; GFX11-FAKE16-NEXT: v_or_b32_e32 v16, 0x400000, v6
+; GFX11-FAKE16-NEXT: v_add_nc_u32_e32 v5, v19, v7
+; GFX11-FAKE16-NEXT: v_add_nc_u32_e32 v4, 0x7fff, v4
+; GFX11-FAKE16-NEXT: s_lshl_b32 s1, s16, 16
+; GFX11-FAKE16-NEXT: v_cndmask_b32_e32 v3, v3, v18, vcc_lo
+; GFX11-FAKE16-NEXT: v_cmp_u_f32_e32 vcc_lo, v6, v6
+; GFX11-FAKE16-NEXT: v_add_nc_u32_e32 v5, 0x7fff, v5
+; GFX11-FAKE16-NEXT: v_or_b32_e32 v17, 0x400000, v7
+; GFX11-FAKE16-NEXT: v_add_f32_e64 v18, 0x40c00000, s1
+; GFX11-FAKE16-NEXT: s_and_b32 s0, s16, 0xffff0000
+; GFX11-FAKE16-NEXT: v_cndmask_b32_e32 v4, v4, v16, vcc_lo
+; GFX11-FAKE16-NEXT: v_cmp_u_f32_e32 vcc_lo, v7, v7
+; GFX11-FAKE16-NEXT: v_lshrrev_b32_e32 v3, 16, v3
+; GFX11-FAKE16-NEXT: v_add_f32_e64 v16, 0x40c00000, s0
+; GFX11-FAKE16-NEXT: v_lshrrev_b32_e32 v2, 16, v2
+; GFX11-FAKE16-NEXT: v_lshrrev_b32_e32 v4, 16, v4
+; GFX11-FAKE16-NEXT: v_cndmask_b32_e32 v5, v5, v17, vcc_lo
+; GFX11-FAKE16-NEXT: v_bfe_u32 v17, v18, 16, 1
+; GFX11-FAKE16-NEXT: v_bfe_u32 v6, v16, 16, 1
+; GFX11-FAKE16-NEXT: v_and_b32_e32 v3, 0xffff, v3
+; GFX11-FAKE16-NEXT: v_lshl_or_b32 v7, v0, 16, v1
+; GFX11-FAKE16-NEXT: v_lshrrev_b32_e32 v5, 16, v5
+; GFX11-FAKE16-NEXT: v_add_nc_u32_e32 v0, v17, v18
+; GFX11-FAKE16-NEXT: v_and_b32_e32 v4, 0xffff, v4
+; GFX11-FAKE16-NEXT: v_add_nc_u32_e32 v19, v6, v16
+; GFX11-FAKE16-NEXT: v_lshl_or_b32 v6, v2, 16, v3
+; GFX11-FAKE16-NEXT: v_or_b32_e32 v3, 0x400000, v18
+; GFX11-FAKE16-NEXT: v_add_nc_u32_e32 v0, 0x7fff, v0
+; GFX11-FAKE16-NEXT: v_cmp_u_f32_e32 vcc_lo, v18, v18
+; GFX11-FAKE16-NEXT: s_and_b32 s0, s15, 0xffff0000
+; GFX11-FAKE16-NEXT: v_lshl_or_b32 v5, v5, 16, v4
+; GFX11-FAKE16-NEXT: v_add_nc_u32_e32 v1, 0x7fff, v19
+; GFX11-FAKE16-NEXT: v_or_b32_e32 v2, 0x400000, v16
+; GFX11-FAKE16-NEXT: v_add_f32_e64 v4, 0x40c00000, s0
+; GFX11-FAKE16-NEXT: v_cndmask_b32_e32 v0, v0, v3, vcc_lo
+; GFX11-FAKE16-NEXT: v_cmp_u_f32_e32 vcc_lo, v16, v16
+; GFX11-FAKE16-NEXT: s_lshl_b32 s1, s15, 16
+; GFX11-FAKE16-NEXT: s_and_b32 s0, s14, 0xffff0000
+; GFX11-FAKE16-NEXT: v_add_f32_e64 v17, 0x40c00000, s1
+; GFX11-FAKE16-NEXT: v_add_f32_e64 v16, 0x40c00000, s0
+; GFX11-FAKE16-NEXT: v_cndmask_b32_e32 v1, v1, v2, vcc_lo
+; GFX11-FAKE16-NEXT: v_bfe_u32 v2, v4, 16, 1
+; GFX11-FAKE16-NEXT: v_or_b32_e32 v19, 0x400000, v4
+; GFX11-FAKE16-NEXT: v_bfe_u32 v3, v17, 16, 1
+; GFX11-FAKE16-NEXT: v_bfe_u32 v18, v16, 16, 1
+; GFX11-FAKE16-NEXT: v_cmp_u_f32_e32 vcc_lo, v4, v4
+; GFX11-FAKE16-NEXT: v_add_nc_u32_e32 v2, v2, v4
+; GFX11-FAKE16-NEXT: s_lshl_b32 s0, s14, 16
+; GFX11-FAKE16-NEXT: v_add_nc_u32_e32 v3, v3, v17
+; GFX11-FAKE16-NEXT: v_or_b32_e32 v20, 0x400000, v17
+; GFX11-FAKE16-NEXT: v_add_nc_u32_e32 v18, v18, v16
+; GFX11-FAKE16-NEXT: v_add_nc_u32_e32 v2, 0x7fff, v2
+; GFX11-FAKE16-NEXT: v_add_f32_e64 v4, 0x40c00000, s0
+; GFX11-FAKE16-NEXT: v_add_nc_u32_e32 v3, 0x7fff, v3
+; GFX11-FAKE16-NEXT: s_and_b32 s0, s13, 0xffff0000
+; GFX11-FAKE16-NEXT: v_lshrrev_b32_e32 v0, 16, v0
+; GFX11-FAKE16-NEXT: v_cndmask_b32_e32 v2, v2, v19, vcc_lo
+; GFX11-FAKE16-NEXT: v_cmp_u_f32_e32 vcc_lo, v17, v17
+; GFX11-FAKE16-NEXT: v_add_nc_u32_e32 v17, 0x7fff, v18
+; GFX11-FAKE16-NEXT: v_or_b32_e32 v18, 0x400000, v16
+; GFX11-FAKE16-NEXT: v_bfe_u32 v19, v4, 16, 1
+; GFX11-FAKE16-NEXT: v_lshrrev_b32_e32 v1, 16, v1
+; GFX11-FAKE16-NEXT: v_cndmask_b32_e32 v3, v3, v20, vcc_lo
+; GFX11-FAKE16-NEXT: v_cmp_u_f32_e32 vcc_lo, v16, v16
+; GFX11-FAKE16-NEXT: v_and_b32_e32 v0, 0xffff, v0
+; GFX11-FAKE16-NEXT: v_lshrrev_b32_e32 v2, 16, v2
+; GFX11-FAKE16-NEXT: s_delay_alu instid0(VALU_DEP_4)
+; GFX11-FAKE16-NEXT: v_lshrrev_b32_e32 v3, 16, v3
+; GFX11-FAKE16-NEXT: v_dual_cndmask_b32 v16, v17, v18 :: v_dual_add_nc_u32 v17, v19, v4
+; GFX11-FAKE16-NEXT: v_add_f32_e64 v18, 0x40c00000, s0
+; GFX11-FAKE16-NEXT: s_lshl_b32 s0, s13, 16
+; GFX11-FAKE16-NEXT: v_or_b32_e32 v19, 0x400000, v4
+; GFX11-FAKE16-NEXT: v_add_f32_e64 v21, 0x40c00000, s0
+; GFX11-FAKE16-NEXT: v_add_nc_u32_e32 v17, 0x7fff, v17
+; GFX11-FAKE16-NEXT: v_cmp_u_f32_e32 vcc_lo, v4, v4
+; GFX11-FAKE16-NEXT: s_lshl_b32 s0, s12, 16
+; GFX11-FAKE16-NEXT: v_bfe_u32 v20, v18, 16, 1
+; GFX11-FAKE16-NEXT: v_or_b32_e32 v26, 0x400000, v21
+; GFX11-FAKE16-NEXT: v_or_b32_e32 v25, 0x400000, v18
+; GFX11-FAKE16-NEXT: v_cndmask_b32_e32 v4, v17, v19, vcc_lo
+; GFX11-FAKE16-NEXT: v_add_f32_e64 v17, 0x40c00000, s0
+; GFX11-FAKE16-NEXT: v_bfe_u32 v19, v21, 16, 1
+; GFX11-FAKE16-NEXT: s_and_b32 s0, s12, 0xffff0000
+; GFX11-FAKE16-NEXT: v_cmp_u_f32_e32 vcc_lo, v21, v21
+; GFX11-FAKE16-NEXT: v_add_f32_e64 v22, 0x40c00000, s0
+; GFX11-FAKE16-NEXT: v_bfe_u32 v23, v17, 16, 1
+; GFX11-FAKE16-NEXT: v_add_nc_u32_e32 v19, v19, v21
+; GFX11-FAKE16-NEXT: v_add_nc_u32_e32 v20, v20, v18
+; GFX11-FAKE16-NEXT: v_or_b32_e32 v27, 0x400000, v17
+; GFX11-FAKE16-NEXT: v_bfe_u32 v24, v22, 16, 1
+; GFX11-FAKE16-NEXT: v_add_nc_u32_e32 v23, v23, v17
+; GFX11-FAKE16-NEXT: v_add_nc_u32_e32 v19, 0x7fff, v19
+; GFX11-FAKE16-NEXT: v_add_nc_u32_e32 v20, 0x7fff, v20
+; GFX11-FAKE16-NEXT: v_lshrrev_b32_e32 v4, 16, v4
+; GFX11-FAKE16-NEXT: v_add_nc_u32_e32 v24, v24, v22
+; GFX11-FAKE16-NEXT: v_add_nc_u32_e32 v23, 0x7fff, v23
+; GFX11-FAKE16-NEXT: v_cndmask_b32_e32 v19, v19, v26, vcc_lo
+; GFX11-FAKE16-NEXT: v_cmp_u_f32_e32 vcc_lo, v17, v17
+; GFX11-FAKE16-NEXT: v_and_b32_e32 v3, 0xffff, v3
+; GFX11-FAKE16-NEXT: v_add_nc_u32_e32 v21, 0x7fff, v24
+; GFX11-FAKE16-NEXT: v_or_b32_e32 v24, 0x400000, v22
+; GFX11-FAKE16-NEXT: v_lshrrev_b32_e32 v19, 16, v19
+; GFX11-FAKE16-NEXT: v_cndmask_b32_e32 v17, v23, v27, vcc_lo
+; GFX11-FAKE16-NEXT: v_cmp_u_f32_e32 vcc_lo, v18, v18
+; GFX11-FAKE16-NEXT: v_lshrrev_b32_e32 v16, 16, v16
+; GFX11-FAKE16-NEXT: v_lshl_or_b32 v3, v2, 16, v3
+; GFX11-FAKE16-NEXT: v_and_b32_e32 v19, 0xffff, v19
+; GFX11-FAKE16-NEXT: v_lshrrev_b32_e32 v17, 16, v17
+; GFX11-FAKE16-NEXT: v_cndmask_b32_e32 v18, v20, v25, vcc_lo
+; GFX11-FAKE16-NEXT: v_cmp_u_f32_e32 vcc_lo, v22, v22
+; GFX11-FAKE16-NEXT: s_delay_alu instid0(VALU_DEP_3) | instskip(NEXT) | instid1(VALU_DEP_3)
+; GFX11-FAKE16-NEXT: v_and_b32_e32 v17, 0xffff, v17
+; GFX11-FAKE16-NEXT: v_lshrrev_b32_e32 v18, 16, v18
+; GFX11-FAKE16-NEXT: v_cndmask_b32_e32 v20, v21, v24, vcc_lo
+; GFX11-FAKE16-NEXT: v_and_b32_e32 v21, 0xffff, v4
+; GFX11-FAKE16-NEXT: v_lshl_or_b32 v4, v1, 16, v0
+; GFX11-FAKE16-NEXT: s_delay_alu instid0(VALU_DEP_4) | instskip(NEXT) | instid1(VALU_DEP_4)
+; GFX11-FAKE16-NEXT: v_lshl_or_b32 v1, v18, 16, v19
+; GFX11-FAKE16-NEXT: v_lshrrev_b32_e32 v20, 16, v20
+; GFX11-FAKE16-NEXT: s_delay_alu instid0(VALU_DEP_4) | instskip(NEXT) | instid1(VALU_DEP_2)
+; GFX11-FAKE16-NEXT: v_lshl_or_b32 v2, v16, 16, v21
+; GFX11-FAKE16-NEXT: v_lshl_or_b32 v0, v20, 16, v17
+; GFX11-FAKE16-NEXT: s_setpc_b64 s[30:31]
+; GFX11-FAKE16-NEXT: .LBB47_3:
+; GFX11-FAKE16-NEXT: s_branch .LBB47_2
+; GFX11-FAKE16-NEXT: .LBB47_4:
+; GFX11-FAKE16-NEXT: v_dual_mov_b32 v0, s12 :: v_dual_mov_b32 v1, s13
+; GFX11-FAKE16-NEXT: v_dual_mov_b32 v2, s14 :: v_dual_mov_b32 v3, s15
+; GFX11-FAKE16-NEXT: v_dual_mov_b32 v4, s16 :: v_dual_mov_b32 v5, s17
+; GFX11-FAKE16-NEXT: v_dual_mov_b32 v6, s18 :: v_dual_mov_b32 v7, s19
+; GFX11-FAKE16-NEXT: v_dual_mov_b32 v8, s20 :: v_dual_mov_b32 v9, s21
+; GFX11-FAKE16-NEXT: v_dual_mov_b32 v10, s22 :: v_dual_mov_b32 v11, s23
+; GFX11-FAKE16-NEXT: v_dual_mov_b32 v12, s24 :: v_dual_mov_b32 v13, s25
+; GFX11-FAKE16-NEXT: v_dual_mov_b32 v14, s26 :: v_dual_mov_b32 v15, s27
+; GFX11-FAKE16-NEXT: s_setpc_b64 s[30:31]
%cmp = icmp eq i32 %b, 0
br i1 %cmp, label %cmp.true, label %cmp.false
@@ -35969,360 +36641,696 @@ define inreg <8 x i64> @bitcast_v32bf16_to_v8i64_scalar(<32 x bfloat> inreg %a,
; GFX9-NEXT: s_waitcnt vmcnt(0)
; GFX9-NEXT: s_setpc_b64 s[30:31]
;
-; GFX11-LABEL: bitcast_v32bf16_to_v8i64_scalar:
-; GFX11: ; %bb.0:
-; GFX11-NEXT: s_waitcnt vmcnt(0) expcnt(0) lgkmcnt(0)
-; GFX11-NEXT: s_mov_b32 s15, s3
-; GFX11-NEXT: s_mov_b32 s14, s2
-; GFX11-NEXT: s_mov_b32 s13, s1
-; GFX11-NEXT: s_mov_b32 s12, s0
-; GFX11-NEXT: s_cmp_lg_u32 s28, 0
-; GFX11-NEXT: s_mov_b32 s0, 0
-; GFX11-NEXT: s_cbranch_scc0 .LBB67_3
-; GFX11-NEXT: ; %bb.1: ; %Flow
-; GFX11-NEXT: s_and_not1_b32 vcc_lo, exec_lo, s0
-; GFX11-NEXT: s_cbranch_vccnz .LBB67_4
-; GFX11-NEXT: .LBB67_2: ; %cmp.true
-; GFX11-NEXT: s_and_b32 s1, s27, 0xffff0000
-; GFX11-NEXT: s_lshl_b32 s0, s27, 16
-; GFX11-NEXT: v_add_f32_e64 v1, 0x40c00000, s1
-; GFX11-NEXT: v_add_f32_e64 v0, 0x40c00000, s0
-; GFX11-NEXT: s_and_b32 s0, s26, 0xffff0000
-; GFX11-NEXT: s_lshl_b32 s2, s26, 16
-; GFX11-NEXT: v_add_f32_e64 v3, 0x40c00000, s0
-; GFX11-NEXT: v_bfe_u32 v4, v1, 16, 1
-; GFX11-NEXT: v_bfe_u32 v2, v0, 16, 1
-; GFX11-NEXT: v_add_f32_e64 v5, 0x40c00000, s2
-; GFX11-NEXT: v_or_b32_e32 v7, 0x400000, v1
-; GFX11-NEXT: v_or_b32_e32 v8, 0x400000, v0
-; GFX11-NEXT: v_add_nc_u32_e32 v4, v4, v1
-; GFX11-NEXT: v_bfe_u32 v9, v3, 16, 1
-; GFX11-NEXT: v_bfe_u32 v10, v5, 16, 1
-; GFX11-NEXT: v_cmp_u_f32_e32 vcc_lo, v0, v0
-; GFX11-NEXT: s_and_b32 s1, s25, 0xffff0000
-; GFX11-NEXT: v_add_nc_u32_e32 v4, 0x7fff, v4
-; GFX11-NEXT: v_add_nc_u32_e32 v2, v2, v0
-; GFX11-NEXT: v_add_f32_e64 v6, 0x40c00000, s1
-; GFX11-NEXT: s_lshl_b32 s3, s25, 16
-; GFX11-NEXT: s_and_b32 s0, s24, 0xffff0000
-; GFX11-NEXT: s_lshl_b32 s1, s24, 16
-; GFX11-NEXT: v_add_nc_u32_e32 v2, 0x7fff, v2
-; GFX11-NEXT: s_delay_alu instid0(VALU_DEP_1)
-; GFX11-NEXT: v_cndmask_b32_e32 v0, v2, v8, vcc_lo
-; GFX11-NEXT: v_add_nc_u32_e32 v2, v9, v3
-; GFX11-NEXT: v_add_nc_u32_e32 v8, v10, v5
-; GFX11-NEXT: v_cmp_u_f32_e32 vcc_lo, v1, v1
-; GFX11-NEXT: v_or_b32_e32 v9, 0x400000, v3
-; GFX11-NEXT: v_lshrrev_b32_e32 v0, 16, v0
-; GFX11-NEXT: v_dual_cndmask_b32 v1, v4, v7 :: v_dual_add_nc_u32 v2, 0x7fff, v2
-; GFX11-NEXT: v_or_b32_e32 v4, 0x400000, v5
-; GFX11-NEXT: v_add_nc_u32_e32 v7, 0x7fff, v8
-; GFX11-NEXT: v_cmp_u_f32_e32 vcc_lo, v5, v5
-; GFX11-NEXT: v_add_f32_e64 v8, 0x40c00000, s3
-; GFX11-NEXT: v_bfe_u32 v5, v6, 16, 1
-; GFX11-NEXT: v_lshrrev_b32_e32 v1, 16, v1
-; GFX11-NEXT: v_and_b32_e32 v0, 0xffff, v0
-; GFX11-NEXT: v_cndmask_b32_e32 v4, v7, v4, vcc_lo
-; GFX11-NEXT: v_cmp_u_f32_e32 vcc_lo, v3, v3
-; GFX11-NEXT: v_bfe_u32 v7, v8, 16, 1
-; GFX11-NEXT: s_delay_alu instid0(VALU_DEP_4) | instskip(NEXT) | instid1(VALU_DEP_4)
-; GFX11-NEXT: v_lshl_or_b32 v15, v1, 16, v0
-; GFX11-NEXT: v_lshrrev_b32_e32 v3, 16, v4
-; GFX11-NEXT: v_cndmask_b32_e32 v2, v2, v9, vcc_lo
-; GFX11-NEXT: v_add_nc_u32_e32 v4, v5, v6
-; GFX11-NEXT: v_or_b32_e32 v9, 0x400000, v6
-; GFX11-NEXT: v_cmp_u_f32_e32 vcc_lo, v8, v8
-; GFX11-NEXT: v_and_b32_e32 v1, 0xffff, v3
-; GFX11-NEXT: v_lshrrev_b32_e32 v0, 16, v2
-; GFX11-NEXT: v_add_nc_u32_e32 v2, 0x7fff, v4
-; GFX11-NEXT: v_add_nc_u32_e32 v5, v7, v8
-; GFX11-NEXT: v_or_b32_e32 v4, 0x400000, v8
-; GFX11-NEXT: v_add_f32_e64 v7, 0x40c00000, s1
-; GFX11-NEXT: s_lshl_b32 s1, s23, 16
-; GFX11-NEXT: v_lshl_or_b32 v14, v0, 16, v1
-; GFX11-NEXT: v_add_nc_u32_e32 v3, 0x7fff, v5
-; GFX11-NEXT: v_add_f32_e64 v5, 0x40c00000, s0
-; GFX11-NEXT: v_bfe_u32 v8, v7, 16, 1
-; GFX11-NEXT: s_and_b32 s0, s23, 0xffff0000
-; GFX11-NEXT: s_delay_alu instid0(VALU_DEP_3) | instskip(NEXT) | instid1(VALU_DEP_3)
-; GFX11-NEXT: v_cndmask_b32_e32 v3, v3, v4, vcc_lo
-; GFX11-NEXT: v_bfe_u32 v4, v5, 16, 1
-; GFX11-NEXT: v_cmp_u_f32_e32 vcc_lo, v6, v6
-; GFX11-NEXT: v_add_nc_u32_e32 v6, v8, v7
-; GFX11-NEXT: v_add_f32_e64 v8, 0x40c00000, s1
-; GFX11-NEXT: v_lshrrev_b32_e32 v3, 16, v3
-; GFX11-NEXT: v_add_nc_u32_e32 v4, v4, v5
-; GFX11-NEXT: v_cndmask_b32_e32 v2, v2, v9, vcc_lo
-; GFX11-NEXT: v_or_b32_e32 v9, 0x400000, v5
-; GFX11-NEXT: v_cmp_u_f32_e32 vcc_lo, v7, v7
-; GFX11-NEXT: s_lshl_b32 s1, s22, 16
-; GFX11-NEXT: s_delay_alu instid0(VALU_DEP_3)
-; GFX11-NEXT: v_lshrrev_b32_e32 v0, 16, v2
-; GFX11-NEXT: v_add_nc_u32_e32 v2, 0x7fff, v4
-; GFX11-NEXT: v_and_b32_e32 v1, 0xffff, v3
-; GFX11-NEXT: v_add_nc_u32_e32 v3, 0x7fff, v6
-; GFX11-NEXT: v_or_b32_e32 v4, 0x400000, v7
-; GFX11-NEXT: v_add_f32_e64 v6, 0x40c00000, s0
-; GFX11-NEXT: v_bfe_u32 v7, v8, 16, 1
-; GFX11-NEXT: s_and_b32 s0, s22, 0xffff0000
-; GFX11-NEXT: s_delay_alu instid0(VALU_DEP_3) | instskip(NEXT) | instid1(VALU_DEP_3)
-; GFX11-NEXT: v_cndmask_b32_e32 v3, v3, v4, vcc_lo
-; GFX11-NEXT: v_bfe_u32 v4, v6, 16, 1
-; GFX11-NEXT: v_cmp_u_f32_e32 vcc_lo, v5, v5
-; GFX11-NEXT: v_add_nc_u32_e32 v5, v7, v8
-; GFX11-NEXT: v_add_f32_e64 v7, 0x40c00000, s1
-; GFX11-NEXT: v_lshrrev_b32_e32 v3, 16, v3
-; GFX11-NEXT: v_add_nc_u32_e32 v4, v4, v6
-; GFX11-NEXT: v_cndmask_b32_e32 v2, v2, v9, vcc_lo
-; GFX11-NEXT: v_lshl_or_b32 v13, v0, 16, v1
-; GFX11-NEXT: v_cmp_u_f32_e32 vcc_lo, v8, v8
-; GFX11-NEXT: v_and_b32_e32 v1, 0xffff, v3
-; GFX11-NEXT: v_add_nc_u32_e32 v3, 0x7fff, v5
-; GFX11-NEXT: v_lshrrev_b32_e32 v0, 16, v2
-; GFX11-NEXT: v_add_nc_u32_e32 v2, 0x7fff, v4
-; GFX11-NEXT: v_or_b32_e32 v4, 0x400000, v8
-; GFX11-NEXT: v_add_f32_e64 v5, 0x40c00000, s0
-; GFX11-NEXT: v_or_b32_e32 v9, 0x400000, v6
-; GFX11-NEXT: v_bfe_u32 v8, v7, 16, 1
-; GFX11-NEXT: s_and_b32 s0, s21, 0xffff0000
-; GFX11-NEXT: v_cndmask_b32_e32 v3, v3, v4, vcc_lo
-; GFX11-NEXT: v_bfe_u32 v4, v5, 16, 1
-; GFX11-NEXT: v_cmp_u_f32_e32 vcc_lo, v6, v6
-; GFX11-NEXT: v_add_nc_u32_e32 v6, v8, v7
-; GFX11-NEXT: s_lshl_b32 s1, s21, 16
-; GFX11-NEXT: v_lshrrev_b32_e32 v3, 16, v3
-; GFX11-NEXT: v_add_nc_u32_e32 v4, v4, v5
-; GFX11-NEXT: v_cndmask_b32_e32 v2, v2, v9, vcc_lo
-; GFX11-NEXT: v_lshl_or_b32 v12, v0, 16, v1
-; GFX11-NEXT: v_add_f32_e64 v8, 0x40c00000, s1
-; GFX11-NEXT: v_and_b32_e32 v1, 0xffff, v3
-; GFX11-NEXT: v_add_nc_u32_e32 v3, 0x7fff, v6
-; GFX11-NEXT: v_lshrrev_b32_e32 v0, 16, v2
-; GFX11-NEXT: v_add_nc_u32_e32 v2, 0x7fff, v4
-; GFX11-NEXT: v_or_b32_e32 v4, 0x400000, v7
-; GFX11-NEXT: v_add_f32_e64 v6, 0x40c00000, s0
-; GFX11-NEXT: v_cmp_u_f32_e32 vcc_lo, v7, v7
-; GFX11-NEXT: v_or_b32_e32 v9, 0x400000, v5
-; GFX11-NEXT: v_bfe_u32 v7, v8, 16, 1
-; GFX11-NEXT: s_and_b32 s0, s20, 0xffff0000
-; GFX11-NEXT: s_lshl_b32 s1, s20, 16
-; GFX11-NEXT: v_cndmask_b32_e32 v3, v3, v4, vcc_lo
-; GFX11-NEXT: v_bfe_u32 v4, v6, 16, 1
-; GFX11-NEXT: v_cmp_u_f32_e32 vcc_lo, v5, v5
-; GFX11-NEXT: v_add_nc_u32_e32 v5, v7, v8
-; GFX11-NEXT: v_lshl_or_b32 v11, v0, 16, v1
-; GFX11-NEXT: v_lshrrev_b32_e32 v3, 16, v3
-; GFX11-NEXT: v_add_nc_u32_e32 v4, v4, v6
-; GFX11-NEXT: v_cndmask_b32_e32 v2, v2, v9, vcc_lo
-; GFX11-NEXT: v_add_f32_e64 v7, 0x40c00000, s1
-; GFX11-NEXT: v_cmp_u_f32_e32 vcc_lo, v8, v8
-; GFX11-NEXT: v_and_b32_e32 v1, 0xffff, v3
-; GFX11-NEXT: v_add_nc_u32_e32 v3, 0x7fff, v5
-; GFX11-NEXT: v_lshrrev_b32_e32 v0, 16, v2
-; GFX11-NEXT: v_add_nc_u32_e32 v2, 0x7fff, v4
-; GFX11-NEXT: v_or_b32_e32 v4, 0x400000, v8
-; GFX11-NEXT: v_add_f32_e64 v5, 0x40c00000, s0
-; GFX11-NEXT: v_or_b32_e32 v9, 0x400000, v6
-; GFX11-NEXT: v_bfe_u32 v8, v7, 16, 1
-; GFX11-NEXT: v_lshl_or_b32 v10, v0, 16, v1
-; GFX11-NEXT: v_cndmask_b32_e32 v3, v3, v4, vcc_lo
-; GFX11-NEXT: v_bfe_u32 v4, v5, 16, 1
-; GFX11-NEXT: v_cmp_u_f32_e32 vcc_lo, v6, v6
-; GFX11-NEXT: v_add_nc_u32_e32 v0, v8, v7
-; GFX11-NEXT: s_lshl_b32 s0, s19, 16
-; GFX11-NEXT: v_lshrrev_b32_e32 v3, 16, v3
-; GFX11-NEXT: v_add_nc_u32_e32 v4, v4, v5
-; GFX11-NEXT: v_cndmask_b32_e32 v2, v2, v9, vcc_lo
-; GFX11-NEXT: v_add_nc_u32_e32 v0, 0x7fff, v0
-; GFX11-NEXT: v_or_b32_e32 v6, 0x400000, v7
-; GFX11-NEXT: v_cmp_u_f32_e32 vcc_lo, v7, v7
-; GFX11-NEXT: v_or_b32_e32 v9, 0x400000, v5
-; GFX11-NEXT: v_lshrrev_b32_e32 v1, 16, v2
-; GFX11-NEXT: v_and_b32_e32 v2, 0xffff, v3
-; GFX11-NEXT: v_add_nc_u32_e32 v3, 0x7fff, v4
-; GFX11-NEXT: v_add_f32_e64 v4, 0x40c00000, s0
-; GFX11-NEXT: s_and_b32 s0, s19, 0xffff0000
-; GFX11-NEXT: v_cndmask_b32_e32 v0, v0, v6, vcc_lo
-; GFX11-NEXT: v_add_f32_e64 v8, 0x40c00000, s0
-; GFX11-NEXT: v_cmp_u_f32_e32 vcc_lo, v5, v5
-; GFX11-NEXT: v_bfe_u32 v16, v4, 16, 1
-; GFX11-NEXT: s_and_b32 s0, s18, 0xffff0000
-; GFX11-NEXT: v_lshrrev_b32_e32 v0, 16, v0
-; GFX11-NEXT: v_bfe_u32 v6, v8, 16, 1
-; GFX11-NEXT: v_cndmask_b32_e32 v3, v3, v9, vcc_lo
-; GFX11-NEXT: v_add_nc_u32_e32 v5, v16, v4
-; GFX11-NEXT: v_lshl_or_b32 v9, v1, 16, v2
-; GFX11-NEXT: v_cmp_u_f32_e32 vcc_lo, v4, v4
-; GFX11-NEXT: v_add_nc_u32_e32 v1, v6, v8
-; GFX11-NEXT: v_lshrrev_b32_e32 v2, 16, v3
-; GFX11-NEXT: v_add_nc_u32_e32 v3, 0x7fff, v5
-; GFX11-NEXT: v_or_b32_e32 v5, 0x400000, v4
-; GFX11-NEXT: v_or_b32_e32 v6, 0x400000, v8
-; GFX11-NEXT: v_add_nc_u32_e32 v1, 0x7fff, v1
-; GFX11-NEXT: v_add_f32_e64 v4, 0x40c00000, s0
-; GFX11-NEXT: s_lshl_b32 s1, s18, 16
-; GFX11-NEXT: v_cndmask_b32_e32 v3, v3, v5, vcc_lo
-; GFX11-NEXT: v_cmp_u_f32_e32 vcc_lo, v8, v8
-; GFX11-NEXT: v_and_b32_e32 v0, 0xffff, v0
-; GFX11-NEXT: v_add_f32_e64 v5, 0x40c00000, s1
-; GFX11-NEXT: s_lshl_b32 s0, s17, 16
-; GFX11-NEXT: v_lshrrev_b32_e32 v3, 16, v3
-; GFX11-NEXT: v_cndmask_b32_e32 v1, v1, v6, vcc_lo
-; GFX11-NEXT: v_bfe_u32 v6, v4, 16, 1
-; GFX11-NEXT: v_bfe_u32 v7, v5, 16, 1
-; GFX11-NEXT: v_lshl_or_b32 v8, v2, 16, v0
-; GFX11-NEXT: s_and_b32 s1, s17, 0xffff0000
-; GFX11-NEXT: v_lshrrev_b32_e32 v0, 16, v1
-; GFX11-NEXT: v_add_nc_u32_e32 v2, v6, v4
-; GFX11-NEXT: v_add_f32_e64 v6, 0x40c00000, s0
-; GFX11-NEXT: v_and_b32_e32 v1, 0xffff, v3
-; GFX11-NEXT: v_add_nc_u32_e32 v3, v7, v5
-; GFX11-NEXT: v_add_f32_e64 v7, 0x40c00000, s1
-; GFX11-NEXT: v_add_nc_u32_e32 v2, 0x7fff, v2
-; GFX11-NEXT: v_or_b32_e32 v16, 0x400000, v4
-; GFX11-NEXT: v_bfe_u32 v17, v6, 16, 1
-; GFX11-NEXT: v_cmp_u_f32_e32 vcc_lo, v4, v4
-; GFX11-NEXT: v_add_nc_u32_e32 v3, 0x7fff, v3
-; GFX11-NEXT: v_or_b32_e32 v18, 0x400000, v5
-; GFX11-NEXT: v_bfe_u32 v19, v7, 16, 1
-; GFX11-NEXT: v_add_nc_u32_e32 v4, v17, v6
-; GFX11-NEXT: v_cndmask_b32_e32 v2, v2, v16, vcc_lo
-; GFX11-NEXT: v_cmp_u_f32_e32 vcc_lo, v5, v5
-; GFX11-NEXT: v_or_b32_e32 v16, 0x400000, v6
-; GFX11-NEXT: v_add_nc_u32_e32 v5, v19, v7
-; GFX11-NEXT: v_add_nc_u32_e32 v4, 0x7fff, v4
-; GFX11-NEXT: s_lshl_b32 s1, s16, 16
-; GFX11-NEXT: v_cndmask_b32_e32 v3, v3, v18, vcc_lo
-; GFX11-NEXT: v_cmp_u_f32_e32 vcc_lo, v6, v6
-; GFX11-NEXT: v_add_nc_u32_e32 v5, 0x7fff, v5
-; GFX11-NEXT: v_or_b32_e32 v17, 0x400000, v7
-; GFX11-NEXT: v_add_f32_e64 v18, 0x40c00000, s1
-; GFX11-NEXT: s_and_b32 s0, s16, 0xffff0000
-; GFX11-NEXT: v_cndmask_b32_e32 v4, v4, v16, vcc_lo
-; GFX11-NEXT: v_cmp_u_f32_e32 vcc_lo, v7, v7
-; GFX11-NEXT: v_lshrrev_b32_e32 v3, 16, v3
-; GFX11-NEXT: v_add_f32_e64 v16, 0x40c00000, s0
-; GFX11-NEXT: v_lshrrev_b32_e32 v2, 16, v2
-; GFX11-NEXT: v_lshrrev_b32_e32 v4, 16, v4
-; GFX11-NEXT: v_cndmask_b32_e32 v5, v5, v17, vcc_lo
-; GFX11-NEXT: v_bfe_u32 v17, v18, 16, 1
-; GFX11-NEXT: v_bfe_u32 v6, v16, 16, 1
-; GFX11-NEXT: v_and_b32_e32 v3, 0xffff, v3
-; GFX11-NEXT: v_lshl_or_b32 v7, v0, 16, v1
-; GFX11-NEXT: v_lshrrev_b32_e32 v5, 16, v5
-; GFX11-NEXT: v_add_nc_u32_e32 v0, v17, v18
-; GFX11-NEXT: v_and_b32_e32 v4, 0xffff, v4
-; GFX11-NEXT: v_add_nc_u32_e32 v19, v6, v16
-; GFX11-NEXT: v_lshl_or_b32 v6, v2, 16, v3
-; GFX11-NEXT: v_or_b32_e32 v3, 0x400000, v18
-; GFX11-NEXT: v_add_nc_u32_e32 v0, 0x7fff, v0
-; GFX11-NEXT: v_cmp_u_f32_e32 vcc_lo, v18, v18
-; GFX11-NEXT: s_and_b32 s0, s15, 0xffff0000
-; GFX11-NEXT: v_lshl_or_b32 v5, v5, 16, v4
-; GFX11-NEXT: v_add_nc_u32_e32 v1, 0x7fff, v19
-; GFX11-NEXT: v_or_b32_e32 v2, 0x400000, v16
-; GFX11-NEXT: v_add_f32_e64 v4, 0x40c00000, s0
-; GFX11-NEXT: v_cndmask_b32_e32 v0, v0, v3, vcc_lo
-; GFX11-NEXT: v_cmp_u_f32_e32 vcc_lo, v16, v16
-; GFX11-NEXT: s_lshl_b32 s1, s15, 16
-; GFX11-NEXT: s_and_b32 s0, s14, 0xffff0000
-; GFX11-NEXT: v_add_f32_e64 v17, 0x40c00000, s1
-; GFX11-NEXT: v_add_f32_e64 v16, 0x40c00000, s0
-; GFX11-NEXT: v_cndmask_b32_e32 v1, v1, v2, vcc_lo
-; GFX11-NEXT: v_bfe_u32 v2, v4, 16, 1
-; GFX11-NEXT: v_or_b32_e32 v19, 0x400000, v4
-; GFX11-NEXT: v_bfe_u32 v3, v17, 16, 1
-; GFX11-NEXT: v_bfe_u32 v18, v16, 16, 1
-; GFX11-NEXT: v_cmp_u_f32_e32 vcc_lo, v4, v4
-; GFX11-NEXT: v_add_nc_u32_e32 v2, v2, v4
-; GFX11-NEXT: s_lshl_b32 s0, s14, 16
-; GFX11-NEXT: v_add_nc_u32_e32 v3, v3, v17
-; GFX11-NEXT: v_or_b32_e32 v20, 0x400000, v17
-; GFX11-NEXT: v_add_nc_u32_e32 v18, v18, v16
-; GFX11-NEXT: v_add_nc_u32_e32 v2, 0x7fff, v2
-; GFX11-NEXT: v_add_f32_e64 v4, 0x40c00000, s0
-; GFX11-NEXT: v_add_nc_u32_e32 v3, 0x7fff, v3
-; GFX11-NEXT: s_and_b32 s0, s13, 0xffff0000
-; GFX11-NEXT: v_lshrrev_b32_e32 v0, 16, v0
-; GFX11-NEXT: v_cndmask_b32_e32 v2, v2, v19, vcc_lo
-; GFX11-NEXT: v_cmp_u_f32_e32 vcc_lo, v17, v17
-; GFX11-NEXT: v_add_nc_u32_e32 v17, 0x7fff, v18
-; GFX11-NEXT: v_or_b32_e32 v18, 0x400000, v16
-; GFX11-NEXT: v_bfe_u32 v19, v4, 16, 1
-; GFX11-NEXT: v_lshrrev_b32_e32 v1, 16, v1
-; GFX11-NEXT: v_cndmask_b32_e32 v3, v3, v20, vcc_lo
-; GFX11-NEXT: v_cmp_u_f32_e32 vcc_lo, v16, v16
-; GFX11-NEXT: v_and_b32_e32 v0, 0xffff, v0
-; GFX11-NEXT: v_lshrrev_b32_e32 v2, 16, v2
-; GFX11-NEXT: s_delay_alu instid0(VALU_DEP_4)
-; GFX11-NEXT: v_lshrrev_b32_e32 v3, 16, v3
-; GFX11-NEXT: v_dual_cndmask_b32 v16, v17, v18 :: v_dual_add_nc_u32 v17, v19, v4
-; GFX11-NEXT: v_add_f32_e64 v18, 0x40c00000, s0
-; GFX11-NEXT: s_lshl_b32 s0, s13, 16
-; GFX11-NEXT: v_or_b32_e32 v19, 0x400000, v4
-; GFX11-NEXT: v_add_f32_e64 v21, 0x40c00000, s0
-; GFX11-NEXT: v_add_nc_u32_e32 v17, 0x7fff, v17
-; GFX11-NEXT: v_cmp_u_f32_e32 vcc_lo, v4, v4
-; GFX11-NEXT: s_lshl_b32 s0, s12, 16
-; GFX11-NEXT: v_bfe_u32 v20, v18, 16, 1
-; GFX11-NEXT: v_or_b32_e32 v26, 0x400000, v21
-; GFX11-NEXT: v_or_b32_e32 v25, 0x400000, v18
-; GFX11-NEXT: v_cndmask_b32_e32 v4, v17, v19, vcc_lo
-; GFX11-NEXT: v_add_f32_e64 v17, 0x40c00000, s0
-; GFX11-NEXT: v_bfe_u32 v19, v21, 16, 1
-; GFX11-NEXT: s_and_b32 s0, s12, 0xffff0000
-; GFX11-NEXT: v_cmp_u_f32_e32 vcc_lo, v21, v21
-; GFX11-NEXT: v_add_f32_e64 v22, 0x40c00000, s0
-; GFX11-NEXT: v_bfe_u32 v23, v17, 16, 1
-; GFX11-NEXT: v_add_nc_u32_e32 v19, v19, v21
-; GFX11-NEXT: v_add_nc_u32_e32 v20, v20, v18
-; GFX11-NEXT: v_or_b32_e32 v27, 0x400000, v17
-; GFX11-NEXT: v_bfe_u32 v24, v22, 16, 1
-; GFX11-NEXT: v_add_nc_u32_e32 v23, v23, v17
-; GFX11-NEXT: v_add_nc_u32_e32 v19, 0x7fff, v19
-; GFX11-NEXT: v_add_nc_u32_e32 v20, 0x7fff, v20
-; GFX11-NEXT: v_lshrrev_b32_e32 v4, 16, v4
-; GFX11-NEXT: v_add_nc_u32_e32 v24, v24, v22
-; GFX11-NEXT: v_add_nc_u32_e32 v23, 0x7fff, v23
-; GFX11-NEXT: v_cndmask_b32_e32 v19, v19, v26, vcc_lo
-; GFX11-NEXT: v_cmp_u_f32_e32 vcc_lo, v17, v17
-; GFX11-NEXT: v_and_b32_e32 v3, 0xffff, v3
-; GFX11-NEXT: v_add_nc_u32_e32 v21, 0x7fff, v24
-; GFX11-NEXT: v_or_b32_e32 v24, 0x400000, v22
-; GFX11-NEXT: v_lshrrev_b32_e32 v19, 16, v19
-; GFX11-NEXT: v_cndmask_b32_e32 v17, v23, v27, vcc_lo
-; GFX11-NEXT: v_cmp_u_f32_e32 vcc_lo, v18, v18
-; GFX11-NEXT: v_lshrrev_b32_e32 v16, 16, v16
-; GFX11-NEXT: v_lshl_or_b32 v3, v2, 16, v3
-; GFX11-NEXT: v_and_b32_e32 v19, 0xffff, v19
-; GFX11-NEXT: v_lshrrev_b32_e32 v17, 16, v17
-; GFX11-NEXT: v_cndmask_b32_e32 v18, v20, v25, vcc_lo
-; GFX11-NEXT: v_cmp_u_f32_e32 vcc_lo, v22, v22
-; GFX11-NEXT: s_delay_alu instid0(VALU_DEP_3) | instskip(NEXT) | instid1(VALU_DEP_3)
-; GFX11-NEXT: v_and_b32_e32 v17, 0xffff, v17
-; GFX11-NEXT: v_lshrrev_b32_e32 v18, 16, v18
-; GFX11-NEXT: v_cndmask_b32_e32 v20, v21, v24, vcc_lo
-; GFX11-NEXT: v_and_b32_e32 v21, 0xffff, v4
-; GFX11-NEXT: v_lshl_or_b32 v4, v1, 16, v0
-; GFX11-NEXT: s_delay_alu instid0(VALU_DEP_4) | instskip(NEXT) | instid1(VALU_DEP_4)
-; GFX11-NEXT: v_lshl_or_b32 v1, v18, 16, v19
-; GFX11-NEXT: v_lshrrev_b32_e32 v20, 16, v20
-; GFX11-NEXT: s_delay_alu instid0(VALU_DEP_4) | instskip(NEXT) | instid1(VALU_DEP_2)
-; GFX11-NEXT: v_lshl_or_b32 v2, v16, 16, v21
-; GFX11-NEXT: v_lshl_or_b32 v0, v20, 16, v17
-; GFX11-NEXT: s_setpc_b64 s[30:31]
-; GFX11-NEXT: .LBB67_3:
-; GFX11-NEXT: s_branch .LBB67_2
-; GFX11-NEXT: .LBB67_4:
-; GFX11-NEXT: v_dual_mov_b32 v0, s12 :: v_dual_mov_b32 v1, s13
-; GFX11-NEXT: v_dual_mov_b32 v2, s14 :: v_dual_mov_b32 v3, s15
-; GFX11-NEXT: v_dual_mov_b32 v4, s16 :: v_dual_mov_b32 v5, s17
-; GFX11-NEXT: v_dual_mov_b32 v6, s18 :: v_dual_mov_b32 v7, s19
-; GFX11-NEXT: v_dual_mov_b32 v8, s20 :: v_dual_mov_b32 v9, s21
-; GFX11-NEXT: v_dual_mov_b32 v10, s22 :: v_dual_mov_b32 v11, s23
-; GFX11-NEXT: v_dual_mov_b32 v12, s24 :: v_dual_mov_b32 v13, s25
-; GFX11-NEXT: v_dual_mov_b32 v14, s26 :: v_dual_mov_b32 v15, s27
-; GFX11-NEXT: s_setpc_b64 s[30:31]
+; GFX11-TRUE16-LABEL: bitcast_v32bf16_to_v8i64_scalar:
+; GFX11-TRUE16: ; %bb.0:
+; GFX11-TRUE16-NEXT: s_waitcnt vmcnt(0) expcnt(0) lgkmcnt(0)
+; GFX11-TRUE16-NEXT: s_mov_b32 s15, s3
+; GFX11-TRUE16-NEXT: s_mov_b32 s14, s2
+; GFX11-TRUE16-NEXT: s_mov_b32 s13, s1
+; GFX11-TRUE16-NEXT: s_mov_b32 s12, s0
+; GFX11-TRUE16-NEXT: s_cmp_lg_u32 s28, 0
+; GFX11-TRUE16-NEXT: s_mov_b32 s0, 0
+; GFX11-TRUE16-NEXT: s_cbranch_scc0 .LBB67_3
+; GFX11-TRUE16-NEXT: ; %bb.1: ; %Flow
+; GFX11-TRUE16-NEXT: s_and_not1_b32 vcc_lo, exec_lo, s0
+; GFX11-TRUE16-NEXT: s_cbranch_vccnz .LBB67_4
+; GFX11-TRUE16-NEXT: .LBB67_2: ; %cmp.true
+; GFX11-TRUE16-NEXT: s_and_b32 s0, s27, 0xffff0000
+; GFX11-TRUE16-NEXT: s_lshl_b32 s1, s27, 16
+; GFX11-TRUE16-NEXT: v_add_f32_e64 v0, 0x40c00000, s0
+; GFX11-TRUE16-NEXT: v_add_f32_e64 v1, 0x40c00000, s1
+; GFX11-TRUE16-NEXT: s_and_b32 s0, s26, 0xffff0000
+; GFX11-TRUE16-NEXT: s_lshl_b32 s1, s26, 16
+; GFX11-TRUE16-NEXT: v_add_f32_e64 v4, 0x40c00000, s0
+; GFX11-TRUE16-NEXT: v_bfe_u32 v2, v0, 16, 1
+; GFX11-TRUE16-NEXT: v_bfe_u32 v3, v1, 16, 1
+; GFX11-TRUE16-NEXT: v_or_b32_e32 v6, 0x400000, v0
+; GFX11-TRUE16-NEXT: v_or_b32_e32 v8, 0x400000, v1
+; GFX11-TRUE16-NEXT: v_cmp_u_f32_e32 vcc_lo, v0, v0
+; GFX11-TRUE16-NEXT: v_add_nc_u32_e32 v2, v2, v0
+; GFX11-TRUE16-NEXT: v_add_f32_e64 v5, 0x40c00000, s1
+; GFX11-TRUE16-NEXT: v_bfe_u32 v7, v4, 16, 1
+; GFX11-TRUE16-NEXT: s_and_b32 s0, s25, 0xffff0000
+; GFX11-TRUE16-NEXT: s_lshl_b32 s1, s17, 16
+; GFX11-TRUE16-NEXT: v_add_nc_u32_e32 v2, 0x7fff, v2
+; GFX11-TRUE16-NEXT: v_add_nc_u32_e32 v3, v3, v1
+; GFX11-TRUE16-NEXT: v_bfe_u32 v9, v5, 16, 1
+; GFX11-TRUE16-NEXT: s_delay_alu instid0(VALU_DEP_3) | instskip(NEXT) | instid1(VALU_DEP_3)
+; GFX11-TRUE16-NEXT: v_dual_cndmask_b32 v0, v2, v6 :: v_dual_add_nc_u32 v7, v7, v4
+; GFX11-TRUE16-NEXT: v_add_nc_u32_e32 v3, 0x7fff, v3
+; GFX11-TRUE16-NEXT: v_cmp_u_f32_e32 vcc_lo, v1, v1
+; GFX11-TRUE16-NEXT: s_delay_alu instid0(VALU_DEP_4)
+; GFX11-TRUE16-NEXT: v_add_nc_u32_e32 v2, v9, v5
+; GFX11-TRUE16-NEXT: v_or_b32_e32 v6, 0x400000, v4
+; GFX11-TRUE16-NEXT: v_lshrrev_b32_e32 v0, 16, v0
+; GFX11-TRUE16-NEXT: v_cndmask_b32_e32 v1, v3, v8, vcc_lo
+; GFX11-TRUE16-NEXT: v_add_nc_u32_e32 v3, 0x7fff, v7
+; GFX11-TRUE16-NEXT: v_cmp_u_f32_e32 vcc_lo, v4, v4
+; GFX11-TRUE16-NEXT: v_add_nc_u32_e32 v2, 0x7fff, v2
+; GFX11-TRUE16-NEXT: v_or_b32_e32 v7, 0x400000, v5
+; GFX11-TRUE16-NEXT: v_lshrrev_b32_e32 v15, 16, v1
+; GFX11-TRUE16-NEXT: v_add_f32_e64 v1, 0x40c00000, s0
+; GFX11-TRUE16-NEXT: s_lshl_b32 s0, s25, 16
+; GFX11-TRUE16-NEXT: v_cndmask_b32_e32 v3, v3, v6, vcc_lo
+; GFX11-TRUE16-NEXT: v_add_f32_e64 v9, 0x40c00000, s0
+; GFX11-TRUE16-NEXT: v_cmp_u_f32_e32 vcc_lo, v5, v5
+; GFX11-TRUE16-NEXT: v_bfe_u32 v8, v1, 16, 1
+; GFX11-TRUE16-NEXT: s_and_b32 s0, s24, 0xffff0000
+; GFX11-TRUE16-NEXT: v_mov_b16_e32 v15.h, v0.l
+; GFX11-TRUE16-NEXT: v_bfe_u32 v5, v9, 16, 1
+; GFX11-TRUE16-NEXT: v_cndmask_b32_e32 v2, v2, v7, vcc_lo
+; GFX11-TRUE16-NEXT: v_add_nc_u32_e32 v4, v8, v1
+; GFX11-TRUE16-NEXT: v_lshrrev_b32_e32 v0, 16, v3
+; GFX11-TRUE16-NEXT: v_cmp_u_f32_e32 vcc_lo, v1, v1
+; GFX11-TRUE16-NEXT: v_add_nc_u32_e32 v3, v5, v9
+; GFX11-TRUE16-NEXT: v_lshrrev_b32_e32 v14, 16, v2
+; GFX11-TRUE16-NEXT: v_add_nc_u32_e32 v2, 0x7fff, v4
+; GFX11-TRUE16-NEXT: v_or_b32_e32 v4, 0x400000, v1
+; GFX11-TRUE16-NEXT: v_add_f32_e64 v5, 0x40c00000, s0
+; GFX11-TRUE16-NEXT: s_lshl_b32 s0, s24, 16
+; GFX11-TRUE16-NEXT: v_add_nc_u32_e32 v3, 0x7fff, v3
+; GFX11-TRUE16-NEXT: v_add_f32_e64 v6, 0x40c00000, s0
+; GFX11-TRUE16-NEXT: v_or_b32_e32 v7, 0x400000, v9
+; GFX11-TRUE16-NEXT: v_bfe_u32 v8, v5, 16, 1
+; GFX11-TRUE16-NEXT: v_cndmask_b32_e32 v1, v2, v4, vcc_lo
+; GFX11-TRUE16-NEXT: v_cmp_u_f32_e32 vcc_lo, v9, v9
+; GFX11-TRUE16-NEXT: v_bfe_u32 v2, v6, 16, 1
+; GFX11-TRUE16-NEXT: s_and_b32 s0, s23, 0xffff0000
+; GFX11-TRUE16-NEXT: v_add_nc_u32_e32 v4, v8, v5
+; GFX11-TRUE16-NEXT: v_mov_b16_e32 v14.h, v0.l
+; GFX11-TRUE16-NEXT: v_cndmask_b32_e32 v3, v3, v7, vcc_lo
+; GFX11-TRUE16-NEXT: v_lshrrev_b32_e32 v0, 16, v1
+; GFX11-TRUE16-NEXT: v_add_nc_u32_e32 v1, v2, v6
+; GFX11-TRUE16-NEXT: v_add_nc_u32_e32 v2, 0x7fff, v4
+; GFX11-TRUE16-NEXT: v_add_f32_e64 v4, 0x40c00000, s0
+; GFX11-TRUE16-NEXT: v_lshrrev_b32_e32 v13, 16, v3
+; GFX11-TRUE16-NEXT: v_or_b32_e32 v3, 0x400000, v5
+; GFX11-TRUE16-NEXT: v_cmp_u_f32_e32 vcc_lo, v5, v5
+; GFX11-TRUE16-NEXT: s_lshl_b32 s0, s23, 16
+; GFX11-TRUE16-NEXT: v_add_nc_u32_e32 v1, 0x7fff, v1
+; GFX11-TRUE16-NEXT: v_or_b32_e32 v7, 0x400000, v6
+; GFX11-TRUE16-NEXT: v_add_f32_e64 v8, 0x40c00000, s0
+; GFX11-TRUE16-NEXT: v_cndmask_b32_e32 v2, v2, v3, vcc_lo
+; GFX11-TRUE16-NEXT: v_bfe_u32 v3, v4, 16, 1
+; GFX11-TRUE16-NEXT: v_cmp_u_f32_e32 vcc_lo, v6, v6
+; GFX11-TRUE16-NEXT: v_mov_b16_e32 v13.h, v0.l
+; GFX11-TRUE16-NEXT: v_bfe_u32 v5, v8, 16, 1
+; GFX11-TRUE16-NEXT: s_and_b32 s0, s22, 0xffff0000
+; GFX11-TRUE16-NEXT: v_dual_cndmask_b32 v1, v1, v7 :: v_dual_add_nc_u32 v0, v3, v4
+; GFX11-TRUE16-NEXT: v_add_f32_e64 v3, 0x40c00000, s0
+; GFX11-TRUE16-NEXT: v_cmp_u_f32_e32 vcc_lo, v4, v4
+; GFX11-TRUE16-NEXT: s_lshl_b32 s0, s22, 16
+; GFX11-TRUE16-NEXT: s_delay_alu instid0(VALU_DEP_3)
+; GFX11-TRUE16-NEXT: v_add_nc_u32_e32 v0, 0x7fff, v0
+; GFX11-TRUE16-NEXT: v_lshrrev_b32_e32 v12, 16, v1
+; GFX11-TRUE16-NEXT: v_add_nc_u32_e32 v1, v5, v8
+; GFX11-TRUE16-NEXT: v_or_b32_e32 v5, 0x400000, v4
+; GFX11-TRUE16-NEXT: v_or_b32_e32 v6, 0x400000, v8
+; GFX11-TRUE16-NEXT: v_bfe_u32 v7, v3, 16, 1
+; GFX11-TRUE16-NEXT: v_add_f32_e64 v9, 0x40c00000, s0
+; GFX11-TRUE16-NEXT: v_add_nc_u32_e32 v1, 0x7fff, v1
+; GFX11-TRUE16-NEXT: v_cndmask_b32_e32 v0, v0, v5, vcc_lo
+; GFX11-TRUE16-NEXT: v_cmp_u_f32_e32 vcc_lo, v8, v8
+; GFX11-TRUE16-NEXT: v_lshrrev_b32_e32 v2, 16, v2
+; GFX11-TRUE16-NEXT: v_add_nc_u32_e32 v4, v7, v3
+; GFX11-TRUE16-NEXT: v_bfe_u32 v5, v9, 16, 1
+; GFX11-TRUE16-NEXT: s_and_b32 s0, s21, 0xffff0000
+; GFX11-TRUE16-NEXT: v_cndmask_b32_e32 v1, v1, v6, vcc_lo
+; GFX11-TRUE16-NEXT: v_mov_b16_e32 v12.h, v2.l
+; GFX11-TRUE16-NEXT: v_cmp_u_f32_e32 vcc_lo, v3, v3
+; GFX11-TRUE16-NEXT: v_add_nc_u32_e32 v2, v5, v9
+; GFX11-TRUE16-NEXT: v_add_f32_e64 v5, 0x40c00000, s0
+; GFX11-TRUE16-NEXT: v_lshrrev_b32_e32 v11, 16, v1
+; GFX11-TRUE16-NEXT: v_add_nc_u32_e32 v1, 0x7fff, v4
+; GFX11-TRUE16-NEXT: v_or_b32_e32 v4, 0x400000, v3
+; GFX11-TRUE16-NEXT: s_lshl_b32 s0, s21, 16
+; GFX11-TRUE16-NEXT: v_add_nc_u32_e32 v2, 0x7fff, v2
+; GFX11-TRUE16-NEXT: v_add_f32_e64 v6, 0x40c00000, s0
+; GFX11-TRUE16-NEXT: v_or_b32_e32 v7, 0x400000, v9
+; GFX11-TRUE16-NEXT: v_bfe_u32 v8, v5, 16, 1
+; GFX11-TRUE16-NEXT: v_cndmask_b32_e32 v1, v1, v4, vcc_lo
+; GFX11-TRUE16-NEXT: v_cmp_u_f32_e32 vcc_lo, v9, v9
+; GFX11-TRUE16-NEXT: v_lshrrev_b32_e32 v0, 16, v0
+; GFX11-TRUE16-NEXT: v_bfe_u32 v3, v6, 16, 1
+; GFX11-TRUE16-NEXT: v_add_nc_u32_e32 v4, v8, v5
+; GFX11-TRUE16-NEXT: s_and_b32 s0, s20, 0xffff0000
+; GFX11-TRUE16-NEXT: v_cndmask_b32_e32 v2, v2, v7, vcc_lo
+; GFX11-TRUE16-NEXT: v_mov_b16_e32 v11.h, v0.l
+; GFX11-TRUE16-NEXT: v_lshrrev_b32_e32 v0, 16, v1
+; GFX11-TRUE16-NEXT: v_add_nc_u32_e32 v1, v3, v6
+; GFX11-TRUE16-NEXT: v_or_b32_e32 v3, 0x400000, v5
+; GFX11-TRUE16-NEXT: v_lshrrev_b32_e32 v10, 16, v2
+; GFX11-TRUE16-NEXT: v_add_nc_u32_e32 v2, 0x7fff, v4
+; GFX11-TRUE16-NEXT: v_add_f32_e64 v4, 0x40c00000, s0
+; GFX11-TRUE16-NEXT: v_cmp_u_f32_e32 vcc_lo, v5, v5
+; GFX11-TRUE16-NEXT: s_lshl_b32 s0, s20, 16
+; GFX11-TRUE16-NEXT: v_add_nc_u32_e32 v1, 0x7fff, v1
+; GFX11-TRUE16-NEXT: v_or_b32_e32 v7, 0x400000, v6
+; GFX11-TRUE16-NEXT: v_add_f32_e64 v8, 0x40c00000, s0
+; GFX11-TRUE16-NEXT: v_cndmask_b32_e32 v2, v2, v3, vcc_lo
+; GFX11-TRUE16-NEXT: v_bfe_u32 v3, v4, 16, 1
+; GFX11-TRUE16-NEXT: v_cmp_u_f32_e32 vcc_lo, v6, v6
+; GFX11-TRUE16-NEXT: v_mov_b16_e32 v10.h, v0.l
+; GFX11-TRUE16-NEXT: v_bfe_u32 v5, v8, 16, 1
+; GFX11-TRUE16-NEXT: s_and_b32 s0, s19, 0xffff0000
+; GFX11-TRUE16-NEXT: v_dual_cndmask_b32 v1, v1, v7 :: v_dual_add_nc_u32 v0, v3, v4
+; GFX11-TRUE16-NEXT: v_add_f32_e64 v3, 0x40c00000, s0
+; GFX11-TRUE16-NEXT: v_cmp_u_f32_e32 vcc_lo, v4, v4
+; GFX11-TRUE16-NEXT: s_lshl_b32 s0, s19, 16
+; GFX11-TRUE16-NEXT: s_delay_alu instid0(VALU_DEP_3)
+; GFX11-TRUE16-NEXT: v_add_nc_u32_e32 v0, 0x7fff, v0
+; GFX11-TRUE16-NEXT: v_lshrrev_b32_e32 v9, 16, v1
+; GFX11-TRUE16-NEXT: v_add_nc_u32_e32 v1, v5, v8
+; GFX11-TRUE16-NEXT: v_or_b32_e32 v5, 0x400000, v4
+; GFX11-TRUE16-NEXT: v_or_b32_e32 v6, 0x400000, v8
+; GFX11-TRUE16-NEXT: v_bfe_u32 v7, v3, 16, 1
+; GFX11-TRUE16-NEXT: v_add_f32_e64 v16, 0x40c00000, s0
+; GFX11-TRUE16-NEXT: v_add_nc_u32_e32 v1, 0x7fff, v1
+; GFX11-TRUE16-NEXT: v_cndmask_b32_e32 v0, v0, v5, vcc_lo
+; GFX11-TRUE16-NEXT: v_cmp_u_f32_e32 vcc_lo, v8, v8
+; GFX11-TRUE16-NEXT: v_lshrrev_b32_e32 v2, 16, v2
+; GFX11-TRUE16-NEXT: v_add_nc_u32_e32 v4, v7, v3
+; GFX11-TRUE16-NEXT: v_bfe_u32 v5, v16, 16, 1
+; GFX11-TRUE16-NEXT: s_and_b32 s0, s18, 0xffff0000
+; GFX11-TRUE16-NEXT: v_cndmask_b32_e32 v1, v1, v6, vcc_lo
+; GFX11-TRUE16-NEXT: v_mov_b16_e32 v9.h, v2.l
+; GFX11-TRUE16-NEXT: v_cmp_u_f32_e32 vcc_lo, v3, v3
+; GFX11-TRUE16-NEXT: v_add_nc_u32_e32 v2, v5, v16
+; GFX11-TRUE16-NEXT: v_add_f32_e64 v5, 0x40c00000, s0
+; GFX11-TRUE16-NEXT: v_lshrrev_b32_e32 v8, 16, v1
+; GFX11-TRUE16-NEXT: v_add_nc_u32_e32 v1, 0x7fff, v4
+; GFX11-TRUE16-NEXT: v_or_b32_e32 v4, 0x400000, v3
+; GFX11-TRUE16-NEXT: s_lshl_b32 s0, s18, 16
+; GFX11-TRUE16-NEXT: v_add_nc_u32_e32 v2, 0x7fff, v2
+; GFX11-TRUE16-NEXT: v_add_f32_e64 v6, 0x40c00000, s0
+; GFX11-TRUE16-NEXT: v_or_b32_e32 v7, 0x400000, v16
+; GFX11-TRUE16-NEXT: v_bfe_u32 v17, v5, 16, 1
+; GFX11-TRUE16-NEXT: v_cndmask_b32_e32 v1, v1, v4, vcc_lo
+; GFX11-TRUE16-NEXT: v_cmp_u_f32_e32 vcc_lo, v16, v16
+; GFX11-TRUE16-NEXT: v_lshrrev_b32_e32 v0, 16, v0
+; GFX11-TRUE16-NEXT: v_bfe_u32 v3, v6, 16, 1
+; GFX11-TRUE16-NEXT: v_add_nc_u32_e32 v4, v17, v5
+; GFX11-TRUE16-NEXT: s_and_b32 s0, s17, 0xffff0000
+; GFX11-TRUE16-NEXT: v_cndmask_b32_e32 v2, v2, v7, vcc_lo
+; GFX11-TRUE16-NEXT: v_mov_b16_e32 v8.h, v0.l
+; GFX11-TRUE16-NEXT: v_add_nc_u32_e32 v0, v3, v6
+; GFX11-TRUE16-NEXT: v_or_b32_e32 v3, 0x400000, v5
+; GFX11-TRUE16-NEXT: v_cmp_u_f32_e32 vcc_lo, v5, v5
+; GFX11-TRUE16-NEXT: v_lshrrev_b32_e32 v7, 16, v2
+; GFX11-TRUE16-NEXT: v_add_nc_u32_e32 v2, 0x7fff, v4
+; GFX11-TRUE16-NEXT: v_add_f32_e64 v4, 0x40c00000, s0
+; GFX11-TRUE16-NEXT: v_add_nc_u32_e32 v0, 0x7fff, v0
+; GFX11-TRUE16-NEXT: v_or_b32_e32 v17, 0x400000, v6
+; GFX11-TRUE16-NEXT: v_add_f32_e64 v16, 0x40c00000, s1
+; GFX11-TRUE16-NEXT: v_cndmask_b32_e32 v2, v2, v3, vcc_lo
+; GFX11-TRUE16-NEXT: v_cmp_u_f32_e32 vcc_lo, v6, v6
+; GFX11-TRUE16-NEXT: v_bfe_u32 v3, v4, 16, 1
+; GFX11-TRUE16-NEXT: v_lshrrev_b32_e32 v1, 16, v1
+; GFX11-TRUE16-NEXT: v_bfe_u32 v5, v16, 16, 1
+; GFX11-TRUE16-NEXT: v_lshrrev_b32_e32 v2, 16, v2
+; GFX11-TRUE16-NEXT: s_delay_alu instid0(VALU_DEP_4) | instskip(SKIP_3) | instid1(VALU_DEP_3)
+; GFX11-TRUE16-NEXT: v_dual_cndmask_b32 v0, v0, v17 :: v_dual_add_nc_u32 v3, v3, v4
+; GFX11-TRUE16-NEXT: s_and_b32 s0, s16, 0xffff0000
+; GFX11-TRUE16-NEXT: v_cmp_u_f32_e32 vcc_lo, v4, v4
+; GFX11-TRUE16-NEXT: v_mov_b16_e32 v7.h, v1.l
+; GFX11-TRUE16-NEXT: v_lshrrev_b32_e32 v6, 16, v0
+; GFX11-TRUE16-NEXT: v_mov_b16_e32 v6.h, v2.l
+; GFX11-TRUE16-NEXT: v_add_nc_u32_e32 v0, 0x7fff, v3
+; GFX11-TRUE16-NEXT: v_or_b32_e32 v2, 0x400000, v4
+; GFX11-TRUE16-NEXT: v_add_f32_e64 v3, 0x40c00000, s0
+; GFX11-TRUE16-NEXT: v_add_nc_u32_e32 v1, v5, v16
+; GFX11-TRUE16-NEXT: s_lshl_b32 s1, s16, 16
+; GFX11-TRUE16-NEXT: v_or_b32_e32 v5, 0x400000, v16
+; GFX11-TRUE16-NEXT: v_add_f32_e64 v17, 0x40c00000, s1
+; GFX11-TRUE16-NEXT: v_cndmask_b32_e32 v0, v0, v2, vcc_lo
+; GFX11-TRUE16-NEXT: v_bfe_u32 v2, v3, 16, 1
+; GFX11-TRUE16-NEXT: v_add_nc_u32_e32 v1, 0x7fff, v1
+; GFX11-TRUE16-NEXT: v_cmp_u_f32_e32 vcc_lo, v16, v16
+; GFX11-TRUE16-NEXT: v_bfe_u32 v4, v17, 16, 1
+; GFX11-TRUE16-NEXT: s_and_b32 s0, s15, 0xffff0000
+; GFX11-TRUE16-NEXT: v_add_nc_u32_e32 v2, v2, v3
+; GFX11-TRUE16-NEXT: v_or_b32_e32 v16, 0x400000, v3
+; GFX11-TRUE16-NEXT: v_cndmask_b32_e32 v1, v1, v5, vcc_lo
+; GFX11-TRUE16-NEXT: v_add_nc_u32_e32 v4, v4, v17
+; GFX11-TRUE16-NEXT: v_cmp_u_f32_e32 vcc_lo, v3, v3
+; GFX11-TRUE16-NEXT: v_add_nc_u32_e32 v2, 0x7fff, v2
+; GFX11-TRUE16-NEXT: v_or_b32_e32 v18, 0x400000, v17
+; GFX11-TRUE16-NEXT: v_lshrrev_b32_e32 v5, 16, v1
+; GFX11-TRUE16-NEXT: v_add_f32_e64 v1, 0x40c00000, s0
+; GFX11-TRUE16-NEXT: v_add_nc_u32_e32 v4, 0x7fff, v4
+; GFX11-TRUE16-NEXT: v_cndmask_b32_e32 v2, v2, v16, vcc_lo
+; GFX11-TRUE16-NEXT: v_cmp_u_f32_e32 vcc_lo, v17, v17
+; GFX11-TRUE16-NEXT: v_lshrrev_b32_e32 v0, 16, v0
+; GFX11-TRUE16-NEXT: v_bfe_u32 v19, v1, 16, 1
+; GFX11-TRUE16-NEXT: s_lshl_b32 s0, s15, 16
+; GFX11-TRUE16-NEXT: v_lshrrev_b32_e32 v2, 16, v2
+; GFX11-TRUE16-NEXT: v_cndmask_b32_e32 v3, v4, v18, vcc_lo
+; GFX11-TRUE16-NEXT: v_mov_b16_e32 v5.h, v0.l
+; GFX11-TRUE16-NEXT: v_add_nc_u32_e32 v16, v19, v1
+; GFX11-TRUE16-NEXT: v_add_f32_e64 v0, 0x40c00000, s0
+; GFX11-TRUE16-NEXT: s_and_b32 s0, s14, 0xffff0000
+; GFX11-TRUE16-NEXT: v_lshrrev_b32_e32 v4, 16, v3
+; GFX11-TRUE16-NEXT: v_mov_b16_e32 v4.h, v2.l
+; GFX11-TRUE16-NEXT: v_add_nc_u32_e32 v3, 0x7fff, v16
+; GFX11-TRUE16-NEXT: v_or_b32_e32 v16, 0x400000, v1
+; GFX11-TRUE16-NEXT: v_bfe_u32 v17, v0, 16, 1
+; GFX11-TRUE16-NEXT: v_add_f32_e64 v2, 0x40c00000, s0
+; GFX11-TRUE16-NEXT: v_cmp_u_f32_e32 vcc_lo, v1, v1
+; GFX11-TRUE16-NEXT: s_lshl_b32 s0, s14, 16
+; GFX11-TRUE16-NEXT: v_or_b32_e32 v18, 0x400000, v0
+; GFX11-TRUE16-NEXT: s_and_b32 s1, s12, 0xffff0000
+; GFX11-TRUE16-NEXT: v_or_b32_e32 v20, 0x400000, v2
+; GFX11-TRUE16-NEXT: v_cndmask_b32_e32 v1, v3, v16, vcc_lo
+; GFX11-TRUE16-NEXT: v_add_nc_u32_e32 v3, v17, v0
+; GFX11-TRUE16-NEXT: v_bfe_u32 v16, v2, 16, 1
+; GFX11-TRUE16-NEXT: v_add_f32_e64 v17, 0x40c00000, s0
+; GFX11-TRUE16-NEXT: v_cmp_u_f32_e32 vcc_lo, v0, v0
+; GFX11-TRUE16-NEXT: s_and_b32 s0, s13, 0xffff0000
+; GFX11-TRUE16-NEXT: v_add_nc_u32_e32 v3, 0x7fff, v3
+; GFX11-TRUE16-NEXT: v_add_nc_u32_e32 v16, v16, v2
+; GFX11-TRUE16-NEXT: v_bfe_u32 v19, v17, 16, 1
+; GFX11-TRUE16-NEXT: v_lshrrev_b32_e32 v1, 16, v1
+; GFX11-TRUE16-NEXT: s_delay_alu instid0(VALU_DEP_4)
+; GFX11-TRUE16-NEXT: v_cndmask_b32_e32 v0, v3, v18, vcc_lo
+; GFX11-TRUE16-NEXT: v_add_f32_e64 v18, 0x40c00000, s0
+; GFX11-TRUE16-NEXT: v_add_nc_u32_e32 v16, 0x7fff, v16
+; GFX11-TRUE16-NEXT: v_add_nc_u32_e32 v19, v19, v17
+; GFX11-TRUE16-NEXT: v_cmp_u_f32_e32 vcc_lo, v2, v2
+; GFX11-TRUE16-NEXT: v_lshrrev_b32_e32 v3, 16, v0
+; GFX11-TRUE16-NEXT: v_bfe_u32 v0, v18, 16, 1
+; GFX11-TRUE16-NEXT: s_lshl_b32 s0, s13, 16
+; GFX11-TRUE16-NEXT: v_mov_b16_e32 v3.h, v1.l
+; GFX11-TRUE16-NEXT: v_cndmask_b32_e32 v2, v16, v20, vcc_lo
+; GFX11-TRUE16-NEXT: v_add_nc_u32_e32 v16, 0x7fff, v19
+; GFX11-TRUE16-NEXT: v_or_b32_e32 v19, 0x400000, v17
+; GFX11-TRUE16-NEXT: v_cmp_u_f32_e32 vcc_lo, v17, v17
+; GFX11-TRUE16-NEXT: v_add_nc_u32_e32 v0, v0, v18
+; GFX11-TRUE16-NEXT: v_lshrrev_b32_e32 v1, 16, v2
+; GFX11-TRUE16-NEXT: v_add_f32_e64 v17, 0x40c00000, s1
+; GFX11-TRUE16-NEXT: v_cndmask_b32_e32 v2, v16, v19, vcc_lo
+; GFX11-TRUE16-NEXT: v_add_f32_e64 v16, 0x40c00000, s0
+; GFX11-TRUE16-NEXT: v_add_nc_u32_e32 v0, 0x7fff, v0
+; GFX11-TRUE16-NEXT: v_or_b32_e32 v19, 0x400000, v18
+; GFX11-TRUE16-NEXT: v_bfe_u32 v22, v17, 16, 1
+; GFX11-TRUE16-NEXT: v_cmp_u_f32_e32 vcc_lo, v18, v18
+; GFX11-TRUE16-NEXT: v_bfe_u32 v21, v16, 16, 1
+; GFX11-TRUE16-NEXT: s_lshl_b32 s0, s12, 16
+; GFX11-TRUE16-NEXT: v_or_b32_e32 v23, 0x400000, v16
+; GFX11-TRUE16-NEXT: v_add_f32_e64 v20, 0x40c00000, s0
+; GFX11-TRUE16-NEXT: s_delay_alu instid0(VALU_DEP_3) | instskip(SKIP_2) | instid1(VALU_DEP_4)
+; GFX11-TRUE16-NEXT: v_dual_cndmask_b32 v0, v0, v19 :: v_dual_add_nc_u32 v19, v21, v16
+; GFX11-TRUE16-NEXT: v_add_nc_u32_e32 v21, v22, v17
+; GFX11-TRUE16-NEXT: v_or_b32_e32 v22, 0x400000, v17
+; GFX11-TRUE16-NEXT: v_bfe_u32 v18, v20, 16, 1
+; GFX11-TRUE16-NEXT: v_cmp_u_f32_e32 vcc_lo, v17, v17
+; GFX11-TRUE16-NEXT: v_add_nc_u32_e32 v19, 0x7fff, v19
+; GFX11-TRUE16-NEXT: v_add_nc_u32_e32 v21, 0x7fff, v21
+; GFX11-TRUE16-NEXT: v_or_b32_e32 v24, 0x400000, v20
+; GFX11-TRUE16-NEXT: v_add_nc_u32_e32 v18, v18, v20
+; GFX11-TRUE16-NEXT: v_lshrrev_b32_e32 v0, 16, v0
+; GFX11-TRUE16-NEXT: v_lshrrev_b32_e32 v2, 16, v2
+; GFX11-TRUE16-NEXT: v_cndmask_b32_e32 v17, v21, v22, vcc_lo
+; GFX11-TRUE16-NEXT: v_cmp_u_f32_e32 vcc_lo, v16, v16
+; GFX11-TRUE16-NEXT: v_add_nc_u32_e32 v18, 0x7fff, v18
+; GFX11-TRUE16-NEXT: v_mov_b16_e32 v2.h, v1.l
+; GFX11-TRUE16-NEXT: s_delay_alu instid0(VALU_DEP_4) | instskip(SKIP_2) | instid1(VALU_DEP_2)
+; GFX11-TRUE16-NEXT: v_lshrrev_b32_e32 v17, 16, v17
+; GFX11-TRUE16-NEXT: v_cndmask_b32_e32 v16, v19, v23, vcc_lo
+; GFX11-TRUE16-NEXT: v_cmp_u_f32_e32 vcc_lo, v20, v20
+; GFX11-TRUE16-NEXT: v_lshrrev_b32_e32 v1, 16, v16
+; GFX11-TRUE16-NEXT: v_cndmask_b32_e32 v18, v18, v24, vcc_lo
+; GFX11-TRUE16-NEXT: v_mov_b16_e32 v1.h, v0.l
+; GFX11-TRUE16-NEXT: s_delay_alu instid0(VALU_DEP_2)
+; GFX11-TRUE16-NEXT: v_lshrrev_b32_e32 v0, 16, v18
+; GFX11-TRUE16-NEXT: v_mov_b16_e32 v0.h, v17.l
+; GFX11-TRUE16-NEXT: s_setpc_b64 s[30:31]
+; GFX11-TRUE16-NEXT: .LBB67_3:
+; GFX11-TRUE16-NEXT: s_branch .LBB67_2
+; GFX11-TRUE16-NEXT: .LBB67_4:
+; GFX11-TRUE16-NEXT: v_dual_mov_b32 v0, s12 :: v_dual_mov_b32 v1, s13
+; GFX11-TRUE16-NEXT: v_dual_mov_b32 v2, s14 :: v_dual_mov_b32 v3, s15
+; GFX11-TRUE16-NEXT: v_dual_mov_b32 v4, s16 :: v_dual_mov_b32 v5, s17
+; GFX11-TRUE16-NEXT: v_dual_mov_b32 v6, s18 :: v_dual_mov_b32 v7, s19
+; GFX11-TRUE16-NEXT: v_dual_mov_b32 v8, s20 :: v_dual_mov_b32 v9, s21
+; GFX11-TRUE16-NEXT: v_dual_mov_b32 v10, s22 :: v_dual_mov_b32 v11, s23
+; GFX11-TRUE16-NEXT: v_dual_mov_b32 v12, s24 :: v_dual_mov_b32 v13, s25
+; GFX11-TRUE16-NEXT: v_dual_mov_b32 v14, s26 :: v_dual_mov_b32 v15, s27
+; GFX11-TRUE16-NEXT: s_setpc_b64 s[30:31]
+;
+; GFX11-FAKE16-LABEL: bitcast_v32bf16_to_v8i64_scalar:
+; GFX11-FAKE16: ; %bb.0:
+; GFX11-FAKE16-NEXT: s_waitcnt vmcnt(0) expcnt(0) lgkmcnt(0)
+; GFX11-FAKE16-NEXT: s_mov_b32 s15, s3
+; GFX11-FAKE16-NEXT: s_mov_b32 s14, s2
+; GFX11-FAKE16-NEXT: s_mov_b32 s13, s1
+; GFX11-FAKE16-NEXT: s_mov_b32 s12, s0
+; GFX11-FAKE16-NEXT: s_cmp_lg_u32 s28, 0
+; GFX11-FAKE16-NEXT: s_mov_b32 s0, 0
+; GFX11-FAKE16-NEXT: s_cbranch_scc0 .LBB67_3
+; GFX11-FAKE16-NEXT: ; %bb.1: ; %Flow
+; GFX11-FAKE16-NEXT: s_and_not1_b32 vcc_lo, exec_lo, s0
+; GFX11-FAKE16-NEXT: s_cbranch_vccnz .LBB67_4
+; GFX11-FAKE16-NEXT: .LBB67_2: ; %cmp.true
+; GFX11-FAKE16-NEXT: s_and_b32 s1, s27, 0xffff0000
+; GFX11-FAKE16-NEXT: s_lshl_b32 s0, s27, 16
+; GFX11-FAKE16-NEXT: v_add_f32_e64 v1, 0x40c00000, s1
+; GFX11-FAKE16-NEXT: v_add_f32_e64 v0, 0x40c00000, s0
+; GFX11-FAKE16-NEXT: s_and_b32 s0, s26, 0xffff0000
+; GFX11-FAKE16-NEXT: s_lshl_b32 s2, s26, 16
+; GFX11-FAKE16-NEXT: v_add_f32_e64 v3, 0x40c00000, s0
+; GFX11-FAKE16-NEXT: v_bfe_u32 v4, v1, 16, 1
+; GFX11-FAKE16-NEXT: v_bfe_u32 v2, v0, 16, 1
+; GFX11-FAKE16-NEXT: v_add_f32_e64 v5, 0x40c00000, s2
+; GFX11-FAKE16-NEXT: v_or_b32_e32 v7, 0x400000, v1
+; GFX11-FAKE16-NEXT: v_or_b32_e32 v8, 0x400000, v0
+; GFX11-FAKE16-NEXT: v_add_nc_u32_e32 v4, v4, v1
+; GFX11-FAKE16-NEXT: v_bfe_u32 v9, v3, 16, 1
+; GFX11-FAKE16-NEXT: v_bfe_u32 v10, v5, 16, 1
+; GFX11-FAKE16-NEXT: v_cmp_u_f32_e32 vcc_lo, v0, v0
+; GFX11-FAKE16-NEXT: s_and_b32 s1, s25, 0xffff0000
+; GFX11-FAKE16-NEXT: v_add_nc_u32_e32 v4, 0x7fff, v4
+; GFX11-FAKE16-NEXT: v_add_nc_u32_e32 v2, v2, v0
+; GFX11-FAKE16-NEXT: v_add_f32_e64 v6, 0x40c00000, s1
+; GFX11-FAKE16-NEXT: s_lshl_b32 s3, s25, 16
+; GFX11-FAKE16-NEXT: s_and_b32 s0, s24, 0xffff0000
+; GFX11-FAKE16-NEXT: s_lshl_b32 s1, s24, 16
+; GFX11-FAKE16-NEXT: v_add_nc_u32_e32 v2, 0x7fff, v2
+; GFX11-FAKE16-NEXT: s_delay_alu instid0(VALU_DEP_1)
+; GFX11-FAKE16-NEXT: v_cndmask_b32_e32 v0, v2, v8, vcc_lo
+; GFX11-FAKE16-NEXT: v_add_nc_u32_e32 v2, v9, v3
+; GFX11-FAKE16-NEXT: v_add_nc_u32_e32 v8, v10, v5
+; GFX11-FAKE16-NEXT: v_cmp_u_f32_e32 vcc_lo, v1, v1
+; GFX11-FAKE16-NEXT: v_or_b32_e32 v9, 0x400000, v3
+; GFX11-FAKE16-NEXT: v_lshrrev_b32_e32 v0, 16, v0
+; GFX11-FAKE16-NEXT: v_dual_cndmask_b32 v1, v4, v7 :: v_dual_add_nc_u32 v2, 0x7fff, v2
+; GFX11-FAKE16-NEXT: v_or_b32_e32 v4, 0x400000, v5
+; GFX11-FAKE16-NEXT: v_add_nc_u32_e32 v7, 0x7fff, v8
+; GFX11-FAKE16-NEXT: v_cmp_u_f32_e32 vcc_lo, v5, v5
+; GFX11-FAKE16-NEXT: v_add_f32_e64 v8, 0x40c00000, s3
+; GFX11-FAKE16-NEXT: v_bfe_u32 v5, v6, 16, 1
+; GFX11-FAKE16-NEXT: v_lshrrev_b32_e32 v1, 16, v1
+; GFX11-FAKE16-NEXT: v_and_b32_e32 v0, 0xffff, v0
+; GFX11-FAKE16-NEXT: v_cndmask_b32_e32 v4, v7, v4, vcc_lo
+; GFX11-FAKE16-NEXT: v_cmp_u_f32_e32 vcc_lo, v3, v3
+; GFX11-FAKE16-NEXT: v_bfe_u32 v7, v8, 16, 1
+; GFX11-FAKE16-NEXT: s_delay_alu instid0(VALU_DEP_4) | instskip(NEXT) | instid1(VALU_DEP_4)
+; GFX11-FAKE16-NEXT: v_lshl_or_b32 v15, v1, 16, v0
+; GFX11-FAKE16-NEXT: v_lshrrev_b32_e32 v3, 16, v4
+; GFX11-FAKE16-NEXT: v_cndmask_b32_e32 v2, v2, v9, vcc_lo
+; GFX11-FAKE16-NEXT: v_add_nc_u32_e32 v4, v5, v6
+; GFX11-FAKE16-NEXT: v_or_b32_e32 v9, 0x400000, v6
+; GFX11-FAKE16-NEXT: v_cmp_u_f32_e32 vcc_lo, v8, v8
+; GFX11-FAKE16-NEXT: v_and_b32_e32 v1, 0xffff, v3
+; GFX11-FAKE16-NEXT: v_lshrrev_b32_e32 v0, 16, v2
+; GFX11-FAKE16-NEXT: v_add_nc_u32_e32 v2, 0x7fff, v4
+; GFX11-FAKE16-NEXT: v_add_nc_u32_e32 v5, v7, v8
+; GFX11-FAKE16-NEXT: v_or_b32_e32 v4, 0x400000, v8
+; GFX11-FAKE16-NEXT: v_add_f32_e64 v7, 0x40c00000, s1
+; GFX11-FAKE16-NEXT: s_lshl_b32 s1, s23, 16
+; GFX11-FAKE16-NEXT: v_lshl_or_b32 v14, v0, 16, v1
+; GFX11-FAKE16-NEXT: v_add_nc_u32_e32 v3, 0x7fff, v5
+; GFX11-FAKE16-NEXT: v_add_f32_e64 v5, 0x40c00000, s0
+; GFX11-FAKE16-NEXT: v_bfe_u32 v8, v7, 16, 1
+; GFX11-FAKE16-NEXT: s_and_b32 s0, s23, 0xffff0000
+; GFX11-FAKE16-NEXT: s_delay_alu instid0(VALU_DEP_3) | instskip(NEXT) | instid1(VALU_DEP_3)
+; GFX11-FAKE16-NEXT: v_cndmask_b32_e32 v3, v3, v4, vcc_lo
+; GFX11-FAKE16-NEXT: v_bfe_u32 v4, v5, 16, 1
+; GFX11-FAKE16-NEXT: v_cmp_u_f32_e32 vcc_lo, v6, v6
+; GFX11-FAKE16-NEXT: v_add_nc_u32_e32 v6, v8, v7
+; GFX11-FAKE16-NEXT: v_add_f32_e64 v8, 0x40c00000, s1
+; GFX11-FAKE16-NEXT: v_lshrrev_b32_e32 v3, 16, v3
+; GFX11-FAKE16-NEXT: v_add_nc_u32_e32 v4, v4, v5
+; GFX11-FAKE16-NEXT: v_cndmask_b32_e32 v2, v2, v9, vcc_lo
+; GFX11-FAKE16-NEXT: v_or_b32_e32 v9, 0x400000, v5
+; GFX11-FAKE16-NEXT: v_cmp_u_f32_e32 vcc_lo, v7, v7
+; GFX11-FAKE16-NEXT: s_lshl_b32 s1, s22, 16
+; GFX11-FAKE16-NEXT: s_delay_alu instid0(VALU_DEP_3)
+; GFX11-FAKE16-NEXT: v_lshrrev_b32_e32 v0, 16, v2
+; GFX11-FAKE16-NEXT: v_add_nc_u32_e32 v2, 0x7fff, v4
+; GFX11-FAKE16-NEXT: v_and_b32_e32 v1, 0xffff, v3
+; GFX11-FAKE16-NEXT: v_add_nc_u32_e32 v3, 0x7fff, v6
+; GFX11-FAKE16-NEXT: v_or_b32_e32 v4, 0x400000, v7
+; GFX11-FAKE16-NEXT: v_add_f32_e64 v6, 0x40c00000, s0
+; GFX11-FAKE16-NEXT: v_bfe_u32 v7, v8, 16, 1
+; GFX11-FAKE16-NEXT: s_and_b32 s0, s22, 0xffff0000
+; GFX11-FAKE16-NEXT: s_delay_alu instid0(VALU_DEP_3) | instskip(NEXT) | instid1(VALU_DEP_3)
+; GFX11-FAKE16-NEXT: v_cndmask_b32_e32 v3, v3, v4, vcc_lo
+; GFX11-FAKE16-NEXT: v_bfe_u32 v4, v6, 16, 1
+; GFX11-FAKE16-NEXT: v_cmp_u_f32_e32 vcc_lo, v5, v5
+; GFX11-FAKE16-NEXT: v_add_nc_u32_e32 v5, v7, v8
+; GFX11-FAKE16-NEXT: v_add_f32_e64 v7, 0x40c00000, s1
+; GFX11-FAKE16-NEXT: v_lshrrev_b32_e32 v3, 16, v3
+; GFX11-FAKE16-NEXT: v_add_nc_u32_e32 v4, v4, v6
+; GFX11-FAKE16-NEXT: v_cndmask_b32_e32 v2, v2, v9, vcc_lo
+; GFX11-FAKE16-NEXT: v_lshl_or_b32 v13, v0, 16, v1
+; GFX11-FAKE16-NEXT: v_cmp_u_f32_e32 vcc_lo, v8, v8
+; GFX11-FAKE16-NEXT: v_and_b32_e32 v1, 0xffff, v3
+; GFX11-FAKE16-NEXT: v_add_nc_u32_e32 v3, 0x7fff, v5
+; GFX11-FAKE16-NEXT: v_lshrrev_b32_e32 v0, 16, v2
+; GFX11-FAKE16-NEXT: v_add_nc_u32_e32 v2, 0x7fff, v4
+; GFX11-FAKE16-NEXT: v_or_b32_e32 v4, 0x400000, v8
+; GFX11-FAKE16-NEXT: v_add_f32_e64 v5, 0x40c00000, s0
+; GFX11-FAKE16-NEXT: v_or_b32_e32 v9, 0x400000, v6
+; GFX11-FAKE16-NEXT: v_bfe_u32 v8, v7, 16, 1
+; GFX11-FAKE16-NEXT: s_and_b32 s0, s21, 0xffff0000
+; GFX11-FAKE16-NEXT: v_cndmask_b32_e32 v3, v3, v4, vcc_lo
+; GFX11-FAKE16-NEXT: v_bfe_u32 v4, v5, 16, 1
+; GFX11-FAKE16-NEXT: v_cmp_u_f32_e32 vcc_lo, v6, v6
+; GFX11-FAKE16-NEXT: v_add_nc_u32_e32 v6, v8, v7
+; GFX11-FAKE16-NEXT: s_lshl_b32 s1, s21, 16
+; GFX11-FAKE16-NEXT: v_lshrrev_b32_e32 v3, 16, v3
+; GFX11-FAKE16-NEXT: v_add_nc_u32_e32 v4, v4, v5
+; GFX11-FAKE16-NEXT: v_cndmask_b32_e32 v2, v2, v9, vcc_lo
+; GFX11-FAKE16-NEXT: v_lshl_or_b32 v12, v0, 16, v1
+; GFX11-FAKE16-NEXT: v_add_f32_e64 v8, 0x40c00000, s1
+; GFX11-FAKE16-NEXT: v_and_b32_e32 v1, 0xffff, v3
+; GFX11-FAKE16-NEXT: v_add_nc_u32_e32 v3, 0x7fff, v6
+; GFX11-FAKE16-NEXT: v_lshrrev_b32_e32 v0, 16, v2
+; GFX11-FAKE16-NEXT: v_add_nc_u32_e32 v2, 0x7fff, v4
+; GFX11-FAKE16-NEXT: v_or_b32_e32 v4, 0x400000, v7
+; GFX11-FAKE16-NEXT: v_add_f32_e64 v6, 0x40c00000, s0
+; GFX11-FAKE16-NEXT: v_cmp_u_f32_e32 vcc_lo, v7, v7
+; GFX11-FAKE16-NEXT: v_or_b32_e32 v9, 0x400000, v5
+; GFX11-FAKE16-NEXT: v_bfe_u32 v7, v8, 16, 1
+; GFX11-FAKE16-NEXT: s_and_b32 s0, s20, 0xffff0000
+; GFX11-FAKE16-NEXT: s_lshl_b32 s1, s20, 16
+; GFX11-FAKE16-NEXT: v_cndmask_b32_e32 v3, v3, v4, vcc_lo
+; GFX11-FAKE16-NEXT: v_bfe_u32 v4, v6, 16, 1
+; GFX11-FAKE16-NEXT: v_cmp_u_f32_e32 vcc_lo, v5, v5
+; GFX11-FAKE16-NEXT: v_add_nc_u32_e32 v5, v7, v8
+; GFX11-FAKE16-NEXT: v_lshl_or_b32 v11, v0, 16, v1
+; GFX11-FAKE16-NEXT: v_lshrrev_b32_e32 v3, 16, v3
+; GFX11-FAKE16-NEXT: v_add_nc_u32_e32 v4, v4, v6
+; GFX11-FAKE16-NEXT: v_cndmask_b32_e32 v2, v2, v9, vcc_lo
+; GFX11-FAKE16-NEXT: v_add_f32_e64 v7, 0x40c00000, s1
+; GFX11-FAKE16-NEXT: v_cmp_u_f32_e32 vcc_lo, v8, v8
+; GFX11-FAKE16-NEXT: v_and_b32_e32 v1, 0xffff, v3
+; GFX11-FAKE16-NEXT: v_add_nc_u32_e32 v3, 0x7fff, v5
+; GFX11-FAKE16-NEXT: v_lshrrev_b32_e32 v0, 16, v2
+; GFX11-FAKE16-NEXT: v_add_nc_u32_e32 v2, 0x7fff, v4
+; GFX11-FAKE16-NEXT: v_or_b32_e32 v4, 0x400000, v8
+; GFX11-FAKE16-NEXT: v_add_f32_e64 v5, 0x40c00000, s0
+; GFX11-FAKE16-NEXT: v_or_b32_e32 v9, 0x400000, v6
+; GFX11-FAKE16-NEXT: v_bfe_u32 v8, v7, 16, 1
+; GFX11-FAKE16-NEXT: v_lshl_or_b32 v10, v0, 16, v1
+; GFX11-FAKE16-NEXT: v_cndmask_b32_e32 v3, v3, v4, vcc_lo
+; GFX11-FAKE16-NEXT: v_bfe_u32 v4, v5, 16, 1
+; GFX11-FAKE16-NEXT: v_cmp_u_f32_e32 vcc_lo, v6, v6
+; GFX11-FAKE16-NEXT: v_add_nc_u32_e32 v0, v8, v7
+; GFX11-FAKE16-NEXT: s_lshl_b32 s0, s19, 16
+; GFX11-FAKE16-NEXT: v_lshrrev_b32_e32 v3, 16, v3
+; GFX11-FAKE16-NEXT: v_add_nc_u32_e32 v4, v4, v5
+; GFX11-FAKE16-NEXT: v_cndmask_b32_e32 v2, v2, v9, vcc_lo
+; GFX11-FAKE16-NEXT: v_add_nc_u32_e32 v0, 0x7fff, v0
+; GFX11-FAKE16-NEXT: v_or_b32_e32 v6, 0x400000, v7
+; GFX11-FAKE16-NEXT: v_cmp_u_f32_e32 vcc_lo, v7, v7
+; GFX11-FAKE16-NEXT: v_or_b32_e32 v9, 0x400000, v5
+; GFX11-FAKE16-NEXT: v_lshrrev_b32_e32 v1, 16, v2
+; GFX11-FAKE16-NEXT: v_and_b32_e32 v2, 0xffff, v3
+; GFX11-FAKE16-NEXT: v_add_nc_u32_e32 v3, 0x7fff, v4
+; GFX11-FAKE16-NEXT: v_add_f32_e64 v4, 0x40c00000, s0
+; GFX11-FAKE16-NEXT: s_and_b32 s0, s19, 0xffff0000
+; GFX11-FAKE16-NEXT: v_cndmask_b32_e32 v0, v0, v6, vcc_lo
+; GFX11-FAKE16-NEXT: v_add_f32_e64 v8, 0x40c00000, s0
+; GFX11-FAKE16-NEXT: v_cmp_u_f32_e32 vcc_lo, v5, v5
+; GFX11-FAKE16-NEXT: v_bfe_u32 v16, v4, 16, 1
+; GFX11-FAKE16-NEXT: s_and_b32 s0, s18, 0xffff0000
+; GFX11-FAKE16-NEXT: v_lshrrev_b32_e32 v0, 16, v0
+; GFX11-FAKE16-NEXT: v_bfe_u32 v6, v8, 16, 1
+; GFX11-FAKE16-NEXT: v_cndmask_b32_e32 v3, v3, v9, vcc_lo
+; GFX11-FAKE16-NEXT: v_add_nc_u32_e32 v5, v16, v4
+; GFX11-FAKE16-NEXT: v_lshl_or_b32 v9, v1, 16, v2
+; GFX11-FAKE16-NEXT: v_cmp_u_f32_e32 vcc_lo, v4, v4
+; GFX11-FAKE16-NEXT: v_add_nc_u32_e32 v1, v6, v8
+; GFX11-FAKE16-NEXT: v_lshrrev_b32_e32 v2, 16, v3
+; GFX11-FAKE16-NEXT: v_add_nc_u32_e32 v3, 0x7fff, v5
+; GFX11-FAKE16-NEXT: v_or_b32_e32 v5, 0x400000, v4
+; GFX11-FAKE16-NEXT: v_or_b32_e32 v6, 0x400000, v8
+; GFX11-FAKE16-NEXT: v_add_nc_u32_e32 v1, 0x7fff, v1
+; GFX11-FAKE16-NEXT: v_add_f32_e64 v4, 0x40c00000, s0
+; GFX11-FAKE16-NEXT: s_lshl_b32 s1, s18, 16
+; GFX11-FAKE16-NEXT: v_cndmask_b32_e32 v3, v3, v5, vcc_lo
+; GFX11-FAKE16-NEXT: v_cmp_u_f32_e32 vcc_lo, v8, v8
+; GFX11-FAKE16-NEXT: v_and_b32_e32 v0, 0xffff, v0
+; GFX11-FAKE16-NEXT: v_add_f32_e64 v5, 0x40c00000, s1
+; GFX11-FAKE16-NEXT: s_lshl_b32 s0, s17, 16
+; GFX11-FAKE16-NEXT: v_lshrrev_b32_e32 v3, 16, v3
+; GFX11-FAKE16-NEXT: v_cndmask_b32_e32 v1, v1, v6, vcc_lo
+; GFX11-FAKE16-NEXT: v_bfe_u32 v6, v4, 16, 1
+; GFX11-FAKE16-NEXT: v_bfe_u32 v7, v5, 16, 1
+; GFX11-FAKE16-NEXT: v_lshl_or_b32 v8, v2, 16, v0
+; GFX11-FAKE16-NEXT: s_and_b32 s1, s17, 0xffff0000
+; GFX11-FAKE16-NEXT: v_lshrrev_b32_e32 v0, 16, v1
+; GFX11-FAKE16-NEXT: v_add_nc_u32_e32 v2, v6, v4
+; GFX11-FAKE16-NEXT: v_add_f32_e64 v6, 0x40c00000, s0
+; GFX11-FAKE16-NEXT: v_and_b32_e32 v1, 0xffff, v3
+; GFX11-FAKE16-NEXT: v_add_nc_u32_e32 v3, v7, v5
+; GFX11-FAKE16-NEXT: v_add_f32_e64 v7, 0x40c00000, s1
+; GFX11-FAKE16-NEXT: v_add_nc_u32_e32 v2, 0x7fff, v2
+; GFX11-FAKE16-NEXT: v_or_b32_e32 v16, 0x400000, v4
+; GFX11-FAKE16-NEXT: v_bfe_u32 v17, v6, 16, 1
+; GFX11-FAKE16-NEXT: v_cmp_u_f32_e32 vcc_lo, v4, v4
+; GFX11-FAKE16-NEXT: v_add_nc_u32_e32 v3, 0x7fff, v3
+; GFX11-FAKE16-NEXT: v_or_b32_e32 v18, 0x400000, v5
+; GFX11-FAKE16-NEXT: v_bfe_u32 v19, v7, 16, 1
+; GFX11-FAKE16-NEXT: v_add_nc_u32_e32 v4, v17, v6
+; GFX11-FAKE16-NEXT: v_cndmask_b32_e32 v2, v2, v16, vcc_lo
+; GFX11-FAKE16-NEXT: v_cmp_u_f32_e32 vcc_lo, v5, v5
+; GFX11-FAKE16-NEXT: v_or_b32_e32 v16, 0x400000, v6
+; GFX11-FAKE16-NEXT: v_add_nc_u32_e32 v5, v19, v7
+; GFX11-FAKE16-NEXT: v_add_nc_u32_e32 v4, 0x7fff, v4
+; GFX11-FAKE16-NEXT: s_lshl_b32 s1, s16, 16
+; GFX11-FAKE16-NEXT: v_cndmask_b32_e32 v3, v3, v18, vcc_lo
+; GFX11-FAKE16-NEXT: v_cmp_u_f32_e32 vcc_lo, v6, v6
+; GFX11-FAKE16-NEXT: v_add_nc_u32_e32 v5, 0x7fff, v5
+; GFX11-FAKE16-NEXT: v_or_b32_e32 v17, 0x400000, v7
+; GFX11-FAKE16-NEXT: v_add_f32_e64 v18, 0x40c00000, s1
+; GFX11-FAKE16-NEXT: s_and_b32 s0, s16, 0xffff0000
+; GFX11-FAKE16-NEXT: v_cndmask_b32_e32 v4, v4, v16, vcc_lo
+; GFX11-FAKE16-NEXT: v_cmp_u_f32_e32 vcc_lo, v7, v7
+; GFX11-FAKE16-NEXT: v_lshrrev_b32_e32 v3, 16, v3
+; GFX11-FAKE16-NEXT: v_add_f32_e64 v16, 0x40c00000, s0
+; GFX11-FAKE16-NEXT: v_lshrrev_b32_e32 v2, 16, v2
+; GFX11-FAKE16-NEXT: v_lshrrev_b32_e32 v4, 16, v4
+; GFX11-FAKE16-NEXT: v_cndmask_b32_e32 v5, v5, v17, vcc_lo
+; GFX11-FAKE16-NEXT: v_bfe_u32 v17, v18, 16, 1
+; GFX11-FAKE16-NEXT: v_bfe_u32 v6, v16, 16, 1
+; GFX11-FAKE16-NEXT: v_and_b32_e32 v3, 0xffff, v3
+; GFX11-FAKE16-NEXT: v_lshl_or_b32 v7, v0, 16, v1
+; GFX11-FAKE16-NEXT: v_lshrrev_b32_e32 v5, 16, v5
+; GFX11-FAKE16-NEXT: v_add_nc_u32_e32 v0, v17, v18
+; GFX11-FAKE16-NEXT: v_and_b32_e32 v4, 0xffff, v4
+; GFX11-FAKE16-NEXT: v_add_nc_u32_e32 v19, v6, v16
+; GFX11-FAKE16-NEXT: v_lshl_or_b32 v6, v2, 16, v3
+; GFX11-FAKE16-NEXT: v_or_b32_e32 v3, 0x400000, v18
+; GFX11-FAKE16-NEXT: v_add_nc_u32_e32 v0, 0x7fff, v0
+; GFX11-FAKE16-NEXT: v_cmp_u_f32_e32 vcc_lo, v18, v18
+; GFX11-FAKE16-NEXT: s_and_b32 s0, s15, 0xffff0000
+; GFX11-FAKE16-NEXT: v_lshl_or_b32 v5, v5, 16, v4
+; GFX11-FAKE16-NEXT: v_add_nc_u32_e32 v1, 0x7fff, v19
+; GFX11-FAKE16-NEXT: v_or_b32_e32 v2, 0x400000, v16
+; GFX11-FAKE16-NEXT: v_add_f32_e64 v4, 0x40c00000, s0
+; GFX11-FAKE16-NEXT: v_cndmask_b32_e32 v0, v0, v3, vcc_lo
+; GFX11-FAKE16-NEXT: v_cmp_u_f32_e32 vcc_lo, v16, v16
+; GFX11-FAKE16-NEXT: s_lshl_b32 s1, s15, 16
+; GFX11-FAKE16-NEXT: s_and_b32 s0, s14, 0xffff0000
+; GFX11-FAKE16-NEXT: v_add_f32_e64 v17, 0x40c00000, s1
+; GFX11-FAKE16-NEXT: v_add_f32_e64 v16, 0x40c00000, s0
+; GFX11-FAKE16-NEXT: v_cndmask_b32_e32 v1, v1, v2, vcc_lo
+; GFX11-FAKE16-NEXT: v_bfe_u32 v2, v4, 16, 1
+; GFX11-FAKE16-NEXT: v_or_b32_e32 v19, 0x400000, v4
+; GFX11-FAKE16-NEXT: v_bfe_u32 v3, v17, 16, 1
+; GFX11-FAKE16-NEXT: v_bfe_u32 v18, v16, 16, 1
+; GFX11-FAKE16-NEXT: v_cmp_u_f32_e32 vcc_lo, v4, v4
+; GFX11-FAKE16-NEXT: v_add_nc_u32_e32 v2, v2, v4
+; GFX11-FAKE16-NEXT: s_lshl_b32 s0, s14, 16
+; GFX11-FAKE16-NEXT: v_add_nc_u32_e32 v3, v3, v17
+; GFX11-FAKE16-NEXT: v_or_b32_e32 v20, 0x400000, v17
+; GFX11-FAKE16-NEXT: v_add_nc_u32_e32 v18, v18, v16
+; GFX11-FAKE16-NEXT: v_add_nc_u32_e32 v2, 0x7fff, v2
+; GFX11-FAKE16-NEXT: v_add_f32_e64 v4, 0x40c00000, s0
+; GFX11-FAKE16-NEXT: v_add_nc_u32_e32 v3, 0x7fff, v3
+; GFX11-FAKE16-NEXT: s_and_b32 s0, s13, 0xffff0000
+; GFX11-FAKE16-NEXT: v_lshrrev_b32_e32 v0, 16, v0
+; GFX11-FAKE16-NEXT: v_cndmask_b32_e32 v2, v2, v19, vcc_lo
+; GFX11-FAKE16-NEXT: v_cmp_u_f32_e32 vcc_lo, v17, v17
+; GFX11-FAKE16-NEXT: v_add_nc_u32_e32 v17, 0x7fff, v18
+; GFX11-FAKE16-NEXT: v_or_b32_e32 v18, 0x400000, v16
+; GFX11-FAKE16-NEXT: v_bfe_u32 v19, v4, 16, 1
+; GFX11-FAKE16-NEXT: v_lshrrev_b32_e32 v1, 16, v1
+; GFX11-FAKE16-NEXT: v_cndmask_b32_e32 v3, v3, v20, vcc_lo
+; GFX11-FAKE16-NEXT: v_cmp_u_f32_e32 vcc_lo, v16, v16
+; GFX11-FAKE16-NEXT: v_and_b32_e32 v0, 0xffff, v0
+; GFX11-FAKE16-NEXT: v_lshrrev_b32_e32 v2, 16, v2
+; GFX11-FAKE16-NEXT: s_delay_alu instid0(VALU_DEP_4)
+; GFX11-FAKE16-NEXT: v_lshrrev_b32_e32 v3, 16, v3
+; GFX11-FAKE16-NEXT: v_dual_cndmask_b32 v16, v17, v18 :: v_dual_add_nc_u32 v17, v19, v4
+; GFX11-FAKE16-NEXT: v_add_f32_e64 v18, 0x40c00000, s0
+; GFX11-FAKE16-NEXT: s_lshl_b32 s0, s13, 16
+; GFX11-FAKE16-NEXT: v_or_b32_e32 v19, 0x400000, v4
+; GFX11-FAKE16-NEXT: v_add_f32_e64 v21, 0x40c00000, s0
+; GFX11-FAKE16-NEXT: v_add_nc_u32_e32 v17, 0x7fff, v17
+; GFX11-FAKE16-NEXT: v_cmp_u_f32_e32 vcc_lo, v4, v4
+; GFX11-FAKE16-NEXT: s_lshl_b32 s0, s12, 16
+; GFX11-FAKE16-NEXT: v_bfe_u32 v20, v18, 16, 1
+; GFX11-FAKE16-NEXT: v_or_b32_e32 v26, 0x400000, v21
+; GFX11-FAKE16-NEXT: v_or_b32_e32 v25, 0x400000, v18
+; GFX11-FAKE16-NEXT: v_cndmask_b32_e32 v4, v17, v19, vcc_lo
+; GFX11-FAKE16-NEXT: v_add_f32_e64 v17, 0x40c00000, s0
+; GFX11-FAKE16-NEXT: v_bfe_u32 v19, v21, 16, 1
+; GFX11-FAKE16-NEXT: s_and_b32 s0, s12, 0xffff0000
+; GFX11-FAKE16-NEXT: v_cmp_u_f32_e32 vcc_lo, v21, v21
+; GFX11-FAKE16-NEXT: v_add_f32_e64 v22, 0x40c00000, s0
+; GFX11-FAKE16-NEXT: v_bfe_u32 v23, v17, 16, 1
+; GFX11-FAKE16-NEXT: v_add_nc_u32_e32 v19, v19, v21
+; GFX11-FAKE16-NEXT: v_add_nc_u32_e32 v20, v20, v18
+; GFX11-FAKE16-NEXT: v_or_b32_e32 v27, 0x400000, v17
+; GFX11-FAKE16-NEXT: v_bfe_u32 v24, v22, 16, 1
+; GFX11-FAKE16-NEXT: v_add_nc_u32_e32 v23, v23, v17
+; GFX11-FAKE16-NEXT: v_add_nc_u32_e32 v19, 0x7fff, v19
+; GFX11-FAKE16-NEXT: v_add_nc_u32_e32 v20, 0x7fff, v20
+; GFX11-FAKE16-NEXT: v_lshrrev_b32_e32 v4, 16, v4
+; GFX11-FAKE16-NEXT: v_add_nc_u32_e32 v24, v24, v22
+; GFX11-FAKE16-NEXT: v_add_nc_u32_e32 v23, 0x7fff, v23
+; GFX11-FAKE16-NEXT: v_cndmask_b32_e32 v19, v19, v26, vcc_lo
+; GFX11-FAKE16-NEXT: v_cmp_u_f32_e32 vcc_lo, v17, v17
+; GFX11-FAKE16-NEXT: v_and_b32_e32 v3, 0xffff, v3
+; GFX11-FAKE16-NEXT: v_add_nc_u32_e32 v21, 0x7fff, v24
+; GFX11-FAKE16-NEXT: v_or_b32_e32 v24, 0x400000, v22
+; GFX11-FAKE16-NEXT: v_lshrrev_b32_e32 v19, 16, v19
+; GFX11-FAKE16-NEXT: v_cndmask_b32_e32 v17, v23, v27, vcc_lo
+; GFX11-FAKE16-NEXT: v_cmp_u_f32_e32 vcc_lo, v18, v18
+; GFX11-FAKE16-NEXT: v_lshrrev_b32_e32 v16, 16, v16
+; GFX11-FAKE16-NEXT: v_lshl_or_b32 v3, v2, 16, v3
+; GFX11-FAKE16-NEXT: v_and_b32_e32 v19, 0xffff, v19
+; GFX11-FAKE16-NEXT: v_lshrrev_b32_e32 v17, 16, v17
+; GFX11-FAKE16-NEXT: v_cndmask_b32_e32 v18, v20, v25, vcc_lo
+; GFX11-FAKE16-NEXT: v_cmp_u_f32_e32 vcc_lo, v22, v22
+; GFX11-FAKE16-NEXT: s_delay_alu instid0(VALU_DEP_3) | instskip(NEXT) | instid1(VALU_DEP_3)
+; GFX11-FAKE16-NEXT: v_and_b32_e32 v17, 0xffff, v17
+; GFX11-FAKE16-NEXT: v_lshrrev_b32_e32 v18, 16, v18
+; GFX11-FAKE16-NEXT: v_cndmask_b32_e32 v20, v21, v24, vcc_lo
+; GFX11-FAKE16-NEXT: v_and_b32_e32 v21, 0xffff, v4
+; GFX11-FAKE16-NEXT: v_lshl_or_b32 v4, v1, 16, v0
+; GFX11-FAKE16-NEXT: s_delay_alu instid0(VALU_DEP_4) | instskip(NEXT) | instid1(VALU_DEP_4)
+; GFX11-FAKE16-NEXT: v_lshl_or_b32 v1, v18, 16, v19
+; GFX11-FAKE16-NEXT: v_lshrrev_b32_e32 v20, 16, v20
+; GFX11-FAKE16-NEXT: s_delay_alu instid0(VALU_DEP_4) | instskip(NEXT) | instid1(VALU_DEP_2)
+; GFX11-FAKE16-NEXT: v_lshl_or_b32 v2, v16, 16, v21
+; GFX11-FAKE16-NEXT: v_lshl_or_b32 v0, v20, 16, v17
+; GFX11-FAKE16-NEXT: s_setpc_b64 s[30:31]
+; GFX11-FAKE16-NEXT: .LBB67_3:
+; GFX11-FAKE16-NEXT: s_branch .LBB67_2
+; GFX11-FAKE16-NEXT: .LBB67_4:
+; GFX11-FAKE16-NEXT: v_dual_mov_b32 v0, s12 :: v_dual_mov_b32 v1, s13
+; GFX11-FAKE16-NEXT: v_dual_mov_b32 v2, s14 :: v_dual_mov_b32 v3, s15
+; GFX11-FAKE16-NEXT: v_dual_mov_b32 v4, s16 :: v_dual_mov_b32 v5, s17
+; GFX11-FAKE16-NEXT: v_dual_mov_b32 v6, s18 :: v_dual_mov_b32 v7, s19
+; GFX11-FAKE16-NEXT: v_dual_mov_b32 v8, s20 :: v_dual_mov_b32 v9, s21
+; GFX11-FAKE16-NEXT: v_dual_mov_b32 v10, s22 :: v_dual_mov_b32 v11, s23
+; GFX11-FAKE16-NEXT: v_dual_mov_b32 v12, s24 :: v_dual_mov_b32 v13, s25
+; GFX11-FAKE16-NEXT: v_dual_mov_b32 v14, s26 :: v_dual_mov_b32 v15, s27
+; GFX11-FAKE16-NEXT: s_setpc_b64 s[30:31]
%cmp = icmp eq i32 %b, 0
br i1 %cmp, label %cmp.true, label %cmp.false
@@ -49092,360 +50100,696 @@ define inreg <8 x double> @bitcast_v32bf16_to_v8f64_scalar(<32 x bfloat> inreg %
; GFX9-NEXT: s_waitcnt vmcnt(0)
; GFX9-NEXT: s_setpc_b64 s[30:31]
;
-; GFX11-LABEL: bitcast_v32bf16_to_v8f64_scalar:
-; GFX11: ; %bb.0:
-; GFX11-NEXT: s_waitcnt vmcnt(0) expcnt(0) lgkmcnt(0)
-; GFX11-NEXT: s_mov_b32 s15, s3
-; GFX11-NEXT: s_mov_b32 s14, s2
-; GFX11-NEXT: s_mov_b32 s13, s1
-; GFX11-NEXT: s_mov_b32 s12, s0
-; GFX11-NEXT: s_cmp_lg_u32 s28, 0
-; GFX11-NEXT: s_mov_b32 s0, 0
-; GFX11-NEXT: s_cbranch_scc0 .LBB83_3
-; GFX11-NEXT: ; %bb.1: ; %Flow
-; GFX11-NEXT: s_and_not1_b32 vcc_lo, exec_lo, s0
-; GFX11-NEXT: s_cbranch_vccnz .LBB83_4
-; GFX11-NEXT: .LBB83_2: ; %cmp.true
-; GFX11-NEXT: s_and_b32 s1, s27, 0xffff0000
-; GFX11-NEXT: s_lshl_b32 s0, s27, 16
-; GFX11-NEXT: v_add_f32_e64 v1, 0x40c00000, s1
-; GFX11-NEXT: v_add_f32_e64 v0, 0x40c00000, s0
-; GFX11-NEXT: s_and_b32 s0, s26, 0xffff0000
-; GFX11-NEXT: s_lshl_b32 s2, s26, 16
-; GFX11-NEXT: v_add_f32_e64 v3, 0x40c00000, s0
-; GFX11-NEXT: v_bfe_u32 v4, v1, 16, 1
-; GFX11-NEXT: v_bfe_u32 v2, v0, 16, 1
-; GFX11-NEXT: v_add_f32_e64 v5, 0x40c00000, s2
-; GFX11-NEXT: v_or_b32_e32 v7, 0x400000, v1
-; GFX11-NEXT: v_or_b32_e32 v8, 0x400000, v0
-; GFX11-NEXT: v_add_nc_u32_e32 v4, v4, v1
-; GFX11-NEXT: v_bfe_u32 v9, v3, 16, 1
-; GFX11-NEXT: v_bfe_u32 v10, v5, 16, 1
-; GFX11-NEXT: v_cmp_u_f32_e32 vcc_lo, v0, v0
-; GFX11-NEXT: s_and_b32 s1, s25, 0xffff0000
-; GFX11-NEXT: v_add_nc_u32_e32 v4, 0x7fff, v4
-; GFX11-NEXT: v_add_nc_u32_e32 v2, v2, v0
-; GFX11-NEXT: v_add_f32_e64 v6, 0x40c00000, s1
-; GFX11-NEXT: s_lshl_b32 s3, s25, 16
-; GFX11-NEXT: s_and_b32 s0, s24, 0xffff0000
-; GFX11-NEXT: s_lshl_b32 s1, s24, 16
-; GFX11-NEXT: v_add_nc_u32_e32 v2, 0x7fff, v2
-; GFX11-NEXT: s_delay_alu instid0(VALU_DEP_1)
-; GFX11-NEXT: v_cndmask_b32_e32 v0, v2, v8, vcc_lo
-; GFX11-NEXT: v_add_nc_u32_e32 v2, v9, v3
-; GFX11-NEXT: v_add_nc_u32_e32 v8, v10, v5
-; GFX11-NEXT: v_cmp_u_f32_e32 vcc_lo, v1, v1
-; GFX11-NEXT: v_or_b32_e32 v9, 0x400000, v3
-; GFX11-NEXT: v_lshrrev_b32_e32 v0, 16, v0
-; GFX11-NEXT: v_dual_cndmask_b32 v1, v4, v7 :: v_dual_add_nc_u32 v2, 0x7fff, v2
-; GFX11-NEXT: v_or_b32_e32 v4, 0x400000, v5
-; GFX11-NEXT: v_add_nc_u32_e32 v7, 0x7fff, v8
-; GFX11-NEXT: v_cmp_u_f32_e32 vcc_lo, v5, v5
-; GFX11-NEXT: v_add_f32_e64 v8, 0x40c00000, s3
-; GFX11-NEXT: v_bfe_u32 v5, v6, 16, 1
-; GFX11-NEXT: v_lshrrev_b32_e32 v1, 16, v1
-; GFX11-NEXT: v_and_b32_e32 v0, 0xffff, v0
-; GFX11-NEXT: v_cndmask_b32_e32 v4, v7, v4, vcc_lo
-; GFX11-NEXT: v_cmp_u_f32_e32 vcc_lo, v3, v3
-; GFX11-NEXT: v_bfe_u32 v7, v8, 16, 1
-; GFX11-NEXT: s_delay_alu instid0(VALU_DEP_4) | instskip(NEXT) | instid1(VALU_DEP_4)
-; GFX11-NEXT: v_lshl_or_b32 v15, v1, 16, v0
-; GFX11-NEXT: v_lshrrev_b32_e32 v3, 16, v4
-; GFX11-NEXT: v_cndmask_b32_e32 v2, v2, v9, vcc_lo
-; GFX11-NEXT: v_add_nc_u32_e32 v4, v5, v6
-; GFX11-NEXT: v_or_b32_e32 v9, 0x400000, v6
-; GFX11-NEXT: v_cmp_u_f32_e32 vcc_lo, v8, v8
-; GFX11-NEXT: v_and_b32_e32 v1, 0xffff, v3
-; GFX11-NEXT: v_lshrrev_b32_e32 v0, 16, v2
-; GFX11-NEXT: v_add_nc_u32_e32 v2, 0x7fff, v4
-; GFX11-NEXT: v_add_nc_u32_e32 v5, v7, v8
-; GFX11-NEXT: v_or_b32_e32 v4, 0x400000, v8
-; GFX11-NEXT: v_add_f32_e64 v7, 0x40c00000, s1
-; GFX11-NEXT: s_lshl_b32 s1, s23, 16
-; GFX11-NEXT: v_lshl_or_b32 v14, v0, 16, v1
-; GFX11-NEXT: v_add_nc_u32_e32 v3, 0x7fff, v5
-; GFX11-NEXT: v_add_f32_e64 v5, 0x40c00000, s0
-; GFX11-NEXT: v_bfe_u32 v8, v7, 16, 1
-; GFX11-NEXT: s_and_b32 s0, s23, 0xffff0000
-; GFX11-NEXT: s_delay_alu instid0(VALU_DEP_3) | instskip(NEXT) | instid1(VALU_DEP_3)
-; GFX11-NEXT: v_cndmask_b32_e32 v3, v3, v4, vcc_lo
-; GFX11-NEXT: v_bfe_u32 v4, v5, 16, 1
-; GFX11-NEXT: v_cmp_u_f32_e32 vcc_lo, v6, v6
-; GFX11-NEXT: v_add_nc_u32_e32 v6, v8, v7
-; GFX11-NEXT: v_add_f32_e64 v8, 0x40c00000, s1
-; GFX11-NEXT: v_lshrrev_b32_e32 v3, 16, v3
-; GFX11-NEXT: v_add_nc_u32_e32 v4, v4, v5
-; GFX11-NEXT: v_cndmask_b32_e32 v2, v2, v9, vcc_lo
-; GFX11-NEXT: v_or_b32_e32 v9, 0x400000, v5
-; GFX11-NEXT: v_cmp_u_f32_e32 vcc_lo, v7, v7
-; GFX11-NEXT: s_lshl_b32 s1, s22, 16
-; GFX11-NEXT: s_delay_alu instid0(VALU_DEP_3)
-; GFX11-NEXT: v_lshrrev_b32_e32 v0, 16, v2
-; GFX11-NEXT: v_add_nc_u32_e32 v2, 0x7fff, v4
-; GFX11-NEXT: v_and_b32_e32 v1, 0xffff, v3
-; GFX11-NEXT: v_add_nc_u32_e32 v3, 0x7fff, v6
-; GFX11-NEXT: v_or_b32_e32 v4, 0x400000, v7
-; GFX11-NEXT: v_add_f32_e64 v6, 0x40c00000, s0
-; GFX11-NEXT: v_bfe_u32 v7, v8, 16, 1
-; GFX11-NEXT: s_and_b32 s0, s22, 0xffff0000
-; GFX11-NEXT: s_delay_alu instid0(VALU_DEP_3) | instskip(NEXT) | instid1(VALU_DEP_3)
-; GFX11-NEXT: v_cndmask_b32_e32 v3, v3, v4, vcc_lo
-; GFX11-NEXT: v_bfe_u32 v4, v6, 16, 1
-; GFX11-NEXT: v_cmp_u_f32_e32 vcc_lo, v5, v5
-; GFX11-NEXT: v_add_nc_u32_e32 v5, v7, v8
-; GFX11-NEXT: v_add_f32_e64 v7, 0x40c00000, s1
-; GFX11-NEXT: v_lshrrev_b32_e32 v3, 16, v3
-; GFX11-NEXT: v_add_nc_u32_e32 v4, v4, v6
-; GFX11-NEXT: v_cndmask_b32_e32 v2, v2, v9, vcc_lo
-; GFX11-NEXT: v_lshl_or_b32 v13, v0, 16, v1
-; GFX11-NEXT: v_cmp_u_f32_e32 vcc_lo, v8, v8
-; GFX11-NEXT: v_and_b32_e32 v1, 0xffff, v3
-; GFX11-NEXT: v_add_nc_u32_e32 v3, 0x7fff, v5
-; GFX11-NEXT: v_lshrrev_b32_e32 v0, 16, v2
-; GFX11-NEXT: v_add_nc_u32_e32 v2, 0x7fff, v4
-; GFX11-NEXT: v_or_b32_e32 v4, 0x400000, v8
-; GFX11-NEXT: v_add_f32_e64 v5, 0x40c00000, s0
-; GFX11-NEXT: v_or_b32_e32 v9, 0x400000, v6
-; GFX11-NEXT: v_bfe_u32 v8, v7, 16, 1
-; GFX11-NEXT: s_and_b32 s0, s21, 0xffff0000
-; GFX11-NEXT: v_cndmask_b32_e32 v3, v3, v4, vcc_lo
-; GFX11-NEXT: v_bfe_u32 v4, v5, 16, 1
-; GFX11-NEXT: v_cmp_u_f32_e32 vcc_lo, v6, v6
-; GFX11-NEXT: v_add_nc_u32_e32 v6, v8, v7
-; GFX11-NEXT: s_lshl_b32 s1, s21, 16
-; GFX11-NEXT: v_lshrrev_b32_e32 v3, 16, v3
-; GFX11-NEXT: v_add_nc_u32_e32 v4, v4, v5
-; GFX11-NEXT: v_cndmask_b32_e32 v2, v2, v9, vcc_lo
-; GFX11-NEXT: v_lshl_or_b32 v12, v0, 16, v1
-; GFX11-NEXT: v_add_f32_e64 v8, 0x40c00000, s1
-; GFX11-NEXT: v_and_b32_e32 v1, 0xffff, v3
-; GFX11-NEXT: v_add_nc_u32_e32 v3, 0x7fff, v6
-; GFX11-NEXT: v_lshrrev_b32_e32 v0, 16, v2
-; GFX11-NEXT: v_add_nc_u32_e32 v2, 0x7fff, v4
-; GFX11-NEXT: v_or_b32_e32 v4, 0x400000, v7
-; GFX11-NEXT: v_add_f32_e64 v6, 0x40c00000, s0
-; GFX11-NEXT: v_cmp_u_f32_e32 vcc_lo, v7, v7
-; GFX11-NEXT: v_or_b32_e32 v9, 0x400000, v5
-; GFX11-NEXT: v_bfe_u32 v7, v8, 16, 1
-; GFX11-NEXT: s_and_b32 s0, s20, 0xffff0000
-; GFX11-NEXT: s_lshl_b32 s1, s20, 16
-; GFX11-NEXT: v_cndmask_b32_e32 v3, v3, v4, vcc_lo
-; GFX11-NEXT: v_bfe_u32 v4, v6, 16, 1
-; GFX11-NEXT: v_cmp_u_f32_e32 vcc_lo, v5, v5
-; GFX11-NEXT: v_add_nc_u32_e32 v5, v7, v8
-; GFX11-NEXT: v_lshl_or_b32 v11, v0, 16, v1
-; GFX11-NEXT: v_lshrrev_b32_e32 v3, 16, v3
-; GFX11-NEXT: v_add_nc_u32_e32 v4, v4, v6
-; GFX11-NEXT: v_cndmask_b32_e32 v2, v2, v9, vcc_lo
-; GFX11-NEXT: v_add_f32_e64 v7, 0x40c00000, s1
-; GFX11-NEXT: v_cmp_u_f32_e32 vcc_lo, v8, v8
-; GFX11-NEXT: v_and_b32_e32 v1, 0xffff, v3
-; GFX11-NEXT: v_add_nc_u32_e32 v3, 0x7fff, v5
-; GFX11-NEXT: v_lshrrev_b32_e32 v0, 16, v2
-; GFX11-NEXT: v_add_nc_u32_e32 v2, 0x7fff, v4
-; GFX11-NEXT: v_or_b32_e32 v4, 0x400000, v8
-; GFX11-NEXT: v_add_f32_e64 v5, 0x40c00000, s0
-; GFX11-NEXT: v_or_b32_e32 v9, 0x400000, v6
-; GFX11-NEXT: v_bfe_u32 v8, v7, 16, 1
-; GFX11-NEXT: v_lshl_or_b32 v10, v0, 16, v1
-; GFX11-NEXT: v_cndmask_b32_e32 v3, v3, v4, vcc_lo
-; GFX11-NEXT: v_bfe_u32 v4, v5, 16, 1
-; GFX11-NEXT: v_cmp_u_f32_e32 vcc_lo, v6, v6
-; GFX11-NEXT: v_add_nc_u32_e32 v0, v8, v7
-; GFX11-NEXT: s_lshl_b32 s0, s19, 16
-; GFX11-NEXT: v_lshrrev_b32_e32 v3, 16, v3
-; GFX11-NEXT: v_add_nc_u32_e32 v4, v4, v5
-; GFX11-NEXT: v_cndmask_b32_e32 v2, v2, v9, vcc_lo
-; GFX11-NEXT: v_add_nc_u32_e32 v0, 0x7fff, v0
-; GFX11-NEXT: v_or_b32_e32 v6, 0x400000, v7
-; GFX11-NEXT: v_cmp_u_f32_e32 vcc_lo, v7, v7
-; GFX11-NEXT: v_or_b32_e32 v9, 0x400000, v5
-; GFX11-NEXT: v_lshrrev_b32_e32 v1, 16, v2
-; GFX11-NEXT: v_and_b32_e32 v2, 0xffff, v3
-; GFX11-NEXT: v_add_nc_u32_e32 v3, 0x7fff, v4
-; GFX11-NEXT: v_add_f32_e64 v4, 0x40c00000, s0
-; GFX11-NEXT: s_and_b32 s0, s19, 0xffff0000
-; GFX11-NEXT: v_cndmask_b32_e32 v0, v0, v6, vcc_lo
-; GFX11-NEXT: v_add_f32_e64 v8, 0x40c00000, s0
-; GFX11-NEXT: v_cmp_u_f32_e32 vcc_lo, v5, v5
-; GFX11-NEXT: v_bfe_u32 v16, v4, 16, 1
-; GFX11-NEXT: s_and_b32 s0, s18, 0xffff0000
-; GFX11-NEXT: v_lshrrev_b32_e32 v0, 16, v0
-; GFX11-NEXT: v_bfe_u32 v6, v8, 16, 1
-; GFX11-NEXT: v_cndmask_b32_e32 v3, v3, v9, vcc_lo
-; GFX11-NEXT: v_add_nc_u32_e32 v5, v16, v4
-; GFX11-NEXT: v_lshl_or_b32 v9, v1, 16, v2
-; GFX11-NEXT: v_cmp_u_f32_e32 vcc_lo, v4, v4
-; GFX11-NEXT: v_add_nc_u32_e32 v1, v6, v8
-; GFX11-NEXT: v_lshrrev_b32_e32 v2, 16, v3
-; GFX11-NEXT: v_add_nc_u32_e32 v3, 0x7fff, v5
-; GFX11-NEXT: v_or_b32_e32 v5, 0x400000, v4
-; GFX11-NEXT: v_or_b32_e32 v6, 0x400000, v8
-; GFX11-NEXT: v_add_nc_u32_e32 v1, 0x7fff, v1
-; GFX11-NEXT: v_add_f32_e64 v4, 0x40c00000, s0
-; GFX11-NEXT: s_lshl_b32 s1, s18, 16
-; GFX11-NEXT: v_cndmask_b32_e32 v3, v3, v5, vcc_lo
-; GFX11-NEXT: v_cmp_u_f32_e32 vcc_lo, v8, v8
-; GFX11-NEXT: v_and_b32_e32 v0, 0xffff, v0
-; GFX11-NEXT: v_add_f32_e64 v5, 0x40c00000, s1
-; GFX11-NEXT: s_lshl_b32 s0, s17, 16
-; GFX11-NEXT: v_lshrrev_b32_e32 v3, 16, v3
-; GFX11-NEXT: v_cndmask_b32_e32 v1, v1, v6, vcc_lo
-; GFX11-NEXT: v_bfe_u32 v6, v4, 16, 1
-; GFX11-NEXT: v_bfe_u32 v7, v5, 16, 1
-; GFX11-NEXT: v_lshl_or_b32 v8, v2, 16, v0
-; GFX11-NEXT: s_and_b32 s1, s17, 0xffff0000
-; GFX11-NEXT: v_lshrrev_b32_e32 v0, 16, v1
-; GFX11-NEXT: v_add_nc_u32_e32 v2, v6, v4
-; GFX11-NEXT: v_add_f32_e64 v6, 0x40c00000, s0
-; GFX11-NEXT: v_and_b32_e32 v1, 0xffff, v3
-; GFX11-NEXT: v_add_nc_u32_e32 v3, v7, v5
-; GFX11-NEXT: v_add_f32_e64 v7, 0x40c00000, s1
-; GFX11-NEXT: v_add_nc_u32_e32 v2, 0x7fff, v2
-; GFX11-NEXT: v_or_b32_e32 v16, 0x400000, v4
-; GFX11-NEXT: v_bfe_u32 v17, v6, 16, 1
-; GFX11-NEXT: v_cmp_u_f32_e32 vcc_lo, v4, v4
-; GFX11-NEXT: v_add_nc_u32_e32 v3, 0x7fff, v3
-; GFX11-NEXT: v_or_b32_e32 v18, 0x400000, v5
-; GFX11-NEXT: v_bfe_u32 v19, v7, 16, 1
-; GFX11-NEXT: v_add_nc_u32_e32 v4, v17, v6
-; GFX11-NEXT: v_cndmask_b32_e32 v2, v2, v16, vcc_lo
-; GFX11-NEXT: v_cmp_u_f32_e32 vcc_lo, v5, v5
-; GFX11-NEXT: v_or_b32_e32 v16, 0x400000, v6
-; GFX11-NEXT: v_add_nc_u32_e32 v5, v19, v7
-; GFX11-NEXT: v_add_nc_u32_e32 v4, 0x7fff, v4
-; GFX11-NEXT: s_lshl_b32 s1, s16, 16
-; GFX11-NEXT: v_cndmask_b32_e32 v3, v3, v18, vcc_lo
-; GFX11-NEXT: v_cmp_u_f32_e32 vcc_lo, v6, v6
-; GFX11-NEXT: v_add_nc_u32_e32 v5, 0x7fff, v5
-; GFX11-NEXT: v_or_b32_e32 v17, 0x400000, v7
-; GFX11-NEXT: v_add_f32_e64 v18, 0x40c00000, s1
-; GFX11-NEXT: s_and_b32 s0, s16, 0xffff0000
-; GFX11-NEXT: v_cndmask_b32_e32 v4, v4, v16, vcc_lo
-; GFX11-NEXT: v_cmp_u_f32_e32 vcc_lo, v7, v7
-; GFX11-NEXT: v_lshrrev_b32_e32 v3, 16, v3
-; GFX11-NEXT: v_add_f32_e64 v16, 0x40c00000, s0
-; GFX11-NEXT: v_lshrrev_b32_e32 v2, 16, v2
-; GFX11-NEXT: v_lshrrev_b32_e32 v4, 16, v4
-; GFX11-NEXT: v_cndmask_b32_e32 v5, v5, v17, vcc_lo
-; GFX11-NEXT: v_bfe_u32 v17, v18, 16, 1
-; GFX11-NEXT: v_bfe_u32 v6, v16, 16, 1
-; GFX11-NEXT: v_and_b32_e32 v3, 0xffff, v3
-; GFX11-NEXT: v_lshl_or_b32 v7, v0, 16, v1
-; GFX11-NEXT: v_lshrrev_b32_e32 v5, 16, v5
-; GFX11-NEXT: v_add_nc_u32_e32 v0, v17, v18
-; GFX11-NEXT: v_and_b32_e32 v4, 0xffff, v4
-; GFX11-NEXT: v_add_nc_u32_e32 v19, v6, v16
-; GFX11-NEXT: v_lshl_or_b32 v6, v2, 16, v3
-; GFX11-NEXT: v_or_b32_e32 v3, 0x400000, v18
-; GFX11-NEXT: v_add_nc_u32_e32 v0, 0x7fff, v0
-; GFX11-NEXT: v_cmp_u_f32_e32 vcc_lo, v18, v18
-; GFX11-NEXT: s_and_b32 s0, s15, 0xffff0000
-; GFX11-NEXT: v_lshl_or_b32 v5, v5, 16, v4
-; GFX11-NEXT: v_add_nc_u32_e32 v1, 0x7fff, v19
-; GFX11-NEXT: v_or_b32_e32 v2, 0x400000, v16
-; GFX11-NEXT: v_add_f32_e64 v4, 0x40c00000, s0
-; GFX11-NEXT: v_cndmask_b32_e32 v0, v0, v3, vcc_lo
-; GFX11-NEXT: v_cmp_u_f32_e32 vcc_lo, v16, v16
-; GFX11-NEXT: s_lshl_b32 s1, s15, 16
-; GFX11-NEXT: s_and_b32 s0, s14, 0xffff0000
-; GFX11-NEXT: v_add_f32_e64 v17, 0x40c00000, s1
-; GFX11-NEXT: v_add_f32_e64 v16, 0x40c00000, s0
-; GFX11-NEXT: v_cndmask_b32_e32 v1, v1, v2, vcc_lo
-; GFX11-NEXT: v_bfe_u32 v2, v4, 16, 1
-; GFX11-NEXT: v_or_b32_e32 v19, 0x400000, v4
-; GFX11-NEXT: v_bfe_u32 v3, v17, 16, 1
-; GFX11-NEXT: v_bfe_u32 v18, v16, 16, 1
-; GFX11-NEXT: v_cmp_u_f32_e32 vcc_lo, v4, v4
-; GFX11-NEXT: v_add_nc_u32_e32 v2, v2, v4
-; GFX11-NEXT: s_lshl_b32 s0, s14, 16
-; GFX11-NEXT: v_add_nc_u32_e32 v3, v3, v17
-; GFX11-NEXT: v_or_b32_e32 v20, 0x400000, v17
-; GFX11-NEXT: v_add_nc_u32_e32 v18, v18, v16
-; GFX11-NEXT: v_add_nc_u32_e32 v2, 0x7fff, v2
-; GFX11-NEXT: v_add_f32_e64 v4, 0x40c00000, s0
-; GFX11-NEXT: v_add_nc_u32_e32 v3, 0x7fff, v3
-; GFX11-NEXT: s_and_b32 s0, s13, 0xffff0000
-; GFX11-NEXT: v_lshrrev_b32_e32 v0, 16, v0
-; GFX11-NEXT: v_cndmask_b32_e32 v2, v2, v19, vcc_lo
-; GFX11-NEXT: v_cmp_u_f32_e32 vcc_lo, v17, v17
-; GFX11-NEXT: v_add_nc_u32_e32 v17, 0x7fff, v18
-; GFX11-NEXT: v_or_b32_e32 v18, 0x400000, v16
-; GFX11-NEXT: v_bfe_u32 v19, v4, 16, 1
-; GFX11-NEXT: v_lshrrev_b32_e32 v1, 16, v1
-; GFX11-NEXT: v_cndmask_b32_e32 v3, v3, v20, vcc_lo
-; GFX11-NEXT: v_cmp_u_f32_e32 vcc_lo, v16, v16
-; GFX11-NEXT: v_and_b32_e32 v0, 0xffff, v0
-; GFX11-NEXT: v_lshrrev_b32_e32 v2, 16, v2
-; GFX11-NEXT: s_delay_alu instid0(VALU_DEP_4)
-; GFX11-NEXT: v_lshrrev_b32_e32 v3, 16, v3
-; GFX11-NEXT: v_dual_cndmask_b32 v16, v17, v18 :: v_dual_add_nc_u32 v17, v19, v4
-; GFX11-NEXT: v_add_f32_e64 v18, 0x40c00000, s0
-; GFX11-NEXT: s_lshl_b32 s0, s13, 16
-; GFX11-NEXT: v_or_b32_e32 v19, 0x400000, v4
-; GFX11-NEXT: v_add_f32_e64 v21, 0x40c00000, s0
-; GFX11-NEXT: v_add_nc_u32_e32 v17, 0x7fff, v17
-; GFX11-NEXT: v_cmp_u_f32_e32 vcc_lo, v4, v4
-; GFX11-NEXT: s_lshl_b32 s0, s12, 16
-; GFX11-NEXT: v_bfe_u32 v20, v18, 16, 1
-; GFX11-NEXT: v_or_b32_e32 v26, 0x400000, v21
-; GFX11-NEXT: v_or_b32_e32 v25, 0x400000, v18
-; GFX11-NEXT: v_cndmask_b32_e32 v4, v17, v19, vcc_lo
-; GFX11-NEXT: v_add_f32_e64 v17, 0x40c00000, s0
-; GFX11-NEXT: v_bfe_u32 v19, v21, 16, 1
-; GFX11-NEXT: s_and_b32 s0, s12, 0xffff0000
-; GFX11-NEXT: v_cmp_u_f32_e32 vcc_lo, v21, v21
-; GFX11-NEXT: v_add_f32_e64 v22, 0x40c00000, s0
-; GFX11-NEXT: v_bfe_u32 v23, v17, 16, 1
-; GFX11-NEXT: v_add_nc_u32_e32 v19, v19, v21
-; GFX11-NEXT: v_add_nc_u32_e32 v20, v20, v18
-; GFX11-NEXT: v_or_b32_e32 v27, 0x400000, v17
-; GFX11-NEXT: v_bfe_u32 v24, v22, 16, 1
-; GFX11-NEXT: v_add_nc_u32_e32 v23, v23, v17
-; GFX11-NEXT: v_add_nc_u32_e32 v19, 0x7fff, v19
-; GFX11-NEXT: v_add_nc_u32_e32 v20, 0x7fff, v20
-; GFX11-NEXT: v_lshrrev_b32_e32 v4, 16, v4
-; GFX11-NEXT: v_add_nc_u32_e32 v24, v24, v22
-; GFX11-NEXT: v_add_nc_u32_e32 v23, 0x7fff, v23
-; GFX11-NEXT: v_cndmask_b32_e32 v19, v19, v26, vcc_lo
-; GFX11-NEXT: v_cmp_u_f32_e32 vcc_lo, v17, v17
-; GFX11-NEXT: v_and_b32_e32 v3, 0xffff, v3
-; GFX11-NEXT: v_add_nc_u32_e32 v21, 0x7fff, v24
-; GFX11-NEXT: v_or_b32_e32 v24, 0x400000, v22
-; GFX11-NEXT: v_lshrrev_b32_e32 v19, 16, v19
-; GFX11-NEXT: v_cndmask_b32_e32 v17, v23, v27, vcc_lo
-; GFX11-NEXT: v_cmp_u_f32_e32 vcc_lo, v18, v18
-; GFX11-NEXT: v_lshrrev_b32_e32 v16, 16, v16
-; GFX11-NEXT: v_lshl_or_b32 v3, v2, 16, v3
-; GFX11-NEXT: v_and_b32_e32 v19, 0xffff, v19
-; GFX11-NEXT: v_lshrrev_b32_e32 v17, 16, v17
-; GFX11-NEXT: v_cndmask_b32_e32 v18, v20, v25, vcc_lo
-; GFX11-NEXT: v_cmp_u_f32_e32 vcc_lo, v22, v22
-; GFX11-NEXT: s_delay_alu instid0(VALU_DEP_3) | instskip(NEXT) | instid1(VALU_DEP_3)
-; GFX11-NEXT: v_and_b32_e32 v17, 0xffff, v17
-; GFX11-NEXT: v_lshrrev_b32_e32 v18, 16, v18
-; GFX11-NEXT: v_cndmask_b32_e32 v20, v21, v24, vcc_lo
-; GFX11-NEXT: v_and_b32_e32 v21, 0xffff, v4
-; GFX11-NEXT: v_lshl_or_b32 v4, v1, 16, v0
-; GFX11-NEXT: s_delay_alu instid0(VALU_DEP_4) | instskip(NEXT) | instid1(VALU_DEP_4)
-; GFX11-NEXT: v_lshl_or_b32 v1, v18, 16, v19
-; GFX11-NEXT: v_lshrrev_b32_e32 v20, 16, v20
-; GFX11-NEXT: s_delay_alu instid0(VALU_DEP_4) | instskip(NEXT) | instid1(VALU_DEP_2)
-; GFX11-NEXT: v_lshl_or_b32 v2, v16, 16, v21
-; GFX11-NEXT: v_lshl_or_b32 v0, v20, 16, v17
-; GFX11-NEXT: s_setpc_b64 s[30:31]
-; GFX11-NEXT: .LBB83_3:
-; GFX11-NEXT: s_branch .LBB83_2
-; GFX11-NEXT: .LBB83_4:
-; GFX11-NEXT: v_dual_mov_b32 v0, s12 :: v_dual_mov_b32 v1, s13
-; GFX11-NEXT: v_dual_mov_b32 v2, s14 :: v_dual_mov_b32 v3, s15
-; GFX11-NEXT: v_dual_mov_b32 v4, s16 :: v_dual_mov_b32 v5, s17
-; GFX11-NEXT: v_dual_mov_b32 v6, s18 :: v_dual_mov_b32 v7, s19
-; GFX11-NEXT: v_dual_mov_b32 v8, s20 :: v_dual_mov_b32 v9, s21
-; GFX11-NEXT: v_dual_mov_b32 v10, s22 :: v_dual_mov_b32 v11, s23
-; GFX11-NEXT: v_dual_mov_b32 v12, s24 :: v_dual_mov_b32 v13, s25
-; GFX11-NEXT: v_dual_mov_b32 v14, s26 :: v_dual_mov_b32 v15, s27
-; GFX11-NEXT: s_setpc_b64 s[30:31]
+; GFX11-TRUE16-LABEL: bitcast_v32bf16_to_v8f64_scalar:
+; GFX11-TRUE16: ; %bb.0:
+; GFX11-TRUE16-NEXT: s_waitcnt vmcnt(0) expcnt(0) lgkmcnt(0)
+; GFX11-TRUE16-NEXT: s_mov_b32 s15, s3
+; GFX11-TRUE16-NEXT: s_mov_b32 s14, s2
+; GFX11-TRUE16-NEXT: s_mov_b32 s13, s1
+; GFX11-TRUE16-NEXT: s_mov_b32 s12, s0
+; GFX11-TRUE16-NEXT: s_cmp_lg_u32 s28, 0
+; GFX11-TRUE16-NEXT: s_mov_b32 s0, 0
+; GFX11-TRUE16-NEXT: s_cbranch_scc0 .LBB83_3
+; GFX11-TRUE16-NEXT: ; %bb.1: ; %Flow
+; GFX11-TRUE16-NEXT: s_and_not1_b32 vcc_lo, exec_lo, s0
+; GFX11-TRUE16-NEXT: s_cbranch_vccnz .LBB83_4
+; GFX11-TRUE16-NEXT: .LBB83_2: ; %cmp.true
+; GFX11-TRUE16-NEXT: s_and_b32 s0, s27, 0xffff0000
+; GFX11-TRUE16-NEXT: s_lshl_b32 s1, s27, 16
+; GFX11-TRUE16-NEXT: v_add_f32_e64 v0, 0x40c00000, s0
+; GFX11-TRUE16-NEXT: v_add_f32_e64 v1, 0x40c00000, s1
+; GFX11-TRUE16-NEXT: s_and_b32 s0, s26, 0xffff0000
+; GFX11-TRUE16-NEXT: s_lshl_b32 s1, s26, 16
+; GFX11-TRUE16-NEXT: v_add_f32_e64 v4, 0x40c00000, s0
+; GFX11-TRUE16-NEXT: v_bfe_u32 v2, v0, 16, 1
+; GFX11-TRUE16-NEXT: v_bfe_u32 v3, v1, 16, 1
+; GFX11-TRUE16-NEXT: v_or_b32_e32 v6, 0x400000, v0
+; GFX11-TRUE16-NEXT: v_or_b32_e32 v8, 0x400000, v1
+; GFX11-TRUE16-NEXT: v_cmp_u_f32_e32 vcc_lo, v0, v0
+; GFX11-TRUE16-NEXT: v_add_nc_u32_e32 v2, v2, v0
+; GFX11-TRUE16-NEXT: v_add_f32_e64 v5, 0x40c00000, s1
+; GFX11-TRUE16-NEXT: v_bfe_u32 v7, v4, 16, 1
+; GFX11-TRUE16-NEXT: s_and_b32 s0, s25, 0xffff0000
+; GFX11-TRUE16-NEXT: s_lshl_b32 s1, s17, 16
+; GFX11-TRUE16-NEXT: v_add_nc_u32_e32 v2, 0x7fff, v2
+; GFX11-TRUE16-NEXT: v_add_nc_u32_e32 v3, v3, v1
+; GFX11-TRUE16-NEXT: v_bfe_u32 v9, v5, 16, 1
+; GFX11-TRUE16-NEXT: s_delay_alu instid0(VALU_DEP_3) | instskip(NEXT) | instid1(VALU_DEP_3)
+; GFX11-TRUE16-NEXT: v_dual_cndmask_b32 v0, v2, v6 :: v_dual_add_nc_u32 v7, v7, v4
+; GFX11-TRUE16-NEXT: v_add_nc_u32_e32 v3, 0x7fff, v3
+; GFX11-TRUE16-NEXT: v_cmp_u_f32_e32 vcc_lo, v1, v1
+; GFX11-TRUE16-NEXT: s_delay_alu instid0(VALU_DEP_4)
+; GFX11-TRUE16-NEXT: v_add_nc_u32_e32 v2, v9, v5
+; GFX11-TRUE16-NEXT: v_or_b32_e32 v6, 0x400000, v4
+; GFX11-TRUE16-NEXT: v_lshrrev_b32_e32 v0, 16, v0
+; GFX11-TRUE16-NEXT: v_cndmask_b32_e32 v1, v3, v8, vcc_lo
+; GFX11-TRUE16-NEXT: v_add_nc_u32_e32 v3, 0x7fff, v7
+; GFX11-TRUE16-NEXT: v_cmp_u_f32_e32 vcc_lo, v4, v4
+; GFX11-TRUE16-NEXT: v_add_nc_u32_e32 v2, 0x7fff, v2
+; GFX11-TRUE16-NEXT: v_or_b32_e32 v7, 0x400000, v5
+; GFX11-TRUE16-NEXT: v_lshrrev_b32_e32 v15, 16, v1
+; GFX11-TRUE16-NEXT: v_add_f32_e64 v1, 0x40c00000, s0
+; GFX11-TRUE16-NEXT: s_lshl_b32 s0, s25, 16
+; GFX11-TRUE16-NEXT: v_cndmask_b32_e32 v3, v3, v6, vcc_lo
+; GFX11-TRUE16-NEXT: v_add_f32_e64 v9, 0x40c00000, s0
+; GFX11-TRUE16-NEXT: v_cmp_u_f32_e32 vcc_lo, v5, v5
+; GFX11-TRUE16-NEXT: v_bfe_u32 v8, v1, 16, 1
+; GFX11-TRUE16-NEXT: s_and_b32 s0, s24, 0xffff0000
+; GFX11-TRUE16-NEXT: v_mov_b16_e32 v15.h, v0.l
+; GFX11-TRUE16-NEXT: v_bfe_u32 v5, v9, 16, 1
+; GFX11-TRUE16-NEXT: v_cndmask_b32_e32 v2, v2, v7, vcc_lo
+; GFX11-TRUE16-NEXT: v_add_nc_u32_e32 v4, v8, v1
+; GFX11-TRUE16-NEXT: v_lshrrev_b32_e32 v0, 16, v3
+; GFX11-TRUE16-NEXT: v_cmp_u_f32_e32 vcc_lo, v1, v1
+; GFX11-TRUE16-NEXT: v_add_nc_u32_e32 v3, v5, v9
+; GFX11-TRUE16-NEXT: v_lshrrev_b32_e32 v14, 16, v2
+; GFX11-TRUE16-NEXT: v_add_nc_u32_e32 v2, 0x7fff, v4
+; GFX11-TRUE16-NEXT: v_or_b32_e32 v4, 0x400000, v1
+; GFX11-TRUE16-NEXT: v_add_f32_e64 v5, 0x40c00000, s0
+; GFX11-TRUE16-NEXT: s_lshl_b32 s0, s24, 16
+; GFX11-TRUE16-NEXT: v_add_nc_u32_e32 v3, 0x7fff, v3
+; GFX11-TRUE16-NEXT: v_add_f32_e64 v6, 0x40c00000, s0
+; GFX11-TRUE16-NEXT: v_or_b32_e32 v7, 0x400000, v9
+; GFX11-TRUE16-NEXT: v_bfe_u32 v8, v5, 16, 1
+; GFX11-TRUE16-NEXT: v_cndmask_b32_e32 v1, v2, v4, vcc_lo
+; GFX11-TRUE16-NEXT: v_cmp_u_f32_e32 vcc_lo, v9, v9
+; GFX11-TRUE16-NEXT: v_bfe_u32 v2, v6, 16, 1
+; GFX11-TRUE16-NEXT: s_and_b32 s0, s23, 0xffff0000
+; GFX11-TRUE16-NEXT: v_add_nc_u32_e32 v4, v8, v5
+; GFX11-TRUE16-NEXT: v_mov_b16_e32 v14.h, v0.l
+; GFX11-TRUE16-NEXT: v_cndmask_b32_e32 v3, v3, v7, vcc_lo
+; GFX11-TRUE16-NEXT: v_lshrrev_b32_e32 v0, 16, v1
+; GFX11-TRUE16-NEXT: v_add_nc_u32_e32 v1, v2, v6
+; GFX11-TRUE16-NEXT: v_add_nc_u32_e32 v2, 0x7fff, v4
+; GFX11-TRUE16-NEXT: v_add_f32_e64 v4, 0x40c00000, s0
+; GFX11-TRUE16-NEXT: v_lshrrev_b32_e32 v13, 16, v3
+; GFX11-TRUE16-NEXT: v_or_b32_e32 v3, 0x400000, v5
+; GFX11-TRUE16-NEXT: v_cmp_u_f32_e32 vcc_lo, v5, v5
+; GFX11-TRUE16-NEXT: s_lshl_b32 s0, s23, 16
+; GFX11-TRUE16-NEXT: v_add_nc_u32_e32 v1, 0x7fff, v1
+; GFX11-TRUE16-NEXT: v_or_b32_e32 v7, 0x400000, v6
+; GFX11-TRUE16-NEXT: v_add_f32_e64 v8, 0x40c00000, s0
+; GFX11-TRUE16-NEXT: v_cndmask_b32_e32 v2, v2, v3, vcc_lo
+; GFX11-TRUE16-NEXT: v_bfe_u32 v3, v4, 16, 1
+; GFX11-TRUE16-NEXT: v_cmp_u_f32_e32 vcc_lo, v6, v6
+; GFX11-TRUE16-NEXT: v_mov_b16_e32 v13.h, v0.l
+; GFX11-TRUE16-NEXT: v_bfe_u32 v5, v8, 16, 1
+; GFX11-TRUE16-NEXT: s_and_b32 s0, s22, 0xffff0000
+; GFX11-TRUE16-NEXT: v_dual_cndmask_b32 v1, v1, v7 :: v_dual_add_nc_u32 v0, v3, v4
+; GFX11-TRUE16-NEXT: v_add_f32_e64 v3, 0x40c00000, s0
+; GFX11-TRUE16-NEXT: v_cmp_u_f32_e32 vcc_lo, v4, v4
+; GFX11-TRUE16-NEXT: s_lshl_b32 s0, s22, 16
+; GFX11-TRUE16-NEXT: s_delay_alu instid0(VALU_DEP_3)
+; GFX11-TRUE16-NEXT: v_add_nc_u32_e32 v0, 0x7fff, v0
+; GFX11-TRUE16-NEXT: v_lshrrev_b32_e32 v12, 16, v1
+; GFX11-TRUE16-NEXT: v_add_nc_u32_e32 v1, v5, v8
+; GFX11-TRUE16-NEXT: v_or_b32_e32 v5, 0x400000, v4
+; GFX11-TRUE16-NEXT: v_or_b32_e32 v6, 0x400000, v8
+; GFX11-TRUE16-NEXT: v_bfe_u32 v7, v3, 16, 1
+; GFX11-TRUE16-NEXT: v_add_f32_e64 v9, 0x40c00000, s0
+; GFX11-TRUE16-NEXT: v_add_nc_u32_e32 v1, 0x7fff, v1
+; GFX11-TRUE16-NEXT: v_cndmask_b32_e32 v0, v0, v5, vcc_lo
+; GFX11-TRUE16-NEXT: v_cmp_u_f32_e32 vcc_lo, v8, v8
+; GFX11-TRUE16-NEXT: v_lshrrev_b32_e32 v2, 16, v2
+; GFX11-TRUE16-NEXT: v_add_nc_u32_e32 v4, v7, v3
+; GFX11-TRUE16-NEXT: v_bfe_u32 v5, v9, 16, 1
+; GFX11-TRUE16-NEXT: s_and_b32 s0, s21, 0xffff0000
+; GFX11-TRUE16-NEXT: v_cndmask_b32_e32 v1, v1, v6, vcc_lo
+; GFX11-TRUE16-NEXT: v_mov_b16_e32 v12.h, v2.l
+; GFX11-TRUE16-NEXT: v_cmp_u_f32_e32 vcc_lo, v3, v3
+; GFX11-TRUE16-NEXT: v_add_nc_u32_e32 v2, v5, v9
+; GFX11-TRUE16-NEXT: v_add_f32_e64 v5, 0x40c00000, s0
+; GFX11-TRUE16-NEXT: v_lshrrev_b32_e32 v11, 16, v1
+; GFX11-TRUE16-NEXT: v_add_nc_u32_e32 v1, 0x7fff, v4
+; GFX11-TRUE16-NEXT: v_or_b32_e32 v4, 0x400000, v3
+; GFX11-TRUE16-NEXT: s_lshl_b32 s0, s21, 16
+; GFX11-TRUE16-NEXT: v_add_nc_u32_e32 v2, 0x7fff, v2
+; GFX11-TRUE16-NEXT: v_add_f32_e64 v6, 0x40c00000, s0
+; GFX11-TRUE16-NEXT: v_or_b32_e32 v7, 0x400000, v9
+; GFX11-TRUE16-NEXT: v_bfe_u32 v8, v5, 16, 1
+; GFX11-TRUE16-NEXT: v_cndmask_b32_e32 v1, v1, v4, vcc_lo
+; GFX11-TRUE16-NEXT: v_cmp_u_f32_e32 vcc_lo, v9, v9
+; GFX11-TRUE16-NEXT: v_lshrrev_b32_e32 v0, 16, v0
+; GFX11-TRUE16-NEXT: v_bfe_u32 v3, v6, 16, 1
+; GFX11-TRUE16-NEXT: v_add_nc_u32_e32 v4, v8, v5
+; GFX11-TRUE16-NEXT: s_and_b32 s0, s20, 0xffff0000
+; GFX11-TRUE16-NEXT: v_cndmask_b32_e32 v2, v2, v7, vcc_lo
+; GFX11-TRUE16-NEXT: v_mov_b16_e32 v11.h, v0.l
+; GFX11-TRUE16-NEXT: v_lshrrev_b32_e32 v0, 16, v1
+; GFX11-TRUE16-NEXT: v_add_nc_u32_e32 v1, v3, v6
+; GFX11-TRUE16-NEXT: v_or_b32_e32 v3, 0x400000, v5
+; GFX11-TRUE16-NEXT: v_lshrrev_b32_e32 v10, 16, v2
+; GFX11-TRUE16-NEXT: v_add_nc_u32_e32 v2, 0x7fff, v4
+; GFX11-TRUE16-NEXT: v_add_f32_e64 v4, 0x40c00000, s0
+; GFX11-TRUE16-NEXT: v_cmp_u_f32_e32 vcc_lo, v5, v5
+; GFX11-TRUE16-NEXT: s_lshl_b32 s0, s20, 16
+; GFX11-TRUE16-NEXT: v_add_nc_u32_e32 v1, 0x7fff, v1
+; GFX11-TRUE16-NEXT: v_or_b32_e32 v7, 0x400000, v6
+; GFX11-TRUE16-NEXT: v_add_f32_e64 v8, 0x40c00000, s0
+; GFX11-TRUE16-NEXT: v_cndmask_b32_e32 v2, v2, v3, vcc_lo
+; GFX11-TRUE16-NEXT: v_bfe_u32 v3, v4, 16, 1
+; GFX11-TRUE16-NEXT: v_cmp_u_f32_e32 vcc_lo, v6, v6
+; GFX11-TRUE16-NEXT: v_mov_b16_e32 v10.h, v0.l
+; GFX11-TRUE16-NEXT: v_bfe_u32 v5, v8, 16, 1
+; GFX11-TRUE16-NEXT: s_and_b32 s0, s19, 0xffff0000
+; GFX11-TRUE16-NEXT: v_dual_cndmask_b32 v1, v1, v7 :: v_dual_add_nc_u32 v0, v3, v4
+; GFX11-TRUE16-NEXT: v_add_f32_e64 v3, 0x40c00000, s0
+; GFX11-TRUE16-NEXT: v_cmp_u_f32_e32 vcc_lo, v4, v4
+; GFX11-TRUE16-NEXT: s_lshl_b32 s0, s19, 16
+; GFX11-TRUE16-NEXT: s_delay_alu instid0(VALU_DEP_3)
+; GFX11-TRUE16-NEXT: v_add_nc_u32_e32 v0, 0x7fff, v0
+; GFX11-TRUE16-NEXT: v_lshrrev_b32_e32 v9, 16, v1
+; GFX11-TRUE16-NEXT: v_add_nc_u32_e32 v1, v5, v8
+; GFX11-TRUE16-NEXT: v_or_b32_e32 v5, 0x400000, v4
+; GFX11-TRUE16-NEXT: v_or_b32_e32 v6, 0x400000, v8
+; GFX11-TRUE16-NEXT: v_bfe_u32 v7, v3, 16, 1
+; GFX11-TRUE16-NEXT: v_add_f32_e64 v16, 0x40c00000, s0
+; GFX11-TRUE16-NEXT: v_add_nc_u32_e32 v1, 0x7fff, v1
+; GFX11-TRUE16-NEXT: v_cndmask_b32_e32 v0, v0, v5, vcc_lo
+; GFX11-TRUE16-NEXT: v_cmp_u_f32_e32 vcc_lo, v8, v8
+; GFX11-TRUE16-NEXT: v_lshrrev_b32_e32 v2, 16, v2
+; GFX11-TRUE16-NEXT: v_add_nc_u32_e32 v4, v7, v3
+; GFX11-TRUE16-NEXT: v_bfe_u32 v5, v16, 16, 1
+; GFX11-TRUE16-NEXT: s_and_b32 s0, s18, 0xffff0000
+; GFX11-TRUE16-NEXT: v_cndmask_b32_e32 v1, v1, v6, vcc_lo
+; GFX11-TRUE16-NEXT: v_mov_b16_e32 v9.h, v2.l
+; GFX11-TRUE16-NEXT: v_cmp_u_f32_e32 vcc_lo, v3, v3
+; GFX11-TRUE16-NEXT: v_add_nc_u32_e32 v2, v5, v16
+; GFX11-TRUE16-NEXT: v_add_f32_e64 v5, 0x40c00000, s0
+; GFX11-TRUE16-NEXT: v_lshrrev_b32_e32 v8, 16, v1
+; GFX11-TRUE16-NEXT: v_add_nc_u32_e32 v1, 0x7fff, v4
+; GFX11-TRUE16-NEXT: v_or_b32_e32 v4, 0x400000, v3
+; GFX11-TRUE16-NEXT: s_lshl_b32 s0, s18, 16
+; GFX11-TRUE16-NEXT: v_add_nc_u32_e32 v2, 0x7fff, v2
+; GFX11-TRUE16-NEXT: v_add_f32_e64 v6, 0x40c00000, s0
+; GFX11-TRUE16-NEXT: v_or_b32_e32 v7, 0x400000, v16
+; GFX11-TRUE16-NEXT: v_bfe_u32 v17, v5, 16, 1
+; GFX11-TRUE16-NEXT: v_cndmask_b32_e32 v1, v1, v4, vcc_lo
+; GFX11-TRUE16-NEXT: v_cmp_u_f32_e32 vcc_lo, v16, v16
+; GFX11-TRUE16-NEXT: v_lshrrev_b32_e32 v0, 16, v0
+; GFX11-TRUE16-NEXT: v_bfe_u32 v3, v6, 16, 1
+; GFX11-TRUE16-NEXT: v_add_nc_u32_e32 v4, v17, v5
+; GFX11-TRUE16-NEXT: s_and_b32 s0, s17, 0xffff0000
+; GFX11-TRUE16-NEXT: v_cndmask_b32_e32 v2, v2, v7, vcc_lo
+; GFX11-TRUE16-NEXT: v_mov_b16_e32 v8.h, v0.l
+; GFX11-TRUE16-NEXT: v_add_nc_u32_e32 v0, v3, v6
+; GFX11-TRUE16-NEXT: v_or_b32_e32 v3, 0x400000, v5
+; GFX11-TRUE16-NEXT: v_cmp_u_f32_e32 vcc_lo, v5, v5
+; GFX11-TRUE16-NEXT: v_lshrrev_b32_e32 v7, 16, v2
+; GFX11-TRUE16-NEXT: v_add_nc_u32_e32 v2, 0x7fff, v4
+; GFX11-TRUE16-NEXT: v_add_f32_e64 v4, 0x40c00000, s0
+; GFX11-TRUE16-NEXT: v_add_nc_u32_e32 v0, 0x7fff, v0
+; GFX11-TRUE16-NEXT: v_or_b32_e32 v17, 0x400000, v6
+; GFX11-TRUE16-NEXT: v_add_f32_e64 v16, 0x40c00000, s1
+; GFX11-TRUE16-NEXT: v_cndmask_b32_e32 v2, v2, v3, vcc_lo
+; GFX11-TRUE16-NEXT: v_cmp_u_f32_e32 vcc_lo, v6, v6
+; GFX11-TRUE16-NEXT: v_bfe_u32 v3, v4, 16, 1
+; GFX11-TRUE16-NEXT: v_lshrrev_b32_e32 v1, 16, v1
+; GFX11-TRUE16-NEXT: v_bfe_u32 v5, v16, 16, 1
+; GFX11-TRUE16-NEXT: v_lshrrev_b32_e32 v2, 16, v2
+; GFX11-TRUE16-NEXT: s_delay_alu instid0(VALU_DEP_4) | instskip(SKIP_3) | instid1(VALU_DEP_3)
+; GFX11-TRUE16-NEXT: v_dual_cndmask_b32 v0, v0, v17 :: v_dual_add_nc_u32 v3, v3, v4
+; GFX11-TRUE16-NEXT: s_and_b32 s0, s16, 0xffff0000
+; GFX11-TRUE16-NEXT: v_cmp_u_f32_e32 vcc_lo, v4, v4
+; GFX11-TRUE16-NEXT: v_mov_b16_e32 v7.h, v1.l
+; GFX11-TRUE16-NEXT: v_lshrrev_b32_e32 v6, 16, v0
+; GFX11-TRUE16-NEXT: v_mov_b16_e32 v6.h, v2.l
+; GFX11-TRUE16-NEXT: v_add_nc_u32_e32 v0, 0x7fff, v3
+; GFX11-TRUE16-NEXT: v_or_b32_e32 v2, 0x400000, v4
+; GFX11-TRUE16-NEXT: v_add_f32_e64 v3, 0x40c00000, s0
+; GFX11-TRUE16-NEXT: v_add_nc_u32_e32 v1, v5, v16
+; GFX11-TRUE16-NEXT: s_lshl_b32 s1, s16, 16
+; GFX11-TRUE16-NEXT: v_or_b32_e32 v5, 0x400000, v16
+; GFX11-TRUE16-NEXT: v_add_f32_e64 v17, 0x40c00000, s1
+; GFX11-TRUE16-NEXT: v_cndmask_b32_e32 v0, v0, v2, vcc_lo
+; GFX11-TRUE16-NEXT: v_bfe_u32 v2, v3, 16, 1
+; GFX11-TRUE16-NEXT: v_add_nc_u32_e32 v1, 0x7fff, v1
+; GFX11-TRUE16-NEXT: v_cmp_u_f32_e32 vcc_lo, v16, v16
+; GFX11-TRUE16-NEXT: v_bfe_u32 v4, v17, 16, 1
+; GFX11-TRUE16-NEXT: s_and_b32 s0, s15, 0xffff0000
+; GFX11-TRUE16-NEXT: v_add_nc_u32_e32 v2, v2, v3
+; GFX11-TRUE16-NEXT: v_or_b32_e32 v16, 0x400000, v3
+; GFX11-TRUE16-NEXT: v_cndmask_b32_e32 v1, v1, v5, vcc_lo
+; GFX11-TRUE16-NEXT: v_add_nc_u32_e32 v4, v4, v17
+; GFX11-TRUE16-NEXT: v_cmp_u_f32_e32 vcc_lo, v3, v3
+; GFX11-TRUE16-NEXT: v_add_nc_u32_e32 v2, 0x7fff, v2
+; GFX11-TRUE16-NEXT: v_or_b32_e32 v18, 0x400000, v17
+; GFX11-TRUE16-NEXT: v_lshrrev_b32_e32 v5, 16, v1
+; GFX11-TRUE16-NEXT: v_add_f32_e64 v1, 0x40c00000, s0
+; GFX11-TRUE16-NEXT: v_add_nc_u32_e32 v4, 0x7fff, v4
+; GFX11-TRUE16-NEXT: v_cndmask_b32_e32 v2, v2, v16, vcc_lo
+; GFX11-TRUE16-NEXT: v_cmp_u_f32_e32 vcc_lo, v17, v17
+; GFX11-TRUE16-NEXT: v_lshrrev_b32_e32 v0, 16, v0
+; GFX11-TRUE16-NEXT: v_bfe_u32 v19, v1, 16, 1
+; GFX11-TRUE16-NEXT: s_lshl_b32 s0, s15, 16
+; GFX11-TRUE16-NEXT: v_lshrrev_b32_e32 v2, 16, v2
+; GFX11-TRUE16-NEXT: v_cndmask_b32_e32 v3, v4, v18, vcc_lo
+; GFX11-TRUE16-NEXT: v_mov_b16_e32 v5.h, v0.l
+; GFX11-TRUE16-NEXT: v_add_nc_u32_e32 v16, v19, v1
+; GFX11-TRUE16-NEXT: v_add_f32_e64 v0, 0x40c00000, s0
+; GFX11-TRUE16-NEXT: s_and_b32 s0, s14, 0xffff0000
+; GFX11-TRUE16-NEXT: v_lshrrev_b32_e32 v4, 16, v3
+; GFX11-TRUE16-NEXT: v_mov_b16_e32 v4.h, v2.l
+; GFX11-TRUE16-NEXT: v_add_nc_u32_e32 v3, 0x7fff, v16
+; GFX11-TRUE16-NEXT: v_or_b32_e32 v16, 0x400000, v1
+; GFX11-TRUE16-NEXT: v_bfe_u32 v17, v0, 16, 1
+; GFX11-TRUE16-NEXT: v_add_f32_e64 v2, 0x40c00000, s0
+; GFX11-TRUE16-NEXT: v_cmp_u_f32_e32 vcc_lo, v1, v1
+; GFX11-TRUE16-NEXT: s_lshl_b32 s0, s14, 16
+; GFX11-TRUE16-NEXT: v_or_b32_e32 v18, 0x400000, v0
+; GFX11-TRUE16-NEXT: s_and_b32 s1, s12, 0xffff0000
+; GFX11-TRUE16-NEXT: v_or_b32_e32 v20, 0x400000, v2
+; GFX11-TRUE16-NEXT: v_cndmask_b32_e32 v1, v3, v16, vcc_lo
+; GFX11-TRUE16-NEXT: v_add_nc_u32_e32 v3, v17, v0
+; GFX11-TRUE16-NEXT: v_bfe_u32 v16, v2, 16, 1
+; GFX11-TRUE16-NEXT: v_add_f32_e64 v17, 0x40c00000, s0
+; GFX11-TRUE16-NEXT: v_cmp_u_f32_e32 vcc_lo, v0, v0
+; GFX11-TRUE16-NEXT: s_and_b32 s0, s13, 0xffff0000
+; GFX11-TRUE16-NEXT: v_add_nc_u32_e32 v3, 0x7fff, v3
+; GFX11-TRUE16-NEXT: v_add_nc_u32_e32 v16, v16, v2
+; GFX11-TRUE16-NEXT: v_bfe_u32 v19, v17, 16, 1
+; GFX11-TRUE16-NEXT: v_lshrrev_b32_e32 v1, 16, v1
+; GFX11-TRUE16-NEXT: s_delay_alu instid0(VALU_DEP_4)
+; GFX11-TRUE16-NEXT: v_cndmask_b32_e32 v0, v3, v18, vcc_lo
+; GFX11-TRUE16-NEXT: v_add_f32_e64 v18, 0x40c00000, s0
+; GFX11-TRUE16-NEXT: v_add_nc_u32_e32 v16, 0x7fff, v16
+; GFX11-TRUE16-NEXT: v_add_nc_u32_e32 v19, v19, v17
+; GFX11-TRUE16-NEXT: v_cmp_u_f32_e32 vcc_lo, v2, v2
+; GFX11-TRUE16-NEXT: v_lshrrev_b32_e32 v3, 16, v0
+; GFX11-TRUE16-NEXT: v_bfe_u32 v0, v18, 16, 1
+; GFX11-TRUE16-NEXT: s_lshl_b32 s0, s13, 16
+; GFX11-TRUE16-NEXT: v_mov_b16_e32 v3.h, v1.l
+; GFX11-TRUE16-NEXT: v_cndmask_b32_e32 v2, v16, v20, vcc_lo
+; GFX11-TRUE16-NEXT: v_add_nc_u32_e32 v16, 0x7fff, v19
+; GFX11-TRUE16-NEXT: v_or_b32_e32 v19, 0x400000, v17
+; GFX11-TRUE16-NEXT: v_cmp_u_f32_e32 vcc_lo, v17, v17
+; GFX11-TRUE16-NEXT: v_add_nc_u32_e32 v0, v0, v18
+; GFX11-TRUE16-NEXT: v_lshrrev_b32_e32 v1, 16, v2
+; GFX11-TRUE16-NEXT: v_add_f32_e64 v17, 0x40c00000, s1
+; GFX11-TRUE16-NEXT: v_cndmask_b32_e32 v2, v16, v19, vcc_lo
+; GFX11-TRUE16-NEXT: v_add_f32_e64 v16, 0x40c00000, s0
+; GFX11-TRUE16-NEXT: v_add_nc_u32_e32 v0, 0x7fff, v0
+; GFX11-TRUE16-NEXT: v_or_b32_e32 v19, 0x400000, v18
+; GFX11-TRUE16-NEXT: v_bfe_u32 v22, v17, 16, 1
+; GFX11-TRUE16-NEXT: v_cmp_u_f32_e32 vcc_lo, v18, v18
+; GFX11-TRUE16-NEXT: v_bfe_u32 v21, v16, 16, 1
+; GFX11-TRUE16-NEXT: s_lshl_b32 s0, s12, 16
+; GFX11-TRUE16-NEXT: v_or_b32_e32 v23, 0x400000, v16
+; GFX11-TRUE16-NEXT: v_add_f32_e64 v20, 0x40c00000, s0
+; GFX11-TRUE16-NEXT: s_delay_alu instid0(VALU_DEP_3) | instskip(SKIP_2) | instid1(VALU_DEP_4)
+; GFX11-TRUE16-NEXT: v_dual_cndmask_b32 v0, v0, v19 :: v_dual_add_nc_u32 v19, v21, v16
+; GFX11-TRUE16-NEXT: v_add_nc_u32_e32 v21, v22, v17
+; GFX11-TRUE16-NEXT: v_or_b32_e32 v22, 0x400000, v17
+; GFX11-TRUE16-NEXT: v_bfe_u32 v18, v20, 16, 1
+; GFX11-TRUE16-NEXT: v_cmp_u_f32_e32 vcc_lo, v17, v17
+; GFX11-TRUE16-NEXT: v_add_nc_u32_e32 v19, 0x7fff, v19
+; GFX11-TRUE16-NEXT: v_add_nc_u32_e32 v21, 0x7fff, v21
+; GFX11-TRUE16-NEXT: v_or_b32_e32 v24, 0x400000, v20
+; GFX11-TRUE16-NEXT: v_add_nc_u32_e32 v18, v18, v20
+; GFX11-TRUE16-NEXT: v_lshrrev_b32_e32 v0, 16, v0
+; GFX11-TRUE16-NEXT: v_lshrrev_b32_e32 v2, 16, v2
+; GFX11-TRUE16-NEXT: v_cndmask_b32_e32 v17, v21, v22, vcc_lo
+; GFX11-TRUE16-NEXT: v_cmp_u_f32_e32 vcc_lo, v16, v16
+; GFX11-TRUE16-NEXT: v_add_nc_u32_e32 v18, 0x7fff, v18
+; GFX11-TRUE16-NEXT: v_mov_b16_e32 v2.h, v1.l
+; GFX11-TRUE16-NEXT: s_delay_alu instid0(VALU_DEP_4) | instskip(SKIP_2) | instid1(VALU_DEP_2)
+; GFX11-TRUE16-NEXT: v_lshrrev_b32_e32 v17, 16, v17
+; GFX11-TRUE16-NEXT: v_cndmask_b32_e32 v16, v19, v23, vcc_lo
+; GFX11-TRUE16-NEXT: v_cmp_u_f32_e32 vcc_lo, v20, v20
+; GFX11-TRUE16-NEXT: v_lshrrev_b32_e32 v1, 16, v16
+; GFX11-TRUE16-NEXT: v_cndmask_b32_e32 v18, v18, v24, vcc_lo
+; GFX11-TRUE16-NEXT: v_mov_b16_e32 v1.h, v0.l
+; GFX11-TRUE16-NEXT: s_delay_alu instid0(VALU_DEP_2)
+; GFX11-TRUE16-NEXT: v_lshrrev_b32_e32 v0, 16, v18
+; GFX11-TRUE16-NEXT: v_mov_b16_e32 v0.h, v17.l
+; GFX11-TRUE16-NEXT: s_setpc_b64 s[30:31]
+; GFX11-TRUE16-NEXT: .LBB83_3:
+; GFX11-TRUE16-NEXT: s_branch .LBB83_2
+; GFX11-TRUE16-NEXT: .LBB83_4:
+; GFX11-TRUE16-NEXT: v_dual_mov_b32 v0, s12 :: v_dual_mov_b32 v1, s13
+; GFX11-TRUE16-NEXT: v_dual_mov_b32 v2, s14 :: v_dual_mov_b32 v3, s15
+; GFX11-TRUE16-NEXT: v_dual_mov_b32 v4, s16 :: v_dual_mov_b32 v5, s17
+; GFX11-TRUE16-NEXT: v_dual_mov_b32 v6, s18 :: v_dual_mov_b32 v7, s19
+; GFX11-TRUE16-NEXT: v_dual_mov_b32 v8, s20 :: v_dual_mov_b32 v9, s21
+; GFX11-TRUE16-NEXT: v_dual_mov_b32 v10, s22 :: v_dual_mov_b32 v11, s23
+; GFX11-TRUE16-NEXT: v_dual_mov_b32 v12, s24 :: v_dual_mov_b32 v13, s25
+; GFX11-TRUE16-NEXT: v_dual_mov_b32 v14, s26 :: v_dual_mov_b32 v15, s27
+; GFX11-TRUE16-NEXT: s_setpc_b64 s[30:31]
+;
+; GFX11-FAKE16-LABEL: bitcast_v32bf16_to_v8f64_scalar:
+; GFX11-FAKE16: ; %bb.0:
+; GFX11-FAKE16-NEXT: s_waitcnt vmcnt(0) expcnt(0) lgkmcnt(0)
+; GFX11-FAKE16-NEXT: s_mov_b32 s15, s3
+; GFX11-FAKE16-NEXT: s_mov_b32 s14, s2
+; GFX11-FAKE16-NEXT: s_mov_b32 s13, s1
+; GFX11-FAKE16-NEXT: s_mov_b32 s12, s0
+; GFX11-FAKE16-NEXT: s_cmp_lg_u32 s28, 0
+; GFX11-FAKE16-NEXT: s_mov_b32 s0, 0
+; GFX11-FAKE16-NEXT: s_cbranch_scc0 .LBB83_3
+; GFX11-FAKE16-NEXT: ; %bb.1: ; %Flow
+; GFX11-FAKE16-NEXT: s_and_not1_b32 vcc_lo, exec_lo, s0
+; GFX11-FAKE16-NEXT: s_cbranch_vccnz .LBB83_4
+; GFX11-FAKE16-NEXT: .LBB83_2: ; %cmp.true
+; GFX11-FAKE16-NEXT: s_and_b32 s1, s27, 0xffff0000
+; GFX11-FAKE16-NEXT: s_lshl_b32 s0, s27, 16
+; GFX11-FAKE16-NEXT: v_add_f32_e64 v1, 0x40c00000, s1
+; GFX11-FAKE16-NEXT: v_add_f32_e64 v0, 0x40c00000, s0
+; GFX11-FAKE16-NEXT: s_and_b32 s0, s26, 0xffff0000
+; GFX11-FAKE16-NEXT: s_lshl_b32 s2, s26, 16
+; GFX11-FAKE16-NEXT: v_add_f32_e64 v3, 0x40c00000, s0
+; GFX11-FAKE16-NEXT: v_bfe_u32 v4, v1, 16, 1
+; GFX11-FAKE16-NEXT: v_bfe_u32 v2, v0, 16, 1
+; GFX11-FAKE16-NEXT: v_add_f32_e64 v5, 0x40c00000, s2
+; GFX11-FAKE16-NEXT: v_or_b32_e32 v7, 0x400000, v1
+; GFX11-FAKE16-NEXT: v_or_b32_e32 v8, 0x400000, v0
+; GFX11-FAKE16-NEXT: v_add_nc_u32_e32 v4, v4, v1
+; GFX11-FAKE16-NEXT: v_bfe_u32 v9, v3, 16, 1
+; GFX11-FAKE16-NEXT: v_bfe_u32 v10, v5, 16, 1
+; GFX11-FAKE16-NEXT: v_cmp_u_f32_e32 vcc_lo, v0, v0
+; GFX11-FAKE16-NEXT: s_and_b32 s1, s25, 0xffff0000
+; GFX11-FAKE16-NEXT: v_add_nc_u32_e32 v4, 0x7fff, v4
+; GFX11-FAKE16-NEXT: v_add_nc_u32_e32 v2, v2, v0
+; GFX11-FAKE16-NEXT: v_add_f32_e64 v6, 0x40c00000, s1
+; GFX11-FAKE16-NEXT: s_lshl_b32 s3, s25, 16
+; GFX11-FAKE16-NEXT: s_and_b32 s0, s24, 0xffff0000
+; GFX11-FAKE16-NEXT: s_lshl_b32 s1, s24, 16
+; GFX11-FAKE16-NEXT: v_add_nc_u32_e32 v2, 0x7fff, v2
+; GFX11-FAKE16-NEXT: s_delay_alu instid0(VALU_DEP_1)
+; GFX11-FAKE16-NEXT: v_cndmask_b32_e32 v0, v2, v8, vcc_lo
+; GFX11-FAKE16-NEXT: v_add_nc_u32_e32 v2, v9, v3
+; GFX11-FAKE16-NEXT: v_add_nc_u32_e32 v8, v10, v5
+; GFX11-FAKE16-NEXT: v_cmp_u_f32_e32 vcc_lo, v1, v1
+; GFX11-FAKE16-NEXT: v_or_b32_e32 v9, 0x400000, v3
+; GFX11-FAKE16-NEXT: v_lshrrev_b32_e32 v0, 16, v0
+; GFX11-FAKE16-NEXT: v_dual_cndmask_b32 v1, v4, v7 :: v_dual_add_nc_u32 v2, 0x7fff, v2
+; GFX11-FAKE16-NEXT: v_or_b32_e32 v4, 0x400000, v5
+; GFX11-FAKE16-NEXT: v_add_nc_u32_e32 v7, 0x7fff, v8
+; GFX11-FAKE16-NEXT: v_cmp_u_f32_e32 vcc_lo, v5, v5
+; GFX11-FAKE16-NEXT: v_add_f32_e64 v8, 0x40c00000, s3
+; GFX11-FAKE16-NEXT: v_bfe_u32 v5, v6, 16, 1
+; GFX11-FAKE16-NEXT: v_lshrrev_b32_e32 v1, 16, v1
+; GFX11-FAKE16-NEXT: v_and_b32_e32 v0, 0xffff, v0
+; GFX11-FAKE16-NEXT: v_cndmask_b32_e32 v4, v7, v4, vcc_lo
+; GFX11-FAKE16-NEXT: v_cmp_u_f32_e32 vcc_lo, v3, v3
+; GFX11-FAKE16-NEXT: v_bfe_u32 v7, v8, 16, 1
+; GFX11-FAKE16-NEXT: s_delay_alu instid0(VALU_DEP_4) | instskip(NEXT) | instid1(VALU_DEP_4)
+; GFX11-FAKE16-NEXT: v_lshl_or_b32 v15, v1, 16, v0
+; GFX11-FAKE16-NEXT: v_lshrrev_b32_e32 v3, 16, v4
+; GFX11-FAKE16-NEXT: v_cndmask_b32_e32 v2, v2, v9, vcc_lo
+; GFX11-FAKE16-NEXT: v_add_nc_u32_e32 v4, v5, v6
+; GFX11-FAKE16-NEXT: v_or_b32_e32 v9, 0x400000, v6
+; GFX11-FAKE16-NEXT: v_cmp_u_f32_e32 vcc_lo, v8, v8
+; GFX11-FAKE16-NEXT: v_and_b32_e32 v1, 0xffff, v3
+; GFX11-FAKE16-NEXT: v_lshrrev_b32_e32 v0, 16, v2
+; GFX11-FAKE16-NEXT: v_add_nc_u32_e32 v2, 0x7fff, v4
+; GFX11-FAKE16-NEXT: v_add_nc_u32_e32 v5, v7, v8
+; GFX11-FAKE16-NEXT: v_or_b32_e32 v4, 0x400000, v8
+; GFX11-FAKE16-NEXT: v_add_f32_e64 v7, 0x40c00000, s1
+; GFX11-FAKE16-NEXT: s_lshl_b32 s1, s23, 16
+; GFX11-FAKE16-NEXT: v_lshl_or_b32 v14, v0, 16, v1
+; GFX11-FAKE16-NEXT: v_add_nc_u32_e32 v3, 0x7fff, v5
+; GFX11-FAKE16-NEXT: v_add_f32_e64 v5, 0x40c00000, s0
+; GFX11-FAKE16-NEXT: v_bfe_u32 v8, v7, 16, 1
+; GFX11-FAKE16-NEXT: s_and_b32 s0, s23, 0xffff0000
+; GFX11-FAKE16-NEXT: s_delay_alu instid0(VALU_DEP_3) | instskip(NEXT) | instid1(VALU_DEP_3)
+; GFX11-FAKE16-NEXT: v_cndmask_b32_e32 v3, v3, v4, vcc_lo
+; GFX11-FAKE16-NEXT: v_bfe_u32 v4, v5, 16, 1
+; GFX11-FAKE16-NEXT: v_cmp_u_f32_e32 vcc_lo, v6, v6
+; GFX11-FAKE16-NEXT: v_add_nc_u32_e32 v6, v8, v7
+; GFX11-FAKE16-NEXT: v_add_f32_e64 v8, 0x40c00000, s1
+; GFX11-FAKE16-NEXT: v_lshrrev_b32_e32 v3, 16, v3
+; GFX11-FAKE16-NEXT: v_add_nc_u32_e32 v4, v4, v5
+; GFX11-FAKE16-NEXT: v_cndmask_b32_e32 v2, v2, v9, vcc_lo
+; GFX11-FAKE16-NEXT: v_or_b32_e32 v9, 0x400000, v5
+; GFX11-FAKE16-NEXT: v_cmp_u_f32_e32 vcc_lo, v7, v7
+; GFX11-FAKE16-NEXT: s_lshl_b32 s1, s22, 16
+; GFX11-FAKE16-NEXT: s_delay_alu instid0(VALU_DEP_3)
+; GFX11-FAKE16-NEXT: v_lshrrev_b32_e32 v0, 16, v2
+; GFX11-FAKE16-NEXT: v_add_nc_u32_e32 v2, 0x7fff, v4
+; GFX11-FAKE16-NEXT: v_and_b32_e32 v1, 0xffff, v3
+; GFX11-FAKE16-NEXT: v_add_nc_u32_e32 v3, 0x7fff, v6
+; GFX11-FAKE16-NEXT: v_or_b32_e32 v4, 0x400000, v7
+; GFX11-FAKE16-NEXT: v_add_f32_e64 v6, 0x40c00000, s0
+; GFX11-FAKE16-NEXT: v_bfe_u32 v7, v8, 16, 1
+; GFX11-FAKE16-NEXT: s_and_b32 s0, s22, 0xffff0000
+; GFX11-FAKE16-NEXT: s_delay_alu instid0(VALU_DEP_3) | instskip(NEXT) | instid1(VALU_DEP_3)
+; GFX11-FAKE16-NEXT: v_cndmask_b32_e32 v3, v3, v4, vcc_lo
+; GFX11-FAKE16-NEXT: v_bfe_u32 v4, v6, 16, 1
+; GFX11-FAKE16-NEXT: v_cmp_u_f32_e32 vcc_lo, v5, v5
+; GFX11-FAKE16-NEXT: v_add_nc_u32_e32 v5, v7, v8
+; GFX11-FAKE16-NEXT: v_add_f32_e64 v7, 0x40c00000, s1
+; GFX11-FAKE16-NEXT: v_lshrrev_b32_e32 v3, 16, v3
+; GFX11-FAKE16-NEXT: v_add_nc_u32_e32 v4, v4, v6
+; GFX11-FAKE16-NEXT: v_cndmask_b32_e32 v2, v2, v9, vcc_lo
+; GFX11-FAKE16-NEXT: v_lshl_or_b32 v13, v0, 16, v1
+; GFX11-FAKE16-NEXT: v_cmp_u_f32_e32 vcc_lo, v8, v8
+; GFX11-FAKE16-NEXT: v_and_b32_e32 v1, 0xffff, v3
+; GFX11-FAKE16-NEXT: v_add_nc_u32_e32 v3, 0x7fff, v5
+; GFX11-FAKE16-NEXT: v_lshrrev_b32_e32 v0, 16, v2
+; GFX11-FAKE16-NEXT: v_add_nc_u32_e32 v2, 0x7fff, v4
+; GFX11-FAKE16-NEXT: v_or_b32_e32 v4, 0x400000, v8
+; GFX11-FAKE16-NEXT: v_add_f32_e64 v5, 0x40c00000, s0
+; GFX11-FAKE16-NEXT: v_or_b32_e32 v9, 0x400000, v6
+; GFX11-FAKE16-NEXT: v_bfe_u32 v8, v7, 16, 1
+; GFX11-FAKE16-NEXT: s_and_b32 s0, s21, 0xffff0000
+; GFX11-FAKE16-NEXT: v_cndmask_b32_e32 v3, v3, v4, vcc_lo
+; GFX11-FAKE16-NEXT: v_bfe_u32 v4, v5, 16, 1
+; GFX11-FAKE16-NEXT: v_cmp_u_f32_e32 vcc_lo, v6, v6
+; GFX11-FAKE16-NEXT: v_add_nc_u32_e32 v6, v8, v7
+; GFX11-FAKE16-NEXT: s_lshl_b32 s1, s21, 16
+; GFX11-FAKE16-NEXT: v_lshrrev_b32_e32 v3, 16, v3
+; GFX11-FAKE16-NEXT: v_add_nc_u32_e32 v4, v4, v5
+; GFX11-FAKE16-NEXT: v_cndmask_b32_e32 v2, v2, v9, vcc_lo
+; GFX11-FAKE16-NEXT: v_lshl_or_b32 v12, v0, 16, v1
+; GFX11-FAKE16-NEXT: v_add_f32_e64 v8, 0x40c00000, s1
+; GFX11-FAKE16-NEXT: v_and_b32_e32 v1, 0xffff, v3
+; GFX11-FAKE16-NEXT: v_add_nc_u32_e32 v3, 0x7fff, v6
+; GFX11-FAKE16-NEXT: v_lshrrev_b32_e32 v0, 16, v2
+; GFX11-FAKE16-NEXT: v_add_nc_u32_e32 v2, 0x7fff, v4
+; GFX11-FAKE16-NEXT: v_or_b32_e32 v4, 0x400000, v7
+; GFX11-FAKE16-NEXT: v_add_f32_e64 v6, 0x40c00000, s0
+; GFX11-FAKE16-NEXT: v_cmp_u_f32_e32 vcc_lo, v7, v7
+; GFX11-FAKE16-NEXT: v_or_b32_e32 v9, 0x400000, v5
+; GFX11-FAKE16-NEXT: v_bfe_u32 v7, v8, 16, 1
+; GFX11-FAKE16-NEXT: s_and_b32 s0, s20, 0xffff0000
+; GFX11-FAKE16-NEXT: s_lshl_b32 s1, s20, 16
+; GFX11-FAKE16-NEXT: v_cndmask_b32_e32 v3, v3, v4, vcc_lo
+; GFX11-FAKE16-NEXT: v_bfe_u32 v4, v6, 16, 1
+; GFX11-FAKE16-NEXT: v_cmp_u_f32_e32 vcc_lo, v5, v5
+; GFX11-FAKE16-NEXT: v_add_nc_u32_e32 v5, v7, v8
+; GFX11-FAKE16-NEXT: v_lshl_or_b32 v11, v0, 16, v1
+; GFX11-FAKE16-NEXT: v_lshrrev_b32_e32 v3, 16, v3
+; GFX11-FAKE16-NEXT: v_add_nc_u32_e32 v4, v4, v6
+; GFX11-FAKE16-NEXT: v_cndmask_b32_e32 v2, v2, v9, vcc_lo
+; GFX11-FAKE16-NEXT: v_add_f32_e64 v7, 0x40c00000, s1
+; GFX11-FAKE16-NEXT: v_cmp_u_f32_e32 vcc_lo, v8, v8
+; GFX11-FAKE16-NEXT: v_and_b32_e32 v1, 0xffff, v3
+; GFX11-FAKE16-NEXT: v_add_nc_u32_e32 v3, 0x7fff, v5
+; GFX11-FAKE16-NEXT: v_lshrrev_b32_e32 v0, 16, v2
+; GFX11-FAKE16-NEXT: v_add_nc_u32_e32 v2, 0x7fff, v4
+; GFX11-FAKE16-NEXT: v_or_b32_e32 v4, 0x400000, v8
+; GFX11-FAKE16-NEXT: v_add_f32_e64 v5, 0x40c00000, s0
+; GFX11-FAKE16-NEXT: v_or_b32_e32 v9, 0x400000, v6
+; GFX11-FAKE16-NEXT: v_bfe_u32 v8, v7, 16, 1
+; GFX11-FAKE16-NEXT: v_lshl_or_b32 v10, v0, 16, v1
+; GFX11-FAKE16-NEXT: v_cndmask_b32_e32 v3, v3, v4, vcc_lo
+; GFX11-FAKE16-NEXT: v_bfe_u32 v4, v5, 16, 1
+; GFX11-FAKE16-NEXT: v_cmp_u_f32_e32 vcc_lo, v6, v6
+; GFX11-FAKE16-NEXT: v_add_nc_u32_e32 v0, v8, v7
+; GFX11-FAKE16-NEXT: s_lshl_b32 s0, s19, 16
+; GFX11-FAKE16-NEXT: v_lshrrev_b32_e32 v3, 16, v3
+; GFX11-FAKE16-NEXT: v_add_nc_u32_e32 v4, v4, v5
+; GFX11-FAKE16-NEXT: v_cndmask_b32_e32 v2, v2, v9, vcc_lo
+; GFX11-FAKE16-NEXT: v_add_nc_u32_e32 v0, 0x7fff, v0
+; GFX11-FAKE16-NEXT: v_or_b32_e32 v6, 0x400000, v7
+; GFX11-FAKE16-NEXT: v_cmp_u_f32_e32 vcc_lo, v7, v7
+; GFX11-FAKE16-NEXT: v_or_b32_e32 v9, 0x400000, v5
+; GFX11-FAKE16-NEXT: v_lshrrev_b32_e32 v1, 16, v2
+; GFX11-FAKE16-NEXT: v_and_b32_e32 v2, 0xffff, v3
+; GFX11-FAKE16-NEXT: v_add_nc_u32_e32 v3, 0x7fff, v4
+; GFX11-FAKE16-NEXT: v_add_f32_e64 v4, 0x40c00000, s0
+; GFX11-FAKE16-NEXT: s_and_b32 s0, s19, 0xffff0000
+; GFX11-FAKE16-NEXT: v_cndmask_b32_e32 v0, v0, v6, vcc_lo
+; GFX11-FAKE16-NEXT: v_add_f32_e64 v8, 0x40c00000, s0
+; GFX11-FAKE16-NEXT: v_cmp_u_f32_e32 vcc_lo, v5, v5
+; GFX11-FAKE16-NEXT: v_bfe_u32 v16, v4, 16, 1
+; GFX11-FAKE16-NEXT: s_and_b32 s0, s18, 0xffff0000
+; GFX11-FAKE16-NEXT: v_lshrrev_b32_e32 v0, 16, v0
+; GFX11-FAKE16-NEXT: v_bfe_u32 v6, v8, 16, 1
+; GFX11-FAKE16-NEXT: v_cndmask_b32_e32 v3, v3, v9, vcc_lo
+; GFX11-FAKE16-NEXT: v_add_nc_u32_e32 v5, v16, v4
+; GFX11-FAKE16-NEXT: v_lshl_or_b32 v9, v1, 16, v2
+; GFX11-FAKE16-NEXT: v_cmp_u_f32_e32 vcc_lo, v4, v4
+; GFX11-FAKE16-NEXT: v_add_nc_u32_e32 v1, v6, v8
+; GFX11-FAKE16-NEXT: v_lshrrev_b32_e32 v2, 16, v3
+; GFX11-FAKE16-NEXT: v_add_nc_u32_e32 v3, 0x7fff, v5
+; GFX11-FAKE16-NEXT: v_or_b32_e32 v5, 0x400000, v4
+; GFX11-FAKE16-NEXT: v_or_b32_e32 v6, 0x400000, v8
+; GFX11-FAKE16-NEXT: v_add_nc_u32_e32 v1, 0x7fff, v1
+; GFX11-FAKE16-NEXT: v_add_f32_e64 v4, 0x40c00000, s0
+; GFX11-FAKE16-NEXT: s_lshl_b32 s1, s18, 16
+; GFX11-FAKE16-NEXT: v_cndmask_b32_e32 v3, v3, v5, vcc_lo
+; GFX11-FAKE16-NEXT: v_cmp_u_f32_e32 vcc_lo, v8, v8
+; GFX11-FAKE16-NEXT: v_and_b32_e32 v0, 0xffff, v0
+; GFX11-FAKE16-NEXT: v_add_f32_e64 v5, 0x40c00000, s1
+; GFX11-FAKE16-NEXT: s_lshl_b32 s0, s17, 16
+; GFX11-FAKE16-NEXT: v_lshrrev_b32_e32 v3, 16, v3
+; GFX11-FAKE16-NEXT: v_cndmask_b32_e32 v1, v1, v6, vcc_lo
+; GFX11-FAKE16-NEXT: v_bfe_u32 v6, v4, 16, 1
+; GFX11-FAKE16-NEXT: v_bfe_u32 v7, v5, 16, 1
+; GFX11-FAKE16-NEXT: v_lshl_or_b32 v8, v2, 16, v0
+; GFX11-FAKE16-NEXT: s_and_b32 s1, s17, 0xffff0000
+; GFX11-FAKE16-NEXT: v_lshrrev_b32_e32 v0, 16, v1
+; GFX11-FAKE16-NEXT: v_add_nc_u32_e32 v2, v6, v4
+; GFX11-FAKE16-NEXT: v_add_f32_e64 v6, 0x40c00000, s0
+; GFX11-FAKE16-NEXT: v_and_b32_e32 v1, 0xffff, v3
+; GFX11-FAKE16-NEXT: v_add_nc_u32_e32 v3, v7, v5
+; GFX11-FAKE16-NEXT: v_add_f32_e64 v7, 0x40c00000, s1
+; GFX11-FAKE16-NEXT: v_add_nc_u32_e32 v2, 0x7fff, v2
+; GFX11-FAKE16-NEXT: v_or_b32_e32 v16, 0x400000, v4
+; GFX11-FAKE16-NEXT: v_bfe_u32 v17, v6, 16, 1
+; GFX11-FAKE16-NEXT: v_cmp_u_f32_e32 vcc_lo, v4, v4
+; GFX11-FAKE16-NEXT: v_add_nc_u32_e32 v3, 0x7fff, v3
+; GFX11-FAKE16-NEXT: v_or_b32_e32 v18, 0x400000, v5
+; GFX11-FAKE16-NEXT: v_bfe_u32 v19, v7, 16, 1
+; GFX11-FAKE16-NEXT: v_add_nc_u32_e32 v4, v17, v6
+; GFX11-FAKE16-NEXT: v_cndmask_b32_e32 v2, v2, v16, vcc_lo
+; GFX11-FAKE16-NEXT: v_cmp_u_f32_e32 vcc_lo, v5, v5
+; GFX11-FAKE16-NEXT: v_or_b32_e32 v16, 0x400000, v6
+; GFX11-FAKE16-NEXT: v_add_nc_u32_e32 v5, v19, v7
+; GFX11-FAKE16-NEXT: v_add_nc_u32_e32 v4, 0x7fff, v4
+; GFX11-FAKE16-NEXT: s_lshl_b32 s1, s16, 16
+; GFX11-FAKE16-NEXT: v_cndmask_b32_e32 v3, v3, v18, vcc_lo
+; GFX11-FAKE16-NEXT: v_cmp_u_f32_e32 vcc_lo, v6, v6
+; GFX11-FAKE16-NEXT: v_add_nc_u32_e32 v5, 0x7fff, v5
+; GFX11-FAKE16-NEXT: v_or_b32_e32 v17, 0x400000, v7
+; GFX11-FAKE16-NEXT: v_add_f32_e64 v18, 0x40c00000, s1
+; GFX11-FAKE16-NEXT: s_and_b32 s0, s16, 0xffff0000
+; GFX11-FAKE16-NEXT: v_cndmask_b32_e32 v4, v4, v16, vcc_lo
+; GFX11-FAKE16-NEXT: v_cmp_u_f32_e32 vcc_lo, v7, v7
+; GFX11-FAKE16-NEXT: v_lshrrev_b32_e32 v3, 16, v3
+; GFX11-FAKE16-NEXT: v_add_f32_e64 v16, 0x40c00000, s0
+; GFX11-FAKE16-NEXT: v_lshrrev_b32_e32 v2, 16, v2
+; GFX11-FAKE16-NEXT: v_lshrrev_b32_e32 v4, 16, v4
+; GFX11-FAKE16-NEXT: v_cndmask_b32_e32 v5, v5, v17, vcc_lo
+; GFX11-FAKE16-NEXT: v_bfe_u32 v17, v18, 16, 1
+; GFX11-FAKE16-NEXT: v_bfe_u32 v6, v16, 16, 1
+; GFX11-FAKE16-NEXT: v_and_b32_e32 v3, 0xffff, v3
+; GFX11-FAKE16-NEXT: v_lshl_or_b32 v7, v0, 16, v1
+; GFX11-FAKE16-NEXT: v_lshrrev_b32_e32 v5, 16, v5
+; GFX11-FAKE16-NEXT: v_add_nc_u32_e32 v0, v17, v18
+; GFX11-FAKE16-NEXT: v_and_b32_e32 v4, 0xffff, v4
+; GFX11-FAKE16-NEXT: v_add_nc_u32_e32 v19, v6, v16
+; GFX11-FAKE16-NEXT: v_lshl_or_b32 v6, v2, 16, v3
+; GFX11-FAKE16-NEXT: v_or_b32_e32 v3, 0x400000, v18
+; GFX11-FAKE16-NEXT: v_add_nc_u32_e32 v0, 0x7fff, v0
+; GFX11-FAKE16-NEXT: v_cmp_u_f32_e32 vcc_lo, v18, v18
+; GFX11-FAKE16-NEXT: s_and_b32 s0, s15, 0xffff0000
+; GFX11-FAKE16-NEXT: v_lshl_or_b32 v5, v5, 16, v4
+; GFX11-FAKE16-NEXT: v_add_nc_u32_e32 v1, 0x7fff, v19
+; GFX11-FAKE16-NEXT: v_or_b32_e32 v2, 0x400000, v16
+; GFX11-FAKE16-NEXT: v_add_f32_e64 v4, 0x40c00000, s0
+; GFX11-FAKE16-NEXT: v_cndmask_b32_e32 v0, v0, v3, vcc_lo
+; GFX11-FAKE16-NEXT: v_cmp_u_f32_e32 vcc_lo, v16, v16
+; GFX11-FAKE16-NEXT: s_lshl_b32 s1, s15, 16
+; GFX11-FAKE16-NEXT: s_and_b32 s0, s14, 0xffff0000
+; GFX11-FAKE16-NEXT: v_add_f32_e64 v17, 0x40c00000, s1
+; GFX11-FAKE16-NEXT: v_add_f32_e64 v16, 0x40c00000, s0
+; GFX11-FAKE16-NEXT: v_cndmask_b32_e32 v1, v1, v2, vcc_lo
+; GFX11-FAKE16-NEXT: v_bfe_u32 v2, v4, 16, 1
+; GFX11-FAKE16-NEXT: v_or_b32_e32 v19, 0x400000, v4
+; GFX11-FAKE16-NEXT: v_bfe_u32 v3, v17, 16, 1
+; GFX11-FAKE16-NEXT: v_bfe_u32 v18, v16, 16, 1
+; GFX11-FAKE16-NEXT: v_cmp_u_f32_e32 vcc_lo, v4, v4
+; GFX11-FAKE16-NEXT: v_add_nc_u32_e32 v2, v2, v4
+; GFX11-FAKE16-NEXT: s_lshl_b32 s0, s14, 16
+; GFX11-FAKE16-NEXT: v_add_nc_u32_e32 v3, v3, v17
+; GFX11-FAKE16-NEXT: v_or_b32_e32 v20, 0x400000, v17
+; GFX11-FAKE16-NEXT: v_add_nc_u32_e32 v18, v18, v16
+; GFX11-FAKE16-NEXT: v_add_nc_u32_e32 v2, 0x7fff, v2
+; GFX11-FAKE16-NEXT: v_add_f32_e64 v4, 0x40c00000, s0
+; GFX11-FAKE16-NEXT: v_add_nc_u32_e32 v3, 0x7fff, v3
+; GFX11-FAKE16-NEXT: s_and_b32 s0, s13, 0xffff0000
+; GFX11-FAKE16-NEXT: v_lshrrev_b32_e32 v0, 16, v0
+; GFX11-FAKE16-NEXT: v_cndmask_b32_e32 v2, v2, v19, vcc_lo
+; GFX11-FAKE16-NEXT: v_cmp_u_f32_e32 vcc_lo, v17, v17
+; GFX11-FAKE16-NEXT: v_add_nc_u32_e32 v17, 0x7fff, v18
+; GFX11-FAKE16-NEXT: v_or_b32_e32 v18, 0x400000, v16
+; GFX11-FAKE16-NEXT: v_bfe_u32 v19, v4, 16, 1
+; GFX11-FAKE16-NEXT: v_lshrrev_b32_e32 v1, 16, v1
+; GFX11-FAKE16-NEXT: v_cndmask_b32_e32 v3, v3, v20, vcc_lo
+; GFX11-FAKE16-NEXT: v_cmp_u_f32_e32 vcc_lo, v16, v16
+; GFX11-FAKE16-NEXT: v_and_b32_e32 v0, 0xffff, v0
+; GFX11-FAKE16-NEXT: v_lshrrev_b32_e32 v2, 16, v2
+; GFX11-FAKE16-NEXT: s_delay_alu instid0(VALU_DEP_4)
+; GFX11-FAKE16-NEXT: v_lshrrev_b32_e32 v3, 16, v3
+; GFX11-FAKE16-NEXT: v_dual_cndmask_b32 v16, v17, v18 :: v_dual_add_nc_u32 v17, v19, v4
+; GFX11-FAKE16-NEXT: v_add_f32_e64 v18, 0x40c00000, s0
+; GFX11-FAKE16-NEXT: s_lshl_b32 s0, s13, 16
+; GFX11-FAKE16-NEXT: v_or_b32_e32 v19, 0x400000, v4
+; GFX11-FAKE16-NEXT: v_add_f32_e64 v21, 0x40c00000, s0
+; GFX11-FAKE16-NEXT: v_add_nc_u32_e32 v17, 0x7fff, v17
+; GFX11-FAKE16-NEXT: v_cmp_u_f32_e32 vcc_lo, v4, v4
+; GFX11-FAKE16-NEXT: s_lshl_b32 s0, s12, 16
+; GFX11-FAKE16-NEXT: v_bfe_u32 v20, v18, 16, 1
+; GFX11-FAKE16-NEXT: v_or_b32_e32 v26, 0x400000, v21
+; GFX11-FAKE16-NEXT: v_or_b32_e32 v25, 0x400000, v18
+; GFX11-FAKE16-NEXT: v_cndmask_b32_e32 v4, v17, v19, vcc_lo
+; GFX11-FAKE16-NEXT: v_add_f32_e64 v17, 0x40c00000, s0
+; GFX11-FAKE16-NEXT: v_bfe_u32 v19, v21, 16, 1
+; GFX11-FAKE16-NEXT: s_and_b32 s0, s12, 0xffff0000
+; GFX11-FAKE16-NEXT: v_cmp_u_f32_e32 vcc_lo, v21, v21
+; GFX11-FAKE16-NEXT: v_add_f32_e64 v22, 0x40c00000, s0
+; GFX11-FAKE16-NEXT: v_bfe_u32 v23, v17, 16, 1
+; GFX11-FAKE16-NEXT: v_add_nc_u32_e32 v19, v19, v21
+; GFX11-FAKE16-NEXT: v_add_nc_u32_e32 v20, v20, v18
+; GFX11-FAKE16-NEXT: v_or_b32_e32 v27, 0x400000, v17
+; GFX11-FAKE16-NEXT: v_bfe_u32 v24, v22, 16, 1
+; GFX11-FAKE16-NEXT: v_add_nc_u32_e32 v23, v23, v17
+; GFX11-FAKE16-NEXT: v_add_nc_u32_e32 v19, 0x7fff, v19
+; GFX11-FAKE16-NEXT: v_add_nc_u32_e32 v20, 0x7fff, v20
+; GFX11-FAKE16-NEXT: v_lshrrev_b32_e32 v4, 16, v4
+; GFX11-FAKE16-NEXT: v_add_nc_u32_e32 v24, v24, v22
+; GFX11-FAKE16-NEXT: v_add_nc_u32_e32 v23, 0x7fff, v23
+; GFX11-FAKE16-NEXT: v_cndmask_b32_e32 v19, v19, v26, vcc_lo
+; GFX11-FAKE16-NEXT: v_cmp_u_f32_e32 vcc_lo, v17, v17
+; GFX11-FAKE16-NEXT: v_and_b32_e32 v3, 0xffff, v3
+; GFX11-FAKE16-NEXT: v_add_nc_u32_e32 v21, 0x7fff, v24
+; GFX11-FAKE16-NEXT: v_or_b32_e32 v24, 0x400000, v22
+; GFX11-FAKE16-NEXT: v_lshrrev_b32_e32 v19, 16, v19
+; GFX11-FAKE16-NEXT: v_cndmask_b32_e32 v17, v23, v27, vcc_lo
+; GFX11-FAKE16-NEXT: v_cmp_u_f32_e32 vcc_lo, v18, v18
+; GFX11-FAKE16-NEXT: v_lshrrev_b32_e32 v16, 16, v16
+; GFX11-FAKE16-NEXT: v_lshl_or_b32 v3, v2, 16, v3
+; GFX11-FAKE16-NEXT: v_and_b32_e32 v19, 0xffff, v19
+; GFX11-FAKE16-NEXT: v_lshrrev_b32_e32 v17, 16, v17
+; GFX11-FAKE16-NEXT: v_cndmask_b32_e32 v18, v20, v25, vcc_lo
+; GFX11-FAKE16-NEXT: v_cmp_u_f32_e32 vcc_lo, v22, v22
+; GFX11-FAKE16-NEXT: s_delay_alu instid0(VALU_DEP_3) | instskip(NEXT) | instid1(VALU_DEP_3)
+; GFX11-FAKE16-NEXT: v_and_b32_e32 v17, 0xffff, v17
+; GFX11-FAKE16-NEXT: v_lshrrev_b32_e32 v18, 16, v18
+; GFX11-FAKE16-NEXT: v_cndmask_b32_e32 v20, v21, v24, vcc_lo
+; GFX11-FAKE16-NEXT: v_and_b32_e32 v21, 0xffff, v4
+; GFX11-FAKE16-NEXT: v_lshl_or_b32 v4, v1, 16, v0
+; GFX11-FAKE16-NEXT: s_delay_alu instid0(VALU_DEP_4) | instskip(NEXT) | instid1(VALU_DEP_4)
+; GFX11-FAKE16-NEXT: v_lshl_or_b32 v1, v18, 16, v19
+; GFX11-FAKE16-NEXT: v_lshrrev_b32_e32 v20, 16, v20
+; GFX11-FAKE16-NEXT: s_delay_alu instid0(VALU_DEP_4) | instskip(NEXT) | instid1(VALU_DEP_2)
+; GFX11-FAKE16-NEXT: v_lshl_or_b32 v2, v16, 16, v21
+; GFX11-FAKE16-NEXT: v_lshl_or_b32 v0, v20, 16, v17
+; GFX11-FAKE16-NEXT: s_setpc_b64 s[30:31]
+; GFX11-FAKE16-NEXT: .LBB83_3:
+; GFX11-FAKE16-NEXT: s_branch .LBB83_2
+; GFX11-FAKE16-NEXT: .LBB83_4:
+; GFX11-FAKE16-NEXT: v_dual_mov_b32 v0, s12 :: v_dual_mov_b32 v1, s13
+; GFX11-FAKE16-NEXT: v_dual_mov_b32 v2, s14 :: v_dual_mov_b32 v3, s15
+; GFX11-FAKE16-NEXT: v_dual_mov_b32 v4, s16 :: v_dual_mov_b32 v5, s17
+; GFX11-FAKE16-NEXT: v_dual_mov_b32 v6, s18 :: v_dual_mov_b32 v7, s19
+; GFX11-FAKE16-NEXT: v_dual_mov_b32 v8, s20 :: v_dual_mov_b32 v9, s21
+; GFX11-FAKE16-NEXT: v_dual_mov_b32 v10, s22 :: v_dual_mov_b32 v11, s23
+; GFX11-FAKE16-NEXT: v_dual_mov_b32 v12, s24 :: v_dual_mov_b32 v13, s25
+; GFX11-FAKE16-NEXT: v_dual_mov_b32 v14, s26 :: v_dual_mov_b32 v15, s27
+; GFX11-FAKE16-NEXT: s_setpc_b64 s[30:31]
%cmp = icmp eq i32 %b, 0
br i1 %cmp, label %cmp.true, label %cmp.false
@@ -60096,298 +61440,258 @@ define <32 x i16> @bitcast_v32bf16_to_v32i16(<32 x bfloat> %a, i32 %b) {
; GFX11-TRUE16-NEXT: ; %bb.1: ; %cmp.true
; GFX11-TRUE16-NEXT: v_lshlrev_b32_e32 v17, 16, v1
; GFX11-TRUE16-NEXT: v_lshlrev_b32_e32 v16, 16, v0
-; GFX11-TRUE16-NEXT: v_and_b32_e32 v0, 0xffff0000, v0
-; GFX11-TRUE16-NEXT: v_lshlrev_b32_e32 v24, 16, v5
-; GFX11-TRUE16-NEXT: s_delay_alu instid0(VALU_DEP_4) | instskip(NEXT) | instid1(VALU_DEP_4)
-; GFX11-TRUE16-NEXT: v_dual_add_f32 v17, 0x40c00000, v17 :: v_dual_lshlrev_b32 v26, 16, v7
-; GFX11-TRUE16-NEXT: v_add_f32_e32 v16, 0x40c00000, v16
-; GFX11-TRUE16-NEXT: s_delay_alu instid0(VALU_DEP_4) | instskip(SKIP_1) | instid1(VALU_DEP_4)
-; GFX11-TRUE16-NEXT: v_dual_add_f32 v18, 0x40c00000, v0 :: v_dual_lshlrev_b32 v19, 16, v2
-; GFX11-TRUE16-NEXT: v_lshlrev_b32_e32 v28, 16, v9
-; GFX11-TRUE16-NEXT: v_bfe_u32 v21, v17, 16, 1
+; GFX11-TRUE16-NEXT: v_lshlrev_b32_e32 v26, 16, v6
+; GFX11-TRUE16-NEXT: v_lshlrev_b32_e32 v28, 16, v8
+; GFX11-TRUE16-NEXT: v_and_b32_e32 v6, 0xffff0000, v6
+; GFX11-TRUE16-NEXT: s_delay_alu instid0(VALU_DEP_4) | instskip(SKIP_2) | instid1(VALU_DEP_4)
+; GFX11-TRUE16-NEXT: v_dual_add_f32 v17, 0x40c00000, v17 :: v_dual_add_f32 v16, 0x40c00000, v16
+; GFX11-TRUE16-NEXT: v_lshlrev_b32_e32 v30, 16, v10
+; GFX11-TRUE16-NEXT: v_and_b32_e32 v8, 0xffff0000, v8
+; GFX11-TRUE16-NEXT: v_add_f32_e32 v6, 0x40c00000, v6
; GFX11-TRUE16-NEXT: s_delay_alu instid0(VALU_DEP_4)
-; GFX11-TRUE16-NEXT: v_bfe_u32 v0, v16, 16, 1
-; GFX11-TRUE16-NEXT: v_or_b32_e32 v22, 0x400000, v16
+; GFX11-TRUE16-NEXT: v_bfe_u32 v22, v17, 16, 1
+; GFX11-TRUE16-NEXT: v_bfe_u32 v19, v16, 16, 1
+; GFX11-TRUE16-NEXT: v_or_b32_e32 v20, 0x400000, v16
; GFX11-TRUE16-NEXT: v_cmp_u_f32_e32 vcc_lo, v16, v16
-; GFX11-TRUE16-NEXT: v_or_b32_e32 v23, 0x400000, v18
-; GFX11-TRUE16-NEXT: v_add3_u32 v21, v21, v17, 0x7fff
-; GFX11-TRUE16-NEXT: v_add3_u32 v0, v0, v16, 0x7fff
+; GFX11-TRUE16-NEXT: v_or_b32_e32 v24, 0x400000, v17
+; GFX11-TRUE16-NEXT: v_add3_u32 v22, v22, v17, 0x7fff
+; GFX11-TRUE16-NEXT: v_add3_u32 v19, v19, v16, 0x7fff
; GFX11-TRUE16-NEXT: v_and_b32_e32 v1, 0xffff0000, v1
-; GFX11-TRUE16-NEXT: v_or_b32_e32 v16, 0x400000, v17
-; GFX11-TRUE16-NEXT: v_lshlrev_b32_e32 v30, 16, v11
-; GFX11-TRUE16-NEXT: s_delay_alu instid0(VALU_DEP_4) | instskip(NEXT) | instid1(VALU_DEP_4)
-; GFX11-TRUE16-NEXT: v_dual_add_f32 v19, 0x40c00000, v19 :: v_dual_cndmask_b32 v0, v0, v22
-; GFX11-TRUE16-NEXT: v_add_f32_e32 v20, 0x40c00000, v1
-; GFX11-TRUE16-NEXT: v_bfe_u32 v1, v18, 16, 1
-; GFX11-TRUE16-NEXT: v_cmp_u_f32_e32 vcc_lo, v18, v18
-; GFX11-TRUE16-NEXT: v_and_b32_e32 v22, 0xffff0000, v2
-; GFX11-TRUE16-NEXT: v_lshlrev_b32_e32 v32, 16, v13
-; GFX11-TRUE16-NEXT: v_and_b32_e32 v13, 0xffff0000, v13
-; GFX11-TRUE16-NEXT: v_add3_u32 v1, v1, v18, 0x7fff
-; GFX11-TRUE16-NEXT: v_bfe_u32 v18, v20, 16, 1
-; GFX11-TRUE16-NEXT: v_lshrrev_b32_e32 v0, 16, v0
-; GFX11-TRUE16-NEXT: v_lshlrev_b32_e32 v25, 16, v6
-; GFX11-TRUE16-NEXT: v_lshlrev_b32_e32 v27, 16, v8
-; GFX11-TRUE16-NEXT: v_cndmask_b32_e32 v1, v1, v23, vcc_lo
+; GFX11-TRUE16-NEXT: v_add_f32_e32 v8, 0x40c00000, v8
+; GFX11-TRUE16-NEXT: v_lshlrev_b32_e32 v32, 16, v12
+; GFX11-TRUE16-NEXT: v_and_b32_e32 v10, 0xffff0000, v10
+; GFX11-TRUE16-NEXT: v_cndmask_b32_e32 v16, v19, v20, vcc_lo
+; GFX11-TRUE16-NEXT: v_dual_add_f32 v1, 0x40c00000, v1 :: v_dual_lshlrev_b32 v18, 16, v2
+; GFX11-TRUE16-NEXT: v_lshlrev_b32_e32 v20, 16, v3
+; GFX11-TRUE16-NEXT: v_and_b32_e32 v3, 0xffff0000, v3
+; GFX11-TRUE16-NEXT: v_and_b32_e32 v2, 0xffff0000, v2
+; GFX11-TRUE16-NEXT: v_add_f32_e32 v10, 0x40c00000, v10
+; GFX11-TRUE16-NEXT: v_bfe_u32 v19, v1, 16, 1
+; GFX11-TRUE16-NEXT: v_add_f32_e32 v20, 0x40c00000, v20
+; GFX11-TRUE16-NEXT: v_and_b32_e32 v12, 0xffff0000, v12
+; GFX11-TRUE16-NEXT: v_dual_add_f32 v2, 0x40c00000, v2 :: v_dual_add_f32 v3, 0x40c00000, v3
+; GFX11-TRUE16-NEXT: s_delay_alu instid0(VALU_DEP_4) | instskip(SKIP_1) | instid1(VALU_DEP_4)
+; GFX11-TRUE16-NEXT: v_add3_u32 v19, v19, v1, 0x7fff
+; GFX11-TRUE16-NEXT: v_and_b32_e32 v0, 0xffff0000, v0
+; GFX11-TRUE16-NEXT: v_add_f32_e32 v12, 0x40c00000, v12
+; GFX11-TRUE16-NEXT: s_delay_alu instid0(VALU_DEP_2) | instskip(NEXT) | instid1(VALU_DEP_1)
+; GFX11-TRUE16-NEXT: v_add_f32_e32 v0, 0x40c00000, v0
+; GFX11-TRUE16-NEXT: v_bfe_u32 v21, v0, 16, 1
+; GFX11-TRUE16-NEXT: v_or_b32_e32 v23, 0x400000, v0
+; GFX11-TRUE16-NEXT: v_cmp_u_f32_e32 vcc_lo, v0, v0
+; GFX11-TRUE16-NEXT: s_delay_alu instid0(VALU_DEP_3) | instskip(NEXT) | instid1(VALU_DEP_1)
+; GFX11-TRUE16-NEXT: v_add3_u32 v21, v21, v0, 0x7fff
+; GFX11-TRUE16-NEXT: v_cndmask_b32_e32 v0, v21, v23, vcc_lo
; GFX11-TRUE16-NEXT: v_cmp_u_f32_e32 vcc_lo, v17, v17
-; GFX11-TRUE16-NEXT: v_or_b32_e32 v17, 0x400000, v20
-; GFX11-TRUE16-NEXT: v_lshlrev_b32_e32 v29, 16, v10
-; GFX11-TRUE16-NEXT: v_lshlrev_b32_e32 v31, 16, v12
-; GFX11-TRUE16-NEXT: v_dual_add_f32 v13, 0x40c00000, v13 :: v_dual_cndmask_b32 v2, v21, v16
-; GFX11-TRUE16-NEXT: v_add_f32_e32 v21, 0x40c00000, v22
-; GFX11-TRUE16-NEXT: v_add3_u32 v16, v18, v20, 0x7fff
-; GFX11-TRUE16-NEXT: v_bfe_u32 v18, v19, 16, 1
+; GFX11-TRUE16-NEXT: v_or_b32_e32 v21, 0x400000, v1
+; GFX11-TRUE16-NEXT: v_bfe_u32 v23, v20, 16, 1
+; GFX11-TRUE16-NEXT: v_mov_b16_e32 v0.l, v16.h
+; GFX11-TRUE16-NEXT: v_cndmask_b32_e32 v17, v22, v24, vcc_lo
+; GFX11-TRUE16-NEXT: v_lshlrev_b32_e32 v24, 16, v4
+; GFX11-TRUE16-NEXT: v_add_f32_e32 v18, 0x40c00000, v18
+; GFX11-TRUE16-NEXT: v_cmp_u_f32_e32 vcc_lo, v1, v1
+; GFX11-TRUE16-NEXT: v_and_b32_e32 v4, 0xffff0000, v4
+; GFX11-TRUE16-NEXT: s_delay_alu instid0(VALU_DEP_3)
+; GFX11-TRUE16-NEXT: v_bfe_u32 v22, v18, 16, 1
+; GFX11-TRUE16-NEXT: v_cndmask_b32_e32 v1, v19, v21, vcc_lo
+; GFX11-TRUE16-NEXT: v_or_b32_e32 v21, 0x400000, v18
+; GFX11-TRUE16-NEXT: v_cmp_u_f32_e32 vcc_lo, v18, v18
+; GFX11-TRUE16-NEXT: v_add_f32_e32 v4, 0x40c00000, v4
+; GFX11-TRUE16-NEXT: v_add3_u32 v19, v22, v18, 0x7fff
+; GFX11-TRUE16-NEXT: v_bfe_u32 v22, v2, 16, 1
+; GFX11-TRUE16-NEXT: v_mov_b16_e32 v1.l, v17.h
+; GFX11-TRUE16-NEXT: s_delay_alu instid0(VALU_DEP_3) | instskip(NEXT) | instid1(VALU_DEP_3)
+; GFX11-TRUE16-NEXT: v_cndmask_b32_e32 v18, v19, v21, vcc_lo
+; GFX11-TRUE16-NEXT: v_add3_u32 v19, v22, v2, 0x7fff
+; GFX11-TRUE16-NEXT: v_or_b32_e32 v21, 0x400000, v2
+; GFX11-TRUE16-NEXT: v_cmp_u_f32_e32 vcc_lo, v2, v2
+; GFX11-TRUE16-NEXT: v_add3_u32 v22, v23, v20, 0x7fff
+; GFX11-TRUE16-NEXT: v_or_b32_e32 v23, 0x400000, v20
+; GFX11-TRUE16-NEXT: s_delay_alu instid0(VALU_DEP_4) | instskip(SKIP_4) | instid1(VALU_DEP_4)
+; GFX11-TRUE16-NEXT: v_cndmask_b32_e32 v2, v19, v21, vcc_lo
+; GFX11-TRUE16-NEXT: v_bfe_u32 v19, v3, 16, 1
; GFX11-TRUE16-NEXT: v_cmp_u_f32_e32 vcc_lo, v20, v20
-; GFX11-TRUE16-NEXT: v_or_b32_e32 v35, 0x400000, v13
-; GFX11-TRUE16-NEXT: v_bfe_u32 v20, v21, 16, 1
-; GFX11-TRUE16-NEXT: v_cndmask_b32_e32 v16, v16, v17, vcc_lo
-; GFX11-TRUE16-NEXT: v_add3_u32 v17, v18, v19, 0x7fff
-; GFX11-TRUE16-NEXT: v_or_b32_e32 v18, 0x400000, v19
-; GFX11-TRUE16-NEXT: v_cmp_u_f32_e32 vcc_lo, v19, v19
-; GFX11-TRUE16-NEXT: v_lshlrev_b32_e32 v22, 16, v3
-; GFX11-TRUE16-NEXT: v_and_b32_e32 v3, 0xffff0000, v3
-; GFX11-TRUE16-NEXT: v_or_b32_e32 v19, 0x400000, v21
-; GFX11-TRUE16-NEXT: v_cndmask_b32_e32 v17, v17, v18, vcc_lo
-; GFX11-TRUE16-NEXT: s_delay_alu instid0(VALU_DEP_3) | instskip(SKIP_3) | instid1(VALU_DEP_4)
-; GFX11-TRUE16-NEXT: v_dual_add_f32 v22, 0x40c00000, v22 :: v_dual_add_f32 v3, 0x40c00000, v3
-; GFX11-TRUE16-NEXT: v_add3_u32 v18, v20, v21, 0x7fff
-; GFX11-TRUE16-NEXT: v_cmp_u_f32_e32 vcc_lo, v21, v21
-; GFX11-TRUE16-NEXT: v_lshlrev_b32_e32 v23, 16, v4
-; GFX11-TRUE16-NEXT: v_bfe_u32 v20, v22, 16, 1
-; GFX11-TRUE16-NEXT: v_bfe_u32 v21, v3, 16, 1
-; GFX11-TRUE16-NEXT: v_lshrrev_b32_e32 v17, 16, v17
-; GFX11-TRUE16-NEXT: v_cndmask_b32_e32 v18, v18, v19, vcc_lo
-; GFX11-TRUE16-NEXT: v_cmp_u_f32_e32 vcc_lo, v22, v22
-; GFX11-TRUE16-NEXT: v_add3_u32 v19, v20, v22, 0x7fff
-; GFX11-TRUE16-NEXT: v_or_b32_e32 v20, 0x400000, v22
-; GFX11-TRUE16-NEXT: v_mov_b16_e32 v18.l, v16.h
-; GFX11-TRUE16-NEXT: s_delay_alu instid0(VALU_DEP_2)
-; GFX11-TRUE16-NEXT: v_cndmask_b32_e32 v19, v19, v20, vcc_lo
-; GFX11-TRUE16-NEXT: v_add3_u32 v20, v21, v3, 0x7fff
-; GFX11-TRUE16-NEXT: v_or_b32_e32 v21, 0x400000, v3
+; GFX11-TRUE16-NEXT: v_add_f32_e32 v21, 0x40c00000, v24
+; GFX11-TRUE16-NEXT: v_mov_b16_e32 v2.l, v18.h
+; GFX11-TRUE16-NEXT: v_add3_u32 v19, v19, v3, 0x7fff
+; GFX11-TRUE16-NEXT: v_cndmask_b32_e32 v20, v22, v23, vcc_lo
+; GFX11-TRUE16-NEXT: v_lshlrev_b32_e32 v22, 16, v5
+; GFX11-TRUE16-NEXT: v_or_b32_e32 v23, 0x400000, v3
+; GFX11-TRUE16-NEXT: v_bfe_u32 v24, v21, 16, 1
; GFX11-TRUE16-NEXT: v_cmp_u_f32_e32 vcc_lo, v3, v3
-; GFX11-TRUE16-NEXT: v_and_b32_e32 v4, 0xffff0000, v4
-; GFX11-TRUE16-NEXT: v_lshrrev_b32_e32 v19, 16, v19
-; GFX11-TRUE16-NEXT: s_delay_alu instid0(VALU_DEP_4) | instskip(NEXT) | instid1(VALU_DEP_3)
-; GFX11-TRUE16-NEXT: v_cndmask_b32_e32 v3, v20, v21, vcc_lo
-; GFX11-TRUE16-NEXT: v_dual_add_f32 v23, 0x40c00000, v23 :: v_dual_add_f32 v4, 0x40c00000, v4
-; GFX11-TRUE16-NEXT: s_delay_alu instid0(VALU_DEP_1) | instskip(SKIP_2) | instid1(VALU_DEP_3)
-; GFX11-TRUE16-NEXT: v_bfe_u32 v22, v23, 16, 1
-; GFX11-TRUE16-NEXT: v_or_b32_e32 v21, 0x400000, v23
-; GFX11-TRUE16-NEXT: v_cmp_u_f32_e32 vcc_lo, v23, v23
-; GFX11-TRUE16-NEXT: v_add3_u32 v20, v22, v23, 0x7fff
-; GFX11-TRUE16-NEXT: v_bfe_u32 v22, v4, 16, 1
-; GFX11-TRUE16-NEXT: s_delay_alu instid0(VALU_DEP_2) | instskip(NEXT) | instid1(VALU_DEP_2)
-; GFX11-TRUE16-NEXT: v_cndmask_b32_e32 v20, v20, v21, vcc_lo
-; GFX11-TRUE16-NEXT: v_add3_u32 v21, v22, v4, 0x7fff
-; GFX11-TRUE16-NEXT: v_or_b32_e32 v22, 0x400000, v4
+; GFX11-TRUE16-NEXT: v_and_b32_e32 v5, 0xffff0000, v5
+; GFX11-TRUE16-NEXT: s_delay_alu instid0(VALU_DEP_4) | instskip(NEXT) | instid1(VALU_DEP_4)
+; GFX11-TRUE16-NEXT: v_dual_add_f32 v22, 0x40c00000, v22 :: v_dual_cndmask_b32 v3, v19, v23
+; GFX11-TRUE16-NEXT: v_add3_u32 v19, v24, v21, 0x7fff
+; GFX11-TRUE16-NEXT: v_or_b32_e32 v23, 0x400000, v21
+; GFX11-TRUE16-NEXT: v_bfe_u32 v24, v4, 16, 1
+; GFX11-TRUE16-NEXT: v_cmp_u_f32_e32 vcc_lo, v21, v21
+; GFX11-TRUE16-NEXT: v_bfe_u32 v25, v22, 16, 1
+; GFX11-TRUE16-NEXT: v_add_f32_e32 v5, 0x40c00000, v5
+; GFX11-TRUE16-NEXT: v_mov_b16_e32 v3.l, v20.h
+; GFX11-TRUE16-NEXT: v_add3_u32 v21, v24, v4, 0x7fff
+; GFX11-TRUE16-NEXT: v_cndmask_b32_e32 v19, v19, v23, vcc_lo
+; GFX11-TRUE16-NEXT: v_or_b32_e32 v23, 0x400000, v4
; GFX11-TRUE16-NEXT: v_cmp_u_f32_e32 vcc_lo, v4, v4
-; GFX11-TRUE16-NEXT: s_delay_alu instid0(VALU_DEP_2) | instskip(NEXT) | instid1(VALU_DEP_1)
-; GFX11-TRUE16-NEXT: v_dual_cndmask_b32 v4, v21, v22 :: v_dual_and_b32 v5, 0xffff0000, v5
-; GFX11-TRUE16-NEXT: v_dual_add_f32 v24, 0x40c00000, v24 :: v_dual_add_f32 v5, 0x40c00000, v5
-; GFX11-TRUE16-NEXT: s_delay_alu instid0(VALU_DEP_1) | instskip(SKIP_2) | instid1(VALU_DEP_3)
-; GFX11-TRUE16-NEXT: v_bfe_u32 v23, v24, 16, 1
-; GFX11-TRUE16-NEXT: v_or_b32_e32 v22, 0x400000, v24
-; GFX11-TRUE16-NEXT: v_cmp_u_f32_e32 vcc_lo, v24, v24
-; GFX11-TRUE16-NEXT: v_add3_u32 v21, v23, v24, 0x7fff
-; GFX11-TRUE16-NEXT: v_bfe_u32 v23, v5, 16, 1
-; GFX11-TRUE16-NEXT: s_delay_alu instid0(VALU_DEP_2) | instskip(NEXT) | instid1(VALU_DEP_2)
-; GFX11-TRUE16-NEXT: v_cndmask_b32_e32 v21, v21, v22, vcc_lo
-; GFX11-TRUE16-NEXT: v_add3_u32 v22, v23, v5, 0x7fff
-; GFX11-TRUE16-NEXT: v_or_b32_e32 v23, 0x400000, v5
+; GFX11-TRUE16-NEXT: v_add3_u32 v24, v25, v22, 0x7fff
+; GFX11-TRUE16-NEXT: v_or_b32_e32 v25, 0x400000, v22
+; GFX11-TRUE16-NEXT: s_delay_alu instid0(VALU_DEP_4) | instskip(SKIP_4) | instid1(VALU_DEP_4)
+; GFX11-TRUE16-NEXT: v_cndmask_b32_e32 v4, v21, v23, vcc_lo
+; GFX11-TRUE16-NEXT: v_bfe_u32 v21, v5, 16, 1
+; GFX11-TRUE16-NEXT: v_cmp_u_f32_e32 vcc_lo, v22, v22
+; GFX11-TRUE16-NEXT: v_add_f32_e32 v23, 0x40c00000, v26
+; GFX11-TRUE16-NEXT: v_mov_b16_e32 v4.l, v19.h
+; GFX11-TRUE16-NEXT: v_add3_u32 v21, v21, v5, 0x7fff
+; GFX11-TRUE16-NEXT: v_cndmask_b32_e32 v22, v24, v25, vcc_lo
+; GFX11-TRUE16-NEXT: v_lshlrev_b32_e32 v24, 16, v7
+; GFX11-TRUE16-NEXT: v_or_b32_e32 v25, 0x400000, v5
+; GFX11-TRUE16-NEXT: v_bfe_u32 v26, v23, 16, 1
; GFX11-TRUE16-NEXT: v_cmp_u_f32_e32 vcc_lo, v5, v5
-; GFX11-TRUE16-NEXT: v_and_b32_e32 v6, 0xffff0000, v6
-; GFX11-TRUE16-NEXT: v_lshrrev_b32_e32 v21, 16, v21
-; GFX11-TRUE16-NEXT: s_delay_alu instid0(VALU_DEP_4) | instskip(NEXT) | instid1(VALU_DEP_3)
-; GFX11-TRUE16-NEXT: v_cndmask_b32_e32 v5, v22, v23, vcc_lo
-; GFX11-TRUE16-NEXT: v_dual_add_f32 v25, 0x40c00000, v25 :: v_dual_add_f32 v6, 0x40c00000, v6
-; GFX11-TRUE16-NEXT: s_delay_alu instid0(VALU_DEP_2) | instskip(NEXT) | instid1(VALU_DEP_2)
-; GFX11-TRUE16-NEXT: v_mov_b16_e32 v16.l, v5.h
-; GFX11-TRUE16-NEXT: v_bfe_u32 v24, v25, 16, 1
-; GFX11-TRUE16-NEXT: v_or_b32_e32 v23, 0x400000, v25
-; GFX11-TRUE16-NEXT: v_cmp_u_f32_e32 vcc_lo, v25, v25
-; GFX11-TRUE16-NEXT: s_delay_alu instid0(VALU_DEP_3) | instskip(SKIP_1) | instid1(VALU_DEP_2)
-; GFX11-TRUE16-NEXT: v_add3_u32 v22, v24, v25, 0x7fff
-; GFX11-TRUE16-NEXT: v_bfe_u32 v24, v6, 16, 1
-; GFX11-TRUE16-NEXT: v_cndmask_b32_e32 v22, v22, v23, vcc_lo
-; GFX11-TRUE16-NEXT: s_delay_alu instid0(VALU_DEP_2) | instskip(SKIP_2) | instid1(VALU_DEP_2)
-; GFX11-TRUE16-NEXT: v_add3_u32 v23, v24, v6, 0x7fff
-; GFX11-TRUE16-NEXT: v_or_b32_e32 v24, 0x400000, v6
+; GFX11-TRUE16-NEXT: v_and_b32_e32 v7, 0xffff0000, v7
+; GFX11-TRUE16-NEXT: s_delay_alu instid0(VALU_DEP_4) | instskip(NEXT) | instid1(VALU_DEP_4)
+; GFX11-TRUE16-NEXT: v_dual_add_f32 v24, 0x40c00000, v24 :: v_dual_cndmask_b32 v5, v21, v25
+; GFX11-TRUE16-NEXT: v_add3_u32 v21, v26, v23, 0x7fff
+; GFX11-TRUE16-NEXT: v_or_b32_e32 v25, 0x400000, v23
+; GFX11-TRUE16-NEXT: v_bfe_u32 v26, v6, 16, 1
+; GFX11-TRUE16-NEXT: v_cmp_u_f32_e32 vcc_lo, v23, v23
+; GFX11-TRUE16-NEXT: v_bfe_u32 v27, v24, 16, 1
+; GFX11-TRUE16-NEXT: v_add_f32_e32 v7, 0x40c00000, v7
+; GFX11-TRUE16-NEXT: s_delay_alu instid0(VALU_DEP_4)
+; GFX11-TRUE16-NEXT: v_add3_u32 v23, v26, v6, 0x7fff
+; GFX11-TRUE16-NEXT: v_cndmask_b32_e32 v21, v21, v25, vcc_lo
+; GFX11-TRUE16-NEXT: v_or_b32_e32 v25, 0x400000, v6
; GFX11-TRUE16-NEXT: v_cmp_u_f32_e32 vcc_lo, v6, v6
-; GFX11-TRUE16-NEXT: v_dual_cndmask_b32 v6, v23, v24 :: v_dual_and_b32 v7, 0xffff0000, v7
-; GFX11-TRUE16-NEXT: s_delay_alu instid0(VALU_DEP_1) | instskip(NEXT) | instid1(VALU_DEP_1)
-; GFX11-TRUE16-NEXT: v_dual_add_f32 v26, 0x40c00000, v26 :: v_dual_add_f32 v7, 0x40c00000, v7
-; GFX11-TRUE16-NEXT: v_bfe_u32 v25, v26, 16, 1
-; GFX11-TRUE16-NEXT: v_or_b32_e32 v24, 0x400000, v26
-; GFX11-TRUE16-NEXT: v_cmp_u_f32_e32 vcc_lo, v26, v26
-; GFX11-TRUE16-NEXT: s_delay_alu instid0(VALU_DEP_3) | instskip(SKIP_1) | instid1(VALU_DEP_2)
-; GFX11-TRUE16-NEXT: v_add3_u32 v23, v25, v26, 0x7fff
-; GFX11-TRUE16-NEXT: v_bfe_u32 v25, v7, 16, 1
-; GFX11-TRUE16-NEXT: v_cndmask_b32_e32 v23, v23, v24, vcc_lo
-; GFX11-TRUE16-NEXT: s_delay_alu instid0(VALU_DEP_2) | instskip(SKIP_2) | instid1(VALU_DEP_2)
-; GFX11-TRUE16-NEXT: v_add3_u32 v24, v25, v7, 0x7fff
-; GFX11-TRUE16-NEXT: v_or_b32_e32 v25, 0x400000, v7
+; GFX11-TRUE16-NEXT: v_add3_u32 v26, v27, v24, 0x7fff
+; GFX11-TRUE16-NEXT: v_or_b32_e32 v27, 0x400000, v24
+; GFX11-TRUE16-NEXT: s_delay_alu instid0(VALU_DEP_4) | instskip(SKIP_4) | instid1(VALU_DEP_4)
+; GFX11-TRUE16-NEXT: v_cndmask_b32_e32 v6, v23, v25, vcc_lo
+; GFX11-TRUE16-NEXT: v_bfe_u32 v23, v7, 16, 1
+; GFX11-TRUE16-NEXT: v_cmp_u_f32_e32 vcc_lo, v24, v24
+; GFX11-TRUE16-NEXT: v_add_f32_e32 v25, 0x40c00000, v28
+; GFX11-TRUE16-NEXT: v_mov_b16_e32 v6.l, v21.h
+; GFX11-TRUE16-NEXT: v_add3_u32 v23, v23, v7, 0x7fff
+; GFX11-TRUE16-NEXT: v_cndmask_b32_e32 v24, v26, v27, vcc_lo
+; GFX11-TRUE16-NEXT: v_lshlrev_b32_e32 v26, 16, v9
+; GFX11-TRUE16-NEXT: v_or_b32_e32 v27, 0x400000, v7
+; GFX11-TRUE16-NEXT: v_bfe_u32 v28, v25, 16, 1
; GFX11-TRUE16-NEXT: v_cmp_u_f32_e32 vcc_lo, v7, v7
-; GFX11-TRUE16-NEXT: v_dual_cndmask_b32 v7, v24, v25 :: v_dual_and_b32 v8, 0xffff0000, v8
-; GFX11-TRUE16-NEXT: s_delay_alu instid0(VALU_DEP_1) | instskip(NEXT) | instid1(VALU_DEP_1)
-; GFX11-TRUE16-NEXT: v_dual_add_f32 v27, 0x40c00000, v27 :: v_dual_add_f32 v8, 0x40c00000, v8
-; GFX11-TRUE16-NEXT: v_bfe_u32 v26, v27, 16, 1
-; GFX11-TRUE16-NEXT: v_or_b32_e32 v25, 0x400000, v27
-; GFX11-TRUE16-NEXT: v_cmp_u_f32_e32 vcc_lo, v27, v27
-; GFX11-TRUE16-NEXT: s_delay_alu instid0(VALU_DEP_3) | instskip(SKIP_1) | instid1(VALU_DEP_2)
-; GFX11-TRUE16-NEXT: v_add3_u32 v24, v26, v27, 0x7fff
-; GFX11-TRUE16-NEXT: v_bfe_u32 v26, v8, 16, 1
-; GFX11-TRUE16-NEXT: v_cndmask_b32_e32 v24, v24, v25, vcc_lo
-; GFX11-TRUE16-NEXT: s_delay_alu instid0(VALU_DEP_2) | instskip(SKIP_2) | instid1(VALU_DEP_2)
-; GFX11-TRUE16-NEXT: v_add3_u32 v25, v26, v8, 0x7fff
-; GFX11-TRUE16-NEXT: v_or_b32_e32 v26, 0x400000, v8
+; GFX11-TRUE16-NEXT: v_and_b32_e32 v9, 0xffff0000, v9
+; GFX11-TRUE16-NEXT: s_delay_alu instid0(VALU_DEP_4) | instskip(NEXT) | instid1(VALU_DEP_4)
+; GFX11-TRUE16-NEXT: v_dual_add_f32 v26, 0x40c00000, v26 :: v_dual_cndmask_b32 v7, v23, v27
+; GFX11-TRUE16-NEXT: v_add3_u32 v23, v28, v25, 0x7fff
+; GFX11-TRUE16-NEXT: v_or_b32_e32 v27, 0x400000, v25
+; GFX11-TRUE16-NEXT: v_bfe_u32 v28, v8, 16, 1
+; GFX11-TRUE16-NEXT: v_cmp_u_f32_e32 vcc_lo, v25, v25
+; GFX11-TRUE16-NEXT: v_bfe_u32 v29, v26, 16, 1
+; GFX11-TRUE16-NEXT: v_add_f32_e32 v9, 0x40c00000, v9
+; GFX11-TRUE16-NEXT: s_delay_alu instid0(VALU_DEP_4)
+; GFX11-TRUE16-NEXT: v_add3_u32 v25, v28, v8, 0x7fff
+; GFX11-TRUE16-NEXT: v_cndmask_b32_e32 v23, v23, v27, vcc_lo
+; GFX11-TRUE16-NEXT: v_or_b32_e32 v27, 0x400000, v8
; GFX11-TRUE16-NEXT: v_cmp_u_f32_e32 vcc_lo, v8, v8
-; GFX11-TRUE16-NEXT: v_dual_cndmask_b32 v8, v25, v26 :: v_dual_and_b32 v9, 0xffff0000, v9
-; GFX11-TRUE16-NEXT: s_delay_alu instid0(VALU_DEP_1) | instskip(NEXT) | instid1(VALU_DEP_1)
-; GFX11-TRUE16-NEXT: v_dual_add_f32 v28, 0x40c00000, v28 :: v_dual_add_f32 v9, 0x40c00000, v9
-; GFX11-TRUE16-NEXT: v_bfe_u32 v27, v28, 16, 1
-; GFX11-TRUE16-NEXT: v_or_b32_e32 v26, 0x400000, v28
-; GFX11-TRUE16-NEXT: v_cmp_u_f32_e32 vcc_lo, v28, v28
-; GFX11-TRUE16-NEXT: s_delay_alu instid0(VALU_DEP_3) | instskip(SKIP_1) | instid1(VALU_DEP_2)
-; GFX11-TRUE16-NEXT: v_add3_u32 v25, v27, v28, 0x7fff
-; GFX11-TRUE16-NEXT: v_bfe_u32 v27, v9, 16, 1
-; GFX11-TRUE16-NEXT: v_cndmask_b32_e32 v25, v25, v26, vcc_lo
-; GFX11-TRUE16-NEXT: s_delay_alu instid0(VALU_DEP_2) | instskip(SKIP_2) | instid1(VALU_DEP_2)
-; GFX11-TRUE16-NEXT: v_add3_u32 v26, v27, v9, 0x7fff
-; GFX11-TRUE16-NEXT: v_or_b32_e32 v27, 0x400000, v9
+; GFX11-TRUE16-NEXT: v_add3_u32 v28, v29, v26, 0x7fff
+; GFX11-TRUE16-NEXT: v_or_b32_e32 v29, 0x400000, v26
+; GFX11-TRUE16-NEXT: s_delay_alu instid0(VALU_DEP_4)
+; GFX11-TRUE16-NEXT: v_cndmask_b32_e32 v8, v25, v27, vcc_lo
+; GFX11-TRUE16-NEXT: v_bfe_u32 v25, v9, 16, 1
+; GFX11-TRUE16-NEXT: v_cmp_u_f32_e32 vcc_lo, v26, v26
+; GFX11-TRUE16-NEXT: v_add_f32_e32 v27, 0x40c00000, v30
+; GFX11-TRUE16-NEXT: v_mov_b16_e32 v8.l, v23.h
+; GFX11-TRUE16-NEXT: v_mov_b16_e32 v5.l, v22.h
+; GFX11-TRUE16-NEXT: v_add3_u32 v25, v25, v9, 0x7fff
+; GFX11-TRUE16-NEXT: v_cndmask_b32_e32 v26, v28, v29, vcc_lo
+; GFX11-TRUE16-NEXT: v_lshlrev_b32_e32 v28, 16, v11
+; GFX11-TRUE16-NEXT: v_or_b32_e32 v29, 0x400000, v9
+; GFX11-TRUE16-NEXT: v_bfe_u32 v30, v27, 16, 1
; GFX11-TRUE16-NEXT: v_cmp_u_f32_e32 vcc_lo, v9, v9
-; GFX11-TRUE16-NEXT: v_dual_cndmask_b32 v9, v26, v27 :: v_dual_and_b32 v10, 0xffff0000, v10
-; GFX11-TRUE16-NEXT: s_delay_alu instid0(VALU_DEP_1) | instskip(NEXT) | instid1(VALU_DEP_1)
-; GFX11-TRUE16-NEXT: v_dual_add_f32 v29, 0x40c00000, v29 :: v_dual_add_f32 v10, 0x40c00000, v10
-; GFX11-TRUE16-NEXT: v_bfe_u32 v28, v29, 16, 1
-; GFX11-TRUE16-NEXT: v_or_b32_e32 v27, 0x400000, v29
-; GFX11-TRUE16-NEXT: v_cmp_u_f32_e32 vcc_lo, v29, v29
-; GFX11-TRUE16-NEXT: s_delay_alu instid0(VALU_DEP_3) | instskip(SKIP_1) | instid1(VALU_DEP_2)
-; GFX11-TRUE16-NEXT: v_add3_u32 v26, v28, v29, 0x7fff
-; GFX11-TRUE16-NEXT: v_bfe_u32 v28, v10, 16, 1
-; GFX11-TRUE16-NEXT: v_cndmask_b32_e32 v26, v26, v27, vcc_lo
-; GFX11-TRUE16-NEXT: s_delay_alu instid0(VALU_DEP_2) | instskip(SKIP_2) | instid1(VALU_DEP_2)
-; GFX11-TRUE16-NEXT: v_add3_u32 v27, v28, v10, 0x7fff
-; GFX11-TRUE16-NEXT: v_or_b32_e32 v28, 0x400000, v10
+; GFX11-TRUE16-NEXT: v_and_b32_e32 v11, 0xffff0000, v11
+; GFX11-TRUE16-NEXT: s_delay_alu instid0(VALU_DEP_4) | instskip(NEXT) | instid1(VALU_DEP_4)
+; GFX11-TRUE16-NEXT: v_dual_add_f32 v28, 0x40c00000, v28 :: v_dual_cndmask_b32 v9, v25, v29
+; GFX11-TRUE16-NEXT: v_add3_u32 v25, v30, v27, 0x7fff
+; GFX11-TRUE16-NEXT: v_or_b32_e32 v29, 0x400000, v27
+; GFX11-TRUE16-NEXT: v_bfe_u32 v30, v10, 16, 1
+; GFX11-TRUE16-NEXT: v_cmp_u_f32_e32 vcc_lo, v27, v27
+; GFX11-TRUE16-NEXT: v_bfe_u32 v31, v28, 16, 1
+; GFX11-TRUE16-NEXT: v_add_f32_e32 v11, 0x40c00000, v11
+; GFX11-TRUE16-NEXT: s_delay_alu instid0(VALU_DEP_4)
+; GFX11-TRUE16-NEXT: v_add3_u32 v27, v30, v10, 0x7fff
+; GFX11-TRUE16-NEXT: v_cndmask_b32_e32 v25, v25, v29, vcc_lo
+; GFX11-TRUE16-NEXT: v_or_b32_e32 v29, 0x400000, v10
; GFX11-TRUE16-NEXT: v_cmp_u_f32_e32 vcc_lo, v10, v10
-; GFX11-TRUE16-NEXT: v_dual_cndmask_b32 v10, v27, v28 :: v_dual_and_b32 v11, 0xffff0000, v11
-; GFX11-TRUE16-NEXT: s_delay_alu instid0(VALU_DEP_1) | instskip(NEXT) | instid1(VALU_DEP_2)
-; GFX11-TRUE16-NEXT: v_dual_add_f32 v30, 0x40c00000, v30 :: v_dual_add_f32 v11, 0x40c00000, v11
-; GFX11-TRUE16-NEXT: v_mov_b16_e32 v6.l, v10.h
-; GFX11-TRUE16-NEXT: s_delay_alu instid0(VALU_DEP_2) | instskip(SKIP_3) | instid1(VALU_DEP_4)
-; GFX11-TRUE16-NEXT: v_bfe_u32 v29, v30, 16, 1
-; GFX11-TRUE16-NEXT: v_or_b32_e32 v28, 0x400000, v30
-; GFX11-TRUE16-NEXT: v_cmp_u_f32_e32 vcc_lo, v30, v30
-; GFX11-TRUE16-NEXT: v_lshrrev_b32_e32 v10, 16, v26
-; GFX11-TRUE16-NEXT: v_add3_u32 v27, v29, v30, 0x7fff
-; GFX11-TRUE16-NEXT: v_bfe_u32 v29, v11, 16, 1
-; GFX11-TRUE16-NEXT: s_delay_alu instid0(VALU_DEP_3) | instskip(NEXT) | instid1(VALU_DEP_3)
-; GFX11-TRUE16-NEXT: v_lshl_or_b32 v10, v6, 16, v10
-; GFX11-TRUE16-NEXT: v_cndmask_b32_e32 v27, v27, v28, vcc_lo
-; GFX11-TRUE16-NEXT: s_delay_alu instid0(VALU_DEP_3) | instskip(SKIP_2) | instid1(VALU_DEP_2)
-; GFX11-TRUE16-NEXT: v_add3_u32 v28, v29, v11, 0x7fff
-; GFX11-TRUE16-NEXT: v_or_b32_e32 v29, 0x400000, v11
+; GFX11-TRUE16-NEXT: v_add3_u32 v30, v31, v28, 0x7fff
+; GFX11-TRUE16-NEXT: v_or_b32_e32 v31, 0x400000, v28
+; GFX11-TRUE16-NEXT: s_delay_alu instid0(VALU_DEP_4)
+; GFX11-TRUE16-NEXT: v_cndmask_b32_e32 v10, v27, v29, vcc_lo
+; GFX11-TRUE16-NEXT: v_bfe_u32 v27, v11, 16, 1
+; GFX11-TRUE16-NEXT: v_cmp_u_f32_e32 vcc_lo, v28, v28
+; GFX11-TRUE16-NEXT: v_add_f32_e32 v29, 0x40c00000, v32
+; GFX11-TRUE16-NEXT: v_mov_b16_e32 v10.l, v25.h
+; GFX11-TRUE16-NEXT: v_mov_b16_e32 v7.l, v24.h
+; GFX11-TRUE16-NEXT: v_add3_u32 v27, v27, v11, 0x7fff
+; GFX11-TRUE16-NEXT: v_cndmask_b32_e32 v28, v30, v31, vcc_lo
+; GFX11-TRUE16-NEXT: v_or_b32_e32 v30, 0x400000, v11
+; GFX11-TRUE16-NEXT: v_lshlrev_b32_e32 v31, 16, v13
+; GFX11-TRUE16-NEXT: v_bfe_u32 v32, v29, 16, 1
; GFX11-TRUE16-NEXT: v_cmp_u_f32_e32 vcc_lo, v11, v11
-; GFX11-TRUE16-NEXT: v_dual_cndmask_b32 v11, v28, v29 :: v_dual_and_b32 v12, 0xffff0000, v12
-; GFX11-TRUE16-NEXT: s_delay_alu instid0(VALU_DEP_1) | instskip(NEXT) | instid1(VALU_DEP_2)
-; GFX11-TRUE16-NEXT: v_dual_add_f32 v31, 0x40c00000, v31 :: v_dual_add_f32 v12, 0x40c00000, v12
-; GFX11-TRUE16-NEXT: v_mov_b16_e32 v5.l, v11.h
-; GFX11-TRUE16-NEXT: s_delay_alu instid0(VALU_DEP_2) | instskip(NEXT) | instid1(VALU_DEP_3)
-; GFX11-TRUE16-NEXT: v_bfe_u32 v30, v31, 16, 1
-; GFX11-TRUE16-NEXT: v_bfe_u32 v28, v12, 16, 1
-; GFX11-TRUE16-NEXT: v_cmp_u_f32_e32 vcc_lo, v31, v31
-; GFX11-TRUE16-NEXT: v_or_b32_e32 v33, 0x400000, v12
-; GFX11-TRUE16-NEXT: v_lshrrev_b32_e32 v11, 16, v27
-; GFX11-TRUE16-NEXT: v_add3_u32 v29, v30, v31, 0x7fff
-; GFX11-TRUE16-NEXT: v_add_f32_e32 v30, 0x40c00000, v32
-; GFX11-TRUE16-NEXT: v_or_b32_e32 v32, 0x400000, v31
-; GFX11-TRUE16-NEXT: v_add3_u32 v28, v28, v12, 0x7fff
-; GFX11-TRUE16-NEXT: v_bfe_u32 v31, v13, 16, 1
-; GFX11-TRUE16-NEXT: v_lshl_or_b32 v11, v5, 16, v11
-; GFX11-TRUE16-NEXT: v_bfe_u32 v34, v30, 16, 1
-; GFX11-TRUE16-NEXT: v_cndmask_b32_e32 v29, v29, v32, vcc_lo
+; GFX11-TRUE16-NEXT: v_and_b32_e32 v13, 0xffff0000, v13
+; GFX11-TRUE16-NEXT: v_cndmask_b32_e32 v11, v27, v30, vcc_lo
+; GFX11-TRUE16-NEXT: v_add_f32_e32 v27, 0x40c00000, v31
+; GFX11-TRUE16-NEXT: v_add3_u32 v30, v32, v29, 0x7fff
+; GFX11-TRUE16-NEXT: v_or_b32_e32 v31, 0x400000, v29
+; GFX11-TRUE16-NEXT: v_bfe_u32 v32, v12, 16, 1
+; GFX11-TRUE16-NEXT: v_cmp_u_f32_e32 vcc_lo, v29, v29
+; GFX11-TRUE16-NEXT: v_bfe_u32 v33, v27, 16, 1
+; GFX11-TRUE16-NEXT: v_add_f32_e32 v13, 0x40c00000, v13
+; GFX11-TRUE16-NEXT: v_or_b32_e32 v34, 0x400000, v27
+; GFX11-TRUE16-NEXT: v_cndmask_b32_e32 v29, v30, v31, vcc_lo
+; GFX11-TRUE16-NEXT: v_add3_u32 v30, v32, v12, 0x7fff
+; GFX11-TRUE16-NEXT: v_or_b32_e32 v31, 0x400000, v12
; GFX11-TRUE16-NEXT: v_cmp_u_f32_e32 vcc_lo, v12, v12
-; GFX11-TRUE16-NEXT: v_add3_u32 v31, v31, v13, 0x7fff
-; GFX11-TRUE16-NEXT: v_mov_b16_e32 v5.l, v6.h
-; GFX11-TRUE16-NEXT: v_lshrrev_b32_e32 v6, 16, v22
-; GFX11-TRUE16-NEXT: v_cndmask_b32_e32 v12, v28, v33, vcc_lo
-; GFX11-TRUE16-NEXT: v_cmp_u_f32_e32 vcc_lo, v13, v13
-; GFX11-TRUE16-NEXT: v_lshlrev_b32_e32 v32, 16, v14
-; GFX11-TRUE16-NEXT: v_add3_u32 v28, v34, v30, 0x7fff
-; GFX11-TRUE16-NEXT: v_and_b32_e32 v34, 0xffff0000, v15
-; GFX11-TRUE16-NEXT: v_lshlrev_b32_e32 v15, 16, v15
-; GFX11-TRUE16-NEXT: s_delay_alu instid0(VALU_DEP_4) | instskip(SKIP_1) | instid1(VALU_DEP_3)
-; GFX11-TRUE16-NEXT: v_dual_cndmask_b32 v13, v31, v35 :: v_dual_add_f32 v32, 0x40c00000, v32
-; GFX11-TRUE16-NEXT: v_or_b32_e32 v33, 0x400000, v30
-; GFX11-TRUE16-NEXT: v_dual_add_f32 v34, 0x40c00000, v34 :: v_dual_add_f32 v15, 0x40c00000, v15
-; GFX11-TRUE16-NEXT: s_delay_alu instid0(VALU_DEP_3) | instskip(NEXT) | instid1(VALU_DEP_4)
-; GFX11-TRUE16-NEXT: v_mov_b16_e32 v3.l, v13.h
-; GFX11-TRUE16-NEXT: v_bfe_u32 v36, v32, 16, 1
-; GFX11-TRUE16-NEXT: v_mov_b16_e32 v4.l, v12.h
-; GFX11-TRUE16-NEXT: s_delay_alu instid0(VALU_DEP_4)
-; GFX11-TRUE16-NEXT: v_bfe_u32 v38, v34, 16, 1
-; GFX11-TRUE16-NEXT: v_bfe_u32 v31, v15, 16, 1
-; GFX11-TRUE16-NEXT: v_or_b32_e32 v39, 0x400000, v15
-; GFX11-TRUE16-NEXT: v_add3_u32 v35, v36, v32, 0x7fff
-; GFX11-TRUE16-NEXT: v_cmp_u_f32_e32 vcc_lo, v34, v34
-; GFX11-TRUE16-NEXT: v_lshrrev_b32_e32 v12, 16, v29
-; GFX11-TRUE16-NEXT: v_add3_u32 v31, v31, v15, 0x7fff
+; GFX11-TRUE16-NEXT: v_add3_u32 v32, v33, v27, 0x7fff
+; GFX11-TRUE16-NEXT: v_lshlrev_b32_e32 v33, 16, v14
+; GFX11-TRUE16-NEXT: v_bfe_u32 v35, v13, 16, 1
; GFX11-TRUE16-NEXT: v_and_b32_e32 v14, 0xffff0000, v14
-; GFX11-TRUE16-NEXT: v_lshl_or_b32 v6, v5, 16, v6
-; GFX11-TRUE16-NEXT: v_lshl_or_b32 v5, v16, 16, v21
-; GFX11-TRUE16-NEXT: v_lshl_or_b32 v12, v4, 16, v12
-; GFX11-TRUE16-NEXT: v_mov_b16_e32 v4.l, v7.h
+; GFX11-TRUE16-NEXT: v_cndmask_b32_e32 v12, v30, v31, vcc_lo
+; GFX11-TRUE16-NEXT: v_cmp_u_f32_e32 vcc_lo, v27, v27
+; GFX11-TRUE16-NEXT: v_add_f32_e32 v30, 0x40c00000, v33
+; GFX11-TRUE16-NEXT: v_add3_u32 v31, v35, v13, 0x7fff
; GFX11-TRUE16-NEXT: v_add_f32_e32 v14, 0x40c00000, v14
-; GFX11-TRUE16-NEXT: v_lshrrev_b32_e32 v7, 16, v23
-; GFX11-TRUE16-NEXT: v_mov_b16_e32 v16.l, v18.h
-; GFX11-TRUE16-NEXT: v_mov_b16_e32 v21.l, v1.h
-; GFX11-TRUE16-NEXT: s_delay_alu instid0(VALU_DEP_4) | instskip(SKIP_2) | instid1(VALU_DEP_4)
-; GFX11-TRUE16-NEXT: v_bfe_u32 v37, v14, 16, 1
-; GFX11-TRUE16-NEXT: v_or_b32_e32 v48, 0x400000, v14
-; GFX11-TRUE16-NEXT: v_lshl_or_b32 v7, v4, 16, v7
-; GFX11-TRUE16-NEXT: v_lshl_or_b32 v0, v21, 16, v0
-; GFX11-TRUE16-NEXT: s_delay_alu instid0(VALU_DEP_4) | instskip(SKIP_2) | instid1(VALU_DEP_1)
-; GFX11-TRUE16-NEXT: v_add3_u32 v36, v37, v14, 0x7fff
-; GFX11-TRUE16-NEXT: v_add3_u32 v37, v38, v34, 0x7fff
-; GFX11-TRUE16-NEXT: v_or_b32_e32 v38, 0x400000, v34
-; GFX11-TRUE16-NEXT: v_cndmask_b32_e32 v34, v37, v38, vcc_lo
-; GFX11-TRUE16-NEXT: v_cmp_u_f32_e32 vcc_lo, v15, v15
-; GFX11-TRUE16-NEXT: s_delay_alu instid0(VALU_DEP_2) | instskip(SKIP_3) | instid1(VALU_DEP_3)
-; GFX11-TRUE16-NEXT: v_mov_b16_e32 v1.l, v34.h
-; GFX11-TRUE16-NEXT: v_cndmask_b32_e32 v15, v31, v39, vcc_lo
-; GFX11-TRUE16-NEXT: v_cmp_u_f32_e32 vcc_lo, v14, v14
-; GFX11-TRUE16-NEXT: v_or_b32_e32 v31, 0x400000, v32
-; GFX11-TRUE16-NEXT: v_lshrrev_b32_e32 v15, 16, v15
-; GFX11-TRUE16-NEXT: v_cndmask_b32_e32 v14, v36, v48, vcc_lo
-; GFX11-TRUE16-NEXT: v_cmp_u_f32_e32 vcc_lo, v32, v32
-; GFX11-TRUE16-NEXT: s_delay_alu instid0(VALU_DEP_3) | instskip(NEXT) | instid1(VALU_DEP_3)
-; GFX11-TRUE16-NEXT: v_lshl_or_b32 v15, v1, 16, v15
-; GFX11-TRUE16-NEXT: v_mov_b16_e32 v1.l, v14.h
-; GFX11-TRUE16-NEXT: v_cndmask_b32_e32 v31, v35, v31, vcc_lo
+; GFX11-TRUE16-NEXT: v_mov_b16_e32 v12.l, v29.h
+; GFX11-TRUE16-NEXT: v_cndmask_b32_e32 v27, v32, v34, vcc_lo
+; GFX11-TRUE16-NEXT: v_or_b32_e32 v32, 0x400000, v13
+; GFX11-TRUE16-NEXT: v_lshlrev_b32_e32 v34, 16, v15
+; GFX11-TRUE16-NEXT: v_cmp_u_f32_e32 vcc_lo, v13, v13
+; GFX11-TRUE16-NEXT: v_bfe_u32 v33, v30, 16, 1
+; GFX11-TRUE16-NEXT: v_and_b32_e32 v15, 0xffff0000, v15
+; GFX11-TRUE16-NEXT: v_or_b32_e32 v36, 0x400000, v14
+; GFX11-TRUE16-NEXT: v_mov_b16_e32 v9.l, v26.h
+; GFX11-TRUE16-NEXT: v_cndmask_b32_e32 v13, v31, v32, vcc_lo
+; GFX11-TRUE16-NEXT: v_add_f32_e32 v31, 0x40c00000, v34
+; GFX11-TRUE16-NEXT: v_add3_u32 v32, v33, v30, 0x7fff
+; GFX11-TRUE16-NEXT: v_or_b32_e32 v33, 0x400000, v30
+; GFX11-TRUE16-NEXT: v_bfe_u32 v34, v14, 16, 1
; GFX11-TRUE16-NEXT: v_cmp_u_f32_e32 vcc_lo, v30, v30
-; GFX11-TRUE16-NEXT: s_delay_alu instid0(VALU_DEP_2) | instskip(SKIP_1) | instid1(VALU_DEP_2)
-; GFX11-TRUE16-NEXT: v_lshrrev_b32_e32 v14, 16, v31
-; GFX11-TRUE16-NEXT: v_cndmask_b32_e32 v28, v28, v33, vcc_lo
-; GFX11-TRUE16-NEXT: v_lshl_or_b32 v14, v1, 16, v14
-; GFX11-TRUE16-NEXT: s_delay_alu instid0(VALU_DEP_2) | instskip(SKIP_2) | instid1(VALU_DEP_3)
-; GFX11-TRUE16-NEXT: v_lshrrev_b32_e32 v13, 16, v28
-; GFX11-TRUE16-NEXT: v_mov_b16_e32 v1.l, v9.h
-; GFX11-TRUE16-NEXT: v_lshrrev_b32_e32 v9, 16, v25
-; GFX11-TRUE16-NEXT: v_lshl_or_b32 v13, v3, 16, v13
-; GFX11-TRUE16-NEXT: v_mov_b16_e32 v3.l, v8.h
-; GFX11-TRUE16-NEXT: v_lshrrev_b32_e32 v8, 16, v24
-; GFX11-TRUE16-NEXT: s_delay_alu instid0(VALU_DEP_4)
-; GFX11-TRUE16-NEXT: v_lshl_or_b32 v9, v1, 16, v9
-; GFX11-TRUE16-NEXT: v_mov_b16_e32 v1.l, v4.h
-; GFX11-TRUE16-NEXT: v_lshrrev_b32_e32 v4, 16, v20
-; GFX11-TRUE16-NEXT: v_lshrrev_b32_e32 v20, 16, v2
-; GFX11-TRUE16-NEXT: v_lshl_or_b32 v8, v3, 16, v8
-; GFX11-TRUE16-NEXT: v_mov_b16_e32 v3.l, v3.h
-; GFX11-TRUE16-NEXT: v_lshl_or_b32 v2, v16, 16, v17
-; GFX11-TRUE16-NEXT: v_lshl_or_b32 v4, v1, 16, v4
-; GFX11-TRUE16-NEXT: v_lshl_or_b32 v1, v18, 16, v20
+; GFX11-TRUE16-NEXT: v_bfe_u32 v35, v31, 16, 1
+; GFX11-TRUE16-NEXT: v_add_f32_e32 v15, 0x40c00000, v15
+; GFX11-TRUE16-NEXT: v_mov_b16_e32 v13.l, v27.h
+; GFX11-TRUE16-NEXT: v_cndmask_b32_e32 v30, v32, v33, vcc_lo
+; GFX11-TRUE16-NEXT: v_add3_u32 v33, v34, v14, 0x7fff
+; GFX11-TRUE16-NEXT: v_add3_u32 v34, v35, v31, 0x7fff
+; GFX11-TRUE16-NEXT: v_or_b32_e32 v35, 0x400000, v31
+; GFX11-TRUE16-NEXT: v_cmp_u_f32_e32 vcc_lo, v31, v31
+; GFX11-TRUE16-NEXT: v_bfe_u32 v32, v15, 16, 1
+; GFX11-TRUE16-NEXT: v_or_b32_e32 v37, 0x400000, v15
+; GFX11-TRUE16-NEXT: s_delay_alu instid0(VALU_DEP_4) | instskip(SKIP_1) | instid1(VALU_DEP_4)
+; GFX11-TRUE16-NEXT: v_cndmask_b32_e32 v31, v34, v35, vcc_lo
+; GFX11-TRUE16-NEXT: v_cmp_u_f32_e32 vcc_lo, v14, v14
+; GFX11-TRUE16-NEXT: v_add3_u32 v32, v32, v15, 0x7fff
+; GFX11-TRUE16-NEXT: v_cndmask_b32_e32 v14, v33, v36, vcc_lo
+; GFX11-TRUE16-NEXT: v_cmp_u_f32_e32 vcc_lo, v15, v15
+; GFX11-TRUE16-NEXT: v_mov_b16_e32 v14.l, v30.h
; GFX11-TRUE16-NEXT: s_delay_alu instid0(VALU_DEP_4)
-; GFX11-TRUE16-NEXT: v_lshl_or_b32 v3, v3, 16, v19
+; GFX11-TRUE16-NEXT: v_cndmask_b32_e32 v15, v32, v37, vcc_lo
+; GFX11-TRUE16-NEXT: v_mov_b16_e32 v15.l, v31.h
+; GFX11-TRUE16-NEXT: v_mov_b16_e32 v11.l, v28.h
; GFX11-TRUE16-NEXT: .LBB94_2: ; %end
; GFX11-TRUE16-NEXT: s_or_b32 exec_lo, exec_lo, s0
; GFX11-TRUE16-NEXT: s_setpc_b64 s[30:31]
@@ -61605,325 +62909,620 @@ define inreg <32 x i16> @bitcast_v32bf16_to_v32i16_scalar(<32 x bfloat> inreg %a
; GFX9-NEXT: s_waitcnt vmcnt(0)
; GFX9-NEXT: s_setpc_b64 s[30:31]
;
-; GFX11-LABEL: bitcast_v32bf16_to_v32i16_scalar:
-; GFX11: ; %bb.0:
-; GFX11-NEXT: s_waitcnt vmcnt(0) expcnt(0) lgkmcnt(0)
-; GFX11-NEXT: s_mov_b32 s15, s3
-; GFX11-NEXT: s_mov_b32 s14, s2
-; GFX11-NEXT: s_mov_b32 s13, s1
-; GFX11-NEXT: s_mov_b32 s12, s0
-; GFX11-NEXT: s_cmp_lg_u32 s28, 0
-; GFX11-NEXT: s_mov_b32 s0, 0
-; GFX11-NEXT: s_cbranch_scc0 .LBB95_3
-; GFX11-NEXT: ; %bb.1: ; %Flow
-; GFX11-NEXT: s_and_not1_b32 vcc_lo, exec_lo, s0
-; GFX11-NEXT: s_cbranch_vccnz .LBB95_4
-; GFX11-NEXT: .LBB95_2: ; %cmp.true
-; GFX11-NEXT: s_and_b32 s0, s12, 0xffff0000
-; GFX11-NEXT: s_lshl_b32 s1, s12, 16
-; GFX11-NEXT: v_add_f32_e64 v0, 0x40c00000, s0
-; GFX11-NEXT: s_and_b32 s0, s13, 0xffff0000
-; GFX11-NEXT: v_add_f32_e64 v1, 0x40c00000, s1
-; GFX11-NEXT: v_add_f32_e64 v4, 0x40c00000, s0
-; GFX11-NEXT: s_lshl_b32 s1, s13, 16
-; GFX11-NEXT: v_bfe_u32 v2, v0, 16, 1
-; GFX11-NEXT: v_or_b32_e32 v6, 0x400000, v0
-; GFX11-NEXT: v_bfe_u32 v3, v1, 16, 1
-; GFX11-NEXT: v_bfe_u32 v7, v4, 16, 1
-; GFX11-NEXT: v_or_b32_e32 v8, 0x400000, v1
-; GFX11-NEXT: v_add_nc_u32_e32 v2, v2, v0
-; GFX11-NEXT: v_cmp_u_f32_e32 vcc_lo, v0, v0
-; GFX11-NEXT: v_add_f32_e64 v5, 0x40c00000, s1
-; GFX11-NEXT: v_add_nc_u32_e32 v7, v7, v4
-; GFX11-NEXT: s_and_b32 s2, s14, 0xffff0000
-; GFX11-NEXT: v_add_nc_u32_e32 v2, 0x7fff, v2
-; GFX11-NEXT: v_add_nc_u32_e32 v3, v3, v1
-; GFX11-NEXT: v_bfe_u32 v9, v5, 16, 1
-; GFX11-NEXT: s_lshl_b32 s0, s14, 16
-; GFX11-NEXT: s_lshl_b32 s1, s27, 16
-; GFX11-NEXT: v_cndmask_b32_e32 v0, v2, v6, vcc_lo
-; GFX11-NEXT: v_cmp_u_f32_e32 vcc_lo, v1, v1
-; GFX11-NEXT: v_add_nc_u32_e32 v2, 0x7fff, v7
-; GFX11-NEXT: v_or_b32_e32 v6, 0x400000, v4
-; GFX11-NEXT: v_add_nc_u32_e32 v3, 0x7fff, v3
-; GFX11-NEXT: s_delay_alu instid0(VALU_DEP_1) | instskip(SKIP_3) | instid1(VALU_DEP_3)
-; GFX11-NEXT: v_cndmask_b32_e32 v1, v3, v8, vcc_lo
-; GFX11-NEXT: v_add_f32_e64 v3, 0x40c00000, s2
-; GFX11-NEXT: v_cmp_u_f32_e32 vcc_lo, v4, v4
-; GFX11-NEXT: v_or_b32_e32 v4, 0x400000, v5
-; GFX11-NEXT: v_bfe_u32 v8, v3, 16, 1
-; GFX11-NEXT: v_cndmask_b32_e32 v16, v2, v6, vcc_lo
-; GFX11-NEXT: v_cmp_u_f32_e32 vcc_lo, v5, v5
-; GFX11-NEXT: v_add_nc_u32_e32 v7, v9, v5
-; GFX11-NEXT: v_add_f32_e64 v9, 0x40c00000, s0
-; GFX11-NEXT: v_add_nc_u32_e32 v6, v8, v3
-; GFX11-NEXT: s_and_b32 s0, s15, 0xffff0000
-; GFX11-NEXT: s_delay_alu instid0(VALU_DEP_3) | instskip(NEXT) | instid1(VALU_DEP_3)
-; GFX11-NEXT: v_add_nc_u32_e32 v2, 0x7fff, v7
-; GFX11-NEXT: v_bfe_u32 v7, v9, 16, 1
-; GFX11-NEXT: s_delay_alu instid0(VALU_DEP_3) | instskip(SKIP_1) | instid1(VALU_DEP_4)
-; GFX11-NEXT: v_add_nc_u32_e32 v5, 0x7fff, v6
-; GFX11-NEXT: v_or_b32_e32 v6, 0x400000, v3
-; GFX11-NEXT: v_cndmask_b32_e32 v2, v2, v4, vcc_lo
-; GFX11-NEXT: v_add_f32_e64 v4, 0x40c00000, s0
-; GFX11-NEXT: v_add_nc_u32_e32 v7, v7, v9
-; GFX11-NEXT: v_cmp_u_f32_e32 vcc_lo, v3, v3
-; GFX11-NEXT: s_lshl_b32 s0, s15, 16
-; GFX11-NEXT: s_delay_alu instid0(VALU_DEP_3)
-; GFX11-NEXT: v_bfe_u32 v8, v4, 16, 1
-; GFX11-NEXT: v_add_f32_e64 v10, 0x40c00000, s0
-; GFX11-NEXT: v_cndmask_b32_e32 v17, v5, v6, vcc_lo
-; GFX11-NEXT: v_add_nc_u32_e32 v3, 0x7fff, v7
-; GFX11-NEXT: v_or_b32_e32 v5, 0x400000, v9
-; GFX11-NEXT: v_cmp_u_f32_e32 vcc_lo, v9, v9
-; GFX11-NEXT: s_and_b32 s0, s16, 0xffff0000
-; GFX11-NEXT: v_add_nc_u32_e32 v6, v8, v4
-; GFX11-NEXT: v_bfe_u32 v7, v10, 16, 1
-; GFX11-NEXT: v_or_b32_e32 v8, 0x400000, v4
-; GFX11-NEXT: v_cndmask_b32_e32 v3, v3, v5, vcc_lo
-; GFX11-NEXT: v_add_f32_e64 v5, 0x40c00000, s0
-; GFX11-NEXT: s_lshl_b32 s0, s16, 16
-; GFX11-NEXT: v_add_nc_u32_e32 v6, 0x7fff, v6
-; GFX11-NEXT: v_add_nc_u32_e32 v7, v7, v10
-; GFX11-NEXT: v_add_f32_e64 v11, 0x40c00000, s0
-; GFX11-NEXT: v_bfe_u32 v9, v5, 16, 1
-; GFX11-NEXT: v_cmp_u_f32_e32 vcc_lo, v4, v4
-; GFX11-NEXT: s_and_b32 s0, s17, 0xffff0000
-; GFX11-NEXT: v_add_nc_u32_e32 v4, 0x7fff, v7
-; GFX11-NEXT: s_delay_alu instid0(VALU_DEP_3) | instskip(SKIP_3) | instid1(VALU_DEP_4)
-; GFX11-NEXT: v_dual_cndmask_b32 v18, v6, v8 :: v_dual_add_nc_u32 v7, v9, v5
-; GFX11-NEXT: v_or_b32_e32 v6, 0x400000, v10
-; GFX11-NEXT: v_bfe_u32 v8, v11, 16, 1
-; GFX11-NEXT: v_cmp_u_f32_e32 vcc_lo, v10, v10
-; GFX11-NEXT: v_add_nc_u32_e32 v7, 0x7fff, v7
-; GFX11-NEXT: v_or_b32_e32 v9, 0x400000, v5
-; GFX11-NEXT: s_delay_alu instid0(VALU_DEP_4)
-; GFX11-NEXT: v_add_nc_u32_e32 v8, v8, v11
-; GFX11-NEXT: v_cndmask_b32_e32 v4, v4, v6, vcc_lo
-; GFX11-NEXT: v_add_f32_e64 v6, 0x40c00000, s0
-; GFX11-NEXT: v_cmp_u_f32_e32 vcc_lo, v5, v5
-; GFX11-NEXT: s_lshl_b32 s0, s17, 16
-; GFX11-NEXT: v_add_nc_u32_e32 v5, 0x7fff, v8
-; GFX11-NEXT: v_add_f32_e64 v12, 0x40c00000, s0
-; GFX11-NEXT: v_bfe_u32 v10, v6, 16, 1
-; GFX11-NEXT: v_cndmask_b32_e32 v19, v7, v9, vcc_lo
-; GFX11-NEXT: v_or_b32_e32 v7, 0x400000, v11
-; GFX11-NEXT: v_cmp_u_f32_e32 vcc_lo, v11, v11
-; GFX11-NEXT: s_and_b32 s0, s18, 0xffff0000
-; GFX11-NEXT: v_add_nc_u32_e32 v8, v10, v6
-; GFX11-NEXT: v_bfe_u32 v9, v12, 16, 1
-; GFX11-NEXT: v_cndmask_b32_e32 v20, v5, v7, vcc_lo
-; GFX11-NEXT: v_add_f32_e64 v5, 0x40c00000, s0
-; GFX11-NEXT: s_lshl_b32 s0, s18, 16
-; GFX11-NEXT: v_add_nc_u32_e32 v7, 0x7fff, v8
-; GFX11-NEXT: v_or_b32_e32 v8, 0x400000, v6
-; GFX11-NEXT: v_add_nc_u32_e32 v9, v9, v12
-; GFX11-NEXT: v_bfe_u32 v10, v5, 16, 1
-; GFX11-NEXT: v_add_f32_e64 v11, 0x40c00000, s0
-; GFX11-NEXT: v_cmp_u_f32_e32 vcc_lo, v6, v6
-; GFX11-NEXT: s_and_b32 s0, s19, 0xffff0000
-; GFX11-NEXT: v_add_nc_u32_e32 v6, 0x7fff, v9
-; GFX11-NEXT: v_lshrrev_b32_e32 v20, 16, v20
-; GFX11-NEXT: v_bfe_u32 v9, v11, 16, 1
-; GFX11-NEXT: v_cndmask_b32_e32 v21, v7, v8, vcc_lo
-; GFX11-NEXT: v_or_b32_e32 v7, 0x400000, v12
-; GFX11-NEXT: v_add_nc_u32_e32 v8, v10, v5
-; GFX11-NEXT: v_cmp_u_f32_e32 vcc_lo, v12, v12
-; GFX11-NEXT: v_or_b32_e32 v10, 0x400000, v5
-; GFX11-NEXT: v_add_nc_u32_e32 v9, v9, v11
-; GFX11-NEXT: s_delay_alu instid0(VALU_DEP_4) | instskip(SKIP_4) | instid1(SALU_CYCLE_1)
-; GFX11-NEXT: v_add_nc_u32_e32 v8, 0x7fff, v8
-; GFX11-NEXT: v_cndmask_b32_e32 v6, v6, v7, vcc_lo
-; GFX11-NEXT: v_add_f32_e64 v7, 0x40c00000, s0
-; GFX11-NEXT: v_cmp_u_f32_e32 vcc_lo, v5, v5
-; GFX11-NEXT: s_lshl_b32 s0, s19, 16
-; GFX11-NEXT: v_add_f32_e64 v13, 0x40c00000, s0
-; GFX11-NEXT: s_delay_alu instid0(VALU_DEP_3)
-; GFX11-NEXT: v_bfe_u32 v12, v7, 16, 1
-; GFX11-NEXT: v_dual_cndmask_b32 v5, v8, v10 :: v_dual_add_nc_u32 v8, 0x7fff, v9
-; GFX11-NEXT: v_or_b32_e32 v9, 0x400000, v11
-; GFX11-NEXT: v_cmp_u_f32_e32 vcc_lo, v11, v11
-; GFX11-NEXT: s_and_b32 s0, s20, 0xffff0000
-; GFX11-NEXT: v_add_nc_u32_e32 v10, v12, v7
-; GFX11-NEXT: v_bfe_u32 v12, v13, 16, 1
-; GFX11-NEXT: v_or_b32_e32 v11, 0x400000, v7
-; GFX11-NEXT: v_cndmask_b32_e32 v8, v8, v9, vcc_lo
-; GFX11-NEXT: v_add_f32_e64 v9, 0x40c00000, s0
-; GFX11-NEXT: s_lshl_b32 s0, s20, 16
-; GFX11-NEXT: v_add_nc_u32_e32 v10, 0x7fff, v10
-; GFX11-NEXT: v_add_nc_u32_e32 v12, v12, v13
-; GFX11-NEXT: v_add_f32_e64 v15, 0x40c00000, s0
-; GFX11-NEXT: v_bfe_u32 v14, v9, 16, 1
-; GFX11-NEXT: v_cmp_u_f32_e32 vcc_lo, v7, v7
-; GFX11-NEXT: s_and_b32 s0, s21, 0xffff0000
-; GFX11-NEXT: v_dual_cndmask_b32 v7, v10, v11 :: v_dual_add_nc_u32 v10, 0x7fff, v12
-; GFX11-NEXT: v_or_b32_e32 v11, 0x400000, v13
-; GFX11-NEXT: v_add_nc_u32_e32 v12, v14, v9
-; GFX11-NEXT: v_bfe_u32 v14, v15, 16, 1
-; GFX11-NEXT: v_cmp_u_f32_e32 vcc_lo, v13, v13
-; GFX11-NEXT: s_delay_alu instid0(VALU_DEP_2)
-; GFX11-NEXT: v_add_nc_u32_e32 v13, v14, v15
-; GFX11-NEXT: v_cndmask_b32_e32 v22, v10, v11, vcc_lo
-; GFX11-NEXT: v_add_f32_e64 v10, 0x40c00000, s0
-; GFX11-NEXT: v_add_nc_u32_e32 v11, 0x7fff, v12
-; GFX11-NEXT: v_or_b32_e32 v12, 0x400000, v9
-; GFX11-NEXT: v_cmp_u_f32_e32 vcc_lo, v9, v9
-; GFX11-NEXT: s_lshl_b32 s0, s21, 16
-; GFX11-NEXT: v_bfe_u32 v14, v10, 16, 1
-; GFX11-NEXT: v_add_f32_e64 v23, 0x40c00000, s0
-; GFX11-NEXT: v_dual_cndmask_b32 v24, v11, v12 :: v_dual_add_nc_u32 v9, 0x7fff, v13
-; GFX11-NEXT: v_or_b32_e32 v11, 0x400000, v15
-; GFX11-NEXT: v_cmp_u_f32_e32 vcc_lo, v15, v15
-; GFX11-NEXT: s_and_b32 s0, s22, 0xffff0000
-; GFX11-NEXT: v_add_nc_u32_e32 v12, v14, v10
-; GFX11-NEXT: v_bfe_u32 v13, v23, 16, 1
-; GFX11-NEXT: v_or_b32_e32 v14, 0x400000, v10
-; GFX11-NEXT: v_cndmask_b32_e32 v9, v9, v11, vcc_lo
-; GFX11-NEXT: v_add_f32_e64 v11, 0x40c00000, s0
-; GFX11-NEXT: s_lshl_b32 s0, s22, 16
-; GFX11-NEXT: v_add_nc_u32_e32 v12, 0x7fff, v12
-; GFX11-NEXT: v_add_nc_u32_e32 v13, v13, v23
-; GFX11-NEXT: v_add_f32_e64 v25, 0x40c00000, s0
-; GFX11-NEXT: v_bfe_u32 v15, v11, 16, 1
-; GFX11-NEXT: v_cmp_u_f32_e32 vcc_lo, v10, v10
-; GFX11-NEXT: s_and_b32 s0, s23, 0xffff0000
-; GFX11-NEXT: v_add_nc_u32_e32 v10, 0x7fff, v13
-; GFX11-NEXT: v_bfe_u32 v13, v25, 16, 1
-; GFX11-NEXT: v_or_b32_e32 v27, 0x400000, v11
-; GFX11-NEXT: v_cndmask_b32_e32 v26, v12, v14, vcc_lo
-; GFX11-NEXT: v_add_nc_u32_e32 v12, v15, v11
-; GFX11-NEXT: v_add_f32_e64 v14, 0x40c00000, s0
-; GFX11-NEXT: v_or_b32_e32 v15, 0x400000, v23
-; GFX11-NEXT: v_cmp_u_f32_e32 vcc_lo, v23, v23
-; GFX11-NEXT: v_add_nc_u32_e32 v13, v13, v25
-; GFX11-NEXT: v_add_nc_u32_e32 v12, 0x7fff, v12
-; GFX11-NEXT: v_bfe_u32 v28, v14, 16, 1
-; GFX11-NEXT: s_lshl_b32 s0, s23, 16
-; GFX11-NEXT: v_cndmask_b32_e32 v23, v10, v15, vcc_lo
-; GFX11-NEXT: v_cmp_u_f32_e32 vcc_lo, v11, v11
-; GFX11-NEXT: v_add_nc_u32_e32 v11, 0x7fff, v13
-; GFX11-NEXT: v_or_b32_e32 v13, 0x400000, v25
-; GFX11-NEXT: v_add_f32_e64 v15, 0x40c00000, s0
-; GFX11-NEXT: s_and_b32 s0, s24, 0xffff0000
-; GFX11-NEXT: v_cndmask_b32_e32 v10, v12, v27, vcc_lo
-; GFX11-NEXT: v_add_nc_u32_e32 v12, v28, v14
-; GFX11-NEXT: v_cmp_u_f32_e32 vcc_lo, v25, v25
-; GFX11-NEXT: v_add_f32_e64 v27, 0x40c00000, s0
-; GFX11-NEXT: v_or_b32_e32 v28, 0x400000, v14
-; GFX11-NEXT: v_bfe_u32 v29, v15, 16, 1
-; GFX11-NEXT: v_dual_cndmask_b32 v11, v11, v13 :: v_dual_add_nc_u32 v12, 0x7fff, v12
-; GFX11-NEXT: v_cmp_u_f32_e32 vcc_lo, v14, v14
-; GFX11-NEXT: s_lshl_b32 s0, s24, 16
-; GFX11-NEXT: v_bfe_u32 v13, v27, 16, 1
-; GFX11-NEXT: v_add_f32_e64 v14, 0x40c00000, s0
-; GFX11-NEXT: s_and_b32 s0, s25, 0xffff0000
-; GFX11-NEXT: v_dual_cndmask_b32 v25, v12, v28 :: v_dual_add_nc_u32 v12, v29, v15
-; GFX11-NEXT: s_delay_alu instid0(VALU_DEP_3)
-; GFX11-NEXT: v_add_nc_u32_e32 v13, v13, v27
-; GFX11-NEXT: v_add_f32_e64 v28, 0x40c00000, s0
-; GFX11-NEXT: v_or_b32_e32 v29, 0x400000, v15
-; GFX11-NEXT: v_bfe_u32 v30, v14, 16, 1
-; GFX11-NEXT: v_add_nc_u32_e32 v12, 0x7fff, v12
-; GFX11-NEXT: v_cmp_u_f32_e32 vcc_lo, v15, v15
-; GFX11-NEXT: v_add_nc_u32_e32 v13, 0x7fff, v13
-; GFX11-NEXT: v_or_b32_e32 v31, 0x400000, v27
-; GFX11-NEXT: v_bfe_u32 v32, v28, 16, 1
-; GFX11-NEXT: v_dual_cndmask_b32 v12, v12, v29 :: v_dual_add_nc_u32 v15, v30, v14
-; GFX11-NEXT: v_cmp_u_f32_e32 vcc_lo, v27, v27
-; GFX11-NEXT: v_or_b32_e32 v30, 0x400000, v14
-; GFX11-NEXT: s_lshl_b32 s0, s25, 16
-; GFX11-NEXT: s_delay_alu instid0(VALU_DEP_3)
-; GFX11-NEXT: v_add_nc_u32_e32 v15, 0x7fff, v15
-; GFX11-NEXT: v_add_f32_e64 v29, 0x40c00000, s0
-; GFX11-NEXT: v_cndmask_b32_e32 v27, v13, v31, vcc_lo
-; GFX11-NEXT: v_add_nc_u32_e32 v13, v32, v28
-; GFX11-NEXT: v_cmp_u_f32_e32 vcc_lo, v14, v14
-; GFX11-NEXT: v_or_b32_e32 v31, 0x400000, v28
-; GFX11-NEXT: s_and_b32 s0, s26, 0xffff0000
-; GFX11-NEXT: v_bfe_u32 v32, v29, 16, 1
-; GFX11-NEXT: v_dual_cndmask_b32 v14, v15, v30 :: v_dual_add_nc_u32 v13, 0x7fff, v13
-; GFX11-NEXT: v_cmp_u_f32_e32 vcc_lo, v28, v28
-; GFX11-NEXT: v_add_f32_e64 v33, 0x40c00000, s0
-; GFX11-NEXT: s_lshl_b32 s0, s26, 16
-; GFX11-NEXT: v_add_nc_u32_e32 v15, v32, v29
-; GFX11-NEXT: v_add_f32_e64 v30, 0x40c00000, s0
-; GFX11-NEXT: v_cndmask_b32_e32 v13, v13, v31, vcc_lo
-; GFX11-NEXT: v_add_f32_e64 v31, 0x40c00000, s1
-; GFX11-NEXT: s_and_b32 s0, s27, 0xffff0000
-; GFX11-NEXT: v_bfe_u32 v28, v33, 16, 1
-; GFX11-NEXT: v_add_f32_e64 v32, 0x40c00000, s0
-; GFX11-NEXT: v_bfe_u32 v34, v30, 16, 1
-; GFX11-NEXT: v_bfe_u32 v35, v31, 16, 1
-; GFX11-NEXT: v_or_b32_e32 v39, 0x400000, v31
-; GFX11-NEXT: v_cmp_u_f32_e32 vcc_lo, v31, v31
-; GFX11-NEXT: v_bfe_u32 v37, v32, 16, 1
-; GFX11-NEXT: v_add_nc_u32_e32 v34, v34, v30
-; GFX11-NEXT: v_add_nc_u32_e32 v35, v35, v31
-; GFX11-NEXT: v_or_b32_e32 v48, 0x400000, v30
-; GFX11-NEXT: v_or_b32_e32 v49, 0x400000, v32
-; GFX11-NEXT: v_add_nc_u32_e32 v37, v37, v32
-; GFX11-NEXT: v_add_nc_u32_e32 v34, 0x7fff, v34
-; GFX11-NEXT: v_add_nc_u32_e32 v35, 0x7fff, v35
-; GFX11-NEXT: v_add_nc_u32_e32 v15, 0x7fff, v15
-; GFX11-NEXT: v_add_nc_u32_e32 v28, v28, v33
-; GFX11-NEXT: v_add_nc_u32_e32 v37, 0x7fff, v37
-; GFX11-NEXT: v_or_b32_e32 v36, 0x400000, v29
-; GFX11-NEXT: v_cndmask_b32_e32 v31, v35, v39, vcc_lo
-; GFX11-NEXT: v_cmp_u_f32_e32 vcc_lo, v30, v30
-; GFX11-NEXT: v_add_nc_u32_e32 v28, 0x7fff, v28
-; GFX11-NEXT: v_or_b32_e32 v38, 0x400000, v33
-; GFX11-NEXT: v_lshrrev_b32_e32 v23, 16, v23
-; GFX11-NEXT: v_lshrrev_b32_e32 v31, 16, v31
-; GFX11-NEXT: v_cndmask_b32_e32 v30, v34, v48, vcc_lo
-; GFX11-NEXT: v_cmp_u_f32_e32 vcc_lo, v32, v32
-; GFX11-NEXT: v_lshrrev_b32_e32 v22, 16, v22
-; GFX11-NEXT: s_delay_alu instid0(VALU_DEP_3) | instskip(SKIP_2) | instid1(VALU_DEP_4)
-; GFX11-NEXT: v_lshrrev_b32_e32 v30, 16, v30
-; GFX11-NEXT: v_cndmask_b32_e32 v32, v37, v49, vcc_lo
-; GFX11-NEXT: v_cmp_u_f32_e32 vcc_lo, v29, v29
-; GFX11-NEXT: v_and_or_b32 v7, 0xffff0000, v7, v22
-; GFX11-NEXT: v_lshrrev_b32_e32 v22, 16, v3
-; GFX11-NEXT: v_cndmask_b32_e32 v29, v15, v36, vcc_lo
-; GFX11-NEXT: v_cmp_u_f32_e32 vcc_lo, v33, v33
-; GFX11-NEXT: v_and_or_b32 v15, 0xffff0000, v32, v31
-; GFX11-NEXT: v_lshrrev_b32_e32 v31, 16, v14
-; GFX11-NEXT: v_lshrrev_b32_e32 v32, 16, v12
-; GFX11-NEXT: v_lshrrev_b32_e32 v33, 16, v11
-; GFX11-NEXT: v_cndmask_b32_e32 v28, v28, v38, vcc_lo
-; GFX11-NEXT: v_lshrrev_b32_e32 v29, 16, v29
-; GFX11-NEXT: v_and_or_b32 v12, 0xffff0000, v27, v31
-; GFX11-NEXT: v_and_or_b32 v11, 0xffff0000, v25, v32
-; GFX11-NEXT: v_lshrrev_b32_e32 v25, 16, v9
-; GFX11-NEXT: v_and_or_b32 v14, 0xffff0000, v28, v30
-; GFX11-NEXT: v_lshrrev_b32_e32 v27, 16, v8
-; GFX11-NEXT: v_lshrrev_b32_e32 v28, 16, v6
-; GFX11-NEXT: v_and_or_b32 v9, 0xffff0000, v26, v23
-; GFX11-NEXT: v_and_or_b32 v8, 0xffff0000, v24, v25
-; GFX11-NEXT: v_lshrrev_b32_e32 v23, 16, v2
-; GFX11-NEXT: v_and_or_b32 v6, 0xffff0000, v5, v27
-; GFX11-NEXT: v_and_or_b32 v5, 0xffff0000, v21, v28
-; GFX11-NEXT: v_lshrrev_b32_e32 v21, 16, v4
-; GFX11-NEXT: v_lshrrev_b32_e32 v24, 16, v1
-; GFX11-NEXT: v_and_or_b32 v13, 0xffff0000, v13, v29
-; GFX11-NEXT: v_and_or_b32 v10, 0xffff0000, v10, v33
-; GFX11-NEXT: v_and_or_b32 v4, 0xffff0000, v19, v20
-; GFX11-NEXT: v_and_or_b32 v3, 0xffff0000, v18, v21
-; GFX11-NEXT: v_and_or_b32 v2, 0xffff0000, v17, v22
-; GFX11-NEXT: v_and_or_b32 v1, 0xffff0000, v16, v23
-; GFX11-NEXT: v_and_or_b32 v0, 0xffff0000, v0, v24
-; GFX11-NEXT: s_setpc_b64 s[30:31]
-; GFX11-NEXT: .LBB95_3:
-; GFX11-NEXT: s_branch .LBB95_2
-; GFX11-NEXT: .LBB95_4:
-; GFX11-NEXT: v_dual_mov_b32 v0, s12 :: v_dual_mov_b32 v1, s13
-; GFX11-NEXT: v_dual_mov_b32 v2, s14 :: v_dual_mov_b32 v3, s15
-; GFX11-NEXT: v_dual_mov_b32 v4, s16 :: v_dual_mov_b32 v5, s17
-; GFX11-NEXT: v_dual_mov_b32 v6, s18 :: v_dual_mov_b32 v7, s19
-; GFX11-NEXT: v_dual_mov_b32 v8, s20 :: v_dual_mov_b32 v9, s21
-; GFX11-NEXT: v_dual_mov_b32 v10, s22 :: v_dual_mov_b32 v11, s23
-; GFX11-NEXT: v_dual_mov_b32 v12, s24 :: v_dual_mov_b32 v13, s25
-; GFX11-NEXT: v_dual_mov_b32 v14, s26 :: v_dual_mov_b32 v15, s27
-; GFX11-NEXT: s_setpc_b64 s[30:31]
+; GFX11-TRUE16-LABEL: bitcast_v32bf16_to_v32i16_scalar:
+; GFX11-TRUE16: ; %bb.0:
+; GFX11-TRUE16-NEXT: s_waitcnt vmcnt(0) expcnt(0) lgkmcnt(0)
+; GFX11-TRUE16-NEXT: s_mov_b32 s15, s3
+; GFX11-TRUE16-NEXT: s_mov_b32 s14, s2
+; GFX11-TRUE16-NEXT: s_mov_b32 s13, s1
+; GFX11-TRUE16-NEXT: s_mov_b32 s12, s0
+; GFX11-TRUE16-NEXT: s_cmp_lg_u32 s28, 0
+; GFX11-TRUE16-NEXT: s_mov_b32 s0, 0
+; GFX11-TRUE16-NEXT: s_cbranch_scc0 .LBB95_3
+; GFX11-TRUE16-NEXT: ; %bb.1: ; %Flow
+; GFX11-TRUE16-NEXT: s_and_not1_b32 vcc_lo, exec_lo, s0
+; GFX11-TRUE16-NEXT: s_cbranch_vccnz .LBB95_4
+; GFX11-TRUE16-NEXT: .LBB95_2: ; %cmp.true
+; GFX11-TRUE16-NEXT: s_and_b32 s0, s12, 0xffff0000
+; GFX11-TRUE16-NEXT: s_lshl_b32 s1, s12, 16
+; GFX11-TRUE16-NEXT: v_add_f32_e64 v0, 0x40c00000, s0
+; GFX11-TRUE16-NEXT: v_add_f32_e64 v1, 0x40c00000, s1
+; GFX11-TRUE16-NEXT: s_and_b32 s0, s13, 0xffff0000
+; GFX11-TRUE16-NEXT: s_and_b32 s1, s25, 0xffff0000
+; GFX11-TRUE16-NEXT: v_add_f32_e64 v2, 0x40c00000, s0
+; GFX11-TRUE16-NEXT: v_bfe_u32 v3, v0, 16, 1
+; GFX11-TRUE16-NEXT: s_lshl_b32 s0, s13, 16
+; GFX11-TRUE16-NEXT: v_bfe_u32 v5, v1, 16, 1
+; GFX11-TRUE16-NEXT: v_add_f32_e64 v4, 0x40c00000, s0
+; GFX11-TRUE16-NEXT: v_bfe_u32 v6, v2, 16, 1
+; GFX11-TRUE16-NEXT: v_add_nc_u32_e32 v3, v3, v0
+; GFX11-TRUE16-NEXT: v_or_b32_e32 v7, 0x400000, v0
+; GFX11-TRUE16-NEXT: v_or_b32_e32 v8, 0x400000, v1
+; GFX11-TRUE16-NEXT: v_bfe_u32 v10, v4, 16, 1
+; GFX11-TRUE16-NEXT: v_cmp_u_f32_e32 vcc_lo, v0, v0
+; GFX11-TRUE16-NEXT: v_add_nc_u32_e32 v3, 0x7fff, v3
+; GFX11-TRUE16-NEXT: v_add_nc_u32_e32 v5, v5, v1
+; GFX11-TRUE16-NEXT: v_add_nc_u32_e32 v6, v6, v2
+; GFX11-TRUE16-NEXT: s_and_b32 s0, s14, 0xffff0000
+; GFX11-TRUE16-NEXT: v_or_b32_e32 v9, 0x400000, v2
+; GFX11-TRUE16-NEXT: s_delay_alu instid0(VALU_DEP_3)
+; GFX11-TRUE16-NEXT: v_dual_cndmask_b32 v0, v3, v7 :: v_dual_add_nc_u32 v5, 0x7fff, v5
+; GFX11-TRUE16-NEXT: v_add_nc_u32_e32 v3, v10, v4
+; GFX11-TRUE16-NEXT: v_cmp_u_f32_e32 vcc_lo, v1, v1
+; GFX11-TRUE16-NEXT: v_add_f32_e64 v7, 0x40c00000, s0
+; GFX11-TRUE16-NEXT: s_lshl_b32 s0, s14, 16
+; GFX11-TRUE16-NEXT: v_cndmask_b32_e32 v16, v5, v8, vcc_lo
+; GFX11-TRUE16-NEXT: v_cmp_u_f32_e32 vcc_lo, v2, v2
+; GFX11-TRUE16-NEXT: v_add_nc_u32_e32 v2, 0x7fff, v3
+; GFX11-TRUE16-NEXT: v_or_b32_e32 v5, 0x400000, v4
+; GFX11-TRUE16-NEXT: v_add_nc_u32_e32 v6, 0x7fff, v6
+; GFX11-TRUE16-NEXT: v_bfe_u32 v3, v7, 16, 1
+; GFX11-TRUE16-NEXT: v_mov_b16_e32 v0.l, v16.h
+; GFX11-TRUE16-NEXT: s_delay_alu instid0(VALU_DEP_3)
+; GFX11-TRUE16-NEXT: v_cndmask_b32_e32 v1, v6, v9, vcc_lo
+; GFX11-TRUE16-NEXT: v_add_f32_e64 v6, 0x40c00000, s0
+; GFX11-TRUE16-NEXT: v_cmp_u_f32_e32 vcc_lo, v4, v4
+; GFX11-TRUE16-NEXT: s_and_b32 s0, s15, 0xffff0000
+; GFX11-TRUE16-NEXT: v_add_nc_u32_e32 v3, v3, v7
+; GFX11-TRUE16-NEXT: v_add_f32_e64 v8, 0x40c00000, s0
+; GFX11-TRUE16-NEXT: s_lshl_b32 s0, s15, 16
+; GFX11-TRUE16-NEXT: v_cndmask_b32_e32 v17, v2, v5, vcc_lo
+; GFX11-TRUE16-NEXT: v_bfe_u32 v2, v6, 16, 1
+; GFX11-TRUE16-NEXT: v_add_nc_u32_e32 v3, 0x7fff, v3
+; GFX11-TRUE16-NEXT: v_bfe_u32 v4, v8, 16, 1
+; GFX11-TRUE16-NEXT: v_or_b32_e32 v5, 0x400000, v7
+; GFX11-TRUE16-NEXT: v_add_f32_e64 v10, 0x40c00000, s0
+; GFX11-TRUE16-NEXT: v_cmp_u_f32_e32 vcc_lo, v7, v7
+; GFX11-TRUE16-NEXT: v_add_nc_u32_e32 v9, v2, v6
+; GFX11-TRUE16-NEXT: v_add_nc_u32_e32 v4, v4, v8
+; GFX11-TRUE16-NEXT: v_or_b32_e32 v7, 0x400000, v6
+; GFX11-TRUE16-NEXT: s_and_b32 s0, s16, 0xffff0000
+; GFX11-TRUE16-NEXT: v_cndmask_b32_e32 v2, v3, v5, vcc_lo
+; GFX11-TRUE16-NEXT: v_add_nc_u32_e32 v3, 0x7fff, v9
+; GFX11-TRUE16-NEXT: v_bfe_u32 v5, v10, 16, 1
+; GFX11-TRUE16-NEXT: v_cmp_u_f32_e32 vcc_lo, v6, v6
+; GFX11-TRUE16-NEXT: v_add_nc_u32_e32 v4, 0x7fff, v4
+; GFX11-TRUE16-NEXT: v_or_b32_e32 v9, 0x400000, v8
+; GFX11-TRUE16-NEXT: v_add_f32_e64 v11, 0x40c00000, s0
+; GFX11-TRUE16-NEXT: v_dual_cndmask_b32 v18, v3, v7 :: v_dual_add_nc_u32 v5, v5, v10
+; GFX11-TRUE16-NEXT: v_cmp_u_f32_e32 vcc_lo, v8, v8
+; GFX11-TRUE16-NEXT: s_lshl_b32 s0, s16, 16
+; GFX11-TRUE16-NEXT: v_or_b32_e32 v6, 0x400000, v10
+; GFX11-TRUE16-NEXT: v_add_f32_e64 v7, 0x40c00000, s0
+; GFX11-TRUE16-NEXT: s_and_b32 s0, s17, 0xffff0000
+; GFX11-TRUE16-NEXT: v_cndmask_b32_e32 v3, v4, v9, vcc_lo
+; GFX11-TRUE16-NEXT: v_add_nc_u32_e32 v4, 0x7fff, v5
+; GFX11-TRUE16-NEXT: v_bfe_u32 v5, v11, 16, 1
+; GFX11-TRUE16-NEXT: v_cmp_u_f32_e32 vcc_lo, v10, v10
+; GFX11-TRUE16-NEXT: v_add_f32_e64 v8, 0x40c00000, s0
+; GFX11-TRUE16-NEXT: s_lshl_b32 s0, s17, 16
+; GFX11-TRUE16-NEXT: v_or_b32_e32 v9, 0x400000, v11
+; GFX11-TRUE16-NEXT: v_add_nc_u32_e32 v5, v5, v11
+; GFX11-TRUE16-NEXT: v_cndmask_b32_e32 v19, v4, v6, vcc_lo
+; GFX11-TRUE16-NEXT: v_bfe_u32 v4, v7, 16, 1
+; GFX11-TRUE16-NEXT: v_bfe_u32 v6, v8, 16, 1
+; GFX11-TRUE16-NEXT: v_add_f32_e64 v12, 0x40c00000, s0
+; GFX11-TRUE16-NEXT: v_add_nc_u32_e32 v5, 0x7fff, v5
+; GFX11-TRUE16-NEXT: v_cmp_u_f32_e32 vcc_lo, v11, v11
+; GFX11-TRUE16-NEXT: v_add_nc_u32_e32 v10, v4, v7
+; GFX11-TRUE16-NEXT: v_add_nc_u32_e32 v6, v6, v8
+; GFX11-TRUE16-NEXT: s_and_b32 s0, s18, 0xffff0000
+; GFX11-TRUE16-NEXT: v_or_b32_e32 v11, 0x400000, v8
+; GFX11-TRUE16-NEXT: s_delay_alu instid0(VALU_DEP_3)
+; GFX11-TRUE16-NEXT: v_dual_cndmask_b32 v4, v5, v9 :: v_dual_add_nc_u32 v5, 0x7fff, v10
+; GFX11-TRUE16-NEXT: v_bfe_u32 v9, v12, 16, 1
+; GFX11-TRUE16-NEXT: v_or_b32_e32 v10, 0x400000, v7
+; GFX11-TRUE16-NEXT: v_cmp_u_f32_e32 vcc_lo, v7, v7
+; GFX11-TRUE16-NEXT: v_add_nc_u32_e32 v6, 0x7fff, v6
+; GFX11-TRUE16-NEXT: v_add_f32_e64 v13, 0x40c00000, s0
+; GFX11-TRUE16-NEXT: v_add_nc_u32_e32 v9, v9, v12
+; GFX11-TRUE16-NEXT: s_lshl_b32 s0, s18, 16
+; GFX11-TRUE16-NEXT: v_cndmask_b32_e32 v20, v5, v10, vcc_lo
+; GFX11-TRUE16-NEXT: v_cmp_u_f32_e32 vcc_lo, v8, v8
+; GFX11-TRUE16-NEXT: v_bfe_u32 v7, v13, 16, 1
+; GFX11-TRUE16-NEXT: v_or_b32_e32 v8, 0x400000, v12
+; GFX11-TRUE16-NEXT: v_mov_b16_e32 v3.l, v19.h
+; GFX11-TRUE16-NEXT: v_mov_b16_e32 v4.l, v20.h
+; GFX11-TRUE16-NEXT: v_dual_cndmask_b32 v5, v6, v11 :: v_dual_add_nc_u32 v6, 0x7fff, v9
+; GFX11-TRUE16-NEXT: v_add_f32_e64 v9, 0x40c00000, s0
+; GFX11-TRUE16-NEXT: v_cmp_u_f32_e32 vcc_lo, v12, v12
+; GFX11-TRUE16-NEXT: s_and_b32 s0, s19, 0xffff0000
+; GFX11-TRUE16-NEXT: v_add_nc_u32_e32 v7, v7, v13
+; GFX11-TRUE16-NEXT: v_add_f32_e64 v10, 0x40c00000, s0
+; GFX11-TRUE16-NEXT: s_lshl_b32 s0, s19, 16
+; GFX11-TRUE16-NEXT: v_cndmask_b32_e32 v21, v6, v8, vcc_lo
+; GFX11-TRUE16-NEXT: v_bfe_u32 v6, v9, 16, 1
+; GFX11-TRUE16-NEXT: v_add_nc_u32_e32 v7, 0x7fff, v7
+; GFX11-TRUE16-NEXT: v_bfe_u32 v8, v10, 16, 1
+; GFX11-TRUE16-NEXT: v_or_b32_e32 v11, 0x400000, v13
+; GFX11-TRUE16-NEXT: v_add_f32_e64 v14, 0x40c00000, s0
+; GFX11-TRUE16-NEXT: v_add_nc_u32_e32 v12, v6, v9
+; GFX11-TRUE16-NEXT: v_cmp_u_f32_e32 vcc_lo, v13, v13
+; GFX11-TRUE16-NEXT: v_add_nc_u32_e32 v8, v8, v10
+; GFX11-TRUE16-NEXT: s_and_b32 s0, s20, 0xffff0000
+; GFX11-TRUE16-NEXT: v_or_b32_e32 v13, 0x400000, v10
+; GFX11-TRUE16-NEXT: v_add_f32_e64 v15, 0x40c00000, s0
+; GFX11-TRUE16-NEXT: v_dual_cndmask_b32 v6, v7, v11 :: v_dual_add_nc_u32 v7, 0x7fff, v12
+; GFX11-TRUE16-NEXT: v_bfe_u32 v11, v14, 16, 1
+; GFX11-TRUE16-NEXT: v_or_b32_e32 v12, 0x400000, v9
+; GFX11-TRUE16-NEXT: v_cmp_u_f32_e32 vcc_lo, v9, v9
+; GFX11-TRUE16-NEXT: v_add_nc_u32_e32 v8, 0x7fff, v8
+; GFX11-TRUE16-NEXT: s_lshl_b32 s0, s20, 16
+; GFX11-TRUE16-NEXT: v_add_nc_u32_e32 v11, v11, v14
+; GFX11-TRUE16-NEXT: v_bfe_u32 v9, v15, 16, 1
+; GFX11-TRUE16-NEXT: v_cndmask_b32_e32 v22, v7, v12, vcc_lo
+; GFX11-TRUE16-NEXT: v_cmp_u_f32_e32 vcc_lo, v10, v10
+; GFX11-TRUE16-NEXT: v_or_b32_e32 v10, 0x400000, v14
+; GFX11-TRUE16-NEXT: v_mov_b16_e32 v5.l, v21.h
+; GFX11-TRUE16-NEXT: v_add_nc_u32_e32 v9, v9, v15
+; GFX11-TRUE16-NEXT: v_mov_b16_e32 v6.l, v22.h
+; GFX11-TRUE16-NEXT: v_dual_cndmask_b32 v7, v8, v13 :: v_dual_add_nc_u32 v8, 0x7fff, v11
+; GFX11-TRUE16-NEXT: v_add_f32_e64 v11, 0x40c00000, s0
+; GFX11-TRUE16-NEXT: v_cmp_u_f32_e32 vcc_lo, v14, v14
+; GFX11-TRUE16-NEXT: s_and_b32 s0, s21, 0xffff0000
+; GFX11-TRUE16-NEXT: v_add_nc_u32_e32 v9, 0x7fff, v9
+; GFX11-TRUE16-NEXT: v_add_f32_e64 v12, 0x40c00000, s0
+; GFX11-TRUE16-NEXT: s_lshl_b32 s0, s21, 16
+; GFX11-TRUE16-NEXT: v_cndmask_b32_e32 v23, v8, v10, vcc_lo
+; GFX11-TRUE16-NEXT: v_bfe_u32 v8, v11, 16, 1
+; GFX11-TRUE16-NEXT: v_or_b32_e32 v13, 0x400000, v15
+; GFX11-TRUE16-NEXT: v_bfe_u32 v10, v12, 16, 1
+; GFX11-TRUE16-NEXT: v_add_f32_e64 v24, 0x40c00000, s0
+; GFX11-TRUE16-NEXT: v_cmp_u_f32_e32 vcc_lo, v15, v15
+; GFX11-TRUE16-NEXT: v_add_nc_u32_e32 v14, v8, v11
+; GFX11-TRUE16-NEXT: s_and_b32 s0, s22, 0xffff0000
+; GFX11-TRUE16-NEXT: v_add_nc_u32_e32 v10, v10, v12
+; GFX11-TRUE16-NEXT: v_or_b32_e32 v15, 0x400000, v12
+; GFX11-TRUE16-NEXT: s_delay_alu instid0(VALU_DEP_3)
+; GFX11-TRUE16-NEXT: v_dual_cndmask_b32 v8, v9, v13 :: v_dual_add_nc_u32 v9, 0x7fff, v14
+; GFX11-TRUE16-NEXT: v_bfe_u32 v13, v24, 16, 1
+; GFX11-TRUE16-NEXT: v_or_b32_e32 v14, 0x400000, v11
+; GFX11-TRUE16-NEXT: v_cmp_u_f32_e32 vcc_lo, v11, v11
+; GFX11-TRUE16-NEXT: v_add_nc_u32_e32 v10, 0x7fff, v10
+; GFX11-TRUE16-NEXT: v_add_f32_e64 v25, 0x40c00000, s0
+; GFX11-TRUE16-NEXT: v_add_nc_u32_e32 v13, v13, v24
+; GFX11-TRUE16-NEXT: s_lshl_b32 s0, s22, 16
+; GFX11-TRUE16-NEXT: v_cndmask_b32_e32 v26, v9, v14, vcc_lo
+; GFX11-TRUE16-NEXT: v_cmp_u_f32_e32 vcc_lo, v12, v12
+; GFX11-TRUE16-NEXT: v_bfe_u32 v11, v25, 16, 1
+; GFX11-TRUE16-NEXT: v_or_b32_e32 v12, 0x400000, v24
+; GFX11-TRUE16-NEXT: v_mov_b16_e32 v7.l, v23.h
+; GFX11-TRUE16-NEXT: v_mov_b16_e32 v8.l, v26.h
+; GFX11-TRUE16-NEXT: v_dual_cndmask_b32 v9, v10, v15 :: v_dual_add_nc_u32 v10, 0x7fff, v13
+; GFX11-TRUE16-NEXT: v_add_f32_e64 v13, 0x40c00000, s0
+; GFX11-TRUE16-NEXT: v_cmp_u_f32_e32 vcc_lo, v24, v24
+; GFX11-TRUE16-NEXT: s_and_b32 s0, s23, 0xffff0000
+; GFX11-TRUE16-NEXT: v_add_nc_u32_e32 v11, v11, v25
+; GFX11-TRUE16-NEXT: v_add_f32_e64 v14, 0x40c00000, s0
+; GFX11-TRUE16-NEXT: s_lshl_b32 s0, s23, 16
+; GFX11-TRUE16-NEXT: v_cndmask_b32_e32 v24, v10, v12, vcc_lo
+; GFX11-TRUE16-NEXT: v_bfe_u32 v10, v13, 16, 1
+; GFX11-TRUE16-NEXT: v_add_nc_u32_e32 v11, 0x7fff, v11
+; GFX11-TRUE16-NEXT: v_or_b32_e32 v12, 0x400000, v25
+; GFX11-TRUE16-NEXT: v_bfe_u32 v15, v14, 16, 1
+; GFX11-TRUE16-NEXT: v_add_f32_e64 v28, 0x40c00000, s0
+; GFX11-TRUE16-NEXT: v_add_nc_u32_e32 v27, v10, v13
+; GFX11-TRUE16-NEXT: v_cmp_u_f32_e32 vcc_lo, v25, v25
+; GFX11-TRUE16-NEXT: s_and_b32 s0, s24, 0xffff0000
+; GFX11-TRUE16-NEXT: v_or_b32_e32 v29, 0x400000, v14
+; GFX11-TRUE16-NEXT: v_bfe_u32 v25, v28, 16, 1
+; GFX11-TRUE16-NEXT: v_mov_b16_e32 v9.l, v24.h
+; GFX11-TRUE16-NEXT: v_cndmask_b32_e32 v10, v11, v12, vcc_lo
+; GFX11-TRUE16-NEXT: v_add_nc_u32_e32 v11, v15, v14
+; GFX11-TRUE16-NEXT: v_add_nc_u32_e32 v12, 0x7fff, v27
+; GFX11-TRUE16-NEXT: v_or_b32_e32 v15, 0x400000, v13
+; GFX11-TRUE16-NEXT: v_cmp_u_f32_e32 vcc_lo, v13, v13
+; GFX11-TRUE16-NEXT: v_add_f32_e64 v27, 0x40c00000, s0
+; GFX11-TRUE16-NEXT: v_add_nc_u32_e32 v11, 0x7fff, v11
+; GFX11-TRUE16-NEXT: v_add_nc_u32_e32 v25, v25, v28
+; GFX11-TRUE16-NEXT: s_lshl_b32 s0, s24, 16
+; GFX11-TRUE16-NEXT: v_cndmask_b32_e32 v30, v12, v15, vcc_lo
+; GFX11-TRUE16-NEXT: v_cmp_u_f32_e32 vcc_lo, v14, v14
+; GFX11-TRUE16-NEXT: v_bfe_u32 v12, v27, 16, 1
+; GFX11-TRUE16-NEXT: v_add_nc_u32_e32 v13, 0x7fff, v25
+; GFX11-TRUE16-NEXT: v_or_b32_e32 v14, 0x400000, v28
+; GFX11-TRUE16-NEXT: v_add_f32_e64 v15, 0x40c00000, s0
+; GFX11-TRUE16-NEXT: v_cndmask_b32_e32 v11, v11, v29, vcc_lo
+; GFX11-TRUE16-NEXT: v_cmp_u_f32_e32 vcc_lo, v28, v28
+; GFX11-TRUE16-NEXT: v_add_nc_u32_e32 v12, v12, v27
+; GFX11-TRUE16-NEXT: v_add_f32_e64 v25, 0x40c00000, s1
+; GFX11-TRUE16-NEXT: s_lshl_b32 s0, s25, 16
+; GFX11-TRUE16-NEXT: v_or_b32_e32 v29, 0x400000, v27
+; GFX11-TRUE16-NEXT: v_cndmask_b32_e32 v28, v13, v14, vcc_lo
+; GFX11-TRUE16-NEXT: v_bfe_u32 v13, v15, 16, 1
+; GFX11-TRUE16-NEXT: v_add_nc_u32_e32 v12, 0x7fff, v12
+; GFX11-TRUE16-NEXT: v_bfe_u32 v14, v25, 16, 1
+; GFX11-TRUE16-NEXT: v_add_f32_e64 v31, 0x40c00000, s0
+; GFX11-TRUE16-NEXT: v_cmp_u_f32_e32 vcc_lo, v27, v27
+; GFX11-TRUE16-NEXT: v_add_nc_u32_e32 v13, v13, v15
+; GFX11-TRUE16-NEXT: v_or_b32_e32 v32, 0x400000, v25
+; GFX11-TRUE16-NEXT: v_add_nc_u32_e32 v14, v14, v25
+; GFX11-TRUE16-NEXT: v_bfe_u32 v27, v31, 16, 1
+; GFX11-TRUE16-NEXT: v_cndmask_b32_e32 v12, v12, v29, vcc_lo
+; GFX11-TRUE16-NEXT: v_add_nc_u32_e32 v13, 0x7fff, v13
+; GFX11-TRUE16-NEXT: v_or_b32_e32 v29, 0x400000, v15
+; GFX11-TRUE16-NEXT: v_cmp_u_f32_e32 vcc_lo, v15, v15
+; GFX11-TRUE16-NEXT: v_add_nc_u32_e32 v14, 0x7fff, v14
+; GFX11-TRUE16-NEXT: v_add_nc_u32_e32 v27, v27, v31
+; GFX11-TRUE16-NEXT: s_and_b32 s0, s26, 0xffff0000
+; GFX11-TRUE16-NEXT: v_mov_b16_e32 v11.l, v28.h
+; GFX11-TRUE16-NEXT: v_cndmask_b32_e32 v29, v13, v29, vcc_lo
+; GFX11-TRUE16-NEXT: v_cmp_u_f32_e32 vcc_lo, v25, v25
+; GFX11-TRUE16-NEXT: v_add_f32_e64 v15, 0x40c00000, s0
+; GFX11-TRUE16-NEXT: s_lshl_b32 s0, s26, 16
+; GFX11-TRUE16-NEXT: v_mov_b16_e32 v10.l, v30.h
+; GFX11-TRUE16-NEXT: v_mov_b16_e32 v12.l, v29.h
+; GFX11-TRUE16-NEXT: v_dual_cndmask_b32 v13, v14, v32 :: v_dual_add_nc_u32 v14, 0x7fff, v27
+; GFX11-TRUE16-NEXT: v_add_f32_e64 v27, 0x40c00000, s0
+; GFX11-TRUE16-NEXT: s_lshl_b32 s0, s27, 16
+; GFX11-TRUE16-NEXT: v_or_b32_e32 v32, 0x400000, v31
+; GFX11-TRUE16-NEXT: v_add_f32_e64 v33, 0x40c00000, s0
+; GFX11-TRUE16-NEXT: v_cmp_u_f32_e32 vcc_lo, v31, v31
+; GFX11-TRUE16-NEXT: v_bfe_u32 v34, v27, 16, 1
+; GFX11-TRUE16-NEXT: s_and_b32 s0, s27, 0xffff0000
+; GFX11-TRUE16-NEXT: v_bfe_u32 v25, v15, 16, 1
+; GFX11-TRUE16-NEXT: v_bfe_u32 v36, v33, 16, 1
+; GFX11-TRUE16-NEXT: v_cndmask_b32_e32 v31, v14, v32, vcc_lo
+; GFX11-TRUE16-NEXT: v_add_nc_u32_e32 v14, v34, v27
+; GFX11-TRUE16-NEXT: v_add_f32_e64 v35, 0x40c00000, s0
+; GFX11-TRUE16-NEXT: v_or_b32_e32 v37, 0x400000, v27
+; GFX11-TRUE16-NEXT: v_add_nc_u32_e32 v34, v36, v33
+; GFX11-TRUE16-NEXT: v_cmp_u_f32_e32 vcc_lo, v27, v27
+; GFX11-TRUE16-NEXT: v_add_nc_u32_e32 v14, 0x7fff, v14
+; GFX11-TRUE16-NEXT: v_add_nc_u32_e32 v25, v25, v15
+; GFX11-TRUE16-NEXT: v_bfe_u32 v32, v35, 16, 1
+; GFX11-TRUE16-NEXT: v_add_nc_u32_e32 v34, 0x7fff, v34
+; GFX11-TRUE16-NEXT: v_or_b32_e32 v38, 0x400000, v33
+; GFX11-TRUE16-NEXT: v_cndmask_b32_e32 v27, v14, v37, vcc_lo
+; GFX11-TRUE16-NEXT: v_cmp_u_f32_e32 vcc_lo, v33, v33
+; GFX11-TRUE16-NEXT: v_add_nc_u32_e32 v25, 0x7fff, v25
+; GFX11-TRUE16-NEXT: v_or_b32_e32 v36, 0x400000, v15
+; GFX11-TRUE16-NEXT: v_add_nc_u32_e32 v32, v32, v35
+; GFX11-TRUE16-NEXT: v_or_b32_e32 v37, 0x400000, v35
+; GFX11-TRUE16-NEXT: v_cndmask_b32_e32 v33, v34, v38, vcc_lo
+; GFX11-TRUE16-NEXT: v_cmp_u_f32_e32 vcc_lo, v15, v15
+; GFX11-TRUE16-NEXT: v_mov_b16_e32 v13.l, v31.h
+; GFX11-TRUE16-NEXT: v_add_nc_u32_e32 v32, 0x7fff, v32
+; GFX11-TRUE16-NEXT: v_mov_b16_e32 v2.l, v18.h
+; GFX11-TRUE16-NEXT: v_mov_b16_e32 v1.l, v17.h
+; GFX11-TRUE16-NEXT: v_cndmask_b32_e32 v14, v25, v36, vcc_lo
+; GFX11-TRUE16-NEXT: v_cmp_u_f32_e32 vcc_lo, v35, v35
+; GFX11-TRUE16-NEXT: v_mov_b16_e32 v14.l, v27.h
+; GFX11-TRUE16-NEXT: v_cndmask_b32_e32 v15, v32, v37, vcc_lo
+; GFX11-TRUE16-NEXT: v_mov_b16_e32 v15.l, v33.h
+; GFX11-TRUE16-NEXT: s_setpc_b64 s[30:31]
+; GFX11-TRUE16-NEXT: .LBB95_3:
+; GFX11-TRUE16-NEXT: s_branch .LBB95_2
+; GFX11-TRUE16-NEXT: .LBB95_4:
+; GFX11-TRUE16-NEXT: v_dual_mov_b32 v0, s12 :: v_dual_mov_b32 v1, s13
+; GFX11-TRUE16-NEXT: v_dual_mov_b32 v2, s14 :: v_dual_mov_b32 v3, s15
+; GFX11-TRUE16-NEXT: v_dual_mov_b32 v4, s16 :: v_dual_mov_b32 v5, s17
+; GFX11-TRUE16-NEXT: v_dual_mov_b32 v6, s18 :: v_dual_mov_b32 v7, s19
+; GFX11-TRUE16-NEXT: v_dual_mov_b32 v8, s20 :: v_dual_mov_b32 v9, s21
+; GFX11-TRUE16-NEXT: v_dual_mov_b32 v10, s22 :: v_dual_mov_b32 v11, s23
+; GFX11-TRUE16-NEXT: v_dual_mov_b32 v12, s24 :: v_dual_mov_b32 v13, s25
+; GFX11-TRUE16-NEXT: v_dual_mov_b32 v14, s26 :: v_dual_mov_b32 v15, s27
+; GFX11-TRUE16-NEXT: s_setpc_b64 s[30:31]
+;
+; GFX11-FAKE16-LABEL: bitcast_v32bf16_to_v32i16_scalar:
+; GFX11-FAKE16: ; %bb.0:
+; GFX11-FAKE16-NEXT: s_waitcnt vmcnt(0) expcnt(0) lgkmcnt(0)
+; GFX11-FAKE16-NEXT: s_mov_b32 s15, s3
+; GFX11-FAKE16-NEXT: s_mov_b32 s14, s2
+; GFX11-FAKE16-NEXT: s_mov_b32 s13, s1
+; GFX11-FAKE16-NEXT: s_mov_b32 s12, s0
+; GFX11-FAKE16-NEXT: s_cmp_lg_u32 s28, 0
+; GFX11-FAKE16-NEXT: s_mov_b32 s0, 0
+; GFX11-FAKE16-NEXT: s_cbranch_scc0 .LBB95_3
+; GFX11-FAKE16-NEXT: ; %bb.1: ; %Flow
+; GFX11-FAKE16-NEXT: s_and_not1_b32 vcc_lo, exec_lo, s0
+; GFX11-FAKE16-NEXT: s_cbranch_vccnz .LBB95_4
+; GFX11-FAKE16-NEXT: .LBB95_2: ; %cmp.true
+; GFX11-FAKE16-NEXT: s_and_b32 s0, s12, 0xffff0000
+; GFX11-FAKE16-NEXT: s_lshl_b32 s1, s12, 16
+; GFX11-FAKE16-NEXT: v_add_f32_e64 v0, 0x40c00000, s0
+; GFX11-FAKE16-NEXT: s_and_b32 s0, s13, 0xffff0000
+; GFX11-FAKE16-NEXT: v_add_f32_e64 v1, 0x40c00000, s1
+; GFX11-FAKE16-NEXT: v_add_f32_e64 v4, 0x40c00000, s0
+; GFX11-FAKE16-NEXT: s_lshl_b32 s1, s13, 16
+; GFX11-FAKE16-NEXT: v_bfe_u32 v2, v0, 16, 1
+; GFX11-FAKE16-NEXT: v_or_b32_e32 v6, 0x400000, v0
+; GFX11-FAKE16-NEXT: v_bfe_u32 v3, v1, 16, 1
+; GFX11-FAKE16-NEXT: v_bfe_u32 v7, v4, 16, 1
+; GFX11-FAKE16-NEXT: v_or_b32_e32 v8, 0x400000, v1
+; GFX11-FAKE16-NEXT: v_add_nc_u32_e32 v2, v2, v0
+; GFX11-FAKE16-NEXT: v_cmp_u_f32_e32 vcc_lo, v0, v0
+; GFX11-FAKE16-NEXT: v_add_f32_e64 v5, 0x40c00000, s1
+; GFX11-FAKE16-NEXT: v_add_nc_u32_e32 v7, v7, v4
+; GFX11-FAKE16-NEXT: s_and_b32 s2, s14, 0xffff0000
+; GFX11-FAKE16-NEXT: v_add_nc_u32_e32 v2, 0x7fff, v2
+; GFX11-FAKE16-NEXT: v_add_nc_u32_e32 v3, v3, v1
+; GFX11-FAKE16-NEXT: v_bfe_u32 v9, v5, 16, 1
+; GFX11-FAKE16-NEXT: s_lshl_b32 s0, s14, 16
+; GFX11-FAKE16-NEXT: s_lshl_b32 s1, s27, 16
+; GFX11-FAKE16-NEXT: v_cndmask_b32_e32 v0, v2, v6, vcc_lo
+; GFX11-FAKE16-NEXT: v_cmp_u_f32_e32 vcc_lo, v1, v1
+; GFX11-FAKE16-NEXT: v_add_nc_u32_e32 v2, 0x7fff, v7
+; GFX11-FAKE16-NEXT: v_or_b32_e32 v6, 0x400000, v4
+; GFX11-FAKE16-NEXT: v_add_nc_u32_e32 v3, 0x7fff, v3
+; GFX11-FAKE16-NEXT: s_delay_alu instid0(VALU_DEP_1) | instskip(SKIP_3) | instid1(VALU_DEP_3)
+; GFX11-FAKE16-NEXT: v_cndmask_b32_e32 v1, v3, v8, vcc_lo
+; GFX11-FAKE16-NEXT: v_add_f32_e64 v3, 0x40c00000, s2
+; GFX11-FAKE16-NEXT: v_cmp_u_f32_e32 vcc_lo, v4, v4
+; GFX11-FAKE16-NEXT: v_or_b32_e32 v4, 0x400000, v5
+; GFX11-FAKE16-NEXT: v_bfe_u32 v8, v3, 16, 1
+; GFX11-FAKE16-NEXT: v_cndmask_b32_e32 v16, v2, v6, vcc_lo
+; GFX11-FAKE16-NEXT: v_cmp_u_f32_e32 vcc_lo, v5, v5
+; GFX11-FAKE16-NEXT: v_add_nc_u32_e32 v7, v9, v5
+; GFX11-FAKE16-NEXT: v_add_f32_e64 v9, 0x40c00000, s0
+; GFX11-FAKE16-NEXT: v_add_nc_u32_e32 v6, v8, v3
+; GFX11-FAKE16-NEXT: s_and_b32 s0, s15, 0xffff0000
+; GFX11-FAKE16-NEXT: s_delay_alu instid0(VALU_DEP_3) | instskip(NEXT) | instid1(VALU_DEP_3)
+; GFX11-FAKE16-NEXT: v_add_nc_u32_e32 v2, 0x7fff, v7
+; GFX11-FAKE16-NEXT: v_bfe_u32 v7, v9, 16, 1
+; GFX11-FAKE16-NEXT: s_delay_alu instid0(VALU_DEP_3) | instskip(SKIP_1) | instid1(VALU_DEP_4)
+; GFX11-FAKE16-NEXT: v_add_nc_u32_e32 v5, 0x7fff, v6
+; GFX11-FAKE16-NEXT: v_or_b32_e32 v6, 0x400000, v3
+; GFX11-FAKE16-NEXT: v_cndmask_b32_e32 v2, v2, v4, vcc_lo
+; GFX11-FAKE16-NEXT: v_add_f32_e64 v4, 0x40c00000, s0
+; GFX11-FAKE16-NEXT: v_add_nc_u32_e32 v7, v7, v9
+; GFX11-FAKE16-NEXT: v_cmp_u_f32_e32 vcc_lo, v3, v3
+; GFX11-FAKE16-NEXT: s_lshl_b32 s0, s15, 16
+; GFX11-FAKE16-NEXT: s_delay_alu instid0(VALU_DEP_3)
+; GFX11-FAKE16-NEXT: v_bfe_u32 v8, v4, 16, 1
+; GFX11-FAKE16-NEXT: v_add_f32_e64 v10, 0x40c00000, s0
+; GFX11-FAKE16-NEXT: v_cndmask_b32_e32 v17, v5, v6, vcc_lo
+; GFX11-FAKE16-NEXT: v_add_nc_u32_e32 v3, 0x7fff, v7
+; GFX11-FAKE16-NEXT: v_or_b32_e32 v5, 0x400000, v9
+; GFX11-FAKE16-NEXT: v_cmp_u_f32_e32 vcc_lo, v9, v9
+; GFX11-FAKE16-NEXT: s_and_b32 s0, s16, 0xffff0000
+; GFX11-FAKE16-NEXT: v_add_nc_u32_e32 v6, v8, v4
+; GFX11-FAKE16-NEXT: v_bfe_u32 v7, v10, 16, 1
+; GFX11-FAKE16-NEXT: v_or_b32_e32 v8, 0x400000, v4
+; GFX11-FAKE16-NEXT: v_cndmask_b32_e32 v3, v3, v5, vcc_lo
+; GFX11-FAKE16-NEXT: v_add_f32_e64 v5, 0x40c00000, s0
+; GFX11-FAKE16-NEXT: s_lshl_b32 s0, s16, 16
+; GFX11-FAKE16-NEXT: v_add_nc_u32_e32 v6, 0x7fff, v6
+; GFX11-FAKE16-NEXT: v_add_nc_u32_e32 v7, v7, v10
+; GFX11-FAKE16-NEXT: v_add_f32_e64 v11, 0x40c00000, s0
+; GFX11-FAKE16-NEXT: v_bfe_u32 v9, v5, 16, 1
+; GFX11-FAKE16-NEXT: v_cmp_u_f32_e32 vcc_lo, v4, v4
+; GFX11-FAKE16-NEXT: s_and_b32 s0, s17, 0xffff0000
+; GFX11-FAKE16-NEXT: v_add_nc_u32_e32 v4, 0x7fff, v7
+; GFX11-FAKE16-NEXT: s_delay_alu instid0(VALU_DEP_3) | instskip(SKIP_3) | instid1(VALU_DEP_4)
+; GFX11-FAKE16-NEXT: v_dual_cndmask_b32 v18, v6, v8 :: v_dual_add_nc_u32 v7, v9, v5
+; GFX11-FAKE16-NEXT: v_or_b32_e32 v6, 0x400000, v10
+; GFX11-FAKE16-NEXT: v_bfe_u32 v8, v11, 16, 1
+; GFX11-FAKE16-NEXT: v_cmp_u_f32_e32 vcc_lo, v10, v10
+; GFX11-FAKE16-NEXT: v_add_nc_u32_e32 v7, 0x7fff, v7
+; GFX11-FAKE16-NEXT: v_or_b32_e32 v9, 0x400000, v5
+; GFX11-FAKE16-NEXT: s_delay_alu instid0(VALU_DEP_4)
+; GFX11-FAKE16-NEXT: v_add_nc_u32_e32 v8, v8, v11
+; GFX11-FAKE16-NEXT: v_cndmask_b32_e32 v4, v4, v6, vcc_lo
+; GFX11-FAKE16-NEXT: v_add_f32_e64 v6, 0x40c00000, s0
+; GFX11-FAKE16-NEXT: v_cmp_u_f32_e32 vcc_lo, v5, v5
+; GFX11-FAKE16-NEXT: s_lshl_b32 s0, s17, 16
+; GFX11-FAKE16-NEXT: v_add_nc_u32_e32 v5, 0x7fff, v8
+; GFX11-FAKE16-NEXT: v_add_f32_e64 v12, 0x40c00000, s0
+; GFX11-FAKE16-NEXT: v_bfe_u32 v10, v6, 16, 1
+; GFX11-FAKE16-NEXT: v_cndmask_b32_e32 v19, v7, v9, vcc_lo
+; GFX11-FAKE16-NEXT: v_or_b32_e32 v7, 0x400000, v11
+; GFX11-FAKE16-NEXT: v_cmp_u_f32_e32 vcc_lo, v11, v11
+; GFX11-FAKE16-NEXT: s_and_b32 s0, s18, 0xffff0000
+; GFX11-FAKE16-NEXT: v_add_nc_u32_e32 v8, v10, v6
+; GFX11-FAKE16-NEXT: v_bfe_u32 v9, v12, 16, 1
+; GFX11-FAKE16-NEXT: v_cndmask_b32_e32 v20, v5, v7, vcc_lo
+; GFX11-FAKE16-NEXT: v_add_f32_e64 v5, 0x40c00000, s0
+; GFX11-FAKE16-NEXT: s_lshl_b32 s0, s18, 16
+; GFX11-FAKE16-NEXT: v_add_nc_u32_e32 v7, 0x7fff, v8
+; GFX11-FAKE16-NEXT: v_or_b32_e32 v8, 0x400000, v6
+; GFX11-FAKE16-NEXT: v_add_nc_u32_e32 v9, v9, v12
+; GFX11-FAKE16-NEXT: v_bfe_u32 v10, v5, 16, 1
+; GFX11-FAKE16-NEXT: v_add_f32_e64 v11, 0x40c00000, s0
+; GFX11-FAKE16-NEXT: v_cmp_u_f32_e32 vcc_lo, v6, v6
+; GFX11-FAKE16-NEXT: s_and_b32 s0, s19, 0xffff0000
+; GFX11-FAKE16-NEXT: v_add_nc_u32_e32 v6, 0x7fff, v9
+; GFX11-FAKE16-NEXT: v_lshrrev_b32_e32 v20, 16, v20
+; GFX11-FAKE16-NEXT: v_bfe_u32 v9, v11, 16, 1
+; GFX11-FAKE16-NEXT: v_cndmask_b32_e32 v21, v7, v8, vcc_lo
+; GFX11-FAKE16-NEXT: v_or_b32_e32 v7, 0x400000, v12
+; GFX11-FAKE16-NEXT: v_add_nc_u32_e32 v8, v10, v5
+; GFX11-FAKE16-NEXT: v_cmp_u_f32_e32 vcc_lo, v12, v12
+; GFX11-FAKE16-NEXT: v_or_b32_e32 v10, 0x400000, v5
+; GFX11-FAKE16-NEXT: v_add_nc_u32_e32 v9, v9, v11
+; GFX11-FAKE16-NEXT: s_delay_alu instid0(VALU_DEP_4) | instskip(SKIP_4) | instid1(SALU_CYCLE_1)
+; GFX11-FAKE16-NEXT: v_add_nc_u32_e32 v8, 0x7fff, v8
+; GFX11-FAKE16-NEXT: v_cndmask_b32_e32 v6, v6, v7, vcc_lo
+; GFX11-FAKE16-NEXT: v_add_f32_e64 v7, 0x40c00000, s0
+; GFX11-FAKE16-NEXT: v_cmp_u_f32_e32 vcc_lo, v5, v5
+; GFX11-FAKE16-NEXT: s_lshl_b32 s0, s19, 16
+; GFX11-FAKE16-NEXT: v_add_f32_e64 v13, 0x40c00000, s0
+; GFX11-FAKE16-NEXT: s_delay_alu instid0(VALU_DEP_3)
+; GFX11-FAKE16-NEXT: v_bfe_u32 v12, v7, 16, 1
+; GFX11-FAKE16-NEXT: v_dual_cndmask_b32 v5, v8, v10 :: v_dual_add_nc_u32 v8, 0x7fff, v9
+; GFX11-FAKE16-NEXT: v_or_b32_e32 v9, 0x400000, v11
+; GFX11-FAKE16-NEXT: v_cmp_u_f32_e32 vcc_lo, v11, v11
+; GFX11-FAKE16-NEXT: s_and_b32 s0, s20, 0xffff0000
+; GFX11-FAKE16-NEXT: v_add_nc_u32_e32 v10, v12, v7
+; GFX11-FAKE16-NEXT: v_bfe_u32 v12, v13, 16, 1
+; GFX11-FAKE16-NEXT: v_or_b32_e32 v11, 0x400000, v7
+; GFX11-FAKE16-NEXT: v_cndmask_b32_e32 v8, v8, v9, vcc_lo
+; GFX11-FAKE16-NEXT: v_add_f32_e64 v9, 0x40c00000, s0
+; GFX11-FAKE16-NEXT: s_lshl_b32 s0, s20, 16
+; GFX11-FAKE16-NEXT: v_add_nc_u32_e32 v10, 0x7fff, v10
+; GFX11-FAKE16-NEXT: v_add_nc_u32_e32 v12, v12, v13
+; GFX11-FAKE16-NEXT: v_add_f32_e64 v15, 0x40c00000, s0
+; GFX11-FAKE16-NEXT: v_bfe_u32 v14, v9, 16, 1
+; GFX11-FAKE16-NEXT: v_cmp_u_f32_e32 vcc_lo, v7, v7
+; GFX11-FAKE16-NEXT: s_and_b32 s0, s21, 0xffff0000
+; GFX11-FAKE16-NEXT: v_dual_cndmask_b32 v7, v10, v11 :: v_dual_add_nc_u32 v10, 0x7fff, v12
+; GFX11-FAKE16-NEXT: v_or_b32_e32 v11, 0x400000, v13
+; GFX11-FAKE16-NEXT: v_add_nc_u32_e32 v12, v14, v9
+; GFX11-FAKE16-NEXT: v_bfe_u32 v14, v15, 16, 1
+; GFX11-FAKE16-NEXT: v_cmp_u_f32_e32 vcc_lo, v13, v13
+; GFX11-FAKE16-NEXT: s_delay_alu instid0(VALU_DEP_2)
+; GFX11-FAKE16-NEXT: v_add_nc_u32_e32 v13, v14, v15
+; GFX11-FAKE16-NEXT: v_cndmask_b32_e32 v22, v10, v11, vcc_lo
+; GFX11-FAKE16-NEXT: v_add_f32_e64 v10, 0x40c00000, s0
+; GFX11-FAKE16-NEXT: v_add_nc_u32_e32 v11, 0x7fff, v12
+; GFX11-FAKE16-NEXT: v_or_b32_e32 v12, 0x400000, v9
+; GFX11-FAKE16-NEXT: v_cmp_u_f32_e32 vcc_lo, v9, v9
+; GFX11-FAKE16-NEXT: s_lshl_b32 s0, s21, 16
+; GFX11-FAKE16-NEXT: v_bfe_u32 v14, v10, 16, 1
+; GFX11-FAKE16-NEXT: v_add_f32_e64 v23, 0x40c00000, s0
+; GFX11-FAKE16-NEXT: v_dual_cndmask_b32 v24, v11, v12 :: v_dual_add_nc_u32 v9, 0x7fff, v13
+; GFX11-FAKE16-NEXT: v_or_b32_e32 v11, 0x400000, v15
+; GFX11-FAKE16-NEXT: v_cmp_u_f32_e32 vcc_lo, v15, v15
+; GFX11-FAKE16-NEXT: s_and_b32 s0, s22, 0xffff0000
+; GFX11-FAKE16-NEXT: v_add_nc_u32_e32 v12, v14, v10
+; GFX11-FAKE16-NEXT: v_bfe_u32 v13, v23, 16, 1
+; GFX11-FAKE16-NEXT: v_or_b32_e32 v14, 0x400000, v10
+; GFX11-FAKE16-NEXT: v_cndmask_b32_e32 v9, v9, v11, vcc_lo
+; GFX11-FAKE16-NEXT: v_add_f32_e64 v11, 0x40c00000, s0
+; GFX11-FAKE16-NEXT: s_lshl_b32 s0, s22, 16
+; GFX11-FAKE16-NEXT: v_add_nc_u32_e32 v12, 0x7fff, v12
+; GFX11-FAKE16-NEXT: v_add_nc_u32_e32 v13, v13, v23
+; GFX11-FAKE16-NEXT: v_add_f32_e64 v25, 0x40c00000, s0
+; GFX11-FAKE16-NEXT: v_bfe_u32 v15, v11, 16, 1
+; GFX11-FAKE16-NEXT: v_cmp_u_f32_e32 vcc_lo, v10, v10
+; GFX11-FAKE16-NEXT: s_and_b32 s0, s23, 0xffff0000
+; GFX11-FAKE16-NEXT: v_add_nc_u32_e32 v10, 0x7fff, v13
+; GFX11-FAKE16-NEXT: v_bfe_u32 v13, v25, 16, 1
+; GFX11-FAKE16-NEXT: v_or_b32_e32 v27, 0x400000, v11
+; GFX11-FAKE16-NEXT: v_cndmask_b32_e32 v26, v12, v14, vcc_lo
+; GFX11-FAKE16-NEXT: v_add_nc_u32_e32 v12, v15, v11
+; GFX11-FAKE16-NEXT: v_add_f32_e64 v14, 0x40c00000, s0
+; GFX11-FAKE16-NEXT: v_or_b32_e32 v15, 0x400000, v23
+; GFX11-FAKE16-NEXT: v_cmp_u_f32_e32 vcc_lo, v23, v23
+; GFX11-FAKE16-NEXT: v_add_nc_u32_e32 v13, v13, v25
+; GFX11-FAKE16-NEXT: v_add_nc_u32_e32 v12, 0x7fff, v12
+; GFX11-FAKE16-NEXT: v_bfe_u32 v28, v14, 16, 1
+; GFX11-FAKE16-NEXT: s_lshl_b32 s0, s23, 16
+; GFX11-FAKE16-NEXT: v_cndmask_b32_e32 v23, v10, v15, vcc_lo
+; GFX11-FAKE16-NEXT: v_cmp_u_f32_e32 vcc_lo, v11, v11
+; GFX11-FAKE16-NEXT: v_add_nc_u32_e32 v11, 0x7fff, v13
+; GFX11-FAKE16-NEXT: v_or_b32_e32 v13, 0x400000, v25
+; GFX11-FAKE16-NEXT: v_add_f32_e64 v15, 0x40c00000, s0
+; GFX11-FAKE16-NEXT: s_and_b32 s0, s24, 0xffff0000
+; GFX11-FAKE16-NEXT: v_cndmask_b32_e32 v10, v12, v27, vcc_lo
+; GFX11-FAKE16-NEXT: v_add_nc_u32_e32 v12, v28, v14
+; GFX11-FAKE16-NEXT: v_cmp_u_f32_e32 vcc_lo, v25, v25
+; GFX11-FAKE16-NEXT: v_add_f32_e64 v27, 0x40c00000, s0
+; GFX11-FAKE16-NEXT: v_or_b32_e32 v28, 0x400000, v14
+; GFX11-FAKE16-NEXT: v_bfe_u32 v29, v15, 16, 1
+; GFX11-FAKE16-NEXT: v_dual_cndmask_b32 v11, v11, v13 :: v_dual_add_nc_u32 v12, 0x7fff, v12
+; GFX11-FAKE16-NEXT: v_cmp_u_f32_e32 vcc_lo, v14, v14
+; GFX11-FAKE16-NEXT: s_lshl_b32 s0, s24, 16
+; GFX11-FAKE16-NEXT: v_bfe_u32 v13, v27, 16, 1
+; GFX11-FAKE16-NEXT: v_add_f32_e64 v14, 0x40c00000, s0
+; GFX11-FAKE16-NEXT: s_and_b32 s0, s25, 0xffff0000
+; GFX11-FAKE16-NEXT: v_dual_cndmask_b32 v25, v12, v28 :: v_dual_add_nc_u32 v12, v29, v15
+; GFX11-FAKE16-NEXT: s_delay_alu instid0(VALU_DEP_3)
+; GFX11-FAKE16-NEXT: v_add_nc_u32_e32 v13, v13, v27
+; GFX11-FAKE16-NEXT: v_add_f32_e64 v28, 0x40c00000, s0
+; GFX11-FAKE16-NEXT: v_or_b32_e32 v29, 0x400000, v15
+; GFX11-FAKE16-NEXT: v_bfe_u32 v30, v14, 16, 1
+; GFX11-FAKE16-NEXT: v_add_nc_u32_e32 v12, 0x7fff, v12
+; GFX11-FAKE16-NEXT: v_cmp_u_f32_e32 vcc_lo, v15, v15
+; GFX11-FAKE16-NEXT: v_add_nc_u32_e32 v13, 0x7fff, v13
+; GFX11-FAKE16-NEXT: v_or_b32_e32 v31, 0x400000, v27
+; GFX11-FAKE16-NEXT: v_bfe_u32 v32, v28, 16, 1
+; GFX11-FAKE16-NEXT: v_dual_cndmask_b32 v12, v12, v29 :: v_dual_add_nc_u32 v15, v30, v14
+; GFX11-FAKE16-NEXT: v_cmp_u_f32_e32 vcc_lo, v27, v27
+; GFX11-FAKE16-NEXT: v_or_b32_e32 v30, 0x400000, v14
+; GFX11-FAKE16-NEXT: s_lshl_b32 s0, s25, 16
+; GFX11-FAKE16-NEXT: s_delay_alu instid0(VALU_DEP_3)
+; GFX11-FAKE16-NEXT: v_add_nc_u32_e32 v15, 0x7fff, v15
+; GFX11-FAKE16-NEXT: v_add_f32_e64 v29, 0x40c00000, s0
+; GFX11-FAKE16-NEXT: v_cndmask_b32_e32 v27, v13, v31, vcc_lo
+; GFX11-FAKE16-NEXT: v_add_nc_u32_e32 v13, v32, v28
+; GFX11-FAKE16-NEXT: v_cmp_u_f32_e32 vcc_lo, v14, v14
+; GFX11-FAKE16-NEXT: v_or_b32_e32 v31, 0x400000, v28
+; GFX11-FAKE16-NEXT: s_and_b32 s0, s26, 0xffff0000
+; GFX11-FAKE16-NEXT: v_bfe_u32 v32, v29, 16, 1
+; GFX11-FAKE16-NEXT: v_dual_cndmask_b32 v14, v15, v30 :: v_dual_add_nc_u32 v13, 0x7fff, v13
+; GFX11-FAKE16-NEXT: v_cmp_u_f32_e32 vcc_lo, v28, v28
+; GFX11-FAKE16-NEXT: v_add_f32_e64 v33, 0x40c00000, s0
+; GFX11-FAKE16-NEXT: s_lshl_b32 s0, s26, 16
+; GFX11-FAKE16-NEXT: v_add_nc_u32_e32 v15, v32, v29
+; GFX11-FAKE16-NEXT: v_add_f32_e64 v30, 0x40c00000, s0
+; GFX11-FAKE16-NEXT: v_cndmask_b32_e32 v13, v13, v31, vcc_lo
+; GFX11-FAKE16-NEXT: v_add_f32_e64 v31, 0x40c00000, s1
+; GFX11-FAKE16-NEXT: s_and_b32 s0, s27, 0xffff0000
+; GFX11-FAKE16-NEXT: v_bfe_u32 v28, v33, 16, 1
+; GFX11-FAKE16-NEXT: v_add_f32_e64 v32, 0x40c00000, s0
+; GFX11-FAKE16-NEXT: v_bfe_u32 v34, v30, 16, 1
+; GFX11-FAKE16-NEXT: v_bfe_u32 v35, v31, 16, 1
+; GFX11-FAKE16-NEXT: v_or_b32_e32 v39, 0x400000, v31
+; GFX11-FAKE16-NEXT: v_cmp_u_f32_e32 vcc_lo, v31, v31
+; GFX11-FAKE16-NEXT: v_bfe_u32 v37, v32, 16, 1
+; GFX11-FAKE16-NEXT: v_add_nc_u32_e32 v34, v34, v30
+; GFX11-FAKE16-NEXT: v_add_nc_u32_e32 v35, v35, v31
+; GFX11-FAKE16-NEXT: v_or_b32_e32 v48, 0x400000, v30
+; GFX11-FAKE16-NEXT: v_or_b32_e32 v49, 0x400000, v32
+; GFX11-FAKE16-NEXT: v_add_nc_u32_e32 v37, v37, v32
+; GFX11-FAKE16-NEXT: v_add_nc_u32_e32 v34, 0x7fff, v34
+; GFX11-FAKE16-NEXT: v_add_nc_u32_e32 v35, 0x7fff, v35
+; GFX11-FAKE16-NEXT: v_add_nc_u32_e32 v15, 0x7fff, v15
+; GFX11-FAKE16-NEXT: v_add_nc_u32_e32 v28, v28, v33
+; GFX11-FAKE16-NEXT: v_add_nc_u32_e32 v37, 0x7fff, v37
+; GFX11-FAKE16-NEXT: v_or_b32_e32 v36, 0x400000, v29
+; GFX11-FAKE16-NEXT: v_cndmask_b32_e32 v31, v35, v39, vcc_lo
+; GFX11-FAKE16-NEXT: v_cmp_u_f32_e32 vcc_lo, v30, v30
+; GFX11-FAKE16-NEXT: v_add_nc_u32_e32 v28, 0x7fff, v28
+; GFX11-FAKE16-NEXT: v_or_b32_e32 v38, 0x400000, v33
+; GFX11-FAKE16-NEXT: v_lshrrev_b32_e32 v23, 16, v23
+; GFX11-FAKE16-NEXT: v_lshrrev_b32_e32 v31, 16, v31
+; GFX11-FAKE16-NEXT: v_cndmask_b32_e32 v30, v34, v48, vcc_lo
+; GFX11-FAKE16-NEXT: v_cmp_u_f32_e32 vcc_lo, v32, v32
+; GFX11-FAKE16-NEXT: v_lshrrev_b32_e32 v22, 16, v22
+; GFX11-FAKE16-NEXT: s_delay_alu instid0(VALU_DEP_3) | instskip(SKIP_2) | instid1(VALU_DEP_4)
+; GFX11-FAKE16-NEXT: v_lshrrev_b32_e32 v30, 16, v30
+; GFX11-FAKE16-NEXT: v_cndmask_b32_e32 v32, v37, v49, vcc_lo
+; GFX11-FAKE16-NEXT: v_cmp_u_f32_e32 vcc_lo, v29, v29
+; GFX11-FAKE16-NEXT: v_and_or_b32 v7, 0xffff0000, v7, v22
+; GFX11-FAKE16-NEXT: v_lshrrev_b32_e32 v22, 16, v3
+; GFX11-FAKE16-NEXT: v_cndmask_b32_e32 v29, v15, v36, vcc_lo
+; GFX11-FAKE16-NEXT: v_cmp_u_f32_e32 vcc_lo, v33, v33
+; GFX11-FAKE16-NEXT: v_and_or_b32 v15, 0xffff0000, v32, v31
+; GFX11-FAKE16-NEXT: v_lshrrev_b32_e32 v31, 16, v14
+; GFX11-FAKE16-NEXT: v_lshrrev_b32_e32 v32, 16, v12
+; GFX11-FAKE16-NEXT: v_lshrrev_b32_e32 v33, 16, v11
+; GFX11-FAKE16-NEXT: v_cndmask_b32_e32 v28, v28, v38, vcc_lo
+; GFX11-FAKE16-NEXT: v_lshrrev_b32_e32 v29, 16, v29
+; GFX11-FAKE16-NEXT: v_and_or_b32 v12, 0xffff0000, v27, v31
+; GFX11-FAKE16-NEXT: v_and_or_b32 v11, 0xffff0000, v25, v32
+; GFX11-FAKE16-NEXT: v_lshrrev_b32_e32 v25, 16, v9
+; GFX11-FAKE16-NEXT: v_and_or_b32 v14, 0xffff0000, v28, v30
+; GFX11-FAKE16-NEXT: v_lshrrev_b32_e32 v27, 16, v8
+; GFX11-FAKE16-NEXT: v_lshrrev_b32_e32 v28, 16, v6
+; GFX11-FAKE16-NEXT: v_and_or_b32 v9, 0xffff0000, v26, v23
+; GFX11-FAKE16-NEXT: v_and_or_b32 v8, 0xffff0000, v24, v25
+; GFX11-FAKE16-NEXT: v_lshrrev_b32_e32 v23, 16, v2
+; GFX11-FAKE16-NEXT: v_and_or_b32 v6, 0xffff0000, v5, v27
+; GFX11-FAKE16-NEXT: v_and_or_b32 v5, 0xffff0000, v21, v28
+; GFX11-FAKE16-NEXT: v_lshrrev_b32_e32 v21, 16, v4
+; GFX11-FAKE16-NEXT: v_lshrrev_b32_e32 v24, 16, v1
+; GFX11-FAKE16-NEXT: v_and_or_b32 v13, 0xffff0000, v13, v29
+; GFX11-FAKE16-NEXT: v_and_or_b32 v10, 0xffff0000, v10, v33
+; GFX11-FAKE16-NEXT: v_and_or_b32 v4, 0xffff0000, v19, v20
+; GFX11-FAKE16-NEXT: v_and_or_b32 v3, 0xffff0000, v18, v21
+; GFX11-FAKE16-NEXT: v_and_or_b32 v2, 0xffff0000, v17, v22
+; GFX11-FAKE16-NEXT: v_and_or_b32 v1, 0xffff0000, v16, v23
+; GFX11-FAKE16-NEXT: v_and_or_b32 v0, 0xffff0000, v0, v24
+; GFX11-FAKE16-NEXT: s_setpc_b64 s[30:31]
+; GFX11-FAKE16-NEXT: .LBB95_3:
+; GFX11-FAKE16-NEXT: s_branch .LBB95_2
+; GFX11-FAKE16-NEXT: .LBB95_4:
+; GFX11-FAKE16-NEXT: v_dual_mov_b32 v0, s12 :: v_dual_mov_b32 v1, s13
+; GFX11-FAKE16-NEXT: v_dual_mov_b32 v2, s14 :: v_dual_mov_b32 v3, s15
+; GFX11-FAKE16-NEXT: v_dual_mov_b32 v4, s16 :: v_dual_mov_b32 v5, s17
+; GFX11-FAKE16-NEXT: v_dual_mov_b32 v6, s18 :: v_dual_mov_b32 v7, s19
+; GFX11-FAKE16-NEXT: v_dual_mov_b32 v8, s20 :: v_dual_mov_b32 v9, s21
+; GFX11-FAKE16-NEXT: v_dual_mov_b32 v10, s22 :: v_dual_mov_b32 v11, s23
+; GFX11-FAKE16-NEXT: v_dual_mov_b32 v12, s24 :: v_dual_mov_b32 v13, s25
+; GFX11-FAKE16-NEXT: v_dual_mov_b32 v14, s26 :: v_dual_mov_b32 v15, s27
+; GFX11-FAKE16-NEXT: s_setpc_b64 s[30:31]
%cmp = icmp eq i32 %b, 0
br i1 %cmp, label %cmp.true, label %cmp.false
@@ -69965,170 +71564,106 @@ define inreg <32 x i16> @bitcast_v64i8_to_v32i16_scalar(<64 x i8> inreg %a, i32
; GFX11-TRUE16-NEXT: s_lshl_b32 s8, s3, 8
; GFX11-TRUE16-NEXT: s_or_b32 s5, s5, s6
; GFX11-TRUE16-NEXT: s_or_b32 s6, s7, s8
-; GFX11-TRUE16-NEXT: s_and_b32 s7, s16, 0xff
-; GFX11-TRUE16-NEXT: s_lshl_b32 s8, s17, 8
-; GFX11-TRUE16-NEXT: s_and_b32 s9, s18, 0xff
-; GFX11-TRUE16-NEXT: s_lshl_b32 s10, s19, 8
-; GFX11-TRUE16-NEXT: s_or_b32 s7, s7, s8
-; GFX11-TRUE16-NEXT: s_or_b32 s8, s9, s10
+; GFX11-TRUE16-NEXT: s_lshl_b32 s7, s17, 8
; GFX11-TRUE16-NEXT: s_pack_ll_b32_b16 s5, s5, s6
-; GFX11-TRUE16-NEXT: s_pack_ll_b32_b16 s6, s7, s8
-; GFX11-TRUE16-NEXT: s_and_b32 s7, s20, 0xff
-; GFX11-TRUE16-NEXT: s_lshl_b32 s8, s21, 8
-; GFX11-TRUE16-NEXT: s_and_b32 s9, s22, 0xff
-; GFX11-TRUE16-NEXT: s_lshl_b32 s10, s23, 8
-; GFX11-TRUE16-NEXT: s_or_b32 s7, s7, s8
-; GFX11-TRUE16-NEXT: s_or_b32 s8, s9, s10
-; GFX11-TRUE16-NEXT: s_and_b32 s9, s24, 0xff
-; GFX11-TRUE16-NEXT: s_lshl_b32 s10, s25, 8
+; GFX11-TRUE16-NEXT: s_and_b32 s6, s16, 0xff
+; GFX11-TRUE16-NEXT: s_and_b32 s8, s18, 0xff
+; GFX11-TRUE16-NEXT: s_lshl_b32 s9, s19, 8
+; GFX11-TRUE16-NEXT: s_or_b32 s6, s6, s7
+; GFX11-TRUE16-NEXT: s_or_b32 s7, s8, s9
+; GFX11-TRUE16-NEXT: s_and_b32 s8, s20, 0xff
+; GFX11-TRUE16-NEXT: s_lshl_b32 s9, s21, 8
+; GFX11-TRUE16-NEXT: s_and_b32 s10, s22, 0xff
+; GFX11-TRUE16-NEXT: s_lshl_b32 s11, s23, 8
+; GFX11-TRUE16-NEXT: s_or_b32 s8, s8, s9
+; GFX11-TRUE16-NEXT: s_or_b32 s9, s10, s11
+; GFX11-TRUE16-NEXT: s_pack_ll_b32_b16 s6, s6, s7
+; GFX11-TRUE16-NEXT: s_pack_ll_b32_b16 s7, s8, s9
+; GFX11-TRUE16-NEXT: s_and_b32 s8, s24, 0xff
+; GFX11-TRUE16-NEXT: s_lshl_b32 s9, s25, 8
+; GFX11-TRUE16-NEXT: s_and_b32 s10, s26, 0xff
+; GFX11-TRUE16-NEXT: s_lshl_b32 s11, s27, 8
; GFX11-TRUE16-NEXT: v_and_b32_e32 v0, 0xff, v35
-; GFX11-TRUE16-NEXT: s_pack_ll_b32_b16 s7, s7, s8
-; GFX11-TRUE16-NEXT: s_or_b32 s8, s9, s10
+; GFX11-TRUE16-NEXT: s_or_b32 s8, s8, s9
+; GFX11-TRUE16-NEXT: s_or_b32 s9, s10, s11
; GFX11-TRUE16-NEXT: s_and_b32 s10, s28, 0xff
; GFX11-TRUE16-NEXT: s_lshl_b32 s11, s29, 8
-; GFX11-TRUE16-NEXT: v_or_b32_e32 v0, v0, v39
-; GFX11-TRUE16-NEXT: s_or_b32 s10, s10, s11
-; GFX11-TRUE16-NEXT: s_and_b32 s9, s26, 0xff
-; GFX11-TRUE16-NEXT: v_and_b32_e64 v1, 0xffff, s10
-; GFX11-TRUE16-NEXT: s_lshl_b32 s12, s27, 8
-; GFX11-TRUE16-NEXT: v_and_b32_e32 v3, 0xff, v38
-; GFX11-TRUE16-NEXT: s_or_b32 s9, s9, s12
-; GFX11-TRUE16-NEXT: v_and_b32_e32 v5, 0xff, v31
; GFX11-TRUE16-NEXT: s_pack_ll_b32_b16 s8, s8, s9
+; GFX11-TRUE16-NEXT: s_or_b32 s9, s10, s11
+; GFX11-TRUE16-NEXT: s_delay_alu instid0(SALU_CYCLE_1) | instskip(SKIP_3) | instid1(VALU_DEP_4)
+; GFX11-TRUE16-NEXT: v_dual_mov_b32 v4, s9 :: v_dual_and_b32 v1, 0xff, v38
+; GFX11-TRUE16-NEXT: v_or_b32_e32 v0, v0, v39
; GFX11-TRUE16-NEXT: v_and_b32_e32 v2, 0xff, v37
-; GFX11-TRUE16-NEXT: v_lshl_or_b32 v4, v0, 16, v1
+; GFX11-TRUE16-NEXT: v_and_b32_e32 v3, 0xff, v31
+; GFX11-TRUE16-NEXT: v_or_b32_e32 v1, v1, v48
+; GFX11-TRUE16-NEXT: s_waitcnt vmcnt(5)
+; GFX11-TRUE16-NEXT: v_and_b32_e32 v15, 0xff, v82
+; GFX11-TRUE16-NEXT: v_mov_b16_e32 v4.h, v0.l
; GFX11-TRUE16-NEXT: v_and_b32_e32 v0, 0xff, v33
-; GFX11-TRUE16-NEXT: v_or_b32_e32 v3, v3, v48
-; GFX11-TRUE16-NEXT: v_and_b32_e32 v6, 0xff, v36
-; GFX11-TRUE16-NEXT: v_or_b32_e32 v1, v2, v49
-; GFX11-TRUE16-NEXT: v_and_b32_e32 v2, 0xff, v32
-; GFX11-TRUE16-NEXT: v_or_b32_e32 v0, v0, v51
-; GFX11-TRUE16-NEXT: v_or_b32_e32 v7, v5, v50
-; GFX11-TRUE16-NEXT: v_or_b32_e32 v8, v6, v52
-; GFX11-TRUE16-NEXT: v_and_b32_e32 v1, 0xffff, v1
-; GFX11-TRUE16-NEXT: v_or_b32_e32 v2, v2, v53
-; GFX11-TRUE16-NEXT: v_and_b32_e32 v0, 0xffff, v0
-; GFX11-TRUE16-NEXT: v_and_b32_e32 v9, 0xff, v24
-; GFX11-TRUE16-NEXT: s_waitcnt vmcnt(1)
-; GFX11-TRUE16-NEXT: v_and_b32_e32 v11, 0xff, v68
-; GFX11-TRUE16-NEXT: v_lshl_or_b32 v5, v3, 16, v1
-; GFX11-TRUE16-NEXT: v_and_b32_e32 v2, 0xffff, v2
-; GFX11-TRUE16-NEXT: v_and_b32_e32 v1, 0xff, v34
-; GFX11-TRUE16-NEXT: v_and_b32_e32 v3, 0xff, v16
-; GFX11-TRUE16-NEXT: v_lshl_or_b32 v6, v7, 16, v0
-; GFX11-TRUE16-NEXT: v_and_b32_e32 v0, 0xff, v18
-; GFX11-TRUE16-NEXT: v_lshl_or_b32 v7, v8, 16, v2
-; GFX11-TRUE16-NEXT: v_or_b32_e32 v1, v1, v54
-; GFX11-TRUE16-NEXT: v_or_b32_e32 v2, v3, v17
-; GFX11-TRUE16-NEXT: v_and_b32_e32 v3, 0xff, v20
-; GFX11-TRUE16-NEXT: v_and_b32_e32 v8, 0xff, v22
-; GFX11-TRUE16-NEXT: v_or_b32_e32 v0, v0, v55
-; GFX11-TRUE16-NEXT: v_and_b32_e32 v1, 0xffff, v1
-; GFX11-TRUE16-NEXT: v_and_b32_e32 v12, 0xff, v67
-; GFX11-TRUE16-NEXT: v_or_b32_e32 v3, v3, v19
-; GFX11-TRUE16-NEXT: v_or_b32_e32 v10, v8, v23
-; GFX11-TRUE16-NEXT: v_and_b32_e32 v0, 0xffff, v0
-; GFX11-TRUE16-NEXT: v_lshl_or_b32 v8, v2, 16, v1
-; GFX11-TRUE16-NEXT: v_or_b32_e32 v1, v9, v21
-; GFX11-TRUE16-NEXT: v_and_b32_e32 v2, 0xff, v26
-; GFX11-TRUE16-NEXT: v_and_b32_e32 v10, 0xffff, v10
-; GFX11-TRUE16-NEXT: v_lshl_or_b32 v9, v3, 16, v0
-; GFX11-TRUE16-NEXT: v_and_b32_e32 v3, 0xff, v30
-; GFX11-TRUE16-NEXT: v_and_b32_e32 v13, 0xff, v80
-; GFX11-TRUE16-NEXT: v_and_b32_e32 v14, 0xff, v82
+; GFX11-TRUE16-NEXT: v_or_b32_e32 v5, v2, v49
+; GFX11-TRUE16-NEXT: v_or_b32_e32 v2, v3, v50
+; GFX11-TRUE16-NEXT: v_mov_b16_e32 v5.h, v1.l
+; GFX11-TRUE16-NEXT: v_and_b32_e32 v1, 0xff, v36
+; GFX11-TRUE16-NEXT: v_or_b32_e32 v6, v0, v51
+; GFX11-TRUE16-NEXT: v_and_b32_e32 v0, 0xff, v32
+; GFX11-TRUE16-NEXT: v_mov_b16_e32 v6.h, v2.l
+; GFX11-TRUE16-NEXT: v_and_b32_e32 v2, 0xff, v16
+; GFX11-TRUE16-NEXT: v_or_b32_e32 v1, v1, v52
+; GFX11-TRUE16-NEXT: v_and_b32_e32 v3, 0xff, v34
+; GFX11-TRUE16-NEXT: v_or_b32_e32 v7, v0, v53
+; GFX11-TRUE16-NEXT: v_and_b32_e32 v0, 0xff, v20
+; GFX11-TRUE16-NEXT: v_or_b32_e32 v2, v2, v17
+; GFX11-TRUE16-NEXT: v_mov_b16_e32 v7.h, v1.l
+; GFX11-TRUE16-NEXT: v_and_b32_e32 v1, 0xff, v18
+; GFX11-TRUE16-NEXT: v_or_b32_e32 v8, v3, v54
+; GFX11-TRUE16-NEXT: v_or_b32_e32 v0, v0, v19
+; GFX11-TRUE16-NEXT: v_mov_b16_e32 v8.h, v2.l
+; GFX11-TRUE16-NEXT: v_and_b32_e32 v2, 0xff, v24
+; GFX11-TRUE16-NEXT: v_or_b32_e32 v9, v1, v55
+; GFX11-TRUE16-NEXT: v_and_b32_e32 v3, 0xff, v22
+; GFX11-TRUE16-NEXT: v_mov_b16_e32 v9.h, v0.l
; GFX11-TRUE16-NEXT: v_and_b32_e32 v0, 0xff, v28
-; GFX11-TRUE16-NEXT: v_or_b32_e32 v2, v2, v27
-; GFX11-TRUE16-NEXT: v_lshl_or_b32 v10, v1, 16, v10
+; GFX11-TRUE16-NEXT: v_or_b32_e32 v1, v2, v21
+; GFX11-TRUE16-NEXT: v_and_b32_e32 v2, 0xff, v26
+; GFX11-TRUE16-NEXT: v_or_b32_e32 v10, v3, v23
; GFX11-TRUE16-NEXT: s_waitcnt vmcnt(0)
-; GFX11-TRUE16-NEXT: v_and_b32_e32 v1, 0xff, v65
-; GFX11-TRUE16-NEXT: v_or_b32_e32 v3, v3, v66
-; GFX11-TRUE16-NEXT: v_or_b32_e32 v11, v11, v70
-; GFX11-TRUE16-NEXT: v_or_b32_e32 v15, v12, v71
-; GFX11-TRUE16-NEXT: v_and_b32_e32 v12, 0xff, v69
-; GFX11-TRUE16-NEXT: v_or_b32_e32 v13, v13, v83
-; GFX11-TRUE16-NEXT: v_and_b32_e32 v86, 0xff, v64
-; GFX11-TRUE16-NEXT: v_or_b32_e32 v14, v14, v84
+; GFX11-TRUE16-NEXT: v_and_b32_e32 v3, 0xff, v65
; GFX11-TRUE16-NEXT: v_or_b32_e32 v0, v0, v25
-; GFX11-TRUE16-NEXT: v_and_b32_e32 v2, 0xffff, v2
-; GFX11-TRUE16-NEXT: v_or_b32_e32 v1, v1, v29
-; GFX11-TRUE16-NEXT: v_and_b32_e32 v3, 0xffff, v3
-; GFX11-TRUE16-NEXT: v_and_b32_e32 v87, 0xffff, v11
-; GFX11-TRUE16-NEXT: v_or_b32_e32 v96, v12, v81
-; GFX11-TRUE16-NEXT: v_and_b32_e32 v97, 0xffff, v13
-; GFX11-TRUE16-NEXT: v_or_b32_e32 v86, v86, v85
-; GFX11-TRUE16-NEXT: v_and_b32_e32 v98, 0xffff, v14
-; GFX11-TRUE16-NEXT: v_lshl_or_b32 v11, v0, 16, v2
-; GFX11-TRUE16-NEXT: v_lshl_or_b32 v12, v1, 16, v3
-; GFX11-TRUE16-NEXT: v_lshl_or_b32 v13, v15, 16, v87
-; GFX11-TRUE16-NEXT: v_lshl_or_b32 v14, v96, 16, v97
-; GFX11-TRUE16-NEXT: v_lshl_or_b32 v15, v86, 16, v98
-; GFX11-TRUE16-NEXT: v_dual_mov_b32 v0, s5 :: v_dual_mov_b32 v1, s6
+; GFX11-TRUE16-NEXT: v_mov_b16_e32 v10.h, v1.l
+; GFX11-TRUE16-NEXT: v_and_b32_e32 v1, 0xff, v30
+; GFX11-TRUE16-NEXT: v_or_b32_e32 v11, v2, v27
+; GFX11-TRUE16-NEXT: v_or_b32_e32 v2, v3, v29
+; GFX11-TRUE16-NEXT: v_mov_b16_e32 v11.h, v0.l
+; GFX11-TRUE16-NEXT: v_and_b32_e32 v0, 0xff, v68
+; GFX11-TRUE16-NEXT: v_or_b32_e32 v12, v1, v66
+; GFX11-TRUE16-NEXT: v_and_b32_e32 v1, 0xff, v67
+; GFX11-TRUE16-NEXT: v_mov_b16_e32 v12.h, v2.l
+; GFX11-TRUE16-NEXT: v_and_b32_e32 v2, 0xff, v69
+; GFX11-TRUE16-NEXT: v_or_b32_e32 v13, v0, v70
+; GFX11-TRUE16-NEXT: v_and_b32_e32 v0, 0xff, v64
+; GFX11-TRUE16-NEXT: v_and_b32_e32 v3, 0xff, v80
+; GFX11-TRUE16-NEXT: v_or_b32_e32 v1, v1, v71
+; GFX11-TRUE16-NEXT: v_or_b32_e32 v2, v2, v81
+; GFX11-TRUE16-NEXT: v_or_b32_e32 v15, v15, v84
+; GFX11-TRUE16-NEXT: v_or_b32_e32 v0, v0, v85
+; GFX11-TRUE16-NEXT: v_or_b32_e32 v14, v3, v83
+; GFX11-TRUE16-NEXT: v_mov_b16_e32 v13.h, v1.l
+; GFX11-TRUE16-NEXT: v_mov_b16_e32 v14.h, v2.l
+; GFX11-TRUE16-NEXT: v_mov_b32_e32 v1, s6
+; GFX11-TRUE16-NEXT: v_mov_b16_e32 v15.h, v0.l
+; GFX11-TRUE16-NEXT: v_mov_b32_e32 v0, s5
; GFX11-TRUE16-NEXT: v_dual_mov_b32 v2, s7 :: v_dual_mov_b32 v3, s8
; GFX11-TRUE16-NEXT: s_and_not1_b32 vcc_lo, exec_lo, s4
; GFX11-TRUE16-NEXT: s_cbranch_vccnz .LBB99_3
; GFX11-TRUE16-NEXT: .LBB99_2: ; %cmp.true
-; GFX11-TRUE16-NEXT: s_waitcnt vmcnt(1)
-; GFX11-TRUE16-NEXT: v_add_nc_u32_e32 v4, 3, v68
-; GFX11-TRUE16-NEXT: v_add_nc_u32_e32 v5, 3, v67
-; GFX11-TRUE16-NEXT: v_add_nc_u32_e32 v6, 3, v30
-; GFX11-TRUE16-NEXT: s_waitcnt vmcnt(0)
-; GFX11-TRUE16-NEXT: v_add_nc_u32_e32 v7, 3, v65
-; GFX11-TRUE16-NEXT: v_add_nc_u32_e32 v8, 3, v22
-; GFX11-TRUE16-NEXT: v_and_b32_e32 v4, 0xff, v4
-; GFX11-TRUE16-NEXT: v_and_b32_e32 v5, 0xff, v5
-; GFX11-TRUE16-NEXT: v_and_b32_e32 v6, 0xff, v6
-; GFX11-TRUE16-NEXT: v_add_nc_u32_e32 v9, 3, v16
; GFX11-TRUE16-NEXT: s_add_i32 s28, s28, 3
-; GFX11-TRUE16-NEXT: v_or_b32_e32 v4, v70, v4
-; GFX11-TRUE16-NEXT: v_or_b32_e32 v5, v71, v5
-; GFX11-TRUE16-NEXT: s_and_b32 s4, s28, 0xff
; GFX11-TRUE16-NEXT: s_lshl_b32 s5, s29, 8
+; GFX11-TRUE16-NEXT: s_and_b32 s4, s28, 0xff
; GFX11-TRUE16-NEXT: s_add_i32 s24, s24, 3
-; GFX11-TRUE16-NEXT: v_add_nc_u32_e32 v11, 0x300, v4
-; GFX11-TRUE16-NEXT: v_add_nc_u32_e32 v13, 0x300, v5
-; GFX11-TRUE16-NEXT: v_or_b32_e32 v4, v66, v6
-; GFX11-TRUE16-NEXT: v_add_nc_u32_e32 v5, 3, v26
-; GFX11-TRUE16-NEXT: v_and_b32_e32 v6, 0xff, v7
-; GFX11-TRUE16-NEXT: v_add_nc_u32_e32 v7, 3, v28
; GFX11-TRUE16-NEXT: s_or_b32 s4, s5, s4
-; GFX11-TRUE16-NEXT: v_add_nc_u32_e32 v12, 0x300, v4
-; GFX11-TRUE16-NEXT: v_and_b32_e32 v4, 0xff, v5
-; GFX11-TRUE16-NEXT: v_or_b32_e32 v5, v29, v6
-; GFX11-TRUE16-NEXT: v_and_b32_e32 v6, 0xff, v7
-; GFX11-TRUE16-NEXT: v_and_b32_e32 v7, 0xff, v8
-; GFX11-TRUE16-NEXT: v_add_nc_u32_e32 v8, 3, v24
-; GFX11-TRUE16-NEXT: v_or_b32_e32 v4, v27, v4
-; GFX11-TRUE16-NEXT: v_add_nc_u32_e32 v14, 0x300, v5
-; GFX11-TRUE16-NEXT: v_or_b32_e32 v5, v25, v6
-; GFX11-TRUE16-NEXT: v_or_b32_e32 v6, v23, v7
-; GFX11-TRUE16-NEXT: v_and_b32_e32 v7, 0xff, v8
-; GFX11-TRUE16-NEXT: v_add_nc_u32_e32 v15, 0x300, v4
-; GFX11-TRUE16-NEXT: v_add_nc_u32_e32 v4, 3, v18
-; GFX11-TRUE16-NEXT: v_add_nc_u32_e32 v18, 0x300, v5
-; GFX11-TRUE16-NEXT: v_add_nc_u32_e32 v8, 3, v34
-; GFX11-TRUE16-NEXT: v_or_b32_e32 v5, v21, v7
-; GFX11-TRUE16-NEXT: v_add_nc_u32_e32 v7, 3, v20
-; GFX11-TRUE16-NEXT: v_and_b32_e32 v4, 0xff, v4
; GFX11-TRUE16-NEXT: s_and_b32 s5, s24, 0xff
; GFX11-TRUE16-NEXT: s_lshl_b32 s6, s25, 8
-; GFX11-TRUE16-NEXT: v_add_nc_u32_e32 v10, 0x300, v5
-; GFX11-TRUE16-NEXT: v_and_b32_e32 v5, 0xff, v7
-; GFX11-TRUE16-NEXT: v_or_b32_e32 v4, v55, v4
-; GFX11-TRUE16-NEXT: v_and_b32_e32 v7, 0xff, v8
-; GFX11-TRUE16-NEXT: v_and_b32_e32 v8, 0xff, v9
-; GFX11-TRUE16-NEXT: v_add_nc_u32_e32 v9, 3, v32
-; GFX11-TRUE16-NEXT: v_or_b32_e32 v5, v19, v5
-; GFX11-TRUE16-NEXT: v_add_nc_u32_e32 v16, 0x300, v4
-; GFX11-TRUE16-NEXT: v_or_b32_e32 v4, v54, v7
-; GFX11-TRUE16-NEXT: v_or_b32_e32 v7, v17, v8
-; GFX11-TRUE16-NEXT: v_and_b32_e32 v8, 0xff, v9
; GFX11-TRUE16-NEXT: s_add_i32 s26, s26, 3
-; GFX11-TRUE16-NEXT: v_add_nc_u32_e32 v9, 0x300, v5
-; GFX11-TRUE16-NEXT: v_add_nc_u32_e32 v5, 3, v36
-; GFX11-TRUE16-NEXT: v_add_nc_u32_e32 v17, 0x300, v4
-; GFX11-TRUE16-NEXT: v_add_nc_u32_e32 v19, 0x300, v7
-; GFX11-TRUE16-NEXT: v_or_b32_e32 v4, v53, v8
-; GFX11-TRUE16-NEXT: v_add_nc_u32_e32 v7, 3, v33
; GFX11-TRUE16-NEXT: s_or_b32 s5, s6, s5
; GFX11-TRUE16-NEXT: s_and_b32 s6, s26, 0xff
; GFX11-TRUE16-NEXT: s_lshl_b32 s7, s27, 8
@@ -70137,11 +71672,6 @@ define inreg <32 x i16> @bitcast_v64i8_to_v32i16_scalar(<64 x i8> inreg %a, i32
; GFX11-TRUE16-NEXT: s_and_b32 s7, s20, 0xff
; GFX11-TRUE16-NEXT: s_lshl_b32 s8, s21, 8
; GFX11-TRUE16-NEXT: s_add_i32 s22, s22, 3
-; GFX11-TRUE16-NEXT: v_and_b32_e32 v5, 0xff, v5
-; GFX11-TRUE16-NEXT: v_add_nc_u32_e32 v8, 3, v31
-; GFX11-TRUE16-NEXT: v_add_nc_u32_e32 v20, 3, v37
-; GFX11-TRUE16-NEXT: v_add_nc_u32_e32 v21, 0x300, v4
-; GFX11-TRUE16-NEXT: v_and_b32_e32 v4, 0xff, v7
; GFX11-TRUE16-NEXT: s_or_b32 s7, s8, s7
; GFX11-TRUE16-NEXT: s_and_b32 s8, s22, 0xff
; GFX11-TRUE16-NEXT: s_lshl_b32 s9, s23, 8
@@ -70159,83 +71689,133 @@ define inreg <32 x i16> @bitcast_v64i8_to_v32i16_scalar(<64 x i8> inreg %a, i32
; GFX11-TRUE16-NEXT: s_lshl_b32 s1, s1, 8
; GFX11-TRUE16-NEXT: s_and_b32 s2, s2, 0xff
; GFX11-TRUE16-NEXT: s_lshl_b32 s3, s3, 8
-; GFX11-TRUE16-NEXT: v_or_b32_e32 v5, v52, v5
-; GFX11-TRUE16-NEXT: v_and_b32_e32 v7, 0xff, v8
-; GFX11-TRUE16-NEXT: v_and_b32_e32 v8, 0xff, v20
-; GFX11-TRUE16-NEXT: v_add_nc_u32_e32 v20, 3, v38
-; GFX11-TRUE16-NEXT: v_or_b32_e32 v4, v51, v4
; GFX11-TRUE16-NEXT: s_or_b32 s10, s11, s10
; GFX11-TRUE16-NEXT: s_or_b32 s0, s1, s0
; GFX11-TRUE16-NEXT: s_or_b32 s1, s3, s2
-; GFX11-TRUE16-NEXT: s_addk_i32 s5, 0x300
-; GFX11-TRUE16-NEXT: s_addk_i32 s6, 0x300
; GFX11-TRUE16-NEXT: s_addk_i32 s9, 0x300
; GFX11-TRUE16-NEXT: s_addk_i32 s0, 0x300
; GFX11-TRUE16-NEXT: s_addk_i32 s1, 0x300
; GFX11-TRUE16-NEXT: s_addk_i32 s10, 0x300
-; GFX11-TRUE16-NEXT: v_add_nc_u32_e32 v22, 0x300, v5
-; GFX11-TRUE16-NEXT: v_or_b32_e32 v5, v50, v7
-; GFX11-TRUE16-NEXT: v_or_b32_e32 v7, v49, v8
-; GFX11-TRUE16-NEXT: v_and_b32_e32 v8, 0xff, v20
-; GFX11-TRUE16-NEXT: v_add_nc_u32_e32 v20, 0x300, v4
-; GFX11-TRUE16-NEXT: v_add_nc_u32_e32 v4, 3, v35
+; GFX11-TRUE16-NEXT: s_addk_i32 s4, 0x300
+; GFX11-TRUE16-NEXT: v_add_nc_u32_e32 v1, 3, v64
; GFX11-TRUE16-NEXT: s_pack_ll_b32_b16 s0, s0, s1
; GFX11-TRUE16-NEXT: s_pack_ll_b32_b16 s1, s9, s10
+; GFX11-TRUE16-NEXT: s_waitcnt vmcnt(5)
; GFX11-TRUE16-NEXT: v_add_nc_u32_e32 v0, 3, v82
-; GFX11-TRUE16-NEXT: s_pack_ll_b32_b16 s3, s5, s6
-; GFX11-TRUE16-NEXT: v_add_nc_u32_e32 v2, 3, v80
+; GFX11-TRUE16-NEXT: s_addk_i32 s5, 0x300
+; GFX11-TRUE16-NEXT: s_addk_i32 s6, 0x300
; GFX11-TRUE16-NEXT: s_addk_i32 s7, 0x300
; GFX11-TRUE16-NEXT: s_addk_i32 s8, 0x300
-; GFX11-TRUE16-NEXT: v_add_nc_u32_e32 v1, 3, v64
; GFX11-TRUE16-NEXT: v_and_b32_e32 v0, 0xff, v0
-; GFX11-TRUE16-NEXT: v_and_b32_e32 v2, 0xff, v2
-; GFX11-TRUE16-NEXT: v_and_b32_e32 v4, 0xff, v4
; GFX11-TRUE16-NEXT: s_pack_ll_b32_b16 s2, s7, s8
-; GFX11-TRUE16-NEXT: v_add_nc_u32_e32 v3, 3, v69
; GFX11-TRUE16-NEXT: v_and_b32_e32 v1, 0xff, v1
+; GFX11-TRUE16-NEXT: s_pack_ll_b32_b16 s3, s5, s6
+; GFX11-TRUE16-NEXT: s_waitcnt vmcnt(3)
+; GFX11-TRUE16-NEXT: v_add_nc_u32_e32 v2, 3, v80
+; GFX11-TRUE16-NEXT: s_waitcnt vmcnt(1)
+; GFX11-TRUE16-NEXT: v_add_nc_u32_e32 v4, 3, v68
+; GFX11-TRUE16-NEXT: v_add_nc_u32_e32 v5, 3, v67
; GFX11-TRUE16-NEXT: v_or_b32_e32 v0, v84, v0
+; GFX11-TRUE16-NEXT: v_or_b32_e32 v1, v85, v1
+; GFX11-TRUE16-NEXT: v_and_b32_e32 v2, 0xff, v2
+; GFX11-TRUE16-NEXT: v_and_b32_e32 v4, 0xff, v4
+; GFX11-TRUE16-NEXT: v_add_nc_u32_e32 v6, 3, v28
+; GFX11-TRUE16-NEXT: v_add_nc_u32_e32 v15, 0x300, v0
+; GFX11-TRUE16-NEXT: v_add_nc_u32_e32 v0, 0x300, v1
; GFX11-TRUE16-NEXT: v_or_b32_e32 v2, v83, v2
-; GFX11-TRUE16-NEXT: v_add_nc_u32_e32 v23, 0x300, v5
+; GFX11-TRUE16-NEXT: v_and_b32_e32 v1, 0xff, v5
+; GFX11-TRUE16-NEXT: v_add_nc_u32_e32 v5, 3, v30
+; GFX11-TRUE16-NEXT: v_add_nc_u32_e32 v7, 3, v22
+; GFX11-TRUE16-NEXT: v_add_nc_u32_e32 v3, 3, v69
+; GFX11-TRUE16-NEXT: v_add_nc_u32_e32 v14, 0x300, v2
+; GFX11-TRUE16-NEXT: v_or_b32_e32 v2, v70, v4
+; GFX11-TRUE16-NEXT: v_and_b32_e32 v4, 0xff, v5
+; GFX11-TRUE16-NEXT: s_waitcnt vmcnt(0)
+; GFX11-TRUE16-NEXT: v_add_nc_u32_e32 v5, 3, v65
+; GFX11-TRUE16-NEXT: v_mov_b16_e32 v15.h, v0.l
+; GFX11-TRUE16-NEXT: v_dual_mov_b32 v0, s0 :: v_dual_add_nc_u32 v13, 0x300, v2
+; GFX11-TRUE16-NEXT: v_or_b32_e32 v2, v66, v4
+; GFX11-TRUE16-NEXT: v_add_nc_u32_e32 v4, 3, v26
+; GFX11-TRUE16-NEXT: v_and_b32_e32 v5, 0xff, v5
; GFX11-TRUE16-NEXT: v_and_b32_e32 v3, 0xff, v3
-; GFX11-TRUE16-NEXT: v_add_nc_u32_e32 v5, 0x300, v7
-; GFX11-TRUE16-NEXT: v_or_b32_e32 v7, v48, v8
-; GFX11-TRUE16-NEXT: v_or_b32_e32 v4, v39, v4
-; GFX11-TRUE16-NEXT: s_addk_i32 s4, 0x300
-; GFX11-TRUE16-NEXT: v_or_b32_e32 v1, v85, v1
-; GFX11-TRUE16-NEXT: v_add_nc_u32_e32 v0, 0x300, v0
+; GFX11-TRUE16-NEXT: v_or_b32_e32 v1, v71, v1
+; GFX11-TRUE16-NEXT: v_add_nc_u32_e32 v12, 0x300, v2
+; GFX11-TRUE16-NEXT: v_and_b32_e32 v2, 0xff, v4
+; GFX11-TRUE16-NEXT: v_or_b32_e32 v4, v29, v5
+; GFX11-TRUE16-NEXT: v_and_b32_e32 v5, 0xff, v6
+; GFX11-TRUE16-NEXT: v_and_b32_e32 v6, 0xff, v7
+; GFX11-TRUE16-NEXT: v_add_nc_u32_e32 v7, 3, v24
+; GFX11-TRUE16-NEXT: v_or_b32_e32 v2, v27, v2
+; GFX11-TRUE16-NEXT: v_add_nc_u32_e32 v22, 0x300, v4
+; GFX11-TRUE16-NEXT: v_or_b32_e32 v4, v25, v5
+; GFX11-TRUE16-NEXT: v_or_b32_e32 v5, v23, v6
+; GFX11-TRUE16-NEXT: v_and_b32_e32 v6, 0xff, v7
+; GFX11-TRUE16-NEXT: v_add_nc_u32_e32 v11, 0x300, v2
+; GFX11-TRUE16-NEXT: v_add_nc_u32_e32 v2, 3, v18
+; GFX11-TRUE16-NEXT: v_add_nc_u32_e32 v18, 0x300, v4
+; GFX11-TRUE16-NEXT: v_add_nc_u32_e32 v10, 0x300, v5
+; GFX11-TRUE16-NEXT: v_or_b32_e32 v4, v21, v6
+; GFX11-TRUE16-NEXT: v_add_nc_u32_e32 v5, 3, v20
+; GFX11-TRUE16-NEXT: v_and_b32_e32 v2, 0xff, v2
+; GFX11-TRUE16-NEXT: v_add_nc_u32_e32 v6, 3, v34
+; GFX11-TRUE16-NEXT: v_add_nc_u32_e32 v7, 3, v16
+; GFX11-TRUE16-NEXT: v_add_nc_u32_e32 v16, 0x300, v4
+; GFX11-TRUE16-NEXT: v_and_b32_e32 v4, 0xff, v5
+; GFX11-TRUE16-NEXT: v_or_b32_e32 v2, v55, v2
+; GFX11-TRUE16-NEXT: v_and_b32_e32 v5, 0xff, v6
+; GFX11-TRUE16-NEXT: v_and_b32_e32 v6, 0xff, v7
+; GFX11-TRUE16-NEXT: v_add_nc_u32_e32 v7, 3, v32
+; GFX11-TRUE16-NEXT: v_or_b32_e32 v4, v19, v4
+; GFX11-TRUE16-NEXT: v_add_nc_u32_e32 v9, 0x300, v2
+; GFX11-TRUE16-NEXT: v_or_b32_e32 v2, v54, v5
+; GFX11-TRUE16-NEXT: v_or_b32_e32 v5, v17, v6
+; GFX11-TRUE16-NEXT: v_and_b32_e32 v6, 0xff, v7
+; GFX11-TRUE16-NEXT: v_add_nc_u32_e32 v17, 0x300, v4
+; GFX11-TRUE16-NEXT: v_add_nc_u32_e32 v4, 3, v36
+; GFX11-TRUE16-NEXT: v_add_nc_u32_e32 v8, 0x300, v2
+; GFX11-TRUE16-NEXT: v_add_nc_u32_e32 v2, 0x300, v5
+; GFX11-TRUE16-NEXT: v_or_b32_e32 v5, v53, v6
+; GFX11-TRUE16-NEXT: v_add_nc_u32_e32 v6, 3, v33
+; GFX11-TRUE16-NEXT: v_and_b32_e32 v4, 0xff, v4
+; GFX11-TRUE16-NEXT: v_add_nc_u32_e32 v19, 3, v31
+; GFX11-TRUE16-NEXT: v_add_nc_u32_e32 v20, 3, v37
+; GFX11-TRUE16-NEXT: v_add_nc_u32_e32 v7, 0x300, v5
+; GFX11-TRUE16-NEXT: v_and_b32_e32 v5, 0xff, v6
+; GFX11-TRUE16-NEXT: v_or_b32_e32 v4, v52, v4
+; GFX11-TRUE16-NEXT: v_and_b32_e32 v6, 0xff, v19
+; GFX11-TRUE16-NEXT: v_and_b32_e32 v19, 0xff, v20
+; GFX11-TRUE16-NEXT: v_add_nc_u32_e32 v20, 3, v38
+; GFX11-TRUE16-NEXT: v_or_b32_e32 v5, v51, v5
+; GFX11-TRUE16-NEXT: v_add_nc_u32_e32 v21, 0x300, v4
+; GFX11-TRUE16-NEXT: v_or_b32_e32 v4, v50, v6
+; GFX11-TRUE16-NEXT: v_or_b32_e32 v19, v49, v19
+; GFX11-TRUE16-NEXT: v_and_b32_e32 v20, 0xff, v20
+; GFX11-TRUE16-NEXT: v_add_nc_u32_e32 v23, 3, v35
+; GFX11-TRUE16-NEXT: v_add_nc_u32_e32 v6, 0x300, v5
+; GFX11-TRUE16-NEXT: v_add_nc_u32_e32 v24, 0x300, v4
+; GFX11-TRUE16-NEXT: v_add_nc_u32_e32 v5, 0x300, v19
+; GFX11-TRUE16-NEXT: v_or_b32_e32 v4, v48, v20
+; GFX11-TRUE16-NEXT: v_and_b32_e32 v19, 0xff, v23
; GFX11-TRUE16-NEXT: v_or_b32_e32 v3, v81, v3
-; GFX11-TRUE16-NEXT: v_add_nc_u32_e32 v2, 0x300, v2
-; GFX11-TRUE16-NEXT: v_add_nc_u32_e32 v6, 0x300, v6
-; GFX11-TRUE16-NEXT: v_add_nc_u32_e32 v7, 0x300, v7
-; GFX11-TRUE16-NEXT: v_add_nc_u32_e32 v4, 0x300, v4
-; GFX11-TRUE16-NEXT: v_and_b32_e64 v8, 0xffff, s4
-; GFX11-TRUE16-NEXT: v_and_b32_e32 v5, 0xffff, v5
-; GFX11-TRUE16-NEXT: v_and_b32_e32 v16, 0xffff, v16
; GFX11-TRUE16-NEXT: v_add_nc_u32_e32 v1, 0x300, v1
+; GFX11-TRUE16-NEXT: v_mov_b16_e32 v6.h, v24.l
+; GFX11-TRUE16-NEXT: v_add_nc_u32_e32 v20, 0x300, v4
+; GFX11-TRUE16-NEXT: v_or_b32_e32 v4, v39, v19
; GFX11-TRUE16-NEXT: v_add_nc_u32_e32 v3, 0x300, v3
-; GFX11-TRUE16-NEXT: v_lshl_or_b32 v4, v4, 16, v8
-; GFX11-TRUE16-NEXT: v_lshl_or_b32 v5, v7, 16, v5
-; GFX11-TRUE16-NEXT: v_and_b32_e32 v7, 0xffff, v20
-; GFX11-TRUE16-NEXT: v_and_b32_e32 v8, 0xffff, v21
-; GFX11-TRUE16-NEXT: v_and_b32_e32 v17, 0xffff, v17
-; GFX11-TRUE16-NEXT: v_and_b32_e32 v20, 0xffff, v6
-; GFX11-TRUE16-NEXT: v_lshl_or_b32 v9, v9, 16, v16
-; GFX11-TRUE16-NEXT: v_and_b32_e32 v15, 0xffff, v15
-; GFX11-TRUE16-NEXT: v_and_b32_e32 v12, 0xffff, v12
-; GFX11-TRUE16-NEXT: v_and_b32_e32 v16, 0xffff, v11
-; GFX11-TRUE16-NEXT: v_and_b32_e32 v2, 0xffff, v2
-; GFX11-TRUE16-NEXT: v_and_b32_e32 v0, 0xffff, v0
-; GFX11-TRUE16-NEXT: v_lshl_or_b32 v6, v23, 16, v7
-; GFX11-TRUE16-NEXT: v_lshl_or_b32 v7, v22, 16, v8
-; GFX11-TRUE16-NEXT: v_lshl_or_b32 v8, v19, 16, v17
-; GFX11-TRUE16-NEXT: v_lshl_or_b32 v10, v10, 16, v20
-; GFX11-TRUE16-NEXT: v_lshl_or_b32 v11, v18, 16, v15
-; GFX11-TRUE16-NEXT: v_lshl_or_b32 v12, v14, 16, v12
-; GFX11-TRUE16-NEXT: v_lshl_or_b32 v13, v13, 16, v16
-; GFX11-TRUE16-NEXT: v_lshl_or_b32 v14, v3, 16, v2
-; GFX11-TRUE16-NEXT: v_lshl_or_b32 v15, v1, 16, v0
-; GFX11-TRUE16-NEXT: v_dual_mov_b32 v0, s0 :: v_dual_mov_b32 v1, s1
-; GFX11-TRUE16-NEXT: v_dual_mov_b32 v2, s2 :: v_dual_mov_b32 v3, s3
+; GFX11-TRUE16-NEXT: v_mov_b16_e32 v7.h, v21.l
+; GFX11-TRUE16-NEXT: v_mov_b16_e32 v8.h, v2.l
+; GFX11-TRUE16-NEXT: v_mov_b16_e32 v5.h, v20.l
+; GFX11-TRUE16-NEXT: v_dual_mov_b32 v4, s4 :: v_dual_add_nc_u32 v19, 0x300, v4
+; GFX11-TRUE16-NEXT: v_mov_b16_e32 v9.h, v17.l
+; GFX11-TRUE16-NEXT: v_mov_b16_e32 v10.h, v16.l
+; GFX11-TRUE16-NEXT: v_mov_b16_e32 v11.h, v18.l
+; GFX11-TRUE16-NEXT: s_delay_alu instid0(VALU_DEP_4)
+; GFX11-TRUE16-NEXT: v_mov_b16_e32 v4.h, v19.l
+; GFX11-TRUE16-NEXT: v_mov_b16_e32 v12.h, v22.l
+; GFX11-TRUE16-NEXT: v_mov_b16_e32 v13.h, v1.l
+; GFX11-TRUE16-NEXT: v_mov_b16_e32 v14.h, v3.l
+; GFX11-TRUE16-NEXT: v_dual_mov_b32 v1, s1 :: v_dual_mov_b32 v2, s2
+; GFX11-TRUE16-NEXT: v_mov_b32_e32 v3, s3
; GFX11-TRUE16-NEXT: .LBB99_3: ; %end
; GFX11-TRUE16-NEXT: s_waitcnt vmcnt(0)
; GFX11-TRUE16-NEXT: s_setpc_b64 s[30:31]
@@ -73991,358 +75571,687 @@ define inreg <32 x half> @bitcast_v32bf16_to_v32f16_scalar(<32 x bfloat> inreg %
; GFX9-NEXT: s_waitcnt vmcnt(0)
; GFX9-NEXT: s_setpc_b64 s[30:31]
;
-; GFX11-LABEL: bitcast_v32bf16_to_v32f16_scalar:
-; GFX11: ; %bb.0:
-; GFX11-NEXT: s_waitcnt vmcnt(0) expcnt(0) lgkmcnt(0)
-; GFX11-NEXT: s_mov_b32 s15, s3
-; GFX11-NEXT: s_mov_b32 s14, s2
-; GFX11-NEXT: s_mov_b32 s13, s1
-; GFX11-NEXT: s_mov_b32 s12, s0
-; GFX11-NEXT: s_cmp_lg_u32 s28, 0
-; GFX11-NEXT: s_mov_b32 s0, 0
-; GFX11-NEXT: s_cbranch_scc0 .LBB103_3
-; GFX11-NEXT: ; %bb.1: ; %Flow
-; GFX11-NEXT: s_and_not1_b32 vcc_lo, exec_lo, s0
-; GFX11-NEXT: s_cbranch_vccnz .LBB103_4
-; GFX11-NEXT: .LBB103_2: ; %cmp.true
-; GFX11-NEXT: s_and_b32 s0, s12, 0xffff0000
-; GFX11-NEXT: s_lshl_b32 s1, s12, 16
-; GFX11-NEXT: v_add_f32_e64 v0, 0x40c00000, s0
-; GFX11-NEXT: s_and_b32 s0, s13, 0xffff0000
-; GFX11-NEXT: v_add_f32_e64 v1, 0x40c00000, s1
-; GFX11-NEXT: v_add_f32_e64 v4, 0x40c00000, s0
-; GFX11-NEXT: s_lshl_b32 s1, s13, 16
-; GFX11-NEXT: v_bfe_u32 v2, v0, 16, 1
-; GFX11-NEXT: v_add_f32_e64 v5, 0x40c00000, s1
-; GFX11-NEXT: v_bfe_u32 v3, v1, 16, 1
-; GFX11-NEXT: v_bfe_u32 v7, v4, 16, 1
-; GFX11-NEXT: v_or_b32_e32 v6, 0x400000, v0
-; GFX11-NEXT: v_add_nc_u32_e32 v2, v2, v0
-; GFX11-NEXT: v_or_b32_e32 v8, 0x400000, v1
-; GFX11-NEXT: v_cmp_u_f32_e32 vcc_lo, v0, v0
-; GFX11-NEXT: v_add_nc_u32_e32 v7, v7, v4
-; GFX11-NEXT: v_bfe_u32 v9, v5, 16, 1
-; GFX11-NEXT: v_add_nc_u32_e32 v2, 0x7fff, v2
-; GFX11-NEXT: v_add_nc_u32_e32 v3, v3, v1
-; GFX11-NEXT: s_and_b32 s2, s14, 0xffff0000
-; GFX11-NEXT: s_lshl_b32 s0, s14, 16
-; GFX11-NEXT: s_and_b32 s1, s27, 0xffff0000
-; GFX11-NEXT: v_cndmask_b32_e32 v0, v2, v6, vcc_lo
-; GFX11-NEXT: v_cmp_u_f32_e32 vcc_lo, v1, v1
-; GFX11-NEXT: v_or_b32_e32 v2, 0x400000, v4
-; GFX11-NEXT: v_add_nc_u32_e32 v6, 0x7fff, v7
-; GFX11-NEXT: v_add_nc_u32_e32 v3, 0x7fff, v3
-; GFX11-NEXT: v_add_f32_e64 v7, 0x40c00000, s2
-; GFX11-NEXT: v_add_f32_e64 v33, 0x40c00000, s1
-; GFX11-NEXT: v_lshrrev_b32_e32 v0, 16, v0
-; GFX11-NEXT: s_delay_alu instid0(VALU_DEP_4)
-; GFX11-NEXT: v_cndmask_b32_e32 v1, v3, v8, vcc_lo
-; GFX11-NEXT: v_add_nc_u32_e32 v3, v9, v5
-; GFX11-NEXT: v_cmp_u_f32_e32 vcc_lo, v4, v4
-; GFX11-NEXT: v_or_b32_e32 v4, 0x400000, v5
-; GFX11-NEXT: v_add_f32_e64 v8, 0x40c00000, s0
-; GFX11-NEXT: s_and_b32 s0, s15, 0xffff0000
-; GFX11-NEXT: v_dual_cndmask_b32 v2, v6, v2 :: v_dual_add_nc_u32 v3, 0x7fff, v3
-; GFX11-NEXT: v_bfe_u32 v6, v7, 16, 1
-; GFX11-NEXT: v_cmp_u_f32_e32 vcc_lo, v5, v5
-; GFX11-NEXT: v_add_f32_e64 v5, 0x40c00000, s0
-; GFX11-NEXT: s_lshl_b32 s0, s15, 16
-; GFX11-NEXT: v_lshrrev_b32_e32 v16, 16, v2
-; GFX11-NEXT: v_or_b32_e32 v38, 0x400000, v33
-; GFX11-NEXT: v_dual_cndmask_b32 v2, v3, v4 :: v_dual_add_nc_u32 v3, v6, v7
-; GFX11-NEXT: v_bfe_u32 v4, v8, 16, 1
-; GFX11-NEXT: v_or_b32_e32 v6, 0x400000, v7
-; GFX11-NEXT: v_bfe_u32 v9, v5, 16, 1
-; GFX11-NEXT: v_cmp_u_f32_e32 vcc_lo, v7, v7
-; GFX11-NEXT: v_add_nc_u32_e32 v3, 0x7fff, v3
-; GFX11-NEXT: v_add_nc_u32_e32 v4, v4, v8
-; GFX11-NEXT: v_or_b32_e32 v7, 0x400000, v8
-; GFX11-NEXT: v_add_nc_u32_e32 v9, v9, v5
-; GFX11-NEXT: v_bfe_u32 v36, v33, 16, 1
-; GFX11-NEXT: v_cndmask_b32_e32 v3, v3, v6, vcc_lo
-; GFX11-NEXT: v_add_f32_e64 v6, 0x40c00000, s0
-; GFX11-NEXT: v_add_nc_u32_e32 v4, 0x7fff, v4
-; GFX11-NEXT: v_cmp_u_f32_e32 vcc_lo, v8, v8
-; GFX11-NEXT: v_or_b32_e32 v8, 0x400000, v5
-; GFX11-NEXT: v_lshrrev_b32_e32 v17, 16, v3
-; GFX11-NEXT: v_bfe_u32 v3, v6, 16, 1
-; GFX11-NEXT: s_and_b32 s0, s16, 0xffff0000
-; GFX11-NEXT: v_dual_cndmask_b32 v4, v4, v7 :: v_dual_add_nc_u32 v7, 0x7fff, v9
-; GFX11-NEXT: v_cmp_u_f32_e32 vcc_lo, v5, v5
-; GFX11-NEXT: s_delay_alu instid0(VALU_DEP_3) | instskip(SKIP_1) | instid1(VALU_DEP_4)
-; GFX11-NEXT: v_add_nc_u32_e32 v9, v3, v6
-; GFX11-NEXT: v_lshrrev_b32_e32 v1, 16, v1
-; GFX11-NEXT: v_lshrrev_b32_e32 v3, 16, v4
-; GFX11-NEXT: v_add_f32_e64 v4, 0x40c00000, s0
-; GFX11-NEXT: v_cndmask_b32_e32 v5, v7, v8, vcc_lo
-; GFX11-NEXT: s_lshl_b32 s0, s16, 16
-; GFX11-NEXT: v_add_nc_u32_e32 v7, 0x7fff, v9
-; GFX11-NEXT: v_or_b32_e32 v8, 0x400000, v6
-; GFX11-NEXT: v_bfe_u32 v9, v4, 16, 1
-; GFX11-NEXT: v_lshrrev_b32_e32 v18, 16, v5
-; GFX11-NEXT: v_add_f32_e64 v5, 0x40c00000, s0
-; GFX11-NEXT: v_cmp_u_f32_e32 vcc_lo, v6, v6
-; GFX11-NEXT: s_and_b32 s0, s17, 0xffff0000
-; GFX11-NEXT: v_lshrrev_b32_e32 v2, 16, v2
-; GFX11-NEXT: v_cndmask_b32_e32 v6, v7, v8, vcc_lo
-; GFX11-NEXT: v_add_nc_u32_e32 v7, v9, v4
-; GFX11-NEXT: v_bfe_u32 v8, v5, 16, 1
-; GFX11-NEXT: v_add_f32_e64 v9, 0x40c00000, s0
-; GFX11-NEXT: v_cmp_u_f32_e32 vcc_lo, v4, v4
-; GFX11-NEXT: v_lshrrev_b32_e32 v19, 16, v6
-; GFX11-NEXT: v_add_nc_u32_e32 v6, 0x7fff, v7
-; GFX11-NEXT: v_or_b32_e32 v7, 0x400000, v4
-; GFX11-NEXT: v_add_nc_u32_e32 v8, v8, v5
-; GFX11-NEXT: v_bfe_u32 v10, v9, 16, 1
-; GFX11-NEXT: s_lshl_b32 s0, s17, 16
-; GFX11-NEXT: v_and_b32_e32 v19, 0xffff, v19
-; GFX11-NEXT: s_delay_alu instid0(VALU_DEP_3)
-; GFX11-NEXT: v_dual_cndmask_b32 v4, v6, v7 :: v_dual_add_nc_u32 v7, 0x7fff, v8
-; GFX11-NEXT: v_or_b32_e32 v8, 0x400000, v5
-; GFX11-NEXT: v_cmp_u_f32_e32 vcc_lo, v5, v5
-; GFX11-NEXT: v_add_f32_e64 v6, 0x40c00000, s0
-; GFX11-NEXT: s_and_b32 s0, s18, 0xffff0000
-; GFX11-NEXT: v_lshrrev_b32_e32 v4, 16, v4
-; GFX11-NEXT: v_cndmask_b32_e32 v5, v7, v8, vcc_lo
-; GFX11-NEXT: v_or_b32_e32 v8, 0x400000, v9
-; GFX11-NEXT: v_cmp_u_f32_e32 vcc_lo, v9, v9
-; GFX11-NEXT: v_add_nc_u32_e32 v10, v10, v9
-; GFX11-NEXT: v_bfe_u32 v11, v6, 16, 1
-; GFX11-NEXT: v_lshrrev_b32_e32 v20, 16, v5
-; GFX11-NEXT: v_add_f32_e64 v5, 0x40c00000, s0
-; GFX11-NEXT: s_lshl_b32 s0, s18, 16
-; GFX11-NEXT: v_add_nc_u32_e32 v7, 0x7fff, v10
-; GFX11-NEXT: v_add_nc_u32_e32 v10, v11, v6
-; GFX11-NEXT: v_or_b32_e32 v9, 0x400000, v6
-; GFX11-NEXT: v_or_b32_e32 v11, 0x400000, v5
-; GFX11-NEXT: v_and_b32_e32 v20, 0xffff, v20
-; GFX11-NEXT: s_delay_alu instid0(VALU_DEP_4) | instskip(SKIP_2) | instid1(VALU_DEP_4)
-; GFX11-NEXT: v_dual_cndmask_b32 v7, v7, v8 :: v_dual_add_nc_u32 v8, 0x7fff, v10
-; GFX11-NEXT: v_bfe_u32 v10, v5, 16, 1
-; GFX11-NEXT: v_cmp_u_f32_e32 vcc_lo, v6, v6
-; GFX11-NEXT: v_lshl_or_b32 v4, v4, 16, v20
-; GFX11-NEXT: s_delay_alu instid0(VALU_DEP_4)
-; GFX11-NEXT: v_lshrrev_b32_e32 v21, 16, v7
-; GFX11-NEXT: v_add_f32_e64 v7, 0x40c00000, s0
-; GFX11-NEXT: s_and_b32 s0, s19, 0xffff0000
-; GFX11-NEXT: v_cndmask_b32_e32 v6, v8, v9, vcc_lo
-; GFX11-NEXT: v_add_nc_u32_e32 v8, v10, v5
-; GFX11-NEXT: v_add_f32_e64 v10, 0x40c00000, s0
-; GFX11-NEXT: v_bfe_u32 v9, v7, 16, 1
-; GFX11-NEXT: v_cmp_u_f32_e32 vcc_lo, v5, v5
-; GFX11-NEXT: s_lshl_b32 s0, s19, 16
-; GFX11-NEXT: v_add_nc_u32_e32 v8, 0x7fff, v8
-; GFX11-NEXT: v_bfe_u32 v12, v10, 16, 1
-; GFX11-NEXT: v_add_nc_u32_e32 v9, v9, v7
-; GFX11-NEXT: v_lshrrev_b32_e32 v6, 16, v6
-; GFX11-NEXT: s_delay_alu instid0(VALU_DEP_4) | instskip(SKIP_1) | instid1(VALU_DEP_4)
-; GFX11-NEXT: v_cndmask_b32_e32 v5, v8, v11, vcc_lo
-; GFX11-NEXT: v_add_f32_e64 v8, 0x40c00000, s0
-; GFX11-NEXT: v_add_nc_u32_e32 v9, 0x7fff, v9
-; GFX11-NEXT: v_or_b32_e32 v11, 0x400000, v7
-; GFX11-NEXT: v_add_nc_u32_e32 v12, v12, v10
-; GFX11-NEXT: v_cmp_u_f32_e32 vcc_lo, v7, v7
-; GFX11-NEXT: v_bfe_u32 v13, v8, 16, 1
-; GFX11-NEXT: s_and_b32 s0, s20, 0xffff0000
-; GFX11-NEXT: v_lshrrev_b32_e32 v5, 16, v5
-; GFX11-NEXT: v_cndmask_b32_e32 v7, v9, v11, vcc_lo
-; GFX11-NEXT: v_add_nc_u32_e32 v9, 0x7fff, v12
-; GFX11-NEXT: v_or_b32_e32 v11, 0x400000, v10
-; GFX11-NEXT: v_cmp_u_f32_e32 vcc_lo, v10, v10
-; GFX11-NEXT: v_add_nc_u32_e32 v12, v13, v8
-; GFX11-NEXT: v_add_f32_e64 v13, 0x40c00000, s0
-; GFX11-NEXT: s_lshl_b32 s0, s20, 16
-; GFX11-NEXT: v_lshrrev_b32_e32 v7, 16, v7
-; GFX11-NEXT: s_delay_alu instid0(VALU_DEP_3) | instskip(SKIP_3) | instid1(VALU_DEP_4)
-; GFX11-NEXT: v_dual_cndmask_b32 v9, v9, v11 :: v_dual_add_nc_u32 v10, 0x7fff, v12
-; GFX11-NEXT: v_or_b32_e32 v11, 0x400000, v8
-; GFX11-NEXT: v_bfe_u32 v12, v13, 16, 1
-; GFX11-NEXT: v_cmp_u_f32_e32 vcc_lo, v8, v8
-; GFX11-NEXT: v_lshrrev_b32_e32 v22, 16, v9
-; GFX11-NEXT: v_add_f32_e64 v9, 0x40c00000, s0
-; GFX11-NEXT: s_and_b32 s0, s21, 0xffff0000
-; GFX11-NEXT: v_or_b32_e32 v14, 0x400000, v13
-; GFX11-NEXT: v_cndmask_b32_e32 v8, v10, v11, vcc_lo
-; GFX11-NEXT: v_add_nc_u32_e32 v10, v12, v13
-; GFX11-NEXT: v_bfe_u32 v11, v9, 16, 1
-; GFX11-NEXT: v_add_f32_e64 v12, 0x40c00000, s0
-; GFX11-NEXT: v_cmp_u_f32_e32 vcc_lo, v13, v13
-; GFX11-NEXT: s_lshl_b32 s0, s21, 16
-; GFX11-NEXT: v_add_nc_u32_e32 v10, 0x7fff, v10
-; GFX11-NEXT: v_add_nc_u32_e32 v11, v11, v9
-; GFX11-NEXT: v_bfe_u32 v15, v12, 16, 1
-; GFX11-NEXT: v_add_f32_e64 v13, 0x40c00000, s0
-; GFX11-NEXT: s_and_b32 s0, s22, 0xffff0000
-; GFX11-NEXT: s_delay_alu instid0(VALU_DEP_3) | instskip(SKIP_3) | instid1(VALU_DEP_4)
-; GFX11-NEXT: v_dual_cndmask_b32 v10, v10, v14 :: v_dual_add_nc_u32 v11, 0x7fff, v11
-; GFX11-NEXT: v_or_b32_e32 v14, 0x400000, v9
-; GFX11-NEXT: v_add_nc_u32_e32 v15, v15, v12
-; GFX11-NEXT: v_cmp_u_f32_e32 vcc_lo, v9, v9
-; GFX11-NEXT: v_lshrrev_b32_e32 v23, 16, v10
-; GFX11-NEXT: v_bfe_u32 v10, v13, 16, 1
-; GFX11-NEXT: v_lshrrev_b32_e32 v8, 16, v8
-; GFX11-NEXT: v_cndmask_b32_e32 v9, v11, v14, vcc_lo
-; GFX11-NEXT: v_add_nc_u32_e32 v11, 0x7fff, v15
-; GFX11-NEXT: v_or_b32_e32 v14, 0x400000, v12
-; GFX11-NEXT: v_cmp_u_f32_e32 vcc_lo, v12, v12
-; GFX11-NEXT: v_add_nc_u32_e32 v10, v10, v13
-; GFX11-NEXT: v_add_f32_e64 v15, 0x40c00000, s0
-; GFX11-NEXT: s_lshl_b32 s0, s22, 16
-; GFX11-NEXT: v_or_b32_e32 v12, 0x400000, v13
-; GFX11-NEXT: v_cndmask_b32_e32 v11, v11, v14, vcc_lo
-; GFX11-NEXT: v_add_nc_u32_e32 v10, 0x7fff, v10
-; GFX11-NEXT: v_bfe_u32 v14, v15, 16, 1
-; GFX11-NEXT: v_cmp_u_f32_e32 vcc_lo, v13, v13
-; GFX11-NEXT: v_lshrrev_b32_e32 v9, 16, v9
-; GFX11-NEXT: v_lshrrev_b32_e32 v24, 16, v11
-; GFX11-NEXT: v_add_f32_e64 v11, 0x40c00000, s0
-; GFX11-NEXT: s_and_b32 s0, s23, 0xffff0000
-; GFX11-NEXT: v_cndmask_b32_e32 v10, v10, v12, vcc_lo
-; GFX11-NEXT: v_add_nc_u32_e32 v12, v14, v15
-; GFX11-NEXT: v_add_f32_e64 v14, 0x40c00000, s0
-; GFX11-NEXT: v_bfe_u32 v13, v11, 16, 1
-; GFX11-NEXT: v_cmp_u_f32_e32 vcc_lo, v15, v15
-; GFX11-NEXT: v_lshrrev_b32_e32 v25, 16, v10
-; GFX11-NEXT: v_add_nc_u32_e32 v10, 0x7fff, v12
-; GFX11-NEXT: v_or_b32_e32 v12, 0x400000, v15
-; GFX11-NEXT: v_add_nc_u32_e32 v13, v13, v11
-; GFX11-NEXT: v_bfe_u32 v26, v14, 16, 1
-; GFX11-NEXT: s_lshl_b32 s0, s23, 16
-; GFX11-NEXT: v_or_b32_e32 v15, 0x400000, v11
-; GFX11-NEXT: v_cndmask_b32_e32 v10, v10, v12, vcc_lo
-; GFX11-NEXT: v_add_f32_e64 v12, 0x40c00000, s0
-; GFX11-NEXT: v_add_nc_u32_e32 v13, 0x7fff, v13
-; GFX11-NEXT: v_add_nc_u32_e32 v26, v26, v14
-; GFX11-NEXT: v_cmp_u_f32_e32 vcc_lo, v11, v11
-; GFX11-NEXT: s_and_b32 s0, s24, 0xffff0000
-; GFX11-NEXT: v_bfe_u32 v27, v12, 16, 1
-; GFX11-NEXT: v_lshrrev_b32_e32 v10, 16, v10
-; GFX11-NEXT: v_and_b32_e32 v25, 0xffff, v25
-; GFX11-NEXT: v_cndmask_b32_e32 v11, v13, v15, vcc_lo
-; GFX11-NEXT: v_add_nc_u32_e32 v13, 0x7fff, v26
-; GFX11-NEXT: v_or_b32_e32 v15, 0x400000, v14
-; GFX11-NEXT: v_cmp_u_f32_e32 vcc_lo, v14, v14
-; GFX11-NEXT: v_add_nc_u32_e32 v26, v27, v12
-; GFX11-NEXT: v_add_f32_e64 v27, 0x40c00000, s0
-; GFX11-NEXT: s_lshl_b32 s0, s24, 16
-; GFX11-NEXT: v_lshrrev_b32_e32 v11, 16, v11
-; GFX11-NEXT: s_delay_alu instid0(VALU_DEP_3) | instskip(SKIP_3) | instid1(VALU_DEP_4)
-; GFX11-NEXT: v_dual_cndmask_b32 v13, v13, v15 :: v_dual_add_nc_u32 v14, 0x7fff, v26
-; GFX11-NEXT: v_or_b32_e32 v15, 0x400000, v12
-; GFX11-NEXT: v_bfe_u32 v26, v27, 16, 1
-; GFX11-NEXT: v_cmp_u_f32_e32 vcc_lo, v12, v12
-; GFX11-NEXT: v_lshrrev_b32_e32 v28, 16, v13
-; GFX11-NEXT: v_add_f32_e64 v13, 0x40c00000, s0
-; GFX11-NEXT: s_and_b32 s0, s25, 0xffff0000
-; GFX11-NEXT: v_or_b32_e32 v29, 0x400000, v27
-; GFX11-NEXT: v_cndmask_b32_e32 v12, v14, v15, vcc_lo
-; GFX11-NEXT: v_add_nc_u32_e32 v14, v26, v27
-; GFX11-NEXT: v_bfe_u32 v15, v13, 16, 1
-; GFX11-NEXT: v_add_f32_e64 v26, 0x40c00000, s0
-; GFX11-NEXT: v_cmp_u_f32_e32 vcc_lo, v27, v27
-; GFX11-NEXT: s_lshl_b32 s0, s25, 16
-; GFX11-NEXT: v_add_nc_u32_e32 v14, 0x7fff, v14
-; GFX11-NEXT: v_add_nc_u32_e32 v15, v15, v13
-; GFX11-NEXT: v_bfe_u32 v30, v26, 16, 1
-; GFX11-NEXT: v_add_f32_e64 v27, 0x40c00000, s0
-; GFX11-NEXT: s_and_b32 s0, s26, 0xffff0000
-; GFX11-NEXT: s_delay_alu instid0(VALU_DEP_3) | instskip(SKIP_3) | instid1(VALU_DEP_4)
-; GFX11-NEXT: v_dual_cndmask_b32 v14, v14, v29 :: v_dual_add_nc_u32 v15, 0x7fff, v15
-; GFX11-NEXT: v_or_b32_e32 v29, 0x400000, v13
-; GFX11-NEXT: v_add_nc_u32_e32 v30, v30, v26
-; GFX11-NEXT: v_cmp_u_f32_e32 vcc_lo, v13, v13
-; GFX11-NEXT: v_lshrrev_b32_e32 v31, 16, v14
-; GFX11-NEXT: v_bfe_u32 v14, v27, 16, 1
-; GFX11-NEXT: v_or_b32_e32 v32, 0x400000, v27
-; GFX11-NEXT: v_lshrrev_b32_e32 v12, 16, v12
-; GFX11-NEXT: v_cndmask_b32_e32 v13, v15, v29, vcc_lo
-; GFX11-NEXT: v_add_nc_u32_e32 v15, 0x7fff, v30
-; GFX11-NEXT: v_or_b32_e32 v29, 0x400000, v26
-; GFX11-NEXT: v_cmp_u_f32_e32 vcc_lo, v26, v26
-; GFX11-NEXT: v_add_f32_e64 v30, 0x40c00000, s0
-; GFX11-NEXT: s_lshl_b32 s0, s27, 16
-; GFX11-NEXT: v_add_nc_u32_e32 v14, v14, v27
-; GFX11-NEXT: v_lshrrev_b32_e32 v13, 16, v13
-; GFX11-NEXT: v_cndmask_b32_e32 v15, v15, v29, vcc_lo
-; GFX11-NEXT: v_add_f32_e64 v29, 0x40c00000, s0
-; GFX11-NEXT: s_lshl_b32 s0, s26, 16
-; GFX11-NEXT: v_add_nc_u32_e32 v14, 0x7fff, v14
-; GFX11-NEXT: v_add_f32_e64 v35, 0x40c00000, s0
-; GFX11-NEXT: v_cmp_u_f32_e32 vcc_lo, v27, v27
-; GFX11-NEXT: v_bfe_u32 v34, v29, 16, 1
-; GFX11-NEXT: v_or_b32_e32 v37, 0x400000, v29
-; GFX11-NEXT: v_bfe_u32 v26, v30, 16, 1
-; GFX11-NEXT: v_cndmask_b32_e32 v14, v14, v32, vcc_lo
-; GFX11-NEXT: s_delay_alu instid0(VALU_DEP_4)
-; GFX11-NEXT: v_add_nc_u32_e32 v27, v34, v29
-; GFX11-NEXT: v_bfe_u32 v32, v35, 16, 1
-; GFX11-NEXT: v_add_nc_u32_e32 v34, v36, v33
-; GFX11-NEXT: v_cmp_u_f32_e32 vcc_lo, v29, v29
-; GFX11-NEXT: v_add_nc_u32_e32 v26, v26, v30
-; GFX11-NEXT: v_add_nc_u32_e32 v27, 0x7fff, v27
-; GFX11-NEXT: v_add_nc_u32_e32 v32, v32, v35
-; GFX11-NEXT: v_add_nc_u32_e32 v34, 0x7fff, v34
-; GFX11-NEXT: v_or_b32_e32 v36, 0x400000, v30
-; GFX11-NEXT: s_delay_alu instid0(VALU_DEP_4)
-; GFX11-NEXT: v_dual_cndmask_b32 v27, v27, v37 :: v_dual_add_nc_u32 v26, 0x7fff, v26
-; GFX11-NEXT: v_cmp_u_f32_e32 vcc_lo, v33, v33
-; GFX11-NEXT: v_add_nc_u32_e32 v29, 0x7fff, v32
-; GFX11-NEXT: v_or_b32_e32 v32, 0x400000, v35
-; GFX11-NEXT: v_lshrrev_b32_e32 v14, 16, v14
-; GFX11-NEXT: v_lshrrev_b32_e32 v27, 16, v27
-; GFX11-NEXT: v_cndmask_b32_e32 v33, v34, v38, vcc_lo
-; GFX11-NEXT: v_cmp_u_f32_e32 vcc_lo, v35, v35
-; GFX11-NEXT: v_and_b32_e32 v34, 0xffff, v11
-; GFX11-NEXT: s_delay_alu instid0(VALU_DEP_4)
-; GFX11-NEXT: v_and_b32_e32 v27, 0xffff, v27
-; GFX11-NEXT: v_cndmask_b32_e32 v29, v29, v32, vcc_lo
-; GFX11-NEXT: v_cmp_u_f32_e32 vcc_lo, v30, v30
-; GFX11-NEXT: v_lshrrev_b32_e32 v30, 16, v33
-; GFX11-NEXT: v_lshrrev_b32_e32 v32, 16, v15
-; GFX11-NEXT: v_and_b32_e32 v33, 0xffff, v12
-; GFX11-NEXT: v_lshrrev_b32_e32 v29, 16, v29
-; GFX11-NEXT: v_cndmask_b32_e32 v26, v26, v36, vcc_lo
-; GFX11-NEXT: v_lshl_or_b32 v15, v30, 16, v27
-; GFX11-NEXT: v_and_b32_e32 v30, 0xffff, v13
-; GFX11-NEXT: v_lshl_or_b32 v11, v28, 16, v33
-; GFX11-NEXT: v_and_b32_e32 v27, 0xffff, v29
-; GFX11-NEXT: v_lshrrev_b32_e32 v26, 16, v26
-; GFX11-NEXT: v_and_b32_e32 v29, 0xffff, v14
-; GFX11-NEXT: v_and_b32_e32 v28, 0xffff, v7
-; GFX11-NEXT: v_lshl_or_b32 v12, v31, 16, v30
-; GFX11-NEXT: v_lshl_or_b32 v10, v10, 16, v34
-; GFX11-NEXT: v_lshl_or_b32 v14, v26, 16, v27
-; GFX11-NEXT: v_lshl_or_b32 v13, v32, 16, v29
-; GFX11-NEXT: v_and_b32_e32 v26, 0xffff, v9
-; GFX11-NEXT: v_and_b32_e32 v27, 0xffff, v8
-; GFX11-NEXT: v_and_b32_e32 v29, 0xffff, v6
-; GFX11-NEXT: v_lshl_or_b32 v6, v5, 16, v28
-; GFX11-NEXT: v_lshl_or_b32 v9, v24, 16, v25
-; GFX11-NEXT: v_lshl_or_b32 v8, v23, 16, v26
-; GFX11-NEXT: v_lshl_or_b32 v7, v22, 16, v27
-; GFX11-NEXT: v_lshl_or_b32 v5, v21, 16, v29
-; GFX11-NEXT: v_and_b32_e32 v21, 0xffff, v3
-; GFX11-NEXT: v_and_b32_e32 v22, 0xffff, v2
-; GFX11-NEXT: v_and_b32_e32 v23, 0xffff, v1
-; GFX11-NEXT: v_lshl_or_b32 v3, v18, 16, v19
-; GFX11-NEXT: s_delay_alu instid0(VALU_DEP_4) | instskip(NEXT) | instid1(VALU_DEP_4)
-; GFX11-NEXT: v_lshl_or_b32 v2, v17, 16, v21
-; GFX11-NEXT: v_lshl_or_b32 v1, v16, 16, v22
-; GFX11-NEXT: s_delay_alu instid0(VALU_DEP_4)
-; GFX11-NEXT: v_lshl_or_b32 v0, v0, 16, v23
-; GFX11-NEXT: s_setpc_b64 s[30:31]
-; GFX11-NEXT: .LBB103_3:
-; GFX11-NEXT: s_branch .LBB103_2
-; GFX11-NEXT: .LBB103_4:
-; GFX11-NEXT: v_dual_mov_b32 v0, s12 :: v_dual_mov_b32 v1, s13
-; GFX11-NEXT: v_dual_mov_b32 v2, s14 :: v_dual_mov_b32 v3, s15
-; GFX11-NEXT: v_dual_mov_b32 v4, s16 :: v_dual_mov_b32 v5, s17
-; GFX11-NEXT: v_dual_mov_b32 v6, s18 :: v_dual_mov_b32 v7, s19
-; GFX11-NEXT: v_dual_mov_b32 v8, s20 :: v_dual_mov_b32 v9, s21
-; GFX11-NEXT: v_dual_mov_b32 v10, s22 :: v_dual_mov_b32 v11, s23
-; GFX11-NEXT: v_dual_mov_b32 v12, s24 :: v_dual_mov_b32 v13, s25
-; GFX11-NEXT: v_dual_mov_b32 v14, s26 :: v_dual_mov_b32 v15, s27
-; GFX11-NEXT: s_setpc_b64 s[30:31]
+; GFX11-TRUE16-LABEL: bitcast_v32bf16_to_v32f16_scalar:
+; GFX11-TRUE16: ; %bb.0:
+; GFX11-TRUE16-NEXT: s_waitcnt vmcnt(0) expcnt(0) lgkmcnt(0)
+; GFX11-TRUE16-NEXT: s_mov_b32 s15, s3
+; GFX11-TRUE16-NEXT: s_mov_b32 s14, s2
+; GFX11-TRUE16-NEXT: s_mov_b32 s13, s1
+; GFX11-TRUE16-NEXT: s_mov_b32 s12, s0
+; GFX11-TRUE16-NEXT: s_cmp_lg_u32 s28, 0
+; GFX11-TRUE16-NEXT: s_mov_b32 s0, 0
+; GFX11-TRUE16-NEXT: s_cbranch_scc0 .LBB103_3
+; GFX11-TRUE16-NEXT: ; %bb.1: ; %Flow
+; GFX11-TRUE16-NEXT: s_and_not1_b32 vcc_lo, exec_lo, s0
+; GFX11-TRUE16-NEXT: s_cbranch_vccnz .LBB103_4
+; GFX11-TRUE16-NEXT: .LBB103_2: ; %cmp.true
+; GFX11-TRUE16-NEXT: s_and_b32 s0, s12, 0xffff0000
+; GFX11-TRUE16-NEXT: s_lshl_b32 s1, s12, 16
+; GFX11-TRUE16-NEXT: v_add_f32_e64 v0, 0x40c00000, s0
+; GFX11-TRUE16-NEXT: v_add_f32_e64 v1, 0x40c00000, s1
+; GFX11-TRUE16-NEXT: s_and_b32 s0, s13, 0xffff0000
+; GFX11-TRUE16-NEXT: s_lshl_b32 s1, s13, 16
+; GFX11-TRUE16-NEXT: v_add_f32_e64 v4, 0x40c00000, s0
+; GFX11-TRUE16-NEXT: v_bfe_u32 v2, v0, 16, 1
+; GFX11-TRUE16-NEXT: v_bfe_u32 v3, v1, 16, 1
+; GFX11-TRUE16-NEXT: v_add_f32_e64 v5, 0x40c00000, s1
+; GFX11-TRUE16-NEXT: v_or_b32_e32 v6, 0x400000, v0
+; GFX11-TRUE16-NEXT: v_bfe_u32 v7, v4, 16, 1
+; GFX11-TRUE16-NEXT: v_add_nc_u32_e32 v2, v2, v0
+; GFX11-TRUE16-NEXT: v_or_b32_e32 v8, 0x400000, v1
+; GFX11-TRUE16-NEXT: v_cmp_u_f32_e32 vcc_lo, v0, v0
+; GFX11-TRUE16-NEXT: v_bfe_u32 v9, v5, 16, 1
+; GFX11-TRUE16-NEXT: v_add_nc_u32_e32 v7, v7, v4
+; GFX11-TRUE16-NEXT: v_add_nc_u32_e32 v2, 0x7fff, v2
+; GFX11-TRUE16-NEXT: v_add_nc_u32_e32 v3, v3, v1
+; GFX11-TRUE16-NEXT: s_and_b32 s2, s14, 0xffff0000
+; GFX11-TRUE16-NEXT: s_lshl_b32 s0, s14, 16
+; GFX11-TRUE16-NEXT: s_and_b32 s1, s25, 0xffff0000
+; GFX11-TRUE16-NEXT: s_delay_alu instid0(VALU_DEP_1)
+; GFX11-TRUE16-NEXT: v_dual_cndmask_b32 v0, v2, v6 :: v_dual_add_nc_u32 v3, 0x7fff, v3
+; GFX11-TRUE16-NEXT: v_cmp_u_f32_e32 vcc_lo, v1, v1
+; GFX11-TRUE16-NEXT: v_or_b32_e32 v2, 0x400000, v4
+; GFX11-TRUE16-NEXT: v_add_nc_u32_e32 v6, 0x7fff, v7
+; GFX11-TRUE16-NEXT: v_add_f32_e64 v7, 0x40c00000, s2
+; GFX11-TRUE16-NEXT: v_lshrrev_b32_e32 v16, 16, v0
+; GFX11-TRUE16-NEXT: v_cndmask_b32_e32 v1, v3, v8, vcc_lo
+; GFX11-TRUE16-NEXT: v_add_nc_u32_e32 v3, v9, v5
+; GFX11-TRUE16-NEXT: v_cmp_u_f32_e32 vcc_lo, v4, v4
+; GFX11-TRUE16-NEXT: v_bfe_u32 v4, v7, 16, 1
+; GFX11-TRUE16-NEXT: v_add_f32_e64 v29, 0x40c00000, s1
+; GFX11-TRUE16-NEXT: v_lshrrev_b32_e32 v0, 16, v1
+; GFX11-TRUE16-NEXT: s_and_b32 s1, s27, 0xffff0000
+; GFX11-TRUE16-NEXT: v_dual_cndmask_b32 v1, v6, v2 :: v_dual_add_nc_u32 v2, 0x7fff, v3
+; GFX11-TRUE16-NEXT: v_or_b32_e32 v3, 0x400000, v5
+; GFX11-TRUE16-NEXT: v_add_f32_e64 v6, 0x40c00000, s0
+; GFX11-TRUE16-NEXT: v_cmp_u_f32_e32 vcc_lo, v5, v5
+; GFX11-TRUE16-NEXT: s_delay_alu instid0(VALU_DEP_4)
+; GFX11-TRUE16-NEXT: v_lshrrev_b32_e32 v17, 16, v1
+; GFX11-TRUE16-NEXT: s_and_b32 s0, s15, 0xffff0000
+; GFX11-TRUE16-NEXT: v_or_b32_e32 v5, 0x400000, v7
+; GFX11-TRUE16-NEXT: v_add_f32_e64 v33, 0x40c00000, s1
+; GFX11-TRUE16-NEXT: v_cndmask_b32_e32 v1, v2, v3, vcc_lo
+; GFX11-TRUE16-NEXT: v_add_nc_u32_e32 v2, v4, v7
+; GFX11-TRUE16-NEXT: v_bfe_u32 v3, v6, 16, 1
+; GFX11-TRUE16-NEXT: v_add_f32_e64 v4, 0x40c00000, s0
+; GFX11-TRUE16-NEXT: v_cmp_u_f32_e32 vcc_lo, v7, v7
+; GFX11-TRUE16-NEXT: s_lshl_b32 s0, s15, 16
+; GFX11-TRUE16-NEXT: v_add_nc_u32_e32 v2, 0x7fff, v2
+; GFX11-TRUE16-NEXT: v_add_nc_u32_e32 v3, v3, v6
+; GFX11-TRUE16-NEXT: v_bfe_u32 v8, v4, 16, 1
+; GFX11-TRUE16-NEXT: v_or_b32_e32 v7, 0x400000, v6
+; GFX11-TRUE16-NEXT: v_or_b32_e32 v31, 0x400000, v29
+; GFX11-TRUE16-NEXT: v_cndmask_b32_e32 v2, v2, v5, vcc_lo
+; GFX11-TRUE16-NEXT: v_add_f32_e64 v5, 0x40c00000, s0
+; GFX11-TRUE16-NEXT: v_add_nc_u32_e32 v3, 0x7fff, v3
+; GFX11-TRUE16-NEXT: v_cmp_u_f32_e32 vcc_lo, v6, v6
+; GFX11-TRUE16-NEXT: s_and_b32 s0, s16, 0xffff0000
+; GFX11-TRUE16-NEXT: v_lshrrev_b32_e32 v18, 16, v2
+; GFX11-TRUE16-NEXT: v_bfe_u32 v2, v5, 16, 1
+; GFX11-TRUE16-NEXT: v_add_f32_e64 v9, 0x40c00000, s0
+; GFX11-TRUE16-NEXT: v_cndmask_b32_e32 v3, v3, v7, vcc_lo
+; GFX11-TRUE16-NEXT: v_or_b32_e32 v7, 0x400000, v4
+; GFX11-TRUE16-NEXT: v_cmp_u_f32_e32 vcc_lo, v4, v4
+; GFX11-TRUE16-NEXT: v_add_nc_u32_e32 v8, v8, v4
+; GFX11-TRUE16-NEXT: s_lshl_b32 s0, s16, 16
+; GFX11-TRUE16-NEXT: v_bfe_u32 v34, v33, 16, 1
+; GFX11-TRUE16-NEXT: v_lshrrev_b32_e32 v1, 16, v1
+; GFX11-TRUE16-NEXT: v_mov_b16_e32 v1.h, v17.l
+; GFX11-TRUE16-NEXT: v_add_nc_u32_e32 v6, 0x7fff, v8
+; GFX11-TRUE16-NEXT: v_add_nc_u32_e32 v8, v2, v5
+; GFX11-TRUE16-NEXT: v_lshrrev_b32_e32 v2, 16, v3
+; GFX11-TRUE16-NEXT: v_add_nc_u32_e32 v34, v34, v33
+; GFX11-TRUE16-NEXT: v_mov_b16_e32 v2.h, v18.l
+; GFX11-TRUE16-NEXT: s_delay_alu instid0(VALU_DEP_4) | instskip(SKIP_3) | instid1(VALU_DEP_4)
+; GFX11-TRUE16-NEXT: v_dual_cndmask_b32 v3, v6, v7 :: v_dual_add_nc_u32 v4, 0x7fff, v8
+; GFX11-TRUE16-NEXT: v_or_b32_e32 v6, 0x400000, v5
+; GFX11-TRUE16-NEXT: v_bfe_u32 v7, v9, 16, 1
+; GFX11-TRUE16-NEXT: v_cmp_u_f32_e32 vcc_lo, v5, v5
+; GFX11-TRUE16-NEXT: v_lshrrev_b32_e32 v19, 16, v3
+; GFX11-TRUE16-NEXT: v_add_f32_e64 v8, 0x40c00000, s0
+; GFX11-TRUE16-NEXT: s_and_b32 s0, s17, 0xffff0000
+; GFX11-TRUE16-NEXT: v_add_nc_u32_e32 v34, 0x7fff, v34
+; GFX11-TRUE16-NEXT: v_dual_cndmask_b32 v3, v4, v6 :: v_dual_add_nc_u32 v4, v7, v9
+; GFX11-TRUE16-NEXT: s_delay_alu instid0(VALU_DEP_3)
+; GFX11-TRUE16-NEXT: v_bfe_u32 v5, v8, 16, 1
+; GFX11-TRUE16-NEXT: v_add_f32_e64 v6, 0x40c00000, s0
+; GFX11-TRUE16-NEXT: v_or_b32_e32 v7, 0x400000, v9
+; GFX11-TRUE16-NEXT: v_cmp_u_f32_e32 vcc_lo, v9, v9
+; GFX11-TRUE16-NEXT: v_add_nc_u32_e32 v4, 0x7fff, v4
+; GFX11-TRUE16-NEXT: s_lshl_b32 s0, s17, 16
+; GFX11-TRUE16-NEXT: v_add_nc_u32_e32 v5, v5, v8
+; GFX11-TRUE16-NEXT: v_bfe_u32 v10, v6, 16, 1
+; GFX11-TRUE16-NEXT: v_or_b32_e32 v9, 0x400000, v8
+; GFX11-TRUE16-NEXT: v_cndmask_b32_e32 v4, v4, v7, vcc_lo
+; GFX11-TRUE16-NEXT: v_add_f32_e64 v7, 0x40c00000, s0
+; GFX11-TRUE16-NEXT: v_add_nc_u32_e32 v5, 0x7fff, v5
+; GFX11-TRUE16-NEXT: v_add_nc_u32_e32 v10, v10, v6
+; GFX11-TRUE16-NEXT: v_cmp_u_f32_e32 vcc_lo, v8, v8
+; GFX11-TRUE16-NEXT: v_lshrrev_b32_e32 v20, 16, v4
+; GFX11-TRUE16-NEXT: v_bfe_u32 v4, v7, 16, 1
+; GFX11-TRUE16-NEXT: s_and_b32 s0, s18, 0xffff0000
+; GFX11-TRUE16-NEXT: v_dual_cndmask_b32 v5, v5, v9 :: v_dual_add_nc_u32 v8, 0x7fff, v10
+; GFX11-TRUE16-NEXT: v_or_b32_e32 v9, 0x400000, v6
+; GFX11-TRUE16-NEXT: s_delay_alu instid0(VALU_DEP_3)
+; GFX11-TRUE16-NEXT: v_add_nc_u32_e32 v10, v4, v7
+; GFX11-TRUE16-NEXT: v_add_f32_e64 v11, 0x40c00000, s0
+; GFX11-TRUE16-NEXT: v_cmp_u_f32_e32 vcc_lo, v6, v6
+; GFX11-TRUE16-NEXT: v_lshrrev_b32_e32 v4, 16, v5
+; GFX11-TRUE16-NEXT: s_lshl_b32 s0, s18, 16
+; GFX11-TRUE16-NEXT: v_add_nc_u32_e32 v6, 0x7fff, v10
+; GFX11-TRUE16-NEXT: v_add_f32_e64 v10, 0x40c00000, s0
+; GFX11-TRUE16-NEXT: v_cndmask_b32_e32 v5, v8, v9, vcc_lo
+; GFX11-TRUE16-NEXT: v_or_b32_e32 v8, 0x400000, v7
+; GFX11-TRUE16-NEXT: v_bfe_u32 v9, v11, 16, 1
+; GFX11-TRUE16-NEXT: v_cmp_u_f32_e32 vcc_lo, v7, v7
+; GFX11-TRUE16-NEXT: s_and_b32 s0, s19, 0xffff0000
+; GFX11-TRUE16-NEXT: v_lshrrev_b32_e32 v21, 16, v5
+; GFX11-TRUE16-NEXT: v_bfe_u32 v7, v10, 16, 1
+; GFX11-TRUE16-NEXT: v_lshrrev_b32_e32 v3, 16, v3
+; GFX11-TRUE16-NEXT: v_dual_cndmask_b32 v5, v6, v8 :: v_dual_add_nc_u32 v6, v9, v11
+; GFX11-TRUE16-NEXT: v_add_f32_e64 v8, 0x40c00000, s0
+; GFX11-TRUE16-NEXT: v_or_b32_e32 v9, 0x400000, v11
+; GFX11-TRUE16-NEXT: v_cmp_u_f32_e32 vcc_lo, v11, v11
+; GFX11-TRUE16-NEXT: s_lshl_b32 s0, s19, 16
+; GFX11-TRUE16-NEXT: v_add_nc_u32_e32 v6, 0x7fff, v6
+; GFX11-TRUE16-NEXT: v_add_nc_u32_e32 v7, v7, v10
+; GFX11-TRUE16-NEXT: v_bfe_u32 v12, v8, 16, 1
+; GFX11-TRUE16-NEXT: v_or_b32_e32 v11, 0x400000, v10
+; GFX11-TRUE16-NEXT: v_lshrrev_b32_e32 v5, 16, v5
+; GFX11-TRUE16-NEXT: v_cndmask_b32_e32 v6, v6, v9, vcc_lo
+; GFX11-TRUE16-NEXT: v_add_f32_e64 v9, 0x40c00000, s0
+; GFX11-TRUE16-NEXT: v_add_nc_u32_e32 v7, 0x7fff, v7
+; GFX11-TRUE16-NEXT: v_add_nc_u32_e32 v12, v12, v8
+; GFX11-TRUE16-NEXT: v_cmp_u_f32_e32 vcc_lo, v10, v10
+; GFX11-TRUE16-NEXT: v_lshrrev_b32_e32 v22, 16, v6
+; GFX11-TRUE16-NEXT: v_bfe_u32 v6, v9, 16, 1
+; GFX11-TRUE16-NEXT: s_and_b32 s0, s20, 0xffff0000
+; GFX11-TRUE16-NEXT: v_dual_cndmask_b32 v7, v7, v11 :: v_dual_add_nc_u32 v10, 0x7fff, v12
+; GFX11-TRUE16-NEXT: v_or_b32_e32 v11, 0x400000, v8
+; GFX11-TRUE16-NEXT: s_delay_alu instid0(VALU_DEP_3)
+; GFX11-TRUE16-NEXT: v_add_nc_u32_e32 v12, v6, v9
+; GFX11-TRUE16-NEXT: v_add_f32_e64 v13, 0x40c00000, s0
+; GFX11-TRUE16-NEXT: v_cmp_u_f32_e32 vcc_lo, v8, v8
+; GFX11-TRUE16-NEXT: v_lshrrev_b32_e32 v6, 16, v7
+; GFX11-TRUE16-NEXT: s_lshl_b32 s0, s20, 16
+; GFX11-TRUE16-NEXT: v_add_nc_u32_e32 v8, 0x7fff, v12
+; GFX11-TRUE16-NEXT: v_add_f32_e64 v12, 0x40c00000, s0
+; GFX11-TRUE16-NEXT: v_cndmask_b32_e32 v7, v10, v11, vcc_lo
+; GFX11-TRUE16-NEXT: v_or_b32_e32 v10, 0x400000, v9
+; GFX11-TRUE16-NEXT: v_bfe_u32 v11, v13, 16, 1
+; GFX11-TRUE16-NEXT: v_cmp_u_f32_e32 vcc_lo, v9, v9
+; GFX11-TRUE16-NEXT: s_and_b32 s0, s21, 0xffff0000
+; GFX11-TRUE16-NEXT: v_lshrrev_b32_e32 v23, 16, v7
+; GFX11-TRUE16-NEXT: v_bfe_u32 v9, v12, 16, 1
+; GFX11-TRUE16-NEXT: v_mov_b16_e32 v6.h, v22.l
+; GFX11-TRUE16-NEXT: v_dual_cndmask_b32 v7, v8, v10 :: v_dual_add_nc_u32 v8, v11, v13
+; GFX11-TRUE16-NEXT: v_add_f32_e64 v10, 0x40c00000, s0
+; GFX11-TRUE16-NEXT: v_or_b32_e32 v11, 0x400000, v13
+; GFX11-TRUE16-NEXT: v_cmp_u_f32_e32 vcc_lo, v13, v13
+; GFX11-TRUE16-NEXT: s_lshl_b32 s0, s21, 16
+; GFX11-TRUE16-NEXT: v_add_nc_u32_e32 v8, 0x7fff, v8
+; GFX11-TRUE16-NEXT: v_add_nc_u32_e32 v9, v9, v12
+; GFX11-TRUE16-NEXT: v_bfe_u32 v14, v10, 16, 1
+; GFX11-TRUE16-NEXT: v_or_b32_e32 v13, 0x400000, v12
+; GFX11-TRUE16-NEXT: v_lshrrev_b32_e32 v7, 16, v7
+; GFX11-TRUE16-NEXT: v_cndmask_b32_e32 v8, v8, v11, vcc_lo
+; GFX11-TRUE16-NEXT: v_add_f32_e64 v11, 0x40c00000, s0
+; GFX11-TRUE16-NEXT: v_add_nc_u32_e32 v9, 0x7fff, v9
+; GFX11-TRUE16-NEXT: v_add_nc_u32_e32 v14, v14, v10
+; GFX11-TRUE16-NEXT: v_cmp_u_f32_e32 vcc_lo, v12, v12
+; GFX11-TRUE16-NEXT: v_lshrrev_b32_e32 v24, 16, v8
+; GFX11-TRUE16-NEXT: v_bfe_u32 v8, v11, 16, 1
+; GFX11-TRUE16-NEXT: s_and_b32 s0, s22, 0xffff0000
+; GFX11-TRUE16-NEXT: v_dual_cndmask_b32 v9, v9, v13 :: v_dual_add_nc_u32 v12, 0x7fff, v14
+; GFX11-TRUE16-NEXT: v_or_b32_e32 v13, 0x400000, v10
+; GFX11-TRUE16-NEXT: s_delay_alu instid0(VALU_DEP_3)
+; GFX11-TRUE16-NEXT: v_add_nc_u32_e32 v14, v8, v11
+; GFX11-TRUE16-NEXT: v_add_f32_e64 v15, 0x40c00000, s0
+; GFX11-TRUE16-NEXT: v_cmp_u_f32_e32 vcc_lo, v10, v10
+; GFX11-TRUE16-NEXT: v_lshrrev_b32_e32 v8, 16, v9
+; GFX11-TRUE16-NEXT: s_lshl_b32 s0, s22, 16
+; GFX11-TRUE16-NEXT: v_add_nc_u32_e32 v10, 0x7fff, v14
+; GFX11-TRUE16-NEXT: v_add_f32_e64 v14, 0x40c00000, s0
+; GFX11-TRUE16-NEXT: v_cndmask_b32_e32 v9, v12, v13, vcc_lo
+; GFX11-TRUE16-NEXT: v_or_b32_e32 v12, 0x400000, v11
+; GFX11-TRUE16-NEXT: v_bfe_u32 v13, v15, 16, 1
+; GFX11-TRUE16-NEXT: v_cmp_u_f32_e32 vcc_lo, v11, v11
+; GFX11-TRUE16-NEXT: s_and_b32 s0, s23, 0xffff0000
+; GFX11-TRUE16-NEXT: v_lshrrev_b32_e32 v25, 16, v9
+; GFX11-TRUE16-NEXT: v_bfe_u32 v11, v14, 16, 1
+; GFX11-TRUE16-NEXT: v_mov_b16_e32 v8.h, v24.l
+; GFX11-TRUE16-NEXT: v_dual_cndmask_b32 v9, v10, v12 :: v_dual_add_nc_u32 v10, v13, v15
+; GFX11-TRUE16-NEXT: v_add_f32_e64 v12, 0x40c00000, s0
+; GFX11-TRUE16-NEXT: v_or_b32_e32 v13, 0x400000, v15
+; GFX11-TRUE16-NEXT: v_cmp_u_f32_e32 vcc_lo, v15, v15
+; GFX11-TRUE16-NEXT: s_lshl_b32 s0, s23, 16
+; GFX11-TRUE16-NEXT: v_add_nc_u32_e32 v10, 0x7fff, v10
+; GFX11-TRUE16-NEXT: v_add_nc_u32_e32 v11, v11, v14
+; GFX11-TRUE16-NEXT: v_bfe_u32 v26, v12, 16, 1
+; GFX11-TRUE16-NEXT: v_or_b32_e32 v15, 0x400000, v14
+; GFX11-TRUE16-NEXT: v_lshrrev_b32_e32 v9, 16, v9
+; GFX11-TRUE16-NEXT: v_cndmask_b32_e32 v10, v10, v13, vcc_lo
+; GFX11-TRUE16-NEXT: v_add_f32_e64 v13, 0x40c00000, s0
+; GFX11-TRUE16-NEXT: v_add_nc_u32_e32 v11, 0x7fff, v11
+; GFX11-TRUE16-NEXT: v_add_nc_u32_e32 v26, v26, v12
+; GFX11-TRUE16-NEXT: v_cmp_u_f32_e32 vcc_lo, v14, v14
+; GFX11-TRUE16-NEXT: v_lshrrev_b32_e32 v27, 16, v10
+; GFX11-TRUE16-NEXT: v_bfe_u32 v10, v13, 16, 1
+; GFX11-TRUE16-NEXT: s_and_b32 s0, s24, 0xffff0000
+; GFX11-TRUE16-NEXT: v_dual_cndmask_b32 v11, v11, v15 :: v_dual_add_nc_u32 v14, 0x7fff, v26
+; GFX11-TRUE16-NEXT: v_or_b32_e32 v15, 0x400000, v12
+; GFX11-TRUE16-NEXT: s_delay_alu instid0(VALU_DEP_3)
+; GFX11-TRUE16-NEXT: v_add_nc_u32_e32 v26, v10, v13
+; GFX11-TRUE16-NEXT: v_add_f32_e64 v28, 0x40c00000, s0
+; GFX11-TRUE16-NEXT: v_cmp_u_f32_e32 vcc_lo, v12, v12
+; GFX11-TRUE16-NEXT: s_lshl_b32 s0, s24, 16
+; GFX11-TRUE16-NEXT: v_lshrrev_b32_e32 v10, 16, v11
+; GFX11-TRUE16-NEXT: v_add_nc_u32_e32 v12, 0x7fff, v26
+; GFX11-TRUE16-NEXT: v_add_f32_e64 v26, 0x40c00000, s0
+; GFX11-TRUE16-NEXT: v_cndmask_b32_e32 v11, v14, v15, vcc_lo
+; GFX11-TRUE16-NEXT: v_or_b32_e32 v14, 0x400000, v13
+; GFX11-TRUE16-NEXT: v_bfe_u32 v15, v28, 16, 1
+; GFX11-TRUE16-NEXT: v_cmp_u_f32_e32 vcc_lo, v13, v13
+; GFX11-TRUE16-NEXT: s_lshl_b32 s0, s25, 16
+; GFX11-TRUE16-NEXT: v_lshrrev_b32_e32 v30, 16, v11
+; GFX11-TRUE16-NEXT: v_mov_b16_e32 v10.h, v27.l
+; GFX11-TRUE16-NEXT: v_dual_cndmask_b32 v12, v12, v14 :: v_dual_add_nc_u32 v13, v15, v28
+; GFX11-TRUE16-NEXT: v_bfe_u32 v14, v26, 16, 1
+; GFX11-TRUE16-NEXT: v_bfe_u32 v15, v29, 16, 1
+; GFX11-TRUE16-NEXT: v_cmp_u_f32_e32 vcc_lo, v28, v28
+; GFX11-TRUE16-NEXT: v_mov_b16_e32 v9.h, v25.l
+; GFX11-TRUE16-NEXT: v_lshrrev_b32_e32 v11, 16, v12
+; GFX11-TRUE16-NEXT: v_add_nc_u32_e32 v12, 0x7fff, v13
+; GFX11-TRUE16-NEXT: v_or_b32_e32 v13, 0x400000, v28
+; GFX11-TRUE16-NEXT: v_add_nc_u32_e32 v14, v14, v26
+; GFX11-TRUE16-NEXT: v_add_nc_u32_e32 v15, v15, v29
+; GFX11-TRUE16-NEXT: v_or_b32_e32 v28, 0x400000, v26
+; GFX11-TRUE16-NEXT: v_mov_b16_e32 v11.h, v30.l
+; GFX11-TRUE16-NEXT: v_cndmask_b32_e32 v12, v12, v13, vcc_lo
+; GFX11-TRUE16-NEXT: v_add_f32_e64 v13, 0x40c00000, s0
+; GFX11-TRUE16-NEXT: v_add_nc_u32_e32 v14, 0x7fff, v14
+; GFX11-TRUE16-NEXT: v_cmp_u_f32_e32 vcc_lo, v26, v26
+; GFX11-TRUE16-NEXT: s_and_b32 s0, s26, 0xffff0000
+; GFX11-TRUE16-NEXT: v_add_nc_u32_e32 v15, 0x7fff, v15
+; GFX11-TRUE16-NEXT: v_bfe_u32 v32, v13, 16, 1
+; GFX11-TRUE16-NEXT: v_add_f32_e64 v26, 0x40c00000, s0
+; GFX11-TRUE16-NEXT: v_cndmask_b32_e32 v14, v14, v28, vcc_lo
+; GFX11-TRUE16-NEXT: v_cmp_u_f32_e32 vcc_lo, v29, v29
+; GFX11-TRUE16-NEXT: s_lshl_b32 s0, s26, 16
+; GFX11-TRUE16-NEXT: v_add_nc_u32_e32 v28, v32, v13
+; GFX11-TRUE16-NEXT: v_lshrrev_b32_e32 v29, 16, v12
+; GFX11-TRUE16-NEXT: v_lshrrev_b32_e32 v12, 16, v14
+; GFX11-TRUE16-NEXT: v_cndmask_b32_e32 v15, v15, v31, vcc_lo
+; GFX11-TRUE16-NEXT: v_bfe_u32 v31, v26, 16, 1
+; GFX11-TRUE16-NEXT: v_add_nc_u32_e32 v14, 0x7fff, v28
+; GFX11-TRUE16-NEXT: v_cmp_u_f32_e32 vcc_lo, v13, v13
+; GFX11-TRUE16-NEXT: v_or_b32_e32 v35, 0x400000, v26
+; GFX11-TRUE16-NEXT: v_lshrrev_b32_e32 v32, 16, v15
+; GFX11-TRUE16-NEXT: v_or_b32_e32 v15, 0x400000, v13
+; GFX11-TRUE16-NEXT: v_add_nc_u32_e32 v28, v31, v26
+; GFX11-TRUE16-NEXT: v_add_f32_e64 v31, 0x40c00000, s0
+; GFX11-TRUE16-NEXT: s_lshl_b32 s0, s27, 16
+; GFX11-TRUE16-NEXT: v_mov_b16_e32 v12.h, v29.l
+; GFX11-TRUE16-NEXT: v_cndmask_b32_e32 v13, v14, v15, vcc_lo
+; GFX11-TRUE16-NEXT: v_add_f32_e64 v14, 0x40c00000, s0
+; GFX11-TRUE16-NEXT: v_bfe_u32 v15, v31, 16, 1
+; GFX11-TRUE16-NEXT: v_add_nc_u32_e32 v28, 0x7fff, v28
+; GFX11-TRUE16-NEXT: v_cmp_u_f32_e32 vcc_lo, v26, v26
+; GFX11-TRUE16-NEXT: v_lshrrev_b32_e32 v13, 16, v13
+; GFX11-TRUE16-NEXT: v_bfe_u32 v36, v14, 16, 1
+; GFX11-TRUE16-NEXT: v_add_nc_u32_e32 v15, v15, v31
+; GFX11-TRUE16-NEXT: v_or_b32_e32 v37, 0x400000, v14
+; GFX11-TRUE16-NEXT: v_cndmask_b32_e32 v26, v28, v35, vcc_lo
+; GFX11-TRUE16-NEXT: v_or_b32_e32 v35, 0x400000, v33
+; GFX11-TRUE16-NEXT: v_cmp_u_f32_e32 vcc_lo, v33, v33
+; GFX11-TRUE16-NEXT: v_add_nc_u32_e32 v28, v36, v14
+; GFX11-TRUE16-NEXT: v_add_nc_u32_e32 v15, 0x7fff, v15
+; GFX11-TRUE16-NEXT: v_or_b32_e32 v36, 0x400000, v31
+; GFX11-TRUE16-NEXT: v_lshrrev_b32_e32 v26, 16, v26
+; GFX11-TRUE16-NEXT: v_cndmask_b32_e32 v33, v34, v35, vcc_lo
+; GFX11-TRUE16-NEXT: v_cmp_u_f32_e32 vcc_lo, v31, v31
+; GFX11-TRUE16-NEXT: v_add_nc_u32_e32 v28, 0x7fff, v28
+; GFX11-TRUE16-NEXT: v_mov_b16_e32 v13.h, v32.l
+; GFX11-TRUE16-NEXT: v_mov_b16_e32 v7.h, v23.l
+; GFX11-TRUE16-NEXT: v_lshrrev_b32_e32 v31, 16, v33
+; GFX11-TRUE16-NEXT: v_cndmask_b32_e32 v15, v15, v36, vcc_lo
+; GFX11-TRUE16-NEXT: v_cmp_u_f32_e32 vcc_lo, v14, v14
+; GFX11-TRUE16-NEXT: v_mov_b16_e32 v5.h, v21.l
+; GFX11-TRUE16-NEXT: v_mov_b16_e32 v4.h, v20.l
+; GFX11-TRUE16-NEXT: v_mov_b16_e32 v3.h, v19.l
+; GFX11-TRUE16-NEXT: v_lshrrev_b32_e32 v14, 16, v15
+; GFX11-TRUE16-NEXT: v_cndmask_b32_e32 v28, v28, v37, vcc_lo
+; GFX11-TRUE16-NEXT: v_mov_b16_e32 v14.h, v26.l
+; GFX11-TRUE16-NEXT: v_mov_b16_e32 v0.h, v16.l
+; GFX11-TRUE16-NEXT: s_delay_alu instid0(VALU_DEP_3)
+; GFX11-TRUE16-NEXT: v_lshrrev_b32_e32 v15, 16, v28
+; GFX11-TRUE16-NEXT: v_mov_b16_e32 v15.h, v31.l
+; GFX11-TRUE16-NEXT: s_setpc_b64 s[30:31]
+; GFX11-TRUE16-NEXT: .LBB103_3:
+; GFX11-TRUE16-NEXT: s_branch .LBB103_2
+; GFX11-TRUE16-NEXT: .LBB103_4:
+; GFX11-TRUE16-NEXT: v_dual_mov_b32 v0, s12 :: v_dual_mov_b32 v1, s13
+; GFX11-TRUE16-NEXT: v_dual_mov_b32 v2, s14 :: v_dual_mov_b32 v3, s15
+; GFX11-TRUE16-NEXT: v_dual_mov_b32 v4, s16 :: v_dual_mov_b32 v5, s17
+; GFX11-TRUE16-NEXT: v_dual_mov_b32 v6, s18 :: v_dual_mov_b32 v7, s19
+; GFX11-TRUE16-NEXT: v_dual_mov_b32 v8, s20 :: v_dual_mov_b32 v9, s21
+; GFX11-TRUE16-NEXT: v_dual_mov_b32 v10, s22 :: v_dual_mov_b32 v11, s23
+; GFX11-TRUE16-NEXT: v_dual_mov_b32 v12, s24 :: v_dual_mov_b32 v13, s25
+; GFX11-TRUE16-NEXT: v_dual_mov_b32 v14, s26 :: v_dual_mov_b32 v15, s27
+; GFX11-TRUE16-NEXT: s_setpc_b64 s[30:31]
+;
+; GFX11-FAKE16-LABEL: bitcast_v32bf16_to_v32f16_scalar:
+; GFX11-FAKE16: ; %bb.0:
+; GFX11-FAKE16-NEXT: s_waitcnt vmcnt(0) expcnt(0) lgkmcnt(0)
+; GFX11-FAKE16-NEXT: s_mov_b32 s15, s3
+; GFX11-FAKE16-NEXT: s_mov_b32 s14, s2
+; GFX11-FAKE16-NEXT: s_mov_b32 s13, s1
+; GFX11-FAKE16-NEXT: s_mov_b32 s12, s0
+; GFX11-FAKE16-NEXT: s_cmp_lg_u32 s28, 0
+; GFX11-FAKE16-NEXT: s_mov_b32 s0, 0
+; GFX11-FAKE16-NEXT: s_cbranch_scc0 .LBB103_3
+; GFX11-FAKE16-NEXT: ; %bb.1: ; %Flow
+; GFX11-FAKE16-NEXT: s_and_not1_b32 vcc_lo, exec_lo, s0
+; GFX11-FAKE16-NEXT: s_cbranch_vccnz .LBB103_4
+; GFX11-FAKE16-NEXT: .LBB103_2: ; %cmp.true
+; GFX11-FAKE16-NEXT: s_and_b32 s0, s12, 0xffff0000
+; GFX11-FAKE16-NEXT: s_lshl_b32 s1, s12, 16
+; GFX11-FAKE16-NEXT: v_add_f32_e64 v0, 0x40c00000, s0
+; GFX11-FAKE16-NEXT: s_and_b32 s0, s13, 0xffff0000
+; GFX11-FAKE16-NEXT: v_add_f32_e64 v1, 0x40c00000, s1
+; GFX11-FAKE16-NEXT: v_add_f32_e64 v4, 0x40c00000, s0
+; GFX11-FAKE16-NEXT: s_lshl_b32 s1, s13, 16
+; GFX11-FAKE16-NEXT: v_bfe_u32 v2, v0, 16, 1
+; GFX11-FAKE16-NEXT: v_add_f32_e64 v5, 0x40c00000, s1
+; GFX11-FAKE16-NEXT: v_bfe_u32 v3, v1, 16, 1
+; GFX11-FAKE16-NEXT: v_bfe_u32 v7, v4, 16, 1
+; GFX11-FAKE16-NEXT: v_or_b32_e32 v6, 0x400000, v0
+; GFX11-FAKE16-NEXT: v_add_nc_u32_e32 v2, v2, v0
+; GFX11-FAKE16-NEXT: v_or_b32_e32 v8, 0x400000, v1
+; GFX11-FAKE16-NEXT: v_cmp_u_f32_e32 vcc_lo, v0, v0
+; GFX11-FAKE16-NEXT: v_add_nc_u32_e32 v7, v7, v4
+; GFX11-FAKE16-NEXT: v_bfe_u32 v9, v5, 16, 1
+; GFX11-FAKE16-NEXT: v_add_nc_u32_e32 v2, 0x7fff, v2
+; GFX11-FAKE16-NEXT: v_add_nc_u32_e32 v3, v3, v1
+; GFX11-FAKE16-NEXT: s_and_b32 s2, s14, 0xffff0000
+; GFX11-FAKE16-NEXT: s_lshl_b32 s0, s14, 16
+; GFX11-FAKE16-NEXT: s_and_b32 s1, s27, 0xffff0000
+; GFX11-FAKE16-NEXT: v_cndmask_b32_e32 v0, v2, v6, vcc_lo
+; GFX11-FAKE16-NEXT: v_cmp_u_f32_e32 vcc_lo, v1, v1
+; GFX11-FAKE16-NEXT: v_or_b32_e32 v2, 0x400000, v4
+; GFX11-FAKE16-NEXT: v_add_nc_u32_e32 v6, 0x7fff, v7
+; GFX11-FAKE16-NEXT: v_add_nc_u32_e32 v3, 0x7fff, v3
+; GFX11-FAKE16-NEXT: v_add_f32_e64 v7, 0x40c00000, s2
+; GFX11-FAKE16-NEXT: v_add_f32_e64 v33, 0x40c00000, s1
+; GFX11-FAKE16-NEXT: v_lshrrev_b32_e32 v0, 16, v0
+; GFX11-FAKE16-NEXT: s_delay_alu instid0(VALU_DEP_4)
+; GFX11-FAKE16-NEXT: v_cndmask_b32_e32 v1, v3, v8, vcc_lo
+; GFX11-FAKE16-NEXT: v_add_nc_u32_e32 v3, v9, v5
+; GFX11-FAKE16-NEXT: v_cmp_u_f32_e32 vcc_lo, v4, v4
+; GFX11-FAKE16-NEXT: v_or_b32_e32 v4, 0x400000, v5
+; GFX11-FAKE16-NEXT: v_add_f32_e64 v8, 0x40c00000, s0
+; GFX11-FAKE16-NEXT: s_and_b32 s0, s15, 0xffff0000
+; GFX11-FAKE16-NEXT: v_dual_cndmask_b32 v2, v6, v2 :: v_dual_add_nc_u32 v3, 0x7fff, v3
+; GFX11-FAKE16-NEXT: v_bfe_u32 v6, v7, 16, 1
+; GFX11-FAKE16-NEXT: v_cmp_u_f32_e32 vcc_lo, v5, v5
+; GFX11-FAKE16-NEXT: v_add_f32_e64 v5, 0x40c00000, s0
+; GFX11-FAKE16-NEXT: s_lshl_b32 s0, s15, 16
+; GFX11-FAKE16-NEXT: v_lshrrev_b32_e32 v16, 16, v2
+; GFX11-FAKE16-NEXT: v_or_b32_e32 v38, 0x400000, v33
+; GFX11-FAKE16-NEXT: v_dual_cndmask_b32 v2, v3, v4 :: v_dual_add_nc_u32 v3, v6, v7
+; GFX11-FAKE16-NEXT: v_bfe_u32 v4, v8, 16, 1
+; GFX11-FAKE16-NEXT: v_or_b32_e32 v6, 0x400000, v7
+; GFX11-FAKE16-NEXT: v_bfe_u32 v9, v5, 16, 1
+; GFX11-FAKE16-NEXT: v_cmp_u_f32_e32 vcc_lo, v7, v7
+; GFX11-FAKE16-NEXT: v_add_nc_u32_e32 v3, 0x7fff, v3
+; GFX11-FAKE16-NEXT: v_add_nc_u32_e32 v4, v4, v8
+; GFX11-FAKE16-NEXT: v_or_b32_e32 v7, 0x400000, v8
+; GFX11-FAKE16-NEXT: v_add_nc_u32_e32 v9, v9, v5
+; GFX11-FAKE16-NEXT: v_bfe_u32 v36, v33, 16, 1
+; GFX11-FAKE16-NEXT: v_cndmask_b32_e32 v3, v3, v6, vcc_lo
+; GFX11-FAKE16-NEXT: v_add_f32_e64 v6, 0x40c00000, s0
+; GFX11-FAKE16-NEXT: v_add_nc_u32_e32 v4, 0x7fff, v4
+; GFX11-FAKE16-NEXT: v_cmp_u_f32_e32 vcc_lo, v8, v8
+; GFX11-FAKE16-NEXT: v_or_b32_e32 v8, 0x400000, v5
+; GFX11-FAKE16-NEXT: v_lshrrev_b32_e32 v17, 16, v3
+; GFX11-FAKE16-NEXT: v_bfe_u32 v3, v6, 16, 1
+; GFX11-FAKE16-NEXT: s_and_b32 s0, s16, 0xffff0000
+; GFX11-FAKE16-NEXT: v_dual_cndmask_b32 v4, v4, v7 :: v_dual_add_nc_u32 v7, 0x7fff, v9
+; GFX11-FAKE16-NEXT: v_cmp_u_f32_e32 vcc_lo, v5, v5
+; GFX11-FAKE16-NEXT: s_delay_alu instid0(VALU_DEP_3) | instskip(SKIP_1) | instid1(VALU_DEP_4)
+; GFX11-FAKE16-NEXT: v_add_nc_u32_e32 v9, v3, v6
+; GFX11-FAKE16-NEXT: v_lshrrev_b32_e32 v1, 16, v1
+; GFX11-FAKE16-NEXT: v_lshrrev_b32_e32 v3, 16, v4
+; GFX11-FAKE16-NEXT: v_add_f32_e64 v4, 0x40c00000, s0
+; GFX11-FAKE16-NEXT: v_cndmask_b32_e32 v5, v7, v8, vcc_lo
+; GFX11-FAKE16-NEXT: s_lshl_b32 s0, s16, 16
+; GFX11-FAKE16-NEXT: v_add_nc_u32_e32 v7, 0x7fff, v9
+; GFX11-FAKE16-NEXT: v_or_b32_e32 v8, 0x400000, v6
+; GFX11-FAKE16-NEXT: v_bfe_u32 v9, v4, 16, 1
+; GFX11-FAKE16-NEXT: v_lshrrev_b32_e32 v18, 16, v5
+; GFX11-FAKE16-NEXT: v_add_f32_e64 v5, 0x40c00000, s0
+; GFX11-FAKE16-NEXT: v_cmp_u_f32_e32 vcc_lo, v6, v6
+; GFX11-FAKE16-NEXT: s_and_b32 s0, s17, 0xffff0000
+; GFX11-FAKE16-NEXT: v_lshrrev_b32_e32 v2, 16, v2
+; GFX11-FAKE16-NEXT: v_cndmask_b32_e32 v6, v7, v8, vcc_lo
+; GFX11-FAKE16-NEXT: v_add_nc_u32_e32 v7, v9, v4
+; GFX11-FAKE16-NEXT: v_bfe_u32 v8, v5, 16, 1
+; GFX11-FAKE16-NEXT: v_add_f32_e64 v9, 0x40c00000, s0
+; GFX11-FAKE16-NEXT: v_cmp_u_f32_e32 vcc_lo, v4, v4
+; GFX11-FAKE16-NEXT: v_lshrrev_b32_e32 v19, 16, v6
+; GFX11-FAKE16-NEXT: v_add_nc_u32_e32 v6, 0x7fff, v7
+; GFX11-FAKE16-NEXT: v_or_b32_e32 v7, 0x400000, v4
+; GFX11-FAKE16-NEXT: v_add_nc_u32_e32 v8, v8, v5
+; GFX11-FAKE16-NEXT: v_bfe_u32 v10, v9, 16, 1
+; GFX11-FAKE16-NEXT: s_lshl_b32 s0, s17, 16
+; GFX11-FAKE16-NEXT: v_and_b32_e32 v19, 0xffff, v19
+; GFX11-FAKE16-NEXT: s_delay_alu instid0(VALU_DEP_3)
+; GFX11-FAKE16-NEXT: v_dual_cndmask_b32 v4, v6, v7 :: v_dual_add_nc_u32 v7, 0x7fff, v8
+; GFX11-FAKE16-NEXT: v_or_b32_e32 v8, 0x400000, v5
+; GFX11-FAKE16-NEXT: v_cmp_u_f32_e32 vcc_lo, v5, v5
+; GFX11-FAKE16-NEXT: v_add_f32_e64 v6, 0x40c00000, s0
+; GFX11-FAKE16-NEXT: s_and_b32 s0, s18, 0xffff0000
+; GFX11-FAKE16-NEXT: v_lshrrev_b32_e32 v4, 16, v4
+; GFX11-FAKE16-NEXT: v_cndmask_b32_e32 v5, v7, v8, vcc_lo
+; GFX11-FAKE16-NEXT: v_or_b32_e32 v8, 0x400000, v9
+; GFX11-FAKE16-NEXT: v_cmp_u_f32_e32 vcc_lo, v9, v9
+; GFX11-FAKE16-NEXT: v_add_nc_u32_e32 v10, v10, v9
+; GFX11-FAKE16-NEXT: v_bfe_u32 v11, v6, 16, 1
+; GFX11-FAKE16-NEXT: v_lshrrev_b32_e32 v20, 16, v5
+; GFX11-FAKE16-NEXT: v_add_f32_e64 v5, 0x40c00000, s0
+; GFX11-FAKE16-NEXT: s_lshl_b32 s0, s18, 16
+; GFX11-FAKE16-NEXT: v_add_nc_u32_e32 v7, 0x7fff, v10
+; GFX11-FAKE16-NEXT: v_add_nc_u32_e32 v10, v11, v6
+; GFX11-FAKE16-NEXT: v_or_b32_e32 v9, 0x400000, v6
+; GFX11-FAKE16-NEXT: v_or_b32_e32 v11, 0x400000, v5
+; GFX11-FAKE16-NEXT: v_and_b32_e32 v20, 0xffff, v20
+; GFX11-FAKE16-NEXT: s_delay_alu instid0(VALU_DEP_4) | instskip(SKIP_2) | instid1(VALU_DEP_4)
+; GFX11-FAKE16-NEXT: v_dual_cndmask_b32 v7, v7, v8 :: v_dual_add_nc_u32 v8, 0x7fff, v10
+; GFX11-FAKE16-NEXT: v_bfe_u32 v10, v5, 16, 1
+; GFX11-FAKE16-NEXT: v_cmp_u_f32_e32 vcc_lo, v6, v6
+; GFX11-FAKE16-NEXT: v_lshl_or_b32 v4, v4, 16, v20
+; GFX11-FAKE16-NEXT: s_delay_alu instid0(VALU_DEP_4)
+; GFX11-FAKE16-NEXT: v_lshrrev_b32_e32 v21, 16, v7
+; GFX11-FAKE16-NEXT: v_add_f32_e64 v7, 0x40c00000, s0
+; GFX11-FAKE16-NEXT: s_and_b32 s0, s19, 0xffff0000
+; GFX11-FAKE16-NEXT: v_cndmask_b32_e32 v6, v8, v9, vcc_lo
+; GFX11-FAKE16-NEXT: v_add_nc_u32_e32 v8, v10, v5
+; GFX11-FAKE16-NEXT: v_add_f32_e64 v10, 0x40c00000, s0
+; GFX11-FAKE16-NEXT: v_bfe_u32 v9, v7, 16, 1
+; GFX11-FAKE16-NEXT: v_cmp_u_f32_e32 vcc_lo, v5, v5
+; GFX11-FAKE16-NEXT: s_lshl_b32 s0, s19, 16
+; GFX11-FAKE16-NEXT: v_add_nc_u32_e32 v8, 0x7fff, v8
+; GFX11-FAKE16-NEXT: v_bfe_u32 v12, v10, 16, 1
+; GFX11-FAKE16-NEXT: v_add_nc_u32_e32 v9, v9, v7
+; GFX11-FAKE16-NEXT: v_lshrrev_b32_e32 v6, 16, v6
+; GFX11-FAKE16-NEXT: s_delay_alu instid0(VALU_DEP_4) | instskip(SKIP_1) | instid1(VALU_DEP_4)
+; GFX11-FAKE16-NEXT: v_cndmask_b32_e32 v5, v8, v11, vcc_lo
+; GFX11-FAKE16-NEXT: v_add_f32_e64 v8, 0x40c00000, s0
+; GFX11-FAKE16-NEXT: v_add_nc_u32_e32 v9, 0x7fff, v9
+; GFX11-FAKE16-NEXT: v_or_b32_e32 v11, 0x400000, v7
+; GFX11-FAKE16-NEXT: v_add_nc_u32_e32 v12, v12, v10
+; GFX11-FAKE16-NEXT: v_cmp_u_f32_e32 vcc_lo, v7, v7
+; GFX11-FAKE16-NEXT: v_bfe_u32 v13, v8, 16, 1
+; GFX11-FAKE16-NEXT: s_and_b32 s0, s20, 0xffff0000
+; GFX11-FAKE16-NEXT: v_lshrrev_b32_e32 v5, 16, v5
+; GFX11-FAKE16-NEXT: v_cndmask_b32_e32 v7, v9, v11, vcc_lo
+; GFX11-FAKE16-NEXT: v_add_nc_u32_e32 v9, 0x7fff, v12
+; GFX11-FAKE16-NEXT: v_or_b32_e32 v11, 0x400000, v10
+; GFX11-FAKE16-NEXT: v_cmp_u_f32_e32 vcc_lo, v10, v10
+; GFX11-FAKE16-NEXT: v_add_nc_u32_e32 v12, v13, v8
+; GFX11-FAKE16-NEXT: v_add_f32_e64 v13, 0x40c00000, s0
+; GFX11-FAKE16-NEXT: s_lshl_b32 s0, s20, 16
+; GFX11-FAKE16-NEXT: v_lshrrev_b32_e32 v7, 16, v7
+; GFX11-FAKE16-NEXT: s_delay_alu instid0(VALU_DEP_3) | instskip(SKIP_3) | instid1(VALU_DEP_4)
+; GFX11-FAKE16-NEXT: v_dual_cndmask_b32 v9, v9, v11 :: v_dual_add_nc_u32 v10, 0x7fff, v12
+; GFX11-FAKE16-NEXT: v_or_b32_e32 v11, 0x400000, v8
+; GFX11-FAKE16-NEXT: v_bfe_u32 v12, v13, 16, 1
+; GFX11-FAKE16-NEXT: v_cmp_u_f32_e32 vcc_lo, v8, v8
+; GFX11-FAKE16-NEXT: v_lshrrev_b32_e32 v22, 16, v9
+; GFX11-FAKE16-NEXT: v_add_f32_e64 v9, 0x40c00000, s0
+; GFX11-FAKE16-NEXT: s_and_b32 s0, s21, 0xffff0000
+; GFX11-FAKE16-NEXT: v_or_b32_e32 v14, 0x400000, v13
+; GFX11-FAKE16-NEXT: v_cndmask_b32_e32 v8, v10, v11, vcc_lo
+; GFX11-FAKE16-NEXT: v_add_nc_u32_e32 v10, v12, v13
+; GFX11-FAKE16-NEXT: v_bfe_u32 v11, v9, 16, 1
+; GFX11-FAKE16-NEXT: v_add_f32_e64 v12, 0x40c00000, s0
+; GFX11-FAKE16-NEXT: v_cmp_u_f32_e32 vcc_lo, v13, v13
+; GFX11-FAKE16-NEXT: s_lshl_b32 s0, s21, 16
+; GFX11-FAKE16-NEXT: v_add_nc_u32_e32 v10, 0x7fff, v10
+; GFX11-FAKE16-NEXT: v_add_nc_u32_e32 v11, v11, v9
+; GFX11-FAKE16-NEXT: v_bfe_u32 v15, v12, 16, 1
+; GFX11-FAKE16-NEXT: v_add_f32_e64 v13, 0x40c00000, s0
+; GFX11-FAKE16-NEXT: s_and_b32 s0, s22, 0xffff0000
+; GFX11-FAKE16-NEXT: s_delay_alu instid0(VALU_DEP_3) | instskip(SKIP_3) | instid1(VALU_DEP_4)
+; GFX11-FAKE16-NEXT: v_dual_cndmask_b32 v10, v10, v14 :: v_dual_add_nc_u32 v11, 0x7fff, v11
+; GFX11-FAKE16-NEXT: v_or_b32_e32 v14, 0x400000, v9
+; GFX11-FAKE16-NEXT: v_add_nc_u32_e32 v15, v15, v12
+; GFX11-FAKE16-NEXT: v_cmp_u_f32_e32 vcc_lo, v9, v9
+; GFX11-FAKE16-NEXT: v_lshrrev_b32_e32 v23, 16, v10
+; GFX11-FAKE16-NEXT: v_bfe_u32 v10, v13, 16, 1
+; GFX11-FAKE16-NEXT: v_lshrrev_b32_e32 v8, 16, v8
+; GFX11-FAKE16-NEXT: v_cndmask_b32_e32 v9, v11, v14, vcc_lo
+; GFX11-FAKE16-NEXT: v_add_nc_u32_e32 v11, 0x7fff, v15
+; GFX11-FAKE16-NEXT: v_or_b32_e32 v14, 0x400000, v12
+; GFX11-FAKE16-NEXT: v_cmp_u_f32_e32 vcc_lo, v12, v12
+; GFX11-FAKE16-NEXT: v_add_nc_u32_e32 v10, v10, v13
+; GFX11-FAKE16-NEXT: v_add_f32_e64 v15, 0x40c00000, s0
+; GFX11-FAKE16-NEXT: s_lshl_b32 s0, s22, 16
+; GFX11-FAKE16-NEXT: v_or_b32_e32 v12, 0x400000, v13
+; GFX11-FAKE16-NEXT: v_cndmask_b32_e32 v11, v11, v14, vcc_lo
+; GFX11-FAKE16-NEXT: v_add_nc_u32_e32 v10, 0x7fff, v10
+; GFX11-FAKE16-NEXT: v_bfe_u32 v14, v15, 16, 1
+; GFX11-FAKE16-NEXT: v_cmp_u_f32_e32 vcc_lo, v13, v13
+; GFX11-FAKE16-NEXT: v_lshrrev_b32_e32 v9, 16, v9
+; GFX11-FAKE16-NEXT: v_lshrrev_b32_e32 v24, 16, v11
+; GFX11-FAKE16-NEXT: v_add_f32_e64 v11, 0x40c00000, s0
+; GFX11-FAKE16-NEXT: s_and_b32 s0, s23, 0xffff0000
+; GFX11-FAKE16-NEXT: v_cndmask_b32_e32 v10, v10, v12, vcc_lo
+; GFX11-FAKE16-NEXT: v_add_nc_u32_e32 v12, v14, v15
+; GFX11-FAKE16-NEXT: v_add_f32_e64 v14, 0x40c00000, s0
+; GFX11-FAKE16-NEXT: v_bfe_u32 v13, v11, 16, 1
+; GFX11-FAKE16-NEXT: v_cmp_u_f32_e32 vcc_lo, v15, v15
+; GFX11-FAKE16-NEXT: v_lshrrev_b32_e32 v25, 16, v10
+; GFX11-FAKE16-NEXT: v_add_nc_u32_e32 v10, 0x7fff, v12
+; GFX11-FAKE16-NEXT: v_or_b32_e32 v12, 0x400000, v15
+; GFX11-FAKE16-NEXT: v_add_nc_u32_e32 v13, v13, v11
+; GFX11-FAKE16-NEXT: v_bfe_u32 v26, v14, 16, 1
+; GFX11-FAKE16-NEXT: s_lshl_b32 s0, s23, 16
+; GFX11-FAKE16-NEXT: v_or_b32_e32 v15, 0x400000, v11
+; GFX11-FAKE16-NEXT: v_cndmask_b32_e32 v10, v10, v12, vcc_lo
+; GFX11-FAKE16-NEXT: v_add_f32_e64 v12, 0x40c00000, s0
+; GFX11-FAKE16-NEXT: v_add_nc_u32_e32 v13, 0x7fff, v13
+; GFX11-FAKE16-NEXT: v_add_nc_u32_e32 v26, v26, v14
+; GFX11-FAKE16-NEXT: v_cmp_u_f32_e32 vcc_lo, v11, v11
+; GFX11-FAKE16-NEXT: s_and_b32 s0, s24, 0xffff0000
+; GFX11-FAKE16-NEXT: v_bfe_u32 v27, v12, 16, 1
+; GFX11-FAKE16-NEXT: v_lshrrev_b32_e32 v10, 16, v10
+; GFX11-FAKE16-NEXT: v_and_b32_e32 v25, 0xffff, v25
+; GFX11-FAKE16-NEXT: v_cndmask_b32_e32 v11, v13, v15, vcc_lo
+; GFX11-FAKE16-NEXT: v_add_nc_u32_e32 v13, 0x7fff, v26
+; GFX11-FAKE16-NEXT: v_or_b32_e32 v15, 0x400000, v14
+; GFX11-FAKE16-NEXT: v_cmp_u_f32_e32 vcc_lo, v14, v14
+; GFX11-FAKE16-NEXT: v_add_nc_u32_e32 v26, v27, v12
+; GFX11-FAKE16-NEXT: v_add_f32_e64 v27, 0x40c00000, s0
+; GFX11-FAKE16-NEXT: s_lshl_b32 s0, s24, 16
+; GFX11-FAKE16-NEXT: v_lshrrev_b32_e32 v11, 16, v11
+; GFX11-FAKE16-NEXT: s_delay_alu instid0(VALU_DEP_3) | instskip(SKIP_3) | instid1(VALU_DEP_4)
+; GFX11-FAKE16-NEXT: v_dual_cndmask_b32 v13, v13, v15 :: v_dual_add_nc_u32 v14, 0x7fff, v26
+; GFX11-FAKE16-NEXT: v_or_b32_e32 v15, 0x400000, v12
+; GFX11-FAKE16-NEXT: v_bfe_u32 v26, v27, 16, 1
+; GFX11-FAKE16-NEXT: v_cmp_u_f32_e32 vcc_lo, v12, v12
+; GFX11-FAKE16-NEXT: v_lshrrev_b32_e32 v28, 16, v13
+; GFX11-FAKE16-NEXT: v_add_f32_e64 v13, 0x40c00000, s0
+; GFX11-FAKE16-NEXT: s_and_b32 s0, s25, 0xffff0000
+; GFX11-FAKE16-NEXT: v_or_b32_e32 v29, 0x400000, v27
+; GFX11-FAKE16-NEXT: v_cndmask_b32_e32 v12, v14, v15, vcc_lo
+; GFX11-FAKE16-NEXT: v_add_nc_u32_e32 v14, v26, v27
+; GFX11-FAKE16-NEXT: v_bfe_u32 v15, v13, 16, 1
+; GFX11-FAKE16-NEXT: v_add_f32_e64 v26, 0x40c00000, s0
+; GFX11-FAKE16-NEXT: v_cmp_u_f32_e32 vcc_lo, v27, v27
+; GFX11-FAKE16-NEXT: s_lshl_b32 s0, s25, 16
+; GFX11-FAKE16-NEXT: v_add_nc_u32_e32 v14, 0x7fff, v14
+; GFX11-FAKE16-NEXT: v_add_nc_u32_e32 v15, v15, v13
+; GFX11-FAKE16-NEXT: v_bfe_u32 v30, v26, 16, 1
+; GFX11-FAKE16-NEXT: v_add_f32_e64 v27, 0x40c00000, s0
+; GFX11-FAKE16-NEXT: s_and_b32 s0, s26, 0xffff0000
+; GFX11-FAKE16-NEXT: s_delay_alu instid0(VALU_DEP_3) | instskip(SKIP_3) | instid1(VALU_DEP_4)
+; GFX11-FAKE16-NEXT: v_dual_cndmask_b32 v14, v14, v29 :: v_dual_add_nc_u32 v15, 0x7fff, v15
+; GFX11-FAKE16-NEXT: v_or_b32_e32 v29, 0x400000, v13
+; GFX11-FAKE16-NEXT: v_add_nc_u32_e32 v30, v30, v26
+; GFX11-FAKE16-NEXT: v_cmp_u_f32_e32 vcc_lo, v13, v13
+; GFX11-FAKE16-NEXT: v_lshrrev_b32_e32 v31, 16, v14
+; GFX11-FAKE16-NEXT: v_bfe_u32 v14, v27, 16, 1
+; GFX11-FAKE16-NEXT: v_or_b32_e32 v32, 0x400000, v27
+; GFX11-FAKE16-NEXT: v_lshrrev_b32_e32 v12, 16, v12
+; GFX11-FAKE16-NEXT: v_cndmask_b32_e32 v13, v15, v29, vcc_lo
+; GFX11-FAKE16-NEXT: v_add_nc_u32_e32 v15, 0x7fff, v30
+; GFX11-FAKE16-NEXT: v_or_b32_e32 v29, 0x400000, v26
+; GFX11-FAKE16-NEXT: v_cmp_u_f32_e32 vcc_lo, v26, v26
+; GFX11-FAKE16-NEXT: v_add_f32_e64 v30, 0x40c00000, s0
+; GFX11-FAKE16-NEXT: s_lshl_b32 s0, s27, 16
+; GFX11-FAKE16-NEXT: v_add_nc_u32_e32 v14, v14, v27
+; GFX11-FAKE16-NEXT: v_lshrrev_b32_e32 v13, 16, v13
+; GFX11-FAKE16-NEXT: v_cndmask_b32_e32 v15, v15, v29, vcc_lo
+; GFX11-FAKE16-NEXT: v_add_f32_e64 v29, 0x40c00000, s0
+; GFX11-FAKE16-NEXT: s_lshl_b32 s0, s26, 16
+; GFX11-FAKE16-NEXT: v_add_nc_u32_e32 v14, 0x7fff, v14
+; GFX11-FAKE16-NEXT: v_add_f32_e64 v35, 0x40c00000, s0
+; GFX11-FAKE16-NEXT: v_cmp_u_f32_e32 vcc_lo, v27, v27
+; GFX11-FAKE16-NEXT: v_bfe_u32 v34, v29, 16, 1
+; GFX11-FAKE16-NEXT: v_or_b32_e32 v37, 0x400000, v29
+; GFX11-FAKE16-NEXT: v_bfe_u32 v26, v30, 16, 1
+; GFX11-FAKE16-NEXT: v_cndmask_b32_e32 v14, v14, v32, vcc_lo
+; GFX11-FAKE16-NEXT: s_delay_alu instid0(VALU_DEP_4)
+; GFX11-FAKE16-NEXT: v_add_nc_u32_e32 v27, v34, v29
+; GFX11-FAKE16-NEXT: v_bfe_u32 v32, v35, 16, 1
+; GFX11-FAKE16-NEXT: v_add_nc_u32_e32 v34, v36, v33
+; GFX11-FAKE16-NEXT: v_cmp_u_f32_e32 vcc_lo, v29, v29
+; GFX11-FAKE16-NEXT: v_add_nc_u32_e32 v26, v26, v30
+; GFX11-FAKE16-NEXT: v_add_nc_u32_e32 v27, 0x7fff, v27
+; GFX11-FAKE16-NEXT: v_add_nc_u32_e32 v32, v32, v35
+; GFX11-FAKE16-NEXT: v_add_nc_u32_e32 v34, 0x7fff, v34
+; GFX11-FAKE16-NEXT: v_or_b32_e32 v36, 0x400000, v30
+; GFX11-FAKE16-NEXT: s_delay_alu instid0(VALU_DEP_4)
+; GFX11-FAKE16-NEXT: v_dual_cndmask_b32 v27, v27, v37 :: v_dual_add_nc_u32 v26, 0x7fff, v26
+; GFX11-FAKE16-NEXT: v_cmp_u_f32_e32 vcc_lo, v33, v33
+; GFX11-FAKE16-NEXT: v_add_nc_u32_e32 v29, 0x7fff, v32
+; GFX11-FAKE16-NEXT: v_or_b32_e32 v32, 0x400000, v35
+; GFX11-FAKE16-NEXT: v_lshrrev_b32_e32 v14, 16, v14
+; GFX11-FAKE16-NEXT: v_lshrrev_b32_e32 v27, 16, v27
+; GFX11-FAKE16-NEXT: v_cndmask_b32_e32 v33, v34, v38, vcc_lo
+; GFX11-FAKE16-NEXT: v_cmp_u_f32_e32 vcc_lo, v35, v35
+; GFX11-FAKE16-NEXT: v_and_b32_e32 v34, 0xffff, v11
+; GFX11-FAKE16-NEXT: s_delay_alu instid0(VALU_DEP_4)
+; GFX11-FAKE16-NEXT: v_and_b32_e32 v27, 0xffff, v27
+; GFX11-FAKE16-NEXT: v_cndmask_b32_e32 v29, v29, v32, vcc_lo
+; GFX11-FAKE16-NEXT: v_cmp_u_f32_e32 vcc_lo, v30, v30
+; GFX11-FAKE16-NEXT: v_lshrrev_b32_e32 v30, 16, v33
+; GFX11-FAKE16-NEXT: v_lshrrev_b32_e32 v32, 16, v15
+; GFX11-FAKE16-NEXT: v_and_b32_e32 v33, 0xffff, v12
+; GFX11-FAKE16-NEXT: v_lshrrev_b32_e32 v29, 16, v29
+; GFX11-FAKE16-NEXT: v_cndmask_b32_e32 v26, v26, v36, vcc_lo
+; GFX11-FAKE16-NEXT: v_lshl_or_b32 v15, v30, 16, v27
+; GFX11-FAKE16-NEXT: v_and_b32_e32 v30, 0xffff, v13
+; GFX11-FAKE16-NEXT: v_lshl_or_b32 v11, v28, 16, v33
+; GFX11-FAKE16-NEXT: v_and_b32_e32 v27, 0xffff, v29
+; GFX11-FAKE16-NEXT: v_lshrrev_b32_e32 v26, 16, v26
+; GFX11-FAKE16-NEXT: v_and_b32_e32 v29, 0xffff, v14
+; GFX11-FAKE16-NEXT: v_and_b32_e32 v28, 0xffff, v7
+; GFX11-FAKE16-NEXT: v_lshl_or_b32 v12, v31, 16, v30
+; GFX11-FAKE16-NEXT: v_lshl_or_b32 v10, v10, 16, v34
+; GFX11-FAKE16-NEXT: v_lshl_or_b32 v14, v26, 16, v27
+; GFX11-FAKE16-NEXT: v_lshl_or_b32 v13, v32, 16, v29
+; GFX11-FAKE16-NEXT: v_and_b32_e32 v26, 0xffff, v9
+; GFX11-FAKE16-NEXT: v_and_b32_e32 v27, 0xffff, v8
+; GFX11-FAKE16-NEXT: v_and_b32_e32 v29, 0xffff, v6
+; GFX11-FAKE16-NEXT: v_lshl_or_b32 v6, v5, 16, v28
+; GFX11-FAKE16-NEXT: v_lshl_or_b32 v9, v24, 16, v25
+; GFX11-FAKE16-NEXT: v_lshl_or_b32 v8, v23, 16, v26
+; GFX11-FAKE16-NEXT: v_lshl_or_b32 v7, v22, 16, v27
+; GFX11-FAKE16-NEXT: v_lshl_or_b32 v5, v21, 16, v29
+; GFX11-FAKE16-NEXT: v_and_b32_e32 v21, 0xffff, v3
+; GFX11-FAKE16-NEXT: v_and_b32_e32 v22, 0xffff, v2
+; GFX11-FAKE16-NEXT: v_and_b32_e32 v23, 0xffff, v1
+; GFX11-FAKE16-NEXT: v_lshl_or_b32 v3, v18, 16, v19
+; GFX11-FAKE16-NEXT: s_delay_alu instid0(VALU_DEP_4) | instskip(NEXT) | instid1(VALU_DEP_4)
+; GFX11-FAKE16-NEXT: v_lshl_or_b32 v2, v17, 16, v21
+; GFX11-FAKE16-NEXT: v_lshl_or_b32 v1, v16, 16, v22
+; GFX11-FAKE16-NEXT: s_delay_alu instid0(VALU_DEP_4)
+; GFX11-FAKE16-NEXT: v_lshl_or_b32 v0, v0, 16, v23
+; GFX11-FAKE16-NEXT: s_setpc_b64 s[30:31]
+; GFX11-FAKE16-NEXT: .LBB103_3:
+; GFX11-FAKE16-NEXT: s_branch .LBB103_2
+; GFX11-FAKE16-NEXT: .LBB103_4:
+; GFX11-FAKE16-NEXT: v_dual_mov_b32 v0, s12 :: v_dual_mov_b32 v1, s13
+; GFX11-FAKE16-NEXT: v_dual_mov_b32 v2, s14 :: v_dual_mov_b32 v3, s15
+; GFX11-FAKE16-NEXT: v_dual_mov_b32 v4, s16 :: v_dual_mov_b32 v5, s17
+; GFX11-FAKE16-NEXT: v_dual_mov_b32 v6, s18 :: v_dual_mov_b32 v7, s19
+; GFX11-FAKE16-NEXT: v_dual_mov_b32 v8, s20 :: v_dual_mov_b32 v9, s21
+; GFX11-FAKE16-NEXT: v_dual_mov_b32 v10, s22 :: v_dual_mov_b32 v11, s23
+; GFX11-FAKE16-NEXT: v_dual_mov_b32 v12, s24 :: v_dual_mov_b32 v13, s25
+; GFX11-FAKE16-NEXT: v_dual_mov_b32 v14, s26 :: v_dual_mov_b32 v15, s27
+; GFX11-FAKE16-NEXT: s_setpc_b64 s[30:31]
%cmp = icmp eq i32 %b, 0
br i1 %cmp, label %cmp.true, label %cmp.false
@@ -81928,170 +83837,106 @@ define inreg <32 x half> @bitcast_v64i8_to_v32f16_scalar(<64 x i8> inreg %a, i32
; GFX11-TRUE16-NEXT: s_lshl_b32 s8, s3, 8
; GFX11-TRUE16-NEXT: s_or_b32 s5, s5, s6
; GFX11-TRUE16-NEXT: s_or_b32 s6, s7, s8
-; GFX11-TRUE16-NEXT: s_and_b32 s7, s16, 0xff
-; GFX11-TRUE16-NEXT: s_lshl_b32 s8, s17, 8
-; GFX11-TRUE16-NEXT: s_and_b32 s9, s18, 0xff
-; GFX11-TRUE16-NEXT: s_lshl_b32 s10, s19, 8
-; GFX11-TRUE16-NEXT: s_or_b32 s7, s7, s8
-; GFX11-TRUE16-NEXT: s_or_b32 s8, s9, s10
+; GFX11-TRUE16-NEXT: s_lshl_b32 s7, s17, 8
; GFX11-TRUE16-NEXT: s_pack_ll_b32_b16 s5, s5, s6
-; GFX11-TRUE16-NEXT: s_pack_ll_b32_b16 s6, s7, s8
-; GFX11-TRUE16-NEXT: s_and_b32 s7, s20, 0xff
-; GFX11-TRUE16-NEXT: s_lshl_b32 s8, s21, 8
-; GFX11-TRUE16-NEXT: s_and_b32 s9, s22, 0xff
-; GFX11-TRUE16-NEXT: s_lshl_b32 s10, s23, 8
-; GFX11-TRUE16-NEXT: s_or_b32 s7, s7, s8
-; GFX11-TRUE16-NEXT: s_or_b32 s8, s9, s10
-; GFX11-TRUE16-NEXT: s_and_b32 s9, s24, 0xff
-; GFX11-TRUE16-NEXT: s_lshl_b32 s10, s25, 8
+; GFX11-TRUE16-NEXT: s_and_b32 s6, s16, 0xff
+; GFX11-TRUE16-NEXT: s_and_b32 s8, s18, 0xff
+; GFX11-TRUE16-NEXT: s_lshl_b32 s9, s19, 8
+; GFX11-TRUE16-NEXT: s_or_b32 s6, s6, s7
+; GFX11-TRUE16-NEXT: s_or_b32 s7, s8, s9
+; GFX11-TRUE16-NEXT: s_and_b32 s8, s20, 0xff
+; GFX11-TRUE16-NEXT: s_lshl_b32 s9, s21, 8
+; GFX11-TRUE16-NEXT: s_and_b32 s10, s22, 0xff
+; GFX11-TRUE16-NEXT: s_lshl_b32 s11, s23, 8
+; GFX11-TRUE16-NEXT: s_or_b32 s8, s8, s9
+; GFX11-TRUE16-NEXT: s_or_b32 s9, s10, s11
+; GFX11-TRUE16-NEXT: s_pack_ll_b32_b16 s6, s6, s7
+; GFX11-TRUE16-NEXT: s_pack_ll_b32_b16 s7, s8, s9
+; GFX11-TRUE16-NEXT: s_and_b32 s8, s24, 0xff
+; GFX11-TRUE16-NEXT: s_lshl_b32 s9, s25, 8
+; GFX11-TRUE16-NEXT: s_and_b32 s10, s26, 0xff
+; GFX11-TRUE16-NEXT: s_lshl_b32 s11, s27, 8
; GFX11-TRUE16-NEXT: v_and_b32_e32 v0, 0xff, v35
-; GFX11-TRUE16-NEXT: s_pack_ll_b32_b16 s7, s7, s8
-; GFX11-TRUE16-NEXT: s_or_b32 s8, s9, s10
+; GFX11-TRUE16-NEXT: s_or_b32 s8, s8, s9
+; GFX11-TRUE16-NEXT: s_or_b32 s9, s10, s11
; GFX11-TRUE16-NEXT: s_and_b32 s10, s28, 0xff
; GFX11-TRUE16-NEXT: s_lshl_b32 s11, s29, 8
-; GFX11-TRUE16-NEXT: v_or_b32_e32 v0, v0, v39
-; GFX11-TRUE16-NEXT: s_or_b32 s10, s10, s11
-; GFX11-TRUE16-NEXT: s_and_b32 s9, s26, 0xff
-; GFX11-TRUE16-NEXT: v_and_b32_e64 v1, 0xffff, s10
-; GFX11-TRUE16-NEXT: s_lshl_b32 s12, s27, 8
-; GFX11-TRUE16-NEXT: v_and_b32_e32 v3, 0xff, v38
-; GFX11-TRUE16-NEXT: s_or_b32 s9, s9, s12
-; GFX11-TRUE16-NEXT: v_and_b32_e32 v5, 0xff, v31
; GFX11-TRUE16-NEXT: s_pack_ll_b32_b16 s8, s8, s9
+; GFX11-TRUE16-NEXT: s_or_b32 s9, s10, s11
+; GFX11-TRUE16-NEXT: s_delay_alu instid0(SALU_CYCLE_1) | instskip(SKIP_3) | instid1(VALU_DEP_4)
+; GFX11-TRUE16-NEXT: v_dual_mov_b32 v4, s9 :: v_dual_and_b32 v1, 0xff, v38
+; GFX11-TRUE16-NEXT: v_or_b32_e32 v0, v0, v39
; GFX11-TRUE16-NEXT: v_and_b32_e32 v2, 0xff, v37
-; GFX11-TRUE16-NEXT: v_lshl_or_b32 v4, v0, 16, v1
+; GFX11-TRUE16-NEXT: v_and_b32_e32 v3, 0xff, v31
+; GFX11-TRUE16-NEXT: v_or_b32_e32 v1, v1, v48
+; GFX11-TRUE16-NEXT: s_waitcnt vmcnt(5)
+; GFX11-TRUE16-NEXT: v_and_b32_e32 v15, 0xff, v82
+; GFX11-TRUE16-NEXT: v_mov_b16_e32 v4.h, v0.l
; GFX11-TRUE16-NEXT: v_and_b32_e32 v0, 0xff, v33
-; GFX11-TRUE16-NEXT: v_or_b32_e32 v3, v3, v48
-; GFX11-TRUE16-NEXT: v_and_b32_e32 v6, 0xff, v36
-; GFX11-TRUE16-NEXT: v_or_b32_e32 v1, v2, v49
-; GFX11-TRUE16-NEXT: v_and_b32_e32 v2, 0xff, v32
-; GFX11-TRUE16-NEXT: v_or_b32_e32 v0, v0, v51
-; GFX11-TRUE16-NEXT: v_or_b32_e32 v7, v5, v50
-; GFX11-TRUE16-NEXT: v_or_b32_e32 v8, v6, v52
-; GFX11-TRUE16-NEXT: v_and_b32_e32 v1, 0xffff, v1
-; GFX11-TRUE16-NEXT: v_or_b32_e32 v2, v2, v53
-; GFX11-TRUE16-NEXT: v_and_b32_e32 v0, 0xffff, v0
-; GFX11-TRUE16-NEXT: v_and_b32_e32 v9, 0xff, v24
-; GFX11-TRUE16-NEXT: s_waitcnt vmcnt(1)
-; GFX11-TRUE16-NEXT: v_and_b32_e32 v11, 0xff, v68
-; GFX11-TRUE16-NEXT: v_lshl_or_b32 v5, v3, 16, v1
-; GFX11-TRUE16-NEXT: v_and_b32_e32 v2, 0xffff, v2
-; GFX11-TRUE16-NEXT: v_and_b32_e32 v1, 0xff, v34
-; GFX11-TRUE16-NEXT: v_and_b32_e32 v3, 0xff, v16
-; GFX11-TRUE16-NEXT: v_lshl_or_b32 v6, v7, 16, v0
-; GFX11-TRUE16-NEXT: v_and_b32_e32 v0, 0xff, v18
-; GFX11-TRUE16-NEXT: v_lshl_or_b32 v7, v8, 16, v2
-; GFX11-TRUE16-NEXT: v_or_b32_e32 v1, v1, v54
-; GFX11-TRUE16-NEXT: v_or_b32_e32 v2, v3, v17
-; GFX11-TRUE16-NEXT: v_and_b32_e32 v3, 0xff, v20
-; GFX11-TRUE16-NEXT: v_and_b32_e32 v8, 0xff, v22
-; GFX11-TRUE16-NEXT: v_or_b32_e32 v0, v0, v55
-; GFX11-TRUE16-NEXT: v_and_b32_e32 v1, 0xffff, v1
-; GFX11-TRUE16-NEXT: v_and_b32_e32 v12, 0xff, v67
-; GFX11-TRUE16-NEXT: v_or_b32_e32 v3, v3, v19
-; GFX11-TRUE16-NEXT: v_or_b32_e32 v10, v8, v23
-; GFX11-TRUE16-NEXT: v_and_b32_e32 v0, 0xffff, v0
-; GFX11-TRUE16-NEXT: v_lshl_or_b32 v8, v2, 16, v1
-; GFX11-TRUE16-NEXT: v_or_b32_e32 v1, v9, v21
-; GFX11-TRUE16-NEXT: v_and_b32_e32 v2, 0xff, v26
-; GFX11-TRUE16-NEXT: v_and_b32_e32 v10, 0xffff, v10
-; GFX11-TRUE16-NEXT: v_lshl_or_b32 v9, v3, 16, v0
-; GFX11-TRUE16-NEXT: v_and_b32_e32 v3, 0xff, v30
-; GFX11-TRUE16-NEXT: v_and_b32_e32 v13, 0xff, v80
-; GFX11-TRUE16-NEXT: v_and_b32_e32 v14, 0xff, v82
+; GFX11-TRUE16-NEXT: v_or_b32_e32 v5, v2, v49
+; GFX11-TRUE16-NEXT: v_or_b32_e32 v2, v3, v50
+; GFX11-TRUE16-NEXT: v_mov_b16_e32 v5.h, v1.l
+; GFX11-TRUE16-NEXT: v_and_b32_e32 v1, 0xff, v36
+; GFX11-TRUE16-NEXT: v_or_b32_e32 v6, v0, v51
+; GFX11-TRUE16-NEXT: v_and_b32_e32 v0, 0xff, v32
+; GFX11-TRUE16-NEXT: v_mov_b16_e32 v6.h, v2.l
+; GFX11-TRUE16-NEXT: v_and_b32_e32 v2, 0xff, v16
+; GFX11-TRUE16-NEXT: v_or_b32_e32 v1, v1, v52
+; GFX11-TRUE16-NEXT: v_and_b32_e32 v3, 0xff, v34
+; GFX11-TRUE16-NEXT: v_or_b32_e32 v7, v0, v53
+; GFX11-TRUE16-NEXT: v_and_b32_e32 v0, 0xff, v20
+; GFX11-TRUE16-NEXT: v_or_b32_e32 v2, v2, v17
+; GFX11-TRUE16-NEXT: v_mov_b16_e32 v7.h, v1.l
+; GFX11-TRUE16-NEXT: v_and_b32_e32 v1, 0xff, v18
+; GFX11-TRUE16-NEXT: v_or_b32_e32 v8, v3, v54
+; GFX11-TRUE16-NEXT: v_or_b32_e32 v0, v0, v19
+; GFX11-TRUE16-NEXT: v_mov_b16_e32 v8.h, v2.l
+; GFX11-TRUE16-NEXT: v_and_b32_e32 v2, 0xff, v24
+; GFX11-TRUE16-NEXT: v_or_b32_e32 v9, v1, v55
+; GFX11-TRUE16-NEXT: v_and_b32_e32 v3, 0xff, v22
+; GFX11-TRUE16-NEXT: v_mov_b16_e32 v9.h, v0.l
; GFX11-TRUE16-NEXT: v_and_b32_e32 v0, 0xff, v28
-; GFX11-TRUE16-NEXT: v_or_b32_e32 v2, v2, v27
-; GFX11-TRUE16-NEXT: v_lshl_or_b32 v10, v1, 16, v10
+; GFX11-TRUE16-NEXT: v_or_b32_e32 v1, v2, v21
+; GFX11-TRUE16-NEXT: v_and_b32_e32 v2, 0xff, v26
+; GFX11-TRUE16-NEXT: v_or_b32_e32 v10, v3, v23
; GFX11-TRUE16-NEXT: s_waitcnt vmcnt(0)
-; GFX11-TRUE16-NEXT: v_and_b32_e32 v1, 0xff, v65
-; GFX11-TRUE16-NEXT: v_or_b32_e32 v3, v3, v66
-; GFX11-TRUE16-NEXT: v_or_b32_e32 v11, v11, v70
-; GFX11-TRUE16-NEXT: v_or_b32_e32 v15, v12, v71
-; GFX11-TRUE16-NEXT: v_and_b32_e32 v12, 0xff, v69
-; GFX11-TRUE16-NEXT: v_or_b32_e32 v13, v13, v83
-; GFX11-TRUE16-NEXT: v_and_b32_e32 v86, 0xff, v64
-; GFX11-TRUE16-NEXT: v_or_b32_e32 v14, v14, v84
+; GFX11-TRUE16-NEXT: v_and_b32_e32 v3, 0xff, v65
; GFX11-TRUE16-NEXT: v_or_b32_e32 v0, v0, v25
-; GFX11-TRUE16-NEXT: v_and_b32_e32 v2, 0xffff, v2
-; GFX11-TRUE16-NEXT: v_or_b32_e32 v1, v1, v29
-; GFX11-TRUE16-NEXT: v_and_b32_e32 v3, 0xffff, v3
-; GFX11-TRUE16-NEXT: v_and_b32_e32 v87, 0xffff, v11
-; GFX11-TRUE16-NEXT: v_or_b32_e32 v96, v12, v81
-; GFX11-TRUE16-NEXT: v_and_b32_e32 v97, 0xffff, v13
-; GFX11-TRUE16-NEXT: v_or_b32_e32 v86, v86, v85
-; GFX11-TRUE16-NEXT: v_and_b32_e32 v98, 0xffff, v14
-; GFX11-TRUE16-NEXT: v_lshl_or_b32 v11, v0, 16, v2
-; GFX11-TRUE16-NEXT: v_lshl_or_b32 v12, v1, 16, v3
-; GFX11-TRUE16-NEXT: v_lshl_or_b32 v13, v15, 16, v87
-; GFX11-TRUE16-NEXT: v_lshl_or_b32 v14, v96, 16, v97
-; GFX11-TRUE16-NEXT: v_lshl_or_b32 v15, v86, 16, v98
-; GFX11-TRUE16-NEXT: v_dual_mov_b32 v0, s5 :: v_dual_mov_b32 v1, s6
+; GFX11-TRUE16-NEXT: v_mov_b16_e32 v10.h, v1.l
+; GFX11-TRUE16-NEXT: v_and_b32_e32 v1, 0xff, v30
+; GFX11-TRUE16-NEXT: v_or_b32_e32 v11, v2, v27
+; GFX11-TRUE16-NEXT: v_or_b32_e32 v2, v3, v29
+; GFX11-TRUE16-NEXT: v_mov_b16_e32 v11.h, v0.l
+; GFX11-TRUE16-NEXT: v_and_b32_e32 v0, 0xff, v68
+; GFX11-TRUE16-NEXT: v_or_b32_e32 v12, v1, v66
+; GFX11-TRUE16-NEXT: v_and_b32_e32 v1, 0xff, v67
+; GFX11-TRUE16-NEXT: v_mov_b16_e32 v12.h, v2.l
+; GFX11-TRUE16-NEXT: v_and_b32_e32 v2, 0xff, v69
+; GFX11-TRUE16-NEXT: v_or_b32_e32 v13, v0, v70
+; GFX11-TRUE16-NEXT: v_and_b32_e32 v0, 0xff, v64
+; GFX11-TRUE16-NEXT: v_and_b32_e32 v3, 0xff, v80
+; GFX11-TRUE16-NEXT: v_or_b32_e32 v1, v1, v71
+; GFX11-TRUE16-NEXT: v_or_b32_e32 v2, v2, v81
+; GFX11-TRUE16-NEXT: v_or_b32_e32 v15, v15, v84
+; GFX11-TRUE16-NEXT: v_or_b32_e32 v0, v0, v85
+; GFX11-TRUE16-NEXT: v_or_b32_e32 v14, v3, v83
+; GFX11-TRUE16-NEXT: v_mov_b16_e32 v13.h, v1.l
+; GFX11-TRUE16-NEXT: v_mov_b16_e32 v14.h, v2.l
+; GFX11-TRUE16-NEXT: v_mov_b32_e32 v1, s6
+; GFX11-TRUE16-NEXT: v_mov_b16_e32 v15.h, v0.l
+; GFX11-TRUE16-NEXT: v_mov_b32_e32 v0, s5
; GFX11-TRUE16-NEXT: v_dual_mov_b32 v2, s7 :: v_dual_mov_b32 v3, s8
; GFX11-TRUE16-NEXT: s_and_not1_b32 vcc_lo, exec_lo, s4
; GFX11-TRUE16-NEXT: s_cbranch_vccnz .LBB107_3
; GFX11-TRUE16-NEXT: .LBB107_2: ; %cmp.true
-; GFX11-TRUE16-NEXT: s_waitcnt vmcnt(1)
-; GFX11-TRUE16-NEXT: v_add_nc_u32_e32 v4, 3, v68
-; GFX11-TRUE16-NEXT: v_add_nc_u32_e32 v5, 3, v67
-; GFX11-TRUE16-NEXT: v_add_nc_u32_e32 v6, 3, v30
-; GFX11-TRUE16-NEXT: s_waitcnt vmcnt(0)
-; GFX11-TRUE16-NEXT: v_add_nc_u32_e32 v7, 3, v65
-; GFX11-TRUE16-NEXT: v_add_nc_u32_e32 v8, 3, v22
-; GFX11-TRUE16-NEXT: v_and_b32_e32 v4, 0xff, v4
-; GFX11-TRUE16-NEXT: v_and_b32_e32 v5, 0xff, v5
-; GFX11-TRUE16-NEXT: v_and_b32_e32 v6, 0xff, v6
-; GFX11-TRUE16-NEXT: v_add_nc_u32_e32 v9, 3, v16
; GFX11-TRUE16-NEXT: s_add_i32 s28, s28, 3
-; GFX11-TRUE16-NEXT: v_or_b32_e32 v4, v70, v4
-; GFX11-TRUE16-NEXT: v_or_b32_e32 v5, v71, v5
-; GFX11-TRUE16-NEXT: s_and_b32 s4, s28, 0xff
; GFX11-TRUE16-NEXT: s_lshl_b32 s5, s29, 8
+; GFX11-TRUE16-NEXT: s_and_b32 s4, s28, 0xff
; GFX11-TRUE16-NEXT: s_add_i32 s24, s24, 3
-; GFX11-TRUE16-NEXT: v_add_nc_u32_e32 v11, 0x300, v4
-; GFX11-TRUE16-NEXT: v_add_nc_u32_e32 v13, 0x300, v5
-; GFX11-TRUE16-NEXT: v_or_b32_e32 v4, v66, v6
-; GFX11-TRUE16-NEXT: v_add_nc_u32_e32 v5, 3, v26
-; GFX11-TRUE16-NEXT: v_and_b32_e32 v6, 0xff, v7
-; GFX11-TRUE16-NEXT: v_add_nc_u32_e32 v7, 3, v28
; GFX11-TRUE16-NEXT: s_or_b32 s4, s5, s4
-; GFX11-TRUE16-NEXT: v_add_nc_u32_e32 v12, 0x300, v4
-; GFX11-TRUE16-NEXT: v_and_b32_e32 v4, 0xff, v5
-; GFX11-TRUE16-NEXT: v_or_b32_e32 v5, v29, v6
-; GFX11-TRUE16-NEXT: v_and_b32_e32 v6, 0xff, v7
-; GFX11-TRUE16-NEXT: v_and_b32_e32 v7, 0xff, v8
-; GFX11-TRUE16-NEXT: v_add_nc_u32_e32 v8, 3, v24
-; GFX11-TRUE16-NEXT: v_or_b32_e32 v4, v27, v4
-; GFX11-TRUE16-NEXT: v_add_nc_u32_e32 v14, 0x300, v5
-; GFX11-TRUE16-NEXT: v_or_b32_e32 v5, v25, v6
-; GFX11-TRUE16-NEXT: v_or_b32_e32 v6, v23, v7
-; GFX11-TRUE16-NEXT: v_and_b32_e32 v7, 0xff, v8
-; GFX11-TRUE16-NEXT: v_add_nc_u32_e32 v15, 0x300, v4
-; GFX11-TRUE16-NEXT: v_add_nc_u32_e32 v4, 3, v18
-; GFX11-TRUE16-NEXT: v_add_nc_u32_e32 v18, 0x300, v5
-; GFX11-TRUE16-NEXT: v_add_nc_u32_e32 v8, 3, v34
-; GFX11-TRUE16-NEXT: v_or_b32_e32 v5, v21, v7
-; GFX11-TRUE16-NEXT: v_add_nc_u32_e32 v7, 3, v20
-; GFX11-TRUE16-NEXT: v_and_b32_e32 v4, 0xff, v4
; GFX11-TRUE16-NEXT: s_and_b32 s5, s24, 0xff
; GFX11-TRUE16-NEXT: s_lshl_b32 s6, s25, 8
-; GFX11-TRUE16-NEXT: v_add_nc_u32_e32 v10, 0x300, v5
-; GFX11-TRUE16-NEXT: v_and_b32_e32 v5, 0xff, v7
-; GFX11-TRUE16-NEXT: v_or_b32_e32 v4, v55, v4
-; GFX11-TRUE16-NEXT: v_and_b32_e32 v7, 0xff, v8
-; GFX11-TRUE16-NEXT: v_and_b32_e32 v8, 0xff, v9
-; GFX11-TRUE16-NEXT: v_add_nc_u32_e32 v9, 3, v32
-; GFX11-TRUE16-NEXT: v_or_b32_e32 v5, v19, v5
-; GFX11-TRUE16-NEXT: v_add_nc_u32_e32 v16, 0x300, v4
-; GFX11-TRUE16-NEXT: v_or_b32_e32 v4, v54, v7
-; GFX11-TRUE16-NEXT: v_or_b32_e32 v7, v17, v8
-; GFX11-TRUE16-NEXT: v_and_b32_e32 v8, 0xff, v9
; GFX11-TRUE16-NEXT: s_add_i32 s26, s26, 3
-; GFX11-TRUE16-NEXT: v_add_nc_u32_e32 v9, 0x300, v5
-; GFX11-TRUE16-NEXT: v_add_nc_u32_e32 v5, 3, v36
-; GFX11-TRUE16-NEXT: v_add_nc_u32_e32 v17, 0x300, v4
-; GFX11-TRUE16-NEXT: v_add_nc_u32_e32 v19, 0x300, v7
-; GFX11-TRUE16-NEXT: v_or_b32_e32 v4, v53, v8
-; GFX11-TRUE16-NEXT: v_add_nc_u32_e32 v7, 3, v33
; GFX11-TRUE16-NEXT: s_or_b32 s5, s6, s5
; GFX11-TRUE16-NEXT: s_and_b32 s6, s26, 0xff
; GFX11-TRUE16-NEXT: s_lshl_b32 s7, s27, 8
@@ -82100,11 +83945,6 @@ define inreg <32 x half> @bitcast_v64i8_to_v32f16_scalar(<64 x i8> inreg %a, i32
; GFX11-TRUE16-NEXT: s_and_b32 s7, s20, 0xff
; GFX11-TRUE16-NEXT: s_lshl_b32 s8, s21, 8
; GFX11-TRUE16-NEXT: s_add_i32 s22, s22, 3
-; GFX11-TRUE16-NEXT: v_and_b32_e32 v5, 0xff, v5
-; GFX11-TRUE16-NEXT: v_add_nc_u32_e32 v8, 3, v31
-; GFX11-TRUE16-NEXT: v_add_nc_u32_e32 v20, 3, v37
-; GFX11-TRUE16-NEXT: v_add_nc_u32_e32 v21, 0x300, v4
-; GFX11-TRUE16-NEXT: v_and_b32_e32 v4, 0xff, v7
; GFX11-TRUE16-NEXT: s_or_b32 s7, s8, s7
; GFX11-TRUE16-NEXT: s_and_b32 s8, s22, 0xff
; GFX11-TRUE16-NEXT: s_lshl_b32 s9, s23, 8
@@ -82122,83 +83962,133 @@ define inreg <32 x half> @bitcast_v64i8_to_v32f16_scalar(<64 x i8> inreg %a, i32
; GFX11-TRUE16-NEXT: s_lshl_b32 s1, s1, 8
; GFX11-TRUE16-NEXT: s_and_b32 s2, s2, 0xff
; GFX11-TRUE16-NEXT: s_lshl_b32 s3, s3, 8
-; GFX11-TRUE16-NEXT: v_or_b32_e32 v5, v52, v5
-; GFX11-TRUE16-NEXT: v_and_b32_e32 v7, 0xff, v8
-; GFX11-TRUE16-NEXT: v_and_b32_e32 v8, 0xff, v20
-; GFX11-TRUE16-NEXT: v_add_nc_u32_e32 v20, 3, v38
-; GFX11-TRUE16-NEXT: v_or_b32_e32 v4, v51, v4
; GFX11-TRUE16-NEXT: s_or_b32 s10, s11, s10
; GFX11-TRUE16-NEXT: s_or_b32 s0, s1, s0
; GFX11-TRUE16-NEXT: s_or_b32 s1, s3, s2
-; GFX11-TRUE16-NEXT: s_addk_i32 s5, 0x300
-; GFX11-TRUE16-NEXT: s_addk_i32 s6, 0x300
; GFX11-TRUE16-NEXT: s_addk_i32 s9, 0x300
; GFX11-TRUE16-NEXT: s_addk_i32 s0, 0x300
; GFX11-TRUE16-NEXT: s_addk_i32 s1, 0x300
; GFX11-TRUE16-NEXT: s_addk_i32 s10, 0x300
-; GFX11-TRUE16-NEXT: v_add_nc_u32_e32 v22, 0x300, v5
-; GFX11-TRUE16-NEXT: v_or_b32_e32 v5, v50, v7
-; GFX11-TRUE16-NEXT: v_or_b32_e32 v7, v49, v8
-; GFX11-TRUE16-NEXT: v_and_b32_e32 v8, 0xff, v20
-; GFX11-TRUE16-NEXT: v_add_nc_u32_e32 v20, 0x300, v4
-; GFX11-TRUE16-NEXT: v_add_nc_u32_e32 v4, 3, v35
+; GFX11-TRUE16-NEXT: s_addk_i32 s4, 0x300
+; GFX11-TRUE16-NEXT: v_add_nc_u32_e32 v1, 3, v64
; GFX11-TRUE16-NEXT: s_pack_ll_b32_b16 s0, s0, s1
; GFX11-TRUE16-NEXT: s_pack_ll_b32_b16 s1, s9, s10
+; GFX11-TRUE16-NEXT: s_waitcnt vmcnt(5)
; GFX11-TRUE16-NEXT: v_add_nc_u32_e32 v0, 3, v82
-; GFX11-TRUE16-NEXT: s_pack_ll_b32_b16 s3, s5, s6
-; GFX11-TRUE16-NEXT: v_add_nc_u32_e32 v2, 3, v80
+; GFX11-TRUE16-NEXT: s_addk_i32 s5, 0x300
+; GFX11-TRUE16-NEXT: s_addk_i32 s6, 0x300
; GFX11-TRUE16-NEXT: s_addk_i32 s7, 0x300
; GFX11-TRUE16-NEXT: s_addk_i32 s8, 0x300
-; GFX11-TRUE16-NEXT: v_add_nc_u32_e32 v1, 3, v64
; GFX11-TRUE16-NEXT: v_and_b32_e32 v0, 0xff, v0
-; GFX11-TRUE16-NEXT: v_and_b32_e32 v2, 0xff, v2
-; GFX11-TRUE16-NEXT: v_and_b32_e32 v4, 0xff, v4
; GFX11-TRUE16-NEXT: s_pack_ll_b32_b16 s2, s7, s8
-; GFX11-TRUE16-NEXT: v_add_nc_u32_e32 v3, 3, v69
; GFX11-TRUE16-NEXT: v_and_b32_e32 v1, 0xff, v1
+; GFX11-TRUE16-NEXT: s_pack_ll_b32_b16 s3, s5, s6
+; GFX11-TRUE16-NEXT: s_waitcnt vmcnt(3)
+; GFX11-TRUE16-NEXT: v_add_nc_u32_e32 v2, 3, v80
+; GFX11-TRUE16-NEXT: s_waitcnt vmcnt(1)
+; GFX11-TRUE16-NEXT: v_add_nc_u32_e32 v4, 3, v68
+; GFX11-TRUE16-NEXT: v_add_nc_u32_e32 v5, 3, v67
; GFX11-TRUE16-NEXT: v_or_b32_e32 v0, v84, v0
+; GFX11-TRUE16-NEXT: v_or_b32_e32 v1, v85, v1
+; GFX11-TRUE16-NEXT: v_and_b32_e32 v2, 0xff, v2
+; GFX11-TRUE16-NEXT: v_and_b32_e32 v4, 0xff, v4
+; GFX11-TRUE16-NEXT: v_add_nc_u32_e32 v6, 3, v28
+; GFX11-TRUE16-NEXT: v_add_nc_u32_e32 v15, 0x300, v0
+; GFX11-TRUE16-NEXT: v_add_nc_u32_e32 v0, 0x300, v1
; GFX11-TRUE16-NEXT: v_or_b32_e32 v2, v83, v2
-; GFX11-TRUE16-NEXT: v_add_nc_u32_e32 v23, 0x300, v5
+; GFX11-TRUE16-NEXT: v_and_b32_e32 v1, 0xff, v5
+; GFX11-TRUE16-NEXT: v_add_nc_u32_e32 v5, 3, v30
+; GFX11-TRUE16-NEXT: v_add_nc_u32_e32 v7, 3, v22
+; GFX11-TRUE16-NEXT: v_add_nc_u32_e32 v3, 3, v69
+; GFX11-TRUE16-NEXT: v_add_nc_u32_e32 v14, 0x300, v2
+; GFX11-TRUE16-NEXT: v_or_b32_e32 v2, v70, v4
+; GFX11-TRUE16-NEXT: v_and_b32_e32 v4, 0xff, v5
+; GFX11-TRUE16-NEXT: s_waitcnt vmcnt(0)
+; GFX11-TRUE16-NEXT: v_add_nc_u32_e32 v5, 3, v65
+; GFX11-TRUE16-NEXT: v_mov_b16_e32 v15.h, v0.l
+; GFX11-TRUE16-NEXT: v_dual_mov_b32 v0, s0 :: v_dual_add_nc_u32 v13, 0x300, v2
+; GFX11-TRUE16-NEXT: v_or_b32_e32 v2, v66, v4
+; GFX11-TRUE16-NEXT: v_add_nc_u32_e32 v4, 3, v26
+; GFX11-TRUE16-NEXT: v_and_b32_e32 v5, 0xff, v5
; GFX11-TRUE16-NEXT: v_and_b32_e32 v3, 0xff, v3
-; GFX11-TRUE16-NEXT: v_add_nc_u32_e32 v5, 0x300, v7
-; GFX11-TRUE16-NEXT: v_or_b32_e32 v7, v48, v8
-; GFX11-TRUE16-NEXT: v_or_b32_e32 v4, v39, v4
-; GFX11-TRUE16-NEXT: s_addk_i32 s4, 0x300
-; GFX11-TRUE16-NEXT: v_or_b32_e32 v1, v85, v1
-; GFX11-TRUE16-NEXT: v_add_nc_u32_e32 v0, 0x300, v0
+; GFX11-TRUE16-NEXT: v_or_b32_e32 v1, v71, v1
+; GFX11-TRUE16-NEXT: v_add_nc_u32_e32 v12, 0x300, v2
+; GFX11-TRUE16-NEXT: v_and_b32_e32 v2, 0xff, v4
+; GFX11-TRUE16-NEXT: v_or_b32_e32 v4, v29, v5
+; GFX11-TRUE16-NEXT: v_and_b32_e32 v5, 0xff, v6
+; GFX11-TRUE16-NEXT: v_and_b32_e32 v6, 0xff, v7
+; GFX11-TRUE16-NEXT: v_add_nc_u32_e32 v7, 3, v24
+; GFX11-TRUE16-NEXT: v_or_b32_e32 v2, v27, v2
+; GFX11-TRUE16-NEXT: v_add_nc_u32_e32 v22, 0x300, v4
+; GFX11-TRUE16-NEXT: v_or_b32_e32 v4, v25, v5
+; GFX11-TRUE16-NEXT: v_or_b32_e32 v5, v23, v6
+; GFX11-TRUE16-NEXT: v_and_b32_e32 v6, 0xff, v7
+; GFX11-TRUE16-NEXT: v_add_nc_u32_e32 v11, 0x300, v2
+; GFX11-TRUE16-NEXT: v_add_nc_u32_e32 v2, 3, v18
+; GFX11-TRUE16-NEXT: v_add_nc_u32_e32 v18, 0x300, v4
+; GFX11-TRUE16-NEXT: v_add_nc_u32_e32 v10, 0x300, v5
+; GFX11-TRUE16-NEXT: v_or_b32_e32 v4, v21, v6
+; GFX11-TRUE16-NEXT: v_add_nc_u32_e32 v5, 3, v20
+; GFX11-TRUE16-NEXT: v_and_b32_e32 v2, 0xff, v2
+; GFX11-TRUE16-NEXT: v_add_nc_u32_e32 v6, 3, v34
+; GFX11-TRUE16-NEXT: v_add_nc_u32_e32 v7, 3, v16
+; GFX11-TRUE16-NEXT: v_add_nc_u32_e32 v16, 0x300, v4
+; GFX11-TRUE16-NEXT: v_and_b32_e32 v4, 0xff, v5
+; GFX11-TRUE16-NEXT: v_or_b32_e32 v2, v55, v2
+; GFX11-TRUE16-NEXT: v_and_b32_e32 v5, 0xff, v6
+; GFX11-TRUE16-NEXT: v_and_b32_e32 v6, 0xff, v7
+; GFX11-TRUE16-NEXT: v_add_nc_u32_e32 v7, 3, v32
+; GFX11-TRUE16-NEXT: v_or_b32_e32 v4, v19, v4
+; GFX11-TRUE16-NEXT: v_add_nc_u32_e32 v9, 0x300, v2
+; GFX11-TRUE16-NEXT: v_or_b32_e32 v2, v54, v5
+; GFX11-TRUE16-NEXT: v_or_b32_e32 v5, v17, v6
+; GFX11-TRUE16-NEXT: v_and_b32_e32 v6, 0xff, v7
+; GFX11-TRUE16-NEXT: v_add_nc_u32_e32 v17, 0x300, v4
+; GFX11-TRUE16-NEXT: v_add_nc_u32_e32 v4, 3, v36
+; GFX11-TRUE16-NEXT: v_add_nc_u32_e32 v8, 0x300, v2
+; GFX11-TRUE16-NEXT: v_add_nc_u32_e32 v2, 0x300, v5
+; GFX11-TRUE16-NEXT: v_or_b32_e32 v5, v53, v6
+; GFX11-TRUE16-NEXT: v_add_nc_u32_e32 v6, 3, v33
+; GFX11-TRUE16-NEXT: v_and_b32_e32 v4, 0xff, v4
+; GFX11-TRUE16-NEXT: v_add_nc_u32_e32 v19, 3, v31
+; GFX11-TRUE16-NEXT: v_add_nc_u32_e32 v20, 3, v37
+; GFX11-TRUE16-NEXT: v_add_nc_u32_e32 v7, 0x300, v5
+; GFX11-TRUE16-NEXT: v_and_b32_e32 v5, 0xff, v6
+; GFX11-TRUE16-NEXT: v_or_b32_e32 v4, v52, v4
+; GFX11-TRUE16-NEXT: v_and_b32_e32 v6, 0xff, v19
+; GFX11-TRUE16-NEXT: v_and_b32_e32 v19, 0xff, v20
+; GFX11-TRUE16-NEXT: v_add_nc_u32_e32 v20, 3, v38
+; GFX11-TRUE16-NEXT: v_or_b32_e32 v5, v51, v5
+; GFX11-TRUE16-NEXT: v_add_nc_u32_e32 v21, 0x300, v4
+; GFX11-TRUE16-NEXT: v_or_b32_e32 v4, v50, v6
+; GFX11-TRUE16-NEXT: v_or_b32_e32 v19, v49, v19
+; GFX11-TRUE16-NEXT: v_and_b32_e32 v20, 0xff, v20
+; GFX11-TRUE16-NEXT: v_add_nc_u32_e32 v23, 3, v35
+; GFX11-TRUE16-NEXT: v_add_nc_u32_e32 v6, 0x300, v5
+; GFX11-TRUE16-NEXT: v_add_nc_u32_e32 v24, 0x300, v4
+; GFX11-TRUE16-NEXT: v_add_nc_u32_e32 v5, 0x300, v19
+; GFX11-TRUE16-NEXT: v_or_b32_e32 v4, v48, v20
+; GFX11-TRUE16-NEXT: v_and_b32_e32 v19, 0xff, v23
; GFX11-TRUE16-NEXT: v_or_b32_e32 v3, v81, v3
-; GFX11-TRUE16-NEXT: v_add_nc_u32_e32 v2, 0x300, v2
-; GFX11-TRUE16-NEXT: v_add_nc_u32_e32 v6, 0x300, v6
-; GFX11-TRUE16-NEXT: v_add_nc_u32_e32 v7, 0x300, v7
-; GFX11-TRUE16-NEXT: v_add_nc_u32_e32 v4, 0x300, v4
-; GFX11-TRUE16-NEXT: v_and_b32_e64 v8, 0xffff, s4
-; GFX11-TRUE16-NEXT: v_and_b32_e32 v5, 0xffff, v5
-; GFX11-TRUE16-NEXT: v_and_b32_e32 v16, 0xffff, v16
; GFX11-TRUE16-NEXT: v_add_nc_u32_e32 v1, 0x300, v1
+; GFX11-TRUE16-NEXT: v_mov_b16_e32 v6.h, v24.l
+; GFX11-TRUE16-NEXT: v_add_nc_u32_e32 v20, 0x300, v4
+; GFX11-TRUE16-NEXT: v_or_b32_e32 v4, v39, v19
; GFX11-TRUE16-NEXT: v_add_nc_u32_e32 v3, 0x300, v3
-; GFX11-TRUE16-NEXT: v_lshl_or_b32 v4, v4, 16, v8
-; GFX11-TRUE16-NEXT: v_lshl_or_b32 v5, v7, 16, v5
-; GFX11-TRUE16-NEXT: v_and_b32_e32 v7, 0xffff, v20
-; GFX11-TRUE16-NEXT: v_and_b32_e32 v8, 0xffff, v21
-; GFX11-TRUE16-NEXT: v_and_b32_e32 v17, 0xffff, v17
-; GFX11-TRUE16-NEXT: v_and_b32_e32 v20, 0xffff, v6
-; GFX11-TRUE16-NEXT: v_lshl_or_b32 v9, v9, 16, v16
-; GFX11-TRUE16-NEXT: v_and_b32_e32 v15, 0xffff, v15
-; GFX11-TRUE16-NEXT: v_and_b32_e32 v12, 0xffff, v12
-; GFX11-TRUE16-NEXT: v_and_b32_e32 v16, 0xffff, v11
-; GFX11-TRUE16-NEXT: v_and_b32_e32 v2, 0xffff, v2
-; GFX11-TRUE16-NEXT: v_and_b32_e32 v0, 0xffff, v0
-; GFX11-TRUE16-NEXT: v_lshl_or_b32 v6, v23, 16, v7
-; GFX11-TRUE16-NEXT: v_lshl_or_b32 v7, v22, 16, v8
-; GFX11-TRUE16-NEXT: v_lshl_or_b32 v8, v19, 16, v17
-; GFX11-TRUE16-NEXT: v_lshl_or_b32 v10, v10, 16, v20
-; GFX11-TRUE16-NEXT: v_lshl_or_b32 v11, v18, 16, v15
-; GFX11-TRUE16-NEXT: v_lshl_or_b32 v12, v14, 16, v12
-; GFX11-TRUE16-NEXT: v_lshl_or_b32 v13, v13, 16, v16
-; GFX11-TRUE16-NEXT: v_lshl_or_b32 v14, v3, 16, v2
-; GFX11-TRUE16-NEXT: v_lshl_or_b32 v15, v1, 16, v0
-; GFX11-TRUE16-NEXT: v_dual_mov_b32 v0, s0 :: v_dual_mov_b32 v1, s1
-; GFX11-TRUE16-NEXT: v_dual_mov_b32 v2, s2 :: v_dual_mov_b32 v3, s3
+; GFX11-TRUE16-NEXT: v_mov_b16_e32 v7.h, v21.l
+; GFX11-TRUE16-NEXT: v_mov_b16_e32 v8.h, v2.l
+; GFX11-TRUE16-NEXT: v_mov_b16_e32 v5.h, v20.l
+; GFX11-TRUE16-NEXT: v_dual_mov_b32 v4, s4 :: v_dual_add_nc_u32 v19, 0x300, v4
+; GFX11-TRUE16-NEXT: v_mov_b16_e32 v9.h, v17.l
+; GFX11-TRUE16-NEXT: v_mov_b16_e32 v10.h, v16.l
+; GFX11-TRUE16-NEXT: v_mov_b16_e32 v11.h, v18.l
+; GFX11-TRUE16-NEXT: s_delay_alu instid0(VALU_DEP_4)
+; GFX11-TRUE16-NEXT: v_mov_b16_e32 v4.h, v19.l
+; GFX11-TRUE16-NEXT: v_mov_b16_e32 v12.h, v22.l
+; GFX11-TRUE16-NEXT: v_mov_b16_e32 v13.h, v1.l
+; GFX11-TRUE16-NEXT: v_mov_b16_e32 v14.h, v3.l
+; GFX11-TRUE16-NEXT: v_dual_mov_b32 v1, s1 :: v_dual_mov_b32 v2, s2
+; GFX11-TRUE16-NEXT: v_mov_b32_e32 v3, s3
; GFX11-TRUE16-NEXT: .LBB107_3: ; %end
; GFX11-TRUE16-NEXT: s_waitcnt vmcnt(0)
; GFX11-TRUE16-NEXT: s_setpc_b64 s[30:31]
@@ -92186,170 +94076,106 @@ define inreg <32 x bfloat> @bitcast_v64i8_to_v32bf16_scalar(<64 x i8> inreg %a,
; GFX11-TRUE16-NEXT: s_lshl_b32 s8, s3, 8
; GFX11-TRUE16-NEXT: s_or_b32 s5, s5, s6
; GFX11-TRUE16-NEXT: s_or_b32 s6, s7, s8
-; GFX11-TRUE16-NEXT: s_and_b32 s7, s16, 0xff
-; GFX11-TRUE16-NEXT: s_lshl_b32 s8, s17, 8
-; GFX11-TRUE16-NEXT: s_and_b32 s9, s18, 0xff
-; GFX11-TRUE16-NEXT: s_lshl_b32 s10, s19, 8
-; GFX11-TRUE16-NEXT: s_or_b32 s7, s7, s8
-; GFX11-TRUE16-NEXT: s_or_b32 s8, s9, s10
+; GFX11-TRUE16-NEXT: s_lshl_b32 s7, s17, 8
; GFX11-TRUE16-NEXT: s_pack_ll_b32_b16 s5, s5, s6
-; GFX11-TRUE16-NEXT: s_pack_ll_b32_b16 s6, s7, s8
-; GFX11-TRUE16-NEXT: s_and_b32 s7, s20, 0xff
-; GFX11-TRUE16-NEXT: s_lshl_b32 s8, s21, 8
-; GFX11-TRUE16-NEXT: s_and_b32 s9, s22, 0xff
-; GFX11-TRUE16-NEXT: s_lshl_b32 s10, s23, 8
-; GFX11-TRUE16-NEXT: s_or_b32 s7, s7, s8
-; GFX11-TRUE16-NEXT: s_or_b32 s8, s9, s10
-; GFX11-TRUE16-NEXT: s_and_b32 s9, s24, 0xff
-; GFX11-TRUE16-NEXT: s_lshl_b32 s10, s25, 8
+; GFX11-TRUE16-NEXT: s_and_b32 s6, s16, 0xff
+; GFX11-TRUE16-NEXT: s_and_b32 s8, s18, 0xff
+; GFX11-TRUE16-NEXT: s_lshl_b32 s9, s19, 8
+; GFX11-TRUE16-NEXT: s_or_b32 s6, s6, s7
+; GFX11-TRUE16-NEXT: s_or_b32 s7, s8, s9
+; GFX11-TRUE16-NEXT: s_and_b32 s8, s20, 0xff
+; GFX11-TRUE16-NEXT: s_lshl_b32 s9, s21, 8
+; GFX11-TRUE16-NEXT: s_and_b32 s10, s22, 0xff
+; GFX11-TRUE16-NEXT: s_lshl_b32 s11, s23, 8
+; GFX11-TRUE16-NEXT: s_or_b32 s8, s8, s9
+; GFX11-TRUE16-NEXT: s_or_b32 s9, s10, s11
+; GFX11-TRUE16-NEXT: s_pack_ll_b32_b16 s6, s6, s7
+; GFX11-TRUE16-NEXT: s_pack_ll_b32_b16 s7, s8, s9
+; GFX11-TRUE16-NEXT: s_and_b32 s8, s24, 0xff
+; GFX11-TRUE16-NEXT: s_lshl_b32 s9, s25, 8
+; GFX11-TRUE16-NEXT: s_and_b32 s10, s26, 0xff
+; GFX11-TRUE16-NEXT: s_lshl_b32 s11, s27, 8
; GFX11-TRUE16-NEXT: v_and_b32_e32 v0, 0xff, v35
-; GFX11-TRUE16-NEXT: s_pack_ll_b32_b16 s7, s7, s8
-; GFX11-TRUE16-NEXT: s_or_b32 s8, s9, s10
+; GFX11-TRUE16-NEXT: s_or_b32 s8, s8, s9
+; GFX11-TRUE16-NEXT: s_or_b32 s9, s10, s11
; GFX11-TRUE16-NEXT: s_and_b32 s10, s28, 0xff
; GFX11-TRUE16-NEXT: s_lshl_b32 s11, s29, 8
-; GFX11-TRUE16-NEXT: v_or_b32_e32 v0, v0, v39
-; GFX11-TRUE16-NEXT: s_or_b32 s10, s10, s11
-; GFX11-TRUE16-NEXT: s_and_b32 s9, s26, 0xff
-; GFX11-TRUE16-NEXT: v_and_b32_e64 v1, 0xffff, s10
-; GFX11-TRUE16-NEXT: s_lshl_b32 s12, s27, 8
-; GFX11-TRUE16-NEXT: v_and_b32_e32 v3, 0xff, v38
-; GFX11-TRUE16-NEXT: s_or_b32 s9, s9, s12
-; GFX11-TRUE16-NEXT: v_and_b32_e32 v5, 0xff, v31
; GFX11-TRUE16-NEXT: s_pack_ll_b32_b16 s8, s8, s9
+; GFX11-TRUE16-NEXT: s_or_b32 s9, s10, s11
+; GFX11-TRUE16-NEXT: s_delay_alu instid0(SALU_CYCLE_1) | instskip(SKIP_3) | instid1(VALU_DEP_4)
+; GFX11-TRUE16-NEXT: v_dual_mov_b32 v4, s9 :: v_dual_and_b32 v1, 0xff, v38
+; GFX11-TRUE16-NEXT: v_or_b32_e32 v0, v0, v39
; GFX11-TRUE16-NEXT: v_and_b32_e32 v2, 0xff, v37
-; GFX11-TRUE16-NEXT: v_lshl_or_b32 v4, v0, 16, v1
+; GFX11-TRUE16-NEXT: v_and_b32_e32 v3, 0xff, v31
+; GFX11-TRUE16-NEXT: v_or_b32_e32 v1, v1, v48
+; GFX11-TRUE16-NEXT: s_waitcnt vmcnt(5)
+; GFX11-TRUE16-NEXT: v_and_b32_e32 v15, 0xff, v82
+; GFX11-TRUE16-NEXT: v_mov_b16_e32 v4.h, v0.l
; GFX11-TRUE16-NEXT: v_and_b32_e32 v0, 0xff, v33
-; GFX11-TRUE16-NEXT: v_or_b32_e32 v3, v3, v48
-; GFX11-TRUE16-NEXT: v_and_b32_e32 v6, 0xff, v36
-; GFX11-TRUE16-NEXT: v_or_b32_e32 v1, v2, v49
-; GFX11-TRUE16-NEXT: v_and_b32_e32 v2, 0xff, v32
-; GFX11-TRUE16-NEXT: v_or_b32_e32 v0, v0, v51
-; GFX11-TRUE16-NEXT: v_or_b32_e32 v7, v5, v50
-; GFX11-TRUE16-NEXT: v_or_b32_e32 v8, v6, v52
-; GFX11-TRUE16-NEXT: v_and_b32_e32 v1, 0xffff, v1
-; GFX11-TRUE16-NEXT: v_or_b32_e32 v2, v2, v53
-; GFX11-TRUE16-NEXT: v_and_b32_e32 v0, 0xffff, v0
-; GFX11-TRUE16-NEXT: v_and_b32_e32 v9, 0xff, v24
-; GFX11-TRUE16-NEXT: s_waitcnt vmcnt(1)
-; GFX11-TRUE16-NEXT: v_and_b32_e32 v11, 0xff, v68
-; GFX11-TRUE16-NEXT: v_lshl_or_b32 v5, v3, 16, v1
-; GFX11-TRUE16-NEXT: v_and_b32_e32 v2, 0xffff, v2
-; GFX11-TRUE16-NEXT: v_and_b32_e32 v1, 0xff, v34
-; GFX11-TRUE16-NEXT: v_and_b32_e32 v3, 0xff, v16
-; GFX11-TRUE16-NEXT: v_lshl_or_b32 v6, v7, 16, v0
-; GFX11-TRUE16-NEXT: v_and_b32_e32 v0, 0xff, v18
-; GFX11-TRUE16-NEXT: v_lshl_or_b32 v7, v8, 16, v2
-; GFX11-TRUE16-NEXT: v_or_b32_e32 v1, v1, v54
-; GFX11-TRUE16-NEXT: v_or_b32_e32 v2, v3, v17
-; GFX11-TRUE16-NEXT: v_and_b32_e32 v3, 0xff, v20
-; GFX11-TRUE16-NEXT: v_and_b32_e32 v8, 0xff, v22
-; GFX11-TRUE16-NEXT: v_or_b32_e32 v0, v0, v55
-; GFX11-TRUE16-NEXT: v_and_b32_e32 v1, 0xffff, v1
-; GFX11-TRUE16-NEXT: v_and_b32_e32 v12, 0xff, v67
-; GFX11-TRUE16-NEXT: v_or_b32_e32 v3, v3, v19
-; GFX11-TRUE16-NEXT: v_or_b32_e32 v10, v8, v23
-; GFX11-TRUE16-NEXT: v_and_b32_e32 v0, 0xffff, v0
-; GFX11-TRUE16-NEXT: v_lshl_or_b32 v8, v2, 16, v1
-; GFX11-TRUE16-NEXT: v_or_b32_e32 v1, v9, v21
-; GFX11-TRUE16-NEXT: v_and_b32_e32 v2, 0xff, v26
-; GFX11-TRUE16-NEXT: v_and_b32_e32 v10, 0xffff, v10
-; GFX11-TRUE16-NEXT: v_lshl_or_b32 v9, v3, 16, v0
-; GFX11-TRUE16-NEXT: v_and_b32_e32 v3, 0xff, v30
-; GFX11-TRUE16-NEXT: v_and_b32_e32 v13, 0xff, v80
-; GFX11-TRUE16-NEXT: v_and_b32_e32 v14, 0xff, v82
+; GFX11-TRUE16-NEXT: v_or_b32_e32 v5, v2, v49
+; GFX11-TRUE16-NEXT: v_or_b32_e32 v2, v3, v50
+; GFX11-TRUE16-NEXT: v_mov_b16_e32 v5.h, v1.l
+; GFX11-TRUE16-NEXT: v_and_b32_e32 v1, 0xff, v36
+; GFX11-TRUE16-NEXT: v_or_b32_e32 v6, v0, v51
+; GFX11-TRUE16-NEXT: v_and_b32_e32 v0, 0xff, v32
+; GFX11-TRUE16-NEXT: v_mov_b16_e32 v6.h, v2.l
+; GFX11-TRUE16-NEXT: v_and_b32_e32 v2, 0xff, v16
+; GFX11-TRUE16-NEXT: v_or_b32_e32 v1, v1, v52
+; GFX11-TRUE16-NEXT: v_and_b32_e32 v3, 0xff, v34
+; GFX11-TRUE16-NEXT: v_or_b32_e32 v7, v0, v53
+; GFX11-TRUE16-NEXT: v_and_b32_e32 v0, 0xff, v20
+; GFX11-TRUE16-NEXT: v_or_b32_e32 v2, v2, v17
+; GFX11-TRUE16-NEXT: v_mov_b16_e32 v7.h, v1.l
+; GFX11-TRUE16-NEXT: v_and_b32_e32 v1, 0xff, v18
+; GFX11-TRUE16-NEXT: v_or_b32_e32 v8, v3, v54
+; GFX11-TRUE16-NEXT: v_or_b32_e32 v0, v0, v19
+; GFX11-TRUE16-NEXT: v_mov_b16_e32 v8.h, v2.l
+; GFX11-TRUE16-NEXT: v_and_b32_e32 v2, 0xff, v24
+; GFX11-TRUE16-NEXT: v_or_b32_e32 v9, v1, v55
+; GFX11-TRUE16-NEXT: v_and_b32_e32 v3, 0xff, v22
+; GFX11-TRUE16-NEXT: v_mov_b16_e32 v9.h, v0.l
; GFX11-TRUE16-NEXT: v_and_b32_e32 v0, 0xff, v28
-; GFX11-TRUE16-NEXT: v_or_b32_e32 v2, v2, v27
-; GFX11-TRUE16-NEXT: v_lshl_or_b32 v10, v1, 16, v10
+; GFX11-TRUE16-NEXT: v_or_b32_e32 v1, v2, v21
+; GFX11-TRUE16-NEXT: v_and_b32_e32 v2, 0xff, v26
+; GFX11-TRUE16-NEXT: v_or_b32_e32 v10, v3, v23
; GFX11-TRUE16-NEXT: s_waitcnt vmcnt(0)
-; GFX11-TRUE16-NEXT: v_and_b32_e32 v1, 0xff, v65
-; GFX11-TRUE16-NEXT: v_or_b32_e32 v3, v3, v66
-; GFX11-TRUE16-NEXT: v_or_b32_e32 v11, v11, v70
-; GFX11-TRUE16-NEXT: v_or_b32_e32 v15, v12, v71
-; GFX11-TRUE16-NEXT: v_and_b32_e32 v12, 0xff, v69
-; GFX11-TRUE16-NEXT: v_or_b32_e32 v13, v13, v83
-; GFX11-TRUE16-NEXT: v_and_b32_e32 v86, 0xff, v64
-; GFX11-TRUE16-NEXT: v_or_b32_e32 v14, v14, v84
+; GFX11-TRUE16-NEXT: v_and_b32_e32 v3, 0xff, v65
; GFX11-TRUE16-NEXT: v_or_b32_e32 v0, v0, v25
-; GFX11-TRUE16-NEXT: v_and_b32_e32 v2, 0xffff, v2
-; GFX11-TRUE16-NEXT: v_or_b32_e32 v1, v1, v29
-; GFX11-TRUE16-NEXT: v_and_b32_e32 v3, 0xffff, v3
-; GFX11-TRUE16-NEXT: v_and_b32_e32 v87, 0xffff, v11
-; GFX11-TRUE16-NEXT: v_or_b32_e32 v96, v12, v81
-; GFX11-TRUE16-NEXT: v_and_b32_e32 v97, 0xffff, v13
-; GFX11-TRUE16-NEXT: v_or_b32_e32 v86, v86, v85
-; GFX11-TRUE16-NEXT: v_and_b32_e32 v98, 0xffff, v14
-; GFX11-TRUE16-NEXT: v_lshl_or_b32 v11, v0, 16, v2
-; GFX11-TRUE16-NEXT: v_lshl_or_b32 v12, v1, 16, v3
-; GFX11-TRUE16-NEXT: v_lshl_or_b32 v13, v15, 16, v87
-; GFX11-TRUE16-NEXT: v_lshl_or_b32 v14, v96, 16, v97
-; GFX11-TRUE16-NEXT: v_lshl_or_b32 v15, v86, 16, v98
-; GFX11-TRUE16-NEXT: v_dual_mov_b32 v0, s5 :: v_dual_mov_b32 v1, s6
+; GFX11-TRUE16-NEXT: v_mov_b16_e32 v10.h, v1.l
+; GFX11-TRUE16-NEXT: v_and_b32_e32 v1, 0xff, v30
+; GFX11-TRUE16-NEXT: v_or_b32_e32 v11, v2, v27
+; GFX11-TRUE16-NEXT: v_or_b32_e32 v2, v3, v29
+; GFX11-TRUE16-NEXT: v_mov_b16_e32 v11.h, v0.l
+; GFX11-TRUE16-NEXT: v_and_b32_e32 v0, 0xff, v68
+; GFX11-TRUE16-NEXT: v_or_b32_e32 v12, v1, v66
+; GFX11-TRUE16-NEXT: v_and_b32_e32 v1, 0xff, v67
+; GFX11-TRUE16-NEXT: v_mov_b16_e32 v12.h, v2.l
+; GFX11-TRUE16-NEXT: v_and_b32_e32 v2, 0xff, v69
+; GFX11-TRUE16-NEXT: v_or_b32_e32 v13, v0, v70
+; GFX11-TRUE16-NEXT: v_and_b32_e32 v0, 0xff, v64
+; GFX11-TRUE16-NEXT: v_and_b32_e32 v3, 0xff, v80
+; GFX11-TRUE16-NEXT: v_or_b32_e32 v1, v1, v71
+; GFX11-TRUE16-NEXT: v_or_b32_e32 v2, v2, v81
+; GFX11-TRUE16-NEXT: v_or_b32_e32 v15, v15, v84
+; GFX11-TRUE16-NEXT: v_or_b32_e32 v0, v0, v85
+; GFX11-TRUE16-NEXT: v_or_b32_e32 v14, v3, v83
+; GFX11-TRUE16-NEXT: v_mov_b16_e32 v13.h, v1.l
+; GFX11-TRUE16-NEXT: v_mov_b16_e32 v14.h, v2.l
+; GFX11-TRUE16-NEXT: v_mov_b32_e32 v1, s6
+; GFX11-TRUE16-NEXT: v_mov_b16_e32 v15.h, v0.l
+; GFX11-TRUE16-NEXT: v_mov_b32_e32 v0, s5
; GFX11-TRUE16-NEXT: v_dual_mov_b32 v2, s7 :: v_dual_mov_b32 v3, s8
; GFX11-TRUE16-NEXT: s_and_not1_b32 vcc_lo, exec_lo, s4
; GFX11-TRUE16-NEXT: s_cbranch_vccnz .LBB111_3
; GFX11-TRUE16-NEXT: .LBB111_2: ; %cmp.true
-; GFX11-TRUE16-NEXT: s_waitcnt vmcnt(1)
-; GFX11-TRUE16-NEXT: v_add_nc_u32_e32 v4, 3, v68
-; GFX11-TRUE16-NEXT: v_add_nc_u32_e32 v5, 3, v67
-; GFX11-TRUE16-NEXT: v_add_nc_u32_e32 v6, 3, v30
-; GFX11-TRUE16-NEXT: s_waitcnt vmcnt(0)
-; GFX11-TRUE16-NEXT: v_add_nc_u32_e32 v7, 3, v65
-; GFX11-TRUE16-NEXT: v_add_nc_u32_e32 v8, 3, v22
-; GFX11-TRUE16-NEXT: v_and_b32_e32 v4, 0xff, v4
-; GFX11-TRUE16-NEXT: v_and_b32_e32 v5, 0xff, v5
-; GFX11-TRUE16-NEXT: v_and_b32_e32 v6, 0xff, v6
-; GFX11-TRUE16-NEXT: v_add_nc_u32_e32 v9, 3, v16
; GFX11-TRUE16-NEXT: s_add_i32 s28, s28, 3
-; GFX11-TRUE16-NEXT: v_or_b32_e32 v4, v70, v4
-; GFX11-TRUE16-NEXT: v_or_b32_e32 v5, v71, v5
-; GFX11-TRUE16-NEXT: s_and_b32 s4, s28, 0xff
; GFX11-TRUE16-NEXT: s_lshl_b32 s5, s29, 8
+; GFX11-TRUE16-NEXT: s_and_b32 s4, s28, 0xff
; GFX11-TRUE16-NEXT: s_add_i32 s24, s24, 3
-; GFX11-TRUE16-NEXT: v_add_nc_u32_e32 v11, 0x300, v4
-; GFX11-TRUE16-NEXT: v_add_nc_u32_e32 v13, 0x300, v5
-; GFX11-TRUE16-NEXT: v_or_b32_e32 v4, v66, v6
-; GFX11-TRUE16-NEXT: v_add_nc_u32_e32 v5, 3, v26
-; GFX11-TRUE16-NEXT: v_and_b32_e32 v6, 0xff, v7
-; GFX11-TRUE16-NEXT: v_add_nc_u32_e32 v7, 3, v28
; GFX11-TRUE16-NEXT: s_or_b32 s4, s5, s4
-; GFX11-TRUE16-NEXT: v_add_nc_u32_e32 v12, 0x300, v4
-; GFX11-TRUE16-NEXT: v_and_b32_e32 v4, 0xff, v5
-; GFX11-TRUE16-NEXT: v_or_b32_e32 v5, v29, v6
-; GFX11-TRUE16-NEXT: v_and_b32_e32 v6, 0xff, v7
-; GFX11-TRUE16-NEXT: v_and_b32_e32 v7, 0xff, v8
-; GFX11-TRUE16-NEXT: v_add_nc_u32_e32 v8, 3, v24
-; GFX11-TRUE16-NEXT: v_or_b32_e32 v4, v27, v4
-; GFX11-TRUE16-NEXT: v_add_nc_u32_e32 v14, 0x300, v5
-; GFX11-TRUE16-NEXT: v_or_b32_e32 v5, v25, v6
-; GFX11-TRUE16-NEXT: v_or_b32_e32 v6, v23, v7
-; GFX11-TRUE16-NEXT: v_and_b32_e32 v7, 0xff, v8
-; GFX11-TRUE16-NEXT: v_add_nc_u32_e32 v15, 0x300, v4
-; GFX11-TRUE16-NEXT: v_add_nc_u32_e32 v4, 3, v18
-; GFX11-TRUE16-NEXT: v_add_nc_u32_e32 v18, 0x300, v5
-; GFX11-TRUE16-NEXT: v_add_nc_u32_e32 v8, 3, v34
-; GFX11-TRUE16-NEXT: v_or_b32_e32 v5, v21, v7
-; GFX11-TRUE16-NEXT: v_add_nc_u32_e32 v7, 3, v20
-; GFX11-TRUE16-NEXT: v_and_b32_e32 v4, 0xff, v4
; GFX11-TRUE16-NEXT: s_and_b32 s5, s24, 0xff
; GFX11-TRUE16-NEXT: s_lshl_b32 s6, s25, 8
-; GFX11-TRUE16-NEXT: v_add_nc_u32_e32 v10, 0x300, v5
-; GFX11-TRUE16-NEXT: v_and_b32_e32 v5, 0xff, v7
-; GFX11-TRUE16-NEXT: v_or_b32_e32 v4, v55, v4
-; GFX11-TRUE16-NEXT: v_and_b32_e32 v7, 0xff, v8
-; GFX11-TRUE16-NEXT: v_and_b32_e32 v8, 0xff, v9
-; GFX11-TRUE16-NEXT: v_add_nc_u32_e32 v9, 3, v32
-; GFX11-TRUE16-NEXT: v_or_b32_e32 v5, v19, v5
-; GFX11-TRUE16-NEXT: v_add_nc_u32_e32 v16, 0x300, v4
-; GFX11-TRUE16-NEXT: v_or_b32_e32 v4, v54, v7
-; GFX11-TRUE16-NEXT: v_or_b32_e32 v7, v17, v8
-; GFX11-TRUE16-NEXT: v_and_b32_e32 v8, 0xff, v9
; GFX11-TRUE16-NEXT: s_add_i32 s26, s26, 3
-; GFX11-TRUE16-NEXT: v_add_nc_u32_e32 v9, 0x300, v5
-; GFX11-TRUE16-NEXT: v_add_nc_u32_e32 v5, 3, v36
-; GFX11-TRUE16-NEXT: v_add_nc_u32_e32 v17, 0x300, v4
-; GFX11-TRUE16-NEXT: v_add_nc_u32_e32 v19, 0x300, v7
-; GFX11-TRUE16-NEXT: v_or_b32_e32 v4, v53, v8
-; GFX11-TRUE16-NEXT: v_add_nc_u32_e32 v7, 3, v33
; GFX11-TRUE16-NEXT: s_or_b32 s5, s6, s5
; GFX11-TRUE16-NEXT: s_and_b32 s6, s26, 0xff
; GFX11-TRUE16-NEXT: s_lshl_b32 s7, s27, 8
@@ -92358,11 +94184,6 @@ define inreg <32 x bfloat> @bitcast_v64i8_to_v32bf16_scalar(<64 x i8> inreg %a,
; GFX11-TRUE16-NEXT: s_and_b32 s7, s20, 0xff
; GFX11-TRUE16-NEXT: s_lshl_b32 s8, s21, 8
; GFX11-TRUE16-NEXT: s_add_i32 s22, s22, 3
-; GFX11-TRUE16-NEXT: v_and_b32_e32 v5, 0xff, v5
-; GFX11-TRUE16-NEXT: v_add_nc_u32_e32 v8, 3, v31
-; GFX11-TRUE16-NEXT: v_add_nc_u32_e32 v20, 3, v37
-; GFX11-TRUE16-NEXT: v_add_nc_u32_e32 v21, 0x300, v4
-; GFX11-TRUE16-NEXT: v_and_b32_e32 v4, 0xff, v7
; GFX11-TRUE16-NEXT: s_or_b32 s7, s8, s7
; GFX11-TRUE16-NEXT: s_and_b32 s8, s22, 0xff
; GFX11-TRUE16-NEXT: s_lshl_b32 s9, s23, 8
@@ -92380,83 +94201,133 @@ define inreg <32 x bfloat> @bitcast_v64i8_to_v32bf16_scalar(<64 x i8> inreg %a,
; GFX11-TRUE16-NEXT: s_lshl_b32 s1, s1, 8
; GFX11-TRUE16-NEXT: s_and_b32 s2, s2, 0xff
; GFX11-TRUE16-NEXT: s_lshl_b32 s3, s3, 8
-; GFX11-TRUE16-NEXT: v_or_b32_e32 v5, v52, v5
-; GFX11-TRUE16-NEXT: v_and_b32_e32 v7, 0xff, v8
-; GFX11-TRUE16-NEXT: v_and_b32_e32 v8, 0xff, v20
-; GFX11-TRUE16-NEXT: v_add_nc_u32_e32 v20, 3, v38
-; GFX11-TRUE16-NEXT: v_or_b32_e32 v4, v51, v4
; GFX11-TRUE16-NEXT: s_or_b32 s10, s11, s10
; GFX11-TRUE16-NEXT: s_or_b32 s0, s1, s0
; GFX11-TRUE16-NEXT: s_or_b32 s1, s3, s2
-; GFX11-TRUE16-NEXT: s_addk_i32 s5, 0x300
-; GFX11-TRUE16-NEXT: s_addk_i32 s6, 0x300
; GFX11-TRUE16-NEXT: s_addk_i32 s9, 0x300
; GFX11-TRUE16-NEXT: s_addk_i32 s0, 0x300
; GFX11-TRUE16-NEXT: s_addk_i32 s1, 0x300
; GFX11-TRUE16-NEXT: s_addk_i32 s10, 0x300
-; GFX11-TRUE16-NEXT: v_add_nc_u32_e32 v22, 0x300, v5
-; GFX11-TRUE16-NEXT: v_or_b32_e32 v5, v50, v7
-; GFX11-TRUE16-NEXT: v_or_b32_e32 v7, v49, v8
-; GFX11-TRUE16-NEXT: v_and_b32_e32 v8, 0xff, v20
-; GFX11-TRUE16-NEXT: v_add_nc_u32_e32 v20, 0x300, v4
-; GFX11-TRUE16-NEXT: v_add_nc_u32_e32 v4, 3, v35
+; GFX11-TRUE16-NEXT: s_addk_i32 s4, 0x300
+; GFX11-TRUE16-NEXT: v_add_nc_u32_e32 v1, 3, v64
; GFX11-TRUE16-NEXT: s_pack_ll_b32_b16 s0, s0, s1
; GFX11-TRUE16-NEXT: s_pack_ll_b32_b16 s1, s9, s10
+; GFX11-TRUE16-NEXT: s_waitcnt vmcnt(5)
; GFX11-TRUE16-NEXT: v_add_nc_u32_e32 v0, 3, v82
-; GFX11-TRUE16-NEXT: s_pack_ll_b32_b16 s3, s5, s6
-; GFX11-TRUE16-NEXT: v_add_nc_u32_e32 v2, 3, v80
+; GFX11-TRUE16-NEXT: s_addk_i32 s5, 0x300
+; GFX11-TRUE16-NEXT: s_addk_i32 s6, 0x300
; GFX11-TRUE16-NEXT: s_addk_i32 s7, 0x300
; GFX11-TRUE16-NEXT: s_addk_i32 s8, 0x300
-; GFX11-TRUE16-NEXT: v_add_nc_u32_e32 v1, 3, v64
; GFX11-TRUE16-NEXT: v_and_b32_e32 v0, 0xff, v0
-; GFX11-TRUE16-NEXT: v_and_b32_e32 v2, 0xff, v2
-; GFX11-TRUE16-NEXT: v_and_b32_e32 v4, 0xff, v4
; GFX11-TRUE16-NEXT: s_pack_ll_b32_b16 s2, s7, s8
-; GFX11-TRUE16-NEXT: v_add_nc_u32_e32 v3, 3, v69
; GFX11-TRUE16-NEXT: v_and_b32_e32 v1, 0xff, v1
+; GFX11-TRUE16-NEXT: s_pack_ll_b32_b16 s3, s5, s6
+; GFX11-TRUE16-NEXT: s_waitcnt vmcnt(3)
+; GFX11-TRUE16-NEXT: v_add_nc_u32_e32 v2, 3, v80
+; GFX11-TRUE16-NEXT: s_waitcnt vmcnt(1)
+; GFX11-TRUE16-NEXT: v_add_nc_u32_e32 v4, 3, v68
+; GFX11-TRUE16-NEXT: v_add_nc_u32_e32 v5, 3, v67
; GFX11-TRUE16-NEXT: v_or_b32_e32 v0, v84, v0
+; GFX11-TRUE16-NEXT: v_or_b32_e32 v1, v85, v1
+; GFX11-TRUE16-NEXT: v_and_b32_e32 v2, 0xff, v2
+; GFX11-TRUE16-NEXT: v_and_b32_e32 v4, 0xff, v4
+; GFX11-TRUE16-NEXT: v_add_nc_u32_e32 v6, 3, v28
+; GFX11-TRUE16-NEXT: v_add_nc_u32_e32 v15, 0x300, v0
+; GFX11-TRUE16-NEXT: v_add_nc_u32_e32 v0, 0x300, v1
; GFX11-TRUE16-NEXT: v_or_b32_e32 v2, v83, v2
-; GFX11-TRUE16-NEXT: v_add_nc_u32_e32 v23, 0x300, v5
+; GFX11-TRUE16-NEXT: v_and_b32_e32 v1, 0xff, v5
+; GFX11-TRUE16-NEXT: v_add_nc_u32_e32 v5, 3, v30
+; GFX11-TRUE16-NEXT: v_add_nc_u32_e32 v7, 3, v22
+; GFX11-TRUE16-NEXT: v_add_nc_u32_e32 v3, 3, v69
+; GFX11-TRUE16-NEXT: v_add_nc_u32_e32 v14, 0x300, v2
+; GFX11-TRUE16-NEXT: v_or_b32_e32 v2, v70, v4
+; GFX11-TRUE16-NEXT: v_and_b32_e32 v4, 0xff, v5
+; GFX11-TRUE16-NEXT: s_waitcnt vmcnt(0)
+; GFX11-TRUE16-NEXT: v_add_nc_u32_e32 v5, 3, v65
+; GFX11-TRUE16-NEXT: v_mov_b16_e32 v15.h, v0.l
+; GFX11-TRUE16-NEXT: v_dual_mov_b32 v0, s0 :: v_dual_add_nc_u32 v13, 0x300, v2
+; GFX11-TRUE16-NEXT: v_or_b32_e32 v2, v66, v4
+; GFX11-TRUE16-NEXT: v_add_nc_u32_e32 v4, 3, v26
+; GFX11-TRUE16-NEXT: v_and_b32_e32 v5, 0xff, v5
; GFX11-TRUE16-NEXT: v_and_b32_e32 v3, 0xff, v3
-; GFX11-TRUE16-NEXT: v_add_nc_u32_e32 v5, 0x300, v7
-; GFX11-TRUE16-NEXT: v_or_b32_e32 v7, v48, v8
-; GFX11-TRUE16-NEXT: v_or_b32_e32 v4, v39, v4
-; GFX11-TRUE16-NEXT: s_addk_i32 s4, 0x300
-; GFX11-TRUE16-NEXT: v_or_b32_e32 v1, v85, v1
-; GFX11-TRUE16-NEXT: v_add_nc_u32_e32 v0, 0x300, v0
+; GFX11-TRUE16-NEXT: v_or_b32_e32 v1, v71, v1
+; GFX11-TRUE16-NEXT: v_add_nc_u32_e32 v12, 0x300, v2
+; GFX11-TRUE16-NEXT: v_and_b32_e32 v2, 0xff, v4
+; GFX11-TRUE16-NEXT: v_or_b32_e32 v4, v29, v5
+; GFX11-TRUE16-NEXT: v_and_b32_e32 v5, 0xff, v6
+; GFX11-TRUE16-NEXT: v_and_b32_e32 v6, 0xff, v7
+; GFX11-TRUE16-NEXT: v_add_nc_u32_e32 v7, 3, v24
+; GFX11-TRUE16-NEXT: v_or_b32_e32 v2, v27, v2
+; GFX11-TRUE16-NEXT: v_add_nc_u32_e32 v22, 0x300, v4
+; GFX11-TRUE16-NEXT: v_or_b32_e32 v4, v25, v5
+; GFX11-TRUE16-NEXT: v_or_b32_e32 v5, v23, v6
+; GFX11-TRUE16-NEXT: v_and_b32_e32 v6, 0xff, v7
+; GFX11-TRUE16-NEXT: v_add_nc_u32_e32 v11, 0x300, v2
+; GFX11-TRUE16-NEXT: v_add_nc_u32_e32 v2, 3, v18
+; GFX11-TRUE16-NEXT: v_add_nc_u32_e32 v18, 0x300, v4
+; GFX11-TRUE16-NEXT: v_add_nc_u32_e32 v10, 0x300, v5
+; GFX11-TRUE16-NEXT: v_or_b32_e32 v4, v21, v6
+; GFX11-TRUE16-NEXT: v_add_nc_u32_e32 v5, 3, v20
+; GFX11-TRUE16-NEXT: v_and_b32_e32 v2, 0xff, v2
+; GFX11-TRUE16-NEXT: v_add_nc_u32_e32 v6, 3, v34
+; GFX11-TRUE16-NEXT: v_add_nc_u32_e32 v7, 3, v16
+; GFX11-TRUE16-NEXT: v_add_nc_u32_e32 v16, 0x300, v4
+; GFX11-TRUE16-NEXT: v_and_b32_e32 v4, 0xff, v5
+; GFX11-TRUE16-NEXT: v_or_b32_e32 v2, v55, v2
+; GFX11-TRUE16-NEXT: v_and_b32_e32 v5, 0xff, v6
+; GFX11-TRUE16-NEXT: v_and_b32_e32 v6, 0xff, v7
+; GFX11-TRUE16-NEXT: v_add_nc_u32_e32 v7, 3, v32
+; GFX11-TRUE16-NEXT: v_or_b32_e32 v4, v19, v4
+; GFX11-TRUE16-NEXT: v_add_nc_u32_e32 v9, 0x300, v2
+; GFX11-TRUE16-NEXT: v_or_b32_e32 v2, v54, v5
+; GFX11-TRUE16-NEXT: v_or_b32_e32 v5, v17, v6
+; GFX11-TRUE16-NEXT: v_and_b32_e32 v6, 0xff, v7
+; GFX11-TRUE16-NEXT: v_add_nc_u32_e32 v17, 0x300, v4
+; GFX11-TRUE16-NEXT: v_add_nc_u32_e32 v4, 3, v36
+; GFX11-TRUE16-NEXT: v_add_nc_u32_e32 v8, 0x300, v2
+; GFX11-TRUE16-NEXT: v_add_nc_u32_e32 v2, 0x300, v5
+; GFX11-TRUE16-NEXT: v_or_b32_e32 v5, v53, v6
+; GFX11-TRUE16-NEXT: v_add_nc_u32_e32 v6, 3, v33
+; GFX11-TRUE16-NEXT: v_and_b32_e32 v4, 0xff, v4
+; GFX11-TRUE16-NEXT: v_add_nc_u32_e32 v19, 3, v31
+; GFX11-TRUE16-NEXT: v_add_nc_u32_e32 v20, 3, v37
+; GFX11-TRUE16-NEXT: v_add_nc_u32_e32 v7, 0x300, v5
+; GFX11-TRUE16-NEXT: v_and_b32_e32 v5, 0xff, v6
+; GFX11-TRUE16-NEXT: v_or_b32_e32 v4, v52, v4
+; GFX11-TRUE16-NEXT: v_and_b32_e32 v6, 0xff, v19
+; GFX11-TRUE16-NEXT: v_and_b32_e32 v19, 0xff, v20
+; GFX11-TRUE16-NEXT: v_add_nc_u32_e32 v20, 3, v38
+; GFX11-TRUE16-NEXT: v_or_b32_e32 v5, v51, v5
+; GFX11-TRUE16-NEXT: v_add_nc_u32_e32 v21, 0x300, v4
+; GFX11-TRUE16-NEXT: v_or_b32_e32 v4, v50, v6
+; GFX11-TRUE16-NEXT: v_or_b32_e32 v19, v49, v19
+; GFX11-TRUE16-NEXT: v_and_b32_e32 v20, 0xff, v20
+; GFX11-TRUE16-NEXT: v_add_nc_u32_e32 v23, 3, v35
+; GFX11-TRUE16-NEXT: v_add_nc_u32_e32 v6, 0x300, v5
+; GFX11-TRUE16-NEXT: v_add_nc_u32_e32 v24, 0x300, v4
+; GFX11-TRUE16-NEXT: v_add_nc_u32_e32 v5, 0x300, v19
+; GFX11-TRUE16-NEXT: v_or_b32_e32 v4, v48, v20
+; GFX11-TRUE16-NEXT: v_and_b32_e32 v19, 0xff, v23
; GFX11-TRUE16-NEXT: v_or_b32_e32 v3, v81, v3
-; GFX11-TRUE16-NEXT: v_add_nc_u32_e32 v2, 0x300, v2
-; GFX11-TRUE16-NEXT: v_add_nc_u32_e32 v6, 0x300, v6
-; GFX11-TRUE16-NEXT: v_add_nc_u32_e32 v7, 0x300, v7
-; GFX11-TRUE16-NEXT: v_add_nc_u32_e32 v4, 0x300, v4
-; GFX11-TRUE16-NEXT: v_and_b32_e64 v8, 0xffff, s4
-; GFX11-TRUE16-NEXT: v_and_b32_e32 v5, 0xffff, v5
-; GFX11-TRUE16-NEXT: v_and_b32_e32 v16, 0xffff, v16
; GFX11-TRUE16-NEXT: v_add_nc_u32_e32 v1, 0x300, v1
+; GFX11-TRUE16-NEXT: v_mov_b16_e32 v6.h, v24.l
+; GFX11-TRUE16-NEXT: v_add_nc_u32_e32 v20, 0x300, v4
+; GFX11-TRUE16-NEXT: v_or_b32_e32 v4, v39, v19
; GFX11-TRUE16-NEXT: v_add_nc_u32_e32 v3, 0x300, v3
-; GFX11-TRUE16-NEXT: v_lshl_or_b32 v4, v4, 16, v8
-; GFX11-TRUE16-NEXT: v_lshl_or_b32 v5, v7, 16, v5
-; GFX11-TRUE16-NEXT: v_and_b32_e32 v7, 0xffff, v20
-; GFX11-TRUE16-NEXT: v_and_b32_e32 v8, 0xffff, v21
-; GFX11-TRUE16-NEXT: v_and_b32_e32 v17, 0xffff, v17
-; GFX11-TRUE16-NEXT: v_and_b32_e32 v20, 0xffff, v6
-; GFX11-TRUE16-NEXT: v_lshl_or_b32 v9, v9, 16, v16
-; GFX11-TRUE16-NEXT: v_and_b32_e32 v15, 0xffff, v15
-; GFX11-TRUE16-NEXT: v_and_b32_e32 v12, 0xffff, v12
-; GFX11-TRUE16-NEXT: v_and_b32_e32 v16, 0xffff, v11
-; GFX11-TRUE16-NEXT: v_and_b32_e32 v2, 0xffff, v2
-; GFX11-TRUE16-NEXT: v_and_b32_e32 v0, 0xffff, v0
-; GFX11-TRUE16-NEXT: v_lshl_or_b32 v6, v23, 16, v7
-; GFX11-TRUE16-NEXT: v_lshl_or_b32 v7, v22, 16, v8
-; GFX11-TRUE16-NEXT: v_lshl_or_b32 v8, v19, 16, v17
-; GFX11-TRUE16-NEXT: v_lshl_or_b32 v10, v10, 16, v20
-; GFX11-TRUE16-NEXT: v_lshl_or_b32 v11, v18, 16, v15
-; GFX11-TRUE16-NEXT: v_lshl_or_b32 v12, v14, 16, v12
-; GFX11-TRUE16-NEXT: v_lshl_or_b32 v13, v13, 16, v16
-; GFX11-TRUE16-NEXT: v_lshl_or_b32 v14, v3, 16, v2
-; GFX11-TRUE16-NEXT: v_lshl_or_b32 v15, v1, 16, v0
-; GFX11-TRUE16-NEXT: v_dual_mov_b32 v0, s0 :: v_dual_mov_b32 v1, s1
-; GFX11-TRUE16-NEXT: v_dual_mov_b32 v2, s2 :: v_dual_mov_b32 v3, s3
+; GFX11-TRUE16-NEXT: v_mov_b16_e32 v7.h, v21.l
+; GFX11-TRUE16-NEXT: v_mov_b16_e32 v8.h, v2.l
+; GFX11-TRUE16-NEXT: v_mov_b16_e32 v5.h, v20.l
+; GFX11-TRUE16-NEXT: v_dual_mov_b32 v4, s4 :: v_dual_add_nc_u32 v19, 0x300, v4
+; GFX11-TRUE16-NEXT: v_mov_b16_e32 v9.h, v17.l
+; GFX11-TRUE16-NEXT: v_mov_b16_e32 v10.h, v16.l
+; GFX11-TRUE16-NEXT: v_mov_b16_e32 v11.h, v18.l
+; GFX11-TRUE16-NEXT: s_delay_alu instid0(VALU_DEP_4)
+; GFX11-TRUE16-NEXT: v_mov_b16_e32 v4.h, v19.l
+; GFX11-TRUE16-NEXT: v_mov_b16_e32 v12.h, v22.l
+; GFX11-TRUE16-NEXT: v_mov_b16_e32 v13.h, v1.l
+; GFX11-TRUE16-NEXT: v_mov_b16_e32 v14.h, v3.l
+; GFX11-TRUE16-NEXT: v_dual_mov_b32 v1, s1 :: v_dual_mov_b32 v2, s2
+; GFX11-TRUE16-NEXT: v_mov_b32_e32 v3, s3
; GFX11-TRUE16-NEXT: .LBB111_3: ; %end
; GFX11-TRUE16-NEXT: s_waitcnt vmcnt(0)
; GFX11-TRUE16-NEXT: s_setpc_b64 s[30:31]
diff --git a/llvm/test/CodeGen/AMDGPU/amdgcn.bitcast.576bit.ll b/llvm/test/CodeGen/AMDGPU/amdgcn.bitcast.576bit.ll
index a1c0a87..5d4df4b 100644
--- a/llvm/test/CodeGen/AMDGPU/amdgcn.bitcast.576bit.ll
+++ b/llvm/test/CodeGen/AMDGPU/amdgcn.bitcast.576bit.ll
@@ -10227,149 +10227,285 @@ define inreg <36 x i16> @bitcast_v18f32_to_v36i16_scalar(<18 x float> inreg %a,
; GFX9-NEXT: ; implicit-def: $vgpr22
; GFX9-NEXT: s_branch .LBB29_2
;
-; GFX11-LABEL: bitcast_v18f32_to_v36i16_scalar:
-; GFX11: ; %bb.0:
-; GFX11-NEXT: s_waitcnt vmcnt(0) expcnt(0) lgkmcnt(0)
-; GFX11-NEXT: v_cmp_ne_u32_e32 vcc_lo, 0, v0
-; GFX11-NEXT: s_mov_b32 s4, 0
-; GFX11-NEXT: s_and_b32 s5, vcc_lo, exec_lo
-; GFX11-NEXT: s_cbranch_scc0 .LBB29_3
-; GFX11-NEXT: ; %bb.1: ; %cmp.false
-; GFX11-NEXT: s_lshr_b32 s5, s29, 16
-; GFX11-NEXT: s_lshr_b32 s6, s28, 16
-; GFX11-NEXT: s_lshr_b32 s7, s27, 16
-; GFX11-NEXT: s_lshr_b32 s8, s26, 16
-; GFX11-NEXT: s_lshr_b32 s9, s25, 16
-; GFX11-NEXT: s_lshr_b32 s10, s24, 16
-; GFX11-NEXT: s_lshr_b32 s11, s23, 16
-; GFX11-NEXT: s_lshr_b32 s12, s22, 16
-; GFX11-NEXT: s_lshr_b32 s13, s21, 16
-; GFX11-NEXT: s_lshr_b32 s14, s20, 16
-; GFX11-NEXT: s_lshr_b32 s15, s19, 16
-; GFX11-NEXT: s_lshr_b32 s40, s18, 16
-; GFX11-NEXT: s_lshr_b32 s41, s17, 16
-; GFX11-NEXT: s_lshr_b32 s42, s16, 16
-; GFX11-NEXT: s_lshr_b32 s43, s3, 16
-; GFX11-NEXT: s_lshr_b32 s44, s2, 16
-; GFX11-NEXT: s_lshr_b32 s45, s1, 16
-; GFX11-NEXT: s_lshr_b32 s46, s0, 16
-; GFX11-NEXT: s_and_not1_b32 vcc_lo, exec_lo, s4
-; GFX11-NEXT: s_cbranch_vccnz .LBB29_4
-; GFX11-NEXT: .LBB29_2: ; %cmp.true
-; GFX11-NEXT: v_add_f32_e64 v13, s29, 1.0
-; GFX11-NEXT: v_add_f32_e64 v14, s28, 1.0
-; GFX11-NEXT: v_add_f32_e64 v15, s27, 1.0
-; GFX11-NEXT: v_add_f32_e64 v16, s26, 1.0
-; GFX11-NEXT: v_add_f32_e64 v17, s25, 1.0
-; GFX11-NEXT: v_add_f32_e64 v8, s24, 1.0
-; GFX11-NEXT: v_add_f32_e64 v9, s23, 1.0
-; GFX11-NEXT: v_add_f32_e64 v10, s22, 1.0
-; GFX11-NEXT: v_add_f32_e64 v11, s21, 1.0
-; GFX11-NEXT: v_add_f32_e64 v12, s20, 1.0
-; GFX11-NEXT: v_add_f32_e64 v3, s19, 1.0
-; GFX11-NEXT: v_add_f32_e64 v4, s18, 1.0
-; GFX11-NEXT: v_add_f32_e64 v5, s17, 1.0
-; GFX11-NEXT: v_add_f32_e64 v6, s16, 1.0
-; GFX11-NEXT: v_add_f32_e64 v7, s3, 1.0
-; GFX11-NEXT: v_add_f32_e64 v0, s2, 1.0
-; GFX11-NEXT: v_add_f32_e64 v1, s1, 1.0
-; GFX11-NEXT: v_add_f32_e64 v2, s0, 1.0
-; GFX11-NEXT: v_lshrrev_b32_e32 v18, 16, v13
-; GFX11-NEXT: v_lshrrev_b32_e32 v19, 16, v14
-; GFX11-NEXT: v_lshrrev_b32_e32 v20, 16, v15
-; GFX11-NEXT: v_lshrrev_b32_e32 v21, 16, v16
-; GFX11-NEXT: v_lshrrev_b32_e32 v22, 16, v17
-; GFX11-NEXT: v_lshrrev_b32_e32 v23, 16, v8
-; GFX11-NEXT: v_lshrrev_b32_e32 v24, 16, v9
-; GFX11-NEXT: v_lshrrev_b32_e32 v25, 16, v10
-; GFX11-NEXT: v_lshrrev_b32_e32 v26, 16, v11
-; GFX11-NEXT: v_lshrrev_b32_e32 v27, 16, v12
-; GFX11-NEXT: v_lshrrev_b32_e32 v28, 16, v3
-; GFX11-NEXT: v_lshrrev_b32_e32 v29, 16, v4
-; GFX11-NEXT: v_lshrrev_b32_e32 v30, 16, v5
-; GFX11-NEXT: v_lshrrev_b32_e32 v31, 16, v6
-; GFX11-NEXT: v_lshrrev_b32_e32 v32, 16, v7
-; GFX11-NEXT: v_lshrrev_b32_e32 v33, 16, v0
-; GFX11-NEXT: v_lshrrev_b32_e32 v34, 16, v1
-; GFX11-NEXT: v_lshrrev_b32_e32 v35, 16, v2
-; GFX11-NEXT: s_branch .LBB29_5
-; GFX11-NEXT: .LBB29_3:
-; GFX11-NEXT: ; implicit-def: $sgpr46
-; GFX11-NEXT: ; implicit-def: $sgpr45
-; GFX11-NEXT: ; implicit-def: $sgpr44
-; GFX11-NEXT: ; implicit-def: $sgpr43
-; GFX11-NEXT: ; implicit-def: $sgpr42
-; GFX11-NEXT: ; implicit-def: $sgpr41
-; GFX11-NEXT: ; implicit-def: $sgpr40
-; GFX11-NEXT: ; implicit-def: $sgpr15
-; GFX11-NEXT: ; implicit-def: $sgpr14
-; GFX11-NEXT: ; implicit-def: $sgpr13
-; GFX11-NEXT: ; implicit-def: $sgpr12
-; GFX11-NEXT: ; implicit-def: $sgpr11
-; GFX11-NEXT: ; implicit-def: $sgpr10
-; GFX11-NEXT: ; implicit-def: $sgpr9
-; GFX11-NEXT: ; implicit-def: $sgpr8
-; GFX11-NEXT: ; implicit-def: $sgpr7
-; GFX11-NEXT: ; implicit-def: $sgpr6
-; GFX11-NEXT: ; implicit-def: $sgpr5
-; GFX11-NEXT: s_branch .LBB29_2
-; GFX11-NEXT: .LBB29_4:
-; GFX11-NEXT: v_dual_mov_b32 v2, s0 :: v_dual_mov_b32 v1, s1
-; GFX11-NEXT: v_dual_mov_b32 v0, s2 :: v_dual_mov_b32 v7, s3
-; GFX11-NEXT: v_dual_mov_b32 v6, s16 :: v_dual_mov_b32 v5, s17
-; GFX11-NEXT: v_dual_mov_b32 v4, s18 :: v_dual_mov_b32 v3, s19
-; GFX11-NEXT: v_dual_mov_b32 v12, s20 :: v_dual_mov_b32 v11, s21
-; GFX11-NEXT: v_dual_mov_b32 v10, s22 :: v_dual_mov_b32 v9, s23
-; GFX11-NEXT: v_dual_mov_b32 v8, s24 :: v_dual_mov_b32 v17, s25
-; GFX11-NEXT: v_dual_mov_b32 v16, s26 :: v_dual_mov_b32 v15, s27
-; GFX11-NEXT: v_dual_mov_b32 v14, s28 :: v_dual_mov_b32 v13, s29
-; GFX11-NEXT: v_dual_mov_b32 v35, s46 :: v_dual_mov_b32 v34, s45
-; GFX11-NEXT: v_dual_mov_b32 v33, s44 :: v_dual_mov_b32 v32, s43
-; GFX11-NEXT: v_dual_mov_b32 v31, s42 :: v_dual_mov_b32 v30, s41
-; GFX11-NEXT: v_dual_mov_b32 v29, s40 :: v_dual_mov_b32 v28, s15
-; GFX11-NEXT: v_dual_mov_b32 v27, s14 :: v_dual_mov_b32 v26, s13
-; GFX11-NEXT: v_dual_mov_b32 v25, s12 :: v_dual_mov_b32 v24, s11
-; GFX11-NEXT: v_dual_mov_b32 v23, s10 :: v_dual_mov_b32 v22, s9
-; GFX11-NEXT: v_dual_mov_b32 v21, s8 :: v_dual_mov_b32 v20, s7
-; GFX11-NEXT: v_dual_mov_b32 v19, s6 :: v_dual_mov_b32 v18, s5
-; GFX11-NEXT: .LBB29_5: ; %end
-; GFX11-NEXT: v_and_b32_e32 v2, 0xffff, v2
-; GFX11-NEXT: v_and_b32_e32 v1, 0xffff, v1
-; GFX11-NEXT: v_and_b32_e32 v36, 0xffff, v0
-; GFX11-NEXT: v_and_b32_e32 v7, 0xffff, v7
-; GFX11-NEXT: v_and_b32_e32 v6, 0xffff, v6
-; GFX11-NEXT: v_lshl_or_b32 v0, v35, 16, v2
-; GFX11-NEXT: v_lshl_or_b32 v1, v34, 16, v1
-; GFX11-NEXT: v_lshl_or_b32 v2, v33, 16, v36
-; GFX11-NEXT: v_and_b32_e32 v33, 0xffff, v4
-; GFX11-NEXT: v_and_b32_e32 v34, 0xffff, v3
-; GFX11-NEXT: v_lshl_or_b32 v3, v32, 16, v7
-; GFX11-NEXT: v_lshl_or_b32 v4, v31, 16, v6
-; GFX11-NEXT: v_and_b32_e32 v12, 0xffff, v12
-; GFX11-NEXT: v_lshl_or_b32 v6, v29, 16, v33
-; GFX11-NEXT: v_lshl_or_b32 v7, v28, 16, v34
-; GFX11-NEXT: v_and_b32_e32 v11, 0xffff, v11
-; GFX11-NEXT: v_and_b32_e32 v28, 0xffff, v9
-; GFX11-NEXT: v_and_b32_e32 v29, 0xffff, v8
-; GFX11-NEXT: v_and_b32_e32 v5, 0xffff, v5
-; GFX11-NEXT: v_and_b32_e32 v10, 0xffff, v10
-; GFX11-NEXT: v_lshl_or_b32 v8, v27, 16, v12
-; GFX11-NEXT: v_lshl_or_b32 v9, v26, 16, v11
-; GFX11-NEXT: v_lshl_or_b32 v11, v24, 16, v28
-; GFX11-NEXT: v_lshl_or_b32 v12, v23, 16, v29
-; GFX11-NEXT: v_and_b32_e32 v17, 0xffff, v17
-; GFX11-NEXT: v_and_b32_e32 v16, 0xffff, v16
-; GFX11-NEXT: v_and_b32_e32 v15, 0xffff, v15
-; GFX11-NEXT: v_and_b32_e32 v23, 0xffff, v14
-; GFX11-NEXT: v_and_b32_e32 v24, 0xffff, v13
-; GFX11-NEXT: v_lshl_or_b32 v5, v30, 16, v5
-; GFX11-NEXT: v_lshl_or_b32 v10, v25, 16, v10
-; GFX11-NEXT: v_lshl_or_b32 v13, v22, 16, v17
-; GFX11-NEXT: v_lshl_or_b32 v14, v21, 16, v16
-; GFX11-NEXT: v_lshl_or_b32 v15, v20, 16, v15
-; GFX11-NEXT: v_lshl_or_b32 v16, v19, 16, v23
-; GFX11-NEXT: v_lshl_or_b32 v17, v18, 16, v24
-; GFX11-NEXT: s_setpc_b64 s[30:31]
+; GFX11-TRUE16-LABEL: bitcast_v18f32_to_v36i16_scalar:
+; GFX11-TRUE16: ; %bb.0:
+; GFX11-TRUE16-NEXT: s_waitcnt vmcnt(0) expcnt(0) lgkmcnt(0)
+; GFX11-TRUE16-NEXT: v_cmp_ne_u32_e32 vcc_lo, 0, v0
+; GFX11-TRUE16-NEXT: s_mov_b32 s4, 0
+; GFX11-TRUE16-NEXT: s_and_b32 s5, vcc_lo, exec_lo
+; GFX11-TRUE16-NEXT: s_cbranch_scc0 .LBB29_3
+; GFX11-TRUE16-NEXT: ; %bb.1: ; %cmp.false
+; GFX11-TRUE16-NEXT: s_lshr_b32 s5, s29, 16
+; GFX11-TRUE16-NEXT: s_lshr_b32 s6, s28, 16
+; GFX11-TRUE16-NEXT: s_lshr_b32 s7, s27, 16
+; GFX11-TRUE16-NEXT: s_lshr_b32 s8, s26, 16
+; GFX11-TRUE16-NEXT: s_lshr_b32 s9, s25, 16
+; GFX11-TRUE16-NEXT: s_lshr_b32 s10, s24, 16
+; GFX11-TRUE16-NEXT: s_lshr_b32 s11, s23, 16
+; GFX11-TRUE16-NEXT: s_lshr_b32 s12, s22, 16
+; GFX11-TRUE16-NEXT: s_lshr_b32 s13, s21, 16
+; GFX11-TRUE16-NEXT: s_lshr_b32 s14, s20, 16
+; GFX11-TRUE16-NEXT: s_lshr_b32 s15, s19, 16
+; GFX11-TRUE16-NEXT: s_lshr_b32 s40, s18, 16
+; GFX11-TRUE16-NEXT: s_lshr_b32 s41, s17, 16
+; GFX11-TRUE16-NEXT: s_lshr_b32 s42, s16, 16
+; GFX11-TRUE16-NEXT: s_lshr_b32 s43, s3, 16
+; GFX11-TRUE16-NEXT: s_lshr_b32 s44, s2, 16
+; GFX11-TRUE16-NEXT: s_lshr_b32 s45, s1, 16
+; GFX11-TRUE16-NEXT: s_lshr_b32 s46, s0, 16
+; GFX11-TRUE16-NEXT: s_and_not1_b32 vcc_lo, exec_lo, s4
+; GFX11-TRUE16-NEXT: s_cbranch_vccnz .LBB29_4
+; GFX11-TRUE16-NEXT: .LBB29_2: ; %cmp.true
+; GFX11-TRUE16-NEXT: v_add_f32_e64 v17, s29, 1.0
+; GFX11-TRUE16-NEXT: v_add_f32_e64 v16, s28, 1.0
+; GFX11-TRUE16-NEXT: v_add_f32_e64 v15, s27, 1.0
+; GFX11-TRUE16-NEXT: v_add_f32_e64 v14, s26, 1.0
+; GFX11-TRUE16-NEXT: v_add_f32_e64 v13, s25, 1.0
+; GFX11-TRUE16-NEXT: v_add_f32_e64 v12, s24, 1.0
+; GFX11-TRUE16-NEXT: v_add_f32_e64 v11, s23, 1.0
+; GFX11-TRUE16-NEXT: v_add_f32_e64 v10, s22, 1.0
+; GFX11-TRUE16-NEXT: v_add_f32_e64 v9, s21, 1.0
+; GFX11-TRUE16-NEXT: v_add_f32_e64 v8, s20, 1.0
+; GFX11-TRUE16-NEXT: v_add_f32_e64 v7, s19, 1.0
+; GFX11-TRUE16-NEXT: v_add_f32_e64 v6, s18, 1.0
+; GFX11-TRUE16-NEXT: v_add_f32_e64 v5, s17, 1.0
+; GFX11-TRUE16-NEXT: v_add_f32_e64 v4, s16, 1.0
+; GFX11-TRUE16-NEXT: v_add_f32_e64 v3, s3, 1.0
+; GFX11-TRUE16-NEXT: v_add_f32_e64 v2, s2, 1.0
+; GFX11-TRUE16-NEXT: v_add_f32_e64 v1, s1, 1.0
+; GFX11-TRUE16-NEXT: v_add_f32_e64 v0, s0, 1.0
+; GFX11-TRUE16-NEXT: v_lshrrev_b32_e32 v18, 16, v17
+; GFX11-TRUE16-NEXT: v_lshrrev_b32_e32 v19, 16, v16
+; GFX11-TRUE16-NEXT: v_lshrrev_b32_e32 v20, 16, v15
+; GFX11-TRUE16-NEXT: v_lshrrev_b32_e32 v21, 16, v14
+; GFX11-TRUE16-NEXT: v_lshrrev_b32_e32 v22, 16, v13
+; GFX11-TRUE16-NEXT: v_lshrrev_b32_e32 v23, 16, v12
+; GFX11-TRUE16-NEXT: v_lshrrev_b32_e32 v24, 16, v11
+; GFX11-TRUE16-NEXT: v_lshrrev_b32_e32 v25, 16, v10
+; GFX11-TRUE16-NEXT: v_lshrrev_b32_e32 v26, 16, v9
+; GFX11-TRUE16-NEXT: v_lshrrev_b32_e32 v27, 16, v8
+; GFX11-TRUE16-NEXT: v_lshrrev_b32_e32 v28, 16, v7
+; GFX11-TRUE16-NEXT: v_lshrrev_b32_e32 v29, 16, v6
+; GFX11-TRUE16-NEXT: v_lshrrev_b32_e32 v30, 16, v5
+; GFX11-TRUE16-NEXT: v_lshrrev_b32_e32 v31, 16, v4
+; GFX11-TRUE16-NEXT: v_lshrrev_b32_e32 v32, 16, v3
+; GFX11-TRUE16-NEXT: v_lshrrev_b32_e32 v33, 16, v2
+; GFX11-TRUE16-NEXT: v_lshrrev_b32_e32 v34, 16, v1
+; GFX11-TRUE16-NEXT: v_lshrrev_b32_e32 v35, 16, v0
+; GFX11-TRUE16-NEXT: s_branch .LBB29_5
+; GFX11-TRUE16-NEXT: .LBB29_3:
+; GFX11-TRUE16-NEXT: ; implicit-def: $sgpr46
+; GFX11-TRUE16-NEXT: ; implicit-def: $sgpr45
+; GFX11-TRUE16-NEXT: ; implicit-def: $sgpr44
+; GFX11-TRUE16-NEXT: ; implicit-def: $sgpr43
+; GFX11-TRUE16-NEXT: ; implicit-def: $sgpr42
+; GFX11-TRUE16-NEXT: ; implicit-def: $sgpr41
+; GFX11-TRUE16-NEXT: ; implicit-def: $sgpr40
+; GFX11-TRUE16-NEXT: ; implicit-def: $sgpr15
+; GFX11-TRUE16-NEXT: ; implicit-def: $sgpr14
+; GFX11-TRUE16-NEXT: ; implicit-def: $sgpr13
+; GFX11-TRUE16-NEXT: ; implicit-def: $sgpr12
+; GFX11-TRUE16-NEXT: ; implicit-def: $sgpr11
+; GFX11-TRUE16-NEXT: ; implicit-def: $sgpr10
+; GFX11-TRUE16-NEXT: ; implicit-def: $sgpr9
+; GFX11-TRUE16-NEXT: ; implicit-def: $sgpr8
+; GFX11-TRUE16-NEXT: ; implicit-def: $sgpr7
+; GFX11-TRUE16-NEXT: ; implicit-def: $sgpr6
+; GFX11-TRUE16-NEXT: ; implicit-def: $sgpr5
+; GFX11-TRUE16-NEXT: s_branch .LBB29_2
+; GFX11-TRUE16-NEXT: .LBB29_4:
+; GFX11-TRUE16-NEXT: v_dual_mov_b32 v0, s0 :: v_dual_mov_b32 v1, s1
+; GFX11-TRUE16-NEXT: v_dual_mov_b32 v2, s2 :: v_dual_mov_b32 v3, s3
+; GFX11-TRUE16-NEXT: v_dual_mov_b32 v4, s16 :: v_dual_mov_b32 v5, s17
+; GFX11-TRUE16-NEXT: v_dual_mov_b32 v6, s18 :: v_dual_mov_b32 v7, s19
+; GFX11-TRUE16-NEXT: v_dual_mov_b32 v8, s20 :: v_dual_mov_b32 v9, s21
+; GFX11-TRUE16-NEXT: v_dual_mov_b32 v10, s22 :: v_dual_mov_b32 v11, s23
+; GFX11-TRUE16-NEXT: v_dual_mov_b32 v12, s24 :: v_dual_mov_b32 v13, s25
+; GFX11-TRUE16-NEXT: v_dual_mov_b32 v14, s26 :: v_dual_mov_b32 v15, s27
+; GFX11-TRUE16-NEXT: v_dual_mov_b32 v16, s28 :: v_dual_mov_b32 v17, s29
+; GFX11-TRUE16-NEXT: v_dual_mov_b32 v35, s46 :: v_dual_mov_b32 v34, s45
+; GFX11-TRUE16-NEXT: v_dual_mov_b32 v33, s44 :: v_dual_mov_b32 v32, s43
+; GFX11-TRUE16-NEXT: v_dual_mov_b32 v31, s42 :: v_dual_mov_b32 v30, s41
+; GFX11-TRUE16-NEXT: v_dual_mov_b32 v29, s40 :: v_dual_mov_b32 v28, s15
+; GFX11-TRUE16-NEXT: v_dual_mov_b32 v27, s14 :: v_dual_mov_b32 v26, s13
+; GFX11-TRUE16-NEXT: v_dual_mov_b32 v25, s12 :: v_dual_mov_b32 v24, s11
+; GFX11-TRUE16-NEXT: v_dual_mov_b32 v23, s10 :: v_dual_mov_b32 v22, s9
+; GFX11-TRUE16-NEXT: v_dual_mov_b32 v21, s8 :: v_dual_mov_b32 v20, s7
+; GFX11-TRUE16-NEXT: v_dual_mov_b32 v19, s6 :: v_dual_mov_b32 v18, s5
+; GFX11-TRUE16-NEXT: .LBB29_5: ; %end
+; GFX11-TRUE16-NEXT: s_delay_alu instid0(VALU_DEP_1)
+; GFX11-TRUE16-NEXT: v_dual_mov_b32 v35, v35 :: v_dual_mov_b32 v34, v34
+; GFX11-TRUE16-NEXT: v_dual_mov_b32 v33, v33 :: v_dual_mov_b32 v32, v32
+; GFX11-TRUE16-NEXT: v_dual_mov_b32 v31, v31 :: v_dual_mov_b32 v30, v30
+; GFX11-TRUE16-NEXT: v_dual_mov_b32 v29, v29 :: v_dual_mov_b32 v28, v28
+; GFX11-TRUE16-NEXT: v_dual_mov_b32 v27, v27 :: v_dual_mov_b32 v26, v26
+; GFX11-TRUE16-NEXT: v_dual_mov_b32 v25, v25 :: v_dual_mov_b32 v24, v24
+; GFX11-TRUE16-NEXT: v_dual_mov_b32 v23, v23 :: v_dual_mov_b32 v22, v22
+; GFX11-TRUE16-NEXT: v_dual_mov_b32 v21, v21 :: v_dual_mov_b32 v20, v20
+; GFX11-TRUE16-NEXT: v_dual_mov_b32 v19, v19 :: v_dual_mov_b32 v18, v18
+; GFX11-TRUE16-NEXT: v_mov_b16_e32 v0.h, v35.l
+; GFX11-TRUE16-NEXT: v_mov_b16_e32 v1.h, v34.l
+; GFX11-TRUE16-NEXT: v_mov_b16_e32 v2.h, v33.l
+; GFX11-TRUE16-NEXT: v_mov_b16_e32 v3.h, v32.l
+; GFX11-TRUE16-NEXT: v_mov_b16_e32 v4.h, v31.l
+; GFX11-TRUE16-NEXT: v_mov_b16_e32 v5.h, v30.l
+; GFX11-TRUE16-NEXT: v_mov_b16_e32 v6.h, v29.l
+; GFX11-TRUE16-NEXT: v_mov_b16_e32 v7.h, v28.l
+; GFX11-TRUE16-NEXT: v_mov_b16_e32 v8.h, v27.l
+; GFX11-TRUE16-NEXT: v_mov_b16_e32 v9.h, v26.l
+; GFX11-TRUE16-NEXT: v_mov_b16_e32 v10.h, v25.l
+; GFX11-TRUE16-NEXT: v_mov_b16_e32 v11.h, v24.l
+; GFX11-TRUE16-NEXT: v_mov_b16_e32 v12.h, v23.l
+; GFX11-TRUE16-NEXT: v_mov_b16_e32 v13.h, v22.l
+; GFX11-TRUE16-NEXT: v_mov_b16_e32 v14.h, v21.l
+; GFX11-TRUE16-NEXT: v_mov_b16_e32 v15.h, v20.l
+; GFX11-TRUE16-NEXT: v_mov_b16_e32 v16.h, v19.l
+; GFX11-TRUE16-NEXT: v_mov_b16_e32 v17.h, v18.l
+; GFX11-TRUE16-NEXT: s_setpc_b64 s[30:31]
+;
+; GFX11-FAKE16-LABEL: bitcast_v18f32_to_v36i16_scalar:
+; GFX11-FAKE16: ; %bb.0:
+; GFX11-FAKE16-NEXT: s_waitcnt vmcnt(0) expcnt(0) lgkmcnt(0)
+; GFX11-FAKE16-NEXT: v_cmp_ne_u32_e32 vcc_lo, 0, v0
+; GFX11-FAKE16-NEXT: s_mov_b32 s4, 0
+; GFX11-FAKE16-NEXT: s_and_b32 s5, vcc_lo, exec_lo
+; GFX11-FAKE16-NEXT: s_cbranch_scc0 .LBB29_3
+; GFX11-FAKE16-NEXT: ; %bb.1: ; %cmp.false
+; GFX11-FAKE16-NEXT: s_lshr_b32 s5, s29, 16
+; GFX11-FAKE16-NEXT: s_lshr_b32 s6, s28, 16
+; GFX11-FAKE16-NEXT: s_lshr_b32 s7, s27, 16
+; GFX11-FAKE16-NEXT: s_lshr_b32 s8, s26, 16
+; GFX11-FAKE16-NEXT: s_lshr_b32 s9, s25, 16
+; GFX11-FAKE16-NEXT: s_lshr_b32 s10, s24, 16
+; GFX11-FAKE16-NEXT: s_lshr_b32 s11, s23, 16
+; GFX11-FAKE16-NEXT: s_lshr_b32 s12, s22, 16
+; GFX11-FAKE16-NEXT: s_lshr_b32 s13, s21, 16
+; GFX11-FAKE16-NEXT: s_lshr_b32 s14, s20, 16
+; GFX11-FAKE16-NEXT: s_lshr_b32 s15, s19, 16
+; GFX11-FAKE16-NEXT: s_lshr_b32 s40, s18, 16
+; GFX11-FAKE16-NEXT: s_lshr_b32 s41, s17, 16
+; GFX11-FAKE16-NEXT: s_lshr_b32 s42, s16, 16
+; GFX11-FAKE16-NEXT: s_lshr_b32 s43, s3, 16
+; GFX11-FAKE16-NEXT: s_lshr_b32 s44, s2, 16
+; GFX11-FAKE16-NEXT: s_lshr_b32 s45, s1, 16
+; GFX11-FAKE16-NEXT: s_lshr_b32 s46, s0, 16
+; GFX11-FAKE16-NEXT: s_and_not1_b32 vcc_lo, exec_lo, s4
+; GFX11-FAKE16-NEXT: s_cbranch_vccnz .LBB29_4
+; GFX11-FAKE16-NEXT: .LBB29_2: ; %cmp.true
+; GFX11-FAKE16-NEXT: v_add_f32_e64 v13, s29, 1.0
+; GFX11-FAKE16-NEXT: v_add_f32_e64 v14, s28, 1.0
+; GFX11-FAKE16-NEXT: v_add_f32_e64 v15, s27, 1.0
+; GFX11-FAKE16-NEXT: v_add_f32_e64 v16, s26, 1.0
+; GFX11-FAKE16-NEXT: v_add_f32_e64 v17, s25, 1.0
+; GFX11-FAKE16-NEXT: v_add_f32_e64 v8, s24, 1.0
+; GFX11-FAKE16-NEXT: v_add_f32_e64 v9, s23, 1.0
+; GFX11-FAKE16-NEXT: v_add_f32_e64 v10, s22, 1.0
+; GFX11-FAKE16-NEXT: v_add_f32_e64 v11, s21, 1.0
+; GFX11-FAKE16-NEXT: v_add_f32_e64 v12, s20, 1.0
+; GFX11-FAKE16-NEXT: v_add_f32_e64 v3, s19, 1.0
+; GFX11-FAKE16-NEXT: v_add_f32_e64 v4, s18, 1.0
+; GFX11-FAKE16-NEXT: v_add_f32_e64 v5, s17, 1.0
+; GFX11-FAKE16-NEXT: v_add_f32_e64 v6, s16, 1.0
+; GFX11-FAKE16-NEXT: v_add_f32_e64 v7, s3, 1.0
+; GFX11-FAKE16-NEXT: v_add_f32_e64 v0, s2, 1.0
+; GFX11-FAKE16-NEXT: v_add_f32_e64 v1, s1, 1.0
+; GFX11-FAKE16-NEXT: v_add_f32_e64 v2, s0, 1.0
+; GFX11-FAKE16-NEXT: v_lshrrev_b32_e32 v18, 16, v13
+; GFX11-FAKE16-NEXT: v_lshrrev_b32_e32 v19, 16, v14
+; GFX11-FAKE16-NEXT: v_lshrrev_b32_e32 v20, 16, v15
+; GFX11-FAKE16-NEXT: v_lshrrev_b32_e32 v21, 16, v16
+; GFX11-FAKE16-NEXT: v_lshrrev_b32_e32 v22, 16, v17
+; GFX11-FAKE16-NEXT: v_lshrrev_b32_e32 v23, 16, v8
+; GFX11-FAKE16-NEXT: v_lshrrev_b32_e32 v24, 16, v9
+; GFX11-FAKE16-NEXT: v_lshrrev_b32_e32 v25, 16, v10
+; GFX11-FAKE16-NEXT: v_lshrrev_b32_e32 v26, 16, v11
+; GFX11-FAKE16-NEXT: v_lshrrev_b32_e32 v27, 16, v12
+; GFX11-FAKE16-NEXT: v_lshrrev_b32_e32 v28, 16, v3
+; GFX11-FAKE16-NEXT: v_lshrrev_b32_e32 v29, 16, v4
+; GFX11-FAKE16-NEXT: v_lshrrev_b32_e32 v30, 16, v5
+; GFX11-FAKE16-NEXT: v_lshrrev_b32_e32 v31, 16, v6
+; GFX11-FAKE16-NEXT: v_lshrrev_b32_e32 v32, 16, v7
+; GFX11-FAKE16-NEXT: v_lshrrev_b32_e32 v33, 16, v0
+; GFX11-FAKE16-NEXT: v_lshrrev_b32_e32 v34, 16, v1
+; GFX11-FAKE16-NEXT: v_lshrrev_b32_e32 v35, 16, v2
+; GFX11-FAKE16-NEXT: s_branch .LBB29_5
+; GFX11-FAKE16-NEXT: .LBB29_3:
+; GFX11-FAKE16-NEXT: ; implicit-def: $sgpr46
+; GFX11-FAKE16-NEXT: ; implicit-def: $sgpr45
+; GFX11-FAKE16-NEXT: ; implicit-def: $sgpr44
+; GFX11-FAKE16-NEXT: ; implicit-def: $sgpr43
+; GFX11-FAKE16-NEXT: ; implicit-def: $sgpr42
+; GFX11-FAKE16-NEXT: ; implicit-def: $sgpr41
+; GFX11-FAKE16-NEXT: ; implicit-def: $sgpr40
+; GFX11-FAKE16-NEXT: ; implicit-def: $sgpr15
+; GFX11-FAKE16-NEXT: ; implicit-def: $sgpr14
+; GFX11-FAKE16-NEXT: ; implicit-def: $sgpr13
+; GFX11-FAKE16-NEXT: ; implicit-def: $sgpr12
+; GFX11-FAKE16-NEXT: ; implicit-def: $sgpr11
+; GFX11-FAKE16-NEXT: ; implicit-def: $sgpr10
+; GFX11-FAKE16-NEXT: ; implicit-def: $sgpr9
+; GFX11-FAKE16-NEXT: ; implicit-def: $sgpr8
+; GFX11-FAKE16-NEXT: ; implicit-def: $sgpr7
+; GFX11-FAKE16-NEXT: ; implicit-def: $sgpr6
+; GFX11-FAKE16-NEXT: ; implicit-def: $sgpr5
+; GFX11-FAKE16-NEXT: s_branch .LBB29_2
+; GFX11-FAKE16-NEXT: .LBB29_4:
+; GFX11-FAKE16-NEXT: v_dual_mov_b32 v2, s0 :: v_dual_mov_b32 v1, s1
+; GFX11-FAKE16-NEXT: v_dual_mov_b32 v0, s2 :: v_dual_mov_b32 v7, s3
+; GFX11-FAKE16-NEXT: v_dual_mov_b32 v6, s16 :: v_dual_mov_b32 v5, s17
+; GFX11-FAKE16-NEXT: v_dual_mov_b32 v4, s18 :: v_dual_mov_b32 v3, s19
+; GFX11-FAKE16-NEXT: v_dual_mov_b32 v12, s20 :: v_dual_mov_b32 v11, s21
+; GFX11-FAKE16-NEXT: v_dual_mov_b32 v10, s22 :: v_dual_mov_b32 v9, s23
+; GFX11-FAKE16-NEXT: v_dual_mov_b32 v8, s24 :: v_dual_mov_b32 v17, s25
+; GFX11-FAKE16-NEXT: v_dual_mov_b32 v16, s26 :: v_dual_mov_b32 v15, s27
+; GFX11-FAKE16-NEXT: v_dual_mov_b32 v14, s28 :: v_dual_mov_b32 v13, s29
+; GFX11-FAKE16-NEXT: v_dual_mov_b32 v35, s46 :: v_dual_mov_b32 v34, s45
+; GFX11-FAKE16-NEXT: v_dual_mov_b32 v33, s44 :: v_dual_mov_b32 v32, s43
+; GFX11-FAKE16-NEXT: v_dual_mov_b32 v31, s42 :: v_dual_mov_b32 v30, s41
+; GFX11-FAKE16-NEXT: v_dual_mov_b32 v29, s40 :: v_dual_mov_b32 v28, s15
+; GFX11-FAKE16-NEXT: v_dual_mov_b32 v27, s14 :: v_dual_mov_b32 v26, s13
+; GFX11-FAKE16-NEXT: v_dual_mov_b32 v25, s12 :: v_dual_mov_b32 v24, s11
+; GFX11-FAKE16-NEXT: v_dual_mov_b32 v23, s10 :: v_dual_mov_b32 v22, s9
+; GFX11-FAKE16-NEXT: v_dual_mov_b32 v21, s8 :: v_dual_mov_b32 v20, s7
+; GFX11-FAKE16-NEXT: v_dual_mov_b32 v19, s6 :: v_dual_mov_b32 v18, s5
+; GFX11-FAKE16-NEXT: .LBB29_5: ; %end
+; GFX11-FAKE16-NEXT: v_and_b32_e32 v2, 0xffff, v2
+; GFX11-FAKE16-NEXT: v_and_b32_e32 v1, 0xffff, v1
+; GFX11-FAKE16-NEXT: v_and_b32_e32 v36, 0xffff, v0
+; GFX11-FAKE16-NEXT: v_and_b32_e32 v7, 0xffff, v7
+; GFX11-FAKE16-NEXT: v_and_b32_e32 v6, 0xffff, v6
+; GFX11-FAKE16-NEXT: v_lshl_or_b32 v0, v35, 16, v2
+; GFX11-FAKE16-NEXT: v_lshl_or_b32 v1, v34, 16, v1
+; GFX11-FAKE16-NEXT: v_lshl_or_b32 v2, v33, 16, v36
+; GFX11-FAKE16-NEXT: v_and_b32_e32 v33, 0xffff, v4
+; GFX11-FAKE16-NEXT: v_and_b32_e32 v34, 0xffff, v3
+; GFX11-FAKE16-NEXT: v_lshl_or_b32 v3, v32, 16, v7
+; GFX11-FAKE16-NEXT: v_lshl_or_b32 v4, v31, 16, v6
+; GFX11-FAKE16-NEXT: v_and_b32_e32 v12, 0xffff, v12
+; GFX11-FAKE16-NEXT: v_lshl_or_b32 v6, v29, 16, v33
+; GFX11-FAKE16-NEXT: v_lshl_or_b32 v7, v28, 16, v34
+; GFX11-FAKE16-NEXT: v_and_b32_e32 v11, 0xffff, v11
+; GFX11-FAKE16-NEXT: v_and_b32_e32 v28, 0xffff, v9
+; GFX11-FAKE16-NEXT: v_and_b32_e32 v29, 0xffff, v8
+; GFX11-FAKE16-NEXT: v_and_b32_e32 v5, 0xffff, v5
+; GFX11-FAKE16-NEXT: v_and_b32_e32 v10, 0xffff, v10
+; GFX11-FAKE16-NEXT: v_lshl_or_b32 v8, v27, 16, v12
+; GFX11-FAKE16-NEXT: v_lshl_or_b32 v9, v26, 16, v11
+; GFX11-FAKE16-NEXT: v_lshl_or_b32 v11, v24, 16, v28
+; GFX11-FAKE16-NEXT: v_lshl_or_b32 v12, v23, 16, v29
+; GFX11-FAKE16-NEXT: v_and_b32_e32 v17, 0xffff, v17
+; GFX11-FAKE16-NEXT: v_and_b32_e32 v16, 0xffff, v16
+; GFX11-FAKE16-NEXT: v_and_b32_e32 v15, 0xffff, v15
+; GFX11-FAKE16-NEXT: v_and_b32_e32 v23, 0xffff, v14
+; GFX11-FAKE16-NEXT: v_and_b32_e32 v24, 0xffff, v13
+; GFX11-FAKE16-NEXT: v_lshl_or_b32 v5, v30, 16, v5
+; GFX11-FAKE16-NEXT: v_lshl_or_b32 v10, v25, 16, v10
+; GFX11-FAKE16-NEXT: v_lshl_or_b32 v13, v22, 16, v17
+; GFX11-FAKE16-NEXT: v_lshl_or_b32 v14, v21, 16, v16
+; GFX11-FAKE16-NEXT: v_lshl_or_b32 v15, v20, 16, v15
+; GFX11-FAKE16-NEXT: v_lshl_or_b32 v16, v19, 16, v23
+; GFX11-FAKE16-NEXT: v_lshl_or_b32 v17, v18, 16, v24
+; GFX11-FAKE16-NEXT: s_setpc_b64 s[30:31]
%cmp = icmp eq i32 %b, 0
br i1 %cmp, label %cmp.true, label %cmp.false
@@ -12999,149 +13135,285 @@ define inreg <36 x half> @bitcast_v18f32_to_v36f16_scalar(<18 x float> inreg %a,
; GFX9-NEXT: ; implicit-def: $vgpr22
; GFX9-NEXT: s_branch .LBB33_2
;
-; GFX11-LABEL: bitcast_v18f32_to_v36f16_scalar:
-; GFX11: ; %bb.0:
-; GFX11-NEXT: s_waitcnt vmcnt(0) expcnt(0) lgkmcnt(0)
-; GFX11-NEXT: v_cmp_ne_u32_e32 vcc_lo, 0, v0
-; GFX11-NEXT: s_mov_b32 s4, 0
-; GFX11-NEXT: s_and_b32 s5, vcc_lo, exec_lo
-; GFX11-NEXT: s_cbranch_scc0 .LBB33_3
-; GFX11-NEXT: ; %bb.1: ; %cmp.false
-; GFX11-NEXT: s_lshr_b32 s5, s29, 16
-; GFX11-NEXT: s_lshr_b32 s6, s28, 16
-; GFX11-NEXT: s_lshr_b32 s7, s27, 16
-; GFX11-NEXT: s_lshr_b32 s8, s26, 16
-; GFX11-NEXT: s_lshr_b32 s9, s25, 16
-; GFX11-NEXT: s_lshr_b32 s10, s24, 16
-; GFX11-NEXT: s_lshr_b32 s11, s23, 16
-; GFX11-NEXT: s_lshr_b32 s12, s22, 16
-; GFX11-NEXT: s_lshr_b32 s13, s21, 16
-; GFX11-NEXT: s_lshr_b32 s14, s20, 16
-; GFX11-NEXT: s_lshr_b32 s15, s19, 16
-; GFX11-NEXT: s_lshr_b32 s40, s18, 16
-; GFX11-NEXT: s_lshr_b32 s41, s17, 16
-; GFX11-NEXT: s_lshr_b32 s42, s16, 16
-; GFX11-NEXT: s_lshr_b32 s43, s3, 16
-; GFX11-NEXT: s_lshr_b32 s44, s2, 16
-; GFX11-NEXT: s_lshr_b32 s45, s1, 16
-; GFX11-NEXT: s_lshr_b32 s46, s0, 16
-; GFX11-NEXT: s_and_not1_b32 vcc_lo, exec_lo, s4
-; GFX11-NEXT: s_cbranch_vccnz .LBB33_4
-; GFX11-NEXT: .LBB33_2: ; %cmp.true
-; GFX11-NEXT: v_add_f32_e64 v13, s29, 1.0
-; GFX11-NEXT: v_add_f32_e64 v14, s28, 1.0
-; GFX11-NEXT: v_add_f32_e64 v15, s27, 1.0
-; GFX11-NEXT: v_add_f32_e64 v16, s26, 1.0
-; GFX11-NEXT: v_add_f32_e64 v17, s25, 1.0
-; GFX11-NEXT: v_add_f32_e64 v8, s24, 1.0
-; GFX11-NEXT: v_add_f32_e64 v9, s23, 1.0
-; GFX11-NEXT: v_add_f32_e64 v10, s22, 1.0
-; GFX11-NEXT: v_add_f32_e64 v11, s21, 1.0
-; GFX11-NEXT: v_add_f32_e64 v12, s20, 1.0
-; GFX11-NEXT: v_add_f32_e64 v3, s19, 1.0
-; GFX11-NEXT: v_add_f32_e64 v4, s18, 1.0
-; GFX11-NEXT: v_add_f32_e64 v5, s17, 1.0
-; GFX11-NEXT: v_add_f32_e64 v6, s16, 1.0
-; GFX11-NEXT: v_add_f32_e64 v7, s3, 1.0
-; GFX11-NEXT: v_add_f32_e64 v0, s2, 1.0
-; GFX11-NEXT: v_add_f32_e64 v1, s1, 1.0
-; GFX11-NEXT: v_add_f32_e64 v2, s0, 1.0
-; GFX11-NEXT: v_lshrrev_b32_e32 v18, 16, v13
-; GFX11-NEXT: v_lshrrev_b32_e32 v19, 16, v14
-; GFX11-NEXT: v_lshrrev_b32_e32 v20, 16, v15
-; GFX11-NEXT: v_lshrrev_b32_e32 v21, 16, v16
-; GFX11-NEXT: v_lshrrev_b32_e32 v22, 16, v17
-; GFX11-NEXT: v_lshrrev_b32_e32 v23, 16, v8
-; GFX11-NEXT: v_lshrrev_b32_e32 v24, 16, v9
-; GFX11-NEXT: v_lshrrev_b32_e32 v25, 16, v10
-; GFX11-NEXT: v_lshrrev_b32_e32 v26, 16, v11
-; GFX11-NEXT: v_lshrrev_b32_e32 v27, 16, v12
-; GFX11-NEXT: v_lshrrev_b32_e32 v28, 16, v3
-; GFX11-NEXT: v_lshrrev_b32_e32 v29, 16, v4
-; GFX11-NEXT: v_lshrrev_b32_e32 v30, 16, v5
-; GFX11-NEXT: v_lshrrev_b32_e32 v31, 16, v6
-; GFX11-NEXT: v_lshrrev_b32_e32 v32, 16, v7
-; GFX11-NEXT: v_lshrrev_b32_e32 v33, 16, v0
-; GFX11-NEXT: v_lshrrev_b32_e32 v34, 16, v1
-; GFX11-NEXT: v_lshrrev_b32_e32 v35, 16, v2
-; GFX11-NEXT: s_branch .LBB33_5
-; GFX11-NEXT: .LBB33_3:
-; GFX11-NEXT: ; implicit-def: $sgpr46
-; GFX11-NEXT: ; implicit-def: $sgpr45
-; GFX11-NEXT: ; implicit-def: $sgpr44
-; GFX11-NEXT: ; implicit-def: $sgpr43
-; GFX11-NEXT: ; implicit-def: $sgpr42
-; GFX11-NEXT: ; implicit-def: $sgpr41
-; GFX11-NEXT: ; implicit-def: $sgpr40
-; GFX11-NEXT: ; implicit-def: $sgpr15
-; GFX11-NEXT: ; implicit-def: $sgpr14
-; GFX11-NEXT: ; implicit-def: $sgpr13
-; GFX11-NEXT: ; implicit-def: $sgpr12
-; GFX11-NEXT: ; implicit-def: $sgpr11
-; GFX11-NEXT: ; implicit-def: $sgpr10
-; GFX11-NEXT: ; implicit-def: $sgpr9
-; GFX11-NEXT: ; implicit-def: $sgpr8
-; GFX11-NEXT: ; implicit-def: $sgpr7
-; GFX11-NEXT: ; implicit-def: $sgpr6
-; GFX11-NEXT: ; implicit-def: $sgpr5
-; GFX11-NEXT: s_branch .LBB33_2
-; GFX11-NEXT: .LBB33_4:
-; GFX11-NEXT: v_dual_mov_b32 v2, s0 :: v_dual_mov_b32 v1, s1
-; GFX11-NEXT: v_dual_mov_b32 v0, s2 :: v_dual_mov_b32 v7, s3
-; GFX11-NEXT: v_dual_mov_b32 v6, s16 :: v_dual_mov_b32 v5, s17
-; GFX11-NEXT: v_dual_mov_b32 v4, s18 :: v_dual_mov_b32 v3, s19
-; GFX11-NEXT: v_dual_mov_b32 v12, s20 :: v_dual_mov_b32 v11, s21
-; GFX11-NEXT: v_dual_mov_b32 v10, s22 :: v_dual_mov_b32 v9, s23
-; GFX11-NEXT: v_dual_mov_b32 v8, s24 :: v_dual_mov_b32 v17, s25
-; GFX11-NEXT: v_dual_mov_b32 v16, s26 :: v_dual_mov_b32 v15, s27
-; GFX11-NEXT: v_dual_mov_b32 v14, s28 :: v_dual_mov_b32 v13, s29
-; GFX11-NEXT: v_dual_mov_b32 v35, s46 :: v_dual_mov_b32 v34, s45
-; GFX11-NEXT: v_dual_mov_b32 v33, s44 :: v_dual_mov_b32 v32, s43
-; GFX11-NEXT: v_dual_mov_b32 v31, s42 :: v_dual_mov_b32 v30, s41
-; GFX11-NEXT: v_dual_mov_b32 v29, s40 :: v_dual_mov_b32 v28, s15
-; GFX11-NEXT: v_dual_mov_b32 v27, s14 :: v_dual_mov_b32 v26, s13
-; GFX11-NEXT: v_dual_mov_b32 v25, s12 :: v_dual_mov_b32 v24, s11
-; GFX11-NEXT: v_dual_mov_b32 v23, s10 :: v_dual_mov_b32 v22, s9
-; GFX11-NEXT: v_dual_mov_b32 v21, s8 :: v_dual_mov_b32 v20, s7
-; GFX11-NEXT: v_dual_mov_b32 v19, s6 :: v_dual_mov_b32 v18, s5
-; GFX11-NEXT: .LBB33_5: ; %end
-; GFX11-NEXT: v_and_b32_e32 v2, 0xffff, v2
-; GFX11-NEXT: v_and_b32_e32 v1, 0xffff, v1
-; GFX11-NEXT: v_and_b32_e32 v36, 0xffff, v0
-; GFX11-NEXT: v_and_b32_e32 v7, 0xffff, v7
-; GFX11-NEXT: v_and_b32_e32 v6, 0xffff, v6
-; GFX11-NEXT: v_lshl_or_b32 v0, v35, 16, v2
-; GFX11-NEXT: v_lshl_or_b32 v1, v34, 16, v1
-; GFX11-NEXT: v_lshl_or_b32 v2, v33, 16, v36
-; GFX11-NEXT: v_and_b32_e32 v33, 0xffff, v4
-; GFX11-NEXT: v_and_b32_e32 v34, 0xffff, v3
-; GFX11-NEXT: v_lshl_or_b32 v3, v32, 16, v7
-; GFX11-NEXT: v_lshl_or_b32 v4, v31, 16, v6
-; GFX11-NEXT: v_and_b32_e32 v12, 0xffff, v12
-; GFX11-NEXT: v_lshl_or_b32 v6, v29, 16, v33
-; GFX11-NEXT: v_lshl_or_b32 v7, v28, 16, v34
-; GFX11-NEXT: v_and_b32_e32 v11, 0xffff, v11
-; GFX11-NEXT: v_and_b32_e32 v28, 0xffff, v9
-; GFX11-NEXT: v_and_b32_e32 v29, 0xffff, v8
-; GFX11-NEXT: v_and_b32_e32 v5, 0xffff, v5
-; GFX11-NEXT: v_and_b32_e32 v10, 0xffff, v10
-; GFX11-NEXT: v_lshl_or_b32 v8, v27, 16, v12
-; GFX11-NEXT: v_lshl_or_b32 v9, v26, 16, v11
-; GFX11-NEXT: v_lshl_or_b32 v11, v24, 16, v28
-; GFX11-NEXT: v_lshl_or_b32 v12, v23, 16, v29
-; GFX11-NEXT: v_and_b32_e32 v17, 0xffff, v17
-; GFX11-NEXT: v_and_b32_e32 v16, 0xffff, v16
-; GFX11-NEXT: v_and_b32_e32 v15, 0xffff, v15
-; GFX11-NEXT: v_and_b32_e32 v23, 0xffff, v14
-; GFX11-NEXT: v_and_b32_e32 v24, 0xffff, v13
-; GFX11-NEXT: v_lshl_or_b32 v5, v30, 16, v5
-; GFX11-NEXT: v_lshl_or_b32 v10, v25, 16, v10
-; GFX11-NEXT: v_lshl_or_b32 v13, v22, 16, v17
-; GFX11-NEXT: v_lshl_or_b32 v14, v21, 16, v16
-; GFX11-NEXT: v_lshl_or_b32 v15, v20, 16, v15
-; GFX11-NEXT: v_lshl_or_b32 v16, v19, 16, v23
-; GFX11-NEXT: v_lshl_or_b32 v17, v18, 16, v24
-; GFX11-NEXT: s_setpc_b64 s[30:31]
+; GFX11-TRUE16-LABEL: bitcast_v18f32_to_v36f16_scalar:
+; GFX11-TRUE16: ; %bb.0:
+; GFX11-TRUE16-NEXT: s_waitcnt vmcnt(0) expcnt(0) lgkmcnt(0)
+; GFX11-TRUE16-NEXT: v_cmp_ne_u32_e32 vcc_lo, 0, v0
+; GFX11-TRUE16-NEXT: s_mov_b32 s4, 0
+; GFX11-TRUE16-NEXT: s_and_b32 s5, vcc_lo, exec_lo
+; GFX11-TRUE16-NEXT: s_cbranch_scc0 .LBB33_3
+; GFX11-TRUE16-NEXT: ; %bb.1: ; %cmp.false
+; GFX11-TRUE16-NEXT: s_lshr_b32 s5, s29, 16
+; GFX11-TRUE16-NEXT: s_lshr_b32 s6, s28, 16
+; GFX11-TRUE16-NEXT: s_lshr_b32 s7, s27, 16
+; GFX11-TRUE16-NEXT: s_lshr_b32 s8, s26, 16
+; GFX11-TRUE16-NEXT: s_lshr_b32 s9, s25, 16
+; GFX11-TRUE16-NEXT: s_lshr_b32 s10, s24, 16
+; GFX11-TRUE16-NEXT: s_lshr_b32 s11, s23, 16
+; GFX11-TRUE16-NEXT: s_lshr_b32 s12, s22, 16
+; GFX11-TRUE16-NEXT: s_lshr_b32 s13, s21, 16
+; GFX11-TRUE16-NEXT: s_lshr_b32 s14, s20, 16
+; GFX11-TRUE16-NEXT: s_lshr_b32 s15, s19, 16
+; GFX11-TRUE16-NEXT: s_lshr_b32 s40, s18, 16
+; GFX11-TRUE16-NEXT: s_lshr_b32 s41, s17, 16
+; GFX11-TRUE16-NEXT: s_lshr_b32 s42, s16, 16
+; GFX11-TRUE16-NEXT: s_lshr_b32 s43, s3, 16
+; GFX11-TRUE16-NEXT: s_lshr_b32 s44, s2, 16
+; GFX11-TRUE16-NEXT: s_lshr_b32 s45, s1, 16
+; GFX11-TRUE16-NEXT: s_lshr_b32 s46, s0, 16
+; GFX11-TRUE16-NEXT: s_and_not1_b32 vcc_lo, exec_lo, s4
+; GFX11-TRUE16-NEXT: s_cbranch_vccnz .LBB33_4
+; GFX11-TRUE16-NEXT: .LBB33_2: ; %cmp.true
+; GFX11-TRUE16-NEXT: v_add_f32_e64 v17, s29, 1.0
+; GFX11-TRUE16-NEXT: v_add_f32_e64 v16, s28, 1.0
+; GFX11-TRUE16-NEXT: v_add_f32_e64 v15, s27, 1.0
+; GFX11-TRUE16-NEXT: v_add_f32_e64 v14, s26, 1.0
+; GFX11-TRUE16-NEXT: v_add_f32_e64 v13, s25, 1.0
+; GFX11-TRUE16-NEXT: v_add_f32_e64 v12, s24, 1.0
+; GFX11-TRUE16-NEXT: v_add_f32_e64 v11, s23, 1.0
+; GFX11-TRUE16-NEXT: v_add_f32_e64 v10, s22, 1.0
+; GFX11-TRUE16-NEXT: v_add_f32_e64 v9, s21, 1.0
+; GFX11-TRUE16-NEXT: v_add_f32_e64 v8, s20, 1.0
+; GFX11-TRUE16-NEXT: v_add_f32_e64 v7, s19, 1.0
+; GFX11-TRUE16-NEXT: v_add_f32_e64 v6, s18, 1.0
+; GFX11-TRUE16-NEXT: v_add_f32_e64 v5, s17, 1.0
+; GFX11-TRUE16-NEXT: v_add_f32_e64 v4, s16, 1.0
+; GFX11-TRUE16-NEXT: v_add_f32_e64 v3, s3, 1.0
+; GFX11-TRUE16-NEXT: v_add_f32_e64 v2, s2, 1.0
+; GFX11-TRUE16-NEXT: v_add_f32_e64 v1, s1, 1.0
+; GFX11-TRUE16-NEXT: v_add_f32_e64 v0, s0, 1.0
+; GFX11-TRUE16-NEXT: v_lshrrev_b32_e32 v18, 16, v17
+; GFX11-TRUE16-NEXT: v_lshrrev_b32_e32 v19, 16, v16
+; GFX11-TRUE16-NEXT: v_lshrrev_b32_e32 v20, 16, v15
+; GFX11-TRUE16-NEXT: v_lshrrev_b32_e32 v21, 16, v14
+; GFX11-TRUE16-NEXT: v_lshrrev_b32_e32 v22, 16, v13
+; GFX11-TRUE16-NEXT: v_lshrrev_b32_e32 v23, 16, v12
+; GFX11-TRUE16-NEXT: v_lshrrev_b32_e32 v24, 16, v11
+; GFX11-TRUE16-NEXT: v_lshrrev_b32_e32 v25, 16, v10
+; GFX11-TRUE16-NEXT: v_lshrrev_b32_e32 v26, 16, v9
+; GFX11-TRUE16-NEXT: v_lshrrev_b32_e32 v27, 16, v8
+; GFX11-TRUE16-NEXT: v_lshrrev_b32_e32 v28, 16, v7
+; GFX11-TRUE16-NEXT: v_lshrrev_b32_e32 v29, 16, v6
+; GFX11-TRUE16-NEXT: v_lshrrev_b32_e32 v30, 16, v5
+; GFX11-TRUE16-NEXT: v_lshrrev_b32_e32 v31, 16, v4
+; GFX11-TRUE16-NEXT: v_lshrrev_b32_e32 v32, 16, v3
+; GFX11-TRUE16-NEXT: v_lshrrev_b32_e32 v33, 16, v2
+; GFX11-TRUE16-NEXT: v_lshrrev_b32_e32 v34, 16, v1
+; GFX11-TRUE16-NEXT: v_lshrrev_b32_e32 v35, 16, v0
+; GFX11-TRUE16-NEXT: s_branch .LBB33_5
+; GFX11-TRUE16-NEXT: .LBB33_3:
+; GFX11-TRUE16-NEXT: ; implicit-def: $sgpr46
+; GFX11-TRUE16-NEXT: ; implicit-def: $sgpr45
+; GFX11-TRUE16-NEXT: ; implicit-def: $sgpr44
+; GFX11-TRUE16-NEXT: ; implicit-def: $sgpr43
+; GFX11-TRUE16-NEXT: ; implicit-def: $sgpr42
+; GFX11-TRUE16-NEXT: ; implicit-def: $sgpr41
+; GFX11-TRUE16-NEXT: ; implicit-def: $sgpr40
+; GFX11-TRUE16-NEXT: ; implicit-def: $sgpr15
+; GFX11-TRUE16-NEXT: ; implicit-def: $sgpr14
+; GFX11-TRUE16-NEXT: ; implicit-def: $sgpr13
+; GFX11-TRUE16-NEXT: ; implicit-def: $sgpr12
+; GFX11-TRUE16-NEXT: ; implicit-def: $sgpr11
+; GFX11-TRUE16-NEXT: ; implicit-def: $sgpr10
+; GFX11-TRUE16-NEXT: ; implicit-def: $sgpr9
+; GFX11-TRUE16-NEXT: ; implicit-def: $sgpr8
+; GFX11-TRUE16-NEXT: ; implicit-def: $sgpr7
+; GFX11-TRUE16-NEXT: ; implicit-def: $sgpr6
+; GFX11-TRUE16-NEXT: ; implicit-def: $sgpr5
+; GFX11-TRUE16-NEXT: s_branch .LBB33_2
+; GFX11-TRUE16-NEXT: .LBB33_4:
+; GFX11-TRUE16-NEXT: v_dual_mov_b32 v0, s0 :: v_dual_mov_b32 v1, s1
+; GFX11-TRUE16-NEXT: v_dual_mov_b32 v2, s2 :: v_dual_mov_b32 v3, s3
+; GFX11-TRUE16-NEXT: v_dual_mov_b32 v4, s16 :: v_dual_mov_b32 v5, s17
+; GFX11-TRUE16-NEXT: v_dual_mov_b32 v6, s18 :: v_dual_mov_b32 v7, s19
+; GFX11-TRUE16-NEXT: v_dual_mov_b32 v8, s20 :: v_dual_mov_b32 v9, s21
+; GFX11-TRUE16-NEXT: v_dual_mov_b32 v10, s22 :: v_dual_mov_b32 v11, s23
+; GFX11-TRUE16-NEXT: v_dual_mov_b32 v12, s24 :: v_dual_mov_b32 v13, s25
+; GFX11-TRUE16-NEXT: v_dual_mov_b32 v14, s26 :: v_dual_mov_b32 v15, s27
+; GFX11-TRUE16-NEXT: v_dual_mov_b32 v16, s28 :: v_dual_mov_b32 v17, s29
+; GFX11-TRUE16-NEXT: v_dual_mov_b32 v35, s46 :: v_dual_mov_b32 v34, s45
+; GFX11-TRUE16-NEXT: v_dual_mov_b32 v33, s44 :: v_dual_mov_b32 v32, s43
+; GFX11-TRUE16-NEXT: v_dual_mov_b32 v31, s42 :: v_dual_mov_b32 v30, s41
+; GFX11-TRUE16-NEXT: v_dual_mov_b32 v29, s40 :: v_dual_mov_b32 v28, s15
+; GFX11-TRUE16-NEXT: v_dual_mov_b32 v27, s14 :: v_dual_mov_b32 v26, s13
+; GFX11-TRUE16-NEXT: v_dual_mov_b32 v25, s12 :: v_dual_mov_b32 v24, s11
+; GFX11-TRUE16-NEXT: v_dual_mov_b32 v23, s10 :: v_dual_mov_b32 v22, s9
+; GFX11-TRUE16-NEXT: v_dual_mov_b32 v21, s8 :: v_dual_mov_b32 v20, s7
+; GFX11-TRUE16-NEXT: v_dual_mov_b32 v19, s6 :: v_dual_mov_b32 v18, s5
+; GFX11-TRUE16-NEXT: .LBB33_5: ; %end
+; GFX11-TRUE16-NEXT: s_delay_alu instid0(VALU_DEP_1)
+; GFX11-TRUE16-NEXT: v_dual_mov_b32 v35, v35 :: v_dual_mov_b32 v34, v34
+; GFX11-TRUE16-NEXT: v_dual_mov_b32 v33, v33 :: v_dual_mov_b32 v32, v32
+; GFX11-TRUE16-NEXT: v_dual_mov_b32 v31, v31 :: v_dual_mov_b32 v30, v30
+; GFX11-TRUE16-NEXT: v_dual_mov_b32 v29, v29 :: v_dual_mov_b32 v28, v28
+; GFX11-TRUE16-NEXT: v_dual_mov_b32 v27, v27 :: v_dual_mov_b32 v26, v26
+; GFX11-TRUE16-NEXT: v_dual_mov_b32 v25, v25 :: v_dual_mov_b32 v24, v24
+; GFX11-TRUE16-NEXT: v_dual_mov_b32 v23, v23 :: v_dual_mov_b32 v22, v22
+; GFX11-TRUE16-NEXT: v_dual_mov_b32 v21, v21 :: v_dual_mov_b32 v20, v20
+; GFX11-TRUE16-NEXT: v_dual_mov_b32 v19, v19 :: v_dual_mov_b32 v18, v18
+; GFX11-TRUE16-NEXT: v_mov_b16_e32 v0.h, v35.l
+; GFX11-TRUE16-NEXT: v_mov_b16_e32 v1.h, v34.l
+; GFX11-TRUE16-NEXT: v_mov_b16_e32 v2.h, v33.l
+; GFX11-TRUE16-NEXT: v_mov_b16_e32 v3.h, v32.l
+; GFX11-TRUE16-NEXT: v_mov_b16_e32 v4.h, v31.l
+; GFX11-TRUE16-NEXT: v_mov_b16_e32 v5.h, v30.l
+; GFX11-TRUE16-NEXT: v_mov_b16_e32 v6.h, v29.l
+; GFX11-TRUE16-NEXT: v_mov_b16_e32 v7.h, v28.l
+; GFX11-TRUE16-NEXT: v_mov_b16_e32 v8.h, v27.l
+; GFX11-TRUE16-NEXT: v_mov_b16_e32 v9.h, v26.l
+; GFX11-TRUE16-NEXT: v_mov_b16_e32 v10.h, v25.l
+; GFX11-TRUE16-NEXT: v_mov_b16_e32 v11.h, v24.l
+; GFX11-TRUE16-NEXT: v_mov_b16_e32 v12.h, v23.l
+; GFX11-TRUE16-NEXT: v_mov_b16_e32 v13.h, v22.l
+; GFX11-TRUE16-NEXT: v_mov_b16_e32 v14.h, v21.l
+; GFX11-TRUE16-NEXT: v_mov_b16_e32 v15.h, v20.l
+; GFX11-TRUE16-NEXT: v_mov_b16_e32 v16.h, v19.l
+; GFX11-TRUE16-NEXT: v_mov_b16_e32 v17.h, v18.l
+; GFX11-TRUE16-NEXT: s_setpc_b64 s[30:31]
+;
+; GFX11-FAKE16-LABEL: bitcast_v18f32_to_v36f16_scalar:
+; GFX11-FAKE16: ; %bb.0:
+; GFX11-FAKE16-NEXT: s_waitcnt vmcnt(0) expcnt(0) lgkmcnt(0)
+; GFX11-FAKE16-NEXT: v_cmp_ne_u32_e32 vcc_lo, 0, v0
+; GFX11-FAKE16-NEXT: s_mov_b32 s4, 0
+; GFX11-FAKE16-NEXT: s_and_b32 s5, vcc_lo, exec_lo
+; GFX11-FAKE16-NEXT: s_cbranch_scc0 .LBB33_3
+; GFX11-FAKE16-NEXT: ; %bb.1: ; %cmp.false
+; GFX11-FAKE16-NEXT: s_lshr_b32 s5, s29, 16
+; GFX11-FAKE16-NEXT: s_lshr_b32 s6, s28, 16
+; GFX11-FAKE16-NEXT: s_lshr_b32 s7, s27, 16
+; GFX11-FAKE16-NEXT: s_lshr_b32 s8, s26, 16
+; GFX11-FAKE16-NEXT: s_lshr_b32 s9, s25, 16
+; GFX11-FAKE16-NEXT: s_lshr_b32 s10, s24, 16
+; GFX11-FAKE16-NEXT: s_lshr_b32 s11, s23, 16
+; GFX11-FAKE16-NEXT: s_lshr_b32 s12, s22, 16
+; GFX11-FAKE16-NEXT: s_lshr_b32 s13, s21, 16
+; GFX11-FAKE16-NEXT: s_lshr_b32 s14, s20, 16
+; GFX11-FAKE16-NEXT: s_lshr_b32 s15, s19, 16
+; GFX11-FAKE16-NEXT: s_lshr_b32 s40, s18, 16
+; GFX11-FAKE16-NEXT: s_lshr_b32 s41, s17, 16
+; GFX11-FAKE16-NEXT: s_lshr_b32 s42, s16, 16
+; GFX11-FAKE16-NEXT: s_lshr_b32 s43, s3, 16
+; GFX11-FAKE16-NEXT: s_lshr_b32 s44, s2, 16
+; GFX11-FAKE16-NEXT: s_lshr_b32 s45, s1, 16
+; GFX11-FAKE16-NEXT: s_lshr_b32 s46, s0, 16
+; GFX11-FAKE16-NEXT: s_and_not1_b32 vcc_lo, exec_lo, s4
+; GFX11-FAKE16-NEXT: s_cbranch_vccnz .LBB33_4
+; GFX11-FAKE16-NEXT: .LBB33_2: ; %cmp.true
+; GFX11-FAKE16-NEXT: v_add_f32_e64 v13, s29, 1.0
+; GFX11-FAKE16-NEXT: v_add_f32_e64 v14, s28, 1.0
+; GFX11-FAKE16-NEXT: v_add_f32_e64 v15, s27, 1.0
+; GFX11-FAKE16-NEXT: v_add_f32_e64 v16, s26, 1.0
+; GFX11-FAKE16-NEXT: v_add_f32_e64 v17, s25, 1.0
+; GFX11-FAKE16-NEXT: v_add_f32_e64 v8, s24, 1.0
+; GFX11-FAKE16-NEXT: v_add_f32_e64 v9, s23, 1.0
+; GFX11-FAKE16-NEXT: v_add_f32_e64 v10, s22, 1.0
+; GFX11-FAKE16-NEXT: v_add_f32_e64 v11, s21, 1.0
+; GFX11-FAKE16-NEXT: v_add_f32_e64 v12, s20, 1.0
+; GFX11-FAKE16-NEXT: v_add_f32_e64 v3, s19, 1.0
+; GFX11-FAKE16-NEXT: v_add_f32_e64 v4, s18, 1.0
+; GFX11-FAKE16-NEXT: v_add_f32_e64 v5, s17, 1.0
+; GFX11-FAKE16-NEXT: v_add_f32_e64 v6, s16, 1.0
+; GFX11-FAKE16-NEXT: v_add_f32_e64 v7, s3, 1.0
+; GFX11-FAKE16-NEXT: v_add_f32_e64 v0, s2, 1.0
+; GFX11-FAKE16-NEXT: v_add_f32_e64 v1, s1, 1.0
+; GFX11-FAKE16-NEXT: v_add_f32_e64 v2, s0, 1.0
+; GFX11-FAKE16-NEXT: v_lshrrev_b32_e32 v18, 16, v13
+; GFX11-FAKE16-NEXT: v_lshrrev_b32_e32 v19, 16, v14
+; GFX11-FAKE16-NEXT: v_lshrrev_b32_e32 v20, 16, v15
+; GFX11-FAKE16-NEXT: v_lshrrev_b32_e32 v21, 16, v16
+; GFX11-FAKE16-NEXT: v_lshrrev_b32_e32 v22, 16, v17
+; GFX11-FAKE16-NEXT: v_lshrrev_b32_e32 v23, 16, v8
+; GFX11-FAKE16-NEXT: v_lshrrev_b32_e32 v24, 16, v9
+; GFX11-FAKE16-NEXT: v_lshrrev_b32_e32 v25, 16, v10
+; GFX11-FAKE16-NEXT: v_lshrrev_b32_e32 v26, 16, v11
+; GFX11-FAKE16-NEXT: v_lshrrev_b32_e32 v27, 16, v12
+; GFX11-FAKE16-NEXT: v_lshrrev_b32_e32 v28, 16, v3
+; GFX11-FAKE16-NEXT: v_lshrrev_b32_e32 v29, 16, v4
+; GFX11-FAKE16-NEXT: v_lshrrev_b32_e32 v30, 16, v5
+; GFX11-FAKE16-NEXT: v_lshrrev_b32_e32 v31, 16, v6
+; GFX11-FAKE16-NEXT: v_lshrrev_b32_e32 v32, 16, v7
+; GFX11-FAKE16-NEXT: v_lshrrev_b32_e32 v33, 16, v0
+; GFX11-FAKE16-NEXT: v_lshrrev_b32_e32 v34, 16, v1
+; GFX11-FAKE16-NEXT: v_lshrrev_b32_e32 v35, 16, v2
+; GFX11-FAKE16-NEXT: s_branch .LBB33_5
+; GFX11-FAKE16-NEXT: .LBB33_3:
+; GFX11-FAKE16-NEXT: ; implicit-def: $sgpr46
+; GFX11-FAKE16-NEXT: ; implicit-def: $sgpr45
+; GFX11-FAKE16-NEXT: ; implicit-def: $sgpr44
+; GFX11-FAKE16-NEXT: ; implicit-def: $sgpr43
+; GFX11-FAKE16-NEXT: ; implicit-def: $sgpr42
+; GFX11-FAKE16-NEXT: ; implicit-def: $sgpr41
+; GFX11-FAKE16-NEXT: ; implicit-def: $sgpr40
+; GFX11-FAKE16-NEXT: ; implicit-def: $sgpr15
+; GFX11-FAKE16-NEXT: ; implicit-def: $sgpr14
+; GFX11-FAKE16-NEXT: ; implicit-def: $sgpr13
+; GFX11-FAKE16-NEXT: ; implicit-def: $sgpr12
+; GFX11-FAKE16-NEXT: ; implicit-def: $sgpr11
+; GFX11-FAKE16-NEXT: ; implicit-def: $sgpr10
+; GFX11-FAKE16-NEXT: ; implicit-def: $sgpr9
+; GFX11-FAKE16-NEXT: ; implicit-def: $sgpr8
+; GFX11-FAKE16-NEXT: ; implicit-def: $sgpr7
+; GFX11-FAKE16-NEXT: ; implicit-def: $sgpr6
+; GFX11-FAKE16-NEXT: ; implicit-def: $sgpr5
+; GFX11-FAKE16-NEXT: s_branch .LBB33_2
+; GFX11-FAKE16-NEXT: .LBB33_4:
+; GFX11-FAKE16-NEXT: v_dual_mov_b32 v2, s0 :: v_dual_mov_b32 v1, s1
+; GFX11-FAKE16-NEXT: v_dual_mov_b32 v0, s2 :: v_dual_mov_b32 v7, s3
+; GFX11-FAKE16-NEXT: v_dual_mov_b32 v6, s16 :: v_dual_mov_b32 v5, s17
+; GFX11-FAKE16-NEXT: v_dual_mov_b32 v4, s18 :: v_dual_mov_b32 v3, s19
+; GFX11-FAKE16-NEXT: v_dual_mov_b32 v12, s20 :: v_dual_mov_b32 v11, s21
+; GFX11-FAKE16-NEXT: v_dual_mov_b32 v10, s22 :: v_dual_mov_b32 v9, s23
+; GFX11-FAKE16-NEXT: v_dual_mov_b32 v8, s24 :: v_dual_mov_b32 v17, s25
+; GFX11-FAKE16-NEXT: v_dual_mov_b32 v16, s26 :: v_dual_mov_b32 v15, s27
+; GFX11-FAKE16-NEXT: v_dual_mov_b32 v14, s28 :: v_dual_mov_b32 v13, s29
+; GFX11-FAKE16-NEXT: v_dual_mov_b32 v35, s46 :: v_dual_mov_b32 v34, s45
+; GFX11-FAKE16-NEXT: v_dual_mov_b32 v33, s44 :: v_dual_mov_b32 v32, s43
+; GFX11-FAKE16-NEXT: v_dual_mov_b32 v31, s42 :: v_dual_mov_b32 v30, s41
+; GFX11-FAKE16-NEXT: v_dual_mov_b32 v29, s40 :: v_dual_mov_b32 v28, s15
+; GFX11-FAKE16-NEXT: v_dual_mov_b32 v27, s14 :: v_dual_mov_b32 v26, s13
+; GFX11-FAKE16-NEXT: v_dual_mov_b32 v25, s12 :: v_dual_mov_b32 v24, s11
+; GFX11-FAKE16-NEXT: v_dual_mov_b32 v23, s10 :: v_dual_mov_b32 v22, s9
+; GFX11-FAKE16-NEXT: v_dual_mov_b32 v21, s8 :: v_dual_mov_b32 v20, s7
+; GFX11-FAKE16-NEXT: v_dual_mov_b32 v19, s6 :: v_dual_mov_b32 v18, s5
+; GFX11-FAKE16-NEXT: .LBB33_5: ; %end
+; GFX11-FAKE16-NEXT: v_and_b32_e32 v2, 0xffff, v2
+; GFX11-FAKE16-NEXT: v_and_b32_e32 v1, 0xffff, v1
+; GFX11-FAKE16-NEXT: v_and_b32_e32 v36, 0xffff, v0
+; GFX11-FAKE16-NEXT: v_and_b32_e32 v7, 0xffff, v7
+; GFX11-FAKE16-NEXT: v_and_b32_e32 v6, 0xffff, v6
+; GFX11-FAKE16-NEXT: v_lshl_or_b32 v0, v35, 16, v2
+; GFX11-FAKE16-NEXT: v_lshl_or_b32 v1, v34, 16, v1
+; GFX11-FAKE16-NEXT: v_lshl_or_b32 v2, v33, 16, v36
+; GFX11-FAKE16-NEXT: v_and_b32_e32 v33, 0xffff, v4
+; GFX11-FAKE16-NEXT: v_and_b32_e32 v34, 0xffff, v3
+; GFX11-FAKE16-NEXT: v_lshl_or_b32 v3, v32, 16, v7
+; GFX11-FAKE16-NEXT: v_lshl_or_b32 v4, v31, 16, v6
+; GFX11-FAKE16-NEXT: v_and_b32_e32 v12, 0xffff, v12
+; GFX11-FAKE16-NEXT: v_lshl_or_b32 v6, v29, 16, v33
+; GFX11-FAKE16-NEXT: v_lshl_or_b32 v7, v28, 16, v34
+; GFX11-FAKE16-NEXT: v_and_b32_e32 v11, 0xffff, v11
+; GFX11-FAKE16-NEXT: v_and_b32_e32 v28, 0xffff, v9
+; GFX11-FAKE16-NEXT: v_and_b32_e32 v29, 0xffff, v8
+; GFX11-FAKE16-NEXT: v_and_b32_e32 v5, 0xffff, v5
+; GFX11-FAKE16-NEXT: v_and_b32_e32 v10, 0xffff, v10
+; GFX11-FAKE16-NEXT: v_lshl_or_b32 v8, v27, 16, v12
+; GFX11-FAKE16-NEXT: v_lshl_or_b32 v9, v26, 16, v11
+; GFX11-FAKE16-NEXT: v_lshl_or_b32 v11, v24, 16, v28
+; GFX11-FAKE16-NEXT: v_lshl_or_b32 v12, v23, 16, v29
+; GFX11-FAKE16-NEXT: v_and_b32_e32 v17, 0xffff, v17
+; GFX11-FAKE16-NEXT: v_and_b32_e32 v16, 0xffff, v16
+; GFX11-FAKE16-NEXT: v_and_b32_e32 v15, 0xffff, v15
+; GFX11-FAKE16-NEXT: v_and_b32_e32 v23, 0xffff, v14
+; GFX11-FAKE16-NEXT: v_and_b32_e32 v24, 0xffff, v13
+; GFX11-FAKE16-NEXT: v_lshl_or_b32 v5, v30, 16, v5
+; GFX11-FAKE16-NEXT: v_lshl_or_b32 v10, v25, 16, v10
+; GFX11-FAKE16-NEXT: v_lshl_or_b32 v13, v22, 16, v17
+; GFX11-FAKE16-NEXT: v_lshl_or_b32 v14, v21, 16, v16
+; GFX11-FAKE16-NEXT: v_lshl_or_b32 v15, v20, 16, v15
+; GFX11-FAKE16-NEXT: v_lshl_or_b32 v16, v19, 16, v23
+; GFX11-FAKE16-NEXT: v_lshl_or_b32 v17, v18, 16, v24
+; GFX11-FAKE16-NEXT: s_setpc_b64 s[30:31]
%cmp = icmp eq i32 %b, 0
br i1 %cmp, label %cmp.true, label %cmp.false
@@ -21895,140 +22167,270 @@ define inreg <36 x i16> @bitcast_v9f64_to_v36i16_scalar(<9 x double> inreg %a, i
; GFX9-NEXT: ; implicit-def: $vgpr22
; GFX9-NEXT: s_branch .LBB49_2
;
-; GFX11-LABEL: bitcast_v9f64_to_v36i16_scalar:
-; GFX11: ; %bb.0:
-; GFX11-NEXT: s_waitcnt vmcnt(0) expcnt(0) lgkmcnt(0)
-; GFX11-NEXT: v_cmp_ne_u32_e32 vcc_lo, 0, v0
-; GFX11-NEXT: s_mov_b32 s4, 0
-; GFX11-NEXT: s_and_b32 s5, vcc_lo, exec_lo
-; GFX11-NEXT: s_cbranch_scc0 .LBB49_3
-; GFX11-NEXT: ; %bb.1: ; %cmp.false
-; GFX11-NEXT: s_lshr_b32 s5, s29, 16
-; GFX11-NEXT: s_lshr_b32 s14, s28, 16
-; GFX11-NEXT: s_lshr_b32 s6, s27, 16
-; GFX11-NEXT: s_lshr_b32 s15, s26, 16
-; GFX11-NEXT: s_lshr_b32 s7, s25, 16
-; GFX11-NEXT: s_lshr_b32 s40, s24, 16
-; GFX11-NEXT: s_lshr_b32 s8, s23, 16
-; GFX11-NEXT: s_lshr_b32 s41, s22, 16
-; GFX11-NEXT: s_lshr_b32 s9, s21, 16
-; GFX11-NEXT: s_lshr_b32 s42, s20, 16
-; GFX11-NEXT: s_lshr_b32 s10, s19, 16
-; GFX11-NEXT: s_lshr_b32 s43, s18, 16
-; GFX11-NEXT: s_lshr_b32 s11, s17, 16
-; GFX11-NEXT: s_lshr_b32 s44, s16, 16
-; GFX11-NEXT: s_lshr_b32 s12, s3, 16
-; GFX11-NEXT: s_lshr_b32 s45, s2, 16
-; GFX11-NEXT: s_lshr_b32 s13, s1, 16
-; GFX11-NEXT: s_lshr_b32 s46, s0, 16
-; GFX11-NEXT: s_and_not1_b32 vcc_lo, exec_lo, s4
-; GFX11-NEXT: s_cbranch_vccnz .LBB49_4
-; GFX11-NEXT: .LBB49_2: ; %cmp.true
-; GFX11-NEXT: v_add_f64 v[13:14], s[28:29], 1.0
-; GFX11-NEXT: v_add_f64 v[15:16], s[26:27], 1.0
-; GFX11-NEXT: v_add_f64 v[17:18], s[24:25], 1.0
-; GFX11-NEXT: v_add_f64 v[8:9], s[22:23], 1.0
-; GFX11-NEXT: v_add_f64 v[10:11], s[20:21], 1.0
-; GFX11-NEXT: v_add_f64 v[3:4], s[18:19], 1.0
-; GFX11-NEXT: v_add_f64 v[5:6], s[16:17], 1.0
-; GFX11-NEXT: v_add_f64 v[19:20], s[2:3], 1.0
-; GFX11-NEXT: v_add_f64 v[0:1], s[0:1], 1.0
-; GFX11-NEXT: v_lshrrev_b32_e32 v23, 16, v14
-; GFX11-NEXT: v_lshrrev_b32_e32 v21, 16, v13
-; GFX11-NEXT: v_lshrrev_b32_e32 v24, 16, v16
-; GFX11-NEXT: v_lshrrev_b32_e32 v22, 16, v15
-; GFX11-NEXT: v_lshrrev_b32_e32 v25, 16, v18
-; GFX11-NEXT: v_lshrrev_b32_e32 v12, 16, v17
-; GFX11-NEXT: v_lshrrev_b32_e32 v28, 16, v9
-; GFX11-NEXT: v_lshrrev_b32_e32 v26, 16, v8
-; GFX11-NEXT: v_lshrrev_b32_e32 v29, 16, v11
-; GFX11-NEXT: v_lshrrev_b32_e32 v27, 16, v10
-; GFX11-NEXT: v_lshrrev_b32_e32 v31, 16, v4
-; GFX11-NEXT: v_lshrrev_b32_e32 v7, 16, v3
-; GFX11-NEXT: v_lshrrev_b32_e32 v32, 16, v6
-; GFX11-NEXT: v_lshrrev_b32_e32 v30, 16, v5
-; GFX11-NEXT: v_lshrrev_b32_e32 v33, 16, v20
-; GFX11-NEXT: v_lshrrev_b32_e32 v2, 16, v19
-; GFX11-NEXT: v_lshrrev_b32_e32 v35, 16, v1
-; GFX11-NEXT: v_lshrrev_b32_e32 v34, 16, v0
-; GFX11-NEXT: s_branch .LBB49_5
-; GFX11-NEXT: .LBB49_3:
-; GFX11-NEXT: ; implicit-def: $sgpr46
-; GFX11-NEXT: ; implicit-def: $sgpr13
-; GFX11-NEXT: ; implicit-def: $sgpr45
-; GFX11-NEXT: ; implicit-def: $sgpr12
-; GFX11-NEXT: ; implicit-def: $sgpr44
-; GFX11-NEXT: ; implicit-def: $sgpr11
-; GFX11-NEXT: ; implicit-def: $sgpr43
-; GFX11-NEXT: ; implicit-def: $sgpr10
-; GFX11-NEXT: ; implicit-def: $sgpr42
-; GFX11-NEXT: ; implicit-def: $sgpr9
-; GFX11-NEXT: ; implicit-def: $sgpr41
-; GFX11-NEXT: ; implicit-def: $sgpr8
-; GFX11-NEXT: ; implicit-def: $sgpr40
-; GFX11-NEXT: ; implicit-def: $sgpr7
-; GFX11-NEXT: ; implicit-def: $sgpr15
-; GFX11-NEXT: ; implicit-def: $sgpr6
-; GFX11-NEXT: ; implicit-def: $sgpr14
-; GFX11-NEXT: ; implicit-def: $sgpr5
-; GFX11-NEXT: s_branch .LBB49_2
-; GFX11-NEXT: .LBB49_4:
-; GFX11-NEXT: v_dual_mov_b32 v0, s0 :: v_dual_mov_b32 v19, s2
-; GFX11-NEXT: v_dual_mov_b32 v5, s16 :: v_dual_mov_b32 v10, s20
-; GFX11-NEXT: v_dual_mov_b32 v3, s18 :: v_dual_mov_b32 v8, s22
-; GFX11-NEXT: v_dual_mov_b32 v17, s24 :: v_dual_mov_b32 v20, s3
-; GFX11-NEXT: v_dual_mov_b32 v15, s26 :: v_dual_mov_b32 v6, s17
-; GFX11-NEXT: v_dual_mov_b32 v13, s28 :: v_dual_mov_b32 v4, s19
-; GFX11-NEXT: v_dual_mov_b32 v1, s1 :: v_dual_mov_b32 v18, s25
-; GFX11-NEXT: v_dual_mov_b32 v11, s21 :: v_dual_mov_b32 v16, s27
-; GFX11-NEXT: v_dual_mov_b32 v9, s23 :: v_dual_mov_b32 v14, s29
-; GFX11-NEXT: v_dual_mov_b32 v34, s46 :: v_dual_mov_b32 v7, s43
-; GFX11-NEXT: v_dual_mov_b32 v2, s45 :: v_dual_mov_b32 v27, s42
-; GFX11-NEXT: v_dual_mov_b32 v30, s44 :: v_dual_mov_b32 v21, s14
-; GFX11-NEXT: v_dual_mov_b32 v26, s41 :: v_dual_mov_b32 v35, s13
-; GFX11-NEXT: v_dual_mov_b32 v12, s40 :: v_dual_mov_b32 v33, s12
-; GFX11-NEXT: v_dual_mov_b32 v22, s15 :: v_dual_mov_b32 v31, s10
-; GFX11-NEXT: v_dual_mov_b32 v32, s11 :: v_dual_mov_b32 v29, s9
-; GFX11-NEXT: v_dual_mov_b32 v28, s8 :: v_dual_mov_b32 v25, s7
-; GFX11-NEXT: v_dual_mov_b32 v24, s6 :: v_dual_mov_b32 v23, s5
-; GFX11-NEXT: .LBB49_5: ; %end
-; GFX11-NEXT: v_and_b32_e32 v19, 0xffff, v19
-; GFX11-NEXT: v_and_b32_e32 v0, 0xffff, v0
-; GFX11-NEXT: v_and_b32_e32 v5, 0xffff, v5
-; GFX11-NEXT: v_and_b32_e32 v6, 0xffff, v6
-; GFX11-NEXT: v_and_b32_e32 v10, 0xffff, v10
-; GFX11-NEXT: v_lshl_or_b32 v2, v2, 16, v19
-; GFX11-NEXT: v_and_b32_e32 v19, 0xffff, v20
-; GFX11-NEXT: v_and_b32_e32 v20, 0xffff, v3
-; GFX11-NEXT: v_and_b32_e32 v17, 0xffff, v17
-; GFX11-NEXT: v_and_b32_e32 v1, 0xffff, v1
-; GFX11-NEXT: v_lshl_or_b32 v0, v34, 16, v0
-; GFX11-NEXT: v_lshl_or_b32 v3, v33, 16, v19
-; GFX11-NEXT: v_and_b32_e32 v19, 0xffff, v8
-; GFX11-NEXT: v_and_b32_e32 v34, 0xffff, v4
-; GFX11-NEXT: v_lshl_or_b32 v4, v30, 16, v5
-; GFX11-NEXT: v_lshl_or_b32 v5, v32, 16, v6
-; GFX11-NEXT: v_lshl_or_b32 v6, v7, 16, v20
-; GFX11-NEXT: v_and_b32_e32 v11, 0xffff, v11
-; GFX11-NEXT: v_and_b32_e32 v20, 0xffff, v9
-; GFX11-NEXT: v_lshl_or_b32 v8, v27, 16, v10
-; GFX11-NEXT: v_lshl_or_b32 v10, v26, 16, v19
-; GFX11-NEXT: v_lshl_or_b32 v12, v12, 16, v17
-; GFX11-NEXT: v_and_b32_e32 v17, 0xffff, v18
-; GFX11-NEXT: v_and_b32_e32 v15, 0xffff, v15
-; GFX11-NEXT: v_and_b32_e32 v16, 0xffff, v16
-; GFX11-NEXT: v_and_b32_e32 v18, 0xffff, v13
-; GFX11-NEXT: v_and_b32_e32 v19, 0xffff, v14
-; GFX11-NEXT: v_lshl_or_b32 v1, v35, 16, v1
-; GFX11-NEXT: v_lshl_or_b32 v7, v31, 16, v34
-; GFX11-NEXT: v_lshl_or_b32 v9, v29, 16, v11
-; GFX11-NEXT: v_lshl_or_b32 v11, v28, 16, v20
-; GFX11-NEXT: v_lshl_or_b32 v13, v25, 16, v17
-; GFX11-NEXT: v_lshl_or_b32 v14, v22, 16, v15
-; GFX11-NEXT: v_lshl_or_b32 v15, v24, 16, v16
-; GFX11-NEXT: v_lshl_or_b32 v16, v21, 16, v18
-; GFX11-NEXT: v_lshl_or_b32 v17, v23, 16, v19
-; GFX11-NEXT: s_setpc_b64 s[30:31]
+; GFX11-TRUE16-LABEL: bitcast_v9f64_to_v36i16_scalar:
+; GFX11-TRUE16: ; %bb.0:
+; GFX11-TRUE16-NEXT: s_waitcnt vmcnt(0) expcnt(0) lgkmcnt(0)
+; GFX11-TRUE16-NEXT: v_cmp_ne_u32_e32 vcc_lo, 0, v0
+; GFX11-TRUE16-NEXT: s_mov_b32 s4, 0
+; GFX11-TRUE16-NEXT: s_and_b32 s5, vcc_lo, exec_lo
+; GFX11-TRUE16-NEXT: s_cbranch_scc0 .LBB49_3
+; GFX11-TRUE16-NEXT: ; %bb.1: ; %cmp.false
+; GFX11-TRUE16-NEXT: s_lshr_b32 s5, s29, 16
+; GFX11-TRUE16-NEXT: s_lshr_b32 s14, s28, 16
+; GFX11-TRUE16-NEXT: s_lshr_b32 s6, s27, 16
+; GFX11-TRUE16-NEXT: s_lshr_b32 s15, s26, 16
+; GFX11-TRUE16-NEXT: s_lshr_b32 s7, s25, 16
+; GFX11-TRUE16-NEXT: s_lshr_b32 s40, s24, 16
+; GFX11-TRUE16-NEXT: s_lshr_b32 s8, s23, 16
+; GFX11-TRUE16-NEXT: s_lshr_b32 s41, s22, 16
+; GFX11-TRUE16-NEXT: s_lshr_b32 s9, s21, 16
+; GFX11-TRUE16-NEXT: s_lshr_b32 s42, s20, 16
+; GFX11-TRUE16-NEXT: s_lshr_b32 s10, s19, 16
+; GFX11-TRUE16-NEXT: s_lshr_b32 s43, s18, 16
+; GFX11-TRUE16-NEXT: s_lshr_b32 s11, s17, 16
+; GFX11-TRUE16-NEXT: s_lshr_b32 s44, s16, 16
+; GFX11-TRUE16-NEXT: s_lshr_b32 s12, s3, 16
+; GFX11-TRUE16-NEXT: s_lshr_b32 s45, s2, 16
+; GFX11-TRUE16-NEXT: s_lshr_b32 s13, s1, 16
+; GFX11-TRUE16-NEXT: s_lshr_b32 s46, s0, 16
+; GFX11-TRUE16-NEXT: s_and_not1_b32 vcc_lo, exec_lo, s4
+; GFX11-TRUE16-NEXT: s_cbranch_vccnz .LBB49_4
+; GFX11-TRUE16-NEXT: .LBB49_2: ; %cmp.true
+; GFX11-TRUE16-NEXT: v_add_f64 v[16:17], s[28:29], 1.0
+; GFX11-TRUE16-NEXT: v_add_f64 v[14:15], s[26:27], 1.0
+; GFX11-TRUE16-NEXT: v_add_f64 v[12:13], s[24:25], 1.0
+; GFX11-TRUE16-NEXT: v_add_f64 v[10:11], s[22:23], 1.0
+; GFX11-TRUE16-NEXT: v_add_f64 v[8:9], s[20:21], 1.0
+; GFX11-TRUE16-NEXT: v_add_f64 v[6:7], s[18:19], 1.0
+; GFX11-TRUE16-NEXT: v_add_f64 v[4:5], s[16:17], 1.0
+; GFX11-TRUE16-NEXT: v_add_f64 v[2:3], s[2:3], 1.0
+; GFX11-TRUE16-NEXT: v_add_f64 v[0:1], s[0:1], 1.0
+; GFX11-TRUE16-NEXT: v_lshrrev_b32_e32 v19, 16, v17
+; GFX11-TRUE16-NEXT: v_lshrrev_b32_e32 v18, 16, v16
+; GFX11-TRUE16-NEXT: v_lshrrev_b32_e32 v21, 16, v15
+; GFX11-TRUE16-NEXT: v_lshrrev_b32_e32 v20, 16, v14
+; GFX11-TRUE16-NEXT: v_lshrrev_b32_e32 v23, 16, v13
+; GFX11-TRUE16-NEXT: v_lshrrev_b32_e32 v22, 16, v12
+; GFX11-TRUE16-NEXT: v_lshrrev_b32_e32 v25, 16, v11
+; GFX11-TRUE16-NEXT: v_lshrrev_b32_e32 v24, 16, v10
+; GFX11-TRUE16-NEXT: v_lshrrev_b32_e32 v27, 16, v9
+; GFX11-TRUE16-NEXT: v_lshrrev_b32_e32 v26, 16, v8
+; GFX11-TRUE16-NEXT: v_lshrrev_b32_e32 v29, 16, v7
+; GFX11-TRUE16-NEXT: v_lshrrev_b32_e32 v28, 16, v6
+; GFX11-TRUE16-NEXT: v_lshrrev_b32_e32 v31, 16, v5
+; GFX11-TRUE16-NEXT: v_lshrrev_b32_e32 v30, 16, v4
+; GFX11-TRUE16-NEXT: v_lshrrev_b32_e32 v33, 16, v3
+; GFX11-TRUE16-NEXT: v_lshrrev_b32_e32 v32, 16, v2
+; GFX11-TRUE16-NEXT: v_lshrrev_b32_e32 v35, 16, v1
+; GFX11-TRUE16-NEXT: v_lshrrev_b32_e32 v34, 16, v0
+; GFX11-TRUE16-NEXT: s_branch .LBB49_5
+; GFX11-TRUE16-NEXT: .LBB49_3:
+; GFX11-TRUE16-NEXT: ; implicit-def: $sgpr46
+; GFX11-TRUE16-NEXT: ; implicit-def: $sgpr13
+; GFX11-TRUE16-NEXT: ; implicit-def: $sgpr45
+; GFX11-TRUE16-NEXT: ; implicit-def: $sgpr12
+; GFX11-TRUE16-NEXT: ; implicit-def: $sgpr44
+; GFX11-TRUE16-NEXT: ; implicit-def: $sgpr11
+; GFX11-TRUE16-NEXT: ; implicit-def: $sgpr43
+; GFX11-TRUE16-NEXT: ; implicit-def: $sgpr10
+; GFX11-TRUE16-NEXT: ; implicit-def: $sgpr42
+; GFX11-TRUE16-NEXT: ; implicit-def: $sgpr9
+; GFX11-TRUE16-NEXT: ; implicit-def: $sgpr41
+; GFX11-TRUE16-NEXT: ; implicit-def: $sgpr8
+; GFX11-TRUE16-NEXT: ; implicit-def: $sgpr40
+; GFX11-TRUE16-NEXT: ; implicit-def: $sgpr7
+; GFX11-TRUE16-NEXT: ; implicit-def: $sgpr15
+; GFX11-TRUE16-NEXT: ; implicit-def: $sgpr6
+; GFX11-TRUE16-NEXT: ; implicit-def: $sgpr14
+; GFX11-TRUE16-NEXT: ; implicit-def: $sgpr5
+; GFX11-TRUE16-NEXT: s_branch .LBB49_2
+; GFX11-TRUE16-NEXT: .LBB49_4:
+; GFX11-TRUE16-NEXT: v_dual_mov_b32 v0, s0 :: v_dual_mov_b32 v1, s1
+; GFX11-TRUE16-NEXT: v_dual_mov_b32 v2, s2 :: v_dual_mov_b32 v3, s3
+; GFX11-TRUE16-NEXT: v_dual_mov_b32 v4, s16 :: v_dual_mov_b32 v5, s17
+; GFX11-TRUE16-NEXT: v_dual_mov_b32 v6, s18 :: v_dual_mov_b32 v7, s19
+; GFX11-TRUE16-NEXT: v_dual_mov_b32 v8, s20 :: v_dual_mov_b32 v9, s21
+; GFX11-TRUE16-NEXT: v_dual_mov_b32 v10, s22 :: v_dual_mov_b32 v11, s23
+; GFX11-TRUE16-NEXT: v_dual_mov_b32 v12, s24 :: v_dual_mov_b32 v13, s25
+; GFX11-TRUE16-NEXT: v_dual_mov_b32 v14, s26 :: v_dual_mov_b32 v15, s27
+; GFX11-TRUE16-NEXT: v_dual_mov_b32 v16, s28 :: v_dual_mov_b32 v17, s29
+; GFX11-TRUE16-NEXT: v_dual_mov_b32 v34, s46 :: v_dual_mov_b32 v35, s13
+; GFX11-TRUE16-NEXT: v_dual_mov_b32 v32, s45 :: v_dual_mov_b32 v33, s12
+; GFX11-TRUE16-NEXT: v_dual_mov_b32 v30, s44 :: v_dual_mov_b32 v31, s11
+; GFX11-TRUE16-NEXT: v_dual_mov_b32 v28, s43 :: v_dual_mov_b32 v29, s10
+; GFX11-TRUE16-NEXT: v_dual_mov_b32 v26, s42 :: v_dual_mov_b32 v27, s9
+; GFX11-TRUE16-NEXT: v_dual_mov_b32 v24, s41 :: v_dual_mov_b32 v25, s8
+; GFX11-TRUE16-NEXT: v_dual_mov_b32 v22, s40 :: v_dual_mov_b32 v23, s7
+; GFX11-TRUE16-NEXT: v_dual_mov_b32 v20, s15 :: v_dual_mov_b32 v21, s6
+; GFX11-TRUE16-NEXT: v_dual_mov_b32 v18, s14 :: v_dual_mov_b32 v19, s5
+; GFX11-TRUE16-NEXT: .LBB49_5: ; %end
+; GFX11-TRUE16-NEXT: s_delay_alu instid0(VALU_DEP_3) | instskip(NEXT) | instid1(VALU_DEP_4)
+; GFX11-TRUE16-NEXT: v_dual_mov_b32 v32, v32 :: v_dual_mov_b32 v31, v31
+; GFX11-TRUE16-NEXT: v_mov_b32_e32 v22, v22
+; GFX11-TRUE16-NEXT: s_delay_alu instid0(VALU_DEP_3) | instskip(NEXT) | instid1(VALU_DEP_3)
+; GFX11-TRUE16-NEXT: v_dual_mov_b32 v34, v34 :: v_dual_mov_b32 v35, v35
+; GFX11-TRUE16-NEXT: v_mov_b16_e32 v2.h, v32.l
+; GFX11-TRUE16-NEXT: v_dual_mov_b32 v32, v33 :: v_dual_mov_b32 v27, v27
+; GFX11-TRUE16-NEXT: v_dual_mov_b32 v30, v30 :: v_dual_mov_b32 v29, v29
+; GFX11-TRUE16-NEXT: v_dual_mov_b32 v28, v28 :: v_dual_mov_b32 v25, v25
+; GFX11-TRUE16-NEXT: v_dual_mov_b32 v26, v26 :: v_dual_mov_b32 v21, v21
+; GFX11-TRUE16-NEXT: v_dual_mov_b32 v24, v24 :: v_dual_mov_b32 v19, v19
+; GFX11-TRUE16-NEXT: v_mov_b16_e32 v12.h, v22.l
+; GFX11-TRUE16-NEXT: v_mov_b32_e32 v22, v23
+; GFX11-TRUE16-NEXT: v_mov_b32_e32 v20, v20
+; GFX11-TRUE16-NEXT: v_mov_b32_e32 v18, v18
+; GFX11-TRUE16-NEXT: v_mov_b16_e32 v0.h, v34.l
+; GFX11-TRUE16-NEXT: v_mov_b16_e32 v1.h, v35.l
+; GFX11-TRUE16-NEXT: v_mov_b16_e32 v3.h, v32.l
+; GFX11-TRUE16-NEXT: v_mov_b16_e32 v4.h, v30.l
+; GFX11-TRUE16-NEXT: v_mov_b16_e32 v5.h, v31.l
+; GFX11-TRUE16-NEXT: v_mov_b16_e32 v6.h, v28.l
+; GFX11-TRUE16-NEXT: v_mov_b16_e32 v7.h, v29.l
+; GFX11-TRUE16-NEXT: v_mov_b16_e32 v8.h, v26.l
+; GFX11-TRUE16-NEXT: v_mov_b16_e32 v9.h, v27.l
+; GFX11-TRUE16-NEXT: v_mov_b16_e32 v10.h, v24.l
+; GFX11-TRUE16-NEXT: v_mov_b16_e32 v11.h, v25.l
+; GFX11-TRUE16-NEXT: v_mov_b16_e32 v13.h, v22.l
+; GFX11-TRUE16-NEXT: v_mov_b16_e32 v14.h, v20.l
+; GFX11-TRUE16-NEXT: v_mov_b16_e32 v15.h, v21.l
+; GFX11-TRUE16-NEXT: v_mov_b16_e32 v16.h, v18.l
+; GFX11-TRUE16-NEXT: v_mov_b16_e32 v17.h, v19.l
+; GFX11-TRUE16-NEXT: s_setpc_b64 s[30:31]
+;
+; GFX11-FAKE16-LABEL: bitcast_v9f64_to_v36i16_scalar:
+; GFX11-FAKE16: ; %bb.0:
+; GFX11-FAKE16-NEXT: s_waitcnt vmcnt(0) expcnt(0) lgkmcnt(0)
+; GFX11-FAKE16-NEXT: v_cmp_ne_u32_e32 vcc_lo, 0, v0
+; GFX11-FAKE16-NEXT: s_mov_b32 s4, 0
+; GFX11-FAKE16-NEXT: s_and_b32 s5, vcc_lo, exec_lo
+; GFX11-FAKE16-NEXT: s_cbranch_scc0 .LBB49_3
+; GFX11-FAKE16-NEXT: ; %bb.1: ; %cmp.false
+; GFX11-FAKE16-NEXT: s_lshr_b32 s5, s29, 16
+; GFX11-FAKE16-NEXT: s_lshr_b32 s14, s28, 16
+; GFX11-FAKE16-NEXT: s_lshr_b32 s6, s27, 16
+; GFX11-FAKE16-NEXT: s_lshr_b32 s15, s26, 16
+; GFX11-FAKE16-NEXT: s_lshr_b32 s7, s25, 16
+; GFX11-FAKE16-NEXT: s_lshr_b32 s40, s24, 16
+; GFX11-FAKE16-NEXT: s_lshr_b32 s8, s23, 16
+; GFX11-FAKE16-NEXT: s_lshr_b32 s41, s22, 16
+; GFX11-FAKE16-NEXT: s_lshr_b32 s9, s21, 16
+; GFX11-FAKE16-NEXT: s_lshr_b32 s42, s20, 16
+; GFX11-FAKE16-NEXT: s_lshr_b32 s10, s19, 16
+; GFX11-FAKE16-NEXT: s_lshr_b32 s43, s18, 16
+; GFX11-FAKE16-NEXT: s_lshr_b32 s11, s17, 16
+; GFX11-FAKE16-NEXT: s_lshr_b32 s44, s16, 16
+; GFX11-FAKE16-NEXT: s_lshr_b32 s12, s3, 16
+; GFX11-FAKE16-NEXT: s_lshr_b32 s45, s2, 16
+; GFX11-FAKE16-NEXT: s_lshr_b32 s13, s1, 16
+; GFX11-FAKE16-NEXT: s_lshr_b32 s46, s0, 16
+; GFX11-FAKE16-NEXT: s_and_not1_b32 vcc_lo, exec_lo, s4
+; GFX11-FAKE16-NEXT: s_cbranch_vccnz .LBB49_4
+; GFX11-FAKE16-NEXT: .LBB49_2: ; %cmp.true
+; GFX11-FAKE16-NEXT: v_add_f64 v[13:14], s[28:29], 1.0
+; GFX11-FAKE16-NEXT: v_add_f64 v[15:16], s[26:27], 1.0
+; GFX11-FAKE16-NEXT: v_add_f64 v[17:18], s[24:25], 1.0
+; GFX11-FAKE16-NEXT: v_add_f64 v[8:9], s[22:23], 1.0
+; GFX11-FAKE16-NEXT: v_add_f64 v[10:11], s[20:21], 1.0
+; GFX11-FAKE16-NEXT: v_add_f64 v[3:4], s[18:19], 1.0
+; GFX11-FAKE16-NEXT: v_add_f64 v[5:6], s[16:17], 1.0
+; GFX11-FAKE16-NEXT: v_add_f64 v[19:20], s[2:3], 1.0
+; GFX11-FAKE16-NEXT: v_add_f64 v[0:1], s[0:1], 1.0
+; GFX11-FAKE16-NEXT: v_lshrrev_b32_e32 v23, 16, v14
+; GFX11-FAKE16-NEXT: v_lshrrev_b32_e32 v21, 16, v13
+; GFX11-FAKE16-NEXT: v_lshrrev_b32_e32 v24, 16, v16
+; GFX11-FAKE16-NEXT: v_lshrrev_b32_e32 v22, 16, v15
+; GFX11-FAKE16-NEXT: v_lshrrev_b32_e32 v25, 16, v18
+; GFX11-FAKE16-NEXT: v_lshrrev_b32_e32 v12, 16, v17
+; GFX11-FAKE16-NEXT: v_lshrrev_b32_e32 v28, 16, v9
+; GFX11-FAKE16-NEXT: v_lshrrev_b32_e32 v26, 16, v8
+; GFX11-FAKE16-NEXT: v_lshrrev_b32_e32 v29, 16, v11
+; GFX11-FAKE16-NEXT: v_lshrrev_b32_e32 v27, 16, v10
+; GFX11-FAKE16-NEXT: v_lshrrev_b32_e32 v31, 16, v4
+; GFX11-FAKE16-NEXT: v_lshrrev_b32_e32 v7, 16, v3
+; GFX11-FAKE16-NEXT: v_lshrrev_b32_e32 v32, 16, v6
+; GFX11-FAKE16-NEXT: v_lshrrev_b32_e32 v30, 16, v5
+; GFX11-FAKE16-NEXT: v_lshrrev_b32_e32 v33, 16, v20
+; GFX11-FAKE16-NEXT: v_lshrrev_b32_e32 v2, 16, v19
+; GFX11-FAKE16-NEXT: v_lshrrev_b32_e32 v35, 16, v1
+; GFX11-FAKE16-NEXT: v_lshrrev_b32_e32 v34, 16, v0
+; GFX11-FAKE16-NEXT: s_branch .LBB49_5
+; GFX11-FAKE16-NEXT: .LBB49_3:
+; GFX11-FAKE16-NEXT: ; implicit-def: $sgpr46
+; GFX11-FAKE16-NEXT: ; implicit-def: $sgpr13
+; GFX11-FAKE16-NEXT: ; implicit-def: $sgpr45
+; GFX11-FAKE16-NEXT: ; implicit-def: $sgpr12
+; GFX11-FAKE16-NEXT: ; implicit-def: $sgpr44
+; GFX11-FAKE16-NEXT: ; implicit-def: $sgpr11
+; GFX11-FAKE16-NEXT: ; implicit-def: $sgpr43
+; GFX11-FAKE16-NEXT: ; implicit-def: $sgpr10
+; GFX11-FAKE16-NEXT: ; implicit-def: $sgpr42
+; GFX11-FAKE16-NEXT: ; implicit-def: $sgpr9
+; GFX11-FAKE16-NEXT: ; implicit-def: $sgpr41
+; GFX11-FAKE16-NEXT: ; implicit-def: $sgpr8
+; GFX11-FAKE16-NEXT: ; implicit-def: $sgpr40
+; GFX11-FAKE16-NEXT: ; implicit-def: $sgpr7
+; GFX11-FAKE16-NEXT: ; implicit-def: $sgpr15
+; GFX11-FAKE16-NEXT: ; implicit-def: $sgpr6
+; GFX11-FAKE16-NEXT: ; implicit-def: $sgpr14
+; GFX11-FAKE16-NEXT: ; implicit-def: $sgpr5
+; GFX11-FAKE16-NEXT: s_branch .LBB49_2
+; GFX11-FAKE16-NEXT: .LBB49_4:
+; GFX11-FAKE16-NEXT: v_dual_mov_b32 v0, s0 :: v_dual_mov_b32 v19, s2
+; GFX11-FAKE16-NEXT: v_dual_mov_b32 v5, s16 :: v_dual_mov_b32 v10, s20
+; GFX11-FAKE16-NEXT: v_dual_mov_b32 v3, s18 :: v_dual_mov_b32 v8, s22
+; GFX11-FAKE16-NEXT: v_dual_mov_b32 v17, s24 :: v_dual_mov_b32 v20, s3
+; GFX11-FAKE16-NEXT: v_dual_mov_b32 v15, s26 :: v_dual_mov_b32 v6, s17
+; GFX11-FAKE16-NEXT: v_dual_mov_b32 v13, s28 :: v_dual_mov_b32 v4, s19
+; GFX11-FAKE16-NEXT: v_dual_mov_b32 v1, s1 :: v_dual_mov_b32 v18, s25
+; GFX11-FAKE16-NEXT: v_dual_mov_b32 v11, s21 :: v_dual_mov_b32 v16, s27
+; GFX11-FAKE16-NEXT: v_dual_mov_b32 v9, s23 :: v_dual_mov_b32 v14, s29
+; GFX11-FAKE16-NEXT: v_dual_mov_b32 v34, s46 :: v_dual_mov_b32 v7, s43
+; GFX11-FAKE16-NEXT: v_dual_mov_b32 v2, s45 :: v_dual_mov_b32 v27, s42
+; GFX11-FAKE16-NEXT: v_dual_mov_b32 v30, s44 :: v_dual_mov_b32 v21, s14
+; GFX11-FAKE16-NEXT: v_dual_mov_b32 v26, s41 :: v_dual_mov_b32 v35, s13
+; GFX11-FAKE16-NEXT: v_dual_mov_b32 v12, s40 :: v_dual_mov_b32 v33, s12
+; GFX11-FAKE16-NEXT: v_dual_mov_b32 v22, s15 :: v_dual_mov_b32 v31, s10
+; GFX11-FAKE16-NEXT: v_dual_mov_b32 v32, s11 :: v_dual_mov_b32 v29, s9
+; GFX11-FAKE16-NEXT: v_dual_mov_b32 v28, s8 :: v_dual_mov_b32 v25, s7
+; GFX11-FAKE16-NEXT: v_dual_mov_b32 v24, s6 :: v_dual_mov_b32 v23, s5
+; GFX11-FAKE16-NEXT: .LBB49_5: ; %end
+; GFX11-FAKE16-NEXT: v_and_b32_e32 v19, 0xffff, v19
+; GFX11-FAKE16-NEXT: v_and_b32_e32 v0, 0xffff, v0
+; GFX11-FAKE16-NEXT: v_and_b32_e32 v5, 0xffff, v5
+; GFX11-FAKE16-NEXT: v_and_b32_e32 v6, 0xffff, v6
+; GFX11-FAKE16-NEXT: v_and_b32_e32 v10, 0xffff, v10
+; GFX11-FAKE16-NEXT: v_lshl_or_b32 v2, v2, 16, v19
+; GFX11-FAKE16-NEXT: v_and_b32_e32 v19, 0xffff, v20
+; GFX11-FAKE16-NEXT: v_and_b32_e32 v20, 0xffff, v3
+; GFX11-FAKE16-NEXT: v_and_b32_e32 v17, 0xffff, v17
+; GFX11-FAKE16-NEXT: v_and_b32_e32 v1, 0xffff, v1
+; GFX11-FAKE16-NEXT: v_lshl_or_b32 v0, v34, 16, v0
+; GFX11-FAKE16-NEXT: v_lshl_or_b32 v3, v33, 16, v19
+; GFX11-FAKE16-NEXT: v_and_b32_e32 v19, 0xffff, v8
+; GFX11-FAKE16-NEXT: v_and_b32_e32 v34, 0xffff, v4
+; GFX11-FAKE16-NEXT: v_lshl_or_b32 v4, v30, 16, v5
+; GFX11-FAKE16-NEXT: v_lshl_or_b32 v5, v32, 16, v6
+; GFX11-FAKE16-NEXT: v_lshl_or_b32 v6, v7, 16, v20
+; GFX11-FAKE16-NEXT: v_and_b32_e32 v11, 0xffff, v11
+; GFX11-FAKE16-NEXT: v_and_b32_e32 v20, 0xffff, v9
+; GFX11-FAKE16-NEXT: v_lshl_or_b32 v8, v27, 16, v10
+; GFX11-FAKE16-NEXT: v_lshl_or_b32 v10, v26, 16, v19
+; GFX11-FAKE16-NEXT: v_lshl_or_b32 v12, v12, 16, v17
+; GFX11-FAKE16-NEXT: v_and_b32_e32 v17, 0xffff, v18
+; GFX11-FAKE16-NEXT: v_and_b32_e32 v15, 0xffff, v15
+; GFX11-FAKE16-NEXT: v_and_b32_e32 v16, 0xffff, v16
+; GFX11-FAKE16-NEXT: v_and_b32_e32 v18, 0xffff, v13
+; GFX11-FAKE16-NEXT: v_and_b32_e32 v19, 0xffff, v14
+; GFX11-FAKE16-NEXT: v_lshl_or_b32 v1, v35, 16, v1
+; GFX11-FAKE16-NEXT: v_lshl_or_b32 v7, v31, 16, v34
+; GFX11-FAKE16-NEXT: v_lshl_or_b32 v9, v29, 16, v11
+; GFX11-FAKE16-NEXT: v_lshl_or_b32 v11, v28, 16, v20
+; GFX11-FAKE16-NEXT: v_lshl_or_b32 v13, v25, 16, v17
+; GFX11-FAKE16-NEXT: v_lshl_or_b32 v14, v22, 16, v15
+; GFX11-FAKE16-NEXT: v_lshl_or_b32 v15, v24, 16, v16
+; GFX11-FAKE16-NEXT: v_lshl_or_b32 v16, v21, 16, v18
+; GFX11-FAKE16-NEXT: v_lshl_or_b32 v17, v23, 16, v19
+; GFX11-FAKE16-NEXT: s_setpc_b64 s[30:31]
%cmp = icmp eq i32 %b, 0
br i1 %cmp, label %cmp.true, label %cmp.false
@@ -24595,140 +24997,270 @@ define inreg <36 x half> @bitcast_v9f64_to_v36f16_scalar(<9 x double> inreg %a,
; GFX9-NEXT: ; implicit-def: $vgpr22
; GFX9-NEXT: s_branch .LBB53_2
;
-; GFX11-LABEL: bitcast_v9f64_to_v36f16_scalar:
-; GFX11: ; %bb.0:
-; GFX11-NEXT: s_waitcnt vmcnt(0) expcnt(0) lgkmcnt(0)
-; GFX11-NEXT: v_cmp_ne_u32_e32 vcc_lo, 0, v0
-; GFX11-NEXT: s_mov_b32 s4, 0
-; GFX11-NEXT: s_and_b32 s5, vcc_lo, exec_lo
-; GFX11-NEXT: s_cbranch_scc0 .LBB53_3
-; GFX11-NEXT: ; %bb.1: ; %cmp.false
-; GFX11-NEXT: s_lshr_b32 s5, s29, 16
-; GFX11-NEXT: s_lshr_b32 s14, s28, 16
-; GFX11-NEXT: s_lshr_b32 s6, s27, 16
-; GFX11-NEXT: s_lshr_b32 s15, s26, 16
-; GFX11-NEXT: s_lshr_b32 s7, s25, 16
-; GFX11-NEXT: s_lshr_b32 s40, s24, 16
-; GFX11-NEXT: s_lshr_b32 s8, s23, 16
-; GFX11-NEXT: s_lshr_b32 s41, s22, 16
-; GFX11-NEXT: s_lshr_b32 s9, s21, 16
-; GFX11-NEXT: s_lshr_b32 s42, s20, 16
-; GFX11-NEXT: s_lshr_b32 s10, s19, 16
-; GFX11-NEXT: s_lshr_b32 s43, s18, 16
-; GFX11-NEXT: s_lshr_b32 s11, s17, 16
-; GFX11-NEXT: s_lshr_b32 s44, s16, 16
-; GFX11-NEXT: s_lshr_b32 s12, s3, 16
-; GFX11-NEXT: s_lshr_b32 s45, s2, 16
-; GFX11-NEXT: s_lshr_b32 s13, s1, 16
-; GFX11-NEXT: s_lshr_b32 s46, s0, 16
-; GFX11-NEXT: s_and_not1_b32 vcc_lo, exec_lo, s4
-; GFX11-NEXT: s_cbranch_vccnz .LBB53_4
-; GFX11-NEXT: .LBB53_2: ; %cmp.true
-; GFX11-NEXT: v_add_f64 v[13:14], s[28:29], 1.0
-; GFX11-NEXT: v_add_f64 v[15:16], s[26:27], 1.0
-; GFX11-NEXT: v_add_f64 v[17:18], s[24:25], 1.0
-; GFX11-NEXT: v_add_f64 v[8:9], s[22:23], 1.0
-; GFX11-NEXT: v_add_f64 v[10:11], s[20:21], 1.0
-; GFX11-NEXT: v_add_f64 v[3:4], s[18:19], 1.0
-; GFX11-NEXT: v_add_f64 v[5:6], s[16:17], 1.0
-; GFX11-NEXT: v_add_f64 v[19:20], s[2:3], 1.0
-; GFX11-NEXT: v_add_f64 v[0:1], s[0:1], 1.0
-; GFX11-NEXT: v_lshrrev_b32_e32 v23, 16, v14
-; GFX11-NEXT: v_lshrrev_b32_e32 v21, 16, v13
-; GFX11-NEXT: v_lshrrev_b32_e32 v24, 16, v16
-; GFX11-NEXT: v_lshrrev_b32_e32 v22, 16, v15
-; GFX11-NEXT: v_lshrrev_b32_e32 v25, 16, v18
-; GFX11-NEXT: v_lshrrev_b32_e32 v12, 16, v17
-; GFX11-NEXT: v_lshrrev_b32_e32 v28, 16, v9
-; GFX11-NEXT: v_lshrrev_b32_e32 v26, 16, v8
-; GFX11-NEXT: v_lshrrev_b32_e32 v29, 16, v11
-; GFX11-NEXT: v_lshrrev_b32_e32 v27, 16, v10
-; GFX11-NEXT: v_lshrrev_b32_e32 v31, 16, v4
-; GFX11-NEXT: v_lshrrev_b32_e32 v7, 16, v3
-; GFX11-NEXT: v_lshrrev_b32_e32 v32, 16, v6
-; GFX11-NEXT: v_lshrrev_b32_e32 v30, 16, v5
-; GFX11-NEXT: v_lshrrev_b32_e32 v33, 16, v20
-; GFX11-NEXT: v_lshrrev_b32_e32 v2, 16, v19
-; GFX11-NEXT: v_lshrrev_b32_e32 v35, 16, v1
-; GFX11-NEXT: v_lshrrev_b32_e32 v34, 16, v0
-; GFX11-NEXT: s_branch .LBB53_5
-; GFX11-NEXT: .LBB53_3:
-; GFX11-NEXT: ; implicit-def: $sgpr46
-; GFX11-NEXT: ; implicit-def: $sgpr13
-; GFX11-NEXT: ; implicit-def: $sgpr45
-; GFX11-NEXT: ; implicit-def: $sgpr12
-; GFX11-NEXT: ; implicit-def: $sgpr44
-; GFX11-NEXT: ; implicit-def: $sgpr11
-; GFX11-NEXT: ; implicit-def: $sgpr43
-; GFX11-NEXT: ; implicit-def: $sgpr10
-; GFX11-NEXT: ; implicit-def: $sgpr42
-; GFX11-NEXT: ; implicit-def: $sgpr9
-; GFX11-NEXT: ; implicit-def: $sgpr41
-; GFX11-NEXT: ; implicit-def: $sgpr8
-; GFX11-NEXT: ; implicit-def: $sgpr40
-; GFX11-NEXT: ; implicit-def: $sgpr7
-; GFX11-NEXT: ; implicit-def: $sgpr15
-; GFX11-NEXT: ; implicit-def: $sgpr6
-; GFX11-NEXT: ; implicit-def: $sgpr14
-; GFX11-NEXT: ; implicit-def: $sgpr5
-; GFX11-NEXT: s_branch .LBB53_2
-; GFX11-NEXT: .LBB53_4:
-; GFX11-NEXT: v_dual_mov_b32 v0, s0 :: v_dual_mov_b32 v19, s2
-; GFX11-NEXT: v_dual_mov_b32 v5, s16 :: v_dual_mov_b32 v10, s20
-; GFX11-NEXT: v_dual_mov_b32 v3, s18 :: v_dual_mov_b32 v8, s22
-; GFX11-NEXT: v_dual_mov_b32 v17, s24 :: v_dual_mov_b32 v20, s3
-; GFX11-NEXT: v_dual_mov_b32 v15, s26 :: v_dual_mov_b32 v6, s17
-; GFX11-NEXT: v_dual_mov_b32 v13, s28 :: v_dual_mov_b32 v4, s19
-; GFX11-NEXT: v_dual_mov_b32 v1, s1 :: v_dual_mov_b32 v18, s25
-; GFX11-NEXT: v_dual_mov_b32 v11, s21 :: v_dual_mov_b32 v16, s27
-; GFX11-NEXT: v_dual_mov_b32 v9, s23 :: v_dual_mov_b32 v14, s29
-; GFX11-NEXT: v_dual_mov_b32 v34, s46 :: v_dual_mov_b32 v7, s43
-; GFX11-NEXT: v_dual_mov_b32 v2, s45 :: v_dual_mov_b32 v27, s42
-; GFX11-NEXT: v_dual_mov_b32 v30, s44 :: v_dual_mov_b32 v21, s14
-; GFX11-NEXT: v_dual_mov_b32 v26, s41 :: v_dual_mov_b32 v35, s13
-; GFX11-NEXT: v_dual_mov_b32 v12, s40 :: v_dual_mov_b32 v33, s12
-; GFX11-NEXT: v_dual_mov_b32 v22, s15 :: v_dual_mov_b32 v31, s10
-; GFX11-NEXT: v_dual_mov_b32 v32, s11 :: v_dual_mov_b32 v29, s9
-; GFX11-NEXT: v_dual_mov_b32 v28, s8 :: v_dual_mov_b32 v25, s7
-; GFX11-NEXT: v_dual_mov_b32 v24, s6 :: v_dual_mov_b32 v23, s5
-; GFX11-NEXT: .LBB53_5: ; %end
-; GFX11-NEXT: v_and_b32_e32 v19, 0xffff, v19
-; GFX11-NEXT: v_and_b32_e32 v0, 0xffff, v0
-; GFX11-NEXT: v_and_b32_e32 v5, 0xffff, v5
-; GFX11-NEXT: v_and_b32_e32 v6, 0xffff, v6
-; GFX11-NEXT: v_and_b32_e32 v10, 0xffff, v10
-; GFX11-NEXT: v_lshl_or_b32 v2, v2, 16, v19
-; GFX11-NEXT: v_and_b32_e32 v19, 0xffff, v20
-; GFX11-NEXT: v_and_b32_e32 v20, 0xffff, v3
-; GFX11-NEXT: v_and_b32_e32 v17, 0xffff, v17
-; GFX11-NEXT: v_and_b32_e32 v1, 0xffff, v1
-; GFX11-NEXT: v_lshl_or_b32 v0, v34, 16, v0
-; GFX11-NEXT: v_lshl_or_b32 v3, v33, 16, v19
-; GFX11-NEXT: v_and_b32_e32 v19, 0xffff, v8
-; GFX11-NEXT: v_and_b32_e32 v34, 0xffff, v4
-; GFX11-NEXT: v_lshl_or_b32 v4, v30, 16, v5
-; GFX11-NEXT: v_lshl_or_b32 v5, v32, 16, v6
-; GFX11-NEXT: v_lshl_or_b32 v6, v7, 16, v20
-; GFX11-NEXT: v_and_b32_e32 v11, 0xffff, v11
-; GFX11-NEXT: v_and_b32_e32 v20, 0xffff, v9
-; GFX11-NEXT: v_lshl_or_b32 v8, v27, 16, v10
-; GFX11-NEXT: v_lshl_or_b32 v10, v26, 16, v19
-; GFX11-NEXT: v_lshl_or_b32 v12, v12, 16, v17
-; GFX11-NEXT: v_and_b32_e32 v17, 0xffff, v18
-; GFX11-NEXT: v_and_b32_e32 v15, 0xffff, v15
-; GFX11-NEXT: v_and_b32_e32 v16, 0xffff, v16
-; GFX11-NEXT: v_and_b32_e32 v18, 0xffff, v13
-; GFX11-NEXT: v_and_b32_e32 v19, 0xffff, v14
-; GFX11-NEXT: v_lshl_or_b32 v1, v35, 16, v1
-; GFX11-NEXT: v_lshl_or_b32 v7, v31, 16, v34
-; GFX11-NEXT: v_lshl_or_b32 v9, v29, 16, v11
-; GFX11-NEXT: v_lshl_or_b32 v11, v28, 16, v20
-; GFX11-NEXT: v_lshl_or_b32 v13, v25, 16, v17
-; GFX11-NEXT: v_lshl_or_b32 v14, v22, 16, v15
-; GFX11-NEXT: v_lshl_or_b32 v15, v24, 16, v16
-; GFX11-NEXT: v_lshl_or_b32 v16, v21, 16, v18
-; GFX11-NEXT: v_lshl_or_b32 v17, v23, 16, v19
-; GFX11-NEXT: s_setpc_b64 s[30:31]
+; GFX11-TRUE16-LABEL: bitcast_v9f64_to_v36f16_scalar:
+; GFX11-TRUE16: ; %bb.0:
+; GFX11-TRUE16-NEXT: s_waitcnt vmcnt(0) expcnt(0) lgkmcnt(0)
+; GFX11-TRUE16-NEXT: v_cmp_ne_u32_e32 vcc_lo, 0, v0
+; GFX11-TRUE16-NEXT: s_mov_b32 s4, 0
+; GFX11-TRUE16-NEXT: s_and_b32 s5, vcc_lo, exec_lo
+; GFX11-TRUE16-NEXT: s_cbranch_scc0 .LBB53_3
+; GFX11-TRUE16-NEXT: ; %bb.1: ; %cmp.false
+; GFX11-TRUE16-NEXT: s_lshr_b32 s5, s29, 16
+; GFX11-TRUE16-NEXT: s_lshr_b32 s14, s28, 16
+; GFX11-TRUE16-NEXT: s_lshr_b32 s6, s27, 16
+; GFX11-TRUE16-NEXT: s_lshr_b32 s15, s26, 16
+; GFX11-TRUE16-NEXT: s_lshr_b32 s7, s25, 16
+; GFX11-TRUE16-NEXT: s_lshr_b32 s40, s24, 16
+; GFX11-TRUE16-NEXT: s_lshr_b32 s8, s23, 16
+; GFX11-TRUE16-NEXT: s_lshr_b32 s41, s22, 16
+; GFX11-TRUE16-NEXT: s_lshr_b32 s9, s21, 16
+; GFX11-TRUE16-NEXT: s_lshr_b32 s42, s20, 16
+; GFX11-TRUE16-NEXT: s_lshr_b32 s10, s19, 16
+; GFX11-TRUE16-NEXT: s_lshr_b32 s43, s18, 16
+; GFX11-TRUE16-NEXT: s_lshr_b32 s11, s17, 16
+; GFX11-TRUE16-NEXT: s_lshr_b32 s44, s16, 16
+; GFX11-TRUE16-NEXT: s_lshr_b32 s12, s3, 16
+; GFX11-TRUE16-NEXT: s_lshr_b32 s45, s2, 16
+; GFX11-TRUE16-NEXT: s_lshr_b32 s13, s1, 16
+; GFX11-TRUE16-NEXT: s_lshr_b32 s46, s0, 16
+; GFX11-TRUE16-NEXT: s_and_not1_b32 vcc_lo, exec_lo, s4
+; GFX11-TRUE16-NEXT: s_cbranch_vccnz .LBB53_4
+; GFX11-TRUE16-NEXT: .LBB53_2: ; %cmp.true
+; GFX11-TRUE16-NEXT: v_add_f64 v[16:17], s[28:29], 1.0
+; GFX11-TRUE16-NEXT: v_add_f64 v[14:15], s[26:27], 1.0
+; GFX11-TRUE16-NEXT: v_add_f64 v[12:13], s[24:25], 1.0
+; GFX11-TRUE16-NEXT: v_add_f64 v[10:11], s[22:23], 1.0
+; GFX11-TRUE16-NEXT: v_add_f64 v[8:9], s[20:21], 1.0
+; GFX11-TRUE16-NEXT: v_add_f64 v[6:7], s[18:19], 1.0
+; GFX11-TRUE16-NEXT: v_add_f64 v[4:5], s[16:17], 1.0
+; GFX11-TRUE16-NEXT: v_add_f64 v[2:3], s[2:3], 1.0
+; GFX11-TRUE16-NEXT: v_add_f64 v[0:1], s[0:1], 1.0
+; GFX11-TRUE16-NEXT: v_lshrrev_b32_e32 v19, 16, v17
+; GFX11-TRUE16-NEXT: v_lshrrev_b32_e32 v18, 16, v16
+; GFX11-TRUE16-NEXT: v_lshrrev_b32_e32 v21, 16, v15
+; GFX11-TRUE16-NEXT: v_lshrrev_b32_e32 v20, 16, v14
+; GFX11-TRUE16-NEXT: v_lshrrev_b32_e32 v23, 16, v13
+; GFX11-TRUE16-NEXT: v_lshrrev_b32_e32 v22, 16, v12
+; GFX11-TRUE16-NEXT: v_lshrrev_b32_e32 v25, 16, v11
+; GFX11-TRUE16-NEXT: v_lshrrev_b32_e32 v24, 16, v10
+; GFX11-TRUE16-NEXT: v_lshrrev_b32_e32 v27, 16, v9
+; GFX11-TRUE16-NEXT: v_lshrrev_b32_e32 v26, 16, v8
+; GFX11-TRUE16-NEXT: v_lshrrev_b32_e32 v29, 16, v7
+; GFX11-TRUE16-NEXT: v_lshrrev_b32_e32 v28, 16, v6
+; GFX11-TRUE16-NEXT: v_lshrrev_b32_e32 v31, 16, v5
+; GFX11-TRUE16-NEXT: v_lshrrev_b32_e32 v30, 16, v4
+; GFX11-TRUE16-NEXT: v_lshrrev_b32_e32 v33, 16, v3
+; GFX11-TRUE16-NEXT: v_lshrrev_b32_e32 v32, 16, v2
+; GFX11-TRUE16-NEXT: v_lshrrev_b32_e32 v35, 16, v1
+; GFX11-TRUE16-NEXT: v_lshrrev_b32_e32 v34, 16, v0
+; GFX11-TRUE16-NEXT: s_branch .LBB53_5
+; GFX11-TRUE16-NEXT: .LBB53_3:
+; GFX11-TRUE16-NEXT: ; implicit-def: $sgpr46
+; GFX11-TRUE16-NEXT: ; implicit-def: $sgpr13
+; GFX11-TRUE16-NEXT: ; implicit-def: $sgpr45
+; GFX11-TRUE16-NEXT: ; implicit-def: $sgpr12
+; GFX11-TRUE16-NEXT: ; implicit-def: $sgpr44
+; GFX11-TRUE16-NEXT: ; implicit-def: $sgpr11
+; GFX11-TRUE16-NEXT: ; implicit-def: $sgpr43
+; GFX11-TRUE16-NEXT: ; implicit-def: $sgpr10
+; GFX11-TRUE16-NEXT: ; implicit-def: $sgpr42
+; GFX11-TRUE16-NEXT: ; implicit-def: $sgpr9
+; GFX11-TRUE16-NEXT: ; implicit-def: $sgpr41
+; GFX11-TRUE16-NEXT: ; implicit-def: $sgpr8
+; GFX11-TRUE16-NEXT: ; implicit-def: $sgpr40
+; GFX11-TRUE16-NEXT: ; implicit-def: $sgpr7
+; GFX11-TRUE16-NEXT: ; implicit-def: $sgpr15
+; GFX11-TRUE16-NEXT: ; implicit-def: $sgpr6
+; GFX11-TRUE16-NEXT: ; implicit-def: $sgpr14
+; GFX11-TRUE16-NEXT: ; implicit-def: $sgpr5
+; GFX11-TRUE16-NEXT: s_branch .LBB53_2
+; GFX11-TRUE16-NEXT: .LBB53_4:
+; GFX11-TRUE16-NEXT: v_dual_mov_b32 v0, s0 :: v_dual_mov_b32 v1, s1
+; GFX11-TRUE16-NEXT: v_dual_mov_b32 v2, s2 :: v_dual_mov_b32 v3, s3
+; GFX11-TRUE16-NEXT: v_dual_mov_b32 v4, s16 :: v_dual_mov_b32 v5, s17
+; GFX11-TRUE16-NEXT: v_dual_mov_b32 v6, s18 :: v_dual_mov_b32 v7, s19
+; GFX11-TRUE16-NEXT: v_dual_mov_b32 v8, s20 :: v_dual_mov_b32 v9, s21
+; GFX11-TRUE16-NEXT: v_dual_mov_b32 v10, s22 :: v_dual_mov_b32 v11, s23
+; GFX11-TRUE16-NEXT: v_dual_mov_b32 v12, s24 :: v_dual_mov_b32 v13, s25
+; GFX11-TRUE16-NEXT: v_dual_mov_b32 v14, s26 :: v_dual_mov_b32 v15, s27
+; GFX11-TRUE16-NEXT: v_dual_mov_b32 v16, s28 :: v_dual_mov_b32 v17, s29
+; GFX11-TRUE16-NEXT: v_dual_mov_b32 v34, s46 :: v_dual_mov_b32 v35, s13
+; GFX11-TRUE16-NEXT: v_dual_mov_b32 v32, s45 :: v_dual_mov_b32 v33, s12
+; GFX11-TRUE16-NEXT: v_dual_mov_b32 v30, s44 :: v_dual_mov_b32 v31, s11
+; GFX11-TRUE16-NEXT: v_dual_mov_b32 v28, s43 :: v_dual_mov_b32 v29, s10
+; GFX11-TRUE16-NEXT: v_dual_mov_b32 v26, s42 :: v_dual_mov_b32 v27, s9
+; GFX11-TRUE16-NEXT: v_dual_mov_b32 v24, s41 :: v_dual_mov_b32 v25, s8
+; GFX11-TRUE16-NEXT: v_dual_mov_b32 v22, s40 :: v_dual_mov_b32 v23, s7
+; GFX11-TRUE16-NEXT: v_dual_mov_b32 v20, s15 :: v_dual_mov_b32 v21, s6
+; GFX11-TRUE16-NEXT: v_dual_mov_b32 v18, s14 :: v_dual_mov_b32 v19, s5
+; GFX11-TRUE16-NEXT: .LBB53_5: ; %end
+; GFX11-TRUE16-NEXT: s_delay_alu instid0(VALU_DEP_3) | instskip(NEXT) | instid1(VALU_DEP_4)
+; GFX11-TRUE16-NEXT: v_dual_mov_b32 v32, v32 :: v_dual_mov_b32 v31, v31
+; GFX11-TRUE16-NEXT: v_mov_b32_e32 v22, v22
+; GFX11-TRUE16-NEXT: s_delay_alu instid0(VALU_DEP_3) | instskip(NEXT) | instid1(VALU_DEP_3)
+; GFX11-TRUE16-NEXT: v_dual_mov_b32 v34, v34 :: v_dual_mov_b32 v35, v35
+; GFX11-TRUE16-NEXT: v_mov_b16_e32 v2.h, v32.l
+; GFX11-TRUE16-NEXT: v_dual_mov_b32 v32, v33 :: v_dual_mov_b32 v27, v27
+; GFX11-TRUE16-NEXT: v_dual_mov_b32 v30, v30 :: v_dual_mov_b32 v29, v29
+; GFX11-TRUE16-NEXT: v_dual_mov_b32 v28, v28 :: v_dual_mov_b32 v25, v25
+; GFX11-TRUE16-NEXT: v_dual_mov_b32 v26, v26 :: v_dual_mov_b32 v21, v21
+; GFX11-TRUE16-NEXT: v_dual_mov_b32 v24, v24 :: v_dual_mov_b32 v19, v19
+; GFX11-TRUE16-NEXT: v_mov_b16_e32 v12.h, v22.l
+; GFX11-TRUE16-NEXT: v_mov_b32_e32 v22, v23
+; GFX11-TRUE16-NEXT: v_mov_b32_e32 v20, v20
+; GFX11-TRUE16-NEXT: v_mov_b32_e32 v18, v18
+; GFX11-TRUE16-NEXT: v_mov_b16_e32 v0.h, v34.l
+; GFX11-TRUE16-NEXT: v_mov_b16_e32 v1.h, v35.l
+; GFX11-TRUE16-NEXT: v_mov_b16_e32 v3.h, v32.l
+; GFX11-TRUE16-NEXT: v_mov_b16_e32 v4.h, v30.l
+; GFX11-TRUE16-NEXT: v_mov_b16_e32 v5.h, v31.l
+; GFX11-TRUE16-NEXT: v_mov_b16_e32 v6.h, v28.l
+; GFX11-TRUE16-NEXT: v_mov_b16_e32 v7.h, v29.l
+; GFX11-TRUE16-NEXT: v_mov_b16_e32 v8.h, v26.l
+; GFX11-TRUE16-NEXT: v_mov_b16_e32 v9.h, v27.l
+; GFX11-TRUE16-NEXT: v_mov_b16_e32 v10.h, v24.l
+; GFX11-TRUE16-NEXT: v_mov_b16_e32 v11.h, v25.l
+; GFX11-TRUE16-NEXT: v_mov_b16_e32 v13.h, v22.l
+; GFX11-TRUE16-NEXT: v_mov_b16_e32 v14.h, v20.l
+; GFX11-TRUE16-NEXT: v_mov_b16_e32 v15.h, v21.l
+; GFX11-TRUE16-NEXT: v_mov_b16_e32 v16.h, v18.l
+; GFX11-TRUE16-NEXT: v_mov_b16_e32 v17.h, v19.l
+; GFX11-TRUE16-NEXT: s_setpc_b64 s[30:31]
+;
+; GFX11-FAKE16-LABEL: bitcast_v9f64_to_v36f16_scalar:
+; GFX11-FAKE16: ; %bb.0:
+; GFX11-FAKE16-NEXT: s_waitcnt vmcnt(0) expcnt(0) lgkmcnt(0)
+; GFX11-FAKE16-NEXT: v_cmp_ne_u32_e32 vcc_lo, 0, v0
+; GFX11-FAKE16-NEXT: s_mov_b32 s4, 0
+; GFX11-FAKE16-NEXT: s_and_b32 s5, vcc_lo, exec_lo
+; GFX11-FAKE16-NEXT: s_cbranch_scc0 .LBB53_3
+; GFX11-FAKE16-NEXT: ; %bb.1: ; %cmp.false
+; GFX11-FAKE16-NEXT: s_lshr_b32 s5, s29, 16
+; GFX11-FAKE16-NEXT: s_lshr_b32 s14, s28, 16
+; GFX11-FAKE16-NEXT: s_lshr_b32 s6, s27, 16
+; GFX11-FAKE16-NEXT: s_lshr_b32 s15, s26, 16
+; GFX11-FAKE16-NEXT: s_lshr_b32 s7, s25, 16
+; GFX11-FAKE16-NEXT: s_lshr_b32 s40, s24, 16
+; GFX11-FAKE16-NEXT: s_lshr_b32 s8, s23, 16
+; GFX11-FAKE16-NEXT: s_lshr_b32 s41, s22, 16
+; GFX11-FAKE16-NEXT: s_lshr_b32 s9, s21, 16
+; GFX11-FAKE16-NEXT: s_lshr_b32 s42, s20, 16
+; GFX11-FAKE16-NEXT: s_lshr_b32 s10, s19, 16
+; GFX11-FAKE16-NEXT: s_lshr_b32 s43, s18, 16
+; GFX11-FAKE16-NEXT: s_lshr_b32 s11, s17, 16
+; GFX11-FAKE16-NEXT: s_lshr_b32 s44, s16, 16
+; GFX11-FAKE16-NEXT: s_lshr_b32 s12, s3, 16
+; GFX11-FAKE16-NEXT: s_lshr_b32 s45, s2, 16
+; GFX11-FAKE16-NEXT: s_lshr_b32 s13, s1, 16
+; GFX11-FAKE16-NEXT: s_lshr_b32 s46, s0, 16
+; GFX11-FAKE16-NEXT: s_and_not1_b32 vcc_lo, exec_lo, s4
+; GFX11-FAKE16-NEXT: s_cbranch_vccnz .LBB53_4
+; GFX11-FAKE16-NEXT: .LBB53_2: ; %cmp.true
+; GFX11-FAKE16-NEXT: v_add_f64 v[13:14], s[28:29], 1.0
+; GFX11-FAKE16-NEXT: v_add_f64 v[15:16], s[26:27], 1.0
+; GFX11-FAKE16-NEXT: v_add_f64 v[17:18], s[24:25], 1.0
+; GFX11-FAKE16-NEXT: v_add_f64 v[8:9], s[22:23], 1.0
+; GFX11-FAKE16-NEXT: v_add_f64 v[10:11], s[20:21], 1.0
+; GFX11-FAKE16-NEXT: v_add_f64 v[3:4], s[18:19], 1.0
+; GFX11-FAKE16-NEXT: v_add_f64 v[5:6], s[16:17], 1.0
+; GFX11-FAKE16-NEXT: v_add_f64 v[19:20], s[2:3], 1.0
+; GFX11-FAKE16-NEXT: v_add_f64 v[0:1], s[0:1], 1.0
+; GFX11-FAKE16-NEXT: v_lshrrev_b32_e32 v23, 16, v14
+; GFX11-FAKE16-NEXT: v_lshrrev_b32_e32 v21, 16, v13
+; GFX11-FAKE16-NEXT: v_lshrrev_b32_e32 v24, 16, v16
+; GFX11-FAKE16-NEXT: v_lshrrev_b32_e32 v22, 16, v15
+; GFX11-FAKE16-NEXT: v_lshrrev_b32_e32 v25, 16, v18
+; GFX11-FAKE16-NEXT: v_lshrrev_b32_e32 v12, 16, v17
+; GFX11-FAKE16-NEXT: v_lshrrev_b32_e32 v28, 16, v9
+; GFX11-FAKE16-NEXT: v_lshrrev_b32_e32 v26, 16, v8
+; GFX11-FAKE16-NEXT: v_lshrrev_b32_e32 v29, 16, v11
+; GFX11-FAKE16-NEXT: v_lshrrev_b32_e32 v27, 16, v10
+; GFX11-FAKE16-NEXT: v_lshrrev_b32_e32 v31, 16, v4
+; GFX11-FAKE16-NEXT: v_lshrrev_b32_e32 v7, 16, v3
+; GFX11-FAKE16-NEXT: v_lshrrev_b32_e32 v32, 16, v6
+; GFX11-FAKE16-NEXT: v_lshrrev_b32_e32 v30, 16, v5
+; GFX11-FAKE16-NEXT: v_lshrrev_b32_e32 v33, 16, v20
+; GFX11-FAKE16-NEXT: v_lshrrev_b32_e32 v2, 16, v19
+; GFX11-FAKE16-NEXT: v_lshrrev_b32_e32 v35, 16, v1
+; GFX11-FAKE16-NEXT: v_lshrrev_b32_e32 v34, 16, v0
+; GFX11-FAKE16-NEXT: s_branch .LBB53_5
+; GFX11-FAKE16-NEXT: .LBB53_3:
+; GFX11-FAKE16-NEXT: ; implicit-def: $sgpr46
+; GFX11-FAKE16-NEXT: ; implicit-def: $sgpr13
+; GFX11-FAKE16-NEXT: ; implicit-def: $sgpr45
+; GFX11-FAKE16-NEXT: ; implicit-def: $sgpr12
+; GFX11-FAKE16-NEXT: ; implicit-def: $sgpr44
+; GFX11-FAKE16-NEXT: ; implicit-def: $sgpr11
+; GFX11-FAKE16-NEXT: ; implicit-def: $sgpr43
+; GFX11-FAKE16-NEXT: ; implicit-def: $sgpr10
+; GFX11-FAKE16-NEXT: ; implicit-def: $sgpr42
+; GFX11-FAKE16-NEXT: ; implicit-def: $sgpr9
+; GFX11-FAKE16-NEXT: ; implicit-def: $sgpr41
+; GFX11-FAKE16-NEXT: ; implicit-def: $sgpr8
+; GFX11-FAKE16-NEXT: ; implicit-def: $sgpr40
+; GFX11-FAKE16-NEXT: ; implicit-def: $sgpr7
+; GFX11-FAKE16-NEXT: ; implicit-def: $sgpr15
+; GFX11-FAKE16-NEXT: ; implicit-def: $sgpr6
+; GFX11-FAKE16-NEXT: ; implicit-def: $sgpr14
+; GFX11-FAKE16-NEXT: ; implicit-def: $sgpr5
+; GFX11-FAKE16-NEXT: s_branch .LBB53_2
+; GFX11-FAKE16-NEXT: .LBB53_4:
+; GFX11-FAKE16-NEXT: v_dual_mov_b32 v0, s0 :: v_dual_mov_b32 v19, s2
+; GFX11-FAKE16-NEXT: v_dual_mov_b32 v5, s16 :: v_dual_mov_b32 v10, s20
+; GFX11-FAKE16-NEXT: v_dual_mov_b32 v3, s18 :: v_dual_mov_b32 v8, s22
+; GFX11-FAKE16-NEXT: v_dual_mov_b32 v17, s24 :: v_dual_mov_b32 v20, s3
+; GFX11-FAKE16-NEXT: v_dual_mov_b32 v15, s26 :: v_dual_mov_b32 v6, s17
+; GFX11-FAKE16-NEXT: v_dual_mov_b32 v13, s28 :: v_dual_mov_b32 v4, s19
+; GFX11-FAKE16-NEXT: v_dual_mov_b32 v1, s1 :: v_dual_mov_b32 v18, s25
+; GFX11-FAKE16-NEXT: v_dual_mov_b32 v11, s21 :: v_dual_mov_b32 v16, s27
+; GFX11-FAKE16-NEXT: v_dual_mov_b32 v9, s23 :: v_dual_mov_b32 v14, s29
+; GFX11-FAKE16-NEXT: v_dual_mov_b32 v34, s46 :: v_dual_mov_b32 v7, s43
+; GFX11-FAKE16-NEXT: v_dual_mov_b32 v2, s45 :: v_dual_mov_b32 v27, s42
+; GFX11-FAKE16-NEXT: v_dual_mov_b32 v30, s44 :: v_dual_mov_b32 v21, s14
+; GFX11-FAKE16-NEXT: v_dual_mov_b32 v26, s41 :: v_dual_mov_b32 v35, s13
+; GFX11-FAKE16-NEXT: v_dual_mov_b32 v12, s40 :: v_dual_mov_b32 v33, s12
+; GFX11-FAKE16-NEXT: v_dual_mov_b32 v22, s15 :: v_dual_mov_b32 v31, s10
+; GFX11-FAKE16-NEXT: v_dual_mov_b32 v32, s11 :: v_dual_mov_b32 v29, s9
+; GFX11-FAKE16-NEXT: v_dual_mov_b32 v28, s8 :: v_dual_mov_b32 v25, s7
+; GFX11-FAKE16-NEXT: v_dual_mov_b32 v24, s6 :: v_dual_mov_b32 v23, s5
+; GFX11-FAKE16-NEXT: .LBB53_5: ; %end
+; GFX11-FAKE16-NEXT: v_and_b32_e32 v19, 0xffff, v19
+; GFX11-FAKE16-NEXT: v_and_b32_e32 v0, 0xffff, v0
+; GFX11-FAKE16-NEXT: v_and_b32_e32 v5, 0xffff, v5
+; GFX11-FAKE16-NEXT: v_and_b32_e32 v6, 0xffff, v6
+; GFX11-FAKE16-NEXT: v_and_b32_e32 v10, 0xffff, v10
+; GFX11-FAKE16-NEXT: v_lshl_or_b32 v2, v2, 16, v19
+; GFX11-FAKE16-NEXT: v_and_b32_e32 v19, 0xffff, v20
+; GFX11-FAKE16-NEXT: v_and_b32_e32 v20, 0xffff, v3
+; GFX11-FAKE16-NEXT: v_and_b32_e32 v17, 0xffff, v17
+; GFX11-FAKE16-NEXT: v_and_b32_e32 v1, 0xffff, v1
+; GFX11-FAKE16-NEXT: v_lshl_or_b32 v0, v34, 16, v0
+; GFX11-FAKE16-NEXT: v_lshl_or_b32 v3, v33, 16, v19
+; GFX11-FAKE16-NEXT: v_and_b32_e32 v19, 0xffff, v8
+; GFX11-FAKE16-NEXT: v_and_b32_e32 v34, 0xffff, v4
+; GFX11-FAKE16-NEXT: v_lshl_or_b32 v4, v30, 16, v5
+; GFX11-FAKE16-NEXT: v_lshl_or_b32 v5, v32, 16, v6
+; GFX11-FAKE16-NEXT: v_lshl_or_b32 v6, v7, 16, v20
+; GFX11-FAKE16-NEXT: v_and_b32_e32 v11, 0xffff, v11
+; GFX11-FAKE16-NEXT: v_and_b32_e32 v20, 0xffff, v9
+; GFX11-FAKE16-NEXT: v_lshl_or_b32 v8, v27, 16, v10
+; GFX11-FAKE16-NEXT: v_lshl_or_b32 v10, v26, 16, v19
+; GFX11-FAKE16-NEXT: v_lshl_or_b32 v12, v12, 16, v17
+; GFX11-FAKE16-NEXT: v_and_b32_e32 v17, 0xffff, v18
+; GFX11-FAKE16-NEXT: v_and_b32_e32 v15, 0xffff, v15
+; GFX11-FAKE16-NEXT: v_and_b32_e32 v16, 0xffff, v16
+; GFX11-FAKE16-NEXT: v_and_b32_e32 v18, 0xffff, v13
+; GFX11-FAKE16-NEXT: v_and_b32_e32 v19, 0xffff, v14
+; GFX11-FAKE16-NEXT: v_lshl_or_b32 v1, v35, 16, v1
+; GFX11-FAKE16-NEXT: v_lshl_or_b32 v7, v31, 16, v34
+; GFX11-FAKE16-NEXT: v_lshl_or_b32 v9, v29, 16, v11
+; GFX11-FAKE16-NEXT: v_lshl_or_b32 v11, v28, 16, v20
+; GFX11-FAKE16-NEXT: v_lshl_or_b32 v13, v25, 16, v17
+; GFX11-FAKE16-NEXT: v_lshl_or_b32 v14, v22, 16, v15
+; GFX11-FAKE16-NEXT: v_lshl_or_b32 v15, v24, 16, v16
+; GFX11-FAKE16-NEXT: v_lshl_or_b32 v16, v21, 16, v18
+; GFX11-FAKE16-NEXT: v_lshl_or_b32 v17, v23, 16, v19
+; GFX11-FAKE16-NEXT: s_setpc_b64 s[30:31]
%cmp = icmp eq i32 %b, 0
br i1 %cmp, label %cmp.true, label %cmp.false
@@ -27654,149 +28186,285 @@ define inreg <36 x half> @bitcast_v36i16_to_v36f16_scalar(<36 x i16> inreg %a, i
; GFX9-NEXT: v_mov_b32_e32 v3, v19
; GFX9-NEXT: s_setpc_b64 s[30:31]
;
-; GFX11-LABEL: bitcast_v36i16_to_v36f16_scalar:
-; GFX11: ; %bb.0:
-; GFX11-NEXT: s_waitcnt vmcnt(0) expcnt(0) lgkmcnt(0)
-; GFX11-NEXT: v_cmp_ne_u32_e32 vcc_lo, 0, v0
-; GFX11-NEXT: s_lshr_b32 s45, s29, 16
-; GFX11-NEXT: s_lshr_b32 s44, s28, 16
-; GFX11-NEXT: s_lshr_b32 s43, s27, 16
-; GFX11-NEXT: s_lshr_b32 s42, s26, 16
-; GFX11-NEXT: s_lshr_b32 s41, s25, 16
-; GFX11-NEXT: s_lshr_b32 s40, s24, 16
-; GFX11-NEXT: s_lshr_b32 s15, s23, 16
-; GFX11-NEXT: s_lshr_b32 s14, s22, 16
-; GFX11-NEXT: s_lshr_b32 s13, s21, 16
-; GFX11-NEXT: s_lshr_b32 s12, s20, 16
-; GFX11-NEXT: s_lshr_b32 s11, s19, 16
-; GFX11-NEXT: s_lshr_b32 s10, s18, 16
-; GFX11-NEXT: s_lshr_b32 s9, s17, 16
-; GFX11-NEXT: s_lshr_b32 s7, s16, 16
-; GFX11-NEXT: s_lshr_b32 s6, s3, 16
-; GFX11-NEXT: s_lshr_b32 s8, s2, 16
-; GFX11-NEXT: s_lshr_b32 s4, s1, 16
-; GFX11-NEXT: s_lshr_b32 s5, s0, 16
-; GFX11-NEXT: s_mov_b32 s46, 0
-; GFX11-NEXT: s_and_b32 s47, vcc_lo, exec_lo
-; GFX11-NEXT: s_cbranch_scc0 .LBB57_3
-; GFX11-NEXT: ; %bb.1: ; %Flow
-; GFX11-NEXT: s_and_not1_b32 vcc_lo, exec_lo, s46
-; GFX11-NEXT: s_cbranch_vccnz .LBB57_4
-; GFX11-NEXT: .LBB57_2: ; %cmp.true
-; GFX11-NEXT: s_pack_ll_b32_b16 s29, s29, s45
-; GFX11-NEXT: s_pack_ll_b32_b16 s28, s28, s44
-; GFX11-NEXT: s_pack_ll_b32_b16 s27, s27, s43
-; GFX11-NEXT: s_pack_ll_b32_b16 s26, s26, s42
-; GFX11-NEXT: s_pack_ll_b32_b16 s25, s25, s41
-; GFX11-NEXT: s_pack_ll_b32_b16 s24, s24, s40
-; GFX11-NEXT: s_pack_ll_b32_b16 s15, s23, s15
-; GFX11-NEXT: s_pack_ll_b32_b16 s14, s22, s14
-; GFX11-NEXT: s_pack_ll_b32_b16 s13, s21, s13
-; GFX11-NEXT: s_pack_ll_b32_b16 s12, s20, s12
-; GFX11-NEXT: s_pack_ll_b32_b16 s11, s19, s11
-; GFX11-NEXT: s_pack_ll_b32_b16 s10, s18, s10
-; GFX11-NEXT: s_pack_ll_b32_b16 s9, s17, s9
-; GFX11-NEXT: s_pack_ll_b32_b16 s7, s16, s7
-; GFX11-NEXT: s_pack_ll_b32_b16 s3, s3, s6
-; GFX11-NEXT: s_pack_ll_b32_b16 s2, s2, s8
-; GFX11-NEXT: s_pack_ll_b32_b16 s0, s0, s5
-; GFX11-NEXT: s_pack_ll_b32_b16 s1, s1, s4
-; GFX11-NEXT: v_pk_add_u16 v13, s29, 3 op_sel_hi:[1,0]
-; GFX11-NEXT: v_pk_add_u16 v14, s28, 3 op_sel_hi:[1,0]
-; GFX11-NEXT: v_pk_add_u16 v15, s27, 3 op_sel_hi:[1,0]
-; GFX11-NEXT: v_pk_add_u16 v16, s26, 3 op_sel_hi:[1,0]
-; GFX11-NEXT: v_pk_add_u16 v17, s25, 3 op_sel_hi:[1,0]
-; GFX11-NEXT: v_pk_add_u16 v8, s24, 3 op_sel_hi:[1,0]
-; GFX11-NEXT: v_pk_add_u16 v9, s15, 3 op_sel_hi:[1,0]
-; GFX11-NEXT: v_pk_add_u16 v10, s14, 3 op_sel_hi:[1,0]
-; GFX11-NEXT: v_pk_add_u16 v11, s13, 3 op_sel_hi:[1,0]
-; GFX11-NEXT: v_pk_add_u16 v12, s12, 3 op_sel_hi:[1,0]
-; GFX11-NEXT: v_pk_add_u16 v3, s11, 3 op_sel_hi:[1,0]
-; GFX11-NEXT: v_pk_add_u16 v4, s10, 3 op_sel_hi:[1,0]
-; GFX11-NEXT: v_pk_add_u16 v5, s9, 3 op_sel_hi:[1,0]
-; GFX11-NEXT: v_pk_add_u16 v2, s0, 3 op_sel_hi:[1,0]
-; GFX11-NEXT: v_pk_add_u16 v1, s1, 3 op_sel_hi:[1,0]
-; GFX11-NEXT: v_pk_add_u16 v0, s2, 3 op_sel_hi:[1,0]
-; GFX11-NEXT: v_pk_add_u16 v7, s3, 3 op_sel_hi:[1,0]
-; GFX11-NEXT: v_pk_add_u16 v6, s7, 3 op_sel_hi:[1,0]
-; GFX11-NEXT: v_lshrrev_b32_e32 v35, 16, v2
-; GFX11-NEXT: v_lshrrev_b32_e32 v34, 16, v1
-; GFX11-NEXT: v_lshrrev_b32_e32 v33, 16, v0
-; GFX11-NEXT: v_lshrrev_b32_e32 v32, 16, v7
-; GFX11-NEXT: v_lshrrev_b32_e32 v31, 16, v6
-; GFX11-NEXT: v_lshrrev_b32_e32 v30, 16, v5
-; GFX11-NEXT: v_lshrrev_b32_e32 v29, 16, v4
-; GFX11-NEXT: v_lshrrev_b32_e32 v28, 16, v3
-; GFX11-NEXT: v_lshrrev_b32_e32 v27, 16, v12
-; GFX11-NEXT: v_lshrrev_b32_e32 v26, 16, v11
-; GFX11-NEXT: v_lshrrev_b32_e32 v25, 16, v10
-; GFX11-NEXT: v_lshrrev_b32_e32 v24, 16, v9
-; GFX11-NEXT: v_lshrrev_b32_e32 v23, 16, v8
-; GFX11-NEXT: v_lshrrev_b32_e32 v22, 16, v17
-; GFX11-NEXT: v_lshrrev_b32_e32 v21, 16, v16
-; GFX11-NEXT: v_lshrrev_b32_e32 v20, 16, v15
-; GFX11-NEXT: v_lshrrev_b32_e32 v19, 16, v14
-; GFX11-NEXT: v_lshrrev_b32_e32 v18, 16, v13
-; GFX11-NEXT: s_branch .LBB57_5
-; GFX11-NEXT: .LBB57_3:
-; GFX11-NEXT: s_branch .LBB57_2
-; GFX11-NEXT: .LBB57_4:
-; GFX11-NEXT: v_dual_mov_b32 v13, s29 :: v_dual_mov_b32 v14, s28
-; GFX11-NEXT: v_dual_mov_b32 v15, s27 :: v_dual_mov_b32 v16, s26
-; GFX11-NEXT: v_dual_mov_b32 v17, s25 :: v_dual_mov_b32 v8, s24
-; GFX11-NEXT: v_dual_mov_b32 v9, s23 :: v_dual_mov_b32 v10, s22
-; GFX11-NEXT: v_dual_mov_b32 v11, s21 :: v_dual_mov_b32 v12, s20
-; GFX11-NEXT: v_dual_mov_b32 v3, s19 :: v_dual_mov_b32 v4, s18
-; GFX11-NEXT: v_dual_mov_b32 v5, s17 :: v_dual_mov_b32 v6, s16
-; GFX11-NEXT: v_dual_mov_b32 v7, s3 :: v_dual_mov_b32 v0, s2
-; GFX11-NEXT: v_dual_mov_b32 v1, s1 :: v_dual_mov_b32 v2, s0
-; GFX11-NEXT: v_dual_mov_b32 v18, s45 :: v_dual_mov_b32 v19, s44
-; GFX11-NEXT: v_dual_mov_b32 v20, s43 :: v_dual_mov_b32 v21, s42
-; GFX11-NEXT: v_dual_mov_b32 v22, s41 :: v_dual_mov_b32 v23, s40
-; GFX11-NEXT: v_dual_mov_b32 v24, s15 :: v_dual_mov_b32 v25, s14
-; GFX11-NEXT: v_dual_mov_b32 v26, s13 :: v_dual_mov_b32 v27, s12
-; GFX11-NEXT: v_dual_mov_b32 v28, s11 :: v_dual_mov_b32 v29, s10
-; GFX11-NEXT: v_dual_mov_b32 v30, s9 :: v_dual_mov_b32 v31, s7
-; GFX11-NEXT: v_dual_mov_b32 v32, s6 :: v_dual_mov_b32 v33, s8
-; GFX11-NEXT: v_dual_mov_b32 v34, s4 :: v_dual_mov_b32 v35, s5
-; GFX11-NEXT: .LBB57_5: ; %end
-; GFX11-NEXT: v_and_b32_e32 v2, 0xffff, v2
-; GFX11-NEXT: v_and_b32_e32 v1, 0xffff, v1
-; GFX11-NEXT: v_and_b32_e32 v36, 0xffff, v0
-; GFX11-NEXT: v_and_b32_e32 v7, 0xffff, v7
-; GFX11-NEXT: v_and_b32_e32 v6, 0xffff, v6
-; GFX11-NEXT: v_lshl_or_b32 v0, v35, 16, v2
-; GFX11-NEXT: v_lshl_or_b32 v1, v34, 16, v1
-; GFX11-NEXT: v_lshl_or_b32 v2, v33, 16, v36
-; GFX11-NEXT: v_and_b32_e32 v33, 0xffff, v4
-; GFX11-NEXT: v_and_b32_e32 v34, 0xffff, v3
-; GFX11-NEXT: v_lshl_or_b32 v3, v32, 16, v7
-; GFX11-NEXT: v_lshl_or_b32 v4, v31, 16, v6
-; GFX11-NEXT: v_and_b32_e32 v12, 0xffff, v12
-; GFX11-NEXT: v_lshl_or_b32 v6, v29, 16, v33
-; GFX11-NEXT: v_lshl_or_b32 v7, v28, 16, v34
-; GFX11-NEXT: v_and_b32_e32 v11, 0xffff, v11
-; GFX11-NEXT: v_and_b32_e32 v28, 0xffff, v9
-; GFX11-NEXT: v_and_b32_e32 v29, 0xffff, v8
-; GFX11-NEXT: v_and_b32_e32 v5, 0xffff, v5
-; GFX11-NEXT: v_and_b32_e32 v10, 0xffff, v10
-; GFX11-NEXT: v_lshl_or_b32 v8, v27, 16, v12
-; GFX11-NEXT: v_lshl_or_b32 v9, v26, 16, v11
-; GFX11-NEXT: v_lshl_or_b32 v11, v24, 16, v28
-; GFX11-NEXT: v_lshl_or_b32 v12, v23, 16, v29
-; GFX11-NEXT: v_and_b32_e32 v17, 0xffff, v17
-; GFX11-NEXT: v_and_b32_e32 v16, 0xffff, v16
-; GFX11-NEXT: v_and_b32_e32 v15, 0xffff, v15
-; GFX11-NEXT: v_and_b32_e32 v23, 0xffff, v14
-; GFX11-NEXT: v_and_b32_e32 v24, 0xffff, v13
-; GFX11-NEXT: v_lshl_or_b32 v5, v30, 16, v5
-; GFX11-NEXT: v_lshl_or_b32 v10, v25, 16, v10
-; GFX11-NEXT: v_lshl_or_b32 v13, v22, 16, v17
-; GFX11-NEXT: v_lshl_or_b32 v14, v21, 16, v16
-; GFX11-NEXT: v_lshl_or_b32 v15, v20, 16, v15
-; GFX11-NEXT: v_lshl_or_b32 v16, v19, 16, v23
-; GFX11-NEXT: v_lshl_or_b32 v17, v18, 16, v24
-; GFX11-NEXT: s_setpc_b64 s[30:31]
+; GFX11-TRUE16-LABEL: bitcast_v36i16_to_v36f16_scalar:
+; GFX11-TRUE16: ; %bb.0:
+; GFX11-TRUE16-NEXT: s_waitcnt vmcnt(0) expcnt(0) lgkmcnt(0)
+; GFX11-TRUE16-NEXT: v_cmp_ne_u32_e32 vcc_lo, 0, v0
+; GFX11-TRUE16-NEXT: s_lshr_b32 s45, s29, 16
+; GFX11-TRUE16-NEXT: s_lshr_b32 s44, s28, 16
+; GFX11-TRUE16-NEXT: s_lshr_b32 s43, s27, 16
+; GFX11-TRUE16-NEXT: s_lshr_b32 s42, s26, 16
+; GFX11-TRUE16-NEXT: s_lshr_b32 s41, s25, 16
+; GFX11-TRUE16-NEXT: s_lshr_b32 s40, s24, 16
+; GFX11-TRUE16-NEXT: s_lshr_b32 s15, s23, 16
+; GFX11-TRUE16-NEXT: s_lshr_b32 s14, s22, 16
+; GFX11-TRUE16-NEXT: s_lshr_b32 s13, s21, 16
+; GFX11-TRUE16-NEXT: s_lshr_b32 s12, s20, 16
+; GFX11-TRUE16-NEXT: s_lshr_b32 s11, s19, 16
+; GFX11-TRUE16-NEXT: s_lshr_b32 s10, s18, 16
+; GFX11-TRUE16-NEXT: s_lshr_b32 s9, s17, 16
+; GFX11-TRUE16-NEXT: s_lshr_b32 s7, s16, 16
+; GFX11-TRUE16-NEXT: s_lshr_b32 s6, s3, 16
+; GFX11-TRUE16-NEXT: s_lshr_b32 s8, s2, 16
+; GFX11-TRUE16-NEXT: s_lshr_b32 s4, s1, 16
+; GFX11-TRUE16-NEXT: s_lshr_b32 s5, s0, 16
+; GFX11-TRUE16-NEXT: s_mov_b32 s46, 0
+; GFX11-TRUE16-NEXT: s_and_b32 s47, vcc_lo, exec_lo
+; GFX11-TRUE16-NEXT: s_cbranch_scc0 .LBB57_3
+; GFX11-TRUE16-NEXT: ; %bb.1: ; %Flow
+; GFX11-TRUE16-NEXT: s_and_not1_b32 vcc_lo, exec_lo, s46
+; GFX11-TRUE16-NEXT: s_cbranch_vccnz .LBB57_4
+; GFX11-TRUE16-NEXT: .LBB57_2: ; %cmp.true
+; GFX11-TRUE16-NEXT: s_pack_ll_b32_b16 s29, s29, s45
+; GFX11-TRUE16-NEXT: s_pack_ll_b32_b16 s28, s28, s44
+; GFX11-TRUE16-NEXT: s_pack_ll_b32_b16 s27, s27, s43
+; GFX11-TRUE16-NEXT: s_pack_ll_b32_b16 s26, s26, s42
+; GFX11-TRUE16-NEXT: s_pack_ll_b32_b16 s25, s25, s41
+; GFX11-TRUE16-NEXT: s_pack_ll_b32_b16 s24, s24, s40
+; GFX11-TRUE16-NEXT: s_pack_ll_b32_b16 s15, s23, s15
+; GFX11-TRUE16-NEXT: s_pack_ll_b32_b16 s14, s22, s14
+; GFX11-TRUE16-NEXT: s_pack_ll_b32_b16 s13, s21, s13
+; GFX11-TRUE16-NEXT: s_pack_ll_b32_b16 s12, s20, s12
+; GFX11-TRUE16-NEXT: s_pack_ll_b32_b16 s11, s19, s11
+; GFX11-TRUE16-NEXT: s_pack_ll_b32_b16 s10, s18, s10
+; GFX11-TRUE16-NEXT: s_pack_ll_b32_b16 s9, s17, s9
+; GFX11-TRUE16-NEXT: s_pack_ll_b32_b16 s7, s16, s7
+; GFX11-TRUE16-NEXT: s_pack_ll_b32_b16 s3, s3, s6
+; GFX11-TRUE16-NEXT: s_pack_ll_b32_b16 s2, s2, s8
+; GFX11-TRUE16-NEXT: s_pack_ll_b32_b16 s0, s0, s5
+; GFX11-TRUE16-NEXT: s_pack_ll_b32_b16 s1, s1, s4
+; GFX11-TRUE16-NEXT: v_pk_add_u16 v17, s29, 3 op_sel_hi:[1,0]
+; GFX11-TRUE16-NEXT: v_pk_add_u16 v16, s28, 3 op_sel_hi:[1,0]
+; GFX11-TRUE16-NEXT: v_pk_add_u16 v15, s27, 3 op_sel_hi:[1,0]
+; GFX11-TRUE16-NEXT: v_pk_add_u16 v14, s26, 3 op_sel_hi:[1,0]
+; GFX11-TRUE16-NEXT: v_pk_add_u16 v13, s25, 3 op_sel_hi:[1,0]
+; GFX11-TRUE16-NEXT: v_pk_add_u16 v12, s24, 3 op_sel_hi:[1,0]
+; GFX11-TRUE16-NEXT: v_pk_add_u16 v11, s15, 3 op_sel_hi:[1,0]
+; GFX11-TRUE16-NEXT: v_pk_add_u16 v10, s14, 3 op_sel_hi:[1,0]
+; GFX11-TRUE16-NEXT: v_pk_add_u16 v9, s13, 3 op_sel_hi:[1,0]
+; GFX11-TRUE16-NEXT: v_pk_add_u16 v8, s12, 3 op_sel_hi:[1,0]
+; GFX11-TRUE16-NEXT: v_pk_add_u16 v7, s11, 3 op_sel_hi:[1,0]
+; GFX11-TRUE16-NEXT: v_pk_add_u16 v6, s10, 3 op_sel_hi:[1,0]
+; GFX11-TRUE16-NEXT: v_pk_add_u16 v5, s9, 3 op_sel_hi:[1,0]
+; GFX11-TRUE16-NEXT: v_pk_add_u16 v0, s0, 3 op_sel_hi:[1,0]
+; GFX11-TRUE16-NEXT: v_pk_add_u16 v1, s1, 3 op_sel_hi:[1,0]
+; GFX11-TRUE16-NEXT: v_pk_add_u16 v2, s2, 3 op_sel_hi:[1,0]
+; GFX11-TRUE16-NEXT: v_pk_add_u16 v3, s3, 3 op_sel_hi:[1,0]
+; GFX11-TRUE16-NEXT: v_pk_add_u16 v4, s7, 3 op_sel_hi:[1,0]
+; GFX11-TRUE16-NEXT: v_lshrrev_b32_e32 v35, 16, v0
+; GFX11-TRUE16-NEXT: v_lshrrev_b32_e32 v34, 16, v1
+; GFX11-TRUE16-NEXT: v_lshrrev_b32_e32 v33, 16, v2
+; GFX11-TRUE16-NEXT: v_lshrrev_b32_e32 v32, 16, v3
+; GFX11-TRUE16-NEXT: v_lshrrev_b32_e32 v31, 16, v4
+; GFX11-TRUE16-NEXT: v_lshrrev_b32_e32 v30, 16, v5
+; GFX11-TRUE16-NEXT: v_lshrrev_b32_e32 v29, 16, v6
+; GFX11-TRUE16-NEXT: v_lshrrev_b32_e32 v28, 16, v7
+; GFX11-TRUE16-NEXT: v_lshrrev_b32_e32 v27, 16, v8
+; GFX11-TRUE16-NEXT: v_lshrrev_b32_e32 v26, 16, v9
+; GFX11-TRUE16-NEXT: v_lshrrev_b32_e32 v25, 16, v10
+; GFX11-TRUE16-NEXT: v_lshrrev_b32_e32 v24, 16, v11
+; GFX11-TRUE16-NEXT: v_lshrrev_b32_e32 v23, 16, v12
+; GFX11-TRUE16-NEXT: v_lshrrev_b32_e32 v22, 16, v13
+; GFX11-TRUE16-NEXT: v_lshrrev_b32_e32 v21, 16, v14
+; GFX11-TRUE16-NEXT: v_lshrrev_b32_e32 v20, 16, v15
+; GFX11-TRUE16-NEXT: v_lshrrev_b32_e32 v19, 16, v16
+; GFX11-TRUE16-NEXT: v_lshrrev_b32_e32 v18, 16, v17
+; GFX11-TRUE16-NEXT: s_branch .LBB57_5
+; GFX11-TRUE16-NEXT: .LBB57_3:
+; GFX11-TRUE16-NEXT: s_branch .LBB57_2
+; GFX11-TRUE16-NEXT: .LBB57_4:
+; GFX11-TRUE16-NEXT: v_dual_mov_b32 v17, s29 :: v_dual_mov_b32 v16, s28
+; GFX11-TRUE16-NEXT: v_dual_mov_b32 v15, s27 :: v_dual_mov_b32 v14, s26
+; GFX11-TRUE16-NEXT: v_dual_mov_b32 v13, s25 :: v_dual_mov_b32 v12, s24
+; GFX11-TRUE16-NEXT: v_dual_mov_b32 v11, s23 :: v_dual_mov_b32 v10, s22
+; GFX11-TRUE16-NEXT: v_dual_mov_b32 v9, s21 :: v_dual_mov_b32 v8, s20
+; GFX11-TRUE16-NEXT: v_dual_mov_b32 v7, s19 :: v_dual_mov_b32 v6, s18
+; GFX11-TRUE16-NEXT: v_dual_mov_b32 v5, s17 :: v_dual_mov_b32 v4, s16
+; GFX11-TRUE16-NEXT: v_dual_mov_b32 v3, s3 :: v_dual_mov_b32 v2, s2
+; GFX11-TRUE16-NEXT: v_dual_mov_b32 v1, s1 :: v_dual_mov_b32 v0, s0
+; GFX11-TRUE16-NEXT: v_dual_mov_b32 v18, s45 :: v_dual_mov_b32 v19, s44
+; GFX11-TRUE16-NEXT: v_dual_mov_b32 v20, s43 :: v_dual_mov_b32 v21, s42
+; GFX11-TRUE16-NEXT: v_dual_mov_b32 v22, s41 :: v_dual_mov_b32 v23, s40
+; GFX11-TRUE16-NEXT: v_dual_mov_b32 v24, s15 :: v_dual_mov_b32 v25, s14
+; GFX11-TRUE16-NEXT: v_dual_mov_b32 v26, s13 :: v_dual_mov_b32 v27, s12
+; GFX11-TRUE16-NEXT: v_dual_mov_b32 v28, s11 :: v_dual_mov_b32 v29, s10
+; GFX11-TRUE16-NEXT: v_dual_mov_b32 v30, s9 :: v_dual_mov_b32 v31, s7
+; GFX11-TRUE16-NEXT: v_dual_mov_b32 v32, s6 :: v_dual_mov_b32 v33, s8
+; GFX11-TRUE16-NEXT: v_dual_mov_b32 v34, s4 :: v_dual_mov_b32 v35, s5
+; GFX11-TRUE16-NEXT: .LBB57_5: ; %end
+; GFX11-TRUE16-NEXT: s_delay_alu instid0(VALU_DEP_1) | instskip(NEXT) | instid1(VALU_DEP_3)
+; GFX11-TRUE16-NEXT: v_dual_mov_b32 v35, v35 :: v_dual_mov_b32 v34, v34
+; GFX11-TRUE16-NEXT: v_dual_mov_b32 v33, v33 :: v_dual_mov_b32 v32, v32
+; GFX11-TRUE16-NEXT: v_dual_mov_b32 v31, v31 :: v_dual_mov_b32 v30, v30
+; GFX11-TRUE16-NEXT: v_dual_mov_b32 v29, v29 :: v_dual_mov_b32 v28, v28
+; GFX11-TRUE16-NEXT: v_dual_mov_b32 v27, v27 :: v_dual_mov_b32 v26, v26
+; GFX11-TRUE16-NEXT: v_dual_mov_b32 v25, v25 :: v_dual_mov_b32 v24, v24
+; GFX11-TRUE16-NEXT: v_dual_mov_b32 v23, v23 :: v_dual_mov_b32 v22, v22
+; GFX11-TRUE16-NEXT: v_dual_mov_b32 v21, v21 :: v_dual_mov_b32 v20, v20
+; GFX11-TRUE16-NEXT: v_dual_mov_b32 v19, v19 :: v_dual_mov_b32 v18, v18
+; GFX11-TRUE16-NEXT: v_mov_b16_e32 v0.h, v35.l
+; GFX11-TRUE16-NEXT: v_mov_b16_e32 v1.h, v34.l
+; GFX11-TRUE16-NEXT: v_mov_b16_e32 v2.h, v33.l
+; GFX11-TRUE16-NEXT: v_mov_b16_e32 v3.h, v32.l
+; GFX11-TRUE16-NEXT: v_mov_b16_e32 v4.h, v31.l
+; GFX11-TRUE16-NEXT: v_mov_b16_e32 v5.h, v30.l
+; GFX11-TRUE16-NEXT: v_mov_b16_e32 v6.h, v29.l
+; GFX11-TRUE16-NEXT: v_mov_b16_e32 v7.h, v28.l
+; GFX11-TRUE16-NEXT: v_mov_b16_e32 v8.h, v27.l
+; GFX11-TRUE16-NEXT: v_mov_b16_e32 v9.h, v26.l
+; GFX11-TRUE16-NEXT: v_mov_b16_e32 v10.h, v25.l
+; GFX11-TRUE16-NEXT: v_mov_b16_e32 v11.h, v24.l
+; GFX11-TRUE16-NEXT: v_mov_b16_e32 v12.h, v23.l
+; GFX11-TRUE16-NEXT: v_mov_b16_e32 v13.h, v22.l
+; GFX11-TRUE16-NEXT: v_mov_b16_e32 v14.h, v21.l
+; GFX11-TRUE16-NEXT: v_mov_b16_e32 v15.h, v20.l
+; GFX11-TRUE16-NEXT: v_mov_b16_e32 v16.h, v19.l
+; GFX11-TRUE16-NEXT: v_mov_b16_e32 v17.h, v18.l
+; GFX11-TRUE16-NEXT: s_setpc_b64 s[30:31]
+;
+; GFX11-FAKE16-LABEL: bitcast_v36i16_to_v36f16_scalar:
+; GFX11-FAKE16: ; %bb.0:
+; GFX11-FAKE16-NEXT: s_waitcnt vmcnt(0) expcnt(0) lgkmcnt(0)
+; GFX11-FAKE16-NEXT: v_cmp_ne_u32_e32 vcc_lo, 0, v0
+; GFX11-FAKE16-NEXT: s_lshr_b32 s45, s29, 16
+; GFX11-FAKE16-NEXT: s_lshr_b32 s44, s28, 16
+; GFX11-FAKE16-NEXT: s_lshr_b32 s43, s27, 16
+; GFX11-FAKE16-NEXT: s_lshr_b32 s42, s26, 16
+; GFX11-FAKE16-NEXT: s_lshr_b32 s41, s25, 16
+; GFX11-FAKE16-NEXT: s_lshr_b32 s40, s24, 16
+; GFX11-FAKE16-NEXT: s_lshr_b32 s15, s23, 16
+; GFX11-FAKE16-NEXT: s_lshr_b32 s14, s22, 16
+; GFX11-FAKE16-NEXT: s_lshr_b32 s13, s21, 16
+; GFX11-FAKE16-NEXT: s_lshr_b32 s12, s20, 16
+; GFX11-FAKE16-NEXT: s_lshr_b32 s11, s19, 16
+; GFX11-FAKE16-NEXT: s_lshr_b32 s10, s18, 16
+; GFX11-FAKE16-NEXT: s_lshr_b32 s9, s17, 16
+; GFX11-FAKE16-NEXT: s_lshr_b32 s7, s16, 16
+; GFX11-FAKE16-NEXT: s_lshr_b32 s6, s3, 16
+; GFX11-FAKE16-NEXT: s_lshr_b32 s8, s2, 16
+; GFX11-FAKE16-NEXT: s_lshr_b32 s4, s1, 16
+; GFX11-FAKE16-NEXT: s_lshr_b32 s5, s0, 16
+; GFX11-FAKE16-NEXT: s_mov_b32 s46, 0
+; GFX11-FAKE16-NEXT: s_and_b32 s47, vcc_lo, exec_lo
+; GFX11-FAKE16-NEXT: s_cbranch_scc0 .LBB57_3
+; GFX11-FAKE16-NEXT: ; %bb.1: ; %Flow
+; GFX11-FAKE16-NEXT: s_and_not1_b32 vcc_lo, exec_lo, s46
+; GFX11-FAKE16-NEXT: s_cbranch_vccnz .LBB57_4
+; GFX11-FAKE16-NEXT: .LBB57_2: ; %cmp.true
+; GFX11-FAKE16-NEXT: s_pack_ll_b32_b16 s29, s29, s45
+; GFX11-FAKE16-NEXT: s_pack_ll_b32_b16 s28, s28, s44
+; GFX11-FAKE16-NEXT: s_pack_ll_b32_b16 s27, s27, s43
+; GFX11-FAKE16-NEXT: s_pack_ll_b32_b16 s26, s26, s42
+; GFX11-FAKE16-NEXT: s_pack_ll_b32_b16 s25, s25, s41
+; GFX11-FAKE16-NEXT: s_pack_ll_b32_b16 s24, s24, s40
+; GFX11-FAKE16-NEXT: s_pack_ll_b32_b16 s15, s23, s15
+; GFX11-FAKE16-NEXT: s_pack_ll_b32_b16 s14, s22, s14
+; GFX11-FAKE16-NEXT: s_pack_ll_b32_b16 s13, s21, s13
+; GFX11-FAKE16-NEXT: s_pack_ll_b32_b16 s12, s20, s12
+; GFX11-FAKE16-NEXT: s_pack_ll_b32_b16 s11, s19, s11
+; GFX11-FAKE16-NEXT: s_pack_ll_b32_b16 s10, s18, s10
+; GFX11-FAKE16-NEXT: s_pack_ll_b32_b16 s9, s17, s9
+; GFX11-FAKE16-NEXT: s_pack_ll_b32_b16 s7, s16, s7
+; GFX11-FAKE16-NEXT: s_pack_ll_b32_b16 s3, s3, s6
+; GFX11-FAKE16-NEXT: s_pack_ll_b32_b16 s2, s2, s8
+; GFX11-FAKE16-NEXT: s_pack_ll_b32_b16 s0, s0, s5
+; GFX11-FAKE16-NEXT: s_pack_ll_b32_b16 s1, s1, s4
+; GFX11-FAKE16-NEXT: v_pk_add_u16 v13, s29, 3 op_sel_hi:[1,0]
+; GFX11-FAKE16-NEXT: v_pk_add_u16 v14, s28, 3 op_sel_hi:[1,0]
+; GFX11-FAKE16-NEXT: v_pk_add_u16 v15, s27, 3 op_sel_hi:[1,0]
+; GFX11-FAKE16-NEXT: v_pk_add_u16 v16, s26, 3 op_sel_hi:[1,0]
+; GFX11-FAKE16-NEXT: v_pk_add_u16 v17, s25, 3 op_sel_hi:[1,0]
+; GFX11-FAKE16-NEXT: v_pk_add_u16 v8, s24, 3 op_sel_hi:[1,0]
+; GFX11-FAKE16-NEXT: v_pk_add_u16 v9, s15, 3 op_sel_hi:[1,0]
+; GFX11-FAKE16-NEXT: v_pk_add_u16 v10, s14, 3 op_sel_hi:[1,0]
+; GFX11-FAKE16-NEXT: v_pk_add_u16 v11, s13, 3 op_sel_hi:[1,0]
+; GFX11-FAKE16-NEXT: v_pk_add_u16 v12, s12, 3 op_sel_hi:[1,0]
+; GFX11-FAKE16-NEXT: v_pk_add_u16 v3, s11, 3 op_sel_hi:[1,0]
+; GFX11-FAKE16-NEXT: v_pk_add_u16 v4, s10, 3 op_sel_hi:[1,0]
+; GFX11-FAKE16-NEXT: v_pk_add_u16 v5, s9, 3 op_sel_hi:[1,0]
+; GFX11-FAKE16-NEXT: v_pk_add_u16 v2, s0, 3 op_sel_hi:[1,0]
+; GFX11-FAKE16-NEXT: v_pk_add_u16 v1, s1, 3 op_sel_hi:[1,0]
+; GFX11-FAKE16-NEXT: v_pk_add_u16 v0, s2, 3 op_sel_hi:[1,0]
+; GFX11-FAKE16-NEXT: v_pk_add_u16 v7, s3, 3 op_sel_hi:[1,0]
+; GFX11-FAKE16-NEXT: v_pk_add_u16 v6, s7, 3 op_sel_hi:[1,0]
+; GFX11-FAKE16-NEXT: v_lshrrev_b32_e32 v35, 16, v2
+; GFX11-FAKE16-NEXT: v_lshrrev_b32_e32 v34, 16, v1
+; GFX11-FAKE16-NEXT: v_lshrrev_b32_e32 v33, 16, v0
+; GFX11-FAKE16-NEXT: v_lshrrev_b32_e32 v32, 16, v7
+; GFX11-FAKE16-NEXT: v_lshrrev_b32_e32 v31, 16, v6
+; GFX11-FAKE16-NEXT: v_lshrrev_b32_e32 v30, 16, v5
+; GFX11-FAKE16-NEXT: v_lshrrev_b32_e32 v29, 16, v4
+; GFX11-FAKE16-NEXT: v_lshrrev_b32_e32 v28, 16, v3
+; GFX11-FAKE16-NEXT: v_lshrrev_b32_e32 v27, 16, v12
+; GFX11-FAKE16-NEXT: v_lshrrev_b32_e32 v26, 16, v11
+; GFX11-FAKE16-NEXT: v_lshrrev_b32_e32 v25, 16, v10
+; GFX11-FAKE16-NEXT: v_lshrrev_b32_e32 v24, 16, v9
+; GFX11-FAKE16-NEXT: v_lshrrev_b32_e32 v23, 16, v8
+; GFX11-FAKE16-NEXT: v_lshrrev_b32_e32 v22, 16, v17
+; GFX11-FAKE16-NEXT: v_lshrrev_b32_e32 v21, 16, v16
+; GFX11-FAKE16-NEXT: v_lshrrev_b32_e32 v20, 16, v15
+; GFX11-FAKE16-NEXT: v_lshrrev_b32_e32 v19, 16, v14
+; GFX11-FAKE16-NEXT: v_lshrrev_b32_e32 v18, 16, v13
+; GFX11-FAKE16-NEXT: s_branch .LBB57_5
+; GFX11-FAKE16-NEXT: .LBB57_3:
+; GFX11-FAKE16-NEXT: s_branch .LBB57_2
+; GFX11-FAKE16-NEXT: .LBB57_4:
+; GFX11-FAKE16-NEXT: v_dual_mov_b32 v13, s29 :: v_dual_mov_b32 v14, s28
+; GFX11-FAKE16-NEXT: v_dual_mov_b32 v15, s27 :: v_dual_mov_b32 v16, s26
+; GFX11-FAKE16-NEXT: v_dual_mov_b32 v17, s25 :: v_dual_mov_b32 v8, s24
+; GFX11-FAKE16-NEXT: v_dual_mov_b32 v9, s23 :: v_dual_mov_b32 v10, s22
+; GFX11-FAKE16-NEXT: v_dual_mov_b32 v11, s21 :: v_dual_mov_b32 v12, s20
+; GFX11-FAKE16-NEXT: v_dual_mov_b32 v3, s19 :: v_dual_mov_b32 v4, s18
+; GFX11-FAKE16-NEXT: v_dual_mov_b32 v5, s17 :: v_dual_mov_b32 v6, s16
+; GFX11-FAKE16-NEXT: v_dual_mov_b32 v7, s3 :: v_dual_mov_b32 v0, s2
+; GFX11-FAKE16-NEXT: v_dual_mov_b32 v1, s1 :: v_dual_mov_b32 v2, s0
+; GFX11-FAKE16-NEXT: v_dual_mov_b32 v18, s45 :: v_dual_mov_b32 v19, s44
+; GFX11-FAKE16-NEXT: v_dual_mov_b32 v20, s43 :: v_dual_mov_b32 v21, s42
+; GFX11-FAKE16-NEXT: v_dual_mov_b32 v22, s41 :: v_dual_mov_b32 v23, s40
+; GFX11-FAKE16-NEXT: v_dual_mov_b32 v24, s15 :: v_dual_mov_b32 v25, s14
+; GFX11-FAKE16-NEXT: v_dual_mov_b32 v26, s13 :: v_dual_mov_b32 v27, s12
+; GFX11-FAKE16-NEXT: v_dual_mov_b32 v28, s11 :: v_dual_mov_b32 v29, s10
+; GFX11-FAKE16-NEXT: v_dual_mov_b32 v30, s9 :: v_dual_mov_b32 v31, s7
+; GFX11-FAKE16-NEXT: v_dual_mov_b32 v32, s6 :: v_dual_mov_b32 v33, s8
+; GFX11-FAKE16-NEXT: v_dual_mov_b32 v34, s4 :: v_dual_mov_b32 v35, s5
+; GFX11-FAKE16-NEXT: .LBB57_5: ; %end
+; GFX11-FAKE16-NEXT: v_and_b32_e32 v2, 0xffff, v2
+; GFX11-FAKE16-NEXT: v_and_b32_e32 v1, 0xffff, v1
+; GFX11-FAKE16-NEXT: v_and_b32_e32 v36, 0xffff, v0
+; GFX11-FAKE16-NEXT: v_and_b32_e32 v7, 0xffff, v7
+; GFX11-FAKE16-NEXT: v_and_b32_e32 v6, 0xffff, v6
+; GFX11-FAKE16-NEXT: v_lshl_or_b32 v0, v35, 16, v2
+; GFX11-FAKE16-NEXT: v_lshl_or_b32 v1, v34, 16, v1
+; GFX11-FAKE16-NEXT: v_lshl_or_b32 v2, v33, 16, v36
+; GFX11-FAKE16-NEXT: v_and_b32_e32 v33, 0xffff, v4
+; GFX11-FAKE16-NEXT: v_and_b32_e32 v34, 0xffff, v3
+; GFX11-FAKE16-NEXT: v_lshl_or_b32 v3, v32, 16, v7
+; GFX11-FAKE16-NEXT: v_lshl_or_b32 v4, v31, 16, v6
+; GFX11-FAKE16-NEXT: v_and_b32_e32 v12, 0xffff, v12
+; GFX11-FAKE16-NEXT: v_lshl_or_b32 v6, v29, 16, v33
+; GFX11-FAKE16-NEXT: v_lshl_or_b32 v7, v28, 16, v34
+; GFX11-FAKE16-NEXT: v_and_b32_e32 v11, 0xffff, v11
+; GFX11-FAKE16-NEXT: v_and_b32_e32 v28, 0xffff, v9
+; GFX11-FAKE16-NEXT: v_and_b32_e32 v29, 0xffff, v8
+; GFX11-FAKE16-NEXT: v_and_b32_e32 v5, 0xffff, v5
+; GFX11-FAKE16-NEXT: v_and_b32_e32 v10, 0xffff, v10
+; GFX11-FAKE16-NEXT: v_lshl_or_b32 v8, v27, 16, v12
+; GFX11-FAKE16-NEXT: v_lshl_or_b32 v9, v26, 16, v11
+; GFX11-FAKE16-NEXT: v_lshl_or_b32 v11, v24, 16, v28
+; GFX11-FAKE16-NEXT: v_lshl_or_b32 v12, v23, 16, v29
+; GFX11-FAKE16-NEXT: v_and_b32_e32 v17, 0xffff, v17
+; GFX11-FAKE16-NEXT: v_and_b32_e32 v16, 0xffff, v16
+; GFX11-FAKE16-NEXT: v_and_b32_e32 v15, 0xffff, v15
+; GFX11-FAKE16-NEXT: v_and_b32_e32 v23, 0xffff, v14
+; GFX11-FAKE16-NEXT: v_and_b32_e32 v24, 0xffff, v13
+; GFX11-FAKE16-NEXT: v_lshl_or_b32 v5, v30, 16, v5
+; GFX11-FAKE16-NEXT: v_lshl_or_b32 v10, v25, 16, v10
+; GFX11-FAKE16-NEXT: v_lshl_or_b32 v13, v22, 16, v17
+; GFX11-FAKE16-NEXT: v_lshl_or_b32 v14, v21, 16, v16
+; GFX11-FAKE16-NEXT: v_lshl_or_b32 v15, v20, 16, v15
+; GFX11-FAKE16-NEXT: v_lshl_or_b32 v16, v19, 16, v23
+; GFX11-FAKE16-NEXT: v_lshl_or_b32 v17, v18, 16, v24
+; GFX11-FAKE16-NEXT: s_setpc_b64 s[30:31]
%cmp = icmp eq i32 %b, 0
br i1 %cmp, label %cmp.true, label %cmp.false
@@ -29137,149 +29805,285 @@ define inreg <36 x i16> @bitcast_v36f16_to_v36i16_scalar(<36 x half> inreg %a, i
; GFX9-NEXT: v_mov_b32_e32 v3, v19
; GFX9-NEXT: s_setpc_b64 s[30:31]
;
-; GFX11-LABEL: bitcast_v36f16_to_v36i16_scalar:
-; GFX11: ; %bb.0:
-; GFX11-NEXT: s_waitcnt vmcnt(0) expcnt(0) lgkmcnt(0)
-; GFX11-NEXT: v_cmp_ne_u32_e32 vcc_lo, 0, v0
-; GFX11-NEXT: s_lshr_b32 s45, s29, 16
-; GFX11-NEXT: s_lshr_b32 s44, s28, 16
-; GFX11-NEXT: s_lshr_b32 s43, s27, 16
-; GFX11-NEXT: s_lshr_b32 s42, s26, 16
-; GFX11-NEXT: s_lshr_b32 s41, s25, 16
-; GFX11-NEXT: s_lshr_b32 s40, s24, 16
-; GFX11-NEXT: s_lshr_b32 s15, s23, 16
-; GFX11-NEXT: s_lshr_b32 s14, s22, 16
-; GFX11-NEXT: s_lshr_b32 s13, s21, 16
-; GFX11-NEXT: s_lshr_b32 s12, s20, 16
-; GFX11-NEXT: s_lshr_b32 s11, s19, 16
-; GFX11-NEXT: s_lshr_b32 s10, s18, 16
-; GFX11-NEXT: s_lshr_b32 s9, s17, 16
-; GFX11-NEXT: s_lshr_b32 s7, s16, 16
-; GFX11-NEXT: s_lshr_b32 s6, s3, 16
-; GFX11-NEXT: s_lshr_b32 s8, s2, 16
-; GFX11-NEXT: s_lshr_b32 s4, s1, 16
-; GFX11-NEXT: s_lshr_b32 s5, s0, 16
-; GFX11-NEXT: s_mov_b32 s46, 0
-; GFX11-NEXT: s_and_b32 s47, vcc_lo, exec_lo
-; GFX11-NEXT: s_cbranch_scc0 .LBB59_3
-; GFX11-NEXT: ; %bb.1: ; %Flow
-; GFX11-NEXT: s_and_not1_b32 vcc_lo, exec_lo, s46
-; GFX11-NEXT: s_cbranch_vccnz .LBB59_4
-; GFX11-NEXT: .LBB59_2: ; %cmp.true
-; GFX11-NEXT: s_pack_ll_b32_b16 s29, s29, s45
-; GFX11-NEXT: s_pack_ll_b32_b16 s28, s28, s44
-; GFX11-NEXT: s_pack_ll_b32_b16 s27, s27, s43
-; GFX11-NEXT: s_pack_ll_b32_b16 s26, s26, s42
-; GFX11-NEXT: s_pack_ll_b32_b16 s25, s25, s41
-; GFX11-NEXT: s_pack_ll_b32_b16 s24, s24, s40
-; GFX11-NEXT: s_pack_ll_b32_b16 s15, s23, s15
-; GFX11-NEXT: s_pack_ll_b32_b16 s14, s22, s14
-; GFX11-NEXT: s_pack_ll_b32_b16 s13, s21, s13
-; GFX11-NEXT: s_pack_ll_b32_b16 s12, s20, s12
-; GFX11-NEXT: s_pack_ll_b32_b16 s11, s19, s11
-; GFX11-NEXT: s_pack_ll_b32_b16 s10, s18, s10
-; GFX11-NEXT: s_pack_ll_b32_b16 s9, s17, s9
-; GFX11-NEXT: s_pack_ll_b32_b16 s7, s16, s7
-; GFX11-NEXT: s_pack_ll_b32_b16 s3, s3, s6
-; GFX11-NEXT: s_pack_ll_b32_b16 s2, s2, s8
-; GFX11-NEXT: s_pack_ll_b32_b16 s0, s0, s5
-; GFX11-NEXT: s_pack_ll_b32_b16 s1, s1, s4
-; GFX11-NEXT: v_pk_add_f16 v13, 0x200, s29 op_sel_hi:[0,1]
-; GFX11-NEXT: v_pk_add_f16 v14, 0x200, s28 op_sel_hi:[0,1]
-; GFX11-NEXT: v_pk_add_f16 v15, 0x200, s27 op_sel_hi:[0,1]
-; GFX11-NEXT: v_pk_add_f16 v16, 0x200, s26 op_sel_hi:[0,1]
-; GFX11-NEXT: v_pk_add_f16 v17, 0x200, s25 op_sel_hi:[0,1]
-; GFX11-NEXT: v_pk_add_f16 v8, 0x200, s24 op_sel_hi:[0,1]
-; GFX11-NEXT: v_pk_add_f16 v9, 0x200, s15 op_sel_hi:[0,1]
-; GFX11-NEXT: v_pk_add_f16 v10, 0x200, s14 op_sel_hi:[0,1]
-; GFX11-NEXT: v_pk_add_f16 v11, 0x200, s13 op_sel_hi:[0,1]
-; GFX11-NEXT: v_pk_add_f16 v12, 0x200, s12 op_sel_hi:[0,1]
-; GFX11-NEXT: v_pk_add_f16 v3, 0x200, s11 op_sel_hi:[0,1]
-; GFX11-NEXT: v_pk_add_f16 v4, 0x200, s10 op_sel_hi:[0,1]
-; GFX11-NEXT: v_pk_add_f16 v5, 0x200, s9 op_sel_hi:[0,1]
-; GFX11-NEXT: v_pk_add_f16 v2, 0x200, s0 op_sel_hi:[0,1]
-; GFX11-NEXT: v_pk_add_f16 v1, 0x200, s1 op_sel_hi:[0,1]
-; GFX11-NEXT: v_pk_add_f16 v0, 0x200, s2 op_sel_hi:[0,1]
-; GFX11-NEXT: v_pk_add_f16 v7, 0x200, s3 op_sel_hi:[0,1]
-; GFX11-NEXT: v_pk_add_f16 v6, 0x200, s7 op_sel_hi:[0,1]
-; GFX11-NEXT: v_lshrrev_b32_e32 v35, 16, v2
-; GFX11-NEXT: v_lshrrev_b32_e32 v34, 16, v1
-; GFX11-NEXT: v_lshrrev_b32_e32 v33, 16, v0
-; GFX11-NEXT: v_lshrrev_b32_e32 v32, 16, v7
-; GFX11-NEXT: v_lshrrev_b32_e32 v31, 16, v6
-; GFX11-NEXT: v_lshrrev_b32_e32 v30, 16, v5
-; GFX11-NEXT: v_lshrrev_b32_e32 v29, 16, v4
-; GFX11-NEXT: v_lshrrev_b32_e32 v28, 16, v3
-; GFX11-NEXT: v_lshrrev_b32_e32 v27, 16, v12
-; GFX11-NEXT: v_lshrrev_b32_e32 v26, 16, v11
-; GFX11-NEXT: v_lshrrev_b32_e32 v25, 16, v10
-; GFX11-NEXT: v_lshrrev_b32_e32 v24, 16, v9
-; GFX11-NEXT: v_lshrrev_b32_e32 v23, 16, v8
-; GFX11-NEXT: v_lshrrev_b32_e32 v22, 16, v17
-; GFX11-NEXT: v_lshrrev_b32_e32 v21, 16, v16
-; GFX11-NEXT: v_lshrrev_b32_e32 v20, 16, v15
-; GFX11-NEXT: v_lshrrev_b32_e32 v19, 16, v14
-; GFX11-NEXT: v_lshrrev_b32_e32 v18, 16, v13
-; GFX11-NEXT: s_branch .LBB59_5
-; GFX11-NEXT: .LBB59_3:
-; GFX11-NEXT: s_branch .LBB59_2
-; GFX11-NEXT: .LBB59_4:
-; GFX11-NEXT: v_dual_mov_b32 v13, s29 :: v_dual_mov_b32 v14, s28
-; GFX11-NEXT: v_dual_mov_b32 v15, s27 :: v_dual_mov_b32 v16, s26
-; GFX11-NEXT: v_dual_mov_b32 v17, s25 :: v_dual_mov_b32 v8, s24
-; GFX11-NEXT: v_dual_mov_b32 v9, s23 :: v_dual_mov_b32 v10, s22
-; GFX11-NEXT: v_dual_mov_b32 v11, s21 :: v_dual_mov_b32 v12, s20
-; GFX11-NEXT: v_dual_mov_b32 v3, s19 :: v_dual_mov_b32 v4, s18
-; GFX11-NEXT: v_dual_mov_b32 v5, s17 :: v_dual_mov_b32 v6, s16
-; GFX11-NEXT: v_dual_mov_b32 v7, s3 :: v_dual_mov_b32 v0, s2
-; GFX11-NEXT: v_dual_mov_b32 v1, s1 :: v_dual_mov_b32 v2, s0
-; GFX11-NEXT: v_dual_mov_b32 v18, s45 :: v_dual_mov_b32 v19, s44
-; GFX11-NEXT: v_dual_mov_b32 v20, s43 :: v_dual_mov_b32 v21, s42
-; GFX11-NEXT: v_dual_mov_b32 v22, s41 :: v_dual_mov_b32 v23, s40
-; GFX11-NEXT: v_dual_mov_b32 v24, s15 :: v_dual_mov_b32 v25, s14
-; GFX11-NEXT: v_dual_mov_b32 v26, s13 :: v_dual_mov_b32 v27, s12
-; GFX11-NEXT: v_dual_mov_b32 v28, s11 :: v_dual_mov_b32 v29, s10
-; GFX11-NEXT: v_dual_mov_b32 v30, s9 :: v_dual_mov_b32 v31, s7
-; GFX11-NEXT: v_dual_mov_b32 v32, s6 :: v_dual_mov_b32 v33, s8
-; GFX11-NEXT: v_dual_mov_b32 v34, s4 :: v_dual_mov_b32 v35, s5
-; GFX11-NEXT: .LBB59_5: ; %end
-; GFX11-NEXT: v_and_b32_e32 v2, 0xffff, v2
-; GFX11-NEXT: v_and_b32_e32 v1, 0xffff, v1
-; GFX11-NEXT: v_and_b32_e32 v36, 0xffff, v0
-; GFX11-NEXT: v_and_b32_e32 v7, 0xffff, v7
-; GFX11-NEXT: v_and_b32_e32 v6, 0xffff, v6
-; GFX11-NEXT: v_lshl_or_b32 v0, v35, 16, v2
-; GFX11-NEXT: v_lshl_or_b32 v1, v34, 16, v1
-; GFX11-NEXT: v_lshl_or_b32 v2, v33, 16, v36
-; GFX11-NEXT: v_and_b32_e32 v33, 0xffff, v4
-; GFX11-NEXT: v_and_b32_e32 v34, 0xffff, v3
-; GFX11-NEXT: v_lshl_or_b32 v3, v32, 16, v7
-; GFX11-NEXT: v_lshl_or_b32 v4, v31, 16, v6
-; GFX11-NEXT: v_and_b32_e32 v12, 0xffff, v12
-; GFX11-NEXT: v_lshl_or_b32 v6, v29, 16, v33
-; GFX11-NEXT: v_lshl_or_b32 v7, v28, 16, v34
-; GFX11-NEXT: v_and_b32_e32 v11, 0xffff, v11
-; GFX11-NEXT: v_and_b32_e32 v28, 0xffff, v9
-; GFX11-NEXT: v_and_b32_e32 v29, 0xffff, v8
-; GFX11-NEXT: v_and_b32_e32 v5, 0xffff, v5
-; GFX11-NEXT: v_and_b32_e32 v10, 0xffff, v10
-; GFX11-NEXT: v_lshl_or_b32 v8, v27, 16, v12
-; GFX11-NEXT: v_lshl_or_b32 v9, v26, 16, v11
-; GFX11-NEXT: v_lshl_or_b32 v11, v24, 16, v28
-; GFX11-NEXT: v_lshl_or_b32 v12, v23, 16, v29
-; GFX11-NEXT: v_and_b32_e32 v17, 0xffff, v17
-; GFX11-NEXT: v_and_b32_e32 v16, 0xffff, v16
-; GFX11-NEXT: v_and_b32_e32 v15, 0xffff, v15
-; GFX11-NEXT: v_and_b32_e32 v23, 0xffff, v14
-; GFX11-NEXT: v_and_b32_e32 v24, 0xffff, v13
-; GFX11-NEXT: v_lshl_or_b32 v5, v30, 16, v5
-; GFX11-NEXT: v_lshl_or_b32 v10, v25, 16, v10
-; GFX11-NEXT: v_lshl_or_b32 v13, v22, 16, v17
-; GFX11-NEXT: v_lshl_or_b32 v14, v21, 16, v16
-; GFX11-NEXT: v_lshl_or_b32 v15, v20, 16, v15
-; GFX11-NEXT: v_lshl_or_b32 v16, v19, 16, v23
-; GFX11-NEXT: v_lshl_or_b32 v17, v18, 16, v24
-; GFX11-NEXT: s_setpc_b64 s[30:31]
+; GFX11-TRUE16-LABEL: bitcast_v36f16_to_v36i16_scalar:
+; GFX11-TRUE16: ; %bb.0:
+; GFX11-TRUE16-NEXT: s_waitcnt vmcnt(0) expcnt(0) lgkmcnt(0)
+; GFX11-TRUE16-NEXT: v_cmp_ne_u32_e32 vcc_lo, 0, v0
+; GFX11-TRUE16-NEXT: s_lshr_b32 s45, s29, 16
+; GFX11-TRUE16-NEXT: s_lshr_b32 s44, s28, 16
+; GFX11-TRUE16-NEXT: s_lshr_b32 s43, s27, 16
+; GFX11-TRUE16-NEXT: s_lshr_b32 s42, s26, 16
+; GFX11-TRUE16-NEXT: s_lshr_b32 s41, s25, 16
+; GFX11-TRUE16-NEXT: s_lshr_b32 s40, s24, 16
+; GFX11-TRUE16-NEXT: s_lshr_b32 s15, s23, 16
+; GFX11-TRUE16-NEXT: s_lshr_b32 s14, s22, 16
+; GFX11-TRUE16-NEXT: s_lshr_b32 s13, s21, 16
+; GFX11-TRUE16-NEXT: s_lshr_b32 s12, s20, 16
+; GFX11-TRUE16-NEXT: s_lshr_b32 s11, s19, 16
+; GFX11-TRUE16-NEXT: s_lshr_b32 s10, s18, 16
+; GFX11-TRUE16-NEXT: s_lshr_b32 s9, s17, 16
+; GFX11-TRUE16-NEXT: s_lshr_b32 s7, s16, 16
+; GFX11-TRUE16-NEXT: s_lshr_b32 s6, s3, 16
+; GFX11-TRUE16-NEXT: s_lshr_b32 s8, s2, 16
+; GFX11-TRUE16-NEXT: s_lshr_b32 s4, s1, 16
+; GFX11-TRUE16-NEXT: s_lshr_b32 s5, s0, 16
+; GFX11-TRUE16-NEXT: s_mov_b32 s46, 0
+; GFX11-TRUE16-NEXT: s_and_b32 s47, vcc_lo, exec_lo
+; GFX11-TRUE16-NEXT: s_cbranch_scc0 .LBB59_3
+; GFX11-TRUE16-NEXT: ; %bb.1: ; %Flow
+; GFX11-TRUE16-NEXT: s_and_not1_b32 vcc_lo, exec_lo, s46
+; GFX11-TRUE16-NEXT: s_cbranch_vccnz .LBB59_4
+; GFX11-TRUE16-NEXT: .LBB59_2: ; %cmp.true
+; GFX11-TRUE16-NEXT: s_pack_ll_b32_b16 s29, s29, s45
+; GFX11-TRUE16-NEXT: s_pack_ll_b32_b16 s28, s28, s44
+; GFX11-TRUE16-NEXT: s_pack_ll_b32_b16 s27, s27, s43
+; GFX11-TRUE16-NEXT: s_pack_ll_b32_b16 s26, s26, s42
+; GFX11-TRUE16-NEXT: s_pack_ll_b32_b16 s25, s25, s41
+; GFX11-TRUE16-NEXT: s_pack_ll_b32_b16 s24, s24, s40
+; GFX11-TRUE16-NEXT: s_pack_ll_b32_b16 s15, s23, s15
+; GFX11-TRUE16-NEXT: s_pack_ll_b32_b16 s14, s22, s14
+; GFX11-TRUE16-NEXT: s_pack_ll_b32_b16 s13, s21, s13
+; GFX11-TRUE16-NEXT: s_pack_ll_b32_b16 s12, s20, s12
+; GFX11-TRUE16-NEXT: s_pack_ll_b32_b16 s11, s19, s11
+; GFX11-TRUE16-NEXT: s_pack_ll_b32_b16 s10, s18, s10
+; GFX11-TRUE16-NEXT: s_pack_ll_b32_b16 s9, s17, s9
+; GFX11-TRUE16-NEXT: s_pack_ll_b32_b16 s7, s16, s7
+; GFX11-TRUE16-NEXT: s_pack_ll_b32_b16 s3, s3, s6
+; GFX11-TRUE16-NEXT: s_pack_ll_b32_b16 s2, s2, s8
+; GFX11-TRUE16-NEXT: s_pack_ll_b32_b16 s0, s0, s5
+; GFX11-TRUE16-NEXT: s_pack_ll_b32_b16 s1, s1, s4
+; GFX11-TRUE16-NEXT: v_pk_add_f16 v17, 0x200, s29 op_sel_hi:[0,1]
+; GFX11-TRUE16-NEXT: v_pk_add_f16 v16, 0x200, s28 op_sel_hi:[0,1]
+; GFX11-TRUE16-NEXT: v_pk_add_f16 v15, 0x200, s27 op_sel_hi:[0,1]
+; GFX11-TRUE16-NEXT: v_pk_add_f16 v14, 0x200, s26 op_sel_hi:[0,1]
+; GFX11-TRUE16-NEXT: v_pk_add_f16 v13, 0x200, s25 op_sel_hi:[0,1]
+; GFX11-TRUE16-NEXT: v_pk_add_f16 v12, 0x200, s24 op_sel_hi:[0,1]
+; GFX11-TRUE16-NEXT: v_pk_add_f16 v11, 0x200, s15 op_sel_hi:[0,1]
+; GFX11-TRUE16-NEXT: v_pk_add_f16 v10, 0x200, s14 op_sel_hi:[0,1]
+; GFX11-TRUE16-NEXT: v_pk_add_f16 v9, 0x200, s13 op_sel_hi:[0,1]
+; GFX11-TRUE16-NEXT: v_pk_add_f16 v8, 0x200, s12 op_sel_hi:[0,1]
+; GFX11-TRUE16-NEXT: v_pk_add_f16 v7, 0x200, s11 op_sel_hi:[0,1]
+; GFX11-TRUE16-NEXT: v_pk_add_f16 v6, 0x200, s10 op_sel_hi:[0,1]
+; GFX11-TRUE16-NEXT: v_pk_add_f16 v5, 0x200, s9 op_sel_hi:[0,1]
+; GFX11-TRUE16-NEXT: v_pk_add_f16 v0, 0x200, s0 op_sel_hi:[0,1]
+; GFX11-TRUE16-NEXT: v_pk_add_f16 v1, 0x200, s1 op_sel_hi:[0,1]
+; GFX11-TRUE16-NEXT: v_pk_add_f16 v2, 0x200, s2 op_sel_hi:[0,1]
+; GFX11-TRUE16-NEXT: v_pk_add_f16 v3, 0x200, s3 op_sel_hi:[0,1]
+; GFX11-TRUE16-NEXT: v_pk_add_f16 v4, 0x200, s7 op_sel_hi:[0,1]
+; GFX11-TRUE16-NEXT: v_lshrrev_b32_e32 v35, 16, v0
+; GFX11-TRUE16-NEXT: v_lshrrev_b32_e32 v34, 16, v1
+; GFX11-TRUE16-NEXT: v_lshrrev_b32_e32 v33, 16, v2
+; GFX11-TRUE16-NEXT: v_lshrrev_b32_e32 v32, 16, v3
+; GFX11-TRUE16-NEXT: v_lshrrev_b32_e32 v31, 16, v4
+; GFX11-TRUE16-NEXT: v_lshrrev_b32_e32 v30, 16, v5
+; GFX11-TRUE16-NEXT: v_lshrrev_b32_e32 v29, 16, v6
+; GFX11-TRUE16-NEXT: v_lshrrev_b32_e32 v28, 16, v7
+; GFX11-TRUE16-NEXT: v_lshrrev_b32_e32 v27, 16, v8
+; GFX11-TRUE16-NEXT: v_lshrrev_b32_e32 v26, 16, v9
+; GFX11-TRUE16-NEXT: v_lshrrev_b32_e32 v25, 16, v10
+; GFX11-TRUE16-NEXT: v_lshrrev_b32_e32 v24, 16, v11
+; GFX11-TRUE16-NEXT: v_lshrrev_b32_e32 v23, 16, v12
+; GFX11-TRUE16-NEXT: v_lshrrev_b32_e32 v22, 16, v13
+; GFX11-TRUE16-NEXT: v_lshrrev_b32_e32 v21, 16, v14
+; GFX11-TRUE16-NEXT: v_lshrrev_b32_e32 v20, 16, v15
+; GFX11-TRUE16-NEXT: v_lshrrev_b32_e32 v19, 16, v16
+; GFX11-TRUE16-NEXT: v_lshrrev_b32_e32 v18, 16, v17
+; GFX11-TRUE16-NEXT: s_branch .LBB59_5
+; GFX11-TRUE16-NEXT: .LBB59_3:
+; GFX11-TRUE16-NEXT: s_branch .LBB59_2
+; GFX11-TRUE16-NEXT: .LBB59_4:
+; GFX11-TRUE16-NEXT: v_dual_mov_b32 v17, s29 :: v_dual_mov_b32 v16, s28
+; GFX11-TRUE16-NEXT: v_dual_mov_b32 v15, s27 :: v_dual_mov_b32 v14, s26
+; GFX11-TRUE16-NEXT: v_dual_mov_b32 v13, s25 :: v_dual_mov_b32 v12, s24
+; GFX11-TRUE16-NEXT: v_dual_mov_b32 v11, s23 :: v_dual_mov_b32 v10, s22
+; GFX11-TRUE16-NEXT: v_dual_mov_b32 v9, s21 :: v_dual_mov_b32 v8, s20
+; GFX11-TRUE16-NEXT: v_dual_mov_b32 v7, s19 :: v_dual_mov_b32 v6, s18
+; GFX11-TRUE16-NEXT: v_dual_mov_b32 v5, s17 :: v_dual_mov_b32 v4, s16
+; GFX11-TRUE16-NEXT: v_dual_mov_b32 v3, s3 :: v_dual_mov_b32 v2, s2
+; GFX11-TRUE16-NEXT: v_dual_mov_b32 v1, s1 :: v_dual_mov_b32 v0, s0
+; GFX11-TRUE16-NEXT: v_dual_mov_b32 v18, s45 :: v_dual_mov_b32 v19, s44
+; GFX11-TRUE16-NEXT: v_dual_mov_b32 v20, s43 :: v_dual_mov_b32 v21, s42
+; GFX11-TRUE16-NEXT: v_dual_mov_b32 v22, s41 :: v_dual_mov_b32 v23, s40
+; GFX11-TRUE16-NEXT: v_dual_mov_b32 v24, s15 :: v_dual_mov_b32 v25, s14
+; GFX11-TRUE16-NEXT: v_dual_mov_b32 v26, s13 :: v_dual_mov_b32 v27, s12
+; GFX11-TRUE16-NEXT: v_dual_mov_b32 v28, s11 :: v_dual_mov_b32 v29, s10
+; GFX11-TRUE16-NEXT: v_dual_mov_b32 v30, s9 :: v_dual_mov_b32 v31, s7
+; GFX11-TRUE16-NEXT: v_dual_mov_b32 v32, s6 :: v_dual_mov_b32 v33, s8
+; GFX11-TRUE16-NEXT: v_dual_mov_b32 v34, s4 :: v_dual_mov_b32 v35, s5
+; GFX11-TRUE16-NEXT: .LBB59_5: ; %end
+; GFX11-TRUE16-NEXT: s_delay_alu instid0(VALU_DEP_1) | instskip(NEXT) | instid1(VALU_DEP_3)
+; GFX11-TRUE16-NEXT: v_dual_mov_b32 v35, v35 :: v_dual_mov_b32 v34, v34
+; GFX11-TRUE16-NEXT: v_dual_mov_b32 v33, v33 :: v_dual_mov_b32 v32, v32
+; GFX11-TRUE16-NEXT: v_dual_mov_b32 v31, v31 :: v_dual_mov_b32 v30, v30
+; GFX11-TRUE16-NEXT: v_dual_mov_b32 v29, v29 :: v_dual_mov_b32 v28, v28
+; GFX11-TRUE16-NEXT: v_dual_mov_b32 v27, v27 :: v_dual_mov_b32 v26, v26
+; GFX11-TRUE16-NEXT: v_dual_mov_b32 v25, v25 :: v_dual_mov_b32 v24, v24
+; GFX11-TRUE16-NEXT: v_dual_mov_b32 v23, v23 :: v_dual_mov_b32 v22, v22
+; GFX11-TRUE16-NEXT: v_dual_mov_b32 v21, v21 :: v_dual_mov_b32 v20, v20
+; GFX11-TRUE16-NEXT: v_dual_mov_b32 v19, v19 :: v_dual_mov_b32 v18, v18
+; GFX11-TRUE16-NEXT: v_mov_b16_e32 v0.h, v35.l
+; GFX11-TRUE16-NEXT: v_mov_b16_e32 v1.h, v34.l
+; GFX11-TRUE16-NEXT: v_mov_b16_e32 v2.h, v33.l
+; GFX11-TRUE16-NEXT: v_mov_b16_e32 v3.h, v32.l
+; GFX11-TRUE16-NEXT: v_mov_b16_e32 v4.h, v31.l
+; GFX11-TRUE16-NEXT: v_mov_b16_e32 v5.h, v30.l
+; GFX11-TRUE16-NEXT: v_mov_b16_e32 v6.h, v29.l
+; GFX11-TRUE16-NEXT: v_mov_b16_e32 v7.h, v28.l
+; GFX11-TRUE16-NEXT: v_mov_b16_e32 v8.h, v27.l
+; GFX11-TRUE16-NEXT: v_mov_b16_e32 v9.h, v26.l
+; GFX11-TRUE16-NEXT: v_mov_b16_e32 v10.h, v25.l
+; GFX11-TRUE16-NEXT: v_mov_b16_e32 v11.h, v24.l
+; GFX11-TRUE16-NEXT: v_mov_b16_e32 v12.h, v23.l
+; GFX11-TRUE16-NEXT: v_mov_b16_e32 v13.h, v22.l
+; GFX11-TRUE16-NEXT: v_mov_b16_e32 v14.h, v21.l
+; GFX11-TRUE16-NEXT: v_mov_b16_e32 v15.h, v20.l
+; GFX11-TRUE16-NEXT: v_mov_b16_e32 v16.h, v19.l
+; GFX11-TRUE16-NEXT: v_mov_b16_e32 v17.h, v18.l
+; GFX11-TRUE16-NEXT: s_setpc_b64 s[30:31]
+;
+; GFX11-FAKE16-LABEL: bitcast_v36f16_to_v36i16_scalar:
+; GFX11-FAKE16: ; %bb.0:
+; GFX11-FAKE16-NEXT: s_waitcnt vmcnt(0) expcnt(0) lgkmcnt(0)
+; GFX11-FAKE16-NEXT: v_cmp_ne_u32_e32 vcc_lo, 0, v0
+; GFX11-FAKE16-NEXT: s_lshr_b32 s45, s29, 16
+; GFX11-FAKE16-NEXT: s_lshr_b32 s44, s28, 16
+; GFX11-FAKE16-NEXT: s_lshr_b32 s43, s27, 16
+; GFX11-FAKE16-NEXT: s_lshr_b32 s42, s26, 16
+; GFX11-FAKE16-NEXT: s_lshr_b32 s41, s25, 16
+; GFX11-FAKE16-NEXT: s_lshr_b32 s40, s24, 16
+; GFX11-FAKE16-NEXT: s_lshr_b32 s15, s23, 16
+; GFX11-FAKE16-NEXT: s_lshr_b32 s14, s22, 16
+; GFX11-FAKE16-NEXT: s_lshr_b32 s13, s21, 16
+; GFX11-FAKE16-NEXT: s_lshr_b32 s12, s20, 16
+; GFX11-FAKE16-NEXT: s_lshr_b32 s11, s19, 16
+; GFX11-FAKE16-NEXT: s_lshr_b32 s10, s18, 16
+; GFX11-FAKE16-NEXT: s_lshr_b32 s9, s17, 16
+; GFX11-FAKE16-NEXT: s_lshr_b32 s7, s16, 16
+; GFX11-FAKE16-NEXT: s_lshr_b32 s6, s3, 16
+; GFX11-FAKE16-NEXT: s_lshr_b32 s8, s2, 16
+; GFX11-FAKE16-NEXT: s_lshr_b32 s4, s1, 16
+; GFX11-FAKE16-NEXT: s_lshr_b32 s5, s0, 16
+; GFX11-FAKE16-NEXT: s_mov_b32 s46, 0
+; GFX11-FAKE16-NEXT: s_and_b32 s47, vcc_lo, exec_lo
+; GFX11-FAKE16-NEXT: s_cbranch_scc0 .LBB59_3
+; GFX11-FAKE16-NEXT: ; %bb.1: ; %Flow
+; GFX11-FAKE16-NEXT: s_and_not1_b32 vcc_lo, exec_lo, s46
+; GFX11-FAKE16-NEXT: s_cbranch_vccnz .LBB59_4
+; GFX11-FAKE16-NEXT: .LBB59_2: ; %cmp.true
+; GFX11-FAKE16-NEXT: s_pack_ll_b32_b16 s29, s29, s45
+; GFX11-FAKE16-NEXT: s_pack_ll_b32_b16 s28, s28, s44
+; GFX11-FAKE16-NEXT: s_pack_ll_b32_b16 s27, s27, s43
+; GFX11-FAKE16-NEXT: s_pack_ll_b32_b16 s26, s26, s42
+; GFX11-FAKE16-NEXT: s_pack_ll_b32_b16 s25, s25, s41
+; GFX11-FAKE16-NEXT: s_pack_ll_b32_b16 s24, s24, s40
+; GFX11-FAKE16-NEXT: s_pack_ll_b32_b16 s15, s23, s15
+; GFX11-FAKE16-NEXT: s_pack_ll_b32_b16 s14, s22, s14
+; GFX11-FAKE16-NEXT: s_pack_ll_b32_b16 s13, s21, s13
+; GFX11-FAKE16-NEXT: s_pack_ll_b32_b16 s12, s20, s12
+; GFX11-FAKE16-NEXT: s_pack_ll_b32_b16 s11, s19, s11
+; GFX11-FAKE16-NEXT: s_pack_ll_b32_b16 s10, s18, s10
+; GFX11-FAKE16-NEXT: s_pack_ll_b32_b16 s9, s17, s9
+; GFX11-FAKE16-NEXT: s_pack_ll_b32_b16 s7, s16, s7
+; GFX11-FAKE16-NEXT: s_pack_ll_b32_b16 s3, s3, s6
+; GFX11-FAKE16-NEXT: s_pack_ll_b32_b16 s2, s2, s8
+; GFX11-FAKE16-NEXT: s_pack_ll_b32_b16 s0, s0, s5
+; GFX11-FAKE16-NEXT: s_pack_ll_b32_b16 s1, s1, s4
+; GFX11-FAKE16-NEXT: v_pk_add_f16 v13, 0x200, s29 op_sel_hi:[0,1]
+; GFX11-FAKE16-NEXT: v_pk_add_f16 v14, 0x200, s28 op_sel_hi:[0,1]
+; GFX11-FAKE16-NEXT: v_pk_add_f16 v15, 0x200, s27 op_sel_hi:[0,1]
+; GFX11-FAKE16-NEXT: v_pk_add_f16 v16, 0x200, s26 op_sel_hi:[0,1]
+; GFX11-FAKE16-NEXT: v_pk_add_f16 v17, 0x200, s25 op_sel_hi:[0,1]
+; GFX11-FAKE16-NEXT: v_pk_add_f16 v8, 0x200, s24 op_sel_hi:[0,1]
+; GFX11-FAKE16-NEXT: v_pk_add_f16 v9, 0x200, s15 op_sel_hi:[0,1]
+; GFX11-FAKE16-NEXT: v_pk_add_f16 v10, 0x200, s14 op_sel_hi:[0,1]
+; GFX11-FAKE16-NEXT: v_pk_add_f16 v11, 0x200, s13 op_sel_hi:[0,1]
+; GFX11-FAKE16-NEXT: v_pk_add_f16 v12, 0x200, s12 op_sel_hi:[0,1]
+; GFX11-FAKE16-NEXT: v_pk_add_f16 v3, 0x200, s11 op_sel_hi:[0,1]
+; GFX11-FAKE16-NEXT: v_pk_add_f16 v4, 0x200, s10 op_sel_hi:[0,1]
+; GFX11-FAKE16-NEXT: v_pk_add_f16 v5, 0x200, s9 op_sel_hi:[0,1]
+; GFX11-FAKE16-NEXT: v_pk_add_f16 v2, 0x200, s0 op_sel_hi:[0,1]
+; GFX11-FAKE16-NEXT: v_pk_add_f16 v1, 0x200, s1 op_sel_hi:[0,1]
+; GFX11-FAKE16-NEXT: v_pk_add_f16 v0, 0x200, s2 op_sel_hi:[0,1]
+; GFX11-FAKE16-NEXT: v_pk_add_f16 v7, 0x200, s3 op_sel_hi:[0,1]
+; GFX11-FAKE16-NEXT: v_pk_add_f16 v6, 0x200, s7 op_sel_hi:[0,1]
+; GFX11-FAKE16-NEXT: v_lshrrev_b32_e32 v35, 16, v2
+; GFX11-FAKE16-NEXT: v_lshrrev_b32_e32 v34, 16, v1
+; GFX11-FAKE16-NEXT: v_lshrrev_b32_e32 v33, 16, v0
+; GFX11-FAKE16-NEXT: v_lshrrev_b32_e32 v32, 16, v7
+; GFX11-FAKE16-NEXT: v_lshrrev_b32_e32 v31, 16, v6
+; GFX11-FAKE16-NEXT: v_lshrrev_b32_e32 v30, 16, v5
+; GFX11-FAKE16-NEXT: v_lshrrev_b32_e32 v29, 16, v4
+; GFX11-FAKE16-NEXT: v_lshrrev_b32_e32 v28, 16, v3
+; GFX11-FAKE16-NEXT: v_lshrrev_b32_e32 v27, 16, v12
+; GFX11-FAKE16-NEXT: v_lshrrev_b32_e32 v26, 16, v11
+; GFX11-FAKE16-NEXT: v_lshrrev_b32_e32 v25, 16, v10
+; GFX11-FAKE16-NEXT: v_lshrrev_b32_e32 v24, 16, v9
+; GFX11-FAKE16-NEXT: v_lshrrev_b32_e32 v23, 16, v8
+; GFX11-FAKE16-NEXT: v_lshrrev_b32_e32 v22, 16, v17
+; GFX11-FAKE16-NEXT: v_lshrrev_b32_e32 v21, 16, v16
+; GFX11-FAKE16-NEXT: v_lshrrev_b32_e32 v20, 16, v15
+; GFX11-FAKE16-NEXT: v_lshrrev_b32_e32 v19, 16, v14
+; GFX11-FAKE16-NEXT: v_lshrrev_b32_e32 v18, 16, v13
+; GFX11-FAKE16-NEXT: s_branch .LBB59_5
+; GFX11-FAKE16-NEXT: .LBB59_3:
+; GFX11-FAKE16-NEXT: s_branch .LBB59_2
+; GFX11-FAKE16-NEXT: .LBB59_4:
+; GFX11-FAKE16-NEXT: v_dual_mov_b32 v13, s29 :: v_dual_mov_b32 v14, s28
+; GFX11-FAKE16-NEXT: v_dual_mov_b32 v15, s27 :: v_dual_mov_b32 v16, s26
+; GFX11-FAKE16-NEXT: v_dual_mov_b32 v17, s25 :: v_dual_mov_b32 v8, s24
+; GFX11-FAKE16-NEXT: v_dual_mov_b32 v9, s23 :: v_dual_mov_b32 v10, s22
+; GFX11-FAKE16-NEXT: v_dual_mov_b32 v11, s21 :: v_dual_mov_b32 v12, s20
+; GFX11-FAKE16-NEXT: v_dual_mov_b32 v3, s19 :: v_dual_mov_b32 v4, s18
+; GFX11-FAKE16-NEXT: v_dual_mov_b32 v5, s17 :: v_dual_mov_b32 v6, s16
+; GFX11-FAKE16-NEXT: v_dual_mov_b32 v7, s3 :: v_dual_mov_b32 v0, s2
+; GFX11-FAKE16-NEXT: v_dual_mov_b32 v1, s1 :: v_dual_mov_b32 v2, s0
+; GFX11-FAKE16-NEXT: v_dual_mov_b32 v18, s45 :: v_dual_mov_b32 v19, s44
+; GFX11-FAKE16-NEXT: v_dual_mov_b32 v20, s43 :: v_dual_mov_b32 v21, s42
+; GFX11-FAKE16-NEXT: v_dual_mov_b32 v22, s41 :: v_dual_mov_b32 v23, s40
+; GFX11-FAKE16-NEXT: v_dual_mov_b32 v24, s15 :: v_dual_mov_b32 v25, s14
+; GFX11-FAKE16-NEXT: v_dual_mov_b32 v26, s13 :: v_dual_mov_b32 v27, s12
+; GFX11-FAKE16-NEXT: v_dual_mov_b32 v28, s11 :: v_dual_mov_b32 v29, s10
+; GFX11-FAKE16-NEXT: v_dual_mov_b32 v30, s9 :: v_dual_mov_b32 v31, s7
+; GFX11-FAKE16-NEXT: v_dual_mov_b32 v32, s6 :: v_dual_mov_b32 v33, s8
+; GFX11-FAKE16-NEXT: v_dual_mov_b32 v34, s4 :: v_dual_mov_b32 v35, s5
+; GFX11-FAKE16-NEXT: .LBB59_5: ; %end
+; GFX11-FAKE16-NEXT: v_and_b32_e32 v2, 0xffff, v2
+; GFX11-FAKE16-NEXT: v_and_b32_e32 v1, 0xffff, v1
+; GFX11-FAKE16-NEXT: v_and_b32_e32 v36, 0xffff, v0
+; GFX11-FAKE16-NEXT: v_and_b32_e32 v7, 0xffff, v7
+; GFX11-FAKE16-NEXT: v_and_b32_e32 v6, 0xffff, v6
+; GFX11-FAKE16-NEXT: v_lshl_or_b32 v0, v35, 16, v2
+; GFX11-FAKE16-NEXT: v_lshl_or_b32 v1, v34, 16, v1
+; GFX11-FAKE16-NEXT: v_lshl_or_b32 v2, v33, 16, v36
+; GFX11-FAKE16-NEXT: v_and_b32_e32 v33, 0xffff, v4
+; GFX11-FAKE16-NEXT: v_and_b32_e32 v34, 0xffff, v3
+; GFX11-FAKE16-NEXT: v_lshl_or_b32 v3, v32, 16, v7
+; GFX11-FAKE16-NEXT: v_lshl_or_b32 v4, v31, 16, v6
+; GFX11-FAKE16-NEXT: v_and_b32_e32 v12, 0xffff, v12
+; GFX11-FAKE16-NEXT: v_lshl_or_b32 v6, v29, 16, v33
+; GFX11-FAKE16-NEXT: v_lshl_or_b32 v7, v28, 16, v34
+; GFX11-FAKE16-NEXT: v_and_b32_e32 v11, 0xffff, v11
+; GFX11-FAKE16-NEXT: v_and_b32_e32 v28, 0xffff, v9
+; GFX11-FAKE16-NEXT: v_and_b32_e32 v29, 0xffff, v8
+; GFX11-FAKE16-NEXT: v_and_b32_e32 v5, 0xffff, v5
+; GFX11-FAKE16-NEXT: v_and_b32_e32 v10, 0xffff, v10
+; GFX11-FAKE16-NEXT: v_lshl_or_b32 v8, v27, 16, v12
+; GFX11-FAKE16-NEXT: v_lshl_or_b32 v9, v26, 16, v11
+; GFX11-FAKE16-NEXT: v_lshl_or_b32 v11, v24, 16, v28
+; GFX11-FAKE16-NEXT: v_lshl_or_b32 v12, v23, 16, v29
+; GFX11-FAKE16-NEXT: v_and_b32_e32 v17, 0xffff, v17
+; GFX11-FAKE16-NEXT: v_and_b32_e32 v16, 0xffff, v16
+; GFX11-FAKE16-NEXT: v_and_b32_e32 v15, 0xffff, v15
+; GFX11-FAKE16-NEXT: v_and_b32_e32 v23, 0xffff, v14
+; GFX11-FAKE16-NEXT: v_and_b32_e32 v24, 0xffff, v13
+; GFX11-FAKE16-NEXT: v_lshl_or_b32 v5, v30, 16, v5
+; GFX11-FAKE16-NEXT: v_lshl_or_b32 v10, v25, 16, v10
+; GFX11-FAKE16-NEXT: v_lshl_or_b32 v13, v22, 16, v17
+; GFX11-FAKE16-NEXT: v_lshl_or_b32 v14, v21, 16, v16
+; GFX11-FAKE16-NEXT: v_lshl_or_b32 v15, v20, 16, v15
+; GFX11-FAKE16-NEXT: v_lshl_or_b32 v16, v19, 16, v23
+; GFX11-FAKE16-NEXT: v_lshl_or_b32 v17, v18, 16, v24
+; GFX11-FAKE16-NEXT: s_setpc_b64 s[30:31]
%cmp = icmp eq i32 %b, 0
br i1 %cmp, label %cmp.true, label %cmp.false
diff --git a/llvm/test/CodeGen/AMDGPU/amdgcn.bitcast.640bit.ll b/llvm/test/CodeGen/AMDGPU/amdgcn.bitcast.640bit.ll
index 47cb6bd..44cfd6c 100644
--- a/llvm/test/CodeGen/AMDGPU/amdgcn.bitcast.640bit.ll
+++ b/llvm/test/CodeGen/AMDGPU/amdgcn.bitcast.640bit.ll
@@ -4913,93 +4913,270 @@ define inreg <20 x i32> @bitcast_v40i16_to_v20i32_scalar(<40 x i16> inreg %a, i3
; GFX11-TRUE16-LABEL: bitcast_v40i16_to_v20i32_scalar:
; GFX11-TRUE16: ; %bb.0:
; GFX11-TRUE16-NEXT: s_waitcnt vmcnt(0) expcnt(0) lgkmcnt(0)
-; GFX11-TRUE16-NEXT: v_mov_b16_e32 v32.h, 0
; GFX11-TRUE16-NEXT: v_cmp_ne_u32_e32 vcc_lo, 0, v2
-; GFX11-TRUE16-NEXT: v_mov_b16_e32 v32.l, v1.h
-; GFX11-TRUE16-NEXT: v_mov_b16_e32 v33.l, v0.h
-; GFX11-TRUE16-NEXT: v_and_b32_e32 v35, 0xffff, v0
-; GFX11-TRUE16-NEXT: v_mov_b16_e32 v33.h, v32.h
-; GFX11-TRUE16-NEXT: v_and_b32_e32 v34, 0xffff, v1
-; GFX11-TRUE16-NEXT: s_lshr_b32 s41, s29, 16
-; GFX11-TRUE16-NEXT: s_lshr_b32 s42, s28, 16
-; GFX11-TRUE16-NEXT: s_lshr_b32 s43, s27, 16
-; GFX11-TRUE16-NEXT: s_lshr_b32 s44, s26, 16
-; GFX11-TRUE16-NEXT: s_lshr_b32 s45, s25, 16
-; GFX11-TRUE16-NEXT: s_lshr_b32 s15, s24, 16
-; GFX11-TRUE16-NEXT: s_lshr_b32 s14, s23, 16
-; GFX11-TRUE16-NEXT: s_lshr_b32 s13, s22, 16
-; GFX11-TRUE16-NEXT: s_lshr_b32 s12, s21, 16
-; GFX11-TRUE16-NEXT: s_lshr_b32 s11, s20, 16
-; GFX11-TRUE16-NEXT: s_lshr_b32 s10, s19, 16
-; GFX11-TRUE16-NEXT: s_lshr_b32 s9, s18, 16
-; GFX11-TRUE16-NEXT: s_lshr_b32 s8, s17, 16
-; GFX11-TRUE16-NEXT: s_lshr_b32 s7, s16, 16
-; GFX11-TRUE16-NEXT: s_lshr_b32 s46, s3, 16
-; GFX11-TRUE16-NEXT: s_lshr_b32 s6, s2, 16
-; GFX11-TRUE16-NEXT: s_lshr_b32 s5, s1, 16
-; GFX11-TRUE16-NEXT: s_lshr_b32 s4, s0, 16
-; GFX11-TRUE16-NEXT: s_mov_b32 s40, 0
-; GFX11-TRUE16-NEXT: s_pack_ll_b32_b16 s4, s0, s4
-; GFX11-TRUE16-NEXT: s_pack_ll_b32_b16 s5, s1, s5
-; GFX11-TRUE16-NEXT: s_pack_ll_b32_b16 s6, s2, s6
-; GFX11-TRUE16-NEXT: s_pack_ll_b32_b16 s3, s3, s46
-; GFX11-TRUE16-NEXT: s_pack_ll_b32_b16 s7, s16, s7
-; GFX11-TRUE16-NEXT: s_pack_ll_b32_b16 s8, s17, s8
-; GFX11-TRUE16-NEXT: s_pack_ll_b32_b16 s9, s18, s9
-; GFX11-TRUE16-NEXT: s_pack_ll_b32_b16 s10, s19, s10
-; GFX11-TRUE16-NEXT: s_pack_ll_b32_b16 s11, s20, s11
-; GFX11-TRUE16-NEXT: s_pack_ll_b32_b16 s12, s21, s12
-; GFX11-TRUE16-NEXT: s_pack_ll_b32_b16 s13, s22, s13
-; GFX11-TRUE16-NEXT: s_pack_ll_b32_b16 s14, s23, s14
-; GFX11-TRUE16-NEXT: s_pack_ll_b32_b16 s15, s24, s15
-; GFX11-TRUE16-NEXT: s_pack_ll_b32_b16 s16, s25, s45
-; GFX11-TRUE16-NEXT: s_pack_ll_b32_b16 s17, s26, s44
-; GFX11-TRUE16-NEXT: s_pack_ll_b32_b16 s0, s27, s43
-; GFX11-TRUE16-NEXT: s_pack_ll_b32_b16 s1, s28, s42
-; GFX11-TRUE16-NEXT: s_pack_ll_b32_b16 s2, s29, s41
+; GFX11-TRUE16-NEXT: s_clause 0x1f
+; GFX11-TRUE16-NEXT: scratch_store_b32 off, v40, s32 offset:296
+; GFX11-TRUE16-NEXT: scratch_store_b32 off, v41, s32 offset:292
+; GFX11-TRUE16-NEXT: scratch_store_b32 off, v42, s32 offset:288
+; GFX11-TRUE16-NEXT: scratch_store_b32 off, v43, s32 offset:284
+; GFX11-TRUE16-NEXT: scratch_store_b32 off, v44, s32 offset:280
+; GFX11-TRUE16-NEXT: scratch_store_b32 off, v45, s32 offset:276
+; GFX11-TRUE16-NEXT: scratch_store_b32 off, v46, s32 offset:272
+; GFX11-TRUE16-NEXT: scratch_store_b32 off, v47, s32 offset:268
+; GFX11-TRUE16-NEXT: scratch_store_b32 off, v56, s32 offset:264
+; GFX11-TRUE16-NEXT: scratch_store_b32 off, v57, s32 offset:260
+; GFX11-TRUE16-NEXT: scratch_store_b32 off, v58, s32 offset:256
+; GFX11-TRUE16-NEXT: scratch_store_b32 off, v59, s32 offset:252
+; GFX11-TRUE16-NEXT: scratch_store_b32 off, v60, s32 offset:248
+; GFX11-TRUE16-NEXT: scratch_store_b32 off, v61, s32 offset:244
+; GFX11-TRUE16-NEXT: scratch_store_b32 off, v62, s32 offset:240
+; GFX11-TRUE16-NEXT: scratch_store_b32 off, v63, s32 offset:236
+; GFX11-TRUE16-NEXT: scratch_store_b32 off, v72, s32 offset:232
+; GFX11-TRUE16-NEXT: scratch_store_b32 off, v73, s32 offset:228
+; GFX11-TRUE16-NEXT: scratch_store_b32 off, v74, s32 offset:224
+; GFX11-TRUE16-NEXT: scratch_store_b32 off, v75, s32 offset:220
+; GFX11-TRUE16-NEXT: scratch_store_b32 off, v76, s32 offset:216
+; GFX11-TRUE16-NEXT: scratch_store_b32 off, v77, s32 offset:212
+; GFX11-TRUE16-NEXT: scratch_store_b32 off, v78, s32 offset:208
+; GFX11-TRUE16-NEXT: scratch_store_b32 off, v79, s32 offset:204
+; GFX11-TRUE16-NEXT: scratch_store_b32 off, v88, s32 offset:200
+; GFX11-TRUE16-NEXT: scratch_store_b32 off, v89, s32 offset:196
+; GFX11-TRUE16-NEXT: scratch_store_b32 off, v90, s32 offset:192
+; GFX11-TRUE16-NEXT: scratch_store_b32 off, v91, s32 offset:188
+; GFX11-TRUE16-NEXT: scratch_store_b32 off, v92, s32 offset:184
+; GFX11-TRUE16-NEXT: scratch_store_b32 off, v93, s32 offset:180
+; GFX11-TRUE16-NEXT: scratch_store_b32 off, v94, s32 offset:176
+; GFX11-TRUE16-NEXT: scratch_store_b32 off, v95, s32 offset:172
+; GFX11-TRUE16-NEXT: s_clause 0x1f
+; GFX11-TRUE16-NEXT: scratch_store_b32 off, v104, s32 offset:168
+; GFX11-TRUE16-NEXT: scratch_store_b32 off, v105, s32 offset:164
+; GFX11-TRUE16-NEXT: scratch_store_b32 off, v106, s32 offset:160
+; GFX11-TRUE16-NEXT: scratch_store_b32 off, v107, s32 offset:156
+; GFX11-TRUE16-NEXT: scratch_store_b32 off, v108, s32 offset:152
+; GFX11-TRUE16-NEXT: scratch_store_b32 off, v109, s32 offset:148
+; GFX11-TRUE16-NEXT: scratch_store_b32 off, v110, s32 offset:144
+; GFX11-TRUE16-NEXT: scratch_store_b32 off, v111, s32 offset:140
+; GFX11-TRUE16-NEXT: scratch_store_b32 off, v120, s32 offset:136
+; GFX11-TRUE16-NEXT: scratch_store_b32 off, v121, s32 offset:132
+; GFX11-TRUE16-NEXT: scratch_store_b32 off, v122, s32 offset:128
+; GFX11-TRUE16-NEXT: scratch_store_b32 off, v123, s32 offset:124
+; GFX11-TRUE16-NEXT: scratch_store_b32 off, v124, s32 offset:120
+; GFX11-TRUE16-NEXT: scratch_store_b32 off, v125, s32 offset:116
+; GFX11-TRUE16-NEXT: scratch_store_b32 off, v126, s32 offset:112
+; GFX11-TRUE16-NEXT: scratch_store_b32 off, v127, s32 offset:108
+; GFX11-TRUE16-NEXT: scratch_store_b32 off, v136, s32 offset:104
+; GFX11-TRUE16-NEXT: scratch_store_b32 off, v137, s32 offset:100
+; GFX11-TRUE16-NEXT: scratch_store_b32 off, v138, s32 offset:96
+; GFX11-TRUE16-NEXT: scratch_store_b32 off, v139, s32 offset:92
+; GFX11-TRUE16-NEXT: scratch_store_b32 off, v140, s32 offset:88
+; GFX11-TRUE16-NEXT: scratch_store_b32 off, v141, s32 offset:84
+; GFX11-TRUE16-NEXT: scratch_store_b32 off, v142, s32 offset:80
+; GFX11-TRUE16-NEXT: scratch_store_b32 off, v143, s32 offset:76
+; GFX11-TRUE16-NEXT: scratch_store_b32 off, v152, s32 offset:72
+; GFX11-TRUE16-NEXT: scratch_store_b32 off, v153, s32 offset:68
+; GFX11-TRUE16-NEXT: scratch_store_b32 off, v154, s32 offset:64
+; GFX11-TRUE16-NEXT: scratch_store_b32 off, v155, s32 offset:60
+; GFX11-TRUE16-NEXT: scratch_store_b32 off, v156, s32 offset:56
+; GFX11-TRUE16-NEXT: scratch_store_b32 off, v157, s32 offset:52
+; GFX11-TRUE16-NEXT: scratch_store_b32 off, v158, s32 offset:48
+; GFX11-TRUE16-NEXT: scratch_store_b32 off, v159, s32 offset:44
+; GFX11-TRUE16-NEXT: s_clause 0xa
+; GFX11-TRUE16-NEXT: scratch_store_b32 off, v168, s32 offset:40
+; GFX11-TRUE16-NEXT: scratch_store_b32 off, v169, s32 offset:36
+; GFX11-TRUE16-NEXT: scratch_store_b32 off, v170, s32 offset:32
+; GFX11-TRUE16-NEXT: scratch_store_b32 off, v171, s32 offset:28
+; GFX11-TRUE16-NEXT: scratch_store_b32 off, v172, s32 offset:24
+; GFX11-TRUE16-NEXT: scratch_store_b32 off, v173, s32 offset:20
+; GFX11-TRUE16-NEXT: scratch_store_b32 off, v174, s32 offset:16
+; GFX11-TRUE16-NEXT: scratch_store_b32 off, v175, s32 offset:12
+; GFX11-TRUE16-NEXT: scratch_store_b32 off, v184, s32 offset:8
+; GFX11-TRUE16-NEXT: scratch_store_b32 off, v185, s32 offset:4
+; GFX11-TRUE16-NEXT: scratch_store_b32 off, v186, s32
+; GFX11-TRUE16-NEXT: v_dual_mov_b32 v185, v1 :: v_dual_mov_b32 v186, v0
+; GFX11-TRUE16-NEXT: s_lshr_b32 s15, s29, 16
+; GFX11-TRUE16-NEXT: s_lshr_b32 s14, s28, 16
+; GFX11-TRUE16-NEXT: s_lshr_b32 s13, s27, 16
+; GFX11-TRUE16-NEXT: s_lshr_b32 s12, s26, 16
+; GFX11-TRUE16-NEXT: s_lshr_b32 s11, s25, 16
+; GFX11-TRUE16-NEXT: s_lshr_b32 s10, s24, 16
+; GFX11-TRUE16-NEXT: s_lshr_b32 s9, s23, 16
+; GFX11-TRUE16-NEXT: s_lshr_b32 s8, s22, 16
+; GFX11-TRUE16-NEXT: s_lshr_b32 s7, s21, 16
+; GFX11-TRUE16-NEXT: s_lshr_b32 s6, s20, 16
+; GFX11-TRUE16-NEXT: s_lshr_b32 s5, s19, 16
+; GFX11-TRUE16-NEXT: s_lshr_b32 s4, s18, 16
+; GFX11-TRUE16-NEXT: s_lshr_b32 s43, s17, 16
+; GFX11-TRUE16-NEXT: s_lshr_b32 s44, s16, 16
+; GFX11-TRUE16-NEXT: s_lshr_b32 s45, s3, 16
+; GFX11-TRUE16-NEXT: s_lshr_b32 s46, s2, 16
+; GFX11-TRUE16-NEXT: s_lshr_b32 s41, s1, 16
+; GFX11-TRUE16-NEXT: s_lshr_b32 s40, s0, 16
+; GFX11-TRUE16-NEXT: s_mov_b32 s42, 0
+; GFX11-TRUE16-NEXT: s_pack_ll_b32_b16 s40, s0, s40
+; GFX11-TRUE16-NEXT: s_pack_ll_b32_b16 s41, s1, s41
+; GFX11-TRUE16-NEXT: s_pack_ll_b32_b16 s0, s2, s46
+; GFX11-TRUE16-NEXT: s_pack_ll_b32_b16 s1, s3, s45
+; GFX11-TRUE16-NEXT: s_pack_ll_b32_b16 s2, s16, s44
+; GFX11-TRUE16-NEXT: s_pack_ll_b32_b16 s3, s17, s43
+; GFX11-TRUE16-NEXT: s_pack_ll_b32_b16 s4, s18, s4
+; GFX11-TRUE16-NEXT: s_pack_ll_b32_b16 s5, s19, s5
+; GFX11-TRUE16-NEXT: s_pack_ll_b32_b16 s6, s20, s6
+; GFX11-TRUE16-NEXT: s_pack_ll_b32_b16 s7, s21, s7
+; GFX11-TRUE16-NEXT: s_pack_ll_b32_b16 s8, s22, s8
+; GFX11-TRUE16-NEXT: s_pack_ll_b32_b16 s9, s23, s9
+; GFX11-TRUE16-NEXT: s_pack_ll_b32_b16 s10, s24, s10
+; GFX11-TRUE16-NEXT: s_pack_ll_b32_b16 s11, s25, s11
+; GFX11-TRUE16-NEXT: s_pack_ll_b32_b16 s12, s26, s12
+; GFX11-TRUE16-NEXT: s_pack_ll_b32_b16 s13, s27, s13
+; GFX11-TRUE16-NEXT: s_pack_ll_b32_b16 s14, s28, s14
+; GFX11-TRUE16-NEXT: s_pack_ll_b32_b16 s15, s29, s15
; GFX11-TRUE16-NEXT: s_and_b32 s47, vcc_lo, exec_lo
; GFX11-TRUE16-NEXT: s_cbranch_scc0 .LBB15_4
; GFX11-TRUE16-NEXT: ; %bb.1: ; %cmp.false
-; GFX11-TRUE16-NEXT: v_lshl_or_b32 v18, v33, 16, v35
-; GFX11-TRUE16-NEXT: v_lshl_or_b32 v19, v32, 16, v34
-; GFX11-TRUE16-NEXT: v_dual_mov_b32 v0, s4 :: v_dual_mov_b32 v1, s5
-; GFX11-TRUE16-NEXT: v_dual_mov_b32 v2, s6 :: v_dual_mov_b32 v3, s3
-; GFX11-TRUE16-NEXT: v_dual_mov_b32 v4, s7 :: v_dual_mov_b32 v5, s8
-; GFX11-TRUE16-NEXT: v_dual_mov_b32 v6, s9 :: v_dual_mov_b32 v7, s10
-; GFX11-TRUE16-NEXT: v_dual_mov_b32 v8, s11 :: v_dual_mov_b32 v9, s12
-; GFX11-TRUE16-NEXT: v_dual_mov_b32 v10, s13 :: v_dual_mov_b32 v11, s14
-; GFX11-TRUE16-NEXT: v_dual_mov_b32 v12, s15 :: v_dual_mov_b32 v13, s16
-; GFX11-TRUE16-NEXT: v_dual_mov_b32 v14, s17 :: v_dual_mov_b32 v15, s0
-; GFX11-TRUE16-NEXT: v_dual_mov_b32 v16, s1 :: v_dual_mov_b32 v17, s2
-; GFX11-TRUE16-NEXT: s_and_not1_b32 vcc_lo, exec_lo, s40
+; GFX11-TRUE16-NEXT: v_dual_mov_b32 v0, s40 :: v_dual_mov_b32 v5, s0
+; GFX11-TRUE16-NEXT: v_dual_mov_b32 v2, s41 :: v_dual_mov_b32 v9, s1
+; GFX11-TRUE16-NEXT: v_dual_mov_b32 v14, s2 :: v_dual_mov_b32 v27, s4
+; GFX11-TRUE16-NEXT: v_dual_mov_b32 v20, s3 :: v_dual_mov_b32 v35, s5
+; GFX11-TRUE16-NEXT: v_dual_mov_b32 v44, s6 :: v_dual_mov_b32 v65, s8
+; GFX11-TRUE16-NEXT: v_dual_mov_b32 v54, s7 :: v_dual_mov_b32 v77, s9
+; GFX11-TRUE16-NEXT: v_dual_mov_b32 v90, s10 :: v_dual_mov_b32 v119, s12
+; GFX11-TRUE16-NEXT: v_dual_mov_b32 v104, s11 :: v_dual_mov_b32 v135, s13
+; GFX11-TRUE16-NEXT: v_mov_b32_e32 v152, s14
+; GFX11-TRUE16-NEXT: v_mov_b32_e32 v170, s15
+; GFX11-TRUE16-NEXT: s_and_not1_b32 vcc_lo, exec_lo, s42
; GFX11-TRUE16-NEXT: s_cbranch_vccnz .LBB15_3
; GFX11-TRUE16-NEXT: .LBB15_2: ; %cmp.true
-; GFX11-TRUE16-NEXT: v_lshl_or_b32 v18, v33, 16, v35
-; GFX11-TRUE16-NEXT: v_lshl_or_b32 v19, v32, 16, v34
-; GFX11-TRUE16-NEXT: v_pk_add_u16 v0, s4, 3 op_sel_hi:[1,0]
-; GFX11-TRUE16-NEXT: v_pk_add_u16 v1, s5, 3 op_sel_hi:[1,0]
-; GFX11-TRUE16-NEXT: v_pk_add_u16 v2, s6, 3 op_sel_hi:[1,0]
-; GFX11-TRUE16-NEXT: v_pk_add_u16 v3, s3, 3 op_sel_hi:[1,0]
-; GFX11-TRUE16-NEXT: v_pk_add_u16 v4, s7, 3 op_sel_hi:[1,0]
-; GFX11-TRUE16-NEXT: v_pk_add_u16 v5, s8, 3 op_sel_hi:[1,0]
-; GFX11-TRUE16-NEXT: v_pk_add_u16 v6, s9, 3 op_sel_hi:[1,0]
-; GFX11-TRUE16-NEXT: v_pk_add_u16 v7, s10, 3 op_sel_hi:[1,0]
-; GFX11-TRUE16-NEXT: v_pk_add_u16 v8, s11, 3 op_sel_hi:[1,0]
-; GFX11-TRUE16-NEXT: v_pk_add_u16 v9, s12, 3 op_sel_hi:[1,0]
-; GFX11-TRUE16-NEXT: v_pk_add_u16 v10, s13, 3 op_sel_hi:[1,0]
-; GFX11-TRUE16-NEXT: v_pk_add_u16 v11, s14, 3 op_sel_hi:[1,0]
-; GFX11-TRUE16-NEXT: v_pk_add_u16 v12, s15, 3 op_sel_hi:[1,0]
-; GFX11-TRUE16-NEXT: v_pk_add_u16 v13, s16, 3 op_sel_hi:[1,0]
-; GFX11-TRUE16-NEXT: v_pk_add_u16 v14, s17, 3 op_sel_hi:[1,0]
-; GFX11-TRUE16-NEXT: v_pk_add_u16 v15, s0, 3 op_sel_hi:[1,0]
-; GFX11-TRUE16-NEXT: v_pk_add_u16 v16, s1, 3 op_sel_hi:[1,0]
-; GFX11-TRUE16-NEXT: v_pk_add_u16 v17, s2, 3 op_sel_hi:[1,0]
-; GFX11-TRUE16-NEXT: v_pk_add_u16 v18, v18, 3 op_sel_hi:[1,0]
-; GFX11-TRUE16-NEXT: v_pk_add_u16 v19, v19, 3 op_sel_hi:[1,0]
+; GFX11-TRUE16-NEXT: v_pk_add_u16 v0, s40, 3 op_sel_hi:[1,0]
+; GFX11-TRUE16-NEXT: v_pk_add_u16 v2, s41, 3 op_sel_hi:[1,0]
+; GFX11-TRUE16-NEXT: v_pk_add_u16 v186, v186, 3 op_sel_hi:[1,0]
+; GFX11-TRUE16-NEXT: v_pk_add_u16 v185, v185, 3 op_sel_hi:[1,0]
+; GFX11-TRUE16-NEXT: v_pk_add_u16 v5, s0, 3 op_sel_hi:[1,0]
+; GFX11-TRUE16-NEXT: v_pk_add_u16 v9, s1, 3 op_sel_hi:[1,0]
+; GFX11-TRUE16-NEXT: v_pk_add_u16 v14, s2, 3 op_sel_hi:[1,0]
+; GFX11-TRUE16-NEXT: v_pk_add_u16 v20, s3, 3 op_sel_hi:[1,0]
+; GFX11-TRUE16-NEXT: v_pk_add_u16 v27, s4, 3 op_sel_hi:[1,0]
+; GFX11-TRUE16-NEXT: v_pk_add_u16 v35, s5, 3 op_sel_hi:[1,0]
+; GFX11-TRUE16-NEXT: v_pk_add_u16 v44, s6, 3 op_sel_hi:[1,0]
+; GFX11-TRUE16-NEXT: v_pk_add_u16 v54, s7, 3 op_sel_hi:[1,0]
+; GFX11-TRUE16-NEXT: v_pk_add_u16 v65, s8, 3 op_sel_hi:[1,0]
+; GFX11-TRUE16-NEXT: v_pk_add_u16 v77, s9, 3 op_sel_hi:[1,0]
+; GFX11-TRUE16-NEXT: v_pk_add_u16 v90, s10, 3 op_sel_hi:[1,0]
+; GFX11-TRUE16-NEXT: v_pk_add_u16 v104, s11, 3 op_sel_hi:[1,0]
+; GFX11-TRUE16-NEXT: v_pk_add_u16 v119, s12, 3 op_sel_hi:[1,0]
+; GFX11-TRUE16-NEXT: v_pk_add_u16 v135, s13, 3 op_sel_hi:[1,0]
+; GFX11-TRUE16-NEXT: v_pk_add_u16 v152, s14, 3 op_sel_hi:[1,0]
+; GFX11-TRUE16-NEXT: v_pk_add_u16 v170, s15, 3 op_sel_hi:[1,0]
; GFX11-TRUE16-NEXT: .LBB15_3: ; %end
+; GFX11-TRUE16-NEXT: v_dual_mov_b32 v7, v35 :: v_dual_mov_b32 v8, v44
+; GFX11-TRUE16-NEXT: v_dual_mov_b32 v11, v77 :: v_dual_mov_b32 v12, v90
+; GFX11-TRUE16-NEXT: v_mov_b32_e32 v13, v104
+; GFX11-TRUE16-NEXT: v_dual_mov_b32 v15, v135 :: v_dual_mov_b32 v16, v152
+; GFX11-TRUE16-NEXT: v_mov_b32_e32 v17, v170
+; GFX11-TRUE16-NEXT: v_dual_mov_b32 v18, v186 :: v_dual_mov_b32 v19, v185
+; GFX11-TRUE16-NEXT: s_clause 0x1f
+; GFX11-TRUE16-NEXT: scratch_load_b32 v186, off, s32
+; GFX11-TRUE16-NEXT: scratch_load_b32 v185, off, s32 offset:4
+; GFX11-TRUE16-NEXT: scratch_load_b32 v184, off, s32 offset:8
+; GFX11-TRUE16-NEXT: scratch_load_b32 v175, off, s32 offset:12
+; GFX11-TRUE16-NEXT: scratch_load_b32 v174, off, s32 offset:16
+; GFX11-TRUE16-NEXT: scratch_load_b32 v173, off, s32 offset:20
+; GFX11-TRUE16-NEXT: scratch_load_b32 v172, off, s32 offset:24
+; GFX11-TRUE16-NEXT: scratch_load_b32 v171, off, s32 offset:28
+; GFX11-TRUE16-NEXT: scratch_load_b32 v170, off, s32 offset:32
+; GFX11-TRUE16-NEXT: scratch_load_b32 v169, off, s32 offset:36
+; GFX11-TRUE16-NEXT: scratch_load_b32 v168, off, s32 offset:40
+; GFX11-TRUE16-NEXT: scratch_load_b32 v159, off, s32 offset:44
+; GFX11-TRUE16-NEXT: scratch_load_b32 v158, off, s32 offset:48
+; GFX11-TRUE16-NEXT: scratch_load_b32 v157, off, s32 offset:52
+; GFX11-TRUE16-NEXT: scratch_load_b32 v156, off, s32 offset:56
+; GFX11-TRUE16-NEXT: scratch_load_b32 v155, off, s32 offset:60
+; GFX11-TRUE16-NEXT: scratch_load_b32 v154, off, s32 offset:64
+; GFX11-TRUE16-NEXT: scratch_load_b32 v153, off, s32 offset:68
+; GFX11-TRUE16-NEXT: scratch_load_b32 v152, off, s32 offset:72
+; GFX11-TRUE16-NEXT: scratch_load_b32 v143, off, s32 offset:76
+; GFX11-TRUE16-NEXT: scratch_load_b32 v142, off, s32 offset:80
+; GFX11-TRUE16-NEXT: scratch_load_b32 v141, off, s32 offset:84
+; GFX11-TRUE16-NEXT: scratch_load_b32 v140, off, s32 offset:88
+; GFX11-TRUE16-NEXT: scratch_load_b32 v139, off, s32 offset:92
+; GFX11-TRUE16-NEXT: scratch_load_b32 v138, off, s32 offset:96
+; GFX11-TRUE16-NEXT: scratch_load_b32 v137, off, s32 offset:100
+; GFX11-TRUE16-NEXT: scratch_load_b32 v136, off, s32 offset:104
+; GFX11-TRUE16-NEXT: scratch_load_b32 v127, off, s32 offset:108
+; GFX11-TRUE16-NEXT: scratch_load_b32 v126, off, s32 offset:112
+; GFX11-TRUE16-NEXT: scratch_load_b32 v125, off, s32 offset:116
+; GFX11-TRUE16-NEXT: scratch_load_b32 v124, off, s32 offset:120
+; GFX11-TRUE16-NEXT: scratch_load_b32 v123, off, s32 offset:124
+; GFX11-TRUE16-NEXT: s_clause 0x1f
+; GFX11-TRUE16-NEXT: scratch_load_b32 v122, off, s32 offset:128
+; GFX11-TRUE16-NEXT: scratch_load_b32 v121, off, s32 offset:132
+; GFX11-TRUE16-NEXT: scratch_load_b32 v120, off, s32 offset:136
+; GFX11-TRUE16-NEXT: scratch_load_b32 v111, off, s32 offset:140
+; GFX11-TRUE16-NEXT: scratch_load_b32 v110, off, s32 offset:144
+; GFX11-TRUE16-NEXT: scratch_load_b32 v109, off, s32 offset:148
+; GFX11-TRUE16-NEXT: scratch_load_b32 v108, off, s32 offset:152
+; GFX11-TRUE16-NEXT: scratch_load_b32 v107, off, s32 offset:156
+; GFX11-TRUE16-NEXT: scratch_load_b32 v106, off, s32 offset:160
+; GFX11-TRUE16-NEXT: scratch_load_b32 v105, off, s32 offset:164
+; GFX11-TRUE16-NEXT: scratch_load_b32 v104, off, s32 offset:168
+; GFX11-TRUE16-NEXT: scratch_load_b32 v95, off, s32 offset:172
+; GFX11-TRUE16-NEXT: scratch_load_b32 v94, off, s32 offset:176
+; GFX11-TRUE16-NEXT: scratch_load_b32 v93, off, s32 offset:180
+; GFX11-TRUE16-NEXT: scratch_load_b32 v92, off, s32 offset:184
+; GFX11-TRUE16-NEXT: scratch_load_b32 v91, off, s32 offset:188
+; GFX11-TRUE16-NEXT: scratch_load_b32 v90, off, s32 offset:192
+; GFX11-TRUE16-NEXT: scratch_load_b32 v89, off, s32 offset:196
+; GFX11-TRUE16-NEXT: scratch_load_b32 v88, off, s32 offset:200
+; GFX11-TRUE16-NEXT: scratch_load_b32 v79, off, s32 offset:204
+; GFX11-TRUE16-NEXT: scratch_load_b32 v78, off, s32 offset:208
+; GFX11-TRUE16-NEXT: scratch_load_b32 v77, off, s32 offset:212
+; GFX11-TRUE16-NEXT: scratch_load_b32 v76, off, s32 offset:216
+; GFX11-TRUE16-NEXT: scratch_load_b32 v75, off, s32 offset:220
+; GFX11-TRUE16-NEXT: scratch_load_b32 v74, off, s32 offset:224
+; GFX11-TRUE16-NEXT: scratch_load_b32 v73, off, s32 offset:228
+; GFX11-TRUE16-NEXT: scratch_load_b32 v72, off, s32 offset:232
+; GFX11-TRUE16-NEXT: scratch_load_b32 v63, off, s32 offset:236
+; GFX11-TRUE16-NEXT: scratch_load_b32 v62, off, s32 offset:240
+; GFX11-TRUE16-NEXT: scratch_load_b32 v61, off, s32 offset:244
+; GFX11-TRUE16-NEXT: scratch_load_b32 v60, off, s32 offset:248
+; GFX11-TRUE16-NEXT: scratch_load_b32 v59, off, s32 offset:252
+; GFX11-TRUE16-NEXT: s_clause 0xa
+; GFX11-TRUE16-NEXT: scratch_load_b32 v58, off, s32 offset:256
+; GFX11-TRUE16-NEXT: scratch_load_b32 v57, off, s32 offset:260
+; GFX11-TRUE16-NEXT: scratch_load_b32 v56, off, s32 offset:264
+; GFX11-TRUE16-NEXT: scratch_load_b32 v47, off, s32 offset:268
+; GFX11-TRUE16-NEXT: scratch_load_b32 v46, off, s32 offset:272
+; GFX11-TRUE16-NEXT: scratch_load_b32 v45, off, s32 offset:276
+; GFX11-TRUE16-NEXT: scratch_load_b32 v44, off, s32 offset:280
+; GFX11-TRUE16-NEXT: scratch_load_b32 v43, off, s32 offset:284
+; GFX11-TRUE16-NEXT: scratch_load_b32 v42, off, s32 offset:288
+; GFX11-TRUE16-NEXT: scratch_load_b32 v41, off, s32 offset:292
+; GFX11-TRUE16-NEXT: scratch_load_b32 v40, off, s32 offset:296
+; GFX11-TRUE16-NEXT: v_dual_mov_b32 v1, v2 :: v_dual_mov_b32 v2, v5
+; GFX11-TRUE16-NEXT: v_dual_mov_b32 v3, v9 :: v_dual_mov_b32 v4, v14
+; GFX11-TRUE16-NEXT: v_dual_mov_b32 v5, v20 :: v_dual_mov_b32 v6, v27
+; GFX11-TRUE16-NEXT: v_dual_mov_b32 v9, v54 :: v_dual_mov_b32 v10, v65
+; GFX11-TRUE16-NEXT: v_mov_b32_e32 v14, v119
+; GFX11-TRUE16-NEXT: s_waitcnt vmcnt(0)
; GFX11-TRUE16-NEXT: s_setpc_b64 s[30:31]
; GFX11-TRUE16-NEXT: .LBB15_4:
; GFX11-TRUE16-NEXT: ; implicit-def: $vgpr0_vgpr1_vgpr2_vgpr3_vgpr4_vgpr5_vgpr6_vgpr7_vgpr8_vgpr9_vgpr10_vgpr11_vgpr12_vgpr13_vgpr14_vgpr15_vgpr16_vgpr17_vgpr18_vgpr19_vgpr20_vgpr21_vgpr22_vgpr23_vgpr24_vgpr25_vgpr26_vgpr27_vgpr28_vgpr29_vgpr30_vgpr31
+; GFX11-TRUE16-NEXT: ; implicit-def: $vgpr1_vgpr2_vgpr3_vgpr4_vgpr5_vgpr6_vgpr7_vgpr8_vgpr9_vgpr10_vgpr11_vgpr12_vgpr13_vgpr14_vgpr15_vgpr16_vgpr17_vgpr18_vgpr19_vgpr20_vgpr21_vgpr22_vgpr23_vgpr24_vgpr25_vgpr26_vgpr27_vgpr28_vgpr29_vgpr30_vgpr31_vgpr32
+; GFX11-TRUE16-NEXT: ; implicit-def: $vgpr3_vgpr4_vgpr5_vgpr6_vgpr7_vgpr8_vgpr9_vgpr10_vgpr11_vgpr12_vgpr13_vgpr14_vgpr15_vgpr16_vgpr17_vgpr18_vgpr19_vgpr20_vgpr21_vgpr22_vgpr23_vgpr24_vgpr25_vgpr26_vgpr27_vgpr28_vgpr29_vgpr30_vgpr31_vgpr32_vgpr33_vgpr34
+; GFX11-TRUE16-NEXT: ; implicit-def: $vgpr6_vgpr7_vgpr8_vgpr9_vgpr10_vgpr11_vgpr12_vgpr13_vgpr14_vgpr15_vgpr16_vgpr17_vgpr18_vgpr19_vgpr20_vgpr21_vgpr22_vgpr23_vgpr24_vgpr25_vgpr26_vgpr27_vgpr28_vgpr29_vgpr30_vgpr31_vgpr32_vgpr33_vgpr34_vgpr35_vgpr36_vgpr37
+; GFX11-TRUE16-NEXT: ; implicit-def: $vgpr10_vgpr11_vgpr12_vgpr13_vgpr14_vgpr15_vgpr16_vgpr17_vgpr18_vgpr19_vgpr20_vgpr21_vgpr22_vgpr23_vgpr24_vgpr25_vgpr26_vgpr27_vgpr28_vgpr29_vgpr30_vgpr31_vgpr32_vgpr33_vgpr34_vgpr35_vgpr36_vgpr37_vgpr38_vgpr39_vgpr40_vgpr41
+; GFX11-TRUE16-NEXT: ; implicit-def: $vgpr15_vgpr16_vgpr17_vgpr18_vgpr19_vgpr20_vgpr21_vgpr22_vgpr23_vgpr24_vgpr25_vgpr26_vgpr27_vgpr28_vgpr29_vgpr30_vgpr31_vgpr32_vgpr33_vgpr34_vgpr35_vgpr36_vgpr37_vgpr38_vgpr39_vgpr40_vgpr41_vgpr42_vgpr43_vgpr44_vgpr45_vgpr46
+; GFX11-TRUE16-NEXT: ; implicit-def: $vgpr21_vgpr22_vgpr23_vgpr24_vgpr25_vgpr26_vgpr27_vgpr28_vgpr29_vgpr30_vgpr31_vgpr32_vgpr33_vgpr34_vgpr35_vgpr36_vgpr37_vgpr38_vgpr39_vgpr40_vgpr41_vgpr42_vgpr43_vgpr44_vgpr45_vgpr46_vgpr47_vgpr48_vgpr49_vgpr50_vgpr51_vgpr52
+; GFX11-TRUE16-NEXT: ; implicit-def: $vgpr28_vgpr29_vgpr30_vgpr31_vgpr32_vgpr33_vgpr34_vgpr35_vgpr36_vgpr37_vgpr38_vgpr39_vgpr40_vgpr41_vgpr42_vgpr43_vgpr44_vgpr45_vgpr46_vgpr47_vgpr48_vgpr49_vgpr50_vgpr51_vgpr52_vgpr53_vgpr54_vgpr55_vgpr56_vgpr57_vgpr58_vgpr59
+; GFX11-TRUE16-NEXT: ; implicit-def: $vgpr36_vgpr37_vgpr38_vgpr39_vgpr40_vgpr41_vgpr42_vgpr43_vgpr44_vgpr45_vgpr46_vgpr47_vgpr48_vgpr49_vgpr50_vgpr51_vgpr52_vgpr53_vgpr54_vgpr55_vgpr56_vgpr57_vgpr58_vgpr59_vgpr60_vgpr61_vgpr62_vgpr63_vgpr64_vgpr65_vgpr66_vgpr67
+; GFX11-TRUE16-NEXT: ; implicit-def: $vgpr45_vgpr46_vgpr47_vgpr48_vgpr49_vgpr50_vgpr51_vgpr52_vgpr53_vgpr54_vgpr55_vgpr56_vgpr57_vgpr58_vgpr59_vgpr60_vgpr61_vgpr62_vgpr63_vgpr64_vgpr65_vgpr66_vgpr67_vgpr68_vgpr69_vgpr70_vgpr71_vgpr72_vgpr73_vgpr74_vgpr75_vgpr76
+; GFX11-TRUE16-NEXT: ; implicit-def: $vgpr55_vgpr56_vgpr57_vgpr58_vgpr59_vgpr60_vgpr61_vgpr62_vgpr63_vgpr64_vgpr65_vgpr66_vgpr67_vgpr68_vgpr69_vgpr70_vgpr71_vgpr72_vgpr73_vgpr74_vgpr75_vgpr76_vgpr77_vgpr78_vgpr79_vgpr80_vgpr81_vgpr82_vgpr83_vgpr84_vgpr85_vgpr86
+; GFX11-TRUE16-NEXT: ; implicit-def: $vgpr66_vgpr67_vgpr68_vgpr69_vgpr70_vgpr71_vgpr72_vgpr73_vgpr74_vgpr75_vgpr76_vgpr77_vgpr78_vgpr79_vgpr80_vgpr81_vgpr82_vgpr83_vgpr84_vgpr85_vgpr86_vgpr87_vgpr88_vgpr89_vgpr90_vgpr91_vgpr92_vgpr93_vgpr94_vgpr95_vgpr96_vgpr97
+; GFX11-TRUE16-NEXT: ; implicit-def: $vgpr78_vgpr79_vgpr80_vgpr81_vgpr82_vgpr83_vgpr84_vgpr85_vgpr86_vgpr87_vgpr88_vgpr89_vgpr90_vgpr91_vgpr92_vgpr93_vgpr94_vgpr95_vgpr96_vgpr97_vgpr98_vgpr99_vgpr100_vgpr101_vgpr102_vgpr103_vgpr104_vgpr105_vgpr106_vgpr107_vgpr108_vgpr109
+; GFX11-TRUE16-NEXT: ; implicit-def: $vgpr91_vgpr92_vgpr93_vgpr94_vgpr95_vgpr96_vgpr97_vgpr98_vgpr99_vgpr100_vgpr101_vgpr102_vgpr103_vgpr104_vgpr105_vgpr106_vgpr107_vgpr108_vgpr109_vgpr110_vgpr111_vgpr112_vgpr113_vgpr114_vgpr115_vgpr116_vgpr117_vgpr118_vgpr119_vgpr120_vgpr121_vgpr122
+; GFX11-TRUE16-NEXT: ; implicit-def: $vgpr105_vgpr106_vgpr107_vgpr108_vgpr109_vgpr110_vgpr111_vgpr112_vgpr113_vgpr114_vgpr115_vgpr116_vgpr117_vgpr118_vgpr119_vgpr120_vgpr121_vgpr122_vgpr123_vgpr124_vgpr125_vgpr126_vgpr127_vgpr128_vgpr129_vgpr130_vgpr131_vgpr132_vgpr133_vgpr134_vgpr135_vgpr136
+; GFX11-TRUE16-NEXT: ; implicit-def: $vgpr120_vgpr121_vgpr122_vgpr123_vgpr124_vgpr125_vgpr126_vgpr127_vgpr128_vgpr129_vgpr130_vgpr131_vgpr132_vgpr133_vgpr134_vgpr135_vgpr136_vgpr137_vgpr138_vgpr139_vgpr140_vgpr141_vgpr142_vgpr143_vgpr144_vgpr145_vgpr146_vgpr147_vgpr148_vgpr149_vgpr150_vgpr151
+; GFX11-TRUE16-NEXT: ; implicit-def: $vgpr136_vgpr137_vgpr138_vgpr139_vgpr140_vgpr141_vgpr142_vgpr143_vgpr144_vgpr145_vgpr146_vgpr147_vgpr148_vgpr149_vgpr150_vgpr151_vgpr152_vgpr153_vgpr154_vgpr155_vgpr156_vgpr157_vgpr158_vgpr159_vgpr160_vgpr161_vgpr162_vgpr163_vgpr164_vgpr165_vgpr166_vgpr167
+; GFX11-TRUE16-NEXT: ; implicit-def: $vgpr153_vgpr154_vgpr155_vgpr156_vgpr157_vgpr158_vgpr159_vgpr160_vgpr161_vgpr162_vgpr163_vgpr164_vgpr165_vgpr166_vgpr167_vgpr168_vgpr169_vgpr170_vgpr171_vgpr172_vgpr173_vgpr174_vgpr175_vgpr176_vgpr177_vgpr178_vgpr179_vgpr180_vgpr181_vgpr182_vgpr183_vgpr184
; GFX11-TRUE16-NEXT: s_branch .LBB15_2
;
; GFX11-FAKE16-LABEL: bitcast_v40i16_to_v20i32_scalar:
@@ -8342,93 +8519,270 @@ define inreg <20 x i32> @bitcast_v40f16_to_v20i32_scalar(<40 x half> inreg %a, i
; GFX11-TRUE16-LABEL: bitcast_v40f16_to_v20i32_scalar:
; GFX11-TRUE16: ; %bb.0:
; GFX11-TRUE16-NEXT: s_waitcnt vmcnt(0) expcnt(0) lgkmcnt(0)
-; GFX11-TRUE16-NEXT: v_mov_b16_e32 v32.h, 0
; GFX11-TRUE16-NEXT: v_cmp_ne_u32_e32 vcc_lo, 0, v2
-; GFX11-TRUE16-NEXT: v_mov_b16_e32 v32.l, v1.h
-; GFX11-TRUE16-NEXT: v_mov_b16_e32 v33.l, v0.h
-; GFX11-TRUE16-NEXT: v_and_b32_e32 v35, 0xffff, v0
-; GFX11-TRUE16-NEXT: v_mov_b16_e32 v33.h, v32.h
-; GFX11-TRUE16-NEXT: v_and_b32_e32 v34, 0xffff, v1
-; GFX11-TRUE16-NEXT: s_lshr_b32 s41, s29, 16
-; GFX11-TRUE16-NEXT: s_lshr_b32 s42, s28, 16
-; GFX11-TRUE16-NEXT: s_lshr_b32 s43, s27, 16
-; GFX11-TRUE16-NEXT: s_lshr_b32 s44, s26, 16
-; GFX11-TRUE16-NEXT: s_lshr_b32 s45, s25, 16
-; GFX11-TRUE16-NEXT: s_lshr_b32 s15, s24, 16
-; GFX11-TRUE16-NEXT: s_lshr_b32 s14, s23, 16
-; GFX11-TRUE16-NEXT: s_lshr_b32 s13, s22, 16
-; GFX11-TRUE16-NEXT: s_lshr_b32 s12, s21, 16
-; GFX11-TRUE16-NEXT: s_lshr_b32 s11, s20, 16
-; GFX11-TRUE16-NEXT: s_lshr_b32 s10, s19, 16
-; GFX11-TRUE16-NEXT: s_lshr_b32 s9, s18, 16
-; GFX11-TRUE16-NEXT: s_lshr_b32 s8, s17, 16
-; GFX11-TRUE16-NEXT: s_lshr_b32 s7, s16, 16
-; GFX11-TRUE16-NEXT: s_lshr_b32 s46, s3, 16
-; GFX11-TRUE16-NEXT: s_lshr_b32 s6, s2, 16
-; GFX11-TRUE16-NEXT: s_lshr_b32 s5, s1, 16
-; GFX11-TRUE16-NEXT: s_lshr_b32 s4, s0, 16
-; GFX11-TRUE16-NEXT: s_mov_b32 s40, 0
-; GFX11-TRUE16-NEXT: s_pack_ll_b32_b16 s4, s0, s4
-; GFX11-TRUE16-NEXT: s_pack_ll_b32_b16 s5, s1, s5
-; GFX11-TRUE16-NEXT: s_pack_ll_b32_b16 s6, s2, s6
-; GFX11-TRUE16-NEXT: s_pack_ll_b32_b16 s3, s3, s46
-; GFX11-TRUE16-NEXT: s_pack_ll_b32_b16 s7, s16, s7
-; GFX11-TRUE16-NEXT: s_pack_ll_b32_b16 s8, s17, s8
-; GFX11-TRUE16-NEXT: s_pack_ll_b32_b16 s9, s18, s9
-; GFX11-TRUE16-NEXT: s_pack_ll_b32_b16 s10, s19, s10
-; GFX11-TRUE16-NEXT: s_pack_ll_b32_b16 s11, s20, s11
-; GFX11-TRUE16-NEXT: s_pack_ll_b32_b16 s12, s21, s12
-; GFX11-TRUE16-NEXT: s_pack_ll_b32_b16 s13, s22, s13
-; GFX11-TRUE16-NEXT: s_pack_ll_b32_b16 s14, s23, s14
-; GFX11-TRUE16-NEXT: s_pack_ll_b32_b16 s15, s24, s15
-; GFX11-TRUE16-NEXT: s_pack_ll_b32_b16 s16, s25, s45
-; GFX11-TRUE16-NEXT: s_pack_ll_b32_b16 s17, s26, s44
-; GFX11-TRUE16-NEXT: s_pack_ll_b32_b16 s0, s27, s43
-; GFX11-TRUE16-NEXT: s_pack_ll_b32_b16 s1, s28, s42
-; GFX11-TRUE16-NEXT: s_pack_ll_b32_b16 s2, s29, s41
+; GFX11-TRUE16-NEXT: s_clause 0x1f
+; GFX11-TRUE16-NEXT: scratch_store_b32 off, v40, s32 offset:296
+; GFX11-TRUE16-NEXT: scratch_store_b32 off, v41, s32 offset:292
+; GFX11-TRUE16-NEXT: scratch_store_b32 off, v42, s32 offset:288
+; GFX11-TRUE16-NEXT: scratch_store_b32 off, v43, s32 offset:284
+; GFX11-TRUE16-NEXT: scratch_store_b32 off, v44, s32 offset:280
+; GFX11-TRUE16-NEXT: scratch_store_b32 off, v45, s32 offset:276
+; GFX11-TRUE16-NEXT: scratch_store_b32 off, v46, s32 offset:272
+; GFX11-TRUE16-NEXT: scratch_store_b32 off, v47, s32 offset:268
+; GFX11-TRUE16-NEXT: scratch_store_b32 off, v56, s32 offset:264
+; GFX11-TRUE16-NEXT: scratch_store_b32 off, v57, s32 offset:260
+; GFX11-TRUE16-NEXT: scratch_store_b32 off, v58, s32 offset:256
+; GFX11-TRUE16-NEXT: scratch_store_b32 off, v59, s32 offset:252
+; GFX11-TRUE16-NEXT: scratch_store_b32 off, v60, s32 offset:248
+; GFX11-TRUE16-NEXT: scratch_store_b32 off, v61, s32 offset:244
+; GFX11-TRUE16-NEXT: scratch_store_b32 off, v62, s32 offset:240
+; GFX11-TRUE16-NEXT: scratch_store_b32 off, v63, s32 offset:236
+; GFX11-TRUE16-NEXT: scratch_store_b32 off, v72, s32 offset:232
+; GFX11-TRUE16-NEXT: scratch_store_b32 off, v73, s32 offset:228
+; GFX11-TRUE16-NEXT: scratch_store_b32 off, v74, s32 offset:224
+; GFX11-TRUE16-NEXT: scratch_store_b32 off, v75, s32 offset:220
+; GFX11-TRUE16-NEXT: scratch_store_b32 off, v76, s32 offset:216
+; GFX11-TRUE16-NEXT: scratch_store_b32 off, v77, s32 offset:212
+; GFX11-TRUE16-NEXT: scratch_store_b32 off, v78, s32 offset:208
+; GFX11-TRUE16-NEXT: scratch_store_b32 off, v79, s32 offset:204
+; GFX11-TRUE16-NEXT: scratch_store_b32 off, v88, s32 offset:200
+; GFX11-TRUE16-NEXT: scratch_store_b32 off, v89, s32 offset:196
+; GFX11-TRUE16-NEXT: scratch_store_b32 off, v90, s32 offset:192
+; GFX11-TRUE16-NEXT: scratch_store_b32 off, v91, s32 offset:188
+; GFX11-TRUE16-NEXT: scratch_store_b32 off, v92, s32 offset:184
+; GFX11-TRUE16-NEXT: scratch_store_b32 off, v93, s32 offset:180
+; GFX11-TRUE16-NEXT: scratch_store_b32 off, v94, s32 offset:176
+; GFX11-TRUE16-NEXT: scratch_store_b32 off, v95, s32 offset:172
+; GFX11-TRUE16-NEXT: s_clause 0x1f
+; GFX11-TRUE16-NEXT: scratch_store_b32 off, v104, s32 offset:168
+; GFX11-TRUE16-NEXT: scratch_store_b32 off, v105, s32 offset:164
+; GFX11-TRUE16-NEXT: scratch_store_b32 off, v106, s32 offset:160
+; GFX11-TRUE16-NEXT: scratch_store_b32 off, v107, s32 offset:156
+; GFX11-TRUE16-NEXT: scratch_store_b32 off, v108, s32 offset:152
+; GFX11-TRUE16-NEXT: scratch_store_b32 off, v109, s32 offset:148
+; GFX11-TRUE16-NEXT: scratch_store_b32 off, v110, s32 offset:144
+; GFX11-TRUE16-NEXT: scratch_store_b32 off, v111, s32 offset:140
+; GFX11-TRUE16-NEXT: scratch_store_b32 off, v120, s32 offset:136
+; GFX11-TRUE16-NEXT: scratch_store_b32 off, v121, s32 offset:132
+; GFX11-TRUE16-NEXT: scratch_store_b32 off, v122, s32 offset:128
+; GFX11-TRUE16-NEXT: scratch_store_b32 off, v123, s32 offset:124
+; GFX11-TRUE16-NEXT: scratch_store_b32 off, v124, s32 offset:120
+; GFX11-TRUE16-NEXT: scratch_store_b32 off, v125, s32 offset:116
+; GFX11-TRUE16-NEXT: scratch_store_b32 off, v126, s32 offset:112
+; GFX11-TRUE16-NEXT: scratch_store_b32 off, v127, s32 offset:108
+; GFX11-TRUE16-NEXT: scratch_store_b32 off, v136, s32 offset:104
+; GFX11-TRUE16-NEXT: scratch_store_b32 off, v137, s32 offset:100
+; GFX11-TRUE16-NEXT: scratch_store_b32 off, v138, s32 offset:96
+; GFX11-TRUE16-NEXT: scratch_store_b32 off, v139, s32 offset:92
+; GFX11-TRUE16-NEXT: scratch_store_b32 off, v140, s32 offset:88
+; GFX11-TRUE16-NEXT: scratch_store_b32 off, v141, s32 offset:84
+; GFX11-TRUE16-NEXT: scratch_store_b32 off, v142, s32 offset:80
+; GFX11-TRUE16-NEXT: scratch_store_b32 off, v143, s32 offset:76
+; GFX11-TRUE16-NEXT: scratch_store_b32 off, v152, s32 offset:72
+; GFX11-TRUE16-NEXT: scratch_store_b32 off, v153, s32 offset:68
+; GFX11-TRUE16-NEXT: scratch_store_b32 off, v154, s32 offset:64
+; GFX11-TRUE16-NEXT: scratch_store_b32 off, v155, s32 offset:60
+; GFX11-TRUE16-NEXT: scratch_store_b32 off, v156, s32 offset:56
+; GFX11-TRUE16-NEXT: scratch_store_b32 off, v157, s32 offset:52
+; GFX11-TRUE16-NEXT: scratch_store_b32 off, v158, s32 offset:48
+; GFX11-TRUE16-NEXT: scratch_store_b32 off, v159, s32 offset:44
+; GFX11-TRUE16-NEXT: s_clause 0xa
+; GFX11-TRUE16-NEXT: scratch_store_b32 off, v168, s32 offset:40
+; GFX11-TRUE16-NEXT: scratch_store_b32 off, v169, s32 offset:36
+; GFX11-TRUE16-NEXT: scratch_store_b32 off, v170, s32 offset:32
+; GFX11-TRUE16-NEXT: scratch_store_b32 off, v171, s32 offset:28
+; GFX11-TRUE16-NEXT: scratch_store_b32 off, v172, s32 offset:24
+; GFX11-TRUE16-NEXT: scratch_store_b32 off, v173, s32 offset:20
+; GFX11-TRUE16-NEXT: scratch_store_b32 off, v174, s32 offset:16
+; GFX11-TRUE16-NEXT: scratch_store_b32 off, v175, s32 offset:12
+; GFX11-TRUE16-NEXT: scratch_store_b32 off, v184, s32 offset:8
+; GFX11-TRUE16-NEXT: scratch_store_b32 off, v185, s32 offset:4
+; GFX11-TRUE16-NEXT: scratch_store_b32 off, v186, s32
+; GFX11-TRUE16-NEXT: v_dual_mov_b32 v185, v1 :: v_dual_mov_b32 v186, v0
+; GFX11-TRUE16-NEXT: s_lshr_b32 s15, s29, 16
+; GFX11-TRUE16-NEXT: s_lshr_b32 s14, s28, 16
+; GFX11-TRUE16-NEXT: s_lshr_b32 s13, s27, 16
+; GFX11-TRUE16-NEXT: s_lshr_b32 s12, s26, 16
+; GFX11-TRUE16-NEXT: s_lshr_b32 s11, s25, 16
+; GFX11-TRUE16-NEXT: s_lshr_b32 s10, s24, 16
+; GFX11-TRUE16-NEXT: s_lshr_b32 s9, s23, 16
+; GFX11-TRUE16-NEXT: s_lshr_b32 s8, s22, 16
+; GFX11-TRUE16-NEXT: s_lshr_b32 s7, s21, 16
+; GFX11-TRUE16-NEXT: s_lshr_b32 s6, s20, 16
+; GFX11-TRUE16-NEXT: s_lshr_b32 s5, s19, 16
+; GFX11-TRUE16-NEXT: s_lshr_b32 s4, s18, 16
+; GFX11-TRUE16-NEXT: s_lshr_b32 s43, s17, 16
+; GFX11-TRUE16-NEXT: s_lshr_b32 s44, s16, 16
+; GFX11-TRUE16-NEXT: s_lshr_b32 s45, s3, 16
+; GFX11-TRUE16-NEXT: s_lshr_b32 s46, s2, 16
+; GFX11-TRUE16-NEXT: s_lshr_b32 s41, s1, 16
+; GFX11-TRUE16-NEXT: s_lshr_b32 s40, s0, 16
+; GFX11-TRUE16-NEXT: s_mov_b32 s42, 0
+; GFX11-TRUE16-NEXT: s_pack_ll_b32_b16 s40, s0, s40
+; GFX11-TRUE16-NEXT: s_pack_ll_b32_b16 s41, s1, s41
+; GFX11-TRUE16-NEXT: s_pack_ll_b32_b16 s0, s2, s46
+; GFX11-TRUE16-NEXT: s_pack_ll_b32_b16 s1, s3, s45
+; GFX11-TRUE16-NEXT: s_pack_ll_b32_b16 s2, s16, s44
+; GFX11-TRUE16-NEXT: s_pack_ll_b32_b16 s3, s17, s43
+; GFX11-TRUE16-NEXT: s_pack_ll_b32_b16 s4, s18, s4
+; GFX11-TRUE16-NEXT: s_pack_ll_b32_b16 s5, s19, s5
+; GFX11-TRUE16-NEXT: s_pack_ll_b32_b16 s6, s20, s6
+; GFX11-TRUE16-NEXT: s_pack_ll_b32_b16 s7, s21, s7
+; GFX11-TRUE16-NEXT: s_pack_ll_b32_b16 s8, s22, s8
+; GFX11-TRUE16-NEXT: s_pack_ll_b32_b16 s9, s23, s9
+; GFX11-TRUE16-NEXT: s_pack_ll_b32_b16 s10, s24, s10
+; GFX11-TRUE16-NEXT: s_pack_ll_b32_b16 s11, s25, s11
+; GFX11-TRUE16-NEXT: s_pack_ll_b32_b16 s12, s26, s12
+; GFX11-TRUE16-NEXT: s_pack_ll_b32_b16 s13, s27, s13
+; GFX11-TRUE16-NEXT: s_pack_ll_b32_b16 s14, s28, s14
+; GFX11-TRUE16-NEXT: s_pack_ll_b32_b16 s15, s29, s15
; GFX11-TRUE16-NEXT: s_and_b32 s47, vcc_lo, exec_lo
; GFX11-TRUE16-NEXT: s_cbranch_scc0 .LBB19_4
; GFX11-TRUE16-NEXT: ; %bb.1: ; %cmp.false
-; GFX11-TRUE16-NEXT: v_lshl_or_b32 v18, v33, 16, v35
-; GFX11-TRUE16-NEXT: v_lshl_or_b32 v19, v32, 16, v34
-; GFX11-TRUE16-NEXT: v_dual_mov_b32 v0, s4 :: v_dual_mov_b32 v1, s5
-; GFX11-TRUE16-NEXT: v_dual_mov_b32 v2, s6 :: v_dual_mov_b32 v3, s3
-; GFX11-TRUE16-NEXT: v_dual_mov_b32 v4, s7 :: v_dual_mov_b32 v5, s8
-; GFX11-TRUE16-NEXT: v_dual_mov_b32 v6, s9 :: v_dual_mov_b32 v7, s10
-; GFX11-TRUE16-NEXT: v_dual_mov_b32 v8, s11 :: v_dual_mov_b32 v9, s12
-; GFX11-TRUE16-NEXT: v_dual_mov_b32 v10, s13 :: v_dual_mov_b32 v11, s14
-; GFX11-TRUE16-NEXT: v_dual_mov_b32 v12, s15 :: v_dual_mov_b32 v13, s16
-; GFX11-TRUE16-NEXT: v_dual_mov_b32 v14, s17 :: v_dual_mov_b32 v15, s0
-; GFX11-TRUE16-NEXT: v_dual_mov_b32 v16, s1 :: v_dual_mov_b32 v17, s2
-; GFX11-TRUE16-NEXT: s_and_not1_b32 vcc_lo, exec_lo, s40
+; GFX11-TRUE16-NEXT: v_dual_mov_b32 v0, s40 :: v_dual_mov_b32 v5, s0
+; GFX11-TRUE16-NEXT: v_dual_mov_b32 v2, s41 :: v_dual_mov_b32 v9, s1
+; GFX11-TRUE16-NEXT: v_dual_mov_b32 v14, s2 :: v_dual_mov_b32 v27, s4
+; GFX11-TRUE16-NEXT: v_dual_mov_b32 v20, s3 :: v_dual_mov_b32 v35, s5
+; GFX11-TRUE16-NEXT: v_dual_mov_b32 v44, s6 :: v_dual_mov_b32 v65, s8
+; GFX11-TRUE16-NEXT: v_dual_mov_b32 v54, s7 :: v_dual_mov_b32 v77, s9
+; GFX11-TRUE16-NEXT: v_dual_mov_b32 v90, s10 :: v_dual_mov_b32 v119, s12
+; GFX11-TRUE16-NEXT: v_dual_mov_b32 v104, s11 :: v_dual_mov_b32 v135, s13
+; GFX11-TRUE16-NEXT: v_mov_b32_e32 v152, s14
+; GFX11-TRUE16-NEXT: v_mov_b32_e32 v170, s15
+; GFX11-TRUE16-NEXT: s_and_not1_b32 vcc_lo, exec_lo, s42
; GFX11-TRUE16-NEXT: s_cbranch_vccnz .LBB19_3
; GFX11-TRUE16-NEXT: .LBB19_2: ; %cmp.true
-; GFX11-TRUE16-NEXT: v_lshl_or_b32 v18, v33, 16, v35
-; GFX11-TRUE16-NEXT: v_lshl_or_b32 v19, v32, 16, v34
-; GFX11-TRUE16-NEXT: v_pk_add_f16 v0, 0x200, s4 op_sel_hi:[0,1]
-; GFX11-TRUE16-NEXT: v_pk_add_f16 v1, 0x200, s5 op_sel_hi:[0,1]
-; GFX11-TRUE16-NEXT: v_pk_add_f16 v2, 0x200, s6 op_sel_hi:[0,1]
-; GFX11-TRUE16-NEXT: v_pk_add_f16 v3, 0x200, s3 op_sel_hi:[0,1]
-; GFX11-TRUE16-NEXT: v_pk_add_f16 v4, 0x200, s7 op_sel_hi:[0,1]
-; GFX11-TRUE16-NEXT: v_pk_add_f16 v5, 0x200, s8 op_sel_hi:[0,1]
-; GFX11-TRUE16-NEXT: v_pk_add_f16 v6, 0x200, s9 op_sel_hi:[0,1]
-; GFX11-TRUE16-NEXT: v_pk_add_f16 v7, 0x200, s10 op_sel_hi:[0,1]
-; GFX11-TRUE16-NEXT: v_pk_add_f16 v8, 0x200, s11 op_sel_hi:[0,1]
-; GFX11-TRUE16-NEXT: v_pk_add_f16 v9, 0x200, s12 op_sel_hi:[0,1]
-; GFX11-TRUE16-NEXT: v_pk_add_f16 v10, 0x200, s13 op_sel_hi:[0,1]
-; GFX11-TRUE16-NEXT: v_pk_add_f16 v11, 0x200, s14 op_sel_hi:[0,1]
-; GFX11-TRUE16-NEXT: v_pk_add_f16 v12, 0x200, s15 op_sel_hi:[0,1]
-; GFX11-TRUE16-NEXT: v_pk_add_f16 v13, 0x200, s16 op_sel_hi:[0,1]
-; GFX11-TRUE16-NEXT: v_pk_add_f16 v14, 0x200, s17 op_sel_hi:[0,1]
-; GFX11-TRUE16-NEXT: v_pk_add_f16 v15, 0x200, s0 op_sel_hi:[0,1]
-; GFX11-TRUE16-NEXT: v_pk_add_f16 v16, 0x200, s1 op_sel_hi:[0,1]
-; GFX11-TRUE16-NEXT: v_pk_add_f16 v17, 0x200, s2 op_sel_hi:[0,1]
-; GFX11-TRUE16-NEXT: v_pk_add_f16 v18, 0x200, v18 op_sel_hi:[0,1]
-; GFX11-TRUE16-NEXT: v_pk_add_f16 v19, 0x200, v19 op_sel_hi:[0,1]
+; GFX11-TRUE16-NEXT: v_pk_add_f16 v0, 0x200, s40 op_sel_hi:[0,1]
+; GFX11-TRUE16-NEXT: v_pk_add_f16 v2, 0x200, s41 op_sel_hi:[0,1]
+; GFX11-TRUE16-NEXT: v_pk_add_f16 v186, 0x200, v186 op_sel_hi:[0,1]
+; GFX11-TRUE16-NEXT: v_pk_add_f16 v185, 0x200, v185 op_sel_hi:[0,1]
+; GFX11-TRUE16-NEXT: v_pk_add_f16 v5, 0x200, s0 op_sel_hi:[0,1]
+; GFX11-TRUE16-NEXT: v_pk_add_f16 v9, 0x200, s1 op_sel_hi:[0,1]
+; GFX11-TRUE16-NEXT: v_pk_add_f16 v14, 0x200, s2 op_sel_hi:[0,1]
+; GFX11-TRUE16-NEXT: v_pk_add_f16 v20, 0x200, s3 op_sel_hi:[0,1]
+; GFX11-TRUE16-NEXT: v_pk_add_f16 v27, 0x200, s4 op_sel_hi:[0,1]
+; GFX11-TRUE16-NEXT: v_pk_add_f16 v35, 0x200, s5 op_sel_hi:[0,1]
+; GFX11-TRUE16-NEXT: v_pk_add_f16 v44, 0x200, s6 op_sel_hi:[0,1]
+; GFX11-TRUE16-NEXT: v_pk_add_f16 v54, 0x200, s7 op_sel_hi:[0,1]
+; GFX11-TRUE16-NEXT: v_pk_add_f16 v65, 0x200, s8 op_sel_hi:[0,1]
+; GFX11-TRUE16-NEXT: v_pk_add_f16 v77, 0x200, s9 op_sel_hi:[0,1]
+; GFX11-TRUE16-NEXT: v_pk_add_f16 v90, 0x200, s10 op_sel_hi:[0,1]
+; GFX11-TRUE16-NEXT: v_pk_add_f16 v104, 0x200, s11 op_sel_hi:[0,1]
+; GFX11-TRUE16-NEXT: v_pk_add_f16 v119, 0x200, s12 op_sel_hi:[0,1]
+; GFX11-TRUE16-NEXT: v_pk_add_f16 v135, 0x200, s13 op_sel_hi:[0,1]
+; GFX11-TRUE16-NEXT: v_pk_add_f16 v152, 0x200, s14 op_sel_hi:[0,1]
+; GFX11-TRUE16-NEXT: v_pk_add_f16 v170, 0x200, s15 op_sel_hi:[0,1]
; GFX11-TRUE16-NEXT: .LBB19_3: ; %end
+; GFX11-TRUE16-NEXT: v_dual_mov_b32 v7, v35 :: v_dual_mov_b32 v8, v44
+; GFX11-TRUE16-NEXT: v_dual_mov_b32 v11, v77 :: v_dual_mov_b32 v12, v90
+; GFX11-TRUE16-NEXT: v_mov_b32_e32 v13, v104
+; GFX11-TRUE16-NEXT: v_dual_mov_b32 v15, v135 :: v_dual_mov_b32 v16, v152
+; GFX11-TRUE16-NEXT: v_mov_b32_e32 v17, v170
+; GFX11-TRUE16-NEXT: v_dual_mov_b32 v18, v186 :: v_dual_mov_b32 v19, v185
+; GFX11-TRUE16-NEXT: s_clause 0x1f
+; GFX11-TRUE16-NEXT: scratch_load_b32 v186, off, s32
+; GFX11-TRUE16-NEXT: scratch_load_b32 v185, off, s32 offset:4
+; GFX11-TRUE16-NEXT: scratch_load_b32 v184, off, s32 offset:8
+; GFX11-TRUE16-NEXT: scratch_load_b32 v175, off, s32 offset:12
+; GFX11-TRUE16-NEXT: scratch_load_b32 v174, off, s32 offset:16
+; GFX11-TRUE16-NEXT: scratch_load_b32 v173, off, s32 offset:20
+; GFX11-TRUE16-NEXT: scratch_load_b32 v172, off, s32 offset:24
+; GFX11-TRUE16-NEXT: scratch_load_b32 v171, off, s32 offset:28
+; GFX11-TRUE16-NEXT: scratch_load_b32 v170, off, s32 offset:32
+; GFX11-TRUE16-NEXT: scratch_load_b32 v169, off, s32 offset:36
+; GFX11-TRUE16-NEXT: scratch_load_b32 v168, off, s32 offset:40
+; GFX11-TRUE16-NEXT: scratch_load_b32 v159, off, s32 offset:44
+; GFX11-TRUE16-NEXT: scratch_load_b32 v158, off, s32 offset:48
+; GFX11-TRUE16-NEXT: scratch_load_b32 v157, off, s32 offset:52
+; GFX11-TRUE16-NEXT: scratch_load_b32 v156, off, s32 offset:56
+; GFX11-TRUE16-NEXT: scratch_load_b32 v155, off, s32 offset:60
+; GFX11-TRUE16-NEXT: scratch_load_b32 v154, off, s32 offset:64
+; GFX11-TRUE16-NEXT: scratch_load_b32 v153, off, s32 offset:68
+; GFX11-TRUE16-NEXT: scratch_load_b32 v152, off, s32 offset:72
+; GFX11-TRUE16-NEXT: scratch_load_b32 v143, off, s32 offset:76
+; GFX11-TRUE16-NEXT: scratch_load_b32 v142, off, s32 offset:80
+; GFX11-TRUE16-NEXT: scratch_load_b32 v141, off, s32 offset:84
+; GFX11-TRUE16-NEXT: scratch_load_b32 v140, off, s32 offset:88
+; GFX11-TRUE16-NEXT: scratch_load_b32 v139, off, s32 offset:92
+; GFX11-TRUE16-NEXT: scratch_load_b32 v138, off, s32 offset:96
+; GFX11-TRUE16-NEXT: scratch_load_b32 v137, off, s32 offset:100
+; GFX11-TRUE16-NEXT: scratch_load_b32 v136, off, s32 offset:104
+; GFX11-TRUE16-NEXT: scratch_load_b32 v127, off, s32 offset:108
+; GFX11-TRUE16-NEXT: scratch_load_b32 v126, off, s32 offset:112
+; GFX11-TRUE16-NEXT: scratch_load_b32 v125, off, s32 offset:116
+; GFX11-TRUE16-NEXT: scratch_load_b32 v124, off, s32 offset:120
+; GFX11-TRUE16-NEXT: scratch_load_b32 v123, off, s32 offset:124
+; GFX11-TRUE16-NEXT: s_clause 0x1f
+; GFX11-TRUE16-NEXT: scratch_load_b32 v122, off, s32 offset:128
+; GFX11-TRUE16-NEXT: scratch_load_b32 v121, off, s32 offset:132
+; GFX11-TRUE16-NEXT: scratch_load_b32 v120, off, s32 offset:136
+; GFX11-TRUE16-NEXT: scratch_load_b32 v111, off, s32 offset:140
+; GFX11-TRUE16-NEXT: scratch_load_b32 v110, off, s32 offset:144
+; GFX11-TRUE16-NEXT: scratch_load_b32 v109, off, s32 offset:148
+; GFX11-TRUE16-NEXT: scratch_load_b32 v108, off, s32 offset:152
+; GFX11-TRUE16-NEXT: scratch_load_b32 v107, off, s32 offset:156
+; GFX11-TRUE16-NEXT: scratch_load_b32 v106, off, s32 offset:160
+; GFX11-TRUE16-NEXT: scratch_load_b32 v105, off, s32 offset:164
+; GFX11-TRUE16-NEXT: scratch_load_b32 v104, off, s32 offset:168
+; GFX11-TRUE16-NEXT: scratch_load_b32 v95, off, s32 offset:172
+; GFX11-TRUE16-NEXT: scratch_load_b32 v94, off, s32 offset:176
+; GFX11-TRUE16-NEXT: scratch_load_b32 v93, off, s32 offset:180
+; GFX11-TRUE16-NEXT: scratch_load_b32 v92, off, s32 offset:184
+; GFX11-TRUE16-NEXT: scratch_load_b32 v91, off, s32 offset:188
+; GFX11-TRUE16-NEXT: scratch_load_b32 v90, off, s32 offset:192
+; GFX11-TRUE16-NEXT: scratch_load_b32 v89, off, s32 offset:196
+; GFX11-TRUE16-NEXT: scratch_load_b32 v88, off, s32 offset:200
+; GFX11-TRUE16-NEXT: scratch_load_b32 v79, off, s32 offset:204
+; GFX11-TRUE16-NEXT: scratch_load_b32 v78, off, s32 offset:208
+; GFX11-TRUE16-NEXT: scratch_load_b32 v77, off, s32 offset:212
+; GFX11-TRUE16-NEXT: scratch_load_b32 v76, off, s32 offset:216
+; GFX11-TRUE16-NEXT: scratch_load_b32 v75, off, s32 offset:220
+; GFX11-TRUE16-NEXT: scratch_load_b32 v74, off, s32 offset:224
+; GFX11-TRUE16-NEXT: scratch_load_b32 v73, off, s32 offset:228
+; GFX11-TRUE16-NEXT: scratch_load_b32 v72, off, s32 offset:232
+; GFX11-TRUE16-NEXT: scratch_load_b32 v63, off, s32 offset:236
+; GFX11-TRUE16-NEXT: scratch_load_b32 v62, off, s32 offset:240
+; GFX11-TRUE16-NEXT: scratch_load_b32 v61, off, s32 offset:244
+; GFX11-TRUE16-NEXT: scratch_load_b32 v60, off, s32 offset:248
+; GFX11-TRUE16-NEXT: scratch_load_b32 v59, off, s32 offset:252
+; GFX11-TRUE16-NEXT: s_clause 0xa
+; GFX11-TRUE16-NEXT: scratch_load_b32 v58, off, s32 offset:256
+; GFX11-TRUE16-NEXT: scratch_load_b32 v57, off, s32 offset:260
+; GFX11-TRUE16-NEXT: scratch_load_b32 v56, off, s32 offset:264
+; GFX11-TRUE16-NEXT: scratch_load_b32 v47, off, s32 offset:268
+; GFX11-TRUE16-NEXT: scratch_load_b32 v46, off, s32 offset:272
+; GFX11-TRUE16-NEXT: scratch_load_b32 v45, off, s32 offset:276
+; GFX11-TRUE16-NEXT: scratch_load_b32 v44, off, s32 offset:280
+; GFX11-TRUE16-NEXT: scratch_load_b32 v43, off, s32 offset:284
+; GFX11-TRUE16-NEXT: scratch_load_b32 v42, off, s32 offset:288
+; GFX11-TRUE16-NEXT: scratch_load_b32 v41, off, s32 offset:292
+; GFX11-TRUE16-NEXT: scratch_load_b32 v40, off, s32 offset:296
+; GFX11-TRUE16-NEXT: v_dual_mov_b32 v1, v2 :: v_dual_mov_b32 v2, v5
+; GFX11-TRUE16-NEXT: v_dual_mov_b32 v3, v9 :: v_dual_mov_b32 v4, v14
+; GFX11-TRUE16-NEXT: v_dual_mov_b32 v5, v20 :: v_dual_mov_b32 v6, v27
+; GFX11-TRUE16-NEXT: v_dual_mov_b32 v9, v54 :: v_dual_mov_b32 v10, v65
+; GFX11-TRUE16-NEXT: v_mov_b32_e32 v14, v119
+; GFX11-TRUE16-NEXT: s_waitcnt vmcnt(0)
; GFX11-TRUE16-NEXT: s_setpc_b64 s[30:31]
; GFX11-TRUE16-NEXT: .LBB19_4:
; GFX11-TRUE16-NEXT: ; implicit-def: $vgpr0_vgpr1_vgpr2_vgpr3_vgpr4_vgpr5_vgpr6_vgpr7_vgpr8_vgpr9_vgpr10_vgpr11_vgpr12_vgpr13_vgpr14_vgpr15_vgpr16_vgpr17_vgpr18_vgpr19_vgpr20_vgpr21_vgpr22_vgpr23_vgpr24_vgpr25_vgpr26_vgpr27_vgpr28_vgpr29_vgpr30_vgpr31
+; GFX11-TRUE16-NEXT: ; implicit-def: $vgpr1_vgpr2_vgpr3_vgpr4_vgpr5_vgpr6_vgpr7_vgpr8_vgpr9_vgpr10_vgpr11_vgpr12_vgpr13_vgpr14_vgpr15_vgpr16_vgpr17_vgpr18_vgpr19_vgpr20_vgpr21_vgpr22_vgpr23_vgpr24_vgpr25_vgpr26_vgpr27_vgpr28_vgpr29_vgpr30_vgpr31_vgpr32
+; GFX11-TRUE16-NEXT: ; implicit-def: $vgpr3_vgpr4_vgpr5_vgpr6_vgpr7_vgpr8_vgpr9_vgpr10_vgpr11_vgpr12_vgpr13_vgpr14_vgpr15_vgpr16_vgpr17_vgpr18_vgpr19_vgpr20_vgpr21_vgpr22_vgpr23_vgpr24_vgpr25_vgpr26_vgpr27_vgpr28_vgpr29_vgpr30_vgpr31_vgpr32_vgpr33_vgpr34
+; GFX11-TRUE16-NEXT: ; implicit-def: $vgpr6_vgpr7_vgpr8_vgpr9_vgpr10_vgpr11_vgpr12_vgpr13_vgpr14_vgpr15_vgpr16_vgpr17_vgpr18_vgpr19_vgpr20_vgpr21_vgpr22_vgpr23_vgpr24_vgpr25_vgpr26_vgpr27_vgpr28_vgpr29_vgpr30_vgpr31_vgpr32_vgpr33_vgpr34_vgpr35_vgpr36_vgpr37
+; GFX11-TRUE16-NEXT: ; implicit-def: $vgpr10_vgpr11_vgpr12_vgpr13_vgpr14_vgpr15_vgpr16_vgpr17_vgpr18_vgpr19_vgpr20_vgpr21_vgpr22_vgpr23_vgpr24_vgpr25_vgpr26_vgpr27_vgpr28_vgpr29_vgpr30_vgpr31_vgpr32_vgpr33_vgpr34_vgpr35_vgpr36_vgpr37_vgpr38_vgpr39_vgpr40_vgpr41
+; GFX11-TRUE16-NEXT: ; implicit-def: $vgpr15_vgpr16_vgpr17_vgpr18_vgpr19_vgpr20_vgpr21_vgpr22_vgpr23_vgpr24_vgpr25_vgpr26_vgpr27_vgpr28_vgpr29_vgpr30_vgpr31_vgpr32_vgpr33_vgpr34_vgpr35_vgpr36_vgpr37_vgpr38_vgpr39_vgpr40_vgpr41_vgpr42_vgpr43_vgpr44_vgpr45_vgpr46
+; GFX11-TRUE16-NEXT: ; implicit-def: $vgpr21_vgpr22_vgpr23_vgpr24_vgpr25_vgpr26_vgpr27_vgpr28_vgpr29_vgpr30_vgpr31_vgpr32_vgpr33_vgpr34_vgpr35_vgpr36_vgpr37_vgpr38_vgpr39_vgpr40_vgpr41_vgpr42_vgpr43_vgpr44_vgpr45_vgpr46_vgpr47_vgpr48_vgpr49_vgpr50_vgpr51_vgpr52
+; GFX11-TRUE16-NEXT: ; implicit-def: $vgpr28_vgpr29_vgpr30_vgpr31_vgpr32_vgpr33_vgpr34_vgpr35_vgpr36_vgpr37_vgpr38_vgpr39_vgpr40_vgpr41_vgpr42_vgpr43_vgpr44_vgpr45_vgpr46_vgpr47_vgpr48_vgpr49_vgpr50_vgpr51_vgpr52_vgpr53_vgpr54_vgpr55_vgpr56_vgpr57_vgpr58_vgpr59
+; GFX11-TRUE16-NEXT: ; implicit-def: $vgpr36_vgpr37_vgpr38_vgpr39_vgpr40_vgpr41_vgpr42_vgpr43_vgpr44_vgpr45_vgpr46_vgpr47_vgpr48_vgpr49_vgpr50_vgpr51_vgpr52_vgpr53_vgpr54_vgpr55_vgpr56_vgpr57_vgpr58_vgpr59_vgpr60_vgpr61_vgpr62_vgpr63_vgpr64_vgpr65_vgpr66_vgpr67
+; GFX11-TRUE16-NEXT: ; implicit-def: $vgpr45_vgpr46_vgpr47_vgpr48_vgpr49_vgpr50_vgpr51_vgpr52_vgpr53_vgpr54_vgpr55_vgpr56_vgpr57_vgpr58_vgpr59_vgpr60_vgpr61_vgpr62_vgpr63_vgpr64_vgpr65_vgpr66_vgpr67_vgpr68_vgpr69_vgpr70_vgpr71_vgpr72_vgpr73_vgpr74_vgpr75_vgpr76
+; GFX11-TRUE16-NEXT: ; implicit-def: $vgpr55_vgpr56_vgpr57_vgpr58_vgpr59_vgpr60_vgpr61_vgpr62_vgpr63_vgpr64_vgpr65_vgpr66_vgpr67_vgpr68_vgpr69_vgpr70_vgpr71_vgpr72_vgpr73_vgpr74_vgpr75_vgpr76_vgpr77_vgpr78_vgpr79_vgpr80_vgpr81_vgpr82_vgpr83_vgpr84_vgpr85_vgpr86
+; GFX11-TRUE16-NEXT: ; implicit-def: $vgpr66_vgpr67_vgpr68_vgpr69_vgpr70_vgpr71_vgpr72_vgpr73_vgpr74_vgpr75_vgpr76_vgpr77_vgpr78_vgpr79_vgpr80_vgpr81_vgpr82_vgpr83_vgpr84_vgpr85_vgpr86_vgpr87_vgpr88_vgpr89_vgpr90_vgpr91_vgpr92_vgpr93_vgpr94_vgpr95_vgpr96_vgpr97
+; GFX11-TRUE16-NEXT: ; implicit-def: $vgpr78_vgpr79_vgpr80_vgpr81_vgpr82_vgpr83_vgpr84_vgpr85_vgpr86_vgpr87_vgpr88_vgpr89_vgpr90_vgpr91_vgpr92_vgpr93_vgpr94_vgpr95_vgpr96_vgpr97_vgpr98_vgpr99_vgpr100_vgpr101_vgpr102_vgpr103_vgpr104_vgpr105_vgpr106_vgpr107_vgpr108_vgpr109
+; GFX11-TRUE16-NEXT: ; implicit-def: $vgpr91_vgpr92_vgpr93_vgpr94_vgpr95_vgpr96_vgpr97_vgpr98_vgpr99_vgpr100_vgpr101_vgpr102_vgpr103_vgpr104_vgpr105_vgpr106_vgpr107_vgpr108_vgpr109_vgpr110_vgpr111_vgpr112_vgpr113_vgpr114_vgpr115_vgpr116_vgpr117_vgpr118_vgpr119_vgpr120_vgpr121_vgpr122
+; GFX11-TRUE16-NEXT: ; implicit-def: $vgpr105_vgpr106_vgpr107_vgpr108_vgpr109_vgpr110_vgpr111_vgpr112_vgpr113_vgpr114_vgpr115_vgpr116_vgpr117_vgpr118_vgpr119_vgpr120_vgpr121_vgpr122_vgpr123_vgpr124_vgpr125_vgpr126_vgpr127_vgpr128_vgpr129_vgpr130_vgpr131_vgpr132_vgpr133_vgpr134_vgpr135_vgpr136
+; GFX11-TRUE16-NEXT: ; implicit-def: $vgpr120_vgpr121_vgpr122_vgpr123_vgpr124_vgpr125_vgpr126_vgpr127_vgpr128_vgpr129_vgpr130_vgpr131_vgpr132_vgpr133_vgpr134_vgpr135_vgpr136_vgpr137_vgpr138_vgpr139_vgpr140_vgpr141_vgpr142_vgpr143_vgpr144_vgpr145_vgpr146_vgpr147_vgpr148_vgpr149_vgpr150_vgpr151
+; GFX11-TRUE16-NEXT: ; implicit-def: $vgpr136_vgpr137_vgpr138_vgpr139_vgpr140_vgpr141_vgpr142_vgpr143_vgpr144_vgpr145_vgpr146_vgpr147_vgpr148_vgpr149_vgpr150_vgpr151_vgpr152_vgpr153_vgpr154_vgpr155_vgpr156_vgpr157_vgpr158_vgpr159_vgpr160_vgpr161_vgpr162_vgpr163_vgpr164_vgpr165_vgpr166_vgpr167
+; GFX11-TRUE16-NEXT: ; implicit-def: $vgpr153_vgpr154_vgpr155_vgpr156_vgpr157_vgpr158_vgpr159_vgpr160_vgpr161_vgpr162_vgpr163_vgpr164_vgpr165_vgpr166_vgpr167_vgpr168_vgpr169_vgpr170_vgpr171_vgpr172_vgpr173_vgpr174_vgpr175_vgpr176_vgpr177_vgpr178_vgpr179_vgpr180_vgpr181_vgpr182_vgpr183_vgpr184
; GFX11-TRUE16-NEXT: s_branch .LBB19_2
;
; GFX11-FAKE16-LABEL: bitcast_v40f16_to_v20i32_scalar:
@@ -11100,142 +11454,271 @@ define inreg <40 x i16> @bitcast_v20f32_to_v40i16_scalar(<20 x float> inreg %a,
; GFX9-NEXT: ; implicit-def: $vgpr26
; GFX9-NEXT: s_branch .LBB29_2
;
-; GFX11-LABEL: bitcast_v20f32_to_v40i16_scalar:
-; GFX11: ; %bb.0:
-; GFX11-NEXT: s_waitcnt vmcnt(0) expcnt(0) lgkmcnt(0)
-; GFX11-NEXT: v_cmp_ne_u32_e32 vcc_lo, 0, v2
-; GFX11-NEXT: v_dual_mov_b32 v20, s0 :: v_dual_mov_b32 v19, s1
-; GFX11-NEXT: v_dual_mov_b32 v18, s2 :: v_dual_mov_b32 v3, s16
-; GFX11-NEXT: v_dual_mov_b32 v4, s3 :: v_dual_mov_b32 v9, s17
-; GFX11-NEXT: v_dual_mov_b32 v8, s18 :: v_dual_mov_b32 v7, s19
-; GFX11-NEXT: v_dual_mov_b32 v6, s20 :: v_dual_mov_b32 v5, s21
-; GFX11-NEXT: v_dual_mov_b32 v14, s22 :: v_dual_mov_b32 v13, s23
-; GFX11-NEXT: v_dual_mov_b32 v12, s24 :: v_dual_mov_b32 v11, s25
-; GFX11-NEXT: v_dual_mov_b32 v10, s26 :: v_dual_mov_b32 v15, s27
-; GFX11-NEXT: v_dual_mov_b32 v17, s28 :: v_dual_mov_b32 v16, s29
-; GFX11-NEXT: s_mov_b32 s0, 0
-; GFX11-NEXT: s_and_b32 s1, vcc_lo, exec_lo
-; GFX11-NEXT: s_cbranch_scc0 .LBB29_4
-; GFX11-NEXT: ; %bb.1: ; %cmp.false
-; GFX11-NEXT: v_lshrrev_b32_e32 v22, 16, v1
-; GFX11-NEXT: v_lshrrev_b32_e32 v23, 16, v0
-; GFX11-NEXT: v_lshrrev_b32_e32 v24, 16, v16
-; GFX11-NEXT: v_lshrrev_b32_e32 v25, 16, v17
-; GFX11-NEXT: v_lshrrev_b32_e32 v26, 16, v15
-; GFX11-NEXT: v_lshrrev_b32_e32 v27, 16, v10
-; GFX11-NEXT: v_lshrrev_b32_e32 v28, 16, v11
-; GFX11-NEXT: v_lshrrev_b32_e32 v29, 16, v12
-; GFX11-NEXT: v_lshrrev_b32_e32 v30, 16, v13
-; GFX11-NEXT: v_lshrrev_b32_e32 v31, 16, v14
-; GFX11-NEXT: v_lshrrev_b32_e32 v32, 16, v5
-; GFX11-NEXT: v_lshrrev_b32_e32 v33, 16, v6
-; GFX11-NEXT: v_lshrrev_b32_e32 v34, 16, v7
-; GFX11-NEXT: v_lshrrev_b32_e32 v35, 16, v8
-; GFX11-NEXT: v_lshrrev_b32_e32 v36, 16, v9
-; GFX11-NEXT: v_lshrrev_b32_e32 v37, 16, v3
-; GFX11-NEXT: v_lshrrev_b32_e32 v38, 16, v4
-; GFX11-NEXT: v_lshrrev_b32_e32 v2, 16, v18
-; GFX11-NEXT: v_lshrrev_b32_e32 v21, 16, v19
-; GFX11-NEXT: v_lshrrev_b32_e32 v39, 16, v20
-; GFX11-NEXT: s_and_not1_b32 vcc_lo, exec_lo, s0
-; GFX11-NEXT: s_cbranch_vccnz .LBB29_3
-; GFX11-NEXT: .LBB29_2: ; %cmp.true
-; GFX11-NEXT: v_dual_add_f32 v1, 1.0, v1 :: v_dual_add_f32 v0, 1.0, v0
-; GFX11-NEXT: v_dual_add_f32 v16, 1.0, v16 :: v_dual_add_f32 v17, 1.0, v17
-; GFX11-NEXT: v_dual_add_f32 v15, 1.0, v15 :: v_dual_add_f32 v10, 1.0, v10
-; GFX11-NEXT: v_dual_add_f32 v11, 1.0, v11 :: v_dual_add_f32 v12, 1.0, v12
-; GFX11-NEXT: v_dual_add_f32 v13, 1.0, v13 :: v_dual_add_f32 v14, 1.0, v14
-; GFX11-NEXT: v_dual_add_f32 v5, 1.0, v5 :: v_dual_add_f32 v6, 1.0, v6
-; GFX11-NEXT: v_dual_add_f32 v7, 1.0, v7 :: v_dual_add_f32 v8, 1.0, v8
-; GFX11-NEXT: v_dual_add_f32 v9, 1.0, v9 :: v_dual_add_f32 v4, 1.0, v4
-; GFX11-NEXT: v_dual_add_f32 v3, 1.0, v3 :: v_dual_add_f32 v18, 1.0, v18
-; GFX11-NEXT: v_dual_add_f32 v19, 1.0, v19 :: v_dual_add_f32 v20, 1.0, v20
-; GFX11-NEXT: v_lshrrev_b32_e32 v22, 16, v1
-; GFX11-NEXT: v_lshrrev_b32_e32 v23, 16, v0
-; GFX11-NEXT: v_lshrrev_b32_e32 v24, 16, v16
-; GFX11-NEXT: v_lshrrev_b32_e32 v25, 16, v17
-; GFX11-NEXT: v_lshrrev_b32_e32 v26, 16, v15
-; GFX11-NEXT: v_lshrrev_b32_e32 v27, 16, v10
-; GFX11-NEXT: v_lshrrev_b32_e32 v28, 16, v11
-; GFX11-NEXT: v_lshrrev_b32_e32 v29, 16, v12
-; GFX11-NEXT: v_lshrrev_b32_e32 v30, 16, v13
-; GFX11-NEXT: v_lshrrev_b32_e32 v31, 16, v14
-; GFX11-NEXT: v_lshrrev_b32_e32 v32, 16, v5
-; GFX11-NEXT: v_lshrrev_b32_e32 v33, 16, v6
-; GFX11-NEXT: v_lshrrev_b32_e32 v34, 16, v7
-; GFX11-NEXT: v_lshrrev_b32_e32 v35, 16, v8
-; GFX11-NEXT: v_lshrrev_b32_e32 v36, 16, v9
-; GFX11-NEXT: v_lshrrev_b32_e32 v37, 16, v3
-; GFX11-NEXT: v_lshrrev_b32_e32 v38, 16, v4
-; GFX11-NEXT: v_lshrrev_b32_e32 v2, 16, v18
-; GFX11-NEXT: v_lshrrev_b32_e32 v21, 16, v19
-; GFX11-NEXT: v_lshrrev_b32_e32 v39, 16, v20
-; GFX11-NEXT: .LBB29_3: ; %end
-; GFX11-NEXT: v_and_b32_e32 v19, 0xffff, v19
-; GFX11-NEXT: v_and_b32_e32 v18, 0xffff, v18
-; GFX11-NEXT: v_and_b32_e32 v9, 0xffff, v9
-; GFX11-NEXT: v_and_b32_e32 v8, 0xffff, v8
-; GFX11-NEXT: v_and_b32_e32 v14, 0xffff, v14
-; GFX11-NEXT: v_lshl_or_b32 v21, v21, 16, v19
-; GFX11-NEXT: v_lshl_or_b32 v2, v2, 16, v18
-; GFX11-NEXT: v_and_b32_e32 v18, 0xffff, v6
-; GFX11-NEXT: v_and_b32_e32 v19, 0xffff, v5
-; GFX11-NEXT: v_and_b32_e32 v20, 0xffff, v20
-; GFX11-NEXT: v_lshl_or_b32 v5, v36, 16, v9
-; GFX11-NEXT: v_lshl_or_b32 v6, v35, 16, v8
-; GFX11-NEXT: v_lshl_or_b32 v8, v33, 16, v18
-; GFX11-NEXT: v_lshl_or_b32 v9, v32, 16, v19
-; GFX11-NEXT: v_and_b32_e32 v13, 0xffff, v13
-; GFX11-NEXT: v_and_b32_e32 v18, 0xffff, v11
-; GFX11-NEXT: v_and_b32_e32 v19, 0xffff, v10
-; GFX11-NEXT: v_and_b32_e32 v1, 0xffff, v1
-; GFX11-NEXT: v_and_b32_e32 v4, 0xffff, v4
-; GFX11-NEXT: v_and_b32_e32 v48, 0xffff, v3
-; GFX11-NEXT: v_and_b32_e32 v7, 0xffff, v7
-; GFX11-NEXT: v_and_b32_e32 v12, 0xffff, v12
-; GFX11-NEXT: v_lshl_or_b32 v10, v31, 16, v14
-; GFX11-NEXT: v_lshl_or_b32 v11, v30, 16, v13
-; GFX11-NEXT: v_lshl_or_b32 v13, v28, 16, v18
-; GFX11-NEXT: v_lshl_or_b32 v14, v27, 16, v19
-; GFX11-NEXT: v_and_b32_e32 v15, 0xffff, v15
-; GFX11-NEXT: v_and_b32_e32 v17, 0xffff, v17
-; GFX11-NEXT: v_and_b32_e32 v18, 0xffff, v16
-; GFX11-NEXT: v_and_b32_e32 v0, 0xffff, v0
-; GFX11-NEXT: v_lshl_or_b32 v19, v22, 16, v1
-; GFX11-NEXT: v_mov_b32_e32 v1, v21
-; GFX11-NEXT: v_lshl_or_b32 v20, v39, 16, v20
-; GFX11-NEXT: v_lshl_or_b32 v3, v38, 16, v4
-; GFX11-NEXT: v_lshl_or_b32 v4, v37, 16, v48
-; GFX11-NEXT: v_lshl_or_b32 v7, v34, 16, v7
-; GFX11-NEXT: v_lshl_or_b32 v12, v29, 16, v12
-; GFX11-NEXT: v_lshl_or_b32 v15, v26, 16, v15
-; GFX11-NEXT: v_lshl_or_b32 v16, v25, 16, v17
-; GFX11-NEXT: v_lshl_or_b32 v17, v24, 16, v18
-; GFX11-NEXT: v_lshl_or_b32 v18, v23, 16, v0
-; GFX11-NEXT: v_mov_b32_e32 v0, v20
-; GFX11-NEXT: s_setpc_b64 s[30:31]
-; GFX11-NEXT: .LBB29_4:
-; GFX11-NEXT: ; implicit-def: $vgpr39
-; GFX11-NEXT: ; implicit-def: $vgpr21
-; GFX11-NEXT: ; implicit-def: $vgpr2
-; GFX11-NEXT: ; implicit-def: $vgpr38
-; GFX11-NEXT: ; implicit-def: $vgpr37
-; GFX11-NEXT: ; implicit-def: $vgpr36
-; GFX11-NEXT: ; implicit-def: $vgpr35
-; GFX11-NEXT: ; implicit-def: $vgpr34
-; GFX11-NEXT: ; implicit-def: $vgpr33
-; GFX11-NEXT: ; implicit-def: $vgpr32
-; GFX11-NEXT: ; implicit-def: $vgpr31
-; GFX11-NEXT: ; implicit-def: $vgpr30
-; GFX11-NEXT: ; implicit-def: $vgpr29
-; GFX11-NEXT: ; implicit-def: $vgpr28
-; GFX11-NEXT: ; implicit-def: $vgpr27
-; GFX11-NEXT: ; implicit-def: $vgpr26
-; GFX11-NEXT: ; implicit-def: $vgpr25
-; GFX11-NEXT: ; implicit-def: $vgpr24
-; GFX11-NEXT: ; implicit-def: $vgpr23
-; GFX11-NEXT: ; implicit-def: $vgpr22
-; GFX11-NEXT: s_branch .LBB29_2
+; GFX11-TRUE16-LABEL: bitcast_v20f32_to_v40i16_scalar:
+; GFX11-TRUE16: ; %bb.0:
+; GFX11-TRUE16-NEXT: s_waitcnt vmcnt(0) expcnt(0) lgkmcnt(0)
+; GFX11-TRUE16-NEXT: v_dual_mov_b32 v16, v2 :: v_dual_mov_b32 v19, v1
+; GFX11-TRUE16-NEXT: v_dual_mov_b32 v18, v0 :: v_dual_mov_b32 v1, s1
+; GFX11-TRUE16-NEXT: v_dual_mov_b32 v0, s0 :: v_dual_mov_b32 v3, s3
+; GFX11-TRUE16-NEXT: s_delay_alu instid0(VALU_DEP_3)
+; GFX11-TRUE16-NEXT: v_cmp_ne_u32_e32 vcc_lo, 0, v16
+; GFX11-TRUE16-NEXT: v_dual_mov_b32 v2, s2 :: v_dual_mov_b32 v5, s17
+; GFX11-TRUE16-NEXT: v_dual_mov_b32 v4, s16 :: v_dual_mov_b32 v7, s19
+; GFX11-TRUE16-NEXT: v_dual_mov_b32 v6, s18 :: v_dual_mov_b32 v9, s21
+; GFX11-TRUE16-NEXT: v_dual_mov_b32 v8, s20 :: v_dual_mov_b32 v11, s23
+; GFX11-TRUE16-NEXT: v_dual_mov_b32 v10, s22 :: v_dual_mov_b32 v13, s25
+; GFX11-TRUE16-NEXT: v_dual_mov_b32 v12, s24 :: v_dual_mov_b32 v15, s27
+; GFX11-TRUE16-NEXT: v_dual_mov_b32 v14, s26 :: v_dual_mov_b32 v17, s29
+; GFX11-TRUE16-NEXT: v_mov_b32_e32 v16, s28
+; GFX11-TRUE16-NEXT: s_mov_b32 s0, 0
+; GFX11-TRUE16-NEXT: s_and_b32 s1, vcc_lo, exec_lo
+; GFX11-TRUE16-NEXT: s_cbranch_scc0 .LBB29_4
+; GFX11-TRUE16-NEXT: ; %bb.1: ; %cmp.false
+; GFX11-TRUE16-NEXT: v_lshrrev_b32_e32 v20, 16, v19
+; GFX11-TRUE16-NEXT: v_lshrrev_b32_e32 v21, 16, v18
+; GFX11-TRUE16-NEXT: v_lshrrev_b32_e32 v22, 16, v17
+; GFX11-TRUE16-NEXT: v_lshrrev_b32_e32 v23, 16, v16
+; GFX11-TRUE16-NEXT: v_lshrrev_b32_e32 v24, 16, v15
+; GFX11-TRUE16-NEXT: v_lshrrev_b32_e32 v25, 16, v14
+; GFX11-TRUE16-NEXT: v_lshrrev_b32_e32 v26, 16, v13
+; GFX11-TRUE16-NEXT: v_lshrrev_b32_e32 v27, 16, v12
+; GFX11-TRUE16-NEXT: v_lshrrev_b32_e32 v28, 16, v11
+; GFX11-TRUE16-NEXT: v_lshrrev_b32_e32 v29, 16, v10
+; GFX11-TRUE16-NEXT: v_lshrrev_b32_e32 v30, 16, v9
+; GFX11-TRUE16-NEXT: v_lshrrev_b32_e32 v31, 16, v8
+; GFX11-TRUE16-NEXT: v_lshrrev_b32_e32 v32, 16, v7
+; GFX11-TRUE16-NEXT: v_lshrrev_b32_e32 v33, 16, v6
+; GFX11-TRUE16-NEXT: v_lshrrev_b32_e32 v34, 16, v5
+; GFX11-TRUE16-NEXT: v_lshrrev_b32_e32 v35, 16, v4
+; GFX11-TRUE16-NEXT: v_lshrrev_b32_e32 v36, 16, v3
+; GFX11-TRUE16-NEXT: v_lshrrev_b32_e32 v37, 16, v2
+; GFX11-TRUE16-NEXT: v_lshrrev_b32_e32 v38, 16, v1
+; GFX11-TRUE16-NEXT: v_lshrrev_b32_e32 v39, 16, v0
+; GFX11-TRUE16-NEXT: s_and_not1_b32 vcc_lo, exec_lo, s0
+; GFX11-TRUE16-NEXT: s_cbranch_vccnz .LBB29_3
+; GFX11-TRUE16-NEXT: .LBB29_2: ; %cmp.true
+; GFX11-TRUE16-NEXT: v_dual_add_f32 v19, 1.0, v19 :: v_dual_add_f32 v18, 1.0, v18
+; GFX11-TRUE16-NEXT: v_dual_add_f32 v17, 1.0, v17 :: v_dual_add_f32 v16, 1.0, v16
+; GFX11-TRUE16-NEXT: v_dual_add_f32 v15, 1.0, v15 :: v_dual_add_f32 v14, 1.0, v14
+; GFX11-TRUE16-NEXT: v_dual_add_f32 v13, 1.0, v13 :: v_dual_add_f32 v12, 1.0, v12
+; GFX11-TRUE16-NEXT: v_dual_add_f32 v11, 1.0, v11 :: v_dual_add_f32 v10, 1.0, v10
+; GFX11-TRUE16-NEXT: v_dual_add_f32 v9, 1.0, v9 :: v_dual_add_f32 v8, 1.0, v8
+; GFX11-TRUE16-NEXT: v_dual_add_f32 v7, 1.0, v7 :: v_dual_add_f32 v6, 1.0, v6
+; GFX11-TRUE16-NEXT: v_dual_add_f32 v5, 1.0, v5 :: v_dual_add_f32 v4, 1.0, v4
+; GFX11-TRUE16-NEXT: v_dual_add_f32 v3, 1.0, v3 :: v_dual_add_f32 v2, 1.0, v2
+; GFX11-TRUE16-NEXT: v_dual_add_f32 v1, 1.0, v1 :: v_dual_add_f32 v0, 1.0, v0
+; GFX11-TRUE16-NEXT: v_lshrrev_b32_e32 v20, 16, v19
+; GFX11-TRUE16-NEXT: v_lshrrev_b32_e32 v21, 16, v18
+; GFX11-TRUE16-NEXT: v_lshrrev_b32_e32 v22, 16, v17
+; GFX11-TRUE16-NEXT: v_lshrrev_b32_e32 v23, 16, v16
+; GFX11-TRUE16-NEXT: v_lshrrev_b32_e32 v24, 16, v15
+; GFX11-TRUE16-NEXT: v_lshrrev_b32_e32 v25, 16, v14
+; GFX11-TRUE16-NEXT: v_lshrrev_b32_e32 v26, 16, v13
+; GFX11-TRUE16-NEXT: v_lshrrev_b32_e32 v27, 16, v12
+; GFX11-TRUE16-NEXT: v_lshrrev_b32_e32 v28, 16, v11
+; GFX11-TRUE16-NEXT: v_lshrrev_b32_e32 v29, 16, v10
+; GFX11-TRUE16-NEXT: v_lshrrev_b32_e32 v30, 16, v9
+; GFX11-TRUE16-NEXT: v_lshrrev_b32_e32 v31, 16, v8
+; GFX11-TRUE16-NEXT: v_lshrrev_b32_e32 v32, 16, v7
+; GFX11-TRUE16-NEXT: v_lshrrev_b32_e32 v33, 16, v6
+; GFX11-TRUE16-NEXT: v_lshrrev_b32_e32 v34, 16, v5
+; GFX11-TRUE16-NEXT: v_lshrrev_b32_e32 v35, 16, v4
+; GFX11-TRUE16-NEXT: v_lshrrev_b32_e32 v36, 16, v3
+; GFX11-TRUE16-NEXT: v_lshrrev_b32_e32 v37, 16, v2
+; GFX11-TRUE16-NEXT: v_lshrrev_b32_e32 v38, 16, v1
+; GFX11-TRUE16-NEXT: v_lshrrev_b32_e32 v39, 16, v0
+; GFX11-TRUE16-NEXT: .LBB29_3: ; %end
+; GFX11-TRUE16-NEXT: s_delay_alu instid0(VALU_DEP_1) | instskip(NEXT) | instid1(VALU_DEP_4)
+; GFX11-TRUE16-NEXT: v_dual_mov_b32 v39, v39 :: v_dual_mov_b32 v38, v38
+; GFX11-TRUE16-NEXT: v_dual_mov_b32 v37, v37 :: v_dual_mov_b32 v36, v36
+; GFX11-TRUE16-NEXT: v_dual_mov_b32 v35, v35 :: v_dual_mov_b32 v34, v34
+; GFX11-TRUE16-NEXT: v_dual_mov_b32 v33, v33 :: v_dual_mov_b32 v32, v32
+; GFX11-TRUE16-NEXT: v_dual_mov_b32 v31, v31 :: v_dual_mov_b32 v30, v30
+; GFX11-TRUE16-NEXT: v_dual_mov_b32 v29, v29 :: v_dual_mov_b32 v28, v28
+; GFX11-TRUE16-NEXT: v_dual_mov_b32 v27, v27 :: v_dual_mov_b32 v26, v26
+; GFX11-TRUE16-NEXT: v_dual_mov_b32 v25, v25 :: v_dual_mov_b32 v24, v24
+; GFX11-TRUE16-NEXT: v_dual_mov_b32 v23, v23 :: v_dual_mov_b32 v22, v22
+; GFX11-TRUE16-NEXT: v_dual_mov_b32 v21, v21 :: v_dual_mov_b32 v20, v20
+; GFX11-TRUE16-NEXT: v_mov_b16_e32 v0.h, v39.l
+; GFX11-TRUE16-NEXT: v_mov_b16_e32 v1.h, v38.l
+; GFX11-TRUE16-NEXT: v_mov_b16_e32 v2.h, v37.l
+; GFX11-TRUE16-NEXT: v_mov_b16_e32 v3.h, v36.l
+; GFX11-TRUE16-NEXT: v_mov_b16_e32 v4.h, v35.l
+; GFX11-TRUE16-NEXT: v_mov_b16_e32 v5.h, v34.l
+; GFX11-TRUE16-NEXT: v_mov_b16_e32 v6.h, v33.l
+; GFX11-TRUE16-NEXT: v_mov_b16_e32 v7.h, v32.l
+; GFX11-TRUE16-NEXT: v_mov_b16_e32 v8.h, v31.l
+; GFX11-TRUE16-NEXT: v_mov_b16_e32 v9.h, v30.l
+; GFX11-TRUE16-NEXT: v_mov_b16_e32 v10.h, v29.l
+; GFX11-TRUE16-NEXT: v_mov_b16_e32 v11.h, v28.l
+; GFX11-TRUE16-NEXT: v_mov_b16_e32 v12.h, v27.l
+; GFX11-TRUE16-NEXT: v_mov_b16_e32 v13.h, v26.l
+; GFX11-TRUE16-NEXT: v_mov_b16_e32 v14.h, v25.l
+; GFX11-TRUE16-NEXT: v_mov_b16_e32 v15.h, v24.l
+; GFX11-TRUE16-NEXT: v_mov_b16_e32 v16.h, v23.l
+; GFX11-TRUE16-NEXT: v_mov_b16_e32 v17.h, v22.l
+; GFX11-TRUE16-NEXT: v_mov_b16_e32 v18.h, v21.l
+; GFX11-TRUE16-NEXT: v_mov_b16_e32 v19.h, v20.l
+; GFX11-TRUE16-NEXT: s_setpc_b64 s[30:31]
+; GFX11-TRUE16-NEXT: .LBB29_4:
+; GFX11-TRUE16-NEXT: ; implicit-def: $vgpr39
+; GFX11-TRUE16-NEXT: ; implicit-def: $vgpr38
+; GFX11-TRUE16-NEXT: ; implicit-def: $vgpr37
+; GFX11-TRUE16-NEXT: ; implicit-def: $vgpr36
+; GFX11-TRUE16-NEXT: ; implicit-def: $vgpr35
+; GFX11-TRUE16-NEXT: ; implicit-def: $vgpr34
+; GFX11-TRUE16-NEXT: ; implicit-def: $vgpr33
+; GFX11-TRUE16-NEXT: ; implicit-def: $vgpr32
+; GFX11-TRUE16-NEXT: ; implicit-def: $vgpr31
+; GFX11-TRUE16-NEXT: ; implicit-def: $vgpr30
+; GFX11-TRUE16-NEXT: ; implicit-def: $vgpr29
+; GFX11-TRUE16-NEXT: ; implicit-def: $vgpr28
+; GFX11-TRUE16-NEXT: ; implicit-def: $vgpr27
+; GFX11-TRUE16-NEXT: ; implicit-def: $vgpr26
+; GFX11-TRUE16-NEXT: ; implicit-def: $vgpr25
+; GFX11-TRUE16-NEXT: ; implicit-def: $vgpr24
+; GFX11-TRUE16-NEXT: ; implicit-def: $vgpr23
+; GFX11-TRUE16-NEXT: ; implicit-def: $vgpr22
+; GFX11-TRUE16-NEXT: ; implicit-def: $vgpr21
+; GFX11-TRUE16-NEXT: ; implicit-def: $vgpr20
+; GFX11-TRUE16-NEXT: s_branch .LBB29_2
+;
+; GFX11-FAKE16-LABEL: bitcast_v20f32_to_v40i16_scalar:
+; GFX11-FAKE16: ; %bb.0:
+; GFX11-FAKE16-NEXT: s_waitcnt vmcnt(0) expcnt(0) lgkmcnt(0)
+; GFX11-FAKE16-NEXT: v_cmp_ne_u32_e32 vcc_lo, 0, v2
+; GFX11-FAKE16-NEXT: v_dual_mov_b32 v20, s0 :: v_dual_mov_b32 v19, s1
+; GFX11-FAKE16-NEXT: v_dual_mov_b32 v18, s2 :: v_dual_mov_b32 v3, s16
+; GFX11-FAKE16-NEXT: v_dual_mov_b32 v4, s3 :: v_dual_mov_b32 v9, s17
+; GFX11-FAKE16-NEXT: v_dual_mov_b32 v8, s18 :: v_dual_mov_b32 v7, s19
+; GFX11-FAKE16-NEXT: v_dual_mov_b32 v6, s20 :: v_dual_mov_b32 v5, s21
+; GFX11-FAKE16-NEXT: v_dual_mov_b32 v14, s22 :: v_dual_mov_b32 v13, s23
+; GFX11-FAKE16-NEXT: v_dual_mov_b32 v12, s24 :: v_dual_mov_b32 v11, s25
+; GFX11-FAKE16-NEXT: v_dual_mov_b32 v10, s26 :: v_dual_mov_b32 v15, s27
+; GFX11-FAKE16-NEXT: v_dual_mov_b32 v17, s28 :: v_dual_mov_b32 v16, s29
+; GFX11-FAKE16-NEXT: s_mov_b32 s0, 0
+; GFX11-FAKE16-NEXT: s_and_b32 s1, vcc_lo, exec_lo
+; GFX11-FAKE16-NEXT: s_cbranch_scc0 .LBB29_4
+; GFX11-FAKE16-NEXT: ; %bb.1: ; %cmp.false
+; GFX11-FAKE16-NEXT: v_lshrrev_b32_e32 v22, 16, v1
+; GFX11-FAKE16-NEXT: v_lshrrev_b32_e32 v23, 16, v0
+; GFX11-FAKE16-NEXT: v_lshrrev_b32_e32 v24, 16, v16
+; GFX11-FAKE16-NEXT: v_lshrrev_b32_e32 v25, 16, v17
+; GFX11-FAKE16-NEXT: v_lshrrev_b32_e32 v26, 16, v15
+; GFX11-FAKE16-NEXT: v_lshrrev_b32_e32 v27, 16, v10
+; GFX11-FAKE16-NEXT: v_lshrrev_b32_e32 v28, 16, v11
+; GFX11-FAKE16-NEXT: v_lshrrev_b32_e32 v29, 16, v12
+; GFX11-FAKE16-NEXT: v_lshrrev_b32_e32 v30, 16, v13
+; GFX11-FAKE16-NEXT: v_lshrrev_b32_e32 v31, 16, v14
+; GFX11-FAKE16-NEXT: v_lshrrev_b32_e32 v32, 16, v5
+; GFX11-FAKE16-NEXT: v_lshrrev_b32_e32 v33, 16, v6
+; GFX11-FAKE16-NEXT: v_lshrrev_b32_e32 v34, 16, v7
+; GFX11-FAKE16-NEXT: v_lshrrev_b32_e32 v35, 16, v8
+; GFX11-FAKE16-NEXT: v_lshrrev_b32_e32 v36, 16, v9
+; GFX11-FAKE16-NEXT: v_lshrrev_b32_e32 v37, 16, v3
+; GFX11-FAKE16-NEXT: v_lshrrev_b32_e32 v38, 16, v4
+; GFX11-FAKE16-NEXT: v_lshrrev_b32_e32 v2, 16, v18
+; GFX11-FAKE16-NEXT: v_lshrrev_b32_e32 v21, 16, v19
+; GFX11-FAKE16-NEXT: v_lshrrev_b32_e32 v39, 16, v20
+; GFX11-FAKE16-NEXT: s_and_not1_b32 vcc_lo, exec_lo, s0
+; GFX11-FAKE16-NEXT: s_cbranch_vccnz .LBB29_3
+; GFX11-FAKE16-NEXT: .LBB29_2: ; %cmp.true
+; GFX11-FAKE16-NEXT: v_dual_add_f32 v1, 1.0, v1 :: v_dual_add_f32 v0, 1.0, v0
+; GFX11-FAKE16-NEXT: v_dual_add_f32 v16, 1.0, v16 :: v_dual_add_f32 v17, 1.0, v17
+; GFX11-FAKE16-NEXT: v_dual_add_f32 v15, 1.0, v15 :: v_dual_add_f32 v10, 1.0, v10
+; GFX11-FAKE16-NEXT: v_dual_add_f32 v11, 1.0, v11 :: v_dual_add_f32 v12, 1.0, v12
+; GFX11-FAKE16-NEXT: v_dual_add_f32 v13, 1.0, v13 :: v_dual_add_f32 v14, 1.0, v14
+; GFX11-FAKE16-NEXT: v_dual_add_f32 v5, 1.0, v5 :: v_dual_add_f32 v6, 1.0, v6
+; GFX11-FAKE16-NEXT: v_dual_add_f32 v7, 1.0, v7 :: v_dual_add_f32 v8, 1.0, v8
+; GFX11-FAKE16-NEXT: v_dual_add_f32 v9, 1.0, v9 :: v_dual_add_f32 v4, 1.0, v4
+; GFX11-FAKE16-NEXT: v_dual_add_f32 v3, 1.0, v3 :: v_dual_add_f32 v18, 1.0, v18
+; GFX11-FAKE16-NEXT: v_dual_add_f32 v19, 1.0, v19 :: v_dual_add_f32 v20, 1.0, v20
+; GFX11-FAKE16-NEXT: v_lshrrev_b32_e32 v22, 16, v1
+; GFX11-FAKE16-NEXT: v_lshrrev_b32_e32 v23, 16, v0
+; GFX11-FAKE16-NEXT: v_lshrrev_b32_e32 v24, 16, v16
+; GFX11-FAKE16-NEXT: v_lshrrev_b32_e32 v25, 16, v17
+; GFX11-FAKE16-NEXT: v_lshrrev_b32_e32 v26, 16, v15
+; GFX11-FAKE16-NEXT: v_lshrrev_b32_e32 v27, 16, v10
+; GFX11-FAKE16-NEXT: v_lshrrev_b32_e32 v28, 16, v11
+; GFX11-FAKE16-NEXT: v_lshrrev_b32_e32 v29, 16, v12
+; GFX11-FAKE16-NEXT: v_lshrrev_b32_e32 v30, 16, v13
+; GFX11-FAKE16-NEXT: v_lshrrev_b32_e32 v31, 16, v14
+; GFX11-FAKE16-NEXT: v_lshrrev_b32_e32 v32, 16, v5
+; GFX11-FAKE16-NEXT: v_lshrrev_b32_e32 v33, 16, v6
+; GFX11-FAKE16-NEXT: v_lshrrev_b32_e32 v34, 16, v7
+; GFX11-FAKE16-NEXT: v_lshrrev_b32_e32 v35, 16, v8
+; GFX11-FAKE16-NEXT: v_lshrrev_b32_e32 v36, 16, v9
+; GFX11-FAKE16-NEXT: v_lshrrev_b32_e32 v37, 16, v3
+; GFX11-FAKE16-NEXT: v_lshrrev_b32_e32 v38, 16, v4
+; GFX11-FAKE16-NEXT: v_lshrrev_b32_e32 v2, 16, v18
+; GFX11-FAKE16-NEXT: v_lshrrev_b32_e32 v21, 16, v19
+; GFX11-FAKE16-NEXT: v_lshrrev_b32_e32 v39, 16, v20
+; GFX11-FAKE16-NEXT: .LBB29_3: ; %end
+; GFX11-FAKE16-NEXT: v_and_b32_e32 v19, 0xffff, v19
+; GFX11-FAKE16-NEXT: v_and_b32_e32 v18, 0xffff, v18
+; GFX11-FAKE16-NEXT: v_and_b32_e32 v9, 0xffff, v9
+; GFX11-FAKE16-NEXT: v_and_b32_e32 v8, 0xffff, v8
+; GFX11-FAKE16-NEXT: v_and_b32_e32 v14, 0xffff, v14
+; GFX11-FAKE16-NEXT: v_lshl_or_b32 v21, v21, 16, v19
+; GFX11-FAKE16-NEXT: v_lshl_or_b32 v2, v2, 16, v18
+; GFX11-FAKE16-NEXT: v_and_b32_e32 v18, 0xffff, v6
+; GFX11-FAKE16-NEXT: v_and_b32_e32 v19, 0xffff, v5
+; GFX11-FAKE16-NEXT: v_and_b32_e32 v20, 0xffff, v20
+; GFX11-FAKE16-NEXT: v_lshl_or_b32 v5, v36, 16, v9
+; GFX11-FAKE16-NEXT: v_lshl_or_b32 v6, v35, 16, v8
+; GFX11-FAKE16-NEXT: v_lshl_or_b32 v8, v33, 16, v18
+; GFX11-FAKE16-NEXT: v_lshl_or_b32 v9, v32, 16, v19
+; GFX11-FAKE16-NEXT: v_and_b32_e32 v13, 0xffff, v13
+; GFX11-FAKE16-NEXT: v_and_b32_e32 v18, 0xffff, v11
+; GFX11-FAKE16-NEXT: v_and_b32_e32 v19, 0xffff, v10
+; GFX11-FAKE16-NEXT: v_and_b32_e32 v1, 0xffff, v1
+; GFX11-FAKE16-NEXT: v_and_b32_e32 v4, 0xffff, v4
+; GFX11-FAKE16-NEXT: v_and_b32_e32 v48, 0xffff, v3
+; GFX11-FAKE16-NEXT: v_and_b32_e32 v7, 0xffff, v7
+; GFX11-FAKE16-NEXT: v_and_b32_e32 v12, 0xffff, v12
+; GFX11-FAKE16-NEXT: v_lshl_or_b32 v10, v31, 16, v14
+; GFX11-FAKE16-NEXT: v_lshl_or_b32 v11, v30, 16, v13
+; GFX11-FAKE16-NEXT: v_lshl_or_b32 v13, v28, 16, v18
+; GFX11-FAKE16-NEXT: v_lshl_or_b32 v14, v27, 16, v19
+; GFX11-FAKE16-NEXT: v_and_b32_e32 v15, 0xffff, v15
+; GFX11-FAKE16-NEXT: v_and_b32_e32 v17, 0xffff, v17
+; GFX11-FAKE16-NEXT: v_and_b32_e32 v18, 0xffff, v16
+; GFX11-FAKE16-NEXT: v_and_b32_e32 v0, 0xffff, v0
+; GFX11-FAKE16-NEXT: v_lshl_or_b32 v19, v22, 16, v1
+; GFX11-FAKE16-NEXT: v_mov_b32_e32 v1, v21
+; GFX11-FAKE16-NEXT: v_lshl_or_b32 v20, v39, 16, v20
+; GFX11-FAKE16-NEXT: v_lshl_or_b32 v3, v38, 16, v4
+; GFX11-FAKE16-NEXT: v_lshl_or_b32 v4, v37, 16, v48
+; GFX11-FAKE16-NEXT: v_lshl_or_b32 v7, v34, 16, v7
+; GFX11-FAKE16-NEXT: v_lshl_or_b32 v12, v29, 16, v12
+; GFX11-FAKE16-NEXT: v_lshl_or_b32 v15, v26, 16, v15
+; GFX11-FAKE16-NEXT: v_lshl_or_b32 v16, v25, 16, v17
+; GFX11-FAKE16-NEXT: v_lshl_or_b32 v17, v24, 16, v18
+; GFX11-FAKE16-NEXT: v_lshl_or_b32 v18, v23, 16, v0
+; GFX11-FAKE16-NEXT: v_mov_b32_e32 v0, v20
+; GFX11-FAKE16-NEXT: s_setpc_b64 s[30:31]
+; GFX11-FAKE16-NEXT: .LBB29_4:
+; GFX11-FAKE16-NEXT: ; implicit-def: $vgpr39
+; GFX11-FAKE16-NEXT: ; implicit-def: $vgpr21
+; GFX11-FAKE16-NEXT: ; implicit-def: $vgpr2
+; GFX11-FAKE16-NEXT: ; implicit-def: $vgpr38
+; GFX11-FAKE16-NEXT: ; implicit-def: $vgpr37
+; GFX11-FAKE16-NEXT: ; implicit-def: $vgpr36
+; GFX11-FAKE16-NEXT: ; implicit-def: $vgpr35
+; GFX11-FAKE16-NEXT: ; implicit-def: $vgpr34
+; GFX11-FAKE16-NEXT: ; implicit-def: $vgpr33
+; GFX11-FAKE16-NEXT: ; implicit-def: $vgpr32
+; GFX11-FAKE16-NEXT: ; implicit-def: $vgpr31
+; GFX11-FAKE16-NEXT: ; implicit-def: $vgpr30
+; GFX11-FAKE16-NEXT: ; implicit-def: $vgpr29
+; GFX11-FAKE16-NEXT: ; implicit-def: $vgpr28
+; GFX11-FAKE16-NEXT: ; implicit-def: $vgpr27
+; GFX11-FAKE16-NEXT: ; implicit-def: $vgpr26
+; GFX11-FAKE16-NEXT: ; implicit-def: $vgpr25
+; GFX11-FAKE16-NEXT: ; implicit-def: $vgpr24
+; GFX11-FAKE16-NEXT: ; implicit-def: $vgpr23
+; GFX11-FAKE16-NEXT: ; implicit-def: $vgpr22
+; GFX11-FAKE16-NEXT: s_branch .LBB29_2
%cmp = icmp eq i32 %b, 0
br i1 %cmp, label %cmp.true, label %cmp.false
@@ -12629,93 +13112,270 @@ define inreg <20 x float> @bitcast_v40i16_to_v20f32_scalar(<40 x i16> inreg %a,
; GFX11-TRUE16-LABEL: bitcast_v40i16_to_v20f32_scalar:
; GFX11-TRUE16: ; %bb.0:
; GFX11-TRUE16-NEXT: s_waitcnt vmcnt(0) expcnt(0) lgkmcnt(0)
-; GFX11-TRUE16-NEXT: v_mov_b16_e32 v32.h, 0
; GFX11-TRUE16-NEXT: v_cmp_ne_u32_e32 vcc_lo, 0, v2
-; GFX11-TRUE16-NEXT: v_mov_b16_e32 v32.l, v1.h
-; GFX11-TRUE16-NEXT: v_mov_b16_e32 v33.l, v0.h
-; GFX11-TRUE16-NEXT: v_and_b32_e32 v35, 0xffff, v0
-; GFX11-TRUE16-NEXT: v_mov_b16_e32 v33.h, v32.h
-; GFX11-TRUE16-NEXT: v_and_b32_e32 v34, 0xffff, v1
-; GFX11-TRUE16-NEXT: s_lshr_b32 s41, s29, 16
-; GFX11-TRUE16-NEXT: s_lshr_b32 s42, s28, 16
-; GFX11-TRUE16-NEXT: s_lshr_b32 s43, s27, 16
-; GFX11-TRUE16-NEXT: s_lshr_b32 s44, s26, 16
-; GFX11-TRUE16-NEXT: s_lshr_b32 s45, s25, 16
-; GFX11-TRUE16-NEXT: s_lshr_b32 s15, s24, 16
-; GFX11-TRUE16-NEXT: s_lshr_b32 s14, s23, 16
-; GFX11-TRUE16-NEXT: s_lshr_b32 s13, s22, 16
-; GFX11-TRUE16-NEXT: s_lshr_b32 s12, s21, 16
-; GFX11-TRUE16-NEXT: s_lshr_b32 s11, s20, 16
-; GFX11-TRUE16-NEXT: s_lshr_b32 s10, s19, 16
-; GFX11-TRUE16-NEXT: s_lshr_b32 s9, s18, 16
-; GFX11-TRUE16-NEXT: s_lshr_b32 s8, s17, 16
-; GFX11-TRUE16-NEXT: s_lshr_b32 s7, s16, 16
-; GFX11-TRUE16-NEXT: s_lshr_b32 s46, s3, 16
-; GFX11-TRUE16-NEXT: s_lshr_b32 s6, s2, 16
-; GFX11-TRUE16-NEXT: s_lshr_b32 s5, s1, 16
-; GFX11-TRUE16-NEXT: s_lshr_b32 s4, s0, 16
-; GFX11-TRUE16-NEXT: s_mov_b32 s40, 0
-; GFX11-TRUE16-NEXT: s_pack_ll_b32_b16 s4, s0, s4
-; GFX11-TRUE16-NEXT: s_pack_ll_b32_b16 s5, s1, s5
-; GFX11-TRUE16-NEXT: s_pack_ll_b32_b16 s6, s2, s6
-; GFX11-TRUE16-NEXT: s_pack_ll_b32_b16 s3, s3, s46
-; GFX11-TRUE16-NEXT: s_pack_ll_b32_b16 s7, s16, s7
-; GFX11-TRUE16-NEXT: s_pack_ll_b32_b16 s8, s17, s8
-; GFX11-TRUE16-NEXT: s_pack_ll_b32_b16 s9, s18, s9
-; GFX11-TRUE16-NEXT: s_pack_ll_b32_b16 s10, s19, s10
-; GFX11-TRUE16-NEXT: s_pack_ll_b32_b16 s11, s20, s11
-; GFX11-TRUE16-NEXT: s_pack_ll_b32_b16 s12, s21, s12
-; GFX11-TRUE16-NEXT: s_pack_ll_b32_b16 s13, s22, s13
-; GFX11-TRUE16-NEXT: s_pack_ll_b32_b16 s14, s23, s14
-; GFX11-TRUE16-NEXT: s_pack_ll_b32_b16 s15, s24, s15
-; GFX11-TRUE16-NEXT: s_pack_ll_b32_b16 s16, s25, s45
-; GFX11-TRUE16-NEXT: s_pack_ll_b32_b16 s17, s26, s44
-; GFX11-TRUE16-NEXT: s_pack_ll_b32_b16 s0, s27, s43
-; GFX11-TRUE16-NEXT: s_pack_ll_b32_b16 s1, s28, s42
-; GFX11-TRUE16-NEXT: s_pack_ll_b32_b16 s2, s29, s41
+; GFX11-TRUE16-NEXT: s_clause 0x1f
+; GFX11-TRUE16-NEXT: scratch_store_b32 off, v40, s32 offset:296
+; GFX11-TRUE16-NEXT: scratch_store_b32 off, v41, s32 offset:292
+; GFX11-TRUE16-NEXT: scratch_store_b32 off, v42, s32 offset:288
+; GFX11-TRUE16-NEXT: scratch_store_b32 off, v43, s32 offset:284
+; GFX11-TRUE16-NEXT: scratch_store_b32 off, v44, s32 offset:280
+; GFX11-TRUE16-NEXT: scratch_store_b32 off, v45, s32 offset:276
+; GFX11-TRUE16-NEXT: scratch_store_b32 off, v46, s32 offset:272
+; GFX11-TRUE16-NEXT: scratch_store_b32 off, v47, s32 offset:268
+; GFX11-TRUE16-NEXT: scratch_store_b32 off, v56, s32 offset:264
+; GFX11-TRUE16-NEXT: scratch_store_b32 off, v57, s32 offset:260
+; GFX11-TRUE16-NEXT: scratch_store_b32 off, v58, s32 offset:256
+; GFX11-TRUE16-NEXT: scratch_store_b32 off, v59, s32 offset:252
+; GFX11-TRUE16-NEXT: scratch_store_b32 off, v60, s32 offset:248
+; GFX11-TRUE16-NEXT: scratch_store_b32 off, v61, s32 offset:244
+; GFX11-TRUE16-NEXT: scratch_store_b32 off, v62, s32 offset:240
+; GFX11-TRUE16-NEXT: scratch_store_b32 off, v63, s32 offset:236
+; GFX11-TRUE16-NEXT: scratch_store_b32 off, v72, s32 offset:232
+; GFX11-TRUE16-NEXT: scratch_store_b32 off, v73, s32 offset:228
+; GFX11-TRUE16-NEXT: scratch_store_b32 off, v74, s32 offset:224
+; GFX11-TRUE16-NEXT: scratch_store_b32 off, v75, s32 offset:220
+; GFX11-TRUE16-NEXT: scratch_store_b32 off, v76, s32 offset:216
+; GFX11-TRUE16-NEXT: scratch_store_b32 off, v77, s32 offset:212
+; GFX11-TRUE16-NEXT: scratch_store_b32 off, v78, s32 offset:208
+; GFX11-TRUE16-NEXT: scratch_store_b32 off, v79, s32 offset:204
+; GFX11-TRUE16-NEXT: scratch_store_b32 off, v88, s32 offset:200
+; GFX11-TRUE16-NEXT: scratch_store_b32 off, v89, s32 offset:196
+; GFX11-TRUE16-NEXT: scratch_store_b32 off, v90, s32 offset:192
+; GFX11-TRUE16-NEXT: scratch_store_b32 off, v91, s32 offset:188
+; GFX11-TRUE16-NEXT: scratch_store_b32 off, v92, s32 offset:184
+; GFX11-TRUE16-NEXT: scratch_store_b32 off, v93, s32 offset:180
+; GFX11-TRUE16-NEXT: scratch_store_b32 off, v94, s32 offset:176
+; GFX11-TRUE16-NEXT: scratch_store_b32 off, v95, s32 offset:172
+; GFX11-TRUE16-NEXT: s_clause 0x1f
+; GFX11-TRUE16-NEXT: scratch_store_b32 off, v104, s32 offset:168
+; GFX11-TRUE16-NEXT: scratch_store_b32 off, v105, s32 offset:164
+; GFX11-TRUE16-NEXT: scratch_store_b32 off, v106, s32 offset:160
+; GFX11-TRUE16-NEXT: scratch_store_b32 off, v107, s32 offset:156
+; GFX11-TRUE16-NEXT: scratch_store_b32 off, v108, s32 offset:152
+; GFX11-TRUE16-NEXT: scratch_store_b32 off, v109, s32 offset:148
+; GFX11-TRUE16-NEXT: scratch_store_b32 off, v110, s32 offset:144
+; GFX11-TRUE16-NEXT: scratch_store_b32 off, v111, s32 offset:140
+; GFX11-TRUE16-NEXT: scratch_store_b32 off, v120, s32 offset:136
+; GFX11-TRUE16-NEXT: scratch_store_b32 off, v121, s32 offset:132
+; GFX11-TRUE16-NEXT: scratch_store_b32 off, v122, s32 offset:128
+; GFX11-TRUE16-NEXT: scratch_store_b32 off, v123, s32 offset:124
+; GFX11-TRUE16-NEXT: scratch_store_b32 off, v124, s32 offset:120
+; GFX11-TRUE16-NEXT: scratch_store_b32 off, v125, s32 offset:116
+; GFX11-TRUE16-NEXT: scratch_store_b32 off, v126, s32 offset:112
+; GFX11-TRUE16-NEXT: scratch_store_b32 off, v127, s32 offset:108
+; GFX11-TRUE16-NEXT: scratch_store_b32 off, v136, s32 offset:104
+; GFX11-TRUE16-NEXT: scratch_store_b32 off, v137, s32 offset:100
+; GFX11-TRUE16-NEXT: scratch_store_b32 off, v138, s32 offset:96
+; GFX11-TRUE16-NEXT: scratch_store_b32 off, v139, s32 offset:92
+; GFX11-TRUE16-NEXT: scratch_store_b32 off, v140, s32 offset:88
+; GFX11-TRUE16-NEXT: scratch_store_b32 off, v141, s32 offset:84
+; GFX11-TRUE16-NEXT: scratch_store_b32 off, v142, s32 offset:80
+; GFX11-TRUE16-NEXT: scratch_store_b32 off, v143, s32 offset:76
+; GFX11-TRUE16-NEXT: scratch_store_b32 off, v152, s32 offset:72
+; GFX11-TRUE16-NEXT: scratch_store_b32 off, v153, s32 offset:68
+; GFX11-TRUE16-NEXT: scratch_store_b32 off, v154, s32 offset:64
+; GFX11-TRUE16-NEXT: scratch_store_b32 off, v155, s32 offset:60
+; GFX11-TRUE16-NEXT: scratch_store_b32 off, v156, s32 offset:56
+; GFX11-TRUE16-NEXT: scratch_store_b32 off, v157, s32 offset:52
+; GFX11-TRUE16-NEXT: scratch_store_b32 off, v158, s32 offset:48
+; GFX11-TRUE16-NEXT: scratch_store_b32 off, v159, s32 offset:44
+; GFX11-TRUE16-NEXT: s_clause 0xa
+; GFX11-TRUE16-NEXT: scratch_store_b32 off, v168, s32 offset:40
+; GFX11-TRUE16-NEXT: scratch_store_b32 off, v169, s32 offset:36
+; GFX11-TRUE16-NEXT: scratch_store_b32 off, v170, s32 offset:32
+; GFX11-TRUE16-NEXT: scratch_store_b32 off, v171, s32 offset:28
+; GFX11-TRUE16-NEXT: scratch_store_b32 off, v172, s32 offset:24
+; GFX11-TRUE16-NEXT: scratch_store_b32 off, v173, s32 offset:20
+; GFX11-TRUE16-NEXT: scratch_store_b32 off, v174, s32 offset:16
+; GFX11-TRUE16-NEXT: scratch_store_b32 off, v175, s32 offset:12
+; GFX11-TRUE16-NEXT: scratch_store_b32 off, v184, s32 offset:8
+; GFX11-TRUE16-NEXT: scratch_store_b32 off, v185, s32 offset:4
+; GFX11-TRUE16-NEXT: scratch_store_b32 off, v186, s32
+; GFX11-TRUE16-NEXT: v_dual_mov_b32 v185, v1 :: v_dual_mov_b32 v186, v0
+; GFX11-TRUE16-NEXT: s_lshr_b32 s15, s29, 16
+; GFX11-TRUE16-NEXT: s_lshr_b32 s14, s28, 16
+; GFX11-TRUE16-NEXT: s_lshr_b32 s13, s27, 16
+; GFX11-TRUE16-NEXT: s_lshr_b32 s12, s26, 16
+; GFX11-TRUE16-NEXT: s_lshr_b32 s11, s25, 16
+; GFX11-TRUE16-NEXT: s_lshr_b32 s10, s24, 16
+; GFX11-TRUE16-NEXT: s_lshr_b32 s9, s23, 16
+; GFX11-TRUE16-NEXT: s_lshr_b32 s8, s22, 16
+; GFX11-TRUE16-NEXT: s_lshr_b32 s7, s21, 16
+; GFX11-TRUE16-NEXT: s_lshr_b32 s6, s20, 16
+; GFX11-TRUE16-NEXT: s_lshr_b32 s5, s19, 16
+; GFX11-TRUE16-NEXT: s_lshr_b32 s4, s18, 16
+; GFX11-TRUE16-NEXT: s_lshr_b32 s43, s17, 16
+; GFX11-TRUE16-NEXT: s_lshr_b32 s44, s16, 16
+; GFX11-TRUE16-NEXT: s_lshr_b32 s45, s3, 16
+; GFX11-TRUE16-NEXT: s_lshr_b32 s46, s2, 16
+; GFX11-TRUE16-NEXT: s_lshr_b32 s41, s1, 16
+; GFX11-TRUE16-NEXT: s_lshr_b32 s40, s0, 16
+; GFX11-TRUE16-NEXT: s_mov_b32 s42, 0
+; GFX11-TRUE16-NEXT: s_pack_ll_b32_b16 s40, s0, s40
+; GFX11-TRUE16-NEXT: s_pack_ll_b32_b16 s41, s1, s41
+; GFX11-TRUE16-NEXT: s_pack_ll_b32_b16 s0, s2, s46
+; GFX11-TRUE16-NEXT: s_pack_ll_b32_b16 s1, s3, s45
+; GFX11-TRUE16-NEXT: s_pack_ll_b32_b16 s2, s16, s44
+; GFX11-TRUE16-NEXT: s_pack_ll_b32_b16 s3, s17, s43
+; GFX11-TRUE16-NEXT: s_pack_ll_b32_b16 s4, s18, s4
+; GFX11-TRUE16-NEXT: s_pack_ll_b32_b16 s5, s19, s5
+; GFX11-TRUE16-NEXT: s_pack_ll_b32_b16 s6, s20, s6
+; GFX11-TRUE16-NEXT: s_pack_ll_b32_b16 s7, s21, s7
+; GFX11-TRUE16-NEXT: s_pack_ll_b32_b16 s8, s22, s8
+; GFX11-TRUE16-NEXT: s_pack_ll_b32_b16 s9, s23, s9
+; GFX11-TRUE16-NEXT: s_pack_ll_b32_b16 s10, s24, s10
+; GFX11-TRUE16-NEXT: s_pack_ll_b32_b16 s11, s25, s11
+; GFX11-TRUE16-NEXT: s_pack_ll_b32_b16 s12, s26, s12
+; GFX11-TRUE16-NEXT: s_pack_ll_b32_b16 s13, s27, s13
+; GFX11-TRUE16-NEXT: s_pack_ll_b32_b16 s14, s28, s14
+; GFX11-TRUE16-NEXT: s_pack_ll_b32_b16 s15, s29, s15
; GFX11-TRUE16-NEXT: s_and_b32 s47, vcc_lo, exec_lo
; GFX11-TRUE16-NEXT: s_cbranch_scc0 .LBB31_4
; GFX11-TRUE16-NEXT: ; %bb.1: ; %cmp.false
-; GFX11-TRUE16-NEXT: v_lshl_or_b32 v18, v33, 16, v35
-; GFX11-TRUE16-NEXT: v_lshl_or_b32 v19, v32, 16, v34
-; GFX11-TRUE16-NEXT: v_dual_mov_b32 v0, s4 :: v_dual_mov_b32 v1, s5
-; GFX11-TRUE16-NEXT: v_dual_mov_b32 v2, s6 :: v_dual_mov_b32 v3, s3
-; GFX11-TRUE16-NEXT: v_dual_mov_b32 v4, s7 :: v_dual_mov_b32 v5, s8
-; GFX11-TRUE16-NEXT: v_dual_mov_b32 v6, s9 :: v_dual_mov_b32 v7, s10
-; GFX11-TRUE16-NEXT: v_dual_mov_b32 v8, s11 :: v_dual_mov_b32 v9, s12
-; GFX11-TRUE16-NEXT: v_dual_mov_b32 v10, s13 :: v_dual_mov_b32 v11, s14
-; GFX11-TRUE16-NEXT: v_dual_mov_b32 v12, s15 :: v_dual_mov_b32 v13, s16
-; GFX11-TRUE16-NEXT: v_dual_mov_b32 v14, s17 :: v_dual_mov_b32 v15, s0
-; GFX11-TRUE16-NEXT: v_dual_mov_b32 v16, s1 :: v_dual_mov_b32 v17, s2
-; GFX11-TRUE16-NEXT: s_and_not1_b32 vcc_lo, exec_lo, s40
+; GFX11-TRUE16-NEXT: v_dual_mov_b32 v0, s40 :: v_dual_mov_b32 v5, s0
+; GFX11-TRUE16-NEXT: v_dual_mov_b32 v2, s41 :: v_dual_mov_b32 v9, s1
+; GFX11-TRUE16-NEXT: v_dual_mov_b32 v14, s2 :: v_dual_mov_b32 v27, s4
+; GFX11-TRUE16-NEXT: v_dual_mov_b32 v20, s3 :: v_dual_mov_b32 v35, s5
+; GFX11-TRUE16-NEXT: v_dual_mov_b32 v44, s6 :: v_dual_mov_b32 v65, s8
+; GFX11-TRUE16-NEXT: v_dual_mov_b32 v54, s7 :: v_dual_mov_b32 v77, s9
+; GFX11-TRUE16-NEXT: v_dual_mov_b32 v90, s10 :: v_dual_mov_b32 v119, s12
+; GFX11-TRUE16-NEXT: v_dual_mov_b32 v104, s11 :: v_dual_mov_b32 v135, s13
+; GFX11-TRUE16-NEXT: v_mov_b32_e32 v152, s14
+; GFX11-TRUE16-NEXT: v_mov_b32_e32 v170, s15
+; GFX11-TRUE16-NEXT: s_and_not1_b32 vcc_lo, exec_lo, s42
; GFX11-TRUE16-NEXT: s_cbranch_vccnz .LBB31_3
; GFX11-TRUE16-NEXT: .LBB31_2: ; %cmp.true
-; GFX11-TRUE16-NEXT: v_lshl_or_b32 v18, v33, 16, v35
-; GFX11-TRUE16-NEXT: v_lshl_or_b32 v19, v32, 16, v34
-; GFX11-TRUE16-NEXT: v_pk_add_u16 v0, s4, 3 op_sel_hi:[1,0]
-; GFX11-TRUE16-NEXT: v_pk_add_u16 v1, s5, 3 op_sel_hi:[1,0]
-; GFX11-TRUE16-NEXT: v_pk_add_u16 v2, s6, 3 op_sel_hi:[1,0]
-; GFX11-TRUE16-NEXT: v_pk_add_u16 v3, s3, 3 op_sel_hi:[1,0]
-; GFX11-TRUE16-NEXT: v_pk_add_u16 v4, s7, 3 op_sel_hi:[1,0]
-; GFX11-TRUE16-NEXT: v_pk_add_u16 v5, s8, 3 op_sel_hi:[1,0]
-; GFX11-TRUE16-NEXT: v_pk_add_u16 v6, s9, 3 op_sel_hi:[1,0]
-; GFX11-TRUE16-NEXT: v_pk_add_u16 v7, s10, 3 op_sel_hi:[1,0]
-; GFX11-TRUE16-NEXT: v_pk_add_u16 v8, s11, 3 op_sel_hi:[1,0]
-; GFX11-TRUE16-NEXT: v_pk_add_u16 v9, s12, 3 op_sel_hi:[1,0]
-; GFX11-TRUE16-NEXT: v_pk_add_u16 v10, s13, 3 op_sel_hi:[1,0]
-; GFX11-TRUE16-NEXT: v_pk_add_u16 v11, s14, 3 op_sel_hi:[1,0]
-; GFX11-TRUE16-NEXT: v_pk_add_u16 v12, s15, 3 op_sel_hi:[1,0]
-; GFX11-TRUE16-NEXT: v_pk_add_u16 v13, s16, 3 op_sel_hi:[1,0]
-; GFX11-TRUE16-NEXT: v_pk_add_u16 v14, s17, 3 op_sel_hi:[1,0]
-; GFX11-TRUE16-NEXT: v_pk_add_u16 v15, s0, 3 op_sel_hi:[1,0]
-; GFX11-TRUE16-NEXT: v_pk_add_u16 v16, s1, 3 op_sel_hi:[1,0]
-; GFX11-TRUE16-NEXT: v_pk_add_u16 v17, s2, 3 op_sel_hi:[1,0]
-; GFX11-TRUE16-NEXT: v_pk_add_u16 v18, v18, 3 op_sel_hi:[1,0]
-; GFX11-TRUE16-NEXT: v_pk_add_u16 v19, v19, 3 op_sel_hi:[1,0]
+; GFX11-TRUE16-NEXT: v_pk_add_u16 v0, s40, 3 op_sel_hi:[1,0]
+; GFX11-TRUE16-NEXT: v_pk_add_u16 v2, s41, 3 op_sel_hi:[1,0]
+; GFX11-TRUE16-NEXT: v_pk_add_u16 v186, v186, 3 op_sel_hi:[1,0]
+; GFX11-TRUE16-NEXT: v_pk_add_u16 v185, v185, 3 op_sel_hi:[1,0]
+; GFX11-TRUE16-NEXT: v_pk_add_u16 v5, s0, 3 op_sel_hi:[1,0]
+; GFX11-TRUE16-NEXT: v_pk_add_u16 v9, s1, 3 op_sel_hi:[1,0]
+; GFX11-TRUE16-NEXT: v_pk_add_u16 v14, s2, 3 op_sel_hi:[1,0]
+; GFX11-TRUE16-NEXT: v_pk_add_u16 v20, s3, 3 op_sel_hi:[1,0]
+; GFX11-TRUE16-NEXT: v_pk_add_u16 v27, s4, 3 op_sel_hi:[1,0]
+; GFX11-TRUE16-NEXT: v_pk_add_u16 v35, s5, 3 op_sel_hi:[1,0]
+; GFX11-TRUE16-NEXT: v_pk_add_u16 v44, s6, 3 op_sel_hi:[1,0]
+; GFX11-TRUE16-NEXT: v_pk_add_u16 v54, s7, 3 op_sel_hi:[1,0]
+; GFX11-TRUE16-NEXT: v_pk_add_u16 v65, s8, 3 op_sel_hi:[1,0]
+; GFX11-TRUE16-NEXT: v_pk_add_u16 v77, s9, 3 op_sel_hi:[1,0]
+; GFX11-TRUE16-NEXT: v_pk_add_u16 v90, s10, 3 op_sel_hi:[1,0]
+; GFX11-TRUE16-NEXT: v_pk_add_u16 v104, s11, 3 op_sel_hi:[1,0]
+; GFX11-TRUE16-NEXT: v_pk_add_u16 v119, s12, 3 op_sel_hi:[1,0]
+; GFX11-TRUE16-NEXT: v_pk_add_u16 v135, s13, 3 op_sel_hi:[1,0]
+; GFX11-TRUE16-NEXT: v_pk_add_u16 v152, s14, 3 op_sel_hi:[1,0]
+; GFX11-TRUE16-NEXT: v_pk_add_u16 v170, s15, 3 op_sel_hi:[1,0]
; GFX11-TRUE16-NEXT: .LBB31_3: ; %end
+; GFX11-TRUE16-NEXT: v_dual_mov_b32 v7, v35 :: v_dual_mov_b32 v8, v44
+; GFX11-TRUE16-NEXT: v_dual_mov_b32 v11, v77 :: v_dual_mov_b32 v12, v90
+; GFX11-TRUE16-NEXT: v_mov_b32_e32 v13, v104
+; GFX11-TRUE16-NEXT: v_dual_mov_b32 v15, v135 :: v_dual_mov_b32 v16, v152
+; GFX11-TRUE16-NEXT: v_mov_b32_e32 v17, v170
+; GFX11-TRUE16-NEXT: v_dual_mov_b32 v18, v186 :: v_dual_mov_b32 v19, v185
+; GFX11-TRUE16-NEXT: s_clause 0x1f
+; GFX11-TRUE16-NEXT: scratch_load_b32 v186, off, s32
+; GFX11-TRUE16-NEXT: scratch_load_b32 v185, off, s32 offset:4
+; GFX11-TRUE16-NEXT: scratch_load_b32 v184, off, s32 offset:8
+; GFX11-TRUE16-NEXT: scratch_load_b32 v175, off, s32 offset:12
+; GFX11-TRUE16-NEXT: scratch_load_b32 v174, off, s32 offset:16
+; GFX11-TRUE16-NEXT: scratch_load_b32 v173, off, s32 offset:20
+; GFX11-TRUE16-NEXT: scratch_load_b32 v172, off, s32 offset:24
+; GFX11-TRUE16-NEXT: scratch_load_b32 v171, off, s32 offset:28
+; GFX11-TRUE16-NEXT: scratch_load_b32 v170, off, s32 offset:32
+; GFX11-TRUE16-NEXT: scratch_load_b32 v169, off, s32 offset:36
+; GFX11-TRUE16-NEXT: scratch_load_b32 v168, off, s32 offset:40
+; GFX11-TRUE16-NEXT: scratch_load_b32 v159, off, s32 offset:44
+; GFX11-TRUE16-NEXT: scratch_load_b32 v158, off, s32 offset:48
+; GFX11-TRUE16-NEXT: scratch_load_b32 v157, off, s32 offset:52
+; GFX11-TRUE16-NEXT: scratch_load_b32 v156, off, s32 offset:56
+; GFX11-TRUE16-NEXT: scratch_load_b32 v155, off, s32 offset:60
+; GFX11-TRUE16-NEXT: scratch_load_b32 v154, off, s32 offset:64
+; GFX11-TRUE16-NEXT: scratch_load_b32 v153, off, s32 offset:68
+; GFX11-TRUE16-NEXT: scratch_load_b32 v152, off, s32 offset:72
+; GFX11-TRUE16-NEXT: scratch_load_b32 v143, off, s32 offset:76
+; GFX11-TRUE16-NEXT: scratch_load_b32 v142, off, s32 offset:80
+; GFX11-TRUE16-NEXT: scratch_load_b32 v141, off, s32 offset:84
+; GFX11-TRUE16-NEXT: scratch_load_b32 v140, off, s32 offset:88
+; GFX11-TRUE16-NEXT: scratch_load_b32 v139, off, s32 offset:92
+; GFX11-TRUE16-NEXT: scratch_load_b32 v138, off, s32 offset:96
+; GFX11-TRUE16-NEXT: scratch_load_b32 v137, off, s32 offset:100
+; GFX11-TRUE16-NEXT: scratch_load_b32 v136, off, s32 offset:104
+; GFX11-TRUE16-NEXT: scratch_load_b32 v127, off, s32 offset:108
+; GFX11-TRUE16-NEXT: scratch_load_b32 v126, off, s32 offset:112
+; GFX11-TRUE16-NEXT: scratch_load_b32 v125, off, s32 offset:116
+; GFX11-TRUE16-NEXT: scratch_load_b32 v124, off, s32 offset:120
+; GFX11-TRUE16-NEXT: scratch_load_b32 v123, off, s32 offset:124
+; GFX11-TRUE16-NEXT: s_clause 0x1f
+; GFX11-TRUE16-NEXT: scratch_load_b32 v122, off, s32 offset:128
+; GFX11-TRUE16-NEXT: scratch_load_b32 v121, off, s32 offset:132
+; GFX11-TRUE16-NEXT: scratch_load_b32 v120, off, s32 offset:136
+; GFX11-TRUE16-NEXT: scratch_load_b32 v111, off, s32 offset:140
+; GFX11-TRUE16-NEXT: scratch_load_b32 v110, off, s32 offset:144
+; GFX11-TRUE16-NEXT: scratch_load_b32 v109, off, s32 offset:148
+; GFX11-TRUE16-NEXT: scratch_load_b32 v108, off, s32 offset:152
+; GFX11-TRUE16-NEXT: scratch_load_b32 v107, off, s32 offset:156
+; GFX11-TRUE16-NEXT: scratch_load_b32 v106, off, s32 offset:160
+; GFX11-TRUE16-NEXT: scratch_load_b32 v105, off, s32 offset:164
+; GFX11-TRUE16-NEXT: scratch_load_b32 v104, off, s32 offset:168
+; GFX11-TRUE16-NEXT: scratch_load_b32 v95, off, s32 offset:172
+; GFX11-TRUE16-NEXT: scratch_load_b32 v94, off, s32 offset:176
+; GFX11-TRUE16-NEXT: scratch_load_b32 v93, off, s32 offset:180
+; GFX11-TRUE16-NEXT: scratch_load_b32 v92, off, s32 offset:184
+; GFX11-TRUE16-NEXT: scratch_load_b32 v91, off, s32 offset:188
+; GFX11-TRUE16-NEXT: scratch_load_b32 v90, off, s32 offset:192
+; GFX11-TRUE16-NEXT: scratch_load_b32 v89, off, s32 offset:196
+; GFX11-TRUE16-NEXT: scratch_load_b32 v88, off, s32 offset:200
+; GFX11-TRUE16-NEXT: scratch_load_b32 v79, off, s32 offset:204
+; GFX11-TRUE16-NEXT: scratch_load_b32 v78, off, s32 offset:208
+; GFX11-TRUE16-NEXT: scratch_load_b32 v77, off, s32 offset:212
+; GFX11-TRUE16-NEXT: scratch_load_b32 v76, off, s32 offset:216
+; GFX11-TRUE16-NEXT: scratch_load_b32 v75, off, s32 offset:220
+; GFX11-TRUE16-NEXT: scratch_load_b32 v74, off, s32 offset:224
+; GFX11-TRUE16-NEXT: scratch_load_b32 v73, off, s32 offset:228
+; GFX11-TRUE16-NEXT: scratch_load_b32 v72, off, s32 offset:232
+; GFX11-TRUE16-NEXT: scratch_load_b32 v63, off, s32 offset:236
+; GFX11-TRUE16-NEXT: scratch_load_b32 v62, off, s32 offset:240
+; GFX11-TRUE16-NEXT: scratch_load_b32 v61, off, s32 offset:244
+; GFX11-TRUE16-NEXT: scratch_load_b32 v60, off, s32 offset:248
+; GFX11-TRUE16-NEXT: scratch_load_b32 v59, off, s32 offset:252
+; GFX11-TRUE16-NEXT: s_clause 0xa
+; GFX11-TRUE16-NEXT: scratch_load_b32 v58, off, s32 offset:256
+; GFX11-TRUE16-NEXT: scratch_load_b32 v57, off, s32 offset:260
+; GFX11-TRUE16-NEXT: scratch_load_b32 v56, off, s32 offset:264
+; GFX11-TRUE16-NEXT: scratch_load_b32 v47, off, s32 offset:268
+; GFX11-TRUE16-NEXT: scratch_load_b32 v46, off, s32 offset:272
+; GFX11-TRUE16-NEXT: scratch_load_b32 v45, off, s32 offset:276
+; GFX11-TRUE16-NEXT: scratch_load_b32 v44, off, s32 offset:280
+; GFX11-TRUE16-NEXT: scratch_load_b32 v43, off, s32 offset:284
+; GFX11-TRUE16-NEXT: scratch_load_b32 v42, off, s32 offset:288
+; GFX11-TRUE16-NEXT: scratch_load_b32 v41, off, s32 offset:292
+; GFX11-TRUE16-NEXT: scratch_load_b32 v40, off, s32 offset:296
+; GFX11-TRUE16-NEXT: v_dual_mov_b32 v1, v2 :: v_dual_mov_b32 v2, v5
+; GFX11-TRUE16-NEXT: v_dual_mov_b32 v3, v9 :: v_dual_mov_b32 v4, v14
+; GFX11-TRUE16-NEXT: v_dual_mov_b32 v5, v20 :: v_dual_mov_b32 v6, v27
+; GFX11-TRUE16-NEXT: v_dual_mov_b32 v9, v54 :: v_dual_mov_b32 v10, v65
+; GFX11-TRUE16-NEXT: v_mov_b32_e32 v14, v119
+; GFX11-TRUE16-NEXT: s_waitcnt vmcnt(0)
; GFX11-TRUE16-NEXT: s_setpc_b64 s[30:31]
; GFX11-TRUE16-NEXT: .LBB31_4:
; GFX11-TRUE16-NEXT: ; implicit-def: $vgpr0_vgpr1_vgpr2_vgpr3_vgpr4_vgpr5_vgpr6_vgpr7_vgpr8_vgpr9_vgpr10_vgpr11_vgpr12_vgpr13_vgpr14_vgpr15_vgpr16_vgpr17_vgpr18_vgpr19_vgpr20_vgpr21_vgpr22_vgpr23_vgpr24_vgpr25_vgpr26_vgpr27_vgpr28_vgpr29_vgpr30_vgpr31
+; GFX11-TRUE16-NEXT: ; implicit-def: $vgpr1_vgpr2_vgpr3_vgpr4_vgpr5_vgpr6_vgpr7_vgpr8_vgpr9_vgpr10_vgpr11_vgpr12_vgpr13_vgpr14_vgpr15_vgpr16_vgpr17_vgpr18_vgpr19_vgpr20_vgpr21_vgpr22_vgpr23_vgpr24_vgpr25_vgpr26_vgpr27_vgpr28_vgpr29_vgpr30_vgpr31_vgpr32
+; GFX11-TRUE16-NEXT: ; implicit-def: $vgpr3_vgpr4_vgpr5_vgpr6_vgpr7_vgpr8_vgpr9_vgpr10_vgpr11_vgpr12_vgpr13_vgpr14_vgpr15_vgpr16_vgpr17_vgpr18_vgpr19_vgpr20_vgpr21_vgpr22_vgpr23_vgpr24_vgpr25_vgpr26_vgpr27_vgpr28_vgpr29_vgpr30_vgpr31_vgpr32_vgpr33_vgpr34
+; GFX11-TRUE16-NEXT: ; implicit-def: $vgpr6_vgpr7_vgpr8_vgpr9_vgpr10_vgpr11_vgpr12_vgpr13_vgpr14_vgpr15_vgpr16_vgpr17_vgpr18_vgpr19_vgpr20_vgpr21_vgpr22_vgpr23_vgpr24_vgpr25_vgpr26_vgpr27_vgpr28_vgpr29_vgpr30_vgpr31_vgpr32_vgpr33_vgpr34_vgpr35_vgpr36_vgpr37
+; GFX11-TRUE16-NEXT: ; implicit-def: $vgpr10_vgpr11_vgpr12_vgpr13_vgpr14_vgpr15_vgpr16_vgpr17_vgpr18_vgpr19_vgpr20_vgpr21_vgpr22_vgpr23_vgpr24_vgpr25_vgpr26_vgpr27_vgpr28_vgpr29_vgpr30_vgpr31_vgpr32_vgpr33_vgpr34_vgpr35_vgpr36_vgpr37_vgpr38_vgpr39_vgpr40_vgpr41
+; GFX11-TRUE16-NEXT: ; implicit-def: $vgpr15_vgpr16_vgpr17_vgpr18_vgpr19_vgpr20_vgpr21_vgpr22_vgpr23_vgpr24_vgpr25_vgpr26_vgpr27_vgpr28_vgpr29_vgpr30_vgpr31_vgpr32_vgpr33_vgpr34_vgpr35_vgpr36_vgpr37_vgpr38_vgpr39_vgpr40_vgpr41_vgpr42_vgpr43_vgpr44_vgpr45_vgpr46
+; GFX11-TRUE16-NEXT: ; implicit-def: $vgpr21_vgpr22_vgpr23_vgpr24_vgpr25_vgpr26_vgpr27_vgpr28_vgpr29_vgpr30_vgpr31_vgpr32_vgpr33_vgpr34_vgpr35_vgpr36_vgpr37_vgpr38_vgpr39_vgpr40_vgpr41_vgpr42_vgpr43_vgpr44_vgpr45_vgpr46_vgpr47_vgpr48_vgpr49_vgpr50_vgpr51_vgpr52
+; GFX11-TRUE16-NEXT: ; implicit-def: $vgpr28_vgpr29_vgpr30_vgpr31_vgpr32_vgpr33_vgpr34_vgpr35_vgpr36_vgpr37_vgpr38_vgpr39_vgpr40_vgpr41_vgpr42_vgpr43_vgpr44_vgpr45_vgpr46_vgpr47_vgpr48_vgpr49_vgpr50_vgpr51_vgpr52_vgpr53_vgpr54_vgpr55_vgpr56_vgpr57_vgpr58_vgpr59
+; GFX11-TRUE16-NEXT: ; implicit-def: $vgpr36_vgpr37_vgpr38_vgpr39_vgpr40_vgpr41_vgpr42_vgpr43_vgpr44_vgpr45_vgpr46_vgpr47_vgpr48_vgpr49_vgpr50_vgpr51_vgpr52_vgpr53_vgpr54_vgpr55_vgpr56_vgpr57_vgpr58_vgpr59_vgpr60_vgpr61_vgpr62_vgpr63_vgpr64_vgpr65_vgpr66_vgpr67
+; GFX11-TRUE16-NEXT: ; implicit-def: $vgpr45_vgpr46_vgpr47_vgpr48_vgpr49_vgpr50_vgpr51_vgpr52_vgpr53_vgpr54_vgpr55_vgpr56_vgpr57_vgpr58_vgpr59_vgpr60_vgpr61_vgpr62_vgpr63_vgpr64_vgpr65_vgpr66_vgpr67_vgpr68_vgpr69_vgpr70_vgpr71_vgpr72_vgpr73_vgpr74_vgpr75_vgpr76
+; GFX11-TRUE16-NEXT: ; implicit-def: $vgpr55_vgpr56_vgpr57_vgpr58_vgpr59_vgpr60_vgpr61_vgpr62_vgpr63_vgpr64_vgpr65_vgpr66_vgpr67_vgpr68_vgpr69_vgpr70_vgpr71_vgpr72_vgpr73_vgpr74_vgpr75_vgpr76_vgpr77_vgpr78_vgpr79_vgpr80_vgpr81_vgpr82_vgpr83_vgpr84_vgpr85_vgpr86
+; GFX11-TRUE16-NEXT: ; implicit-def: $vgpr66_vgpr67_vgpr68_vgpr69_vgpr70_vgpr71_vgpr72_vgpr73_vgpr74_vgpr75_vgpr76_vgpr77_vgpr78_vgpr79_vgpr80_vgpr81_vgpr82_vgpr83_vgpr84_vgpr85_vgpr86_vgpr87_vgpr88_vgpr89_vgpr90_vgpr91_vgpr92_vgpr93_vgpr94_vgpr95_vgpr96_vgpr97
+; GFX11-TRUE16-NEXT: ; implicit-def: $vgpr78_vgpr79_vgpr80_vgpr81_vgpr82_vgpr83_vgpr84_vgpr85_vgpr86_vgpr87_vgpr88_vgpr89_vgpr90_vgpr91_vgpr92_vgpr93_vgpr94_vgpr95_vgpr96_vgpr97_vgpr98_vgpr99_vgpr100_vgpr101_vgpr102_vgpr103_vgpr104_vgpr105_vgpr106_vgpr107_vgpr108_vgpr109
+; GFX11-TRUE16-NEXT: ; implicit-def: $vgpr91_vgpr92_vgpr93_vgpr94_vgpr95_vgpr96_vgpr97_vgpr98_vgpr99_vgpr100_vgpr101_vgpr102_vgpr103_vgpr104_vgpr105_vgpr106_vgpr107_vgpr108_vgpr109_vgpr110_vgpr111_vgpr112_vgpr113_vgpr114_vgpr115_vgpr116_vgpr117_vgpr118_vgpr119_vgpr120_vgpr121_vgpr122
+; GFX11-TRUE16-NEXT: ; implicit-def: $vgpr105_vgpr106_vgpr107_vgpr108_vgpr109_vgpr110_vgpr111_vgpr112_vgpr113_vgpr114_vgpr115_vgpr116_vgpr117_vgpr118_vgpr119_vgpr120_vgpr121_vgpr122_vgpr123_vgpr124_vgpr125_vgpr126_vgpr127_vgpr128_vgpr129_vgpr130_vgpr131_vgpr132_vgpr133_vgpr134_vgpr135_vgpr136
+; GFX11-TRUE16-NEXT: ; implicit-def: $vgpr120_vgpr121_vgpr122_vgpr123_vgpr124_vgpr125_vgpr126_vgpr127_vgpr128_vgpr129_vgpr130_vgpr131_vgpr132_vgpr133_vgpr134_vgpr135_vgpr136_vgpr137_vgpr138_vgpr139_vgpr140_vgpr141_vgpr142_vgpr143_vgpr144_vgpr145_vgpr146_vgpr147_vgpr148_vgpr149_vgpr150_vgpr151
+; GFX11-TRUE16-NEXT: ; implicit-def: $vgpr136_vgpr137_vgpr138_vgpr139_vgpr140_vgpr141_vgpr142_vgpr143_vgpr144_vgpr145_vgpr146_vgpr147_vgpr148_vgpr149_vgpr150_vgpr151_vgpr152_vgpr153_vgpr154_vgpr155_vgpr156_vgpr157_vgpr158_vgpr159_vgpr160_vgpr161_vgpr162_vgpr163_vgpr164_vgpr165_vgpr166_vgpr167
+; GFX11-TRUE16-NEXT: ; implicit-def: $vgpr153_vgpr154_vgpr155_vgpr156_vgpr157_vgpr158_vgpr159_vgpr160_vgpr161_vgpr162_vgpr163_vgpr164_vgpr165_vgpr166_vgpr167_vgpr168_vgpr169_vgpr170_vgpr171_vgpr172_vgpr173_vgpr174_vgpr175_vgpr176_vgpr177_vgpr178_vgpr179_vgpr180_vgpr181_vgpr182_vgpr183_vgpr184
; GFX11-TRUE16-NEXT: s_branch .LBB31_2
;
; GFX11-FAKE16-LABEL: bitcast_v40i16_to_v20f32_scalar:
@@ -14269,142 +14929,271 @@ define inreg <40 x half> @bitcast_v20f32_to_v40f16_scalar(<20 x float> inreg %a,
; GFX9-NEXT: ; implicit-def: $vgpr26
; GFX9-NEXT: s_branch .LBB33_2
;
-; GFX11-LABEL: bitcast_v20f32_to_v40f16_scalar:
-; GFX11: ; %bb.0:
-; GFX11-NEXT: s_waitcnt vmcnt(0) expcnt(0) lgkmcnt(0)
-; GFX11-NEXT: v_cmp_ne_u32_e32 vcc_lo, 0, v2
-; GFX11-NEXT: v_dual_mov_b32 v20, s0 :: v_dual_mov_b32 v19, s1
-; GFX11-NEXT: v_dual_mov_b32 v18, s2 :: v_dual_mov_b32 v3, s16
-; GFX11-NEXT: v_dual_mov_b32 v4, s3 :: v_dual_mov_b32 v9, s17
-; GFX11-NEXT: v_dual_mov_b32 v8, s18 :: v_dual_mov_b32 v7, s19
-; GFX11-NEXT: v_dual_mov_b32 v6, s20 :: v_dual_mov_b32 v5, s21
-; GFX11-NEXT: v_dual_mov_b32 v14, s22 :: v_dual_mov_b32 v13, s23
-; GFX11-NEXT: v_dual_mov_b32 v12, s24 :: v_dual_mov_b32 v11, s25
-; GFX11-NEXT: v_dual_mov_b32 v10, s26 :: v_dual_mov_b32 v15, s27
-; GFX11-NEXT: v_dual_mov_b32 v17, s28 :: v_dual_mov_b32 v16, s29
-; GFX11-NEXT: s_mov_b32 s0, 0
-; GFX11-NEXT: s_and_b32 s1, vcc_lo, exec_lo
-; GFX11-NEXT: s_cbranch_scc0 .LBB33_4
-; GFX11-NEXT: ; %bb.1: ; %cmp.false
-; GFX11-NEXT: v_lshrrev_b32_e32 v22, 16, v1
-; GFX11-NEXT: v_lshrrev_b32_e32 v23, 16, v0
-; GFX11-NEXT: v_lshrrev_b32_e32 v24, 16, v16
-; GFX11-NEXT: v_lshrrev_b32_e32 v25, 16, v17
-; GFX11-NEXT: v_lshrrev_b32_e32 v26, 16, v15
-; GFX11-NEXT: v_lshrrev_b32_e32 v27, 16, v10
-; GFX11-NEXT: v_lshrrev_b32_e32 v28, 16, v11
-; GFX11-NEXT: v_lshrrev_b32_e32 v29, 16, v12
-; GFX11-NEXT: v_lshrrev_b32_e32 v30, 16, v13
-; GFX11-NEXT: v_lshrrev_b32_e32 v31, 16, v14
-; GFX11-NEXT: v_lshrrev_b32_e32 v32, 16, v5
-; GFX11-NEXT: v_lshrrev_b32_e32 v33, 16, v6
-; GFX11-NEXT: v_lshrrev_b32_e32 v34, 16, v7
-; GFX11-NEXT: v_lshrrev_b32_e32 v35, 16, v8
-; GFX11-NEXT: v_lshrrev_b32_e32 v36, 16, v9
-; GFX11-NEXT: v_lshrrev_b32_e32 v37, 16, v3
-; GFX11-NEXT: v_lshrrev_b32_e32 v38, 16, v4
-; GFX11-NEXT: v_lshrrev_b32_e32 v2, 16, v18
-; GFX11-NEXT: v_lshrrev_b32_e32 v21, 16, v19
-; GFX11-NEXT: v_lshrrev_b32_e32 v39, 16, v20
-; GFX11-NEXT: s_and_not1_b32 vcc_lo, exec_lo, s0
-; GFX11-NEXT: s_cbranch_vccnz .LBB33_3
-; GFX11-NEXT: .LBB33_2: ; %cmp.true
-; GFX11-NEXT: v_dual_add_f32 v1, 1.0, v1 :: v_dual_add_f32 v0, 1.0, v0
-; GFX11-NEXT: v_dual_add_f32 v16, 1.0, v16 :: v_dual_add_f32 v17, 1.0, v17
-; GFX11-NEXT: v_dual_add_f32 v15, 1.0, v15 :: v_dual_add_f32 v10, 1.0, v10
-; GFX11-NEXT: v_dual_add_f32 v11, 1.0, v11 :: v_dual_add_f32 v12, 1.0, v12
-; GFX11-NEXT: v_dual_add_f32 v13, 1.0, v13 :: v_dual_add_f32 v14, 1.0, v14
-; GFX11-NEXT: v_dual_add_f32 v5, 1.0, v5 :: v_dual_add_f32 v6, 1.0, v6
-; GFX11-NEXT: v_dual_add_f32 v7, 1.0, v7 :: v_dual_add_f32 v8, 1.0, v8
-; GFX11-NEXT: v_dual_add_f32 v9, 1.0, v9 :: v_dual_add_f32 v4, 1.0, v4
-; GFX11-NEXT: v_dual_add_f32 v3, 1.0, v3 :: v_dual_add_f32 v18, 1.0, v18
-; GFX11-NEXT: v_dual_add_f32 v19, 1.0, v19 :: v_dual_add_f32 v20, 1.0, v20
-; GFX11-NEXT: v_lshrrev_b32_e32 v22, 16, v1
-; GFX11-NEXT: v_lshrrev_b32_e32 v23, 16, v0
-; GFX11-NEXT: v_lshrrev_b32_e32 v24, 16, v16
-; GFX11-NEXT: v_lshrrev_b32_e32 v25, 16, v17
-; GFX11-NEXT: v_lshrrev_b32_e32 v26, 16, v15
-; GFX11-NEXT: v_lshrrev_b32_e32 v27, 16, v10
-; GFX11-NEXT: v_lshrrev_b32_e32 v28, 16, v11
-; GFX11-NEXT: v_lshrrev_b32_e32 v29, 16, v12
-; GFX11-NEXT: v_lshrrev_b32_e32 v30, 16, v13
-; GFX11-NEXT: v_lshrrev_b32_e32 v31, 16, v14
-; GFX11-NEXT: v_lshrrev_b32_e32 v32, 16, v5
-; GFX11-NEXT: v_lshrrev_b32_e32 v33, 16, v6
-; GFX11-NEXT: v_lshrrev_b32_e32 v34, 16, v7
-; GFX11-NEXT: v_lshrrev_b32_e32 v35, 16, v8
-; GFX11-NEXT: v_lshrrev_b32_e32 v36, 16, v9
-; GFX11-NEXT: v_lshrrev_b32_e32 v37, 16, v3
-; GFX11-NEXT: v_lshrrev_b32_e32 v38, 16, v4
-; GFX11-NEXT: v_lshrrev_b32_e32 v2, 16, v18
-; GFX11-NEXT: v_lshrrev_b32_e32 v21, 16, v19
-; GFX11-NEXT: v_lshrrev_b32_e32 v39, 16, v20
-; GFX11-NEXT: .LBB33_3: ; %end
-; GFX11-NEXT: v_and_b32_e32 v19, 0xffff, v19
-; GFX11-NEXT: v_and_b32_e32 v18, 0xffff, v18
-; GFX11-NEXT: v_and_b32_e32 v9, 0xffff, v9
-; GFX11-NEXT: v_and_b32_e32 v8, 0xffff, v8
-; GFX11-NEXT: v_and_b32_e32 v14, 0xffff, v14
-; GFX11-NEXT: v_lshl_or_b32 v21, v21, 16, v19
-; GFX11-NEXT: v_lshl_or_b32 v2, v2, 16, v18
-; GFX11-NEXT: v_and_b32_e32 v18, 0xffff, v6
-; GFX11-NEXT: v_and_b32_e32 v19, 0xffff, v5
-; GFX11-NEXT: v_and_b32_e32 v20, 0xffff, v20
-; GFX11-NEXT: v_lshl_or_b32 v5, v36, 16, v9
-; GFX11-NEXT: v_lshl_or_b32 v6, v35, 16, v8
-; GFX11-NEXT: v_lshl_or_b32 v8, v33, 16, v18
-; GFX11-NEXT: v_lshl_or_b32 v9, v32, 16, v19
-; GFX11-NEXT: v_and_b32_e32 v13, 0xffff, v13
-; GFX11-NEXT: v_and_b32_e32 v18, 0xffff, v11
-; GFX11-NEXT: v_and_b32_e32 v19, 0xffff, v10
-; GFX11-NEXT: v_and_b32_e32 v1, 0xffff, v1
-; GFX11-NEXT: v_and_b32_e32 v4, 0xffff, v4
-; GFX11-NEXT: v_and_b32_e32 v48, 0xffff, v3
-; GFX11-NEXT: v_and_b32_e32 v7, 0xffff, v7
-; GFX11-NEXT: v_and_b32_e32 v12, 0xffff, v12
-; GFX11-NEXT: v_lshl_or_b32 v10, v31, 16, v14
-; GFX11-NEXT: v_lshl_or_b32 v11, v30, 16, v13
-; GFX11-NEXT: v_lshl_or_b32 v13, v28, 16, v18
-; GFX11-NEXT: v_lshl_or_b32 v14, v27, 16, v19
-; GFX11-NEXT: v_and_b32_e32 v15, 0xffff, v15
-; GFX11-NEXT: v_and_b32_e32 v17, 0xffff, v17
-; GFX11-NEXT: v_and_b32_e32 v18, 0xffff, v16
-; GFX11-NEXT: v_and_b32_e32 v0, 0xffff, v0
-; GFX11-NEXT: v_lshl_or_b32 v19, v22, 16, v1
-; GFX11-NEXT: v_mov_b32_e32 v1, v21
-; GFX11-NEXT: v_lshl_or_b32 v20, v39, 16, v20
-; GFX11-NEXT: v_lshl_or_b32 v3, v38, 16, v4
-; GFX11-NEXT: v_lshl_or_b32 v4, v37, 16, v48
-; GFX11-NEXT: v_lshl_or_b32 v7, v34, 16, v7
-; GFX11-NEXT: v_lshl_or_b32 v12, v29, 16, v12
-; GFX11-NEXT: v_lshl_or_b32 v15, v26, 16, v15
-; GFX11-NEXT: v_lshl_or_b32 v16, v25, 16, v17
-; GFX11-NEXT: v_lshl_or_b32 v17, v24, 16, v18
-; GFX11-NEXT: v_lshl_or_b32 v18, v23, 16, v0
-; GFX11-NEXT: v_mov_b32_e32 v0, v20
-; GFX11-NEXT: s_setpc_b64 s[30:31]
-; GFX11-NEXT: .LBB33_4:
-; GFX11-NEXT: ; implicit-def: $vgpr39
-; GFX11-NEXT: ; implicit-def: $vgpr21
-; GFX11-NEXT: ; implicit-def: $vgpr2
-; GFX11-NEXT: ; implicit-def: $vgpr38
-; GFX11-NEXT: ; implicit-def: $vgpr37
-; GFX11-NEXT: ; implicit-def: $vgpr36
-; GFX11-NEXT: ; implicit-def: $vgpr35
-; GFX11-NEXT: ; implicit-def: $vgpr34
-; GFX11-NEXT: ; implicit-def: $vgpr33
-; GFX11-NEXT: ; implicit-def: $vgpr32
-; GFX11-NEXT: ; implicit-def: $vgpr31
-; GFX11-NEXT: ; implicit-def: $vgpr30
-; GFX11-NEXT: ; implicit-def: $vgpr29
-; GFX11-NEXT: ; implicit-def: $vgpr28
-; GFX11-NEXT: ; implicit-def: $vgpr27
-; GFX11-NEXT: ; implicit-def: $vgpr26
-; GFX11-NEXT: ; implicit-def: $vgpr25
-; GFX11-NEXT: ; implicit-def: $vgpr24
-; GFX11-NEXT: ; implicit-def: $vgpr23
-; GFX11-NEXT: ; implicit-def: $vgpr22
-; GFX11-NEXT: s_branch .LBB33_2
+; GFX11-TRUE16-LABEL: bitcast_v20f32_to_v40f16_scalar:
+; GFX11-TRUE16: ; %bb.0:
+; GFX11-TRUE16-NEXT: s_waitcnt vmcnt(0) expcnt(0) lgkmcnt(0)
+; GFX11-TRUE16-NEXT: v_dual_mov_b32 v16, v2 :: v_dual_mov_b32 v19, v1
+; GFX11-TRUE16-NEXT: v_dual_mov_b32 v18, v0 :: v_dual_mov_b32 v1, s1
+; GFX11-TRUE16-NEXT: v_dual_mov_b32 v0, s0 :: v_dual_mov_b32 v3, s3
+; GFX11-TRUE16-NEXT: s_delay_alu instid0(VALU_DEP_3)
+; GFX11-TRUE16-NEXT: v_cmp_ne_u32_e32 vcc_lo, 0, v16
+; GFX11-TRUE16-NEXT: v_dual_mov_b32 v2, s2 :: v_dual_mov_b32 v5, s17
+; GFX11-TRUE16-NEXT: v_dual_mov_b32 v4, s16 :: v_dual_mov_b32 v7, s19
+; GFX11-TRUE16-NEXT: v_dual_mov_b32 v6, s18 :: v_dual_mov_b32 v9, s21
+; GFX11-TRUE16-NEXT: v_dual_mov_b32 v8, s20 :: v_dual_mov_b32 v11, s23
+; GFX11-TRUE16-NEXT: v_dual_mov_b32 v10, s22 :: v_dual_mov_b32 v13, s25
+; GFX11-TRUE16-NEXT: v_dual_mov_b32 v12, s24 :: v_dual_mov_b32 v15, s27
+; GFX11-TRUE16-NEXT: v_dual_mov_b32 v14, s26 :: v_dual_mov_b32 v17, s29
+; GFX11-TRUE16-NEXT: v_mov_b32_e32 v16, s28
+; GFX11-TRUE16-NEXT: s_mov_b32 s0, 0
+; GFX11-TRUE16-NEXT: s_and_b32 s1, vcc_lo, exec_lo
+; GFX11-TRUE16-NEXT: s_cbranch_scc0 .LBB33_4
+; GFX11-TRUE16-NEXT: ; %bb.1: ; %cmp.false
+; GFX11-TRUE16-NEXT: v_lshrrev_b32_e32 v20, 16, v19
+; GFX11-TRUE16-NEXT: v_lshrrev_b32_e32 v21, 16, v18
+; GFX11-TRUE16-NEXT: v_lshrrev_b32_e32 v22, 16, v17
+; GFX11-TRUE16-NEXT: v_lshrrev_b32_e32 v23, 16, v16
+; GFX11-TRUE16-NEXT: v_lshrrev_b32_e32 v24, 16, v15
+; GFX11-TRUE16-NEXT: v_lshrrev_b32_e32 v25, 16, v14
+; GFX11-TRUE16-NEXT: v_lshrrev_b32_e32 v26, 16, v13
+; GFX11-TRUE16-NEXT: v_lshrrev_b32_e32 v27, 16, v12
+; GFX11-TRUE16-NEXT: v_lshrrev_b32_e32 v28, 16, v11
+; GFX11-TRUE16-NEXT: v_lshrrev_b32_e32 v29, 16, v10
+; GFX11-TRUE16-NEXT: v_lshrrev_b32_e32 v30, 16, v9
+; GFX11-TRUE16-NEXT: v_lshrrev_b32_e32 v31, 16, v8
+; GFX11-TRUE16-NEXT: v_lshrrev_b32_e32 v32, 16, v7
+; GFX11-TRUE16-NEXT: v_lshrrev_b32_e32 v33, 16, v6
+; GFX11-TRUE16-NEXT: v_lshrrev_b32_e32 v34, 16, v5
+; GFX11-TRUE16-NEXT: v_lshrrev_b32_e32 v35, 16, v4
+; GFX11-TRUE16-NEXT: v_lshrrev_b32_e32 v36, 16, v3
+; GFX11-TRUE16-NEXT: v_lshrrev_b32_e32 v37, 16, v2
+; GFX11-TRUE16-NEXT: v_lshrrev_b32_e32 v38, 16, v1
+; GFX11-TRUE16-NEXT: v_lshrrev_b32_e32 v39, 16, v0
+; GFX11-TRUE16-NEXT: s_and_not1_b32 vcc_lo, exec_lo, s0
+; GFX11-TRUE16-NEXT: s_cbranch_vccnz .LBB33_3
+; GFX11-TRUE16-NEXT: .LBB33_2: ; %cmp.true
+; GFX11-TRUE16-NEXT: v_dual_add_f32 v19, 1.0, v19 :: v_dual_add_f32 v18, 1.0, v18
+; GFX11-TRUE16-NEXT: v_dual_add_f32 v17, 1.0, v17 :: v_dual_add_f32 v16, 1.0, v16
+; GFX11-TRUE16-NEXT: v_dual_add_f32 v15, 1.0, v15 :: v_dual_add_f32 v14, 1.0, v14
+; GFX11-TRUE16-NEXT: v_dual_add_f32 v13, 1.0, v13 :: v_dual_add_f32 v12, 1.0, v12
+; GFX11-TRUE16-NEXT: v_dual_add_f32 v11, 1.0, v11 :: v_dual_add_f32 v10, 1.0, v10
+; GFX11-TRUE16-NEXT: v_dual_add_f32 v9, 1.0, v9 :: v_dual_add_f32 v8, 1.0, v8
+; GFX11-TRUE16-NEXT: v_dual_add_f32 v7, 1.0, v7 :: v_dual_add_f32 v6, 1.0, v6
+; GFX11-TRUE16-NEXT: v_dual_add_f32 v5, 1.0, v5 :: v_dual_add_f32 v4, 1.0, v4
+; GFX11-TRUE16-NEXT: v_dual_add_f32 v3, 1.0, v3 :: v_dual_add_f32 v2, 1.0, v2
+; GFX11-TRUE16-NEXT: v_dual_add_f32 v1, 1.0, v1 :: v_dual_add_f32 v0, 1.0, v0
+; GFX11-TRUE16-NEXT: v_lshrrev_b32_e32 v20, 16, v19
+; GFX11-TRUE16-NEXT: v_lshrrev_b32_e32 v21, 16, v18
+; GFX11-TRUE16-NEXT: v_lshrrev_b32_e32 v22, 16, v17
+; GFX11-TRUE16-NEXT: v_lshrrev_b32_e32 v23, 16, v16
+; GFX11-TRUE16-NEXT: v_lshrrev_b32_e32 v24, 16, v15
+; GFX11-TRUE16-NEXT: v_lshrrev_b32_e32 v25, 16, v14
+; GFX11-TRUE16-NEXT: v_lshrrev_b32_e32 v26, 16, v13
+; GFX11-TRUE16-NEXT: v_lshrrev_b32_e32 v27, 16, v12
+; GFX11-TRUE16-NEXT: v_lshrrev_b32_e32 v28, 16, v11
+; GFX11-TRUE16-NEXT: v_lshrrev_b32_e32 v29, 16, v10
+; GFX11-TRUE16-NEXT: v_lshrrev_b32_e32 v30, 16, v9
+; GFX11-TRUE16-NEXT: v_lshrrev_b32_e32 v31, 16, v8
+; GFX11-TRUE16-NEXT: v_lshrrev_b32_e32 v32, 16, v7
+; GFX11-TRUE16-NEXT: v_lshrrev_b32_e32 v33, 16, v6
+; GFX11-TRUE16-NEXT: v_lshrrev_b32_e32 v34, 16, v5
+; GFX11-TRUE16-NEXT: v_lshrrev_b32_e32 v35, 16, v4
+; GFX11-TRUE16-NEXT: v_lshrrev_b32_e32 v36, 16, v3
+; GFX11-TRUE16-NEXT: v_lshrrev_b32_e32 v37, 16, v2
+; GFX11-TRUE16-NEXT: v_lshrrev_b32_e32 v38, 16, v1
+; GFX11-TRUE16-NEXT: v_lshrrev_b32_e32 v39, 16, v0
+; GFX11-TRUE16-NEXT: .LBB33_3: ; %end
+; GFX11-TRUE16-NEXT: s_delay_alu instid0(VALU_DEP_1) | instskip(NEXT) | instid1(VALU_DEP_4)
+; GFX11-TRUE16-NEXT: v_dual_mov_b32 v39, v39 :: v_dual_mov_b32 v38, v38
+; GFX11-TRUE16-NEXT: v_dual_mov_b32 v37, v37 :: v_dual_mov_b32 v36, v36
+; GFX11-TRUE16-NEXT: v_dual_mov_b32 v35, v35 :: v_dual_mov_b32 v34, v34
+; GFX11-TRUE16-NEXT: v_dual_mov_b32 v33, v33 :: v_dual_mov_b32 v32, v32
+; GFX11-TRUE16-NEXT: v_dual_mov_b32 v31, v31 :: v_dual_mov_b32 v30, v30
+; GFX11-TRUE16-NEXT: v_dual_mov_b32 v29, v29 :: v_dual_mov_b32 v28, v28
+; GFX11-TRUE16-NEXT: v_dual_mov_b32 v27, v27 :: v_dual_mov_b32 v26, v26
+; GFX11-TRUE16-NEXT: v_dual_mov_b32 v25, v25 :: v_dual_mov_b32 v24, v24
+; GFX11-TRUE16-NEXT: v_dual_mov_b32 v23, v23 :: v_dual_mov_b32 v22, v22
+; GFX11-TRUE16-NEXT: v_dual_mov_b32 v21, v21 :: v_dual_mov_b32 v20, v20
+; GFX11-TRUE16-NEXT: v_mov_b16_e32 v0.h, v39.l
+; GFX11-TRUE16-NEXT: v_mov_b16_e32 v1.h, v38.l
+; GFX11-TRUE16-NEXT: v_mov_b16_e32 v2.h, v37.l
+; GFX11-TRUE16-NEXT: v_mov_b16_e32 v3.h, v36.l
+; GFX11-TRUE16-NEXT: v_mov_b16_e32 v4.h, v35.l
+; GFX11-TRUE16-NEXT: v_mov_b16_e32 v5.h, v34.l
+; GFX11-TRUE16-NEXT: v_mov_b16_e32 v6.h, v33.l
+; GFX11-TRUE16-NEXT: v_mov_b16_e32 v7.h, v32.l
+; GFX11-TRUE16-NEXT: v_mov_b16_e32 v8.h, v31.l
+; GFX11-TRUE16-NEXT: v_mov_b16_e32 v9.h, v30.l
+; GFX11-TRUE16-NEXT: v_mov_b16_e32 v10.h, v29.l
+; GFX11-TRUE16-NEXT: v_mov_b16_e32 v11.h, v28.l
+; GFX11-TRUE16-NEXT: v_mov_b16_e32 v12.h, v27.l
+; GFX11-TRUE16-NEXT: v_mov_b16_e32 v13.h, v26.l
+; GFX11-TRUE16-NEXT: v_mov_b16_e32 v14.h, v25.l
+; GFX11-TRUE16-NEXT: v_mov_b16_e32 v15.h, v24.l
+; GFX11-TRUE16-NEXT: v_mov_b16_e32 v16.h, v23.l
+; GFX11-TRUE16-NEXT: v_mov_b16_e32 v17.h, v22.l
+; GFX11-TRUE16-NEXT: v_mov_b16_e32 v18.h, v21.l
+; GFX11-TRUE16-NEXT: v_mov_b16_e32 v19.h, v20.l
+; GFX11-TRUE16-NEXT: s_setpc_b64 s[30:31]
+; GFX11-TRUE16-NEXT: .LBB33_4:
+; GFX11-TRUE16-NEXT: ; implicit-def: $vgpr39
+; GFX11-TRUE16-NEXT: ; implicit-def: $vgpr38
+; GFX11-TRUE16-NEXT: ; implicit-def: $vgpr37
+; GFX11-TRUE16-NEXT: ; implicit-def: $vgpr36
+; GFX11-TRUE16-NEXT: ; implicit-def: $vgpr35
+; GFX11-TRUE16-NEXT: ; implicit-def: $vgpr34
+; GFX11-TRUE16-NEXT: ; implicit-def: $vgpr33
+; GFX11-TRUE16-NEXT: ; implicit-def: $vgpr32
+; GFX11-TRUE16-NEXT: ; implicit-def: $vgpr31
+; GFX11-TRUE16-NEXT: ; implicit-def: $vgpr30
+; GFX11-TRUE16-NEXT: ; implicit-def: $vgpr29
+; GFX11-TRUE16-NEXT: ; implicit-def: $vgpr28
+; GFX11-TRUE16-NEXT: ; implicit-def: $vgpr27
+; GFX11-TRUE16-NEXT: ; implicit-def: $vgpr26
+; GFX11-TRUE16-NEXT: ; implicit-def: $vgpr25
+; GFX11-TRUE16-NEXT: ; implicit-def: $vgpr24
+; GFX11-TRUE16-NEXT: ; implicit-def: $vgpr23
+; GFX11-TRUE16-NEXT: ; implicit-def: $vgpr22
+; GFX11-TRUE16-NEXT: ; implicit-def: $vgpr21
+; GFX11-TRUE16-NEXT: ; implicit-def: $vgpr20
+; GFX11-TRUE16-NEXT: s_branch .LBB33_2
+;
+; GFX11-FAKE16-LABEL: bitcast_v20f32_to_v40f16_scalar:
+; GFX11-FAKE16: ; %bb.0:
+; GFX11-FAKE16-NEXT: s_waitcnt vmcnt(0) expcnt(0) lgkmcnt(0)
+; GFX11-FAKE16-NEXT: v_cmp_ne_u32_e32 vcc_lo, 0, v2
+; GFX11-FAKE16-NEXT: v_dual_mov_b32 v20, s0 :: v_dual_mov_b32 v19, s1
+; GFX11-FAKE16-NEXT: v_dual_mov_b32 v18, s2 :: v_dual_mov_b32 v3, s16
+; GFX11-FAKE16-NEXT: v_dual_mov_b32 v4, s3 :: v_dual_mov_b32 v9, s17
+; GFX11-FAKE16-NEXT: v_dual_mov_b32 v8, s18 :: v_dual_mov_b32 v7, s19
+; GFX11-FAKE16-NEXT: v_dual_mov_b32 v6, s20 :: v_dual_mov_b32 v5, s21
+; GFX11-FAKE16-NEXT: v_dual_mov_b32 v14, s22 :: v_dual_mov_b32 v13, s23
+; GFX11-FAKE16-NEXT: v_dual_mov_b32 v12, s24 :: v_dual_mov_b32 v11, s25
+; GFX11-FAKE16-NEXT: v_dual_mov_b32 v10, s26 :: v_dual_mov_b32 v15, s27
+; GFX11-FAKE16-NEXT: v_dual_mov_b32 v17, s28 :: v_dual_mov_b32 v16, s29
+; GFX11-FAKE16-NEXT: s_mov_b32 s0, 0
+; GFX11-FAKE16-NEXT: s_and_b32 s1, vcc_lo, exec_lo
+; GFX11-FAKE16-NEXT: s_cbranch_scc0 .LBB33_4
+; GFX11-FAKE16-NEXT: ; %bb.1: ; %cmp.false
+; GFX11-FAKE16-NEXT: v_lshrrev_b32_e32 v22, 16, v1
+; GFX11-FAKE16-NEXT: v_lshrrev_b32_e32 v23, 16, v0
+; GFX11-FAKE16-NEXT: v_lshrrev_b32_e32 v24, 16, v16
+; GFX11-FAKE16-NEXT: v_lshrrev_b32_e32 v25, 16, v17
+; GFX11-FAKE16-NEXT: v_lshrrev_b32_e32 v26, 16, v15
+; GFX11-FAKE16-NEXT: v_lshrrev_b32_e32 v27, 16, v10
+; GFX11-FAKE16-NEXT: v_lshrrev_b32_e32 v28, 16, v11
+; GFX11-FAKE16-NEXT: v_lshrrev_b32_e32 v29, 16, v12
+; GFX11-FAKE16-NEXT: v_lshrrev_b32_e32 v30, 16, v13
+; GFX11-FAKE16-NEXT: v_lshrrev_b32_e32 v31, 16, v14
+; GFX11-FAKE16-NEXT: v_lshrrev_b32_e32 v32, 16, v5
+; GFX11-FAKE16-NEXT: v_lshrrev_b32_e32 v33, 16, v6
+; GFX11-FAKE16-NEXT: v_lshrrev_b32_e32 v34, 16, v7
+; GFX11-FAKE16-NEXT: v_lshrrev_b32_e32 v35, 16, v8
+; GFX11-FAKE16-NEXT: v_lshrrev_b32_e32 v36, 16, v9
+; GFX11-FAKE16-NEXT: v_lshrrev_b32_e32 v37, 16, v3
+; GFX11-FAKE16-NEXT: v_lshrrev_b32_e32 v38, 16, v4
+; GFX11-FAKE16-NEXT: v_lshrrev_b32_e32 v2, 16, v18
+; GFX11-FAKE16-NEXT: v_lshrrev_b32_e32 v21, 16, v19
+; GFX11-FAKE16-NEXT: v_lshrrev_b32_e32 v39, 16, v20
+; GFX11-FAKE16-NEXT: s_and_not1_b32 vcc_lo, exec_lo, s0
+; GFX11-FAKE16-NEXT: s_cbranch_vccnz .LBB33_3
+; GFX11-FAKE16-NEXT: .LBB33_2: ; %cmp.true
+; GFX11-FAKE16-NEXT: v_dual_add_f32 v1, 1.0, v1 :: v_dual_add_f32 v0, 1.0, v0
+; GFX11-FAKE16-NEXT: v_dual_add_f32 v16, 1.0, v16 :: v_dual_add_f32 v17, 1.0, v17
+; GFX11-FAKE16-NEXT: v_dual_add_f32 v15, 1.0, v15 :: v_dual_add_f32 v10, 1.0, v10
+; GFX11-FAKE16-NEXT: v_dual_add_f32 v11, 1.0, v11 :: v_dual_add_f32 v12, 1.0, v12
+; GFX11-FAKE16-NEXT: v_dual_add_f32 v13, 1.0, v13 :: v_dual_add_f32 v14, 1.0, v14
+; GFX11-FAKE16-NEXT: v_dual_add_f32 v5, 1.0, v5 :: v_dual_add_f32 v6, 1.0, v6
+; GFX11-FAKE16-NEXT: v_dual_add_f32 v7, 1.0, v7 :: v_dual_add_f32 v8, 1.0, v8
+; GFX11-FAKE16-NEXT: v_dual_add_f32 v9, 1.0, v9 :: v_dual_add_f32 v4, 1.0, v4
+; GFX11-FAKE16-NEXT: v_dual_add_f32 v3, 1.0, v3 :: v_dual_add_f32 v18, 1.0, v18
+; GFX11-FAKE16-NEXT: v_dual_add_f32 v19, 1.0, v19 :: v_dual_add_f32 v20, 1.0, v20
+; GFX11-FAKE16-NEXT: v_lshrrev_b32_e32 v22, 16, v1
+; GFX11-FAKE16-NEXT: v_lshrrev_b32_e32 v23, 16, v0
+; GFX11-FAKE16-NEXT: v_lshrrev_b32_e32 v24, 16, v16
+; GFX11-FAKE16-NEXT: v_lshrrev_b32_e32 v25, 16, v17
+; GFX11-FAKE16-NEXT: v_lshrrev_b32_e32 v26, 16, v15
+; GFX11-FAKE16-NEXT: v_lshrrev_b32_e32 v27, 16, v10
+; GFX11-FAKE16-NEXT: v_lshrrev_b32_e32 v28, 16, v11
+; GFX11-FAKE16-NEXT: v_lshrrev_b32_e32 v29, 16, v12
+; GFX11-FAKE16-NEXT: v_lshrrev_b32_e32 v30, 16, v13
+; GFX11-FAKE16-NEXT: v_lshrrev_b32_e32 v31, 16, v14
+; GFX11-FAKE16-NEXT: v_lshrrev_b32_e32 v32, 16, v5
+; GFX11-FAKE16-NEXT: v_lshrrev_b32_e32 v33, 16, v6
+; GFX11-FAKE16-NEXT: v_lshrrev_b32_e32 v34, 16, v7
+; GFX11-FAKE16-NEXT: v_lshrrev_b32_e32 v35, 16, v8
+; GFX11-FAKE16-NEXT: v_lshrrev_b32_e32 v36, 16, v9
+; GFX11-FAKE16-NEXT: v_lshrrev_b32_e32 v37, 16, v3
+; GFX11-FAKE16-NEXT: v_lshrrev_b32_e32 v38, 16, v4
+; GFX11-FAKE16-NEXT: v_lshrrev_b32_e32 v2, 16, v18
+; GFX11-FAKE16-NEXT: v_lshrrev_b32_e32 v21, 16, v19
+; GFX11-FAKE16-NEXT: v_lshrrev_b32_e32 v39, 16, v20
+; GFX11-FAKE16-NEXT: .LBB33_3: ; %end
+; GFX11-FAKE16-NEXT: v_and_b32_e32 v19, 0xffff, v19
+; GFX11-FAKE16-NEXT: v_and_b32_e32 v18, 0xffff, v18
+; GFX11-FAKE16-NEXT: v_and_b32_e32 v9, 0xffff, v9
+; GFX11-FAKE16-NEXT: v_and_b32_e32 v8, 0xffff, v8
+; GFX11-FAKE16-NEXT: v_and_b32_e32 v14, 0xffff, v14
+; GFX11-FAKE16-NEXT: v_lshl_or_b32 v21, v21, 16, v19
+; GFX11-FAKE16-NEXT: v_lshl_or_b32 v2, v2, 16, v18
+; GFX11-FAKE16-NEXT: v_and_b32_e32 v18, 0xffff, v6
+; GFX11-FAKE16-NEXT: v_and_b32_e32 v19, 0xffff, v5
+; GFX11-FAKE16-NEXT: v_and_b32_e32 v20, 0xffff, v20
+; GFX11-FAKE16-NEXT: v_lshl_or_b32 v5, v36, 16, v9
+; GFX11-FAKE16-NEXT: v_lshl_or_b32 v6, v35, 16, v8
+; GFX11-FAKE16-NEXT: v_lshl_or_b32 v8, v33, 16, v18
+; GFX11-FAKE16-NEXT: v_lshl_or_b32 v9, v32, 16, v19
+; GFX11-FAKE16-NEXT: v_and_b32_e32 v13, 0xffff, v13
+; GFX11-FAKE16-NEXT: v_and_b32_e32 v18, 0xffff, v11
+; GFX11-FAKE16-NEXT: v_and_b32_e32 v19, 0xffff, v10
+; GFX11-FAKE16-NEXT: v_and_b32_e32 v1, 0xffff, v1
+; GFX11-FAKE16-NEXT: v_and_b32_e32 v4, 0xffff, v4
+; GFX11-FAKE16-NEXT: v_and_b32_e32 v48, 0xffff, v3
+; GFX11-FAKE16-NEXT: v_and_b32_e32 v7, 0xffff, v7
+; GFX11-FAKE16-NEXT: v_and_b32_e32 v12, 0xffff, v12
+; GFX11-FAKE16-NEXT: v_lshl_or_b32 v10, v31, 16, v14
+; GFX11-FAKE16-NEXT: v_lshl_or_b32 v11, v30, 16, v13
+; GFX11-FAKE16-NEXT: v_lshl_or_b32 v13, v28, 16, v18
+; GFX11-FAKE16-NEXT: v_lshl_or_b32 v14, v27, 16, v19
+; GFX11-FAKE16-NEXT: v_and_b32_e32 v15, 0xffff, v15
+; GFX11-FAKE16-NEXT: v_and_b32_e32 v17, 0xffff, v17
+; GFX11-FAKE16-NEXT: v_and_b32_e32 v18, 0xffff, v16
+; GFX11-FAKE16-NEXT: v_and_b32_e32 v0, 0xffff, v0
+; GFX11-FAKE16-NEXT: v_lshl_or_b32 v19, v22, 16, v1
+; GFX11-FAKE16-NEXT: v_mov_b32_e32 v1, v21
+; GFX11-FAKE16-NEXT: v_lshl_or_b32 v20, v39, 16, v20
+; GFX11-FAKE16-NEXT: v_lshl_or_b32 v3, v38, 16, v4
+; GFX11-FAKE16-NEXT: v_lshl_or_b32 v4, v37, 16, v48
+; GFX11-FAKE16-NEXT: v_lshl_or_b32 v7, v34, 16, v7
+; GFX11-FAKE16-NEXT: v_lshl_or_b32 v12, v29, 16, v12
+; GFX11-FAKE16-NEXT: v_lshl_or_b32 v15, v26, 16, v15
+; GFX11-FAKE16-NEXT: v_lshl_or_b32 v16, v25, 16, v17
+; GFX11-FAKE16-NEXT: v_lshl_or_b32 v17, v24, 16, v18
+; GFX11-FAKE16-NEXT: v_lshl_or_b32 v18, v23, 16, v0
+; GFX11-FAKE16-NEXT: v_mov_b32_e32 v0, v20
+; GFX11-FAKE16-NEXT: s_setpc_b64 s[30:31]
+; GFX11-FAKE16-NEXT: .LBB33_4:
+; GFX11-FAKE16-NEXT: ; implicit-def: $vgpr39
+; GFX11-FAKE16-NEXT: ; implicit-def: $vgpr21
+; GFX11-FAKE16-NEXT: ; implicit-def: $vgpr2
+; GFX11-FAKE16-NEXT: ; implicit-def: $vgpr38
+; GFX11-FAKE16-NEXT: ; implicit-def: $vgpr37
+; GFX11-FAKE16-NEXT: ; implicit-def: $vgpr36
+; GFX11-FAKE16-NEXT: ; implicit-def: $vgpr35
+; GFX11-FAKE16-NEXT: ; implicit-def: $vgpr34
+; GFX11-FAKE16-NEXT: ; implicit-def: $vgpr33
+; GFX11-FAKE16-NEXT: ; implicit-def: $vgpr32
+; GFX11-FAKE16-NEXT: ; implicit-def: $vgpr31
+; GFX11-FAKE16-NEXT: ; implicit-def: $vgpr30
+; GFX11-FAKE16-NEXT: ; implicit-def: $vgpr29
+; GFX11-FAKE16-NEXT: ; implicit-def: $vgpr28
+; GFX11-FAKE16-NEXT: ; implicit-def: $vgpr27
+; GFX11-FAKE16-NEXT: ; implicit-def: $vgpr26
+; GFX11-FAKE16-NEXT: ; implicit-def: $vgpr25
+; GFX11-FAKE16-NEXT: ; implicit-def: $vgpr24
+; GFX11-FAKE16-NEXT: ; implicit-def: $vgpr23
+; GFX11-FAKE16-NEXT: ; implicit-def: $vgpr22
+; GFX11-FAKE16-NEXT: s_branch .LBB33_2
%cmp = icmp eq i32 %b, 0
br i1 %cmp, label %cmp.true, label %cmp.false
@@ -16043,93 +16832,270 @@ define inreg <20 x float> @bitcast_v40f16_to_v20f32_scalar(<40 x half> inreg %a,
; GFX11-TRUE16-LABEL: bitcast_v40f16_to_v20f32_scalar:
; GFX11-TRUE16: ; %bb.0:
; GFX11-TRUE16-NEXT: s_waitcnt vmcnt(0) expcnt(0) lgkmcnt(0)
-; GFX11-TRUE16-NEXT: v_mov_b16_e32 v32.h, 0
; GFX11-TRUE16-NEXT: v_cmp_ne_u32_e32 vcc_lo, 0, v2
-; GFX11-TRUE16-NEXT: v_mov_b16_e32 v32.l, v1.h
-; GFX11-TRUE16-NEXT: v_mov_b16_e32 v33.l, v0.h
-; GFX11-TRUE16-NEXT: v_and_b32_e32 v35, 0xffff, v0
-; GFX11-TRUE16-NEXT: v_mov_b16_e32 v33.h, v32.h
-; GFX11-TRUE16-NEXT: v_and_b32_e32 v34, 0xffff, v1
-; GFX11-TRUE16-NEXT: s_lshr_b32 s41, s29, 16
-; GFX11-TRUE16-NEXT: s_lshr_b32 s42, s28, 16
-; GFX11-TRUE16-NEXT: s_lshr_b32 s43, s27, 16
-; GFX11-TRUE16-NEXT: s_lshr_b32 s44, s26, 16
-; GFX11-TRUE16-NEXT: s_lshr_b32 s45, s25, 16
-; GFX11-TRUE16-NEXT: s_lshr_b32 s15, s24, 16
-; GFX11-TRUE16-NEXT: s_lshr_b32 s14, s23, 16
-; GFX11-TRUE16-NEXT: s_lshr_b32 s13, s22, 16
-; GFX11-TRUE16-NEXT: s_lshr_b32 s12, s21, 16
-; GFX11-TRUE16-NEXT: s_lshr_b32 s11, s20, 16
-; GFX11-TRUE16-NEXT: s_lshr_b32 s10, s19, 16
-; GFX11-TRUE16-NEXT: s_lshr_b32 s9, s18, 16
-; GFX11-TRUE16-NEXT: s_lshr_b32 s8, s17, 16
-; GFX11-TRUE16-NEXT: s_lshr_b32 s7, s16, 16
-; GFX11-TRUE16-NEXT: s_lshr_b32 s46, s3, 16
-; GFX11-TRUE16-NEXT: s_lshr_b32 s6, s2, 16
-; GFX11-TRUE16-NEXT: s_lshr_b32 s5, s1, 16
-; GFX11-TRUE16-NEXT: s_lshr_b32 s4, s0, 16
-; GFX11-TRUE16-NEXT: s_mov_b32 s40, 0
-; GFX11-TRUE16-NEXT: s_pack_ll_b32_b16 s4, s0, s4
-; GFX11-TRUE16-NEXT: s_pack_ll_b32_b16 s5, s1, s5
-; GFX11-TRUE16-NEXT: s_pack_ll_b32_b16 s6, s2, s6
-; GFX11-TRUE16-NEXT: s_pack_ll_b32_b16 s3, s3, s46
-; GFX11-TRUE16-NEXT: s_pack_ll_b32_b16 s7, s16, s7
-; GFX11-TRUE16-NEXT: s_pack_ll_b32_b16 s8, s17, s8
-; GFX11-TRUE16-NEXT: s_pack_ll_b32_b16 s9, s18, s9
-; GFX11-TRUE16-NEXT: s_pack_ll_b32_b16 s10, s19, s10
-; GFX11-TRUE16-NEXT: s_pack_ll_b32_b16 s11, s20, s11
-; GFX11-TRUE16-NEXT: s_pack_ll_b32_b16 s12, s21, s12
-; GFX11-TRUE16-NEXT: s_pack_ll_b32_b16 s13, s22, s13
-; GFX11-TRUE16-NEXT: s_pack_ll_b32_b16 s14, s23, s14
-; GFX11-TRUE16-NEXT: s_pack_ll_b32_b16 s15, s24, s15
-; GFX11-TRUE16-NEXT: s_pack_ll_b32_b16 s16, s25, s45
-; GFX11-TRUE16-NEXT: s_pack_ll_b32_b16 s17, s26, s44
-; GFX11-TRUE16-NEXT: s_pack_ll_b32_b16 s0, s27, s43
-; GFX11-TRUE16-NEXT: s_pack_ll_b32_b16 s1, s28, s42
-; GFX11-TRUE16-NEXT: s_pack_ll_b32_b16 s2, s29, s41
+; GFX11-TRUE16-NEXT: s_clause 0x1f
+; GFX11-TRUE16-NEXT: scratch_store_b32 off, v40, s32 offset:296
+; GFX11-TRUE16-NEXT: scratch_store_b32 off, v41, s32 offset:292
+; GFX11-TRUE16-NEXT: scratch_store_b32 off, v42, s32 offset:288
+; GFX11-TRUE16-NEXT: scratch_store_b32 off, v43, s32 offset:284
+; GFX11-TRUE16-NEXT: scratch_store_b32 off, v44, s32 offset:280
+; GFX11-TRUE16-NEXT: scratch_store_b32 off, v45, s32 offset:276
+; GFX11-TRUE16-NEXT: scratch_store_b32 off, v46, s32 offset:272
+; GFX11-TRUE16-NEXT: scratch_store_b32 off, v47, s32 offset:268
+; GFX11-TRUE16-NEXT: scratch_store_b32 off, v56, s32 offset:264
+; GFX11-TRUE16-NEXT: scratch_store_b32 off, v57, s32 offset:260
+; GFX11-TRUE16-NEXT: scratch_store_b32 off, v58, s32 offset:256
+; GFX11-TRUE16-NEXT: scratch_store_b32 off, v59, s32 offset:252
+; GFX11-TRUE16-NEXT: scratch_store_b32 off, v60, s32 offset:248
+; GFX11-TRUE16-NEXT: scratch_store_b32 off, v61, s32 offset:244
+; GFX11-TRUE16-NEXT: scratch_store_b32 off, v62, s32 offset:240
+; GFX11-TRUE16-NEXT: scratch_store_b32 off, v63, s32 offset:236
+; GFX11-TRUE16-NEXT: scratch_store_b32 off, v72, s32 offset:232
+; GFX11-TRUE16-NEXT: scratch_store_b32 off, v73, s32 offset:228
+; GFX11-TRUE16-NEXT: scratch_store_b32 off, v74, s32 offset:224
+; GFX11-TRUE16-NEXT: scratch_store_b32 off, v75, s32 offset:220
+; GFX11-TRUE16-NEXT: scratch_store_b32 off, v76, s32 offset:216
+; GFX11-TRUE16-NEXT: scratch_store_b32 off, v77, s32 offset:212
+; GFX11-TRUE16-NEXT: scratch_store_b32 off, v78, s32 offset:208
+; GFX11-TRUE16-NEXT: scratch_store_b32 off, v79, s32 offset:204
+; GFX11-TRUE16-NEXT: scratch_store_b32 off, v88, s32 offset:200
+; GFX11-TRUE16-NEXT: scratch_store_b32 off, v89, s32 offset:196
+; GFX11-TRUE16-NEXT: scratch_store_b32 off, v90, s32 offset:192
+; GFX11-TRUE16-NEXT: scratch_store_b32 off, v91, s32 offset:188
+; GFX11-TRUE16-NEXT: scratch_store_b32 off, v92, s32 offset:184
+; GFX11-TRUE16-NEXT: scratch_store_b32 off, v93, s32 offset:180
+; GFX11-TRUE16-NEXT: scratch_store_b32 off, v94, s32 offset:176
+; GFX11-TRUE16-NEXT: scratch_store_b32 off, v95, s32 offset:172
+; GFX11-TRUE16-NEXT: s_clause 0x1f
+; GFX11-TRUE16-NEXT: scratch_store_b32 off, v104, s32 offset:168
+; GFX11-TRUE16-NEXT: scratch_store_b32 off, v105, s32 offset:164
+; GFX11-TRUE16-NEXT: scratch_store_b32 off, v106, s32 offset:160
+; GFX11-TRUE16-NEXT: scratch_store_b32 off, v107, s32 offset:156
+; GFX11-TRUE16-NEXT: scratch_store_b32 off, v108, s32 offset:152
+; GFX11-TRUE16-NEXT: scratch_store_b32 off, v109, s32 offset:148
+; GFX11-TRUE16-NEXT: scratch_store_b32 off, v110, s32 offset:144
+; GFX11-TRUE16-NEXT: scratch_store_b32 off, v111, s32 offset:140
+; GFX11-TRUE16-NEXT: scratch_store_b32 off, v120, s32 offset:136
+; GFX11-TRUE16-NEXT: scratch_store_b32 off, v121, s32 offset:132
+; GFX11-TRUE16-NEXT: scratch_store_b32 off, v122, s32 offset:128
+; GFX11-TRUE16-NEXT: scratch_store_b32 off, v123, s32 offset:124
+; GFX11-TRUE16-NEXT: scratch_store_b32 off, v124, s32 offset:120
+; GFX11-TRUE16-NEXT: scratch_store_b32 off, v125, s32 offset:116
+; GFX11-TRUE16-NEXT: scratch_store_b32 off, v126, s32 offset:112
+; GFX11-TRUE16-NEXT: scratch_store_b32 off, v127, s32 offset:108
+; GFX11-TRUE16-NEXT: scratch_store_b32 off, v136, s32 offset:104
+; GFX11-TRUE16-NEXT: scratch_store_b32 off, v137, s32 offset:100
+; GFX11-TRUE16-NEXT: scratch_store_b32 off, v138, s32 offset:96
+; GFX11-TRUE16-NEXT: scratch_store_b32 off, v139, s32 offset:92
+; GFX11-TRUE16-NEXT: scratch_store_b32 off, v140, s32 offset:88
+; GFX11-TRUE16-NEXT: scratch_store_b32 off, v141, s32 offset:84
+; GFX11-TRUE16-NEXT: scratch_store_b32 off, v142, s32 offset:80
+; GFX11-TRUE16-NEXT: scratch_store_b32 off, v143, s32 offset:76
+; GFX11-TRUE16-NEXT: scratch_store_b32 off, v152, s32 offset:72
+; GFX11-TRUE16-NEXT: scratch_store_b32 off, v153, s32 offset:68
+; GFX11-TRUE16-NEXT: scratch_store_b32 off, v154, s32 offset:64
+; GFX11-TRUE16-NEXT: scratch_store_b32 off, v155, s32 offset:60
+; GFX11-TRUE16-NEXT: scratch_store_b32 off, v156, s32 offset:56
+; GFX11-TRUE16-NEXT: scratch_store_b32 off, v157, s32 offset:52
+; GFX11-TRUE16-NEXT: scratch_store_b32 off, v158, s32 offset:48
+; GFX11-TRUE16-NEXT: scratch_store_b32 off, v159, s32 offset:44
+; GFX11-TRUE16-NEXT: s_clause 0xa
+; GFX11-TRUE16-NEXT: scratch_store_b32 off, v168, s32 offset:40
+; GFX11-TRUE16-NEXT: scratch_store_b32 off, v169, s32 offset:36
+; GFX11-TRUE16-NEXT: scratch_store_b32 off, v170, s32 offset:32
+; GFX11-TRUE16-NEXT: scratch_store_b32 off, v171, s32 offset:28
+; GFX11-TRUE16-NEXT: scratch_store_b32 off, v172, s32 offset:24
+; GFX11-TRUE16-NEXT: scratch_store_b32 off, v173, s32 offset:20
+; GFX11-TRUE16-NEXT: scratch_store_b32 off, v174, s32 offset:16
+; GFX11-TRUE16-NEXT: scratch_store_b32 off, v175, s32 offset:12
+; GFX11-TRUE16-NEXT: scratch_store_b32 off, v184, s32 offset:8
+; GFX11-TRUE16-NEXT: scratch_store_b32 off, v185, s32 offset:4
+; GFX11-TRUE16-NEXT: scratch_store_b32 off, v186, s32
+; GFX11-TRUE16-NEXT: v_dual_mov_b32 v185, v1 :: v_dual_mov_b32 v186, v0
+; GFX11-TRUE16-NEXT: s_lshr_b32 s15, s29, 16
+; GFX11-TRUE16-NEXT: s_lshr_b32 s14, s28, 16
+; GFX11-TRUE16-NEXT: s_lshr_b32 s13, s27, 16
+; GFX11-TRUE16-NEXT: s_lshr_b32 s12, s26, 16
+; GFX11-TRUE16-NEXT: s_lshr_b32 s11, s25, 16
+; GFX11-TRUE16-NEXT: s_lshr_b32 s10, s24, 16
+; GFX11-TRUE16-NEXT: s_lshr_b32 s9, s23, 16
+; GFX11-TRUE16-NEXT: s_lshr_b32 s8, s22, 16
+; GFX11-TRUE16-NEXT: s_lshr_b32 s7, s21, 16
+; GFX11-TRUE16-NEXT: s_lshr_b32 s6, s20, 16
+; GFX11-TRUE16-NEXT: s_lshr_b32 s5, s19, 16
+; GFX11-TRUE16-NEXT: s_lshr_b32 s4, s18, 16
+; GFX11-TRUE16-NEXT: s_lshr_b32 s43, s17, 16
+; GFX11-TRUE16-NEXT: s_lshr_b32 s44, s16, 16
+; GFX11-TRUE16-NEXT: s_lshr_b32 s45, s3, 16
+; GFX11-TRUE16-NEXT: s_lshr_b32 s46, s2, 16
+; GFX11-TRUE16-NEXT: s_lshr_b32 s41, s1, 16
+; GFX11-TRUE16-NEXT: s_lshr_b32 s40, s0, 16
+; GFX11-TRUE16-NEXT: s_mov_b32 s42, 0
+; GFX11-TRUE16-NEXT: s_pack_ll_b32_b16 s40, s0, s40
+; GFX11-TRUE16-NEXT: s_pack_ll_b32_b16 s41, s1, s41
+; GFX11-TRUE16-NEXT: s_pack_ll_b32_b16 s0, s2, s46
+; GFX11-TRUE16-NEXT: s_pack_ll_b32_b16 s1, s3, s45
+; GFX11-TRUE16-NEXT: s_pack_ll_b32_b16 s2, s16, s44
+; GFX11-TRUE16-NEXT: s_pack_ll_b32_b16 s3, s17, s43
+; GFX11-TRUE16-NEXT: s_pack_ll_b32_b16 s4, s18, s4
+; GFX11-TRUE16-NEXT: s_pack_ll_b32_b16 s5, s19, s5
+; GFX11-TRUE16-NEXT: s_pack_ll_b32_b16 s6, s20, s6
+; GFX11-TRUE16-NEXT: s_pack_ll_b32_b16 s7, s21, s7
+; GFX11-TRUE16-NEXT: s_pack_ll_b32_b16 s8, s22, s8
+; GFX11-TRUE16-NEXT: s_pack_ll_b32_b16 s9, s23, s9
+; GFX11-TRUE16-NEXT: s_pack_ll_b32_b16 s10, s24, s10
+; GFX11-TRUE16-NEXT: s_pack_ll_b32_b16 s11, s25, s11
+; GFX11-TRUE16-NEXT: s_pack_ll_b32_b16 s12, s26, s12
+; GFX11-TRUE16-NEXT: s_pack_ll_b32_b16 s13, s27, s13
+; GFX11-TRUE16-NEXT: s_pack_ll_b32_b16 s14, s28, s14
+; GFX11-TRUE16-NEXT: s_pack_ll_b32_b16 s15, s29, s15
; GFX11-TRUE16-NEXT: s_and_b32 s47, vcc_lo, exec_lo
; GFX11-TRUE16-NEXT: s_cbranch_scc0 .LBB35_4
; GFX11-TRUE16-NEXT: ; %bb.1: ; %cmp.false
-; GFX11-TRUE16-NEXT: v_lshl_or_b32 v18, v33, 16, v35
-; GFX11-TRUE16-NEXT: v_lshl_or_b32 v19, v32, 16, v34
-; GFX11-TRUE16-NEXT: v_dual_mov_b32 v0, s4 :: v_dual_mov_b32 v1, s5
-; GFX11-TRUE16-NEXT: v_dual_mov_b32 v2, s6 :: v_dual_mov_b32 v3, s3
-; GFX11-TRUE16-NEXT: v_dual_mov_b32 v4, s7 :: v_dual_mov_b32 v5, s8
-; GFX11-TRUE16-NEXT: v_dual_mov_b32 v6, s9 :: v_dual_mov_b32 v7, s10
-; GFX11-TRUE16-NEXT: v_dual_mov_b32 v8, s11 :: v_dual_mov_b32 v9, s12
-; GFX11-TRUE16-NEXT: v_dual_mov_b32 v10, s13 :: v_dual_mov_b32 v11, s14
-; GFX11-TRUE16-NEXT: v_dual_mov_b32 v12, s15 :: v_dual_mov_b32 v13, s16
-; GFX11-TRUE16-NEXT: v_dual_mov_b32 v14, s17 :: v_dual_mov_b32 v15, s0
-; GFX11-TRUE16-NEXT: v_dual_mov_b32 v16, s1 :: v_dual_mov_b32 v17, s2
-; GFX11-TRUE16-NEXT: s_and_not1_b32 vcc_lo, exec_lo, s40
+; GFX11-TRUE16-NEXT: v_dual_mov_b32 v0, s40 :: v_dual_mov_b32 v5, s0
+; GFX11-TRUE16-NEXT: v_dual_mov_b32 v2, s41 :: v_dual_mov_b32 v9, s1
+; GFX11-TRUE16-NEXT: v_dual_mov_b32 v14, s2 :: v_dual_mov_b32 v27, s4
+; GFX11-TRUE16-NEXT: v_dual_mov_b32 v20, s3 :: v_dual_mov_b32 v35, s5
+; GFX11-TRUE16-NEXT: v_dual_mov_b32 v44, s6 :: v_dual_mov_b32 v65, s8
+; GFX11-TRUE16-NEXT: v_dual_mov_b32 v54, s7 :: v_dual_mov_b32 v77, s9
+; GFX11-TRUE16-NEXT: v_dual_mov_b32 v90, s10 :: v_dual_mov_b32 v119, s12
+; GFX11-TRUE16-NEXT: v_dual_mov_b32 v104, s11 :: v_dual_mov_b32 v135, s13
+; GFX11-TRUE16-NEXT: v_mov_b32_e32 v152, s14
+; GFX11-TRUE16-NEXT: v_mov_b32_e32 v170, s15
+; GFX11-TRUE16-NEXT: s_and_not1_b32 vcc_lo, exec_lo, s42
; GFX11-TRUE16-NEXT: s_cbranch_vccnz .LBB35_3
; GFX11-TRUE16-NEXT: .LBB35_2: ; %cmp.true
-; GFX11-TRUE16-NEXT: v_lshl_or_b32 v18, v33, 16, v35
-; GFX11-TRUE16-NEXT: v_lshl_or_b32 v19, v32, 16, v34
-; GFX11-TRUE16-NEXT: v_pk_add_f16 v0, 0x200, s4 op_sel_hi:[0,1]
-; GFX11-TRUE16-NEXT: v_pk_add_f16 v1, 0x200, s5 op_sel_hi:[0,1]
-; GFX11-TRUE16-NEXT: v_pk_add_f16 v2, 0x200, s6 op_sel_hi:[0,1]
-; GFX11-TRUE16-NEXT: v_pk_add_f16 v3, 0x200, s3 op_sel_hi:[0,1]
-; GFX11-TRUE16-NEXT: v_pk_add_f16 v4, 0x200, s7 op_sel_hi:[0,1]
-; GFX11-TRUE16-NEXT: v_pk_add_f16 v5, 0x200, s8 op_sel_hi:[0,1]
-; GFX11-TRUE16-NEXT: v_pk_add_f16 v6, 0x200, s9 op_sel_hi:[0,1]
-; GFX11-TRUE16-NEXT: v_pk_add_f16 v7, 0x200, s10 op_sel_hi:[0,1]
-; GFX11-TRUE16-NEXT: v_pk_add_f16 v8, 0x200, s11 op_sel_hi:[0,1]
-; GFX11-TRUE16-NEXT: v_pk_add_f16 v9, 0x200, s12 op_sel_hi:[0,1]
-; GFX11-TRUE16-NEXT: v_pk_add_f16 v10, 0x200, s13 op_sel_hi:[0,1]
-; GFX11-TRUE16-NEXT: v_pk_add_f16 v11, 0x200, s14 op_sel_hi:[0,1]
-; GFX11-TRUE16-NEXT: v_pk_add_f16 v12, 0x200, s15 op_sel_hi:[0,1]
-; GFX11-TRUE16-NEXT: v_pk_add_f16 v13, 0x200, s16 op_sel_hi:[0,1]
-; GFX11-TRUE16-NEXT: v_pk_add_f16 v14, 0x200, s17 op_sel_hi:[0,1]
-; GFX11-TRUE16-NEXT: v_pk_add_f16 v15, 0x200, s0 op_sel_hi:[0,1]
-; GFX11-TRUE16-NEXT: v_pk_add_f16 v16, 0x200, s1 op_sel_hi:[0,1]
-; GFX11-TRUE16-NEXT: v_pk_add_f16 v17, 0x200, s2 op_sel_hi:[0,1]
-; GFX11-TRUE16-NEXT: v_pk_add_f16 v18, 0x200, v18 op_sel_hi:[0,1]
-; GFX11-TRUE16-NEXT: v_pk_add_f16 v19, 0x200, v19 op_sel_hi:[0,1]
+; GFX11-TRUE16-NEXT: v_pk_add_f16 v0, 0x200, s40 op_sel_hi:[0,1]
+; GFX11-TRUE16-NEXT: v_pk_add_f16 v2, 0x200, s41 op_sel_hi:[0,1]
+; GFX11-TRUE16-NEXT: v_pk_add_f16 v186, 0x200, v186 op_sel_hi:[0,1]
+; GFX11-TRUE16-NEXT: v_pk_add_f16 v185, 0x200, v185 op_sel_hi:[0,1]
+; GFX11-TRUE16-NEXT: v_pk_add_f16 v5, 0x200, s0 op_sel_hi:[0,1]
+; GFX11-TRUE16-NEXT: v_pk_add_f16 v9, 0x200, s1 op_sel_hi:[0,1]
+; GFX11-TRUE16-NEXT: v_pk_add_f16 v14, 0x200, s2 op_sel_hi:[0,1]
+; GFX11-TRUE16-NEXT: v_pk_add_f16 v20, 0x200, s3 op_sel_hi:[0,1]
+; GFX11-TRUE16-NEXT: v_pk_add_f16 v27, 0x200, s4 op_sel_hi:[0,1]
+; GFX11-TRUE16-NEXT: v_pk_add_f16 v35, 0x200, s5 op_sel_hi:[0,1]
+; GFX11-TRUE16-NEXT: v_pk_add_f16 v44, 0x200, s6 op_sel_hi:[0,1]
+; GFX11-TRUE16-NEXT: v_pk_add_f16 v54, 0x200, s7 op_sel_hi:[0,1]
+; GFX11-TRUE16-NEXT: v_pk_add_f16 v65, 0x200, s8 op_sel_hi:[0,1]
+; GFX11-TRUE16-NEXT: v_pk_add_f16 v77, 0x200, s9 op_sel_hi:[0,1]
+; GFX11-TRUE16-NEXT: v_pk_add_f16 v90, 0x200, s10 op_sel_hi:[0,1]
+; GFX11-TRUE16-NEXT: v_pk_add_f16 v104, 0x200, s11 op_sel_hi:[0,1]
+; GFX11-TRUE16-NEXT: v_pk_add_f16 v119, 0x200, s12 op_sel_hi:[0,1]
+; GFX11-TRUE16-NEXT: v_pk_add_f16 v135, 0x200, s13 op_sel_hi:[0,1]
+; GFX11-TRUE16-NEXT: v_pk_add_f16 v152, 0x200, s14 op_sel_hi:[0,1]
+; GFX11-TRUE16-NEXT: v_pk_add_f16 v170, 0x200, s15 op_sel_hi:[0,1]
; GFX11-TRUE16-NEXT: .LBB35_3: ; %end
+; GFX11-TRUE16-NEXT: v_dual_mov_b32 v7, v35 :: v_dual_mov_b32 v8, v44
+; GFX11-TRUE16-NEXT: v_dual_mov_b32 v11, v77 :: v_dual_mov_b32 v12, v90
+; GFX11-TRUE16-NEXT: v_mov_b32_e32 v13, v104
+; GFX11-TRUE16-NEXT: v_dual_mov_b32 v15, v135 :: v_dual_mov_b32 v16, v152
+; GFX11-TRUE16-NEXT: v_mov_b32_e32 v17, v170
+; GFX11-TRUE16-NEXT: v_dual_mov_b32 v18, v186 :: v_dual_mov_b32 v19, v185
+; GFX11-TRUE16-NEXT: s_clause 0x1f
+; GFX11-TRUE16-NEXT: scratch_load_b32 v186, off, s32
+; GFX11-TRUE16-NEXT: scratch_load_b32 v185, off, s32 offset:4
+; GFX11-TRUE16-NEXT: scratch_load_b32 v184, off, s32 offset:8
+; GFX11-TRUE16-NEXT: scratch_load_b32 v175, off, s32 offset:12
+; GFX11-TRUE16-NEXT: scratch_load_b32 v174, off, s32 offset:16
+; GFX11-TRUE16-NEXT: scratch_load_b32 v173, off, s32 offset:20
+; GFX11-TRUE16-NEXT: scratch_load_b32 v172, off, s32 offset:24
+; GFX11-TRUE16-NEXT: scratch_load_b32 v171, off, s32 offset:28
+; GFX11-TRUE16-NEXT: scratch_load_b32 v170, off, s32 offset:32
+; GFX11-TRUE16-NEXT: scratch_load_b32 v169, off, s32 offset:36
+; GFX11-TRUE16-NEXT: scratch_load_b32 v168, off, s32 offset:40
+; GFX11-TRUE16-NEXT: scratch_load_b32 v159, off, s32 offset:44
+; GFX11-TRUE16-NEXT: scratch_load_b32 v158, off, s32 offset:48
+; GFX11-TRUE16-NEXT: scratch_load_b32 v157, off, s32 offset:52
+; GFX11-TRUE16-NEXT: scratch_load_b32 v156, off, s32 offset:56
+; GFX11-TRUE16-NEXT: scratch_load_b32 v155, off, s32 offset:60
+; GFX11-TRUE16-NEXT: scratch_load_b32 v154, off, s32 offset:64
+; GFX11-TRUE16-NEXT: scratch_load_b32 v153, off, s32 offset:68
+; GFX11-TRUE16-NEXT: scratch_load_b32 v152, off, s32 offset:72
+; GFX11-TRUE16-NEXT: scratch_load_b32 v143, off, s32 offset:76
+; GFX11-TRUE16-NEXT: scratch_load_b32 v142, off, s32 offset:80
+; GFX11-TRUE16-NEXT: scratch_load_b32 v141, off, s32 offset:84
+; GFX11-TRUE16-NEXT: scratch_load_b32 v140, off, s32 offset:88
+; GFX11-TRUE16-NEXT: scratch_load_b32 v139, off, s32 offset:92
+; GFX11-TRUE16-NEXT: scratch_load_b32 v138, off, s32 offset:96
+; GFX11-TRUE16-NEXT: scratch_load_b32 v137, off, s32 offset:100
+; GFX11-TRUE16-NEXT: scratch_load_b32 v136, off, s32 offset:104
+; GFX11-TRUE16-NEXT: scratch_load_b32 v127, off, s32 offset:108
+; GFX11-TRUE16-NEXT: scratch_load_b32 v126, off, s32 offset:112
+; GFX11-TRUE16-NEXT: scratch_load_b32 v125, off, s32 offset:116
+; GFX11-TRUE16-NEXT: scratch_load_b32 v124, off, s32 offset:120
+; GFX11-TRUE16-NEXT: scratch_load_b32 v123, off, s32 offset:124
+; GFX11-TRUE16-NEXT: s_clause 0x1f
+; GFX11-TRUE16-NEXT: scratch_load_b32 v122, off, s32 offset:128
+; GFX11-TRUE16-NEXT: scratch_load_b32 v121, off, s32 offset:132
+; GFX11-TRUE16-NEXT: scratch_load_b32 v120, off, s32 offset:136
+; GFX11-TRUE16-NEXT: scratch_load_b32 v111, off, s32 offset:140
+; GFX11-TRUE16-NEXT: scratch_load_b32 v110, off, s32 offset:144
+; GFX11-TRUE16-NEXT: scratch_load_b32 v109, off, s32 offset:148
+; GFX11-TRUE16-NEXT: scratch_load_b32 v108, off, s32 offset:152
+; GFX11-TRUE16-NEXT: scratch_load_b32 v107, off, s32 offset:156
+; GFX11-TRUE16-NEXT: scratch_load_b32 v106, off, s32 offset:160
+; GFX11-TRUE16-NEXT: scratch_load_b32 v105, off, s32 offset:164
+; GFX11-TRUE16-NEXT: scratch_load_b32 v104, off, s32 offset:168
+; GFX11-TRUE16-NEXT: scratch_load_b32 v95, off, s32 offset:172
+; GFX11-TRUE16-NEXT: scratch_load_b32 v94, off, s32 offset:176
+; GFX11-TRUE16-NEXT: scratch_load_b32 v93, off, s32 offset:180
+; GFX11-TRUE16-NEXT: scratch_load_b32 v92, off, s32 offset:184
+; GFX11-TRUE16-NEXT: scratch_load_b32 v91, off, s32 offset:188
+; GFX11-TRUE16-NEXT: scratch_load_b32 v90, off, s32 offset:192
+; GFX11-TRUE16-NEXT: scratch_load_b32 v89, off, s32 offset:196
+; GFX11-TRUE16-NEXT: scratch_load_b32 v88, off, s32 offset:200
+; GFX11-TRUE16-NEXT: scratch_load_b32 v79, off, s32 offset:204
+; GFX11-TRUE16-NEXT: scratch_load_b32 v78, off, s32 offset:208
+; GFX11-TRUE16-NEXT: scratch_load_b32 v77, off, s32 offset:212
+; GFX11-TRUE16-NEXT: scratch_load_b32 v76, off, s32 offset:216
+; GFX11-TRUE16-NEXT: scratch_load_b32 v75, off, s32 offset:220
+; GFX11-TRUE16-NEXT: scratch_load_b32 v74, off, s32 offset:224
+; GFX11-TRUE16-NEXT: scratch_load_b32 v73, off, s32 offset:228
+; GFX11-TRUE16-NEXT: scratch_load_b32 v72, off, s32 offset:232
+; GFX11-TRUE16-NEXT: scratch_load_b32 v63, off, s32 offset:236
+; GFX11-TRUE16-NEXT: scratch_load_b32 v62, off, s32 offset:240
+; GFX11-TRUE16-NEXT: scratch_load_b32 v61, off, s32 offset:244
+; GFX11-TRUE16-NEXT: scratch_load_b32 v60, off, s32 offset:248
+; GFX11-TRUE16-NEXT: scratch_load_b32 v59, off, s32 offset:252
+; GFX11-TRUE16-NEXT: s_clause 0xa
+; GFX11-TRUE16-NEXT: scratch_load_b32 v58, off, s32 offset:256
+; GFX11-TRUE16-NEXT: scratch_load_b32 v57, off, s32 offset:260
+; GFX11-TRUE16-NEXT: scratch_load_b32 v56, off, s32 offset:264
+; GFX11-TRUE16-NEXT: scratch_load_b32 v47, off, s32 offset:268
+; GFX11-TRUE16-NEXT: scratch_load_b32 v46, off, s32 offset:272
+; GFX11-TRUE16-NEXT: scratch_load_b32 v45, off, s32 offset:276
+; GFX11-TRUE16-NEXT: scratch_load_b32 v44, off, s32 offset:280
+; GFX11-TRUE16-NEXT: scratch_load_b32 v43, off, s32 offset:284
+; GFX11-TRUE16-NEXT: scratch_load_b32 v42, off, s32 offset:288
+; GFX11-TRUE16-NEXT: scratch_load_b32 v41, off, s32 offset:292
+; GFX11-TRUE16-NEXT: scratch_load_b32 v40, off, s32 offset:296
+; GFX11-TRUE16-NEXT: v_dual_mov_b32 v1, v2 :: v_dual_mov_b32 v2, v5
+; GFX11-TRUE16-NEXT: v_dual_mov_b32 v3, v9 :: v_dual_mov_b32 v4, v14
+; GFX11-TRUE16-NEXT: v_dual_mov_b32 v5, v20 :: v_dual_mov_b32 v6, v27
+; GFX11-TRUE16-NEXT: v_dual_mov_b32 v9, v54 :: v_dual_mov_b32 v10, v65
+; GFX11-TRUE16-NEXT: v_mov_b32_e32 v14, v119
+; GFX11-TRUE16-NEXT: s_waitcnt vmcnt(0)
; GFX11-TRUE16-NEXT: s_setpc_b64 s[30:31]
; GFX11-TRUE16-NEXT: .LBB35_4:
; GFX11-TRUE16-NEXT: ; implicit-def: $vgpr0_vgpr1_vgpr2_vgpr3_vgpr4_vgpr5_vgpr6_vgpr7_vgpr8_vgpr9_vgpr10_vgpr11_vgpr12_vgpr13_vgpr14_vgpr15_vgpr16_vgpr17_vgpr18_vgpr19_vgpr20_vgpr21_vgpr22_vgpr23_vgpr24_vgpr25_vgpr26_vgpr27_vgpr28_vgpr29_vgpr30_vgpr31
+; GFX11-TRUE16-NEXT: ; implicit-def: $vgpr1_vgpr2_vgpr3_vgpr4_vgpr5_vgpr6_vgpr7_vgpr8_vgpr9_vgpr10_vgpr11_vgpr12_vgpr13_vgpr14_vgpr15_vgpr16_vgpr17_vgpr18_vgpr19_vgpr20_vgpr21_vgpr22_vgpr23_vgpr24_vgpr25_vgpr26_vgpr27_vgpr28_vgpr29_vgpr30_vgpr31_vgpr32
+; GFX11-TRUE16-NEXT: ; implicit-def: $vgpr3_vgpr4_vgpr5_vgpr6_vgpr7_vgpr8_vgpr9_vgpr10_vgpr11_vgpr12_vgpr13_vgpr14_vgpr15_vgpr16_vgpr17_vgpr18_vgpr19_vgpr20_vgpr21_vgpr22_vgpr23_vgpr24_vgpr25_vgpr26_vgpr27_vgpr28_vgpr29_vgpr30_vgpr31_vgpr32_vgpr33_vgpr34
+; GFX11-TRUE16-NEXT: ; implicit-def: $vgpr6_vgpr7_vgpr8_vgpr9_vgpr10_vgpr11_vgpr12_vgpr13_vgpr14_vgpr15_vgpr16_vgpr17_vgpr18_vgpr19_vgpr20_vgpr21_vgpr22_vgpr23_vgpr24_vgpr25_vgpr26_vgpr27_vgpr28_vgpr29_vgpr30_vgpr31_vgpr32_vgpr33_vgpr34_vgpr35_vgpr36_vgpr37
+; GFX11-TRUE16-NEXT: ; implicit-def: $vgpr10_vgpr11_vgpr12_vgpr13_vgpr14_vgpr15_vgpr16_vgpr17_vgpr18_vgpr19_vgpr20_vgpr21_vgpr22_vgpr23_vgpr24_vgpr25_vgpr26_vgpr27_vgpr28_vgpr29_vgpr30_vgpr31_vgpr32_vgpr33_vgpr34_vgpr35_vgpr36_vgpr37_vgpr38_vgpr39_vgpr40_vgpr41
+; GFX11-TRUE16-NEXT: ; implicit-def: $vgpr15_vgpr16_vgpr17_vgpr18_vgpr19_vgpr20_vgpr21_vgpr22_vgpr23_vgpr24_vgpr25_vgpr26_vgpr27_vgpr28_vgpr29_vgpr30_vgpr31_vgpr32_vgpr33_vgpr34_vgpr35_vgpr36_vgpr37_vgpr38_vgpr39_vgpr40_vgpr41_vgpr42_vgpr43_vgpr44_vgpr45_vgpr46
+; GFX11-TRUE16-NEXT: ; implicit-def: $vgpr21_vgpr22_vgpr23_vgpr24_vgpr25_vgpr26_vgpr27_vgpr28_vgpr29_vgpr30_vgpr31_vgpr32_vgpr33_vgpr34_vgpr35_vgpr36_vgpr37_vgpr38_vgpr39_vgpr40_vgpr41_vgpr42_vgpr43_vgpr44_vgpr45_vgpr46_vgpr47_vgpr48_vgpr49_vgpr50_vgpr51_vgpr52
+; GFX11-TRUE16-NEXT: ; implicit-def: $vgpr28_vgpr29_vgpr30_vgpr31_vgpr32_vgpr33_vgpr34_vgpr35_vgpr36_vgpr37_vgpr38_vgpr39_vgpr40_vgpr41_vgpr42_vgpr43_vgpr44_vgpr45_vgpr46_vgpr47_vgpr48_vgpr49_vgpr50_vgpr51_vgpr52_vgpr53_vgpr54_vgpr55_vgpr56_vgpr57_vgpr58_vgpr59
+; GFX11-TRUE16-NEXT: ; implicit-def: $vgpr36_vgpr37_vgpr38_vgpr39_vgpr40_vgpr41_vgpr42_vgpr43_vgpr44_vgpr45_vgpr46_vgpr47_vgpr48_vgpr49_vgpr50_vgpr51_vgpr52_vgpr53_vgpr54_vgpr55_vgpr56_vgpr57_vgpr58_vgpr59_vgpr60_vgpr61_vgpr62_vgpr63_vgpr64_vgpr65_vgpr66_vgpr67
+; GFX11-TRUE16-NEXT: ; implicit-def: $vgpr45_vgpr46_vgpr47_vgpr48_vgpr49_vgpr50_vgpr51_vgpr52_vgpr53_vgpr54_vgpr55_vgpr56_vgpr57_vgpr58_vgpr59_vgpr60_vgpr61_vgpr62_vgpr63_vgpr64_vgpr65_vgpr66_vgpr67_vgpr68_vgpr69_vgpr70_vgpr71_vgpr72_vgpr73_vgpr74_vgpr75_vgpr76
+; GFX11-TRUE16-NEXT: ; implicit-def: $vgpr55_vgpr56_vgpr57_vgpr58_vgpr59_vgpr60_vgpr61_vgpr62_vgpr63_vgpr64_vgpr65_vgpr66_vgpr67_vgpr68_vgpr69_vgpr70_vgpr71_vgpr72_vgpr73_vgpr74_vgpr75_vgpr76_vgpr77_vgpr78_vgpr79_vgpr80_vgpr81_vgpr82_vgpr83_vgpr84_vgpr85_vgpr86
+; GFX11-TRUE16-NEXT: ; implicit-def: $vgpr66_vgpr67_vgpr68_vgpr69_vgpr70_vgpr71_vgpr72_vgpr73_vgpr74_vgpr75_vgpr76_vgpr77_vgpr78_vgpr79_vgpr80_vgpr81_vgpr82_vgpr83_vgpr84_vgpr85_vgpr86_vgpr87_vgpr88_vgpr89_vgpr90_vgpr91_vgpr92_vgpr93_vgpr94_vgpr95_vgpr96_vgpr97
+; GFX11-TRUE16-NEXT: ; implicit-def: $vgpr78_vgpr79_vgpr80_vgpr81_vgpr82_vgpr83_vgpr84_vgpr85_vgpr86_vgpr87_vgpr88_vgpr89_vgpr90_vgpr91_vgpr92_vgpr93_vgpr94_vgpr95_vgpr96_vgpr97_vgpr98_vgpr99_vgpr100_vgpr101_vgpr102_vgpr103_vgpr104_vgpr105_vgpr106_vgpr107_vgpr108_vgpr109
+; GFX11-TRUE16-NEXT: ; implicit-def: $vgpr91_vgpr92_vgpr93_vgpr94_vgpr95_vgpr96_vgpr97_vgpr98_vgpr99_vgpr100_vgpr101_vgpr102_vgpr103_vgpr104_vgpr105_vgpr106_vgpr107_vgpr108_vgpr109_vgpr110_vgpr111_vgpr112_vgpr113_vgpr114_vgpr115_vgpr116_vgpr117_vgpr118_vgpr119_vgpr120_vgpr121_vgpr122
+; GFX11-TRUE16-NEXT: ; implicit-def: $vgpr105_vgpr106_vgpr107_vgpr108_vgpr109_vgpr110_vgpr111_vgpr112_vgpr113_vgpr114_vgpr115_vgpr116_vgpr117_vgpr118_vgpr119_vgpr120_vgpr121_vgpr122_vgpr123_vgpr124_vgpr125_vgpr126_vgpr127_vgpr128_vgpr129_vgpr130_vgpr131_vgpr132_vgpr133_vgpr134_vgpr135_vgpr136
+; GFX11-TRUE16-NEXT: ; implicit-def: $vgpr120_vgpr121_vgpr122_vgpr123_vgpr124_vgpr125_vgpr126_vgpr127_vgpr128_vgpr129_vgpr130_vgpr131_vgpr132_vgpr133_vgpr134_vgpr135_vgpr136_vgpr137_vgpr138_vgpr139_vgpr140_vgpr141_vgpr142_vgpr143_vgpr144_vgpr145_vgpr146_vgpr147_vgpr148_vgpr149_vgpr150_vgpr151
+; GFX11-TRUE16-NEXT: ; implicit-def: $vgpr136_vgpr137_vgpr138_vgpr139_vgpr140_vgpr141_vgpr142_vgpr143_vgpr144_vgpr145_vgpr146_vgpr147_vgpr148_vgpr149_vgpr150_vgpr151_vgpr152_vgpr153_vgpr154_vgpr155_vgpr156_vgpr157_vgpr158_vgpr159_vgpr160_vgpr161_vgpr162_vgpr163_vgpr164_vgpr165_vgpr166_vgpr167
+; GFX11-TRUE16-NEXT: ; implicit-def: $vgpr153_vgpr154_vgpr155_vgpr156_vgpr157_vgpr158_vgpr159_vgpr160_vgpr161_vgpr162_vgpr163_vgpr164_vgpr165_vgpr166_vgpr167_vgpr168_vgpr169_vgpr170_vgpr171_vgpr172_vgpr173_vgpr174_vgpr175_vgpr176_vgpr177_vgpr178_vgpr179_vgpr180_vgpr181_vgpr182_vgpr183_vgpr184
; GFX11-TRUE16-NEXT: s_branch .LBB35_2
;
; GFX11-FAKE16-LABEL: bitcast_v40f16_to_v20f32_scalar:
@@ -19655,93 +20621,270 @@ define inreg <10 x i64> @bitcast_v40i16_to_v10i64_scalar(<40 x i16> inreg %a, i3
; GFX11-TRUE16-LABEL: bitcast_v40i16_to_v10i64_scalar:
; GFX11-TRUE16: ; %bb.0:
; GFX11-TRUE16-NEXT: s_waitcnt vmcnt(0) expcnt(0) lgkmcnt(0)
-; GFX11-TRUE16-NEXT: v_mov_b16_e32 v32.h, 0
; GFX11-TRUE16-NEXT: v_cmp_ne_u32_e32 vcc_lo, 0, v2
-; GFX11-TRUE16-NEXT: v_mov_b16_e32 v32.l, v1.h
-; GFX11-TRUE16-NEXT: v_mov_b16_e32 v33.l, v0.h
-; GFX11-TRUE16-NEXT: v_and_b32_e32 v35, 0xffff, v0
-; GFX11-TRUE16-NEXT: v_mov_b16_e32 v33.h, v32.h
-; GFX11-TRUE16-NEXT: v_and_b32_e32 v34, 0xffff, v1
-; GFX11-TRUE16-NEXT: s_lshr_b32 s41, s29, 16
-; GFX11-TRUE16-NEXT: s_lshr_b32 s42, s28, 16
-; GFX11-TRUE16-NEXT: s_lshr_b32 s43, s27, 16
-; GFX11-TRUE16-NEXT: s_lshr_b32 s44, s26, 16
-; GFX11-TRUE16-NEXT: s_lshr_b32 s45, s25, 16
-; GFX11-TRUE16-NEXT: s_lshr_b32 s15, s24, 16
-; GFX11-TRUE16-NEXT: s_lshr_b32 s14, s23, 16
-; GFX11-TRUE16-NEXT: s_lshr_b32 s13, s22, 16
-; GFX11-TRUE16-NEXT: s_lshr_b32 s12, s21, 16
-; GFX11-TRUE16-NEXT: s_lshr_b32 s11, s20, 16
-; GFX11-TRUE16-NEXT: s_lshr_b32 s10, s19, 16
-; GFX11-TRUE16-NEXT: s_lshr_b32 s9, s18, 16
-; GFX11-TRUE16-NEXT: s_lshr_b32 s8, s17, 16
-; GFX11-TRUE16-NEXT: s_lshr_b32 s7, s16, 16
-; GFX11-TRUE16-NEXT: s_lshr_b32 s46, s3, 16
-; GFX11-TRUE16-NEXT: s_lshr_b32 s6, s2, 16
-; GFX11-TRUE16-NEXT: s_lshr_b32 s5, s1, 16
-; GFX11-TRUE16-NEXT: s_lshr_b32 s4, s0, 16
-; GFX11-TRUE16-NEXT: s_mov_b32 s40, 0
-; GFX11-TRUE16-NEXT: s_pack_ll_b32_b16 s4, s0, s4
-; GFX11-TRUE16-NEXT: s_pack_ll_b32_b16 s5, s1, s5
-; GFX11-TRUE16-NEXT: s_pack_ll_b32_b16 s6, s2, s6
-; GFX11-TRUE16-NEXT: s_pack_ll_b32_b16 s3, s3, s46
-; GFX11-TRUE16-NEXT: s_pack_ll_b32_b16 s7, s16, s7
-; GFX11-TRUE16-NEXT: s_pack_ll_b32_b16 s8, s17, s8
-; GFX11-TRUE16-NEXT: s_pack_ll_b32_b16 s9, s18, s9
-; GFX11-TRUE16-NEXT: s_pack_ll_b32_b16 s10, s19, s10
-; GFX11-TRUE16-NEXT: s_pack_ll_b32_b16 s11, s20, s11
-; GFX11-TRUE16-NEXT: s_pack_ll_b32_b16 s12, s21, s12
-; GFX11-TRUE16-NEXT: s_pack_ll_b32_b16 s13, s22, s13
-; GFX11-TRUE16-NEXT: s_pack_ll_b32_b16 s14, s23, s14
-; GFX11-TRUE16-NEXT: s_pack_ll_b32_b16 s15, s24, s15
-; GFX11-TRUE16-NEXT: s_pack_ll_b32_b16 s16, s25, s45
-; GFX11-TRUE16-NEXT: s_pack_ll_b32_b16 s17, s26, s44
-; GFX11-TRUE16-NEXT: s_pack_ll_b32_b16 s0, s27, s43
-; GFX11-TRUE16-NEXT: s_pack_ll_b32_b16 s1, s28, s42
-; GFX11-TRUE16-NEXT: s_pack_ll_b32_b16 s2, s29, s41
+; GFX11-TRUE16-NEXT: s_clause 0x1f
+; GFX11-TRUE16-NEXT: scratch_store_b32 off, v40, s32 offset:296
+; GFX11-TRUE16-NEXT: scratch_store_b32 off, v41, s32 offset:292
+; GFX11-TRUE16-NEXT: scratch_store_b32 off, v42, s32 offset:288
+; GFX11-TRUE16-NEXT: scratch_store_b32 off, v43, s32 offset:284
+; GFX11-TRUE16-NEXT: scratch_store_b32 off, v44, s32 offset:280
+; GFX11-TRUE16-NEXT: scratch_store_b32 off, v45, s32 offset:276
+; GFX11-TRUE16-NEXT: scratch_store_b32 off, v46, s32 offset:272
+; GFX11-TRUE16-NEXT: scratch_store_b32 off, v47, s32 offset:268
+; GFX11-TRUE16-NEXT: scratch_store_b32 off, v56, s32 offset:264
+; GFX11-TRUE16-NEXT: scratch_store_b32 off, v57, s32 offset:260
+; GFX11-TRUE16-NEXT: scratch_store_b32 off, v58, s32 offset:256
+; GFX11-TRUE16-NEXT: scratch_store_b32 off, v59, s32 offset:252
+; GFX11-TRUE16-NEXT: scratch_store_b32 off, v60, s32 offset:248
+; GFX11-TRUE16-NEXT: scratch_store_b32 off, v61, s32 offset:244
+; GFX11-TRUE16-NEXT: scratch_store_b32 off, v62, s32 offset:240
+; GFX11-TRUE16-NEXT: scratch_store_b32 off, v63, s32 offset:236
+; GFX11-TRUE16-NEXT: scratch_store_b32 off, v72, s32 offset:232
+; GFX11-TRUE16-NEXT: scratch_store_b32 off, v73, s32 offset:228
+; GFX11-TRUE16-NEXT: scratch_store_b32 off, v74, s32 offset:224
+; GFX11-TRUE16-NEXT: scratch_store_b32 off, v75, s32 offset:220
+; GFX11-TRUE16-NEXT: scratch_store_b32 off, v76, s32 offset:216
+; GFX11-TRUE16-NEXT: scratch_store_b32 off, v77, s32 offset:212
+; GFX11-TRUE16-NEXT: scratch_store_b32 off, v78, s32 offset:208
+; GFX11-TRUE16-NEXT: scratch_store_b32 off, v79, s32 offset:204
+; GFX11-TRUE16-NEXT: scratch_store_b32 off, v88, s32 offset:200
+; GFX11-TRUE16-NEXT: scratch_store_b32 off, v89, s32 offset:196
+; GFX11-TRUE16-NEXT: scratch_store_b32 off, v90, s32 offset:192
+; GFX11-TRUE16-NEXT: scratch_store_b32 off, v91, s32 offset:188
+; GFX11-TRUE16-NEXT: scratch_store_b32 off, v92, s32 offset:184
+; GFX11-TRUE16-NEXT: scratch_store_b32 off, v93, s32 offset:180
+; GFX11-TRUE16-NEXT: scratch_store_b32 off, v94, s32 offset:176
+; GFX11-TRUE16-NEXT: scratch_store_b32 off, v95, s32 offset:172
+; GFX11-TRUE16-NEXT: s_clause 0x1f
+; GFX11-TRUE16-NEXT: scratch_store_b32 off, v104, s32 offset:168
+; GFX11-TRUE16-NEXT: scratch_store_b32 off, v105, s32 offset:164
+; GFX11-TRUE16-NEXT: scratch_store_b32 off, v106, s32 offset:160
+; GFX11-TRUE16-NEXT: scratch_store_b32 off, v107, s32 offset:156
+; GFX11-TRUE16-NEXT: scratch_store_b32 off, v108, s32 offset:152
+; GFX11-TRUE16-NEXT: scratch_store_b32 off, v109, s32 offset:148
+; GFX11-TRUE16-NEXT: scratch_store_b32 off, v110, s32 offset:144
+; GFX11-TRUE16-NEXT: scratch_store_b32 off, v111, s32 offset:140
+; GFX11-TRUE16-NEXT: scratch_store_b32 off, v120, s32 offset:136
+; GFX11-TRUE16-NEXT: scratch_store_b32 off, v121, s32 offset:132
+; GFX11-TRUE16-NEXT: scratch_store_b32 off, v122, s32 offset:128
+; GFX11-TRUE16-NEXT: scratch_store_b32 off, v123, s32 offset:124
+; GFX11-TRUE16-NEXT: scratch_store_b32 off, v124, s32 offset:120
+; GFX11-TRUE16-NEXT: scratch_store_b32 off, v125, s32 offset:116
+; GFX11-TRUE16-NEXT: scratch_store_b32 off, v126, s32 offset:112
+; GFX11-TRUE16-NEXT: scratch_store_b32 off, v127, s32 offset:108
+; GFX11-TRUE16-NEXT: scratch_store_b32 off, v136, s32 offset:104
+; GFX11-TRUE16-NEXT: scratch_store_b32 off, v137, s32 offset:100
+; GFX11-TRUE16-NEXT: scratch_store_b32 off, v138, s32 offset:96
+; GFX11-TRUE16-NEXT: scratch_store_b32 off, v139, s32 offset:92
+; GFX11-TRUE16-NEXT: scratch_store_b32 off, v140, s32 offset:88
+; GFX11-TRUE16-NEXT: scratch_store_b32 off, v141, s32 offset:84
+; GFX11-TRUE16-NEXT: scratch_store_b32 off, v142, s32 offset:80
+; GFX11-TRUE16-NEXT: scratch_store_b32 off, v143, s32 offset:76
+; GFX11-TRUE16-NEXT: scratch_store_b32 off, v152, s32 offset:72
+; GFX11-TRUE16-NEXT: scratch_store_b32 off, v153, s32 offset:68
+; GFX11-TRUE16-NEXT: scratch_store_b32 off, v154, s32 offset:64
+; GFX11-TRUE16-NEXT: scratch_store_b32 off, v155, s32 offset:60
+; GFX11-TRUE16-NEXT: scratch_store_b32 off, v156, s32 offset:56
+; GFX11-TRUE16-NEXT: scratch_store_b32 off, v157, s32 offset:52
+; GFX11-TRUE16-NEXT: scratch_store_b32 off, v158, s32 offset:48
+; GFX11-TRUE16-NEXT: scratch_store_b32 off, v159, s32 offset:44
+; GFX11-TRUE16-NEXT: s_clause 0xa
+; GFX11-TRUE16-NEXT: scratch_store_b32 off, v168, s32 offset:40
+; GFX11-TRUE16-NEXT: scratch_store_b32 off, v169, s32 offset:36
+; GFX11-TRUE16-NEXT: scratch_store_b32 off, v170, s32 offset:32
+; GFX11-TRUE16-NEXT: scratch_store_b32 off, v171, s32 offset:28
+; GFX11-TRUE16-NEXT: scratch_store_b32 off, v172, s32 offset:24
+; GFX11-TRUE16-NEXT: scratch_store_b32 off, v173, s32 offset:20
+; GFX11-TRUE16-NEXT: scratch_store_b32 off, v174, s32 offset:16
+; GFX11-TRUE16-NEXT: scratch_store_b32 off, v175, s32 offset:12
+; GFX11-TRUE16-NEXT: scratch_store_b32 off, v184, s32 offset:8
+; GFX11-TRUE16-NEXT: scratch_store_b32 off, v185, s32 offset:4
+; GFX11-TRUE16-NEXT: scratch_store_b32 off, v186, s32
+; GFX11-TRUE16-NEXT: v_dual_mov_b32 v185, v1 :: v_dual_mov_b32 v186, v0
+; GFX11-TRUE16-NEXT: s_lshr_b32 s15, s29, 16
+; GFX11-TRUE16-NEXT: s_lshr_b32 s14, s28, 16
+; GFX11-TRUE16-NEXT: s_lshr_b32 s13, s27, 16
+; GFX11-TRUE16-NEXT: s_lshr_b32 s12, s26, 16
+; GFX11-TRUE16-NEXT: s_lshr_b32 s11, s25, 16
+; GFX11-TRUE16-NEXT: s_lshr_b32 s10, s24, 16
+; GFX11-TRUE16-NEXT: s_lshr_b32 s9, s23, 16
+; GFX11-TRUE16-NEXT: s_lshr_b32 s8, s22, 16
+; GFX11-TRUE16-NEXT: s_lshr_b32 s7, s21, 16
+; GFX11-TRUE16-NEXT: s_lshr_b32 s6, s20, 16
+; GFX11-TRUE16-NEXT: s_lshr_b32 s5, s19, 16
+; GFX11-TRUE16-NEXT: s_lshr_b32 s4, s18, 16
+; GFX11-TRUE16-NEXT: s_lshr_b32 s43, s17, 16
+; GFX11-TRUE16-NEXT: s_lshr_b32 s44, s16, 16
+; GFX11-TRUE16-NEXT: s_lshr_b32 s45, s3, 16
+; GFX11-TRUE16-NEXT: s_lshr_b32 s46, s2, 16
+; GFX11-TRUE16-NEXT: s_lshr_b32 s41, s1, 16
+; GFX11-TRUE16-NEXT: s_lshr_b32 s40, s0, 16
+; GFX11-TRUE16-NEXT: s_mov_b32 s42, 0
+; GFX11-TRUE16-NEXT: s_pack_ll_b32_b16 s40, s0, s40
+; GFX11-TRUE16-NEXT: s_pack_ll_b32_b16 s41, s1, s41
+; GFX11-TRUE16-NEXT: s_pack_ll_b32_b16 s0, s2, s46
+; GFX11-TRUE16-NEXT: s_pack_ll_b32_b16 s1, s3, s45
+; GFX11-TRUE16-NEXT: s_pack_ll_b32_b16 s2, s16, s44
+; GFX11-TRUE16-NEXT: s_pack_ll_b32_b16 s3, s17, s43
+; GFX11-TRUE16-NEXT: s_pack_ll_b32_b16 s4, s18, s4
+; GFX11-TRUE16-NEXT: s_pack_ll_b32_b16 s5, s19, s5
+; GFX11-TRUE16-NEXT: s_pack_ll_b32_b16 s6, s20, s6
+; GFX11-TRUE16-NEXT: s_pack_ll_b32_b16 s7, s21, s7
+; GFX11-TRUE16-NEXT: s_pack_ll_b32_b16 s8, s22, s8
+; GFX11-TRUE16-NEXT: s_pack_ll_b32_b16 s9, s23, s9
+; GFX11-TRUE16-NEXT: s_pack_ll_b32_b16 s10, s24, s10
+; GFX11-TRUE16-NEXT: s_pack_ll_b32_b16 s11, s25, s11
+; GFX11-TRUE16-NEXT: s_pack_ll_b32_b16 s12, s26, s12
+; GFX11-TRUE16-NEXT: s_pack_ll_b32_b16 s13, s27, s13
+; GFX11-TRUE16-NEXT: s_pack_ll_b32_b16 s14, s28, s14
+; GFX11-TRUE16-NEXT: s_pack_ll_b32_b16 s15, s29, s15
; GFX11-TRUE16-NEXT: s_and_b32 s47, vcc_lo, exec_lo
; GFX11-TRUE16-NEXT: s_cbranch_scc0 .LBB43_4
; GFX11-TRUE16-NEXT: ; %bb.1: ; %cmp.false
-; GFX11-TRUE16-NEXT: v_lshl_or_b32 v18, v33, 16, v35
-; GFX11-TRUE16-NEXT: v_lshl_or_b32 v19, v32, 16, v34
-; GFX11-TRUE16-NEXT: v_dual_mov_b32 v0, s4 :: v_dual_mov_b32 v1, s5
-; GFX11-TRUE16-NEXT: v_dual_mov_b32 v2, s6 :: v_dual_mov_b32 v3, s3
-; GFX11-TRUE16-NEXT: v_dual_mov_b32 v4, s7 :: v_dual_mov_b32 v5, s8
-; GFX11-TRUE16-NEXT: v_dual_mov_b32 v6, s9 :: v_dual_mov_b32 v7, s10
-; GFX11-TRUE16-NEXT: v_dual_mov_b32 v8, s11 :: v_dual_mov_b32 v9, s12
-; GFX11-TRUE16-NEXT: v_dual_mov_b32 v10, s13 :: v_dual_mov_b32 v11, s14
-; GFX11-TRUE16-NEXT: v_dual_mov_b32 v12, s15 :: v_dual_mov_b32 v13, s16
-; GFX11-TRUE16-NEXT: v_dual_mov_b32 v14, s17 :: v_dual_mov_b32 v15, s0
-; GFX11-TRUE16-NEXT: v_dual_mov_b32 v16, s1 :: v_dual_mov_b32 v17, s2
-; GFX11-TRUE16-NEXT: s_and_not1_b32 vcc_lo, exec_lo, s40
+; GFX11-TRUE16-NEXT: v_dual_mov_b32 v0, s40 :: v_dual_mov_b32 v5, s0
+; GFX11-TRUE16-NEXT: v_dual_mov_b32 v2, s41 :: v_dual_mov_b32 v9, s1
+; GFX11-TRUE16-NEXT: v_dual_mov_b32 v14, s2 :: v_dual_mov_b32 v27, s4
+; GFX11-TRUE16-NEXT: v_dual_mov_b32 v20, s3 :: v_dual_mov_b32 v35, s5
+; GFX11-TRUE16-NEXT: v_dual_mov_b32 v44, s6 :: v_dual_mov_b32 v65, s8
+; GFX11-TRUE16-NEXT: v_dual_mov_b32 v54, s7 :: v_dual_mov_b32 v77, s9
+; GFX11-TRUE16-NEXT: v_dual_mov_b32 v90, s10 :: v_dual_mov_b32 v119, s12
+; GFX11-TRUE16-NEXT: v_dual_mov_b32 v104, s11 :: v_dual_mov_b32 v135, s13
+; GFX11-TRUE16-NEXT: v_mov_b32_e32 v152, s14
+; GFX11-TRUE16-NEXT: v_mov_b32_e32 v170, s15
+; GFX11-TRUE16-NEXT: s_and_not1_b32 vcc_lo, exec_lo, s42
; GFX11-TRUE16-NEXT: s_cbranch_vccnz .LBB43_3
; GFX11-TRUE16-NEXT: .LBB43_2: ; %cmp.true
-; GFX11-TRUE16-NEXT: v_lshl_or_b32 v18, v33, 16, v35
-; GFX11-TRUE16-NEXT: v_lshl_or_b32 v19, v32, 16, v34
-; GFX11-TRUE16-NEXT: v_pk_add_u16 v0, s4, 3 op_sel_hi:[1,0]
-; GFX11-TRUE16-NEXT: v_pk_add_u16 v1, s5, 3 op_sel_hi:[1,0]
-; GFX11-TRUE16-NEXT: v_pk_add_u16 v2, s6, 3 op_sel_hi:[1,0]
-; GFX11-TRUE16-NEXT: v_pk_add_u16 v3, s3, 3 op_sel_hi:[1,0]
-; GFX11-TRUE16-NEXT: v_pk_add_u16 v4, s7, 3 op_sel_hi:[1,0]
-; GFX11-TRUE16-NEXT: v_pk_add_u16 v5, s8, 3 op_sel_hi:[1,0]
-; GFX11-TRUE16-NEXT: v_pk_add_u16 v6, s9, 3 op_sel_hi:[1,0]
-; GFX11-TRUE16-NEXT: v_pk_add_u16 v7, s10, 3 op_sel_hi:[1,0]
-; GFX11-TRUE16-NEXT: v_pk_add_u16 v8, s11, 3 op_sel_hi:[1,0]
-; GFX11-TRUE16-NEXT: v_pk_add_u16 v9, s12, 3 op_sel_hi:[1,0]
-; GFX11-TRUE16-NEXT: v_pk_add_u16 v10, s13, 3 op_sel_hi:[1,0]
-; GFX11-TRUE16-NEXT: v_pk_add_u16 v11, s14, 3 op_sel_hi:[1,0]
-; GFX11-TRUE16-NEXT: v_pk_add_u16 v12, s15, 3 op_sel_hi:[1,0]
-; GFX11-TRUE16-NEXT: v_pk_add_u16 v13, s16, 3 op_sel_hi:[1,0]
-; GFX11-TRUE16-NEXT: v_pk_add_u16 v14, s17, 3 op_sel_hi:[1,0]
-; GFX11-TRUE16-NEXT: v_pk_add_u16 v15, s0, 3 op_sel_hi:[1,0]
-; GFX11-TRUE16-NEXT: v_pk_add_u16 v16, s1, 3 op_sel_hi:[1,0]
-; GFX11-TRUE16-NEXT: v_pk_add_u16 v17, s2, 3 op_sel_hi:[1,0]
-; GFX11-TRUE16-NEXT: v_pk_add_u16 v18, v18, 3 op_sel_hi:[1,0]
-; GFX11-TRUE16-NEXT: v_pk_add_u16 v19, v19, 3 op_sel_hi:[1,0]
+; GFX11-TRUE16-NEXT: v_pk_add_u16 v0, s40, 3 op_sel_hi:[1,0]
+; GFX11-TRUE16-NEXT: v_pk_add_u16 v2, s41, 3 op_sel_hi:[1,0]
+; GFX11-TRUE16-NEXT: v_pk_add_u16 v186, v186, 3 op_sel_hi:[1,0]
+; GFX11-TRUE16-NEXT: v_pk_add_u16 v185, v185, 3 op_sel_hi:[1,0]
+; GFX11-TRUE16-NEXT: v_pk_add_u16 v5, s0, 3 op_sel_hi:[1,0]
+; GFX11-TRUE16-NEXT: v_pk_add_u16 v9, s1, 3 op_sel_hi:[1,0]
+; GFX11-TRUE16-NEXT: v_pk_add_u16 v14, s2, 3 op_sel_hi:[1,0]
+; GFX11-TRUE16-NEXT: v_pk_add_u16 v20, s3, 3 op_sel_hi:[1,0]
+; GFX11-TRUE16-NEXT: v_pk_add_u16 v27, s4, 3 op_sel_hi:[1,0]
+; GFX11-TRUE16-NEXT: v_pk_add_u16 v35, s5, 3 op_sel_hi:[1,0]
+; GFX11-TRUE16-NEXT: v_pk_add_u16 v44, s6, 3 op_sel_hi:[1,0]
+; GFX11-TRUE16-NEXT: v_pk_add_u16 v54, s7, 3 op_sel_hi:[1,0]
+; GFX11-TRUE16-NEXT: v_pk_add_u16 v65, s8, 3 op_sel_hi:[1,0]
+; GFX11-TRUE16-NEXT: v_pk_add_u16 v77, s9, 3 op_sel_hi:[1,0]
+; GFX11-TRUE16-NEXT: v_pk_add_u16 v90, s10, 3 op_sel_hi:[1,0]
+; GFX11-TRUE16-NEXT: v_pk_add_u16 v104, s11, 3 op_sel_hi:[1,0]
+; GFX11-TRUE16-NEXT: v_pk_add_u16 v119, s12, 3 op_sel_hi:[1,0]
+; GFX11-TRUE16-NEXT: v_pk_add_u16 v135, s13, 3 op_sel_hi:[1,0]
+; GFX11-TRUE16-NEXT: v_pk_add_u16 v152, s14, 3 op_sel_hi:[1,0]
+; GFX11-TRUE16-NEXT: v_pk_add_u16 v170, s15, 3 op_sel_hi:[1,0]
; GFX11-TRUE16-NEXT: .LBB43_3: ; %end
+; GFX11-TRUE16-NEXT: v_dual_mov_b32 v7, v35 :: v_dual_mov_b32 v8, v44
+; GFX11-TRUE16-NEXT: v_dual_mov_b32 v11, v77 :: v_dual_mov_b32 v12, v90
+; GFX11-TRUE16-NEXT: v_mov_b32_e32 v13, v104
+; GFX11-TRUE16-NEXT: v_dual_mov_b32 v15, v135 :: v_dual_mov_b32 v16, v152
+; GFX11-TRUE16-NEXT: v_mov_b32_e32 v17, v170
+; GFX11-TRUE16-NEXT: v_dual_mov_b32 v18, v186 :: v_dual_mov_b32 v19, v185
+; GFX11-TRUE16-NEXT: s_clause 0x1f
+; GFX11-TRUE16-NEXT: scratch_load_b32 v186, off, s32
+; GFX11-TRUE16-NEXT: scratch_load_b32 v185, off, s32 offset:4
+; GFX11-TRUE16-NEXT: scratch_load_b32 v184, off, s32 offset:8
+; GFX11-TRUE16-NEXT: scratch_load_b32 v175, off, s32 offset:12
+; GFX11-TRUE16-NEXT: scratch_load_b32 v174, off, s32 offset:16
+; GFX11-TRUE16-NEXT: scratch_load_b32 v173, off, s32 offset:20
+; GFX11-TRUE16-NEXT: scratch_load_b32 v172, off, s32 offset:24
+; GFX11-TRUE16-NEXT: scratch_load_b32 v171, off, s32 offset:28
+; GFX11-TRUE16-NEXT: scratch_load_b32 v170, off, s32 offset:32
+; GFX11-TRUE16-NEXT: scratch_load_b32 v169, off, s32 offset:36
+; GFX11-TRUE16-NEXT: scratch_load_b32 v168, off, s32 offset:40
+; GFX11-TRUE16-NEXT: scratch_load_b32 v159, off, s32 offset:44
+; GFX11-TRUE16-NEXT: scratch_load_b32 v158, off, s32 offset:48
+; GFX11-TRUE16-NEXT: scratch_load_b32 v157, off, s32 offset:52
+; GFX11-TRUE16-NEXT: scratch_load_b32 v156, off, s32 offset:56
+; GFX11-TRUE16-NEXT: scratch_load_b32 v155, off, s32 offset:60
+; GFX11-TRUE16-NEXT: scratch_load_b32 v154, off, s32 offset:64
+; GFX11-TRUE16-NEXT: scratch_load_b32 v153, off, s32 offset:68
+; GFX11-TRUE16-NEXT: scratch_load_b32 v152, off, s32 offset:72
+; GFX11-TRUE16-NEXT: scratch_load_b32 v143, off, s32 offset:76
+; GFX11-TRUE16-NEXT: scratch_load_b32 v142, off, s32 offset:80
+; GFX11-TRUE16-NEXT: scratch_load_b32 v141, off, s32 offset:84
+; GFX11-TRUE16-NEXT: scratch_load_b32 v140, off, s32 offset:88
+; GFX11-TRUE16-NEXT: scratch_load_b32 v139, off, s32 offset:92
+; GFX11-TRUE16-NEXT: scratch_load_b32 v138, off, s32 offset:96
+; GFX11-TRUE16-NEXT: scratch_load_b32 v137, off, s32 offset:100
+; GFX11-TRUE16-NEXT: scratch_load_b32 v136, off, s32 offset:104
+; GFX11-TRUE16-NEXT: scratch_load_b32 v127, off, s32 offset:108
+; GFX11-TRUE16-NEXT: scratch_load_b32 v126, off, s32 offset:112
+; GFX11-TRUE16-NEXT: scratch_load_b32 v125, off, s32 offset:116
+; GFX11-TRUE16-NEXT: scratch_load_b32 v124, off, s32 offset:120
+; GFX11-TRUE16-NEXT: scratch_load_b32 v123, off, s32 offset:124
+; GFX11-TRUE16-NEXT: s_clause 0x1f
+; GFX11-TRUE16-NEXT: scratch_load_b32 v122, off, s32 offset:128
+; GFX11-TRUE16-NEXT: scratch_load_b32 v121, off, s32 offset:132
+; GFX11-TRUE16-NEXT: scratch_load_b32 v120, off, s32 offset:136
+; GFX11-TRUE16-NEXT: scratch_load_b32 v111, off, s32 offset:140
+; GFX11-TRUE16-NEXT: scratch_load_b32 v110, off, s32 offset:144
+; GFX11-TRUE16-NEXT: scratch_load_b32 v109, off, s32 offset:148
+; GFX11-TRUE16-NEXT: scratch_load_b32 v108, off, s32 offset:152
+; GFX11-TRUE16-NEXT: scratch_load_b32 v107, off, s32 offset:156
+; GFX11-TRUE16-NEXT: scratch_load_b32 v106, off, s32 offset:160
+; GFX11-TRUE16-NEXT: scratch_load_b32 v105, off, s32 offset:164
+; GFX11-TRUE16-NEXT: scratch_load_b32 v104, off, s32 offset:168
+; GFX11-TRUE16-NEXT: scratch_load_b32 v95, off, s32 offset:172
+; GFX11-TRUE16-NEXT: scratch_load_b32 v94, off, s32 offset:176
+; GFX11-TRUE16-NEXT: scratch_load_b32 v93, off, s32 offset:180
+; GFX11-TRUE16-NEXT: scratch_load_b32 v92, off, s32 offset:184
+; GFX11-TRUE16-NEXT: scratch_load_b32 v91, off, s32 offset:188
+; GFX11-TRUE16-NEXT: scratch_load_b32 v90, off, s32 offset:192
+; GFX11-TRUE16-NEXT: scratch_load_b32 v89, off, s32 offset:196
+; GFX11-TRUE16-NEXT: scratch_load_b32 v88, off, s32 offset:200
+; GFX11-TRUE16-NEXT: scratch_load_b32 v79, off, s32 offset:204
+; GFX11-TRUE16-NEXT: scratch_load_b32 v78, off, s32 offset:208
+; GFX11-TRUE16-NEXT: scratch_load_b32 v77, off, s32 offset:212
+; GFX11-TRUE16-NEXT: scratch_load_b32 v76, off, s32 offset:216
+; GFX11-TRUE16-NEXT: scratch_load_b32 v75, off, s32 offset:220
+; GFX11-TRUE16-NEXT: scratch_load_b32 v74, off, s32 offset:224
+; GFX11-TRUE16-NEXT: scratch_load_b32 v73, off, s32 offset:228
+; GFX11-TRUE16-NEXT: scratch_load_b32 v72, off, s32 offset:232
+; GFX11-TRUE16-NEXT: scratch_load_b32 v63, off, s32 offset:236
+; GFX11-TRUE16-NEXT: scratch_load_b32 v62, off, s32 offset:240
+; GFX11-TRUE16-NEXT: scratch_load_b32 v61, off, s32 offset:244
+; GFX11-TRUE16-NEXT: scratch_load_b32 v60, off, s32 offset:248
+; GFX11-TRUE16-NEXT: scratch_load_b32 v59, off, s32 offset:252
+; GFX11-TRUE16-NEXT: s_clause 0xa
+; GFX11-TRUE16-NEXT: scratch_load_b32 v58, off, s32 offset:256
+; GFX11-TRUE16-NEXT: scratch_load_b32 v57, off, s32 offset:260
+; GFX11-TRUE16-NEXT: scratch_load_b32 v56, off, s32 offset:264
+; GFX11-TRUE16-NEXT: scratch_load_b32 v47, off, s32 offset:268
+; GFX11-TRUE16-NEXT: scratch_load_b32 v46, off, s32 offset:272
+; GFX11-TRUE16-NEXT: scratch_load_b32 v45, off, s32 offset:276
+; GFX11-TRUE16-NEXT: scratch_load_b32 v44, off, s32 offset:280
+; GFX11-TRUE16-NEXT: scratch_load_b32 v43, off, s32 offset:284
+; GFX11-TRUE16-NEXT: scratch_load_b32 v42, off, s32 offset:288
+; GFX11-TRUE16-NEXT: scratch_load_b32 v41, off, s32 offset:292
+; GFX11-TRUE16-NEXT: scratch_load_b32 v40, off, s32 offset:296
+; GFX11-TRUE16-NEXT: v_dual_mov_b32 v1, v2 :: v_dual_mov_b32 v2, v5
+; GFX11-TRUE16-NEXT: v_dual_mov_b32 v3, v9 :: v_dual_mov_b32 v4, v14
+; GFX11-TRUE16-NEXT: v_dual_mov_b32 v5, v20 :: v_dual_mov_b32 v6, v27
+; GFX11-TRUE16-NEXT: v_dual_mov_b32 v9, v54 :: v_dual_mov_b32 v10, v65
+; GFX11-TRUE16-NEXT: v_mov_b32_e32 v14, v119
+; GFX11-TRUE16-NEXT: s_waitcnt vmcnt(0)
; GFX11-TRUE16-NEXT: s_setpc_b64 s[30:31]
; GFX11-TRUE16-NEXT: .LBB43_4:
; GFX11-TRUE16-NEXT: ; implicit-def: $vgpr0_vgpr1_vgpr2_vgpr3_vgpr4_vgpr5_vgpr6_vgpr7_vgpr8_vgpr9_vgpr10_vgpr11_vgpr12_vgpr13_vgpr14_vgpr15_vgpr16_vgpr17_vgpr18_vgpr19_vgpr20_vgpr21_vgpr22_vgpr23_vgpr24_vgpr25_vgpr26_vgpr27_vgpr28_vgpr29_vgpr30_vgpr31
+; GFX11-TRUE16-NEXT: ; implicit-def: $vgpr1_vgpr2_vgpr3_vgpr4_vgpr5_vgpr6_vgpr7_vgpr8_vgpr9_vgpr10_vgpr11_vgpr12_vgpr13_vgpr14_vgpr15_vgpr16_vgpr17_vgpr18_vgpr19_vgpr20_vgpr21_vgpr22_vgpr23_vgpr24_vgpr25_vgpr26_vgpr27_vgpr28_vgpr29_vgpr30_vgpr31_vgpr32
+; GFX11-TRUE16-NEXT: ; implicit-def: $vgpr3_vgpr4_vgpr5_vgpr6_vgpr7_vgpr8_vgpr9_vgpr10_vgpr11_vgpr12_vgpr13_vgpr14_vgpr15_vgpr16_vgpr17_vgpr18_vgpr19_vgpr20_vgpr21_vgpr22_vgpr23_vgpr24_vgpr25_vgpr26_vgpr27_vgpr28_vgpr29_vgpr30_vgpr31_vgpr32_vgpr33_vgpr34
+; GFX11-TRUE16-NEXT: ; implicit-def: $vgpr6_vgpr7_vgpr8_vgpr9_vgpr10_vgpr11_vgpr12_vgpr13_vgpr14_vgpr15_vgpr16_vgpr17_vgpr18_vgpr19_vgpr20_vgpr21_vgpr22_vgpr23_vgpr24_vgpr25_vgpr26_vgpr27_vgpr28_vgpr29_vgpr30_vgpr31_vgpr32_vgpr33_vgpr34_vgpr35_vgpr36_vgpr37
+; GFX11-TRUE16-NEXT: ; implicit-def: $vgpr10_vgpr11_vgpr12_vgpr13_vgpr14_vgpr15_vgpr16_vgpr17_vgpr18_vgpr19_vgpr20_vgpr21_vgpr22_vgpr23_vgpr24_vgpr25_vgpr26_vgpr27_vgpr28_vgpr29_vgpr30_vgpr31_vgpr32_vgpr33_vgpr34_vgpr35_vgpr36_vgpr37_vgpr38_vgpr39_vgpr40_vgpr41
+; GFX11-TRUE16-NEXT: ; implicit-def: $vgpr15_vgpr16_vgpr17_vgpr18_vgpr19_vgpr20_vgpr21_vgpr22_vgpr23_vgpr24_vgpr25_vgpr26_vgpr27_vgpr28_vgpr29_vgpr30_vgpr31_vgpr32_vgpr33_vgpr34_vgpr35_vgpr36_vgpr37_vgpr38_vgpr39_vgpr40_vgpr41_vgpr42_vgpr43_vgpr44_vgpr45_vgpr46
+; GFX11-TRUE16-NEXT: ; implicit-def: $vgpr21_vgpr22_vgpr23_vgpr24_vgpr25_vgpr26_vgpr27_vgpr28_vgpr29_vgpr30_vgpr31_vgpr32_vgpr33_vgpr34_vgpr35_vgpr36_vgpr37_vgpr38_vgpr39_vgpr40_vgpr41_vgpr42_vgpr43_vgpr44_vgpr45_vgpr46_vgpr47_vgpr48_vgpr49_vgpr50_vgpr51_vgpr52
+; GFX11-TRUE16-NEXT: ; implicit-def: $vgpr28_vgpr29_vgpr30_vgpr31_vgpr32_vgpr33_vgpr34_vgpr35_vgpr36_vgpr37_vgpr38_vgpr39_vgpr40_vgpr41_vgpr42_vgpr43_vgpr44_vgpr45_vgpr46_vgpr47_vgpr48_vgpr49_vgpr50_vgpr51_vgpr52_vgpr53_vgpr54_vgpr55_vgpr56_vgpr57_vgpr58_vgpr59
+; GFX11-TRUE16-NEXT: ; implicit-def: $vgpr36_vgpr37_vgpr38_vgpr39_vgpr40_vgpr41_vgpr42_vgpr43_vgpr44_vgpr45_vgpr46_vgpr47_vgpr48_vgpr49_vgpr50_vgpr51_vgpr52_vgpr53_vgpr54_vgpr55_vgpr56_vgpr57_vgpr58_vgpr59_vgpr60_vgpr61_vgpr62_vgpr63_vgpr64_vgpr65_vgpr66_vgpr67
+; GFX11-TRUE16-NEXT: ; implicit-def: $vgpr45_vgpr46_vgpr47_vgpr48_vgpr49_vgpr50_vgpr51_vgpr52_vgpr53_vgpr54_vgpr55_vgpr56_vgpr57_vgpr58_vgpr59_vgpr60_vgpr61_vgpr62_vgpr63_vgpr64_vgpr65_vgpr66_vgpr67_vgpr68_vgpr69_vgpr70_vgpr71_vgpr72_vgpr73_vgpr74_vgpr75_vgpr76
+; GFX11-TRUE16-NEXT: ; implicit-def: $vgpr55_vgpr56_vgpr57_vgpr58_vgpr59_vgpr60_vgpr61_vgpr62_vgpr63_vgpr64_vgpr65_vgpr66_vgpr67_vgpr68_vgpr69_vgpr70_vgpr71_vgpr72_vgpr73_vgpr74_vgpr75_vgpr76_vgpr77_vgpr78_vgpr79_vgpr80_vgpr81_vgpr82_vgpr83_vgpr84_vgpr85_vgpr86
+; GFX11-TRUE16-NEXT: ; implicit-def: $vgpr66_vgpr67_vgpr68_vgpr69_vgpr70_vgpr71_vgpr72_vgpr73_vgpr74_vgpr75_vgpr76_vgpr77_vgpr78_vgpr79_vgpr80_vgpr81_vgpr82_vgpr83_vgpr84_vgpr85_vgpr86_vgpr87_vgpr88_vgpr89_vgpr90_vgpr91_vgpr92_vgpr93_vgpr94_vgpr95_vgpr96_vgpr97
+; GFX11-TRUE16-NEXT: ; implicit-def: $vgpr78_vgpr79_vgpr80_vgpr81_vgpr82_vgpr83_vgpr84_vgpr85_vgpr86_vgpr87_vgpr88_vgpr89_vgpr90_vgpr91_vgpr92_vgpr93_vgpr94_vgpr95_vgpr96_vgpr97_vgpr98_vgpr99_vgpr100_vgpr101_vgpr102_vgpr103_vgpr104_vgpr105_vgpr106_vgpr107_vgpr108_vgpr109
+; GFX11-TRUE16-NEXT: ; implicit-def: $vgpr91_vgpr92_vgpr93_vgpr94_vgpr95_vgpr96_vgpr97_vgpr98_vgpr99_vgpr100_vgpr101_vgpr102_vgpr103_vgpr104_vgpr105_vgpr106_vgpr107_vgpr108_vgpr109_vgpr110_vgpr111_vgpr112_vgpr113_vgpr114_vgpr115_vgpr116_vgpr117_vgpr118_vgpr119_vgpr120_vgpr121_vgpr122
+; GFX11-TRUE16-NEXT: ; implicit-def: $vgpr105_vgpr106_vgpr107_vgpr108_vgpr109_vgpr110_vgpr111_vgpr112_vgpr113_vgpr114_vgpr115_vgpr116_vgpr117_vgpr118_vgpr119_vgpr120_vgpr121_vgpr122_vgpr123_vgpr124_vgpr125_vgpr126_vgpr127_vgpr128_vgpr129_vgpr130_vgpr131_vgpr132_vgpr133_vgpr134_vgpr135_vgpr136
+; GFX11-TRUE16-NEXT: ; implicit-def: $vgpr120_vgpr121_vgpr122_vgpr123_vgpr124_vgpr125_vgpr126_vgpr127_vgpr128_vgpr129_vgpr130_vgpr131_vgpr132_vgpr133_vgpr134_vgpr135_vgpr136_vgpr137_vgpr138_vgpr139_vgpr140_vgpr141_vgpr142_vgpr143_vgpr144_vgpr145_vgpr146_vgpr147_vgpr148_vgpr149_vgpr150_vgpr151
+; GFX11-TRUE16-NEXT: ; implicit-def: $vgpr136_vgpr137_vgpr138_vgpr139_vgpr140_vgpr141_vgpr142_vgpr143_vgpr144_vgpr145_vgpr146_vgpr147_vgpr148_vgpr149_vgpr150_vgpr151_vgpr152_vgpr153_vgpr154_vgpr155_vgpr156_vgpr157_vgpr158_vgpr159_vgpr160_vgpr161_vgpr162_vgpr163_vgpr164_vgpr165_vgpr166_vgpr167
+; GFX11-TRUE16-NEXT: ; implicit-def: $vgpr153_vgpr154_vgpr155_vgpr156_vgpr157_vgpr158_vgpr159_vgpr160_vgpr161_vgpr162_vgpr163_vgpr164_vgpr165_vgpr166_vgpr167_vgpr168_vgpr169_vgpr170_vgpr171_vgpr172_vgpr173_vgpr174_vgpr175_vgpr176_vgpr177_vgpr178_vgpr179_vgpr180_vgpr181_vgpr182_vgpr183_vgpr184
; GFX11-TRUE16-NEXT: s_branch .LBB43_2
;
; GFX11-FAKE16-LABEL: bitcast_v40i16_to_v10i64_scalar:
@@ -23094,93 +24237,270 @@ define inreg <10 x i64> @bitcast_v40f16_to_v10i64_scalar(<40 x half> inreg %a, i
; GFX11-TRUE16-LABEL: bitcast_v40f16_to_v10i64_scalar:
; GFX11-TRUE16: ; %bb.0:
; GFX11-TRUE16-NEXT: s_waitcnt vmcnt(0) expcnt(0) lgkmcnt(0)
-; GFX11-TRUE16-NEXT: v_mov_b16_e32 v32.h, 0
; GFX11-TRUE16-NEXT: v_cmp_ne_u32_e32 vcc_lo, 0, v2
-; GFX11-TRUE16-NEXT: v_mov_b16_e32 v32.l, v1.h
-; GFX11-TRUE16-NEXT: v_mov_b16_e32 v33.l, v0.h
-; GFX11-TRUE16-NEXT: v_and_b32_e32 v35, 0xffff, v0
-; GFX11-TRUE16-NEXT: v_mov_b16_e32 v33.h, v32.h
-; GFX11-TRUE16-NEXT: v_and_b32_e32 v34, 0xffff, v1
-; GFX11-TRUE16-NEXT: s_lshr_b32 s41, s29, 16
-; GFX11-TRUE16-NEXT: s_lshr_b32 s42, s28, 16
-; GFX11-TRUE16-NEXT: s_lshr_b32 s43, s27, 16
-; GFX11-TRUE16-NEXT: s_lshr_b32 s44, s26, 16
-; GFX11-TRUE16-NEXT: s_lshr_b32 s45, s25, 16
-; GFX11-TRUE16-NEXT: s_lshr_b32 s15, s24, 16
-; GFX11-TRUE16-NEXT: s_lshr_b32 s14, s23, 16
-; GFX11-TRUE16-NEXT: s_lshr_b32 s13, s22, 16
-; GFX11-TRUE16-NEXT: s_lshr_b32 s12, s21, 16
-; GFX11-TRUE16-NEXT: s_lshr_b32 s11, s20, 16
-; GFX11-TRUE16-NEXT: s_lshr_b32 s10, s19, 16
-; GFX11-TRUE16-NEXT: s_lshr_b32 s9, s18, 16
-; GFX11-TRUE16-NEXT: s_lshr_b32 s8, s17, 16
-; GFX11-TRUE16-NEXT: s_lshr_b32 s7, s16, 16
-; GFX11-TRUE16-NEXT: s_lshr_b32 s46, s3, 16
-; GFX11-TRUE16-NEXT: s_lshr_b32 s6, s2, 16
-; GFX11-TRUE16-NEXT: s_lshr_b32 s5, s1, 16
-; GFX11-TRUE16-NEXT: s_lshr_b32 s4, s0, 16
-; GFX11-TRUE16-NEXT: s_mov_b32 s40, 0
-; GFX11-TRUE16-NEXT: s_pack_ll_b32_b16 s4, s0, s4
-; GFX11-TRUE16-NEXT: s_pack_ll_b32_b16 s5, s1, s5
-; GFX11-TRUE16-NEXT: s_pack_ll_b32_b16 s6, s2, s6
-; GFX11-TRUE16-NEXT: s_pack_ll_b32_b16 s3, s3, s46
-; GFX11-TRUE16-NEXT: s_pack_ll_b32_b16 s7, s16, s7
-; GFX11-TRUE16-NEXT: s_pack_ll_b32_b16 s8, s17, s8
-; GFX11-TRUE16-NEXT: s_pack_ll_b32_b16 s9, s18, s9
-; GFX11-TRUE16-NEXT: s_pack_ll_b32_b16 s10, s19, s10
-; GFX11-TRUE16-NEXT: s_pack_ll_b32_b16 s11, s20, s11
-; GFX11-TRUE16-NEXT: s_pack_ll_b32_b16 s12, s21, s12
-; GFX11-TRUE16-NEXT: s_pack_ll_b32_b16 s13, s22, s13
-; GFX11-TRUE16-NEXT: s_pack_ll_b32_b16 s14, s23, s14
-; GFX11-TRUE16-NEXT: s_pack_ll_b32_b16 s15, s24, s15
-; GFX11-TRUE16-NEXT: s_pack_ll_b32_b16 s16, s25, s45
-; GFX11-TRUE16-NEXT: s_pack_ll_b32_b16 s17, s26, s44
-; GFX11-TRUE16-NEXT: s_pack_ll_b32_b16 s0, s27, s43
-; GFX11-TRUE16-NEXT: s_pack_ll_b32_b16 s1, s28, s42
-; GFX11-TRUE16-NEXT: s_pack_ll_b32_b16 s2, s29, s41
+; GFX11-TRUE16-NEXT: s_clause 0x1f
+; GFX11-TRUE16-NEXT: scratch_store_b32 off, v40, s32 offset:296
+; GFX11-TRUE16-NEXT: scratch_store_b32 off, v41, s32 offset:292
+; GFX11-TRUE16-NEXT: scratch_store_b32 off, v42, s32 offset:288
+; GFX11-TRUE16-NEXT: scratch_store_b32 off, v43, s32 offset:284
+; GFX11-TRUE16-NEXT: scratch_store_b32 off, v44, s32 offset:280
+; GFX11-TRUE16-NEXT: scratch_store_b32 off, v45, s32 offset:276
+; GFX11-TRUE16-NEXT: scratch_store_b32 off, v46, s32 offset:272
+; GFX11-TRUE16-NEXT: scratch_store_b32 off, v47, s32 offset:268
+; GFX11-TRUE16-NEXT: scratch_store_b32 off, v56, s32 offset:264
+; GFX11-TRUE16-NEXT: scratch_store_b32 off, v57, s32 offset:260
+; GFX11-TRUE16-NEXT: scratch_store_b32 off, v58, s32 offset:256
+; GFX11-TRUE16-NEXT: scratch_store_b32 off, v59, s32 offset:252
+; GFX11-TRUE16-NEXT: scratch_store_b32 off, v60, s32 offset:248
+; GFX11-TRUE16-NEXT: scratch_store_b32 off, v61, s32 offset:244
+; GFX11-TRUE16-NEXT: scratch_store_b32 off, v62, s32 offset:240
+; GFX11-TRUE16-NEXT: scratch_store_b32 off, v63, s32 offset:236
+; GFX11-TRUE16-NEXT: scratch_store_b32 off, v72, s32 offset:232
+; GFX11-TRUE16-NEXT: scratch_store_b32 off, v73, s32 offset:228
+; GFX11-TRUE16-NEXT: scratch_store_b32 off, v74, s32 offset:224
+; GFX11-TRUE16-NEXT: scratch_store_b32 off, v75, s32 offset:220
+; GFX11-TRUE16-NEXT: scratch_store_b32 off, v76, s32 offset:216
+; GFX11-TRUE16-NEXT: scratch_store_b32 off, v77, s32 offset:212
+; GFX11-TRUE16-NEXT: scratch_store_b32 off, v78, s32 offset:208
+; GFX11-TRUE16-NEXT: scratch_store_b32 off, v79, s32 offset:204
+; GFX11-TRUE16-NEXT: scratch_store_b32 off, v88, s32 offset:200
+; GFX11-TRUE16-NEXT: scratch_store_b32 off, v89, s32 offset:196
+; GFX11-TRUE16-NEXT: scratch_store_b32 off, v90, s32 offset:192
+; GFX11-TRUE16-NEXT: scratch_store_b32 off, v91, s32 offset:188
+; GFX11-TRUE16-NEXT: scratch_store_b32 off, v92, s32 offset:184
+; GFX11-TRUE16-NEXT: scratch_store_b32 off, v93, s32 offset:180
+; GFX11-TRUE16-NEXT: scratch_store_b32 off, v94, s32 offset:176
+; GFX11-TRUE16-NEXT: scratch_store_b32 off, v95, s32 offset:172
+; GFX11-TRUE16-NEXT: s_clause 0x1f
+; GFX11-TRUE16-NEXT: scratch_store_b32 off, v104, s32 offset:168
+; GFX11-TRUE16-NEXT: scratch_store_b32 off, v105, s32 offset:164
+; GFX11-TRUE16-NEXT: scratch_store_b32 off, v106, s32 offset:160
+; GFX11-TRUE16-NEXT: scratch_store_b32 off, v107, s32 offset:156
+; GFX11-TRUE16-NEXT: scratch_store_b32 off, v108, s32 offset:152
+; GFX11-TRUE16-NEXT: scratch_store_b32 off, v109, s32 offset:148
+; GFX11-TRUE16-NEXT: scratch_store_b32 off, v110, s32 offset:144
+; GFX11-TRUE16-NEXT: scratch_store_b32 off, v111, s32 offset:140
+; GFX11-TRUE16-NEXT: scratch_store_b32 off, v120, s32 offset:136
+; GFX11-TRUE16-NEXT: scratch_store_b32 off, v121, s32 offset:132
+; GFX11-TRUE16-NEXT: scratch_store_b32 off, v122, s32 offset:128
+; GFX11-TRUE16-NEXT: scratch_store_b32 off, v123, s32 offset:124
+; GFX11-TRUE16-NEXT: scratch_store_b32 off, v124, s32 offset:120
+; GFX11-TRUE16-NEXT: scratch_store_b32 off, v125, s32 offset:116
+; GFX11-TRUE16-NEXT: scratch_store_b32 off, v126, s32 offset:112
+; GFX11-TRUE16-NEXT: scratch_store_b32 off, v127, s32 offset:108
+; GFX11-TRUE16-NEXT: scratch_store_b32 off, v136, s32 offset:104
+; GFX11-TRUE16-NEXT: scratch_store_b32 off, v137, s32 offset:100
+; GFX11-TRUE16-NEXT: scratch_store_b32 off, v138, s32 offset:96
+; GFX11-TRUE16-NEXT: scratch_store_b32 off, v139, s32 offset:92
+; GFX11-TRUE16-NEXT: scratch_store_b32 off, v140, s32 offset:88
+; GFX11-TRUE16-NEXT: scratch_store_b32 off, v141, s32 offset:84
+; GFX11-TRUE16-NEXT: scratch_store_b32 off, v142, s32 offset:80
+; GFX11-TRUE16-NEXT: scratch_store_b32 off, v143, s32 offset:76
+; GFX11-TRUE16-NEXT: scratch_store_b32 off, v152, s32 offset:72
+; GFX11-TRUE16-NEXT: scratch_store_b32 off, v153, s32 offset:68
+; GFX11-TRUE16-NEXT: scratch_store_b32 off, v154, s32 offset:64
+; GFX11-TRUE16-NEXT: scratch_store_b32 off, v155, s32 offset:60
+; GFX11-TRUE16-NEXT: scratch_store_b32 off, v156, s32 offset:56
+; GFX11-TRUE16-NEXT: scratch_store_b32 off, v157, s32 offset:52
+; GFX11-TRUE16-NEXT: scratch_store_b32 off, v158, s32 offset:48
+; GFX11-TRUE16-NEXT: scratch_store_b32 off, v159, s32 offset:44
+; GFX11-TRUE16-NEXT: s_clause 0xa
+; GFX11-TRUE16-NEXT: scratch_store_b32 off, v168, s32 offset:40
+; GFX11-TRUE16-NEXT: scratch_store_b32 off, v169, s32 offset:36
+; GFX11-TRUE16-NEXT: scratch_store_b32 off, v170, s32 offset:32
+; GFX11-TRUE16-NEXT: scratch_store_b32 off, v171, s32 offset:28
+; GFX11-TRUE16-NEXT: scratch_store_b32 off, v172, s32 offset:24
+; GFX11-TRUE16-NEXT: scratch_store_b32 off, v173, s32 offset:20
+; GFX11-TRUE16-NEXT: scratch_store_b32 off, v174, s32 offset:16
+; GFX11-TRUE16-NEXT: scratch_store_b32 off, v175, s32 offset:12
+; GFX11-TRUE16-NEXT: scratch_store_b32 off, v184, s32 offset:8
+; GFX11-TRUE16-NEXT: scratch_store_b32 off, v185, s32 offset:4
+; GFX11-TRUE16-NEXT: scratch_store_b32 off, v186, s32
+; GFX11-TRUE16-NEXT: v_dual_mov_b32 v185, v1 :: v_dual_mov_b32 v186, v0
+; GFX11-TRUE16-NEXT: s_lshr_b32 s15, s29, 16
+; GFX11-TRUE16-NEXT: s_lshr_b32 s14, s28, 16
+; GFX11-TRUE16-NEXT: s_lshr_b32 s13, s27, 16
+; GFX11-TRUE16-NEXT: s_lshr_b32 s12, s26, 16
+; GFX11-TRUE16-NEXT: s_lshr_b32 s11, s25, 16
+; GFX11-TRUE16-NEXT: s_lshr_b32 s10, s24, 16
+; GFX11-TRUE16-NEXT: s_lshr_b32 s9, s23, 16
+; GFX11-TRUE16-NEXT: s_lshr_b32 s8, s22, 16
+; GFX11-TRUE16-NEXT: s_lshr_b32 s7, s21, 16
+; GFX11-TRUE16-NEXT: s_lshr_b32 s6, s20, 16
+; GFX11-TRUE16-NEXT: s_lshr_b32 s5, s19, 16
+; GFX11-TRUE16-NEXT: s_lshr_b32 s4, s18, 16
+; GFX11-TRUE16-NEXT: s_lshr_b32 s43, s17, 16
+; GFX11-TRUE16-NEXT: s_lshr_b32 s44, s16, 16
+; GFX11-TRUE16-NEXT: s_lshr_b32 s45, s3, 16
+; GFX11-TRUE16-NEXT: s_lshr_b32 s46, s2, 16
+; GFX11-TRUE16-NEXT: s_lshr_b32 s41, s1, 16
+; GFX11-TRUE16-NEXT: s_lshr_b32 s40, s0, 16
+; GFX11-TRUE16-NEXT: s_mov_b32 s42, 0
+; GFX11-TRUE16-NEXT: s_pack_ll_b32_b16 s40, s0, s40
+; GFX11-TRUE16-NEXT: s_pack_ll_b32_b16 s41, s1, s41
+; GFX11-TRUE16-NEXT: s_pack_ll_b32_b16 s0, s2, s46
+; GFX11-TRUE16-NEXT: s_pack_ll_b32_b16 s1, s3, s45
+; GFX11-TRUE16-NEXT: s_pack_ll_b32_b16 s2, s16, s44
+; GFX11-TRUE16-NEXT: s_pack_ll_b32_b16 s3, s17, s43
+; GFX11-TRUE16-NEXT: s_pack_ll_b32_b16 s4, s18, s4
+; GFX11-TRUE16-NEXT: s_pack_ll_b32_b16 s5, s19, s5
+; GFX11-TRUE16-NEXT: s_pack_ll_b32_b16 s6, s20, s6
+; GFX11-TRUE16-NEXT: s_pack_ll_b32_b16 s7, s21, s7
+; GFX11-TRUE16-NEXT: s_pack_ll_b32_b16 s8, s22, s8
+; GFX11-TRUE16-NEXT: s_pack_ll_b32_b16 s9, s23, s9
+; GFX11-TRUE16-NEXT: s_pack_ll_b32_b16 s10, s24, s10
+; GFX11-TRUE16-NEXT: s_pack_ll_b32_b16 s11, s25, s11
+; GFX11-TRUE16-NEXT: s_pack_ll_b32_b16 s12, s26, s12
+; GFX11-TRUE16-NEXT: s_pack_ll_b32_b16 s13, s27, s13
+; GFX11-TRUE16-NEXT: s_pack_ll_b32_b16 s14, s28, s14
+; GFX11-TRUE16-NEXT: s_pack_ll_b32_b16 s15, s29, s15
; GFX11-TRUE16-NEXT: s_and_b32 s47, vcc_lo, exec_lo
; GFX11-TRUE16-NEXT: s_cbranch_scc0 .LBB47_4
; GFX11-TRUE16-NEXT: ; %bb.1: ; %cmp.false
-; GFX11-TRUE16-NEXT: v_lshl_or_b32 v18, v33, 16, v35
-; GFX11-TRUE16-NEXT: v_lshl_or_b32 v19, v32, 16, v34
-; GFX11-TRUE16-NEXT: v_dual_mov_b32 v0, s4 :: v_dual_mov_b32 v1, s5
-; GFX11-TRUE16-NEXT: v_dual_mov_b32 v2, s6 :: v_dual_mov_b32 v3, s3
-; GFX11-TRUE16-NEXT: v_dual_mov_b32 v4, s7 :: v_dual_mov_b32 v5, s8
-; GFX11-TRUE16-NEXT: v_dual_mov_b32 v6, s9 :: v_dual_mov_b32 v7, s10
-; GFX11-TRUE16-NEXT: v_dual_mov_b32 v8, s11 :: v_dual_mov_b32 v9, s12
-; GFX11-TRUE16-NEXT: v_dual_mov_b32 v10, s13 :: v_dual_mov_b32 v11, s14
-; GFX11-TRUE16-NEXT: v_dual_mov_b32 v12, s15 :: v_dual_mov_b32 v13, s16
-; GFX11-TRUE16-NEXT: v_dual_mov_b32 v14, s17 :: v_dual_mov_b32 v15, s0
-; GFX11-TRUE16-NEXT: v_dual_mov_b32 v16, s1 :: v_dual_mov_b32 v17, s2
-; GFX11-TRUE16-NEXT: s_and_not1_b32 vcc_lo, exec_lo, s40
+; GFX11-TRUE16-NEXT: v_dual_mov_b32 v0, s40 :: v_dual_mov_b32 v5, s0
+; GFX11-TRUE16-NEXT: v_dual_mov_b32 v2, s41 :: v_dual_mov_b32 v9, s1
+; GFX11-TRUE16-NEXT: v_dual_mov_b32 v14, s2 :: v_dual_mov_b32 v27, s4
+; GFX11-TRUE16-NEXT: v_dual_mov_b32 v20, s3 :: v_dual_mov_b32 v35, s5
+; GFX11-TRUE16-NEXT: v_dual_mov_b32 v44, s6 :: v_dual_mov_b32 v65, s8
+; GFX11-TRUE16-NEXT: v_dual_mov_b32 v54, s7 :: v_dual_mov_b32 v77, s9
+; GFX11-TRUE16-NEXT: v_dual_mov_b32 v90, s10 :: v_dual_mov_b32 v119, s12
+; GFX11-TRUE16-NEXT: v_dual_mov_b32 v104, s11 :: v_dual_mov_b32 v135, s13
+; GFX11-TRUE16-NEXT: v_mov_b32_e32 v152, s14
+; GFX11-TRUE16-NEXT: v_mov_b32_e32 v170, s15
+; GFX11-TRUE16-NEXT: s_and_not1_b32 vcc_lo, exec_lo, s42
; GFX11-TRUE16-NEXT: s_cbranch_vccnz .LBB47_3
; GFX11-TRUE16-NEXT: .LBB47_2: ; %cmp.true
-; GFX11-TRUE16-NEXT: v_lshl_or_b32 v18, v33, 16, v35
-; GFX11-TRUE16-NEXT: v_lshl_or_b32 v19, v32, 16, v34
-; GFX11-TRUE16-NEXT: v_pk_add_f16 v0, 0x200, s4 op_sel_hi:[0,1]
-; GFX11-TRUE16-NEXT: v_pk_add_f16 v1, 0x200, s5 op_sel_hi:[0,1]
-; GFX11-TRUE16-NEXT: v_pk_add_f16 v2, 0x200, s6 op_sel_hi:[0,1]
-; GFX11-TRUE16-NEXT: v_pk_add_f16 v3, 0x200, s3 op_sel_hi:[0,1]
-; GFX11-TRUE16-NEXT: v_pk_add_f16 v4, 0x200, s7 op_sel_hi:[0,1]
-; GFX11-TRUE16-NEXT: v_pk_add_f16 v5, 0x200, s8 op_sel_hi:[0,1]
-; GFX11-TRUE16-NEXT: v_pk_add_f16 v6, 0x200, s9 op_sel_hi:[0,1]
-; GFX11-TRUE16-NEXT: v_pk_add_f16 v7, 0x200, s10 op_sel_hi:[0,1]
-; GFX11-TRUE16-NEXT: v_pk_add_f16 v8, 0x200, s11 op_sel_hi:[0,1]
-; GFX11-TRUE16-NEXT: v_pk_add_f16 v9, 0x200, s12 op_sel_hi:[0,1]
-; GFX11-TRUE16-NEXT: v_pk_add_f16 v10, 0x200, s13 op_sel_hi:[0,1]
-; GFX11-TRUE16-NEXT: v_pk_add_f16 v11, 0x200, s14 op_sel_hi:[0,1]
-; GFX11-TRUE16-NEXT: v_pk_add_f16 v12, 0x200, s15 op_sel_hi:[0,1]
-; GFX11-TRUE16-NEXT: v_pk_add_f16 v13, 0x200, s16 op_sel_hi:[0,1]
-; GFX11-TRUE16-NEXT: v_pk_add_f16 v14, 0x200, s17 op_sel_hi:[0,1]
-; GFX11-TRUE16-NEXT: v_pk_add_f16 v15, 0x200, s0 op_sel_hi:[0,1]
-; GFX11-TRUE16-NEXT: v_pk_add_f16 v16, 0x200, s1 op_sel_hi:[0,1]
-; GFX11-TRUE16-NEXT: v_pk_add_f16 v17, 0x200, s2 op_sel_hi:[0,1]
-; GFX11-TRUE16-NEXT: v_pk_add_f16 v18, 0x200, v18 op_sel_hi:[0,1]
-; GFX11-TRUE16-NEXT: v_pk_add_f16 v19, 0x200, v19 op_sel_hi:[0,1]
+; GFX11-TRUE16-NEXT: v_pk_add_f16 v0, 0x200, s40 op_sel_hi:[0,1]
+; GFX11-TRUE16-NEXT: v_pk_add_f16 v2, 0x200, s41 op_sel_hi:[0,1]
+; GFX11-TRUE16-NEXT: v_pk_add_f16 v186, 0x200, v186 op_sel_hi:[0,1]
+; GFX11-TRUE16-NEXT: v_pk_add_f16 v185, 0x200, v185 op_sel_hi:[0,1]
+; GFX11-TRUE16-NEXT: v_pk_add_f16 v5, 0x200, s0 op_sel_hi:[0,1]
+; GFX11-TRUE16-NEXT: v_pk_add_f16 v9, 0x200, s1 op_sel_hi:[0,1]
+; GFX11-TRUE16-NEXT: v_pk_add_f16 v14, 0x200, s2 op_sel_hi:[0,1]
+; GFX11-TRUE16-NEXT: v_pk_add_f16 v20, 0x200, s3 op_sel_hi:[0,1]
+; GFX11-TRUE16-NEXT: v_pk_add_f16 v27, 0x200, s4 op_sel_hi:[0,1]
+; GFX11-TRUE16-NEXT: v_pk_add_f16 v35, 0x200, s5 op_sel_hi:[0,1]
+; GFX11-TRUE16-NEXT: v_pk_add_f16 v44, 0x200, s6 op_sel_hi:[0,1]
+; GFX11-TRUE16-NEXT: v_pk_add_f16 v54, 0x200, s7 op_sel_hi:[0,1]
+; GFX11-TRUE16-NEXT: v_pk_add_f16 v65, 0x200, s8 op_sel_hi:[0,1]
+; GFX11-TRUE16-NEXT: v_pk_add_f16 v77, 0x200, s9 op_sel_hi:[0,1]
+; GFX11-TRUE16-NEXT: v_pk_add_f16 v90, 0x200, s10 op_sel_hi:[0,1]
+; GFX11-TRUE16-NEXT: v_pk_add_f16 v104, 0x200, s11 op_sel_hi:[0,1]
+; GFX11-TRUE16-NEXT: v_pk_add_f16 v119, 0x200, s12 op_sel_hi:[0,1]
+; GFX11-TRUE16-NEXT: v_pk_add_f16 v135, 0x200, s13 op_sel_hi:[0,1]
+; GFX11-TRUE16-NEXT: v_pk_add_f16 v152, 0x200, s14 op_sel_hi:[0,1]
+; GFX11-TRUE16-NEXT: v_pk_add_f16 v170, 0x200, s15 op_sel_hi:[0,1]
; GFX11-TRUE16-NEXT: .LBB47_3: ; %end
+; GFX11-TRUE16-NEXT: v_dual_mov_b32 v7, v35 :: v_dual_mov_b32 v8, v44
+; GFX11-TRUE16-NEXT: v_dual_mov_b32 v11, v77 :: v_dual_mov_b32 v12, v90
+; GFX11-TRUE16-NEXT: v_mov_b32_e32 v13, v104
+; GFX11-TRUE16-NEXT: v_dual_mov_b32 v15, v135 :: v_dual_mov_b32 v16, v152
+; GFX11-TRUE16-NEXT: v_mov_b32_e32 v17, v170
+; GFX11-TRUE16-NEXT: v_dual_mov_b32 v18, v186 :: v_dual_mov_b32 v19, v185
+; GFX11-TRUE16-NEXT: s_clause 0x1f
+; GFX11-TRUE16-NEXT: scratch_load_b32 v186, off, s32
+; GFX11-TRUE16-NEXT: scratch_load_b32 v185, off, s32 offset:4
+; GFX11-TRUE16-NEXT: scratch_load_b32 v184, off, s32 offset:8
+; GFX11-TRUE16-NEXT: scratch_load_b32 v175, off, s32 offset:12
+; GFX11-TRUE16-NEXT: scratch_load_b32 v174, off, s32 offset:16
+; GFX11-TRUE16-NEXT: scratch_load_b32 v173, off, s32 offset:20
+; GFX11-TRUE16-NEXT: scratch_load_b32 v172, off, s32 offset:24
+; GFX11-TRUE16-NEXT: scratch_load_b32 v171, off, s32 offset:28
+; GFX11-TRUE16-NEXT: scratch_load_b32 v170, off, s32 offset:32
+; GFX11-TRUE16-NEXT: scratch_load_b32 v169, off, s32 offset:36
+; GFX11-TRUE16-NEXT: scratch_load_b32 v168, off, s32 offset:40
+; GFX11-TRUE16-NEXT: scratch_load_b32 v159, off, s32 offset:44
+; GFX11-TRUE16-NEXT: scratch_load_b32 v158, off, s32 offset:48
+; GFX11-TRUE16-NEXT: scratch_load_b32 v157, off, s32 offset:52
+; GFX11-TRUE16-NEXT: scratch_load_b32 v156, off, s32 offset:56
+; GFX11-TRUE16-NEXT: scratch_load_b32 v155, off, s32 offset:60
+; GFX11-TRUE16-NEXT: scratch_load_b32 v154, off, s32 offset:64
+; GFX11-TRUE16-NEXT: scratch_load_b32 v153, off, s32 offset:68
+; GFX11-TRUE16-NEXT: scratch_load_b32 v152, off, s32 offset:72
+; GFX11-TRUE16-NEXT: scratch_load_b32 v143, off, s32 offset:76
+; GFX11-TRUE16-NEXT: scratch_load_b32 v142, off, s32 offset:80
+; GFX11-TRUE16-NEXT: scratch_load_b32 v141, off, s32 offset:84
+; GFX11-TRUE16-NEXT: scratch_load_b32 v140, off, s32 offset:88
+; GFX11-TRUE16-NEXT: scratch_load_b32 v139, off, s32 offset:92
+; GFX11-TRUE16-NEXT: scratch_load_b32 v138, off, s32 offset:96
+; GFX11-TRUE16-NEXT: scratch_load_b32 v137, off, s32 offset:100
+; GFX11-TRUE16-NEXT: scratch_load_b32 v136, off, s32 offset:104
+; GFX11-TRUE16-NEXT: scratch_load_b32 v127, off, s32 offset:108
+; GFX11-TRUE16-NEXT: scratch_load_b32 v126, off, s32 offset:112
+; GFX11-TRUE16-NEXT: scratch_load_b32 v125, off, s32 offset:116
+; GFX11-TRUE16-NEXT: scratch_load_b32 v124, off, s32 offset:120
+; GFX11-TRUE16-NEXT: scratch_load_b32 v123, off, s32 offset:124
+; GFX11-TRUE16-NEXT: s_clause 0x1f
+; GFX11-TRUE16-NEXT: scratch_load_b32 v122, off, s32 offset:128
+; GFX11-TRUE16-NEXT: scratch_load_b32 v121, off, s32 offset:132
+; GFX11-TRUE16-NEXT: scratch_load_b32 v120, off, s32 offset:136
+; GFX11-TRUE16-NEXT: scratch_load_b32 v111, off, s32 offset:140
+; GFX11-TRUE16-NEXT: scratch_load_b32 v110, off, s32 offset:144
+; GFX11-TRUE16-NEXT: scratch_load_b32 v109, off, s32 offset:148
+; GFX11-TRUE16-NEXT: scratch_load_b32 v108, off, s32 offset:152
+; GFX11-TRUE16-NEXT: scratch_load_b32 v107, off, s32 offset:156
+; GFX11-TRUE16-NEXT: scratch_load_b32 v106, off, s32 offset:160
+; GFX11-TRUE16-NEXT: scratch_load_b32 v105, off, s32 offset:164
+; GFX11-TRUE16-NEXT: scratch_load_b32 v104, off, s32 offset:168
+; GFX11-TRUE16-NEXT: scratch_load_b32 v95, off, s32 offset:172
+; GFX11-TRUE16-NEXT: scratch_load_b32 v94, off, s32 offset:176
+; GFX11-TRUE16-NEXT: scratch_load_b32 v93, off, s32 offset:180
+; GFX11-TRUE16-NEXT: scratch_load_b32 v92, off, s32 offset:184
+; GFX11-TRUE16-NEXT: scratch_load_b32 v91, off, s32 offset:188
+; GFX11-TRUE16-NEXT: scratch_load_b32 v90, off, s32 offset:192
+; GFX11-TRUE16-NEXT: scratch_load_b32 v89, off, s32 offset:196
+; GFX11-TRUE16-NEXT: scratch_load_b32 v88, off, s32 offset:200
+; GFX11-TRUE16-NEXT: scratch_load_b32 v79, off, s32 offset:204
+; GFX11-TRUE16-NEXT: scratch_load_b32 v78, off, s32 offset:208
+; GFX11-TRUE16-NEXT: scratch_load_b32 v77, off, s32 offset:212
+; GFX11-TRUE16-NEXT: scratch_load_b32 v76, off, s32 offset:216
+; GFX11-TRUE16-NEXT: scratch_load_b32 v75, off, s32 offset:220
+; GFX11-TRUE16-NEXT: scratch_load_b32 v74, off, s32 offset:224
+; GFX11-TRUE16-NEXT: scratch_load_b32 v73, off, s32 offset:228
+; GFX11-TRUE16-NEXT: scratch_load_b32 v72, off, s32 offset:232
+; GFX11-TRUE16-NEXT: scratch_load_b32 v63, off, s32 offset:236
+; GFX11-TRUE16-NEXT: scratch_load_b32 v62, off, s32 offset:240
+; GFX11-TRUE16-NEXT: scratch_load_b32 v61, off, s32 offset:244
+; GFX11-TRUE16-NEXT: scratch_load_b32 v60, off, s32 offset:248
+; GFX11-TRUE16-NEXT: scratch_load_b32 v59, off, s32 offset:252
+; GFX11-TRUE16-NEXT: s_clause 0xa
+; GFX11-TRUE16-NEXT: scratch_load_b32 v58, off, s32 offset:256
+; GFX11-TRUE16-NEXT: scratch_load_b32 v57, off, s32 offset:260
+; GFX11-TRUE16-NEXT: scratch_load_b32 v56, off, s32 offset:264
+; GFX11-TRUE16-NEXT: scratch_load_b32 v47, off, s32 offset:268
+; GFX11-TRUE16-NEXT: scratch_load_b32 v46, off, s32 offset:272
+; GFX11-TRUE16-NEXT: scratch_load_b32 v45, off, s32 offset:276
+; GFX11-TRUE16-NEXT: scratch_load_b32 v44, off, s32 offset:280
+; GFX11-TRUE16-NEXT: scratch_load_b32 v43, off, s32 offset:284
+; GFX11-TRUE16-NEXT: scratch_load_b32 v42, off, s32 offset:288
+; GFX11-TRUE16-NEXT: scratch_load_b32 v41, off, s32 offset:292
+; GFX11-TRUE16-NEXT: scratch_load_b32 v40, off, s32 offset:296
+; GFX11-TRUE16-NEXT: v_dual_mov_b32 v1, v2 :: v_dual_mov_b32 v2, v5
+; GFX11-TRUE16-NEXT: v_dual_mov_b32 v3, v9 :: v_dual_mov_b32 v4, v14
+; GFX11-TRUE16-NEXT: v_dual_mov_b32 v5, v20 :: v_dual_mov_b32 v6, v27
+; GFX11-TRUE16-NEXT: v_dual_mov_b32 v9, v54 :: v_dual_mov_b32 v10, v65
+; GFX11-TRUE16-NEXT: v_mov_b32_e32 v14, v119
+; GFX11-TRUE16-NEXT: s_waitcnt vmcnt(0)
; GFX11-TRUE16-NEXT: s_setpc_b64 s[30:31]
; GFX11-TRUE16-NEXT: .LBB47_4:
; GFX11-TRUE16-NEXT: ; implicit-def: $vgpr0_vgpr1_vgpr2_vgpr3_vgpr4_vgpr5_vgpr6_vgpr7_vgpr8_vgpr9_vgpr10_vgpr11_vgpr12_vgpr13_vgpr14_vgpr15_vgpr16_vgpr17_vgpr18_vgpr19_vgpr20_vgpr21_vgpr22_vgpr23_vgpr24_vgpr25_vgpr26_vgpr27_vgpr28_vgpr29_vgpr30_vgpr31
+; GFX11-TRUE16-NEXT: ; implicit-def: $vgpr1_vgpr2_vgpr3_vgpr4_vgpr5_vgpr6_vgpr7_vgpr8_vgpr9_vgpr10_vgpr11_vgpr12_vgpr13_vgpr14_vgpr15_vgpr16_vgpr17_vgpr18_vgpr19_vgpr20_vgpr21_vgpr22_vgpr23_vgpr24_vgpr25_vgpr26_vgpr27_vgpr28_vgpr29_vgpr30_vgpr31_vgpr32
+; GFX11-TRUE16-NEXT: ; implicit-def: $vgpr3_vgpr4_vgpr5_vgpr6_vgpr7_vgpr8_vgpr9_vgpr10_vgpr11_vgpr12_vgpr13_vgpr14_vgpr15_vgpr16_vgpr17_vgpr18_vgpr19_vgpr20_vgpr21_vgpr22_vgpr23_vgpr24_vgpr25_vgpr26_vgpr27_vgpr28_vgpr29_vgpr30_vgpr31_vgpr32_vgpr33_vgpr34
+; GFX11-TRUE16-NEXT: ; implicit-def: $vgpr6_vgpr7_vgpr8_vgpr9_vgpr10_vgpr11_vgpr12_vgpr13_vgpr14_vgpr15_vgpr16_vgpr17_vgpr18_vgpr19_vgpr20_vgpr21_vgpr22_vgpr23_vgpr24_vgpr25_vgpr26_vgpr27_vgpr28_vgpr29_vgpr30_vgpr31_vgpr32_vgpr33_vgpr34_vgpr35_vgpr36_vgpr37
+; GFX11-TRUE16-NEXT: ; implicit-def: $vgpr10_vgpr11_vgpr12_vgpr13_vgpr14_vgpr15_vgpr16_vgpr17_vgpr18_vgpr19_vgpr20_vgpr21_vgpr22_vgpr23_vgpr24_vgpr25_vgpr26_vgpr27_vgpr28_vgpr29_vgpr30_vgpr31_vgpr32_vgpr33_vgpr34_vgpr35_vgpr36_vgpr37_vgpr38_vgpr39_vgpr40_vgpr41
+; GFX11-TRUE16-NEXT: ; implicit-def: $vgpr15_vgpr16_vgpr17_vgpr18_vgpr19_vgpr20_vgpr21_vgpr22_vgpr23_vgpr24_vgpr25_vgpr26_vgpr27_vgpr28_vgpr29_vgpr30_vgpr31_vgpr32_vgpr33_vgpr34_vgpr35_vgpr36_vgpr37_vgpr38_vgpr39_vgpr40_vgpr41_vgpr42_vgpr43_vgpr44_vgpr45_vgpr46
+; GFX11-TRUE16-NEXT: ; implicit-def: $vgpr21_vgpr22_vgpr23_vgpr24_vgpr25_vgpr26_vgpr27_vgpr28_vgpr29_vgpr30_vgpr31_vgpr32_vgpr33_vgpr34_vgpr35_vgpr36_vgpr37_vgpr38_vgpr39_vgpr40_vgpr41_vgpr42_vgpr43_vgpr44_vgpr45_vgpr46_vgpr47_vgpr48_vgpr49_vgpr50_vgpr51_vgpr52
+; GFX11-TRUE16-NEXT: ; implicit-def: $vgpr28_vgpr29_vgpr30_vgpr31_vgpr32_vgpr33_vgpr34_vgpr35_vgpr36_vgpr37_vgpr38_vgpr39_vgpr40_vgpr41_vgpr42_vgpr43_vgpr44_vgpr45_vgpr46_vgpr47_vgpr48_vgpr49_vgpr50_vgpr51_vgpr52_vgpr53_vgpr54_vgpr55_vgpr56_vgpr57_vgpr58_vgpr59
+; GFX11-TRUE16-NEXT: ; implicit-def: $vgpr36_vgpr37_vgpr38_vgpr39_vgpr40_vgpr41_vgpr42_vgpr43_vgpr44_vgpr45_vgpr46_vgpr47_vgpr48_vgpr49_vgpr50_vgpr51_vgpr52_vgpr53_vgpr54_vgpr55_vgpr56_vgpr57_vgpr58_vgpr59_vgpr60_vgpr61_vgpr62_vgpr63_vgpr64_vgpr65_vgpr66_vgpr67
+; GFX11-TRUE16-NEXT: ; implicit-def: $vgpr45_vgpr46_vgpr47_vgpr48_vgpr49_vgpr50_vgpr51_vgpr52_vgpr53_vgpr54_vgpr55_vgpr56_vgpr57_vgpr58_vgpr59_vgpr60_vgpr61_vgpr62_vgpr63_vgpr64_vgpr65_vgpr66_vgpr67_vgpr68_vgpr69_vgpr70_vgpr71_vgpr72_vgpr73_vgpr74_vgpr75_vgpr76
+; GFX11-TRUE16-NEXT: ; implicit-def: $vgpr55_vgpr56_vgpr57_vgpr58_vgpr59_vgpr60_vgpr61_vgpr62_vgpr63_vgpr64_vgpr65_vgpr66_vgpr67_vgpr68_vgpr69_vgpr70_vgpr71_vgpr72_vgpr73_vgpr74_vgpr75_vgpr76_vgpr77_vgpr78_vgpr79_vgpr80_vgpr81_vgpr82_vgpr83_vgpr84_vgpr85_vgpr86
+; GFX11-TRUE16-NEXT: ; implicit-def: $vgpr66_vgpr67_vgpr68_vgpr69_vgpr70_vgpr71_vgpr72_vgpr73_vgpr74_vgpr75_vgpr76_vgpr77_vgpr78_vgpr79_vgpr80_vgpr81_vgpr82_vgpr83_vgpr84_vgpr85_vgpr86_vgpr87_vgpr88_vgpr89_vgpr90_vgpr91_vgpr92_vgpr93_vgpr94_vgpr95_vgpr96_vgpr97
+; GFX11-TRUE16-NEXT: ; implicit-def: $vgpr78_vgpr79_vgpr80_vgpr81_vgpr82_vgpr83_vgpr84_vgpr85_vgpr86_vgpr87_vgpr88_vgpr89_vgpr90_vgpr91_vgpr92_vgpr93_vgpr94_vgpr95_vgpr96_vgpr97_vgpr98_vgpr99_vgpr100_vgpr101_vgpr102_vgpr103_vgpr104_vgpr105_vgpr106_vgpr107_vgpr108_vgpr109
+; GFX11-TRUE16-NEXT: ; implicit-def: $vgpr91_vgpr92_vgpr93_vgpr94_vgpr95_vgpr96_vgpr97_vgpr98_vgpr99_vgpr100_vgpr101_vgpr102_vgpr103_vgpr104_vgpr105_vgpr106_vgpr107_vgpr108_vgpr109_vgpr110_vgpr111_vgpr112_vgpr113_vgpr114_vgpr115_vgpr116_vgpr117_vgpr118_vgpr119_vgpr120_vgpr121_vgpr122
+; GFX11-TRUE16-NEXT: ; implicit-def: $vgpr105_vgpr106_vgpr107_vgpr108_vgpr109_vgpr110_vgpr111_vgpr112_vgpr113_vgpr114_vgpr115_vgpr116_vgpr117_vgpr118_vgpr119_vgpr120_vgpr121_vgpr122_vgpr123_vgpr124_vgpr125_vgpr126_vgpr127_vgpr128_vgpr129_vgpr130_vgpr131_vgpr132_vgpr133_vgpr134_vgpr135_vgpr136
+; GFX11-TRUE16-NEXT: ; implicit-def: $vgpr120_vgpr121_vgpr122_vgpr123_vgpr124_vgpr125_vgpr126_vgpr127_vgpr128_vgpr129_vgpr130_vgpr131_vgpr132_vgpr133_vgpr134_vgpr135_vgpr136_vgpr137_vgpr138_vgpr139_vgpr140_vgpr141_vgpr142_vgpr143_vgpr144_vgpr145_vgpr146_vgpr147_vgpr148_vgpr149_vgpr150_vgpr151
+; GFX11-TRUE16-NEXT: ; implicit-def: $vgpr136_vgpr137_vgpr138_vgpr139_vgpr140_vgpr141_vgpr142_vgpr143_vgpr144_vgpr145_vgpr146_vgpr147_vgpr148_vgpr149_vgpr150_vgpr151_vgpr152_vgpr153_vgpr154_vgpr155_vgpr156_vgpr157_vgpr158_vgpr159_vgpr160_vgpr161_vgpr162_vgpr163_vgpr164_vgpr165_vgpr166_vgpr167
+; GFX11-TRUE16-NEXT: ; implicit-def: $vgpr153_vgpr154_vgpr155_vgpr156_vgpr157_vgpr158_vgpr159_vgpr160_vgpr161_vgpr162_vgpr163_vgpr164_vgpr165_vgpr166_vgpr167_vgpr168_vgpr169_vgpr170_vgpr171_vgpr172_vgpr173_vgpr174_vgpr175_vgpr176_vgpr177_vgpr178_vgpr179_vgpr180_vgpr181_vgpr182_vgpr183_vgpr184
; GFX11-TRUE16-NEXT: s_branch .LBB47_2
;
; GFX11-FAKE16-LABEL: bitcast_v40f16_to_v10i64_scalar:
@@ -24382,142 +25702,271 @@ define inreg <40 x i16> @bitcast_v10f64_to_v40i16_scalar(<10 x double> inreg %a,
; GFX9-NEXT: ; implicit-def: $vgpr26
; GFX9-NEXT: s_branch .LBB49_2
;
-; GFX11-LABEL: bitcast_v10f64_to_v40i16_scalar:
-; GFX11: ; %bb.0:
-; GFX11-NEXT: s_waitcnt vmcnt(0) expcnt(0) lgkmcnt(0)
-; GFX11-NEXT: v_cmp_ne_u32_e32 vcc_lo, 0, v2
-; GFX11-NEXT: v_dual_mov_b32 v20, s0 :: v_dual_mov_b32 v21, s1
-; GFX11-NEXT: v_dual_mov_b32 v3, s2 :: v_dual_mov_b32 v4, s3
-; GFX11-NEXT: v_dual_mov_b32 v18, s16 :: v_dual_mov_b32 v19, s17
-; GFX11-NEXT: v_dual_mov_b32 v7, s18 :: v_dual_mov_b32 v8, s19
-; GFX11-NEXT: v_dual_mov_b32 v5, s20 :: v_dual_mov_b32 v6, s21
-; GFX11-NEXT: v_dual_mov_b32 v12, s22 :: v_dual_mov_b32 v13, s23
-; GFX11-NEXT: v_dual_mov_b32 v10, s24 :: v_dual_mov_b32 v11, s25
-; GFX11-NEXT: v_dual_mov_b32 v14, s26 :: v_dual_mov_b32 v15, s27
-; GFX11-NEXT: v_dual_mov_b32 v16, s28 :: v_dual_mov_b32 v17, s29
-; GFX11-NEXT: s_mov_b32 s0, 0
-; GFX11-NEXT: s_and_b32 s1, vcc_lo, exec_lo
-; GFX11-NEXT: s_cbranch_scc0 .LBB49_4
-; GFX11-NEXT: ; %bb.1: ; %cmp.false
-; GFX11-NEXT: v_lshrrev_b32_e32 v22, 16, v1
-; GFX11-NEXT: v_lshrrev_b32_e32 v23, 16, v0
-; GFX11-NEXT: v_lshrrev_b32_e32 v24, 16, v17
-; GFX11-NEXT: v_lshrrev_b32_e32 v25, 16, v16
-; GFX11-NEXT: v_lshrrev_b32_e32 v26, 16, v15
-; GFX11-NEXT: v_lshrrev_b32_e32 v27, 16, v14
-; GFX11-NEXT: v_lshrrev_b32_e32 v28, 16, v11
-; GFX11-NEXT: v_lshrrev_b32_e32 v29, 16, v10
-; GFX11-NEXT: v_lshrrev_b32_e32 v30, 16, v13
-; GFX11-NEXT: v_lshrrev_b32_e32 v31, 16, v12
-; GFX11-NEXT: v_lshrrev_b32_e32 v9, 16, v6
-; GFX11-NEXT: v_lshrrev_b32_e32 v32, 16, v5
-; GFX11-NEXT: v_lshrrev_b32_e32 v33, 16, v8
-; GFX11-NEXT: v_lshrrev_b32_e32 v34, 16, v7
-; GFX11-NEXT: v_lshrrev_b32_e32 v35, 16, v19
-; GFX11-NEXT: v_lshrrev_b32_e32 v36, 16, v18
-; GFX11-NEXT: v_lshrrev_b32_e32 v37, 16, v4
-; GFX11-NEXT: v_lshrrev_b32_e32 v2, 16, v3
-; GFX11-NEXT: v_lshrrev_b32_e32 v38, 16, v21
-; GFX11-NEXT: v_lshrrev_b32_e32 v39, 16, v20
-; GFX11-NEXT: s_and_not1_b32 vcc_lo, exec_lo, s0
-; GFX11-NEXT: s_cbranch_vccnz .LBB49_3
-; GFX11-NEXT: .LBB49_2: ; %cmp.true
-; GFX11-NEXT: v_add_f64 v[0:1], v[0:1], 1.0
-; GFX11-NEXT: v_add_f64 v[16:17], v[16:17], 1.0
-; GFX11-NEXT: v_add_f64 v[14:15], v[14:15], 1.0
-; GFX11-NEXT: v_add_f64 v[10:11], v[10:11], 1.0
-; GFX11-NEXT: v_add_f64 v[12:13], v[12:13], 1.0
-; GFX11-NEXT: v_add_f64 v[5:6], v[5:6], 1.0
-; GFX11-NEXT: v_add_f64 v[7:8], v[7:8], 1.0
-; GFX11-NEXT: v_add_f64 v[18:19], v[18:19], 1.0
-; GFX11-NEXT: v_add_f64 v[3:4], v[3:4], 1.0
-; GFX11-NEXT: v_add_f64 v[20:21], v[20:21], 1.0
-; GFX11-NEXT: v_lshrrev_b32_e32 v22, 16, v1
-; GFX11-NEXT: v_lshrrev_b32_e32 v23, 16, v0
-; GFX11-NEXT: v_lshrrev_b32_e32 v24, 16, v17
-; GFX11-NEXT: v_lshrrev_b32_e32 v25, 16, v16
-; GFX11-NEXT: v_lshrrev_b32_e32 v26, 16, v15
-; GFX11-NEXT: v_lshrrev_b32_e32 v27, 16, v14
-; GFX11-NEXT: v_lshrrev_b32_e32 v28, 16, v11
-; GFX11-NEXT: v_lshrrev_b32_e32 v29, 16, v10
-; GFX11-NEXT: v_lshrrev_b32_e32 v30, 16, v13
-; GFX11-NEXT: v_lshrrev_b32_e32 v31, 16, v12
-; GFX11-NEXT: v_lshrrev_b32_e32 v9, 16, v6
-; GFX11-NEXT: v_lshrrev_b32_e32 v32, 16, v5
-; GFX11-NEXT: v_lshrrev_b32_e32 v33, 16, v8
-; GFX11-NEXT: v_lshrrev_b32_e32 v34, 16, v7
-; GFX11-NEXT: v_lshrrev_b32_e32 v35, 16, v19
-; GFX11-NEXT: v_lshrrev_b32_e32 v36, 16, v18
-; GFX11-NEXT: v_lshrrev_b32_e32 v37, 16, v4
-; GFX11-NEXT: v_lshrrev_b32_e32 v2, 16, v3
-; GFX11-NEXT: v_lshrrev_b32_e32 v38, 16, v21
-; GFX11-NEXT: v_lshrrev_b32_e32 v39, 16, v20
-; GFX11-NEXT: .LBB49_3: ; %end
-; GFX11-NEXT: v_and_b32_e32 v3, 0xffff, v3
-; GFX11-NEXT: v_and_b32_e32 v4, 0xffff, v4
-; GFX11-NEXT: v_and_b32_e32 v18, 0xffff, v18
-; GFX11-NEXT: v_and_b32_e32 v21, 0xffff, v21
-; GFX11-NEXT: v_and_b32_e32 v7, 0xffff, v7
-; GFX11-NEXT: v_lshl_or_b32 v2, v2, 16, v3
-; GFX11-NEXT: v_lshl_or_b32 v3, v37, 16, v4
-; GFX11-NEXT: v_lshl_or_b32 v4, v36, 16, v18
-; GFX11-NEXT: v_and_b32_e32 v18, 0xffff, v19
-; GFX11-NEXT: v_and_b32_e32 v8, 0xffff, v8
-; GFX11-NEXT: v_and_b32_e32 v19, 0xffff, v5
-; GFX11-NEXT: v_lshl_or_b32 v21, v38, 16, v21
-; GFX11-NEXT: v_and_b32_e32 v20, 0xffff, v20
-; GFX11-NEXT: v_and_b32_e32 v36, 0xffff, v6
-; GFX11-NEXT: v_lshl_or_b32 v6, v34, 16, v7
-; GFX11-NEXT: v_lshl_or_b32 v7, v33, 16, v8
-; GFX11-NEXT: v_lshl_or_b32 v8, v32, 16, v19
-; GFX11-NEXT: v_and_b32_e32 v13, 0xffff, v13
-; GFX11-NEXT: v_and_b32_e32 v19, 0xffff, v11
-; GFX11-NEXT: v_and_b32_e32 v1, 0xffff, v1
-; GFX11-NEXT: v_lshl_or_b32 v5, v35, 16, v18
-; GFX11-NEXT: v_and_b32_e32 v12, 0xffff, v12
-; GFX11-NEXT: v_and_b32_e32 v18, 0xffff, v10
-; GFX11-NEXT: v_and_b32_e32 v14, 0xffff, v14
-; GFX11-NEXT: v_lshl_or_b32 v11, v30, 16, v13
-; GFX11-NEXT: v_lshl_or_b32 v13, v28, 16, v19
-; GFX11-NEXT: v_and_b32_e32 v15, 0xffff, v15
-; GFX11-NEXT: v_and_b32_e32 v16, 0xffff, v16
-; GFX11-NEXT: v_and_b32_e32 v17, 0xffff, v17
-; GFX11-NEXT: v_and_b32_e32 v0, 0xffff, v0
-; GFX11-NEXT: v_lshl_or_b32 v19, v22, 16, v1
-; GFX11-NEXT: v_mov_b32_e32 v1, v21
-; GFX11-NEXT: v_lshl_or_b32 v20, v39, 16, v20
-; GFX11-NEXT: v_lshl_or_b32 v9, v9, 16, v36
-; GFX11-NEXT: v_lshl_or_b32 v10, v31, 16, v12
-; GFX11-NEXT: v_lshl_or_b32 v12, v29, 16, v18
-; GFX11-NEXT: v_lshl_or_b32 v14, v27, 16, v14
-; GFX11-NEXT: v_lshl_or_b32 v15, v26, 16, v15
-; GFX11-NEXT: v_lshl_or_b32 v16, v25, 16, v16
-; GFX11-NEXT: v_lshl_or_b32 v17, v24, 16, v17
-; GFX11-NEXT: v_lshl_or_b32 v18, v23, 16, v0
-; GFX11-NEXT: v_mov_b32_e32 v0, v20
-; GFX11-NEXT: s_setpc_b64 s[30:31]
-; GFX11-NEXT: .LBB49_4:
-; GFX11-NEXT: ; implicit-def: $vgpr39
-; GFX11-NEXT: ; implicit-def: $vgpr38
-; GFX11-NEXT: ; implicit-def: $vgpr2
-; GFX11-NEXT: ; implicit-def: $vgpr37
-; GFX11-NEXT: ; implicit-def: $vgpr36
-; GFX11-NEXT: ; implicit-def: $vgpr35
-; GFX11-NEXT: ; implicit-def: $vgpr34
-; GFX11-NEXT: ; implicit-def: $vgpr33
-; GFX11-NEXT: ; implicit-def: $vgpr32
-; GFX11-NEXT: ; implicit-def: $vgpr9
-; GFX11-NEXT: ; implicit-def: $vgpr31
-; GFX11-NEXT: ; implicit-def: $vgpr30
-; GFX11-NEXT: ; implicit-def: $vgpr29
-; GFX11-NEXT: ; implicit-def: $vgpr28
-; GFX11-NEXT: ; implicit-def: $vgpr27
-; GFX11-NEXT: ; implicit-def: $vgpr26
-; GFX11-NEXT: ; implicit-def: $vgpr25
-; GFX11-NEXT: ; implicit-def: $vgpr24
-; GFX11-NEXT: ; implicit-def: $vgpr23
-; GFX11-NEXT: ; implicit-def: $vgpr22
-; GFX11-NEXT: s_branch .LBB49_2
+; GFX11-TRUE16-LABEL: bitcast_v10f64_to_v40i16_scalar:
+; GFX11-TRUE16: ; %bb.0:
+; GFX11-TRUE16-NEXT: s_waitcnt vmcnt(0) expcnt(0) lgkmcnt(0)
+; GFX11-TRUE16-NEXT: v_dual_mov_b32 v16, v2 :: v_dual_mov_b32 v19, v1
+; GFX11-TRUE16-NEXT: v_dual_mov_b32 v18, v0 :: v_dual_mov_b32 v1, s1
+; GFX11-TRUE16-NEXT: v_dual_mov_b32 v0, s0 :: v_dual_mov_b32 v3, s3
+; GFX11-TRUE16-NEXT: s_delay_alu instid0(VALU_DEP_3)
+; GFX11-TRUE16-NEXT: v_cmp_ne_u32_e32 vcc_lo, 0, v16
+; GFX11-TRUE16-NEXT: v_dual_mov_b32 v2, s2 :: v_dual_mov_b32 v5, s17
+; GFX11-TRUE16-NEXT: v_dual_mov_b32 v4, s16 :: v_dual_mov_b32 v7, s19
+; GFX11-TRUE16-NEXT: v_dual_mov_b32 v6, s18 :: v_dual_mov_b32 v9, s21
+; GFX11-TRUE16-NEXT: v_dual_mov_b32 v8, s20 :: v_dual_mov_b32 v11, s23
+; GFX11-TRUE16-NEXT: v_dual_mov_b32 v10, s22 :: v_dual_mov_b32 v13, s25
+; GFX11-TRUE16-NEXT: v_dual_mov_b32 v12, s24 :: v_dual_mov_b32 v15, s27
+; GFX11-TRUE16-NEXT: v_dual_mov_b32 v14, s26 :: v_dual_mov_b32 v17, s29
+; GFX11-TRUE16-NEXT: v_mov_b32_e32 v16, s28
+; GFX11-TRUE16-NEXT: s_mov_b32 s0, 0
+; GFX11-TRUE16-NEXT: s_and_b32 s1, vcc_lo, exec_lo
+; GFX11-TRUE16-NEXT: s_cbranch_scc0 .LBB49_4
+; GFX11-TRUE16-NEXT: ; %bb.1: ; %cmp.false
+; GFX11-TRUE16-NEXT: v_lshrrev_b32_e32 v20, 16, v19
+; GFX11-TRUE16-NEXT: v_lshrrev_b32_e32 v21, 16, v18
+; GFX11-TRUE16-NEXT: v_lshrrev_b32_e32 v22, 16, v17
+; GFX11-TRUE16-NEXT: v_lshrrev_b32_e32 v23, 16, v16
+; GFX11-TRUE16-NEXT: v_lshrrev_b32_e32 v24, 16, v15
+; GFX11-TRUE16-NEXT: v_lshrrev_b32_e32 v25, 16, v14
+; GFX11-TRUE16-NEXT: v_lshrrev_b32_e32 v26, 16, v13
+; GFX11-TRUE16-NEXT: v_lshrrev_b32_e32 v27, 16, v12
+; GFX11-TRUE16-NEXT: v_lshrrev_b32_e32 v28, 16, v11
+; GFX11-TRUE16-NEXT: v_lshrrev_b32_e32 v29, 16, v10
+; GFX11-TRUE16-NEXT: v_lshrrev_b32_e32 v30, 16, v9
+; GFX11-TRUE16-NEXT: v_lshrrev_b32_e32 v31, 16, v8
+; GFX11-TRUE16-NEXT: v_lshrrev_b32_e32 v32, 16, v7
+; GFX11-TRUE16-NEXT: v_lshrrev_b32_e32 v33, 16, v6
+; GFX11-TRUE16-NEXT: v_lshrrev_b32_e32 v34, 16, v5
+; GFX11-TRUE16-NEXT: v_lshrrev_b32_e32 v35, 16, v4
+; GFX11-TRUE16-NEXT: v_lshrrev_b32_e32 v36, 16, v3
+; GFX11-TRUE16-NEXT: v_lshrrev_b32_e32 v37, 16, v2
+; GFX11-TRUE16-NEXT: v_lshrrev_b32_e32 v38, 16, v1
+; GFX11-TRUE16-NEXT: v_lshrrev_b32_e32 v39, 16, v0
+; GFX11-TRUE16-NEXT: s_and_not1_b32 vcc_lo, exec_lo, s0
+; GFX11-TRUE16-NEXT: s_cbranch_vccnz .LBB49_3
+; GFX11-TRUE16-NEXT: .LBB49_2: ; %cmp.true
+; GFX11-TRUE16-NEXT: v_add_f64 v[18:19], v[18:19], 1.0
+; GFX11-TRUE16-NEXT: v_add_f64 v[16:17], v[16:17], 1.0
+; GFX11-TRUE16-NEXT: v_add_f64 v[14:15], v[14:15], 1.0
+; GFX11-TRUE16-NEXT: v_add_f64 v[12:13], v[12:13], 1.0
+; GFX11-TRUE16-NEXT: v_add_f64 v[10:11], v[10:11], 1.0
+; GFX11-TRUE16-NEXT: v_add_f64 v[8:9], v[8:9], 1.0
+; GFX11-TRUE16-NEXT: v_add_f64 v[6:7], v[6:7], 1.0
+; GFX11-TRUE16-NEXT: v_add_f64 v[4:5], v[4:5], 1.0
+; GFX11-TRUE16-NEXT: v_add_f64 v[2:3], v[2:3], 1.0
+; GFX11-TRUE16-NEXT: v_add_f64 v[0:1], v[0:1], 1.0
+; GFX11-TRUE16-NEXT: v_lshrrev_b32_e32 v20, 16, v19
+; GFX11-TRUE16-NEXT: v_lshrrev_b32_e32 v21, 16, v18
+; GFX11-TRUE16-NEXT: v_lshrrev_b32_e32 v22, 16, v17
+; GFX11-TRUE16-NEXT: v_lshrrev_b32_e32 v23, 16, v16
+; GFX11-TRUE16-NEXT: v_lshrrev_b32_e32 v24, 16, v15
+; GFX11-TRUE16-NEXT: v_lshrrev_b32_e32 v25, 16, v14
+; GFX11-TRUE16-NEXT: v_lshrrev_b32_e32 v26, 16, v13
+; GFX11-TRUE16-NEXT: v_lshrrev_b32_e32 v27, 16, v12
+; GFX11-TRUE16-NEXT: v_lshrrev_b32_e32 v28, 16, v11
+; GFX11-TRUE16-NEXT: v_lshrrev_b32_e32 v29, 16, v10
+; GFX11-TRUE16-NEXT: v_lshrrev_b32_e32 v30, 16, v9
+; GFX11-TRUE16-NEXT: v_lshrrev_b32_e32 v31, 16, v8
+; GFX11-TRUE16-NEXT: v_lshrrev_b32_e32 v32, 16, v7
+; GFX11-TRUE16-NEXT: v_lshrrev_b32_e32 v33, 16, v6
+; GFX11-TRUE16-NEXT: v_lshrrev_b32_e32 v34, 16, v5
+; GFX11-TRUE16-NEXT: v_lshrrev_b32_e32 v35, 16, v4
+; GFX11-TRUE16-NEXT: v_lshrrev_b32_e32 v36, 16, v3
+; GFX11-TRUE16-NEXT: v_lshrrev_b32_e32 v37, 16, v2
+; GFX11-TRUE16-NEXT: v_lshrrev_b32_e32 v38, 16, v1
+; GFX11-TRUE16-NEXT: v_lshrrev_b32_e32 v39, 16, v0
+; GFX11-TRUE16-NEXT: .LBB49_3: ; %end
+; GFX11-TRUE16-NEXT: s_delay_alu instid0(VALU_DEP_1) | instskip(NEXT) | instid1(VALU_DEP_4)
+; GFX11-TRUE16-NEXT: v_dual_mov_b32 v39, v39 :: v_dual_mov_b32 v38, v38
+; GFX11-TRUE16-NEXT: v_dual_mov_b32 v37, v37 :: v_dual_mov_b32 v36, v36
+; GFX11-TRUE16-NEXT: v_dual_mov_b32 v35, v35 :: v_dual_mov_b32 v34, v34
+; GFX11-TRUE16-NEXT: v_dual_mov_b32 v33, v33 :: v_dual_mov_b32 v32, v32
+; GFX11-TRUE16-NEXT: v_dual_mov_b32 v31, v31 :: v_dual_mov_b32 v30, v30
+; GFX11-TRUE16-NEXT: v_dual_mov_b32 v29, v29 :: v_dual_mov_b32 v28, v28
+; GFX11-TRUE16-NEXT: v_dual_mov_b32 v27, v27 :: v_dual_mov_b32 v26, v26
+; GFX11-TRUE16-NEXT: v_dual_mov_b32 v25, v25 :: v_dual_mov_b32 v24, v24
+; GFX11-TRUE16-NEXT: v_dual_mov_b32 v23, v23 :: v_dual_mov_b32 v22, v22
+; GFX11-TRUE16-NEXT: v_dual_mov_b32 v21, v21 :: v_dual_mov_b32 v20, v20
+; GFX11-TRUE16-NEXT: v_mov_b16_e32 v0.h, v39.l
+; GFX11-TRUE16-NEXT: v_mov_b16_e32 v1.h, v38.l
+; GFX11-TRUE16-NEXT: v_mov_b16_e32 v2.h, v37.l
+; GFX11-TRUE16-NEXT: v_mov_b16_e32 v3.h, v36.l
+; GFX11-TRUE16-NEXT: v_mov_b16_e32 v4.h, v35.l
+; GFX11-TRUE16-NEXT: v_mov_b16_e32 v5.h, v34.l
+; GFX11-TRUE16-NEXT: v_mov_b16_e32 v6.h, v33.l
+; GFX11-TRUE16-NEXT: v_mov_b16_e32 v7.h, v32.l
+; GFX11-TRUE16-NEXT: v_mov_b16_e32 v8.h, v31.l
+; GFX11-TRUE16-NEXT: v_mov_b16_e32 v9.h, v30.l
+; GFX11-TRUE16-NEXT: v_mov_b16_e32 v10.h, v29.l
+; GFX11-TRUE16-NEXT: v_mov_b16_e32 v11.h, v28.l
+; GFX11-TRUE16-NEXT: v_mov_b16_e32 v12.h, v27.l
+; GFX11-TRUE16-NEXT: v_mov_b16_e32 v13.h, v26.l
+; GFX11-TRUE16-NEXT: v_mov_b16_e32 v14.h, v25.l
+; GFX11-TRUE16-NEXT: v_mov_b16_e32 v15.h, v24.l
+; GFX11-TRUE16-NEXT: v_mov_b16_e32 v16.h, v23.l
+; GFX11-TRUE16-NEXT: v_mov_b16_e32 v17.h, v22.l
+; GFX11-TRUE16-NEXT: v_mov_b16_e32 v18.h, v21.l
+; GFX11-TRUE16-NEXT: v_mov_b16_e32 v19.h, v20.l
+; GFX11-TRUE16-NEXT: s_setpc_b64 s[30:31]
+; GFX11-TRUE16-NEXT: .LBB49_4:
+; GFX11-TRUE16-NEXT: ; implicit-def: $vgpr39
+; GFX11-TRUE16-NEXT: ; implicit-def: $vgpr38
+; GFX11-TRUE16-NEXT: ; implicit-def: $vgpr37
+; GFX11-TRUE16-NEXT: ; implicit-def: $vgpr36
+; GFX11-TRUE16-NEXT: ; implicit-def: $vgpr35
+; GFX11-TRUE16-NEXT: ; implicit-def: $vgpr34
+; GFX11-TRUE16-NEXT: ; implicit-def: $vgpr33
+; GFX11-TRUE16-NEXT: ; implicit-def: $vgpr32
+; GFX11-TRUE16-NEXT: ; implicit-def: $vgpr31
+; GFX11-TRUE16-NEXT: ; implicit-def: $vgpr30
+; GFX11-TRUE16-NEXT: ; implicit-def: $vgpr29
+; GFX11-TRUE16-NEXT: ; implicit-def: $vgpr28
+; GFX11-TRUE16-NEXT: ; implicit-def: $vgpr27
+; GFX11-TRUE16-NEXT: ; implicit-def: $vgpr26
+; GFX11-TRUE16-NEXT: ; implicit-def: $vgpr25
+; GFX11-TRUE16-NEXT: ; implicit-def: $vgpr24
+; GFX11-TRUE16-NEXT: ; implicit-def: $vgpr23
+; GFX11-TRUE16-NEXT: ; implicit-def: $vgpr22
+; GFX11-TRUE16-NEXT: ; implicit-def: $vgpr21
+; GFX11-TRUE16-NEXT: ; implicit-def: $vgpr20
+; GFX11-TRUE16-NEXT: s_branch .LBB49_2
+;
+; GFX11-FAKE16-LABEL: bitcast_v10f64_to_v40i16_scalar:
+; GFX11-FAKE16: ; %bb.0:
+; GFX11-FAKE16-NEXT: s_waitcnt vmcnt(0) expcnt(0) lgkmcnt(0)
+; GFX11-FAKE16-NEXT: v_cmp_ne_u32_e32 vcc_lo, 0, v2
+; GFX11-FAKE16-NEXT: v_dual_mov_b32 v20, s0 :: v_dual_mov_b32 v21, s1
+; GFX11-FAKE16-NEXT: v_dual_mov_b32 v3, s2 :: v_dual_mov_b32 v4, s3
+; GFX11-FAKE16-NEXT: v_dual_mov_b32 v18, s16 :: v_dual_mov_b32 v19, s17
+; GFX11-FAKE16-NEXT: v_dual_mov_b32 v7, s18 :: v_dual_mov_b32 v8, s19
+; GFX11-FAKE16-NEXT: v_dual_mov_b32 v5, s20 :: v_dual_mov_b32 v6, s21
+; GFX11-FAKE16-NEXT: v_dual_mov_b32 v12, s22 :: v_dual_mov_b32 v13, s23
+; GFX11-FAKE16-NEXT: v_dual_mov_b32 v10, s24 :: v_dual_mov_b32 v11, s25
+; GFX11-FAKE16-NEXT: v_dual_mov_b32 v14, s26 :: v_dual_mov_b32 v15, s27
+; GFX11-FAKE16-NEXT: v_dual_mov_b32 v16, s28 :: v_dual_mov_b32 v17, s29
+; GFX11-FAKE16-NEXT: s_mov_b32 s0, 0
+; GFX11-FAKE16-NEXT: s_and_b32 s1, vcc_lo, exec_lo
+; GFX11-FAKE16-NEXT: s_cbranch_scc0 .LBB49_4
+; GFX11-FAKE16-NEXT: ; %bb.1: ; %cmp.false
+; GFX11-FAKE16-NEXT: v_lshrrev_b32_e32 v22, 16, v1
+; GFX11-FAKE16-NEXT: v_lshrrev_b32_e32 v23, 16, v0
+; GFX11-FAKE16-NEXT: v_lshrrev_b32_e32 v24, 16, v17
+; GFX11-FAKE16-NEXT: v_lshrrev_b32_e32 v25, 16, v16
+; GFX11-FAKE16-NEXT: v_lshrrev_b32_e32 v26, 16, v15
+; GFX11-FAKE16-NEXT: v_lshrrev_b32_e32 v27, 16, v14
+; GFX11-FAKE16-NEXT: v_lshrrev_b32_e32 v28, 16, v11
+; GFX11-FAKE16-NEXT: v_lshrrev_b32_e32 v29, 16, v10
+; GFX11-FAKE16-NEXT: v_lshrrev_b32_e32 v30, 16, v13
+; GFX11-FAKE16-NEXT: v_lshrrev_b32_e32 v31, 16, v12
+; GFX11-FAKE16-NEXT: v_lshrrev_b32_e32 v9, 16, v6
+; GFX11-FAKE16-NEXT: v_lshrrev_b32_e32 v32, 16, v5
+; GFX11-FAKE16-NEXT: v_lshrrev_b32_e32 v33, 16, v8
+; GFX11-FAKE16-NEXT: v_lshrrev_b32_e32 v34, 16, v7
+; GFX11-FAKE16-NEXT: v_lshrrev_b32_e32 v35, 16, v19
+; GFX11-FAKE16-NEXT: v_lshrrev_b32_e32 v36, 16, v18
+; GFX11-FAKE16-NEXT: v_lshrrev_b32_e32 v37, 16, v4
+; GFX11-FAKE16-NEXT: v_lshrrev_b32_e32 v2, 16, v3
+; GFX11-FAKE16-NEXT: v_lshrrev_b32_e32 v38, 16, v21
+; GFX11-FAKE16-NEXT: v_lshrrev_b32_e32 v39, 16, v20
+; GFX11-FAKE16-NEXT: s_and_not1_b32 vcc_lo, exec_lo, s0
+; GFX11-FAKE16-NEXT: s_cbranch_vccnz .LBB49_3
+; GFX11-FAKE16-NEXT: .LBB49_2: ; %cmp.true
+; GFX11-FAKE16-NEXT: v_add_f64 v[0:1], v[0:1], 1.0
+; GFX11-FAKE16-NEXT: v_add_f64 v[16:17], v[16:17], 1.0
+; GFX11-FAKE16-NEXT: v_add_f64 v[14:15], v[14:15], 1.0
+; GFX11-FAKE16-NEXT: v_add_f64 v[10:11], v[10:11], 1.0
+; GFX11-FAKE16-NEXT: v_add_f64 v[12:13], v[12:13], 1.0
+; GFX11-FAKE16-NEXT: v_add_f64 v[5:6], v[5:6], 1.0
+; GFX11-FAKE16-NEXT: v_add_f64 v[7:8], v[7:8], 1.0
+; GFX11-FAKE16-NEXT: v_add_f64 v[18:19], v[18:19], 1.0
+; GFX11-FAKE16-NEXT: v_add_f64 v[3:4], v[3:4], 1.0
+; GFX11-FAKE16-NEXT: v_add_f64 v[20:21], v[20:21], 1.0
+; GFX11-FAKE16-NEXT: v_lshrrev_b32_e32 v22, 16, v1
+; GFX11-FAKE16-NEXT: v_lshrrev_b32_e32 v23, 16, v0
+; GFX11-FAKE16-NEXT: v_lshrrev_b32_e32 v24, 16, v17
+; GFX11-FAKE16-NEXT: v_lshrrev_b32_e32 v25, 16, v16
+; GFX11-FAKE16-NEXT: v_lshrrev_b32_e32 v26, 16, v15
+; GFX11-FAKE16-NEXT: v_lshrrev_b32_e32 v27, 16, v14
+; GFX11-FAKE16-NEXT: v_lshrrev_b32_e32 v28, 16, v11
+; GFX11-FAKE16-NEXT: v_lshrrev_b32_e32 v29, 16, v10
+; GFX11-FAKE16-NEXT: v_lshrrev_b32_e32 v30, 16, v13
+; GFX11-FAKE16-NEXT: v_lshrrev_b32_e32 v31, 16, v12
+; GFX11-FAKE16-NEXT: v_lshrrev_b32_e32 v9, 16, v6
+; GFX11-FAKE16-NEXT: v_lshrrev_b32_e32 v32, 16, v5
+; GFX11-FAKE16-NEXT: v_lshrrev_b32_e32 v33, 16, v8
+; GFX11-FAKE16-NEXT: v_lshrrev_b32_e32 v34, 16, v7
+; GFX11-FAKE16-NEXT: v_lshrrev_b32_e32 v35, 16, v19
+; GFX11-FAKE16-NEXT: v_lshrrev_b32_e32 v36, 16, v18
+; GFX11-FAKE16-NEXT: v_lshrrev_b32_e32 v37, 16, v4
+; GFX11-FAKE16-NEXT: v_lshrrev_b32_e32 v2, 16, v3
+; GFX11-FAKE16-NEXT: v_lshrrev_b32_e32 v38, 16, v21
+; GFX11-FAKE16-NEXT: v_lshrrev_b32_e32 v39, 16, v20
+; GFX11-FAKE16-NEXT: .LBB49_3: ; %end
+; GFX11-FAKE16-NEXT: v_and_b32_e32 v3, 0xffff, v3
+; GFX11-FAKE16-NEXT: v_and_b32_e32 v4, 0xffff, v4
+; GFX11-FAKE16-NEXT: v_and_b32_e32 v18, 0xffff, v18
+; GFX11-FAKE16-NEXT: v_and_b32_e32 v21, 0xffff, v21
+; GFX11-FAKE16-NEXT: v_and_b32_e32 v7, 0xffff, v7
+; GFX11-FAKE16-NEXT: v_lshl_or_b32 v2, v2, 16, v3
+; GFX11-FAKE16-NEXT: v_lshl_or_b32 v3, v37, 16, v4
+; GFX11-FAKE16-NEXT: v_lshl_or_b32 v4, v36, 16, v18
+; GFX11-FAKE16-NEXT: v_and_b32_e32 v18, 0xffff, v19
+; GFX11-FAKE16-NEXT: v_and_b32_e32 v8, 0xffff, v8
+; GFX11-FAKE16-NEXT: v_and_b32_e32 v19, 0xffff, v5
+; GFX11-FAKE16-NEXT: v_lshl_or_b32 v21, v38, 16, v21
+; GFX11-FAKE16-NEXT: v_and_b32_e32 v20, 0xffff, v20
+; GFX11-FAKE16-NEXT: v_and_b32_e32 v36, 0xffff, v6
+; GFX11-FAKE16-NEXT: v_lshl_or_b32 v6, v34, 16, v7
+; GFX11-FAKE16-NEXT: v_lshl_or_b32 v7, v33, 16, v8
+; GFX11-FAKE16-NEXT: v_lshl_or_b32 v8, v32, 16, v19
+; GFX11-FAKE16-NEXT: v_and_b32_e32 v13, 0xffff, v13
+; GFX11-FAKE16-NEXT: v_and_b32_e32 v19, 0xffff, v11
+; GFX11-FAKE16-NEXT: v_and_b32_e32 v1, 0xffff, v1
+; GFX11-FAKE16-NEXT: v_lshl_or_b32 v5, v35, 16, v18
+; GFX11-FAKE16-NEXT: v_and_b32_e32 v12, 0xffff, v12
+; GFX11-FAKE16-NEXT: v_and_b32_e32 v18, 0xffff, v10
+; GFX11-FAKE16-NEXT: v_and_b32_e32 v14, 0xffff, v14
+; GFX11-FAKE16-NEXT: v_lshl_or_b32 v11, v30, 16, v13
+; GFX11-FAKE16-NEXT: v_lshl_or_b32 v13, v28, 16, v19
+; GFX11-FAKE16-NEXT: v_and_b32_e32 v15, 0xffff, v15
+; GFX11-FAKE16-NEXT: v_and_b32_e32 v16, 0xffff, v16
+; GFX11-FAKE16-NEXT: v_and_b32_e32 v17, 0xffff, v17
+; GFX11-FAKE16-NEXT: v_and_b32_e32 v0, 0xffff, v0
+; GFX11-FAKE16-NEXT: v_lshl_or_b32 v19, v22, 16, v1
+; GFX11-FAKE16-NEXT: v_mov_b32_e32 v1, v21
+; GFX11-FAKE16-NEXT: v_lshl_or_b32 v20, v39, 16, v20
+; GFX11-FAKE16-NEXT: v_lshl_or_b32 v9, v9, 16, v36
+; GFX11-FAKE16-NEXT: v_lshl_or_b32 v10, v31, 16, v12
+; GFX11-FAKE16-NEXT: v_lshl_or_b32 v12, v29, 16, v18
+; GFX11-FAKE16-NEXT: v_lshl_or_b32 v14, v27, 16, v14
+; GFX11-FAKE16-NEXT: v_lshl_or_b32 v15, v26, 16, v15
+; GFX11-FAKE16-NEXT: v_lshl_or_b32 v16, v25, 16, v16
+; GFX11-FAKE16-NEXT: v_lshl_or_b32 v17, v24, 16, v17
+; GFX11-FAKE16-NEXT: v_lshl_or_b32 v18, v23, 16, v0
+; GFX11-FAKE16-NEXT: v_mov_b32_e32 v0, v20
+; GFX11-FAKE16-NEXT: s_setpc_b64 s[30:31]
+; GFX11-FAKE16-NEXT: .LBB49_4:
+; GFX11-FAKE16-NEXT: ; implicit-def: $vgpr39
+; GFX11-FAKE16-NEXT: ; implicit-def: $vgpr38
+; GFX11-FAKE16-NEXT: ; implicit-def: $vgpr2
+; GFX11-FAKE16-NEXT: ; implicit-def: $vgpr37
+; GFX11-FAKE16-NEXT: ; implicit-def: $vgpr36
+; GFX11-FAKE16-NEXT: ; implicit-def: $vgpr35
+; GFX11-FAKE16-NEXT: ; implicit-def: $vgpr34
+; GFX11-FAKE16-NEXT: ; implicit-def: $vgpr33
+; GFX11-FAKE16-NEXT: ; implicit-def: $vgpr32
+; GFX11-FAKE16-NEXT: ; implicit-def: $vgpr9
+; GFX11-FAKE16-NEXT: ; implicit-def: $vgpr31
+; GFX11-FAKE16-NEXT: ; implicit-def: $vgpr30
+; GFX11-FAKE16-NEXT: ; implicit-def: $vgpr29
+; GFX11-FAKE16-NEXT: ; implicit-def: $vgpr28
+; GFX11-FAKE16-NEXT: ; implicit-def: $vgpr27
+; GFX11-FAKE16-NEXT: ; implicit-def: $vgpr26
+; GFX11-FAKE16-NEXT: ; implicit-def: $vgpr25
+; GFX11-FAKE16-NEXT: ; implicit-def: $vgpr24
+; GFX11-FAKE16-NEXT: ; implicit-def: $vgpr23
+; GFX11-FAKE16-NEXT: ; implicit-def: $vgpr22
+; GFX11-FAKE16-NEXT: s_branch .LBB49_2
%cmp = icmp eq i32 %b, 0
br i1 %cmp, label %cmp.true, label %cmp.false
@@ -25911,93 +27360,270 @@ define inreg <10 x double> @bitcast_v40i16_to_v10f64_scalar(<40 x i16> inreg %a,
; GFX11-TRUE16-LABEL: bitcast_v40i16_to_v10f64_scalar:
; GFX11-TRUE16: ; %bb.0:
; GFX11-TRUE16-NEXT: s_waitcnt vmcnt(0) expcnt(0) lgkmcnt(0)
-; GFX11-TRUE16-NEXT: v_mov_b16_e32 v32.h, 0
; GFX11-TRUE16-NEXT: v_cmp_ne_u32_e32 vcc_lo, 0, v2
-; GFX11-TRUE16-NEXT: v_mov_b16_e32 v32.l, v1.h
-; GFX11-TRUE16-NEXT: v_mov_b16_e32 v33.l, v0.h
-; GFX11-TRUE16-NEXT: v_and_b32_e32 v35, 0xffff, v0
-; GFX11-TRUE16-NEXT: v_mov_b16_e32 v33.h, v32.h
-; GFX11-TRUE16-NEXT: v_and_b32_e32 v34, 0xffff, v1
-; GFX11-TRUE16-NEXT: s_lshr_b32 s41, s29, 16
-; GFX11-TRUE16-NEXT: s_lshr_b32 s42, s28, 16
-; GFX11-TRUE16-NEXT: s_lshr_b32 s43, s27, 16
-; GFX11-TRUE16-NEXT: s_lshr_b32 s44, s26, 16
-; GFX11-TRUE16-NEXT: s_lshr_b32 s45, s25, 16
-; GFX11-TRUE16-NEXT: s_lshr_b32 s15, s24, 16
-; GFX11-TRUE16-NEXT: s_lshr_b32 s14, s23, 16
-; GFX11-TRUE16-NEXT: s_lshr_b32 s13, s22, 16
-; GFX11-TRUE16-NEXT: s_lshr_b32 s12, s21, 16
-; GFX11-TRUE16-NEXT: s_lshr_b32 s11, s20, 16
-; GFX11-TRUE16-NEXT: s_lshr_b32 s10, s19, 16
-; GFX11-TRUE16-NEXT: s_lshr_b32 s9, s18, 16
-; GFX11-TRUE16-NEXT: s_lshr_b32 s8, s17, 16
-; GFX11-TRUE16-NEXT: s_lshr_b32 s7, s16, 16
-; GFX11-TRUE16-NEXT: s_lshr_b32 s46, s3, 16
-; GFX11-TRUE16-NEXT: s_lshr_b32 s6, s2, 16
-; GFX11-TRUE16-NEXT: s_lshr_b32 s5, s1, 16
-; GFX11-TRUE16-NEXT: s_lshr_b32 s4, s0, 16
-; GFX11-TRUE16-NEXT: s_mov_b32 s40, 0
-; GFX11-TRUE16-NEXT: s_pack_ll_b32_b16 s4, s0, s4
-; GFX11-TRUE16-NEXT: s_pack_ll_b32_b16 s5, s1, s5
-; GFX11-TRUE16-NEXT: s_pack_ll_b32_b16 s6, s2, s6
-; GFX11-TRUE16-NEXT: s_pack_ll_b32_b16 s3, s3, s46
-; GFX11-TRUE16-NEXT: s_pack_ll_b32_b16 s7, s16, s7
-; GFX11-TRUE16-NEXT: s_pack_ll_b32_b16 s8, s17, s8
-; GFX11-TRUE16-NEXT: s_pack_ll_b32_b16 s9, s18, s9
-; GFX11-TRUE16-NEXT: s_pack_ll_b32_b16 s10, s19, s10
-; GFX11-TRUE16-NEXT: s_pack_ll_b32_b16 s11, s20, s11
-; GFX11-TRUE16-NEXT: s_pack_ll_b32_b16 s12, s21, s12
-; GFX11-TRUE16-NEXT: s_pack_ll_b32_b16 s13, s22, s13
-; GFX11-TRUE16-NEXT: s_pack_ll_b32_b16 s14, s23, s14
-; GFX11-TRUE16-NEXT: s_pack_ll_b32_b16 s15, s24, s15
-; GFX11-TRUE16-NEXT: s_pack_ll_b32_b16 s16, s25, s45
-; GFX11-TRUE16-NEXT: s_pack_ll_b32_b16 s17, s26, s44
-; GFX11-TRUE16-NEXT: s_pack_ll_b32_b16 s0, s27, s43
-; GFX11-TRUE16-NEXT: s_pack_ll_b32_b16 s1, s28, s42
-; GFX11-TRUE16-NEXT: s_pack_ll_b32_b16 s2, s29, s41
+; GFX11-TRUE16-NEXT: s_clause 0x1f
+; GFX11-TRUE16-NEXT: scratch_store_b32 off, v40, s32 offset:296
+; GFX11-TRUE16-NEXT: scratch_store_b32 off, v41, s32 offset:292
+; GFX11-TRUE16-NEXT: scratch_store_b32 off, v42, s32 offset:288
+; GFX11-TRUE16-NEXT: scratch_store_b32 off, v43, s32 offset:284
+; GFX11-TRUE16-NEXT: scratch_store_b32 off, v44, s32 offset:280
+; GFX11-TRUE16-NEXT: scratch_store_b32 off, v45, s32 offset:276
+; GFX11-TRUE16-NEXT: scratch_store_b32 off, v46, s32 offset:272
+; GFX11-TRUE16-NEXT: scratch_store_b32 off, v47, s32 offset:268
+; GFX11-TRUE16-NEXT: scratch_store_b32 off, v56, s32 offset:264
+; GFX11-TRUE16-NEXT: scratch_store_b32 off, v57, s32 offset:260
+; GFX11-TRUE16-NEXT: scratch_store_b32 off, v58, s32 offset:256
+; GFX11-TRUE16-NEXT: scratch_store_b32 off, v59, s32 offset:252
+; GFX11-TRUE16-NEXT: scratch_store_b32 off, v60, s32 offset:248
+; GFX11-TRUE16-NEXT: scratch_store_b32 off, v61, s32 offset:244
+; GFX11-TRUE16-NEXT: scratch_store_b32 off, v62, s32 offset:240
+; GFX11-TRUE16-NEXT: scratch_store_b32 off, v63, s32 offset:236
+; GFX11-TRUE16-NEXT: scratch_store_b32 off, v72, s32 offset:232
+; GFX11-TRUE16-NEXT: scratch_store_b32 off, v73, s32 offset:228
+; GFX11-TRUE16-NEXT: scratch_store_b32 off, v74, s32 offset:224
+; GFX11-TRUE16-NEXT: scratch_store_b32 off, v75, s32 offset:220
+; GFX11-TRUE16-NEXT: scratch_store_b32 off, v76, s32 offset:216
+; GFX11-TRUE16-NEXT: scratch_store_b32 off, v77, s32 offset:212
+; GFX11-TRUE16-NEXT: scratch_store_b32 off, v78, s32 offset:208
+; GFX11-TRUE16-NEXT: scratch_store_b32 off, v79, s32 offset:204
+; GFX11-TRUE16-NEXT: scratch_store_b32 off, v88, s32 offset:200
+; GFX11-TRUE16-NEXT: scratch_store_b32 off, v89, s32 offset:196
+; GFX11-TRUE16-NEXT: scratch_store_b32 off, v90, s32 offset:192
+; GFX11-TRUE16-NEXT: scratch_store_b32 off, v91, s32 offset:188
+; GFX11-TRUE16-NEXT: scratch_store_b32 off, v92, s32 offset:184
+; GFX11-TRUE16-NEXT: scratch_store_b32 off, v93, s32 offset:180
+; GFX11-TRUE16-NEXT: scratch_store_b32 off, v94, s32 offset:176
+; GFX11-TRUE16-NEXT: scratch_store_b32 off, v95, s32 offset:172
+; GFX11-TRUE16-NEXT: s_clause 0x1f
+; GFX11-TRUE16-NEXT: scratch_store_b32 off, v104, s32 offset:168
+; GFX11-TRUE16-NEXT: scratch_store_b32 off, v105, s32 offset:164
+; GFX11-TRUE16-NEXT: scratch_store_b32 off, v106, s32 offset:160
+; GFX11-TRUE16-NEXT: scratch_store_b32 off, v107, s32 offset:156
+; GFX11-TRUE16-NEXT: scratch_store_b32 off, v108, s32 offset:152
+; GFX11-TRUE16-NEXT: scratch_store_b32 off, v109, s32 offset:148
+; GFX11-TRUE16-NEXT: scratch_store_b32 off, v110, s32 offset:144
+; GFX11-TRUE16-NEXT: scratch_store_b32 off, v111, s32 offset:140
+; GFX11-TRUE16-NEXT: scratch_store_b32 off, v120, s32 offset:136
+; GFX11-TRUE16-NEXT: scratch_store_b32 off, v121, s32 offset:132
+; GFX11-TRUE16-NEXT: scratch_store_b32 off, v122, s32 offset:128
+; GFX11-TRUE16-NEXT: scratch_store_b32 off, v123, s32 offset:124
+; GFX11-TRUE16-NEXT: scratch_store_b32 off, v124, s32 offset:120
+; GFX11-TRUE16-NEXT: scratch_store_b32 off, v125, s32 offset:116
+; GFX11-TRUE16-NEXT: scratch_store_b32 off, v126, s32 offset:112
+; GFX11-TRUE16-NEXT: scratch_store_b32 off, v127, s32 offset:108
+; GFX11-TRUE16-NEXT: scratch_store_b32 off, v136, s32 offset:104
+; GFX11-TRUE16-NEXT: scratch_store_b32 off, v137, s32 offset:100
+; GFX11-TRUE16-NEXT: scratch_store_b32 off, v138, s32 offset:96
+; GFX11-TRUE16-NEXT: scratch_store_b32 off, v139, s32 offset:92
+; GFX11-TRUE16-NEXT: scratch_store_b32 off, v140, s32 offset:88
+; GFX11-TRUE16-NEXT: scratch_store_b32 off, v141, s32 offset:84
+; GFX11-TRUE16-NEXT: scratch_store_b32 off, v142, s32 offset:80
+; GFX11-TRUE16-NEXT: scratch_store_b32 off, v143, s32 offset:76
+; GFX11-TRUE16-NEXT: scratch_store_b32 off, v152, s32 offset:72
+; GFX11-TRUE16-NEXT: scratch_store_b32 off, v153, s32 offset:68
+; GFX11-TRUE16-NEXT: scratch_store_b32 off, v154, s32 offset:64
+; GFX11-TRUE16-NEXT: scratch_store_b32 off, v155, s32 offset:60
+; GFX11-TRUE16-NEXT: scratch_store_b32 off, v156, s32 offset:56
+; GFX11-TRUE16-NEXT: scratch_store_b32 off, v157, s32 offset:52
+; GFX11-TRUE16-NEXT: scratch_store_b32 off, v158, s32 offset:48
+; GFX11-TRUE16-NEXT: scratch_store_b32 off, v159, s32 offset:44
+; GFX11-TRUE16-NEXT: s_clause 0xa
+; GFX11-TRUE16-NEXT: scratch_store_b32 off, v168, s32 offset:40
+; GFX11-TRUE16-NEXT: scratch_store_b32 off, v169, s32 offset:36
+; GFX11-TRUE16-NEXT: scratch_store_b32 off, v170, s32 offset:32
+; GFX11-TRUE16-NEXT: scratch_store_b32 off, v171, s32 offset:28
+; GFX11-TRUE16-NEXT: scratch_store_b32 off, v172, s32 offset:24
+; GFX11-TRUE16-NEXT: scratch_store_b32 off, v173, s32 offset:20
+; GFX11-TRUE16-NEXT: scratch_store_b32 off, v174, s32 offset:16
+; GFX11-TRUE16-NEXT: scratch_store_b32 off, v175, s32 offset:12
+; GFX11-TRUE16-NEXT: scratch_store_b32 off, v184, s32 offset:8
+; GFX11-TRUE16-NEXT: scratch_store_b32 off, v185, s32 offset:4
+; GFX11-TRUE16-NEXT: scratch_store_b32 off, v186, s32
+; GFX11-TRUE16-NEXT: v_dual_mov_b32 v185, v1 :: v_dual_mov_b32 v186, v0
+; GFX11-TRUE16-NEXT: s_lshr_b32 s15, s29, 16
+; GFX11-TRUE16-NEXT: s_lshr_b32 s14, s28, 16
+; GFX11-TRUE16-NEXT: s_lshr_b32 s13, s27, 16
+; GFX11-TRUE16-NEXT: s_lshr_b32 s12, s26, 16
+; GFX11-TRUE16-NEXT: s_lshr_b32 s11, s25, 16
+; GFX11-TRUE16-NEXT: s_lshr_b32 s10, s24, 16
+; GFX11-TRUE16-NEXT: s_lshr_b32 s9, s23, 16
+; GFX11-TRUE16-NEXT: s_lshr_b32 s8, s22, 16
+; GFX11-TRUE16-NEXT: s_lshr_b32 s7, s21, 16
+; GFX11-TRUE16-NEXT: s_lshr_b32 s6, s20, 16
+; GFX11-TRUE16-NEXT: s_lshr_b32 s5, s19, 16
+; GFX11-TRUE16-NEXT: s_lshr_b32 s4, s18, 16
+; GFX11-TRUE16-NEXT: s_lshr_b32 s43, s17, 16
+; GFX11-TRUE16-NEXT: s_lshr_b32 s44, s16, 16
+; GFX11-TRUE16-NEXT: s_lshr_b32 s45, s3, 16
+; GFX11-TRUE16-NEXT: s_lshr_b32 s46, s2, 16
+; GFX11-TRUE16-NEXT: s_lshr_b32 s41, s1, 16
+; GFX11-TRUE16-NEXT: s_lshr_b32 s40, s0, 16
+; GFX11-TRUE16-NEXT: s_mov_b32 s42, 0
+; GFX11-TRUE16-NEXT: s_pack_ll_b32_b16 s40, s0, s40
+; GFX11-TRUE16-NEXT: s_pack_ll_b32_b16 s41, s1, s41
+; GFX11-TRUE16-NEXT: s_pack_ll_b32_b16 s0, s2, s46
+; GFX11-TRUE16-NEXT: s_pack_ll_b32_b16 s1, s3, s45
+; GFX11-TRUE16-NEXT: s_pack_ll_b32_b16 s2, s16, s44
+; GFX11-TRUE16-NEXT: s_pack_ll_b32_b16 s3, s17, s43
+; GFX11-TRUE16-NEXT: s_pack_ll_b32_b16 s4, s18, s4
+; GFX11-TRUE16-NEXT: s_pack_ll_b32_b16 s5, s19, s5
+; GFX11-TRUE16-NEXT: s_pack_ll_b32_b16 s6, s20, s6
+; GFX11-TRUE16-NEXT: s_pack_ll_b32_b16 s7, s21, s7
+; GFX11-TRUE16-NEXT: s_pack_ll_b32_b16 s8, s22, s8
+; GFX11-TRUE16-NEXT: s_pack_ll_b32_b16 s9, s23, s9
+; GFX11-TRUE16-NEXT: s_pack_ll_b32_b16 s10, s24, s10
+; GFX11-TRUE16-NEXT: s_pack_ll_b32_b16 s11, s25, s11
+; GFX11-TRUE16-NEXT: s_pack_ll_b32_b16 s12, s26, s12
+; GFX11-TRUE16-NEXT: s_pack_ll_b32_b16 s13, s27, s13
+; GFX11-TRUE16-NEXT: s_pack_ll_b32_b16 s14, s28, s14
+; GFX11-TRUE16-NEXT: s_pack_ll_b32_b16 s15, s29, s15
; GFX11-TRUE16-NEXT: s_and_b32 s47, vcc_lo, exec_lo
; GFX11-TRUE16-NEXT: s_cbranch_scc0 .LBB51_4
; GFX11-TRUE16-NEXT: ; %bb.1: ; %cmp.false
-; GFX11-TRUE16-NEXT: v_lshl_or_b32 v18, v33, 16, v35
-; GFX11-TRUE16-NEXT: v_lshl_or_b32 v19, v32, 16, v34
-; GFX11-TRUE16-NEXT: v_dual_mov_b32 v0, s4 :: v_dual_mov_b32 v1, s5
-; GFX11-TRUE16-NEXT: v_dual_mov_b32 v2, s6 :: v_dual_mov_b32 v3, s3
-; GFX11-TRUE16-NEXT: v_dual_mov_b32 v4, s7 :: v_dual_mov_b32 v5, s8
-; GFX11-TRUE16-NEXT: v_dual_mov_b32 v6, s9 :: v_dual_mov_b32 v7, s10
-; GFX11-TRUE16-NEXT: v_dual_mov_b32 v8, s11 :: v_dual_mov_b32 v9, s12
-; GFX11-TRUE16-NEXT: v_dual_mov_b32 v10, s13 :: v_dual_mov_b32 v11, s14
-; GFX11-TRUE16-NEXT: v_dual_mov_b32 v12, s15 :: v_dual_mov_b32 v13, s16
-; GFX11-TRUE16-NEXT: v_dual_mov_b32 v14, s17 :: v_dual_mov_b32 v15, s0
-; GFX11-TRUE16-NEXT: v_dual_mov_b32 v16, s1 :: v_dual_mov_b32 v17, s2
-; GFX11-TRUE16-NEXT: s_and_not1_b32 vcc_lo, exec_lo, s40
+; GFX11-TRUE16-NEXT: v_dual_mov_b32 v0, s40 :: v_dual_mov_b32 v5, s0
+; GFX11-TRUE16-NEXT: v_dual_mov_b32 v2, s41 :: v_dual_mov_b32 v9, s1
+; GFX11-TRUE16-NEXT: v_dual_mov_b32 v14, s2 :: v_dual_mov_b32 v27, s4
+; GFX11-TRUE16-NEXT: v_dual_mov_b32 v20, s3 :: v_dual_mov_b32 v35, s5
+; GFX11-TRUE16-NEXT: v_dual_mov_b32 v44, s6 :: v_dual_mov_b32 v65, s8
+; GFX11-TRUE16-NEXT: v_dual_mov_b32 v54, s7 :: v_dual_mov_b32 v77, s9
+; GFX11-TRUE16-NEXT: v_dual_mov_b32 v90, s10 :: v_dual_mov_b32 v119, s12
+; GFX11-TRUE16-NEXT: v_dual_mov_b32 v104, s11 :: v_dual_mov_b32 v135, s13
+; GFX11-TRUE16-NEXT: v_mov_b32_e32 v152, s14
+; GFX11-TRUE16-NEXT: v_mov_b32_e32 v170, s15
+; GFX11-TRUE16-NEXT: s_and_not1_b32 vcc_lo, exec_lo, s42
; GFX11-TRUE16-NEXT: s_cbranch_vccnz .LBB51_3
; GFX11-TRUE16-NEXT: .LBB51_2: ; %cmp.true
-; GFX11-TRUE16-NEXT: v_lshl_or_b32 v18, v33, 16, v35
-; GFX11-TRUE16-NEXT: v_lshl_or_b32 v19, v32, 16, v34
-; GFX11-TRUE16-NEXT: v_pk_add_u16 v0, s4, 3 op_sel_hi:[1,0]
-; GFX11-TRUE16-NEXT: v_pk_add_u16 v1, s5, 3 op_sel_hi:[1,0]
-; GFX11-TRUE16-NEXT: v_pk_add_u16 v2, s6, 3 op_sel_hi:[1,0]
-; GFX11-TRUE16-NEXT: v_pk_add_u16 v3, s3, 3 op_sel_hi:[1,0]
-; GFX11-TRUE16-NEXT: v_pk_add_u16 v4, s7, 3 op_sel_hi:[1,0]
-; GFX11-TRUE16-NEXT: v_pk_add_u16 v5, s8, 3 op_sel_hi:[1,0]
-; GFX11-TRUE16-NEXT: v_pk_add_u16 v6, s9, 3 op_sel_hi:[1,0]
-; GFX11-TRUE16-NEXT: v_pk_add_u16 v7, s10, 3 op_sel_hi:[1,0]
-; GFX11-TRUE16-NEXT: v_pk_add_u16 v8, s11, 3 op_sel_hi:[1,0]
-; GFX11-TRUE16-NEXT: v_pk_add_u16 v9, s12, 3 op_sel_hi:[1,0]
-; GFX11-TRUE16-NEXT: v_pk_add_u16 v10, s13, 3 op_sel_hi:[1,0]
-; GFX11-TRUE16-NEXT: v_pk_add_u16 v11, s14, 3 op_sel_hi:[1,0]
-; GFX11-TRUE16-NEXT: v_pk_add_u16 v12, s15, 3 op_sel_hi:[1,0]
-; GFX11-TRUE16-NEXT: v_pk_add_u16 v13, s16, 3 op_sel_hi:[1,0]
-; GFX11-TRUE16-NEXT: v_pk_add_u16 v14, s17, 3 op_sel_hi:[1,0]
-; GFX11-TRUE16-NEXT: v_pk_add_u16 v15, s0, 3 op_sel_hi:[1,0]
-; GFX11-TRUE16-NEXT: v_pk_add_u16 v16, s1, 3 op_sel_hi:[1,0]
-; GFX11-TRUE16-NEXT: v_pk_add_u16 v17, s2, 3 op_sel_hi:[1,0]
-; GFX11-TRUE16-NEXT: v_pk_add_u16 v18, v18, 3 op_sel_hi:[1,0]
-; GFX11-TRUE16-NEXT: v_pk_add_u16 v19, v19, 3 op_sel_hi:[1,0]
+; GFX11-TRUE16-NEXT: v_pk_add_u16 v0, s40, 3 op_sel_hi:[1,0]
+; GFX11-TRUE16-NEXT: v_pk_add_u16 v2, s41, 3 op_sel_hi:[1,0]
+; GFX11-TRUE16-NEXT: v_pk_add_u16 v186, v186, 3 op_sel_hi:[1,0]
+; GFX11-TRUE16-NEXT: v_pk_add_u16 v185, v185, 3 op_sel_hi:[1,0]
+; GFX11-TRUE16-NEXT: v_pk_add_u16 v5, s0, 3 op_sel_hi:[1,0]
+; GFX11-TRUE16-NEXT: v_pk_add_u16 v9, s1, 3 op_sel_hi:[1,0]
+; GFX11-TRUE16-NEXT: v_pk_add_u16 v14, s2, 3 op_sel_hi:[1,0]
+; GFX11-TRUE16-NEXT: v_pk_add_u16 v20, s3, 3 op_sel_hi:[1,0]
+; GFX11-TRUE16-NEXT: v_pk_add_u16 v27, s4, 3 op_sel_hi:[1,0]
+; GFX11-TRUE16-NEXT: v_pk_add_u16 v35, s5, 3 op_sel_hi:[1,0]
+; GFX11-TRUE16-NEXT: v_pk_add_u16 v44, s6, 3 op_sel_hi:[1,0]
+; GFX11-TRUE16-NEXT: v_pk_add_u16 v54, s7, 3 op_sel_hi:[1,0]
+; GFX11-TRUE16-NEXT: v_pk_add_u16 v65, s8, 3 op_sel_hi:[1,0]
+; GFX11-TRUE16-NEXT: v_pk_add_u16 v77, s9, 3 op_sel_hi:[1,0]
+; GFX11-TRUE16-NEXT: v_pk_add_u16 v90, s10, 3 op_sel_hi:[1,0]
+; GFX11-TRUE16-NEXT: v_pk_add_u16 v104, s11, 3 op_sel_hi:[1,0]
+; GFX11-TRUE16-NEXT: v_pk_add_u16 v119, s12, 3 op_sel_hi:[1,0]
+; GFX11-TRUE16-NEXT: v_pk_add_u16 v135, s13, 3 op_sel_hi:[1,0]
+; GFX11-TRUE16-NEXT: v_pk_add_u16 v152, s14, 3 op_sel_hi:[1,0]
+; GFX11-TRUE16-NEXT: v_pk_add_u16 v170, s15, 3 op_sel_hi:[1,0]
; GFX11-TRUE16-NEXT: .LBB51_3: ; %end
+; GFX11-TRUE16-NEXT: v_dual_mov_b32 v7, v35 :: v_dual_mov_b32 v8, v44
+; GFX11-TRUE16-NEXT: v_dual_mov_b32 v11, v77 :: v_dual_mov_b32 v12, v90
+; GFX11-TRUE16-NEXT: v_mov_b32_e32 v13, v104
+; GFX11-TRUE16-NEXT: v_dual_mov_b32 v15, v135 :: v_dual_mov_b32 v16, v152
+; GFX11-TRUE16-NEXT: v_mov_b32_e32 v17, v170
+; GFX11-TRUE16-NEXT: v_dual_mov_b32 v18, v186 :: v_dual_mov_b32 v19, v185
+; GFX11-TRUE16-NEXT: s_clause 0x1f
+; GFX11-TRUE16-NEXT: scratch_load_b32 v186, off, s32
+; GFX11-TRUE16-NEXT: scratch_load_b32 v185, off, s32 offset:4
+; GFX11-TRUE16-NEXT: scratch_load_b32 v184, off, s32 offset:8
+; GFX11-TRUE16-NEXT: scratch_load_b32 v175, off, s32 offset:12
+; GFX11-TRUE16-NEXT: scratch_load_b32 v174, off, s32 offset:16
+; GFX11-TRUE16-NEXT: scratch_load_b32 v173, off, s32 offset:20
+; GFX11-TRUE16-NEXT: scratch_load_b32 v172, off, s32 offset:24
+; GFX11-TRUE16-NEXT: scratch_load_b32 v171, off, s32 offset:28
+; GFX11-TRUE16-NEXT: scratch_load_b32 v170, off, s32 offset:32
+; GFX11-TRUE16-NEXT: scratch_load_b32 v169, off, s32 offset:36
+; GFX11-TRUE16-NEXT: scratch_load_b32 v168, off, s32 offset:40
+; GFX11-TRUE16-NEXT: scratch_load_b32 v159, off, s32 offset:44
+; GFX11-TRUE16-NEXT: scratch_load_b32 v158, off, s32 offset:48
+; GFX11-TRUE16-NEXT: scratch_load_b32 v157, off, s32 offset:52
+; GFX11-TRUE16-NEXT: scratch_load_b32 v156, off, s32 offset:56
+; GFX11-TRUE16-NEXT: scratch_load_b32 v155, off, s32 offset:60
+; GFX11-TRUE16-NEXT: scratch_load_b32 v154, off, s32 offset:64
+; GFX11-TRUE16-NEXT: scratch_load_b32 v153, off, s32 offset:68
+; GFX11-TRUE16-NEXT: scratch_load_b32 v152, off, s32 offset:72
+; GFX11-TRUE16-NEXT: scratch_load_b32 v143, off, s32 offset:76
+; GFX11-TRUE16-NEXT: scratch_load_b32 v142, off, s32 offset:80
+; GFX11-TRUE16-NEXT: scratch_load_b32 v141, off, s32 offset:84
+; GFX11-TRUE16-NEXT: scratch_load_b32 v140, off, s32 offset:88
+; GFX11-TRUE16-NEXT: scratch_load_b32 v139, off, s32 offset:92
+; GFX11-TRUE16-NEXT: scratch_load_b32 v138, off, s32 offset:96
+; GFX11-TRUE16-NEXT: scratch_load_b32 v137, off, s32 offset:100
+; GFX11-TRUE16-NEXT: scratch_load_b32 v136, off, s32 offset:104
+; GFX11-TRUE16-NEXT: scratch_load_b32 v127, off, s32 offset:108
+; GFX11-TRUE16-NEXT: scratch_load_b32 v126, off, s32 offset:112
+; GFX11-TRUE16-NEXT: scratch_load_b32 v125, off, s32 offset:116
+; GFX11-TRUE16-NEXT: scratch_load_b32 v124, off, s32 offset:120
+; GFX11-TRUE16-NEXT: scratch_load_b32 v123, off, s32 offset:124
+; GFX11-TRUE16-NEXT: s_clause 0x1f
+; GFX11-TRUE16-NEXT: scratch_load_b32 v122, off, s32 offset:128
+; GFX11-TRUE16-NEXT: scratch_load_b32 v121, off, s32 offset:132
+; GFX11-TRUE16-NEXT: scratch_load_b32 v120, off, s32 offset:136
+; GFX11-TRUE16-NEXT: scratch_load_b32 v111, off, s32 offset:140
+; GFX11-TRUE16-NEXT: scratch_load_b32 v110, off, s32 offset:144
+; GFX11-TRUE16-NEXT: scratch_load_b32 v109, off, s32 offset:148
+; GFX11-TRUE16-NEXT: scratch_load_b32 v108, off, s32 offset:152
+; GFX11-TRUE16-NEXT: scratch_load_b32 v107, off, s32 offset:156
+; GFX11-TRUE16-NEXT: scratch_load_b32 v106, off, s32 offset:160
+; GFX11-TRUE16-NEXT: scratch_load_b32 v105, off, s32 offset:164
+; GFX11-TRUE16-NEXT: scratch_load_b32 v104, off, s32 offset:168
+; GFX11-TRUE16-NEXT: scratch_load_b32 v95, off, s32 offset:172
+; GFX11-TRUE16-NEXT: scratch_load_b32 v94, off, s32 offset:176
+; GFX11-TRUE16-NEXT: scratch_load_b32 v93, off, s32 offset:180
+; GFX11-TRUE16-NEXT: scratch_load_b32 v92, off, s32 offset:184
+; GFX11-TRUE16-NEXT: scratch_load_b32 v91, off, s32 offset:188
+; GFX11-TRUE16-NEXT: scratch_load_b32 v90, off, s32 offset:192
+; GFX11-TRUE16-NEXT: scratch_load_b32 v89, off, s32 offset:196
+; GFX11-TRUE16-NEXT: scratch_load_b32 v88, off, s32 offset:200
+; GFX11-TRUE16-NEXT: scratch_load_b32 v79, off, s32 offset:204
+; GFX11-TRUE16-NEXT: scratch_load_b32 v78, off, s32 offset:208
+; GFX11-TRUE16-NEXT: scratch_load_b32 v77, off, s32 offset:212
+; GFX11-TRUE16-NEXT: scratch_load_b32 v76, off, s32 offset:216
+; GFX11-TRUE16-NEXT: scratch_load_b32 v75, off, s32 offset:220
+; GFX11-TRUE16-NEXT: scratch_load_b32 v74, off, s32 offset:224
+; GFX11-TRUE16-NEXT: scratch_load_b32 v73, off, s32 offset:228
+; GFX11-TRUE16-NEXT: scratch_load_b32 v72, off, s32 offset:232
+; GFX11-TRUE16-NEXT: scratch_load_b32 v63, off, s32 offset:236
+; GFX11-TRUE16-NEXT: scratch_load_b32 v62, off, s32 offset:240
+; GFX11-TRUE16-NEXT: scratch_load_b32 v61, off, s32 offset:244
+; GFX11-TRUE16-NEXT: scratch_load_b32 v60, off, s32 offset:248
+; GFX11-TRUE16-NEXT: scratch_load_b32 v59, off, s32 offset:252
+; GFX11-TRUE16-NEXT: s_clause 0xa
+; GFX11-TRUE16-NEXT: scratch_load_b32 v58, off, s32 offset:256
+; GFX11-TRUE16-NEXT: scratch_load_b32 v57, off, s32 offset:260
+; GFX11-TRUE16-NEXT: scratch_load_b32 v56, off, s32 offset:264
+; GFX11-TRUE16-NEXT: scratch_load_b32 v47, off, s32 offset:268
+; GFX11-TRUE16-NEXT: scratch_load_b32 v46, off, s32 offset:272
+; GFX11-TRUE16-NEXT: scratch_load_b32 v45, off, s32 offset:276
+; GFX11-TRUE16-NEXT: scratch_load_b32 v44, off, s32 offset:280
+; GFX11-TRUE16-NEXT: scratch_load_b32 v43, off, s32 offset:284
+; GFX11-TRUE16-NEXT: scratch_load_b32 v42, off, s32 offset:288
+; GFX11-TRUE16-NEXT: scratch_load_b32 v41, off, s32 offset:292
+; GFX11-TRUE16-NEXT: scratch_load_b32 v40, off, s32 offset:296
+; GFX11-TRUE16-NEXT: v_dual_mov_b32 v1, v2 :: v_dual_mov_b32 v2, v5
+; GFX11-TRUE16-NEXT: v_dual_mov_b32 v3, v9 :: v_dual_mov_b32 v4, v14
+; GFX11-TRUE16-NEXT: v_dual_mov_b32 v5, v20 :: v_dual_mov_b32 v6, v27
+; GFX11-TRUE16-NEXT: v_dual_mov_b32 v9, v54 :: v_dual_mov_b32 v10, v65
+; GFX11-TRUE16-NEXT: v_mov_b32_e32 v14, v119
+; GFX11-TRUE16-NEXT: s_waitcnt vmcnt(0)
; GFX11-TRUE16-NEXT: s_setpc_b64 s[30:31]
; GFX11-TRUE16-NEXT: .LBB51_4:
; GFX11-TRUE16-NEXT: ; implicit-def: $vgpr0_vgpr1_vgpr2_vgpr3_vgpr4_vgpr5_vgpr6_vgpr7_vgpr8_vgpr9_vgpr10_vgpr11_vgpr12_vgpr13_vgpr14_vgpr15_vgpr16_vgpr17_vgpr18_vgpr19_vgpr20_vgpr21_vgpr22_vgpr23_vgpr24_vgpr25_vgpr26_vgpr27_vgpr28_vgpr29_vgpr30_vgpr31
+; GFX11-TRUE16-NEXT: ; implicit-def: $vgpr1_vgpr2_vgpr3_vgpr4_vgpr5_vgpr6_vgpr7_vgpr8_vgpr9_vgpr10_vgpr11_vgpr12_vgpr13_vgpr14_vgpr15_vgpr16_vgpr17_vgpr18_vgpr19_vgpr20_vgpr21_vgpr22_vgpr23_vgpr24_vgpr25_vgpr26_vgpr27_vgpr28_vgpr29_vgpr30_vgpr31_vgpr32
+; GFX11-TRUE16-NEXT: ; implicit-def: $vgpr3_vgpr4_vgpr5_vgpr6_vgpr7_vgpr8_vgpr9_vgpr10_vgpr11_vgpr12_vgpr13_vgpr14_vgpr15_vgpr16_vgpr17_vgpr18_vgpr19_vgpr20_vgpr21_vgpr22_vgpr23_vgpr24_vgpr25_vgpr26_vgpr27_vgpr28_vgpr29_vgpr30_vgpr31_vgpr32_vgpr33_vgpr34
+; GFX11-TRUE16-NEXT: ; implicit-def: $vgpr6_vgpr7_vgpr8_vgpr9_vgpr10_vgpr11_vgpr12_vgpr13_vgpr14_vgpr15_vgpr16_vgpr17_vgpr18_vgpr19_vgpr20_vgpr21_vgpr22_vgpr23_vgpr24_vgpr25_vgpr26_vgpr27_vgpr28_vgpr29_vgpr30_vgpr31_vgpr32_vgpr33_vgpr34_vgpr35_vgpr36_vgpr37
+; GFX11-TRUE16-NEXT: ; implicit-def: $vgpr10_vgpr11_vgpr12_vgpr13_vgpr14_vgpr15_vgpr16_vgpr17_vgpr18_vgpr19_vgpr20_vgpr21_vgpr22_vgpr23_vgpr24_vgpr25_vgpr26_vgpr27_vgpr28_vgpr29_vgpr30_vgpr31_vgpr32_vgpr33_vgpr34_vgpr35_vgpr36_vgpr37_vgpr38_vgpr39_vgpr40_vgpr41
+; GFX11-TRUE16-NEXT: ; implicit-def: $vgpr15_vgpr16_vgpr17_vgpr18_vgpr19_vgpr20_vgpr21_vgpr22_vgpr23_vgpr24_vgpr25_vgpr26_vgpr27_vgpr28_vgpr29_vgpr30_vgpr31_vgpr32_vgpr33_vgpr34_vgpr35_vgpr36_vgpr37_vgpr38_vgpr39_vgpr40_vgpr41_vgpr42_vgpr43_vgpr44_vgpr45_vgpr46
+; GFX11-TRUE16-NEXT: ; implicit-def: $vgpr21_vgpr22_vgpr23_vgpr24_vgpr25_vgpr26_vgpr27_vgpr28_vgpr29_vgpr30_vgpr31_vgpr32_vgpr33_vgpr34_vgpr35_vgpr36_vgpr37_vgpr38_vgpr39_vgpr40_vgpr41_vgpr42_vgpr43_vgpr44_vgpr45_vgpr46_vgpr47_vgpr48_vgpr49_vgpr50_vgpr51_vgpr52
+; GFX11-TRUE16-NEXT: ; implicit-def: $vgpr28_vgpr29_vgpr30_vgpr31_vgpr32_vgpr33_vgpr34_vgpr35_vgpr36_vgpr37_vgpr38_vgpr39_vgpr40_vgpr41_vgpr42_vgpr43_vgpr44_vgpr45_vgpr46_vgpr47_vgpr48_vgpr49_vgpr50_vgpr51_vgpr52_vgpr53_vgpr54_vgpr55_vgpr56_vgpr57_vgpr58_vgpr59
+; GFX11-TRUE16-NEXT: ; implicit-def: $vgpr36_vgpr37_vgpr38_vgpr39_vgpr40_vgpr41_vgpr42_vgpr43_vgpr44_vgpr45_vgpr46_vgpr47_vgpr48_vgpr49_vgpr50_vgpr51_vgpr52_vgpr53_vgpr54_vgpr55_vgpr56_vgpr57_vgpr58_vgpr59_vgpr60_vgpr61_vgpr62_vgpr63_vgpr64_vgpr65_vgpr66_vgpr67
+; GFX11-TRUE16-NEXT: ; implicit-def: $vgpr45_vgpr46_vgpr47_vgpr48_vgpr49_vgpr50_vgpr51_vgpr52_vgpr53_vgpr54_vgpr55_vgpr56_vgpr57_vgpr58_vgpr59_vgpr60_vgpr61_vgpr62_vgpr63_vgpr64_vgpr65_vgpr66_vgpr67_vgpr68_vgpr69_vgpr70_vgpr71_vgpr72_vgpr73_vgpr74_vgpr75_vgpr76
+; GFX11-TRUE16-NEXT: ; implicit-def: $vgpr55_vgpr56_vgpr57_vgpr58_vgpr59_vgpr60_vgpr61_vgpr62_vgpr63_vgpr64_vgpr65_vgpr66_vgpr67_vgpr68_vgpr69_vgpr70_vgpr71_vgpr72_vgpr73_vgpr74_vgpr75_vgpr76_vgpr77_vgpr78_vgpr79_vgpr80_vgpr81_vgpr82_vgpr83_vgpr84_vgpr85_vgpr86
+; GFX11-TRUE16-NEXT: ; implicit-def: $vgpr66_vgpr67_vgpr68_vgpr69_vgpr70_vgpr71_vgpr72_vgpr73_vgpr74_vgpr75_vgpr76_vgpr77_vgpr78_vgpr79_vgpr80_vgpr81_vgpr82_vgpr83_vgpr84_vgpr85_vgpr86_vgpr87_vgpr88_vgpr89_vgpr90_vgpr91_vgpr92_vgpr93_vgpr94_vgpr95_vgpr96_vgpr97
+; GFX11-TRUE16-NEXT: ; implicit-def: $vgpr78_vgpr79_vgpr80_vgpr81_vgpr82_vgpr83_vgpr84_vgpr85_vgpr86_vgpr87_vgpr88_vgpr89_vgpr90_vgpr91_vgpr92_vgpr93_vgpr94_vgpr95_vgpr96_vgpr97_vgpr98_vgpr99_vgpr100_vgpr101_vgpr102_vgpr103_vgpr104_vgpr105_vgpr106_vgpr107_vgpr108_vgpr109
+; GFX11-TRUE16-NEXT: ; implicit-def: $vgpr91_vgpr92_vgpr93_vgpr94_vgpr95_vgpr96_vgpr97_vgpr98_vgpr99_vgpr100_vgpr101_vgpr102_vgpr103_vgpr104_vgpr105_vgpr106_vgpr107_vgpr108_vgpr109_vgpr110_vgpr111_vgpr112_vgpr113_vgpr114_vgpr115_vgpr116_vgpr117_vgpr118_vgpr119_vgpr120_vgpr121_vgpr122
+; GFX11-TRUE16-NEXT: ; implicit-def: $vgpr105_vgpr106_vgpr107_vgpr108_vgpr109_vgpr110_vgpr111_vgpr112_vgpr113_vgpr114_vgpr115_vgpr116_vgpr117_vgpr118_vgpr119_vgpr120_vgpr121_vgpr122_vgpr123_vgpr124_vgpr125_vgpr126_vgpr127_vgpr128_vgpr129_vgpr130_vgpr131_vgpr132_vgpr133_vgpr134_vgpr135_vgpr136
+; GFX11-TRUE16-NEXT: ; implicit-def: $vgpr120_vgpr121_vgpr122_vgpr123_vgpr124_vgpr125_vgpr126_vgpr127_vgpr128_vgpr129_vgpr130_vgpr131_vgpr132_vgpr133_vgpr134_vgpr135_vgpr136_vgpr137_vgpr138_vgpr139_vgpr140_vgpr141_vgpr142_vgpr143_vgpr144_vgpr145_vgpr146_vgpr147_vgpr148_vgpr149_vgpr150_vgpr151
+; GFX11-TRUE16-NEXT: ; implicit-def: $vgpr136_vgpr137_vgpr138_vgpr139_vgpr140_vgpr141_vgpr142_vgpr143_vgpr144_vgpr145_vgpr146_vgpr147_vgpr148_vgpr149_vgpr150_vgpr151_vgpr152_vgpr153_vgpr154_vgpr155_vgpr156_vgpr157_vgpr158_vgpr159_vgpr160_vgpr161_vgpr162_vgpr163_vgpr164_vgpr165_vgpr166_vgpr167
+; GFX11-TRUE16-NEXT: ; implicit-def: $vgpr153_vgpr154_vgpr155_vgpr156_vgpr157_vgpr158_vgpr159_vgpr160_vgpr161_vgpr162_vgpr163_vgpr164_vgpr165_vgpr166_vgpr167_vgpr168_vgpr169_vgpr170_vgpr171_vgpr172_vgpr173_vgpr174_vgpr175_vgpr176_vgpr177_vgpr178_vgpr179_vgpr180_vgpr181_vgpr182_vgpr183_vgpr184
; GFX11-TRUE16-NEXT: s_branch .LBB51_2
;
; GFX11-FAKE16-LABEL: bitcast_v40i16_to_v10f64_scalar:
@@ -27484,142 +29110,271 @@ define inreg <40 x half> @bitcast_v10f64_to_v40f16_scalar(<10 x double> inreg %a
; GFX9-NEXT: ; implicit-def: $vgpr26
; GFX9-NEXT: s_branch .LBB53_2
;
-; GFX11-LABEL: bitcast_v10f64_to_v40f16_scalar:
-; GFX11: ; %bb.0:
-; GFX11-NEXT: s_waitcnt vmcnt(0) expcnt(0) lgkmcnt(0)
-; GFX11-NEXT: v_cmp_ne_u32_e32 vcc_lo, 0, v2
-; GFX11-NEXT: v_dual_mov_b32 v20, s0 :: v_dual_mov_b32 v21, s1
-; GFX11-NEXT: v_dual_mov_b32 v3, s2 :: v_dual_mov_b32 v4, s3
-; GFX11-NEXT: v_dual_mov_b32 v18, s16 :: v_dual_mov_b32 v19, s17
-; GFX11-NEXT: v_dual_mov_b32 v7, s18 :: v_dual_mov_b32 v8, s19
-; GFX11-NEXT: v_dual_mov_b32 v5, s20 :: v_dual_mov_b32 v6, s21
-; GFX11-NEXT: v_dual_mov_b32 v12, s22 :: v_dual_mov_b32 v13, s23
-; GFX11-NEXT: v_dual_mov_b32 v10, s24 :: v_dual_mov_b32 v11, s25
-; GFX11-NEXT: v_dual_mov_b32 v14, s26 :: v_dual_mov_b32 v15, s27
-; GFX11-NEXT: v_dual_mov_b32 v16, s28 :: v_dual_mov_b32 v17, s29
-; GFX11-NEXT: s_mov_b32 s0, 0
-; GFX11-NEXT: s_and_b32 s1, vcc_lo, exec_lo
-; GFX11-NEXT: s_cbranch_scc0 .LBB53_4
-; GFX11-NEXT: ; %bb.1: ; %cmp.false
-; GFX11-NEXT: v_lshrrev_b32_e32 v22, 16, v1
-; GFX11-NEXT: v_lshrrev_b32_e32 v23, 16, v0
-; GFX11-NEXT: v_lshrrev_b32_e32 v24, 16, v17
-; GFX11-NEXT: v_lshrrev_b32_e32 v25, 16, v16
-; GFX11-NEXT: v_lshrrev_b32_e32 v26, 16, v15
-; GFX11-NEXT: v_lshrrev_b32_e32 v27, 16, v14
-; GFX11-NEXT: v_lshrrev_b32_e32 v28, 16, v11
-; GFX11-NEXT: v_lshrrev_b32_e32 v29, 16, v10
-; GFX11-NEXT: v_lshrrev_b32_e32 v30, 16, v13
-; GFX11-NEXT: v_lshrrev_b32_e32 v31, 16, v12
-; GFX11-NEXT: v_lshrrev_b32_e32 v9, 16, v6
-; GFX11-NEXT: v_lshrrev_b32_e32 v32, 16, v5
-; GFX11-NEXT: v_lshrrev_b32_e32 v33, 16, v8
-; GFX11-NEXT: v_lshrrev_b32_e32 v34, 16, v7
-; GFX11-NEXT: v_lshrrev_b32_e32 v35, 16, v19
-; GFX11-NEXT: v_lshrrev_b32_e32 v36, 16, v18
-; GFX11-NEXT: v_lshrrev_b32_e32 v37, 16, v4
-; GFX11-NEXT: v_lshrrev_b32_e32 v2, 16, v3
-; GFX11-NEXT: v_lshrrev_b32_e32 v38, 16, v21
-; GFX11-NEXT: v_lshrrev_b32_e32 v39, 16, v20
-; GFX11-NEXT: s_and_not1_b32 vcc_lo, exec_lo, s0
-; GFX11-NEXT: s_cbranch_vccnz .LBB53_3
-; GFX11-NEXT: .LBB53_2: ; %cmp.true
-; GFX11-NEXT: v_add_f64 v[0:1], v[0:1], 1.0
-; GFX11-NEXT: v_add_f64 v[16:17], v[16:17], 1.0
-; GFX11-NEXT: v_add_f64 v[14:15], v[14:15], 1.0
-; GFX11-NEXT: v_add_f64 v[10:11], v[10:11], 1.0
-; GFX11-NEXT: v_add_f64 v[12:13], v[12:13], 1.0
-; GFX11-NEXT: v_add_f64 v[5:6], v[5:6], 1.0
-; GFX11-NEXT: v_add_f64 v[7:8], v[7:8], 1.0
-; GFX11-NEXT: v_add_f64 v[18:19], v[18:19], 1.0
-; GFX11-NEXT: v_add_f64 v[3:4], v[3:4], 1.0
-; GFX11-NEXT: v_add_f64 v[20:21], v[20:21], 1.0
-; GFX11-NEXT: v_lshrrev_b32_e32 v22, 16, v1
-; GFX11-NEXT: v_lshrrev_b32_e32 v23, 16, v0
-; GFX11-NEXT: v_lshrrev_b32_e32 v24, 16, v17
-; GFX11-NEXT: v_lshrrev_b32_e32 v25, 16, v16
-; GFX11-NEXT: v_lshrrev_b32_e32 v26, 16, v15
-; GFX11-NEXT: v_lshrrev_b32_e32 v27, 16, v14
-; GFX11-NEXT: v_lshrrev_b32_e32 v28, 16, v11
-; GFX11-NEXT: v_lshrrev_b32_e32 v29, 16, v10
-; GFX11-NEXT: v_lshrrev_b32_e32 v30, 16, v13
-; GFX11-NEXT: v_lshrrev_b32_e32 v31, 16, v12
-; GFX11-NEXT: v_lshrrev_b32_e32 v9, 16, v6
-; GFX11-NEXT: v_lshrrev_b32_e32 v32, 16, v5
-; GFX11-NEXT: v_lshrrev_b32_e32 v33, 16, v8
-; GFX11-NEXT: v_lshrrev_b32_e32 v34, 16, v7
-; GFX11-NEXT: v_lshrrev_b32_e32 v35, 16, v19
-; GFX11-NEXT: v_lshrrev_b32_e32 v36, 16, v18
-; GFX11-NEXT: v_lshrrev_b32_e32 v37, 16, v4
-; GFX11-NEXT: v_lshrrev_b32_e32 v2, 16, v3
-; GFX11-NEXT: v_lshrrev_b32_e32 v38, 16, v21
-; GFX11-NEXT: v_lshrrev_b32_e32 v39, 16, v20
-; GFX11-NEXT: .LBB53_3: ; %end
-; GFX11-NEXT: v_and_b32_e32 v3, 0xffff, v3
-; GFX11-NEXT: v_and_b32_e32 v4, 0xffff, v4
-; GFX11-NEXT: v_and_b32_e32 v18, 0xffff, v18
-; GFX11-NEXT: v_and_b32_e32 v21, 0xffff, v21
-; GFX11-NEXT: v_and_b32_e32 v7, 0xffff, v7
-; GFX11-NEXT: v_lshl_or_b32 v2, v2, 16, v3
-; GFX11-NEXT: v_lshl_or_b32 v3, v37, 16, v4
-; GFX11-NEXT: v_lshl_or_b32 v4, v36, 16, v18
-; GFX11-NEXT: v_and_b32_e32 v18, 0xffff, v19
-; GFX11-NEXT: v_and_b32_e32 v8, 0xffff, v8
-; GFX11-NEXT: v_and_b32_e32 v19, 0xffff, v5
-; GFX11-NEXT: v_lshl_or_b32 v21, v38, 16, v21
-; GFX11-NEXT: v_and_b32_e32 v20, 0xffff, v20
-; GFX11-NEXT: v_and_b32_e32 v36, 0xffff, v6
-; GFX11-NEXT: v_lshl_or_b32 v6, v34, 16, v7
-; GFX11-NEXT: v_lshl_or_b32 v7, v33, 16, v8
-; GFX11-NEXT: v_lshl_or_b32 v8, v32, 16, v19
-; GFX11-NEXT: v_and_b32_e32 v13, 0xffff, v13
-; GFX11-NEXT: v_and_b32_e32 v19, 0xffff, v11
-; GFX11-NEXT: v_and_b32_e32 v1, 0xffff, v1
-; GFX11-NEXT: v_lshl_or_b32 v5, v35, 16, v18
-; GFX11-NEXT: v_and_b32_e32 v12, 0xffff, v12
-; GFX11-NEXT: v_and_b32_e32 v18, 0xffff, v10
-; GFX11-NEXT: v_and_b32_e32 v14, 0xffff, v14
-; GFX11-NEXT: v_lshl_or_b32 v11, v30, 16, v13
-; GFX11-NEXT: v_lshl_or_b32 v13, v28, 16, v19
-; GFX11-NEXT: v_and_b32_e32 v15, 0xffff, v15
-; GFX11-NEXT: v_and_b32_e32 v16, 0xffff, v16
-; GFX11-NEXT: v_and_b32_e32 v17, 0xffff, v17
-; GFX11-NEXT: v_and_b32_e32 v0, 0xffff, v0
-; GFX11-NEXT: v_lshl_or_b32 v19, v22, 16, v1
-; GFX11-NEXT: v_mov_b32_e32 v1, v21
-; GFX11-NEXT: v_lshl_or_b32 v20, v39, 16, v20
-; GFX11-NEXT: v_lshl_or_b32 v9, v9, 16, v36
-; GFX11-NEXT: v_lshl_or_b32 v10, v31, 16, v12
-; GFX11-NEXT: v_lshl_or_b32 v12, v29, 16, v18
-; GFX11-NEXT: v_lshl_or_b32 v14, v27, 16, v14
-; GFX11-NEXT: v_lshl_or_b32 v15, v26, 16, v15
-; GFX11-NEXT: v_lshl_or_b32 v16, v25, 16, v16
-; GFX11-NEXT: v_lshl_or_b32 v17, v24, 16, v17
-; GFX11-NEXT: v_lshl_or_b32 v18, v23, 16, v0
-; GFX11-NEXT: v_mov_b32_e32 v0, v20
-; GFX11-NEXT: s_setpc_b64 s[30:31]
-; GFX11-NEXT: .LBB53_4:
-; GFX11-NEXT: ; implicit-def: $vgpr39
-; GFX11-NEXT: ; implicit-def: $vgpr38
-; GFX11-NEXT: ; implicit-def: $vgpr2
-; GFX11-NEXT: ; implicit-def: $vgpr37
-; GFX11-NEXT: ; implicit-def: $vgpr36
-; GFX11-NEXT: ; implicit-def: $vgpr35
-; GFX11-NEXT: ; implicit-def: $vgpr34
-; GFX11-NEXT: ; implicit-def: $vgpr33
-; GFX11-NEXT: ; implicit-def: $vgpr32
-; GFX11-NEXT: ; implicit-def: $vgpr9
-; GFX11-NEXT: ; implicit-def: $vgpr31
-; GFX11-NEXT: ; implicit-def: $vgpr30
-; GFX11-NEXT: ; implicit-def: $vgpr29
-; GFX11-NEXT: ; implicit-def: $vgpr28
-; GFX11-NEXT: ; implicit-def: $vgpr27
-; GFX11-NEXT: ; implicit-def: $vgpr26
-; GFX11-NEXT: ; implicit-def: $vgpr25
-; GFX11-NEXT: ; implicit-def: $vgpr24
-; GFX11-NEXT: ; implicit-def: $vgpr23
-; GFX11-NEXT: ; implicit-def: $vgpr22
-; GFX11-NEXT: s_branch .LBB53_2
+; GFX11-TRUE16-LABEL: bitcast_v10f64_to_v40f16_scalar:
+; GFX11-TRUE16: ; %bb.0:
+; GFX11-TRUE16-NEXT: s_waitcnt vmcnt(0) expcnt(0) lgkmcnt(0)
+; GFX11-TRUE16-NEXT: v_dual_mov_b32 v16, v2 :: v_dual_mov_b32 v19, v1
+; GFX11-TRUE16-NEXT: v_dual_mov_b32 v18, v0 :: v_dual_mov_b32 v1, s1
+; GFX11-TRUE16-NEXT: v_dual_mov_b32 v0, s0 :: v_dual_mov_b32 v3, s3
+; GFX11-TRUE16-NEXT: s_delay_alu instid0(VALU_DEP_3)
+; GFX11-TRUE16-NEXT: v_cmp_ne_u32_e32 vcc_lo, 0, v16
+; GFX11-TRUE16-NEXT: v_dual_mov_b32 v2, s2 :: v_dual_mov_b32 v5, s17
+; GFX11-TRUE16-NEXT: v_dual_mov_b32 v4, s16 :: v_dual_mov_b32 v7, s19
+; GFX11-TRUE16-NEXT: v_dual_mov_b32 v6, s18 :: v_dual_mov_b32 v9, s21
+; GFX11-TRUE16-NEXT: v_dual_mov_b32 v8, s20 :: v_dual_mov_b32 v11, s23
+; GFX11-TRUE16-NEXT: v_dual_mov_b32 v10, s22 :: v_dual_mov_b32 v13, s25
+; GFX11-TRUE16-NEXT: v_dual_mov_b32 v12, s24 :: v_dual_mov_b32 v15, s27
+; GFX11-TRUE16-NEXT: v_dual_mov_b32 v14, s26 :: v_dual_mov_b32 v17, s29
+; GFX11-TRUE16-NEXT: v_mov_b32_e32 v16, s28
+; GFX11-TRUE16-NEXT: s_mov_b32 s0, 0
+; GFX11-TRUE16-NEXT: s_and_b32 s1, vcc_lo, exec_lo
+; GFX11-TRUE16-NEXT: s_cbranch_scc0 .LBB53_4
+; GFX11-TRUE16-NEXT: ; %bb.1: ; %cmp.false
+; GFX11-TRUE16-NEXT: v_lshrrev_b32_e32 v20, 16, v19
+; GFX11-TRUE16-NEXT: v_lshrrev_b32_e32 v21, 16, v18
+; GFX11-TRUE16-NEXT: v_lshrrev_b32_e32 v22, 16, v17
+; GFX11-TRUE16-NEXT: v_lshrrev_b32_e32 v23, 16, v16
+; GFX11-TRUE16-NEXT: v_lshrrev_b32_e32 v24, 16, v15
+; GFX11-TRUE16-NEXT: v_lshrrev_b32_e32 v25, 16, v14
+; GFX11-TRUE16-NEXT: v_lshrrev_b32_e32 v26, 16, v13
+; GFX11-TRUE16-NEXT: v_lshrrev_b32_e32 v27, 16, v12
+; GFX11-TRUE16-NEXT: v_lshrrev_b32_e32 v28, 16, v11
+; GFX11-TRUE16-NEXT: v_lshrrev_b32_e32 v29, 16, v10
+; GFX11-TRUE16-NEXT: v_lshrrev_b32_e32 v30, 16, v9
+; GFX11-TRUE16-NEXT: v_lshrrev_b32_e32 v31, 16, v8
+; GFX11-TRUE16-NEXT: v_lshrrev_b32_e32 v32, 16, v7
+; GFX11-TRUE16-NEXT: v_lshrrev_b32_e32 v33, 16, v6
+; GFX11-TRUE16-NEXT: v_lshrrev_b32_e32 v34, 16, v5
+; GFX11-TRUE16-NEXT: v_lshrrev_b32_e32 v35, 16, v4
+; GFX11-TRUE16-NEXT: v_lshrrev_b32_e32 v36, 16, v3
+; GFX11-TRUE16-NEXT: v_lshrrev_b32_e32 v37, 16, v2
+; GFX11-TRUE16-NEXT: v_lshrrev_b32_e32 v38, 16, v1
+; GFX11-TRUE16-NEXT: v_lshrrev_b32_e32 v39, 16, v0
+; GFX11-TRUE16-NEXT: s_and_not1_b32 vcc_lo, exec_lo, s0
+; GFX11-TRUE16-NEXT: s_cbranch_vccnz .LBB53_3
+; GFX11-TRUE16-NEXT: .LBB53_2: ; %cmp.true
+; GFX11-TRUE16-NEXT: v_add_f64 v[18:19], v[18:19], 1.0
+; GFX11-TRUE16-NEXT: v_add_f64 v[16:17], v[16:17], 1.0
+; GFX11-TRUE16-NEXT: v_add_f64 v[14:15], v[14:15], 1.0
+; GFX11-TRUE16-NEXT: v_add_f64 v[12:13], v[12:13], 1.0
+; GFX11-TRUE16-NEXT: v_add_f64 v[10:11], v[10:11], 1.0
+; GFX11-TRUE16-NEXT: v_add_f64 v[8:9], v[8:9], 1.0
+; GFX11-TRUE16-NEXT: v_add_f64 v[6:7], v[6:7], 1.0
+; GFX11-TRUE16-NEXT: v_add_f64 v[4:5], v[4:5], 1.0
+; GFX11-TRUE16-NEXT: v_add_f64 v[2:3], v[2:3], 1.0
+; GFX11-TRUE16-NEXT: v_add_f64 v[0:1], v[0:1], 1.0
+; GFX11-TRUE16-NEXT: v_lshrrev_b32_e32 v20, 16, v19
+; GFX11-TRUE16-NEXT: v_lshrrev_b32_e32 v21, 16, v18
+; GFX11-TRUE16-NEXT: v_lshrrev_b32_e32 v22, 16, v17
+; GFX11-TRUE16-NEXT: v_lshrrev_b32_e32 v23, 16, v16
+; GFX11-TRUE16-NEXT: v_lshrrev_b32_e32 v24, 16, v15
+; GFX11-TRUE16-NEXT: v_lshrrev_b32_e32 v25, 16, v14
+; GFX11-TRUE16-NEXT: v_lshrrev_b32_e32 v26, 16, v13
+; GFX11-TRUE16-NEXT: v_lshrrev_b32_e32 v27, 16, v12
+; GFX11-TRUE16-NEXT: v_lshrrev_b32_e32 v28, 16, v11
+; GFX11-TRUE16-NEXT: v_lshrrev_b32_e32 v29, 16, v10
+; GFX11-TRUE16-NEXT: v_lshrrev_b32_e32 v30, 16, v9
+; GFX11-TRUE16-NEXT: v_lshrrev_b32_e32 v31, 16, v8
+; GFX11-TRUE16-NEXT: v_lshrrev_b32_e32 v32, 16, v7
+; GFX11-TRUE16-NEXT: v_lshrrev_b32_e32 v33, 16, v6
+; GFX11-TRUE16-NEXT: v_lshrrev_b32_e32 v34, 16, v5
+; GFX11-TRUE16-NEXT: v_lshrrev_b32_e32 v35, 16, v4
+; GFX11-TRUE16-NEXT: v_lshrrev_b32_e32 v36, 16, v3
+; GFX11-TRUE16-NEXT: v_lshrrev_b32_e32 v37, 16, v2
+; GFX11-TRUE16-NEXT: v_lshrrev_b32_e32 v38, 16, v1
+; GFX11-TRUE16-NEXT: v_lshrrev_b32_e32 v39, 16, v0
+; GFX11-TRUE16-NEXT: .LBB53_3: ; %end
+; GFX11-TRUE16-NEXT: s_delay_alu instid0(VALU_DEP_1) | instskip(NEXT) | instid1(VALU_DEP_4)
+; GFX11-TRUE16-NEXT: v_dual_mov_b32 v39, v39 :: v_dual_mov_b32 v38, v38
+; GFX11-TRUE16-NEXT: v_dual_mov_b32 v37, v37 :: v_dual_mov_b32 v36, v36
+; GFX11-TRUE16-NEXT: v_dual_mov_b32 v35, v35 :: v_dual_mov_b32 v34, v34
+; GFX11-TRUE16-NEXT: v_dual_mov_b32 v33, v33 :: v_dual_mov_b32 v32, v32
+; GFX11-TRUE16-NEXT: v_dual_mov_b32 v31, v31 :: v_dual_mov_b32 v30, v30
+; GFX11-TRUE16-NEXT: v_dual_mov_b32 v29, v29 :: v_dual_mov_b32 v28, v28
+; GFX11-TRUE16-NEXT: v_dual_mov_b32 v27, v27 :: v_dual_mov_b32 v26, v26
+; GFX11-TRUE16-NEXT: v_dual_mov_b32 v25, v25 :: v_dual_mov_b32 v24, v24
+; GFX11-TRUE16-NEXT: v_dual_mov_b32 v23, v23 :: v_dual_mov_b32 v22, v22
+; GFX11-TRUE16-NEXT: v_dual_mov_b32 v21, v21 :: v_dual_mov_b32 v20, v20
+; GFX11-TRUE16-NEXT: v_mov_b16_e32 v0.h, v39.l
+; GFX11-TRUE16-NEXT: v_mov_b16_e32 v1.h, v38.l
+; GFX11-TRUE16-NEXT: v_mov_b16_e32 v2.h, v37.l
+; GFX11-TRUE16-NEXT: v_mov_b16_e32 v3.h, v36.l
+; GFX11-TRUE16-NEXT: v_mov_b16_e32 v4.h, v35.l
+; GFX11-TRUE16-NEXT: v_mov_b16_e32 v5.h, v34.l
+; GFX11-TRUE16-NEXT: v_mov_b16_e32 v6.h, v33.l
+; GFX11-TRUE16-NEXT: v_mov_b16_e32 v7.h, v32.l
+; GFX11-TRUE16-NEXT: v_mov_b16_e32 v8.h, v31.l
+; GFX11-TRUE16-NEXT: v_mov_b16_e32 v9.h, v30.l
+; GFX11-TRUE16-NEXT: v_mov_b16_e32 v10.h, v29.l
+; GFX11-TRUE16-NEXT: v_mov_b16_e32 v11.h, v28.l
+; GFX11-TRUE16-NEXT: v_mov_b16_e32 v12.h, v27.l
+; GFX11-TRUE16-NEXT: v_mov_b16_e32 v13.h, v26.l
+; GFX11-TRUE16-NEXT: v_mov_b16_e32 v14.h, v25.l
+; GFX11-TRUE16-NEXT: v_mov_b16_e32 v15.h, v24.l
+; GFX11-TRUE16-NEXT: v_mov_b16_e32 v16.h, v23.l
+; GFX11-TRUE16-NEXT: v_mov_b16_e32 v17.h, v22.l
+; GFX11-TRUE16-NEXT: v_mov_b16_e32 v18.h, v21.l
+; GFX11-TRUE16-NEXT: v_mov_b16_e32 v19.h, v20.l
+; GFX11-TRUE16-NEXT: s_setpc_b64 s[30:31]
+; GFX11-TRUE16-NEXT: .LBB53_4:
+; GFX11-TRUE16-NEXT: ; implicit-def: $vgpr39
+; GFX11-TRUE16-NEXT: ; implicit-def: $vgpr38
+; GFX11-TRUE16-NEXT: ; implicit-def: $vgpr37
+; GFX11-TRUE16-NEXT: ; implicit-def: $vgpr36
+; GFX11-TRUE16-NEXT: ; implicit-def: $vgpr35
+; GFX11-TRUE16-NEXT: ; implicit-def: $vgpr34
+; GFX11-TRUE16-NEXT: ; implicit-def: $vgpr33
+; GFX11-TRUE16-NEXT: ; implicit-def: $vgpr32
+; GFX11-TRUE16-NEXT: ; implicit-def: $vgpr31
+; GFX11-TRUE16-NEXT: ; implicit-def: $vgpr30
+; GFX11-TRUE16-NEXT: ; implicit-def: $vgpr29
+; GFX11-TRUE16-NEXT: ; implicit-def: $vgpr28
+; GFX11-TRUE16-NEXT: ; implicit-def: $vgpr27
+; GFX11-TRUE16-NEXT: ; implicit-def: $vgpr26
+; GFX11-TRUE16-NEXT: ; implicit-def: $vgpr25
+; GFX11-TRUE16-NEXT: ; implicit-def: $vgpr24
+; GFX11-TRUE16-NEXT: ; implicit-def: $vgpr23
+; GFX11-TRUE16-NEXT: ; implicit-def: $vgpr22
+; GFX11-TRUE16-NEXT: ; implicit-def: $vgpr21
+; GFX11-TRUE16-NEXT: ; implicit-def: $vgpr20
+; GFX11-TRUE16-NEXT: s_branch .LBB53_2
+;
+; GFX11-FAKE16-LABEL: bitcast_v10f64_to_v40f16_scalar:
+; GFX11-FAKE16: ; %bb.0:
+; GFX11-FAKE16-NEXT: s_waitcnt vmcnt(0) expcnt(0) lgkmcnt(0)
+; GFX11-FAKE16-NEXT: v_cmp_ne_u32_e32 vcc_lo, 0, v2
+; GFX11-FAKE16-NEXT: v_dual_mov_b32 v20, s0 :: v_dual_mov_b32 v21, s1
+; GFX11-FAKE16-NEXT: v_dual_mov_b32 v3, s2 :: v_dual_mov_b32 v4, s3
+; GFX11-FAKE16-NEXT: v_dual_mov_b32 v18, s16 :: v_dual_mov_b32 v19, s17
+; GFX11-FAKE16-NEXT: v_dual_mov_b32 v7, s18 :: v_dual_mov_b32 v8, s19
+; GFX11-FAKE16-NEXT: v_dual_mov_b32 v5, s20 :: v_dual_mov_b32 v6, s21
+; GFX11-FAKE16-NEXT: v_dual_mov_b32 v12, s22 :: v_dual_mov_b32 v13, s23
+; GFX11-FAKE16-NEXT: v_dual_mov_b32 v10, s24 :: v_dual_mov_b32 v11, s25
+; GFX11-FAKE16-NEXT: v_dual_mov_b32 v14, s26 :: v_dual_mov_b32 v15, s27
+; GFX11-FAKE16-NEXT: v_dual_mov_b32 v16, s28 :: v_dual_mov_b32 v17, s29
+; GFX11-FAKE16-NEXT: s_mov_b32 s0, 0
+; GFX11-FAKE16-NEXT: s_and_b32 s1, vcc_lo, exec_lo
+; GFX11-FAKE16-NEXT: s_cbranch_scc0 .LBB53_4
+; GFX11-FAKE16-NEXT: ; %bb.1: ; %cmp.false
+; GFX11-FAKE16-NEXT: v_lshrrev_b32_e32 v22, 16, v1
+; GFX11-FAKE16-NEXT: v_lshrrev_b32_e32 v23, 16, v0
+; GFX11-FAKE16-NEXT: v_lshrrev_b32_e32 v24, 16, v17
+; GFX11-FAKE16-NEXT: v_lshrrev_b32_e32 v25, 16, v16
+; GFX11-FAKE16-NEXT: v_lshrrev_b32_e32 v26, 16, v15
+; GFX11-FAKE16-NEXT: v_lshrrev_b32_e32 v27, 16, v14
+; GFX11-FAKE16-NEXT: v_lshrrev_b32_e32 v28, 16, v11
+; GFX11-FAKE16-NEXT: v_lshrrev_b32_e32 v29, 16, v10
+; GFX11-FAKE16-NEXT: v_lshrrev_b32_e32 v30, 16, v13
+; GFX11-FAKE16-NEXT: v_lshrrev_b32_e32 v31, 16, v12
+; GFX11-FAKE16-NEXT: v_lshrrev_b32_e32 v9, 16, v6
+; GFX11-FAKE16-NEXT: v_lshrrev_b32_e32 v32, 16, v5
+; GFX11-FAKE16-NEXT: v_lshrrev_b32_e32 v33, 16, v8
+; GFX11-FAKE16-NEXT: v_lshrrev_b32_e32 v34, 16, v7
+; GFX11-FAKE16-NEXT: v_lshrrev_b32_e32 v35, 16, v19
+; GFX11-FAKE16-NEXT: v_lshrrev_b32_e32 v36, 16, v18
+; GFX11-FAKE16-NEXT: v_lshrrev_b32_e32 v37, 16, v4
+; GFX11-FAKE16-NEXT: v_lshrrev_b32_e32 v2, 16, v3
+; GFX11-FAKE16-NEXT: v_lshrrev_b32_e32 v38, 16, v21
+; GFX11-FAKE16-NEXT: v_lshrrev_b32_e32 v39, 16, v20
+; GFX11-FAKE16-NEXT: s_and_not1_b32 vcc_lo, exec_lo, s0
+; GFX11-FAKE16-NEXT: s_cbranch_vccnz .LBB53_3
+; GFX11-FAKE16-NEXT: .LBB53_2: ; %cmp.true
+; GFX11-FAKE16-NEXT: v_add_f64 v[0:1], v[0:1], 1.0
+; GFX11-FAKE16-NEXT: v_add_f64 v[16:17], v[16:17], 1.0
+; GFX11-FAKE16-NEXT: v_add_f64 v[14:15], v[14:15], 1.0
+; GFX11-FAKE16-NEXT: v_add_f64 v[10:11], v[10:11], 1.0
+; GFX11-FAKE16-NEXT: v_add_f64 v[12:13], v[12:13], 1.0
+; GFX11-FAKE16-NEXT: v_add_f64 v[5:6], v[5:6], 1.0
+; GFX11-FAKE16-NEXT: v_add_f64 v[7:8], v[7:8], 1.0
+; GFX11-FAKE16-NEXT: v_add_f64 v[18:19], v[18:19], 1.0
+; GFX11-FAKE16-NEXT: v_add_f64 v[3:4], v[3:4], 1.0
+; GFX11-FAKE16-NEXT: v_add_f64 v[20:21], v[20:21], 1.0
+; GFX11-FAKE16-NEXT: v_lshrrev_b32_e32 v22, 16, v1
+; GFX11-FAKE16-NEXT: v_lshrrev_b32_e32 v23, 16, v0
+; GFX11-FAKE16-NEXT: v_lshrrev_b32_e32 v24, 16, v17
+; GFX11-FAKE16-NEXT: v_lshrrev_b32_e32 v25, 16, v16
+; GFX11-FAKE16-NEXT: v_lshrrev_b32_e32 v26, 16, v15
+; GFX11-FAKE16-NEXT: v_lshrrev_b32_e32 v27, 16, v14
+; GFX11-FAKE16-NEXT: v_lshrrev_b32_e32 v28, 16, v11
+; GFX11-FAKE16-NEXT: v_lshrrev_b32_e32 v29, 16, v10
+; GFX11-FAKE16-NEXT: v_lshrrev_b32_e32 v30, 16, v13
+; GFX11-FAKE16-NEXT: v_lshrrev_b32_e32 v31, 16, v12
+; GFX11-FAKE16-NEXT: v_lshrrev_b32_e32 v9, 16, v6
+; GFX11-FAKE16-NEXT: v_lshrrev_b32_e32 v32, 16, v5
+; GFX11-FAKE16-NEXT: v_lshrrev_b32_e32 v33, 16, v8
+; GFX11-FAKE16-NEXT: v_lshrrev_b32_e32 v34, 16, v7
+; GFX11-FAKE16-NEXT: v_lshrrev_b32_e32 v35, 16, v19
+; GFX11-FAKE16-NEXT: v_lshrrev_b32_e32 v36, 16, v18
+; GFX11-FAKE16-NEXT: v_lshrrev_b32_e32 v37, 16, v4
+; GFX11-FAKE16-NEXT: v_lshrrev_b32_e32 v2, 16, v3
+; GFX11-FAKE16-NEXT: v_lshrrev_b32_e32 v38, 16, v21
+; GFX11-FAKE16-NEXT: v_lshrrev_b32_e32 v39, 16, v20
+; GFX11-FAKE16-NEXT: .LBB53_3: ; %end
+; GFX11-FAKE16-NEXT: v_and_b32_e32 v3, 0xffff, v3
+; GFX11-FAKE16-NEXT: v_and_b32_e32 v4, 0xffff, v4
+; GFX11-FAKE16-NEXT: v_and_b32_e32 v18, 0xffff, v18
+; GFX11-FAKE16-NEXT: v_and_b32_e32 v21, 0xffff, v21
+; GFX11-FAKE16-NEXT: v_and_b32_e32 v7, 0xffff, v7
+; GFX11-FAKE16-NEXT: v_lshl_or_b32 v2, v2, 16, v3
+; GFX11-FAKE16-NEXT: v_lshl_or_b32 v3, v37, 16, v4
+; GFX11-FAKE16-NEXT: v_lshl_or_b32 v4, v36, 16, v18
+; GFX11-FAKE16-NEXT: v_and_b32_e32 v18, 0xffff, v19
+; GFX11-FAKE16-NEXT: v_and_b32_e32 v8, 0xffff, v8
+; GFX11-FAKE16-NEXT: v_and_b32_e32 v19, 0xffff, v5
+; GFX11-FAKE16-NEXT: v_lshl_or_b32 v21, v38, 16, v21
+; GFX11-FAKE16-NEXT: v_and_b32_e32 v20, 0xffff, v20
+; GFX11-FAKE16-NEXT: v_and_b32_e32 v36, 0xffff, v6
+; GFX11-FAKE16-NEXT: v_lshl_or_b32 v6, v34, 16, v7
+; GFX11-FAKE16-NEXT: v_lshl_or_b32 v7, v33, 16, v8
+; GFX11-FAKE16-NEXT: v_lshl_or_b32 v8, v32, 16, v19
+; GFX11-FAKE16-NEXT: v_and_b32_e32 v13, 0xffff, v13
+; GFX11-FAKE16-NEXT: v_and_b32_e32 v19, 0xffff, v11
+; GFX11-FAKE16-NEXT: v_and_b32_e32 v1, 0xffff, v1
+; GFX11-FAKE16-NEXT: v_lshl_or_b32 v5, v35, 16, v18
+; GFX11-FAKE16-NEXT: v_and_b32_e32 v12, 0xffff, v12
+; GFX11-FAKE16-NEXT: v_and_b32_e32 v18, 0xffff, v10
+; GFX11-FAKE16-NEXT: v_and_b32_e32 v14, 0xffff, v14
+; GFX11-FAKE16-NEXT: v_lshl_or_b32 v11, v30, 16, v13
+; GFX11-FAKE16-NEXT: v_lshl_or_b32 v13, v28, 16, v19
+; GFX11-FAKE16-NEXT: v_and_b32_e32 v15, 0xffff, v15
+; GFX11-FAKE16-NEXT: v_and_b32_e32 v16, 0xffff, v16
+; GFX11-FAKE16-NEXT: v_and_b32_e32 v17, 0xffff, v17
+; GFX11-FAKE16-NEXT: v_and_b32_e32 v0, 0xffff, v0
+; GFX11-FAKE16-NEXT: v_lshl_or_b32 v19, v22, 16, v1
+; GFX11-FAKE16-NEXT: v_mov_b32_e32 v1, v21
+; GFX11-FAKE16-NEXT: v_lshl_or_b32 v20, v39, 16, v20
+; GFX11-FAKE16-NEXT: v_lshl_or_b32 v9, v9, 16, v36
+; GFX11-FAKE16-NEXT: v_lshl_or_b32 v10, v31, 16, v12
+; GFX11-FAKE16-NEXT: v_lshl_or_b32 v12, v29, 16, v18
+; GFX11-FAKE16-NEXT: v_lshl_or_b32 v14, v27, 16, v14
+; GFX11-FAKE16-NEXT: v_lshl_or_b32 v15, v26, 16, v15
+; GFX11-FAKE16-NEXT: v_lshl_or_b32 v16, v25, 16, v16
+; GFX11-FAKE16-NEXT: v_lshl_or_b32 v17, v24, 16, v17
+; GFX11-FAKE16-NEXT: v_lshl_or_b32 v18, v23, 16, v0
+; GFX11-FAKE16-NEXT: v_mov_b32_e32 v0, v20
+; GFX11-FAKE16-NEXT: s_setpc_b64 s[30:31]
+; GFX11-FAKE16-NEXT: .LBB53_4:
+; GFX11-FAKE16-NEXT: ; implicit-def: $vgpr39
+; GFX11-FAKE16-NEXT: ; implicit-def: $vgpr38
+; GFX11-FAKE16-NEXT: ; implicit-def: $vgpr2
+; GFX11-FAKE16-NEXT: ; implicit-def: $vgpr37
+; GFX11-FAKE16-NEXT: ; implicit-def: $vgpr36
+; GFX11-FAKE16-NEXT: ; implicit-def: $vgpr35
+; GFX11-FAKE16-NEXT: ; implicit-def: $vgpr34
+; GFX11-FAKE16-NEXT: ; implicit-def: $vgpr33
+; GFX11-FAKE16-NEXT: ; implicit-def: $vgpr32
+; GFX11-FAKE16-NEXT: ; implicit-def: $vgpr9
+; GFX11-FAKE16-NEXT: ; implicit-def: $vgpr31
+; GFX11-FAKE16-NEXT: ; implicit-def: $vgpr30
+; GFX11-FAKE16-NEXT: ; implicit-def: $vgpr29
+; GFX11-FAKE16-NEXT: ; implicit-def: $vgpr28
+; GFX11-FAKE16-NEXT: ; implicit-def: $vgpr27
+; GFX11-FAKE16-NEXT: ; implicit-def: $vgpr26
+; GFX11-FAKE16-NEXT: ; implicit-def: $vgpr25
+; GFX11-FAKE16-NEXT: ; implicit-def: $vgpr24
+; GFX11-FAKE16-NEXT: ; implicit-def: $vgpr23
+; GFX11-FAKE16-NEXT: ; implicit-def: $vgpr22
+; GFX11-FAKE16-NEXT: s_branch .LBB53_2
%cmp = icmp eq i32 %b, 0
br i1 %cmp, label %cmp.true, label %cmp.false
@@ -29258,93 +31013,270 @@ define inreg <10 x double> @bitcast_v40f16_to_v10f64_scalar(<40 x half> inreg %a
; GFX11-TRUE16-LABEL: bitcast_v40f16_to_v10f64_scalar:
; GFX11-TRUE16: ; %bb.0:
; GFX11-TRUE16-NEXT: s_waitcnt vmcnt(0) expcnt(0) lgkmcnt(0)
-; GFX11-TRUE16-NEXT: v_mov_b16_e32 v32.h, 0
; GFX11-TRUE16-NEXT: v_cmp_ne_u32_e32 vcc_lo, 0, v2
-; GFX11-TRUE16-NEXT: v_mov_b16_e32 v32.l, v1.h
-; GFX11-TRUE16-NEXT: v_mov_b16_e32 v33.l, v0.h
-; GFX11-TRUE16-NEXT: v_and_b32_e32 v35, 0xffff, v0
-; GFX11-TRUE16-NEXT: v_mov_b16_e32 v33.h, v32.h
-; GFX11-TRUE16-NEXT: v_and_b32_e32 v34, 0xffff, v1
-; GFX11-TRUE16-NEXT: s_lshr_b32 s41, s29, 16
-; GFX11-TRUE16-NEXT: s_lshr_b32 s42, s28, 16
-; GFX11-TRUE16-NEXT: s_lshr_b32 s43, s27, 16
-; GFX11-TRUE16-NEXT: s_lshr_b32 s44, s26, 16
-; GFX11-TRUE16-NEXT: s_lshr_b32 s45, s25, 16
-; GFX11-TRUE16-NEXT: s_lshr_b32 s15, s24, 16
-; GFX11-TRUE16-NEXT: s_lshr_b32 s14, s23, 16
-; GFX11-TRUE16-NEXT: s_lshr_b32 s13, s22, 16
-; GFX11-TRUE16-NEXT: s_lshr_b32 s12, s21, 16
-; GFX11-TRUE16-NEXT: s_lshr_b32 s11, s20, 16
-; GFX11-TRUE16-NEXT: s_lshr_b32 s10, s19, 16
-; GFX11-TRUE16-NEXT: s_lshr_b32 s9, s18, 16
-; GFX11-TRUE16-NEXT: s_lshr_b32 s8, s17, 16
-; GFX11-TRUE16-NEXT: s_lshr_b32 s7, s16, 16
-; GFX11-TRUE16-NEXT: s_lshr_b32 s46, s3, 16
-; GFX11-TRUE16-NEXT: s_lshr_b32 s6, s2, 16
-; GFX11-TRUE16-NEXT: s_lshr_b32 s5, s1, 16
-; GFX11-TRUE16-NEXT: s_lshr_b32 s4, s0, 16
-; GFX11-TRUE16-NEXT: s_mov_b32 s40, 0
-; GFX11-TRUE16-NEXT: s_pack_ll_b32_b16 s4, s0, s4
-; GFX11-TRUE16-NEXT: s_pack_ll_b32_b16 s5, s1, s5
-; GFX11-TRUE16-NEXT: s_pack_ll_b32_b16 s6, s2, s6
-; GFX11-TRUE16-NEXT: s_pack_ll_b32_b16 s3, s3, s46
-; GFX11-TRUE16-NEXT: s_pack_ll_b32_b16 s7, s16, s7
-; GFX11-TRUE16-NEXT: s_pack_ll_b32_b16 s8, s17, s8
-; GFX11-TRUE16-NEXT: s_pack_ll_b32_b16 s9, s18, s9
-; GFX11-TRUE16-NEXT: s_pack_ll_b32_b16 s10, s19, s10
-; GFX11-TRUE16-NEXT: s_pack_ll_b32_b16 s11, s20, s11
-; GFX11-TRUE16-NEXT: s_pack_ll_b32_b16 s12, s21, s12
-; GFX11-TRUE16-NEXT: s_pack_ll_b32_b16 s13, s22, s13
-; GFX11-TRUE16-NEXT: s_pack_ll_b32_b16 s14, s23, s14
-; GFX11-TRUE16-NEXT: s_pack_ll_b32_b16 s15, s24, s15
-; GFX11-TRUE16-NEXT: s_pack_ll_b32_b16 s16, s25, s45
-; GFX11-TRUE16-NEXT: s_pack_ll_b32_b16 s17, s26, s44
-; GFX11-TRUE16-NEXT: s_pack_ll_b32_b16 s0, s27, s43
-; GFX11-TRUE16-NEXT: s_pack_ll_b32_b16 s1, s28, s42
-; GFX11-TRUE16-NEXT: s_pack_ll_b32_b16 s2, s29, s41
+; GFX11-TRUE16-NEXT: s_clause 0x1f
+; GFX11-TRUE16-NEXT: scratch_store_b32 off, v40, s32 offset:296
+; GFX11-TRUE16-NEXT: scratch_store_b32 off, v41, s32 offset:292
+; GFX11-TRUE16-NEXT: scratch_store_b32 off, v42, s32 offset:288
+; GFX11-TRUE16-NEXT: scratch_store_b32 off, v43, s32 offset:284
+; GFX11-TRUE16-NEXT: scratch_store_b32 off, v44, s32 offset:280
+; GFX11-TRUE16-NEXT: scratch_store_b32 off, v45, s32 offset:276
+; GFX11-TRUE16-NEXT: scratch_store_b32 off, v46, s32 offset:272
+; GFX11-TRUE16-NEXT: scratch_store_b32 off, v47, s32 offset:268
+; GFX11-TRUE16-NEXT: scratch_store_b32 off, v56, s32 offset:264
+; GFX11-TRUE16-NEXT: scratch_store_b32 off, v57, s32 offset:260
+; GFX11-TRUE16-NEXT: scratch_store_b32 off, v58, s32 offset:256
+; GFX11-TRUE16-NEXT: scratch_store_b32 off, v59, s32 offset:252
+; GFX11-TRUE16-NEXT: scratch_store_b32 off, v60, s32 offset:248
+; GFX11-TRUE16-NEXT: scratch_store_b32 off, v61, s32 offset:244
+; GFX11-TRUE16-NEXT: scratch_store_b32 off, v62, s32 offset:240
+; GFX11-TRUE16-NEXT: scratch_store_b32 off, v63, s32 offset:236
+; GFX11-TRUE16-NEXT: scratch_store_b32 off, v72, s32 offset:232
+; GFX11-TRUE16-NEXT: scratch_store_b32 off, v73, s32 offset:228
+; GFX11-TRUE16-NEXT: scratch_store_b32 off, v74, s32 offset:224
+; GFX11-TRUE16-NEXT: scratch_store_b32 off, v75, s32 offset:220
+; GFX11-TRUE16-NEXT: scratch_store_b32 off, v76, s32 offset:216
+; GFX11-TRUE16-NEXT: scratch_store_b32 off, v77, s32 offset:212
+; GFX11-TRUE16-NEXT: scratch_store_b32 off, v78, s32 offset:208
+; GFX11-TRUE16-NEXT: scratch_store_b32 off, v79, s32 offset:204
+; GFX11-TRUE16-NEXT: scratch_store_b32 off, v88, s32 offset:200
+; GFX11-TRUE16-NEXT: scratch_store_b32 off, v89, s32 offset:196
+; GFX11-TRUE16-NEXT: scratch_store_b32 off, v90, s32 offset:192
+; GFX11-TRUE16-NEXT: scratch_store_b32 off, v91, s32 offset:188
+; GFX11-TRUE16-NEXT: scratch_store_b32 off, v92, s32 offset:184
+; GFX11-TRUE16-NEXT: scratch_store_b32 off, v93, s32 offset:180
+; GFX11-TRUE16-NEXT: scratch_store_b32 off, v94, s32 offset:176
+; GFX11-TRUE16-NEXT: scratch_store_b32 off, v95, s32 offset:172
+; GFX11-TRUE16-NEXT: s_clause 0x1f
+; GFX11-TRUE16-NEXT: scratch_store_b32 off, v104, s32 offset:168
+; GFX11-TRUE16-NEXT: scratch_store_b32 off, v105, s32 offset:164
+; GFX11-TRUE16-NEXT: scratch_store_b32 off, v106, s32 offset:160
+; GFX11-TRUE16-NEXT: scratch_store_b32 off, v107, s32 offset:156
+; GFX11-TRUE16-NEXT: scratch_store_b32 off, v108, s32 offset:152
+; GFX11-TRUE16-NEXT: scratch_store_b32 off, v109, s32 offset:148
+; GFX11-TRUE16-NEXT: scratch_store_b32 off, v110, s32 offset:144
+; GFX11-TRUE16-NEXT: scratch_store_b32 off, v111, s32 offset:140
+; GFX11-TRUE16-NEXT: scratch_store_b32 off, v120, s32 offset:136
+; GFX11-TRUE16-NEXT: scratch_store_b32 off, v121, s32 offset:132
+; GFX11-TRUE16-NEXT: scratch_store_b32 off, v122, s32 offset:128
+; GFX11-TRUE16-NEXT: scratch_store_b32 off, v123, s32 offset:124
+; GFX11-TRUE16-NEXT: scratch_store_b32 off, v124, s32 offset:120
+; GFX11-TRUE16-NEXT: scratch_store_b32 off, v125, s32 offset:116
+; GFX11-TRUE16-NEXT: scratch_store_b32 off, v126, s32 offset:112
+; GFX11-TRUE16-NEXT: scratch_store_b32 off, v127, s32 offset:108
+; GFX11-TRUE16-NEXT: scratch_store_b32 off, v136, s32 offset:104
+; GFX11-TRUE16-NEXT: scratch_store_b32 off, v137, s32 offset:100
+; GFX11-TRUE16-NEXT: scratch_store_b32 off, v138, s32 offset:96
+; GFX11-TRUE16-NEXT: scratch_store_b32 off, v139, s32 offset:92
+; GFX11-TRUE16-NEXT: scratch_store_b32 off, v140, s32 offset:88
+; GFX11-TRUE16-NEXT: scratch_store_b32 off, v141, s32 offset:84
+; GFX11-TRUE16-NEXT: scratch_store_b32 off, v142, s32 offset:80
+; GFX11-TRUE16-NEXT: scratch_store_b32 off, v143, s32 offset:76
+; GFX11-TRUE16-NEXT: scratch_store_b32 off, v152, s32 offset:72
+; GFX11-TRUE16-NEXT: scratch_store_b32 off, v153, s32 offset:68
+; GFX11-TRUE16-NEXT: scratch_store_b32 off, v154, s32 offset:64
+; GFX11-TRUE16-NEXT: scratch_store_b32 off, v155, s32 offset:60
+; GFX11-TRUE16-NEXT: scratch_store_b32 off, v156, s32 offset:56
+; GFX11-TRUE16-NEXT: scratch_store_b32 off, v157, s32 offset:52
+; GFX11-TRUE16-NEXT: scratch_store_b32 off, v158, s32 offset:48
+; GFX11-TRUE16-NEXT: scratch_store_b32 off, v159, s32 offset:44
+; GFX11-TRUE16-NEXT: s_clause 0xa
+; GFX11-TRUE16-NEXT: scratch_store_b32 off, v168, s32 offset:40
+; GFX11-TRUE16-NEXT: scratch_store_b32 off, v169, s32 offset:36
+; GFX11-TRUE16-NEXT: scratch_store_b32 off, v170, s32 offset:32
+; GFX11-TRUE16-NEXT: scratch_store_b32 off, v171, s32 offset:28
+; GFX11-TRUE16-NEXT: scratch_store_b32 off, v172, s32 offset:24
+; GFX11-TRUE16-NEXT: scratch_store_b32 off, v173, s32 offset:20
+; GFX11-TRUE16-NEXT: scratch_store_b32 off, v174, s32 offset:16
+; GFX11-TRUE16-NEXT: scratch_store_b32 off, v175, s32 offset:12
+; GFX11-TRUE16-NEXT: scratch_store_b32 off, v184, s32 offset:8
+; GFX11-TRUE16-NEXT: scratch_store_b32 off, v185, s32 offset:4
+; GFX11-TRUE16-NEXT: scratch_store_b32 off, v186, s32
+; GFX11-TRUE16-NEXT: v_dual_mov_b32 v185, v1 :: v_dual_mov_b32 v186, v0
+; GFX11-TRUE16-NEXT: s_lshr_b32 s15, s29, 16
+; GFX11-TRUE16-NEXT: s_lshr_b32 s14, s28, 16
+; GFX11-TRUE16-NEXT: s_lshr_b32 s13, s27, 16
+; GFX11-TRUE16-NEXT: s_lshr_b32 s12, s26, 16
+; GFX11-TRUE16-NEXT: s_lshr_b32 s11, s25, 16
+; GFX11-TRUE16-NEXT: s_lshr_b32 s10, s24, 16
+; GFX11-TRUE16-NEXT: s_lshr_b32 s9, s23, 16
+; GFX11-TRUE16-NEXT: s_lshr_b32 s8, s22, 16
+; GFX11-TRUE16-NEXT: s_lshr_b32 s7, s21, 16
+; GFX11-TRUE16-NEXT: s_lshr_b32 s6, s20, 16
+; GFX11-TRUE16-NEXT: s_lshr_b32 s5, s19, 16
+; GFX11-TRUE16-NEXT: s_lshr_b32 s4, s18, 16
+; GFX11-TRUE16-NEXT: s_lshr_b32 s43, s17, 16
+; GFX11-TRUE16-NEXT: s_lshr_b32 s44, s16, 16
+; GFX11-TRUE16-NEXT: s_lshr_b32 s45, s3, 16
+; GFX11-TRUE16-NEXT: s_lshr_b32 s46, s2, 16
+; GFX11-TRUE16-NEXT: s_lshr_b32 s41, s1, 16
+; GFX11-TRUE16-NEXT: s_lshr_b32 s40, s0, 16
+; GFX11-TRUE16-NEXT: s_mov_b32 s42, 0
+; GFX11-TRUE16-NEXT: s_pack_ll_b32_b16 s40, s0, s40
+; GFX11-TRUE16-NEXT: s_pack_ll_b32_b16 s41, s1, s41
+; GFX11-TRUE16-NEXT: s_pack_ll_b32_b16 s0, s2, s46
+; GFX11-TRUE16-NEXT: s_pack_ll_b32_b16 s1, s3, s45
+; GFX11-TRUE16-NEXT: s_pack_ll_b32_b16 s2, s16, s44
+; GFX11-TRUE16-NEXT: s_pack_ll_b32_b16 s3, s17, s43
+; GFX11-TRUE16-NEXT: s_pack_ll_b32_b16 s4, s18, s4
+; GFX11-TRUE16-NEXT: s_pack_ll_b32_b16 s5, s19, s5
+; GFX11-TRUE16-NEXT: s_pack_ll_b32_b16 s6, s20, s6
+; GFX11-TRUE16-NEXT: s_pack_ll_b32_b16 s7, s21, s7
+; GFX11-TRUE16-NEXT: s_pack_ll_b32_b16 s8, s22, s8
+; GFX11-TRUE16-NEXT: s_pack_ll_b32_b16 s9, s23, s9
+; GFX11-TRUE16-NEXT: s_pack_ll_b32_b16 s10, s24, s10
+; GFX11-TRUE16-NEXT: s_pack_ll_b32_b16 s11, s25, s11
+; GFX11-TRUE16-NEXT: s_pack_ll_b32_b16 s12, s26, s12
+; GFX11-TRUE16-NEXT: s_pack_ll_b32_b16 s13, s27, s13
+; GFX11-TRUE16-NEXT: s_pack_ll_b32_b16 s14, s28, s14
+; GFX11-TRUE16-NEXT: s_pack_ll_b32_b16 s15, s29, s15
; GFX11-TRUE16-NEXT: s_and_b32 s47, vcc_lo, exec_lo
; GFX11-TRUE16-NEXT: s_cbranch_scc0 .LBB55_4
; GFX11-TRUE16-NEXT: ; %bb.1: ; %cmp.false
-; GFX11-TRUE16-NEXT: v_lshl_or_b32 v18, v33, 16, v35
-; GFX11-TRUE16-NEXT: v_lshl_or_b32 v19, v32, 16, v34
-; GFX11-TRUE16-NEXT: v_dual_mov_b32 v0, s4 :: v_dual_mov_b32 v1, s5
-; GFX11-TRUE16-NEXT: v_dual_mov_b32 v2, s6 :: v_dual_mov_b32 v3, s3
-; GFX11-TRUE16-NEXT: v_dual_mov_b32 v4, s7 :: v_dual_mov_b32 v5, s8
-; GFX11-TRUE16-NEXT: v_dual_mov_b32 v6, s9 :: v_dual_mov_b32 v7, s10
-; GFX11-TRUE16-NEXT: v_dual_mov_b32 v8, s11 :: v_dual_mov_b32 v9, s12
-; GFX11-TRUE16-NEXT: v_dual_mov_b32 v10, s13 :: v_dual_mov_b32 v11, s14
-; GFX11-TRUE16-NEXT: v_dual_mov_b32 v12, s15 :: v_dual_mov_b32 v13, s16
-; GFX11-TRUE16-NEXT: v_dual_mov_b32 v14, s17 :: v_dual_mov_b32 v15, s0
-; GFX11-TRUE16-NEXT: v_dual_mov_b32 v16, s1 :: v_dual_mov_b32 v17, s2
-; GFX11-TRUE16-NEXT: s_and_not1_b32 vcc_lo, exec_lo, s40
+; GFX11-TRUE16-NEXT: v_dual_mov_b32 v0, s40 :: v_dual_mov_b32 v5, s0
+; GFX11-TRUE16-NEXT: v_dual_mov_b32 v2, s41 :: v_dual_mov_b32 v9, s1
+; GFX11-TRUE16-NEXT: v_dual_mov_b32 v14, s2 :: v_dual_mov_b32 v27, s4
+; GFX11-TRUE16-NEXT: v_dual_mov_b32 v20, s3 :: v_dual_mov_b32 v35, s5
+; GFX11-TRUE16-NEXT: v_dual_mov_b32 v44, s6 :: v_dual_mov_b32 v65, s8
+; GFX11-TRUE16-NEXT: v_dual_mov_b32 v54, s7 :: v_dual_mov_b32 v77, s9
+; GFX11-TRUE16-NEXT: v_dual_mov_b32 v90, s10 :: v_dual_mov_b32 v119, s12
+; GFX11-TRUE16-NEXT: v_dual_mov_b32 v104, s11 :: v_dual_mov_b32 v135, s13
+; GFX11-TRUE16-NEXT: v_mov_b32_e32 v152, s14
+; GFX11-TRUE16-NEXT: v_mov_b32_e32 v170, s15
+; GFX11-TRUE16-NEXT: s_and_not1_b32 vcc_lo, exec_lo, s42
; GFX11-TRUE16-NEXT: s_cbranch_vccnz .LBB55_3
; GFX11-TRUE16-NEXT: .LBB55_2: ; %cmp.true
-; GFX11-TRUE16-NEXT: v_lshl_or_b32 v18, v33, 16, v35
-; GFX11-TRUE16-NEXT: v_lshl_or_b32 v19, v32, 16, v34
-; GFX11-TRUE16-NEXT: v_pk_add_f16 v0, 0x200, s4 op_sel_hi:[0,1]
-; GFX11-TRUE16-NEXT: v_pk_add_f16 v1, 0x200, s5 op_sel_hi:[0,1]
-; GFX11-TRUE16-NEXT: v_pk_add_f16 v2, 0x200, s6 op_sel_hi:[0,1]
-; GFX11-TRUE16-NEXT: v_pk_add_f16 v3, 0x200, s3 op_sel_hi:[0,1]
-; GFX11-TRUE16-NEXT: v_pk_add_f16 v4, 0x200, s7 op_sel_hi:[0,1]
-; GFX11-TRUE16-NEXT: v_pk_add_f16 v5, 0x200, s8 op_sel_hi:[0,1]
-; GFX11-TRUE16-NEXT: v_pk_add_f16 v6, 0x200, s9 op_sel_hi:[0,1]
-; GFX11-TRUE16-NEXT: v_pk_add_f16 v7, 0x200, s10 op_sel_hi:[0,1]
-; GFX11-TRUE16-NEXT: v_pk_add_f16 v8, 0x200, s11 op_sel_hi:[0,1]
-; GFX11-TRUE16-NEXT: v_pk_add_f16 v9, 0x200, s12 op_sel_hi:[0,1]
-; GFX11-TRUE16-NEXT: v_pk_add_f16 v10, 0x200, s13 op_sel_hi:[0,1]
-; GFX11-TRUE16-NEXT: v_pk_add_f16 v11, 0x200, s14 op_sel_hi:[0,1]
-; GFX11-TRUE16-NEXT: v_pk_add_f16 v12, 0x200, s15 op_sel_hi:[0,1]
-; GFX11-TRUE16-NEXT: v_pk_add_f16 v13, 0x200, s16 op_sel_hi:[0,1]
-; GFX11-TRUE16-NEXT: v_pk_add_f16 v14, 0x200, s17 op_sel_hi:[0,1]
-; GFX11-TRUE16-NEXT: v_pk_add_f16 v15, 0x200, s0 op_sel_hi:[0,1]
-; GFX11-TRUE16-NEXT: v_pk_add_f16 v16, 0x200, s1 op_sel_hi:[0,1]
-; GFX11-TRUE16-NEXT: v_pk_add_f16 v17, 0x200, s2 op_sel_hi:[0,1]
-; GFX11-TRUE16-NEXT: v_pk_add_f16 v18, 0x200, v18 op_sel_hi:[0,1]
-; GFX11-TRUE16-NEXT: v_pk_add_f16 v19, 0x200, v19 op_sel_hi:[0,1]
+; GFX11-TRUE16-NEXT: v_pk_add_f16 v0, 0x200, s40 op_sel_hi:[0,1]
+; GFX11-TRUE16-NEXT: v_pk_add_f16 v2, 0x200, s41 op_sel_hi:[0,1]
+; GFX11-TRUE16-NEXT: v_pk_add_f16 v186, 0x200, v186 op_sel_hi:[0,1]
+; GFX11-TRUE16-NEXT: v_pk_add_f16 v185, 0x200, v185 op_sel_hi:[0,1]
+; GFX11-TRUE16-NEXT: v_pk_add_f16 v5, 0x200, s0 op_sel_hi:[0,1]
+; GFX11-TRUE16-NEXT: v_pk_add_f16 v9, 0x200, s1 op_sel_hi:[0,1]
+; GFX11-TRUE16-NEXT: v_pk_add_f16 v14, 0x200, s2 op_sel_hi:[0,1]
+; GFX11-TRUE16-NEXT: v_pk_add_f16 v20, 0x200, s3 op_sel_hi:[0,1]
+; GFX11-TRUE16-NEXT: v_pk_add_f16 v27, 0x200, s4 op_sel_hi:[0,1]
+; GFX11-TRUE16-NEXT: v_pk_add_f16 v35, 0x200, s5 op_sel_hi:[0,1]
+; GFX11-TRUE16-NEXT: v_pk_add_f16 v44, 0x200, s6 op_sel_hi:[0,1]
+; GFX11-TRUE16-NEXT: v_pk_add_f16 v54, 0x200, s7 op_sel_hi:[0,1]
+; GFX11-TRUE16-NEXT: v_pk_add_f16 v65, 0x200, s8 op_sel_hi:[0,1]
+; GFX11-TRUE16-NEXT: v_pk_add_f16 v77, 0x200, s9 op_sel_hi:[0,1]
+; GFX11-TRUE16-NEXT: v_pk_add_f16 v90, 0x200, s10 op_sel_hi:[0,1]
+; GFX11-TRUE16-NEXT: v_pk_add_f16 v104, 0x200, s11 op_sel_hi:[0,1]
+; GFX11-TRUE16-NEXT: v_pk_add_f16 v119, 0x200, s12 op_sel_hi:[0,1]
+; GFX11-TRUE16-NEXT: v_pk_add_f16 v135, 0x200, s13 op_sel_hi:[0,1]
+; GFX11-TRUE16-NEXT: v_pk_add_f16 v152, 0x200, s14 op_sel_hi:[0,1]
+; GFX11-TRUE16-NEXT: v_pk_add_f16 v170, 0x200, s15 op_sel_hi:[0,1]
; GFX11-TRUE16-NEXT: .LBB55_3: ; %end
+; GFX11-TRUE16-NEXT: v_dual_mov_b32 v7, v35 :: v_dual_mov_b32 v8, v44
+; GFX11-TRUE16-NEXT: v_dual_mov_b32 v11, v77 :: v_dual_mov_b32 v12, v90
+; GFX11-TRUE16-NEXT: v_mov_b32_e32 v13, v104
+; GFX11-TRUE16-NEXT: v_dual_mov_b32 v15, v135 :: v_dual_mov_b32 v16, v152
+; GFX11-TRUE16-NEXT: v_mov_b32_e32 v17, v170
+; GFX11-TRUE16-NEXT: v_dual_mov_b32 v18, v186 :: v_dual_mov_b32 v19, v185
+; GFX11-TRUE16-NEXT: s_clause 0x1f
+; GFX11-TRUE16-NEXT: scratch_load_b32 v186, off, s32
+; GFX11-TRUE16-NEXT: scratch_load_b32 v185, off, s32 offset:4
+; GFX11-TRUE16-NEXT: scratch_load_b32 v184, off, s32 offset:8
+; GFX11-TRUE16-NEXT: scratch_load_b32 v175, off, s32 offset:12
+; GFX11-TRUE16-NEXT: scratch_load_b32 v174, off, s32 offset:16
+; GFX11-TRUE16-NEXT: scratch_load_b32 v173, off, s32 offset:20
+; GFX11-TRUE16-NEXT: scratch_load_b32 v172, off, s32 offset:24
+; GFX11-TRUE16-NEXT: scratch_load_b32 v171, off, s32 offset:28
+; GFX11-TRUE16-NEXT: scratch_load_b32 v170, off, s32 offset:32
+; GFX11-TRUE16-NEXT: scratch_load_b32 v169, off, s32 offset:36
+; GFX11-TRUE16-NEXT: scratch_load_b32 v168, off, s32 offset:40
+; GFX11-TRUE16-NEXT: scratch_load_b32 v159, off, s32 offset:44
+; GFX11-TRUE16-NEXT: scratch_load_b32 v158, off, s32 offset:48
+; GFX11-TRUE16-NEXT: scratch_load_b32 v157, off, s32 offset:52
+; GFX11-TRUE16-NEXT: scratch_load_b32 v156, off, s32 offset:56
+; GFX11-TRUE16-NEXT: scratch_load_b32 v155, off, s32 offset:60
+; GFX11-TRUE16-NEXT: scratch_load_b32 v154, off, s32 offset:64
+; GFX11-TRUE16-NEXT: scratch_load_b32 v153, off, s32 offset:68
+; GFX11-TRUE16-NEXT: scratch_load_b32 v152, off, s32 offset:72
+; GFX11-TRUE16-NEXT: scratch_load_b32 v143, off, s32 offset:76
+; GFX11-TRUE16-NEXT: scratch_load_b32 v142, off, s32 offset:80
+; GFX11-TRUE16-NEXT: scratch_load_b32 v141, off, s32 offset:84
+; GFX11-TRUE16-NEXT: scratch_load_b32 v140, off, s32 offset:88
+; GFX11-TRUE16-NEXT: scratch_load_b32 v139, off, s32 offset:92
+; GFX11-TRUE16-NEXT: scratch_load_b32 v138, off, s32 offset:96
+; GFX11-TRUE16-NEXT: scratch_load_b32 v137, off, s32 offset:100
+; GFX11-TRUE16-NEXT: scratch_load_b32 v136, off, s32 offset:104
+; GFX11-TRUE16-NEXT: scratch_load_b32 v127, off, s32 offset:108
+; GFX11-TRUE16-NEXT: scratch_load_b32 v126, off, s32 offset:112
+; GFX11-TRUE16-NEXT: scratch_load_b32 v125, off, s32 offset:116
+; GFX11-TRUE16-NEXT: scratch_load_b32 v124, off, s32 offset:120
+; GFX11-TRUE16-NEXT: scratch_load_b32 v123, off, s32 offset:124
+; GFX11-TRUE16-NEXT: s_clause 0x1f
+; GFX11-TRUE16-NEXT: scratch_load_b32 v122, off, s32 offset:128
+; GFX11-TRUE16-NEXT: scratch_load_b32 v121, off, s32 offset:132
+; GFX11-TRUE16-NEXT: scratch_load_b32 v120, off, s32 offset:136
+; GFX11-TRUE16-NEXT: scratch_load_b32 v111, off, s32 offset:140
+; GFX11-TRUE16-NEXT: scratch_load_b32 v110, off, s32 offset:144
+; GFX11-TRUE16-NEXT: scratch_load_b32 v109, off, s32 offset:148
+; GFX11-TRUE16-NEXT: scratch_load_b32 v108, off, s32 offset:152
+; GFX11-TRUE16-NEXT: scratch_load_b32 v107, off, s32 offset:156
+; GFX11-TRUE16-NEXT: scratch_load_b32 v106, off, s32 offset:160
+; GFX11-TRUE16-NEXT: scratch_load_b32 v105, off, s32 offset:164
+; GFX11-TRUE16-NEXT: scratch_load_b32 v104, off, s32 offset:168
+; GFX11-TRUE16-NEXT: scratch_load_b32 v95, off, s32 offset:172
+; GFX11-TRUE16-NEXT: scratch_load_b32 v94, off, s32 offset:176
+; GFX11-TRUE16-NEXT: scratch_load_b32 v93, off, s32 offset:180
+; GFX11-TRUE16-NEXT: scratch_load_b32 v92, off, s32 offset:184
+; GFX11-TRUE16-NEXT: scratch_load_b32 v91, off, s32 offset:188
+; GFX11-TRUE16-NEXT: scratch_load_b32 v90, off, s32 offset:192
+; GFX11-TRUE16-NEXT: scratch_load_b32 v89, off, s32 offset:196
+; GFX11-TRUE16-NEXT: scratch_load_b32 v88, off, s32 offset:200
+; GFX11-TRUE16-NEXT: scratch_load_b32 v79, off, s32 offset:204
+; GFX11-TRUE16-NEXT: scratch_load_b32 v78, off, s32 offset:208
+; GFX11-TRUE16-NEXT: scratch_load_b32 v77, off, s32 offset:212
+; GFX11-TRUE16-NEXT: scratch_load_b32 v76, off, s32 offset:216
+; GFX11-TRUE16-NEXT: scratch_load_b32 v75, off, s32 offset:220
+; GFX11-TRUE16-NEXT: scratch_load_b32 v74, off, s32 offset:224
+; GFX11-TRUE16-NEXT: scratch_load_b32 v73, off, s32 offset:228
+; GFX11-TRUE16-NEXT: scratch_load_b32 v72, off, s32 offset:232
+; GFX11-TRUE16-NEXT: scratch_load_b32 v63, off, s32 offset:236
+; GFX11-TRUE16-NEXT: scratch_load_b32 v62, off, s32 offset:240
+; GFX11-TRUE16-NEXT: scratch_load_b32 v61, off, s32 offset:244
+; GFX11-TRUE16-NEXT: scratch_load_b32 v60, off, s32 offset:248
+; GFX11-TRUE16-NEXT: scratch_load_b32 v59, off, s32 offset:252
+; GFX11-TRUE16-NEXT: s_clause 0xa
+; GFX11-TRUE16-NEXT: scratch_load_b32 v58, off, s32 offset:256
+; GFX11-TRUE16-NEXT: scratch_load_b32 v57, off, s32 offset:260
+; GFX11-TRUE16-NEXT: scratch_load_b32 v56, off, s32 offset:264
+; GFX11-TRUE16-NEXT: scratch_load_b32 v47, off, s32 offset:268
+; GFX11-TRUE16-NEXT: scratch_load_b32 v46, off, s32 offset:272
+; GFX11-TRUE16-NEXT: scratch_load_b32 v45, off, s32 offset:276
+; GFX11-TRUE16-NEXT: scratch_load_b32 v44, off, s32 offset:280
+; GFX11-TRUE16-NEXT: scratch_load_b32 v43, off, s32 offset:284
+; GFX11-TRUE16-NEXT: scratch_load_b32 v42, off, s32 offset:288
+; GFX11-TRUE16-NEXT: scratch_load_b32 v41, off, s32 offset:292
+; GFX11-TRUE16-NEXT: scratch_load_b32 v40, off, s32 offset:296
+; GFX11-TRUE16-NEXT: v_dual_mov_b32 v1, v2 :: v_dual_mov_b32 v2, v5
+; GFX11-TRUE16-NEXT: v_dual_mov_b32 v3, v9 :: v_dual_mov_b32 v4, v14
+; GFX11-TRUE16-NEXT: v_dual_mov_b32 v5, v20 :: v_dual_mov_b32 v6, v27
+; GFX11-TRUE16-NEXT: v_dual_mov_b32 v9, v54 :: v_dual_mov_b32 v10, v65
+; GFX11-TRUE16-NEXT: v_mov_b32_e32 v14, v119
+; GFX11-TRUE16-NEXT: s_waitcnt vmcnt(0)
; GFX11-TRUE16-NEXT: s_setpc_b64 s[30:31]
; GFX11-TRUE16-NEXT: .LBB55_4:
; GFX11-TRUE16-NEXT: ; implicit-def: $vgpr0_vgpr1_vgpr2_vgpr3_vgpr4_vgpr5_vgpr6_vgpr7_vgpr8_vgpr9_vgpr10_vgpr11_vgpr12_vgpr13_vgpr14_vgpr15_vgpr16_vgpr17_vgpr18_vgpr19_vgpr20_vgpr21_vgpr22_vgpr23_vgpr24_vgpr25_vgpr26_vgpr27_vgpr28_vgpr29_vgpr30_vgpr31
+; GFX11-TRUE16-NEXT: ; implicit-def: $vgpr1_vgpr2_vgpr3_vgpr4_vgpr5_vgpr6_vgpr7_vgpr8_vgpr9_vgpr10_vgpr11_vgpr12_vgpr13_vgpr14_vgpr15_vgpr16_vgpr17_vgpr18_vgpr19_vgpr20_vgpr21_vgpr22_vgpr23_vgpr24_vgpr25_vgpr26_vgpr27_vgpr28_vgpr29_vgpr30_vgpr31_vgpr32
+; GFX11-TRUE16-NEXT: ; implicit-def: $vgpr3_vgpr4_vgpr5_vgpr6_vgpr7_vgpr8_vgpr9_vgpr10_vgpr11_vgpr12_vgpr13_vgpr14_vgpr15_vgpr16_vgpr17_vgpr18_vgpr19_vgpr20_vgpr21_vgpr22_vgpr23_vgpr24_vgpr25_vgpr26_vgpr27_vgpr28_vgpr29_vgpr30_vgpr31_vgpr32_vgpr33_vgpr34
+; GFX11-TRUE16-NEXT: ; implicit-def: $vgpr6_vgpr7_vgpr8_vgpr9_vgpr10_vgpr11_vgpr12_vgpr13_vgpr14_vgpr15_vgpr16_vgpr17_vgpr18_vgpr19_vgpr20_vgpr21_vgpr22_vgpr23_vgpr24_vgpr25_vgpr26_vgpr27_vgpr28_vgpr29_vgpr30_vgpr31_vgpr32_vgpr33_vgpr34_vgpr35_vgpr36_vgpr37
+; GFX11-TRUE16-NEXT: ; implicit-def: $vgpr10_vgpr11_vgpr12_vgpr13_vgpr14_vgpr15_vgpr16_vgpr17_vgpr18_vgpr19_vgpr20_vgpr21_vgpr22_vgpr23_vgpr24_vgpr25_vgpr26_vgpr27_vgpr28_vgpr29_vgpr30_vgpr31_vgpr32_vgpr33_vgpr34_vgpr35_vgpr36_vgpr37_vgpr38_vgpr39_vgpr40_vgpr41
+; GFX11-TRUE16-NEXT: ; implicit-def: $vgpr15_vgpr16_vgpr17_vgpr18_vgpr19_vgpr20_vgpr21_vgpr22_vgpr23_vgpr24_vgpr25_vgpr26_vgpr27_vgpr28_vgpr29_vgpr30_vgpr31_vgpr32_vgpr33_vgpr34_vgpr35_vgpr36_vgpr37_vgpr38_vgpr39_vgpr40_vgpr41_vgpr42_vgpr43_vgpr44_vgpr45_vgpr46
+; GFX11-TRUE16-NEXT: ; implicit-def: $vgpr21_vgpr22_vgpr23_vgpr24_vgpr25_vgpr26_vgpr27_vgpr28_vgpr29_vgpr30_vgpr31_vgpr32_vgpr33_vgpr34_vgpr35_vgpr36_vgpr37_vgpr38_vgpr39_vgpr40_vgpr41_vgpr42_vgpr43_vgpr44_vgpr45_vgpr46_vgpr47_vgpr48_vgpr49_vgpr50_vgpr51_vgpr52
+; GFX11-TRUE16-NEXT: ; implicit-def: $vgpr28_vgpr29_vgpr30_vgpr31_vgpr32_vgpr33_vgpr34_vgpr35_vgpr36_vgpr37_vgpr38_vgpr39_vgpr40_vgpr41_vgpr42_vgpr43_vgpr44_vgpr45_vgpr46_vgpr47_vgpr48_vgpr49_vgpr50_vgpr51_vgpr52_vgpr53_vgpr54_vgpr55_vgpr56_vgpr57_vgpr58_vgpr59
+; GFX11-TRUE16-NEXT: ; implicit-def: $vgpr36_vgpr37_vgpr38_vgpr39_vgpr40_vgpr41_vgpr42_vgpr43_vgpr44_vgpr45_vgpr46_vgpr47_vgpr48_vgpr49_vgpr50_vgpr51_vgpr52_vgpr53_vgpr54_vgpr55_vgpr56_vgpr57_vgpr58_vgpr59_vgpr60_vgpr61_vgpr62_vgpr63_vgpr64_vgpr65_vgpr66_vgpr67
+; GFX11-TRUE16-NEXT: ; implicit-def: $vgpr45_vgpr46_vgpr47_vgpr48_vgpr49_vgpr50_vgpr51_vgpr52_vgpr53_vgpr54_vgpr55_vgpr56_vgpr57_vgpr58_vgpr59_vgpr60_vgpr61_vgpr62_vgpr63_vgpr64_vgpr65_vgpr66_vgpr67_vgpr68_vgpr69_vgpr70_vgpr71_vgpr72_vgpr73_vgpr74_vgpr75_vgpr76
+; GFX11-TRUE16-NEXT: ; implicit-def: $vgpr55_vgpr56_vgpr57_vgpr58_vgpr59_vgpr60_vgpr61_vgpr62_vgpr63_vgpr64_vgpr65_vgpr66_vgpr67_vgpr68_vgpr69_vgpr70_vgpr71_vgpr72_vgpr73_vgpr74_vgpr75_vgpr76_vgpr77_vgpr78_vgpr79_vgpr80_vgpr81_vgpr82_vgpr83_vgpr84_vgpr85_vgpr86
+; GFX11-TRUE16-NEXT: ; implicit-def: $vgpr66_vgpr67_vgpr68_vgpr69_vgpr70_vgpr71_vgpr72_vgpr73_vgpr74_vgpr75_vgpr76_vgpr77_vgpr78_vgpr79_vgpr80_vgpr81_vgpr82_vgpr83_vgpr84_vgpr85_vgpr86_vgpr87_vgpr88_vgpr89_vgpr90_vgpr91_vgpr92_vgpr93_vgpr94_vgpr95_vgpr96_vgpr97
+; GFX11-TRUE16-NEXT: ; implicit-def: $vgpr78_vgpr79_vgpr80_vgpr81_vgpr82_vgpr83_vgpr84_vgpr85_vgpr86_vgpr87_vgpr88_vgpr89_vgpr90_vgpr91_vgpr92_vgpr93_vgpr94_vgpr95_vgpr96_vgpr97_vgpr98_vgpr99_vgpr100_vgpr101_vgpr102_vgpr103_vgpr104_vgpr105_vgpr106_vgpr107_vgpr108_vgpr109
+; GFX11-TRUE16-NEXT: ; implicit-def: $vgpr91_vgpr92_vgpr93_vgpr94_vgpr95_vgpr96_vgpr97_vgpr98_vgpr99_vgpr100_vgpr101_vgpr102_vgpr103_vgpr104_vgpr105_vgpr106_vgpr107_vgpr108_vgpr109_vgpr110_vgpr111_vgpr112_vgpr113_vgpr114_vgpr115_vgpr116_vgpr117_vgpr118_vgpr119_vgpr120_vgpr121_vgpr122
+; GFX11-TRUE16-NEXT: ; implicit-def: $vgpr105_vgpr106_vgpr107_vgpr108_vgpr109_vgpr110_vgpr111_vgpr112_vgpr113_vgpr114_vgpr115_vgpr116_vgpr117_vgpr118_vgpr119_vgpr120_vgpr121_vgpr122_vgpr123_vgpr124_vgpr125_vgpr126_vgpr127_vgpr128_vgpr129_vgpr130_vgpr131_vgpr132_vgpr133_vgpr134_vgpr135_vgpr136
+; GFX11-TRUE16-NEXT: ; implicit-def: $vgpr120_vgpr121_vgpr122_vgpr123_vgpr124_vgpr125_vgpr126_vgpr127_vgpr128_vgpr129_vgpr130_vgpr131_vgpr132_vgpr133_vgpr134_vgpr135_vgpr136_vgpr137_vgpr138_vgpr139_vgpr140_vgpr141_vgpr142_vgpr143_vgpr144_vgpr145_vgpr146_vgpr147_vgpr148_vgpr149_vgpr150_vgpr151
+; GFX11-TRUE16-NEXT: ; implicit-def: $vgpr136_vgpr137_vgpr138_vgpr139_vgpr140_vgpr141_vgpr142_vgpr143_vgpr144_vgpr145_vgpr146_vgpr147_vgpr148_vgpr149_vgpr150_vgpr151_vgpr152_vgpr153_vgpr154_vgpr155_vgpr156_vgpr157_vgpr158_vgpr159_vgpr160_vgpr161_vgpr162_vgpr163_vgpr164_vgpr165_vgpr166_vgpr167
+; GFX11-TRUE16-NEXT: ; implicit-def: $vgpr153_vgpr154_vgpr155_vgpr156_vgpr157_vgpr158_vgpr159_vgpr160_vgpr161_vgpr162_vgpr163_vgpr164_vgpr165_vgpr166_vgpr167_vgpr168_vgpr169_vgpr170_vgpr171_vgpr172_vgpr173_vgpr174_vgpr175_vgpr176_vgpr177_vgpr178_vgpr179_vgpr180_vgpr181_vgpr182_vgpr183_vgpr184
; GFX11-TRUE16-NEXT: s_branch .LBB55_2
;
; GFX11-FAKE16-LABEL: bitcast_v40f16_to_v10f64_scalar:
@@ -31057,12 +32989,10 @@ define inreg <40 x half> @bitcast_v40i16_to_v40f16_scalar(<40 x i16> inreg %a, i
; GFX11-TRUE16-LABEL: bitcast_v40i16_to_v40f16_scalar:
; GFX11-TRUE16: ; %bb.0:
; GFX11-TRUE16-NEXT: s_waitcnt vmcnt(0) expcnt(0) lgkmcnt(0)
-; GFX11-TRUE16-NEXT: v_mov_b16_e32 v19.h, 0
; GFX11-TRUE16-NEXT: v_cmp_ne_u32_e32 vcc_lo, 0, v2
-; GFX11-TRUE16-NEXT: v_mov_b16_e32 v19.l, v1.h
-; GFX11-TRUE16-NEXT: v_mov_b16_e32 v18.l, v0.h
+; GFX11-TRUE16-NEXT: v_dual_mov_b32 v19, v1 :: v_dual_mov_b32 v18, v0
+; GFX11-TRUE16-NEXT: v_mov_b16_e32 v20.h, 0
; GFX11-TRUE16-NEXT: s_lshr_b32 s45, s29, 16
-; GFX11-TRUE16-NEXT: v_mov_b16_e32 v18.h, v19.h
; GFX11-TRUE16-NEXT: s_lshr_b32 s44, s28, 16
; GFX11-TRUE16-NEXT: s_lshr_b32 s43, s27, 16
; GFX11-TRUE16-NEXT: s_lshr_b32 s42, s26, 16
@@ -31083,17 +33013,16 @@ define inreg <40 x half> @bitcast_v40i16_to_v40f16_scalar(<40 x i16> inreg %a, i
; GFX11-TRUE16-NEXT: s_mov_b32 s46, 0
; GFX11-TRUE16-NEXT: s_and_b32 s47, vcc_lo, exec_lo
; GFX11-TRUE16-NEXT: s_cbranch_scc0 .LBB57_3
-; GFX11-TRUE16-NEXT: ; %bb.1: ; %Flow
+; GFX11-TRUE16-NEXT: ; %bb.1: ; %cmp.false
+; GFX11-TRUE16-NEXT: v_mov_b16_e32 v20.l, v19.h
+; GFX11-TRUE16-NEXT: v_mov_b16_e32 v21.l, v18.h
+; GFX11-TRUE16-NEXT: v_mov_b16_e32 v21.h, v20.h
; GFX11-TRUE16-NEXT: s_and_not1_b32 vcc_lo, exec_lo, s46
; GFX11-TRUE16-NEXT: s_cbranch_vccnz .LBB57_4
; GFX11-TRUE16-NEXT: .LBB57_2: ; %cmp.true
-; GFX11-TRUE16-NEXT: v_and_b32_e32 v1, 0xffff, v1
-; GFX11-TRUE16-NEXT: v_and_b32_e32 v0, 0xffff, v0
; GFX11-TRUE16-NEXT: s_pack_ll_b32_b16 s29, s29, s45
; GFX11-TRUE16-NEXT: s_pack_ll_b32_b16 s28, s28, s44
; GFX11-TRUE16-NEXT: s_pack_ll_b32_b16 s27, s27, s43
-; GFX11-TRUE16-NEXT: v_lshl_or_b32 v1, v19, 16, v1
-; GFX11-TRUE16-NEXT: v_lshl_or_b32 v0, v18, 16, v0
; GFX11-TRUE16-NEXT: s_pack_ll_b32_b16 s26, s26, s42
; GFX11-TRUE16-NEXT: s_pack_ll_b32_b16 s25, s25, s41
; GFX11-TRUE16-NEXT: s_pack_ll_b32_b16 s24, s24, s40
@@ -31109,59 +33038,61 @@ define inreg <40 x half> @bitcast_v40i16_to_v40f16_scalar(<40 x i16> inreg %a, i
; GFX11-TRUE16-NEXT: s_pack_ll_b32_b16 s2, s2, s5
; GFX11-TRUE16-NEXT: s_pack_ll_b32_b16 s0, s0, s9
; GFX11-TRUE16-NEXT: s_pack_ll_b32_b16 s1, s1, s4
-; GFX11-TRUE16-NEXT: v_pk_add_u16 v15, s29, 3 op_sel_hi:[1,0]
+; GFX11-TRUE16-NEXT: v_pk_add_u16 v19, v19, 3 op_sel_hi:[1,0]
+; GFX11-TRUE16-NEXT: v_pk_add_u16 v18, v18, 3 op_sel_hi:[1,0]
+; GFX11-TRUE16-NEXT: v_pk_add_u16 v17, s29, 3 op_sel_hi:[1,0]
; GFX11-TRUE16-NEXT: v_pk_add_u16 v16, s28, 3 op_sel_hi:[1,0]
-; GFX11-TRUE16-NEXT: v_pk_add_u16 v1, v1, 3 op_sel_hi:[1,0]
-; GFX11-TRUE16-NEXT: v_pk_add_u16 v0, v0, 3 op_sel_hi:[1,0]
-; GFX11-TRUE16-NEXT: v_pk_add_u16 v17, s27, 3 op_sel_hi:[1,0]
-; GFX11-TRUE16-NEXT: v_pk_add_u16 v10, s26, 3 op_sel_hi:[1,0]
-; GFX11-TRUE16-NEXT: v_pk_add_u16 v11, s25, 3 op_sel_hi:[1,0]
+; GFX11-TRUE16-NEXT: v_pk_add_u16 v15, s27, 3 op_sel_hi:[1,0]
+; GFX11-TRUE16-NEXT: v_pk_add_u16 v14, s26, 3 op_sel_hi:[1,0]
+; GFX11-TRUE16-NEXT: v_pk_add_u16 v13, s25, 3 op_sel_hi:[1,0]
; GFX11-TRUE16-NEXT: v_pk_add_u16 v12, s24, 3 op_sel_hi:[1,0]
-; GFX11-TRUE16-NEXT: v_pk_add_u16 v13, s15, 3 op_sel_hi:[1,0]
-; GFX11-TRUE16-NEXT: v_pk_add_u16 v14, s14, 3 op_sel_hi:[1,0]
-; GFX11-TRUE16-NEXT: v_pk_add_u16 v5, s13, 3 op_sel_hi:[1,0]
-; GFX11-TRUE16-NEXT: v_pk_add_u16 v6, s12, 3 op_sel_hi:[1,0]
+; GFX11-TRUE16-NEXT: v_pk_add_u16 v11, s15, 3 op_sel_hi:[1,0]
+; GFX11-TRUE16-NEXT: v_pk_add_u16 v10, s14, 3 op_sel_hi:[1,0]
+; GFX11-TRUE16-NEXT: v_pk_add_u16 v9, s13, 3 op_sel_hi:[1,0]
+; GFX11-TRUE16-NEXT: v_pk_add_u16 v8, s12, 3 op_sel_hi:[1,0]
; GFX11-TRUE16-NEXT: v_pk_add_u16 v7, s11, 3 op_sel_hi:[1,0]
-; GFX11-TRUE16-NEXT: v_pk_add_u16 v8, s10, 3 op_sel_hi:[1,0]
-; GFX11-TRUE16-NEXT: v_pk_add_u16 v9, s8, 3 op_sel_hi:[1,0]
-; GFX11-TRUE16-NEXT: v_pk_add_u16 v21, s0, 3 op_sel_hi:[1,0]
-; GFX11-TRUE16-NEXT: v_pk_add_u16 v20, s1, 3 op_sel_hi:[1,0]
-; GFX11-TRUE16-NEXT: v_pk_add_u16 v4, s2, 3 op_sel_hi:[1,0]
+; GFX11-TRUE16-NEXT: v_pk_add_u16 v6, s10, 3 op_sel_hi:[1,0]
+; GFX11-TRUE16-NEXT: v_pk_add_u16 v5, s8, 3 op_sel_hi:[1,0]
+; GFX11-TRUE16-NEXT: v_pk_add_u16 v0, s0, 3 op_sel_hi:[1,0]
+; GFX11-TRUE16-NEXT: v_pk_add_u16 v1, s1, 3 op_sel_hi:[1,0]
+; GFX11-TRUE16-NEXT: v_pk_add_u16 v2, s2, 3 op_sel_hi:[1,0]
; GFX11-TRUE16-NEXT: v_pk_add_u16 v3, s3, 3 op_sel_hi:[1,0]
-; GFX11-TRUE16-NEXT: v_pk_add_u16 v2, s7, 3 op_sel_hi:[1,0]
-; GFX11-TRUE16-NEXT: v_lshrrev_b32_e32 v39, 16, v21
-; GFX11-TRUE16-NEXT: v_lshrrev_b32_e32 v38, 16, v20
-; GFX11-TRUE16-NEXT: v_lshrrev_b32_e32 v37, 16, v4
+; GFX11-TRUE16-NEXT: v_pk_add_u16 v4, s7, 3 op_sel_hi:[1,0]
+; GFX11-TRUE16-NEXT: v_lshrrev_b32_e32 v39, 16, v0
+; GFX11-TRUE16-NEXT: v_lshrrev_b32_e32 v38, 16, v1
+; GFX11-TRUE16-NEXT: v_lshrrev_b32_e32 v37, 16, v2
; GFX11-TRUE16-NEXT: v_lshrrev_b32_e32 v36, 16, v3
-; GFX11-TRUE16-NEXT: v_lshrrev_b32_e32 v35, 16, v2
-; GFX11-TRUE16-NEXT: v_lshrrev_b32_e32 v34, 16, v9
-; GFX11-TRUE16-NEXT: v_lshrrev_b32_e32 v33, 16, v8
+; GFX11-TRUE16-NEXT: v_lshrrev_b32_e32 v35, 16, v4
+; GFX11-TRUE16-NEXT: v_lshrrev_b32_e32 v34, 16, v5
+; GFX11-TRUE16-NEXT: v_lshrrev_b32_e32 v33, 16, v6
; GFX11-TRUE16-NEXT: v_lshrrev_b32_e32 v32, 16, v7
-; GFX11-TRUE16-NEXT: v_lshrrev_b32_e32 v31, 16, v6
-; GFX11-TRUE16-NEXT: v_lshrrev_b32_e32 v30, 16, v5
-; GFX11-TRUE16-NEXT: v_lshrrev_b32_e32 v29, 16, v14
-; GFX11-TRUE16-NEXT: v_lshrrev_b32_e32 v28, 16, v13
+; GFX11-TRUE16-NEXT: v_lshrrev_b32_e32 v31, 16, v8
+; GFX11-TRUE16-NEXT: v_lshrrev_b32_e32 v30, 16, v9
+; GFX11-TRUE16-NEXT: v_lshrrev_b32_e32 v29, 16, v10
+; GFX11-TRUE16-NEXT: v_lshrrev_b32_e32 v28, 16, v11
; GFX11-TRUE16-NEXT: v_lshrrev_b32_e32 v27, 16, v12
-; GFX11-TRUE16-NEXT: v_lshrrev_b32_e32 v26, 16, v11
-; GFX11-TRUE16-NEXT: v_lshrrev_b32_e32 v25, 16, v10
-; GFX11-TRUE16-NEXT: v_lshrrev_b32_e32 v24, 16, v17
+; GFX11-TRUE16-NEXT: v_lshrrev_b32_e32 v26, 16, v13
+; GFX11-TRUE16-NEXT: v_lshrrev_b32_e32 v25, 16, v14
+; GFX11-TRUE16-NEXT: v_lshrrev_b32_e32 v24, 16, v15
; GFX11-TRUE16-NEXT: v_lshrrev_b32_e32 v23, 16, v16
-; GFX11-TRUE16-NEXT: v_lshrrev_b32_e32 v22, 16, v15
-; GFX11-TRUE16-NEXT: v_lshrrev_b32_e32 v18, 16, v0
-; GFX11-TRUE16-NEXT: v_lshrrev_b32_e32 v19, 16, v1
+; GFX11-TRUE16-NEXT: v_lshrrev_b32_e32 v22, 16, v17
+; GFX11-TRUE16-NEXT: v_lshrrev_b32_e32 v21, 16, v18
+; GFX11-TRUE16-NEXT: v_lshrrev_b32_e32 v20, 16, v19
; GFX11-TRUE16-NEXT: s_branch .LBB57_5
; GFX11-TRUE16-NEXT: .LBB57_3:
+; GFX11-TRUE16-NEXT: ; implicit-def: $vgpr21
+; GFX11-TRUE16-NEXT: ; implicit-def: $vgpr20
; GFX11-TRUE16-NEXT: s_branch .LBB57_2
; GFX11-TRUE16-NEXT: .LBB57_4:
-; GFX11-TRUE16-NEXT: v_dual_mov_b32 v15, s29 :: v_dual_mov_b32 v16, s28
-; GFX11-TRUE16-NEXT: v_dual_mov_b32 v17, s27 :: v_dual_mov_b32 v10, s26
-; GFX11-TRUE16-NEXT: v_dual_mov_b32 v11, s25 :: v_dual_mov_b32 v12, s24
-; GFX11-TRUE16-NEXT: v_dual_mov_b32 v13, s23 :: v_dual_mov_b32 v14, s22
-; GFX11-TRUE16-NEXT: v_dual_mov_b32 v5, s21 :: v_dual_mov_b32 v6, s20
-; GFX11-TRUE16-NEXT: v_dual_mov_b32 v7, s19 :: v_dual_mov_b32 v8, s18
-; GFX11-TRUE16-NEXT: v_dual_mov_b32 v9, s17 :: v_dual_mov_b32 v2, s16
-; GFX11-TRUE16-NEXT: v_dual_mov_b32 v3, s3 :: v_dual_mov_b32 v4, s2
-; GFX11-TRUE16-NEXT: v_dual_mov_b32 v20, s1 :: v_dual_mov_b32 v21, s0
+; GFX11-TRUE16-NEXT: v_dual_mov_b32 v17, s29 :: v_dual_mov_b32 v16, s28
+; GFX11-TRUE16-NEXT: v_dual_mov_b32 v15, s27 :: v_dual_mov_b32 v14, s26
+; GFX11-TRUE16-NEXT: v_dual_mov_b32 v13, s25 :: v_dual_mov_b32 v12, s24
+; GFX11-TRUE16-NEXT: v_dual_mov_b32 v11, s23 :: v_dual_mov_b32 v10, s22
+; GFX11-TRUE16-NEXT: v_dual_mov_b32 v9, s21 :: v_dual_mov_b32 v8, s20
+; GFX11-TRUE16-NEXT: v_dual_mov_b32 v7, s19 :: v_dual_mov_b32 v6, s18
+; GFX11-TRUE16-NEXT: v_dual_mov_b32 v5, s17 :: v_dual_mov_b32 v4, s16
+; GFX11-TRUE16-NEXT: v_dual_mov_b32 v3, s3 :: v_dual_mov_b32 v2, s2
+; GFX11-TRUE16-NEXT: v_dual_mov_b32 v1, s1 :: v_dual_mov_b32 v0, s0
; GFX11-TRUE16-NEXT: v_dual_mov_b32 v22, s45 :: v_dual_mov_b32 v23, s44
; GFX11-TRUE16-NEXT: v_dual_mov_b32 v24, s43 :: v_dual_mov_b32 v25, s42
; GFX11-TRUE16-NEXT: v_dual_mov_b32 v26, s41 :: v_dual_mov_b32 v27, s40
@@ -31172,47 +33103,37 @@ define inreg <40 x half> @bitcast_v40i16_to_v40f16_scalar(<40 x i16> inreg %a, i
; GFX11-TRUE16-NEXT: v_dual_mov_b32 v36, s6 :: v_dual_mov_b32 v37, s5
; GFX11-TRUE16-NEXT: v_dual_mov_b32 v38, s4 :: v_dual_mov_b32 v39, s9
; GFX11-TRUE16-NEXT: .LBB57_5: ; %end
-; GFX11-TRUE16-NEXT: v_and_b32_e32 v4, 0xffff, v4
-; GFX11-TRUE16-NEXT: v_and_b32_e32 v49, 0xffff, v2
-; GFX11-TRUE16-NEXT: v_and_b32_e32 v3, 0xffff, v3
-; GFX11-TRUE16-NEXT: v_and_b32_e32 v8, 0xffff, v8
-; GFX11-TRUE16-NEXT: v_and_b32_e32 v21, 0xffff, v21
-; GFX11-TRUE16-NEXT: v_lshl_or_b32 v2, v37, 16, v4
-; GFX11-TRUE16-NEXT: v_lshl_or_b32 v4, v35, 16, v49
-; GFX11-TRUE16-NEXT: v_and_b32_e32 v35, 0xffff, v6
-; GFX11-TRUE16-NEXT: v_and_b32_e32 v48, 0xffff, v20
-; GFX11-TRUE16-NEXT: v_lshl_or_b32 v3, v36, 16, v3
-; GFX11-TRUE16-NEXT: v_and_b32_e32 v9, 0xffff, v9
-; GFX11-TRUE16-NEXT: v_and_b32_e32 v36, 0xffff, v5
-; GFX11-TRUE16-NEXT: v_lshl_or_b32 v6, v33, 16, v8
-; GFX11-TRUE16-NEXT: v_lshl_or_b32 v8, v31, 16, v35
-; GFX11-TRUE16-NEXT: v_and_b32_e32 v14, 0xffff, v14
-; GFX11-TRUE16-NEXT: v_and_b32_e32 v31, 0xffff, v10
-; GFX11-TRUE16-NEXT: v_lshl_or_b32 v20, v39, 16, v21
-; GFX11-TRUE16-NEXT: v_lshl_or_b32 v21, v38, 16, v48
-; GFX11-TRUE16-NEXT: v_and_b32_e32 v7, 0xffff, v7
-; GFX11-TRUE16-NEXT: v_lshl_or_b32 v5, v34, 16, v9
-; GFX11-TRUE16-NEXT: v_lshl_or_b32 v9, v30, 16, v36
-; GFX11-TRUE16-NEXT: v_and_b32_e32 v13, 0xffff, v13
-; GFX11-TRUE16-NEXT: v_and_b32_e32 v12, 0xffff, v12
-; GFX11-TRUE16-NEXT: v_and_b32_e32 v30, 0xffff, v11
-; GFX11-TRUE16-NEXT: v_lshl_or_b32 v10, v29, 16, v14
-; GFX11-TRUE16-NEXT: v_lshl_or_b32 v14, v25, 16, v31
-; GFX11-TRUE16-NEXT: v_and_b32_e32 v17, 0xffff, v17
-; GFX11-TRUE16-NEXT: v_and_b32_e32 v16, 0xffff, v16
-; GFX11-TRUE16-NEXT: v_and_b32_e32 v25, 0xffff, v15
-; GFX11-TRUE16-NEXT: v_and_b32_e32 v0, 0xffff, v0
-; GFX11-TRUE16-NEXT: v_and_b32_e32 v1, 0xffff, v1
-; GFX11-TRUE16-NEXT: v_lshl_or_b32 v7, v32, 16, v7
-; GFX11-TRUE16-NEXT: v_lshl_or_b32 v11, v28, 16, v13
-; GFX11-TRUE16-NEXT: v_lshl_or_b32 v12, v27, 16, v12
-; GFX11-TRUE16-NEXT: v_lshl_or_b32 v13, v26, 16, v30
-; GFX11-TRUE16-NEXT: v_lshl_or_b32 v15, v24, 16, v17
-; GFX11-TRUE16-NEXT: v_lshl_or_b32 v16, v23, 16, v16
-; GFX11-TRUE16-NEXT: v_lshl_or_b32 v17, v22, 16, v25
-; GFX11-TRUE16-NEXT: v_lshl_or_b32 v18, v18, 16, v0
-; GFX11-TRUE16-NEXT: v_lshl_or_b32 v19, v19, 16, v1
-; GFX11-TRUE16-NEXT: v_dual_mov_b32 v0, v20 :: v_dual_mov_b32 v1, v21
+; GFX11-TRUE16-NEXT: s_delay_alu instid0(VALU_DEP_1) | instskip(NEXT) | instid1(VALU_DEP_3)
+; GFX11-TRUE16-NEXT: v_dual_mov_b32 v39, v39 :: v_dual_mov_b32 v38, v38
+; GFX11-TRUE16-NEXT: v_dual_mov_b32 v37, v37 :: v_dual_mov_b32 v36, v36
+; GFX11-TRUE16-NEXT: v_dual_mov_b32 v35, v35 :: v_dual_mov_b32 v34, v34
+; GFX11-TRUE16-NEXT: v_dual_mov_b32 v33, v33 :: v_dual_mov_b32 v32, v32
+; GFX11-TRUE16-NEXT: v_dual_mov_b32 v31, v31 :: v_dual_mov_b32 v30, v30
+; GFX11-TRUE16-NEXT: v_dual_mov_b32 v29, v29 :: v_dual_mov_b32 v28, v28
+; GFX11-TRUE16-NEXT: v_dual_mov_b32 v27, v27 :: v_dual_mov_b32 v26, v26
+; GFX11-TRUE16-NEXT: v_dual_mov_b32 v25, v25 :: v_dual_mov_b32 v24, v24
+; GFX11-TRUE16-NEXT: v_dual_mov_b32 v23, v23 :: v_dual_mov_b32 v22, v22
+; GFX11-TRUE16-NEXT: v_dual_mov_b32 v21, v21 :: v_dual_mov_b32 v20, v20
+; GFX11-TRUE16-NEXT: v_mov_b16_e32 v0.h, v39.l
+; GFX11-TRUE16-NEXT: v_mov_b16_e32 v1.h, v38.l
+; GFX11-TRUE16-NEXT: v_mov_b16_e32 v2.h, v37.l
+; GFX11-TRUE16-NEXT: v_mov_b16_e32 v3.h, v36.l
+; GFX11-TRUE16-NEXT: v_mov_b16_e32 v4.h, v35.l
+; GFX11-TRUE16-NEXT: v_mov_b16_e32 v5.h, v34.l
+; GFX11-TRUE16-NEXT: v_mov_b16_e32 v6.h, v33.l
+; GFX11-TRUE16-NEXT: v_mov_b16_e32 v7.h, v32.l
+; GFX11-TRUE16-NEXT: v_mov_b16_e32 v8.h, v31.l
+; GFX11-TRUE16-NEXT: v_mov_b16_e32 v9.h, v30.l
+; GFX11-TRUE16-NEXT: v_mov_b16_e32 v10.h, v29.l
+; GFX11-TRUE16-NEXT: v_mov_b16_e32 v11.h, v28.l
+; GFX11-TRUE16-NEXT: v_mov_b16_e32 v12.h, v27.l
+; GFX11-TRUE16-NEXT: v_mov_b16_e32 v13.h, v26.l
+; GFX11-TRUE16-NEXT: v_mov_b16_e32 v14.h, v25.l
+; GFX11-TRUE16-NEXT: v_mov_b16_e32 v15.h, v24.l
+; GFX11-TRUE16-NEXT: v_mov_b16_e32 v16.h, v23.l
+; GFX11-TRUE16-NEXT: v_mov_b16_e32 v17.h, v22.l
+; GFX11-TRUE16-NEXT: v_mov_b16_e32 v18.h, v21.l
+; GFX11-TRUE16-NEXT: v_mov_b16_e32 v19.h, v20.l
; GFX11-TRUE16-NEXT: s_setpc_b64 s[30:31]
;
; GFX11-FAKE16-LABEL: bitcast_v40i16_to_v40f16_scalar:
@@ -32879,12 +34800,10 @@ define inreg <40 x i16> @bitcast_v40f16_to_v40i16_scalar(<40 x half> inreg %a, i
; GFX11-TRUE16-LABEL: bitcast_v40f16_to_v40i16_scalar:
; GFX11-TRUE16: ; %bb.0:
; GFX11-TRUE16-NEXT: s_waitcnt vmcnt(0) expcnt(0) lgkmcnt(0)
-; GFX11-TRUE16-NEXT: v_mov_b16_e32 v19.h, 0
; GFX11-TRUE16-NEXT: v_cmp_ne_u32_e32 vcc_lo, 0, v2
-; GFX11-TRUE16-NEXT: v_mov_b16_e32 v19.l, v1.h
-; GFX11-TRUE16-NEXT: v_mov_b16_e32 v18.l, v0.h
+; GFX11-TRUE16-NEXT: v_dual_mov_b32 v19, v1 :: v_dual_mov_b32 v18, v0
+; GFX11-TRUE16-NEXT: v_mov_b16_e32 v20.h, 0
; GFX11-TRUE16-NEXT: s_lshr_b32 s45, s29, 16
-; GFX11-TRUE16-NEXT: v_mov_b16_e32 v18.h, v19.h
; GFX11-TRUE16-NEXT: s_lshr_b32 s44, s28, 16
; GFX11-TRUE16-NEXT: s_lshr_b32 s43, s27, 16
; GFX11-TRUE16-NEXT: s_lshr_b32 s42, s26, 16
@@ -32905,17 +34824,16 @@ define inreg <40 x i16> @bitcast_v40f16_to_v40i16_scalar(<40 x half> inreg %a, i
; GFX11-TRUE16-NEXT: s_mov_b32 s46, 0
; GFX11-TRUE16-NEXT: s_and_b32 s47, vcc_lo, exec_lo
; GFX11-TRUE16-NEXT: s_cbranch_scc0 .LBB59_3
-; GFX11-TRUE16-NEXT: ; %bb.1: ; %Flow
+; GFX11-TRUE16-NEXT: ; %bb.1: ; %cmp.false
+; GFX11-TRUE16-NEXT: v_mov_b16_e32 v20.l, v19.h
+; GFX11-TRUE16-NEXT: v_mov_b16_e32 v21.l, v18.h
+; GFX11-TRUE16-NEXT: v_mov_b16_e32 v21.h, v20.h
; GFX11-TRUE16-NEXT: s_and_not1_b32 vcc_lo, exec_lo, s46
; GFX11-TRUE16-NEXT: s_cbranch_vccnz .LBB59_4
; GFX11-TRUE16-NEXT: .LBB59_2: ; %cmp.true
-; GFX11-TRUE16-NEXT: v_and_b32_e32 v1, 0xffff, v1
-; GFX11-TRUE16-NEXT: v_and_b32_e32 v0, 0xffff, v0
; GFX11-TRUE16-NEXT: s_pack_ll_b32_b16 s29, s29, s45
; GFX11-TRUE16-NEXT: s_pack_ll_b32_b16 s28, s28, s44
; GFX11-TRUE16-NEXT: s_pack_ll_b32_b16 s27, s27, s43
-; GFX11-TRUE16-NEXT: v_lshl_or_b32 v1, v19, 16, v1
-; GFX11-TRUE16-NEXT: v_lshl_or_b32 v0, v18, 16, v0
; GFX11-TRUE16-NEXT: s_pack_ll_b32_b16 s26, s26, s42
; GFX11-TRUE16-NEXT: s_pack_ll_b32_b16 s25, s25, s41
; GFX11-TRUE16-NEXT: s_pack_ll_b32_b16 s24, s24, s40
@@ -32931,59 +34849,61 @@ define inreg <40 x i16> @bitcast_v40f16_to_v40i16_scalar(<40 x half> inreg %a, i
; GFX11-TRUE16-NEXT: s_pack_ll_b32_b16 s2, s2, s5
; GFX11-TRUE16-NEXT: s_pack_ll_b32_b16 s0, s0, s9
; GFX11-TRUE16-NEXT: s_pack_ll_b32_b16 s1, s1, s4
-; GFX11-TRUE16-NEXT: v_pk_add_f16 v15, 0x200, s29 op_sel_hi:[0,1]
+; GFX11-TRUE16-NEXT: v_pk_add_f16 v19, 0x200, v19 op_sel_hi:[0,1]
+; GFX11-TRUE16-NEXT: v_pk_add_f16 v18, 0x200, v18 op_sel_hi:[0,1]
+; GFX11-TRUE16-NEXT: v_pk_add_f16 v17, 0x200, s29 op_sel_hi:[0,1]
; GFX11-TRUE16-NEXT: v_pk_add_f16 v16, 0x200, s28 op_sel_hi:[0,1]
-; GFX11-TRUE16-NEXT: v_pk_add_f16 v1, 0x200, v1 op_sel_hi:[0,1]
-; GFX11-TRUE16-NEXT: v_pk_add_f16 v0, 0x200, v0 op_sel_hi:[0,1]
-; GFX11-TRUE16-NEXT: v_pk_add_f16 v17, 0x200, s27 op_sel_hi:[0,1]
-; GFX11-TRUE16-NEXT: v_pk_add_f16 v10, 0x200, s26 op_sel_hi:[0,1]
-; GFX11-TRUE16-NEXT: v_pk_add_f16 v11, 0x200, s25 op_sel_hi:[0,1]
+; GFX11-TRUE16-NEXT: v_pk_add_f16 v15, 0x200, s27 op_sel_hi:[0,1]
+; GFX11-TRUE16-NEXT: v_pk_add_f16 v14, 0x200, s26 op_sel_hi:[0,1]
+; GFX11-TRUE16-NEXT: v_pk_add_f16 v13, 0x200, s25 op_sel_hi:[0,1]
; GFX11-TRUE16-NEXT: v_pk_add_f16 v12, 0x200, s24 op_sel_hi:[0,1]
-; GFX11-TRUE16-NEXT: v_pk_add_f16 v13, 0x200, s15 op_sel_hi:[0,1]
-; GFX11-TRUE16-NEXT: v_pk_add_f16 v14, 0x200, s14 op_sel_hi:[0,1]
-; GFX11-TRUE16-NEXT: v_pk_add_f16 v5, 0x200, s13 op_sel_hi:[0,1]
-; GFX11-TRUE16-NEXT: v_pk_add_f16 v6, 0x200, s12 op_sel_hi:[0,1]
+; GFX11-TRUE16-NEXT: v_pk_add_f16 v11, 0x200, s15 op_sel_hi:[0,1]
+; GFX11-TRUE16-NEXT: v_pk_add_f16 v10, 0x200, s14 op_sel_hi:[0,1]
+; GFX11-TRUE16-NEXT: v_pk_add_f16 v9, 0x200, s13 op_sel_hi:[0,1]
+; GFX11-TRUE16-NEXT: v_pk_add_f16 v8, 0x200, s12 op_sel_hi:[0,1]
; GFX11-TRUE16-NEXT: v_pk_add_f16 v7, 0x200, s11 op_sel_hi:[0,1]
-; GFX11-TRUE16-NEXT: v_pk_add_f16 v8, 0x200, s10 op_sel_hi:[0,1]
-; GFX11-TRUE16-NEXT: v_pk_add_f16 v9, 0x200, s8 op_sel_hi:[0,1]
-; GFX11-TRUE16-NEXT: v_pk_add_f16 v21, 0x200, s0 op_sel_hi:[0,1]
-; GFX11-TRUE16-NEXT: v_pk_add_f16 v20, 0x200, s1 op_sel_hi:[0,1]
-; GFX11-TRUE16-NEXT: v_pk_add_f16 v4, 0x200, s2 op_sel_hi:[0,1]
+; GFX11-TRUE16-NEXT: v_pk_add_f16 v6, 0x200, s10 op_sel_hi:[0,1]
+; GFX11-TRUE16-NEXT: v_pk_add_f16 v5, 0x200, s8 op_sel_hi:[0,1]
+; GFX11-TRUE16-NEXT: v_pk_add_f16 v0, 0x200, s0 op_sel_hi:[0,1]
+; GFX11-TRUE16-NEXT: v_pk_add_f16 v1, 0x200, s1 op_sel_hi:[0,1]
+; GFX11-TRUE16-NEXT: v_pk_add_f16 v2, 0x200, s2 op_sel_hi:[0,1]
; GFX11-TRUE16-NEXT: v_pk_add_f16 v3, 0x200, s3 op_sel_hi:[0,1]
-; GFX11-TRUE16-NEXT: v_pk_add_f16 v2, 0x200, s7 op_sel_hi:[0,1]
-; GFX11-TRUE16-NEXT: v_lshrrev_b32_e32 v39, 16, v21
-; GFX11-TRUE16-NEXT: v_lshrrev_b32_e32 v38, 16, v20
-; GFX11-TRUE16-NEXT: v_lshrrev_b32_e32 v37, 16, v4
+; GFX11-TRUE16-NEXT: v_pk_add_f16 v4, 0x200, s7 op_sel_hi:[0,1]
+; GFX11-TRUE16-NEXT: v_lshrrev_b32_e32 v39, 16, v0
+; GFX11-TRUE16-NEXT: v_lshrrev_b32_e32 v38, 16, v1
+; GFX11-TRUE16-NEXT: v_lshrrev_b32_e32 v37, 16, v2
; GFX11-TRUE16-NEXT: v_lshrrev_b32_e32 v36, 16, v3
-; GFX11-TRUE16-NEXT: v_lshrrev_b32_e32 v35, 16, v2
-; GFX11-TRUE16-NEXT: v_lshrrev_b32_e32 v34, 16, v9
-; GFX11-TRUE16-NEXT: v_lshrrev_b32_e32 v33, 16, v8
+; GFX11-TRUE16-NEXT: v_lshrrev_b32_e32 v35, 16, v4
+; GFX11-TRUE16-NEXT: v_lshrrev_b32_e32 v34, 16, v5
+; GFX11-TRUE16-NEXT: v_lshrrev_b32_e32 v33, 16, v6
; GFX11-TRUE16-NEXT: v_lshrrev_b32_e32 v32, 16, v7
-; GFX11-TRUE16-NEXT: v_lshrrev_b32_e32 v31, 16, v6
-; GFX11-TRUE16-NEXT: v_lshrrev_b32_e32 v30, 16, v5
-; GFX11-TRUE16-NEXT: v_lshrrev_b32_e32 v29, 16, v14
-; GFX11-TRUE16-NEXT: v_lshrrev_b32_e32 v28, 16, v13
+; GFX11-TRUE16-NEXT: v_lshrrev_b32_e32 v31, 16, v8
+; GFX11-TRUE16-NEXT: v_lshrrev_b32_e32 v30, 16, v9
+; GFX11-TRUE16-NEXT: v_lshrrev_b32_e32 v29, 16, v10
+; GFX11-TRUE16-NEXT: v_lshrrev_b32_e32 v28, 16, v11
; GFX11-TRUE16-NEXT: v_lshrrev_b32_e32 v27, 16, v12
-; GFX11-TRUE16-NEXT: v_lshrrev_b32_e32 v26, 16, v11
-; GFX11-TRUE16-NEXT: v_lshrrev_b32_e32 v25, 16, v10
-; GFX11-TRUE16-NEXT: v_lshrrev_b32_e32 v24, 16, v17
+; GFX11-TRUE16-NEXT: v_lshrrev_b32_e32 v26, 16, v13
+; GFX11-TRUE16-NEXT: v_lshrrev_b32_e32 v25, 16, v14
+; GFX11-TRUE16-NEXT: v_lshrrev_b32_e32 v24, 16, v15
; GFX11-TRUE16-NEXT: v_lshrrev_b32_e32 v23, 16, v16
-; GFX11-TRUE16-NEXT: v_lshrrev_b32_e32 v22, 16, v15
-; GFX11-TRUE16-NEXT: v_lshrrev_b32_e32 v18, 16, v0
-; GFX11-TRUE16-NEXT: v_lshrrev_b32_e32 v19, 16, v1
+; GFX11-TRUE16-NEXT: v_lshrrev_b32_e32 v22, 16, v17
+; GFX11-TRUE16-NEXT: v_lshrrev_b32_e32 v21, 16, v18
+; GFX11-TRUE16-NEXT: v_lshrrev_b32_e32 v20, 16, v19
; GFX11-TRUE16-NEXT: s_branch .LBB59_5
; GFX11-TRUE16-NEXT: .LBB59_3:
+; GFX11-TRUE16-NEXT: ; implicit-def: $vgpr21
+; GFX11-TRUE16-NEXT: ; implicit-def: $vgpr20
; GFX11-TRUE16-NEXT: s_branch .LBB59_2
; GFX11-TRUE16-NEXT: .LBB59_4:
-; GFX11-TRUE16-NEXT: v_dual_mov_b32 v15, s29 :: v_dual_mov_b32 v16, s28
-; GFX11-TRUE16-NEXT: v_dual_mov_b32 v17, s27 :: v_dual_mov_b32 v10, s26
-; GFX11-TRUE16-NEXT: v_dual_mov_b32 v11, s25 :: v_dual_mov_b32 v12, s24
-; GFX11-TRUE16-NEXT: v_dual_mov_b32 v13, s23 :: v_dual_mov_b32 v14, s22
-; GFX11-TRUE16-NEXT: v_dual_mov_b32 v5, s21 :: v_dual_mov_b32 v6, s20
-; GFX11-TRUE16-NEXT: v_dual_mov_b32 v7, s19 :: v_dual_mov_b32 v8, s18
-; GFX11-TRUE16-NEXT: v_dual_mov_b32 v9, s17 :: v_dual_mov_b32 v2, s16
-; GFX11-TRUE16-NEXT: v_dual_mov_b32 v3, s3 :: v_dual_mov_b32 v4, s2
-; GFX11-TRUE16-NEXT: v_dual_mov_b32 v20, s1 :: v_dual_mov_b32 v21, s0
+; GFX11-TRUE16-NEXT: v_dual_mov_b32 v17, s29 :: v_dual_mov_b32 v16, s28
+; GFX11-TRUE16-NEXT: v_dual_mov_b32 v15, s27 :: v_dual_mov_b32 v14, s26
+; GFX11-TRUE16-NEXT: v_dual_mov_b32 v13, s25 :: v_dual_mov_b32 v12, s24
+; GFX11-TRUE16-NEXT: v_dual_mov_b32 v11, s23 :: v_dual_mov_b32 v10, s22
+; GFX11-TRUE16-NEXT: v_dual_mov_b32 v9, s21 :: v_dual_mov_b32 v8, s20
+; GFX11-TRUE16-NEXT: v_dual_mov_b32 v7, s19 :: v_dual_mov_b32 v6, s18
+; GFX11-TRUE16-NEXT: v_dual_mov_b32 v5, s17 :: v_dual_mov_b32 v4, s16
+; GFX11-TRUE16-NEXT: v_dual_mov_b32 v3, s3 :: v_dual_mov_b32 v2, s2
+; GFX11-TRUE16-NEXT: v_dual_mov_b32 v1, s1 :: v_dual_mov_b32 v0, s0
; GFX11-TRUE16-NEXT: v_dual_mov_b32 v22, s45 :: v_dual_mov_b32 v23, s44
; GFX11-TRUE16-NEXT: v_dual_mov_b32 v24, s43 :: v_dual_mov_b32 v25, s42
; GFX11-TRUE16-NEXT: v_dual_mov_b32 v26, s41 :: v_dual_mov_b32 v27, s40
@@ -32994,47 +34914,37 @@ define inreg <40 x i16> @bitcast_v40f16_to_v40i16_scalar(<40 x half> inreg %a, i
; GFX11-TRUE16-NEXT: v_dual_mov_b32 v36, s6 :: v_dual_mov_b32 v37, s5
; GFX11-TRUE16-NEXT: v_dual_mov_b32 v38, s4 :: v_dual_mov_b32 v39, s9
; GFX11-TRUE16-NEXT: .LBB59_5: ; %end
-; GFX11-TRUE16-NEXT: v_and_b32_e32 v4, 0xffff, v4
-; GFX11-TRUE16-NEXT: v_and_b32_e32 v49, 0xffff, v2
-; GFX11-TRUE16-NEXT: v_and_b32_e32 v3, 0xffff, v3
-; GFX11-TRUE16-NEXT: v_and_b32_e32 v8, 0xffff, v8
-; GFX11-TRUE16-NEXT: v_and_b32_e32 v21, 0xffff, v21
-; GFX11-TRUE16-NEXT: v_lshl_or_b32 v2, v37, 16, v4
-; GFX11-TRUE16-NEXT: v_lshl_or_b32 v4, v35, 16, v49
-; GFX11-TRUE16-NEXT: v_and_b32_e32 v35, 0xffff, v6
-; GFX11-TRUE16-NEXT: v_and_b32_e32 v48, 0xffff, v20
-; GFX11-TRUE16-NEXT: v_lshl_or_b32 v3, v36, 16, v3
-; GFX11-TRUE16-NEXT: v_and_b32_e32 v9, 0xffff, v9
-; GFX11-TRUE16-NEXT: v_and_b32_e32 v36, 0xffff, v5
-; GFX11-TRUE16-NEXT: v_lshl_or_b32 v6, v33, 16, v8
-; GFX11-TRUE16-NEXT: v_lshl_or_b32 v8, v31, 16, v35
-; GFX11-TRUE16-NEXT: v_and_b32_e32 v14, 0xffff, v14
-; GFX11-TRUE16-NEXT: v_and_b32_e32 v31, 0xffff, v10
-; GFX11-TRUE16-NEXT: v_lshl_or_b32 v20, v39, 16, v21
-; GFX11-TRUE16-NEXT: v_lshl_or_b32 v21, v38, 16, v48
-; GFX11-TRUE16-NEXT: v_and_b32_e32 v7, 0xffff, v7
-; GFX11-TRUE16-NEXT: v_lshl_or_b32 v5, v34, 16, v9
-; GFX11-TRUE16-NEXT: v_lshl_or_b32 v9, v30, 16, v36
-; GFX11-TRUE16-NEXT: v_and_b32_e32 v13, 0xffff, v13
-; GFX11-TRUE16-NEXT: v_and_b32_e32 v12, 0xffff, v12
-; GFX11-TRUE16-NEXT: v_and_b32_e32 v30, 0xffff, v11
-; GFX11-TRUE16-NEXT: v_lshl_or_b32 v10, v29, 16, v14
-; GFX11-TRUE16-NEXT: v_lshl_or_b32 v14, v25, 16, v31
-; GFX11-TRUE16-NEXT: v_and_b32_e32 v17, 0xffff, v17
-; GFX11-TRUE16-NEXT: v_and_b32_e32 v16, 0xffff, v16
-; GFX11-TRUE16-NEXT: v_and_b32_e32 v25, 0xffff, v15
-; GFX11-TRUE16-NEXT: v_and_b32_e32 v0, 0xffff, v0
-; GFX11-TRUE16-NEXT: v_and_b32_e32 v1, 0xffff, v1
-; GFX11-TRUE16-NEXT: v_lshl_or_b32 v7, v32, 16, v7
-; GFX11-TRUE16-NEXT: v_lshl_or_b32 v11, v28, 16, v13
-; GFX11-TRUE16-NEXT: v_lshl_or_b32 v12, v27, 16, v12
-; GFX11-TRUE16-NEXT: v_lshl_or_b32 v13, v26, 16, v30
-; GFX11-TRUE16-NEXT: v_lshl_or_b32 v15, v24, 16, v17
-; GFX11-TRUE16-NEXT: v_lshl_or_b32 v16, v23, 16, v16
-; GFX11-TRUE16-NEXT: v_lshl_or_b32 v17, v22, 16, v25
-; GFX11-TRUE16-NEXT: v_lshl_or_b32 v18, v18, 16, v0
-; GFX11-TRUE16-NEXT: v_lshl_or_b32 v19, v19, 16, v1
-; GFX11-TRUE16-NEXT: v_dual_mov_b32 v0, v20 :: v_dual_mov_b32 v1, v21
+; GFX11-TRUE16-NEXT: s_delay_alu instid0(VALU_DEP_1) | instskip(NEXT) | instid1(VALU_DEP_3)
+; GFX11-TRUE16-NEXT: v_dual_mov_b32 v39, v39 :: v_dual_mov_b32 v38, v38
+; GFX11-TRUE16-NEXT: v_dual_mov_b32 v37, v37 :: v_dual_mov_b32 v36, v36
+; GFX11-TRUE16-NEXT: v_dual_mov_b32 v35, v35 :: v_dual_mov_b32 v34, v34
+; GFX11-TRUE16-NEXT: v_dual_mov_b32 v33, v33 :: v_dual_mov_b32 v32, v32
+; GFX11-TRUE16-NEXT: v_dual_mov_b32 v31, v31 :: v_dual_mov_b32 v30, v30
+; GFX11-TRUE16-NEXT: v_dual_mov_b32 v29, v29 :: v_dual_mov_b32 v28, v28
+; GFX11-TRUE16-NEXT: v_dual_mov_b32 v27, v27 :: v_dual_mov_b32 v26, v26
+; GFX11-TRUE16-NEXT: v_dual_mov_b32 v25, v25 :: v_dual_mov_b32 v24, v24
+; GFX11-TRUE16-NEXT: v_dual_mov_b32 v23, v23 :: v_dual_mov_b32 v22, v22
+; GFX11-TRUE16-NEXT: v_dual_mov_b32 v21, v21 :: v_dual_mov_b32 v20, v20
+; GFX11-TRUE16-NEXT: v_mov_b16_e32 v0.h, v39.l
+; GFX11-TRUE16-NEXT: v_mov_b16_e32 v1.h, v38.l
+; GFX11-TRUE16-NEXT: v_mov_b16_e32 v2.h, v37.l
+; GFX11-TRUE16-NEXT: v_mov_b16_e32 v3.h, v36.l
+; GFX11-TRUE16-NEXT: v_mov_b16_e32 v4.h, v35.l
+; GFX11-TRUE16-NEXT: v_mov_b16_e32 v5.h, v34.l
+; GFX11-TRUE16-NEXT: v_mov_b16_e32 v6.h, v33.l
+; GFX11-TRUE16-NEXT: v_mov_b16_e32 v7.h, v32.l
+; GFX11-TRUE16-NEXT: v_mov_b16_e32 v8.h, v31.l
+; GFX11-TRUE16-NEXT: v_mov_b16_e32 v9.h, v30.l
+; GFX11-TRUE16-NEXT: v_mov_b16_e32 v10.h, v29.l
+; GFX11-TRUE16-NEXT: v_mov_b16_e32 v11.h, v28.l
+; GFX11-TRUE16-NEXT: v_mov_b16_e32 v12.h, v27.l
+; GFX11-TRUE16-NEXT: v_mov_b16_e32 v13.h, v26.l
+; GFX11-TRUE16-NEXT: v_mov_b16_e32 v14.h, v25.l
+; GFX11-TRUE16-NEXT: v_mov_b16_e32 v15.h, v24.l
+; GFX11-TRUE16-NEXT: v_mov_b16_e32 v16.h, v23.l
+; GFX11-TRUE16-NEXT: v_mov_b16_e32 v17.h, v22.l
+; GFX11-TRUE16-NEXT: v_mov_b16_e32 v18.h, v21.l
+; GFX11-TRUE16-NEXT: v_mov_b16_e32 v19.h, v20.l
; GFX11-TRUE16-NEXT: s_setpc_b64 s[30:31]
;
; GFX11-FAKE16-LABEL: bitcast_v40f16_to_v40i16_scalar:
diff --git a/llvm/test/CodeGen/AMDGPU/amdgcn.bitcast.64bit.ll b/llvm/test/CodeGen/AMDGPU/amdgcn.bitcast.64bit.ll
index 11f90b9..14e17ce 100644
--- a/llvm/test/CodeGen/AMDGPU/amdgcn.bitcast.64bit.ll
+++ b/llvm/test/CodeGen/AMDGPU/amdgcn.bitcast.64bit.ll
@@ -2411,66 +2411,123 @@ define inreg i64 @bitcast_v4bf16_to_i64_scalar(<4 x bfloat> inreg %a, i32 inreg
; GFX9-NEXT: v_mov_b32_e32 v1, s17
; GFX9-NEXT: s_setpc_b64 s[30:31]
;
-; GFX11-LABEL: bitcast_v4bf16_to_i64_scalar:
-; GFX11: ; %bb.0:
-; GFX11-NEXT: s_waitcnt vmcnt(0) expcnt(0) lgkmcnt(0)
-; GFX11-NEXT: s_cmp_lg_u32 s2, 0
-; GFX11-NEXT: s_mov_b32 s2, 0
-; GFX11-NEXT: s_cbranch_scc0 .LBB23_3
-; GFX11-NEXT: ; %bb.1: ; %Flow
-; GFX11-NEXT: s_and_not1_b32 vcc_lo, exec_lo, s2
-; GFX11-NEXT: s_cbranch_vccnz .LBB23_4
-; GFX11-NEXT: .LBB23_2: ; %cmp.true
-; GFX11-NEXT: s_pack_lh_b32_b16 s2, 0, s0
-; GFX11-NEXT: s_lshl_b32 s0, s0, 16
-; GFX11-NEXT: v_add_f32_e64 v0, 0x40c00000, s2
-; GFX11-NEXT: v_add_f32_e64 v1, 0x40c00000, s0
-; GFX11-NEXT: s_lshl_b32 s0, s1, 16
-; GFX11-NEXT: s_pack_lh_b32_b16 s1, 0, s1
-; GFX11-NEXT: v_add_f32_e64 v2, 0x40c00000, s0
-; GFX11-NEXT: v_add_f32_e64 v5, 0x40c00000, s1
-; GFX11-NEXT: v_bfe_u32 v4, v1, 16, 1
-; GFX11-NEXT: v_bfe_u32 v3, v0, 16, 1
-; GFX11-NEXT: v_or_b32_e32 v9, 0x400000, v1
-; GFX11-NEXT: v_bfe_u32 v6, v2, 16, 1
-; GFX11-NEXT: v_bfe_u32 v8, v5, 16, 1
-; GFX11-NEXT: v_add_nc_u32_e32 v4, v4, v1
-; GFX11-NEXT: v_cmp_u_f32_e32 vcc_lo, v1, v1
-; GFX11-NEXT: v_or_b32_e32 v10, 0x400000, v2
-; GFX11-NEXT: v_or_b32_e32 v7, 0x400000, v0
-; GFX11-NEXT: v_add_nc_u32_e32 v8, v8, v5
-; GFX11-NEXT: v_add_nc_u32_e32 v4, 0x7fff, v4
-; GFX11-NEXT: s_delay_alu instid0(VALU_DEP_1) | instskip(NEXT) | instid1(VALU_DEP_3)
-; GFX11-NEXT: v_dual_cndmask_b32 v1, v4, v9 :: v_dual_add_nc_u32 v6, v6, v2
-; GFX11-NEXT: v_add_nc_u32_e32 v4, 0x7fff, v8
-; GFX11-NEXT: v_or_b32_e32 v8, 0x400000, v5
-; GFX11-NEXT: s_delay_alu instid0(VALU_DEP_3) | instskip(SKIP_3) | instid1(VALU_DEP_2)
-; GFX11-NEXT: v_add_nc_u32_e32 v6, 0x7fff, v6
-; GFX11-NEXT: v_cmp_u_f32_e32 vcc_lo, v2, v2
-; GFX11-NEXT: v_add_nc_u32_e32 v3, v3, v0
-; GFX11-NEXT: v_lshrrev_b32_e32 v1, 16, v1
-; GFX11-NEXT: v_dual_cndmask_b32 v2, v6, v10 :: v_dual_add_nc_u32 v3, 0x7fff, v3
-; GFX11-NEXT: v_cmp_u_f32_e32 vcc_lo, v0, v0
-; GFX11-NEXT: s_delay_alu instid0(VALU_DEP_3) | instskip(NEXT) | instid1(VALU_DEP_3)
-; GFX11-NEXT: v_and_b32_e32 v1, 0xffff, v1
-; GFX11-NEXT: v_lshrrev_b32_e32 v2, 16, v2
-; GFX11-NEXT: s_delay_alu instid0(VALU_DEP_4) | instskip(SKIP_1) | instid1(VALU_DEP_3)
-; GFX11-NEXT: v_cndmask_b32_e32 v0, v3, v7, vcc_lo
-; GFX11-NEXT: v_cmp_u_f32_e32 vcc_lo, v5, v5
-; GFX11-NEXT: v_and_b32_e32 v2, 0xffff, v2
-; GFX11-NEXT: s_delay_alu instid0(VALU_DEP_3) | instskip(SKIP_1) | instid1(VALU_DEP_2)
-; GFX11-NEXT: v_lshrrev_b32_e32 v0, 16, v0
-; GFX11-NEXT: v_cndmask_b32_e32 v3, v4, v8, vcc_lo
-; GFX11-NEXT: v_lshl_or_b32 v0, v0, 16, v1
-; GFX11-NEXT: s_delay_alu instid0(VALU_DEP_2) | instskip(NEXT) | instid1(VALU_DEP_1)
-; GFX11-NEXT: v_lshrrev_b32_e32 v3, 16, v3
-; GFX11-NEXT: v_lshl_or_b32 v1, v3, 16, v2
-; GFX11-NEXT: s_setpc_b64 s[30:31]
-; GFX11-NEXT: .LBB23_3:
-; GFX11-NEXT: s_branch .LBB23_2
-; GFX11-NEXT: .LBB23_4:
-; GFX11-NEXT: v_dual_mov_b32 v0, s0 :: v_dual_mov_b32 v1, s1
-; GFX11-NEXT: s_setpc_b64 s[30:31]
+; GFX11-TRUE16-LABEL: bitcast_v4bf16_to_i64_scalar:
+; GFX11-TRUE16: ; %bb.0:
+; GFX11-TRUE16-NEXT: s_waitcnt vmcnt(0) expcnt(0) lgkmcnt(0)
+; GFX11-TRUE16-NEXT: s_cmp_lg_u32 s2, 0
+; GFX11-TRUE16-NEXT: s_mov_b32 s2, 0
+; GFX11-TRUE16-NEXT: s_cbranch_scc0 .LBB23_3
+; GFX11-TRUE16-NEXT: ; %bb.1: ; %Flow
+; GFX11-TRUE16-NEXT: s_and_not1_b32 vcc_lo, exec_lo, s2
+; GFX11-TRUE16-NEXT: s_cbranch_vccnz .LBB23_4
+; GFX11-TRUE16-NEXT: .LBB23_2: ; %cmp.true
+; GFX11-TRUE16-NEXT: s_pack_lh_b32_b16 s2, 0, s0
+; GFX11-TRUE16-NEXT: s_lshl_b32 s0, s0, 16
+; GFX11-TRUE16-NEXT: v_add_f32_e64 v0, 0x40c00000, s2
+; GFX11-TRUE16-NEXT: v_add_f32_e64 v1, 0x40c00000, s0
+; GFX11-TRUE16-NEXT: s_pack_lh_b32_b16 s2, 0, s1
+; GFX11-TRUE16-NEXT: s_lshl_b32 s1, s1, 16
+; GFX11-TRUE16-NEXT: v_add_f32_e64 v3, 0x40c00000, s2
+; GFX11-TRUE16-NEXT: v_bfe_u32 v2, v0, 16, 1
+; GFX11-TRUE16-NEXT: v_bfe_u32 v6, v1, 16, 1
+; GFX11-TRUE16-NEXT: v_add_f32_e64 v4, 0x40c00000, s1
+; GFX11-TRUE16-NEXT: v_or_b32_e32 v5, 0x400000, v0
+; GFX11-TRUE16-NEXT: v_bfe_u32 v8, v3, 16, 1
+; GFX11-TRUE16-NEXT: v_add_nc_u32_e32 v2, v2, v0
+; GFX11-TRUE16-NEXT: v_add_nc_u32_e32 v6, v6, v1
+; GFX11-TRUE16-NEXT: v_or_b32_e32 v7, 0x400000, v1
+; GFX11-TRUE16-NEXT: v_bfe_u32 v9, v4, 16, 1
+; GFX11-TRUE16-NEXT: v_add_nc_u32_e32 v8, v8, v3
+; GFX11-TRUE16-NEXT: v_cmp_u_f32_e32 vcc_lo, v0, v0
+; GFX11-TRUE16-NEXT: v_add_nc_u32_e32 v6, 0x7fff, v6
+; GFX11-TRUE16-NEXT: v_add_nc_u32_e32 v2, 0x7fff, v2
+; GFX11-TRUE16-NEXT: v_or_b32_e32 v10, 0x400000, v3
+; GFX11-TRUE16-NEXT: s_delay_alu instid0(VALU_DEP_2) | instskip(SKIP_3) | instid1(VALU_DEP_4)
+; GFX11-TRUE16-NEXT: v_dual_cndmask_b32 v0, v2, v5 :: v_dual_add_nc_u32 v9, v9, v4
+; GFX11-TRUE16-NEXT: v_or_b32_e32 v2, 0x400000, v4
+; GFX11-TRUE16-NEXT: v_add_nc_u32_e32 v5, 0x7fff, v8
+; GFX11-TRUE16-NEXT: v_cmp_u_f32_e32 vcc_lo, v3, v3
+; GFX11-TRUE16-NEXT: v_add_nc_u32_e32 v8, 0x7fff, v9
+; GFX11-TRUE16-NEXT: v_lshrrev_b32_e32 v9, 16, v0
+; GFX11-TRUE16-NEXT: s_delay_alu instid0(VALU_DEP_4) | instskip(SKIP_1) | instid1(VALU_DEP_2)
+; GFX11-TRUE16-NEXT: v_cndmask_b32_e32 v0, v5, v10, vcc_lo
+; GFX11-TRUE16-NEXT: v_cmp_u_f32_e32 vcc_lo, v1, v1
+; GFX11-TRUE16-NEXT: v_lshrrev_b32_e32 v3, 16, v0
+; GFX11-TRUE16-NEXT: v_cndmask_b32_e32 v1, v6, v7, vcc_lo
+; GFX11-TRUE16-NEXT: v_cmp_u_f32_e32 vcc_lo, v4, v4
+; GFX11-TRUE16-NEXT: s_delay_alu instid0(VALU_DEP_2) | instskip(SKIP_2) | instid1(VALU_DEP_2)
+; GFX11-TRUE16-NEXT: v_lshrrev_b32_e32 v0, 16, v1
+; GFX11-TRUE16-NEXT: v_cndmask_b32_e32 v2, v8, v2, vcc_lo
+; GFX11-TRUE16-NEXT: v_mov_b16_e32 v0.h, v9.l
+; GFX11-TRUE16-NEXT: v_lshrrev_b32_e32 v1, 16, v2
+; GFX11-TRUE16-NEXT: v_mov_b16_e32 v1.h, v3.l
+; GFX11-TRUE16-NEXT: s_setpc_b64 s[30:31]
+; GFX11-TRUE16-NEXT: .LBB23_3:
+; GFX11-TRUE16-NEXT: s_branch .LBB23_2
+; GFX11-TRUE16-NEXT: .LBB23_4:
+; GFX11-TRUE16-NEXT: v_dual_mov_b32 v0, s0 :: v_dual_mov_b32 v1, s1
+; GFX11-TRUE16-NEXT: s_setpc_b64 s[30:31]
+;
+; GFX11-FAKE16-LABEL: bitcast_v4bf16_to_i64_scalar:
+; GFX11-FAKE16: ; %bb.0:
+; GFX11-FAKE16-NEXT: s_waitcnt vmcnt(0) expcnt(0) lgkmcnt(0)
+; GFX11-FAKE16-NEXT: s_cmp_lg_u32 s2, 0
+; GFX11-FAKE16-NEXT: s_mov_b32 s2, 0
+; GFX11-FAKE16-NEXT: s_cbranch_scc0 .LBB23_3
+; GFX11-FAKE16-NEXT: ; %bb.1: ; %Flow
+; GFX11-FAKE16-NEXT: s_and_not1_b32 vcc_lo, exec_lo, s2
+; GFX11-FAKE16-NEXT: s_cbranch_vccnz .LBB23_4
+; GFX11-FAKE16-NEXT: .LBB23_2: ; %cmp.true
+; GFX11-FAKE16-NEXT: s_pack_lh_b32_b16 s2, 0, s0
+; GFX11-FAKE16-NEXT: s_lshl_b32 s0, s0, 16
+; GFX11-FAKE16-NEXT: v_add_f32_e64 v0, 0x40c00000, s2
+; GFX11-FAKE16-NEXT: v_add_f32_e64 v1, 0x40c00000, s0
+; GFX11-FAKE16-NEXT: s_lshl_b32 s0, s1, 16
+; GFX11-FAKE16-NEXT: s_pack_lh_b32_b16 s1, 0, s1
+; GFX11-FAKE16-NEXT: v_add_f32_e64 v2, 0x40c00000, s0
+; GFX11-FAKE16-NEXT: v_add_f32_e64 v5, 0x40c00000, s1
+; GFX11-FAKE16-NEXT: v_bfe_u32 v4, v1, 16, 1
+; GFX11-FAKE16-NEXT: v_bfe_u32 v3, v0, 16, 1
+; GFX11-FAKE16-NEXT: v_or_b32_e32 v9, 0x400000, v1
+; GFX11-FAKE16-NEXT: v_bfe_u32 v6, v2, 16, 1
+; GFX11-FAKE16-NEXT: v_bfe_u32 v8, v5, 16, 1
+; GFX11-FAKE16-NEXT: v_add_nc_u32_e32 v4, v4, v1
+; GFX11-FAKE16-NEXT: v_cmp_u_f32_e32 vcc_lo, v1, v1
+; GFX11-FAKE16-NEXT: v_or_b32_e32 v10, 0x400000, v2
+; GFX11-FAKE16-NEXT: v_or_b32_e32 v7, 0x400000, v0
+; GFX11-FAKE16-NEXT: v_add_nc_u32_e32 v8, v8, v5
+; GFX11-FAKE16-NEXT: v_add_nc_u32_e32 v4, 0x7fff, v4
+; GFX11-FAKE16-NEXT: s_delay_alu instid0(VALU_DEP_1) | instskip(NEXT) | instid1(VALU_DEP_3)
+; GFX11-FAKE16-NEXT: v_dual_cndmask_b32 v1, v4, v9 :: v_dual_add_nc_u32 v6, v6, v2
+; GFX11-FAKE16-NEXT: v_add_nc_u32_e32 v4, 0x7fff, v8
+; GFX11-FAKE16-NEXT: v_or_b32_e32 v8, 0x400000, v5
+; GFX11-FAKE16-NEXT: s_delay_alu instid0(VALU_DEP_3) | instskip(SKIP_3) | instid1(VALU_DEP_2)
+; GFX11-FAKE16-NEXT: v_add_nc_u32_e32 v6, 0x7fff, v6
+; GFX11-FAKE16-NEXT: v_cmp_u_f32_e32 vcc_lo, v2, v2
+; GFX11-FAKE16-NEXT: v_add_nc_u32_e32 v3, v3, v0
+; GFX11-FAKE16-NEXT: v_lshrrev_b32_e32 v1, 16, v1
+; GFX11-FAKE16-NEXT: v_dual_cndmask_b32 v2, v6, v10 :: v_dual_add_nc_u32 v3, 0x7fff, v3
+; GFX11-FAKE16-NEXT: v_cmp_u_f32_e32 vcc_lo, v0, v0
+; GFX11-FAKE16-NEXT: s_delay_alu instid0(VALU_DEP_3) | instskip(NEXT) | instid1(VALU_DEP_3)
+; GFX11-FAKE16-NEXT: v_and_b32_e32 v1, 0xffff, v1
+; GFX11-FAKE16-NEXT: v_lshrrev_b32_e32 v2, 16, v2
+; GFX11-FAKE16-NEXT: s_delay_alu instid0(VALU_DEP_4) | instskip(SKIP_1) | instid1(VALU_DEP_3)
+; GFX11-FAKE16-NEXT: v_cndmask_b32_e32 v0, v3, v7, vcc_lo
+; GFX11-FAKE16-NEXT: v_cmp_u_f32_e32 vcc_lo, v5, v5
+; GFX11-FAKE16-NEXT: v_and_b32_e32 v2, 0xffff, v2
+; GFX11-FAKE16-NEXT: s_delay_alu instid0(VALU_DEP_3) | instskip(SKIP_1) | instid1(VALU_DEP_2)
+; GFX11-FAKE16-NEXT: v_lshrrev_b32_e32 v0, 16, v0
+; GFX11-FAKE16-NEXT: v_cndmask_b32_e32 v3, v4, v8, vcc_lo
+; GFX11-FAKE16-NEXT: v_lshl_or_b32 v0, v0, 16, v1
+; GFX11-FAKE16-NEXT: s_delay_alu instid0(VALU_DEP_2) | instskip(NEXT) | instid1(VALU_DEP_1)
+; GFX11-FAKE16-NEXT: v_lshrrev_b32_e32 v3, 16, v3
+; GFX11-FAKE16-NEXT: v_lshl_or_b32 v1, v3, 16, v2
+; GFX11-FAKE16-NEXT: s_setpc_b64 s[30:31]
+; GFX11-FAKE16-NEXT: .LBB23_3:
+; GFX11-FAKE16-NEXT: s_branch .LBB23_2
+; GFX11-FAKE16-NEXT: .LBB23_4:
+; GFX11-FAKE16-NEXT: v_dual_mov_b32 v0, s0 :: v_dual_mov_b32 v1, s1
+; GFX11-FAKE16-NEXT: s_setpc_b64 s[30:31]
%cmp = icmp eq i32 %b, 0
br i1 %cmp, label %cmp.true, label %cmp.false
@@ -5542,66 +5599,123 @@ define inreg double @bitcast_v4bf16_to_f64_scalar(<4 x bfloat> inreg %a, i32 inr
; GFX9-NEXT: v_mov_b32_e32 v1, s17
; GFX9-NEXT: s_setpc_b64 s[30:31]
;
-; GFX11-LABEL: bitcast_v4bf16_to_f64_scalar:
-; GFX11: ; %bb.0:
-; GFX11-NEXT: s_waitcnt vmcnt(0) expcnt(0) lgkmcnt(0)
-; GFX11-NEXT: s_cmp_lg_u32 s2, 0
-; GFX11-NEXT: s_mov_b32 s2, 0
-; GFX11-NEXT: s_cbranch_scc0 .LBB47_3
-; GFX11-NEXT: ; %bb.1: ; %Flow
-; GFX11-NEXT: s_and_not1_b32 vcc_lo, exec_lo, s2
-; GFX11-NEXT: s_cbranch_vccnz .LBB47_4
-; GFX11-NEXT: .LBB47_2: ; %cmp.true
-; GFX11-NEXT: s_pack_lh_b32_b16 s2, 0, s0
-; GFX11-NEXT: s_lshl_b32 s0, s0, 16
-; GFX11-NEXT: v_add_f32_e64 v0, 0x40c00000, s2
-; GFX11-NEXT: v_add_f32_e64 v1, 0x40c00000, s0
-; GFX11-NEXT: s_lshl_b32 s0, s1, 16
-; GFX11-NEXT: s_pack_lh_b32_b16 s1, 0, s1
-; GFX11-NEXT: v_add_f32_e64 v2, 0x40c00000, s0
-; GFX11-NEXT: v_add_f32_e64 v5, 0x40c00000, s1
-; GFX11-NEXT: v_bfe_u32 v4, v1, 16, 1
-; GFX11-NEXT: v_bfe_u32 v3, v0, 16, 1
-; GFX11-NEXT: v_or_b32_e32 v9, 0x400000, v1
-; GFX11-NEXT: v_bfe_u32 v6, v2, 16, 1
-; GFX11-NEXT: v_bfe_u32 v8, v5, 16, 1
-; GFX11-NEXT: v_add_nc_u32_e32 v4, v4, v1
-; GFX11-NEXT: v_cmp_u_f32_e32 vcc_lo, v1, v1
-; GFX11-NEXT: v_or_b32_e32 v10, 0x400000, v2
-; GFX11-NEXT: v_or_b32_e32 v7, 0x400000, v0
-; GFX11-NEXT: v_add_nc_u32_e32 v8, v8, v5
-; GFX11-NEXT: v_add_nc_u32_e32 v4, 0x7fff, v4
-; GFX11-NEXT: s_delay_alu instid0(VALU_DEP_1) | instskip(NEXT) | instid1(VALU_DEP_3)
-; GFX11-NEXT: v_dual_cndmask_b32 v1, v4, v9 :: v_dual_add_nc_u32 v6, v6, v2
-; GFX11-NEXT: v_add_nc_u32_e32 v4, 0x7fff, v8
-; GFX11-NEXT: v_or_b32_e32 v8, 0x400000, v5
-; GFX11-NEXT: s_delay_alu instid0(VALU_DEP_3) | instskip(SKIP_3) | instid1(VALU_DEP_2)
-; GFX11-NEXT: v_add_nc_u32_e32 v6, 0x7fff, v6
-; GFX11-NEXT: v_cmp_u_f32_e32 vcc_lo, v2, v2
-; GFX11-NEXT: v_add_nc_u32_e32 v3, v3, v0
-; GFX11-NEXT: v_lshrrev_b32_e32 v1, 16, v1
-; GFX11-NEXT: v_dual_cndmask_b32 v2, v6, v10 :: v_dual_add_nc_u32 v3, 0x7fff, v3
-; GFX11-NEXT: v_cmp_u_f32_e32 vcc_lo, v0, v0
-; GFX11-NEXT: s_delay_alu instid0(VALU_DEP_3) | instskip(NEXT) | instid1(VALU_DEP_3)
-; GFX11-NEXT: v_and_b32_e32 v1, 0xffff, v1
-; GFX11-NEXT: v_lshrrev_b32_e32 v2, 16, v2
-; GFX11-NEXT: s_delay_alu instid0(VALU_DEP_4) | instskip(SKIP_1) | instid1(VALU_DEP_3)
-; GFX11-NEXT: v_cndmask_b32_e32 v0, v3, v7, vcc_lo
-; GFX11-NEXT: v_cmp_u_f32_e32 vcc_lo, v5, v5
-; GFX11-NEXT: v_and_b32_e32 v2, 0xffff, v2
-; GFX11-NEXT: s_delay_alu instid0(VALU_DEP_3) | instskip(SKIP_1) | instid1(VALU_DEP_2)
-; GFX11-NEXT: v_lshrrev_b32_e32 v0, 16, v0
-; GFX11-NEXT: v_cndmask_b32_e32 v3, v4, v8, vcc_lo
-; GFX11-NEXT: v_lshl_or_b32 v0, v0, 16, v1
-; GFX11-NEXT: s_delay_alu instid0(VALU_DEP_2) | instskip(NEXT) | instid1(VALU_DEP_1)
-; GFX11-NEXT: v_lshrrev_b32_e32 v3, 16, v3
-; GFX11-NEXT: v_lshl_or_b32 v1, v3, 16, v2
-; GFX11-NEXT: s_setpc_b64 s[30:31]
-; GFX11-NEXT: .LBB47_3:
-; GFX11-NEXT: s_branch .LBB47_2
-; GFX11-NEXT: .LBB47_4:
-; GFX11-NEXT: v_dual_mov_b32 v0, s0 :: v_dual_mov_b32 v1, s1
-; GFX11-NEXT: s_setpc_b64 s[30:31]
+; GFX11-TRUE16-LABEL: bitcast_v4bf16_to_f64_scalar:
+; GFX11-TRUE16: ; %bb.0:
+; GFX11-TRUE16-NEXT: s_waitcnt vmcnt(0) expcnt(0) lgkmcnt(0)
+; GFX11-TRUE16-NEXT: s_cmp_lg_u32 s2, 0
+; GFX11-TRUE16-NEXT: s_mov_b32 s2, 0
+; GFX11-TRUE16-NEXT: s_cbranch_scc0 .LBB47_3
+; GFX11-TRUE16-NEXT: ; %bb.1: ; %Flow
+; GFX11-TRUE16-NEXT: s_and_not1_b32 vcc_lo, exec_lo, s2
+; GFX11-TRUE16-NEXT: s_cbranch_vccnz .LBB47_4
+; GFX11-TRUE16-NEXT: .LBB47_2: ; %cmp.true
+; GFX11-TRUE16-NEXT: s_pack_lh_b32_b16 s2, 0, s0
+; GFX11-TRUE16-NEXT: s_lshl_b32 s0, s0, 16
+; GFX11-TRUE16-NEXT: v_add_f32_e64 v0, 0x40c00000, s2
+; GFX11-TRUE16-NEXT: v_add_f32_e64 v1, 0x40c00000, s0
+; GFX11-TRUE16-NEXT: s_pack_lh_b32_b16 s2, 0, s1
+; GFX11-TRUE16-NEXT: s_lshl_b32 s1, s1, 16
+; GFX11-TRUE16-NEXT: v_add_f32_e64 v3, 0x40c00000, s2
+; GFX11-TRUE16-NEXT: v_bfe_u32 v2, v0, 16, 1
+; GFX11-TRUE16-NEXT: v_bfe_u32 v6, v1, 16, 1
+; GFX11-TRUE16-NEXT: v_add_f32_e64 v4, 0x40c00000, s1
+; GFX11-TRUE16-NEXT: v_or_b32_e32 v5, 0x400000, v0
+; GFX11-TRUE16-NEXT: v_bfe_u32 v8, v3, 16, 1
+; GFX11-TRUE16-NEXT: v_add_nc_u32_e32 v2, v2, v0
+; GFX11-TRUE16-NEXT: v_add_nc_u32_e32 v6, v6, v1
+; GFX11-TRUE16-NEXT: v_or_b32_e32 v7, 0x400000, v1
+; GFX11-TRUE16-NEXT: v_bfe_u32 v9, v4, 16, 1
+; GFX11-TRUE16-NEXT: v_add_nc_u32_e32 v8, v8, v3
+; GFX11-TRUE16-NEXT: v_cmp_u_f32_e32 vcc_lo, v0, v0
+; GFX11-TRUE16-NEXT: v_add_nc_u32_e32 v6, 0x7fff, v6
+; GFX11-TRUE16-NEXT: v_add_nc_u32_e32 v2, 0x7fff, v2
+; GFX11-TRUE16-NEXT: v_or_b32_e32 v10, 0x400000, v3
+; GFX11-TRUE16-NEXT: s_delay_alu instid0(VALU_DEP_2) | instskip(SKIP_3) | instid1(VALU_DEP_4)
+; GFX11-TRUE16-NEXT: v_dual_cndmask_b32 v0, v2, v5 :: v_dual_add_nc_u32 v9, v9, v4
+; GFX11-TRUE16-NEXT: v_or_b32_e32 v2, 0x400000, v4
+; GFX11-TRUE16-NEXT: v_add_nc_u32_e32 v5, 0x7fff, v8
+; GFX11-TRUE16-NEXT: v_cmp_u_f32_e32 vcc_lo, v3, v3
+; GFX11-TRUE16-NEXT: v_add_nc_u32_e32 v8, 0x7fff, v9
+; GFX11-TRUE16-NEXT: v_lshrrev_b32_e32 v9, 16, v0
+; GFX11-TRUE16-NEXT: s_delay_alu instid0(VALU_DEP_4) | instskip(SKIP_1) | instid1(VALU_DEP_2)
+; GFX11-TRUE16-NEXT: v_cndmask_b32_e32 v0, v5, v10, vcc_lo
+; GFX11-TRUE16-NEXT: v_cmp_u_f32_e32 vcc_lo, v1, v1
+; GFX11-TRUE16-NEXT: v_lshrrev_b32_e32 v3, 16, v0
+; GFX11-TRUE16-NEXT: v_cndmask_b32_e32 v1, v6, v7, vcc_lo
+; GFX11-TRUE16-NEXT: v_cmp_u_f32_e32 vcc_lo, v4, v4
+; GFX11-TRUE16-NEXT: s_delay_alu instid0(VALU_DEP_2) | instskip(SKIP_2) | instid1(VALU_DEP_2)
+; GFX11-TRUE16-NEXT: v_lshrrev_b32_e32 v0, 16, v1
+; GFX11-TRUE16-NEXT: v_cndmask_b32_e32 v2, v8, v2, vcc_lo
+; GFX11-TRUE16-NEXT: v_mov_b16_e32 v0.h, v9.l
+; GFX11-TRUE16-NEXT: v_lshrrev_b32_e32 v1, 16, v2
+; GFX11-TRUE16-NEXT: v_mov_b16_e32 v1.h, v3.l
+; GFX11-TRUE16-NEXT: s_setpc_b64 s[30:31]
+; GFX11-TRUE16-NEXT: .LBB47_3:
+; GFX11-TRUE16-NEXT: s_branch .LBB47_2
+; GFX11-TRUE16-NEXT: .LBB47_4:
+; GFX11-TRUE16-NEXT: v_dual_mov_b32 v0, s0 :: v_dual_mov_b32 v1, s1
+; GFX11-TRUE16-NEXT: s_setpc_b64 s[30:31]
+;
+; GFX11-FAKE16-LABEL: bitcast_v4bf16_to_f64_scalar:
+; GFX11-FAKE16: ; %bb.0:
+; GFX11-FAKE16-NEXT: s_waitcnt vmcnt(0) expcnt(0) lgkmcnt(0)
+; GFX11-FAKE16-NEXT: s_cmp_lg_u32 s2, 0
+; GFX11-FAKE16-NEXT: s_mov_b32 s2, 0
+; GFX11-FAKE16-NEXT: s_cbranch_scc0 .LBB47_3
+; GFX11-FAKE16-NEXT: ; %bb.1: ; %Flow
+; GFX11-FAKE16-NEXT: s_and_not1_b32 vcc_lo, exec_lo, s2
+; GFX11-FAKE16-NEXT: s_cbranch_vccnz .LBB47_4
+; GFX11-FAKE16-NEXT: .LBB47_2: ; %cmp.true
+; GFX11-FAKE16-NEXT: s_pack_lh_b32_b16 s2, 0, s0
+; GFX11-FAKE16-NEXT: s_lshl_b32 s0, s0, 16
+; GFX11-FAKE16-NEXT: v_add_f32_e64 v0, 0x40c00000, s2
+; GFX11-FAKE16-NEXT: v_add_f32_e64 v1, 0x40c00000, s0
+; GFX11-FAKE16-NEXT: s_lshl_b32 s0, s1, 16
+; GFX11-FAKE16-NEXT: s_pack_lh_b32_b16 s1, 0, s1
+; GFX11-FAKE16-NEXT: v_add_f32_e64 v2, 0x40c00000, s0
+; GFX11-FAKE16-NEXT: v_add_f32_e64 v5, 0x40c00000, s1
+; GFX11-FAKE16-NEXT: v_bfe_u32 v4, v1, 16, 1
+; GFX11-FAKE16-NEXT: v_bfe_u32 v3, v0, 16, 1
+; GFX11-FAKE16-NEXT: v_or_b32_e32 v9, 0x400000, v1
+; GFX11-FAKE16-NEXT: v_bfe_u32 v6, v2, 16, 1
+; GFX11-FAKE16-NEXT: v_bfe_u32 v8, v5, 16, 1
+; GFX11-FAKE16-NEXT: v_add_nc_u32_e32 v4, v4, v1
+; GFX11-FAKE16-NEXT: v_cmp_u_f32_e32 vcc_lo, v1, v1
+; GFX11-FAKE16-NEXT: v_or_b32_e32 v10, 0x400000, v2
+; GFX11-FAKE16-NEXT: v_or_b32_e32 v7, 0x400000, v0
+; GFX11-FAKE16-NEXT: v_add_nc_u32_e32 v8, v8, v5
+; GFX11-FAKE16-NEXT: v_add_nc_u32_e32 v4, 0x7fff, v4
+; GFX11-FAKE16-NEXT: s_delay_alu instid0(VALU_DEP_1) | instskip(NEXT) | instid1(VALU_DEP_3)
+; GFX11-FAKE16-NEXT: v_dual_cndmask_b32 v1, v4, v9 :: v_dual_add_nc_u32 v6, v6, v2
+; GFX11-FAKE16-NEXT: v_add_nc_u32_e32 v4, 0x7fff, v8
+; GFX11-FAKE16-NEXT: v_or_b32_e32 v8, 0x400000, v5
+; GFX11-FAKE16-NEXT: s_delay_alu instid0(VALU_DEP_3) | instskip(SKIP_3) | instid1(VALU_DEP_2)
+; GFX11-FAKE16-NEXT: v_add_nc_u32_e32 v6, 0x7fff, v6
+; GFX11-FAKE16-NEXT: v_cmp_u_f32_e32 vcc_lo, v2, v2
+; GFX11-FAKE16-NEXT: v_add_nc_u32_e32 v3, v3, v0
+; GFX11-FAKE16-NEXT: v_lshrrev_b32_e32 v1, 16, v1
+; GFX11-FAKE16-NEXT: v_dual_cndmask_b32 v2, v6, v10 :: v_dual_add_nc_u32 v3, 0x7fff, v3
+; GFX11-FAKE16-NEXT: v_cmp_u_f32_e32 vcc_lo, v0, v0
+; GFX11-FAKE16-NEXT: s_delay_alu instid0(VALU_DEP_3) | instskip(NEXT) | instid1(VALU_DEP_3)
+; GFX11-FAKE16-NEXT: v_and_b32_e32 v1, 0xffff, v1
+; GFX11-FAKE16-NEXT: v_lshrrev_b32_e32 v2, 16, v2
+; GFX11-FAKE16-NEXT: s_delay_alu instid0(VALU_DEP_4) | instskip(SKIP_1) | instid1(VALU_DEP_3)
+; GFX11-FAKE16-NEXT: v_cndmask_b32_e32 v0, v3, v7, vcc_lo
+; GFX11-FAKE16-NEXT: v_cmp_u_f32_e32 vcc_lo, v5, v5
+; GFX11-FAKE16-NEXT: v_and_b32_e32 v2, 0xffff, v2
+; GFX11-FAKE16-NEXT: s_delay_alu instid0(VALU_DEP_3) | instskip(SKIP_1) | instid1(VALU_DEP_2)
+; GFX11-FAKE16-NEXT: v_lshrrev_b32_e32 v0, 16, v0
+; GFX11-FAKE16-NEXT: v_cndmask_b32_e32 v3, v4, v8, vcc_lo
+; GFX11-FAKE16-NEXT: v_lshl_or_b32 v0, v0, 16, v1
+; GFX11-FAKE16-NEXT: s_delay_alu instid0(VALU_DEP_2) | instskip(NEXT) | instid1(VALU_DEP_1)
+; GFX11-FAKE16-NEXT: v_lshrrev_b32_e32 v3, 16, v3
+; GFX11-FAKE16-NEXT: v_lshl_or_b32 v1, v3, 16, v2
+; GFX11-FAKE16-NEXT: s_setpc_b64 s[30:31]
+; GFX11-FAKE16-NEXT: .LBB47_3:
+; GFX11-FAKE16-NEXT: s_branch .LBB47_2
+; GFX11-FAKE16-NEXT: .LBB47_4:
+; GFX11-FAKE16-NEXT: v_dual_mov_b32 v0, s0 :: v_dual_mov_b32 v1, s1
+; GFX11-FAKE16-NEXT: s_setpc_b64 s[30:31]
%cmp = icmp eq i32 %b, 0
br i1 %cmp, label %cmp.true, label %cmp.false
@@ -8386,66 +8500,123 @@ define inreg <2 x i32> @bitcast_v4bf16_to_v2i32_scalar(<4 x bfloat> inreg %a, i3
; GFX9-NEXT: v_mov_b32_e32 v1, s17
; GFX9-NEXT: s_setpc_b64 s[30:31]
;
-; GFX11-LABEL: bitcast_v4bf16_to_v2i32_scalar:
-; GFX11: ; %bb.0:
-; GFX11-NEXT: s_waitcnt vmcnt(0) expcnt(0) lgkmcnt(0)
-; GFX11-NEXT: s_cmp_lg_u32 s2, 0
-; GFX11-NEXT: s_mov_b32 s2, 0
-; GFX11-NEXT: s_cbranch_scc0 .LBB67_3
-; GFX11-NEXT: ; %bb.1: ; %Flow
-; GFX11-NEXT: s_and_not1_b32 vcc_lo, exec_lo, s2
-; GFX11-NEXT: s_cbranch_vccnz .LBB67_4
-; GFX11-NEXT: .LBB67_2: ; %cmp.true
-; GFX11-NEXT: s_pack_lh_b32_b16 s2, 0, s0
-; GFX11-NEXT: s_lshl_b32 s0, s0, 16
-; GFX11-NEXT: v_add_f32_e64 v0, 0x40c00000, s2
-; GFX11-NEXT: v_add_f32_e64 v1, 0x40c00000, s0
-; GFX11-NEXT: s_lshl_b32 s0, s1, 16
-; GFX11-NEXT: s_pack_lh_b32_b16 s1, 0, s1
-; GFX11-NEXT: v_add_f32_e64 v2, 0x40c00000, s0
-; GFX11-NEXT: v_add_f32_e64 v5, 0x40c00000, s1
-; GFX11-NEXT: v_bfe_u32 v4, v1, 16, 1
-; GFX11-NEXT: v_bfe_u32 v3, v0, 16, 1
-; GFX11-NEXT: v_or_b32_e32 v9, 0x400000, v1
-; GFX11-NEXT: v_bfe_u32 v6, v2, 16, 1
-; GFX11-NEXT: v_bfe_u32 v8, v5, 16, 1
-; GFX11-NEXT: v_add_nc_u32_e32 v4, v4, v1
-; GFX11-NEXT: v_cmp_u_f32_e32 vcc_lo, v1, v1
-; GFX11-NEXT: v_or_b32_e32 v10, 0x400000, v2
-; GFX11-NEXT: v_or_b32_e32 v7, 0x400000, v0
-; GFX11-NEXT: v_add_nc_u32_e32 v8, v8, v5
-; GFX11-NEXT: v_add_nc_u32_e32 v4, 0x7fff, v4
-; GFX11-NEXT: s_delay_alu instid0(VALU_DEP_1) | instskip(NEXT) | instid1(VALU_DEP_3)
-; GFX11-NEXT: v_dual_cndmask_b32 v1, v4, v9 :: v_dual_add_nc_u32 v6, v6, v2
-; GFX11-NEXT: v_add_nc_u32_e32 v4, 0x7fff, v8
-; GFX11-NEXT: v_or_b32_e32 v8, 0x400000, v5
-; GFX11-NEXT: s_delay_alu instid0(VALU_DEP_3) | instskip(SKIP_3) | instid1(VALU_DEP_2)
-; GFX11-NEXT: v_add_nc_u32_e32 v6, 0x7fff, v6
-; GFX11-NEXT: v_cmp_u_f32_e32 vcc_lo, v2, v2
-; GFX11-NEXT: v_add_nc_u32_e32 v3, v3, v0
-; GFX11-NEXT: v_lshrrev_b32_e32 v1, 16, v1
-; GFX11-NEXT: v_dual_cndmask_b32 v2, v6, v10 :: v_dual_add_nc_u32 v3, 0x7fff, v3
-; GFX11-NEXT: v_cmp_u_f32_e32 vcc_lo, v0, v0
-; GFX11-NEXT: s_delay_alu instid0(VALU_DEP_3) | instskip(NEXT) | instid1(VALU_DEP_3)
-; GFX11-NEXT: v_and_b32_e32 v1, 0xffff, v1
-; GFX11-NEXT: v_lshrrev_b32_e32 v2, 16, v2
-; GFX11-NEXT: s_delay_alu instid0(VALU_DEP_4) | instskip(SKIP_1) | instid1(VALU_DEP_3)
-; GFX11-NEXT: v_cndmask_b32_e32 v0, v3, v7, vcc_lo
-; GFX11-NEXT: v_cmp_u_f32_e32 vcc_lo, v5, v5
-; GFX11-NEXT: v_and_b32_e32 v2, 0xffff, v2
-; GFX11-NEXT: s_delay_alu instid0(VALU_DEP_3) | instskip(SKIP_1) | instid1(VALU_DEP_2)
-; GFX11-NEXT: v_lshrrev_b32_e32 v0, 16, v0
-; GFX11-NEXT: v_cndmask_b32_e32 v3, v4, v8, vcc_lo
-; GFX11-NEXT: v_lshl_or_b32 v0, v0, 16, v1
-; GFX11-NEXT: s_delay_alu instid0(VALU_DEP_2) | instskip(NEXT) | instid1(VALU_DEP_1)
-; GFX11-NEXT: v_lshrrev_b32_e32 v3, 16, v3
-; GFX11-NEXT: v_lshl_or_b32 v1, v3, 16, v2
-; GFX11-NEXT: s_setpc_b64 s[30:31]
-; GFX11-NEXT: .LBB67_3:
-; GFX11-NEXT: s_branch .LBB67_2
-; GFX11-NEXT: .LBB67_4:
-; GFX11-NEXT: v_dual_mov_b32 v0, s0 :: v_dual_mov_b32 v1, s1
-; GFX11-NEXT: s_setpc_b64 s[30:31]
+; GFX11-TRUE16-LABEL: bitcast_v4bf16_to_v2i32_scalar:
+; GFX11-TRUE16: ; %bb.0:
+; GFX11-TRUE16-NEXT: s_waitcnt vmcnt(0) expcnt(0) lgkmcnt(0)
+; GFX11-TRUE16-NEXT: s_cmp_lg_u32 s2, 0
+; GFX11-TRUE16-NEXT: s_mov_b32 s2, 0
+; GFX11-TRUE16-NEXT: s_cbranch_scc0 .LBB67_3
+; GFX11-TRUE16-NEXT: ; %bb.1: ; %Flow
+; GFX11-TRUE16-NEXT: s_and_not1_b32 vcc_lo, exec_lo, s2
+; GFX11-TRUE16-NEXT: s_cbranch_vccnz .LBB67_4
+; GFX11-TRUE16-NEXT: .LBB67_2: ; %cmp.true
+; GFX11-TRUE16-NEXT: s_pack_lh_b32_b16 s2, 0, s0
+; GFX11-TRUE16-NEXT: s_lshl_b32 s0, s0, 16
+; GFX11-TRUE16-NEXT: v_add_f32_e64 v0, 0x40c00000, s2
+; GFX11-TRUE16-NEXT: v_add_f32_e64 v1, 0x40c00000, s0
+; GFX11-TRUE16-NEXT: s_pack_lh_b32_b16 s2, 0, s1
+; GFX11-TRUE16-NEXT: s_lshl_b32 s1, s1, 16
+; GFX11-TRUE16-NEXT: v_add_f32_e64 v3, 0x40c00000, s2
+; GFX11-TRUE16-NEXT: v_bfe_u32 v2, v0, 16, 1
+; GFX11-TRUE16-NEXT: v_bfe_u32 v6, v1, 16, 1
+; GFX11-TRUE16-NEXT: v_add_f32_e64 v4, 0x40c00000, s1
+; GFX11-TRUE16-NEXT: v_or_b32_e32 v5, 0x400000, v0
+; GFX11-TRUE16-NEXT: v_bfe_u32 v8, v3, 16, 1
+; GFX11-TRUE16-NEXT: v_add_nc_u32_e32 v2, v2, v0
+; GFX11-TRUE16-NEXT: v_add_nc_u32_e32 v6, v6, v1
+; GFX11-TRUE16-NEXT: v_or_b32_e32 v7, 0x400000, v1
+; GFX11-TRUE16-NEXT: v_bfe_u32 v9, v4, 16, 1
+; GFX11-TRUE16-NEXT: v_add_nc_u32_e32 v8, v8, v3
+; GFX11-TRUE16-NEXT: v_cmp_u_f32_e32 vcc_lo, v0, v0
+; GFX11-TRUE16-NEXT: v_add_nc_u32_e32 v6, 0x7fff, v6
+; GFX11-TRUE16-NEXT: v_add_nc_u32_e32 v2, 0x7fff, v2
+; GFX11-TRUE16-NEXT: v_or_b32_e32 v10, 0x400000, v3
+; GFX11-TRUE16-NEXT: s_delay_alu instid0(VALU_DEP_2) | instskip(SKIP_3) | instid1(VALU_DEP_4)
+; GFX11-TRUE16-NEXT: v_dual_cndmask_b32 v0, v2, v5 :: v_dual_add_nc_u32 v9, v9, v4
+; GFX11-TRUE16-NEXT: v_or_b32_e32 v2, 0x400000, v4
+; GFX11-TRUE16-NEXT: v_add_nc_u32_e32 v5, 0x7fff, v8
+; GFX11-TRUE16-NEXT: v_cmp_u_f32_e32 vcc_lo, v3, v3
+; GFX11-TRUE16-NEXT: v_add_nc_u32_e32 v8, 0x7fff, v9
+; GFX11-TRUE16-NEXT: v_lshrrev_b32_e32 v9, 16, v0
+; GFX11-TRUE16-NEXT: s_delay_alu instid0(VALU_DEP_4) | instskip(SKIP_1) | instid1(VALU_DEP_2)
+; GFX11-TRUE16-NEXT: v_cndmask_b32_e32 v0, v5, v10, vcc_lo
+; GFX11-TRUE16-NEXT: v_cmp_u_f32_e32 vcc_lo, v1, v1
+; GFX11-TRUE16-NEXT: v_lshrrev_b32_e32 v3, 16, v0
+; GFX11-TRUE16-NEXT: v_cndmask_b32_e32 v1, v6, v7, vcc_lo
+; GFX11-TRUE16-NEXT: v_cmp_u_f32_e32 vcc_lo, v4, v4
+; GFX11-TRUE16-NEXT: s_delay_alu instid0(VALU_DEP_2) | instskip(SKIP_2) | instid1(VALU_DEP_2)
+; GFX11-TRUE16-NEXT: v_lshrrev_b32_e32 v0, 16, v1
+; GFX11-TRUE16-NEXT: v_cndmask_b32_e32 v2, v8, v2, vcc_lo
+; GFX11-TRUE16-NEXT: v_mov_b16_e32 v0.h, v9.l
+; GFX11-TRUE16-NEXT: v_lshrrev_b32_e32 v1, 16, v2
+; GFX11-TRUE16-NEXT: v_mov_b16_e32 v1.h, v3.l
+; GFX11-TRUE16-NEXT: s_setpc_b64 s[30:31]
+; GFX11-TRUE16-NEXT: .LBB67_3:
+; GFX11-TRUE16-NEXT: s_branch .LBB67_2
+; GFX11-TRUE16-NEXT: .LBB67_4:
+; GFX11-TRUE16-NEXT: v_dual_mov_b32 v0, s0 :: v_dual_mov_b32 v1, s1
+; GFX11-TRUE16-NEXT: s_setpc_b64 s[30:31]
+;
+; GFX11-FAKE16-LABEL: bitcast_v4bf16_to_v2i32_scalar:
+; GFX11-FAKE16: ; %bb.0:
+; GFX11-FAKE16-NEXT: s_waitcnt vmcnt(0) expcnt(0) lgkmcnt(0)
+; GFX11-FAKE16-NEXT: s_cmp_lg_u32 s2, 0
+; GFX11-FAKE16-NEXT: s_mov_b32 s2, 0
+; GFX11-FAKE16-NEXT: s_cbranch_scc0 .LBB67_3
+; GFX11-FAKE16-NEXT: ; %bb.1: ; %Flow
+; GFX11-FAKE16-NEXT: s_and_not1_b32 vcc_lo, exec_lo, s2
+; GFX11-FAKE16-NEXT: s_cbranch_vccnz .LBB67_4
+; GFX11-FAKE16-NEXT: .LBB67_2: ; %cmp.true
+; GFX11-FAKE16-NEXT: s_pack_lh_b32_b16 s2, 0, s0
+; GFX11-FAKE16-NEXT: s_lshl_b32 s0, s0, 16
+; GFX11-FAKE16-NEXT: v_add_f32_e64 v0, 0x40c00000, s2
+; GFX11-FAKE16-NEXT: v_add_f32_e64 v1, 0x40c00000, s0
+; GFX11-FAKE16-NEXT: s_lshl_b32 s0, s1, 16
+; GFX11-FAKE16-NEXT: s_pack_lh_b32_b16 s1, 0, s1
+; GFX11-FAKE16-NEXT: v_add_f32_e64 v2, 0x40c00000, s0
+; GFX11-FAKE16-NEXT: v_add_f32_e64 v5, 0x40c00000, s1
+; GFX11-FAKE16-NEXT: v_bfe_u32 v4, v1, 16, 1
+; GFX11-FAKE16-NEXT: v_bfe_u32 v3, v0, 16, 1
+; GFX11-FAKE16-NEXT: v_or_b32_e32 v9, 0x400000, v1
+; GFX11-FAKE16-NEXT: v_bfe_u32 v6, v2, 16, 1
+; GFX11-FAKE16-NEXT: v_bfe_u32 v8, v5, 16, 1
+; GFX11-FAKE16-NEXT: v_add_nc_u32_e32 v4, v4, v1
+; GFX11-FAKE16-NEXT: v_cmp_u_f32_e32 vcc_lo, v1, v1
+; GFX11-FAKE16-NEXT: v_or_b32_e32 v10, 0x400000, v2
+; GFX11-FAKE16-NEXT: v_or_b32_e32 v7, 0x400000, v0
+; GFX11-FAKE16-NEXT: v_add_nc_u32_e32 v8, v8, v5
+; GFX11-FAKE16-NEXT: v_add_nc_u32_e32 v4, 0x7fff, v4
+; GFX11-FAKE16-NEXT: s_delay_alu instid0(VALU_DEP_1) | instskip(NEXT) | instid1(VALU_DEP_3)
+; GFX11-FAKE16-NEXT: v_dual_cndmask_b32 v1, v4, v9 :: v_dual_add_nc_u32 v6, v6, v2
+; GFX11-FAKE16-NEXT: v_add_nc_u32_e32 v4, 0x7fff, v8
+; GFX11-FAKE16-NEXT: v_or_b32_e32 v8, 0x400000, v5
+; GFX11-FAKE16-NEXT: s_delay_alu instid0(VALU_DEP_3) | instskip(SKIP_3) | instid1(VALU_DEP_2)
+; GFX11-FAKE16-NEXT: v_add_nc_u32_e32 v6, 0x7fff, v6
+; GFX11-FAKE16-NEXT: v_cmp_u_f32_e32 vcc_lo, v2, v2
+; GFX11-FAKE16-NEXT: v_add_nc_u32_e32 v3, v3, v0
+; GFX11-FAKE16-NEXT: v_lshrrev_b32_e32 v1, 16, v1
+; GFX11-FAKE16-NEXT: v_dual_cndmask_b32 v2, v6, v10 :: v_dual_add_nc_u32 v3, 0x7fff, v3
+; GFX11-FAKE16-NEXT: v_cmp_u_f32_e32 vcc_lo, v0, v0
+; GFX11-FAKE16-NEXT: s_delay_alu instid0(VALU_DEP_3) | instskip(NEXT) | instid1(VALU_DEP_3)
+; GFX11-FAKE16-NEXT: v_and_b32_e32 v1, 0xffff, v1
+; GFX11-FAKE16-NEXT: v_lshrrev_b32_e32 v2, 16, v2
+; GFX11-FAKE16-NEXT: s_delay_alu instid0(VALU_DEP_4) | instskip(SKIP_1) | instid1(VALU_DEP_3)
+; GFX11-FAKE16-NEXT: v_cndmask_b32_e32 v0, v3, v7, vcc_lo
+; GFX11-FAKE16-NEXT: v_cmp_u_f32_e32 vcc_lo, v5, v5
+; GFX11-FAKE16-NEXT: v_and_b32_e32 v2, 0xffff, v2
+; GFX11-FAKE16-NEXT: s_delay_alu instid0(VALU_DEP_3) | instskip(SKIP_1) | instid1(VALU_DEP_2)
+; GFX11-FAKE16-NEXT: v_lshrrev_b32_e32 v0, 16, v0
+; GFX11-FAKE16-NEXT: v_cndmask_b32_e32 v3, v4, v8, vcc_lo
+; GFX11-FAKE16-NEXT: v_lshl_or_b32 v0, v0, 16, v1
+; GFX11-FAKE16-NEXT: s_delay_alu instid0(VALU_DEP_2) | instskip(NEXT) | instid1(VALU_DEP_1)
+; GFX11-FAKE16-NEXT: v_lshrrev_b32_e32 v3, 16, v3
+; GFX11-FAKE16-NEXT: v_lshl_or_b32 v1, v3, 16, v2
+; GFX11-FAKE16-NEXT: s_setpc_b64 s[30:31]
+; GFX11-FAKE16-NEXT: .LBB67_3:
+; GFX11-FAKE16-NEXT: s_branch .LBB67_2
+; GFX11-FAKE16-NEXT: .LBB67_4:
+; GFX11-FAKE16-NEXT: v_dual_mov_b32 v0, s0 :: v_dual_mov_b32 v1, s1
+; GFX11-FAKE16-NEXT: s_setpc_b64 s[30:31]
%cmp = icmp eq i32 %b, 0
br i1 %cmp, label %cmp.true, label %cmp.false
@@ -10905,66 +11076,123 @@ define inreg <2 x float> @bitcast_v4bf16_to_v2f32_scalar(<4 x bfloat> inreg %a,
; GFX9-NEXT: v_mov_b32_e32 v1, s17
; GFX9-NEXT: s_setpc_b64 s[30:31]
;
-; GFX11-LABEL: bitcast_v4bf16_to_v2f32_scalar:
-; GFX11: ; %bb.0:
-; GFX11-NEXT: s_waitcnt vmcnt(0) expcnt(0) lgkmcnt(0)
-; GFX11-NEXT: s_cmp_lg_u32 s2, 0
-; GFX11-NEXT: s_mov_b32 s2, 0
-; GFX11-NEXT: s_cbranch_scc0 .LBB83_3
-; GFX11-NEXT: ; %bb.1: ; %Flow
-; GFX11-NEXT: s_and_not1_b32 vcc_lo, exec_lo, s2
-; GFX11-NEXT: s_cbranch_vccnz .LBB83_4
-; GFX11-NEXT: .LBB83_2: ; %cmp.true
-; GFX11-NEXT: s_pack_lh_b32_b16 s2, 0, s0
-; GFX11-NEXT: s_lshl_b32 s0, s0, 16
-; GFX11-NEXT: v_add_f32_e64 v0, 0x40c00000, s2
-; GFX11-NEXT: v_add_f32_e64 v1, 0x40c00000, s0
-; GFX11-NEXT: s_lshl_b32 s0, s1, 16
-; GFX11-NEXT: s_pack_lh_b32_b16 s1, 0, s1
-; GFX11-NEXT: v_add_f32_e64 v2, 0x40c00000, s0
-; GFX11-NEXT: v_add_f32_e64 v5, 0x40c00000, s1
-; GFX11-NEXT: v_bfe_u32 v4, v1, 16, 1
-; GFX11-NEXT: v_bfe_u32 v3, v0, 16, 1
-; GFX11-NEXT: v_or_b32_e32 v9, 0x400000, v1
-; GFX11-NEXT: v_bfe_u32 v6, v2, 16, 1
-; GFX11-NEXT: v_bfe_u32 v8, v5, 16, 1
-; GFX11-NEXT: v_add_nc_u32_e32 v4, v4, v1
-; GFX11-NEXT: v_cmp_u_f32_e32 vcc_lo, v1, v1
-; GFX11-NEXT: v_or_b32_e32 v10, 0x400000, v2
-; GFX11-NEXT: v_or_b32_e32 v7, 0x400000, v0
-; GFX11-NEXT: v_add_nc_u32_e32 v8, v8, v5
-; GFX11-NEXT: v_add_nc_u32_e32 v4, 0x7fff, v4
-; GFX11-NEXT: s_delay_alu instid0(VALU_DEP_1) | instskip(NEXT) | instid1(VALU_DEP_3)
-; GFX11-NEXT: v_dual_cndmask_b32 v1, v4, v9 :: v_dual_add_nc_u32 v6, v6, v2
-; GFX11-NEXT: v_add_nc_u32_e32 v4, 0x7fff, v8
-; GFX11-NEXT: v_or_b32_e32 v8, 0x400000, v5
-; GFX11-NEXT: s_delay_alu instid0(VALU_DEP_3) | instskip(SKIP_3) | instid1(VALU_DEP_2)
-; GFX11-NEXT: v_add_nc_u32_e32 v6, 0x7fff, v6
-; GFX11-NEXT: v_cmp_u_f32_e32 vcc_lo, v2, v2
-; GFX11-NEXT: v_add_nc_u32_e32 v3, v3, v0
-; GFX11-NEXT: v_lshrrev_b32_e32 v1, 16, v1
-; GFX11-NEXT: v_dual_cndmask_b32 v2, v6, v10 :: v_dual_add_nc_u32 v3, 0x7fff, v3
-; GFX11-NEXT: v_cmp_u_f32_e32 vcc_lo, v0, v0
-; GFX11-NEXT: s_delay_alu instid0(VALU_DEP_3) | instskip(NEXT) | instid1(VALU_DEP_3)
-; GFX11-NEXT: v_and_b32_e32 v1, 0xffff, v1
-; GFX11-NEXT: v_lshrrev_b32_e32 v2, 16, v2
-; GFX11-NEXT: s_delay_alu instid0(VALU_DEP_4) | instskip(SKIP_1) | instid1(VALU_DEP_3)
-; GFX11-NEXT: v_cndmask_b32_e32 v0, v3, v7, vcc_lo
-; GFX11-NEXT: v_cmp_u_f32_e32 vcc_lo, v5, v5
-; GFX11-NEXT: v_and_b32_e32 v2, 0xffff, v2
-; GFX11-NEXT: s_delay_alu instid0(VALU_DEP_3) | instskip(SKIP_1) | instid1(VALU_DEP_2)
-; GFX11-NEXT: v_lshrrev_b32_e32 v0, 16, v0
-; GFX11-NEXT: v_cndmask_b32_e32 v3, v4, v8, vcc_lo
-; GFX11-NEXT: v_lshl_or_b32 v0, v0, 16, v1
-; GFX11-NEXT: s_delay_alu instid0(VALU_DEP_2) | instskip(NEXT) | instid1(VALU_DEP_1)
-; GFX11-NEXT: v_lshrrev_b32_e32 v3, 16, v3
-; GFX11-NEXT: v_lshl_or_b32 v1, v3, 16, v2
-; GFX11-NEXT: s_setpc_b64 s[30:31]
-; GFX11-NEXT: .LBB83_3:
-; GFX11-NEXT: s_branch .LBB83_2
-; GFX11-NEXT: .LBB83_4:
-; GFX11-NEXT: v_dual_mov_b32 v0, s0 :: v_dual_mov_b32 v1, s1
-; GFX11-NEXT: s_setpc_b64 s[30:31]
+; GFX11-TRUE16-LABEL: bitcast_v4bf16_to_v2f32_scalar:
+; GFX11-TRUE16: ; %bb.0:
+; GFX11-TRUE16-NEXT: s_waitcnt vmcnt(0) expcnt(0) lgkmcnt(0)
+; GFX11-TRUE16-NEXT: s_cmp_lg_u32 s2, 0
+; GFX11-TRUE16-NEXT: s_mov_b32 s2, 0
+; GFX11-TRUE16-NEXT: s_cbranch_scc0 .LBB83_3
+; GFX11-TRUE16-NEXT: ; %bb.1: ; %Flow
+; GFX11-TRUE16-NEXT: s_and_not1_b32 vcc_lo, exec_lo, s2
+; GFX11-TRUE16-NEXT: s_cbranch_vccnz .LBB83_4
+; GFX11-TRUE16-NEXT: .LBB83_2: ; %cmp.true
+; GFX11-TRUE16-NEXT: s_pack_lh_b32_b16 s2, 0, s0
+; GFX11-TRUE16-NEXT: s_lshl_b32 s0, s0, 16
+; GFX11-TRUE16-NEXT: v_add_f32_e64 v0, 0x40c00000, s2
+; GFX11-TRUE16-NEXT: v_add_f32_e64 v1, 0x40c00000, s0
+; GFX11-TRUE16-NEXT: s_pack_lh_b32_b16 s2, 0, s1
+; GFX11-TRUE16-NEXT: s_lshl_b32 s1, s1, 16
+; GFX11-TRUE16-NEXT: v_add_f32_e64 v3, 0x40c00000, s2
+; GFX11-TRUE16-NEXT: v_bfe_u32 v2, v0, 16, 1
+; GFX11-TRUE16-NEXT: v_bfe_u32 v6, v1, 16, 1
+; GFX11-TRUE16-NEXT: v_add_f32_e64 v4, 0x40c00000, s1
+; GFX11-TRUE16-NEXT: v_or_b32_e32 v5, 0x400000, v0
+; GFX11-TRUE16-NEXT: v_bfe_u32 v8, v3, 16, 1
+; GFX11-TRUE16-NEXT: v_add_nc_u32_e32 v2, v2, v0
+; GFX11-TRUE16-NEXT: v_add_nc_u32_e32 v6, v6, v1
+; GFX11-TRUE16-NEXT: v_or_b32_e32 v7, 0x400000, v1
+; GFX11-TRUE16-NEXT: v_bfe_u32 v9, v4, 16, 1
+; GFX11-TRUE16-NEXT: v_add_nc_u32_e32 v8, v8, v3
+; GFX11-TRUE16-NEXT: v_cmp_u_f32_e32 vcc_lo, v0, v0
+; GFX11-TRUE16-NEXT: v_add_nc_u32_e32 v6, 0x7fff, v6
+; GFX11-TRUE16-NEXT: v_add_nc_u32_e32 v2, 0x7fff, v2
+; GFX11-TRUE16-NEXT: v_or_b32_e32 v10, 0x400000, v3
+; GFX11-TRUE16-NEXT: s_delay_alu instid0(VALU_DEP_2) | instskip(SKIP_3) | instid1(VALU_DEP_4)
+; GFX11-TRUE16-NEXT: v_dual_cndmask_b32 v0, v2, v5 :: v_dual_add_nc_u32 v9, v9, v4
+; GFX11-TRUE16-NEXT: v_or_b32_e32 v2, 0x400000, v4
+; GFX11-TRUE16-NEXT: v_add_nc_u32_e32 v5, 0x7fff, v8
+; GFX11-TRUE16-NEXT: v_cmp_u_f32_e32 vcc_lo, v3, v3
+; GFX11-TRUE16-NEXT: v_add_nc_u32_e32 v8, 0x7fff, v9
+; GFX11-TRUE16-NEXT: v_lshrrev_b32_e32 v9, 16, v0
+; GFX11-TRUE16-NEXT: s_delay_alu instid0(VALU_DEP_4) | instskip(SKIP_1) | instid1(VALU_DEP_2)
+; GFX11-TRUE16-NEXT: v_cndmask_b32_e32 v0, v5, v10, vcc_lo
+; GFX11-TRUE16-NEXT: v_cmp_u_f32_e32 vcc_lo, v1, v1
+; GFX11-TRUE16-NEXT: v_lshrrev_b32_e32 v3, 16, v0
+; GFX11-TRUE16-NEXT: v_cndmask_b32_e32 v1, v6, v7, vcc_lo
+; GFX11-TRUE16-NEXT: v_cmp_u_f32_e32 vcc_lo, v4, v4
+; GFX11-TRUE16-NEXT: s_delay_alu instid0(VALU_DEP_2) | instskip(SKIP_2) | instid1(VALU_DEP_2)
+; GFX11-TRUE16-NEXT: v_lshrrev_b32_e32 v0, 16, v1
+; GFX11-TRUE16-NEXT: v_cndmask_b32_e32 v2, v8, v2, vcc_lo
+; GFX11-TRUE16-NEXT: v_mov_b16_e32 v0.h, v9.l
+; GFX11-TRUE16-NEXT: v_lshrrev_b32_e32 v1, 16, v2
+; GFX11-TRUE16-NEXT: v_mov_b16_e32 v1.h, v3.l
+; GFX11-TRUE16-NEXT: s_setpc_b64 s[30:31]
+; GFX11-TRUE16-NEXT: .LBB83_3:
+; GFX11-TRUE16-NEXT: s_branch .LBB83_2
+; GFX11-TRUE16-NEXT: .LBB83_4:
+; GFX11-TRUE16-NEXT: v_dual_mov_b32 v0, s0 :: v_dual_mov_b32 v1, s1
+; GFX11-TRUE16-NEXT: s_setpc_b64 s[30:31]
+;
+; GFX11-FAKE16-LABEL: bitcast_v4bf16_to_v2f32_scalar:
+; GFX11-FAKE16: ; %bb.0:
+; GFX11-FAKE16-NEXT: s_waitcnt vmcnt(0) expcnt(0) lgkmcnt(0)
+; GFX11-FAKE16-NEXT: s_cmp_lg_u32 s2, 0
+; GFX11-FAKE16-NEXT: s_mov_b32 s2, 0
+; GFX11-FAKE16-NEXT: s_cbranch_scc0 .LBB83_3
+; GFX11-FAKE16-NEXT: ; %bb.1: ; %Flow
+; GFX11-FAKE16-NEXT: s_and_not1_b32 vcc_lo, exec_lo, s2
+; GFX11-FAKE16-NEXT: s_cbranch_vccnz .LBB83_4
+; GFX11-FAKE16-NEXT: .LBB83_2: ; %cmp.true
+; GFX11-FAKE16-NEXT: s_pack_lh_b32_b16 s2, 0, s0
+; GFX11-FAKE16-NEXT: s_lshl_b32 s0, s0, 16
+; GFX11-FAKE16-NEXT: v_add_f32_e64 v0, 0x40c00000, s2
+; GFX11-FAKE16-NEXT: v_add_f32_e64 v1, 0x40c00000, s0
+; GFX11-FAKE16-NEXT: s_lshl_b32 s0, s1, 16
+; GFX11-FAKE16-NEXT: s_pack_lh_b32_b16 s1, 0, s1
+; GFX11-FAKE16-NEXT: v_add_f32_e64 v2, 0x40c00000, s0
+; GFX11-FAKE16-NEXT: v_add_f32_e64 v5, 0x40c00000, s1
+; GFX11-FAKE16-NEXT: v_bfe_u32 v4, v1, 16, 1
+; GFX11-FAKE16-NEXT: v_bfe_u32 v3, v0, 16, 1
+; GFX11-FAKE16-NEXT: v_or_b32_e32 v9, 0x400000, v1
+; GFX11-FAKE16-NEXT: v_bfe_u32 v6, v2, 16, 1
+; GFX11-FAKE16-NEXT: v_bfe_u32 v8, v5, 16, 1
+; GFX11-FAKE16-NEXT: v_add_nc_u32_e32 v4, v4, v1
+; GFX11-FAKE16-NEXT: v_cmp_u_f32_e32 vcc_lo, v1, v1
+; GFX11-FAKE16-NEXT: v_or_b32_e32 v10, 0x400000, v2
+; GFX11-FAKE16-NEXT: v_or_b32_e32 v7, 0x400000, v0
+; GFX11-FAKE16-NEXT: v_add_nc_u32_e32 v8, v8, v5
+; GFX11-FAKE16-NEXT: v_add_nc_u32_e32 v4, 0x7fff, v4
+; GFX11-FAKE16-NEXT: s_delay_alu instid0(VALU_DEP_1) | instskip(NEXT) | instid1(VALU_DEP_3)
+; GFX11-FAKE16-NEXT: v_dual_cndmask_b32 v1, v4, v9 :: v_dual_add_nc_u32 v6, v6, v2
+; GFX11-FAKE16-NEXT: v_add_nc_u32_e32 v4, 0x7fff, v8
+; GFX11-FAKE16-NEXT: v_or_b32_e32 v8, 0x400000, v5
+; GFX11-FAKE16-NEXT: s_delay_alu instid0(VALU_DEP_3) | instskip(SKIP_3) | instid1(VALU_DEP_2)
+; GFX11-FAKE16-NEXT: v_add_nc_u32_e32 v6, 0x7fff, v6
+; GFX11-FAKE16-NEXT: v_cmp_u_f32_e32 vcc_lo, v2, v2
+; GFX11-FAKE16-NEXT: v_add_nc_u32_e32 v3, v3, v0
+; GFX11-FAKE16-NEXT: v_lshrrev_b32_e32 v1, 16, v1
+; GFX11-FAKE16-NEXT: v_dual_cndmask_b32 v2, v6, v10 :: v_dual_add_nc_u32 v3, 0x7fff, v3
+; GFX11-FAKE16-NEXT: v_cmp_u_f32_e32 vcc_lo, v0, v0
+; GFX11-FAKE16-NEXT: s_delay_alu instid0(VALU_DEP_3) | instskip(NEXT) | instid1(VALU_DEP_3)
+; GFX11-FAKE16-NEXT: v_and_b32_e32 v1, 0xffff, v1
+; GFX11-FAKE16-NEXT: v_lshrrev_b32_e32 v2, 16, v2
+; GFX11-FAKE16-NEXT: s_delay_alu instid0(VALU_DEP_4) | instskip(SKIP_1) | instid1(VALU_DEP_3)
+; GFX11-FAKE16-NEXT: v_cndmask_b32_e32 v0, v3, v7, vcc_lo
+; GFX11-FAKE16-NEXT: v_cmp_u_f32_e32 vcc_lo, v5, v5
+; GFX11-FAKE16-NEXT: v_and_b32_e32 v2, 0xffff, v2
+; GFX11-FAKE16-NEXT: s_delay_alu instid0(VALU_DEP_3) | instskip(SKIP_1) | instid1(VALU_DEP_2)
+; GFX11-FAKE16-NEXT: v_lshrrev_b32_e32 v0, 16, v0
+; GFX11-FAKE16-NEXT: v_cndmask_b32_e32 v3, v4, v8, vcc_lo
+; GFX11-FAKE16-NEXT: v_lshl_or_b32 v0, v0, 16, v1
+; GFX11-FAKE16-NEXT: s_delay_alu instid0(VALU_DEP_2) | instskip(NEXT) | instid1(VALU_DEP_1)
+; GFX11-FAKE16-NEXT: v_lshrrev_b32_e32 v3, 16, v3
+; GFX11-FAKE16-NEXT: v_lshl_or_b32 v1, v3, 16, v2
+; GFX11-FAKE16-NEXT: s_setpc_b64 s[30:31]
+; GFX11-FAKE16-NEXT: .LBB83_3:
+; GFX11-FAKE16-NEXT: s_branch .LBB83_2
+; GFX11-FAKE16-NEXT: .LBB83_4:
+; GFX11-FAKE16-NEXT: v_dual_mov_b32 v0, s0 :: v_dual_mov_b32 v1, s1
+; GFX11-FAKE16-NEXT: s_setpc_b64 s[30:31]
%cmp = icmp eq i32 %b, 0
br i1 %cmp, label %cmp.true, label %cmp.false
@@ -12814,47 +13042,40 @@ define <4 x i16> @bitcast_v4bf16_to_v4i16(<4 x bfloat> %a, i32 %b) {
; GFX11-TRUE16-NEXT: ; %bb.1: ; %cmp.true
; GFX11-TRUE16-NEXT: v_mov_b16_e32 v2.l, 0
; GFX11-TRUE16-NEXT: v_mov_b16_e32 v2.h, v0.l
-; GFX11-TRUE16-NEXT: v_and_b32_e32 v3, 0xffff0000, v1
-; GFX11-TRUE16-NEXT: s_delay_alu instid0(VALU_DEP_2) | instskip(SKIP_1) | instid1(VALU_DEP_3)
+; GFX11-TRUE16-NEXT: s_delay_alu instid0(VALU_DEP_1) | instskip(SKIP_1) | instid1(VALU_DEP_1)
; GFX11-TRUE16-NEXT: v_add_f32_e32 v4, 0x40c00000, v2
; GFX11-TRUE16-NEXT: v_mov_b16_e32 v2.h, v1.l
+; GFX11-TRUE16-NEXT: v_add_f32_e32 v2, 0x40c00000, v2
+; GFX11-TRUE16-NEXT: s_delay_alu instid0(VALU_DEP_1) | instskip(SKIP_1) | instid1(VALU_DEP_2)
+; GFX11-TRUE16-NEXT: v_bfe_u32 v9, v2, 16, 1
+; GFX11-TRUE16-NEXT: v_or_b32_e32 v11, 0x400000, v2
+; GFX11-TRUE16-NEXT: v_add3_u32 v9, v9, v2, 0x7fff
+; GFX11-TRUE16-NEXT: v_and_b32_e32 v3, 0xffff0000, v1
+; GFX11-TRUE16-NEXT: s_delay_alu instid0(VALU_DEP_1) | instskip(SKIP_3) | instid1(VALU_DEP_4)
; GFX11-TRUE16-NEXT: v_add_f32_e32 v1, 0x40c00000, v3
-; GFX11-TRUE16-NEXT: s_delay_alu instid0(VALU_DEP_3) | instskip(SKIP_1) | instid1(VALU_DEP_3)
; GFX11-TRUE16-NEXT: v_bfe_u32 v3, v4, 16, 1
; GFX11-TRUE16-NEXT: v_or_b32_e32 v5, 0x400000, v4
+; GFX11-TRUE16-NEXT: v_cmp_u_f32_e32 vcc_lo, v4, v4
; GFX11-TRUE16-NEXT: v_bfe_u32 v8, v1, 16, 1
-; GFX11-TRUE16-NEXT: v_or_b32_e32 v9, 0x400000, v1
-; GFX11-TRUE16-NEXT: v_add_f32_e32 v2, 0x40c00000, v2
+; GFX11-TRUE16-NEXT: s_delay_alu instid0(VALU_DEP_4) | instskip(SKIP_2) | instid1(VALU_DEP_4)
; GFX11-TRUE16-NEXT: v_add3_u32 v3, v3, v4, 0x7fff
-; GFX11-TRUE16-NEXT: s_delay_alu instid0(VALU_DEP_4) | instskip(SKIP_1) | instid1(VALU_DEP_4)
-; GFX11-TRUE16-NEXT: v_add3_u32 v8, v8, v1, 0x7fff
; GFX11-TRUE16-NEXT: v_and_b32_e32 v0, 0xffff0000, v0
-; GFX11-TRUE16-NEXT: v_bfe_u32 v10, v2, 16, 1
-; GFX11-TRUE16-NEXT: v_or_b32_e32 v11, 0x400000, v2
-; GFX11-TRUE16-NEXT: s_delay_alu instid0(VALU_DEP_3) | instskip(NEXT) | instid1(VALU_DEP_3)
-; GFX11-TRUE16-NEXT: v_add_f32_e32 v0, 0x40c00000, v0
-; GFX11-TRUE16-NEXT: v_add3_u32 v10, v10, v2, 0x7fff
-; GFX11-TRUE16-NEXT: s_delay_alu instid0(VALU_DEP_2) | instskip(SKIP_2) | instid1(VALU_DEP_3)
+; GFX11-TRUE16-NEXT: v_or_b32_e32 v10, 0x400000, v1
+; GFX11-TRUE16-NEXT: v_add3_u32 v8, v8, v1, 0x7fff
+; GFX11-TRUE16-NEXT: s_delay_alu instid0(VALU_DEP_3) | instskip(SKIP_1) | instid1(VALU_DEP_2)
+; GFX11-TRUE16-NEXT: v_dual_cndmask_b32 v3, v3, v5 :: v_dual_add_f32 v0, 0x40c00000, v0
+; GFX11-TRUE16-NEXT: v_cmp_u_f32_e32 vcc_lo, v2, v2
; GFX11-TRUE16-NEXT: v_bfe_u32 v6, v0, 16, 1
; GFX11-TRUE16-NEXT: v_or_b32_e32 v7, 0x400000, v0
+; GFX11-TRUE16-NEXT: v_cndmask_b32_e32 v2, v9, v11, vcc_lo
; GFX11-TRUE16-NEXT: v_cmp_u_f32_e32 vcc_lo, v0, v0
+; GFX11-TRUE16-NEXT: s_delay_alu instid0(VALU_DEP_4) | instskip(NEXT) | instid1(VALU_DEP_1)
; GFX11-TRUE16-NEXT: v_add3_u32 v6, v6, v0, 0x7fff
-; GFX11-TRUE16-NEXT: s_delay_alu instid0(VALU_DEP_1) | instskip(SKIP_3) | instid1(VALU_DEP_2)
; GFX11-TRUE16-NEXT: v_cndmask_b32_e32 v0, v6, v7, vcc_lo
; GFX11-TRUE16-NEXT: v_cmp_u_f32_e32 vcc_lo, v1, v1
-; GFX11-TRUE16-NEXT: v_cndmask_b32_e32 v1, v8, v9, vcc_lo
-; GFX11-TRUE16-NEXT: v_cmp_u_f32_e32 vcc_lo, v2, v2
-; GFX11-TRUE16-NEXT: v_mov_b16_e32 v0.l, v1.h
-; GFX11-TRUE16-NEXT: v_cndmask_b32_e32 v2, v10, v11, vcc_lo
-; GFX11-TRUE16-NEXT: v_cmp_u_f32_e32 vcc_lo, v4, v4
-; GFX11-TRUE16-NEXT: s_delay_alu instid0(VALU_DEP_2) | instskip(SKIP_2) | instid1(VALU_DEP_3)
-; GFX11-TRUE16-NEXT: v_lshrrev_b32_e32 v1, 16, v2
-; GFX11-TRUE16-NEXT: v_cndmask_b32_e32 v3, v3, v5, vcc_lo
-; GFX11-TRUE16-NEXT: v_mov_b16_e32 v2.l, v0.h
-; GFX11-TRUE16-NEXT: v_lshl_or_b32 v1, v0, 16, v1
-; GFX11-TRUE16-NEXT: s_delay_alu instid0(VALU_DEP_3) | instskip(NEXT) | instid1(VALU_DEP_1)
-; GFX11-TRUE16-NEXT: v_lshrrev_b32_e32 v3, 16, v3
-; GFX11-TRUE16-NEXT: v_lshl_or_b32 v0, v2, 16, v3
+; GFX11-TRUE16-NEXT: v_mov_b16_e32 v0.l, v3.h
+; GFX11-TRUE16-NEXT: v_cndmask_b32_e32 v1, v8, v10, vcc_lo
+; GFX11-TRUE16-NEXT: v_mov_b16_e32 v1.l, v2.h
; GFX11-TRUE16-NEXT: .LBB94_2: ; %end
; GFX11-TRUE16-NEXT: s_or_b32 exec_lo, exec_lo, s0
; GFX11-TRUE16-NEXT: s_setpc_b64 s[30:31]
@@ -13071,60 +13292,112 @@ define inreg <4 x i16> @bitcast_v4bf16_to_v4i16_scalar(<4 x bfloat> inreg %a, i3
; GFX9-NEXT: v_mov_b32_e32 v1, s17
; GFX9-NEXT: s_setpc_b64 s[30:31]
;
-; GFX11-LABEL: bitcast_v4bf16_to_v4i16_scalar:
-; GFX11: ; %bb.0:
-; GFX11-NEXT: s_waitcnt vmcnt(0) expcnt(0) lgkmcnt(0)
-; GFX11-NEXT: s_cmp_lg_u32 s2, 0
-; GFX11-NEXT: s_mov_b32 s2, 0
-; GFX11-NEXT: s_cbranch_scc0 .LBB95_3
-; GFX11-NEXT: ; %bb.1: ; %Flow
-; GFX11-NEXT: s_and_not1_b32 vcc_lo, exec_lo, s2
-; GFX11-NEXT: s_cbranch_vccnz .LBB95_4
-; GFX11-NEXT: .LBB95_2: ; %cmp.true
-; GFX11-NEXT: s_pack_lh_b32_b16 s2, 0, s1
-; GFX11-NEXT: s_lshl_b32 s1, s1, 16
-; GFX11-NEXT: v_add_f32_e64 v0, 0x40c00000, s2
-; GFX11-NEXT: v_add_f32_e64 v1, 0x40c00000, s1
-; GFX11-NEXT: s_lshl_b32 s1, s0, 16
-; GFX11-NEXT: s_pack_lh_b32_b16 s0, 0, s0
-; GFX11-NEXT: v_add_f32_e64 v2, 0x40c00000, s1
-; GFX11-NEXT: v_add_f32_e64 v5, 0x40c00000, s0
-; GFX11-NEXT: v_bfe_u32 v4, v1, 16, 1
-; GFX11-NEXT: v_or_b32_e32 v7, 0x400000, v0
-; GFX11-NEXT: v_bfe_u32 v3, v0, 16, 1
-; GFX11-NEXT: v_bfe_u32 v6, v2, 16, 1
-; GFX11-NEXT: v_bfe_u32 v8, v5, 16, 1
-; GFX11-NEXT: v_add_nc_u32_e32 v4, v4, v1
-; GFX11-NEXT: v_or_b32_e32 v9, 0x400000, v1
-; GFX11-NEXT: v_cmp_u_f32_e32 vcc_lo, v1, v1
-; GFX11-NEXT: v_or_b32_e32 v10, 0x400000, v2
-; GFX11-NEXT: v_add_nc_u32_e32 v8, v8, v5
-; GFX11-NEXT: v_add_nc_u32_e32 v4, 0x7fff, v4
-; GFX11-NEXT: s_delay_alu instid0(VALU_DEP_1) | instskip(NEXT) | instid1(VALU_DEP_1)
-; GFX11-NEXT: v_dual_cndmask_b32 v1, v4, v9 :: v_dual_add_nc_u32 v6, v6, v2
-; GFX11-NEXT: v_add_nc_u32_e32 v6, 0x7fff, v6
-; GFX11-NEXT: v_cmp_u_f32_e32 vcc_lo, v2, v2
-; GFX11-NEXT: v_add_nc_u32_e32 v3, v3, v0
-; GFX11-NEXT: v_add_nc_u32_e32 v4, 0x7fff, v8
-; GFX11-NEXT: v_or_b32_e32 v8, 0x400000, v5
-; GFX11-NEXT: v_lshrrev_b32_e32 v1, 16, v1
-; GFX11-NEXT: s_delay_alu instid0(VALU_DEP_4) | instskip(SKIP_1) | instid1(VALU_DEP_2)
-; GFX11-NEXT: v_dual_cndmask_b32 v2, v6, v10 :: v_dual_add_nc_u32 v3, 0x7fff, v3
-; GFX11-NEXT: v_cmp_u_f32_e32 vcc_lo, v0, v0
-; GFX11-NEXT: v_lshrrev_b32_e32 v2, 16, v2
-; GFX11-NEXT: s_delay_alu instid0(VALU_DEP_3) | instskip(SKIP_1) | instid1(VALU_DEP_2)
-; GFX11-NEXT: v_cndmask_b32_e32 v3, v3, v7, vcc_lo
-; GFX11-NEXT: v_cmp_u_f32_e32 vcc_lo, v5, v5
-; GFX11-NEXT: v_and_or_b32 v1, 0xffff0000, v3, v1
-; GFX11-NEXT: v_cndmask_b32_e32 v0, v4, v8, vcc_lo
-; GFX11-NEXT: s_delay_alu instid0(VALU_DEP_1)
-; GFX11-NEXT: v_and_or_b32 v0, 0xffff0000, v0, v2
-; GFX11-NEXT: s_setpc_b64 s[30:31]
-; GFX11-NEXT: .LBB95_3:
-; GFX11-NEXT: s_branch .LBB95_2
-; GFX11-NEXT: .LBB95_4:
-; GFX11-NEXT: v_dual_mov_b32 v0, s0 :: v_dual_mov_b32 v1, s1
-; GFX11-NEXT: s_setpc_b64 s[30:31]
+; GFX11-TRUE16-LABEL: bitcast_v4bf16_to_v4i16_scalar:
+; GFX11-TRUE16: ; %bb.0:
+; GFX11-TRUE16-NEXT: s_waitcnt vmcnt(0) expcnt(0) lgkmcnt(0)
+; GFX11-TRUE16-NEXT: s_cmp_lg_u32 s2, 0
+; GFX11-TRUE16-NEXT: s_mov_b32 s2, 0
+; GFX11-TRUE16-NEXT: s_cbranch_scc0 .LBB95_3
+; GFX11-TRUE16-NEXT: ; %bb.1: ; %Flow
+; GFX11-TRUE16-NEXT: s_and_not1_b32 vcc_lo, exec_lo, s2
+; GFX11-TRUE16-NEXT: s_cbranch_vccnz .LBB95_4
+; GFX11-TRUE16-NEXT: .LBB95_2: ; %cmp.true
+; GFX11-TRUE16-NEXT: s_pack_lh_b32_b16 s2, 0, s1
+; GFX11-TRUE16-NEXT: s_lshl_b32 s1, s1, 16
+; GFX11-TRUE16-NEXT: v_add_f32_e64 v0, 0x40c00000, s2
+; GFX11-TRUE16-NEXT: v_add_f32_e64 v1, 0x40c00000, s1
+; GFX11-TRUE16-NEXT: s_lshl_b32 s1, s0, 16
+; GFX11-TRUE16-NEXT: s_pack_lh_b32_b16 s0, 0, s0
+; GFX11-TRUE16-NEXT: v_add_f32_e64 v2, 0x40c00000, s1
+; GFX11-TRUE16-NEXT: v_or_b32_e32 v7, 0x400000, v0
+; GFX11-TRUE16-NEXT: v_bfe_u32 v4, v1, 16, 1
+; GFX11-TRUE16-NEXT: v_bfe_u32 v3, v0, 16, 1
+; GFX11-TRUE16-NEXT: v_add_f32_e64 v5, 0x40c00000, s0
+; GFX11-TRUE16-NEXT: v_bfe_u32 v6, v2, 16, 1
+; GFX11-TRUE16-NEXT: v_or_b32_e32 v9, 0x400000, v1
+; GFX11-TRUE16-NEXT: v_add_nc_u32_e32 v4, v4, v1
+; GFX11-TRUE16-NEXT: v_cmp_u_f32_e32 vcc_lo, v1, v1
+; GFX11-TRUE16-NEXT: v_bfe_u32 v8, v5, 16, 1
+; GFX11-TRUE16-NEXT: v_add_nc_u32_e32 v6, v6, v2
+; GFX11-TRUE16-NEXT: v_or_b32_e32 v10, 0x400000, v2
+; GFX11-TRUE16-NEXT: v_add_nc_u32_e32 v4, 0x7fff, v4
+; GFX11-TRUE16-NEXT: v_add_nc_u32_e32 v3, v3, v0
+; GFX11-TRUE16-NEXT: v_add_nc_u32_e32 v8, v8, v5
+; GFX11-TRUE16-NEXT: v_add_nc_u32_e32 v6, 0x7fff, v6
+; GFX11-TRUE16-NEXT: s_delay_alu instid0(VALU_DEP_4)
+; GFX11-TRUE16-NEXT: v_cndmask_b32_e32 v4, v4, v9, vcc_lo
+; GFX11-TRUE16-NEXT: v_cmp_u_f32_e32 vcc_lo, v2, v2
+; GFX11-TRUE16-NEXT: v_add_nc_u32_e32 v3, 0x7fff, v3
+; GFX11-TRUE16-NEXT: v_add_nc_u32_e32 v8, 0x7fff, v8
+; GFX11-TRUE16-NEXT: v_or_b32_e32 v9, 0x400000, v5
+; GFX11-TRUE16-NEXT: v_cndmask_b32_e32 v2, v6, v10, vcc_lo
+; GFX11-TRUE16-NEXT: v_cmp_u_f32_e32 vcc_lo, v0, v0
+; GFX11-TRUE16-NEXT: v_cndmask_b32_e32 v1, v3, v7, vcc_lo
+; GFX11-TRUE16-NEXT: v_cmp_u_f32_e32 vcc_lo, v5, v5
+; GFX11-TRUE16-NEXT: v_mov_b16_e32 v1.l, v4.h
+; GFX11-TRUE16-NEXT: v_cndmask_b32_e32 v0, v8, v9, vcc_lo
+; GFX11-TRUE16-NEXT: v_mov_b16_e32 v0.l, v2.h
+; GFX11-TRUE16-NEXT: s_setpc_b64 s[30:31]
+; GFX11-TRUE16-NEXT: .LBB95_3:
+; GFX11-TRUE16-NEXT: s_branch .LBB95_2
+; GFX11-TRUE16-NEXT: .LBB95_4:
+; GFX11-TRUE16-NEXT: v_dual_mov_b32 v0, s0 :: v_dual_mov_b32 v1, s1
+; GFX11-TRUE16-NEXT: s_setpc_b64 s[30:31]
+;
+; GFX11-FAKE16-LABEL: bitcast_v4bf16_to_v4i16_scalar:
+; GFX11-FAKE16: ; %bb.0:
+; GFX11-FAKE16-NEXT: s_waitcnt vmcnt(0) expcnt(0) lgkmcnt(0)
+; GFX11-FAKE16-NEXT: s_cmp_lg_u32 s2, 0
+; GFX11-FAKE16-NEXT: s_mov_b32 s2, 0
+; GFX11-FAKE16-NEXT: s_cbranch_scc0 .LBB95_3
+; GFX11-FAKE16-NEXT: ; %bb.1: ; %Flow
+; GFX11-FAKE16-NEXT: s_and_not1_b32 vcc_lo, exec_lo, s2
+; GFX11-FAKE16-NEXT: s_cbranch_vccnz .LBB95_4
+; GFX11-FAKE16-NEXT: .LBB95_2: ; %cmp.true
+; GFX11-FAKE16-NEXT: s_pack_lh_b32_b16 s2, 0, s1
+; GFX11-FAKE16-NEXT: s_lshl_b32 s1, s1, 16
+; GFX11-FAKE16-NEXT: v_add_f32_e64 v0, 0x40c00000, s2
+; GFX11-FAKE16-NEXT: v_add_f32_e64 v1, 0x40c00000, s1
+; GFX11-FAKE16-NEXT: s_lshl_b32 s1, s0, 16
+; GFX11-FAKE16-NEXT: s_pack_lh_b32_b16 s0, 0, s0
+; GFX11-FAKE16-NEXT: v_add_f32_e64 v2, 0x40c00000, s1
+; GFX11-FAKE16-NEXT: v_add_f32_e64 v5, 0x40c00000, s0
+; GFX11-FAKE16-NEXT: v_bfe_u32 v4, v1, 16, 1
+; GFX11-FAKE16-NEXT: v_or_b32_e32 v7, 0x400000, v0
+; GFX11-FAKE16-NEXT: v_bfe_u32 v3, v0, 16, 1
+; GFX11-FAKE16-NEXT: v_bfe_u32 v6, v2, 16, 1
+; GFX11-FAKE16-NEXT: v_bfe_u32 v8, v5, 16, 1
+; GFX11-FAKE16-NEXT: v_add_nc_u32_e32 v4, v4, v1
+; GFX11-FAKE16-NEXT: v_or_b32_e32 v9, 0x400000, v1
+; GFX11-FAKE16-NEXT: v_cmp_u_f32_e32 vcc_lo, v1, v1
+; GFX11-FAKE16-NEXT: v_or_b32_e32 v10, 0x400000, v2
+; GFX11-FAKE16-NEXT: v_add_nc_u32_e32 v8, v8, v5
+; GFX11-FAKE16-NEXT: v_add_nc_u32_e32 v4, 0x7fff, v4
+; GFX11-FAKE16-NEXT: s_delay_alu instid0(VALU_DEP_1) | instskip(NEXT) | instid1(VALU_DEP_1)
+; GFX11-FAKE16-NEXT: v_dual_cndmask_b32 v1, v4, v9 :: v_dual_add_nc_u32 v6, v6, v2
+; GFX11-FAKE16-NEXT: v_add_nc_u32_e32 v6, 0x7fff, v6
+; GFX11-FAKE16-NEXT: v_cmp_u_f32_e32 vcc_lo, v2, v2
+; GFX11-FAKE16-NEXT: v_add_nc_u32_e32 v3, v3, v0
+; GFX11-FAKE16-NEXT: v_add_nc_u32_e32 v4, 0x7fff, v8
+; GFX11-FAKE16-NEXT: v_or_b32_e32 v8, 0x400000, v5
+; GFX11-FAKE16-NEXT: v_lshrrev_b32_e32 v1, 16, v1
+; GFX11-FAKE16-NEXT: s_delay_alu instid0(VALU_DEP_4) | instskip(SKIP_1) | instid1(VALU_DEP_2)
+; GFX11-FAKE16-NEXT: v_dual_cndmask_b32 v2, v6, v10 :: v_dual_add_nc_u32 v3, 0x7fff, v3
+; GFX11-FAKE16-NEXT: v_cmp_u_f32_e32 vcc_lo, v0, v0
+; GFX11-FAKE16-NEXT: v_lshrrev_b32_e32 v2, 16, v2
+; GFX11-FAKE16-NEXT: s_delay_alu instid0(VALU_DEP_3) | instskip(SKIP_1) | instid1(VALU_DEP_2)
+; GFX11-FAKE16-NEXT: v_cndmask_b32_e32 v3, v3, v7, vcc_lo
+; GFX11-FAKE16-NEXT: v_cmp_u_f32_e32 vcc_lo, v5, v5
+; GFX11-FAKE16-NEXT: v_and_or_b32 v1, 0xffff0000, v3, v1
+; GFX11-FAKE16-NEXT: v_cndmask_b32_e32 v0, v4, v8, vcc_lo
+; GFX11-FAKE16-NEXT: s_delay_alu instid0(VALU_DEP_1)
+; GFX11-FAKE16-NEXT: v_and_or_b32 v0, 0xffff0000, v0, v2
+; GFX11-FAKE16-NEXT: s_setpc_b64 s[30:31]
+; GFX11-FAKE16-NEXT: .LBB95_3:
+; GFX11-FAKE16-NEXT: s_branch .LBB95_2
+; GFX11-FAKE16-NEXT: .LBB95_4:
+; GFX11-FAKE16-NEXT: v_dual_mov_b32 v0, s0 :: v_dual_mov_b32 v1, s1
+; GFX11-FAKE16-NEXT: s_setpc_b64 s[30:31]
%cmp = icmp eq i32 %b, 0
br i1 %cmp, label %cmp.true, label %cmp.false
@@ -14889,65 +15162,124 @@ define inreg <4 x half> @bitcast_v4bf16_to_v4f16_scalar(<4 x bfloat> inreg %a, i
; GFX9-NEXT: v_mov_b32_e32 v1, s17
; GFX9-NEXT: s_setpc_b64 s[30:31]
;
-; GFX11-LABEL: bitcast_v4bf16_to_v4f16_scalar:
-; GFX11: ; %bb.0:
-; GFX11-NEXT: s_waitcnt vmcnt(0) expcnt(0) lgkmcnt(0)
-; GFX11-NEXT: s_cmp_lg_u32 s2, 0
-; GFX11-NEXT: s_mov_b32 s2, 0
-; GFX11-NEXT: s_cbranch_scc0 .LBB103_3
-; GFX11-NEXT: ; %bb.1: ; %Flow
-; GFX11-NEXT: s_and_not1_b32 vcc_lo, exec_lo, s2
-; GFX11-NEXT: s_cbranch_vccnz .LBB103_4
-; GFX11-NEXT: .LBB103_2: ; %cmp.true
-; GFX11-NEXT: s_pack_lh_b32_b16 s2, 0, s1
-; GFX11-NEXT: s_lshl_b32 s1, s1, 16
-; GFX11-NEXT: v_add_f32_e64 v0, 0x40c00000, s2
-; GFX11-NEXT: v_add_f32_e64 v1, 0x40c00000, s1
-; GFX11-NEXT: s_lshl_b32 s1, s0, 16
-; GFX11-NEXT: s_pack_lh_b32_b16 s0, 0, s0
-; GFX11-NEXT: v_add_f32_e64 v2, 0x40c00000, s1
-; GFX11-NEXT: v_add_f32_e64 v5, 0x40c00000, s0
-; GFX11-NEXT: v_bfe_u32 v4, v1, 16, 1
-; GFX11-NEXT: v_bfe_u32 v3, v0, 16, 1
-; GFX11-NEXT: v_or_b32_e32 v9, 0x400000, v1
-; GFX11-NEXT: v_bfe_u32 v6, v2, 16, 1
-; GFX11-NEXT: v_bfe_u32 v8, v5, 16, 1
-; GFX11-NEXT: v_add_nc_u32_e32 v4, v4, v1
-; GFX11-NEXT: v_cmp_u_f32_e32 vcc_lo, v1, v1
-; GFX11-NEXT: v_or_b32_e32 v10, 0x400000, v2
-; GFX11-NEXT: v_or_b32_e32 v7, 0x400000, v0
-; GFX11-NEXT: v_add_nc_u32_e32 v8, v8, v5
-; GFX11-NEXT: v_add_nc_u32_e32 v4, 0x7fff, v4
-; GFX11-NEXT: s_delay_alu instid0(VALU_DEP_1) | instskip(NEXT) | instid1(VALU_DEP_3)
-; GFX11-NEXT: v_dual_cndmask_b32 v1, v4, v9 :: v_dual_add_nc_u32 v6, v6, v2
-; GFX11-NEXT: v_add_nc_u32_e32 v4, 0x7fff, v8
-; GFX11-NEXT: v_or_b32_e32 v8, 0x400000, v5
-; GFX11-NEXT: s_delay_alu instid0(VALU_DEP_3) | instskip(SKIP_3) | instid1(VALU_DEP_2)
-; GFX11-NEXT: v_add_nc_u32_e32 v6, 0x7fff, v6
-; GFX11-NEXT: v_cmp_u_f32_e32 vcc_lo, v2, v2
-; GFX11-NEXT: v_add_nc_u32_e32 v3, v3, v0
-; GFX11-NEXT: v_lshrrev_b32_e32 v1, 16, v1
-; GFX11-NEXT: v_dual_cndmask_b32 v2, v6, v10 :: v_dual_add_nc_u32 v3, 0x7fff, v3
-; GFX11-NEXT: v_cmp_u_f32_e32 vcc_lo, v0, v0
-; GFX11-NEXT: s_delay_alu instid0(VALU_DEP_3) | instskip(NEXT) | instid1(VALU_DEP_3)
-; GFX11-NEXT: v_and_b32_e32 v1, 0xffff, v1
-; GFX11-NEXT: v_lshrrev_b32_e32 v2, 16, v2
-; GFX11-NEXT: s_delay_alu instid0(VALU_DEP_4) | instskip(SKIP_1) | instid1(VALU_DEP_3)
-; GFX11-NEXT: v_cndmask_b32_e32 v0, v3, v7, vcc_lo
-; GFX11-NEXT: v_cmp_u_f32_e32 vcc_lo, v5, v5
-; GFX11-NEXT: v_dual_cndmask_b32 v3, v4, v8 :: v_dual_and_b32 v2, 0xffff, v2
-; GFX11-NEXT: s_delay_alu instid0(VALU_DEP_3) | instskip(NEXT) | instid1(VALU_DEP_2)
-; GFX11-NEXT: v_lshrrev_b32_e32 v4, 16, v0
-; GFX11-NEXT: v_lshrrev_b32_e32 v0, 16, v3
-; GFX11-NEXT: s_delay_alu instid0(VALU_DEP_2) | instskip(NEXT) | instid1(VALU_DEP_2)
-; GFX11-NEXT: v_lshl_or_b32 v1, v4, 16, v1
-; GFX11-NEXT: v_lshl_or_b32 v0, v0, 16, v2
-; GFX11-NEXT: s_setpc_b64 s[30:31]
-; GFX11-NEXT: .LBB103_3:
-; GFX11-NEXT: s_branch .LBB103_2
-; GFX11-NEXT: .LBB103_4:
-; GFX11-NEXT: v_dual_mov_b32 v0, s0 :: v_dual_mov_b32 v1, s1
-; GFX11-NEXT: s_setpc_b64 s[30:31]
+; GFX11-TRUE16-LABEL: bitcast_v4bf16_to_v4f16_scalar:
+; GFX11-TRUE16: ; %bb.0:
+; GFX11-TRUE16-NEXT: s_waitcnt vmcnt(0) expcnt(0) lgkmcnt(0)
+; GFX11-TRUE16-NEXT: s_cmp_lg_u32 s2, 0
+; GFX11-TRUE16-NEXT: s_mov_b32 s2, 0
+; GFX11-TRUE16-NEXT: s_cbranch_scc0 .LBB103_3
+; GFX11-TRUE16-NEXT: ; %bb.1: ; %Flow
+; GFX11-TRUE16-NEXT: s_and_not1_b32 vcc_lo, exec_lo, s2
+; GFX11-TRUE16-NEXT: s_cbranch_vccnz .LBB103_4
+; GFX11-TRUE16-NEXT: .LBB103_2: ; %cmp.true
+; GFX11-TRUE16-NEXT: s_pack_lh_b32_b16 s2, 0, s1
+; GFX11-TRUE16-NEXT: s_lshl_b32 s1, s1, 16
+; GFX11-TRUE16-NEXT: v_add_f32_e64 v0, 0x40c00000, s2
+; GFX11-TRUE16-NEXT: s_pack_lh_b32_b16 s2, 0, s0
+; GFX11-TRUE16-NEXT: v_add_f32_e64 v1, 0x40c00000, s1
+; GFX11-TRUE16-NEXT: v_add_f32_e64 v2, 0x40c00000, s2
+; GFX11-TRUE16-NEXT: s_lshl_b32 s0, s0, 16
+; GFX11-TRUE16-NEXT: v_bfe_u32 v3, v0, 16, 1
+; GFX11-TRUE16-NEXT: v_add_f32_e64 v4, 0x40c00000, s0
+; GFX11-TRUE16-NEXT: v_bfe_u32 v5, v1, 16, 1
+; GFX11-TRUE16-NEXT: v_bfe_u32 v6, v2, 16, 1
+; GFX11-TRUE16-NEXT: v_or_b32_e32 v9, 0x400000, v2
+; GFX11-TRUE16-NEXT: v_or_b32_e32 v7, 0x400000, v0
+; GFX11-TRUE16-NEXT: v_cmp_u_f32_e32 vcc_lo, v0, v0
+; GFX11-TRUE16-NEXT: v_bfe_u32 v8, v4, 16, 1
+; GFX11-TRUE16-NEXT: v_add_nc_u32_e32 v6, v6, v2
+; GFX11-TRUE16-NEXT: s_delay_alu instid0(VALU_DEP_2) | instskip(NEXT) | instid1(VALU_DEP_2)
+; GFX11-TRUE16-NEXT: v_add_nc_u32_e32 v8, v8, v4
+; GFX11-TRUE16-NEXT: v_add_nc_u32_e32 v6, 0x7fff, v6
+; GFX11-TRUE16-NEXT: v_add_nc_u32_e32 v3, v3, v0
+; GFX11-TRUE16-NEXT: s_delay_alu instid0(VALU_DEP_1) | instskip(NEXT) | instid1(VALU_DEP_1)
+; GFX11-TRUE16-NEXT: v_add_nc_u32_e32 v3, 0x7fff, v3
+; GFX11-TRUE16-NEXT: v_dual_cndmask_b32 v0, v3, v7 :: v_dual_add_nc_u32 v5, v5, v1
+; GFX11-TRUE16-NEXT: v_cmp_u_f32_e32 vcc_lo, v2, v2
+; GFX11-TRUE16-NEXT: s_delay_alu instid0(VALU_DEP_2)
+; GFX11-TRUE16-NEXT: v_add_nc_u32_e32 v5, 0x7fff, v5
+; GFX11-TRUE16-NEXT: v_or_b32_e32 v3, 0x400000, v1
+; GFX11-TRUE16-NEXT: v_add_nc_u32_e32 v7, 0x7fff, v8
+; GFX11-TRUE16-NEXT: v_or_b32_e32 v8, 0x400000, v4
+; GFX11-TRUE16-NEXT: v_cndmask_b32_e32 v2, v6, v9, vcc_lo
+; GFX11-TRUE16-NEXT: v_cmp_u_f32_e32 vcc_lo, v1, v1
+; GFX11-TRUE16-NEXT: s_delay_alu instid0(VALU_DEP_2) | instskip(SKIP_3) | instid1(VALU_DEP_3)
+; GFX11-TRUE16-NEXT: v_lshrrev_b32_e32 v2, 16, v2
+; GFX11-TRUE16-NEXT: v_cndmask_b32_e32 v1, v5, v3, vcc_lo
+; GFX11-TRUE16-NEXT: v_cmp_u_f32_e32 vcc_lo, v4, v4
+; GFX11-TRUE16-NEXT: v_lshrrev_b32_e32 v4, 16, v0
+; GFX11-TRUE16-NEXT: v_lshrrev_b32_e32 v1, 16, v1
+; GFX11-TRUE16-NEXT: v_cndmask_b32_e32 v3, v7, v8, vcc_lo
+; GFX11-TRUE16-NEXT: s_delay_alu instid0(VALU_DEP_3) | instskip(NEXT) | instid1(VALU_DEP_2)
+; GFX11-TRUE16-NEXT: v_mov_b16_e32 v1.h, v4.l
+; GFX11-TRUE16-NEXT: v_lshrrev_b32_e32 v0, 16, v3
+; GFX11-TRUE16-NEXT: v_mov_b16_e32 v0.h, v2.l
+; GFX11-TRUE16-NEXT: s_setpc_b64 s[30:31]
+; GFX11-TRUE16-NEXT: .LBB103_3:
+; GFX11-TRUE16-NEXT: s_branch .LBB103_2
+; GFX11-TRUE16-NEXT: .LBB103_4:
+; GFX11-TRUE16-NEXT: v_dual_mov_b32 v0, s0 :: v_dual_mov_b32 v1, s1
+; GFX11-TRUE16-NEXT: s_setpc_b64 s[30:31]
+;
+; GFX11-FAKE16-LABEL: bitcast_v4bf16_to_v4f16_scalar:
+; GFX11-FAKE16: ; %bb.0:
+; GFX11-FAKE16-NEXT: s_waitcnt vmcnt(0) expcnt(0) lgkmcnt(0)
+; GFX11-FAKE16-NEXT: s_cmp_lg_u32 s2, 0
+; GFX11-FAKE16-NEXT: s_mov_b32 s2, 0
+; GFX11-FAKE16-NEXT: s_cbranch_scc0 .LBB103_3
+; GFX11-FAKE16-NEXT: ; %bb.1: ; %Flow
+; GFX11-FAKE16-NEXT: s_and_not1_b32 vcc_lo, exec_lo, s2
+; GFX11-FAKE16-NEXT: s_cbranch_vccnz .LBB103_4
+; GFX11-FAKE16-NEXT: .LBB103_2: ; %cmp.true
+; GFX11-FAKE16-NEXT: s_pack_lh_b32_b16 s2, 0, s1
+; GFX11-FAKE16-NEXT: s_lshl_b32 s1, s1, 16
+; GFX11-FAKE16-NEXT: v_add_f32_e64 v0, 0x40c00000, s2
+; GFX11-FAKE16-NEXT: v_add_f32_e64 v1, 0x40c00000, s1
+; GFX11-FAKE16-NEXT: s_lshl_b32 s1, s0, 16
+; GFX11-FAKE16-NEXT: s_pack_lh_b32_b16 s0, 0, s0
+; GFX11-FAKE16-NEXT: v_add_f32_e64 v2, 0x40c00000, s1
+; GFX11-FAKE16-NEXT: v_add_f32_e64 v5, 0x40c00000, s0
+; GFX11-FAKE16-NEXT: v_bfe_u32 v4, v1, 16, 1
+; GFX11-FAKE16-NEXT: v_bfe_u32 v3, v0, 16, 1
+; GFX11-FAKE16-NEXT: v_or_b32_e32 v9, 0x400000, v1
+; GFX11-FAKE16-NEXT: v_bfe_u32 v6, v2, 16, 1
+; GFX11-FAKE16-NEXT: v_bfe_u32 v8, v5, 16, 1
+; GFX11-FAKE16-NEXT: v_add_nc_u32_e32 v4, v4, v1
+; GFX11-FAKE16-NEXT: v_cmp_u_f32_e32 vcc_lo, v1, v1
+; GFX11-FAKE16-NEXT: v_or_b32_e32 v10, 0x400000, v2
+; GFX11-FAKE16-NEXT: v_or_b32_e32 v7, 0x400000, v0
+; GFX11-FAKE16-NEXT: v_add_nc_u32_e32 v8, v8, v5
+; GFX11-FAKE16-NEXT: v_add_nc_u32_e32 v4, 0x7fff, v4
+; GFX11-FAKE16-NEXT: s_delay_alu instid0(VALU_DEP_1) | instskip(NEXT) | instid1(VALU_DEP_3)
+; GFX11-FAKE16-NEXT: v_dual_cndmask_b32 v1, v4, v9 :: v_dual_add_nc_u32 v6, v6, v2
+; GFX11-FAKE16-NEXT: v_add_nc_u32_e32 v4, 0x7fff, v8
+; GFX11-FAKE16-NEXT: v_or_b32_e32 v8, 0x400000, v5
+; GFX11-FAKE16-NEXT: s_delay_alu instid0(VALU_DEP_3) | instskip(SKIP_3) | instid1(VALU_DEP_2)
+; GFX11-FAKE16-NEXT: v_add_nc_u32_e32 v6, 0x7fff, v6
+; GFX11-FAKE16-NEXT: v_cmp_u_f32_e32 vcc_lo, v2, v2
+; GFX11-FAKE16-NEXT: v_add_nc_u32_e32 v3, v3, v0
+; GFX11-FAKE16-NEXT: v_lshrrev_b32_e32 v1, 16, v1
+; GFX11-FAKE16-NEXT: v_dual_cndmask_b32 v2, v6, v10 :: v_dual_add_nc_u32 v3, 0x7fff, v3
+; GFX11-FAKE16-NEXT: v_cmp_u_f32_e32 vcc_lo, v0, v0
+; GFX11-FAKE16-NEXT: s_delay_alu instid0(VALU_DEP_3) | instskip(NEXT) | instid1(VALU_DEP_3)
+; GFX11-FAKE16-NEXT: v_and_b32_e32 v1, 0xffff, v1
+; GFX11-FAKE16-NEXT: v_lshrrev_b32_e32 v2, 16, v2
+; GFX11-FAKE16-NEXT: s_delay_alu instid0(VALU_DEP_4) | instskip(SKIP_1) | instid1(VALU_DEP_3)
+; GFX11-FAKE16-NEXT: v_cndmask_b32_e32 v0, v3, v7, vcc_lo
+; GFX11-FAKE16-NEXT: v_cmp_u_f32_e32 vcc_lo, v5, v5
+; GFX11-FAKE16-NEXT: v_dual_cndmask_b32 v3, v4, v8 :: v_dual_and_b32 v2, 0xffff, v2
+; GFX11-FAKE16-NEXT: s_delay_alu instid0(VALU_DEP_3) | instskip(NEXT) | instid1(VALU_DEP_2)
+; GFX11-FAKE16-NEXT: v_lshrrev_b32_e32 v4, 16, v0
+; GFX11-FAKE16-NEXT: v_lshrrev_b32_e32 v0, 16, v3
+; GFX11-FAKE16-NEXT: s_delay_alu instid0(VALU_DEP_2) | instskip(NEXT) | instid1(VALU_DEP_2)
+; GFX11-FAKE16-NEXT: v_lshl_or_b32 v1, v4, 16, v1
+; GFX11-FAKE16-NEXT: v_lshl_or_b32 v0, v0, 16, v2
+; GFX11-FAKE16-NEXT: s_setpc_b64 s[30:31]
+; GFX11-FAKE16-NEXT: .LBB103_3:
+; GFX11-FAKE16-NEXT: s_branch .LBB103_2
+; GFX11-FAKE16-NEXT: .LBB103_4:
+; GFX11-FAKE16-NEXT: v_dual_mov_b32 v0, s0 :: v_dual_mov_b32 v1, s1
+; GFX11-FAKE16-NEXT: s_setpc_b64 s[30:31]
%cmp = icmp eq i32 %b, 0
br i1 %cmp, label %cmp.true, label %cmp.false
@@ -16614,88 +16946,172 @@ define inreg <8 x i8> @bitcast_v4bf16_to_v8i8_scalar(<4 x bfloat> inreg %a, i32
; GFX9-NEXT: v_mov_b32_e32 v4, s17
; GFX9-NEXT: s_setpc_b64 s[30:31]
;
-; GFX11-LABEL: bitcast_v4bf16_to_v8i8_scalar:
-; GFX11: ; %bb.0:
-; GFX11-NEXT: s_waitcnt vmcnt(0) expcnt(0) lgkmcnt(0)
-; GFX11-NEXT: s_cmp_lg_u32 s2, 0
-; GFX11-NEXT: s_mov_b32 s4, 0
-; GFX11-NEXT: s_cbranch_scc0 .LBB109_3
-; GFX11-NEXT: ; %bb.1: ; %cmp.false
-; GFX11-NEXT: s_lshr_b64 s[2:3], s[0:1], 24
-; GFX11-NEXT: s_lshr_b32 s6, s1, 24
-; GFX11-NEXT: s_lshr_b32 s8, s1, 16
-; GFX11-NEXT: s_lshr_b32 s7, s1, 8
-; GFX11-NEXT: s_lshr_b32 s5, s0, 16
-; GFX11-NEXT: s_lshr_b32 s3, s0, 8
-; GFX11-NEXT: s_and_not1_b32 vcc_lo, exec_lo, s4
-; GFX11-NEXT: s_cbranch_vccnz .LBB109_4
-; GFX11-NEXT: .LBB109_2: ; %cmp.true
-; GFX11-NEXT: s_pack_lh_b32_b16 s2, 0, s0
-; GFX11-NEXT: s_lshl_b32 s0, s0, 16
-; GFX11-NEXT: v_add_f32_e64 v0, 0x40c00000, s2
-; GFX11-NEXT: v_add_f32_e64 v1, 0x40c00000, s0
-; GFX11-NEXT: s_lshl_b32 s0, s1, 16
-; GFX11-NEXT: s_pack_lh_b32_b16 s1, 0, s1
-; GFX11-NEXT: v_add_f32_e64 v2, 0x40c00000, s0
-; GFX11-NEXT: v_add_f32_e64 v5, 0x40c00000, s1
-; GFX11-NEXT: v_bfe_u32 v4, v1, 16, 1
-; GFX11-NEXT: v_or_b32_e32 v7, 0x400000, v0
-; GFX11-NEXT: v_bfe_u32 v3, v0, 16, 1
-; GFX11-NEXT: v_bfe_u32 v6, v2, 16, 1
-; GFX11-NEXT: v_bfe_u32 v8, v5, 16, 1
-; GFX11-NEXT: v_add_nc_u32_e32 v4, v4, v1
-; GFX11-NEXT: v_or_b32_e32 v9, 0x400000, v1
-; GFX11-NEXT: v_cmp_u_f32_e32 vcc_lo, v1, v1
-; GFX11-NEXT: v_or_b32_e32 v10, 0x400000, v2
-; GFX11-NEXT: v_add_nc_u32_e32 v8, v8, v5
-; GFX11-NEXT: v_add_nc_u32_e32 v4, 0x7fff, v4
-; GFX11-NEXT: s_delay_alu instid0(VALU_DEP_1) | instskip(NEXT) | instid1(VALU_DEP_3)
-; GFX11-NEXT: v_dual_cndmask_b32 v1, v4, v9 :: v_dual_add_nc_u32 v6, v6, v2
-; GFX11-NEXT: v_add_nc_u32_e32 v4, 0x7fff, v8
-; GFX11-NEXT: v_or_b32_e32 v8, 0x400000, v5
-; GFX11-NEXT: s_delay_alu instid0(VALU_DEP_3) | instskip(SKIP_1) | instid1(VALU_DEP_2)
-; GFX11-NEXT: v_add_nc_u32_e32 v6, 0x7fff, v6
-; GFX11-NEXT: v_cmp_u_f32_e32 vcc_lo, v2, v2
-; GFX11-NEXT: v_dual_cndmask_b32 v2, v6, v10 :: v_dual_add_nc_u32 v3, v3, v0
-; GFX11-NEXT: s_delay_alu instid0(VALU_DEP_1) | instskip(SKIP_2) | instid1(VALU_DEP_3)
-; GFX11-NEXT: v_add_nc_u32_e32 v3, 0x7fff, v3
-; GFX11-NEXT: v_cmp_u_f32_e32 vcc_lo, v0, v0
-; GFX11-NEXT: v_lshrrev_b32_e32 v0, 16, v1
-; GFX11-NEXT: v_cndmask_b32_e32 v3, v3, v7, vcc_lo
-; GFX11-NEXT: v_cmp_u_f32_e32 vcc_lo, v5, v5
-; GFX11-NEXT: v_cndmask_b32_e32 v1, v4, v8, vcc_lo
-; GFX11-NEXT: v_lshrrev_b32_e32 v8, 16, v2
-; GFX11-NEXT: s_delay_alu instid0(VALU_DEP_4) | instskip(SKIP_1) | instid1(VALU_DEP_4)
-; GFX11-NEXT: v_lshrrev_b32_e32 v2, 16, v3
-; GFX11-NEXT: v_and_b32_e32 v3, 0xffff, v0
-; GFX11-NEXT: v_lshrrev_b32_e32 v6, 16, v1
-; GFX11-NEXT: s_delay_alu instid0(VALU_DEP_4) | instskip(NEXT) | instid1(VALU_DEP_3)
-; GFX11-NEXT: v_and_b32_e32 v1, 0xffff, v8
-; GFX11-NEXT: v_lshl_or_b32 v9, v2, 16, v3
-; GFX11-NEXT: s_delay_alu instid0(VALU_DEP_2) | instskip(NEXT) | instid1(VALU_DEP_2)
-; GFX11-NEXT: v_lshl_or_b32 v10, v6, 16, v1
-; GFX11-NEXT: v_lshrrev_b32_e32 v2, 16, v9
-; GFX11-NEXT: v_lshrrev_b32_e32 v1, 8, v9
-; GFX11-NEXT: s_delay_alu instid0(VALU_DEP_3)
-; GFX11-NEXT: v_lshrrev_b64 v[3:4], 24, v[9:10]
-; GFX11-NEXT: v_lshrrev_b32_e32 v7, 24, v10
-; GFX11-NEXT: v_lshrrev_b32_e32 v5, 8, v10
-; GFX11-NEXT: v_mov_b32_e32 v4, v8
-; GFX11-NEXT: s_setpc_b64 s[30:31]
-; GFX11-NEXT: .LBB109_3:
-; GFX11-NEXT: ; implicit-def: $sgpr3
-; GFX11-NEXT: ; implicit-def: $sgpr5
-; GFX11-NEXT: ; implicit-def: $sgpr2
-; GFX11-NEXT: ; implicit-def: $sgpr7
-; GFX11-NEXT: ; implicit-def: $sgpr8
-; GFX11-NEXT: ; implicit-def: $sgpr6
-; GFX11-NEXT: s_branch .LBB109_2
-; GFX11-NEXT: .LBB109_4:
-; GFX11-NEXT: v_dual_mov_b32 v6, s8 :: v_dual_mov_b32 v7, s6
-; GFX11-NEXT: v_dual_mov_b32 v0, s0 :: v_dual_mov_b32 v5, s7
-; GFX11-NEXT: v_dual_mov_b32 v2, s5 :: v_dual_mov_b32 v1, s3
-; GFX11-NEXT: v_dual_mov_b32 v3, s2 :: v_dual_mov_b32 v4, s1
-; GFX11-NEXT: s_setpc_b64 s[30:31]
+; GFX11-TRUE16-LABEL: bitcast_v4bf16_to_v8i8_scalar:
+; GFX11-TRUE16: ; %bb.0:
+; GFX11-TRUE16-NEXT: s_waitcnt vmcnt(0) expcnt(0) lgkmcnt(0)
+; GFX11-TRUE16-NEXT: s_cmp_lg_u32 s2, 0
+; GFX11-TRUE16-NEXT: s_mov_b32 s4, 0
+; GFX11-TRUE16-NEXT: s_cbranch_scc0 .LBB109_3
+; GFX11-TRUE16-NEXT: ; %bb.1: ; %cmp.false
+; GFX11-TRUE16-NEXT: s_lshr_b64 s[2:3], s[0:1], 24
+; GFX11-TRUE16-NEXT: s_lshr_b32 s6, s1, 24
+; GFX11-TRUE16-NEXT: s_lshr_b32 s8, s1, 16
+; GFX11-TRUE16-NEXT: s_lshr_b32 s7, s1, 8
+; GFX11-TRUE16-NEXT: s_lshr_b32 s5, s0, 16
+; GFX11-TRUE16-NEXT: s_lshr_b32 s3, s0, 8
+; GFX11-TRUE16-NEXT: s_and_not1_b32 vcc_lo, exec_lo, s4
+; GFX11-TRUE16-NEXT: s_cbranch_vccnz .LBB109_4
+; GFX11-TRUE16-NEXT: .LBB109_2: ; %cmp.true
+; GFX11-TRUE16-NEXT: s_pack_lh_b32_b16 s2, 0, s0
+; GFX11-TRUE16-NEXT: s_lshl_b32 s0, s0, 16
+; GFX11-TRUE16-NEXT: v_add_f32_e64 v0, 0x40c00000, s2
+; GFX11-TRUE16-NEXT: s_pack_lh_b32_b16 s2, 0, s1
+; GFX11-TRUE16-NEXT: v_add_f32_e64 v1, 0x40c00000, s0
+; GFX11-TRUE16-NEXT: s_lshl_b32 s0, s1, 16
+; GFX11-TRUE16-NEXT: v_add_f32_e64 v2, 0x40c00000, s2
+; GFX11-TRUE16-NEXT: v_bfe_u32 v3, v0, 16, 1
+; GFX11-TRUE16-NEXT: v_add_f32_e64 v4, 0x40c00000, s0
+; GFX11-TRUE16-NEXT: v_or_b32_e32 v7, 0x400000, v0
+; GFX11-TRUE16-NEXT: v_cmp_u_f32_e32 vcc_lo, v0, v0
+; GFX11-TRUE16-NEXT: v_bfe_u32 v6, v2, 16, 1
+; GFX11-TRUE16-NEXT: v_add_nc_u32_e32 v3, v3, v0
+; GFX11-TRUE16-NEXT: v_bfe_u32 v8, v4, 16, 1
+; GFX11-TRUE16-NEXT: v_bfe_u32 v5, v1, 16, 1
+; GFX11-TRUE16-NEXT: v_or_b32_e32 v9, 0x400000, v1
+; GFX11-TRUE16-NEXT: s_delay_alu instid0(VALU_DEP_4) | instskip(NEXT) | instid1(VALU_DEP_1)
+; GFX11-TRUE16-NEXT: v_add_nc_u32_e32 v3, 0x7fff, v3
+; GFX11-TRUE16-NEXT: v_dual_cndmask_b32 v3, v3, v7 :: v_dual_add_nc_u32 v6, v6, v2
+; GFX11-TRUE16-NEXT: v_cmp_u_f32_e32 vcc_lo, v1, v1
+; GFX11-TRUE16-NEXT: v_add_nc_u32_e32 v8, v8, v4
+; GFX11-TRUE16-NEXT: s_delay_alu instid0(VALU_DEP_3) | instskip(SKIP_1) | instid1(VALU_DEP_3)
+; GFX11-TRUE16-NEXT: v_add_nc_u32_e32 v6, 0x7fff, v6
+; GFX11-TRUE16-NEXT: v_or_b32_e32 v7, 0x400000, v4
+; GFX11-TRUE16-NEXT: v_add_nc_u32_e32 v0, 0x7fff, v8
+; GFX11-TRUE16-NEXT: v_or_b32_e32 v8, 0x400000, v2
+; GFX11-TRUE16-NEXT: v_add_nc_u32_e32 v5, v5, v1
+; GFX11-TRUE16-NEXT: s_delay_alu instid0(VALU_DEP_1) | instskip(NEXT) | instid1(VALU_DEP_1)
+; GFX11-TRUE16-NEXT: v_add_nc_u32_e32 v5, 0x7fff, v5
+; GFX11-TRUE16-NEXT: v_cndmask_b32_e32 v1, v5, v9, vcc_lo
+; GFX11-TRUE16-NEXT: v_cmp_u_f32_e32 vcc_lo, v4, v4
+; GFX11-TRUE16-NEXT: v_cndmask_b32_e32 v4, v0, v7, vcc_lo
+; GFX11-TRUE16-NEXT: v_cmp_u_f32_e32 vcc_lo, v2, v2
+; GFX11-TRUE16-NEXT: s_delay_alu instid0(VALU_DEP_4) | instskip(SKIP_3) | instid1(VALU_DEP_4)
+; GFX11-TRUE16-NEXT: v_lshrrev_b32_e32 v0, 16, v1
+; GFX11-TRUE16-NEXT: v_lshrrev_b32_e32 v1, 16, v3
+; GFX11-TRUE16-NEXT: v_cndmask_b32_e32 v2, v6, v8, vcc_lo
+; GFX11-TRUE16-NEXT: v_lshrrev_b32_e32 v8, 16, v4
+; GFX11-TRUE16-NEXT: v_mov_b16_e32 v9.l, v0.l
+; GFX11-TRUE16-NEXT: s_delay_alu instid0(VALU_DEP_4) | instskip(NEXT) | instid1(VALU_DEP_4)
+; GFX11-TRUE16-NEXT: v_mov_b16_e32 v9.h, v1.l
+; GFX11-TRUE16-NEXT: v_lshrrev_b32_e32 v6, 16, v2
+; GFX11-TRUE16-NEXT: s_delay_alu instid0(VALU_DEP_4) | instskip(NEXT) | instid1(VALU_DEP_3)
+; GFX11-TRUE16-NEXT: v_mov_b16_e32 v10.l, v8.l
+; GFX11-TRUE16-NEXT: v_lshrrev_b32_e32 v2, 16, v9
+; GFX11-TRUE16-NEXT: v_lshrrev_b32_e32 v1, 8, v9
+; GFX11-TRUE16-NEXT: s_delay_alu instid0(VALU_DEP_4) | instskip(NEXT) | instid1(VALU_DEP_1)
+; GFX11-TRUE16-NEXT: v_mov_b16_e32 v10.h, v6.l
+; GFX11-TRUE16-NEXT: v_lshrrev_b64 v[3:4], 24, v[9:10]
+; GFX11-TRUE16-NEXT: v_lshrrev_b32_e32 v7, 24, v10
+; GFX11-TRUE16-NEXT: v_lshrrev_b32_e32 v5, 8, v10
+; GFX11-TRUE16-NEXT: v_mov_b32_e32 v4, v8
+; GFX11-TRUE16-NEXT: s_setpc_b64 s[30:31]
+; GFX11-TRUE16-NEXT: .LBB109_3:
+; GFX11-TRUE16-NEXT: ; implicit-def: $sgpr3
+; GFX11-TRUE16-NEXT: ; implicit-def: $sgpr5
+; GFX11-TRUE16-NEXT: ; implicit-def: $sgpr2
+; GFX11-TRUE16-NEXT: ; implicit-def: $sgpr7
+; GFX11-TRUE16-NEXT: ; implicit-def: $sgpr8
+; GFX11-TRUE16-NEXT: ; implicit-def: $sgpr6
+; GFX11-TRUE16-NEXT: s_branch .LBB109_2
+; GFX11-TRUE16-NEXT: .LBB109_4:
+; GFX11-TRUE16-NEXT: v_dual_mov_b32 v6, s8 :: v_dual_mov_b32 v7, s6
+; GFX11-TRUE16-NEXT: v_dual_mov_b32 v0, s0 :: v_dual_mov_b32 v5, s7
+; GFX11-TRUE16-NEXT: v_dual_mov_b32 v2, s5 :: v_dual_mov_b32 v1, s3
+; GFX11-TRUE16-NEXT: v_dual_mov_b32 v3, s2 :: v_dual_mov_b32 v4, s1
+; GFX11-TRUE16-NEXT: s_setpc_b64 s[30:31]
+;
+; GFX11-FAKE16-LABEL: bitcast_v4bf16_to_v8i8_scalar:
+; GFX11-FAKE16: ; %bb.0:
+; GFX11-FAKE16-NEXT: s_waitcnt vmcnt(0) expcnt(0) lgkmcnt(0)
+; GFX11-FAKE16-NEXT: s_cmp_lg_u32 s2, 0
+; GFX11-FAKE16-NEXT: s_mov_b32 s4, 0
+; GFX11-FAKE16-NEXT: s_cbranch_scc0 .LBB109_3
+; GFX11-FAKE16-NEXT: ; %bb.1: ; %cmp.false
+; GFX11-FAKE16-NEXT: s_lshr_b64 s[2:3], s[0:1], 24
+; GFX11-FAKE16-NEXT: s_lshr_b32 s6, s1, 24
+; GFX11-FAKE16-NEXT: s_lshr_b32 s8, s1, 16
+; GFX11-FAKE16-NEXT: s_lshr_b32 s7, s1, 8
+; GFX11-FAKE16-NEXT: s_lshr_b32 s5, s0, 16
+; GFX11-FAKE16-NEXT: s_lshr_b32 s3, s0, 8
+; GFX11-FAKE16-NEXT: s_and_not1_b32 vcc_lo, exec_lo, s4
+; GFX11-FAKE16-NEXT: s_cbranch_vccnz .LBB109_4
+; GFX11-FAKE16-NEXT: .LBB109_2: ; %cmp.true
+; GFX11-FAKE16-NEXT: s_pack_lh_b32_b16 s2, 0, s0
+; GFX11-FAKE16-NEXT: s_lshl_b32 s0, s0, 16
+; GFX11-FAKE16-NEXT: v_add_f32_e64 v0, 0x40c00000, s2
+; GFX11-FAKE16-NEXT: v_add_f32_e64 v1, 0x40c00000, s0
+; GFX11-FAKE16-NEXT: s_lshl_b32 s0, s1, 16
+; GFX11-FAKE16-NEXT: s_pack_lh_b32_b16 s1, 0, s1
+; GFX11-FAKE16-NEXT: v_add_f32_e64 v2, 0x40c00000, s0
+; GFX11-FAKE16-NEXT: v_add_f32_e64 v5, 0x40c00000, s1
+; GFX11-FAKE16-NEXT: v_bfe_u32 v4, v1, 16, 1
+; GFX11-FAKE16-NEXT: v_or_b32_e32 v7, 0x400000, v0
+; GFX11-FAKE16-NEXT: v_bfe_u32 v3, v0, 16, 1
+; GFX11-FAKE16-NEXT: v_bfe_u32 v6, v2, 16, 1
+; GFX11-FAKE16-NEXT: v_bfe_u32 v8, v5, 16, 1
+; GFX11-FAKE16-NEXT: v_add_nc_u32_e32 v4, v4, v1
+; GFX11-FAKE16-NEXT: v_or_b32_e32 v9, 0x400000, v1
+; GFX11-FAKE16-NEXT: v_cmp_u_f32_e32 vcc_lo, v1, v1
+; GFX11-FAKE16-NEXT: v_or_b32_e32 v10, 0x400000, v2
+; GFX11-FAKE16-NEXT: v_add_nc_u32_e32 v8, v8, v5
+; GFX11-FAKE16-NEXT: v_add_nc_u32_e32 v4, 0x7fff, v4
+; GFX11-FAKE16-NEXT: s_delay_alu instid0(VALU_DEP_1) | instskip(NEXT) | instid1(VALU_DEP_3)
+; GFX11-FAKE16-NEXT: v_dual_cndmask_b32 v1, v4, v9 :: v_dual_add_nc_u32 v6, v6, v2
+; GFX11-FAKE16-NEXT: v_add_nc_u32_e32 v4, 0x7fff, v8
+; GFX11-FAKE16-NEXT: v_or_b32_e32 v8, 0x400000, v5
+; GFX11-FAKE16-NEXT: s_delay_alu instid0(VALU_DEP_3) | instskip(SKIP_1) | instid1(VALU_DEP_2)
+; GFX11-FAKE16-NEXT: v_add_nc_u32_e32 v6, 0x7fff, v6
+; GFX11-FAKE16-NEXT: v_cmp_u_f32_e32 vcc_lo, v2, v2
+; GFX11-FAKE16-NEXT: v_dual_cndmask_b32 v2, v6, v10 :: v_dual_add_nc_u32 v3, v3, v0
+; GFX11-FAKE16-NEXT: s_delay_alu instid0(VALU_DEP_1) | instskip(SKIP_2) | instid1(VALU_DEP_3)
+; GFX11-FAKE16-NEXT: v_add_nc_u32_e32 v3, 0x7fff, v3
+; GFX11-FAKE16-NEXT: v_cmp_u_f32_e32 vcc_lo, v0, v0
+; GFX11-FAKE16-NEXT: v_lshrrev_b32_e32 v0, 16, v1
+; GFX11-FAKE16-NEXT: v_cndmask_b32_e32 v3, v3, v7, vcc_lo
+; GFX11-FAKE16-NEXT: v_cmp_u_f32_e32 vcc_lo, v5, v5
+; GFX11-FAKE16-NEXT: v_cndmask_b32_e32 v1, v4, v8, vcc_lo
+; GFX11-FAKE16-NEXT: v_lshrrev_b32_e32 v8, 16, v2
+; GFX11-FAKE16-NEXT: s_delay_alu instid0(VALU_DEP_4) | instskip(SKIP_1) | instid1(VALU_DEP_4)
+; GFX11-FAKE16-NEXT: v_lshrrev_b32_e32 v2, 16, v3
+; GFX11-FAKE16-NEXT: v_and_b32_e32 v3, 0xffff, v0
+; GFX11-FAKE16-NEXT: v_lshrrev_b32_e32 v6, 16, v1
+; GFX11-FAKE16-NEXT: s_delay_alu instid0(VALU_DEP_4) | instskip(NEXT) | instid1(VALU_DEP_3)
+; GFX11-FAKE16-NEXT: v_and_b32_e32 v1, 0xffff, v8
+; GFX11-FAKE16-NEXT: v_lshl_or_b32 v9, v2, 16, v3
+; GFX11-FAKE16-NEXT: s_delay_alu instid0(VALU_DEP_2) | instskip(NEXT) | instid1(VALU_DEP_2)
+; GFX11-FAKE16-NEXT: v_lshl_or_b32 v10, v6, 16, v1
+; GFX11-FAKE16-NEXT: v_lshrrev_b32_e32 v2, 16, v9
+; GFX11-FAKE16-NEXT: v_lshrrev_b32_e32 v1, 8, v9
+; GFX11-FAKE16-NEXT: s_delay_alu instid0(VALU_DEP_3)
+; GFX11-FAKE16-NEXT: v_lshrrev_b64 v[3:4], 24, v[9:10]
+; GFX11-FAKE16-NEXT: v_lshrrev_b32_e32 v7, 24, v10
+; GFX11-FAKE16-NEXT: v_lshrrev_b32_e32 v5, 8, v10
+; GFX11-FAKE16-NEXT: v_mov_b32_e32 v4, v8
+; GFX11-FAKE16-NEXT: s_setpc_b64 s[30:31]
+; GFX11-FAKE16-NEXT: .LBB109_3:
+; GFX11-FAKE16-NEXT: ; implicit-def: $sgpr3
+; GFX11-FAKE16-NEXT: ; implicit-def: $sgpr5
+; GFX11-FAKE16-NEXT: ; implicit-def: $sgpr2
+; GFX11-FAKE16-NEXT: ; implicit-def: $sgpr7
+; GFX11-FAKE16-NEXT: ; implicit-def: $sgpr8
+; GFX11-FAKE16-NEXT: ; implicit-def: $sgpr6
+; GFX11-FAKE16-NEXT: s_branch .LBB109_2
+; GFX11-FAKE16-NEXT: .LBB109_4:
+; GFX11-FAKE16-NEXT: v_dual_mov_b32 v6, s8 :: v_dual_mov_b32 v7, s6
+; GFX11-FAKE16-NEXT: v_dual_mov_b32 v0, s0 :: v_dual_mov_b32 v5, s7
+; GFX11-FAKE16-NEXT: v_dual_mov_b32 v2, s5 :: v_dual_mov_b32 v1, s3
+; GFX11-FAKE16-NEXT: v_dual_mov_b32 v3, s2 :: v_dual_mov_b32 v4, s1
+; GFX11-FAKE16-NEXT: s_setpc_b64 s[30:31]
%cmp = icmp eq i32 %b, 0
br i1 %cmp, label %cmp.true, label %cmp.false
diff --git a/llvm/test/CodeGen/AMDGPU/amdgcn.bitcast.704bit.ll b/llvm/test/CodeGen/AMDGPU/amdgcn.bitcast.704bit.ll
index 2cc7c44..87d5157 100644
--- a/llvm/test/CodeGen/AMDGPU/amdgcn.bitcast.704bit.ll
+++ b/llvm/test/CodeGen/AMDGPU/amdgcn.bitcast.704bit.ll
@@ -5328,105 +5328,278 @@ define inreg <22 x i32> @bitcast_v44i16_to_v22i32_scalar(<44 x i16> inreg %a, i3
; GFX11-TRUE16-LABEL: bitcast_v44i16_to_v22i32_scalar:
; GFX11-TRUE16: ; %bb.0:
; GFX11-TRUE16-NEXT: s_waitcnt vmcnt(0) expcnt(0) lgkmcnt(0)
-; GFX11-TRUE16-NEXT: v_mov_b16_e32 v32.h, 0
; GFX11-TRUE16-NEXT: v_cmp_ne_u32_e32 vcc_lo, 0, v4
-; GFX11-TRUE16-NEXT: v_mov_b16_e32 v32.l, v3.h
-; GFX11-TRUE16-NEXT: v_mov_b16_e32 v33.l, v2.h
-; GFX11-TRUE16-NEXT: v_mov_b16_e32 v34.l, v1.h
-; GFX11-TRUE16-NEXT: v_mov_b16_e32 v33.h, v32.h
-; GFX11-TRUE16-NEXT: v_mov_b16_e32 v34.h, v32.h
-; GFX11-TRUE16-NEXT: v_mov_b16_e32 v35.l, v0.h
-; GFX11-TRUE16-NEXT: v_mov_b16_e32 v35.h, v32.h
-; GFX11-TRUE16-NEXT: v_and_b32_e32 v39, 0xffff, v0
-; GFX11-TRUE16-NEXT: v_and_b32_e32 v38, 0xffff, v1
-; GFX11-TRUE16-NEXT: v_and_b32_e32 v37, 0xffff, v2
-; GFX11-TRUE16-NEXT: v_and_b32_e32 v36, 0xffff, v3
-; GFX11-TRUE16-NEXT: s_lshr_b32 s41, s29, 16
-; GFX11-TRUE16-NEXT: s_lshr_b32 s42, s28, 16
-; GFX11-TRUE16-NEXT: s_lshr_b32 s43, s27, 16
-; GFX11-TRUE16-NEXT: s_lshr_b32 s15, s26, 16
-; GFX11-TRUE16-NEXT: s_lshr_b32 s14, s25, 16
-; GFX11-TRUE16-NEXT: s_lshr_b32 s13, s24, 16
-; GFX11-TRUE16-NEXT: s_lshr_b32 s12, s23, 16
-; GFX11-TRUE16-NEXT: s_lshr_b32 s11, s22, 16
-; GFX11-TRUE16-NEXT: s_lshr_b32 s10, s21, 16
-; GFX11-TRUE16-NEXT: s_lshr_b32 s9, s20, 16
-; GFX11-TRUE16-NEXT: s_lshr_b32 s8, s19, 16
-; GFX11-TRUE16-NEXT: s_lshr_b32 s7, s18, 16
-; GFX11-TRUE16-NEXT: s_lshr_b32 s6, s17, 16
-; GFX11-TRUE16-NEXT: s_lshr_b32 s5, s16, 16
-; GFX11-TRUE16-NEXT: s_lshr_b32 s44, s3, 16
-; GFX11-TRUE16-NEXT: s_lshr_b32 s45, s2, 16
-; GFX11-TRUE16-NEXT: s_lshr_b32 s46, s1, 16
-; GFX11-TRUE16-NEXT: s_lshr_b32 s4, s0, 16
-; GFX11-TRUE16-NEXT: s_mov_b32 s40, 0
-; GFX11-TRUE16-NEXT: s_pack_ll_b32_b16 s4, s0, s4
-; GFX11-TRUE16-NEXT: s_pack_ll_b32_b16 s1, s1, s46
-; GFX11-TRUE16-NEXT: s_pack_ll_b32_b16 s2, s2, s45
-; GFX11-TRUE16-NEXT: s_pack_ll_b32_b16 s3, s3, s44
-; GFX11-TRUE16-NEXT: s_pack_ll_b32_b16 s5, s16, s5
-; GFX11-TRUE16-NEXT: s_pack_ll_b32_b16 s6, s17, s6
-; GFX11-TRUE16-NEXT: s_pack_ll_b32_b16 s7, s18, s7
-; GFX11-TRUE16-NEXT: s_pack_ll_b32_b16 s8, s19, s8
-; GFX11-TRUE16-NEXT: s_pack_ll_b32_b16 s9, s20, s9
-; GFX11-TRUE16-NEXT: s_pack_ll_b32_b16 s10, s21, s10
-; GFX11-TRUE16-NEXT: s_pack_ll_b32_b16 s11, s22, s11
-; GFX11-TRUE16-NEXT: s_pack_ll_b32_b16 s12, s23, s12
-; GFX11-TRUE16-NEXT: s_pack_ll_b32_b16 s13, s24, s13
-; GFX11-TRUE16-NEXT: s_pack_ll_b32_b16 s14, s25, s14
-; GFX11-TRUE16-NEXT: s_pack_ll_b32_b16 s15, s26, s15
-; GFX11-TRUE16-NEXT: s_pack_ll_b32_b16 s16, s27, s43
-; GFX11-TRUE16-NEXT: s_pack_ll_b32_b16 s17, s28, s42
-; GFX11-TRUE16-NEXT: s_pack_ll_b32_b16 s0, s29, s41
+; GFX11-TRUE16-NEXT: s_clause 0x1f
+; GFX11-TRUE16-NEXT: scratch_store_b32 off, v40, s32 offset:304
+; GFX11-TRUE16-NEXT: scratch_store_b32 off, v41, s32 offset:300
+; GFX11-TRUE16-NEXT: scratch_store_b32 off, v42, s32 offset:296
+; GFX11-TRUE16-NEXT: scratch_store_b32 off, v43, s32 offset:292
+; GFX11-TRUE16-NEXT: scratch_store_b32 off, v44, s32 offset:288
+; GFX11-TRUE16-NEXT: scratch_store_b32 off, v45, s32 offset:284
+; GFX11-TRUE16-NEXT: scratch_store_b32 off, v46, s32 offset:280
+; GFX11-TRUE16-NEXT: scratch_store_b32 off, v47, s32 offset:276
+; GFX11-TRUE16-NEXT: scratch_store_b32 off, v56, s32 offset:272
+; GFX11-TRUE16-NEXT: scratch_store_b32 off, v57, s32 offset:268
+; GFX11-TRUE16-NEXT: scratch_store_b32 off, v58, s32 offset:264
+; GFX11-TRUE16-NEXT: scratch_store_b32 off, v59, s32 offset:260
+; GFX11-TRUE16-NEXT: scratch_store_b32 off, v60, s32 offset:256
+; GFX11-TRUE16-NEXT: scratch_store_b32 off, v61, s32 offset:252
+; GFX11-TRUE16-NEXT: scratch_store_b32 off, v62, s32 offset:248
+; GFX11-TRUE16-NEXT: scratch_store_b32 off, v63, s32 offset:244
+; GFX11-TRUE16-NEXT: scratch_store_b32 off, v72, s32 offset:240
+; GFX11-TRUE16-NEXT: scratch_store_b32 off, v73, s32 offset:236
+; GFX11-TRUE16-NEXT: scratch_store_b32 off, v74, s32 offset:232
+; GFX11-TRUE16-NEXT: scratch_store_b32 off, v75, s32 offset:228
+; GFX11-TRUE16-NEXT: scratch_store_b32 off, v76, s32 offset:224
+; GFX11-TRUE16-NEXT: scratch_store_b32 off, v77, s32 offset:220
+; GFX11-TRUE16-NEXT: scratch_store_b32 off, v78, s32 offset:216
+; GFX11-TRUE16-NEXT: scratch_store_b32 off, v79, s32 offset:212
+; GFX11-TRUE16-NEXT: scratch_store_b32 off, v88, s32 offset:208
+; GFX11-TRUE16-NEXT: scratch_store_b32 off, v89, s32 offset:204
+; GFX11-TRUE16-NEXT: scratch_store_b32 off, v90, s32 offset:200
+; GFX11-TRUE16-NEXT: scratch_store_b32 off, v91, s32 offset:196
+; GFX11-TRUE16-NEXT: scratch_store_b32 off, v92, s32 offset:192
+; GFX11-TRUE16-NEXT: scratch_store_b32 off, v93, s32 offset:188
+; GFX11-TRUE16-NEXT: scratch_store_b32 off, v94, s32 offset:184
+; GFX11-TRUE16-NEXT: scratch_store_b32 off, v95, s32 offset:180
+; GFX11-TRUE16-NEXT: s_clause 0x1f
+; GFX11-TRUE16-NEXT: scratch_store_b32 off, v104, s32 offset:176
+; GFX11-TRUE16-NEXT: scratch_store_b32 off, v105, s32 offset:172
+; GFX11-TRUE16-NEXT: scratch_store_b32 off, v106, s32 offset:168
+; GFX11-TRUE16-NEXT: scratch_store_b32 off, v107, s32 offset:164
+; GFX11-TRUE16-NEXT: scratch_store_b32 off, v108, s32 offset:160
+; GFX11-TRUE16-NEXT: scratch_store_b32 off, v109, s32 offset:156
+; GFX11-TRUE16-NEXT: scratch_store_b32 off, v110, s32 offset:152
+; GFX11-TRUE16-NEXT: scratch_store_b32 off, v111, s32 offset:148
+; GFX11-TRUE16-NEXT: scratch_store_b32 off, v120, s32 offset:144
+; GFX11-TRUE16-NEXT: scratch_store_b32 off, v121, s32 offset:140
+; GFX11-TRUE16-NEXT: scratch_store_b32 off, v122, s32 offset:136
+; GFX11-TRUE16-NEXT: scratch_store_b32 off, v123, s32 offset:132
+; GFX11-TRUE16-NEXT: scratch_store_b32 off, v124, s32 offset:128
+; GFX11-TRUE16-NEXT: scratch_store_b32 off, v125, s32 offset:124
+; GFX11-TRUE16-NEXT: scratch_store_b32 off, v126, s32 offset:120
+; GFX11-TRUE16-NEXT: scratch_store_b32 off, v127, s32 offset:116
+; GFX11-TRUE16-NEXT: scratch_store_b32 off, v136, s32 offset:112
+; GFX11-TRUE16-NEXT: scratch_store_b32 off, v137, s32 offset:108
+; GFX11-TRUE16-NEXT: scratch_store_b32 off, v138, s32 offset:104
+; GFX11-TRUE16-NEXT: scratch_store_b32 off, v139, s32 offset:100
+; GFX11-TRUE16-NEXT: scratch_store_b32 off, v140, s32 offset:96
+; GFX11-TRUE16-NEXT: scratch_store_b32 off, v141, s32 offset:92
+; GFX11-TRUE16-NEXT: scratch_store_b32 off, v142, s32 offset:88
+; GFX11-TRUE16-NEXT: scratch_store_b32 off, v143, s32 offset:84
+; GFX11-TRUE16-NEXT: scratch_store_b32 off, v152, s32 offset:80
+; GFX11-TRUE16-NEXT: scratch_store_b32 off, v153, s32 offset:76
+; GFX11-TRUE16-NEXT: scratch_store_b32 off, v154, s32 offset:72
+; GFX11-TRUE16-NEXT: scratch_store_b32 off, v155, s32 offset:68
+; GFX11-TRUE16-NEXT: scratch_store_b32 off, v156, s32 offset:64
+; GFX11-TRUE16-NEXT: scratch_store_b32 off, v157, s32 offset:60
+; GFX11-TRUE16-NEXT: scratch_store_b32 off, v158, s32 offset:56
+; GFX11-TRUE16-NEXT: scratch_store_b32 off, v159, s32 offset:52
+; GFX11-TRUE16-NEXT: s_clause 0xc
+; GFX11-TRUE16-NEXT: scratch_store_b32 off, v168, s32 offset:48
+; GFX11-TRUE16-NEXT: scratch_store_b32 off, v169, s32 offset:44
+; GFX11-TRUE16-NEXT: scratch_store_b32 off, v170, s32 offset:40
+; GFX11-TRUE16-NEXT: scratch_store_b32 off, v171, s32 offset:36
+; GFX11-TRUE16-NEXT: scratch_store_b32 off, v172, s32 offset:32
+; GFX11-TRUE16-NEXT: scratch_store_b32 off, v173, s32 offset:28
+; GFX11-TRUE16-NEXT: scratch_store_b32 off, v174, s32 offset:24
+; GFX11-TRUE16-NEXT: scratch_store_b32 off, v175, s32 offset:20
+; GFX11-TRUE16-NEXT: scratch_store_b32 off, v184, s32 offset:16
+; GFX11-TRUE16-NEXT: scratch_store_b32 off, v185, s32 offset:12
+; GFX11-TRUE16-NEXT: scratch_store_b32 off, v186, s32 offset:8
+; GFX11-TRUE16-NEXT: scratch_store_b32 off, v187, s32 offset:4
+; GFX11-TRUE16-NEXT: scratch_store_b32 off, v188, s32
+; GFX11-TRUE16-NEXT: v_dual_mov_b32 v185, v3 :: v_dual_mov_b32 v186, v2
+; GFX11-TRUE16-NEXT: v_dual_mov_b32 v187, v1 :: v_dual_mov_b32 v188, v0
+; GFX11-TRUE16-NEXT: s_lshr_b32 s15, s29, 16
+; GFX11-TRUE16-NEXT: s_lshr_b32 s14, s28, 16
+; GFX11-TRUE16-NEXT: s_lshr_b32 s13, s27, 16
+; GFX11-TRUE16-NEXT: s_lshr_b32 s12, s26, 16
+; GFX11-TRUE16-NEXT: s_lshr_b32 s11, s25, 16
+; GFX11-TRUE16-NEXT: s_lshr_b32 s10, s24, 16
+; GFX11-TRUE16-NEXT: s_lshr_b32 s9, s23, 16
+; GFX11-TRUE16-NEXT: s_lshr_b32 s8, s22, 16
+; GFX11-TRUE16-NEXT: s_lshr_b32 s7, s21, 16
+; GFX11-TRUE16-NEXT: s_lshr_b32 s6, s20, 16
+; GFX11-TRUE16-NEXT: s_lshr_b32 s5, s19, 16
+; GFX11-TRUE16-NEXT: s_lshr_b32 s4, s18, 16
+; GFX11-TRUE16-NEXT: s_lshr_b32 s43, s17, 16
+; GFX11-TRUE16-NEXT: s_lshr_b32 s44, s16, 16
+; GFX11-TRUE16-NEXT: s_lshr_b32 s45, s3, 16
+; GFX11-TRUE16-NEXT: s_lshr_b32 s46, s2, 16
+; GFX11-TRUE16-NEXT: s_lshr_b32 s41, s1, 16
+; GFX11-TRUE16-NEXT: s_lshr_b32 s40, s0, 16
+; GFX11-TRUE16-NEXT: s_mov_b32 s42, 0
+; GFX11-TRUE16-NEXT: s_pack_ll_b32_b16 s40, s0, s40
+; GFX11-TRUE16-NEXT: s_pack_ll_b32_b16 s41, s1, s41
+; GFX11-TRUE16-NEXT: s_pack_ll_b32_b16 s0, s2, s46
+; GFX11-TRUE16-NEXT: s_pack_ll_b32_b16 s1, s3, s45
+; GFX11-TRUE16-NEXT: s_pack_ll_b32_b16 s2, s16, s44
+; GFX11-TRUE16-NEXT: s_pack_ll_b32_b16 s3, s17, s43
+; GFX11-TRUE16-NEXT: s_pack_ll_b32_b16 s4, s18, s4
+; GFX11-TRUE16-NEXT: s_pack_ll_b32_b16 s5, s19, s5
+; GFX11-TRUE16-NEXT: s_pack_ll_b32_b16 s6, s20, s6
+; GFX11-TRUE16-NEXT: s_pack_ll_b32_b16 s7, s21, s7
+; GFX11-TRUE16-NEXT: s_pack_ll_b32_b16 s8, s22, s8
+; GFX11-TRUE16-NEXT: s_pack_ll_b32_b16 s9, s23, s9
+; GFX11-TRUE16-NEXT: s_pack_ll_b32_b16 s10, s24, s10
+; GFX11-TRUE16-NEXT: s_pack_ll_b32_b16 s11, s25, s11
+; GFX11-TRUE16-NEXT: s_pack_ll_b32_b16 s12, s26, s12
+; GFX11-TRUE16-NEXT: s_pack_ll_b32_b16 s13, s27, s13
+; GFX11-TRUE16-NEXT: s_pack_ll_b32_b16 s14, s28, s14
+; GFX11-TRUE16-NEXT: s_pack_ll_b32_b16 s15, s29, s15
; GFX11-TRUE16-NEXT: s_and_b32 s47, vcc_lo, exec_lo
; GFX11-TRUE16-NEXT: s_cbranch_scc0 .LBB15_4
; GFX11-TRUE16-NEXT: ; %bb.1: ; %cmp.false
-; GFX11-TRUE16-NEXT: v_lshl_or_b32 v18, v35, 16, v39
-; GFX11-TRUE16-NEXT: v_lshl_or_b32 v19, v34, 16, v38
-; GFX11-TRUE16-NEXT: v_lshl_or_b32 v20, v33, 16, v37
-; GFX11-TRUE16-NEXT: v_lshl_or_b32 v21, v32, 16, v36
-; GFX11-TRUE16-NEXT: v_dual_mov_b32 v0, s4 :: v_dual_mov_b32 v1, s1
-; GFX11-TRUE16-NEXT: v_dual_mov_b32 v2, s2 :: v_dual_mov_b32 v3, s3
-; GFX11-TRUE16-NEXT: v_dual_mov_b32 v4, s5 :: v_dual_mov_b32 v5, s6
-; GFX11-TRUE16-NEXT: v_dual_mov_b32 v6, s7 :: v_dual_mov_b32 v7, s8
-; GFX11-TRUE16-NEXT: v_dual_mov_b32 v8, s9 :: v_dual_mov_b32 v9, s10
-; GFX11-TRUE16-NEXT: v_dual_mov_b32 v10, s11 :: v_dual_mov_b32 v11, s12
-; GFX11-TRUE16-NEXT: v_dual_mov_b32 v12, s13 :: v_dual_mov_b32 v13, s14
-; GFX11-TRUE16-NEXT: v_dual_mov_b32 v14, s15 :: v_dual_mov_b32 v15, s16
-; GFX11-TRUE16-NEXT: v_dual_mov_b32 v16, s17 :: v_dual_mov_b32 v17, s0
-; GFX11-TRUE16-NEXT: s_and_not1_b32 vcc_lo, exec_lo, s40
+; GFX11-TRUE16-NEXT: v_dual_mov_b32 v0, s40 :: v_dual_mov_b32 v5, s0
+; GFX11-TRUE16-NEXT: v_dual_mov_b32 v2, s41 :: v_dual_mov_b32 v9, s1
+; GFX11-TRUE16-NEXT: v_dual_mov_b32 v14, s2 :: v_dual_mov_b32 v27, s4
+; GFX11-TRUE16-NEXT: v_dual_mov_b32 v20, s3 :: v_dual_mov_b32 v35, s5
+; GFX11-TRUE16-NEXT: v_dual_mov_b32 v44, s6 :: v_dual_mov_b32 v65, s8
+; GFX11-TRUE16-NEXT: v_dual_mov_b32 v54, s7 :: v_dual_mov_b32 v77, s9
+; GFX11-TRUE16-NEXT: v_dual_mov_b32 v90, s10 :: v_dual_mov_b32 v119, s12
+; GFX11-TRUE16-NEXT: v_dual_mov_b32 v104, s11 :: v_dual_mov_b32 v135, s13
+; GFX11-TRUE16-NEXT: v_mov_b32_e32 v152, s14
+; GFX11-TRUE16-NEXT: v_mov_b32_e32 v170, s15
+; GFX11-TRUE16-NEXT: s_and_not1_b32 vcc_lo, exec_lo, s42
; GFX11-TRUE16-NEXT: s_cbranch_vccnz .LBB15_3
; GFX11-TRUE16-NEXT: .LBB15_2: ; %cmp.true
-; GFX11-TRUE16-NEXT: v_lshl_or_b32 v18, v35, 16, v39
-; GFX11-TRUE16-NEXT: v_lshl_or_b32 v19, v34, 16, v38
-; GFX11-TRUE16-NEXT: v_lshl_or_b32 v20, v33, 16, v37
-; GFX11-TRUE16-NEXT: v_lshl_or_b32 v21, v32, 16, v36
-; GFX11-TRUE16-NEXT: v_pk_add_u16 v0, s4, 3 op_sel_hi:[1,0]
-; GFX11-TRUE16-NEXT: v_pk_add_u16 v1, s1, 3 op_sel_hi:[1,0]
-; GFX11-TRUE16-NEXT: v_pk_add_u16 v2, s2, 3 op_sel_hi:[1,0]
-; GFX11-TRUE16-NEXT: v_pk_add_u16 v3, s3, 3 op_sel_hi:[1,0]
-; GFX11-TRUE16-NEXT: v_pk_add_u16 v4, s5, 3 op_sel_hi:[1,0]
-; GFX11-TRUE16-NEXT: v_pk_add_u16 v5, s6, 3 op_sel_hi:[1,0]
-; GFX11-TRUE16-NEXT: v_pk_add_u16 v6, s7, 3 op_sel_hi:[1,0]
-; GFX11-TRUE16-NEXT: v_pk_add_u16 v7, s8, 3 op_sel_hi:[1,0]
-; GFX11-TRUE16-NEXT: v_pk_add_u16 v8, s9, 3 op_sel_hi:[1,0]
-; GFX11-TRUE16-NEXT: v_pk_add_u16 v9, s10, 3 op_sel_hi:[1,0]
-; GFX11-TRUE16-NEXT: v_pk_add_u16 v10, s11, 3 op_sel_hi:[1,0]
-; GFX11-TRUE16-NEXT: v_pk_add_u16 v11, s12, 3 op_sel_hi:[1,0]
-; GFX11-TRUE16-NEXT: v_pk_add_u16 v12, s13, 3 op_sel_hi:[1,0]
-; GFX11-TRUE16-NEXT: v_pk_add_u16 v13, s14, 3 op_sel_hi:[1,0]
-; GFX11-TRUE16-NEXT: v_pk_add_u16 v14, s15, 3 op_sel_hi:[1,0]
-; GFX11-TRUE16-NEXT: v_pk_add_u16 v15, s16, 3 op_sel_hi:[1,0]
-; GFX11-TRUE16-NEXT: v_pk_add_u16 v16, s17, 3 op_sel_hi:[1,0]
-; GFX11-TRUE16-NEXT: v_pk_add_u16 v17, s0, 3 op_sel_hi:[1,0]
-; GFX11-TRUE16-NEXT: v_pk_add_u16 v18, v18, 3 op_sel_hi:[1,0]
-; GFX11-TRUE16-NEXT: v_pk_add_u16 v19, v19, 3 op_sel_hi:[1,0]
-; GFX11-TRUE16-NEXT: v_pk_add_u16 v20, v20, 3 op_sel_hi:[1,0]
-; GFX11-TRUE16-NEXT: v_pk_add_u16 v21, v21, 3 op_sel_hi:[1,0]
+; GFX11-TRUE16-NEXT: v_pk_add_u16 v0, s40, 3 op_sel_hi:[1,0]
+; GFX11-TRUE16-NEXT: v_pk_add_u16 v2, s41, 3 op_sel_hi:[1,0]
+; GFX11-TRUE16-NEXT: v_pk_add_u16 v188, v188, 3 op_sel_hi:[1,0]
+; GFX11-TRUE16-NEXT: v_pk_add_u16 v187, v187, 3 op_sel_hi:[1,0]
+; GFX11-TRUE16-NEXT: v_pk_add_u16 v186, v186, 3 op_sel_hi:[1,0]
+; GFX11-TRUE16-NEXT: v_pk_add_u16 v185, v185, 3 op_sel_hi:[1,0]
+; GFX11-TRUE16-NEXT: v_pk_add_u16 v5, s0, 3 op_sel_hi:[1,0]
+; GFX11-TRUE16-NEXT: v_pk_add_u16 v9, s1, 3 op_sel_hi:[1,0]
+; GFX11-TRUE16-NEXT: v_pk_add_u16 v14, s2, 3 op_sel_hi:[1,0]
+; GFX11-TRUE16-NEXT: v_pk_add_u16 v20, s3, 3 op_sel_hi:[1,0]
+; GFX11-TRUE16-NEXT: v_pk_add_u16 v27, s4, 3 op_sel_hi:[1,0]
+; GFX11-TRUE16-NEXT: v_pk_add_u16 v35, s5, 3 op_sel_hi:[1,0]
+; GFX11-TRUE16-NEXT: v_pk_add_u16 v44, s6, 3 op_sel_hi:[1,0]
+; GFX11-TRUE16-NEXT: v_pk_add_u16 v54, s7, 3 op_sel_hi:[1,0]
+; GFX11-TRUE16-NEXT: v_pk_add_u16 v65, s8, 3 op_sel_hi:[1,0]
+; GFX11-TRUE16-NEXT: v_pk_add_u16 v77, s9, 3 op_sel_hi:[1,0]
+; GFX11-TRUE16-NEXT: v_pk_add_u16 v90, s10, 3 op_sel_hi:[1,0]
+; GFX11-TRUE16-NEXT: v_pk_add_u16 v104, s11, 3 op_sel_hi:[1,0]
+; GFX11-TRUE16-NEXT: v_pk_add_u16 v119, s12, 3 op_sel_hi:[1,0]
+; GFX11-TRUE16-NEXT: v_pk_add_u16 v135, s13, 3 op_sel_hi:[1,0]
+; GFX11-TRUE16-NEXT: v_pk_add_u16 v152, s14, 3 op_sel_hi:[1,0]
+; GFX11-TRUE16-NEXT: v_pk_add_u16 v170, s15, 3 op_sel_hi:[1,0]
; GFX11-TRUE16-NEXT: .LBB15_3: ; %end
+; GFX11-TRUE16-NEXT: v_dual_mov_b32 v1, v2 :: v_dual_mov_b32 v2, v5
+; GFX11-TRUE16-NEXT: v_dual_mov_b32 v5, v20 :: v_dual_mov_b32 v6, v27
+; GFX11-TRUE16-NEXT: v_dual_mov_b32 v7, v35 :: v_dual_mov_b32 v8, v44
+; GFX11-TRUE16-NEXT: v_dual_mov_b32 v11, v77 :: v_dual_mov_b32 v12, v90
+; GFX11-TRUE16-NEXT: v_mov_b32_e32 v13, v104
+; GFX11-TRUE16-NEXT: v_dual_mov_b32 v15, v135 :: v_dual_mov_b32 v16, v152
+; GFX11-TRUE16-NEXT: v_dual_mov_b32 v17, v170 :: v_dual_mov_b32 v18, v188
+; GFX11-TRUE16-NEXT: v_dual_mov_b32 v19, v187 :: v_dual_mov_b32 v20, v186
+; GFX11-TRUE16-NEXT: v_mov_b32_e32 v21, v185
+; GFX11-TRUE16-NEXT: s_clause 0x1f
+; GFX11-TRUE16-NEXT: scratch_load_b32 v188, off, s32
+; GFX11-TRUE16-NEXT: scratch_load_b32 v187, off, s32 offset:4
+; GFX11-TRUE16-NEXT: scratch_load_b32 v186, off, s32 offset:8
+; GFX11-TRUE16-NEXT: scratch_load_b32 v185, off, s32 offset:12
+; GFX11-TRUE16-NEXT: scratch_load_b32 v184, off, s32 offset:16
+; GFX11-TRUE16-NEXT: scratch_load_b32 v175, off, s32 offset:20
+; GFX11-TRUE16-NEXT: scratch_load_b32 v174, off, s32 offset:24
+; GFX11-TRUE16-NEXT: scratch_load_b32 v173, off, s32 offset:28
+; GFX11-TRUE16-NEXT: scratch_load_b32 v172, off, s32 offset:32
+; GFX11-TRUE16-NEXT: scratch_load_b32 v171, off, s32 offset:36
+; GFX11-TRUE16-NEXT: scratch_load_b32 v170, off, s32 offset:40
+; GFX11-TRUE16-NEXT: scratch_load_b32 v169, off, s32 offset:44
+; GFX11-TRUE16-NEXT: scratch_load_b32 v168, off, s32 offset:48
+; GFX11-TRUE16-NEXT: scratch_load_b32 v159, off, s32 offset:52
+; GFX11-TRUE16-NEXT: scratch_load_b32 v158, off, s32 offset:56
+; GFX11-TRUE16-NEXT: scratch_load_b32 v157, off, s32 offset:60
+; GFX11-TRUE16-NEXT: scratch_load_b32 v156, off, s32 offset:64
+; GFX11-TRUE16-NEXT: scratch_load_b32 v155, off, s32 offset:68
+; GFX11-TRUE16-NEXT: scratch_load_b32 v154, off, s32 offset:72
+; GFX11-TRUE16-NEXT: scratch_load_b32 v153, off, s32 offset:76
+; GFX11-TRUE16-NEXT: scratch_load_b32 v152, off, s32 offset:80
+; GFX11-TRUE16-NEXT: scratch_load_b32 v143, off, s32 offset:84
+; GFX11-TRUE16-NEXT: scratch_load_b32 v142, off, s32 offset:88
+; GFX11-TRUE16-NEXT: scratch_load_b32 v141, off, s32 offset:92
+; GFX11-TRUE16-NEXT: scratch_load_b32 v140, off, s32 offset:96
+; GFX11-TRUE16-NEXT: scratch_load_b32 v139, off, s32 offset:100
+; GFX11-TRUE16-NEXT: scratch_load_b32 v138, off, s32 offset:104
+; GFX11-TRUE16-NEXT: scratch_load_b32 v137, off, s32 offset:108
+; GFX11-TRUE16-NEXT: scratch_load_b32 v136, off, s32 offset:112
+; GFX11-TRUE16-NEXT: scratch_load_b32 v127, off, s32 offset:116
+; GFX11-TRUE16-NEXT: scratch_load_b32 v126, off, s32 offset:120
+; GFX11-TRUE16-NEXT: scratch_load_b32 v125, off, s32 offset:124
+; GFX11-TRUE16-NEXT: s_clause 0x1f
+; GFX11-TRUE16-NEXT: scratch_load_b32 v124, off, s32 offset:128
+; GFX11-TRUE16-NEXT: scratch_load_b32 v123, off, s32 offset:132
+; GFX11-TRUE16-NEXT: scratch_load_b32 v122, off, s32 offset:136
+; GFX11-TRUE16-NEXT: scratch_load_b32 v121, off, s32 offset:140
+; GFX11-TRUE16-NEXT: scratch_load_b32 v120, off, s32 offset:144
+; GFX11-TRUE16-NEXT: scratch_load_b32 v111, off, s32 offset:148
+; GFX11-TRUE16-NEXT: scratch_load_b32 v110, off, s32 offset:152
+; GFX11-TRUE16-NEXT: scratch_load_b32 v109, off, s32 offset:156
+; GFX11-TRUE16-NEXT: scratch_load_b32 v108, off, s32 offset:160
+; GFX11-TRUE16-NEXT: scratch_load_b32 v107, off, s32 offset:164
+; GFX11-TRUE16-NEXT: scratch_load_b32 v106, off, s32 offset:168
+; GFX11-TRUE16-NEXT: scratch_load_b32 v105, off, s32 offset:172
+; GFX11-TRUE16-NEXT: scratch_load_b32 v104, off, s32 offset:176
+; GFX11-TRUE16-NEXT: scratch_load_b32 v95, off, s32 offset:180
+; GFX11-TRUE16-NEXT: scratch_load_b32 v94, off, s32 offset:184
+; GFX11-TRUE16-NEXT: scratch_load_b32 v93, off, s32 offset:188
+; GFX11-TRUE16-NEXT: scratch_load_b32 v92, off, s32 offset:192
+; GFX11-TRUE16-NEXT: scratch_load_b32 v91, off, s32 offset:196
+; GFX11-TRUE16-NEXT: scratch_load_b32 v90, off, s32 offset:200
+; GFX11-TRUE16-NEXT: scratch_load_b32 v89, off, s32 offset:204
+; GFX11-TRUE16-NEXT: scratch_load_b32 v88, off, s32 offset:208
+; GFX11-TRUE16-NEXT: scratch_load_b32 v79, off, s32 offset:212
+; GFX11-TRUE16-NEXT: scratch_load_b32 v78, off, s32 offset:216
+; GFX11-TRUE16-NEXT: scratch_load_b32 v77, off, s32 offset:220
+; GFX11-TRUE16-NEXT: scratch_load_b32 v76, off, s32 offset:224
+; GFX11-TRUE16-NEXT: scratch_load_b32 v75, off, s32 offset:228
+; GFX11-TRUE16-NEXT: scratch_load_b32 v74, off, s32 offset:232
+; GFX11-TRUE16-NEXT: scratch_load_b32 v73, off, s32 offset:236
+; GFX11-TRUE16-NEXT: scratch_load_b32 v72, off, s32 offset:240
+; GFX11-TRUE16-NEXT: scratch_load_b32 v63, off, s32 offset:244
+; GFX11-TRUE16-NEXT: scratch_load_b32 v62, off, s32 offset:248
+; GFX11-TRUE16-NEXT: scratch_load_b32 v61, off, s32 offset:252
+; GFX11-TRUE16-NEXT: s_clause 0xc
+; GFX11-TRUE16-NEXT: scratch_load_b32 v60, off, s32 offset:256
+; GFX11-TRUE16-NEXT: scratch_load_b32 v59, off, s32 offset:260
+; GFX11-TRUE16-NEXT: scratch_load_b32 v58, off, s32 offset:264
+; GFX11-TRUE16-NEXT: scratch_load_b32 v57, off, s32 offset:268
+; GFX11-TRUE16-NEXT: scratch_load_b32 v56, off, s32 offset:272
+; GFX11-TRUE16-NEXT: scratch_load_b32 v47, off, s32 offset:276
+; GFX11-TRUE16-NEXT: scratch_load_b32 v46, off, s32 offset:280
+; GFX11-TRUE16-NEXT: scratch_load_b32 v45, off, s32 offset:284
+; GFX11-TRUE16-NEXT: scratch_load_b32 v44, off, s32 offset:288
+; GFX11-TRUE16-NEXT: scratch_load_b32 v43, off, s32 offset:292
+; GFX11-TRUE16-NEXT: scratch_load_b32 v42, off, s32 offset:296
+; GFX11-TRUE16-NEXT: scratch_load_b32 v41, off, s32 offset:300
+; GFX11-TRUE16-NEXT: scratch_load_b32 v40, off, s32 offset:304
+; GFX11-TRUE16-NEXT: v_dual_mov_b32 v3, v9 :: v_dual_mov_b32 v4, v14
+; GFX11-TRUE16-NEXT: v_dual_mov_b32 v9, v54 :: v_dual_mov_b32 v10, v65
+; GFX11-TRUE16-NEXT: v_mov_b32_e32 v14, v119
+; GFX11-TRUE16-NEXT: s_waitcnt vmcnt(0)
; GFX11-TRUE16-NEXT: s_setpc_b64 s[30:31]
; GFX11-TRUE16-NEXT: .LBB15_4:
; GFX11-TRUE16-NEXT: ; implicit-def: $vgpr0_vgpr1_vgpr2_vgpr3_vgpr4_vgpr5_vgpr6_vgpr7_vgpr8_vgpr9_vgpr10_vgpr11_vgpr12_vgpr13_vgpr14_vgpr15_vgpr16_vgpr17_vgpr18_vgpr19_vgpr20_vgpr21_vgpr22_vgpr23_vgpr24_vgpr25_vgpr26_vgpr27_vgpr28_vgpr29_vgpr30_vgpr31
+; GFX11-TRUE16-NEXT: ; implicit-def: $vgpr1_vgpr2_vgpr3_vgpr4_vgpr5_vgpr6_vgpr7_vgpr8_vgpr9_vgpr10_vgpr11_vgpr12_vgpr13_vgpr14_vgpr15_vgpr16_vgpr17_vgpr18_vgpr19_vgpr20_vgpr21_vgpr22_vgpr23_vgpr24_vgpr25_vgpr26_vgpr27_vgpr28_vgpr29_vgpr30_vgpr31_vgpr32
+; GFX11-TRUE16-NEXT: ; implicit-def: $vgpr3_vgpr4_vgpr5_vgpr6_vgpr7_vgpr8_vgpr9_vgpr10_vgpr11_vgpr12_vgpr13_vgpr14_vgpr15_vgpr16_vgpr17_vgpr18_vgpr19_vgpr20_vgpr21_vgpr22_vgpr23_vgpr24_vgpr25_vgpr26_vgpr27_vgpr28_vgpr29_vgpr30_vgpr31_vgpr32_vgpr33_vgpr34
+; GFX11-TRUE16-NEXT: ; implicit-def: $vgpr6_vgpr7_vgpr8_vgpr9_vgpr10_vgpr11_vgpr12_vgpr13_vgpr14_vgpr15_vgpr16_vgpr17_vgpr18_vgpr19_vgpr20_vgpr21_vgpr22_vgpr23_vgpr24_vgpr25_vgpr26_vgpr27_vgpr28_vgpr29_vgpr30_vgpr31_vgpr32_vgpr33_vgpr34_vgpr35_vgpr36_vgpr37
+; GFX11-TRUE16-NEXT: ; implicit-def: $vgpr10_vgpr11_vgpr12_vgpr13_vgpr14_vgpr15_vgpr16_vgpr17_vgpr18_vgpr19_vgpr20_vgpr21_vgpr22_vgpr23_vgpr24_vgpr25_vgpr26_vgpr27_vgpr28_vgpr29_vgpr30_vgpr31_vgpr32_vgpr33_vgpr34_vgpr35_vgpr36_vgpr37_vgpr38_vgpr39_vgpr40_vgpr41
+; GFX11-TRUE16-NEXT: ; implicit-def: $vgpr15_vgpr16_vgpr17_vgpr18_vgpr19_vgpr20_vgpr21_vgpr22_vgpr23_vgpr24_vgpr25_vgpr26_vgpr27_vgpr28_vgpr29_vgpr30_vgpr31_vgpr32_vgpr33_vgpr34_vgpr35_vgpr36_vgpr37_vgpr38_vgpr39_vgpr40_vgpr41_vgpr42_vgpr43_vgpr44_vgpr45_vgpr46
+; GFX11-TRUE16-NEXT: ; implicit-def: $vgpr21_vgpr22_vgpr23_vgpr24_vgpr25_vgpr26_vgpr27_vgpr28_vgpr29_vgpr30_vgpr31_vgpr32_vgpr33_vgpr34_vgpr35_vgpr36_vgpr37_vgpr38_vgpr39_vgpr40_vgpr41_vgpr42_vgpr43_vgpr44_vgpr45_vgpr46_vgpr47_vgpr48_vgpr49_vgpr50_vgpr51_vgpr52
+; GFX11-TRUE16-NEXT: ; implicit-def: $vgpr28_vgpr29_vgpr30_vgpr31_vgpr32_vgpr33_vgpr34_vgpr35_vgpr36_vgpr37_vgpr38_vgpr39_vgpr40_vgpr41_vgpr42_vgpr43_vgpr44_vgpr45_vgpr46_vgpr47_vgpr48_vgpr49_vgpr50_vgpr51_vgpr52_vgpr53_vgpr54_vgpr55_vgpr56_vgpr57_vgpr58_vgpr59
+; GFX11-TRUE16-NEXT: ; implicit-def: $vgpr36_vgpr37_vgpr38_vgpr39_vgpr40_vgpr41_vgpr42_vgpr43_vgpr44_vgpr45_vgpr46_vgpr47_vgpr48_vgpr49_vgpr50_vgpr51_vgpr52_vgpr53_vgpr54_vgpr55_vgpr56_vgpr57_vgpr58_vgpr59_vgpr60_vgpr61_vgpr62_vgpr63_vgpr64_vgpr65_vgpr66_vgpr67
+; GFX11-TRUE16-NEXT: ; implicit-def: $vgpr45_vgpr46_vgpr47_vgpr48_vgpr49_vgpr50_vgpr51_vgpr52_vgpr53_vgpr54_vgpr55_vgpr56_vgpr57_vgpr58_vgpr59_vgpr60_vgpr61_vgpr62_vgpr63_vgpr64_vgpr65_vgpr66_vgpr67_vgpr68_vgpr69_vgpr70_vgpr71_vgpr72_vgpr73_vgpr74_vgpr75_vgpr76
+; GFX11-TRUE16-NEXT: ; implicit-def: $vgpr55_vgpr56_vgpr57_vgpr58_vgpr59_vgpr60_vgpr61_vgpr62_vgpr63_vgpr64_vgpr65_vgpr66_vgpr67_vgpr68_vgpr69_vgpr70_vgpr71_vgpr72_vgpr73_vgpr74_vgpr75_vgpr76_vgpr77_vgpr78_vgpr79_vgpr80_vgpr81_vgpr82_vgpr83_vgpr84_vgpr85_vgpr86
+; GFX11-TRUE16-NEXT: ; implicit-def: $vgpr66_vgpr67_vgpr68_vgpr69_vgpr70_vgpr71_vgpr72_vgpr73_vgpr74_vgpr75_vgpr76_vgpr77_vgpr78_vgpr79_vgpr80_vgpr81_vgpr82_vgpr83_vgpr84_vgpr85_vgpr86_vgpr87_vgpr88_vgpr89_vgpr90_vgpr91_vgpr92_vgpr93_vgpr94_vgpr95_vgpr96_vgpr97
+; GFX11-TRUE16-NEXT: ; implicit-def: $vgpr78_vgpr79_vgpr80_vgpr81_vgpr82_vgpr83_vgpr84_vgpr85_vgpr86_vgpr87_vgpr88_vgpr89_vgpr90_vgpr91_vgpr92_vgpr93_vgpr94_vgpr95_vgpr96_vgpr97_vgpr98_vgpr99_vgpr100_vgpr101_vgpr102_vgpr103_vgpr104_vgpr105_vgpr106_vgpr107_vgpr108_vgpr109
+; GFX11-TRUE16-NEXT: ; implicit-def: $vgpr91_vgpr92_vgpr93_vgpr94_vgpr95_vgpr96_vgpr97_vgpr98_vgpr99_vgpr100_vgpr101_vgpr102_vgpr103_vgpr104_vgpr105_vgpr106_vgpr107_vgpr108_vgpr109_vgpr110_vgpr111_vgpr112_vgpr113_vgpr114_vgpr115_vgpr116_vgpr117_vgpr118_vgpr119_vgpr120_vgpr121_vgpr122
+; GFX11-TRUE16-NEXT: ; implicit-def: $vgpr105_vgpr106_vgpr107_vgpr108_vgpr109_vgpr110_vgpr111_vgpr112_vgpr113_vgpr114_vgpr115_vgpr116_vgpr117_vgpr118_vgpr119_vgpr120_vgpr121_vgpr122_vgpr123_vgpr124_vgpr125_vgpr126_vgpr127_vgpr128_vgpr129_vgpr130_vgpr131_vgpr132_vgpr133_vgpr134_vgpr135_vgpr136
+; GFX11-TRUE16-NEXT: ; implicit-def: $vgpr120_vgpr121_vgpr122_vgpr123_vgpr124_vgpr125_vgpr126_vgpr127_vgpr128_vgpr129_vgpr130_vgpr131_vgpr132_vgpr133_vgpr134_vgpr135_vgpr136_vgpr137_vgpr138_vgpr139_vgpr140_vgpr141_vgpr142_vgpr143_vgpr144_vgpr145_vgpr146_vgpr147_vgpr148_vgpr149_vgpr150_vgpr151
+; GFX11-TRUE16-NEXT: ; implicit-def: $vgpr136_vgpr137_vgpr138_vgpr139_vgpr140_vgpr141_vgpr142_vgpr143_vgpr144_vgpr145_vgpr146_vgpr147_vgpr148_vgpr149_vgpr150_vgpr151_vgpr152_vgpr153_vgpr154_vgpr155_vgpr156_vgpr157_vgpr158_vgpr159_vgpr160_vgpr161_vgpr162_vgpr163_vgpr164_vgpr165_vgpr166_vgpr167
+; GFX11-TRUE16-NEXT: ; implicit-def: $vgpr153_vgpr154_vgpr155_vgpr156_vgpr157_vgpr158_vgpr159_vgpr160_vgpr161_vgpr162_vgpr163_vgpr164_vgpr165_vgpr166_vgpr167_vgpr168_vgpr169_vgpr170_vgpr171_vgpr172_vgpr173_vgpr174_vgpr175_vgpr176_vgpr177_vgpr178_vgpr179_vgpr180_vgpr181_vgpr182_vgpr183_vgpr184
; GFX11-TRUE16-NEXT: s_branch .LBB15_2
;
; GFX11-FAKE16-LABEL: bitcast_v44i16_to_v22i32_scalar:
@@ -9137,105 +9310,278 @@ define inreg <22 x i32> @bitcast_v44f16_to_v22i32_scalar(<44 x half> inreg %a, i
; GFX11-TRUE16-LABEL: bitcast_v44f16_to_v22i32_scalar:
; GFX11-TRUE16: ; %bb.0:
; GFX11-TRUE16-NEXT: s_waitcnt vmcnt(0) expcnt(0) lgkmcnt(0)
-; GFX11-TRUE16-NEXT: v_mov_b16_e32 v32.h, 0
; GFX11-TRUE16-NEXT: v_cmp_ne_u32_e32 vcc_lo, 0, v4
-; GFX11-TRUE16-NEXT: v_mov_b16_e32 v32.l, v3.h
-; GFX11-TRUE16-NEXT: v_mov_b16_e32 v33.l, v2.h
-; GFX11-TRUE16-NEXT: v_mov_b16_e32 v34.l, v1.h
-; GFX11-TRUE16-NEXT: v_mov_b16_e32 v33.h, v32.h
-; GFX11-TRUE16-NEXT: v_mov_b16_e32 v34.h, v32.h
-; GFX11-TRUE16-NEXT: v_mov_b16_e32 v35.l, v0.h
-; GFX11-TRUE16-NEXT: v_mov_b16_e32 v35.h, v32.h
-; GFX11-TRUE16-NEXT: v_and_b32_e32 v39, 0xffff, v0
-; GFX11-TRUE16-NEXT: v_and_b32_e32 v38, 0xffff, v1
-; GFX11-TRUE16-NEXT: v_and_b32_e32 v37, 0xffff, v2
-; GFX11-TRUE16-NEXT: v_and_b32_e32 v36, 0xffff, v3
-; GFX11-TRUE16-NEXT: s_lshr_b32 s41, s29, 16
-; GFX11-TRUE16-NEXT: s_lshr_b32 s42, s28, 16
-; GFX11-TRUE16-NEXT: s_lshr_b32 s43, s27, 16
-; GFX11-TRUE16-NEXT: s_lshr_b32 s15, s26, 16
-; GFX11-TRUE16-NEXT: s_lshr_b32 s14, s25, 16
-; GFX11-TRUE16-NEXT: s_lshr_b32 s13, s24, 16
-; GFX11-TRUE16-NEXT: s_lshr_b32 s12, s23, 16
-; GFX11-TRUE16-NEXT: s_lshr_b32 s11, s22, 16
-; GFX11-TRUE16-NEXT: s_lshr_b32 s10, s21, 16
-; GFX11-TRUE16-NEXT: s_lshr_b32 s9, s20, 16
-; GFX11-TRUE16-NEXT: s_lshr_b32 s8, s19, 16
-; GFX11-TRUE16-NEXT: s_lshr_b32 s7, s18, 16
-; GFX11-TRUE16-NEXT: s_lshr_b32 s6, s17, 16
-; GFX11-TRUE16-NEXT: s_lshr_b32 s5, s16, 16
-; GFX11-TRUE16-NEXT: s_lshr_b32 s44, s3, 16
-; GFX11-TRUE16-NEXT: s_lshr_b32 s45, s2, 16
-; GFX11-TRUE16-NEXT: s_lshr_b32 s46, s1, 16
-; GFX11-TRUE16-NEXT: s_lshr_b32 s4, s0, 16
-; GFX11-TRUE16-NEXT: s_mov_b32 s40, 0
-; GFX11-TRUE16-NEXT: s_pack_ll_b32_b16 s4, s0, s4
-; GFX11-TRUE16-NEXT: s_pack_ll_b32_b16 s1, s1, s46
-; GFX11-TRUE16-NEXT: s_pack_ll_b32_b16 s2, s2, s45
-; GFX11-TRUE16-NEXT: s_pack_ll_b32_b16 s3, s3, s44
-; GFX11-TRUE16-NEXT: s_pack_ll_b32_b16 s5, s16, s5
-; GFX11-TRUE16-NEXT: s_pack_ll_b32_b16 s6, s17, s6
-; GFX11-TRUE16-NEXT: s_pack_ll_b32_b16 s7, s18, s7
-; GFX11-TRUE16-NEXT: s_pack_ll_b32_b16 s8, s19, s8
-; GFX11-TRUE16-NEXT: s_pack_ll_b32_b16 s9, s20, s9
-; GFX11-TRUE16-NEXT: s_pack_ll_b32_b16 s10, s21, s10
-; GFX11-TRUE16-NEXT: s_pack_ll_b32_b16 s11, s22, s11
-; GFX11-TRUE16-NEXT: s_pack_ll_b32_b16 s12, s23, s12
-; GFX11-TRUE16-NEXT: s_pack_ll_b32_b16 s13, s24, s13
-; GFX11-TRUE16-NEXT: s_pack_ll_b32_b16 s14, s25, s14
-; GFX11-TRUE16-NEXT: s_pack_ll_b32_b16 s15, s26, s15
-; GFX11-TRUE16-NEXT: s_pack_ll_b32_b16 s16, s27, s43
-; GFX11-TRUE16-NEXT: s_pack_ll_b32_b16 s17, s28, s42
-; GFX11-TRUE16-NEXT: s_pack_ll_b32_b16 s0, s29, s41
+; GFX11-TRUE16-NEXT: s_clause 0x1f
+; GFX11-TRUE16-NEXT: scratch_store_b32 off, v40, s32 offset:304
+; GFX11-TRUE16-NEXT: scratch_store_b32 off, v41, s32 offset:300
+; GFX11-TRUE16-NEXT: scratch_store_b32 off, v42, s32 offset:296
+; GFX11-TRUE16-NEXT: scratch_store_b32 off, v43, s32 offset:292
+; GFX11-TRUE16-NEXT: scratch_store_b32 off, v44, s32 offset:288
+; GFX11-TRUE16-NEXT: scratch_store_b32 off, v45, s32 offset:284
+; GFX11-TRUE16-NEXT: scratch_store_b32 off, v46, s32 offset:280
+; GFX11-TRUE16-NEXT: scratch_store_b32 off, v47, s32 offset:276
+; GFX11-TRUE16-NEXT: scratch_store_b32 off, v56, s32 offset:272
+; GFX11-TRUE16-NEXT: scratch_store_b32 off, v57, s32 offset:268
+; GFX11-TRUE16-NEXT: scratch_store_b32 off, v58, s32 offset:264
+; GFX11-TRUE16-NEXT: scratch_store_b32 off, v59, s32 offset:260
+; GFX11-TRUE16-NEXT: scratch_store_b32 off, v60, s32 offset:256
+; GFX11-TRUE16-NEXT: scratch_store_b32 off, v61, s32 offset:252
+; GFX11-TRUE16-NEXT: scratch_store_b32 off, v62, s32 offset:248
+; GFX11-TRUE16-NEXT: scratch_store_b32 off, v63, s32 offset:244
+; GFX11-TRUE16-NEXT: scratch_store_b32 off, v72, s32 offset:240
+; GFX11-TRUE16-NEXT: scratch_store_b32 off, v73, s32 offset:236
+; GFX11-TRUE16-NEXT: scratch_store_b32 off, v74, s32 offset:232
+; GFX11-TRUE16-NEXT: scratch_store_b32 off, v75, s32 offset:228
+; GFX11-TRUE16-NEXT: scratch_store_b32 off, v76, s32 offset:224
+; GFX11-TRUE16-NEXT: scratch_store_b32 off, v77, s32 offset:220
+; GFX11-TRUE16-NEXT: scratch_store_b32 off, v78, s32 offset:216
+; GFX11-TRUE16-NEXT: scratch_store_b32 off, v79, s32 offset:212
+; GFX11-TRUE16-NEXT: scratch_store_b32 off, v88, s32 offset:208
+; GFX11-TRUE16-NEXT: scratch_store_b32 off, v89, s32 offset:204
+; GFX11-TRUE16-NEXT: scratch_store_b32 off, v90, s32 offset:200
+; GFX11-TRUE16-NEXT: scratch_store_b32 off, v91, s32 offset:196
+; GFX11-TRUE16-NEXT: scratch_store_b32 off, v92, s32 offset:192
+; GFX11-TRUE16-NEXT: scratch_store_b32 off, v93, s32 offset:188
+; GFX11-TRUE16-NEXT: scratch_store_b32 off, v94, s32 offset:184
+; GFX11-TRUE16-NEXT: scratch_store_b32 off, v95, s32 offset:180
+; GFX11-TRUE16-NEXT: s_clause 0x1f
+; GFX11-TRUE16-NEXT: scratch_store_b32 off, v104, s32 offset:176
+; GFX11-TRUE16-NEXT: scratch_store_b32 off, v105, s32 offset:172
+; GFX11-TRUE16-NEXT: scratch_store_b32 off, v106, s32 offset:168
+; GFX11-TRUE16-NEXT: scratch_store_b32 off, v107, s32 offset:164
+; GFX11-TRUE16-NEXT: scratch_store_b32 off, v108, s32 offset:160
+; GFX11-TRUE16-NEXT: scratch_store_b32 off, v109, s32 offset:156
+; GFX11-TRUE16-NEXT: scratch_store_b32 off, v110, s32 offset:152
+; GFX11-TRUE16-NEXT: scratch_store_b32 off, v111, s32 offset:148
+; GFX11-TRUE16-NEXT: scratch_store_b32 off, v120, s32 offset:144
+; GFX11-TRUE16-NEXT: scratch_store_b32 off, v121, s32 offset:140
+; GFX11-TRUE16-NEXT: scratch_store_b32 off, v122, s32 offset:136
+; GFX11-TRUE16-NEXT: scratch_store_b32 off, v123, s32 offset:132
+; GFX11-TRUE16-NEXT: scratch_store_b32 off, v124, s32 offset:128
+; GFX11-TRUE16-NEXT: scratch_store_b32 off, v125, s32 offset:124
+; GFX11-TRUE16-NEXT: scratch_store_b32 off, v126, s32 offset:120
+; GFX11-TRUE16-NEXT: scratch_store_b32 off, v127, s32 offset:116
+; GFX11-TRUE16-NEXT: scratch_store_b32 off, v136, s32 offset:112
+; GFX11-TRUE16-NEXT: scratch_store_b32 off, v137, s32 offset:108
+; GFX11-TRUE16-NEXT: scratch_store_b32 off, v138, s32 offset:104
+; GFX11-TRUE16-NEXT: scratch_store_b32 off, v139, s32 offset:100
+; GFX11-TRUE16-NEXT: scratch_store_b32 off, v140, s32 offset:96
+; GFX11-TRUE16-NEXT: scratch_store_b32 off, v141, s32 offset:92
+; GFX11-TRUE16-NEXT: scratch_store_b32 off, v142, s32 offset:88
+; GFX11-TRUE16-NEXT: scratch_store_b32 off, v143, s32 offset:84
+; GFX11-TRUE16-NEXT: scratch_store_b32 off, v152, s32 offset:80
+; GFX11-TRUE16-NEXT: scratch_store_b32 off, v153, s32 offset:76
+; GFX11-TRUE16-NEXT: scratch_store_b32 off, v154, s32 offset:72
+; GFX11-TRUE16-NEXT: scratch_store_b32 off, v155, s32 offset:68
+; GFX11-TRUE16-NEXT: scratch_store_b32 off, v156, s32 offset:64
+; GFX11-TRUE16-NEXT: scratch_store_b32 off, v157, s32 offset:60
+; GFX11-TRUE16-NEXT: scratch_store_b32 off, v158, s32 offset:56
+; GFX11-TRUE16-NEXT: scratch_store_b32 off, v159, s32 offset:52
+; GFX11-TRUE16-NEXT: s_clause 0xc
+; GFX11-TRUE16-NEXT: scratch_store_b32 off, v168, s32 offset:48
+; GFX11-TRUE16-NEXT: scratch_store_b32 off, v169, s32 offset:44
+; GFX11-TRUE16-NEXT: scratch_store_b32 off, v170, s32 offset:40
+; GFX11-TRUE16-NEXT: scratch_store_b32 off, v171, s32 offset:36
+; GFX11-TRUE16-NEXT: scratch_store_b32 off, v172, s32 offset:32
+; GFX11-TRUE16-NEXT: scratch_store_b32 off, v173, s32 offset:28
+; GFX11-TRUE16-NEXT: scratch_store_b32 off, v174, s32 offset:24
+; GFX11-TRUE16-NEXT: scratch_store_b32 off, v175, s32 offset:20
+; GFX11-TRUE16-NEXT: scratch_store_b32 off, v184, s32 offset:16
+; GFX11-TRUE16-NEXT: scratch_store_b32 off, v185, s32 offset:12
+; GFX11-TRUE16-NEXT: scratch_store_b32 off, v186, s32 offset:8
+; GFX11-TRUE16-NEXT: scratch_store_b32 off, v187, s32 offset:4
+; GFX11-TRUE16-NEXT: scratch_store_b32 off, v188, s32
+; GFX11-TRUE16-NEXT: v_dual_mov_b32 v185, v3 :: v_dual_mov_b32 v186, v2
+; GFX11-TRUE16-NEXT: v_dual_mov_b32 v187, v1 :: v_dual_mov_b32 v188, v0
+; GFX11-TRUE16-NEXT: s_lshr_b32 s15, s29, 16
+; GFX11-TRUE16-NEXT: s_lshr_b32 s14, s28, 16
+; GFX11-TRUE16-NEXT: s_lshr_b32 s13, s27, 16
+; GFX11-TRUE16-NEXT: s_lshr_b32 s12, s26, 16
+; GFX11-TRUE16-NEXT: s_lshr_b32 s11, s25, 16
+; GFX11-TRUE16-NEXT: s_lshr_b32 s10, s24, 16
+; GFX11-TRUE16-NEXT: s_lshr_b32 s9, s23, 16
+; GFX11-TRUE16-NEXT: s_lshr_b32 s8, s22, 16
+; GFX11-TRUE16-NEXT: s_lshr_b32 s7, s21, 16
+; GFX11-TRUE16-NEXT: s_lshr_b32 s6, s20, 16
+; GFX11-TRUE16-NEXT: s_lshr_b32 s5, s19, 16
+; GFX11-TRUE16-NEXT: s_lshr_b32 s4, s18, 16
+; GFX11-TRUE16-NEXT: s_lshr_b32 s43, s17, 16
+; GFX11-TRUE16-NEXT: s_lshr_b32 s44, s16, 16
+; GFX11-TRUE16-NEXT: s_lshr_b32 s45, s3, 16
+; GFX11-TRUE16-NEXT: s_lshr_b32 s46, s2, 16
+; GFX11-TRUE16-NEXT: s_lshr_b32 s41, s1, 16
+; GFX11-TRUE16-NEXT: s_lshr_b32 s40, s0, 16
+; GFX11-TRUE16-NEXT: s_mov_b32 s42, 0
+; GFX11-TRUE16-NEXT: s_pack_ll_b32_b16 s40, s0, s40
+; GFX11-TRUE16-NEXT: s_pack_ll_b32_b16 s41, s1, s41
+; GFX11-TRUE16-NEXT: s_pack_ll_b32_b16 s0, s2, s46
+; GFX11-TRUE16-NEXT: s_pack_ll_b32_b16 s1, s3, s45
+; GFX11-TRUE16-NEXT: s_pack_ll_b32_b16 s2, s16, s44
+; GFX11-TRUE16-NEXT: s_pack_ll_b32_b16 s3, s17, s43
+; GFX11-TRUE16-NEXT: s_pack_ll_b32_b16 s4, s18, s4
+; GFX11-TRUE16-NEXT: s_pack_ll_b32_b16 s5, s19, s5
+; GFX11-TRUE16-NEXT: s_pack_ll_b32_b16 s6, s20, s6
+; GFX11-TRUE16-NEXT: s_pack_ll_b32_b16 s7, s21, s7
+; GFX11-TRUE16-NEXT: s_pack_ll_b32_b16 s8, s22, s8
+; GFX11-TRUE16-NEXT: s_pack_ll_b32_b16 s9, s23, s9
+; GFX11-TRUE16-NEXT: s_pack_ll_b32_b16 s10, s24, s10
+; GFX11-TRUE16-NEXT: s_pack_ll_b32_b16 s11, s25, s11
+; GFX11-TRUE16-NEXT: s_pack_ll_b32_b16 s12, s26, s12
+; GFX11-TRUE16-NEXT: s_pack_ll_b32_b16 s13, s27, s13
+; GFX11-TRUE16-NEXT: s_pack_ll_b32_b16 s14, s28, s14
+; GFX11-TRUE16-NEXT: s_pack_ll_b32_b16 s15, s29, s15
; GFX11-TRUE16-NEXT: s_and_b32 s47, vcc_lo, exec_lo
; GFX11-TRUE16-NEXT: s_cbranch_scc0 .LBB19_4
; GFX11-TRUE16-NEXT: ; %bb.1: ; %cmp.false
-; GFX11-TRUE16-NEXT: v_lshl_or_b32 v18, v35, 16, v39
-; GFX11-TRUE16-NEXT: v_lshl_or_b32 v19, v34, 16, v38
-; GFX11-TRUE16-NEXT: v_lshl_or_b32 v20, v33, 16, v37
-; GFX11-TRUE16-NEXT: v_lshl_or_b32 v21, v32, 16, v36
-; GFX11-TRUE16-NEXT: v_dual_mov_b32 v0, s4 :: v_dual_mov_b32 v1, s1
-; GFX11-TRUE16-NEXT: v_dual_mov_b32 v2, s2 :: v_dual_mov_b32 v3, s3
-; GFX11-TRUE16-NEXT: v_dual_mov_b32 v4, s5 :: v_dual_mov_b32 v5, s6
-; GFX11-TRUE16-NEXT: v_dual_mov_b32 v6, s7 :: v_dual_mov_b32 v7, s8
-; GFX11-TRUE16-NEXT: v_dual_mov_b32 v8, s9 :: v_dual_mov_b32 v9, s10
-; GFX11-TRUE16-NEXT: v_dual_mov_b32 v10, s11 :: v_dual_mov_b32 v11, s12
-; GFX11-TRUE16-NEXT: v_dual_mov_b32 v12, s13 :: v_dual_mov_b32 v13, s14
-; GFX11-TRUE16-NEXT: v_dual_mov_b32 v14, s15 :: v_dual_mov_b32 v15, s16
-; GFX11-TRUE16-NEXT: v_dual_mov_b32 v16, s17 :: v_dual_mov_b32 v17, s0
-; GFX11-TRUE16-NEXT: s_and_not1_b32 vcc_lo, exec_lo, s40
+; GFX11-TRUE16-NEXT: v_dual_mov_b32 v0, s40 :: v_dual_mov_b32 v5, s0
+; GFX11-TRUE16-NEXT: v_dual_mov_b32 v2, s41 :: v_dual_mov_b32 v9, s1
+; GFX11-TRUE16-NEXT: v_dual_mov_b32 v14, s2 :: v_dual_mov_b32 v27, s4
+; GFX11-TRUE16-NEXT: v_dual_mov_b32 v20, s3 :: v_dual_mov_b32 v35, s5
+; GFX11-TRUE16-NEXT: v_dual_mov_b32 v44, s6 :: v_dual_mov_b32 v65, s8
+; GFX11-TRUE16-NEXT: v_dual_mov_b32 v54, s7 :: v_dual_mov_b32 v77, s9
+; GFX11-TRUE16-NEXT: v_dual_mov_b32 v90, s10 :: v_dual_mov_b32 v119, s12
+; GFX11-TRUE16-NEXT: v_dual_mov_b32 v104, s11 :: v_dual_mov_b32 v135, s13
+; GFX11-TRUE16-NEXT: v_mov_b32_e32 v152, s14
+; GFX11-TRUE16-NEXT: v_mov_b32_e32 v170, s15
+; GFX11-TRUE16-NEXT: s_and_not1_b32 vcc_lo, exec_lo, s42
; GFX11-TRUE16-NEXT: s_cbranch_vccnz .LBB19_3
; GFX11-TRUE16-NEXT: .LBB19_2: ; %cmp.true
-; GFX11-TRUE16-NEXT: v_lshl_or_b32 v18, v35, 16, v39
-; GFX11-TRUE16-NEXT: v_lshl_or_b32 v19, v34, 16, v38
-; GFX11-TRUE16-NEXT: v_lshl_or_b32 v20, v33, 16, v37
-; GFX11-TRUE16-NEXT: v_lshl_or_b32 v21, v32, 16, v36
-; GFX11-TRUE16-NEXT: v_pk_add_f16 v0, 0x200, s4 op_sel_hi:[0,1]
-; GFX11-TRUE16-NEXT: v_pk_add_f16 v1, 0x200, s1 op_sel_hi:[0,1]
-; GFX11-TRUE16-NEXT: v_pk_add_f16 v2, 0x200, s2 op_sel_hi:[0,1]
-; GFX11-TRUE16-NEXT: v_pk_add_f16 v3, 0x200, s3 op_sel_hi:[0,1]
-; GFX11-TRUE16-NEXT: v_pk_add_f16 v4, 0x200, s5 op_sel_hi:[0,1]
-; GFX11-TRUE16-NEXT: v_pk_add_f16 v5, 0x200, s6 op_sel_hi:[0,1]
-; GFX11-TRUE16-NEXT: v_pk_add_f16 v6, 0x200, s7 op_sel_hi:[0,1]
-; GFX11-TRUE16-NEXT: v_pk_add_f16 v7, 0x200, s8 op_sel_hi:[0,1]
-; GFX11-TRUE16-NEXT: v_pk_add_f16 v8, 0x200, s9 op_sel_hi:[0,1]
-; GFX11-TRUE16-NEXT: v_pk_add_f16 v9, 0x200, s10 op_sel_hi:[0,1]
-; GFX11-TRUE16-NEXT: v_pk_add_f16 v10, 0x200, s11 op_sel_hi:[0,1]
-; GFX11-TRUE16-NEXT: v_pk_add_f16 v11, 0x200, s12 op_sel_hi:[0,1]
-; GFX11-TRUE16-NEXT: v_pk_add_f16 v12, 0x200, s13 op_sel_hi:[0,1]
-; GFX11-TRUE16-NEXT: v_pk_add_f16 v13, 0x200, s14 op_sel_hi:[0,1]
-; GFX11-TRUE16-NEXT: v_pk_add_f16 v14, 0x200, s15 op_sel_hi:[0,1]
-; GFX11-TRUE16-NEXT: v_pk_add_f16 v15, 0x200, s16 op_sel_hi:[0,1]
-; GFX11-TRUE16-NEXT: v_pk_add_f16 v16, 0x200, s17 op_sel_hi:[0,1]
-; GFX11-TRUE16-NEXT: v_pk_add_f16 v17, 0x200, s0 op_sel_hi:[0,1]
-; GFX11-TRUE16-NEXT: v_pk_add_f16 v18, 0x200, v18 op_sel_hi:[0,1]
-; GFX11-TRUE16-NEXT: v_pk_add_f16 v19, 0x200, v19 op_sel_hi:[0,1]
-; GFX11-TRUE16-NEXT: v_pk_add_f16 v20, 0x200, v20 op_sel_hi:[0,1]
-; GFX11-TRUE16-NEXT: v_pk_add_f16 v21, 0x200, v21 op_sel_hi:[0,1]
+; GFX11-TRUE16-NEXT: v_pk_add_f16 v0, 0x200, s40 op_sel_hi:[0,1]
+; GFX11-TRUE16-NEXT: v_pk_add_f16 v2, 0x200, s41 op_sel_hi:[0,1]
+; GFX11-TRUE16-NEXT: v_pk_add_f16 v188, 0x200, v188 op_sel_hi:[0,1]
+; GFX11-TRUE16-NEXT: v_pk_add_f16 v187, 0x200, v187 op_sel_hi:[0,1]
+; GFX11-TRUE16-NEXT: v_pk_add_f16 v186, 0x200, v186 op_sel_hi:[0,1]
+; GFX11-TRUE16-NEXT: v_pk_add_f16 v185, 0x200, v185 op_sel_hi:[0,1]
+; GFX11-TRUE16-NEXT: v_pk_add_f16 v5, 0x200, s0 op_sel_hi:[0,1]
+; GFX11-TRUE16-NEXT: v_pk_add_f16 v9, 0x200, s1 op_sel_hi:[0,1]
+; GFX11-TRUE16-NEXT: v_pk_add_f16 v14, 0x200, s2 op_sel_hi:[0,1]
+; GFX11-TRUE16-NEXT: v_pk_add_f16 v20, 0x200, s3 op_sel_hi:[0,1]
+; GFX11-TRUE16-NEXT: v_pk_add_f16 v27, 0x200, s4 op_sel_hi:[0,1]
+; GFX11-TRUE16-NEXT: v_pk_add_f16 v35, 0x200, s5 op_sel_hi:[0,1]
+; GFX11-TRUE16-NEXT: v_pk_add_f16 v44, 0x200, s6 op_sel_hi:[0,1]
+; GFX11-TRUE16-NEXT: v_pk_add_f16 v54, 0x200, s7 op_sel_hi:[0,1]
+; GFX11-TRUE16-NEXT: v_pk_add_f16 v65, 0x200, s8 op_sel_hi:[0,1]
+; GFX11-TRUE16-NEXT: v_pk_add_f16 v77, 0x200, s9 op_sel_hi:[0,1]
+; GFX11-TRUE16-NEXT: v_pk_add_f16 v90, 0x200, s10 op_sel_hi:[0,1]
+; GFX11-TRUE16-NEXT: v_pk_add_f16 v104, 0x200, s11 op_sel_hi:[0,1]
+; GFX11-TRUE16-NEXT: v_pk_add_f16 v119, 0x200, s12 op_sel_hi:[0,1]
+; GFX11-TRUE16-NEXT: v_pk_add_f16 v135, 0x200, s13 op_sel_hi:[0,1]
+; GFX11-TRUE16-NEXT: v_pk_add_f16 v152, 0x200, s14 op_sel_hi:[0,1]
+; GFX11-TRUE16-NEXT: v_pk_add_f16 v170, 0x200, s15 op_sel_hi:[0,1]
; GFX11-TRUE16-NEXT: .LBB19_3: ; %end
+; GFX11-TRUE16-NEXT: v_dual_mov_b32 v1, v2 :: v_dual_mov_b32 v2, v5
+; GFX11-TRUE16-NEXT: v_dual_mov_b32 v5, v20 :: v_dual_mov_b32 v6, v27
+; GFX11-TRUE16-NEXT: v_dual_mov_b32 v7, v35 :: v_dual_mov_b32 v8, v44
+; GFX11-TRUE16-NEXT: v_dual_mov_b32 v11, v77 :: v_dual_mov_b32 v12, v90
+; GFX11-TRUE16-NEXT: v_mov_b32_e32 v13, v104
+; GFX11-TRUE16-NEXT: v_dual_mov_b32 v15, v135 :: v_dual_mov_b32 v16, v152
+; GFX11-TRUE16-NEXT: v_dual_mov_b32 v17, v170 :: v_dual_mov_b32 v18, v188
+; GFX11-TRUE16-NEXT: v_dual_mov_b32 v19, v187 :: v_dual_mov_b32 v20, v186
+; GFX11-TRUE16-NEXT: v_mov_b32_e32 v21, v185
+; GFX11-TRUE16-NEXT: s_clause 0x1f
+; GFX11-TRUE16-NEXT: scratch_load_b32 v188, off, s32
+; GFX11-TRUE16-NEXT: scratch_load_b32 v187, off, s32 offset:4
+; GFX11-TRUE16-NEXT: scratch_load_b32 v186, off, s32 offset:8
+; GFX11-TRUE16-NEXT: scratch_load_b32 v185, off, s32 offset:12
+; GFX11-TRUE16-NEXT: scratch_load_b32 v184, off, s32 offset:16
+; GFX11-TRUE16-NEXT: scratch_load_b32 v175, off, s32 offset:20
+; GFX11-TRUE16-NEXT: scratch_load_b32 v174, off, s32 offset:24
+; GFX11-TRUE16-NEXT: scratch_load_b32 v173, off, s32 offset:28
+; GFX11-TRUE16-NEXT: scratch_load_b32 v172, off, s32 offset:32
+; GFX11-TRUE16-NEXT: scratch_load_b32 v171, off, s32 offset:36
+; GFX11-TRUE16-NEXT: scratch_load_b32 v170, off, s32 offset:40
+; GFX11-TRUE16-NEXT: scratch_load_b32 v169, off, s32 offset:44
+; GFX11-TRUE16-NEXT: scratch_load_b32 v168, off, s32 offset:48
+; GFX11-TRUE16-NEXT: scratch_load_b32 v159, off, s32 offset:52
+; GFX11-TRUE16-NEXT: scratch_load_b32 v158, off, s32 offset:56
+; GFX11-TRUE16-NEXT: scratch_load_b32 v157, off, s32 offset:60
+; GFX11-TRUE16-NEXT: scratch_load_b32 v156, off, s32 offset:64
+; GFX11-TRUE16-NEXT: scratch_load_b32 v155, off, s32 offset:68
+; GFX11-TRUE16-NEXT: scratch_load_b32 v154, off, s32 offset:72
+; GFX11-TRUE16-NEXT: scratch_load_b32 v153, off, s32 offset:76
+; GFX11-TRUE16-NEXT: scratch_load_b32 v152, off, s32 offset:80
+; GFX11-TRUE16-NEXT: scratch_load_b32 v143, off, s32 offset:84
+; GFX11-TRUE16-NEXT: scratch_load_b32 v142, off, s32 offset:88
+; GFX11-TRUE16-NEXT: scratch_load_b32 v141, off, s32 offset:92
+; GFX11-TRUE16-NEXT: scratch_load_b32 v140, off, s32 offset:96
+; GFX11-TRUE16-NEXT: scratch_load_b32 v139, off, s32 offset:100
+; GFX11-TRUE16-NEXT: scratch_load_b32 v138, off, s32 offset:104
+; GFX11-TRUE16-NEXT: scratch_load_b32 v137, off, s32 offset:108
+; GFX11-TRUE16-NEXT: scratch_load_b32 v136, off, s32 offset:112
+; GFX11-TRUE16-NEXT: scratch_load_b32 v127, off, s32 offset:116
+; GFX11-TRUE16-NEXT: scratch_load_b32 v126, off, s32 offset:120
+; GFX11-TRUE16-NEXT: scratch_load_b32 v125, off, s32 offset:124
+; GFX11-TRUE16-NEXT: s_clause 0x1f
+; GFX11-TRUE16-NEXT: scratch_load_b32 v124, off, s32 offset:128
+; GFX11-TRUE16-NEXT: scratch_load_b32 v123, off, s32 offset:132
+; GFX11-TRUE16-NEXT: scratch_load_b32 v122, off, s32 offset:136
+; GFX11-TRUE16-NEXT: scratch_load_b32 v121, off, s32 offset:140
+; GFX11-TRUE16-NEXT: scratch_load_b32 v120, off, s32 offset:144
+; GFX11-TRUE16-NEXT: scratch_load_b32 v111, off, s32 offset:148
+; GFX11-TRUE16-NEXT: scratch_load_b32 v110, off, s32 offset:152
+; GFX11-TRUE16-NEXT: scratch_load_b32 v109, off, s32 offset:156
+; GFX11-TRUE16-NEXT: scratch_load_b32 v108, off, s32 offset:160
+; GFX11-TRUE16-NEXT: scratch_load_b32 v107, off, s32 offset:164
+; GFX11-TRUE16-NEXT: scratch_load_b32 v106, off, s32 offset:168
+; GFX11-TRUE16-NEXT: scratch_load_b32 v105, off, s32 offset:172
+; GFX11-TRUE16-NEXT: scratch_load_b32 v104, off, s32 offset:176
+; GFX11-TRUE16-NEXT: scratch_load_b32 v95, off, s32 offset:180
+; GFX11-TRUE16-NEXT: scratch_load_b32 v94, off, s32 offset:184
+; GFX11-TRUE16-NEXT: scratch_load_b32 v93, off, s32 offset:188
+; GFX11-TRUE16-NEXT: scratch_load_b32 v92, off, s32 offset:192
+; GFX11-TRUE16-NEXT: scratch_load_b32 v91, off, s32 offset:196
+; GFX11-TRUE16-NEXT: scratch_load_b32 v90, off, s32 offset:200
+; GFX11-TRUE16-NEXT: scratch_load_b32 v89, off, s32 offset:204
+; GFX11-TRUE16-NEXT: scratch_load_b32 v88, off, s32 offset:208
+; GFX11-TRUE16-NEXT: scratch_load_b32 v79, off, s32 offset:212
+; GFX11-TRUE16-NEXT: scratch_load_b32 v78, off, s32 offset:216
+; GFX11-TRUE16-NEXT: scratch_load_b32 v77, off, s32 offset:220
+; GFX11-TRUE16-NEXT: scratch_load_b32 v76, off, s32 offset:224
+; GFX11-TRUE16-NEXT: scratch_load_b32 v75, off, s32 offset:228
+; GFX11-TRUE16-NEXT: scratch_load_b32 v74, off, s32 offset:232
+; GFX11-TRUE16-NEXT: scratch_load_b32 v73, off, s32 offset:236
+; GFX11-TRUE16-NEXT: scratch_load_b32 v72, off, s32 offset:240
+; GFX11-TRUE16-NEXT: scratch_load_b32 v63, off, s32 offset:244
+; GFX11-TRUE16-NEXT: scratch_load_b32 v62, off, s32 offset:248
+; GFX11-TRUE16-NEXT: scratch_load_b32 v61, off, s32 offset:252
+; GFX11-TRUE16-NEXT: s_clause 0xc
+; GFX11-TRUE16-NEXT: scratch_load_b32 v60, off, s32 offset:256
+; GFX11-TRUE16-NEXT: scratch_load_b32 v59, off, s32 offset:260
+; GFX11-TRUE16-NEXT: scratch_load_b32 v58, off, s32 offset:264
+; GFX11-TRUE16-NEXT: scratch_load_b32 v57, off, s32 offset:268
+; GFX11-TRUE16-NEXT: scratch_load_b32 v56, off, s32 offset:272
+; GFX11-TRUE16-NEXT: scratch_load_b32 v47, off, s32 offset:276
+; GFX11-TRUE16-NEXT: scratch_load_b32 v46, off, s32 offset:280
+; GFX11-TRUE16-NEXT: scratch_load_b32 v45, off, s32 offset:284
+; GFX11-TRUE16-NEXT: scratch_load_b32 v44, off, s32 offset:288
+; GFX11-TRUE16-NEXT: scratch_load_b32 v43, off, s32 offset:292
+; GFX11-TRUE16-NEXT: scratch_load_b32 v42, off, s32 offset:296
+; GFX11-TRUE16-NEXT: scratch_load_b32 v41, off, s32 offset:300
+; GFX11-TRUE16-NEXT: scratch_load_b32 v40, off, s32 offset:304
+; GFX11-TRUE16-NEXT: v_dual_mov_b32 v3, v9 :: v_dual_mov_b32 v4, v14
+; GFX11-TRUE16-NEXT: v_dual_mov_b32 v9, v54 :: v_dual_mov_b32 v10, v65
+; GFX11-TRUE16-NEXT: v_mov_b32_e32 v14, v119
+; GFX11-TRUE16-NEXT: s_waitcnt vmcnt(0)
; GFX11-TRUE16-NEXT: s_setpc_b64 s[30:31]
; GFX11-TRUE16-NEXT: .LBB19_4:
; GFX11-TRUE16-NEXT: ; implicit-def: $vgpr0_vgpr1_vgpr2_vgpr3_vgpr4_vgpr5_vgpr6_vgpr7_vgpr8_vgpr9_vgpr10_vgpr11_vgpr12_vgpr13_vgpr14_vgpr15_vgpr16_vgpr17_vgpr18_vgpr19_vgpr20_vgpr21_vgpr22_vgpr23_vgpr24_vgpr25_vgpr26_vgpr27_vgpr28_vgpr29_vgpr30_vgpr31
+; GFX11-TRUE16-NEXT: ; implicit-def: $vgpr1_vgpr2_vgpr3_vgpr4_vgpr5_vgpr6_vgpr7_vgpr8_vgpr9_vgpr10_vgpr11_vgpr12_vgpr13_vgpr14_vgpr15_vgpr16_vgpr17_vgpr18_vgpr19_vgpr20_vgpr21_vgpr22_vgpr23_vgpr24_vgpr25_vgpr26_vgpr27_vgpr28_vgpr29_vgpr30_vgpr31_vgpr32
+; GFX11-TRUE16-NEXT: ; implicit-def: $vgpr3_vgpr4_vgpr5_vgpr6_vgpr7_vgpr8_vgpr9_vgpr10_vgpr11_vgpr12_vgpr13_vgpr14_vgpr15_vgpr16_vgpr17_vgpr18_vgpr19_vgpr20_vgpr21_vgpr22_vgpr23_vgpr24_vgpr25_vgpr26_vgpr27_vgpr28_vgpr29_vgpr30_vgpr31_vgpr32_vgpr33_vgpr34
+; GFX11-TRUE16-NEXT: ; implicit-def: $vgpr6_vgpr7_vgpr8_vgpr9_vgpr10_vgpr11_vgpr12_vgpr13_vgpr14_vgpr15_vgpr16_vgpr17_vgpr18_vgpr19_vgpr20_vgpr21_vgpr22_vgpr23_vgpr24_vgpr25_vgpr26_vgpr27_vgpr28_vgpr29_vgpr30_vgpr31_vgpr32_vgpr33_vgpr34_vgpr35_vgpr36_vgpr37
+; GFX11-TRUE16-NEXT: ; implicit-def: $vgpr10_vgpr11_vgpr12_vgpr13_vgpr14_vgpr15_vgpr16_vgpr17_vgpr18_vgpr19_vgpr20_vgpr21_vgpr22_vgpr23_vgpr24_vgpr25_vgpr26_vgpr27_vgpr28_vgpr29_vgpr30_vgpr31_vgpr32_vgpr33_vgpr34_vgpr35_vgpr36_vgpr37_vgpr38_vgpr39_vgpr40_vgpr41
+; GFX11-TRUE16-NEXT: ; implicit-def: $vgpr15_vgpr16_vgpr17_vgpr18_vgpr19_vgpr20_vgpr21_vgpr22_vgpr23_vgpr24_vgpr25_vgpr26_vgpr27_vgpr28_vgpr29_vgpr30_vgpr31_vgpr32_vgpr33_vgpr34_vgpr35_vgpr36_vgpr37_vgpr38_vgpr39_vgpr40_vgpr41_vgpr42_vgpr43_vgpr44_vgpr45_vgpr46
+; GFX11-TRUE16-NEXT: ; implicit-def: $vgpr21_vgpr22_vgpr23_vgpr24_vgpr25_vgpr26_vgpr27_vgpr28_vgpr29_vgpr30_vgpr31_vgpr32_vgpr33_vgpr34_vgpr35_vgpr36_vgpr37_vgpr38_vgpr39_vgpr40_vgpr41_vgpr42_vgpr43_vgpr44_vgpr45_vgpr46_vgpr47_vgpr48_vgpr49_vgpr50_vgpr51_vgpr52
+; GFX11-TRUE16-NEXT: ; implicit-def: $vgpr28_vgpr29_vgpr30_vgpr31_vgpr32_vgpr33_vgpr34_vgpr35_vgpr36_vgpr37_vgpr38_vgpr39_vgpr40_vgpr41_vgpr42_vgpr43_vgpr44_vgpr45_vgpr46_vgpr47_vgpr48_vgpr49_vgpr50_vgpr51_vgpr52_vgpr53_vgpr54_vgpr55_vgpr56_vgpr57_vgpr58_vgpr59
+; GFX11-TRUE16-NEXT: ; implicit-def: $vgpr36_vgpr37_vgpr38_vgpr39_vgpr40_vgpr41_vgpr42_vgpr43_vgpr44_vgpr45_vgpr46_vgpr47_vgpr48_vgpr49_vgpr50_vgpr51_vgpr52_vgpr53_vgpr54_vgpr55_vgpr56_vgpr57_vgpr58_vgpr59_vgpr60_vgpr61_vgpr62_vgpr63_vgpr64_vgpr65_vgpr66_vgpr67
+; GFX11-TRUE16-NEXT: ; implicit-def: $vgpr45_vgpr46_vgpr47_vgpr48_vgpr49_vgpr50_vgpr51_vgpr52_vgpr53_vgpr54_vgpr55_vgpr56_vgpr57_vgpr58_vgpr59_vgpr60_vgpr61_vgpr62_vgpr63_vgpr64_vgpr65_vgpr66_vgpr67_vgpr68_vgpr69_vgpr70_vgpr71_vgpr72_vgpr73_vgpr74_vgpr75_vgpr76
+; GFX11-TRUE16-NEXT: ; implicit-def: $vgpr55_vgpr56_vgpr57_vgpr58_vgpr59_vgpr60_vgpr61_vgpr62_vgpr63_vgpr64_vgpr65_vgpr66_vgpr67_vgpr68_vgpr69_vgpr70_vgpr71_vgpr72_vgpr73_vgpr74_vgpr75_vgpr76_vgpr77_vgpr78_vgpr79_vgpr80_vgpr81_vgpr82_vgpr83_vgpr84_vgpr85_vgpr86
+; GFX11-TRUE16-NEXT: ; implicit-def: $vgpr66_vgpr67_vgpr68_vgpr69_vgpr70_vgpr71_vgpr72_vgpr73_vgpr74_vgpr75_vgpr76_vgpr77_vgpr78_vgpr79_vgpr80_vgpr81_vgpr82_vgpr83_vgpr84_vgpr85_vgpr86_vgpr87_vgpr88_vgpr89_vgpr90_vgpr91_vgpr92_vgpr93_vgpr94_vgpr95_vgpr96_vgpr97
+; GFX11-TRUE16-NEXT: ; implicit-def: $vgpr78_vgpr79_vgpr80_vgpr81_vgpr82_vgpr83_vgpr84_vgpr85_vgpr86_vgpr87_vgpr88_vgpr89_vgpr90_vgpr91_vgpr92_vgpr93_vgpr94_vgpr95_vgpr96_vgpr97_vgpr98_vgpr99_vgpr100_vgpr101_vgpr102_vgpr103_vgpr104_vgpr105_vgpr106_vgpr107_vgpr108_vgpr109
+; GFX11-TRUE16-NEXT: ; implicit-def: $vgpr91_vgpr92_vgpr93_vgpr94_vgpr95_vgpr96_vgpr97_vgpr98_vgpr99_vgpr100_vgpr101_vgpr102_vgpr103_vgpr104_vgpr105_vgpr106_vgpr107_vgpr108_vgpr109_vgpr110_vgpr111_vgpr112_vgpr113_vgpr114_vgpr115_vgpr116_vgpr117_vgpr118_vgpr119_vgpr120_vgpr121_vgpr122
+; GFX11-TRUE16-NEXT: ; implicit-def: $vgpr105_vgpr106_vgpr107_vgpr108_vgpr109_vgpr110_vgpr111_vgpr112_vgpr113_vgpr114_vgpr115_vgpr116_vgpr117_vgpr118_vgpr119_vgpr120_vgpr121_vgpr122_vgpr123_vgpr124_vgpr125_vgpr126_vgpr127_vgpr128_vgpr129_vgpr130_vgpr131_vgpr132_vgpr133_vgpr134_vgpr135_vgpr136
+; GFX11-TRUE16-NEXT: ; implicit-def: $vgpr120_vgpr121_vgpr122_vgpr123_vgpr124_vgpr125_vgpr126_vgpr127_vgpr128_vgpr129_vgpr130_vgpr131_vgpr132_vgpr133_vgpr134_vgpr135_vgpr136_vgpr137_vgpr138_vgpr139_vgpr140_vgpr141_vgpr142_vgpr143_vgpr144_vgpr145_vgpr146_vgpr147_vgpr148_vgpr149_vgpr150_vgpr151
+; GFX11-TRUE16-NEXT: ; implicit-def: $vgpr136_vgpr137_vgpr138_vgpr139_vgpr140_vgpr141_vgpr142_vgpr143_vgpr144_vgpr145_vgpr146_vgpr147_vgpr148_vgpr149_vgpr150_vgpr151_vgpr152_vgpr153_vgpr154_vgpr155_vgpr156_vgpr157_vgpr158_vgpr159_vgpr160_vgpr161_vgpr162_vgpr163_vgpr164_vgpr165_vgpr166_vgpr167
+; GFX11-TRUE16-NEXT: ; implicit-def: $vgpr153_vgpr154_vgpr155_vgpr156_vgpr157_vgpr158_vgpr159_vgpr160_vgpr161_vgpr162_vgpr163_vgpr164_vgpr165_vgpr166_vgpr167_vgpr168_vgpr169_vgpr170_vgpr171_vgpr172_vgpr173_vgpr174_vgpr175_vgpr176_vgpr177_vgpr178_vgpr179_vgpr180_vgpr181_vgpr182_vgpr183_vgpr184
; GFX11-TRUE16-NEXT: s_branch .LBB19_2
;
; GFX11-FAKE16-LABEL: bitcast_v44f16_to_v22i32_scalar:
@@ -12099,155 +12445,295 @@ define inreg <44 x i16> @bitcast_v22f32_to_v44i16_scalar(<22 x float> inreg %a,
; GFX9-NEXT: ; implicit-def: $vgpr30
; GFX9-NEXT: s_branch .LBB29_2
;
-; GFX11-LABEL: bitcast_v22f32_to_v44i16_scalar:
-; GFX11: ; %bb.0:
-; GFX11-NEXT: s_waitcnt vmcnt(0) expcnt(0) lgkmcnt(0)
-; GFX11-NEXT: v_cmp_ne_u32_e32 vcc_lo, 0, v4
-; GFX11-NEXT: v_dual_mov_b32 v22, s0 :: v_dual_mov_b32 v21, s1
-; GFX11-NEXT: v_dual_mov_b32 v20, s2 :: v_dual_mov_b32 v19, s3
-; GFX11-NEXT: v_dual_mov_b32 v18, s16 :: v_dual_mov_b32 v5, s18
-; GFX11-NEXT: v_dual_mov_b32 v6, s17 :: v_dual_mov_b32 v11, s19
-; GFX11-NEXT: v_dual_mov_b32 v10, s20 :: v_dual_mov_b32 v9, s21
-; GFX11-NEXT: v_dual_mov_b32 v8, s22 :: v_dual_mov_b32 v7, s23
-; GFX11-NEXT: v_dual_mov_b32 v15, s24 :: v_dual_mov_b32 v14, s25
-; GFX11-NEXT: v_dual_mov_b32 v13, s26 :: v_dual_mov_b32 v12, s27
-; GFX11-NEXT: v_dual_mov_b32 v16, s28 :: v_dual_mov_b32 v17, s29
-; GFX11-NEXT: s_mov_b32 s0, 0
-; GFX11-NEXT: s_and_b32 s1, vcc_lo, exec_lo
-; GFX11-NEXT: s_cbranch_scc0 .LBB29_4
-; GFX11-NEXT: ; %bb.1: ; %cmp.false
-; GFX11-NEXT: v_lshrrev_b32_e32 v26, 16, v3
-; GFX11-NEXT: v_lshrrev_b32_e32 v27, 16, v2
-; GFX11-NEXT: v_lshrrev_b32_e32 v28, 16, v1
-; GFX11-NEXT: v_lshrrev_b32_e32 v29, 16, v0
-; GFX11-NEXT: v_lshrrev_b32_e32 v30, 16, v17
-; GFX11-NEXT: v_lshrrev_b32_e32 v31, 16, v16
-; GFX11-NEXT: v_lshrrev_b32_e32 v32, 16, v12
-; GFX11-NEXT: v_lshrrev_b32_e32 v33, 16, v13
-; GFX11-NEXT: v_lshrrev_b32_e32 v34, 16, v14
-; GFX11-NEXT: v_lshrrev_b32_e32 v35, 16, v15
-; GFX11-NEXT: v_lshrrev_b32_e32 v36, 16, v7
-; GFX11-NEXT: v_lshrrev_b32_e32 v37, 16, v8
-; GFX11-NEXT: v_lshrrev_b32_e32 v38, 16, v9
-; GFX11-NEXT: v_lshrrev_b32_e32 v39, 16, v10
-; GFX11-NEXT: v_lshrrev_b32_e32 v48, 16, v11
-; GFX11-NEXT: v_lshrrev_b32_e32 v49, 16, v5
-; GFX11-NEXT: v_lshrrev_b32_e32 v50, 16, v6
-; GFX11-NEXT: v_lshrrev_b32_e32 v4, 16, v18
-; GFX11-NEXT: v_lshrrev_b32_e32 v23, 16, v19
-; GFX11-NEXT: v_lshrrev_b32_e32 v51, 16, v20
-; GFX11-NEXT: v_lshrrev_b32_e32 v25, 16, v21
-; GFX11-NEXT: v_lshrrev_b32_e32 v24, 16, v22
-; GFX11-NEXT: s_and_not1_b32 vcc_lo, exec_lo, s0
-; GFX11-NEXT: s_cbranch_vccnz .LBB29_3
-; GFX11-NEXT: .LBB29_2: ; %cmp.true
-; GFX11-NEXT: v_dual_add_f32 v3, 1.0, v3 :: v_dual_add_f32 v2, 1.0, v2
-; GFX11-NEXT: v_dual_add_f32 v1, 1.0, v1 :: v_dual_add_f32 v0, 1.0, v0
-; GFX11-NEXT: v_dual_add_f32 v17, 1.0, v17 :: v_dual_add_f32 v16, 1.0, v16
-; GFX11-NEXT: v_dual_add_f32 v12, 1.0, v12 :: v_dual_add_f32 v13, 1.0, v13
-; GFX11-NEXT: v_dual_add_f32 v14, 1.0, v14 :: v_dual_add_f32 v15, 1.0, v15
-; GFX11-NEXT: v_dual_add_f32 v7, 1.0, v7 :: v_dual_add_f32 v8, 1.0, v8
-; GFX11-NEXT: v_dual_add_f32 v9, 1.0, v9 :: v_dual_add_f32 v10, 1.0, v10
-; GFX11-NEXT: v_dual_add_f32 v11, 1.0, v11 :: v_dual_add_f32 v6, 1.0, v6
-; GFX11-NEXT: v_dual_add_f32 v5, 1.0, v5 :: v_dual_add_f32 v18, 1.0, v18
-; GFX11-NEXT: v_dual_add_f32 v19, 1.0, v19 :: v_dual_add_f32 v20, 1.0, v20
-; GFX11-NEXT: v_dual_add_f32 v21, 1.0, v21 :: v_dual_add_f32 v22, 1.0, v22
-; GFX11-NEXT: v_lshrrev_b32_e32 v26, 16, v3
-; GFX11-NEXT: v_lshrrev_b32_e32 v27, 16, v2
-; GFX11-NEXT: v_lshrrev_b32_e32 v28, 16, v1
-; GFX11-NEXT: v_lshrrev_b32_e32 v29, 16, v0
-; GFX11-NEXT: v_lshrrev_b32_e32 v30, 16, v17
-; GFX11-NEXT: v_lshrrev_b32_e32 v31, 16, v16
-; GFX11-NEXT: v_lshrrev_b32_e32 v32, 16, v12
-; GFX11-NEXT: v_lshrrev_b32_e32 v33, 16, v13
-; GFX11-NEXT: v_lshrrev_b32_e32 v34, 16, v14
-; GFX11-NEXT: v_lshrrev_b32_e32 v35, 16, v15
-; GFX11-NEXT: v_lshrrev_b32_e32 v36, 16, v7
-; GFX11-NEXT: v_lshrrev_b32_e32 v37, 16, v8
-; GFX11-NEXT: v_lshrrev_b32_e32 v38, 16, v9
-; GFX11-NEXT: v_lshrrev_b32_e32 v39, 16, v10
-; GFX11-NEXT: v_lshrrev_b32_e32 v48, 16, v11
-; GFX11-NEXT: v_lshrrev_b32_e32 v49, 16, v5
-; GFX11-NEXT: v_lshrrev_b32_e32 v50, 16, v6
-; GFX11-NEXT: v_lshrrev_b32_e32 v4, 16, v18
-; GFX11-NEXT: v_lshrrev_b32_e32 v23, 16, v19
-; GFX11-NEXT: v_lshrrev_b32_e32 v51, 16, v20
-; GFX11-NEXT: v_lshrrev_b32_e32 v25, 16, v21
-; GFX11-NEXT: v_lshrrev_b32_e32 v24, 16, v22
-; GFX11-NEXT: .LBB29_3: ; %end
-; GFX11-NEXT: v_and_b32_e32 v19, 0xffff, v19
-; GFX11-NEXT: v_and_b32_e32 v21, 0xffff, v21
-; GFX11-NEXT: v_and_b32_e32 v18, 0xffff, v18
-; GFX11-NEXT: v_and_b32_e32 v11, 0xffff, v11
-; GFX11-NEXT: v_and_b32_e32 v6, 0xffff, v6
-; GFX11-NEXT: v_lshl_or_b32 v23, v23, 16, v19
-; GFX11-NEXT: v_and_b32_e32 v19, 0xffff, v7
-; GFX11-NEXT: v_and_b32_e32 v20, 0xffff, v20
-; GFX11-NEXT: v_lshl_or_b32 v25, v25, 16, v21
-; GFX11-NEXT: v_and_b32_e32 v22, 0xffff, v22
-; GFX11-NEXT: v_and_b32_e32 v21, 0xffff, v5
-; GFX11-NEXT: v_lshl_or_b32 v4, v4, 16, v18
-; GFX11-NEXT: v_and_b32_e32 v10, 0xffff, v10
-; GFX11-NEXT: v_and_b32_e32 v18, 0xffff, v8
-; GFX11-NEXT: v_lshl_or_b32 v7, v48, 16, v11
-; GFX11-NEXT: v_lshl_or_b32 v11, v36, 16, v19
-; GFX11-NEXT: v_and_b32_e32 v15, 0xffff, v15
-; GFX11-NEXT: v_and_b32_e32 v19, 0xffff, v12
-; GFX11-NEXT: v_and_b32_e32 v1, 0xffff, v1
-; GFX11-NEXT: v_and_b32_e32 v3, 0xffff, v3
-; GFX11-NEXT: v_lshl_or_b32 v5, v50, 16, v6
-; GFX11-NEXT: v_lshl_or_b32 v6, v49, 16, v21
-; GFX11-NEXT: v_and_b32_e32 v9, 0xffff, v9
-; GFX11-NEXT: v_lshl_or_b32 v8, v39, 16, v10
-; GFX11-NEXT: v_lshl_or_b32 v10, v37, 16, v18
-; GFX11-NEXT: v_and_b32_e32 v14, 0xffff, v14
-; GFX11-NEXT: v_and_b32_e32 v18, 0xffff, v13
-; GFX11-NEXT: v_and_b32_e32 v16, 0xffff, v16
-; GFX11-NEXT: v_lshl_or_b32 v12, v35, 16, v15
-; GFX11-NEXT: v_lshl_or_b32 v15, v32, 16, v19
-; GFX11-NEXT: v_and_b32_e32 v17, 0xffff, v17
-; GFX11-NEXT: v_and_b32_e32 v0, 0xffff, v0
-; GFX11-NEXT: v_and_b32_e32 v2, 0xffff, v2
-; GFX11-NEXT: v_lshl_or_b32 v19, v28, 16, v1
-; GFX11-NEXT: v_lshl_or_b32 v21, v26, 16, v3
-; GFX11-NEXT: v_mov_b32_e32 v1, v25
-; GFX11-NEXT: v_lshl_or_b32 v24, v24, 16, v22
-; GFX11-NEXT: v_mov_b32_e32 v3, v23
-; GFX11-NEXT: v_lshl_or_b32 v22, v51, 16, v20
-; GFX11-NEXT: v_lshl_or_b32 v9, v38, 16, v9
-; GFX11-NEXT: v_lshl_or_b32 v13, v34, 16, v14
-; GFX11-NEXT: v_lshl_or_b32 v14, v33, 16, v18
-; GFX11-NEXT: v_lshl_or_b32 v16, v31, 16, v16
-; GFX11-NEXT: v_lshl_or_b32 v17, v30, 16, v17
-; GFX11-NEXT: v_lshl_or_b32 v18, v29, 16, v0
-; GFX11-NEXT: v_lshl_or_b32 v20, v27, 16, v2
-; GFX11-NEXT: v_mov_b32_e32 v0, v24
-; GFX11-NEXT: v_mov_b32_e32 v2, v22
-; GFX11-NEXT: s_setpc_b64 s[30:31]
-; GFX11-NEXT: .LBB29_4:
-; GFX11-NEXT: ; implicit-def: $vgpr24
-; GFX11-NEXT: ; implicit-def: $vgpr25
-; GFX11-NEXT: ; implicit-def: $vgpr51
-; GFX11-NEXT: ; implicit-def: $vgpr23
-; GFX11-NEXT: ; implicit-def: $vgpr4
-; GFX11-NEXT: ; implicit-def: $vgpr50
-; GFX11-NEXT: ; implicit-def: $vgpr49
-; GFX11-NEXT: ; implicit-def: $vgpr48
-; GFX11-NEXT: ; implicit-def: $vgpr39
-; GFX11-NEXT: ; implicit-def: $vgpr38
-; GFX11-NEXT: ; implicit-def: $vgpr37
-; GFX11-NEXT: ; implicit-def: $vgpr36
-; GFX11-NEXT: ; implicit-def: $vgpr35
-; GFX11-NEXT: ; implicit-def: $vgpr34
-; GFX11-NEXT: ; implicit-def: $vgpr33
-; GFX11-NEXT: ; implicit-def: $vgpr32
-; GFX11-NEXT: ; implicit-def: $vgpr31
-; GFX11-NEXT: ; implicit-def: $vgpr30
-; GFX11-NEXT: ; implicit-def: $vgpr29
-; GFX11-NEXT: ; implicit-def: $vgpr28
-; GFX11-NEXT: ; implicit-def: $vgpr27
-; GFX11-NEXT: ; implicit-def: $vgpr26
-; GFX11-NEXT: s_branch .LBB29_2
+; GFX11-TRUE16-LABEL: bitcast_v22f32_to_v44i16_scalar:
+; GFX11-TRUE16: ; %bb.0:
+; GFX11-TRUE16-NEXT: s_waitcnt vmcnt(0) expcnt(0) lgkmcnt(0)
+; GFX11-TRUE16-NEXT: v_dual_mov_b32 v16, v4 :: v_dual_mov_b32 v21, v3
+; GFX11-TRUE16-NEXT: v_dual_mov_b32 v20, v2 :: v_dual_mov_b32 v19, v1
+; GFX11-TRUE16-NEXT: v_dual_mov_b32 v18, v0 :: v_dual_mov_b32 v1, s1
+; GFX11-TRUE16-NEXT: s_delay_alu instid0(VALU_DEP_3)
+; GFX11-TRUE16-NEXT: v_cmp_ne_u32_e32 vcc_lo, 0, v16
+; GFX11-TRUE16-NEXT: v_dual_mov_b32 v0, s0 :: v_dual_mov_b32 v3, s3
+; GFX11-TRUE16-NEXT: v_dual_mov_b32 v2, s2 :: v_dual_mov_b32 v5, s17
+; GFX11-TRUE16-NEXT: v_dual_mov_b32 v4, s16 :: v_dual_mov_b32 v7, s19
+; GFX11-TRUE16-NEXT: v_dual_mov_b32 v6, s18 :: v_dual_mov_b32 v9, s21
+; GFX11-TRUE16-NEXT: v_dual_mov_b32 v8, s20 :: v_dual_mov_b32 v11, s23
+; GFX11-TRUE16-NEXT: v_dual_mov_b32 v10, s22 :: v_dual_mov_b32 v13, s25
+; GFX11-TRUE16-NEXT: v_dual_mov_b32 v12, s24 :: v_dual_mov_b32 v15, s27
+; GFX11-TRUE16-NEXT: v_dual_mov_b32 v14, s26 :: v_dual_mov_b32 v17, s29
+; GFX11-TRUE16-NEXT: v_mov_b32_e32 v16, s28
+; GFX11-TRUE16-NEXT: s_mov_b32 s0, 0
+; GFX11-TRUE16-NEXT: s_and_b32 s1, vcc_lo, exec_lo
+; GFX11-TRUE16-NEXT: s_cbranch_scc0 .LBB29_4
+; GFX11-TRUE16-NEXT: ; %bb.1: ; %cmp.false
+; GFX11-TRUE16-NEXT: v_lshrrev_b32_e32 v22, 16, v21
+; GFX11-TRUE16-NEXT: v_lshrrev_b32_e32 v23, 16, v20
+; GFX11-TRUE16-NEXT: v_lshrrev_b32_e32 v24, 16, v19
+; GFX11-TRUE16-NEXT: v_lshrrev_b32_e32 v25, 16, v18
+; GFX11-TRUE16-NEXT: v_lshrrev_b32_e32 v26, 16, v17
+; GFX11-TRUE16-NEXT: v_lshrrev_b32_e32 v27, 16, v16
+; GFX11-TRUE16-NEXT: v_lshrrev_b32_e32 v28, 16, v15
+; GFX11-TRUE16-NEXT: v_lshrrev_b32_e32 v29, 16, v14
+; GFX11-TRUE16-NEXT: v_lshrrev_b32_e32 v30, 16, v13
+; GFX11-TRUE16-NEXT: v_lshrrev_b32_e32 v31, 16, v12
+; GFX11-TRUE16-NEXT: v_lshrrev_b32_e32 v32, 16, v11
+; GFX11-TRUE16-NEXT: v_lshrrev_b32_e32 v33, 16, v10
+; GFX11-TRUE16-NEXT: v_lshrrev_b32_e32 v34, 16, v9
+; GFX11-TRUE16-NEXT: v_lshrrev_b32_e32 v35, 16, v8
+; GFX11-TRUE16-NEXT: v_lshrrev_b32_e32 v36, 16, v7
+; GFX11-TRUE16-NEXT: v_lshrrev_b32_e32 v37, 16, v6
+; GFX11-TRUE16-NEXT: v_lshrrev_b32_e32 v38, 16, v5
+; GFX11-TRUE16-NEXT: v_lshrrev_b32_e32 v39, 16, v4
+; GFX11-TRUE16-NEXT: v_lshrrev_b32_e32 v48, 16, v3
+; GFX11-TRUE16-NEXT: v_lshrrev_b32_e32 v49, 16, v2
+; GFX11-TRUE16-NEXT: v_lshrrev_b32_e32 v50, 16, v1
+; GFX11-TRUE16-NEXT: v_lshrrev_b32_e32 v51, 16, v0
+; GFX11-TRUE16-NEXT: s_and_not1_b32 vcc_lo, exec_lo, s0
+; GFX11-TRUE16-NEXT: s_cbranch_vccnz .LBB29_3
+; GFX11-TRUE16-NEXT: .LBB29_2: ; %cmp.true
+; GFX11-TRUE16-NEXT: v_dual_add_f32 v21, 1.0, v21 :: v_dual_add_f32 v20, 1.0, v20
+; GFX11-TRUE16-NEXT: v_dual_add_f32 v19, 1.0, v19 :: v_dual_add_f32 v18, 1.0, v18
+; GFX11-TRUE16-NEXT: v_dual_add_f32 v17, 1.0, v17 :: v_dual_add_f32 v16, 1.0, v16
+; GFX11-TRUE16-NEXT: v_dual_add_f32 v15, 1.0, v15 :: v_dual_add_f32 v14, 1.0, v14
+; GFX11-TRUE16-NEXT: v_dual_add_f32 v13, 1.0, v13 :: v_dual_add_f32 v12, 1.0, v12
+; GFX11-TRUE16-NEXT: v_dual_add_f32 v11, 1.0, v11 :: v_dual_add_f32 v10, 1.0, v10
+; GFX11-TRUE16-NEXT: v_dual_add_f32 v9, 1.0, v9 :: v_dual_add_f32 v8, 1.0, v8
+; GFX11-TRUE16-NEXT: v_dual_add_f32 v7, 1.0, v7 :: v_dual_add_f32 v6, 1.0, v6
+; GFX11-TRUE16-NEXT: v_dual_add_f32 v5, 1.0, v5 :: v_dual_add_f32 v4, 1.0, v4
+; GFX11-TRUE16-NEXT: v_dual_add_f32 v3, 1.0, v3 :: v_dual_add_f32 v2, 1.0, v2
+; GFX11-TRUE16-NEXT: v_dual_add_f32 v1, 1.0, v1 :: v_dual_add_f32 v0, 1.0, v0
+; GFX11-TRUE16-NEXT: v_lshrrev_b32_e32 v22, 16, v21
+; GFX11-TRUE16-NEXT: v_lshrrev_b32_e32 v23, 16, v20
+; GFX11-TRUE16-NEXT: v_lshrrev_b32_e32 v24, 16, v19
+; GFX11-TRUE16-NEXT: v_lshrrev_b32_e32 v25, 16, v18
+; GFX11-TRUE16-NEXT: v_lshrrev_b32_e32 v26, 16, v17
+; GFX11-TRUE16-NEXT: v_lshrrev_b32_e32 v27, 16, v16
+; GFX11-TRUE16-NEXT: v_lshrrev_b32_e32 v28, 16, v15
+; GFX11-TRUE16-NEXT: v_lshrrev_b32_e32 v29, 16, v14
+; GFX11-TRUE16-NEXT: v_lshrrev_b32_e32 v30, 16, v13
+; GFX11-TRUE16-NEXT: v_lshrrev_b32_e32 v31, 16, v12
+; GFX11-TRUE16-NEXT: v_lshrrev_b32_e32 v32, 16, v11
+; GFX11-TRUE16-NEXT: v_lshrrev_b32_e32 v33, 16, v10
+; GFX11-TRUE16-NEXT: v_lshrrev_b32_e32 v34, 16, v9
+; GFX11-TRUE16-NEXT: v_lshrrev_b32_e32 v35, 16, v8
+; GFX11-TRUE16-NEXT: v_lshrrev_b32_e32 v36, 16, v7
+; GFX11-TRUE16-NEXT: v_lshrrev_b32_e32 v37, 16, v6
+; GFX11-TRUE16-NEXT: v_lshrrev_b32_e32 v38, 16, v5
+; GFX11-TRUE16-NEXT: v_lshrrev_b32_e32 v39, 16, v4
+; GFX11-TRUE16-NEXT: v_lshrrev_b32_e32 v48, 16, v3
+; GFX11-TRUE16-NEXT: v_lshrrev_b32_e32 v49, 16, v2
+; GFX11-TRUE16-NEXT: v_lshrrev_b32_e32 v50, 16, v1
+; GFX11-TRUE16-NEXT: v_lshrrev_b32_e32 v51, 16, v0
+; GFX11-TRUE16-NEXT: .LBB29_3: ; %end
+; GFX11-TRUE16-NEXT: s_delay_alu instid0(VALU_DEP_1) | instskip(NEXT) | instid1(VALU_DEP_4)
+; GFX11-TRUE16-NEXT: v_dual_mov_b32 v51, v51 :: v_dual_mov_b32 v50, v50
+; GFX11-TRUE16-NEXT: v_dual_mov_b32 v49, v49 :: v_dual_mov_b32 v48, v48
+; GFX11-TRUE16-NEXT: v_dual_mov_b32 v39, v39 :: v_dual_mov_b32 v38, v38
+; GFX11-TRUE16-NEXT: v_dual_mov_b32 v37, v37 :: v_dual_mov_b32 v36, v36
+; GFX11-TRUE16-NEXT: v_dual_mov_b32 v35, v35 :: v_dual_mov_b32 v34, v34
+; GFX11-TRUE16-NEXT: v_dual_mov_b32 v33, v33 :: v_dual_mov_b32 v32, v32
+; GFX11-TRUE16-NEXT: v_dual_mov_b32 v31, v31 :: v_dual_mov_b32 v30, v30
+; GFX11-TRUE16-NEXT: v_dual_mov_b32 v29, v29 :: v_dual_mov_b32 v28, v28
+; GFX11-TRUE16-NEXT: v_dual_mov_b32 v27, v27 :: v_dual_mov_b32 v26, v26
+; GFX11-TRUE16-NEXT: v_dual_mov_b32 v25, v25 :: v_dual_mov_b32 v24, v24
+; GFX11-TRUE16-NEXT: v_dual_mov_b32 v23, v23 :: v_dual_mov_b32 v22, v22
+; GFX11-TRUE16-NEXT: v_mov_b16_e32 v0.h, v51.l
+; GFX11-TRUE16-NEXT: v_mov_b16_e32 v1.h, v50.l
+; GFX11-TRUE16-NEXT: v_mov_b16_e32 v2.h, v49.l
+; GFX11-TRUE16-NEXT: v_mov_b16_e32 v3.h, v48.l
+; GFX11-TRUE16-NEXT: v_mov_b16_e32 v4.h, v39.l
+; GFX11-TRUE16-NEXT: v_mov_b16_e32 v5.h, v38.l
+; GFX11-TRUE16-NEXT: v_mov_b16_e32 v6.h, v37.l
+; GFX11-TRUE16-NEXT: v_mov_b16_e32 v7.h, v36.l
+; GFX11-TRUE16-NEXT: v_mov_b16_e32 v8.h, v35.l
+; GFX11-TRUE16-NEXT: v_mov_b16_e32 v9.h, v34.l
+; GFX11-TRUE16-NEXT: v_mov_b16_e32 v10.h, v33.l
+; GFX11-TRUE16-NEXT: v_mov_b16_e32 v11.h, v32.l
+; GFX11-TRUE16-NEXT: v_mov_b16_e32 v12.h, v31.l
+; GFX11-TRUE16-NEXT: v_mov_b16_e32 v13.h, v30.l
+; GFX11-TRUE16-NEXT: v_mov_b16_e32 v14.h, v29.l
+; GFX11-TRUE16-NEXT: v_mov_b16_e32 v15.h, v28.l
+; GFX11-TRUE16-NEXT: v_mov_b16_e32 v16.h, v27.l
+; GFX11-TRUE16-NEXT: v_mov_b16_e32 v17.h, v26.l
+; GFX11-TRUE16-NEXT: v_mov_b16_e32 v18.h, v25.l
+; GFX11-TRUE16-NEXT: v_mov_b16_e32 v19.h, v24.l
+; GFX11-TRUE16-NEXT: v_mov_b16_e32 v20.h, v23.l
+; GFX11-TRUE16-NEXT: v_mov_b16_e32 v21.h, v22.l
+; GFX11-TRUE16-NEXT: s_setpc_b64 s[30:31]
+; GFX11-TRUE16-NEXT: .LBB29_4:
+; GFX11-TRUE16-NEXT: ; implicit-def: $vgpr51
+; GFX11-TRUE16-NEXT: ; implicit-def: $vgpr50
+; GFX11-TRUE16-NEXT: ; implicit-def: $vgpr49
+; GFX11-TRUE16-NEXT: ; implicit-def: $vgpr48
+; GFX11-TRUE16-NEXT: ; implicit-def: $vgpr39
+; GFX11-TRUE16-NEXT: ; implicit-def: $vgpr38
+; GFX11-TRUE16-NEXT: ; implicit-def: $vgpr37
+; GFX11-TRUE16-NEXT: ; implicit-def: $vgpr36
+; GFX11-TRUE16-NEXT: ; implicit-def: $vgpr35
+; GFX11-TRUE16-NEXT: ; implicit-def: $vgpr34
+; GFX11-TRUE16-NEXT: ; implicit-def: $vgpr33
+; GFX11-TRUE16-NEXT: ; implicit-def: $vgpr32
+; GFX11-TRUE16-NEXT: ; implicit-def: $vgpr31
+; GFX11-TRUE16-NEXT: ; implicit-def: $vgpr30
+; GFX11-TRUE16-NEXT: ; implicit-def: $vgpr29
+; GFX11-TRUE16-NEXT: ; implicit-def: $vgpr28
+; GFX11-TRUE16-NEXT: ; implicit-def: $vgpr27
+; GFX11-TRUE16-NEXT: ; implicit-def: $vgpr26
+; GFX11-TRUE16-NEXT: ; implicit-def: $vgpr25
+; GFX11-TRUE16-NEXT: ; implicit-def: $vgpr24
+; GFX11-TRUE16-NEXT: ; implicit-def: $vgpr23
+; GFX11-TRUE16-NEXT: ; implicit-def: $vgpr22
+; GFX11-TRUE16-NEXT: s_branch .LBB29_2
+;
+; GFX11-FAKE16-LABEL: bitcast_v22f32_to_v44i16_scalar:
+; GFX11-FAKE16: ; %bb.0:
+; GFX11-FAKE16-NEXT: s_waitcnt vmcnt(0) expcnt(0) lgkmcnt(0)
+; GFX11-FAKE16-NEXT: v_cmp_ne_u32_e32 vcc_lo, 0, v4
+; GFX11-FAKE16-NEXT: v_dual_mov_b32 v22, s0 :: v_dual_mov_b32 v21, s1
+; GFX11-FAKE16-NEXT: v_dual_mov_b32 v20, s2 :: v_dual_mov_b32 v19, s3
+; GFX11-FAKE16-NEXT: v_dual_mov_b32 v18, s16 :: v_dual_mov_b32 v5, s18
+; GFX11-FAKE16-NEXT: v_dual_mov_b32 v6, s17 :: v_dual_mov_b32 v11, s19
+; GFX11-FAKE16-NEXT: v_dual_mov_b32 v10, s20 :: v_dual_mov_b32 v9, s21
+; GFX11-FAKE16-NEXT: v_dual_mov_b32 v8, s22 :: v_dual_mov_b32 v7, s23
+; GFX11-FAKE16-NEXT: v_dual_mov_b32 v15, s24 :: v_dual_mov_b32 v14, s25
+; GFX11-FAKE16-NEXT: v_dual_mov_b32 v13, s26 :: v_dual_mov_b32 v12, s27
+; GFX11-FAKE16-NEXT: v_dual_mov_b32 v16, s28 :: v_dual_mov_b32 v17, s29
+; GFX11-FAKE16-NEXT: s_mov_b32 s0, 0
+; GFX11-FAKE16-NEXT: s_and_b32 s1, vcc_lo, exec_lo
+; GFX11-FAKE16-NEXT: s_cbranch_scc0 .LBB29_4
+; GFX11-FAKE16-NEXT: ; %bb.1: ; %cmp.false
+; GFX11-FAKE16-NEXT: v_lshrrev_b32_e32 v26, 16, v3
+; GFX11-FAKE16-NEXT: v_lshrrev_b32_e32 v27, 16, v2
+; GFX11-FAKE16-NEXT: v_lshrrev_b32_e32 v28, 16, v1
+; GFX11-FAKE16-NEXT: v_lshrrev_b32_e32 v29, 16, v0
+; GFX11-FAKE16-NEXT: v_lshrrev_b32_e32 v30, 16, v17
+; GFX11-FAKE16-NEXT: v_lshrrev_b32_e32 v31, 16, v16
+; GFX11-FAKE16-NEXT: v_lshrrev_b32_e32 v32, 16, v12
+; GFX11-FAKE16-NEXT: v_lshrrev_b32_e32 v33, 16, v13
+; GFX11-FAKE16-NEXT: v_lshrrev_b32_e32 v34, 16, v14
+; GFX11-FAKE16-NEXT: v_lshrrev_b32_e32 v35, 16, v15
+; GFX11-FAKE16-NEXT: v_lshrrev_b32_e32 v36, 16, v7
+; GFX11-FAKE16-NEXT: v_lshrrev_b32_e32 v37, 16, v8
+; GFX11-FAKE16-NEXT: v_lshrrev_b32_e32 v38, 16, v9
+; GFX11-FAKE16-NEXT: v_lshrrev_b32_e32 v39, 16, v10
+; GFX11-FAKE16-NEXT: v_lshrrev_b32_e32 v48, 16, v11
+; GFX11-FAKE16-NEXT: v_lshrrev_b32_e32 v49, 16, v5
+; GFX11-FAKE16-NEXT: v_lshrrev_b32_e32 v50, 16, v6
+; GFX11-FAKE16-NEXT: v_lshrrev_b32_e32 v4, 16, v18
+; GFX11-FAKE16-NEXT: v_lshrrev_b32_e32 v23, 16, v19
+; GFX11-FAKE16-NEXT: v_lshrrev_b32_e32 v51, 16, v20
+; GFX11-FAKE16-NEXT: v_lshrrev_b32_e32 v25, 16, v21
+; GFX11-FAKE16-NEXT: v_lshrrev_b32_e32 v24, 16, v22
+; GFX11-FAKE16-NEXT: s_and_not1_b32 vcc_lo, exec_lo, s0
+; GFX11-FAKE16-NEXT: s_cbranch_vccnz .LBB29_3
+; GFX11-FAKE16-NEXT: .LBB29_2: ; %cmp.true
+; GFX11-FAKE16-NEXT: v_dual_add_f32 v3, 1.0, v3 :: v_dual_add_f32 v2, 1.0, v2
+; GFX11-FAKE16-NEXT: v_dual_add_f32 v1, 1.0, v1 :: v_dual_add_f32 v0, 1.0, v0
+; GFX11-FAKE16-NEXT: v_dual_add_f32 v17, 1.0, v17 :: v_dual_add_f32 v16, 1.0, v16
+; GFX11-FAKE16-NEXT: v_dual_add_f32 v12, 1.0, v12 :: v_dual_add_f32 v13, 1.0, v13
+; GFX11-FAKE16-NEXT: v_dual_add_f32 v14, 1.0, v14 :: v_dual_add_f32 v15, 1.0, v15
+; GFX11-FAKE16-NEXT: v_dual_add_f32 v7, 1.0, v7 :: v_dual_add_f32 v8, 1.0, v8
+; GFX11-FAKE16-NEXT: v_dual_add_f32 v9, 1.0, v9 :: v_dual_add_f32 v10, 1.0, v10
+; GFX11-FAKE16-NEXT: v_dual_add_f32 v11, 1.0, v11 :: v_dual_add_f32 v6, 1.0, v6
+; GFX11-FAKE16-NEXT: v_dual_add_f32 v5, 1.0, v5 :: v_dual_add_f32 v18, 1.0, v18
+; GFX11-FAKE16-NEXT: v_dual_add_f32 v19, 1.0, v19 :: v_dual_add_f32 v20, 1.0, v20
+; GFX11-FAKE16-NEXT: v_dual_add_f32 v21, 1.0, v21 :: v_dual_add_f32 v22, 1.0, v22
+; GFX11-FAKE16-NEXT: v_lshrrev_b32_e32 v26, 16, v3
+; GFX11-FAKE16-NEXT: v_lshrrev_b32_e32 v27, 16, v2
+; GFX11-FAKE16-NEXT: v_lshrrev_b32_e32 v28, 16, v1
+; GFX11-FAKE16-NEXT: v_lshrrev_b32_e32 v29, 16, v0
+; GFX11-FAKE16-NEXT: v_lshrrev_b32_e32 v30, 16, v17
+; GFX11-FAKE16-NEXT: v_lshrrev_b32_e32 v31, 16, v16
+; GFX11-FAKE16-NEXT: v_lshrrev_b32_e32 v32, 16, v12
+; GFX11-FAKE16-NEXT: v_lshrrev_b32_e32 v33, 16, v13
+; GFX11-FAKE16-NEXT: v_lshrrev_b32_e32 v34, 16, v14
+; GFX11-FAKE16-NEXT: v_lshrrev_b32_e32 v35, 16, v15
+; GFX11-FAKE16-NEXT: v_lshrrev_b32_e32 v36, 16, v7
+; GFX11-FAKE16-NEXT: v_lshrrev_b32_e32 v37, 16, v8
+; GFX11-FAKE16-NEXT: v_lshrrev_b32_e32 v38, 16, v9
+; GFX11-FAKE16-NEXT: v_lshrrev_b32_e32 v39, 16, v10
+; GFX11-FAKE16-NEXT: v_lshrrev_b32_e32 v48, 16, v11
+; GFX11-FAKE16-NEXT: v_lshrrev_b32_e32 v49, 16, v5
+; GFX11-FAKE16-NEXT: v_lshrrev_b32_e32 v50, 16, v6
+; GFX11-FAKE16-NEXT: v_lshrrev_b32_e32 v4, 16, v18
+; GFX11-FAKE16-NEXT: v_lshrrev_b32_e32 v23, 16, v19
+; GFX11-FAKE16-NEXT: v_lshrrev_b32_e32 v51, 16, v20
+; GFX11-FAKE16-NEXT: v_lshrrev_b32_e32 v25, 16, v21
+; GFX11-FAKE16-NEXT: v_lshrrev_b32_e32 v24, 16, v22
+; GFX11-FAKE16-NEXT: .LBB29_3: ; %end
+; GFX11-FAKE16-NEXT: v_and_b32_e32 v19, 0xffff, v19
+; GFX11-FAKE16-NEXT: v_and_b32_e32 v21, 0xffff, v21
+; GFX11-FAKE16-NEXT: v_and_b32_e32 v18, 0xffff, v18
+; GFX11-FAKE16-NEXT: v_and_b32_e32 v11, 0xffff, v11
+; GFX11-FAKE16-NEXT: v_and_b32_e32 v6, 0xffff, v6
+; GFX11-FAKE16-NEXT: v_lshl_or_b32 v23, v23, 16, v19
+; GFX11-FAKE16-NEXT: v_and_b32_e32 v19, 0xffff, v7
+; GFX11-FAKE16-NEXT: v_and_b32_e32 v20, 0xffff, v20
+; GFX11-FAKE16-NEXT: v_lshl_or_b32 v25, v25, 16, v21
+; GFX11-FAKE16-NEXT: v_and_b32_e32 v22, 0xffff, v22
+; GFX11-FAKE16-NEXT: v_and_b32_e32 v21, 0xffff, v5
+; GFX11-FAKE16-NEXT: v_lshl_or_b32 v4, v4, 16, v18
+; GFX11-FAKE16-NEXT: v_and_b32_e32 v10, 0xffff, v10
+; GFX11-FAKE16-NEXT: v_and_b32_e32 v18, 0xffff, v8
+; GFX11-FAKE16-NEXT: v_lshl_or_b32 v7, v48, 16, v11
+; GFX11-FAKE16-NEXT: v_lshl_or_b32 v11, v36, 16, v19
+; GFX11-FAKE16-NEXT: v_and_b32_e32 v15, 0xffff, v15
+; GFX11-FAKE16-NEXT: v_and_b32_e32 v19, 0xffff, v12
+; GFX11-FAKE16-NEXT: v_and_b32_e32 v1, 0xffff, v1
+; GFX11-FAKE16-NEXT: v_and_b32_e32 v3, 0xffff, v3
+; GFX11-FAKE16-NEXT: v_lshl_or_b32 v5, v50, 16, v6
+; GFX11-FAKE16-NEXT: v_lshl_or_b32 v6, v49, 16, v21
+; GFX11-FAKE16-NEXT: v_and_b32_e32 v9, 0xffff, v9
+; GFX11-FAKE16-NEXT: v_lshl_or_b32 v8, v39, 16, v10
+; GFX11-FAKE16-NEXT: v_lshl_or_b32 v10, v37, 16, v18
+; GFX11-FAKE16-NEXT: v_and_b32_e32 v14, 0xffff, v14
+; GFX11-FAKE16-NEXT: v_and_b32_e32 v18, 0xffff, v13
+; GFX11-FAKE16-NEXT: v_and_b32_e32 v16, 0xffff, v16
+; GFX11-FAKE16-NEXT: v_lshl_or_b32 v12, v35, 16, v15
+; GFX11-FAKE16-NEXT: v_lshl_or_b32 v15, v32, 16, v19
+; GFX11-FAKE16-NEXT: v_and_b32_e32 v17, 0xffff, v17
+; GFX11-FAKE16-NEXT: v_and_b32_e32 v0, 0xffff, v0
+; GFX11-FAKE16-NEXT: v_and_b32_e32 v2, 0xffff, v2
+; GFX11-FAKE16-NEXT: v_lshl_or_b32 v19, v28, 16, v1
+; GFX11-FAKE16-NEXT: v_lshl_or_b32 v21, v26, 16, v3
+; GFX11-FAKE16-NEXT: v_mov_b32_e32 v1, v25
+; GFX11-FAKE16-NEXT: v_lshl_or_b32 v24, v24, 16, v22
+; GFX11-FAKE16-NEXT: v_mov_b32_e32 v3, v23
+; GFX11-FAKE16-NEXT: v_lshl_or_b32 v22, v51, 16, v20
+; GFX11-FAKE16-NEXT: v_lshl_or_b32 v9, v38, 16, v9
+; GFX11-FAKE16-NEXT: v_lshl_or_b32 v13, v34, 16, v14
+; GFX11-FAKE16-NEXT: v_lshl_or_b32 v14, v33, 16, v18
+; GFX11-FAKE16-NEXT: v_lshl_or_b32 v16, v31, 16, v16
+; GFX11-FAKE16-NEXT: v_lshl_or_b32 v17, v30, 16, v17
+; GFX11-FAKE16-NEXT: v_lshl_or_b32 v18, v29, 16, v0
+; GFX11-FAKE16-NEXT: v_lshl_or_b32 v20, v27, 16, v2
+; GFX11-FAKE16-NEXT: v_mov_b32_e32 v0, v24
+; GFX11-FAKE16-NEXT: v_mov_b32_e32 v2, v22
+; GFX11-FAKE16-NEXT: s_setpc_b64 s[30:31]
+; GFX11-FAKE16-NEXT: .LBB29_4:
+; GFX11-FAKE16-NEXT: ; implicit-def: $vgpr24
+; GFX11-FAKE16-NEXT: ; implicit-def: $vgpr25
+; GFX11-FAKE16-NEXT: ; implicit-def: $vgpr51
+; GFX11-FAKE16-NEXT: ; implicit-def: $vgpr23
+; GFX11-FAKE16-NEXT: ; implicit-def: $vgpr4
+; GFX11-FAKE16-NEXT: ; implicit-def: $vgpr50
+; GFX11-FAKE16-NEXT: ; implicit-def: $vgpr49
+; GFX11-FAKE16-NEXT: ; implicit-def: $vgpr48
+; GFX11-FAKE16-NEXT: ; implicit-def: $vgpr39
+; GFX11-FAKE16-NEXT: ; implicit-def: $vgpr38
+; GFX11-FAKE16-NEXT: ; implicit-def: $vgpr37
+; GFX11-FAKE16-NEXT: ; implicit-def: $vgpr36
+; GFX11-FAKE16-NEXT: ; implicit-def: $vgpr35
+; GFX11-FAKE16-NEXT: ; implicit-def: $vgpr34
+; GFX11-FAKE16-NEXT: ; implicit-def: $vgpr33
+; GFX11-FAKE16-NEXT: ; implicit-def: $vgpr32
+; GFX11-FAKE16-NEXT: ; implicit-def: $vgpr31
+; GFX11-FAKE16-NEXT: ; implicit-def: $vgpr30
+; GFX11-FAKE16-NEXT: ; implicit-def: $vgpr29
+; GFX11-FAKE16-NEXT: ; implicit-def: $vgpr28
+; GFX11-FAKE16-NEXT: ; implicit-def: $vgpr27
+; GFX11-FAKE16-NEXT: ; implicit-def: $vgpr26
+; GFX11-FAKE16-NEXT: s_branch .LBB29_2
%cmp = icmp eq i32 %b, 0
br i1 %cmp, label %cmp.true, label %cmp.false
@@ -13805,105 +14291,278 @@ define inreg <22 x float> @bitcast_v44i16_to_v22f32_scalar(<44 x i16> inreg %a,
; GFX11-TRUE16-LABEL: bitcast_v44i16_to_v22f32_scalar:
; GFX11-TRUE16: ; %bb.0:
; GFX11-TRUE16-NEXT: s_waitcnt vmcnt(0) expcnt(0) lgkmcnt(0)
-; GFX11-TRUE16-NEXT: v_mov_b16_e32 v32.h, 0
; GFX11-TRUE16-NEXT: v_cmp_ne_u32_e32 vcc_lo, 0, v4
-; GFX11-TRUE16-NEXT: v_mov_b16_e32 v32.l, v3.h
-; GFX11-TRUE16-NEXT: v_mov_b16_e32 v33.l, v2.h
-; GFX11-TRUE16-NEXT: v_mov_b16_e32 v34.l, v1.h
-; GFX11-TRUE16-NEXT: v_mov_b16_e32 v33.h, v32.h
-; GFX11-TRUE16-NEXT: v_mov_b16_e32 v34.h, v32.h
-; GFX11-TRUE16-NEXT: v_mov_b16_e32 v35.l, v0.h
-; GFX11-TRUE16-NEXT: v_mov_b16_e32 v35.h, v32.h
-; GFX11-TRUE16-NEXT: v_and_b32_e32 v39, 0xffff, v0
-; GFX11-TRUE16-NEXT: v_and_b32_e32 v38, 0xffff, v1
-; GFX11-TRUE16-NEXT: v_and_b32_e32 v37, 0xffff, v2
-; GFX11-TRUE16-NEXT: v_and_b32_e32 v36, 0xffff, v3
-; GFX11-TRUE16-NEXT: s_lshr_b32 s41, s29, 16
-; GFX11-TRUE16-NEXT: s_lshr_b32 s42, s28, 16
-; GFX11-TRUE16-NEXT: s_lshr_b32 s43, s27, 16
-; GFX11-TRUE16-NEXT: s_lshr_b32 s15, s26, 16
-; GFX11-TRUE16-NEXT: s_lshr_b32 s14, s25, 16
-; GFX11-TRUE16-NEXT: s_lshr_b32 s13, s24, 16
-; GFX11-TRUE16-NEXT: s_lshr_b32 s12, s23, 16
-; GFX11-TRUE16-NEXT: s_lshr_b32 s11, s22, 16
-; GFX11-TRUE16-NEXT: s_lshr_b32 s10, s21, 16
-; GFX11-TRUE16-NEXT: s_lshr_b32 s9, s20, 16
-; GFX11-TRUE16-NEXT: s_lshr_b32 s8, s19, 16
-; GFX11-TRUE16-NEXT: s_lshr_b32 s7, s18, 16
-; GFX11-TRUE16-NEXT: s_lshr_b32 s6, s17, 16
-; GFX11-TRUE16-NEXT: s_lshr_b32 s5, s16, 16
-; GFX11-TRUE16-NEXT: s_lshr_b32 s44, s3, 16
-; GFX11-TRUE16-NEXT: s_lshr_b32 s45, s2, 16
-; GFX11-TRUE16-NEXT: s_lshr_b32 s46, s1, 16
-; GFX11-TRUE16-NEXT: s_lshr_b32 s4, s0, 16
-; GFX11-TRUE16-NEXT: s_mov_b32 s40, 0
-; GFX11-TRUE16-NEXT: s_pack_ll_b32_b16 s4, s0, s4
-; GFX11-TRUE16-NEXT: s_pack_ll_b32_b16 s1, s1, s46
-; GFX11-TRUE16-NEXT: s_pack_ll_b32_b16 s2, s2, s45
-; GFX11-TRUE16-NEXT: s_pack_ll_b32_b16 s3, s3, s44
-; GFX11-TRUE16-NEXT: s_pack_ll_b32_b16 s5, s16, s5
-; GFX11-TRUE16-NEXT: s_pack_ll_b32_b16 s6, s17, s6
-; GFX11-TRUE16-NEXT: s_pack_ll_b32_b16 s7, s18, s7
-; GFX11-TRUE16-NEXT: s_pack_ll_b32_b16 s8, s19, s8
-; GFX11-TRUE16-NEXT: s_pack_ll_b32_b16 s9, s20, s9
-; GFX11-TRUE16-NEXT: s_pack_ll_b32_b16 s10, s21, s10
-; GFX11-TRUE16-NEXT: s_pack_ll_b32_b16 s11, s22, s11
-; GFX11-TRUE16-NEXT: s_pack_ll_b32_b16 s12, s23, s12
-; GFX11-TRUE16-NEXT: s_pack_ll_b32_b16 s13, s24, s13
-; GFX11-TRUE16-NEXT: s_pack_ll_b32_b16 s14, s25, s14
-; GFX11-TRUE16-NEXT: s_pack_ll_b32_b16 s15, s26, s15
-; GFX11-TRUE16-NEXT: s_pack_ll_b32_b16 s16, s27, s43
-; GFX11-TRUE16-NEXT: s_pack_ll_b32_b16 s17, s28, s42
-; GFX11-TRUE16-NEXT: s_pack_ll_b32_b16 s0, s29, s41
+; GFX11-TRUE16-NEXT: s_clause 0x1f
+; GFX11-TRUE16-NEXT: scratch_store_b32 off, v40, s32 offset:304
+; GFX11-TRUE16-NEXT: scratch_store_b32 off, v41, s32 offset:300
+; GFX11-TRUE16-NEXT: scratch_store_b32 off, v42, s32 offset:296
+; GFX11-TRUE16-NEXT: scratch_store_b32 off, v43, s32 offset:292
+; GFX11-TRUE16-NEXT: scratch_store_b32 off, v44, s32 offset:288
+; GFX11-TRUE16-NEXT: scratch_store_b32 off, v45, s32 offset:284
+; GFX11-TRUE16-NEXT: scratch_store_b32 off, v46, s32 offset:280
+; GFX11-TRUE16-NEXT: scratch_store_b32 off, v47, s32 offset:276
+; GFX11-TRUE16-NEXT: scratch_store_b32 off, v56, s32 offset:272
+; GFX11-TRUE16-NEXT: scratch_store_b32 off, v57, s32 offset:268
+; GFX11-TRUE16-NEXT: scratch_store_b32 off, v58, s32 offset:264
+; GFX11-TRUE16-NEXT: scratch_store_b32 off, v59, s32 offset:260
+; GFX11-TRUE16-NEXT: scratch_store_b32 off, v60, s32 offset:256
+; GFX11-TRUE16-NEXT: scratch_store_b32 off, v61, s32 offset:252
+; GFX11-TRUE16-NEXT: scratch_store_b32 off, v62, s32 offset:248
+; GFX11-TRUE16-NEXT: scratch_store_b32 off, v63, s32 offset:244
+; GFX11-TRUE16-NEXT: scratch_store_b32 off, v72, s32 offset:240
+; GFX11-TRUE16-NEXT: scratch_store_b32 off, v73, s32 offset:236
+; GFX11-TRUE16-NEXT: scratch_store_b32 off, v74, s32 offset:232
+; GFX11-TRUE16-NEXT: scratch_store_b32 off, v75, s32 offset:228
+; GFX11-TRUE16-NEXT: scratch_store_b32 off, v76, s32 offset:224
+; GFX11-TRUE16-NEXT: scratch_store_b32 off, v77, s32 offset:220
+; GFX11-TRUE16-NEXT: scratch_store_b32 off, v78, s32 offset:216
+; GFX11-TRUE16-NEXT: scratch_store_b32 off, v79, s32 offset:212
+; GFX11-TRUE16-NEXT: scratch_store_b32 off, v88, s32 offset:208
+; GFX11-TRUE16-NEXT: scratch_store_b32 off, v89, s32 offset:204
+; GFX11-TRUE16-NEXT: scratch_store_b32 off, v90, s32 offset:200
+; GFX11-TRUE16-NEXT: scratch_store_b32 off, v91, s32 offset:196
+; GFX11-TRUE16-NEXT: scratch_store_b32 off, v92, s32 offset:192
+; GFX11-TRUE16-NEXT: scratch_store_b32 off, v93, s32 offset:188
+; GFX11-TRUE16-NEXT: scratch_store_b32 off, v94, s32 offset:184
+; GFX11-TRUE16-NEXT: scratch_store_b32 off, v95, s32 offset:180
+; GFX11-TRUE16-NEXT: s_clause 0x1f
+; GFX11-TRUE16-NEXT: scratch_store_b32 off, v104, s32 offset:176
+; GFX11-TRUE16-NEXT: scratch_store_b32 off, v105, s32 offset:172
+; GFX11-TRUE16-NEXT: scratch_store_b32 off, v106, s32 offset:168
+; GFX11-TRUE16-NEXT: scratch_store_b32 off, v107, s32 offset:164
+; GFX11-TRUE16-NEXT: scratch_store_b32 off, v108, s32 offset:160
+; GFX11-TRUE16-NEXT: scratch_store_b32 off, v109, s32 offset:156
+; GFX11-TRUE16-NEXT: scratch_store_b32 off, v110, s32 offset:152
+; GFX11-TRUE16-NEXT: scratch_store_b32 off, v111, s32 offset:148
+; GFX11-TRUE16-NEXT: scratch_store_b32 off, v120, s32 offset:144
+; GFX11-TRUE16-NEXT: scratch_store_b32 off, v121, s32 offset:140
+; GFX11-TRUE16-NEXT: scratch_store_b32 off, v122, s32 offset:136
+; GFX11-TRUE16-NEXT: scratch_store_b32 off, v123, s32 offset:132
+; GFX11-TRUE16-NEXT: scratch_store_b32 off, v124, s32 offset:128
+; GFX11-TRUE16-NEXT: scratch_store_b32 off, v125, s32 offset:124
+; GFX11-TRUE16-NEXT: scratch_store_b32 off, v126, s32 offset:120
+; GFX11-TRUE16-NEXT: scratch_store_b32 off, v127, s32 offset:116
+; GFX11-TRUE16-NEXT: scratch_store_b32 off, v136, s32 offset:112
+; GFX11-TRUE16-NEXT: scratch_store_b32 off, v137, s32 offset:108
+; GFX11-TRUE16-NEXT: scratch_store_b32 off, v138, s32 offset:104
+; GFX11-TRUE16-NEXT: scratch_store_b32 off, v139, s32 offset:100
+; GFX11-TRUE16-NEXT: scratch_store_b32 off, v140, s32 offset:96
+; GFX11-TRUE16-NEXT: scratch_store_b32 off, v141, s32 offset:92
+; GFX11-TRUE16-NEXT: scratch_store_b32 off, v142, s32 offset:88
+; GFX11-TRUE16-NEXT: scratch_store_b32 off, v143, s32 offset:84
+; GFX11-TRUE16-NEXT: scratch_store_b32 off, v152, s32 offset:80
+; GFX11-TRUE16-NEXT: scratch_store_b32 off, v153, s32 offset:76
+; GFX11-TRUE16-NEXT: scratch_store_b32 off, v154, s32 offset:72
+; GFX11-TRUE16-NEXT: scratch_store_b32 off, v155, s32 offset:68
+; GFX11-TRUE16-NEXT: scratch_store_b32 off, v156, s32 offset:64
+; GFX11-TRUE16-NEXT: scratch_store_b32 off, v157, s32 offset:60
+; GFX11-TRUE16-NEXT: scratch_store_b32 off, v158, s32 offset:56
+; GFX11-TRUE16-NEXT: scratch_store_b32 off, v159, s32 offset:52
+; GFX11-TRUE16-NEXT: s_clause 0xc
+; GFX11-TRUE16-NEXT: scratch_store_b32 off, v168, s32 offset:48
+; GFX11-TRUE16-NEXT: scratch_store_b32 off, v169, s32 offset:44
+; GFX11-TRUE16-NEXT: scratch_store_b32 off, v170, s32 offset:40
+; GFX11-TRUE16-NEXT: scratch_store_b32 off, v171, s32 offset:36
+; GFX11-TRUE16-NEXT: scratch_store_b32 off, v172, s32 offset:32
+; GFX11-TRUE16-NEXT: scratch_store_b32 off, v173, s32 offset:28
+; GFX11-TRUE16-NEXT: scratch_store_b32 off, v174, s32 offset:24
+; GFX11-TRUE16-NEXT: scratch_store_b32 off, v175, s32 offset:20
+; GFX11-TRUE16-NEXT: scratch_store_b32 off, v184, s32 offset:16
+; GFX11-TRUE16-NEXT: scratch_store_b32 off, v185, s32 offset:12
+; GFX11-TRUE16-NEXT: scratch_store_b32 off, v186, s32 offset:8
+; GFX11-TRUE16-NEXT: scratch_store_b32 off, v187, s32 offset:4
+; GFX11-TRUE16-NEXT: scratch_store_b32 off, v188, s32
+; GFX11-TRUE16-NEXT: v_dual_mov_b32 v185, v3 :: v_dual_mov_b32 v186, v2
+; GFX11-TRUE16-NEXT: v_dual_mov_b32 v187, v1 :: v_dual_mov_b32 v188, v0
+; GFX11-TRUE16-NEXT: s_lshr_b32 s15, s29, 16
+; GFX11-TRUE16-NEXT: s_lshr_b32 s14, s28, 16
+; GFX11-TRUE16-NEXT: s_lshr_b32 s13, s27, 16
+; GFX11-TRUE16-NEXT: s_lshr_b32 s12, s26, 16
+; GFX11-TRUE16-NEXT: s_lshr_b32 s11, s25, 16
+; GFX11-TRUE16-NEXT: s_lshr_b32 s10, s24, 16
+; GFX11-TRUE16-NEXT: s_lshr_b32 s9, s23, 16
+; GFX11-TRUE16-NEXT: s_lshr_b32 s8, s22, 16
+; GFX11-TRUE16-NEXT: s_lshr_b32 s7, s21, 16
+; GFX11-TRUE16-NEXT: s_lshr_b32 s6, s20, 16
+; GFX11-TRUE16-NEXT: s_lshr_b32 s5, s19, 16
+; GFX11-TRUE16-NEXT: s_lshr_b32 s4, s18, 16
+; GFX11-TRUE16-NEXT: s_lshr_b32 s43, s17, 16
+; GFX11-TRUE16-NEXT: s_lshr_b32 s44, s16, 16
+; GFX11-TRUE16-NEXT: s_lshr_b32 s45, s3, 16
+; GFX11-TRUE16-NEXT: s_lshr_b32 s46, s2, 16
+; GFX11-TRUE16-NEXT: s_lshr_b32 s41, s1, 16
+; GFX11-TRUE16-NEXT: s_lshr_b32 s40, s0, 16
+; GFX11-TRUE16-NEXT: s_mov_b32 s42, 0
+; GFX11-TRUE16-NEXT: s_pack_ll_b32_b16 s40, s0, s40
+; GFX11-TRUE16-NEXT: s_pack_ll_b32_b16 s41, s1, s41
+; GFX11-TRUE16-NEXT: s_pack_ll_b32_b16 s0, s2, s46
+; GFX11-TRUE16-NEXT: s_pack_ll_b32_b16 s1, s3, s45
+; GFX11-TRUE16-NEXT: s_pack_ll_b32_b16 s2, s16, s44
+; GFX11-TRUE16-NEXT: s_pack_ll_b32_b16 s3, s17, s43
+; GFX11-TRUE16-NEXT: s_pack_ll_b32_b16 s4, s18, s4
+; GFX11-TRUE16-NEXT: s_pack_ll_b32_b16 s5, s19, s5
+; GFX11-TRUE16-NEXT: s_pack_ll_b32_b16 s6, s20, s6
+; GFX11-TRUE16-NEXT: s_pack_ll_b32_b16 s7, s21, s7
+; GFX11-TRUE16-NEXT: s_pack_ll_b32_b16 s8, s22, s8
+; GFX11-TRUE16-NEXT: s_pack_ll_b32_b16 s9, s23, s9
+; GFX11-TRUE16-NEXT: s_pack_ll_b32_b16 s10, s24, s10
+; GFX11-TRUE16-NEXT: s_pack_ll_b32_b16 s11, s25, s11
+; GFX11-TRUE16-NEXT: s_pack_ll_b32_b16 s12, s26, s12
+; GFX11-TRUE16-NEXT: s_pack_ll_b32_b16 s13, s27, s13
+; GFX11-TRUE16-NEXT: s_pack_ll_b32_b16 s14, s28, s14
+; GFX11-TRUE16-NEXT: s_pack_ll_b32_b16 s15, s29, s15
; GFX11-TRUE16-NEXT: s_and_b32 s47, vcc_lo, exec_lo
; GFX11-TRUE16-NEXT: s_cbranch_scc0 .LBB31_4
; GFX11-TRUE16-NEXT: ; %bb.1: ; %cmp.false
-; GFX11-TRUE16-NEXT: v_lshl_or_b32 v18, v35, 16, v39
-; GFX11-TRUE16-NEXT: v_lshl_or_b32 v19, v34, 16, v38
-; GFX11-TRUE16-NEXT: v_lshl_or_b32 v20, v33, 16, v37
-; GFX11-TRUE16-NEXT: v_lshl_or_b32 v21, v32, 16, v36
-; GFX11-TRUE16-NEXT: v_dual_mov_b32 v0, s4 :: v_dual_mov_b32 v1, s1
-; GFX11-TRUE16-NEXT: v_dual_mov_b32 v2, s2 :: v_dual_mov_b32 v3, s3
-; GFX11-TRUE16-NEXT: v_dual_mov_b32 v4, s5 :: v_dual_mov_b32 v5, s6
-; GFX11-TRUE16-NEXT: v_dual_mov_b32 v6, s7 :: v_dual_mov_b32 v7, s8
-; GFX11-TRUE16-NEXT: v_dual_mov_b32 v8, s9 :: v_dual_mov_b32 v9, s10
-; GFX11-TRUE16-NEXT: v_dual_mov_b32 v10, s11 :: v_dual_mov_b32 v11, s12
-; GFX11-TRUE16-NEXT: v_dual_mov_b32 v12, s13 :: v_dual_mov_b32 v13, s14
-; GFX11-TRUE16-NEXT: v_dual_mov_b32 v14, s15 :: v_dual_mov_b32 v15, s16
-; GFX11-TRUE16-NEXT: v_dual_mov_b32 v16, s17 :: v_dual_mov_b32 v17, s0
-; GFX11-TRUE16-NEXT: s_and_not1_b32 vcc_lo, exec_lo, s40
+; GFX11-TRUE16-NEXT: v_dual_mov_b32 v0, s40 :: v_dual_mov_b32 v5, s0
+; GFX11-TRUE16-NEXT: v_dual_mov_b32 v2, s41 :: v_dual_mov_b32 v9, s1
+; GFX11-TRUE16-NEXT: v_dual_mov_b32 v14, s2 :: v_dual_mov_b32 v27, s4
+; GFX11-TRUE16-NEXT: v_dual_mov_b32 v20, s3 :: v_dual_mov_b32 v35, s5
+; GFX11-TRUE16-NEXT: v_dual_mov_b32 v44, s6 :: v_dual_mov_b32 v65, s8
+; GFX11-TRUE16-NEXT: v_dual_mov_b32 v54, s7 :: v_dual_mov_b32 v77, s9
+; GFX11-TRUE16-NEXT: v_dual_mov_b32 v90, s10 :: v_dual_mov_b32 v119, s12
+; GFX11-TRUE16-NEXT: v_dual_mov_b32 v104, s11 :: v_dual_mov_b32 v135, s13
+; GFX11-TRUE16-NEXT: v_mov_b32_e32 v152, s14
+; GFX11-TRUE16-NEXT: v_mov_b32_e32 v170, s15
+; GFX11-TRUE16-NEXT: s_and_not1_b32 vcc_lo, exec_lo, s42
; GFX11-TRUE16-NEXT: s_cbranch_vccnz .LBB31_3
; GFX11-TRUE16-NEXT: .LBB31_2: ; %cmp.true
-; GFX11-TRUE16-NEXT: v_lshl_or_b32 v18, v35, 16, v39
-; GFX11-TRUE16-NEXT: v_lshl_or_b32 v19, v34, 16, v38
-; GFX11-TRUE16-NEXT: v_lshl_or_b32 v20, v33, 16, v37
-; GFX11-TRUE16-NEXT: v_lshl_or_b32 v21, v32, 16, v36
-; GFX11-TRUE16-NEXT: v_pk_add_u16 v0, s4, 3 op_sel_hi:[1,0]
-; GFX11-TRUE16-NEXT: v_pk_add_u16 v1, s1, 3 op_sel_hi:[1,0]
-; GFX11-TRUE16-NEXT: v_pk_add_u16 v2, s2, 3 op_sel_hi:[1,0]
-; GFX11-TRUE16-NEXT: v_pk_add_u16 v3, s3, 3 op_sel_hi:[1,0]
-; GFX11-TRUE16-NEXT: v_pk_add_u16 v4, s5, 3 op_sel_hi:[1,0]
-; GFX11-TRUE16-NEXT: v_pk_add_u16 v5, s6, 3 op_sel_hi:[1,0]
-; GFX11-TRUE16-NEXT: v_pk_add_u16 v6, s7, 3 op_sel_hi:[1,0]
-; GFX11-TRUE16-NEXT: v_pk_add_u16 v7, s8, 3 op_sel_hi:[1,0]
-; GFX11-TRUE16-NEXT: v_pk_add_u16 v8, s9, 3 op_sel_hi:[1,0]
-; GFX11-TRUE16-NEXT: v_pk_add_u16 v9, s10, 3 op_sel_hi:[1,0]
-; GFX11-TRUE16-NEXT: v_pk_add_u16 v10, s11, 3 op_sel_hi:[1,0]
-; GFX11-TRUE16-NEXT: v_pk_add_u16 v11, s12, 3 op_sel_hi:[1,0]
-; GFX11-TRUE16-NEXT: v_pk_add_u16 v12, s13, 3 op_sel_hi:[1,0]
-; GFX11-TRUE16-NEXT: v_pk_add_u16 v13, s14, 3 op_sel_hi:[1,0]
-; GFX11-TRUE16-NEXT: v_pk_add_u16 v14, s15, 3 op_sel_hi:[1,0]
-; GFX11-TRUE16-NEXT: v_pk_add_u16 v15, s16, 3 op_sel_hi:[1,0]
-; GFX11-TRUE16-NEXT: v_pk_add_u16 v16, s17, 3 op_sel_hi:[1,0]
-; GFX11-TRUE16-NEXT: v_pk_add_u16 v17, s0, 3 op_sel_hi:[1,0]
-; GFX11-TRUE16-NEXT: v_pk_add_u16 v18, v18, 3 op_sel_hi:[1,0]
-; GFX11-TRUE16-NEXT: v_pk_add_u16 v19, v19, 3 op_sel_hi:[1,0]
-; GFX11-TRUE16-NEXT: v_pk_add_u16 v20, v20, 3 op_sel_hi:[1,0]
-; GFX11-TRUE16-NEXT: v_pk_add_u16 v21, v21, 3 op_sel_hi:[1,0]
+; GFX11-TRUE16-NEXT: v_pk_add_u16 v0, s40, 3 op_sel_hi:[1,0]
+; GFX11-TRUE16-NEXT: v_pk_add_u16 v2, s41, 3 op_sel_hi:[1,0]
+; GFX11-TRUE16-NEXT: v_pk_add_u16 v188, v188, 3 op_sel_hi:[1,0]
+; GFX11-TRUE16-NEXT: v_pk_add_u16 v187, v187, 3 op_sel_hi:[1,0]
+; GFX11-TRUE16-NEXT: v_pk_add_u16 v186, v186, 3 op_sel_hi:[1,0]
+; GFX11-TRUE16-NEXT: v_pk_add_u16 v185, v185, 3 op_sel_hi:[1,0]
+; GFX11-TRUE16-NEXT: v_pk_add_u16 v5, s0, 3 op_sel_hi:[1,0]
+; GFX11-TRUE16-NEXT: v_pk_add_u16 v9, s1, 3 op_sel_hi:[1,0]
+; GFX11-TRUE16-NEXT: v_pk_add_u16 v14, s2, 3 op_sel_hi:[1,0]
+; GFX11-TRUE16-NEXT: v_pk_add_u16 v20, s3, 3 op_sel_hi:[1,0]
+; GFX11-TRUE16-NEXT: v_pk_add_u16 v27, s4, 3 op_sel_hi:[1,0]
+; GFX11-TRUE16-NEXT: v_pk_add_u16 v35, s5, 3 op_sel_hi:[1,0]
+; GFX11-TRUE16-NEXT: v_pk_add_u16 v44, s6, 3 op_sel_hi:[1,0]
+; GFX11-TRUE16-NEXT: v_pk_add_u16 v54, s7, 3 op_sel_hi:[1,0]
+; GFX11-TRUE16-NEXT: v_pk_add_u16 v65, s8, 3 op_sel_hi:[1,0]
+; GFX11-TRUE16-NEXT: v_pk_add_u16 v77, s9, 3 op_sel_hi:[1,0]
+; GFX11-TRUE16-NEXT: v_pk_add_u16 v90, s10, 3 op_sel_hi:[1,0]
+; GFX11-TRUE16-NEXT: v_pk_add_u16 v104, s11, 3 op_sel_hi:[1,0]
+; GFX11-TRUE16-NEXT: v_pk_add_u16 v119, s12, 3 op_sel_hi:[1,0]
+; GFX11-TRUE16-NEXT: v_pk_add_u16 v135, s13, 3 op_sel_hi:[1,0]
+; GFX11-TRUE16-NEXT: v_pk_add_u16 v152, s14, 3 op_sel_hi:[1,0]
+; GFX11-TRUE16-NEXT: v_pk_add_u16 v170, s15, 3 op_sel_hi:[1,0]
; GFX11-TRUE16-NEXT: .LBB31_3: ; %end
+; GFX11-TRUE16-NEXT: v_dual_mov_b32 v1, v2 :: v_dual_mov_b32 v2, v5
+; GFX11-TRUE16-NEXT: v_dual_mov_b32 v5, v20 :: v_dual_mov_b32 v6, v27
+; GFX11-TRUE16-NEXT: v_dual_mov_b32 v7, v35 :: v_dual_mov_b32 v8, v44
+; GFX11-TRUE16-NEXT: v_dual_mov_b32 v11, v77 :: v_dual_mov_b32 v12, v90
+; GFX11-TRUE16-NEXT: v_mov_b32_e32 v13, v104
+; GFX11-TRUE16-NEXT: v_dual_mov_b32 v15, v135 :: v_dual_mov_b32 v16, v152
+; GFX11-TRUE16-NEXT: v_dual_mov_b32 v17, v170 :: v_dual_mov_b32 v18, v188
+; GFX11-TRUE16-NEXT: v_dual_mov_b32 v19, v187 :: v_dual_mov_b32 v20, v186
+; GFX11-TRUE16-NEXT: v_mov_b32_e32 v21, v185
+; GFX11-TRUE16-NEXT: s_clause 0x1f
+; GFX11-TRUE16-NEXT: scratch_load_b32 v188, off, s32
+; GFX11-TRUE16-NEXT: scratch_load_b32 v187, off, s32 offset:4
+; GFX11-TRUE16-NEXT: scratch_load_b32 v186, off, s32 offset:8
+; GFX11-TRUE16-NEXT: scratch_load_b32 v185, off, s32 offset:12
+; GFX11-TRUE16-NEXT: scratch_load_b32 v184, off, s32 offset:16
+; GFX11-TRUE16-NEXT: scratch_load_b32 v175, off, s32 offset:20
+; GFX11-TRUE16-NEXT: scratch_load_b32 v174, off, s32 offset:24
+; GFX11-TRUE16-NEXT: scratch_load_b32 v173, off, s32 offset:28
+; GFX11-TRUE16-NEXT: scratch_load_b32 v172, off, s32 offset:32
+; GFX11-TRUE16-NEXT: scratch_load_b32 v171, off, s32 offset:36
+; GFX11-TRUE16-NEXT: scratch_load_b32 v170, off, s32 offset:40
+; GFX11-TRUE16-NEXT: scratch_load_b32 v169, off, s32 offset:44
+; GFX11-TRUE16-NEXT: scratch_load_b32 v168, off, s32 offset:48
+; GFX11-TRUE16-NEXT: scratch_load_b32 v159, off, s32 offset:52
+; GFX11-TRUE16-NEXT: scratch_load_b32 v158, off, s32 offset:56
+; GFX11-TRUE16-NEXT: scratch_load_b32 v157, off, s32 offset:60
+; GFX11-TRUE16-NEXT: scratch_load_b32 v156, off, s32 offset:64
+; GFX11-TRUE16-NEXT: scratch_load_b32 v155, off, s32 offset:68
+; GFX11-TRUE16-NEXT: scratch_load_b32 v154, off, s32 offset:72
+; GFX11-TRUE16-NEXT: scratch_load_b32 v153, off, s32 offset:76
+; GFX11-TRUE16-NEXT: scratch_load_b32 v152, off, s32 offset:80
+; GFX11-TRUE16-NEXT: scratch_load_b32 v143, off, s32 offset:84
+; GFX11-TRUE16-NEXT: scratch_load_b32 v142, off, s32 offset:88
+; GFX11-TRUE16-NEXT: scratch_load_b32 v141, off, s32 offset:92
+; GFX11-TRUE16-NEXT: scratch_load_b32 v140, off, s32 offset:96
+; GFX11-TRUE16-NEXT: scratch_load_b32 v139, off, s32 offset:100
+; GFX11-TRUE16-NEXT: scratch_load_b32 v138, off, s32 offset:104
+; GFX11-TRUE16-NEXT: scratch_load_b32 v137, off, s32 offset:108
+; GFX11-TRUE16-NEXT: scratch_load_b32 v136, off, s32 offset:112
+; GFX11-TRUE16-NEXT: scratch_load_b32 v127, off, s32 offset:116
+; GFX11-TRUE16-NEXT: scratch_load_b32 v126, off, s32 offset:120
+; GFX11-TRUE16-NEXT: scratch_load_b32 v125, off, s32 offset:124
+; GFX11-TRUE16-NEXT: s_clause 0x1f
+; GFX11-TRUE16-NEXT: scratch_load_b32 v124, off, s32 offset:128
+; GFX11-TRUE16-NEXT: scratch_load_b32 v123, off, s32 offset:132
+; GFX11-TRUE16-NEXT: scratch_load_b32 v122, off, s32 offset:136
+; GFX11-TRUE16-NEXT: scratch_load_b32 v121, off, s32 offset:140
+; GFX11-TRUE16-NEXT: scratch_load_b32 v120, off, s32 offset:144
+; GFX11-TRUE16-NEXT: scratch_load_b32 v111, off, s32 offset:148
+; GFX11-TRUE16-NEXT: scratch_load_b32 v110, off, s32 offset:152
+; GFX11-TRUE16-NEXT: scratch_load_b32 v109, off, s32 offset:156
+; GFX11-TRUE16-NEXT: scratch_load_b32 v108, off, s32 offset:160
+; GFX11-TRUE16-NEXT: scratch_load_b32 v107, off, s32 offset:164
+; GFX11-TRUE16-NEXT: scratch_load_b32 v106, off, s32 offset:168
+; GFX11-TRUE16-NEXT: scratch_load_b32 v105, off, s32 offset:172
+; GFX11-TRUE16-NEXT: scratch_load_b32 v104, off, s32 offset:176
+; GFX11-TRUE16-NEXT: scratch_load_b32 v95, off, s32 offset:180
+; GFX11-TRUE16-NEXT: scratch_load_b32 v94, off, s32 offset:184
+; GFX11-TRUE16-NEXT: scratch_load_b32 v93, off, s32 offset:188
+; GFX11-TRUE16-NEXT: scratch_load_b32 v92, off, s32 offset:192
+; GFX11-TRUE16-NEXT: scratch_load_b32 v91, off, s32 offset:196
+; GFX11-TRUE16-NEXT: scratch_load_b32 v90, off, s32 offset:200
+; GFX11-TRUE16-NEXT: scratch_load_b32 v89, off, s32 offset:204
+; GFX11-TRUE16-NEXT: scratch_load_b32 v88, off, s32 offset:208
+; GFX11-TRUE16-NEXT: scratch_load_b32 v79, off, s32 offset:212
+; GFX11-TRUE16-NEXT: scratch_load_b32 v78, off, s32 offset:216
+; GFX11-TRUE16-NEXT: scratch_load_b32 v77, off, s32 offset:220
+; GFX11-TRUE16-NEXT: scratch_load_b32 v76, off, s32 offset:224
+; GFX11-TRUE16-NEXT: scratch_load_b32 v75, off, s32 offset:228
+; GFX11-TRUE16-NEXT: scratch_load_b32 v74, off, s32 offset:232
+; GFX11-TRUE16-NEXT: scratch_load_b32 v73, off, s32 offset:236
+; GFX11-TRUE16-NEXT: scratch_load_b32 v72, off, s32 offset:240
+; GFX11-TRUE16-NEXT: scratch_load_b32 v63, off, s32 offset:244
+; GFX11-TRUE16-NEXT: scratch_load_b32 v62, off, s32 offset:248
+; GFX11-TRUE16-NEXT: scratch_load_b32 v61, off, s32 offset:252
+; GFX11-TRUE16-NEXT: s_clause 0xc
+; GFX11-TRUE16-NEXT: scratch_load_b32 v60, off, s32 offset:256
+; GFX11-TRUE16-NEXT: scratch_load_b32 v59, off, s32 offset:260
+; GFX11-TRUE16-NEXT: scratch_load_b32 v58, off, s32 offset:264
+; GFX11-TRUE16-NEXT: scratch_load_b32 v57, off, s32 offset:268
+; GFX11-TRUE16-NEXT: scratch_load_b32 v56, off, s32 offset:272
+; GFX11-TRUE16-NEXT: scratch_load_b32 v47, off, s32 offset:276
+; GFX11-TRUE16-NEXT: scratch_load_b32 v46, off, s32 offset:280
+; GFX11-TRUE16-NEXT: scratch_load_b32 v45, off, s32 offset:284
+; GFX11-TRUE16-NEXT: scratch_load_b32 v44, off, s32 offset:288
+; GFX11-TRUE16-NEXT: scratch_load_b32 v43, off, s32 offset:292
+; GFX11-TRUE16-NEXT: scratch_load_b32 v42, off, s32 offset:296
+; GFX11-TRUE16-NEXT: scratch_load_b32 v41, off, s32 offset:300
+; GFX11-TRUE16-NEXT: scratch_load_b32 v40, off, s32 offset:304
+; GFX11-TRUE16-NEXT: v_dual_mov_b32 v3, v9 :: v_dual_mov_b32 v4, v14
+; GFX11-TRUE16-NEXT: v_dual_mov_b32 v9, v54 :: v_dual_mov_b32 v10, v65
+; GFX11-TRUE16-NEXT: v_mov_b32_e32 v14, v119
+; GFX11-TRUE16-NEXT: s_waitcnt vmcnt(0)
; GFX11-TRUE16-NEXT: s_setpc_b64 s[30:31]
; GFX11-TRUE16-NEXT: .LBB31_4:
; GFX11-TRUE16-NEXT: ; implicit-def: $vgpr0_vgpr1_vgpr2_vgpr3_vgpr4_vgpr5_vgpr6_vgpr7_vgpr8_vgpr9_vgpr10_vgpr11_vgpr12_vgpr13_vgpr14_vgpr15_vgpr16_vgpr17_vgpr18_vgpr19_vgpr20_vgpr21_vgpr22_vgpr23_vgpr24_vgpr25_vgpr26_vgpr27_vgpr28_vgpr29_vgpr30_vgpr31
+; GFX11-TRUE16-NEXT: ; implicit-def: $vgpr1_vgpr2_vgpr3_vgpr4_vgpr5_vgpr6_vgpr7_vgpr8_vgpr9_vgpr10_vgpr11_vgpr12_vgpr13_vgpr14_vgpr15_vgpr16_vgpr17_vgpr18_vgpr19_vgpr20_vgpr21_vgpr22_vgpr23_vgpr24_vgpr25_vgpr26_vgpr27_vgpr28_vgpr29_vgpr30_vgpr31_vgpr32
+; GFX11-TRUE16-NEXT: ; implicit-def: $vgpr3_vgpr4_vgpr5_vgpr6_vgpr7_vgpr8_vgpr9_vgpr10_vgpr11_vgpr12_vgpr13_vgpr14_vgpr15_vgpr16_vgpr17_vgpr18_vgpr19_vgpr20_vgpr21_vgpr22_vgpr23_vgpr24_vgpr25_vgpr26_vgpr27_vgpr28_vgpr29_vgpr30_vgpr31_vgpr32_vgpr33_vgpr34
+; GFX11-TRUE16-NEXT: ; implicit-def: $vgpr6_vgpr7_vgpr8_vgpr9_vgpr10_vgpr11_vgpr12_vgpr13_vgpr14_vgpr15_vgpr16_vgpr17_vgpr18_vgpr19_vgpr20_vgpr21_vgpr22_vgpr23_vgpr24_vgpr25_vgpr26_vgpr27_vgpr28_vgpr29_vgpr30_vgpr31_vgpr32_vgpr33_vgpr34_vgpr35_vgpr36_vgpr37
+; GFX11-TRUE16-NEXT: ; implicit-def: $vgpr10_vgpr11_vgpr12_vgpr13_vgpr14_vgpr15_vgpr16_vgpr17_vgpr18_vgpr19_vgpr20_vgpr21_vgpr22_vgpr23_vgpr24_vgpr25_vgpr26_vgpr27_vgpr28_vgpr29_vgpr30_vgpr31_vgpr32_vgpr33_vgpr34_vgpr35_vgpr36_vgpr37_vgpr38_vgpr39_vgpr40_vgpr41
+; GFX11-TRUE16-NEXT: ; implicit-def: $vgpr15_vgpr16_vgpr17_vgpr18_vgpr19_vgpr20_vgpr21_vgpr22_vgpr23_vgpr24_vgpr25_vgpr26_vgpr27_vgpr28_vgpr29_vgpr30_vgpr31_vgpr32_vgpr33_vgpr34_vgpr35_vgpr36_vgpr37_vgpr38_vgpr39_vgpr40_vgpr41_vgpr42_vgpr43_vgpr44_vgpr45_vgpr46
+; GFX11-TRUE16-NEXT: ; implicit-def: $vgpr21_vgpr22_vgpr23_vgpr24_vgpr25_vgpr26_vgpr27_vgpr28_vgpr29_vgpr30_vgpr31_vgpr32_vgpr33_vgpr34_vgpr35_vgpr36_vgpr37_vgpr38_vgpr39_vgpr40_vgpr41_vgpr42_vgpr43_vgpr44_vgpr45_vgpr46_vgpr47_vgpr48_vgpr49_vgpr50_vgpr51_vgpr52
+; GFX11-TRUE16-NEXT: ; implicit-def: $vgpr28_vgpr29_vgpr30_vgpr31_vgpr32_vgpr33_vgpr34_vgpr35_vgpr36_vgpr37_vgpr38_vgpr39_vgpr40_vgpr41_vgpr42_vgpr43_vgpr44_vgpr45_vgpr46_vgpr47_vgpr48_vgpr49_vgpr50_vgpr51_vgpr52_vgpr53_vgpr54_vgpr55_vgpr56_vgpr57_vgpr58_vgpr59
+; GFX11-TRUE16-NEXT: ; implicit-def: $vgpr36_vgpr37_vgpr38_vgpr39_vgpr40_vgpr41_vgpr42_vgpr43_vgpr44_vgpr45_vgpr46_vgpr47_vgpr48_vgpr49_vgpr50_vgpr51_vgpr52_vgpr53_vgpr54_vgpr55_vgpr56_vgpr57_vgpr58_vgpr59_vgpr60_vgpr61_vgpr62_vgpr63_vgpr64_vgpr65_vgpr66_vgpr67
+; GFX11-TRUE16-NEXT: ; implicit-def: $vgpr45_vgpr46_vgpr47_vgpr48_vgpr49_vgpr50_vgpr51_vgpr52_vgpr53_vgpr54_vgpr55_vgpr56_vgpr57_vgpr58_vgpr59_vgpr60_vgpr61_vgpr62_vgpr63_vgpr64_vgpr65_vgpr66_vgpr67_vgpr68_vgpr69_vgpr70_vgpr71_vgpr72_vgpr73_vgpr74_vgpr75_vgpr76
+; GFX11-TRUE16-NEXT: ; implicit-def: $vgpr55_vgpr56_vgpr57_vgpr58_vgpr59_vgpr60_vgpr61_vgpr62_vgpr63_vgpr64_vgpr65_vgpr66_vgpr67_vgpr68_vgpr69_vgpr70_vgpr71_vgpr72_vgpr73_vgpr74_vgpr75_vgpr76_vgpr77_vgpr78_vgpr79_vgpr80_vgpr81_vgpr82_vgpr83_vgpr84_vgpr85_vgpr86
+; GFX11-TRUE16-NEXT: ; implicit-def: $vgpr66_vgpr67_vgpr68_vgpr69_vgpr70_vgpr71_vgpr72_vgpr73_vgpr74_vgpr75_vgpr76_vgpr77_vgpr78_vgpr79_vgpr80_vgpr81_vgpr82_vgpr83_vgpr84_vgpr85_vgpr86_vgpr87_vgpr88_vgpr89_vgpr90_vgpr91_vgpr92_vgpr93_vgpr94_vgpr95_vgpr96_vgpr97
+; GFX11-TRUE16-NEXT: ; implicit-def: $vgpr78_vgpr79_vgpr80_vgpr81_vgpr82_vgpr83_vgpr84_vgpr85_vgpr86_vgpr87_vgpr88_vgpr89_vgpr90_vgpr91_vgpr92_vgpr93_vgpr94_vgpr95_vgpr96_vgpr97_vgpr98_vgpr99_vgpr100_vgpr101_vgpr102_vgpr103_vgpr104_vgpr105_vgpr106_vgpr107_vgpr108_vgpr109
+; GFX11-TRUE16-NEXT: ; implicit-def: $vgpr91_vgpr92_vgpr93_vgpr94_vgpr95_vgpr96_vgpr97_vgpr98_vgpr99_vgpr100_vgpr101_vgpr102_vgpr103_vgpr104_vgpr105_vgpr106_vgpr107_vgpr108_vgpr109_vgpr110_vgpr111_vgpr112_vgpr113_vgpr114_vgpr115_vgpr116_vgpr117_vgpr118_vgpr119_vgpr120_vgpr121_vgpr122
+; GFX11-TRUE16-NEXT: ; implicit-def: $vgpr105_vgpr106_vgpr107_vgpr108_vgpr109_vgpr110_vgpr111_vgpr112_vgpr113_vgpr114_vgpr115_vgpr116_vgpr117_vgpr118_vgpr119_vgpr120_vgpr121_vgpr122_vgpr123_vgpr124_vgpr125_vgpr126_vgpr127_vgpr128_vgpr129_vgpr130_vgpr131_vgpr132_vgpr133_vgpr134_vgpr135_vgpr136
+; GFX11-TRUE16-NEXT: ; implicit-def: $vgpr120_vgpr121_vgpr122_vgpr123_vgpr124_vgpr125_vgpr126_vgpr127_vgpr128_vgpr129_vgpr130_vgpr131_vgpr132_vgpr133_vgpr134_vgpr135_vgpr136_vgpr137_vgpr138_vgpr139_vgpr140_vgpr141_vgpr142_vgpr143_vgpr144_vgpr145_vgpr146_vgpr147_vgpr148_vgpr149_vgpr150_vgpr151
+; GFX11-TRUE16-NEXT: ; implicit-def: $vgpr136_vgpr137_vgpr138_vgpr139_vgpr140_vgpr141_vgpr142_vgpr143_vgpr144_vgpr145_vgpr146_vgpr147_vgpr148_vgpr149_vgpr150_vgpr151_vgpr152_vgpr153_vgpr154_vgpr155_vgpr156_vgpr157_vgpr158_vgpr159_vgpr160_vgpr161_vgpr162_vgpr163_vgpr164_vgpr165_vgpr166_vgpr167
+; GFX11-TRUE16-NEXT: ; implicit-def: $vgpr153_vgpr154_vgpr155_vgpr156_vgpr157_vgpr158_vgpr159_vgpr160_vgpr161_vgpr162_vgpr163_vgpr164_vgpr165_vgpr166_vgpr167_vgpr168_vgpr169_vgpr170_vgpr171_vgpr172_vgpr173_vgpr174_vgpr175_vgpr176_vgpr177_vgpr178_vgpr179_vgpr180_vgpr181_vgpr182_vgpr183_vgpr184
; GFX11-TRUE16-NEXT: s_branch .LBB31_2
;
; GFX11-FAKE16-LABEL: bitcast_v44i16_to_v22f32_scalar:
@@ -15630,155 +16289,295 @@ define inreg <44 x half> @bitcast_v22f32_to_v44f16_scalar(<22 x float> inreg %a,
; GFX9-NEXT: ; implicit-def: $vgpr30
; GFX9-NEXT: s_branch .LBB33_2
;
-; GFX11-LABEL: bitcast_v22f32_to_v44f16_scalar:
-; GFX11: ; %bb.0:
-; GFX11-NEXT: s_waitcnt vmcnt(0) expcnt(0) lgkmcnt(0)
-; GFX11-NEXT: v_cmp_ne_u32_e32 vcc_lo, 0, v4
-; GFX11-NEXT: v_dual_mov_b32 v22, s0 :: v_dual_mov_b32 v21, s1
-; GFX11-NEXT: v_dual_mov_b32 v20, s2 :: v_dual_mov_b32 v19, s3
-; GFX11-NEXT: v_dual_mov_b32 v18, s16 :: v_dual_mov_b32 v5, s18
-; GFX11-NEXT: v_dual_mov_b32 v6, s17 :: v_dual_mov_b32 v11, s19
-; GFX11-NEXT: v_dual_mov_b32 v10, s20 :: v_dual_mov_b32 v9, s21
-; GFX11-NEXT: v_dual_mov_b32 v8, s22 :: v_dual_mov_b32 v7, s23
-; GFX11-NEXT: v_dual_mov_b32 v15, s24 :: v_dual_mov_b32 v14, s25
-; GFX11-NEXT: v_dual_mov_b32 v13, s26 :: v_dual_mov_b32 v12, s27
-; GFX11-NEXT: v_dual_mov_b32 v16, s28 :: v_dual_mov_b32 v17, s29
-; GFX11-NEXT: s_mov_b32 s0, 0
-; GFX11-NEXT: s_and_b32 s1, vcc_lo, exec_lo
-; GFX11-NEXT: s_cbranch_scc0 .LBB33_4
-; GFX11-NEXT: ; %bb.1: ; %cmp.false
-; GFX11-NEXT: v_lshrrev_b32_e32 v26, 16, v3
-; GFX11-NEXT: v_lshrrev_b32_e32 v27, 16, v2
-; GFX11-NEXT: v_lshrrev_b32_e32 v28, 16, v1
-; GFX11-NEXT: v_lshrrev_b32_e32 v29, 16, v0
-; GFX11-NEXT: v_lshrrev_b32_e32 v30, 16, v17
-; GFX11-NEXT: v_lshrrev_b32_e32 v31, 16, v16
-; GFX11-NEXT: v_lshrrev_b32_e32 v32, 16, v12
-; GFX11-NEXT: v_lshrrev_b32_e32 v33, 16, v13
-; GFX11-NEXT: v_lshrrev_b32_e32 v34, 16, v14
-; GFX11-NEXT: v_lshrrev_b32_e32 v35, 16, v15
-; GFX11-NEXT: v_lshrrev_b32_e32 v36, 16, v7
-; GFX11-NEXT: v_lshrrev_b32_e32 v37, 16, v8
-; GFX11-NEXT: v_lshrrev_b32_e32 v38, 16, v9
-; GFX11-NEXT: v_lshrrev_b32_e32 v39, 16, v10
-; GFX11-NEXT: v_lshrrev_b32_e32 v48, 16, v11
-; GFX11-NEXT: v_lshrrev_b32_e32 v49, 16, v5
-; GFX11-NEXT: v_lshrrev_b32_e32 v50, 16, v6
-; GFX11-NEXT: v_lshrrev_b32_e32 v4, 16, v18
-; GFX11-NEXT: v_lshrrev_b32_e32 v23, 16, v19
-; GFX11-NEXT: v_lshrrev_b32_e32 v51, 16, v20
-; GFX11-NEXT: v_lshrrev_b32_e32 v25, 16, v21
-; GFX11-NEXT: v_lshrrev_b32_e32 v24, 16, v22
-; GFX11-NEXT: s_and_not1_b32 vcc_lo, exec_lo, s0
-; GFX11-NEXT: s_cbranch_vccnz .LBB33_3
-; GFX11-NEXT: .LBB33_2: ; %cmp.true
-; GFX11-NEXT: v_dual_add_f32 v3, 1.0, v3 :: v_dual_add_f32 v2, 1.0, v2
-; GFX11-NEXT: v_dual_add_f32 v1, 1.0, v1 :: v_dual_add_f32 v0, 1.0, v0
-; GFX11-NEXT: v_dual_add_f32 v17, 1.0, v17 :: v_dual_add_f32 v16, 1.0, v16
-; GFX11-NEXT: v_dual_add_f32 v12, 1.0, v12 :: v_dual_add_f32 v13, 1.0, v13
-; GFX11-NEXT: v_dual_add_f32 v14, 1.0, v14 :: v_dual_add_f32 v15, 1.0, v15
-; GFX11-NEXT: v_dual_add_f32 v7, 1.0, v7 :: v_dual_add_f32 v8, 1.0, v8
-; GFX11-NEXT: v_dual_add_f32 v9, 1.0, v9 :: v_dual_add_f32 v10, 1.0, v10
-; GFX11-NEXT: v_dual_add_f32 v11, 1.0, v11 :: v_dual_add_f32 v6, 1.0, v6
-; GFX11-NEXT: v_dual_add_f32 v5, 1.0, v5 :: v_dual_add_f32 v18, 1.0, v18
-; GFX11-NEXT: v_dual_add_f32 v19, 1.0, v19 :: v_dual_add_f32 v20, 1.0, v20
-; GFX11-NEXT: v_dual_add_f32 v21, 1.0, v21 :: v_dual_add_f32 v22, 1.0, v22
-; GFX11-NEXT: v_lshrrev_b32_e32 v26, 16, v3
-; GFX11-NEXT: v_lshrrev_b32_e32 v27, 16, v2
-; GFX11-NEXT: v_lshrrev_b32_e32 v28, 16, v1
-; GFX11-NEXT: v_lshrrev_b32_e32 v29, 16, v0
-; GFX11-NEXT: v_lshrrev_b32_e32 v30, 16, v17
-; GFX11-NEXT: v_lshrrev_b32_e32 v31, 16, v16
-; GFX11-NEXT: v_lshrrev_b32_e32 v32, 16, v12
-; GFX11-NEXT: v_lshrrev_b32_e32 v33, 16, v13
-; GFX11-NEXT: v_lshrrev_b32_e32 v34, 16, v14
-; GFX11-NEXT: v_lshrrev_b32_e32 v35, 16, v15
-; GFX11-NEXT: v_lshrrev_b32_e32 v36, 16, v7
-; GFX11-NEXT: v_lshrrev_b32_e32 v37, 16, v8
-; GFX11-NEXT: v_lshrrev_b32_e32 v38, 16, v9
-; GFX11-NEXT: v_lshrrev_b32_e32 v39, 16, v10
-; GFX11-NEXT: v_lshrrev_b32_e32 v48, 16, v11
-; GFX11-NEXT: v_lshrrev_b32_e32 v49, 16, v5
-; GFX11-NEXT: v_lshrrev_b32_e32 v50, 16, v6
-; GFX11-NEXT: v_lshrrev_b32_e32 v4, 16, v18
-; GFX11-NEXT: v_lshrrev_b32_e32 v23, 16, v19
-; GFX11-NEXT: v_lshrrev_b32_e32 v51, 16, v20
-; GFX11-NEXT: v_lshrrev_b32_e32 v25, 16, v21
-; GFX11-NEXT: v_lshrrev_b32_e32 v24, 16, v22
-; GFX11-NEXT: .LBB33_3: ; %end
-; GFX11-NEXT: v_and_b32_e32 v19, 0xffff, v19
-; GFX11-NEXT: v_and_b32_e32 v21, 0xffff, v21
-; GFX11-NEXT: v_and_b32_e32 v18, 0xffff, v18
-; GFX11-NEXT: v_and_b32_e32 v11, 0xffff, v11
-; GFX11-NEXT: v_and_b32_e32 v6, 0xffff, v6
-; GFX11-NEXT: v_lshl_or_b32 v23, v23, 16, v19
-; GFX11-NEXT: v_and_b32_e32 v19, 0xffff, v7
-; GFX11-NEXT: v_and_b32_e32 v20, 0xffff, v20
-; GFX11-NEXT: v_lshl_or_b32 v25, v25, 16, v21
-; GFX11-NEXT: v_and_b32_e32 v22, 0xffff, v22
-; GFX11-NEXT: v_and_b32_e32 v21, 0xffff, v5
-; GFX11-NEXT: v_lshl_or_b32 v4, v4, 16, v18
-; GFX11-NEXT: v_and_b32_e32 v10, 0xffff, v10
-; GFX11-NEXT: v_and_b32_e32 v18, 0xffff, v8
-; GFX11-NEXT: v_lshl_or_b32 v7, v48, 16, v11
-; GFX11-NEXT: v_lshl_or_b32 v11, v36, 16, v19
-; GFX11-NEXT: v_and_b32_e32 v15, 0xffff, v15
-; GFX11-NEXT: v_and_b32_e32 v19, 0xffff, v12
-; GFX11-NEXT: v_and_b32_e32 v1, 0xffff, v1
-; GFX11-NEXT: v_and_b32_e32 v3, 0xffff, v3
-; GFX11-NEXT: v_lshl_or_b32 v5, v50, 16, v6
-; GFX11-NEXT: v_lshl_or_b32 v6, v49, 16, v21
-; GFX11-NEXT: v_and_b32_e32 v9, 0xffff, v9
-; GFX11-NEXT: v_lshl_or_b32 v8, v39, 16, v10
-; GFX11-NEXT: v_lshl_or_b32 v10, v37, 16, v18
-; GFX11-NEXT: v_and_b32_e32 v14, 0xffff, v14
-; GFX11-NEXT: v_and_b32_e32 v18, 0xffff, v13
-; GFX11-NEXT: v_and_b32_e32 v16, 0xffff, v16
-; GFX11-NEXT: v_lshl_or_b32 v12, v35, 16, v15
-; GFX11-NEXT: v_lshl_or_b32 v15, v32, 16, v19
-; GFX11-NEXT: v_and_b32_e32 v17, 0xffff, v17
-; GFX11-NEXT: v_and_b32_e32 v0, 0xffff, v0
-; GFX11-NEXT: v_and_b32_e32 v2, 0xffff, v2
-; GFX11-NEXT: v_lshl_or_b32 v19, v28, 16, v1
-; GFX11-NEXT: v_lshl_or_b32 v21, v26, 16, v3
-; GFX11-NEXT: v_mov_b32_e32 v1, v25
-; GFX11-NEXT: v_lshl_or_b32 v24, v24, 16, v22
-; GFX11-NEXT: v_mov_b32_e32 v3, v23
-; GFX11-NEXT: v_lshl_or_b32 v22, v51, 16, v20
-; GFX11-NEXT: v_lshl_or_b32 v9, v38, 16, v9
-; GFX11-NEXT: v_lshl_or_b32 v13, v34, 16, v14
-; GFX11-NEXT: v_lshl_or_b32 v14, v33, 16, v18
-; GFX11-NEXT: v_lshl_or_b32 v16, v31, 16, v16
-; GFX11-NEXT: v_lshl_or_b32 v17, v30, 16, v17
-; GFX11-NEXT: v_lshl_or_b32 v18, v29, 16, v0
-; GFX11-NEXT: v_lshl_or_b32 v20, v27, 16, v2
-; GFX11-NEXT: v_mov_b32_e32 v0, v24
-; GFX11-NEXT: v_mov_b32_e32 v2, v22
-; GFX11-NEXT: s_setpc_b64 s[30:31]
-; GFX11-NEXT: .LBB33_4:
-; GFX11-NEXT: ; implicit-def: $vgpr24
-; GFX11-NEXT: ; implicit-def: $vgpr25
-; GFX11-NEXT: ; implicit-def: $vgpr51
-; GFX11-NEXT: ; implicit-def: $vgpr23
-; GFX11-NEXT: ; implicit-def: $vgpr4
-; GFX11-NEXT: ; implicit-def: $vgpr50
-; GFX11-NEXT: ; implicit-def: $vgpr49
-; GFX11-NEXT: ; implicit-def: $vgpr48
-; GFX11-NEXT: ; implicit-def: $vgpr39
-; GFX11-NEXT: ; implicit-def: $vgpr38
-; GFX11-NEXT: ; implicit-def: $vgpr37
-; GFX11-NEXT: ; implicit-def: $vgpr36
-; GFX11-NEXT: ; implicit-def: $vgpr35
-; GFX11-NEXT: ; implicit-def: $vgpr34
-; GFX11-NEXT: ; implicit-def: $vgpr33
-; GFX11-NEXT: ; implicit-def: $vgpr32
-; GFX11-NEXT: ; implicit-def: $vgpr31
-; GFX11-NEXT: ; implicit-def: $vgpr30
-; GFX11-NEXT: ; implicit-def: $vgpr29
-; GFX11-NEXT: ; implicit-def: $vgpr28
-; GFX11-NEXT: ; implicit-def: $vgpr27
-; GFX11-NEXT: ; implicit-def: $vgpr26
-; GFX11-NEXT: s_branch .LBB33_2
+; GFX11-TRUE16-LABEL: bitcast_v22f32_to_v44f16_scalar:
+; GFX11-TRUE16: ; %bb.0:
+; GFX11-TRUE16-NEXT: s_waitcnt vmcnt(0) expcnt(0) lgkmcnt(0)
+; GFX11-TRUE16-NEXT: v_dual_mov_b32 v16, v4 :: v_dual_mov_b32 v21, v3
+; GFX11-TRUE16-NEXT: v_dual_mov_b32 v20, v2 :: v_dual_mov_b32 v19, v1
+; GFX11-TRUE16-NEXT: v_dual_mov_b32 v18, v0 :: v_dual_mov_b32 v1, s1
+; GFX11-TRUE16-NEXT: s_delay_alu instid0(VALU_DEP_3)
+; GFX11-TRUE16-NEXT: v_cmp_ne_u32_e32 vcc_lo, 0, v16
+; GFX11-TRUE16-NEXT: v_dual_mov_b32 v0, s0 :: v_dual_mov_b32 v3, s3
+; GFX11-TRUE16-NEXT: v_dual_mov_b32 v2, s2 :: v_dual_mov_b32 v5, s17
+; GFX11-TRUE16-NEXT: v_dual_mov_b32 v4, s16 :: v_dual_mov_b32 v7, s19
+; GFX11-TRUE16-NEXT: v_dual_mov_b32 v6, s18 :: v_dual_mov_b32 v9, s21
+; GFX11-TRUE16-NEXT: v_dual_mov_b32 v8, s20 :: v_dual_mov_b32 v11, s23
+; GFX11-TRUE16-NEXT: v_dual_mov_b32 v10, s22 :: v_dual_mov_b32 v13, s25
+; GFX11-TRUE16-NEXT: v_dual_mov_b32 v12, s24 :: v_dual_mov_b32 v15, s27
+; GFX11-TRUE16-NEXT: v_dual_mov_b32 v14, s26 :: v_dual_mov_b32 v17, s29
+; GFX11-TRUE16-NEXT: v_mov_b32_e32 v16, s28
+; GFX11-TRUE16-NEXT: s_mov_b32 s0, 0
+; GFX11-TRUE16-NEXT: s_and_b32 s1, vcc_lo, exec_lo
+; GFX11-TRUE16-NEXT: s_cbranch_scc0 .LBB33_4
+; GFX11-TRUE16-NEXT: ; %bb.1: ; %cmp.false
+; GFX11-TRUE16-NEXT: v_lshrrev_b32_e32 v22, 16, v21
+; GFX11-TRUE16-NEXT: v_lshrrev_b32_e32 v23, 16, v20
+; GFX11-TRUE16-NEXT: v_lshrrev_b32_e32 v24, 16, v19
+; GFX11-TRUE16-NEXT: v_lshrrev_b32_e32 v25, 16, v18
+; GFX11-TRUE16-NEXT: v_lshrrev_b32_e32 v26, 16, v17
+; GFX11-TRUE16-NEXT: v_lshrrev_b32_e32 v27, 16, v16
+; GFX11-TRUE16-NEXT: v_lshrrev_b32_e32 v28, 16, v15
+; GFX11-TRUE16-NEXT: v_lshrrev_b32_e32 v29, 16, v14
+; GFX11-TRUE16-NEXT: v_lshrrev_b32_e32 v30, 16, v13
+; GFX11-TRUE16-NEXT: v_lshrrev_b32_e32 v31, 16, v12
+; GFX11-TRUE16-NEXT: v_lshrrev_b32_e32 v32, 16, v11
+; GFX11-TRUE16-NEXT: v_lshrrev_b32_e32 v33, 16, v10
+; GFX11-TRUE16-NEXT: v_lshrrev_b32_e32 v34, 16, v9
+; GFX11-TRUE16-NEXT: v_lshrrev_b32_e32 v35, 16, v8
+; GFX11-TRUE16-NEXT: v_lshrrev_b32_e32 v36, 16, v7
+; GFX11-TRUE16-NEXT: v_lshrrev_b32_e32 v37, 16, v6
+; GFX11-TRUE16-NEXT: v_lshrrev_b32_e32 v38, 16, v5
+; GFX11-TRUE16-NEXT: v_lshrrev_b32_e32 v39, 16, v4
+; GFX11-TRUE16-NEXT: v_lshrrev_b32_e32 v48, 16, v3
+; GFX11-TRUE16-NEXT: v_lshrrev_b32_e32 v49, 16, v2
+; GFX11-TRUE16-NEXT: v_lshrrev_b32_e32 v50, 16, v1
+; GFX11-TRUE16-NEXT: v_lshrrev_b32_e32 v51, 16, v0
+; GFX11-TRUE16-NEXT: s_and_not1_b32 vcc_lo, exec_lo, s0
+; GFX11-TRUE16-NEXT: s_cbranch_vccnz .LBB33_3
+; GFX11-TRUE16-NEXT: .LBB33_2: ; %cmp.true
+; GFX11-TRUE16-NEXT: v_dual_add_f32 v21, 1.0, v21 :: v_dual_add_f32 v20, 1.0, v20
+; GFX11-TRUE16-NEXT: v_dual_add_f32 v19, 1.0, v19 :: v_dual_add_f32 v18, 1.0, v18
+; GFX11-TRUE16-NEXT: v_dual_add_f32 v17, 1.0, v17 :: v_dual_add_f32 v16, 1.0, v16
+; GFX11-TRUE16-NEXT: v_dual_add_f32 v15, 1.0, v15 :: v_dual_add_f32 v14, 1.0, v14
+; GFX11-TRUE16-NEXT: v_dual_add_f32 v13, 1.0, v13 :: v_dual_add_f32 v12, 1.0, v12
+; GFX11-TRUE16-NEXT: v_dual_add_f32 v11, 1.0, v11 :: v_dual_add_f32 v10, 1.0, v10
+; GFX11-TRUE16-NEXT: v_dual_add_f32 v9, 1.0, v9 :: v_dual_add_f32 v8, 1.0, v8
+; GFX11-TRUE16-NEXT: v_dual_add_f32 v7, 1.0, v7 :: v_dual_add_f32 v6, 1.0, v6
+; GFX11-TRUE16-NEXT: v_dual_add_f32 v5, 1.0, v5 :: v_dual_add_f32 v4, 1.0, v4
+; GFX11-TRUE16-NEXT: v_dual_add_f32 v3, 1.0, v3 :: v_dual_add_f32 v2, 1.0, v2
+; GFX11-TRUE16-NEXT: v_dual_add_f32 v1, 1.0, v1 :: v_dual_add_f32 v0, 1.0, v0
+; GFX11-TRUE16-NEXT: v_lshrrev_b32_e32 v22, 16, v21
+; GFX11-TRUE16-NEXT: v_lshrrev_b32_e32 v23, 16, v20
+; GFX11-TRUE16-NEXT: v_lshrrev_b32_e32 v24, 16, v19
+; GFX11-TRUE16-NEXT: v_lshrrev_b32_e32 v25, 16, v18
+; GFX11-TRUE16-NEXT: v_lshrrev_b32_e32 v26, 16, v17
+; GFX11-TRUE16-NEXT: v_lshrrev_b32_e32 v27, 16, v16
+; GFX11-TRUE16-NEXT: v_lshrrev_b32_e32 v28, 16, v15
+; GFX11-TRUE16-NEXT: v_lshrrev_b32_e32 v29, 16, v14
+; GFX11-TRUE16-NEXT: v_lshrrev_b32_e32 v30, 16, v13
+; GFX11-TRUE16-NEXT: v_lshrrev_b32_e32 v31, 16, v12
+; GFX11-TRUE16-NEXT: v_lshrrev_b32_e32 v32, 16, v11
+; GFX11-TRUE16-NEXT: v_lshrrev_b32_e32 v33, 16, v10
+; GFX11-TRUE16-NEXT: v_lshrrev_b32_e32 v34, 16, v9
+; GFX11-TRUE16-NEXT: v_lshrrev_b32_e32 v35, 16, v8
+; GFX11-TRUE16-NEXT: v_lshrrev_b32_e32 v36, 16, v7
+; GFX11-TRUE16-NEXT: v_lshrrev_b32_e32 v37, 16, v6
+; GFX11-TRUE16-NEXT: v_lshrrev_b32_e32 v38, 16, v5
+; GFX11-TRUE16-NEXT: v_lshrrev_b32_e32 v39, 16, v4
+; GFX11-TRUE16-NEXT: v_lshrrev_b32_e32 v48, 16, v3
+; GFX11-TRUE16-NEXT: v_lshrrev_b32_e32 v49, 16, v2
+; GFX11-TRUE16-NEXT: v_lshrrev_b32_e32 v50, 16, v1
+; GFX11-TRUE16-NEXT: v_lshrrev_b32_e32 v51, 16, v0
+; GFX11-TRUE16-NEXT: .LBB33_3: ; %end
+; GFX11-TRUE16-NEXT: s_delay_alu instid0(VALU_DEP_1) | instskip(NEXT) | instid1(VALU_DEP_4)
+; GFX11-TRUE16-NEXT: v_dual_mov_b32 v51, v51 :: v_dual_mov_b32 v50, v50
+; GFX11-TRUE16-NEXT: v_dual_mov_b32 v49, v49 :: v_dual_mov_b32 v48, v48
+; GFX11-TRUE16-NEXT: v_dual_mov_b32 v39, v39 :: v_dual_mov_b32 v38, v38
+; GFX11-TRUE16-NEXT: v_dual_mov_b32 v37, v37 :: v_dual_mov_b32 v36, v36
+; GFX11-TRUE16-NEXT: v_dual_mov_b32 v35, v35 :: v_dual_mov_b32 v34, v34
+; GFX11-TRUE16-NEXT: v_dual_mov_b32 v33, v33 :: v_dual_mov_b32 v32, v32
+; GFX11-TRUE16-NEXT: v_dual_mov_b32 v31, v31 :: v_dual_mov_b32 v30, v30
+; GFX11-TRUE16-NEXT: v_dual_mov_b32 v29, v29 :: v_dual_mov_b32 v28, v28
+; GFX11-TRUE16-NEXT: v_dual_mov_b32 v27, v27 :: v_dual_mov_b32 v26, v26
+; GFX11-TRUE16-NEXT: v_dual_mov_b32 v25, v25 :: v_dual_mov_b32 v24, v24
+; GFX11-TRUE16-NEXT: v_dual_mov_b32 v23, v23 :: v_dual_mov_b32 v22, v22
+; GFX11-TRUE16-NEXT: v_mov_b16_e32 v0.h, v51.l
+; GFX11-TRUE16-NEXT: v_mov_b16_e32 v1.h, v50.l
+; GFX11-TRUE16-NEXT: v_mov_b16_e32 v2.h, v49.l
+; GFX11-TRUE16-NEXT: v_mov_b16_e32 v3.h, v48.l
+; GFX11-TRUE16-NEXT: v_mov_b16_e32 v4.h, v39.l
+; GFX11-TRUE16-NEXT: v_mov_b16_e32 v5.h, v38.l
+; GFX11-TRUE16-NEXT: v_mov_b16_e32 v6.h, v37.l
+; GFX11-TRUE16-NEXT: v_mov_b16_e32 v7.h, v36.l
+; GFX11-TRUE16-NEXT: v_mov_b16_e32 v8.h, v35.l
+; GFX11-TRUE16-NEXT: v_mov_b16_e32 v9.h, v34.l
+; GFX11-TRUE16-NEXT: v_mov_b16_e32 v10.h, v33.l
+; GFX11-TRUE16-NEXT: v_mov_b16_e32 v11.h, v32.l
+; GFX11-TRUE16-NEXT: v_mov_b16_e32 v12.h, v31.l
+; GFX11-TRUE16-NEXT: v_mov_b16_e32 v13.h, v30.l
+; GFX11-TRUE16-NEXT: v_mov_b16_e32 v14.h, v29.l
+; GFX11-TRUE16-NEXT: v_mov_b16_e32 v15.h, v28.l
+; GFX11-TRUE16-NEXT: v_mov_b16_e32 v16.h, v27.l
+; GFX11-TRUE16-NEXT: v_mov_b16_e32 v17.h, v26.l
+; GFX11-TRUE16-NEXT: v_mov_b16_e32 v18.h, v25.l
+; GFX11-TRUE16-NEXT: v_mov_b16_e32 v19.h, v24.l
+; GFX11-TRUE16-NEXT: v_mov_b16_e32 v20.h, v23.l
+; GFX11-TRUE16-NEXT: v_mov_b16_e32 v21.h, v22.l
+; GFX11-TRUE16-NEXT: s_setpc_b64 s[30:31]
+; GFX11-TRUE16-NEXT: .LBB33_4:
+; GFX11-TRUE16-NEXT: ; implicit-def: $vgpr51
+; GFX11-TRUE16-NEXT: ; implicit-def: $vgpr50
+; GFX11-TRUE16-NEXT: ; implicit-def: $vgpr49
+; GFX11-TRUE16-NEXT: ; implicit-def: $vgpr48
+; GFX11-TRUE16-NEXT: ; implicit-def: $vgpr39
+; GFX11-TRUE16-NEXT: ; implicit-def: $vgpr38
+; GFX11-TRUE16-NEXT: ; implicit-def: $vgpr37
+; GFX11-TRUE16-NEXT: ; implicit-def: $vgpr36
+; GFX11-TRUE16-NEXT: ; implicit-def: $vgpr35
+; GFX11-TRUE16-NEXT: ; implicit-def: $vgpr34
+; GFX11-TRUE16-NEXT: ; implicit-def: $vgpr33
+; GFX11-TRUE16-NEXT: ; implicit-def: $vgpr32
+; GFX11-TRUE16-NEXT: ; implicit-def: $vgpr31
+; GFX11-TRUE16-NEXT: ; implicit-def: $vgpr30
+; GFX11-TRUE16-NEXT: ; implicit-def: $vgpr29
+; GFX11-TRUE16-NEXT: ; implicit-def: $vgpr28
+; GFX11-TRUE16-NEXT: ; implicit-def: $vgpr27
+; GFX11-TRUE16-NEXT: ; implicit-def: $vgpr26
+; GFX11-TRUE16-NEXT: ; implicit-def: $vgpr25
+; GFX11-TRUE16-NEXT: ; implicit-def: $vgpr24
+; GFX11-TRUE16-NEXT: ; implicit-def: $vgpr23
+; GFX11-TRUE16-NEXT: ; implicit-def: $vgpr22
+; GFX11-TRUE16-NEXT: s_branch .LBB33_2
+;
+; GFX11-FAKE16-LABEL: bitcast_v22f32_to_v44f16_scalar:
+; GFX11-FAKE16: ; %bb.0:
+; GFX11-FAKE16-NEXT: s_waitcnt vmcnt(0) expcnt(0) lgkmcnt(0)
+; GFX11-FAKE16-NEXT: v_cmp_ne_u32_e32 vcc_lo, 0, v4
+; GFX11-FAKE16-NEXT: v_dual_mov_b32 v22, s0 :: v_dual_mov_b32 v21, s1
+; GFX11-FAKE16-NEXT: v_dual_mov_b32 v20, s2 :: v_dual_mov_b32 v19, s3
+; GFX11-FAKE16-NEXT: v_dual_mov_b32 v18, s16 :: v_dual_mov_b32 v5, s18
+; GFX11-FAKE16-NEXT: v_dual_mov_b32 v6, s17 :: v_dual_mov_b32 v11, s19
+; GFX11-FAKE16-NEXT: v_dual_mov_b32 v10, s20 :: v_dual_mov_b32 v9, s21
+; GFX11-FAKE16-NEXT: v_dual_mov_b32 v8, s22 :: v_dual_mov_b32 v7, s23
+; GFX11-FAKE16-NEXT: v_dual_mov_b32 v15, s24 :: v_dual_mov_b32 v14, s25
+; GFX11-FAKE16-NEXT: v_dual_mov_b32 v13, s26 :: v_dual_mov_b32 v12, s27
+; GFX11-FAKE16-NEXT: v_dual_mov_b32 v16, s28 :: v_dual_mov_b32 v17, s29
+; GFX11-FAKE16-NEXT: s_mov_b32 s0, 0
+; GFX11-FAKE16-NEXT: s_and_b32 s1, vcc_lo, exec_lo
+; GFX11-FAKE16-NEXT: s_cbranch_scc0 .LBB33_4
+; GFX11-FAKE16-NEXT: ; %bb.1: ; %cmp.false
+; GFX11-FAKE16-NEXT: v_lshrrev_b32_e32 v26, 16, v3
+; GFX11-FAKE16-NEXT: v_lshrrev_b32_e32 v27, 16, v2
+; GFX11-FAKE16-NEXT: v_lshrrev_b32_e32 v28, 16, v1
+; GFX11-FAKE16-NEXT: v_lshrrev_b32_e32 v29, 16, v0
+; GFX11-FAKE16-NEXT: v_lshrrev_b32_e32 v30, 16, v17
+; GFX11-FAKE16-NEXT: v_lshrrev_b32_e32 v31, 16, v16
+; GFX11-FAKE16-NEXT: v_lshrrev_b32_e32 v32, 16, v12
+; GFX11-FAKE16-NEXT: v_lshrrev_b32_e32 v33, 16, v13
+; GFX11-FAKE16-NEXT: v_lshrrev_b32_e32 v34, 16, v14
+; GFX11-FAKE16-NEXT: v_lshrrev_b32_e32 v35, 16, v15
+; GFX11-FAKE16-NEXT: v_lshrrev_b32_e32 v36, 16, v7
+; GFX11-FAKE16-NEXT: v_lshrrev_b32_e32 v37, 16, v8
+; GFX11-FAKE16-NEXT: v_lshrrev_b32_e32 v38, 16, v9
+; GFX11-FAKE16-NEXT: v_lshrrev_b32_e32 v39, 16, v10
+; GFX11-FAKE16-NEXT: v_lshrrev_b32_e32 v48, 16, v11
+; GFX11-FAKE16-NEXT: v_lshrrev_b32_e32 v49, 16, v5
+; GFX11-FAKE16-NEXT: v_lshrrev_b32_e32 v50, 16, v6
+; GFX11-FAKE16-NEXT: v_lshrrev_b32_e32 v4, 16, v18
+; GFX11-FAKE16-NEXT: v_lshrrev_b32_e32 v23, 16, v19
+; GFX11-FAKE16-NEXT: v_lshrrev_b32_e32 v51, 16, v20
+; GFX11-FAKE16-NEXT: v_lshrrev_b32_e32 v25, 16, v21
+; GFX11-FAKE16-NEXT: v_lshrrev_b32_e32 v24, 16, v22
+; GFX11-FAKE16-NEXT: s_and_not1_b32 vcc_lo, exec_lo, s0
+; GFX11-FAKE16-NEXT: s_cbranch_vccnz .LBB33_3
+; GFX11-FAKE16-NEXT: .LBB33_2: ; %cmp.true
+; GFX11-FAKE16-NEXT: v_dual_add_f32 v3, 1.0, v3 :: v_dual_add_f32 v2, 1.0, v2
+; GFX11-FAKE16-NEXT: v_dual_add_f32 v1, 1.0, v1 :: v_dual_add_f32 v0, 1.0, v0
+; GFX11-FAKE16-NEXT: v_dual_add_f32 v17, 1.0, v17 :: v_dual_add_f32 v16, 1.0, v16
+; GFX11-FAKE16-NEXT: v_dual_add_f32 v12, 1.0, v12 :: v_dual_add_f32 v13, 1.0, v13
+; GFX11-FAKE16-NEXT: v_dual_add_f32 v14, 1.0, v14 :: v_dual_add_f32 v15, 1.0, v15
+; GFX11-FAKE16-NEXT: v_dual_add_f32 v7, 1.0, v7 :: v_dual_add_f32 v8, 1.0, v8
+; GFX11-FAKE16-NEXT: v_dual_add_f32 v9, 1.0, v9 :: v_dual_add_f32 v10, 1.0, v10
+; GFX11-FAKE16-NEXT: v_dual_add_f32 v11, 1.0, v11 :: v_dual_add_f32 v6, 1.0, v6
+; GFX11-FAKE16-NEXT: v_dual_add_f32 v5, 1.0, v5 :: v_dual_add_f32 v18, 1.0, v18
+; GFX11-FAKE16-NEXT: v_dual_add_f32 v19, 1.0, v19 :: v_dual_add_f32 v20, 1.0, v20
+; GFX11-FAKE16-NEXT: v_dual_add_f32 v21, 1.0, v21 :: v_dual_add_f32 v22, 1.0, v22
+; GFX11-FAKE16-NEXT: v_lshrrev_b32_e32 v26, 16, v3
+; GFX11-FAKE16-NEXT: v_lshrrev_b32_e32 v27, 16, v2
+; GFX11-FAKE16-NEXT: v_lshrrev_b32_e32 v28, 16, v1
+; GFX11-FAKE16-NEXT: v_lshrrev_b32_e32 v29, 16, v0
+; GFX11-FAKE16-NEXT: v_lshrrev_b32_e32 v30, 16, v17
+; GFX11-FAKE16-NEXT: v_lshrrev_b32_e32 v31, 16, v16
+; GFX11-FAKE16-NEXT: v_lshrrev_b32_e32 v32, 16, v12
+; GFX11-FAKE16-NEXT: v_lshrrev_b32_e32 v33, 16, v13
+; GFX11-FAKE16-NEXT: v_lshrrev_b32_e32 v34, 16, v14
+; GFX11-FAKE16-NEXT: v_lshrrev_b32_e32 v35, 16, v15
+; GFX11-FAKE16-NEXT: v_lshrrev_b32_e32 v36, 16, v7
+; GFX11-FAKE16-NEXT: v_lshrrev_b32_e32 v37, 16, v8
+; GFX11-FAKE16-NEXT: v_lshrrev_b32_e32 v38, 16, v9
+; GFX11-FAKE16-NEXT: v_lshrrev_b32_e32 v39, 16, v10
+; GFX11-FAKE16-NEXT: v_lshrrev_b32_e32 v48, 16, v11
+; GFX11-FAKE16-NEXT: v_lshrrev_b32_e32 v49, 16, v5
+; GFX11-FAKE16-NEXT: v_lshrrev_b32_e32 v50, 16, v6
+; GFX11-FAKE16-NEXT: v_lshrrev_b32_e32 v4, 16, v18
+; GFX11-FAKE16-NEXT: v_lshrrev_b32_e32 v23, 16, v19
+; GFX11-FAKE16-NEXT: v_lshrrev_b32_e32 v51, 16, v20
+; GFX11-FAKE16-NEXT: v_lshrrev_b32_e32 v25, 16, v21
+; GFX11-FAKE16-NEXT: v_lshrrev_b32_e32 v24, 16, v22
+; GFX11-FAKE16-NEXT: .LBB33_3: ; %end
+; GFX11-FAKE16-NEXT: v_and_b32_e32 v19, 0xffff, v19
+; GFX11-FAKE16-NEXT: v_and_b32_e32 v21, 0xffff, v21
+; GFX11-FAKE16-NEXT: v_and_b32_e32 v18, 0xffff, v18
+; GFX11-FAKE16-NEXT: v_and_b32_e32 v11, 0xffff, v11
+; GFX11-FAKE16-NEXT: v_and_b32_e32 v6, 0xffff, v6
+; GFX11-FAKE16-NEXT: v_lshl_or_b32 v23, v23, 16, v19
+; GFX11-FAKE16-NEXT: v_and_b32_e32 v19, 0xffff, v7
+; GFX11-FAKE16-NEXT: v_and_b32_e32 v20, 0xffff, v20
+; GFX11-FAKE16-NEXT: v_lshl_or_b32 v25, v25, 16, v21
+; GFX11-FAKE16-NEXT: v_and_b32_e32 v22, 0xffff, v22
+; GFX11-FAKE16-NEXT: v_and_b32_e32 v21, 0xffff, v5
+; GFX11-FAKE16-NEXT: v_lshl_or_b32 v4, v4, 16, v18
+; GFX11-FAKE16-NEXT: v_and_b32_e32 v10, 0xffff, v10
+; GFX11-FAKE16-NEXT: v_and_b32_e32 v18, 0xffff, v8
+; GFX11-FAKE16-NEXT: v_lshl_or_b32 v7, v48, 16, v11
+; GFX11-FAKE16-NEXT: v_lshl_or_b32 v11, v36, 16, v19
+; GFX11-FAKE16-NEXT: v_and_b32_e32 v15, 0xffff, v15
+; GFX11-FAKE16-NEXT: v_and_b32_e32 v19, 0xffff, v12
+; GFX11-FAKE16-NEXT: v_and_b32_e32 v1, 0xffff, v1
+; GFX11-FAKE16-NEXT: v_and_b32_e32 v3, 0xffff, v3
+; GFX11-FAKE16-NEXT: v_lshl_or_b32 v5, v50, 16, v6
+; GFX11-FAKE16-NEXT: v_lshl_or_b32 v6, v49, 16, v21
+; GFX11-FAKE16-NEXT: v_and_b32_e32 v9, 0xffff, v9
+; GFX11-FAKE16-NEXT: v_lshl_or_b32 v8, v39, 16, v10
+; GFX11-FAKE16-NEXT: v_lshl_or_b32 v10, v37, 16, v18
+; GFX11-FAKE16-NEXT: v_and_b32_e32 v14, 0xffff, v14
+; GFX11-FAKE16-NEXT: v_and_b32_e32 v18, 0xffff, v13
+; GFX11-FAKE16-NEXT: v_and_b32_e32 v16, 0xffff, v16
+; GFX11-FAKE16-NEXT: v_lshl_or_b32 v12, v35, 16, v15
+; GFX11-FAKE16-NEXT: v_lshl_or_b32 v15, v32, 16, v19
+; GFX11-FAKE16-NEXT: v_and_b32_e32 v17, 0xffff, v17
+; GFX11-FAKE16-NEXT: v_and_b32_e32 v0, 0xffff, v0
+; GFX11-FAKE16-NEXT: v_and_b32_e32 v2, 0xffff, v2
+; GFX11-FAKE16-NEXT: v_lshl_or_b32 v19, v28, 16, v1
+; GFX11-FAKE16-NEXT: v_lshl_or_b32 v21, v26, 16, v3
+; GFX11-FAKE16-NEXT: v_mov_b32_e32 v1, v25
+; GFX11-FAKE16-NEXT: v_lshl_or_b32 v24, v24, 16, v22
+; GFX11-FAKE16-NEXT: v_mov_b32_e32 v3, v23
+; GFX11-FAKE16-NEXT: v_lshl_or_b32 v22, v51, 16, v20
+; GFX11-FAKE16-NEXT: v_lshl_or_b32 v9, v38, 16, v9
+; GFX11-FAKE16-NEXT: v_lshl_or_b32 v13, v34, 16, v14
+; GFX11-FAKE16-NEXT: v_lshl_or_b32 v14, v33, 16, v18
+; GFX11-FAKE16-NEXT: v_lshl_or_b32 v16, v31, 16, v16
+; GFX11-FAKE16-NEXT: v_lshl_or_b32 v17, v30, 16, v17
+; GFX11-FAKE16-NEXT: v_lshl_or_b32 v18, v29, 16, v0
+; GFX11-FAKE16-NEXT: v_lshl_or_b32 v20, v27, 16, v2
+; GFX11-FAKE16-NEXT: v_mov_b32_e32 v0, v24
+; GFX11-FAKE16-NEXT: v_mov_b32_e32 v2, v22
+; GFX11-FAKE16-NEXT: s_setpc_b64 s[30:31]
+; GFX11-FAKE16-NEXT: .LBB33_4:
+; GFX11-FAKE16-NEXT: ; implicit-def: $vgpr24
+; GFX11-FAKE16-NEXT: ; implicit-def: $vgpr25
+; GFX11-FAKE16-NEXT: ; implicit-def: $vgpr51
+; GFX11-FAKE16-NEXT: ; implicit-def: $vgpr23
+; GFX11-FAKE16-NEXT: ; implicit-def: $vgpr4
+; GFX11-FAKE16-NEXT: ; implicit-def: $vgpr50
+; GFX11-FAKE16-NEXT: ; implicit-def: $vgpr49
+; GFX11-FAKE16-NEXT: ; implicit-def: $vgpr48
+; GFX11-FAKE16-NEXT: ; implicit-def: $vgpr39
+; GFX11-FAKE16-NEXT: ; implicit-def: $vgpr38
+; GFX11-FAKE16-NEXT: ; implicit-def: $vgpr37
+; GFX11-FAKE16-NEXT: ; implicit-def: $vgpr36
+; GFX11-FAKE16-NEXT: ; implicit-def: $vgpr35
+; GFX11-FAKE16-NEXT: ; implicit-def: $vgpr34
+; GFX11-FAKE16-NEXT: ; implicit-def: $vgpr33
+; GFX11-FAKE16-NEXT: ; implicit-def: $vgpr32
+; GFX11-FAKE16-NEXT: ; implicit-def: $vgpr31
+; GFX11-FAKE16-NEXT: ; implicit-def: $vgpr30
+; GFX11-FAKE16-NEXT: ; implicit-def: $vgpr29
+; GFX11-FAKE16-NEXT: ; implicit-def: $vgpr28
+; GFX11-FAKE16-NEXT: ; implicit-def: $vgpr27
+; GFX11-FAKE16-NEXT: ; implicit-def: $vgpr26
+; GFX11-FAKE16-NEXT: s_branch .LBB33_2
%cmp = icmp eq i32 %b, 0
br i1 %cmp, label %cmp.true, label %cmp.false
@@ -17607,105 +18406,278 @@ define inreg <22 x float> @bitcast_v44f16_to_v22f32_scalar(<44 x half> inreg %a,
; GFX11-TRUE16-LABEL: bitcast_v44f16_to_v22f32_scalar:
; GFX11-TRUE16: ; %bb.0:
; GFX11-TRUE16-NEXT: s_waitcnt vmcnt(0) expcnt(0) lgkmcnt(0)
-; GFX11-TRUE16-NEXT: v_mov_b16_e32 v32.h, 0
; GFX11-TRUE16-NEXT: v_cmp_ne_u32_e32 vcc_lo, 0, v4
-; GFX11-TRUE16-NEXT: v_mov_b16_e32 v32.l, v3.h
-; GFX11-TRUE16-NEXT: v_mov_b16_e32 v33.l, v2.h
-; GFX11-TRUE16-NEXT: v_mov_b16_e32 v34.l, v1.h
-; GFX11-TRUE16-NEXT: v_mov_b16_e32 v33.h, v32.h
-; GFX11-TRUE16-NEXT: v_mov_b16_e32 v34.h, v32.h
-; GFX11-TRUE16-NEXT: v_mov_b16_e32 v35.l, v0.h
-; GFX11-TRUE16-NEXT: v_mov_b16_e32 v35.h, v32.h
-; GFX11-TRUE16-NEXT: v_and_b32_e32 v39, 0xffff, v0
-; GFX11-TRUE16-NEXT: v_and_b32_e32 v38, 0xffff, v1
-; GFX11-TRUE16-NEXT: v_and_b32_e32 v37, 0xffff, v2
-; GFX11-TRUE16-NEXT: v_and_b32_e32 v36, 0xffff, v3
-; GFX11-TRUE16-NEXT: s_lshr_b32 s41, s29, 16
-; GFX11-TRUE16-NEXT: s_lshr_b32 s42, s28, 16
-; GFX11-TRUE16-NEXT: s_lshr_b32 s43, s27, 16
-; GFX11-TRUE16-NEXT: s_lshr_b32 s15, s26, 16
-; GFX11-TRUE16-NEXT: s_lshr_b32 s14, s25, 16
-; GFX11-TRUE16-NEXT: s_lshr_b32 s13, s24, 16
-; GFX11-TRUE16-NEXT: s_lshr_b32 s12, s23, 16
-; GFX11-TRUE16-NEXT: s_lshr_b32 s11, s22, 16
-; GFX11-TRUE16-NEXT: s_lshr_b32 s10, s21, 16
-; GFX11-TRUE16-NEXT: s_lshr_b32 s9, s20, 16
-; GFX11-TRUE16-NEXT: s_lshr_b32 s8, s19, 16
-; GFX11-TRUE16-NEXT: s_lshr_b32 s7, s18, 16
-; GFX11-TRUE16-NEXT: s_lshr_b32 s6, s17, 16
-; GFX11-TRUE16-NEXT: s_lshr_b32 s5, s16, 16
-; GFX11-TRUE16-NEXT: s_lshr_b32 s44, s3, 16
-; GFX11-TRUE16-NEXT: s_lshr_b32 s45, s2, 16
-; GFX11-TRUE16-NEXT: s_lshr_b32 s46, s1, 16
-; GFX11-TRUE16-NEXT: s_lshr_b32 s4, s0, 16
-; GFX11-TRUE16-NEXT: s_mov_b32 s40, 0
-; GFX11-TRUE16-NEXT: s_pack_ll_b32_b16 s4, s0, s4
-; GFX11-TRUE16-NEXT: s_pack_ll_b32_b16 s1, s1, s46
-; GFX11-TRUE16-NEXT: s_pack_ll_b32_b16 s2, s2, s45
-; GFX11-TRUE16-NEXT: s_pack_ll_b32_b16 s3, s3, s44
-; GFX11-TRUE16-NEXT: s_pack_ll_b32_b16 s5, s16, s5
-; GFX11-TRUE16-NEXT: s_pack_ll_b32_b16 s6, s17, s6
-; GFX11-TRUE16-NEXT: s_pack_ll_b32_b16 s7, s18, s7
-; GFX11-TRUE16-NEXT: s_pack_ll_b32_b16 s8, s19, s8
-; GFX11-TRUE16-NEXT: s_pack_ll_b32_b16 s9, s20, s9
-; GFX11-TRUE16-NEXT: s_pack_ll_b32_b16 s10, s21, s10
-; GFX11-TRUE16-NEXT: s_pack_ll_b32_b16 s11, s22, s11
-; GFX11-TRUE16-NEXT: s_pack_ll_b32_b16 s12, s23, s12
-; GFX11-TRUE16-NEXT: s_pack_ll_b32_b16 s13, s24, s13
-; GFX11-TRUE16-NEXT: s_pack_ll_b32_b16 s14, s25, s14
-; GFX11-TRUE16-NEXT: s_pack_ll_b32_b16 s15, s26, s15
-; GFX11-TRUE16-NEXT: s_pack_ll_b32_b16 s16, s27, s43
-; GFX11-TRUE16-NEXT: s_pack_ll_b32_b16 s17, s28, s42
-; GFX11-TRUE16-NEXT: s_pack_ll_b32_b16 s0, s29, s41
+; GFX11-TRUE16-NEXT: s_clause 0x1f
+; GFX11-TRUE16-NEXT: scratch_store_b32 off, v40, s32 offset:304
+; GFX11-TRUE16-NEXT: scratch_store_b32 off, v41, s32 offset:300
+; GFX11-TRUE16-NEXT: scratch_store_b32 off, v42, s32 offset:296
+; GFX11-TRUE16-NEXT: scratch_store_b32 off, v43, s32 offset:292
+; GFX11-TRUE16-NEXT: scratch_store_b32 off, v44, s32 offset:288
+; GFX11-TRUE16-NEXT: scratch_store_b32 off, v45, s32 offset:284
+; GFX11-TRUE16-NEXT: scratch_store_b32 off, v46, s32 offset:280
+; GFX11-TRUE16-NEXT: scratch_store_b32 off, v47, s32 offset:276
+; GFX11-TRUE16-NEXT: scratch_store_b32 off, v56, s32 offset:272
+; GFX11-TRUE16-NEXT: scratch_store_b32 off, v57, s32 offset:268
+; GFX11-TRUE16-NEXT: scratch_store_b32 off, v58, s32 offset:264
+; GFX11-TRUE16-NEXT: scratch_store_b32 off, v59, s32 offset:260
+; GFX11-TRUE16-NEXT: scratch_store_b32 off, v60, s32 offset:256
+; GFX11-TRUE16-NEXT: scratch_store_b32 off, v61, s32 offset:252
+; GFX11-TRUE16-NEXT: scratch_store_b32 off, v62, s32 offset:248
+; GFX11-TRUE16-NEXT: scratch_store_b32 off, v63, s32 offset:244
+; GFX11-TRUE16-NEXT: scratch_store_b32 off, v72, s32 offset:240
+; GFX11-TRUE16-NEXT: scratch_store_b32 off, v73, s32 offset:236
+; GFX11-TRUE16-NEXT: scratch_store_b32 off, v74, s32 offset:232
+; GFX11-TRUE16-NEXT: scratch_store_b32 off, v75, s32 offset:228
+; GFX11-TRUE16-NEXT: scratch_store_b32 off, v76, s32 offset:224
+; GFX11-TRUE16-NEXT: scratch_store_b32 off, v77, s32 offset:220
+; GFX11-TRUE16-NEXT: scratch_store_b32 off, v78, s32 offset:216
+; GFX11-TRUE16-NEXT: scratch_store_b32 off, v79, s32 offset:212
+; GFX11-TRUE16-NEXT: scratch_store_b32 off, v88, s32 offset:208
+; GFX11-TRUE16-NEXT: scratch_store_b32 off, v89, s32 offset:204
+; GFX11-TRUE16-NEXT: scratch_store_b32 off, v90, s32 offset:200
+; GFX11-TRUE16-NEXT: scratch_store_b32 off, v91, s32 offset:196
+; GFX11-TRUE16-NEXT: scratch_store_b32 off, v92, s32 offset:192
+; GFX11-TRUE16-NEXT: scratch_store_b32 off, v93, s32 offset:188
+; GFX11-TRUE16-NEXT: scratch_store_b32 off, v94, s32 offset:184
+; GFX11-TRUE16-NEXT: scratch_store_b32 off, v95, s32 offset:180
+; GFX11-TRUE16-NEXT: s_clause 0x1f
+; GFX11-TRUE16-NEXT: scratch_store_b32 off, v104, s32 offset:176
+; GFX11-TRUE16-NEXT: scratch_store_b32 off, v105, s32 offset:172
+; GFX11-TRUE16-NEXT: scratch_store_b32 off, v106, s32 offset:168
+; GFX11-TRUE16-NEXT: scratch_store_b32 off, v107, s32 offset:164
+; GFX11-TRUE16-NEXT: scratch_store_b32 off, v108, s32 offset:160
+; GFX11-TRUE16-NEXT: scratch_store_b32 off, v109, s32 offset:156
+; GFX11-TRUE16-NEXT: scratch_store_b32 off, v110, s32 offset:152
+; GFX11-TRUE16-NEXT: scratch_store_b32 off, v111, s32 offset:148
+; GFX11-TRUE16-NEXT: scratch_store_b32 off, v120, s32 offset:144
+; GFX11-TRUE16-NEXT: scratch_store_b32 off, v121, s32 offset:140
+; GFX11-TRUE16-NEXT: scratch_store_b32 off, v122, s32 offset:136
+; GFX11-TRUE16-NEXT: scratch_store_b32 off, v123, s32 offset:132
+; GFX11-TRUE16-NEXT: scratch_store_b32 off, v124, s32 offset:128
+; GFX11-TRUE16-NEXT: scratch_store_b32 off, v125, s32 offset:124
+; GFX11-TRUE16-NEXT: scratch_store_b32 off, v126, s32 offset:120
+; GFX11-TRUE16-NEXT: scratch_store_b32 off, v127, s32 offset:116
+; GFX11-TRUE16-NEXT: scratch_store_b32 off, v136, s32 offset:112
+; GFX11-TRUE16-NEXT: scratch_store_b32 off, v137, s32 offset:108
+; GFX11-TRUE16-NEXT: scratch_store_b32 off, v138, s32 offset:104
+; GFX11-TRUE16-NEXT: scratch_store_b32 off, v139, s32 offset:100
+; GFX11-TRUE16-NEXT: scratch_store_b32 off, v140, s32 offset:96
+; GFX11-TRUE16-NEXT: scratch_store_b32 off, v141, s32 offset:92
+; GFX11-TRUE16-NEXT: scratch_store_b32 off, v142, s32 offset:88
+; GFX11-TRUE16-NEXT: scratch_store_b32 off, v143, s32 offset:84
+; GFX11-TRUE16-NEXT: scratch_store_b32 off, v152, s32 offset:80
+; GFX11-TRUE16-NEXT: scratch_store_b32 off, v153, s32 offset:76
+; GFX11-TRUE16-NEXT: scratch_store_b32 off, v154, s32 offset:72
+; GFX11-TRUE16-NEXT: scratch_store_b32 off, v155, s32 offset:68
+; GFX11-TRUE16-NEXT: scratch_store_b32 off, v156, s32 offset:64
+; GFX11-TRUE16-NEXT: scratch_store_b32 off, v157, s32 offset:60
+; GFX11-TRUE16-NEXT: scratch_store_b32 off, v158, s32 offset:56
+; GFX11-TRUE16-NEXT: scratch_store_b32 off, v159, s32 offset:52
+; GFX11-TRUE16-NEXT: s_clause 0xc
+; GFX11-TRUE16-NEXT: scratch_store_b32 off, v168, s32 offset:48
+; GFX11-TRUE16-NEXT: scratch_store_b32 off, v169, s32 offset:44
+; GFX11-TRUE16-NEXT: scratch_store_b32 off, v170, s32 offset:40
+; GFX11-TRUE16-NEXT: scratch_store_b32 off, v171, s32 offset:36
+; GFX11-TRUE16-NEXT: scratch_store_b32 off, v172, s32 offset:32
+; GFX11-TRUE16-NEXT: scratch_store_b32 off, v173, s32 offset:28
+; GFX11-TRUE16-NEXT: scratch_store_b32 off, v174, s32 offset:24
+; GFX11-TRUE16-NEXT: scratch_store_b32 off, v175, s32 offset:20
+; GFX11-TRUE16-NEXT: scratch_store_b32 off, v184, s32 offset:16
+; GFX11-TRUE16-NEXT: scratch_store_b32 off, v185, s32 offset:12
+; GFX11-TRUE16-NEXT: scratch_store_b32 off, v186, s32 offset:8
+; GFX11-TRUE16-NEXT: scratch_store_b32 off, v187, s32 offset:4
+; GFX11-TRUE16-NEXT: scratch_store_b32 off, v188, s32
+; GFX11-TRUE16-NEXT: v_dual_mov_b32 v185, v3 :: v_dual_mov_b32 v186, v2
+; GFX11-TRUE16-NEXT: v_dual_mov_b32 v187, v1 :: v_dual_mov_b32 v188, v0
+; GFX11-TRUE16-NEXT: s_lshr_b32 s15, s29, 16
+; GFX11-TRUE16-NEXT: s_lshr_b32 s14, s28, 16
+; GFX11-TRUE16-NEXT: s_lshr_b32 s13, s27, 16
+; GFX11-TRUE16-NEXT: s_lshr_b32 s12, s26, 16
+; GFX11-TRUE16-NEXT: s_lshr_b32 s11, s25, 16
+; GFX11-TRUE16-NEXT: s_lshr_b32 s10, s24, 16
+; GFX11-TRUE16-NEXT: s_lshr_b32 s9, s23, 16
+; GFX11-TRUE16-NEXT: s_lshr_b32 s8, s22, 16
+; GFX11-TRUE16-NEXT: s_lshr_b32 s7, s21, 16
+; GFX11-TRUE16-NEXT: s_lshr_b32 s6, s20, 16
+; GFX11-TRUE16-NEXT: s_lshr_b32 s5, s19, 16
+; GFX11-TRUE16-NEXT: s_lshr_b32 s4, s18, 16
+; GFX11-TRUE16-NEXT: s_lshr_b32 s43, s17, 16
+; GFX11-TRUE16-NEXT: s_lshr_b32 s44, s16, 16
+; GFX11-TRUE16-NEXT: s_lshr_b32 s45, s3, 16
+; GFX11-TRUE16-NEXT: s_lshr_b32 s46, s2, 16
+; GFX11-TRUE16-NEXT: s_lshr_b32 s41, s1, 16
+; GFX11-TRUE16-NEXT: s_lshr_b32 s40, s0, 16
+; GFX11-TRUE16-NEXT: s_mov_b32 s42, 0
+; GFX11-TRUE16-NEXT: s_pack_ll_b32_b16 s40, s0, s40
+; GFX11-TRUE16-NEXT: s_pack_ll_b32_b16 s41, s1, s41
+; GFX11-TRUE16-NEXT: s_pack_ll_b32_b16 s0, s2, s46
+; GFX11-TRUE16-NEXT: s_pack_ll_b32_b16 s1, s3, s45
+; GFX11-TRUE16-NEXT: s_pack_ll_b32_b16 s2, s16, s44
+; GFX11-TRUE16-NEXT: s_pack_ll_b32_b16 s3, s17, s43
+; GFX11-TRUE16-NEXT: s_pack_ll_b32_b16 s4, s18, s4
+; GFX11-TRUE16-NEXT: s_pack_ll_b32_b16 s5, s19, s5
+; GFX11-TRUE16-NEXT: s_pack_ll_b32_b16 s6, s20, s6
+; GFX11-TRUE16-NEXT: s_pack_ll_b32_b16 s7, s21, s7
+; GFX11-TRUE16-NEXT: s_pack_ll_b32_b16 s8, s22, s8
+; GFX11-TRUE16-NEXT: s_pack_ll_b32_b16 s9, s23, s9
+; GFX11-TRUE16-NEXT: s_pack_ll_b32_b16 s10, s24, s10
+; GFX11-TRUE16-NEXT: s_pack_ll_b32_b16 s11, s25, s11
+; GFX11-TRUE16-NEXT: s_pack_ll_b32_b16 s12, s26, s12
+; GFX11-TRUE16-NEXT: s_pack_ll_b32_b16 s13, s27, s13
+; GFX11-TRUE16-NEXT: s_pack_ll_b32_b16 s14, s28, s14
+; GFX11-TRUE16-NEXT: s_pack_ll_b32_b16 s15, s29, s15
; GFX11-TRUE16-NEXT: s_and_b32 s47, vcc_lo, exec_lo
; GFX11-TRUE16-NEXT: s_cbranch_scc0 .LBB35_4
; GFX11-TRUE16-NEXT: ; %bb.1: ; %cmp.false
-; GFX11-TRUE16-NEXT: v_lshl_or_b32 v18, v35, 16, v39
-; GFX11-TRUE16-NEXT: v_lshl_or_b32 v19, v34, 16, v38
-; GFX11-TRUE16-NEXT: v_lshl_or_b32 v20, v33, 16, v37
-; GFX11-TRUE16-NEXT: v_lshl_or_b32 v21, v32, 16, v36
-; GFX11-TRUE16-NEXT: v_dual_mov_b32 v0, s4 :: v_dual_mov_b32 v1, s1
-; GFX11-TRUE16-NEXT: v_dual_mov_b32 v2, s2 :: v_dual_mov_b32 v3, s3
-; GFX11-TRUE16-NEXT: v_dual_mov_b32 v4, s5 :: v_dual_mov_b32 v5, s6
-; GFX11-TRUE16-NEXT: v_dual_mov_b32 v6, s7 :: v_dual_mov_b32 v7, s8
-; GFX11-TRUE16-NEXT: v_dual_mov_b32 v8, s9 :: v_dual_mov_b32 v9, s10
-; GFX11-TRUE16-NEXT: v_dual_mov_b32 v10, s11 :: v_dual_mov_b32 v11, s12
-; GFX11-TRUE16-NEXT: v_dual_mov_b32 v12, s13 :: v_dual_mov_b32 v13, s14
-; GFX11-TRUE16-NEXT: v_dual_mov_b32 v14, s15 :: v_dual_mov_b32 v15, s16
-; GFX11-TRUE16-NEXT: v_dual_mov_b32 v16, s17 :: v_dual_mov_b32 v17, s0
-; GFX11-TRUE16-NEXT: s_and_not1_b32 vcc_lo, exec_lo, s40
+; GFX11-TRUE16-NEXT: v_dual_mov_b32 v0, s40 :: v_dual_mov_b32 v5, s0
+; GFX11-TRUE16-NEXT: v_dual_mov_b32 v2, s41 :: v_dual_mov_b32 v9, s1
+; GFX11-TRUE16-NEXT: v_dual_mov_b32 v14, s2 :: v_dual_mov_b32 v27, s4
+; GFX11-TRUE16-NEXT: v_dual_mov_b32 v20, s3 :: v_dual_mov_b32 v35, s5
+; GFX11-TRUE16-NEXT: v_dual_mov_b32 v44, s6 :: v_dual_mov_b32 v65, s8
+; GFX11-TRUE16-NEXT: v_dual_mov_b32 v54, s7 :: v_dual_mov_b32 v77, s9
+; GFX11-TRUE16-NEXT: v_dual_mov_b32 v90, s10 :: v_dual_mov_b32 v119, s12
+; GFX11-TRUE16-NEXT: v_dual_mov_b32 v104, s11 :: v_dual_mov_b32 v135, s13
+; GFX11-TRUE16-NEXT: v_mov_b32_e32 v152, s14
+; GFX11-TRUE16-NEXT: v_mov_b32_e32 v170, s15
+; GFX11-TRUE16-NEXT: s_and_not1_b32 vcc_lo, exec_lo, s42
; GFX11-TRUE16-NEXT: s_cbranch_vccnz .LBB35_3
; GFX11-TRUE16-NEXT: .LBB35_2: ; %cmp.true
-; GFX11-TRUE16-NEXT: v_lshl_or_b32 v18, v35, 16, v39
-; GFX11-TRUE16-NEXT: v_lshl_or_b32 v19, v34, 16, v38
-; GFX11-TRUE16-NEXT: v_lshl_or_b32 v20, v33, 16, v37
-; GFX11-TRUE16-NEXT: v_lshl_or_b32 v21, v32, 16, v36
-; GFX11-TRUE16-NEXT: v_pk_add_f16 v0, 0x200, s4 op_sel_hi:[0,1]
-; GFX11-TRUE16-NEXT: v_pk_add_f16 v1, 0x200, s1 op_sel_hi:[0,1]
-; GFX11-TRUE16-NEXT: v_pk_add_f16 v2, 0x200, s2 op_sel_hi:[0,1]
-; GFX11-TRUE16-NEXT: v_pk_add_f16 v3, 0x200, s3 op_sel_hi:[0,1]
-; GFX11-TRUE16-NEXT: v_pk_add_f16 v4, 0x200, s5 op_sel_hi:[0,1]
-; GFX11-TRUE16-NEXT: v_pk_add_f16 v5, 0x200, s6 op_sel_hi:[0,1]
-; GFX11-TRUE16-NEXT: v_pk_add_f16 v6, 0x200, s7 op_sel_hi:[0,1]
-; GFX11-TRUE16-NEXT: v_pk_add_f16 v7, 0x200, s8 op_sel_hi:[0,1]
-; GFX11-TRUE16-NEXT: v_pk_add_f16 v8, 0x200, s9 op_sel_hi:[0,1]
-; GFX11-TRUE16-NEXT: v_pk_add_f16 v9, 0x200, s10 op_sel_hi:[0,1]
-; GFX11-TRUE16-NEXT: v_pk_add_f16 v10, 0x200, s11 op_sel_hi:[0,1]
-; GFX11-TRUE16-NEXT: v_pk_add_f16 v11, 0x200, s12 op_sel_hi:[0,1]
-; GFX11-TRUE16-NEXT: v_pk_add_f16 v12, 0x200, s13 op_sel_hi:[0,1]
-; GFX11-TRUE16-NEXT: v_pk_add_f16 v13, 0x200, s14 op_sel_hi:[0,1]
-; GFX11-TRUE16-NEXT: v_pk_add_f16 v14, 0x200, s15 op_sel_hi:[0,1]
-; GFX11-TRUE16-NEXT: v_pk_add_f16 v15, 0x200, s16 op_sel_hi:[0,1]
-; GFX11-TRUE16-NEXT: v_pk_add_f16 v16, 0x200, s17 op_sel_hi:[0,1]
-; GFX11-TRUE16-NEXT: v_pk_add_f16 v17, 0x200, s0 op_sel_hi:[0,1]
-; GFX11-TRUE16-NEXT: v_pk_add_f16 v18, 0x200, v18 op_sel_hi:[0,1]
-; GFX11-TRUE16-NEXT: v_pk_add_f16 v19, 0x200, v19 op_sel_hi:[0,1]
-; GFX11-TRUE16-NEXT: v_pk_add_f16 v20, 0x200, v20 op_sel_hi:[0,1]
-; GFX11-TRUE16-NEXT: v_pk_add_f16 v21, 0x200, v21 op_sel_hi:[0,1]
+; GFX11-TRUE16-NEXT: v_pk_add_f16 v0, 0x200, s40 op_sel_hi:[0,1]
+; GFX11-TRUE16-NEXT: v_pk_add_f16 v2, 0x200, s41 op_sel_hi:[0,1]
+; GFX11-TRUE16-NEXT: v_pk_add_f16 v188, 0x200, v188 op_sel_hi:[0,1]
+; GFX11-TRUE16-NEXT: v_pk_add_f16 v187, 0x200, v187 op_sel_hi:[0,1]
+; GFX11-TRUE16-NEXT: v_pk_add_f16 v186, 0x200, v186 op_sel_hi:[0,1]
+; GFX11-TRUE16-NEXT: v_pk_add_f16 v185, 0x200, v185 op_sel_hi:[0,1]
+; GFX11-TRUE16-NEXT: v_pk_add_f16 v5, 0x200, s0 op_sel_hi:[0,1]
+; GFX11-TRUE16-NEXT: v_pk_add_f16 v9, 0x200, s1 op_sel_hi:[0,1]
+; GFX11-TRUE16-NEXT: v_pk_add_f16 v14, 0x200, s2 op_sel_hi:[0,1]
+; GFX11-TRUE16-NEXT: v_pk_add_f16 v20, 0x200, s3 op_sel_hi:[0,1]
+; GFX11-TRUE16-NEXT: v_pk_add_f16 v27, 0x200, s4 op_sel_hi:[0,1]
+; GFX11-TRUE16-NEXT: v_pk_add_f16 v35, 0x200, s5 op_sel_hi:[0,1]
+; GFX11-TRUE16-NEXT: v_pk_add_f16 v44, 0x200, s6 op_sel_hi:[0,1]
+; GFX11-TRUE16-NEXT: v_pk_add_f16 v54, 0x200, s7 op_sel_hi:[0,1]
+; GFX11-TRUE16-NEXT: v_pk_add_f16 v65, 0x200, s8 op_sel_hi:[0,1]
+; GFX11-TRUE16-NEXT: v_pk_add_f16 v77, 0x200, s9 op_sel_hi:[0,1]
+; GFX11-TRUE16-NEXT: v_pk_add_f16 v90, 0x200, s10 op_sel_hi:[0,1]
+; GFX11-TRUE16-NEXT: v_pk_add_f16 v104, 0x200, s11 op_sel_hi:[0,1]
+; GFX11-TRUE16-NEXT: v_pk_add_f16 v119, 0x200, s12 op_sel_hi:[0,1]
+; GFX11-TRUE16-NEXT: v_pk_add_f16 v135, 0x200, s13 op_sel_hi:[0,1]
+; GFX11-TRUE16-NEXT: v_pk_add_f16 v152, 0x200, s14 op_sel_hi:[0,1]
+; GFX11-TRUE16-NEXT: v_pk_add_f16 v170, 0x200, s15 op_sel_hi:[0,1]
; GFX11-TRUE16-NEXT: .LBB35_3: ; %end
+; GFX11-TRUE16-NEXT: v_dual_mov_b32 v1, v2 :: v_dual_mov_b32 v2, v5
+; GFX11-TRUE16-NEXT: v_dual_mov_b32 v5, v20 :: v_dual_mov_b32 v6, v27
+; GFX11-TRUE16-NEXT: v_dual_mov_b32 v7, v35 :: v_dual_mov_b32 v8, v44
+; GFX11-TRUE16-NEXT: v_dual_mov_b32 v11, v77 :: v_dual_mov_b32 v12, v90
+; GFX11-TRUE16-NEXT: v_mov_b32_e32 v13, v104
+; GFX11-TRUE16-NEXT: v_dual_mov_b32 v15, v135 :: v_dual_mov_b32 v16, v152
+; GFX11-TRUE16-NEXT: v_dual_mov_b32 v17, v170 :: v_dual_mov_b32 v18, v188
+; GFX11-TRUE16-NEXT: v_dual_mov_b32 v19, v187 :: v_dual_mov_b32 v20, v186
+; GFX11-TRUE16-NEXT: v_mov_b32_e32 v21, v185
+; GFX11-TRUE16-NEXT: s_clause 0x1f
+; GFX11-TRUE16-NEXT: scratch_load_b32 v188, off, s32
+; GFX11-TRUE16-NEXT: scratch_load_b32 v187, off, s32 offset:4
+; GFX11-TRUE16-NEXT: scratch_load_b32 v186, off, s32 offset:8
+; GFX11-TRUE16-NEXT: scratch_load_b32 v185, off, s32 offset:12
+; GFX11-TRUE16-NEXT: scratch_load_b32 v184, off, s32 offset:16
+; GFX11-TRUE16-NEXT: scratch_load_b32 v175, off, s32 offset:20
+; GFX11-TRUE16-NEXT: scratch_load_b32 v174, off, s32 offset:24
+; GFX11-TRUE16-NEXT: scratch_load_b32 v173, off, s32 offset:28
+; GFX11-TRUE16-NEXT: scratch_load_b32 v172, off, s32 offset:32
+; GFX11-TRUE16-NEXT: scratch_load_b32 v171, off, s32 offset:36
+; GFX11-TRUE16-NEXT: scratch_load_b32 v170, off, s32 offset:40
+; GFX11-TRUE16-NEXT: scratch_load_b32 v169, off, s32 offset:44
+; GFX11-TRUE16-NEXT: scratch_load_b32 v168, off, s32 offset:48
+; GFX11-TRUE16-NEXT: scratch_load_b32 v159, off, s32 offset:52
+; GFX11-TRUE16-NEXT: scratch_load_b32 v158, off, s32 offset:56
+; GFX11-TRUE16-NEXT: scratch_load_b32 v157, off, s32 offset:60
+; GFX11-TRUE16-NEXT: scratch_load_b32 v156, off, s32 offset:64
+; GFX11-TRUE16-NEXT: scratch_load_b32 v155, off, s32 offset:68
+; GFX11-TRUE16-NEXT: scratch_load_b32 v154, off, s32 offset:72
+; GFX11-TRUE16-NEXT: scratch_load_b32 v153, off, s32 offset:76
+; GFX11-TRUE16-NEXT: scratch_load_b32 v152, off, s32 offset:80
+; GFX11-TRUE16-NEXT: scratch_load_b32 v143, off, s32 offset:84
+; GFX11-TRUE16-NEXT: scratch_load_b32 v142, off, s32 offset:88
+; GFX11-TRUE16-NEXT: scratch_load_b32 v141, off, s32 offset:92
+; GFX11-TRUE16-NEXT: scratch_load_b32 v140, off, s32 offset:96
+; GFX11-TRUE16-NEXT: scratch_load_b32 v139, off, s32 offset:100
+; GFX11-TRUE16-NEXT: scratch_load_b32 v138, off, s32 offset:104
+; GFX11-TRUE16-NEXT: scratch_load_b32 v137, off, s32 offset:108
+; GFX11-TRUE16-NEXT: scratch_load_b32 v136, off, s32 offset:112
+; GFX11-TRUE16-NEXT: scratch_load_b32 v127, off, s32 offset:116
+; GFX11-TRUE16-NEXT: scratch_load_b32 v126, off, s32 offset:120
+; GFX11-TRUE16-NEXT: scratch_load_b32 v125, off, s32 offset:124
+; GFX11-TRUE16-NEXT: s_clause 0x1f
+; GFX11-TRUE16-NEXT: scratch_load_b32 v124, off, s32 offset:128
+; GFX11-TRUE16-NEXT: scratch_load_b32 v123, off, s32 offset:132
+; GFX11-TRUE16-NEXT: scratch_load_b32 v122, off, s32 offset:136
+; GFX11-TRUE16-NEXT: scratch_load_b32 v121, off, s32 offset:140
+; GFX11-TRUE16-NEXT: scratch_load_b32 v120, off, s32 offset:144
+; GFX11-TRUE16-NEXT: scratch_load_b32 v111, off, s32 offset:148
+; GFX11-TRUE16-NEXT: scratch_load_b32 v110, off, s32 offset:152
+; GFX11-TRUE16-NEXT: scratch_load_b32 v109, off, s32 offset:156
+; GFX11-TRUE16-NEXT: scratch_load_b32 v108, off, s32 offset:160
+; GFX11-TRUE16-NEXT: scratch_load_b32 v107, off, s32 offset:164
+; GFX11-TRUE16-NEXT: scratch_load_b32 v106, off, s32 offset:168
+; GFX11-TRUE16-NEXT: scratch_load_b32 v105, off, s32 offset:172
+; GFX11-TRUE16-NEXT: scratch_load_b32 v104, off, s32 offset:176
+; GFX11-TRUE16-NEXT: scratch_load_b32 v95, off, s32 offset:180
+; GFX11-TRUE16-NEXT: scratch_load_b32 v94, off, s32 offset:184
+; GFX11-TRUE16-NEXT: scratch_load_b32 v93, off, s32 offset:188
+; GFX11-TRUE16-NEXT: scratch_load_b32 v92, off, s32 offset:192
+; GFX11-TRUE16-NEXT: scratch_load_b32 v91, off, s32 offset:196
+; GFX11-TRUE16-NEXT: scratch_load_b32 v90, off, s32 offset:200
+; GFX11-TRUE16-NEXT: scratch_load_b32 v89, off, s32 offset:204
+; GFX11-TRUE16-NEXT: scratch_load_b32 v88, off, s32 offset:208
+; GFX11-TRUE16-NEXT: scratch_load_b32 v79, off, s32 offset:212
+; GFX11-TRUE16-NEXT: scratch_load_b32 v78, off, s32 offset:216
+; GFX11-TRUE16-NEXT: scratch_load_b32 v77, off, s32 offset:220
+; GFX11-TRUE16-NEXT: scratch_load_b32 v76, off, s32 offset:224
+; GFX11-TRUE16-NEXT: scratch_load_b32 v75, off, s32 offset:228
+; GFX11-TRUE16-NEXT: scratch_load_b32 v74, off, s32 offset:232
+; GFX11-TRUE16-NEXT: scratch_load_b32 v73, off, s32 offset:236
+; GFX11-TRUE16-NEXT: scratch_load_b32 v72, off, s32 offset:240
+; GFX11-TRUE16-NEXT: scratch_load_b32 v63, off, s32 offset:244
+; GFX11-TRUE16-NEXT: scratch_load_b32 v62, off, s32 offset:248
+; GFX11-TRUE16-NEXT: scratch_load_b32 v61, off, s32 offset:252
+; GFX11-TRUE16-NEXT: s_clause 0xc
+; GFX11-TRUE16-NEXT: scratch_load_b32 v60, off, s32 offset:256
+; GFX11-TRUE16-NEXT: scratch_load_b32 v59, off, s32 offset:260
+; GFX11-TRUE16-NEXT: scratch_load_b32 v58, off, s32 offset:264
+; GFX11-TRUE16-NEXT: scratch_load_b32 v57, off, s32 offset:268
+; GFX11-TRUE16-NEXT: scratch_load_b32 v56, off, s32 offset:272
+; GFX11-TRUE16-NEXT: scratch_load_b32 v47, off, s32 offset:276
+; GFX11-TRUE16-NEXT: scratch_load_b32 v46, off, s32 offset:280
+; GFX11-TRUE16-NEXT: scratch_load_b32 v45, off, s32 offset:284
+; GFX11-TRUE16-NEXT: scratch_load_b32 v44, off, s32 offset:288
+; GFX11-TRUE16-NEXT: scratch_load_b32 v43, off, s32 offset:292
+; GFX11-TRUE16-NEXT: scratch_load_b32 v42, off, s32 offset:296
+; GFX11-TRUE16-NEXT: scratch_load_b32 v41, off, s32 offset:300
+; GFX11-TRUE16-NEXT: scratch_load_b32 v40, off, s32 offset:304
+; GFX11-TRUE16-NEXT: v_dual_mov_b32 v3, v9 :: v_dual_mov_b32 v4, v14
+; GFX11-TRUE16-NEXT: v_dual_mov_b32 v9, v54 :: v_dual_mov_b32 v10, v65
+; GFX11-TRUE16-NEXT: v_mov_b32_e32 v14, v119
+; GFX11-TRUE16-NEXT: s_waitcnt vmcnt(0)
; GFX11-TRUE16-NEXT: s_setpc_b64 s[30:31]
; GFX11-TRUE16-NEXT: .LBB35_4:
; GFX11-TRUE16-NEXT: ; implicit-def: $vgpr0_vgpr1_vgpr2_vgpr3_vgpr4_vgpr5_vgpr6_vgpr7_vgpr8_vgpr9_vgpr10_vgpr11_vgpr12_vgpr13_vgpr14_vgpr15_vgpr16_vgpr17_vgpr18_vgpr19_vgpr20_vgpr21_vgpr22_vgpr23_vgpr24_vgpr25_vgpr26_vgpr27_vgpr28_vgpr29_vgpr30_vgpr31
+; GFX11-TRUE16-NEXT: ; implicit-def: $vgpr1_vgpr2_vgpr3_vgpr4_vgpr5_vgpr6_vgpr7_vgpr8_vgpr9_vgpr10_vgpr11_vgpr12_vgpr13_vgpr14_vgpr15_vgpr16_vgpr17_vgpr18_vgpr19_vgpr20_vgpr21_vgpr22_vgpr23_vgpr24_vgpr25_vgpr26_vgpr27_vgpr28_vgpr29_vgpr30_vgpr31_vgpr32
+; GFX11-TRUE16-NEXT: ; implicit-def: $vgpr3_vgpr4_vgpr5_vgpr6_vgpr7_vgpr8_vgpr9_vgpr10_vgpr11_vgpr12_vgpr13_vgpr14_vgpr15_vgpr16_vgpr17_vgpr18_vgpr19_vgpr20_vgpr21_vgpr22_vgpr23_vgpr24_vgpr25_vgpr26_vgpr27_vgpr28_vgpr29_vgpr30_vgpr31_vgpr32_vgpr33_vgpr34
+; GFX11-TRUE16-NEXT: ; implicit-def: $vgpr6_vgpr7_vgpr8_vgpr9_vgpr10_vgpr11_vgpr12_vgpr13_vgpr14_vgpr15_vgpr16_vgpr17_vgpr18_vgpr19_vgpr20_vgpr21_vgpr22_vgpr23_vgpr24_vgpr25_vgpr26_vgpr27_vgpr28_vgpr29_vgpr30_vgpr31_vgpr32_vgpr33_vgpr34_vgpr35_vgpr36_vgpr37
+; GFX11-TRUE16-NEXT: ; implicit-def: $vgpr10_vgpr11_vgpr12_vgpr13_vgpr14_vgpr15_vgpr16_vgpr17_vgpr18_vgpr19_vgpr20_vgpr21_vgpr22_vgpr23_vgpr24_vgpr25_vgpr26_vgpr27_vgpr28_vgpr29_vgpr30_vgpr31_vgpr32_vgpr33_vgpr34_vgpr35_vgpr36_vgpr37_vgpr38_vgpr39_vgpr40_vgpr41
+; GFX11-TRUE16-NEXT: ; implicit-def: $vgpr15_vgpr16_vgpr17_vgpr18_vgpr19_vgpr20_vgpr21_vgpr22_vgpr23_vgpr24_vgpr25_vgpr26_vgpr27_vgpr28_vgpr29_vgpr30_vgpr31_vgpr32_vgpr33_vgpr34_vgpr35_vgpr36_vgpr37_vgpr38_vgpr39_vgpr40_vgpr41_vgpr42_vgpr43_vgpr44_vgpr45_vgpr46
+; GFX11-TRUE16-NEXT: ; implicit-def: $vgpr21_vgpr22_vgpr23_vgpr24_vgpr25_vgpr26_vgpr27_vgpr28_vgpr29_vgpr30_vgpr31_vgpr32_vgpr33_vgpr34_vgpr35_vgpr36_vgpr37_vgpr38_vgpr39_vgpr40_vgpr41_vgpr42_vgpr43_vgpr44_vgpr45_vgpr46_vgpr47_vgpr48_vgpr49_vgpr50_vgpr51_vgpr52
+; GFX11-TRUE16-NEXT: ; implicit-def: $vgpr28_vgpr29_vgpr30_vgpr31_vgpr32_vgpr33_vgpr34_vgpr35_vgpr36_vgpr37_vgpr38_vgpr39_vgpr40_vgpr41_vgpr42_vgpr43_vgpr44_vgpr45_vgpr46_vgpr47_vgpr48_vgpr49_vgpr50_vgpr51_vgpr52_vgpr53_vgpr54_vgpr55_vgpr56_vgpr57_vgpr58_vgpr59
+; GFX11-TRUE16-NEXT: ; implicit-def: $vgpr36_vgpr37_vgpr38_vgpr39_vgpr40_vgpr41_vgpr42_vgpr43_vgpr44_vgpr45_vgpr46_vgpr47_vgpr48_vgpr49_vgpr50_vgpr51_vgpr52_vgpr53_vgpr54_vgpr55_vgpr56_vgpr57_vgpr58_vgpr59_vgpr60_vgpr61_vgpr62_vgpr63_vgpr64_vgpr65_vgpr66_vgpr67
+; GFX11-TRUE16-NEXT: ; implicit-def: $vgpr45_vgpr46_vgpr47_vgpr48_vgpr49_vgpr50_vgpr51_vgpr52_vgpr53_vgpr54_vgpr55_vgpr56_vgpr57_vgpr58_vgpr59_vgpr60_vgpr61_vgpr62_vgpr63_vgpr64_vgpr65_vgpr66_vgpr67_vgpr68_vgpr69_vgpr70_vgpr71_vgpr72_vgpr73_vgpr74_vgpr75_vgpr76
+; GFX11-TRUE16-NEXT: ; implicit-def: $vgpr55_vgpr56_vgpr57_vgpr58_vgpr59_vgpr60_vgpr61_vgpr62_vgpr63_vgpr64_vgpr65_vgpr66_vgpr67_vgpr68_vgpr69_vgpr70_vgpr71_vgpr72_vgpr73_vgpr74_vgpr75_vgpr76_vgpr77_vgpr78_vgpr79_vgpr80_vgpr81_vgpr82_vgpr83_vgpr84_vgpr85_vgpr86
+; GFX11-TRUE16-NEXT: ; implicit-def: $vgpr66_vgpr67_vgpr68_vgpr69_vgpr70_vgpr71_vgpr72_vgpr73_vgpr74_vgpr75_vgpr76_vgpr77_vgpr78_vgpr79_vgpr80_vgpr81_vgpr82_vgpr83_vgpr84_vgpr85_vgpr86_vgpr87_vgpr88_vgpr89_vgpr90_vgpr91_vgpr92_vgpr93_vgpr94_vgpr95_vgpr96_vgpr97
+; GFX11-TRUE16-NEXT: ; implicit-def: $vgpr78_vgpr79_vgpr80_vgpr81_vgpr82_vgpr83_vgpr84_vgpr85_vgpr86_vgpr87_vgpr88_vgpr89_vgpr90_vgpr91_vgpr92_vgpr93_vgpr94_vgpr95_vgpr96_vgpr97_vgpr98_vgpr99_vgpr100_vgpr101_vgpr102_vgpr103_vgpr104_vgpr105_vgpr106_vgpr107_vgpr108_vgpr109
+; GFX11-TRUE16-NEXT: ; implicit-def: $vgpr91_vgpr92_vgpr93_vgpr94_vgpr95_vgpr96_vgpr97_vgpr98_vgpr99_vgpr100_vgpr101_vgpr102_vgpr103_vgpr104_vgpr105_vgpr106_vgpr107_vgpr108_vgpr109_vgpr110_vgpr111_vgpr112_vgpr113_vgpr114_vgpr115_vgpr116_vgpr117_vgpr118_vgpr119_vgpr120_vgpr121_vgpr122
+; GFX11-TRUE16-NEXT: ; implicit-def: $vgpr105_vgpr106_vgpr107_vgpr108_vgpr109_vgpr110_vgpr111_vgpr112_vgpr113_vgpr114_vgpr115_vgpr116_vgpr117_vgpr118_vgpr119_vgpr120_vgpr121_vgpr122_vgpr123_vgpr124_vgpr125_vgpr126_vgpr127_vgpr128_vgpr129_vgpr130_vgpr131_vgpr132_vgpr133_vgpr134_vgpr135_vgpr136
+; GFX11-TRUE16-NEXT: ; implicit-def: $vgpr120_vgpr121_vgpr122_vgpr123_vgpr124_vgpr125_vgpr126_vgpr127_vgpr128_vgpr129_vgpr130_vgpr131_vgpr132_vgpr133_vgpr134_vgpr135_vgpr136_vgpr137_vgpr138_vgpr139_vgpr140_vgpr141_vgpr142_vgpr143_vgpr144_vgpr145_vgpr146_vgpr147_vgpr148_vgpr149_vgpr150_vgpr151
+; GFX11-TRUE16-NEXT: ; implicit-def: $vgpr136_vgpr137_vgpr138_vgpr139_vgpr140_vgpr141_vgpr142_vgpr143_vgpr144_vgpr145_vgpr146_vgpr147_vgpr148_vgpr149_vgpr150_vgpr151_vgpr152_vgpr153_vgpr154_vgpr155_vgpr156_vgpr157_vgpr158_vgpr159_vgpr160_vgpr161_vgpr162_vgpr163_vgpr164_vgpr165_vgpr166_vgpr167
+; GFX11-TRUE16-NEXT: ; implicit-def: $vgpr153_vgpr154_vgpr155_vgpr156_vgpr157_vgpr158_vgpr159_vgpr160_vgpr161_vgpr162_vgpr163_vgpr164_vgpr165_vgpr166_vgpr167_vgpr168_vgpr169_vgpr170_vgpr171_vgpr172_vgpr173_vgpr174_vgpr175_vgpr176_vgpr177_vgpr178_vgpr179_vgpr180_vgpr181_vgpr182_vgpr183_vgpr184
; GFX11-TRUE16-NEXT: s_branch .LBB35_2
;
; GFX11-FAKE16-LABEL: bitcast_v44f16_to_v22f32_scalar:
@@ -21568,105 +22540,278 @@ define inreg <11 x i64> @bitcast_v44i16_to_v11i64_scalar(<44 x i16> inreg %a, i3
; GFX11-TRUE16-LABEL: bitcast_v44i16_to_v11i64_scalar:
; GFX11-TRUE16: ; %bb.0:
; GFX11-TRUE16-NEXT: s_waitcnt vmcnt(0) expcnt(0) lgkmcnt(0)
-; GFX11-TRUE16-NEXT: v_mov_b16_e32 v32.h, 0
; GFX11-TRUE16-NEXT: v_cmp_ne_u32_e32 vcc_lo, 0, v4
-; GFX11-TRUE16-NEXT: v_mov_b16_e32 v32.l, v3.h
-; GFX11-TRUE16-NEXT: v_mov_b16_e32 v33.l, v2.h
-; GFX11-TRUE16-NEXT: v_mov_b16_e32 v34.l, v1.h
-; GFX11-TRUE16-NEXT: v_mov_b16_e32 v33.h, v32.h
-; GFX11-TRUE16-NEXT: v_mov_b16_e32 v34.h, v32.h
-; GFX11-TRUE16-NEXT: v_mov_b16_e32 v35.l, v0.h
-; GFX11-TRUE16-NEXT: v_mov_b16_e32 v35.h, v32.h
-; GFX11-TRUE16-NEXT: v_and_b32_e32 v39, 0xffff, v0
-; GFX11-TRUE16-NEXT: v_and_b32_e32 v38, 0xffff, v1
-; GFX11-TRUE16-NEXT: v_and_b32_e32 v37, 0xffff, v2
-; GFX11-TRUE16-NEXT: v_and_b32_e32 v36, 0xffff, v3
-; GFX11-TRUE16-NEXT: s_lshr_b32 s41, s29, 16
-; GFX11-TRUE16-NEXT: s_lshr_b32 s42, s28, 16
-; GFX11-TRUE16-NEXT: s_lshr_b32 s43, s27, 16
-; GFX11-TRUE16-NEXT: s_lshr_b32 s15, s26, 16
-; GFX11-TRUE16-NEXT: s_lshr_b32 s14, s25, 16
-; GFX11-TRUE16-NEXT: s_lshr_b32 s13, s24, 16
-; GFX11-TRUE16-NEXT: s_lshr_b32 s12, s23, 16
-; GFX11-TRUE16-NEXT: s_lshr_b32 s11, s22, 16
-; GFX11-TRUE16-NEXT: s_lshr_b32 s10, s21, 16
-; GFX11-TRUE16-NEXT: s_lshr_b32 s9, s20, 16
-; GFX11-TRUE16-NEXT: s_lshr_b32 s8, s19, 16
-; GFX11-TRUE16-NEXT: s_lshr_b32 s7, s18, 16
-; GFX11-TRUE16-NEXT: s_lshr_b32 s6, s17, 16
-; GFX11-TRUE16-NEXT: s_lshr_b32 s5, s16, 16
-; GFX11-TRUE16-NEXT: s_lshr_b32 s44, s3, 16
-; GFX11-TRUE16-NEXT: s_lshr_b32 s45, s2, 16
-; GFX11-TRUE16-NEXT: s_lshr_b32 s46, s1, 16
-; GFX11-TRUE16-NEXT: s_lshr_b32 s4, s0, 16
-; GFX11-TRUE16-NEXT: s_mov_b32 s40, 0
-; GFX11-TRUE16-NEXT: s_pack_ll_b32_b16 s4, s0, s4
-; GFX11-TRUE16-NEXT: s_pack_ll_b32_b16 s1, s1, s46
-; GFX11-TRUE16-NEXT: s_pack_ll_b32_b16 s2, s2, s45
-; GFX11-TRUE16-NEXT: s_pack_ll_b32_b16 s3, s3, s44
-; GFX11-TRUE16-NEXT: s_pack_ll_b32_b16 s5, s16, s5
-; GFX11-TRUE16-NEXT: s_pack_ll_b32_b16 s6, s17, s6
-; GFX11-TRUE16-NEXT: s_pack_ll_b32_b16 s7, s18, s7
-; GFX11-TRUE16-NEXT: s_pack_ll_b32_b16 s8, s19, s8
-; GFX11-TRUE16-NEXT: s_pack_ll_b32_b16 s9, s20, s9
-; GFX11-TRUE16-NEXT: s_pack_ll_b32_b16 s10, s21, s10
-; GFX11-TRUE16-NEXT: s_pack_ll_b32_b16 s11, s22, s11
-; GFX11-TRUE16-NEXT: s_pack_ll_b32_b16 s12, s23, s12
-; GFX11-TRUE16-NEXT: s_pack_ll_b32_b16 s13, s24, s13
-; GFX11-TRUE16-NEXT: s_pack_ll_b32_b16 s14, s25, s14
-; GFX11-TRUE16-NEXT: s_pack_ll_b32_b16 s15, s26, s15
-; GFX11-TRUE16-NEXT: s_pack_ll_b32_b16 s16, s27, s43
-; GFX11-TRUE16-NEXT: s_pack_ll_b32_b16 s17, s28, s42
-; GFX11-TRUE16-NEXT: s_pack_ll_b32_b16 s0, s29, s41
+; GFX11-TRUE16-NEXT: s_clause 0x1f
+; GFX11-TRUE16-NEXT: scratch_store_b32 off, v40, s32 offset:304
+; GFX11-TRUE16-NEXT: scratch_store_b32 off, v41, s32 offset:300
+; GFX11-TRUE16-NEXT: scratch_store_b32 off, v42, s32 offset:296
+; GFX11-TRUE16-NEXT: scratch_store_b32 off, v43, s32 offset:292
+; GFX11-TRUE16-NEXT: scratch_store_b32 off, v44, s32 offset:288
+; GFX11-TRUE16-NEXT: scratch_store_b32 off, v45, s32 offset:284
+; GFX11-TRUE16-NEXT: scratch_store_b32 off, v46, s32 offset:280
+; GFX11-TRUE16-NEXT: scratch_store_b32 off, v47, s32 offset:276
+; GFX11-TRUE16-NEXT: scratch_store_b32 off, v56, s32 offset:272
+; GFX11-TRUE16-NEXT: scratch_store_b32 off, v57, s32 offset:268
+; GFX11-TRUE16-NEXT: scratch_store_b32 off, v58, s32 offset:264
+; GFX11-TRUE16-NEXT: scratch_store_b32 off, v59, s32 offset:260
+; GFX11-TRUE16-NEXT: scratch_store_b32 off, v60, s32 offset:256
+; GFX11-TRUE16-NEXT: scratch_store_b32 off, v61, s32 offset:252
+; GFX11-TRUE16-NEXT: scratch_store_b32 off, v62, s32 offset:248
+; GFX11-TRUE16-NEXT: scratch_store_b32 off, v63, s32 offset:244
+; GFX11-TRUE16-NEXT: scratch_store_b32 off, v72, s32 offset:240
+; GFX11-TRUE16-NEXT: scratch_store_b32 off, v73, s32 offset:236
+; GFX11-TRUE16-NEXT: scratch_store_b32 off, v74, s32 offset:232
+; GFX11-TRUE16-NEXT: scratch_store_b32 off, v75, s32 offset:228
+; GFX11-TRUE16-NEXT: scratch_store_b32 off, v76, s32 offset:224
+; GFX11-TRUE16-NEXT: scratch_store_b32 off, v77, s32 offset:220
+; GFX11-TRUE16-NEXT: scratch_store_b32 off, v78, s32 offset:216
+; GFX11-TRUE16-NEXT: scratch_store_b32 off, v79, s32 offset:212
+; GFX11-TRUE16-NEXT: scratch_store_b32 off, v88, s32 offset:208
+; GFX11-TRUE16-NEXT: scratch_store_b32 off, v89, s32 offset:204
+; GFX11-TRUE16-NEXT: scratch_store_b32 off, v90, s32 offset:200
+; GFX11-TRUE16-NEXT: scratch_store_b32 off, v91, s32 offset:196
+; GFX11-TRUE16-NEXT: scratch_store_b32 off, v92, s32 offset:192
+; GFX11-TRUE16-NEXT: scratch_store_b32 off, v93, s32 offset:188
+; GFX11-TRUE16-NEXT: scratch_store_b32 off, v94, s32 offset:184
+; GFX11-TRUE16-NEXT: scratch_store_b32 off, v95, s32 offset:180
+; GFX11-TRUE16-NEXT: s_clause 0x1f
+; GFX11-TRUE16-NEXT: scratch_store_b32 off, v104, s32 offset:176
+; GFX11-TRUE16-NEXT: scratch_store_b32 off, v105, s32 offset:172
+; GFX11-TRUE16-NEXT: scratch_store_b32 off, v106, s32 offset:168
+; GFX11-TRUE16-NEXT: scratch_store_b32 off, v107, s32 offset:164
+; GFX11-TRUE16-NEXT: scratch_store_b32 off, v108, s32 offset:160
+; GFX11-TRUE16-NEXT: scratch_store_b32 off, v109, s32 offset:156
+; GFX11-TRUE16-NEXT: scratch_store_b32 off, v110, s32 offset:152
+; GFX11-TRUE16-NEXT: scratch_store_b32 off, v111, s32 offset:148
+; GFX11-TRUE16-NEXT: scratch_store_b32 off, v120, s32 offset:144
+; GFX11-TRUE16-NEXT: scratch_store_b32 off, v121, s32 offset:140
+; GFX11-TRUE16-NEXT: scratch_store_b32 off, v122, s32 offset:136
+; GFX11-TRUE16-NEXT: scratch_store_b32 off, v123, s32 offset:132
+; GFX11-TRUE16-NEXT: scratch_store_b32 off, v124, s32 offset:128
+; GFX11-TRUE16-NEXT: scratch_store_b32 off, v125, s32 offset:124
+; GFX11-TRUE16-NEXT: scratch_store_b32 off, v126, s32 offset:120
+; GFX11-TRUE16-NEXT: scratch_store_b32 off, v127, s32 offset:116
+; GFX11-TRUE16-NEXT: scratch_store_b32 off, v136, s32 offset:112
+; GFX11-TRUE16-NEXT: scratch_store_b32 off, v137, s32 offset:108
+; GFX11-TRUE16-NEXT: scratch_store_b32 off, v138, s32 offset:104
+; GFX11-TRUE16-NEXT: scratch_store_b32 off, v139, s32 offset:100
+; GFX11-TRUE16-NEXT: scratch_store_b32 off, v140, s32 offset:96
+; GFX11-TRUE16-NEXT: scratch_store_b32 off, v141, s32 offset:92
+; GFX11-TRUE16-NEXT: scratch_store_b32 off, v142, s32 offset:88
+; GFX11-TRUE16-NEXT: scratch_store_b32 off, v143, s32 offset:84
+; GFX11-TRUE16-NEXT: scratch_store_b32 off, v152, s32 offset:80
+; GFX11-TRUE16-NEXT: scratch_store_b32 off, v153, s32 offset:76
+; GFX11-TRUE16-NEXT: scratch_store_b32 off, v154, s32 offset:72
+; GFX11-TRUE16-NEXT: scratch_store_b32 off, v155, s32 offset:68
+; GFX11-TRUE16-NEXT: scratch_store_b32 off, v156, s32 offset:64
+; GFX11-TRUE16-NEXT: scratch_store_b32 off, v157, s32 offset:60
+; GFX11-TRUE16-NEXT: scratch_store_b32 off, v158, s32 offset:56
+; GFX11-TRUE16-NEXT: scratch_store_b32 off, v159, s32 offset:52
+; GFX11-TRUE16-NEXT: s_clause 0xc
+; GFX11-TRUE16-NEXT: scratch_store_b32 off, v168, s32 offset:48
+; GFX11-TRUE16-NEXT: scratch_store_b32 off, v169, s32 offset:44
+; GFX11-TRUE16-NEXT: scratch_store_b32 off, v170, s32 offset:40
+; GFX11-TRUE16-NEXT: scratch_store_b32 off, v171, s32 offset:36
+; GFX11-TRUE16-NEXT: scratch_store_b32 off, v172, s32 offset:32
+; GFX11-TRUE16-NEXT: scratch_store_b32 off, v173, s32 offset:28
+; GFX11-TRUE16-NEXT: scratch_store_b32 off, v174, s32 offset:24
+; GFX11-TRUE16-NEXT: scratch_store_b32 off, v175, s32 offset:20
+; GFX11-TRUE16-NEXT: scratch_store_b32 off, v184, s32 offset:16
+; GFX11-TRUE16-NEXT: scratch_store_b32 off, v185, s32 offset:12
+; GFX11-TRUE16-NEXT: scratch_store_b32 off, v186, s32 offset:8
+; GFX11-TRUE16-NEXT: scratch_store_b32 off, v187, s32 offset:4
+; GFX11-TRUE16-NEXT: scratch_store_b32 off, v188, s32
+; GFX11-TRUE16-NEXT: v_dual_mov_b32 v185, v3 :: v_dual_mov_b32 v186, v2
+; GFX11-TRUE16-NEXT: v_dual_mov_b32 v187, v1 :: v_dual_mov_b32 v188, v0
+; GFX11-TRUE16-NEXT: s_lshr_b32 s15, s29, 16
+; GFX11-TRUE16-NEXT: s_lshr_b32 s14, s28, 16
+; GFX11-TRUE16-NEXT: s_lshr_b32 s13, s27, 16
+; GFX11-TRUE16-NEXT: s_lshr_b32 s12, s26, 16
+; GFX11-TRUE16-NEXT: s_lshr_b32 s11, s25, 16
+; GFX11-TRUE16-NEXT: s_lshr_b32 s10, s24, 16
+; GFX11-TRUE16-NEXT: s_lshr_b32 s9, s23, 16
+; GFX11-TRUE16-NEXT: s_lshr_b32 s8, s22, 16
+; GFX11-TRUE16-NEXT: s_lshr_b32 s7, s21, 16
+; GFX11-TRUE16-NEXT: s_lshr_b32 s6, s20, 16
+; GFX11-TRUE16-NEXT: s_lshr_b32 s5, s19, 16
+; GFX11-TRUE16-NEXT: s_lshr_b32 s4, s18, 16
+; GFX11-TRUE16-NEXT: s_lshr_b32 s43, s17, 16
+; GFX11-TRUE16-NEXT: s_lshr_b32 s44, s16, 16
+; GFX11-TRUE16-NEXT: s_lshr_b32 s45, s3, 16
+; GFX11-TRUE16-NEXT: s_lshr_b32 s46, s2, 16
+; GFX11-TRUE16-NEXT: s_lshr_b32 s41, s1, 16
+; GFX11-TRUE16-NEXT: s_lshr_b32 s40, s0, 16
+; GFX11-TRUE16-NEXT: s_mov_b32 s42, 0
+; GFX11-TRUE16-NEXT: s_pack_ll_b32_b16 s40, s0, s40
+; GFX11-TRUE16-NEXT: s_pack_ll_b32_b16 s41, s1, s41
+; GFX11-TRUE16-NEXT: s_pack_ll_b32_b16 s0, s2, s46
+; GFX11-TRUE16-NEXT: s_pack_ll_b32_b16 s1, s3, s45
+; GFX11-TRUE16-NEXT: s_pack_ll_b32_b16 s2, s16, s44
+; GFX11-TRUE16-NEXT: s_pack_ll_b32_b16 s3, s17, s43
+; GFX11-TRUE16-NEXT: s_pack_ll_b32_b16 s4, s18, s4
+; GFX11-TRUE16-NEXT: s_pack_ll_b32_b16 s5, s19, s5
+; GFX11-TRUE16-NEXT: s_pack_ll_b32_b16 s6, s20, s6
+; GFX11-TRUE16-NEXT: s_pack_ll_b32_b16 s7, s21, s7
+; GFX11-TRUE16-NEXT: s_pack_ll_b32_b16 s8, s22, s8
+; GFX11-TRUE16-NEXT: s_pack_ll_b32_b16 s9, s23, s9
+; GFX11-TRUE16-NEXT: s_pack_ll_b32_b16 s10, s24, s10
+; GFX11-TRUE16-NEXT: s_pack_ll_b32_b16 s11, s25, s11
+; GFX11-TRUE16-NEXT: s_pack_ll_b32_b16 s12, s26, s12
+; GFX11-TRUE16-NEXT: s_pack_ll_b32_b16 s13, s27, s13
+; GFX11-TRUE16-NEXT: s_pack_ll_b32_b16 s14, s28, s14
+; GFX11-TRUE16-NEXT: s_pack_ll_b32_b16 s15, s29, s15
; GFX11-TRUE16-NEXT: s_and_b32 s47, vcc_lo, exec_lo
; GFX11-TRUE16-NEXT: s_cbranch_scc0 .LBB43_4
; GFX11-TRUE16-NEXT: ; %bb.1: ; %cmp.false
-; GFX11-TRUE16-NEXT: v_lshl_or_b32 v18, v35, 16, v39
-; GFX11-TRUE16-NEXT: v_lshl_or_b32 v19, v34, 16, v38
-; GFX11-TRUE16-NEXT: v_lshl_or_b32 v20, v33, 16, v37
-; GFX11-TRUE16-NEXT: v_lshl_or_b32 v21, v32, 16, v36
-; GFX11-TRUE16-NEXT: v_dual_mov_b32 v0, s4 :: v_dual_mov_b32 v1, s1
-; GFX11-TRUE16-NEXT: v_dual_mov_b32 v2, s2 :: v_dual_mov_b32 v3, s3
-; GFX11-TRUE16-NEXT: v_dual_mov_b32 v4, s5 :: v_dual_mov_b32 v5, s6
-; GFX11-TRUE16-NEXT: v_dual_mov_b32 v6, s7 :: v_dual_mov_b32 v7, s8
-; GFX11-TRUE16-NEXT: v_dual_mov_b32 v8, s9 :: v_dual_mov_b32 v9, s10
-; GFX11-TRUE16-NEXT: v_dual_mov_b32 v10, s11 :: v_dual_mov_b32 v11, s12
-; GFX11-TRUE16-NEXT: v_dual_mov_b32 v12, s13 :: v_dual_mov_b32 v13, s14
-; GFX11-TRUE16-NEXT: v_dual_mov_b32 v14, s15 :: v_dual_mov_b32 v15, s16
-; GFX11-TRUE16-NEXT: v_dual_mov_b32 v16, s17 :: v_dual_mov_b32 v17, s0
-; GFX11-TRUE16-NEXT: s_and_not1_b32 vcc_lo, exec_lo, s40
+; GFX11-TRUE16-NEXT: v_dual_mov_b32 v0, s40 :: v_dual_mov_b32 v5, s0
+; GFX11-TRUE16-NEXT: v_dual_mov_b32 v2, s41 :: v_dual_mov_b32 v9, s1
+; GFX11-TRUE16-NEXT: v_dual_mov_b32 v14, s2 :: v_dual_mov_b32 v27, s4
+; GFX11-TRUE16-NEXT: v_dual_mov_b32 v20, s3 :: v_dual_mov_b32 v35, s5
+; GFX11-TRUE16-NEXT: v_dual_mov_b32 v44, s6 :: v_dual_mov_b32 v65, s8
+; GFX11-TRUE16-NEXT: v_dual_mov_b32 v54, s7 :: v_dual_mov_b32 v77, s9
+; GFX11-TRUE16-NEXT: v_dual_mov_b32 v90, s10 :: v_dual_mov_b32 v119, s12
+; GFX11-TRUE16-NEXT: v_dual_mov_b32 v104, s11 :: v_dual_mov_b32 v135, s13
+; GFX11-TRUE16-NEXT: v_mov_b32_e32 v152, s14
+; GFX11-TRUE16-NEXT: v_mov_b32_e32 v170, s15
+; GFX11-TRUE16-NEXT: s_and_not1_b32 vcc_lo, exec_lo, s42
; GFX11-TRUE16-NEXT: s_cbranch_vccnz .LBB43_3
; GFX11-TRUE16-NEXT: .LBB43_2: ; %cmp.true
-; GFX11-TRUE16-NEXT: v_lshl_or_b32 v18, v35, 16, v39
-; GFX11-TRUE16-NEXT: v_lshl_or_b32 v19, v34, 16, v38
-; GFX11-TRUE16-NEXT: v_lshl_or_b32 v20, v33, 16, v37
-; GFX11-TRUE16-NEXT: v_lshl_or_b32 v21, v32, 16, v36
-; GFX11-TRUE16-NEXT: v_pk_add_u16 v0, s4, 3 op_sel_hi:[1,0]
-; GFX11-TRUE16-NEXT: v_pk_add_u16 v1, s1, 3 op_sel_hi:[1,0]
-; GFX11-TRUE16-NEXT: v_pk_add_u16 v2, s2, 3 op_sel_hi:[1,0]
-; GFX11-TRUE16-NEXT: v_pk_add_u16 v3, s3, 3 op_sel_hi:[1,0]
-; GFX11-TRUE16-NEXT: v_pk_add_u16 v4, s5, 3 op_sel_hi:[1,0]
-; GFX11-TRUE16-NEXT: v_pk_add_u16 v5, s6, 3 op_sel_hi:[1,0]
-; GFX11-TRUE16-NEXT: v_pk_add_u16 v6, s7, 3 op_sel_hi:[1,0]
-; GFX11-TRUE16-NEXT: v_pk_add_u16 v7, s8, 3 op_sel_hi:[1,0]
-; GFX11-TRUE16-NEXT: v_pk_add_u16 v8, s9, 3 op_sel_hi:[1,0]
-; GFX11-TRUE16-NEXT: v_pk_add_u16 v9, s10, 3 op_sel_hi:[1,0]
-; GFX11-TRUE16-NEXT: v_pk_add_u16 v10, s11, 3 op_sel_hi:[1,0]
-; GFX11-TRUE16-NEXT: v_pk_add_u16 v11, s12, 3 op_sel_hi:[1,0]
-; GFX11-TRUE16-NEXT: v_pk_add_u16 v12, s13, 3 op_sel_hi:[1,0]
-; GFX11-TRUE16-NEXT: v_pk_add_u16 v13, s14, 3 op_sel_hi:[1,0]
-; GFX11-TRUE16-NEXT: v_pk_add_u16 v14, s15, 3 op_sel_hi:[1,0]
-; GFX11-TRUE16-NEXT: v_pk_add_u16 v15, s16, 3 op_sel_hi:[1,0]
-; GFX11-TRUE16-NEXT: v_pk_add_u16 v16, s17, 3 op_sel_hi:[1,0]
-; GFX11-TRUE16-NEXT: v_pk_add_u16 v17, s0, 3 op_sel_hi:[1,0]
-; GFX11-TRUE16-NEXT: v_pk_add_u16 v18, v18, 3 op_sel_hi:[1,0]
-; GFX11-TRUE16-NEXT: v_pk_add_u16 v19, v19, 3 op_sel_hi:[1,0]
-; GFX11-TRUE16-NEXT: v_pk_add_u16 v20, v20, 3 op_sel_hi:[1,0]
-; GFX11-TRUE16-NEXT: v_pk_add_u16 v21, v21, 3 op_sel_hi:[1,0]
+; GFX11-TRUE16-NEXT: v_pk_add_u16 v0, s40, 3 op_sel_hi:[1,0]
+; GFX11-TRUE16-NEXT: v_pk_add_u16 v2, s41, 3 op_sel_hi:[1,0]
+; GFX11-TRUE16-NEXT: v_pk_add_u16 v188, v188, 3 op_sel_hi:[1,0]
+; GFX11-TRUE16-NEXT: v_pk_add_u16 v187, v187, 3 op_sel_hi:[1,0]
+; GFX11-TRUE16-NEXT: v_pk_add_u16 v186, v186, 3 op_sel_hi:[1,0]
+; GFX11-TRUE16-NEXT: v_pk_add_u16 v185, v185, 3 op_sel_hi:[1,0]
+; GFX11-TRUE16-NEXT: v_pk_add_u16 v5, s0, 3 op_sel_hi:[1,0]
+; GFX11-TRUE16-NEXT: v_pk_add_u16 v9, s1, 3 op_sel_hi:[1,0]
+; GFX11-TRUE16-NEXT: v_pk_add_u16 v14, s2, 3 op_sel_hi:[1,0]
+; GFX11-TRUE16-NEXT: v_pk_add_u16 v20, s3, 3 op_sel_hi:[1,0]
+; GFX11-TRUE16-NEXT: v_pk_add_u16 v27, s4, 3 op_sel_hi:[1,0]
+; GFX11-TRUE16-NEXT: v_pk_add_u16 v35, s5, 3 op_sel_hi:[1,0]
+; GFX11-TRUE16-NEXT: v_pk_add_u16 v44, s6, 3 op_sel_hi:[1,0]
+; GFX11-TRUE16-NEXT: v_pk_add_u16 v54, s7, 3 op_sel_hi:[1,0]
+; GFX11-TRUE16-NEXT: v_pk_add_u16 v65, s8, 3 op_sel_hi:[1,0]
+; GFX11-TRUE16-NEXT: v_pk_add_u16 v77, s9, 3 op_sel_hi:[1,0]
+; GFX11-TRUE16-NEXT: v_pk_add_u16 v90, s10, 3 op_sel_hi:[1,0]
+; GFX11-TRUE16-NEXT: v_pk_add_u16 v104, s11, 3 op_sel_hi:[1,0]
+; GFX11-TRUE16-NEXT: v_pk_add_u16 v119, s12, 3 op_sel_hi:[1,0]
+; GFX11-TRUE16-NEXT: v_pk_add_u16 v135, s13, 3 op_sel_hi:[1,0]
+; GFX11-TRUE16-NEXT: v_pk_add_u16 v152, s14, 3 op_sel_hi:[1,0]
+; GFX11-TRUE16-NEXT: v_pk_add_u16 v170, s15, 3 op_sel_hi:[1,0]
; GFX11-TRUE16-NEXT: .LBB43_3: ; %end
+; GFX11-TRUE16-NEXT: v_dual_mov_b32 v1, v2 :: v_dual_mov_b32 v2, v5
+; GFX11-TRUE16-NEXT: v_dual_mov_b32 v5, v20 :: v_dual_mov_b32 v6, v27
+; GFX11-TRUE16-NEXT: v_dual_mov_b32 v7, v35 :: v_dual_mov_b32 v8, v44
+; GFX11-TRUE16-NEXT: v_dual_mov_b32 v11, v77 :: v_dual_mov_b32 v12, v90
+; GFX11-TRUE16-NEXT: v_mov_b32_e32 v13, v104
+; GFX11-TRUE16-NEXT: v_dual_mov_b32 v15, v135 :: v_dual_mov_b32 v16, v152
+; GFX11-TRUE16-NEXT: v_dual_mov_b32 v17, v170 :: v_dual_mov_b32 v18, v188
+; GFX11-TRUE16-NEXT: v_dual_mov_b32 v19, v187 :: v_dual_mov_b32 v20, v186
+; GFX11-TRUE16-NEXT: v_mov_b32_e32 v21, v185
+; GFX11-TRUE16-NEXT: s_clause 0x1f
+; GFX11-TRUE16-NEXT: scratch_load_b32 v188, off, s32
+; GFX11-TRUE16-NEXT: scratch_load_b32 v187, off, s32 offset:4
+; GFX11-TRUE16-NEXT: scratch_load_b32 v186, off, s32 offset:8
+; GFX11-TRUE16-NEXT: scratch_load_b32 v185, off, s32 offset:12
+; GFX11-TRUE16-NEXT: scratch_load_b32 v184, off, s32 offset:16
+; GFX11-TRUE16-NEXT: scratch_load_b32 v175, off, s32 offset:20
+; GFX11-TRUE16-NEXT: scratch_load_b32 v174, off, s32 offset:24
+; GFX11-TRUE16-NEXT: scratch_load_b32 v173, off, s32 offset:28
+; GFX11-TRUE16-NEXT: scratch_load_b32 v172, off, s32 offset:32
+; GFX11-TRUE16-NEXT: scratch_load_b32 v171, off, s32 offset:36
+; GFX11-TRUE16-NEXT: scratch_load_b32 v170, off, s32 offset:40
+; GFX11-TRUE16-NEXT: scratch_load_b32 v169, off, s32 offset:44
+; GFX11-TRUE16-NEXT: scratch_load_b32 v168, off, s32 offset:48
+; GFX11-TRUE16-NEXT: scratch_load_b32 v159, off, s32 offset:52
+; GFX11-TRUE16-NEXT: scratch_load_b32 v158, off, s32 offset:56
+; GFX11-TRUE16-NEXT: scratch_load_b32 v157, off, s32 offset:60
+; GFX11-TRUE16-NEXT: scratch_load_b32 v156, off, s32 offset:64
+; GFX11-TRUE16-NEXT: scratch_load_b32 v155, off, s32 offset:68
+; GFX11-TRUE16-NEXT: scratch_load_b32 v154, off, s32 offset:72
+; GFX11-TRUE16-NEXT: scratch_load_b32 v153, off, s32 offset:76
+; GFX11-TRUE16-NEXT: scratch_load_b32 v152, off, s32 offset:80
+; GFX11-TRUE16-NEXT: scratch_load_b32 v143, off, s32 offset:84
+; GFX11-TRUE16-NEXT: scratch_load_b32 v142, off, s32 offset:88
+; GFX11-TRUE16-NEXT: scratch_load_b32 v141, off, s32 offset:92
+; GFX11-TRUE16-NEXT: scratch_load_b32 v140, off, s32 offset:96
+; GFX11-TRUE16-NEXT: scratch_load_b32 v139, off, s32 offset:100
+; GFX11-TRUE16-NEXT: scratch_load_b32 v138, off, s32 offset:104
+; GFX11-TRUE16-NEXT: scratch_load_b32 v137, off, s32 offset:108
+; GFX11-TRUE16-NEXT: scratch_load_b32 v136, off, s32 offset:112
+; GFX11-TRUE16-NEXT: scratch_load_b32 v127, off, s32 offset:116
+; GFX11-TRUE16-NEXT: scratch_load_b32 v126, off, s32 offset:120
+; GFX11-TRUE16-NEXT: scratch_load_b32 v125, off, s32 offset:124
+; GFX11-TRUE16-NEXT: s_clause 0x1f
+; GFX11-TRUE16-NEXT: scratch_load_b32 v124, off, s32 offset:128
+; GFX11-TRUE16-NEXT: scratch_load_b32 v123, off, s32 offset:132
+; GFX11-TRUE16-NEXT: scratch_load_b32 v122, off, s32 offset:136
+; GFX11-TRUE16-NEXT: scratch_load_b32 v121, off, s32 offset:140
+; GFX11-TRUE16-NEXT: scratch_load_b32 v120, off, s32 offset:144
+; GFX11-TRUE16-NEXT: scratch_load_b32 v111, off, s32 offset:148
+; GFX11-TRUE16-NEXT: scratch_load_b32 v110, off, s32 offset:152
+; GFX11-TRUE16-NEXT: scratch_load_b32 v109, off, s32 offset:156
+; GFX11-TRUE16-NEXT: scratch_load_b32 v108, off, s32 offset:160
+; GFX11-TRUE16-NEXT: scratch_load_b32 v107, off, s32 offset:164
+; GFX11-TRUE16-NEXT: scratch_load_b32 v106, off, s32 offset:168
+; GFX11-TRUE16-NEXT: scratch_load_b32 v105, off, s32 offset:172
+; GFX11-TRUE16-NEXT: scratch_load_b32 v104, off, s32 offset:176
+; GFX11-TRUE16-NEXT: scratch_load_b32 v95, off, s32 offset:180
+; GFX11-TRUE16-NEXT: scratch_load_b32 v94, off, s32 offset:184
+; GFX11-TRUE16-NEXT: scratch_load_b32 v93, off, s32 offset:188
+; GFX11-TRUE16-NEXT: scratch_load_b32 v92, off, s32 offset:192
+; GFX11-TRUE16-NEXT: scratch_load_b32 v91, off, s32 offset:196
+; GFX11-TRUE16-NEXT: scratch_load_b32 v90, off, s32 offset:200
+; GFX11-TRUE16-NEXT: scratch_load_b32 v89, off, s32 offset:204
+; GFX11-TRUE16-NEXT: scratch_load_b32 v88, off, s32 offset:208
+; GFX11-TRUE16-NEXT: scratch_load_b32 v79, off, s32 offset:212
+; GFX11-TRUE16-NEXT: scratch_load_b32 v78, off, s32 offset:216
+; GFX11-TRUE16-NEXT: scratch_load_b32 v77, off, s32 offset:220
+; GFX11-TRUE16-NEXT: scratch_load_b32 v76, off, s32 offset:224
+; GFX11-TRUE16-NEXT: scratch_load_b32 v75, off, s32 offset:228
+; GFX11-TRUE16-NEXT: scratch_load_b32 v74, off, s32 offset:232
+; GFX11-TRUE16-NEXT: scratch_load_b32 v73, off, s32 offset:236
+; GFX11-TRUE16-NEXT: scratch_load_b32 v72, off, s32 offset:240
+; GFX11-TRUE16-NEXT: scratch_load_b32 v63, off, s32 offset:244
+; GFX11-TRUE16-NEXT: scratch_load_b32 v62, off, s32 offset:248
+; GFX11-TRUE16-NEXT: scratch_load_b32 v61, off, s32 offset:252
+; GFX11-TRUE16-NEXT: s_clause 0xc
+; GFX11-TRUE16-NEXT: scratch_load_b32 v60, off, s32 offset:256
+; GFX11-TRUE16-NEXT: scratch_load_b32 v59, off, s32 offset:260
+; GFX11-TRUE16-NEXT: scratch_load_b32 v58, off, s32 offset:264
+; GFX11-TRUE16-NEXT: scratch_load_b32 v57, off, s32 offset:268
+; GFX11-TRUE16-NEXT: scratch_load_b32 v56, off, s32 offset:272
+; GFX11-TRUE16-NEXT: scratch_load_b32 v47, off, s32 offset:276
+; GFX11-TRUE16-NEXT: scratch_load_b32 v46, off, s32 offset:280
+; GFX11-TRUE16-NEXT: scratch_load_b32 v45, off, s32 offset:284
+; GFX11-TRUE16-NEXT: scratch_load_b32 v44, off, s32 offset:288
+; GFX11-TRUE16-NEXT: scratch_load_b32 v43, off, s32 offset:292
+; GFX11-TRUE16-NEXT: scratch_load_b32 v42, off, s32 offset:296
+; GFX11-TRUE16-NEXT: scratch_load_b32 v41, off, s32 offset:300
+; GFX11-TRUE16-NEXT: scratch_load_b32 v40, off, s32 offset:304
+; GFX11-TRUE16-NEXT: v_dual_mov_b32 v3, v9 :: v_dual_mov_b32 v4, v14
+; GFX11-TRUE16-NEXT: v_dual_mov_b32 v9, v54 :: v_dual_mov_b32 v10, v65
+; GFX11-TRUE16-NEXT: v_mov_b32_e32 v14, v119
+; GFX11-TRUE16-NEXT: s_waitcnt vmcnt(0)
; GFX11-TRUE16-NEXT: s_setpc_b64 s[30:31]
; GFX11-TRUE16-NEXT: .LBB43_4:
; GFX11-TRUE16-NEXT: ; implicit-def: $vgpr0_vgpr1_vgpr2_vgpr3_vgpr4_vgpr5_vgpr6_vgpr7_vgpr8_vgpr9_vgpr10_vgpr11_vgpr12_vgpr13_vgpr14_vgpr15_vgpr16_vgpr17_vgpr18_vgpr19_vgpr20_vgpr21_vgpr22_vgpr23_vgpr24_vgpr25_vgpr26_vgpr27_vgpr28_vgpr29_vgpr30_vgpr31
+; GFX11-TRUE16-NEXT: ; implicit-def: $vgpr1_vgpr2_vgpr3_vgpr4_vgpr5_vgpr6_vgpr7_vgpr8_vgpr9_vgpr10_vgpr11_vgpr12_vgpr13_vgpr14_vgpr15_vgpr16_vgpr17_vgpr18_vgpr19_vgpr20_vgpr21_vgpr22_vgpr23_vgpr24_vgpr25_vgpr26_vgpr27_vgpr28_vgpr29_vgpr30_vgpr31_vgpr32
+; GFX11-TRUE16-NEXT: ; implicit-def: $vgpr3_vgpr4_vgpr5_vgpr6_vgpr7_vgpr8_vgpr9_vgpr10_vgpr11_vgpr12_vgpr13_vgpr14_vgpr15_vgpr16_vgpr17_vgpr18_vgpr19_vgpr20_vgpr21_vgpr22_vgpr23_vgpr24_vgpr25_vgpr26_vgpr27_vgpr28_vgpr29_vgpr30_vgpr31_vgpr32_vgpr33_vgpr34
+; GFX11-TRUE16-NEXT: ; implicit-def: $vgpr6_vgpr7_vgpr8_vgpr9_vgpr10_vgpr11_vgpr12_vgpr13_vgpr14_vgpr15_vgpr16_vgpr17_vgpr18_vgpr19_vgpr20_vgpr21_vgpr22_vgpr23_vgpr24_vgpr25_vgpr26_vgpr27_vgpr28_vgpr29_vgpr30_vgpr31_vgpr32_vgpr33_vgpr34_vgpr35_vgpr36_vgpr37
+; GFX11-TRUE16-NEXT: ; implicit-def: $vgpr10_vgpr11_vgpr12_vgpr13_vgpr14_vgpr15_vgpr16_vgpr17_vgpr18_vgpr19_vgpr20_vgpr21_vgpr22_vgpr23_vgpr24_vgpr25_vgpr26_vgpr27_vgpr28_vgpr29_vgpr30_vgpr31_vgpr32_vgpr33_vgpr34_vgpr35_vgpr36_vgpr37_vgpr38_vgpr39_vgpr40_vgpr41
+; GFX11-TRUE16-NEXT: ; implicit-def: $vgpr15_vgpr16_vgpr17_vgpr18_vgpr19_vgpr20_vgpr21_vgpr22_vgpr23_vgpr24_vgpr25_vgpr26_vgpr27_vgpr28_vgpr29_vgpr30_vgpr31_vgpr32_vgpr33_vgpr34_vgpr35_vgpr36_vgpr37_vgpr38_vgpr39_vgpr40_vgpr41_vgpr42_vgpr43_vgpr44_vgpr45_vgpr46
+; GFX11-TRUE16-NEXT: ; implicit-def: $vgpr21_vgpr22_vgpr23_vgpr24_vgpr25_vgpr26_vgpr27_vgpr28_vgpr29_vgpr30_vgpr31_vgpr32_vgpr33_vgpr34_vgpr35_vgpr36_vgpr37_vgpr38_vgpr39_vgpr40_vgpr41_vgpr42_vgpr43_vgpr44_vgpr45_vgpr46_vgpr47_vgpr48_vgpr49_vgpr50_vgpr51_vgpr52
+; GFX11-TRUE16-NEXT: ; implicit-def: $vgpr28_vgpr29_vgpr30_vgpr31_vgpr32_vgpr33_vgpr34_vgpr35_vgpr36_vgpr37_vgpr38_vgpr39_vgpr40_vgpr41_vgpr42_vgpr43_vgpr44_vgpr45_vgpr46_vgpr47_vgpr48_vgpr49_vgpr50_vgpr51_vgpr52_vgpr53_vgpr54_vgpr55_vgpr56_vgpr57_vgpr58_vgpr59
+; GFX11-TRUE16-NEXT: ; implicit-def: $vgpr36_vgpr37_vgpr38_vgpr39_vgpr40_vgpr41_vgpr42_vgpr43_vgpr44_vgpr45_vgpr46_vgpr47_vgpr48_vgpr49_vgpr50_vgpr51_vgpr52_vgpr53_vgpr54_vgpr55_vgpr56_vgpr57_vgpr58_vgpr59_vgpr60_vgpr61_vgpr62_vgpr63_vgpr64_vgpr65_vgpr66_vgpr67
+; GFX11-TRUE16-NEXT: ; implicit-def: $vgpr45_vgpr46_vgpr47_vgpr48_vgpr49_vgpr50_vgpr51_vgpr52_vgpr53_vgpr54_vgpr55_vgpr56_vgpr57_vgpr58_vgpr59_vgpr60_vgpr61_vgpr62_vgpr63_vgpr64_vgpr65_vgpr66_vgpr67_vgpr68_vgpr69_vgpr70_vgpr71_vgpr72_vgpr73_vgpr74_vgpr75_vgpr76
+; GFX11-TRUE16-NEXT: ; implicit-def: $vgpr55_vgpr56_vgpr57_vgpr58_vgpr59_vgpr60_vgpr61_vgpr62_vgpr63_vgpr64_vgpr65_vgpr66_vgpr67_vgpr68_vgpr69_vgpr70_vgpr71_vgpr72_vgpr73_vgpr74_vgpr75_vgpr76_vgpr77_vgpr78_vgpr79_vgpr80_vgpr81_vgpr82_vgpr83_vgpr84_vgpr85_vgpr86
+; GFX11-TRUE16-NEXT: ; implicit-def: $vgpr66_vgpr67_vgpr68_vgpr69_vgpr70_vgpr71_vgpr72_vgpr73_vgpr74_vgpr75_vgpr76_vgpr77_vgpr78_vgpr79_vgpr80_vgpr81_vgpr82_vgpr83_vgpr84_vgpr85_vgpr86_vgpr87_vgpr88_vgpr89_vgpr90_vgpr91_vgpr92_vgpr93_vgpr94_vgpr95_vgpr96_vgpr97
+; GFX11-TRUE16-NEXT: ; implicit-def: $vgpr78_vgpr79_vgpr80_vgpr81_vgpr82_vgpr83_vgpr84_vgpr85_vgpr86_vgpr87_vgpr88_vgpr89_vgpr90_vgpr91_vgpr92_vgpr93_vgpr94_vgpr95_vgpr96_vgpr97_vgpr98_vgpr99_vgpr100_vgpr101_vgpr102_vgpr103_vgpr104_vgpr105_vgpr106_vgpr107_vgpr108_vgpr109
+; GFX11-TRUE16-NEXT: ; implicit-def: $vgpr91_vgpr92_vgpr93_vgpr94_vgpr95_vgpr96_vgpr97_vgpr98_vgpr99_vgpr100_vgpr101_vgpr102_vgpr103_vgpr104_vgpr105_vgpr106_vgpr107_vgpr108_vgpr109_vgpr110_vgpr111_vgpr112_vgpr113_vgpr114_vgpr115_vgpr116_vgpr117_vgpr118_vgpr119_vgpr120_vgpr121_vgpr122
+; GFX11-TRUE16-NEXT: ; implicit-def: $vgpr105_vgpr106_vgpr107_vgpr108_vgpr109_vgpr110_vgpr111_vgpr112_vgpr113_vgpr114_vgpr115_vgpr116_vgpr117_vgpr118_vgpr119_vgpr120_vgpr121_vgpr122_vgpr123_vgpr124_vgpr125_vgpr126_vgpr127_vgpr128_vgpr129_vgpr130_vgpr131_vgpr132_vgpr133_vgpr134_vgpr135_vgpr136
+; GFX11-TRUE16-NEXT: ; implicit-def: $vgpr120_vgpr121_vgpr122_vgpr123_vgpr124_vgpr125_vgpr126_vgpr127_vgpr128_vgpr129_vgpr130_vgpr131_vgpr132_vgpr133_vgpr134_vgpr135_vgpr136_vgpr137_vgpr138_vgpr139_vgpr140_vgpr141_vgpr142_vgpr143_vgpr144_vgpr145_vgpr146_vgpr147_vgpr148_vgpr149_vgpr150_vgpr151
+; GFX11-TRUE16-NEXT: ; implicit-def: $vgpr136_vgpr137_vgpr138_vgpr139_vgpr140_vgpr141_vgpr142_vgpr143_vgpr144_vgpr145_vgpr146_vgpr147_vgpr148_vgpr149_vgpr150_vgpr151_vgpr152_vgpr153_vgpr154_vgpr155_vgpr156_vgpr157_vgpr158_vgpr159_vgpr160_vgpr161_vgpr162_vgpr163_vgpr164_vgpr165_vgpr166_vgpr167
+; GFX11-TRUE16-NEXT: ; implicit-def: $vgpr153_vgpr154_vgpr155_vgpr156_vgpr157_vgpr158_vgpr159_vgpr160_vgpr161_vgpr162_vgpr163_vgpr164_vgpr165_vgpr166_vgpr167_vgpr168_vgpr169_vgpr170_vgpr171_vgpr172_vgpr173_vgpr174_vgpr175_vgpr176_vgpr177_vgpr178_vgpr179_vgpr180_vgpr181_vgpr182_vgpr183_vgpr184
; GFX11-TRUE16-NEXT: s_branch .LBB43_2
;
; GFX11-FAKE16-LABEL: bitcast_v44i16_to_v11i64_scalar:
@@ -25389,105 +26534,278 @@ define inreg <11 x i64> @bitcast_v44f16_to_v11i64_scalar(<44 x half> inreg %a, i
; GFX11-TRUE16-LABEL: bitcast_v44f16_to_v11i64_scalar:
; GFX11-TRUE16: ; %bb.0:
; GFX11-TRUE16-NEXT: s_waitcnt vmcnt(0) expcnt(0) lgkmcnt(0)
-; GFX11-TRUE16-NEXT: v_mov_b16_e32 v32.h, 0
; GFX11-TRUE16-NEXT: v_cmp_ne_u32_e32 vcc_lo, 0, v4
-; GFX11-TRUE16-NEXT: v_mov_b16_e32 v32.l, v3.h
-; GFX11-TRUE16-NEXT: v_mov_b16_e32 v33.l, v2.h
-; GFX11-TRUE16-NEXT: v_mov_b16_e32 v34.l, v1.h
-; GFX11-TRUE16-NEXT: v_mov_b16_e32 v33.h, v32.h
-; GFX11-TRUE16-NEXT: v_mov_b16_e32 v34.h, v32.h
-; GFX11-TRUE16-NEXT: v_mov_b16_e32 v35.l, v0.h
-; GFX11-TRUE16-NEXT: v_mov_b16_e32 v35.h, v32.h
-; GFX11-TRUE16-NEXT: v_and_b32_e32 v39, 0xffff, v0
-; GFX11-TRUE16-NEXT: v_and_b32_e32 v38, 0xffff, v1
-; GFX11-TRUE16-NEXT: v_and_b32_e32 v37, 0xffff, v2
-; GFX11-TRUE16-NEXT: v_and_b32_e32 v36, 0xffff, v3
-; GFX11-TRUE16-NEXT: s_lshr_b32 s41, s29, 16
-; GFX11-TRUE16-NEXT: s_lshr_b32 s42, s28, 16
-; GFX11-TRUE16-NEXT: s_lshr_b32 s43, s27, 16
-; GFX11-TRUE16-NEXT: s_lshr_b32 s15, s26, 16
-; GFX11-TRUE16-NEXT: s_lshr_b32 s14, s25, 16
-; GFX11-TRUE16-NEXT: s_lshr_b32 s13, s24, 16
-; GFX11-TRUE16-NEXT: s_lshr_b32 s12, s23, 16
-; GFX11-TRUE16-NEXT: s_lshr_b32 s11, s22, 16
-; GFX11-TRUE16-NEXT: s_lshr_b32 s10, s21, 16
-; GFX11-TRUE16-NEXT: s_lshr_b32 s9, s20, 16
-; GFX11-TRUE16-NEXT: s_lshr_b32 s8, s19, 16
-; GFX11-TRUE16-NEXT: s_lshr_b32 s7, s18, 16
-; GFX11-TRUE16-NEXT: s_lshr_b32 s6, s17, 16
-; GFX11-TRUE16-NEXT: s_lshr_b32 s5, s16, 16
-; GFX11-TRUE16-NEXT: s_lshr_b32 s44, s3, 16
-; GFX11-TRUE16-NEXT: s_lshr_b32 s45, s2, 16
-; GFX11-TRUE16-NEXT: s_lshr_b32 s46, s1, 16
-; GFX11-TRUE16-NEXT: s_lshr_b32 s4, s0, 16
-; GFX11-TRUE16-NEXT: s_mov_b32 s40, 0
-; GFX11-TRUE16-NEXT: s_pack_ll_b32_b16 s4, s0, s4
-; GFX11-TRUE16-NEXT: s_pack_ll_b32_b16 s1, s1, s46
-; GFX11-TRUE16-NEXT: s_pack_ll_b32_b16 s2, s2, s45
-; GFX11-TRUE16-NEXT: s_pack_ll_b32_b16 s3, s3, s44
-; GFX11-TRUE16-NEXT: s_pack_ll_b32_b16 s5, s16, s5
-; GFX11-TRUE16-NEXT: s_pack_ll_b32_b16 s6, s17, s6
-; GFX11-TRUE16-NEXT: s_pack_ll_b32_b16 s7, s18, s7
-; GFX11-TRUE16-NEXT: s_pack_ll_b32_b16 s8, s19, s8
-; GFX11-TRUE16-NEXT: s_pack_ll_b32_b16 s9, s20, s9
-; GFX11-TRUE16-NEXT: s_pack_ll_b32_b16 s10, s21, s10
-; GFX11-TRUE16-NEXT: s_pack_ll_b32_b16 s11, s22, s11
-; GFX11-TRUE16-NEXT: s_pack_ll_b32_b16 s12, s23, s12
-; GFX11-TRUE16-NEXT: s_pack_ll_b32_b16 s13, s24, s13
-; GFX11-TRUE16-NEXT: s_pack_ll_b32_b16 s14, s25, s14
-; GFX11-TRUE16-NEXT: s_pack_ll_b32_b16 s15, s26, s15
-; GFX11-TRUE16-NEXT: s_pack_ll_b32_b16 s16, s27, s43
-; GFX11-TRUE16-NEXT: s_pack_ll_b32_b16 s17, s28, s42
-; GFX11-TRUE16-NEXT: s_pack_ll_b32_b16 s0, s29, s41
+; GFX11-TRUE16-NEXT: s_clause 0x1f
+; GFX11-TRUE16-NEXT: scratch_store_b32 off, v40, s32 offset:304
+; GFX11-TRUE16-NEXT: scratch_store_b32 off, v41, s32 offset:300
+; GFX11-TRUE16-NEXT: scratch_store_b32 off, v42, s32 offset:296
+; GFX11-TRUE16-NEXT: scratch_store_b32 off, v43, s32 offset:292
+; GFX11-TRUE16-NEXT: scratch_store_b32 off, v44, s32 offset:288
+; GFX11-TRUE16-NEXT: scratch_store_b32 off, v45, s32 offset:284
+; GFX11-TRUE16-NEXT: scratch_store_b32 off, v46, s32 offset:280
+; GFX11-TRUE16-NEXT: scratch_store_b32 off, v47, s32 offset:276
+; GFX11-TRUE16-NEXT: scratch_store_b32 off, v56, s32 offset:272
+; GFX11-TRUE16-NEXT: scratch_store_b32 off, v57, s32 offset:268
+; GFX11-TRUE16-NEXT: scratch_store_b32 off, v58, s32 offset:264
+; GFX11-TRUE16-NEXT: scratch_store_b32 off, v59, s32 offset:260
+; GFX11-TRUE16-NEXT: scratch_store_b32 off, v60, s32 offset:256
+; GFX11-TRUE16-NEXT: scratch_store_b32 off, v61, s32 offset:252
+; GFX11-TRUE16-NEXT: scratch_store_b32 off, v62, s32 offset:248
+; GFX11-TRUE16-NEXT: scratch_store_b32 off, v63, s32 offset:244
+; GFX11-TRUE16-NEXT: scratch_store_b32 off, v72, s32 offset:240
+; GFX11-TRUE16-NEXT: scratch_store_b32 off, v73, s32 offset:236
+; GFX11-TRUE16-NEXT: scratch_store_b32 off, v74, s32 offset:232
+; GFX11-TRUE16-NEXT: scratch_store_b32 off, v75, s32 offset:228
+; GFX11-TRUE16-NEXT: scratch_store_b32 off, v76, s32 offset:224
+; GFX11-TRUE16-NEXT: scratch_store_b32 off, v77, s32 offset:220
+; GFX11-TRUE16-NEXT: scratch_store_b32 off, v78, s32 offset:216
+; GFX11-TRUE16-NEXT: scratch_store_b32 off, v79, s32 offset:212
+; GFX11-TRUE16-NEXT: scratch_store_b32 off, v88, s32 offset:208
+; GFX11-TRUE16-NEXT: scratch_store_b32 off, v89, s32 offset:204
+; GFX11-TRUE16-NEXT: scratch_store_b32 off, v90, s32 offset:200
+; GFX11-TRUE16-NEXT: scratch_store_b32 off, v91, s32 offset:196
+; GFX11-TRUE16-NEXT: scratch_store_b32 off, v92, s32 offset:192
+; GFX11-TRUE16-NEXT: scratch_store_b32 off, v93, s32 offset:188
+; GFX11-TRUE16-NEXT: scratch_store_b32 off, v94, s32 offset:184
+; GFX11-TRUE16-NEXT: scratch_store_b32 off, v95, s32 offset:180
+; GFX11-TRUE16-NEXT: s_clause 0x1f
+; GFX11-TRUE16-NEXT: scratch_store_b32 off, v104, s32 offset:176
+; GFX11-TRUE16-NEXT: scratch_store_b32 off, v105, s32 offset:172
+; GFX11-TRUE16-NEXT: scratch_store_b32 off, v106, s32 offset:168
+; GFX11-TRUE16-NEXT: scratch_store_b32 off, v107, s32 offset:164
+; GFX11-TRUE16-NEXT: scratch_store_b32 off, v108, s32 offset:160
+; GFX11-TRUE16-NEXT: scratch_store_b32 off, v109, s32 offset:156
+; GFX11-TRUE16-NEXT: scratch_store_b32 off, v110, s32 offset:152
+; GFX11-TRUE16-NEXT: scratch_store_b32 off, v111, s32 offset:148
+; GFX11-TRUE16-NEXT: scratch_store_b32 off, v120, s32 offset:144
+; GFX11-TRUE16-NEXT: scratch_store_b32 off, v121, s32 offset:140
+; GFX11-TRUE16-NEXT: scratch_store_b32 off, v122, s32 offset:136
+; GFX11-TRUE16-NEXT: scratch_store_b32 off, v123, s32 offset:132
+; GFX11-TRUE16-NEXT: scratch_store_b32 off, v124, s32 offset:128
+; GFX11-TRUE16-NEXT: scratch_store_b32 off, v125, s32 offset:124
+; GFX11-TRUE16-NEXT: scratch_store_b32 off, v126, s32 offset:120
+; GFX11-TRUE16-NEXT: scratch_store_b32 off, v127, s32 offset:116
+; GFX11-TRUE16-NEXT: scratch_store_b32 off, v136, s32 offset:112
+; GFX11-TRUE16-NEXT: scratch_store_b32 off, v137, s32 offset:108
+; GFX11-TRUE16-NEXT: scratch_store_b32 off, v138, s32 offset:104
+; GFX11-TRUE16-NEXT: scratch_store_b32 off, v139, s32 offset:100
+; GFX11-TRUE16-NEXT: scratch_store_b32 off, v140, s32 offset:96
+; GFX11-TRUE16-NEXT: scratch_store_b32 off, v141, s32 offset:92
+; GFX11-TRUE16-NEXT: scratch_store_b32 off, v142, s32 offset:88
+; GFX11-TRUE16-NEXT: scratch_store_b32 off, v143, s32 offset:84
+; GFX11-TRUE16-NEXT: scratch_store_b32 off, v152, s32 offset:80
+; GFX11-TRUE16-NEXT: scratch_store_b32 off, v153, s32 offset:76
+; GFX11-TRUE16-NEXT: scratch_store_b32 off, v154, s32 offset:72
+; GFX11-TRUE16-NEXT: scratch_store_b32 off, v155, s32 offset:68
+; GFX11-TRUE16-NEXT: scratch_store_b32 off, v156, s32 offset:64
+; GFX11-TRUE16-NEXT: scratch_store_b32 off, v157, s32 offset:60
+; GFX11-TRUE16-NEXT: scratch_store_b32 off, v158, s32 offset:56
+; GFX11-TRUE16-NEXT: scratch_store_b32 off, v159, s32 offset:52
+; GFX11-TRUE16-NEXT: s_clause 0xc
+; GFX11-TRUE16-NEXT: scratch_store_b32 off, v168, s32 offset:48
+; GFX11-TRUE16-NEXT: scratch_store_b32 off, v169, s32 offset:44
+; GFX11-TRUE16-NEXT: scratch_store_b32 off, v170, s32 offset:40
+; GFX11-TRUE16-NEXT: scratch_store_b32 off, v171, s32 offset:36
+; GFX11-TRUE16-NEXT: scratch_store_b32 off, v172, s32 offset:32
+; GFX11-TRUE16-NEXT: scratch_store_b32 off, v173, s32 offset:28
+; GFX11-TRUE16-NEXT: scratch_store_b32 off, v174, s32 offset:24
+; GFX11-TRUE16-NEXT: scratch_store_b32 off, v175, s32 offset:20
+; GFX11-TRUE16-NEXT: scratch_store_b32 off, v184, s32 offset:16
+; GFX11-TRUE16-NEXT: scratch_store_b32 off, v185, s32 offset:12
+; GFX11-TRUE16-NEXT: scratch_store_b32 off, v186, s32 offset:8
+; GFX11-TRUE16-NEXT: scratch_store_b32 off, v187, s32 offset:4
+; GFX11-TRUE16-NEXT: scratch_store_b32 off, v188, s32
+; GFX11-TRUE16-NEXT: v_dual_mov_b32 v185, v3 :: v_dual_mov_b32 v186, v2
+; GFX11-TRUE16-NEXT: v_dual_mov_b32 v187, v1 :: v_dual_mov_b32 v188, v0
+; GFX11-TRUE16-NEXT: s_lshr_b32 s15, s29, 16
+; GFX11-TRUE16-NEXT: s_lshr_b32 s14, s28, 16
+; GFX11-TRUE16-NEXT: s_lshr_b32 s13, s27, 16
+; GFX11-TRUE16-NEXT: s_lshr_b32 s12, s26, 16
+; GFX11-TRUE16-NEXT: s_lshr_b32 s11, s25, 16
+; GFX11-TRUE16-NEXT: s_lshr_b32 s10, s24, 16
+; GFX11-TRUE16-NEXT: s_lshr_b32 s9, s23, 16
+; GFX11-TRUE16-NEXT: s_lshr_b32 s8, s22, 16
+; GFX11-TRUE16-NEXT: s_lshr_b32 s7, s21, 16
+; GFX11-TRUE16-NEXT: s_lshr_b32 s6, s20, 16
+; GFX11-TRUE16-NEXT: s_lshr_b32 s5, s19, 16
+; GFX11-TRUE16-NEXT: s_lshr_b32 s4, s18, 16
+; GFX11-TRUE16-NEXT: s_lshr_b32 s43, s17, 16
+; GFX11-TRUE16-NEXT: s_lshr_b32 s44, s16, 16
+; GFX11-TRUE16-NEXT: s_lshr_b32 s45, s3, 16
+; GFX11-TRUE16-NEXT: s_lshr_b32 s46, s2, 16
+; GFX11-TRUE16-NEXT: s_lshr_b32 s41, s1, 16
+; GFX11-TRUE16-NEXT: s_lshr_b32 s40, s0, 16
+; GFX11-TRUE16-NEXT: s_mov_b32 s42, 0
+; GFX11-TRUE16-NEXT: s_pack_ll_b32_b16 s40, s0, s40
+; GFX11-TRUE16-NEXT: s_pack_ll_b32_b16 s41, s1, s41
+; GFX11-TRUE16-NEXT: s_pack_ll_b32_b16 s0, s2, s46
+; GFX11-TRUE16-NEXT: s_pack_ll_b32_b16 s1, s3, s45
+; GFX11-TRUE16-NEXT: s_pack_ll_b32_b16 s2, s16, s44
+; GFX11-TRUE16-NEXT: s_pack_ll_b32_b16 s3, s17, s43
+; GFX11-TRUE16-NEXT: s_pack_ll_b32_b16 s4, s18, s4
+; GFX11-TRUE16-NEXT: s_pack_ll_b32_b16 s5, s19, s5
+; GFX11-TRUE16-NEXT: s_pack_ll_b32_b16 s6, s20, s6
+; GFX11-TRUE16-NEXT: s_pack_ll_b32_b16 s7, s21, s7
+; GFX11-TRUE16-NEXT: s_pack_ll_b32_b16 s8, s22, s8
+; GFX11-TRUE16-NEXT: s_pack_ll_b32_b16 s9, s23, s9
+; GFX11-TRUE16-NEXT: s_pack_ll_b32_b16 s10, s24, s10
+; GFX11-TRUE16-NEXT: s_pack_ll_b32_b16 s11, s25, s11
+; GFX11-TRUE16-NEXT: s_pack_ll_b32_b16 s12, s26, s12
+; GFX11-TRUE16-NEXT: s_pack_ll_b32_b16 s13, s27, s13
+; GFX11-TRUE16-NEXT: s_pack_ll_b32_b16 s14, s28, s14
+; GFX11-TRUE16-NEXT: s_pack_ll_b32_b16 s15, s29, s15
; GFX11-TRUE16-NEXT: s_and_b32 s47, vcc_lo, exec_lo
; GFX11-TRUE16-NEXT: s_cbranch_scc0 .LBB47_4
; GFX11-TRUE16-NEXT: ; %bb.1: ; %cmp.false
-; GFX11-TRUE16-NEXT: v_lshl_or_b32 v18, v35, 16, v39
-; GFX11-TRUE16-NEXT: v_lshl_or_b32 v19, v34, 16, v38
-; GFX11-TRUE16-NEXT: v_lshl_or_b32 v20, v33, 16, v37
-; GFX11-TRUE16-NEXT: v_lshl_or_b32 v21, v32, 16, v36
-; GFX11-TRUE16-NEXT: v_dual_mov_b32 v0, s4 :: v_dual_mov_b32 v1, s1
-; GFX11-TRUE16-NEXT: v_dual_mov_b32 v2, s2 :: v_dual_mov_b32 v3, s3
-; GFX11-TRUE16-NEXT: v_dual_mov_b32 v4, s5 :: v_dual_mov_b32 v5, s6
-; GFX11-TRUE16-NEXT: v_dual_mov_b32 v6, s7 :: v_dual_mov_b32 v7, s8
-; GFX11-TRUE16-NEXT: v_dual_mov_b32 v8, s9 :: v_dual_mov_b32 v9, s10
-; GFX11-TRUE16-NEXT: v_dual_mov_b32 v10, s11 :: v_dual_mov_b32 v11, s12
-; GFX11-TRUE16-NEXT: v_dual_mov_b32 v12, s13 :: v_dual_mov_b32 v13, s14
-; GFX11-TRUE16-NEXT: v_dual_mov_b32 v14, s15 :: v_dual_mov_b32 v15, s16
-; GFX11-TRUE16-NEXT: v_dual_mov_b32 v16, s17 :: v_dual_mov_b32 v17, s0
-; GFX11-TRUE16-NEXT: s_and_not1_b32 vcc_lo, exec_lo, s40
+; GFX11-TRUE16-NEXT: v_dual_mov_b32 v0, s40 :: v_dual_mov_b32 v5, s0
+; GFX11-TRUE16-NEXT: v_dual_mov_b32 v2, s41 :: v_dual_mov_b32 v9, s1
+; GFX11-TRUE16-NEXT: v_dual_mov_b32 v14, s2 :: v_dual_mov_b32 v27, s4
+; GFX11-TRUE16-NEXT: v_dual_mov_b32 v20, s3 :: v_dual_mov_b32 v35, s5
+; GFX11-TRUE16-NEXT: v_dual_mov_b32 v44, s6 :: v_dual_mov_b32 v65, s8
+; GFX11-TRUE16-NEXT: v_dual_mov_b32 v54, s7 :: v_dual_mov_b32 v77, s9
+; GFX11-TRUE16-NEXT: v_dual_mov_b32 v90, s10 :: v_dual_mov_b32 v119, s12
+; GFX11-TRUE16-NEXT: v_dual_mov_b32 v104, s11 :: v_dual_mov_b32 v135, s13
+; GFX11-TRUE16-NEXT: v_mov_b32_e32 v152, s14
+; GFX11-TRUE16-NEXT: v_mov_b32_e32 v170, s15
+; GFX11-TRUE16-NEXT: s_and_not1_b32 vcc_lo, exec_lo, s42
; GFX11-TRUE16-NEXT: s_cbranch_vccnz .LBB47_3
; GFX11-TRUE16-NEXT: .LBB47_2: ; %cmp.true
-; GFX11-TRUE16-NEXT: v_lshl_or_b32 v18, v35, 16, v39
-; GFX11-TRUE16-NEXT: v_lshl_or_b32 v19, v34, 16, v38
-; GFX11-TRUE16-NEXT: v_lshl_or_b32 v20, v33, 16, v37
-; GFX11-TRUE16-NEXT: v_lshl_or_b32 v21, v32, 16, v36
-; GFX11-TRUE16-NEXT: v_pk_add_f16 v0, 0x200, s4 op_sel_hi:[0,1]
-; GFX11-TRUE16-NEXT: v_pk_add_f16 v1, 0x200, s1 op_sel_hi:[0,1]
-; GFX11-TRUE16-NEXT: v_pk_add_f16 v2, 0x200, s2 op_sel_hi:[0,1]
-; GFX11-TRUE16-NEXT: v_pk_add_f16 v3, 0x200, s3 op_sel_hi:[0,1]
-; GFX11-TRUE16-NEXT: v_pk_add_f16 v4, 0x200, s5 op_sel_hi:[0,1]
-; GFX11-TRUE16-NEXT: v_pk_add_f16 v5, 0x200, s6 op_sel_hi:[0,1]
-; GFX11-TRUE16-NEXT: v_pk_add_f16 v6, 0x200, s7 op_sel_hi:[0,1]
-; GFX11-TRUE16-NEXT: v_pk_add_f16 v7, 0x200, s8 op_sel_hi:[0,1]
-; GFX11-TRUE16-NEXT: v_pk_add_f16 v8, 0x200, s9 op_sel_hi:[0,1]
-; GFX11-TRUE16-NEXT: v_pk_add_f16 v9, 0x200, s10 op_sel_hi:[0,1]
-; GFX11-TRUE16-NEXT: v_pk_add_f16 v10, 0x200, s11 op_sel_hi:[0,1]
-; GFX11-TRUE16-NEXT: v_pk_add_f16 v11, 0x200, s12 op_sel_hi:[0,1]
-; GFX11-TRUE16-NEXT: v_pk_add_f16 v12, 0x200, s13 op_sel_hi:[0,1]
-; GFX11-TRUE16-NEXT: v_pk_add_f16 v13, 0x200, s14 op_sel_hi:[0,1]
-; GFX11-TRUE16-NEXT: v_pk_add_f16 v14, 0x200, s15 op_sel_hi:[0,1]
-; GFX11-TRUE16-NEXT: v_pk_add_f16 v15, 0x200, s16 op_sel_hi:[0,1]
-; GFX11-TRUE16-NEXT: v_pk_add_f16 v16, 0x200, s17 op_sel_hi:[0,1]
-; GFX11-TRUE16-NEXT: v_pk_add_f16 v17, 0x200, s0 op_sel_hi:[0,1]
-; GFX11-TRUE16-NEXT: v_pk_add_f16 v18, 0x200, v18 op_sel_hi:[0,1]
-; GFX11-TRUE16-NEXT: v_pk_add_f16 v19, 0x200, v19 op_sel_hi:[0,1]
-; GFX11-TRUE16-NEXT: v_pk_add_f16 v20, 0x200, v20 op_sel_hi:[0,1]
-; GFX11-TRUE16-NEXT: v_pk_add_f16 v21, 0x200, v21 op_sel_hi:[0,1]
+; GFX11-TRUE16-NEXT: v_pk_add_f16 v0, 0x200, s40 op_sel_hi:[0,1]
+; GFX11-TRUE16-NEXT: v_pk_add_f16 v2, 0x200, s41 op_sel_hi:[0,1]
+; GFX11-TRUE16-NEXT: v_pk_add_f16 v188, 0x200, v188 op_sel_hi:[0,1]
+; GFX11-TRUE16-NEXT: v_pk_add_f16 v187, 0x200, v187 op_sel_hi:[0,1]
+; GFX11-TRUE16-NEXT: v_pk_add_f16 v186, 0x200, v186 op_sel_hi:[0,1]
+; GFX11-TRUE16-NEXT: v_pk_add_f16 v185, 0x200, v185 op_sel_hi:[0,1]
+; GFX11-TRUE16-NEXT: v_pk_add_f16 v5, 0x200, s0 op_sel_hi:[0,1]
+; GFX11-TRUE16-NEXT: v_pk_add_f16 v9, 0x200, s1 op_sel_hi:[0,1]
+; GFX11-TRUE16-NEXT: v_pk_add_f16 v14, 0x200, s2 op_sel_hi:[0,1]
+; GFX11-TRUE16-NEXT: v_pk_add_f16 v20, 0x200, s3 op_sel_hi:[0,1]
+; GFX11-TRUE16-NEXT: v_pk_add_f16 v27, 0x200, s4 op_sel_hi:[0,1]
+; GFX11-TRUE16-NEXT: v_pk_add_f16 v35, 0x200, s5 op_sel_hi:[0,1]
+; GFX11-TRUE16-NEXT: v_pk_add_f16 v44, 0x200, s6 op_sel_hi:[0,1]
+; GFX11-TRUE16-NEXT: v_pk_add_f16 v54, 0x200, s7 op_sel_hi:[0,1]
+; GFX11-TRUE16-NEXT: v_pk_add_f16 v65, 0x200, s8 op_sel_hi:[0,1]
+; GFX11-TRUE16-NEXT: v_pk_add_f16 v77, 0x200, s9 op_sel_hi:[0,1]
+; GFX11-TRUE16-NEXT: v_pk_add_f16 v90, 0x200, s10 op_sel_hi:[0,1]
+; GFX11-TRUE16-NEXT: v_pk_add_f16 v104, 0x200, s11 op_sel_hi:[0,1]
+; GFX11-TRUE16-NEXT: v_pk_add_f16 v119, 0x200, s12 op_sel_hi:[0,1]
+; GFX11-TRUE16-NEXT: v_pk_add_f16 v135, 0x200, s13 op_sel_hi:[0,1]
+; GFX11-TRUE16-NEXT: v_pk_add_f16 v152, 0x200, s14 op_sel_hi:[0,1]
+; GFX11-TRUE16-NEXT: v_pk_add_f16 v170, 0x200, s15 op_sel_hi:[0,1]
; GFX11-TRUE16-NEXT: .LBB47_3: ; %end
+; GFX11-TRUE16-NEXT: v_dual_mov_b32 v1, v2 :: v_dual_mov_b32 v2, v5
+; GFX11-TRUE16-NEXT: v_dual_mov_b32 v5, v20 :: v_dual_mov_b32 v6, v27
+; GFX11-TRUE16-NEXT: v_dual_mov_b32 v7, v35 :: v_dual_mov_b32 v8, v44
+; GFX11-TRUE16-NEXT: v_dual_mov_b32 v11, v77 :: v_dual_mov_b32 v12, v90
+; GFX11-TRUE16-NEXT: v_mov_b32_e32 v13, v104
+; GFX11-TRUE16-NEXT: v_dual_mov_b32 v15, v135 :: v_dual_mov_b32 v16, v152
+; GFX11-TRUE16-NEXT: v_dual_mov_b32 v17, v170 :: v_dual_mov_b32 v18, v188
+; GFX11-TRUE16-NEXT: v_dual_mov_b32 v19, v187 :: v_dual_mov_b32 v20, v186
+; GFX11-TRUE16-NEXT: v_mov_b32_e32 v21, v185
+; GFX11-TRUE16-NEXT: s_clause 0x1f
+; GFX11-TRUE16-NEXT: scratch_load_b32 v188, off, s32
+; GFX11-TRUE16-NEXT: scratch_load_b32 v187, off, s32 offset:4
+; GFX11-TRUE16-NEXT: scratch_load_b32 v186, off, s32 offset:8
+; GFX11-TRUE16-NEXT: scratch_load_b32 v185, off, s32 offset:12
+; GFX11-TRUE16-NEXT: scratch_load_b32 v184, off, s32 offset:16
+; GFX11-TRUE16-NEXT: scratch_load_b32 v175, off, s32 offset:20
+; GFX11-TRUE16-NEXT: scratch_load_b32 v174, off, s32 offset:24
+; GFX11-TRUE16-NEXT: scratch_load_b32 v173, off, s32 offset:28
+; GFX11-TRUE16-NEXT: scratch_load_b32 v172, off, s32 offset:32
+; GFX11-TRUE16-NEXT: scratch_load_b32 v171, off, s32 offset:36
+; GFX11-TRUE16-NEXT: scratch_load_b32 v170, off, s32 offset:40
+; GFX11-TRUE16-NEXT: scratch_load_b32 v169, off, s32 offset:44
+; GFX11-TRUE16-NEXT: scratch_load_b32 v168, off, s32 offset:48
+; GFX11-TRUE16-NEXT: scratch_load_b32 v159, off, s32 offset:52
+; GFX11-TRUE16-NEXT: scratch_load_b32 v158, off, s32 offset:56
+; GFX11-TRUE16-NEXT: scratch_load_b32 v157, off, s32 offset:60
+; GFX11-TRUE16-NEXT: scratch_load_b32 v156, off, s32 offset:64
+; GFX11-TRUE16-NEXT: scratch_load_b32 v155, off, s32 offset:68
+; GFX11-TRUE16-NEXT: scratch_load_b32 v154, off, s32 offset:72
+; GFX11-TRUE16-NEXT: scratch_load_b32 v153, off, s32 offset:76
+; GFX11-TRUE16-NEXT: scratch_load_b32 v152, off, s32 offset:80
+; GFX11-TRUE16-NEXT: scratch_load_b32 v143, off, s32 offset:84
+; GFX11-TRUE16-NEXT: scratch_load_b32 v142, off, s32 offset:88
+; GFX11-TRUE16-NEXT: scratch_load_b32 v141, off, s32 offset:92
+; GFX11-TRUE16-NEXT: scratch_load_b32 v140, off, s32 offset:96
+; GFX11-TRUE16-NEXT: scratch_load_b32 v139, off, s32 offset:100
+; GFX11-TRUE16-NEXT: scratch_load_b32 v138, off, s32 offset:104
+; GFX11-TRUE16-NEXT: scratch_load_b32 v137, off, s32 offset:108
+; GFX11-TRUE16-NEXT: scratch_load_b32 v136, off, s32 offset:112
+; GFX11-TRUE16-NEXT: scratch_load_b32 v127, off, s32 offset:116
+; GFX11-TRUE16-NEXT: scratch_load_b32 v126, off, s32 offset:120
+; GFX11-TRUE16-NEXT: scratch_load_b32 v125, off, s32 offset:124
+; GFX11-TRUE16-NEXT: s_clause 0x1f
+; GFX11-TRUE16-NEXT: scratch_load_b32 v124, off, s32 offset:128
+; GFX11-TRUE16-NEXT: scratch_load_b32 v123, off, s32 offset:132
+; GFX11-TRUE16-NEXT: scratch_load_b32 v122, off, s32 offset:136
+; GFX11-TRUE16-NEXT: scratch_load_b32 v121, off, s32 offset:140
+; GFX11-TRUE16-NEXT: scratch_load_b32 v120, off, s32 offset:144
+; GFX11-TRUE16-NEXT: scratch_load_b32 v111, off, s32 offset:148
+; GFX11-TRUE16-NEXT: scratch_load_b32 v110, off, s32 offset:152
+; GFX11-TRUE16-NEXT: scratch_load_b32 v109, off, s32 offset:156
+; GFX11-TRUE16-NEXT: scratch_load_b32 v108, off, s32 offset:160
+; GFX11-TRUE16-NEXT: scratch_load_b32 v107, off, s32 offset:164
+; GFX11-TRUE16-NEXT: scratch_load_b32 v106, off, s32 offset:168
+; GFX11-TRUE16-NEXT: scratch_load_b32 v105, off, s32 offset:172
+; GFX11-TRUE16-NEXT: scratch_load_b32 v104, off, s32 offset:176
+; GFX11-TRUE16-NEXT: scratch_load_b32 v95, off, s32 offset:180
+; GFX11-TRUE16-NEXT: scratch_load_b32 v94, off, s32 offset:184
+; GFX11-TRUE16-NEXT: scratch_load_b32 v93, off, s32 offset:188
+; GFX11-TRUE16-NEXT: scratch_load_b32 v92, off, s32 offset:192
+; GFX11-TRUE16-NEXT: scratch_load_b32 v91, off, s32 offset:196
+; GFX11-TRUE16-NEXT: scratch_load_b32 v90, off, s32 offset:200
+; GFX11-TRUE16-NEXT: scratch_load_b32 v89, off, s32 offset:204
+; GFX11-TRUE16-NEXT: scratch_load_b32 v88, off, s32 offset:208
+; GFX11-TRUE16-NEXT: scratch_load_b32 v79, off, s32 offset:212
+; GFX11-TRUE16-NEXT: scratch_load_b32 v78, off, s32 offset:216
+; GFX11-TRUE16-NEXT: scratch_load_b32 v77, off, s32 offset:220
+; GFX11-TRUE16-NEXT: scratch_load_b32 v76, off, s32 offset:224
+; GFX11-TRUE16-NEXT: scratch_load_b32 v75, off, s32 offset:228
+; GFX11-TRUE16-NEXT: scratch_load_b32 v74, off, s32 offset:232
+; GFX11-TRUE16-NEXT: scratch_load_b32 v73, off, s32 offset:236
+; GFX11-TRUE16-NEXT: scratch_load_b32 v72, off, s32 offset:240
+; GFX11-TRUE16-NEXT: scratch_load_b32 v63, off, s32 offset:244
+; GFX11-TRUE16-NEXT: scratch_load_b32 v62, off, s32 offset:248
+; GFX11-TRUE16-NEXT: scratch_load_b32 v61, off, s32 offset:252
+; GFX11-TRUE16-NEXT: s_clause 0xc
+; GFX11-TRUE16-NEXT: scratch_load_b32 v60, off, s32 offset:256
+; GFX11-TRUE16-NEXT: scratch_load_b32 v59, off, s32 offset:260
+; GFX11-TRUE16-NEXT: scratch_load_b32 v58, off, s32 offset:264
+; GFX11-TRUE16-NEXT: scratch_load_b32 v57, off, s32 offset:268
+; GFX11-TRUE16-NEXT: scratch_load_b32 v56, off, s32 offset:272
+; GFX11-TRUE16-NEXT: scratch_load_b32 v47, off, s32 offset:276
+; GFX11-TRUE16-NEXT: scratch_load_b32 v46, off, s32 offset:280
+; GFX11-TRUE16-NEXT: scratch_load_b32 v45, off, s32 offset:284
+; GFX11-TRUE16-NEXT: scratch_load_b32 v44, off, s32 offset:288
+; GFX11-TRUE16-NEXT: scratch_load_b32 v43, off, s32 offset:292
+; GFX11-TRUE16-NEXT: scratch_load_b32 v42, off, s32 offset:296
+; GFX11-TRUE16-NEXT: scratch_load_b32 v41, off, s32 offset:300
+; GFX11-TRUE16-NEXT: scratch_load_b32 v40, off, s32 offset:304
+; GFX11-TRUE16-NEXT: v_dual_mov_b32 v3, v9 :: v_dual_mov_b32 v4, v14
+; GFX11-TRUE16-NEXT: v_dual_mov_b32 v9, v54 :: v_dual_mov_b32 v10, v65
+; GFX11-TRUE16-NEXT: v_mov_b32_e32 v14, v119
+; GFX11-TRUE16-NEXT: s_waitcnt vmcnt(0)
; GFX11-TRUE16-NEXT: s_setpc_b64 s[30:31]
; GFX11-TRUE16-NEXT: .LBB47_4:
; GFX11-TRUE16-NEXT: ; implicit-def: $vgpr0_vgpr1_vgpr2_vgpr3_vgpr4_vgpr5_vgpr6_vgpr7_vgpr8_vgpr9_vgpr10_vgpr11_vgpr12_vgpr13_vgpr14_vgpr15_vgpr16_vgpr17_vgpr18_vgpr19_vgpr20_vgpr21_vgpr22_vgpr23_vgpr24_vgpr25_vgpr26_vgpr27_vgpr28_vgpr29_vgpr30_vgpr31
+; GFX11-TRUE16-NEXT: ; implicit-def: $vgpr1_vgpr2_vgpr3_vgpr4_vgpr5_vgpr6_vgpr7_vgpr8_vgpr9_vgpr10_vgpr11_vgpr12_vgpr13_vgpr14_vgpr15_vgpr16_vgpr17_vgpr18_vgpr19_vgpr20_vgpr21_vgpr22_vgpr23_vgpr24_vgpr25_vgpr26_vgpr27_vgpr28_vgpr29_vgpr30_vgpr31_vgpr32
+; GFX11-TRUE16-NEXT: ; implicit-def: $vgpr3_vgpr4_vgpr5_vgpr6_vgpr7_vgpr8_vgpr9_vgpr10_vgpr11_vgpr12_vgpr13_vgpr14_vgpr15_vgpr16_vgpr17_vgpr18_vgpr19_vgpr20_vgpr21_vgpr22_vgpr23_vgpr24_vgpr25_vgpr26_vgpr27_vgpr28_vgpr29_vgpr30_vgpr31_vgpr32_vgpr33_vgpr34
+; GFX11-TRUE16-NEXT: ; implicit-def: $vgpr6_vgpr7_vgpr8_vgpr9_vgpr10_vgpr11_vgpr12_vgpr13_vgpr14_vgpr15_vgpr16_vgpr17_vgpr18_vgpr19_vgpr20_vgpr21_vgpr22_vgpr23_vgpr24_vgpr25_vgpr26_vgpr27_vgpr28_vgpr29_vgpr30_vgpr31_vgpr32_vgpr33_vgpr34_vgpr35_vgpr36_vgpr37
+; GFX11-TRUE16-NEXT: ; implicit-def: $vgpr10_vgpr11_vgpr12_vgpr13_vgpr14_vgpr15_vgpr16_vgpr17_vgpr18_vgpr19_vgpr20_vgpr21_vgpr22_vgpr23_vgpr24_vgpr25_vgpr26_vgpr27_vgpr28_vgpr29_vgpr30_vgpr31_vgpr32_vgpr33_vgpr34_vgpr35_vgpr36_vgpr37_vgpr38_vgpr39_vgpr40_vgpr41
+; GFX11-TRUE16-NEXT: ; implicit-def: $vgpr15_vgpr16_vgpr17_vgpr18_vgpr19_vgpr20_vgpr21_vgpr22_vgpr23_vgpr24_vgpr25_vgpr26_vgpr27_vgpr28_vgpr29_vgpr30_vgpr31_vgpr32_vgpr33_vgpr34_vgpr35_vgpr36_vgpr37_vgpr38_vgpr39_vgpr40_vgpr41_vgpr42_vgpr43_vgpr44_vgpr45_vgpr46
+; GFX11-TRUE16-NEXT: ; implicit-def: $vgpr21_vgpr22_vgpr23_vgpr24_vgpr25_vgpr26_vgpr27_vgpr28_vgpr29_vgpr30_vgpr31_vgpr32_vgpr33_vgpr34_vgpr35_vgpr36_vgpr37_vgpr38_vgpr39_vgpr40_vgpr41_vgpr42_vgpr43_vgpr44_vgpr45_vgpr46_vgpr47_vgpr48_vgpr49_vgpr50_vgpr51_vgpr52
+; GFX11-TRUE16-NEXT: ; implicit-def: $vgpr28_vgpr29_vgpr30_vgpr31_vgpr32_vgpr33_vgpr34_vgpr35_vgpr36_vgpr37_vgpr38_vgpr39_vgpr40_vgpr41_vgpr42_vgpr43_vgpr44_vgpr45_vgpr46_vgpr47_vgpr48_vgpr49_vgpr50_vgpr51_vgpr52_vgpr53_vgpr54_vgpr55_vgpr56_vgpr57_vgpr58_vgpr59
+; GFX11-TRUE16-NEXT: ; implicit-def: $vgpr36_vgpr37_vgpr38_vgpr39_vgpr40_vgpr41_vgpr42_vgpr43_vgpr44_vgpr45_vgpr46_vgpr47_vgpr48_vgpr49_vgpr50_vgpr51_vgpr52_vgpr53_vgpr54_vgpr55_vgpr56_vgpr57_vgpr58_vgpr59_vgpr60_vgpr61_vgpr62_vgpr63_vgpr64_vgpr65_vgpr66_vgpr67
+; GFX11-TRUE16-NEXT: ; implicit-def: $vgpr45_vgpr46_vgpr47_vgpr48_vgpr49_vgpr50_vgpr51_vgpr52_vgpr53_vgpr54_vgpr55_vgpr56_vgpr57_vgpr58_vgpr59_vgpr60_vgpr61_vgpr62_vgpr63_vgpr64_vgpr65_vgpr66_vgpr67_vgpr68_vgpr69_vgpr70_vgpr71_vgpr72_vgpr73_vgpr74_vgpr75_vgpr76
+; GFX11-TRUE16-NEXT: ; implicit-def: $vgpr55_vgpr56_vgpr57_vgpr58_vgpr59_vgpr60_vgpr61_vgpr62_vgpr63_vgpr64_vgpr65_vgpr66_vgpr67_vgpr68_vgpr69_vgpr70_vgpr71_vgpr72_vgpr73_vgpr74_vgpr75_vgpr76_vgpr77_vgpr78_vgpr79_vgpr80_vgpr81_vgpr82_vgpr83_vgpr84_vgpr85_vgpr86
+; GFX11-TRUE16-NEXT: ; implicit-def: $vgpr66_vgpr67_vgpr68_vgpr69_vgpr70_vgpr71_vgpr72_vgpr73_vgpr74_vgpr75_vgpr76_vgpr77_vgpr78_vgpr79_vgpr80_vgpr81_vgpr82_vgpr83_vgpr84_vgpr85_vgpr86_vgpr87_vgpr88_vgpr89_vgpr90_vgpr91_vgpr92_vgpr93_vgpr94_vgpr95_vgpr96_vgpr97
+; GFX11-TRUE16-NEXT: ; implicit-def: $vgpr78_vgpr79_vgpr80_vgpr81_vgpr82_vgpr83_vgpr84_vgpr85_vgpr86_vgpr87_vgpr88_vgpr89_vgpr90_vgpr91_vgpr92_vgpr93_vgpr94_vgpr95_vgpr96_vgpr97_vgpr98_vgpr99_vgpr100_vgpr101_vgpr102_vgpr103_vgpr104_vgpr105_vgpr106_vgpr107_vgpr108_vgpr109
+; GFX11-TRUE16-NEXT: ; implicit-def: $vgpr91_vgpr92_vgpr93_vgpr94_vgpr95_vgpr96_vgpr97_vgpr98_vgpr99_vgpr100_vgpr101_vgpr102_vgpr103_vgpr104_vgpr105_vgpr106_vgpr107_vgpr108_vgpr109_vgpr110_vgpr111_vgpr112_vgpr113_vgpr114_vgpr115_vgpr116_vgpr117_vgpr118_vgpr119_vgpr120_vgpr121_vgpr122
+; GFX11-TRUE16-NEXT: ; implicit-def: $vgpr105_vgpr106_vgpr107_vgpr108_vgpr109_vgpr110_vgpr111_vgpr112_vgpr113_vgpr114_vgpr115_vgpr116_vgpr117_vgpr118_vgpr119_vgpr120_vgpr121_vgpr122_vgpr123_vgpr124_vgpr125_vgpr126_vgpr127_vgpr128_vgpr129_vgpr130_vgpr131_vgpr132_vgpr133_vgpr134_vgpr135_vgpr136
+; GFX11-TRUE16-NEXT: ; implicit-def: $vgpr120_vgpr121_vgpr122_vgpr123_vgpr124_vgpr125_vgpr126_vgpr127_vgpr128_vgpr129_vgpr130_vgpr131_vgpr132_vgpr133_vgpr134_vgpr135_vgpr136_vgpr137_vgpr138_vgpr139_vgpr140_vgpr141_vgpr142_vgpr143_vgpr144_vgpr145_vgpr146_vgpr147_vgpr148_vgpr149_vgpr150_vgpr151
+; GFX11-TRUE16-NEXT: ; implicit-def: $vgpr136_vgpr137_vgpr138_vgpr139_vgpr140_vgpr141_vgpr142_vgpr143_vgpr144_vgpr145_vgpr146_vgpr147_vgpr148_vgpr149_vgpr150_vgpr151_vgpr152_vgpr153_vgpr154_vgpr155_vgpr156_vgpr157_vgpr158_vgpr159_vgpr160_vgpr161_vgpr162_vgpr163_vgpr164_vgpr165_vgpr166_vgpr167
+; GFX11-TRUE16-NEXT: ; implicit-def: $vgpr153_vgpr154_vgpr155_vgpr156_vgpr157_vgpr158_vgpr159_vgpr160_vgpr161_vgpr162_vgpr163_vgpr164_vgpr165_vgpr166_vgpr167_vgpr168_vgpr169_vgpr170_vgpr171_vgpr172_vgpr173_vgpr174_vgpr175_vgpr176_vgpr177_vgpr178_vgpr179_vgpr180_vgpr181_vgpr182_vgpr183_vgpr184
; GFX11-TRUE16-NEXT: s_branch .LBB47_2
;
; GFX11-FAKE16-LABEL: bitcast_v44f16_to_v11i64_scalar:
@@ -26793,154 +28111,294 @@ define inreg <44 x i16> @bitcast_v11f64_to_v44i16_scalar(<11 x double> inreg %a,
; GFX9-NEXT: ; implicit-def: $vgpr30
; GFX9-NEXT: s_branch .LBB49_2
;
-; GFX11-LABEL: bitcast_v11f64_to_v44i16_scalar:
-; GFX11: ; %bb.0:
-; GFX11-NEXT: s_waitcnt vmcnt(0) expcnt(0) lgkmcnt(0)
-; GFX11-NEXT: v_cmp_ne_u32_e32 vcc_lo, 0, v4
-; GFX11-NEXT: v_dual_mov_b32 v22, s0 :: v_dual_mov_b32 v23, s1
-; GFX11-NEXT: v_dual_mov_b32 v20, s2 :: v_dual_mov_b32 v21, s3
-; GFX11-NEXT: v_dual_mov_b32 v5, s16 :: v_dual_mov_b32 v6, s17
-; GFX11-NEXT: v_dual_mov_b32 v18, s18 :: v_dual_mov_b32 v19, s19
-; GFX11-NEXT: v_dual_mov_b32 v9, s20 :: v_dual_mov_b32 v10, s21
-; GFX11-NEXT: v_dual_mov_b32 v7, s22 :: v_dual_mov_b32 v8, s23
-; GFX11-NEXT: v_dual_mov_b32 v14, s24 :: v_dual_mov_b32 v15, s25
-; GFX11-NEXT: v_dual_mov_b32 v12, s26 :: v_dual_mov_b32 v13, s27
-; GFX11-NEXT: v_dual_mov_b32 v16, s28 :: v_dual_mov_b32 v17, s29
-; GFX11-NEXT: s_mov_b32 s0, 0
-; GFX11-NEXT: s_and_b32 s1, vcc_lo, exec_lo
-; GFX11-NEXT: s_cbranch_scc0 .LBB49_4
-; GFX11-NEXT: ; %bb.1: ; %cmp.false
-; GFX11-NEXT: v_lshrrev_b32_e32 v26, 16, v3
-; GFX11-NEXT: v_lshrrev_b32_e32 v27, 16, v2
-; GFX11-NEXT: v_lshrrev_b32_e32 v28, 16, v1
-; GFX11-NEXT: v_lshrrev_b32_e32 v29, 16, v0
-; GFX11-NEXT: v_lshrrev_b32_e32 v30, 16, v17
-; GFX11-NEXT: v_lshrrev_b32_e32 v31, 16, v16
-; GFX11-NEXT: v_lshrrev_b32_e32 v32, 16, v13
-; GFX11-NEXT: v_lshrrev_b32_e32 v33, 16, v12
-; GFX11-NEXT: v_lshrrev_b32_e32 v34, 16, v15
-; GFX11-NEXT: v_lshrrev_b32_e32 v35, 16, v14
-; GFX11-NEXT: v_lshrrev_b32_e32 v11, 16, v8
-; GFX11-NEXT: v_lshrrev_b32_e32 v36, 16, v7
-; GFX11-NEXT: v_lshrrev_b32_e32 v37, 16, v10
-; GFX11-NEXT: v_lshrrev_b32_e32 v38, 16, v9
-; GFX11-NEXT: v_lshrrev_b32_e32 v39, 16, v19
-; GFX11-NEXT: v_lshrrev_b32_e32 v48, 16, v18
-; GFX11-NEXT: v_lshrrev_b32_e32 v49, 16, v6
-; GFX11-NEXT: v_lshrrev_b32_e32 v4, 16, v5
-; GFX11-NEXT: v_lshrrev_b32_e32 v50, 16, v21
-; GFX11-NEXT: v_lshrrev_b32_e32 v51, 16, v20
-; GFX11-NEXT: v_lshrrev_b32_e32 v25, 16, v23
-; GFX11-NEXT: v_lshrrev_b32_e32 v24, 16, v22
-; GFX11-NEXT: s_and_not1_b32 vcc_lo, exec_lo, s0
-; GFX11-NEXT: s_cbranch_vccnz .LBB49_3
-; GFX11-NEXT: .LBB49_2: ; %cmp.true
-; GFX11-NEXT: v_add_f64 v[2:3], v[2:3], 1.0
-; GFX11-NEXT: v_add_f64 v[0:1], v[0:1], 1.0
-; GFX11-NEXT: v_add_f64 v[16:17], v[16:17], 1.0
-; GFX11-NEXT: v_add_f64 v[12:13], v[12:13], 1.0
-; GFX11-NEXT: v_add_f64 v[14:15], v[14:15], 1.0
-; GFX11-NEXT: v_add_f64 v[7:8], v[7:8], 1.0
-; GFX11-NEXT: v_add_f64 v[9:10], v[9:10], 1.0
-; GFX11-NEXT: v_add_f64 v[18:19], v[18:19], 1.0
-; GFX11-NEXT: v_add_f64 v[5:6], v[5:6], 1.0
-; GFX11-NEXT: v_add_f64 v[20:21], v[20:21], 1.0
-; GFX11-NEXT: v_add_f64 v[22:23], v[22:23], 1.0
-; GFX11-NEXT: v_lshrrev_b32_e32 v26, 16, v3
-; GFX11-NEXT: v_lshrrev_b32_e32 v27, 16, v2
-; GFX11-NEXT: v_lshrrev_b32_e32 v28, 16, v1
-; GFX11-NEXT: v_lshrrev_b32_e32 v29, 16, v0
-; GFX11-NEXT: v_lshrrev_b32_e32 v30, 16, v17
-; GFX11-NEXT: v_lshrrev_b32_e32 v31, 16, v16
-; GFX11-NEXT: v_lshrrev_b32_e32 v32, 16, v13
-; GFX11-NEXT: v_lshrrev_b32_e32 v33, 16, v12
-; GFX11-NEXT: v_lshrrev_b32_e32 v34, 16, v15
-; GFX11-NEXT: v_lshrrev_b32_e32 v35, 16, v14
-; GFX11-NEXT: v_lshrrev_b32_e32 v11, 16, v8
-; GFX11-NEXT: v_lshrrev_b32_e32 v36, 16, v7
-; GFX11-NEXT: v_lshrrev_b32_e32 v37, 16, v10
-; GFX11-NEXT: v_lshrrev_b32_e32 v38, 16, v9
-; GFX11-NEXT: v_lshrrev_b32_e32 v39, 16, v19
-; GFX11-NEXT: v_lshrrev_b32_e32 v48, 16, v18
-; GFX11-NEXT: v_lshrrev_b32_e32 v49, 16, v6
-; GFX11-NEXT: v_lshrrev_b32_e32 v4, 16, v5
-; GFX11-NEXT: v_lshrrev_b32_e32 v50, 16, v21
-; GFX11-NEXT: v_lshrrev_b32_e32 v51, 16, v20
-; GFX11-NEXT: v_lshrrev_b32_e32 v25, 16, v23
-; GFX11-NEXT: v_lshrrev_b32_e32 v24, 16, v22
-; GFX11-NEXT: .LBB49_3: ; %end
-; GFX11-NEXT: v_and_b32_e32 v23, 0xffff, v23
-; GFX11-NEXT: v_and_b32_e32 v21, 0xffff, v21
-; GFX11-NEXT: v_and_b32_e32 v5, 0xffff, v5
-; GFX11-NEXT: v_and_b32_e32 v6, 0xffff, v6
-; GFX11-NEXT: v_and_b32_e32 v18, 0xffff, v18
-; GFX11-NEXT: v_lshl_or_b32 v25, v25, 16, v23
-; GFX11-NEXT: v_and_b32_e32 v22, 0xffff, v22
-; GFX11-NEXT: v_lshl_or_b32 v23, v50, 16, v21
-; GFX11-NEXT: v_and_b32_e32 v20, 0xffff, v20
-; GFX11-NEXT: v_lshl_or_b32 v4, v4, 16, v5
-; GFX11-NEXT: v_lshl_or_b32 v5, v49, 16, v6
-; GFX11-NEXT: v_lshl_or_b32 v6, v48, 16, v18
-; GFX11-NEXT: v_and_b32_e32 v18, 0xffff, v19
-; GFX11-NEXT: v_and_b32_e32 v9, 0xffff, v9
-; GFX11-NEXT: v_and_b32_e32 v10, 0xffff, v10
-; GFX11-NEXT: v_and_b32_e32 v19, 0xffff, v7
-; GFX11-NEXT: v_and_b32_e32 v3, 0xffff, v3
-; GFX11-NEXT: v_lshl_or_b32 v24, v24, 16, v22
-; GFX11-NEXT: v_lshl_or_b32 v7, v39, 16, v18
-; GFX11-NEXT: v_and_b32_e32 v14, 0xffff, v14
-; GFX11-NEXT: v_and_b32_e32 v15, 0xffff, v15
-; GFX11-NEXT: v_and_b32_e32 v18, 0xffff, v12
-; GFX11-NEXT: v_and_b32_e32 v16, 0xffff, v16
-; GFX11-NEXT: v_and_b32_e32 v17, 0xffff, v17
-; GFX11-NEXT: v_and_b32_e32 v0, 0xffff, v0
-; GFX11-NEXT: v_and_b32_e32 v1, 0xffff, v1
-; GFX11-NEXT: v_and_b32_e32 v2, 0xffff, v2
-; GFX11-NEXT: v_lshl_or_b32 v21, v26, 16, v3
-; GFX11-NEXT: v_mov_b32_e32 v3, v23
-; GFX11-NEXT: v_lshl_or_b32 v22, v51, 16, v20
-; GFX11-NEXT: v_and_b32_e32 v20, 0xffff, v8
-; GFX11-NEXT: v_lshl_or_b32 v8, v38, 16, v9
-; GFX11-NEXT: v_lshl_or_b32 v9, v37, 16, v10
-; GFX11-NEXT: v_lshl_or_b32 v10, v36, 16, v19
-; GFX11-NEXT: v_and_b32_e32 v19, 0xffff, v13
-; GFX11-NEXT: v_lshl_or_b32 v11, v11, 16, v20
-; GFX11-NEXT: v_lshl_or_b32 v12, v35, 16, v14
-; GFX11-NEXT: v_lshl_or_b32 v13, v34, 16, v15
-; GFX11-NEXT: v_lshl_or_b32 v14, v33, 16, v18
-; GFX11-NEXT: v_lshl_or_b32 v15, v32, 16, v19
-; GFX11-NEXT: v_lshl_or_b32 v16, v31, 16, v16
-; GFX11-NEXT: v_lshl_or_b32 v17, v30, 16, v17
-; GFX11-NEXT: v_lshl_or_b32 v18, v29, 16, v0
-; GFX11-NEXT: v_lshl_or_b32 v19, v28, 16, v1
-; GFX11-NEXT: v_lshl_or_b32 v20, v27, 16, v2
-; GFX11-NEXT: v_dual_mov_b32 v0, v24 :: v_dual_mov_b32 v1, v25
-; GFX11-NEXT: v_mov_b32_e32 v2, v22
-; GFX11-NEXT: s_setpc_b64 s[30:31]
-; GFX11-NEXT: .LBB49_4:
-; GFX11-NEXT: ; implicit-def: $vgpr24
-; GFX11-NEXT: ; implicit-def: $vgpr25
-; GFX11-NEXT: ; implicit-def: $vgpr51
-; GFX11-NEXT: ; implicit-def: $vgpr50
-; GFX11-NEXT: ; implicit-def: $vgpr4
-; GFX11-NEXT: ; implicit-def: $vgpr49
-; GFX11-NEXT: ; implicit-def: $vgpr48
-; GFX11-NEXT: ; implicit-def: $vgpr39
-; GFX11-NEXT: ; implicit-def: $vgpr38
-; GFX11-NEXT: ; implicit-def: $vgpr37
-; GFX11-NEXT: ; implicit-def: $vgpr36
-; GFX11-NEXT: ; implicit-def: $vgpr11
-; GFX11-NEXT: ; implicit-def: $vgpr35
-; GFX11-NEXT: ; implicit-def: $vgpr34
-; GFX11-NEXT: ; implicit-def: $vgpr33
-; GFX11-NEXT: ; implicit-def: $vgpr32
-; GFX11-NEXT: ; implicit-def: $vgpr31
-; GFX11-NEXT: ; implicit-def: $vgpr30
-; GFX11-NEXT: ; implicit-def: $vgpr29
-; GFX11-NEXT: ; implicit-def: $vgpr28
-; GFX11-NEXT: ; implicit-def: $vgpr27
-; GFX11-NEXT: ; implicit-def: $vgpr26
-; GFX11-NEXT: s_branch .LBB49_2
+; GFX11-TRUE16-LABEL: bitcast_v11f64_to_v44i16_scalar:
+; GFX11-TRUE16: ; %bb.0:
+; GFX11-TRUE16-NEXT: s_waitcnt vmcnt(0) expcnt(0) lgkmcnt(0)
+; GFX11-TRUE16-NEXT: v_dual_mov_b32 v16, v4 :: v_dual_mov_b32 v21, v3
+; GFX11-TRUE16-NEXT: v_dual_mov_b32 v20, v2 :: v_dual_mov_b32 v19, v1
+; GFX11-TRUE16-NEXT: v_dual_mov_b32 v18, v0 :: v_dual_mov_b32 v1, s1
+; GFX11-TRUE16-NEXT: s_delay_alu instid0(VALU_DEP_3)
+; GFX11-TRUE16-NEXT: v_cmp_ne_u32_e32 vcc_lo, 0, v16
+; GFX11-TRUE16-NEXT: v_dual_mov_b32 v0, s0 :: v_dual_mov_b32 v3, s3
+; GFX11-TRUE16-NEXT: v_dual_mov_b32 v2, s2 :: v_dual_mov_b32 v5, s17
+; GFX11-TRUE16-NEXT: v_dual_mov_b32 v4, s16 :: v_dual_mov_b32 v7, s19
+; GFX11-TRUE16-NEXT: v_dual_mov_b32 v6, s18 :: v_dual_mov_b32 v9, s21
+; GFX11-TRUE16-NEXT: v_dual_mov_b32 v8, s20 :: v_dual_mov_b32 v11, s23
+; GFX11-TRUE16-NEXT: v_dual_mov_b32 v10, s22 :: v_dual_mov_b32 v13, s25
+; GFX11-TRUE16-NEXT: v_dual_mov_b32 v12, s24 :: v_dual_mov_b32 v15, s27
+; GFX11-TRUE16-NEXT: v_dual_mov_b32 v14, s26 :: v_dual_mov_b32 v17, s29
+; GFX11-TRUE16-NEXT: v_mov_b32_e32 v16, s28
+; GFX11-TRUE16-NEXT: s_mov_b32 s0, 0
+; GFX11-TRUE16-NEXT: s_and_b32 s1, vcc_lo, exec_lo
+; GFX11-TRUE16-NEXT: s_cbranch_scc0 .LBB49_4
+; GFX11-TRUE16-NEXT: ; %bb.1: ; %cmp.false
+; GFX11-TRUE16-NEXT: v_lshrrev_b32_e32 v22, 16, v21
+; GFX11-TRUE16-NEXT: v_lshrrev_b32_e32 v23, 16, v20
+; GFX11-TRUE16-NEXT: v_lshrrev_b32_e32 v24, 16, v19
+; GFX11-TRUE16-NEXT: v_lshrrev_b32_e32 v25, 16, v18
+; GFX11-TRUE16-NEXT: v_lshrrev_b32_e32 v26, 16, v17
+; GFX11-TRUE16-NEXT: v_lshrrev_b32_e32 v27, 16, v16
+; GFX11-TRUE16-NEXT: v_lshrrev_b32_e32 v28, 16, v15
+; GFX11-TRUE16-NEXT: v_lshrrev_b32_e32 v29, 16, v14
+; GFX11-TRUE16-NEXT: v_lshrrev_b32_e32 v30, 16, v13
+; GFX11-TRUE16-NEXT: v_lshrrev_b32_e32 v31, 16, v12
+; GFX11-TRUE16-NEXT: v_lshrrev_b32_e32 v32, 16, v11
+; GFX11-TRUE16-NEXT: v_lshrrev_b32_e32 v33, 16, v10
+; GFX11-TRUE16-NEXT: v_lshrrev_b32_e32 v34, 16, v9
+; GFX11-TRUE16-NEXT: v_lshrrev_b32_e32 v35, 16, v8
+; GFX11-TRUE16-NEXT: v_lshrrev_b32_e32 v36, 16, v7
+; GFX11-TRUE16-NEXT: v_lshrrev_b32_e32 v37, 16, v6
+; GFX11-TRUE16-NEXT: v_lshrrev_b32_e32 v38, 16, v5
+; GFX11-TRUE16-NEXT: v_lshrrev_b32_e32 v39, 16, v4
+; GFX11-TRUE16-NEXT: v_lshrrev_b32_e32 v48, 16, v3
+; GFX11-TRUE16-NEXT: v_lshrrev_b32_e32 v49, 16, v2
+; GFX11-TRUE16-NEXT: v_lshrrev_b32_e32 v50, 16, v1
+; GFX11-TRUE16-NEXT: v_lshrrev_b32_e32 v51, 16, v0
+; GFX11-TRUE16-NEXT: s_and_not1_b32 vcc_lo, exec_lo, s0
+; GFX11-TRUE16-NEXT: s_cbranch_vccnz .LBB49_3
+; GFX11-TRUE16-NEXT: .LBB49_2: ; %cmp.true
+; GFX11-TRUE16-NEXT: v_add_f64 v[20:21], v[20:21], 1.0
+; GFX11-TRUE16-NEXT: v_add_f64 v[18:19], v[18:19], 1.0
+; GFX11-TRUE16-NEXT: v_add_f64 v[16:17], v[16:17], 1.0
+; GFX11-TRUE16-NEXT: v_add_f64 v[14:15], v[14:15], 1.0
+; GFX11-TRUE16-NEXT: v_add_f64 v[12:13], v[12:13], 1.0
+; GFX11-TRUE16-NEXT: v_add_f64 v[10:11], v[10:11], 1.0
+; GFX11-TRUE16-NEXT: v_add_f64 v[8:9], v[8:9], 1.0
+; GFX11-TRUE16-NEXT: v_add_f64 v[6:7], v[6:7], 1.0
+; GFX11-TRUE16-NEXT: v_add_f64 v[4:5], v[4:5], 1.0
+; GFX11-TRUE16-NEXT: v_add_f64 v[2:3], v[2:3], 1.0
+; GFX11-TRUE16-NEXT: v_add_f64 v[0:1], v[0:1], 1.0
+; GFX11-TRUE16-NEXT: v_lshrrev_b32_e32 v22, 16, v21
+; GFX11-TRUE16-NEXT: v_lshrrev_b32_e32 v23, 16, v20
+; GFX11-TRUE16-NEXT: v_lshrrev_b32_e32 v24, 16, v19
+; GFX11-TRUE16-NEXT: v_lshrrev_b32_e32 v25, 16, v18
+; GFX11-TRUE16-NEXT: v_lshrrev_b32_e32 v26, 16, v17
+; GFX11-TRUE16-NEXT: v_lshrrev_b32_e32 v27, 16, v16
+; GFX11-TRUE16-NEXT: v_lshrrev_b32_e32 v28, 16, v15
+; GFX11-TRUE16-NEXT: v_lshrrev_b32_e32 v29, 16, v14
+; GFX11-TRUE16-NEXT: v_lshrrev_b32_e32 v30, 16, v13
+; GFX11-TRUE16-NEXT: v_lshrrev_b32_e32 v31, 16, v12
+; GFX11-TRUE16-NEXT: v_lshrrev_b32_e32 v32, 16, v11
+; GFX11-TRUE16-NEXT: v_lshrrev_b32_e32 v33, 16, v10
+; GFX11-TRUE16-NEXT: v_lshrrev_b32_e32 v34, 16, v9
+; GFX11-TRUE16-NEXT: v_lshrrev_b32_e32 v35, 16, v8
+; GFX11-TRUE16-NEXT: v_lshrrev_b32_e32 v36, 16, v7
+; GFX11-TRUE16-NEXT: v_lshrrev_b32_e32 v37, 16, v6
+; GFX11-TRUE16-NEXT: v_lshrrev_b32_e32 v38, 16, v5
+; GFX11-TRUE16-NEXT: v_lshrrev_b32_e32 v39, 16, v4
+; GFX11-TRUE16-NEXT: v_lshrrev_b32_e32 v48, 16, v3
+; GFX11-TRUE16-NEXT: v_lshrrev_b32_e32 v49, 16, v2
+; GFX11-TRUE16-NEXT: v_lshrrev_b32_e32 v50, 16, v1
+; GFX11-TRUE16-NEXT: v_lshrrev_b32_e32 v51, 16, v0
+; GFX11-TRUE16-NEXT: .LBB49_3: ; %end
+; GFX11-TRUE16-NEXT: s_delay_alu instid0(VALU_DEP_1) | instskip(NEXT) | instid1(VALU_DEP_4)
+; GFX11-TRUE16-NEXT: v_dual_mov_b32 v51, v51 :: v_dual_mov_b32 v50, v50
+; GFX11-TRUE16-NEXT: v_dual_mov_b32 v49, v49 :: v_dual_mov_b32 v48, v48
+; GFX11-TRUE16-NEXT: v_dual_mov_b32 v39, v39 :: v_dual_mov_b32 v38, v38
+; GFX11-TRUE16-NEXT: v_dual_mov_b32 v37, v37 :: v_dual_mov_b32 v36, v36
+; GFX11-TRUE16-NEXT: v_dual_mov_b32 v35, v35 :: v_dual_mov_b32 v34, v34
+; GFX11-TRUE16-NEXT: v_dual_mov_b32 v33, v33 :: v_dual_mov_b32 v32, v32
+; GFX11-TRUE16-NEXT: v_dual_mov_b32 v31, v31 :: v_dual_mov_b32 v30, v30
+; GFX11-TRUE16-NEXT: v_dual_mov_b32 v29, v29 :: v_dual_mov_b32 v28, v28
+; GFX11-TRUE16-NEXT: v_dual_mov_b32 v27, v27 :: v_dual_mov_b32 v26, v26
+; GFX11-TRUE16-NEXT: v_dual_mov_b32 v25, v25 :: v_dual_mov_b32 v24, v24
+; GFX11-TRUE16-NEXT: v_dual_mov_b32 v23, v23 :: v_dual_mov_b32 v22, v22
+; GFX11-TRUE16-NEXT: v_mov_b16_e32 v0.h, v51.l
+; GFX11-TRUE16-NEXT: v_mov_b16_e32 v1.h, v50.l
+; GFX11-TRUE16-NEXT: v_mov_b16_e32 v2.h, v49.l
+; GFX11-TRUE16-NEXT: v_mov_b16_e32 v3.h, v48.l
+; GFX11-TRUE16-NEXT: v_mov_b16_e32 v4.h, v39.l
+; GFX11-TRUE16-NEXT: v_mov_b16_e32 v5.h, v38.l
+; GFX11-TRUE16-NEXT: v_mov_b16_e32 v6.h, v37.l
+; GFX11-TRUE16-NEXT: v_mov_b16_e32 v7.h, v36.l
+; GFX11-TRUE16-NEXT: v_mov_b16_e32 v8.h, v35.l
+; GFX11-TRUE16-NEXT: v_mov_b16_e32 v9.h, v34.l
+; GFX11-TRUE16-NEXT: v_mov_b16_e32 v10.h, v33.l
+; GFX11-TRUE16-NEXT: v_mov_b16_e32 v11.h, v32.l
+; GFX11-TRUE16-NEXT: v_mov_b16_e32 v12.h, v31.l
+; GFX11-TRUE16-NEXT: v_mov_b16_e32 v13.h, v30.l
+; GFX11-TRUE16-NEXT: v_mov_b16_e32 v14.h, v29.l
+; GFX11-TRUE16-NEXT: v_mov_b16_e32 v15.h, v28.l
+; GFX11-TRUE16-NEXT: v_mov_b16_e32 v16.h, v27.l
+; GFX11-TRUE16-NEXT: v_mov_b16_e32 v17.h, v26.l
+; GFX11-TRUE16-NEXT: v_mov_b16_e32 v18.h, v25.l
+; GFX11-TRUE16-NEXT: v_mov_b16_e32 v19.h, v24.l
+; GFX11-TRUE16-NEXT: v_mov_b16_e32 v20.h, v23.l
+; GFX11-TRUE16-NEXT: v_mov_b16_e32 v21.h, v22.l
+; GFX11-TRUE16-NEXT: s_setpc_b64 s[30:31]
+; GFX11-TRUE16-NEXT: .LBB49_4:
+; GFX11-TRUE16-NEXT: ; implicit-def: $vgpr51
+; GFX11-TRUE16-NEXT: ; implicit-def: $vgpr50
+; GFX11-TRUE16-NEXT: ; implicit-def: $vgpr49
+; GFX11-TRUE16-NEXT: ; implicit-def: $vgpr48
+; GFX11-TRUE16-NEXT: ; implicit-def: $vgpr39
+; GFX11-TRUE16-NEXT: ; implicit-def: $vgpr38
+; GFX11-TRUE16-NEXT: ; implicit-def: $vgpr37
+; GFX11-TRUE16-NEXT: ; implicit-def: $vgpr36
+; GFX11-TRUE16-NEXT: ; implicit-def: $vgpr35
+; GFX11-TRUE16-NEXT: ; implicit-def: $vgpr34
+; GFX11-TRUE16-NEXT: ; implicit-def: $vgpr33
+; GFX11-TRUE16-NEXT: ; implicit-def: $vgpr32
+; GFX11-TRUE16-NEXT: ; implicit-def: $vgpr31
+; GFX11-TRUE16-NEXT: ; implicit-def: $vgpr30
+; GFX11-TRUE16-NEXT: ; implicit-def: $vgpr29
+; GFX11-TRUE16-NEXT: ; implicit-def: $vgpr28
+; GFX11-TRUE16-NEXT: ; implicit-def: $vgpr27
+; GFX11-TRUE16-NEXT: ; implicit-def: $vgpr26
+; GFX11-TRUE16-NEXT: ; implicit-def: $vgpr25
+; GFX11-TRUE16-NEXT: ; implicit-def: $vgpr24
+; GFX11-TRUE16-NEXT: ; implicit-def: $vgpr23
+; GFX11-TRUE16-NEXT: ; implicit-def: $vgpr22
+; GFX11-TRUE16-NEXT: s_branch .LBB49_2
+;
+; GFX11-FAKE16-LABEL: bitcast_v11f64_to_v44i16_scalar:
+; GFX11-FAKE16: ; %bb.0:
+; GFX11-FAKE16-NEXT: s_waitcnt vmcnt(0) expcnt(0) lgkmcnt(0)
+; GFX11-FAKE16-NEXT: v_cmp_ne_u32_e32 vcc_lo, 0, v4
+; GFX11-FAKE16-NEXT: v_dual_mov_b32 v22, s0 :: v_dual_mov_b32 v23, s1
+; GFX11-FAKE16-NEXT: v_dual_mov_b32 v20, s2 :: v_dual_mov_b32 v21, s3
+; GFX11-FAKE16-NEXT: v_dual_mov_b32 v5, s16 :: v_dual_mov_b32 v6, s17
+; GFX11-FAKE16-NEXT: v_dual_mov_b32 v18, s18 :: v_dual_mov_b32 v19, s19
+; GFX11-FAKE16-NEXT: v_dual_mov_b32 v9, s20 :: v_dual_mov_b32 v10, s21
+; GFX11-FAKE16-NEXT: v_dual_mov_b32 v7, s22 :: v_dual_mov_b32 v8, s23
+; GFX11-FAKE16-NEXT: v_dual_mov_b32 v14, s24 :: v_dual_mov_b32 v15, s25
+; GFX11-FAKE16-NEXT: v_dual_mov_b32 v12, s26 :: v_dual_mov_b32 v13, s27
+; GFX11-FAKE16-NEXT: v_dual_mov_b32 v16, s28 :: v_dual_mov_b32 v17, s29
+; GFX11-FAKE16-NEXT: s_mov_b32 s0, 0
+; GFX11-FAKE16-NEXT: s_and_b32 s1, vcc_lo, exec_lo
+; GFX11-FAKE16-NEXT: s_cbranch_scc0 .LBB49_4
+; GFX11-FAKE16-NEXT: ; %bb.1: ; %cmp.false
+; GFX11-FAKE16-NEXT: v_lshrrev_b32_e32 v26, 16, v3
+; GFX11-FAKE16-NEXT: v_lshrrev_b32_e32 v27, 16, v2
+; GFX11-FAKE16-NEXT: v_lshrrev_b32_e32 v28, 16, v1
+; GFX11-FAKE16-NEXT: v_lshrrev_b32_e32 v29, 16, v0
+; GFX11-FAKE16-NEXT: v_lshrrev_b32_e32 v30, 16, v17
+; GFX11-FAKE16-NEXT: v_lshrrev_b32_e32 v31, 16, v16
+; GFX11-FAKE16-NEXT: v_lshrrev_b32_e32 v32, 16, v13
+; GFX11-FAKE16-NEXT: v_lshrrev_b32_e32 v33, 16, v12
+; GFX11-FAKE16-NEXT: v_lshrrev_b32_e32 v34, 16, v15
+; GFX11-FAKE16-NEXT: v_lshrrev_b32_e32 v35, 16, v14
+; GFX11-FAKE16-NEXT: v_lshrrev_b32_e32 v11, 16, v8
+; GFX11-FAKE16-NEXT: v_lshrrev_b32_e32 v36, 16, v7
+; GFX11-FAKE16-NEXT: v_lshrrev_b32_e32 v37, 16, v10
+; GFX11-FAKE16-NEXT: v_lshrrev_b32_e32 v38, 16, v9
+; GFX11-FAKE16-NEXT: v_lshrrev_b32_e32 v39, 16, v19
+; GFX11-FAKE16-NEXT: v_lshrrev_b32_e32 v48, 16, v18
+; GFX11-FAKE16-NEXT: v_lshrrev_b32_e32 v49, 16, v6
+; GFX11-FAKE16-NEXT: v_lshrrev_b32_e32 v4, 16, v5
+; GFX11-FAKE16-NEXT: v_lshrrev_b32_e32 v50, 16, v21
+; GFX11-FAKE16-NEXT: v_lshrrev_b32_e32 v51, 16, v20
+; GFX11-FAKE16-NEXT: v_lshrrev_b32_e32 v25, 16, v23
+; GFX11-FAKE16-NEXT: v_lshrrev_b32_e32 v24, 16, v22
+; GFX11-FAKE16-NEXT: s_and_not1_b32 vcc_lo, exec_lo, s0
+; GFX11-FAKE16-NEXT: s_cbranch_vccnz .LBB49_3
+; GFX11-FAKE16-NEXT: .LBB49_2: ; %cmp.true
+; GFX11-FAKE16-NEXT: v_add_f64 v[2:3], v[2:3], 1.0
+; GFX11-FAKE16-NEXT: v_add_f64 v[0:1], v[0:1], 1.0
+; GFX11-FAKE16-NEXT: v_add_f64 v[16:17], v[16:17], 1.0
+; GFX11-FAKE16-NEXT: v_add_f64 v[12:13], v[12:13], 1.0
+; GFX11-FAKE16-NEXT: v_add_f64 v[14:15], v[14:15], 1.0
+; GFX11-FAKE16-NEXT: v_add_f64 v[7:8], v[7:8], 1.0
+; GFX11-FAKE16-NEXT: v_add_f64 v[9:10], v[9:10], 1.0
+; GFX11-FAKE16-NEXT: v_add_f64 v[18:19], v[18:19], 1.0
+; GFX11-FAKE16-NEXT: v_add_f64 v[5:6], v[5:6], 1.0
+; GFX11-FAKE16-NEXT: v_add_f64 v[20:21], v[20:21], 1.0
+; GFX11-FAKE16-NEXT: v_add_f64 v[22:23], v[22:23], 1.0
+; GFX11-FAKE16-NEXT: v_lshrrev_b32_e32 v26, 16, v3
+; GFX11-FAKE16-NEXT: v_lshrrev_b32_e32 v27, 16, v2
+; GFX11-FAKE16-NEXT: v_lshrrev_b32_e32 v28, 16, v1
+; GFX11-FAKE16-NEXT: v_lshrrev_b32_e32 v29, 16, v0
+; GFX11-FAKE16-NEXT: v_lshrrev_b32_e32 v30, 16, v17
+; GFX11-FAKE16-NEXT: v_lshrrev_b32_e32 v31, 16, v16
+; GFX11-FAKE16-NEXT: v_lshrrev_b32_e32 v32, 16, v13
+; GFX11-FAKE16-NEXT: v_lshrrev_b32_e32 v33, 16, v12
+; GFX11-FAKE16-NEXT: v_lshrrev_b32_e32 v34, 16, v15
+; GFX11-FAKE16-NEXT: v_lshrrev_b32_e32 v35, 16, v14
+; GFX11-FAKE16-NEXT: v_lshrrev_b32_e32 v11, 16, v8
+; GFX11-FAKE16-NEXT: v_lshrrev_b32_e32 v36, 16, v7
+; GFX11-FAKE16-NEXT: v_lshrrev_b32_e32 v37, 16, v10
+; GFX11-FAKE16-NEXT: v_lshrrev_b32_e32 v38, 16, v9
+; GFX11-FAKE16-NEXT: v_lshrrev_b32_e32 v39, 16, v19
+; GFX11-FAKE16-NEXT: v_lshrrev_b32_e32 v48, 16, v18
+; GFX11-FAKE16-NEXT: v_lshrrev_b32_e32 v49, 16, v6
+; GFX11-FAKE16-NEXT: v_lshrrev_b32_e32 v4, 16, v5
+; GFX11-FAKE16-NEXT: v_lshrrev_b32_e32 v50, 16, v21
+; GFX11-FAKE16-NEXT: v_lshrrev_b32_e32 v51, 16, v20
+; GFX11-FAKE16-NEXT: v_lshrrev_b32_e32 v25, 16, v23
+; GFX11-FAKE16-NEXT: v_lshrrev_b32_e32 v24, 16, v22
+; GFX11-FAKE16-NEXT: .LBB49_3: ; %end
+; GFX11-FAKE16-NEXT: v_and_b32_e32 v23, 0xffff, v23
+; GFX11-FAKE16-NEXT: v_and_b32_e32 v21, 0xffff, v21
+; GFX11-FAKE16-NEXT: v_and_b32_e32 v5, 0xffff, v5
+; GFX11-FAKE16-NEXT: v_and_b32_e32 v6, 0xffff, v6
+; GFX11-FAKE16-NEXT: v_and_b32_e32 v18, 0xffff, v18
+; GFX11-FAKE16-NEXT: v_lshl_or_b32 v25, v25, 16, v23
+; GFX11-FAKE16-NEXT: v_and_b32_e32 v22, 0xffff, v22
+; GFX11-FAKE16-NEXT: v_lshl_or_b32 v23, v50, 16, v21
+; GFX11-FAKE16-NEXT: v_and_b32_e32 v20, 0xffff, v20
+; GFX11-FAKE16-NEXT: v_lshl_or_b32 v4, v4, 16, v5
+; GFX11-FAKE16-NEXT: v_lshl_or_b32 v5, v49, 16, v6
+; GFX11-FAKE16-NEXT: v_lshl_or_b32 v6, v48, 16, v18
+; GFX11-FAKE16-NEXT: v_and_b32_e32 v18, 0xffff, v19
+; GFX11-FAKE16-NEXT: v_and_b32_e32 v9, 0xffff, v9
+; GFX11-FAKE16-NEXT: v_and_b32_e32 v10, 0xffff, v10
+; GFX11-FAKE16-NEXT: v_and_b32_e32 v19, 0xffff, v7
+; GFX11-FAKE16-NEXT: v_and_b32_e32 v3, 0xffff, v3
+; GFX11-FAKE16-NEXT: v_lshl_or_b32 v24, v24, 16, v22
+; GFX11-FAKE16-NEXT: v_lshl_or_b32 v7, v39, 16, v18
+; GFX11-FAKE16-NEXT: v_and_b32_e32 v14, 0xffff, v14
+; GFX11-FAKE16-NEXT: v_and_b32_e32 v15, 0xffff, v15
+; GFX11-FAKE16-NEXT: v_and_b32_e32 v18, 0xffff, v12
+; GFX11-FAKE16-NEXT: v_and_b32_e32 v16, 0xffff, v16
+; GFX11-FAKE16-NEXT: v_and_b32_e32 v17, 0xffff, v17
+; GFX11-FAKE16-NEXT: v_and_b32_e32 v0, 0xffff, v0
+; GFX11-FAKE16-NEXT: v_and_b32_e32 v1, 0xffff, v1
+; GFX11-FAKE16-NEXT: v_and_b32_e32 v2, 0xffff, v2
+; GFX11-FAKE16-NEXT: v_lshl_or_b32 v21, v26, 16, v3
+; GFX11-FAKE16-NEXT: v_mov_b32_e32 v3, v23
+; GFX11-FAKE16-NEXT: v_lshl_or_b32 v22, v51, 16, v20
+; GFX11-FAKE16-NEXT: v_and_b32_e32 v20, 0xffff, v8
+; GFX11-FAKE16-NEXT: v_lshl_or_b32 v8, v38, 16, v9
+; GFX11-FAKE16-NEXT: v_lshl_or_b32 v9, v37, 16, v10
+; GFX11-FAKE16-NEXT: v_lshl_or_b32 v10, v36, 16, v19
+; GFX11-FAKE16-NEXT: v_and_b32_e32 v19, 0xffff, v13
+; GFX11-FAKE16-NEXT: v_lshl_or_b32 v11, v11, 16, v20
+; GFX11-FAKE16-NEXT: v_lshl_or_b32 v12, v35, 16, v14
+; GFX11-FAKE16-NEXT: v_lshl_or_b32 v13, v34, 16, v15
+; GFX11-FAKE16-NEXT: v_lshl_or_b32 v14, v33, 16, v18
+; GFX11-FAKE16-NEXT: v_lshl_or_b32 v15, v32, 16, v19
+; GFX11-FAKE16-NEXT: v_lshl_or_b32 v16, v31, 16, v16
+; GFX11-FAKE16-NEXT: v_lshl_or_b32 v17, v30, 16, v17
+; GFX11-FAKE16-NEXT: v_lshl_or_b32 v18, v29, 16, v0
+; GFX11-FAKE16-NEXT: v_lshl_or_b32 v19, v28, 16, v1
+; GFX11-FAKE16-NEXT: v_lshl_or_b32 v20, v27, 16, v2
+; GFX11-FAKE16-NEXT: v_dual_mov_b32 v0, v24 :: v_dual_mov_b32 v1, v25
+; GFX11-FAKE16-NEXT: v_mov_b32_e32 v2, v22
+; GFX11-FAKE16-NEXT: s_setpc_b64 s[30:31]
+; GFX11-FAKE16-NEXT: .LBB49_4:
+; GFX11-FAKE16-NEXT: ; implicit-def: $vgpr24
+; GFX11-FAKE16-NEXT: ; implicit-def: $vgpr25
+; GFX11-FAKE16-NEXT: ; implicit-def: $vgpr51
+; GFX11-FAKE16-NEXT: ; implicit-def: $vgpr50
+; GFX11-FAKE16-NEXT: ; implicit-def: $vgpr4
+; GFX11-FAKE16-NEXT: ; implicit-def: $vgpr49
+; GFX11-FAKE16-NEXT: ; implicit-def: $vgpr48
+; GFX11-FAKE16-NEXT: ; implicit-def: $vgpr39
+; GFX11-FAKE16-NEXT: ; implicit-def: $vgpr38
+; GFX11-FAKE16-NEXT: ; implicit-def: $vgpr37
+; GFX11-FAKE16-NEXT: ; implicit-def: $vgpr36
+; GFX11-FAKE16-NEXT: ; implicit-def: $vgpr11
+; GFX11-FAKE16-NEXT: ; implicit-def: $vgpr35
+; GFX11-FAKE16-NEXT: ; implicit-def: $vgpr34
+; GFX11-FAKE16-NEXT: ; implicit-def: $vgpr33
+; GFX11-FAKE16-NEXT: ; implicit-def: $vgpr32
+; GFX11-FAKE16-NEXT: ; implicit-def: $vgpr31
+; GFX11-FAKE16-NEXT: ; implicit-def: $vgpr30
+; GFX11-FAKE16-NEXT: ; implicit-def: $vgpr29
+; GFX11-FAKE16-NEXT: ; implicit-def: $vgpr28
+; GFX11-FAKE16-NEXT: ; implicit-def: $vgpr27
+; GFX11-FAKE16-NEXT: ; implicit-def: $vgpr26
+; GFX11-FAKE16-NEXT: s_branch .LBB49_2
%cmp = icmp eq i32 %b, 0
br i1 %cmp, label %cmp.true, label %cmp.false
@@ -28498,105 +29956,278 @@ define inreg <11 x double> @bitcast_v44i16_to_v11f64_scalar(<44 x i16> inreg %a,
; GFX11-TRUE16-LABEL: bitcast_v44i16_to_v11f64_scalar:
; GFX11-TRUE16: ; %bb.0:
; GFX11-TRUE16-NEXT: s_waitcnt vmcnt(0) expcnt(0) lgkmcnt(0)
-; GFX11-TRUE16-NEXT: v_mov_b16_e32 v32.h, 0
; GFX11-TRUE16-NEXT: v_cmp_ne_u32_e32 vcc_lo, 0, v4
-; GFX11-TRUE16-NEXT: v_mov_b16_e32 v32.l, v3.h
-; GFX11-TRUE16-NEXT: v_mov_b16_e32 v33.l, v2.h
-; GFX11-TRUE16-NEXT: v_mov_b16_e32 v34.l, v1.h
-; GFX11-TRUE16-NEXT: v_mov_b16_e32 v33.h, v32.h
-; GFX11-TRUE16-NEXT: v_mov_b16_e32 v34.h, v32.h
-; GFX11-TRUE16-NEXT: v_mov_b16_e32 v35.l, v0.h
-; GFX11-TRUE16-NEXT: v_mov_b16_e32 v35.h, v32.h
-; GFX11-TRUE16-NEXT: v_and_b32_e32 v39, 0xffff, v0
-; GFX11-TRUE16-NEXT: v_and_b32_e32 v38, 0xffff, v1
-; GFX11-TRUE16-NEXT: v_and_b32_e32 v37, 0xffff, v2
-; GFX11-TRUE16-NEXT: v_and_b32_e32 v36, 0xffff, v3
-; GFX11-TRUE16-NEXT: s_lshr_b32 s41, s29, 16
-; GFX11-TRUE16-NEXT: s_lshr_b32 s42, s28, 16
-; GFX11-TRUE16-NEXT: s_lshr_b32 s43, s27, 16
-; GFX11-TRUE16-NEXT: s_lshr_b32 s15, s26, 16
-; GFX11-TRUE16-NEXT: s_lshr_b32 s14, s25, 16
-; GFX11-TRUE16-NEXT: s_lshr_b32 s13, s24, 16
-; GFX11-TRUE16-NEXT: s_lshr_b32 s12, s23, 16
-; GFX11-TRUE16-NEXT: s_lshr_b32 s11, s22, 16
-; GFX11-TRUE16-NEXT: s_lshr_b32 s10, s21, 16
-; GFX11-TRUE16-NEXT: s_lshr_b32 s9, s20, 16
-; GFX11-TRUE16-NEXT: s_lshr_b32 s8, s19, 16
-; GFX11-TRUE16-NEXT: s_lshr_b32 s7, s18, 16
-; GFX11-TRUE16-NEXT: s_lshr_b32 s6, s17, 16
-; GFX11-TRUE16-NEXT: s_lshr_b32 s5, s16, 16
-; GFX11-TRUE16-NEXT: s_lshr_b32 s44, s3, 16
-; GFX11-TRUE16-NEXT: s_lshr_b32 s45, s2, 16
-; GFX11-TRUE16-NEXT: s_lshr_b32 s46, s1, 16
-; GFX11-TRUE16-NEXT: s_lshr_b32 s4, s0, 16
-; GFX11-TRUE16-NEXT: s_mov_b32 s40, 0
-; GFX11-TRUE16-NEXT: s_pack_ll_b32_b16 s4, s0, s4
-; GFX11-TRUE16-NEXT: s_pack_ll_b32_b16 s1, s1, s46
-; GFX11-TRUE16-NEXT: s_pack_ll_b32_b16 s2, s2, s45
-; GFX11-TRUE16-NEXT: s_pack_ll_b32_b16 s3, s3, s44
-; GFX11-TRUE16-NEXT: s_pack_ll_b32_b16 s5, s16, s5
-; GFX11-TRUE16-NEXT: s_pack_ll_b32_b16 s6, s17, s6
-; GFX11-TRUE16-NEXT: s_pack_ll_b32_b16 s7, s18, s7
-; GFX11-TRUE16-NEXT: s_pack_ll_b32_b16 s8, s19, s8
-; GFX11-TRUE16-NEXT: s_pack_ll_b32_b16 s9, s20, s9
-; GFX11-TRUE16-NEXT: s_pack_ll_b32_b16 s10, s21, s10
-; GFX11-TRUE16-NEXT: s_pack_ll_b32_b16 s11, s22, s11
-; GFX11-TRUE16-NEXT: s_pack_ll_b32_b16 s12, s23, s12
-; GFX11-TRUE16-NEXT: s_pack_ll_b32_b16 s13, s24, s13
-; GFX11-TRUE16-NEXT: s_pack_ll_b32_b16 s14, s25, s14
-; GFX11-TRUE16-NEXT: s_pack_ll_b32_b16 s15, s26, s15
-; GFX11-TRUE16-NEXT: s_pack_ll_b32_b16 s16, s27, s43
-; GFX11-TRUE16-NEXT: s_pack_ll_b32_b16 s17, s28, s42
-; GFX11-TRUE16-NEXT: s_pack_ll_b32_b16 s0, s29, s41
+; GFX11-TRUE16-NEXT: s_clause 0x1f
+; GFX11-TRUE16-NEXT: scratch_store_b32 off, v40, s32 offset:304
+; GFX11-TRUE16-NEXT: scratch_store_b32 off, v41, s32 offset:300
+; GFX11-TRUE16-NEXT: scratch_store_b32 off, v42, s32 offset:296
+; GFX11-TRUE16-NEXT: scratch_store_b32 off, v43, s32 offset:292
+; GFX11-TRUE16-NEXT: scratch_store_b32 off, v44, s32 offset:288
+; GFX11-TRUE16-NEXT: scratch_store_b32 off, v45, s32 offset:284
+; GFX11-TRUE16-NEXT: scratch_store_b32 off, v46, s32 offset:280
+; GFX11-TRUE16-NEXT: scratch_store_b32 off, v47, s32 offset:276
+; GFX11-TRUE16-NEXT: scratch_store_b32 off, v56, s32 offset:272
+; GFX11-TRUE16-NEXT: scratch_store_b32 off, v57, s32 offset:268
+; GFX11-TRUE16-NEXT: scratch_store_b32 off, v58, s32 offset:264
+; GFX11-TRUE16-NEXT: scratch_store_b32 off, v59, s32 offset:260
+; GFX11-TRUE16-NEXT: scratch_store_b32 off, v60, s32 offset:256
+; GFX11-TRUE16-NEXT: scratch_store_b32 off, v61, s32 offset:252
+; GFX11-TRUE16-NEXT: scratch_store_b32 off, v62, s32 offset:248
+; GFX11-TRUE16-NEXT: scratch_store_b32 off, v63, s32 offset:244
+; GFX11-TRUE16-NEXT: scratch_store_b32 off, v72, s32 offset:240
+; GFX11-TRUE16-NEXT: scratch_store_b32 off, v73, s32 offset:236
+; GFX11-TRUE16-NEXT: scratch_store_b32 off, v74, s32 offset:232
+; GFX11-TRUE16-NEXT: scratch_store_b32 off, v75, s32 offset:228
+; GFX11-TRUE16-NEXT: scratch_store_b32 off, v76, s32 offset:224
+; GFX11-TRUE16-NEXT: scratch_store_b32 off, v77, s32 offset:220
+; GFX11-TRUE16-NEXT: scratch_store_b32 off, v78, s32 offset:216
+; GFX11-TRUE16-NEXT: scratch_store_b32 off, v79, s32 offset:212
+; GFX11-TRUE16-NEXT: scratch_store_b32 off, v88, s32 offset:208
+; GFX11-TRUE16-NEXT: scratch_store_b32 off, v89, s32 offset:204
+; GFX11-TRUE16-NEXT: scratch_store_b32 off, v90, s32 offset:200
+; GFX11-TRUE16-NEXT: scratch_store_b32 off, v91, s32 offset:196
+; GFX11-TRUE16-NEXT: scratch_store_b32 off, v92, s32 offset:192
+; GFX11-TRUE16-NEXT: scratch_store_b32 off, v93, s32 offset:188
+; GFX11-TRUE16-NEXT: scratch_store_b32 off, v94, s32 offset:184
+; GFX11-TRUE16-NEXT: scratch_store_b32 off, v95, s32 offset:180
+; GFX11-TRUE16-NEXT: s_clause 0x1f
+; GFX11-TRUE16-NEXT: scratch_store_b32 off, v104, s32 offset:176
+; GFX11-TRUE16-NEXT: scratch_store_b32 off, v105, s32 offset:172
+; GFX11-TRUE16-NEXT: scratch_store_b32 off, v106, s32 offset:168
+; GFX11-TRUE16-NEXT: scratch_store_b32 off, v107, s32 offset:164
+; GFX11-TRUE16-NEXT: scratch_store_b32 off, v108, s32 offset:160
+; GFX11-TRUE16-NEXT: scratch_store_b32 off, v109, s32 offset:156
+; GFX11-TRUE16-NEXT: scratch_store_b32 off, v110, s32 offset:152
+; GFX11-TRUE16-NEXT: scratch_store_b32 off, v111, s32 offset:148
+; GFX11-TRUE16-NEXT: scratch_store_b32 off, v120, s32 offset:144
+; GFX11-TRUE16-NEXT: scratch_store_b32 off, v121, s32 offset:140
+; GFX11-TRUE16-NEXT: scratch_store_b32 off, v122, s32 offset:136
+; GFX11-TRUE16-NEXT: scratch_store_b32 off, v123, s32 offset:132
+; GFX11-TRUE16-NEXT: scratch_store_b32 off, v124, s32 offset:128
+; GFX11-TRUE16-NEXT: scratch_store_b32 off, v125, s32 offset:124
+; GFX11-TRUE16-NEXT: scratch_store_b32 off, v126, s32 offset:120
+; GFX11-TRUE16-NEXT: scratch_store_b32 off, v127, s32 offset:116
+; GFX11-TRUE16-NEXT: scratch_store_b32 off, v136, s32 offset:112
+; GFX11-TRUE16-NEXT: scratch_store_b32 off, v137, s32 offset:108
+; GFX11-TRUE16-NEXT: scratch_store_b32 off, v138, s32 offset:104
+; GFX11-TRUE16-NEXT: scratch_store_b32 off, v139, s32 offset:100
+; GFX11-TRUE16-NEXT: scratch_store_b32 off, v140, s32 offset:96
+; GFX11-TRUE16-NEXT: scratch_store_b32 off, v141, s32 offset:92
+; GFX11-TRUE16-NEXT: scratch_store_b32 off, v142, s32 offset:88
+; GFX11-TRUE16-NEXT: scratch_store_b32 off, v143, s32 offset:84
+; GFX11-TRUE16-NEXT: scratch_store_b32 off, v152, s32 offset:80
+; GFX11-TRUE16-NEXT: scratch_store_b32 off, v153, s32 offset:76
+; GFX11-TRUE16-NEXT: scratch_store_b32 off, v154, s32 offset:72
+; GFX11-TRUE16-NEXT: scratch_store_b32 off, v155, s32 offset:68
+; GFX11-TRUE16-NEXT: scratch_store_b32 off, v156, s32 offset:64
+; GFX11-TRUE16-NEXT: scratch_store_b32 off, v157, s32 offset:60
+; GFX11-TRUE16-NEXT: scratch_store_b32 off, v158, s32 offset:56
+; GFX11-TRUE16-NEXT: scratch_store_b32 off, v159, s32 offset:52
+; GFX11-TRUE16-NEXT: s_clause 0xc
+; GFX11-TRUE16-NEXT: scratch_store_b32 off, v168, s32 offset:48
+; GFX11-TRUE16-NEXT: scratch_store_b32 off, v169, s32 offset:44
+; GFX11-TRUE16-NEXT: scratch_store_b32 off, v170, s32 offset:40
+; GFX11-TRUE16-NEXT: scratch_store_b32 off, v171, s32 offset:36
+; GFX11-TRUE16-NEXT: scratch_store_b32 off, v172, s32 offset:32
+; GFX11-TRUE16-NEXT: scratch_store_b32 off, v173, s32 offset:28
+; GFX11-TRUE16-NEXT: scratch_store_b32 off, v174, s32 offset:24
+; GFX11-TRUE16-NEXT: scratch_store_b32 off, v175, s32 offset:20
+; GFX11-TRUE16-NEXT: scratch_store_b32 off, v184, s32 offset:16
+; GFX11-TRUE16-NEXT: scratch_store_b32 off, v185, s32 offset:12
+; GFX11-TRUE16-NEXT: scratch_store_b32 off, v186, s32 offset:8
+; GFX11-TRUE16-NEXT: scratch_store_b32 off, v187, s32 offset:4
+; GFX11-TRUE16-NEXT: scratch_store_b32 off, v188, s32
+; GFX11-TRUE16-NEXT: v_dual_mov_b32 v185, v3 :: v_dual_mov_b32 v186, v2
+; GFX11-TRUE16-NEXT: v_dual_mov_b32 v187, v1 :: v_dual_mov_b32 v188, v0
+; GFX11-TRUE16-NEXT: s_lshr_b32 s15, s29, 16
+; GFX11-TRUE16-NEXT: s_lshr_b32 s14, s28, 16
+; GFX11-TRUE16-NEXT: s_lshr_b32 s13, s27, 16
+; GFX11-TRUE16-NEXT: s_lshr_b32 s12, s26, 16
+; GFX11-TRUE16-NEXT: s_lshr_b32 s11, s25, 16
+; GFX11-TRUE16-NEXT: s_lshr_b32 s10, s24, 16
+; GFX11-TRUE16-NEXT: s_lshr_b32 s9, s23, 16
+; GFX11-TRUE16-NEXT: s_lshr_b32 s8, s22, 16
+; GFX11-TRUE16-NEXT: s_lshr_b32 s7, s21, 16
+; GFX11-TRUE16-NEXT: s_lshr_b32 s6, s20, 16
+; GFX11-TRUE16-NEXT: s_lshr_b32 s5, s19, 16
+; GFX11-TRUE16-NEXT: s_lshr_b32 s4, s18, 16
+; GFX11-TRUE16-NEXT: s_lshr_b32 s43, s17, 16
+; GFX11-TRUE16-NEXT: s_lshr_b32 s44, s16, 16
+; GFX11-TRUE16-NEXT: s_lshr_b32 s45, s3, 16
+; GFX11-TRUE16-NEXT: s_lshr_b32 s46, s2, 16
+; GFX11-TRUE16-NEXT: s_lshr_b32 s41, s1, 16
+; GFX11-TRUE16-NEXT: s_lshr_b32 s40, s0, 16
+; GFX11-TRUE16-NEXT: s_mov_b32 s42, 0
+; GFX11-TRUE16-NEXT: s_pack_ll_b32_b16 s40, s0, s40
+; GFX11-TRUE16-NEXT: s_pack_ll_b32_b16 s41, s1, s41
+; GFX11-TRUE16-NEXT: s_pack_ll_b32_b16 s0, s2, s46
+; GFX11-TRUE16-NEXT: s_pack_ll_b32_b16 s1, s3, s45
+; GFX11-TRUE16-NEXT: s_pack_ll_b32_b16 s2, s16, s44
+; GFX11-TRUE16-NEXT: s_pack_ll_b32_b16 s3, s17, s43
+; GFX11-TRUE16-NEXT: s_pack_ll_b32_b16 s4, s18, s4
+; GFX11-TRUE16-NEXT: s_pack_ll_b32_b16 s5, s19, s5
+; GFX11-TRUE16-NEXT: s_pack_ll_b32_b16 s6, s20, s6
+; GFX11-TRUE16-NEXT: s_pack_ll_b32_b16 s7, s21, s7
+; GFX11-TRUE16-NEXT: s_pack_ll_b32_b16 s8, s22, s8
+; GFX11-TRUE16-NEXT: s_pack_ll_b32_b16 s9, s23, s9
+; GFX11-TRUE16-NEXT: s_pack_ll_b32_b16 s10, s24, s10
+; GFX11-TRUE16-NEXT: s_pack_ll_b32_b16 s11, s25, s11
+; GFX11-TRUE16-NEXT: s_pack_ll_b32_b16 s12, s26, s12
+; GFX11-TRUE16-NEXT: s_pack_ll_b32_b16 s13, s27, s13
+; GFX11-TRUE16-NEXT: s_pack_ll_b32_b16 s14, s28, s14
+; GFX11-TRUE16-NEXT: s_pack_ll_b32_b16 s15, s29, s15
; GFX11-TRUE16-NEXT: s_and_b32 s47, vcc_lo, exec_lo
; GFX11-TRUE16-NEXT: s_cbranch_scc0 .LBB51_4
; GFX11-TRUE16-NEXT: ; %bb.1: ; %cmp.false
-; GFX11-TRUE16-NEXT: v_lshl_or_b32 v18, v35, 16, v39
-; GFX11-TRUE16-NEXT: v_lshl_or_b32 v19, v34, 16, v38
-; GFX11-TRUE16-NEXT: v_lshl_or_b32 v20, v33, 16, v37
-; GFX11-TRUE16-NEXT: v_lshl_or_b32 v21, v32, 16, v36
-; GFX11-TRUE16-NEXT: v_dual_mov_b32 v0, s4 :: v_dual_mov_b32 v1, s1
-; GFX11-TRUE16-NEXT: v_dual_mov_b32 v2, s2 :: v_dual_mov_b32 v3, s3
-; GFX11-TRUE16-NEXT: v_dual_mov_b32 v4, s5 :: v_dual_mov_b32 v5, s6
-; GFX11-TRUE16-NEXT: v_dual_mov_b32 v6, s7 :: v_dual_mov_b32 v7, s8
-; GFX11-TRUE16-NEXT: v_dual_mov_b32 v8, s9 :: v_dual_mov_b32 v9, s10
-; GFX11-TRUE16-NEXT: v_dual_mov_b32 v10, s11 :: v_dual_mov_b32 v11, s12
-; GFX11-TRUE16-NEXT: v_dual_mov_b32 v12, s13 :: v_dual_mov_b32 v13, s14
-; GFX11-TRUE16-NEXT: v_dual_mov_b32 v14, s15 :: v_dual_mov_b32 v15, s16
-; GFX11-TRUE16-NEXT: v_dual_mov_b32 v16, s17 :: v_dual_mov_b32 v17, s0
-; GFX11-TRUE16-NEXT: s_and_not1_b32 vcc_lo, exec_lo, s40
+; GFX11-TRUE16-NEXT: v_dual_mov_b32 v0, s40 :: v_dual_mov_b32 v5, s0
+; GFX11-TRUE16-NEXT: v_dual_mov_b32 v2, s41 :: v_dual_mov_b32 v9, s1
+; GFX11-TRUE16-NEXT: v_dual_mov_b32 v14, s2 :: v_dual_mov_b32 v27, s4
+; GFX11-TRUE16-NEXT: v_dual_mov_b32 v20, s3 :: v_dual_mov_b32 v35, s5
+; GFX11-TRUE16-NEXT: v_dual_mov_b32 v44, s6 :: v_dual_mov_b32 v65, s8
+; GFX11-TRUE16-NEXT: v_dual_mov_b32 v54, s7 :: v_dual_mov_b32 v77, s9
+; GFX11-TRUE16-NEXT: v_dual_mov_b32 v90, s10 :: v_dual_mov_b32 v119, s12
+; GFX11-TRUE16-NEXT: v_dual_mov_b32 v104, s11 :: v_dual_mov_b32 v135, s13
+; GFX11-TRUE16-NEXT: v_mov_b32_e32 v152, s14
+; GFX11-TRUE16-NEXT: v_mov_b32_e32 v170, s15
+; GFX11-TRUE16-NEXT: s_and_not1_b32 vcc_lo, exec_lo, s42
; GFX11-TRUE16-NEXT: s_cbranch_vccnz .LBB51_3
; GFX11-TRUE16-NEXT: .LBB51_2: ; %cmp.true
-; GFX11-TRUE16-NEXT: v_lshl_or_b32 v18, v35, 16, v39
-; GFX11-TRUE16-NEXT: v_lshl_or_b32 v19, v34, 16, v38
-; GFX11-TRUE16-NEXT: v_lshl_or_b32 v20, v33, 16, v37
-; GFX11-TRUE16-NEXT: v_lshl_or_b32 v21, v32, 16, v36
-; GFX11-TRUE16-NEXT: v_pk_add_u16 v0, s4, 3 op_sel_hi:[1,0]
-; GFX11-TRUE16-NEXT: v_pk_add_u16 v1, s1, 3 op_sel_hi:[1,0]
-; GFX11-TRUE16-NEXT: v_pk_add_u16 v2, s2, 3 op_sel_hi:[1,0]
-; GFX11-TRUE16-NEXT: v_pk_add_u16 v3, s3, 3 op_sel_hi:[1,0]
-; GFX11-TRUE16-NEXT: v_pk_add_u16 v4, s5, 3 op_sel_hi:[1,0]
-; GFX11-TRUE16-NEXT: v_pk_add_u16 v5, s6, 3 op_sel_hi:[1,0]
-; GFX11-TRUE16-NEXT: v_pk_add_u16 v6, s7, 3 op_sel_hi:[1,0]
-; GFX11-TRUE16-NEXT: v_pk_add_u16 v7, s8, 3 op_sel_hi:[1,0]
-; GFX11-TRUE16-NEXT: v_pk_add_u16 v8, s9, 3 op_sel_hi:[1,0]
-; GFX11-TRUE16-NEXT: v_pk_add_u16 v9, s10, 3 op_sel_hi:[1,0]
-; GFX11-TRUE16-NEXT: v_pk_add_u16 v10, s11, 3 op_sel_hi:[1,0]
-; GFX11-TRUE16-NEXT: v_pk_add_u16 v11, s12, 3 op_sel_hi:[1,0]
-; GFX11-TRUE16-NEXT: v_pk_add_u16 v12, s13, 3 op_sel_hi:[1,0]
-; GFX11-TRUE16-NEXT: v_pk_add_u16 v13, s14, 3 op_sel_hi:[1,0]
-; GFX11-TRUE16-NEXT: v_pk_add_u16 v14, s15, 3 op_sel_hi:[1,0]
-; GFX11-TRUE16-NEXT: v_pk_add_u16 v15, s16, 3 op_sel_hi:[1,0]
-; GFX11-TRUE16-NEXT: v_pk_add_u16 v16, s17, 3 op_sel_hi:[1,0]
-; GFX11-TRUE16-NEXT: v_pk_add_u16 v17, s0, 3 op_sel_hi:[1,0]
-; GFX11-TRUE16-NEXT: v_pk_add_u16 v18, v18, 3 op_sel_hi:[1,0]
-; GFX11-TRUE16-NEXT: v_pk_add_u16 v19, v19, 3 op_sel_hi:[1,0]
-; GFX11-TRUE16-NEXT: v_pk_add_u16 v20, v20, 3 op_sel_hi:[1,0]
-; GFX11-TRUE16-NEXT: v_pk_add_u16 v21, v21, 3 op_sel_hi:[1,0]
+; GFX11-TRUE16-NEXT: v_pk_add_u16 v0, s40, 3 op_sel_hi:[1,0]
+; GFX11-TRUE16-NEXT: v_pk_add_u16 v2, s41, 3 op_sel_hi:[1,0]
+; GFX11-TRUE16-NEXT: v_pk_add_u16 v188, v188, 3 op_sel_hi:[1,0]
+; GFX11-TRUE16-NEXT: v_pk_add_u16 v187, v187, 3 op_sel_hi:[1,0]
+; GFX11-TRUE16-NEXT: v_pk_add_u16 v186, v186, 3 op_sel_hi:[1,0]
+; GFX11-TRUE16-NEXT: v_pk_add_u16 v185, v185, 3 op_sel_hi:[1,0]
+; GFX11-TRUE16-NEXT: v_pk_add_u16 v5, s0, 3 op_sel_hi:[1,0]
+; GFX11-TRUE16-NEXT: v_pk_add_u16 v9, s1, 3 op_sel_hi:[1,0]
+; GFX11-TRUE16-NEXT: v_pk_add_u16 v14, s2, 3 op_sel_hi:[1,0]
+; GFX11-TRUE16-NEXT: v_pk_add_u16 v20, s3, 3 op_sel_hi:[1,0]
+; GFX11-TRUE16-NEXT: v_pk_add_u16 v27, s4, 3 op_sel_hi:[1,0]
+; GFX11-TRUE16-NEXT: v_pk_add_u16 v35, s5, 3 op_sel_hi:[1,0]
+; GFX11-TRUE16-NEXT: v_pk_add_u16 v44, s6, 3 op_sel_hi:[1,0]
+; GFX11-TRUE16-NEXT: v_pk_add_u16 v54, s7, 3 op_sel_hi:[1,0]
+; GFX11-TRUE16-NEXT: v_pk_add_u16 v65, s8, 3 op_sel_hi:[1,0]
+; GFX11-TRUE16-NEXT: v_pk_add_u16 v77, s9, 3 op_sel_hi:[1,0]
+; GFX11-TRUE16-NEXT: v_pk_add_u16 v90, s10, 3 op_sel_hi:[1,0]
+; GFX11-TRUE16-NEXT: v_pk_add_u16 v104, s11, 3 op_sel_hi:[1,0]
+; GFX11-TRUE16-NEXT: v_pk_add_u16 v119, s12, 3 op_sel_hi:[1,0]
+; GFX11-TRUE16-NEXT: v_pk_add_u16 v135, s13, 3 op_sel_hi:[1,0]
+; GFX11-TRUE16-NEXT: v_pk_add_u16 v152, s14, 3 op_sel_hi:[1,0]
+; GFX11-TRUE16-NEXT: v_pk_add_u16 v170, s15, 3 op_sel_hi:[1,0]
; GFX11-TRUE16-NEXT: .LBB51_3: ; %end
+; GFX11-TRUE16-NEXT: v_dual_mov_b32 v1, v2 :: v_dual_mov_b32 v2, v5
+; GFX11-TRUE16-NEXT: v_dual_mov_b32 v5, v20 :: v_dual_mov_b32 v6, v27
+; GFX11-TRUE16-NEXT: v_dual_mov_b32 v7, v35 :: v_dual_mov_b32 v8, v44
+; GFX11-TRUE16-NEXT: v_dual_mov_b32 v11, v77 :: v_dual_mov_b32 v12, v90
+; GFX11-TRUE16-NEXT: v_mov_b32_e32 v13, v104
+; GFX11-TRUE16-NEXT: v_dual_mov_b32 v15, v135 :: v_dual_mov_b32 v16, v152
+; GFX11-TRUE16-NEXT: v_dual_mov_b32 v17, v170 :: v_dual_mov_b32 v18, v188
+; GFX11-TRUE16-NEXT: v_dual_mov_b32 v19, v187 :: v_dual_mov_b32 v20, v186
+; GFX11-TRUE16-NEXT: v_mov_b32_e32 v21, v185
+; GFX11-TRUE16-NEXT: s_clause 0x1f
+; GFX11-TRUE16-NEXT: scratch_load_b32 v188, off, s32
+; GFX11-TRUE16-NEXT: scratch_load_b32 v187, off, s32 offset:4
+; GFX11-TRUE16-NEXT: scratch_load_b32 v186, off, s32 offset:8
+; GFX11-TRUE16-NEXT: scratch_load_b32 v185, off, s32 offset:12
+; GFX11-TRUE16-NEXT: scratch_load_b32 v184, off, s32 offset:16
+; GFX11-TRUE16-NEXT: scratch_load_b32 v175, off, s32 offset:20
+; GFX11-TRUE16-NEXT: scratch_load_b32 v174, off, s32 offset:24
+; GFX11-TRUE16-NEXT: scratch_load_b32 v173, off, s32 offset:28
+; GFX11-TRUE16-NEXT: scratch_load_b32 v172, off, s32 offset:32
+; GFX11-TRUE16-NEXT: scratch_load_b32 v171, off, s32 offset:36
+; GFX11-TRUE16-NEXT: scratch_load_b32 v170, off, s32 offset:40
+; GFX11-TRUE16-NEXT: scratch_load_b32 v169, off, s32 offset:44
+; GFX11-TRUE16-NEXT: scratch_load_b32 v168, off, s32 offset:48
+; GFX11-TRUE16-NEXT: scratch_load_b32 v159, off, s32 offset:52
+; GFX11-TRUE16-NEXT: scratch_load_b32 v158, off, s32 offset:56
+; GFX11-TRUE16-NEXT: scratch_load_b32 v157, off, s32 offset:60
+; GFX11-TRUE16-NEXT: scratch_load_b32 v156, off, s32 offset:64
+; GFX11-TRUE16-NEXT: scratch_load_b32 v155, off, s32 offset:68
+; GFX11-TRUE16-NEXT: scratch_load_b32 v154, off, s32 offset:72
+; GFX11-TRUE16-NEXT: scratch_load_b32 v153, off, s32 offset:76
+; GFX11-TRUE16-NEXT: scratch_load_b32 v152, off, s32 offset:80
+; GFX11-TRUE16-NEXT: scratch_load_b32 v143, off, s32 offset:84
+; GFX11-TRUE16-NEXT: scratch_load_b32 v142, off, s32 offset:88
+; GFX11-TRUE16-NEXT: scratch_load_b32 v141, off, s32 offset:92
+; GFX11-TRUE16-NEXT: scratch_load_b32 v140, off, s32 offset:96
+; GFX11-TRUE16-NEXT: scratch_load_b32 v139, off, s32 offset:100
+; GFX11-TRUE16-NEXT: scratch_load_b32 v138, off, s32 offset:104
+; GFX11-TRUE16-NEXT: scratch_load_b32 v137, off, s32 offset:108
+; GFX11-TRUE16-NEXT: scratch_load_b32 v136, off, s32 offset:112
+; GFX11-TRUE16-NEXT: scratch_load_b32 v127, off, s32 offset:116
+; GFX11-TRUE16-NEXT: scratch_load_b32 v126, off, s32 offset:120
+; GFX11-TRUE16-NEXT: scratch_load_b32 v125, off, s32 offset:124
+; GFX11-TRUE16-NEXT: s_clause 0x1f
+; GFX11-TRUE16-NEXT: scratch_load_b32 v124, off, s32 offset:128
+; GFX11-TRUE16-NEXT: scratch_load_b32 v123, off, s32 offset:132
+; GFX11-TRUE16-NEXT: scratch_load_b32 v122, off, s32 offset:136
+; GFX11-TRUE16-NEXT: scratch_load_b32 v121, off, s32 offset:140
+; GFX11-TRUE16-NEXT: scratch_load_b32 v120, off, s32 offset:144
+; GFX11-TRUE16-NEXT: scratch_load_b32 v111, off, s32 offset:148
+; GFX11-TRUE16-NEXT: scratch_load_b32 v110, off, s32 offset:152
+; GFX11-TRUE16-NEXT: scratch_load_b32 v109, off, s32 offset:156
+; GFX11-TRUE16-NEXT: scratch_load_b32 v108, off, s32 offset:160
+; GFX11-TRUE16-NEXT: scratch_load_b32 v107, off, s32 offset:164
+; GFX11-TRUE16-NEXT: scratch_load_b32 v106, off, s32 offset:168
+; GFX11-TRUE16-NEXT: scratch_load_b32 v105, off, s32 offset:172
+; GFX11-TRUE16-NEXT: scratch_load_b32 v104, off, s32 offset:176
+; GFX11-TRUE16-NEXT: scratch_load_b32 v95, off, s32 offset:180
+; GFX11-TRUE16-NEXT: scratch_load_b32 v94, off, s32 offset:184
+; GFX11-TRUE16-NEXT: scratch_load_b32 v93, off, s32 offset:188
+; GFX11-TRUE16-NEXT: scratch_load_b32 v92, off, s32 offset:192
+; GFX11-TRUE16-NEXT: scratch_load_b32 v91, off, s32 offset:196
+; GFX11-TRUE16-NEXT: scratch_load_b32 v90, off, s32 offset:200
+; GFX11-TRUE16-NEXT: scratch_load_b32 v89, off, s32 offset:204
+; GFX11-TRUE16-NEXT: scratch_load_b32 v88, off, s32 offset:208
+; GFX11-TRUE16-NEXT: scratch_load_b32 v79, off, s32 offset:212
+; GFX11-TRUE16-NEXT: scratch_load_b32 v78, off, s32 offset:216
+; GFX11-TRUE16-NEXT: scratch_load_b32 v77, off, s32 offset:220
+; GFX11-TRUE16-NEXT: scratch_load_b32 v76, off, s32 offset:224
+; GFX11-TRUE16-NEXT: scratch_load_b32 v75, off, s32 offset:228
+; GFX11-TRUE16-NEXT: scratch_load_b32 v74, off, s32 offset:232
+; GFX11-TRUE16-NEXT: scratch_load_b32 v73, off, s32 offset:236
+; GFX11-TRUE16-NEXT: scratch_load_b32 v72, off, s32 offset:240
+; GFX11-TRUE16-NEXT: scratch_load_b32 v63, off, s32 offset:244
+; GFX11-TRUE16-NEXT: scratch_load_b32 v62, off, s32 offset:248
+; GFX11-TRUE16-NEXT: scratch_load_b32 v61, off, s32 offset:252
+; GFX11-TRUE16-NEXT: s_clause 0xc
+; GFX11-TRUE16-NEXT: scratch_load_b32 v60, off, s32 offset:256
+; GFX11-TRUE16-NEXT: scratch_load_b32 v59, off, s32 offset:260
+; GFX11-TRUE16-NEXT: scratch_load_b32 v58, off, s32 offset:264
+; GFX11-TRUE16-NEXT: scratch_load_b32 v57, off, s32 offset:268
+; GFX11-TRUE16-NEXT: scratch_load_b32 v56, off, s32 offset:272
+; GFX11-TRUE16-NEXT: scratch_load_b32 v47, off, s32 offset:276
+; GFX11-TRUE16-NEXT: scratch_load_b32 v46, off, s32 offset:280
+; GFX11-TRUE16-NEXT: scratch_load_b32 v45, off, s32 offset:284
+; GFX11-TRUE16-NEXT: scratch_load_b32 v44, off, s32 offset:288
+; GFX11-TRUE16-NEXT: scratch_load_b32 v43, off, s32 offset:292
+; GFX11-TRUE16-NEXT: scratch_load_b32 v42, off, s32 offset:296
+; GFX11-TRUE16-NEXT: scratch_load_b32 v41, off, s32 offset:300
+; GFX11-TRUE16-NEXT: scratch_load_b32 v40, off, s32 offset:304
+; GFX11-TRUE16-NEXT: v_dual_mov_b32 v3, v9 :: v_dual_mov_b32 v4, v14
+; GFX11-TRUE16-NEXT: v_dual_mov_b32 v9, v54 :: v_dual_mov_b32 v10, v65
+; GFX11-TRUE16-NEXT: v_mov_b32_e32 v14, v119
+; GFX11-TRUE16-NEXT: s_waitcnt vmcnt(0)
; GFX11-TRUE16-NEXT: s_setpc_b64 s[30:31]
; GFX11-TRUE16-NEXT: .LBB51_4:
; GFX11-TRUE16-NEXT: ; implicit-def: $vgpr0_vgpr1_vgpr2_vgpr3_vgpr4_vgpr5_vgpr6_vgpr7_vgpr8_vgpr9_vgpr10_vgpr11_vgpr12_vgpr13_vgpr14_vgpr15_vgpr16_vgpr17_vgpr18_vgpr19_vgpr20_vgpr21_vgpr22_vgpr23_vgpr24_vgpr25_vgpr26_vgpr27_vgpr28_vgpr29_vgpr30_vgpr31
+; GFX11-TRUE16-NEXT: ; implicit-def: $vgpr1_vgpr2_vgpr3_vgpr4_vgpr5_vgpr6_vgpr7_vgpr8_vgpr9_vgpr10_vgpr11_vgpr12_vgpr13_vgpr14_vgpr15_vgpr16_vgpr17_vgpr18_vgpr19_vgpr20_vgpr21_vgpr22_vgpr23_vgpr24_vgpr25_vgpr26_vgpr27_vgpr28_vgpr29_vgpr30_vgpr31_vgpr32
+; GFX11-TRUE16-NEXT: ; implicit-def: $vgpr3_vgpr4_vgpr5_vgpr6_vgpr7_vgpr8_vgpr9_vgpr10_vgpr11_vgpr12_vgpr13_vgpr14_vgpr15_vgpr16_vgpr17_vgpr18_vgpr19_vgpr20_vgpr21_vgpr22_vgpr23_vgpr24_vgpr25_vgpr26_vgpr27_vgpr28_vgpr29_vgpr30_vgpr31_vgpr32_vgpr33_vgpr34
+; GFX11-TRUE16-NEXT: ; implicit-def: $vgpr6_vgpr7_vgpr8_vgpr9_vgpr10_vgpr11_vgpr12_vgpr13_vgpr14_vgpr15_vgpr16_vgpr17_vgpr18_vgpr19_vgpr20_vgpr21_vgpr22_vgpr23_vgpr24_vgpr25_vgpr26_vgpr27_vgpr28_vgpr29_vgpr30_vgpr31_vgpr32_vgpr33_vgpr34_vgpr35_vgpr36_vgpr37
+; GFX11-TRUE16-NEXT: ; implicit-def: $vgpr10_vgpr11_vgpr12_vgpr13_vgpr14_vgpr15_vgpr16_vgpr17_vgpr18_vgpr19_vgpr20_vgpr21_vgpr22_vgpr23_vgpr24_vgpr25_vgpr26_vgpr27_vgpr28_vgpr29_vgpr30_vgpr31_vgpr32_vgpr33_vgpr34_vgpr35_vgpr36_vgpr37_vgpr38_vgpr39_vgpr40_vgpr41
+; GFX11-TRUE16-NEXT: ; implicit-def: $vgpr15_vgpr16_vgpr17_vgpr18_vgpr19_vgpr20_vgpr21_vgpr22_vgpr23_vgpr24_vgpr25_vgpr26_vgpr27_vgpr28_vgpr29_vgpr30_vgpr31_vgpr32_vgpr33_vgpr34_vgpr35_vgpr36_vgpr37_vgpr38_vgpr39_vgpr40_vgpr41_vgpr42_vgpr43_vgpr44_vgpr45_vgpr46
+; GFX11-TRUE16-NEXT: ; implicit-def: $vgpr21_vgpr22_vgpr23_vgpr24_vgpr25_vgpr26_vgpr27_vgpr28_vgpr29_vgpr30_vgpr31_vgpr32_vgpr33_vgpr34_vgpr35_vgpr36_vgpr37_vgpr38_vgpr39_vgpr40_vgpr41_vgpr42_vgpr43_vgpr44_vgpr45_vgpr46_vgpr47_vgpr48_vgpr49_vgpr50_vgpr51_vgpr52
+; GFX11-TRUE16-NEXT: ; implicit-def: $vgpr28_vgpr29_vgpr30_vgpr31_vgpr32_vgpr33_vgpr34_vgpr35_vgpr36_vgpr37_vgpr38_vgpr39_vgpr40_vgpr41_vgpr42_vgpr43_vgpr44_vgpr45_vgpr46_vgpr47_vgpr48_vgpr49_vgpr50_vgpr51_vgpr52_vgpr53_vgpr54_vgpr55_vgpr56_vgpr57_vgpr58_vgpr59
+; GFX11-TRUE16-NEXT: ; implicit-def: $vgpr36_vgpr37_vgpr38_vgpr39_vgpr40_vgpr41_vgpr42_vgpr43_vgpr44_vgpr45_vgpr46_vgpr47_vgpr48_vgpr49_vgpr50_vgpr51_vgpr52_vgpr53_vgpr54_vgpr55_vgpr56_vgpr57_vgpr58_vgpr59_vgpr60_vgpr61_vgpr62_vgpr63_vgpr64_vgpr65_vgpr66_vgpr67
+; GFX11-TRUE16-NEXT: ; implicit-def: $vgpr45_vgpr46_vgpr47_vgpr48_vgpr49_vgpr50_vgpr51_vgpr52_vgpr53_vgpr54_vgpr55_vgpr56_vgpr57_vgpr58_vgpr59_vgpr60_vgpr61_vgpr62_vgpr63_vgpr64_vgpr65_vgpr66_vgpr67_vgpr68_vgpr69_vgpr70_vgpr71_vgpr72_vgpr73_vgpr74_vgpr75_vgpr76
+; GFX11-TRUE16-NEXT: ; implicit-def: $vgpr55_vgpr56_vgpr57_vgpr58_vgpr59_vgpr60_vgpr61_vgpr62_vgpr63_vgpr64_vgpr65_vgpr66_vgpr67_vgpr68_vgpr69_vgpr70_vgpr71_vgpr72_vgpr73_vgpr74_vgpr75_vgpr76_vgpr77_vgpr78_vgpr79_vgpr80_vgpr81_vgpr82_vgpr83_vgpr84_vgpr85_vgpr86
+; GFX11-TRUE16-NEXT: ; implicit-def: $vgpr66_vgpr67_vgpr68_vgpr69_vgpr70_vgpr71_vgpr72_vgpr73_vgpr74_vgpr75_vgpr76_vgpr77_vgpr78_vgpr79_vgpr80_vgpr81_vgpr82_vgpr83_vgpr84_vgpr85_vgpr86_vgpr87_vgpr88_vgpr89_vgpr90_vgpr91_vgpr92_vgpr93_vgpr94_vgpr95_vgpr96_vgpr97
+; GFX11-TRUE16-NEXT: ; implicit-def: $vgpr78_vgpr79_vgpr80_vgpr81_vgpr82_vgpr83_vgpr84_vgpr85_vgpr86_vgpr87_vgpr88_vgpr89_vgpr90_vgpr91_vgpr92_vgpr93_vgpr94_vgpr95_vgpr96_vgpr97_vgpr98_vgpr99_vgpr100_vgpr101_vgpr102_vgpr103_vgpr104_vgpr105_vgpr106_vgpr107_vgpr108_vgpr109
+; GFX11-TRUE16-NEXT: ; implicit-def: $vgpr91_vgpr92_vgpr93_vgpr94_vgpr95_vgpr96_vgpr97_vgpr98_vgpr99_vgpr100_vgpr101_vgpr102_vgpr103_vgpr104_vgpr105_vgpr106_vgpr107_vgpr108_vgpr109_vgpr110_vgpr111_vgpr112_vgpr113_vgpr114_vgpr115_vgpr116_vgpr117_vgpr118_vgpr119_vgpr120_vgpr121_vgpr122
+; GFX11-TRUE16-NEXT: ; implicit-def: $vgpr105_vgpr106_vgpr107_vgpr108_vgpr109_vgpr110_vgpr111_vgpr112_vgpr113_vgpr114_vgpr115_vgpr116_vgpr117_vgpr118_vgpr119_vgpr120_vgpr121_vgpr122_vgpr123_vgpr124_vgpr125_vgpr126_vgpr127_vgpr128_vgpr129_vgpr130_vgpr131_vgpr132_vgpr133_vgpr134_vgpr135_vgpr136
+; GFX11-TRUE16-NEXT: ; implicit-def: $vgpr120_vgpr121_vgpr122_vgpr123_vgpr124_vgpr125_vgpr126_vgpr127_vgpr128_vgpr129_vgpr130_vgpr131_vgpr132_vgpr133_vgpr134_vgpr135_vgpr136_vgpr137_vgpr138_vgpr139_vgpr140_vgpr141_vgpr142_vgpr143_vgpr144_vgpr145_vgpr146_vgpr147_vgpr148_vgpr149_vgpr150_vgpr151
+; GFX11-TRUE16-NEXT: ; implicit-def: $vgpr136_vgpr137_vgpr138_vgpr139_vgpr140_vgpr141_vgpr142_vgpr143_vgpr144_vgpr145_vgpr146_vgpr147_vgpr148_vgpr149_vgpr150_vgpr151_vgpr152_vgpr153_vgpr154_vgpr155_vgpr156_vgpr157_vgpr158_vgpr159_vgpr160_vgpr161_vgpr162_vgpr163_vgpr164_vgpr165_vgpr166_vgpr167
+; GFX11-TRUE16-NEXT: ; implicit-def: $vgpr153_vgpr154_vgpr155_vgpr156_vgpr157_vgpr158_vgpr159_vgpr160_vgpr161_vgpr162_vgpr163_vgpr164_vgpr165_vgpr166_vgpr167_vgpr168_vgpr169_vgpr170_vgpr171_vgpr172_vgpr173_vgpr174_vgpr175_vgpr176_vgpr177_vgpr178_vgpr179_vgpr180_vgpr181_vgpr182_vgpr183_vgpr184
; GFX11-TRUE16-NEXT: s_branch .LBB51_2
;
; GFX11-FAKE16-LABEL: bitcast_v44i16_to_v11f64_scalar:
@@ -30248,154 +31879,294 @@ define inreg <44 x half> @bitcast_v11f64_to_v44f16_scalar(<11 x double> inreg %a
; GFX9-NEXT: ; implicit-def: $vgpr30
; GFX9-NEXT: s_branch .LBB53_2
;
-; GFX11-LABEL: bitcast_v11f64_to_v44f16_scalar:
-; GFX11: ; %bb.0:
-; GFX11-NEXT: s_waitcnt vmcnt(0) expcnt(0) lgkmcnt(0)
-; GFX11-NEXT: v_cmp_ne_u32_e32 vcc_lo, 0, v4
-; GFX11-NEXT: v_dual_mov_b32 v22, s0 :: v_dual_mov_b32 v23, s1
-; GFX11-NEXT: v_dual_mov_b32 v20, s2 :: v_dual_mov_b32 v21, s3
-; GFX11-NEXT: v_dual_mov_b32 v5, s16 :: v_dual_mov_b32 v6, s17
-; GFX11-NEXT: v_dual_mov_b32 v18, s18 :: v_dual_mov_b32 v19, s19
-; GFX11-NEXT: v_dual_mov_b32 v9, s20 :: v_dual_mov_b32 v10, s21
-; GFX11-NEXT: v_dual_mov_b32 v7, s22 :: v_dual_mov_b32 v8, s23
-; GFX11-NEXT: v_dual_mov_b32 v14, s24 :: v_dual_mov_b32 v15, s25
-; GFX11-NEXT: v_dual_mov_b32 v12, s26 :: v_dual_mov_b32 v13, s27
-; GFX11-NEXT: v_dual_mov_b32 v16, s28 :: v_dual_mov_b32 v17, s29
-; GFX11-NEXT: s_mov_b32 s0, 0
-; GFX11-NEXT: s_and_b32 s1, vcc_lo, exec_lo
-; GFX11-NEXT: s_cbranch_scc0 .LBB53_4
-; GFX11-NEXT: ; %bb.1: ; %cmp.false
-; GFX11-NEXT: v_lshrrev_b32_e32 v26, 16, v3
-; GFX11-NEXT: v_lshrrev_b32_e32 v27, 16, v2
-; GFX11-NEXT: v_lshrrev_b32_e32 v28, 16, v1
-; GFX11-NEXT: v_lshrrev_b32_e32 v29, 16, v0
-; GFX11-NEXT: v_lshrrev_b32_e32 v30, 16, v17
-; GFX11-NEXT: v_lshrrev_b32_e32 v31, 16, v16
-; GFX11-NEXT: v_lshrrev_b32_e32 v32, 16, v13
-; GFX11-NEXT: v_lshrrev_b32_e32 v33, 16, v12
-; GFX11-NEXT: v_lshrrev_b32_e32 v34, 16, v15
-; GFX11-NEXT: v_lshrrev_b32_e32 v35, 16, v14
-; GFX11-NEXT: v_lshrrev_b32_e32 v11, 16, v8
-; GFX11-NEXT: v_lshrrev_b32_e32 v36, 16, v7
-; GFX11-NEXT: v_lshrrev_b32_e32 v37, 16, v10
-; GFX11-NEXT: v_lshrrev_b32_e32 v38, 16, v9
-; GFX11-NEXT: v_lshrrev_b32_e32 v39, 16, v19
-; GFX11-NEXT: v_lshrrev_b32_e32 v48, 16, v18
-; GFX11-NEXT: v_lshrrev_b32_e32 v49, 16, v6
-; GFX11-NEXT: v_lshrrev_b32_e32 v4, 16, v5
-; GFX11-NEXT: v_lshrrev_b32_e32 v50, 16, v21
-; GFX11-NEXT: v_lshrrev_b32_e32 v51, 16, v20
-; GFX11-NEXT: v_lshrrev_b32_e32 v25, 16, v23
-; GFX11-NEXT: v_lshrrev_b32_e32 v24, 16, v22
-; GFX11-NEXT: s_and_not1_b32 vcc_lo, exec_lo, s0
-; GFX11-NEXT: s_cbranch_vccnz .LBB53_3
-; GFX11-NEXT: .LBB53_2: ; %cmp.true
-; GFX11-NEXT: v_add_f64 v[2:3], v[2:3], 1.0
-; GFX11-NEXT: v_add_f64 v[0:1], v[0:1], 1.0
-; GFX11-NEXT: v_add_f64 v[16:17], v[16:17], 1.0
-; GFX11-NEXT: v_add_f64 v[12:13], v[12:13], 1.0
-; GFX11-NEXT: v_add_f64 v[14:15], v[14:15], 1.0
-; GFX11-NEXT: v_add_f64 v[7:8], v[7:8], 1.0
-; GFX11-NEXT: v_add_f64 v[9:10], v[9:10], 1.0
-; GFX11-NEXT: v_add_f64 v[18:19], v[18:19], 1.0
-; GFX11-NEXT: v_add_f64 v[5:6], v[5:6], 1.0
-; GFX11-NEXT: v_add_f64 v[20:21], v[20:21], 1.0
-; GFX11-NEXT: v_add_f64 v[22:23], v[22:23], 1.0
-; GFX11-NEXT: v_lshrrev_b32_e32 v26, 16, v3
-; GFX11-NEXT: v_lshrrev_b32_e32 v27, 16, v2
-; GFX11-NEXT: v_lshrrev_b32_e32 v28, 16, v1
-; GFX11-NEXT: v_lshrrev_b32_e32 v29, 16, v0
-; GFX11-NEXT: v_lshrrev_b32_e32 v30, 16, v17
-; GFX11-NEXT: v_lshrrev_b32_e32 v31, 16, v16
-; GFX11-NEXT: v_lshrrev_b32_e32 v32, 16, v13
-; GFX11-NEXT: v_lshrrev_b32_e32 v33, 16, v12
-; GFX11-NEXT: v_lshrrev_b32_e32 v34, 16, v15
-; GFX11-NEXT: v_lshrrev_b32_e32 v35, 16, v14
-; GFX11-NEXT: v_lshrrev_b32_e32 v11, 16, v8
-; GFX11-NEXT: v_lshrrev_b32_e32 v36, 16, v7
-; GFX11-NEXT: v_lshrrev_b32_e32 v37, 16, v10
-; GFX11-NEXT: v_lshrrev_b32_e32 v38, 16, v9
-; GFX11-NEXT: v_lshrrev_b32_e32 v39, 16, v19
-; GFX11-NEXT: v_lshrrev_b32_e32 v48, 16, v18
-; GFX11-NEXT: v_lshrrev_b32_e32 v49, 16, v6
-; GFX11-NEXT: v_lshrrev_b32_e32 v4, 16, v5
-; GFX11-NEXT: v_lshrrev_b32_e32 v50, 16, v21
-; GFX11-NEXT: v_lshrrev_b32_e32 v51, 16, v20
-; GFX11-NEXT: v_lshrrev_b32_e32 v25, 16, v23
-; GFX11-NEXT: v_lshrrev_b32_e32 v24, 16, v22
-; GFX11-NEXT: .LBB53_3: ; %end
-; GFX11-NEXT: v_and_b32_e32 v23, 0xffff, v23
-; GFX11-NEXT: v_and_b32_e32 v21, 0xffff, v21
-; GFX11-NEXT: v_and_b32_e32 v5, 0xffff, v5
-; GFX11-NEXT: v_and_b32_e32 v6, 0xffff, v6
-; GFX11-NEXT: v_and_b32_e32 v18, 0xffff, v18
-; GFX11-NEXT: v_lshl_or_b32 v25, v25, 16, v23
-; GFX11-NEXT: v_and_b32_e32 v22, 0xffff, v22
-; GFX11-NEXT: v_lshl_or_b32 v23, v50, 16, v21
-; GFX11-NEXT: v_and_b32_e32 v20, 0xffff, v20
-; GFX11-NEXT: v_lshl_or_b32 v4, v4, 16, v5
-; GFX11-NEXT: v_lshl_or_b32 v5, v49, 16, v6
-; GFX11-NEXT: v_lshl_or_b32 v6, v48, 16, v18
-; GFX11-NEXT: v_and_b32_e32 v18, 0xffff, v19
-; GFX11-NEXT: v_and_b32_e32 v9, 0xffff, v9
-; GFX11-NEXT: v_and_b32_e32 v10, 0xffff, v10
-; GFX11-NEXT: v_and_b32_e32 v19, 0xffff, v7
-; GFX11-NEXT: v_and_b32_e32 v3, 0xffff, v3
-; GFX11-NEXT: v_lshl_or_b32 v24, v24, 16, v22
-; GFX11-NEXT: v_lshl_or_b32 v7, v39, 16, v18
-; GFX11-NEXT: v_and_b32_e32 v14, 0xffff, v14
-; GFX11-NEXT: v_and_b32_e32 v15, 0xffff, v15
-; GFX11-NEXT: v_and_b32_e32 v18, 0xffff, v12
-; GFX11-NEXT: v_and_b32_e32 v16, 0xffff, v16
-; GFX11-NEXT: v_and_b32_e32 v17, 0xffff, v17
-; GFX11-NEXT: v_and_b32_e32 v0, 0xffff, v0
-; GFX11-NEXT: v_and_b32_e32 v1, 0xffff, v1
-; GFX11-NEXT: v_and_b32_e32 v2, 0xffff, v2
-; GFX11-NEXT: v_lshl_or_b32 v21, v26, 16, v3
-; GFX11-NEXT: v_mov_b32_e32 v3, v23
-; GFX11-NEXT: v_lshl_or_b32 v22, v51, 16, v20
-; GFX11-NEXT: v_and_b32_e32 v20, 0xffff, v8
-; GFX11-NEXT: v_lshl_or_b32 v8, v38, 16, v9
-; GFX11-NEXT: v_lshl_or_b32 v9, v37, 16, v10
-; GFX11-NEXT: v_lshl_or_b32 v10, v36, 16, v19
-; GFX11-NEXT: v_and_b32_e32 v19, 0xffff, v13
-; GFX11-NEXT: v_lshl_or_b32 v11, v11, 16, v20
-; GFX11-NEXT: v_lshl_or_b32 v12, v35, 16, v14
-; GFX11-NEXT: v_lshl_or_b32 v13, v34, 16, v15
-; GFX11-NEXT: v_lshl_or_b32 v14, v33, 16, v18
-; GFX11-NEXT: v_lshl_or_b32 v15, v32, 16, v19
-; GFX11-NEXT: v_lshl_or_b32 v16, v31, 16, v16
-; GFX11-NEXT: v_lshl_or_b32 v17, v30, 16, v17
-; GFX11-NEXT: v_lshl_or_b32 v18, v29, 16, v0
-; GFX11-NEXT: v_lshl_or_b32 v19, v28, 16, v1
-; GFX11-NEXT: v_lshl_or_b32 v20, v27, 16, v2
-; GFX11-NEXT: v_dual_mov_b32 v0, v24 :: v_dual_mov_b32 v1, v25
-; GFX11-NEXT: v_mov_b32_e32 v2, v22
-; GFX11-NEXT: s_setpc_b64 s[30:31]
-; GFX11-NEXT: .LBB53_4:
-; GFX11-NEXT: ; implicit-def: $vgpr24
-; GFX11-NEXT: ; implicit-def: $vgpr25
-; GFX11-NEXT: ; implicit-def: $vgpr51
-; GFX11-NEXT: ; implicit-def: $vgpr50
-; GFX11-NEXT: ; implicit-def: $vgpr4
-; GFX11-NEXT: ; implicit-def: $vgpr49
-; GFX11-NEXT: ; implicit-def: $vgpr48
-; GFX11-NEXT: ; implicit-def: $vgpr39
-; GFX11-NEXT: ; implicit-def: $vgpr38
-; GFX11-NEXT: ; implicit-def: $vgpr37
-; GFX11-NEXT: ; implicit-def: $vgpr36
-; GFX11-NEXT: ; implicit-def: $vgpr11
-; GFX11-NEXT: ; implicit-def: $vgpr35
-; GFX11-NEXT: ; implicit-def: $vgpr34
-; GFX11-NEXT: ; implicit-def: $vgpr33
-; GFX11-NEXT: ; implicit-def: $vgpr32
-; GFX11-NEXT: ; implicit-def: $vgpr31
-; GFX11-NEXT: ; implicit-def: $vgpr30
-; GFX11-NEXT: ; implicit-def: $vgpr29
-; GFX11-NEXT: ; implicit-def: $vgpr28
-; GFX11-NEXT: ; implicit-def: $vgpr27
-; GFX11-NEXT: ; implicit-def: $vgpr26
-; GFX11-NEXT: s_branch .LBB53_2
+; GFX11-TRUE16-LABEL: bitcast_v11f64_to_v44f16_scalar:
+; GFX11-TRUE16: ; %bb.0:
+; GFX11-TRUE16-NEXT: s_waitcnt vmcnt(0) expcnt(0) lgkmcnt(0)
+; GFX11-TRUE16-NEXT: v_dual_mov_b32 v16, v4 :: v_dual_mov_b32 v21, v3
+; GFX11-TRUE16-NEXT: v_dual_mov_b32 v20, v2 :: v_dual_mov_b32 v19, v1
+; GFX11-TRUE16-NEXT: v_dual_mov_b32 v18, v0 :: v_dual_mov_b32 v1, s1
+; GFX11-TRUE16-NEXT: s_delay_alu instid0(VALU_DEP_3)
+; GFX11-TRUE16-NEXT: v_cmp_ne_u32_e32 vcc_lo, 0, v16
+; GFX11-TRUE16-NEXT: v_dual_mov_b32 v0, s0 :: v_dual_mov_b32 v3, s3
+; GFX11-TRUE16-NEXT: v_dual_mov_b32 v2, s2 :: v_dual_mov_b32 v5, s17
+; GFX11-TRUE16-NEXT: v_dual_mov_b32 v4, s16 :: v_dual_mov_b32 v7, s19
+; GFX11-TRUE16-NEXT: v_dual_mov_b32 v6, s18 :: v_dual_mov_b32 v9, s21
+; GFX11-TRUE16-NEXT: v_dual_mov_b32 v8, s20 :: v_dual_mov_b32 v11, s23
+; GFX11-TRUE16-NEXT: v_dual_mov_b32 v10, s22 :: v_dual_mov_b32 v13, s25
+; GFX11-TRUE16-NEXT: v_dual_mov_b32 v12, s24 :: v_dual_mov_b32 v15, s27
+; GFX11-TRUE16-NEXT: v_dual_mov_b32 v14, s26 :: v_dual_mov_b32 v17, s29
+; GFX11-TRUE16-NEXT: v_mov_b32_e32 v16, s28
+; GFX11-TRUE16-NEXT: s_mov_b32 s0, 0
+; GFX11-TRUE16-NEXT: s_and_b32 s1, vcc_lo, exec_lo
+; GFX11-TRUE16-NEXT: s_cbranch_scc0 .LBB53_4
+; GFX11-TRUE16-NEXT: ; %bb.1: ; %cmp.false
+; GFX11-TRUE16-NEXT: v_lshrrev_b32_e32 v22, 16, v21
+; GFX11-TRUE16-NEXT: v_lshrrev_b32_e32 v23, 16, v20
+; GFX11-TRUE16-NEXT: v_lshrrev_b32_e32 v24, 16, v19
+; GFX11-TRUE16-NEXT: v_lshrrev_b32_e32 v25, 16, v18
+; GFX11-TRUE16-NEXT: v_lshrrev_b32_e32 v26, 16, v17
+; GFX11-TRUE16-NEXT: v_lshrrev_b32_e32 v27, 16, v16
+; GFX11-TRUE16-NEXT: v_lshrrev_b32_e32 v28, 16, v15
+; GFX11-TRUE16-NEXT: v_lshrrev_b32_e32 v29, 16, v14
+; GFX11-TRUE16-NEXT: v_lshrrev_b32_e32 v30, 16, v13
+; GFX11-TRUE16-NEXT: v_lshrrev_b32_e32 v31, 16, v12
+; GFX11-TRUE16-NEXT: v_lshrrev_b32_e32 v32, 16, v11
+; GFX11-TRUE16-NEXT: v_lshrrev_b32_e32 v33, 16, v10
+; GFX11-TRUE16-NEXT: v_lshrrev_b32_e32 v34, 16, v9
+; GFX11-TRUE16-NEXT: v_lshrrev_b32_e32 v35, 16, v8
+; GFX11-TRUE16-NEXT: v_lshrrev_b32_e32 v36, 16, v7
+; GFX11-TRUE16-NEXT: v_lshrrev_b32_e32 v37, 16, v6
+; GFX11-TRUE16-NEXT: v_lshrrev_b32_e32 v38, 16, v5
+; GFX11-TRUE16-NEXT: v_lshrrev_b32_e32 v39, 16, v4
+; GFX11-TRUE16-NEXT: v_lshrrev_b32_e32 v48, 16, v3
+; GFX11-TRUE16-NEXT: v_lshrrev_b32_e32 v49, 16, v2
+; GFX11-TRUE16-NEXT: v_lshrrev_b32_e32 v50, 16, v1
+; GFX11-TRUE16-NEXT: v_lshrrev_b32_e32 v51, 16, v0
+; GFX11-TRUE16-NEXT: s_and_not1_b32 vcc_lo, exec_lo, s0
+; GFX11-TRUE16-NEXT: s_cbranch_vccnz .LBB53_3
+; GFX11-TRUE16-NEXT: .LBB53_2: ; %cmp.true
+; GFX11-TRUE16-NEXT: v_add_f64 v[20:21], v[20:21], 1.0
+; GFX11-TRUE16-NEXT: v_add_f64 v[18:19], v[18:19], 1.0
+; GFX11-TRUE16-NEXT: v_add_f64 v[16:17], v[16:17], 1.0
+; GFX11-TRUE16-NEXT: v_add_f64 v[14:15], v[14:15], 1.0
+; GFX11-TRUE16-NEXT: v_add_f64 v[12:13], v[12:13], 1.0
+; GFX11-TRUE16-NEXT: v_add_f64 v[10:11], v[10:11], 1.0
+; GFX11-TRUE16-NEXT: v_add_f64 v[8:9], v[8:9], 1.0
+; GFX11-TRUE16-NEXT: v_add_f64 v[6:7], v[6:7], 1.0
+; GFX11-TRUE16-NEXT: v_add_f64 v[4:5], v[4:5], 1.0
+; GFX11-TRUE16-NEXT: v_add_f64 v[2:3], v[2:3], 1.0
+; GFX11-TRUE16-NEXT: v_add_f64 v[0:1], v[0:1], 1.0
+; GFX11-TRUE16-NEXT: v_lshrrev_b32_e32 v22, 16, v21
+; GFX11-TRUE16-NEXT: v_lshrrev_b32_e32 v23, 16, v20
+; GFX11-TRUE16-NEXT: v_lshrrev_b32_e32 v24, 16, v19
+; GFX11-TRUE16-NEXT: v_lshrrev_b32_e32 v25, 16, v18
+; GFX11-TRUE16-NEXT: v_lshrrev_b32_e32 v26, 16, v17
+; GFX11-TRUE16-NEXT: v_lshrrev_b32_e32 v27, 16, v16
+; GFX11-TRUE16-NEXT: v_lshrrev_b32_e32 v28, 16, v15
+; GFX11-TRUE16-NEXT: v_lshrrev_b32_e32 v29, 16, v14
+; GFX11-TRUE16-NEXT: v_lshrrev_b32_e32 v30, 16, v13
+; GFX11-TRUE16-NEXT: v_lshrrev_b32_e32 v31, 16, v12
+; GFX11-TRUE16-NEXT: v_lshrrev_b32_e32 v32, 16, v11
+; GFX11-TRUE16-NEXT: v_lshrrev_b32_e32 v33, 16, v10
+; GFX11-TRUE16-NEXT: v_lshrrev_b32_e32 v34, 16, v9
+; GFX11-TRUE16-NEXT: v_lshrrev_b32_e32 v35, 16, v8
+; GFX11-TRUE16-NEXT: v_lshrrev_b32_e32 v36, 16, v7
+; GFX11-TRUE16-NEXT: v_lshrrev_b32_e32 v37, 16, v6
+; GFX11-TRUE16-NEXT: v_lshrrev_b32_e32 v38, 16, v5
+; GFX11-TRUE16-NEXT: v_lshrrev_b32_e32 v39, 16, v4
+; GFX11-TRUE16-NEXT: v_lshrrev_b32_e32 v48, 16, v3
+; GFX11-TRUE16-NEXT: v_lshrrev_b32_e32 v49, 16, v2
+; GFX11-TRUE16-NEXT: v_lshrrev_b32_e32 v50, 16, v1
+; GFX11-TRUE16-NEXT: v_lshrrev_b32_e32 v51, 16, v0
+; GFX11-TRUE16-NEXT: .LBB53_3: ; %end
+; GFX11-TRUE16-NEXT: s_delay_alu instid0(VALU_DEP_1) | instskip(NEXT) | instid1(VALU_DEP_4)
+; GFX11-TRUE16-NEXT: v_dual_mov_b32 v51, v51 :: v_dual_mov_b32 v50, v50
+; GFX11-TRUE16-NEXT: v_dual_mov_b32 v49, v49 :: v_dual_mov_b32 v48, v48
+; GFX11-TRUE16-NEXT: v_dual_mov_b32 v39, v39 :: v_dual_mov_b32 v38, v38
+; GFX11-TRUE16-NEXT: v_dual_mov_b32 v37, v37 :: v_dual_mov_b32 v36, v36
+; GFX11-TRUE16-NEXT: v_dual_mov_b32 v35, v35 :: v_dual_mov_b32 v34, v34
+; GFX11-TRUE16-NEXT: v_dual_mov_b32 v33, v33 :: v_dual_mov_b32 v32, v32
+; GFX11-TRUE16-NEXT: v_dual_mov_b32 v31, v31 :: v_dual_mov_b32 v30, v30
+; GFX11-TRUE16-NEXT: v_dual_mov_b32 v29, v29 :: v_dual_mov_b32 v28, v28
+; GFX11-TRUE16-NEXT: v_dual_mov_b32 v27, v27 :: v_dual_mov_b32 v26, v26
+; GFX11-TRUE16-NEXT: v_dual_mov_b32 v25, v25 :: v_dual_mov_b32 v24, v24
+; GFX11-TRUE16-NEXT: v_dual_mov_b32 v23, v23 :: v_dual_mov_b32 v22, v22
+; GFX11-TRUE16-NEXT: v_mov_b16_e32 v0.h, v51.l
+; GFX11-TRUE16-NEXT: v_mov_b16_e32 v1.h, v50.l
+; GFX11-TRUE16-NEXT: v_mov_b16_e32 v2.h, v49.l
+; GFX11-TRUE16-NEXT: v_mov_b16_e32 v3.h, v48.l
+; GFX11-TRUE16-NEXT: v_mov_b16_e32 v4.h, v39.l
+; GFX11-TRUE16-NEXT: v_mov_b16_e32 v5.h, v38.l
+; GFX11-TRUE16-NEXT: v_mov_b16_e32 v6.h, v37.l
+; GFX11-TRUE16-NEXT: v_mov_b16_e32 v7.h, v36.l
+; GFX11-TRUE16-NEXT: v_mov_b16_e32 v8.h, v35.l
+; GFX11-TRUE16-NEXT: v_mov_b16_e32 v9.h, v34.l
+; GFX11-TRUE16-NEXT: v_mov_b16_e32 v10.h, v33.l
+; GFX11-TRUE16-NEXT: v_mov_b16_e32 v11.h, v32.l
+; GFX11-TRUE16-NEXT: v_mov_b16_e32 v12.h, v31.l
+; GFX11-TRUE16-NEXT: v_mov_b16_e32 v13.h, v30.l
+; GFX11-TRUE16-NEXT: v_mov_b16_e32 v14.h, v29.l
+; GFX11-TRUE16-NEXT: v_mov_b16_e32 v15.h, v28.l
+; GFX11-TRUE16-NEXT: v_mov_b16_e32 v16.h, v27.l
+; GFX11-TRUE16-NEXT: v_mov_b16_e32 v17.h, v26.l
+; GFX11-TRUE16-NEXT: v_mov_b16_e32 v18.h, v25.l
+; GFX11-TRUE16-NEXT: v_mov_b16_e32 v19.h, v24.l
+; GFX11-TRUE16-NEXT: v_mov_b16_e32 v20.h, v23.l
+; GFX11-TRUE16-NEXT: v_mov_b16_e32 v21.h, v22.l
+; GFX11-TRUE16-NEXT: s_setpc_b64 s[30:31]
+; GFX11-TRUE16-NEXT: .LBB53_4:
+; GFX11-TRUE16-NEXT: ; implicit-def: $vgpr51
+; GFX11-TRUE16-NEXT: ; implicit-def: $vgpr50
+; GFX11-TRUE16-NEXT: ; implicit-def: $vgpr49
+; GFX11-TRUE16-NEXT: ; implicit-def: $vgpr48
+; GFX11-TRUE16-NEXT: ; implicit-def: $vgpr39
+; GFX11-TRUE16-NEXT: ; implicit-def: $vgpr38
+; GFX11-TRUE16-NEXT: ; implicit-def: $vgpr37
+; GFX11-TRUE16-NEXT: ; implicit-def: $vgpr36
+; GFX11-TRUE16-NEXT: ; implicit-def: $vgpr35
+; GFX11-TRUE16-NEXT: ; implicit-def: $vgpr34
+; GFX11-TRUE16-NEXT: ; implicit-def: $vgpr33
+; GFX11-TRUE16-NEXT: ; implicit-def: $vgpr32
+; GFX11-TRUE16-NEXT: ; implicit-def: $vgpr31
+; GFX11-TRUE16-NEXT: ; implicit-def: $vgpr30
+; GFX11-TRUE16-NEXT: ; implicit-def: $vgpr29
+; GFX11-TRUE16-NEXT: ; implicit-def: $vgpr28
+; GFX11-TRUE16-NEXT: ; implicit-def: $vgpr27
+; GFX11-TRUE16-NEXT: ; implicit-def: $vgpr26
+; GFX11-TRUE16-NEXT: ; implicit-def: $vgpr25
+; GFX11-TRUE16-NEXT: ; implicit-def: $vgpr24
+; GFX11-TRUE16-NEXT: ; implicit-def: $vgpr23
+; GFX11-TRUE16-NEXT: ; implicit-def: $vgpr22
+; GFX11-TRUE16-NEXT: s_branch .LBB53_2
+;
+; GFX11-FAKE16-LABEL: bitcast_v11f64_to_v44f16_scalar:
+; GFX11-FAKE16: ; %bb.0:
+; GFX11-FAKE16-NEXT: s_waitcnt vmcnt(0) expcnt(0) lgkmcnt(0)
+; GFX11-FAKE16-NEXT: v_cmp_ne_u32_e32 vcc_lo, 0, v4
+; GFX11-FAKE16-NEXT: v_dual_mov_b32 v22, s0 :: v_dual_mov_b32 v23, s1
+; GFX11-FAKE16-NEXT: v_dual_mov_b32 v20, s2 :: v_dual_mov_b32 v21, s3
+; GFX11-FAKE16-NEXT: v_dual_mov_b32 v5, s16 :: v_dual_mov_b32 v6, s17
+; GFX11-FAKE16-NEXT: v_dual_mov_b32 v18, s18 :: v_dual_mov_b32 v19, s19
+; GFX11-FAKE16-NEXT: v_dual_mov_b32 v9, s20 :: v_dual_mov_b32 v10, s21
+; GFX11-FAKE16-NEXT: v_dual_mov_b32 v7, s22 :: v_dual_mov_b32 v8, s23
+; GFX11-FAKE16-NEXT: v_dual_mov_b32 v14, s24 :: v_dual_mov_b32 v15, s25
+; GFX11-FAKE16-NEXT: v_dual_mov_b32 v12, s26 :: v_dual_mov_b32 v13, s27
+; GFX11-FAKE16-NEXT: v_dual_mov_b32 v16, s28 :: v_dual_mov_b32 v17, s29
+; GFX11-FAKE16-NEXT: s_mov_b32 s0, 0
+; GFX11-FAKE16-NEXT: s_and_b32 s1, vcc_lo, exec_lo
+; GFX11-FAKE16-NEXT: s_cbranch_scc0 .LBB53_4
+; GFX11-FAKE16-NEXT: ; %bb.1: ; %cmp.false
+; GFX11-FAKE16-NEXT: v_lshrrev_b32_e32 v26, 16, v3
+; GFX11-FAKE16-NEXT: v_lshrrev_b32_e32 v27, 16, v2
+; GFX11-FAKE16-NEXT: v_lshrrev_b32_e32 v28, 16, v1
+; GFX11-FAKE16-NEXT: v_lshrrev_b32_e32 v29, 16, v0
+; GFX11-FAKE16-NEXT: v_lshrrev_b32_e32 v30, 16, v17
+; GFX11-FAKE16-NEXT: v_lshrrev_b32_e32 v31, 16, v16
+; GFX11-FAKE16-NEXT: v_lshrrev_b32_e32 v32, 16, v13
+; GFX11-FAKE16-NEXT: v_lshrrev_b32_e32 v33, 16, v12
+; GFX11-FAKE16-NEXT: v_lshrrev_b32_e32 v34, 16, v15
+; GFX11-FAKE16-NEXT: v_lshrrev_b32_e32 v35, 16, v14
+; GFX11-FAKE16-NEXT: v_lshrrev_b32_e32 v11, 16, v8
+; GFX11-FAKE16-NEXT: v_lshrrev_b32_e32 v36, 16, v7
+; GFX11-FAKE16-NEXT: v_lshrrev_b32_e32 v37, 16, v10
+; GFX11-FAKE16-NEXT: v_lshrrev_b32_e32 v38, 16, v9
+; GFX11-FAKE16-NEXT: v_lshrrev_b32_e32 v39, 16, v19
+; GFX11-FAKE16-NEXT: v_lshrrev_b32_e32 v48, 16, v18
+; GFX11-FAKE16-NEXT: v_lshrrev_b32_e32 v49, 16, v6
+; GFX11-FAKE16-NEXT: v_lshrrev_b32_e32 v4, 16, v5
+; GFX11-FAKE16-NEXT: v_lshrrev_b32_e32 v50, 16, v21
+; GFX11-FAKE16-NEXT: v_lshrrev_b32_e32 v51, 16, v20
+; GFX11-FAKE16-NEXT: v_lshrrev_b32_e32 v25, 16, v23
+; GFX11-FAKE16-NEXT: v_lshrrev_b32_e32 v24, 16, v22
+; GFX11-FAKE16-NEXT: s_and_not1_b32 vcc_lo, exec_lo, s0
+; GFX11-FAKE16-NEXT: s_cbranch_vccnz .LBB53_3
+; GFX11-FAKE16-NEXT: .LBB53_2: ; %cmp.true
+; GFX11-FAKE16-NEXT: v_add_f64 v[2:3], v[2:3], 1.0
+; GFX11-FAKE16-NEXT: v_add_f64 v[0:1], v[0:1], 1.0
+; GFX11-FAKE16-NEXT: v_add_f64 v[16:17], v[16:17], 1.0
+; GFX11-FAKE16-NEXT: v_add_f64 v[12:13], v[12:13], 1.0
+; GFX11-FAKE16-NEXT: v_add_f64 v[14:15], v[14:15], 1.0
+; GFX11-FAKE16-NEXT: v_add_f64 v[7:8], v[7:8], 1.0
+; GFX11-FAKE16-NEXT: v_add_f64 v[9:10], v[9:10], 1.0
+; GFX11-FAKE16-NEXT: v_add_f64 v[18:19], v[18:19], 1.0
+; GFX11-FAKE16-NEXT: v_add_f64 v[5:6], v[5:6], 1.0
+; GFX11-FAKE16-NEXT: v_add_f64 v[20:21], v[20:21], 1.0
+; GFX11-FAKE16-NEXT: v_add_f64 v[22:23], v[22:23], 1.0
+; GFX11-FAKE16-NEXT: v_lshrrev_b32_e32 v26, 16, v3
+; GFX11-FAKE16-NEXT: v_lshrrev_b32_e32 v27, 16, v2
+; GFX11-FAKE16-NEXT: v_lshrrev_b32_e32 v28, 16, v1
+; GFX11-FAKE16-NEXT: v_lshrrev_b32_e32 v29, 16, v0
+; GFX11-FAKE16-NEXT: v_lshrrev_b32_e32 v30, 16, v17
+; GFX11-FAKE16-NEXT: v_lshrrev_b32_e32 v31, 16, v16
+; GFX11-FAKE16-NEXT: v_lshrrev_b32_e32 v32, 16, v13
+; GFX11-FAKE16-NEXT: v_lshrrev_b32_e32 v33, 16, v12
+; GFX11-FAKE16-NEXT: v_lshrrev_b32_e32 v34, 16, v15
+; GFX11-FAKE16-NEXT: v_lshrrev_b32_e32 v35, 16, v14
+; GFX11-FAKE16-NEXT: v_lshrrev_b32_e32 v11, 16, v8
+; GFX11-FAKE16-NEXT: v_lshrrev_b32_e32 v36, 16, v7
+; GFX11-FAKE16-NEXT: v_lshrrev_b32_e32 v37, 16, v10
+; GFX11-FAKE16-NEXT: v_lshrrev_b32_e32 v38, 16, v9
+; GFX11-FAKE16-NEXT: v_lshrrev_b32_e32 v39, 16, v19
+; GFX11-FAKE16-NEXT: v_lshrrev_b32_e32 v48, 16, v18
+; GFX11-FAKE16-NEXT: v_lshrrev_b32_e32 v49, 16, v6
+; GFX11-FAKE16-NEXT: v_lshrrev_b32_e32 v4, 16, v5
+; GFX11-FAKE16-NEXT: v_lshrrev_b32_e32 v50, 16, v21
+; GFX11-FAKE16-NEXT: v_lshrrev_b32_e32 v51, 16, v20
+; GFX11-FAKE16-NEXT: v_lshrrev_b32_e32 v25, 16, v23
+; GFX11-FAKE16-NEXT: v_lshrrev_b32_e32 v24, 16, v22
+; GFX11-FAKE16-NEXT: .LBB53_3: ; %end
+; GFX11-FAKE16-NEXT: v_and_b32_e32 v23, 0xffff, v23
+; GFX11-FAKE16-NEXT: v_and_b32_e32 v21, 0xffff, v21
+; GFX11-FAKE16-NEXT: v_and_b32_e32 v5, 0xffff, v5
+; GFX11-FAKE16-NEXT: v_and_b32_e32 v6, 0xffff, v6
+; GFX11-FAKE16-NEXT: v_and_b32_e32 v18, 0xffff, v18
+; GFX11-FAKE16-NEXT: v_lshl_or_b32 v25, v25, 16, v23
+; GFX11-FAKE16-NEXT: v_and_b32_e32 v22, 0xffff, v22
+; GFX11-FAKE16-NEXT: v_lshl_or_b32 v23, v50, 16, v21
+; GFX11-FAKE16-NEXT: v_and_b32_e32 v20, 0xffff, v20
+; GFX11-FAKE16-NEXT: v_lshl_or_b32 v4, v4, 16, v5
+; GFX11-FAKE16-NEXT: v_lshl_or_b32 v5, v49, 16, v6
+; GFX11-FAKE16-NEXT: v_lshl_or_b32 v6, v48, 16, v18
+; GFX11-FAKE16-NEXT: v_and_b32_e32 v18, 0xffff, v19
+; GFX11-FAKE16-NEXT: v_and_b32_e32 v9, 0xffff, v9
+; GFX11-FAKE16-NEXT: v_and_b32_e32 v10, 0xffff, v10
+; GFX11-FAKE16-NEXT: v_and_b32_e32 v19, 0xffff, v7
+; GFX11-FAKE16-NEXT: v_and_b32_e32 v3, 0xffff, v3
+; GFX11-FAKE16-NEXT: v_lshl_or_b32 v24, v24, 16, v22
+; GFX11-FAKE16-NEXT: v_lshl_or_b32 v7, v39, 16, v18
+; GFX11-FAKE16-NEXT: v_and_b32_e32 v14, 0xffff, v14
+; GFX11-FAKE16-NEXT: v_and_b32_e32 v15, 0xffff, v15
+; GFX11-FAKE16-NEXT: v_and_b32_e32 v18, 0xffff, v12
+; GFX11-FAKE16-NEXT: v_and_b32_e32 v16, 0xffff, v16
+; GFX11-FAKE16-NEXT: v_and_b32_e32 v17, 0xffff, v17
+; GFX11-FAKE16-NEXT: v_and_b32_e32 v0, 0xffff, v0
+; GFX11-FAKE16-NEXT: v_and_b32_e32 v1, 0xffff, v1
+; GFX11-FAKE16-NEXT: v_and_b32_e32 v2, 0xffff, v2
+; GFX11-FAKE16-NEXT: v_lshl_or_b32 v21, v26, 16, v3
+; GFX11-FAKE16-NEXT: v_mov_b32_e32 v3, v23
+; GFX11-FAKE16-NEXT: v_lshl_or_b32 v22, v51, 16, v20
+; GFX11-FAKE16-NEXT: v_and_b32_e32 v20, 0xffff, v8
+; GFX11-FAKE16-NEXT: v_lshl_or_b32 v8, v38, 16, v9
+; GFX11-FAKE16-NEXT: v_lshl_or_b32 v9, v37, 16, v10
+; GFX11-FAKE16-NEXT: v_lshl_or_b32 v10, v36, 16, v19
+; GFX11-FAKE16-NEXT: v_and_b32_e32 v19, 0xffff, v13
+; GFX11-FAKE16-NEXT: v_lshl_or_b32 v11, v11, 16, v20
+; GFX11-FAKE16-NEXT: v_lshl_or_b32 v12, v35, 16, v14
+; GFX11-FAKE16-NEXT: v_lshl_or_b32 v13, v34, 16, v15
+; GFX11-FAKE16-NEXT: v_lshl_or_b32 v14, v33, 16, v18
+; GFX11-FAKE16-NEXT: v_lshl_or_b32 v15, v32, 16, v19
+; GFX11-FAKE16-NEXT: v_lshl_or_b32 v16, v31, 16, v16
+; GFX11-FAKE16-NEXT: v_lshl_or_b32 v17, v30, 16, v17
+; GFX11-FAKE16-NEXT: v_lshl_or_b32 v18, v29, 16, v0
+; GFX11-FAKE16-NEXT: v_lshl_or_b32 v19, v28, 16, v1
+; GFX11-FAKE16-NEXT: v_lshl_or_b32 v20, v27, 16, v2
+; GFX11-FAKE16-NEXT: v_dual_mov_b32 v0, v24 :: v_dual_mov_b32 v1, v25
+; GFX11-FAKE16-NEXT: v_mov_b32_e32 v2, v22
+; GFX11-FAKE16-NEXT: s_setpc_b64 s[30:31]
+; GFX11-FAKE16-NEXT: .LBB53_4:
+; GFX11-FAKE16-NEXT: ; implicit-def: $vgpr24
+; GFX11-FAKE16-NEXT: ; implicit-def: $vgpr25
+; GFX11-FAKE16-NEXT: ; implicit-def: $vgpr51
+; GFX11-FAKE16-NEXT: ; implicit-def: $vgpr50
+; GFX11-FAKE16-NEXT: ; implicit-def: $vgpr4
+; GFX11-FAKE16-NEXT: ; implicit-def: $vgpr49
+; GFX11-FAKE16-NEXT: ; implicit-def: $vgpr48
+; GFX11-FAKE16-NEXT: ; implicit-def: $vgpr39
+; GFX11-FAKE16-NEXT: ; implicit-def: $vgpr38
+; GFX11-FAKE16-NEXT: ; implicit-def: $vgpr37
+; GFX11-FAKE16-NEXT: ; implicit-def: $vgpr36
+; GFX11-FAKE16-NEXT: ; implicit-def: $vgpr11
+; GFX11-FAKE16-NEXT: ; implicit-def: $vgpr35
+; GFX11-FAKE16-NEXT: ; implicit-def: $vgpr34
+; GFX11-FAKE16-NEXT: ; implicit-def: $vgpr33
+; GFX11-FAKE16-NEXT: ; implicit-def: $vgpr32
+; GFX11-FAKE16-NEXT: ; implicit-def: $vgpr31
+; GFX11-FAKE16-NEXT: ; implicit-def: $vgpr30
+; GFX11-FAKE16-NEXT: ; implicit-def: $vgpr29
+; GFX11-FAKE16-NEXT: ; implicit-def: $vgpr28
+; GFX11-FAKE16-NEXT: ; implicit-def: $vgpr27
+; GFX11-FAKE16-NEXT: ; implicit-def: $vgpr26
+; GFX11-FAKE16-NEXT: s_branch .LBB53_2
%cmp = icmp eq i32 %b, 0
br i1 %cmp, label %cmp.true, label %cmp.false
@@ -32224,105 +33995,278 @@ define inreg <11 x double> @bitcast_v44f16_to_v11f64_scalar(<44 x half> inreg %a
; GFX11-TRUE16-LABEL: bitcast_v44f16_to_v11f64_scalar:
; GFX11-TRUE16: ; %bb.0:
; GFX11-TRUE16-NEXT: s_waitcnt vmcnt(0) expcnt(0) lgkmcnt(0)
-; GFX11-TRUE16-NEXT: v_mov_b16_e32 v32.h, 0
; GFX11-TRUE16-NEXT: v_cmp_ne_u32_e32 vcc_lo, 0, v4
-; GFX11-TRUE16-NEXT: v_mov_b16_e32 v32.l, v3.h
-; GFX11-TRUE16-NEXT: v_mov_b16_e32 v33.l, v2.h
-; GFX11-TRUE16-NEXT: v_mov_b16_e32 v34.l, v1.h
-; GFX11-TRUE16-NEXT: v_mov_b16_e32 v33.h, v32.h
-; GFX11-TRUE16-NEXT: v_mov_b16_e32 v34.h, v32.h
-; GFX11-TRUE16-NEXT: v_mov_b16_e32 v35.l, v0.h
-; GFX11-TRUE16-NEXT: v_mov_b16_e32 v35.h, v32.h
-; GFX11-TRUE16-NEXT: v_and_b32_e32 v39, 0xffff, v0
-; GFX11-TRUE16-NEXT: v_and_b32_e32 v38, 0xffff, v1
-; GFX11-TRUE16-NEXT: v_and_b32_e32 v37, 0xffff, v2
-; GFX11-TRUE16-NEXT: v_and_b32_e32 v36, 0xffff, v3
-; GFX11-TRUE16-NEXT: s_lshr_b32 s41, s29, 16
-; GFX11-TRUE16-NEXT: s_lshr_b32 s42, s28, 16
-; GFX11-TRUE16-NEXT: s_lshr_b32 s43, s27, 16
-; GFX11-TRUE16-NEXT: s_lshr_b32 s15, s26, 16
-; GFX11-TRUE16-NEXT: s_lshr_b32 s14, s25, 16
-; GFX11-TRUE16-NEXT: s_lshr_b32 s13, s24, 16
-; GFX11-TRUE16-NEXT: s_lshr_b32 s12, s23, 16
-; GFX11-TRUE16-NEXT: s_lshr_b32 s11, s22, 16
-; GFX11-TRUE16-NEXT: s_lshr_b32 s10, s21, 16
-; GFX11-TRUE16-NEXT: s_lshr_b32 s9, s20, 16
-; GFX11-TRUE16-NEXT: s_lshr_b32 s8, s19, 16
-; GFX11-TRUE16-NEXT: s_lshr_b32 s7, s18, 16
-; GFX11-TRUE16-NEXT: s_lshr_b32 s6, s17, 16
-; GFX11-TRUE16-NEXT: s_lshr_b32 s5, s16, 16
-; GFX11-TRUE16-NEXT: s_lshr_b32 s44, s3, 16
-; GFX11-TRUE16-NEXT: s_lshr_b32 s45, s2, 16
-; GFX11-TRUE16-NEXT: s_lshr_b32 s46, s1, 16
-; GFX11-TRUE16-NEXT: s_lshr_b32 s4, s0, 16
-; GFX11-TRUE16-NEXT: s_mov_b32 s40, 0
-; GFX11-TRUE16-NEXT: s_pack_ll_b32_b16 s4, s0, s4
-; GFX11-TRUE16-NEXT: s_pack_ll_b32_b16 s1, s1, s46
-; GFX11-TRUE16-NEXT: s_pack_ll_b32_b16 s2, s2, s45
-; GFX11-TRUE16-NEXT: s_pack_ll_b32_b16 s3, s3, s44
-; GFX11-TRUE16-NEXT: s_pack_ll_b32_b16 s5, s16, s5
-; GFX11-TRUE16-NEXT: s_pack_ll_b32_b16 s6, s17, s6
-; GFX11-TRUE16-NEXT: s_pack_ll_b32_b16 s7, s18, s7
-; GFX11-TRUE16-NEXT: s_pack_ll_b32_b16 s8, s19, s8
-; GFX11-TRUE16-NEXT: s_pack_ll_b32_b16 s9, s20, s9
-; GFX11-TRUE16-NEXT: s_pack_ll_b32_b16 s10, s21, s10
-; GFX11-TRUE16-NEXT: s_pack_ll_b32_b16 s11, s22, s11
-; GFX11-TRUE16-NEXT: s_pack_ll_b32_b16 s12, s23, s12
-; GFX11-TRUE16-NEXT: s_pack_ll_b32_b16 s13, s24, s13
-; GFX11-TRUE16-NEXT: s_pack_ll_b32_b16 s14, s25, s14
-; GFX11-TRUE16-NEXT: s_pack_ll_b32_b16 s15, s26, s15
-; GFX11-TRUE16-NEXT: s_pack_ll_b32_b16 s16, s27, s43
-; GFX11-TRUE16-NEXT: s_pack_ll_b32_b16 s17, s28, s42
-; GFX11-TRUE16-NEXT: s_pack_ll_b32_b16 s0, s29, s41
+; GFX11-TRUE16-NEXT: s_clause 0x1f
+; GFX11-TRUE16-NEXT: scratch_store_b32 off, v40, s32 offset:304
+; GFX11-TRUE16-NEXT: scratch_store_b32 off, v41, s32 offset:300
+; GFX11-TRUE16-NEXT: scratch_store_b32 off, v42, s32 offset:296
+; GFX11-TRUE16-NEXT: scratch_store_b32 off, v43, s32 offset:292
+; GFX11-TRUE16-NEXT: scratch_store_b32 off, v44, s32 offset:288
+; GFX11-TRUE16-NEXT: scratch_store_b32 off, v45, s32 offset:284
+; GFX11-TRUE16-NEXT: scratch_store_b32 off, v46, s32 offset:280
+; GFX11-TRUE16-NEXT: scratch_store_b32 off, v47, s32 offset:276
+; GFX11-TRUE16-NEXT: scratch_store_b32 off, v56, s32 offset:272
+; GFX11-TRUE16-NEXT: scratch_store_b32 off, v57, s32 offset:268
+; GFX11-TRUE16-NEXT: scratch_store_b32 off, v58, s32 offset:264
+; GFX11-TRUE16-NEXT: scratch_store_b32 off, v59, s32 offset:260
+; GFX11-TRUE16-NEXT: scratch_store_b32 off, v60, s32 offset:256
+; GFX11-TRUE16-NEXT: scratch_store_b32 off, v61, s32 offset:252
+; GFX11-TRUE16-NEXT: scratch_store_b32 off, v62, s32 offset:248
+; GFX11-TRUE16-NEXT: scratch_store_b32 off, v63, s32 offset:244
+; GFX11-TRUE16-NEXT: scratch_store_b32 off, v72, s32 offset:240
+; GFX11-TRUE16-NEXT: scratch_store_b32 off, v73, s32 offset:236
+; GFX11-TRUE16-NEXT: scratch_store_b32 off, v74, s32 offset:232
+; GFX11-TRUE16-NEXT: scratch_store_b32 off, v75, s32 offset:228
+; GFX11-TRUE16-NEXT: scratch_store_b32 off, v76, s32 offset:224
+; GFX11-TRUE16-NEXT: scratch_store_b32 off, v77, s32 offset:220
+; GFX11-TRUE16-NEXT: scratch_store_b32 off, v78, s32 offset:216
+; GFX11-TRUE16-NEXT: scratch_store_b32 off, v79, s32 offset:212
+; GFX11-TRUE16-NEXT: scratch_store_b32 off, v88, s32 offset:208
+; GFX11-TRUE16-NEXT: scratch_store_b32 off, v89, s32 offset:204
+; GFX11-TRUE16-NEXT: scratch_store_b32 off, v90, s32 offset:200
+; GFX11-TRUE16-NEXT: scratch_store_b32 off, v91, s32 offset:196
+; GFX11-TRUE16-NEXT: scratch_store_b32 off, v92, s32 offset:192
+; GFX11-TRUE16-NEXT: scratch_store_b32 off, v93, s32 offset:188
+; GFX11-TRUE16-NEXT: scratch_store_b32 off, v94, s32 offset:184
+; GFX11-TRUE16-NEXT: scratch_store_b32 off, v95, s32 offset:180
+; GFX11-TRUE16-NEXT: s_clause 0x1f
+; GFX11-TRUE16-NEXT: scratch_store_b32 off, v104, s32 offset:176
+; GFX11-TRUE16-NEXT: scratch_store_b32 off, v105, s32 offset:172
+; GFX11-TRUE16-NEXT: scratch_store_b32 off, v106, s32 offset:168
+; GFX11-TRUE16-NEXT: scratch_store_b32 off, v107, s32 offset:164
+; GFX11-TRUE16-NEXT: scratch_store_b32 off, v108, s32 offset:160
+; GFX11-TRUE16-NEXT: scratch_store_b32 off, v109, s32 offset:156
+; GFX11-TRUE16-NEXT: scratch_store_b32 off, v110, s32 offset:152
+; GFX11-TRUE16-NEXT: scratch_store_b32 off, v111, s32 offset:148
+; GFX11-TRUE16-NEXT: scratch_store_b32 off, v120, s32 offset:144
+; GFX11-TRUE16-NEXT: scratch_store_b32 off, v121, s32 offset:140
+; GFX11-TRUE16-NEXT: scratch_store_b32 off, v122, s32 offset:136
+; GFX11-TRUE16-NEXT: scratch_store_b32 off, v123, s32 offset:132
+; GFX11-TRUE16-NEXT: scratch_store_b32 off, v124, s32 offset:128
+; GFX11-TRUE16-NEXT: scratch_store_b32 off, v125, s32 offset:124
+; GFX11-TRUE16-NEXT: scratch_store_b32 off, v126, s32 offset:120
+; GFX11-TRUE16-NEXT: scratch_store_b32 off, v127, s32 offset:116
+; GFX11-TRUE16-NEXT: scratch_store_b32 off, v136, s32 offset:112
+; GFX11-TRUE16-NEXT: scratch_store_b32 off, v137, s32 offset:108
+; GFX11-TRUE16-NEXT: scratch_store_b32 off, v138, s32 offset:104
+; GFX11-TRUE16-NEXT: scratch_store_b32 off, v139, s32 offset:100
+; GFX11-TRUE16-NEXT: scratch_store_b32 off, v140, s32 offset:96
+; GFX11-TRUE16-NEXT: scratch_store_b32 off, v141, s32 offset:92
+; GFX11-TRUE16-NEXT: scratch_store_b32 off, v142, s32 offset:88
+; GFX11-TRUE16-NEXT: scratch_store_b32 off, v143, s32 offset:84
+; GFX11-TRUE16-NEXT: scratch_store_b32 off, v152, s32 offset:80
+; GFX11-TRUE16-NEXT: scratch_store_b32 off, v153, s32 offset:76
+; GFX11-TRUE16-NEXT: scratch_store_b32 off, v154, s32 offset:72
+; GFX11-TRUE16-NEXT: scratch_store_b32 off, v155, s32 offset:68
+; GFX11-TRUE16-NEXT: scratch_store_b32 off, v156, s32 offset:64
+; GFX11-TRUE16-NEXT: scratch_store_b32 off, v157, s32 offset:60
+; GFX11-TRUE16-NEXT: scratch_store_b32 off, v158, s32 offset:56
+; GFX11-TRUE16-NEXT: scratch_store_b32 off, v159, s32 offset:52
+; GFX11-TRUE16-NEXT: s_clause 0xc
+; GFX11-TRUE16-NEXT: scratch_store_b32 off, v168, s32 offset:48
+; GFX11-TRUE16-NEXT: scratch_store_b32 off, v169, s32 offset:44
+; GFX11-TRUE16-NEXT: scratch_store_b32 off, v170, s32 offset:40
+; GFX11-TRUE16-NEXT: scratch_store_b32 off, v171, s32 offset:36
+; GFX11-TRUE16-NEXT: scratch_store_b32 off, v172, s32 offset:32
+; GFX11-TRUE16-NEXT: scratch_store_b32 off, v173, s32 offset:28
+; GFX11-TRUE16-NEXT: scratch_store_b32 off, v174, s32 offset:24
+; GFX11-TRUE16-NEXT: scratch_store_b32 off, v175, s32 offset:20
+; GFX11-TRUE16-NEXT: scratch_store_b32 off, v184, s32 offset:16
+; GFX11-TRUE16-NEXT: scratch_store_b32 off, v185, s32 offset:12
+; GFX11-TRUE16-NEXT: scratch_store_b32 off, v186, s32 offset:8
+; GFX11-TRUE16-NEXT: scratch_store_b32 off, v187, s32 offset:4
+; GFX11-TRUE16-NEXT: scratch_store_b32 off, v188, s32
+; GFX11-TRUE16-NEXT: v_dual_mov_b32 v185, v3 :: v_dual_mov_b32 v186, v2
+; GFX11-TRUE16-NEXT: v_dual_mov_b32 v187, v1 :: v_dual_mov_b32 v188, v0
+; GFX11-TRUE16-NEXT: s_lshr_b32 s15, s29, 16
+; GFX11-TRUE16-NEXT: s_lshr_b32 s14, s28, 16
+; GFX11-TRUE16-NEXT: s_lshr_b32 s13, s27, 16
+; GFX11-TRUE16-NEXT: s_lshr_b32 s12, s26, 16
+; GFX11-TRUE16-NEXT: s_lshr_b32 s11, s25, 16
+; GFX11-TRUE16-NEXT: s_lshr_b32 s10, s24, 16
+; GFX11-TRUE16-NEXT: s_lshr_b32 s9, s23, 16
+; GFX11-TRUE16-NEXT: s_lshr_b32 s8, s22, 16
+; GFX11-TRUE16-NEXT: s_lshr_b32 s7, s21, 16
+; GFX11-TRUE16-NEXT: s_lshr_b32 s6, s20, 16
+; GFX11-TRUE16-NEXT: s_lshr_b32 s5, s19, 16
+; GFX11-TRUE16-NEXT: s_lshr_b32 s4, s18, 16
+; GFX11-TRUE16-NEXT: s_lshr_b32 s43, s17, 16
+; GFX11-TRUE16-NEXT: s_lshr_b32 s44, s16, 16
+; GFX11-TRUE16-NEXT: s_lshr_b32 s45, s3, 16
+; GFX11-TRUE16-NEXT: s_lshr_b32 s46, s2, 16
+; GFX11-TRUE16-NEXT: s_lshr_b32 s41, s1, 16
+; GFX11-TRUE16-NEXT: s_lshr_b32 s40, s0, 16
+; GFX11-TRUE16-NEXT: s_mov_b32 s42, 0
+; GFX11-TRUE16-NEXT: s_pack_ll_b32_b16 s40, s0, s40
+; GFX11-TRUE16-NEXT: s_pack_ll_b32_b16 s41, s1, s41
+; GFX11-TRUE16-NEXT: s_pack_ll_b32_b16 s0, s2, s46
+; GFX11-TRUE16-NEXT: s_pack_ll_b32_b16 s1, s3, s45
+; GFX11-TRUE16-NEXT: s_pack_ll_b32_b16 s2, s16, s44
+; GFX11-TRUE16-NEXT: s_pack_ll_b32_b16 s3, s17, s43
+; GFX11-TRUE16-NEXT: s_pack_ll_b32_b16 s4, s18, s4
+; GFX11-TRUE16-NEXT: s_pack_ll_b32_b16 s5, s19, s5
+; GFX11-TRUE16-NEXT: s_pack_ll_b32_b16 s6, s20, s6
+; GFX11-TRUE16-NEXT: s_pack_ll_b32_b16 s7, s21, s7
+; GFX11-TRUE16-NEXT: s_pack_ll_b32_b16 s8, s22, s8
+; GFX11-TRUE16-NEXT: s_pack_ll_b32_b16 s9, s23, s9
+; GFX11-TRUE16-NEXT: s_pack_ll_b32_b16 s10, s24, s10
+; GFX11-TRUE16-NEXT: s_pack_ll_b32_b16 s11, s25, s11
+; GFX11-TRUE16-NEXT: s_pack_ll_b32_b16 s12, s26, s12
+; GFX11-TRUE16-NEXT: s_pack_ll_b32_b16 s13, s27, s13
+; GFX11-TRUE16-NEXT: s_pack_ll_b32_b16 s14, s28, s14
+; GFX11-TRUE16-NEXT: s_pack_ll_b32_b16 s15, s29, s15
; GFX11-TRUE16-NEXT: s_and_b32 s47, vcc_lo, exec_lo
; GFX11-TRUE16-NEXT: s_cbranch_scc0 .LBB55_4
; GFX11-TRUE16-NEXT: ; %bb.1: ; %cmp.false
-; GFX11-TRUE16-NEXT: v_lshl_or_b32 v18, v35, 16, v39
-; GFX11-TRUE16-NEXT: v_lshl_or_b32 v19, v34, 16, v38
-; GFX11-TRUE16-NEXT: v_lshl_or_b32 v20, v33, 16, v37
-; GFX11-TRUE16-NEXT: v_lshl_or_b32 v21, v32, 16, v36
-; GFX11-TRUE16-NEXT: v_dual_mov_b32 v0, s4 :: v_dual_mov_b32 v1, s1
-; GFX11-TRUE16-NEXT: v_dual_mov_b32 v2, s2 :: v_dual_mov_b32 v3, s3
-; GFX11-TRUE16-NEXT: v_dual_mov_b32 v4, s5 :: v_dual_mov_b32 v5, s6
-; GFX11-TRUE16-NEXT: v_dual_mov_b32 v6, s7 :: v_dual_mov_b32 v7, s8
-; GFX11-TRUE16-NEXT: v_dual_mov_b32 v8, s9 :: v_dual_mov_b32 v9, s10
-; GFX11-TRUE16-NEXT: v_dual_mov_b32 v10, s11 :: v_dual_mov_b32 v11, s12
-; GFX11-TRUE16-NEXT: v_dual_mov_b32 v12, s13 :: v_dual_mov_b32 v13, s14
-; GFX11-TRUE16-NEXT: v_dual_mov_b32 v14, s15 :: v_dual_mov_b32 v15, s16
-; GFX11-TRUE16-NEXT: v_dual_mov_b32 v16, s17 :: v_dual_mov_b32 v17, s0
-; GFX11-TRUE16-NEXT: s_and_not1_b32 vcc_lo, exec_lo, s40
+; GFX11-TRUE16-NEXT: v_dual_mov_b32 v0, s40 :: v_dual_mov_b32 v5, s0
+; GFX11-TRUE16-NEXT: v_dual_mov_b32 v2, s41 :: v_dual_mov_b32 v9, s1
+; GFX11-TRUE16-NEXT: v_dual_mov_b32 v14, s2 :: v_dual_mov_b32 v27, s4
+; GFX11-TRUE16-NEXT: v_dual_mov_b32 v20, s3 :: v_dual_mov_b32 v35, s5
+; GFX11-TRUE16-NEXT: v_dual_mov_b32 v44, s6 :: v_dual_mov_b32 v65, s8
+; GFX11-TRUE16-NEXT: v_dual_mov_b32 v54, s7 :: v_dual_mov_b32 v77, s9
+; GFX11-TRUE16-NEXT: v_dual_mov_b32 v90, s10 :: v_dual_mov_b32 v119, s12
+; GFX11-TRUE16-NEXT: v_dual_mov_b32 v104, s11 :: v_dual_mov_b32 v135, s13
+; GFX11-TRUE16-NEXT: v_mov_b32_e32 v152, s14
+; GFX11-TRUE16-NEXT: v_mov_b32_e32 v170, s15
+; GFX11-TRUE16-NEXT: s_and_not1_b32 vcc_lo, exec_lo, s42
; GFX11-TRUE16-NEXT: s_cbranch_vccnz .LBB55_3
; GFX11-TRUE16-NEXT: .LBB55_2: ; %cmp.true
-; GFX11-TRUE16-NEXT: v_lshl_or_b32 v18, v35, 16, v39
-; GFX11-TRUE16-NEXT: v_lshl_or_b32 v19, v34, 16, v38
-; GFX11-TRUE16-NEXT: v_lshl_or_b32 v20, v33, 16, v37
-; GFX11-TRUE16-NEXT: v_lshl_or_b32 v21, v32, 16, v36
-; GFX11-TRUE16-NEXT: v_pk_add_f16 v0, 0x200, s4 op_sel_hi:[0,1]
-; GFX11-TRUE16-NEXT: v_pk_add_f16 v1, 0x200, s1 op_sel_hi:[0,1]
-; GFX11-TRUE16-NEXT: v_pk_add_f16 v2, 0x200, s2 op_sel_hi:[0,1]
-; GFX11-TRUE16-NEXT: v_pk_add_f16 v3, 0x200, s3 op_sel_hi:[0,1]
-; GFX11-TRUE16-NEXT: v_pk_add_f16 v4, 0x200, s5 op_sel_hi:[0,1]
-; GFX11-TRUE16-NEXT: v_pk_add_f16 v5, 0x200, s6 op_sel_hi:[0,1]
-; GFX11-TRUE16-NEXT: v_pk_add_f16 v6, 0x200, s7 op_sel_hi:[0,1]
-; GFX11-TRUE16-NEXT: v_pk_add_f16 v7, 0x200, s8 op_sel_hi:[0,1]
-; GFX11-TRUE16-NEXT: v_pk_add_f16 v8, 0x200, s9 op_sel_hi:[0,1]
-; GFX11-TRUE16-NEXT: v_pk_add_f16 v9, 0x200, s10 op_sel_hi:[0,1]
-; GFX11-TRUE16-NEXT: v_pk_add_f16 v10, 0x200, s11 op_sel_hi:[0,1]
-; GFX11-TRUE16-NEXT: v_pk_add_f16 v11, 0x200, s12 op_sel_hi:[0,1]
-; GFX11-TRUE16-NEXT: v_pk_add_f16 v12, 0x200, s13 op_sel_hi:[0,1]
-; GFX11-TRUE16-NEXT: v_pk_add_f16 v13, 0x200, s14 op_sel_hi:[0,1]
-; GFX11-TRUE16-NEXT: v_pk_add_f16 v14, 0x200, s15 op_sel_hi:[0,1]
-; GFX11-TRUE16-NEXT: v_pk_add_f16 v15, 0x200, s16 op_sel_hi:[0,1]
-; GFX11-TRUE16-NEXT: v_pk_add_f16 v16, 0x200, s17 op_sel_hi:[0,1]
-; GFX11-TRUE16-NEXT: v_pk_add_f16 v17, 0x200, s0 op_sel_hi:[0,1]
-; GFX11-TRUE16-NEXT: v_pk_add_f16 v18, 0x200, v18 op_sel_hi:[0,1]
-; GFX11-TRUE16-NEXT: v_pk_add_f16 v19, 0x200, v19 op_sel_hi:[0,1]
-; GFX11-TRUE16-NEXT: v_pk_add_f16 v20, 0x200, v20 op_sel_hi:[0,1]
-; GFX11-TRUE16-NEXT: v_pk_add_f16 v21, 0x200, v21 op_sel_hi:[0,1]
+; GFX11-TRUE16-NEXT: v_pk_add_f16 v0, 0x200, s40 op_sel_hi:[0,1]
+; GFX11-TRUE16-NEXT: v_pk_add_f16 v2, 0x200, s41 op_sel_hi:[0,1]
+; GFX11-TRUE16-NEXT: v_pk_add_f16 v188, 0x200, v188 op_sel_hi:[0,1]
+; GFX11-TRUE16-NEXT: v_pk_add_f16 v187, 0x200, v187 op_sel_hi:[0,1]
+; GFX11-TRUE16-NEXT: v_pk_add_f16 v186, 0x200, v186 op_sel_hi:[0,1]
+; GFX11-TRUE16-NEXT: v_pk_add_f16 v185, 0x200, v185 op_sel_hi:[0,1]
+; GFX11-TRUE16-NEXT: v_pk_add_f16 v5, 0x200, s0 op_sel_hi:[0,1]
+; GFX11-TRUE16-NEXT: v_pk_add_f16 v9, 0x200, s1 op_sel_hi:[0,1]
+; GFX11-TRUE16-NEXT: v_pk_add_f16 v14, 0x200, s2 op_sel_hi:[0,1]
+; GFX11-TRUE16-NEXT: v_pk_add_f16 v20, 0x200, s3 op_sel_hi:[0,1]
+; GFX11-TRUE16-NEXT: v_pk_add_f16 v27, 0x200, s4 op_sel_hi:[0,1]
+; GFX11-TRUE16-NEXT: v_pk_add_f16 v35, 0x200, s5 op_sel_hi:[0,1]
+; GFX11-TRUE16-NEXT: v_pk_add_f16 v44, 0x200, s6 op_sel_hi:[0,1]
+; GFX11-TRUE16-NEXT: v_pk_add_f16 v54, 0x200, s7 op_sel_hi:[0,1]
+; GFX11-TRUE16-NEXT: v_pk_add_f16 v65, 0x200, s8 op_sel_hi:[0,1]
+; GFX11-TRUE16-NEXT: v_pk_add_f16 v77, 0x200, s9 op_sel_hi:[0,1]
+; GFX11-TRUE16-NEXT: v_pk_add_f16 v90, 0x200, s10 op_sel_hi:[0,1]
+; GFX11-TRUE16-NEXT: v_pk_add_f16 v104, 0x200, s11 op_sel_hi:[0,1]
+; GFX11-TRUE16-NEXT: v_pk_add_f16 v119, 0x200, s12 op_sel_hi:[0,1]
+; GFX11-TRUE16-NEXT: v_pk_add_f16 v135, 0x200, s13 op_sel_hi:[0,1]
+; GFX11-TRUE16-NEXT: v_pk_add_f16 v152, 0x200, s14 op_sel_hi:[0,1]
+; GFX11-TRUE16-NEXT: v_pk_add_f16 v170, 0x200, s15 op_sel_hi:[0,1]
; GFX11-TRUE16-NEXT: .LBB55_3: ; %end
+; GFX11-TRUE16-NEXT: v_dual_mov_b32 v1, v2 :: v_dual_mov_b32 v2, v5
+; GFX11-TRUE16-NEXT: v_dual_mov_b32 v5, v20 :: v_dual_mov_b32 v6, v27
+; GFX11-TRUE16-NEXT: v_dual_mov_b32 v7, v35 :: v_dual_mov_b32 v8, v44
+; GFX11-TRUE16-NEXT: v_dual_mov_b32 v11, v77 :: v_dual_mov_b32 v12, v90
+; GFX11-TRUE16-NEXT: v_mov_b32_e32 v13, v104
+; GFX11-TRUE16-NEXT: v_dual_mov_b32 v15, v135 :: v_dual_mov_b32 v16, v152
+; GFX11-TRUE16-NEXT: v_dual_mov_b32 v17, v170 :: v_dual_mov_b32 v18, v188
+; GFX11-TRUE16-NEXT: v_dual_mov_b32 v19, v187 :: v_dual_mov_b32 v20, v186
+; GFX11-TRUE16-NEXT: v_mov_b32_e32 v21, v185
+; GFX11-TRUE16-NEXT: s_clause 0x1f
+; GFX11-TRUE16-NEXT: scratch_load_b32 v188, off, s32
+; GFX11-TRUE16-NEXT: scratch_load_b32 v187, off, s32 offset:4
+; GFX11-TRUE16-NEXT: scratch_load_b32 v186, off, s32 offset:8
+; GFX11-TRUE16-NEXT: scratch_load_b32 v185, off, s32 offset:12
+; GFX11-TRUE16-NEXT: scratch_load_b32 v184, off, s32 offset:16
+; GFX11-TRUE16-NEXT: scratch_load_b32 v175, off, s32 offset:20
+; GFX11-TRUE16-NEXT: scratch_load_b32 v174, off, s32 offset:24
+; GFX11-TRUE16-NEXT: scratch_load_b32 v173, off, s32 offset:28
+; GFX11-TRUE16-NEXT: scratch_load_b32 v172, off, s32 offset:32
+; GFX11-TRUE16-NEXT: scratch_load_b32 v171, off, s32 offset:36
+; GFX11-TRUE16-NEXT: scratch_load_b32 v170, off, s32 offset:40
+; GFX11-TRUE16-NEXT: scratch_load_b32 v169, off, s32 offset:44
+; GFX11-TRUE16-NEXT: scratch_load_b32 v168, off, s32 offset:48
+; GFX11-TRUE16-NEXT: scratch_load_b32 v159, off, s32 offset:52
+; GFX11-TRUE16-NEXT: scratch_load_b32 v158, off, s32 offset:56
+; GFX11-TRUE16-NEXT: scratch_load_b32 v157, off, s32 offset:60
+; GFX11-TRUE16-NEXT: scratch_load_b32 v156, off, s32 offset:64
+; GFX11-TRUE16-NEXT: scratch_load_b32 v155, off, s32 offset:68
+; GFX11-TRUE16-NEXT: scratch_load_b32 v154, off, s32 offset:72
+; GFX11-TRUE16-NEXT: scratch_load_b32 v153, off, s32 offset:76
+; GFX11-TRUE16-NEXT: scratch_load_b32 v152, off, s32 offset:80
+; GFX11-TRUE16-NEXT: scratch_load_b32 v143, off, s32 offset:84
+; GFX11-TRUE16-NEXT: scratch_load_b32 v142, off, s32 offset:88
+; GFX11-TRUE16-NEXT: scratch_load_b32 v141, off, s32 offset:92
+; GFX11-TRUE16-NEXT: scratch_load_b32 v140, off, s32 offset:96
+; GFX11-TRUE16-NEXT: scratch_load_b32 v139, off, s32 offset:100
+; GFX11-TRUE16-NEXT: scratch_load_b32 v138, off, s32 offset:104
+; GFX11-TRUE16-NEXT: scratch_load_b32 v137, off, s32 offset:108
+; GFX11-TRUE16-NEXT: scratch_load_b32 v136, off, s32 offset:112
+; GFX11-TRUE16-NEXT: scratch_load_b32 v127, off, s32 offset:116
+; GFX11-TRUE16-NEXT: scratch_load_b32 v126, off, s32 offset:120
+; GFX11-TRUE16-NEXT: scratch_load_b32 v125, off, s32 offset:124
+; GFX11-TRUE16-NEXT: s_clause 0x1f
+; GFX11-TRUE16-NEXT: scratch_load_b32 v124, off, s32 offset:128
+; GFX11-TRUE16-NEXT: scratch_load_b32 v123, off, s32 offset:132
+; GFX11-TRUE16-NEXT: scratch_load_b32 v122, off, s32 offset:136
+; GFX11-TRUE16-NEXT: scratch_load_b32 v121, off, s32 offset:140
+; GFX11-TRUE16-NEXT: scratch_load_b32 v120, off, s32 offset:144
+; GFX11-TRUE16-NEXT: scratch_load_b32 v111, off, s32 offset:148
+; GFX11-TRUE16-NEXT: scratch_load_b32 v110, off, s32 offset:152
+; GFX11-TRUE16-NEXT: scratch_load_b32 v109, off, s32 offset:156
+; GFX11-TRUE16-NEXT: scratch_load_b32 v108, off, s32 offset:160
+; GFX11-TRUE16-NEXT: scratch_load_b32 v107, off, s32 offset:164
+; GFX11-TRUE16-NEXT: scratch_load_b32 v106, off, s32 offset:168
+; GFX11-TRUE16-NEXT: scratch_load_b32 v105, off, s32 offset:172
+; GFX11-TRUE16-NEXT: scratch_load_b32 v104, off, s32 offset:176
+; GFX11-TRUE16-NEXT: scratch_load_b32 v95, off, s32 offset:180
+; GFX11-TRUE16-NEXT: scratch_load_b32 v94, off, s32 offset:184
+; GFX11-TRUE16-NEXT: scratch_load_b32 v93, off, s32 offset:188
+; GFX11-TRUE16-NEXT: scratch_load_b32 v92, off, s32 offset:192
+; GFX11-TRUE16-NEXT: scratch_load_b32 v91, off, s32 offset:196
+; GFX11-TRUE16-NEXT: scratch_load_b32 v90, off, s32 offset:200
+; GFX11-TRUE16-NEXT: scratch_load_b32 v89, off, s32 offset:204
+; GFX11-TRUE16-NEXT: scratch_load_b32 v88, off, s32 offset:208
+; GFX11-TRUE16-NEXT: scratch_load_b32 v79, off, s32 offset:212
+; GFX11-TRUE16-NEXT: scratch_load_b32 v78, off, s32 offset:216
+; GFX11-TRUE16-NEXT: scratch_load_b32 v77, off, s32 offset:220
+; GFX11-TRUE16-NEXT: scratch_load_b32 v76, off, s32 offset:224
+; GFX11-TRUE16-NEXT: scratch_load_b32 v75, off, s32 offset:228
+; GFX11-TRUE16-NEXT: scratch_load_b32 v74, off, s32 offset:232
+; GFX11-TRUE16-NEXT: scratch_load_b32 v73, off, s32 offset:236
+; GFX11-TRUE16-NEXT: scratch_load_b32 v72, off, s32 offset:240
+; GFX11-TRUE16-NEXT: scratch_load_b32 v63, off, s32 offset:244
+; GFX11-TRUE16-NEXT: scratch_load_b32 v62, off, s32 offset:248
+; GFX11-TRUE16-NEXT: scratch_load_b32 v61, off, s32 offset:252
+; GFX11-TRUE16-NEXT: s_clause 0xc
+; GFX11-TRUE16-NEXT: scratch_load_b32 v60, off, s32 offset:256
+; GFX11-TRUE16-NEXT: scratch_load_b32 v59, off, s32 offset:260
+; GFX11-TRUE16-NEXT: scratch_load_b32 v58, off, s32 offset:264
+; GFX11-TRUE16-NEXT: scratch_load_b32 v57, off, s32 offset:268
+; GFX11-TRUE16-NEXT: scratch_load_b32 v56, off, s32 offset:272
+; GFX11-TRUE16-NEXT: scratch_load_b32 v47, off, s32 offset:276
+; GFX11-TRUE16-NEXT: scratch_load_b32 v46, off, s32 offset:280
+; GFX11-TRUE16-NEXT: scratch_load_b32 v45, off, s32 offset:284
+; GFX11-TRUE16-NEXT: scratch_load_b32 v44, off, s32 offset:288
+; GFX11-TRUE16-NEXT: scratch_load_b32 v43, off, s32 offset:292
+; GFX11-TRUE16-NEXT: scratch_load_b32 v42, off, s32 offset:296
+; GFX11-TRUE16-NEXT: scratch_load_b32 v41, off, s32 offset:300
+; GFX11-TRUE16-NEXT: scratch_load_b32 v40, off, s32 offset:304
+; GFX11-TRUE16-NEXT: v_dual_mov_b32 v3, v9 :: v_dual_mov_b32 v4, v14
+; GFX11-TRUE16-NEXT: v_dual_mov_b32 v9, v54 :: v_dual_mov_b32 v10, v65
+; GFX11-TRUE16-NEXT: v_mov_b32_e32 v14, v119
+; GFX11-TRUE16-NEXT: s_waitcnt vmcnt(0)
; GFX11-TRUE16-NEXT: s_setpc_b64 s[30:31]
; GFX11-TRUE16-NEXT: .LBB55_4:
; GFX11-TRUE16-NEXT: ; implicit-def: $vgpr0_vgpr1_vgpr2_vgpr3_vgpr4_vgpr5_vgpr6_vgpr7_vgpr8_vgpr9_vgpr10_vgpr11_vgpr12_vgpr13_vgpr14_vgpr15_vgpr16_vgpr17_vgpr18_vgpr19_vgpr20_vgpr21_vgpr22_vgpr23_vgpr24_vgpr25_vgpr26_vgpr27_vgpr28_vgpr29_vgpr30_vgpr31
+; GFX11-TRUE16-NEXT: ; implicit-def: $vgpr1_vgpr2_vgpr3_vgpr4_vgpr5_vgpr6_vgpr7_vgpr8_vgpr9_vgpr10_vgpr11_vgpr12_vgpr13_vgpr14_vgpr15_vgpr16_vgpr17_vgpr18_vgpr19_vgpr20_vgpr21_vgpr22_vgpr23_vgpr24_vgpr25_vgpr26_vgpr27_vgpr28_vgpr29_vgpr30_vgpr31_vgpr32
+; GFX11-TRUE16-NEXT: ; implicit-def: $vgpr3_vgpr4_vgpr5_vgpr6_vgpr7_vgpr8_vgpr9_vgpr10_vgpr11_vgpr12_vgpr13_vgpr14_vgpr15_vgpr16_vgpr17_vgpr18_vgpr19_vgpr20_vgpr21_vgpr22_vgpr23_vgpr24_vgpr25_vgpr26_vgpr27_vgpr28_vgpr29_vgpr30_vgpr31_vgpr32_vgpr33_vgpr34
+; GFX11-TRUE16-NEXT: ; implicit-def: $vgpr6_vgpr7_vgpr8_vgpr9_vgpr10_vgpr11_vgpr12_vgpr13_vgpr14_vgpr15_vgpr16_vgpr17_vgpr18_vgpr19_vgpr20_vgpr21_vgpr22_vgpr23_vgpr24_vgpr25_vgpr26_vgpr27_vgpr28_vgpr29_vgpr30_vgpr31_vgpr32_vgpr33_vgpr34_vgpr35_vgpr36_vgpr37
+; GFX11-TRUE16-NEXT: ; implicit-def: $vgpr10_vgpr11_vgpr12_vgpr13_vgpr14_vgpr15_vgpr16_vgpr17_vgpr18_vgpr19_vgpr20_vgpr21_vgpr22_vgpr23_vgpr24_vgpr25_vgpr26_vgpr27_vgpr28_vgpr29_vgpr30_vgpr31_vgpr32_vgpr33_vgpr34_vgpr35_vgpr36_vgpr37_vgpr38_vgpr39_vgpr40_vgpr41
+; GFX11-TRUE16-NEXT: ; implicit-def: $vgpr15_vgpr16_vgpr17_vgpr18_vgpr19_vgpr20_vgpr21_vgpr22_vgpr23_vgpr24_vgpr25_vgpr26_vgpr27_vgpr28_vgpr29_vgpr30_vgpr31_vgpr32_vgpr33_vgpr34_vgpr35_vgpr36_vgpr37_vgpr38_vgpr39_vgpr40_vgpr41_vgpr42_vgpr43_vgpr44_vgpr45_vgpr46
+; GFX11-TRUE16-NEXT: ; implicit-def: $vgpr21_vgpr22_vgpr23_vgpr24_vgpr25_vgpr26_vgpr27_vgpr28_vgpr29_vgpr30_vgpr31_vgpr32_vgpr33_vgpr34_vgpr35_vgpr36_vgpr37_vgpr38_vgpr39_vgpr40_vgpr41_vgpr42_vgpr43_vgpr44_vgpr45_vgpr46_vgpr47_vgpr48_vgpr49_vgpr50_vgpr51_vgpr52
+; GFX11-TRUE16-NEXT: ; implicit-def: $vgpr28_vgpr29_vgpr30_vgpr31_vgpr32_vgpr33_vgpr34_vgpr35_vgpr36_vgpr37_vgpr38_vgpr39_vgpr40_vgpr41_vgpr42_vgpr43_vgpr44_vgpr45_vgpr46_vgpr47_vgpr48_vgpr49_vgpr50_vgpr51_vgpr52_vgpr53_vgpr54_vgpr55_vgpr56_vgpr57_vgpr58_vgpr59
+; GFX11-TRUE16-NEXT: ; implicit-def: $vgpr36_vgpr37_vgpr38_vgpr39_vgpr40_vgpr41_vgpr42_vgpr43_vgpr44_vgpr45_vgpr46_vgpr47_vgpr48_vgpr49_vgpr50_vgpr51_vgpr52_vgpr53_vgpr54_vgpr55_vgpr56_vgpr57_vgpr58_vgpr59_vgpr60_vgpr61_vgpr62_vgpr63_vgpr64_vgpr65_vgpr66_vgpr67
+; GFX11-TRUE16-NEXT: ; implicit-def: $vgpr45_vgpr46_vgpr47_vgpr48_vgpr49_vgpr50_vgpr51_vgpr52_vgpr53_vgpr54_vgpr55_vgpr56_vgpr57_vgpr58_vgpr59_vgpr60_vgpr61_vgpr62_vgpr63_vgpr64_vgpr65_vgpr66_vgpr67_vgpr68_vgpr69_vgpr70_vgpr71_vgpr72_vgpr73_vgpr74_vgpr75_vgpr76
+; GFX11-TRUE16-NEXT: ; implicit-def: $vgpr55_vgpr56_vgpr57_vgpr58_vgpr59_vgpr60_vgpr61_vgpr62_vgpr63_vgpr64_vgpr65_vgpr66_vgpr67_vgpr68_vgpr69_vgpr70_vgpr71_vgpr72_vgpr73_vgpr74_vgpr75_vgpr76_vgpr77_vgpr78_vgpr79_vgpr80_vgpr81_vgpr82_vgpr83_vgpr84_vgpr85_vgpr86
+; GFX11-TRUE16-NEXT: ; implicit-def: $vgpr66_vgpr67_vgpr68_vgpr69_vgpr70_vgpr71_vgpr72_vgpr73_vgpr74_vgpr75_vgpr76_vgpr77_vgpr78_vgpr79_vgpr80_vgpr81_vgpr82_vgpr83_vgpr84_vgpr85_vgpr86_vgpr87_vgpr88_vgpr89_vgpr90_vgpr91_vgpr92_vgpr93_vgpr94_vgpr95_vgpr96_vgpr97
+; GFX11-TRUE16-NEXT: ; implicit-def: $vgpr78_vgpr79_vgpr80_vgpr81_vgpr82_vgpr83_vgpr84_vgpr85_vgpr86_vgpr87_vgpr88_vgpr89_vgpr90_vgpr91_vgpr92_vgpr93_vgpr94_vgpr95_vgpr96_vgpr97_vgpr98_vgpr99_vgpr100_vgpr101_vgpr102_vgpr103_vgpr104_vgpr105_vgpr106_vgpr107_vgpr108_vgpr109
+; GFX11-TRUE16-NEXT: ; implicit-def: $vgpr91_vgpr92_vgpr93_vgpr94_vgpr95_vgpr96_vgpr97_vgpr98_vgpr99_vgpr100_vgpr101_vgpr102_vgpr103_vgpr104_vgpr105_vgpr106_vgpr107_vgpr108_vgpr109_vgpr110_vgpr111_vgpr112_vgpr113_vgpr114_vgpr115_vgpr116_vgpr117_vgpr118_vgpr119_vgpr120_vgpr121_vgpr122
+; GFX11-TRUE16-NEXT: ; implicit-def: $vgpr105_vgpr106_vgpr107_vgpr108_vgpr109_vgpr110_vgpr111_vgpr112_vgpr113_vgpr114_vgpr115_vgpr116_vgpr117_vgpr118_vgpr119_vgpr120_vgpr121_vgpr122_vgpr123_vgpr124_vgpr125_vgpr126_vgpr127_vgpr128_vgpr129_vgpr130_vgpr131_vgpr132_vgpr133_vgpr134_vgpr135_vgpr136
+; GFX11-TRUE16-NEXT: ; implicit-def: $vgpr120_vgpr121_vgpr122_vgpr123_vgpr124_vgpr125_vgpr126_vgpr127_vgpr128_vgpr129_vgpr130_vgpr131_vgpr132_vgpr133_vgpr134_vgpr135_vgpr136_vgpr137_vgpr138_vgpr139_vgpr140_vgpr141_vgpr142_vgpr143_vgpr144_vgpr145_vgpr146_vgpr147_vgpr148_vgpr149_vgpr150_vgpr151
+; GFX11-TRUE16-NEXT: ; implicit-def: $vgpr136_vgpr137_vgpr138_vgpr139_vgpr140_vgpr141_vgpr142_vgpr143_vgpr144_vgpr145_vgpr146_vgpr147_vgpr148_vgpr149_vgpr150_vgpr151_vgpr152_vgpr153_vgpr154_vgpr155_vgpr156_vgpr157_vgpr158_vgpr159_vgpr160_vgpr161_vgpr162_vgpr163_vgpr164_vgpr165_vgpr166_vgpr167
+; GFX11-TRUE16-NEXT: ; implicit-def: $vgpr153_vgpr154_vgpr155_vgpr156_vgpr157_vgpr158_vgpr159_vgpr160_vgpr161_vgpr162_vgpr163_vgpr164_vgpr165_vgpr166_vgpr167_vgpr168_vgpr169_vgpr170_vgpr171_vgpr172_vgpr173_vgpr174_vgpr175_vgpr176_vgpr177_vgpr178_vgpr179_vgpr180_vgpr181_vgpr182_vgpr183_vgpr184
; GFX11-TRUE16-NEXT: s_branch .LBB55_2
;
; GFX11-FAKE16-LABEL: bitcast_v44f16_to_v11f64_scalar:
@@ -34283,15 +36227,10 @@ define inreg <44 x half> @bitcast_v44i16_to_v44f16_scalar(<44 x i16> inreg %a, i
; GFX11-TRUE16-LABEL: bitcast_v44i16_to_v44f16_scalar:
; GFX11-TRUE16: ; %bb.0:
; GFX11-TRUE16-NEXT: s_waitcnt vmcnt(0) expcnt(0) lgkmcnt(0)
-; GFX11-TRUE16-NEXT: v_mov_b16_e32 v21.h, 0
; GFX11-TRUE16-NEXT: v_cmp_ne_u32_e32 vcc_lo, 0, v4
-; GFX11-TRUE16-NEXT: v_mov_b16_e32 v21.l, v3.h
-; GFX11-TRUE16-NEXT: v_mov_b16_e32 v20.l, v2.h
-; GFX11-TRUE16-NEXT: v_mov_b16_e32 v19.l, v1.h
-; GFX11-TRUE16-NEXT: v_mov_b16_e32 v20.h, v21.h
-; GFX11-TRUE16-NEXT: v_mov_b16_e32 v19.h, v21.h
-; GFX11-TRUE16-NEXT: v_mov_b16_e32 v18.l, v0.h
-; GFX11-TRUE16-NEXT: v_mov_b16_e32 v18.h, v21.h
+; GFX11-TRUE16-NEXT: v_dual_mov_b32 v21, v3 :: v_dual_mov_b32 v20, v2
+; GFX11-TRUE16-NEXT: v_dual_mov_b32 v19, v1 :: v_dual_mov_b32 v18, v0
+; GFX11-TRUE16-NEXT: v_mov_b16_e32 v22.h, 0
; GFX11-TRUE16-NEXT: s_lshr_b32 s45, s29, 16
; GFX11-TRUE16-NEXT: s_lshr_b32 s44, s28, 16
; GFX11-TRUE16-NEXT: s_lshr_b32 s43, s27, 16
@@ -34313,19 +36252,18 @@ define inreg <44 x half> @bitcast_v44i16_to_v44f16_scalar(<44 x i16> inreg %a, i
; GFX11-TRUE16-NEXT: s_mov_b32 s46, 0
; GFX11-TRUE16-NEXT: s_and_b32 s47, vcc_lo, exec_lo
; GFX11-TRUE16-NEXT: s_cbranch_scc0 .LBB57_3
-; GFX11-TRUE16-NEXT: ; %bb.1: ; %Flow
+; GFX11-TRUE16-NEXT: ; %bb.1: ; %cmp.false
+; GFX11-TRUE16-NEXT: v_mov_b16_e32 v22.l, v21.h
+; GFX11-TRUE16-NEXT: v_mov_b16_e32 v23.l, v20.h
+; GFX11-TRUE16-NEXT: v_mov_b16_e32 v23.h, v22.h
+; GFX11-TRUE16-NEXT: v_mov_b16_e32 v24.l, v19.h
+; GFX11-TRUE16-NEXT: v_mov_b16_e32 v24.h, v22.h
+; GFX11-TRUE16-NEXT: v_mov_b16_e32 v25.l, v18.h
+; GFX11-TRUE16-NEXT: v_mov_b16_e32 v25.h, v22.h
; GFX11-TRUE16-NEXT: s_and_not1_b32 vcc_lo, exec_lo, s46
; GFX11-TRUE16-NEXT: s_cbranch_vccnz .LBB57_4
; GFX11-TRUE16-NEXT: .LBB57_2: ; %cmp.true
-; GFX11-TRUE16-NEXT: v_and_b32_e32 v3, 0xffff, v3
-; GFX11-TRUE16-NEXT: v_and_b32_e32 v2, 0xffff, v2
-; GFX11-TRUE16-NEXT: v_and_b32_e32 v1, 0xffff, v1
-; GFX11-TRUE16-NEXT: v_and_b32_e32 v0, 0xffff, v0
; GFX11-TRUE16-NEXT: s_pack_ll_b32_b16 s29, s29, s45
-; GFX11-TRUE16-NEXT: v_lshl_or_b32 v3, v21, 16, v3
-; GFX11-TRUE16-NEXT: v_lshl_or_b32 v2, v20, 16, v2
-; GFX11-TRUE16-NEXT: v_lshl_or_b32 v1, v19, 16, v1
-; GFX11-TRUE16-NEXT: v_lshl_or_b32 v0, v18, 16, v0
; GFX11-TRUE16-NEXT: s_pack_ll_b32_b16 s28, s28, s44
; GFX11-TRUE16-NEXT: s_pack_ll_b32_b16 s27, s27, s43
; GFX11-TRUE16-NEXT: s_pack_ll_b32_b16 s26, s26, s42
@@ -34343,63 +36281,67 @@ define inreg <44 x half> @bitcast_v44i16_to_v44f16_scalar(<44 x i16> inreg %a, i
; GFX11-TRUE16-NEXT: s_pack_ll_b32_b16 s2, s2, s7
; GFX11-TRUE16-NEXT: s_pack_ll_b32_b16 s0, s0, s12
; GFX11-TRUE16-NEXT: s_pack_ll_b32_b16 s1, s1, s4
-; GFX11-TRUE16-NEXT: v_pk_add_u16 v3, v3, 3 op_sel_hi:[1,0]
-; GFX11-TRUE16-NEXT: v_pk_add_u16 v2, v2, 3 op_sel_hi:[1,0]
-; GFX11-TRUE16-NEXT: v_pk_add_u16 v1, v1, 3 op_sel_hi:[1,0]
-; GFX11-TRUE16-NEXT: v_pk_add_u16 v0, v0, 3 op_sel_hi:[1,0]
+; GFX11-TRUE16-NEXT: v_pk_add_u16 v21, v21, 3 op_sel_hi:[1,0]
+; GFX11-TRUE16-NEXT: v_pk_add_u16 v20, v20, 3 op_sel_hi:[1,0]
+; GFX11-TRUE16-NEXT: v_pk_add_u16 v19, v19, 3 op_sel_hi:[1,0]
+; GFX11-TRUE16-NEXT: v_pk_add_u16 v18, v18, 3 op_sel_hi:[1,0]
; GFX11-TRUE16-NEXT: v_pk_add_u16 v17, s29, 3 op_sel_hi:[1,0]
-; GFX11-TRUE16-NEXT: v_pk_add_u16 v12, s28, 3 op_sel_hi:[1,0]
-; GFX11-TRUE16-NEXT: v_pk_add_u16 v13, s27, 3 op_sel_hi:[1,0]
+; GFX11-TRUE16-NEXT: v_pk_add_u16 v16, s28, 3 op_sel_hi:[1,0]
+; GFX11-TRUE16-NEXT: v_pk_add_u16 v15, s27, 3 op_sel_hi:[1,0]
; GFX11-TRUE16-NEXT: v_pk_add_u16 v14, s26, 3 op_sel_hi:[1,0]
-; GFX11-TRUE16-NEXT: v_pk_add_u16 v15, s25, 3 op_sel_hi:[1,0]
-; GFX11-TRUE16-NEXT: v_pk_add_u16 v16, s24, 3 op_sel_hi:[1,0]
-; GFX11-TRUE16-NEXT: v_pk_add_u16 v7, s15, 3 op_sel_hi:[1,0]
-; GFX11-TRUE16-NEXT: v_pk_add_u16 v8, s14, 3 op_sel_hi:[1,0]
+; GFX11-TRUE16-NEXT: v_pk_add_u16 v13, s25, 3 op_sel_hi:[1,0]
+; GFX11-TRUE16-NEXT: v_pk_add_u16 v12, s24, 3 op_sel_hi:[1,0]
+; GFX11-TRUE16-NEXT: v_pk_add_u16 v11, s15, 3 op_sel_hi:[1,0]
+; GFX11-TRUE16-NEXT: v_pk_add_u16 v10, s14, 3 op_sel_hi:[1,0]
; GFX11-TRUE16-NEXT: v_pk_add_u16 v9, s13, 3 op_sel_hi:[1,0]
-; GFX11-TRUE16-NEXT: v_pk_add_u16 v10, s11, 3 op_sel_hi:[1,0]
-; GFX11-TRUE16-NEXT: v_pk_add_u16 v11, s10, 3 op_sel_hi:[1,0]
-; GFX11-TRUE16-NEXT: v_pk_add_u16 v4, s9, 3 op_sel_hi:[1,0]
+; GFX11-TRUE16-NEXT: v_pk_add_u16 v8, s11, 3 op_sel_hi:[1,0]
+; GFX11-TRUE16-NEXT: v_pk_add_u16 v7, s10, 3 op_sel_hi:[1,0]
+; GFX11-TRUE16-NEXT: v_pk_add_u16 v6, s9, 3 op_sel_hi:[1,0]
; GFX11-TRUE16-NEXT: v_pk_add_u16 v5, s8, 3 op_sel_hi:[1,0]
-; GFX11-TRUE16-NEXT: v_pk_add_u16 v25, s0, 3 op_sel_hi:[1,0]
-; GFX11-TRUE16-NEXT: v_pk_add_u16 v24, s1, 3 op_sel_hi:[1,0]
-; GFX11-TRUE16-NEXT: v_pk_add_u16 v23, s2, 3 op_sel_hi:[1,0]
-; GFX11-TRUE16-NEXT: v_pk_add_u16 v22, s3, 3 op_sel_hi:[1,0]
-; GFX11-TRUE16-NEXT: v_pk_add_u16 v6, s6, 3 op_sel_hi:[1,0]
-; GFX11-TRUE16-NEXT: v_lshrrev_b32_e32 v51, 16, v25
-; GFX11-TRUE16-NEXT: v_lshrrev_b32_e32 v50, 16, v24
-; GFX11-TRUE16-NEXT: v_lshrrev_b32_e32 v49, 16, v23
-; GFX11-TRUE16-NEXT: v_lshrrev_b32_e32 v48, 16, v22
-; GFX11-TRUE16-NEXT: v_lshrrev_b32_e32 v39, 16, v6
+; GFX11-TRUE16-NEXT: v_pk_add_u16 v0, s0, 3 op_sel_hi:[1,0]
+; GFX11-TRUE16-NEXT: v_pk_add_u16 v1, s1, 3 op_sel_hi:[1,0]
+; GFX11-TRUE16-NEXT: v_pk_add_u16 v2, s2, 3 op_sel_hi:[1,0]
+; GFX11-TRUE16-NEXT: v_pk_add_u16 v3, s3, 3 op_sel_hi:[1,0]
+; GFX11-TRUE16-NEXT: v_pk_add_u16 v4, s6, 3 op_sel_hi:[1,0]
+; GFX11-TRUE16-NEXT: v_lshrrev_b32_e32 v51, 16, v0
+; GFX11-TRUE16-NEXT: v_lshrrev_b32_e32 v50, 16, v1
+; GFX11-TRUE16-NEXT: v_lshrrev_b32_e32 v49, 16, v2
+; GFX11-TRUE16-NEXT: v_lshrrev_b32_e32 v48, 16, v3
+; GFX11-TRUE16-NEXT: v_lshrrev_b32_e32 v39, 16, v4
; GFX11-TRUE16-NEXT: v_lshrrev_b32_e32 v38, 16, v5
-; GFX11-TRUE16-NEXT: v_lshrrev_b32_e32 v37, 16, v4
-; GFX11-TRUE16-NEXT: v_lshrrev_b32_e32 v36, 16, v11
-; GFX11-TRUE16-NEXT: v_lshrrev_b32_e32 v35, 16, v10
+; GFX11-TRUE16-NEXT: v_lshrrev_b32_e32 v37, 16, v6
+; GFX11-TRUE16-NEXT: v_lshrrev_b32_e32 v36, 16, v7
+; GFX11-TRUE16-NEXT: v_lshrrev_b32_e32 v35, 16, v8
; GFX11-TRUE16-NEXT: v_lshrrev_b32_e32 v34, 16, v9
-; GFX11-TRUE16-NEXT: v_lshrrev_b32_e32 v33, 16, v8
-; GFX11-TRUE16-NEXT: v_lshrrev_b32_e32 v32, 16, v7
-; GFX11-TRUE16-NEXT: v_lshrrev_b32_e32 v31, 16, v16
-; GFX11-TRUE16-NEXT: v_lshrrev_b32_e32 v30, 16, v15
+; GFX11-TRUE16-NEXT: v_lshrrev_b32_e32 v33, 16, v10
+; GFX11-TRUE16-NEXT: v_lshrrev_b32_e32 v32, 16, v11
+; GFX11-TRUE16-NEXT: v_lshrrev_b32_e32 v31, 16, v12
+; GFX11-TRUE16-NEXT: v_lshrrev_b32_e32 v30, 16, v13
; GFX11-TRUE16-NEXT: v_lshrrev_b32_e32 v29, 16, v14
-; GFX11-TRUE16-NEXT: v_lshrrev_b32_e32 v28, 16, v13
-; GFX11-TRUE16-NEXT: v_lshrrev_b32_e32 v27, 16, v12
+; GFX11-TRUE16-NEXT: v_lshrrev_b32_e32 v28, 16, v15
+; GFX11-TRUE16-NEXT: v_lshrrev_b32_e32 v27, 16, v16
; GFX11-TRUE16-NEXT: v_lshrrev_b32_e32 v26, 16, v17
-; GFX11-TRUE16-NEXT: v_lshrrev_b32_e32 v18, 16, v0
-; GFX11-TRUE16-NEXT: v_lshrrev_b32_e32 v19, 16, v1
-; GFX11-TRUE16-NEXT: v_lshrrev_b32_e32 v20, 16, v2
-; GFX11-TRUE16-NEXT: v_lshrrev_b32_e32 v21, 16, v3
+; GFX11-TRUE16-NEXT: v_lshrrev_b32_e32 v25, 16, v18
+; GFX11-TRUE16-NEXT: v_lshrrev_b32_e32 v24, 16, v19
+; GFX11-TRUE16-NEXT: v_lshrrev_b32_e32 v23, 16, v20
+; GFX11-TRUE16-NEXT: v_lshrrev_b32_e32 v22, 16, v21
; GFX11-TRUE16-NEXT: s_branch .LBB57_5
; GFX11-TRUE16-NEXT: .LBB57_3:
+; GFX11-TRUE16-NEXT: ; implicit-def: $vgpr25
+; GFX11-TRUE16-NEXT: ; implicit-def: $vgpr24
+; GFX11-TRUE16-NEXT: ; implicit-def: $vgpr23
+; GFX11-TRUE16-NEXT: ; implicit-def: $vgpr22
; GFX11-TRUE16-NEXT: s_branch .LBB57_2
; GFX11-TRUE16-NEXT: .LBB57_4:
-; GFX11-TRUE16-NEXT: v_dual_mov_b32 v17, s29 :: v_dual_mov_b32 v12, s28
-; GFX11-TRUE16-NEXT: v_dual_mov_b32 v13, s27 :: v_dual_mov_b32 v14, s26
-; GFX11-TRUE16-NEXT: v_dual_mov_b32 v15, s25 :: v_dual_mov_b32 v16, s24
-; GFX11-TRUE16-NEXT: v_dual_mov_b32 v7, s23 :: v_dual_mov_b32 v8, s22
-; GFX11-TRUE16-NEXT: v_dual_mov_b32 v9, s21 :: v_dual_mov_b32 v10, s20
-; GFX11-TRUE16-NEXT: v_dual_mov_b32 v11, s19 :: v_dual_mov_b32 v4, s18
-; GFX11-TRUE16-NEXT: v_dual_mov_b32 v5, s17 :: v_dual_mov_b32 v6, s16
-; GFX11-TRUE16-NEXT: v_dual_mov_b32 v22, s3 :: v_dual_mov_b32 v23, s2
-; GFX11-TRUE16-NEXT: v_dual_mov_b32 v24, s1 :: v_dual_mov_b32 v25, s0
+; GFX11-TRUE16-NEXT: v_dual_mov_b32 v17, s29 :: v_dual_mov_b32 v16, s28
+; GFX11-TRUE16-NEXT: v_dual_mov_b32 v15, s27 :: v_dual_mov_b32 v14, s26
+; GFX11-TRUE16-NEXT: v_dual_mov_b32 v13, s25 :: v_dual_mov_b32 v12, s24
+; GFX11-TRUE16-NEXT: v_dual_mov_b32 v11, s23 :: v_dual_mov_b32 v10, s22
+; GFX11-TRUE16-NEXT: v_dual_mov_b32 v9, s21 :: v_dual_mov_b32 v8, s20
+; GFX11-TRUE16-NEXT: v_dual_mov_b32 v7, s19 :: v_dual_mov_b32 v6, s18
+; GFX11-TRUE16-NEXT: v_dual_mov_b32 v5, s17 :: v_dual_mov_b32 v4, s16
+; GFX11-TRUE16-NEXT: v_dual_mov_b32 v3, s3 :: v_dual_mov_b32 v2, s2
+; GFX11-TRUE16-NEXT: v_dual_mov_b32 v1, s1 :: v_dual_mov_b32 v0, s0
; GFX11-TRUE16-NEXT: v_dual_mov_b32 v26, s45 :: v_dual_mov_b32 v27, s44
; GFX11-TRUE16-NEXT: v_dual_mov_b32 v28, s43 :: v_dual_mov_b32 v29, s42
; GFX11-TRUE16-NEXT: v_dual_mov_b32 v30, s41 :: v_dual_mov_b32 v31, s40
@@ -34410,53 +36352,40 @@ define inreg <44 x half> @bitcast_v44i16_to_v44f16_scalar(<44 x i16> inreg %a, i
; GFX11-TRUE16-NEXT: v_dual_mov_b32 v48, s5 :: v_dual_mov_b32 v49, s7
; GFX11-TRUE16-NEXT: v_dual_mov_b32 v50, s4 :: v_dual_mov_b32 v51, s12
; GFX11-TRUE16-NEXT: .LBB57_5: ; %end
-; GFX11-TRUE16-NEXT: v_and_b32_e32 v25, 0xffff, v25
-; GFX11-TRUE16-NEXT: v_and_b32_e32 v52, 0xffff, v24
-; GFX11-TRUE16-NEXT: v_and_b32_e32 v23, 0xffff, v23
-; GFX11-TRUE16-NEXT: v_and_b32_e32 v53, 0xffff, v22
-; GFX11-TRUE16-NEXT: v_and_b32_e32 v5, 0xffff, v5
-; GFX11-TRUE16-NEXT: v_lshl_or_b32 v24, v51, 16, v25
-; GFX11-TRUE16-NEXT: v_lshl_or_b32 v25, v50, 16, v52
-; GFX11-TRUE16-NEXT: v_and_b32_e32 v50, 0xffff, v4
-; GFX11-TRUE16-NEXT: v_lshl_or_b32 v22, v49, 16, v23
-; GFX11-TRUE16-NEXT: v_lshl_or_b32 v23, v48, 16, v53
-; GFX11-TRUE16-NEXT: v_and_b32_e32 v6, 0xffff, v6
-; GFX11-TRUE16-NEXT: v_and_b32_e32 v3, 0xffff, v3
-; GFX11-TRUE16-NEXT: v_lshl_or_b32 v5, v38, 16, v5
-; GFX11-TRUE16-NEXT: v_and_b32_e32 v11, 0xffff, v11
-; GFX11-TRUE16-NEXT: v_and_b32_e32 v10, 0xffff, v10
-; GFX11-TRUE16-NEXT: v_and_b32_e32 v38, 0xffff, v7
-; GFX11-TRUE16-NEXT: v_lshl_or_b32 v21, v21, 16, v3
-; GFX11-TRUE16-NEXT: v_mov_b32_e32 v3, v23
-; GFX11-TRUE16-NEXT: v_lshl_or_b32 v4, v39, 16, v6
-; GFX11-TRUE16-NEXT: v_lshl_or_b32 v6, v37, 16, v50
-; GFX11-TRUE16-NEXT: v_and_b32_e32 v37, 0xffff, v8
-; GFX11-TRUE16-NEXT: v_and_b32_e32 v9, 0xffff, v9
-; GFX11-TRUE16-NEXT: v_lshl_or_b32 v7, v36, 16, v11
-; GFX11-TRUE16-NEXT: v_lshl_or_b32 v8, v35, 16, v10
-; GFX11-TRUE16-NEXT: v_lshl_or_b32 v11, v32, 16, v38
-; GFX11-TRUE16-NEXT: v_lshl_or_b32 v10, v33, 16, v37
-; GFX11-TRUE16-NEXT: v_and_b32_e32 v16, 0xffff, v16
-; GFX11-TRUE16-NEXT: v_and_b32_e32 v15, 0xffff, v15
-; GFX11-TRUE16-NEXT: v_and_b32_e32 v14, 0xffff, v14
-; GFX11-TRUE16-NEXT: v_and_b32_e32 v32, 0xffff, v13
-; GFX11-TRUE16-NEXT: v_and_b32_e32 v33, 0xffff, v12
-; GFX11-TRUE16-NEXT: v_and_b32_e32 v17, 0xffff, v17
-; GFX11-TRUE16-NEXT: v_and_b32_e32 v0, 0xffff, v0
-; GFX11-TRUE16-NEXT: v_and_b32_e32 v1, 0xffff, v1
-; GFX11-TRUE16-NEXT: v_and_b32_e32 v2, 0xffff, v2
-; GFX11-TRUE16-NEXT: v_lshl_or_b32 v9, v34, 16, v9
-; GFX11-TRUE16-NEXT: v_lshl_or_b32 v12, v31, 16, v16
-; GFX11-TRUE16-NEXT: v_lshl_or_b32 v13, v30, 16, v15
-; GFX11-TRUE16-NEXT: v_lshl_or_b32 v14, v29, 16, v14
-; GFX11-TRUE16-NEXT: v_lshl_or_b32 v15, v28, 16, v32
-; GFX11-TRUE16-NEXT: v_lshl_or_b32 v16, v27, 16, v33
-; GFX11-TRUE16-NEXT: v_lshl_or_b32 v17, v26, 16, v17
-; GFX11-TRUE16-NEXT: v_lshl_or_b32 v18, v18, 16, v0
-; GFX11-TRUE16-NEXT: v_lshl_or_b32 v19, v19, 16, v1
-; GFX11-TRUE16-NEXT: v_lshl_or_b32 v20, v20, 16, v2
-; GFX11-TRUE16-NEXT: v_dual_mov_b32 v0, v24 :: v_dual_mov_b32 v1, v25
-; GFX11-TRUE16-NEXT: v_mov_b32_e32 v2, v22
+; GFX11-TRUE16-NEXT: s_delay_alu instid0(VALU_DEP_1) | instskip(NEXT) | instid1(VALU_DEP_3)
+; GFX11-TRUE16-NEXT: v_dual_mov_b32 v51, v51 :: v_dual_mov_b32 v50, v50
+; GFX11-TRUE16-NEXT: v_dual_mov_b32 v49, v49 :: v_dual_mov_b32 v48, v48
+; GFX11-TRUE16-NEXT: v_dual_mov_b32 v39, v39 :: v_dual_mov_b32 v38, v38
+; GFX11-TRUE16-NEXT: v_dual_mov_b32 v37, v37 :: v_dual_mov_b32 v36, v36
+; GFX11-TRUE16-NEXT: v_dual_mov_b32 v35, v35 :: v_dual_mov_b32 v34, v34
+; GFX11-TRUE16-NEXT: v_dual_mov_b32 v33, v33 :: v_dual_mov_b32 v32, v32
+; GFX11-TRUE16-NEXT: v_dual_mov_b32 v31, v31 :: v_dual_mov_b32 v30, v30
+; GFX11-TRUE16-NEXT: v_dual_mov_b32 v29, v29 :: v_dual_mov_b32 v28, v28
+; GFX11-TRUE16-NEXT: v_dual_mov_b32 v27, v27 :: v_dual_mov_b32 v26, v26
+; GFX11-TRUE16-NEXT: v_dual_mov_b32 v25, v25 :: v_dual_mov_b32 v24, v24
+; GFX11-TRUE16-NEXT: v_dual_mov_b32 v23, v23 :: v_dual_mov_b32 v22, v22
+; GFX11-TRUE16-NEXT: v_mov_b16_e32 v0.h, v51.l
+; GFX11-TRUE16-NEXT: v_mov_b16_e32 v1.h, v50.l
+; GFX11-TRUE16-NEXT: v_mov_b16_e32 v2.h, v49.l
+; GFX11-TRUE16-NEXT: v_mov_b16_e32 v3.h, v48.l
+; GFX11-TRUE16-NEXT: v_mov_b16_e32 v4.h, v39.l
+; GFX11-TRUE16-NEXT: v_mov_b16_e32 v5.h, v38.l
+; GFX11-TRUE16-NEXT: v_mov_b16_e32 v6.h, v37.l
+; GFX11-TRUE16-NEXT: v_mov_b16_e32 v7.h, v36.l
+; GFX11-TRUE16-NEXT: v_mov_b16_e32 v8.h, v35.l
+; GFX11-TRUE16-NEXT: v_mov_b16_e32 v9.h, v34.l
+; GFX11-TRUE16-NEXT: v_mov_b16_e32 v10.h, v33.l
+; GFX11-TRUE16-NEXT: v_mov_b16_e32 v11.h, v32.l
+; GFX11-TRUE16-NEXT: v_mov_b16_e32 v12.h, v31.l
+; GFX11-TRUE16-NEXT: v_mov_b16_e32 v13.h, v30.l
+; GFX11-TRUE16-NEXT: v_mov_b16_e32 v14.h, v29.l
+; GFX11-TRUE16-NEXT: v_mov_b16_e32 v15.h, v28.l
+; GFX11-TRUE16-NEXT: v_mov_b16_e32 v16.h, v27.l
+; GFX11-TRUE16-NEXT: v_mov_b16_e32 v17.h, v26.l
+; GFX11-TRUE16-NEXT: v_mov_b16_e32 v18.h, v25.l
+; GFX11-TRUE16-NEXT: v_mov_b16_e32 v19.h, v24.l
+; GFX11-TRUE16-NEXT: v_mov_b16_e32 v20.h, v23.l
+; GFX11-TRUE16-NEXT: v_mov_b16_e32 v21.h, v22.l
; GFX11-TRUE16-NEXT: s_setpc_b64 s[30:31]
;
; GFX11-FAKE16-LABEL: bitcast_v44i16_to_v44f16_scalar:
@@ -36279,15 +38208,10 @@ define inreg <44 x i16> @bitcast_v44f16_to_v44i16_scalar(<44 x half> inreg %a, i
; GFX11-TRUE16-LABEL: bitcast_v44f16_to_v44i16_scalar:
; GFX11-TRUE16: ; %bb.0:
; GFX11-TRUE16-NEXT: s_waitcnt vmcnt(0) expcnt(0) lgkmcnt(0)
-; GFX11-TRUE16-NEXT: v_mov_b16_e32 v21.h, 0
; GFX11-TRUE16-NEXT: v_cmp_ne_u32_e32 vcc_lo, 0, v4
-; GFX11-TRUE16-NEXT: v_mov_b16_e32 v21.l, v3.h
-; GFX11-TRUE16-NEXT: v_mov_b16_e32 v20.l, v2.h
-; GFX11-TRUE16-NEXT: v_mov_b16_e32 v19.l, v1.h
-; GFX11-TRUE16-NEXT: v_mov_b16_e32 v20.h, v21.h
-; GFX11-TRUE16-NEXT: v_mov_b16_e32 v19.h, v21.h
-; GFX11-TRUE16-NEXT: v_mov_b16_e32 v18.l, v0.h
-; GFX11-TRUE16-NEXT: v_mov_b16_e32 v18.h, v21.h
+; GFX11-TRUE16-NEXT: v_dual_mov_b32 v21, v3 :: v_dual_mov_b32 v20, v2
+; GFX11-TRUE16-NEXT: v_dual_mov_b32 v19, v1 :: v_dual_mov_b32 v18, v0
+; GFX11-TRUE16-NEXT: v_mov_b16_e32 v22.h, 0
; GFX11-TRUE16-NEXT: s_lshr_b32 s45, s29, 16
; GFX11-TRUE16-NEXT: s_lshr_b32 s44, s28, 16
; GFX11-TRUE16-NEXT: s_lshr_b32 s43, s27, 16
@@ -36309,19 +38233,18 @@ define inreg <44 x i16> @bitcast_v44f16_to_v44i16_scalar(<44 x half> inreg %a, i
; GFX11-TRUE16-NEXT: s_mov_b32 s46, 0
; GFX11-TRUE16-NEXT: s_and_b32 s47, vcc_lo, exec_lo
; GFX11-TRUE16-NEXT: s_cbranch_scc0 .LBB59_3
-; GFX11-TRUE16-NEXT: ; %bb.1: ; %Flow
+; GFX11-TRUE16-NEXT: ; %bb.1: ; %cmp.false
+; GFX11-TRUE16-NEXT: v_mov_b16_e32 v22.l, v21.h
+; GFX11-TRUE16-NEXT: v_mov_b16_e32 v23.l, v20.h
+; GFX11-TRUE16-NEXT: v_mov_b16_e32 v23.h, v22.h
+; GFX11-TRUE16-NEXT: v_mov_b16_e32 v24.l, v19.h
+; GFX11-TRUE16-NEXT: v_mov_b16_e32 v24.h, v22.h
+; GFX11-TRUE16-NEXT: v_mov_b16_e32 v25.l, v18.h
+; GFX11-TRUE16-NEXT: v_mov_b16_e32 v25.h, v22.h
; GFX11-TRUE16-NEXT: s_and_not1_b32 vcc_lo, exec_lo, s46
; GFX11-TRUE16-NEXT: s_cbranch_vccnz .LBB59_4
; GFX11-TRUE16-NEXT: .LBB59_2: ; %cmp.true
-; GFX11-TRUE16-NEXT: v_and_b32_e32 v3, 0xffff, v3
-; GFX11-TRUE16-NEXT: v_and_b32_e32 v2, 0xffff, v2
-; GFX11-TRUE16-NEXT: v_and_b32_e32 v1, 0xffff, v1
-; GFX11-TRUE16-NEXT: v_and_b32_e32 v0, 0xffff, v0
; GFX11-TRUE16-NEXT: s_pack_ll_b32_b16 s29, s29, s45
-; GFX11-TRUE16-NEXT: v_lshl_or_b32 v3, v21, 16, v3
-; GFX11-TRUE16-NEXT: v_lshl_or_b32 v2, v20, 16, v2
-; GFX11-TRUE16-NEXT: v_lshl_or_b32 v1, v19, 16, v1
-; GFX11-TRUE16-NEXT: v_lshl_or_b32 v0, v18, 16, v0
; GFX11-TRUE16-NEXT: s_pack_ll_b32_b16 s28, s28, s44
; GFX11-TRUE16-NEXT: s_pack_ll_b32_b16 s27, s27, s43
; GFX11-TRUE16-NEXT: s_pack_ll_b32_b16 s26, s26, s42
@@ -36339,63 +38262,67 @@ define inreg <44 x i16> @bitcast_v44f16_to_v44i16_scalar(<44 x half> inreg %a, i
; GFX11-TRUE16-NEXT: s_pack_ll_b32_b16 s2, s2, s7
; GFX11-TRUE16-NEXT: s_pack_ll_b32_b16 s0, s0, s12
; GFX11-TRUE16-NEXT: s_pack_ll_b32_b16 s1, s1, s4
-; GFX11-TRUE16-NEXT: v_pk_add_f16 v3, 0x200, v3 op_sel_hi:[0,1]
-; GFX11-TRUE16-NEXT: v_pk_add_f16 v2, 0x200, v2 op_sel_hi:[0,1]
-; GFX11-TRUE16-NEXT: v_pk_add_f16 v1, 0x200, v1 op_sel_hi:[0,1]
-; GFX11-TRUE16-NEXT: v_pk_add_f16 v0, 0x200, v0 op_sel_hi:[0,1]
+; GFX11-TRUE16-NEXT: v_pk_add_f16 v21, 0x200, v21 op_sel_hi:[0,1]
+; GFX11-TRUE16-NEXT: v_pk_add_f16 v20, 0x200, v20 op_sel_hi:[0,1]
+; GFX11-TRUE16-NEXT: v_pk_add_f16 v19, 0x200, v19 op_sel_hi:[0,1]
+; GFX11-TRUE16-NEXT: v_pk_add_f16 v18, 0x200, v18 op_sel_hi:[0,1]
; GFX11-TRUE16-NEXT: v_pk_add_f16 v17, 0x200, s29 op_sel_hi:[0,1]
-; GFX11-TRUE16-NEXT: v_pk_add_f16 v12, 0x200, s28 op_sel_hi:[0,1]
-; GFX11-TRUE16-NEXT: v_pk_add_f16 v13, 0x200, s27 op_sel_hi:[0,1]
+; GFX11-TRUE16-NEXT: v_pk_add_f16 v16, 0x200, s28 op_sel_hi:[0,1]
+; GFX11-TRUE16-NEXT: v_pk_add_f16 v15, 0x200, s27 op_sel_hi:[0,1]
; GFX11-TRUE16-NEXT: v_pk_add_f16 v14, 0x200, s26 op_sel_hi:[0,1]
-; GFX11-TRUE16-NEXT: v_pk_add_f16 v15, 0x200, s25 op_sel_hi:[0,1]
-; GFX11-TRUE16-NEXT: v_pk_add_f16 v16, 0x200, s24 op_sel_hi:[0,1]
-; GFX11-TRUE16-NEXT: v_pk_add_f16 v7, 0x200, s15 op_sel_hi:[0,1]
-; GFX11-TRUE16-NEXT: v_pk_add_f16 v8, 0x200, s14 op_sel_hi:[0,1]
+; GFX11-TRUE16-NEXT: v_pk_add_f16 v13, 0x200, s25 op_sel_hi:[0,1]
+; GFX11-TRUE16-NEXT: v_pk_add_f16 v12, 0x200, s24 op_sel_hi:[0,1]
+; GFX11-TRUE16-NEXT: v_pk_add_f16 v11, 0x200, s15 op_sel_hi:[0,1]
+; GFX11-TRUE16-NEXT: v_pk_add_f16 v10, 0x200, s14 op_sel_hi:[0,1]
; GFX11-TRUE16-NEXT: v_pk_add_f16 v9, 0x200, s13 op_sel_hi:[0,1]
-; GFX11-TRUE16-NEXT: v_pk_add_f16 v10, 0x200, s11 op_sel_hi:[0,1]
-; GFX11-TRUE16-NEXT: v_pk_add_f16 v11, 0x200, s10 op_sel_hi:[0,1]
-; GFX11-TRUE16-NEXT: v_pk_add_f16 v4, 0x200, s9 op_sel_hi:[0,1]
+; GFX11-TRUE16-NEXT: v_pk_add_f16 v8, 0x200, s11 op_sel_hi:[0,1]
+; GFX11-TRUE16-NEXT: v_pk_add_f16 v7, 0x200, s10 op_sel_hi:[0,1]
+; GFX11-TRUE16-NEXT: v_pk_add_f16 v6, 0x200, s9 op_sel_hi:[0,1]
; GFX11-TRUE16-NEXT: v_pk_add_f16 v5, 0x200, s8 op_sel_hi:[0,1]
-; GFX11-TRUE16-NEXT: v_pk_add_f16 v25, 0x200, s0 op_sel_hi:[0,1]
-; GFX11-TRUE16-NEXT: v_pk_add_f16 v24, 0x200, s1 op_sel_hi:[0,1]
-; GFX11-TRUE16-NEXT: v_pk_add_f16 v23, 0x200, s2 op_sel_hi:[0,1]
-; GFX11-TRUE16-NEXT: v_pk_add_f16 v22, 0x200, s3 op_sel_hi:[0,1]
-; GFX11-TRUE16-NEXT: v_pk_add_f16 v6, 0x200, s6 op_sel_hi:[0,1]
-; GFX11-TRUE16-NEXT: v_lshrrev_b32_e32 v51, 16, v25
-; GFX11-TRUE16-NEXT: v_lshrrev_b32_e32 v50, 16, v24
-; GFX11-TRUE16-NEXT: v_lshrrev_b32_e32 v49, 16, v23
-; GFX11-TRUE16-NEXT: v_lshrrev_b32_e32 v48, 16, v22
-; GFX11-TRUE16-NEXT: v_lshrrev_b32_e32 v39, 16, v6
+; GFX11-TRUE16-NEXT: v_pk_add_f16 v0, 0x200, s0 op_sel_hi:[0,1]
+; GFX11-TRUE16-NEXT: v_pk_add_f16 v1, 0x200, s1 op_sel_hi:[0,1]
+; GFX11-TRUE16-NEXT: v_pk_add_f16 v2, 0x200, s2 op_sel_hi:[0,1]
+; GFX11-TRUE16-NEXT: v_pk_add_f16 v3, 0x200, s3 op_sel_hi:[0,1]
+; GFX11-TRUE16-NEXT: v_pk_add_f16 v4, 0x200, s6 op_sel_hi:[0,1]
+; GFX11-TRUE16-NEXT: v_lshrrev_b32_e32 v51, 16, v0
+; GFX11-TRUE16-NEXT: v_lshrrev_b32_e32 v50, 16, v1
+; GFX11-TRUE16-NEXT: v_lshrrev_b32_e32 v49, 16, v2
+; GFX11-TRUE16-NEXT: v_lshrrev_b32_e32 v48, 16, v3
+; GFX11-TRUE16-NEXT: v_lshrrev_b32_e32 v39, 16, v4
; GFX11-TRUE16-NEXT: v_lshrrev_b32_e32 v38, 16, v5
-; GFX11-TRUE16-NEXT: v_lshrrev_b32_e32 v37, 16, v4
-; GFX11-TRUE16-NEXT: v_lshrrev_b32_e32 v36, 16, v11
-; GFX11-TRUE16-NEXT: v_lshrrev_b32_e32 v35, 16, v10
+; GFX11-TRUE16-NEXT: v_lshrrev_b32_e32 v37, 16, v6
+; GFX11-TRUE16-NEXT: v_lshrrev_b32_e32 v36, 16, v7
+; GFX11-TRUE16-NEXT: v_lshrrev_b32_e32 v35, 16, v8
; GFX11-TRUE16-NEXT: v_lshrrev_b32_e32 v34, 16, v9
-; GFX11-TRUE16-NEXT: v_lshrrev_b32_e32 v33, 16, v8
-; GFX11-TRUE16-NEXT: v_lshrrev_b32_e32 v32, 16, v7
-; GFX11-TRUE16-NEXT: v_lshrrev_b32_e32 v31, 16, v16
-; GFX11-TRUE16-NEXT: v_lshrrev_b32_e32 v30, 16, v15
+; GFX11-TRUE16-NEXT: v_lshrrev_b32_e32 v33, 16, v10
+; GFX11-TRUE16-NEXT: v_lshrrev_b32_e32 v32, 16, v11
+; GFX11-TRUE16-NEXT: v_lshrrev_b32_e32 v31, 16, v12
+; GFX11-TRUE16-NEXT: v_lshrrev_b32_e32 v30, 16, v13
; GFX11-TRUE16-NEXT: v_lshrrev_b32_e32 v29, 16, v14
-; GFX11-TRUE16-NEXT: v_lshrrev_b32_e32 v28, 16, v13
-; GFX11-TRUE16-NEXT: v_lshrrev_b32_e32 v27, 16, v12
+; GFX11-TRUE16-NEXT: v_lshrrev_b32_e32 v28, 16, v15
+; GFX11-TRUE16-NEXT: v_lshrrev_b32_e32 v27, 16, v16
; GFX11-TRUE16-NEXT: v_lshrrev_b32_e32 v26, 16, v17
-; GFX11-TRUE16-NEXT: v_lshrrev_b32_e32 v18, 16, v0
-; GFX11-TRUE16-NEXT: v_lshrrev_b32_e32 v19, 16, v1
-; GFX11-TRUE16-NEXT: v_lshrrev_b32_e32 v20, 16, v2
-; GFX11-TRUE16-NEXT: v_lshrrev_b32_e32 v21, 16, v3
+; GFX11-TRUE16-NEXT: v_lshrrev_b32_e32 v25, 16, v18
+; GFX11-TRUE16-NEXT: v_lshrrev_b32_e32 v24, 16, v19
+; GFX11-TRUE16-NEXT: v_lshrrev_b32_e32 v23, 16, v20
+; GFX11-TRUE16-NEXT: v_lshrrev_b32_e32 v22, 16, v21
; GFX11-TRUE16-NEXT: s_branch .LBB59_5
; GFX11-TRUE16-NEXT: .LBB59_3:
+; GFX11-TRUE16-NEXT: ; implicit-def: $vgpr25
+; GFX11-TRUE16-NEXT: ; implicit-def: $vgpr24
+; GFX11-TRUE16-NEXT: ; implicit-def: $vgpr23
+; GFX11-TRUE16-NEXT: ; implicit-def: $vgpr22
; GFX11-TRUE16-NEXT: s_branch .LBB59_2
; GFX11-TRUE16-NEXT: .LBB59_4:
-; GFX11-TRUE16-NEXT: v_dual_mov_b32 v17, s29 :: v_dual_mov_b32 v12, s28
-; GFX11-TRUE16-NEXT: v_dual_mov_b32 v13, s27 :: v_dual_mov_b32 v14, s26
-; GFX11-TRUE16-NEXT: v_dual_mov_b32 v15, s25 :: v_dual_mov_b32 v16, s24
-; GFX11-TRUE16-NEXT: v_dual_mov_b32 v7, s23 :: v_dual_mov_b32 v8, s22
-; GFX11-TRUE16-NEXT: v_dual_mov_b32 v9, s21 :: v_dual_mov_b32 v10, s20
-; GFX11-TRUE16-NEXT: v_dual_mov_b32 v11, s19 :: v_dual_mov_b32 v4, s18
-; GFX11-TRUE16-NEXT: v_dual_mov_b32 v5, s17 :: v_dual_mov_b32 v6, s16
-; GFX11-TRUE16-NEXT: v_dual_mov_b32 v22, s3 :: v_dual_mov_b32 v23, s2
-; GFX11-TRUE16-NEXT: v_dual_mov_b32 v24, s1 :: v_dual_mov_b32 v25, s0
+; GFX11-TRUE16-NEXT: v_dual_mov_b32 v17, s29 :: v_dual_mov_b32 v16, s28
+; GFX11-TRUE16-NEXT: v_dual_mov_b32 v15, s27 :: v_dual_mov_b32 v14, s26
+; GFX11-TRUE16-NEXT: v_dual_mov_b32 v13, s25 :: v_dual_mov_b32 v12, s24
+; GFX11-TRUE16-NEXT: v_dual_mov_b32 v11, s23 :: v_dual_mov_b32 v10, s22
+; GFX11-TRUE16-NEXT: v_dual_mov_b32 v9, s21 :: v_dual_mov_b32 v8, s20
+; GFX11-TRUE16-NEXT: v_dual_mov_b32 v7, s19 :: v_dual_mov_b32 v6, s18
+; GFX11-TRUE16-NEXT: v_dual_mov_b32 v5, s17 :: v_dual_mov_b32 v4, s16
+; GFX11-TRUE16-NEXT: v_dual_mov_b32 v3, s3 :: v_dual_mov_b32 v2, s2
+; GFX11-TRUE16-NEXT: v_dual_mov_b32 v1, s1 :: v_dual_mov_b32 v0, s0
; GFX11-TRUE16-NEXT: v_dual_mov_b32 v26, s45 :: v_dual_mov_b32 v27, s44
; GFX11-TRUE16-NEXT: v_dual_mov_b32 v28, s43 :: v_dual_mov_b32 v29, s42
; GFX11-TRUE16-NEXT: v_dual_mov_b32 v30, s41 :: v_dual_mov_b32 v31, s40
@@ -36406,53 +38333,40 @@ define inreg <44 x i16> @bitcast_v44f16_to_v44i16_scalar(<44 x half> inreg %a, i
; GFX11-TRUE16-NEXT: v_dual_mov_b32 v48, s5 :: v_dual_mov_b32 v49, s7
; GFX11-TRUE16-NEXT: v_dual_mov_b32 v50, s4 :: v_dual_mov_b32 v51, s12
; GFX11-TRUE16-NEXT: .LBB59_5: ; %end
-; GFX11-TRUE16-NEXT: v_and_b32_e32 v25, 0xffff, v25
-; GFX11-TRUE16-NEXT: v_and_b32_e32 v52, 0xffff, v24
-; GFX11-TRUE16-NEXT: v_and_b32_e32 v23, 0xffff, v23
-; GFX11-TRUE16-NEXT: v_and_b32_e32 v53, 0xffff, v22
-; GFX11-TRUE16-NEXT: v_and_b32_e32 v5, 0xffff, v5
-; GFX11-TRUE16-NEXT: v_lshl_or_b32 v24, v51, 16, v25
-; GFX11-TRUE16-NEXT: v_lshl_or_b32 v25, v50, 16, v52
-; GFX11-TRUE16-NEXT: v_and_b32_e32 v50, 0xffff, v4
-; GFX11-TRUE16-NEXT: v_lshl_or_b32 v22, v49, 16, v23
-; GFX11-TRUE16-NEXT: v_lshl_or_b32 v23, v48, 16, v53
-; GFX11-TRUE16-NEXT: v_and_b32_e32 v6, 0xffff, v6
-; GFX11-TRUE16-NEXT: v_and_b32_e32 v3, 0xffff, v3
-; GFX11-TRUE16-NEXT: v_lshl_or_b32 v5, v38, 16, v5
-; GFX11-TRUE16-NEXT: v_and_b32_e32 v11, 0xffff, v11
-; GFX11-TRUE16-NEXT: v_and_b32_e32 v10, 0xffff, v10
-; GFX11-TRUE16-NEXT: v_and_b32_e32 v38, 0xffff, v7
-; GFX11-TRUE16-NEXT: v_lshl_or_b32 v21, v21, 16, v3
-; GFX11-TRUE16-NEXT: v_mov_b32_e32 v3, v23
-; GFX11-TRUE16-NEXT: v_lshl_or_b32 v4, v39, 16, v6
-; GFX11-TRUE16-NEXT: v_lshl_or_b32 v6, v37, 16, v50
-; GFX11-TRUE16-NEXT: v_and_b32_e32 v37, 0xffff, v8
-; GFX11-TRUE16-NEXT: v_and_b32_e32 v9, 0xffff, v9
-; GFX11-TRUE16-NEXT: v_lshl_or_b32 v7, v36, 16, v11
-; GFX11-TRUE16-NEXT: v_lshl_or_b32 v8, v35, 16, v10
-; GFX11-TRUE16-NEXT: v_lshl_or_b32 v11, v32, 16, v38
-; GFX11-TRUE16-NEXT: v_lshl_or_b32 v10, v33, 16, v37
-; GFX11-TRUE16-NEXT: v_and_b32_e32 v16, 0xffff, v16
-; GFX11-TRUE16-NEXT: v_and_b32_e32 v15, 0xffff, v15
-; GFX11-TRUE16-NEXT: v_and_b32_e32 v14, 0xffff, v14
-; GFX11-TRUE16-NEXT: v_and_b32_e32 v32, 0xffff, v13
-; GFX11-TRUE16-NEXT: v_and_b32_e32 v33, 0xffff, v12
-; GFX11-TRUE16-NEXT: v_and_b32_e32 v17, 0xffff, v17
-; GFX11-TRUE16-NEXT: v_and_b32_e32 v0, 0xffff, v0
-; GFX11-TRUE16-NEXT: v_and_b32_e32 v1, 0xffff, v1
-; GFX11-TRUE16-NEXT: v_and_b32_e32 v2, 0xffff, v2
-; GFX11-TRUE16-NEXT: v_lshl_or_b32 v9, v34, 16, v9
-; GFX11-TRUE16-NEXT: v_lshl_or_b32 v12, v31, 16, v16
-; GFX11-TRUE16-NEXT: v_lshl_or_b32 v13, v30, 16, v15
-; GFX11-TRUE16-NEXT: v_lshl_or_b32 v14, v29, 16, v14
-; GFX11-TRUE16-NEXT: v_lshl_or_b32 v15, v28, 16, v32
-; GFX11-TRUE16-NEXT: v_lshl_or_b32 v16, v27, 16, v33
-; GFX11-TRUE16-NEXT: v_lshl_or_b32 v17, v26, 16, v17
-; GFX11-TRUE16-NEXT: v_lshl_or_b32 v18, v18, 16, v0
-; GFX11-TRUE16-NEXT: v_lshl_or_b32 v19, v19, 16, v1
-; GFX11-TRUE16-NEXT: v_lshl_or_b32 v20, v20, 16, v2
-; GFX11-TRUE16-NEXT: v_dual_mov_b32 v0, v24 :: v_dual_mov_b32 v1, v25
-; GFX11-TRUE16-NEXT: v_mov_b32_e32 v2, v22
+; GFX11-TRUE16-NEXT: s_delay_alu instid0(VALU_DEP_1) | instskip(NEXT) | instid1(VALU_DEP_3)
+; GFX11-TRUE16-NEXT: v_dual_mov_b32 v51, v51 :: v_dual_mov_b32 v50, v50
+; GFX11-TRUE16-NEXT: v_dual_mov_b32 v49, v49 :: v_dual_mov_b32 v48, v48
+; GFX11-TRUE16-NEXT: v_dual_mov_b32 v39, v39 :: v_dual_mov_b32 v38, v38
+; GFX11-TRUE16-NEXT: v_dual_mov_b32 v37, v37 :: v_dual_mov_b32 v36, v36
+; GFX11-TRUE16-NEXT: v_dual_mov_b32 v35, v35 :: v_dual_mov_b32 v34, v34
+; GFX11-TRUE16-NEXT: v_dual_mov_b32 v33, v33 :: v_dual_mov_b32 v32, v32
+; GFX11-TRUE16-NEXT: v_dual_mov_b32 v31, v31 :: v_dual_mov_b32 v30, v30
+; GFX11-TRUE16-NEXT: v_dual_mov_b32 v29, v29 :: v_dual_mov_b32 v28, v28
+; GFX11-TRUE16-NEXT: v_dual_mov_b32 v27, v27 :: v_dual_mov_b32 v26, v26
+; GFX11-TRUE16-NEXT: v_dual_mov_b32 v25, v25 :: v_dual_mov_b32 v24, v24
+; GFX11-TRUE16-NEXT: v_dual_mov_b32 v23, v23 :: v_dual_mov_b32 v22, v22
+; GFX11-TRUE16-NEXT: v_mov_b16_e32 v0.h, v51.l
+; GFX11-TRUE16-NEXT: v_mov_b16_e32 v1.h, v50.l
+; GFX11-TRUE16-NEXT: v_mov_b16_e32 v2.h, v49.l
+; GFX11-TRUE16-NEXT: v_mov_b16_e32 v3.h, v48.l
+; GFX11-TRUE16-NEXT: v_mov_b16_e32 v4.h, v39.l
+; GFX11-TRUE16-NEXT: v_mov_b16_e32 v5.h, v38.l
+; GFX11-TRUE16-NEXT: v_mov_b16_e32 v6.h, v37.l
+; GFX11-TRUE16-NEXT: v_mov_b16_e32 v7.h, v36.l
+; GFX11-TRUE16-NEXT: v_mov_b16_e32 v8.h, v35.l
+; GFX11-TRUE16-NEXT: v_mov_b16_e32 v9.h, v34.l
+; GFX11-TRUE16-NEXT: v_mov_b16_e32 v10.h, v33.l
+; GFX11-TRUE16-NEXT: v_mov_b16_e32 v11.h, v32.l
+; GFX11-TRUE16-NEXT: v_mov_b16_e32 v12.h, v31.l
+; GFX11-TRUE16-NEXT: v_mov_b16_e32 v13.h, v30.l
+; GFX11-TRUE16-NEXT: v_mov_b16_e32 v14.h, v29.l
+; GFX11-TRUE16-NEXT: v_mov_b16_e32 v15.h, v28.l
+; GFX11-TRUE16-NEXT: v_mov_b16_e32 v16.h, v27.l
+; GFX11-TRUE16-NEXT: v_mov_b16_e32 v17.h, v26.l
+; GFX11-TRUE16-NEXT: v_mov_b16_e32 v18.h, v25.l
+; GFX11-TRUE16-NEXT: v_mov_b16_e32 v19.h, v24.l
+; GFX11-TRUE16-NEXT: v_mov_b16_e32 v20.h, v23.l
+; GFX11-TRUE16-NEXT: v_mov_b16_e32 v21.h, v22.l
; GFX11-TRUE16-NEXT: s_setpc_b64 s[30:31]
;
; GFX11-FAKE16-LABEL: bitcast_v44f16_to_v44i16_scalar:
diff --git a/llvm/test/CodeGen/AMDGPU/amdgcn.bitcast.768bit.ll b/llvm/test/CodeGen/AMDGPU/amdgcn.bitcast.768bit.ll
index c35e183..fb2e94f 100644
--- a/llvm/test/CodeGen/AMDGPU/amdgcn.bitcast.768bit.ll
+++ b/llvm/test/CodeGen/AMDGPU/amdgcn.bitcast.768bit.ll
@@ -5805,117 +5805,286 @@ define inreg <24 x i32> @bitcast_v48i16_to_v24i32_scalar(<48 x i16> inreg %a, i3
; GFX11-TRUE16-LABEL: bitcast_v48i16_to_v24i32_scalar:
; GFX11-TRUE16: ; %bb.0:
; GFX11-TRUE16-NEXT: s_waitcnt vmcnt(0) expcnt(0) lgkmcnt(0)
-; GFX11-TRUE16-NEXT: v_mov_b16_e32 v32.h, 0
; GFX11-TRUE16-NEXT: v_cmp_ne_u32_e32 vcc_lo, 0, v6
-; GFX11-TRUE16-NEXT: v_mov_b16_e32 v32.l, v5.h
-; GFX11-TRUE16-NEXT: v_mov_b16_e32 v33.l, v4.h
-; GFX11-TRUE16-NEXT: v_mov_b16_e32 v34.l, v3.h
-; GFX11-TRUE16-NEXT: v_mov_b16_e32 v33.h, v32.h
-; GFX11-TRUE16-NEXT: v_mov_b16_e32 v34.h, v32.h
-; GFX11-TRUE16-NEXT: v_mov_b16_e32 v35.l, v2.h
-; GFX11-TRUE16-NEXT: v_mov_b16_e32 v35.h, v32.h
-; GFX11-TRUE16-NEXT: v_mov_b16_e32 v36.l, v1.h
-; GFX11-TRUE16-NEXT: v_mov_b16_e32 v36.h, v32.h
-; GFX11-TRUE16-NEXT: v_mov_b16_e32 v37.l, v0.h
-; GFX11-TRUE16-NEXT: v_mov_b16_e32 v37.h, v32.h
-; GFX11-TRUE16-NEXT: v_and_b32_e32 v51, 0xffff, v0
-; GFX11-TRUE16-NEXT: v_and_b32_e32 v50, 0xffff, v1
-; GFX11-TRUE16-NEXT: v_and_b32_e32 v49, 0xffff, v2
-; GFX11-TRUE16-NEXT: v_and_b32_e32 v48, 0xffff, v3
-; GFX11-TRUE16-NEXT: v_and_b32_e32 v39, 0xffff, v4
-; GFX11-TRUE16-NEXT: v_and_b32_e32 v38, 0xffff, v5
-; GFX11-TRUE16-NEXT: s_lshr_b32 s41, s29, 16
-; GFX11-TRUE16-NEXT: s_lshr_b32 s42, s28, 16
-; GFX11-TRUE16-NEXT: s_lshr_b32 s15, s27, 16
-; GFX11-TRUE16-NEXT: s_lshr_b32 s43, s26, 16
-; GFX11-TRUE16-NEXT: s_lshr_b32 s14, s25, 16
-; GFX11-TRUE16-NEXT: s_lshr_b32 s13, s24, 16
-; GFX11-TRUE16-NEXT: s_lshr_b32 s12, s23, 16
-; GFX11-TRUE16-NEXT: s_lshr_b32 s11, s22, 16
-; GFX11-TRUE16-NEXT: s_lshr_b32 s10, s21, 16
-; GFX11-TRUE16-NEXT: s_lshr_b32 s9, s20, 16
-; GFX11-TRUE16-NEXT: s_lshr_b32 s8, s19, 16
-; GFX11-TRUE16-NEXT: s_lshr_b32 s7, s18, 16
-; GFX11-TRUE16-NEXT: s_lshr_b32 s6, s17, 16
-; GFX11-TRUE16-NEXT: s_lshr_b32 s5, s16, 16
-; GFX11-TRUE16-NEXT: s_lshr_b32 s44, s3, 16
-; GFX11-TRUE16-NEXT: s_lshr_b32 s45, s2, 16
-; GFX11-TRUE16-NEXT: s_lshr_b32 s46, s1, 16
-; GFX11-TRUE16-NEXT: s_lshr_b32 s4, s0, 16
-; GFX11-TRUE16-NEXT: s_mov_b32 s40, 0
-; GFX11-TRUE16-NEXT: s_pack_ll_b32_b16 s4, s0, s4
-; GFX11-TRUE16-NEXT: s_pack_ll_b32_b16 s1, s1, s46
-; GFX11-TRUE16-NEXT: s_pack_ll_b32_b16 s2, s2, s45
-; GFX11-TRUE16-NEXT: s_pack_ll_b32_b16 s3, s3, s44
-; GFX11-TRUE16-NEXT: s_pack_ll_b32_b16 s5, s16, s5
-; GFX11-TRUE16-NEXT: s_pack_ll_b32_b16 s6, s17, s6
-; GFX11-TRUE16-NEXT: s_pack_ll_b32_b16 s7, s18, s7
-; GFX11-TRUE16-NEXT: s_pack_ll_b32_b16 s8, s19, s8
-; GFX11-TRUE16-NEXT: s_pack_ll_b32_b16 s9, s20, s9
-; GFX11-TRUE16-NEXT: s_pack_ll_b32_b16 s10, s21, s10
-; GFX11-TRUE16-NEXT: s_pack_ll_b32_b16 s11, s22, s11
-; GFX11-TRUE16-NEXT: s_pack_ll_b32_b16 s12, s23, s12
-; GFX11-TRUE16-NEXT: s_pack_ll_b32_b16 s13, s24, s13
-; GFX11-TRUE16-NEXT: s_pack_ll_b32_b16 s14, s25, s14
-; GFX11-TRUE16-NEXT: s_pack_ll_b32_b16 s0, s26, s43
-; GFX11-TRUE16-NEXT: s_pack_ll_b32_b16 s15, s27, s15
-; GFX11-TRUE16-NEXT: s_pack_ll_b32_b16 s16, s28, s42
-; GFX11-TRUE16-NEXT: s_pack_ll_b32_b16 s17, s29, s41
+; GFX11-TRUE16-NEXT: s_clause 0x1f
+; GFX11-TRUE16-NEXT: scratch_store_b32 off, v40, s32 offset:312
+; GFX11-TRUE16-NEXT: scratch_store_b32 off, v41, s32 offset:308
+; GFX11-TRUE16-NEXT: scratch_store_b32 off, v42, s32 offset:304
+; GFX11-TRUE16-NEXT: scratch_store_b32 off, v43, s32 offset:300
+; GFX11-TRUE16-NEXT: scratch_store_b32 off, v44, s32 offset:296
+; GFX11-TRUE16-NEXT: scratch_store_b32 off, v45, s32 offset:292
+; GFX11-TRUE16-NEXT: scratch_store_b32 off, v46, s32 offset:288
+; GFX11-TRUE16-NEXT: scratch_store_b32 off, v47, s32 offset:284
+; GFX11-TRUE16-NEXT: scratch_store_b32 off, v56, s32 offset:280
+; GFX11-TRUE16-NEXT: scratch_store_b32 off, v57, s32 offset:276
+; GFX11-TRUE16-NEXT: scratch_store_b32 off, v58, s32 offset:272
+; GFX11-TRUE16-NEXT: scratch_store_b32 off, v59, s32 offset:268
+; GFX11-TRUE16-NEXT: scratch_store_b32 off, v60, s32 offset:264
+; GFX11-TRUE16-NEXT: scratch_store_b32 off, v61, s32 offset:260
+; GFX11-TRUE16-NEXT: scratch_store_b32 off, v62, s32 offset:256
+; GFX11-TRUE16-NEXT: scratch_store_b32 off, v63, s32 offset:252
+; GFX11-TRUE16-NEXT: scratch_store_b32 off, v72, s32 offset:248
+; GFX11-TRUE16-NEXT: scratch_store_b32 off, v73, s32 offset:244
+; GFX11-TRUE16-NEXT: scratch_store_b32 off, v74, s32 offset:240
+; GFX11-TRUE16-NEXT: scratch_store_b32 off, v75, s32 offset:236
+; GFX11-TRUE16-NEXT: scratch_store_b32 off, v76, s32 offset:232
+; GFX11-TRUE16-NEXT: scratch_store_b32 off, v77, s32 offset:228
+; GFX11-TRUE16-NEXT: scratch_store_b32 off, v78, s32 offset:224
+; GFX11-TRUE16-NEXT: scratch_store_b32 off, v79, s32 offset:220
+; GFX11-TRUE16-NEXT: scratch_store_b32 off, v88, s32 offset:216
+; GFX11-TRUE16-NEXT: scratch_store_b32 off, v89, s32 offset:212
+; GFX11-TRUE16-NEXT: scratch_store_b32 off, v90, s32 offset:208
+; GFX11-TRUE16-NEXT: scratch_store_b32 off, v91, s32 offset:204
+; GFX11-TRUE16-NEXT: scratch_store_b32 off, v92, s32 offset:200
+; GFX11-TRUE16-NEXT: scratch_store_b32 off, v93, s32 offset:196
+; GFX11-TRUE16-NEXT: scratch_store_b32 off, v94, s32 offset:192
+; GFX11-TRUE16-NEXT: scratch_store_b32 off, v95, s32 offset:188
+; GFX11-TRUE16-NEXT: s_clause 0x1f
+; GFX11-TRUE16-NEXT: scratch_store_b32 off, v104, s32 offset:184
+; GFX11-TRUE16-NEXT: scratch_store_b32 off, v105, s32 offset:180
+; GFX11-TRUE16-NEXT: scratch_store_b32 off, v106, s32 offset:176
+; GFX11-TRUE16-NEXT: scratch_store_b32 off, v107, s32 offset:172
+; GFX11-TRUE16-NEXT: scratch_store_b32 off, v108, s32 offset:168
+; GFX11-TRUE16-NEXT: scratch_store_b32 off, v109, s32 offset:164
+; GFX11-TRUE16-NEXT: scratch_store_b32 off, v110, s32 offset:160
+; GFX11-TRUE16-NEXT: scratch_store_b32 off, v111, s32 offset:156
+; GFX11-TRUE16-NEXT: scratch_store_b32 off, v120, s32 offset:152
+; GFX11-TRUE16-NEXT: scratch_store_b32 off, v121, s32 offset:148
+; GFX11-TRUE16-NEXT: scratch_store_b32 off, v122, s32 offset:144
+; GFX11-TRUE16-NEXT: scratch_store_b32 off, v123, s32 offset:140
+; GFX11-TRUE16-NEXT: scratch_store_b32 off, v124, s32 offset:136
+; GFX11-TRUE16-NEXT: scratch_store_b32 off, v125, s32 offset:132
+; GFX11-TRUE16-NEXT: scratch_store_b32 off, v126, s32 offset:128
+; GFX11-TRUE16-NEXT: scratch_store_b32 off, v127, s32 offset:124
+; GFX11-TRUE16-NEXT: scratch_store_b32 off, v136, s32 offset:120
+; GFX11-TRUE16-NEXT: scratch_store_b32 off, v137, s32 offset:116
+; GFX11-TRUE16-NEXT: scratch_store_b32 off, v138, s32 offset:112
+; GFX11-TRUE16-NEXT: scratch_store_b32 off, v139, s32 offset:108
+; GFX11-TRUE16-NEXT: scratch_store_b32 off, v140, s32 offset:104
+; GFX11-TRUE16-NEXT: scratch_store_b32 off, v141, s32 offset:100
+; GFX11-TRUE16-NEXT: scratch_store_b32 off, v142, s32 offset:96
+; GFX11-TRUE16-NEXT: scratch_store_b32 off, v143, s32 offset:92
+; GFX11-TRUE16-NEXT: scratch_store_b32 off, v152, s32 offset:88
+; GFX11-TRUE16-NEXT: scratch_store_b32 off, v153, s32 offset:84
+; GFX11-TRUE16-NEXT: scratch_store_b32 off, v154, s32 offset:80
+; GFX11-TRUE16-NEXT: scratch_store_b32 off, v155, s32 offset:76
+; GFX11-TRUE16-NEXT: scratch_store_b32 off, v156, s32 offset:72
+; GFX11-TRUE16-NEXT: scratch_store_b32 off, v157, s32 offset:68
+; GFX11-TRUE16-NEXT: scratch_store_b32 off, v158, s32 offset:64
+; GFX11-TRUE16-NEXT: scratch_store_b32 off, v159, s32 offset:60
+; GFX11-TRUE16-NEXT: s_clause 0xe
+; GFX11-TRUE16-NEXT: scratch_store_b32 off, v168, s32 offset:56
+; GFX11-TRUE16-NEXT: scratch_store_b32 off, v169, s32 offset:52
+; GFX11-TRUE16-NEXT: scratch_store_b32 off, v170, s32 offset:48
+; GFX11-TRUE16-NEXT: scratch_store_b32 off, v171, s32 offset:44
+; GFX11-TRUE16-NEXT: scratch_store_b32 off, v172, s32 offset:40
+; GFX11-TRUE16-NEXT: scratch_store_b32 off, v173, s32 offset:36
+; GFX11-TRUE16-NEXT: scratch_store_b32 off, v174, s32 offset:32
+; GFX11-TRUE16-NEXT: scratch_store_b32 off, v175, s32 offset:28
+; GFX11-TRUE16-NEXT: scratch_store_b32 off, v184, s32 offset:24
+; GFX11-TRUE16-NEXT: scratch_store_b32 off, v185, s32 offset:20
+; GFX11-TRUE16-NEXT: scratch_store_b32 off, v186, s32 offset:16
+; GFX11-TRUE16-NEXT: scratch_store_b32 off, v187, s32 offset:12
+; GFX11-TRUE16-NEXT: scratch_store_b32 off, v188, s32 offset:8
+; GFX11-TRUE16-NEXT: scratch_store_b32 off, v189, s32 offset:4
+; GFX11-TRUE16-NEXT: scratch_store_b32 off, v190, s32
+; GFX11-TRUE16-NEXT: v_dual_mov_b32 v185, v5 :: v_dual_mov_b32 v186, v4
+; GFX11-TRUE16-NEXT: v_dual_mov_b32 v187, v3 :: v_dual_mov_b32 v188, v2
+; GFX11-TRUE16-NEXT: v_dual_mov_b32 v189, v1 :: v_dual_mov_b32 v190, v0
+; GFX11-TRUE16-NEXT: s_lshr_b32 s15, s29, 16
+; GFX11-TRUE16-NEXT: s_lshr_b32 s14, s28, 16
+; GFX11-TRUE16-NEXT: s_lshr_b32 s13, s27, 16
+; GFX11-TRUE16-NEXT: s_lshr_b32 s12, s26, 16
+; GFX11-TRUE16-NEXT: s_lshr_b32 s11, s25, 16
+; GFX11-TRUE16-NEXT: s_lshr_b32 s10, s24, 16
+; GFX11-TRUE16-NEXT: s_lshr_b32 s9, s23, 16
+; GFX11-TRUE16-NEXT: s_lshr_b32 s8, s22, 16
+; GFX11-TRUE16-NEXT: s_lshr_b32 s7, s21, 16
+; GFX11-TRUE16-NEXT: s_lshr_b32 s6, s20, 16
+; GFX11-TRUE16-NEXT: s_lshr_b32 s5, s19, 16
+; GFX11-TRUE16-NEXT: s_lshr_b32 s4, s18, 16
+; GFX11-TRUE16-NEXT: s_lshr_b32 s43, s17, 16
+; GFX11-TRUE16-NEXT: s_lshr_b32 s44, s16, 16
+; GFX11-TRUE16-NEXT: s_lshr_b32 s45, s3, 16
+; GFX11-TRUE16-NEXT: s_lshr_b32 s46, s2, 16
+; GFX11-TRUE16-NEXT: s_lshr_b32 s41, s1, 16
+; GFX11-TRUE16-NEXT: s_lshr_b32 s40, s0, 16
+; GFX11-TRUE16-NEXT: s_mov_b32 s42, 0
+; GFX11-TRUE16-NEXT: s_pack_ll_b32_b16 s40, s0, s40
+; GFX11-TRUE16-NEXT: s_pack_ll_b32_b16 s41, s1, s41
+; GFX11-TRUE16-NEXT: s_pack_ll_b32_b16 s0, s2, s46
+; GFX11-TRUE16-NEXT: s_pack_ll_b32_b16 s1, s3, s45
+; GFX11-TRUE16-NEXT: s_pack_ll_b32_b16 s2, s16, s44
+; GFX11-TRUE16-NEXT: s_pack_ll_b32_b16 s3, s17, s43
+; GFX11-TRUE16-NEXT: s_pack_ll_b32_b16 s4, s18, s4
+; GFX11-TRUE16-NEXT: s_pack_ll_b32_b16 s5, s19, s5
+; GFX11-TRUE16-NEXT: s_pack_ll_b32_b16 s6, s20, s6
+; GFX11-TRUE16-NEXT: s_pack_ll_b32_b16 s7, s21, s7
+; GFX11-TRUE16-NEXT: s_pack_ll_b32_b16 s8, s22, s8
+; GFX11-TRUE16-NEXT: s_pack_ll_b32_b16 s9, s23, s9
+; GFX11-TRUE16-NEXT: s_pack_ll_b32_b16 s10, s24, s10
+; GFX11-TRUE16-NEXT: s_pack_ll_b32_b16 s11, s25, s11
+; GFX11-TRUE16-NEXT: s_pack_ll_b32_b16 s12, s26, s12
+; GFX11-TRUE16-NEXT: s_pack_ll_b32_b16 s13, s27, s13
+; GFX11-TRUE16-NEXT: s_pack_ll_b32_b16 s14, s28, s14
+; GFX11-TRUE16-NEXT: s_pack_ll_b32_b16 s15, s29, s15
; GFX11-TRUE16-NEXT: s_and_b32 s47, vcc_lo, exec_lo
; GFX11-TRUE16-NEXT: s_cbranch_scc0 .LBB15_4
; GFX11-TRUE16-NEXT: ; %bb.1: ; %cmp.false
-; GFX11-TRUE16-NEXT: v_lshl_or_b32 v18, v37, 16, v51
-; GFX11-TRUE16-NEXT: v_lshl_or_b32 v19, v36, 16, v50
-; GFX11-TRUE16-NEXT: v_lshl_or_b32 v20, v35, 16, v49
-; GFX11-TRUE16-NEXT: v_lshl_or_b32 v21, v34, 16, v48
-; GFX11-TRUE16-NEXT: v_lshl_or_b32 v22, v33, 16, v39
-; GFX11-TRUE16-NEXT: v_lshl_or_b32 v23, v32, 16, v38
-; GFX11-TRUE16-NEXT: v_dual_mov_b32 v0, s4 :: v_dual_mov_b32 v1, s1
-; GFX11-TRUE16-NEXT: v_dual_mov_b32 v2, s2 :: v_dual_mov_b32 v3, s3
-; GFX11-TRUE16-NEXT: v_dual_mov_b32 v4, s5 :: v_dual_mov_b32 v5, s6
-; GFX11-TRUE16-NEXT: v_dual_mov_b32 v6, s7 :: v_dual_mov_b32 v7, s8
-; GFX11-TRUE16-NEXT: v_dual_mov_b32 v8, s9 :: v_dual_mov_b32 v9, s10
-; GFX11-TRUE16-NEXT: v_dual_mov_b32 v10, s11 :: v_dual_mov_b32 v11, s12
-; GFX11-TRUE16-NEXT: v_dual_mov_b32 v12, s13 :: v_dual_mov_b32 v13, s14
-; GFX11-TRUE16-NEXT: v_dual_mov_b32 v14, s0 :: v_dual_mov_b32 v15, s15
-; GFX11-TRUE16-NEXT: v_dual_mov_b32 v16, s16 :: v_dual_mov_b32 v17, s17
-; GFX11-TRUE16-NEXT: s_and_not1_b32 vcc_lo, exec_lo, s40
+; GFX11-TRUE16-NEXT: v_dual_mov_b32 v0, s40 :: v_dual_mov_b32 v5, s0
+; GFX11-TRUE16-NEXT: v_dual_mov_b32 v2, s41 :: v_dual_mov_b32 v9, s1
+; GFX11-TRUE16-NEXT: v_dual_mov_b32 v14, s2 :: v_dual_mov_b32 v27, s4
+; GFX11-TRUE16-NEXT: v_dual_mov_b32 v20, s3 :: v_dual_mov_b32 v35, s5
+; GFX11-TRUE16-NEXT: v_dual_mov_b32 v44, s6 :: v_dual_mov_b32 v65, s8
+; GFX11-TRUE16-NEXT: v_dual_mov_b32 v54, s7 :: v_dual_mov_b32 v77, s9
+; GFX11-TRUE16-NEXT: v_dual_mov_b32 v90, s10 :: v_dual_mov_b32 v119, s12
+; GFX11-TRUE16-NEXT: v_dual_mov_b32 v104, s11 :: v_dual_mov_b32 v135, s13
+; GFX11-TRUE16-NEXT: v_mov_b32_e32 v152, s14
+; GFX11-TRUE16-NEXT: v_mov_b32_e32 v170, s15
+; GFX11-TRUE16-NEXT: s_and_not1_b32 vcc_lo, exec_lo, s42
; GFX11-TRUE16-NEXT: s_cbranch_vccnz .LBB15_3
; GFX11-TRUE16-NEXT: .LBB15_2: ; %cmp.true
-; GFX11-TRUE16-NEXT: v_lshl_or_b32 v18, v37, 16, v51
-; GFX11-TRUE16-NEXT: v_lshl_or_b32 v19, v36, 16, v50
-; GFX11-TRUE16-NEXT: v_lshl_or_b32 v20, v35, 16, v49
-; GFX11-TRUE16-NEXT: v_lshl_or_b32 v21, v34, 16, v48
-; GFX11-TRUE16-NEXT: v_lshl_or_b32 v22, v33, 16, v39
-; GFX11-TRUE16-NEXT: v_lshl_or_b32 v23, v32, 16, v38
-; GFX11-TRUE16-NEXT: v_pk_add_u16 v0, s4, 3 op_sel_hi:[1,0]
-; GFX11-TRUE16-NEXT: v_pk_add_u16 v1, s1, 3 op_sel_hi:[1,0]
-; GFX11-TRUE16-NEXT: v_pk_add_u16 v2, s2, 3 op_sel_hi:[1,0]
-; GFX11-TRUE16-NEXT: v_pk_add_u16 v3, s3, 3 op_sel_hi:[1,0]
-; GFX11-TRUE16-NEXT: v_pk_add_u16 v4, s5, 3 op_sel_hi:[1,0]
-; GFX11-TRUE16-NEXT: v_pk_add_u16 v5, s6, 3 op_sel_hi:[1,0]
-; GFX11-TRUE16-NEXT: v_pk_add_u16 v6, s7, 3 op_sel_hi:[1,0]
-; GFX11-TRUE16-NEXT: v_pk_add_u16 v7, s8, 3 op_sel_hi:[1,0]
-; GFX11-TRUE16-NEXT: v_pk_add_u16 v8, s9, 3 op_sel_hi:[1,0]
-; GFX11-TRUE16-NEXT: v_pk_add_u16 v9, s10, 3 op_sel_hi:[1,0]
-; GFX11-TRUE16-NEXT: v_pk_add_u16 v10, s11, 3 op_sel_hi:[1,0]
-; GFX11-TRUE16-NEXT: v_pk_add_u16 v11, s12, 3 op_sel_hi:[1,0]
-; GFX11-TRUE16-NEXT: v_pk_add_u16 v12, s13, 3 op_sel_hi:[1,0]
-; GFX11-TRUE16-NEXT: v_pk_add_u16 v13, s14, 3 op_sel_hi:[1,0]
-; GFX11-TRUE16-NEXT: v_pk_add_u16 v14, s0, 3 op_sel_hi:[1,0]
-; GFX11-TRUE16-NEXT: v_pk_add_u16 v15, s15, 3 op_sel_hi:[1,0]
-; GFX11-TRUE16-NEXT: v_pk_add_u16 v16, s16, 3 op_sel_hi:[1,0]
-; GFX11-TRUE16-NEXT: v_pk_add_u16 v17, s17, 3 op_sel_hi:[1,0]
-; GFX11-TRUE16-NEXT: v_pk_add_u16 v18, v18, 3 op_sel_hi:[1,0]
-; GFX11-TRUE16-NEXT: v_pk_add_u16 v19, v19, 3 op_sel_hi:[1,0]
-; GFX11-TRUE16-NEXT: v_pk_add_u16 v20, v20, 3 op_sel_hi:[1,0]
-; GFX11-TRUE16-NEXT: v_pk_add_u16 v21, v21, 3 op_sel_hi:[1,0]
-; GFX11-TRUE16-NEXT: v_pk_add_u16 v22, v22, 3 op_sel_hi:[1,0]
-; GFX11-TRUE16-NEXT: v_pk_add_u16 v23, v23, 3 op_sel_hi:[1,0]
+; GFX11-TRUE16-NEXT: v_pk_add_u16 v0, s40, 3 op_sel_hi:[1,0]
+; GFX11-TRUE16-NEXT: v_pk_add_u16 v2, s41, 3 op_sel_hi:[1,0]
+; GFX11-TRUE16-NEXT: v_pk_add_u16 v190, v190, 3 op_sel_hi:[1,0]
+; GFX11-TRUE16-NEXT: v_pk_add_u16 v189, v189, 3 op_sel_hi:[1,0]
+; GFX11-TRUE16-NEXT: v_pk_add_u16 v188, v188, 3 op_sel_hi:[1,0]
+; GFX11-TRUE16-NEXT: v_pk_add_u16 v187, v187, 3 op_sel_hi:[1,0]
+; GFX11-TRUE16-NEXT: v_pk_add_u16 v186, v186, 3 op_sel_hi:[1,0]
+; GFX11-TRUE16-NEXT: v_pk_add_u16 v185, v185, 3 op_sel_hi:[1,0]
+; GFX11-TRUE16-NEXT: v_pk_add_u16 v5, s0, 3 op_sel_hi:[1,0]
+; GFX11-TRUE16-NEXT: v_pk_add_u16 v9, s1, 3 op_sel_hi:[1,0]
+; GFX11-TRUE16-NEXT: v_pk_add_u16 v14, s2, 3 op_sel_hi:[1,0]
+; GFX11-TRUE16-NEXT: v_pk_add_u16 v20, s3, 3 op_sel_hi:[1,0]
+; GFX11-TRUE16-NEXT: v_pk_add_u16 v27, s4, 3 op_sel_hi:[1,0]
+; GFX11-TRUE16-NEXT: v_pk_add_u16 v35, s5, 3 op_sel_hi:[1,0]
+; GFX11-TRUE16-NEXT: v_pk_add_u16 v44, s6, 3 op_sel_hi:[1,0]
+; GFX11-TRUE16-NEXT: v_pk_add_u16 v54, s7, 3 op_sel_hi:[1,0]
+; GFX11-TRUE16-NEXT: v_pk_add_u16 v65, s8, 3 op_sel_hi:[1,0]
+; GFX11-TRUE16-NEXT: v_pk_add_u16 v77, s9, 3 op_sel_hi:[1,0]
+; GFX11-TRUE16-NEXT: v_pk_add_u16 v90, s10, 3 op_sel_hi:[1,0]
+; GFX11-TRUE16-NEXT: v_pk_add_u16 v104, s11, 3 op_sel_hi:[1,0]
+; GFX11-TRUE16-NEXT: v_pk_add_u16 v119, s12, 3 op_sel_hi:[1,0]
+; GFX11-TRUE16-NEXT: v_pk_add_u16 v135, s13, 3 op_sel_hi:[1,0]
+; GFX11-TRUE16-NEXT: v_pk_add_u16 v152, s14, 3 op_sel_hi:[1,0]
+; GFX11-TRUE16-NEXT: v_pk_add_u16 v170, s15, 3 op_sel_hi:[1,0]
; GFX11-TRUE16-NEXT: .LBB15_3: ; %end
+; GFX11-TRUE16-NEXT: v_dual_mov_b32 v1, v2 :: v_dual_mov_b32 v2, v5
+; GFX11-TRUE16-NEXT: v_dual_mov_b32 v5, v20 :: v_dual_mov_b32 v6, v27
+; GFX11-TRUE16-NEXT: v_dual_mov_b32 v7, v35 :: v_dual_mov_b32 v8, v44
+; GFX11-TRUE16-NEXT: v_dual_mov_b32 v11, v77 :: v_dual_mov_b32 v12, v90
+; GFX11-TRUE16-NEXT: v_mov_b32_e32 v13, v104
+; GFX11-TRUE16-NEXT: v_dual_mov_b32 v15, v135 :: v_dual_mov_b32 v16, v152
+; GFX11-TRUE16-NEXT: v_dual_mov_b32 v17, v170 :: v_dual_mov_b32 v20, v188
+; GFX11-TRUE16-NEXT: v_dual_mov_b32 v18, v190 :: v_dual_mov_b32 v19, v189
+; GFX11-TRUE16-NEXT: v_dual_mov_b32 v21, v187 :: v_dual_mov_b32 v22, v186
+; GFX11-TRUE16-NEXT: v_mov_b32_e32 v23, v185
+; GFX11-TRUE16-NEXT: s_clause 0x1f
+; GFX11-TRUE16-NEXT: scratch_load_b32 v190, off, s32
+; GFX11-TRUE16-NEXT: scratch_load_b32 v189, off, s32 offset:4
+; GFX11-TRUE16-NEXT: scratch_load_b32 v188, off, s32 offset:8
+; GFX11-TRUE16-NEXT: scratch_load_b32 v187, off, s32 offset:12
+; GFX11-TRUE16-NEXT: scratch_load_b32 v186, off, s32 offset:16
+; GFX11-TRUE16-NEXT: scratch_load_b32 v185, off, s32 offset:20
+; GFX11-TRUE16-NEXT: scratch_load_b32 v184, off, s32 offset:24
+; GFX11-TRUE16-NEXT: scratch_load_b32 v175, off, s32 offset:28
+; GFX11-TRUE16-NEXT: scratch_load_b32 v174, off, s32 offset:32
+; GFX11-TRUE16-NEXT: scratch_load_b32 v173, off, s32 offset:36
+; GFX11-TRUE16-NEXT: scratch_load_b32 v172, off, s32 offset:40
+; GFX11-TRUE16-NEXT: scratch_load_b32 v171, off, s32 offset:44
+; GFX11-TRUE16-NEXT: scratch_load_b32 v170, off, s32 offset:48
+; GFX11-TRUE16-NEXT: scratch_load_b32 v169, off, s32 offset:52
+; GFX11-TRUE16-NEXT: scratch_load_b32 v168, off, s32 offset:56
+; GFX11-TRUE16-NEXT: scratch_load_b32 v159, off, s32 offset:60
+; GFX11-TRUE16-NEXT: scratch_load_b32 v158, off, s32 offset:64
+; GFX11-TRUE16-NEXT: scratch_load_b32 v157, off, s32 offset:68
+; GFX11-TRUE16-NEXT: scratch_load_b32 v156, off, s32 offset:72
+; GFX11-TRUE16-NEXT: scratch_load_b32 v155, off, s32 offset:76
+; GFX11-TRUE16-NEXT: scratch_load_b32 v154, off, s32 offset:80
+; GFX11-TRUE16-NEXT: scratch_load_b32 v153, off, s32 offset:84
+; GFX11-TRUE16-NEXT: scratch_load_b32 v152, off, s32 offset:88
+; GFX11-TRUE16-NEXT: scratch_load_b32 v143, off, s32 offset:92
+; GFX11-TRUE16-NEXT: scratch_load_b32 v142, off, s32 offset:96
+; GFX11-TRUE16-NEXT: scratch_load_b32 v141, off, s32 offset:100
+; GFX11-TRUE16-NEXT: scratch_load_b32 v140, off, s32 offset:104
+; GFX11-TRUE16-NEXT: scratch_load_b32 v139, off, s32 offset:108
+; GFX11-TRUE16-NEXT: scratch_load_b32 v138, off, s32 offset:112
+; GFX11-TRUE16-NEXT: scratch_load_b32 v137, off, s32 offset:116
+; GFX11-TRUE16-NEXT: scratch_load_b32 v136, off, s32 offset:120
+; GFX11-TRUE16-NEXT: scratch_load_b32 v127, off, s32 offset:124
+; GFX11-TRUE16-NEXT: s_clause 0x1f
+; GFX11-TRUE16-NEXT: scratch_load_b32 v126, off, s32 offset:128
+; GFX11-TRUE16-NEXT: scratch_load_b32 v125, off, s32 offset:132
+; GFX11-TRUE16-NEXT: scratch_load_b32 v124, off, s32 offset:136
+; GFX11-TRUE16-NEXT: scratch_load_b32 v123, off, s32 offset:140
+; GFX11-TRUE16-NEXT: scratch_load_b32 v122, off, s32 offset:144
+; GFX11-TRUE16-NEXT: scratch_load_b32 v121, off, s32 offset:148
+; GFX11-TRUE16-NEXT: scratch_load_b32 v120, off, s32 offset:152
+; GFX11-TRUE16-NEXT: scratch_load_b32 v111, off, s32 offset:156
+; GFX11-TRUE16-NEXT: scratch_load_b32 v110, off, s32 offset:160
+; GFX11-TRUE16-NEXT: scratch_load_b32 v109, off, s32 offset:164
+; GFX11-TRUE16-NEXT: scratch_load_b32 v108, off, s32 offset:168
+; GFX11-TRUE16-NEXT: scratch_load_b32 v107, off, s32 offset:172
+; GFX11-TRUE16-NEXT: scratch_load_b32 v106, off, s32 offset:176
+; GFX11-TRUE16-NEXT: scratch_load_b32 v105, off, s32 offset:180
+; GFX11-TRUE16-NEXT: scratch_load_b32 v104, off, s32 offset:184
+; GFX11-TRUE16-NEXT: scratch_load_b32 v95, off, s32 offset:188
+; GFX11-TRUE16-NEXT: scratch_load_b32 v94, off, s32 offset:192
+; GFX11-TRUE16-NEXT: scratch_load_b32 v93, off, s32 offset:196
+; GFX11-TRUE16-NEXT: scratch_load_b32 v92, off, s32 offset:200
+; GFX11-TRUE16-NEXT: scratch_load_b32 v91, off, s32 offset:204
+; GFX11-TRUE16-NEXT: scratch_load_b32 v90, off, s32 offset:208
+; GFX11-TRUE16-NEXT: scratch_load_b32 v89, off, s32 offset:212
+; GFX11-TRUE16-NEXT: scratch_load_b32 v88, off, s32 offset:216
+; GFX11-TRUE16-NEXT: scratch_load_b32 v79, off, s32 offset:220
+; GFX11-TRUE16-NEXT: scratch_load_b32 v78, off, s32 offset:224
+; GFX11-TRUE16-NEXT: scratch_load_b32 v77, off, s32 offset:228
+; GFX11-TRUE16-NEXT: scratch_load_b32 v76, off, s32 offset:232
+; GFX11-TRUE16-NEXT: scratch_load_b32 v75, off, s32 offset:236
+; GFX11-TRUE16-NEXT: scratch_load_b32 v74, off, s32 offset:240
+; GFX11-TRUE16-NEXT: scratch_load_b32 v73, off, s32 offset:244
+; GFX11-TRUE16-NEXT: scratch_load_b32 v72, off, s32 offset:248
+; GFX11-TRUE16-NEXT: scratch_load_b32 v63, off, s32 offset:252
+; GFX11-TRUE16-NEXT: s_clause 0xe
+; GFX11-TRUE16-NEXT: scratch_load_b32 v62, off, s32 offset:256
+; GFX11-TRUE16-NEXT: scratch_load_b32 v61, off, s32 offset:260
+; GFX11-TRUE16-NEXT: scratch_load_b32 v60, off, s32 offset:264
+; GFX11-TRUE16-NEXT: scratch_load_b32 v59, off, s32 offset:268
+; GFX11-TRUE16-NEXT: scratch_load_b32 v58, off, s32 offset:272
+; GFX11-TRUE16-NEXT: scratch_load_b32 v57, off, s32 offset:276
+; GFX11-TRUE16-NEXT: scratch_load_b32 v56, off, s32 offset:280
+; GFX11-TRUE16-NEXT: scratch_load_b32 v47, off, s32 offset:284
+; GFX11-TRUE16-NEXT: scratch_load_b32 v46, off, s32 offset:288
+; GFX11-TRUE16-NEXT: scratch_load_b32 v45, off, s32 offset:292
+; GFX11-TRUE16-NEXT: scratch_load_b32 v44, off, s32 offset:296
+; GFX11-TRUE16-NEXT: scratch_load_b32 v43, off, s32 offset:300
+; GFX11-TRUE16-NEXT: scratch_load_b32 v42, off, s32 offset:304
+; GFX11-TRUE16-NEXT: scratch_load_b32 v41, off, s32 offset:308
+; GFX11-TRUE16-NEXT: scratch_load_b32 v40, off, s32 offset:312
+; GFX11-TRUE16-NEXT: v_dual_mov_b32 v3, v9 :: v_dual_mov_b32 v4, v14
+; GFX11-TRUE16-NEXT: v_dual_mov_b32 v9, v54 :: v_dual_mov_b32 v10, v65
+; GFX11-TRUE16-NEXT: v_mov_b32_e32 v14, v119
+; GFX11-TRUE16-NEXT: s_waitcnt vmcnt(0)
; GFX11-TRUE16-NEXT: s_setpc_b64 s[30:31]
; GFX11-TRUE16-NEXT: .LBB15_4:
; GFX11-TRUE16-NEXT: ; implicit-def: $vgpr0_vgpr1_vgpr2_vgpr3_vgpr4_vgpr5_vgpr6_vgpr7_vgpr8_vgpr9_vgpr10_vgpr11_vgpr12_vgpr13_vgpr14_vgpr15_vgpr16_vgpr17_vgpr18_vgpr19_vgpr20_vgpr21_vgpr22_vgpr23_vgpr24_vgpr25_vgpr26_vgpr27_vgpr28_vgpr29_vgpr30_vgpr31
+; GFX11-TRUE16-NEXT: ; implicit-def: $vgpr1_vgpr2_vgpr3_vgpr4_vgpr5_vgpr6_vgpr7_vgpr8_vgpr9_vgpr10_vgpr11_vgpr12_vgpr13_vgpr14_vgpr15_vgpr16_vgpr17_vgpr18_vgpr19_vgpr20_vgpr21_vgpr22_vgpr23_vgpr24_vgpr25_vgpr26_vgpr27_vgpr28_vgpr29_vgpr30_vgpr31_vgpr32
+; GFX11-TRUE16-NEXT: ; implicit-def: $vgpr3_vgpr4_vgpr5_vgpr6_vgpr7_vgpr8_vgpr9_vgpr10_vgpr11_vgpr12_vgpr13_vgpr14_vgpr15_vgpr16_vgpr17_vgpr18_vgpr19_vgpr20_vgpr21_vgpr22_vgpr23_vgpr24_vgpr25_vgpr26_vgpr27_vgpr28_vgpr29_vgpr30_vgpr31_vgpr32_vgpr33_vgpr34
+; GFX11-TRUE16-NEXT: ; implicit-def: $vgpr6_vgpr7_vgpr8_vgpr9_vgpr10_vgpr11_vgpr12_vgpr13_vgpr14_vgpr15_vgpr16_vgpr17_vgpr18_vgpr19_vgpr20_vgpr21_vgpr22_vgpr23_vgpr24_vgpr25_vgpr26_vgpr27_vgpr28_vgpr29_vgpr30_vgpr31_vgpr32_vgpr33_vgpr34_vgpr35_vgpr36_vgpr37
+; GFX11-TRUE16-NEXT: ; implicit-def: $vgpr10_vgpr11_vgpr12_vgpr13_vgpr14_vgpr15_vgpr16_vgpr17_vgpr18_vgpr19_vgpr20_vgpr21_vgpr22_vgpr23_vgpr24_vgpr25_vgpr26_vgpr27_vgpr28_vgpr29_vgpr30_vgpr31_vgpr32_vgpr33_vgpr34_vgpr35_vgpr36_vgpr37_vgpr38_vgpr39_vgpr40_vgpr41
+; GFX11-TRUE16-NEXT: ; implicit-def: $vgpr15_vgpr16_vgpr17_vgpr18_vgpr19_vgpr20_vgpr21_vgpr22_vgpr23_vgpr24_vgpr25_vgpr26_vgpr27_vgpr28_vgpr29_vgpr30_vgpr31_vgpr32_vgpr33_vgpr34_vgpr35_vgpr36_vgpr37_vgpr38_vgpr39_vgpr40_vgpr41_vgpr42_vgpr43_vgpr44_vgpr45_vgpr46
+; GFX11-TRUE16-NEXT: ; implicit-def: $vgpr21_vgpr22_vgpr23_vgpr24_vgpr25_vgpr26_vgpr27_vgpr28_vgpr29_vgpr30_vgpr31_vgpr32_vgpr33_vgpr34_vgpr35_vgpr36_vgpr37_vgpr38_vgpr39_vgpr40_vgpr41_vgpr42_vgpr43_vgpr44_vgpr45_vgpr46_vgpr47_vgpr48_vgpr49_vgpr50_vgpr51_vgpr52
+; GFX11-TRUE16-NEXT: ; implicit-def: $vgpr28_vgpr29_vgpr30_vgpr31_vgpr32_vgpr33_vgpr34_vgpr35_vgpr36_vgpr37_vgpr38_vgpr39_vgpr40_vgpr41_vgpr42_vgpr43_vgpr44_vgpr45_vgpr46_vgpr47_vgpr48_vgpr49_vgpr50_vgpr51_vgpr52_vgpr53_vgpr54_vgpr55_vgpr56_vgpr57_vgpr58_vgpr59
+; GFX11-TRUE16-NEXT: ; implicit-def: $vgpr36_vgpr37_vgpr38_vgpr39_vgpr40_vgpr41_vgpr42_vgpr43_vgpr44_vgpr45_vgpr46_vgpr47_vgpr48_vgpr49_vgpr50_vgpr51_vgpr52_vgpr53_vgpr54_vgpr55_vgpr56_vgpr57_vgpr58_vgpr59_vgpr60_vgpr61_vgpr62_vgpr63_vgpr64_vgpr65_vgpr66_vgpr67
+; GFX11-TRUE16-NEXT: ; implicit-def: $vgpr45_vgpr46_vgpr47_vgpr48_vgpr49_vgpr50_vgpr51_vgpr52_vgpr53_vgpr54_vgpr55_vgpr56_vgpr57_vgpr58_vgpr59_vgpr60_vgpr61_vgpr62_vgpr63_vgpr64_vgpr65_vgpr66_vgpr67_vgpr68_vgpr69_vgpr70_vgpr71_vgpr72_vgpr73_vgpr74_vgpr75_vgpr76
+; GFX11-TRUE16-NEXT: ; implicit-def: $vgpr55_vgpr56_vgpr57_vgpr58_vgpr59_vgpr60_vgpr61_vgpr62_vgpr63_vgpr64_vgpr65_vgpr66_vgpr67_vgpr68_vgpr69_vgpr70_vgpr71_vgpr72_vgpr73_vgpr74_vgpr75_vgpr76_vgpr77_vgpr78_vgpr79_vgpr80_vgpr81_vgpr82_vgpr83_vgpr84_vgpr85_vgpr86
+; GFX11-TRUE16-NEXT: ; implicit-def: $vgpr66_vgpr67_vgpr68_vgpr69_vgpr70_vgpr71_vgpr72_vgpr73_vgpr74_vgpr75_vgpr76_vgpr77_vgpr78_vgpr79_vgpr80_vgpr81_vgpr82_vgpr83_vgpr84_vgpr85_vgpr86_vgpr87_vgpr88_vgpr89_vgpr90_vgpr91_vgpr92_vgpr93_vgpr94_vgpr95_vgpr96_vgpr97
+; GFX11-TRUE16-NEXT: ; implicit-def: $vgpr78_vgpr79_vgpr80_vgpr81_vgpr82_vgpr83_vgpr84_vgpr85_vgpr86_vgpr87_vgpr88_vgpr89_vgpr90_vgpr91_vgpr92_vgpr93_vgpr94_vgpr95_vgpr96_vgpr97_vgpr98_vgpr99_vgpr100_vgpr101_vgpr102_vgpr103_vgpr104_vgpr105_vgpr106_vgpr107_vgpr108_vgpr109
+; GFX11-TRUE16-NEXT: ; implicit-def: $vgpr91_vgpr92_vgpr93_vgpr94_vgpr95_vgpr96_vgpr97_vgpr98_vgpr99_vgpr100_vgpr101_vgpr102_vgpr103_vgpr104_vgpr105_vgpr106_vgpr107_vgpr108_vgpr109_vgpr110_vgpr111_vgpr112_vgpr113_vgpr114_vgpr115_vgpr116_vgpr117_vgpr118_vgpr119_vgpr120_vgpr121_vgpr122
+; GFX11-TRUE16-NEXT: ; implicit-def: $vgpr105_vgpr106_vgpr107_vgpr108_vgpr109_vgpr110_vgpr111_vgpr112_vgpr113_vgpr114_vgpr115_vgpr116_vgpr117_vgpr118_vgpr119_vgpr120_vgpr121_vgpr122_vgpr123_vgpr124_vgpr125_vgpr126_vgpr127_vgpr128_vgpr129_vgpr130_vgpr131_vgpr132_vgpr133_vgpr134_vgpr135_vgpr136
+; GFX11-TRUE16-NEXT: ; implicit-def: $vgpr120_vgpr121_vgpr122_vgpr123_vgpr124_vgpr125_vgpr126_vgpr127_vgpr128_vgpr129_vgpr130_vgpr131_vgpr132_vgpr133_vgpr134_vgpr135_vgpr136_vgpr137_vgpr138_vgpr139_vgpr140_vgpr141_vgpr142_vgpr143_vgpr144_vgpr145_vgpr146_vgpr147_vgpr148_vgpr149_vgpr150_vgpr151
+; GFX11-TRUE16-NEXT: ; implicit-def: $vgpr136_vgpr137_vgpr138_vgpr139_vgpr140_vgpr141_vgpr142_vgpr143_vgpr144_vgpr145_vgpr146_vgpr147_vgpr148_vgpr149_vgpr150_vgpr151_vgpr152_vgpr153_vgpr154_vgpr155_vgpr156_vgpr157_vgpr158_vgpr159_vgpr160_vgpr161_vgpr162_vgpr163_vgpr164_vgpr165_vgpr166_vgpr167
+; GFX11-TRUE16-NEXT: ; implicit-def: $vgpr153_vgpr154_vgpr155_vgpr156_vgpr157_vgpr158_vgpr159_vgpr160_vgpr161_vgpr162_vgpr163_vgpr164_vgpr165_vgpr166_vgpr167_vgpr168_vgpr169_vgpr170_vgpr171_vgpr172_vgpr173_vgpr174_vgpr175_vgpr176_vgpr177_vgpr178_vgpr179_vgpr180_vgpr181_vgpr182_vgpr183_vgpr184
; GFX11-TRUE16-NEXT: s_branch .LBB15_2
;
; GFX11-FAKE16-LABEL: bitcast_v48i16_to_v24i32_scalar:
@@ -10044,117 +10213,286 @@ define inreg <24 x i32> @bitcast_v48f16_to_v24i32_scalar(<48 x half> inreg %a, i
; GFX11-TRUE16-LABEL: bitcast_v48f16_to_v24i32_scalar:
; GFX11-TRUE16: ; %bb.0:
; GFX11-TRUE16-NEXT: s_waitcnt vmcnt(0) expcnt(0) lgkmcnt(0)
-; GFX11-TRUE16-NEXT: v_mov_b16_e32 v32.h, 0
; GFX11-TRUE16-NEXT: v_cmp_ne_u32_e32 vcc_lo, 0, v6
-; GFX11-TRUE16-NEXT: v_mov_b16_e32 v32.l, v5.h
-; GFX11-TRUE16-NEXT: v_mov_b16_e32 v33.l, v4.h
-; GFX11-TRUE16-NEXT: v_mov_b16_e32 v34.l, v3.h
-; GFX11-TRUE16-NEXT: v_mov_b16_e32 v33.h, v32.h
-; GFX11-TRUE16-NEXT: v_mov_b16_e32 v34.h, v32.h
-; GFX11-TRUE16-NEXT: v_mov_b16_e32 v35.l, v2.h
-; GFX11-TRUE16-NEXT: v_mov_b16_e32 v35.h, v32.h
-; GFX11-TRUE16-NEXT: v_mov_b16_e32 v36.l, v1.h
-; GFX11-TRUE16-NEXT: v_mov_b16_e32 v36.h, v32.h
-; GFX11-TRUE16-NEXT: v_mov_b16_e32 v37.l, v0.h
-; GFX11-TRUE16-NEXT: v_mov_b16_e32 v37.h, v32.h
-; GFX11-TRUE16-NEXT: v_and_b32_e32 v51, 0xffff, v0
-; GFX11-TRUE16-NEXT: v_and_b32_e32 v50, 0xffff, v1
-; GFX11-TRUE16-NEXT: v_and_b32_e32 v49, 0xffff, v2
-; GFX11-TRUE16-NEXT: v_and_b32_e32 v48, 0xffff, v3
-; GFX11-TRUE16-NEXT: v_and_b32_e32 v39, 0xffff, v4
-; GFX11-TRUE16-NEXT: v_and_b32_e32 v38, 0xffff, v5
-; GFX11-TRUE16-NEXT: s_lshr_b32 s41, s29, 16
-; GFX11-TRUE16-NEXT: s_lshr_b32 s42, s28, 16
-; GFX11-TRUE16-NEXT: s_lshr_b32 s15, s27, 16
-; GFX11-TRUE16-NEXT: s_lshr_b32 s43, s26, 16
-; GFX11-TRUE16-NEXT: s_lshr_b32 s14, s25, 16
-; GFX11-TRUE16-NEXT: s_lshr_b32 s13, s24, 16
-; GFX11-TRUE16-NEXT: s_lshr_b32 s12, s23, 16
-; GFX11-TRUE16-NEXT: s_lshr_b32 s11, s22, 16
-; GFX11-TRUE16-NEXT: s_lshr_b32 s10, s21, 16
-; GFX11-TRUE16-NEXT: s_lshr_b32 s9, s20, 16
-; GFX11-TRUE16-NEXT: s_lshr_b32 s8, s19, 16
-; GFX11-TRUE16-NEXT: s_lshr_b32 s7, s18, 16
-; GFX11-TRUE16-NEXT: s_lshr_b32 s6, s17, 16
-; GFX11-TRUE16-NEXT: s_lshr_b32 s5, s16, 16
-; GFX11-TRUE16-NEXT: s_lshr_b32 s44, s3, 16
-; GFX11-TRUE16-NEXT: s_lshr_b32 s45, s2, 16
-; GFX11-TRUE16-NEXT: s_lshr_b32 s46, s1, 16
-; GFX11-TRUE16-NEXT: s_lshr_b32 s4, s0, 16
-; GFX11-TRUE16-NEXT: s_mov_b32 s40, 0
-; GFX11-TRUE16-NEXT: s_pack_ll_b32_b16 s4, s0, s4
-; GFX11-TRUE16-NEXT: s_pack_ll_b32_b16 s1, s1, s46
-; GFX11-TRUE16-NEXT: s_pack_ll_b32_b16 s2, s2, s45
-; GFX11-TRUE16-NEXT: s_pack_ll_b32_b16 s3, s3, s44
-; GFX11-TRUE16-NEXT: s_pack_ll_b32_b16 s5, s16, s5
-; GFX11-TRUE16-NEXT: s_pack_ll_b32_b16 s6, s17, s6
-; GFX11-TRUE16-NEXT: s_pack_ll_b32_b16 s7, s18, s7
-; GFX11-TRUE16-NEXT: s_pack_ll_b32_b16 s8, s19, s8
-; GFX11-TRUE16-NEXT: s_pack_ll_b32_b16 s9, s20, s9
-; GFX11-TRUE16-NEXT: s_pack_ll_b32_b16 s10, s21, s10
-; GFX11-TRUE16-NEXT: s_pack_ll_b32_b16 s11, s22, s11
-; GFX11-TRUE16-NEXT: s_pack_ll_b32_b16 s12, s23, s12
-; GFX11-TRUE16-NEXT: s_pack_ll_b32_b16 s13, s24, s13
-; GFX11-TRUE16-NEXT: s_pack_ll_b32_b16 s14, s25, s14
-; GFX11-TRUE16-NEXT: s_pack_ll_b32_b16 s0, s26, s43
-; GFX11-TRUE16-NEXT: s_pack_ll_b32_b16 s15, s27, s15
-; GFX11-TRUE16-NEXT: s_pack_ll_b32_b16 s16, s28, s42
-; GFX11-TRUE16-NEXT: s_pack_ll_b32_b16 s17, s29, s41
+; GFX11-TRUE16-NEXT: s_clause 0x1f
+; GFX11-TRUE16-NEXT: scratch_store_b32 off, v40, s32 offset:312
+; GFX11-TRUE16-NEXT: scratch_store_b32 off, v41, s32 offset:308
+; GFX11-TRUE16-NEXT: scratch_store_b32 off, v42, s32 offset:304
+; GFX11-TRUE16-NEXT: scratch_store_b32 off, v43, s32 offset:300
+; GFX11-TRUE16-NEXT: scratch_store_b32 off, v44, s32 offset:296
+; GFX11-TRUE16-NEXT: scratch_store_b32 off, v45, s32 offset:292
+; GFX11-TRUE16-NEXT: scratch_store_b32 off, v46, s32 offset:288
+; GFX11-TRUE16-NEXT: scratch_store_b32 off, v47, s32 offset:284
+; GFX11-TRUE16-NEXT: scratch_store_b32 off, v56, s32 offset:280
+; GFX11-TRUE16-NEXT: scratch_store_b32 off, v57, s32 offset:276
+; GFX11-TRUE16-NEXT: scratch_store_b32 off, v58, s32 offset:272
+; GFX11-TRUE16-NEXT: scratch_store_b32 off, v59, s32 offset:268
+; GFX11-TRUE16-NEXT: scratch_store_b32 off, v60, s32 offset:264
+; GFX11-TRUE16-NEXT: scratch_store_b32 off, v61, s32 offset:260
+; GFX11-TRUE16-NEXT: scratch_store_b32 off, v62, s32 offset:256
+; GFX11-TRUE16-NEXT: scratch_store_b32 off, v63, s32 offset:252
+; GFX11-TRUE16-NEXT: scratch_store_b32 off, v72, s32 offset:248
+; GFX11-TRUE16-NEXT: scratch_store_b32 off, v73, s32 offset:244
+; GFX11-TRUE16-NEXT: scratch_store_b32 off, v74, s32 offset:240
+; GFX11-TRUE16-NEXT: scratch_store_b32 off, v75, s32 offset:236
+; GFX11-TRUE16-NEXT: scratch_store_b32 off, v76, s32 offset:232
+; GFX11-TRUE16-NEXT: scratch_store_b32 off, v77, s32 offset:228
+; GFX11-TRUE16-NEXT: scratch_store_b32 off, v78, s32 offset:224
+; GFX11-TRUE16-NEXT: scratch_store_b32 off, v79, s32 offset:220
+; GFX11-TRUE16-NEXT: scratch_store_b32 off, v88, s32 offset:216
+; GFX11-TRUE16-NEXT: scratch_store_b32 off, v89, s32 offset:212
+; GFX11-TRUE16-NEXT: scratch_store_b32 off, v90, s32 offset:208
+; GFX11-TRUE16-NEXT: scratch_store_b32 off, v91, s32 offset:204
+; GFX11-TRUE16-NEXT: scratch_store_b32 off, v92, s32 offset:200
+; GFX11-TRUE16-NEXT: scratch_store_b32 off, v93, s32 offset:196
+; GFX11-TRUE16-NEXT: scratch_store_b32 off, v94, s32 offset:192
+; GFX11-TRUE16-NEXT: scratch_store_b32 off, v95, s32 offset:188
+; GFX11-TRUE16-NEXT: s_clause 0x1f
+; GFX11-TRUE16-NEXT: scratch_store_b32 off, v104, s32 offset:184
+; GFX11-TRUE16-NEXT: scratch_store_b32 off, v105, s32 offset:180
+; GFX11-TRUE16-NEXT: scratch_store_b32 off, v106, s32 offset:176
+; GFX11-TRUE16-NEXT: scratch_store_b32 off, v107, s32 offset:172
+; GFX11-TRUE16-NEXT: scratch_store_b32 off, v108, s32 offset:168
+; GFX11-TRUE16-NEXT: scratch_store_b32 off, v109, s32 offset:164
+; GFX11-TRUE16-NEXT: scratch_store_b32 off, v110, s32 offset:160
+; GFX11-TRUE16-NEXT: scratch_store_b32 off, v111, s32 offset:156
+; GFX11-TRUE16-NEXT: scratch_store_b32 off, v120, s32 offset:152
+; GFX11-TRUE16-NEXT: scratch_store_b32 off, v121, s32 offset:148
+; GFX11-TRUE16-NEXT: scratch_store_b32 off, v122, s32 offset:144
+; GFX11-TRUE16-NEXT: scratch_store_b32 off, v123, s32 offset:140
+; GFX11-TRUE16-NEXT: scratch_store_b32 off, v124, s32 offset:136
+; GFX11-TRUE16-NEXT: scratch_store_b32 off, v125, s32 offset:132
+; GFX11-TRUE16-NEXT: scratch_store_b32 off, v126, s32 offset:128
+; GFX11-TRUE16-NEXT: scratch_store_b32 off, v127, s32 offset:124
+; GFX11-TRUE16-NEXT: scratch_store_b32 off, v136, s32 offset:120
+; GFX11-TRUE16-NEXT: scratch_store_b32 off, v137, s32 offset:116
+; GFX11-TRUE16-NEXT: scratch_store_b32 off, v138, s32 offset:112
+; GFX11-TRUE16-NEXT: scratch_store_b32 off, v139, s32 offset:108
+; GFX11-TRUE16-NEXT: scratch_store_b32 off, v140, s32 offset:104
+; GFX11-TRUE16-NEXT: scratch_store_b32 off, v141, s32 offset:100
+; GFX11-TRUE16-NEXT: scratch_store_b32 off, v142, s32 offset:96
+; GFX11-TRUE16-NEXT: scratch_store_b32 off, v143, s32 offset:92
+; GFX11-TRUE16-NEXT: scratch_store_b32 off, v152, s32 offset:88
+; GFX11-TRUE16-NEXT: scratch_store_b32 off, v153, s32 offset:84
+; GFX11-TRUE16-NEXT: scratch_store_b32 off, v154, s32 offset:80
+; GFX11-TRUE16-NEXT: scratch_store_b32 off, v155, s32 offset:76
+; GFX11-TRUE16-NEXT: scratch_store_b32 off, v156, s32 offset:72
+; GFX11-TRUE16-NEXT: scratch_store_b32 off, v157, s32 offset:68
+; GFX11-TRUE16-NEXT: scratch_store_b32 off, v158, s32 offset:64
+; GFX11-TRUE16-NEXT: scratch_store_b32 off, v159, s32 offset:60
+; GFX11-TRUE16-NEXT: s_clause 0xe
+; GFX11-TRUE16-NEXT: scratch_store_b32 off, v168, s32 offset:56
+; GFX11-TRUE16-NEXT: scratch_store_b32 off, v169, s32 offset:52
+; GFX11-TRUE16-NEXT: scratch_store_b32 off, v170, s32 offset:48
+; GFX11-TRUE16-NEXT: scratch_store_b32 off, v171, s32 offset:44
+; GFX11-TRUE16-NEXT: scratch_store_b32 off, v172, s32 offset:40
+; GFX11-TRUE16-NEXT: scratch_store_b32 off, v173, s32 offset:36
+; GFX11-TRUE16-NEXT: scratch_store_b32 off, v174, s32 offset:32
+; GFX11-TRUE16-NEXT: scratch_store_b32 off, v175, s32 offset:28
+; GFX11-TRUE16-NEXT: scratch_store_b32 off, v184, s32 offset:24
+; GFX11-TRUE16-NEXT: scratch_store_b32 off, v185, s32 offset:20
+; GFX11-TRUE16-NEXT: scratch_store_b32 off, v186, s32 offset:16
+; GFX11-TRUE16-NEXT: scratch_store_b32 off, v187, s32 offset:12
+; GFX11-TRUE16-NEXT: scratch_store_b32 off, v188, s32 offset:8
+; GFX11-TRUE16-NEXT: scratch_store_b32 off, v189, s32 offset:4
+; GFX11-TRUE16-NEXT: scratch_store_b32 off, v190, s32
+; GFX11-TRUE16-NEXT: v_dual_mov_b32 v185, v5 :: v_dual_mov_b32 v186, v4
+; GFX11-TRUE16-NEXT: v_dual_mov_b32 v187, v3 :: v_dual_mov_b32 v188, v2
+; GFX11-TRUE16-NEXT: v_dual_mov_b32 v189, v1 :: v_dual_mov_b32 v190, v0
+; GFX11-TRUE16-NEXT: s_lshr_b32 s15, s29, 16
+; GFX11-TRUE16-NEXT: s_lshr_b32 s14, s28, 16
+; GFX11-TRUE16-NEXT: s_lshr_b32 s13, s27, 16
+; GFX11-TRUE16-NEXT: s_lshr_b32 s12, s26, 16
+; GFX11-TRUE16-NEXT: s_lshr_b32 s11, s25, 16
+; GFX11-TRUE16-NEXT: s_lshr_b32 s10, s24, 16
+; GFX11-TRUE16-NEXT: s_lshr_b32 s9, s23, 16
+; GFX11-TRUE16-NEXT: s_lshr_b32 s8, s22, 16
+; GFX11-TRUE16-NEXT: s_lshr_b32 s7, s21, 16
+; GFX11-TRUE16-NEXT: s_lshr_b32 s6, s20, 16
+; GFX11-TRUE16-NEXT: s_lshr_b32 s5, s19, 16
+; GFX11-TRUE16-NEXT: s_lshr_b32 s4, s18, 16
+; GFX11-TRUE16-NEXT: s_lshr_b32 s43, s17, 16
+; GFX11-TRUE16-NEXT: s_lshr_b32 s44, s16, 16
+; GFX11-TRUE16-NEXT: s_lshr_b32 s45, s3, 16
+; GFX11-TRUE16-NEXT: s_lshr_b32 s46, s2, 16
+; GFX11-TRUE16-NEXT: s_lshr_b32 s41, s1, 16
+; GFX11-TRUE16-NEXT: s_lshr_b32 s40, s0, 16
+; GFX11-TRUE16-NEXT: s_mov_b32 s42, 0
+; GFX11-TRUE16-NEXT: s_pack_ll_b32_b16 s40, s0, s40
+; GFX11-TRUE16-NEXT: s_pack_ll_b32_b16 s41, s1, s41
+; GFX11-TRUE16-NEXT: s_pack_ll_b32_b16 s0, s2, s46
+; GFX11-TRUE16-NEXT: s_pack_ll_b32_b16 s1, s3, s45
+; GFX11-TRUE16-NEXT: s_pack_ll_b32_b16 s2, s16, s44
+; GFX11-TRUE16-NEXT: s_pack_ll_b32_b16 s3, s17, s43
+; GFX11-TRUE16-NEXT: s_pack_ll_b32_b16 s4, s18, s4
+; GFX11-TRUE16-NEXT: s_pack_ll_b32_b16 s5, s19, s5
+; GFX11-TRUE16-NEXT: s_pack_ll_b32_b16 s6, s20, s6
+; GFX11-TRUE16-NEXT: s_pack_ll_b32_b16 s7, s21, s7
+; GFX11-TRUE16-NEXT: s_pack_ll_b32_b16 s8, s22, s8
+; GFX11-TRUE16-NEXT: s_pack_ll_b32_b16 s9, s23, s9
+; GFX11-TRUE16-NEXT: s_pack_ll_b32_b16 s10, s24, s10
+; GFX11-TRUE16-NEXT: s_pack_ll_b32_b16 s11, s25, s11
+; GFX11-TRUE16-NEXT: s_pack_ll_b32_b16 s12, s26, s12
+; GFX11-TRUE16-NEXT: s_pack_ll_b32_b16 s13, s27, s13
+; GFX11-TRUE16-NEXT: s_pack_ll_b32_b16 s14, s28, s14
+; GFX11-TRUE16-NEXT: s_pack_ll_b32_b16 s15, s29, s15
; GFX11-TRUE16-NEXT: s_and_b32 s47, vcc_lo, exec_lo
; GFX11-TRUE16-NEXT: s_cbranch_scc0 .LBB19_4
; GFX11-TRUE16-NEXT: ; %bb.1: ; %cmp.false
-; GFX11-TRUE16-NEXT: v_lshl_or_b32 v18, v37, 16, v51
-; GFX11-TRUE16-NEXT: v_lshl_or_b32 v19, v36, 16, v50
-; GFX11-TRUE16-NEXT: v_lshl_or_b32 v20, v35, 16, v49
-; GFX11-TRUE16-NEXT: v_lshl_or_b32 v21, v34, 16, v48
-; GFX11-TRUE16-NEXT: v_lshl_or_b32 v22, v33, 16, v39
-; GFX11-TRUE16-NEXT: v_lshl_or_b32 v23, v32, 16, v38
-; GFX11-TRUE16-NEXT: v_dual_mov_b32 v0, s4 :: v_dual_mov_b32 v1, s1
-; GFX11-TRUE16-NEXT: v_dual_mov_b32 v2, s2 :: v_dual_mov_b32 v3, s3
-; GFX11-TRUE16-NEXT: v_dual_mov_b32 v4, s5 :: v_dual_mov_b32 v5, s6
-; GFX11-TRUE16-NEXT: v_dual_mov_b32 v6, s7 :: v_dual_mov_b32 v7, s8
-; GFX11-TRUE16-NEXT: v_dual_mov_b32 v8, s9 :: v_dual_mov_b32 v9, s10
-; GFX11-TRUE16-NEXT: v_dual_mov_b32 v10, s11 :: v_dual_mov_b32 v11, s12
-; GFX11-TRUE16-NEXT: v_dual_mov_b32 v12, s13 :: v_dual_mov_b32 v13, s14
-; GFX11-TRUE16-NEXT: v_dual_mov_b32 v14, s0 :: v_dual_mov_b32 v15, s15
-; GFX11-TRUE16-NEXT: v_dual_mov_b32 v16, s16 :: v_dual_mov_b32 v17, s17
-; GFX11-TRUE16-NEXT: s_and_not1_b32 vcc_lo, exec_lo, s40
+; GFX11-TRUE16-NEXT: v_dual_mov_b32 v0, s40 :: v_dual_mov_b32 v5, s0
+; GFX11-TRUE16-NEXT: v_dual_mov_b32 v2, s41 :: v_dual_mov_b32 v9, s1
+; GFX11-TRUE16-NEXT: v_dual_mov_b32 v14, s2 :: v_dual_mov_b32 v27, s4
+; GFX11-TRUE16-NEXT: v_dual_mov_b32 v20, s3 :: v_dual_mov_b32 v35, s5
+; GFX11-TRUE16-NEXT: v_dual_mov_b32 v44, s6 :: v_dual_mov_b32 v65, s8
+; GFX11-TRUE16-NEXT: v_dual_mov_b32 v54, s7 :: v_dual_mov_b32 v77, s9
+; GFX11-TRUE16-NEXT: v_dual_mov_b32 v90, s10 :: v_dual_mov_b32 v119, s12
+; GFX11-TRUE16-NEXT: v_dual_mov_b32 v104, s11 :: v_dual_mov_b32 v135, s13
+; GFX11-TRUE16-NEXT: v_mov_b32_e32 v152, s14
+; GFX11-TRUE16-NEXT: v_mov_b32_e32 v170, s15
+; GFX11-TRUE16-NEXT: s_and_not1_b32 vcc_lo, exec_lo, s42
; GFX11-TRUE16-NEXT: s_cbranch_vccnz .LBB19_3
; GFX11-TRUE16-NEXT: .LBB19_2: ; %cmp.true
-; GFX11-TRUE16-NEXT: v_lshl_or_b32 v18, v37, 16, v51
-; GFX11-TRUE16-NEXT: v_lshl_or_b32 v19, v36, 16, v50
-; GFX11-TRUE16-NEXT: v_lshl_or_b32 v20, v35, 16, v49
-; GFX11-TRUE16-NEXT: v_lshl_or_b32 v21, v34, 16, v48
-; GFX11-TRUE16-NEXT: v_lshl_or_b32 v22, v33, 16, v39
-; GFX11-TRUE16-NEXT: v_lshl_or_b32 v23, v32, 16, v38
-; GFX11-TRUE16-NEXT: v_pk_add_f16 v0, 0x200, s4 op_sel_hi:[0,1]
-; GFX11-TRUE16-NEXT: v_pk_add_f16 v1, 0x200, s1 op_sel_hi:[0,1]
-; GFX11-TRUE16-NEXT: v_pk_add_f16 v2, 0x200, s2 op_sel_hi:[0,1]
-; GFX11-TRUE16-NEXT: v_pk_add_f16 v3, 0x200, s3 op_sel_hi:[0,1]
-; GFX11-TRUE16-NEXT: v_pk_add_f16 v4, 0x200, s5 op_sel_hi:[0,1]
-; GFX11-TRUE16-NEXT: v_pk_add_f16 v5, 0x200, s6 op_sel_hi:[0,1]
-; GFX11-TRUE16-NEXT: v_pk_add_f16 v6, 0x200, s7 op_sel_hi:[0,1]
-; GFX11-TRUE16-NEXT: v_pk_add_f16 v7, 0x200, s8 op_sel_hi:[0,1]
-; GFX11-TRUE16-NEXT: v_pk_add_f16 v8, 0x200, s9 op_sel_hi:[0,1]
-; GFX11-TRUE16-NEXT: v_pk_add_f16 v9, 0x200, s10 op_sel_hi:[0,1]
-; GFX11-TRUE16-NEXT: v_pk_add_f16 v10, 0x200, s11 op_sel_hi:[0,1]
-; GFX11-TRUE16-NEXT: v_pk_add_f16 v11, 0x200, s12 op_sel_hi:[0,1]
-; GFX11-TRUE16-NEXT: v_pk_add_f16 v12, 0x200, s13 op_sel_hi:[0,1]
-; GFX11-TRUE16-NEXT: v_pk_add_f16 v13, 0x200, s14 op_sel_hi:[0,1]
-; GFX11-TRUE16-NEXT: v_pk_add_f16 v14, 0x200, s0 op_sel_hi:[0,1]
-; GFX11-TRUE16-NEXT: v_pk_add_f16 v15, 0x200, s15 op_sel_hi:[0,1]
-; GFX11-TRUE16-NEXT: v_pk_add_f16 v16, 0x200, s16 op_sel_hi:[0,1]
-; GFX11-TRUE16-NEXT: v_pk_add_f16 v17, 0x200, s17 op_sel_hi:[0,1]
-; GFX11-TRUE16-NEXT: v_pk_add_f16 v18, 0x200, v18 op_sel_hi:[0,1]
-; GFX11-TRUE16-NEXT: v_pk_add_f16 v19, 0x200, v19 op_sel_hi:[0,1]
-; GFX11-TRUE16-NEXT: v_pk_add_f16 v20, 0x200, v20 op_sel_hi:[0,1]
-; GFX11-TRUE16-NEXT: v_pk_add_f16 v21, 0x200, v21 op_sel_hi:[0,1]
-; GFX11-TRUE16-NEXT: v_pk_add_f16 v22, 0x200, v22 op_sel_hi:[0,1]
-; GFX11-TRUE16-NEXT: v_pk_add_f16 v23, 0x200, v23 op_sel_hi:[0,1]
+; GFX11-TRUE16-NEXT: v_pk_add_f16 v0, 0x200, s40 op_sel_hi:[0,1]
+; GFX11-TRUE16-NEXT: v_pk_add_f16 v2, 0x200, s41 op_sel_hi:[0,1]
+; GFX11-TRUE16-NEXT: v_pk_add_f16 v190, 0x200, v190 op_sel_hi:[0,1]
+; GFX11-TRUE16-NEXT: v_pk_add_f16 v189, 0x200, v189 op_sel_hi:[0,1]
+; GFX11-TRUE16-NEXT: v_pk_add_f16 v188, 0x200, v188 op_sel_hi:[0,1]
+; GFX11-TRUE16-NEXT: v_pk_add_f16 v187, 0x200, v187 op_sel_hi:[0,1]
+; GFX11-TRUE16-NEXT: v_pk_add_f16 v186, 0x200, v186 op_sel_hi:[0,1]
+; GFX11-TRUE16-NEXT: v_pk_add_f16 v185, 0x200, v185 op_sel_hi:[0,1]
+; GFX11-TRUE16-NEXT: v_pk_add_f16 v5, 0x200, s0 op_sel_hi:[0,1]
+; GFX11-TRUE16-NEXT: v_pk_add_f16 v9, 0x200, s1 op_sel_hi:[0,1]
+; GFX11-TRUE16-NEXT: v_pk_add_f16 v14, 0x200, s2 op_sel_hi:[0,1]
+; GFX11-TRUE16-NEXT: v_pk_add_f16 v20, 0x200, s3 op_sel_hi:[0,1]
+; GFX11-TRUE16-NEXT: v_pk_add_f16 v27, 0x200, s4 op_sel_hi:[0,1]
+; GFX11-TRUE16-NEXT: v_pk_add_f16 v35, 0x200, s5 op_sel_hi:[0,1]
+; GFX11-TRUE16-NEXT: v_pk_add_f16 v44, 0x200, s6 op_sel_hi:[0,1]
+; GFX11-TRUE16-NEXT: v_pk_add_f16 v54, 0x200, s7 op_sel_hi:[0,1]
+; GFX11-TRUE16-NEXT: v_pk_add_f16 v65, 0x200, s8 op_sel_hi:[0,1]
+; GFX11-TRUE16-NEXT: v_pk_add_f16 v77, 0x200, s9 op_sel_hi:[0,1]
+; GFX11-TRUE16-NEXT: v_pk_add_f16 v90, 0x200, s10 op_sel_hi:[0,1]
+; GFX11-TRUE16-NEXT: v_pk_add_f16 v104, 0x200, s11 op_sel_hi:[0,1]
+; GFX11-TRUE16-NEXT: v_pk_add_f16 v119, 0x200, s12 op_sel_hi:[0,1]
+; GFX11-TRUE16-NEXT: v_pk_add_f16 v135, 0x200, s13 op_sel_hi:[0,1]
+; GFX11-TRUE16-NEXT: v_pk_add_f16 v152, 0x200, s14 op_sel_hi:[0,1]
+; GFX11-TRUE16-NEXT: v_pk_add_f16 v170, 0x200, s15 op_sel_hi:[0,1]
; GFX11-TRUE16-NEXT: .LBB19_3: ; %end
+; GFX11-TRUE16-NEXT: v_dual_mov_b32 v1, v2 :: v_dual_mov_b32 v2, v5
+; GFX11-TRUE16-NEXT: v_dual_mov_b32 v5, v20 :: v_dual_mov_b32 v6, v27
+; GFX11-TRUE16-NEXT: v_dual_mov_b32 v7, v35 :: v_dual_mov_b32 v8, v44
+; GFX11-TRUE16-NEXT: v_dual_mov_b32 v11, v77 :: v_dual_mov_b32 v12, v90
+; GFX11-TRUE16-NEXT: v_mov_b32_e32 v13, v104
+; GFX11-TRUE16-NEXT: v_dual_mov_b32 v15, v135 :: v_dual_mov_b32 v16, v152
+; GFX11-TRUE16-NEXT: v_dual_mov_b32 v17, v170 :: v_dual_mov_b32 v20, v188
+; GFX11-TRUE16-NEXT: v_dual_mov_b32 v18, v190 :: v_dual_mov_b32 v19, v189
+; GFX11-TRUE16-NEXT: v_dual_mov_b32 v21, v187 :: v_dual_mov_b32 v22, v186
+; GFX11-TRUE16-NEXT: v_mov_b32_e32 v23, v185
+; GFX11-TRUE16-NEXT: s_clause 0x1f
+; GFX11-TRUE16-NEXT: scratch_load_b32 v190, off, s32
+; GFX11-TRUE16-NEXT: scratch_load_b32 v189, off, s32 offset:4
+; GFX11-TRUE16-NEXT: scratch_load_b32 v188, off, s32 offset:8
+; GFX11-TRUE16-NEXT: scratch_load_b32 v187, off, s32 offset:12
+; GFX11-TRUE16-NEXT: scratch_load_b32 v186, off, s32 offset:16
+; GFX11-TRUE16-NEXT: scratch_load_b32 v185, off, s32 offset:20
+; GFX11-TRUE16-NEXT: scratch_load_b32 v184, off, s32 offset:24
+; GFX11-TRUE16-NEXT: scratch_load_b32 v175, off, s32 offset:28
+; GFX11-TRUE16-NEXT: scratch_load_b32 v174, off, s32 offset:32
+; GFX11-TRUE16-NEXT: scratch_load_b32 v173, off, s32 offset:36
+; GFX11-TRUE16-NEXT: scratch_load_b32 v172, off, s32 offset:40
+; GFX11-TRUE16-NEXT: scratch_load_b32 v171, off, s32 offset:44
+; GFX11-TRUE16-NEXT: scratch_load_b32 v170, off, s32 offset:48
+; GFX11-TRUE16-NEXT: scratch_load_b32 v169, off, s32 offset:52
+; GFX11-TRUE16-NEXT: scratch_load_b32 v168, off, s32 offset:56
+; GFX11-TRUE16-NEXT: scratch_load_b32 v159, off, s32 offset:60
+; GFX11-TRUE16-NEXT: scratch_load_b32 v158, off, s32 offset:64
+; GFX11-TRUE16-NEXT: scratch_load_b32 v157, off, s32 offset:68
+; GFX11-TRUE16-NEXT: scratch_load_b32 v156, off, s32 offset:72
+; GFX11-TRUE16-NEXT: scratch_load_b32 v155, off, s32 offset:76
+; GFX11-TRUE16-NEXT: scratch_load_b32 v154, off, s32 offset:80
+; GFX11-TRUE16-NEXT: scratch_load_b32 v153, off, s32 offset:84
+; GFX11-TRUE16-NEXT: scratch_load_b32 v152, off, s32 offset:88
+; GFX11-TRUE16-NEXT: scratch_load_b32 v143, off, s32 offset:92
+; GFX11-TRUE16-NEXT: scratch_load_b32 v142, off, s32 offset:96
+; GFX11-TRUE16-NEXT: scratch_load_b32 v141, off, s32 offset:100
+; GFX11-TRUE16-NEXT: scratch_load_b32 v140, off, s32 offset:104
+; GFX11-TRUE16-NEXT: scratch_load_b32 v139, off, s32 offset:108
+; GFX11-TRUE16-NEXT: scratch_load_b32 v138, off, s32 offset:112
+; GFX11-TRUE16-NEXT: scratch_load_b32 v137, off, s32 offset:116
+; GFX11-TRUE16-NEXT: scratch_load_b32 v136, off, s32 offset:120
+; GFX11-TRUE16-NEXT: scratch_load_b32 v127, off, s32 offset:124
+; GFX11-TRUE16-NEXT: s_clause 0x1f
+; GFX11-TRUE16-NEXT: scratch_load_b32 v126, off, s32 offset:128
+; GFX11-TRUE16-NEXT: scratch_load_b32 v125, off, s32 offset:132
+; GFX11-TRUE16-NEXT: scratch_load_b32 v124, off, s32 offset:136
+; GFX11-TRUE16-NEXT: scratch_load_b32 v123, off, s32 offset:140
+; GFX11-TRUE16-NEXT: scratch_load_b32 v122, off, s32 offset:144
+; GFX11-TRUE16-NEXT: scratch_load_b32 v121, off, s32 offset:148
+; GFX11-TRUE16-NEXT: scratch_load_b32 v120, off, s32 offset:152
+; GFX11-TRUE16-NEXT: scratch_load_b32 v111, off, s32 offset:156
+; GFX11-TRUE16-NEXT: scratch_load_b32 v110, off, s32 offset:160
+; GFX11-TRUE16-NEXT: scratch_load_b32 v109, off, s32 offset:164
+; GFX11-TRUE16-NEXT: scratch_load_b32 v108, off, s32 offset:168
+; GFX11-TRUE16-NEXT: scratch_load_b32 v107, off, s32 offset:172
+; GFX11-TRUE16-NEXT: scratch_load_b32 v106, off, s32 offset:176
+; GFX11-TRUE16-NEXT: scratch_load_b32 v105, off, s32 offset:180
+; GFX11-TRUE16-NEXT: scratch_load_b32 v104, off, s32 offset:184
+; GFX11-TRUE16-NEXT: scratch_load_b32 v95, off, s32 offset:188
+; GFX11-TRUE16-NEXT: scratch_load_b32 v94, off, s32 offset:192
+; GFX11-TRUE16-NEXT: scratch_load_b32 v93, off, s32 offset:196
+; GFX11-TRUE16-NEXT: scratch_load_b32 v92, off, s32 offset:200
+; GFX11-TRUE16-NEXT: scratch_load_b32 v91, off, s32 offset:204
+; GFX11-TRUE16-NEXT: scratch_load_b32 v90, off, s32 offset:208
+; GFX11-TRUE16-NEXT: scratch_load_b32 v89, off, s32 offset:212
+; GFX11-TRUE16-NEXT: scratch_load_b32 v88, off, s32 offset:216
+; GFX11-TRUE16-NEXT: scratch_load_b32 v79, off, s32 offset:220
+; GFX11-TRUE16-NEXT: scratch_load_b32 v78, off, s32 offset:224
+; GFX11-TRUE16-NEXT: scratch_load_b32 v77, off, s32 offset:228
+; GFX11-TRUE16-NEXT: scratch_load_b32 v76, off, s32 offset:232
+; GFX11-TRUE16-NEXT: scratch_load_b32 v75, off, s32 offset:236
+; GFX11-TRUE16-NEXT: scratch_load_b32 v74, off, s32 offset:240
+; GFX11-TRUE16-NEXT: scratch_load_b32 v73, off, s32 offset:244
+; GFX11-TRUE16-NEXT: scratch_load_b32 v72, off, s32 offset:248
+; GFX11-TRUE16-NEXT: scratch_load_b32 v63, off, s32 offset:252
+; GFX11-TRUE16-NEXT: s_clause 0xe
+; GFX11-TRUE16-NEXT: scratch_load_b32 v62, off, s32 offset:256
+; GFX11-TRUE16-NEXT: scratch_load_b32 v61, off, s32 offset:260
+; GFX11-TRUE16-NEXT: scratch_load_b32 v60, off, s32 offset:264
+; GFX11-TRUE16-NEXT: scratch_load_b32 v59, off, s32 offset:268
+; GFX11-TRUE16-NEXT: scratch_load_b32 v58, off, s32 offset:272
+; GFX11-TRUE16-NEXT: scratch_load_b32 v57, off, s32 offset:276
+; GFX11-TRUE16-NEXT: scratch_load_b32 v56, off, s32 offset:280
+; GFX11-TRUE16-NEXT: scratch_load_b32 v47, off, s32 offset:284
+; GFX11-TRUE16-NEXT: scratch_load_b32 v46, off, s32 offset:288
+; GFX11-TRUE16-NEXT: scratch_load_b32 v45, off, s32 offset:292
+; GFX11-TRUE16-NEXT: scratch_load_b32 v44, off, s32 offset:296
+; GFX11-TRUE16-NEXT: scratch_load_b32 v43, off, s32 offset:300
+; GFX11-TRUE16-NEXT: scratch_load_b32 v42, off, s32 offset:304
+; GFX11-TRUE16-NEXT: scratch_load_b32 v41, off, s32 offset:308
+; GFX11-TRUE16-NEXT: scratch_load_b32 v40, off, s32 offset:312
+; GFX11-TRUE16-NEXT: v_dual_mov_b32 v3, v9 :: v_dual_mov_b32 v4, v14
+; GFX11-TRUE16-NEXT: v_dual_mov_b32 v9, v54 :: v_dual_mov_b32 v10, v65
+; GFX11-TRUE16-NEXT: v_mov_b32_e32 v14, v119
+; GFX11-TRUE16-NEXT: s_waitcnt vmcnt(0)
; GFX11-TRUE16-NEXT: s_setpc_b64 s[30:31]
; GFX11-TRUE16-NEXT: .LBB19_4:
; GFX11-TRUE16-NEXT: ; implicit-def: $vgpr0_vgpr1_vgpr2_vgpr3_vgpr4_vgpr5_vgpr6_vgpr7_vgpr8_vgpr9_vgpr10_vgpr11_vgpr12_vgpr13_vgpr14_vgpr15_vgpr16_vgpr17_vgpr18_vgpr19_vgpr20_vgpr21_vgpr22_vgpr23_vgpr24_vgpr25_vgpr26_vgpr27_vgpr28_vgpr29_vgpr30_vgpr31
+; GFX11-TRUE16-NEXT: ; implicit-def: $vgpr1_vgpr2_vgpr3_vgpr4_vgpr5_vgpr6_vgpr7_vgpr8_vgpr9_vgpr10_vgpr11_vgpr12_vgpr13_vgpr14_vgpr15_vgpr16_vgpr17_vgpr18_vgpr19_vgpr20_vgpr21_vgpr22_vgpr23_vgpr24_vgpr25_vgpr26_vgpr27_vgpr28_vgpr29_vgpr30_vgpr31_vgpr32
+; GFX11-TRUE16-NEXT: ; implicit-def: $vgpr3_vgpr4_vgpr5_vgpr6_vgpr7_vgpr8_vgpr9_vgpr10_vgpr11_vgpr12_vgpr13_vgpr14_vgpr15_vgpr16_vgpr17_vgpr18_vgpr19_vgpr20_vgpr21_vgpr22_vgpr23_vgpr24_vgpr25_vgpr26_vgpr27_vgpr28_vgpr29_vgpr30_vgpr31_vgpr32_vgpr33_vgpr34
+; GFX11-TRUE16-NEXT: ; implicit-def: $vgpr6_vgpr7_vgpr8_vgpr9_vgpr10_vgpr11_vgpr12_vgpr13_vgpr14_vgpr15_vgpr16_vgpr17_vgpr18_vgpr19_vgpr20_vgpr21_vgpr22_vgpr23_vgpr24_vgpr25_vgpr26_vgpr27_vgpr28_vgpr29_vgpr30_vgpr31_vgpr32_vgpr33_vgpr34_vgpr35_vgpr36_vgpr37
+; GFX11-TRUE16-NEXT: ; implicit-def: $vgpr10_vgpr11_vgpr12_vgpr13_vgpr14_vgpr15_vgpr16_vgpr17_vgpr18_vgpr19_vgpr20_vgpr21_vgpr22_vgpr23_vgpr24_vgpr25_vgpr26_vgpr27_vgpr28_vgpr29_vgpr30_vgpr31_vgpr32_vgpr33_vgpr34_vgpr35_vgpr36_vgpr37_vgpr38_vgpr39_vgpr40_vgpr41
+; GFX11-TRUE16-NEXT: ; implicit-def: $vgpr15_vgpr16_vgpr17_vgpr18_vgpr19_vgpr20_vgpr21_vgpr22_vgpr23_vgpr24_vgpr25_vgpr26_vgpr27_vgpr28_vgpr29_vgpr30_vgpr31_vgpr32_vgpr33_vgpr34_vgpr35_vgpr36_vgpr37_vgpr38_vgpr39_vgpr40_vgpr41_vgpr42_vgpr43_vgpr44_vgpr45_vgpr46
+; GFX11-TRUE16-NEXT: ; implicit-def: $vgpr21_vgpr22_vgpr23_vgpr24_vgpr25_vgpr26_vgpr27_vgpr28_vgpr29_vgpr30_vgpr31_vgpr32_vgpr33_vgpr34_vgpr35_vgpr36_vgpr37_vgpr38_vgpr39_vgpr40_vgpr41_vgpr42_vgpr43_vgpr44_vgpr45_vgpr46_vgpr47_vgpr48_vgpr49_vgpr50_vgpr51_vgpr52
+; GFX11-TRUE16-NEXT: ; implicit-def: $vgpr28_vgpr29_vgpr30_vgpr31_vgpr32_vgpr33_vgpr34_vgpr35_vgpr36_vgpr37_vgpr38_vgpr39_vgpr40_vgpr41_vgpr42_vgpr43_vgpr44_vgpr45_vgpr46_vgpr47_vgpr48_vgpr49_vgpr50_vgpr51_vgpr52_vgpr53_vgpr54_vgpr55_vgpr56_vgpr57_vgpr58_vgpr59
+; GFX11-TRUE16-NEXT: ; implicit-def: $vgpr36_vgpr37_vgpr38_vgpr39_vgpr40_vgpr41_vgpr42_vgpr43_vgpr44_vgpr45_vgpr46_vgpr47_vgpr48_vgpr49_vgpr50_vgpr51_vgpr52_vgpr53_vgpr54_vgpr55_vgpr56_vgpr57_vgpr58_vgpr59_vgpr60_vgpr61_vgpr62_vgpr63_vgpr64_vgpr65_vgpr66_vgpr67
+; GFX11-TRUE16-NEXT: ; implicit-def: $vgpr45_vgpr46_vgpr47_vgpr48_vgpr49_vgpr50_vgpr51_vgpr52_vgpr53_vgpr54_vgpr55_vgpr56_vgpr57_vgpr58_vgpr59_vgpr60_vgpr61_vgpr62_vgpr63_vgpr64_vgpr65_vgpr66_vgpr67_vgpr68_vgpr69_vgpr70_vgpr71_vgpr72_vgpr73_vgpr74_vgpr75_vgpr76
+; GFX11-TRUE16-NEXT: ; implicit-def: $vgpr55_vgpr56_vgpr57_vgpr58_vgpr59_vgpr60_vgpr61_vgpr62_vgpr63_vgpr64_vgpr65_vgpr66_vgpr67_vgpr68_vgpr69_vgpr70_vgpr71_vgpr72_vgpr73_vgpr74_vgpr75_vgpr76_vgpr77_vgpr78_vgpr79_vgpr80_vgpr81_vgpr82_vgpr83_vgpr84_vgpr85_vgpr86
+; GFX11-TRUE16-NEXT: ; implicit-def: $vgpr66_vgpr67_vgpr68_vgpr69_vgpr70_vgpr71_vgpr72_vgpr73_vgpr74_vgpr75_vgpr76_vgpr77_vgpr78_vgpr79_vgpr80_vgpr81_vgpr82_vgpr83_vgpr84_vgpr85_vgpr86_vgpr87_vgpr88_vgpr89_vgpr90_vgpr91_vgpr92_vgpr93_vgpr94_vgpr95_vgpr96_vgpr97
+; GFX11-TRUE16-NEXT: ; implicit-def: $vgpr78_vgpr79_vgpr80_vgpr81_vgpr82_vgpr83_vgpr84_vgpr85_vgpr86_vgpr87_vgpr88_vgpr89_vgpr90_vgpr91_vgpr92_vgpr93_vgpr94_vgpr95_vgpr96_vgpr97_vgpr98_vgpr99_vgpr100_vgpr101_vgpr102_vgpr103_vgpr104_vgpr105_vgpr106_vgpr107_vgpr108_vgpr109
+; GFX11-TRUE16-NEXT: ; implicit-def: $vgpr91_vgpr92_vgpr93_vgpr94_vgpr95_vgpr96_vgpr97_vgpr98_vgpr99_vgpr100_vgpr101_vgpr102_vgpr103_vgpr104_vgpr105_vgpr106_vgpr107_vgpr108_vgpr109_vgpr110_vgpr111_vgpr112_vgpr113_vgpr114_vgpr115_vgpr116_vgpr117_vgpr118_vgpr119_vgpr120_vgpr121_vgpr122
+; GFX11-TRUE16-NEXT: ; implicit-def: $vgpr105_vgpr106_vgpr107_vgpr108_vgpr109_vgpr110_vgpr111_vgpr112_vgpr113_vgpr114_vgpr115_vgpr116_vgpr117_vgpr118_vgpr119_vgpr120_vgpr121_vgpr122_vgpr123_vgpr124_vgpr125_vgpr126_vgpr127_vgpr128_vgpr129_vgpr130_vgpr131_vgpr132_vgpr133_vgpr134_vgpr135_vgpr136
+; GFX11-TRUE16-NEXT: ; implicit-def: $vgpr120_vgpr121_vgpr122_vgpr123_vgpr124_vgpr125_vgpr126_vgpr127_vgpr128_vgpr129_vgpr130_vgpr131_vgpr132_vgpr133_vgpr134_vgpr135_vgpr136_vgpr137_vgpr138_vgpr139_vgpr140_vgpr141_vgpr142_vgpr143_vgpr144_vgpr145_vgpr146_vgpr147_vgpr148_vgpr149_vgpr150_vgpr151
+; GFX11-TRUE16-NEXT: ; implicit-def: $vgpr136_vgpr137_vgpr138_vgpr139_vgpr140_vgpr141_vgpr142_vgpr143_vgpr144_vgpr145_vgpr146_vgpr147_vgpr148_vgpr149_vgpr150_vgpr151_vgpr152_vgpr153_vgpr154_vgpr155_vgpr156_vgpr157_vgpr158_vgpr159_vgpr160_vgpr161_vgpr162_vgpr163_vgpr164_vgpr165_vgpr166_vgpr167
+; GFX11-TRUE16-NEXT: ; implicit-def: $vgpr153_vgpr154_vgpr155_vgpr156_vgpr157_vgpr158_vgpr159_vgpr160_vgpr161_vgpr162_vgpr163_vgpr164_vgpr165_vgpr166_vgpr167_vgpr168_vgpr169_vgpr170_vgpr171_vgpr172_vgpr173_vgpr174_vgpr175_vgpr176_vgpr177_vgpr178_vgpr179_vgpr180_vgpr181_vgpr182_vgpr183_vgpr184
; GFX11-TRUE16-NEXT: s_branch .LBB19_2
;
; GFX11-FAKE16-LABEL: bitcast_v48f16_to_v24i32_scalar:
@@ -13212,166 +13550,317 @@ define inreg <48 x i16> @bitcast_v24f32_to_v48i16_scalar(<24 x float> inreg %a,
; GFX9-NEXT: ; implicit-def: $vgpr34
; GFX9-NEXT: s_branch .LBB29_2
;
-; GFX11-LABEL: bitcast_v24f32_to_v48i16_scalar:
-; GFX11: ; %bb.0:
-; GFX11-NEXT: s_waitcnt vmcnt(0) expcnt(0) lgkmcnt(0)
-; GFX11-NEXT: v_cmp_ne_u32_e32 vcc_lo, 0, v6
-; GFX11-NEXT: v_dual_mov_b32 v24, s0 :: v_dual_mov_b32 v23, s1
-; GFX11-NEXT: v_dual_mov_b32 v22, s2 :: v_dual_mov_b32 v21, s3
-; GFX11-NEXT: v_dual_mov_b32 v20, s16 :: v_dual_mov_b32 v19, s17
-; GFX11-NEXT: v_dual_mov_b32 v18, s18 :: v_dual_mov_b32 v7, s20
-; GFX11-NEXT: v_dual_mov_b32 v8, s19 :: v_dual_mov_b32 v13, s21
-; GFX11-NEXT: v_dual_mov_b32 v12, s22 :: v_dual_mov_b32 v11, s23
-; GFX11-NEXT: v_dual_mov_b32 v10, s24 :: v_dual_mov_b32 v9, s25
-; GFX11-NEXT: v_dual_mov_b32 v15, s26 :: v_dual_mov_b32 v14, s27
-; GFX11-NEXT: v_dual_mov_b32 v17, s28 :: v_dual_mov_b32 v16, s29
-; GFX11-NEXT: s_mov_b32 s0, 0
-; GFX11-NEXT: s_and_b32 s1, vcc_lo, exec_lo
-; GFX11-NEXT: s_cbranch_scc0 .LBB29_4
-; GFX11-NEXT: ; %bb.1: ; %cmp.false
-; GFX11-NEXT: v_lshrrev_b32_e32 v30, 16, v5
-; GFX11-NEXT: v_lshrrev_b32_e32 v31, 16, v4
-; GFX11-NEXT: v_lshrrev_b32_e32 v32, 16, v3
-; GFX11-NEXT: v_lshrrev_b32_e32 v33, 16, v2
-; GFX11-NEXT: v_lshrrev_b32_e32 v34, 16, v1
-; GFX11-NEXT: v_lshrrev_b32_e32 v35, 16, v0
-; GFX11-NEXT: v_lshrrev_b32_e32 v36, 16, v16
-; GFX11-NEXT: v_lshrrev_b32_e32 v37, 16, v17
-; GFX11-NEXT: v_lshrrev_b32_e32 v38, 16, v14
-; GFX11-NEXT: v_lshrrev_b32_e32 v39, 16, v15
-; GFX11-NEXT: v_lshrrev_b32_e32 v48, 16, v9
-; GFX11-NEXT: v_lshrrev_b32_e32 v49, 16, v10
-; GFX11-NEXT: v_lshrrev_b32_e32 v50, 16, v11
-; GFX11-NEXT: v_lshrrev_b32_e32 v51, 16, v12
-; GFX11-NEXT: v_lshrrev_b32_e32 v52, 16, v13
-; GFX11-NEXT: v_lshrrev_b32_e32 v53, 16, v7
-; GFX11-NEXT: v_lshrrev_b32_e32 v54, 16, v8
-; GFX11-NEXT: v_lshrrev_b32_e32 v6, 16, v18
-; GFX11-NEXT: v_lshrrev_b32_e32 v29, 16, v19
-; GFX11-NEXT: v_lshrrev_b32_e32 v28, 16, v20
-; GFX11-NEXT: v_lshrrev_b32_e32 v27, 16, v21
-; GFX11-NEXT: v_lshrrev_b32_e32 v26, 16, v22
-; GFX11-NEXT: v_lshrrev_b32_e32 v25, 16, v23
-; GFX11-NEXT: v_lshrrev_b32_e32 v55, 16, v24
-; GFX11-NEXT: s_and_not1_b32 vcc_lo, exec_lo, s0
-; GFX11-NEXT: s_cbranch_vccnz .LBB29_3
-; GFX11-NEXT: .LBB29_2: ; %cmp.true
-; GFX11-NEXT: v_dual_add_f32 v5, 1.0, v5 :: v_dual_add_f32 v4, 1.0, v4
-; GFX11-NEXT: v_dual_add_f32 v3, 1.0, v3 :: v_dual_add_f32 v2, 1.0, v2
-; GFX11-NEXT: v_dual_add_f32 v1, 1.0, v1 :: v_dual_add_f32 v0, 1.0, v0
-; GFX11-NEXT: v_dual_add_f32 v16, 1.0, v16 :: v_dual_add_f32 v17, 1.0, v17
-; GFX11-NEXT: v_dual_add_f32 v14, 1.0, v14 :: v_dual_add_f32 v15, 1.0, v15
-; GFX11-NEXT: v_dual_add_f32 v9, 1.0, v9 :: v_dual_add_f32 v10, 1.0, v10
-; GFX11-NEXT: v_dual_add_f32 v11, 1.0, v11 :: v_dual_add_f32 v12, 1.0, v12
-; GFX11-NEXT: v_dual_add_f32 v13, 1.0, v13 :: v_dual_add_f32 v8, 1.0, v8
-; GFX11-NEXT: v_dual_add_f32 v7, 1.0, v7 :: v_dual_add_f32 v18, 1.0, v18
-; GFX11-NEXT: v_dual_add_f32 v19, 1.0, v19 :: v_dual_add_f32 v20, 1.0, v20
-; GFX11-NEXT: v_dual_add_f32 v21, 1.0, v21 :: v_dual_add_f32 v22, 1.0, v22
-; GFX11-NEXT: v_dual_add_f32 v23, 1.0, v23 :: v_dual_add_f32 v24, 1.0, v24
-; GFX11-NEXT: v_lshrrev_b32_e32 v30, 16, v5
-; GFX11-NEXT: v_lshrrev_b32_e32 v31, 16, v4
-; GFX11-NEXT: v_lshrrev_b32_e32 v32, 16, v3
-; GFX11-NEXT: v_lshrrev_b32_e32 v33, 16, v2
-; GFX11-NEXT: v_lshrrev_b32_e32 v34, 16, v1
-; GFX11-NEXT: v_lshrrev_b32_e32 v35, 16, v0
-; GFX11-NEXT: v_lshrrev_b32_e32 v36, 16, v16
-; GFX11-NEXT: v_lshrrev_b32_e32 v37, 16, v17
-; GFX11-NEXT: v_lshrrev_b32_e32 v38, 16, v14
-; GFX11-NEXT: v_lshrrev_b32_e32 v39, 16, v15
-; GFX11-NEXT: v_lshrrev_b32_e32 v48, 16, v9
-; GFX11-NEXT: v_lshrrev_b32_e32 v49, 16, v10
-; GFX11-NEXT: v_lshrrev_b32_e32 v50, 16, v11
-; GFX11-NEXT: v_lshrrev_b32_e32 v51, 16, v12
-; GFX11-NEXT: v_lshrrev_b32_e32 v52, 16, v13
-; GFX11-NEXT: v_lshrrev_b32_e32 v53, 16, v7
-; GFX11-NEXT: v_lshrrev_b32_e32 v54, 16, v8
-; GFX11-NEXT: v_lshrrev_b32_e32 v6, 16, v18
-; GFX11-NEXT: v_lshrrev_b32_e32 v29, 16, v19
-; GFX11-NEXT: v_lshrrev_b32_e32 v28, 16, v20
-; GFX11-NEXT: v_lshrrev_b32_e32 v27, 16, v21
-; GFX11-NEXT: v_lshrrev_b32_e32 v26, 16, v22
-; GFX11-NEXT: v_lshrrev_b32_e32 v25, 16, v23
-; GFX11-NEXT: v_lshrrev_b32_e32 v55, 16, v24
-; GFX11-NEXT: .LBB29_3: ; %end
-; GFX11-NEXT: v_and_b32_e32 v18, 0xffff, v18
-; GFX11-NEXT: v_and_b32_e32 v12, 0xffff, v12
-; GFX11-NEXT: v_and_b32_e32 v23, 0xffff, v23
-; GFX11-NEXT: v_and_b32_e32 v21, 0xffff, v21
-; GFX11-NEXT: v_and_b32_e32 v19, 0xffff, v19
-; GFX11-NEXT: v_lshl_or_b32 v6, v6, 16, v18
-; GFX11-NEXT: v_and_b32_e32 v18, 0xffff, v10
-; GFX11-NEXT: v_lshl_or_b32 v10, v51, 16, v12
-; GFX11-NEXT: v_and_b32_e32 v15, 0xffff, v15
-; GFX11-NEXT: v_and_b32_e32 v0, 0xffff, v0
-; GFX11-NEXT: v_lshl_or_b32 v25, v25, 16, v23
-; GFX11-NEXT: v_lshl_or_b32 v12, v49, 16, v18
-; GFX11-NEXT: v_and_b32_e32 v18, 0xffff, v14
-; GFX11-NEXT: v_and_b32_e32 v24, 0xffff, v24
-; GFX11-NEXT: v_lshl_or_b32 v27, v27, 16, v21
-; GFX11-NEXT: v_and_b32_e32 v22, 0xffff, v22
-; GFX11-NEXT: v_lshl_or_b32 v29, v29, 16, v19
-; GFX11-NEXT: v_and_b32_e32 v20, 0xffff, v20
-; GFX11-NEXT: v_and_b32_e32 v13, 0xffff, v13
-; GFX11-NEXT: v_and_b32_e32 v19, 0xffff, v9
-; GFX11-NEXT: v_lshl_or_b32 v14, v39, 16, v15
-; GFX11-NEXT: v_lshl_or_b32 v15, v38, 16, v18
-; GFX11-NEXT: v_lshl_or_b32 v18, v35, 16, v0
-; GFX11-NEXT: v_and_b32_e32 v0, 0xffff, v1
-; GFX11-NEXT: v_and_b32_e32 v1, 0xffff, v2
-; GFX11-NEXT: v_lshl_or_b32 v26, v26, 16, v22
-; GFX11-NEXT: v_and_b32_e32 v8, 0xffff, v8
-; GFX11-NEXT: v_and_b32_e32 v21, 0xffff, v7
-; GFX11-NEXT: v_and_b32_e32 v11, 0xffff, v11
-; GFX11-NEXT: v_lshl_or_b32 v9, v52, 16, v13
-; GFX11-NEXT: v_lshl_or_b32 v13, v48, 16, v19
-; GFX11-NEXT: v_and_b32_e32 v17, 0xffff, v17
-; GFX11-NEXT: v_and_b32_e32 v19, 0xffff, v16
-; GFX11-NEXT: v_and_b32_e32 v2, 0xffff, v3
-; GFX11-NEXT: v_and_b32_e32 v3, 0xffff, v4
-; GFX11-NEXT: v_dual_mov_b32 v5, v29 :: v_dual_and_b32 v4, 0xffff, v5
-; GFX11-NEXT: v_lshl_or_b32 v28, v28, 16, v20
-; GFX11-NEXT: v_lshl_or_b32 v20, v33, 16, v1
-; GFX11-NEXT: v_mov_b32_e32 v1, v25
-; GFX11-NEXT: v_lshl_or_b32 v24, v55, 16, v24
-; GFX11-NEXT: v_lshl_or_b32 v7, v54, 16, v8
-; GFX11-NEXT: v_lshl_or_b32 v8, v53, 16, v21
-; GFX11-NEXT: v_lshl_or_b32 v11, v50, 16, v11
-; GFX11-NEXT: v_lshl_or_b32 v16, v37, 16, v17
-; GFX11-NEXT: v_lshl_or_b32 v17, v36, 16, v19
-; GFX11-NEXT: v_lshl_or_b32 v19, v34, 16, v0
-; GFX11-NEXT: v_lshl_or_b32 v21, v32, 16, v2
-; GFX11-NEXT: v_lshl_or_b32 v22, v31, 16, v3
-; GFX11-NEXT: v_lshl_or_b32 v23, v30, 16, v4
-; GFX11-NEXT: v_mov_b32_e32 v0, v24
-; GFX11-NEXT: v_dual_mov_b32 v2, v26 :: v_dual_mov_b32 v3, v27
-; GFX11-NEXT: v_mov_b32_e32 v4, v28
-; GFX11-NEXT: s_setpc_b64 s[30:31]
-; GFX11-NEXT: .LBB29_4:
-; GFX11-NEXT: ; implicit-def: $vgpr55
-; GFX11-NEXT: ; implicit-def: $vgpr25
-; GFX11-NEXT: ; implicit-def: $vgpr26
-; GFX11-NEXT: ; implicit-def: $vgpr27
-; GFX11-NEXT: ; implicit-def: $vgpr28
-; GFX11-NEXT: ; implicit-def: $vgpr29
-; GFX11-NEXT: ; implicit-def: $vgpr6
-; GFX11-NEXT: ; implicit-def: $vgpr54
-; GFX11-NEXT: ; implicit-def: $vgpr53
-; GFX11-NEXT: ; implicit-def: $vgpr52
-; GFX11-NEXT: ; implicit-def: $vgpr51
-; GFX11-NEXT: ; implicit-def: $vgpr50
-; GFX11-NEXT: ; implicit-def: $vgpr49
-; GFX11-NEXT: ; implicit-def: $vgpr48
-; GFX11-NEXT: ; implicit-def: $vgpr39
-; GFX11-NEXT: ; implicit-def: $vgpr38
-; GFX11-NEXT: ; implicit-def: $vgpr37
-; GFX11-NEXT: ; implicit-def: $vgpr36
-; GFX11-NEXT: ; implicit-def: $vgpr35
-; GFX11-NEXT: ; implicit-def: $vgpr34
-; GFX11-NEXT: ; implicit-def: $vgpr33
-; GFX11-NEXT: ; implicit-def: $vgpr32
-; GFX11-NEXT: ; implicit-def: $vgpr31
-; GFX11-NEXT: ; implicit-def: $vgpr30
-; GFX11-NEXT: s_branch .LBB29_2
+; GFX11-TRUE16-LABEL: bitcast_v24f32_to_v48i16_scalar:
+; GFX11-TRUE16: ; %bb.0:
+; GFX11-TRUE16-NEXT: s_waitcnt vmcnt(0) expcnt(0) lgkmcnt(0)
+; GFX11-TRUE16-NEXT: v_dual_mov_b32 v16, v6 :: v_dual_mov_b32 v23, v5
+; GFX11-TRUE16-NEXT: v_dual_mov_b32 v22, v4 :: v_dual_mov_b32 v21, v3
+; GFX11-TRUE16-NEXT: v_dual_mov_b32 v20, v2 :: v_dual_mov_b32 v19, v1
+; GFX11-TRUE16-NEXT: s_delay_alu instid0(VALU_DEP_3)
+; GFX11-TRUE16-NEXT: v_cmp_ne_u32_e32 vcc_lo, 0, v16
+; GFX11-TRUE16-NEXT: v_dual_mov_b32 v18, v0 :: v_dual_mov_b32 v1, s1
+; GFX11-TRUE16-NEXT: v_dual_mov_b32 v0, s0 :: v_dual_mov_b32 v3, s3
+; GFX11-TRUE16-NEXT: v_dual_mov_b32 v2, s2 :: v_dual_mov_b32 v5, s17
+; GFX11-TRUE16-NEXT: v_dual_mov_b32 v4, s16 :: v_dual_mov_b32 v7, s19
+; GFX11-TRUE16-NEXT: v_dual_mov_b32 v6, s18 :: v_dual_mov_b32 v9, s21
+; GFX11-TRUE16-NEXT: v_dual_mov_b32 v8, s20 :: v_dual_mov_b32 v11, s23
+; GFX11-TRUE16-NEXT: v_dual_mov_b32 v10, s22 :: v_dual_mov_b32 v13, s25
+; GFX11-TRUE16-NEXT: v_dual_mov_b32 v12, s24 :: v_dual_mov_b32 v15, s27
+; GFX11-TRUE16-NEXT: v_dual_mov_b32 v14, s26 :: v_dual_mov_b32 v17, s29
+; GFX11-TRUE16-NEXT: v_mov_b32_e32 v16, s28
+; GFX11-TRUE16-NEXT: s_mov_b32 s0, 0
+; GFX11-TRUE16-NEXT: s_and_b32 s1, vcc_lo, exec_lo
+; GFX11-TRUE16-NEXT: s_cbranch_scc0 .LBB29_4
+; GFX11-TRUE16-NEXT: ; %bb.1: ; %cmp.false
+; GFX11-TRUE16-NEXT: v_lshrrev_b32_e32 v24, 16, v23
+; GFX11-TRUE16-NEXT: v_lshrrev_b32_e32 v25, 16, v22
+; GFX11-TRUE16-NEXT: v_lshrrev_b32_e32 v26, 16, v21
+; GFX11-TRUE16-NEXT: v_lshrrev_b32_e32 v27, 16, v20
+; GFX11-TRUE16-NEXT: v_lshrrev_b32_e32 v28, 16, v19
+; GFX11-TRUE16-NEXT: v_lshrrev_b32_e32 v29, 16, v18
+; GFX11-TRUE16-NEXT: v_lshrrev_b32_e32 v30, 16, v17
+; GFX11-TRUE16-NEXT: v_lshrrev_b32_e32 v31, 16, v16
+; GFX11-TRUE16-NEXT: v_lshrrev_b32_e32 v32, 16, v15
+; GFX11-TRUE16-NEXT: v_lshrrev_b32_e32 v33, 16, v14
+; GFX11-TRUE16-NEXT: v_lshrrev_b32_e32 v34, 16, v13
+; GFX11-TRUE16-NEXT: v_lshrrev_b32_e32 v35, 16, v12
+; GFX11-TRUE16-NEXT: v_lshrrev_b32_e32 v36, 16, v11
+; GFX11-TRUE16-NEXT: v_lshrrev_b32_e32 v37, 16, v10
+; GFX11-TRUE16-NEXT: v_lshrrev_b32_e32 v38, 16, v9
+; GFX11-TRUE16-NEXT: v_lshrrev_b32_e32 v39, 16, v8
+; GFX11-TRUE16-NEXT: v_lshrrev_b32_e32 v48, 16, v7
+; GFX11-TRUE16-NEXT: v_lshrrev_b32_e32 v49, 16, v6
+; GFX11-TRUE16-NEXT: v_lshrrev_b32_e32 v50, 16, v5
+; GFX11-TRUE16-NEXT: v_lshrrev_b32_e32 v51, 16, v4
+; GFX11-TRUE16-NEXT: v_lshrrev_b32_e32 v52, 16, v3
+; GFX11-TRUE16-NEXT: v_lshrrev_b32_e32 v53, 16, v2
+; GFX11-TRUE16-NEXT: v_lshrrev_b32_e32 v54, 16, v1
+; GFX11-TRUE16-NEXT: v_lshrrev_b32_e32 v55, 16, v0
+; GFX11-TRUE16-NEXT: s_and_not1_b32 vcc_lo, exec_lo, s0
+; GFX11-TRUE16-NEXT: s_cbranch_vccnz .LBB29_3
+; GFX11-TRUE16-NEXT: .LBB29_2: ; %cmp.true
+; GFX11-TRUE16-NEXT: v_dual_add_f32 v23, 1.0, v23 :: v_dual_add_f32 v22, 1.0, v22
+; GFX11-TRUE16-NEXT: v_dual_add_f32 v21, 1.0, v21 :: v_dual_add_f32 v20, 1.0, v20
+; GFX11-TRUE16-NEXT: v_dual_add_f32 v19, 1.0, v19 :: v_dual_add_f32 v18, 1.0, v18
+; GFX11-TRUE16-NEXT: v_dual_add_f32 v17, 1.0, v17 :: v_dual_add_f32 v16, 1.0, v16
+; GFX11-TRUE16-NEXT: v_dual_add_f32 v15, 1.0, v15 :: v_dual_add_f32 v14, 1.0, v14
+; GFX11-TRUE16-NEXT: v_dual_add_f32 v13, 1.0, v13 :: v_dual_add_f32 v12, 1.0, v12
+; GFX11-TRUE16-NEXT: v_dual_add_f32 v11, 1.0, v11 :: v_dual_add_f32 v10, 1.0, v10
+; GFX11-TRUE16-NEXT: v_dual_add_f32 v9, 1.0, v9 :: v_dual_add_f32 v8, 1.0, v8
+; GFX11-TRUE16-NEXT: v_dual_add_f32 v7, 1.0, v7 :: v_dual_add_f32 v6, 1.0, v6
+; GFX11-TRUE16-NEXT: v_dual_add_f32 v5, 1.0, v5 :: v_dual_add_f32 v4, 1.0, v4
+; GFX11-TRUE16-NEXT: v_dual_add_f32 v3, 1.0, v3 :: v_dual_add_f32 v2, 1.0, v2
+; GFX11-TRUE16-NEXT: v_dual_add_f32 v1, 1.0, v1 :: v_dual_add_f32 v0, 1.0, v0
+; GFX11-TRUE16-NEXT: v_lshrrev_b32_e32 v24, 16, v23
+; GFX11-TRUE16-NEXT: v_lshrrev_b32_e32 v25, 16, v22
+; GFX11-TRUE16-NEXT: v_lshrrev_b32_e32 v26, 16, v21
+; GFX11-TRUE16-NEXT: v_lshrrev_b32_e32 v27, 16, v20
+; GFX11-TRUE16-NEXT: v_lshrrev_b32_e32 v28, 16, v19
+; GFX11-TRUE16-NEXT: v_lshrrev_b32_e32 v29, 16, v18
+; GFX11-TRUE16-NEXT: v_lshrrev_b32_e32 v30, 16, v17
+; GFX11-TRUE16-NEXT: v_lshrrev_b32_e32 v31, 16, v16
+; GFX11-TRUE16-NEXT: v_lshrrev_b32_e32 v32, 16, v15
+; GFX11-TRUE16-NEXT: v_lshrrev_b32_e32 v33, 16, v14
+; GFX11-TRUE16-NEXT: v_lshrrev_b32_e32 v34, 16, v13
+; GFX11-TRUE16-NEXT: v_lshrrev_b32_e32 v35, 16, v12
+; GFX11-TRUE16-NEXT: v_lshrrev_b32_e32 v36, 16, v11
+; GFX11-TRUE16-NEXT: v_lshrrev_b32_e32 v37, 16, v10
+; GFX11-TRUE16-NEXT: v_lshrrev_b32_e32 v38, 16, v9
+; GFX11-TRUE16-NEXT: v_lshrrev_b32_e32 v39, 16, v8
+; GFX11-TRUE16-NEXT: v_lshrrev_b32_e32 v48, 16, v7
+; GFX11-TRUE16-NEXT: v_lshrrev_b32_e32 v49, 16, v6
+; GFX11-TRUE16-NEXT: v_lshrrev_b32_e32 v50, 16, v5
+; GFX11-TRUE16-NEXT: v_lshrrev_b32_e32 v51, 16, v4
+; GFX11-TRUE16-NEXT: v_lshrrev_b32_e32 v52, 16, v3
+; GFX11-TRUE16-NEXT: v_lshrrev_b32_e32 v53, 16, v2
+; GFX11-TRUE16-NEXT: v_lshrrev_b32_e32 v54, 16, v1
+; GFX11-TRUE16-NEXT: v_lshrrev_b32_e32 v55, 16, v0
+; GFX11-TRUE16-NEXT: .LBB29_3: ; %end
+; GFX11-TRUE16-NEXT: s_delay_alu instid0(VALU_DEP_1) | instskip(NEXT) | instid1(VALU_DEP_4)
+; GFX11-TRUE16-NEXT: v_dual_mov_b32 v55, v55 :: v_dual_mov_b32 v54, v54
+; GFX11-TRUE16-NEXT: v_dual_mov_b32 v53, v53 :: v_dual_mov_b32 v52, v52
+; GFX11-TRUE16-NEXT: v_dual_mov_b32 v51, v51 :: v_dual_mov_b32 v50, v50
+; GFX11-TRUE16-NEXT: v_dual_mov_b32 v49, v49 :: v_dual_mov_b32 v48, v48
+; GFX11-TRUE16-NEXT: v_dual_mov_b32 v39, v39 :: v_dual_mov_b32 v38, v38
+; GFX11-TRUE16-NEXT: v_dual_mov_b32 v37, v37 :: v_dual_mov_b32 v36, v36
+; GFX11-TRUE16-NEXT: v_dual_mov_b32 v35, v35 :: v_dual_mov_b32 v34, v34
+; GFX11-TRUE16-NEXT: v_dual_mov_b32 v33, v33 :: v_dual_mov_b32 v32, v32
+; GFX11-TRUE16-NEXT: v_dual_mov_b32 v31, v31 :: v_dual_mov_b32 v30, v30
+; GFX11-TRUE16-NEXT: v_dual_mov_b32 v29, v29 :: v_dual_mov_b32 v28, v28
+; GFX11-TRUE16-NEXT: v_dual_mov_b32 v27, v27 :: v_dual_mov_b32 v26, v26
+; GFX11-TRUE16-NEXT: v_dual_mov_b32 v25, v25 :: v_dual_mov_b32 v24, v24
+; GFX11-TRUE16-NEXT: v_mov_b16_e32 v0.h, v55.l
+; GFX11-TRUE16-NEXT: v_mov_b16_e32 v1.h, v54.l
+; GFX11-TRUE16-NEXT: v_mov_b16_e32 v2.h, v53.l
+; GFX11-TRUE16-NEXT: v_mov_b16_e32 v3.h, v52.l
+; GFX11-TRUE16-NEXT: v_mov_b16_e32 v4.h, v51.l
+; GFX11-TRUE16-NEXT: v_mov_b16_e32 v5.h, v50.l
+; GFX11-TRUE16-NEXT: v_mov_b16_e32 v6.h, v49.l
+; GFX11-TRUE16-NEXT: v_mov_b16_e32 v7.h, v48.l
+; GFX11-TRUE16-NEXT: v_mov_b16_e32 v8.h, v39.l
+; GFX11-TRUE16-NEXT: v_mov_b16_e32 v9.h, v38.l
+; GFX11-TRUE16-NEXT: v_mov_b16_e32 v10.h, v37.l
+; GFX11-TRUE16-NEXT: v_mov_b16_e32 v11.h, v36.l
+; GFX11-TRUE16-NEXT: v_mov_b16_e32 v12.h, v35.l
+; GFX11-TRUE16-NEXT: v_mov_b16_e32 v13.h, v34.l
+; GFX11-TRUE16-NEXT: v_mov_b16_e32 v14.h, v33.l
+; GFX11-TRUE16-NEXT: v_mov_b16_e32 v15.h, v32.l
+; GFX11-TRUE16-NEXT: v_mov_b16_e32 v16.h, v31.l
+; GFX11-TRUE16-NEXT: v_mov_b16_e32 v17.h, v30.l
+; GFX11-TRUE16-NEXT: v_mov_b16_e32 v18.h, v29.l
+; GFX11-TRUE16-NEXT: v_mov_b16_e32 v19.h, v28.l
+; GFX11-TRUE16-NEXT: v_mov_b16_e32 v20.h, v27.l
+; GFX11-TRUE16-NEXT: v_mov_b16_e32 v21.h, v26.l
+; GFX11-TRUE16-NEXT: v_mov_b16_e32 v22.h, v25.l
+; GFX11-TRUE16-NEXT: v_mov_b16_e32 v23.h, v24.l
+; GFX11-TRUE16-NEXT: s_setpc_b64 s[30:31]
+; GFX11-TRUE16-NEXT: .LBB29_4:
+; GFX11-TRUE16-NEXT: ; implicit-def: $vgpr55
+; GFX11-TRUE16-NEXT: ; implicit-def: $vgpr54
+; GFX11-TRUE16-NEXT: ; implicit-def: $vgpr53
+; GFX11-TRUE16-NEXT: ; implicit-def: $vgpr52
+; GFX11-TRUE16-NEXT: ; implicit-def: $vgpr51
+; GFX11-TRUE16-NEXT: ; implicit-def: $vgpr50
+; GFX11-TRUE16-NEXT: ; implicit-def: $vgpr49
+; GFX11-TRUE16-NEXT: ; implicit-def: $vgpr48
+; GFX11-TRUE16-NEXT: ; implicit-def: $vgpr39
+; GFX11-TRUE16-NEXT: ; implicit-def: $vgpr38
+; GFX11-TRUE16-NEXT: ; implicit-def: $vgpr37
+; GFX11-TRUE16-NEXT: ; implicit-def: $vgpr36
+; GFX11-TRUE16-NEXT: ; implicit-def: $vgpr35
+; GFX11-TRUE16-NEXT: ; implicit-def: $vgpr34
+; GFX11-TRUE16-NEXT: ; implicit-def: $vgpr33
+; GFX11-TRUE16-NEXT: ; implicit-def: $vgpr32
+; GFX11-TRUE16-NEXT: ; implicit-def: $vgpr31
+; GFX11-TRUE16-NEXT: ; implicit-def: $vgpr30
+; GFX11-TRUE16-NEXT: ; implicit-def: $vgpr29
+; GFX11-TRUE16-NEXT: ; implicit-def: $vgpr28
+; GFX11-TRUE16-NEXT: ; implicit-def: $vgpr27
+; GFX11-TRUE16-NEXT: ; implicit-def: $vgpr26
+; GFX11-TRUE16-NEXT: ; implicit-def: $vgpr25
+; GFX11-TRUE16-NEXT: ; implicit-def: $vgpr24
+; GFX11-TRUE16-NEXT: s_branch .LBB29_2
+;
+; GFX11-FAKE16-LABEL: bitcast_v24f32_to_v48i16_scalar:
+; GFX11-FAKE16: ; %bb.0:
+; GFX11-FAKE16-NEXT: s_waitcnt vmcnt(0) expcnt(0) lgkmcnt(0)
+; GFX11-FAKE16-NEXT: v_cmp_ne_u32_e32 vcc_lo, 0, v6
+; GFX11-FAKE16-NEXT: v_dual_mov_b32 v24, s0 :: v_dual_mov_b32 v23, s1
+; GFX11-FAKE16-NEXT: v_dual_mov_b32 v22, s2 :: v_dual_mov_b32 v21, s3
+; GFX11-FAKE16-NEXT: v_dual_mov_b32 v20, s16 :: v_dual_mov_b32 v19, s17
+; GFX11-FAKE16-NEXT: v_dual_mov_b32 v18, s18 :: v_dual_mov_b32 v7, s20
+; GFX11-FAKE16-NEXT: v_dual_mov_b32 v8, s19 :: v_dual_mov_b32 v13, s21
+; GFX11-FAKE16-NEXT: v_dual_mov_b32 v12, s22 :: v_dual_mov_b32 v11, s23
+; GFX11-FAKE16-NEXT: v_dual_mov_b32 v10, s24 :: v_dual_mov_b32 v9, s25
+; GFX11-FAKE16-NEXT: v_dual_mov_b32 v15, s26 :: v_dual_mov_b32 v14, s27
+; GFX11-FAKE16-NEXT: v_dual_mov_b32 v17, s28 :: v_dual_mov_b32 v16, s29
+; GFX11-FAKE16-NEXT: s_mov_b32 s0, 0
+; GFX11-FAKE16-NEXT: s_and_b32 s1, vcc_lo, exec_lo
+; GFX11-FAKE16-NEXT: s_cbranch_scc0 .LBB29_4
+; GFX11-FAKE16-NEXT: ; %bb.1: ; %cmp.false
+; GFX11-FAKE16-NEXT: v_lshrrev_b32_e32 v30, 16, v5
+; GFX11-FAKE16-NEXT: v_lshrrev_b32_e32 v31, 16, v4
+; GFX11-FAKE16-NEXT: v_lshrrev_b32_e32 v32, 16, v3
+; GFX11-FAKE16-NEXT: v_lshrrev_b32_e32 v33, 16, v2
+; GFX11-FAKE16-NEXT: v_lshrrev_b32_e32 v34, 16, v1
+; GFX11-FAKE16-NEXT: v_lshrrev_b32_e32 v35, 16, v0
+; GFX11-FAKE16-NEXT: v_lshrrev_b32_e32 v36, 16, v16
+; GFX11-FAKE16-NEXT: v_lshrrev_b32_e32 v37, 16, v17
+; GFX11-FAKE16-NEXT: v_lshrrev_b32_e32 v38, 16, v14
+; GFX11-FAKE16-NEXT: v_lshrrev_b32_e32 v39, 16, v15
+; GFX11-FAKE16-NEXT: v_lshrrev_b32_e32 v48, 16, v9
+; GFX11-FAKE16-NEXT: v_lshrrev_b32_e32 v49, 16, v10
+; GFX11-FAKE16-NEXT: v_lshrrev_b32_e32 v50, 16, v11
+; GFX11-FAKE16-NEXT: v_lshrrev_b32_e32 v51, 16, v12
+; GFX11-FAKE16-NEXT: v_lshrrev_b32_e32 v52, 16, v13
+; GFX11-FAKE16-NEXT: v_lshrrev_b32_e32 v53, 16, v7
+; GFX11-FAKE16-NEXT: v_lshrrev_b32_e32 v54, 16, v8
+; GFX11-FAKE16-NEXT: v_lshrrev_b32_e32 v6, 16, v18
+; GFX11-FAKE16-NEXT: v_lshrrev_b32_e32 v29, 16, v19
+; GFX11-FAKE16-NEXT: v_lshrrev_b32_e32 v28, 16, v20
+; GFX11-FAKE16-NEXT: v_lshrrev_b32_e32 v27, 16, v21
+; GFX11-FAKE16-NEXT: v_lshrrev_b32_e32 v26, 16, v22
+; GFX11-FAKE16-NEXT: v_lshrrev_b32_e32 v25, 16, v23
+; GFX11-FAKE16-NEXT: v_lshrrev_b32_e32 v55, 16, v24
+; GFX11-FAKE16-NEXT: s_and_not1_b32 vcc_lo, exec_lo, s0
+; GFX11-FAKE16-NEXT: s_cbranch_vccnz .LBB29_3
+; GFX11-FAKE16-NEXT: .LBB29_2: ; %cmp.true
+; GFX11-FAKE16-NEXT: v_dual_add_f32 v5, 1.0, v5 :: v_dual_add_f32 v4, 1.0, v4
+; GFX11-FAKE16-NEXT: v_dual_add_f32 v3, 1.0, v3 :: v_dual_add_f32 v2, 1.0, v2
+; GFX11-FAKE16-NEXT: v_dual_add_f32 v1, 1.0, v1 :: v_dual_add_f32 v0, 1.0, v0
+; GFX11-FAKE16-NEXT: v_dual_add_f32 v16, 1.0, v16 :: v_dual_add_f32 v17, 1.0, v17
+; GFX11-FAKE16-NEXT: v_dual_add_f32 v14, 1.0, v14 :: v_dual_add_f32 v15, 1.0, v15
+; GFX11-FAKE16-NEXT: v_dual_add_f32 v9, 1.0, v9 :: v_dual_add_f32 v10, 1.0, v10
+; GFX11-FAKE16-NEXT: v_dual_add_f32 v11, 1.0, v11 :: v_dual_add_f32 v12, 1.0, v12
+; GFX11-FAKE16-NEXT: v_dual_add_f32 v13, 1.0, v13 :: v_dual_add_f32 v8, 1.0, v8
+; GFX11-FAKE16-NEXT: v_dual_add_f32 v7, 1.0, v7 :: v_dual_add_f32 v18, 1.0, v18
+; GFX11-FAKE16-NEXT: v_dual_add_f32 v19, 1.0, v19 :: v_dual_add_f32 v20, 1.0, v20
+; GFX11-FAKE16-NEXT: v_dual_add_f32 v21, 1.0, v21 :: v_dual_add_f32 v22, 1.0, v22
+; GFX11-FAKE16-NEXT: v_dual_add_f32 v23, 1.0, v23 :: v_dual_add_f32 v24, 1.0, v24
+; GFX11-FAKE16-NEXT: v_lshrrev_b32_e32 v30, 16, v5
+; GFX11-FAKE16-NEXT: v_lshrrev_b32_e32 v31, 16, v4
+; GFX11-FAKE16-NEXT: v_lshrrev_b32_e32 v32, 16, v3
+; GFX11-FAKE16-NEXT: v_lshrrev_b32_e32 v33, 16, v2
+; GFX11-FAKE16-NEXT: v_lshrrev_b32_e32 v34, 16, v1
+; GFX11-FAKE16-NEXT: v_lshrrev_b32_e32 v35, 16, v0
+; GFX11-FAKE16-NEXT: v_lshrrev_b32_e32 v36, 16, v16
+; GFX11-FAKE16-NEXT: v_lshrrev_b32_e32 v37, 16, v17
+; GFX11-FAKE16-NEXT: v_lshrrev_b32_e32 v38, 16, v14
+; GFX11-FAKE16-NEXT: v_lshrrev_b32_e32 v39, 16, v15
+; GFX11-FAKE16-NEXT: v_lshrrev_b32_e32 v48, 16, v9
+; GFX11-FAKE16-NEXT: v_lshrrev_b32_e32 v49, 16, v10
+; GFX11-FAKE16-NEXT: v_lshrrev_b32_e32 v50, 16, v11
+; GFX11-FAKE16-NEXT: v_lshrrev_b32_e32 v51, 16, v12
+; GFX11-FAKE16-NEXT: v_lshrrev_b32_e32 v52, 16, v13
+; GFX11-FAKE16-NEXT: v_lshrrev_b32_e32 v53, 16, v7
+; GFX11-FAKE16-NEXT: v_lshrrev_b32_e32 v54, 16, v8
+; GFX11-FAKE16-NEXT: v_lshrrev_b32_e32 v6, 16, v18
+; GFX11-FAKE16-NEXT: v_lshrrev_b32_e32 v29, 16, v19
+; GFX11-FAKE16-NEXT: v_lshrrev_b32_e32 v28, 16, v20
+; GFX11-FAKE16-NEXT: v_lshrrev_b32_e32 v27, 16, v21
+; GFX11-FAKE16-NEXT: v_lshrrev_b32_e32 v26, 16, v22
+; GFX11-FAKE16-NEXT: v_lshrrev_b32_e32 v25, 16, v23
+; GFX11-FAKE16-NEXT: v_lshrrev_b32_e32 v55, 16, v24
+; GFX11-FAKE16-NEXT: .LBB29_3: ; %end
+; GFX11-FAKE16-NEXT: v_and_b32_e32 v18, 0xffff, v18
+; GFX11-FAKE16-NEXT: v_and_b32_e32 v12, 0xffff, v12
+; GFX11-FAKE16-NEXT: v_and_b32_e32 v23, 0xffff, v23
+; GFX11-FAKE16-NEXT: v_and_b32_e32 v21, 0xffff, v21
+; GFX11-FAKE16-NEXT: v_and_b32_e32 v19, 0xffff, v19
+; GFX11-FAKE16-NEXT: v_lshl_or_b32 v6, v6, 16, v18
+; GFX11-FAKE16-NEXT: v_and_b32_e32 v18, 0xffff, v10
+; GFX11-FAKE16-NEXT: v_lshl_or_b32 v10, v51, 16, v12
+; GFX11-FAKE16-NEXT: v_and_b32_e32 v15, 0xffff, v15
+; GFX11-FAKE16-NEXT: v_and_b32_e32 v0, 0xffff, v0
+; GFX11-FAKE16-NEXT: v_lshl_or_b32 v25, v25, 16, v23
+; GFX11-FAKE16-NEXT: v_lshl_or_b32 v12, v49, 16, v18
+; GFX11-FAKE16-NEXT: v_and_b32_e32 v18, 0xffff, v14
+; GFX11-FAKE16-NEXT: v_and_b32_e32 v24, 0xffff, v24
+; GFX11-FAKE16-NEXT: v_lshl_or_b32 v27, v27, 16, v21
+; GFX11-FAKE16-NEXT: v_and_b32_e32 v22, 0xffff, v22
+; GFX11-FAKE16-NEXT: v_lshl_or_b32 v29, v29, 16, v19
+; GFX11-FAKE16-NEXT: v_and_b32_e32 v20, 0xffff, v20
+; GFX11-FAKE16-NEXT: v_and_b32_e32 v13, 0xffff, v13
+; GFX11-FAKE16-NEXT: v_and_b32_e32 v19, 0xffff, v9
+; GFX11-FAKE16-NEXT: v_lshl_or_b32 v14, v39, 16, v15
+; GFX11-FAKE16-NEXT: v_lshl_or_b32 v15, v38, 16, v18
+; GFX11-FAKE16-NEXT: v_lshl_or_b32 v18, v35, 16, v0
+; GFX11-FAKE16-NEXT: v_and_b32_e32 v0, 0xffff, v1
+; GFX11-FAKE16-NEXT: v_and_b32_e32 v1, 0xffff, v2
+; GFX11-FAKE16-NEXT: v_lshl_or_b32 v26, v26, 16, v22
+; GFX11-FAKE16-NEXT: v_and_b32_e32 v8, 0xffff, v8
+; GFX11-FAKE16-NEXT: v_and_b32_e32 v21, 0xffff, v7
+; GFX11-FAKE16-NEXT: v_and_b32_e32 v11, 0xffff, v11
+; GFX11-FAKE16-NEXT: v_lshl_or_b32 v9, v52, 16, v13
+; GFX11-FAKE16-NEXT: v_lshl_or_b32 v13, v48, 16, v19
+; GFX11-FAKE16-NEXT: v_and_b32_e32 v17, 0xffff, v17
+; GFX11-FAKE16-NEXT: v_and_b32_e32 v19, 0xffff, v16
+; GFX11-FAKE16-NEXT: v_and_b32_e32 v2, 0xffff, v3
+; GFX11-FAKE16-NEXT: v_and_b32_e32 v3, 0xffff, v4
+; GFX11-FAKE16-NEXT: v_dual_mov_b32 v5, v29 :: v_dual_and_b32 v4, 0xffff, v5
+; GFX11-FAKE16-NEXT: v_lshl_or_b32 v28, v28, 16, v20
+; GFX11-FAKE16-NEXT: v_lshl_or_b32 v20, v33, 16, v1
+; GFX11-FAKE16-NEXT: v_mov_b32_e32 v1, v25
+; GFX11-FAKE16-NEXT: v_lshl_or_b32 v24, v55, 16, v24
+; GFX11-FAKE16-NEXT: v_lshl_or_b32 v7, v54, 16, v8
+; GFX11-FAKE16-NEXT: v_lshl_or_b32 v8, v53, 16, v21
+; GFX11-FAKE16-NEXT: v_lshl_or_b32 v11, v50, 16, v11
+; GFX11-FAKE16-NEXT: v_lshl_or_b32 v16, v37, 16, v17
+; GFX11-FAKE16-NEXT: v_lshl_or_b32 v17, v36, 16, v19
+; GFX11-FAKE16-NEXT: v_lshl_or_b32 v19, v34, 16, v0
+; GFX11-FAKE16-NEXT: v_lshl_or_b32 v21, v32, 16, v2
+; GFX11-FAKE16-NEXT: v_lshl_or_b32 v22, v31, 16, v3
+; GFX11-FAKE16-NEXT: v_lshl_or_b32 v23, v30, 16, v4
+; GFX11-FAKE16-NEXT: v_mov_b32_e32 v0, v24
+; GFX11-FAKE16-NEXT: v_dual_mov_b32 v2, v26 :: v_dual_mov_b32 v3, v27
+; GFX11-FAKE16-NEXT: v_mov_b32_e32 v4, v28
+; GFX11-FAKE16-NEXT: s_setpc_b64 s[30:31]
+; GFX11-FAKE16-NEXT: .LBB29_4:
+; GFX11-FAKE16-NEXT: ; implicit-def: $vgpr55
+; GFX11-FAKE16-NEXT: ; implicit-def: $vgpr25
+; GFX11-FAKE16-NEXT: ; implicit-def: $vgpr26
+; GFX11-FAKE16-NEXT: ; implicit-def: $vgpr27
+; GFX11-FAKE16-NEXT: ; implicit-def: $vgpr28
+; GFX11-FAKE16-NEXT: ; implicit-def: $vgpr29
+; GFX11-FAKE16-NEXT: ; implicit-def: $vgpr6
+; GFX11-FAKE16-NEXT: ; implicit-def: $vgpr54
+; GFX11-FAKE16-NEXT: ; implicit-def: $vgpr53
+; GFX11-FAKE16-NEXT: ; implicit-def: $vgpr52
+; GFX11-FAKE16-NEXT: ; implicit-def: $vgpr51
+; GFX11-FAKE16-NEXT: ; implicit-def: $vgpr50
+; GFX11-FAKE16-NEXT: ; implicit-def: $vgpr49
+; GFX11-FAKE16-NEXT: ; implicit-def: $vgpr48
+; GFX11-FAKE16-NEXT: ; implicit-def: $vgpr39
+; GFX11-FAKE16-NEXT: ; implicit-def: $vgpr38
+; GFX11-FAKE16-NEXT: ; implicit-def: $vgpr37
+; GFX11-FAKE16-NEXT: ; implicit-def: $vgpr36
+; GFX11-FAKE16-NEXT: ; implicit-def: $vgpr35
+; GFX11-FAKE16-NEXT: ; implicit-def: $vgpr34
+; GFX11-FAKE16-NEXT: ; implicit-def: $vgpr33
+; GFX11-FAKE16-NEXT: ; implicit-def: $vgpr32
+; GFX11-FAKE16-NEXT: ; implicit-def: $vgpr31
+; GFX11-FAKE16-NEXT: ; implicit-def: $vgpr30
+; GFX11-FAKE16-NEXT: s_branch .LBB29_2
%cmp = icmp eq i32 %b, 0
br i1 %cmp, label %cmp.true, label %cmp.false
@@ -15153,117 +15642,286 @@ define inreg <24 x float> @bitcast_v48i16_to_v24f32_scalar(<48 x i16> inreg %a,
; GFX11-TRUE16-LABEL: bitcast_v48i16_to_v24f32_scalar:
; GFX11-TRUE16: ; %bb.0:
; GFX11-TRUE16-NEXT: s_waitcnt vmcnt(0) expcnt(0) lgkmcnt(0)
-; GFX11-TRUE16-NEXT: v_mov_b16_e32 v32.h, 0
; GFX11-TRUE16-NEXT: v_cmp_ne_u32_e32 vcc_lo, 0, v6
-; GFX11-TRUE16-NEXT: v_mov_b16_e32 v32.l, v5.h
-; GFX11-TRUE16-NEXT: v_mov_b16_e32 v33.l, v4.h
-; GFX11-TRUE16-NEXT: v_mov_b16_e32 v34.l, v3.h
-; GFX11-TRUE16-NEXT: v_mov_b16_e32 v33.h, v32.h
-; GFX11-TRUE16-NEXT: v_mov_b16_e32 v34.h, v32.h
-; GFX11-TRUE16-NEXT: v_mov_b16_e32 v35.l, v2.h
-; GFX11-TRUE16-NEXT: v_mov_b16_e32 v35.h, v32.h
-; GFX11-TRUE16-NEXT: v_mov_b16_e32 v36.l, v1.h
-; GFX11-TRUE16-NEXT: v_mov_b16_e32 v36.h, v32.h
-; GFX11-TRUE16-NEXT: v_mov_b16_e32 v37.l, v0.h
-; GFX11-TRUE16-NEXT: v_mov_b16_e32 v37.h, v32.h
-; GFX11-TRUE16-NEXT: v_and_b32_e32 v51, 0xffff, v0
-; GFX11-TRUE16-NEXT: v_and_b32_e32 v50, 0xffff, v1
-; GFX11-TRUE16-NEXT: v_and_b32_e32 v49, 0xffff, v2
-; GFX11-TRUE16-NEXT: v_and_b32_e32 v48, 0xffff, v3
-; GFX11-TRUE16-NEXT: v_and_b32_e32 v39, 0xffff, v4
-; GFX11-TRUE16-NEXT: v_and_b32_e32 v38, 0xffff, v5
-; GFX11-TRUE16-NEXT: s_lshr_b32 s41, s29, 16
-; GFX11-TRUE16-NEXT: s_lshr_b32 s42, s28, 16
-; GFX11-TRUE16-NEXT: s_lshr_b32 s15, s27, 16
-; GFX11-TRUE16-NEXT: s_lshr_b32 s43, s26, 16
-; GFX11-TRUE16-NEXT: s_lshr_b32 s14, s25, 16
-; GFX11-TRUE16-NEXT: s_lshr_b32 s13, s24, 16
-; GFX11-TRUE16-NEXT: s_lshr_b32 s12, s23, 16
-; GFX11-TRUE16-NEXT: s_lshr_b32 s11, s22, 16
-; GFX11-TRUE16-NEXT: s_lshr_b32 s10, s21, 16
-; GFX11-TRUE16-NEXT: s_lshr_b32 s9, s20, 16
-; GFX11-TRUE16-NEXT: s_lshr_b32 s8, s19, 16
-; GFX11-TRUE16-NEXT: s_lshr_b32 s7, s18, 16
-; GFX11-TRUE16-NEXT: s_lshr_b32 s6, s17, 16
-; GFX11-TRUE16-NEXT: s_lshr_b32 s5, s16, 16
-; GFX11-TRUE16-NEXT: s_lshr_b32 s44, s3, 16
-; GFX11-TRUE16-NEXT: s_lshr_b32 s45, s2, 16
-; GFX11-TRUE16-NEXT: s_lshr_b32 s46, s1, 16
-; GFX11-TRUE16-NEXT: s_lshr_b32 s4, s0, 16
-; GFX11-TRUE16-NEXT: s_mov_b32 s40, 0
-; GFX11-TRUE16-NEXT: s_pack_ll_b32_b16 s4, s0, s4
-; GFX11-TRUE16-NEXT: s_pack_ll_b32_b16 s1, s1, s46
-; GFX11-TRUE16-NEXT: s_pack_ll_b32_b16 s2, s2, s45
-; GFX11-TRUE16-NEXT: s_pack_ll_b32_b16 s3, s3, s44
-; GFX11-TRUE16-NEXT: s_pack_ll_b32_b16 s5, s16, s5
-; GFX11-TRUE16-NEXT: s_pack_ll_b32_b16 s6, s17, s6
-; GFX11-TRUE16-NEXT: s_pack_ll_b32_b16 s7, s18, s7
-; GFX11-TRUE16-NEXT: s_pack_ll_b32_b16 s8, s19, s8
-; GFX11-TRUE16-NEXT: s_pack_ll_b32_b16 s9, s20, s9
-; GFX11-TRUE16-NEXT: s_pack_ll_b32_b16 s10, s21, s10
-; GFX11-TRUE16-NEXT: s_pack_ll_b32_b16 s11, s22, s11
-; GFX11-TRUE16-NEXT: s_pack_ll_b32_b16 s12, s23, s12
-; GFX11-TRUE16-NEXT: s_pack_ll_b32_b16 s13, s24, s13
-; GFX11-TRUE16-NEXT: s_pack_ll_b32_b16 s14, s25, s14
-; GFX11-TRUE16-NEXT: s_pack_ll_b32_b16 s0, s26, s43
-; GFX11-TRUE16-NEXT: s_pack_ll_b32_b16 s15, s27, s15
-; GFX11-TRUE16-NEXT: s_pack_ll_b32_b16 s16, s28, s42
-; GFX11-TRUE16-NEXT: s_pack_ll_b32_b16 s17, s29, s41
+; GFX11-TRUE16-NEXT: s_clause 0x1f
+; GFX11-TRUE16-NEXT: scratch_store_b32 off, v40, s32 offset:312
+; GFX11-TRUE16-NEXT: scratch_store_b32 off, v41, s32 offset:308
+; GFX11-TRUE16-NEXT: scratch_store_b32 off, v42, s32 offset:304
+; GFX11-TRUE16-NEXT: scratch_store_b32 off, v43, s32 offset:300
+; GFX11-TRUE16-NEXT: scratch_store_b32 off, v44, s32 offset:296
+; GFX11-TRUE16-NEXT: scratch_store_b32 off, v45, s32 offset:292
+; GFX11-TRUE16-NEXT: scratch_store_b32 off, v46, s32 offset:288
+; GFX11-TRUE16-NEXT: scratch_store_b32 off, v47, s32 offset:284
+; GFX11-TRUE16-NEXT: scratch_store_b32 off, v56, s32 offset:280
+; GFX11-TRUE16-NEXT: scratch_store_b32 off, v57, s32 offset:276
+; GFX11-TRUE16-NEXT: scratch_store_b32 off, v58, s32 offset:272
+; GFX11-TRUE16-NEXT: scratch_store_b32 off, v59, s32 offset:268
+; GFX11-TRUE16-NEXT: scratch_store_b32 off, v60, s32 offset:264
+; GFX11-TRUE16-NEXT: scratch_store_b32 off, v61, s32 offset:260
+; GFX11-TRUE16-NEXT: scratch_store_b32 off, v62, s32 offset:256
+; GFX11-TRUE16-NEXT: scratch_store_b32 off, v63, s32 offset:252
+; GFX11-TRUE16-NEXT: scratch_store_b32 off, v72, s32 offset:248
+; GFX11-TRUE16-NEXT: scratch_store_b32 off, v73, s32 offset:244
+; GFX11-TRUE16-NEXT: scratch_store_b32 off, v74, s32 offset:240
+; GFX11-TRUE16-NEXT: scratch_store_b32 off, v75, s32 offset:236
+; GFX11-TRUE16-NEXT: scratch_store_b32 off, v76, s32 offset:232
+; GFX11-TRUE16-NEXT: scratch_store_b32 off, v77, s32 offset:228
+; GFX11-TRUE16-NEXT: scratch_store_b32 off, v78, s32 offset:224
+; GFX11-TRUE16-NEXT: scratch_store_b32 off, v79, s32 offset:220
+; GFX11-TRUE16-NEXT: scratch_store_b32 off, v88, s32 offset:216
+; GFX11-TRUE16-NEXT: scratch_store_b32 off, v89, s32 offset:212
+; GFX11-TRUE16-NEXT: scratch_store_b32 off, v90, s32 offset:208
+; GFX11-TRUE16-NEXT: scratch_store_b32 off, v91, s32 offset:204
+; GFX11-TRUE16-NEXT: scratch_store_b32 off, v92, s32 offset:200
+; GFX11-TRUE16-NEXT: scratch_store_b32 off, v93, s32 offset:196
+; GFX11-TRUE16-NEXT: scratch_store_b32 off, v94, s32 offset:192
+; GFX11-TRUE16-NEXT: scratch_store_b32 off, v95, s32 offset:188
+; GFX11-TRUE16-NEXT: s_clause 0x1f
+; GFX11-TRUE16-NEXT: scratch_store_b32 off, v104, s32 offset:184
+; GFX11-TRUE16-NEXT: scratch_store_b32 off, v105, s32 offset:180
+; GFX11-TRUE16-NEXT: scratch_store_b32 off, v106, s32 offset:176
+; GFX11-TRUE16-NEXT: scratch_store_b32 off, v107, s32 offset:172
+; GFX11-TRUE16-NEXT: scratch_store_b32 off, v108, s32 offset:168
+; GFX11-TRUE16-NEXT: scratch_store_b32 off, v109, s32 offset:164
+; GFX11-TRUE16-NEXT: scratch_store_b32 off, v110, s32 offset:160
+; GFX11-TRUE16-NEXT: scratch_store_b32 off, v111, s32 offset:156
+; GFX11-TRUE16-NEXT: scratch_store_b32 off, v120, s32 offset:152
+; GFX11-TRUE16-NEXT: scratch_store_b32 off, v121, s32 offset:148
+; GFX11-TRUE16-NEXT: scratch_store_b32 off, v122, s32 offset:144
+; GFX11-TRUE16-NEXT: scratch_store_b32 off, v123, s32 offset:140
+; GFX11-TRUE16-NEXT: scratch_store_b32 off, v124, s32 offset:136
+; GFX11-TRUE16-NEXT: scratch_store_b32 off, v125, s32 offset:132
+; GFX11-TRUE16-NEXT: scratch_store_b32 off, v126, s32 offset:128
+; GFX11-TRUE16-NEXT: scratch_store_b32 off, v127, s32 offset:124
+; GFX11-TRUE16-NEXT: scratch_store_b32 off, v136, s32 offset:120
+; GFX11-TRUE16-NEXT: scratch_store_b32 off, v137, s32 offset:116
+; GFX11-TRUE16-NEXT: scratch_store_b32 off, v138, s32 offset:112
+; GFX11-TRUE16-NEXT: scratch_store_b32 off, v139, s32 offset:108
+; GFX11-TRUE16-NEXT: scratch_store_b32 off, v140, s32 offset:104
+; GFX11-TRUE16-NEXT: scratch_store_b32 off, v141, s32 offset:100
+; GFX11-TRUE16-NEXT: scratch_store_b32 off, v142, s32 offset:96
+; GFX11-TRUE16-NEXT: scratch_store_b32 off, v143, s32 offset:92
+; GFX11-TRUE16-NEXT: scratch_store_b32 off, v152, s32 offset:88
+; GFX11-TRUE16-NEXT: scratch_store_b32 off, v153, s32 offset:84
+; GFX11-TRUE16-NEXT: scratch_store_b32 off, v154, s32 offset:80
+; GFX11-TRUE16-NEXT: scratch_store_b32 off, v155, s32 offset:76
+; GFX11-TRUE16-NEXT: scratch_store_b32 off, v156, s32 offset:72
+; GFX11-TRUE16-NEXT: scratch_store_b32 off, v157, s32 offset:68
+; GFX11-TRUE16-NEXT: scratch_store_b32 off, v158, s32 offset:64
+; GFX11-TRUE16-NEXT: scratch_store_b32 off, v159, s32 offset:60
+; GFX11-TRUE16-NEXT: s_clause 0xe
+; GFX11-TRUE16-NEXT: scratch_store_b32 off, v168, s32 offset:56
+; GFX11-TRUE16-NEXT: scratch_store_b32 off, v169, s32 offset:52
+; GFX11-TRUE16-NEXT: scratch_store_b32 off, v170, s32 offset:48
+; GFX11-TRUE16-NEXT: scratch_store_b32 off, v171, s32 offset:44
+; GFX11-TRUE16-NEXT: scratch_store_b32 off, v172, s32 offset:40
+; GFX11-TRUE16-NEXT: scratch_store_b32 off, v173, s32 offset:36
+; GFX11-TRUE16-NEXT: scratch_store_b32 off, v174, s32 offset:32
+; GFX11-TRUE16-NEXT: scratch_store_b32 off, v175, s32 offset:28
+; GFX11-TRUE16-NEXT: scratch_store_b32 off, v184, s32 offset:24
+; GFX11-TRUE16-NEXT: scratch_store_b32 off, v185, s32 offset:20
+; GFX11-TRUE16-NEXT: scratch_store_b32 off, v186, s32 offset:16
+; GFX11-TRUE16-NEXT: scratch_store_b32 off, v187, s32 offset:12
+; GFX11-TRUE16-NEXT: scratch_store_b32 off, v188, s32 offset:8
+; GFX11-TRUE16-NEXT: scratch_store_b32 off, v189, s32 offset:4
+; GFX11-TRUE16-NEXT: scratch_store_b32 off, v190, s32
+; GFX11-TRUE16-NEXT: v_dual_mov_b32 v185, v5 :: v_dual_mov_b32 v186, v4
+; GFX11-TRUE16-NEXT: v_dual_mov_b32 v187, v3 :: v_dual_mov_b32 v188, v2
+; GFX11-TRUE16-NEXT: v_dual_mov_b32 v189, v1 :: v_dual_mov_b32 v190, v0
+; GFX11-TRUE16-NEXT: s_lshr_b32 s15, s29, 16
+; GFX11-TRUE16-NEXT: s_lshr_b32 s14, s28, 16
+; GFX11-TRUE16-NEXT: s_lshr_b32 s13, s27, 16
+; GFX11-TRUE16-NEXT: s_lshr_b32 s12, s26, 16
+; GFX11-TRUE16-NEXT: s_lshr_b32 s11, s25, 16
+; GFX11-TRUE16-NEXT: s_lshr_b32 s10, s24, 16
+; GFX11-TRUE16-NEXT: s_lshr_b32 s9, s23, 16
+; GFX11-TRUE16-NEXT: s_lshr_b32 s8, s22, 16
+; GFX11-TRUE16-NEXT: s_lshr_b32 s7, s21, 16
+; GFX11-TRUE16-NEXT: s_lshr_b32 s6, s20, 16
+; GFX11-TRUE16-NEXT: s_lshr_b32 s5, s19, 16
+; GFX11-TRUE16-NEXT: s_lshr_b32 s4, s18, 16
+; GFX11-TRUE16-NEXT: s_lshr_b32 s43, s17, 16
+; GFX11-TRUE16-NEXT: s_lshr_b32 s44, s16, 16
+; GFX11-TRUE16-NEXT: s_lshr_b32 s45, s3, 16
+; GFX11-TRUE16-NEXT: s_lshr_b32 s46, s2, 16
+; GFX11-TRUE16-NEXT: s_lshr_b32 s41, s1, 16
+; GFX11-TRUE16-NEXT: s_lshr_b32 s40, s0, 16
+; GFX11-TRUE16-NEXT: s_mov_b32 s42, 0
+; GFX11-TRUE16-NEXT: s_pack_ll_b32_b16 s40, s0, s40
+; GFX11-TRUE16-NEXT: s_pack_ll_b32_b16 s41, s1, s41
+; GFX11-TRUE16-NEXT: s_pack_ll_b32_b16 s0, s2, s46
+; GFX11-TRUE16-NEXT: s_pack_ll_b32_b16 s1, s3, s45
+; GFX11-TRUE16-NEXT: s_pack_ll_b32_b16 s2, s16, s44
+; GFX11-TRUE16-NEXT: s_pack_ll_b32_b16 s3, s17, s43
+; GFX11-TRUE16-NEXT: s_pack_ll_b32_b16 s4, s18, s4
+; GFX11-TRUE16-NEXT: s_pack_ll_b32_b16 s5, s19, s5
+; GFX11-TRUE16-NEXT: s_pack_ll_b32_b16 s6, s20, s6
+; GFX11-TRUE16-NEXT: s_pack_ll_b32_b16 s7, s21, s7
+; GFX11-TRUE16-NEXT: s_pack_ll_b32_b16 s8, s22, s8
+; GFX11-TRUE16-NEXT: s_pack_ll_b32_b16 s9, s23, s9
+; GFX11-TRUE16-NEXT: s_pack_ll_b32_b16 s10, s24, s10
+; GFX11-TRUE16-NEXT: s_pack_ll_b32_b16 s11, s25, s11
+; GFX11-TRUE16-NEXT: s_pack_ll_b32_b16 s12, s26, s12
+; GFX11-TRUE16-NEXT: s_pack_ll_b32_b16 s13, s27, s13
+; GFX11-TRUE16-NEXT: s_pack_ll_b32_b16 s14, s28, s14
+; GFX11-TRUE16-NEXT: s_pack_ll_b32_b16 s15, s29, s15
; GFX11-TRUE16-NEXT: s_and_b32 s47, vcc_lo, exec_lo
; GFX11-TRUE16-NEXT: s_cbranch_scc0 .LBB31_4
; GFX11-TRUE16-NEXT: ; %bb.1: ; %cmp.false
-; GFX11-TRUE16-NEXT: v_lshl_or_b32 v18, v37, 16, v51
-; GFX11-TRUE16-NEXT: v_lshl_or_b32 v19, v36, 16, v50
-; GFX11-TRUE16-NEXT: v_lshl_or_b32 v20, v35, 16, v49
-; GFX11-TRUE16-NEXT: v_lshl_or_b32 v21, v34, 16, v48
-; GFX11-TRUE16-NEXT: v_lshl_or_b32 v22, v33, 16, v39
-; GFX11-TRUE16-NEXT: v_lshl_or_b32 v23, v32, 16, v38
-; GFX11-TRUE16-NEXT: v_dual_mov_b32 v0, s4 :: v_dual_mov_b32 v1, s1
-; GFX11-TRUE16-NEXT: v_dual_mov_b32 v2, s2 :: v_dual_mov_b32 v3, s3
-; GFX11-TRUE16-NEXT: v_dual_mov_b32 v4, s5 :: v_dual_mov_b32 v5, s6
-; GFX11-TRUE16-NEXT: v_dual_mov_b32 v6, s7 :: v_dual_mov_b32 v7, s8
-; GFX11-TRUE16-NEXT: v_dual_mov_b32 v8, s9 :: v_dual_mov_b32 v9, s10
-; GFX11-TRUE16-NEXT: v_dual_mov_b32 v10, s11 :: v_dual_mov_b32 v11, s12
-; GFX11-TRUE16-NEXT: v_dual_mov_b32 v12, s13 :: v_dual_mov_b32 v13, s14
-; GFX11-TRUE16-NEXT: v_dual_mov_b32 v14, s0 :: v_dual_mov_b32 v15, s15
-; GFX11-TRUE16-NEXT: v_dual_mov_b32 v16, s16 :: v_dual_mov_b32 v17, s17
-; GFX11-TRUE16-NEXT: s_and_not1_b32 vcc_lo, exec_lo, s40
+; GFX11-TRUE16-NEXT: v_dual_mov_b32 v0, s40 :: v_dual_mov_b32 v5, s0
+; GFX11-TRUE16-NEXT: v_dual_mov_b32 v2, s41 :: v_dual_mov_b32 v9, s1
+; GFX11-TRUE16-NEXT: v_dual_mov_b32 v14, s2 :: v_dual_mov_b32 v27, s4
+; GFX11-TRUE16-NEXT: v_dual_mov_b32 v20, s3 :: v_dual_mov_b32 v35, s5
+; GFX11-TRUE16-NEXT: v_dual_mov_b32 v44, s6 :: v_dual_mov_b32 v65, s8
+; GFX11-TRUE16-NEXT: v_dual_mov_b32 v54, s7 :: v_dual_mov_b32 v77, s9
+; GFX11-TRUE16-NEXT: v_dual_mov_b32 v90, s10 :: v_dual_mov_b32 v119, s12
+; GFX11-TRUE16-NEXT: v_dual_mov_b32 v104, s11 :: v_dual_mov_b32 v135, s13
+; GFX11-TRUE16-NEXT: v_mov_b32_e32 v152, s14
+; GFX11-TRUE16-NEXT: v_mov_b32_e32 v170, s15
+; GFX11-TRUE16-NEXT: s_and_not1_b32 vcc_lo, exec_lo, s42
; GFX11-TRUE16-NEXT: s_cbranch_vccnz .LBB31_3
; GFX11-TRUE16-NEXT: .LBB31_2: ; %cmp.true
-; GFX11-TRUE16-NEXT: v_lshl_or_b32 v18, v37, 16, v51
-; GFX11-TRUE16-NEXT: v_lshl_or_b32 v19, v36, 16, v50
-; GFX11-TRUE16-NEXT: v_lshl_or_b32 v20, v35, 16, v49
-; GFX11-TRUE16-NEXT: v_lshl_or_b32 v21, v34, 16, v48
-; GFX11-TRUE16-NEXT: v_lshl_or_b32 v22, v33, 16, v39
-; GFX11-TRUE16-NEXT: v_lshl_or_b32 v23, v32, 16, v38
-; GFX11-TRUE16-NEXT: v_pk_add_u16 v0, s4, 3 op_sel_hi:[1,0]
-; GFX11-TRUE16-NEXT: v_pk_add_u16 v1, s1, 3 op_sel_hi:[1,0]
-; GFX11-TRUE16-NEXT: v_pk_add_u16 v2, s2, 3 op_sel_hi:[1,0]
-; GFX11-TRUE16-NEXT: v_pk_add_u16 v3, s3, 3 op_sel_hi:[1,0]
-; GFX11-TRUE16-NEXT: v_pk_add_u16 v4, s5, 3 op_sel_hi:[1,0]
-; GFX11-TRUE16-NEXT: v_pk_add_u16 v5, s6, 3 op_sel_hi:[1,0]
-; GFX11-TRUE16-NEXT: v_pk_add_u16 v6, s7, 3 op_sel_hi:[1,0]
-; GFX11-TRUE16-NEXT: v_pk_add_u16 v7, s8, 3 op_sel_hi:[1,0]
-; GFX11-TRUE16-NEXT: v_pk_add_u16 v8, s9, 3 op_sel_hi:[1,0]
-; GFX11-TRUE16-NEXT: v_pk_add_u16 v9, s10, 3 op_sel_hi:[1,0]
-; GFX11-TRUE16-NEXT: v_pk_add_u16 v10, s11, 3 op_sel_hi:[1,0]
-; GFX11-TRUE16-NEXT: v_pk_add_u16 v11, s12, 3 op_sel_hi:[1,0]
-; GFX11-TRUE16-NEXT: v_pk_add_u16 v12, s13, 3 op_sel_hi:[1,0]
-; GFX11-TRUE16-NEXT: v_pk_add_u16 v13, s14, 3 op_sel_hi:[1,0]
-; GFX11-TRUE16-NEXT: v_pk_add_u16 v14, s0, 3 op_sel_hi:[1,0]
-; GFX11-TRUE16-NEXT: v_pk_add_u16 v15, s15, 3 op_sel_hi:[1,0]
-; GFX11-TRUE16-NEXT: v_pk_add_u16 v16, s16, 3 op_sel_hi:[1,0]
-; GFX11-TRUE16-NEXT: v_pk_add_u16 v17, s17, 3 op_sel_hi:[1,0]
-; GFX11-TRUE16-NEXT: v_pk_add_u16 v18, v18, 3 op_sel_hi:[1,0]
-; GFX11-TRUE16-NEXT: v_pk_add_u16 v19, v19, 3 op_sel_hi:[1,0]
-; GFX11-TRUE16-NEXT: v_pk_add_u16 v20, v20, 3 op_sel_hi:[1,0]
-; GFX11-TRUE16-NEXT: v_pk_add_u16 v21, v21, 3 op_sel_hi:[1,0]
-; GFX11-TRUE16-NEXT: v_pk_add_u16 v22, v22, 3 op_sel_hi:[1,0]
-; GFX11-TRUE16-NEXT: v_pk_add_u16 v23, v23, 3 op_sel_hi:[1,0]
+; GFX11-TRUE16-NEXT: v_pk_add_u16 v0, s40, 3 op_sel_hi:[1,0]
+; GFX11-TRUE16-NEXT: v_pk_add_u16 v2, s41, 3 op_sel_hi:[1,0]
+; GFX11-TRUE16-NEXT: v_pk_add_u16 v190, v190, 3 op_sel_hi:[1,0]
+; GFX11-TRUE16-NEXT: v_pk_add_u16 v189, v189, 3 op_sel_hi:[1,0]
+; GFX11-TRUE16-NEXT: v_pk_add_u16 v188, v188, 3 op_sel_hi:[1,0]
+; GFX11-TRUE16-NEXT: v_pk_add_u16 v187, v187, 3 op_sel_hi:[1,0]
+; GFX11-TRUE16-NEXT: v_pk_add_u16 v186, v186, 3 op_sel_hi:[1,0]
+; GFX11-TRUE16-NEXT: v_pk_add_u16 v185, v185, 3 op_sel_hi:[1,0]
+; GFX11-TRUE16-NEXT: v_pk_add_u16 v5, s0, 3 op_sel_hi:[1,0]
+; GFX11-TRUE16-NEXT: v_pk_add_u16 v9, s1, 3 op_sel_hi:[1,0]
+; GFX11-TRUE16-NEXT: v_pk_add_u16 v14, s2, 3 op_sel_hi:[1,0]
+; GFX11-TRUE16-NEXT: v_pk_add_u16 v20, s3, 3 op_sel_hi:[1,0]
+; GFX11-TRUE16-NEXT: v_pk_add_u16 v27, s4, 3 op_sel_hi:[1,0]
+; GFX11-TRUE16-NEXT: v_pk_add_u16 v35, s5, 3 op_sel_hi:[1,0]
+; GFX11-TRUE16-NEXT: v_pk_add_u16 v44, s6, 3 op_sel_hi:[1,0]
+; GFX11-TRUE16-NEXT: v_pk_add_u16 v54, s7, 3 op_sel_hi:[1,0]
+; GFX11-TRUE16-NEXT: v_pk_add_u16 v65, s8, 3 op_sel_hi:[1,0]
+; GFX11-TRUE16-NEXT: v_pk_add_u16 v77, s9, 3 op_sel_hi:[1,0]
+; GFX11-TRUE16-NEXT: v_pk_add_u16 v90, s10, 3 op_sel_hi:[1,0]
+; GFX11-TRUE16-NEXT: v_pk_add_u16 v104, s11, 3 op_sel_hi:[1,0]
+; GFX11-TRUE16-NEXT: v_pk_add_u16 v119, s12, 3 op_sel_hi:[1,0]
+; GFX11-TRUE16-NEXT: v_pk_add_u16 v135, s13, 3 op_sel_hi:[1,0]
+; GFX11-TRUE16-NEXT: v_pk_add_u16 v152, s14, 3 op_sel_hi:[1,0]
+; GFX11-TRUE16-NEXT: v_pk_add_u16 v170, s15, 3 op_sel_hi:[1,0]
; GFX11-TRUE16-NEXT: .LBB31_3: ; %end
+; GFX11-TRUE16-NEXT: v_dual_mov_b32 v1, v2 :: v_dual_mov_b32 v2, v5
+; GFX11-TRUE16-NEXT: v_dual_mov_b32 v5, v20 :: v_dual_mov_b32 v6, v27
+; GFX11-TRUE16-NEXT: v_dual_mov_b32 v7, v35 :: v_dual_mov_b32 v8, v44
+; GFX11-TRUE16-NEXT: v_dual_mov_b32 v11, v77 :: v_dual_mov_b32 v12, v90
+; GFX11-TRUE16-NEXT: v_mov_b32_e32 v13, v104
+; GFX11-TRUE16-NEXT: v_dual_mov_b32 v15, v135 :: v_dual_mov_b32 v16, v152
+; GFX11-TRUE16-NEXT: v_dual_mov_b32 v17, v170 :: v_dual_mov_b32 v20, v188
+; GFX11-TRUE16-NEXT: v_dual_mov_b32 v18, v190 :: v_dual_mov_b32 v19, v189
+; GFX11-TRUE16-NEXT: v_dual_mov_b32 v21, v187 :: v_dual_mov_b32 v22, v186
+; GFX11-TRUE16-NEXT: v_mov_b32_e32 v23, v185
+; GFX11-TRUE16-NEXT: s_clause 0x1f
+; GFX11-TRUE16-NEXT: scratch_load_b32 v190, off, s32
+; GFX11-TRUE16-NEXT: scratch_load_b32 v189, off, s32 offset:4
+; GFX11-TRUE16-NEXT: scratch_load_b32 v188, off, s32 offset:8
+; GFX11-TRUE16-NEXT: scratch_load_b32 v187, off, s32 offset:12
+; GFX11-TRUE16-NEXT: scratch_load_b32 v186, off, s32 offset:16
+; GFX11-TRUE16-NEXT: scratch_load_b32 v185, off, s32 offset:20
+; GFX11-TRUE16-NEXT: scratch_load_b32 v184, off, s32 offset:24
+; GFX11-TRUE16-NEXT: scratch_load_b32 v175, off, s32 offset:28
+; GFX11-TRUE16-NEXT: scratch_load_b32 v174, off, s32 offset:32
+; GFX11-TRUE16-NEXT: scratch_load_b32 v173, off, s32 offset:36
+; GFX11-TRUE16-NEXT: scratch_load_b32 v172, off, s32 offset:40
+; GFX11-TRUE16-NEXT: scratch_load_b32 v171, off, s32 offset:44
+; GFX11-TRUE16-NEXT: scratch_load_b32 v170, off, s32 offset:48
+; GFX11-TRUE16-NEXT: scratch_load_b32 v169, off, s32 offset:52
+; GFX11-TRUE16-NEXT: scratch_load_b32 v168, off, s32 offset:56
+; GFX11-TRUE16-NEXT: scratch_load_b32 v159, off, s32 offset:60
+; GFX11-TRUE16-NEXT: scratch_load_b32 v158, off, s32 offset:64
+; GFX11-TRUE16-NEXT: scratch_load_b32 v157, off, s32 offset:68
+; GFX11-TRUE16-NEXT: scratch_load_b32 v156, off, s32 offset:72
+; GFX11-TRUE16-NEXT: scratch_load_b32 v155, off, s32 offset:76
+; GFX11-TRUE16-NEXT: scratch_load_b32 v154, off, s32 offset:80
+; GFX11-TRUE16-NEXT: scratch_load_b32 v153, off, s32 offset:84
+; GFX11-TRUE16-NEXT: scratch_load_b32 v152, off, s32 offset:88
+; GFX11-TRUE16-NEXT: scratch_load_b32 v143, off, s32 offset:92
+; GFX11-TRUE16-NEXT: scratch_load_b32 v142, off, s32 offset:96
+; GFX11-TRUE16-NEXT: scratch_load_b32 v141, off, s32 offset:100
+; GFX11-TRUE16-NEXT: scratch_load_b32 v140, off, s32 offset:104
+; GFX11-TRUE16-NEXT: scratch_load_b32 v139, off, s32 offset:108
+; GFX11-TRUE16-NEXT: scratch_load_b32 v138, off, s32 offset:112
+; GFX11-TRUE16-NEXT: scratch_load_b32 v137, off, s32 offset:116
+; GFX11-TRUE16-NEXT: scratch_load_b32 v136, off, s32 offset:120
+; GFX11-TRUE16-NEXT: scratch_load_b32 v127, off, s32 offset:124
+; GFX11-TRUE16-NEXT: s_clause 0x1f
+; GFX11-TRUE16-NEXT: scratch_load_b32 v126, off, s32 offset:128
+; GFX11-TRUE16-NEXT: scratch_load_b32 v125, off, s32 offset:132
+; GFX11-TRUE16-NEXT: scratch_load_b32 v124, off, s32 offset:136
+; GFX11-TRUE16-NEXT: scratch_load_b32 v123, off, s32 offset:140
+; GFX11-TRUE16-NEXT: scratch_load_b32 v122, off, s32 offset:144
+; GFX11-TRUE16-NEXT: scratch_load_b32 v121, off, s32 offset:148
+; GFX11-TRUE16-NEXT: scratch_load_b32 v120, off, s32 offset:152
+; GFX11-TRUE16-NEXT: scratch_load_b32 v111, off, s32 offset:156
+; GFX11-TRUE16-NEXT: scratch_load_b32 v110, off, s32 offset:160
+; GFX11-TRUE16-NEXT: scratch_load_b32 v109, off, s32 offset:164
+; GFX11-TRUE16-NEXT: scratch_load_b32 v108, off, s32 offset:168
+; GFX11-TRUE16-NEXT: scratch_load_b32 v107, off, s32 offset:172
+; GFX11-TRUE16-NEXT: scratch_load_b32 v106, off, s32 offset:176
+; GFX11-TRUE16-NEXT: scratch_load_b32 v105, off, s32 offset:180
+; GFX11-TRUE16-NEXT: scratch_load_b32 v104, off, s32 offset:184
+; GFX11-TRUE16-NEXT: scratch_load_b32 v95, off, s32 offset:188
+; GFX11-TRUE16-NEXT: scratch_load_b32 v94, off, s32 offset:192
+; GFX11-TRUE16-NEXT: scratch_load_b32 v93, off, s32 offset:196
+; GFX11-TRUE16-NEXT: scratch_load_b32 v92, off, s32 offset:200
+; GFX11-TRUE16-NEXT: scratch_load_b32 v91, off, s32 offset:204
+; GFX11-TRUE16-NEXT: scratch_load_b32 v90, off, s32 offset:208
+; GFX11-TRUE16-NEXT: scratch_load_b32 v89, off, s32 offset:212
+; GFX11-TRUE16-NEXT: scratch_load_b32 v88, off, s32 offset:216
+; GFX11-TRUE16-NEXT: scratch_load_b32 v79, off, s32 offset:220
+; GFX11-TRUE16-NEXT: scratch_load_b32 v78, off, s32 offset:224
+; GFX11-TRUE16-NEXT: scratch_load_b32 v77, off, s32 offset:228
+; GFX11-TRUE16-NEXT: scratch_load_b32 v76, off, s32 offset:232
+; GFX11-TRUE16-NEXT: scratch_load_b32 v75, off, s32 offset:236
+; GFX11-TRUE16-NEXT: scratch_load_b32 v74, off, s32 offset:240
+; GFX11-TRUE16-NEXT: scratch_load_b32 v73, off, s32 offset:244
+; GFX11-TRUE16-NEXT: scratch_load_b32 v72, off, s32 offset:248
+; GFX11-TRUE16-NEXT: scratch_load_b32 v63, off, s32 offset:252
+; GFX11-TRUE16-NEXT: s_clause 0xe
+; GFX11-TRUE16-NEXT: scratch_load_b32 v62, off, s32 offset:256
+; GFX11-TRUE16-NEXT: scratch_load_b32 v61, off, s32 offset:260
+; GFX11-TRUE16-NEXT: scratch_load_b32 v60, off, s32 offset:264
+; GFX11-TRUE16-NEXT: scratch_load_b32 v59, off, s32 offset:268
+; GFX11-TRUE16-NEXT: scratch_load_b32 v58, off, s32 offset:272
+; GFX11-TRUE16-NEXT: scratch_load_b32 v57, off, s32 offset:276
+; GFX11-TRUE16-NEXT: scratch_load_b32 v56, off, s32 offset:280
+; GFX11-TRUE16-NEXT: scratch_load_b32 v47, off, s32 offset:284
+; GFX11-TRUE16-NEXT: scratch_load_b32 v46, off, s32 offset:288
+; GFX11-TRUE16-NEXT: scratch_load_b32 v45, off, s32 offset:292
+; GFX11-TRUE16-NEXT: scratch_load_b32 v44, off, s32 offset:296
+; GFX11-TRUE16-NEXT: scratch_load_b32 v43, off, s32 offset:300
+; GFX11-TRUE16-NEXT: scratch_load_b32 v42, off, s32 offset:304
+; GFX11-TRUE16-NEXT: scratch_load_b32 v41, off, s32 offset:308
+; GFX11-TRUE16-NEXT: scratch_load_b32 v40, off, s32 offset:312
+; GFX11-TRUE16-NEXT: v_dual_mov_b32 v3, v9 :: v_dual_mov_b32 v4, v14
+; GFX11-TRUE16-NEXT: v_dual_mov_b32 v9, v54 :: v_dual_mov_b32 v10, v65
+; GFX11-TRUE16-NEXT: v_mov_b32_e32 v14, v119
+; GFX11-TRUE16-NEXT: s_waitcnt vmcnt(0)
; GFX11-TRUE16-NEXT: s_setpc_b64 s[30:31]
; GFX11-TRUE16-NEXT: .LBB31_4:
; GFX11-TRUE16-NEXT: ; implicit-def: $vgpr0_vgpr1_vgpr2_vgpr3_vgpr4_vgpr5_vgpr6_vgpr7_vgpr8_vgpr9_vgpr10_vgpr11_vgpr12_vgpr13_vgpr14_vgpr15_vgpr16_vgpr17_vgpr18_vgpr19_vgpr20_vgpr21_vgpr22_vgpr23_vgpr24_vgpr25_vgpr26_vgpr27_vgpr28_vgpr29_vgpr30_vgpr31
+; GFX11-TRUE16-NEXT: ; implicit-def: $vgpr1_vgpr2_vgpr3_vgpr4_vgpr5_vgpr6_vgpr7_vgpr8_vgpr9_vgpr10_vgpr11_vgpr12_vgpr13_vgpr14_vgpr15_vgpr16_vgpr17_vgpr18_vgpr19_vgpr20_vgpr21_vgpr22_vgpr23_vgpr24_vgpr25_vgpr26_vgpr27_vgpr28_vgpr29_vgpr30_vgpr31_vgpr32
+; GFX11-TRUE16-NEXT: ; implicit-def: $vgpr3_vgpr4_vgpr5_vgpr6_vgpr7_vgpr8_vgpr9_vgpr10_vgpr11_vgpr12_vgpr13_vgpr14_vgpr15_vgpr16_vgpr17_vgpr18_vgpr19_vgpr20_vgpr21_vgpr22_vgpr23_vgpr24_vgpr25_vgpr26_vgpr27_vgpr28_vgpr29_vgpr30_vgpr31_vgpr32_vgpr33_vgpr34
+; GFX11-TRUE16-NEXT: ; implicit-def: $vgpr6_vgpr7_vgpr8_vgpr9_vgpr10_vgpr11_vgpr12_vgpr13_vgpr14_vgpr15_vgpr16_vgpr17_vgpr18_vgpr19_vgpr20_vgpr21_vgpr22_vgpr23_vgpr24_vgpr25_vgpr26_vgpr27_vgpr28_vgpr29_vgpr30_vgpr31_vgpr32_vgpr33_vgpr34_vgpr35_vgpr36_vgpr37
+; GFX11-TRUE16-NEXT: ; implicit-def: $vgpr10_vgpr11_vgpr12_vgpr13_vgpr14_vgpr15_vgpr16_vgpr17_vgpr18_vgpr19_vgpr20_vgpr21_vgpr22_vgpr23_vgpr24_vgpr25_vgpr26_vgpr27_vgpr28_vgpr29_vgpr30_vgpr31_vgpr32_vgpr33_vgpr34_vgpr35_vgpr36_vgpr37_vgpr38_vgpr39_vgpr40_vgpr41
+; GFX11-TRUE16-NEXT: ; implicit-def: $vgpr15_vgpr16_vgpr17_vgpr18_vgpr19_vgpr20_vgpr21_vgpr22_vgpr23_vgpr24_vgpr25_vgpr26_vgpr27_vgpr28_vgpr29_vgpr30_vgpr31_vgpr32_vgpr33_vgpr34_vgpr35_vgpr36_vgpr37_vgpr38_vgpr39_vgpr40_vgpr41_vgpr42_vgpr43_vgpr44_vgpr45_vgpr46
+; GFX11-TRUE16-NEXT: ; implicit-def: $vgpr21_vgpr22_vgpr23_vgpr24_vgpr25_vgpr26_vgpr27_vgpr28_vgpr29_vgpr30_vgpr31_vgpr32_vgpr33_vgpr34_vgpr35_vgpr36_vgpr37_vgpr38_vgpr39_vgpr40_vgpr41_vgpr42_vgpr43_vgpr44_vgpr45_vgpr46_vgpr47_vgpr48_vgpr49_vgpr50_vgpr51_vgpr52
+; GFX11-TRUE16-NEXT: ; implicit-def: $vgpr28_vgpr29_vgpr30_vgpr31_vgpr32_vgpr33_vgpr34_vgpr35_vgpr36_vgpr37_vgpr38_vgpr39_vgpr40_vgpr41_vgpr42_vgpr43_vgpr44_vgpr45_vgpr46_vgpr47_vgpr48_vgpr49_vgpr50_vgpr51_vgpr52_vgpr53_vgpr54_vgpr55_vgpr56_vgpr57_vgpr58_vgpr59
+; GFX11-TRUE16-NEXT: ; implicit-def: $vgpr36_vgpr37_vgpr38_vgpr39_vgpr40_vgpr41_vgpr42_vgpr43_vgpr44_vgpr45_vgpr46_vgpr47_vgpr48_vgpr49_vgpr50_vgpr51_vgpr52_vgpr53_vgpr54_vgpr55_vgpr56_vgpr57_vgpr58_vgpr59_vgpr60_vgpr61_vgpr62_vgpr63_vgpr64_vgpr65_vgpr66_vgpr67
+; GFX11-TRUE16-NEXT: ; implicit-def: $vgpr45_vgpr46_vgpr47_vgpr48_vgpr49_vgpr50_vgpr51_vgpr52_vgpr53_vgpr54_vgpr55_vgpr56_vgpr57_vgpr58_vgpr59_vgpr60_vgpr61_vgpr62_vgpr63_vgpr64_vgpr65_vgpr66_vgpr67_vgpr68_vgpr69_vgpr70_vgpr71_vgpr72_vgpr73_vgpr74_vgpr75_vgpr76
+; GFX11-TRUE16-NEXT: ; implicit-def: $vgpr55_vgpr56_vgpr57_vgpr58_vgpr59_vgpr60_vgpr61_vgpr62_vgpr63_vgpr64_vgpr65_vgpr66_vgpr67_vgpr68_vgpr69_vgpr70_vgpr71_vgpr72_vgpr73_vgpr74_vgpr75_vgpr76_vgpr77_vgpr78_vgpr79_vgpr80_vgpr81_vgpr82_vgpr83_vgpr84_vgpr85_vgpr86
+; GFX11-TRUE16-NEXT: ; implicit-def: $vgpr66_vgpr67_vgpr68_vgpr69_vgpr70_vgpr71_vgpr72_vgpr73_vgpr74_vgpr75_vgpr76_vgpr77_vgpr78_vgpr79_vgpr80_vgpr81_vgpr82_vgpr83_vgpr84_vgpr85_vgpr86_vgpr87_vgpr88_vgpr89_vgpr90_vgpr91_vgpr92_vgpr93_vgpr94_vgpr95_vgpr96_vgpr97
+; GFX11-TRUE16-NEXT: ; implicit-def: $vgpr78_vgpr79_vgpr80_vgpr81_vgpr82_vgpr83_vgpr84_vgpr85_vgpr86_vgpr87_vgpr88_vgpr89_vgpr90_vgpr91_vgpr92_vgpr93_vgpr94_vgpr95_vgpr96_vgpr97_vgpr98_vgpr99_vgpr100_vgpr101_vgpr102_vgpr103_vgpr104_vgpr105_vgpr106_vgpr107_vgpr108_vgpr109
+; GFX11-TRUE16-NEXT: ; implicit-def: $vgpr91_vgpr92_vgpr93_vgpr94_vgpr95_vgpr96_vgpr97_vgpr98_vgpr99_vgpr100_vgpr101_vgpr102_vgpr103_vgpr104_vgpr105_vgpr106_vgpr107_vgpr108_vgpr109_vgpr110_vgpr111_vgpr112_vgpr113_vgpr114_vgpr115_vgpr116_vgpr117_vgpr118_vgpr119_vgpr120_vgpr121_vgpr122
+; GFX11-TRUE16-NEXT: ; implicit-def: $vgpr105_vgpr106_vgpr107_vgpr108_vgpr109_vgpr110_vgpr111_vgpr112_vgpr113_vgpr114_vgpr115_vgpr116_vgpr117_vgpr118_vgpr119_vgpr120_vgpr121_vgpr122_vgpr123_vgpr124_vgpr125_vgpr126_vgpr127_vgpr128_vgpr129_vgpr130_vgpr131_vgpr132_vgpr133_vgpr134_vgpr135_vgpr136
+; GFX11-TRUE16-NEXT: ; implicit-def: $vgpr120_vgpr121_vgpr122_vgpr123_vgpr124_vgpr125_vgpr126_vgpr127_vgpr128_vgpr129_vgpr130_vgpr131_vgpr132_vgpr133_vgpr134_vgpr135_vgpr136_vgpr137_vgpr138_vgpr139_vgpr140_vgpr141_vgpr142_vgpr143_vgpr144_vgpr145_vgpr146_vgpr147_vgpr148_vgpr149_vgpr150_vgpr151
+; GFX11-TRUE16-NEXT: ; implicit-def: $vgpr136_vgpr137_vgpr138_vgpr139_vgpr140_vgpr141_vgpr142_vgpr143_vgpr144_vgpr145_vgpr146_vgpr147_vgpr148_vgpr149_vgpr150_vgpr151_vgpr152_vgpr153_vgpr154_vgpr155_vgpr156_vgpr157_vgpr158_vgpr159_vgpr160_vgpr161_vgpr162_vgpr163_vgpr164_vgpr165_vgpr166_vgpr167
+; GFX11-TRUE16-NEXT: ; implicit-def: $vgpr153_vgpr154_vgpr155_vgpr156_vgpr157_vgpr158_vgpr159_vgpr160_vgpr161_vgpr162_vgpr163_vgpr164_vgpr165_vgpr166_vgpr167_vgpr168_vgpr169_vgpr170_vgpr171_vgpr172_vgpr173_vgpr174_vgpr175_vgpr176_vgpr177_vgpr178_vgpr179_vgpr180_vgpr181_vgpr182_vgpr183_vgpr184
; GFX11-TRUE16-NEXT: s_branch .LBB31_2
;
; GFX11-FAKE16-LABEL: bitcast_v48i16_to_v24f32_scalar:
@@ -17167,166 +17825,317 @@ define inreg <48 x half> @bitcast_v24f32_to_v48f16_scalar(<24 x float> inreg %a,
; GFX9-NEXT: ; implicit-def: $vgpr34
; GFX9-NEXT: s_branch .LBB33_2
;
-; GFX11-LABEL: bitcast_v24f32_to_v48f16_scalar:
-; GFX11: ; %bb.0:
-; GFX11-NEXT: s_waitcnt vmcnt(0) expcnt(0) lgkmcnt(0)
-; GFX11-NEXT: v_cmp_ne_u32_e32 vcc_lo, 0, v6
-; GFX11-NEXT: v_dual_mov_b32 v24, s0 :: v_dual_mov_b32 v23, s1
-; GFX11-NEXT: v_dual_mov_b32 v22, s2 :: v_dual_mov_b32 v21, s3
-; GFX11-NEXT: v_dual_mov_b32 v20, s16 :: v_dual_mov_b32 v19, s17
-; GFX11-NEXT: v_dual_mov_b32 v18, s18 :: v_dual_mov_b32 v7, s20
-; GFX11-NEXT: v_dual_mov_b32 v8, s19 :: v_dual_mov_b32 v13, s21
-; GFX11-NEXT: v_dual_mov_b32 v12, s22 :: v_dual_mov_b32 v11, s23
-; GFX11-NEXT: v_dual_mov_b32 v10, s24 :: v_dual_mov_b32 v9, s25
-; GFX11-NEXT: v_dual_mov_b32 v15, s26 :: v_dual_mov_b32 v14, s27
-; GFX11-NEXT: v_dual_mov_b32 v17, s28 :: v_dual_mov_b32 v16, s29
-; GFX11-NEXT: s_mov_b32 s0, 0
-; GFX11-NEXT: s_and_b32 s1, vcc_lo, exec_lo
-; GFX11-NEXT: s_cbranch_scc0 .LBB33_4
-; GFX11-NEXT: ; %bb.1: ; %cmp.false
-; GFX11-NEXT: v_lshrrev_b32_e32 v30, 16, v5
-; GFX11-NEXT: v_lshrrev_b32_e32 v31, 16, v4
-; GFX11-NEXT: v_lshrrev_b32_e32 v32, 16, v3
-; GFX11-NEXT: v_lshrrev_b32_e32 v33, 16, v2
-; GFX11-NEXT: v_lshrrev_b32_e32 v34, 16, v1
-; GFX11-NEXT: v_lshrrev_b32_e32 v35, 16, v0
-; GFX11-NEXT: v_lshrrev_b32_e32 v36, 16, v16
-; GFX11-NEXT: v_lshrrev_b32_e32 v37, 16, v17
-; GFX11-NEXT: v_lshrrev_b32_e32 v38, 16, v14
-; GFX11-NEXT: v_lshrrev_b32_e32 v39, 16, v15
-; GFX11-NEXT: v_lshrrev_b32_e32 v48, 16, v9
-; GFX11-NEXT: v_lshrrev_b32_e32 v49, 16, v10
-; GFX11-NEXT: v_lshrrev_b32_e32 v50, 16, v11
-; GFX11-NEXT: v_lshrrev_b32_e32 v51, 16, v12
-; GFX11-NEXT: v_lshrrev_b32_e32 v52, 16, v13
-; GFX11-NEXT: v_lshrrev_b32_e32 v53, 16, v7
-; GFX11-NEXT: v_lshrrev_b32_e32 v54, 16, v8
-; GFX11-NEXT: v_lshrrev_b32_e32 v6, 16, v18
-; GFX11-NEXT: v_lshrrev_b32_e32 v29, 16, v19
-; GFX11-NEXT: v_lshrrev_b32_e32 v28, 16, v20
-; GFX11-NEXT: v_lshrrev_b32_e32 v27, 16, v21
-; GFX11-NEXT: v_lshrrev_b32_e32 v26, 16, v22
-; GFX11-NEXT: v_lshrrev_b32_e32 v25, 16, v23
-; GFX11-NEXT: v_lshrrev_b32_e32 v55, 16, v24
-; GFX11-NEXT: s_and_not1_b32 vcc_lo, exec_lo, s0
-; GFX11-NEXT: s_cbranch_vccnz .LBB33_3
-; GFX11-NEXT: .LBB33_2: ; %cmp.true
-; GFX11-NEXT: v_dual_add_f32 v5, 1.0, v5 :: v_dual_add_f32 v4, 1.0, v4
-; GFX11-NEXT: v_dual_add_f32 v3, 1.0, v3 :: v_dual_add_f32 v2, 1.0, v2
-; GFX11-NEXT: v_dual_add_f32 v1, 1.0, v1 :: v_dual_add_f32 v0, 1.0, v0
-; GFX11-NEXT: v_dual_add_f32 v16, 1.0, v16 :: v_dual_add_f32 v17, 1.0, v17
-; GFX11-NEXT: v_dual_add_f32 v14, 1.0, v14 :: v_dual_add_f32 v15, 1.0, v15
-; GFX11-NEXT: v_dual_add_f32 v9, 1.0, v9 :: v_dual_add_f32 v10, 1.0, v10
-; GFX11-NEXT: v_dual_add_f32 v11, 1.0, v11 :: v_dual_add_f32 v12, 1.0, v12
-; GFX11-NEXT: v_dual_add_f32 v13, 1.0, v13 :: v_dual_add_f32 v8, 1.0, v8
-; GFX11-NEXT: v_dual_add_f32 v7, 1.0, v7 :: v_dual_add_f32 v18, 1.0, v18
-; GFX11-NEXT: v_dual_add_f32 v19, 1.0, v19 :: v_dual_add_f32 v20, 1.0, v20
-; GFX11-NEXT: v_dual_add_f32 v21, 1.0, v21 :: v_dual_add_f32 v22, 1.0, v22
-; GFX11-NEXT: v_dual_add_f32 v23, 1.0, v23 :: v_dual_add_f32 v24, 1.0, v24
-; GFX11-NEXT: v_lshrrev_b32_e32 v30, 16, v5
-; GFX11-NEXT: v_lshrrev_b32_e32 v31, 16, v4
-; GFX11-NEXT: v_lshrrev_b32_e32 v32, 16, v3
-; GFX11-NEXT: v_lshrrev_b32_e32 v33, 16, v2
-; GFX11-NEXT: v_lshrrev_b32_e32 v34, 16, v1
-; GFX11-NEXT: v_lshrrev_b32_e32 v35, 16, v0
-; GFX11-NEXT: v_lshrrev_b32_e32 v36, 16, v16
-; GFX11-NEXT: v_lshrrev_b32_e32 v37, 16, v17
-; GFX11-NEXT: v_lshrrev_b32_e32 v38, 16, v14
-; GFX11-NEXT: v_lshrrev_b32_e32 v39, 16, v15
-; GFX11-NEXT: v_lshrrev_b32_e32 v48, 16, v9
-; GFX11-NEXT: v_lshrrev_b32_e32 v49, 16, v10
-; GFX11-NEXT: v_lshrrev_b32_e32 v50, 16, v11
-; GFX11-NEXT: v_lshrrev_b32_e32 v51, 16, v12
-; GFX11-NEXT: v_lshrrev_b32_e32 v52, 16, v13
-; GFX11-NEXT: v_lshrrev_b32_e32 v53, 16, v7
-; GFX11-NEXT: v_lshrrev_b32_e32 v54, 16, v8
-; GFX11-NEXT: v_lshrrev_b32_e32 v6, 16, v18
-; GFX11-NEXT: v_lshrrev_b32_e32 v29, 16, v19
-; GFX11-NEXT: v_lshrrev_b32_e32 v28, 16, v20
-; GFX11-NEXT: v_lshrrev_b32_e32 v27, 16, v21
-; GFX11-NEXT: v_lshrrev_b32_e32 v26, 16, v22
-; GFX11-NEXT: v_lshrrev_b32_e32 v25, 16, v23
-; GFX11-NEXT: v_lshrrev_b32_e32 v55, 16, v24
-; GFX11-NEXT: .LBB33_3: ; %end
-; GFX11-NEXT: v_and_b32_e32 v18, 0xffff, v18
-; GFX11-NEXT: v_and_b32_e32 v12, 0xffff, v12
-; GFX11-NEXT: v_and_b32_e32 v23, 0xffff, v23
-; GFX11-NEXT: v_and_b32_e32 v21, 0xffff, v21
-; GFX11-NEXT: v_and_b32_e32 v19, 0xffff, v19
-; GFX11-NEXT: v_lshl_or_b32 v6, v6, 16, v18
-; GFX11-NEXT: v_and_b32_e32 v18, 0xffff, v10
-; GFX11-NEXT: v_lshl_or_b32 v10, v51, 16, v12
-; GFX11-NEXT: v_and_b32_e32 v15, 0xffff, v15
-; GFX11-NEXT: v_and_b32_e32 v0, 0xffff, v0
-; GFX11-NEXT: v_lshl_or_b32 v25, v25, 16, v23
-; GFX11-NEXT: v_lshl_or_b32 v12, v49, 16, v18
-; GFX11-NEXT: v_and_b32_e32 v18, 0xffff, v14
-; GFX11-NEXT: v_and_b32_e32 v24, 0xffff, v24
-; GFX11-NEXT: v_lshl_or_b32 v27, v27, 16, v21
-; GFX11-NEXT: v_and_b32_e32 v22, 0xffff, v22
-; GFX11-NEXT: v_lshl_or_b32 v29, v29, 16, v19
-; GFX11-NEXT: v_and_b32_e32 v20, 0xffff, v20
-; GFX11-NEXT: v_and_b32_e32 v13, 0xffff, v13
-; GFX11-NEXT: v_and_b32_e32 v19, 0xffff, v9
-; GFX11-NEXT: v_lshl_or_b32 v14, v39, 16, v15
-; GFX11-NEXT: v_lshl_or_b32 v15, v38, 16, v18
-; GFX11-NEXT: v_lshl_or_b32 v18, v35, 16, v0
-; GFX11-NEXT: v_and_b32_e32 v0, 0xffff, v1
-; GFX11-NEXT: v_and_b32_e32 v1, 0xffff, v2
-; GFX11-NEXT: v_lshl_or_b32 v26, v26, 16, v22
-; GFX11-NEXT: v_and_b32_e32 v8, 0xffff, v8
-; GFX11-NEXT: v_and_b32_e32 v21, 0xffff, v7
-; GFX11-NEXT: v_and_b32_e32 v11, 0xffff, v11
-; GFX11-NEXT: v_lshl_or_b32 v9, v52, 16, v13
-; GFX11-NEXT: v_lshl_or_b32 v13, v48, 16, v19
-; GFX11-NEXT: v_and_b32_e32 v17, 0xffff, v17
-; GFX11-NEXT: v_and_b32_e32 v19, 0xffff, v16
-; GFX11-NEXT: v_and_b32_e32 v2, 0xffff, v3
-; GFX11-NEXT: v_and_b32_e32 v3, 0xffff, v4
-; GFX11-NEXT: v_dual_mov_b32 v5, v29 :: v_dual_and_b32 v4, 0xffff, v5
-; GFX11-NEXT: v_lshl_or_b32 v28, v28, 16, v20
-; GFX11-NEXT: v_lshl_or_b32 v20, v33, 16, v1
-; GFX11-NEXT: v_mov_b32_e32 v1, v25
-; GFX11-NEXT: v_lshl_or_b32 v24, v55, 16, v24
-; GFX11-NEXT: v_lshl_or_b32 v7, v54, 16, v8
-; GFX11-NEXT: v_lshl_or_b32 v8, v53, 16, v21
-; GFX11-NEXT: v_lshl_or_b32 v11, v50, 16, v11
-; GFX11-NEXT: v_lshl_or_b32 v16, v37, 16, v17
-; GFX11-NEXT: v_lshl_or_b32 v17, v36, 16, v19
-; GFX11-NEXT: v_lshl_or_b32 v19, v34, 16, v0
-; GFX11-NEXT: v_lshl_or_b32 v21, v32, 16, v2
-; GFX11-NEXT: v_lshl_or_b32 v22, v31, 16, v3
-; GFX11-NEXT: v_lshl_or_b32 v23, v30, 16, v4
-; GFX11-NEXT: v_mov_b32_e32 v0, v24
-; GFX11-NEXT: v_dual_mov_b32 v2, v26 :: v_dual_mov_b32 v3, v27
-; GFX11-NEXT: v_mov_b32_e32 v4, v28
-; GFX11-NEXT: s_setpc_b64 s[30:31]
-; GFX11-NEXT: .LBB33_4:
-; GFX11-NEXT: ; implicit-def: $vgpr55
-; GFX11-NEXT: ; implicit-def: $vgpr25
-; GFX11-NEXT: ; implicit-def: $vgpr26
-; GFX11-NEXT: ; implicit-def: $vgpr27
-; GFX11-NEXT: ; implicit-def: $vgpr28
-; GFX11-NEXT: ; implicit-def: $vgpr29
-; GFX11-NEXT: ; implicit-def: $vgpr6
-; GFX11-NEXT: ; implicit-def: $vgpr54
-; GFX11-NEXT: ; implicit-def: $vgpr53
-; GFX11-NEXT: ; implicit-def: $vgpr52
-; GFX11-NEXT: ; implicit-def: $vgpr51
-; GFX11-NEXT: ; implicit-def: $vgpr50
-; GFX11-NEXT: ; implicit-def: $vgpr49
-; GFX11-NEXT: ; implicit-def: $vgpr48
-; GFX11-NEXT: ; implicit-def: $vgpr39
-; GFX11-NEXT: ; implicit-def: $vgpr38
-; GFX11-NEXT: ; implicit-def: $vgpr37
-; GFX11-NEXT: ; implicit-def: $vgpr36
-; GFX11-NEXT: ; implicit-def: $vgpr35
-; GFX11-NEXT: ; implicit-def: $vgpr34
-; GFX11-NEXT: ; implicit-def: $vgpr33
-; GFX11-NEXT: ; implicit-def: $vgpr32
-; GFX11-NEXT: ; implicit-def: $vgpr31
-; GFX11-NEXT: ; implicit-def: $vgpr30
-; GFX11-NEXT: s_branch .LBB33_2
+; GFX11-TRUE16-LABEL: bitcast_v24f32_to_v48f16_scalar:
+; GFX11-TRUE16: ; %bb.0:
+; GFX11-TRUE16-NEXT: s_waitcnt vmcnt(0) expcnt(0) lgkmcnt(0)
+; GFX11-TRUE16-NEXT: v_dual_mov_b32 v16, v6 :: v_dual_mov_b32 v23, v5
+; GFX11-TRUE16-NEXT: v_dual_mov_b32 v22, v4 :: v_dual_mov_b32 v21, v3
+; GFX11-TRUE16-NEXT: v_dual_mov_b32 v20, v2 :: v_dual_mov_b32 v19, v1
+; GFX11-TRUE16-NEXT: s_delay_alu instid0(VALU_DEP_3)
+; GFX11-TRUE16-NEXT: v_cmp_ne_u32_e32 vcc_lo, 0, v16
+; GFX11-TRUE16-NEXT: v_dual_mov_b32 v18, v0 :: v_dual_mov_b32 v1, s1
+; GFX11-TRUE16-NEXT: v_dual_mov_b32 v0, s0 :: v_dual_mov_b32 v3, s3
+; GFX11-TRUE16-NEXT: v_dual_mov_b32 v2, s2 :: v_dual_mov_b32 v5, s17
+; GFX11-TRUE16-NEXT: v_dual_mov_b32 v4, s16 :: v_dual_mov_b32 v7, s19
+; GFX11-TRUE16-NEXT: v_dual_mov_b32 v6, s18 :: v_dual_mov_b32 v9, s21
+; GFX11-TRUE16-NEXT: v_dual_mov_b32 v8, s20 :: v_dual_mov_b32 v11, s23
+; GFX11-TRUE16-NEXT: v_dual_mov_b32 v10, s22 :: v_dual_mov_b32 v13, s25
+; GFX11-TRUE16-NEXT: v_dual_mov_b32 v12, s24 :: v_dual_mov_b32 v15, s27
+; GFX11-TRUE16-NEXT: v_dual_mov_b32 v14, s26 :: v_dual_mov_b32 v17, s29
+; GFX11-TRUE16-NEXT: v_mov_b32_e32 v16, s28
+; GFX11-TRUE16-NEXT: s_mov_b32 s0, 0
+; GFX11-TRUE16-NEXT: s_and_b32 s1, vcc_lo, exec_lo
+; GFX11-TRUE16-NEXT: s_cbranch_scc0 .LBB33_4
+; GFX11-TRUE16-NEXT: ; %bb.1: ; %cmp.false
+; GFX11-TRUE16-NEXT: v_lshrrev_b32_e32 v24, 16, v23
+; GFX11-TRUE16-NEXT: v_lshrrev_b32_e32 v25, 16, v22
+; GFX11-TRUE16-NEXT: v_lshrrev_b32_e32 v26, 16, v21
+; GFX11-TRUE16-NEXT: v_lshrrev_b32_e32 v27, 16, v20
+; GFX11-TRUE16-NEXT: v_lshrrev_b32_e32 v28, 16, v19
+; GFX11-TRUE16-NEXT: v_lshrrev_b32_e32 v29, 16, v18
+; GFX11-TRUE16-NEXT: v_lshrrev_b32_e32 v30, 16, v17
+; GFX11-TRUE16-NEXT: v_lshrrev_b32_e32 v31, 16, v16
+; GFX11-TRUE16-NEXT: v_lshrrev_b32_e32 v32, 16, v15
+; GFX11-TRUE16-NEXT: v_lshrrev_b32_e32 v33, 16, v14
+; GFX11-TRUE16-NEXT: v_lshrrev_b32_e32 v34, 16, v13
+; GFX11-TRUE16-NEXT: v_lshrrev_b32_e32 v35, 16, v12
+; GFX11-TRUE16-NEXT: v_lshrrev_b32_e32 v36, 16, v11
+; GFX11-TRUE16-NEXT: v_lshrrev_b32_e32 v37, 16, v10
+; GFX11-TRUE16-NEXT: v_lshrrev_b32_e32 v38, 16, v9
+; GFX11-TRUE16-NEXT: v_lshrrev_b32_e32 v39, 16, v8
+; GFX11-TRUE16-NEXT: v_lshrrev_b32_e32 v48, 16, v7
+; GFX11-TRUE16-NEXT: v_lshrrev_b32_e32 v49, 16, v6
+; GFX11-TRUE16-NEXT: v_lshrrev_b32_e32 v50, 16, v5
+; GFX11-TRUE16-NEXT: v_lshrrev_b32_e32 v51, 16, v4
+; GFX11-TRUE16-NEXT: v_lshrrev_b32_e32 v52, 16, v3
+; GFX11-TRUE16-NEXT: v_lshrrev_b32_e32 v53, 16, v2
+; GFX11-TRUE16-NEXT: v_lshrrev_b32_e32 v54, 16, v1
+; GFX11-TRUE16-NEXT: v_lshrrev_b32_e32 v55, 16, v0
+; GFX11-TRUE16-NEXT: s_and_not1_b32 vcc_lo, exec_lo, s0
+; GFX11-TRUE16-NEXT: s_cbranch_vccnz .LBB33_3
+; GFX11-TRUE16-NEXT: .LBB33_2: ; %cmp.true
+; GFX11-TRUE16-NEXT: v_dual_add_f32 v23, 1.0, v23 :: v_dual_add_f32 v22, 1.0, v22
+; GFX11-TRUE16-NEXT: v_dual_add_f32 v21, 1.0, v21 :: v_dual_add_f32 v20, 1.0, v20
+; GFX11-TRUE16-NEXT: v_dual_add_f32 v19, 1.0, v19 :: v_dual_add_f32 v18, 1.0, v18
+; GFX11-TRUE16-NEXT: v_dual_add_f32 v17, 1.0, v17 :: v_dual_add_f32 v16, 1.0, v16
+; GFX11-TRUE16-NEXT: v_dual_add_f32 v15, 1.0, v15 :: v_dual_add_f32 v14, 1.0, v14
+; GFX11-TRUE16-NEXT: v_dual_add_f32 v13, 1.0, v13 :: v_dual_add_f32 v12, 1.0, v12
+; GFX11-TRUE16-NEXT: v_dual_add_f32 v11, 1.0, v11 :: v_dual_add_f32 v10, 1.0, v10
+; GFX11-TRUE16-NEXT: v_dual_add_f32 v9, 1.0, v9 :: v_dual_add_f32 v8, 1.0, v8
+; GFX11-TRUE16-NEXT: v_dual_add_f32 v7, 1.0, v7 :: v_dual_add_f32 v6, 1.0, v6
+; GFX11-TRUE16-NEXT: v_dual_add_f32 v5, 1.0, v5 :: v_dual_add_f32 v4, 1.0, v4
+; GFX11-TRUE16-NEXT: v_dual_add_f32 v3, 1.0, v3 :: v_dual_add_f32 v2, 1.0, v2
+; GFX11-TRUE16-NEXT: v_dual_add_f32 v1, 1.0, v1 :: v_dual_add_f32 v0, 1.0, v0
+; GFX11-TRUE16-NEXT: v_lshrrev_b32_e32 v24, 16, v23
+; GFX11-TRUE16-NEXT: v_lshrrev_b32_e32 v25, 16, v22
+; GFX11-TRUE16-NEXT: v_lshrrev_b32_e32 v26, 16, v21
+; GFX11-TRUE16-NEXT: v_lshrrev_b32_e32 v27, 16, v20
+; GFX11-TRUE16-NEXT: v_lshrrev_b32_e32 v28, 16, v19
+; GFX11-TRUE16-NEXT: v_lshrrev_b32_e32 v29, 16, v18
+; GFX11-TRUE16-NEXT: v_lshrrev_b32_e32 v30, 16, v17
+; GFX11-TRUE16-NEXT: v_lshrrev_b32_e32 v31, 16, v16
+; GFX11-TRUE16-NEXT: v_lshrrev_b32_e32 v32, 16, v15
+; GFX11-TRUE16-NEXT: v_lshrrev_b32_e32 v33, 16, v14
+; GFX11-TRUE16-NEXT: v_lshrrev_b32_e32 v34, 16, v13
+; GFX11-TRUE16-NEXT: v_lshrrev_b32_e32 v35, 16, v12
+; GFX11-TRUE16-NEXT: v_lshrrev_b32_e32 v36, 16, v11
+; GFX11-TRUE16-NEXT: v_lshrrev_b32_e32 v37, 16, v10
+; GFX11-TRUE16-NEXT: v_lshrrev_b32_e32 v38, 16, v9
+; GFX11-TRUE16-NEXT: v_lshrrev_b32_e32 v39, 16, v8
+; GFX11-TRUE16-NEXT: v_lshrrev_b32_e32 v48, 16, v7
+; GFX11-TRUE16-NEXT: v_lshrrev_b32_e32 v49, 16, v6
+; GFX11-TRUE16-NEXT: v_lshrrev_b32_e32 v50, 16, v5
+; GFX11-TRUE16-NEXT: v_lshrrev_b32_e32 v51, 16, v4
+; GFX11-TRUE16-NEXT: v_lshrrev_b32_e32 v52, 16, v3
+; GFX11-TRUE16-NEXT: v_lshrrev_b32_e32 v53, 16, v2
+; GFX11-TRUE16-NEXT: v_lshrrev_b32_e32 v54, 16, v1
+; GFX11-TRUE16-NEXT: v_lshrrev_b32_e32 v55, 16, v0
+; GFX11-TRUE16-NEXT: .LBB33_3: ; %end
+; GFX11-TRUE16-NEXT: s_delay_alu instid0(VALU_DEP_1) | instskip(NEXT) | instid1(VALU_DEP_4)
+; GFX11-TRUE16-NEXT: v_dual_mov_b32 v55, v55 :: v_dual_mov_b32 v54, v54
+; GFX11-TRUE16-NEXT: v_dual_mov_b32 v53, v53 :: v_dual_mov_b32 v52, v52
+; GFX11-TRUE16-NEXT: v_dual_mov_b32 v51, v51 :: v_dual_mov_b32 v50, v50
+; GFX11-TRUE16-NEXT: v_dual_mov_b32 v49, v49 :: v_dual_mov_b32 v48, v48
+; GFX11-TRUE16-NEXT: v_dual_mov_b32 v39, v39 :: v_dual_mov_b32 v38, v38
+; GFX11-TRUE16-NEXT: v_dual_mov_b32 v37, v37 :: v_dual_mov_b32 v36, v36
+; GFX11-TRUE16-NEXT: v_dual_mov_b32 v35, v35 :: v_dual_mov_b32 v34, v34
+; GFX11-TRUE16-NEXT: v_dual_mov_b32 v33, v33 :: v_dual_mov_b32 v32, v32
+; GFX11-TRUE16-NEXT: v_dual_mov_b32 v31, v31 :: v_dual_mov_b32 v30, v30
+; GFX11-TRUE16-NEXT: v_dual_mov_b32 v29, v29 :: v_dual_mov_b32 v28, v28
+; GFX11-TRUE16-NEXT: v_dual_mov_b32 v27, v27 :: v_dual_mov_b32 v26, v26
+; GFX11-TRUE16-NEXT: v_dual_mov_b32 v25, v25 :: v_dual_mov_b32 v24, v24
+; GFX11-TRUE16-NEXT: v_mov_b16_e32 v0.h, v55.l
+; GFX11-TRUE16-NEXT: v_mov_b16_e32 v1.h, v54.l
+; GFX11-TRUE16-NEXT: v_mov_b16_e32 v2.h, v53.l
+; GFX11-TRUE16-NEXT: v_mov_b16_e32 v3.h, v52.l
+; GFX11-TRUE16-NEXT: v_mov_b16_e32 v4.h, v51.l
+; GFX11-TRUE16-NEXT: v_mov_b16_e32 v5.h, v50.l
+; GFX11-TRUE16-NEXT: v_mov_b16_e32 v6.h, v49.l
+; GFX11-TRUE16-NEXT: v_mov_b16_e32 v7.h, v48.l
+; GFX11-TRUE16-NEXT: v_mov_b16_e32 v8.h, v39.l
+; GFX11-TRUE16-NEXT: v_mov_b16_e32 v9.h, v38.l
+; GFX11-TRUE16-NEXT: v_mov_b16_e32 v10.h, v37.l
+; GFX11-TRUE16-NEXT: v_mov_b16_e32 v11.h, v36.l
+; GFX11-TRUE16-NEXT: v_mov_b16_e32 v12.h, v35.l
+; GFX11-TRUE16-NEXT: v_mov_b16_e32 v13.h, v34.l
+; GFX11-TRUE16-NEXT: v_mov_b16_e32 v14.h, v33.l
+; GFX11-TRUE16-NEXT: v_mov_b16_e32 v15.h, v32.l
+; GFX11-TRUE16-NEXT: v_mov_b16_e32 v16.h, v31.l
+; GFX11-TRUE16-NEXT: v_mov_b16_e32 v17.h, v30.l
+; GFX11-TRUE16-NEXT: v_mov_b16_e32 v18.h, v29.l
+; GFX11-TRUE16-NEXT: v_mov_b16_e32 v19.h, v28.l
+; GFX11-TRUE16-NEXT: v_mov_b16_e32 v20.h, v27.l
+; GFX11-TRUE16-NEXT: v_mov_b16_e32 v21.h, v26.l
+; GFX11-TRUE16-NEXT: v_mov_b16_e32 v22.h, v25.l
+; GFX11-TRUE16-NEXT: v_mov_b16_e32 v23.h, v24.l
+; GFX11-TRUE16-NEXT: s_setpc_b64 s[30:31]
+; GFX11-TRUE16-NEXT: .LBB33_4:
+; GFX11-TRUE16-NEXT: ; implicit-def: $vgpr55
+; GFX11-TRUE16-NEXT: ; implicit-def: $vgpr54
+; GFX11-TRUE16-NEXT: ; implicit-def: $vgpr53
+; GFX11-TRUE16-NEXT: ; implicit-def: $vgpr52
+; GFX11-TRUE16-NEXT: ; implicit-def: $vgpr51
+; GFX11-TRUE16-NEXT: ; implicit-def: $vgpr50
+; GFX11-TRUE16-NEXT: ; implicit-def: $vgpr49
+; GFX11-TRUE16-NEXT: ; implicit-def: $vgpr48
+; GFX11-TRUE16-NEXT: ; implicit-def: $vgpr39
+; GFX11-TRUE16-NEXT: ; implicit-def: $vgpr38
+; GFX11-TRUE16-NEXT: ; implicit-def: $vgpr37
+; GFX11-TRUE16-NEXT: ; implicit-def: $vgpr36
+; GFX11-TRUE16-NEXT: ; implicit-def: $vgpr35
+; GFX11-TRUE16-NEXT: ; implicit-def: $vgpr34
+; GFX11-TRUE16-NEXT: ; implicit-def: $vgpr33
+; GFX11-TRUE16-NEXT: ; implicit-def: $vgpr32
+; GFX11-TRUE16-NEXT: ; implicit-def: $vgpr31
+; GFX11-TRUE16-NEXT: ; implicit-def: $vgpr30
+; GFX11-TRUE16-NEXT: ; implicit-def: $vgpr29
+; GFX11-TRUE16-NEXT: ; implicit-def: $vgpr28
+; GFX11-TRUE16-NEXT: ; implicit-def: $vgpr27
+; GFX11-TRUE16-NEXT: ; implicit-def: $vgpr26
+; GFX11-TRUE16-NEXT: ; implicit-def: $vgpr25
+; GFX11-TRUE16-NEXT: ; implicit-def: $vgpr24
+; GFX11-TRUE16-NEXT: s_branch .LBB33_2
+;
+; GFX11-FAKE16-LABEL: bitcast_v24f32_to_v48f16_scalar:
+; GFX11-FAKE16: ; %bb.0:
+; GFX11-FAKE16-NEXT: s_waitcnt vmcnt(0) expcnt(0) lgkmcnt(0)
+; GFX11-FAKE16-NEXT: v_cmp_ne_u32_e32 vcc_lo, 0, v6
+; GFX11-FAKE16-NEXT: v_dual_mov_b32 v24, s0 :: v_dual_mov_b32 v23, s1
+; GFX11-FAKE16-NEXT: v_dual_mov_b32 v22, s2 :: v_dual_mov_b32 v21, s3
+; GFX11-FAKE16-NEXT: v_dual_mov_b32 v20, s16 :: v_dual_mov_b32 v19, s17
+; GFX11-FAKE16-NEXT: v_dual_mov_b32 v18, s18 :: v_dual_mov_b32 v7, s20
+; GFX11-FAKE16-NEXT: v_dual_mov_b32 v8, s19 :: v_dual_mov_b32 v13, s21
+; GFX11-FAKE16-NEXT: v_dual_mov_b32 v12, s22 :: v_dual_mov_b32 v11, s23
+; GFX11-FAKE16-NEXT: v_dual_mov_b32 v10, s24 :: v_dual_mov_b32 v9, s25
+; GFX11-FAKE16-NEXT: v_dual_mov_b32 v15, s26 :: v_dual_mov_b32 v14, s27
+; GFX11-FAKE16-NEXT: v_dual_mov_b32 v17, s28 :: v_dual_mov_b32 v16, s29
+; GFX11-FAKE16-NEXT: s_mov_b32 s0, 0
+; GFX11-FAKE16-NEXT: s_and_b32 s1, vcc_lo, exec_lo
+; GFX11-FAKE16-NEXT: s_cbranch_scc0 .LBB33_4
+; GFX11-FAKE16-NEXT: ; %bb.1: ; %cmp.false
+; GFX11-FAKE16-NEXT: v_lshrrev_b32_e32 v30, 16, v5
+; GFX11-FAKE16-NEXT: v_lshrrev_b32_e32 v31, 16, v4
+; GFX11-FAKE16-NEXT: v_lshrrev_b32_e32 v32, 16, v3
+; GFX11-FAKE16-NEXT: v_lshrrev_b32_e32 v33, 16, v2
+; GFX11-FAKE16-NEXT: v_lshrrev_b32_e32 v34, 16, v1
+; GFX11-FAKE16-NEXT: v_lshrrev_b32_e32 v35, 16, v0
+; GFX11-FAKE16-NEXT: v_lshrrev_b32_e32 v36, 16, v16
+; GFX11-FAKE16-NEXT: v_lshrrev_b32_e32 v37, 16, v17
+; GFX11-FAKE16-NEXT: v_lshrrev_b32_e32 v38, 16, v14
+; GFX11-FAKE16-NEXT: v_lshrrev_b32_e32 v39, 16, v15
+; GFX11-FAKE16-NEXT: v_lshrrev_b32_e32 v48, 16, v9
+; GFX11-FAKE16-NEXT: v_lshrrev_b32_e32 v49, 16, v10
+; GFX11-FAKE16-NEXT: v_lshrrev_b32_e32 v50, 16, v11
+; GFX11-FAKE16-NEXT: v_lshrrev_b32_e32 v51, 16, v12
+; GFX11-FAKE16-NEXT: v_lshrrev_b32_e32 v52, 16, v13
+; GFX11-FAKE16-NEXT: v_lshrrev_b32_e32 v53, 16, v7
+; GFX11-FAKE16-NEXT: v_lshrrev_b32_e32 v54, 16, v8
+; GFX11-FAKE16-NEXT: v_lshrrev_b32_e32 v6, 16, v18
+; GFX11-FAKE16-NEXT: v_lshrrev_b32_e32 v29, 16, v19
+; GFX11-FAKE16-NEXT: v_lshrrev_b32_e32 v28, 16, v20
+; GFX11-FAKE16-NEXT: v_lshrrev_b32_e32 v27, 16, v21
+; GFX11-FAKE16-NEXT: v_lshrrev_b32_e32 v26, 16, v22
+; GFX11-FAKE16-NEXT: v_lshrrev_b32_e32 v25, 16, v23
+; GFX11-FAKE16-NEXT: v_lshrrev_b32_e32 v55, 16, v24
+; GFX11-FAKE16-NEXT: s_and_not1_b32 vcc_lo, exec_lo, s0
+; GFX11-FAKE16-NEXT: s_cbranch_vccnz .LBB33_3
+; GFX11-FAKE16-NEXT: .LBB33_2: ; %cmp.true
+; GFX11-FAKE16-NEXT: v_dual_add_f32 v5, 1.0, v5 :: v_dual_add_f32 v4, 1.0, v4
+; GFX11-FAKE16-NEXT: v_dual_add_f32 v3, 1.0, v3 :: v_dual_add_f32 v2, 1.0, v2
+; GFX11-FAKE16-NEXT: v_dual_add_f32 v1, 1.0, v1 :: v_dual_add_f32 v0, 1.0, v0
+; GFX11-FAKE16-NEXT: v_dual_add_f32 v16, 1.0, v16 :: v_dual_add_f32 v17, 1.0, v17
+; GFX11-FAKE16-NEXT: v_dual_add_f32 v14, 1.0, v14 :: v_dual_add_f32 v15, 1.0, v15
+; GFX11-FAKE16-NEXT: v_dual_add_f32 v9, 1.0, v9 :: v_dual_add_f32 v10, 1.0, v10
+; GFX11-FAKE16-NEXT: v_dual_add_f32 v11, 1.0, v11 :: v_dual_add_f32 v12, 1.0, v12
+; GFX11-FAKE16-NEXT: v_dual_add_f32 v13, 1.0, v13 :: v_dual_add_f32 v8, 1.0, v8
+; GFX11-FAKE16-NEXT: v_dual_add_f32 v7, 1.0, v7 :: v_dual_add_f32 v18, 1.0, v18
+; GFX11-FAKE16-NEXT: v_dual_add_f32 v19, 1.0, v19 :: v_dual_add_f32 v20, 1.0, v20
+; GFX11-FAKE16-NEXT: v_dual_add_f32 v21, 1.0, v21 :: v_dual_add_f32 v22, 1.0, v22
+; GFX11-FAKE16-NEXT: v_dual_add_f32 v23, 1.0, v23 :: v_dual_add_f32 v24, 1.0, v24
+; GFX11-FAKE16-NEXT: v_lshrrev_b32_e32 v30, 16, v5
+; GFX11-FAKE16-NEXT: v_lshrrev_b32_e32 v31, 16, v4
+; GFX11-FAKE16-NEXT: v_lshrrev_b32_e32 v32, 16, v3
+; GFX11-FAKE16-NEXT: v_lshrrev_b32_e32 v33, 16, v2
+; GFX11-FAKE16-NEXT: v_lshrrev_b32_e32 v34, 16, v1
+; GFX11-FAKE16-NEXT: v_lshrrev_b32_e32 v35, 16, v0
+; GFX11-FAKE16-NEXT: v_lshrrev_b32_e32 v36, 16, v16
+; GFX11-FAKE16-NEXT: v_lshrrev_b32_e32 v37, 16, v17
+; GFX11-FAKE16-NEXT: v_lshrrev_b32_e32 v38, 16, v14
+; GFX11-FAKE16-NEXT: v_lshrrev_b32_e32 v39, 16, v15
+; GFX11-FAKE16-NEXT: v_lshrrev_b32_e32 v48, 16, v9
+; GFX11-FAKE16-NEXT: v_lshrrev_b32_e32 v49, 16, v10
+; GFX11-FAKE16-NEXT: v_lshrrev_b32_e32 v50, 16, v11
+; GFX11-FAKE16-NEXT: v_lshrrev_b32_e32 v51, 16, v12
+; GFX11-FAKE16-NEXT: v_lshrrev_b32_e32 v52, 16, v13
+; GFX11-FAKE16-NEXT: v_lshrrev_b32_e32 v53, 16, v7
+; GFX11-FAKE16-NEXT: v_lshrrev_b32_e32 v54, 16, v8
+; GFX11-FAKE16-NEXT: v_lshrrev_b32_e32 v6, 16, v18
+; GFX11-FAKE16-NEXT: v_lshrrev_b32_e32 v29, 16, v19
+; GFX11-FAKE16-NEXT: v_lshrrev_b32_e32 v28, 16, v20
+; GFX11-FAKE16-NEXT: v_lshrrev_b32_e32 v27, 16, v21
+; GFX11-FAKE16-NEXT: v_lshrrev_b32_e32 v26, 16, v22
+; GFX11-FAKE16-NEXT: v_lshrrev_b32_e32 v25, 16, v23
+; GFX11-FAKE16-NEXT: v_lshrrev_b32_e32 v55, 16, v24
+; GFX11-FAKE16-NEXT: .LBB33_3: ; %end
+; GFX11-FAKE16-NEXT: v_and_b32_e32 v18, 0xffff, v18
+; GFX11-FAKE16-NEXT: v_and_b32_e32 v12, 0xffff, v12
+; GFX11-FAKE16-NEXT: v_and_b32_e32 v23, 0xffff, v23
+; GFX11-FAKE16-NEXT: v_and_b32_e32 v21, 0xffff, v21
+; GFX11-FAKE16-NEXT: v_and_b32_e32 v19, 0xffff, v19
+; GFX11-FAKE16-NEXT: v_lshl_or_b32 v6, v6, 16, v18
+; GFX11-FAKE16-NEXT: v_and_b32_e32 v18, 0xffff, v10
+; GFX11-FAKE16-NEXT: v_lshl_or_b32 v10, v51, 16, v12
+; GFX11-FAKE16-NEXT: v_and_b32_e32 v15, 0xffff, v15
+; GFX11-FAKE16-NEXT: v_and_b32_e32 v0, 0xffff, v0
+; GFX11-FAKE16-NEXT: v_lshl_or_b32 v25, v25, 16, v23
+; GFX11-FAKE16-NEXT: v_lshl_or_b32 v12, v49, 16, v18
+; GFX11-FAKE16-NEXT: v_and_b32_e32 v18, 0xffff, v14
+; GFX11-FAKE16-NEXT: v_and_b32_e32 v24, 0xffff, v24
+; GFX11-FAKE16-NEXT: v_lshl_or_b32 v27, v27, 16, v21
+; GFX11-FAKE16-NEXT: v_and_b32_e32 v22, 0xffff, v22
+; GFX11-FAKE16-NEXT: v_lshl_or_b32 v29, v29, 16, v19
+; GFX11-FAKE16-NEXT: v_and_b32_e32 v20, 0xffff, v20
+; GFX11-FAKE16-NEXT: v_and_b32_e32 v13, 0xffff, v13
+; GFX11-FAKE16-NEXT: v_and_b32_e32 v19, 0xffff, v9
+; GFX11-FAKE16-NEXT: v_lshl_or_b32 v14, v39, 16, v15
+; GFX11-FAKE16-NEXT: v_lshl_or_b32 v15, v38, 16, v18
+; GFX11-FAKE16-NEXT: v_lshl_or_b32 v18, v35, 16, v0
+; GFX11-FAKE16-NEXT: v_and_b32_e32 v0, 0xffff, v1
+; GFX11-FAKE16-NEXT: v_and_b32_e32 v1, 0xffff, v2
+; GFX11-FAKE16-NEXT: v_lshl_or_b32 v26, v26, 16, v22
+; GFX11-FAKE16-NEXT: v_and_b32_e32 v8, 0xffff, v8
+; GFX11-FAKE16-NEXT: v_and_b32_e32 v21, 0xffff, v7
+; GFX11-FAKE16-NEXT: v_and_b32_e32 v11, 0xffff, v11
+; GFX11-FAKE16-NEXT: v_lshl_or_b32 v9, v52, 16, v13
+; GFX11-FAKE16-NEXT: v_lshl_or_b32 v13, v48, 16, v19
+; GFX11-FAKE16-NEXT: v_and_b32_e32 v17, 0xffff, v17
+; GFX11-FAKE16-NEXT: v_and_b32_e32 v19, 0xffff, v16
+; GFX11-FAKE16-NEXT: v_and_b32_e32 v2, 0xffff, v3
+; GFX11-FAKE16-NEXT: v_and_b32_e32 v3, 0xffff, v4
+; GFX11-FAKE16-NEXT: v_dual_mov_b32 v5, v29 :: v_dual_and_b32 v4, 0xffff, v5
+; GFX11-FAKE16-NEXT: v_lshl_or_b32 v28, v28, 16, v20
+; GFX11-FAKE16-NEXT: v_lshl_or_b32 v20, v33, 16, v1
+; GFX11-FAKE16-NEXT: v_mov_b32_e32 v1, v25
+; GFX11-FAKE16-NEXT: v_lshl_or_b32 v24, v55, 16, v24
+; GFX11-FAKE16-NEXT: v_lshl_or_b32 v7, v54, 16, v8
+; GFX11-FAKE16-NEXT: v_lshl_or_b32 v8, v53, 16, v21
+; GFX11-FAKE16-NEXT: v_lshl_or_b32 v11, v50, 16, v11
+; GFX11-FAKE16-NEXT: v_lshl_or_b32 v16, v37, 16, v17
+; GFX11-FAKE16-NEXT: v_lshl_or_b32 v17, v36, 16, v19
+; GFX11-FAKE16-NEXT: v_lshl_or_b32 v19, v34, 16, v0
+; GFX11-FAKE16-NEXT: v_lshl_or_b32 v21, v32, 16, v2
+; GFX11-FAKE16-NEXT: v_lshl_or_b32 v22, v31, 16, v3
+; GFX11-FAKE16-NEXT: v_lshl_or_b32 v23, v30, 16, v4
+; GFX11-FAKE16-NEXT: v_mov_b32_e32 v0, v24
+; GFX11-FAKE16-NEXT: v_dual_mov_b32 v2, v26 :: v_dual_mov_b32 v3, v27
+; GFX11-FAKE16-NEXT: v_mov_b32_e32 v4, v28
+; GFX11-FAKE16-NEXT: s_setpc_b64 s[30:31]
+; GFX11-FAKE16-NEXT: .LBB33_4:
+; GFX11-FAKE16-NEXT: ; implicit-def: $vgpr55
+; GFX11-FAKE16-NEXT: ; implicit-def: $vgpr25
+; GFX11-FAKE16-NEXT: ; implicit-def: $vgpr26
+; GFX11-FAKE16-NEXT: ; implicit-def: $vgpr27
+; GFX11-FAKE16-NEXT: ; implicit-def: $vgpr28
+; GFX11-FAKE16-NEXT: ; implicit-def: $vgpr29
+; GFX11-FAKE16-NEXT: ; implicit-def: $vgpr6
+; GFX11-FAKE16-NEXT: ; implicit-def: $vgpr54
+; GFX11-FAKE16-NEXT: ; implicit-def: $vgpr53
+; GFX11-FAKE16-NEXT: ; implicit-def: $vgpr52
+; GFX11-FAKE16-NEXT: ; implicit-def: $vgpr51
+; GFX11-FAKE16-NEXT: ; implicit-def: $vgpr50
+; GFX11-FAKE16-NEXT: ; implicit-def: $vgpr49
+; GFX11-FAKE16-NEXT: ; implicit-def: $vgpr48
+; GFX11-FAKE16-NEXT: ; implicit-def: $vgpr39
+; GFX11-FAKE16-NEXT: ; implicit-def: $vgpr38
+; GFX11-FAKE16-NEXT: ; implicit-def: $vgpr37
+; GFX11-FAKE16-NEXT: ; implicit-def: $vgpr36
+; GFX11-FAKE16-NEXT: ; implicit-def: $vgpr35
+; GFX11-FAKE16-NEXT: ; implicit-def: $vgpr34
+; GFX11-FAKE16-NEXT: ; implicit-def: $vgpr33
+; GFX11-FAKE16-NEXT: ; implicit-def: $vgpr32
+; GFX11-FAKE16-NEXT: ; implicit-def: $vgpr31
+; GFX11-FAKE16-NEXT: ; implicit-def: $vgpr30
+; GFX11-FAKE16-NEXT: s_branch .LBB33_2
%cmp = icmp eq i32 %b, 0
br i1 %cmp, label %cmp.true, label %cmp.false
@@ -19382,117 +20191,286 @@ define inreg <24 x float> @bitcast_v48f16_to_v24f32_scalar(<48 x half> inreg %a,
; GFX11-TRUE16-LABEL: bitcast_v48f16_to_v24f32_scalar:
; GFX11-TRUE16: ; %bb.0:
; GFX11-TRUE16-NEXT: s_waitcnt vmcnt(0) expcnt(0) lgkmcnt(0)
-; GFX11-TRUE16-NEXT: v_mov_b16_e32 v32.h, 0
; GFX11-TRUE16-NEXT: v_cmp_ne_u32_e32 vcc_lo, 0, v6
-; GFX11-TRUE16-NEXT: v_mov_b16_e32 v32.l, v5.h
-; GFX11-TRUE16-NEXT: v_mov_b16_e32 v33.l, v4.h
-; GFX11-TRUE16-NEXT: v_mov_b16_e32 v34.l, v3.h
-; GFX11-TRUE16-NEXT: v_mov_b16_e32 v33.h, v32.h
-; GFX11-TRUE16-NEXT: v_mov_b16_e32 v34.h, v32.h
-; GFX11-TRUE16-NEXT: v_mov_b16_e32 v35.l, v2.h
-; GFX11-TRUE16-NEXT: v_mov_b16_e32 v35.h, v32.h
-; GFX11-TRUE16-NEXT: v_mov_b16_e32 v36.l, v1.h
-; GFX11-TRUE16-NEXT: v_mov_b16_e32 v36.h, v32.h
-; GFX11-TRUE16-NEXT: v_mov_b16_e32 v37.l, v0.h
-; GFX11-TRUE16-NEXT: v_mov_b16_e32 v37.h, v32.h
-; GFX11-TRUE16-NEXT: v_and_b32_e32 v51, 0xffff, v0
-; GFX11-TRUE16-NEXT: v_and_b32_e32 v50, 0xffff, v1
-; GFX11-TRUE16-NEXT: v_and_b32_e32 v49, 0xffff, v2
-; GFX11-TRUE16-NEXT: v_and_b32_e32 v48, 0xffff, v3
-; GFX11-TRUE16-NEXT: v_and_b32_e32 v39, 0xffff, v4
-; GFX11-TRUE16-NEXT: v_and_b32_e32 v38, 0xffff, v5
-; GFX11-TRUE16-NEXT: s_lshr_b32 s41, s29, 16
-; GFX11-TRUE16-NEXT: s_lshr_b32 s42, s28, 16
-; GFX11-TRUE16-NEXT: s_lshr_b32 s15, s27, 16
-; GFX11-TRUE16-NEXT: s_lshr_b32 s43, s26, 16
-; GFX11-TRUE16-NEXT: s_lshr_b32 s14, s25, 16
-; GFX11-TRUE16-NEXT: s_lshr_b32 s13, s24, 16
-; GFX11-TRUE16-NEXT: s_lshr_b32 s12, s23, 16
-; GFX11-TRUE16-NEXT: s_lshr_b32 s11, s22, 16
-; GFX11-TRUE16-NEXT: s_lshr_b32 s10, s21, 16
-; GFX11-TRUE16-NEXT: s_lshr_b32 s9, s20, 16
-; GFX11-TRUE16-NEXT: s_lshr_b32 s8, s19, 16
-; GFX11-TRUE16-NEXT: s_lshr_b32 s7, s18, 16
-; GFX11-TRUE16-NEXT: s_lshr_b32 s6, s17, 16
-; GFX11-TRUE16-NEXT: s_lshr_b32 s5, s16, 16
-; GFX11-TRUE16-NEXT: s_lshr_b32 s44, s3, 16
-; GFX11-TRUE16-NEXT: s_lshr_b32 s45, s2, 16
-; GFX11-TRUE16-NEXT: s_lshr_b32 s46, s1, 16
-; GFX11-TRUE16-NEXT: s_lshr_b32 s4, s0, 16
-; GFX11-TRUE16-NEXT: s_mov_b32 s40, 0
-; GFX11-TRUE16-NEXT: s_pack_ll_b32_b16 s4, s0, s4
-; GFX11-TRUE16-NEXT: s_pack_ll_b32_b16 s1, s1, s46
-; GFX11-TRUE16-NEXT: s_pack_ll_b32_b16 s2, s2, s45
-; GFX11-TRUE16-NEXT: s_pack_ll_b32_b16 s3, s3, s44
-; GFX11-TRUE16-NEXT: s_pack_ll_b32_b16 s5, s16, s5
-; GFX11-TRUE16-NEXT: s_pack_ll_b32_b16 s6, s17, s6
-; GFX11-TRUE16-NEXT: s_pack_ll_b32_b16 s7, s18, s7
-; GFX11-TRUE16-NEXT: s_pack_ll_b32_b16 s8, s19, s8
-; GFX11-TRUE16-NEXT: s_pack_ll_b32_b16 s9, s20, s9
-; GFX11-TRUE16-NEXT: s_pack_ll_b32_b16 s10, s21, s10
-; GFX11-TRUE16-NEXT: s_pack_ll_b32_b16 s11, s22, s11
-; GFX11-TRUE16-NEXT: s_pack_ll_b32_b16 s12, s23, s12
-; GFX11-TRUE16-NEXT: s_pack_ll_b32_b16 s13, s24, s13
-; GFX11-TRUE16-NEXT: s_pack_ll_b32_b16 s14, s25, s14
-; GFX11-TRUE16-NEXT: s_pack_ll_b32_b16 s0, s26, s43
-; GFX11-TRUE16-NEXT: s_pack_ll_b32_b16 s15, s27, s15
-; GFX11-TRUE16-NEXT: s_pack_ll_b32_b16 s16, s28, s42
-; GFX11-TRUE16-NEXT: s_pack_ll_b32_b16 s17, s29, s41
+; GFX11-TRUE16-NEXT: s_clause 0x1f
+; GFX11-TRUE16-NEXT: scratch_store_b32 off, v40, s32 offset:312
+; GFX11-TRUE16-NEXT: scratch_store_b32 off, v41, s32 offset:308
+; GFX11-TRUE16-NEXT: scratch_store_b32 off, v42, s32 offset:304
+; GFX11-TRUE16-NEXT: scratch_store_b32 off, v43, s32 offset:300
+; GFX11-TRUE16-NEXT: scratch_store_b32 off, v44, s32 offset:296
+; GFX11-TRUE16-NEXT: scratch_store_b32 off, v45, s32 offset:292
+; GFX11-TRUE16-NEXT: scratch_store_b32 off, v46, s32 offset:288
+; GFX11-TRUE16-NEXT: scratch_store_b32 off, v47, s32 offset:284
+; GFX11-TRUE16-NEXT: scratch_store_b32 off, v56, s32 offset:280
+; GFX11-TRUE16-NEXT: scratch_store_b32 off, v57, s32 offset:276
+; GFX11-TRUE16-NEXT: scratch_store_b32 off, v58, s32 offset:272
+; GFX11-TRUE16-NEXT: scratch_store_b32 off, v59, s32 offset:268
+; GFX11-TRUE16-NEXT: scratch_store_b32 off, v60, s32 offset:264
+; GFX11-TRUE16-NEXT: scratch_store_b32 off, v61, s32 offset:260
+; GFX11-TRUE16-NEXT: scratch_store_b32 off, v62, s32 offset:256
+; GFX11-TRUE16-NEXT: scratch_store_b32 off, v63, s32 offset:252
+; GFX11-TRUE16-NEXT: scratch_store_b32 off, v72, s32 offset:248
+; GFX11-TRUE16-NEXT: scratch_store_b32 off, v73, s32 offset:244
+; GFX11-TRUE16-NEXT: scratch_store_b32 off, v74, s32 offset:240
+; GFX11-TRUE16-NEXT: scratch_store_b32 off, v75, s32 offset:236
+; GFX11-TRUE16-NEXT: scratch_store_b32 off, v76, s32 offset:232
+; GFX11-TRUE16-NEXT: scratch_store_b32 off, v77, s32 offset:228
+; GFX11-TRUE16-NEXT: scratch_store_b32 off, v78, s32 offset:224
+; GFX11-TRUE16-NEXT: scratch_store_b32 off, v79, s32 offset:220
+; GFX11-TRUE16-NEXT: scratch_store_b32 off, v88, s32 offset:216
+; GFX11-TRUE16-NEXT: scratch_store_b32 off, v89, s32 offset:212
+; GFX11-TRUE16-NEXT: scratch_store_b32 off, v90, s32 offset:208
+; GFX11-TRUE16-NEXT: scratch_store_b32 off, v91, s32 offset:204
+; GFX11-TRUE16-NEXT: scratch_store_b32 off, v92, s32 offset:200
+; GFX11-TRUE16-NEXT: scratch_store_b32 off, v93, s32 offset:196
+; GFX11-TRUE16-NEXT: scratch_store_b32 off, v94, s32 offset:192
+; GFX11-TRUE16-NEXT: scratch_store_b32 off, v95, s32 offset:188
+; GFX11-TRUE16-NEXT: s_clause 0x1f
+; GFX11-TRUE16-NEXT: scratch_store_b32 off, v104, s32 offset:184
+; GFX11-TRUE16-NEXT: scratch_store_b32 off, v105, s32 offset:180
+; GFX11-TRUE16-NEXT: scratch_store_b32 off, v106, s32 offset:176
+; GFX11-TRUE16-NEXT: scratch_store_b32 off, v107, s32 offset:172
+; GFX11-TRUE16-NEXT: scratch_store_b32 off, v108, s32 offset:168
+; GFX11-TRUE16-NEXT: scratch_store_b32 off, v109, s32 offset:164
+; GFX11-TRUE16-NEXT: scratch_store_b32 off, v110, s32 offset:160
+; GFX11-TRUE16-NEXT: scratch_store_b32 off, v111, s32 offset:156
+; GFX11-TRUE16-NEXT: scratch_store_b32 off, v120, s32 offset:152
+; GFX11-TRUE16-NEXT: scratch_store_b32 off, v121, s32 offset:148
+; GFX11-TRUE16-NEXT: scratch_store_b32 off, v122, s32 offset:144
+; GFX11-TRUE16-NEXT: scratch_store_b32 off, v123, s32 offset:140
+; GFX11-TRUE16-NEXT: scratch_store_b32 off, v124, s32 offset:136
+; GFX11-TRUE16-NEXT: scratch_store_b32 off, v125, s32 offset:132
+; GFX11-TRUE16-NEXT: scratch_store_b32 off, v126, s32 offset:128
+; GFX11-TRUE16-NEXT: scratch_store_b32 off, v127, s32 offset:124
+; GFX11-TRUE16-NEXT: scratch_store_b32 off, v136, s32 offset:120
+; GFX11-TRUE16-NEXT: scratch_store_b32 off, v137, s32 offset:116
+; GFX11-TRUE16-NEXT: scratch_store_b32 off, v138, s32 offset:112
+; GFX11-TRUE16-NEXT: scratch_store_b32 off, v139, s32 offset:108
+; GFX11-TRUE16-NEXT: scratch_store_b32 off, v140, s32 offset:104
+; GFX11-TRUE16-NEXT: scratch_store_b32 off, v141, s32 offset:100
+; GFX11-TRUE16-NEXT: scratch_store_b32 off, v142, s32 offset:96
+; GFX11-TRUE16-NEXT: scratch_store_b32 off, v143, s32 offset:92
+; GFX11-TRUE16-NEXT: scratch_store_b32 off, v152, s32 offset:88
+; GFX11-TRUE16-NEXT: scratch_store_b32 off, v153, s32 offset:84
+; GFX11-TRUE16-NEXT: scratch_store_b32 off, v154, s32 offset:80
+; GFX11-TRUE16-NEXT: scratch_store_b32 off, v155, s32 offset:76
+; GFX11-TRUE16-NEXT: scratch_store_b32 off, v156, s32 offset:72
+; GFX11-TRUE16-NEXT: scratch_store_b32 off, v157, s32 offset:68
+; GFX11-TRUE16-NEXT: scratch_store_b32 off, v158, s32 offset:64
+; GFX11-TRUE16-NEXT: scratch_store_b32 off, v159, s32 offset:60
+; GFX11-TRUE16-NEXT: s_clause 0xe
+; GFX11-TRUE16-NEXT: scratch_store_b32 off, v168, s32 offset:56
+; GFX11-TRUE16-NEXT: scratch_store_b32 off, v169, s32 offset:52
+; GFX11-TRUE16-NEXT: scratch_store_b32 off, v170, s32 offset:48
+; GFX11-TRUE16-NEXT: scratch_store_b32 off, v171, s32 offset:44
+; GFX11-TRUE16-NEXT: scratch_store_b32 off, v172, s32 offset:40
+; GFX11-TRUE16-NEXT: scratch_store_b32 off, v173, s32 offset:36
+; GFX11-TRUE16-NEXT: scratch_store_b32 off, v174, s32 offset:32
+; GFX11-TRUE16-NEXT: scratch_store_b32 off, v175, s32 offset:28
+; GFX11-TRUE16-NEXT: scratch_store_b32 off, v184, s32 offset:24
+; GFX11-TRUE16-NEXT: scratch_store_b32 off, v185, s32 offset:20
+; GFX11-TRUE16-NEXT: scratch_store_b32 off, v186, s32 offset:16
+; GFX11-TRUE16-NEXT: scratch_store_b32 off, v187, s32 offset:12
+; GFX11-TRUE16-NEXT: scratch_store_b32 off, v188, s32 offset:8
+; GFX11-TRUE16-NEXT: scratch_store_b32 off, v189, s32 offset:4
+; GFX11-TRUE16-NEXT: scratch_store_b32 off, v190, s32
+; GFX11-TRUE16-NEXT: v_dual_mov_b32 v185, v5 :: v_dual_mov_b32 v186, v4
+; GFX11-TRUE16-NEXT: v_dual_mov_b32 v187, v3 :: v_dual_mov_b32 v188, v2
+; GFX11-TRUE16-NEXT: v_dual_mov_b32 v189, v1 :: v_dual_mov_b32 v190, v0
+; GFX11-TRUE16-NEXT: s_lshr_b32 s15, s29, 16
+; GFX11-TRUE16-NEXT: s_lshr_b32 s14, s28, 16
+; GFX11-TRUE16-NEXT: s_lshr_b32 s13, s27, 16
+; GFX11-TRUE16-NEXT: s_lshr_b32 s12, s26, 16
+; GFX11-TRUE16-NEXT: s_lshr_b32 s11, s25, 16
+; GFX11-TRUE16-NEXT: s_lshr_b32 s10, s24, 16
+; GFX11-TRUE16-NEXT: s_lshr_b32 s9, s23, 16
+; GFX11-TRUE16-NEXT: s_lshr_b32 s8, s22, 16
+; GFX11-TRUE16-NEXT: s_lshr_b32 s7, s21, 16
+; GFX11-TRUE16-NEXT: s_lshr_b32 s6, s20, 16
+; GFX11-TRUE16-NEXT: s_lshr_b32 s5, s19, 16
+; GFX11-TRUE16-NEXT: s_lshr_b32 s4, s18, 16
+; GFX11-TRUE16-NEXT: s_lshr_b32 s43, s17, 16
+; GFX11-TRUE16-NEXT: s_lshr_b32 s44, s16, 16
+; GFX11-TRUE16-NEXT: s_lshr_b32 s45, s3, 16
+; GFX11-TRUE16-NEXT: s_lshr_b32 s46, s2, 16
+; GFX11-TRUE16-NEXT: s_lshr_b32 s41, s1, 16
+; GFX11-TRUE16-NEXT: s_lshr_b32 s40, s0, 16
+; GFX11-TRUE16-NEXT: s_mov_b32 s42, 0
+; GFX11-TRUE16-NEXT: s_pack_ll_b32_b16 s40, s0, s40
+; GFX11-TRUE16-NEXT: s_pack_ll_b32_b16 s41, s1, s41
+; GFX11-TRUE16-NEXT: s_pack_ll_b32_b16 s0, s2, s46
+; GFX11-TRUE16-NEXT: s_pack_ll_b32_b16 s1, s3, s45
+; GFX11-TRUE16-NEXT: s_pack_ll_b32_b16 s2, s16, s44
+; GFX11-TRUE16-NEXT: s_pack_ll_b32_b16 s3, s17, s43
+; GFX11-TRUE16-NEXT: s_pack_ll_b32_b16 s4, s18, s4
+; GFX11-TRUE16-NEXT: s_pack_ll_b32_b16 s5, s19, s5
+; GFX11-TRUE16-NEXT: s_pack_ll_b32_b16 s6, s20, s6
+; GFX11-TRUE16-NEXT: s_pack_ll_b32_b16 s7, s21, s7
+; GFX11-TRUE16-NEXT: s_pack_ll_b32_b16 s8, s22, s8
+; GFX11-TRUE16-NEXT: s_pack_ll_b32_b16 s9, s23, s9
+; GFX11-TRUE16-NEXT: s_pack_ll_b32_b16 s10, s24, s10
+; GFX11-TRUE16-NEXT: s_pack_ll_b32_b16 s11, s25, s11
+; GFX11-TRUE16-NEXT: s_pack_ll_b32_b16 s12, s26, s12
+; GFX11-TRUE16-NEXT: s_pack_ll_b32_b16 s13, s27, s13
+; GFX11-TRUE16-NEXT: s_pack_ll_b32_b16 s14, s28, s14
+; GFX11-TRUE16-NEXT: s_pack_ll_b32_b16 s15, s29, s15
; GFX11-TRUE16-NEXT: s_and_b32 s47, vcc_lo, exec_lo
; GFX11-TRUE16-NEXT: s_cbranch_scc0 .LBB35_4
; GFX11-TRUE16-NEXT: ; %bb.1: ; %cmp.false
-; GFX11-TRUE16-NEXT: v_lshl_or_b32 v18, v37, 16, v51
-; GFX11-TRUE16-NEXT: v_lshl_or_b32 v19, v36, 16, v50
-; GFX11-TRUE16-NEXT: v_lshl_or_b32 v20, v35, 16, v49
-; GFX11-TRUE16-NEXT: v_lshl_or_b32 v21, v34, 16, v48
-; GFX11-TRUE16-NEXT: v_lshl_or_b32 v22, v33, 16, v39
-; GFX11-TRUE16-NEXT: v_lshl_or_b32 v23, v32, 16, v38
-; GFX11-TRUE16-NEXT: v_dual_mov_b32 v0, s4 :: v_dual_mov_b32 v1, s1
-; GFX11-TRUE16-NEXT: v_dual_mov_b32 v2, s2 :: v_dual_mov_b32 v3, s3
-; GFX11-TRUE16-NEXT: v_dual_mov_b32 v4, s5 :: v_dual_mov_b32 v5, s6
-; GFX11-TRUE16-NEXT: v_dual_mov_b32 v6, s7 :: v_dual_mov_b32 v7, s8
-; GFX11-TRUE16-NEXT: v_dual_mov_b32 v8, s9 :: v_dual_mov_b32 v9, s10
-; GFX11-TRUE16-NEXT: v_dual_mov_b32 v10, s11 :: v_dual_mov_b32 v11, s12
-; GFX11-TRUE16-NEXT: v_dual_mov_b32 v12, s13 :: v_dual_mov_b32 v13, s14
-; GFX11-TRUE16-NEXT: v_dual_mov_b32 v14, s0 :: v_dual_mov_b32 v15, s15
-; GFX11-TRUE16-NEXT: v_dual_mov_b32 v16, s16 :: v_dual_mov_b32 v17, s17
-; GFX11-TRUE16-NEXT: s_and_not1_b32 vcc_lo, exec_lo, s40
+; GFX11-TRUE16-NEXT: v_dual_mov_b32 v0, s40 :: v_dual_mov_b32 v5, s0
+; GFX11-TRUE16-NEXT: v_dual_mov_b32 v2, s41 :: v_dual_mov_b32 v9, s1
+; GFX11-TRUE16-NEXT: v_dual_mov_b32 v14, s2 :: v_dual_mov_b32 v27, s4
+; GFX11-TRUE16-NEXT: v_dual_mov_b32 v20, s3 :: v_dual_mov_b32 v35, s5
+; GFX11-TRUE16-NEXT: v_dual_mov_b32 v44, s6 :: v_dual_mov_b32 v65, s8
+; GFX11-TRUE16-NEXT: v_dual_mov_b32 v54, s7 :: v_dual_mov_b32 v77, s9
+; GFX11-TRUE16-NEXT: v_dual_mov_b32 v90, s10 :: v_dual_mov_b32 v119, s12
+; GFX11-TRUE16-NEXT: v_dual_mov_b32 v104, s11 :: v_dual_mov_b32 v135, s13
+; GFX11-TRUE16-NEXT: v_mov_b32_e32 v152, s14
+; GFX11-TRUE16-NEXT: v_mov_b32_e32 v170, s15
+; GFX11-TRUE16-NEXT: s_and_not1_b32 vcc_lo, exec_lo, s42
; GFX11-TRUE16-NEXT: s_cbranch_vccnz .LBB35_3
; GFX11-TRUE16-NEXT: .LBB35_2: ; %cmp.true
-; GFX11-TRUE16-NEXT: v_lshl_or_b32 v18, v37, 16, v51
-; GFX11-TRUE16-NEXT: v_lshl_or_b32 v19, v36, 16, v50
-; GFX11-TRUE16-NEXT: v_lshl_or_b32 v20, v35, 16, v49
-; GFX11-TRUE16-NEXT: v_lshl_or_b32 v21, v34, 16, v48
-; GFX11-TRUE16-NEXT: v_lshl_or_b32 v22, v33, 16, v39
-; GFX11-TRUE16-NEXT: v_lshl_or_b32 v23, v32, 16, v38
-; GFX11-TRUE16-NEXT: v_pk_add_f16 v0, 0x200, s4 op_sel_hi:[0,1]
-; GFX11-TRUE16-NEXT: v_pk_add_f16 v1, 0x200, s1 op_sel_hi:[0,1]
-; GFX11-TRUE16-NEXT: v_pk_add_f16 v2, 0x200, s2 op_sel_hi:[0,1]
-; GFX11-TRUE16-NEXT: v_pk_add_f16 v3, 0x200, s3 op_sel_hi:[0,1]
-; GFX11-TRUE16-NEXT: v_pk_add_f16 v4, 0x200, s5 op_sel_hi:[0,1]
-; GFX11-TRUE16-NEXT: v_pk_add_f16 v5, 0x200, s6 op_sel_hi:[0,1]
-; GFX11-TRUE16-NEXT: v_pk_add_f16 v6, 0x200, s7 op_sel_hi:[0,1]
-; GFX11-TRUE16-NEXT: v_pk_add_f16 v7, 0x200, s8 op_sel_hi:[0,1]
-; GFX11-TRUE16-NEXT: v_pk_add_f16 v8, 0x200, s9 op_sel_hi:[0,1]
-; GFX11-TRUE16-NEXT: v_pk_add_f16 v9, 0x200, s10 op_sel_hi:[0,1]
-; GFX11-TRUE16-NEXT: v_pk_add_f16 v10, 0x200, s11 op_sel_hi:[0,1]
-; GFX11-TRUE16-NEXT: v_pk_add_f16 v11, 0x200, s12 op_sel_hi:[0,1]
-; GFX11-TRUE16-NEXT: v_pk_add_f16 v12, 0x200, s13 op_sel_hi:[0,1]
-; GFX11-TRUE16-NEXT: v_pk_add_f16 v13, 0x200, s14 op_sel_hi:[0,1]
-; GFX11-TRUE16-NEXT: v_pk_add_f16 v14, 0x200, s0 op_sel_hi:[0,1]
-; GFX11-TRUE16-NEXT: v_pk_add_f16 v15, 0x200, s15 op_sel_hi:[0,1]
-; GFX11-TRUE16-NEXT: v_pk_add_f16 v16, 0x200, s16 op_sel_hi:[0,1]
-; GFX11-TRUE16-NEXT: v_pk_add_f16 v17, 0x200, s17 op_sel_hi:[0,1]
-; GFX11-TRUE16-NEXT: v_pk_add_f16 v18, 0x200, v18 op_sel_hi:[0,1]
-; GFX11-TRUE16-NEXT: v_pk_add_f16 v19, 0x200, v19 op_sel_hi:[0,1]
-; GFX11-TRUE16-NEXT: v_pk_add_f16 v20, 0x200, v20 op_sel_hi:[0,1]
-; GFX11-TRUE16-NEXT: v_pk_add_f16 v21, 0x200, v21 op_sel_hi:[0,1]
-; GFX11-TRUE16-NEXT: v_pk_add_f16 v22, 0x200, v22 op_sel_hi:[0,1]
-; GFX11-TRUE16-NEXT: v_pk_add_f16 v23, 0x200, v23 op_sel_hi:[0,1]
+; GFX11-TRUE16-NEXT: v_pk_add_f16 v0, 0x200, s40 op_sel_hi:[0,1]
+; GFX11-TRUE16-NEXT: v_pk_add_f16 v2, 0x200, s41 op_sel_hi:[0,1]
+; GFX11-TRUE16-NEXT: v_pk_add_f16 v190, 0x200, v190 op_sel_hi:[0,1]
+; GFX11-TRUE16-NEXT: v_pk_add_f16 v189, 0x200, v189 op_sel_hi:[0,1]
+; GFX11-TRUE16-NEXT: v_pk_add_f16 v188, 0x200, v188 op_sel_hi:[0,1]
+; GFX11-TRUE16-NEXT: v_pk_add_f16 v187, 0x200, v187 op_sel_hi:[0,1]
+; GFX11-TRUE16-NEXT: v_pk_add_f16 v186, 0x200, v186 op_sel_hi:[0,1]
+; GFX11-TRUE16-NEXT: v_pk_add_f16 v185, 0x200, v185 op_sel_hi:[0,1]
+; GFX11-TRUE16-NEXT: v_pk_add_f16 v5, 0x200, s0 op_sel_hi:[0,1]
+; GFX11-TRUE16-NEXT: v_pk_add_f16 v9, 0x200, s1 op_sel_hi:[0,1]
+; GFX11-TRUE16-NEXT: v_pk_add_f16 v14, 0x200, s2 op_sel_hi:[0,1]
+; GFX11-TRUE16-NEXT: v_pk_add_f16 v20, 0x200, s3 op_sel_hi:[0,1]
+; GFX11-TRUE16-NEXT: v_pk_add_f16 v27, 0x200, s4 op_sel_hi:[0,1]
+; GFX11-TRUE16-NEXT: v_pk_add_f16 v35, 0x200, s5 op_sel_hi:[0,1]
+; GFX11-TRUE16-NEXT: v_pk_add_f16 v44, 0x200, s6 op_sel_hi:[0,1]
+; GFX11-TRUE16-NEXT: v_pk_add_f16 v54, 0x200, s7 op_sel_hi:[0,1]
+; GFX11-TRUE16-NEXT: v_pk_add_f16 v65, 0x200, s8 op_sel_hi:[0,1]
+; GFX11-TRUE16-NEXT: v_pk_add_f16 v77, 0x200, s9 op_sel_hi:[0,1]
+; GFX11-TRUE16-NEXT: v_pk_add_f16 v90, 0x200, s10 op_sel_hi:[0,1]
+; GFX11-TRUE16-NEXT: v_pk_add_f16 v104, 0x200, s11 op_sel_hi:[0,1]
+; GFX11-TRUE16-NEXT: v_pk_add_f16 v119, 0x200, s12 op_sel_hi:[0,1]
+; GFX11-TRUE16-NEXT: v_pk_add_f16 v135, 0x200, s13 op_sel_hi:[0,1]
+; GFX11-TRUE16-NEXT: v_pk_add_f16 v152, 0x200, s14 op_sel_hi:[0,1]
+; GFX11-TRUE16-NEXT: v_pk_add_f16 v170, 0x200, s15 op_sel_hi:[0,1]
; GFX11-TRUE16-NEXT: .LBB35_3: ; %end
+; GFX11-TRUE16-NEXT: v_dual_mov_b32 v1, v2 :: v_dual_mov_b32 v2, v5
+; GFX11-TRUE16-NEXT: v_dual_mov_b32 v5, v20 :: v_dual_mov_b32 v6, v27
+; GFX11-TRUE16-NEXT: v_dual_mov_b32 v7, v35 :: v_dual_mov_b32 v8, v44
+; GFX11-TRUE16-NEXT: v_dual_mov_b32 v11, v77 :: v_dual_mov_b32 v12, v90
+; GFX11-TRUE16-NEXT: v_mov_b32_e32 v13, v104
+; GFX11-TRUE16-NEXT: v_dual_mov_b32 v15, v135 :: v_dual_mov_b32 v16, v152
+; GFX11-TRUE16-NEXT: v_dual_mov_b32 v17, v170 :: v_dual_mov_b32 v20, v188
+; GFX11-TRUE16-NEXT: v_dual_mov_b32 v18, v190 :: v_dual_mov_b32 v19, v189
+; GFX11-TRUE16-NEXT: v_dual_mov_b32 v21, v187 :: v_dual_mov_b32 v22, v186
+; GFX11-TRUE16-NEXT: v_mov_b32_e32 v23, v185
+; GFX11-TRUE16-NEXT: s_clause 0x1f
+; GFX11-TRUE16-NEXT: scratch_load_b32 v190, off, s32
+; GFX11-TRUE16-NEXT: scratch_load_b32 v189, off, s32 offset:4
+; GFX11-TRUE16-NEXT: scratch_load_b32 v188, off, s32 offset:8
+; GFX11-TRUE16-NEXT: scratch_load_b32 v187, off, s32 offset:12
+; GFX11-TRUE16-NEXT: scratch_load_b32 v186, off, s32 offset:16
+; GFX11-TRUE16-NEXT: scratch_load_b32 v185, off, s32 offset:20
+; GFX11-TRUE16-NEXT: scratch_load_b32 v184, off, s32 offset:24
+; GFX11-TRUE16-NEXT: scratch_load_b32 v175, off, s32 offset:28
+; GFX11-TRUE16-NEXT: scratch_load_b32 v174, off, s32 offset:32
+; GFX11-TRUE16-NEXT: scratch_load_b32 v173, off, s32 offset:36
+; GFX11-TRUE16-NEXT: scratch_load_b32 v172, off, s32 offset:40
+; GFX11-TRUE16-NEXT: scratch_load_b32 v171, off, s32 offset:44
+; GFX11-TRUE16-NEXT: scratch_load_b32 v170, off, s32 offset:48
+; GFX11-TRUE16-NEXT: scratch_load_b32 v169, off, s32 offset:52
+; GFX11-TRUE16-NEXT: scratch_load_b32 v168, off, s32 offset:56
+; GFX11-TRUE16-NEXT: scratch_load_b32 v159, off, s32 offset:60
+; GFX11-TRUE16-NEXT: scratch_load_b32 v158, off, s32 offset:64
+; GFX11-TRUE16-NEXT: scratch_load_b32 v157, off, s32 offset:68
+; GFX11-TRUE16-NEXT: scratch_load_b32 v156, off, s32 offset:72
+; GFX11-TRUE16-NEXT: scratch_load_b32 v155, off, s32 offset:76
+; GFX11-TRUE16-NEXT: scratch_load_b32 v154, off, s32 offset:80
+; GFX11-TRUE16-NEXT: scratch_load_b32 v153, off, s32 offset:84
+; GFX11-TRUE16-NEXT: scratch_load_b32 v152, off, s32 offset:88
+; GFX11-TRUE16-NEXT: scratch_load_b32 v143, off, s32 offset:92
+; GFX11-TRUE16-NEXT: scratch_load_b32 v142, off, s32 offset:96
+; GFX11-TRUE16-NEXT: scratch_load_b32 v141, off, s32 offset:100
+; GFX11-TRUE16-NEXT: scratch_load_b32 v140, off, s32 offset:104
+; GFX11-TRUE16-NEXT: scratch_load_b32 v139, off, s32 offset:108
+; GFX11-TRUE16-NEXT: scratch_load_b32 v138, off, s32 offset:112
+; GFX11-TRUE16-NEXT: scratch_load_b32 v137, off, s32 offset:116
+; GFX11-TRUE16-NEXT: scratch_load_b32 v136, off, s32 offset:120
+; GFX11-TRUE16-NEXT: scratch_load_b32 v127, off, s32 offset:124
+; GFX11-TRUE16-NEXT: s_clause 0x1f
+; GFX11-TRUE16-NEXT: scratch_load_b32 v126, off, s32 offset:128
+; GFX11-TRUE16-NEXT: scratch_load_b32 v125, off, s32 offset:132
+; GFX11-TRUE16-NEXT: scratch_load_b32 v124, off, s32 offset:136
+; GFX11-TRUE16-NEXT: scratch_load_b32 v123, off, s32 offset:140
+; GFX11-TRUE16-NEXT: scratch_load_b32 v122, off, s32 offset:144
+; GFX11-TRUE16-NEXT: scratch_load_b32 v121, off, s32 offset:148
+; GFX11-TRUE16-NEXT: scratch_load_b32 v120, off, s32 offset:152
+; GFX11-TRUE16-NEXT: scratch_load_b32 v111, off, s32 offset:156
+; GFX11-TRUE16-NEXT: scratch_load_b32 v110, off, s32 offset:160
+; GFX11-TRUE16-NEXT: scratch_load_b32 v109, off, s32 offset:164
+; GFX11-TRUE16-NEXT: scratch_load_b32 v108, off, s32 offset:168
+; GFX11-TRUE16-NEXT: scratch_load_b32 v107, off, s32 offset:172
+; GFX11-TRUE16-NEXT: scratch_load_b32 v106, off, s32 offset:176
+; GFX11-TRUE16-NEXT: scratch_load_b32 v105, off, s32 offset:180
+; GFX11-TRUE16-NEXT: scratch_load_b32 v104, off, s32 offset:184
+; GFX11-TRUE16-NEXT: scratch_load_b32 v95, off, s32 offset:188
+; GFX11-TRUE16-NEXT: scratch_load_b32 v94, off, s32 offset:192
+; GFX11-TRUE16-NEXT: scratch_load_b32 v93, off, s32 offset:196
+; GFX11-TRUE16-NEXT: scratch_load_b32 v92, off, s32 offset:200
+; GFX11-TRUE16-NEXT: scratch_load_b32 v91, off, s32 offset:204
+; GFX11-TRUE16-NEXT: scratch_load_b32 v90, off, s32 offset:208
+; GFX11-TRUE16-NEXT: scratch_load_b32 v89, off, s32 offset:212
+; GFX11-TRUE16-NEXT: scratch_load_b32 v88, off, s32 offset:216
+; GFX11-TRUE16-NEXT: scratch_load_b32 v79, off, s32 offset:220
+; GFX11-TRUE16-NEXT: scratch_load_b32 v78, off, s32 offset:224
+; GFX11-TRUE16-NEXT: scratch_load_b32 v77, off, s32 offset:228
+; GFX11-TRUE16-NEXT: scratch_load_b32 v76, off, s32 offset:232
+; GFX11-TRUE16-NEXT: scratch_load_b32 v75, off, s32 offset:236
+; GFX11-TRUE16-NEXT: scratch_load_b32 v74, off, s32 offset:240
+; GFX11-TRUE16-NEXT: scratch_load_b32 v73, off, s32 offset:244
+; GFX11-TRUE16-NEXT: scratch_load_b32 v72, off, s32 offset:248
+; GFX11-TRUE16-NEXT: scratch_load_b32 v63, off, s32 offset:252
+; GFX11-TRUE16-NEXT: s_clause 0xe
+; GFX11-TRUE16-NEXT: scratch_load_b32 v62, off, s32 offset:256
+; GFX11-TRUE16-NEXT: scratch_load_b32 v61, off, s32 offset:260
+; GFX11-TRUE16-NEXT: scratch_load_b32 v60, off, s32 offset:264
+; GFX11-TRUE16-NEXT: scratch_load_b32 v59, off, s32 offset:268
+; GFX11-TRUE16-NEXT: scratch_load_b32 v58, off, s32 offset:272
+; GFX11-TRUE16-NEXT: scratch_load_b32 v57, off, s32 offset:276
+; GFX11-TRUE16-NEXT: scratch_load_b32 v56, off, s32 offset:280
+; GFX11-TRUE16-NEXT: scratch_load_b32 v47, off, s32 offset:284
+; GFX11-TRUE16-NEXT: scratch_load_b32 v46, off, s32 offset:288
+; GFX11-TRUE16-NEXT: scratch_load_b32 v45, off, s32 offset:292
+; GFX11-TRUE16-NEXT: scratch_load_b32 v44, off, s32 offset:296
+; GFX11-TRUE16-NEXT: scratch_load_b32 v43, off, s32 offset:300
+; GFX11-TRUE16-NEXT: scratch_load_b32 v42, off, s32 offset:304
+; GFX11-TRUE16-NEXT: scratch_load_b32 v41, off, s32 offset:308
+; GFX11-TRUE16-NEXT: scratch_load_b32 v40, off, s32 offset:312
+; GFX11-TRUE16-NEXT: v_dual_mov_b32 v3, v9 :: v_dual_mov_b32 v4, v14
+; GFX11-TRUE16-NEXT: v_dual_mov_b32 v9, v54 :: v_dual_mov_b32 v10, v65
+; GFX11-TRUE16-NEXT: v_mov_b32_e32 v14, v119
+; GFX11-TRUE16-NEXT: s_waitcnt vmcnt(0)
; GFX11-TRUE16-NEXT: s_setpc_b64 s[30:31]
; GFX11-TRUE16-NEXT: .LBB35_4:
; GFX11-TRUE16-NEXT: ; implicit-def: $vgpr0_vgpr1_vgpr2_vgpr3_vgpr4_vgpr5_vgpr6_vgpr7_vgpr8_vgpr9_vgpr10_vgpr11_vgpr12_vgpr13_vgpr14_vgpr15_vgpr16_vgpr17_vgpr18_vgpr19_vgpr20_vgpr21_vgpr22_vgpr23_vgpr24_vgpr25_vgpr26_vgpr27_vgpr28_vgpr29_vgpr30_vgpr31
+; GFX11-TRUE16-NEXT: ; implicit-def: $vgpr1_vgpr2_vgpr3_vgpr4_vgpr5_vgpr6_vgpr7_vgpr8_vgpr9_vgpr10_vgpr11_vgpr12_vgpr13_vgpr14_vgpr15_vgpr16_vgpr17_vgpr18_vgpr19_vgpr20_vgpr21_vgpr22_vgpr23_vgpr24_vgpr25_vgpr26_vgpr27_vgpr28_vgpr29_vgpr30_vgpr31_vgpr32
+; GFX11-TRUE16-NEXT: ; implicit-def: $vgpr3_vgpr4_vgpr5_vgpr6_vgpr7_vgpr8_vgpr9_vgpr10_vgpr11_vgpr12_vgpr13_vgpr14_vgpr15_vgpr16_vgpr17_vgpr18_vgpr19_vgpr20_vgpr21_vgpr22_vgpr23_vgpr24_vgpr25_vgpr26_vgpr27_vgpr28_vgpr29_vgpr30_vgpr31_vgpr32_vgpr33_vgpr34
+; GFX11-TRUE16-NEXT: ; implicit-def: $vgpr6_vgpr7_vgpr8_vgpr9_vgpr10_vgpr11_vgpr12_vgpr13_vgpr14_vgpr15_vgpr16_vgpr17_vgpr18_vgpr19_vgpr20_vgpr21_vgpr22_vgpr23_vgpr24_vgpr25_vgpr26_vgpr27_vgpr28_vgpr29_vgpr30_vgpr31_vgpr32_vgpr33_vgpr34_vgpr35_vgpr36_vgpr37
+; GFX11-TRUE16-NEXT: ; implicit-def: $vgpr10_vgpr11_vgpr12_vgpr13_vgpr14_vgpr15_vgpr16_vgpr17_vgpr18_vgpr19_vgpr20_vgpr21_vgpr22_vgpr23_vgpr24_vgpr25_vgpr26_vgpr27_vgpr28_vgpr29_vgpr30_vgpr31_vgpr32_vgpr33_vgpr34_vgpr35_vgpr36_vgpr37_vgpr38_vgpr39_vgpr40_vgpr41
+; GFX11-TRUE16-NEXT: ; implicit-def: $vgpr15_vgpr16_vgpr17_vgpr18_vgpr19_vgpr20_vgpr21_vgpr22_vgpr23_vgpr24_vgpr25_vgpr26_vgpr27_vgpr28_vgpr29_vgpr30_vgpr31_vgpr32_vgpr33_vgpr34_vgpr35_vgpr36_vgpr37_vgpr38_vgpr39_vgpr40_vgpr41_vgpr42_vgpr43_vgpr44_vgpr45_vgpr46
+; GFX11-TRUE16-NEXT: ; implicit-def: $vgpr21_vgpr22_vgpr23_vgpr24_vgpr25_vgpr26_vgpr27_vgpr28_vgpr29_vgpr30_vgpr31_vgpr32_vgpr33_vgpr34_vgpr35_vgpr36_vgpr37_vgpr38_vgpr39_vgpr40_vgpr41_vgpr42_vgpr43_vgpr44_vgpr45_vgpr46_vgpr47_vgpr48_vgpr49_vgpr50_vgpr51_vgpr52
+; GFX11-TRUE16-NEXT: ; implicit-def: $vgpr28_vgpr29_vgpr30_vgpr31_vgpr32_vgpr33_vgpr34_vgpr35_vgpr36_vgpr37_vgpr38_vgpr39_vgpr40_vgpr41_vgpr42_vgpr43_vgpr44_vgpr45_vgpr46_vgpr47_vgpr48_vgpr49_vgpr50_vgpr51_vgpr52_vgpr53_vgpr54_vgpr55_vgpr56_vgpr57_vgpr58_vgpr59
+; GFX11-TRUE16-NEXT: ; implicit-def: $vgpr36_vgpr37_vgpr38_vgpr39_vgpr40_vgpr41_vgpr42_vgpr43_vgpr44_vgpr45_vgpr46_vgpr47_vgpr48_vgpr49_vgpr50_vgpr51_vgpr52_vgpr53_vgpr54_vgpr55_vgpr56_vgpr57_vgpr58_vgpr59_vgpr60_vgpr61_vgpr62_vgpr63_vgpr64_vgpr65_vgpr66_vgpr67
+; GFX11-TRUE16-NEXT: ; implicit-def: $vgpr45_vgpr46_vgpr47_vgpr48_vgpr49_vgpr50_vgpr51_vgpr52_vgpr53_vgpr54_vgpr55_vgpr56_vgpr57_vgpr58_vgpr59_vgpr60_vgpr61_vgpr62_vgpr63_vgpr64_vgpr65_vgpr66_vgpr67_vgpr68_vgpr69_vgpr70_vgpr71_vgpr72_vgpr73_vgpr74_vgpr75_vgpr76
+; GFX11-TRUE16-NEXT: ; implicit-def: $vgpr55_vgpr56_vgpr57_vgpr58_vgpr59_vgpr60_vgpr61_vgpr62_vgpr63_vgpr64_vgpr65_vgpr66_vgpr67_vgpr68_vgpr69_vgpr70_vgpr71_vgpr72_vgpr73_vgpr74_vgpr75_vgpr76_vgpr77_vgpr78_vgpr79_vgpr80_vgpr81_vgpr82_vgpr83_vgpr84_vgpr85_vgpr86
+; GFX11-TRUE16-NEXT: ; implicit-def: $vgpr66_vgpr67_vgpr68_vgpr69_vgpr70_vgpr71_vgpr72_vgpr73_vgpr74_vgpr75_vgpr76_vgpr77_vgpr78_vgpr79_vgpr80_vgpr81_vgpr82_vgpr83_vgpr84_vgpr85_vgpr86_vgpr87_vgpr88_vgpr89_vgpr90_vgpr91_vgpr92_vgpr93_vgpr94_vgpr95_vgpr96_vgpr97
+; GFX11-TRUE16-NEXT: ; implicit-def: $vgpr78_vgpr79_vgpr80_vgpr81_vgpr82_vgpr83_vgpr84_vgpr85_vgpr86_vgpr87_vgpr88_vgpr89_vgpr90_vgpr91_vgpr92_vgpr93_vgpr94_vgpr95_vgpr96_vgpr97_vgpr98_vgpr99_vgpr100_vgpr101_vgpr102_vgpr103_vgpr104_vgpr105_vgpr106_vgpr107_vgpr108_vgpr109
+; GFX11-TRUE16-NEXT: ; implicit-def: $vgpr91_vgpr92_vgpr93_vgpr94_vgpr95_vgpr96_vgpr97_vgpr98_vgpr99_vgpr100_vgpr101_vgpr102_vgpr103_vgpr104_vgpr105_vgpr106_vgpr107_vgpr108_vgpr109_vgpr110_vgpr111_vgpr112_vgpr113_vgpr114_vgpr115_vgpr116_vgpr117_vgpr118_vgpr119_vgpr120_vgpr121_vgpr122
+; GFX11-TRUE16-NEXT: ; implicit-def: $vgpr105_vgpr106_vgpr107_vgpr108_vgpr109_vgpr110_vgpr111_vgpr112_vgpr113_vgpr114_vgpr115_vgpr116_vgpr117_vgpr118_vgpr119_vgpr120_vgpr121_vgpr122_vgpr123_vgpr124_vgpr125_vgpr126_vgpr127_vgpr128_vgpr129_vgpr130_vgpr131_vgpr132_vgpr133_vgpr134_vgpr135_vgpr136
+; GFX11-TRUE16-NEXT: ; implicit-def: $vgpr120_vgpr121_vgpr122_vgpr123_vgpr124_vgpr125_vgpr126_vgpr127_vgpr128_vgpr129_vgpr130_vgpr131_vgpr132_vgpr133_vgpr134_vgpr135_vgpr136_vgpr137_vgpr138_vgpr139_vgpr140_vgpr141_vgpr142_vgpr143_vgpr144_vgpr145_vgpr146_vgpr147_vgpr148_vgpr149_vgpr150_vgpr151
+; GFX11-TRUE16-NEXT: ; implicit-def: $vgpr136_vgpr137_vgpr138_vgpr139_vgpr140_vgpr141_vgpr142_vgpr143_vgpr144_vgpr145_vgpr146_vgpr147_vgpr148_vgpr149_vgpr150_vgpr151_vgpr152_vgpr153_vgpr154_vgpr155_vgpr156_vgpr157_vgpr158_vgpr159_vgpr160_vgpr161_vgpr162_vgpr163_vgpr164_vgpr165_vgpr166_vgpr167
+; GFX11-TRUE16-NEXT: ; implicit-def: $vgpr153_vgpr154_vgpr155_vgpr156_vgpr157_vgpr158_vgpr159_vgpr160_vgpr161_vgpr162_vgpr163_vgpr164_vgpr165_vgpr166_vgpr167_vgpr168_vgpr169_vgpr170_vgpr171_vgpr172_vgpr173_vgpr174_vgpr175_vgpr176_vgpr177_vgpr178_vgpr179_vgpr180_vgpr181_vgpr182_vgpr183_vgpr184
; GFX11-TRUE16-NEXT: s_branch .LBB35_2
;
; GFX11-FAKE16-LABEL: bitcast_v48f16_to_v24f32_scalar:
@@ -23764,117 +24742,286 @@ define inreg <12 x i64> @bitcast_v48i16_to_v12i64_scalar(<48 x i16> inreg %a, i3
; GFX11-TRUE16-LABEL: bitcast_v48i16_to_v12i64_scalar:
; GFX11-TRUE16: ; %bb.0:
; GFX11-TRUE16-NEXT: s_waitcnt vmcnt(0) expcnt(0) lgkmcnt(0)
-; GFX11-TRUE16-NEXT: v_mov_b16_e32 v32.h, 0
; GFX11-TRUE16-NEXT: v_cmp_ne_u32_e32 vcc_lo, 0, v6
-; GFX11-TRUE16-NEXT: v_mov_b16_e32 v32.l, v5.h
-; GFX11-TRUE16-NEXT: v_mov_b16_e32 v33.l, v4.h
-; GFX11-TRUE16-NEXT: v_mov_b16_e32 v34.l, v3.h
-; GFX11-TRUE16-NEXT: v_mov_b16_e32 v33.h, v32.h
-; GFX11-TRUE16-NEXT: v_mov_b16_e32 v34.h, v32.h
-; GFX11-TRUE16-NEXT: v_mov_b16_e32 v35.l, v2.h
-; GFX11-TRUE16-NEXT: v_mov_b16_e32 v35.h, v32.h
-; GFX11-TRUE16-NEXT: v_mov_b16_e32 v36.l, v1.h
-; GFX11-TRUE16-NEXT: v_mov_b16_e32 v36.h, v32.h
-; GFX11-TRUE16-NEXT: v_mov_b16_e32 v37.l, v0.h
-; GFX11-TRUE16-NEXT: v_mov_b16_e32 v37.h, v32.h
-; GFX11-TRUE16-NEXT: v_and_b32_e32 v51, 0xffff, v0
-; GFX11-TRUE16-NEXT: v_and_b32_e32 v50, 0xffff, v1
-; GFX11-TRUE16-NEXT: v_and_b32_e32 v49, 0xffff, v2
-; GFX11-TRUE16-NEXT: v_and_b32_e32 v48, 0xffff, v3
-; GFX11-TRUE16-NEXT: v_and_b32_e32 v39, 0xffff, v4
-; GFX11-TRUE16-NEXT: v_and_b32_e32 v38, 0xffff, v5
-; GFX11-TRUE16-NEXT: s_lshr_b32 s41, s29, 16
-; GFX11-TRUE16-NEXT: s_lshr_b32 s42, s28, 16
-; GFX11-TRUE16-NEXT: s_lshr_b32 s15, s27, 16
-; GFX11-TRUE16-NEXT: s_lshr_b32 s43, s26, 16
-; GFX11-TRUE16-NEXT: s_lshr_b32 s14, s25, 16
-; GFX11-TRUE16-NEXT: s_lshr_b32 s13, s24, 16
-; GFX11-TRUE16-NEXT: s_lshr_b32 s12, s23, 16
-; GFX11-TRUE16-NEXT: s_lshr_b32 s11, s22, 16
-; GFX11-TRUE16-NEXT: s_lshr_b32 s10, s21, 16
-; GFX11-TRUE16-NEXT: s_lshr_b32 s9, s20, 16
-; GFX11-TRUE16-NEXT: s_lshr_b32 s8, s19, 16
-; GFX11-TRUE16-NEXT: s_lshr_b32 s7, s18, 16
-; GFX11-TRUE16-NEXT: s_lshr_b32 s6, s17, 16
-; GFX11-TRUE16-NEXT: s_lshr_b32 s5, s16, 16
-; GFX11-TRUE16-NEXT: s_lshr_b32 s44, s3, 16
-; GFX11-TRUE16-NEXT: s_lshr_b32 s45, s2, 16
-; GFX11-TRUE16-NEXT: s_lshr_b32 s46, s1, 16
-; GFX11-TRUE16-NEXT: s_lshr_b32 s4, s0, 16
-; GFX11-TRUE16-NEXT: s_mov_b32 s40, 0
-; GFX11-TRUE16-NEXT: s_pack_ll_b32_b16 s4, s0, s4
-; GFX11-TRUE16-NEXT: s_pack_ll_b32_b16 s1, s1, s46
-; GFX11-TRUE16-NEXT: s_pack_ll_b32_b16 s2, s2, s45
-; GFX11-TRUE16-NEXT: s_pack_ll_b32_b16 s3, s3, s44
-; GFX11-TRUE16-NEXT: s_pack_ll_b32_b16 s5, s16, s5
-; GFX11-TRUE16-NEXT: s_pack_ll_b32_b16 s6, s17, s6
-; GFX11-TRUE16-NEXT: s_pack_ll_b32_b16 s7, s18, s7
-; GFX11-TRUE16-NEXT: s_pack_ll_b32_b16 s8, s19, s8
-; GFX11-TRUE16-NEXT: s_pack_ll_b32_b16 s9, s20, s9
-; GFX11-TRUE16-NEXT: s_pack_ll_b32_b16 s10, s21, s10
-; GFX11-TRUE16-NEXT: s_pack_ll_b32_b16 s11, s22, s11
-; GFX11-TRUE16-NEXT: s_pack_ll_b32_b16 s12, s23, s12
-; GFX11-TRUE16-NEXT: s_pack_ll_b32_b16 s13, s24, s13
-; GFX11-TRUE16-NEXT: s_pack_ll_b32_b16 s14, s25, s14
-; GFX11-TRUE16-NEXT: s_pack_ll_b32_b16 s0, s26, s43
-; GFX11-TRUE16-NEXT: s_pack_ll_b32_b16 s15, s27, s15
-; GFX11-TRUE16-NEXT: s_pack_ll_b32_b16 s16, s28, s42
-; GFX11-TRUE16-NEXT: s_pack_ll_b32_b16 s17, s29, s41
+; GFX11-TRUE16-NEXT: s_clause 0x1f
+; GFX11-TRUE16-NEXT: scratch_store_b32 off, v40, s32 offset:312
+; GFX11-TRUE16-NEXT: scratch_store_b32 off, v41, s32 offset:308
+; GFX11-TRUE16-NEXT: scratch_store_b32 off, v42, s32 offset:304
+; GFX11-TRUE16-NEXT: scratch_store_b32 off, v43, s32 offset:300
+; GFX11-TRUE16-NEXT: scratch_store_b32 off, v44, s32 offset:296
+; GFX11-TRUE16-NEXT: scratch_store_b32 off, v45, s32 offset:292
+; GFX11-TRUE16-NEXT: scratch_store_b32 off, v46, s32 offset:288
+; GFX11-TRUE16-NEXT: scratch_store_b32 off, v47, s32 offset:284
+; GFX11-TRUE16-NEXT: scratch_store_b32 off, v56, s32 offset:280
+; GFX11-TRUE16-NEXT: scratch_store_b32 off, v57, s32 offset:276
+; GFX11-TRUE16-NEXT: scratch_store_b32 off, v58, s32 offset:272
+; GFX11-TRUE16-NEXT: scratch_store_b32 off, v59, s32 offset:268
+; GFX11-TRUE16-NEXT: scratch_store_b32 off, v60, s32 offset:264
+; GFX11-TRUE16-NEXT: scratch_store_b32 off, v61, s32 offset:260
+; GFX11-TRUE16-NEXT: scratch_store_b32 off, v62, s32 offset:256
+; GFX11-TRUE16-NEXT: scratch_store_b32 off, v63, s32 offset:252
+; GFX11-TRUE16-NEXT: scratch_store_b32 off, v72, s32 offset:248
+; GFX11-TRUE16-NEXT: scratch_store_b32 off, v73, s32 offset:244
+; GFX11-TRUE16-NEXT: scratch_store_b32 off, v74, s32 offset:240
+; GFX11-TRUE16-NEXT: scratch_store_b32 off, v75, s32 offset:236
+; GFX11-TRUE16-NEXT: scratch_store_b32 off, v76, s32 offset:232
+; GFX11-TRUE16-NEXT: scratch_store_b32 off, v77, s32 offset:228
+; GFX11-TRUE16-NEXT: scratch_store_b32 off, v78, s32 offset:224
+; GFX11-TRUE16-NEXT: scratch_store_b32 off, v79, s32 offset:220
+; GFX11-TRUE16-NEXT: scratch_store_b32 off, v88, s32 offset:216
+; GFX11-TRUE16-NEXT: scratch_store_b32 off, v89, s32 offset:212
+; GFX11-TRUE16-NEXT: scratch_store_b32 off, v90, s32 offset:208
+; GFX11-TRUE16-NEXT: scratch_store_b32 off, v91, s32 offset:204
+; GFX11-TRUE16-NEXT: scratch_store_b32 off, v92, s32 offset:200
+; GFX11-TRUE16-NEXT: scratch_store_b32 off, v93, s32 offset:196
+; GFX11-TRUE16-NEXT: scratch_store_b32 off, v94, s32 offset:192
+; GFX11-TRUE16-NEXT: scratch_store_b32 off, v95, s32 offset:188
+; GFX11-TRUE16-NEXT: s_clause 0x1f
+; GFX11-TRUE16-NEXT: scratch_store_b32 off, v104, s32 offset:184
+; GFX11-TRUE16-NEXT: scratch_store_b32 off, v105, s32 offset:180
+; GFX11-TRUE16-NEXT: scratch_store_b32 off, v106, s32 offset:176
+; GFX11-TRUE16-NEXT: scratch_store_b32 off, v107, s32 offset:172
+; GFX11-TRUE16-NEXT: scratch_store_b32 off, v108, s32 offset:168
+; GFX11-TRUE16-NEXT: scratch_store_b32 off, v109, s32 offset:164
+; GFX11-TRUE16-NEXT: scratch_store_b32 off, v110, s32 offset:160
+; GFX11-TRUE16-NEXT: scratch_store_b32 off, v111, s32 offset:156
+; GFX11-TRUE16-NEXT: scratch_store_b32 off, v120, s32 offset:152
+; GFX11-TRUE16-NEXT: scratch_store_b32 off, v121, s32 offset:148
+; GFX11-TRUE16-NEXT: scratch_store_b32 off, v122, s32 offset:144
+; GFX11-TRUE16-NEXT: scratch_store_b32 off, v123, s32 offset:140
+; GFX11-TRUE16-NEXT: scratch_store_b32 off, v124, s32 offset:136
+; GFX11-TRUE16-NEXT: scratch_store_b32 off, v125, s32 offset:132
+; GFX11-TRUE16-NEXT: scratch_store_b32 off, v126, s32 offset:128
+; GFX11-TRUE16-NEXT: scratch_store_b32 off, v127, s32 offset:124
+; GFX11-TRUE16-NEXT: scratch_store_b32 off, v136, s32 offset:120
+; GFX11-TRUE16-NEXT: scratch_store_b32 off, v137, s32 offset:116
+; GFX11-TRUE16-NEXT: scratch_store_b32 off, v138, s32 offset:112
+; GFX11-TRUE16-NEXT: scratch_store_b32 off, v139, s32 offset:108
+; GFX11-TRUE16-NEXT: scratch_store_b32 off, v140, s32 offset:104
+; GFX11-TRUE16-NEXT: scratch_store_b32 off, v141, s32 offset:100
+; GFX11-TRUE16-NEXT: scratch_store_b32 off, v142, s32 offset:96
+; GFX11-TRUE16-NEXT: scratch_store_b32 off, v143, s32 offset:92
+; GFX11-TRUE16-NEXT: scratch_store_b32 off, v152, s32 offset:88
+; GFX11-TRUE16-NEXT: scratch_store_b32 off, v153, s32 offset:84
+; GFX11-TRUE16-NEXT: scratch_store_b32 off, v154, s32 offset:80
+; GFX11-TRUE16-NEXT: scratch_store_b32 off, v155, s32 offset:76
+; GFX11-TRUE16-NEXT: scratch_store_b32 off, v156, s32 offset:72
+; GFX11-TRUE16-NEXT: scratch_store_b32 off, v157, s32 offset:68
+; GFX11-TRUE16-NEXT: scratch_store_b32 off, v158, s32 offset:64
+; GFX11-TRUE16-NEXT: scratch_store_b32 off, v159, s32 offset:60
+; GFX11-TRUE16-NEXT: s_clause 0xe
+; GFX11-TRUE16-NEXT: scratch_store_b32 off, v168, s32 offset:56
+; GFX11-TRUE16-NEXT: scratch_store_b32 off, v169, s32 offset:52
+; GFX11-TRUE16-NEXT: scratch_store_b32 off, v170, s32 offset:48
+; GFX11-TRUE16-NEXT: scratch_store_b32 off, v171, s32 offset:44
+; GFX11-TRUE16-NEXT: scratch_store_b32 off, v172, s32 offset:40
+; GFX11-TRUE16-NEXT: scratch_store_b32 off, v173, s32 offset:36
+; GFX11-TRUE16-NEXT: scratch_store_b32 off, v174, s32 offset:32
+; GFX11-TRUE16-NEXT: scratch_store_b32 off, v175, s32 offset:28
+; GFX11-TRUE16-NEXT: scratch_store_b32 off, v184, s32 offset:24
+; GFX11-TRUE16-NEXT: scratch_store_b32 off, v185, s32 offset:20
+; GFX11-TRUE16-NEXT: scratch_store_b32 off, v186, s32 offset:16
+; GFX11-TRUE16-NEXT: scratch_store_b32 off, v187, s32 offset:12
+; GFX11-TRUE16-NEXT: scratch_store_b32 off, v188, s32 offset:8
+; GFX11-TRUE16-NEXT: scratch_store_b32 off, v189, s32 offset:4
+; GFX11-TRUE16-NEXT: scratch_store_b32 off, v190, s32
+; GFX11-TRUE16-NEXT: v_dual_mov_b32 v185, v5 :: v_dual_mov_b32 v186, v4
+; GFX11-TRUE16-NEXT: v_dual_mov_b32 v187, v3 :: v_dual_mov_b32 v188, v2
+; GFX11-TRUE16-NEXT: v_dual_mov_b32 v189, v1 :: v_dual_mov_b32 v190, v0
+; GFX11-TRUE16-NEXT: s_lshr_b32 s15, s29, 16
+; GFX11-TRUE16-NEXT: s_lshr_b32 s14, s28, 16
+; GFX11-TRUE16-NEXT: s_lshr_b32 s13, s27, 16
+; GFX11-TRUE16-NEXT: s_lshr_b32 s12, s26, 16
+; GFX11-TRUE16-NEXT: s_lshr_b32 s11, s25, 16
+; GFX11-TRUE16-NEXT: s_lshr_b32 s10, s24, 16
+; GFX11-TRUE16-NEXT: s_lshr_b32 s9, s23, 16
+; GFX11-TRUE16-NEXT: s_lshr_b32 s8, s22, 16
+; GFX11-TRUE16-NEXT: s_lshr_b32 s7, s21, 16
+; GFX11-TRUE16-NEXT: s_lshr_b32 s6, s20, 16
+; GFX11-TRUE16-NEXT: s_lshr_b32 s5, s19, 16
+; GFX11-TRUE16-NEXT: s_lshr_b32 s4, s18, 16
+; GFX11-TRUE16-NEXT: s_lshr_b32 s43, s17, 16
+; GFX11-TRUE16-NEXT: s_lshr_b32 s44, s16, 16
+; GFX11-TRUE16-NEXT: s_lshr_b32 s45, s3, 16
+; GFX11-TRUE16-NEXT: s_lshr_b32 s46, s2, 16
+; GFX11-TRUE16-NEXT: s_lshr_b32 s41, s1, 16
+; GFX11-TRUE16-NEXT: s_lshr_b32 s40, s0, 16
+; GFX11-TRUE16-NEXT: s_mov_b32 s42, 0
+; GFX11-TRUE16-NEXT: s_pack_ll_b32_b16 s40, s0, s40
+; GFX11-TRUE16-NEXT: s_pack_ll_b32_b16 s41, s1, s41
+; GFX11-TRUE16-NEXT: s_pack_ll_b32_b16 s0, s2, s46
+; GFX11-TRUE16-NEXT: s_pack_ll_b32_b16 s1, s3, s45
+; GFX11-TRUE16-NEXT: s_pack_ll_b32_b16 s2, s16, s44
+; GFX11-TRUE16-NEXT: s_pack_ll_b32_b16 s3, s17, s43
+; GFX11-TRUE16-NEXT: s_pack_ll_b32_b16 s4, s18, s4
+; GFX11-TRUE16-NEXT: s_pack_ll_b32_b16 s5, s19, s5
+; GFX11-TRUE16-NEXT: s_pack_ll_b32_b16 s6, s20, s6
+; GFX11-TRUE16-NEXT: s_pack_ll_b32_b16 s7, s21, s7
+; GFX11-TRUE16-NEXT: s_pack_ll_b32_b16 s8, s22, s8
+; GFX11-TRUE16-NEXT: s_pack_ll_b32_b16 s9, s23, s9
+; GFX11-TRUE16-NEXT: s_pack_ll_b32_b16 s10, s24, s10
+; GFX11-TRUE16-NEXT: s_pack_ll_b32_b16 s11, s25, s11
+; GFX11-TRUE16-NEXT: s_pack_ll_b32_b16 s12, s26, s12
+; GFX11-TRUE16-NEXT: s_pack_ll_b32_b16 s13, s27, s13
+; GFX11-TRUE16-NEXT: s_pack_ll_b32_b16 s14, s28, s14
+; GFX11-TRUE16-NEXT: s_pack_ll_b32_b16 s15, s29, s15
; GFX11-TRUE16-NEXT: s_and_b32 s47, vcc_lo, exec_lo
; GFX11-TRUE16-NEXT: s_cbranch_scc0 .LBB43_4
; GFX11-TRUE16-NEXT: ; %bb.1: ; %cmp.false
-; GFX11-TRUE16-NEXT: v_lshl_or_b32 v18, v37, 16, v51
-; GFX11-TRUE16-NEXT: v_lshl_or_b32 v19, v36, 16, v50
-; GFX11-TRUE16-NEXT: v_lshl_or_b32 v20, v35, 16, v49
-; GFX11-TRUE16-NEXT: v_lshl_or_b32 v21, v34, 16, v48
-; GFX11-TRUE16-NEXT: v_lshl_or_b32 v22, v33, 16, v39
-; GFX11-TRUE16-NEXT: v_lshl_or_b32 v23, v32, 16, v38
-; GFX11-TRUE16-NEXT: v_dual_mov_b32 v0, s4 :: v_dual_mov_b32 v1, s1
-; GFX11-TRUE16-NEXT: v_dual_mov_b32 v2, s2 :: v_dual_mov_b32 v3, s3
-; GFX11-TRUE16-NEXT: v_dual_mov_b32 v4, s5 :: v_dual_mov_b32 v5, s6
-; GFX11-TRUE16-NEXT: v_dual_mov_b32 v6, s7 :: v_dual_mov_b32 v7, s8
-; GFX11-TRUE16-NEXT: v_dual_mov_b32 v8, s9 :: v_dual_mov_b32 v9, s10
-; GFX11-TRUE16-NEXT: v_dual_mov_b32 v10, s11 :: v_dual_mov_b32 v11, s12
-; GFX11-TRUE16-NEXT: v_dual_mov_b32 v12, s13 :: v_dual_mov_b32 v13, s14
-; GFX11-TRUE16-NEXT: v_dual_mov_b32 v14, s0 :: v_dual_mov_b32 v15, s15
-; GFX11-TRUE16-NEXT: v_dual_mov_b32 v16, s16 :: v_dual_mov_b32 v17, s17
-; GFX11-TRUE16-NEXT: s_and_not1_b32 vcc_lo, exec_lo, s40
+; GFX11-TRUE16-NEXT: v_dual_mov_b32 v0, s40 :: v_dual_mov_b32 v5, s0
+; GFX11-TRUE16-NEXT: v_dual_mov_b32 v2, s41 :: v_dual_mov_b32 v9, s1
+; GFX11-TRUE16-NEXT: v_dual_mov_b32 v14, s2 :: v_dual_mov_b32 v27, s4
+; GFX11-TRUE16-NEXT: v_dual_mov_b32 v20, s3 :: v_dual_mov_b32 v35, s5
+; GFX11-TRUE16-NEXT: v_dual_mov_b32 v44, s6 :: v_dual_mov_b32 v65, s8
+; GFX11-TRUE16-NEXT: v_dual_mov_b32 v54, s7 :: v_dual_mov_b32 v77, s9
+; GFX11-TRUE16-NEXT: v_dual_mov_b32 v90, s10 :: v_dual_mov_b32 v119, s12
+; GFX11-TRUE16-NEXT: v_dual_mov_b32 v104, s11 :: v_dual_mov_b32 v135, s13
+; GFX11-TRUE16-NEXT: v_mov_b32_e32 v152, s14
+; GFX11-TRUE16-NEXT: v_mov_b32_e32 v170, s15
+; GFX11-TRUE16-NEXT: s_and_not1_b32 vcc_lo, exec_lo, s42
; GFX11-TRUE16-NEXT: s_cbranch_vccnz .LBB43_3
; GFX11-TRUE16-NEXT: .LBB43_2: ; %cmp.true
-; GFX11-TRUE16-NEXT: v_lshl_or_b32 v18, v37, 16, v51
-; GFX11-TRUE16-NEXT: v_lshl_or_b32 v19, v36, 16, v50
-; GFX11-TRUE16-NEXT: v_lshl_or_b32 v20, v35, 16, v49
-; GFX11-TRUE16-NEXT: v_lshl_or_b32 v21, v34, 16, v48
-; GFX11-TRUE16-NEXT: v_lshl_or_b32 v22, v33, 16, v39
-; GFX11-TRUE16-NEXT: v_lshl_or_b32 v23, v32, 16, v38
-; GFX11-TRUE16-NEXT: v_pk_add_u16 v0, s4, 3 op_sel_hi:[1,0]
-; GFX11-TRUE16-NEXT: v_pk_add_u16 v1, s1, 3 op_sel_hi:[1,0]
-; GFX11-TRUE16-NEXT: v_pk_add_u16 v2, s2, 3 op_sel_hi:[1,0]
-; GFX11-TRUE16-NEXT: v_pk_add_u16 v3, s3, 3 op_sel_hi:[1,0]
-; GFX11-TRUE16-NEXT: v_pk_add_u16 v4, s5, 3 op_sel_hi:[1,0]
-; GFX11-TRUE16-NEXT: v_pk_add_u16 v5, s6, 3 op_sel_hi:[1,0]
-; GFX11-TRUE16-NEXT: v_pk_add_u16 v6, s7, 3 op_sel_hi:[1,0]
-; GFX11-TRUE16-NEXT: v_pk_add_u16 v7, s8, 3 op_sel_hi:[1,0]
-; GFX11-TRUE16-NEXT: v_pk_add_u16 v8, s9, 3 op_sel_hi:[1,0]
-; GFX11-TRUE16-NEXT: v_pk_add_u16 v9, s10, 3 op_sel_hi:[1,0]
-; GFX11-TRUE16-NEXT: v_pk_add_u16 v10, s11, 3 op_sel_hi:[1,0]
-; GFX11-TRUE16-NEXT: v_pk_add_u16 v11, s12, 3 op_sel_hi:[1,0]
-; GFX11-TRUE16-NEXT: v_pk_add_u16 v12, s13, 3 op_sel_hi:[1,0]
-; GFX11-TRUE16-NEXT: v_pk_add_u16 v13, s14, 3 op_sel_hi:[1,0]
-; GFX11-TRUE16-NEXT: v_pk_add_u16 v14, s0, 3 op_sel_hi:[1,0]
-; GFX11-TRUE16-NEXT: v_pk_add_u16 v15, s15, 3 op_sel_hi:[1,0]
-; GFX11-TRUE16-NEXT: v_pk_add_u16 v16, s16, 3 op_sel_hi:[1,0]
-; GFX11-TRUE16-NEXT: v_pk_add_u16 v17, s17, 3 op_sel_hi:[1,0]
-; GFX11-TRUE16-NEXT: v_pk_add_u16 v18, v18, 3 op_sel_hi:[1,0]
-; GFX11-TRUE16-NEXT: v_pk_add_u16 v19, v19, 3 op_sel_hi:[1,0]
-; GFX11-TRUE16-NEXT: v_pk_add_u16 v20, v20, 3 op_sel_hi:[1,0]
-; GFX11-TRUE16-NEXT: v_pk_add_u16 v21, v21, 3 op_sel_hi:[1,0]
-; GFX11-TRUE16-NEXT: v_pk_add_u16 v22, v22, 3 op_sel_hi:[1,0]
-; GFX11-TRUE16-NEXT: v_pk_add_u16 v23, v23, 3 op_sel_hi:[1,0]
+; GFX11-TRUE16-NEXT: v_pk_add_u16 v0, s40, 3 op_sel_hi:[1,0]
+; GFX11-TRUE16-NEXT: v_pk_add_u16 v2, s41, 3 op_sel_hi:[1,0]
+; GFX11-TRUE16-NEXT: v_pk_add_u16 v190, v190, 3 op_sel_hi:[1,0]
+; GFX11-TRUE16-NEXT: v_pk_add_u16 v189, v189, 3 op_sel_hi:[1,0]
+; GFX11-TRUE16-NEXT: v_pk_add_u16 v188, v188, 3 op_sel_hi:[1,0]
+; GFX11-TRUE16-NEXT: v_pk_add_u16 v187, v187, 3 op_sel_hi:[1,0]
+; GFX11-TRUE16-NEXT: v_pk_add_u16 v186, v186, 3 op_sel_hi:[1,0]
+; GFX11-TRUE16-NEXT: v_pk_add_u16 v185, v185, 3 op_sel_hi:[1,0]
+; GFX11-TRUE16-NEXT: v_pk_add_u16 v5, s0, 3 op_sel_hi:[1,0]
+; GFX11-TRUE16-NEXT: v_pk_add_u16 v9, s1, 3 op_sel_hi:[1,0]
+; GFX11-TRUE16-NEXT: v_pk_add_u16 v14, s2, 3 op_sel_hi:[1,0]
+; GFX11-TRUE16-NEXT: v_pk_add_u16 v20, s3, 3 op_sel_hi:[1,0]
+; GFX11-TRUE16-NEXT: v_pk_add_u16 v27, s4, 3 op_sel_hi:[1,0]
+; GFX11-TRUE16-NEXT: v_pk_add_u16 v35, s5, 3 op_sel_hi:[1,0]
+; GFX11-TRUE16-NEXT: v_pk_add_u16 v44, s6, 3 op_sel_hi:[1,0]
+; GFX11-TRUE16-NEXT: v_pk_add_u16 v54, s7, 3 op_sel_hi:[1,0]
+; GFX11-TRUE16-NEXT: v_pk_add_u16 v65, s8, 3 op_sel_hi:[1,0]
+; GFX11-TRUE16-NEXT: v_pk_add_u16 v77, s9, 3 op_sel_hi:[1,0]
+; GFX11-TRUE16-NEXT: v_pk_add_u16 v90, s10, 3 op_sel_hi:[1,0]
+; GFX11-TRUE16-NEXT: v_pk_add_u16 v104, s11, 3 op_sel_hi:[1,0]
+; GFX11-TRUE16-NEXT: v_pk_add_u16 v119, s12, 3 op_sel_hi:[1,0]
+; GFX11-TRUE16-NEXT: v_pk_add_u16 v135, s13, 3 op_sel_hi:[1,0]
+; GFX11-TRUE16-NEXT: v_pk_add_u16 v152, s14, 3 op_sel_hi:[1,0]
+; GFX11-TRUE16-NEXT: v_pk_add_u16 v170, s15, 3 op_sel_hi:[1,0]
; GFX11-TRUE16-NEXT: .LBB43_3: ; %end
+; GFX11-TRUE16-NEXT: v_dual_mov_b32 v1, v2 :: v_dual_mov_b32 v2, v5
+; GFX11-TRUE16-NEXT: v_dual_mov_b32 v5, v20 :: v_dual_mov_b32 v6, v27
+; GFX11-TRUE16-NEXT: v_dual_mov_b32 v7, v35 :: v_dual_mov_b32 v8, v44
+; GFX11-TRUE16-NEXT: v_dual_mov_b32 v11, v77 :: v_dual_mov_b32 v12, v90
+; GFX11-TRUE16-NEXT: v_mov_b32_e32 v13, v104
+; GFX11-TRUE16-NEXT: v_dual_mov_b32 v15, v135 :: v_dual_mov_b32 v16, v152
+; GFX11-TRUE16-NEXT: v_dual_mov_b32 v17, v170 :: v_dual_mov_b32 v20, v188
+; GFX11-TRUE16-NEXT: v_dual_mov_b32 v18, v190 :: v_dual_mov_b32 v19, v189
+; GFX11-TRUE16-NEXT: v_dual_mov_b32 v21, v187 :: v_dual_mov_b32 v22, v186
+; GFX11-TRUE16-NEXT: v_mov_b32_e32 v23, v185
+; GFX11-TRUE16-NEXT: s_clause 0x1f
+; GFX11-TRUE16-NEXT: scratch_load_b32 v190, off, s32
+; GFX11-TRUE16-NEXT: scratch_load_b32 v189, off, s32 offset:4
+; GFX11-TRUE16-NEXT: scratch_load_b32 v188, off, s32 offset:8
+; GFX11-TRUE16-NEXT: scratch_load_b32 v187, off, s32 offset:12
+; GFX11-TRUE16-NEXT: scratch_load_b32 v186, off, s32 offset:16
+; GFX11-TRUE16-NEXT: scratch_load_b32 v185, off, s32 offset:20
+; GFX11-TRUE16-NEXT: scratch_load_b32 v184, off, s32 offset:24
+; GFX11-TRUE16-NEXT: scratch_load_b32 v175, off, s32 offset:28
+; GFX11-TRUE16-NEXT: scratch_load_b32 v174, off, s32 offset:32
+; GFX11-TRUE16-NEXT: scratch_load_b32 v173, off, s32 offset:36
+; GFX11-TRUE16-NEXT: scratch_load_b32 v172, off, s32 offset:40
+; GFX11-TRUE16-NEXT: scratch_load_b32 v171, off, s32 offset:44
+; GFX11-TRUE16-NEXT: scratch_load_b32 v170, off, s32 offset:48
+; GFX11-TRUE16-NEXT: scratch_load_b32 v169, off, s32 offset:52
+; GFX11-TRUE16-NEXT: scratch_load_b32 v168, off, s32 offset:56
+; GFX11-TRUE16-NEXT: scratch_load_b32 v159, off, s32 offset:60
+; GFX11-TRUE16-NEXT: scratch_load_b32 v158, off, s32 offset:64
+; GFX11-TRUE16-NEXT: scratch_load_b32 v157, off, s32 offset:68
+; GFX11-TRUE16-NEXT: scratch_load_b32 v156, off, s32 offset:72
+; GFX11-TRUE16-NEXT: scratch_load_b32 v155, off, s32 offset:76
+; GFX11-TRUE16-NEXT: scratch_load_b32 v154, off, s32 offset:80
+; GFX11-TRUE16-NEXT: scratch_load_b32 v153, off, s32 offset:84
+; GFX11-TRUE16-NEXT: scratch_load_b32 v152, off, s32 offset:88
+; GFX11-TRUE16-NEXT: scratch_load_b32 v143, off, s32 offset:92
+; GFX11-TRUE16-NEXT: scratch_load_b32 v142, off, s32 offset:96
+; GFX11-TRUE16-NEXT: scratch_load_b32 v141, off, s32 offset:100
+; GFX11-TRUE16-NEXT: scratch_load_b32 v140, off, s32 offset:104
+; GFX11-TRUE16-NEXT: scratch_load_b32 v139, off, s32 offset:108
+; GFX11-TRUE16-NEXT: scratch_load_b32 v138, off, s32 offset:112
+; GFX11-TRUE16-NEXT: scratch_load_b32 v137, off, s32 offset:116
+; GFX11-TRUE16-NEXT: scratch_load_b32 v136, off, s32 offset:120
+; GFX11-TRUE16-NEXT: scratch_load_b32 v127, off, s32 offset:124
+; GFX11-TRUE16-NEXT: s_clause 0x1f
+; GFX11-TRUE16-NEXT: scratch_load_b32 v126, off, s32 offset:128
+; GFX11-TRUE16-NEXT: scratch_load_b32 v125, off, s32 offset:132
+; GFX11-TRUE16-NEXT: scratch_load_b32 v124, off, s32 offset:136
+; GFX11-TRUE16-NEXT: scratch_load_b32 v123, off, s32 offset:140
+; GFX11-TRUE16-NEXT: scratch_load_b32 v122, off, s32 offset:144
+; GFX11-TRUE16-NEXT: scratch_load_b32 v121, off, s32 offset:148
+; GFX11-TRUE16-NEXT: scratch_load_b32 v120, off, s32 offset:152
+; GFX11-TRUE16-NEXT: scratch_load_b32 v111, off, s32 offset:156
+; GFX11-TRUE16-NEXT: scratch_load_b32 v110, off, s32 offset:160
+; GFX11-TRUE16-NEXT: scratch_load_b32 v109, off, s32 offset:164
+; GFX11-TRUE16-NEXT: scratch_load_b32 v108, off, s32 offset:168
+; GFX11-TRUE16-NEXT: scratch_load_b32 v107, off, s32 offset:172
+; GFX11-TRUE16-NEXT: scratch_load_b32 v106, off, s32 offset:176
+; GFX11-TRUE16-NEXT: scratch_load_b32 v105, off, s32 offset:180
+; GFX11-TRUE16-NEXT: scratch_load_b32 v104, off, s32 offset:184
+; GFX11-TRUE16-NEXT: scratch_load_b32 v95, off, s32 offset:188
+; GFX11-TRUE16-NEXT: scratch_load_b32 v94, off, s32 offset:192
+; GFX11-TRUE16-NEXT: scratch_load_b32 v93, off, s32 offset:196
+; GFX11-TRUE16-NEXT: scratch_load_b32 v92, off, s32 offset:200
+; GFX11-TRUE16-NEXT: scratch_load_b32 v91, off, s32 offset:204
+; GFX11-TRUE16-NEXT: scratch_load_b32 v90, off, s32 offset:208
+; GFX11-TRUE16-NEXT: scratch_load_b32 v89, off, s32 offset:212
+; GFX11-TRUE16-NEXT: scratch_load_b32 v88, off, s32 offset:216
+; GFX11-TRUE16-NEXT: scratch_load_b32 v79, off, s32 offset:220
+; GFX11-TRUE16-NEXT: scratch_load_b32 v78, off, s32 offset:224
+; GFX11-TRUE16-NEXT: scratch_load_b32 v77, off, s32 offset:228
+; GFX11-TRUE16-NEXT: scratch_load_b32 v76, off, s32 offset:232
+; GFX11-TRUE16-NEXT: scratch_load_b32 v75, off, s32 offset:236
+; GFX11-TRUE16-NEXT: scratch_load_b32 v74, off, s32 offset:240
+; GFX11-TRUE16-NEXT: scratch_load_b32 v73, off, s32 offset:244
+; GFX11-TRUE16-NEXT: scratch_load_b32 v72, off, s32 offset:248
+; GFX11-TRUE16-NEXT: scratch_load_b32 v63, off, s32 offset:252
+; GFX11-TRUE16-NEXT: s_clause 0xe
+; GFX11-TRUE16-NEXT: scratch_load_b32 v62, off, s32 offset:256
+; GFX11-TRUE16-NEXT: scratch_load_b32 v61, off, s32 offset:260
+; GFX11-TRUE16-NEXT: scratch_load_b32 v60, off, s32 offset:264
+; GFX11-TRUE16-NEXT: scratch_load_b32 v59, off, s32 offset:268
+; GFX11-TRUE16-NEXT: scratch_load_b32 v58, off, s32 offset:272
+; GFX11-TRUE16-NEXT: scratch_load_b32 v57, off, s32 offset:276
+; GFX11-TRUE16-NEXT: scratch_load_b32 v56, off, s32 offset:280
+; GFX11-TRUE16-NEXT: scratch_load_b32 v47, off, s32 offset:284
+; GFX11-TRUE16-NEXT: scratch_load_b32 v46, off, s32 offset:288
+; GFX11-TRUE16-NEXT: scratch_load_b32 v45, off, s32 offset:292
+; GFX11-TRUE16-NEXT: scratch_load_b32 v44, off, s32 offset:296
+; GFX11-TRUE16-NEXT: scratch_load_b32 v43, off, s32 offset:300
+; GFX11-TRUE16-NEXT: scratch_load_b32 v42, off, s32 offset:304
+; GFX11-TRUE16-NEXT: scratch_load_b32 v41, off, s32 offset:308
+; GFX11-TRUE16-NEXT: scratch_load_b32 v40, off, s32 offset:312
+; GFX11-TRUE16-NEXT: v_dual_mov_b32 v3, v9 :: v_dual_mov_b32 v4, v14
+; GFX11-TRUE16-NEXT: v_dual_mov_b32 v9, v54 :: v_dual_mov_b32 v10, v65
+; GFX11-TRUE16-NEXT: v_mov_b32_e32 v14, v119
+; GFX11-TRUE16-NEXT: s_waitcnt vmcnt(0)
; GFX11-TRUE16-NEXT: s_setpc_b64 s[30:31]
; GFX11-TRUE16-NEXT: .LBB43_4:
; GFX11-TRUE16-NEXT: ; implicit-def: $vgpr0_vgpr1_vgpr2_vgpr3_vgpr4_vgpr5_vgpr6_vgpr7_vgpr8_vgpr9_vgpr10_vgpr11_vgpr12_vgpr13_vgpr14_vgpr15_vgpr16_vgpr17_vgpr18_vgpr19_vgpr20_vgpr21_vgpr22_vgpr23_vgpr24_vgpr25_vgpr26_vgpr27_vgpr28_vgpr29_vgpr30_vgpr31
+; GFX11-TRUE16-NEXT: ; implicit-def: $vgpr1_vgpr2_vgpr3_vgpr4_vgpr5_vgpr6_vgpr7_vgpr8_vgpr9_vgpr10_vgpr11_vgpr12_vgpr13_vgpr14_vgpr15_vgpr16_vgpr17_vgpr18_vgpr19_vgpr20_vgpr21_vgpr22_vgpr23_vgpr24_vgpr25_vgpr26_vgpr27_vgpr28_vgpr29_vgpr30_vgpr31_vgpr32
+; GFX11-TRUE16-NEXT: ; implicit-def: $vgpr3_vgpr4_vgpr5_vgpr6_vgpr7_vgpr8_vgpr9_vgpr10_vgpr11_vgpr12_vgpr13_vgpr14_vgpr15_vgpr16_vgpr17_vgpr18_vgpr19_vgpr20_vgpr21_vgpr22_vgpr23_vgpr24_vgpr25_vgpr26_vgpr27_vgpr28_vgpr29_vgpr30_vgpr31_vgpr32_vgpr33_vgpr34
+; GFX11-TRUE16-NEXT: ; implicit-def: $vgpr6_vgpr7_vgpr8_vgpr9_vgpr10_vgpr11_vgpr12_vgpr13_vgpr14_vgpr15_vgpr16_vgpr17_vgpr18_vgpr19_vgpr20_vgpr21_vgpr22_vgpr23_vgpr24_vgpr25_vgpr26_vgpr27_vgpr28_vgpr29_vgpr30_vgpr31_vgpr32_vgpr33_vgpr34_vgpr35_vgpr36_vgpr37
+; GFX11-TRUE16-NEXT: ; implicit-def: $vgpr10_vgpr11_vgpr12_vgpr13_vgpr14_vgpr15_vgpr16_vgpr17_vgpr18_vgpr19_vgpr20_vgpr21_vgpr22_vgpr23_vgpr24_vgpr25_vgpr26_vgpr27_vgpr28_vgpr29_vgpr30_vgpr31_vgpr32_vgpr33_vgpr34_vgpr35_vgpr36_vgpr37_vgpr38_vgpr39_vgpr40_vgpr41
+; GFX11-TRUE16-NEXT: ; implicit-def: $vgpr15_vgpr16_vgpr17_vgpr18_vgpr19_vgpr20_vgpr21_vgpr22_vgpr23_vgpr24_vgpr25_vgpr26_vgpr27_vgpr28_vgpr29_vgpr30_vgpr31_vgpr32_vgpr33_vgpr34_vgpr35_vgpr36_vgpr37_vgpr38_vgpr39_vgpr40_vgpr41_vgpr42_vgpr43_vgpr44_vgpr45_vgpr46
+; GFX11-TRUE16-NEXT: ; implicit-def: $vgpr21_vgpr22_vgpr23_vgpr24_vgpr25_vgpr26_vgpr27_vgpr28_vgpr29_vgpr30_vgpr31_vgpr32_vgpr33_vgpr34_vgpr35_vgpr36_vgpr37_vgpr38_vgpr39_vgpr40_vgpr41_vgpr42_vgpr43_vgpr44_vgpr45_vgpr46_vgpr47_vgpr48_vgpr49_vgpr50_vgpr51_vgpr52
+; GFX11-TRUE16-NEXT: ; implicit-def: $vgpr28_vgpr29_vgpr30_vgpr31_vgpr32_vgpr33_vgpr34_vgpr35_vgpr36_vgpr37_vgpr38_vgpr39_vgpr40_vgpr41_vgpr42_vgpr43_vgpr44_vgpr45_vgpr46_vgpr47_vgpr48_vgpr49_vgpr50_vgpr51_vgpr52_vgpr53_vgpr54_vgpr55_vgpr56_vgpr57_vgpr58_vgpr59
+; GFX11-TRUE16-NEXT: ; implicit-def: $vgpr36_vgpr37_vgpr38_vgpr39_vgpr40_vgpr41_vgpr42_vgpr43_vgpr44_vgpr45_vgpr46_vgpr47_vgpr48_vgpr49_vgpr50_vgpr51_vgpr52_vgpr53_vgpr54_vgpr55_vgpr56_vgpr57_vgpr58_vgpr59_vgpr60_vgpr61_vgpr62_vgpr63_vgpr64_vgpr65_vgpr66_vgpr67
+; GFX11-TRUE16-NEXT: ; implicit-def: $vgpr45_vgpr46_vgpr47_vgpr48_vgpr49_vgpr50_vgpr51_vgpr52_vgpr53_vgpr54_vgpr55_vgpr56_vgpr57_vgpr58_vgpr59_vgpr60_vgpr61_vgpr62_vgpr63_vgpr64_vgpr65_vgpr66_vgpr67_vgpr68_vgpr69_vgpr70_vgpr71_vgpr72_vgpr73_vgpr74_vgpr75_vgpr76
+; GFX11-TRUE16-NEXT: ; implicit-def: $vgpr55_vgpr56_vgpr57_vgpr58_vgpr59_vgpr60_vgpr61_vgpr62_vgpr63_vgpr64_vgpr65_vgpr66_vgpr67_vgpr68_vgpr69_vgpr70_vgpr71_vgpr72_vgpr73_vgpr74_vgpr75_vgpr76_vgpr77_vgpr78_vgpr79_vgpr80_vgpr81_vgpr82_vgpr83_vgpr84_vgpr85_vgpr86
+; GFX11-TRUE16-NEXT: ; implicit-def: $vgpr66_vgpr67_vgpr68_vgpr69_vgpr70_vgpr71_vgpr72_vgpr73_vgpr74_vgpr75_vgpr76_vgpr77_vgpr78_vgpr79_vgpr80_vgpr81_vgpr82_vgpr83_vgpr84_vgpr85_vgpr86_vgpr87_vgpr88_vgpr89_vgpr90_vgpr91_vgpr92_vgpr93_vgpr94_vgpr95_vgpr96_vgpr97
+; GFX11-TRUE16-NEXT: ; implicit-def: $vgpr78_vgpr79_vgpr80_vgpr81_vgpr82_vgpr83_vgpr84_vgpr85_vgpr86_vgpr87_vgpr88_vgpr89_vgpr90_vgpr91_vgpr92_vgpr93_vgpr94_vgpr95_vgpr96_vgpr97_vgpr98_vgpr99_vgpr100_vgpr101_vgpr102_vgpr103_vgpr104_vgpr105_vgpr106_vgpr107_vgpr108_vgpr109
+; GFX11-TRUE16-NEXT: ; implicit-def: $vgpr91_vgpr92_vgpr93_vgpr94_vgpr95_vgpr96_vgpr97_vgpr98_vgpr99_vgpr100_vgpr101_vgpr102_vgpr103_vgpr104_vgpr105_vgpr106_vgpr107_vgpr108_vgpr109_vgpr110_vgpr111_vgpr112_vgpr113_vgpr114_vgpr115_vgpr116_vgpr117_vgpr118_vgpr119_vgpr120_vgpr121_vgpr122
+; GFX11-TRUE16-NEXT: ; implicit-def: $vgpr105_vgpr106_vgpr107_vgpr108_vgpr109_vgpr110_vgpr111_vgpr112_vgpr113_vgpr114_vgpr115_vgpr116_vgpr117_vgpr118_vgpr119_vgpr120_vgpr121_vgpr122_vgpr123_vgpr124_vgpr125_vgpr126_vgpr127_vgpr128_vgpr129_vgpr130_vgpr131_vgpr132_vgpr133_vgpr134_vgpr135_vgpr136
+; GFX11-TRUE16-NEXT: ; implicit-def: $vgpr120_vgpr121_vgpr122_vgpr123_vgpr124_vgpr125_vgpr126_vgpr127_vgpr128_vgpr129_vgpr130_vgpr131_vgpr132_vgpr133_vgpr134_vgpr135_vgpr136_vgpr137_vgpr138_vgpr139_vgpr140_vgpr141_vgpr142_vgpr143_vgpr144_vgpr145_vgpr146_vgpr147_vgpr148_vgpr149_vgpr150_vgpr151
+; GFX11-TRUE16-NEXT: ; implicit-def: $vgpr136_vgpr137_vgpr138_vgpr139_vgpr140_vgpr141_vgpr142_vgpr143_vgpr144_vgpr145_vgpr146_vgpr147_vgpr148_vgpr149_vgpr150_vgpr151_vgpr152_vgpr153_vgpr154_vgpr155_vgpr156_vgpr157_vgpr158_vgpr159_vgpr160_vgpr161_vgpr162_vgpr163_vgpr164_vgpr165_vgpr166_vgpr167
+; GFX11-TRUE16-NEXT: ; implicit-def: $vgpr153_vgpr154_vgpr155_vgpr156_vgpr157_vgpr158_vgpr159_vgpr160_vgpr161_vgpr162_vgpr163_vgpr164_vgpr165_vgpr166_vgpr167_vgpr168_vgpr169_vgpr170_vgpr171_vgpr172_vgpr173_vgpr174_vgpr175_vgpr176_vgpr177_vgpr178_vgpr179_vgpr180_vgpr181_vgpr182_vgpr183_vgpr184
; GFX11-TRUE16-NEXT: s_branch .LBB43_2
;
; GFX11-FAKE16-LABEL: bitcast_v48i16_to_v12i64_scalar:
@@ -28015,117 +29162,286 @@ define inreg <12 x i64> @bitcast_v48f16_to_v12i64_scalar(<48 x half> inreg %a, i
; GFX11-TRUE16-LABEL: bitcast_v48f16_to_v12i64_scalar:
; GFX11-TRUE16: ; %bb.0:
; GFX11-TRUE16-NEXT: s_waitcnt vmcnt(0) expcnt(0) lgkmcnt(0)
-; GFX11-TRUE16-NEXT: v_mov_b16_e32 v32.h, 0
; GFX11-TRUE16-NEXT: v_cmp_ne_u32_e32 vcc_lo, 0, v6
-; GFX11-TRUE16-NEXT: v_mov_b16_e32 v32.l, v5.h
-; GFX11-TRUE16-NEXT: v_mov_b16_e32 v33.l, v4.h
-; GFX11-TRUE16-NEXT: v_mov_b16_e32 v34.l, v3.h
-; GFX11-TRUE16-NEXT: v_mov_b16_e32 v33.h, v32.h
-; GFX11-TRUE16-NEXT: v_mov_b16_e32 v34.h, v32.h
-; GFX11-TRUE16-NEXT: v_mov_b16_e32 v35.l, v2.h
-; GFX11-TRUE16-NEXT: v_mov_b16_e32 v35.h, v32.h
-; GFX11-TRUE16-NEXT: v_mov_b16_e32 v36.l, v1.h
-; GFX11-TRUE16-NEXT: v_mov_b16_e32 v36.h, v32.h
-; GFX11-TRUE16-NEXT: v_mov_b16_e32 v37.l, v0.h
-; GFX11-TRUE16-NEXT: v_mov_b16_e32 v37.h, v32.h
-; GFX11-TRUE16-NEXT: v_and_b32_e32 v51, 0xffff, v0
-; GFX11-TRUE16-NEXT: v_and_b32_e32 v50, 0xffff, v1
-; GFX11-TRUE16-NEXT: v_and_b32_e32 v49, 0xffff, v2
-; GFX11-TRUE16-NEXT: v_and_b32_e32 v48, 0xffff, v3
-; GFX11-TRUE16-NEXT: v_and_b32_e32 v39, 0xffff, v4
-; GFX11-TRUE16-NEXT: v_and_b32_e32 v38, 0xffff, v5
-; GFX11-TRUE16-NEXT: s_lshr_b32 s41, s29, 16
-; GFX11-TRUE16-NEXT: s_lshr_b32 s42, s28, 16
-; GFX11-TRUE16-NEXT: s_lshr_b32 s15, s27, 16
-; GFX11-TRUE16-NEXT: s_lshr_b32 s43, s26, 16
-; GFX11-TRUE16-NEXT: s_lshr_b32 s14, s25, 16
-; GFX11-TRUE16-NEXT: s_lshr_b32 s13, s24, 16
-; GFX11-TRUE16-NEXT: s_lshr_b32 s12, s23, 16
-; GFX11-TRUE16-NEXT: s_lshr_b32 s11, s22, 16
-; GFX11-TRUE16-NEXT: s_lshr_b32 s10, s21, 16
-; GFX11-TRUE16-NEXT: s_lshr_b32 s9, s20, 16
-; GFX11-TRUE16-NEXT: s_lshr_b32 s8, s19, 16
-; GFX11-TRUE16-NEXT: s_lshr_b32 s7, s18, 16
-; GFX11-TRUE16-NEXT: s_lshr_b32 s6, s17, 16
-; GFX11-TRUE16-NEXT: s_lshr_b32 s5, s16, 16
-; GFX11-TRUE16-NEXT: s_lshr_b32 s44, s3, 16
-; GFX11-TRUE16-NEXT: s_lshr_b32 s45, s2, 16
-; GFX11-TRUE16-NEXT: s_lshr_b32 s46, s1, 16
-; GFX11-TRUE16-NEXT: s_lshr_b32 s4, s0, 16
-; GFX11-TRUE16-NEXT: s_mov_b32 s40, 0
-; GFX11-TRUE16-NEXT: s_pack_ll_b32_b16 s4, s0, s4
-; GFX11-TRUE16-NEXT: s_pack_ll_b32_b16 s1, s1, s46
-; GFX11-TRUE16-NEXT: s_pack_ll_b32_b16 s2, s2, s45
-; GFX11-TRUE16-NEXT: s_pack_ll_b32_b16 s3, s3, s44
-; GFX11-TRUE16-NEXT: s_pack_ll_b32_b16 s5, s16, s5
-; GFX11-TRUE16-NEXT: s_pack_ll_b32_b16 s6, s17, s6
-; GFX11-TRUE16-NEXT: s_pack_ll_b32_b16 s7, s18, s7
-; GFX11-TRUE16-NEXT: s_pack_ll_b32_b16 s8, s19, s8
-; GFX11-TRUE16-NEXT: s_pack_ll_b32_b16 s9, s20, s9
-; GFX11-TRUE16-NEXT: s_pack_ll_b32_b16 s10, s21, s10
-; GFX11-TRUE16-NEXT: s_pack_ll_b32_b16 s11, s22, s11
-; GFX11-TRUE16-NEXT: s_pack_ll_b32_b16 s12, s23, s12
-; GFX11-TRUE16-NEXT: s_pack_ll_b32_b16 s13, s24, s13
-; GFX11-TRUE16-NEXT: s_pack_ll_b32_b16 s14, s25, s14
-; GFX11-TRUE16-NEXT: s_pack_ll_b32_b16 s0, s26, s43
-; GFX11-TRUE16-NEXT: s_pack_ll_b32_b16 s15, s27, s15
-; GFX11-TRUE16-NEXT: s_pack_ll_b32_b16 s16, s28, s42
-; GFX11-TRUE16-NEXT: s_pack_ll_b32_b16 s17, s29, s41
+; GFX11-TRUE16-NEXT: s_clause 0x1f
+; GFX11-TRUE16-NEXT: scratch_store_b32 off, v40, s32 offset:312
+; GFX11-TRUE16-NEXT: scratch_store_b32 off, v41, s32 offset:308
+; GFX11-TRUE16-NEXT: scratch_store_b32 off, v42, s32 offset:304
+; GFX11-TRUE16-NEXT: scratch_store_b32 off, v43, s32 offset:300
+; GFX11-TRUE16-NEXT: scratch_store_b32 off, v44, s32 offset:296
+; GFX11-TRUE16-NEXT: scratch_store_b32 off, v45, s32 offset:292
+; GFX11-TRUE16-NEXT: scratch_store_b32 off, v46, s32 offset:288
+; GFX11-TRUE16-NEXT: scratch_store_b32 off, v47, s32 offset:284
+; GFX11-TRUE16-NEXT: scratch_store_b32 off, v56, s32 offset:280
+; GFX11-TRUE16-NEXT: scratch_store_b32 off, v57, s32 offset:276
+; GFX11-TRUE16-NEXT: scratch_store_b32 off, v58, s32 offset:272
+; GFX11-TRUE16-NEXT: scratch_store_b32 off, v59, s32 offset:268
+; GFX11-TRUE16-NEXT: scratch_store_b32 off, v60, s32 offset:264
+; GFX11-TRUE16-NEXT: scratch_store_b32 off, v61, s32 offset:260
+; GFX11-TRUE16-NEXT: scratch_store_b32 off, v62, s32 offset:256
+; GFX11-TRUE16-NEXT: scratch_store_b32 off, v63, s32 offset:252
+; GFX11-TRUE16-NEXT: scratch_store_b32 off, v72, s32 offset:248
+; GFX11-TRUE16-NEXT: scratch_store_b32 off, v73, s32 offset:244
+; GFX11-TRUE16-NEXT: scratch_store_b32 off, v74, s32 offset:240
+; GFX11-TRUE16-NEXT: scratch_store_b32 off, v75, s32 offset:236
+; GFX11-TRUE16-NEXT: scratch_store_b32 off, v76, s32 offset:232
+; GFX11-TRUE16-NEXT: scratch_store_b32 off, v77, s32 offset:228
+; GFX11-TRUE16-NEXT: scratch_store_b32 off, v78, s32 offset:224
+; GFX11-TRUE16-NEXT: scratch_store_b32 off, v79, s32 offset:220
+; GFX11-TRUE16-NEXT: scratch_store_b32 off, v88, s32 offset:216
+; GFX11-TRUE16-NEXT: scratch_store_b32 off, v89, s32 offset:212
+; GFX11-TRUE16-NEXT: scratch_store_b32 off, v90, s32 offset:208
+; GFX11-TRUE16-NEXT: scratch_store_b32 off, v91, s32 offset:204
+; GFX11-TRUE16-NEXT: scratch_store_b32 off, v92, s32 offset:200
+; GFX11-TRUE16-NEXT: scratch_store_b32 off, v93, s32 offset:196
+; GFX11-TRUE16-NEXT: scratch_store_b32 off, v94, s32 offset:192
+; GFX11-TRUE16-NEXT: scratch_store_b32 off, v95, s32 offset:188
+; GFX11-TRUE16-NEXT: s_clause 0x1f
+; GFX11-TRUE16-NEXT: scratch_store_b32 off, v104, s32 offset:184
+; GFX11-TRUE16-NEXT: scratch_store_b32 off, v105, s32 offset:180
+; GFX11-TRUE16-NEXT: scratch_store_b32 off, v106, s32 offset:176
+; GFX11-TRUE16-NEXT: scratch_store_b32 off, v107, s32 offset:172
+; GFX11-TRUE16-NEXT: scratch_store_b32 off, v108, s32 offset:168
+; GFX11-TRUE16-NEXT: scratch_store_b32 off, v109, s32 offset:164
+; GFX11-TRUE16-NEXT: scratch_store_b32 off, v110, s32 offset:160
+; GFX11-TRUE16-NEXT: scratch_store_b32 off, v111, s32 offset:156
+; GFX11-TRUE16-NEXT: scratch_store_b32 off, v120, s32 offset:152
+; GFX11-TRUE16-NEXT: scratch_store_b32 off, v121, s32 offset:148
+; GFX11-TRUE16-NEXT: scratch_store_b32 off, v122, s32 offset:144
+; GFX11-TRUE16-NEXT: scratch_store_b32 off, v123, s32 offset:140
+; GFX11-TRUE16-NEXT: scratch_store_b32 off, v124, s32 offset:136
+; GFX11-TRUE16-NEXT: scratch_store_b32 off, v125, s32 offset:132
+; GFX11-TRUE16-NEXT: scratch_store_b32 off, v126, s32 offset:128
+; GFX11-TRUE16-NEXT: scratch_store_b32 off, v127, s32 offset:124
+; GFX11-TRUE16-NEXT: scratch_store_b32 off, v136, s32 offset:120
+; GFX11-TRUE16-NEXT: scratch_store_b32 off, v137, s32 offset:116
+; GFX11-TRUE16-NEXT: scratch_store_b32 off, v138, s32 offset:112
+; GFX11-TRUE16-NEXT: scratch_store_b32 off, v139, s32 offset:108
+; GFX11-TRUE16-NEXT: scratch_store_b32 off, v140, s32 offset:104
+; GFX11-TRUE16-NEXT: scratch_store_b32 off, v141, s32 offset:100
+; GFX11-TRUE16-NEXT: scratch_store_b32 off, v142, s32 offset:96
+; GFX11-TRUE16-NEXT: scratch_store_b32 off, v143, s32 offset:92
+; GFX11-TRUE16-NEXT: scratch_store_b32 off, v152, s32 offset:88
+; GFX11-TRUE16-NEXT: scratch_store_b32 off, v153, s32 offset:84
+; GFX11-TRUE16-NEXT: scratch_store_b32 off, v154, s32 offset:80
+; GFX11-TRUE16-NEXT: scratch_store_b32 off, v155, s32 offset:76
+; GFX11-TRUE16-NEXT: scratch_store_b32 off, v156, s32 offset:72
+; GFX11-TRUE16-NEXT: scratch_store_b32 off, v157, s32 offset:68
+; GFX11-TRUE16-NEXT: scratch_store_b32 off, v158, s32 offset:64
+; GFX11-TRUE16-NEXT: scratch_store_b32 off, v159, s32 offset:60
+; GFX11-TRUE16-NEXT: s_clause 0xe
+; GFX11-TRUE16-NEXT: scratch_store_b32 off, v168, s32 offset:56
+; GFX11-TRUE16-NEXT: scratch_store_b32 off, v169, s32 offset:52
+; GFX11-TRUE16-NEXT: scratch_store_b32 off, v170, s32 offset:48
+; GFX11-TRUE16-NEXT: scratch_store_b32 off, v171, s32 offset:44
+; GFX11-TRUE16-NEXT: scratch_store_b32 off, v172, s32 offset:40
+; GFX11-TRUE16-NEXT: scratch_store_b32 off, v173, s32 offset:36
+; GFX11-TRUE16-NEXT: scratch_store_b32 off, v174, s32 offset:32
+; GFX11-TRUE16-NEXT: scratch_store_b32 off, v175, s32 offset:28
+; GFX11-TRUE16-NEXT: scratch_store_b32 off, v184, s32 offset:24
+; GFX11-TRUE16-NEXT: scratch_store_b32 off, v185, s32 offset:20
+; GFX11-TRUE16-NEXT: scratch_store_b32 off, v186, s32 offset:16
+; GFX11-TRUE16-NEXT: scratch_store_b32 off, v187, s32 offset:12
+; GFX11-TRUE16-NEXT: scratch_store_b32 off, v188, s32 offset:8
+; GFX11-TRUE16-NEXT: scratch_store_b32 off, v189, s32 offset:4
+; GFX11-TRUE16-NEXT: scratch_store_b32 off, v190, s32
+; GFX11-TRUE16-NEXT: v_dual_mov_b32 v185, v5 :: v_dual_mov_b32 v186, v4
+; GFX11-TRUE16-NEXT: v_dual_mov_b32 v187, v3 :: v_dual_mov_b32 v188, v2
+; GFX11-TRUE16-NEXT: v_dual_mov_b32 v189, v1 :: v_dual_mov_b32 v190, v0
+; GFX11-TRUE16-NEXT: s_lshr_b32 s15, s29, 16
+; GFX11-TRUE16-NEXT: s_lshr_b32 s14, s28, 16
+; GFX11-TRUE16-NEXT: s_lshr_b32 s13, s27, 16
+; GFX11-TRUE16-NEXT: s_lshr_b32 s12, s26, 16
+; GFX11-TRUE16-NEXT: s_lshr_b32 s11, s25, 16
+; GFX11-TRUE16-NEXT: s_lshr_b32 s10, s24, 16
+; GFX11-TRUE16-NEXT: s_lshr_b32 s9, s23, 16
+; GFX11-TRUE16-NEXT: s_lshr_b32 s8, s22, 16
+; GFX11-TRUE16-NEXT: s_lshr_b32 s7, s21, 16
+; GFX11-TRUE16-NEXT: s_lshr_b32 s6, s20, 16
+; GFX11-TRUE16-NEXT: s_lshr_b32 s5, s19, 16
+; GFX11-TRUE16-NEXT: s_lshr_b32 s4, s18, 16
+; GFX11-TRUE16-NEXT: s_lshr_b32 s43, s17, 16
+; GFX11-TRUE16-NEXT: s_lshr_b32 s44, s16, 16
+; GFX11-TRUE16-NEXT: s_lshr_b32 s45, s3, 16
+; GFX11-TRUE16-NEXT: s_lshr_b32 s46, s2, 16
+; GFX11-TRUE16-NEXT: s_lshr_b32 s41, s1, 16
+; GFX11-TRUE16-NEXT: s_lshr_b32 s40, s0, 16
+; GFX11-TRUE16-NEXT: s_mov_b32 s42, 0
+; GFX11-TRUE16-NEXT: s_pack_ll_b32_b16 s40, s0, s40
+; GFX11-TRUE16-NEXT: s_pack_ll_b32_b16 s41, s1, s41
+; GFX11-TRUE16-NEXT: s_pack_ll_b32_b16 s0, s2, s46
+; GFX11-TRUE16-NEXT: s_pack_ll_b32_b16 s1, s3, s45
+; GFX11-TRUE16-NEXT: s_pack_ll_b32_b16 s2, s16, s44
+; GFX11-TRUE16-NEXT: s_pack_ll_b32_b16 s3, s17, s43
+; GFX11-TRUE16-NEXT: s_pack_ll_b32_b16 s4, s18, s4
+; GFX11-TRUE16-NEXT: s_pack_ll_b32_b16 s5, s19, s5
+; GFX11-TRUE16-NEXT: s_pack_ll_b32_b16 s6, s20, s6
+; GFX11-TRUE16-NEXT: s_pack_ll_b32_b16 s7, s21, s7
+; GFX11-TRUE16-NEXT: s_pack_ll_b32_b16 s8, s22, s8
+; GFX11-TRUE16-NEXT: s_pack_ll_b32_b16 s9, s23, s9
+; GFX11-TRUE16-NEXT: s_pack_ll_b32_b16 s10, s24, s10
+; GFX11-TRUE16-NEXT: s_pack_ll_b32_b16 s11, s25, s11
+; GFX11-TRUE16-NEXT: s_pack_ll_b32_b16 s12, s26, s12
+; GFX11-TRUE16-NEXT: s_pack_ll_b32_b16 s13, s27, s13
+; GFX11-TRUE16-NEXT: s_pack_ll_b32_b16 s14, s28, s14
+; GFX11-TRUE16-NEXT: s_pack_ll_b32_b16 s15, s29, s15
; GFX11-TRUE16-NEXT: s_and_b32 s47, vcc_lo, exec_lo
; GFX11-TRUE16-NEXT: s_cbranch_scc0 .LBB47_4
; GFX11-TRUE16-NEXT: ; %bb.1: ; %cmp.false
-; GFX11-TRUE16-NEXT: v_lshl_or_b32 v18, v37, 16, v51
-; GFX11-TRUE16-NEXT: v_lshl_or_b32 v19, v36, 16, v50
-; GFX11-TRUE16-NEXT: v_lshl_or_b32 v20, v35, 16, v49
-; GFX11-TRUE16-NEXT: v_lshl_or_b32 v21, v34, 16, v48
-; GFX11-TRUE16-NEXT: v_lshl_or_b32 v22, v33, 16, v39
-; GFX11-TRUE16-NEXT: v_lshl_or_b32 v23, v32, 16, v38
-; GFX11-TRUE16-NEXT: v_dual_mov_b32 v0, s4 :: v_dual_mov_b32 v1, s1
-; GFX11-TRUE16-NEXT: v_dual_mov_b32 v2, s2 :: v_dual_mov_b32 v3, s3
-; GFX11-TRUE16-NEXT: v_dual_mov_b32 v4, s5 :: v_dual_mov_b32 v5, s6
-; GFX11-TRUE16-NEXT: v_dual_mov_b32 v6, s7 :: v_dual_mov_b32 v7, s8
-; GFX11-TRUE16-NEXT: v_dual_mov_b32 v8, s9 :: v_dual_mov_b32 v9, s10
-; GFX11-TRUE16-NEXT: v_dual_mov_b32 v10, s11 :: v_dual_mov_b32 v11, s12
-; GFX11-TRUE16-NEXT: v_dual_mov_b32 v12, s13 :: v_dual_mov_b32 v13, s14
-; GFX11-TRUE16-NEXT: v_dual_mov_b32 v14, s0 :: v_dual_mov_b32 v15, s15
-; GFX11-TRUE16-NEXT: v_dual_mov_b32 v16, s16 :: v_dual_mov_b32 v17, s17
-; GFX11-TRUE16-NEXT: s_and_not1_b32 vcc_lo, exec_lo, s40
+; GFX11-TRUE16-NEXT: v_dual_mov_b32 v0, s40 :: v_dual_mov_b32 v5, s0
+; GFX11-TRUE16-NEXT: v_dual_mov_b32 v2, s41 :: v_dual_mov_b32 v9, s1
+; GFX11-TRUE16-NEXT: v_dual_mov_b32 v14, s2 :: v_dual_mov_b32 v27, s4
+; GFX11-TRUE16-NEXT: v_dual_mov_b32 v20, s3 :: v_dual_mov_b32 v35, s5
+; GFX11-TRUE16-NEXT: v_dual_mov_b32 v44, s6 :: v_dual_mov_b32 v65, s8
+; GFX11-TRUE16-NEXT: v_dual_mov_b32 v54, s7 :: v_dual_mov_b32 v77, s9
+; GFX11-TRUE16-NEXT: v_dual_mov_b32 v90, s10 :: v_dual_mov_b32 v119, s12
+; GFX11-TRUE16-NEXT: v_dual_mov_b32 v104, s11 :: v_dual_mov_b32 v135, s13
+; GFX11-TRUE16-NEXT: v_mov_b32_e32 v152, s14
+; GFX11-TRUE16-NEXT: v_mov_b32_e32 v170, s15
+; GFX11-TRUE16-NEXT: s_and_not1_b32 vcc_lo, exec_lo, s42
; GFX11-TRUE16-NEXT: s_cbranch_vccnz .LBB47_3
; GFX11-TRUE16-NEXT: .LBB47_2: ; %cmp.true
-; GFX11-TRUE16-NEXT: v_lshl_or_b32 v18, v37, 16, v51
-; GFX11-TRUE16-NEXT: v_lshl_or_b32 v19, v36, 16, v50
-; GFX11-TRUE16-NEXT: v_lshl_or_b32 v20, v35, 16, v49
-; GFX11-TRUE16-NEXT: v_lshl_or_b32 v21, v34, 16, v48
-; GFX11-TRUE16-NEXT: v_lshl_or_b32 v22, v33, 16, v39
-; GFX11-TRUE16-NEXT: v_lshl_or_b32 v23, v32, 16, v38
-; GFX11-TRUE16-NEXT: v_pk_add_f16 v0, 0x200, s4 op_sel_hi:[0,1]
-; GFX11-TRUE16-NEXT: v_pk_add_f16 v1, 0x200, s1 op_sel_hi:[0,1]
-; GFX11-TRUE16-NEXT: v_pk_add_f16 v2, 0x200, s2 op_sel_hi:[0,1]
-; GFX11-TRUE16-NEXT: v_pk_add_f16 v3, 0x200, s3 op_sel_hi:[0,1]
-; GFX11-TRUE16-NEXT: v_pk_add_f16 v4, 0x200, s5 op_sel_hi:[0,1]
-; GFX11-TRUE16-NEXT: v_pk_add_f16 v5, 0x200, s6 op_sel_hi:[0,1]
-; GFX11-TRUE16-NEXT: v_pk_add_f16 v6, 0x200, s7 op_sel_hi:[0,1]
-; GFX11-TRUE16-NEXT: v_pk_add_f16 v7, 0x200, s8 op_sel_hi:[0,1]
-; GFX11-TRUE16-NEXT: v_pk_add_f16 v8, 0x200, s9 op_sel_hi:[0,1]
-; GFX11-TRUE16-NEXT: v_pk_add_f16 v9, 0x200, s10 op_sel_hi:[0,1]
-; GFX11-TRUE16-NEXT: v_pk_add_f16 v10, 0x200, s11 op_sel_hi:[0,1]
-; GFX11-TRUE16-NEXT: v_pk_add_f16 v11, 0x200, s12 op_sel_hi:[0,1]
-; GFX11-TRUE16-NEXT: v_pk_add_f16 v12, 0x200, s13 op_sel_hi:[0,1]
-; GFX11-TRUE16-NEXT: v_pk_add_f16 v13, 0x200, s14 op_sel_hi:[0,1]
-; GFX11-TRUE16-NEXT: v_pk_add_f16 v14, 0x200, s0 op_sel_hi:[0,1]
-; GFX11-TRUE16-NEXT: v_pk_add_f16 v15, 0x200, s15 op_sel_hi:[0,1]
-; GFX11-TRUE16-NEXT: v_pk_add_f16 v16, 0x200, s16 op_sel_hi:[0,1]
-; GFX11-TRUE16-NEXT: v_pk_add_f16 v17, 0x200, s17 op_sel_hi:[0,1]
-; GFX11-TRUE16-NEXT: v_pk_add_f16 v18, 0x200, v18 op_sel_hi:[0,1]
-; GFX11-TRUE16-NEXT: v_pk_add_f16 v19, 0x200, v19 op_sel_hi:[0,1]
-; GFX11-TRUE16-NEXT: v_pk_add_f16 v20, 0x200, v20 op_sel_hi:[0,1]
-; GFX11-TRUE16-NEXT: v_pk_add_f16 v21, 0x200, v21 op_sel_hi:[0,1]
-; GFX11-TRUE16-NEXT: v_pk_add_f16 v22, 0x200, v22 op_sel_hi:[0,1]
-; GFX11-TRUE16-NEXT: v_pk_add_f16 v23, 0x200, v23 op_sel_hi:[0,1]
+; GFX11-TRUE16-NEXT: v_pk_add_f16 v0, 0x200, s40 op_sel_hi:[0,1]
+; GFX11-TRUE16-NEXT: v_pk_add_f16 v2, 0x200, s41 op_sel_hi:[0,1]
+; GFX11-TRUE16-NEXT: v_pk_add_f16 v190, 0x200, v190 op_sel_hi:[0,1]
+; GFX11-TRUE16-NEXT: v_pk_add_f16 v189, 0x200, v189 op_sel_hi:[0,1]
+; GFX11-TRUE16-NEXT: v_pk_add_f16 v188, 0x200, v188 op_sel_hi:[0,1]
+; GFX11-TRUE16-NEXT: v_pk_add_f16 v187, 0x200, v187 op_sel_hi:[0,1]
+; GFX11-TRUE16-NEXT: v_pk_add_f16 v186, 0x200, v186 op_sel_hi:[0,1]
+; GFX11-TRUE16-NEXT: v_pk_add_f16 v185, 0x200, v185 op_sel_hi:[0,1]
+; GFX11-TRUE16-NEXT: v_pk_add_f16 v5, 0x200, s0 op_sel_hi:[0,1]
+; GFX11-TRUE16-NEXT: v_pk_add_f16 v9, 0x200, s1 op_sel_hi:[0,1]
+; GFX11-TRUE16-NEXT: v_pk_add_f16 v14, 0x200, s2 op_sel_hi:[0,1]
+; GFX11-TRUE16-NEXT: v_pk_add_f16 v20, 0x200, s3 op_sel_hi:[0,1]
+; GFX11-TRUE16-NEXT: v_pk_add_f16 v27, 0x200, s4 op_sel_hi:[0,1]
+; GFX11-TRUE16-NEXT: v_pk_add_f16 v35, 0x200, s5 op_sel_hi:[0,1]
+; GFX11-TRUE16-NEXT: v_pk_add_f16 v44, 0x200, s6 op_sel_hi:[0,1]
+; GFX11-TRUE16-NEXT: v_pk_add_f16 v54, 0x200, s7 op_sel_hi:[0,1]
+; GFX11-TRUE16-NEXT: v_pk_add_f16 v65, 0x200, s8 op_sel_hi:[0,1]
+; GFX11-TRUE16-NEXT: v_pk_add_f16 v77, 0x200, s9 op_sel_hi:[0,1]
+; GFX11-TRUE16-NEXT: v_pk_add_f16 v90, 0x200, s10 op_sel_hi:[0,1]
+; GFX11-TRUE16-NEXT: v_pk_add_f16 v104, 0x200, s11 op_sel_hi:[0,1]
+; GFX11-TRUE16-NEXT: v_pk_add_f16 v119, 0x200, s12 op_sel_hi:[0,1]
+; GFX11-TRUE16-NEXT: v_pk_add_f16 v135, 0x200, s13 op_sel_hi:[0,1]
+; GFX11-TRUE16-NEXT: v_pk_add_f16 v152, 0x200, s14 op_sel_hi:[0,1]
+; GFX11-TRUE16-NEXT: v_pk_add_f16 v170, 0x200, s15 op_sel_hi:[0,1]
; GFX11-TRUE16-NEXT: .LBB47_3: ; %end
+; GFX11-TRUE16-NEXT: v_dual_mov_b32 v1, v2 :: v_dual_mov_b32 v2, v5
+; GFX11-TRUE16-NEXT: v_dual_mov_b32 v5, v20 :: v_dual_mov_b32 v6, v27
+; GFX11-TRUE16-NEXT: v_dual_mov_b32 v7, v35 :: v_dual_mov_b32 v8, v44
+; GFX11-TRUE16-NEXT: v_dual_mov_b32 v11, v77 :: v_dual_mov_b32 v12, v90
+; GFX11-TRUE16-NEXT: v_mov_b32_e32 v13, v104
+; GFX11-TRUE16-NEXT: v_dual_mov_b32 v15, v135 :: v_dual_mov_b32 v16, v152
+; GFX11-TRUE16-NEXT: v_dual_mov_b32 v17, v170 :: v_dual_mov_b32 v20, v188
+; GFX11-TRUE16-NEXT: v_dual_mov_b32 v18, v190 :: v_dual_mov_b32 v19, v189
+; GFX11-TRUE16-NEXT: v_dual_mov_b32 v21, v187 :: v_dual_mov_b32 v22, v186
+; GFX11-TRUE16-NEXT: v_mov_b32_e32 v23, v185
+; GFX11-TRUE16-NEXT: s_clause 0x1f
+; GFX11-TRUE16-NEXT: scratch_load_b32 v190, off, s32
+; GFX11-TRUE16-NEXT: scratch_load_b32 v189, off, s32 offset:4
+; GFX11-TRUE16-NEXT: scratch_load_b32 v188, off, s32 offset:8
+; GFX11-TRUE16-NEXT: scratch_load_b32 v187, off, s32 offset:12
+; GFX11-TRUE16-NEXT: scratch_load_b32 v186, off, s32 offset:16
+; GFX11-TRUE16-NEXT: scratch_load_b32 v185, off, s32 offset:20
+; GFX11-TRUE16-NEXT: scratch_load_b32 v184, off, s32 offset:24
+; GFX11-TRUE16-NEXT: scratch_load_b32 v175, off, s32 offset:28
+; GFX11-TRUE16-NEXT: scratch_load_b32 v174, off, s32 offset:32
+; GFX11-TRUE16-NEXT: scratch_load_b32 v173, off, s32 offset:36
+; GFX11-TRUE16-NEXT: scratch_load_b32 v172, off, s32 offset:40
+; GFX11-TRUE16-NEXT: scratch_load_b32 v171, off, s32 offset:44
+; GFX11-TRUE16-NEXT: scratch_load_b32 v170, off, s32 offset:48
+; GFX11-TRUE16-NEXT: scratch_load_b32 v169, off, s32 offset:52
+; GFX11-TRUE16-NEXT: scratch_load_b32 v168, off, s32 offset:56
+; GFX11-TRUE16-NEXT: scratch_load_b32 v159, off, s32 offset:60
+; GFX11-TRUE16-NEXT: scratch_load_b32 v158, off, s32 offset:64
+; GFX11-TRUE16-NEXT: scratch_load_b32 v157, off, s32 offset:68
+; GFX11-TRUE16-NEXT: scratch_load_b32 v156, off, s32 offset:72
+; GFX11-TRUE16-NEXT: scratch_load_b32 v155, off, s32 offset:76
+; GFX11-TRUE16-NEXT: scratch_load_b32 v154, off, s32 offset:80
+; GFX11-TRUE16-NEXT: scratch_load_b32 v153, off, s32 offset:84
+; GFX11-TRUE16-NEXT: scratch_load_b32 v152, off, s32 offset:88
+; GFX11-TRUE16-NEXT: scratch_load_b32 v143, off, s32 offset:92
+; GFX11-TRUE16-NEXT: scratch_load_b32 v142, off, s32 offset:96
+; GFX11-TRUE16-NEXT: scratch_load_b32 v141, off, s32 offset:100
+; GFX11-TRUE16-NEXT: scratch_load_b32 v140, off, s32 offset:104
+; GFX11-TRUE16-NEXT: scratch_load_b32 v139, off, s32 offset:108
+; GFX11-TRUE16-NEXT: scratch_load_b32 v138, off, s32 offset:112
+; GFX11-TRUE16-NEXT: scratch_load_b32 v137, off, s32 offset:116
+; GFX11-TRUE16-NEXT: scratch_load_b32 v136, off, s32 offset:120
+; GFX11-TRUE16-NEXT: scratch_load_b32 v127, off, s32 offset:124
+; GFX11-TRUE16-NEXT: s_clause 0x1f
+; GFX11-TRUE16-NEXT: scratch_load_b32 v126, off, s32 offset:128
+; GFX11-TRUE16-NEXT: scratch_load_b32 v125, off, s32 offset:132
+; GFX11-TRUE16-NEXT: scratch_load_b32 v124, off, s32 offset:136
+; GFX11-TRUE16-NEXT: scratch_load_b32 v123, off, s32 offset:140
+; GFX11-TRUE16-NEXT: scratch_load_b32 v122, off, s32 offset:144
+; GFX11-TRUE16-NEXT: scratch_load_b32 v121, off, s32 offset:148
+; GFX11-TRUE16-NEXT: scratch_load_b32 v120, off, s32 offset:152
+; GFX11-TRUE16-NEXT: scratch_load_b32 v111, off, s32 offset:156
+; GFX11-TRUE16-NEXT: scratch_load_b32 v110, off, s32 offset:160
+; GFX11-TRUE16-NEXT: scratch_load_b32 v109, off, s32 offset:164
+; GFX11-TRUE16-NEXT: scratch_load_b32 v108, off, s32 offset:168
+; GFX11-TRUE16-NEXT: scratch_load_b32 v107, off, s32 offset:172
+; GFX11-TRUE16-NEXT: scratch_load_b32 v106, off, s32 offset:176
+; GFX11-TRUE16-NEXT: scratch_load_b32 v105, off, s32 offset:180
+; GFX11-TRUE16-NEXT: scratch_load_b32 v104, off, s32 offset:184
+; GFX11-TRUE16-NEXT: scratch_load_b32 v95, off, s32 offset:188
+; GFX11-TRUE16-NEXT: scratch_load_b32 v94, off, s32 offset:192
+; GFX11-TRUE16-NEXT: scratch_load_b32 v93, off, s32 offset:196
+; GFX11-TRUE16-NEXT: scratch_load_b32 v92, off, s32 offset:200
+; GFX11-TRUE16-NEXT: scratch_load_b32 v91, off, s32 offset:204
+; GFX11-TRUE16-NEXT: scratch_load_b32 v90, off, s32 offset:208
+; GFX11-TRUE16-NEXT: scratch_load_b32 v89, off, s32 offset:212
+; GFX11-TRUE16-NEXT: scratch_load_b32 v88, off, s32 offset:216
+; GFX11-TRUE16-NEXT: scratch_load_b32 v79, off, s32 offset:220
+; GFX11-TRUE16-NEXT: scratch_load_b32 v78, off, s32 offset:224
+; GFX11-TRUE16-NEXT: scratch_load_b32 v77, off, s32 offset:228
+; GFX11-TRUE16-NEXT: scratch_load_b32 v76, off, s32 offset:232
+; GFX11-TRUE16-NEXT: scratch_load_b32 v75, off, s32 offset:236
+; GFX11-TRUE16-NEXT: scratch_load_b32 v74, off, s32 offset:240
+; GFX11-TRUE16-NEXT: scratch_load_b32 v73, off, s32 offset:244
+; GFX11-TRUE16-NEXT: scratch_load_b32 v72, off, s32 offset:248
+; GFX11-TRUE16-NEXT: scratch_load_b32 v63, off, s32 offset:252
+; GFX11-TRUE16-NEXT: s_clause 0xe
+; GFX11-TRUE16-NEXT: scratch_load_b32 v62, off, s32 offset:256
+; GFX11-TRUE16-NEXT: scratch_load_b32 v61, off, s32 offset:260
+; GFX11-TRUE16-NEXT: scratch_load_b32 v60, off, s32 offset:264
+; GFX11-TRUE16-NEXT: scratch_load_b32 v59, off, s32 offset:268
+; GFX11-TRUE16-NEXT: scratch_load_b32 v58, off, s32 offset:272
+; GFX11-TRUE16-NEXT: scratch_load_b32 v57, off, s32 offset:276
+; GFX11-TRUE16-NEXT: scratch_load_b32 v56, off, s32 offset:280
+; GFX11-TRUE16-NEXT: scratch_load_b32 v47, off, s32 offset:284
+; GFX11-TRUE16-NEXT: scratch_load_b32 v46, off, s32 offset:288
+; GFX11-TRUE16-NEXT: scratch_load_b32 v45, off, s32 offset:292
+; GFX11-TRUE16-NEXT: scratch_load_b32 v44, off, s32 offset:296
+; GFX11-TRUE16-NEXT: scratch_load_b32 v43, off, s32 offset:300
+; GFX11-TRUE16-NEXT: scratch_load_b32 v42, off, s32 offset:304
+; GFX11-TRUE16-NEXT: scratch_load_b32 v41, off, s32 offset:308
+; GFX11-TRUE16-NEXT: scratch_load_b32 v40, off, s32 offset:312
+; GFX11-TRUE16-NEXT: v_dual_mov_b32 v3, v9 :: v_dual_mov_b32 v4, v14
+; GFX11-TRUE16-NEXT: v_dual_mov_b32 v9, v54 :: v_dual_mov_b32 v10, v65
+; GFX11-TRUE16-NEXT: v_mov_b32_e32 v14, v119
+; GFX11-TRUE16-NEXT: s_waitcnt vmcnt(0)
; GFX11-TRUE16-NEXT: s_setpc_b64 s[30:31]
; GFX11-TRUE16-NEXT: .LBB47_4:
; GFX11-TRUE16-NEXT: ; implicit-def: $vgpr0_vgpr1_vgpr2_vgpr3_vgpr4_vgpr5_vgpr6_vgpr7_vgpr8_vgpr9_vgpr10_vgpr11_vgpr12_vgpr13_vgpr14_vgpr15_vgpr16_vgpr17_vgpr18_vgpr19_vgpr20_vgpr21_vgpr22_vgpr23_vgpr24_vgpr25_vgpr26_vgpr27_vgpr28_vgpr29_vgpr30_vgpr31
+; GFX11-TRUE16-NEXT: ; implicit-def: $vgpr1_vgpr2_vgpr3_vgpr4_vgpr5_vgpr6_vgpr7_vgpr8_vgpr9_vgpr10_vgpr11_vgpr12_vgpr13_vgpr14_vgpr15_vgpr16_vgpr17_vgpr18_vgpr19_vgpr20_vgpr21_vgpr22_vgpr23_vgpr24_vgpr25_vgpr26_vgpr27_vgpr28_vgpr29_vgpr30_vgpr31_vgpr32
+; GFX11-TRUE16-NEXT: ; implicit-def: $vgpr3_vgpr4_vgpr5_vgpr6_vgpr7_vgpr8_vgpr9_vgpr10_vgpr11_vgpr12_vgpr13_vgpr14_vgpr15_vgpr16_vgpr17_vgpr18_vgpr19_vgpr20_vgpr21_vgpr22_vgpr23_vgpr24_vgpr25_vgpr26_vgpr27_vgpr28_vgpr29_vgpr30_vgpr31_vgpr32_vgpr33_vgpr34
+; GFX11-TRUE16-NEXT: ; implicit-def: $vgpr6_vgpr7_vgpr8_vgpr9_vgpr10_vgpr11_vgpr12_vgpr13_vgpr14_vgpr15_vgpr16_vgpr17_vgpr18_vgpr19_vgpr20_vgpr21_vgpr22_vgpr23_vgpr24_vgpr25_vgpr26_vgpr27_vgpr28_vgpr29_vgpr30_vgpr31_vgpr32_vgpr33_vgpr34_vgpr35_vgpr36_vgpr37
+; GFX11-TRUE16-NEXT: ; implicit-def: $vgpr10_vgpr11_vgpr12_vgpr13_vgpr14_vgpr15_vgpr16_vgpr17_vgpr18_vgpr19_vgpr20_vgpr21_vgpr22_vgpr23_vgpr24_vgpr25_vgpr26_vgpr27_vgpr28_vgpr29_vgpr30_vgpr31_vgpr32_vgpr33_vgpr34_vgpr35_vgpr36_vgpr37_vgpr38_vgpr39_vgpr40_vgpr41
+; GFX11-TRUE16-NEXT: ; implicit-def: $vgpr15_vgpr16_vgpr17_vgpr18_vgpr19_vgpr20_vgpr21_vgpr22_vgpr23_vgpr24_vgpr25_vgpr26_vgpr27_vgpr28_vgpr29_vgpr30_vgpr31_vgpr32_vgpr33_vgpr34_vgpr35_vgpr36_vgpr37_vgpr38_vgpr39_vgpr40_vgpr41_vgpr42_vgpr43_vgpr44_vgpr45_vgpr46
+; GFX11-TRUE16-NEXT: ; implicit-def: $vgpr21_vgpr22_vgpr23_vgpr24_vgpr25_vgpr26_vgpr27_vgpr28_vgpr29_vgpr30_vgpr31_vgpr32_vgpr33_vgpr34_vgpr35_vgpr36_vgpr37_vgpr38_vgpr39_vgpr40_vgpr41_vgpr42_vgpr43_vgpr44_vgpr45_vgpr46_vgpr47_vgpr48_vgpr49_vgpr50_vgpr51_vgpr52
+; GFX11-TRUE16-NEXT: ; implicit-def: $vgpr28_vgpr29_vgpr30_vgpr31_vgpr32_vgpr33_vgpr34_vgpr35_vgpr36_vgpr37_vgpr38_vgpr39_vgpr40_vgpr41_vgpr42_vgpr43_vgpr44_vgpr45_vgpr46_vgpr47_vgpr48_vgpr49_vgpr50_vgpr51_vgpr52_vgpr53_vgpr54_vgpr55_vgpr56_vgpr57_vgpr58_vgpr59
+; GFX11-TRUE16-NEXT: ; implicit-def: $vgpr36_vgpr37_vgpr38_vgpr39_vgpr40_vgpr41_vgpr42_vgpr43_vgpr44_vgpr45_vgpr46_vgpr47_vgpr48_vgpr49_vgpr50_vgpr51_vgpr52_vgpr53_vgpr54_vgpr55_vgpr56_vgpr57_vgpr58_vgpr59_vgpr60_vgpr61_vgpr62_vgpr63_vgpr64_vgpr65_vgpr66_vgpr67
+; GFX11-TRUE16-NEXT: ; implicit-def: $vgpr45_vgpr46_vgpr47_vgpr48_vgpr49_vgpr50_vgpr51_vgpr52_vgpr53_vgpr54_vgpr55_vgpr56_vgpr57_vgpr58_vgpr59_vgpr60_vgpr61_vgpr62_vgpr63_vgpr64_vgpr65_vgpr66_vgpr67_vgpr68_vgpr69_vgpr70_vgpr71_vgpr72_vgpr73_vgpr74_vgpr75_vgpr76
+; GFX11-TRUE16-NEXT: ; implicit-def: $vgpr55_vgpr56_vgpr57_vgpr58_vgpr59_vgpr60_vgpr61_vgpr62_vgpr63_vgpr64_vgpr65_vgpr66_vgpr67_vgpr68_vgpr69_vgpr70_vgpr71_vgpr72_vgpr73_vgpr74_vgpr75_vgpr76_vgpr77_vgpr78_vgpr79_vgpr80_vgpr81_vgpr82_vgpr83_vgpr84_vgpr85_vgpr86
+; GFX11-TRUE16-NEXT: ; implicit-def: $vgpr66_vgpr67_vgpr68_vgpr69_vgpr70_vgpr71_vgpr72_vgpr73_vgpr74_vgpr75_vgpr76_vgpr77_vgpr78_vgpr79_vgpr80_vgpr81_vgpr82_vgpr83_vgpr84_vgpr85_vgpr86_vgpr87_vgpr88_vgpr89_vgpr90_vgpr91_vgpr92_vgpr93_vgpr94_vgpr95_vgpr96_vgpr97
+; GFX11-TRUE16-NEXT: ; implicit-def: $vgpr78_vgpr79_vgpr80_vgpr81_vgpr82_vgpr83_vgpr84_vgpr85_vgpr86_vgpr87_vgpr88_vgpr89_vgpr90_vgpr91_vgpr92_vgpr93_vgpr94_vgpr95_vgpr96_vgpr97_vgpr98_vgpr99_vgpr100_vgpr101_vgpr102_vgpr103_vgpr104_vgpr105_vgpr106_vgpr107_vgpr108_vgpr109
+; GFX11-TRUE16-NEXT: ; implicit-def: $vgpr91_vgpr92_vgpr93_vgpr94_vgpr95_vgpr96_vgpr97_vgpr98_vgpr99_vgpr100_vgpr101_vgpr102_vgpr103_vgpr104_vgpr105_vgpr106_vgpr107_vgpr108_vgpr109_vgpr110_vgpr111_vgpr112_vgpr113_vgpr114_vgpr115_vgpr116_vgpr117_vgpr118_vgpr119_vgpr120_vgpr121_vgpr122
+; GFX11-TRUE16-NEXT: ; implicit-def: $vgpr105_vgpr106_vgpr107_vgpr108_vgpr109_vgpr110_vgpr111_vgpr112_vgpr113_vgpr114_vgpr115_vgpr116_vgpr117_vgpr118_vgpr119_vgpr120_vgpr121_vgpr122_vgpr123_vgpr124_vgpr125_vgpr126_vgpr127_vgpr128_vgpr129_vgpr130_vgpr131_vgpr132_vgpr133_vgpr134_vgpr135_vgpr136
+; GFX11-TRUE16-NEXT: ; implicit-def: $vgpr120_vgpr121_vgpr122_vgpr123_vgpr124_vgpr125_vgpr126_vgpr127_vgpr128_vgpr129_vgpr130_vgpr131_vgpr132_vgpr133_vgpr134_vgpr135_vgpr136_vgpr137_vgpr138_vgpr139_vgpr140_vgpr141_vgpr142_vgpr143_vgpr144_vgpr145_vgpr146_vgpr147_vgpr148_vgpr149_vgpr150_vgpr151
+; GFX11-TRUE16-NEXT: ; implicit-def: $vgpr136_vgpr137_vgpr138_vgpr139_vgpr140_vgpr141_vgpr142_vgpr143_vgpr144_vgpr145_vgpr146_vgpr147_vgpr148_vgpr149_vgpr150_vgpr151_vgpr152_vgpr153_vgpr154_vgpr155_vgpr156_vgpr157_vgpr158_vgpr159_vgpr160_vgpr161_vgpr162_vgpr163_vgpr164_vgpr165_vgpr166_vgpr167
+; GFX11-TRUE16-NEXT: ; implicit-def: $vgpr153_vgpr154_vgpr155_vgpr156_vgpr157_vgpr158_vgpr159_vgpr160_vgpr161_vgpr162_vgpr163_vgpr164_vgpr165_vgpr166_vgpr167_vgpr168_vgpr169_vgpr170_vgpr171_vgpr172_vgpr173_vgpr174_vgpr175_vgpr176_vgpr177_vgpr178_vgpr179_vgpr180_vgpr181_vgpr182_vgpr183_vgpr184
; GFX11-TRUE16-NEXT: s_branch .LBB47_2
;
; GFX11-FAKE16-LABEL: bitcast_v48f16_to_v12i64_scalar:
@@ -29551,166 +30867,317 @@ define inreg <48 x i16> @bitcast_v12f64_to_v48i16_scalar(<12 x double> inreg %a,
; GFX9-NEXT: ; implicit-def: $vgpr34
; GFX9-NEXT: s_branch .LBB49_2
;
-; GFX11-LABEL: bitcast_v12f64_to_v48i16_scalar:
-; GFX11: ; %bb.0:
-; GFX11-NEXT: s_waitcnt vmcnt(0) expcnt(0) lgkmcnt(0)
-; GFX11-NEXT: v_cmp_ne_u32_e32 vcc_lo, 0, v6
-; GFX11-NEXT: v_dual_mov_b32 v24, s0 :: v_dual_mov_b32 v25, s1
-; GFX11-NEXT: v_dual_mov_b32 v22, s2 :: v_dual_mov_b32 v23, s3
-; GFX11-NEXT: v_dual_mov_b32 v20, s16 :: v_dual_mov_b32 v21, s17
-; GFX11-NEXT: v_dual_mov_b32 v7, s18 :: v_dual_mov_b32 v8, s19
-; GFX11-NEXT: v_dual_mov_b32 v18, s20 :: v_dual_mov_b32 v19, s21
-; GFX11-NEXT: v_dual_mov_b32 v11, s22 :: v_dual_mov_b32 v12, s23
-; GFX11-NEXT: v_dual_mov_b32 v9, s24 :: v_dual_mov_b32 v10, s25
-; GFX11-NEXT: v_dual_mov_b32 v14, s26 :: v_dual_mov_b32 v15, s27
-; GFX11-NEXT: v_dual_mov_b32 v16, s28 :: v_dual_mov_b32 v17, s29
-; GFX11-NEXT: s_mov_b32 s0, 0
-; GFX11-NEXT: s_and_b32 s1, vcc_lo, exec_lo
-; GFX11-NEXT: s_cbranch_scc0 .LBB49_4
-; GFX11-NEXT: ; %bb.1: ; %cmp.false
-; GFX11-NEXT: v_lshrrev_b32_e32 v30, 16, v5
-; GFX11-NEXT: v_lshrrev_b32_e32 v31, 16, v4
-; GFX11-NEXT: v_lshrrev_b32_e32 v32, 16, v3
-; GFX11-NEXT: v_lshrrev_b32_e32 v33, 16, v2
-; GFX11-NEXT: v_lshrrev_b32_e32 v34, 16, v1
-; GFX11-NEXT: v_lshrrev_b32_e32 v35, 16, v0
-; GFX11-NEXT: v_lshrrev_b32_e32 v36, 16, v17
-; GFX11-NEXT: v_lshrrev_b32_e32 v37, 16, v16
-; GFX11-NEXT: v_lshrrev_b32_e32 v38, 16, v15
-; GFX11-NEXT: v_lshrrev_b32_e32 v39, 16, v14
-; GFX11-NEXT: v_lshrrev_b32_e32 v13, 16, v10
-; GFX11-NEXT: v_lshrrev_b32_e32 v48, 16, v9
-; GFX11-NEXT: v_lshrrev_b32_e32 v49, 16, v12
-; GFX11-NEXT: v_lshrrev_b32_e32 v50, 16, v11
-; GFX11-NEXT: v_lshrrev_b32_e32 v51, 16, v19
-; GFX11-NEXT: v_lshrrev_b32_e32 v52, 16, v18
-; GFX11-NEXT: v_lshrrev_b32_e32 v53, 16, v8
-; GFX11-NEXT: v_lshrrev_b32_e32 v6, 16, v7
-; GFX11-NEXT: v_lshrrev_b32_e32 v29, 16, v21
-; GFX11-NEXT: v_lshrrev_b32_e32 v28, 16, v20
-; GFX11-NEXT: v_lshrrev_b32_e32 v27, 16, v23
-; GFX11-NEXT: v_lshrrev_b32_e32 v26, 16, v22
-; GFX11-NEXT: v_lshrrev_b32_e32 v54, 16, v25
-; GFX11-NEXT: v_lshrrev_b32_e32 v55, 16, v24
-; GFX11-NEXT: s_and_not1_b32 vcc_lo, exec_lo, s0
-; GFX11-NEXT: s_cbranch_vccnz .LBB49_3
-; GFX11-NEXT: .LBB49_2: ; %cmp.true
-; GFX11-NEXT: v_add_f64 v[4:5], v[4:5], 1.0
-; GFX11-NEXT: v_add_f64 v[2:3], v[2:3], 1.0
-; GFX11-NEXT: v_add_f64 v[0:1], v[0:1], 1.0
-; GFX11-NEXT: v_add_f64 v[16:17], v[16:17], 1.0
-; GFX11-NEXT: v_add_f64 v[14:15], v[14:15], 1.0
-; GFX11-NEXT: v_add_f64 v[9:10], v[9:10], 1.0
-; GFX11-NEXT: v_add_f64 v[11:12], v[11:12], 1.0
-; GFX11-NEXT: v_add_f64 v[18:19], v[18:19], 1.0
-; GFX11-NEXT: v_add_f64 v[7:8], v[7:8], 1.0
-; GFX11-NEXT: v_add_f64 v[20:21], v[20:21], 1.0
-; GFX11-NEXT: v_add_f64 v[22:23], v[22:23], 1.0
-; GFX11-NEXT: v_add_f64 v[24:25], v[24:25], 1.0
-; GFX11-NEXT: v_lshrrev_b32_e32 v30, 16, v5
-; GFX11-NEXT: v_lshrrev_b32_e32 v31, 16, v4
-; GFX11-NEXT: v_lshrrev_b32_e32 v32, 16, v3
-; GFX11-NEXT: v_lshrrev_b32_e32 v33, 16, v2
-; GFX11-NEXT: v_lshrrev_b32_e32 v34, 16, v1
-; GFX11-NEXT: v_lshrrev_b32_e32 v35, 16, v0
-; GFX11-NEXT: v_lshrrev_b32_e32 v36, 16, v17
-; GFX11-NEXT: v_lshrrev_b32_e32 v37, 16, v16
-; GFX11-NEXT: v_lshrrev_b32_e32 v38, 16, v15
-; GFX11-NEXT: v_lshrrev_b32_e32 v39, 16, v14
-; GFX11-NEXT: v_lshrrev_b32_e32 v13, 16, v10
-; GFX11-NEXT: v_lshrrev_b32_e32 v48, 16, v9
-; GFX11-NEXT: v_lshrrev_b32_e32 v49, 16, v12
-; GFX11-NEXT: v_lshrrev_b32_e32 v50, 16, v11
-; GFX11-NEXT: v_lshrrev_b32_e32 v51, 16, v19
-; GFX11-NEXT: v_lshrrev_b32_e32 v52, 16, v18
-; GFX11-NEXT: v_lshrrev_b32_e32 v53, 16, v8
-; GFX11-NEXT: v_lshrrev_b32_e32 v6, 16, v7
-; GFX11-NEXT: v_lshrrev_b32_e32 v29, 16, v21
-; GFX11-NEXT: v_lshrrev_b32_e32 v28, 16, v20
-; GFX11-NEXT: v_lshrrev_b32_e32 v27, 16, v23
-; GFX11-NEXT: v_lshrrev_b32_e32 v26, 16, v22
-; GFX11-NEXT: v_lshrrev_b32_e32 v54, 16, v25
-; GFX11-NEXT: v_lshrrev_b32_e32 v55, 16, v24
-; GFX11-NEXT: .LBB49_3: ; %end
-; GFX11-NEXT: v_and_b32_e32 v21, 0xffff, v21
-; GFX11-NEXT: v_and_b32_e32 v7, 0xffff, v7
-; GFX11-NEXT: v_and_b32_e32 v8, 0xffff, v8
-; GFX11-NEXT: v_and_b32_e32 v18, 0xffff, v18
-; GFX11-NEXT: v_and_b32_e32 v25, 0xffff, v25
-; GFX11-NEXT: v_and_b32_e32 v23, 0xffff, v23
-; GFX11-NEXT: v_lshl_or_b32 v29, v29, 16, v21
-; GFX11-NEXT: v_and_b32_e32 v20, 0xffff, v20
-; GFX11-NEXT: v_lshl_or_b32 v6, v6, 16, v7
-; GFX11-NEXT: v_lshl_or_b32 v7, v53, 16, v8
-; GFX11-NEXT: v_lshl_or_b32 v8, v52, 16, v18
-; GFX11-NEXT: v_and_b32_e32 v18, 0xffff, v19
-; GFX11-NEXT: v_and_b32_e32 v0, 0xffff, v0
-; GFX11-NEXT: v_lshl_or_b32 v25, v54, 16, v25
-; GFX11-NEXT: v_and_b32_e32 v24, 0xffff, v24
-; GFX11-NEXT: v_lshl_or_b32 v27, v27, 16, v23
-; GFX11-NEXT: v_and_b32_e32 v22, 0xffff, v22
-; GFX11-NEXT: v_and_b32_e32 v19, 0xffff, v9
-; GFX11-NEXT: v_lshl_or_b32 v9, v51, 16, v18
-; GFX11-NEXT: v_lshl_or_b32 v18, v35, 16, v0
-; GFX11-NEXT: v_and_b32_e32 v0, 0xffff, v1
-; GFX11-NEXT: v_and_b32_e32 v1, 0xffff, v2
-; GFX11-NEXT: v_and_b32_e32 v2, 0xffff, v3
-; GFX11-NEXT: v_and_b32_e32 v3, 0xffff, v4
-; GFX11-NEXT: v_dual_mov_b32 v5, v29 :: v_dual_and_b32 v4, 0xffff, v5
-; GFX11-NEXT: v_lshl_or_b32 v28, v28, 16, v20
-; GFX11-NEXT: v_and_b32_e32 v20, 0xffff, v10
-; GFX11-NEXT: v_lshl_or_b32 v26, v26, 16, v22
-; GFX11-NEXT: v_and_b32_e32 v11, 0xffff, v11
-; GFX11-NEXT: v_and_b32_e32 v12, 0xffff, v12
-; GFX11-NEXT: v_and_b32_e32 v14, 0xffff, v14
-; GFX11-NEXT: v_lshl_or_b32 v13, v13, 16, v20
-; GFX11-NEXT: v_and_b32_e32 v15, 0xffff, v15
-; GFX11-NEXT: v_and_b32_e32 v16, 0xffff, v16
-; GFX11-NEXT: v_and_b32_e32 v17, 0xffff, v17
-; GFX11-NEXT: v_lshl_or_b32 v20, v33, 16, v1
-; GFX11-NEXT: v_mov_b32_e32 v1, v25
-; GFX11-NEXT: v_lshl_or_b32 v24, v55, 16, v24
-; GFX11-NEXT: v_lshl_or_b32 v10, v50, 16, v11
-; GFX11-NEXT: v_lshl_or_b32 v11, v49, 16, v12
-; GFX11-NEXT: v_lshl_or_b32 v12, v48, 16, v19
-; GFX11-NEXT: v_lshl_or_b32 v14, v39, 16, v14
-; GFX11-NEXT: v_lshl_or_b32 v15, v38, 16, v15
-; GFX11-NEXT: v_lshl_or_b32 v16, v37, 16, v16
-; GFX11-NEXT: v_lshl_or_b32 v17, v36, 16, v17
-; GFX11-NEXT: v_lshl_or_b32 v19, v34, 16, v0
-; GFX11-NEXT: v_lshl_or_b32 v21, v32, 16, v2
-; GFX11-NEXT: v_lshl_or_b32 v22, v31, 16, v3
-; GFX11-NEXT: v_lshl_or_b32 v23, v30, 16, v4
-; GFX11-NEXT: v_mov_b32_e32 v0, v24
-; GFX11-NEXT: v_dual_mov_b32 v2, v26 :: v_dual_mov_b32 v3, v27
-; GFX11-NEXT: v_mov_b32_e32 v4, v28
-; GFX11-NEXT: s_setpc_b64 s[30:31]
-; GFX11-NEXT: .LBB49_4:
-; GFX11-NEXT: ; implicit-def: $vgpr55
-; GFX11-NEXT: ; implicit-def: $vgpr54
-; GFX11-NEXT: ; implicit-def: $vgpr26
-; GFX11-NEXT: ; implicit-def: $vgpr27
-; GFX11-NEXT: ; implicit-def: $vgpr28
-; GFX11-NEXT: ; implicit-def: $vgpr29
-; GFX11-NEXT: ; implicit-def: $vgpr6
-; GFX11-NEXT: ; implicit-def: $vgpr53
-; GFX11-NEXT: ; implicit-def: $vgpr52
-; GFX11-NEXT: ; implicit-def: $vgpr51
-; GFX11-NEXT: ; implicit-def: $vgpr50
-; GFX11-NEXT: ; implicit-def: $vgpr49
-; GFX11-NEXT: ; implicit-def: $vgpr48
-; GFX11-NEXT: ; implicit-def: $vgpr13
-; GFX11-NEXT: ; implicit-def: $vgpr39
-; GFX11-NEXT: ; implicit-def: $vgpr38
-; GFX11-NEXT: ; implicit-def: $vgpr37
-; GFX11-NEXT: ; implicit-def: $vgpr36
-; GFX11-NEXT: ; implicit-def: $vgpr35
-; GFX11-NEXT: ; implicit-def: $vgpr34
-; GFX11-NEXT: ; implicit-def: $vgpr33
-; GFX11-NEXT: ; implicit-def: $vgpr32
-; GFX11-NEXT: ; implicit-def: $vgpr31
-; GFX11-NEXT: ; implicit-def: $vgpr30
-; GFX11-NEXT: s_branch .LBB49_2
+; GFX11-TRUE16-LABEL: bitcast_v12f64_to_v48i16_scalar:
+; GFX11-TRUE16: ; %bb.0:
+; GFX11-TRUE16-NEXT: s_waitcnt vmcnt(0) expcnt(0) lgkmcnt(0)
+; GFX11-TRUE16-NEXT: v_dual_mov_b32 v16, v6 :: v_dual_mov_b32 v23, v5
+; GFX11-TRUE16-NEXT: v_dual_mov_b32 v22, v4 :: v_dual_mov_b32 v21, v3
+; GFX11-TRUE16-NEXT: v_dual_mov_b32 v20, v2 :: v_dual_mov_b32 v19, v1
+; GFX11-TRUE16-NEXT: s_delay_alu instid0(VALU_DEP_3)
+; GFX11-TRUE16-NEXT: v_cmp_ne_u32_e32 vcc_lo, 0, v16
+; GFX11-TRUE16-NEXT: v_dual_mov_b32 v18, v0 :: v_dual_mov_b32 v1, s1
+; GFX11-TRUE16-NEXT: v_dual_mov_b32 v0, s0 :: v_dual_mov_b32 v3, s3
+; GFX11-TRUE16-NEXT: v_dual_mov_b32 v2, s2 :: v_dual_mov_b32 v5, s17
+; GFX11-TRUE16-NEXT: v_dual_mov_b32 v4, s16 :: v_dual_mov_b32 v7, s19
+; GFX11-TRUE16-NEXT: v_dual_mov_b32 v6, s18 :: v_dual_mov_b32 v9, s21
+; GFX11-TRUE16-NEXT: v_dual_mov_b32 v8, s20 :: v_dual_mov_b32 v11, s23
+; GFX11-TRUE16-NEXT: v_dual_mov_b32 v10, s22 :: v_dual_mov_b32 v13, s25
+; GFX11-TRUE16-NEXT: v_dual_mov_b32 v12, s24 :: v_dual_mov_b32 v15, s27
+; GFX11-TRUE16-NEXT: v_dual_mov_b32 v14, s26 :: v_dual_mov_b32 v17, s29
+; GFX11-TRUE16-NEXT: v_mov_b32_e32 v16, s28
+; GFX11-TRUE16-NEXT: s_mov_b32 s0, 0
+; GFX11-TRUE16-NEXT: s_and_b32 s1, vcc_lo, exec_lo
+; GFX11-TRUE16-NEXT: s_cbranch_scc0 .LBB49_4
+; GFX11-TRUE16-NEXT: ; %bb.1: ; %cmp.false
+; GFX11-TRUE16-NEXT: v_lshrrev_b32_e32 v24, 16, v23
+; GFX11-TRUE16-NEXT: v_lshrrev_b32_e32 v25, 16, v22
+; GFX11-TRUE16-NEXT: v_lshrrev_b32_e32 v26, 16, v21
+; GFX11-TRUE16-NEXT: v_lshrrev_b32_e32 v27, 16, v20
+; GFX11-TRUE16-NEXT: v_lshrrev_b32_e32 v28, 16, v19
+; GFX11-TRUE16-NEXT: v_lshrrev_b32_e32 v29, 16, v18
+; GFX11-TRUE16-NEXT: v_lshrrev_b32_e32 v30, 16, v17
+; GFX11-TRUE16-NEXT: v_lshrrev_b32_e32 v31, 16, v16
+; GFX11-TRUE16-NEXT: v_lshrrev_b32_e32 v32, 16, v15
+; GFX11-TRUE16-NEXT: v_lshrrev_b32_e32 v33, 16, v14
+; GFX11-TRUE16-NEXT: v_lshrrev_b32_e32 v34, 16, v13
+; GFX11-TRUE16-NEXT: v_lshrrev_b32_e32 v35, 16, v12
+; GFX11-TRUE16-NEXT: v_lshrrev_b32_e32 v36, 16, v11
+; GFX11-TRUE16-NEXT: v_lshrrev_b32_e32 v37, 16, v10
+; GFX11-TRUE16-NEXT: v_lshrrev_b32_e32 v38, 16, v9
+; GFX11-TRUE16-NEXT: v_lshrrev_b32_e32 v39, 16, v8
+; GFX11-TRUE16-NEXT: v_lshrrev_b32_e32 v48, 16, v7
+; GFX11-TRUE16-NEXT: v_lshrrev_b32_e32 v49, 16, v6
+; GFX11-TRUE16-NEXT: v_lshrrev_b32_e32 v50, 16, v5
+; GFX11-TRUE16-NEXT: v_lshrrev_b32_e32 v51, 16, v4
+; GFX11-TRUE16-NEXT: v_lshrrev_b32_e32 v52, 16, v3
+; GFX11-TRUE16-NEXT: v_lshrrev_b32_e32 v53, 16, v2
+; GFX11-TRUE16-NEXT: v_lshrrev_b32_e32 v54, 16, v1
+; GFX11-TRUE16-NEXT: v_lshrrev_b32_e32 v55, 16, v0
+; GFX11-TRUE16-NEXT: s_and_not1_b32 vcc_lo, exec_lo, s0
+; GFX11-TRUE16-NEXT: s_cbranch_vccnz .LBB49_3
+; GFX11-TRUE16-NEXT: .LBB49_2: ; %cmp.true
+; GFX11-TRUE16-NEXT: v_add_f64 v[22:23], v[22:23], 1.0
+; GFX11-TRUE16-NEXT: v_add_f64 v[20:21], v[20:21], 1.0
+; GFX11-TRUE16-NEXT: v_add_f64 v[18:19], v[18:19], 1.0
+; GFX11-TRUE16-NEXT: v_add_f64 v[16:17], v[16:17], 1.0
+; GFX11-TRUE16-NEXT: v_add_f64 v[14:15], v[14:15], 1.0
+; GFX11-TRUE16-NEXT: v_add_f64 v[12:13], v[12:13], 1.0
+; GFX11-TRUE16-NEXT: v_add_f64 v[10:11], v[10:11], 1.0
+; GFX11-TRUE16-NEXT: v_add_f64 v[8:9], v[8:9], 1.0
+; GFX11-TRUE16-NEXT: v_add_f64 v[6:7], v[6:7], 1.0
+; GFX11-TRUE16-NEXT: v_add_f64 v[4:5], v[4:5], 1.0
+; GFX11-TRUE16-NEXT: v_add_f64 v[2:3], v[2:3], 1.0
+; GFX11-TRUE16-NEXT: v_add_f64 v[0:1], v[0:1], 1.0
+; GFX11-TRUE16-NEXT: v_lshrrev_b32_e32 v24, 16, v23
+; GFX11-TRUE16-NEXT: v_lshrrev_b32_e32 v25, 16, v22
+; GFX11-TRUE16-NEXT: v_lshrrev_b32_e32 v26, 16, v21
+; GFX11-TRUE16-NEXT: v_lshrrev_b32_e32 v27, 16, v20
+; GFX11-TRUE16-NEXT: v_lshrrev_b32_e32 v28, 16, v19
+; GFX11-TRUE16-NEXT: v_lshrrev_b32_e32 v29, 16, v18
+; GFX11-TRUE16-NEXT: v_lshrrev_b32_e32 v30, 16, v17
+; GFX11-TRUE16-NEXT: v_lshrrev_b32_e32 v31, 16, v16
+; GFX11-TRUE16-NEXT: v_lshrrev_b32_e32 v32, 16, v15
+; GFX11-TRUE16-NEXT: v_lshrrev_b32_e32 v33, 16, v14
+; GFX11-TRUE16-NEXT: v_lshrrev_b32_e32 v34, 16, v13
+; GFX11-TRUE16-NEXT: v_lshrrev_b32_e32 v35, 16, v12
+; GFX11-TRUE16-NEXT: v_lshrrev_b32_e32 v36, 16, v11
+; GFX11-TRUE16-NEXT: v_lshrrev_b32_e32 v37, 16, v10
+; GFX11-TRUE16-NEXT: v_lshrrev_b32_e32 v38, 16, v9
+; GFX11-TRUE16-NEXT: v_lshrrev_b32_e32 v39, 16, v8
+; GFX11-TRUE16-NEXT: v_lshrrev_b32_e32 v48, 16, v7
+; GFX11-TRUE16-NEXT: v_lshrrev_b32_e32 v49, 16, v6
+; GFX11-TRUE16-NEXT: v_lshrrev_b32_e32 v50, 16, v5
+; GFX11-TRUE16-NEXT: v_lshrrev_b32_e32 v51, 16, v4
+; GFX11-TRUE16-NEXT: v_lshrrev_b32_e32 v52, 16, v3
+; GFX11-TRUE16-NEXT: v_lshrrev_b32_e32 v53, 16, v2
+; GFX11-TRUE16-NEXT: v_lshrrev_b32_e32 v54, 16, v1
+; GFX11-TRUE16-NEXT: v_lshrrev_b32_e32 v55, 16, v0
+; GFX11-TRUE16-NEXT: .LBB49_3: ; %end
+; GFX11-TRUE16-NEXT: s_delay_alu instid0(VALU_DEP_1) | instskip(NEXT) | instid1(VALU_DEP_4)
+; GFX11-TRUE16-NEXT: v_dual_mov_b32 v55, v55 :: v_dual_mov_b32 v54, v54
+; GFX11-TRUE16-NEXT: v_dual_mov_b32 v53, v53 :: v_dual_mov_b32 v52, v52
+; GFX11-TRUE16-NEXT: v_dual_mov_b32 v51, v51 :: v_dual_mov_b32 v50, v50
+; GFX11-TRUE16-NEXT: v_dual_mov_b32 v49, v49 :: v_dual_mov_b32 v48, v48
+; GFX11-TRUE16-NEXT: v_dual_mov_b32 v39, v39 :: v_dual_mov_b32 v38, v38
+; GFX11-TRUE16-NEXT: v_dual_mov_b32 v37, v37 :: v_dual_mov_b32 v36, v36
+; GFX11-TRUE16-NEXT: v_dual_mov_b32 v35, v35 :: v_dual_mov_b32 v34, v34
+; GFX11-TRUE16-NEXT: v_dual_mov_b32 v33, v33 :: v_dual_mov_b32 v32, v32
+; GFX11-TRUE16-NEXT: v_dual_mov_b32 v31, v31 :: v_dual_mov_b32 v30, v30
+; GFX11-TRUE16-NEXT: v_dual_mov_b32 v29, v29 :: v_dual_mov_b32 v28, v28
+; GFX11-TRUE16-NEXT: v_dual_mov_b32 v27, v27 :: v_dual_mov_b32 v26, v26
+; GFX11-TRUE16-NEXT: v_dual_mov_b32 v25, v25 :: v_dual_mov_b32 v24, v24
+; GFX11-TRUE16-NEXT: v_mov_b16_e32 v0.h, v55.l
+; GFX11-TRUE16-NEXT: v_mov_b16_e32 v1.h, v54.l
+; GFX11-TRUE16-NEXT: v_mov_b16_e32 v2.h, v53.l
+; GFX11-TRUE16-NEXT: v_mov_b16_e32 v3.h, v52.l
+; GFX11-TRUE16-NEXT: v_mov_b16_e32 v4.h, v51.l
+; GFX11-TRUE16-NEXT: v_mov_b16_e32 v5.h, v50.l
+; GFX11-TRUE16-NEXT: v_mov_b16_e32 v6.h, v49.l
+; GFX11-TRUE16-NEXT: v_mov_b16_e32 v7.h, v48.l
+; GFX11-TRUE16-NEXT: v_mov_b16_e32 v8.h, v39.l
+; GFX11-TRUE16-NEXT: v_mov_b16_e32 v9.h, v38.l
+; GFX11-TRUE16-NEXT: v_mov_b16_e32 v10.h, v37.l
+; GFX11-TRUE16-NEXT: v_mov_b16_e32 v11.h, v36.l
+; GFX11-TRUE16-NEXT: v_mov_b16_e32 v12.h, v35.l
+; GFX11-TRUE16-NEXT: v_mov_b16_e32 v13.h, v34.l
+; GFX11-TRUE16-NEXT: v_mov_b16_e32 v14.h, v33.l
+; GFX11-TRUE16-NEXT: v_mov_b16_e32 v15.h, v32.l
+; GFX11-TRUE16-NEXT: v_mov_b16_e32 v16.h, v31.l
+; GFX11-TRUE16-NEXT: v_mov_b16_e32 v17.h, v30.l
+; GFX11-TRUE16-NEXT: v_mov_b16_e32 v18.h, v29.l
+; GFX11-TRUE16-NEXT: v_mov_b16_e32 v19.h, v28.l
+; GFX11-TRUE16-NEXT: v_mov_b16_e32 v20.h, v27.l
+; GFX11-TRUE16-NEXT: v_mov_b16_e32 v21.h, v26.l
+; GFX11-TRUE16-NEXT: v_mov_b16_e32 v22.h, v25.l
+; GFX11-TRUE16-NEXT: v_mov_b16_e32 v23.h, v24.l
+; GFX11-TRUE16-NEXT: s_setpc_b64 s[30:31]
+; GFX11-TRUE16-NEXT: .LBB49_4:
+; GFX11-TRUE16-NEXT: ; implicit-def: $vgpr55
+; GFX11-TRUE16-NEXT: ; implicit-def: $vgpr54
+; GFX11-TRUE16-NEXT: ; implicit-def: $vgpr53
+; GFX11-TRUE16-NEXT: ; implicit-def: $vgpr52
+; GFX11-TRUE16-NEXT: ; implicit-def: $vgpr51
+; GFX11-TRUE16-NEXT: ; implicit-def: $vgpr50
+; GFX11-TRUE16-NEXT: ; implicit-def: $vgpr49
+; GFX11-TRUE16-NEXT: ; implicit-def: $vgpr48
+; GFX11-TRUE16-NEXT: ; implicit-def: $vgpr39
+; GFX11-TRUE16-NEXT: ; implicit-def: $vgpr38
+; GFX11-TRUE16-NEXT: ; implicit-def: $vgpr37
+; GFX11-TRUE16-NEXT: ; implicit-def: $vgpr36
+; GFX11-TRUE16-NEXT: ; implicit-def: $vgpr35
+; GFX11-TRUE16-NEXT: ; implicit-def: $vgpr34
+; GFX11-TRUE16-NEXT: ; implicit-def: $vgpr33
+; GFX11-TRUE16-NEXT: ; implicit-def: $vgpr32
+; GFX11-TRUE16-NEXT: ; implicit-def: $vgpr31
+; GFX11-TRUE16-NEXT: ; implicit-def: $vgpr30
+; GFX11-TRUE16-NEXT: ; implicit-def: $vgpr29
+; GFX11-TRUE16-NEXT: ; implicit-def: $vgpr28
+; GFX11-TRUE16-NEXT: ; implicit-def: $vgpr27
+; GFX11-TRUE16-NEXT: ; implicit-def: $vgpr26
+; GFX11-TRUE16-NEXT: ; implicit-def: $vgpr25
+; GFX11-TRUE16-NEXT: ; implicit-def: $vgpr24
+; GFX11-TRUE16-NEXT: s_branch .LBB49_2
+;
+; GFX11-FAKE16-LABEL: bitcast_v12f64_to_v48i16_scalar:
+; GFX11-FAKE16: ; %bb.0:
+; GFX11-FAKE16-NEXT: s_waitcnt vmcnt(0) expcnt(0) lgkmcnt(0)
+; GFX11-FAKE16-NEXT: v_cmp_ne_u32_e32 vcc_lo, 0, v6
+; GFX11-FAKE16-NEXT: v_dual_mov_b32 v24, s0 :: v_dual_mov_b32 v25, s1
+; GFX11-FAKE16-NEXT: v_dual_mov_b32 v22, s2 :: v_dual_mov_b32 v23, s3
+; GFX11-FAKE16-NEXT: v_dual_mov_b32 v20, s16 :: v_dual_mov_b32 v21, s17
+; GFX11-FAKE16-NEXT: v_dual_mov_b32 v7, s18 :: v_dual_mov_b32 v8, s19
+; GFX11-FAKE16-NEXT: v_dual_mov_b32 v18, s20 :: v_dual_mov_b32 v19, s21
+; GFX11-FAKE16-NEXT: v_dual_mov_b32 v11, s22 :: v_dual_mov_b32 v12, s23
+; GFX11-FAKE16-NEXT: v_dual_mov_b32 v9, s24 :: v_dual_mov_b32 v10, s25
+; GFX11-FAKE16-NEXT: v_dual_mov_b32 v14, s26 :: v_dual_mov_b32 v15, s27
+; GFX11-FAKE16-NEXT: v_dual_mov_b32 v16, s28 :: v_dual_mov_b32 v17, s29
+; GFX11-FAKE16-NEXT: s_mov_b32 s0, 0
+; GFX11-FAKE16-NEXT: s_and_b32 s1, vcc_lo, exec_lo
+; GFX11-FAKE16-NEXT: s_cbranch_scc0 .LBB49_4
+; GFX11-FAKE16-NEXT: ; %bb.1: ; %cmp.false
+; GFX11-FAKE16-NEXT: v_lshrrev_b32_e32 v30, 16, v5
+; GFX11-FAKE16-NEXT: v_lshrrev_b32_e32 v31, 16, v4
+; GFX11-FAKE16-NEXT: v_lshrrev_b32_e32 v32, 16, v3
+; GFX11-FAKE16-NEXT: v_lshrrev_b32_e32 v33, 16, v2
+; GFX11-FAKE16-NEXT: v_lshrrev_b32_e32 v34, 16, v1
+; GFX11-FAKE16-NEXT: v_lshrrev_b32_e32 v35, 16, v0
+; GFX11-FAKE16-NEXT: v_lshrrev_b32_e32 v36, 16, v17
+; GFX11-FAKE16-NEXT: v_lshrrev_b32_e32 v37, 16, v16
+; GFX11-FAKE16-NEXT: v_lshrrev_b32_e32 v38, 16, v15
+; GFX11-FAKE16-NEXT: v_lshrrev_b32_e32 v39, 16, v14
+; GFX11-FAKE16-NEXT: v_lshrrev_b32_e32 v13, 16, v10
+; GFX11-FAKE16-NEXT: v_lshrrev_b32_e32 v48, 16, v9
+; GFX11-FAKE16-NEXT: v_lshrrev_b32_e32 v49, 16, v12
+; GFX11-FAKE16-NEXT: v_lshrrev_b32_e32 v50, 16, v11
+; GFX11-FAKE16-NEXT: v_lshrrev_b32_e32 v51, 16, v19
+; GFX11-FAKE16-NEXT: v_lshrrev_b32_e32 v52, 16, v18
+; GFX11-FAKE16-NEXT: v_lshrrev_b32_e32 v53, 16, v8
+; GFX11-FAKE16-NEXT: v_lshrrev_b32_e32 v6, 16, v7
+; GFX11-FAKE16-NEXT: v_lshrrev_b32_e32 v29, 16, v21
+; GFX11-FAKE16-NEXT: v_lshrrev_b32_e32 v28, 16, v20
+; GFX11-FAKE16-NEXT: v_lshrrev_b32_e32 v27, 16, v23
+; GFX11-FAKE16-NEXT: v_lshrrev_b32_e32 v26, 16, v22
+; GFX11-FAKE16-NEXT: v_lshrrev_b32_e32 v54, 16, v25
+; GFX11-FAKE16-NEXT: v_lshrrev_b32_e32 v55, 16, v24
+; GFX11-FAKE16-NEXT: s_and_not1_b32 vcc_lo, exec_lo, s0
+; GFX11-FAKE16-NEXT: s_cbranch_vccnz .LBB49_3
+; GFX11-FAKE16-NEXT: .LBB49_2: ; %cmp.true
+; GFX11-FAKE16-NEXT: v_add_f64 v[4:5], v[4:5], 1.0
+; GFX11-FAKE16-NEXT: v_add_f64 v[2:3], v[2:3], 1.0
+; GFX11-FAKE16-NEXT: v_add_f64 v[0:1], v[0:1], 1.0
+; GFX11-FAKE16-NEXT: v_add_f64 v[16:17], v[16:17], 1.0
+; GFX11-FAKE16-NEXT: v_add_f64 v[14:15], v[14:15], 1.0
+; GFX11-FAKE16-NEXT: v_add_f64 v[9:10], v[9:10], 1.0
+; GFX11-FAKE16-NEXT: v_add_f64 v[11:12], v[11:12], 1.0
+; GFX11-FAKE16-NEXT: v_add_f64 v[18:19], v[18:19], 1.0
+; GFX11-FAKE16-NEXT: v_add_f64 v[7:8], v[7:8], 1.0
+; GFX11-FAKE16-NEXT: v_add_f64 v[20:21], v[20:21], 1.0
+; GFX11-FAKE16-NEXT: v_add_f64 v[22:23], v[22:23], 1.0
+; GFX11-FAKE16-NEXT: v_add_f64 v[24:25], v[24:25], 1.0
+; GFX11-FAKE16-NEXT: v_lshrrev_b32_e32 v30, 16, v5
+; GFX11-FAKE16-NEXT: v_lshrrev_b32_e32 v31, 16, v4
+; GFX11-FAKE16-NEXT: v_lshrrev_b32_e32 v32, 16, v3
+; GFX11-FAKE16-NEXT: v_lshrrev_b32_e32 v33, 16, v2
+; GFX11-FAKE16-NEXT: v_lshrrev_b32_e32 v34, 16, v1
+; GFX11-FAKE16-NEXT: v_lshrrev_b32_e32 v35, 16, v0
+; GFX11-FAKE16-NEXT: v_lshrrev_b32_e32 v36, 16, v17
+; GFX11-FAKE16-NEXT: v_lshrrev_b32_e32 v37, 16, v16
+; GFX11-FAKE16-NEXT: v_lshrrev_b32_e32 v38, 16, v15
+; GFX11-FAKE16-NEXT: v_lshrrev_b32_e32 v39, 16, v14
+; GFX11-FAKE16-NEXT: v_lshrrev_b32_e32 v13, 16, v10
+; GFX11-FAKE16-NEXT: v_lshrrev_b32_e32 v48, 16, v9
+; GFX11-FAKE16-NEXT: v_lshrrev_b32_e32 v49, 16, v12
+; GFX11-FAKE16-NEXT: v_lshrrev_b32_e32 v50, 16, v11
+; GFX11-FAKE16-NEXT: v_lshrrev_b32_e32 v51, 16, v19
+; GFX11-FAKE16-NEXT: v_lshrrev_b32_e32 v52, 16, v18
+; GFX11-FAKE16-NEXT: v_lshrrev_b32_e32 v53, 16, v8
+; GFX11-FAKE16-NEXT: v_lshrrev_b32_e32 v6, 16, v7
+; GFX11-FAKE16-NEXT: v_lshrrev_b32_e32 v29, 16, v21
+; GFX11-FAKE16-NEXT: v_lshrrev_b32_e32 v28, 16, v20
+; GFX11-FAKE16-NEXT: v_lshrrev_b32_e32 v27, 16, v23
+; GFX11-FAKE16-NEXT: v_lshrrev_b32_e32 v26, 16, v22
+; GFX11-FAKE16-NEXT: v_lshrrev_b32_e32 v54, 16, v25
+; GFX11-FAKE16-NEXT: v_lshrrev_b32_e32 v55, 16, v24
+; GFX11-FAKE16-NEXT: .LBB49_3: ; %end
+; GFX11-FAKE16-NEXT: v_and_b32_e32 v21, 0xffff, v21
+; GFX11-FAKE16-NEXT: v_and_b32_e32 v7, 0xffff, v7
+; GFX11-FAKE16-NEXT: v_and_b32_e32 v8, 0xffff, v8
+; GFX11-FAKE16-NEXT: v_and_b32_e32 v18, 0xffff, v18
+; GFX11-FAKE16-NEXT: v_and_b32_e32 v25, 0xffff, v25
+; GFX11-FAKE16-NEXT: v_and_b32_e32 v23, 0xffff, v23
+; GFX11-FAKE16-NEXT: v_lshl_or_b32 v29, v29, 16, v21
+; GFX11-FAKE16-NEXT: v_and_b32_e32 v20, 0xffff, v20
+; GFX11-FAKE16-NEXT: v_lshl_or_b32 v6, v6, 16, v7
+; GFX11-FAKE16-NEXT: v_lshl_or_b32 v7, v53, 16, v8
+; GFX11-FAKE16-NEXT: v_lshl_or_b32 v8, v52, 16, v18
+; GFX11-FAKE16-NEXT: v_and_b32_e32 v18, 0xffff, v19
+; GFX11-FAKE16-NEXT: v_and_b32_e32 v0, 0xffff, v0
+; GFX11-FAKE16-NEXT: v_lshl_or_b32 v25, v54, 16, v25
+; GFX11-FAKE16-NEXT: v_and_b32_e32 v24, 0xffff, v24
+; GFX11-FAKE16-NEXT: v_lshl_or_b32 v27, v27, 16, v23
+; GFX11-FAKE16-NEXT: v_and_b32_e32 v22, 0xffff, v22
+; GFX11-FAKE16-NEXT: v_and_b32_e32 v19, 0xffff, v9
+; GFX11-FAKE16-NEXT: v_lshl_or_b32 v9, v51, 16, v18
+; GFX11-FAKE16-NEXT: v_lshl_or_b32 v18, v35, 16, v0
+; GFX11-FAKE16-NEXT: v_and_b32_e32 v0, 0xffff, v1
+; GFX11-FAKE16-NEXT: v_and_b32_e32 v1, 0xffff, v2
+; GFX11-FAKE16-NEXT: v_and_b32_e32 v2, 0xffff, v3
+; GFX11-FAKE16-NEXT: v_and_b32_e32 v3, 0xffff, v4
+; GFX11-FAKE16-NEXT: v_dual_mov_b32 v5, v29 :: v_dual_and_b32 v4, 0xffff, v5
+; GFX11-FAKE16-NEXT: v_lshl_or_b32 v28, v28, 16, v20
+; GFX11-FAKE16-NEXT: v_and_b32_e32 v20, 0xffff, v10
+; GFX11-FAKE16-NEXT: v_lshl_or_b32 v26, v26, 16, v22
+; GFX11-FAKE16-NEXT: v_and_b32_e32 v11, 0xffff, v11
+; GFX11-FAKE16-NEXT: v_and_b32_e32 v12, 0xffff, v12
+; GFX11-FAKE16-NEXT: v_and_b32_e32 v14, 0xffff, v14
+; GFX11-FAKE16-NEXT: v_lshl_or_b32 v13, v13, 16, v20
+; GFX11-FAKE16-NEXT: v_and_b32_e32 v15, 0xffff, v15
+; GFX11-FAKE16-NEXT: v_and_b32_e32 v16, 0xffff, v16
+; GFX11-FAKE16-NEXT: v_and_b32_e32 v17, 0xffff, v17
+; GFX11-FAKE16-NEXT: v_lshl_or_b32 v20, v33, 16, v1
+; GFX11-FAKE16-NEXT: v_mov_b32_e32 v1, v25
+; GFX11-FAKE16-NEXT: v_lshl_or_b32 v24, v55, 16, v24
+; GFX11-FAKE16-NEXT: v_lshl_or_b32 v10, v50, 16, v11
+; GFX11-FAKE16-NEXT: v_lshl_or_b32 v11, v49, 16, v12
+; GFX11-FAKE16-NEXT: v_lshl_or_b32 v12, v48, 16, v19
+; GFX11-FAKE16-NEXT: v_lshl_or_b32 v14, v39, 16, v14
+; GFX11-FAKE16-NEXT: v_lshl_or_b32 v15, v38, 16, v15
+; GFX11-FAKE16-NEXT: v_lshl_or_b32 v16, v37, 16, v16
+; GFX11-FAKE16-NEXT: v_lshl_or_b32 v17, v36, 16, v17
+; GFX11-FAKE16-NEXT: v_lshl_or_b32 v19, v34, 16, v0
+; GFX11-FAKE16-NEXT: v_lshl_or_b32 v21, v32, 16, v2
+; GFX11-FAKE16-NEXT: v_lshl_or_b32 v22, v31, 16, v3
+; GFX11-FAKE16-NEXT: v_lshl_or_b32 v23, v30, 16, v4
+; GFX11-FAKE16-NEXT: v_mov_b32_e32 v0, v24
+; GFX11-FAKE16-NEXT: v_dual_mov_b32 v2, v26 :: v_dual_mov_b32 v3, v27
+; GFX11-FAKE16-NEXT: v_mov_b32_e32 v4, v28
+; GFX11-FAKE16-NEXT: s_setpc_b64 s[30:31]
+; GFX11-FAKE16-NEXT: .LBB49_4:
+; GFX11-FAKE16-NEXT: ; implicit-def: $vgpr55
+; GFX11-FAKE16-NEXT: ; implicit-def: $vgpr54
+; GFX11-FAKE16-NEXT: ; implicit-def: $vgpr26
+; GFX11-FAKE16-NEXT: ; implicit-def: $vgpr27
+; GFX11-FAKE16-NEXT: ; implicit-def: $vgpr28
+; GFX11-FAKE16-NEXT: ; implicit-def: $vgpr29
+; GFX11-FAKE16-NEXT: ; implicit-def: $vgpr6
+; GFX11-FAKE16-NEXT: ; implicit-def: $vgpr53
+; GFX11-FAKE16-NEXT: ; implicit-def: $vgpr52
+; GFX11-FAKE16-NEXT: ; implicit-def: $vgpr51
+; GFX11-FAKE16-NEXT: ; implicit-def: $vgpr50
+; GFX11-FAKE16-NEXT: ; implicit-def: $vgpr49
+; GFX11-FAKE16-NEXT: ; implicit-def: $vgpr48
+; GFX11-FAKE16-NEXT: ; implicit-def: $vgpr13
+; GFX11-FAKE16-NEXT: ; implicit-def: $vgpr39
+; GFX11-FAKE16-NEXT: ; implicit-def: $vgpr38
+; GFX11-FAKE16-NEXT: ; implicit-def: $vgpr37
+; GFX11-FAKE16-NEXT: ; implicit-def: $vgpr36
+; GFX11-FAKE16-NEXT: ; implicit-def: $vgpr35
+; GFX11-FAKE16-NEXT: ; implicit-def: $vgpr34
+; GFX11-FAKE16-NEXT: ; implicit-def: $vgpr33
+; GFX11-FAKE16-NEXT: ; implicit-def: $vgpr32
+; GFX11-FAKE16-NEXT: ; implicit-def: $vgpr31
+; GFX11-FAKE16-NEXT: ; implicit-def: $vgpr30
+; GFX11-FAKE16-NEXT: s_branch .LBB49_2
%cmp = icmp eq i32 %b, 0
br i1 %cmp, label %cmp.true, label %cmp.false
@@ -31492,117 +32959,286 @@ define inreg <12 x double> @bitcast_v48i16_to_v12f64_scalar(<48 x i16> inreg %a,
; GFX11-TRUE16-LABEL: bitcast_v48i16_to_v12f64_scalar:
; GFX11-TRUE16: ; %bb.0:
; GFX11-TRUE16-NEXT: s_waitcnt vmcnt(0) expcnt(0) lgkmcnt(0)
-; GFX11-TRUE16-NEXT: v_mov_b16_e32 v32.h, 0
; GFX11-TRUE16-NEXT: v_cmp_ne_u32_e32 vcc_lo, 0, v6
-; GFX11-TRUE16-NEXT: v_mov_b16_e32 v32.l, v5.h
-; GFX11-TRUE16-NEXT: v_mov_b16_e32 v33.l, v4.h
-; GFX11-TRUE16-NEXT: v_mov_b16_e32 v34.l, v3.h
-; GFX11-TRUE16-NEXT: v_mov_b16_e32 v33.h, v32.h
-; GFX11-TRUE16-NEXT: v_mov_b16_e32 v34.h, v32.h
-; GFX11-TRUE16-NEXT: v_mov_b16_e32 v35.l, v2.h
-; GFX11-TRUE16-NEXT: v_mov_b16_e32 v35.h, v32.h
-; GFX11-TRUE16-NEXT: v_mov_b16_e32 v36.l, v1.h
-; GFX11-TRUE16-NEXT: v_mov_b16_e32 v36.h, v32.h
-; GFX11-TRUE16-NEXT: v_mov_b16_e32 v37.l, v0.h
-; GFX11-TRUE16-NEXT: v_mov_b16_e32 v37.h, v32.h
-; GFX11-TRUE16-NEXT: v_and_b32_e32 v51, 0xffff, v0
-; GFX11-TRUE16-NEXT: v_and_b32_e32 v50, 0xffff, v1
-; GFX11-TRUE16-NEXT: v_and_b32_e32 v49, 0xffff, v2
-; GFX11-TRUE16-NEXT: v_and_b32_e32 v48, 0xffff, v3
-; GFX11-TRUE16-NEXT: v_and_b32_e32 v39, 0xffff, v4
-; GFX11-TRUE16-NEXT: v_and_b32_e32 v38, 0xffff, v5
-; GFX11-TRUE16-NEXT: s_lshr_b32 s41, s29, 16
-; GFX11-TRUE16-NEXT: s_lshr_b32 s42, s28, 16
-; GFX11-TRUE16-NEXT: s_lshr_b32 s15, s27, 16
-; GFX11-TRUE16-NEXT: s_lshr_b32 s43, s26, 16
-; GFX11-TRUE16-NEXT: s_lshr_b32 s14, s25, 16
-; GFX11-TRUE16-NEXT: s_lshr_b32 s13, s24, 16
-; GFX11-TRUE16-NEXT: s_lshr_b32 s12, s23, 16
-; GFX11-TRUE16-NEXT: s_lshr_b32 s11, s22, 16
-; GFX11-TRUE16-NEXT: s_lshr_b32 s10, s21, 16
-; GFX11-TRUE16-NEXT: s_lshr_b32 s9, s20, 16
-; GFX11-TRUE16-NEXT: s_lshr_b32 s8, s19, 16
-; GFX11-TRUE16-NEXT: s_lshr_b32 s7, s18, 16
-; GFX11-TRUE16-NEXT: s_lshr_b32 s6, s17, 16
-; GFX11-TRUE16-NEXT: s_lshr_b32 s5, s16, 16
-; GFX11-TRUE16-NEXT: s_lshr_b32 s44, s3, 16
-; GFX11-TRUE16-NEXT: s_lshr_b32 s45, s2, 16
-; GFX11-TRUE16-NEXT: s_lshr_b32 s46, s1, 16
-; GFX11-TRUE16-NEXT: s_lshr_b32 s4, s0, 16
-; GFX11-TRUE16-NEXT: s_mov_b32 s40, 0
-; GFX11-TRUE16-NEXT: s_pack_ll_b32_b16 s4, s0, s4
-; GFX11-TRUE16-NEXT: s_pack_ll_b32_b16 s1, s1, s46
-; GFX11-TRUE16-NEXT: s_pack_ll_b32_b16 s2, s2, s45
-; GFX11-TRUE16-NEXT: s_pack_ll_b32_b16 s3, s3, s44
-; GFX11-TRUE16-NEXT: s_pack_ll_b32_b16 s5, s16, s5
-; GFX11-TRUE16-NEXT: s_pack_ll_b32_b16 s6, s17, s6
-; GFX11-TRUE16-NEXT: s_pack_ll_b32_b16 s7, s18, s7
-; GFX11-TRUE16-NEXT: s_pack_ll_b32_b16 s8, s19, s8
-; GFX11-TRUE16-NEXT: s_pack_ll_b32_b16 s9, s20, s9
-; GFX11-TRUE16-NEXT: s_pack_ll_b32_b16 s10, s21, s10
-; GFX11-TRUE16-NEXT: s_pack_ll_b32_b16 s11, s22, s11
-; GFX11-TRUE16-NEXT: s_pack_ll_b32_b16 s12, s23, s12
-; GFX11-TRUE16-NEXT: s_pack_ll_b32_b16 s13, s24, s13
-; GFX11-TRUE16-NEXT: s_pack_ll_b32_b16 s14, s25, s14
-; GFX11-TRUE16-NEXT: s_pack_ll_b32_b16 s0, s26, s43
-; GFX11-TRUE16-NEXT: s_pack_ll_b32_b16 s15, s27, s15
-; GFX11-TRUE16-NEXT: s_pack_ll_b32_b16 s16, s28, s42
-; GFX11-TRUE16-NEXT: s_pack_ll_b32_b16 s17, s29, s41
+; GFX11-TRUE16-NEXT: s_clause 0x1f
+; GFX11-TRUE16-NEXT: scratch_store_b32 off, v40, s32 offset:312
+; GFX11-TRUE16-NEXT: scratch_store_b32 off, v41, s32 offset:308
+; GFX11-TRUE16-NEXT: scratch_store_b32 off, v42, s32 offset:304
+; GFX11-TRUE16-NEXT: scratch_store_b32 off, v43, s32 offset:300
+; GFX11-TRUE16-NEXT: scratch_store_b32 off, v44, s32 offset:296
+; GFX11-TRUE16-NEXT: scratch_store_b32 off, v45, s32 offset:292
+; GFX11-TRUE16-NEXT: scratch_store_b32 off, v46, s32 offset:288
+; GFX11-TRUE16-NEXT: scratch_store_b32 off, v47, s32 offset:284
+; GFX11-TRUE16-NEXT: scratch_store_b32 off, v56, s32 offset:280
+; GFX11-TRUE16-NEXT: scratch_store_b32 off, v57, s32 offset:276
+; GFX11-TRUE16-NEXT: scratch_store_b32 off, v58, s32 offset:272
+; GFX11-TRUE16-NEXT: scratch_store_b32 off, v59, s32 offset:268
+; GFX11-TRUE16-NEXT: scratch_store_b32 off, v60, s32 offset:264
+; GFX11-TRUE16-NEXT: scratch_store_b32 off, v61, s32 offset:260
+; GFX11-TRUE16-NEXT: scratch_store_b32 off, v62, s32 offset:256
+; GFX11-TRUE16-NEXT: scratch_store_b32 off, v63, s32 offset:252
+; GFX11-TRUE16-NEXT: scratch_store_b32 off, v72, s32 offset:248
+; GFX11-TRUE16-NEXT: scratch_store_b32 off, v73, s32 offset:244
+; GFX11-TRUE16-NEXT: scratch_store_b32 off, v74, s32 offset:240
+; GFX11-TRUE16-NEXT: scratch_store_b32 off, v75, s32 offset:236
+; GFX11-TRUE16-NEXT: scratch_store_b32 off, v76, s32 offset:232
+; GFX11-TRUE16-NEXT: scratch_store_b32 off, v77, s32 offset:228
+; GFX11-TRUE16-NEXT: scratch_store_b32 off, v78, s32 offset:224
+; GFX11-TRUE16-NEXT: scratch_store_b32 off, v79, s32 offset:220
+; GFX11-TRUE16-NEXT: scratch_store_b32 off, v88, s32 offset:216
+; GFX11-TRUE16-NEXT: scratch_store_b32 off, v89, s32 offset:212
+; GFX11-TRUE16-NEXT: scratch_store_b32 off, v90, s32 offset:208
+; GFX11-TRUE16-NEXT: scratch_store_b32 off, v91, s32 offset:204
+; GFX11-TRUE16-NEXT: scratch_store_b32 off, v92, s32 offset:200
+; GFX11-TRUE16-NEXT: scratch_store_b32 off, v93, s32 offset:196
+; GFX11-TRUE16-NEXT: scratch_store_b32 off, v94, s32 offset:192
+; GFX11-TRUE16-NEXT: scratch_store_b32 off, v95, s32 offset:188
+; GFX11-TRUE16-NEXT: s_clause 0x1f
+; GFX11-TRUE16-NEXT: scratch_store_b32 off, v104, s32 offset:184
+; GFX11-TRUE16-NEXT: scratch_store_b32 off, v105, s32 offset:180
+; GFX11-TRUE16-NEXT: scratch_store_b32 off, v106, s32 offset:176
+; GFX11-TRUE16-NEXT: scratch_store_b32 off, v107, s32 offset:172
+; GFX11-TRUE16-NEXT: scratch_store_b32 off, v108, s32 offset:168
+; GFX11-TRUE16-NEXT: scratch_store_b32 off, v109, s32 offset:164
+; GFX11-TRUE16-NEXT: scratch_store_b32 off, v110, s32 offset:160
+; GFX11-TRUE16-NEXT: scratch_store_b32 off, v111, s32 offset:156
+; GFX11-TRUE16-NEXT: scratch_store_b32 off, v120, s32 offset:152
+; GFX11-TRUE16-NEXT: scratch_store_b32 off, v121, s32 offset:148
+; GFX11-TRUE16-NEXT: scratch_store_b32 off, v122, s32 offset:144
+; GFX11-TRUE16-NEXT: scratch_store_b32 off, v123, s32 offset:140
+; GFX11-TRUE16-NEXT: scratch_store_b32 off, v124, s32 offset:136
+; GFX11-TRUE16-NEXT: scratch_store_b32 off, v125, s32 offset:132
+; GFX11-TRUE16-NEXT: scratch_store_b32 off, v126, s32 offset:128
+; GFX11-TRUE16-NEXT: scratch_store_b32 off, v127, s32 offset:124
+; GFX11-TRUE16-NEXT: scratch_store_b32 off, v136, s32 offset:120
+; GFX11-TRUE16-NEXT: scratch_store_b32 off, v137, s32 offset:116
+; GFX11-TRUE16-NEXT: scratch_store_b32 off, v138, s32 offset:112
+; GFX11-TRUE16-NEXT: scratch_store_b32 off, v139, s32 offset:108
+; GFX11-TRUE16-NEXT: scratch_store_b32 off, v140, s32 offset:104
+; GFX11-TRUE16-NEXT: scratch_store_b32 off, v141, s32 offset:100
+; GFX11-TRUE16-NEXT: scratch_store_b32 off, v142, s32 offset:96
+; GFX11-TRUE16-NEXT: scratch_store_b32 off, v143, s32 offset:92
+; GFX11-TRUE16-NEXT: scratch_store_b32 off, v152, s32 offset:88
+; GFX11-TRUE16-NEXT: scratch_store_b32 off, v153, s32 offset:84
+; GFX11-TRUE16-NEXT: scratch_store_b32 off, v154, s32 offset:80
+; GFX11-TRUE16-NEXT: scratch_store_b32 off, v155, s32 offset:76
+; GFX11-TRUE16-NEXT: scratch_store_b32 off, v156, s32 offset:72
+; GFX11-TRUE16-NEXT: scratch_store_b32 off, v157, s32 offset:68
+; GFX11-TRUE16-NEXT: scratch_store_b32 off, v158, s32 offset:64
+; GFX11-TRUE16-NEXT: scratch_store_b32 off, v159, s32 offset:60
+; GFX11-TRUE16-NEXT: s_clause 0xe
+; GFX11-TRUE16-NEXT: scratch_store_b32 off, v168, s32 offset:56
+; GFX11-TRUE16-NEXT: scratch_store_b32 off, v169, s32 offset:52
+; GFX11-TRUE16-NEXT: scratch_store_b32 off, v170, s32 offset:48
+; GFX11-TRUE16-NEXT: scratch_store_b32 off, v171, s32 offset:44
+; GFX11-TRUE16-NEXT: scratch_store_b32 off, v172, s32 offset:40
+; GFX11-TRUE16-NEXT: scratch_store_b32 off, v173, s32 offset:36
+; GFX11-TRUE16-NEXT: scratch_store_b32 off, v174, s32 offset:32
+; GFX11-TRUE16-NEXT: scratch_store_b32 off, v175, s32 offset:28
+; GFX11-TRUE16-NEXT: scratch_store_b32 off, v184, s32 offset:24
+; GFX11-TRUE16-NEXT: scratch_store_b32 off, v185, s32 offset:20
+; GFX11-TRUE16-NEXT: scratch_store_b32 off, v186, s32 offset:16
+; GFX11-TRUE16-NEXT: scratch_store_b32 off, v187, s32 offset:12
+; GFX11-TRUE16-NEXT: scratch_store_b32 off, v188, s32 offset:8
+; GFX11-TRUE16-NEXT: scratch_store_b32 off, v189, s32 offset:4
+; GFX11-TRUE16-NEXT: scratch_store_b32 off, v190, s32
+; GFX11-TRUE16-NEXT: v_dual_mov_b32 v185, v5 :: v_dual_mov_b32 v186, v4
+; GFX11-TRUE16-NEXT: v_dual_mov_b32 v187, v3 :: v_dual_mov_b32 v188, v2
+; GFX11-TRUE16-NEXT: v_dual_mov_b32 v189, v1 :: v_dual_mov_b32 v190, v0
+; GFX11-TRUE16-NEXT: s_lshr_b32 s15, s29, 16
+; GFX11-TRUE16-NEXT: s_lshr_b32 s14, s28, 16
+; GFX11-TRUE16-NEXT: s_lshr_b32 s13, s27, 16
+; GFX11-TRUE16-NEXT: s_lshr_b32 s12, s26, 16
+; GFX11-TRUE16-NEXT: s_lshr_b32 s11, s25, 16
+; GFX11-TRUE16-NEXT: s_lshr_b32 s10, s24, 16
+; GFX11-TRUE16-NEXT: s_lshr_b32 s9, s23, 16
+; GFX11-TRUE16-NEXT: s_lshr_b32 s8, s22, 16
+; GFX11-TRUE16-NEXT: s_lshr_b32 s7, s21, 16
+; GFX11-TRUE16-NEXT: s_lshr_b32 s6, s20, 16
+; GFX11-TRUE16-NEXT: s_lshr_b32 s5, s19, 16
+; GFX11-TRUE16-NEXT: s_lshr_b32 s4, s18, 16
+; GFX11-TRUE16-NEXT: s_lshr_b32 s43, s17, 16
+; GFX11-TRUE16-NEXT: s_lshr_b32 s44, s16, 16
+; GFX11-TRUE16-NEXT: s_lshr_b32 s45, s3, 16
+; GFX11-TRUE16-NEXT: s_lshr_b32 s46, s2, 16
+; GFX11-TRUE16-NEXT: s_lshr_b32 s41, s1, 16
+; GFX11-TRUE16-NEXT: s_lshr_b32 s40, s0, 16
+; GFX11-TRUE16-NEXT: s_mov_b32 s42, 0
+; GFX11-TRUE16-NEXT: s_pack_ll_b32_b16 s40, s0, s40
+; GFX11-TRUE16-NEXT: s_pack_ll_b32_b16 s41, s1, s41
+; GFX11-TRUE16-NEXT: s_pack_ll_b32_b16 s0, s2, s46
+; GFX11-TRUE16-NEXT: s_pack_ll_b32_b16 s1, s3, s45
+; GFX11-TRUE16-NEXT: s_pack_ll_b32_b16 s2, s16, s44
+; GFX11-TRUE16-NEXT: s_pack_ll_b32_b16 s3, s17, s43
+; GFX11-TRUE16-NEXT: s_pack_ll_b32_b16 s4, s18, s4
+; GFX11-TRUE16-NEXT: s_pack_ll_b32_b16 s5, s19, s5
+; GFX11-TRUE16-NEXT: s_pack_ll_b32_b16 s6, s20, s6
+; GFX11-TRUE16-NEXT: s_pack_ll_b32_b16 s7, s21, s7
+; GFX11-TRUE16-NEXT: s_pack_ll_b32_b16 s8, s22, s8
+; GFX11-TRUE16-NEXT: s_pack_ll_b32_b16 s9, s23, s9
+; GFX11-TRUE16-NEXT: s_pack_ll_b32_b16 s10, s24, s10
+; GFX11-TRUE16-NEXT: s_pack_ll_b32_b16 s11, s25, s11
+; GFX11-TRUE16-NEXT: s_pack_ll_b32_b16 s12, s26, s12
+; GFX11-TRUE16-NEXT: s_pack_ll_b32_b16 s13, s27, s13
+; GFX11-TRUE16-NEXT: s_pack_ll_b32_b16 s14, s28, s14
+; GFX11-TRUE16-NEXT: s_pack_ll_b32_b16 s15, s29, s15
; GFX11-TRUE16-NEXT: s_and_b32 s47, vcc_lo, exec_lo
; GFX11-TRUE16-NEXT: s_cbranch_scc0 .LBB51_4
; GFX11-TRUE16-NEXT: ; %bb.1: ; %cmp.false
-; GFX11-TRUE16-NEXT: v_lshl_or_b32 v18, v37, 16, v51
-; GFX11-TRUE16-NEXT: v_lshl_or_b32 v19, v36, 16, v50
-; GFX11-TRUE16-NEXT: v_lshl_or_b32 v20, v35, 16, v49
-; GFX11-TRUE16-NEXT: v_lshl_or_b32 v21, v34, 16, v48
-; GFX11-TRUE16-NEXT: v_lshl_or_b32 v22, v33, 16, v39
-; GFX11-TRUE16-NEXT: v_lshl_or_b32 v23, v32, 16, v38
-; GFX11-TRUE16-NEXT: v_dual_mov_b32 v0, s4 :: v_dual_mov_b32 v1, s1
-; GFX11-TRUE16-NEXT: v_dual_mov_b32 v2, s2 :: v_dual_mov_b32 v3, s3
-; GFX11-TRUE16-NEXT: v_dual_mov_b32 v4, s5 :: v_dual_mov_b32 v5, s6
-; GFX11-TRUE16-NEXT: v_dual_mov_b32 v6, s7 :: v_dual_mov_b32 v7, s8
-; GFX11-TRUE16-NEXT: v_dual_mov_b32 v8, s9 :: v_dual_mov_b32 v9, s10
-; GFX11-TRUE16-NEXT: v_dual_mov_b32 v10, s11 :: v_dual_mov_b32 v11, s12
-; GFX11-TRUE16-NEXT: v_dual_mov_b32 v12, s13 :: v_dual_mov_b32 v13, s14
-; GFX11-TRUE16-NEXT: v_dual_mov_b32 v14, s0 :: v_dual_mov_b32 v15, s15
-; GFX11-TRUE16-NEXT: v_dual_mov_b32 v16, s16 :: v_dual_mov_b32 v17, s17
-; GFX11-TRUE16-NEXT: s_and_not1_b32 vcc_lo, exec_lo, s40
+; GFX11-TRUE16-NEXT: v_dual_mov_b32 v0, s40 :: v_dual_mov_b32 v5, s0
+; GFX11-TRUE16-NEXT: v_dual_mov_b32 v2, s41 :: v_dual_mov_b32 v9, s1
+; GFX11-TRUE16-NEXT: v_dual_mov_b32 v14, s2 :: v_dual_mov_b32 v27, s4
+; GFX11-TRUE16-NEXT: v_dual_mov_b32 v20, s3 :: v_dual_mov_b32 v35, s5
+; GFX11-TRUE16-NEXT: v_dual_mov_b32 v44, s6 :: v_dual_mov_b32 v65, s8
+; GFX11-TRUE16-NEXT: v_dual_mov_b32 v54, s7 :: v_dual_mov_b32 v77, s9
+; GFX11-TRUE16-NEXT: v_dual_mov_b32 v90, s10 :: v_dual_mov_b32 v119, s12
+; GFX11-TRUE16-NEXT: v_dual_mov_b32 v104, s11 :: v_dual_mov_b32 v135, s13
+; GFX11-TRUE16-NEXT: v_mov_b32_e32 v152, s14
+; GFX11-TRUE16-NEXT: v_mov_b32_e32 v170, s15
+; GFX11-TRUE16-NEXT: s_and_not1_b32 vcc_lo, exec_lo, s42
; GFX11-TRUE16-NEXT: s_cbranch_vccnz .LBB51_3
; GFX11-TRUE16-NEXT: .LBB51_2: ; %cmp.true
-; GFX11-TRUE16-NEXT: v_lshl_or_b32 v18, v37, 16, v51
-; GFX11-TRUE16-NEXT: v_lshl_or_b32 v19, v36, 16, v50
-; GFX11-TRUE16-NEXT: v_lshl_or_b32 v20, v35, 16, v49
-; GFX11-TRUE16-NEXT: v_lshl_or_b32 v21, v34, 16, v48
-; GFX11-TRUE16-NEXT: v_lshl_or_b32 v22, v33, 16, v39
-; GFX11-TRUE16-NEXT: v_lshl_or_b32 v23, v32, 16, v38
-; GFX11-TRUE16-NEXT: v_pk_add_u16 v0, s4, 3 op_sel_hi:[1,0]
-; GFX11-TRUE16-NEXT: v_pk_add_u16 v1, s1, 3 op_sel_hi:[1,0]
-; GFX11-TRUE16-NEXT: v_pk_add_u16 v2, s2, 3 op_sel_hi:[1,0]
-; GFX11-TRUE16-NEXT: v_pk_add_u16 v3, s3, 3 op_sel_hi:[1,0]
-; GFX11-TRUE16-NEXT: v_pk_add_u16 v4, s5, 3 op_sel_hi:[1,0]
-; GFX11-TRUE16-NEXT: v_pk_add_u16 v5, s6, 3 op_sel_hi:[1,0]
-; GFX11-TRUE16-NEXT: v_pk_add_u16 v6, s7, 3 op_sel_hi:[1,0]
-; GFX11-TRUE16-NEXT: v_pk_add_u16 v7, s8, 3 op_sel_hi:[1,0]
-; GFX11-TRUE16-NEXT: v_pk_add_u16 v8, s9, 3 op_sel_hi:[1,0]
-; GFX11-TRUE16-NEXT: v_pk_add_u16 v9, s10, 3 op_sel_hi:[1,0]
-; GFX11-TRUE16-NEXT: v_pk_add_u16 v10, s11, 3 op_sel_hi:[1,0]
-; GFX11-TRUE16-NEXT: v_pk_add_u16 v11, s12, 3 op_sel_hi:[1,0]
-; GFX11-TRUE16-NEXT: v_pk_add_u16 v12, s13, 3 op_sel_hi:[1,0]
-; GFX11-TRUE16-NEXT: v_pk_add_u16 v13, s14, 3 op_sel_hi:[1,0]
-; GFX11-TRUE16-NEXT: v_pk_add_u16 v14, s0, 3 op_sel_hi:[1,0]
-; GFX11-TRUE16-NEXT: v_pk_add_u16 v15, s15, 3 op_sel_hi:[1,0]
-; GFX11-TRUE16-NEXT: v_pk_add_u16 v16, s16, 3 op_sel_hi:[1,0]
-; GFX11-TRUE16-NEXT: v_pk_add_u16 v17, s17, 3 op_sel_hi:[1,0]
-; GFX11-TRUE16-NEXT: v_pk_add_u16 v18, v18, 3 op_sel_hi:[1,0]
-; GFX11-TRUE16-NEXT: v_pk_add_u16 v19, v19, 3 op_sel_hi:[1,0]
-; GFX11-TRUE16-NEXT: v_pk_add_u16 v20, v20, 3 op_sel_hi:[1,0]
-; GFX11-TRUE16-NEXT: v_pk_add_u16 v21, v21, 3 op_sel_hi:[1,0]
-; GFX11-TRUE16-NEXT: v_pk_add_u16 v22, v22, 3 op_sel_hi:[1,0]
-; GFX11-TRUE16-NEXT: v_pk_add_u16 v23, v23, 3 op_sel_hi:[1,0]
+; GFX11-TRUE16-NEXT: v_pk_add_u16 v0, s40, 3 op_sel_hi:[1,0]
+; GFX11-TRUE16-NEXT: v_pk_add_u16 v2, s41, 3 op_sel_hi:[1,0]
+; GFX11-TRUE16-NEXT: v_pk_add_u16 v190, v190, 3 op_sel_hi:[1,0]
+; GFX11-TRUE16-NEXT: v_pk_add_u16 v189, v189, 3 op_sel_hi:[1,0]
+; GFX11-TRUE16-NEXT: v_pk_add_u16 v188, v188, 3 op_sel_hi:[1,0]
+; GFX11-TRUE16-NEXT: v_pk_add_u16 v187, v187, 3 op_sel_hi:[1,0]
+; GFX11-TRUE16-NEXT: v_pk_add_u16 v186, v186, 3 op_sel_hi:[1,0]
+; GFX11-TRUE16-NEXT: v_pk_add_u16 v185, v185, 3 op_sel_hi:[1,0]
+; GFX11-TRUE16-NEXT: v_pk_add_u16 v5, s0, 3 op_sel_hi:[1,0]
+; GFX11-TRUE16-NEXT: v_pk_add_u16 v9, s1, 3 op_sel_hi:[1,0]
+; GFX11-TRUE16-NEXT: v_pk_add_u16 v14, s2, 3 op_sel_hi:[1,0]
+; GFX11-TRUE16-NEXT: v_pk_add_u16 v20, s3, 3 op_sel_hi:[1,0]
+; GFX11-TRUE16-NEXT: v_pk_add_u16 v27, s4, 3 op_sel_hi:[1,0]
+; GFX11-TRUE16-NEXT: v_pk_add_u16 v35, s5, 3 op_sel_hi:[1,0]
+; GFX11-TRUE16-NEXT: v_pk_add_u16 v44, s6, 3 op_sel_hi:[1,0]
+; GFX11-TRUE16-NEXT: v_pk_add_u16 v54, s7, 3 op_sel_hi:[1,0]
+; GFX11-TRUE16-NEXT: v_pk_add_u16 v65, s8, 3 op_sel_hi:[1,0]
+; GFX11-TRUE16-NEXT: v_pk_add_u16 v77, s9, 3 op_sel_hi:[1,0]
+; GFX11-TRUE16-NEXT: v_pk_add_u16 v90, s10, 3 op_sel_hi:[1,0]
+; GFX11-TRUE16-NEXT: v_pk_add_u16 v104, s11, 3 op_sel_hi:[1,0]
+; GFX11-TRUE16-NEXT: v_pk_add_u16 v119, s12, 3 op_sel_hi:[1,0]
+; GFX11-TRUE16-NEXT: v_pk_add_u16 v135, s13, 3 op_sel_hi:[1,0]
+; GFX11-TRUE16-NEXT: v_pk_add_u16 v152, s14, 3 op_sel_hi:[1,0]
+; GFX11-TRUE16-NEXT: v_pk_add_u16 v170, s15, 3 op_sel_hi:[1,0]
; GFX11-TRUE16-NEXT: .LBB51_3: ; %end
+; GFX11-TRUE16-NEXT: v_dual_mov_b32 v1, v2 :: v_dual_mov_b32 v2, v5
+; GFX11-TRUE16-NEXT: v_dual_mov_b32 v5, v20 :: v_dual_mov_b32 v6, v27
+; GFX11-TRUE16-NEXT: v_dual_mov_b32 v7, v35 :: v_dual_mov_b32 v8, v44
+; GFX11-TRUE16-NEXT: v_dual_mov_b32 v11, v77 :: v_dual_mov_b32 v12, v90
+; GFX11-TRUE16-NEXT: v_mov_b32_e32 v13, v104
+; GFX11-TRUE16-NEXT: v_dual_mov_b32 v15, v135 :: v_dual_mov_b32 v16, v152
+; GFX11-TRUE16-NEXT: v_dual_mov_b32 v17, v170 :: v_dual_mov_b32 v20, v188
+; GFX11-TRUE16-NEXT: v_dual_mov_b32 v18, v190 :: v_dual_mov_b32 v19, v189
+; GFX11-TRUE16-NEXT: v_dual_mov_b32 v21, v187 :: v_dual_mov_b32 v22, v186
+; GFX11-TRUE16-NEXT: v_mov_b32_e32 v23, v185
+; GFX11-TRUE16-NEXT: s_clause 0x1f
+; GFX11-TRUE16-NEXT: scratch_load_b32 v190, off, s32
+; GFX11-TRUE16-NEXT: scratch_load_b32 v189, off, s32 offset:4
+; GFX11-TRUE16-NEXT: scratch_load_b32 v188, off, s32 offset:8
+; GFX11-TRUE16-NEXT: scratch_load_b32 v187, off, s32 offset:12
+; GFX11-TRUE16-NEXT: scratch_load_b32 v186, off, s32 offset:16
+; GFX11-TRUE16-NEXT: scratch_load_b32 v185, off, s32 offset:20
+; GFX11-TRUE16-NEXT: scratch_load_b32 v184, off, s32 offset:24
+; GFX11-TRUE16-NEXT: scratch_load_b32 v175, off, s32 offset:28
+; GFX11-TRUE16-NEXT: scratch_load_b32 v174, off, s32 offset:32
+; GFX11-TRUE16-NEXT: scratch_load_b32 v173, off, s32 offset:36
+; GFX11-TRUE16-NEXT: scratch_load_b32 v172, off, s32 offset:40
+; GFX11-TRUE16-NEXT: scratch_load_b32 v171, off, s32 offset:44
+; GFX11-TRUE16-NEXT: scratch_load_b32 v170, off, s32 offset:48
+; GFX11-TRUE16-NEXT: scratch_load_b32 v169, off, s32 offset:52
+; GFX11-TRUE16-NEXT: scratch_load_b32 v168, off, s32 offset:56
+; GFX11-TRUE16-NEXT: scratch_load_b32 v159, off, s32 offset:60
+; GFX11-TRUE16-NEXT: scratch_load_b32 v158, off, s32 offset:64
+; GFX11-TRUE16-NEXT: scratch_load_b32 v157, off, s32 offset:68
+; GFX11-TRUE16-NEXT: scratch_load_b32 v156, off, s32 offset:72
+; GFX11-TRUE16-NEXT: scratch_load_b32 v155, off, s32 offset:76
+; GFX11-TRUE16-NEXT: scratch_load_b32 v154, off, s32 offset:80
+; GFX11-TRUE16-NEXT: scratch_load_b32 v153, off, s32 offset:84
+; GFX11-TRUE16-NEXT: scratch_load_b32 v152, off, s32 offset:88
+; GFX11-TRUE16-NEXT: scratch_load_b32 v143, off, s32 offset:92
+; GFX11-TRUE16-NEXT: scratch_load_b32 v142, off, s32 offset:96
+; GFX11-TRUE16-NEXT: scratch_load_b32 v141, off, s32 offset:100
+; GFX11-TRUE16-NEXT: scratch_load_b32 v140, off, s32 offset:104
+; GFX11-TRUE16-NEXT: scratch_load_b32 v139, off, s32 offset:108
+; GFX11-TRUE16-NEXT: scratch_load_b32 v138, off, s32 offset:112
+; GFX11-TRUE16-NEXT: scratch_load_b32 v137, off, s32 offset:116
+; GFX11-TRUE16-NEXT: scratch_load_b32 v136, off, s32 offset:120
+; GFX11-TRUE16-NEXT: scratch_load_b32 v127, off, s32 offset:124
+; GFX11-TRUE16-NEXT: s_clause 0x1f
+; GFX11-TRUE16-NEXT: scratch_load_b32 v126, off, s32 offset:128
+; GFX11-TRUE16-NEXT: scratch_load_b32 v125, off, s32 offset:132
+; GFX11-TRUE16-NEXT: scratch_load_b32 v124, off, s32 offset:136
+; GFX11-TRUE16-NEXT: scratch_load_b32 v123, off, s32 offset:140
+; GFX11-TRUE16-NEXT: scratch_load_b32 v122, off, s32 offset:144
+; GFX11-TRUE16-NEXT: scratch_load_b32 v121, off, s32 offset:148
+; GFX11-TRUE16-NEXT: scratch_load_b32 v120, off, s32 offset:152
+; GFX11-TRUE16-NEXT: scratch_load_b32 v111, off, s32 offset:156
+; GFX11-TRUE16-NEXT: scratch_load_b32 v110, off, s32 offset:160
+; GFX11-TRUE16-NEXT: scratch_load_b32 v109, off, s32 offset:164
+; GFX11-TRUE16-NEXT: scratch_load_b32 v108, off, s32 offset:168
+; GFX11-TRUE16-NEXT: scratch_load_b32 v107, off, s32 offset:172
+; GFX11-TRUE16-NEXT: scratch_load_b32 v106, off, s32 offset:176
+; GFX11-TRUE16-NEXT: scratch_load_b32 v105, off, s32 offset:180
+; GFX11-TRUE16-NEXT: scratch_load_b32 v104, off, s32 offset:184
+; GFX11-TRUE16-NEXT: scratch_load_b32 v95, off, s32 offset:188
+; GFX11-TRUE16-NEXT: scratch_load_b32 v94, off, s32 offset:192
+; GFX11-TRUE16-NEXT: scratch_load_b32 v93, off, s32 offset:196
+; GFX11-TRUE16-NEXT: scratch_load_b32 v92, off, s32 offset:200
+; GFX11-TRUE16-NEXT: scratch_load_b32 v91, off, s32 offset:204
+; GFX11-TRUE16-NEXT: scratch_load_b32 v90, off, s32 offset:208
+; GFX11-TRUE16-NEXT: scratch_load_b32 v89, off, s32 offset:212
+; GFX11-TRUE16-NEXT: scratch_load_b32 v88, off, s32 offset:216
+; GFX11-TRUE16-NEXT: scratch_load_b32 v79, off, s32 offset:220
+; GFX11-TRUE16-NEXT: scratch_load_b32 v78, off, s32 offset:224
+; GFX11-TRUE16-NEXT: scratch_load_b32 v77, off, s32 offset:228
+; GFX11-TRUE16-NEXT: scratch_load_b32 v76, off, s32 offset:232
+; GFX11-TRUE16-NEXT: scratch_load_b32 v75, off, s32 offset:236
+; GFX11-TRUE16-NEXT: scratch_load_b32 v74, off, s32 offset:240
+; GFX11-TRUE16-NEXT: scratch_load_b32 v73, off, s32 offset:244
+; GFX11-TRUE16-NEXT: scratch_load_b32 v72, off, s32 offset:248
+; GFX11-TRUE16-NEXT: scratch_load_b32 v63, off, s32 offset:252
+; GFX11-TRUE16-NEXT: s_clause 0xe
+; GFX11-TRUE16-NEXT: scratch_load_b32 v62, off, s32 offset:256
+; GFX11-TRUE16-NEXT: scratch_load_b32 v61, off, s32 offset:260
+; GFX11-TRUE16-NEXT: scratch_load_b32 v60, off, s32 offset:264
+; GFX11-TRUE16-NEXT: scratch_load_b32 v59, off, s32 offset:268
+; GFX11-TRUE16-NEXT: scratch_load_b32 v58, off, s32 offset:272
+; GFX11-TRUE16-NEXT: scratch_load_b32 v57, off, s32 offset:276
+; GFX11-TRUE16-NEXT: scratch_load_b32 v56, off, s32 offset:280
+; GFX11-TRUE16-NEXT: scratch_load_b32 v47, off, s32 offset:284
+; GFX11-TRUE16-NEXT: scratch_load_b32 v46, off, s32 offset:288
+; GFX11-TRUE16-NEXT: scratch_load_b32 v45, off, s32 offset:292
+; GFX11-TRUE16-NEXT: scratch_load_b32 v44, off, s32 offset:296
+; GFX11-TRUE16-NEXT: scratch_load_b32 v43, off, s32 offset:300
+; GFX11-TRUE16-NEXT: scratch_load_b32 v42, off, s32 offset:304
+; GFX11-TRUE16-NEXT: scratch_load_b32 v41, off, s32 offset:308
+; GFX11-TRUE16-NEXT: scratch_load_b32 v40, off, s32 offset:312
+; GFX11-TRUE16-NEXT: v_dual_mov_b32 v3, v9 :: v_dual_mov_b32 v4, v14
+; GFX11-TRUE16-NEXT: v_dual_mov_b32 v9, v54 :: v_dual_mov_b32 v10, v65
+; GFX11-TRUE16-NEXT: v_mov_b32_e32 v14, v119
+; GFX11-TRUE16-NEXT: s_waitcnt vmcnt(0)
; GFX11-TRUE16-NEXT: s_setpc_b64 s[30:31]
; GFX11-TRUE16-NEXT: .LBB51_4:
; GFX11-TRUE16-NEXT: ; implicit-def: $vgpr0_vgpr1_vgpr2_vgpr3_vgpr4_vgpr5_vgpr6_vgpr7_vgpr8_vgpr9_vgpr10_vgpr11_vgpr12_vgpr13_vgpr14_vgpr15_vgpr16_vgpr17_vgpr18_vgpr19_vgpr20_vgpr21_vgpr22_vgpr23_vgpr24_vgpr25_vgpr26_vgpr27_vgpr28_vgpr29_vgpr30_vgpr31
+; GFX11-TRUE16-NEXT: ; implicit-def: $vgpr1_vgpr2_vgpr3_vgpr4_vgpr5_vgpr6_vgpr7_vgpr8_vgpr9_vgpr10_vgpr11_vgpr12_vgpr13_vgpr14_vgpr15_vgpr16_vgpr17_vgpr18_vgpr19_vgpr20_vgpr21_vgpr22_vgpr23_vgpr24_vgpr25_vgpr26_vgpr27_vgpr28_vgpr29_vgpr30_vgpr31_vgpr32
+; GFX11-TRUE16-NEXT: ; implicit-def: $vgpr3_vgpr4_vgpr5_vgpr6_vgpr7_vgpr8_vgpr9_vgpr10_vgpr11_vgpr12_vgpr13_vgpr14_vgpr15_vgpr16_vgpr17_vgpr18_vgpr19_vgpr20_vgpr21_vgpr22_vgpr23_vgpr24_vgpr25_vgpr26_vgpr27_vgpr28_vgpr29_vgpr30_vgpr31_vgpr32_vgpr33_vgpr34
+; GFX11-TRUE16-NEXT: ; implicit-def: $vgpr6_vgpr7_vgpr8_vgpr9_vgpr10_vgpr11_vgpr12_vgpr13_vgpr14_vgpr15_vgpr16_vgpr17_vgpr18_vgpr19_vgpr20_vgpr21_vgpr22_vgpr23_vgpr24_vgpr25_vgpr26_vgpr27_vgpr28_vgpr29_vgpr30_vgpr31_vgpr32_vgpr33_vgpr34_vgpr35_vgpr36_vgpr37
+; GFX11-TRUE16-NEXT: ; implicit-def: $vgpr10_vgpr11_vgpr12_vgpr13_vgpr14_vgpr15_vgpr16_vgpr17_vgpr18_vgpr19_vgpr20_vgpr21_vgpr22_vgpr23_vgpr24_vgpr25_vgpr26_vgpr27_vgpr28_vgpr29_vgpr30_vgpr31_vgpr32_vgpr33_vgpr34_vgpr35_vgpr36_vgpr37_vgpr38_vgpr39_vgpr40_vgpr41
+; GFX11-TRUE16-NEXT: ; implicit-def: $vgpr15_vgpr16_vgpr17_vgpr18_vgpr19_vgpr20_vgpr21_vgpr22_vgpr23_vgpr24_vgpr25_vgpr26_vgpr27_vgpr28_vgpr29_vgpr30_vgpr31_vgpr32_vgpr33_vgpr34_vgpr35_vgpr36_vgpr37_vgpr38_vgpr39_vgpr40_vgpr41_vgpr42_vgpr43_vgpr44_vgpr45_vgpr46
+; GFX11-TRUE16-NEXT: ; implicit-def: $vgpr21_vgpr22_vgpr23_vgpr24_vgpr25_vgpr26_vgpr27_vgpr28_vgpr29_vgpr30_vgpr31_vgpr32_vgpr33_vgpr34_vgpr35_vgpr36_vgpr37_vgpr38_vgpr39_vgpr40_vgpr41_vgpr42_vgpr43_vgpr44_vgpr45_vgpr46_vgpr47_vgpr48_vgpr49_vgpr50_vgpr51_vgpr52
+; GFX11-TRUE16-NEXT: ; implicit-def: $vgpr28_vgpr29_vgpr30_vgpr31_vgpr32_vgpr33_vgpr34_vgpr35_vgpr36_vgpr37_vgpr38_vgpr39_vgpr40_vgpr41_vgpr42_vgpr43_vgpr44_vgpr45_vgpr46_vgpr47_vgpr48_vgpr49_vgpr50_vgpr51_vgpr52_vgpr53_vgpr54_vgpr55_vgpr56_vgpr57_vgpr58_vgpr59
+; GFX11-TRUE16-NEXT: ; implicit-def: $vgpr36_vgpr37_vgpr38_vgpr39_vgpr40_vgpr41_vgpr42_vgpr43_vgpr44_vgpr45_vgpr46_vgpr47_vgpr48_vgpr49_vgpr50_vgpr51_vgpr52_vgpr53_vgpr54_vgpr55_vgpr56_vgpr57_vgpr58_vgpr59_vgpr60_vgpr61_vgpr62_vgpr63_vgpr64_vgpr65_vgpr66_vgpr67
+; GFX11-TRUE16-NEXT: ; implicit-def: $vgpr45_vgpr46_vgpr47_vgpr48_vgpr49_vgpr50_vgpr51_vgpr52_vgpr53_vgpr54_vgpr55_vgpr56_vgpr57_vgpr58_vgpr59_vgpr60_vgpr61_vgpr62_vgpr63_vgpr64_vgpr65_vgpr66_vgpr67_vgpr68_vgpr69_vgpr70_vgpr71_vgpr72_vgpr73_vgpr74_vgpr75_vgpr76
+; GFX11-TRUE16-NEXT: ; implicit-def: $vgpr55_vgpr56_vgpr57_vgpr58_vgpr59_vgpr60_vgpr61_vgpr62_vgpr63_vgpr64_vgpr65_vgpr66_vgpr67_vgpr68_vgpr69_vgpr70_vgpr71_vgpr72_vgpr73_vgpr74_vgpr75_vgpr76_vgpr77_vgpr78_vgpr79_vgpr80_vgpr81_vgpr82_vgpr83_vgpr84_vgpr85_vgpr86
+; GFX11-TRUE16-NEXT: ; implicit-def: $vgpr66_vgpr67_vgpr68_vgpr69_vgpr70_vgpr71_vgpr72_vgpr73_vgpr74_vgpr75_vgpr76_vgpr77_vgpr78_vgpr79_vgpr80_vgpr81_vgpr82_vgpr83_vgpr84_vgpr85_vgpr86_vgpr87_vgpr88_vgpr89_vgpr90_vgpr91_vgpr92_vgpr93_vgpr94_vgpr95_vgpr96_vgpr97
+; GFX11-TRUE16-NEXT: ; implicit-def: $vgpr78_vgpr79_vgpr80_vgpr81_vgpr82_vgpr83_vgpr84_vgpr85_vgpr86_vgpr87_vgpr88_vgpr89_vgpr90_vgpr91_vgpr92_vgpr93_vgpr94_vgpr95_vgpr96_vgpr97_vgpr98_vgpr99_vgpr100_vgpr101_vgpr102_vgpr103_vgpr104_vgpr105_vgpr106_vgpr107_vgpr108_vgpr109
+; GFX11-TRUE16-NEXT: ; implicit-def: $vgpr91_vgpr92_vgpr93_vgpr94_vgpr95_vgpr96_vgpr97_vgpr98_vgpr99_vgpr100_vgpr101_vgpr102_vgpr103_vgpr104_vgpr105_vgpr106_vgpr107_vgpr108_vgpr109_vgpr110_vgpr111_vgpr112_vgpr113_vgpr114_vgpr115_vgpr116_vgpr117_vgpr118_vgpr119_vgpr120_vgpr121_vgpr122
+; GFX11-TRUE16-NEXT: ; implicit-def: $vgpr105_vgpr106_vgpr107_vgpr108_vgpr109_vgpr110_vgpr111_vgpr112_vgpr113_vgpr114_vgpr115_vgpr116_vgpr117_vgpr118_vgpr119_vgpr120_vgpr121_vgpr122_vgpr123_vgpr124_vgpr125_vgpr126_vgpr127_vgpr128_vgpr129_vgpr130_vgpr131_vgpr132_vgpr133_vgpr134_vgpr135_vgpr136
+; GFX11-TRUE16-NEXT: ; implicit-def: $vgpr120_vgpr121_vgpr122_vgpr123_vgpr124_vgpr125_vgpr126_vgpr127_vgpr128_vgpr129_vgpr130_vgpr131_vgpr132_vgpr133_vgpr134_vgpr135_vgpr136_vgpr137_vgpr138_vgpr139_vgpr140_vgpr141_vgpr142_vgpr143_vgpr144_vgpr145_vgpr146_vgpr147_vgpr148_vgpr149_vgpr150_vgpr151
+; GFX11-TRUE16-NEXT: ; implicit-def: $vgpr136_vgpr137_vgpr138_vgpr139_vgpr140_vgpr141_vgpr142_vgpr143_vgpr144_vgpr145_vgpr146_vgpr147_vgpr148_vgpr149_vgpr150_vgpr151_vgpr152_vgpr153_vgpr154_vgpr155_vgpr156_vgpr157_vgpr158_vgpr159_vgpr160_vgpr161_vgpr162_vgpr163_vgpr164_vgpr165_vgpr166_vgpr167
+; GFX11-TRUE16-NEXT: ; implicit-def: $vgpr153_vgpr154_vgpr155_vgpr156_vgpr157_vgpr158_vgpr159_vgpr160_vgpr161_vgpr162_vgpr163_vgpr164_vgpr165_vgpr166_vgpr167_vgpr168_vgpr169_vgpr170_vgpr171_vgpr172_vgpr173_vgpr174_vgpr175_vgpr176_vgpr177_vgpr178_vgpr179_vgpr180_vgpr181_vgpr182_vgpr183_vgpr184
; GFX11-TRUE16-NEXT: s_branch .LBB51_2
;
; GFX11-FAKE16-LABEL: bitcast_v48i16_to_v12f64_scalar:
@@ -33424,166 +35060,317 @@ define inreg <48 x half> @bitcast_v12f64_to_v48f16_scalar(<12 x double> inreg %a
; GFX9-NEXT: ; implicit-def: $vgpr34
; GFX9-NEXT: s_branch .LBB53_2
;
-; GFX11-LABEL: bitcast_v12f64_to_v48f16_scalar:
-; GFX11: ; %bb.0:
-; GFX11-NEXT: s_waitcnt vmcnt(0) expcnt(0) lgkmcnt(0)
-; GFX11-NEXT: v_cmp_ne_u32_e32 vcc_lo, 0, v6
-; GFX11-NEXT: v_dual_mov_b32 v24, s0 :: v_dual_mov_b32 v25, s1
-; GFX11-NEXT: v_dual_mov_b32 v22, s2 :: v_dual_mov_b32 v23, s3
-; GFX11-NEXT: v_dual_mov_b32 v20, s16 :: v_dual_mov_b32 v21, s17
-; GFX11-NEXT: v_dual_mov_b32 v7, s18 :: v_dual_mov_b32 v8, s19
-; GFX11-NEXT: v_dual_mov_b32 v18, s20 :: v_dual_mov_b32 v19, s21
-; GFX11-NEXT: v_dual_mov_b32 v11, s22 :: v_dual_mov_b32 v12, s23
-; GFX11-NEXT: v_dual_mov_b32 v9, s24 :: v_dual_mov_b32 v10, s25
-; GFX11-NEXT: v_dual_mov_b32 v14, s26 :: v_dual_mov_b32 v15, s27
-; GFX11-NEXT: v_dual_mov_b32 v16, s28 :: v_dual_mov_b32 v17, s29
-; GFX11-NEXT: s_mov_b32 s0, 0
-; GFX11-NEXT: s_and_b32 s1, vcc_lo, exec_lo
-; GFX11-NEXT: s_cbranch_scc0 .LBB53_4
-; GFX11-NEXT: ; %bb.1: ; %cmp.false
-; GFX11-NEXT: v_lshrrev_b32_e32 v30, 16, v5
-; GFX11-NEXT: v_lshrrev_b32_e32 v31, 16, v4
-; GFX11-NEXT: v_lshrrev_b32_e32 v32, 16, v3
-; GFX11-NEXT: v_lshrrev_b32_e32 v33, 16, v2
-; GFX11-NEXT: v_lshrrev_b32_e32 v34, 16, v1
-; GFX11-NEXT: v_lshrrev_b32_e32 v35, 16, v0
-; GFX11-NEXT: v_lshrrev_b32_e32 v36, 16, v17
-; GFX11-NEXT: v_lshrrev_b32_e32 v37, 16, v16
-; GFX11-NEXT: v_lshrrev_b32_e32 v38, 16, v15
-; GFX11-NEXT: v_lshrrev_b32_e32 v39, 16, v14
-; GFX11-NEXT: v_lshrrev_b32_e32 v13, 16, v10
-; GFX11-NEXT: v_lshrrev_b32_e32 v48, 16, v9
-; GFX11-NEXT: v_lshrrev_b32_e32 v49, 16, v12
-; GFX11-NEXT: v_lshrrev_b32_e32 v50, 16, v11
-; GFX11-NEXT: v_lshrrev_b32_e32 v51, 16, v19
-; GFX11-NEXT: v_lshrrev_b32_e32 v52, 16, v18
-; GFX11-NEXT: v_lshrrev_b32_e32 v53, 16, v8
-; GFX11-NEXT: v_lshrrev_b32_e32 v6, 16, v7
-; GFX11-NEXT: v_lshrrev_b32_e32 v29, 16, v21
-; GFX11-NEXT: v_lshrrev_b32_e32 v28, 16, v20
-; GFX11-NEXT: v_lshrrev_b32_e32 v27, 16, v23
-; GFX11-NEXT: v_lshrrev_b32_e32 v26, 16, v22
-; GFX11-NEXT: v_lshrrev_b32_e32 v54, 16, v25
-; GFX11-NEXT: v_lshrrev_b32_e32 v55, 16, v24
-; GFX11-NEXT: s_and_not1_b32 vcc_lo, exec_lo, s0
-; GFX11-NEXT: s_cbranch_vccnz .LBB53_3
-; GFX11-NEXT: .LBB53_2: ; %cmp.true
-; GFX11-NEXT: v_add_f64 v[4:5], v[4:5], 1.0
-; GFX11-NEXT: v_add_f64 v[2:3], v[2:3], 1.0
-; GFX11-NEXT: v_add_f64 v[0:1], v[0:1], 1.0
-; GFX11-NEXT: v_add_f64 v[16:17], v[16:17], 1.0
-; GFX11-NEXT: v_add_f64 v[14:15], v[14:15], 1.0
-; GFX11-NEXT: v_add_f64 v[9:10], v[9:10], 1.0
-; GFX11-NEXT: v_add_f64 v[11:12], v[11:12], 1.0
-; GFX11-NEXT: v_add_f64 v[18:19], v[18:19], 1.0
-; GFX11-NEXT: v_add_f64 v[7:8], v[7:8], 1.0
-; GFX11-NEXT: v_add_f64 v[20:21], v[20:21], 1.0
-; GFX11-NEXT: v_add_f64 v[22:23], v[22:23], 1.0
-; GFX11-NEXT: v_add_f64 v[24:25], v[24:25], 1.0
-; GFX11-NEXT: v_lshrrev_b32_e32 v30, 16, v5
-; GFX11-NEXT: v_lshrrev_b32_e32 v31, 16, v4
-; GFX11-NEXT: v_lshrrev_b32_e32 v32, 16, v3
-; GFX11-NEXT: v_lshrrev_b32_e32 v33, 16, v2
-; GFX11-NEXT: v_lshrrev_b32_e32 v34, 16, v1
-; GFX11-NEXT: v_lshrrev_b32_e32 v35, 16, v0
-; GFX11-NEXT: v_lshrrev_b32_e32 v36, 16, v17
-; GFX11-NEXT: v_lshrrev_b32_e32 v37, 16, v16
-; GFX11-NEXT: v_lshrrev_b32_e32 v38, 16, v15
-; GFX11-NEXT: v_lshrrev_b32_e32 v39, 16, v14
-; GFX11-NEXT: v_lshrrev_b32_e32 v13, 16, v10
-; GFX11-NEXT: v_lshrrev_b32_e32 v48, 16, v9
-; GFX11-NEXT: v_lshrrev_b32_e32 v49, 16, v12
-; GFX11-NEXT: v_lshrrev_b32_e32 v50, 16, v11
-; GFX11-NEXT: v_lshrrev_b32_e32 v51, 16, v19
-; GFX11-NEXT: v_lshrrev_b32_e32 v52, 16, v18
-; GFX11-NEXT: v_lshrrev_b32_e32 v53, 16, v8
-; GFX11-NEXT: v_lshrrev_b32_e32 v6, 16, v7
-; GFX11-NEXT: v_lshrrev_b32_e32 v29, 16, v21
-; GFX11-NEXT: v_lshrrev_b32_e32 v28, 16, v20
-; GFX11-NEXT: v_lshrrev_b32_e32 v27, 16, v23
-; GFX11-NEXT: v_lshrrev_b32_e32 v26, 16, v22
-; GFX11-NEXT: v_lshrrev_b32_e32 v54, 16, v25
-; GFX11-NEXT: v_lshrrev_b32_e32 v55, 16, v24
-; GFX11-NEXT: .LBB53_3: ; %end
-; GFX11-NEXT: v_and_b32_e32 v21, 0xffff, v21
-; GFX11-NEXT: v_and_b32_e32 v7, 0xffff, v7
-; GFX11-NEXT: v_and_b32_e32 v8, 0xffff, v8
-; GFX11-NEXT: v_and_b32_e32 v18, 0xffff, v18
-; GFX11-NEXT: v_and_b32_e32 v25, 0xffff, v25
-; GFX11-NEXT: v_and_b32_e32 v23, 0xffff, v23
-; GFX11-NEXT: v_lshl_or_b32 v29, v29, 16, v21
-; GFX11-NEXT: v_and_b32_e32 v20, 0xffff, v20
-; GFX11-NEXT: v_lshl_or_b32 v6, v6, 16, v7
-; GFX11-NEXT: v_lshl_or_b32 v7, v53, 16, v8
-; GFX11-NEXT: v_lshl_or_b32 v8, v52, 16, v18
-; GFX11-NEXT: v_and_b32_e32 v18, 0xffff, v19
-; GFX11-NEXT: v_and_b32_e32 v0, 0xffff, v0
-; GFX11-NEXT: v_lshl_or_b32 v25, v54, 16, v25
-; GFX11-NEXT: v_and_b32_e32 v24, 0xffff, v24
-; GFX11-NEXT: v_lshl_or_b32 v27, v27, 16, v23
-; GFX11-NEXT: v_and_b32_e32 v22, 0xffff, v22
-; GFX11-NEXT: v_and_b32_e32 v19, 0xffff, v9
-; GFX11-NEXT: v_lshl_or_b32 v9, v51, 16, v18
-; GFX11-NEXT: v_lshl_or_b32 v18, v35, 16, v0
-; GFX11-NEXT: v_and_b32_e32 v0, 0xffff, v1
-; GFX11-NEXT: v_and_b32_e32 v1, 0xffff, v2
-; GFX11-NEXT: v_and_b32_e32 v2, 0xffff, v3
-; GFX11-NEXT: v_and_b32_e32 v3, 0xffff, v4
-; GFX11-NEXT: v_dual_mov_b32 v5, v29 :: v_dual_and_b32 v4, 0xffff, v5
-; GFX11-NEXT: v_lshl_or_b32 v28, v28, 16, v20
-; GFX11-NEXT: v_and_b32_e32 v20, 0xffff, v10
-; GFX11-NEXT: v_lshl_or_b32 v26, v26, 16, v22
-; GFX11-NEXT: v_and_b32_e32 v11, 0xffff, v11
-; GFX11-NEXT: v_and_b32_e32 v12, 0xffff, v12
-; GFX11-NEXT: v_and_b32_e32 v14, 0xffff, v14
-; GFX11-NEXT: v_lshl_or_b32 v13, v13, 16, v20
-; GFX11-NEXT: v_and_b32_e32 v15, 0xffff, v15
-; GFX11-NEXT: v_and_b32_e32 v16, 0xffff, v16
-; GFX11-NEXT: v_and_b32_e32 v17, 0xffff, v17
-; GFX11-NEXT: v_lshl_or_b32 v20, v33, 16, v1
-; GFX11-NEXT: v_mov_b32_e32 v1, v25
-; GFX11-NEXT: v_lshl_or_b32 v24, v55, 16, v24
-; GFX11-NEXT: v_lshl_or_b32 v10, v50, 16, v11
-; GFX11-NEXT: v_lshl_or_b32 v11, v49, 16, v12
-; GFX11-NEXT: v_lshl_or_b32 v12, v48, 16, v19
-; GFX11-NEXT: v_lshl_or_b32 v14, v39, 16, v14
-; GFX11-NEXT: v_lshl_or_b32 v15, v38, 16, v15
-; GFX11-NEXT: v_lshl_or_b32 v16, v37, 16, v16
-; GFX11-NEXT: v_lshl_or_b32 v17, v36, 16, v17
-; GFX11-NEXT: v_lshl_or_b32 v19, v34, 16, v0
-; GFX11-NEXT: v_lshl_or_b32 v21, v32, 16, v2
-; GFX11-NEXT: v_lshl_or_b32 v22, v31, 16, v3
-; GFX11-NEXT: v_lshl_or_b32 v23, v30, 16, v4
-; GFX11-NEXT: v_mov_b32_e32 v0, v24
-; GFX11-NEXT: v_dual_mov_b32 v2, v26 :: v_dual_mov_b32 v3, v27
-; GFX11-NEXT: v_mov_b32_e32 v4, v28
-; GFX11-NEXT: s_setpc_b64 s[30:31]
-; GFX11-NEXT: .LBB53_4:
-; GFX11-NEXT: ; implicit-def: $vgpr55
-; GFX11-NEXT: ; implicit-def: $vgpr54
-; GFX11-NEXT: ; implicit-def: $vgpr26
-; GFX11-NEXT: ; implicit-def: $vgpr27
-; GFX11-NEXT: ; implicit-def: $vgpr28
-; GFX11-NEXT: ; implicit-def: $vgpr29
-; GFX11-NEXT: ; implicit-def: $vgpr6
-; GFX11-NEXT: ; implicit-def: $vgpr53
-; GFX11-NEXT: ; implicit-def: $vgpr52
-; GFX11-NEXT: ; implicit-def: $vgpr51
-; GFX11-NEXT: ; implicit-def: $vgpr50
-; GFX11-NEXT: ; implicit-def: $vgpr49
-; GFX11-NEXT: ; implicit-def: $vgpr48
-; GFX11-NEXT: ; implicit-def: $vgpr13
-; GFX11-NEXT: ; implicit-def: $vgpr39
-; GFX11-NEXT: ; implicit-def: $vgpr38
-; GFX11-NEXT: ; implicit-def: $vgpr37
-; GFX11-NEXT: ; implicit-def: $vgpr36
-; GFX11-NEXT: ; implicit-def: $vgpr35
-; GFX11-NEXT: ; implicit-def: $vgpr34
-; GFX11-NEXT: ; implicit-def: $vgpr33
-; GFX11-NEXT: ; implicit-def: $vgpr32
-; GFX11-NEXT: ; implicit-def: $vgpr31
-; GFX11-NEXT: ; implicit-def: $vgpr30
-; GFX11-NEXT: s_branch .LBB53_2
+; GFX11-TRUE16-LABEL: bitcast_v12f64_to_v48f16_scalar:
+; GFX11-TRUE16: ; %bb.0:
+; GFX11-TRUE16-NEXT: s_waitcnt vmcnt(0) expcnt(0) lgkmcnt(0)
+; GFX11-TRUE16-NEXT: v_dual_mov_b32 v16, v6 :: v_dual_mov_b32 v23, v5
+; GFX11-TRUE16-NEXT: v_dual_mov_b32 v22, v4 :: v_dual_mov_b32 v21, v3
+; GFX11-TRUE16-NEXT: v_dual_mov_b32 v20, v2 :: v_dual_mov_b32 v19, v1
+; GFX11-TRUE16-NEXT: s_delay_alu instid0(VALU_DEP_3)
+; GFX11-TRUE16-NEXT: v_cmp_ne_u32_e32 vcc_lo, 0, v16
+; GFX11-TRUE16-NEXT: v_dual_mov_b32 v18, v0 :: v_dual_mov_b32 v1, s1
+; GFX11-TRUE16-NEXT: v_dual_mov_b32 v0, s0 :: v_dual_mov_b32 v3, s3
+; GFX11-TRUE16-NEXT: v_dual_mov_b32 v2, s2 :: v_dual_mov_b32 v5, s17
+; GFX11-TRUE16-NEXT: v_dual_mov_b32 v4, s16 :: v_dual_mov_b32 v7, s19
+; GFX11-TRUE16-NEXT: v_dual_mov_b32 v6, s18 :: v_dual_mov_b32 v9, s21
+; GFX11-TRUE16-NEXT: v_dual_mov_b32 v8, s20 :: v_dual_mov_b32 v11, s23
+; GFX11-TRUE16-NEXT: v_dual_mov_b32 v10, s22 :: v_dual_mov_b32 v13, s25
+; GFX11-TRUE16-NEXT: v_dual_mov_b32 v12, s24 :: v_dual_mov_b32 v15, s27
+; GFX11-TRUE16-NEXT: v_dual_mov_b32 v14, s26 :: v_dual_mov_b32 v17, s29
+; GFX11-TRUE16-NEXT: v_mov_b32_e32 v16, s28
+; GFX11-TRUE16-NEXT: s_mov_b32 s0, 0
+; GFX11-TRUE16-NEXT: s_and_b32 s1, vcc_lo, exec_lo
+; GFX11-TRUE16-NEXT: s_cbranch_scc0 .LBB53_4
+; GFX11-TRUE16-NEXT: ; %bb.1: ; %cmp.false
+; GFX11-TRUE16-NEXT: v_lshrrev_b32_e32 v24, 16, v23
+; GFX11-TRUE16-NEXT: v_lshrrev_b32_e32 v25, 16, v22
+; GFX11-TRUE16-NEXT: v_lshrrev_b32_e32 v26, 16, v21
+; GFX11-TRUE16-NEXT: v_lshrrev_b32_e32 v27, 16, v20
+; GFX11-TRUE16-NEXT: v_lshrrev_b32_e32 v28, 16, v19
+; GFX11-TRUE16-NEXT: v_lshrrev_b32_e32 v29, 16, v18
+; GFX11-TRUE16-NEXT: v_lshrrev_b32_e32 v30, 16, v17
+; GFX11-TRUE16-NEXT: v_lshrrev_b32_e32 v31, 16, v16
+; GFX11-TRUE16-NEXT: v_lshrrev_b32_e32 v32, 16, v15
+; GFX11-TRUE16-NEXT: v_lshrrev_b32_e32 v33, 16, v14
+; GFX11-TRUE16-NEXT: v_lshrrev_b32_e32 v34, 16, v13
+; GFX11-TRUE16-NEXT: v_lshrrev_b32_e32 v35, 16, v12
+; GFX11-TRUE16-NEXT: v_lshrrev_b32_e32 v36, 16, v11
+; GFX11-TRUE16-NEXT: v_lshrrev_b32_e32 v37, 16, v10
+; GFX11-TRUE16-NEXT: v_lshrrev_b32_e32 v38, 16, v9
+; GFX11-TRUE16-NEXT: v_lshrrev_b32_e32 v39, 16, v8
+; GFX11-TRUE16-NEXT: v_lshrrev_b32_e32 v48, 16, v7
+; GFX11-TRUE16-NEXT: v_lshrrev_b32_e32 v49, 16, v6
+; GFX11-TRUE16-NEXT: v_lshrrev_b32_e32 v50, 16, v5
+; GFX11-TRUE16-NEXT: v_lshrrev_b32_e32 v51, 16, v4
+; GFX11-TRUE16-NEXT: v_lshrrev_b32_e32 v52, 16, v3
+; GFX11-TRUE16-NEXT: v_lshrrev_b32_e32 v53, 16, v2
+; GFX11-TRUE16-NEXT: v_lshrrev_b32_e32 v54, 16, v1
+; GFX11-TRUE16-NEXT: v_lshrrev_b32_e32 v55, 16, v0
+; GFX11-TRUE16-NEXT: s_and_not1_b32 vcc_lo, exec_lo, s0
+; GFX11-TRUE16-NEXT: s_cbranch_vccnz .LBB53_3
+; GFX11-TRUE16-NEXT: .LBB53_2: ; %cmp.true
+; GFX11-TRUE16-NEXT: v_add_f64 v[22:23], v[22:23], 1.0
+; GFX11-TRUE16-NEXT: v_add_f64 v[20:21], v[20:21], 1.0
+; GFX11-TRUE16-NEXT: v_add_f64 v[18:19], v[18:19], 1.0
+; GFX11-TRUE16-NEXT: v_add_f64 v[16:17], v[16:17], 1.0
+; GFX11-TRUE16-NEXT: v_add_f64 v[14:15], v[14:15], 1.0
+; GFX11-TRUE16-NEXT: v_add_f64 v[12:13], v[12:13], 1.0
+; GFX11-TRUE16-NEXT: v_add_f64 v[10:11], v[10:11], 1.0
+; GFX11-TRUE16-NEXT: v_add_f64 v[8:9], v[8:9], 1.0
+; GFX11-TRUE16-NEXT: v_add_f64 v[6:7], v[6:7], 1.0
+; GFX11-TRUE16-NEXT: v_add_f64 v[4:5], v[4:5], 1.0
+; GFX11-TRUE16-NEXT: v_add_f64 v[2:3], v[2:3], 1.0
+; GFX11-TRUE16-NEXT: v_add_f64 v[0:1], v[0:1], 1.0
+; GFX11-TRUE16-NEXT: v_lshrrev_b32_e32 v24, 16, v23
+; GFX11-TRUE16-NEXT: v_lshrrev_b32_e32 v25, 16, v22
+; GFX11-TRUE16-NEXT: v_lshrrev_b32_e32 v26, 16, v21
+; GFX11-TRUE16-NEXT: v_lshrrev_b32_e32 v27, 16, v20
+; GFX11-TRUE16-NEXT: v_lshrrev_b32_e32 v28, 16, v19
+; GFX11-TRUE16-NEXT: v_lshrrev_b32_e32 v29, 16, v18
+; GFX11-TRUE16-NEXT: v_lshrrev_b32_e32 v30, 16, v17
+; GFX11-TRUE16-NEXT: v_lshrrev_b32_e32 v31, 16, v16
+; GFX11-TRUE16-NEXT: v_lshrrev_b32_e32 v32, 16, v15
+; GFX11-TRUE16-NEXT: v_lshrrev_b32_e32 v33, 16, v14
+; GFX11-TRUE16-NEXT: v_lshrrev_b32_e32 v34, 16, v13
+; GFX11-TRUE16-NEXT: v_lshrrev_b32_e32 v35, 16, v12
+; GFX11-TRUE16-NEXT: v_lshrrev_b32_e32 v36, 16, v11
+; GFX11-TRUE16-NEXT: v_lshrrev_b32_e32 v37, 16, v10
+; GFX11-TRUE16-NEXT: v_lshrrev_b32_e32 v38, 16, v9
+; GFX11-TRUE16-NEXT: v_lshrrev_b32_e32 v39, 16, v8
+; GFX11-TRUE16-NEXT: v_lshrrev_b32_e32 v48, 16, v7
+; GFX11-TRUE16-NEXT: v_lshrrev_b32_e32 v49, 16, v6
+; GFX11-TRUE16-NEXT: v_lshrrev_b32_e32 v50, 16, v5
+; GFX11-TRUE16-NEXT: v_lshrrev_b32_e32 v51, 16, v4
+; GFX11-TRUE16-NEXT: v_lshrrev_b32_e32 v52, 16, v3
+; GFX11-TRUE16-NEXT: v_lshrrev_b32_e32 v53, 16, v2
+; GFX11-TRUE16-NEXT: v_lshrrev_b32_e32 v54, 16, v1
+; GFX11-TRUE16-NEXT: v_lshrrev_b32_e32 v55, 16, v0
+; GFX11-TRUE16-NEXT: .LBB53_3: ; %end
+; GFX11-TRUE16-NEXT: s_delay_alu instid0(VALU_DEP_1) | instskip(NEXT) | instid1(VALU_DEP_4)
+; GFX11-TRUE16-NEXT: v_dual_mov_b32 v55, v55 :: v_dual_mov_b32 v54, v54
+; GFX11-TRUE16-NEXT: v_dual_mov_b32 v53, v53 :: v_dual_mov_b32 v52, v52
+; GFX11-TRUE16-NEXT: v_dual_mov_b32 v51, v51 :: v_dual_mov_b32 v50, v50
+; GFX11-TRUE16-NEXT: v_dual_mov_b32 v49, v49 :: v_dual_mov_b32 v48, v48
+; GFX11-TRUE16-NEXT: v_dual_mov_b32 v39, v39 :: v_dual_mov_b32 v38, v38
+; GFX11-TRUE16-NEXT: v_dual_mov_b32 v37, v37 :: v_dual_mov_b32 v36, v36
+; GFX11-TRUE16-NEXT: v_dual_mov_b32 v35, v35 :: v_dual_mov_b32 v34, v34
+; GFX11-TRUE16-NEXT: v_dual_mov_b32 v33, v33 :: v_dual_mov_b32 v32, v32
+; GFX11-TRUE16-NEXT: v_dual_mov_b32 v31, v31 :: v_dual_mov_b32 v30, v30
+; GFX11-TRUE16-NEXT: v_dual_mov_b32 v29, v29 :: v_dual_mov_b32 v28, v28
+; GFX11-TRUE16-NEXT: v_dual_mov_b32 v27, v27 :: v_dual_mov_b32 v26, v26
+; GFX11-TRUE16-NEXT: v_dual_mov_b32 v25, v25 :: v_dual_mov_b32 v24, v24
+; GFX11-TRUE16-NEXT: v_mov_b16_e32 v0.h, v55.l
+; GFX11-TRUE16-NEXT: v_mov_b16_e32 v1.h, v54.l
+; GFX11-TRUE16-NEXT: v_mov_b16_e32 v2.h, v53.l
+; GFX11-TRUE16-NEXT: v_mov_b16_e32 v3.h, v52.l
+; GFX11-TRUE16-NEXT: v_mov_b16_e32 v4.h, v51.l
+; GFX11-TRUE16-NEXT: v_mov_b16_e32 v5.h, v50.l
+; GFX11-TRUE16-NEXT: v_mov_b16_e32 v6.h, v49.l
+; GFX11-TRUE16-NEXT: v_mov_b16_e32 v7.h, v48.l
+; GFX11-TRUE16-NEXT: v_mov_b16_e32 v8.h, v39.l
+; GFX11-TRUE16-NEXT: v_mov_b16_e32 v9.h, v38.l
+; GFX11-TRUE16-NEXT: v_mov_b16_e32 v10.h, v37.l
+; GFX11-TRUE16-NEXT: v_mov_b16_e32 v11.h, v36.l
+; GFX11-TRUE16-NEXT: v_mov_b16_e32 v12.h, v35.l
+; GFX11-TRUE16-NEXT: v_mov_b16_e32 v13.h, v34.l
+; GFX11-TRUE16-NEXT: v_mov_b16_e32 v14.h, v33.l
+; GFX11-TRUE16-NEXT: v_mov_b16_e32 v15.h, v32.l
+; GFX11-TRUE16-NEXT: v_mov_b16_e32 v16.h, v31.l
+; GFX11-TRUE16-NEXT: v_mov_b16_e32 v17.h, v30.l
+; GFX11-TRUE16-NEXT: v_mov_b16_e32 v18.h, v29.l
+; GFX11-TRUE16-NEXT: v_mov_b16_e32 v19.h, v28.l
+; GFX11-TRUE16-NEXT: v_mov_b16_e32 v20.h, v27.l
+; GFX11-TRUE16-NEXT: v_mov_b16_e32 v21.h, v26.l
+; GFX11-TRUE16-NEXT: v_mov_b16_e32 v22.h, v25.l
+; GFX11-TRUE16-NEXT: v_mov_b16_e32 v23.h, v24.l
+; GFX11-TRUE16-NEXT: s_setpc_b64 s[30:31]
+; GFX11-TRUE16-NEXT: .LBB53_4:
+; GFX11-TRUE16-NEXT: ; implicit-def: $vgpr55
+; GFX11-TRUE16-NEXT: ; implicit-def: $vgpr54
+; GFX11-TRUE16-NEXT: ; implicit-def: $vgpr53
+; GFX11-TRUE16-NEXT: ; implicit-def: $vgpr52
+; GFX11-TRUE16-NEXT: ; implicit-def: $vgpr51
+; GFX11-TRUE16-NEXT: ; implicit-def: $vgpr50
+; GFX11-TRUE16-NEXT: ; implicit-def: $vgpr49
+; GFX11-TRUE16-NEXT: ; implicit-def: $vgpr48
+; GFX11-TRUE16-NEXT: ; implicit-def: $vgpr39
+; GFX11-TRUE16-NEXT: ; implicit-def: $vgpr38
+; GFX11-TRUE16-NEXT: ; implicit-def: $vgpr37
+; GFX11-TRUE16-NEXT: ; implicit-def: $vgpr36
+; GFX11-TRUE16-NEXT: ; implicit-def: $vgpr35
+; GFX11-TRUE16-NEXT: ; implicit-def: $vgpr34
+; GFX11-TRUE16-NEXT: ; implicit-def: $vgpr33
+; GFX11-TRUE16-NEXT: ; implicit-def: $vgpr32
+; GFX11-TRUE16-NEXT: ; implicit-def: $vgpr31
+; GFX11-TRUE16-NEXT: ; implicit-def: $vgpr30
+; GFX11-TRUE16-NEXT: ; implicit-def: $vgpr29
+; GFX11-TRUE16-NEXT: ; implicit-def: $vgpr28
+; GFX11-TRUE16-NEXT: ; implicit-def: $vgpr27
+; GFX11-TRUE16-NEXT: ; implicit-def: $vgpr26
+; GFX11-TRUE16-NEXT: ; implicit-def: $vgpr25
+; GFX11-TRUE16-NEXT: ; implicit-def: $vgpr24
+; GFX11-TRUE16-NEXT: s_branch .LBB53_2
+;
+; GFX11-FAKE16-LABEL: bitcast_v12f64_to_v48f16_scalar:
+; GFX11-FAKE16: ; %bb.0:
+; GFX11-FAKE16-NEXT: s_waitcnt vmcnt(0) expcnt(0) lgkmcnt(0)
+; GFX11-FAKE16-NEXT: v_cmp_ne_u32_e32 vcc_lo, 0, v6
+; GFX11-FAKE16-NEXT: v_dual_mov_b32 v24, s0 :: v_dual_mov_b32 v25, s1
+; GFX11-FAKE16-NEXT: v_dual_mov_b32 v22, s2 :: v_dual_mov_b32 v23, s3
+; GFX11-FAKE16-NEXT: v_dual_mov_b32 v20, s16 :: v_dual_mov_b32 v21, s17
+; GFX11-FAKE16-NEXT: v_dual_mov_b32 v7, s18 :: v_dual_mov_b32 v8, s19
+; GFX11-FAKE16-NEXT: v_dual_mov_b32 v18, s20 :: v_dual_mov_b32 v19, s21
+; GFX11-FAKE16-NEXT: v_dual_mov_b32 v11, s22 :: v_dual_mov_b32 v12, s23
+; GFX11-FAKE16-NEXT: v_dual_mov_b32 v9, s24 :: v_dual_mov_b32 v10, s25
+; GFX11-FAKE16-NEXT: v_dual_mov_b32 v14, s26 :: v_dual_mov_b32 v15, s27
+; GFX11-FAKE16-NEXT: v_dual_mov_b32 v16, s28 :: v_dual_mov_b32 v17, s29
+; GFX11-FAKE16-NEXT: s_mov_b32 s0, 0
+; GFX11-FAKE16-NEXT: s_and_b32 s1, vcc_lo, exec_lo
+; GFX11-FAKE16-NEXT: s_cbranch_scc0 .LBB53_4
+; GFX11-FAKE16-NEXT: ; %bb.1: ; %cmp.false
+; GFX11-FAKE16-NEXT: v_lshrrev_b32_e32 v30, 16, v5
+; GFX11-FAKE16-NEXT: v_lshrrev_b32_e32 v31, 16, v4
+; GFX11-FAKE16-NEXT: v_lshrrev_b32_e32 v32, 16, v3
+; GFX11-FAKE16-NEXT: v_lshrrev_b32_e32 v33, 16, v2
+; GFX11-FAKE16-NEXT: v_lshrrev_b32_e32 v34, 16, v1
+; GFX11-FAKE16-NEXT: v_lshrrev_b32_e32 v35, 16, v0
+; GFX11-FAKE16-NEXT: v_lshrrev_b32_e32 v36, 16, v17
+; GFX11-FAKE16-NEXT: v_lshrrev_b32_e32 v37, 16, v16
+; GFX11-FAKE16-NEXT: v_lshrrev_b32_e32 v38, 16, v15
+; GFX11-FAKE16-NEXT: v_lshrrev_b32_e32 v39, 16, v14
+; GFX11-FAKE16-NEXT: v_lshrrev_b32_e32 v13, 16, v10
+; GFX11-FAKE16-NEXT: v_lshrrev_b32_e32 v48, 16, v9
+; GFX11-FAKE16-NEXT: v_lshrrev_b32_e32 v49, 16, v12
+; GFX11-FAKE16-NEXT: v_lshrrev_b32_e32 v50, 16, v11
+; GFX11-FAKE16-NEXT: v_lshrrev_b32_e32 v51, 16, v19
+; GFX11-FAKE16-NEXT: v_lshrrev_b32_e32 v52, 16, v18
+; GFX11-FAKE16-NEXT: v_lshrrev_b32_e32 v53, 16, v8
+; GFX11-FAKE16-NEXT: v_lshrrev_b32_e32 v6, 16, v7
+; GFX11-FAKE16-NEXT: v_lshrrev_b32_e32 v29, 16, v21
+; GFX11-FAKE16-NEXT: v_lshrrev_b32_e32 v28, 16, v20
+; GFX11-FAKE16-NEXT: v_lshrrev_b32_e32 v27, 16, v23
+; GFX11-FAKE16-NEXT: v_lshrrev_b32_e32 v26, 16, v22
+; GFX11-FAKE16-NEXT: v_lshrrev_b32_e32 v54, 16, v25
+; GFX11-FAKE16-NEXT: v_lshrrev_b32_e32 v55, 16, v24
+; GFX11-FAKE16-NEXT: s_and_not1_b32 vcc_lo, exec_lo, s0
+; GFX11-FAKE16-NEXT: s_cbranch_vccnz .LBB53_3
+; GFX11-FAKE16-NEXT: .LBB53_2: ; %cmp.true
+; GFX11-FAKE16-NEXT: v_add_f64 v[4:5], v[4:5], 1.0
+; GFX11-FAKE16-NEXT: v_add_f64 v[2:3], v[2:3], 1.0
+; GFX11-FAKE16-NEXT: v_add_f64 v[0:1], v[0:1], 1.0
+; GFX11-FAKE16-NEXT: v_add_f64 v[16:17], v[16:17], 1.0
+; GFX11-FAKE16-NEXT: v_add_f64 v[14:15], v[14:15], 1.0
+; GFX11-FAKE16-NEXT: v_add_f64 v[9:10], v[9:10], 1.0
+; GFX11-FAKE16-NEXT: v_add_f64 v[11:12], v[11:12], 1.0
+; GFX11-FAKE16-NEXT: v_add_f64 v[18:19], v[18:19], 1.0
+; GFX11-FAKE16-NEXT: v_add_f64 v[7:8], v[7:8], 1.0
+; GFX11-FAKE16-NEXT: v_add_f64 v[20:21], v[20:21], 1.0
+; GFX11-FAKE16-NEXT: v_add_f64 v[22:23], v[22:23], 1.0
+; GFX11-FAKE16-NEXT: v_add_f64 v[24:25], v[24:25], 1.0
+; GFX11-FAKE16-NEXT: v_lshrrev_b32_e32 v30, 16, v5
+; GFX11-FAKE16-NEXT: v_lshrrev_b32_e32 v31, 16, v4
+; GFX11-FAKE16-NEXT: v_lshrrev_b32_e32 v32, 16, v3
+; GFX11-FAKE16-NEXT: v_lshrrev_b32_e32 v33, 16, v2
+; GFX11-FAKE16-NEXT: v_lshrrev_b32_e32 v34, 16, v1
+; GFX11-FAKE16-NEXT: v_lshrrev_b32_e32 v35, 16, v0
+; GFX11-FAKE16-NEXT: v_lshrrev_b32_e32 v36, 16, v17
+; GFX11-FAKE16-NEXT: v_lshrrev_b32_e32 v37, 16, v16
+; GFX11-FAKE16-NEXT: v_lshrrev_b32_e32 v38, 16, v15
+; GFX11-FAKE16-NEXT: v_lshrrev_b32_e32 v39, 16, v14
+; GFX11-FAKE16-NEXT: v_lshrrev_b32_e32 v13, 16, v10
+; GFX11-FAKE16-NEXT: v_lshrrev_b32_e32 v48, 16, v9
+; GFX11-FAKE16-NEXT: v_lshrrev_b32_e32 v49, 16, v12
+; GFX11-FAKE16-NEXT: v_lshrrev_b32_e32 v50, 16, v11
+; GFX11-FAKE16-NEXT: v_lshrrev_b32_e32 v51, 16, v19
+; GFX11-FAKE16-NEXT: v_lshrrev_b32_e32 v52, 16, v18
+; GFX11-FAKE16-NEXT: v_lshrrev_b32_e32 v53, 16, v8
+; GFX11-FAKE16-NEXT: v_lshrrev_b32_e32 v6, 16, v7
+; GFX11-FAKE16-NEXT: v_lshrrev_b32_e32 v29, 16, v21
+; GFX11-FAKE16-NEXT: v_lshrrev_b32_e32 v28, 16, v20
+; GFX11-FAKE16-NEXT: v_lshrrev_b32_e32 v27, 16, v23
+; GFX11-FAKE16-NEXT: v_lshrrev_b32_e32 v26, 16, v22
+; GFX11-FAKE16-NEXT: v_lshrrev_b32_e32 v54, 16, v25
+; GFX11-FAKE16-NEXT: v_lshrrev_b32_e32 v55, 16, v24
+; GFX11-FAKE16-NEXT: .LBB53_3: ; %end
+; GFX11-FAKE16-NEXT: v_and_b32_e32 v21, 0xffff, v21
+; GFX11-FAKE16-NEXT: v_and_b32_e32 v7, 0xffff, v7
+; GFX11-FAKE16-NEXT: v_and_b32_e32 v8, 0xffff, v8
+; GFX11-FAKE16-NEXT: v_and_b32_e32 v18, 0xffff, v18
+; GFX11-FAKE16-NEXT: v_and_b32_e32 v25, 0xffff, v25
+; GFX11-FAKE16-NEXT: v_and_b32_e32 v23, 0xffff, v23
+; GFX11-FAKE16-NEXT: v_lshl_or_b32 v29, v29, 16, v21
+; GFX11-FAKE16-NEXT: v_and_b32_e32 v20, 0xffff, v20
+; GFX11-FAKE16-NEXT: v_lshl_or_b32 v6, v6, 16, v7
+; GFX11-FAKE16-NEXT: v_lshl_or_b32 v7, v53, 16, v8
+; GFX11-FAKE16-NEXT: v_lshl_or_b32 v8, v52, 16, v18
+; GFX11-FAKE16-NEXT: v_and_b32_e32 v18, 0xffff, v19
+; GFX11-FAKE16-NEXT: v_and_b32_e32 v0, 0xffff, v0
+; GFX11-FAKE16-NEXT: v_lshl_or_b32 v25, v54, 16, v25
+; GFX11-FAKE16-NEXT: v_and_b32_e32 v24, 0xffff, v24
+; GFX11-FAKE16-NEXT: v_lshl_or_b32 v27, v27, 16, v23
+; GFX11-FAKE16-NEXT: v_and_b32_e32 v22, 0xffff, v22
+; GFX11-FAKE16-NEXT: v_and_b32_e32 v19, 0xffff, v9
+; GFX11-FAKE16-NEXT: v_lshl_or_b32 v9, v51, 16, v18
+; GFX11-FAKE16-NEXT: v_lshl_or_b32 v18, v35, 16, v0
+; GFX11-FAKE16-NEXT: v_and_b32_e32 v0, 0xffff, v1
+; GFX11-FAKE16-NEXT: v_and_b32_e32 v1, 0xffff, v2
+; GFX11-FAKE16-NEXT: v_and_b32_e32 v2, 0xffff, v3
+; GFX11-FAKE16-NEXT: v_and_b32_e32 v3, 0xffff, v4
+; GFX11-FAKE16-NEXT: v_dual_mov_b32 v5, v29 :: v_dual_and_b32 v4, 0xffff, v5
+; GFX11-FAKE16-NEXT: v_lshl_or_b32 v28, v28, 16, v20
+; GFX11-FAKE16-NEXT: v_and_b32_e32 v20, 0xffff, v10
+; GFX11-FAKE16-NEXT: v_lshl_or_b32 v26, v26, 16, v22
+; GFX11-FAKE16-NEXT: v_and_b32_e32 v11, 0xffff, v11
+; GFX11-FAKE16-NEXT: v_and_b32_e32 v12, 0xffff, v12
+; GFX11-FAKE16-NEXT: v_and_b32_e32 v14, 0xffff, v14
+; GFX11-FAKE16-NEXT: v_lshl_or_b32 v13, v13, 16, v20
+; GFX11-FAKE16-NEXT: v_and_b32_e32 v15, 0xffff, v15
+; GFX11-FAKE16-NEXT: v_and_b32_e32 v16, 0xffff, v16
+; GFX11-FAKE16-NEXT: v_and_b32_e32 v17, 0xffff, v17
+; GFX11-FAKE16-NEXT: v_lshl_or_b32 v20, v33, 16, v1
+; GFX11-FAKE16-NEXT: v_mov_b32_e32 v1, v25
+; GFX11-FAKE16-NEXT: v_lshl_or_b32 v24, v55, 16, v24
+; GFX11-FAKE16-NEXT: v_lshl_or_b32 v10, v50, 16, v11
+; GFX11-FAKE16-NEXT: v_lshl_or_b32 v11, v49, 16, v12
+; GFX11-FAKE16-NEXT: v_lshl_or_b32 v12, v48, 16, v19
+; GFX11-FAKE16-NEXT: v_lshl_or_b32 v14, v39, 16, v14
+; GFX11-FAKE16-NEXT: v_lshl_or_b32 v15, v38, 16, v15
+; GFX11-FAKE16-NEXT: v_lshl_or_b32 v16, v37, 16, v16
+; GFX11-FAKE16-NEXT: v_lshl_or_b32 v17, v36, 16, v17
+; GFX11-FAKE16-NEXT: v_lshl_or_b32 v19, v34, 16, v0
+; GFX11-FAKE16-NEXT: v_lshl_or_b32 v21, v32, 16, v2
+; GFX11-FAKE16-NEXT: v_lshl_or_b32 v22, v31, 16, v3
+; GFX11-FAKE16-NEXT: v_lshl_or_b32 v23, v30, 16, v4
+; GFX11-FAKE16-NEXT: v_mov_b32_e32 v0, v24
+; GFX11-FAKE16-NEXT: v_dual_mov_b32 v2, v26 :: v_dual_mov_b32 v3, v27
+; GFX11-FAKE16-NEXT: v_mov_b32_e32 v4, v28
+; GFX11-FAKE16-NEXT: s_setpc_b64 s[30:31]
+; GFX11-FAKE16-NEXT: .LBB53_4:
+; GFX11-FAKE16-NEXT: ; implicit-def: $vgpr55
+; GFX11-FAKE16-NEXT: ; implicit-def: $vgpr54
+; GFX11-FAKE16-NEXT: ; implicit-def: $vgpr26
+; GFX11-FAKE16-NEXT: ; implicit-def: $vgpr27
+; GFX11-FAKE16-NEXT: ; implicit-def: $vgpr28
+; GFX11-FAKE16-NEXT: ; implicit-def: $vgpr29
+; GFX11-FAKE16-NEXT: ; implicit-def: $vgpr6
+; GFX11-FAKE16-NEXT: ; implicit-def: $vgpr53
+; GFX11-FAKE16-NEXT: ; implicit-def: $vgpr52
+; GFX11-FAKE16-NEXT: ; implicit-def: $vgpr51
+; GFX11-FAKE16-NEXT: ; implicit-def: $vgpr50
+; GFX11-FAKE16-NEXT: ; implicit-def: $vgpr49
+; GFX11-FAKE16-NEXT: ; implicit-def: $vgpr48
+; GFX11-FAKE16-NEXT: ; implicit-def: $vgpr13
+; GFX11-FAKE16-NEXT: ; implicit-def: $vgpr39
+; GFX11-FAKE16-NEXT: ; implicit-def: $vgpr38
+; GFX11-FAKE16-NEXT: ; implicit-def: $vgpr37
+; GFX11-FAKE16-NEXT: ; implicit-def: $vgpr36
+; GFX11-FAKE16-NEXT: ; implicit-def: $vgpr35
+; GFX11-FAKE16-NEXT: ; implicit-def: $vgpr34
+; GFX11-FAKE16-NEXT: ; implicit-def: $vgpr33
+; GFX11-FAKE16-NEXT: ; implicit-def: $vgpr32
+; GFX11-FAKE16-NEXT: ; implicit-def: $vgpr31
+; GFX11-FAKE16-NEXT: ; implicit-def: $vgpr30
+; GFX11-FAKE16-NEXT: s_branch .LBB53_2
%cmp = icmp eq i32 %b, 0
br i1 %cmp, label %cmp.true, label %cmp.false
@@ -35639,117 +37426,286 @@ define inreg <12 x double> @bitcast_v48f16_to_v12f64_scalar(<48 x half> inreg %a
; GFX11-TRUE16-LABEL: bitcast_v48f16_to_v12f64_scalar:
; GFX11-TRUE16: ; %bb.0:
; GFX11-TRUE16-NEXT: s_waitcnt vmcnt(0) expcnt(0) lgkmcnt(0)
-; GFX11-TRUE16-NEXT: v_mov_b16_e32 v32.h, 0
; GFX11-TRUE16-NEXT: v_cmp_ne_u32_e32 vcc_lo, 0, v6
-; GFX11-TRUE16-NEXT: v_mov_b16_e32 v32.l, v5.h
-; GFX11-TRUE16-NEXT: v_mov_b16_e32 v33.l, v4.h
-; GFX11-TRUE16-NEXT: v_mov_b16_e32 v34.l, v3.h
-; GFX11-TRUE16-NEXT: v_mov_b16_e32 v33.h, v32.h
-; GFX11-TRUE16-NEXT: v_mov_b16_e32 v34.h, v32.h
-; GFX11-TRUE16-NEXT: v_mov_b16_e32 v35.l, v2.h
-; GFX11-TRUE16-NEXT: v_mov_b16_e32 v35.h, v32.h
-; GFX11-TRUE16-NEXT: v_mov_b16_e32 v36.l, v1.h
-; GFX11-TRUE16-NEXT: v_mov_b16_e32 v36.h, v32.h
-; GFX11-TRUE16-NEXT: v_mov_b16_e32 v37.l, v0.h
-; GFX11-TRUE16-NEXT: v_mov_b16_e32 v37.h, v32.h
-; GFX11-TRUE16-NEXT: v_and_b32_e32 v51, 0xffff, v0
-; GFX11-TRUE16-NEXT: v_and_b32_e32 v50, 0xffff, v1
-; GFX11-TRUE16-NEXT: v_and_b32_e32 v49, 0xffff, v2
-; GFX11-TRUE16-NEXT: v_and_b32_e32 v48, 0xffff, v3
-; GFX11-TRUE16-NEXT: v_and_b32_e32 v39, 0xffff, v4
-; GFX11-TRUE16-NEXT: v_and_b32_e32 v38, 0xffff, v5
-; GFX11-TRUE16-NEXT: s_lshr_b32 s41, s29, 16
-; GFX11-TRUE16-NEXT: s_lshr_b32 s42, s28, 16
-; GFX11-TRUE16-NEXT: s_lshr_b32 s15, s27, 16
-; GFX11-TRUE16-NEXT: s_lshr_b32 s43, s26, 16
-; GFX11-TRUE16-NEXT: s_lshr_b32 s14, s25, 16
-; GFX11-TRUE16-NEXT: s_lshr_b32 s13, s24, 16
-; GFX11-TRUE16-NEXT: s_lshr_b32 s12, s23, 16
-; GFX11-TRUE16-NEXT: s_lshr_b32 s11, s22, 16
-; GFX11-TRUE16-NEXT: s_lshr_b32 s10, s21, 16
-; GFX11-TRUE16-NEXT: s_lshr_b32 s9, s20, 16
-; GFX11-TRUE16-NEXT: s_lshr_b32 s8, s19, 16
-; GFX11-TRUE16-NEXT: s_lshr_b32 s7, s18, 16
-; GFX11-TRUE16-NEXT: s_lshr_b32 s6, s17, 16
-; GFX11-TRUE16-NEXT: s_lshr_b32 s5, s16, 16
-; GFX11-TRUE16-NEXT: s_lshr_b32 s44, s3, 16
-; GFX11-TRUE16-NEXT: s_lshr_b32 s45, s2, 16
-; GFX11-TRUE16-NEXT: s_lshr_b32 s46, s1, 16
-; GFX11-TRUE16-NEXT: s_lshr_b32 s4, s0, 16
-; GFX11-TRUE16-NEXT: s_mov_b32 s40, 0
-; GFX11-TRUE16-NEXT: s_pack_ll_b32_b16 s4, s0, s4
-; GFX11-TRUE16-NEXT: s_pack_ll_b32_b16 s1, s1, s46
-; GFX11-TRUE16-NEXT: s_pack_ll_b32_b16 s2, s2, s45
-; GFX11-TRUE16-NEXT: s_pack_ll_b32_b16 s3, s3, s44
-; GFX11-TRUE16-NEXT: s_pack_ll_b32_b16 s5, s16, s5
-; GFX11-TRUE16-NEXT: s_pack_ll_b32_b16 s6, s17, s6
-; GFX11-TRUE16-NEXT: s_pack_ll_b32_b16 s7, s18, s7
-; GFX11-TRUE16-NEXT: s_pack_ll_b32_b16 s8, s19, s8
-; GFX11-TRUE16-NEXT: s_pack_ll_b32_b16 s9, s20, s9
-; GFX11-TRUE16-NEXT: s_pack_ll_b32_b16 s10, s21, s10
-; GFX11-TRUE16-NEXT: s_pack_ll_b32_b16 s11, s22, s11
-; GFX11-TRUE16-NEXT: s_pack_ll_b32_b16 s12, s23, s12
-; GFX11-TRUE16-NEXT: s_pack_ll_b32_b16 s13, s24, s13
-; GFX11-TRUE16-NEXT: s_pack_ll_b32_b16 s14, s25, s14
-; GFX11-TRUE16-NEXT: s_pack_ll_b32_b16 s0, s26, s43
-; GFX11-TRUE16-NEXT: s_pack_ll_b32_b16 s15, s27, s15
-; GFX11-TRUE16-NEXT: s_pack_ll_b32_b16 s16, s28, s42
-; GFX11-TRUE16-NEXT: s_pack_ll_b32_b16 s17, s29, s41
+; GFX11-TRUE16-NEXT: s_clause 0x1f
+; GFX11-TRUE16-NEXT: scratch_store_b32 off, v40, s32 offset:312
+; GFX11-TRUE16-NEXT: scratch_store_b32 off, v41, s32 offset:308
+; GFX11-TRUE16-NEXT: scratch_store_b32 off, v42, s32 offset:304
+; GFX11-TRUE16-NEXT: scratch_store_b32 off, v43, s32 offset:300
+; GFX11-TRUE16-NEXT: scratch_store_b32 off, v44, s32 offset:296
+; GFX11-TRUE16-NEXT: scratch_store_b32 off, v45, s32 offset:292
+; GFX11-TRUE16-NEXT: scratch_store_b32 off, v46, s32 offset:288
+; GFX11-TRUE16-NEXT: scratch_store_b32 off, v47, s32 offset:284
+; GFX11-TRUE16-NEXT: scratch_store_b32 off, v56, s32 offset:280
+; GFX11-TRUE16-NEXT: scratch_store_b32 off, v57, s32 offset:276
+; GFX11-TRUE16-NEXT: scratch_store_b32 off, v58, s32 offset:272
+; GFX11-TRUE16-NEXT: scratch_store_b32 off, v59, s32 offset:268
+; GFX11-TRUE16-NEXT: scratch_store_b32 off, v60, s32 offset:264
+; GFX11-TRUE16-NEXT: scratch_store_b32 off, v61, s32 offset:260
+; GFX11-TRUE16-NEXT: scratch_store_b32 off, v62, s32 offset:256
+; GFX11-TRUE16-NEXT: scratch_store_b32 off, v63, s32 offset:252
+; GFX11-TRUE16-NEXT: scratch_store_b32 off, v72, s32 offset:248
+; GFX11-TRUE16-NEXT: scratch_store_b32 off, v73, s32 offset:244
+; GFX11-TRUE16-NEXT: scratch_store_b32 off, v74, s32 offset:240
+; GFX11-TRUE16-NEXT: scratch_store_b32 off, v75, s32 offset:236
+; GFX11-TRUE16-NEXT: scratch_store_b32 off, v76, s32 offset:232
+; GFX11-TRUE16-NEXT: scratch_store_b32 off, v77, s32 offset:228
+; GFX11-TRUE16-NEXT: scratch_store_b32 off, v78, s32 offset:224
+; GFX11-TRUE16-NEXT: scratch_store_b32 off, v79, s32 offset:220
+; GFX11-TRUE16-NEXT: scratch_store_b32 off, v88, s32 offset:216
+; GFX11-TRUE16-NEXT: scratch_store_b32 off, v89, s32 offset:212
+; GFX11-TRUE16-NEXT: scratch_store_b32 off, v90, s32 offset:208
+; GFX11-TRUE16-NEXT: scratch_store_b32 off, v91, s32 offset:204
+; GFX11-TRUE16-NEXT: scratch_store_b32 off, v92, s32 offset:200
+; GFX11-TRUE16-NEXT: scratch_store_b32 off, v93, s32 offset:196
+; GFX11-TRUE16-NEXT: scratch_store_b32 off, v94, s32 offset:192
+; GFX11-TRUE16-NEXT: scratch_store_b32 off, v95, s32 offset:188
+; GFX11-TRUE16-NEXT: s_clause 0x1f
+; GFX11-TRUE16-NEXT: scratch_store_b32 off, v104, s32 offset:184
+; GFX11-TRUE16-NEXT: scratch_store_b32 off, v105, s32 offset:180
+; GFX11-TRUE16-NEXT: scratch_store_b32 off, v106, s32 offset:176
+; GFX11-TRUE16-NEXT: scratch_store_b32 off, v107, s32 offset:172
+; GFX11-TRUE16-NEXT: scratch_store_b32 off, v108, s32 offset:168
+; GFX11-TRUE16-NEXT: scratch_store_b32 off, v109, s32 offset:164
+; GFX11-TRUE16-NEXT: scratch_store_b32 off, v110, s32 offset:160
+; GFX11-TRUE16-NEXT: scratch_store_b32 off, v111, s32 offset:156
+; GFX11-TRUE16-NEXT: scratch_store_b32 off, v120, s32 offset:152
+; GFX11-TRUE16-NEXT: scratch_store_b32 off, v121, s32 offset:148
+; GFX11-TRUE16-NEXT: scratch_store_b32 off, v122, s32 offset:144
+; GFX11-TRUE16-NEXT: scratch_store_b32 off, v123, s32 offset:140
+; GFX11-TRUE16-NEXT: scratch_store_b32 off, v124, s32 offset:136
+; GFX11-TRUE16-NEXT: scratch_store_b32 off, v125, s32 offset:132
+; GFX11-TRUE16-NEXT: scratch_store_b32 off, v126, s32 offset:128
+; GFX11-TRUE16-NEXT: scratch_store_b32 off, v127, s32 offset:124
+; GFX11-TRUE16-NEXT: scratch_store_b32 off, v136, s32 offset:120
+; GFX11-TRUE16-NEXT: scratch_store_b32 off, v137, s32 offset:116
+; GFX11-TRUE16-NEXT: scratch_store_b32 off, v138, s32 offset:112
+; GFX11-TRUE16-NEXT: scratch_store_b32 off, v139, s32 offset:108
+; GFX11-TRUE16-NEXT: scratch_store_b32 off, v140, s32 offset:104
+; GFX11-TRUE16-NEXT: scratch_store_b32 off, v141, s32 offset:100
+; GFX11-TRUE16-NEXT: scratch_store_b32 off, v142, s32 offset:96
+; GFX11-TRUE16-NEXT: scratch_store_b32 off, v143, s32 offset:92
+; GFX11-TRUE16-NEXT: scratch_store_b32 off, v152, s32 offset:88
+; GFX11-TRUE16-NEXT: scratch_store_b32 off, v153, s32 offset:84
+; GFX11-TRUE16-NEXT: scratch_store_b32 off, v154, s32 offset:80
+; GFX11-TRUE16-NEXT: scratch_store_b32 off, v155, s32 offset:76
+; GFX11-TRUE16-NEXT: scratch_store_b32 off, v156, s32 offset:72
+; GFX11-TRUE16-NEXT: scratch_store_b32 off, v157, s32 offset:68
+; GFX11-TRUE16-NEXT: scratch_store_b32 off, v158, s32 offset:64
+; GFX11-TRUE16-NEXT: scratch_store_b32 off, v159, s32 offset:60
+; GFX11-TRUE16-NEXT: s_clause 0xe
+; GFX11-TRUE16-NEXT: scratch_store_b32 off, v168, s32 offset:56
+; GFX11-TRUE16-NEXT: scratch_store_b32 off, v169, s32 offset:52
+; GFX11-TRUE16-NEXT: scratch_store_b32 off, v170, s32 offset:48
+; GFX11-TRUE16-NEXT: scratch_store_b32 off, v171, s32 offset:44
+; GFX11-TRUE16-NEXT: scratch_store_b32 off, v172, s32 offset:40
+; GFX11-TRUE16-NEXT: scratch_store_b32 off, v173, s32 offset:36
+; GFX11-TRUE16-NEXT: scratch_store_b32 off, v174, s32 offset:32
+; GFX11-TRUE16-NEXT: scratch_store_b32 off, v175, s32 offset:28
+; GFX11-TRUE16-NEXT: scratch_store_b32 off, v184, s32 offset:24
+; GFX11-TRUE16-NEXT: scratch_store_b32 off, v185, s32 offset:20
+; GFX11-TRUE16-NEXT: scratch_store_b32 off, v186, s32 offset:16
+; GFX11-TRUE16-NEXT: scratch_store_b32 off, v187, s32 offset:12
+; GFX11-TRUE16-NEXT: scratch_store_b32 off, v188, s32 offset:8
+; GFX11-TRUE16-NEXT: scratch_store_b32 off, v189, s32 offset:4
+; GFX11-TRUE16-NEXT: scratch_store_b32 off, v190, s32
+; GFX11-TRUE16-NEXT: v_dual_mov_b32 v185, v5 :: v_dual_mov_b32 v186, v4
+; GFX11-TRUE16-NEXT: v_dual_mov_b32 v187, v3 :: v_dual_mov_b32 v188, v2
+; GFX11-TRUE16-NEXT: v_dual_mov_b32 v189, v1 :: v_dual_mov_b32 v190, v0
+; GFX11-TRUE16-NEXT: s_lshr_b32 s15, s29, 16
+; GFX11-TRUE16-NEXT: s_lshr_b32 s14, s28, 16
+; GFX11-TRUE16-NEXT: s_lshr_b32 s13, s27, 16
+; GFX11-TRUE16-NEXT: s_lshr_b32 s12, s26, 16
+; GFX11-TRUE16-NEXT: s_lshr_b32 s11, s25, 16
+; GFX11-TRUE16-NEXT: s_lshr_b32 s10, s24, 16
+; GFX11-TRUE16-NEXT: s_lshr_b32 s9, s23, 16
+; GFX11-TRUE16-NEXT: s_lshr_b32 s8, s22, 16
+; GFX11-TRUE16-NEXT: s_lshr_b32 s7, s21, 16
+; GFX11-TRUE16-NEXT: s_lshr_b32 s6, s20, 16
+; GFX11-TRUE16-NEXT: s_lshr_b32 s5, s19, 16
+; GFX11-TRUE16-NEXT: s_lshr_b32 s4, s18, 16
+; GFX11-TRUE16-NEXT: s_lshr_b32 s43, s17, 16
+; GFX11-TRUE16-NEXT: s_lshr_b32 s44, s16, 16
+; GFX11-TRUE16-NEXT: s_lshr_b32 s45, s3, 16
+; GFX11-TRUE16-NEXT: s_lshr_b32 s46, s2, 16
+; GFX11-TRUE16-NEXT: s_lshr_b32 s41, s1, 16
+; GFX11-TRUE16-NEXT: s_lshr_b32 s40, s0, 16
+; GFX11-TRUE16-NEXT: s_mov_b32 s42, 0
+; GFX11-TRUE16-NEXT: s_pack_ll_b32_b16 s40, s0, s40
+; GFX11-TRUE16-NEXT: s_pack_ll_b32_b16 s41, s1, s41
+; GFX11-TRUE16-NEXT: s_pack_ll_b32_b16 s0, s2, s46
+; GFX11-TRUE16-NEXT: s_pack_ll_b32_b16 s1, s3, s45
+; GFX11-TRUE16-NEXT: s_pack_ll_b32_b16 s2, s16, s44
+; GFX11-TRUE16-NEXT: s_pack_ll_b32_b16 s3, s17, s43
+; GFX11-TRUE16-NEXT: s_pack_ll_b32_b16 s4, s18, s4
+; GFX11-TRUE16-NEXT: s_pack_ll_b32_b16 s5, s19, s5
+; GFX11-TRUE16-NEXT: s_pack_ll_b32_b16 s6, s20, s6
+; GFX11-TRUE16-NEXT: s_pack_ll_b32_b16 s7, s21, s7
+; GFX11-TRUE16-NEXT: s_pack_ll_b32_b16 s8, s22, s8
+; GFX11-TRUE16-NEXT: s_pack_ll_b32_b16 s9, s23, s9
+; GFX11-TRUE16-NEXT: s_pack_ll_b32_b16 s10, s24, s10
+; GFX11-TRUE16-NEXT: s_pack_ll_b32_b16 s11, s25, s11
+; GFX11-TRUE16-NEXT: s_pack_ll_b32_b16 s12, s26, s12
+; GFX11-TRUE16-NEXT: s_pack_ll_b32_b16 s13, s27, s13
+; GFX11-TRUE16-NEXT: s_pack_ll_b32_b16 s14, s28, s14
+; GFX11-TRUE16-NEXT: s_pack_ll_b32_b16 s15, s29, s15
; GFX11-TRUE16-NEXT: s_and_b32 s47, vcc_lo, exec_lo
; GFX11-TRUE16-NEXT: s_cbranch_scc0 .LBB55_4
; GFX11-TRUE16-NEXT: ; %bb.1: ; %cmp.false
-; GFX11-TRUE16-NEXT: v_lshl_or_b32 v18, v37, 16, v51
-; GFX11-TRUE16-NEXT: v_lshl_or_b32 v19, v36, 16, v50
-; GFX11-TRUE16-NEXT: v_lshl_or_b32 v20, v35, 16, v49
-; GFX11-TRUE16-NEXT: v_lshl_or_b32 v21, v34, 16, v48
-; GFX11-TRUE16-NEXT: v_lshl_or_b32 v22, v33, 16, v39
-; GFX11-TRUE16-NEXT: v_lshl_or_b32 v23, v32, 16, v38
-; GFX11-TRUE16-NEXT: v_dual_mov_b32 v0, s4 :: v_dual_mov_b32 v1, s1
-; GFX11-TRUE16-NEXT: v_dual_mov_b32 v2, s2 :: v_dual_mov_b32 v3, s3
-; GFX11-TRUE16-NEXT: v_dual_mov_b32 v4, s5 :: v_dual_mov_b32 v5, s6
-; GFX11-TRUE16-NEXT: v_dual_mov_b32 v6, s7 :: v_dual_mov_b32 v7, s8
-; GFX11-TRUE16-NEXT: v_dual_mov_b32 v8, s9 :: v_dual_mov_b32 v9, s10
-; GFX11-TRUE16-NEXT: v_dual_mov_b32 v10, s11 :: v_dual_mov_b32 v11, s12
-; GFX11-TRUE16-NEXT: v_dual_mov_b32 v12, s13 :: v_dual_mov_b32 v13, s14
-; GFX11-TRUE16-NEXT: v_dual_mov_b32 v14, s0 :: v_dual_mov_b32 v15, s15
-; GFX11-TRUE16-NEXT: v_dual_mov_b32 v16, s16 :: v_dual_mov_b32 v17, s17
-; GFX11-TRUE16-NEXT: s_and_not1_b32 vcc_lo, exec_lo, s40
+; GFX11-TRUE16-NEXT: v_dual_mov_b32 v0, s40 :: v_dual_mov_b32 v5, s0
+; GFX11-TRUE16-NEXT: v_dual_mov_b32 v2, s41 :: v_dual_mov_b32 v9, s1
+; GFX11-TRUE16-NEXT: v_dual_mov_b32 v14, s2 :: v_dual_mov_b32 v27, s4
+; GFX11-TRUE16-NEXT: v_dual_mov_b32 v20, s3 :: v_dual_mov_b32 v35, s5
+; GFX11-TRUE16-NEXT: v_dual_mov_b32 v44, s6 :: v_dual_mov_b32 v65, s8
+; GFX11-TRUE16-NEXT: v_dual_mov_b32 v54, s7 :: v_dual_mov_b32 v77, s9
+; GFX11-TRUE16-NEXT: v_dual_mov_b32 v90, s10 :: v_dual_mov_b32 v119, s12
+; GFX11-TRUE16-NEXT: v_dual_mov_b32 v104, s11 :: v_dual_mov_b32 v135, s13
+; GFX11-TRUE16-NEXT: v_mov_b32_e32 v152, s14
+; GFX11-TRUE16-NEXT: v_mov_b32_e32 v170, s15
+; GFX11-TRUE16-NEXT: s_and_not1_b32 vcc_lo, exec_lo, s42
; GFX11-TRUE16-NEXT: s_cbranch_vccnz .LBB55_3
; GFX11-TRUE16-NEXT: .LBB55_2: ; %cmp.true
-; GFX11-TRUE16-NEXT: v_lshl_or_b32 v18, v37, 16, v51
-; GFX11-TRUE16-NEXT: v_lshl_or_b32 v19, v36, 16, v50
-; GFX11-TRUE16-NEXT: v_lshl_or_b32 v20, v35, 16, v49
-; GFX11-TRUE16-NEXT: v_lshl_or_b32 v21, v34, 16, v48
-; GFX11-TRUE16-NEXT: v_lshl_or_b32 v22, v33, 16, v39
-; GFX11-TRUE16-NEXT: v_lshl_or_b32 v23, v32, 16, v38
-; GFX11-TRUE16-NEXT: v_pk_add_f16 v0, 0x200, s4 op_sel_hi:[0,1]
-; GFX11-TRUE16-NEXT: v_pk_add_f16 v1, 0x200, s1 op_sel_hi:[0,1]
-; GFX11-TRUE16-NEXT: v_pk_add_f16 v2, 0x200, s2 op_sel_hi:[0,1]
-; GFX11-TRUE16-NEXT: v_pk_add_f16 v3, 0x200, s3 op_sel_hi:[0,1]
-; GFX11-TRUE16-NEXT: v_pk_add_f16 v4, 0x200, s5 op_sel_hi:[0,1]
-; GFX11-TRUE16-NEXT: v_pk_add_f16 v5, 0x200, s6 op_sel_hi:[0,1]
-; GFX11-TRUE16-NEXT: v_pk_add_f16 v6, 0x200, s7 op_sel_hi:[0,1]
-; GFX11-TRUE16-NEXT: v_pk_add_f16 v7, 0x200, s8 op_sel_hi:[0,1]
-; GFX11-TRUE16-NEXT: v_pk_add_f16 v8, 0x200, s9 op_sel_hi:[0,1]
-; GFX11-TRUE16-NEXT: v_pk_add_f16 v9, 0x200, s10 op_sel_hi:[0,1]
-; GFX11-TRUE16-NEXT: v_pk_add_f16 v10, 0x200, s11 op_sel_hi:[0,1]
-; GFX11-TRUE16-NEXT: v_pk_add_f16 v11, 0x200, s12 op_sel_hi:[0,1]
-; GFX11-TRUE16-NEXT: v_pk_add_f16 v12, 0x200, s13 op_sel_hi:[0,1]
-; GFX11-TRUE16-NEXT: v_pk_add_f16 v13, 0x200, s14 op_sel_hi:[0,1]
-; GFX11-TRUE16-NEXT: v_pk_add_f16 v14, 0x200, s0 op_sel_hi:[0,1]
-; GFX11-TRUE16-NEXT: v_pk_add_f16 v15, 0x200, s15 op_sel_hi:[0,1]
-; GFX11-TRUE16-NEXT: v_pk_add_f16 v16, 0x200, s16 op_sel_hi:[0,1]
-; GFX11-TRUE16-NEXT: v_pk_add_f16 v17, 0x200, s17 op_sel_hi:[0,1]
-; GFX11-TRUE16-NEXT: v_pk_add_f16 v18, 0x200, v18 op_sel_hi:[0,1]
-; GFX11-TRUE16-NEXT: v_pk_add_f16 v19, 0x200, v19 op_sel_hi:[0,1]
-; GFX11-TRUE16-NEXT: v_pk_add_f16 v20, 0x200, v20 op_sel_hi:[0,1]
-; GFX11-TRUE16-NEXT: v_pk_add_f16 v21, 0x200, v21 op_sel_hi:[0,1]
-; GFX11-TRUE16-NEXT: v_pk_add_f16 v22, 0x200, v22 op_sel_hi:[0,1]
-; GFX11-TRUE16-NEXT: v_pk_add_f16 v23, 0x200, v23 op_sel_hi:[0,1]
+; GFX11-TRUE16-NEXT: v_pk_add_f16 v0, 0x200, s40 op_sel_hi:[0,1]
+; GFX11-TRUE16-NEXT: v_pk_add_f16 v2, 0x200, s41 op_sel_hi:[0,1]
+; GFX11-TRUE16-NEXT: v_pk_add_f16 v190, 0x200, v190 op_sel_hi:[0,1]
+; GFX11-TRUE16-NEXT: v_pk_add_f16 v189, 0x200, v189 op_sel_hi:[0,1]
+; GFX11-TRUE16-NEXT: v_pk_add_f16 v188, 0x200, v188 op_sel_hi:[0,1]
+; GFX11-TRUE16-NEXT: v_pk_add_f16 v187, 0x200, v187 op_sel_hi:[0,1]
+; GFX11-TRUE16-NEXT: v_pk_add_f16 v186, 0x200, v186 op_sel_hi:[0,1]
+; GFX11-TRUE16-NEXT: v_pk_add_f16 v185, 0x200, v185 op_sel_hi:[0,1]
+; GFX11-TRUE16-NEXT: v_pk_add_f16 v5, 0x200, s0 op_sel_hi:[0,1]
+; GFX11-TRUE16-NEXT: v_pk_add_f16 v9, 0x200, s1 op_sel_hi:[0,1]
+; GFX11-TRUE16-NEXT: v_pk_add_f16 v14, 0x200, s2 op_sel_hi:[0,1]
+; GFX11-TRUE16-NEXT: v_pk_add_f16 v20, 0x200, s3 op_sel_hi:[0,1]
+; GFX11-TRUE16-NEXT: v_pk_add_f16 v27, 0x200, s4 op_sel_hi:[0,1]
+; GFX11-TRUE16-NEXT: v_pk_add_f16 v35, 0x200, s5 op_sel_hi:[0,1]
+; GFX11-TRUE16-NEXT: v_pk_add_f16 v44, 0x200, s6 op_sel_hi:[0,1]
+; GFX11-TRUE16-NEXT: v_pk_add_f16 v54, 0x200, s7 op_sel_hi:[0,1]
+; GFX11-TRUE16-NEXT: v_pk_add_f16 v65, 0x200, s8 op_sel_hi:[0,1]
+; GFX11-TRUE16-NEXT: v_pk_add_f16 v77, 0x200, s9 op_sel_hi:[0,1]
+; GFX11-TRUE16-NEXT: v_pk_add_f16 v90, 0x200, s10 op_sel_hi:[0,1]
+; GFX11-TRUE16-NEXT: v_pk_add_f16 v104, 0x200, s11 op_sel_hi:[0,1]
+; GFX11-TRUE16-NEXT: v_pk_add_f16 v119, 0x200, s12 op_sel_hi:[0,1]
+; GFX11-TRUE16-NEXT: v_pk_add_f16 v135, 0x200, s13 op_sel_hi:[0,1]
+; GFX11-TRUE16-NEXT: v_pk_add_f16 v152, 0x200, s14 op_sel_hi:[0,1]
+; GFX11-TRUE16-NEXT: v_pk_add_f16 v170, 0x200, s15 op_sel_hi:[0,1]
; GFX11-TRUE16-NEXT: .LBB55_3: ; %end
+; GFX11-TRUE16-NEXT: v_dual_mov_b32 v1, v2 :: v_dual_mov_b32 v2, v5
+; GFX11-TRUE16-NEXT: v_dual_mov_b32 v5, v20 :: v_dual_mov_b32 v6, v27
+; GFX11-TRUE16-NEXT: v_dual_mov_b32 v7, v35 :: v_dual_mov_b32 v8, v44
+; GFX11-TRUE16-NEXT: v_dual_mov_b32 v11, v77 :: v_dual_mov_b32 v12, v90
+; GFX11-TRUE16-NEXT: v_mov_b32_e32 v13, v104
+; GFX11-TRUE16-NEXT: v_dual_mov_b32 v15, v135 :: v_dual_mov_b32 v16, v152
+; GFX11-TRUE16-NEXT: v_dual_mov_b32 v17, v170 :: v_dual_mov_b32 v20, v188
+; GFX11-TRUE16-NEXT: v_dual_mov_b32 v18, v190 :: v_dual_mov_b32 v19, v189
+; GFX11-TRUE16-NEXT: v_dual_mov_b32 v21, v187 :: v_dual_mov_b32 v22, v186
+; GFX11-TRUE16-NEXT: v_mov_b32_e32 v23, v185
+; GFX11-TRUE16-NEXT: s_clause 0x1f
+; GFX11-TRUE16-NEXT: scratch_load_b32 v190, off, s32
+; GFX11-TRUE16-NEXT: scratch_load_b32 v189, off, s32 offset:4
+; GFX11-TRUE16-NEXT: scratch_load_b32 v188, off, s32 offset:8
+; GFX11-TRUE16-NEXT: scratch_load_b32 v187, off, s32 offset:12
+; GFX11-TRUE16-NEXT: scratch_load_b32 v186, off, s32 offset:16
+; GFX11-TRUE16-NEXT: scratch_load_b32 v185, off, s32 offset:20
+; GFX11-TRUE16-NEXT: scratch_load_b32 v184, off, s32 offset:24
+; GFX11-TRUE16-NEXT: scratch_load_b32 v175, off, s32 offset:28
+; GFX11-TRUE16-NEXT: scratch_load_b32 v174, off, s32 offset:32
+; GFX11-TRUE16-NEXT: scratch_load_b32 v173, off, s32 offset:36
+; GFX11-TRUE16-NEXT: scratch_load_b32 v172, off, s32 offset:40
+; GFX11-TRUE16-NEXT: scratch_load_b32 v171, off, s32 offset:44
+; GFX11-TRUE16-NEXT: scratch_load_b32 v170, off, s32 offset:48
+; GFX11-TRUE16-NEXT: scratch_load_b32 v169, off, s32 offset:52
+; GFX11-TRUE16-NEXT: scratch_load_b32 v168, off, s32 offset:56
+; GFX11-TRUE16-NEXT: scratch_load_b32 v159, off, s32 offset:60
+; GFX11-TRUE16-NEXT: scratch_load_b32 v158, off, s32 offset:64
+; GFX11-TRUE16-NEXT: scratch_load_b32 v157, off, s32 offset:68
+; GFX11-TRUE16-NEXT: scratch_load_b32 v156, off, s32 offset:72
+; GFX11-TRUE16-NEXT: scratch_load_b32 v155, off, s32 offset:76
+; GFX11-TRUE16-NEXT: scratch_load_b32 v154, off, s32 offset:80
+; GFX11-TRUE16-NEXT: scratch_load_b32 v153, off, s32 offset:84
+; GFX11-TRUE16-NEXT: scratch_load_b32 v152, off, s32 offset:88
+; GFX11-TRUE16-NEXT: scratch_load_b32 v143, off, s32 offset:92
+; GFX11-TRUE16-NEXT: scratch_load_b32 v142, off, s32 offset:96
+; GFX11-TRUE16-NEXT: scratch_load_b32 v141, off, s32 offset:100
+; GFX11-TRUE16-NEXT: scratch_load_b32 v140, off, s32 offset:104
+; GFX11-TRUE16-NEXT: scratch_load_b32 v139, off, s32 offset:108
+; GFX11-TRUE16-NEXT: scratch_load_b32 v138, off, s32 offset:112
+; GFX11-TRUE16-NEXT: scratch_load_b32 v137, off, s32 offset:116
+; GFX11-TRUE16-NEXT: scratch_load_b32 v136, off, s32 offset:120
+; GFX11-TRUE16-NEXT: scratch_load_b32 v127, off, s32 offset:124
+; GFX11-TRUE16-NEXT: s_clause 0x1f
+; GFX11-TRUE16-NEXT: scratch_load_b32 v126, off, s32 offset:128
+; GFX11-TRUE16-NEXT: scratch_load_b32 v125, off, s32 offset:132
+; GFX11-TRUE16-NEXT: scratch_load_b32 v124, off, s32 offset:136
+; GFX11-TRUE16-NEXT: scratch_load_b32 v123, off, s32 offset:140
+; GFX11-TRUE16-NEXT: scratch_load_b32 v122, off, s32 offset:144
+; GFX11-TRUE16-NEXT: scratch_load_b32 v121, off, s32 offset:148
+; GFX11-TRUE16-NEXT: scratch_load_b32 v120, off, s32 offset:152
+; GFX11-TRUE16-NEXT: scratch_load_b32 v111, off, s32 offset:156
+; GFX11-TRUE16-NEXT: scratch_load_b32 v110, off, s32 offset:160
+; GFX11-TRUE16-NEXT: scratch_load_b32 v109, off, s32 offset:164
+; GFX11-TRUE16-NEXT: scratch_load_b32 v108, off, s32 offset:168
+; GFX11-TRUE16-NEXT: scratch_load_b32 v107, off, s32 offset:172
+; GFX11-TRUE16-NEXT: scratch_load_b32 v106, off, s32 offset:176
+; GFX11-TRUE16-NEXT: scratch_load_b32 v105, off, s32 offset:180
+; GFX11-TRUE16-NEXT: scratch_load_b32 v104, off, s32 offset:184
+; GFX11-TRUE16-NEXT: scratch_load_b32 v95, off, s32 offset:188
+; GFX11-TRUE16-NEXT: scratch_load_b32 v94, off, s32 offset:192
+; GFX11-TRUE16-NEXT: scratch_load_b32 v93, off, s32 offset:196
+; GFX11-TRUE16-NEXT: scratch_load_b32 v92, off, s32 offset:200
+; GFX11-TRUE16-NEXT: scratch_load_b32 v91, off, s32 offset:204
+; GFX11-TRUE16-NEXT: scratch_load_b32 v90, off, s32 offset:208
+; GFX11-TRUE16-NEXT: scratch_load_b32 v89, off, s32 offset:212
+; GFX11-TRUE16-NEXT: scratch_load_b32 v88, off, s32 offset:216
+; GFX11-TRUE16-NEXT: scratch_load_b32 v79, off, s32 offset:220
+; GFX11-TRUE16-NEXT: scratch_load_b32 v78, off, s32 offset:224
+; GFX11-TRUE16-NEXT: scratch_load_b32 v77, off, s32 offset:228
+; GFX11-TRUE16-NEXT: scratch_load_b32 v76, off, s32 offset:232
+; GFX11-TRUE16-NEXT: scratch_load_b32 v75, off, s32 offset:236
+; GFX11-TRUE16-NEXT: scratch_load_b32 v74, off, s32 offset:240
+; GFX11-TRUE16-NEXT: scratch_load_b32 v73, off, s32 offset:244
+; GFX11-TRUE16-NEXT: scratch_load_b32 v72, off, s32 offset:248
+; GFX11-TRUE16-NEXT: scratch_load_b32 v63, off, s32 offset:252
+; GFX11-TRUE16-NEXT: s_clause 0xe
+; GFX11-TRUE16-NEXT: scratch_load_b32 v62, off, s32 offset:256
+; GFX11-TRUE16-NEXT: scratch_load_b32 v61, off, s32 offset:260
+; GFX11-TRUE16-NEXT: scratch_load_b32 v60, off, s32 offset:264
+; GFX11-TRUE16-NEXT: scratch_load_b32 v59, off, s32 offset:268
+; GFX11-TRUE16-NEXT: scratch_load_b32 v58, off, s32 offset:272
+; GFX11-TRUE16-NEXT: scratch_load_b32 v57, off, s32 offset:276
+; GFX11-TRUE16-NEXT: scratch_load_b32 v56, off, s32 offset:280
+; GFX11-TRUE16-NEXT: scratch_load_b32 v47, off, s32 offset:284
+; GFX11-TRUE16-NEXT: scratch_load_b32 v46, off, s32 offset:288
+; GFX11-TRUE16-NEXT: scratch_load_b32 v45, off, s32 offset:292
+; GFX11-TRUE16-NEXT: scratch_load_b32 v44, off, s32 offset:296
+; GFX11-TRUE16-NEXT: scratch_load_b32 v43, off, s32 offset:300
+; GFX11-TRUE16-NEXT: scratch_load_b32 v42, off, s32 offset:304
+; GFX11-TRUE16-NEXT: scratch_load_b32 v41, off, s32 offset:308
+; GFX11-TRUE16-NEXT: scratch_load_b32 v40, off, s32 offset:312
+; GFX11-TRUE16-NEXT: v_dual_mov_b32 v3, v9 :: v_dual_mov_b32 v4, v14
+; GFX11-TRUE16-NEXT: v_dual_mov_b32 v9, v54 :: v_dual_mov_b32 v10, v65
+; GFX11-TRUE16-NEXT: v_mov_b32_e32 v14, v119
+; GFX11-TRUE16-NEXT: s_waitcnt vmcnt(0)
; GFX11-TRUE16-NEXT: s_setpc_b64 s[30:31]
; GFX11-TRUE16-NEXT: .LBB55_4:
; GFX11-TRUE16-NEXT: ; implicit-def: $vgpr0_vgpr1_vgpr2_vgpr3_vgpr4_vgpr5_vgpr6_vgpr7_vgpr8_vgpr9_vgpr10_vgpr11_vgpr12_vgpr13_vgpr14_vgpr15_vgpr16_vgpr17_vgpr18_vgpr19_vgpr20_vgpr21_vgpr22_vgpr23_vgpr24_vgpr25_vgpr26_vgpr27_vgpr28_vgpr29_vgpr30_vgpr31
+; GFX11-TRUE16-NEXT: ; implicit-def: $vgpr1_vgpr2_vgpr3_vgpr4_vgpr5_vgpr6_vgpr7_vgpr8_vgpr9_vgpr10_vgpr11_vgpr12_vgpr13_vgpr14_vgpr15_vgpr16_vgpr17_vgpr18_vgpr19_vgpr20_vgpr21_vgpr22_vgpr23_vgpr24_vgpr25_vgpr26_vgpr27_vgpr28_vgpr29_vgpr30_vgpr31_vgpr32
+; GFX11-TRUE16-NEXT: ; implicit-def: $vgpr3_vgpr4_vgpr5_vgpr6_vgpr7_vgpr8_vgpr9_vgpr10_vgpr11_vgpr12_vgpr13_vgpr14_vgpr15_vgpr16_vgpr17_vgpr18_vgpr19_vgpr20_vgpr21_vgpr22_vgpr23_vgpr24_vgpr25_vgpr26_vgpr27_vgpr28_vgpr29_vgpr30_vgpr31_vgpr32_vgpr33_vgpr34
+; GFX11-TRUE16-NEXT: ; implicit-def: $vgpr6_vgpr7_vgpr8_vgpr9_vgpr10_vgpr11_vgpr12_vgpr13_vgpr14_vgpr15_vgpr16_vgpr17_vgpr18_vgpr19_vgpr20_vgpr21_vgpr22_vgpr23_vgpr24_vgpr25_vgpr26_vgpr27_vgpr28_vgpr29_vgpr30_vgpr31_vgpr32_vgpr33_vgpr34_vgpr35_vgpr36_vgpr37
+; GFX11-TRUE16-NEXT: ; implicit-def: $vgpr10_vgpr11_vgpr12_vgpr13_vgpr14_vgpr15_vgpr16_vgpr17_vgpr18_vgpr19_vgpr20_vgpr21_vgpr22_vgpr23_vgpr24_vgpr25_vgpr26_vgpr27_vgpr28_vgpr29_vgpr30_vgpr31_vgpr32_vgpr33_vgpr34_vgpr35_vgpr36_vgpr37_vgpr38_vgpr39_vgpr40_vgpr41
+; GFX11-TRUE16-NEXT: ; implicit-def: $vgpr15_vgpr16_vgpr17_vgpr18_vgpr19_vgpr20_vgpr21_vgpr22_vgpr23_vgpr24_vgpr25_vgpr26_vgpr27_vgpr28_vgpr29_vgpr30_vgpr31_vgpr32_vgpr33_vgpr34_vgpr35_vgpr36_vgpr37_vgpr38_vgpr39_vgpr40_vgpr41_vgpr42_vgpr43_vgpr44_vgpr45_vgpr46
+; GFX11-TRUE16-NEXT: ; implicit-def: $vgpr21_vgpr22_vgpr23_vgpr24_vgpr25_vgpr26_vgpr27_vgpr28_vgpr29_vgpr30_vgpr31_vgpr32_vgpr33_vgpr34_vgpr35_vgpr36_vgpr37_vgpr38_vgpr39_vgpr40_vgpr41_vgpr42_vgpr43_vgpr44_vgpr45_vgpr46_vgpr47_vgpr48_vgpr49_vgpr50_vgpr51_vgpr52
+; GFX11-TRUE16-NEXT: ; implicit-def: $vgpr28_vgpr29_vgpr30_vgpr31_vgpr32_vgpr33_vgpr34_vgpr35_vgpr36_vgpr37_vgpr38_vgpr39_vgpr40_vgpr41_vgpr42_vgpr43_vgpr44_vgpr45_vgpr46_vgpr47_vgpr48_vgpr49_vgpr50_vgpr51_vgpr52_vgpr53_vgpr54_vgpr55_vgpr56_vgpr57_vgpr58_vgpr59
+; GFX11-TRUE16-NEXT: ; implicit-def: $vgpr36_vgpr37_vgpr38_vgpr39_vgpr40_vgpr41_vgpr42_vgpr43_vgpr44_vgpr45_vgpr46_vgpr47_vgpr48_vgpr49_vgpr50_vgpr51_vgpr52_vgpr53_vgpr54_vgpr55_vgpr56_vgpr57_vgpr58_vgpr59_vgpr60_vgpr61_vgpr62_vgpr63_vgpr64_vgpr65_vgpr66_vgpr67
+; GFX11-TRUE16-NEXT: ; implicit-def: $vgpr45_vgpr46_vgpr47_vgpr48_vgpr49_vgpr50_vgpr51_vgpr52_vgpr53_vgpr54_vgpr55_vgpr56_vgpr57_vgpr58_vgpr59_vgpr60_vgpr61_vgpr62_vgpr63_vgpr64_vgpr65_vgpr66_vgpr67_vgpr68_vgpr69_vgpr70_vgpr71_vgpr72_vgpr73_vgpr74_vgpr75_vgpr76
+; GFX11-TRUE16-NEXT: ; implicit-def: $vgpr55_vgpr56_vgpr57_vgpr58_vgpr59_vgpr60_vgpr61_vgpr62_vgpr63_vgpr64_vgpr65_vgpr66_vgpr67_vgpr68_vgpr69_vgpr70_vgpr71_vgpr72_vgpr73_vgpr74_vgpr75_vgpr76_vgpr77_vgpr78_vgpr79_vgpr80_vgpr81_vgpr82_vgpr83_vgpr84_vgpr85_vgpr86
+; GFX11-TRUE16-NEXT: ; implicit-def: $vgpr66_vgpr67_vgpr68_vgpr69_vgpr70_vgpr71_vgpr72_vgpr73_vgpr74_vgpr75_vgpr76_vgpr77_vgpr78_vgpr79_vgpr80_vgpr81_vgpr82_vgpr83_vgpr84_vgpr85_vgpr86_vgpr87_vgpr88_vgpr89_vgpr90_vgpr91_vgpr92_vgpr93_vgpr94_vgpr95_vgpr96_vgpr97
+; GFX11-TRUE16-NEXT: ; implicit-def: $vgpr78_vgpr79_vgpr80_vgpr81_vgpr82_vgpr83_vgpr84_vgpr85_vgpr86_vgpr87_vgpr88_vgpr89_vgpr90_vgpr91_vgpr92_vgpr93_vgpr94_vgpr95_vgpr96_vgpr97_vgpr98_vgpr99_vgpr100_vgpr101_vgpr102_vgpr103_vgpr104_vgpr105_vgpr106_vgpr107_vgpr108_vgpr109
+; GFX11-TRUE16-NEXT: ; implicit-def: $vgpr91_vgpr92_vgpr93_vgpr94_vgpr95_vgpr96_vgpr97_vgpr98_vgpr99_vgpr100_vgpr101_vgpr102_vgpr103_vgpr104_vgpr105_vgpr106_vgpr107_vgpr108_vgpr109_vgpr110_vgpr111_vgpr112_vgpr113_vgpr114_vgpr115_vgpr116_vgpr117_vgpr118_vgpr119_vgpr120_vgpr121_vgpr122
+; GFX11-TRUE16-NEXT: ; implicit-def: $vgpr105_vgpr106_vgpr107_vgpr108_vgpr109_vgpr110_vgpr111_vgpr112_vgpr113_vgpr114_vgpr115_vgpr116_vgpr117_vgpr118_vgpr119_vgpr120_vgpr121_vgpr122_vgpr123_vgpr124_vgpr125_vgpr126_vgpr127_vgpr128_vgpr129_vgpr130_vgpr131_vgpr132_vgpr133_vgpr134_vgpr135_vgpr136
+; GFX11-TRUE16-NEXT: ; implicit-def: $vgpr120_vgpr121_vgpr122_vgpr123_vgpr124_vgpr125_vgpr126_vgpr127_vgpr128_vgpr129_vgpr130_vgpr131_vgpr132_vgpr133_vgpr134_vgpr135_vgpr136_vgpr137_vgpr138_vgpr139_vgpr140_vgpr141_vgpr142_vgpr143_vgpr144_vgpr145_vgpr146_vgpr147_vgpr148_vgpr149_vgpr150_vgpr151
+; GFX11-TRUE16-NEXT: ; implicit-def: $vgpr136_vgpr137_vgpr138_vgpr139_vgpr140_vgpr141_vgpr142_vgpr143_vgpr144_vgpr145_vgpr146_vgpr147_vgpr148_vgpr149_vgpr150_vgpr151_vgpr152_vgpr153_vgpr154_vgpr155_vgpr156_vgpr157_vgpr158_vgpr159_vgpr160_vgpr161_vgpr162_vgpr163_vgpr164_vgpr165_vgpr166_vgpr167
+; GFX11-TRUE16-NEXT: ; implicit-def: $vgpr153_vgpr154_vgpr155_vgpr156_vgpr157_vgpr158_vgpr159_vgpr160_vgpr161_vgpr162_vgpr163_vgpr164_vgpr165_vgpr166_vgpr167_vgpr168_vgpr169_vgpr170_vgpr171_vgpr172_vgpr173_vgpr174_vgpr175_vgpr176_vgpr177_vgpr178_vgpr179_vgpr180_vgpr181_vgpr182_vgpr183_vgpr184
; GFX11-TRUE16-NEXT: s_branch .LBB55_2
;
; GFX11-FAKE16-LABEL: bitcast_v48f16_to_v12f64_scalar:
@@ -37964,19 +39920,11 @@ define inreg <48 x half> @bitcast_v48i16_to_v48f16_scalar(<48 x i16> inreg %a, i
; GFX11-TRUE16-LABEL: bitcast_v48i16_to_v48f16_scalar:
; GFX11-TRUE16: ; %bb.0:
; GFX11-TRUE16-NEXT: s_waitcnt vmcnt(0) expcnt(0) lgkmcnt(0)
-; GFX11-TRUE16-NEXT: v_mov_b16_e32 v23.h, 0
; GFX11-TRUE16-NEXT: v_cmp_ne_u32_e32 vcc_lo, 0, v6
-; GFX11-TRUE16-NEXT: v_mov_b16_e32 v23.l, v5.h
-; GFX11-TRUE16-NEXT: v_mov_b16_e32 v22.l, v4.h
-; GFX11-TRUE16-NEXT: v_mov_b16_e32 v21.l, v3.h
-; GFX11-TRUE16-NEXT: v_mov_b16_e32 v22.h, v23.h
-; GFX11-TRUE16-NEXT: v_mov_b16_e32 v21.h, v23.h
-; GFX11-TRUE16-NEXT: v_mov_b16_e32 v20.l, v2.h
-; GFX11-TRUE16-NEXT: v_mov_b16_e32 v20.h, v23.h
-; GFX11-TRUE16-NEXT: v_mov_b16_e32 v19.l, v1.h
-; GFX11-TRUE16-NEXT: v_mov_b16_e32 v19.h, v23.h
-; GFX11-TRUE16-NEXT: v_mov_b16_e32 v18.l, v0.h
-; GFX11-TRUE16-NEXT: v_mov_b16_e32 v18.h, v23.h
+; GFX11-TRUE16-NEXT: v_dual_mov_b32 v23, v5 :: v_dual_mov_b32 v22, v4
+; GFX11-TRUE16-NEXT: v_dual_mov_b32 v21, v3 :: v_dual_mov_b32 v20, v2
+; GFX11-TRUE16-NEXT: v_dual_mov_b32 v19, v1 :: v_dual_mov_b32 v18, v0
+; GFX11-TRUE16-NEXT: v_mov_b16_e32 v24.h, 0
; GFX11-TRUE16-NEXT: s_lshr_b32 s45, s29, 16
; GFX11-TRUE16-NEXT: s_lshr_b32 s44, s28, 16
; GFX11-TRUE16-NEXT: s_lshr_b32 s43, s27, 16
@@ -37998,22 +39946,21 @@ define inreg <48 x half> @bitcast_v48i16_to_v48f16_scalar(<48 x i16> inreg %a, i
; GFX11-TRUE16-NEXT: s_mov_b32 s46, 0
; GFX11-TRUE16-NEXT: s_and_b32 s47, vcc_lo, exec_lo
; GFX11-TRUE16-NEXT: s_cbranch_scc0 .LBB57_3
-; GFX11-TRUE16-NEXT: ; %bb.1: ; %Flow
+; GFX11-TRUE16-NEXT: ; %bb.1: ; %cmp.false
+; GFX11-TRUE16-NEXT: v_mov_b16_e32 v24.l, v23.h
+; GFX11-TRUE16-NEXT: v_mov_b16_e32 v25.l, v22.h
+; GFX11-TRUE16-NEXT: v_mov_b16_e32 v25.h, v24.h
+; GFX11-TRUE16-NEXT: v_mov_b16_e32 v26.l, v21.h
+; GFX11-TRUE16-NEXT: v_mov_b16_e32 v26.h, v24.h
+; GFX11-TRUE16-NEXT: v_mov_b16_e32 v27.l, v20.h
+; GFX11-TRUE16-NEXT: v_mov_b16_e32 v27.h, v24.h
+; GFX11-TRUE16-NEXT: v_mov_b16_e32 v28.l, v19.h
+; GFX11-TRUE16-NEXT: v_mov_b16_e32 v28.h, v24.h
+; GFX11-TRUE16-NEXT: v_mov_b16_e32 v29.l, v18.h
+; GFX11-TRUE16-NEXT: v_mov_b16_e32 v29.h, v24.h
; GFX11-TRUE16-NEXT: s_and_not1_b32 vcc_lo, exec_lo, s46
; GFX11-TRUE16-NEXT: s_cbranch_vccnz .LBB57_4
; GFX11-TRUE16-NEXT: .LBB57_2: ; %cmp.true
-; GFX11-TRUE16-NEXT: v_and_b32_e32 v5, 0xffff, v5
-; GFX11-TRUE16-NEXT: v_and_b32_e32 v4, 0xffff, v4
-; GFX11-TRUE16-NEXT: v_and_b32_e32 v3, 0xffff, v3
-; GFX11-TRUE16-NEXT: v_and_b32_e32 v2, 0xffff, v2
-; GFX11-TRUE16-NEXT: v_and_b32_e32 v1, 0xffff, v1
-; GFX11-TRUE16-NEXT: v_and_b32_e32 v0, 0xffff, v0
-; GFX11-TRUE16-NEXT: v_lshl_or_b32 v5, v23, 16, v5
-; GFX11-TRUE16-NEXT: v_lshl_or_b32 v4, v22, 16, v4
-; GFX11-TRUE16-NEXT: v_lshl_or_b32 v3, v21, 16, v3
-; GFX11-TRUE16-NEXT: v_lshl_or_b32 v2, v20, 16, v2
-; GFX11-TRUE16-NEXT: v_lshl_or_b32 v1, v19, 16, v1
-; GFX11-TRUE16-NEXT: v_lshl_or_b32 v0, v18, 16, v0
; GFX11-TRUE16-NEXT: s_pack_ll_b32_b16 s29, s29, s45
; GFX11-TRUE16-NEXT: s_pack_ll_b32_b16 s28, s28, s44
; GFX11-TRUE16-NEXT: s_pack_ll_b32_b16 s27, s27, s43
@@ -38032,67 +39979,73 @@ define inreg <48 x half> @bitcast_v48i16_to_v48f16_scalar(<48 x i16> inreg %a, i
; GFX11-TRUE16-NEXT: s_pack_ll_b32_b16 s2, s2, s7
; GFX11-TRUE16-NEXT: s_pack_ll_b32_b16 s0, s0, s14
; GFX11-TRUE16-NEXT: s_pack_ll_b32_b16 s1, s1, s4
-; GFX11-TRUE16-NEXT: v_pk_add_u16 v5, v5, 3 op_sel_hi:[1,0]
-; GFX11-TRUE16-NEXT: v_pk_add_u16 v4, v4, 3 op_sel_hi:[1,0]
-; GFX11-TRUE16-NEXT: v_pk_add_u16 v3, v3, 3 op_sel_hi:[1,0]
-; GFX11-TRUE16-NEXT: v_pk_add_u16 v2, v2, 3 op_sel_hi:[1,0]
-; GFX11-TRUE16-NEXT: v_pk_add_u16 v1, v1, 3 op_sel_hi:[1,0]
-; GFX11-TRUE16-NEXT: v_pk_add_u16 v0, v0, 3 op_sel_hi:[1,0]
-; GFX11-TRUE16-NEXT: v_pk_add_u16 v14, s29, 3 op_sel_hi:[1,0]
-; GFX11-TRUE16-NEXT: v_pk_add_u16 v15, s28, 3 op_sel_hi:[1,0]
-; GFX11-TRUE16-NEXT: v_pk_add_u16 v16, s27, 3 op_sel_hi:[1,0]
-; GFX11-TRUE16-NEXT: v_pk_add_u16 v17, s26, 3 op_sel_hi:[1,0]
-; GFX11-TRUE16-NEXT: v_pk_add_u16 v9, s25, 3 op_sel_hi:[1,0]
-; GFX11-TRUE16-NEXT: v_pk_add_u16 v10, s24, 3 op_sel_hi:[1,0]
+; GFX11-TRUE16-NEXT: v_pk_add_u16 v23, v23, 3 op_sel_hi:[1,0]
+; GFX11-TRUE16-NEXT: v_pk_add_u16 v22, v22, 3 op_sel_hi:[1,0]
+; GFX11-TRUE16-NEXT: v_pk_add_u16 v21, v21, 3 op_sel_hi:[1,0]
+; GFX11-TRUE16-NEXT: v_pk_add_u16 v20, v20, 3 op_sel_hi:[1,0]
+; GFX11-TRUE16-NEXT: v_pk_add_u16 v19, v19, 3 op_sel_hi:[1,0]
+; GFX11-TRUE16-NEXT: v_pk_add_u16 v18, v18, 3 op_sel_hi:[1,0]
+; GFX11-TRUE16-NEXT: v_pk_add_u16 v17, s29, 3 op_sel_hi:[1,0]
+; GFX11-TRUE16-NEXT: v_pk_add_u16 v16, s28, 3 op_sel_hi:[1,0]
+; GFX11-TRUE16-NEXT: v_pk_add_u16 v15, s27, 3 op_sel_hi:[1,0]
+; GFX11-TRUE16-NEXT: v_pk_add_u16 v14, s26, 3 op_sel_hi:[1,0]
+; GFX11-TRUE16-NEXT: v_pk_add_u16 v13, s25, 3 op_sel_hi:[1,0]
+; GFX11-TRUE16-NEXT: v_pk_add_u16 v12, s24, 3 op_sel_hi:[1,0]
; GFX11-TRUE16-NEXT: v_pk_add_u16 v11, s15, 3 op_sel_hi:[1,0]
-; GFX11-TRUE16-NEXT: v_pk_add_u16 v12, s13, 3 op_sel_hi:[1,0]
-; GFX11-TRUE16-NEXT: v_pk_add_u16 v13, s12, 3 op_sel_hi:[1,0]
-; GFX11-TRUE16-NEXT: v_pk_add_u16 v6, s11, 3 op_sel_hi:[1,0]
+; GFX11-TRUE16-NEXT: v_pk_add_u16 v10, s13, 3 op_sel_hi:[1,0]
+; GFX11-TRUE16-NEXT: v_pk_add_u16 v9, s12, 3 op_sel_hi:[1,0]
+; GFX11-TRUE16-NEXT: v_pk_add_u16 v8, s11, 3 op_sel_hi:[1,0]
; GFX11-TRUE16-NEXT: v_pk_add_u16 v7, s10, 3 op_sel_hi:[1,0]
-; GFX11-TRUE16-NEXT: v_pk_add_u16 v8, s9, 3 op_sel_hi:[1,0]
-; GFX11-TRUE16-NEXT: v_pk_add_u16 v28, s8, 3 op_sel_hi:[1,0]
-; GFX11-TRUE16-NEXT: v_pk_add_u16 v27, s0, 3 op_sel_hi:[1,0]
-; GFX11-TRUE16-NEXT: v_pk_add_u16 v26, s1, 3 op_sel_hi:[1,0]
-; GFX11-TRUE16-NEXT: v_pk_add_u16 v25, s2, 3 op_sel_hi:[1,0]
-; GFX11-TRUE16-NEXT: v_pk_add_u16 v24, s3, 3 op_sel_hi:[1,0]
-; GFX11-TRUE16-NEXT: v_pk_add_u16 v29, s6, 3 op_sel_hi:[1,0]
-; GFX11-TRUE16-NEXT: v_lshrrev_b32_e32 v55, 16, v27
-; GFX11-TRUE16-NEXT: v_lshrrev_b32_e32 v54, 16, v26
-; GFX11-TRUE16-NEXT: v_lshrrev_b32_e32 v53, 16, v25
-; GFX11-TRUE16-NEXT: v_lshrrev_b32_e32 v52, 16, v24
-; GFX11-TRUE16-NEXT: v_lshrrev_b32_e32 v51, 16, v29
-; GFX11-TRUE16-NEXT: v_lshrrev_b32_e32 v50, 16, v28
-; GFX11-TRUE16-NEXT: v_lshrrev_b32_e32 v49, 16, v8
+; GFX11-TRUE16-NEXT: v_pk_add_u16 v6, s9, 3 op_sel_hi:[1,0]
+; GFX11-TRUE16-NEXT: v_pk_add_u16 v5, s8, 3 op_sel_hi:[1,0]
+; GFX11-TRUE16-NEXT: v_pk_add_u16 v0, s0, 3 op_sel_hi:[1,0]
+; GFX11-TRUE16-NEXT: v_pk_add_u16 v1, s1, 3 op_sel_hi:[1,0]
+; GFX11-TRUE16-NEXT: v_pk_add_u16 v2, s2, 3 op_sel_hi:[1,0]
+; GFX11-TRUE16-NEXT: v_pk_add_u16 v3, s3, 3 op_sel_hi:[1,0]
+; GFX11-TRUE16-NEXT: v_pk_add_u16 v4, s6, 3 op_sel_hi:[1,0]
+; GFX11-TRUE16-NEXT: v_lshrrev_b32_e32 v55, 16, v0
+; GFX11-TRUE16-NEXT: v_lshrrev_b32_e32 v54, 16, v1
+; GFX11-TRUE16-NEXT: v_lshrrev_b32_e32 v53, 16, v2
+; GFX11-TRUE16-NEXT: v_lshrrev_b32_e32 v52, 16, v3
+; GFX11-TRUE16-NEXT: v_lshrrev_b32_e32 v51, 16, v4
+; GFX11-TRUE16-NEXT: v_lshrrev_b32_e32 v50, 16, v5
+; GFX11-TRUE16-NEXT: v_lshrrev_b32_e32 v49, 16, v6
; GFX11-TRUE16-NEXT: v_lshrrev_b32_e32 v48, 16, v7
-; GFX11-TRUE16-NEXT: v_lshrrev_b32_e32 v39, 16, v6
-; GFX11-TRUE16-NEXT: v_lshrrev_b32_e32 v38, 16, v13
-; GFX11-TRUE16-NEXT: v_lshrrev_b32_e32 v37, 16, v12
+; GFX11-TRUE16-NEXT: v_lshrrev_b32_e32 v39, 16, v8
+; GFX11-TRUE16-NEXT: v_lshrrev_b32_e32 v38, 16, v9
+; GFX11-TRUE16-NEXT: v_lshrrev_b32_e32 v37, 16, v10
; GFX11-TRUE16-NEXT: v_lshrrev_b32_e32 v36, 16, v11
-; GFX11-TRUE16-NEXT: v_lshrrev_b32_e32 v35, 16, v10
-; GFX11-TRUE16-NEXT: v_lshrrev_b32_e32 v34, 16, v9
-; GFX11-TRUE16-NEXT: v_lshrrev_b32_e32 v33, 16, v17
-; GFX11-TRUE16-NEXT: v_lshrrev_b32_e32 v32, 16, v16
-; GFX11-TRUE16-NEXT: v_lshrrev_b32_e32 v31, 16, v15
-; GFX11-TRUE16-NEXT: v_lshrrev_b32_e32 v30, 16, v14
-; GFX11-TRUE16-NEXT: v_lshrrev_b32_e32 v18, 16, v0
-; GFX11-TRUE16-NEXT: v_lshrrev_b32_e32 v19, 16, v1
-; GFX11-TRUE16-NEXT: v_lshrrev_b32_e32 v20, 16, v2
-; GFX11-TRUE16-NEXT: v_lshrrev_b32_e32 v21, 16, v3
-; GFX11-TRUE16-NEXT: v_lshrrev_b32_e32 v22, 16, v4
-; GFX11-TRUE16-NEXT: v_lshrrev_b32_e32 v23, 16, v5
+; GFX11-TRUE16-NEXT: v_lshrrev_b32_e32 v35, 16, v12
+; GFX11-TRUE16-NEXT: v_lshrrev_b32_e32 v34, 16, v13
+; GFX11-TRUE16-NEXT: v_lshrrev_b32_e32 v33, 16, v14
+; GFX11-TRUE16-NEXT: v_lshrrev_b32_e32 v32, 16, v15
+; GFX11-TRUE16-NEXT: v_lshrrev_b32_e32 v31, 16, v16
+; GFX11-TRUE16-NEXT: v_lshrrev_b32_e32 v30, 16, v17
+; GFX11-TRUE16-NEXT: v_lshrrev_b32_e32 v29, 16, v18
+; GFX11-TRUE16-NEXT: v_lshrrev_b32_e32 v28, 16, v19
+; GFX11-TRUE16-NEXT: v_lshrrev_b32_e32 v27, 16, v20
+; GFX11-TRUE16-NEXT: v_lshrrev_b32_e32 v26, 16, v21
+; GFX11-TRUE16-NEXT: v_lshrrev_b32_e32 v25, 16, v22
+; GFX11-TRUE16-NEXT: v_lshrrev_b32_e32 v24, 16, v23
; GFX11-TRUE16-NEXT: s_branch .LBB57_5
; GFX11-TRUE16-NEXT: .LBB57_3:
+; GFX11-TRUE16-NEXT: ; implicit-def: $vgpr29
+; GFX11-TRUE16-NEXT: ; implicit-def: $vgpr28
+; GFX11-TRUE16-NEXT: ; implicit-def: $vgpr27
+; GFX11-TRUE16-NEXT: ; implicit-def: $vgpr26
+; GFX11-TRUE16-NEXT: ; implicit-def: $vgpr25
+; GFX11-TRUE16-NEXT: ; implicit-def: $vgpr24
; GFX11-TRUE16-NEXT: s_branch .LBB57_2
; GFX11-TRUE16-NEXT: .LBB57_4:
-; GFX11-TRUE16-NEXT: v_dual_mov_b32 v14, s29 :: v_dual_mov_b32 v15, s28
-; GFX11-TRUE16-NEXT: v_dual_mov_b32 v16, s27 :: v_dual_mov_b32 v17, s26
-; GFX11-TRUE16-NEXT: v_dual_mov_b32 v9, s25 :: v_dual_mov_b32 v10, s24
-; GFX11-TRUE16-NEXT: v_dual_mov_b32 v11, s23 :: v_dual_mov_b32 v12, s22
-; GFX11-TRUE16-NEXT: v_dual_mov_b32 v13, s21 :: v_dual_mov_b32 v6, s20
-; GFX11-TRUE16-NEXT: v_dual_mov_b32 v7, s19 :: v_dual_mov_b32 v8, s18
-; GFX11-TRUE16-NEXT: v_dual_mov_b32 v28, s17 :: v_dual_mov_b32 v29, s16
-; GFX11-TRUE16-NEXT: v_dual_mov_b32 v24, s3 :: v_dual_mov_b32 v25, s2
-; GFX11-TRUE16-NEXT: v_dual_mov_b32 v26, s1 :: v_dual_mov_b32 v27, s0
+; GFX11-TRUE16-NEXT: v_dual_mov_b32 v17, s29 :: v_dual_mov_b32 v16, s28
+; GFX11-TRUE16-NEXT: v_dual_mov_b32 v15, s27 :: v_dual_mov_b32 v14, s26
+; GFX11-TRUE16-NEXT: v_dual_mov_b32 v13, s25 :: v_dual_mov_b32 v12, s24
+; GFX11-TRUE16-NEXT: v_dual_mov_b32 v11, s23 :: v_dual_mov_b32 v10, s22
+; GFX11-TRUE16-NEXT: v_dual_mov_b32 v9, s21 :: v_dual_mov_b32 v8, s20
+; GFX11-TRUE16-NEXT: v_dual_mov_b32 v7, s19 :: v_dual_mov_b32 v6, s18
+; GFX11-TRUE16-NEXT: v_dual_mov_b32 v5, s17 :: v_dual_mov_b32 v4, s16
+; GFX11-TRUE16-NEXT: v_dual_mov_b32 v3, s3 :: v_dual_mov_b32 v2, s2
+; GFX11-TRUE16-NEXT: v_dual_mov_b32 v1, s1 :: v_dual_mov_b32 v0, s0
; GFX11-TRUE16-NEXT: v_dual_mov_b32 v30, s45 :: v_dual_mov_b32 v31, s44
; GFX11-TRUE16-NEXT: v_dual_mov_b32 v32, s43 :: v_dual_mov_b32 v33, s42
; GFX11-TRUE16-NEXT: v_dual_mov_b32 v34, s41 :: v_dual_mov_b32 v35, s40
@@ -38103,58 +40056,43 @@ define inreg <48 x half> @bitcast_v48i16_to_v48f16_scalar(<48 x i16> inreg %a, i
; GFX11-TRUE16-NEXT: v_dual_mov_b32 v52, s5 :: v_dual_mov_b32 v53, s7
; GFX11-TRUE16-NEXT: v_dual_mov_b32 v54, s4 :: v_dual_mov_b32 v55, s14
; GFX11-TRUE16-NEXT: .LBB57_5: ; %end
-; GFX11-TRUE16-NEXT: v_and_b32_e32 v27, 0xffff, v27
-; GFX11-TRUE16-NEXT: v_and_b32_e32 v65, 0xffff, v24
-; GFX11-TRUE16-NEXT: v_and_b32_e32 v0, 0xffff, v0
-; GFX11-TRUE16-NEXT: v_and_b32_e32 v26, 0xffff, v26
-; GFX11-TRUE16-NEXT: v_and_b32_e32 v8, 0xffff, v8
-; GFX11-TRUE16-NEXT: v_lshl_or_b32 v24, v55, 16, v27
-; GFX11-TRUE16-NEXT: v_lshl_or_b32 v27, v52, 16, v65
-; GFX11-TRUE16-NEXT: v_and_b32_e32 v64, 0xffff, v25
-; GFX11-TRUE16-NEXT: v_lshl_or_b32 v18, v18, 16, v0
-; GFX11-TRUE16-NEXT: v_and_b32_e32 v0, 0xffff, v1
-; GFX11-TRUE16-NEXT: v_and_b32_e32 v1, 0xffff, v2
-; GFX11-TRUE16-NEXT: v_and_b32_e32 v2, 0xffff, v3
-; GFX11-TRUE16-NEXT: v_and_b32_e32 v3, 0xffff, v4
-; GFX11-TRUE16-NEXT: v_lshl_or_b32 v25, v54, 16, v26
-; GFX11-TRUE16-NEXT: v_and_b32_e32 v7, 0xffff, v7
-; GFX11-TRUE16-NEXT: v_and_b32_e32 v29, 0xffff, v29
-; GFX11-TRUE16-NEXT: v_and_b32_e32 v52, 0xffff, v28
-; GFX11-TRUE16-NEXT: v_lshl_or_b32 v22, v22, 16, v3
-; GFX11-TRUE16-NEXT: v_mov_b32_e32 v3, v27
-; GFX11-TRUE16-NEXT: v_lshl_or_b32 v26, v53, 16, v64
-; GFX11-TRUE16-NEXT: v_and_b32_e32 v53, 0xffff, v6
-; GFX11-TRUE16-NEXT: v_lshl_or_b32 v6, v49, 16, v8
-; GFX11-TRUE16-NEXT: v_lshl_or_b32 v7, v48, 16, v7
-; GFX11-TRUE16-NEXT: v_and_b32_e32 v13, 0xffff, v13
-; GFX11-TRUE16-NEXT: v_and_b32_e32 v12, 0xffff, v12
-; GFX11-TRUE16-NEXT: v_lshl_or_b32 v8, v39, 16, v53
-; GFX11-TRUE16-NEXT: v_and_b32_e32 v39, 0xffff, v10
-; GFX11-TRUE16-NEXT: v_and_b32_e32 v48, 0xffff, v9
-; GFX11-TRUE16-NEXT: v_lshl_or_b32 v28, v51, 16, v29
-; GFX11-TRUE16-NEXT: v_lshl_or_b32 v29, v50, 16, v52
-; GFX11-TRUE16-NEXT: v_and_b32_e32 v11, 0xffff, v11
-; GFX11-TRUE16-NEXT: v_lshl_or_b32 v9, v38, 16, v13
-; GFX11-TRUE16-NEXT: v_lshl_or_b32 v10, v37, 16, v12
-; GFX11-TRUE16-NEXT: v_lshl_or_b32 v12, v35, 16, v39
-; GFX11-TRUE16-NEXT: v_lshl_or_b32 v13, v34, 16, v48
-; GFX11-TRUE16-NEXT: v_and_b32_e32 v17, 0xffff, v17
-; GFX11-TRUE16-NEXT: v_and_b32_e32 v16, 0xffff, v16
-; GFX11-TRUE16-NEXT: v_and_b32_e32 v34, 0xffff, v15
-; GFX11-TRUE16-NEXT: v_and_b32_e32 v35, 0xffff, v14
-; GFX11-TRUE16-NEXT: v_and_b32_e32 v4, 0xffff, v5
-; GFX11-TRUE16-NEXT: v_lshl_or_b32 v11, v36, 16, v11
-; GFX11-TRUE16-NEXT: v_lshl_or_b32 v14, v33, 16, v17
-; GFX11-TRUE16-NEXT: v_lshl_or_b32 v15, v32, 16, v16
-; GFX11-TRUE16-NEXT: v_lshl_or_b32 v16, v31, 16, v34
-; GFX11-TRUE16-NEXT: v_lshl_or_b32 v17, v30, 16, v35
-; GFX11-TRUE16-NEXT: v_lshl_or_b32 v19, v19, 16, v0
-; GFX11-TRUE16-NEXT: v_lshl_or_b32 v20, v20, 16, v1
-; GFX11-TRUE16-NEXT: v_lshl_or_b32 v21, v21, 16, v2
-; GFX11-TRUE16-NEXT: v_lshl_or_b32 v23, v23, 16, v4
-; GFX11-TRUE16-NEXT: v_dual_mov_b32 v0, v24 :: v_dual_mov_b32 v1, v25
-; GFX11-TRUE16-NEXT: v_mov_b32_e32 v2, v26
-; GFX11-TRUE16-NEXT: v_dual_mov_b32 v4, v28 :: v_dual_mov_b32 v5, v29
+; GFX11-TRUE16-NEXT: s_delay_alu instid0(VALU_DEP_1) | instskip(NEXT) | instid1(VALU_DEP_3)
+; GFX11-TRUE16-NEXT: v_dual_mov_b32 v55, v55 :: v_dual_mov_b32 v54, v54
+; GFX11-TRUE16-NEXT: v_dual_mov_b32 v53, v53 :: v_dual_mov_b32 v52, v52
+; GFX11-TRUE16-NEXT: v_dual_mov_b32 v51, v51 :: v_dual_mov_b32 v50, v50
+; GFX11-TRUE16-NEXT: v_dual_mov_b32 v49, v49 :: v_dual_mov_b32 v48, v48
+; GFX11-TRUE16-NEXT: v_dual_mov_b32 v39, v39 :: v_dual_mov_b32 v38, v38
+; GFX11-TRUE16-NEXT: v_dual_mov_b32 v37, v37 :: v_dual_mov_b32 v36, v36
+; GFX11-TRUE16-NEXT: v_dual_mov_b32 v35, v35 :: v_dual_mov_b32 v34, v34
+; GFX11-TRUE16-NEXT: v_dual_mov_b32 v33, v33 :: v_dual_mov_b32 v32, v32
+; GFX11-TRUE16-NEXT: v_dual_mov_b32 v31, v31 :: v_dual_mov_b32 v30, v30
+; GFX11-TRUE16-NEXT: v_dual_mov_b32 v29, v29 :: v_dual_mov_b32 v28, v28
+; GFX11-TRUE16-NEXT: v_dual_mov_b32 v27, v27 :: v_dual_mov_b32 v26, v26
+; GFX11-TRUE16-NEXT: v_dual_mov_b32 v25, v25 :: v_dual_mov_b32 v24, v24
+; GFX11-TRUE16-NEXT: v_mov_b16_e32 v0.h, v55.l
+; GFX11-TRUE16-NEXT: v_mov_b16_e32 v1.h, v54.l
+; GFX11-TRUE16-NEXT: v_mov_b16_e32 v2.h, v53.l
+; GFX11-TRUE16-NEXT: v_mov_b16_e32 v3.h, v52.l
+; GFX11-TRUE16-NEXT: v_mov_b16_e32 v4.h, v51.l
+; GFX11-TRUE16-NEXT: v_mov_b16_e32 v5.h, v50.l
+; GFX11-TRUE16-NEXT: v_mov_b16_e32 v6.h, v49.l
+; GFX11-TRUE16-NEXT: v_mov_b16_e32 v7.h, v48.l
+; GFX11-TRUE16-NEXT: v_mov_b16_e32 v8.h, v39.l
+; GFX11-TRUE16-NEXT: v_mov_b16_e32 v9.h, v38.l
+; GFX11-TRUE16-NEXT: v_mov_b16_e32 v10.h, v37.l
+; GFX11-TRUE16-NEXT: v_mov_b16_e32 v11.h, v36.l
+; GFX11-TRUE16-NEXT: v_mov_b16_e32 v12.h, v35.l
+; GFX11-TRUE16-NEXT: v_mov_b16_e32 v13.h, v34.l
+; GFX11-TRUE16-NEXT: v_mov_b16_e32 v14.h, v33.l
+; GFX11-TRUE16-NEXT: v_mov_b16_e32 v15.h, v32.l
+; GFX11-TRUE16-NEXT: v_mov_b16_e32 v16.h, v31.l
+; GFX11-TRUE16-NEXT: v_mov_b16_e32 v17.h, v30.l
+; GFX11-TRUE16-NEXT: v_mov_b16_e32 v18.h, v29.l
+; GFX11-TRUE16-NEXT: v_mov_b16_e32 v19.h, v28.l
+; GFX11-TRUE16-NEXT: v_mov_b16_e32 v20.h, v27.l
+; GFX11-TRUE16-NEXT: v_mov_b16_e32 v21.h, v26.l
+; GFX11-TRUE16-NEXT: v_mov_b16_e32 v22.h, v25.l
+; GFX11-TRUE16-NEXT: v_mov_b16_e32 v23.h, v24.l
; GFX11-TRUE16-NEXT: s_setpc_b64 s[30:31]
;
; GFX11-FAKE16-LABEL: bitcast_v48i16_to_v48f16_scalar:
@@ -40168,19 +42106,11 @@ define inreg <48 x i16> @bitcast_v48f16_to_v48i16_scalar(<48 x half> inreg %a, i
; GFX11-TRUE16-LABEL: bitcast_v48f16_to_v48i16_scalar:
; GFX11-TRUE16: ; %bb.0:
; GFX11-TRUE16-NEXT: s_waitcnt vmcnt(0) expcnt(0) lgkmcnt(0)
-; GFX11-TRUE16-NEXT: v_mov_b16_e32 v23.h, 0
; GFX11-TRUE16-NEXT: v_cmp_ne_u32_e32 vcc_lo, 0, v6
-; GFX11-TRUE16-NEXT: v_mov_b16_e32 v23.l, v5.h
-; GFX11-TRUE16-NEXT: v_mov_b16_e32 v22.l, v4.h
-; GFX11-TRUE16-NEXT: v_mov_b16_e32 v21.l, v3.h
-; GFX11-TRUE16-NEXT: v_mov_b16_e32 v22.h, v23.h
-; GFX11-TRUE16-NEXT: v_mov_b16_e32 v21.h, v23.h
-; GFX11-TRUE16-NEXT: v_mov_b16_e32 v20.l, v2.h
-; GFX11-TRUE16-NEXT: v_mov_b16_e32 v20.h, v23.h
-; GFX11-TRUE16-NEXT: v_mov_b16_e32 v19.l, v1.h
-; GFX11-TRUE16-NEXT: v_mov_b16_e32 v19.h, v23.h
-; GFX11-TRUE16-NEXT: v_mov_b16_e32 v18.l, v0.h
-; GFX11-TRUE16-NEXT: v_mov_b16_e32 v18.h, v23.h
+; GFX11-TRUE16-NEXT: v_dual_mov_b32 v23, v5 :: v_dual_mov_b32 v22, v4
+; GFX11-TRUE16-NEXT: v_dual_mov_b32 v21, v3 :: v_dual_mov_b32 v20, v2
+; GFX11-TRUE16-NEXT: v_dual_mov_b32 v19, v1 :: v_dual_mov_b32 v18, v0
+; GFX11-TRUE16-NEXT: v_mov_b16_e32 v24.h, 0
; GFX11-TRUE16-NEXT: s_lshr_b32 s45, s29, 16
; GFX11-TRUE16-NEXT: s_lshr_b32 s44, s28, 16
; GFX11-TRUE16-NEXT: s_lshr_b32 s43, s27, 16
@@ -40202,22 +42132,21 @@ define inreg <48 x i16> @bitcast_v48f16_to_v48i16_scalar(<48 x half> inreg %a, i
; GFX11-TRUE16-NEXT: s_mov_b32 s46, 0
; GFX11-TRUE16-NEXT: s_and_b32 s47, vcc_lo, exec_lo
; GFX11-TRUE16-NEXT: s_cbranch_scc0 .LBB59_3
-; GFX11-TRUE16-NEXT: ; %bb.1: ; %Flow
+; GFX11-TRUE16-NEXT: ; %bb.1: ; %cmp.false
+; GFX11-TRUE16-NEXT: v_mov_b16_e32 v24.l, v23.h
+; GFX11-TRUE16-NEXT: v_mov_b16_e32 v25.l, v22.h
+; GFX11-TRUE16-NEXT: v_mov_b16_e32 v25.h, v24.h
+; GFX11-TRUE16-NEXT: v_mov_b16_e32 v26.l, v21.h
+; GFX11-TRUE16-NEXT: v_mov_b16_e32 v26.h, v24.h
+; GFX11-TRUE16-NEXT: v_mov_b16_e32 v27.l, v20.h
+; GFX11-TRUE16-NEXT: v_mov_b16_e32 v27.h, v24.h
+; GFX11-TRUE16-NEXT: v_mov_b16_e32 v28.l, v19.h
+; GFX11-TRUE16-NEXT: v_mov_b16_e32 v28.h, v24.h
+; GFX11-TRUE16-NEXT: v_mov_b16_e32 v29.l, v18.h
+; GFX11-TRUE16-NEXT: v_mov_b16_e32 v29.h, v24.h
; GFX11-TRUE16-NEXT: s_and_not1_b32 vcc_lo, exec_lo, s46
; GFX11-TRUE16-NEXT: s_cbranch_vccnz .LBB59_4
; GFX11-TRUE16-NEXT: .LBB59_2: ; %cmp.true
-; GFX11-TRUE16-NEXT: v_and_b32_e32 v5, 0xffff, v5
-; GFX11-TRUE16-NEXT: v_and_b32_e32 v4, 0xffff, v4
-; GFX11-TRUE16-NEXT: v_and_b32_e32 v3, 0xffff, v3
-; GFX11-TRUE16-NEXT: v_and_b32_e32 v2, 0xffff, v2
-; GFX11-TRUE16-NEXT: v_and_b32_e32 v1, 0xffff, v1
-; GFX11-TRUE16-NEXT: v_and_b32_e32 v0, 0xffff, v0
-; GFX11-TRUE16-NEXT: v_lshl_or_b32 v5, v23, 16, v5
-; GFX11-TRUE16-NEXT: v_lshl_or_b32 v4, v22, 16, v4
-; GFX11-TRUE16-NEXT: v_lshl_or_b32 v3, v21, 16, v3
-; GFX11-TRUE16-NEXT: v_lshl_or_b32 v2, v20, 16, v2
-; GFX11-TRUE16-NEXT: v_lshl_or_b32 v1, v19, 16, v1
-; GFX11-TRUE16-NEXT: v_lshl_or_b32 v0, v18, 16, v0
; GFX11-TRUE16-NEXT: s_pack_ll_b32_b16 s29, s29, s45
; GFX11-TRUE16-NEXT: s_pack_ll_b32_b16 s28, s28, s44
; GFX11-TRUE16-NEXT: s_pack_ll_b32_b16 s27, s27, s43
@@ -40236,67 +42165,73 @@ define inreg <48 x i16> @bitcast_v48f16_to_v48i16_scalar(<48 x half> inreg %a, i
; GFX11-TRUE16-NEXT: s_pack_ll_b32_b16 s2, s2, s7
; GFX11-TRUE16-NEXT: s_pack_ll_b32_b16 s0, s0, s14
; GFX11-TRUE16-NEXT: s_pack_ll_b32_b16 s1, s1, s4
-; GFX11-TRUE16-NEXT: v_pk_add_f16 v5, 0x200, v5 op_sel_hi:[0,1]
-; GFX11-TRUE16-NEXT: v_pk_add_f16 v4, 0x200, v4 op_sel_hi:[0,1]
-; GFX11-TRUE16-NEXT: v_pk_add_f16 v3, 0x200, v3 op_sel_hi:[0,1]
-; GFX11-TRUE16-NEXT: v_pk_add_f16 v2, 0x200, v2 op_sel_hi:[0,1]
-; GFX11-TRUE16-NEXT: v_pk_add_f16 v1, 0x200, v1 op_sel_hi:[0,1]
-; GFX11-TRUE16-NEXT: v_pk_add_f16 v0, 0x200, v0 op_sel_hi:[0,1]
-; GFX11-TRUE16-NEXT: v_pk_add_f16 v14, 0x200, s29 op_sel_hi:[0,1]
-; GFX11-TRUE16-NEXT: v_pk_add_f16 v15, 0x200, s28 op_sel_hi:[0,1]
-; GFX11-TRUE16-NEXT: v_pk_add_f16 v16, 0x200, s27 op_sel_hi:[0,1]
-; GFX11-TRUE16-NEXT: v_pk_add_f16 v17, 0x200, s26 op_sel_hi:[0,1]
-; GFX11-TRUE16-NEXT: v_pk_add_f16 v9, 0x200, s25 op_sel_hi:[0,1]
-; GFX11-TRUE16-NEXT: v_pk_add_f16 v10, 0x200, s24 op_sel_hi:[0,1]
+; GFX11-TRUE16-NEXT: v_pk_add_f16 v23, 0x200, v23 op_sel_hi:[0,1]
+; GFX11-TRUE16-NEXT: v_pk_add_f16 v22, 0x200, v22 op_sel_hi:[0,1]
+; GFX11-TRUE16-NEXT: v_pk_add_f16 v21, 0x200, v21 op_sel_hi:[0,1]
+; GFX11-TRUE16-NEXT: v_pk_add_f16 v20, 0x200, v20 op_sel_hi:[0,1]
+; GFX11-TRUE16-NEXT: v_pk_add_f16 v19, 0x200, v19 op_sel_hi:[0,1]
+; GFX11-TRUE16-NEXT: v_pk_add_f16 v18, 0x200, v18 op_sel_hi:[0,1]
+; GFX11-TRUE16-NEXT: v_pk_add_f16 v17, 0x200, s29 op_sel_hi:[0,1]
+; GFX11-TRUE16-NEXT: v_pk_add_f16 v16, 0x200, s28 op_sel_hi:[0,1]
+; GFX11-TRUE16-NEXT: v_pk_add_f16 v15, 0x200, s27 op_sel_hi:[0,1]
+; GFX11-TRUE16-NEXT: v_pk_add_f16 v14, 0x200, s26 op_sel_hi:[0,1]
+; GFX11-TRUE16-NEXT: v_pk_add_f16 v13, 0x200, s25 op_sel_hi:[0,1]
+; GFX11-TRUE16-NEXT: v_pk_add_f16 v12, 0x200, s24 op_sel_hi:[0,1]
; GFX11-TRUE16-NEXT: v_pk_add_f16 v11, 0x200, s15 op_sel_hi:[0,1]
-; GFX11-TRUE16-NEXT: v_pk_add_f16 v12, 0x200, s13 op_sel_hi:[0,1]
-; GFX11-TRUE16-NEXT: v_pk_add_f16 v13, 0x200, s12 op_sel_hi:[0,1]
-; GFX11-TRUE16-NEXT: v_pk_add_f16 v6, 0x200, s11 op_sel_hi:[0,1]
+; GFX11-TRUE16-NEXT: v_pk_add_f16 v10, 0x200, s13 op_sel_hi:[0,1]
+; GFX11-TRUE16-NEXT: v_pk_add_f16 v9, 0x200, s12 op_sel_hi:[0,1]
+; GFX11-TRUE16-NEXT: v_pk_add_f16 v8, 0x200, s11 op_sel_hi:[0,1]
; GFX11-TRUE16-NEXT: v_pk_add_f16 v7, 0x200, s10 op_sel_hi:[0,1]
-; GFX11-TRUE16-NEXT: v_pk_add_f16 v8, 0x200, s9 op_sel_hi:[0,1]
-; GFX11-TRUE16-NEXT: v_pk_add_f16 v28, 0x200, s8 op_sel_hi:[0,1]
-; GFX11-TRUE16-NEXT: v_pk_add_f16 v27, 0x200, s0 op_sel_hi:[0,1]
-; GFX11-TRUE16-NEXT: v_pk_add_f16 v26, 0x200, s1 op_sel_hi:[0,1]
-; GFX11-TRUE16-NEXT: v_pk_add_f16 v25, 0x200, s2 op_sel_hi:[0,1]
-; GFX11-TRUE16-NEXT: v_pk_add_f16 v24, 0x200, s3 op_sel_hi:[0,1]
-; GFX11-TRUE16-NEXT: v_pk_add_f16 v29, 0x200, s6 op_sel_hi:[0,1]
-; GFX11-TRUE16-NEXT: v_lshrrev_b32_e32 v55, 16, v27
-; GFX11-TRUE16-NEXT: v_lshrrev_b32_e32 v54, 16, v26
-; GFX11-TRUE16-NEXT: v_lshrrev_b32_e32 v53, 16, v25
-; GFX11-TRUE16-NEXT: v_lshrrev_b32_e32 v52, 16, v24
-; GFX11-TRUE16-NEXT: v_lshrrev_b32_e32 v51, 16, v29
-; GFX11-TRUE16-NEXT: v_lshrrev_b32_e32 v50, 16, v28
-; GFX11-TRUE16-NEXT: v_lshrrev_b32_e32 v49, 16, v8
+; GFX11-TRUE16-NEXT: v_pk_add_f16 v6, 0x200, s9 op_sel_hi:[0,1]
+; GFX11-TRUE16-NEXT: v_pk_add_f16 v5, 0x200, s8 op_sel_hi:[0,1]
+; GFX11-TRUE16-NEXT: v_pk_add_f16 v0, 0x200, s0 op_sel_hi:[0,1]
+; GFX11-TRUE16-NEXT: v_pk_add_f16 v1, 0x200, s1 op_sel_hi:[0,1]
+; GFX11-TRUE16-NEXT: v_pk_add_f16 v2, 0x200, s2 op_sel_hi:[0,1]
+; GFX11-TRUE16-NEXT: v_pk_add_f16 v3, 0x200, s3 op_sel_hi:[0,1]
+; GFX11-TRUE16-NEXT: v_pk_add_f16 v4, 0x200, s6 op_sel_hi:[0,1]
+; GFX11-TRUE16-NEXT: v_lshrrev_b32_e32 v55, 16, v0
+; GFX11-TRUE16-NEXT: v_lshrrev_b32_e32 v54, 16, v1
+; GFX11-TRUE16-NEXT: v_lshrrev_b32_e32 v53, 16, v2
+; GFX11-TRUE16-NEXT: v_lshrrev_b32_e32 v52, 16, v3
+; GFX11-TRUE16-NEXT: v_lshrrev_b32_e32 v51, 16, v4
+; GFX11-TRUE16-NEXT: v_lshrrev_b32_e32 v50, 16, v5
+; GFX11-TRUE16-NEXT: v_lshrrev_b32_e32 v49, 16, v6
; GFX11-TRUE16-NEXT: v_lshrrev_b32_e32 v48, 16, v7
-; GFX11-TRUE16-NEXT: v_lshrrev_b32_e32 v39, 16, v6
-; GFX11-TRUE16-NEXT: v_lshrrev_b32_e32 v38, 16, v13
-; GFX11-TRUE16-NEXT: v_lshrrev_b32_e32 v37, 16, v12
+; GFX11-TRUE16-NEXT: v_lshrrev_b32_e32 v39, 16, v8
+; GFX11-TRUE16-NEXT: v_lshrrev_b32_e32 v38, 16, v9
+; GFX11-TRUE16-NEXT: v_lshrrev_b32_e32 v37, 16, v10
; GFX11-TRUE16-NEXT: v_lshrrev_b32_e32 v36, 16, v11
-; GFX11-TRUE16-NEXT: v_lshrrev_b32_e32 v35, 16, v10
-; GFX11-TRUE16-NEXT: v_lshrrev_b32_e32 v34, 16, v9
-; GFX11-TRUE16-NEXT: v_lshrrev_b32_e32 v33, 16, v17
-; GFX11-TRUE16-NEXT: v_lshrrev_b32_e32 v32, 16, v16
-; GFX11-TRUE16-NEXT: v_lshrrev_b32_e32 v31, 16, v15
-; GFX11-TRUE16-NEXT: v_lshrrev_b32_e32 v30, 16, v14
-; GFX11-TRUE16-NEXT: v_lshrrev_b32_e32 v18, 16, v0
-; GFX11-TRUE16-NEXT: v_lshrrev_b32_e32 v19, 16, v1
-; GFX11-TRUE16-NEXT: v_lshrrev_b32_e32 v20, 16, v2
-; GFX11-TRUE16-NEXT: v_lshrrev_b32_e32 v21, 16, v3
-; GFX11-TRUE16-NEXT: v_lshrrev_b32_e32 v22, 16, v4
-; GFX11-TRUE16-NEXT: v_lshrrev_b32_e32 v23, 16, v5
+; GFX11-TRUE16-NEXT: v_lshrrev_b32_e32 v35, 16, v12
+; GFX11-TRUE16-NEXT: v_lshrrev_b32_e32 v34, 16, v13
+; GFX11-TRUE16-NEXT: v_lshrrev_b32_e32 v33, 16, v14
+; GFX11-TRUE16-NEXT: v_lshrrev_b32_e32 v32, 16, v15
+; GFX11-TRUE16-NEXT: v_lshrrev_b32_e32 v31, 16, v16
+; GFX11-TRUE16-NEXT: v_lshrrev_b32_e32 v30, 16, v17
+; GFX11-TRUE16-NEXT: v_lshrrev_b32_e32 v29, 16, v18
+; GFX11-TRUE16-NEXT: v_lshrrev_b32_e32 v28, 16, v19
+; GFX11-TRUE16-NEXT: v_lshrrev_b32_e32 v27, 16, v20
+; GFX11-TRUE16-NEXT: v_lshrrev_b32_e32 v26, 16, v21
+; GFX11-TRUE16-NEXT: v_lshrrev_b32_e32 v25, 16, v22
+; GFX11-TRUE16-NEXT: v_lshrrev_b32_e32 v24, 16, v23
; GFX11-TRUE16-NEXT: s_branch .LBB59_5
; GFX11-TRUE16-NEXT: .LBB59_3:
+; GFX11-TRUE16-NEXT: ; implicit-def: $vgpr29
+; GFX11-TRUE16-NEXT: ; implicit-def: $vgpr28
+; GFX11-TRUE16-NEXT: ; implicit-def: $vgpr27
+; GFX11-TRUE16-NEXT: ; implicit-def: $vgpr26
+; GFX11-TRUE16-NEXT: ; implicit-def: $vgpr25
+; GFX11-TRUE16-NEXT: ; implicit-def: $vgpr24
; GFX11-TRUE16-NEXT: s_branch .LBB59_2
; GFX11-TRUE16-NEXT: .LBB59_4:
-; GFX11-TRUE16-NEXT: v_dual_mov_b32 v14, s29 :: v_dual_mov_b32 v15, s28
-; GFX11-TRUE16-NEXT: v_dual_mov_b32 v16, s27 :: v_dual_mov_b32 v17, s26
-; GFX11-TRUE16-NEXT: v_dual_mov_b32 v9, s25 :: v_dual_mov_b32 v10, s24
-; GFX11-TRUE16-NEXT: v_dual_mov_b32 v11, s23 :: v_dual_mov_b32 v12, s22
-; GFX11-TRUE16-NEXT: v_dual_mov_b32 v13, s21 :: v_dual_mov_b32 v6, s20
-; GFX11-TRUE16-NEXT: v_dual_mov_b32 v7, s19 :: v_dual_mov_b32 v8, s18
-; GFX11-TRUE16-NEXT: v_dual_mov_b32 v28, s17 :: v_dual_mov_b32 v29, s16
-; GFX11-TRUE16-NEXT: v_dual_mov_b32 v24, s3 :: v_dual_mov_b32 v25, s2
-; GFX11-TRUE16-NEXT: v_dual_mov_b32 v26, s1 :: v_dual_mov_b32 v27, s0
+; GFX11-TRUE16-NEXT: v_dual_mov_b32 v17, s29 :: v_dual_mov_b32 v16, s28
+; GFX11-TRUE16-NEXT: v_dual_mov_b32 v15, s27 :: v_dual_mov_b32 v14, s26
+; GFX11-TRUE16-NEXT: v_dual_mov_b32 v13, s25 :: v_dual_mov_b32 v12, s24
+; GFX11-TRUE16-NEXT: v_dual_mov_b32 v11, s23 :: v_dual_mov_b32 v10, s22
+; GFX11-TRUE16-NEXT: v_dual_mov_b32 v9, s21 :: v_dual_mov_b32 v8, s20
+; GFX11-TRUE16-NEXT: v_dual_mov_b32 v7, s19 :: v_dual_mov_b32 v6, s18
+; GFX11-TRUE16-NEXT: v_dual_mov_b32 v5, s17 :: v_dual_mov_b32 v4, s16
+; GFX11-TRUE16-NEXT: v_dual_mov_b32 v3, s3 :: v_dual_mov_b32 v2, s2
+; GFX11-TRUE16-NEXT: v_dual_mov_b32 v1, s1 :: v_dual_mov_b32 v0, s0
; GFX11-TRUE16-NEXT: v_dual_mov_b32 v30, s45 :: v_dual_mov_b32 v31, s44
; GFX11-TRUE16-NEXT: v_dual_mov_b32 v32, s43 :: v_dual_mov_b32 v33, s42
; GFX11-TRUE16-NEXT: v_dual_mov_b32 v34, s41 :: v_dual_mov_b32 v35, s40
@@ -40307,58 +42242,43 @@ define inreg <48 x i16> @bitcast_v48f16_to_v48i16_scalar(<48 x half> inreg %a, i
; GFX11-TRUE16-NEXT: v_dual_mov_b32 v52, s5 :: v_dual_mov_b32 v53, s7
; GFX11-TRUE16-NEXT: v_dual_mov_b32 v54, s4 :: v_dual_mov_b32 v55, s14
; GFX11-TRUE16-NEXT: .LBB59_5: ; %end
-; GFX11-TRUE16-NEXT: v_and_b32_e32 v27, 0xffff, v27
-; GFX11-TRUE16-NEXT: v_and_b32_e32 v65, 0xffff, v24
-; GFX11-TRUE16-NEXT: v_and_b32_e32 v0, 0xffff, v0
-; GFX11-TRUE16-NEXT: v_and_b32_e32 v26, 0xffff, v26
-; GFX11-TRUE16-NEXT: v_and_b32_e32 v8, 0xffff, v8
-; GFX11-TRUE16-NEXT: v_lshl_or_b32 v24, v55, 16, v27
-; GFX11-TRUE16-NEXT: v_lshl_or_b32 v27, v52, 16, v65
-; GFX11-TRUE16-NEXT: v_and_b32_e32 v64, 0xffff, v25
-; GFX11-TRUE16-NEXT: v_lshl_or_b32 v18, v18, 16, v0
-; GFX11-TRUE16-NEXT: v_and_b32_e32 v0, 0xffff, v1
-; GFX11-TRUE16-NEXT: v_and_b32_e32 v1, 0xffff, v2
-; GFX11-TRUE16-NEXT: v_and_b32_e32 v2, 0xffff, v3
-; GFX11-TRUE16-NEXT: v_and_b32_e32 v3, 0xffff, v4
-; GFX11-TRUE16-NEXT: v_lshl_or_b32 v25, v54, 16, v26
-; GFX11-TRUE16-NEXT: v_and_b32_e32 v7, 0xffff, v7
-; GFX11-TRUE16-NEXT: v_and_b32_e32 v29, 0xffff, v29
-; GFX11-TRUE16-NEXT: v_and_b32_e32 v52, 0xffff, v28
-; GFX11-TRUE16-NEXT: v_lshl_or_b32 v22, v22, 16, v3
-; GFX11-TRUE16-NEXT: v_mov_b32_e32 v3, v27
-; GFX11-TRUE16-NEXT: v_lshl_or_b32 v26, v53, 16, v64
-; GFX11-TRUE16-NEXT: v_and_b32_e32 v53, 0xffff, v6
-; GFX11-TRUE16-NEXT: v_lshl_or_b32 v6, v49, 16, v8
-; GFX11-TRUE16-NEXT: v_lshl_or_b32 v7, v48, 16, v7
-; GFX11-TRUE16-NEXT: v_and_b32_e32 v13, 0xffff, v13
-; GFX11-TRUE16-NEXT: v_and_b32_e32 v12, 0xffff, v12
-; GFX11-TRUE16-NEXT: v_lshl_or_b32 v8, v39, 16, v53
-; GFX11-TRUE16-NEXT: v_and_b32_e32 v39, 0xffff, v10
-; GFX11-TRUE16-NEXT: v_and_b32_e32 v48, 0xffff, v9
-; GFX11-TRUE16-NEXT: v_lshl_or_b32 v28, v51, 16, v29
-; GFX11-TRUE16-NEXT: v_lshl_or_b32 v29, v50, 16, v52
-; GFX11-TRUE16-NEXT: v_and_b32_e32 v11, 0xffff, v11
-; GFX11-TRUE16-NEXT: v_lshl_or_b32 v9, v38, 16, v13
-; GFX11-TRUE16-NEXT: v_lshl_or_b32 v10, v37, 16, v12
-; GFX11-TRUE16-NEXT: v_lshl_or_b32 v12, v35, 16, v39
-; GFX11-TRUE16-NEXT: v_lshl_or_b32 v13, v34, 16, v48
-; GFX11-TRUE16-NEXT: v_and_b32_e32 v17, 0xffff, v17
-; GFX11-TRUE16-NEXT: v_and_b32_e32 v16, 0xffff, v16
-; GFX11-TRUE16-NEXT: v_and_b32_e32 v34, 0xffff, v15
-; GFX11-TRUE16-NEXT: v_and_b32_e32 v35, 0xffff, v14
-; GFX11-TRUE16-NEXT: v_and_b32_e32 v4, 0xffff, v5
-; GFX11-TRUE16-NEXT: v_lshl_or_b32 v11, v36, 16, v11
-; GFX11-TRUE16-NEXT: v_lshl_or_b32 v14, v33, 16, v17
-; GFX11-TRUE16-NEXT: v_lshl_or_b32 v15, v32, 16, v16
-; GFX11-TRUE16-NEXT: v_lshl_or_b32 v16, v31, 16, v34
-; GFX11-TRUE16-NEXT: v_lshl_or_b32 v17, v30, 16, v35
-; GFX11-TRUE16-NEXT: v_lshl_or_b32 v19, v19, 16, v0
-; GFX11-TRUE16-NEXT: v_lshl_or_b32 v20, v20, 16, v1
-; GFX11-TRUE16-NEXT: v_lshl_or_b32 v21, v21, 16, v2
-; GFX11-TRUE16-NEXT: v_lshl_or_b32 v23, v23, 16, v4
-; GFX11-TRUE16-NEXT: v_dual_mov_b32 v0, v24 :: v_dual_mov_b32 v1, v25
-; GFX11-TRUE16-NEXT: v_mov_b32_e32 v2, v26
-; GFX11-TRUE16-NEXT: v_dual_mov_b32 v4, v28 :: v_dual_mov_b32 v5, v29
+; GFX11-TRUE16-NEXT: s_delay_alu instid0(VALU_DEP_1) | instskip(NEXT) | instid1(VALU_DEP_3)
+; GFX11-TRUE16-NEXT: v_dual_mov_b32 v55, v55 :: v_dual_mov_b32 v54, v54
+; GFX11-TRUE16-NEXT: v_dual_mov_b32 v53, v53 :: v_dual_mov_b32 v52, v52
+; GFX11-TRUE16-NEXT: v_dual_mov_b32 v51, v51 :: v_dual_mov_b32 v50, v50
+; GFX11-TRUE16-NEXT: v_dual_mov_b32 v49, v49 :: v_dual_mov_b32 v48, v48
+; GFX11-TRUE16-NEXT: v_dual_mov_b32 v39, v39 :: v_dual_mov_b32 v38, v38
+; GFX11-TRUE16-NEXT: v_dual_mov_b32 v37, v37 :: v_dual_mov_b32 v36, v36
+; GFX11-TRUE16-NEXT: v_dual_mov_b32 v35, v35 :: v_dual_mov_b32 v34, v34
+; GFX11-TRUE16-NEXT: v_dual_mov_b32 v33, v33 :: v_dual_mov_b32 v32, v32
+; GFX11-TRUE16-NEXT: v_dual_mov_b32 v31, v31 :: v_dual_mov_b32 v30, v30
+; GFX11-TRUE16-NEXT: v_dual_mov_b32 v29, v29 :: v_dual_mov_b32 v28, v28
+; GFX11-TRUE16-NEXT: v_dual_mov_b32 v27, v27 :: v_dual_mov_b32 v26, v26
+; GFX11-TRUE16-NEXT: v_dual_mov_b32 v25, v25 :: v_dual_mov_b32 v24, v24
+; GFX11-TRUE16-NEXT: v_mov_b16_e32 v0.h, v55.l
+; GFX11-TRUE16-NEXT: v_mov_b16_e32 v1.h, v54.l
+; GFX11-TRUE16-NEXT: v_mov_b16_e32 v2.h, v53.l
+; GFX11-TRUE16-NEXT: v_mov_b16_e32 v3.h, v52.l
+; GFX11-TRUE16-NEXT: v_mov_b16_e32 v4.h, v51.l
+; GFX11-TRUE16-NEXT: v_mov_b16_e32 v5.h, v50.l
+; GFX11-TRUE16-NEXT: v_mov_b16_e32 v6.h, v49.l
+; GFX11-TRUE16-NEXT: v_mov_b16_e32 v7.h, v48.l
+; GFX11-TRUE16-NEXT: v_mov_b16_e32 v8.h, v39.l
+; GFX11-TRUE16-NEXT: v_mov_b16_e32 v9.h, v38.l
+; GFX11-TRUE16-NEXT: v_mov_b16_e32 v10.h, v37.l
+; GFX11-TRUE16-NEXT: v_mov_b16_e32 v11.h, v36.l
+; GFX11-TRUE16-NEXT: v_mov_b16_e32 v12.h, v35.l
+; GFX11-TRUE16-NEXT: v_mov_b16_e32 v13.h, v34.l
+; GFX11-TRUE16-NEXT: v_mov_b16_e32 v14.h, v33.l
+; GFX11-TRUE16-NEXT: v_mov_b16_e32 v15.h, v32.l
+; GFX11-TRUE16-NEXT: v_mov_b16_e32 v16.h, v31.l
+; GFX11-TRUE16-NEXT: v_mov_b16_e32 v17.h, v30.l
+; GFX11-TRUE16-NEXT: v_mov_b16_e32 v18.h, v29.l
+; GFX11-TRUE16-NEXT: v_mov_b16_e32 v19.h, v28.l
+; GFX11-TRUE16-NEXT: v_mov_b16_e32 v20.h, v27.l
+; GFX11-TRUE16-NEXT: v_mov_b16_e32 v21.h, v26.l
+; GFX11-TRUE16-NEXT: v_mov_b16_e32 v22.h, v25.l
+; GFX11-TRUE16-NEXT: v_mov_b16_e32 v23.h, v24.l
; GFX11-TRUE16-NEXT: s_setpc_b64 s[30:31]
;
; GFX11-FAKE16-LABEL: bitcast_v48f16_to_v48i16_scalar:
diff --git a/llvm/test/CodeGen/AMDGPU/amdgcn.bitcast.832bit.ll b/llvm/test/CodeGen/AMDGPU/amdgcn.bitcast.832bit.ll
index 29005a4..07cdbef 100644
--- a/llvm/test/CodeGen/AMDGPU/amdgcn.bitcast.832bit.ll
+++ b/llvm/test/CodeGen/AMDGPU/amdgcn.bitcast.832bit.ll
@@ -6286,129 +6286,295 @@ define inreg <26 x i32> @bitcast_v52i16_to_v26i32_scalar(<52 x i16> inreg %a, i3
; GFX11-TRUE16-LABEL: bitcast_v52i16_to_v26i32_scalar:
; GFX11-TRUE16: ; %bb.0:
; GFX11-TRUE16-NEXT: s_waitcnt vmcnt(0) expcnt(0) lgkmcnt(0)
-; GFX11-TRUE16-NEXT: v_mov_b16_e32 v32.h, 0
; GFX11-TRUE16-NEXT: v_cmp_ne_u32_e32 vcc_lo, 0, v8
-; GFX11-TRUE16-NEXT: v_mov_b16_e32 v32.l, v7.h
-; GFX11-TRUE16-NEXT: v_mov_b16_e32 v33.l, v6.h
-; GFX11-TRUE16-NEXT: v_mov_b16_e32 v34.l, v5.h
-; GFX11-TRUE16-NEXT: v_mov_b16_e32 v33.h, v32.h
-; GFX11-TRUE16-NEXT: v_mov_b16_e32 v34.h, v32.h
-; GFX11-TRUE16-NEXT: v_mov_b16_e32 v35.l, v4.h
-; GFX11-TRUE16-NEXT: v_mov_b16_e32 v35.h, v32.h
-; GFX11-TRUE16-NEXT: v_mov_b16_e32 v36.l, v3.h
-; GFX11-TRUE16-NEXT: v_mov_b16_e32 v36.h, v32.h
-; GFX11-TRUE16-NEXT: v_mov_b16_e32 v37.l, v2.h
-; GFX11-TRUE16-NEXT: v_mov_b16_e32 v37.h, v32.h
-; GFX11-TRUE16-NEXT: v_mov_b16_e32 v38.l, v1.h
-; GFX11-TRUE16-NEXT: v_mov_b16_e32 v38.h, v32.h
-; GFX11-TRUE16-NEXT: v_mov_b16_e32 v39.l, v0.h
-; GFX11-TRUE16-NEXT: v_mov_b16_e32 v39.h, v32.h
-; GFX11-TRUE16-NEXT: v_and_b32_e32 v55, 0xffff, v0
-; GFX11-TRUE16-NEXT: v_and_b32_e32 v54, 0xffff, v1
-; GFX11-TRUE16-NEXT: v_and_b32_e32 v53, 0xffff, v2
-; GFX11-TRUE16-NEXT: v_and_b32_e32 v52, 0xffff, v3
-; GFX11-TRUE16-NEXT: v_and_b32_e32 v51, 0xffff, v4
-; GFX11-TRUE16-NEXT: v_and_b32_e32 v50, 0xffff, v5
-; GFX11-TRUE16-NEXT: v_and_b32_e32 v49, 0xffff, v6
-; GFX11-TRUE16-NEXT: v_and_b32_e32 v48, 0xffff, v7
-; GFX11-TRUE16-NEXT: s_lshr_b32 s41, s29, 16
-; GFX11-TRUE16-NEXT: s_lshr_b32 s15, s28, 16
-; GFX11-TRUE16-NEXT: s_lshr_b32 s42, s27, 16
-; GFX11-TRUE16-NEXT: s_lshr_b32 s14, s26, 16
-; GFX11-TRUE16-NEXT: s_lshr_b32 s13, s25, 16
-; GFX11-TRUE16-NEXT: s_lshr_b32 s12, s24, 16
-; GFX11-TRUE16-NEXT: s_lshr_b32 s11, s23, 16
-; GFX11-TRUE16-NEXT: s_lshr_b32 s10, s22, 16
-; GFX11-TRUE16-NEXT: s_lshr_b32 s9, s21, 16
-; GFX11-TRUE16-NEXT: s_lshr_b32 s8, s20, 16
-; GFX11-TRUE16-NEXT: s_lshr_b32 s7, s19, 16
-; GFX11-TRUE16-NEXT: s_lshr_b32 s6, s18, 16
-; GFX11-TRUE16-NEXT: s_lshr_b32 s5, s17, 16
-; GFX11-TRUE16-NEXT: s_lshr_b32 s4, s16, 16
-; GFX11-TRUE16-NEXT: s_lshr_b32 s43, s3, 16
-; GFX11-TRUE16-NEXT: s_lshr_b32 s44, s2, 16
-; GFX11-TRUE16-NEXT: s_lshr_b32 s45, s1, 16
-; GFX11-TRUE16-NEXT: s_lshr_b32 s46, s0, 16
-; GFX11-TRUE16-NEXT: s_mov_b32 s40, 0
-; GFX11-TRUE16-NEXT: s_pack_ll_b32_b16 s0, s0, s46
-; GFX11-TRUE16-NEXT: s_pack_ll_b32_b16 s1, s1, s45
-; GFX11-TRUE16-NEXT: s_pack_ll_b32_b16 s2, s2, s44
-; GFX11-TRUE16-NEXT: s_pack_ll_b32_b16 s3, s3, s43
-; GFX11-TRUE16-NEXT: s_pack_ll_b32_b16 s4, s16, s4
-; GFX11-TRUE16-NEXT: s_pack_ll_b32_b16 s5, s17, s5
-; GFX11-TRUE16-NEXT: s_pack_ll_b32_b16 s6, s18, s6
-; GFX11-TRUE16-NEXT: s_pack_ll_b32_b16 s7, s19, s7
-; GFX11-TRUE16-NEXT: s_pack_ll_b32_b16 s8, s20, s8
-; GFX11-TRUE16-NEXT: s_pack_ll_b32_b16 s9, s21, s9
-; GFX11-TRUE16-NEXT: s_pack_ll_b32_b16 s10, s22, s10
-; GFX11-TRUE16-NEXT: s_pack_ll_b32_b16 s11, s23, s11
-; GFX11-TRUE16-NEXT: s_pack_ll_b32_b16 s12, s24, s12
-; GFX11-TRUE16-NEXT: s_pack_ll_b32_b16 s13, s25, s13
-; GFX11-TRUE16-NEXT: s_pack_ll_b32_b16 s14, s26, s14
-; GFX11-TRUE16-NEXT: s_pack_ll_b32_b16 s17, s27, s42
-; GFX11-TRUE16-NEXT: s_pack_ll_b32_b16 s15, s28, s15
-; GFX11-TRUE16-NEXT: s_pack_ll_b32_b16 s16, s29, s41
+; GFX11-TRUE16-NEXT: s_clause 0x1f
+; GFX11-TRUE16-NEXT: scratch_store_b32 off, v40, s32 offset:316
+; GFX11-TRUE16-NEXT: scratch_store_b32 off, v41, s32 offset:312
+; GFX11-TRUE16-NEXT: scratch_store_b32 off, v42, s32 offset:308
+; GFX11-TRUE16-NEXT: scratch_store_b32 off, v43, s32 offset:304
+; GFX11-TRUE16-NEXT: scratch_store_b32 off, v44, s32 offset:300
+; GFX11-TRUE16-NEXT: scratch_store_b32 off, v45, s32 offset:296
+; GFX11-TRUE16-NEXT: scratch_store_b32 off, v46, s32 offset:292
+; GFX11-TRUE16-NEXT: scratch_store_b32 off, v47, s32 offset:288
+; GFX11-TRUE16-NEXT: scratch_store_b32 off, v56, s32 offset:284
+; GFX11-TRUE16-NEXT: scratch_store_b32 off, v57, s32 offset:280
+; GFX11-TRUE16-NEXT: scratch_store_b32 off, v58, s32 offset:276
+; GFX11-TRUE16-NEXT: scratch_store_b32 off, v59, s32 offset:272
+; GFX11-TRUE16-NEXT: scratch_store_b32 off, v60, s32 offset:268
+; GFX11-TRUE16-NEXT: scratch_store_b32 off, v61, s32 offset:264
+; GFX11-TRUE16-NEXT: scratch_store_b32 off, v62, s32 offset:260
+; GFX11-TRUE16-NEXT: scratch_store_b32 off, v63, s32 offset:256
+; GFX11-TRUE16-NEXT: scratch_store_b32 off, v72, s32 offset:252
+; GFX11-TRUE16-NEXT: scratch_store_b32 off, v73, s32 offset:248
+; GFX11-TRUE16-NEXT: scratch_store_b32 off, v74, s32 offset:244
+; GFX11-TRUE16-NEXT: scratch_store_b32 off, v75, s32 offset:240
+; GFX11-TRUE16-NEXT: scratch_store_b32 off, v76, s32 offset:236
+; GFX11-TRUE16-NEXT: scratch_store_b32 off, v77, s32 offset:232
+; GFX11-TRUE16-NEXT: scratch_store_b32 off, v78, s32 offset:228
+; GFX11-TRUE16-NEXT: scratch_store_b32 off, v79, s32 offset:224
+; GFX11-TRUE16-NEXT: scratch_store_b32 off, v88, s32 offset:220
+; GFX11-TRUE16-NEXT: scratch_store_b32 off, v89, s32 offset:216
+; GFX11-TRUE16-NEXT: scratch_store_b32 off, v90, s32 offset:212
+; GFX11-TRUE16-NEXT: scratch_store_b32 off, v91, s32 offset:208
+; GFX11-TRUE16-NEXT: scratch_store_b32 off, v92, s32 offset:204
+; GFX11-TRUE16-NEXT: scratch_store_b32 off, v93, s32 offset:200
+; GFX11-TRUE16-NEXT: scratch_store_b32 off, v94, s32 offset:196
+; GFX11-TRUE16-NEXT: scratch_store_b32 off, v95, s32 offset:192
+; GFX11-TRUE16-NEXT: s_clause 0x1f
+; GFX11-TRUE16-NEXT: scratch_store_b32 off, v104, s32 offset:188
+; GFX11-TRUE16-NEXT: scratch_store_b32 off, v105, s32 offset:184
+; GFX11-TRUE16-NEXT: scratch_store_b32 off, v106, s32 offset:180
+; GFX11-TRUE16-NEXT: scratch_store_b32 off, v107, s32 offset:176
+; GFX11-TRUE16-NEXT: scratch_store_b32 off, v108, s32 offset:172
+; GFX11-TRUE16-NEXT: scratch_store_b32 off, v109, s32 offset:168
+; GFX11-TRUE16-NEXT: scratch_store_b32 off, v110, s32 offset:164
+; GFX11-TRUE16-NEXT: scratch_store_b32 off, v111, s32 offset:160
+; GFX11-TRUE16-NEXT: scratch_store_b32 off, v120, s32 offset:156
+; GFX11-TRUE16-NEXT: scratch_store_b32 off, v121, s32 offset:152
+; GFX11-TRUE16-NEXT: scratch_store_b32 off, v122, s32 offset:148
+; GFX11-TRUE16-NEXT: scratch_store_b32 off, v123, s32 offset:144
+; GFX11-TRUE16-NEXT: scratch_store_b32 off, v124, s32 offset:140
+; GFX11-TRUE16-NEXT: scratch_store_b32 off, v125, s32 offset:136
+; GFX11-TRUE16-NEXT: scratch_store_b32 off, v126, s32 offset:132
+; GFX11-TRUE16-NEXT: scratch_store_b32 off, v127, s32 offset:128
+; GFX11-TRUE16-NEXT: scratch_store_b32 off, v136, s32 offset:124
+; GFX11-TRUE16-NEXT: scratch_store_b32 off, v137, s32 offset:120
+; GFX11-TRUE16-NEXT: scratch_store_b32 off, v138, s32 offset:116
+; GFX11-TRUE16-NEXT: scratch_store_b32 off, v139, s32 offset:112
+; GFX11-TRUE16-NEXT: scratch_store_b32 off, v140, s32 offset:108
+; GFX11-TRUE16-NEXT: scratch_store_b32 off, v141, s32 offset:104
+; GFX11-TRUE16-NEXT: scratch_store_b32 off, v142, s32 offset:100
+; GFX11-TRUE16-NEXT: scratch_store_b32 off, v143, s32 offset:96
+; GFX11-TRUE16-NEXT: scratch_store_b32 off, v152, s32 offset:92
+; GFX11-TRUE16-NEXT: scratch_store_b32 off, v153, s32 offset:88
+; GFX11-TRUE16-NEXT: scratch_store_b32 off, v154, s32 offset:84
+; GFX11-TRUE16-NEXT: scratch_store_b32 off, v155, s32 offset:80
+; GFX11-TRUE16-NEXT: scratch_store_b32 off, v156, s32 offset:76
+; GFX11-TRUE16-NEXT: scratch_store_b32 off, v157, s32 offset:72
+; GFX11-TRUE16-NEXT: scratch_store_b32 off, v158, s32 offset:68
+; GFX11-TRUE16-NEXT: scratch_store_b32 off, v159, s32 offset:64
+; GFX11-TRUE16-NEXT: s_clause 0xf
+; GFX11-TRUE16-NEXT: scratch_store_b32 off, v168, s32 offset:60
+; GFX11-TRUE16-NEXT: scratch_store_b32 off, v169, s32 offset:56
+; GFX11-TRUE16-NEXT: scratch_store_b32 off, v170, s32 offset:52
+; GFX11-TRUE16-NEXT: scratch_store_b32 off, v171, s32 offset:48
+; GFX11-TRUE16-NEXT: scratch_store_b32 off, v172, s32 offset:44
+; GFX11-TRUE16-NEXT: scratch_store_b32 off, v173, s32 offset:40
+; GFX11-TRUE16-NEXT: scratch_store_b32 off, v174, s32 offset:36
+; GFX11-TRUE16-NEXT: scratch_store_b32 off, v175, s32 offset:32
+; GFX11-TRUE16-NEXT: scratch_store_b32 off, v184, s32 offset:28
+; GFX11-TRUE16-NEXT: scratch_store_b32 off, v185, s32 offset:24
+; GFX11-TRUE16-NEXT: scratch_store_b32 off, v186, s32 offset:20
+; GFX11-TRUE16-NEXT: scratch_store_b32 off, v187, s32 offset:16
+; GFX11-TRUE16-NEXT: scratch_store_b32 off, v188, s32 offset:12
+; GFX11-TRUE16-NEXT: scratch_store_b32 off, v189, s32 offset:8
+; GFX11-TRUE16-NEXT: scratch_store_b32 off, v190, s32 offset:4
+; GFX11-TRUE16-NEXT: scratch_store_b32 off, v191, s32
+; GFX11-TRUE16-NEXT: v_dual_mov_b32 v25, v7 :: v_dual_mov_b32 v186, v6
+; GFX11-TRUE16-NEXT: v_dual_mov_b32 v187, v5 :: v_dual_mov_b32 v188, v4
+; GFX11-TRUE16-NEXT: v_dual_mov_b32 v189, v3 :: v_dual_mov_b32 v190, v2
+; GFX11-TRUE16-NEXT: v_mov_b32_e32 v191, v1
+; GFX11-TRUE16-NEXT: v_mov_b32_e32 v185, v0
+; GFX11-TRUE16-NEXT: s_lshr_b32 s15, s29, 16
+; GFX11-TRUE16-NEXT: s_lshr_b32 s14, s28, 16
+; GFX11-TRUE16-NEXT: s_lshr_b32 s13, s27, 16
+; GFX11-TRUE16-NEXT: s_lshr_b32 s12, s26, 16
+; GFX11-TRUE16-NEXT: s_lshr_b32 s11, s25, 16
+; GFX11-TRUE16-NEXT: s_lshr_b32 s10, s24, 16
+; GFX11-TRUE16-NEXT: s_lshr_b32 s9, s23, 16
+; GFX11-TRUE16-NEXT: s_lshr_b32 s8, s22, 16
+; GFX11-TRUE16-NEXT: s_lshr_b32 s7, s21, 16
+; GFX11-TRUE16-NEXT: s_lshr_b32 s6, s20, 16
+; GFX11-TRUE16-NEXT: s_lshr_b32 s5, s19, 16
+; GFX11-TRUE16-NEXT: s_lshr_b32 s4, s18, 16
+; GFX11-TRUE16-NEXT: s_lshr_b32 s43, s17, 16
+; GFX11-TRUE16-NEXT: s_lshr_b32 s44, s16, 16
+; GFX11-TRUE16-NEXT: s_lshr_b32 s45, s3, 16
+; GFX11-TRUE16-NEXT: s_lshr_b32 s46, s2, 16
+; GFX11-TRUE16-NEXT: s_lshr_b32 s41, s1, 16
+; GFX11-TRUE16-NEXT: s_lshr_b32 s40, s0, 16
+; GFX11-TRUE16-NEXT: s_mov_b32 s42, 0
+; GFX11-TRUE16-NEXT: s_pack_ll_b32_b16 s40, s0, s40
+; GFX11-TRUE16-NEXT: s_pack_ll_b32_b16 s41, s1, s41
+; GFX11-TRUE16-NEXT: s_pack_ll_b32_b16 s0, s2, s46
+; GFX11-TRUE16-NEXT: s_pack_ll_b32_b16 s1, s3, s45
+; GFX11-TRUE16-NEXT: s_pack_ll_b32_b16 s2, s16, s44
+; GFX11-TRUE16-NEXT: s_pack_ll_b32_b16 s3, s17, s43
+; GFX11-TRUE16-NEXT: s_pack_ll_b32_b16 s4, s18, s4
+; GFX11-TRUE16-NEXT: s_pack_ll_b32_b16 s5, s19, s5
+; GFX11-TRUE16-NEXT: s_pack_ll_b32_b16 s6, s20, s6
+; GFX11-TRUE16-NEXT: s_pack_ll_b32_b16 s7, s21, s7
+; GFX11-TRUE16-NEXT: s_pack_ll_b32_b16 s8, s22, s8
+; GFX11-TRUE16-NEXT: s_pack_ll_b32_b16 s9, s23, s9
+; GFX11-TRUE16-NEXT: s_pack_ll_b32_b16 s10, s24, s10
+; GFX11-TRUE16-NEXT: s_pack_ll_b32_b16 s11, s25, s11
+; GFX11-TRUE16-NEXT: s_pack_ll_b32_b16 s12, s26, s12
+; GFX11-TRUE16-NEXT: s_pack_ll_b32_b16 s13, s27, s13
+; GFX11-TRUE16-NEXT: s_pack_ll_b32_b16 s14, s28, s14
+; GFX11-TRUE16-NEXT: s_pack_ll_b32_b16 s15, s29, s15
; GFX11-TRUE16-NEXT: s_and_b32 s47, vcc_lo, exec_lo
; GFX11-TRUE16-NEXT: s_cbranch_scc0 .LBB15_4
; GFX11-TRUE16-NEXT: ; %bb.1: ; %cmp.false
-; GFX11-TRUE16-NEXT: v_lshl_or_b32 v18, v39, 16, v55
-; GFX11-TRUE16-NEXT: v_lshl_or_b32 v19, v38, 16, v54
-; GFX11-TRUE16-NEXT: v_lshl_or_b32 v20, v37, 16, v53
-; GFX11-TRUE16-NEXT: v_lshl_or_b32 v21, v36, 16, v52
-; GFX11-TRUE16-NEXT: v_lshl_or_b32 v22, v35, 16, v51
-; GFX11-TRUE16-NEXT: v_lshl_or_b32 v23, v34, 16, v50
-; GFX11-TRUE16-NEXT: v_lshl_or_b32 v24, v33, 16, v49
-; GFX11-TRUE16-NEXT: v_lshl_or_b32 v25, v32, 16, v48
-; GFX11-TRUE16-NEXT: v_dual_mov_b32 v0, s0 :: v_dual_mov_b32 v1, s1
-; GFX11-TRUE16-NEXT: v_dual_mov_b32 v2, s2 :: v_dual_mov_b32 v3, s3
-; GFX11-TRUE16-NEXT: v_dual_mov_b32 v4, s4 :: v_dual_mov_b32 v5, s5
-; GFX11-TRUE16-NEXT: v_dual_mov_b32 v6, s6 :: v_dual_mov_b32 v7, s7
-; GFX11-TRUE16-NEXT: v_dual_mov_b32 v8, s8 :: v_dual_mov_b32 v9, s9
-; GFX11-TRUE16-NEXT: v_dual_mov_b32 v10, s10 :: v_dual_mov_b32 v11, s11
-; GFX11-TRUE16-NEXT: v_dual_mov_b32 v12, s12 :: v_dual_mov_b32 v13, s13
-; GFX11-TRUE16-NEXT: v_dual_mov_b32 v14, s14 :: v_dual_mov_b32 v15, s17
-; GFX11-TRUE16-NEXT: v_dual_mov_b32 v16, s15 :: v_dual_mov_b32 v17, s16
-; GFX11-TRUE16-NEXT: s_and_not1_b32 vcc_lo, exec_lo, s40
+; GFX11-TRUE16-NEXT: v_dual_mov_b32 v0, s40 :: v_dual_mov_b32 v5, s0
+; GFX11-TRUE16-NEXT: v_dual_mov_b32 v2, s41 :: v_dual_mov_b32 v9, s1
+; GFX11-TRUE16-NEXT: v_dual_mov_b32 v14, s2 :: v_dual_mov_b32 v27, s4
+; GFX11-TRUE16-NEXT: v_dual_mov_b32 v20, s3 :: v_dual_mov_b32 v35, s5
+; GFX11-TRUE16-NEXT: v_dual_mov_b32 v44, s6 :: v_dual_mov_b32 v65, s8
+; GFX11-TRUE16-NEXT: v_dual_mov_b32 v54, s7 :: v_dual_mov_b32 v77, s9
+; GFX11-TRUE16-NEXT: v_dual_mov_b32 v90, s10 :: v_dual_mov_b32 v119, s12
+; GFX11-TRUE16-NEXT: v_dual_mov_b32 v104, s11 :: v_dual_mov_b32 v135, s13
+; GFX11-TRUE16-NEXT: v_mov_b32_e32 v152, s14
+; GFX11-TRUE16-NEXT: v_mov_b32_e32 v170, s15
+; GFX11-TRUE16-NEXT: s_and_not1_b32 vcc_lo, exec_lo, s42
; GFX11-TRUE16-NEXT: s_cbranch_vccnz .LBB15_3
; GFX11-TRUE16-NEXT: .LBB15_2: ; %cmp.true
-; GFX11-TRUE16-NEXT: v_lshl_or_b32 v18, v39, 16, v55
-; GFX11-TRUE16-NEXT: v_lshl_or_b32 v19, v38, 16, v54
-; GFX11-TRUE16-NEXT: v_lshl_or_b32 v20, v37, 16, v53
-; GFX11-TRUE16-NEXT: v_lshl_or_b32 v21, v36, 16, v52
-; GFX11-TRUE16-NEXT: v_lshl_or_b32 v22, v35, 16, v51
-; GFX11-TRUE16-NEXT: v_lshl_or_b32 v23, v34, 16, v50
-; GFX11-TRUE16-NEXT: v_lshl_or_b32 v24, v33, 16, v49
-; GFX11-TRUE16-NEXT: v_lshl_or_b32 v25, v32, 16, v48
-; GFX11-TRUE16-NEXT: v_pk_add_u16 v0, s0, 3 op_sel_hi:[1,0]
-; GFX11-TRUE16-NEXT: v_pk_add_u16 v1, s1, 3 op_sel_hi:[1,0]
-; GFX11-TRUE16-NEXT: v_pk_add_u16 v2, s2, 3 op_sel_hi:[1,0]
-; GFX11-TRUE16-NEXT: v_pk_add_u16 v3, s3, 3 op_sel_hi:[1,0]
-; GFX11-TRUE16-NEXT: v_pk_add_u16 v4, s4, 3 op_sel_hi:[1,0]
-; GFX11-TRUE16-NEXT: v_pk_add_u16 v5, s5, 3 op_sel_hi:[1,0]
-; GFX11-TRUE16-NEXT: v_pk_add_u16 v6, s6, 3 op_sel_hi:[1,0]
-; GFX11-TRUE16-NEXT: v_pk_add_u16 v7, s7, 3 op_sel_hi:[1,0]
-; GFX11-TRUE16-NEXT: v_pk_add_u16 v8, s8, 3 op_sel_hi:[1,0]
-; GFX11-TRUE16-NEXT: v_pk_add_u16 v9, s9, 3 op_sel_hi:[1,0]
-; GFX11-TRUE16-NEXT: v_pk_add_u16 v10, s10, 3 op_sel_hi:[1,0]
-; GFX11-TRUE16-NEXT: v_pk_add_u16 v11, s11, 3 op_sel_hi:[1,0]
-; GFX11-TRUE16-NEXT: v_pk_add_u16 v12, s12, 3 op_sel_hi:[1,0]
-; GFX11-TRUE16-NEXT: v_pk_add_u16 v13, s13, 3 op_sel_hi:[1,0]
-; GFX11-TRUE16-NEXT: v_pk_add_u16 v14, s14, 3 op_sel_hi:[1,0]
-; GFX11-TRUE16-NEXT: v_pk_add_u16 v15, s17, 3 op_sel_hi:[1,0]
-; GFX11-TRUE16-NEXT: v_pk_add_u16 v16, s15, 3 op_sel_hi:[1,0]
-; GFX11-TRUE16-NEXT: v_pk_add_u16 v17, s16, 3 op_sel_hi:[1,0]
-; GFX11-TRUE16-NEXT: v_pk_add_u16 v18, v18, 3 op_sel_hi:[1,0]
-; GFX11-TRUE16-NEXT: v_pk_add_u16 v19, v19, 3 op_sel_hi:[1,0]
-; GFX11-TRUE16-NEXT: v_pk_add_u16 v20, v20, 3 op_sel_hi:[1,0]
-; GFX11-TRUE16-NEXT: v_pk_add_u16 v21, v21, 3 op_sel_hi:[1,0]
-; GFX11-TRUE16-NEXT: v_pk_add_u16 v22, v22, 3 op_sel_hi:[1,0]
-; GFX11-TRUE16-NEXT: v_pk_add_u16 v23, v23, 3 op_sel_hi:[1,0]
-; GFX11-TRUE16-NEXT: v_pk_add_u16 v24, v24, 3 op_sel_hi:[1,0]
+; GFX11-TRUE16-NEXT: v_pk_add_u16 v0, s40, 3 op_sel_hi:[1,0]
+; GFX11-TRUE16-NEXT: v_pk_add_u16 v2, s41, 3 op_sel_hi:[1,0]
+; GFX11-TRUE16-NEXT: v_pk_add_u16 v185, v185, 3 op_sel_hi:[1,0]
+; GFX11-TRUE16-NEXT: v_pk_add_u16 v191, v191, 3 op_sel_hi:[1,0]
+; GFX11-TRUE16-NEXT: v_pk_add_u16 v190, v190, 3 op_sel_hi:[1,0]
+; GFX11-TRUE16-NEXT: v_pk_add_u16 v189, v189, 3 op_sel_hi:[1,0]
+; GFX11-TRUE16-NEXT: v_pk_add_u16 v188, v188, 3 op_sel_hi:[1,0]
+; GFX11-TRUE16-NEXT: v_pk_add_u16 v187, v187, 3 op_sel_hi:[1,0]
+; GFX11-TRUE16-NEXT: v_pk_add_u16 v186, v186, 3 op_sel_hi:[1,0]
; GFX11-TRUE16-NEXT: v_pk_add_u16 v25, v25, 3 op_sel_hi:[1,0]
+; GFX11-TRUE16-NEXT: v_pk_add_u16 v5, s0, 3 op_sel_hi:[1,0]
+; GFX11-TRUE16-NEXT: v_pk_add_u16 v9, s1, 3 op_sel_hi:[1,0]
+; GFX11-TRUE16-NEXT: v_pk_add_u16 v14, s2, 3 op_sel_hi:[1,0]
+; GFX11-TRUE16-NEXT: v_pk_add_u16 v20, s3, 3 op_sel_hi:[1,0]
+; GFX11-TRUE16-NEXT: v_pk_add_u16 v27, s4, 3 op_sel_hi:[1,0]
+; GFX11-TRUE16-NEXT: v_pk_add_u16 v35, s5, 3 op_sel_hi:[1,0]
+; GFX11-TRUE16-NEXT: v_pk_add_u16 v44, s6, 3 op_sel_hi:[1,0]
+; GFX11-TRUE16-NEXT: v_pk_add_u16 v54, s7, 3 op_sel_hi:[1,0]
+; GFX11-TRUE16-NEXT: v_pk_add_u16 v65, s8, 3 op_sel_hi:[1,0]
+; GFX11-TRUE16-NEXT: v_pk_add_u16 v77, s9, 3 op_sel_hi:[1,0]
+; GFX11-TRUE16-NEXT: v_pk_add_u16 v90, s10, 3 op_sel_hi:[1,0]
+; GFX11-TRUE16-NEXT: v_pk_add_u16 v104, s11, 3 op_sel_hi:[1,0]
+; GFX11-TRUE16-NEXT: v_pk_add_u16 v119, s12, 3 op_sel_hi:[1,0]
+; GFX11-TRUE16-NEXT: v_pk_add_u16 v135, s13, 3 op_sel_hi:[1,0]
+; GFX11-TRUE16-NEXT: v_pk_add_u16 v152, s14, 3 op_sel_hi:[1,0]
+; GFX11-TRUE16-NEXT: v_pk_add_u16 v170, s15, 3 op_sel_hi:[1,0]
; GFX11-TRUE16-NEXT: .LBB15_3: ; %end
+; GFX11-TRUE16-NEXT: v_dual_mov_b32 v1, v2 :: v_dual_mov_b32 v2, v5
+; GFX11-TRUE16-NEXT: v_dual_mov_b32 v5, v20 :: v_dual_mov_b32 v6, v27
+; GFX11-TRUE16-NEXT: v_dual_mov_b32 v7, v35 :: v_dual_mov_b32 v8, v44
+; GFX11-TRUE16-NEXT: v_dual_mov_b32 v11, v77 :: v_dual_mov_b32 v12, v90
+; GFX11-TRUE16-NEXT: v_mov_b32_e32 v13, v104
+; GFX11-TRUE16-NEXT: v_dual_mov_b32 v15, v135 :: v_dual_mov_b32 v16, v152
+; GFX11-TRUE16-NEXT: v_dual_mov_b32 v17, v170 :: v_dual_mov_b32 v18, v185
+; GFX11-TRUE16-NEXT: v_dual_mov_b32 v19, v191 :: v_dual_mov_b32 v20, v190
+; GFX11-TRUE16-NEXT: v_dual_mov_b32 v21, v189 :: v_dual_mov_b32 v22, v188
+; GFX11-TRUE16-NEXT: v_dual_mov_b32 v23, v187 :: v_dual_mov_b32 v24, v186
+; GFX11-TRUE16-NEXT: s_clause 0x1f
+; GFX11-TRUE16-NEXT: scratch_load_b32 v191, off, s32
+; GFX11-TRUE16-NEXT: scratch_load_b32 v190, off, s32 offset:4
+; GFX11-TRUE16-NEXT: scratch_load_b32 v189, off, s32 offset:8
+; GFX11-TRUE16-NEXT: scratch_load_b32 v188, off, s32 offset:12
+; GFX11-TRUE16-NEXT: scratch_load_b32 v187, off, s32 offset:16
+; GFX11-TRUE16-NEXT: scratch_load_b32 v186, off, s32 offset:20
+; GFX11-TRUE16-NEXT: scratch_load_b32 v185, off, s32 offset:24
+; GFX11-TRUE16-NEXT: scratch_load_b32 v184, off, s32 offset:28
+; GFX11-TRUE16-NEXT: scratch_load_b32 v175, off, s32 offset:32
+; GFX11-TRUE16-NEXT: scratch_load_b32 v174, off, s32 offset:36
+; GFX11-TRUE16-NEXT: scratch_load_b32 v173, off, s32 offset:40
+; GFX11-TRUE16-NEXT: scratch_load_b32 v172, off, s32 offset:44
+; GFX11-TRUE16-NEXT: scratch_load_b32 v171, off, s32 offset:48
+; GFX11-TRUE16-NEXT: scratch_load_b32 v170, off, s32 offset:52
+; GFX11-TRUE16-NEXT: scratch_load_b32 v169, off, s32 offset:56
+; GFX11-TRUE16-NEXT: scratch_load_b32 v168, off, s32 offset:60
+; GFX11-TRUE16-NEXT: scratch_load_b32 v159, off, s32 offset:64
+; GFX11-TRUE16-NEXT: scratch_load_b32 v158, off, s32 offset:68
+; GFX11-TRUE16-NEXT: scratch_load_b32 v157, off, s32 offset:72
+; GFX11-TRUE16-NEXT: scratch_load_b32 v156, off, s32 offset:76
+; GFX11-TRUE16-NEXT: scratch_load_b32 v155, off, s32 offset:80
+; GFX11-TRUE16-NEXT: scratch_load_b32 v154, off, s32 offset:84
+; GFX11-TRUE16-NEXT: scratch_load_b32 v153, off, s32 offset:88
+; GFX11-TRUE16-NEXT: scratch_load_b32 v152, off, s32 offset:92
+; GFX11-TRUE16-NEXT: scratch_load_b32 v143, off, s32 offset:96
+; GFX11-TRUE16-NEXT: scratch_load_b32 v142, off, s32 offset:100
+; GFX11-TRUE16-NEXT: scratch_load_b32 v141, off, s32 offset:104
+; GFX11-TRUE16-NEXT: scratch_load_b32 v140, off, s32 offset:108
+; GFX11-TRUE16-NEXT: scratch_load_b32 v139, off, s32 offset:112
+; GFX11-TRUE16-NEXT: scratch_load_b32 v138, off, s32 offset:116
+; GFX11-TRUE16-NEXT: scratch_load_b32 v137, off, s32 offset:120
+; GFX11-TRUE16-NEXT: scratch_load_b32 v136, off, s32 offset:124
+; GFX11-TRUE16-NEXT: s_clause 0x1f
+; GFX11-TRUE16-NEXT: scratch_load_b32 v127, off, s32 offset:128
+; GFX11-TRUE16-NEXT: scratch_load_b32 v126, off, s32 offset:132
+; GFX11-TRUE16-NEXT: scratch_load_b32 v125, off, s32 offset:136
+; GFX11-TRUE16-NEXT: scratch_load_b32 v124, off, s32 offset:140
+; GFX11-TRUE16-NEXT: scratch_load_b32 v123, off, s32 offset:144
+; GFX11-TRUE16-NEXT: scratch_load_b32 v122, off, s32 offset:148
+; GFX11-TRUE16-NEXT: scratch_load_b32 v121, off, s32 offset:152
+; GFX11-TRUE16-NEXT: scratch_load_b32 v120, off, s32 offset:156
+; GFX11-TRUE16-NEXT: scratch_load_b32 v111, off, s32 offset:160
+; GFX11-TRUE16-NEXT: scratch_load_b32 v110, off, s32 offset:164
+; GFX11-TRUE16-NEXT: scratch_load_b32 v109, off, s32 offset:168
+; GFX11-TRUE16-NEXT: scratch_load_b32 v108, off, s32 offset:172
+; GFX11-TRUE16-NEXT: scratch_load_b32 v107, off, s32 offset:176
+; GFX11-TRUE16-NEXT: scratch_load_b32 v106, off, s32 offset:180
+; GFX11-TRUE16-NEXT: scratch_load_b32 v105, off, s32 offset:184
+; GFX11-TRUE16-NEXT: scratch_load_b32 v104, off, s32 offset:188
+; GFX11-TRUE16-NEXT: scratch_load_b32 v95, off, s32 offset:192
+; GFX11-TRUE16-NEXT: scratch_load_b32 v94, off, s32 offset:196
+; GFX11-TRUE16-NEXT: scratch_load_b32 v93, off, s32 offset:200
+; GFX11-TRUE16-NEXT: scratch_load_b32 v92, off, s32 offset:204
+; GFX11-TRUE16-NEXT: scratch_load_b32 v91, off, s32 offset:208
+; GFX11-TRUE16-NEXT: scratch_load_b32 v90, off, s32 offset:212
+; GFX11-TRUE16-NEXT: scratch_load_b32 v89, off, s32 offset:216
+; GFX11-TRUE16-NEXT: scratch_load_b32 v88, off, s32 offset:220
+; GFX11-TRUE16-NEXT: scratch_load_b32 v79, off, s32 offset:224
+; GFX11-TRUE16-NEXT: scratch_load_b32 v78, off, s32 offset:228
+; GFX11-TRUE16-NEXT: scratch_load_b32 v77, off, s32 offset:232
+; GFX11-TRUE16-NEXT: scratch_load_b32 v76, off, s32 offset:236
+; GFX11-TRUE16-NEXT: scratch_load_b32 v75, off, s32 offset:240
+; GFX11-TRUE16-NEXT: scratch_load_b32 v74, off, s32 offset:244
+; GFX11-TRUE16-NEXT: scratch_load_b32 v73, off, s32 offset:248
+; GFX11-TRUE16-NEXT: scratch_load_b32 v72, off, s32 offset:252
+; GFX11-TRUE16-NEXT: s_clause 0xf
+; GFX11-TRUE16-NEXT: scratch_load_b32 v63, off, s32 offset:256
+; GFX11-TRUE16-NEXT: scratch_load_b32 v62, off, s32 offset:260
+; GFX11-TRUE16-NEXT: scratch_load_b32 v61, off, s32 offset:264
+; GFX11-TRUE16-NEXT: scratch_load_b32 v60, off, s32 offset:268
+; GFX11-TRUE16-NEXT: scratch_load_b32 v59, off, s32 offset:272
+; GFX11-TRUE16-NEXT: scratch_load_b32 v58, off, s32 offset:276
+; GFX11-TRUE16-NEXT: scratch_load_b32 v57, off, s32 offset:280
+; GFX11-TRUE16-NEXT: scratch_load_b32 v56, off, s32 offset:284
+; GFX11-TRUE16-NEXT: scratch_load_b32 v47, off, s32 offset:288
+; GFX11-TRUE16-NEXT: scratch_load_b32 v46, off, s32 offset:292
+; GFX11-TRUE16-NEXT: scratch_load_b32 v45, off, s32 offset:296
+; GFX11-TRUE16-NEXT: scratch_load_b32 v44, off, s32 offset:300
+; GFX11-TRUE16-NEXT: scratch_load_b32 v43, off, s32 offset:304
+; GFX11-TRUE16-NEXT: scratch_load_b32 v42, off, s32 offset:308
+; GFX11-TRUE16-NEXT: scratch_load_b32 v41, off, s32 offset:312
+; GFX11-TRUE16-NEXT: scratch_load_b32 v40, off, s32 offset:316
+; GFX11-TRUE16-NEXT: v_dual_mov_b32 v3, v9 :: v_dual_mov_b32 v4, v14
+; GFX11-TRUE16-NEXT: v_dual_mov_b32 v9, v54 :: v_dual_mov_b32 v10, v65
+; GFX11-TRUE16-NEXT: v_mov_b32_e32 v14, v119
+; GFX11-TRUE16-NEXT: s_waitcnt vmcnt(0)
; GFX11-TRUE16-NEXT: s_setpc_b64 s[30:31]
; GFX11-TRUE16-NEXT: .LBB15_4:
+; GFX11-TRUE16-NEXT: v_mov_b32_e32 v53, v25
; GFX11-TRUE16-NEXT: ; implicit-def: $vgpr0_vgpr1_vgpr2_vgpr3_vgpr4_vgpr5_vgpr6_vgpr7_vgpr8_vgpr9_vgpr10_vgpr11_vgpr12_vgpr13_vgpr14_vgpr15_vgpr16_vgpr17_vgpr18_vgpr19_vgpr20_vgpr21_vgpr22_vgpr23_vgpr24_vgpr25_vgpr26_vgpr27_vgpr28_vgpr29_vgpr30_vgpr31
+; GFX11-TRUE16-NEXT: ; implicit-def: $vgpr1_vgpr2_vgpr3_vgpr4_vgpr5_vgpr6_vgpr7_vgpr8_vgpr9_vgpr10_vgpr11_vgpr12_vgpr13_vgpr14_vgpr15_vgpr16_vgpr17_vgpr18_vgpr19_vgpr20_vgpr21_vgpr22_vgpr23_vgpr24_vgpr25_vgpr26_vgpr27_vgpr28_vgpr29_vgpr30_vgpr31_vgpr32
+; GFX11-TRUE16-NEXT: ; implicit-def: $vgpr3_vgpr4_vgpr5_vgpr6_vgpr7_vgpr8_vgpr9_vgpr10_vgpr11_vgpr12_vgpr13_vgpr14_vgpr15_vgpr16_vgpr17_vgpr18_vgpr19_vgpr20_vgpr21_vgpr22_vgpr23_vgpr24_vgpr25_vgpr26_vgpr27_vgpr28_vgpr29_vgpr30_vgpr31_vgpr32_vgpr33_vgpr34
+; GFX11-TRUE16-NEXT: ; implicit-def: $vgpr6_vgpr7_vgpr8_vgpr9_vgpr10_vgpr11_vgpr12_vgpr13_vgpr14_vgpr15_vgpr16_vgpr17_vgpr18_vgpr19_vgpr20_vgpr21_vgpr22_vgpr23_vgpr24_vgpr25_vgpr26_vgpr27_vgpr28_vgpr29_vgpr30_vgpr31_vgpr32_vgpr33_vgpr34_vgpr35_vgpr36_vgpr37
+; GFX11-TRUE16-NEXT: ; implicit-def: $vgpr10_vgpr11_vgpr12_vgpr13_vgpr14_vgpr15_vgpr16_vgpr17_vgpr18_vgpr19_vgpr20_vgpr21_vgpr22_vgpr23_vgpr24_vgpr25_vgpr26_vgpr27_vgpr28_vgpr29_vgpr30_vgpr31_vgpr32_vgpr33_vgpr34_vgpr35_vgpr36_vgpr37_vgpr38_vgpr39_vgpr40_vgpr41
+; GFX11-TRUE16-NEXT: ; implicit-def: $vgpr15_vgpr16_vgpr17_vgpr18_vgpr19_vgpr20_vgpr21_vgpr22_vgpr23_vgpr24_vgpr25_vgpr26_vgpr27_vgpr28_vgpr29_vgpr30_vgpr31_vgpr32_vgpr33_vgpr34_vgpr35_vgpr36_vgpr37_vgpr38_vgpr39_vgpr40_vgpr41_vgpr42_vgpr43_vgpr44_vgpr45_vgpr46
+; GFX11-TRUE16-NEXT: ; implicit-def: $vgpr21_vgpr22_vgpr23_vgpr24_vgpr25_vgpr26_vgpr27_vgpr28_vgpr29_vgpr30_vgpr31_vgpr32_vgpr33_vgpr34_vgpr35_vgpr36_vgpr37_vgpr38_vgpr39_vgpr40_vgpr41_vgpr42_vgpr43_vgpr44_vgpr45_vgpr46_vgpr47_vgpr48_vgpr49_vgpr50_vgpr51_vgpr52
+; GFX11-TRUE16-NEXT: s_delay_alu instid0(VALU_DEP_1)
+; GFX11-TRUE16-NEXT: v_mov_b32_e32 v25, v53
+; GFX11-TRUE16-NEXT: ; implicit-def: $vgpr28_vgpr29_vgpr30_vgpr31_vgpr32_vgpr33_vgpr34_vgpr35_vgpr36_vgpr37_vgpr38_vgpr39_vgpr40_vgpr41_vgpr42_vgpr43_vgpr44_vgpr45_vgpr46_vgpr47_vgpr48_vgpr49_vgpr50_vgpr51_vgpr52_vgpr53_vgpr54_vgpr55_vgpr56_vgpr57_vgpr58_vgpr59
+; GFX11-TRUE16-NEXT: ; implicit-def: $vgpr36_vgpr37_vgpr38_vgpr39_vgpr40_vgpr41_vgpr42_vgpr43_vgpr44_vgpr45_vgpr46_vgpr47_vgpr48_vgpr49_vgpr50_vgpr51_vgpr52_vgpr53_vgpr54_vgpr55_vgpr56_vgpr57_vgpr58_vgpr59_vgpr60_vgpr61_vgpr62_vgpr63_vgpr64_vgpr65_vgpr66_vgpr67
+; GFX11-TRUE16-NEXT: ; implicit-def: $vgpr45_vgpr46_vgpr47_vgpr48_vgpr49_vgpr50_vgpr51_vgpr52_vgpr53_vgpr54_vgpr55_vgpr56_vgpr57_vgpr58_vgpr59_vgpr60_vgpr61_vgpr62_vgpr63_vgpr64_vgpr65_vgpr66_vgpr67_vgpr68_vgpr69_vgpr70_vgpr71_vgpr72_vgpr73_vgpr74_vgpr75_vgpr76
+; GFX11-TRUE16-NEXT: ; implicit-def: $vgpr55_vgpr56_vgpr57_vgpr58_vgpr59_vgpr60_vgpr61_vgpr62_vgpr63_vgpr64_vgpr65_vgpr66_vgpr67_vgpr68_vgpr69_vgpr70_vgpr71_vgpr72_vgpr73_vgpr74_vgpr75_vgpr76_vgpr77_vgpr78_vgpr79_vgpr80_vgpr81_vgpr82_vgpr83_vgpr84_vgpr85_vgpr86
+; GFX11-TRUE16-NEXT: ; implicit-def: $vgpr66_vgpr67_vgpr68_vgpr69_vgpr70_vgpr71_vgpr72_vgpr73_vgpr74_vgpr75_vgpr76_vgpr77_vgpr78_vgpr79_vgpr80_vgpr81_vgpr82_vgpr83_vgpr84_vgpr85_vgpr86_vgpr87_vgpr88_vgpr89_vgpr90_vgpr91_vgpr92_vgpr93_vgpr94_vgpr95_vgpr96_vgpr97
+; GFX11-TRUE16-NEXT: ; implicit-def: $vgpr78_vgpr79_vgpr80_vgpr81_vgpr82_vgpr83_vgpr84_vgpr85_vgpr86_vgpr87_vgpr88_vgpr89_vgpr90_vgpr91_vgpr92_vgpr93_vgpr94_vgpr95_vgpr96_vgpr97_vgpr98_vgpr99_vgpr100_vgpr101_vgpr102_vgpr103_vgpr104_vgpr105_vgpr106_vgpr107_vgpr108_vgpr109
+; GFX11-TRUE16-NEXT: ; implicit-def: $vgpr91_vgpr92_vgpr93_vgpr94_vgpr95_vgpr96_vgpr97_vgpr98_vgpr99_vgpr100_vgpr101_vgpr102_vgpr103_vgpr104_vgpr105_vgpr106_vgpr107_vgpr108_vgpr109_vgpr110_vgpr111_vgpr112_vgpr113_vgpr114_vgpr115_vgpr116_vgpr117_vgpr118_vgpr119_vgpr120_vgpr121_vgpr122
+; GFX11-TRUE16-NEXT: ; implicit-def: $vgpr105_vgpr106_vgpr107_vgpr108_vgpr109_vgpr110_vgpr111_vgpr112_vgpr113_vgpr114_vgpr115_vgpr116_vgpr117_vgpr118_vgpr119_vgpr120_vgpr121_vgpr122_vgpr123_vgpr124_vgpr125_vgpr126_vgpr127_vgpr128_vgpr129_vgpr130_vgpr131_vgpr132_vgpr133_vgpr134_vgpr135_vgpr136
+; GFX11-TRUE16-NEXT: ; implicit-def: $vgpr120_vgpr121_vgpr122_vgpr123_vgpr124_vgpr125_vgpr126_vgpr127_vgpr128_vgpr129_vgpr130_vgpr131_vgpr132_vgpr133_vgpr134_vgpr135_vgpr136_vgpr137_vgpr138_vgpr139_vgpr140_vgpr141_vgpr142_vgpr143_vgpr144_vgpr145_vgpr146_vgpr147_vgpr148_vgpr149_vgpr150_vgpr151
+; GFX11-TRUE16-NEXT: ; implicit-def: $vgpr136_vgpr137_vgpr138_vgpr139_vgpr140_vgpr141_vgpr142_vgpr143_vgpr144_vgpr145_vgpr146_vgpr147_vgpr148_vgpr149_vgpr150_vgpr151_vgpr152_vgpr153_vgpr154_vgpr155_vgpr156_vgpr157_vgpr158_vgpr159_vgpr160_vgpr161_vgpr162_vgpr163_vgpr164_vgpr165_vgpr166_vgpr167
+; GFX11-TRUE16-NEXT: ; implicit-def: $vgpr153_vgpr154_vgpr155_vgpr156_vgpr157_vgpr158_vgpr159_vgpr160_vgpr161_vgpr162_vgpr163_vgpr164_vgpr165_vgpr166_vgpr167_vgpr168_vgpr169_vgpr170_vgpr171_vgpr172_vgpr173_vgpr174_vgpr175_vgpr176_vgpr177_vgpr178_vgpr179_vgpr180_vgpr181_vgpr182_vgpr183_vgpr184
; GFX11-TRUE16-NEXT: s_branch .LBB15_2
;
; GFX11-FAKE16-LABEL: bitcast_v52i16_to_v26i32_scalar:
@@ -10946,129 +11112,295 @@ define inreg <26 x i32> @bitcast_v52f16_to_v26i32_scalar(<52 x half> inreg %a, i
; GFX11-TRUE16-LABEL: bitcast_v52f16_to_v26i32_scalar:
; GFX11-TRUE16: ; %bb.0:
; GFX11-TRUE16-NEXT: s_waitcnt vmcnt(0) expcnt(0) lgkmcnt(0)
-; GFX11-TRUE16-NEXT: v_mov_b16_e32 v32.h, 0
; GFX11-TRUE16-NEXT: v_cmp_ne_u32_e32 vcc_lo, 0, v8
-; GFX11-TRUE16-NEXT: v_mov_b16_e32 v32.l, v7.h
-; GFX11-TRUE16-NEXT: v_mov_b16_e32 v33.l, v6.h
-; GFX11-TRUE16-NEXT: v_mov_b16_e32 v34.l, v5.h
-; GFX11-TRUE16-NEXT: v_mov_b16_e32 v33.h, v32.h
-; GFX11-TRUE16-NEXT: v_mov_b16_e32 v34.h, v32.h
-; GFX11-TRUE16-NEXT: v_mov_b16_e32 v35.l, v4.h
-; GFX11-TRUE16-NEXT: v_mov_b16_e32 v35.h, v32.h
-; GFX11-TRUE16-NEXT: v_mov_b16_e32 v36.l, v3.h
-; GFX11-TRUE16-NEXT: v_mov_b16_e32 v36.h, v32.h
-; GFX11-TRUE16-NEXT: v_mov_b16_e32 v37.l, v2.h
-; GFX11-TRUE16-NEXT: v_mov_b16_e32 v37.h, v32.h
-; GFX11-TRUE16-NEXT: v_mov_b16_e32 v38.l, v1.h
-; GFX11-TRUE16-NEXT: v_mov_b16_e32 v38.h, v32.h
-; GFX11-TRUE16-NEXT: v_mov_b16_e32 v39.l, v0.h
-; GFX11-TRUE16-NEXT: v_mov_b16_e32 v39.h, v32.h
-; GFX11-TRUE16-NEXT: v_and_b32_e32 v55, 0xffff, v0
-; GFX11-TRUE16-NEXT: v_and_b32_e32 v54, 0xffff, v1
-; GFX11-TRUE16-NEXT: v_and_b32_e32 v53, 0xffff, v2
-; GFX11-TRUE16-NEXT: v_and_b32_e32 v52, 0xffff, v3
-; GFX11-TRUE16-NEXT: v_and_b32_e32 v51, 0xffff, v4
-; GFX11-TRUE16-NEXT: v_and_b32_e32 v50, 0xffff, v5
-; GFX11-TRUE16-NEXT: v_and_b32_e32 v49, 0xffff, v6
-; GFX11-TRUE16-NEXT: v_and_b32_e32 v48, 0xffff, v7
-; GFX11-TRUE16-NEXT: s_lshr_b32 s41, s29, 16
-; GFX11-TRUE16-NEXT: s_lshr_b32 s15, s28, 16
-; GFX11-TRUE16-NEXT: s_lshr_b32 s42, s27, 16
-; GFX11-TRUE16-NEXT: s_lshr_b32 s14, s26, 16
-; GFX11-TRUE16-NEXT: s_lshr_b32 s13, s25, 16
-; GFX11-TRUE16-NEXT: s_lshr_b32 s12, s24, 16
-; GFX11-TRUE16-NEXT: s_lshr_b32 s11, s23, 16
-; GFX11-TRUE16-NEXT: s_lshr_b32 s10, s22, 16
-; GFX11-TRUE16-NEXT: s_lshr_b32 s9, s21, 16
-; GFX11-TRUE16-NEXT: s_lshr_b32 s8, s20, 16
-; GFX11-TRUE16-NEXT: s_lshr_b32 s7, s19, 16
-; GFX11-TRUE16-NEXT: s_lshr_b32 s6, s18, 16
-; GFX11-TRUE16-NEXT: s_lshr_b32 s5, s17, 16
-; GFX11-TRUE16-NEXT: s_lshr_b32 s4, s16, 16
-; GFX11-TRUE16-NEXT: s_lshr_b32 s43, s3, 16
-; GFX11-TRUE16-NEXT: s_lshr_b32 s44, s2, 16
-; GFX11-TRUE16-NEXT: s_lshr_b32 s45, s1, 16
-; GFX11-TRUE16-NEXT: s_lshr_b32 s46, s0, 16
-; GFX11-TRUE16-NEXT: s_mov_b32 s40, 0
-; GFX11-TRUE16-NEXT: s_pack_ll_b32_b16 s0, s0, s46
-; GFX11-TRUE16-NEXT: s_pack_ll_b32_b16 s1, s1, s45
-; GFX11-TRUE16-NEXT: s_pack_ll_b32_b16 s2, s2, s44
-; GFX11-TRUE16-NEXT: s_pack_ll_b32_b16 s3, s3, s43
-; GFX11-TRUE16-NEXT: s_pack_ll_b32_b16 s4, s16, s4
-; GFX11-TRUE16-NEXT: s_pack_ll_b32_b16 s5, s17, s5
-; GFX11-TRUE16-NEXT: s_pack_ll_b32_b16 s6, s18, s6
-; GFX11-TRUE16-NEXT: s_pack_ll_b32_b16 s7, s19, s7
-; GFX11-TRUE16-NEXT: s_pack_ll_b32_b16 s8, s20, s8
-; GFX11-TRUE16-NEXT: s_pack_ll_b32_b16 s9, s21, s9
-; GFX11-TRUE16-NEXT: s_pack_ll_b32_b16 s10, s22, s10
-; GFX11-TRUE16-NEXT: s_pack_ll_b32_b16 s11, s23, s11
-; GFX11-TRUE16-NEXT: s_pack_ll_b32_b16 s12, s24, s12
-; GFX11-TRUE16-NEXT: s_pack_ll_b32_b16 s13, s25, s13
-; GFX11-TRUE16-NEXT: s_pack_ll_b32_b16 s14, s26, s14
-; GFX11-TRUE16-NEXT: s_pack_ll_b32_b16 s17, s27, s42
-; GFX11-TRUE16-NEXT: s_pack_ll_b32_b16 s15, s28, s15
-; GFX11-TRUE16-NEXT: s_pack_ll_b32_b16 s16, s29, s41
+; GFX11-TRUE16-NEXT: s_clause 0x1f
+; GFX11-TRUE16-NEXT: scratch_store_b32 off, v40, s32 offset:316
+; GFX11-TRUE16-NEXT: scratch_store_b32 off, v41, s32 offset:312
+; GFX11-TRUE16-NEXT: scratch_store_b32 off, v42, s32 offset:308
+; GFX11-TRUE16-NEXT: scratch_store_b32 off, v43, s32 offset:304
+; GFX11-TRUE16-NEXT: scratch_store_b32 off, v44, s32 offset:300
+; GFX11-TRUE16-NEXT: scratch_store_b32 off, v45, s32 offset:296
+; GFX11-TRUE16-NEXT: scratch_store_b32 off, v46, s32 offset:292
+; GFX11-TRUE16-NEXT: scratch_store_b32 off, v47, s32 offset:288
+; GFX11-TRUE16-NEXT: scratch_store_b32 off, v56, s32 offset:284
+; GFX11-TRUE16-NEXT: scratch_store_b32 off, v57, s32 offset:280
+; GFX11-TRUE16-NEXT: scratch_store_b32 off, v58, s32 offset:276
+; GFX11-TRUE16-NEXT: scratch_store_b32 off, v59, s32 offset:272
+; GFX11-TRUE16-NEXT: scratch_store_b32 off, v60, s32 offset:268
+; GFX11-TRUE16-NEXT: scratch_store_b32 off, v61, s32 offset:264
+; GFX11-TRUE16-NEXT: scratch_store_b32 off, v62, s32 offset:260
+; GFX11-TRUE16-NEXT: scratch_store_b32 off, v63, s32 offset:256
+; GFX11-TRUE16-NEXT: scratch_store_b32 off, v72, s32 offset:252
+; GFX11-TRUE16-NEXT: scratch_store_b32 off, v73, s32 offset:248
+; GFX11-TRUE16-NEXT: scratch_store_b32 off, v74, s32 offset:244
+; GFX11-TRUE16-NEXT: scratch_store_b32 off, v75, s32 offset:240
+; GFX11-TRUE16-NEXT: scratch_store_b32 off, v76, s32 offset:236
+; GFX11-TRUE16-NEXT: scratch_store_b32 off, v77, s32 offset:232
+; GFX11-TRUE16-NEXT: scratch_store_b32 off, v78, s32 offset:228
+; GFX11-TRUE16-NEXT: scratch_store_b32 off, v79, s32 offset:224
+; GFX11-TRUE16-NEXT: scratch_store_b32 off, v88, s32 offset:220
+; GFX11-TRUE16-NEXT: scratch_store_b32 off, v89, s32 offset:216
+; GFX11-TRUE16-NEXT: scratch_store_b32 off, v90, s32 offset:212
+; GFX11-TRUE16-NEXT: scratch_store_b32 off, v91, s32 offset:208
+; GFX11-TRUE16-NEXT: scratch_store_b32 off, v92, s32 offset:204
+; GFX11-TRUE16-NEXT: scratch_store_b32 off, v93, s32 offset:200
+; GFX11-TRUE16-NEXT: scratch_store_b32 off, v94, s32 offset:196
+; GFX11-TRUE16-NEXT: scratch_store_b32 off, v95, s32 offset:192
+; GFX11-TRUE16-NEXT: s_clause 0x1f
+; GFX11-TRUE16-NEXT: scratch_store_b32 off, v104, s32 offset:188
+; GFX11-TRUE16-NEXT: scratch_store_b32 off, v105, s32 offset:184
+; GFX11-TRUE16-NEXT: scratch_store_b32 off, v106, s32 offset:180
+; GFX11-TRUE16-NEXT: scratch_store_b32 off, v107, s32 offset:176
+; GFX11-TRUE16-NEXT: scratch_store_b32 off, v108, s32 offset:172
+; GFX11-TRUE16-NEXT: scratch_store_b32 off, v109, s32 offset:168
+; GFX11-TRUE16-NEXT: scratch_store_b32 off, v110, s32 offset:164
+; GFX11-TRUE16-NEXT: scratch_store_b32 off, v111, s32 offset:160
+; GFX11-TRUE16-NEXT: scratch_store_b32 off, v120, s32 offset:156
+; GFX11-TRUE16-NEXT: scratch_store_b32 off, v121, s32 offset:152
+; GFX11-TRUE16-NEXT: scratch_store_b32 off, v122, s32 offset:148
+; GFX11-TRUE16-NEXT: scratch_store_b32 off, v123, s32 offset:144
+; GFX11-TRUE16-NEXT: scratch_store_b32 off, v124, s32 offset:140
+; GFX11-TRUE16-NEXT: scratch_store_b32 off, v125, s32 offset:136
+; GFX11-TRUE16-NEXT: scratch_store_b32 off, v126, s32 offset:132
+; GFX11-TRUE16-NEXT: scratch_store_b32 off, v127, s32 offset:128
+; GFX11-TRUE16-NEXT: scratch_store_b32 off, v136, s32 offset:124
+; GFX11-TRUE16-NEXT: scratch_store_b32 off, v137, s32 offset:120
+; GFX11-TRUE16-NEXT: scratch_store_b32 off, v138, s32 offset:116
+; GFX11-TRUE16-NEXT: scratch_store_b32 off, v139, s32 offset:112
+; GFX11-TRUE16-NEXT: scratch_store_b32 off, v140, s32 offset:108
+; GFX11-TRUE16-NEXT: scratch_store_b32 off, v141, s32 offset:104
+; GFX11-TRUE16-NEXT: scratch_store_b32 off, v142, s32 offset:100
+; GFX11-TRUE16-NEXT: scratch_store_b32 off, v143, s32 offset:96
+; GFX11-TRUE16-NEXT: scratch_store_b32 off, v152, s32 offset:92
+; GFX11-TRUE16-NEXT: scratch_store_b32 off, v153, s32 offset:88
+; GFX11-TRUE16-NEXT: scratch_store_b32 off, v154, s32 offset:84
+; GFX11-TRUE16-NEXT: scratch_store_b32 off, v155, s32 offset:80
+; GFX11-TRUE16-NEXT: scratch_store_b32 off, v156, s32 offset:76
+; GFX11-TRUE16-NEXT: scratch_store_b32 off, v157, s32 offset:72
+; GFX11-TRUE16-NEXT: scratch_store_b32 off, v158, s32 offset:68
+; GFX11-TRUE16-NEXT: scratch_store_b32 off, v159, s32 offset:64
+; GFX11-TRUE16-NEXT: s_clause 0xf
+; GFX11-TRUE16-NEXT: scratch_store_b32 off, v168, s32 offset:60
+; GFX11-TRUE16-NEXT: scratch_store_b32 off, v169, s32 offset:56
+; GFX11-TRUE16-NEXT: scratch_store_b32 off, v170, s32 offset:52
+; GFX11-TRUE16-NEXT: scratch_store_b32 off, v171, s32 offset:48
+; GFX11-TRUE16-NEXT: scratch_store_b32 off, v172, s32 offset:44
+; GFX11-TRUE16-NEXT: scratch_store_b32 off, v173, s32 offset:40
+; GFX11-TRUE16-NEXT: scratch_store_b32 off, v174, s32 offset:36
+; GFX11-TRUE16-NEXT: scratch_store_b32 off, v175, s32 offset:32
+; GFX11-TRUE16-NEXT: scratch_store_b32 off, v184, s32 offset:28
+; GFX11-TRUE16-NEXT: scratch_store_b32 off, v185, s32 offset:24
+; GFX11-TRUE16-NEXT: scratch_store_b32 off, v186, s32 offset:20
+; GFX11-TRUE16-NEXT: scratch_store_b32 off, v187, s32 offset:16
+; GFX11-TRUE16-NEXT: scratch_store_b32 off, v188, s32 offset:12
+; GFX11-TRUE16-NEXT: scratch_store_b32 off, v189, s32 offset:8
+; GFX11-TRUE16-NEXT: scratch_store_b32 off, v190, s32 offset:4
+; GFX11-TRUE16-NEXT: scratch_store_b32 off, v191, s32
+; GFX11-TRUE16-NEXT: v_dual_mov_b32 v25, v7 :: v_dual_mov_b32 v186, v6
+; GFX11-TRUE16-NEXT: v_dual_mov_b32 v187, v5 :: v_dual_mov_b32 v188, v4
+; GFX11-TRUE16-NEXT: v_dual_mov_b32 v189, v3 :: v_dual_mov_b32 v190, v2
+; GFX11-TRUE16-NEXT: v_mov_b32_e32 v191, v1
+; GFX11-TRUE16-NEXT: v_mov_b32_e32 v185, v0
+; GFX11-TRUE16-NEXT: s_lshr_b32 s15, s29, 16
+; GFX11-TRUE16-NEXT: s_lshr_b32 s14, s28, 16
+; GFX11-TRUE16-NEXT: s_lshr_b32 s13, s27, 16
+; GFX11-TRUE16-NEXT: s_lshr_b32 s12, s26, 16
+; GFX11-TRUE16-NEXT: s_lshr_b32 s11, s25, 16
+; GFX11-TRUE16-NEXT: s_lshr_b32 s10, s24, 16
+; GFX11-TRUE16-NEXT: s_lshr_b32 s9, s23, 16
+; GFX11-TRUE16-NEXT: s_lshr_b32 s8, s22, 16
+; GFX11-TRUE16-NEXT: s_lshr_b32 s7, s21, 16
+; GFX11-TRUE16-NEXT: s_lshr_b32 s6, s20, 16
+; GFX11-TRUE16-NEXT: s_lshr_b32 s5, s19, 16
+; GFX11-TRUE16-NEXT: s_lshr_b32 s4, s18, 16
+; GFX11-TRUE16-NEXT: s_lshr_b32 s43, s17, 16
+; GFX11-TRUE16-NEXT: s_lshr_b32 s44, s16, 16
+; GFX11-TRUE16-NEXT: s_lshr_b32 s45, s3, 16
+; GFX11-TRUE16-NEXT: s_lshr_b32 s46, s2, 16
+; GFX11-TRUE16-NEXT: s_lshr_b32 s41, s1, 16
+; GFX11-TRUE16-NEXT: s_lshr_b32 s40, s0, 16
+; GFX11-TRUE16-NEXT: s_mov_b32 s42, 0
+; GFX11-TRUE16-NEXT: s_pack_ll_b32_b16 s40, s0, s40
+; GFX11-TRUE16-NEXT: s_pack_ll_b32_b16 s41, s1, s41
+; GFX11-TRUE16-NEXT: s_pack_ll_b32_b16 s0, s2, s46
+; GFX11-TRUE16-NEXT: s_pack_ll_b32_b16 s1, s3, s45
+; GFX11-TRUE16-NEXT: s_pack_ll_b32_b16 s2, s16, s44
+; GFX11-TRUE16-NEXT: s_pack_ll_b32_b16 s3, s17, s43
+; GFX11-TRUE16-NEXT: s_pack_ll_b32_b16 s4, s18, s4
+; GFX11-TRUE16-NEXT: s_pack_ll_b32_b16 s5, s19, s5
+; GFX11-TRUE16-NEXT: s_pack_ll_b32_b16 s6, s20, s6
+; GFX11-TRUE16-NEXT: s_pack_ll_b32_b16 s7, s21, s7
+; GFX11-TRUE16-NEXT: s_pack_ll_b32_b16 s8, s22, s8
+; GFX11-TRUE16-NEXT: s_pack_ll_b32_b16 s9, s23, s9
+; GFX11-TRUE16-NEXT: s_pack_ll_b32_b16 s10, s24, s10
+; GFX11-TRUE16-NEXT: s_pack_ll_b32_b16 s11, s25, s11
+; GFX11-TRUE16-NEXT: s_pack_ll_b32_b16 s12, s26, s12
+; GFX11-TRUE16-NEXT: s_pack_ll_b32_b16 s13, s27, s13
+; GFX11-TRUE16-NEXT: s_pack_ll_b32_b16 s14, s28, s14
+; GFX11-TRUE16-NEXT: s_pack_ll_b32_b16 s15, s29, s15
; GFX11-TRUE16-NEXT: s_and_b32 s47, vcc_lo, exec_lo
; GFX11-TRUE16-NEXT: s_cbranch_scc0 .LBB19_4
; GFX11-TRUE16-NEXT: ; %bb.1: ; %cmp.false
-; GFX11-TRUE16-NEXT: v_lshl_or_b32 v18, v39, 16, v55
-; GFX11-TRUE16-NEXT: v_lshl_or_b32 v19, v38, 16, v54
-; GFX11-TRUE16-NEXT: v_lshl_or_b32 v20, v37, 16, v53
-; GFX11-TRUE16-NEXT: v_lshl_or_b32 v21, v36, 16, v52
-; GFX11-TRUE16-NEXT: v_lshl_or_b32 v22, v35, 16, v51
-; GFX11-TRUE16-NEXT: v_lshl_or_b32 v23, v34, 16, v50
-; GFX11-TRUE16-NEXT: v_lshl_or_b32 v24, v33, 16, v49
-; GFX11-TRUE16-NEXT: v_lshl_or_b32 v25, v32, 16, v48
-; GFX11-TRUE16-NEXT: v_dual_mov_b32 v0, s0 :: v_dual_mov_b32 v1, s1
-; GFX11-TRUE16-NEXT: v_dual_mov_b32 v2, s2 :: v_dual_mov_b32 v3, s3
-; GFX11-TRUE16-NEXT: v_dual_mov_b32 v4, s4 :: v_dual_mov_b32 v5, s5
-; GFX11-TRUE16-NEXT: v_dual_mov_b32 v6, s6 :: v_dual_mov_b32 v7, s7
-; GFX11-TRUE16-NEXT: v_dual_mov_b32 v8, s8 :: v_dual_mov_b32 v9, s9
-; GFX11-TRUE16-NEXT: v_dual_mov_b32 v10, s10 :: v_dual_mov_b32 v11, s11
-; GFX11-TRUE16-NEXT: v_dual_mov_b32 v12, s12 :: v_dual_mov_b32 v13, s13
-; GFX11-TRUE16-NEXT: v_dual_mov_b32 v14, s14 :: v_dual_mov_b32 v15, s17
-; GFX11-TRUE16-NEXT: v_dual_mov_b32 v16, s15 :: v_dual_mov_b32 v17, s16
-; GFX11-TRUE16-NEXT: s_and_not1_b32 vcc_lo, exec_lo, s40
+; GFX11-TRUE16-NEXT: v_dual_mov_b32 v0, s40 :: v_dual_mov_b32 v5, s0
+; GFX11-TRUE16-NEXT: v_dual_mov_b32 v2, s41 :: v_dual_mov_b32 v9, s1
+; GFX11-TRUE16-NEXT: v_dual_mov_b32 v14, s2 :: v_dual_mov_b32 v27, s4
+; GFX11-TRUE16-NEXT: v_dual_mov_b32 v20, s3 :: v_dual_mov_b32 v35, s5
+; GFX11-TRUE16-NEXT: v_dual_mov_b32 v44, s6 :: v_dual_mov_b32 v65, s8
+; GFX11-TRUE16-NEXT: v_dual_mov_b32 v54, s7 :: v_dual_mov_b32 v77, s9
+; GFX11-TRUE16-NEXT: v_dual_mov_b32 v90, s10 :: v_dual_mov_b32 v119, s12
+; GFX11-TRUE16-NEXT: v_dual_mov_b32 v104, s11 :: v_dual_mov_b32 v135, s13
+; GFX11-TRUE16-NEXT: v_mov_b32_e32 v152, s14
+; GFX11-TRUE16-NEXT: v_mov_b32_e32 v170, s15
+; GFX11-TRUE16-NEXT: s_and_not1_b32 vcc_lo, exec_lo, s42
; GFX11-TRUE16-NEXT: s_cbranch_vccnz .LBB19_3
; GFX11-TRUE16-NEXT: .LBB19_2: ; %cmp.true
-; GFX11-TRUE16-NEXT: v_lshl_or_b32 v18, v39, 16, v55
-; GFX11-TRUE16-NEXT: v_lshl_or_b32 v19, v38, 16, v54
-; GFX11-TRUE16-NEXT: v_lshl_or_b32 v20, v37, 16, v53
-; GFX11-TRUE16-NEXT: v_lshl_or_b32 v21, v36, 16, v52
-; GFX11-TRUE16-NEXT: v_lshl_or_b32 v22, v35, 16, v51
-; GFX11-TRUE16-NEXT: v_lshl_or_b32 v23, v34, 16, v50
-; GFX11-TRUE16-NEXT: v_lshl_or_b32 v24, v33, 16, v49
-; GFX11-TRUE16-NEXT: v_lshl_or_b32 v25, v32, 16, v48
-; GFX11-TRUE16-NEXT: v_pk_add_f16 v0, 0x200, s0 op_sel_hi:[0,1]
-; GFX11-TRUE16-NEXT: v_pk_add_f16 v1, 0x200, s1 op_sel_hi:[0,1]
-; GFX11-TRUE16-NEXT: v_pk_add_f16 v2, 0x200, s2 op_sel_hi:[0,1]
-; GFX11-TRUE16-NEXT: v_pk_add_f16 v3, 0x200, s3 op_sel_hi:[0,1]
-; GFX11-TRUE16-NEXT: v_pk_add_f16 v4, 0x200, s4 op_sel_hi:[0,1]
-; GFX11-TRUE16-NEXT: v_pk_add_f16 v5, 0x200, s5 op_sel_hi:[0,1]
-; GFX11-TRUE16-NEXT: v_pk_add_f16 v6, 0x200, s6 op_sel_hi:[0,1]
-; GFX11-TRUE16-NEXT: v_pk_add_f16 v7, 0x200, s7 op_sel_hi:[0,1]
-; GFX11-TRUE16-NEXT: v_pk_add_f16 v8, 0x200, s8 op_sel_hi:[0,1]
-; GFX11-TRUE16-NEXT: v_pk_add_f16 v9, 0x200, s9 op_sel_hi:[0,1]
-; GFX11-TRUE16-NEXT: v_pk_add_f16 v10, 0x200, s10 op_sel_hi:[0,1]
-; GFX11-TRUE16-NEXT: v_pk_add_f16 v11, 0x200, s11 op_sel_hi:[0,1]
-; GFX11-TRUE16-NEXT: v_pk_add_f16 v12, 0x200, s12 op_sel_hi:[0,1]
-; GFX11-TRUE16-NEXT: v_pk_add_f16 v13, 0x200, s13 op_sel_hi:[0,1]
-; GFX11-TRUE16-NEXT: v_pk_add_f16 v14, 0x200, s14 op_sel_hi:[0,1]
-; GFX11-TRUE16-NEXT: v_pk_add_f16 v15, 0x200, s17 op_sel_hi:[0,1]
-; GFX11-TRUE16-NEXT: v_pk_add_f16 v16, 0x200, s15 op_sel_hi:[0,1]
-; GFX11-TRUE16-NEXT: v_pk_add_f16 v17, 0x200, s16 op_sel_hi:[0,1]
-; GFX11-TRUE16-NEXT: v_pk_add_f16 v18, 0x200, v18 op_sel_hi:[0,1]
-; GFX11-TRUE16-NEXT: v_pk_add_f16 v19, 0x200, v19 op_sel_hi:[0,1]
-; GFX11-TRUE16-NEXT: v_pk_add_f16 v20, 0x200, v20 op_sel_hi:[0,1]
-; GFX11-TRUE16-NEXT: v_pk_add_f16 v21, 0x200, v21 op_sel_hi:[0,1]
-; GFX11-TRUE16-NEXT: v_pk_add_f16 v22, 0x200, v22 op_sel_hi:[0,1]
-; GFX11-TRUE16-NEXT: v_pk_add_f16 v23, 0x200, v23 op_sel_hi:[0,1]
-; GFX11-TRUE16-NEXT: v_pk_add_f16 v24, 0x200, v24 op_sel_hi:[0,1]
+; GFX11-TRUE16-NEXT: v_pk_add_f16 v0, 0x200, s40 op_sel_hi:[0,1]
+; GFX11-TRUE16-NEXT: v_pk_add_f16 v2, 0x200, s41 op_sel_hi:[0,1]
+; GFX11-TRUE16-NEXT: v_pk_add_f16 v185, 0x200, v185 op_sel_hi:[0,1]
+; GFX11-TRUE16-NEXT: v_pk_add_f16 v191, 0x200, v191 op_sel_hi:[0,1]
+; GFX11-TRUE16-NEXT: v_pk_add_f16 v190, 0x200, v190 op_sel_hi:[0,1]
+; GFX11-TRUE16-NEXT: v_pk_add_f16 v189, 0x200, v189 op_sel_hi:[0,1]
+; GFX11-TRUE16-NEXT: v_pk_add_f16 v188, 0x200, v188 op_sel_hi:[0,1]
+; GFX11-TRUE16-NEXT: v_pk_add_f16 v187, 0x200, v187 op_sel_hi:[0,1]
+; GFX11-TRUE16-NEXT: v_pk_add_f16 v186, 0x200, v186 op_sel_hi:[0,1]
; GFX11-TRUE16-NEXT: v_pk_add_f16 v25, 0x200, v25 op_sel_hi:[0,1]
+; GFX11-TRUE16-NEXT: v_pk_add_f16 v5, 0x200, s0 op_sel_hi:[0,1]
+; GFX11-TRUE16-NEXT: v_pk_add_f16 v9, 0x200, s1 op_sel_hi:[0,1]
+; GFX11-TRUE16-NEXT: v_pk_add_f16 v14, 0x200, s2 op_sel_hi:[0,1]
+; GFX11-TRUE16-NEXT: v_pk_add_f16 v20, 0x200, s3 op_sel_hi:[0,1]
+; GFX11-TRUE16-NEXT: v_pk_add_f16 v27, 0x200, s4 op_sel_hi:[0,1]
+; GFX11-TRUE16-NEXT: v_pk_add_f16 v35, 0x200, s5 op_sel_hi:[0,1]
+; GFX11-TRUE16-NEXT: v_pk_add_f16 v44, 0x200, s6 op_sel_hi:[0,1]
+; GFX11-TRUE16-NEXT: v_pk_add_f16 v54, 0x200, s7 op_sel_hi:[0,1]
+; GFX11-TRUE16-NEXT: v_pk_add_f16 v65, 0x200, s8 op_sel_hi:[0,1]
+; GFX11-TRUE16-NEXT: v_pk_add_f16 v77, 0x200, s9 op_sel_hi:[0,1]
+; GFX11-TRUE16-NEXT: v_pk_add_f16 v90, 0x200, s10 op_sel_hi:[0,1]
+; GFX11-TRUE16-NEXT: v_pk_add_f16 v104, 0x200, s11 op_sel_hi:[0,1]
+; GFX11-TRUE16-NEXT: v_pk_add_f16 v119, 0x200, s12 op_sel_hi:[0,1]
+; GFX11-TRUE16-NEXT: v_pk_add_f16 v135, 0x200, s13 op_sel_hi:[0,1]
+; GFX11-TRUE16-NEXT: v_pk_add_f16 v152, 0x200, s14 op_sel_hi:[0,1]
+; GFX11-TRUE16-NEXT: v_pk_add_f16 v170, 0x200, s15 op_sel_hi:[0,1]
; GFX11-TRUE16-NEXT: .LBB19_3: ; %end
+; GFX11-TRUE16-NEXT: v_dual_mov_b32 v1, v2 :: v_dual_mov_b32 v2, v5
+; GFX11-TRUE16-NEXT: v_dual_mov_b32 v5, v20 :: v_dual_mov_b32 v6, v27
+; GFX11-TRUE16-NEXT: v_dual_mov_b32 v7, v35 :: v_dual_mov_b32 v8, v44
+; GFX11-TRUE16-NEXT: v_dual_mov_b32 v11, v77 :: v_dual_mov_b32 v12, v90
+; GFX11-TRUE16-NEXT: v_mov_b32_e32 v13, v104
+; GFX11-TRUE16-NEXT: v_dual_mov_b32 v15, v135 :: v_dual_mov_b32 v16, v152
+; GFX11-TRUE16-NEXT: v_dual_mov_b32 v17, v170 :: v_dual_mov_b32 v18, v185
+; GFX11-TRUE16-NEXT: v_dual_mov_b32 v19, v191 :: v_dual_mov_b32 v20, v190
+; GFX11-TRUE16-NEXT: v_dual_mov_b32 v21, v189 :: v_dual_mov_b32 v22, v188
+; GFX11-TRUE16-NEXT: v_dual_mov_b32 v23, v187 :: v_dual_mov_b32 v24, v186
+; GFX11-TRUE16-NEXT: s_clause 0x1f
+; GFX11-TRUE16-NEXT: scratch_load_b32 v191, off, s32
+; GFX11-TRUE16-NEXT: scratch_load_b32 v190, off, s32 offset:4
+; GFX11-TRUE16-NEXT: scratch_load_b32 v189, off, s32 offset:8
+; GFX11-TRUE16-NEXT: scratch_load_b32 v188, off, s32 offset:12
+; GFX11-TRUE16-NEXT: scratch_load_b32 v187, off, s32 offset:16
+; GFX11-TRUE16-NEXT: scratch_load_b32 v186, off, s32 offset:20
+; GFX11-TRUE16-NEXT: scratch_load_b32 v185, off, s32 offset:24
+; GFX11-TRUE16-NEXT: scratch_load_b32 v184, off, s32 offset:28
+; GFX11-TRUE16-NEXT: scratch_load_b32 v175, off, s32 offset:32
+; GFX11-TRUE16-NEXT: scratch_load_b32 v174, off, s32 offset:36
+; GFX11-TRUE16-NEXT: scratch_load_b32 v173, off, s32 offset:40
+; GFX11-TRUE16-NEXT: scratch_load_b32 v172, off, s32 offset:44
+; GFX11-TRUE16-NEXT: scratch_load_b32 v171, off, s32 offset:48
+; GFX11-TRUE16-NEXT: scratch_load_b32 v170, off, s32 offset:52
+; GFX11-TRUE16-NEXT: scratch_load_b32 v169, off, s32 offset:56
+; GFX11-TRUE16-NEXT: scratch_load_b32 v168, off, s32 offset:60
+; GFX11-TRUE16-NEXT: scratch_load_b32 v159, off, s32 offset:64
+; GFX11-TRUE16-NEXT: scratch_load_b32 v158, off, s32 offset:68
+; GFX11-TRUE16-NEXT: scratch_load_b32 v157, off, s32 offset:72
+; GFX11-TRUE16-NEXT: scratch_load_b32 v156, off, s32 offset:76
+; GFX11-TRUE16-NEXT: scratch_load_b32 v155, off, s32 offset:80
+; GFX11-TRUE16-NEXT: scratch_load_b32 v154, off, s32 offset:84
+; GFX11-TRUE16-NEXT: scratch_load_b32 v153, off, s32 offset:88
+; GFX11-TRUE16-NEXT: scratch_load_b32 v152, off, s32 offset:92
+; GFX11-TRUE16-NEXT: scratch_load_b32 v143, off, s32 offset:96
+; GFX11-TRUE16-NEXT: scratch_load_b32 v142, off, s32 offset:100
+; GFX11-TRUE16-NEXT: scratch_load_b32 v141, off, s32 offset:104
+; GFX11-TRUE16-NEXT: scratch_load_b32 v140, off, s32 offset:108
+; GFX11-TRUE16-NEXT: scratch_load_b32 v139, off, s32 offset:112
+; GFX11-TRUE16-NEXT: scratch_load_b32 v138, off, s32 offset:116
+; GFX11-TRUE16-NEXT: scratch_load_b32 v137, off, s32 offset:120
+; GFX11-TRUE16-NEXT: scratch_load_b32 v136, off, s32 offset:124
+; GFX11-TRUE16-NEXT: s_clause 0x1f
+; GFX11-TRUE16-NEXT: scratch_load_b32 v127, off, s32 offset:128
+; GFX11-TRUE16-NEXT: scratch_load_b32 v126, off, s32 offset:132
+; GFX11-TRUE16-NEXT: scratch_load_b32 v125, off, s32 offset:136
+; GFX11-TRUE16-NEXT: scratch_load_b32 v124, off, s32 offset:140
+; GFX11-TRUE16-NEXT: scratch_load_b32 v123, off, s32 offset:144
+; GFX11-TRUE16-NEXT: scratch_load_b32 v122, off, s32 offset:148
+; GFX11-TRUE16-NEXT: scratch_load_b32 v121, off, s32 offset:152
+; GFX11-TRUE16-NEXT: scratch_load_b32 v120, off, s32 offset:156
+; GFX11-TRUE16-NEXT: scratch_load_b32 v111, off, s32 offset:160
+; GFX11-TRUE16-NEXT: scratch_load_b32 v110, off, s32 offset:164
+; GFX11-TRUE16-NEXT: scratch_load_b32 v109, off, s32 offset:168
+; GFX11-TRUE16-NEXT: scratch_load_b32 v108, off, s32 offset:172
+; GFX11-TRUE16-NEXT: scratch_load_b32 v107, off, s32 offset:176
+; GFX11-TRUE16-NEXT: scratch_load_b32 v106, off, s32 offset:180
+; GFX11-TRUE16-NEXT: scratch_load_b32 v105, off, s32 offset:184
+; GFX11-TRUE16-NEXT: scratch_load_b32 v104, off, s32 offset:188
+; GFX11-TRUE16-NEXT: scratch_load_b32 v95, off, s32 offset:192
+; GFX11-TRUE16-NEXT: scratch_load_b32 v94, off, s32 offset:196
+; GFX11-TRUE16-NEXT: scratch_load_b32 v93, off, s32 offset:200
+; GFX11-TRUE16-NEXT: scratch_load_b32 v92, off, s32 offset:204
+; GFX11-TRUE16-NEXT: scratch_load_b32 v91, off, s32 offset:208
+; GFX11-TRUE16-NEXT: scratch_load_b32 v90, off, s32 offset:212
+; GFX11-TRUE16-NEXT: scratch_load_b32 v89, off, s32 offset:216
+; GFX11-TRUE16-NEXT: scratch_load_b32 v88, off, s32 offset:220
+; GFX11-TRUE16-NEXT: scratch_load_b32 v79, off, s32 offset:224
+; GFX11-TRUE16-NEXT: scratch_load_b32 v78, off, s32 offset:228
+; GFX11-TRUE16-NEXT: scratch_load_b32 v77, off, s32 offset:232
+; GFX11-TRUE16-NEXT: scratch_load_b32 v76, off, s32 offset:236
+; GFX11-TRUE16-NEXT: scratch_load_b32 v75, off, s32 offset:240
+; GFX11-TRUE16-NEXT: scratch_load_b32 v74, off, s32 offset:244
+; GFX11-TRUE16-NEXT: scratch_load_b32 v73, off, s32 offset:248
+; GFX11-TRUE16-NEXT: scratch_load_b32 v72, off, s32 offset:252
+; GFX11-TRUE16-NEXT: s_clause 0xf
+; GFX11-TRUE16-NEXT: scratch_load_b32 v63, off, s32 offset:256
+; GFX11-TRUE16-NEXT: scratch_load_b32 v62, off, s32 offset:260
+; GFX11-TRUE16-NEXT: scratch_load_b32 v61, off, s32 offset:264
+; GFX11-TRUE16-NEXT: scratch_load_b32 v60, off, s32 offset:268
+; GFX11-TRUE16-NEXT: scratch_load_b32 v59, off, s32 offset:272
+; GFX11-TRUE16-NEXT: scratch_load_b32 v58, off, s32 offset:276
+; GFX11-TRUE16-NEXT: scratch_load_b32 v57, off, s32 offset:280
+; GFX11-TRUE16-NEXT: scratch_load_b32 v56, off, s32 offset:284
+; GFX11-TRUE16-NEXT: scratch_load_b32 v47, off, s32 offset:288
+; GFX11-TRUE16-NEXT: scratch_load_b32 v46, off, s32 offset:292
+; GFX11-TRUE16-NEXT: scratch_load_b32 v45, off, s32 offset:296
+; GFX11-TRUE16-NEXT: scratch_load_b32 v44, off, s32 offset:300
+; GFX11-TRUE16-NEXT: scratch_load_b32 v43, off, s32 offset:304
+; GFX11-TRUE16-NEXT: scratch_load_b32 v42, off, s32 offset:308
+; GFX11-TRUE16-NEXT: scratch_load_b32 v41, off, s32 offset:312
+; GFX11-TRUE16-NEXT: scratch_load_b32 v40, off, s32 offset:316
+; GFX11-TRUE16-NEXT: v_dual_mov_b32 v3, v9 :: v_dual_mov_b32 v4, v14
+; GFX11-TRUE16-NEXT: v_dual_mov_b32 v9, v54 :: v_dual_mov_b32 v10, v65
+; GFX11-TRUE16-NEXT: v_mov_b32_e32 v14, v119
+; GFX11-TRUE16-NEXT: s_waitcnt vmcnt(0)
; GFX11-TRUE16-NEXT: s_setpc_b64 s[30:31]
; GFX11-TRUE16-NEXT: .LBB19_4:
+; GFX11-TRUE16-NEXT: v_mov_b32_e32 v53, v25
; GFX11-TRUE16-NEXT: ; implicit-def: $vgpr0_vgpr1_vgpr2_vgpr3_vgpr4_vgpr5_vgpr6_vgpr7_vgpr8_vgpr9_vgpr10_vgpr11_vgpr12_vgpr13_vgpr14_vgpr15_vgpr16_vgpr17_vgpr18_vgpr19_vgpr20_vgpr21_vgpr22_vgpr23_vgpr24_vgpr25_vgpr26_vgpr27_vgpr28_vgpr29_vgpr30_vgpr31
+; GFX11-TRUE16-NEXT: ; implicit-def: $vgpr1_vgpr2_vgpr3_vgpr4_vgpr5_vgpr6_vgpr7_vgpr8_vgpr9_vgpr10_vgpr11_vgpr12_vgpr13_vgpr14_vgpr15_vgpr16_vgpr17_vgpr18_vgpr19_vgpr20_vgpr21_vgpr22_vgpr23_vgpr24_vgpr25_vgpr26_vgpr27_vgpr28_vgpr29_vgpr30_vgpr31_vgpr32
+; GFX11-TRUE16-NEXT: ; implicit-def: $vgpr3_vgpr4_vgpr5_vgpr6_vgpr7_vgpr8_vgpr9_vgpr10_vgpr11_vgpr12_vgpr13_vgpr14_vgpr15_vgpr16_vgpr17_vgpr18_vgpr19_vgpr20_vgpr21_vgpr22_vgpr23_vgpr24_vgpr25_vgpr26_vgpr27_vgpr28_vgpr29_vgpr30_vgpr31_vgpr32_vgpr33_vgpr34
+; GFX11-TRUE16-NEXT: ; implicit-def: $vgpr6_vgpr7_vgpr8_vgpr9_vgpr10_vgpr11_vgpr12_vgpr13_vgpr14_vgpr15_vgpr16_vgpr17_vgpr18_vgpr19_vgpr20_vgpr21_vgpr22_vgpr23_vgpr24_vgpr25_vgpr26_vgpr27_vgpr28_vgpr29_vgpr30_vgpr31_vgpr32_vgpr33_vgpr34_vgpr35_vgpr36_vgpr37
+; GFX11-TRUE16-NEXT: ; implicit-def: $vgpr10_vgpr11_vgpr12_vgpr13_vgpr14_vgpr15_vgpr16_vgpr17_vgpr18_vgpr19_vgpr20_vgpr21_vgpr22_vgpr23_vgpr24_vgpr25_vgpr26_vgpr27_vgpr28_vgpr29_vgpr30_vgpr31_vgpr32_vgpr33_vgpr34_vgpr35_vgpr36_vgpr37_vgpr38_vgpr39_vgpr40_vgpr41
+; GFX11-TRUE16-NEXT: ; implicit-def: $vgpr15_vgpr16_vgpr17_vgpr18_vgpr19_vgpr20_vgpr21_vgpr22_vgpr23_vgpr24_vgpr25_vgpr26_vgpr27_vgpr28_vgpr29_vgpr30_vgpr31_vgpr32_vgpr33_vgpr34_vgpr35_vgpr36_vgpr37_vgpr38_vgpr39_vgpr40_vgpr41_vgpr42_vgpr43_vgpr44_vgpr45_vgpr46
+; GFX11-TRUE16-NEXT: ; implicit-def: $vgpr21_vgpr22_vgpr23_vgpr24_vgpr25_vgpr26_vgpr27_vgpr28_vgpr29_vgpr30_vgpr31_vgpr32_vgpr33_vgpr34_vgpr35_vgpr36_vgpr37_vgpr38_vgpr39_vgpr40_vgpr41_vgpr42_vgpr43_vgpr44_vgpr45_vgpr46_vgpr47_vgpr48_vgpr49_vgpr50_vgpr51_vgpr52
+; GFX11-TRUE16-NEXT: s_delay_alu instid0(VALU_DEP_1)
+; GFX11-TRUE16-NEXT: v_mov_b32_e32 v25, v53
+; GFX11-TRUE16-NEXT: ; implicit-def: $vgpr28_vgpr29_vgpr30_vgpr31_vgpr32_vgpr33_vgpr34_vgpr35_vgpr36_vgpr37_vgpr38_vgpr39_vgpr40_vgpr41_vgpr42_vgpr43_vgpr44_vgpr45_vgpr46_vgpr47_vgpr48_vgpr49_vgpr50_vgpr51_vgpr52_vgpr53_vgpr54_vgpr55_vgpr56_vgpr57_vgpr58_vgpr59
+; GFX11-TRUE16-NEXT: ; implicit-def: $vgpr36_vgpr37_vgpr38_vgpr39_vgpr40_vgpr41_vgpr42_vgpr43_vgpr44_vgpr45_vgpr46_vgpr47_vgpr48_vgpr49_vgpr50_vgpr51_vgpr52_vgpr53_vgpr54_vgpr55_vgpr56_vgpr57_vgpr58_vgpr59_vgpr60_vgpr61_vgpr62_vgpr63_vgpr64_vgpr65_vgpr66_vgpr67
+; GFX11-TRUE16-NEXT: ; implicit-def: $vgpr45_vgpr46_vgpr47_vgpr48_vgpr49_vgpr50_vgpr51_vgpr52_vgpr53_vgpr54_vgpr55_vgpr56_vgpr57_vgpr58_vgpr59_vgpr60_vgpr61_vgpr62_vgpr63_vgpr64_vgpr65_vgpr66_vgpr67_vgpr68_vgpr69_vgpr70_vgpr71_vgpr72_vgpr73_vgpr74_vgpr75_vgpr76
+; GFX11-TRUE16-NEXT: ; implicit-def: $vgpr55_vgpr56_vgpr57_vgpr58_vgpr59_vgpr60_vgpr61_vgpr62_vgpr63_vgpr64_vgpr65_vgpr66_vgpr67_vgpr68_vgpr69_vgpr70_vgpr71_vgpr72_vgpr73_vgpr74_vgpr75_vgpr76_vgpr77_vgpr78_vgpr79_vgpr80_vgpr81_vgpr82_vgpr83_vgpr84_vgpr85_vgpr86
+; GFX11-TRUE16-NEXT: ; implicit-def: $vgpr66_vgpr67_vgpr68_vgpr69_vgpr70_vgpr71_vgpr72_vgpr73_vgpr74_vgpr75_vgpr76_vgpr77_vgpr78_vgpr79_vgpr80_vgpr81_vgpr82_vgpr83_vgpr84_vgpr85_vgpr86_vgpr87_vgpr88_vgpr89_vgpr90_vgpr91_vgpr92_vgpr93_vgpr94_vgpr95_vgpr96_vgpr97
+; GFX11-TRUE16-NEXT: ; implicit-def: $vgpr78_vgpr79_vgpr80_vgpr81_vgpr82_vgpr83_vgpr84_vgpr85_vgpr86_vgpr87_vgpr88_vgpr89_vgpr90_vgpr91_vgpr92_vgpr93_vgpr94_vgpr95_vgpr96_vgpr97_vgpr98_vgpr99_vgpr100_vgpr101_vgpr102_vgpr103_vgpr104_vgpr105_vgpr106_vgpr107_vgpr108_vgpr109
+; GFX11-TRUE16-NEXT: ; implicit-def: $vgpr91_vgpr92_vgpr93_vgpr94_vgpr95_vgpr96_vgpr97_vgpr98_vgpr99_vgpr100_vgpr101_vgpr102_vgpr103_vgpr104_vgpr105_vgpr106_vgpr107_vgpr108_vgpr109_vgpr110_vgpr111_vgpr112_vgpr113_vgpr114_vgpr115_vgpr116_vgpr117_vgpr118_vgpr119_vgpr120_vgpr121_vgpr122
+; GFX11-TRUE16-NEXT: ; implicit-def: $vgpr105_vgpr106_vgpr107_vgpr108_vgpr109_vgpr110_vgpr111_vgpr112_vgpr113_vgpr114_vgpr115_vgpr116_vgpr117_vgpr118_vgpr119_vgpr120_vgpr121_vgpr122_vgpr123_vgpr124_vgpr125_vgpr126_vgpr127_vgpr128_vgpr129_vgpr130_vgpr131_vgpr132_vgpr133_vgpr134_vgpr135_vgpr136
+; GFX11-TRUE16-NEXT: ; implicit-def: $vgpr120_vgpr121_vgpr122_vgpr123_vgpr124_vgpr125_vgpr126_vgpr127_vgpr128_vgpr129_vgpr130_vgpr131_vgpr132_vgpr133_vgpr134_vgpr135_vgpr136_vgpr137_vgpr138_vgpr139_vgpr140_vgpr141_vgpr142_vgpr143_vgpr144_vgpr145_vgpr146_vgpr147_vgpr148_vgpr149_vgpr150_vgpr151
+; GFX11-TRUE16-NEXT: ; implicit-def: $vgpr136_vgpr137_vgpr138_vgpr139_vgpr140_vgpr141_vgpr142_vgpr143_vgpr144_vgpr145_vgpr146_vgpr147_vgpr148_vgpr149_vgpr150_vgpr151_vgpr152_vgpr153_vgpr154_vgpr155_vgpr156_vgpr157_vgpr158_vgpr159_vgpr160_vgpr161_vgpr162_vgpr163_vgpr164_vgpr165_vgpr166_vgpr167
+; GFX11-TRUE16-NEXT: ; implicit-def: $vgpr153_vgpr154_vgpr155_vgpr156_vgpr157_vgpr158_vgpr159_vgpr160_vgpr161_vgpr162_vgpr163_vgpr164_vgpr165_vgpr166_vgpr167_vgpr168_vgpr169_vgpr170_vgpr171_vgpr172_vgpr173_vgpr174_vgpr175_vgpr176_vgpr177_vgpr178_vgpr179_vgpr180_vgpr181_vgpr182_vgpr183_vgpr184
; GFX11-TRUE16-NEXT: s_branch .LBB19_2
;
; GFX11-FAKE16-LABEL: bitcast_v52f16_to_v26i32_scalar:
@@ -14389,178 +14721,340 @@ define inreg <52 x i16> @bitcast_v26f32_to_v52i16_scalar(<26 x float> inreg %a,
; GFX9-NEXT: ; implicit-def: $vgpr38
; GFX9-NEXT: s_branch .LBB29_2
;
-; GFX11-LABEL: bitcast_v26f32_to_v52i16_scalar:
-; GFX11: ; %bb.0:
-; GFX11-NEXT: s_waitcnt vmcnt(0) expcnt(0) lgkmcnt(0)
-; GFX11-NEXT: v_cmp_ne_u32_e32 vcc_lo, 0, v8
-; GFX11-NEXT: v_dual_mov_b32 v26, s0 :: v_dual_mov_b32 v25, s1
-; GFX11-NEXT: v_dual_mov_b32 v24, s2 :: v_dual_mov_b32 v23, s3
-; GFX11-NEXT: v_dual_mov_b32 v22, s16 :: v_dual_mov_b32 v21, s17
-; GFX11-NEXT: v_dual_mov_b32 v20, s18 :: v_dual_mov_b32 v19, s19
-; GFX11-NEXT: v_dual_mov_b32 v18, s20 :: v_dual_mov_b32 v9, s22
-; GFX11-NEXT: v_dual_mov_b32 v10, s21 :: v_dual_mov_b32 v15, s23
-; GFX11-NEXT: v_dual_mov_b32 v14, s24 :: v_dual_mov_b32 v13, s25
-; GFX11-NEXT: v_dual_mov_b32 v12, s26 :: v_dual_mov_b32 v11, s27
-; GFX11-NEXT: v_dual_mov_b32 v17, s28 :: v_dual_mov_b32 v16, s29
-; GFX11-NEXT: s_mov_b32 s0, 0
-; GFX11-NEXT: s_and_b32 s1, vcc_lo, exec_lo
-; GFX11-NEXT: s_cbranch_scc0 .LBB29_4
-; GFX11-NEXT: ; %bb.1: ; %cmp.false
-; GFX11-NEXT: v_lshrrev_b32_e32 v34, 16, v7
-; GFX11-NEXT: v_lshrrev_b32_e32 v35, 16, v6
-; GFX11-NEXT: v_lshrrev_b32_e32 v36, 16, v5
-; GFX11-NEXT: v_lshrrev_b32_e32 v37, 16, v4
-; GFX11-NEXT: v_lshrrev_b32_e32 v38, 16, v3
-; GFX11-NEXT: v_lshrrev_b32_e32 v39, 16, v2
-; GFX11-NEXT: v_lshrrev_b32_e32 v48, 16, v1
-; GFX11-NEXT: v_lshrrev_b32_e32 v49, 16, v0
-; GFX11-NEXT: v_lshrrev_b32_e32 v50, 16, v16
-; GFX11-NEXT: v_lshrrev_b32_e32 v51, 16, v17
-; GFX11-NEXT: v_lshrrev_b32_e32 v52, 16, v11
-; GFX11-NEXT: v_lshrrev_b32_e32 v53, 16, v12
-; GFX11-NEXT: v_lshrrev_b32_e32 v54, 16, v13
-; GFX11-NEXT: v_lshrrev_b32_e32 v55, 16, v14
-; GFX11-NEXT: v_lshrrev_b32_e32 v64, 16, v15
-; GFX11-NEXT: v_lshrrev_b32_e32 v65, 16, v9
-; GFX11-NEXT: v_lshrrev_b32_e32 v66, 16, v10
-; GFX11-NEXT: v_lshrrev_b32_e32 v8, 16, v18
-; GFX11-NEXT: v_lshrrev_b32_e32 v31, 16, v19
-; GFX11-NEXT: v_lshrrev_b32_e32 v30, 16, v20
-; GFX11-NEXT: v_lshrrev_b32_e32 v29, 16, v21
-; GFX11-NEXT: v_lshrrev_b32_e32 v28, 16, v22
-; GFX11-NEXT: v_lshrrev_b32_e32 v27, 16, v23
-; GFX11-NEXT: v_lshrrev_b32_e32 v67, 16, v24
-; GFX11-NEXT: v_lshrrev_b32_e32 v33, 16, v25
-; GFX11-NEXT: v_lshrrev_b32_e32 v32, 16, v26
-; GFX11-NEXT: s_and_not1_b32 vcc_lo, exec_lo, s0
-; GFX11-NEXT: s_cbranch_vccnz .LBB29_3
-; GFX11-NEXT: .LBB29_2: ; %cmp.true
-; GFX11-NEXT: v_dual_add_f32 v7, 1.0, v7 :: v_dual_add_f32 v6, 1.0, v6
-; GFX11-NEXT: v_dual_add_f32 v5, 1.0, v5 :: v_dual_add_f32 v4, 1.0, v4
-; GFX11-NEXT: v_dual_add_f32 v3, 1.0, v3 :: v_dual_add_f32 v2, 1.0, v2
-; GFX11-NEXT: v_dual_add_f32 v1, 1.0, v1 :: v_dual_add_f32 v0, 1.0, v0
-; GFX11-NEXT: v_dual_add_f32 v16, 1.0, v16 :: v_dual_add_f32 v17, 1.0, v17
-; GFX11-NEXT: v_dual_add_f32 v11, 1.0, v11 :: v_dual_add_f32 v12, 1.0, v12
-; GFX11-NEXT: v_dual_add_f32 v13, 1.0, v13 :: v_dual_add_f32 v14, 1.0, v14
-; GFX11-NEXT: v_dual_add_f32 v15, 1.0, v15 :: v_dual_add_f32 v10, 1.0, v10
-; GFX11-NEXT: v_dual_add_f32 v9, 1.0, v9 :: v_dual_add_f32 v18, 1.0, v18
-; GFX11-NEXT: v_dual_add_f32 v19, 1.0, v19 :: v_dual_add_f32 v20, 1.0, v20
-; GFX11-NEXT: v_dual_add_f32 v21, 1.0, v21 :: v_dual_add_f32 v22, 1.0, v22
-; GFX11-NEXT: v_dual_add_f32 v23, 1.0, v23 :: v_dual_add_f32 v24, 1.0, v24
-; GFX11-NEXT: v_dual_add_f32 v25, 1.0, v25 :: v_dual_add_f32 v26, 1.0, v26
-; GFX11-NEXT: v_lshrrev_b32_e32 v34, 16, v7
-; GFX11-NEXT: v_lshrrev_b32_e32 v35, 16, v6
-; GFX11-NEXT: v_lshrrev_b32_e32 v36, 16, v5
-; GFX11-NEXT: v_lshrrev_b32_e32 v37, 16, v4
-; GFX11-NEXT: v_lshrrev_b32_e32 v38, 16, v3
-; GFX11-NEXT: v_lshrrev_b32_e32 v39, 16, v2
-; GFX11-NEXT: v_lshrrev_b32_e32 v48, 16, v1
-; GFX11-NEXT: v_lshrrev_b32_e32 v49, 16, v0
-; GFX11-NEXT: v_lshrrev_b32_e32 v50, 16, v16
-; GFX11-NEXT: v_lshrrev_b32_e32 v51, 16, v17
-; GFX11-NEXT: v_lshrrev_b32_e32 v52, 16, v11
-; GFX11-NEXT: v_lshrrev_b32_e32 v53, 16, v12
-; GFX11-NEXT: v_lshrrev_b32_e32 v54, 16, v13
-; GFX11-NEXT: v_lshrrev_b32_e32 v55, 16, v14
-; GFX11-NEXT: v_lshrrev_b32_e32 v64, 16, v15
-; GFX11-NEXT: v_lshrrev_b32_e32 v65, 16, v9
-; GFX11-NEXT: v_lshrrev_b32_e32 v66, 16, v10
-; GFX11-NEXT: v_lshrrev_b32_e32 v8, 16, v18
-; GFX11-NEXT: v_lshrrev_b32_e32 v31, 16, v19
-; GFX11-NEXT: v_lshrrev_b32_e32 v30, 16, v20
-; GFX11-NEXT: v_lshrrev_b32_e32 v29, 16, v21
-; GFX11-NEXT: v_lshrrev_b32_e32 v28, 16, v22
-; GFX11-NEXT: v_lshrrev_b32_e32 v27, 16, v23
-; GFX11-NEXT: v_lshrrev_b32_e32 v67, 16, v24
-; GFX11-NEXT: v_lshrrev_b32_e32 v33, 16, v25
-; GFX11-NEXT: v_lshrrev_b32_e32 v32, 16, v26
-; GFX11-NEXT: .LBB29_3: ; %end
-; GFX11-NEXT: v_and_b32_e32 v19, 0xffff, v19
-; GFX11-NEXT: v_and_b32_e32 v18, 0xffff, v18
-; GFX11-NEXT: v_and_b32_e32 v25, 0xffff, v25
-; GFX11-NEXT: v_and_b32_e32 v23, 0xffff, v23
-; GFX11-NEXT: v_and_b32_e32 v21, 0xffff, v21
-; GFX11-NEXT: v_lshl_or_b32 v31, v31, 16, v19
-; GFX11-NEXT: v_lshl_or_b32 v8, v8, 16, v18
-; GFX11-NEXT: v_and_b32_e32 v15, 0xffff, v15
-; GFX11-NEXT: v_and_b32_e32 v14, 0xffff, v14
-; GFX11-NEXT: v_and_b32_e32 v18, 0xffff, v12
-; GFX11-NEXT: v_and_b32_e32 v19, 0xffff, v11
-; GFX11-NEXT: v_and_b32_e32 v1, 0xffff, v1
-; GFX11-NEXT: v_lshl_or_b32 v33, v33, 16, v25
-; GFX11-NEXT: v_and_b32_e32 v26, 0xffff, v26
-; GFX11-NEXT: v_lshl_or_b32 v27, v27, 16, v23
-; GFX11-NEXT: v_and_b32_e32 v24, 0xffff, v24
-; GFX11-NEXT: v_lshl_or_b32 v29, v29, 16, v21
-; GFX11-NEXT: v_and_b32_e32 v22, 0xffff, v22
-; GFX11-NEXT: v_and_b32_e32 v20, 0xffff, v20
-; GFX11-NEXT: v_lshl_or_b32 v11, v64, 16, v15
-; GFX11-NEXT: v_lshl_or_b32 v12, v55, 16, v14
-; GFX11-NEXT: v_lshl_or_b32 v14, v53, 16, v18
-; GFX11-NEXT: v_lshl_or_b32 v15, v52, 16, v19
-; GFX11-NEXT: v_and_b32_e32 v17, 0xffff, v17
-; GFX11-NEXT: v_and_b32_e32 v18, 0xffff, v16
-; GFX11-NEXT: v_and_b32_e32 v0, 0xffff, v0
-; GFX11-NEXT: v_and_b32_e32 v2, 0xffff, v2
-; GFX11-NEXT: v_lshl_or_b32 v19, v48, 16, v1
-; GFX11-NEXT: v_and_b32_e32 v1, 0xffff, v4
-; GFX11-NEXT: v_and_b32_e32 v10, 0xffff, v10
-; GFX11-NEXT: v_and_b32_e32 v21, 0xffff, v9
-; GFX11-NEXT: v_and_b32_e32 v13, 0xffff, v13
-; GFX11-NEXT: v_lshl_or_b32 v16, v51, 16, v17
-; GFX11-NEXT: v_lshl_or_b32 v17, v50, 16, v18
-; GFX11-NEXT: v_lshl_or_b32 v18, v49, 16, v0
-; GFX11-NEXT: v_and_b32_e32 v0, 0xffff, v3
-; GFX11-NEXT: v_and_b32_e32 v3, 0xffff, v6
-; GFX11-NEXT: v_dual_mov_b32 v7, v31 :: v_dual_and_b32 v4, 0xffff, v7
-; GFX11-NEXT: v_lshl_or_b32 v30, v30, 16, v20
-; GFX11-NEXT: v_lshl_or_b32 v20, v39, 16, v2
-; GFX11-NEXT: v_dual_mov_b32 v5, v29 :: v_dual_and_b32 v2, 0xffff, v5
-; GFX11-NEXT: v_lshl_or_b32 v28, v28, 16, v22
-; GFX11-NEXT: v_lshl_or_b32 v22, v37, 16, v1
-; GFX11-NEXT: v_mov_b32_e32 v1, v33
-; GFX11-NEXT: v_lshl_or_b32 v32, v32, 16, v26
-; GFX11-NEXT: v_lshl_or_b32 v26, v67, 16, v24
-; GFX11-NEXT: v_lshl_or_b32 v9, v66, 16, v10
-; GFX11-NEXT: v_lshl_or_b32 v10, v65, 16, v21
-; GFX11-NEXT: v_lshl_or_b32 v13, v54, 16, v13
-; GFX11-NEXT: v_lshl_or_b32 v21, v38, 16, v0
-; GFX11-NEXT: v_lshl_or_b32 v23, v36, 16, v2
-; GFX11-NEXT: v_lshl_or_b32 v24, v35, 16, v3
-; GFX11-NEXT: v_lshl_or_b32 v25, v34, 16, v4
-; GFX11-NEXT: v_mov_b32_e32 v0, v32
-; GFX11-NEXT: v_dual_mov_b32 v2, v26 :: v_dual_mov_b32 v3, v27
-; GFX11-NEXT: v_mov_b32_e32 v4, v28
-; GFX11-NEXT: v_mov_b32_e32 v6, v30
-; GFX11-NEXT: s_setpc_b64 s[30:31]
-; GFX11-NEXT: .LBB29_4:
-; GFX11-NEXT: ; implicit-def: $vgpr32
-; GFX11-NEXT: ; implicit-def: $vgpr33
-; GFX11-NEXT: ; implicit-def: $vgpr67
-; GFX11-NEXT: ; implicit-def: $vgpr27
-; GFX11-NEXT: ; implicit-def: $vgpr28
-; GFX11-NEXT: ; implicit-def: $vgpr29
-; GFX11-NEXT: ; implicit-def: $vgpr30
-; GFX11-NEXT: ; implicit-def: $vgpr31
-; GFX11-NEXT: ; implicit-def: $vgpr8
-; GFX11-NEXT: ; implicit-def: $vgpr66
-; GFX11-NEXT: ; implicit-def: $vgpr65
-; GFX11-NEXT: ; implicit-def: $vgpr64
-; GFX11-NEXT: ; implicit-def: $vgpr55
-; GFX11-NEXT: ; implicit-def: $vgpr54
-; GFX11-NEXT: ; implicit-def: $vgpr53
-; GFX11-NEXT: ; implicit-def: $vgpr52
-; GFX11-NEXT: ; implicit-def: $vgpr51
-; GFX11-NEXT: ; implicit-def: $vgpr50
-; GFX11-NEXT: ; implicit-def: $vgpr49
-; GFX11-NEXT: ; implicit-def: $vgpr48
-; GFX11-NEXT: ; implicit-def: $vgpr39
-; GFX11-NEXT: ; implicit-def: $vgpr38
-; GFX11-NEXT: ; implicit-def: $vgpr37
-; GFX11-NEXT: ; implicit-def: $vgpr36
-; GFX11-NEXT: ; implicit-def: $vgpr35
-; GFX11-NEXT: ; implicit-def: $vgpr34
-; GFX11-NEXT: s_branch .LBB29_2
+; GFX11-TRUE16-LABEL: bitcast_v26f32_to_v52i16_scalar:
+; GFX11-TRUE16: ; %bb.0:
+; GFX11-TRUE16-NEXT: s_waitcnt vmcnt(0) expcnt(0) lgkmcnt(0)
+; GFX11-TRUE16-NEXT: v_dual_mov_b32 v16, v8 :: v_dual_mov_b32 v25, v7
+; GFX11-TRUE16-NEXT: v_dual_mov_b32 v24, v6 :: v_dual_mov_b32 v23, v5
+; GFX11-TRUE16-NEXT: v_dual_mov_b32 v22, v4 :: v_dual_mov_b32 v21, v3
+; GFX11-TRUE16-NEXT: s_delay_alu instid0(VALU_DEP_3)
+; GFX11-TRUE16-NEXT: v_cmp_ne_u32_e32 vcc_lo, 0, v16
+; GFX11-TRUE16-NEXT: v_dual_mov_b32 v20, v2 :: v_dual_mov_b32 v19, v1
+; GFX11-TRUE16-NEXT: v_dual_mov_b32 v18, v0 :: v_dual_mov_b32 v1, s1
+; GFX11-TRUE16-NEXT: v_dual_mov_b32 v0, s0 :: v_dual_mov_b32 v3, s3
+; GFX11-TRUE16-NEXT: v_dual_mov_b32 v2, s2 :: v_dual_mov_b32 v5, s17
+; GFX11-TRUE16-NEXT: v_dual_mov_b32 v4, s16 :: v_dual_mov_b32 v7, s19
+; GFX11-TRUE16-NEXT: v_dual_mov_b32 v6, s18 :: v_dual_mov_b32 v9, s21
+; GFX11-TRUE16-NEXT: v_dual_mov_b32 v8, s20 :: v_dual_mov_b32 v11, s23
+; GFX11-TRUE16-NEXT: v_dual_mov_b32 v10, s22 :: v_dual_mov_b32 v13, s25
+; GFX11-TRUE16-NEXT: v_dual_mov_b32 v12, s24 :: v_dual_mov_b32 v15, s27
+; GFX11-TRUE16-NEXT: v_dual_mov_b32 v14, s26 :: v_dual_mov_b32 v17, s29
+; GFX11-TRUE16-NEXT: v_mov_b32_e32 v16, s28
+; GFX11-TRUE16-NEXT: s_mov_b32 s0, 0
+; GFX11-TRUE16-NEXT: s_and_b32 s1, vcc_lo, exec_lo
+; GFX11-TRUE16-NEXT: s_cbranch_scc0 .LBB29_4
+; GFX11-TRUE16-NEXT: ; %bb.1: ; %cmp.false
+; GFX11-TRUE16-NEXT: v_lshrrev_b32_e32 v26, 16, v25
+; GFX11-TRUE16-NEXT: v_lshrrev_b32_e32 v27, 16, v24
+; GFX11-TRUE16-NEXT: v_lshrrev_b32_e32 v28, 16, v23
+; GFX11-TRUE16-NEXT: v_lshrrev_b32_e32 v29, 16, v22
+; GFX11-TRUE16-NEXT: v_lshrrev_b32_e32 v30, 16, v21
+; GFX11-TRUE16-NEXT: v_lshrrev_b32_e32 v31, 16, v20
+; GFX11-TRUE16-NEXT: v_lshrrev_b32_e32 v32, 16, v19
+; GFX11-TRUE16-NEXT: v_lshrrev_b32_e32 v33, 16, v18
+; GFX11-TRUE16-NEXT: v_lshrrev_b32_e32 v34, 16, v17
+; GFX11-TRUE16-NEXT: v_lshrrev_b32_e32 v35, 16, v16
+; GFX11-TRUE16-NEXT: v_lshrrev_b32_e32 v36, 16, v15
+; GFX11-TRUE16-NEXT: v_lshrrev_b32_e32 v37, 16, v14
+; GFX11-TRUE16-NEXT: v_lshrrev_b32_e32 v38, 16, v13
+; GFX11-TRUE16-NEXT: v_lshrrev_b32_e32 v39, 16, v12
+; GFX11-TRUE16-NEXT: v_lshrrev_b32_e32 v48, 16, v11
+; GFX11-TRUE16-NEXT: v_lshrrev_b32_e32 v49, 16, v10
+; GFX11-TRUE16-NEXT: v_lshrrev_b32_e32 v50, 16, v9
+; GFX11-TRUE16-NEXT: v_lshrrev_b32_e32 v51, 16, v8
+; GFX11-TRUE16-NEXT: v_lshrrev_b32_e32 v52, 16, v7
+; GFX11-TRUE16-NEXT: v_lshrrev_b32_e32 v53, 16, v6
+; GFX11-TRUE16-NEXT: v_lshrrev_b32_e32 v54, 16, v5
+; GFX11-TRUE16-NEXT: v_lshrrev_b32_e32 v55, 16, v4
+; GFX11-TRUE16-NEXT: v_lshrrev_b32_e32 v64, 16, v3
+; GFX11-TRUE16-NEXT: v_lshrrev_b32_e32 v65, 16, v2
+; GFX11-TRUE16-NEXT: v_lshrrev_b32_e32 v66, 16, v1
+; GFX11-TRUE16-NEXT: v_lshrrev_b32_e32 v67, 16, v0
+; GFX11-TRUE16-NEXT: s_and_not1_b32 vcc_lo, exec_lo, s0
+; GFX11-TRUE16-NEXT: s_cbranch_vccnz .LBB29_3
+; GFX11-TRUE16-NEXT: .LBB29_2: ; %cmp.true
+; GFX11-TRUE16-NEXT: v_dual_add_f32 v25, 1.0, v25 :: v_dual_add_f32 v24, 1.0, v24
+; GFX11-TRUE16-NEXT: v_dual_add_f32 v23, 1.0, v23 :: v_dual_add_f32 v22, 1.0, v22
+; GFX11-TRUE16-NEXT: v_dual_add_f32 v21, 1.0, v21 :: v_dual_add_f32 v20, 1.0, v20
+; GFX11-TRUE16-NEXT: v_dual_add_f32 v19, 1.0, v19 :: v_dual_add_f32 v18, 1.0, v18
+; GFX11-TRUE16-NEXT: v_dual_add_f32 v17, 1.0, v17 :: v_dual_add_f32 v16, 1.0, v16
+; GFX11-TRUE16-NEXT: v_dual_add_f32 v15, 1.0, v15 :: v_dual_add_f32 v14, 1.0, v14
+; GFX11-TRUE16-NEXT: v_dual_add_f32 v13, 1.0, v13 :: v_dual_add_f32 v12, 1.0, v12
+; GFX11-TRUE16-NEXT: v_dual_add_f32 v11, 1.0, v11 :: v_dual_add_f32 v10, 1.0, v10
+; GFX11-TRUE16-NEXT: v_dual_add_f32 v9, 1.0, v9 :: v_dual_add_f32 v8, 1.0, v8
+; GFX11-TRUE16-NEXT: v_dual_add_f32 v7, 1.0, v7 :: v_dual_add_f32 v6, 1.0, v6
+; GFX11-TRUE16-NEXT: v_dual_add_f32 v5, 1.0, v5 :: v_dual_add_f32 v4, 1.0, v4
+; GFX11-TRUE16-NEXT: v_dual_add_f32 v3, 1.0, v3 :: v_dual_add_f32 v2, 1.0, v2
+; GFX11-TRUE16-NEXT: v_dual_add_f32 v1, 1.0, v1 :: v_dual_add_f32 v0, 1.0, v0
+; GFX11-TRUE16-NEXT: v_lshrrev_b32_e32 v26, 16, v25
+; GFX11-TRUE16-NEXT: v_lshrrev_b32_e32 v27, 16, v24
+; GFX11-TRUE16-NEXT: v_lshrrev_b32_e32 v28, 16, v23
+; GFX11-TRUE16-NEXT: v_lshrrev_b32_e32 v29, 16, v22
+; GFX11-TRUE16-NEXT: v_lshrrev_b32_e32 v30, 16, v21
+; GFX11-TRUE16-NEXT: v_lshrrev_b32_e32 v31, 16, v20
+; GFX11-TRUE16-NEXT: v_lshrrev_b32_e32 v32, 16, v19
+; GFX11-TRUE16-NEXT: v_lshrrev_b32_e32 v33, 16, v18
+; GFX11-TRUE16-NEXT: v_lshrrev_b32_e32 v34, 16, v17
+; GFX11-TRUE16-NEXT: v_lshrrev_b32_e32 v35, 16, v16
+; GFX11-TRUE16-NEXT: v_lshrrev_b32_e32 v36, 16, v15
+; GFX11-TRUE16-NEXT: v_lshrrev_b32_e32 v37, 16, v14
+; GFX11-TRUE16-NEXT: v_lshrrev_b32_e32 v38, 16, v13
+; GFX11-TRUE16-NEXT: v_lshrrev_b32_e32 v39, 16, v12
+; GFX11-TRUE16-NEXT: v_lshrrev_b32_e32 v48, 16, v11
+; GFX11-TRUE16-NEXT: v_lshrrev_b32_e32 v49, 16, v10
+; GFX11-TRUE16-NEXT: v_lshrrev_b32_e32 v50, 16, v9
+; GFX11-TRUE16-NEXT: v_lshrrev_b32_e32 v51, 16, v8
+; GFX11-TRUE16-NEXT: v_lshrrev_b32_e32 v52, 16, v7
+; GFX11-TRUE16-NEXT: v_lshrrev_b32_e32 v53, 16, v6
+; GFX11-TRUE16-NEXT: v_lshrrev_b32_e32 v54, 16, v5
+; GFX11-TRUE16-NEXT: v_lshrrev_b32_e32 v55, 16, v4
+; GFX11-TRUE16-NEXT: v_lshrrev_b32_e32 v64, 16, v3
+; GFX11-TRUE16-NEXT: v_lshrrev_b32_e32 v65, 16, v2
+; GFX11-TRUE16-NEXT: v_lshrrev_b32_e32 v66, 16, v1
+; GFX11-TRUE16-NEXT: v_lshrrev_b32_e32 v67, 16, v0
+; GFX11-TRUE16-NEXT: .LBB29_3: ; %end
+; GFX11-TRUE16-NEXT: s_delay_alu instid0(VALU_DEP_1) | instskip(NEXT) | instid1(VALU_DEP_4)
+; GFX11-TRUE16-NEXT: v_dual_mov_b32 v67, v67 :: v_dual_mov_b32 v66, v66
+; GFX11-TRUE16-NEXT: v_dual_mov_b32 v65, v65 :: v_dual_mov_b32 v64, v64
+; GFX11-TRUE16-NEXT: v_dual_mov_b32 v55, v55 :: v_dual_mov_b32 v54, v54
+; GFX11-TRUE16-NEXT: v_dual_mov_b32 v53, v53 :: v_dual_mov_b32 v52, v52
+; GFX11-TRUE16-NEXT: v_dual_mov_b32 v51, v51 :: v_dual_mov_b32 v50, v50
+; GFX11-TRUE16-NEXT: v_dual_mov_b32 v49, v49 :: v_dual_mov_b32 v48, v48
+; GFX11-TRUE16-NEXT: v_dual_mov_b32 v39, v39 :: v_dual_mov_b32 v38, v38
+; GFX11-TRUE16-NEXT: v_dual_mov_b32 v37, v37 :: v_dual_mov_b32 v36, v36
+; GFX11-TRUE16-NEXT: v_dual_mov_b32 v35, v35 :: v_dual_mov_b32 v34, v34
+; GFX11-TRUE16-NEXT: v_dual_mov_b32 v33, v33 :: v_dual_mov_b32 v32, v32
+; GFX11-TRUE16-NEXT: v_dual_mov_b32 v31, v31 :: v_dual_mov_b32 v30, v30
+; GFX11-TRUE16-NEXT: v_dual_mov_b32 v29, v29 :: v_dual_mov_b32 v28, v28
+; GFX11-TRUE16-NEXT: v_dual_mov_b32 v27, v27 :: v_dual_mov_b32 v26, v26
+; GFX11-TRUE16-NEXT: v_mov_b16_e32 v0.h, v67.l
+; GFX11-TRUE16-NEXT: v_mov_b16_e32 v1.h, v66.l
+; GFX11-TRUE16-NEXT: v_mov_b16_e32 v2.h, v65.l
+; GFX11-TRUE16-NEXT: v_mov_b16_e32 v3.h, v64.l
+; GFX11-TRUE16-NEXT: v_mov_b16_e32 v4.h, v55.l
+; GFX11-TRUE16-NEXT: v_mov_b16_e32 v5.h, v54.l
+; GFX11-TRUE16-NEXT: v_mov_b16_e32 v6.h, v53.l
+; GFX11-TRUE16-NEXT: v_mov_b16_e32 v7.h, v52.l
+; GFX11-TRUE16-NEXT: v_mov_b16_e32 v8.h, v51.l
+; GFX11-TRUE16-NEXT: v_mov_b16_e32 v9.h, v50.l
+; GFX11-TRUE16-NEXT: v_mov_b16_e32 v10.h, v49.l
+; GFX11-TRUE16-NEXT: v_mov_b16_e32 v11.h, v48.l
+; GFX11-TRUE16-NEXT: v_mov_b16_e32 v12.h, v39.l
+; GFX11-TRUE16-NEXT: v_mov_b16_e32 v13.h, v38.l
+; GFX11-TRUE16-NEXT: v_mov_b16_e32 v14.h, v37.l
+; GFX11-TRUE16-NEXT: v_mov_b16_e32 v15.h, v36.l
+; GFX11-TRUE16-NEXT: v_mov_b16_e32 v16.h, v35.l
+; GFX11-TRUE16-NEXT: v_mov_b16_e32 v17.h, v34.l
+; GFX11-TRUE16-NEXT: v_mov_b16_e32 v18.h, v33.l
+; GFX11-TRUE16-NEXT: v_mov_b16_e32 v19.h, v32.l
+; GFX11-TRUE16-NEXT: v_mov_b16_e32 v20.h, v31.l
+; GFX11-TRUE16-NEXT: v_mov_b16_e32 v21.h, v30.l
+; GFX11-TRUE16-NEXT: v_mov_b16_e32 v22.h, v29.l
+; GFX11-TRUE16-NEXT: v_mov_b16_e32 v23.h, v28.l
+; GFX11-TRUE16-NEXT: v_mov_b16_e32 v24.h, v27.l
+; GFX11-TRUE16-NEXT: v_mov_b16_e32 v25.h, v26.l
+; GFX11-TRUE16-NEXT: s_setpc_b64 s[30:31]
+; GFX11-TRUE16-NEXT: .LBB29_4:
+; GFX11-TRUE16-NEXT: ; implicit-def: $vgpr67
+; GFX11-TRUE16-NEXT: ; implicit-def: $vgpr66
+; GFX11-TRUE16-NEXT: ; implicit-def: $vgpr65
+; GFX11-TRUE16-NEXT: ; implicit-def: $vgpr64
+; GFX11-TRUE16-NEXT: ; implicit-def: $vgpr55
+; GFX11-TRUE16-NEXT: ; implicit-def: $vgpr54
+; GFX11-TRUE16-NEXT: ; implicit-def: $vgpr53
+; GFX11-TRUE16-NEXT: ; implicit-def: $vgpr52
+; GFX11-TRUE16-NEXT: ; implicit-def: $vgpr51
+; GFX11-TRUE16-NEXT: ; implicit-def: $vgpr50
+; GFX11-TRUE16-NEXT: ; implicit-def: $vgpr49
+; GFX11-TRUE16-NEXT: ; implicit-def: $vgpr48
+; GFX11-TRUE16-NEXT: ; implicit-def: $vgpr39
+; GFX11-TRUE16-NEXT: ; implicit-def: $vgpr38
+; GFX11-TRUE16-NEXT: ; implicit-def: $vgpr37
+; GFX11-TRUE16-NEXT: ; implicit-def: $vgpr36
+; GFX11-TRUE16-NEXT: ; implicit-def: $vgpr35
+; GFX11-TRUE16-NEXT: ; implicit-def: $vgpr34
+; GFX11-TRUE16-NEXT: ; implicit-def: $vgpr33
+; GFX11-TRUE16-NEXT: ; implicit-def: $vgpr32
+; GFX11-TRUE16-NEXT: ; implicit-def: $vgpr31
+; GFX11-TRUE16-NEXT: ; implicit-def: $vgpr30
+; GFX11-TRUE16-NEXT: ; implicit-def: $vgpr29
+; GFX11-TRUE16-NEXT: ; implicit-def: $vgpr28
+; GFX11-TRUE16-NEXT: ; implicit-def: $vgpr27
+; GFX11-TRUE16-NEXT: ; implicit-def: $vgpr26
+; GFX11-TRUE16-NEXT: s_branch .LBB29_2
+;
+; GFX11-FAKE16-LABEL: bitcast_v26f32_to_v52i16_scalar:
+; GFX11-FAKE16: ; %bb.0:
+; GFX11-FAKE16-NEXT: s_waitcnt vmcnt(0) expcnt(0) lgkmcnt(0)
+; GFX11-FAKE16-NEXT: v_cmp_ne_u32_e32 vcc_lo, 0, v8
+; GFX11-FAKE16-NEXT: v_dual_mov_b32 v26, s0 :: v_dual_mov_b32 v25, s1
+; GFX11-FAKE16-NEXT: v_dual_mov_b32 v24, s2 :: v_dual_mov_b32 v23, s3
+; GFX11-FAKE16-NEXT: v_dual_mov_b32 v22, s16 :: v_dual_mov_b32 v21, s17
+; GFX11-FAKE16-NEXT: v_dual_mov_b32 v20, s18 :: v_dual_mov_b32 v19, s19
+; GFX11-FAKE16-NEXT: v_dual_mov_b32 v18, s20 :: v_dual_mov_b32 v9, s22
+; GFX11-FAKE16-NEXT: v_dual_mov_b32 v10, s21 :: v_dual_mov_b32 v15, s23
+; GFX11-FAKE16-NEXT: v_dual_mov_b32 v14, s24 :: v_dual_mov_b32 v13, s25
+; GFX11-FAKE16-NEXT: v_dual_mov_b32 v12, s26 :: v_dual_mov_b32 v11, s27
+; GFX11-FAKE16-NEXT: v_dual_mov_b32 v17, s28 :: v_dual_mov_b32 v16, s29
+; GFX11-FAKE16-NEXT: s_mov_b32 s0, 0
+; GFX11-FAKE16-NEXT: s_and_b32 s1, vcc_lo, exec_lo
+; GFX11-FAKE16-NEXT: s_cbranch_scc0 .LBB29_4
+; GFX11-FAKE16-NEXT: ; %bb.1: ; %cmp.false
+; GFX11-FAKE16-NEXT: v_lshrrev_b32_e32 v34, 16, v7
+; GFX11-FAKE16-NEXT: v_lshrrev_b32_e32 v35, 16, v6
+; GFX11-FAKE16-NEXT: v_lshrrev_b32_e32 v36, 16, v5
+; GFX11-FAKE16-NEXT: v_lshrrev_b32_e32 v37, 16, v4
+; GFX11-FAKE16-NEXT: v_lshrrev_b32_e32 v38, 16, v3
+; GFX11-FAKE16-NEXT: v_lshrrev_b32_e32 v39, 16, v2
+; GFX11-FAKE16-NEXT: v_lshrrev_b32_e32 v48, 16, v1
+; GFX11-FAKE16-NEXT: v_lshrrev_b32_e32 v49, 16, v0
+; GFX11-FAKE16-NEXT: v_lshrrev_b32_e32 v50, 16, v16
+; GFX11-FAKE16-NEXT: v_lshrrev_b32_e32 v51, 16, v17
+; GFX11-FAKE16-NEXT: v_lshrrev_b32_e32 v52, 16, v11
+; GFX11-FAKE16-NEXT: v_lshrrev_b32_e32 v53, 16, v12
+; GFX11-FAKE16-NEXT: v_lshrrev_b32_e32 v54, 16, v13
+; GFX11-FAKE16-NEXT: v_lshrrev_b32_e32 v55, 16, v14
+; GFX11-FAKE16-NEXT: v_lshrrev_b32_e32 v64, 16, v15
+; GFX11-FAKE16-NEXT: v_lshrrev_b32_e32 v65, 16, v9
+; GFX11-FAKE16-NEXT: v_lshrrev_b32_e32 v66, 16, v10
+; GFX11-FAKE16-NEXT: v_lshrrev_b32_e32 v8, 16, v18
+; GFX11-FAKE16-NEXT: v_lshrrev_b32_e32 v31, 16, v19
+; GFX11-FAKE16-NEXT: v_lshrrev_b32_e32 v30, 16, v20
+; GFX11-FAKE16-NEXT: v_lshrrev_b32_e32 v29, 16, v21
+; GFX11-FAKE16-NEXT: v_lshrrev_b32_e32 v28, 16, v22
+; GFX11-FAKE16-NEXT: v_lshrrev_b32_e32 v27, 16, v23
+; GFX11-FAKE16-NEXT: v_lshrrev_b32_e32 v67, 16, v24
+; GFX11-FAKE16-NEXT: v_lshrrev_b32_e32 v33, 16, v25
+; GFX11-FAKE16-NEXT: v_lshrrev_b32_e32 v32, 16, v26
+; GFX11-FAKE16-NEXT: s_and_not1_b32 vcc_lo, exec_lo, s0
+; GFX11-FAKE16-NEXT: s_cbranch_vccnz .LBB29_3
+; GFX11-FAKE16-NEXT: .LBB29_2: ; %cmp.true
+; GFX11-FAKE16-NEXT: v_dual_add_f32 v7, 1.0, v7 :: v_dual_add_f32 v6, 1.0, v6
+; GFX11-FAKE16-NEXT: v_dual_add_f32 v5, 1.0, v5 :: v_dual_add_f32 v4, 1.0, v4
+; GFX11-FAKE16-NEXT: v_dual_add_f32 v3, 1.0, v3 :: v_dual_add_f32 v2, 1.0, v2
+; GFX11-FAKE16-NEXT: v_dual_add_f32 v1, 1.0, v1 :: v_dual_add_f32 v0, 1.0, v0
+; GFX11-FAKE16-NEXT: v_dual_add_f32 v16, 1.0, v16 :: v_dual_add_f32 v17, 1.0, v17
+; GFX11-FAKE16-NEXT: v_dual_add_f32 v11, 1.0, v11 :: v_dual_add_f32 v12, 1.0, v12
+; GFX11-FAKE16-NEXT: v_dual_add_f32 v13, 1.0, v13 :: v_dual_add_f32 v14, 1.0, v14
+; GFX11-FAKE16-NEXT: v_dual_add_f32 v15, 1.0, v15 :: v_dual_add_f32 v10, 1.0, v10
+; GFX11-FAKE16-NEXT: v_dual_add_f32 v9, 1.0, v9 :: v_dual_add_f32 v18, 1.0, v18
+; GFX11-FAKE16-NEXT: v_dual_add_f32 v19, 1.0, v19 :: v_dual_add_f32 v20, 1.0, v20
+; GFX11-FAKE16-NEXT: v_dual_add_f32 v21, 1.0, v21 :: v_dual_add_f32 v22, 1.0, v22
+; GFX11-FAKE16-NEXT: v_dual_add_f32 v23, 1.0, v23 :: v_dual_add_f32 v24, 1.0, v24
+; GFX11-FAKE16-NEXT: v_dual_add_f32 v25, 1.0, v25 :: v_dual_add_f32 v26, 1.0, v26
+; GFX11-FAKE16-NEXT: v_lshrrev_b32_e32 v34, 16, v7
+; GFX11-FAKE16-NEXT: v_lshrrev_b32_e32 v35, 16, v6
+; GFX11-FAKE16-NEXT: v_lshrrev_b32_e32 v36, 16, v5
+; GFX11-FAKE16-NEXT: v_lshrrev_b32_e32 v37, 16, v4
+; GFX11-FAKE16-NEXT: v_lshrrev_b32_e32 v38, 16, v3
+; GFX11-FAKE16-NEXT: v_lshrrev_b32_e32 v39, 16, v2
+; GFX11-FAKE16-NEXT: v_lshrrev_b32_e32 v48, 16, v1
+; GFX11-FAKE16-NEXT: v_lshrrev_b32_e32 v49, 16, v0
+; GFX11-FAKE16-NEXT: v_lshrrev_b32_e32 v50, 16, v16
+; GFX11-FAKE16-NEXT: v_lshrrev_b32_e32 v51, 16, v17
+; GFX11-FAKE16-NEXT: v_lshrrev_b32_e32 v52, 16, v11
+; GFX11-FAKE16-NEXT: v_lshrrev_b32_e32 v53, 16, v12
+; GFX11-FAKE16-NEXT: v_lshrrev_b32_e32 v54, 16, v13
+; GFX11-FAKE16-NEXT: v_lshrrev_b32_e32 v55, 16, v14
+; GFX11-FAKE16-NEXT: v_lshrrev_b32_e32 v64, 16, v15
+; GFX11-FAKE16-NEXT: v_lshrrev_b32_e32 v65, 16, v9
+; GFX11-FAKE16-NEXT: v_lshrrev_b32_e32 v66, 16, v10
+; GFX11-FAKE16-NEXT: v_lshrrev_b32_e32 v8, 16, v18
+; GFX11-FAKE16-NEXT: v_lshrrev_b32_e32 v31, 16, v19
+; GFX11-FAKE16-NEXT: v_lshrrev_b32_e32 v30, 16, v20
+; GFX11-FAKE16-NEXT: v_lshrrev_b32_e32 v29, 16, v21
+; GFX11-FAKE16-NEXT: v_lshrrev_b32_e32 v28, 16, v22
+; GFX11-FAKE16-NEXT: v_lshrrev_b32_e32 v27, 16, v23
+; GFX11-FAKE16-NEXT: v_lshrrev_b32_e32 v67, 16, v24
+; GFX11-FAKE16-NEXT: v_lshrrev_b32_e32 v33, 16, v25
+; GFX11-FAKE16-NEXT: v_lshrrev_b32_e32 v32, 16, v26
+; GFX11-FAKE16-NEXT: .LBB29_3: ; %end
+; GFX11-FAKE16-NEXT: v_and_b32_e32 v19, 0xffff, v19
+; GFX11-FAKE16-NEXT: v_and_b32_e32 v18, 0xffff, v18
+; GFX11-FAKE16-NEXT: v_and_b32_e32 v25, 0xffff, v25
+; GFX11-FAKE16-NEXT: v_and_b32_e32 v23, 0xffff, v23
+; GFX11-FAKE16-NEXT: v_and_b32_e32 v21, 0xffff, v21
+; GFX11-FAKE16-NEXT: v_lshl_or_b32 v31, v31, 16, v19
+; GFX11-FAKE16-NEXT: v_lshl_or_b32 v8, v8, 16, v18
+; GFX11-FAKE16-NEXT: v_and_b32_e32 v15, 0xffff, v15
+; GFX11-FAKE16-NEXT: v_and_b32_e32 v14, 0xffff, v14
+; GFX11-FAKE16-NEXT: v_and_b32_e32 v18, 0xffff, v12
+; GFX11-FAKE16-NEXT: v_and_b32_e32 v19, 0xffff, v11
+; GFX11-FAKE16-NEXT: v_and_b32_e32 v1, 0xffff, v1
+; GFX11-FAKE16-NEXT: v_lshl_or_b32 v33, v33, 16, v25
+; GFX11-FAKE16-NEXT: v_and_b32_e32 v26, 0xffff, v26
+; GFX11-FAKE16-NEXT: v_lshl_or_b32 v27, v27, 16, v23
+; GFX11-FAKE16-NEXT: v_and_b32_e32 v24, 0xffff, v24
+; GFX11-FAKE16-NEXT: v_lshl_or_b32 v29, v29, 16, v21
+; GFX11-FAKE16-NEXT: v_and_b32_e32 v22, 0xffff, v22
+; GFX11-FAKE16-NEXT: v_and_b32_e32 v20, 0xffff, v20
+; GFX11-FAKE16-NEXT: v_lshl_or_b32 v11, v64, 16, v15
+; GFX11-FAKE16-NEXT: v_lshl_or_b32 v12, v55, 16, v14
+; GFX11-FAKE16-NEXT: v_lshl_or_b32 v14, v53, 16, v18
+; GFX11-FAKE16-NEXT: v_lshl_or_b32 v15, v52, 16, v19
+; GFX11-FAKE16-NEXT: v_and_b32_e32 v17, 0xffff, v17
+; GFX11-FAKE16-NEXT: v_and_b32_e32 v18, 0xffff, v16
+; GFX11-FAKE16-NEXT: v_and_b32_e32 v0, 0xffff, v0
+; GFX11-FAKE16-NEXT: v_and_b32_e32 v2, 0xffff, v2
+; GFX11-FAKE16-NEXT: v_lshl_or_b32 v19, v48, 16, v1
+; GFX11-FAKE16-NEXT: v_and_b32_e32 v1, 0xffff, v4
+; GFX11-FAKE16-NEXT: v_and_b32_e32 v10, 0xffff, v10
+; GFX11-FAKE16-NEXT: v_and_b32_e32 v21, 0xffff, v9
+; GFX11-FAKE16-NEXT: v_and_b32_e32 v13, 0xffff, v13
+; GFX11-FAKE16-NEXT: v_lshl_or_b32 v16, v51, 16, v17
+; GFX11-FAKE16-NEXT: v_lshl_or_b32 v17, v50, 16, v18
+; GFX11-FAKE16-NEXT: v_lshl_or_b32 v18, v49, 16, v0
+; GFX11-FAKE16-NEXT: v_and_b32_e32 v0, 0xffff, v3
+; GFX11-FAKE16-NEXT: v_and_b32_e32 v3, 0xffff, v6
+; GFX11-FAKE16-NEXT: v_dual_mov_b32 v7, v31 :: v_dual_and_b32 v4, 0xffff, v7
+; GFX11-FAKE16-NEXT: v_lshl_or_b32 v30, v30, 16, v20
+; GFX11-FAKE16-NEXT: v_lshl_or_b32 v20, v39, 16, v2
+; GFX11-FAKE16-NEXT: v_dual_mov_b32 v5, v29 :: v_dual_and_b32 v2, 0xffff, v5
+; GFX11-FAKE16-NEXT: v_lshl_or_b32 v28, v28, 16, v22
+; GFX11-FAKE16-NEXT: v_lshl_or_b32 v22, v37, 16, v1
+; GFX11-FAKE16-NEXT: v_mov_b32_e32 v1, v33
+; GFX11-FAKE16-NEXT: v_lshl_or_b32 v32, v32, 16, v26
+; GFX11-FAKE16-NEXT: v_lshl_or_b32 v26, v67, 16, v24
+; GFX11-FAKE16-NEXT: v_lshl_or_b32 v9, v66, 16, v10
+; GFX11-FAKE16-NEXT: v_lshl_or_b32 v10, v65, 16, v21
+; GFX11-FAKE16-NEXT: v_lshl_or_b32 v13, v54, 16, v13
+; GFX11-FAKE16-NEXT: v_lshl_or_b32 v21, v38, 16, v0
+; GFX11-FAKE16-NEXT: v_lshl_or_b32 v23, v36, 16, v2
+; GFX11-FAKE16-NEXT: v_lshl_or_b32 v24, v35, 16, v3
+; GFX11-FAKE16-NEXT: v_lshl_or_b32 v25, v34, 16, v4
+; GFX11-FAKE16-NEXT: v_mov_b32_e32 v0, v32
+; GFX11-FAKE16-NEXT: v_dual_mov_b32 v2, v26 :: v_dual_mov_b32 v3, v27
+; GFX11-FAKE16-NEXT: v_mov_b32_e32 v4, v28
+; GFX11-FAKE16-NEXT: v_mov_b32_e32 v6, v30
+; GFX11-FAKE16-NEXT: s_setpc_b64 s[30:31]
+; GFX11-FAKE16-NEXT: .LBB29_4:
+; GFX11-FAKE16-NEXT: ; implicit-def: $vgpr32
+; GFX11-FAKE16-NEXT: ; implicit-def: $vgpr33
+; GFX11-FAKE16-NEXT: ; implicit-def: $vgpr67
+; GFX11-FAKE16-NEXT: ; implicit-def: $vgpr27
+; GFX11-FAKE16-NEXT: ; implicit-def: $vgpr28
+; GFX11-FAKE16-NEXT: ; implicit-def: $vgpr29
+; GFX11-FAKE16-NEXT: ; implicit-def: $vgpr30
+; GFX11-FAKE16-NEXT: ; implicit-def: $vgpr31
+; GFX11-FAKE16-NEXT: ; implicit-def: $vgpr8
+; GFX11-FAKE16-NEXT: ; implicit-def: $vgpr66
+; GFX11-FAKE16-NEXT: ; implicit-def: $vgpr65
+; GFX11-FAKE16-NEXT: ; implicit-def: $vgpr64
+; GFX11-FAKE16-NEXT: ; implicit-def: $vgpr55
+; GFX11-FAKE16-NEXT: ; implicit-def: $vgpr54
+; GFX11-FAKE16-NEXT: ; implicit-def: $vgpr53
+; GFX11-FAKE16-NEXT: ; implicit-def: $vgpr52
+; GFX11-FAKE16-NEXT: ; implicit-def: $vgpr51
+; GFX11-FAKE16-NEXT: ; implicit-def: $vgpr50
+; GFX11-FAKE16-NEXT: ; implicit-def: $vgpr49
+; GFX11-FAKE16-NEXT: ; implicit-def: $vgpr48
+; GFX11-FAKE16-NEXT: ; implicit-def: $vgpr39
+; GFX11-FAKE16-NEXT: ; implicit-def: $vgpr38
+; GFX11-FAKE16-NEXT: ; implicit-def: $vgpr37
+; GFX11-FAKE16-NEXT: ; implicit-def: $vgpr36
+; GFX11-FAKE16-NEXT: ; implicit-def: $vgpr35
+; GFX11-FAKE16-NEXT: ; implicit-def: $vgpr34
+; GFX11-FAKE16-NEXT: s_branch .LBB29_2
%cmp = icmp eq i32 %b, 0
br i1 %cmp, label %cmp.true, label %cmp.false
@@ -16527,129 +17021,295 @@ define inreg <26 x float> @bitcast_v52i16_to_v26f32_scalar(<52 x i16> inreg %a,
; GFX11-TRUE16-LABEL: bitcast_v52i16_to_v26f32_scalar:
; GFX11-TRUE16: ; %bb.0:
; GFX11-TRUE16-NEXT: s_waitcnt vmcnt(0) expcnt(0) lgkmcnt(0)
-; GFX11-TRUE16-NEXT: v_mov_b16_e32 v32.h, 0
; GFX11-TRUE16-NEXT: v_cmp_ne_u32_e32 vcc_lo, 0, v8
-; GFX11-TRUE16-NEXT: v_mov_b16_e32 v32.l, v7.h
-; GFX11-TRUE16-NEXT: v_mov_b16_e32 v33.l, v6.h
-; GFX11-TRUE16-NEXT: v_mov_b16_e32 v34.l, v5.h
-; GFX11-TRUE16-NEXT: v_mov_b16_e32 v33.h, v32.h
-; GFX11-TRUE16-NEXT: v_mov_b16_e32 v34.h, v32.h
-; GFX11-TRUE16-NEXT: v_mov_b16_e32 v35.l, v4.h
-; GFX11-TRUE16-NEXT: v_mov_b16_e32 v35.h, v32.h
-; GFX11-TRUE16-NEXT: v_mov_b16_e32 v36.l, v3.h
-; GFX11-TRUE16-NEXT: v_mov_b16_e32 v36.h, v32.h
-; GFX11-TRUE16-NEXT: v_mov_b16_e32 v37.l, v2.h
-; GFX11-TRUE16-NEXT: v_mov_b16_e32 v37.h, v32.h
-; GFX11-TRUE16-NEXT: v_mov_b16_e32 v38.l, v1.h
-; GFX11-TRUE16-NEXT: v_mov_b16_e32 v38.h, v32.h
-; GFX11-TRUE16-NEXT: v_mov_b16_e32 v39.l, v0.h
-; GFX11-TRUE16-NEXT: v_mov_b16_e32 v39.h, v32.h
-; GFX11-TRUE16-NEXT: v_and_b32_e32 v55, 0xffff, v0
-; GFX11-TRUE16-NEXT: v_and_b32_e32 v54, 0xffff, v1
-; GFX11-TRUE16-NEXT: v_and_b32_e32 v53, 0xffff, v2
-; GFX11-TRUE16-NEXT: v_and_b32_e32 v52, 0xffff, v3
-; GFX11-TRUE16-NEXT: v_and_b32_e32 v51, 0xffff, v4
-; GFX11-TRUE16-NEXT: v_and_b32_e32 v50, 0xffff, v5
-; GFX11-TRUE16-NEXT: v_and_b32_e32 v49, 0xffff, v6
-; GFX11-TRUE16-NEXT: v_and_b32_e32 v48, 0xffff, v7
-; GFX11-TRUE16-NEXT: s_lshr_b32 s41, s29, 16
-; GFX11-TRUE16-NEXT: s_lshr_b32 s15, s28, 16
-; GFX11-TRUE16-NEXT: s_lshr_b32 s42, s27, 16
-; GFX11-TRUE16-NEXT: s_lshr_b32 s14, s26, 16
-; GFX11-TRUE16-NEXT: s_lshr_b32 s13, s25, 16
-; GFX11-TRUE16-NEXT: s_lshr_b32 s12, s24, 16
-; GFX11-TRUE16-NEXT: s_lshr_b32 s11, s23, 16
-; GFX11-TRUE16-NEXT: s_lshr_b32 s10, s22, 16
-; GFX11-TRUE16-NEXT: s_lshr_b32 s9, s21, 16
-; GFX11-TRUE16-NEXT: s_lshr_b32 s8, s20, 16
-; GFX11-TRUE16-NEXT: s_lshr_b32 s7, s19, 16
-; GFX11-TRUE16-NEXT: s_lshr_b32 s6, s18, 16
-; GFX11-TRUE16-NEXT: s_lshr_b32 s5, s17, 16
-; GFX11-TRUE16-NEXT: s_lshr_b32 s4, s16, 16
-; GFX11-TRUE16-NEXT: s_lshr_b32 s43, s3, 16
-; GFX11-TRUE16-NEXT: s_lshr_b32 s44, s2, 16
-; GFX11-TRUE16-NEXT: s_lshr_b32 s45, s1, 16
-; GFX11-TRUE16-NEXT: s_lshr_b32 s46, s0, 16
-; GFX11-TRUE16-NEXT: s_mov_b32 s40, 0
-; GFX11-TRUE16-NEXT: s_pack_ll_b32_b16 s0, s0, s46
-; GFX11-TRUE16-NEXT: s_pack_ll_b32_b16 s1, s1, s45
-; GFX11-TRUE16-NEXT: s_pack_ll_b32_b16 s2, s2, s44
-; GFX11-TRUE16-NEXT: s_pack_ll_b32_b16 s3, s3, s43
-; GFX11-TRUE16-NEXT: s_pack_ll_b32_b16 s4, s16, s4
-; GFX11-TRUE16-NEXT: s_pack_ll_b32_b16 s5, s17, s5
-; GFX11-TRUE16-NEXT: s_pack_ll_b32_b16 s6, s18, s6
-; GFX11-TRUE16-NEXT: s_pack_ll_b32_b16 s7, s19, s7
-; GFX11-TRUE16-NEXT: s_pack_ll_b32_b16 s8, s20, s8
-; GFX11-TRUE16-NEXT: s_pack_ll_b32_b16 s9, s21, s9
-; GFX11-TRUE16-NEXT: s_pack_ll_b32_b16 s10, s22, s10
-; GFX11-TRUE16-NEXT: s_pack_ll_b32_b16 s11, s23, s11
-; GFX11-TRUE16-NEXT: s_pack_ll_b32_b16 s12, s24, s12
-; GFX11-TRUE16-NEXT: s_pack_ll_b32_b16 s13, s25, s13
-; GFX11-TRUE16-NEXT: s_pack_ll_b32_b16 s14, s26, s14
-; GFX11-TRUE16-NEXT: s_pack_ll_b32_b16 s17, s27, s42
-; GFX11-TRUE16-NEXT: s_pack_ll_b32_b16 s15, s28, s15
-; GFX11-TRUE16-NEXT: s_pack_ll_b32_b16 s16, s29, s41
+; GFX11-TRUE16-NEXT: s_clause 0x1f
+; GFX11-TRUE16-NEXT: scratch_store_b32 off, v40, s32 offset:316
+; GFX11-TRUE16-NEXT: scratch_store_b32 off, v41, s32 offset:312
+; GFX11-TRUE16-NEXT: scratch_store_b32 off, v42, s32 offset:308
+; GFX11-TRUE16-NEXT: scratch_store_b32 off, v43, s32 offset:304
+; GFX11-TRUE16-NEXT: scratch_store_b32 off, v44, s32 offset:300
+; GFX11-TRUE16-NEXT: scratch_store_b32 off, v45, s32 offset:296
+; GFX11-TRUE16-NEXT: scratch_store_b32 off, v46, s32 offset:292
+; GFX11-TRUE16-NEXT: scratch_store_b32 off, v47, s32 offset:288
+; GFX11-TRUE16-NEXT: scratch_store_b32 off, v56, s32 offset:284
+; GFX11-TRUE16-NEXT: scratch_store_b32 off, v57, s32 offset:280
+; GFX11-TRUE16-NEXT: scratch_store_b32 off, v58, s32 offset:276
+; GFX11-TRUE16-NEXT: scratch_store_b32 off, v59, s32 offset:272
+; GFX11-TRUE16-NEXT: scratch_store_b32 off, v60, s32 offset:268
+; GFX11-TRUE16-NEXT: scratch_store_b32 off, v61, s32 offset:264
+; GFX11-TRUE16-NEXT: scratch_store_b32 off, v62, s32 offset:260
+; GFX11-TRUE16-NEXT: scratch_store_b32 off, v63, s32 offset:256
+; GFX11-TRUE16-NEXT: scratch_store_b32 off, v72, s32 offset:252
+; GFX11-TRUE16-NEXT: scratch_store_b32 off, v73, s32 offset:248
+; GFX11-TRUE16-NEXT: scratch_store_b32 off, v74, s32 offset:244
+; GFX11-TRUE16-NEXT: scratch_store_b32 off, v75, s32 offset:240
+; GFX11-TRUE16-NEXT: scratch_store_b32 off, v76, s32 offset:236
+; GFX11-TRUE16-NEXT: scratch_store_b32 off, v77, s32 offset:232
+; GFX11-TRUE16-NEXT: scratch_store_b32 off, v78, s32 offset:228
+; GFX11-TRUE16-NEXT: scratch_store_b32 off, v79, s32 offset:224
+; GFX11-TRUE16-NEXT: scratch_store_b32 off, v88, s32 offset:220
+; GFX11-TRUE16-NEXT: scratch_store_b32 off, v89, s32 offset:216
+; GFX11-TRUE16-NEXT: scratch_store_b32 off, v90, s32 offset:212
+; GFX11-TRUE16-NEXT: scratch_store_b32 off, v91, s32 offset:208
+; GFX11-TRUE16-NEXT: scratch_store_b32 off, v92, s32 offset:204
+; GFX11-TRUE16-NEXT: scratch_store_b32 off, v93, s32 offset:200
+; GFX11-TRUE16-NEXT: scratch_store_b32 off, v94, s32 offset:196
+; GFX11-TRUE16-NEXT: scratch_store_b32 off, v95, s32 offset:192
+; GFX11-TRUE16-NEXT: s_clause 0x1f
+; GFX11-TRUE16-NEXT: scratch_store_b32 off, v104, s32 offset:188
+; GFX11-TRUE16-NEXT: scratch_store_b32 off, v105, s32 offset:184
+; GFX11-TRUE16-NEXT: scratch_store_b32 off, v106, s32 offset:180
+; GFX11-TRUE16-NEXT: scratch_store_b32 off, v107, s32 offset:176
+; GFX11-TRUE16-NEXT: scratch_store_b32 off, v108, s32 offset:172
+; GFX11-TRUE16-NEXT: scratch_store_b32 off, v109, s32 offset:168
+; GFX11-TRUE16-NEXT: scratch_store_b32 off, v110, s32 offset:164
+; GFX11-TRUE16-NEXT: scratch_store_b32 off, v111, s32 offset:160
+; GFX11-TRUE16-NEXT: scratch_store_b32 off, v120, s32 offset:156
+; GFX11-TRUE16-NEXT: scratch_store_b32 off, v121, s32 offset:152
+; GFX11-TRUE16-NEXT: scratch_store_b32 off, v122, s32 offset:148
+; GFX11-TRUE16-NEXT: scratch_store_b32 off, v123, s32 offset:144
+; GFX11-TRUE16-NEXT: scratch_store_b32 off, v124, s32 offset:140
+; GFX11-TRUE16-NEXT: scratch_store_b32 off, v125, s32 offset:136
+; GFX11-TRUE16-NEXT: scratch_store_b32 off, v126, s32 offset:132
+; GFX11-TRUE16-NEXT: scratch_store_b32 off, v127, s32 offset:128
+; GFX11-TRUE16-NEXT: scratch_store_b32 off, v136, s32 offset:124
+; GFX11-TRUE16-NEXT: scratch_store_b32 off, v137, s32 offset:120
+; GFX11-TRUE16-NEXT: scratch_store_b32 off, v138, s32 offset:116
+; GFX11-TRUE16-NEXT: scratch_store_b32 off, v139, s32 offset:112
+; GFX11-TRUE16-NEXT: scratch_store_b32 off, v140, s32 offset:108
+; GFX11-TRUE16-NEXT: scratch_store_b32 off, v141, s32 offset:104
+; GFX11-TRUE16-NEXT: scratch_store_b32 off, v142, s32 offset:100
+; GFX11-TRUE16-NEXT: scratch_store_b32 off, v143, s32 offset:96
+; GFX11-TRUE16-NEXT: scratch_store_b32 off, v152, s32 offset:92
+; GFX11-TRUE16-NEXT: scratch_store_b32 off, v153, s32 offset:88
+; GFX11-TRUE16-NEXT: scratch_store_b32 off, v154, s32 offset:84
+; GFX11-TRUE16-NEXT: scratch_store_b32 off, v155, s32 offset:80
+; GFX11-TRUE16-NEXT: scratch_store_b32 off, v156, s32 offset:76
+; GFX11-TRUE16-NEXT: scratch_store_b32 off, v157, s32 offset:72
+; GFX11-TRUE16-NEXT: scratch_store_b32 off, v158, s32 offset:68
+; GFX11-TRUE16-NEXT: scratch_store_b32 off, v159, s32 offset:64
+; GFX11-TRUE16-NEXT: s_clause 0xf
+; GFX11-TRUE16-NEXT: scratch_store_b32 off, v168, s32 offset:60
+; GFX11-TRUE16-NEXT: scratch_store_b32 off, v169, s32 offset:56
+; GFX11-TRUE16-NEXT: scratch_store_b32 off, v170, s32 offset:52
+; GFX11-TRUE16-NEXT: scratch_store_b32 off, v171, s32 offset:48
+; GFX11-TRUE16-NEXT: scratch_store_b32 off, v172, s32 offset:44
+; GFX11-TRUE16-NEXT: scratch_store_b32 off, v173, s32 offset:40
+; GFX11-TRUE16-NEXT: scratch_store_b32 off, v174, s32 offset:36
+; GFX11-TRUE16-NEXT: scratch_store_b32 off, v175, s32 offset:32
+; GFX11-TRUE16-NEXT: scratch_store_b32 off, v184, s32 offset:28
+; GFX11-TRUE16-NEXT: scratch_store_b32 off, v185, s32 offset:24
+; GFX11-TRUE16-NEXT: scratch_store_b32 off, v186, s32 offset:20
+; GFX11-TRUE16-NEXT: scratch_store_b32 off, v187, s32 offset:16
+; GFX11-TRUE16-NEXT: scratch_store_b32 off, v188, s32 offset:12
+; GFX11-TRUE16-NEXT: scratch_store_b32 off, v189, s32 offset:8
+; GFX11-TRUE16-NEXT: scratch_store_b32 off, v190, s32 offset:4
+; GFX11-TRUE16-NEXT: scratch_store_b32 off, v191, s32
+; GFX11-TRUE16-NEXT: v_dual_mov_b32 v25, v7 :: v_dual_mov_b32 v186, v6
+; GFX11-TRUE16-NEXT: v_dual_mov_b32 v187, v5 :: v_dual_mov_b32 v188, v4
+; GFX11-TRUE16-NEXT: v_dual_mov_b32 v189, v3 :: v_dual_mov_b32 v190, v2
+; GFX11-TRUE16-NEXT: v_mov_b32_e32 v191, v1
+; GFX11-TRUE16-NEXT: v_mov_b32_e32 v185, v0
+; GFX11-TRUE16-NEXT: s_lshr_b32 s15, s29, 16
+; GFX11-TRUE16-NEXT: s_lshr_b32 s14, s28, 16
+; GFX11-TRUE16-NEXT: s_lshr_b32 s13, s27, 16
+; GFX11-TRUE16-NEXT: s_lshr_b32 s12, s26, 16
+; GFX11-TRUE16-NEXT: s_lshr_b32 s11, s25, 16
+; GFX11-TRUE16-NEXT: s_lshr_b32 s10, s24, 16
+; GFX11-TRUE16-NEXT: s_lshr_b32 s9, s23, 16
+; GFX11-TRUE16-NEXT: s_lshr_b32 s8, s22, 16
+; GFX11-TRUE16-NEXT: s_lshr_b32 s7, s21, 16
+; GFX11-TRUE16-NEXT: s_lshr_b32 s6, s20, 16
+; GFX11-TRUE16-NEXT: s_lshr_b32 s5, s19, 16
+; GFX11-TRUE16-NEXT: s_lshr_b32 s4, s18, 16
+; GFX11-TRUE16-NEXT: s_lshr_b32 s43, s17, 16
+; GFX11-TRUE16-NEXT: s_lshr_b32 s44, s16, 16
+; GFX11-TRUE16-NEXT: s_lshr_b32 s45, s3, 16
+; GFX11-TRUE16-NEXT: s_lshr_b32 s46, s2, 16
+; GFX11-TRUE16-NEXT: s_lshr_b32 s41, s1, 16
+; GFX11-TRUE16-NEXT: s_lshr_b32 s40, s0, 16
+; GFX11-TRUE16-NEXT: s_mov_b32 s42, 0
+; GFX11-TRUE16-NEXT: s_pack_ll_b32_b16 s40, s0, s40
+; GFX11-TRUE16-NEXT: s_pack_ll_b32_b16 s41, s1, s41
+; GFX11-TRUE16-NEXT: s_pack_ll_b32_b16 s0, s2, s46
+; GFX11-TRUE16-NEXT: s_pack_ll_b32_b16 s1, s3, s45
+; GFX11-TRUE16-NEXT: s_pack_ll_b32_b16 s2, s16, s44
+; GFX11-TRUE16-NEXT: s_pack_ll_b32_b16 s3, s17, s43
+; GFX11-TRUE16-NEXT: s_pack_ll_b32_b16 s4, s18, s4
+; GFX11-TRUE16-NEXT: s_pack_ll_b32_b16 s5, s19, s5
+; GFX11-TRUE16-NEXT: s_pack_ll_b32_b16 s6, s20, s6
+; GFX11-TRUE16-NEXT: s_pack_ll_b32_b16 s7, s21, s7
+; GFX11-TRUE16-NEXT: s_pack_ll_b32_b16 s8, s22, s8
+; GFX11-TRUE16-NEXT: s_pack_ll_b32_b16 s9, s23, s9
+; GFX11-TRUE16-NEXT: s_pack_ll_b32_b16 s10, s24, s10
+; GFX11-TRUE16-NEXT: s_pack_ll_b32_b16 s11, s25, s11
+; GFX11-TRUE16-NEXT: s_pack_ll_b32_b16 s12, s26, s12
+; GFX11-TRUE16-NEXT: s_pack_ll_b32_b16 s13, s27, s13
+; GFX11-TRUE16-NEXT: s_pack_ll_b32_b16 s14, s28, s14
+; GFX11-TRUE16-NEXT: s_pack_ll_b32_b16 s15, s29, s15
; GFX11-TRUE16-NEXT: s_and_b32 s47, vcc_lo, exec_lo
; GFX11-TRUE16-NEXT: s_cbranch_scc0 .LBB31_4
; GFX11-TRUE16-NEXT: ; %bb.1: ; %cmp.false
-; GFX11-TRUE16-NEXT: v_lshl_or_b32 v18, v39, 16, v55
-; GFX11-TRUE16-NEXT: v_lshl_or_b32 v19, v38, 16, v54
-; GFX11-TRUE16-NEXT: v_lshl_or_b32 v20, v37, 16, v53
-; GFX11-TRUE16-NEXT: v_lshl_or_b32 v21, v36, 16, v52
-; GFX11-TRUE16-NEXT: v_lshl_or_b32 v22, v35, 16, v51
-; GFX11-TRUE16-NEXT: v_lshl_or_b32 v23, v34, 16, v50
-; GFX11-TRUE16-NEXT: v_lshl_or_b32 v24, v33, 16, v49
-; GFX11-TRUE16-NEXT: v_lshl_or_b32 v25, v32, 16, v48
-; GFX11-TRUE16-NEXT: v_dual_mov_b32 v0, s0 :: v_dual_mov_b32 v1, s1
-; GFX11-TRUE16-NEXT: v_dual_mov_b32 v2, s2 :: v_dual_mov_b32 v3, s3
-; GFX11-TRUE16-NEXT: v_dual_mov_b32 v4, s4 :: v_dual_mov_b32 v5, s5
-; GFX11-TRUE16-NEXT: v_dual_mov_b32 v6, s6 :: v_dual_mov_b32 v7, s7
-; GFX11-TRUE16-NEXT: v_dual_mov_b32 v8, s8 :: v_dual_mov_b32 v9, s9
-; GFX11-TRUE16-NEXT: v_dual_mov_b32 v10, s10 :: v_dual_mov_b32 v11, s11
-; GFX11-TRUE16-NEXT: v_dual_mov_b32 v12, s12 :: v_dual_mov_b32 v13, s13
-; GFX11-TRUE16-NEXT: v_dual_mov_b32 v14, s14 :: v_dual_mov_b32 v15, s17
-; GFX11-TRUE16-NEXT: v_dual_mov_b32 v16, s15 :: v_dual_mov_b32 v17, s16
-; GFX11-TRUE16-NEXT: s_and_not1_b32 vcc_lo, exec_lo, s40
+; GFX11-TRUE16-NEXT: v_dual_mov_b32 v0, s40 :: v_dual_mov_b32 v5, s0
+; GFX11-TRUE16-NEXT: v_dual_mov_b32 v2, s41 :: v_dual_mov_b32 v9, s1
+; GFX11-TRUE16-NEXT: v_dual_mov_b32 v14, s2 :: v_dual_mov_b32 v27, s4
+; GFX11-TRUE16-NEXT: v_dual_mov_b32 v20, s3 :: v_dual_mov_b32 v35, s5
+; GFX11-TRUE16-NEXT: v_dual_mov_b32 v44, s6 :: v_dual_mov_b32 v65, s8
+; GFX11-TRUE16-NEXT: v_dual_mov_b32 v54, s7 :: v_dual_mov_b32 v77, s9
+; GFX11-TRUE16-NEXT: v_dual_mov_b32 v90, s10 :: v_dual_mov_b32 v119, s12
+; GFX11-TRUE16-NEXT: v_dual_mov_b32 v104, s11 :: v_dual_mov_b32 v135, s13
+; GFX11-TRUE16-NEXT: v_mov_b32_e32 v152, s14
+; GFX11-TRUE16-NEXT: v_mov_b32_e32 v170, s15
+; GFX11-TRUE16-NEXT: s_and_not1_b32 vcc_lo, exec_lo, s42
; GFX11-TRUE16-NEXT: s_cbranch_vccnz .LBB31_3
; GFX11-TRUE16-NEXT: .LBB31_2: ; %cmp.true
-; GFX11-TRUE16-NEXT: v_lshl_or_b32 v18, v39, 16, v55
-; GFX11-TRUE16-NEXT: v_lshl_or_b32 v19, v38, 16, v54
-; GFX11-TRUE16-NEXT: v_lshl_or_b32 v20, v37, 16, v53
-; GFX11-TRUE16-NEXT: v_lshl_or_b32 v21, v36, 16, v52
-; GFX11-TRUE16-NEXT: v_lshl_or_b32 v22, v35, 16, v51
-; GFX11-TRUE16-NEXT: v_lshl_or_b32 v23, v34, 16, v50
-; GFX11-TRUE16-NEXT: v_lshl_or_b32 v24, v33, 16, v49
-; GFX11-TRUE16-NEXT: v_lshl_or_b32 v25, v32, 16, v48
-; GFX11-TRUE16-NEXT: v_pk_add_u16 v0, s0, 3 op_sel_hi:[1,0]
-; GFX11-TRUE16-NEXT: v_pk_add_u16 v1, s1, 3 op_sel_hi:[1,0]
-; GFX11-TRUE16-NEXT: v_pk_add_u16 v2, s2, 3 op_sel_hi:[1,0]
-; GFX11-TRUE16-NEXT: v_pk_add_u16 v3, s3, 3 op_sel_hi:[1,0]
-; GFX11-TRUE16-NEXT: v_pk_add_u16 v4, s4, 3 op_sel_hi:[1,0]
-; GFX11-TRUE16-NEXT: v_pk_add_u16 v5, s5, 3 op_sel_hi:[1,0]
-; GFX11-TRUE16-NEXT: v_pk_add_u16 v6, s6, 3 op_sel_hi:[1,0]
-; GFX11-TRUE16-NEXT: v_pk_add_u16 v7, s7, 3 op_sel_hi:[1,0]
-; GFX11-TRUE16-NEXT: v_pk_add_u16 v8, s8, 3 op_sel_hi:[1,0]
-; GFX11-TRUE16-NEXT: v_pk_add_u16 v9, s9, 3 op_sel_hi:[1,0]
-; GFX11-TRUE16-NEXT: v_pk_add_u16 v10, s10, 3 op_sel_hi:[1,0]
-; GFX11-TRUE16-NEXT: v_pk_add_u16 v11, s11, 3 op_sel_hi:[1,0]
-; GFX11-TRUE16-NEXT: v_pk_add_u16 v12, s12, 3 op_sel_hi:[1,0]
-; GFX11-TRUE16-NEXT: v_pk_add_u16 v13, s13, 3 op_sel_hi:[1,0]
-; GFX11-TRUE16-NEXT: v_pk_add_u16 v14, s14, 3 op_sel_hi:[1,0]
-; GFX11-TRUE16-NEXT: v_pk_add_u16 v15, s17, 3 op_sel_hi:[1,0]
-; GFX11-TRUE16-NEXT: v_pk_add_u16 v16, s15, 3 op_sel_hi:[1,0]
-; GFX11-TRUE16-NEXT: v_pk_add_u16 v17, s16, 3 op_sel_hi:[1,0]
-; GFX11-TRUE16-NEXT: v_pk_add_u16 v18, v18, 3 op_sel_hi:[1,0]
-; GFX11-TRUE16-NEXT: v_pk_add_u16 v19, v19, 3 op_sel_hi:[1,0]
-; GFX11-TRUE16-NEXT: v_pk_add_u16 v20, v20, 3 op_sel_hi:[1,0]
-; GFX11-TRUE16-NEXT: v_pk_add_u16 v21, v21, 3 op_sel_hi:[1,0]
-; GFX11-TRUE16-NEXT: v_pk_add_u16 v22, v22, 3 op_sel_hi:[1,0]
-; GFX11-TRUE16-NEXT: v_pk_add_u16 v23, v23, 3 op_sel_hi:[1,0]
-; GFX11-TRUE16-NEXT: v_pk_add_u16 v24, v24, 3 op_sel_hi:[1,0]
+; GFX11-TRUE16-NEXT: v_pk_add_u16 v0, s40, 3 op_sel_hi:[1,0]
+; GFX11-TRUE16-NEXT: v_pk_add_u16 v2, s41, 3 op_sel_hi:[1,0]
+; GFX11-TRUE16-NEXT: v_pk_add_u16 v185, v185, 3 op_sel_hi:[1,0]
+; GFX11-TRUE16-NEXT: v_pk_add_u16 v191, v191, 3 op_sel_hi:[1,0]
+; GFX11-TRUE16-NEXT: v_pk_add_u16 v190, v190, 3 op_sel_hi:[1,0]
+; GFX11-TRUE16-NEXT: v_pk_add_u16 v189, v189, 3 op_sel_hi:[1,0]
+; GFX11-TRUE16-NEXT: v_pk_add_u16 v188, v188, 3 op_sel_hi:[1,0]
+; GFX11-TRUE16-NEXT: v_pk_add_u16 v187, v187, 3 op_sel_hi:[1,0]
+; GFX11-TRUE16-NEXT: v_pk_add_u16 v186, v186, 3 op_sel_hi:[1,0]
; GFX11-TRUE16-NEXT: v_pk_add_u16 v25, v25, 3 op_sel_hi:[1,0]
+; GFX11-TRUE16-NEXT: v_pk_add_u16 v5, s0, 3 op_sel_hi:[1,0]
+; GFX11-TRUE16-NEXT: v_pk_add_u16 v9, s1, 3 op_sel_hi:[1,0]
+; GFX11-TRUE16-NEXT: v_pk_add_u16 v14, s2, 3 op_sel_hi:[1,0]
+; GFX11-TRUE16-NEXT: v_pk_add_u16 v20, s3, 3 op_sel_hi:[1,0]
+; GFX11-TRUE16-NEXT: v_pk_add_u16 v27, s4, 3 op_sel_hi:[1,0]
+; GFX11-TRUE16-NEXT: v_pk_add_u16 v35, s5, 3 op_sel_hi:[1,0]
+; GFX11-TRUE16-NEXT: v_pk_add_u16 v44, s6, 3 op_sel_hi:[1,0]
+; GFX11-TRUE16-NEXT: v_pk_add_u16 v54, s7, 3 op_sel_hi:[1,0]
+; GFX11-TRUE16-NEXT: v_pk_add_u16 v65, s8, 3 op_sel_hi:[1,0]
+; GFX11-TRUE16-NEXT: v_pk_add_u16 v77, s9, 3 op_sel_hi:[1,0]
+; GFX11-TRUE16-NEXT: v_pk_add_u16 v90, s10, 3 op_sel_hi:[1,0]
+; GFX11-TRUE16-NEXT: v_pk_add_u16 v104, s11, 3 op_sel_hi:[1,0]
+; GFX11-TRUE16-NEXT: v_pk_add_u16 v119, s12, 3 op_sel_hi:[1,0]
+; GFX11-TRUE16-NEXT: v_pk_add_u16 v135, s13, 3 op_sel_hi:[1,0]
+; GFX11-TRUE16-NEXT: v_pk_add_u16 v152, s14, 3 op_sel_hi:[1,0]
+; GFX11-TRUE16-NEXT: v_pk_add_u16 v170, s15, 3 op_sel_hi:[1,0]
; GFX11-TRUE16-NEXT: .LBB31_3: ; %end
+; GFX11-TRUE16-NEXT: v_dual_mov_b32 v1, v2 :: v_dual_mov_b32 v2, v5
+; GFX11-TRUE16-NEXT: v_dual_mov_b32 v5, v20 :: v_dual_mov_b32 v6, v27
+; GFX11-TRUE16-NEXT: v_dual_mov_b32 v7, v35 :: v_dual_mov_b32 v8, v44
+; GFX11-TRUE16-NEXT: v_dual_mov_b32 v11, v77 :: v_dual_mov_b32 v12, v90
+; GFX11-TRUE16-NEXT: v_mov_b32_e32 v13, v104
+; GFX11-TRUE16-NEXT: v_dual_mov_b32 v15, v135 :: v_dual_mov_b32 v16, v152
+; GFX11-TRUE16-NEXT: v_dual_mov_b32 v17, v170 :: v_dual_mov_b32 v18, v185
+; GFX11-TRUE16-NEXT: v_dual_mov_b32 v19, v191 :: v_dual_mov_b32 v20, v190
+; GFX11-TRUE16-NEXT: v_dual_mov_b32 v21, v189 :: v_dual_mov_b32 v22, v188
+; GFX11-TRUE16-NEXT: v_dual_mov_b32 v23, v187 :: v_dual_mov_b32 v24, v186
+; GFX11-TRUE16-NEXT: s_clause 0x1f
+; GFX11-TRUE16-NEXT: scratch_load_b32 v191, off, s32
+; GFX11-TRUE16-NEXT: scratch_load_b32 v190, off, s32 offset:4
+; GFX11-TRUE16-NEXT: scratch_load_b32 v189, off, s32 offset:8
+; GFX11-TRUE16-NEXT: scratch_load_b32 v188, off, s32 offset:12
+; GFX11-TRUE16-NEXT: scratch_load_b32 v187, off, s32 offset:16
+; GFX11-TRUE16-NEXT: scratch_load_b32 v186, off, s32 offset:20
+; GFX11-TRUE16-NEXT: scratch_load_b32 v185, off, s32 offset:24
+; GFX11-TRUE16-NEXT: scratch_load_b32 v184, off, s32 offset:28
+; GFX11-TRUE16-NEXT: scratch_load_b32 v175, off, s32 offset:32
+; GFX11-TRUE16-NEXT: scratch_load_b32 v174, off, s32 offset:36
+; GFX11-TRUE16-NEXT: scratch_load_b32 v173, off, s32 offset:40
+; GFX11-TRUE16-NEXT: scratch_load_b32 v172, off, s32 offset:44
+; GFX11-TRUE16-NEXT: scratch_load_b32 v171, off, s32 offset:48
+; GFX11-TRUE16-NEXT: scratch_load_b32 v170, off, s32 offset:52
+; GFX11-TRUE16-NEXT: scratch_load_b32 v169, off, s32 offset:56
+; GFX11-TRUE16-NEXT: scratch_load_b32 v168, off, s32 offset:60
+; GFX11-TRUE16-NEXT: scratch_load_b32 v159, off, s32 offset:64
+; GFX11-TRUE16-NEXT: scratch_load_b32 v158, off, s32 offset:68
+; GFX11-TRUE16-NEXT: scratch_load_b32 v157, off, s32 offset:72
+; GFX11-TRUE16-NEXT: scratch_load_b32 v156, off, s32 offset:76
+; GFX11-TRUE16-NEXT: scratch_load_b32 v155, off, s32 offset:80
+; GFX11-TRUE16-NEXT: scratch_load_b32 v154, off, s32 offset:84
+; GFX11-TRUE16-NEXT: scratch_load_b32 v153, off, s32 offset:88
+; GFX11-TRUE16-NEXT: scratch_load_b32 v152, off, s32 offset:92
+; GFX11-TRUE16-NEXT: scratch_load_b32 v143, off, s32 offset:96
+; GFX11-TRUE16-NEXT: scratch_load_b32 v142, off, s32 offset:100
+; GFX11-TRUE16-NEXT: scratch_load_b32 v141, off, s32 offset:104
+; GFX11-TRUE16-NEXT: scratch_load_b32 v140, off, s32 offset:108
+; GFX11-TRUE16-NEXT: scratch_load_b32 v139, off, s32 offset:112
+; GFX11-TRUE16-NEXT: scratch_load_b32 v138, off, s32 offset:116
+; GFX11-TRUE16-NEXT: scratch_load_b32 v137, off, s32 offset:120
+; GFX11-TRUE16-NEXT: scratch_load_b32 v136, off, s32 offset:124
+; GFX11-TRUE16-NEXT: s_clause 0x1f
+; GFX11-TRUE16-NEXT: scratch_load_b32 v127, off, s32 offset:128
+; GFX11-TRUE16-NEXT: scratch_load_b32 v126, off, s32 offset:132
+; GFX11-TRUE16-NEXT: scratch_load_b32 v125, off, s32 offset:136
+; GFX11-TRUE16-NEXT: scratch_load_b32 v124, off, s32 offset:140
+; GFX11-TRUE16-NEXT: scratch_load_b32 v123, off, s32 offset:144
+; GFX11-TRUE16-NEXT: scratch_load_b32 v122, off, s32 offset:148
+; GFX11-TRUE16-NEXT: scratch_load_b32 v121, off, s32 offset:152
+; GFX11-TRUE16-NEXT: scratch_load_b32 v120, off, s32 offset:156
+; GFX11-TRUE16-NEXT: scratch_load_b32 v111, off, s32 offset:160
+; GFX11-TRUE16-NEXT: scratch_load_b32 v110, off, s32 offset:164
+; GFX11-TRUE16-NEXT: scratch_load_b32 v109, off, s32 offset:168
+; GFX11-TRUE16-NEXT: scratch_load_b32 v108, off, s32 offset:172
+; GFX11-TRUE16-NEXT: scratch_load_b32 v107, off, s32 offset:176
+; GFX11-TRUE16-NEXT: scratch_load_b32 v106, off, s32 offset:180
+; GFX11-TRUE16-NEXT: scratch_load_b32 v105, off, s32 offset:184
+; GFX11-TRUE16-NEXT: scratch_load_b32 v104, off, s32 offset:188
+; GFX11-TRUE16-NEXT: scratch_load_b32 v95, off, s32 offset:192
+; GFX11-TRUE16-NEXT: scratch_load_b32 v94, off, s32 offset:196
+; GFX11-TRUE16-NEXT: scratch_load_b32 v93, off, s32 offset:200
+; GFX11-TRUE16-NEXT: scratch_load_b32 v92, off, s32 offset:204
+; GFX11-TRUE16-NEXT: scratch_load_b32 v91, off, s32 offset:208
+; GFX11-TRUE16-NEXT: scratch_load_b32 v90, off, s32 offset:212
+; GFX11-TRUE16-NEXT: scratch_load_b32 v89, off, s32 offset:216
+; GFX11-TRUE16-NEXT: scratch_load_b32 v88, off, s32 offset:220
+; GFX11-TRUE16-NEXT: scratch_load_b32 v79, off, s32 offset:224
+; GFX11-TRUE16-NEXT: scratch_load_b32 v78, off, s32 offset:228
+; GFX11-TRUE16-NEXT: scratch_load_b32 v77, off, s32 offset:232
+; GFX11-TRUE16-NEXT: scratch_load_b32 v76, off, s32 offset:236
+; GFX11-TRUE16-NEXT: scratch_load_b32 v75, off, s32 offset:240
+; GFX11-TRUE16-NEXT: scratch_load_b32 v74, off, s32 offset:244
+; GFX11-TRUE16-NEXT: scratch_load_b32 v73, off, s32 offset:248
+; GFX11-TRUE16-NEXT: scratch_load_b32 v72, off, s32 offset:252
+; GFX11-TRUE16-NEXT: s_clause 0xf
+; GFX11-TRUE16-NEXT: scratch_load_b32 v63, off, s32 offset:256
+; GFX11-TRUE16-NEXT: scratch_load_b32 v62, off, s32 offset:260
+; GFX11-TRUE16-NEXT: scratch_load_b32 v61, off, s32 offset:264
+; GFX11-TRUE16-NEXT: scratch_load_b32 v60, off, s32 offset:268
+; GFX11-TRUE16-NEXT: scratch_load_b32 v59, off, s32 offset:272
+; GFX11-TRUE16-NEXT: scratch_load_b32 v58, off, s32 offset:276
+; GFX11-TRUE16-NEXT: scratch_load_b32 v57, off, s32 offset:280
+; GFX11-TRUE16-NEXT: scratch_load_b32 v56, off, s32 offset:284
+; GFX11-TRUE16-NEXT: scratch_load_b32 v47, off, s32 offset:288
+; GFX11-TRUE16-NEXT: scratch_load_b32 v46, off, s32 offset:292
+; GFX11-TRUE16-NEXT: scratch_load_b32 v45, off, s32 offset:296
+; GFX11-TRUE16-NEXT: scratch_load_b32 v44, off, s32 offset:300
+; GFX11-TRUE16-NEXT: scratch_load_b32 v43, off, s32 offset:304
+; GFX11-TRUE16-NEXT: scratch_load_b32 v42, off, s32 offset:308
+; GFX11-TRUE16-NEXT: scratch_load_b32 v41, off, s32 offset:312
+; GFX11-TRUE16-NEXT: scratch_load_b32 v40, off, s32 offset:316
+; GFX11-TRUE16-NEXT: v_dual_mov_b32 v3, v9 :: v_dual_mov_b32 v4, v14
+; GFX11-TRUE16-NEXT: v_dual_mov_b32 v9, v54 :: v_dual_mov_b32 v10, v65
+; GFX11-TRUE16-NEXT: v_mov_b32_e32 v14, v119
+; GFX11-TRUE16-NEXT: s_waitcnt vmcnt(0)
; GFX11-TRUE16-NEXT: s_setpc_b64 s[30:31]
; GFX11-TRUE16-NEXT: .LBB31_4:
+; GFX11-TRUE16-NEXT: v_mov_b32_e32 v53, v25
; GFX11-TRUE16-NEXT: ; implicit-def: $vgpr0_vgpr1_vgpr2_vgpr3_vgpr4_vgpr5_vgpr6_vgpr7_vgpr8_vgpr9_vgpr10_vgpr11_vgpr12_vgpr13_vgpr14_vgpr15_vgpr16_vgpr17_vgpr18_vgpr19_vgpr20_vgpr21_vgpr22_vgpr23_vgpr24_vgpr25_vgpr26_vgpr27_vgpr28_vgpr29_vgpr30_vgpr31
+; GFX11-TRUE16-NEXT: ; implicit-def: $vgpr1_vgpr2_vgpr3_vgpr4_vgpr5_vgpr6_vgpr7_vgpr8_vgpr9_vgpr10_vgpr11_vgpr12_vgpr13_vgpr14_vgpr15_vgpr16_vgpr17_vgpr18_vgpr19_vgpr20_vgpr21_vgpr22_vgpr23_vgpr24_vgpr25_vgpr26_vgpr27_vgpr28_vgpr29_vgpr30_vgpr31_vgpr32
+; GFX11-TRUE16-NEXT: ; implicit-def: $vgpr3_vgpr4_vgpr5_vgpr6_vgpr7_vgpr8_vgpr9_vgpr10_vgpr11_vgpr12_vgpr13_vgpr14_vgpr15_vgpr16_vgpr17_vgpr18_vgpr19_vgpr20_vgpr21_vgpr22_vgpr23_vgpr24_vgpr25_vgpr26_vgpr27_vgpr28_vgpr29_vgpr30_vgpr31_vgpr32_vgpr33_vgpr34
+; GFX11-TRUE16-NEXT: ; implicit-def: $vgpr6_vgpr7_vgpr8_vgpr9_vgpr10_vgpr11_vgpr12_vgpr13_vgpr14_vgpr15_vgpr16_vgpr17_vgpr18_vgpr19_vgpr20_vgpr21_vgpr22_vgpr23_vgpr24_vgpr25_vgpr26_vgpr27_vgpr28_vgpr29_vgpr30_vgpr31_vgpr32_vgpr33_vgpr34_vgpr35_vgpr36_vgpr37
+; GFX11-TRUE16-NEXT: ; implicit-def: $vgpr10_vgpr11_vgpr12_vgpr13_vgpr14_vgpr15_vgpr16_vgpr17_vgpr18_vgpr19_vgpr20_vgpr21_vgpr22_vgpr23_vgpr24_vgpr25_vgpr26_vgpr27_vgpr28_vgpr29_vgpr30_vgpr31_vgpr32_vgpr33_vgpr34_vgpr35_vgpr36_vgpr37_vgpr38_vgpr39_vgpr40_vgpr41
+; GFX11-TRUE16-NEXT: ; implicit-def: $vgpr15_vgpr16_vgpr17_vgpr18_vgpr19_vgpr20_vgpr21_vgpr22_vgpr23_vgpr24_vgpr25_vgpr26_vgpr27_vgpr28_vgpr29_vgpr30_vgpr31_vgpr32_vgpr33_vgpr34_vgpr35_vgpr36_vgpr37_vgpr38_vgpr39_vgpr40_vgpr41_vgpr42_vgpr43_vgpr44_vgpr45_vgpr46
+; GFX11-TRUE16-NEXT: ; implicit-def: $vgpr21_vgpr22_vgpr23_vgpr24_vgpr25_vgpr26_vgpr27_vgpr28_vgpr29_vgpr30_vgpr31_vgpr32_vgpr33_vgpr34_vgpr35_vgpr36_vgpr37_vgpr38_vgpr39_vgpr40_vgpr41_vgpr42_vgpr43_vgpr44_vgpr45_vgpr46_vgpr47_vgpr48_vgpr49_vgpr50_vgpr51_vgpr52
+; GFX11-TRUE16-NEXT: s_delay_alu instid0(VALU_DEP_1)
+; GFX11-TRUE16-NEXT: v_mov_b32_e32 v25, v53
+; GFX11-TRUE16-NEXT: ; implicit-def: $vgpr28_vgpr29_vgpr30_vgpr31_vgpr32_vgpr33_vgpr34_vgpr35_vgpr36_vgpr37_vgpr38_vgpr39_vgpr40_vgpr41_vgpr42_vgpr43_vgpr44_vgpr45_vgpr46_vgpr47_vgpr48_vgpr49_vgpr50_vgpr51_vgpr52_vgpr53_vgpr54_vgpr55_vgpr56_vgpr57_vgpr58_vgpr59
+; GFX11-TRUE16-NEXT: ; implicit-def: $vgpr36_vgpr37_vgpr38_vgpr39_vgpr40_vgpr41_vgpr42_vgpr43_vgpr44_vgpr45_vgpr46_vgpr47_vgpr48_vgpr49_vgpr50_vgpr51_vgpr52_vgpr53_vgpr54_vgpr55_vgpr56_vgpr57_vgpr58_vgpr59_vgpr60_vgpr61_vgpr62_vgpr63_vgpr64_vgpr65_vgpr66_vgpr67
+; GFX11-TRUE16-NEXT: ; implicit-def: $vgpr45_vgpr46_vgpr47_vgpr48_vgpr49_vgpr50_vgpr51_vgpr52_vgpr53_vgpr54_vgpr55_vgpr56_vgpr57_vgpr58_vgpr59_vgpr60_vgpr61_vgpr62_vgpr63_vgpr64_vgpr65_vgpr66_vgpr67_vgpr68_vgpr69_vgpr70_vgpr71_vgpr72_vgpr73_vgpr74_vgpr75_vgpr76
+; GFX11-TRUE16-NEXT: ; implicit-def: $vgpr55_vgpr56_vgpr57_vgpr58_vgpr59_vgpr60_vgpr61_vgpr62_vgpr63_vgpr64_vgpr65_vgpr66_vgpr67_vgpr68_vgpr69_vgpr70_vgpr71_vgpr72_vgpr73_vgpr74_vgpr75_vgpr76_vgpr77_vgpr78_vgpr79_vgpr80_vgpr81_vgpr82_vgpr83_vgpr84_vgpr85_vgpr86
+; GFX11-TRUE16-NEXT: ; implicit-def: $vgpr66_vgpr67_vgpr68_vgpr69_vgpr70_vgpr71_vgpr72_vgpr73_vgpr74_vgpr75_vgpr76_vgpr77_vgpr78_vgpr79_vgpr80_vgpr81_vgpr82_vgpr83_vgpr84_vgpr85_vgpr86_vgpr87_vgpr88_vgpr89_vgpr90_vgpr91_vgpr92_vgpr93_vgpr94_vgpr95_vgpr96_vgpr97
+; GFX11-TRUE16-NEXT: ; implicit-def: $vgpr78_vgpr79_vgpr80_vgpr81_vgpr82_vgpr83_vgpr84_vgpr85_vgpr86_vgpr87_vgpr88_vgpr89_vgpr90_vgpr91_vgpr92_vgpr93_vgpr94_vgpr95_vgpr96_vgpr97_vgpr98_vgpr99_vgpr100_vgpr101_vgpr102_vgpr103_vgpr104_vgpr105_vgpr106_vgpr107_vgpr108_vgpr109
+; GFX11-TRUE16-NEXT: ; implicit-def: $vgpr91_vgpr92_vgpr93_vgpr94_vgpr95_vgpr96_vgpr97_vgpr98_vgpr99_vgpr100_vgpr101_vgpr102_vgpr103_vgpr104_vgpr105_vgpr106_vgpr107_vgpr108_vgpr109_vgpr110_vgpr111_vgpr112_vgpr113_vgpr114_vgpr115_vgpr116_vgpr117_vgpr118_vgpr119_vgpr120_vgpr121_vgpr122
+; GFX11-TRUE16-NEXT: ; implicit-def: $vgpr105_vgpr106_vgpr107_vgpr108_vgpr109_vgpr110_vgpr111_vgpr112_vgpr113_vgpr114_vgpr115_vgpr116_vgpr117_vgpr118_vgpr119_vgpr120_vgpr121_vgpr122_vgpr123_vgpr124_vgpr125_vgpr126_vgpr127_vgpr128_vgpr129_vgpr130_vgpr131_vgpr132_vgpr133_vgpr134_vgpr135_vgpr136
+; GFX11-TRUE16-NEXT: ; implicit-def: $vgpr120_vgpr121_vgpr122_vgpr123_vgpr124_vgpr125_vgpr126_vgpr127_vgpr128_vgpr129_vgpr130_vgpr131_vgpr132_vgpr133_vgpr134_vgpr135_vgpr136_vgpr137_vgpr138_vgpr139_vgpr140_vgpr141_vgpr142_vgpr143_vgpr144_vgpr145_vgpr146_vgpr147_vgpr148_vgpr149_vgpr150_vgpr151
+; GFX11-TRUE16-NEXT: ; implicit-def: $vgpr136_vgpr137_vgpr138_vgpr139_vgpr140_vgpr141_vgpr142_vgpr143_vgpr144_vgpr145_vgpr146_vgpr147_vgpr148_vgpr149_vgpr150_vgpr151_vgpr152_vgpr153_vgpr154_vgpr155_vgpr156_vgpr157_vgpr158_vgpr159_vgpr160_vgpr161_vgpr162_vgpr163_vgpr164_vgpr165_vgpr166_vgpr167
+; GFX11-TRUE16-NEXT: ; implicit-def: $vgpr153_vgpr154_vgpr155_vgpr156_vgpr157_vgpr158_vgpr159_vgpr160_vgpr161_vgpr162_vgpr163_vgpr164_vgpr165_vgpr166_vgpr167_vgpr168_vgpr169_vgpr170_vgpr171_vgpr172_vgpr173_vgpr174_vgpr175_vgpr176_vgpr177_vgpr178_vgpr179_vgpr180_vgpr181_vgpr182_vgpr183_vgpr184
; GFX11-TRUE16-NEXT: s_branch .LBB31_2
;
; GFX11-FAKE16-LABEL: bitcast_v52i16_to_v26f32_scalar:
@@ -18769,178 +19429,340 @@ define inreg <52 x half> @bitcast_v26f32_to_v52f16_scalar(<26 x float> inreg %a,
; GFX9-NEXT: ; implicit-def: $vgpr38
; GFX9-NEXT: s_branch .LBB33_2
;
-; GFX11-LABEL: bitcast_v26f32_to_v52f16_scalar:
-; GFX11: ; %bb.0:
-; GFX11-NEXT: s_waitcnt vmcnt(0) expcnt(0) lgkmcnt(0)
-; GFX11-NEXT: v_cmp_ne_u32_e32 vcc_lo, 0, v8
-; GFX11-NEXT: v_dual_mov_b32 v26, s0 :: v_dual_mov_b32 v25, s1
-; GFX11-NEXT: v_dual_mov_b32 v24, s2 :: v_dual_mov_b32 v23, s3
-; GFX11-NEXT: v_dual_mov_b32 v22, s16 :: v_dual_mov_b32 v21, s17
-; GFX11-NEXT: v_dual_mov_b32 v20, s18 :: v_dual_mov_b32 v19, s19
-; GFX11-NEXT: v_dual_mov_b32 v18, s20 :: v_dual_mov_b32 v9, s22
-; GFX11-NEXT: v_dual_mov_b32 v10, s21 :: v_dual_mov_b32 v15, s23
-; GFX11-NEXT: v_dual_mov_b32 v14, s24 :: v_dual_mov_b32 v13, s25
-; GFX11-NEXT: v_dual_mov_b32 v12, s26 :: v_dual_mov_b32 v11, s27
-; GFX11-NEXT: v_dual_mov_b32 v17, s28 :: v_dual_mov_b32 v16, s29
-; GFX11-NEXT: s_mov_b32 s0, 0
-; GFX11-NEXT: s_and_b32 s1, vcc_lo, exec_lo
-; GFX11-NEXT: s_cbranch_scc0 .LBB33_4
-; GFX11-NEXT: ; %bb.1: ; %cmp.false
-; GFX11-NEXT: v_lshrrev_b32_e32 v34, 16, v7
-; GFX11-NEXT: v_lshrrev_b32_e32 v35, 16, v6
-; GFX11-NEXT: v_lshrrev_b32_e32 v36, 16, v5
-; GFX11-NEXT: v_lshrrev_b32_e32 v37, 16, v4
-; GFX11-NEXT: v_lshrrev_b32_e32 v38, 16, v3
-; GFX11-NEXT: v_lshrrev_b32_e32 v39, 16, v2
-; GFX11-NEXT: v_lshrrev_b32_e32 v48, 16, v1
-; GFX11-NEXT: v_lshrrev_b32_e32 v49, 16, v0
-; GFX11-NEXT: v_lshrrev_b32_e32 v50, 16, v16
-; GFX11-NEXT: v_lshrrev_b32_e32 v51, 16, v17
-; GFX11-NEXT: v_lshrrev_b32_e32 v52, 16, v11
-; GFX11-NEXT: v_lshrrev_b32_e32 v53, 16, v12
-; GFX11-NEXT: v_lshrrev_b32_e32 v54, 16, v13
-; GFX11-NEXT: v_lshrrev_b32_e32 v55, 16, v14
-; GFX11-NEXT: v_lshrrev_b32_e32 v64, 16, v15
-; GFX11-NEXT: v_lshrrev_b32_e32 v65, 16, v9
-; GFX11-NEXT: v_lshrrev_b32_e32 v66, 16, v10
-; GFX11-NEXT: v_lshrrev_b32_e32 v8, 16, v18
-; GFX11-NEXT: v_lshrrev_b32_e32 v31, 16, v19
-; GFX11-NEXT: v_lshrrev_b32_e32 v30, 16, v20
-; GFX11-NEXT: v_lshrrev_b32_e32 v29, 16, v21
-; GFX11-NEXT: v_lshrrev_b32_e32 v28, 16, v22
-; GFX11-NEXT: v_lshrrev_b32_e32 v27, 16, v23
-; GFX11-NEXT: v_lshrrev_b32_e32 v67, 16, v24
-; GFX11-NEXT: v_lshrrev_b32_e32 v33, 16, v25
-; GFX11-NEXT: v_lshrrev_b32_e32 v32, 16, v26
-; GFX11-NEXT: s_and_not1_b32 vcc_lo, exec_lo, s0
-; GFX11-NEXT: s_cbranch_vccnz .LBB33_3
-; GFX11-NEXT: .LBB33_2: ; %cmp.true
-; GFX11-NEXT: v_dual_add_f32 v7, 1.0, v7 :: v_dual_add_f32 v6, 1.0, v6
-; GFX11-NEXT: v_dual_add_f32 v5, 1.0, v5 :: v_dual_add_f32 v4, 1.0, v4
-; GFX11-NEXT: v_dual_add_f32 v3, 1.0, v3 :: v_dual_add_f32 v2, 1.0, v2
-; GFX11-NEXT: v_dual_add_f32 v1, 1.0, v1 :: v_dual_add_f32 v0, 1.0, v0
-; GFX11-NEXT: v_dual_add_f32 v16, 1.0, v16 :: v_dual_add_f32 v17, 1.0, v17
-; GFX11-NEXT: v_dual_add_f32 v11, 1.0, v11 :: v_dual_add_f32 v12, 1.0, v12
-; GFX11-NEXT: v_dual_add_f32 v13, 1.0, v13 :: v_dual_add_f32 v14, 1.0, v14
-; GFX11-NEXT: v_dual_add_f32 v15, 1.0, v15 :: v_dual_add_f32 v10, 1.0, v10
-; GFX11-NEXT: v_dual_add_f32 v9, 1.0, v9 :: v_dual_add_f32 v18, 1.0, v18
-; GFX11-NEXT: v_dual_add_f32 v19, 1.0, v19 :: v_dual_add_f32 v20, 1.0, v20
-; GFX11-NEXT: v_dual_add_f32 v21, 1.0, v21 :: v_dual_add_f32 v22, 1.0, v22
-; GFX11-NEXT: v_dual_add_f32 v23, 1.0, v23 :: v_dual_add_f32 v24, 1.0, v24
-; GFX11-NEXT: v_dual_add_f32 v25, 1.0, v25 :: v_dual_add_f32 v26, 1.0, v26
-; GFX11-NEXT: v_lshrrev_b32_e32 v34, 16, v7
-; GFX11-NEXT: v_lshrrev_b32_e32 v35, 16, v6
-; GFX11-NEXT: v_lshrrev_b32_e32 v36, 16, v5
-; GFX11-NEXT: v_lshrrev_b32_e32 v37, 16, v4
-; GFX11-NEXT: v_lshrrev_b32_e32 v38, 16, v3
-; GFX11-NEXT: v_lshrrev_b32_e32 v39, 16, v2
-; GFX11-NEXT: v_lshrrev_b32_e32 v48, 16, v1
-; GFX11-NEXT: v_lshrrev_b32_e32 v49, 16, v0
-; GFX11-NEXT: v_lshrrev_b32_e32 v50, 16, v16
-; GFX11-NEXT: v_lshrrev_b32_e32 v51, 16, v17
-; GFX11-NEXT: v_lshrrev_b32_e32 v52, 16, v11
-; GFX11-NEXT: v_lshrrev_b32_e32 v53, 16, v12
-; GFX11-NEXT: v_lshrrev_b32_e32 v54, 16, v13
-; GFX11-NEXT: v_lshrrev_b32_e32 v55, 16, v14
-; GFX11-NEXT: v_lshrrev_b32_e32 v64, 16, v15
-; GFX11-NEXT: v_lshrrev_b32_e32 v65, 16, v9
-; GFX11-NEXT: v_lshrrev_b32_e32 v66, 16, v10
-; GFX11-NEXT: v_lshrrev_b32_e32 v8, 16, v18
-; GFX11-NEXT: v_lshrrev_b32_e32 v31, 16, v19
-; GFX11-NEXT: v_lshrrev_b32_e32 v30, 16, v20
-; GFX11-NEXT: v_lshrrev_b32_e32 v29, 16, v21
-; GFX11-NEXT: v_lshrrev_b32_e32 v28, 16, v22
-; GFX11-NEXT: v_lshrrev_b32_e32 v27, 16, v23
-; GFX11-NEXT: v_lshrrev_b32_e32 v67, 16, v24
-; GFX11-NEXT: v_lshrrev_b32_e32 v33, 16, v25
-; GFX11-NEXT: v_lshrrev_b32_e32 v32, 16, v26
-; GFX11-NEXT: .LBB33_3: ; %end
-; GFX11-NEXT: v_and_b32_e32 v19, 0xffff, v19
-; GFX11-NEXT: v_and_b32_e32 v18, 0xffff, v18
-; GFX11-NEXT: v_and_b32_e32 v25, 0xffff, v25
-; GFX11-NEXT: v_and_b32_e32 v23, 0xffff, v23
-; GFX11-NEXT: v_and_b32_e32 v21, 0xffff, v21
-; GFX11-NEXT: v_lshl_or_b32 v31, v31, 16, v19
-; GFX11-NEXT: v_lshl_or_b32 v8, v8, 16, v18
-; GFX11-NEXT: v_and_b32_e32 v15, 0xffff, v15
-; GFX11-NEXT: v_and_b32_e32 v14, 0xffff, v14
-; GFX11-NEXT: v_and_b32_e32 v18, 0xffff, v12
-; GFX11-NEXT: v_and_b32_e32 v19, 0xffff, v11
-; GFX11-NEXT: v_and_b32_e32 v1, 0xffff, v1
-; GFX11-NEXT: v_lshl_or_b32 v33, v33, 16, v25
-; GFX11-NEXT: v_and_b32_e32 v26, 0xffff, v26
-; GFX11-NEXT: v_lshl_or_b32 v27, v27, 16, v23
-; GFX11-NEXT: v_and_b32_e32 v24, 0xffff, v24
-; GFX11-NEXT: v_lshl_or_b32 v29, v29, 16, v21
-; GFX11-NEXT: v_and_b32_e32 v22, 0xffff, v22
-; GFX11-NEXT: v_and_b32_e32 v20, 0xffff, v20
-; GFX11-NEXT: v_lshl_or_b32 v11, v64, 16, v15
-; GFX11-NEXT: v_lshl_or_b32 v12, v55, 16, v14
-; GFX11-NEXT: v_lshl_or_b32 v14, v53, 16, v18
-; GFX11-NEXT: v_lshl_or_b32 v15, v52, 16, v19
-; GFX11-NEXT: v_and_b32_e32 v17, 0xffff, v17
-; GFX11-NEXT: v_and_b32_e32 v18, 0xffff, v16
-; GFX11-NEXT: v_and_b32_e32 v0, 0xffff, v0
-; GFX11-NEXT: v_and_b32_e32 v2, 0xffff, v2
-; GFX11-NEXT: v_lshl_or_b32 v19, v48, 16, v1
-; GFX11-NEXT: v_and_b32_e32 v1, 0xffff, v4
-; GFX11-NEXT: v_and_b32_e32 v10, 0xffff, v10
-; GFX11-NEXT: v_and_b32_e32 v21, 0xffff, v9
-; GFX11-NEXT: v_and_b32_e32 v13, 0xffff, v13
-; GFX11-NEXT: v_lshl_or_b32 v16, v51, 16, v17
-; GFX11-NEXT: v_lshl_or_b32 v17, v50, 16, v18
-; GFX11-NEXT: v_lshl_or_b32 v18, v49, 16, v0
-; GFX11-NEXT: v_and_b32_e32 v0, 0xffff, v3
-; GFX11-NEXT: v_and_b32_e32 v3, 0xffff, v6
-; GFX11-NEXT: v_dual_mov_b32 v7, v31 :: v_dual_and_b32 v4, 0xffff, v7
-; GFX11-NEXT: v_lshl_or_b32 v30, v30, 16, v20
-; GFX11-NEXT: v_lshl_or_b32 v20, v39, 16, v2
-; GFX11-NEXT: v_dual_mov_b32 v5, v29 :: v_dual_and_b32 v2, 0xffff, v5
-; GFX11-NEXT: v_lshl_or_b32 v28, v28, 16, v22
-; GFX11-NEXT: v_lshl_or_b32 v22, v37, 16, v1
-; GFX11-NEXT: v_mov_b32_e32 v1, v33
-; GFX11-NEXT: v_lshl_or_b32 v32, v32, 16, v26
-; GFX11-NEXT: v_lshl_or_b32 v26, v67, 16, v24
-; GFX11-NEXT: v_lshl_or_b32 v9, v66, 16, v10
-; GFX11-NEXT: v_lshl_or_b32 v10, v65, 16, v21
-; GFX11-NEXT: v_lshl_or_b32 v13, v54, 16, v13
-; GFX11-NEXT: v_lshl_or_b32 v21, v38, 16, v0
-; GFX11-NEXT: v_lshl_or_b32 v23, v36, 16, v2
-; GFX11-NEXT: v_lshl_or_b32 v24, v35, 16, v3
-; GFX11-NEXT: v_lshl_or_b32 v25, v34, 16, v4
-; GFX11-NEXT: v_mov_b32_e32 v0, v32
-; GFX11-NEXT: v_dual_mov_b32 v2, v26 :: v_dual_mov_b32 v3, v27
-; GFX11-NEXT: v_mov_b32_e32 v4, v28
-; GFX11-NEXT: v_mov_b32_e32 v6, v30
-; GFX11-NEXT: s_setpc_b64 s[30:31]
-; GFX11-NEXT: .LBB33_4:
-; GFX11-NEXT: ; implicit-def: $vgpr32
-; GFX11-NEXT: ; implicit-def: $vgpr33
-; GFX11-NEXT: ; implicit-def: $vgpr67
-; GFX11-NEXT: ; implicit-def: $vgpr27
-; GFX11-NEXT: ; implicit-def: $vgpr28
-; GFX11-NEXT: ; implicit-def: $vgpr29
-; GFX11-NEXT: ; implicit-def: $vgpr30
-; GFX11-NEXT: ; implicit-def: $vgpr31
-; GFX11-NEXT: ; implicit-def: $vgpr8
-; GFX11-NEXT: ; implicit-def: $vgpr66
-; GFX11-NEXT: ; implicit-def: $vgpr65
-; GFX11-NEXT: ; implicit-def: $vgpr64
-; GFX11-NEXT: ; implicit-def: $vgpr55
-; GFX11-NEXT: ; implicit-def: $vgpr54
-; GFX11-NEXT: ; implicit-def: $vgpr53
-; GFX11-NEXT: ; implicit-def: $vgpr52
-; GFX11-NEXT: ; implicit-def: $vgpr51
-; GFX11-NEXT: ; implicit-def: $vgpr50
-; GFX11-NEXT: ; implicit-def: $vgpr49
-; GFX11-NEXT: ; implicit-def: $vgpr48
-; GFX11-NEXT: ; implicit-def: $vgpr39
-; GFX11-NEXT: ; implicit-def: $vgpr38
-; GFX11-NEXT: ; implicit-def: $vgpr37
-; GFX11-NEXT: ; implicit-def: $vgpr36
-; GFX11-NEXT: ; implicit-def: $vgpr35
-; GFX11-NEXT: ; implicit-def: $vgpr34
-; GFX11-NEXT: s_branch .LBB33_2
+; GFX11-TRUE16-LABEL: bitcast_v26f32_to_v52f16_scalar:
+; GFX11-TRUE16: ; %bb.0:
+; GFX11-TRUE16-NEXT: s_waitcnt vmcnt(0) expcnt(0) lgkmcnt(0)
+; GFX11-TRUE16-NEXT: v_dual_mov_b32 v16, v8 :: v_dual_mov_b32 v25, v7
+; GFX11-TRUE16-NEXT: v_dual_mov_b32 v24, v6 :: v_dual_mov_b32 v23, v5
+; GFX11-TRUE16-NEXT: v_dual_mov_b32 v22, v4 :: v_dual_mov_b32 v21, v3
+; GFX11-TRUE16-NEXT: s_delay_alu instid0(VALU_DEP_3)
+; GFX11-TRUE16-NEXT: v_cmp_ne_u32_e32 vcc_lo, 0, v16
+; GFX11-TRUE16-NEXT: v_dual_mov_b32 v20, v2 :: v_dual_mov_b32 v19, v1
+; GFX11-TRUE16-NEXT: v_dual_mov_b32 v18, v0 :: v_dual_mov_b32 v1, s1
+; GFX11-TRUE16-NEXT: v_dual_mov_b32 v0, s0 :: v_dual_mov_b32 v3, s3
+; GFX11-TRUE16-NEXT: v_dual_mov_b32 v2, s2 :: v_dual_mov_b32 v5, s17
+; GFX11-TRUE16-NEXT: v_dual_mov_b32 v4, s16 :: v_dual_mov_b32 v7, s19
+; GFX11-TRUE16-NEXT: v_dual_mov_b32 v6, s18 :: v_dual_mov_b32 v9, s21
+; GFX11-TRUE16-NEXT: v_dual_mov_b32 v8, s20 :: v_dual_mov_b32 v11, s23
+; GFX11-TRUE16-NEXT: v_dual_mov_b32 v10, s22 :: v_dual_mov_b32 v13, s25
+; GFX11-TRUE16-NEXT: v_dual_mov_b32 v12, s24 :: v_dual_mov_b32 v15, s27
+; GFX11-TRUE16-NEXT: v_dual_mov_b32 v14, s26 :: v_dual_mov_b32 v17, s29
+; GFX11-TRUE16-NEXT: v_mov_b32_e32 v16, s28
+; GFX11-TRUE16-NEXT: s_mov_b32 s0, 0
+; GFX11-TRUE16-NEXT: s_and_b32 s1, vcc_lo, exec_lo
+; GFX11-TRUE16-NEXT: s_cbranch_scc0 .LBB33_4
+; GFX11-TRUE16-NEXT: ; %bb.1: ; %cmp.false
+; GFX11-TRUE16-NEXT: v_lshrrev_b32_e32 v26, 16, v25
+; GFX11-TRUE16-NEXT: v_lshrrev_b32_e32 v27, 16, v24
+; GFX11-TRUE16-NEXT: v_lshrrev_b32_e32 v28, 16, v23
+; GFX11-TRUE16-NEXT: v_lshrrev_b32_e32 v29, 16, v22
+; GFX11-TRUE16-NEXT: v_lshrrev_b32_e32 v30, 16, v21
+; GFX11-TRUE16-NEXT: v_lshrrev_b32_e32 v31, 16, v20
+; GFX11-TRUE16-NEXT: v_lshrrev_b32_e32 v32, 16, v19
+; GFX11-TRUE16-NEXT: v_lshrrev_b32_e32 v33, 16, v18
+; GFX11-TRUE16-NEXT: v_lshrrev_b32_e32 v34, 16, v17
+; GFX11-TRUE16-NEXT: v_lshrrev_b32_e32 v35, 16, v16
+; GFX11-TRUE16-NEXT: v_lshrrev_b32_e32 v36, 16, v15
+; GFX11-TRUE16-NEXT: v_lshrrev_b32_e32 v37, 16, v14
+; GFX11-TRUE16-NEXT: v_lshrrev_b32_e32 v38, 16, v13
+; GFX11-TRUE16-NEXT: v_lshrrev_b32_e32 v39, 16, v12
+; GFX11-TRUE16-NEXT: v_lshrrev_b32_e32 v48, 16, v11
+; GFX11-TRUE16-NEXT: v_lshrrev_b32_e32 v49, 16, v10
+; GFX11-TRUE16-NEXT: v_lshrrev_b32_e32 v50, 16, v9
+; GFX11-TRUE16-NEXT: v_lshrrev_b32_e32 v51, 16, v8
+; GFX11-TRUE16-NEXT: v_lshrrev_b32_e32 v52, 16, v7
+; GFX11-TRUE16-NEXT: v_lshrrev_b32_e32 v53, 16, v6
+; GFX11-TRUE16-NEXT: v_lshrrev_b32_e32 v54, 16, v5
+; GFX11-TRUE16-NEXT: v_lshrrev_b32_e32 v55, 16, v4
+; GFX11-TRUE16-NEXT: v_lshrrev_b32_e32 v64, 16, v3
+; GFX11-TRUE16-NEXT: v_lshrrev_b32_e32 v65, 16, v2
+; GFX11-TRUE16-NEXT: v_lshrrev_b32_e32 v66, 16, v1
+; GFX11-TRUE16-NEXT: v_lshrrev_b32_e32 v67, 16, v0
+; GFX11-TRUE16-NEXT: s_and_not1_b32 vcc_lo, exec_lo, s0
+; GFX11-TRUE16-NEXT: s_cbranch_vccnz .LBB33_3
+; GFX11-TRUE16-NEXT: .LBB33_2: ; %cmp.true
+; GFX11-TRUE16-NEXT: v_dual_add_f32 v25, 1.0, v25 :: v_dual_add_f32 v24, 1.0, v24
+; GFX11-TRUE16-NEXT: v_dual_add_f32 v23, 1.0, v23 :: v_dual_add_f32 v22, 1.0, v22
+; GFX11-TRUE16-NEXT: v_dual_add_f32 v21, 1.0, v21 :: v_dual_add_f32 v20, 1.0, v20
+; GFX11-TRUE16-NEXT: v_dual_add_f32 v19, 1.0, v19 :: v_dual_add_f32 v18, 1.0, v18
+; GFX11-TRUE16-NEXT: v_dual_add_f32 v17, 1.0, v17 :: v_dual_add_f32 v16, 1.0, v16
+; GFX11-TRUE16-NEXT: v_dual_add_f32 v15, 1.0, v15 :: v_dual_add_f32 v14, 1.0, v14
+; GFX11-TRUE16-NEXT: v_dual_add_f32 v13, 1.0, v13 :: v_dual_add_f32 v12, 1.0, v12
+; GFX11-TRUE16-NEXT: v_dual_add_f32 v11, 1.0, v11 :: v_dual_add_f32 v10, 1.0, v10
+; GFX11-TRUE16-NEXT: v_dual_add_f32 v9, 1.0, v9 :: v_dual_add_f32 v8, 1.0, v8
+; GFX11-TRUE16-NEXT: v_dual_add_f32 v7, 1.0, v7 :: v_dual_add_f32 v6, 1.0, v6
+; GFX11-TRUE16-NEXT: v_dual_add_f32 v5, 1.0, v5 :: v_dual_add_f32 v4, 1.0, v4
+; GFX11-TRUE16-NEXT: v_dual_add_f32 v3, 1.0, v3 :: v_dual_add_f32 v2, 1.0, v2
+; GFX11-TRUE16-NEXT: v_dual_add_f32 v1, 1.0, v1 :: v_dual_add_f32 v0, 1.0, v0
+; GFX11-TRUE16-NEXT: v_lshrrev_b32_e32 v26, 16, v25
+; GFX11-TRUE16-NEXT: v_lshrrev_b32_e32 v27, 16, v24
+; GFX11-TRUE16-NEXT: v_lshrrev_b32_e32 v28, 16, v23
+; GFX11-TRUE16-NEXT: v_lshrrev_b32_e32 v29, 16, v22
+; GFX11-TRUE16-NEXT: v_lshrrev_b32_e32 v30, 16, v21
+; GFX11-TRUE16-NEXT: v_lshrrev_b32_e32 v31, 16, v20
+; GFX11-TRUE16-NEXT: v_lshrrev_b32_e32 v32, 16, v19
+; GFX11-TRUE16-NEXT: v_lshrrev_b32_e32 v33, 16, v18
+; GFX11-TRUE16-NEXT: v_lshrrev_b32_e32 v34, 16, v17
+; GFX11-TRUE16-NEXT: v_lshrrev_b32_e32 v35, 16, v16
+; GFX11-TRUE16-NEXT: v_lshrrev_b32_e32 v36, 16, v15
+; GFX11-TRUE16-NEXT: v_lshrrev_b32_e32 v37, 16, v14
+; GFX11-TRUE16-NEXT: v_lshrrev_b32_e32 v38, 16, v13
+; GFX11-TRUE16-NEXT: v_lshrrev_b32_e32 v39, 16, v12
+; GFX11-TRUE16-NEXT: v_lshrrev_b32_e32 v48, 16, v11
+; GFX11-TRUE16-NEXT: v_lshrrev_b32_e32 v49, 16, v10
+; GFX11-TRUE16-NEXT: v_lshrrev_b32_e32 v50, 16, v9
+; GFX11-TRUE16-NEXT: v_lshrrev_b32_e32 v51, 16, v8
+; GFX11-TRUE16-NEXT: v_lshrrev_b32_e32 v52, 16, v7
+; GFX11-TRUE16-NEXT: v_lshrrev_b32_e32 v53, 16, v6
+; GFX11-TRUE16-NEXT: v_lshrrev_b32_e32 v54, 16, v5
+; GFX11-TRUE16-NEXT: v_lshrrev_b32_e32 v55, 16, v4
+; GFX11-TRUE16-NEXT: v_lshrrev_b32_e32 v64, 16, v3
+; GFX11-TRUE16-NEXT: v_lshrrev_b32_e32 v65, 16, v2
+; GFX11-TRUE16-NEXT: v_lshrrev_b32_e32 v66, 16, v1
+; GFX11-TRUE16-NEXT: v_lshrrev_b32_e32 v67, 16, v0
+; GFX11-TRUE16-NEXT: .LBB33_3: ; %end
+; GFX11-TRUE16-NEXT: s_delay_alu instid0(VALU_DEP_1) | instskip(NEXT) | instid1(VALU_DEP_4)
+; GFX11-TRUE16-NEXT: v_dual_mov_b32 v67, v67 :: v_dual_mov_b32 v66, v66
+; GFX11-TRUE16-NEXT: v_dual_mov_b32 v65, v65 :: v_dual_mov_b32 v64, v64
+; GFX11-TRUE16-NEXT: v_dual_mov_b32 v55, v55 :: v_dual_mov_b32 v54, v54
+; GFX11-TRUE16-NEXT: v_dual_mov_b32 v53, v53 :: v_dual_mov_b32 v52, v52
+; GFX11-TRUE16-NEXT: v_dual_mov_b32 v51, v51 :: v_dual_mov_b32 v50, v50
+; GFX11-TRUE16-NEXT: v_dual_mov_b32 v49, v49 :: v_dual_mov_b32 v48, v48
+; GFX11-TRUE16-NEXT: v_dual_mov_b32 v39, v39 :: v_dual_mov_b32 v38, v38
+; GFX11-TRUE16-NEXT: v_dual_mov_b32 v37, v37 :: v_dual_mov_b32 v36, v36
+; GFX11-TRUE16-NEXT: v_dual_mov_b32 v35, v35 :: v_dual_mov_b32 v34, v34
+; GFX11-TRUE16-NEXT: v_dual_mov_b32 v33, v33 :: v_dual_mov_b32 v32, v32
+; GFX11-TRUE16-NEXT: v_dual_mov_b32 v31, v31 :: v_dual_mov_b32 v30, v30
+; GFX11-TRUE16-NEXT: v_dual_mov_b32 v29, v29 :: v_dual_mov_b32 v28, v28
+; GFX11-TRUE16-NEXT: v_dual_mov_b32 v27, v27 :: v_dual_mov_b32 v26, v26
+; GFX11-TRUE16-NEXT: v_mov_b16_e32 v0.h, v67.l
+; GFX11-TRUE16-NEXT: v_mov_b16_e32 v1.h, v66.l
+; GFX11-TRUE16-NEXT: v_mov_b16_e32 v2.h, v65.l
+; GFX11-TRUE16-NEXT: v_mov_b16_e32 v3.h, v64.l
+; GFX11-TRUE16-NEXT: v_mov_b16_e32 v4.h, v55.l
+; GFX11-TRUE16-NEXT: v_mov_b16_e32 v5.h, v54.l
+; GFX11-TRUE16-NEXT: v_mov_b16_e32 v6.h, v53.l
+; GFX11-TRUE16-NEXT: v_mov_b16_e32 v7.h, v52.l
+; GFX11-TRUE16-NEXT: v_mov_b16_e32 v8.h, v51.l
+; GFX11-TRUE16-NEXT: v_mov_b16_e32 v9.h, v50.l
+; GFX11-TRUE16-NEXT: v_mov_b16_e32 v10.h, v49.l
+; GFX11-TRUE16-NEXT: v_mov_b16_e32 v11.h, v48.l
+; GFX11-TRUE16-NEXT: v_mov_b16_e32 v12.h, v39.l
+; GFX11-TRUE16-NEXT: v_mov_b16_e32 v13.h, v38.l
+; GFX11-TRUE16-NEXT: v_mov_b16_e32 v14.h, v37.l
+; GFX11-TRUE16-NEXT: v_mov_b16_e32 v15.h, v36.l
+; GFX11-TRUE16-NEXT: v_mov_b16_e32 v16.h, v35.l
+; GFX11-TRUE16-NEXT: v_mov_b16_e32 v17.h, v34.l
+; GFX11-TRUE16-NEXT: v_mov_b16_e32 v18.h, v33.l
+; GFX11-TRUE16-NEXT: v_mov_b16_e32 v19.h, v32.l
+; GFX11-TRUE16-NEXT: v_mov_b16_e32 v20.h, v31.l
+; GFX11-TRUE16-NEXT: v_mov_b16_e32 v21.h, v30.l
+; GFX11-TRUE16-NEXT: v_mov_b16_e32 v22.h, v29.l
+; GFX11-TRUE16-NEXT: v_mov_b16_e32 v23.h, v28.l
+; GFX11-TRUE16-NEXT: v_mov_b16_e32 v24.h, v27.l
+; GFX11-TRUE16-NEXT: v_mov_b16_e32 v25.h, v26.l
+; GFX11-TRUE16-NEXT: s_setpc_b64 s[30:31]
+; GFX11-TRUE16-NEXT: .LBB33_4:
+; GFX11-TRUE16-NEXT: ; implicit-def: $vgpr67
+; GFX11-TRUE16-NEXT: ; implicit-def: $vgpr66
+; GFX11-TRUE16-NEXT: ; implicit-def: $vgpr65
+; GFX11-TRUE16-NEXT: ; implicit-def: $vgpr64
+; GFX11-TRUE16-NEXT: ; implicit-def: $vgpr55
+; GFX11-TRUE16-NEXT: ; implicit-def: $vgpr54
+; GFX11-TRUE16-NEXT: ; implicit-def: $vgpr53
+; GFX11-TRUE16-NEXT: ; implicit-def: $vgpr52
+; GFX11-TRUE16-NEXT: ; implicit-def: $vgpr51
+; GFX11-TRUE16-NEXT: ; implicit-def: $vgpr50
+; GFX11-TRUE16-NEXT: ; implicit-def: $vgpr49
+; GFX11-TRUE16-NEXT: ; implicit-def: $vgpr48
+; GFX11-TRUE16-NEXT: ; implicit-def: $vgpr39
+; GFX11-TRUE16-NEXT: ; implicit-def: $vgpr38
+; GFX11-TRUE16-NEXT: ; implicit-def: $vgpr37
+; GFX11-TRUE16-NEXT: ; implicit-def: $vgpr36
+; GFX11-TRUE16-NEXT: ; implicit-def: $vgpr35
+; GFX11-TRUE16-NEXT: ; implicit-def: $vgpr34
+; GFX11-TRUE16-NEXT: ; implicit-def: $vgpr33
+; GFX11-TRUE16-NEXT: ; implicit-def: $vgpr32
+; GFX11-TRUE16-NEXT: ; implicit-def: $vgpr31
+; GFX11-TRUE16-NEXT: ; implicit-def: $vgpr30
+; GFX11-TRUE16-NEXT: ; implicit-def: $vgpr29
+; GFX11-TRUE16-NEXT: ; implicit-def: $vgpr28
+; GFX11-TRUE16-NEXT: ; implicit-def: $vgpr27
+; GFX11-TRUE16-NEXT: ; implicit-def: $vgpr26
+; GFX11-TRUE16-NEXT: s_branch .LBB33_2
+;
+; GFX11-FAKE16-LABEL: bitcast_v26f32_to_v52f16_scalar:
+; GFX11-FAKE16: ; %bb.0:
+; GFX11-FAKE16-NEXT: s_waitcnt vmcnt(0) expcnt(0) lgkmcnt(0)
+; GFX11-FAKE16-NEXT: v_cmp_ne_u32_e32 vcc_lo, 0, v8
+; GFX11-FAKE16-NEXT: v_dual_mov_b32 v26, s0 :: v_dual_mov_b32 v25, s1
+; GFX11-FAKE16-NEXT: v_dual_mov_b32 v24, s2 :: v_dual_mov_b32 v23, s3
+; GFX11-FAKE16-NEXT: v_dual_mov_b32 v22, s16 :: v_dual_mov_b32 v21, s17
+; GFX11-FAKE16-NEXT: v_dual_mov_b32 v20, s18 :: v_dual_mov_b32 v19, s19
+; GFX11-FAKE16-NEXT: v_dual_mov_b32 v18, s20 :: v_dual_mov_b32 v9, s22
+; GFX11-FAKE16-NEXT: v_dual_mov_b32 v10, s21 :: v_dual_mov_b32 v15, s23
+; GFX11-FAKE16-NEXT: v_dual_mov_b32 v14, s24 :: v_dual_mov_b32 v13, s25
+; GFX11-FAKE16-NEXT: v_dual_mov_b32 v12, s26 :: v_dual_mov_b32 v11, s27
+; GFX11-FAKE16-NEXT: v_dual_mov_b32 v17, s28 :: v_dual_mov_b32 v16, s29
+; GFX11-FAKE16-NEXT: s_mov_b32 s0, 0
+; GFX11-FAKE16-NEXT: s_and_b32 s1, vcc_lo, exec_lo
+; GFX11-FAKE16-NEXT: s_cbranch_scc0 .LBB33_4
+; GFX11-FAKE16-NEXT: ; %bb.1: ; %cmp.false
+; GFX11-FAKE16-NEXT: v_lshrrev_b32_e32 v34, 16, v7
+; GFX11-FAKE16-NEXT: v_lshrrev_b32_e32 v35, 16, v6
+; GFX11-FAKE16-NEXT: v_lshrrev_b32_e32 v36, 16, v5
+; GFX11-FAKE16-NEXT: v_lshrrev_b32_e32 v37, 16, v4
+; GFX11-FAKE16-NEXT: v_lshrrev_b32_e32 v38, 16, v3
+; GFX11-FAKE16-NEXT: v_lshrrev_b32_e32 v39, 16, v2
+; GFX11-FAKE16-NEXT: v_lshrrev_b32_e32 v48, 16, v1
+; GFX11-FAKE16-NEXT: v_lshrrev_b32_e32 v49, 16, v0
+; GFX11-FAKE16-NEXT: v_lshrrev_b32_e32 v50, 16, v16
+; GFX11-FAKE16-NEXT: v_lshrrev_b32_e32 v51, 16, v17
+; GFX11-FAKE16-NEXT: v_lshrrev_b32_e32 v52, 16, v11
+; GFX11-FAKE16-NEXT: v_lshrrev_b32_e32 v53, 16, v12
+; GFX11-FAKE16-NEXT: v_lshrrev_b32_e32 v54, 16, v13
+; GFX11-FAKE16-NEXT: v_lshrrev_b32_e32 v55, 16, v14
+; GFX11-FAKE16-NEXT: v_lshrrev_b32_e32 v64, 16, v15
+; GFX11-FAKE16-NEXT: v_lshrrev_b32_e32 v65, 16, v9
+; GFX11-FAKE16-NEXT: v_lshrrev_b32_e32 v66, 16, v10
+; GFX11-FAKE16-NEXT: v_lshrrev_b32_e32 v8, 16, v18
+; GFX11-FAKE16-NEXT: v_lshrrev_b32_e32 v31, 16, v19
+; GFX11-FAKE16-NEXT: v_lshrrev_b32_e32 v30, 16, v20
+; GFX11-FAKE16-NEXT: v_lshrrev_b32_e32 v29, 16, v21
+; GFX11-FAKE16-NEXT: v_lshrrev_b32_e32 v28, 16, v22
+; GFX11-FAKE16-NEXT: v_lshrrev_b32_e32 v27, 16, v23
+; GFX11-FAKE16-NEXT: v_lshrrev_b32_e32 v67, 16, v24
+; GFX11-FAKE16-NEXT: v_lshrrev_b32_e32 v33, 16, v25
+; GFX11-FAKE16-NEXT: v_lshrrev_b32_e32 v32, 16, v26
+; GFX11-FAKE16-NEXT: s_and_not1_b32 vcc_lo, exec_lo, s0
+; GFX11-FAKE16-NEXT: s_cbranch_vccnz .LBB33_3
+; GFX11-FAKE16-NEXT: .LBB33_2: ; %cmp.true
+; GFX11-FAKE16-NEXT: v_dual_add_f32 v7, 1.0, v7 :: v_dual_add_f32 v6, 1.0, v6
+; GFX11-FAKE16-NEXT: v_dual_add_f32 v5, 1.0, v5 :: v_dual_add_f32 v4, 1.0, v4
+; GFX11-FAKE16-NEXT: v_dual_add_f32 v3, 1.0, v3 :: v_dual_add_f32 v2, 1.0, v2
+; GFX11-FAKE16-NEXT: v_dual_add_f32 v1, 1.0, v1 :: v_dual_add_f32 v0, 1.0, v0
+; GFX11-FAKE16-NEXT: v_dual_add_f32 v16, 1.0, v16 :: v_dual_add_f32 v17, 1.0, v17
+; GFX11-FAKE16-NEXT: v_dual_add_f32 v11, 1.0, v11 :: v_dual_add_f32 v12, 1.0, v12
+; GFX11-FAKE16-NEXT: v_dual_add_f32 v13, 1.0, v13 :: v_dual_add_f32 v14, 1.0, v14
+; GFX11-FAKE16-NEXT: v_dual_add_f32 v15, 1.0, v15 :: v_dual_add_f32 v10, 1.0, v10
+; GFX11-FAKE16-NEXT: v_dual_add_f32 v9, 1.0, v9 :: v_dual_add_f32 v18, 1.0, v18
+; GFX11-FAKE16-NEXT: v_dual_add_f32 v19, 1.0, v19 :: v_dual_add_f32 v20, 1.0, v20
+; GFX11-FAKE16-NEXT: v_dual_add_f32 v21, 1.0, v21 :: v_dual_add_f32 v22, 1.0, v22
+; GFX11-FAKE16-NEXT: v_dual_add_f32 v23, 1.0, v23 :: v_dual_add_f32 v24, 1.0, v24
+; GFX11-FAKE16-NEXT: v_dual_add_f32 v25, 1.0, v25 :: v_dual_add_f32 v26, 1.0, v26
+; GFX11-FAKE16-NEXT: v_lshrrev_b32_e32 v34, 16, v7
+; GFX11-FAKE16-NEXT: v_lshrrev_b32_e32 v35, 16, v6
+; GFX11-FAKE16-NEXT: v_lshrrev_b32_e32 v36, 16, v5
+; GFX11-FAKE16-NEXT: v_lshrrev_b32_e32 v37, 16, v4
+; GFX11-FAKE16-NEXT: v_lshrrev_b32_e32 v38, 16, v3
+; GFX11-FAKE16-NEXT: v_lshrrev_b32_e32 v39, 16, v2
+; GFX11-FAKE16-NEXT: v_lshrrev_b32_e32 v48, 16, v1
+; GFX11-FAKE16-NEXT: v_lshrrev_b32_e32 v49, 16, v0
+; GFX11-FAKE16-NEXT: v_lshrrev_b32_e32 v50, 16, v16
+; GFX11-FAKE16-NEXT: v_lshrrev_b32_e32 v51, 16, v17
+; GFX11-FAKE16-NEXT: v_lshrrev_b32_e32 v52, 16, v11
+; GFX11-FAKE16-NEXT: v_lshrrev_b32_e32 v53, 16, v12
+; GFX11-FAKE16-NEXT: v_lshrrev_b32_e32 v54, 16, v13
+; GFX11-FAKE16-NEXT: v_lshrrev_b32_e32 v55, 16, v14
+; GFX11-FAKE16-NEXT: v_lshrrev_b32_e32 v64, 16, v15
+; GFX11-FAKE16-NEXT: v_lshrrev_b32_e32 v65, 16, v9
+; GFX11-FAKE16-NEXT: v_lshrrev_b32_e32 v66, 16, v10
+; GFX11-FAKE16-NEXT: v_lshrrev_b32_e32 v8, 16, v18
+; GFX11-FAKE16-NEXT: v_lshrrev_b32_e32 v31, 16, v19
+; GFX11-FAKE16-NEXT: v_lshrrev_b32_e32 v30, 16, v20
+; GFX11-FAKE16-NEXT: v_lshrrev_b32_e32 v29, 16, v21
+; GFX11-FAKE16-NEXT: v_lshrrev_b32_e32 v28, 16, v22
+; GFX11-FAKE16-NEXT: v_lshrrev_b32_e32 v27, 16, v23
+; GFX11-FAKE16-NEXT: v_lshrrev_b32_e32 v67, 16, v24
+; GFX11-FAKE16-NEXT: v_lshrrev_b32_e32 v33, 16, v25
+; GFX11-FAKE16-NEXT: v_lshrrev_b32_e32 v32, 16, v26
+; GFX11-FAKE16-NEXT: .LBB33_3: ; %end
+; GFX11-FAKE16-NEXT: v_and_b32_e32 v19, 0xffff, v19
+; GFX11-FAKE16-NEXT: v_and_b32_e32 v18, 0xffff, v18
+; GFX11-FAKE16-NEXT: v_and_b32_e32 v25, 0xffff, v25
+; GFX11-FAKE16-NEXT: v_and_b32_e32 v23, 0xffff, v23
+; GFX11-FAKE16-NEXT: v_and_b32_e32 v21, 0xffff, v21
+; GFX11-FAKE16-NEXT: v_lshl_or_b32 v31, v31, 16, v19
+; GFX11-FAKE16-NEXT: v_lshl_or_b32 v8, v8, 16, v18
+; GFX11-FAKE16-NEXT: v_and_b32_e32 v15, 0xffff, v15
+; GFX11-FAKE16-NEXT: v_and_b32_e32 v14, 0xffff, v14
+; GFX11-FAKE16-NEXT: v_and_b32_e32 v18, 0xffff, v12
+; GFX11-FAKE16-NEXT: v_and_b32_e32 v19, 0xffff, v11
+; GFX11-FAKE16-NEXT: v_and_b32_e32 v1, 0xffff, v1
+; GFX11-FAKE16-NEXT: v_lshl_or_b32 v33, v33, 16, v25
+; GFX11-FAKE16-NEXT: v_and_b32_e32 v26, 0xffff, v26
+; GFX11-FAKE16-NEXT: v_lshl_or_b32 v27, v27, 16, v23
+; GFX11-FAKE16-NEXT: v_and_b32_e32 v24, 0xffff, v24
+; GFX11-FAKE16-NEXT: v_lshl_or_b32 v29, v29, 16, v21
+; GFX11-FAKE16-NEXT: v_and_b32_e32 v22, 0xffff, v22
+; GFX11-FAKE16-NEXT: v_and_b32_e32 v20, 0xffff, v20
+; GFX11-FAKE16-NEXT: v_lshl_or_b32 v11, v64, 16, v15
+; GFX11-FAKE16-NEXT: v_lshl_or_b32 v12, v55, 16, v14
+; GFX11-FAKE16-NEXT: v_lshl_or_b32 v14, v53, 16, v18
+; GFX11-FAKE16-NEXT: v_lshl_or_b32 v15, v52, 16, v19
+; GFX11-FAKE16-NEXT: v_and_b32_e32 v17, 0xffff, v17
+; GFX11-FAKE16-NEXT: v_and_b32_e32 v18, 0xffff, v16
+; GFX11-FAKE16-NEXT: v_and_b32_e32 v0, 0xffff, v0
+; GFX11-FAKE16-NEXT: v_and_b32_e32 v2, 0xffff, v2
+; GFX11-FAKE16-NEXT: v_lshl_or_b32 v19, v48, 16, v1
+; GFX11-FAKE16-NEXT: v_and_b32_e32 v1, 0xffff, v4
+; GFX11-FAKE16-NEXT: v_and_b32_e32 v10, 0xffff, v10
+; GFX11-FAKE16-NEXT: v_and_b32_e32 v21, 0xffff, v9
+; GFX11-FAKE16-NEXT: v_and_b32_e32 v13, 0xffff, v13
+; GFX11-FAKE16-NEXT: v_lshl_or_b32 v16, v51, 16, v17
+; GFX11-FAKE16-NEXT: v_lshl_or_b32 v17, v50, 16, v18
+; GFX11-FAKE16-NEXT: v_lshl_or_b32 v18, v49, 16, v0
+; GFX11-FAKE16-NEXT: v_and_b32_e32 v0, 0xffff, v3
+; GFX11-FAKE16-NEXT: v_and_b32_e32 v3, 0xffff, v6
+; GFX11-FAKE16-NEXT: v_dual_mov_b32 v7, v31 :: v_dual_and_b32 v4, 0xffff, v7
+; GFX11-FAKE16-NEXT: v_lshl_or_b32 v30, v30, 16, v20
+; GFX11-FAKE16-NEXT: v_lshl_or_b32 v20, v39, 16, v2
+; GFX11-FAKE16-NEXT: v_dual_mov_b32 v5, v29 :: v_dual_and_b32 v2, 0xffff, v5
+; GFX11-FAKE16-NEXT: v_lshl_or_b32 v28, v28, 16, v22
+; GFX11-FAKE16-NEXT: v_lshl_or_b32 v22, v37, 16, v1
+; GFX11-FAKE16-NEXT: v_mov_b32_e32 v1, v33
+; GFX11-FAKE16-NEXT: v_lshl_or_b32 v32, v32, 16, v26
+; GFX11-FAKE16-NEXT: v_lshl_or_b32 v26, v67, 16, v24
+; GFX11-FAKE16-NEXT: v_lshl_or_b32 v9, v66, 16, v10
+; GFX11-FAKE16-NEXT: v_lshl_or_b32 v10, v65, 16, v21
+; GFX11-FAKE16-NEXT: v_lshl_or_b32 v13, v54, 16, v13
+; GFX11-FAKE16-NEXT: v_lshl_or_b32 v21, v38, 16, v0
+; GFX11-FAKE16-NEXT: v_lshl_or_b32 v23, v36, 16, v2
+; GFX11-FAKE16-NEXT: v_lshl_or_b32 v24, v35, 16, v3
+; GFX11-FAKE16-NEXT: v_lshl_or_b32 v25, v34, 16, v4
+; GFX11-FAKE16-NEXT: v_mov_b32_e32 v0, v32
+; GFX11-FAKE16-NEXT: v_dual_mov_b32 v2, v26 :: v_dual_mov_b32 v3, v27
+; GFX11-FAKE16-NEXT: v_mov_b32_e32 v4, v28
+; GFX11-FAKE16-NEXT: v_mov_b32_e32 v6, v30
+; GFX11-FAKE16-NEXT: s_setpc_b64 s[30:31]
+; GFX11-FAKE16-NEXT: .LBB33_4:
+; GFX11-FAKE16-NEXT: ; implicit-def: $vgpr32
+; GFX11-FAKE16-NEXT: ; implicit-def: $vgpr33
+; GFX11-FAKE16-NEXT: ; implicit-def: $vgpr67
+; GFX11-FAKE16-NEXT: ; implicit-def: $vgpr27
+; GFX11-FAKE16-NEXT: ; implicit-def: $vgpr28
+; GFX11-FAKE16-NEXT: ; implicit-def: $vgpr29
+; GFX11-FAKE16-NEXT: ; implicit-def: $vgpr30
+; GFX11-FAKE16-NEXT: ; implicit-def: $vgpr31
+; GFX11-FAKE16-NEXT: ; implicit-def: $vgpr8
+; GFX11-FAKE16-NEXT: ; implicit-def: $vgpr66
+; GFX11-FAKE16-NEXT: ; implicit-def: $vgpr65
+; GFX11-FAKE16-NEXT: ; implicit-def: $vgpr64
+; GFX11-FAKE16-NEXT: ; implicit-def: $vgpr55
+; GFX11-FAKE16-NEXT: ; implicit-def: $vgpr54
+; GFX11-FAKE16-NEXT: ; implicit-def: $vgpr53
+; GFX11-FAKE16-NEXT: ; implicit-def: $vgpr52
+; GFX11-FAKE16-NEXT: ; implicit-def: $vgpr51
+; GFX11-FAKE16-NEXT: ; implicit-def: $vgpr50
+; GFX11-FAKE16-NEXT: ; implicit-def: $vgpr49
+; GFX11-FAKE16-NEXT: ; implicit-def: $vgpr48
+; GFX11-FAKE16-NEXT: ; implicit-def: $vgpr39
+; GFX11-FAKE16-NEXT: ; implicit-def: $vgpr38
+; GFX11-FAKE16-NEXT: ; implicit-def: $vgpr37
+; GFX11-FAKE16-NEXT: ; implicit-def: $vgpr36
+; GFX11-FAKE16-NEXT: ; implicit-def: $vgpr35
+; GFX11-FAKE16-NEXT: ; implicit-def: $vgpr34
+; GFX11-FAKE16-NEXT: s_branch .LBB33_2
%cmp = icmp eq i32 %b, 0
br i1 %cmp, label %cmp.true, label %cmp.false
@@ -21183,129 +22005,295 @@ define inreg <26 x float> @bitcast_v52f16_to_v26f32_scalar(<52 x half> inreg %a,
; GFX11-TRUE16-LABEL: bitcast_v52f16_to_v26f32_scalar:
; GFX11-TRUE16: ; %bb.0:
; GFX11-TRUE16-NEXT: s_waitcnt vmcnt(0) expcnt(0) lgkmcnt(0)
-; GFX11-TRUE16-NEXT: v_mov_b16_e32 v32.h, 0
; GFX11-TRUE16-NEXT: v_cmp_ne_u32_e32 vcc_lo, 0, v8
-; GFX11-TRUE16-NEXT: v_mov_b16_e32 v32.l, v7.h
-; GFX11-TRUE16-NEXT: v_mov_b16_e32 v33.l, v6.h
-; GFX11-TRUE16-NEXT: v_mov_b16_e32 v34.l, v5.h
-; GFX11-TRUE16-NEXT: v_mov_b16_e32 v33.h, v32.h
-; GFX11-TRUE16-NEXT: v_mov_b16_e32 v34.h, v32.h
-; GFX11-TRUE16-NEXT: v_mov_b16_e32 v35.l, v4.h
-; GFX11-TRUE16-NEXT: v_mov_b16_e32 v35.h, v32.h
-; GFX11-TRUE16-NEXT: v_mov_b16_e32 v36.l, v3.h
-; GFX11-TRUE16-NEXT: v_mov_b16_e32 v36.h, v32.h
-; GFX11-TRUE16-NEXT: v_mov_b16_e32 v37.l, v2.h
-; GFX11-TRUE16-NEXT: v_mov_b16_e32 v37.h, v32.h
-; GFX11-TRUE16-NEXT: v_mov_b16_e32 v38.l, v1.h
-; GFX11-TRUE16-NEXT: v_mov_b16_e32 v38.h, v32.h
-; GFX11-TRUE16-NEXT: v_mov_b16_e32 v39.l, v0.h
-; GFX11-TRUE16-NEXT: v_mov_b16_e32 v39.h, v32.h
-; GFX11-TRUE16-NEXT: v_and_b32_e32 v55, 0xffff, v0
-; GFX11-TRUE16-NEXT: v_and_b32_e32 v54, 0xffff, v1
-; GFX11-TRUE16-NEXT: v_and_b32_e32 v53, 0xffff, v2
-; GFX11-TRUE16-NEXT: v_and_b32_e32 v52, 0xffff, v3
-; GFX11-TRUE16-NEXT: v_and_b32_e32 v51, 0xffff, v4
-; GFX11-TRUE16-NEXT: v_and_b32_e32 v50, 0xffff, v5
-; GFX11-TRUE16-NEXT: v_and_b32_e32 v49, 0xffff, v6
-; GFX11-TRUE16-NEXT: v_and_b32_e32 v48, 0xffff, v7
-; GFX11-TRUE16-NEXT: s_lshr_b32 s41, s29, 16
-; GFX11-TRUE16-NEXT: s_lshr_b32 s15, s28, 16
-; GFX11-TRUE16-NEXT: s_lshr_b32 s42, s27, 16
-; GFX11-TRUE16-NEXT: s_lshr_b32 s14, s26, 16
-; GFX11-TRUE16-NEXT: s_lshr_b32 s13, s25, 16
-; GFX11-TRUE16-NEXT: s_lshr_b32 s12, s24, 16
-; GFX11-TRUE16-NEXT: s_lshr_b32 s11, s23, 16
-; GFX11-TRUE16-NEXT: s_lshr_b32 s10, s22, 16
-; GFX11-TRUE16-NEXT: s_lshr_b32 s9, s21, 16
-; GFX11-TRUE16-NEXT: s_lshr_b32 s8, s20, 16
-; GFX11-TRUE16-NEXT: s_lshr_b32 s7, s19, 16
-; GFX11-TRUE16-NEXT: s_lshr_b32 s6, s18, 16
-; GFX11-TRUE16-NEXT: s_lshr_b32 s5, s17, 16
-; GFX11-TRUE16-NEXT: s_lshr_b32 s4, s16, 16
-; GFX11-TRUE16-NEXT: s_lshr_b32 s43, s3, 16
-; GFX11-TRUE16-NEXT: s_lshr_b32 s44, s2, 16
-; GFX11-TRUE16-NEXT: s_lshr_b32 s45, s1, 16
-; GFX11-TRUE16-NEXT: s_lshr_b32 s46, s0, 16
-; GFX11-TRUE16-NEXT: s_mov_b32 s40, 0
-; GFX11-TRUE16-NEXT: s_pack_ll_b32_b16 s0, s0, s46
-; GFX11-TRUE16-NEXT: s_pack_ll_b32_b16 s1, s1, s45
-; GFX11-TRUE16-NEXT: s_pack_ll_b32_b16 s2, s2, s44
-; GFX11-TRUE16-NEXT: s_pack_ll_b32_b16 s3, s3, s43
-; GFX11-TRUE16-NEXT: s_pack_ll_b32_b16 s4, s16, s4
-; GFX11-TRUE16-NEXT: s_pack_ll_b32_b16 s5, s17, s5
-; GFX11-TRUE16-NEXT: s_pack_ll_b32_b16 s6, s18, s6
-; GFX11-TRUE16-NEXT: s_pack_ll_b32_b16 s7, s19, s7
-; GFX11-TRUE16-NEXT: s_pack_ll_b32_b16 s8, s20, s8
-; GFX11-TRUE16-NEXT: s_pack_ll_b32_b16 s9, s21, s9
-; GFX11-TRUE16-NEXT: s_pack_ll_b32_b16 s10, s22, s10
-; GFX11-TRUE16-NEXT: s_pack_ll_b32_b16 s11, s23, s11
-; GFX11-TRUE16-NEXT: s_pack_ll_b32_b16 s12, s24, s12
-; GFX11-TRUE16-NEXT: s_pack_ll_b32_b16 s13, s25, s13
-; GFX11-TRUE16-NEXT: s_pack_ll_b32_b16 s14, s26, s14
-; GFX11-TRUE16-NEXT: s_pack_ll_b32_b16 s17, s27, s42
-; GFX11-TRUE16-NEXT: s_pack_ll_b32_b16 s15, s28, s15
-; GFX11-TRUE16-NEXT: s_pack_ll_b32_b16 s16, s29, s41
+; GFX11-TRUE16-NEXT: s_clause 0x1f
+; GFX11-TRUE16-NEXT: scratch_store_b32 off, v40, s32 offset:316
+; GFX11-TRUE16-NEXT: scratch_store_b32 off, v41, s32 offset:312
+; GFX11-TRUE16-NEXT: scratch_store_b32 off, v42, s32 offset:308
+; GFX11-TRUE16-NEXT: scratch_store_b32 off, v43, s32 offset:304
+; GFX11-TRUE16-NEXT: scratch_store_b32 off, v44, s32 offset:300
+; GFX11-TRUE16-NEXT: scratch_store_b32 off, v45, s32 offset:296
+; GFX11-TRUE16-NEXT: scratch_store_b32 off, v46, s32 offset:292
+; GFX11-TRUE16-NEXT: scratch_store_b32 off, v47, s32 offset:288
+; GFX11-TRUE16-NEXT: scratch_store_b32 off, v56, s32 offset:284
+; GFX11-TRUE16-NEXT: scratch_store_b32 off, v57, s32 offset:280
+; GFX11-TRUE16-NEXT: scratch_store_b32 off, v58, s32 offset:276
+; GFX11-TRUE16-NEXT: scratch_store_b32 off, v59, s32 offset:272
+; GFX11-TRUE16-NEXT: scratch_store_b32 off, v60, s32 offset:268
+; GFX11-TRUE16-NEXT: scratch_store_b32 off, v61, s32 offset:264
+; GFX11-TRUE16-NEXT: scratch_store_b32 off, v62, s32 offset:260
+; GFX11-TRUE16-NEXT: scratch_store_b32 off, v63, s32 offset:256
+; GFX11-TRUE16-NEXT: scratch_store_b32 off, v72, s32 offset:252
+; GFX11-TRUE16-NEXT: scratch_store_b32 off, v73, s32 offset:248
+; GFX11-TRUE16-NEXT: scratch_store_b32 off, v74, s32 offset:244
+; GFX11-TRUE16-NEXT: scratch_store_b32 off, v75, s32 offset:240
+; GFX11-TRUE16-NEXT: scratch_store_b32 off, v76, s32 offset:236
+; GFX11-TRUE16-NEXT: scratch_store_b32 off, v77, s32 offset:232
+; GFX11-TRUE16-NEXT: scratch_store_b32 off, v78, s32 offset:228
+; GFX11-TRUE16-NEXT: scratch_store_b32 off, v79, s32 offset:224
+; GFX11-TRUE16-NEXT: scratch_store_b32 off, v88, s32 offset:220
+; GFX11-TRUE16-NEXT: scratch_store_b32 off, v89, s32 offset:216
+; GFX11-TRUE16-NEXT: scratch_store_b32 off, v90, s32 offset:212
+; GFX11-TRUE16-NEXT: scratch_store_b32 off, v91, s32 offset:208
+; GFX11-TRUE16-NEXT: scratch_store_b32 off, v92, s32 offset:204
+; GFX11-TRUE16-NEXT: scratch_store_b32 off, v93, s32 offset:200
+; GFX11-TRUE16-NEXT: scratch_store_b32 off, v94, s32 offset:196
+; GFX11-TRUE16-NEXT: scratch_store_b32 off, v95, s32 offset:192
+; GFX11-TRUE16-NEXT: s_clause 0x1f
+; GFX11-TRUE16-NEXT: scratch_store_b32 off, v104, s32 offset:188
+; GFX11-TRUE16-NEXT: scratch_store_b32 off, v105, s32 offset:184
+; GFX11-TRUE16-NEXT: scratch_store_b32 off, v106, s32 offset:180
+; GFX11-TRUE16-NEXT: scratch_store_b32 off, v107, s32 offset:176
+; GFX11-TRUE16-NEXT: scratch_store_b32 off, v108, s32 offset:172
+; GFX11-TRUE16-NEXT: scratch_store_b32 off, v109, s32 offset:168
+; GFX11-TRUE16-NEXT: scratch_store_b32 off, v110, s32 offset:164
+; GFX11-TRUE16-NEXT: scratch_store_b32 off, v111, s32 offset:160
+; GFX11-TRUE16-NEXT: scratch_store_b32 off, v120, s32 offset:156
+; GFX11-TRUE16-NEXT: scratch_store_b32 off, v121, s32 offset:152
+; GFX11-TRUE16-NEXT: scratch_store_b32 off, v122, s32 offset:148
+; GFX11-TRUE16-NEXT: scratch_store_b32 off, v123, s32 offset:144
+; GFX11-TRUE16-NEXT: scratch_store_b32 off, v124, s32 offset:140
+; GFX11-TRUE16-NEXT: scratch_store_b32 off, v125, s32 offset:136
+; GFX11-TRUE16-NEXT: scratch_store_b32 off, v126, s32 offset:132
+; GFX11-TRUE16-NEXT: scratch_store_b32 off, v127, s32 offset:128
+; GFX11-TRUE16-NEXT: scratch_store_b32 off, v136, s32 offset:124
+; GFX11-TRUE16-NEXT: scratch_store_b32 off, v137, s32 offset:120
+; GFX11-TRUE16-NEXT: scratch_store_b32 off, v138, s32 offset:116
+; GFX11-TRUE16-NEXT: scratch_store_b32 off, v139, s32 offset:112
+; GFX11-TRUE16-NEXT: scratch_store_b32 off, v140, s32 offset:108
+; GFX11-TRUE16-NEXT: scratch_store_b32 off, v141, s32 offset:104
+; GFX11-TRUE16-NEXT: scratch_store_b32 off, v142, s32 offset:100
+; GFX11-TRUE16-NEXT: scratch_store_b32 off, v143, s32 offset:96
+; GFX11-TRUE16-NEXT: scratch_store_b32 off, v152, s32 offset:92
+; GFX11-TRUE16-NEXT: scratch_store_b32 off, v153, s32 offset:88
+; GFX11-TRUE16-NEXT: scratch_store_b32 off, v154, s32 offset:84
+; GFX11-TRUE16-NEXT: scratch_store_b32 off, v155, s32 offset:80
+; GFX11-TRUE16-NEXT: scratch_store_b32 off, v156, s32 offset:76
+; GFX11-TRUE16-NEXT: scratch_store_b32 off, v157, s32 offset:72
+; GFX11-TRUE16-NEXT: scratch_store_b32 off, v158, s32 offset:68
+; GFX11-TRUE16-NEXT: scratch_store_b32 off, v159, s32 offset:64
+; GFX11-TRUE16-NEXT: s_clause 0xf
+; GFX11-TRUE16-NEXT: scratch_store_b32 off, v168, s32 offset:60
+; GFX11-TRUE16-NEXT: scratch_store_b32 off, v169, s32 offset:56
+; GFX11-TRUE16-NEXT: scratch_store_b32 off, v170, s32 offset:52
+; GFX11-TRUE16-NEXT: scratch_store_b32 off, v171, s32 offset:48
+; GFX11-TRUE16-NEXT: scratch_store_b32 off, v172, s32 offset:44
+; GFX11-TRUE16-NEXT: scratch_store_b32 off, v173, s32 offset:40
+; GFX11-TRUE16-NEXT: scratch_store_b32 off, v174, s32 offset:36
+; GFX11-TRUE16-NEXT: scratch_store_b32 off, v175, s32 offset:32
+; GFX11-TRUE16-NEXT: scratch_store_b32 off, v184, s32 offset:28
+; GFX11-TRUE16-NEXT: scratch_store_b32 off, v185, s32 offset:24
+; GFX11-TRUE16-NEXT: scratch_store_b32 off, v186, s32 offset:20
+; GFX11-TRUE16-NEXT: scratch_store_b32 off, v187, s32 offset:16
+; GFX11-TRUE16-NEXT: scratch_store_b32 off, v188, s32 offset:12
+; GFX11-TRUE16-NEXT: scratch_store_b32 off, v189, s32 offset:8
+; GFX11-TRUE16-NEXT: scratch_store_b32 off, v190, s32 offset:4
+; GFX11-TRUE16-NEXT: scratch_store_b32 off, v191, s32
+; GFX11-TRUE16-NEXT: v_dual_mov_b32 v25, v7 :: v_dual_mov_b32 v186, v6
+; GFX11-TRUE16-NEXT: v_dual_mov_b32 v187, v5 :: v_dual_mov_b32 v188, v4
+; GFX11-TRUE16-NEXT: v_dual_mov_b32 v189, v3 :: v_dual_mov_b32 v190, v2
+; GFX11-TRUE16-NEXT: v_mov_b32_e32 v191, v1
+; GFX11-TRUE16-NEXT: v_mov_b32_e32 v185, v0
+; GFX11-TRUE16-NEXT: s_lshr_b32 s15, s29, 16
+; GFX11-TRUE16-NEXT: s_lshr_b32 s14, s28, 16
+; GFX11-TRUE16-NEXT: s_lshr_b32 s13, s27, 16
+; GFX11-TRUE16-NEXT: s_lshr_b32 s12, s26, 16
+; GFX11-TRUE16-NEXT: s_lshr_b32 s11, s25, 16
+; GFX11-TRUE16-NEXT: s_lshr_b32 s10, s24, 16
+; GFX11-TRUE16-NEXT: s_lshr_b32 s9, s23, 16
+; GFX11-TRUE16-NEXT: s_lshr_b32 s8, s22, 16
+; GFX11-TRUE16-NEXT: s_lshr_b32 s7, s21, 16
+; GFX11-TRUE16-NEXT: s_lshr_b32 s6, s20, 16
+; GFX11-TRUE16-NEXT: s_lshr_b32 s5, s19, 16
+; GFX11-TRUE16-NEXT: s_lshr_b32 s4, s18, 16
+; GFX11-TRUE16-NEXT: s_lshr_b32 s43, s17, 16
+; GFX11-TRUE16-NEXT: s_lshr_b32 s44, s16, 16
+; GFX11-TRUE16-NEXT: s_lshr_b32 s45, s3, 16
+; GFX11-TRUE16-NEXT: s_lshr_b32 s46, s2, 16
+; GFX11-TRUE16-NEXT: s_lshr_b32 s41, s1, 16
+; GFX11-TRUE16-NEXT: s_lshr_b32 s40, s0, 16
+; GFX11-TRUE16-NEXT: s_mov_b32 s42, 0
+; GFX11-TRUE16-NEXT: s_pack_ll_b32_b16 s40, s0, s40
+; GFX11-TRUE16-NEXT: s_pack_ll_b32_b16 s41, s1, s41
+; GFX11-TRUE16-NEXT: s_pack_ll_b32_b16 s0, s2, s46
+; GFX11-TRUE16-NEXT: s_pack_ll_b32_b16 s1, s3, s45
+; GFX11-TRUE16-NEXT: s_pack_ll_b32_b16 s2, s16, s44
+; GFX11-TRUE16-NEXT: s_pack_ll_b32_b16 s3, s17, s43
+; GFX11-TRUE16-NEXT: s_pack_ll_b32_b16 s4, s18, s4
+; GFX11-TRUE16-NEXT: s_pack_ll_b32_b16 s5, s19, s5
+; GFX11-TRUE16-NEXT: s_pack_ll_b32_b16 s6, s20, s6
+; GFX11-TRUE16-NEXT: s_pack_ll_b32_b16 s7, s21, s7
+; GFX11-TRUE16-NEXT: s_pack_ll_b32_b16 s8, s22, s8
+; GFX11-TRUE16-NEXT: s_pack_ll_b32_b16 s9, s23, s9
+; GFX11-TRUE16-NEXT: s_pack_ll_b32_b16 s10, s24, s10
+; GFX11-TRUE16-NEXT: s_pack_ll_b32_b16 s11, s25, s11
+; GFX11-TRUE16-NEXT: s_pack_ll_b32_b16 s12, s26, s12
+; GFX11-TRUE16-NEXT: s_pack_ll_b32_b16 s13, s27, s13
+; GFX11-TRUE16-NEXT: s_pack_ll_b32_b16 s14, s28, s14
+; GFX11-TRUE16-NEXT: s_pack_ll_b32_b16 s15, s29, s15
; GFX11-TRUE16-NEXT: s_and_b32 s47, vcc_lo, exec_lo
; GFX11-TRUE16-NEXT: s_cbranch_scc0 .LBB35_4
; GFX11-TRUE16-NEXT: ; %bb.1: ; %cmp.false
-; GFX11-TRUE16-NEXT: v_lshl_or_b32 v18, v39, 16, v55
-; GFX11-TRUE16-NEXT: v_lshl_or_b32 v19, v38, 16, v54
-; GFX11-TRUE16-NEXT: v_lshl_or_b32 v20, v37, 16, v53
-; GFX11-TRUE16-NEXT: v_lshl_or_b32 v21, v36, 16, v52
-; GFX11-TRUE16-NEXT: v_lshl_or_b32 v22, v35, 16, v51
-; GFX11-TRUE16-NEXT: v_lshl_or_b32 v23, v34, 16, v50
-; GFX11-TRUE16-NEXT: v_lshl_or_b32 v24, v33, 16, v49
-; GFX11-TRUE16-NEXT: v_lshl_or_b32 v25, v32, 16, v48
-; GFX11-TRUE16-NEXT: v_dual_mov_b32 v0, s0 :: v_dual_mov_b32 v1, s1
-; GFX11-TRUE16-NEXT: v_dual_mov_b32 v2, s2 :: v_dual_mov_b32 v3, s3
-; GFX11-TRUE16-NEXT: v_dual_mov_b32 v4, s4 :: v_dual_mov_b32 v5, s5
-; GFX11-TRUE16-NEXT: v_dual_mov_b32 v6, s6 :: v_dual_mov_b32 v7, s7
-; GFX11-TRUE16-NEXT: v_dual_mov_b32 v8, s8 :: v_dual_mov_b32 v9, s9
-; GFX11-TRUE16-NEXT: v_dual_mov_b32 v10, s10 :: v_dual_mov_b32 v11, s11
-; GFX11-TRUE16-NEXT: v_dual_mov_b32 v12, s12 :: v_dual_mov_b32 v13, s13
-; GFX11-TRUE16-NEXT: v_dual_mov_b32 v14, s14 :: v_dual_mov_b32 v15, s17
-; GFX11-TRUE16-NEXT: v_dual_mov_b32 v16, s15 :: v_dual_mov_b32 v17, s16
-; GFX11-TRUE16-NEXT: s_and_not1_b32 vcc_lo, exec_lo, s40
+; GFX11-TRUE16-NEXT: v_dual_mov_b32 v0, s40 :: v_dual_mov_b32 v5, s0
+; GFX11-TRUE16-NEXT: v_dual_mov_b32 v2, s41 :: v_dual_mov_b32 v9, s1
+; GFX11-TRUE16-NEXT: v_dual_mov_b32 v14, s2 :: v_dual_mov_b32 v27, s4
+; GFX11-TRUE16-NEXT: v_dual_mov_b32 v20, s3 :: v_dual_mov_b32 v35, s5
+; GFX11-TRUE16-NEXT: v_dual_mov_b32 v44, s6 :: v_dual_mov_b32 v65, s8
+; GFX11-TRUE16-NEXT: v_dual_mov_b32 v54, s7 :: v_dual_mov_b32 v77, s9
+; GFX11-TRUE16-NEXT: v_dual_mov_b32 v90, s10 :: v_dual_mov_b32 v119, s12
+; GFX11-TRUE16-NEXT: v_dual_mov_b32 v104, s11 :: v_dual_mov_b32 v135, s13
+; GFX11-TRUE16-NEXT: v_mov_b32_e32 v152, s14
+; GFX11-TRUE16-NEXT: v_mov_b32_e32 v170, s15
+; GFX11-TRUE16-NEXT: s_and_not1_b32 vcc_lo, exec_lo, s42
; GFX11-TRUE16-NEXT: s_cbranch_vccnz .LBB35_3
; GFX11-TRUE16-NEXT: .LBB35_2: ; %cmp.true
-; GFX11-TRUE16-NEXT: v_lshl_or_b32 v18, v39, 16, v55
-; GFX11-TRUE16-NEXT: v_lshl_or_b32 v19, v38, 16, v54
-; GFX11-TRUE16-NEXT: v_lshl_or_b32 v20, v37, 16, v53
-; GFX11-TRUE16-NEXT: v_lshl_or_b32 v21, v36, 16, v52
-; GFX11-TRUE16-NEXT: v_lshl_or_b32 v22, v35, 16, v51
-; GFX11-TRUE16-NEXT: v_lshl_or_b32 v23, v34, 16, v50
-; GFX11-TRUE16-NEXT: v_lshl_or_b32 v24, v33, 16, v49
-; GFX11-TRUE16-NEXT: v_lshl_or_b32 v25, v32, 16, v48
-; GFX11-TRUE16-NEXT: v_pk_add_f16 v0, 0x200, s0 op_sel_hi:[0,1]
-; GFX11-TRUE16-NEXT: v_pk_add_f16 v1, 0x200, s1 op_sel_hi:[0,1]
-; GFX11-TRUE16-NEXT: v_pk_add_f16 v2, 0x200, s2 op_sel_hi:[0,1]
-; GFX11-TRUE16-NEXT: v_pk_add_f16 v3, 0x200, s3 op_sel_hi:[0,1]
-; GFX11-TRUE16-NEXT: v_pk_add_f16 v4, 0x200, s4 op_sel_hi:[0,1]
-; GFX11-TRUE16-NEXT: v_pk_add_f16 v5, 0x200, s5 op_sel_hi:[0,1]
-; GFX11-TRUE16-NEXT: v_pk_add_f16 v6, 0x200, s6 op_sel_hi:[0,1]
-; GFX11-TRUE16-NEXT: v_pk_add_f16 v7, 0x200, s7 op_sel_hi:[0,1]
-; GFX11-TRUE16-NEXT: v_pk_add_f16 v8, 0x200, s8 op_sel_hi:[0,1]
-; GFX11-TRUE16-NEXT: v_pk_add_f16 v9, 0x200, s9 op_sel_hi:[0,1]
-; GFX11-TRUE16-NEXT: v_pk_add_f16 v10, 0x200, s10 op_sel_hi:[0,1]
-; GFX11-TRUE16-NEXT: v_pk_add_f16 v11, 0x200, s11 op_sel_hi:[0,1]
-; GFX11-TRUE16-NEXT: v_pk_add_f16 v12, 0x200, s12 op_sel_hi:[0,1]
-; GFX11-TRUE16-NEXT: v_pk_add_f16 v13, 0x200, s13 op_sel_hi:[0,1]
-; GFX11-TRUE16-NEXT: v_pk_add_f16 v14, 0x200, s14 op_sel_hi:[0,1]
-; GFX11-TRUE16-NEXT: v_pk_add_f16 v15, 0x200, s17 op_sel_hi:[0,1]
-; GFX11-TRUE16-NEXT: v_pk_add_f16 v16, 0x200, s15 op_sel_hi:[0,1]
-; GFX11-TRUE16-NEXT: v_pk_add_f16 v17, 0x200, s16 op_sel_hi:[0,1]
-; GFX11-TRUE16-NEXT: v_pk_add_f16 v18, 0x200, v18 op_sel_hi:[0,1]
-; GFX11-TRUE16-NEXT: v_pk_add_f16 v19, 0x200, v19 op_sel_hi:[0,1]
-; GFX11-TRUE16-NEXT: v_pk_add_f16 v20, 0x200, v20 op_sel_hi:[0,1]
-; GFX11-TRUE16-NEXT: v_pk_add_f16 v21, 0x200, v21 op_sel_hi:[0,1]
-; GFX11-TRUE16-NEXT: v_pk_add_f16 v22, 0x200, v22 op_sel_hi:[0,1]
-; GFX11-TRUE16-NEXT: v_pk_add_f16 v23, 0x200, v23 op_sel_hi:[0,1]
-; GFX11-TRUE16-NEXT: v_pk_add_f16 v24, 0x200, v24 op_sel_hi:[0,1]
+; GFX11-TRUE16-NEXT: v_pk_add_f16 v0, 0x200, s40 op_sel_hi:[0,1]
+; GFX11-TRUE16-NEXT: v_pk_add_f16 v2, 0x200, s41 op_sel_hi:[0,1]
+; GFX11-TRUE16-NEXT: v_pk_add_f16 v185, 0x200, v185 op_sel_hi:[0,1]
+; GFX11-TRUE16-NEXT: v_pk_add_f16 v191, 0x200, v191 op_sel_hi:[0,1]
+; GFX11-TRUE16-NEXT: v_pk_add_f16 v190, 0x200, v190 op_sel_hi:[0,1]
+; GFX11-TRUE16-NEXT: v_pk_add_f16 v189, 0x200, v189 op_sel_hi:[0,1]
+; GFX11-TRUE16-NEXT: v_pk_add_f16 v188, 0x200, v188 op_sel_hi:[0,1]
+; GFX11-TRUE16-NEXT: v_pk_add_f16 v187, 0x200, v187 op_sel_hi:[0,1]
+; GFX11-TRUE16-NEXT: v_pk_add_f16 v186, 0x200, v186 op_sel_hi:[0,1]
; GFX11-TRUE16-NEXT: v_pk_add_f16 v25, 0x200, v25 op_sel_hi:[0,1]
+; GFX11-TRUE16-NEXT: v_pk_add_f16 v5, 0x200, s0 op_sel_hi:[0,1]
+; GFX11-TRUE16-NEXT: v_pk_add_f16 v9, 0x200, s1 op_sel_hi:[0,1]
+; GFX11-TRUE16-NEXT: v_pk_add_f16 v14, 0x200, s2 op_sel_hi:[0,1]
+; GFX11-TRUE16-NEXT: v_pk_add_f16 v20, 0x200, s3 op_sel_hi:[0,1]
+; GFX11-TRUE16-NEXT: v_pk_add_f16 v27, 0x200, s4 op_sel_hi:[0,1]
+; GFX11-TRUE16-NEXT: v_pk_add_f16 v35, 0x200, s5 op_sel_hi:[0,1]
+; GFX11-TRUE16-NEXT: v_pk_add_f16 v44, 0x200, s6 op_sel_hi:[0,1]
+; GFX11-TRUE16-NEXT: v_pk_add_f16 v54, 0x200, s7 op_sel_hi:[0,1]
+; GFX11-TRUE16-NEXT: v_pk_add_f16 v65, 0x200, s8 op_sel_hi:[0,1]
+; GFX11-TRUE16-NEXT: v_pk_add_f16 v77, 0x200, s9 op_sel_hi:[0,1]
+; GFX11-TRUE16-NEXT: v_pk_add_f16 v90, 0x200, s10 op_sel_hi:[0,1]
+; GFX11-TRUE16-NEXT: v_pk_add_f16 v104, 0x200, s11 op_sel_hi:[0,1]
+; GFX11-TRUE16-NEXT: v_pk_add_f16 v119, 0x200, s12 op_sel_hi:[0,1]
+; GFX11-TRUE16-NEXT: v_pk_add_f16 v135, 0x200, s13 op_sel_hi:[0,1]
+; GFX11-TRUE16-NEXT: v_pk_add_f16 v152, 0x200, s14 op_sel_hi:[0,1]
+; GFX11-TRUE16-NEXT: v_pk_add_f16 v170, 0x200, s15 op_sel_hi:[0,1]
; GFX11-TRUE16-NEXT: .LBB35_3: ; %end
+; GFX11-TRUE16-NEXT: v_dual_mov_b32 v1, v2 :: v_dual_mov_b32 v2, v5
+; GFX11-TRUE16-NEXT: v_dual_mov_b32 v5, v20 :: v_dual_mov_b32 v6, v27
+; GFX11-TRUE16-NEXT: v_dual_mov_b32 v7, v35 :: v_dual_mov_b32 v8, v44
+; GFX11-TRUE16-NEXT: v_dual_mov_b32 v11, v77 :: v_dual_mov_b32 v12, v90
+; GFX11-TRUE16-NEXT: v_mov_b32_e32 v13, v104
+; GFX11-TRUE16-NEXT: v_dual_mov_b32 v15, v135 :: v_dual_mov_b32 v16, v152
+; GFX11-TRUE16-NEXT: v_dual_mov_b32 v17, v170 :: v_dual_mov_b32 v18, v185
+; GFX11-TRUE16-NEXT: v_dual_mov_b32 v19, v191 :: v_dual_mov_b32 v20, v190
+; GFX11-TRUE16-NEXT: v_dual_mov_b32 v21, v189 :: v_dual_mov_b32 v22, v188
+; GFX11-TRUE16-NEXT: v_dual_mov_b32 v23, v187 :: v_dual_mov_b32 v24, v186
+; GFX11-TRUE16-NEXT: s_clause 0x1f
+; GFX11-TRUE16-NEXT: scratch_load_b32 v191, off, s32
+; GFX11-TRUE16-NEXT: scratch_load_b32 v190, off, s32 offset:4
+; GFX11-TRUE16-NEXT: scratch_load_b32 v189, off, s32 offset:8
+; GFX11-TRUE16-NEXT: scratch_load_b32 v188, off, s32 offset:12
+; GFX11-TRUE16-NEXT: scratch_load_b32 v187, off, s32 offset:16
+; GFX11-TRUE16-NEXT: scratch_load_b32 v186, off, s32 offset:20
+; GFX11-TRUE16-NEXT: scratch_load_b32 v185, off, s32 offset:24
+; GFX11-TRUE16-NEXT: scratch_load_b32 v184, off, s32 offset:28
+; GFX11-TRUE16-NEXT: scratch_load_b32 v175, off, s32 offset:32
+; GFX11-TRUE16-NEXT: scratch_load_b32 v174, off, s32 offset:36
+; GFX11-TRUE16-NEXT: scratch_load_b32 v173, off, s32 offset:40
+; GFX11-TRUE16-NEXT: scratch_load_b32 v172, off, s32 offset:44
+; GFX11-TRUE16-NEXT: scratch_load_b32 v171, off, s32 offset:48
+; GFX11-TRUE16-NEXT: scratch_load_b32 v170, off, s32 offset:52
+; GFX11-TRUE16-NEXT: scratch_load_b32 v169, off, s32 offset:56
+; GFX11-TRUE16-NEXT: scratch_load_b32 v168, off, s32 offset:60
+; GFX11-TRUE16-NEXT: scratch_load_b32 v159, off, s32 offset:64
+; GFX11-TRUE16-NEXT: scratch_load_b32 v158, off, s32 offset:68
+; GFX11-TRUE16-NEXT: scratch_load_b32 v157, off, s32 offset:72
+; GFX11-TRUE16-NEXT: scratch_load_b32 v156, off, s32 offset:76
+; GFX11-TRUE16-NEXT: scratch_load_b32 v155, off, s32 offset:80
+; GFX11-TRUE16-NEXT: scratch_load_b32 v154, off, s32 offset:84
+; GFX11-TRUE16-NEXT: scratch_load_b32 v153, off, s32 offset:88
+; GFX11-TRUE16-NEXT: scratch_load_b32 v152, off, s32 offset:92
+; GFX11-TRUE16-NEXT: scratch_load_b32 v143, off, s32 offset:96
+; GFX11-TRUE16-NEXT: scratch_load_b32 v142, off, s32 offset:100
+; GFX11-TRUE16-NEXT: scratch_load_b32 v141, off, s32 offset:104
+; GFX11-TRUE16-NEXT: scratch_load_b32 v140, off, s32 offset:108
+; GFX11-TRUE16-NEXT: scratch_load_b32 v139, off, s32 offset:112
+; GFX11-TRUE16-NEXT: scratch_load_b32 v138, off, s32 offset:116
+; GFX11-TRUE16-NEXT: scratch_load_b32 v137, off, s32 offset:120
+; GFX11-TRUE16-NEXT: scratch_load_b32 v136, off, s32 offset:124
+; GFX11-TRUE16-NEXT: s_clause 0x1f
+; GFX11-TRUE16-NEXT: scratch_load_b32 v127, off, s32 offset:128
+; GFX11-TRUE16-NEXT: scratch_load_b32 v126, off, s32 offset:132
+; GFX11-TRUE16-NEXT: scratch_load_b32 v125, off, s32 offset:136
+; GFX11-TRUE16-NEXT: scratch_load_b32 v124, off, s32 offset:140
+; GFX11-TRUE16-NEXT: scratch_load_b32 v123, off, s32 offset:144
+; GFX11-TRUE16-NEXT: scratch_load_b32 v122, off, s32 offset:148
+; GFX11-TRUE16-NEXT: scratch_load_b32 v121, off, s32 offset:152
+; GFX11-TRUE16-NEXT: scratch_load_b32 v120, off, s32 offset:156
+; GFX11-TRUE16-NEXT: scratch_load_b32 v111, off, s32 offset:160
+; GFX11-TRUE16-NEXT: scratch_load_b32 v110, off, s32 offset:164
+; GFX11-TRUE16-NEXT: scratch_load_b32 v109, off, s32 offset:168
+; GFX11-TRUE16-NEXT: scratch_load_b32 v108, off, s32 offset:172
+; GFX11-TRUE16-NEXT: scratch_load_b32 v107, off, s32 offset:176
+; GFX11-TRUE16-NEXT: scratch_load_b32 v106, off, s32 offset:180
+; GFX11-TRUE16-NEXT: scratch_load_b32 v105, off, s32 offset:184
+; GFX11-TRUE16-NEXT: scratch_load_b32 v104, off, s32 offset:188
+; GFX11-TRUE16-NEXT: scratch_load_b32 v95, off, s32 offset:192
+; GFX11-TRUE16-NEXT: scratch_load_b32 v94, off, s32 offset:196
+; GFX11-TRUE16-NEXT: scratch_load_b32 v93, off, s32 offset:200
+; GFX11-TRUE16-NEXT: scratch_load_b32 v92, off, s32 offset:204
+; GFX11-TRUE16-NEXT: scratch_load_b32 v91, off, s32 offset:208
+; GFX11-TRUE16-NEXT: scratch_load_b32 v90, off, s32 offset:212
+; GFX11-TRUE16-NEXT: scratch_load_b32 v89, off, s32 offset:216
+; GFX11-TRUE16-NEXT: scratch_load_b32 v88, off, s32 offset:220
+; GFX11-TRUE16-NEXT: scratch_load_b32 v79, off, s32 offset:224
+; GFX11-TRUE16-NEXT: scratch_load_b32 v78, off, s32 offset:228
+; GFX11-TRUE16-NEXT: scratch_load_b32 v77, off, s32 offset:232
+; GFX11-TRUE16-NEXT: scratch_load_b32 v76, off, s32 offset:236
+; GFX11-TRUE16-NEXT: scratch_load_b32 v75, off, s32 offset:240
+; GFX11-TRUE16-NEXT: scratch_load_b32 v74, off, s32 offset:244
+; GFX11-TRUE16-NEXT: scratch_load_b32 v73, off, s32 offset:248
+; GFX11-TRUE16-NEXT: scratch_load_b32 v72, off, s32 offset:252
+; GFX11-TRUE16-NEXT: s_clause 0xf
+; GFX11-TRUE16-NEXT: scratch_load_b32 v63, off, s32 offset:256
+; GFX11-TRUE16-NEXT: scratch_load_b32 v62, off, s32 offset:260
+; GFX11-TRUE16-NEXT: scratch_load_b32 v61, off, s32 offset:264
+; GFX11-TRUE16-NEXT: scratch_load_b32 v60, off, s32 offset:268
+; GFX11-TRUE16-NEXT: scratch_load_b32 v59, off, s32 offset:272
+; GFX11-TRUE16-NEXT: scratch_load_b32 v58, off, s32 offset:276
+; GFX11-TRUE16-NEXT: scratch_load_b32 v57, off, s32 offset:280
+; GFX11-TRUE16-NEXT: scratch_load_b32 v56, off, s32 offset:284
+; GFX11-TRUE16-NEXT: scratch_load_b32 v47, off, s32 offset:288
+; GFX11-TRUE16-NEXT: scratch_load_b32 v46, off, s32 offset:292
+; GFX11-TRUE16-NEXT: scratch_load_b32 v45, off, s32 offset:296
+; GFX11-TRUE16-NEXT: scratch_load_b32 v44, off, s32 offset:300
+; GFX11-TRUE16-NEXT: scratch_load_b32 v43, off, s32 offset:304
+; GFX11-TRUE16-NEXT: scratch_load_b32 v42, off, s32 offset:308
+; GFX11-TRUE16-NEXT: scratch_load_b32 v41, off, s32 offset:312
+; GFX11-TRUE16-NEXT: scratch_load_b32 v40, off, s32 offset:316
+; GFX11-TRUE16-NEXT: v_dual_mov_b32 v3, v9 :: v_dual_mov_b32 v4, v14
+; GFX11-TRUE16-NEXT: v_dual_mov_b32 v9, v54 :: v_dual_mov_b32 v10, v65
+; GFX11-TRUE16-NEXT: v_mov_b32_e32 v14, v119
+; GFX11-TRUE16-NEXT: s_waitcnt vmcnt(0)
; GFX11-TRUE16-NEXT: s_setpc_b64 s[30:31]
; GFX11-TRUE16-NEXT: .LBB35_4:
+; GFX11-TRUE16-NEXT: v_mov_b32_e32 v53, v25
; GFX11-TRUE16-NEXT: ; implicit-def: $vgpr0_vgpr1_vgpr2_vgpr3_vgpr4_vgpr5_vgpr6_vgpr7_vgpr8_vgpr9_vgpr10_vgpr11_vgpr12_vgpr13_vgpr14_vgpr15_vgpr16_vgpr17_vgpr18_vgpr19_vgpr20_vgpr21_vgpr22_vgpr23_vgpr24_vgpr25_vgpr26_vgpr27_vgpr28_vgpr29_vgpr30_vgpr31
+; GFX11-TRUE16-NEXT: ; implicit-def: $vgpr1_vgpr2_vgpr3_vgpr4_vgpr5_vgpr6_vgpr7_vgpr8_vgpr9_vgpr10_vgpr11_vgpr12_vgpr13_vgpr14_vgpr15_vgpr16_vgpr17_vgpr18_vgpr19_vgpr20_vgpr21_vgpr22_vgpr23_vgpr24_vgpr25_vgpr26_vgpr27_vgpr28_vgpr29_vgpr30_vgpr31_vgpr32
+; GFX11-TRUE16-NEXT: ; implicit-def: $vgpr3_vgpr4_vgpr5_vgpr6_vgpr7_vgpr8_vgpr9_vgpr10_vgpr11_vgpr12_vgpr13_vgpr14_vgpr15_vgpr16_vgpr17_vgpr18_vgpr19_vgpr20_vgpr21_vgpr22_vgpr23_vgpr24_vgpr25_vgpr26_vgpr27_vgpr28_vgpr29_vgpr30_vgpr31_vgpr32_vgpr33_vgpr34
+; GFX11-TRUE16-NEXT: ; implicit-def: $vgpr6_vgpr7_vgpr8_vgpr9_vgpr10_vgpr11_vgpr12_vgpr13_vgpr14_vgpr15_vgpr16_vgpr17_vgpr18_vgpr19_vgpr20_vgpr21_vgpr22_vgpr23_vgpr24_vgpr25_vgpr26_vgpr27_vgpr28_vgpr29_vgpr30_vgpr31_vgpr32_vgpr33_vgpr34_vgpr35_vgpr36_vgpr37
+; GFX11-TRUE16-NEXT: ; implicit-def: $vgpr10_vgpr11_vgpr12_vgpr13_vgpr14_vgpr15_vgpr16_vgpr17_vgpr18_vgpr19_vgpr20_vgpr21_vgpr22_vgpr23_vgpr24_vgpr25_vgpr26_vgpr27_vgpr28_vgpr29_vgpr30_vgpr31_vgpr32_vgpr33_vgpr34_vgpr35_vgpr36_vgpr37_vgpr38_vgpr39_vgpr40_vgpr41
+; GFX11-TRUE16-NEXT: ; implicit-def: $vgpr15_vgpr16_vgpr17_vgpr18_vgpr19_vgpr20_vgpr21_vgpr22_vgpr23_vgpr24_vgpr25_vgpr26_vgpr27_vgpr28_vgpr29_vgpr30_vgpr31_vgpr32_vgpr33_vgpr34_vgpr35_vgpr36_vgpr37_vgpr38_vgpr39_vgpr40_vgpr41_vgpr42_vgpr43_vgpr44_vgpr45_vgpr46
+; GFX11-TRUE16-NEXT: ; implicit-def: $vgpr21_vgpr22_vgpr23_vgpr24_vgpr25_vgpr26_vgpr27_vgpr28_vgpr29_vgpr30_vgpr31_vgpr32_vgpr33_vgpr34_vgpr35_vgpr36_vgpr37_vgpr38_vgpr39_vgpr40_vgpr41_vgpr42_vgpr43_vgpr44_vgpr45_vgpr46_vgpr47_vgpr48_vgpr49_vgpr50_vgpr51_vgpr52
+; GFX11-TRUE16-NEXT: s_delay_alu instid0(VALU_DEP_1)
+; GFX11-TRUE16-NEXT: v_mov_b32_e32 v25, v53
+; GFX11-TRUE16-NEXT: ; implicit-def: $vgpr28_vgpr29_vgpr30_vgpr31_vgpr32_vgpr33_vgpr34_vgpr35_vgpr36_vgpr37_vgpr38_vgpr39_vgpr40_vgpr41_vgpr42_vgpr43_vgpr44_vgpr45_vgpr46_vgpr47_vgpr48_vgpr49_vgpr50_vgpr51_vgpr52_vgpr53_vgpr54_vgpr55_vgpr56_vgpr57_vgpr58_vgpr59
+; GFX11-TRUE16-NEXT: ; implicit-def: $vgpr36_vgpr37_vgpr38_vgpr39_vgpr40_vgpr41_vgpr42_vgpr43_vgpr44_vgpr45_vgpr46_vgpr47_vgpr48_vgpr49_vgpr50_vgpr51_vgpr52_vgpr53_vgpr54_vgpr55_vgpr56_vgpr57_vgpr58_vgpr59_vgpr60_vgpr61_vgpr62_vgpr63_vgpr64_vgpr65_vgpr66_vgpr67
+; GFX11-TRUE16-NEXT: ; implicit-def: $vgpr45_vgpr46_vgpr47_vgpr48_vgpr49_vgpr50_vgpr51_vgpr52_vgpr53_vgpr54_vgpr55_vgpr56_vgpr57_vgpr58_vgpr59_vgpr60_vgpr61_vgpr62_vgpr63_vgpr64_vgpr65_vgpr66_vgpr67_vgpr68_vgpr69_vgpr70_vgpr71_vgpr72_vgpr73_vgpr74_vgpr75_vgpr76
+; GFX11-TRUE16-NEXT: ; implicit-def: $vgpr55_vgpr56_vgpr57_vgpr58_vgpr59_vgpr60_vgpr61_vgpr62_vgpr63_vgpr64_vgpr65_vgpr66_vgpr67_vgpr68_vgpr69_vgpr70_vgpr71_vgpr72_vgpr73_vgpr74_vgpr75_vgpr76_vgpr77_vgpr78_vgpr79_vgpr80_vgpr81_vgpr82_vgpr83_vgpr84_vgpr85_vgpr86
+; GFX11-TRUE16-NEXT: ; implicit-def: $vgpr66_vgpr67_vgpr68_vgpr69_vgpr70_vgpr71_vgpr72_vgpr73_vgpr74_vgpr75_vgpr76_vgpr77_vgpr78_vgpr79_vgpr80_vgpr81_vgpr82_vgpr83_vgpr84_vgpr85_vgpr86_vgpr87_vgpr88_vgpr89_vgpr90_vgpr91_vgpr92_vgpr93_vgpr94_vgpr95_vgpr96_vgpr97
+; GFX11-TRUE16-NEXT: ; implicit-def: $vgpr78_vgpr79_vgpr80_vgpr81_vgpr82_vgpr83_vgpr84_vgpr85_vgpr86_vgpr87_vgpr88_vgpr89_vgpr90_vgpr91_vgpr92_vgpr93_vgpr94_vgpr95_vgpr96_vgpr97_vgpr98_vgpr99_vgpr100_vgpr101_vgpr102_vgpr103_vgpr104_vgpr105_vgpr106_vgpr107_vgpr108_vgpr109
+; GFX11-TRUE16-NEXT: ; implicit-def: $vgpr91_vgpr92_vgpr93_vgpr94_vgpr95_vgpr96_vgpr97_vgpr98_vgpr99_vgpr100_vgpr101_vgpr102_vgpr103_vgpr104_vgpr105_vgpr106_vgpr107_vgpr108_vgpr109_vgpr110_vgpr111_vgpr112_vgpr113_vgpr114_vgpr115_vgpr116_vgpr117_vgpr118_vgpr119_vgpr120_vgpr121_vgpr122
+; GFX11-TRUE16-NEXT: ; implicit-def: $vgpr105_vgpr106_vgpr107_vgpr108_vgpr109_vgpr110_vgpr111_vgpr112_vgpr113_vgpr114_vgpr115_vgpr116_vgpr117_vgpr118_vgpr119_vgpr120_vgpr121_vgpr122_vgpr123_vgpr124_vgpr125_vgpr126_vgpr127_vgpr128_vgpr129_vgpr130_vgpr131_vgpr132_vgpr133_vgpr134_vgpr135_vgpr136
+; GFX11-TRUE16-NEXT: ; implicit-def: $vgpr120_vgpr121_vgpr122_vgpr123_vgpr124_vgpr125_vgpr126_vgpr127_vgpr128_vgpr129_vgpr130_vgpr131_vgpr132_vgpr133_vgpr134_vgpr135_vgpr136_vgpr137_vgpr138_vgpr139_vgpr140_vgpr141_vgpr142_vgpr143_vgpr144_vgpr145_vgpr146_vgpr147_vgpr148_vgpr149_vgpr150_vgpr151
+; GFX11-TRUE16-NEXT: ; implicit-def: $vgpr136_vgpr137_vgpr138_vgpr139_vgpr140_vgpr141_vgpr142_vgpr143_vgpr144_vgpr145_vgpr146_vgpr147_vgpr148_vgpr149_vgpr150_vgpr151_vgpr152_vgpr153_vgpr154_vgpr155_vgpr156_vgpr157_vgpr158_vgpr159_vgpr160_vgpr161_vgpr162_vgpr163_vgpr164_vgpr165_vgpr166_vgpr167
+; GFX11-TRUE16-NEXT: ; implicit-def: $vgpr153_vgpr154_vgpr155_vgpr156_vgpr157_vgpr158_vgpr159_vgpr160_vgpr161_vgpr162_vgpr163_vgpr164_vgpr165_vgpr166_vgpr167_vgpr168_vgpr169_vgpr170_vgpr171_vgpr172_vgpr173_vgpr174_vgpr175_vgpr176_vgpr177_vgpr178_vgpr179_vgpr180_vgpr181_vgpr182_vgpr183_vgpr184
; GFX11-TRUE16-NEXT: s_branch .LBB35_2
;
; GFX11-FAKE16-LABEL: bitcast_v52f16_to_v26f32_scalar:
@@ -25980,129 +26968,295 @@ define inreg <13 x i64> @bitcast_v52i16_to_v13i64_scalar(<52 x i16> inreg %a, i3
; GFX11-TRUE16-LABEL: bitcast_v52i16_to_v13i64_scalar:
; GFX11-TRUE16: ; %bb.0:
; GFX11-TRUE16-NEXT: s_waitcnt vmcnt(0) expcnt(0) lgkmcnt(0)
-; GFX11-TRUE16-NEXT: v_mov_b16_e32 v32.h, 0
; GFX11-TRUE16-NEXT: v_cmp_ne_u32_e32 vcc_lo, 0, v8
-; GFX11-TRUE16-NEXT: v_mov_b16_e32 v32.l, v7.h
-; GFX11-TRUE16-NEXT: v_mov_b16_e32 v33.l, v6.h
-; GFX11-TRUE16-NEXT: v_mov_b16_e32 v34.l, v5.h
-; GFX11-TRUE16-NEXT: v_mov_b16_e32 v33.h, v32.h
-; GFX11-TRUE16-NEXT: v_mov_b16_e32 v34.h, v32.h
-; GFX11-TRUE16-NEXT: v_mov_b16_e32 v35.l, v4.h
-; GFX11-TRUE16-NEXT: v_mov_b16_e32 v35.h, v32.h
-; GFX11-TRUE16-NEXT: v_mov_b16_e32 v36.l, v3.h
-; GFX11-TRUE16-NEXT: v_mov_b16_e32 v36.h, v32.h
-; GFX11-TRUE16-NEXT: v_mov_b16_e32 v37.l, v2.h
-; GFX11-TRUE16-NEXT: v_mov_b16_e32 v37.h, v32.h
-; GFX11-TRUE16-NEXT: v_mov_b16_e32 v38.l, v1.h
-; GFX11-TRUE16-NEXT: v_mov_b16_e32 v38.h, v32.h
-; GFX11-TRUE16-NEXT: v_mov_b16_e32 v39.l, v0.h
-; GFX11-TRUE16-NEXT: v_mov_b16_e32 v39.h, v32.h
-; GFX11-TRUE16-NEXT: v_and_b32_e32 v55, 0xffff, v0
-; GFX11-TRUE16-NEXT: v_and_b32_e32 v54, 0xffff, v1
-; GFX11-TRUE16-NEXT: v_and_b32_e32 v53, 0xffff, v2
-; GFX11-TRUE16-NEXT: v_and_b32_e32 v52, 0xffff, v3
-; GFX11-TRUE16-NEXT: v_and_b32_e32 v51, 0xffff, v4
-; GFX11-TRUE16-NEXT: v_and_b32_e32 v50, 0xffff, v5
-; GFX11-TRUE16-NEXT: v_and_b32_e32 v49, 0xffff, v6
-; GFX11-TRUE16-NEXT: v_and_b32_e32 v48, 0xffff, v7
-; GFX11-TRUE16-NEXT: s_lshr_b32 s41, s29, 16
-; GFX11-TRUE16-NEXT: s_lshr_b32 s15, s28, 16
-; GFX11-TRUE16-NEXT: s_lshr_b32 s42, s27, 16
-; GFX11-TRUE16-NEXT: s_lshr_b32 s14, s26, 16
-; GFX11-TRUE16-NEXT: s_lshr_b32 s13, s25, 16
-; GFX11-TRUE16-NEXT: s_lshr_b32 s12, s24, 16
-; GFX11-TRUE16-NEXT: s_lshr_b32 s11, s23, 16
-; GFX11-TRUE16-NEXT: s_lshr_b32 s10, s22, 16
-; GFX11-TRUE16-NEXT: s_lshr_b32 s9, s21, 16
-; GFX11-TRUE16-NEXT: s_lshr_b32 s8, s20, 16
-; GFX11-TRUE16-NEXT: s_lshr_b32 s7, s19, 16
-; GFX11-TRUE16-NEXT: s_lshr_b32 s6, s18, 16
-; GFX11-TRUE16-NEXT: s_lshr_b32 s5, s17, 16
-; GFX11-TRUE16-NEXT: s_lshr_b32 s4, s16, 16
-; GFX11-TRUE16-NEXT: s_lshr_b32 s43, s3, 16
-; GFX11-TRUE16-NEXT: s_lshr_b32 s44, s2, 16
-; GFX11-TRUE16-NEXT: s_lshr_b32 s45, s1, 16
-; GFX11-TRUE16-NEXT: s_lshr_b32 s46, s0, 16
-; GFX11-TRUE16-NEXT: s_mov_b32 s40, 0
-; GFX11-TRUE16-NEXT: s_pack_ll_b32_b16 s0, s0, s46
-; GFX11-TRUE16-NEXT: s_pack_ll_b32_b16 s1, s1, s45
-; GFX11-TRUE16-NEXT: s_pack_ll_b32_b16 s2, s2, s44
-; GFX11-TRUE16-NEXT: s_pack_ll_b32_b16 s3, s3, s43
-; GFX11-TRUE16-NEXT: s_pack_ll_b32_b16 s4, s16, s4
-; GFX11-TRUE16-NEXT: s_pack_ll_b32_b16 s5, s17, s5
-; GFX11-TRUE16-NEXT: s_pack_ll_b32_b16 s6, s18, s6
-; GFX11-TRUE16-NEXT: s_pack_ll_b32_b16 s7, s19, s7
-; GFX11-TRUE16-NEXT: s_pack_ll_b32_b16 s8, s20, s8
-; GFX11-TRUE16-NEXT: s_pack_ll_b32_b16 s9, s21, s9
-; GFX11-TRUE16-NEXT: s_pack_ll_b32_b16 s10, s22, s10
-; GFX11-TRUE16-NEXT: s_pack_ll_b32_b16 s11, s23, s11
-; GFX11-TRUE16-NEXT: s_pack_ll_b32_b16 s12, s24, s12
-; GFX11-TRUE16-NEXT: s_pack_ll_b32_b16 s13, s25, s13
-; GFX11-TRUE16-NEXT: s_pack_ll_b32_b16 s14, s26, s14
-; GFX11-TRUE16-NEXT: s_pack_ll_b32_b16 s17, s27, s42
-; GFX11-TRUE16-NEXT: s_pack_ll_b32_b16 s15, s28, s15
-; GFX11-TRUE16-NEXT: s_pack_ll_b32_b16 s16, s29, s41
+; GFX11-TRUE16-NEXT: s_clause 0x1f
+; GFX11-TRUE16-NEXT: scratch_store_b32 off, v40, s32 offset:316
+; GFX11-TRUE16-NEXT: scratch_store_b32 off, v41, s32 offset:312
+; GFX11-TRUE16-NEXT: scratch_store_b32 off, v42, s32 offset:308
+; GFX11-TRUE16-NEXT: scratch_store_b32 off, v43, s32 offset:304
+; GFX11-TRUE16-NEXT: scratch_store_b32 off, v44, s32 offset:300
+; GFX11-TRUE16-NEXT: scratch_store_b32 off, v45, s32 offset:296
+; GFX11-TRUE16-NEXT: scratch_store_b32 off, v46, s32 offset:292
+; GFX11-TRUE16-NEXT: scratch_store_b32 off, v47, s32 offset:288
+; GFX11-TRUE16-NEXT: scratch_store_b32 off, v56, s32 offset:284
+; GFX11-TRUE16-NEXT: scratch_store_b32 off, v57, s32 offset:280
+; GFX11-TRUE16-NEXT: scratch_store_b32 off, v58, s32 offset:276
+; GFX11-TRUE16-NEXT: scratch_store_b32 off, v59, s32 offset:272
+; GFX11-TRUE16-NEXT: scratch_store_b32 off, v60, s32 offset:268
+; GFX11-TRUE16-NEXT: scratch_store_b32 off, v61, s32 offset:264
+; GFX11-TRUE16-NEXT: scratch_store_b32 off, v62, s32 offset:260
+; GFX11-TRUE16-NEXT: scratch_store_b32 off, v63, s32 offset:256
+; GFX11-TRUE16-NEXT: scratch_store_b32 off, v72, s32 offset:252
+; GFX11-TRUE16-NEXT: scratch_store_b32 off, v73, s32 offset:248
+; GFX11-TRUE16-NEXT: scratch_store_b32 off, v74, s32 offset:244
+; GFX11-TRUE16-NEXT: scratch_store_b32 off, v75, s32 offset:240
+; GFX11-TRUE16-NEXT: scratch_store_b32 off, v76, s32 offset:236
+; GFX11-TRUE16-NEXT: scratch_store_b32 off, v77, s32 offset:232
+; GFX11-TRUE16-NEXT: scratch_store_b32 off, v78, s32 offset:228
+; GFX11-TRUE16-NEXT: scratch_store_b32 off, v79, s32 offset:224
+; GFX11-TRUE16-NEXT: scratch_store_b32 off, v88, s32 offset:220
+; GFX11-TRUE16-NEXT: scratch_store_b32 off, v89, s32 offset:216
+; GFX11-TRUE16-NEXT: scratch_store_b32 off, v90, s32 offset:212
+; GFX11-TRUE16-NEXT: scratch_store_b32 off, v91, s32 offset:208
+; GFX11-TRUE16-NEXT: scratch_store_b32 off, v92, s32 offset:204
+; GFX11-TRUE16-NEXT: scratch_store_b32 off, v93, s32 offset:200
+; GFX11-TRUE16-NEXT: scratch_store_b32 off, v94, s32 offset:196
+; GFX11-TRUE16-NEXT: scratch_store_b32 off, v95, s32 offset:192
+; GFX11-TRUE16-NEXT: s_clause 0x1f
+; GFX11-TRUE16-NEXT: scratch_store_b32 off, v104, s32 offset:188
+; GFX11-TRUE16-NEXT: scratch_store_b32 off, v105, s32 offset:184
+; GFX11-TRUE16-NEXT: scratch_store_b32 off, v106, s32 offset:180
+; GFX11-TRUE16-NEXT: scratch_store_b32 off, v107, s32 offset:176
+; GFX11-TRUE16-NEXT: scratch_store_b32 off, v108, s32 offset:172
+; GFX11-TRUE16-NEXT: scratch_store_b32 off, v109, s32 offset:168
+; GFX11-TRUE16-NEXT: scratch_store_b32 off, v110, s32 offset:164
+; GFX11-TRUE16-NEXT: scratch_store_b32 off, v111, s32 offset:160
+; GFX11-TRUE16-NEXT: scratch_store_b32 off, v120, s32 offset:156
+; GFX11-TRUE16-NEXT: scratch_store_b32 off, v121, s32 offset:152
+; GFX11-TRUE16-NEXT: scratch_store_b32 off, v122, s32 offset:148
+; GFX11-TRUE16-NEXT: scratch_store_b32 off, v123, s32 offset:144
+; GFX11-TRUE16-NEXT: scratch_store_b32 off, v124, s32 offset:140
+; GFX11-TRUE16-NEXT: scratch_store_b32 off, v125, s32 offset:136
+; GFX11-TRUE16-NEXT: scratch_store_b32 off, v126, s32 offset:132
+; GFX11-TRUE16-NEXT: scratch_store_b32 off, v127, s32 offset:128
+; GFX11-TRUE16-NEXT: scratch_store_b32 off, v136, s32 offset:124
+; GFX11-TRUE16-NEXT: scratch_store_b32 off, v137, s32 offset:120
+; GFX11-TRUE16-NEXT: scratch_store_b32 off, v138, s32 offset:116
+; GFX11-TRUE16-NEXT: scratch_store_b32 off, v139, s32 offset:112
+; GFX11-TRUE16-NEXT: scratch_store_b32 off, v140, s32 offset:108
+; GFX11-TRUE16-NEXT: scratch_store_b32 off, v141, s32 offset:104
+; GFX11-TRUE16-NEXT: scratch_store_b32 off, v142, s32 offset:100
+; GFX11-TRUE16-NEXT: scratch_store_b32 off, v143, s32 offset:96
+; GFX11-TRUE16-NEXT: scratch_store_b32 off, v152, s32 offset:92
+; GFX11-TRUE16-NEXT: scratch_store_b32 off, v153, s32 offset:88
+; GFX11-TRUE16-NEXT: scratch_store_b32 off, v154, s32 offset:84
+; GFX11-TRUE16-NEXT: scratch_store_b32 off, v155, s32 offset:80
+; GFX11-TRUE16-NEXT: scratch_store_b32 off, v156, s32 offset:76
+; GFX11-TRUE16-NEXT: scratch_store_b32 off, v157, s32 offset:72
+; GFX11-TRUE16-NEXT: scratch_store_b32 off, v158, s32 offset:68
+; GFX11-TRUE16-NEXT: scratch_store_b32 off, v159, s32 offset:64
+; GFX11-TRUE16-NEXT: s_clause 0xf
+; GFX11-TRUE16-NEXT: scratch_store_b32 off, v168, s32 offset:60
+; GFX11-TRUE16-NEXT: scratch_store_b32 off, v169, s32 offset:56
+; GFX11-TRUE16-NEXT: scratch_store_b32 off, v170, s32 offset:52
+; GFX11-TRUE16-NEXT: scratch_store_b32 off, v171, s32 offset:48
+; GFX11-TRUE16-NEXT: scratch_store_b32 off, v172, s32 offset:44
+; GFX11-TRUE16-NEXT: scratch_store_b32 off, v173, s32 offset:40
+; GFX11-TRUE16-NEXT: scratch_store_b32 off, v174, s32 offset:36
+; GFX11-TRUE16-NEXT: scratch_store_b32 off, v175, s32 offset:32
+; GFX11-TRUE16-NEXT: scratch_store_b32 off, v184, s32 offset:28
+; GFX11-TRUE16-NEXT: scratch_store_b32 off, v185, s32 offset:24
+; GFX11-TRUE16-NEXT: scratch_store_b32 off, v186, s32 offset:20
+; GFX11-TRUE16-NEXT: scratch_store_b32 off, v187, s32 offset:16
+; GFX11-TRUE16-NEXT: scratch_store_b32 off, v188, s32 offset:12
+; GFX11-TRUE16-NEXT: scratch_store_b32 off, v189, s32 offset:8
+; GFX11-TRUE16-NEXT: scratch_store_b32 off, v190, s32 offset:4
+; GFX11-TRUE16-NEXT: scratch_store_b32 off, v191, s32
+; GFX11-TRUE16-NEXT: v_dual_mov_b32 v25, v7 :: v_dual_mov_b32 v186, v6
+; GFX11-TRUE16-NEXT: v_dual_mov_b32 v187, v5 :: v_dual_mov_b32 v188, v4
+; GFX11-TRUE16-NEXT: v_dual_mov_b32 v189, v3 :: v_dual_mov_b32 v190, v2
+; GFX11-TRUE16-NEXT: v_mov_b32_e32 v191, v1
+; GFX11-TRUE16-NEXT: v_mov_b32_e32 v185, v0
+; GFX11-TRUE16-NEXT: s_lshr_b32 s15, s29, 16
+; GFX11-TRUE16-NEXT: s_lshr_b32 s14, s28, 16
+; GFX11-TRUE16-NEXT: s_lshr_b32 s13, s27, 16
+; GFX11-TRUE16-NEXT: s_lshr_b32 s12, s26, 16
+; GFX11-TRUE16-NEXT: s_lshr_b32 s11, s25, 16
+; GFX11-TRUE16-NEXT: s_lshr_b32 s10, s24, 16
+; GFX11-TRUE16-NEXT: s_lshr_b32 s9, s23, 16
+; GFX11-TRUE16-NEXT: s_lshr_b32 s8, s22, 16
+; GFX11-TRUE16-NEXT: s_lshr_b32 s7, s21, 16
+; GFX11-TRUE16-NEXT: s_lshr_b32 s6, s20, 16
+; GFX11-TRUE16-NEXT: s_lshr_b32 s5, s19, 16
+; GFX11-TRUE16-NEXT: s_lshr_b32 s4, s18, 16
+; GFX11-TRUE16-NEXT: s_lshr_b32 s43, s17, 16
+; GFX11-TRUE16-NEXT: s_lshr_b32 s44, s16, 16
+; GFX11-TRUE16-NEXT: s_lshr_b32 s45, s3, 16
+; GFX11-TRUE16-NEXT: s_lshr_b32 s46, s2, 16
+; GFX11-TRUE16-NEXT: s_lshr_b32 s41, s1, 16
+; GFX11-TRUE16-NEXT: s_lshr_b32 s40, s0, 16
+; GFX11-TRUE16-NEXT: s_mov_b32 s42, 0
+; GFX11-TRUE16-NEXT: s_pack_ll_b32_b16 s40, s0, s40
+; GFX11-TRUE16-NEXT: s_pack_ll_b32_b16 s41, s1, s41
+; GFX11-TRUE16-NEXT: s_pack_ll_b32_b16 s0, s2, s46
+; GFX11-TRUE16-NEXT: s_pack_ll_b32_b16 s1, s3, s45
+; GFX11-TRUE16-NEXT: s_pack_ll_b32_b16 s2, s16, s44
+; GFX11-TRUE16-NEXT: s_pack_ll_b32_b16 s3, s17, s43
+; GFX11-TRUE16-NEXT: s_pack_ll_b32_b16 s4, s18, s4
+; GFX11-TRUE16-NEXT: s_pack_ll_b32_b16 s5, s19, s5
+; GFX11-TRUE16-NEXT: s_pack_ll_b32_b16 s6, s20, s6
+; GFX11-TRUE16-NEXT: s_pack_ll_b32_b16 s7, s21, s7
+; GFX11-TRUE16-NEXT: s_pack_ll_b32_b16 s8, s22, s8
+; GFX11-TRUE16-NEXT: s_pack_ll_b32_b16 s9, s23, s9
+; GFX11-TRUE16-NEXT: s_pack_ll_b32_b16 s10, s24, s10
+; GFX11-TRUE16-NEXT: s_pack_ll_b32_b16 s11, s25, s11
+; GFX11-TRUE16-NEXT: s_pack_ll_b32_b16 s12, s26, s12
+; GFX11-TRUE16-NEXT: s_pack_ll_b32_b16 s13, s27, s13
+; GFX11-TRUE16-NEXT: s_pack_ll_b32_b16 s14, s28, s14
+; GFX11-TRUE16-NEXT: s_pack_ll_b32_b16 s15, s29, s15
; GFX11-TRUE16-NEXT: s_and_b32 s47, vcc_lo, exec_lo
; GFX11-TRUE16-NEXT: s_cbranch_scc0 .LBB43_4
; GFX11-TRUE16-NEXT: ; %bb.1: ; %cmp.false
-; GFX11-TRUE16-NEXT: v_lshl_or_b32 v18, v39, 16, v55
-; GFX11-TRUE16-NEXT: v_lshl_or_b32 v19, v38, 16, v54
-; GFX11-TRUE16-NEXT: v_lshl_or_b32 v20, v37, 16, v53
-; GFX11-TRUE16-NEXT: v_lshl_or_b32 v21, v36, 16, v52
-; GFX11-TRUE16-NEXT: v_lshl_or_b32 v22, v35, 16, v51
-; GFX11-TRUE16-NEXT: v_lshl_or_b32 v23, v34, 16, v50
-; GFX11-TRUE16-NEXT: v_lshl_or_b32 v24, v33, 16, v49
-; GFX11-TRUE16-NEXT: v_lshl_or_b32 v25, v32, 16, v48
-; GFX11-TRUE16-NEXT: v_dual_mov_b32 v0, s0 :: v_dual_mov_b32 v1, s1
-; GFX11-TRUE16-NEXT: v_dual_mov_b32 v2, s2 :: v_dual_mov_b32 v3, s3
-; GFX11-TRUE16-NEXT: v_dual_mov_b32 v4, s4 :: v_dual_mov_b32 v5, s5
-; GFX11-TRUE16-NEXT: v_dual_mov_b32 v6, s6 :: v_dual_mov_b32 v7, s7
-; GFX11-TRUE16-NEXT: v_dual_mov_b32 v8, s8 :: v_dual_mov_b32 v9, s9
-; GFX11-TRUE16-NEXT: v_dual_mov_b32 v10, s10 :: v_dual_mov_b32 v11, s11
-; GFX11-TRUE16-NEXT: v_dual_mov_b32 v12, s12 :: v_dual_mov_b32 v13, s13
-; GFX11-TRUE16-NEXT: v_dual_mov_b32 v14, s14 :: v_dual_mov_b32 v15, s17
-; GFX11-TRUE16-NEXT: v_dual_mov_b32 v16, s15 :: v_dual_mov_b32 v17, s16
-; GFX11-TRUE16-NEXT: s_and_not1_b32 vcc_lo, exec_lo, s40
+; GFX11-TRUE16-NEXT: v_dual_mov_b32 v0, s40 :: v_dual_mov_b32 v5, s0
+; GFX11-TRUE16-NEXT: v_dual_mov_b32 v2, s41 :: v_dual_mov_b32 v9, s1
+; GFX11-TRUE16-NEXT: v_dual_mov_b32 v14, s2 :: v_dual_mov_b32 v27, s4
+; GFX11-TRUE16-NEXT: v_dual_mov_b32 v20, s3 :: v_dual_mov_b32 v35, s5
+; GFX11-TRUE16-NEXT: v_dual_mov_b32 v44, s6 :: v_dual_mov_b32 v65, s8
+; GFX11-TRUE16-NEXT: v_dual_mov_b32 v54, s7 :: v_dual_mov_b32 v77, s9
+; GFX11-TRUE16-NEXT: v_dual_mov_b32 v90, s10 :: v_dual_mov_b32 v119, s12
+; GFX11-TRUE16-NEXT: v_dual_mov_b32 v104, s11 :: v_dual_mov_b32 v135, s13
+; GFX11-TRUE16-NEXT: v_mov_b32_e32 v152, s14
+; GFX11-TRUE16-NEXT: v_mov_b32_e32 v170, s15
+; GFX11-TRUE16-NEXT: s_and_not1_b32 vcc_lo, exec_lo, s42
; GFX11-TRUE16-NEXT: s_cbranch_vccnz .LBB43_3
; GFX11-TRUE16-NEXT: .LBB43_2: ; %cmp.true
-; GFX11-TRUE16-NEXT: v_lshl_or_b32 v18, v39, 16, v55
-; GFX11-TRUE16-NEXT: v_lshl_or_b32 v19, v38, 16, v54
-; GFX11-TRUE16-NEXT: v_lshl_or_b32 v20, v37, 16, v53
-; GFX11-TRUE16-NEXT: v_lshl_or_b32 v21, v36, 16, v52
-; GFX11-TRUE16-NEXT: v_lshl_or_b32 v22, v35, 16, v51
-; GFX11-TRUE16-NEXT: v_lshl_or_b32 v23, v34, 16, v50
-; GFX11-TRUE16-NEXT: v_lshl_or_b32 v24, v33, 16, v49
-; GFX11-TRUE16-NEXT: v_lshl_or_b32 v25, v32, 16, v48
-; GFX11-TRUE16-NEXT: v_pk_add_u16 v0, s0, 3 op_sel_hi:[1,0]
-; GFX11-TRUE16-NEXT: v_pk_add_u16 v1, s1, 3 op_sel_hi:[1,0]
-; GFX11-TRUE16-NEXT: v_pk_add_u16 v2, s2, 3 op_sel_hi:[1,0]
-; GFX11-TRUE16-NEXT: v_pk_add_u16 v3, s3, 3 op_sel_hi:[1,0]
-; GFX11-TRUE16-NEXT: v_pk_add_u16 v4, s4, 3 op_sel_hi:[1,0]
-; GFX11-TRUE16-NEXT: v_pk_add_u16 v5, s5, 3 op_sel_hi:[1,0]
-; GFX11-TRUE16-NEXT: v_pk_add_u16 v6, s6, 3 op_sel_hi:[1,0]
-; GFX11-TRUE16-NEXT: v_pk_add_u16 v7, s7, 3 op_sel_hi:[1,0]
-; GFX11-TRUE16-NEXT: v_pk_add_u16 v8, s8, 3 op_sel_hi:[1,0]
-; GFX11-TRUE16-NEXT: v_pk_add_u16 v9, s9, 3 op_sel_hi:[1,0]
-; GFX11-TRUE16-NEXT: v_pk_add_u16 v10, s10, 3 op_sel_hi:[1,0]
-; GFX11-TRUE16-NEXT: v_pk_add_u16 v11, s11, 3 op_sel_hi:[1,0]
-; GFX11-TRUE16-NEXT: v_pk_add_u16 v12, s12, 3 op_sel_hi:[1,0]
-; GFX11-TRUE16-NEXT: v_pk_add_u16 v13, s13, 3 op_sel_hi:[1,0]
-; GFX11-TRUE16-NEXT: v_pk_add_u16 v14, s14, 3 op_sel_hi:[1,0]
-; GFX11-TRUE16-NEXT: v_pk_add_u16 v15, s17, 3 op_sel_hi:[1,0]
-; GFX11-TRUE16-NEXT: v_pk_add_u16 v16, s15, 3 op_sel_hi:[1,0]
-; GFX11-TRUE16-NEXT: v_pk_add_u16 v17, s16, 3 op_sel_hi:[1,0]
-; GFX11-TRUE16-NEXT: v_pk_add_u16 v18, v18, 3 op_sel_hi:[1,0]
-; GFX11-TRUE16-NEXT: v_pk_add_u16 v19, v19, 3 op_sel_hi:[1,0]
-; GFX11-TRUE16-NEXT: v_pk_add_u16 v20, v20, 3 op_sel_hi:[1,0]
-; GFX11-TRUE16-NEXT: v_pk_add_u16 v21, v21, 3 op_sel_hi:[1,0]
-; GFX11-TRUE16-NEXT: v_pk_add_u16 v22, v22, 3 op_sel_hi:[1,0]
-; GFX11-TRUE16-NEXT: v_pk_add_u16 v23, v23, 3 op_sel_hi:[1,0]
-; GFX11-TRUE16-NEXT: v_pk_add_u16 v24, v24, 3 op_sel_hi:[1,0]
+; GFX11-TRUE16-NEXT: v_pk_add_u16 v0, s40, 3 op_sel_hi:[1,0]
+; GFX11-TRUE16-NEXT: v_pk_add_u16 v2, s41, 3 op_sel_hi:[1,0]
+; GFX11-TRUE16-NEXT: v_pk_add_u16 v185, v185, 3 op_sel_hi:[1,0]
+; GFX11-TRUE16-NEXT: v_pk_add_u16 v191, v191, 3 op_sel_hi:[1,0]
+; GFX11-TRUE16-NEXT: v_pk_add_u16 v190, v190, 3 op_sel_hi:[1,0]
+; GFX11-TRUE16-NEXT: v_pk_add_u16 v189, v189, 3 op_sel_hi:[1,0]
+; GFX11-TRUE16-NEXT: v_pk_add_u16 v188, v188, 3 op_sel_hi:[1,0]
+; GFX11-TRUE16-NEXT: v_pk_add_u16 v187, v187, 3 op_sel_hi:[1,0]
+; GFX11-TRUE16-NEXT: v_pk_add_u16 v186, v186, 3 op_sel_hi:[1,0]
; GFX11-TRUE16-NEXT: v_pk_add_u16 v25, v25, 3 op_sel_hi:[1,0]
+; GFX11-TRUE16-NEXT: v_pk_add_u16 v5, s0, 3 op_sel_hi:[1,0]
+; GFX11-TRUE16-NEXT: v_pk_add_u16 v9, s1, 3 op_sel_hi:[1,0]
+; GFX11-TRUE16-NEXT: v_pk_add_u16 v14, s2, 3 op_sel_hi:[1,0]
+; GFX11-TRUE16-NEXT: v_pk_add_u16 v20, s3, 3 op_sel_hi:[1,0]
+; GFX11-TRUE16-NEXT: v_pk_add_u16 v27, s4, 3 op_sel_hi:[1,0]
+; GFX11-TRUE16-NEXT: v_pk_add_u16 v35, s5, 3 op_sel_hi:[1,0]
+; GFX11-TRUE16-NEXT: v_pk_add_u16 v44, s6, 3 op_sel_hi:[1,0]
+; GFX11-TRUE16-NEXT: v_pk_add_u16 v54, s7, 3 op_sel_hi:[1,0]
+; GFX11-TRUE16-NEXT: v_pk_add_u16 v65, s8, 3 op_sel_hi:[1,0]
+; GFX11-TRUE16-NEXT: v_pk_add_u16 v77, s9, 3 op_sel_hi:[1,0]
+; GFX11-TRUE16-NEXT: v_pk_add_u16 v90, s10, 3 op_sel_hi:[1,0]
+; GFX11-TRUE16-NEXT: v_pk_add_u16 v104, s11, 3 op_sel_hi:[1,0]
+; GFX11-TRUE16-NEXT: v_pk_add_u16 v119, s12, 3 op_sel_hi:[1,0]
+; GFX11-TRUE16-NEXT: v_pk_add_u16 v135, s13, 3 op_sel_hi:[1,0]
+; GFX11-TRUE16-NEXT: v_pk_add_u16 v152, s14, 3 op_sel_hi:[1,0]
+; GFX11-TRUE16-NEXT: v_pk_add_u16 v170, s15, 3 op_sel_hi:[1,0]
; GFX11-TRUE16-NEXT: .LBB43_3: ; %end
+; GFX11-TRUE16-NEXT: v_dual_mov_b32 v1, v2 :: v_dual_mov_b32 v2, v5
+; GFX11-TRUE16-NEXT: v_dual_mov_b32 v5, v20 :: v_dual_mov_b32 v6, v27
+; GFX11-TRUE16-NEXT: v_dual_mov_b32 v7, v35 :: v_dual_mov_b32 v8, v44
+; GFX11-TRUE16-NEXT: v_dual_mov_b32 v11, v77 :: v_dual_mov_b32 v12, v90
+; GFX11-TRUE16-NEXT: v_mov_b32_e32 v13, v104
+; GFX11-TRUE16-NEXT: v_dual_mov_b32 v15, v135 :: v_dual_mov_b32 v16, v152
+; GFX11-TRUE16-NEXT: v_dual_mov_b32 v17, v170 :: v_dual_mov_b32 v18, v185
+; GFX11-TRUE16-NEXT: v_dual_mov_b32 v19, v191 :: v_dual_mov_b32 v20, v190
+; GFX11-TRUE16-NEXT: v_dual_mov_b32 v21, v189 :: v_dual_mov_b32 v22, v188
+; GFX11-TRUE16-NEXT: v_dual_mov_b32 v23, v187 :: v_dual_mov_b32 v24, v186
+; GFX11-TRUE16-NEXT: s_clause 0x1f
+; GFX11-TRUE16-NEXT: scratch_load_b32 v191, off, s32
+; GFX11-TRUE16-NEXT: scratch_load_b32 v190, off, s32 offset:4
+; GFX11-TRUE16-NEXT: scratch_load_b32 v189, off, s32 offset:8
+; GFX11-TRUE16-NEXT: scratch_load_b32 v188, off, s32 offset:12
+; GFX11-TRUE16-NEXT: scratch_load_b32 v187, off, s32 offset:16
+; GFX11-TRUE16-NEXT: scratch_load_b32 v186, off, s32 offset:20
+; GFX11-TRUE16-NEXT: scratch_load_b32 v185, off, s32 offset:24
+; GFX11-TRUE16-NEXT: scratch_load_b32 v184, off, s32 offset:28
+; GFX11-TRUE16-NEXT: scratch_load_b32 v175, off, s32 offset:32
+; GFX11-TRUE16-NEXT: scratch_load_b32 v174, off, s32 offset:36
+; GFX11-TRUE16-NEXT: scratch_load_b32 v173, off, s32 offset:40
+; GFX11-TRUE16-NEXT: scratch_load_b32 v172, off, s32 offset:44
+; GFX11-TRUE16-NEXT: scratch_load_b32 v171, off, s32 offset:48
+; GFX11-TRUE16-NEXT: scratch_load_b32 v170, off, s32 offset:52
+; GFX11-TRUE16-NEXT: scratch_load_b32 v169, off, s32 offset:56
+; GFX11-TRUE16-NEXT: scratch_load_b32 v168, off, s32 offset:60
+; GFX11-TRUE16-NEXT: scratch_load_b32 v159, off, s32 offset:64
+; GFX11-TRUE16-NEXT: scratch_load_b32 v158, off, s32 offset:68
+; GFX11-TRUE16-NEXT: scratch_load_b32 v157, off, s32 offset:72
+; GFX11-TRUE16-NEXT: scratch_load_b32 v156, off, s32 offset:76
+; GFX11-TRUE16-NEXT: scratch_load_b32 v155, off, s32 offset:80
+; GFX11-TRUE16-NEXT: scratch_load_b32 v154, off, s32 offset:84
+; GFX11-TRUE16-NEXT: scratch_load_b32 v153, off, s32 offset:88
+; GFX11-TRUE16-NEXT: scratch_load_b32 v152, off, s32 offset:92
+; GFX11-TRUE16-NEXT: scratch_load_b32 v143, off, s32 offset:96
+; GFX11-TRUE16-NEXT: scratch_load_b32 v142, off, s32 offset:100
+; GFX11-TRUE16-NEXT: scratch_load_b32 v141, off, s32 offset:104
+; GFX11-TRUE16-NEXT: scratch_load_b32 v140, off, s32 offset:108
+; GFX11-TRUE16-NEXT: scratch_load_b32 v139, off, s32 offset:112
+; GFX11-TRUE16-NEXT: scratch_load_b32 v138, off, s32 offset:116
+; GFX11-TRUE16-NEXT: scratch_load_b32 v137, off, s32 offset:120
+; GFX11-TRUE16-NEXT: scratch_load_b32 v136, off, s32 offset:124
+; GFX11-TRUE16-NEXT: s_clause 0x1f
+; GFX11-TRUE16-NEXT: scratch_load_b32 v127, off, s32 offset:128
+; GFX11-TRUE16-NEXT: scratch_load_b32 v126, off, s32 offset:132
+; GFX11-TRUE16-NEXT: scratch_load_b32 v125, off, s32 offset:136
+; GFX11-TRUE16-NEXT: scratch_load_b32 v124, off, s32 offset:140
+; GFX11-TRUE16-NEXT: scratch_load_b32 v123, off, s32 offset:144
+; GFX11-TRUE16-NEXT: scratch_load_b32 v122, off, s32 offset:148
+; GFX11-TRUE16-NEXT: scratch_load_b32 v121, off, s32 offset:152
+; GFX11-TRUE16-NEXT: scratch_load_b32 v120, off, s32 offset:156
+; GFX11-TRUE16-NEXT: scratch_load_b32 v111, off, s32 offset:160
+; GFX11-TRUE16-NEXT: scratch_load_b32 v110, off, s32 offset:164
+; GFX11-TRUE16-NEXT: scratch_load_b32 v109, off, s32 offset:168
+; GFX11-TRUE16-NEXT: scratch_load_b32 v108, off, s32 offset:172
+; GFX11-TRUE16-NEXT: scratch_load_b32 v107, off, s32 offset:176
+; GFX11-TRUE16-NEXT: scratch_load_b32 v106, off, s32 offset:180
+; GFX11-TRUE16-NEXT: scratch_load_b32 v105, off, s32 offset:184
+; GFX11-TRUE16-NEXT: scratch_load_b32 v104, off, s32 offset:188
+; GFX11-TRUE16-NEXT: scratch_load_b32 v95, off, s32 offset:192
+; GFX11-TRUE16-NEXT: scratch_load_b32 v94, off, s32 offset:196
+; GFX11-TRUE16-NEXT: scratch_load_b32 v93, off, s32 offset:200
+; GFX11-TRUE16-NEXT: scratch_load_b32 v92, off, s32 offset:204
+; GFX11-TRUE16-NEXT: scratch_load_b32 v91, off, s32 offset:208
+; GFX11-TRUE16-NEXT: scratch_load_b32 v90, off, s32 offset:212
+; GFX11-TRUE16-NEXT: scratch_load_b32 v89, off, s32 offset:216
+; GFX11-TRUE16-NEXT: scratch_load_b32 v88, off, s32 offset:220
+; GFX11-TRUE16-NEXT: scratch_load_b32 v79, off, s32 offset:224
+; GFX11-TRUE16-NEXT: scratch_load_b32 v78, off, s32 offset:228
+; GFX11-TRUE16-NEXT: scratch_load_b32 v77, off, s32 offset:232
+; GFX11-TRUE16-NEXT: scratch_load_b32 v76, off, s32 offset:236
+; GFX11-TRUE16-NEXT: scratch_load_b32 v75, off, s32 offset:240
+; GFX11-TRUE16-NEXT: scratch_load_b32 v74, off, s32 offset:244
+; GFX11-TRUE16-NEXT: scratch_load_b32 v73, off, s32 offset:248
+; GFX11-TRUE16-NEXT: scratch_load_b32 v72, off, s32 offset:252
+; GFX11-TRUE16-NEXT: s_clause 0xf
+; GFX11-TRUE16-NEXT: scratch_load_b32 v63, off, s32 offset:256
+; GFX11-TRUE16-NEXT: scratch_load_b32 v62, off, s32 offset:260
+; GFX11-TRUE16-NEXT: scratch_load_b32 v61, off, s32 offset:264
+; GFX11-TRUE16-NEXT: scratch_load_b32 v60, off, s32 offset:268
+; GFX11-TRUE16-NEXT: scratch_load_b32 v59, off, s32 offset:272
+; GFX11-TRUE16-NEXT: scratch_load_b32 v58, off, s32 offset:276
+; GFX11-TRUE16-NEXT: scratch_load_b32 v57, off, s32 offset:280
+; GFX11-TRUE16-NEXT: scratch_load_b32 v56, off, s32 offset:284
+; GFX11-TRUE16-NEXT: scratch_load_b32 v47, off, s32 offset:288
+; GFX11-TRUE16-NEXT: scratch_load_b32 v46, off, s32 offset:292
+; GFX11-TRUE16-NEXT: scratch_load_b32 v45, off, s32 offset:296
+; GFX11-TRUE16-NEXT: scratch_load_b32 v44, off, s32 offset:300
+; GFX11-TRUE16-NEXT: scratch_load_b32 v43, off, s32 offset:304
+; GFX11-TRUE16-NEXT: scratch_load_b32 v42, off, s32 offset:308
+; GFX11-TRUE16-NEXT: scratch_load_b32 v41, off, s32 offset:312
+; GFX11-TRUE16-NEXT: scratch_load_b32 v40, off, s32 offset:316
+; GFX11-TRUE16-NEXT: v_dual_mov_b32 v3, v9 :: v_dual_mov_b32 v4, v14
+; GFX11-TRUE16-NEXT: v_dual_mov_b32 v9, v54 :: v_dual_mov_b32 v10, v65
+; GFX11-TRUE16-NEXT: v_mov_b32_e32 v14, v119
+; GFX11-TRUE16-NEXT: s_waitcnt vmcnt(0)
; GFX11-TRUE16-NEXT: s_setpc_b64 s[30:31]
; GFX11-TRUE16-NEXT: .LBB43_4:
+; GFX11-TRUE16-NEXT: v_mov_b32_e32 v53, v25
; GFX11-TRUE16-NEXT: ; implicit-def: $vgpr0_vgpr1_vgpr2_vgpr3_vgpr4_vgpr5_vgpr6_vgpr7_vgpr8_vgpr9_vgpr10_vgpr11_vgpr12_vgpr13_vgpr14_vgpr15_vgpr16_vgpr17_vgpr18_vgpr19_vgpr20_vgpr21_vgpr22_vgpr23_vgpr24_vgpr25_vgpr26_vgpr27_vgpr28_vgpr29_vgpr30_vgpr31
+; GFX11-TRUE16-NEXT: ; implicit-def: $vgpr1_vgpr2_vgpr3_vgpr4_vgpr5_vgpr6_vgpr7_vgpr8_vgpr9_vgpr10_vgpr11_vgpr12_vgpr13_vgpr14_vgpr15_vgpr16_vgpr17_vgpr18_vgpr19_vgpr20_vgpr21_vgpr22_vgpr23_vgpr24_vgpr25_vgpr26_vgpr27_vgpr28_vgpr29_vgpr30_vgpr31_vgpr32
+; GFX11-TRUE16-NEXT: ; implicit-def: $vgpr3_vgpr4_vgpr5_vgpr6_vgpr7_vgpr8_vgpr9_vgpr10_vgpr11_vgpr12_vgpr13_vgpr14_vgpr15_vgpr16_vgpr17_vgpr18_vgpr19_vgpr20_vgpr21_vgpr22_vgpr23_vgpr24_vgpr25_vgpr26_vgpr27_vgpr28_vgpr29_vgpr30_vgpr31_vgpr32_vgpr33_vgpr34
+; GFX11-TRUE16-NEXT: ; implicit-def: $vgpr6_vgpr7_vgpr8_vgpr9_vgpr10_vgpr11_vgpr12_vgpr13_vgpr14_vgpr15_vgpr16_vgpr17_vgpr18_vgpr19_vgpr20_vgpr21_vgpr22_vgpr23_vgpr24_vgpr25_vgpr26_vgpr27_vgpr28_vgpr29_vgpr30_vgpr31_vgpr32_vgpr33_vgpr34_vgpr35_vgpr36_vgpr37
+; GFX11-TRUE16-NEXT: ; implicit-def: $vgpr10_vgpr11_vgpr12_vgpr13_vgpr14_vgpr15_vgpr16_vgpr17_vgpr18_vgpr19_vgpr20_vgpr21_vgpr22_vgpr23_vgpr24_vgpr25_vgpr26_vgpr27_vgpr28_vgpr29_vgpr30_vgpr31_vgpr32_vgpr33_vgpr34_vgpr35_vgpr36_vgpr37_vgpr38_vgpr39_vgpr40_vgpr41
+; GFX11-TRUE16-NEXT: ; implicit-def: $vgpr15_vgpr16_vgpr17_vgpr18_vgpr19_vgpr20_vgpr21_vgpr22_vgpr23_vgpr24_vgpr25_vgpr26_vgpr27_vgpr28_vgpr29_vgpr30_vgpr31_vgpr32_vgpr33_vgpr34_vgpr35_vgpr36_vgpr37_vgpr38_vgpr39_vgpr40_vgpr41_vgpr42_vgpr43_vgpr44_vgpr45_vgpr46
+; GFX11-TRUE16-NEXT: ; implicit-def: $vgpr21_vgpr22_vgpr23_vgpr24_vgpr25_vgpr26_vgpr27_vgpr28_vgpr29_vgpr30_vgpr31_vgpr32_vgpr33_vgpr34_vgpr35_vgpr36_vgpr37_vgpr38_vgpr39_vgpr40_vgpr41_vgpr42_vgpr43_vgpr44_vgpr45_vgpr46_vgpr47_vgpr48_vgpr49_vgpr50_vgpr51_vgpr52
+; GFX11-TRUE16-NEXT: s_delay_alu instid0(VALU_DEP_1)
+; GFX11-TRUE16-NEXT: v_mov_b32_e32 v25, v53
+; GFX11-TRUE16-NEXT: ; implicit-def: $vgpr28_vgpr29_vgpr30_vgpr31_vgpr32_vgpr33_vgpr34_vgpr35_vgpr36_vgpr37_vgpr38_vgpr39_vgpr40_vgpr41_vgpr42_vgpr43_vgpr44_vgpr45_vgpr46_vgpr47_vgpr48_vgpr49_vgpr50_vgpr51_vgpr52_vgpr53_vgpr54_vgpr55_vgpr56_vgpr57_vgpr58_vgpr59
+; GFX11-TRUE16-NEXT: ; implicit-def: $vgpr36_vgpr37_vgpr38_vgpr39_vgpr40_vgpr41_vgpr42_vgpr43_vgpr44_vgpr45_vgpr46_vgpr47_vgpr48_vgpr49_vgpr50_vgpr51_vgpr52_vgpr53_vgpr54_vgpr55_vgpr56_vgpr57_vgpr58_vgpr59_vgpr60_vgpr61_vgpr62_vgpr63_vgpr64_vgpr65_vgpr66_vgpr67
+; GFX11-TRUE16-NEXT: ; implicit-def: $vgpr45_vgpr46_vgpr47_vgpr48_vgpr49_vgpr50_vgpr51_vgpr52_vgpr53_vgpr54_vgpr55_vgpr56_vgpr57_vgpr58_vgpr59_vgpr60_vgpr61_vgpr62_vgpr63_vgpr64_vgpr65_vgpr66_vgpr67_vgpr68_vgpr69_vgpr70_vgpr71_vgpr72_vgpr73_vgpr74_vgpr75_vgpr76
+; GFX11-TRUE16-NEXT: ; implicit-def: $vgpr55_vgpr56_vgpr57_vgpr58_vgpr59_vgpr60_vgpr61_vgpr62_vgpr63_vgpr64_vgpr65_vgpr66_vgpr67_vgpr68_vgpr69_vgpr70_vgpr71_vgpr72_vgpr73_vgpr74_vgpr75_vgpr76_vgpr77_vgpr78_vgpr79_vgpr80_vgpr81_vgpr82_vgpr83_vgpr84_vgpr85_vgpr86
+; GFX11-TRUE16-NEXT: ; implicit-def: $vgpr66_vgpr67_vgpr68_vgpr69_vgpr70_vgpr71_vgpr72_vgpr73_vgpr74_vgpr75_vgpr76_vgpr77_vgpr78_vgpr79_vgpr80_vgpr81_vgpr82_vgpr83_vgpr84_vgpr85_vgpr86_vgpr87_vgpr88_vgpr89_vgpr90_vgpr91_vgpr92_vgpr93_vgpr94_vgpr95_vgpr96_vgpr97
+; GFX11-TRUE16-NEXT: ; implicit-def: $vgpr78_vgpr79_vgpr80_vgpr81_vgpr82_vgpr83_vgpr84_vgpr85_vgpr86_vgpr87_vgpr88_vgpr89_vgpr90_vgpr91_vgpr92_vgpr93_vgpr94_vgpr95_vgpr96_vgpr97_vgpr98_vgpr99_vgpr100_vgpr101_vgpr102_vgpr103_vgpr104_vgpr105_vgpr106_vgpr107_vgpr108_vgpr109
+; GFX11-TRUE16-NEXT: ; implicit-def: $vgpr91_vgpr92_vgpr93_vgpr94_vgpr95_vgpr96_vgpr97_vgpr98_vgpr99_vgpr100_vgpr101_vgpr102_vgpr103_vgpr104_vgpr105_vgpr106_vgpr107_vgpr108_vgpr109_vgpr110_vgpr111_vgpr112_vgpr113_vgpr114_vgpr115_vgpr116_vgpr117_vgpr118_vgpr119_vgpr120_vgpr121_vgpr122
+; GFX11-TRUE16-NEXT: ; implicit-def: $vgpr105_vgpr106_vgpr107_vgpr108_vgpr109_vgpr110_vgpr111_vgpr112_vgpr113_vgpr114_vgpr115_vgpr116_vgpr117_vgpr118_vgpr119_vgpr120_vgpr121_vgpr122_vgpr123_vgpr124_vgpr125_vgpr126_vgpr127_vgpr128_vgpr129_vgpr130_vgpr131_vgpr132_vgpr133_vgpr134_vgpr135_vgpr136
+; GFX11-TRUE16-NEXT: ; implicit-def: $vgpr120_vgpr121_vgpr122_vgpr123_vgpr124_vgpr125_vgpr126_vgpr127_vgpr128_vgpr129_vgpr130_vgpr131_vgpr132_vgpr133_vgpr134_vgpr135_vgpr136_vgpr137_vgpr138_vgpr139_vgpr140_vgpr141_vgpr142_vgpr143_vgpr144_vgpr145_vgpr146_vgpr147_vgpr148_vgpr149_vgpr150_vgpr151
+; GFX11-TRUE16-NEXT: ; implicit-def: $vgpr136_vgpr137_vgpr138_vgpr139_vgpr140_vgpr141_vgpr142_vgpr143_vgpr144_vgpr145_vgpr146_vgpr147_vgpr148_vgpr149_vgpr150_vgpr151_vgpr152_vgpr153_vgpr154_vgpr155_vgpr156_vgpr157_vgpr158_vgpr159_vgpr160_vgpr161_vgpr162_vgpr163_vgpr164_vgpr165_vgpr166_vgpr167
+; GFX11-TRUE16-NEXT: ; implicit-def: $vgpr153_vgpr154_vgpr155_vgpr156_vgpr157_vgpr158_vgpr159_vgpr160_vgpr161_vgpr162_vgpr163_vgpr164_vgpr165_vgpr166_vgpr167_vgpr168_vgpr169_vgpr170_vgpr171_vgpr172_vgpr173_vgpr174_vgpr175_vgpr176_vgpr177_vgpr178_vgpr179_vgpr180_vgpr181_vgpr182_vgpr183_vgpr184
; GFX11-TRUE16-NEXT: s_branch .LBB43_2
;
; GFX11-FAKE16-LABEL: bitcast_v52i16_to_v13i64_scalar:
@@ -30655,129 +31809,295 @@ define inreg <13 x i64> @bitcast_v52f16_to_v13i64_scalar(<52 x half> inreg %a, i
; GFX11-TRUE16-LABEL: bitcast_v52f16_to_v13i64_scalar:
; GFX11-TRUE16: ; %bb.0:
; GFX11-TRUE16-NEXT: s_waitcnt vmcnt(0) expcnt(0) lgkmcnt(0)
-; GFX11-TRUE16-NEXT: v_mov_b16_e32 v32.h, 0
; GFX11-TRUE16-NEXT: v_cmp_ne_u32_e32 vcc_lo, 0, v8
-; GFX11-TRUE16-NEXT: v_mov_b16_e32 v32.l, v7.h
-; GFX11-TRUE16-NEXT: v_mov_b16_e32 v33.l, v6.h
-; GFX11-TRUE16-NEXT: v_mov_b16_e32 v34.l, v5.h
-; GFX11-TRUE16-NEXT: v_mov_b16_e32 v33.h, v32.h
-; GFX11-TRUE16-NEXT: v_mov_b16_e32 v34.h, v32.h
-; GFX11-TRUE16-NEXT: v_mov_b16_e32 v35.l, v4.h
-; GFX11-TRUE16-NEXT: v_mov_b16_e32 v35.h, v32.h
-; GFX11-TRUE16-NEXT: v_mov_b16_e32 v36.l, v3.h
-; GFX11-TRUE16-NEXT: v_mov_b16_e32 v36.h, v32.h
-; GFX11-TRUE16-NEXT: v_mov_b16_e32 v37.l, v2.h
-; GFX11-TRUE16-NEXT: v_mov_b16_e32 v37.h, v32.h
-; GFX11-TRUE16-NEXT: v_mov_b16_e32 v38.l, v1.h
-; GFX11-TRUE16-NEXT: v_mov_b16_e32 v38.h, v32.h
-; GFX11-TRUE16-NEXT: v_mov_b16_e32 v39.l, v0.h
-; GFX11-TRUE16-NEXT: v_mov_b16_e32 v39.h, v32.h
-; GFX11-TRUE16-NEXT: v_and_b32_e32 v55, 0xffff, v0
-; GFX11-TRUE16-NEXT: v_and_b32_e32 v54, 0xffff, v1
-; GFX11-TRUE16-NEXT: v_and_b32_e32 v53, 0xffff, v2
-; GFX11-TRUE16-NEXT: v_and_b32_e32 v52, 0xffff, v3
-; GFX11-TRUE16-NEXT: v_and_b32_e32 v51, 0xffff, v4
-; GFX11-TRUE16-NEXT: v_and_b32_e32 v50, 0xffff, v5
-; GFX11-TRUE16-NEXT: v_and_b32_e32 v49, 0xffff, v6
-; GFX11-TRUE16-NEXT: v_and_b32_e32 v48, 0xffff, v7
-; GFX11-TRUE16-NEXT: s_lshr_b32 s41, s29, 16
-; GFX11-TRUE16-NEXT: s_lshr_b32 s15, s28, 16
-; GFX11-TRUE16-NEXT: s_lshr_b32 s42, s27, 16
-; GFX11-TRUE16-NEXT: s_lshr_b32 s14, s26, 16
-; GFX11-TRUE16-NEXT: s_lshr_b32 s13, s25, 16
-; GFX11-TRUE16-NEXT: s_lshr_b32 s12, s24, 16
-; GFX11-TRUE16-NEXT: s_lshr_b32 s11, s23, 16
-; GFX11-TRUE16-NEXT: s_lshr_b32 s10, s22, 16
-; GFX11-TRUE16-NEXT: s_lshr_b32 s9, s21, 16
-; GFX11-TRUE16-NEXT: s_lshr_b32 s8, s20, 16
-; GFX11-TRUE16-NEXT: s_lshr_b32 s7, s19, 16
-; GFX11-TRUE16-NEXT: s_lshr_b32 s6, s18, 16
-; GFX11-TRUE16-NEXT: s_lshr_b32 s5, s17, 16
-; GFX11-TRUE16-NEXT: s_lshr_b32 s4, s16, 16
-; GFX11-TRUE16-NEXT: s_lshr_b32 s43, s3, 16
-; GFX11-TRUE16-NEXT: s_lshr_b32 s44, s2, 16
-; GFX11-TRUE16-NEXT: s_lshr_b32 s45, s1, 16
-; GFX11-TRUE16-NEXT: s_lshr_b32 s46, s0, 16
-; GFX11-TRUE16-NEXT: s_mov_b32 s40, 0
-; GFX11-TRUE16-NEXT: s_pack_ll_b32_b16 s0, s0, s46
-; GFX11-TRUE16-NEXT: s_pack_ll_b32_b16 s1, s1, s45
-; GFX11-TRUE16-NEXT: s_pack_ll_b32_b16 s2, s2, s44
-; GFX11-TRUE16-NEXT: s_pack_ll_b32_b16 s3, s3, s43
-; GFX11-TRUE16-NEXT: s_pack_ll_b32_b16 s4, s16, s4
-; GFX11-TRUE16-NEXT: s_pack_ll_b32_b16 s5, s17, s5
-; GFX11-TRUE16-NEXT: s_pack_ll_b32_b16 s6, s18, s6
-; GFX11-TRUE16-NEXT: s_pack_ll_b32_b16 s7, s19, s7
-; GFX11-TRUE16-NEXT: s_pack_ll_b32_b16 s8, s20, s8
-; GFX11-TRUE16-NEXT: s_pack_ll_b32_b16 s9, s21, s9
-; GFX11-TRUE16-NEXT: s_pack_ll_b32_b16 s10, s22, s10
-; GFX11-TRUE16-NEXT: s_pack_ll_b32_b16 s11, s23, s11
-; GFX11-TRUE16-NEXT: s_pack_ll_b32_b16 s12, s24, s12
-; GFX11-TRUE16-NEXT: s_pack_ll_b32_b16 s13, s25, s13
-; GFX11-TRUE16-NEXT: s_pack_ll_b32_b16 s14, s26, s14
-; GFX11-TRUE16-NEXT: s_pack_ll_b32_b16 s17, s27, s42
-; GFX11-TRUE16-NEXT: s_pack_ll_b32_b16 s15, s28, s15
-; GFX11-TRUE16-NEXT: s_pack_ll_b32_b16 s16, s29, s41
+; GFX11-TRUE16-NEXT: s_clause 0x1f
+; GFX11-TRUE16-NEXT: scratch_store_b32 off, v40, s32 offset:316
+; GFX11-TRUE16-NEXT: scratch_store_b32 off, v41, s32 offset:312
+; GFX11-TRUE16-NEXT: scratch_store_b32 off, v42, s32 offset:308
+; GFX11-TRUE16-NEXT: scratch_store_b32 off, v43, s32 offset:304
+; GFX11-TRUE16-NEXT: scratch_store_b32 off, v44, s32 offset:300
+; GFX11-TRUE16-NEXT: scratch_store_b32 off, v45, s32 offset:296
+; GFX11-TRUE16-NEXT: scratch_store_b32 off, v46, s32 offset:292
+; GFX11-TRUE16-NEXT: scratch_store_b32 off, v47, s32 offset:288
+; GFX11-TRUE16-NEXT: scratch_store_b32 off, v56, s32 offset:284
+; GFX11-TRUE16-NEXT: scratch_store_b32 off, v57, s32 offset:280
+; GFX11-TRUE16-NEXT: scratch_store_b32 off, v58, s32 offset:276
+; GFX11-TRUE16-NEXT: scratch_store_b32 off, v59, s32 offset:272
+; GFX11-TRUE16-NEXT: scratch_store_b32 off, v60, s32 offset:268
+; GFX11-TRUE16-NEXT: scratch_store_b32 off, v61, s32 offset:264
+; GFX11-TRUE16-NEXT: scratch_store_b32 off, v62, s32 offset:260
+; GFX11-TRUE16-NEXT: scratch_store_b32 off, v63, s32 offset:256
+; GFX11-TRUE16-NEXT: scratch_store_b32 off, v72, s32 offset:252
+; GFX11-TRUE16-NEXT: scratch_store_b32 off, v73, s32 offset:248
+; GFX11-TRUE16-NEXT: scratch_store_b32 off, v74, s32 offset:244
+; GFX11-TRUE16-NEXT: scratch_store_b32 off, v75, s32 offset:240
+; GFX11-TRUE16-NEXT: scratch_store_b32 off, v76, s32 offset:236
+; GFX11-TRUE16-NEXT: scratch_store_b32 off, v77, s32 offset:232
+; GFX11-TRUE16-NEXT: scratch_store_b32 off, v78, s32 offset:228
+; GFX11-TRUE16-NEXT: scratch_store_b32 off, v79, s32 offset:224
+; GFX11-TRUE16-NEXT: scratch_store_b32 off, v88, s32 offset:220
+; GFX11-TRUE16-NEXT: scratch_store_b32 off, v89, s32 offset:216
+; GFX11-TRUE16-NEXT: scratch_store_b32 off, v90, s32 offset:212
+; GFX11-TRUE16-NEXT: scratch_store_b32 off, v91, s32 offset:208
+; GFX11-TRUE16-NEXT: scratch_store_b32 off, v92, s32 offset:204
+; GFX11-TRUE16-NEXT: scratch_store_b32 off, v93, s32 offset:200
+; GFX11-TRUE16-NEXT: scratch_store_b32 off, v94, s32 offset:196
+; GFX11-TRUE16-NEXT: scratch_store_b32 off, v95, s32 offset:192
+; GFX11-TRUE16-NEXT: s_clause 0x1f
+; GFX11-TRUE16-NEXT: scratch_store_b32 off, v104, s32 offset:188
+; GFX11-TRUE16-NEXT: scratch_store_b32 off, v105, s32 offset:184
+; GFX11-TRUE16-NEXT: scratch_store_b32 off, v106, s32 offset:180
+; GFX11-TRUE16-NEXT: scratch_store_b32 off, v107, s32 offset:176
+; GFX11-TRUE16-NEXT: scratch_store_b32 off, v108, s32 offset:172
+; GFX11-TRUE16-NEXT: scratch_store_b32 off, v109, s32 offset:168
+; GFX11-TRUE16-NEXT: scratch_store_b32 off, v110, s32 offset:164
+; GFX11-TRUE16-NEXT: scratch_store_b32 off, v111, s32 offset:160
+; GFX11-TRUE16-NEXT: scratch_store_b32 off, v120, s32 offset:156
+; GFX11-TRUE16-NEXT: scratch_store_b32 off, v121, s32 offset:152
+; GFX11-TRUE16-NEXT: scratch_store_b32 off, v122, s32 offset:148
+; GFX11-TRUE16-NEXT: scratch_store_b32 off, v123, s32 offset:144
+; GFX11-TRUE16-NEXT: scratch_store_b32 off, v124, s32 offset:140
+; GFX11-TRUE16-NEXT: scratch_store_b32 off, v125, s32 offset:136
+; GFX11-TRUE16-NEXT: scratch_store_b32 off, v126, s32 offset:132
+; GFX11-TRUE16-NEXT: scratch_store_b32 off, v127, s32 offset:128
+; GFX11-TRUE16-NEXT: scratch_store_b32 off, v136, s32 offset:124
+; GFX11-TRUE16-NEXT: scratch_store_b32 off, v137, s32 offset:120
+; GFX11-TRUE16-NEXT: scratch_store_b32 off, v138, s32 offset:116
+; GFX11-TRUE16-NEXT: scratch_store_b32 off, v139, s32 offset:112
+; GFX11-TRUE16-NEXT: scratch_store_b32 off, v140, s32 offset:108
+; GFX11-TRUE16-NEXT: scratch_store_b32 off, v141, s32 offset:104
+; GFX11-TRUE16-NEXT: scratch_store_b32 off, v142, s32 offset:100
+; GFX11-TRUE16-NEXT: scratch_store_b32 off, v143, s32 offset:96
+; GFX11-TRUE16-NEXT: scratch_store_b32 off, v152, s32 offset:92
+; GFX11-TRUE16-NEXT: scratch_store_b32 off, v153, s32 offset:88
+; GFX11-TRUE16-NEXT: scratch_store_b32 off, v154, s32 offset:84
+; GFX11-TRUE16-NEXT: scratch_store_b32 off, v155, s32 offset:80
+; GFX11-TRUE16-NEXT: scratch_store_b32 off, v156, s32 offset:76
+; GFX11-TRUE16-NEXT: scratch_store_b32 off, v157, s32 offset:72
+; GFX11-TRUE16-NEXT: scratch_store_b32 off, v158, s32 offset:68
+; GFX11-TRUE16-NEXT: scratch_store_b32 off, v159, s32 offset:64
+; GFX11-TRUE16-NEXT: s_clause 0xf
+; GFX11-TRUE16-NEXT: scratch_store_b32 off, v168, s32 offset:60
+; GFX11-TRUE16-NEXT: scratch_store_b32 off, v169, s32 offset:56
+; GFX11-TRUE16-NEXT: scratch_store_b32 off, v170, s32 offset:52
+; GFX11-TRUE16-NEXT: scratch_store_b32 off, v171, s32 offset:48
+; GFX11-TRUE16-NEXT: scratch_store_b32 off, v172, s32 offset:44
+; GFX11-TRUE16-NEXT: scratch_store_b32 off, v173, s32 offset:40
+; GFX11-TRUE16-NEXT: scratch_store_b32 off, v174, s32 offset:36
+; GFX11-TRUE16-NEXT: scratch_store_b32 off, v175, s32 offset:32
+; GFX11-TRUE16-NEXT: scratch_store_b32 off, v184, s32 offset:28
+; GFX11-TRUE16-NEXT: scratch_store_b32 off, v185, s32 offset:24
+; GFX11-TRUE16-NEXT: scratch_store_b32 off, v186, s32 offset:20
+; GFX11-TRUE16-NEXT: scratch_store_b32 off, v187, s32 offset:16
+; GFX11-TRUE16-NEXT: scratch_store_b32 off, v188, s32 offset:12
+; GFX11-TRUE16-NEXT: scratch_store_b32 off, v189, s32 offset:8
+; GFX11-TRUE16-NEXT: scratch_store_b32 off, v190, s32 offset:4
+; GFX11-TRUE16-NEXT: scratch_store_b32 off, v191, s32
+; GFX11-TRUE16-NEXT: v_dual_mov_b32 v25, v7 :: v_dual_mov_b32 v186, v6
+; GFX11-TRUE16-NEXT: v_dual_mov_b32 v187, v5 :: v_dual_mov_b32 v188, v4
+; GFX11-TRUE16-NEXT: v_dual_mov_b32 v189, v3 :: v_dual_mov_b32 v190, v2
+; GFX11-TRUE16-NEXT: v_mov_b32_e32 v191, v1
+; GFX11-TRUE16-NEXT: v_mov_b32_e32 v185, v0
+; GFX11-TRUE16-NEXT: s_lshr_b32 s15, s29, 16
+; GFX11-TRUE16-NEXT: s_lshr_b32 s14, s28, 16
+; GFX11-TRUE16-NEXT: s_lshr_b32 s13, s27, 16
+; GFX11-TRUE16-NEXT: s_lshr_b32 s12, s26, 16
+; GFX11-TRUE16-NEXT: s_lshr_b32 s11, s25, 16
+; GFX11-TRUE16-NEXT: s_lshr_b32 s10, s24, 16
+; GFX11-TRUE16-NEXT: s_lshr_b32 s9, s23, 16
+; GFX11-TRUE16-NEXT: s_lshr_b32 s8, s22, 16
+; GFX11-TRUE16-NEXT: s_lshr_b32 s7, s21, 16
+; GFX11-TRUE16-NEXT: s_lshr_b32 s6, s20, 16
+; GFX11-TRUE16-NEXT: s_lshr_b32 s5, s19, 16
+; GFX11-TRUE16-NEXT: s_lshr_b32 s4, s18, 16
+; GFX11-TRUE16-NEXT: s_lshr_b32 s43, s17, 16
+; GFX11-TRUE16-NEXT: s_lshr_b32 s44, s16, 16
+; GFX11-TRUE16-NEXT: s_lshr_b32 s45, s3, 16
+; GFX11-TRUE16-NEXT: s_lshr_b32 s46, s2, 16
+; GFX11-TRUE16-NEXT: s_lshr_b32 s41, s1, 16
+; GFX11-TRUE16-NEXT: s_lshr_b32 s40, s0, 16
+; GFX11-TRUE16-NEXT: s_mov_b32 s42, 0
+; GFX11-TRUE16-NEXT: s_pack_ll_b32_b16 s40, s0, s40
+; GFX11-TRUE16-NEXT: s_pack_ll_b32_b16 s41, s1, s41
+; GFX11-TRUE16-NEXT: s_pack_ll_b32_b16 s0, s2, s46
+; GFX11-TRUE16-NEXT: s_pack_ll_b32_b16 s1, s3, s45
+; GFX11-TRUE16-NEXT: s_pack_ll_b32_b16 s2, s16, s44
+; GFX11-TRUE16-NEXT: s_pack_ll_b32_b16 s3, s17, s43
+; GFX11-TRUE16-NEXT: s_pack_ll_b32_b16 s4, s18, s4
+; GFX11-TRUE16-NEXT: s_pack_ll_b32_b16 s5, s19, s5
+; GFX11-TRUE16-NEXT: s_pack_ll_b32_b16 s6, s20, s6
+; GFX11-TRUE16-NEXT: s_pack_ll_b32_b16 s7, s21, s7
+; GFX11-TRUE16-NEXT: s_pack_ll_b32_b16 s8, s22, s8
+; GFX11-TRUE16-NEXT: s_pack_ll_b32_b16 s9, s23, s9
+; GFX11-TRUE16-NEXT: s_pack_ll_b32_b16 s10, s24, s10
+; GFX11-TRUE16-NEXT: s_pack_ll_b32_b16 s11, s25, s11
+; GFX11-TRUE16-NEXT: s_pack_ll_b32_b16 s12, s26, s12
+; GFX11-TRUE16-NEXT: s_pack_ll_b32_b16 s13, s27, s13
+; GFX11-TRUE16-NEXT: s_pack_ll_b32_b16 s14, s28, s14
+; GFX11-TRUE16-NEXT: s_pack_ll_b32_b16 s15, s29, s15
; GFX11-TRUE16-NEXT: s_and_b32 s47, vcc_lo, exec_lo
; GFX11-TRUE16-NEXT: s_cbranch_scc0 .LBB47_4
; GFX11-TRUE16-NEXT: ; %bb.1: ; %cmp.false
-; GFX11-TRUE16-NEXT: v_lshl_or_b32 v18, v39, 16, v55
-; GFX11-TRUE16-NEXT: v_lshl_or_b32 v19, v38, 16, v54
-; GFX11-TRUE16-NEXT: v_lshl_or_b32 v20, v37, 16, v53
-; GFX11-TRUE16-NEXT: v_lshl_or_b32 v21, v36, 16, v52
-; GFX11-TRUE16-NEXT: v_lshl_or_b32 v22, v35, 16, v51
-; GFX11-TRUE16-NEXT: v_lshl_or_b32 v23, v34, 16, v50
-; GFX11-TRUE16-NEXT: v_lshl_or_b32 v24, v33, 16, v49
-; GFX11-TRUE16-NEXT: v_lshl_or_b32 v25, v32, 16, v48
-; GFX11-TRUE16-NEXT: v_dual_mov_b32 v0, s0 :: v_dual_mov_b32 v1, s1
-; GFX11-TRUE16-NEXT: v_dual_mov_b32 v2, s2 :: v_dual_mov_b32 v3, s3
-; GFX11-TRUE16-NEXT: v_dual_mov_b32 v4, s4 :: v_dual_mov_b32 v5, s5
-; GFX11-TRUE16-NEXT: v_dual_mov_b32 v6, s6 :: v_dual_mov_b32 v7, s7
-; GFX11-TRUE16-NEXT: v_dual_mov_b32 v8, s8 :: v_dual_mov_b32 v9, s9
-; GFX11-TRUE16-NEXT: v_dual_mov_b32 v10, s10 :: v_dual_mov_b32 v11, s11
-; GFX11-TRUE16-NEXT: v_dual_mov_b32 v12, s12 :: v_dual_mov_b32 v13, s13
-; GFX11-TRUE16-NEXT: v_dual_mov_b32 v14, s14 :: v_dual_mov_b32 v15, s17
-; GFX11-TRUE16-NEXT: v_dual_mov_b32 v16, s15 :: v_dual_mov_b32 v17, s16
-; GFX11-TRUE16-NEXT: s_and_not1_b32 vcc_lo, exec_lo, s40
+; GFX11-TRUE16-NEXT: v_dual_mov_b32 v0, s40 :: v_dual_mov_b32 v5, s0
+; GFX11-TRUE16-NEXT: v_dual_mov_b32 v2, s41 :: v_dual_mov_b32 v9, s1
+; GFX11-TRUE16-NEXT: v_dual_mov_b32 v14, s2 :: v_dual_mov_b32 v27, s4
+; GFX11-TRUE16-NEXT: v_dual_mov_b32 v20, s3 :: v_dual_mov_b32 v35, s5
+; GFX11-TRUE16-NEXT: v_dual_mov_b32 v44, s6 :: v_dual_mov_b32 v65, s8
+; GFX11-TRUE16-NEXT: v_dual_mov_b32 v54, s7 :: v_dual_mov_b32 v77, s9
+; GFX11-TRUE16-NEXT: v_dual_mov_b32 v90, s10 :: v_dual_mov_b32 v119, s12
+; GFX11-TRUE16-NEXT: v_dual_mov_b32 v104, s11 :: v_dual_mov_b32 v135, s13
+; GFX11-TRUE16-NEXT: v_mov_b32_e32 v152, s14
+; GFX11-TRUE16-NEXT: v_mov_b32_e32 v170, s15
+; GFX11-TRUE16-NEXT: s_and_not1_b32 vcc_lo, exec_lo, s42
; GFX11-TRUE16-NEXT: s_cbranch_vccnz .LBB47_3
; GFX11-TRUE16-NEXT: .LBB47_2: ; %cmp.true
-; GFX11-TRUE16-NEXT: v_lshl_or_b32 v18, v39, 16, v55
-; GFX11-TRUE16-NEXT: v_lshl_or_b32 v19, v38, 16, v54
-; GFX11-TRUE16-NEXT: v_lshl_or_b32 v20, v37, 16, v53
-; GFX11-TRUE16-NEXT: v_lshl_or_b32 v21, v36, 16, v52
-; GFX11-TRUE16-NEXT: v_lshl_or_b32 v22, v35, 16, v51
-; GFX11-TRUE16-NEXT: v_lshl_or_b32 v23, v34, 16, v50
-; GFX11-TRUE16-NEXT: v_lshl_or_b32 v24, v33, 16, v49
-; GFX11-TRUE16-NEXT: v_lshl_or_b32 v25, v32, 16, v48
-; GFX11-TRUE16-NEXT: v_pk_add_f16 v0, 0x200, s0 op_sel_hi:[0,1]
-; GFX11-TRUE16-NEXT: v_pk_add_f16 v1, 0x200, s1 op_sel_hi:[0,1]
-; GFX11-TRUE16-NEXT: v_pk_add_f16 v2, 0x200, s2 op_sel_hi:[0,1]
-; GFX11-TRUE16-NEXT: v_pk_add_f16 v3, 0x200, s3 op_sel_hi:[0,1]
-; GFX11-TRUE16-NEXT: v_pk_add_f16 v4, 0x200, s4 op_sel_hi:[0,1]
-; GFX11-TRUE16-NEXT: v_pk_add_f16 v5, 0x200, s5 op_sel_hi:[0,1]
-; GFX11-TRUE16-NEXT: v_pk_add_f16 v6, 0x200, s6 op_sel_hi:[0,1]
-; GFX11-TRUE16-NEXT: v_pk_add_f16 v7, 0x200, s7 op_sel_hi:[0,1]
-; GFX11-TRUE16-NEXT: v_pk_add_f16 v8, 0x200, s8 op_sel_hi:[0,1]
-; GFX11-TRUE16-NEXT: v_pk_add_f16 v9, 0x200, s9 op_sel_hi:[0,1]
-; GFX11-TRUE16-NEXT: v_pk_add_f16 v10, 0x200, s10 op_sel_hi:[0,1]
-; GFX11-TRUE16-NEXT: v_pk_add_f16 v11, 0x200, s11 op_sel_hi:[0,1]
-; GFX11-TRUE16-NEXT: v_pk_add_f16 v12, 0x200, s12 op_sel_hi:[0,1]
-; GFX11-TRUE16-NEXT: v_pk_add_f16 v13, 0x200, s13 op_sel_hi:[0,1]
-; GFX11-TRUE16-NEXT: v_pk_add_f16 v14, 0x200, s14 op_sel_hi:[0,1]
-; GFX11-TRUE16-NEXT: v_pk_add_f16 v15, 0x200, s17 op_sel_hi:[0,1]
-; GFX11-TRUE16-NEXT: v_pk_add_f16 v16, 0x200, s15 op_sel_hi:[0,1]
-; GFX11-TRUE16-NEXT: v_pk_add_f16 v17, 0x200, s16 op_sel_hi:[0,1]
-; GFX11-TRUE16-NEXT: v_pk_add_f16 v18, 0x200, v18 op_sel_hi:[0,1]
-; GFX11-TRUE16-NEXT: v_pk_add_f16 v19, 0x200, v19 op_sel_hi:[0,1]
-; GFX11-TRUE16-NEXT: v_pk_add_f16 v20, 0x200, v20 op_sel_hi:[0,1]
-; GFX11-TRUE16-NEXT: v_pk_add_f16 v21, 0x200, v21 op_sel_hi:[0,1]
-; GFX11-TRUE16-NEXT: v_pk_add_f16 v22, 0x200, v22 op_sel_hi:[0,1]
-; GFX11-TRUE16-NEXT: v_pk_add_f16 v23, 0x200, v23 op_sel_hi:[0,1]
-; GFX11-TRUE16-NEXT: v_pk_add_f16 v24, 0x200, v24 op_sel_hi:[0,1]
+; GFX11-TRUE16-NEXT: v_pk_add_f16 v0, 0x200, s40 op_sel_hi:[0,1]
+; GFX11-TRUE16-NEXT: v_pk_add_f16 v2, 0x200, s41 op_sel_hi:[0,1]
+; GFX11-TRUE16-NEXT: v_pk_add_f16 v185, 0x200, v185 op_sel_hi:[0,1]
+; GFX11-TRUE16-NEXT: v_pk_add_f16 v191, 0x200, v191 op_sel_hi:[0,1]
+; GFX11-TRUE16-NEXT: v_pk_add_f16 v190, 0x200, v190 op_sel_hi:[0,1]
+; GFX11-TRUE16-NEXT: v_pk_add_f16 v189, 0x200, v189 op_sel_hi:[0,1]
+; GFX11-TRUE16-NEXT: v_pk_add_f16 v188, 0x200, v188 op_sel_hi:[0,1]
+; GFX11-TRUE16-NEXT: v_pk_add_f16 v187, 0x200, v187 op_sel_hi:[0,1]
+; GFX11-TRUE16-NEXT: v_pk_add_f16 v186, 0x200, v186 op_sel_hi:[0,1]
; GFX11-TRUE16-NEXT: v_pk_add_f16 v25, 0x200, v25 op_sel_hi:[0,1]
+; GFX11-TRUE16-NEXT: v_pk_add_f16 v5, 0x200, s0 op_sel_hi:[0,1]
+; GFX11-TRUE16-NEXT: v_pk_add_f16 v9, 0x200, s1 op_sel_hi:[0,1]
+; GFX11-TRUE16-NEXT: v_pk_add_f16 v14, 0x200, s2 op_sel_hi:[0,1]
+; GFX11-TRUE16-NEXT: v_pk_add_f16 v20, 0x200, s3 op_sel_hi:[0,1]
+; GFX11-TRUE16-NEXT: v_pk_add_f16 v27, 0x200, s4 op_sel_hi:[0,1]
+; GFX11-TRUE16-NEXT: v_pk_add_f16 v35, 0x200, s5 op_sel_hi:[0,1]
+; GFX11-TRUE16-NEXT: v_pk_add_f16 v44, 0x200, s6 op_sel_hi:[0,1]
+; GFX11-TRUE16-NEXT: v_pk_add_f16 v54, 0x200, s7 op_sel_hi:[0,1]
+; GFX11-TRUE16-NEXT: v_pk_add_f16 v65, 0x200, s8 op_sel_hi:[0,1]
+; GFX11-TRUE16-NEXT: v_pk_add_f16 v77, 0x200, s9 op_sel_hi:[0,1]
+; GFX11-TRUE16-NEXT: v_pk_add_f16 v90, 0x200, s10 op_sel_hi:[0,1]
+; GFX11-TRUE16-NEXT: v_pk_add_f16 v104, 0x200, s11 op_sel_hi:[0,1]
+; GFX11-TRUE16-NEXT: v_pk_add_f16 v119, 0x200, s12 op_sel_hi:[0,1]
+; GFX11-TRUE16-NEXT: v_pk_add_f16 v135, 0x200, s13 op_sel_hi:[0,1]
+; GFX11-TRUE16-NEXT: v_pk_add_f16 v152, 0x200, s14 op_sel_hi:[0,1]
+; GFX11-TRUE16-NEXT: v_pk_add_f16 v170, 0x200, s15 op_sel_hi:[0,1]
; GFX11-TRUE16-NEXT: .LBB47_3: ; %end
+; GFX11-TRUE16-NEXT: v_dual_mov_b32 v1, v2 :: v_dual_mov_b32 v2, v5
+; GFX11-TRUE16-NEXT: v_dual_mov_b32 v5, v20 :: v_dual_mov_b32 v6, v27
+; GFX11-TRUE16-NEXT: v_dual_mov_b32 v7, v35 :: v_dual_mov_b32 v8, v44
+; GFX11-TRUE16-NEXT: v_dual_mov_b32 v11, v77 :: v_dual_mov_b32 v12, v90
+; GFX11-TRUE16-NEXT: v_mov_b32_e32 v13, v104
+; GFX11-TRUE16-NEXT: v_dual_mov_b32 v15, v135 :: v_dual_mov_b32 v16, v152
+; GFX11-TRUE16-NEXT: v_dual_mov_b32 v17, v170 :: v_dual_mov_b32 v18, v185
+; GFX11-TRUE16-NEXT: v_dual_mov_b32 v19, v191 :: v_dual_mov_b32 v20, v190
+; GFX11-TRUE16-NEXT: v_dual_mov_b32 v21, v189 :: v_dual_mov_b32 v22, v188
+; GFX11-TRUE16-NEXT: v_dual_mov_b32 v23, v187 :: v_dual_mov_b32 v24, v186
+; GFX11-TRUE16-NEXT: s_clause 0x1f
+; GFX11-TRUE16-NEXT: scratch_load_b32 v191, off, s32
+; GFX11-TRUE16-NEXT: scratch_load_b32 v190, off, s32 offset:4
+; GFX11-TRUE16-NEXT: scratch_load_b32 v189, off, s32 offset:8
+; GFX11-TRUE16-NEXT: scratch_load_b32 v188, off, s32 offset:12
+; GFX11-TRUE16-NEXT: scratch_load_b32 v187, off, s32 offset:16
+; GFX11-TRUE16-NEXT: scratch_load_b32 v186, off, s32 offset:20
+; GFX11-TRUE16-NEXT: scratch_load_b32 v185, off, s32 offset:24
+; GFX11-TRUE16-NEXT: scratch_load_b32 v184, off, s32 offset:28
+; GFX11-TRUE16-NEXT: scratch_load_b32 v175, off, s32 offset:32
+; GFX11-TRUE16-NEXT: scratch_load_b32 v174, off, s32 offset:36
+; GFX11-TRUE16-NEXT: scratch_load_b32 v173, off, s32 offset:40
+; GFX11-TRUE16-NEXT: scratch_load_b32 v172, off, s32 offset:44
+; GFX11-TRUE16-NEXT: scratch_load_b32 v171, off, s32 offset:48
+; GFX11-TRUE16-NEXT: scratch_load_b32 v170, off, s32 offset:52
+; GFX11-TRUE16-NEXT: scratch_load_b32 v169, off, s32 offset:56
+; GFX11-TRUE16-NEXT: scratch_load_b32 v168, off, s32 offset:60
+; GFX11-TRUE16-NEXT: scratch_load_b32 v159, off, s32 offset:64
+; GFX11-TRUE16-NEXT: scratch_load_b32 v158, off, s32 offset:68
+; GFX11-TRUE16-NEXT: scratch_load_b32 v157, off, s32 offset:72
+; GFX11-TRUE16-NEXT: scratch_load_b32 v156, off, s32 offset:76
+; GFX11-TRUE16-NEXT: scratch_load_b32 v155, off, s32 offset:80
+; GFX11-TRUE16-NEXT: scratch_load_b32 v154, off, s32 offset:84
+; GFX11-TRUE16-NEXT: scratch_load_b32 v153, off, s32 offset:88
+; GFX11-TRUE16-NEXT: scratch_load_b32 v152, off, s32 offset:92
+; GFX11-TRUE16-NEXT: scratch_load_b32 v143, off, s32 offset:96
+; GFX11-TRUE16-NEXT: scratch_load_b32 v142, off, s32 offset:100
+; GFX11-TRUE16-NEXT: scratch_load_b32 v141, off, s32 offset:104
+; GFX11-TRUE16-NEXT: scratch_load_b32 v140, off, s32 offset:108
+; GFX11-TRUE16-NEXT: scratch_load_b32 v139, off, s32 offset:112
+; GFX11-TRUE16-NEXT: scratch_load_b32 v138, off, s32 offset:116
+; GFX11-TRUE16-NEXT: scratch_load_b32 v137, off, s32 offset:120
+; GFX11-TRUE16-NEXT: scratch_load_b32 v136, off, s32 offset:124
+; GFX11-TRUE16-NEXT: s_clause 0x1f
+; GFX11-TRUE16-NEXT: scratch_load_b32 v127, off, s32 offset:128
+; GFX11-TRUE16-NEXT: scratch_load_b32 v126, off, s32 offset:132
+; GFX11-TRUE16-NEXT: scratch_load_b32 v125, off, s32 offset:136
+; GFX11-TRUE16-NEXT: scratch_load_b32 v124, off, s32 offset:140
+; GFX11-TRUE16-NEXT: scratch_load_b32 v123, off, s32 offset:144
+; GFX11-TRUE16-NEXT: scratch_load_b32 v122, off, s32 offset:148
+; GFX11-TRUE16-NEXT: scratch_load_b32 v121, off, s32 offset:152
+; GFX11-TRUE16-NEXT: scratch_load_b32 v120, off, s32 offset:156
+; GFX11-TRUE16-NEXT: scratch_load_b32 v111, off, s32 offset:160
+; GFX11-TRUE16-NEXT: scratch_load_b32 v110, off, s32 offset:164
+; GFX11-TRUE16-NEXT: scratch_load_b32 v109, off, s32 offset:168
+; GFX11-TRUE16-NEXT: scratch_load_b32 v108, off, s32 offset:172
+; GFX11-TRUE16-NEXT: scratch_load_b32 v107, off, s32 offset:176
+; GFX11-TRUE16-NEXT: scratch_load_b32 v106, off, s32 offset:180
+; GFX11-TRUE16-NEXT: scratch_load_b32 v105, off, s32 offset:184
+; GFX11-TRUE16-NEXT: scratch_load_b32 v104, off, s32 offset:188
+; GFX11-TRUE16-NEXT: scratch_load_b32 v95, off, s32 offset:192
+; GFX11-TRUE16-NEXT: scratch_load_b32 v94, off, s32 offset:196
+; GFX11-TRUE16-NEXT: scratch_load_b32 v93, off, s32 offset:200
+; GFX11-TRUE16-NEXT: scratch_load_b32 v92, off, s32 offset:204
+; GFX11-TRUE16-NEXT: scratch_load_b32 v91, off, s32 offset:208
+; GFX11-TRUE16-NEXT: scratch_load_b32 v90, off, s32 offset:212
+; GFX11-TRUE16-NEXT: scratch_load_b32 v89, off, s32 offset:216
+; GFX11-TRUE16-NEXT: scratch_load_b32 v88, off, s32 offset:220
+; GFX11-TRUE16-NEXT: scratch_load_b32 v79, off, s32 offset:224
+; GFX11-TRUE16-NEXT: scratch_load_b32 v78, off, s32 offset:228
+; GFX11-TRUE16-NEXT: scratch_load_b32 v77, off, s32 offset:232
+; GFX11-TRUE16-NEXT: scratch_load_b32 v76, off, s32 offset:236
+; GFX11-TRUE16-NEXT: scratch_load_b32 v75, off, s32 offset:240
+; GFX11-TRUE16-NEXT: scratch_load_b32 v74, off, s32 offset:244
+; GFX11-TRUE16-NEXT: scratch_load_b32 v73, off, s32 offset:248
+; GFX11-TRUE16-NEXT: scratch_load_b32 v72, off, s32 offset:252
+; GFX11-TRUE16-NEXT: s_clause 0xf
+; GFX11-TRUE16-NEXT: scratch_load_b32 v63, off, s32 offset:256
+; GFX11-TRUE16-NEXT: scratch_load_b32 v62, off, s32 offset:260
+; GFX11-TRUE16-NEXT: scratch_load_b32 v61, off, s32 offset:264
+; GFX11-TRUE16-NEXT: scratch_load_b32 v60, off, s32 offset:268
+; GFX11-TRUE16-NEXT: scratch_load_b32 v59, off, s32 offset:272
+; GFX11-TRUE16-NEXT: scratch_load_b32 v58, off, s32 offset:276
+; GFX11-TRUE16-NEXT: scratch_load_b32 v57, off, s32 offset:280
+; GFX11-TRUE16-NEXT: scratch_load_b32 v56, off, s32 offset:284
+; GFX11-TRUE16-NEXT: scratch_load_b32 v47, off, s32 offset:288
+; GFX11-TRUE16-NEXT: scratch_load_b32 v46, off, s32 offset:292
+; GFX11-TRUE16-NEXT: scratch_load_b32 v45, off, s32 offset:296
+; GFX11-TRUE16-NEXT: scratch_load_b32 v44, off, s32 offset:300
+; GFX11-TRUE16-NEXT: scratch_load_b32 v43, off, s32 offset:304
+; GFX11-TRUE16-NEXT: scratch_load_b32 v42, off, s32 offset:308
+; GFX11-TRUE16-NEXT: scratch_load_b32 v41, off, s32 offset:312
+; GFX11-TRUE16-NEXT: scratch_load_b32 v40, off, s32 offset:316
+; GFX11-TRUE16-NEXT: v_dual_mov_b32 v3, v9 :: v_dual_mov_b32 v4, v14
+; GFX11-TRUE16-NEXT: v_dual_mov_b32 v9, v54 :: v_dual_mov_b32 v10, v65
+; GFX11-TRUE16-NEXT: v_mov_b32_e32 v14, v119
+; GFX11-TRUE16-NEXT: s_waitcnt vmcnt(0)
; GFX11-TRUE16-NEXT: s_setpc_b64 s[30:31]
; GFX11-TRUE16-NEXT: .LBB47_4:
+; GFX11-TRUE16-NEXT: v_mov_b32_e32 v53, v25
; GFX11-TRUE16-NEXT: ; implicit-def: $vgpr0_vgpr1_vgpr2_vgpr3_vgpr4_vgpr5_vgpr6_vgpr7_vgpr8_vgpr9_vgpr10_vgpr11_vgpr12_vgpr13_vgpr14_vgpr15_vgpr16_vgpr17_vgpr18_vgpr19_vgpr20_vgpr21_vgpr22_vgpr23_vgpr24_vgpr25_vgpr26_vgpr27_vgpr28_vgpr29_vgpr30_vgpr31
+; GFX11-TRUE16-NEXT: ; implicit-def: $vgpr1_vgpr2_vgpr3_vgpr4_vgpr5_vgpr6_vgpr7_vgpr8_vgpr9_vgpr10_vgpr11_vgpr12_vgpr13_vgpr14_vgpr15_vgpr16_vgpr17_vgpr18_vgpr19_vgpr20_vgpr21_vgpr22_vgpr23_vgpr24_vgpr25_vgpr26_vgpr27_vgpr28_vgpr29_vgpr30_vgpr31_vgpr32
+; GFX11-TRUE16-NEXT: ; implicit-def: $vgpr3_vgpr4_vgpr5_vgpr6_vgpr7_vgpr8_vgpr9_vgpr10_vgpr11_vgpr12_vgpr13_vgpr14_vgpr15_vgpr16_vgpr17_vgpr18_vgpr19_vgpr20_vgpr21_vgpr22_vgpr23_vgpr24_vgpr25_vgpr26_vgpr27_vgpr28_vgpr29_vgpr30_vgpr31_vgpr32_vgpr33_vgpr34
+; GFX11-TRUE16-NEXT: ; implicit-def: $vgpr6_vgpr7_vgpr8_vgpr9_vgpr10_vgpr11_vgpr12_vgpr13_vgpr14_vgpr15_vgpr16_vgpr17_vgpr18_vgpr19_vgpr20_vgpr21_vgpr22_vgpr23_vgpr24_vgpr25_vgpr26_vgpr27_vgpr28_vgpr29_vgpr30_vgpr31_vgpr32_vgpr33_vgpr34_vgpr35_vgpr36_vgpr37
+; GFX11-TRUE16-NEXT: ; implicit-def: $vgpr10_vgpr11_vgpr12_vgpr13_vgpr14_vgpr15_vgpr16_vgpr17_vgpr18_vgpr19_vgpr20_vgpr21_vgpr22_vgpr23_vgpr24_vgpr25_vgpr26_vgpr27_vgpr28_vgpr29_vgpr30_vgpr31_vgpr32_vgpr33_vgpr34_vgpr35_vgpr36_vgpr37_vgpr38_vgpr39_vgpr40_vgpr41
+; GFX11-TRUE16-NEXT: ; implicit-def: $vgpr15_vgpr16_vgpr17_vgpr18_vgpr19_vgpr20_vgpr21_vgpr22_vgpr23_vgpr24_vgpr25_vgpr26_vgpr27_vgpr28_vgpr29_vgpr30_vgpr31_vgpr32_vgpr33_vgpr34_vgpr35_vgpr36_vgpr37_vgpr38_vgpr39_vgpr40_vgpr41_vgpr42_vgpr43_vgpr44_vgpr45_vgpr46
+; GFX11-TRUE16-NEXT: ; implicit-def: $vgpr21_vgpr22_vgpr23_vgpr24_vgpr25_vgpr26_vgpr27_vgpr28_vgpr29_vgpr30_vgpr31_vgpr32_vgpr33_vgpr34_vgpr35_vgpr36_vgpr37_vgpr38_vgpr39_vgpr40_vgpr41_vgpr42_vgpr43_vgpr44_vgpr45_vgpr46_vgpr47_vgpr48_vgpr49_vgpr50_vgpr51_vgpr52
+; GFX11-TRUE16-NEXT: s_delay_alu instid0(VALU_DEP_1)
+; GFX11-TRUE16-NEXT: v_mov_b32_e32 v25, v53
+; GFX11-TRUE16-NEXT: ; implicit-def: $vgpr28_vgpr29_vgpr30_vgpr31_vgpr32_vgpr33_vgpr34_vgpr35_vgpr36_vgpr37_vgpr38_vgpr39_vgpr40_vgpr41_vgpr42_vgpr43_vgpr44_vgpr45_vgpr46_vgpr47_vgpr48_vgpr49_vgpr50_vgpr51_vgpr52_vgpr53_vgpr54_vgpr55_vgpr56_vgpr57_vgpr58_vgpr59
+; GFX11-TRUE16-NEXT: ; implicit-def: $vgpr36_vgpr37_vgpr38_vgpr39_vgpr40_vgpr41_vgpr42_vgpr43_vgpr44_vgpr45_vgpr46_vgpr47_vgpr48_vgpr49_vgpr50_vgpr51_vgpr52_vgpr53_vgpr54_vgpr55_vgpr56_vgpr57_vgpr58_vgpr59_vgpr60_vgpr61_vgpr62_vgpr63_vgpr64_vgpr65_vgpr66_vgpr67
+; GFX11-TRUE16-NEXT: ; implicit-def: $vgpr45_vgpr46_vgpr47_vgpr48_vgpr49_vgpr50_vgpr51_vgpr52_vgpr53_vgpr54_vgpr55_vgpr56_vgpr57_vgpr58_vgpr59_vgpr60_vgpr61_vgpr62_vgpr63_vgpr64_vgpr65_vgpr66_vgpr67_vgpr68_vgpr69_vgpr70_vgpr71_vgpr72_vgpr73_vgpr74_vgpr75_vgpr76
+; GFX11-TRUE16-NEXT: ; implicit-def: $vgpr55_vgpr56_vgpr57_vgpr58_vgpr59_vgpr60_vgpr61_vgpr62_vgpr63_vgpr64_vgpr65_vgpr66_vgpr67_vgpr68_vgpr69_vgpr70_vgpr71_vgpr72_vgpr73_vgpr74_vgpr75_vgpr76_vgpr77_vgpr78_vgpr79_vgpr80_vgpr81_vgpr82_vgpr83_vgpr84_vgpr85_vgpr86
+; GFX11-TRUE16-NEXT: ; implicit-def: $vgpr66_vgpr67_vgpr68_vgpr69_vgpr70_vgpr71_vgpr72_vgpr73_vgpr74_vgpr75_vgpr76_vgpr77_vgpr78_vgpr79_vgpr80_vgpr81_vgpr82_vgpr83_vgpr84_vgpr85_vgpr86_vgpr87_vgpr88_vgpr89_vgpr90_vgpr91_vgpr92_vgpr93_vgpr94_vgpr95_vgpr96_vgpr97
+; GFX11-TRUE16-NEXT: ; implicit-def: $vgpr78_vgpr79_vgpr80_vgpr81_vgpr82_vgpr83_vgpr84_vgpr85_vgpr86_vgpr87_vgpr88_vgpr89_vgpr90_vgpr91_vgpr92_vgpr93_vgpr94_vgpr95_vgpr96_vgpr97_vgpr98_vgpr99_vgpr100_vgpr101_vgpr102_vgpr103_vgpr104_vgpr105_vgpr106_vgpr107_vgpr108_vgpr109
+; GFX11-TRUE16-NEXT: ; implicit-def: $vgpr91_vgpr92_vgpr93_vgpr94_vgpr95_vgpr96_vgpr97_vgpr98_vgpr99_vgpr100_vgpr101_vgpr102_vgpr103_vgpr104_vgpr105_vgpr106_vgpr107_vgpr108_vgpr109_vgpr110_vgpr111_vgpr112_vgpr113_vgpr114_vgpr115_vgpr116_vgpr117_vgpr118_vgpr119_vgpr120_vgpr121_vgpr122
+; GFX11-TRUE16-NEXT: ; implicit-def: $vgpr105_vgpr106_vgpr107_vgpr108_vgpr109_vgpr110_vgpr111_vgpr112_vgpr113_vgpr114_vgpr115_vgpr116_vgpr117_vgpr118_vgpr119_vgpr120_vgpr121_vgpr122_vgpr123_vgpr124_vgpr125_vgpr126_vgpr127_vgpr128_vgpr129_vgpr130_vgpr131_vgpr132_vgpr133_vgpr134_vgpr135_vgpr136
+; GFX11-TRUE16-NEXT: ; implicit-def: $vgpr120_vgpr121_vgpr122_vgpr123_vgpr124_vgpr125_vgpr126_vgpr127_vgpr128_vgpr129_vgpr130_vgpr131_vgpr132_vgpr133_vgpr134_vgpr135_vgpr136_vgpr137_vgpr138_vgpr139_vgpr140_vgpr141_vgpr142_vgpr143_vgpr144_vgpr145_vgpr146_vgpr147_vgpr148_vgpr149_vgpr150_vgpr151
+; GFX11-TRUE16-NEXT: ; implicit-def: $vgpr136_vgpr137_vgpr138_vgpr139_vgpr140_vgpr141_vgpr142_vgpr143_vgpr144_vgpr145_vgpr146_vgpr147_vgpr148_vgpr149_vgpr150_vgpr151_vgpr152_vgpr153_vgpr154_vgpr155_vgpr156_vgpr157_vgpr158_vgpr159_vgpr160_vgpr161_vgpr162_vgpr163_vgpr164_vgpr165_vgpr166_vgpr167
+; GFX11-TRUE16-NEXT: ; implicit-def: $vgpr153_vgpr154_vgpr155_vgpr156_vgpr157_vgpr158_vgpr159_vgpr160_vgpr161_vgpr162_vgpr163_vgpr164_vgpr165_vgpr166_vgpr167_vgpr168_vgpr169_vgpr170_vgpr171_vgpr172_vgpr173_vgpr174_vgpr175_vgpr176_vgpr177_vgpr178_vgpr179_vgpr180_vgpr181_vgpr182_vgpr183_vgpr184
; GFX11-TRUE16-NEXT: s_branch .LBB47_2
;
; GFX11-FAKE16-LABEL: bitcast_v52f16_to_v13i64_scalar:
@@ -32378,178 +33698,340 @@ define inreg <52 x i16> @bitcast_v13f64_to_v52i16_scalar(<13 x double> inreg %a,
; GFX9-NEXT: ; implicit-def: $vgpr25
; GFX9-NEXT: s_branch .LBB49_2
;
-; GFX11-LABEL: bitcast_v13f64_to_v52i16_scalar:
-; GFX11: ; %bb.0:
-; GFX11-NEXT: s_waitcnt vmcnt(0) expcnt(0) lgkmcnt(0)
-; GFX11-NEXT: v_cmp_ne_u32_e32 vcc_lo, 0, v8
-; GFX11-NEXT: v_dual_mov_b32 v26, s0 :: v_dual_mov_b32 v27, s1
-; GFX11-NEXT: v_dual_mov_b32 v24, s2 :: v_dual_mov_b32 v25, s3
-; GFX11-NEXT: v_dual_mov_b32 v22, s16 :: v_dual_mov_b32 v23, s17
-; GFX11-NEXT: v_dual_mov_b32 v20, s18 :: v_dual_mov_b32 v21, s19
-; GFX11-NEXT: v_dual_mov_b32 v9, s20 :: v_dual_mov_b32 v10, s21
-; GFX11-NEXT: v_dual_mov_b32 v18, s22 :: v_dual_mov_b32 v19, s23
-; GFX11-NEXT: v_dual_mov_b32 v13, s24 :: v_dual_mov_b32 v14, s25
-; GFX11-NEXT: v_dual_mov_b32 v11, s26 :: v_dual_mov_b32 v12, s27
-; GFX11-NEXT: v_dual_mov_b32 v16, s28 :: v_dual_mov_b32 v17, s29
-; GFX11-NEXT: s_mov_b32 s0, 0
-; GFX11-NEXT: s_and_b32 s1, vcc_lo, exec_lo
-; GFX11-NEXT: s_cbranch_scc0 .LBB49_4
-; GFX11-NEXT: ; %bb.1: ; %cmp.false
-; GFX11-NEXT: v_lshrrev_b32_e32 v34, 16, v7
-; GFX11-NEXT: v_lshrrev_b32_e32 v35, 16, v6
-; GFX11-NEXT: v_lshrrev_b32_e32 v36, 16, v5
-; GFX11-NEXT: v_lshrrev_b32_e32 v37, 16, v4
-; GFX11-NEXT: v_lshrrev_b32_e32 v38, 16, v3
-; GFX11-NEXT: v_lshrrev_b32_e32 v39, 16, v2
-; GFX11-NEXT: v_lshrrev_b32_e32 v48, 16, v1
-; GFX11-NEXT: v_lshrrev_b32_e32 v49, 16, v0
-; GFX11-NEXT: v_lshrrev_b32_e32 v50, 16, v17
-; GFX11-NEXT: v_lshrrev_b32_e32 v51, 16, v16
-; GFX11-NEXT: v_lshrrev_b32_e32 v15, 16, v12
-; GFX11-NEXT: v_lshrrev_b32_e32 v52, 16, v11
-; GFX11-NEXT: v_lshrrev_b32_e32 v53, 16, v14
-; GFX11-NEXT: v_lshrrev_b32_e32 v54, 16, v13
-; GFX11-NEXT: v_lshrrev_b32_e32 v55, 16, v19
-; GFX11-NEXT: v_lshrrev_b32_e32 v64, 16, v18
-; GFX11-NEXT: v_lshrrev_b32_e32 v65, 16, v10
-; GFX11-NEXT: v_lshrrev_b32_e32 v8, 16, v9
-; GFX11-NEXT: v_lshrrev_b32_e32 v31, 16, v21
-; GFX11-NEXT: v_lshrrev_b32_e32 v30, 16, v20
-; GFX11-NEXT: v_lshrrev_b32_e32 v29, 16, v23
-; GFX11-NEXT: v_lshrrev_b32_e32 v28, 16, v22
-; GFX11-NEXT: v_lshrrev_b32_e32 v66, 16, v25
-; GFX11-NEXT: v_lshrrev_b32_e32 v67, 16, v24
-; GFX11-NEXT: v_lshrrev_b32_e32 v33, 16, v27
-; GFX11-NEXT: v_lshrrev_b32_e32 v32, 16, v26
-; GFX11-NEXT: s_and_not1_b32 vcc_lo, exec_lo, s0
-; GFX11-NEXT: s_cbranch_vccnz .LBB49_3
-; GFX11-NEXT: .LBB49_2: ; %cmp.true
-; GFX11-NEXT: v_add_f64 v[6:7], v[6:7], 1.0
-; GFX11-NEXT: v_add_f64 v[4:5], v[4:5], 1.0
-; GFX11-NEXT: v_add_f64 v[2:3], v[2:3], 1.0
-; GFX11-NEXT: v_add_f64 v[0:1], v[0:1], 1.0
-; GFX11-NEXT: v_add_f64 v[16:17], v[16:17], 1.0
-; GFX11-NEXT: v_add_f64 v[11:12], v[11:12], 1.0
-; GFX11-NEXT: v_add_f64 v[13:14], v[13:14], 1.0
-; GFX11-NEXT: v_add_f64 v[18:19], v[18:19], 1.0
-; GFX11-NEXT: v_add_f64 v[9:10], v[9:10], 1.0
-; GFX11-NEXT: v_add_f64 v[20:21], v[20:21], 1.0
-; GFX11-NEXT: v_add_f64 v[22:23], v[22:23], 1.0
-; GFX11-NEXT: v_add_f64 v[24:25], v[24:25], 1.0
-; GFX11-NEXT: v_add_f64 v[26:27], v[26:27], 1.0
-; GFX11-NEXT: v_lshrrev_b32_e32 v34, 16, v7
-; GFX11-NEXT: v_lshrrev_b32_e32 v35, 16, v6
-; GFX11-NEXT: v_lshrrev_b32_e32 v36, 16, v5
-; GFX11-NEXT: v_lshrrev_b32_e32 v37, 16, v4
-; GFX11-NEXT: v_lshrrev_b32_e32 v38, 16, v3
-; GFX11-NEXT: v_lshrrev_b32_e32 v39, 16, v2
-; GFX11-NEXT: v_lshrrev_b32_e32 v48, 16, v1
-; GFX11-NEXT: v_lshrrev_b32_e32 v49, 16, v0
-; GFX11-NEXT: v_lshrrev_b32_e32 v50, 16, v17
-; GFX11-NEXT: v_lshrrev_b32_e32 v51, 16, v16
-; GFX11-NEXT: v_lshrrev_b32_e32 v15, 16, v12
-; GFX11-NEXT: v_lshrrev_b32_e32 v52, 16, v11
-; GFX11-NEXT: v_lshrrev_b32_e32 v53, 16, v14
-; GFX11-NEXT: v_lshrrev_b32_e32 v54, 16, v13
-; GFX11-NEXT: v_lshrrev_b32_e32 v55, 16, v19
-; GFX11-NEXT: v_lshrrev_b32_e32 v64, 16, v18
-; GFX11-NEXT: v_lshrrev_b32_e32 v65, 16, v10
-; GFX11-NEXT: v_lshrrev_b32_e32 v8, 16, v9
-; GFX11-NEXT: v_lshrrev_b32_e32 v31, 16, v21
-; GFX11-NEXT: v_lshrrev_b32_e32 v30, 16, v20
-; GFX11-NEXT: v_lshrrev_b32_e32 v29, 16, v23
-; GFX11-NEXT: v_lshrrev_b32_e32 v28, 16, v22
-; GFX11-NEXT: v_lshrrev_b32_e32 v66, 16, v25
-; GFX11-NEXT: v_lshrrev_b32_e32 v67, 16, v24
-; GFX11-NEXT: v_lshrrev_b32_e32 v33, 16, v27
-; GFX11-NEXT: v_lshrrev_b32_e32 v32, 16, v26
-; GFX11-NEXT: .LBB49_3: ; %end
-; GFX11-NEXT: v_and_b32_e32 v21, 0xffff, v21
-; GFX11-NEXT: v_and_b32_e32 v9, 0xffff, v9
-; GFX11-NEXT: v_and_b32_e32 v10, 0xffff, v10
-; GFX11-NEXT: v_and_b32_e32 v18, 0xffff, v18
-; GFX11-NEXT: v_and_b32_e32 v27, 0xffff, v27
-; GFX11-NEXT: v_and_b32_e32 v25, 0xffff, v25
-; GFX11-NEXT: v_and_b32_e32 v23, 0xffff, v23
-; GFX11-NEXT: v_lshl_or_b32 v31, v31, 16, v21
-; GFX11-NEXT: v_and_b32_e32 v20, 0xffff, v20
-; GFX11-NEXT: v_lshl_or_b32 v8, v8, 16, v9
-; GFX11-NEXT: v_lshl_or_b32 v9, v65, 16, v10
-; GFX11-NEXT: v_lshl_or_b32 v10, v64, 16, v18
-; GFX11-NEXT: v_and_b32_e32 v18, 0xffff, v19
-; GFX11-NEXT: v_and_b32_e32 v13, 0xffff, v13
-; GFX11-NEXT: v_and_b32_e32 v14, 0xffff, v14
-; GFX11-NEXT: v_and_b32_e32 v19, 0xffff, v11
-; GFX11-NEXT: v_and_b32_e32 v1, 0xffff, v1
-; GFX11-NEXT: v_lshl_or_b32 v33, v33, 16, v27
-; GFX11-NEXT: v_and_b32_e32 v26, 0xffff, v26
-; GFX11-NEXT: v_lshl_or_b32 v27, v66, 16, v25
-; GFX11-NEXT: v_and_b32_e32 v24, 0xffff, v24
-; GFX11-NEXT: v_lshl_or_b32 v29, v29, 16, v23
-; GFX11-NEXT: v_and_b32_e32 v22, 0xffff, v22
-; GFX11-NEXT: v_lshl_or_b32 v30, v30, 16, v20
-; GFX11-NEXT: v_and_b32_e32 v20, 0xffff, v12
-; GFX11-NEXT: v_lshl_or_b32 v12, v54, 16, v13
-; GFX11-NEXT: v_lshl_or_b32 v13, v53, 16, v14
-; GFX11-NEXT: v_lshl_or_b32 v14, v52, 16, v19
-; GFX11-NEXT: v_and_b32_e32 v0, 0xffff, v0
-; GFX11-NEXT: v_and_b32_e32 v2, 0xffff, v2
-; GFX11-NEXT: v_lshl_or_b32 v19, v48, 16, v1
-; GFX11-NEXT: v_and_b32_e32 v1, 0xffff, v4
-; GFX11-NEXT: v_lshl_or_b32 v11, v55, 16, v18
-; GFX11-NEXT: v_lshl_or_b32 v15, v15, 16, v20
-; GFX11-NEXT: v_and_b32_e32 v16, 0xffff, v16
-; GFX11-NEXT: v_and_b32_e32 v17, 0xffff, v17
-; GFX11-NEXT: v_lshl_or_b32 v18, v49, 16, v0
-; GFX11-NEXT: v_lshl_or_b32 v20, v39, 16, v2
-; GFX11-NEXT: v_and_b32_e32 v0, 0xffff, v3
-; GFX11-NEXT: v_and_b32_e32 v2, 0xffff, v5
-; GFX11-NEXT: v_and_b32_e32 v3, 0xffff, v6
-; GFX11-NEXT: v_dual_mov_b32 v5, v29 :: v_dual_and_b32 v4, 0xffff, v7
-; GFX11-NEXT: v_lshl_or_b32 v28, v28, 16, v22
-; GFX11-NEXT: v_lshl_or_b32 v22, v37, 16, v1
-; GFX11-NEXT: v_mov_b32_e32 v1, v33
-; GFX11-NEXT: v_lshl_or_b32 v32, v32, 16, v26
-; GFX11-NEXT: v_lshl_or_b32 v26, v67, 16, v24
-; GFX11-NEXT: v_lshl_or_b32 v16, v51, 16, v16
-; GFX11-NEXT: v_lshl_or_b32 v17, v50, 16, v17
-; GFX11-NEXT: v_lshl_or_b32 v21, v38, 16, v0
-; GFX11-NEXT: v_lshl_or_b32 v23, v36, 16, v2
-; GFX11-NEXT: v_lshl_or_b32 v24, v35, 16, v3
-; GFX11-NEXT: v_lshl_or_b32 v25, v34, 16, v4
-; GFX11-NEXT: v_mov_b32_e32 v0, v32
-; GFX11-NEXT: v_dual_mov_b32 v2, v26 :: v_dual_mov_b32 v3, v27
-; GFX11-NEXT: v_mov_b32_e32 v4, v28
-; GFX11-NEXT: v_dual_mov_b32 v6, v30 :: v_dual_mov_b32 v7, v31
-; GFX11-NEXT: s_setpc_b64 s[30:31]
-; GFX11-NEXT: .LBB49_4:
-; GFX11-NEXT: ; implicit-def: $vgpr32
-; GFX11-NEXT: ; implicit-def: $vgpr33
-; GFX11-NEXT: ; implicit-def: $vgpr67
-; GFX11-NEXT: ; implicit-def: $vgpr66
-; GFX11-NEXT: ; implicit-def: $vgpr28
-; GFX11-NEXT: ; implicit-def: $vgpr29
-; GFX11-NEXT: ; implicit-def: $vgpr30
-; GFX11-NEXT: ; implicit-def: $vgpr31
-; GFX11-NEXT: ; implicit-def: $vgpr8
-; GFX11-NEXT: ; implicit-def: $vgpr65
-; GFX11-NEXT: ; implicit-def: $vgpr64
-; GFX11-NEXT: ; implicit-def: $vgpr55
-; GFX11-NEXT: ; implicit-def: $vgpr54
-; GFX11-NEXT: ; implicit-def: $vgpr53
-; GFX11-NEXT: ; implicit-def: $vgpr52
-; GFX11-NEXT: ; implicit-def: $vgpr15
-; GFX11-NEXT: ; implicit-def: $vgpr51
-; GFX11-NEXT: ; implicit-def: $vgpr50
-; GFX11-NEXT: ; implicit-def: $vgpr49
-; GFX11-NEXT: ; implicit-def: $vgpr48
-; GFX11-NEXT: ; implicit-def: $vgpr39
-; GFX11-NEXT: ; implicit-def: $vgpr38
-; GFX11-NEXT: ; implicit-def: $vgpr37
-; GFX11-NEXT: ; implicit-def: $vgpr36
-; GFX11-NEXT: ; implicit-def: $vgpr35
-; GFX11-NEXT: ; implicit-def: $vgpr34
-; GFX11-NEXT: s_branch .LBB49_2
+; GFX11-TRUE16-LABEL: bitcast_v13f64_to_v52i16_scalar:
+; GFX11-TRUE16: ; %bb.0:
+; GFX11-TRUE16-NEXT: s_waitcnt vmcnt(0) expcnt(0) lgkmcnt(0)
+; GFX11-TRUE16-NEXT: v_dual_mov_b32 v16, v8 :: v_dual_mov_b32 v25, v7
+; GFX11-TRUE16-NEXT: v_dual_mov_b32 v24, v6 :: v_dual_mov_b32 v23, v5
+; GFX11-TRUE16-NEXT: v_dual_mov_b32 v22, v4 :: v_dual_mov_b32 v21, v3
+; GFX11-TRUE16-NEXT: s_delay_alu instid0(VALU_DEP_3)
+; GFX11-TRUE16-NEXT: v_cmp_ne_u32_e32 vcc_lo, 0, v16
+; GFX11-TRUE16-NEXT: v_dual_mov_b32 v20, v2 :: v_dual_mov_b32 v19, v1
+; GFX11-TRUE16-NEXT: v_dual_mov_b32 v18, v0 :: v_dual_mov_b32 v1, s1
+; GFX11-TRUE16-NEXT: v_dual_mov_b32 v0, s0 :: v_dual_mov_b32 v3, s3
+; GFX11-TRUE16-NEXT: v_dual_mov_b32 v2, s2 :: v_dual_mov_b32 v5, s17
+; GFX11-TRUE16-NEXT: v_dual_mov_b32 v4, s16 :: v_dual_mov_b32 v7, s19
+; GFX11-TRUE16-NEXT: v_dual_mov_b32 v6, s18 :: v_dual_mov_b32 v9, s21
+; GFX11-TRUE16-NEXT: v_dual_mov_b32 v8, s20 :: v_dual_mov_b32 v11, s23
+; GFX11-TRUE16-NEXT: v_dual_mov_b32 v10, s22 :: v_dual_mov_b32 v13, s25
+; GFX11-TRUE16-NEXT: v_dual_mov_b32 v12, s24 :: v_dual_mov_b32 v15, s27
+; GFX11-TRUE16-NEXT: v_dual_mov_b32 v14, s26 :: v_dual_mov_b32 v17, s29
+; GFX11-TRUE16-NEXT: v_mov_b32_e32 v16, s28
+; GFX11-TRUE16-NEXT: s_mov_b32 s0, 0
+; GFX11-TRUE16-NEXT: s_and_b32 s1, vcc_lo, exec_lo
+; GFX11-TRUE16-NEXT: s_cbranch_scc0 .LBB49_4
+; GFX11-TRUE16-NEXT: ; %bb.1: ; %cmp.false
+; GFX11-TRUE16-NEXT: v_lshrrev_b32_e32 v26, 16, v25
+; GFX11-TRUE16-NEXT: v_lshrrev_b32_e32 v27, 16, v24
+; GFX11-TRUE16-NEXT: v_lshrrev_b32_e32 v28, 16, v23
+; GFX11-TRUE16-NEXT: v_lshrrev_b32_e32 v29, 16, v22
+; GFX11-TRUE16-NEXT: v_lshrrev_b32_e32 v30, 16, v21
+; GFX11-TRUE16-NEXT: v_lshrrev_b32_e32 v31, 16, v20
+; GFX11-TRUE16-NEXT: v_lshrrev_b32_e32 v32, 16, v19
+; GFX11-TRUE16-NEXT: v_lshrrev_b32_e32 v33, 16, v18
+; GFX11-TRUE16-NEXT: v_lshrrev_b32_e32 v34, 16, v17
+; GFX11-TRUE16-NEXT: v_lshrrev_b32_e32 v35, 16, v16
+; GFX11-TRUE16-NEXT: v_lshrrev_b32_e32 v36, 16, v15
+; GFX11-TRUE16-NEXT: v_lshrrev_b32_e32 v37, 16, v14
+; GFX11-TRUE16-NEXT: v_lshrrev_b32_e32 v38, 16, v13
+; GFX11-TRUE16-NEXT: v_lshrrev_b32_e32 v39, 16, v12
+; GFX11-TRUE16-NEXT: v_lshrrev_b32_e32 v48, 16, v11
+; GFX11-TRUE16-NEXT: v_lshrrev_b32_e32 v49, 16, v10
+; GFX11-TRUE16-NEXT: v_lshrrev_b32_e32 v50, 16, v9
+; GFX11-TRUE16-NEXT: v_lshrrev_b32_e32 v51, 16, v8
+; GFX11-TRUE16-NEXT: v_lshrrev_b32_e32 v52, 16, v7
+; GFX11-TRUE16-NEXT: v_lshrrev_b32_e32 v53, 16, v6
+; GFX11-TRUE16-NEXT: v_lshrrev_b32_e32 v54, 16, v5
+; GFX11-TRUE16-NEXT: v_lshrrev_b32_e32 v55, 16, v4
+; GFX11-TRUE16-NEXT: v_lshrrev_b32_e32 v64, 16, v3
+; GFX11-TRUE16-NEXT: v_lshrrev_b32_e32 v65, 16, v2
+; GFX11-TRUE16-NEXT: v_lshrrev_b32_e32 v66, 16, v1
+; GFX11-TRUE16-NEXT: v_lshrrev_b32_e32 v67, 16, v0
+; GFX11-TRUE16-NEXT: s_and_not1_b32 vcc_lo, exec_lo, s0
+; GFX11-TRUE16-NEXT: s_cbranch_vccnz .LBB49_3
+; GFX11-TRUE16-NEXT: .LBB49_2: ; %cmp.true
+; GFX11-TRUE16-NEXT: v_add_f64 v[24:25], v[24:25], 1.0
+; GFX11-TRUE16-NEXT: v_add_f64 v[22:23], v[22:23], 1.0
+; GFX11-TRUE16-NEXT: v_add_f64 v[20:21], v[20:21], 1.0
+; GFX11-TRUE16-NEXT: v_add_f64 v[18:19], v[18:19], 1.0
+; GFX11-TRUE16-NEXT: v_add_f64 v[16:17], v[16:17], 1.0
+; GFX11-TRUE16-NEXT: v_add_f64 v[14:15], v[14:15], 1.0
+; GFX11-TRUE16-NEXT: v_add_f64 v[12:13], v[12:13], 1.0
+; GFX11-TRUE16-NEXT: v_add_f64 v[10:11], v[10:11], 1.0
+; GFX11-TRUE16-NEXT: v_add_f64 v[8:9], v[8:9], 1.0
+; GFX11-TRUE16-NEXT: v_add_f64 v[6:7], v[6:7], 1.0
+; GFX11-TRUE16-NEXT: v_add_f64 v[4:5], v[4:5], 1.0
+; GFX11-TRUE16-NEXT: v_add_f64 v[2:3], v[2:3], 1.0
+; GFX11-TRUE16-NEXT: v_add_f64 v[0:1], v[0:1], 1.0
+; GFX11-TRUE16-NEXT: v_lshrrev_b32_e32 v26, 16, v25
+; GFX11-TRUE16-NEXT: v_lshrrev_b32_e32 v27, 16, v24
+; GFX11-TRUE16-NEXT: v_lshrrev_b32_e32 v28, 16, v23
+; GFX11-TRUE16-NEXT: v_lshrrev_b32_e32 v29, 16, v22
+; GFX11-TRUE16-NEXT: v_lshrrev_b32_e32 v30, 16, v21
+; GFX11-TRUE16-NEXT: v_lshrrev_b32_e32 v31, 16, v20
+; GFX11-TRUE16-NEXT: v_lshrrev_b32_e32 v32, 16, v19
+; GFX11-TRUE16-NEXT: v_lshrrev_b32_e32 v33, 16, v18
+; GFX11-TRUE16-NEXT: v_lshrrev_b32_e32 v34, 16, v17
+; GFX11-TRUE16-NEXT: v_lshrrev_b32_e32 v35, 16, v16
+; GFX11-TRUE16-NEXT: v_lshrrev_b32_e32 v36, 16, v15
+; GFX11-TRUE16-NEXT: v_lshrrev_b32_e32 v37, 16, v14
+; GFX11-TRUE16-NEXT: v_lshrrev_b32_e32 v38, 16, v13
+; GFX11-TRUE16-NEXT: v_lshrrev_b32_e32 v39, 16, v12
+; GFX11-TRUE16-NEXT: v_lshrrev_b32_e32 v48, 16, v11
+; GFX11-TRUE16-NEXT: v_lshrrev_b32_e32 v49, 16, v10
+; GFX11-TRUE16-NEXT: v_lshrrev_b32_e32 v50, 16, v9
+; GFX11-TRUE16-NEXT: v_lshrrev_b32_e32 v51, 16, v8
+; GFX11-TRUE16-NEXT: v_lshrrev_b32_e32 v52, 16, v7
+; GFX11-TRUE16-NEXT: v_lshrrev_b32_e32 v53, 16, v6
+; GFX11-TRUE16-NEXT: v_lshrrev_b32_e32 v54, 16, v5
+; GFX11-TRUE16-NEXT: v_lshrrev_b32_e32 v55, 16, v4
+; GFX11-TRUE16-NEXT: v_lshrrev_b32_e32 v64, 16, v3
+; GFX11-TRUE16-NEXT: v_lshrrev_b32_e32 v65, 16, v2
+; GFX11-TRUE16-NEXT: v_lshrrev_b32_e32 v66, 16, v1
+; GFX11-TRUE16-NEXT: v_lshrrev_b32_e32 v67, 16, v0
+; GFX11-TRUE16-NEXT: .LBB49_3: ; %end
+; GFX11-TRUE16-NEXT: s_delay_alu instid0(VALU_DEP_1) | instskip(NEXT) | instid1(VALU_DEP_4)
+; GFX11-TRUE16-NEXT: v_dual_mov_b32 v67, v67 :: v_dual_mov_b32 v66, v66
+; GFX11-TRUE16-NEXT: v_dual_mov_b32 v65, v65 :: v_dual_mov_b32 v64, v64
+; GFX11-TRUE16-NEXT: v_dual_mov_b32 v55, v55 :: v_dual_mov_b32 v54, v54
+; GFX11-TRUE16-NEXT: v_dual_mov_b32 v53, v53 :: v_dual_mov_b32 v52, v52
+; GFX11-TRUE16-NEXT: v_dual_mov_b32 v51, v51 :: v_dual_mov_b32 v50, v50
+; GFX11-TRUE16-NEXT: v_dual_mov_b32 v49, v49 :: v_dual_mov_b32 v48, v48
+; GFX11-TRUE16-NEXT: v_dual_mov_b32 v39, v39 :: v_dual_mov_b32 v38, v38
+; GFX11-TRUE16-NEXT: v_dual_mov_b32 v37, v37 :: v_dual_mov_b32 v36, v36
+; GFX11-TRUE16-NEXT: v_dual_mov_b32 v35, v35 :: v_dual_mov_b32 v34, v34
+; GFX11-TRUE16-NEXT: v_dual_mov_b32 v33, v33 :: v_dual_mov_b32 v32, v32
+; GFX11-TRUE16-NEXT: v_dual_mov_b32 v31, v31 :: v_dual_mov_b32 v30, v30
+; GFX11-TRUE16-NEXT: v_dual_mov_b32 v29, v29 :: v_dual_mov_b32 v28, v28
+; GFX11-TRUE16-NEXT: v_dual_mov_b32 v27, v27 :: v_dual_mov_b32 v26, v26
+; GFX11-TRUE16-NEXT: v_mov_b16_e32 v0.h, v67.l
+; GFX11-TRUE16-NEXT: v_mov_b16_e32 v1.h, v66.l
+; GFX11-TRUE16-NEXT: v_mov_b16_e32 v2.h, v65.l
+; GFX11-TRUE16-NEXT: v_mov_b16_e32 v3.h, v64.l
+; GFX11-TRUE16-NEXT: v_mov_b16_e32 v4.h, v55.l
+; GFX11-TRUE16-NEXT: v_mov_b16_e32 v5.h, v54.l
+; GFX11-TRUE16-NEXT: v_mov_b16_e32 v6.h, v53.l
+; GFX11-TRUE16-NEXT: v_mov_b16_e32 v7.h, v52.l
+; GFX11-TRUE16-NEXT: v_mov_b16_e32 v8.h, v51.l
+; GFX11-TRUE16-NEXT: v_mov_b16_e32 v9.h, v50.l
+; GFX11-TRUE16-NEXT: v_mov_b16_e32 v10.h, v49.l
+; GFX11-TRUE16-NEXT: v_mov_b16_e32 v11.h, v48.l
+; GFX11-TRUE16-NEXT: v_mov_b16_e32 v12.h, v39.l
+; GFX11-TRUE16-NEXT: v_mov_b16_e32 v13.h, v38.l
+; GFX11-TRUE16-NEXT: v_mov_b16_e32 v14.h, v37.l
+; GFX11-TRUE16-NEXT: v_mov_b16_e32 v15.h, v36.l
+; GFX11-TRUE16-NEXT: v_mov_b16_e32 v16.h, v35.l
+; GFX11-TRUE16-NEXT: v_mov_b16_e32 v17.h, v34.l
+; GFX11-TRUE16-NEXT: v_mov_b16_e32 v18.h, v33.l
+; GFX11-TRUE16-NEXT: v_mov_b16_e32 v19.h, v32.l
+; GFX11-TRUE16-NEXT: v_mov_b16_e32 v20.h, v31.l
+; GFX11-TRUE16-NEXT: v_mov_b16_e32 v21.h, v30.l
+; GFX11-TRUE16-NEXT: v_mov_b16_e32 v22.h, v29.l
+; GFX11-TRUE16-NEXT: v_mov_b16_e32 v23.h, v28.l
+; GFX11-TRUE16-NEXT: v_mov_b16_e32 v24.h, v27.l
+; GFX11-TRUE16-NEXT: v_mov_b16_e32 v25.h, v26.l
+; GFX11-TRUE16-NEXT: s_setpc_b64 s[30:31]
+; GFX11-TRUE16-NEXT: .LBB49_4:
+; GFX11-TRUE16-NEXT: ; implicit-def: $vgpr67
+; GFX11-TRUE16-NEXT: ; implicit-def: $vgpr66
+; GFX11-TRUE16-NEXT: ; implicit-def: $vgpr65
+; GFX11-TRUE16-NEXT: ; implicit-def: $vgpr64
+; GFX11-TRUE16-NEXT: ; implicit-def: $vgpr55
+; GFX11-TRUE16-NEXT: ; implicit-def: $vgpr54
+; GFX11-TRUE16-NEXT: ; implicit-def: $vgpr53
+; GFX11-TRUE16-NEXT: ; implicit-def: $vgpr52
+; GFX11-TRUE16-NEXT: ; implicit-def: $vgpr51
+; GFX11-TRUE16-NEXT: ; implicit-def: $vgpr50
+; GFX11-TRUE16-NEXT: ; implicit-def: $vgpr49
+; GFX11-TRUE16-NEXT: ; implicit-def: $vgpr48
+; GFX11-TRUE16-NEXT: ; implicit-def: $vgpr39
+; GFX11-TRUE16-NEXT: ; implicit-def: $vgpr38
+; GFX11-TRUE16-NEXT: ; implicit-def: $vgpr37
+; GFX11-TRUE16-NEXT: ; implicit-def: $vgpr36
+; GFX11-TRUE16-NEXT: ; implicit-def: $vgpr35
+; GFX11-TRUE16-NEXT: ; implicit-def: $vgpr34
+; GFX11-TRUE16-NEXT: ; implicit-def: $vgpr33
+; GFX11-TRUE16-NEXT: ; implicit-def: $vgpr32
+; GFX11-TRUE16-NEXT: ; implicit-def: $vgpr31
+; GFX11-TRUE16-NEXT: ; implicit-def: $vgpr30
+; GFX11-TRUE16-NEXT: ; implicit-def: $vgpr29
+; GFX11-TRUE16-NEXT: ; implicit-def: $vgpr28
+; GFX11-TRUE16-NEXT: ; implicit-def: $vgpr27
+; GFX11-TRUE16-NEXT: ; implicit-def: $vgpr26
+; GFX11-TRUE16-NEXT: s_branch .LBB49_2
+;
+; GFX11-FAKE16-LABEL: bitcast_v13f64_to_v52i16_scalar:
+; GFX11-FAKE16: ; %bb.0:
+; GFX11-FAKE16-NEXT: s_waitcnt vmcnt(0) expcnt(0) lgkmcnt(0)
+; GFX11-FAKE16-NEXT: v_cmp_ne_u32_e32 vcc_lo, 0, v8
+; GFX11-FAKE16-NEXT: v_dual_mov_b32 v26, s0 :: v_dual_mov_b32 v27, s1
+; GFX11-FAKE16-NEXT: v_dual_mov_b32 v24, s2 :: v_dual_mov_b32 v25, s3
+; GFX11-FAKE16-NEXT: v_dual_mov_b32 v22, s16 :: v_dual_mov_b32 v23, s17
+; GFX11-FAKE16-NEXT: v_dual_mov_b32 v20, s18 :: v_dual_mov_b32 v21, s19
+; GFX11-FAKE16-NEXT: v_dual_mov_b32 v9, s20 :: v_dual_mov_b32 v10, s21
+; GFX11-FAKE16-NEXT: v_dual_mov_b32 v18, s22 :: v_dual_mov_b32 v19, s23
+; GFX11-FAKE16-NEXT: v_dual_mov_b32 v13, s24 :: v_dual_mov_b32 v14, s25
+; GFX11-FAKE16-NEXT: v_dual_mov_b32 v11, s26 :: v_dual_mov_b32 v12, s27
+; GFX11-FAKE16-NEXT: v_dual_mov_b32 v16, s28 :: v_dual_mov_b32 v17, s29
+; GFX11-FAKE16-NEXT: s_mov_b32 s0, 0
+; GFX11-FAKE16-NEXT: s_and_b32 s1, vcc_lo, exec_lo
+; GFX11-FAKE16-NEXT: s_cbranch_scc0 .LBB49_4
+; GFX11-FAKE16-NEXT: ; %bb.1: ; %cmp.false
+; GFX11-FAKE16-NEXT: v_lshrrev_b32_e32 v34, 16, v7
+; GFX11-FAKE16-NEXT: v_lshrrev_b32_e32 v35, 16, v6
+; GFX11-FAKE16-NEXT: v_lshrrev_b32_e32 v36, 16, v5
+; GFX11-FAKE16-NEXT: v_lshrrev_b32_e32 v37, 16, v4
+; GFX11-FAKE16-NEXT: v_lshrrev_b32_e32 v38, 16, v3
+; GFX11-FAKE16-NEXT: v_lshrrev_b32_e32 v39, 16, v2
+; GFX11-FAKE16-NEXT: v_lshrrev_b32_e32 v48, 16, v1
+; GFX11-FAKE16-NEXT: v_lshrrev_b32_e32 v49, 16, v0
+; GFX11-FAKE16-NEXT: v_lshrrev_b32_e32 v50, 16, v17
+; GFX11-FAKE16-NEXT: v_lshrrev_b32_e32 v51, 16, v16
+; GFX11-FAKE16-NEXT: v_lshrrev_b32_e32 v15, 16, v12
+; GFX11-FAKE16-NEXT: v_lshrrev_b32_e32 v52, 16, v11
+; GFX11-FAKE16-NEXT: v_lshrrev_b32_e32 v53, 16, v14
+; GFX11-FAKE16-NEXT: v_lshrrev_b32_e32 v54, 16, v13
+; GFX11-FAKE16-NEXT: v_lshrrev_b32_e32 v55, 16, v19
+; GFX11-FAKE16-NEXT: v_lshrrev_b32_e32 v64, 16, v18
+; GFX11-FAKE16-NEXT: v_lshrrev_b32_e32 v65, 16, v10
+; GFX11-FAKE16-NEXT: v_lshrrev_b32_e32 v8, 16, v9
+; GFX11-FAKE16-NEXT: v_lshrrev_b32_e32 v31, 16, v21
+; GFX11-FAKE16-NEXT: v_lshrrev_b32_e32 v30, 16, v20
+; GFX11-FAKE16-NEXT: v_lshrrev_b32_e32 v29, 16, v23
+; GFX11-FAKE16-NEXT: v_lshrrev_b32_e32 v28, 16, v22
+; GFX11-FAKE16-NEXT: v_lshrrev_b32_e32 v66, 16, v25
+; GFX11-FAKE16-NEXT: v_lshrrev_b32_e32 v67, 16, v24
+; GFX11-FAKE16-NEXT: v_lshrrev_b32_e32 v33, 16, v27
+; GFX11-FAKE16-NEXT: v_lshrrev_b32_e32 v32, 16, v26
+; GFX11-FAKE16-NEXT: s_and_not1_b32 vcc_lo, exec_lo, s0
+; GFX11-FAKE16-NEXT: s_cbranch_vccnz .LBB49_3
+; GFX11-FAKE16-NEXT: .LBB49_2: ; %cmp.true
+; GFX11-FAKE16-NEXT: v_add_f64 v[6:7], v[6:7], 1.0
+; GFX11-FAKE16-NEXT: v_add_f64 v[4:5], v[4:5], 1.0
+; GFX11-FAKE16-NEXT: v_add_f64 v[2:3], v[2:3], 1.0
+; GFX11-FAKE16-NEXT: v_add_f64 v[0:1], v[0:1], 1.0
+; GFX11-FAKE16-NEXT: v_add_f64 v[16:17], v[16:17], 1.0
+; GFX11-FAKE16-NEXT: v_add_f64 v[11:12], v[11:12], 1.0
+; GFX11-FAKE16-NEXT: v_add_f64 v[13:14], v[13:14], 1.0
+; GFX11-FAKE16-NEXT: v_add_f64 v[18:19], v[18:19], 1.0
+; GFX11-FAKE16-NEXT: v_add_f64 v[9:10], v[9:10], 1.0
+; GFX11-FAKE16-NEXT: v_add_f64 v[20:21], v[20:21], 1.0
+; GFX11-FAKE16-NEXT: v_add_f64 v[22:23], v[22:23], 1.0
+; GFX11-FAKE16-NEXT: v_add_f64 v[24:25], v[24:25], 1.0
+; GFX11-FAKE16-NEXT: v_add_f64 v[26:27], v[26:27], 1.0
+; GFX11-FAKE16-NEXT: v_lshrrev_b32_e32 v34, 16, v7
+; GFX11-FAKE16-NEXT: v_lshrrev_b32_e32 v35, 16, v6
+; GFX11-FAKE16-NEXT: v_lshrrev_b32_e32 v36, 16, v5
+; GFX11-FAKE16-NEXT: v_lshrrev_b32_e32 v37, 16, v4
+; GFX11-FAKE16-NEXT: v_lshrrev_b32_e32 v38, 16, v3
+; GFX11-FAKE16-NEXT: v_lshrrev_b32_e32 v39, 16, v2
+; GFX11-FAKE16-NEXT: v_lshrrev_b32_e32 v48, 16, v1
+; GFX11-FAKE16-NEXT: v_lshrrev_b32_e32 v49, 16, v0
+; GFX11-FAKE16-NEXT: v_lshrrev_b32_e32 v50, 16, v17
+; GFX11-FAKE16-NEXT: v_lshrrev_b32_e32 v51, 16, v16
+; GFX11-FAKE16-NEXT: v_lshrrev_b32_e32 v15, 16, v12
+; GFX11-FAKE16-NEXT: v_lshrrev_b32_e32 v52, 16, v11
+; GFX11-FAKE16-NEXT: v_lshrrev_b32_e32 v53, 16, v14
+; GFX11-FAKE16-NEXT: v_lshrrev_b32_e32 v54, 16, v13
+; GFX11-FAKE16-NEXT: v_lshrrev_b32_e32 v55, 16, v19
+; GFX11-FAKE16-NEXT: v_lshrrev_b32_e32 v64, 16, v18
+; GFX11-FAKE16-NEXT: v_lshrrev_b32_e32 v65, 16, v10
+; GFX11-FAKE16-NEXT: v_lshrrev_b32_e32 v8, 16, v9
+; GFX11-FAKE16-NEXT: v_lshrrev_b32_e32 v31, 16, v21
+; GFX11-FAKE16-NEXT: v_lshrrev_b32_e32 v30, 16, v20
+; GFX11-FAKE16-NEXT: v_lshrrev_b32_e32 v29, 16, v23
+; GFX11-FAKE16-NEXT: v_lshrrev_b32_e32 v28, 16, v22
+; GFX11-FAKE16-NEXT: v_lshrrev_b32_e32 v66, 16, v25
+; GFX11-FAKE16-NEXT: v_lshrrev_b32_e32 v67, 16, v24
+; GFX11-FAKE16-NEXT: v_lshrrev_b32_e32 v33, 16, v27
+; GFX11-FAKE16-NEXT: v_lshrrev_b32_e32 v32, 16, v26
+; GFX11-FAKE16-NEXT: .LBB49_3: ; %end
+; GFX11-FAKE16-NEXT: v_and_b32_e32 v21, 0xffff, v21
+; GFX11-FAKE16-NEXT: v_and_b32_e32 v9, 0xffff, v9
+; GFX11-FAKE16-NEXT: v_and_b32_e32 v10, 0xffff, v10
+; GFX11-FAKE16-NEXT: v_and_b32_e32 v18, 0xffff, v18
+; GFX11-FAKE16-NEXT: v_and_b32_e32 v27, 0xffff, v27
+; GFX11-FAKE16-NEXT: v_and_b32_e32 v25, 0xffff, v25
+; GFX11-FAKE16-NEXT: v_and_b32_e32 v23, 0xffff, v23
+; GFX11-FAKE16-NEXT: v_lshl_or_b32 v31, v31, 16, v21
+; GFX11-FAKE16-NEXT: v_and_b32_e32 v20, 0xffff, v20
+; GFX11-FAKE16-NEXT: v_lshl_or_b32 v8, v8, 16, v9
+; GFX11-FAKE16-NEXT: v_lshl_or_b32 v9, v65, 16, v10
+; GFX11-FAKE16-NEXT: v_lshl_or_b32 v10, v64, 16, v18
+; GFX11-FAKE16-NEXT: v_and_b32_e32 v18, 0xffff, v19
+; GFX11-FAKE16-NEXT: v_and_b32_e32 v13, 0xffff, v13
+; GFX11-FAKE16-NEXT: v_and_b32_e32 v14, 0xffff, v14
+; GFX11-FAKE16-NEXT: v_and_b32_e32 v19, 0xffff, v11
+; GFX11-FAKE16-NEXT: v_and_b32_e32 v1, 0xffff, v1
+; GFX11-FAKE16-NEXT: v_lshl_or_b32 v33, v33, 16, v27
+; GFX11-FAKE16-NEXT: v_and_b32_e32 v26, 0xffff, v26
+; GFX11-FAKE16-NEXT: v_lshl_or_b32 v27, v66, 16, v25
+; GFX11-FAKE16-NEXT: v_and_b32_e32 v24, 0xffff, v24
+; GFX11-FAKE16-NEXT: v_lshl_or_b32 v29, v29, 16, v23
+; GFX11-FAKE16-NEXT: v_and_b32_e32 v22, 0xffff, v22
+; GFX11-FAKE16-NEXT: v_lshl_or_b32 v30, v30, 16, v20
+; GFX11-FAKE16-NEXT: v_and_b32_e32 v20, 0xffff, v12
+; GFX11-FAKE16-NEXT: v_lshl_or_b32 v12, v54, 16, v13
+; GFX11-FAKE16-NEXT: v_lshl_or_b32 v13, v53, 16, v14
+; GFX11-FAKE16-NEXT: v_lshl_or_b32 v14, v52, 16, v19
+; GFX11-FAKE16-NEXT: v_and_b32_e32 v0, 0xffff, v0
+; GFX11-FAKE16-NEXT: v_and_b32_e32 v2, 0xffff, v2
+; GFX11-FAKE16-NEXT: v_lshl_or_b32 v19, v48, 16, v1
+; GFX11-FAKE16-NEXT: v_and_b32_e32 v1, 0xffff, v4
+; GFX11-FAKE16-NEXT: v_lshl_or_b32 v11, v55, 16, v18
+; GFX11-FAKE16-NEXT: v_lshl_or_b32 v15, v15, 16, v20
+; GFX11-FAKE16-NEXT: v_and_b32_e32 v16, 0xffff, v16
+; GFX11-FAKE16-NEXT: v_and_b32_e32 v17, 0xffff, v17
+; GFX11-FAKE16-NEXT: v_lshl_or_b32 v18, v49, 16, v0
+; GFX11-FAKE16-NEXT: v_lshl_or_b32 v20, v39, 16, v2
+; GFX11-FAKE16-NEXT: v_and_b32_e32 v0, 0xffff, v3
+; GFX11-FAKE16-NEXT: v_and_b32_e32 v2, 0xffff, v5
+; GFX11-FAKE16-NEXT: v_and_b32_e32 v3, 0xffff, v6
+; GFX11-FAKE16-NEXT: v_dual_mov_b32 v5, v29 :: v_dual_and_b32 v4, 0xffff, v7
+; GFX11-FAKE16-NEXT: v_lshl_or_b32 v28, v28, 16, v22
+; GFX11-FAKE16-NEXT: v_lshl_or_b32 v22, v37, 16, v1
+; GFX11-FAKE16-NEXT: v_mov_b32_e32 v1, v33
+; GFX11-FAKE16-NEXT: v_lshl_or_b32 v32, v32, 16, v26
+; GFX11-FAKE16-NEXT: v_lshl_or_b32 v26, v67, 16, v24
+; GFX11-FAKE16-NEXT: v_lshl_or_b32 v16, v51, 16, v16
+; GFX11-FAKE16-NEXT: v_lshl_or_b32 v17, v50, 16, v17
+; GFX11-FAKE16-NEXT: v_lshl_or_b32 v21, v38, 16, v0
+; GFX11-FAKE16-NEXT: v_lshl_or_b32 v23, v36, 16, v2
+; GFX11-FAKE16-NEXT: v_lshl_or_b32 v24, v35, 16, v3
+; GFX11-FAKE16-NEXT: v_lshl_or_b32 v25, v34, 16, v4
+; GFX11-FAKE16-NEXT: v_mov_b32_e32 v0, v32
+; GFX11-FAKE16-NEXT: v_dual_mov_b32 v2, v26 :: v_dual_mov_b32 v3, v27
+; GFX11-FAKE16-NEXT: v_mov_b32_e32 v4, v28
+; GFX11-FAKE16-NEXT: v_dual_mov_b32 v6, v30 :: v_dual_mov_b32 v7, v31
+; GFX11-FAKE16-NEXT: s_setpc_b64 s[30:31]
+; GFX11-FAKE16-NEXT: .LBB49_4:
+; GFX11-FAKE16-NEXT: ; implicit-def: $vgpr32
+; GFX11-FAKE16-NEXT: ; implicit-def: $vgpr33
+; GFX11-FAKE16-NEXT: ; implicit-def: $vgpr67
+; GFX11-FAKE16-NEXT: ; implicit-def: $vgpr66
+; GFX11-FAKE16-NEXT: ; implicit-def: $vgpr28
+; GFX11-FAKE16-NEXT: ; implicit-def: $vgpr29
+; GFX11-FAKE16-NEXT: ; implicit-def: $vgpr30
+; GFX11-FAKE16-NEXT: ; implicit-def: $vgpr31
+; GFX11-FAKE16-NEXT: ; implicit-def: $vgpr8
+; GFX11-FAKE16-NEXT: ; implicit-def: $vgpr65
+; GFX11-FAKE16-NEXT: ; implicit-def: $vgpr64
+; GFX11-FAKE16-NEXT: ; implicit-def: $vgpr55
+; GFX11-FAKE16-NEXT: ; implicit-def: $vgpr54
+; GFX11-FAKE16-NEXT: ; implicit-def: $vgpr53
+; GFX11-FAKE16-NEXT: ; implicit-def: $vgpr52
+; GFX11-FAKE16-NEXT: ; implicit-def: $vgpr15
+; GFX11-FAKE16-NEXT: ; implicit-def: $vgpr51
+; GFX11-FAKE16-NEXT: ; implicit-def: $vgpr50
+; GFX11-FAKE16-NEXT: ; implicit-def: $vgpr49
+; GFX11-FAKE16-NEXT: ; implicit-def: $vgpr48
+; GFX11-FAKE16-NEXT: ; implicit-def: $vgpr39
+; GFX11-FAKE16-NEXT: ; implicit-def: $vgpr38
+; GFX11-FAKE16-NEXT: ; implicit-def: $vgpr37
+; GFX11-FAKE16-NEXT: ; implicit-def: $vgpr36
+; GFX11-FAKE16-NEXT: ; implicit-def: $vgpr35
+; GFX11-FAKE16-NEXT: ; implicit-def: $vgpr34
+; GFX11-FAKE16-NEXT: s_branch .LBB49_2
%cmp = icmp eq i32 %b, 0
br i1 %cmp, label %cmp.true, label %cmp.false
@@ -34516,129 +35998,295 @@ define inreg <13 x double> @bitcast_v52i16_to_v13f64_scalar(<52 x i16> inreg %a,
; GFX11-TRUE16-LABEL: bitcast_v52i16_to_v13f64_scalar:
; GFX11-TRUE16: ; %bb.0:
; GFX11-TRUE16-NEXT: s_waitcnt vmcnt(0) expcnt(0) lgkmcnt(0)
-; GFX11-TRUE16-NEXT: v_mov_b16_e32 v32.h, 0
; GFX11-TRUE16-NEXT: v_cmp_ne_u32_e32 vcc_lo, 0, v8
-; GFX11-TRUE16-NEXT: v_mov_b16_e32 v32.l, v7.h
-; GFX11-TRUE16-NEXT: v_mov_b16_e32 v33.l, v6.h
-; GFX11-TRUE16-NEXT: v_mov_b16_e32 v34.l, v5.h
-; GFX11-TRUE16-NEXT: v_mov_b16_e32 v33.h, v32.h
-; GFX11-TRUE16-NEXT: v_mov_b16_e32 v34.h, v32.h
-; GFX11-TRUE16-NEXT: v_mov_b16_e32 v35.l, v4.h
-; GFX11-TRUE16-NEXT: v_mov_b16_e32 v35.h, v32.h
-; GFX11-TRUE16-NEXT: v_mov_b16_e32 v36.l, v3.h
-; GFX11-TRUE16-NEXT: v_mov_b16_e32 v36.h, v32.h
-; GFX11-TRUE16-NEXT: v_mov_b16_e32 v37.l, v2.h
-; GFX11-TRUE16-NEXT: v_mov_b16_e32 v37.h, v32.h
-; GFX11-TRUE16-NEXT: v_mov_b16_e32 v38.l, v1.h
-; GFX11-TRUE16-NEXT: v_mov_b16_e32 v38.h, v32.h
-; GFX11-TRUE16-NEXT: v_mov_b16_e32 v39.l, v0.h
-; GFX11-TRUE16-NEXT: v_mov_b16_e32 v39.h, v32.h
-; GFX11-TRUE16-NEXT: v_and_b32_e32 v55, 0xffff, v0
-; GFX11-TRUE16-NEXT: v_and_b32_e32 v54, 0xffff, v1
-; GFX11-TRUE16-NEXT: v_and_b32_e32 v53, 0xffff, v2
-; GFX11-TRUE16-NEXT: v_and_b32_e32 v52, 0xffff, v3
-; GFX11-TRUE16-NEXT: v_and_b32_e32 v51, 0xffff, v4
-; GFX11-TRUE16-NEXT: v_and_b32_e32 v50, 0xffff, v5
-; GFX11-TRUE16-NEXT: v_and_b32_e32 v49, 0xffff, v6
-; GFX11-TRUE16-NEXT: v_and_b32_e32 v48, 0xffff, v7
-; GFX11-TRUE16-NEXT: s_lshr_b32 s41, s29, 16
-; GFX11-TRUE16-NEXT: s_lshr_b32 s15, s28, 16
-; GFX11-TRUE16-NEXT: s_lshr_b32 s42, s27, 16
-; GFX11-TRUE16-NEXT: s_lshr_b32 s14, s26, 16
-; GFX11-TRUE16-NEXT: s_lshr_b32 s13, s25, 16
-; GFX11-TRUE16-NEXT: s_lshr_b32 s12, s24, 16
-; GFX11-TRUE16-NEXT: s_lshr_b32 s11, s23, 16
-; GFX11-TRUE16-NEXT: s_lshr_b32 s10, s22, 16
-; GFX11-TRUE16-NEXT: s_lshr_b32 s9, s21, 16
-; GFX11-TRUE16-NEXT: s_lshr_b32 s8, s20, 16
-; GFX11-TRUE16-NEXT: s_lshr_b32 s7, s19, 16
-; GFX11-TRUE16-NEXT: s_lshr_b32 s6, s18, 16
-; GFX11-TRUE16-NEXT: s_lshr_b32 s5, s17, 16
-; GFX11-TRUE16-NEXT: s_lshr_b32 s4, s16, 16
-; GFX11-TRUE16-NEXT: s_lshr_b32 s43, s3, 16
-; GFX11-TRUE16-NEXT: s_lshr_b32 s44, s2, 16
-; GFX11-TRUE16-NEXT: s_lshr_b32 s45, s1, 16
-; GFX11-TRUE16-NEXT: s_lshr_b32 s46, s0, 16
-; GFX11-TRUE16-NEXT: s_mov_b32 s40, 0
-; GFX11-TRUE16-NEXT: s_pack_ll_b32_b16 s0, s0, s46
-; GFX11-TRUE16-NEXT: s_pack_ll_b32_b16 s1, s1, s45
-; GFX11-TRUE16-NEXT: s_pack_ll_b32_b16 s2, s2, s44
-; GFX11-TRUE16-NEXT: s_pack_ll_b32_b16 s3, s3, s43
-; GFX11-TRUE16-NEXT: s_pack_ll_b32_b16 s4, s16, s4
-; GFX11-TRUE16-NEXT: s_pack_ll_b32_b16 s5, s17, s5
-; GFX11-TRUE16-NEXT: s_pack_ll_b32_b16 s6, s18, s6
-; GFX11-TRUE16-NEXT: s_pack_ll_b32_b16 s7, s19, s7
-; GFX11-TRUE16-NEXT: s_pack_ll_b32_b16 s8, s20, s8
-; GFX11-TRUE16-NEXT: s_pack_ll_b32_b16 s9, s21, s9
-; GFX11-TRUE16-NEXT: s_pack_ll_b32_b16 s10, s22, s10
-; GFX11-TRUE16-NEXT: s_pack_ll_b32_b16 s11, s23, s11
-; GFX11-TRUE16-NEXT: s_pack_ll_b32_b16 s12, s24, s12
-; GFX11-TRUE16-NEXT: s_pack_ll_b32_b16 s13, s25, s13
-; GFX11-TRUE16-NEXT: s_pack_ll_b32_b16 s14, s26, s14
-; GFX11-TRUE16-NEXT: s_pack_ll_b32_b16 s17, s27, s42
-; GFX11-TRUE16-NEXT: s_pack_ll_b32_b16 s15, s28, s15
-; GFX11-TRUE16-NEXT: s_pack_ll_b32_b16 s16, s29, s41
+; GFX11-TRUE16-NEXT: s_clause 0x1f
+; GFX11-TRUE16-NEXT: scratch_store_b32 off, v40, s32 offset:316
+; GFX11-TRUE16-NEXT: scratch_store_b32 off, v41, s32 offset:312
+; GFX11-TRUE16-NEXT: scratch_store_b32 off, v42, s32 offset:308
+; GFX11-TRUE16-NEXT: scratch_store_b32 off, v43, s32 offset:304
+; GFX11-TRUE16-NEXT: scratch_store_b32 off, v44, s32 offset:300
+; GFX11-TRUE16-NEXT: scratch_store_b32 off, v45, s32 offset:296
+; GFX11-TRUE16-NEXT: scratch_store_b32 off, v46, s32 offset:292
+; GFX11-TRUE16-NEXT: scratch_store_b32 off, v47, s32 offset:288
+; GFX11-TRUE16-NEXT: scratch_store_b32 off, v56, s32 offset:284
+; GFX11-TRUE16-NEXT: scratch_store_b32 off, v57, s32 offset:280
+; GFX11-TRUE16-NEXT: scratch_store_b32 off, v58, s32 offset:276
+; GFX11-TRUE16-NEXT: scratch_store_b32 off, v59, s32 offset:272
+; GFX11-TRUE16-NEXT: scratch_store_b32 off, v60, s32 offset:268
+; GFX11-TRUE16-NEXT: scratch_store_b32 off, v61, s32 offset:264
+; GFX11-TRUE16-NEXT: scratch_store_b32 off, v62, s32 offset:260
+; GFX11-TRUE16-NEXT: scratch_store_b32 off, v63, s32 offset:256
+; GFX11-TRUE16-NEXT: scratch_store_b32 off, v72, s32 offset:252
+; GFX11-TRUE16-NEXT: scratch_store_b32 off, v73, s32 offset:248
+; GFX11-TRUE16-NEXT: scratch_store_b32 off, v74, s32 offset:244
+; GFX11-TRUE16-NEXT: scratch_store_b32 off, v75, s32 offset:240
+; GFX11-TRUE16-NEXT: scratch_store_b32 off, v76, s32 offset:236
+; GFX11-TRUE16-NEXT: scratch_store_b32 off, v77, s32 offset:232
+; GFX11-TRUE16-NEXT: scratch_store_b32 off, v78, s32 offset:228
+; GFX11-TRUE16-NEXT: scratch_store_b32 off, v79, s32 offset:224
+; GFX11-TRUE16-NEXT: scratch_store_b32 off, v88, s32 offset:220
+; GFX11-TRUE16-NEXT: scratch_store_b32 off, v89, s32 offset:216
+; GFX11-TRUE16-NEXT: scratch_store_b32 off, v90, s32 offset:212
+; GFX11-TRUE16-NEXT: scratch_store_b32 off, v91, s32 offset:208
+; GFX11-TRUE16-NEXT: scratch_store_b32 off, v92, s32 offset:204
+; GFX11-TRUE16-NEXT: scratch_store_b32 off, v93, s32 offset:200
+; GFX11-TRUE16-NEXT: scratch_store_b32 off, v94, s32 offset:196
+; GFX11-TRUE16-NEXT: scratch_store_b32 off, v95, s32 offset:192
+; GFX11-TRUE16-NEXT: s_clause 0x1f
+; GFX11-TRUE16-NEXT: scratch_store_b32 off, v104, s32 offset:188
+; GFX11-TRUE16-NEXT: scratch_store_b32 off, v105, s32 offset:184
+; GFX11-TRUE16-NEXT: scratch_store_b32 off, v106, s32 offset:180
+; GFX11-TRUE16-NEXT: scratch_store_b32 off, v107, s32 offset:176
+; GFX11-TRUE16-NEXT: scratch_store_b32 off, v108, s32 offset:172
+; GFX11-TRUE16-NEXT: scratch_store_b32 off, v109, s32 offset:168
+; GFX11-TRUE16-NEXT: scratch_store_b32 off, v110, s32 offset:164
+; GFX11-TRUE16-NEXT: scratch_store_b32 off, v111, s32 offset:160
+; GFX11-TRUE16-NEXT: scratch_store_b32 off, v120, s32 offset:156
+; GFX11-TRUE16-NEXT: scratch_store_b32 off, v121, s32 offset:152
+; GFX11-TRUE16-NEXT: scratch_store_b32 off, v122, s32 offset:148
+; GFX11-TRUE16-NEXT: scratch_store_b32 off, v123, s32 offset:144
+; GFX11-TRUE16-NEXT: scratch_store_b32 off, v124, s32 offset:140
+; GFX11-TRUE16-NEXT: scratch_store_b32 off, v125, s32 offset:136
+; GFX11-TRUE16-NEXT: scratch_store_b32 off, v126, s32 offset:132
+; GFX11-TRUE16-NEXT: scratch_store_b32 off, v127, s32 offset:128
+; GFX11-TRUE16-NEXT: scratch_store_b32 off, v136, s32 offset:124
+; GFX11-TRUE16-NEXT: scratch_store_b32 off, v137, s32 offset:120
+; GFX11-TRUE16-NEXT: scratch_store_b32 off, v138, s32 offset:116
+; GFX11-TRUE16-NEXT: scratch_store_b32 off, v139, s32 offset:112
+; GFX11-TRUE16-NEXT: scratch_store_b32 off, v140, s32 offset:108
+; GFX11-TRUE16-NEXT: scratch_store_b32 off, v141, s32 offset:104
+; GFX11-TRUE16-NEXT: scratch_store_b32 off, v142, s32 offset:100
+; GFX11-TRUE16-NEXT: scratch_store_b32 off, v143, s32 offset:96
+; GFX11-TRUE16-NEXT: scratch_store_b32 off, v152, s32 offset:92
+; GFX11-TRUE16-NEXT: scratch_store_b32 off, v153, s32 offset:88
+; GFX11-TRUE16-NEXT: scratch_store_b32 off, v154, s32 offset:84
+; GFX11-TRUE16-NEXT: scratch_store_b32 off, v155, s32 offset:80
+; GFX11-TRUE16-NEXT: scratch_store_b32 off, v156, s32 offset:76
+; GFX11-TRUE16-NEXT: scratch_store_b32 off, v157, s32 offset:72
+; GFX11-TRUE16-NEXT: scratch_store_b32 off, v158, s32 offset:68
+; GFX11-TRUE16-NEXT: scratch_store_b32 off, v159, s32 offset:64
+; GFX11-TRUE16-NEXT: s_clause 0xf
+; GFX11-TRUE16-NEXT: scratch_store_b32 off, v168, s32 offset:60
+; GFX11-TRUE16-NEXT: scratch_store_b32 off, v169, s32 offset:56
+; GFX11-TRUE16-NEXT: scratch_store_b32 off, v170, s32 offset:52
+; GFX11-TRUE16-NEXT: scratch_store_b32 off, v171, s32 offset:48
+; GFX11-TRUE16-NEXT: scratch_store_b32 off, v172, s32 offset:44
+; GFX11-TRUE16-NEXT: scratch_store_b32 off, v173, s32 offset:40
+; GFX11-TRUE16-NEXT: scratch_store_b32 off, v174, s32 offset:36
+; GFX11-TRUE16-NEXT: scratch_store_b32 off, v175, s32 offset:32
+; GFX11-TRUE16-NEXT: scratch_store_b32 off, v184, s32 offset:28
+; GFX11-TRUE16-NEXT: scratch_store_b32 off, v185, s32 offset:24
+; GFX11-TRUE16-NEXT: scratch_store_b32 off, v186, s32 offset:20
+; GFX11-TRUE16-NEXT: scratch_store_b32 off, v187, s32 offset:16
+; GFX11-TRUE16-NEXT: scratch_store_b32 off, v188, s32 offset:12
+; GFX11-TRUE16-NEXT: scratch_store_b32 off, v189, s32 offset:8
+; GFX11-TRUE16-NEXT: scratch_store_b32 off, v190, s32 offset:4
+; GFX11-TRUE16-NEXT: scratch_store_b32 off, v191, s32
+; GFX11-TRUE16-NEXT: v_dual_mov_b32 v25, v7 :: v_dual_mov_b32 v186, v6
+; GFX11-TRUE16-NEXT: v_dual_mov_b32 v187, v5 :: v_dual_mov_b32 v188, v4
+; GFX11-TRUE16-NEXT: v_dual_mov_b32 v189, v3 :: v_dual_mov_b32 v190, v2
+; GFX11-TRUE16-NEXT: v_mov_b32_e32 v191, v1
+; GFX11-TRUE16-NEXT: v_mov_b32_e32 v185, v0
+; GFX11-TRUE16-NEXT: s_lshr_b32 s15, s29, 16
+; GFX11-TRUE16-NEXT: s_lshr_b32 s14, s28, 16
+; GFX11-TRUE16-NEXT: s_lshr_b32 s13, s27, 16
+; GFX11-TRUE16-NEXT: s_lshr_b32 s12, s26, 16
+; GFX11-TRUE16-NEXT: s_lshr_b32 s11, s25, 16
+; GFX11-TRUE16-NEXT: s_lshr_b32 s10, s24, 16
+; GFX11-TRUE16-NEXT: s_lshr_b32 s9, s23, 16
+; GFX11-TRUE16-NEXT: s_lshr_b32 s8, s22, 16
+; GFX11-TRUE16-NEXT: s_lshr_b32 s7, s21, 16
+; GFX11-TRUE16-NEXT: s_lshr_b32 s6, s20, 16
+; GFX11-TRUE16-NEXT: s_lshr_b32 s5, s19, 16
+; GFX11-TRUE16-NEXT: s_lshr_b32 s4, s18, 16
+; GFX11-TRUE16-NEXT: s_lshr_b32 s43, s17, 16
+; GFX11-TRUE16-NEXT: s_lshr_b32 s44, s16, 16
+; GFX11-TRUE16-NEXT: s_lshr_b32 s45, s3, 16
+; GFX11-TRUE16-NEXT: s_lshr_b32 s46, s2, 16
+; GFX11-TRUE16-NEXT: s_lshr_b32 s41, s1, 16
+; GFX11-TRUE16-NEXT: s_lshr_b32 s40, s0, 16
+; GFX11-TRUE16-NEXT: s_mov_b32 s42, 0
+; GFX11-TRUE16-NEXT: s_pack_ll_b32_b16 s40, s0, s40
+; GFX11-TRUE16-NEXT: s_pack_ll_b32_b16 s41, s1, s41
+; GFX11-TRUE16-NEXT: s_pack_ll_b32_b16 s0, s2, s46
+; GFX11-TRUE16-NEXT: s_pack_ll_b32_b16 s1, s3, s45
+; GFX11-TRUE16-NEXT: s_pack_ll_b32_b16 s2, s16, s44
+; GFX11-TRUE16-NEXT: s_pack_ll_b32_b16 s3, s17, s43
+; GFX11-TRUE16-NEXT: s_pack_ll_b32_b16 s4, s18, s4
+; GFX11-TRUE16-NEXT: s_pack_ll_b32_b16 s5, s19, s5
+; GFX11-TRUE16-NEXT: s_pack_ll_b32_b16 s6, s20, s6
+; GFX11-TRUE16-NEXT: s_pack_ll_b32_b16 s7, s21, s7
+; GFX11-TRUE16-NEXT: s_pack_ll_b32_b16 s8, s22, s8
+; GFX11-TRUE16-NEXT: s_pack_ll_b32_b16 s9, s23, s9
+; GFX11-TRUE16-NEXT: s_pack_ll_b32_b16 s10, s24, s10
+; GFX11-TRUE16-NEXT: s_pack_ll_b32_b16 s11, s25, s11
+; GFX11-TRUE16-NEXT: s_pack_ll_b32_b16 s12, s26, s12
+; GFX11-TRUE16-NEXT: s_pack_ll_b32_b16 s13, s27, s13
+; GFX11-TRUE16-NEXT: s_pack_ll_b32_b16 s14, s28, s14
+; GFX11-TRUE16-NEXT: s_pack_ll_b32_b16 s15, s29, s15
; GFX11-TRUE16-NEXT: s_and_b32 s47, vcc_lo, exec_lo
; GFX11-TRUE16-NEXT: s_cbranch_scc0 .LBB51_4
; GFX11-TRUE16-NEXT: ; %bb.1: ; %cmp.false
-; GFX11-TRUE16-NEXT: v_lshl_or_b32 v18, v39, 16, v55
-; GFX11-TRUE16-NEXT: v_lshl_or_b32 v19, v38, 16, v54
-; GFX11-TRUE16-NEXT: v_lshl_or_b32 v20, v37, 16, v53
-; GFX11-TRUE16-NEXT: v_lshl_or_b32 v21, v36, 16, v52
-; GFX11-TRUE16-NEXT: v_lshl_or_b32 v22, v35, 16, v51
-; GFX11-TRUE16-NEXT: v_lshl_or_b32 v23, v34, 16, v50
-; GFX11-TRUE16-NEXT: v_lshl_or_b32 v24, v33, 16, v49
-; GFX11-TRUE16-NEXT: v_lshl_or_b32 v25, v32, 16, v48
-; GFX11-TRUE16-NEXT: v_dual_mov_b32 v0, s0 :: v_dual_mov_b32 v1, s1
-; GFX11-TRUE16-NEXT: v_dual_mov_b32 v2, s2 :: v_dual_mov_b32 v3, s3
-; GFX11-TRUE16-NEXT: v_dual_mov_b32 v4, s4 :: v_dual_mov_b32 v5, s5
-; GFX11-TRUE16-NEXT: v_dual_mov_b32 v6, s6 :: v_dual_mov_b32 v7, s7
-; GFX11-TRUE16-NEXT: v_dual_mov_b32 v8, s8 :: v_dual_mov_b32 v9, s9
-; GFX11-TRUE16-NEXT: v_dual_mov_b32 v10, s10 :: v_dual_mov_b32 v11, s11
-; GFX11-TRUE16-NEXT: v_dual_mov_b32 v12, s12 :: v_dual_mov_b32 v13, s13
-; GFX11-TRUE16-NEXT: v_dual_mov_b32 v14, s14 :: v_dual_mov_b32 v15, s17
-; GFX11-TRUE16-NEXT: v_dual_mov_b32 v16, s15 :: v_dual_mov_b32 v17, s16
-; GFX11-TRUE16-NEXT: s_and_not1_b32 vcc_lo, exec_lo, s40
+; GFX11-TRUE16-NEXT: v_dual_mov_b32 v0, s40 :: v_dual_mov_b32 v5, s0
+; GFX11-TRUE16-NEXT: v_dual_mov_b32 v2, s41 :: v_dual_mov_b32 v9, s1
+; GFX11-TRUE16-NEXT: v_dual_mov_b32 v14, s2 :: v_dual_mov_b32 v27, s4
+; GFX11-TRUE16-NEXT: v_dual_mov_b32 v20, s3 :: v_dual_mov_b32 v35, s5
+; GFX11-TRUE16-NEXT: v_dual_mov_b32 v44, s6 :: v_dual_mov_b32 v65, s8
+; GFX11-TRUE16-NEXT: v_dual_mov_b32 v54, s7 :: v_dual_mov_b32 v77, s9
+; GFX11-TRUE16-NEXT: v_dual_mov_b32 v90, s10 :: v_dual_mov_b32 v119, s12
+; GFX11-TRUE16-NEXT: v_dual_mov_b32 v104, s11 :: v_dual_mov_b32 v135, s13
+; GFX11-TRUE16-NEXT: v_mov_b32_e32 v152, s14
+; GFX11-TRUE16-NEXT: v_mov_b32_e32 v170, s15
+; GFX11-TRUE16-NEXT: s_and_not1_b32 vcc_lo, exec_lo, s42
; GFX11-TRUE16-NEXT: s_cbranch_vccnz .LBB51_3
; GFX11-TRUE16-NEXT: .LBB51_2: ; %cmp.true
-; GFX11-TRUE16-NEXT: v_lshl_or_b32 v18, v39, 16, v55
-; GFX11-TRUE16-NEXT: v_lshl_or_b32 v19, v38, 16, v54
-; GFX11-TRUE16-NEXT: v_lshl_or_b32 v20, v37, 16, v53
-; GFX11-TRUE16-NEXT: v_lshl_or_b32 v21, v36, 16, v52
-; GFX11-TRUE16-NEXT: v_lshl_or_b32 v22, v35, 16, v51
-; GFX11-TRUE16-NEXT: v_lshl_or_b32 v23, v34, 16, v50
-; GFX11-TRUE16-NEXT: v_lshl_or_b32 v24, v33, 16, v49
-; GFX11-TRUE16-NEXT: v_lshl_or_b32 v25, v32, 16, v48
-; GFX11-TRUE16-NEXT: v_pk_add_u16 v0, s0, 3 op_sel_hi:[1,0]
-; GFX11-TRUE16-NEXT: v_pk_add_u16 v1, s1, 3 op_sel_hi:[1,0]
-; GFX11-TRUE16-NEXT: v_pk_add_u16 v2, s2, 3 op_sel_hi:[1,0]
-; GFX11-TRUE16-NEXT: v_pk_add_u16 v3, s3, 3 op_sel_hi:[1,0]
-; GFX11-TRUE16-NEXT: v_pk_add_u16 v4, s4, 3 op_sel_hi:[1,0]
-; GFX11-TRUE16-NEXT: v_pk_add_u16 v5, s5, 3 op_sel_hi:[1,0]
-; GFX11-TRUE16-NEXT: v_pk_add_u16 v6, s6, 3 op_sel_hi:[1,0]
-; GFX11-TRUE16-NEXT: v_pk_add_u16 v7, s7, 3 op_sel_hi:[1,0]
-; GFX11-TRUE16-NEXT: v_pk_add_u16 v8, s8, 3 op_sel_hi:[1,0]
-; GFX11-TRUE16-NEXT: v_pk_add_u16 v9, s9, 3 op_sel_hi:[1,0]
-; GFX11-TRUE16-NEXT: v_pk_add_u16 v10, s10, 3 op_sel_hi:[1,0]
-; GFX11-TRUE16-NEXT: v_pk_add_u16 v11, s11, 3 op_sel_hi:[1,0]
-; GFX11-TRUE16-NEXT: v_pk_add_u16 v12, s12, 3 op_sel_hi:[1,0]
-; GFX11-TRUE16-NEXT: v_pk_add_u16 v13, s13, 3 op_sel_hi:[1,0]
-; GFX11-TRUE16-NEXT: v_pk_add_u16 v14, s14, 3 op_sel_hi:[1,0]
-; GFX11-TRUE16-NEXT: v_pk_add_u16 v15, s17, 3 op_sel_hi:[1,0]
-; GFX11-TRUE16-NEXT: v_pk_add_u16 v16, s15, 3 op_sel_hi:[1,0]
-; GFX11-TRUE16-NEXT: v_pk_add_u16 v17, s16, 3 op_sel_hi:[1,0]
-; GFX11-TRUE16-NEXT: v_pk_add_u16 v18, v18, 3 op_sel_hi:[1,0]
-; GFX11-TRUE16-NEXT: v_pk_add_u16 v19, v19, 3 op_sel_hi:[1,0]
-; GFX11-TRUE16-NEXT: v_pk_add_u16 v20, v20, 3 op_sel_hi:[1,0]
-; GFX11-TRUE16-NEXT: v_pk_add_u16 v21, v21, 3 op_sel_hi:[1,0]
-; GFX11-TRUE16-NEXT: v_pk_add_u16 v22, v22, 3 op_sel_hi:[1,0]
-; GFX11-TRUE16-NEXT: v_pk_add_u16 v23, v23, 3 op_sel_hi:[1,0]
-; GFX11-TRUE16-NEXT: v_pk_add_u16 v24, v24, 3 op_sel_hi:[1,0]
+; GFX11-TRUE16-NEXT: v_pk_add_u16 v0, s40, 3 op_sel_hi:[1,0]
+; GFX11-TRUE16-NEXT: v_pk_add_u16 v2, s41, 3 op_sel_hi:[1,0]
+; GFX11-TRUE16-NEXT: v_pk_add_u16 v185, v185, 3 op_sel_hi:[1,0]
+; GFX11-TRUE16-NEXT: v_pk_add_u16 v191, v191, 3 op_sel_hi:[1,0]
+; GFX11-TRUE16-NEXT: v_pk_add_u16 v190, v190, 3 op_sel_hi:[1,0]
+; GFX11-TRUE16-NEXT: v_pk_add_u16 v189, v189, 3 op_sel_hi:[1,0]
+; GFX11-TRUE16-NEXT: v_pk_add_u16 v188, v188, 3 op_sel_hi:[1,0]
+; GFX11-TRUE16-NEXT: v_pk_add_u16 v187, v187, 3 op_sel_hi:[1,0]
+; GFX11-TRUE16-NEXT: v_pk_add_u16 v186, v186, 3 op_sel_hi:[1,0]
; GFX11-TRUE16-NEXT: v_pk_add_u16 v25, v25, 3 op_sel_hi:[1,0]
+; GFX11-TRUE16-NEXT: v_pk_add_u16 v5, s0, 3 op_sel_hi:[1,0]
+; GFX11-TRUE16-NEXT: v_pk_add_u16 v9, s1, 3 op_sel_hi:[1,0]
+; GFX11-TRUE16-NEXT: v_pk_add_u16 v14, s2, 3 op_sel_hi:[1,0]
+; GFX11-TRUE16-NEXT: v_pk_add_u16 v20, s3, 3 op_sel_hi:[1,0]
+; GFX11-TRUE16-NEXT: v_pk_add_u16 v27, s4, 3 op_sel_hi:[1,0]
+; GFX11-TRUE16-NEXT: v_pk_add_u16 v35, s5, 3 op_sel_hi:[1,0]
+; GFX11-TRUE16-NEXT: v_pk_add_u16 v44, s6, 3 op_sel_hi:[1,0]
+; GFX11-TRUE16-NEXT: v_pk_add_u16 v54, s7, 3 op_sel_hi:[1,0]
+; GFX11-TRUE16-NEXT: v_pk_add_u16 v65, s8, 3 op_sel_hi:[1,0]
+; GFX11-TRUE16-NEXT: v_pk_add_u16 v77, s9, 3 op_sel_hi:[1,0]
+; GFX11-TRUE16-NEXT: v_pk_add_u16 v90, s10, 3 op_sel_hi:[1,0]
+; GFX11-TRUE16-NEXT: v_pk_add_u16 v104, s11, 3 op_sel_hi:[1,0]
+; GFX11-TRUE16-NEXT: v_pk_add_u16 v119, s12, 3 op_sel_hi:[1,0]
+; GFX11-TRUE16-NEXT: v_pk_add_u16 v135, s13, 3 op_sel_hi:[1,0]
+; GFX11-TRUE16-NEXT: v_pk_add_u16 v152, s14, 3 op_sel_hi:[1,0]
+; GFX11-TRUE16-NEXT: v_pk_add_u16 v170, s15, 3 op_sel_hi:[1,0]
; GFX11-TRUE16-NEXT: .LBB51_3: ; %end
+; GFX11-TRUE16-NEXT: v_dual_mov_b32 v1, v2 :: v_dual_mov_b32 v2, v5
+; GFX11-TRUE16-NEXT: v_dual_mov_b32 v5, v20 :: v_dual_mov_b32 v6, v27
+; GFX11-TRUE16-NEXT: v_dual_mov_b32 v7, v35 :: v_dual_mov_b32 v8, v44
+; GFX11-TRUE16-NEXT: v_dual_mov_b32 v11, v77 :: v_dual_mov_b32 v12, v90
+; GFX11-TRUE16-NEXT: v_mov_b32_e32 v13, v104
+; GFX11-TRUE16-NEXT: v_dual_mov_b32 v15, v135 :: v_dual_mov_b32 v16, v152
+; GFX11-TRUE16-NEXT: v_dual_mov_b32 v17, v170 :: v_dual_mov_b32 v18, v185
+; GFX11-TRUE16-NEXT: v_dual_mov_b32 v19, v191 :: v_dual_mov_b32 v20, v190
+; GFX11-TRUE16-NEXT: v_dual_mov_b32 v21, v189 :: v_dual_mov_b32 v22, v188
+; GFX11-TRUE16-NEXT: v_dual_mov_b32 v23, v187 :: v_dual_mov_b32 v24, v186
+; GFX11-TRUE16-NEXT: s_clause 0x1f
+; GFX11-TRUE16-NEXT: scratch_load_b32 v191, off, s32
+; GFX11-TRUE16-NEXT: scratch_load_b32 v190, off, s32 offset:4
+; GFX11-TRUE16-NEXT: scratch_load_b32 v189, off, s32 offset:8
+; GFX11-TRUE16-NEXT: scratch_load_b32 v188, off, s32 offset:12
+; GFX11-TRUE16-NEXT: scratch_load_b32 v187, off, s32 offset:16
+; GFX11-TRUE16-NEXT: scratch_load_b32 v186, off, s32 offset:20
+; GFX11-TRUE16-NEXT: scratch_load_b32 v185, off, s32 offset:24
+; GFX11-TRUE16-NEXT: scratch_load_b32 v184, off, s32 offset:28
+; GFX11-TRUE16-NEXT: scratch_load_b32 v175, off, s32 offset:32
+; GFX11-TRUE16-NEXT: scratch_load_b32 v174, off, s32 offset:36
+; GFX11-TRUE16-NEXT: scratch_load_b32 v173, off, s32 offset:40
+; GFX11-TRUE16-NEXT: scratch_load_b32 v172, off, s32 offset:44
+; GFX11-TRUE16-NEXT: scratch_load_b32 v171, off, s32 offset:48
+; GFX11-TRUE16-NEXT: scratch_load_b32 v170, off, s32 offset:52
+; GFX11-TRUE16-NEXT: scratch_load_b32 v169, off, s32 offset:56
+; GFX11-TRUE16-NEXT: scratch_load_b32 v168, off, s32 offset:60
+; GFX11-TRUE16-NEXT: scratch_load_b32 v159, off, s32 offset:64
+; GFX11-TRUE16-NEXT: scratch_load_b32 v158, off, s32 offset:68
+; GFX11-TRUE16-NEXT: scratch_load_b32 v157, off, s32 offset:72
+; GFX11-TRUE16-NEXT: scratch_load_b32 v156, off, s32 offset:76
+; GFX11-TRUE16-NEXT: scratch_load_b32 v155, off, s32 offset:80
+; GFX11-TRUE16-NEXT: scratch_load_b32 v154, off, s32 offset:84
+; GFX11-TRUE16-NEXT: scratch_load_b32 v153, off, s32 offset:88
+; GFX11-TRUE16-NEXT: scratch_load_b32 v152, off, s32 offset:92
+; GFX11-TRUE16-NEXT: scratch_load_b32 v143, off, s32 offset:96
+; GFX11-TRUE16-NEXT: scratch_load_b32 v142, off, s32 offset:100
+; GFX11-TRUE16-NEXT: scratch_load_b32 v141, off, s32 offset:104
+; GFX11-TRUE16-NEXT: scratch_load_b32 v140, off, s32 offset:108
+; GFX11-TRUE16-NEXT: scratch_load_b32 v139, off, s32 offset:112
+; GFX11-TRUE16-NEXT: scratch_load_b32 v138, off, s32 offset:116
+; GFX11-TRUE16-NEXT: scratch_load_b32 v137, off, s32 offset:120
+; GFX11-TRUE16-NEXT: scratch_load_b32 v136, off, s32 offset:124
+; GFX11-TRUE16-NEXT: s_clause 0x1f
+; GFX11-TRUE16-NEXT: scratch_load_b32 v127, off, s32 offset:128
+; GFX11-TRUE16-NEXT: scratch_load_b32 v126, off, s32 offset:132
+; GFX11-TRUE16-NEXT: scratch_load_b32 v125, off, s32 offset:136
+; GFX11-TRUE16-NEXT: scratch_load_b32 v124, off, s32 offset:140
+; GFX11-TRUE16-NEXT: scratch_load_b32 v123, off, s32 offset:144
+; GFX11-TRUE16-NEXT: scratch_load_b32 v122, off, s32 offset:148
+; GFX11-TRUE16-NEXT: scratch_load_b32 v121, off, s32 offset:152
+; GFX11-TRUE16-NEXT: scratch_load_b32 v120, off, s32 offset:156
+; GFX11-TRUE16-NEXT: scratch_load_b32 v111, off, s32 offset:160
+; GFX11-TRUE16-NEXT: scratch_load_b32 v110, off, s32 offset:164
+; GFX11-TRUE16-NEXT: scratch_load_b32 v109, off, s32 offset:168
+; GFX11-TRUE16-NEXT: scratch_load_b32 v108, off, s32 offset:172
+; GFX11-TRUE16-NEXT: scratch_load_b32 v107, off, s32 offset:176
+; GFX11-TRUE16-NEXT: scratch_load_b32 v106, off, s32 offset:180
+; GFX11-TRUE16-NEXT: scratch_load_b32 v105, off, s32 offset:184
+; GFX11-TRUE16-NEXT: scratch_load_b32 v104, off, s32 offset:188
+; GFX11-TRUE16-NEXT: scratch_load_b32 v95, off, s32 offset:192
+; GFX11-TRUE16-NEXT: scratch_load_b32 v94, off, s32 offset:196
+; GFX11-TRUE16-NEXT: scratch_load_b32 v93, off, s32 offset:200
+; GFX11-TRUE16-NEXT: scratch_load_b32 v92, off, s32 offset:204
+; GFX11-TRUE16-NEXT: scratch_load_b32 v91, off, s32 offset:208
+; GFX11-TRUE16-NEXT: scratch_load_b32 v90, off, s32 offset:212
+; GFX11-TRUE16-NEXT: scratch_load_b32 v89, off, s32 offset:216
+; GFX11-TRUE16-NEXT: scratch_load_b32 v88, off, s32 offset:220
+; GFX11-TRUE16-NEXT: scratch_load_b32 v79, off, s32 offset:224
+; GFX11-TRUE16-NEXT: scratch_load_b32 v78, off, s32 offset:228
+; GFX11-TRUE16-NEXT: scratch_load_b32 v77, off, s32 offset:232
+; GFX11-TRUE16-NEXT: scratch_load_b32 v76, off, s32 offset:236
+; GFX11-TRUE16-NEXT: scratch_load_b32 v75, off, s32 offset:240
+; GFX11-TRUE16-NEXT: scratch_load_b32 v74, off, s32 offset:244
+; GFX11-TRUE16-NEXT: scratch_load_b32 v73, off, s32 offset:248
+; GFX11-TRUE16-NEXT: scratch_load_b32 v72, off, s32 offset:252
+; GFX11-TRUE16-NEXT: s_clause 0xf
+; GFX11-TRUE16-NEXT: scratch_load_b32 v63, off, s32 offset:256
+; GFX11-TRUE16-NEXT: scratch_load_b32 v62, off, s32 offset:260
+; GFX11-TRUE16-NEXT: scratch_load_b32 v61, off, s32 offset:264
+; GFX11-TRUE16-NEXT: scratch_load_b32 v60, off, s32 offset:268
+; GFX11-TRUE16-NEXT: scratch_load_b32 v59, off, s32 offset:272
+; GFX11-TRUE16-NEXT: scratch_load_b32 v58, off, s32 offset:276
+; GFX11-TRUE16-NEXT: scratch_load_b32 v57, off, s32 offset:280
+; GFX11-TRUE16-NEXT: scratch_load_b32 v56, off, s32 offset:284
+; GFX11-TRUE16-NEXT: scratch_load_b32 v47, off, s32 offset:288
+; GFX11-TRUE16-NEXT: scratch_load_b32 v46, off, s32 offset:292
+; GFX11-TRUE16-NEXT: scratch_load_b32 v45, off, s32 offset:296
+; GFX11-TRUE16-NEXT: scratch_load_b32 v44, off, s32 offset:300
+; GFX11-TRUE16-NEXT: scratch_load_b32 v43, off, s32 offset:304
+; GFX11-TRUE16-NEXT: scratch_load_b32 v42, off, s32 offset:308
+; GFX11-TRUE16-NEXT: scratch_load_b32 v41, off, s32 offset:312
+; GFX11-TRUE16-NEXT: scratch_load_b32 v40, off, s32 offset:316
+; GFX11-TRUE16-NEXT: v_dual_mov_b32 v3, v9 :: v_dual_mov_b32 v4, v14
+; GFX11-TRUE16-NEXT: v_dual_mov_b32 v9, v54 :: v_dual_mov_b32 v10, v65
+; GFX11-TRUE16-NEXT: v_mov_b32_e32 v14, v119
+; GFX11-TRUE16-NEXT: s_waitcnt vmcnt(0)
; GFX11-TRUE16-NEXT: s_setpc_b64 s[30:31]
; GFX11-TRUE16-NEXT: .LBB51_4:
+; GFX11-TRUE16-NEXT: v_mov_b32_e32 v53, v25
; GFX11-TRUE16-NEXT: ; implicit-def: $vgpr0_vgpr1_vgpr2_vgpr3_vgpr4_vgpr5_vgpr6_vgpr7_vgpr8_vgpr9_vgpr10_vgpr11_vgpr12_vgpr13_vgpr14_vgpr15_vgpr16_vgpr17_vgpr18_vgpr19_vgpr20_vgpr21_vgpr22_vgpr23_vgpr24_vgpr25_vgpr26_vgpr27_vgpr28_vgpr29_vgpr30_vgpr31
+; GFX11-TRUE16-NEXT: ; implicit-def: $vgpr1_vgpr2_vgpr3_vgpr4_vgpr5_vgpr6_vgpr7_vgpr8_vgpr9_vgpr10_vgpr11_vgpr12_vgpr13_vgpr14_vgpr15_vgpr16_vgpr17_vgpr18_vgpr19_vgpr20_vgpr21_vgpr22_vgpr23_vgpr24_vgpr25_vgpr26_vgpr27_vgpr28_vgpr29_vgpr30_vgpr31_vgpr32
+; GFX11-TRUE16-NEXT: ; implicit-def: $vgpr3_vgpr4_vgpr5_vgpr6_vgpr7_vgpr8_vgpr9_vgpr10_vgpr11_vgpr12_vgpr13_vgpr14_vgpr15_vgpr16_vgpr17_vgpr18_vgpr19_vgpr20_vgpr21_vgpr22_vgpr23_vgpr24_vgpr25_vgpr26_vgpr27_vgpr28_vgpr29_vgpr30_vgpr31_vgpr32_vgpr33_vgpr34
+; GFX11-TRUE16-NEXT: ; implicit-def: $vgpr6_vgpr7_vgpr8_vgpr9_vgpr10_vgpr11_vgpr12_vgpr13_vgpr14_vgpr15_vgpr16_vgpr17_vgpr18_vgpr19_vgpr20_vgpr21_vgpr22_vgpr23_vgpr24_vgpr25_vgpr26_vgpr27_vgpr28_vgpr29_vgpr30_vgpr31_vgpr32_vgpr33_vgpr34_vgpr35_vgpr36_vgpr37
+; GFX11-TRUE16-NEXT: ; implicit-def: $vgpr10_vgpr11_vgpr12_vgpr13_vgpr14_vgpr15_vgpr16_vgpr17_vgpr18_vgpr19_vgpr20_vgpr21_vgpr22_vgpr23_vgpr24_vgpr25_vgpr26_vgpr27_vgpr28_vgpr29_vgpr30_vgpr31_vgpr32_vgpr33_vgpr34_vgpr35_vgpr36_vgpr37_vgpr38_vgpr39_vgpr40_vgpr41
+; GFX11-TRUE16-NEXT: ; implicit-def: $vgpr15_vgpr16_vgpr17_vgpr18_vgpr19_vgpr20_vgpr21_vgpr22_vgpr23_vgpr24_vgpr25_vgpr26_vgpr27_vgpr28_vgpr29_vgpr30_vgpr31_vgpr32_vgpr33_vgpr34_vgpr35_vgpr36_vgpr37_vgpr38_vgpr39_vgpr40_vgpr41_vgpr42_vgpr43_vgpr44_vgpr45_vgpr46
+; GFX11-TRUE16-NEXT: ; implicit-def: $vgpr21_vgpr22_vgpr23_vgpr24_vgpr25_vgpr26_vgpr27_vgpr28_vgpr29_vgpr30_vgpr31_vgpr32_vgpr33_vgpr34_vgpr35_vgpr36_vgpr37_vgpr38_vgpr39_vgpr40_vgpr41_vgpr42_vgpr43_vgpr44_vgpr45_vgpr46_vgpr47_vgpr48_vgpr49_vgpr50_vgpr51_vgpr52
+; GFX11-TRUE16-NEXT: s_delay_alu instid0(VALU_DEP_1)
+; GFX11-TRUE16-NEXT: v_mov_b32_e32 v25, v53
+; GFX11-TRUE16-NEXT: ; implicit-def: $vgpr28_vgpr29_vgpr30_vgpr31_vgpr32_vgpr33_vgpr34_vgpr35_vgpr36_vgpr37_vgpr38_vgpr39_vgpr40_vgpr41_vgpr42_vgpr43_vgpr44_vgpr45_vgpr46_vgpr47_vgpr48_vgpr49_vgpr50_vgpr51_vgpr52_vgpr53_vgpr54_vgpr55_vgpr56_vgpr57_vgpr58_vgpr59
+; GFX11-TRUE16-NEXT: ; implicit-def: $vgpr36_vgpr37_vgpr38_vgpr39_vgpr40_vgpr41_vgpr42_vgpr43_vgpr44_vgpr45_vgpr46_vgpr47_vgpr48_vgpr49_vgpr50_vgpr51_vgpr52_vgpr53_vgpr54_vgpr55_vgpr56_vgpr57_vgpr58_vgpr59_vgpr60_vgpr61_vgpr62_vgpr63_vgpr64_vgpr65_vgpr66_vgpr67
+; GFX11-TRUE16-NEXT: ; implicit-def: $vgpr45_vgpr46_vgpr47_vgpr48_vgpr49_vgpr50_vgpr51_vgpr52_vgpr53_vgpr54_vgpr55_vgpr56_vgpr57_vgpr58_vgpr59_vgpr60_vgpr61_vgpr62_vgpr63_vgpr64_vgpr65_vgpr66_vgpr67_vgpr68_vgpr69_vgpr70_vgpr71_vgpr72_vgpr73_vgpr74_vgpr75_vgpr76
+; GFX11-TRUE16-NEXT: ; implicit-def: $vgpr55_vgpr56_vgpr57_vgpr58_vgpr59_vgpr60_vgpr61_vgpr62_vgpr63_vgpr64_vgpr65_vgpr66_vgpr67_vgpr68_vgpr69_vgpr70_vgpr71_vgpr72_vgpr73_vgpr74_vgpr75_vgpr76_vgpr77_vgpr78_vgpr79_vgpr80_vgpr81_vgpr82_vgpr83_vgpr84_vgpr85_vgpr86
+; GFX11-TRUE16-NEXT: ; implicit-def: $vgpr66_vgpr67_vgpr68_vgpr69_vgpr70_vgpr71_vgpr72_vgpr73_vgpr74_vgpr75_vgpr76_vgpr77_vgpr78_vgpr79_vgpr80_vgpr81_vgpr82_vgpr83_vgpr84_vgpr85_vgpr86_vgpr87_vgpr88_vgpr89_vgpr90_vgpr91_vgpr92_vgpr93_vgpr94_vgpr95_vgpr96_vgpr97
+; GFX11-TRUE16-NEXT: ; implicit-def: $vgpr78_vgpr79_vgpr80_vgpr81_vgpr82_vgpr83_vgpr84_vgpr85_vgpr86_vgpr87_vgpr88_vgpr89_vgpr90_vgpr91_vgpr92_vgpr93_vgpr94_vgpr95_vgpr96_vgpr97_vgpr98_vgpr99_vgpr100_vgpr101_vgpr102_vgpr103_vgpr104_vgpr105_vgpr106_vgpr107_vgpr108_vgpr109
+; GFX11-TRUE16-NEXT: ; implicit-def: $vgpr91_vgpr92_vgpr93_vgpr94_vgpr95_vgpr96_vgpr97_vgpr98_vgpr99_vgpr100_vgpr101_vgpr102_vgpr103_vgpr104_vgpr105_vgpr106_vgpr107_vgpr108_vgpr109_vgpr110_vgpr111_vgpr112_vgpr113_vgpr114_vgpr115_vgpr116_vgpr117_vgpr118_vgpr119_vgpr120_vgpr121_vgpr122
+; GFX11-TRUE16-NEXT: ; implicit-def: $vgpr105_vgpr106_vgpr107_vgpr108_vgpr109_vgpr110_vgpr111_vgpr112_vgpr113_vgpr114_vgpr115_vgpr116_vgpr117_vgpr118_vgpr119_vgpr120_vgpr121_vgpr122_vgpr123_vgpr124_vgpr125_vgpr126_vgpr127_vgpr128_vgpr129_vgpr130_vgpr131_vgpr132_vgpr133_vgpr134_vgpr135_vgpr136
+; GFX11-TRUE16-NEXT: ; implicit-def: $vgpr120_vgpr121_vgpr122_vgpr123_vgpr124_vgpr125_vgpr126_vgpr127_vgpr128_vgpr129_vgpr130_vgpr131_vgpr132_vgpr133_vgpr134_vgpr135_vgpr136_vgpr137_vgpr138_vgpr139_vgpr140_vgpr141_vgpr142_vgpr143_vgpr144_vgpr145_vgpr146_vgpr147_vgpr148_vgpr149_vgpr150_vgpr151
+; GFX11-TRUE16-NEXT: ; implicit-def: $vgpr136_vgpr137_vgpr138_vgpr139_vgpr140_vgpr141_vgpr142_vgpr143_vgpr144_vgpr145_vgpr146_vgpr147_vgpr148_vgpr149_vgpr150_vgpr151_vgpr152_vgpr153_vgpr154_vgpr155_vgpr156_vgpr157_vgpr158_vgpr159_vgpr160_vgpr161_vgpr162_vgpr163_vgpr164_vgpr165_vgpr166_vgpr167
+; GFX11-TRUE16-NEXT: ; implicit-def: $vgpr153_vgpr154_vgpr155_vgpr156_vgpr157_vgpr158_vgpr159_vgpr160_vgpr161_vgpr162_vgpr163_vgpr164_vgpr165_vgpr166_vgpr167_vgpr168_vgpr169_vgpr170_vgpr171_vgpr172_vgpr173_vgpr174_vgpr175_vgpr176_vgpr177_vgpr178_vgpr179_vgpr180_vgpr181_vgpr182_vgpr183_vgpr184
; GFX11-TRUE16-NEXT: s_branch .LBB51_2
;
; GFX11-FAKE16-LABEL: bitcast_v52i16_to_v13f64_scalar:
@@ -36667,178 +38315,340 @@ define inreg <52 x half> @bitcast_v13f64_to_v52f16_scalar(<13 x double> inreg %a
; GFX9-NEXT: ; implicit-def: $vgpr25
; GFX9-NEXT: s_branch .LBB53_2
;
-; GFX11-LABEL: bitcast_v13f64_to_v52f16_scalar:
-; GFX11: ; %bb.0:
-; GFX11-NEXT: s_waitcnt vmcnt(0) expcnt(0) lgkmcnt(0)
-; GFX11-NEXT: v_cmp_ne_u32_e32 vcc_lo, 0, v8
-; GFX11-NEXT: v_dual_mov_b32 v26, s0 :: v_dual_mov_b32 v27, s1
-; GFX11-NEXT: v_dual_mov_b32 v24, s2 :: v_dual_mov_b32 v25, s3
-; GFX11-NEXT: v_dual_mov_b32 v22, s16 :: v_dual_mov_b32 v23, s17
-; GFX11-NEXT: v_dual_mov_b32 v20, s18 :: v_dual_mov_b32 v21, s19
-; GFX11-NEXT: v_dual_mov_b32 v9, s20 :: v_dual_mov_b32 v10, s21
-; GFX11-NEXT: v_dual_mov_b32 v18, s22 :: v_dual_mov_b32 v19, s23
-; GFX11-NEXT: v_dual_mov_b32 v13, s24 :: v_dual_mov_b32 v14, s25
-; GFX11-NEXT: v_dual_mov_b32 v11, s26 :: v_dual_mov_b32 v12, s27
-; GFX11-NEXT: v_dual_mov_b32 v16, s28 :: v_dual_mov_b32 v17, s29
-; GFX11-NEXT: s_mov_b32 s0, 0
-; GFX11-NEXT: s_and_b32 s1, vcc_lo, exec_lo
-; GFX11-NEXT: s_cbranch_scc0 .LBB53_4
-; GFX11-NEXT: ; %bb.1: ; %cmp.false
-; GFX11-NEXT: v_lshrrev_b32_e32 v34, 16, v7
-; GFX11-NEXT: v_lshrrev_b32_e32 v35, 16, v6
-; GFX11-NEXT: v_lshrrev_b32_e32 v36, 16, v5
-; GFX11-NEXT: v_lshrrev_b32_e32 v37, 16, v4
-; GFX11-NEXT: v_lshrrev_b32_e32 v38, 16, v3
-; GFX11-NEXT: v_lshrrev_b32_e32 v39, 16, v2
-; GFX11-NEXT: v_lshrrev_b32_e32 v48, 16, v1
-; GFX11-NEXT: v_lshrrev_b32_e32 v49, 16, v0
-; GFX11-NEXT: v_lshrrev_b32_e32 v50, 16, v17
-; GFX11-NEXT: v_lshrrev_b32_e32 v51, 16, v16
-; GFX11-NEXT: v_lshrrev_b32_e32 v15, 16, v12
-; GFX11-NEXT: v_lshrrev_b32_e32 v52, 16, v11
-; GFX11-NEXT: v_lshrrev_b32_e32 v53, 16, v14
-; GFX11-NEXT: v_lshrrev_b32_e32 v54, 16, v13
-; GFX11-NEXT: v_lshrrev_b32_e32 v55, 16, v19
-; GFX11-NEXT: v_lshrrev_b32_e32 v64, 16, v18
-; GFX11-NEXT: v_lshrrev_b32_e32 v65, 16, v10
-; GFX11-NEXT: v_lshrrev_b32_e32 v8, 16, v9
-; GFX11-NEXT: v_lshrrev_b32_e32 v31, 16, v21
-; GFX11-NEXT: v_lshrrev_b32_e32 v30, 16, v20
-; GFX11-NEXT: v_lshrrev_b32_e32 v29, 16, v23
-; GFX11-NEXT: v_lshrrev_b32_e32 v28, 16, v22
-; GFX11-NEXT: v_lshrrev_b32_e32 v66, 16, v25
-; GFX11-NEXT: v_lshrrev_b32_e32 v67, 16, v24
-; GFX11-NEXT: v_lshrrev_b32_e32 v33, 16, v27
-; GFX11-NEXT: v_lshrrev_b32_e32 v32, 16, v26
-; GFX11-NEXT: s_and_not1_b32 vcc_lo, exec_lo, s0
-; GFX11-NEXT: s_cbranch_vccnz .LBB53_3
-; GFX11-NEXT: .LBB53_2: ; %cmp.true
-; GFX11-NEXT: v_add_f64 v[6:7], v[6:7], 1.0
-; GFX11-NEXT: v_add_f64 v[4:5], v[4:5], 1.0
-; GFX11-NEXT: v_add_f64 v[2:3], v[2:3], 1.0
-; GFX11-NEXT: v_add_f64 v[0:1], v[0:1], 1.0
-; GFX11-NEXT: v_add_f64 v[16:17], v[16:17], 1.0
-; GFX11-NEXT: v_add_f64 v[11:12], v[11:12], 1.0
-; GFX11-NEXT: v_add_f64 v[13:14], v[13:14], 1.0
-; GFX11-NEXT: v_add_f64 v[18:19], v[18:19], 1.0
-; GFX11-NEXT: v_add_f64 v[9:10], v[9:10], 1.0
-; GFX11-NEXT: v_add_f64 v[20:21], v[20:21], 1.0
-; GFX11-NEXT: v_add_f64 v[22:23], v[22:23], 1.0
-; GFX11-NEXT: v_add_f64 v[24:25], v[24:25], 1.0
-; GFX11-NEXT: v_add_f64 v[26:27], v[26:27], 1.0
-; GFX11-NEXT: v_lshrrev_b32_e32 v34, 16, v7
-; GFX11-NEXT: v_lshrrev_b32_e32 v35, 16, v6
-; GFX11-NEXT: v_lshrrev_b32_e32 v36, 16, v5
-; GFX11-NEXT: v_lshrrev_b32_e32 v37, 16, v4
-; GFX11-NEXT: v_lshrrev_b32_e32 v38, 16, v3
-; GFX11-NEXT: v_lshrrev_b32_e32 v39, 16, v2
-; GFX11-NEXT: v_lshrrev_b32_e32 v48, 16, v1
-; GFX11-NEXT: v_lshrrev_b32_e32 v49, 16, v0
-; GFX11-NEXT: v_lshrrev_b32_e32 v50, 16, v17
-; GFX11-NEXT: v_lshrrev_b32_e32 v51, 16, v16
-; GFX11-NEXT: v_lshrrev_b32_e32 v15, 16, v12
-; GFX11-NEXT: v_lshrrev_b32_e32 v52, 16, v11
-; GFX11-NEXT: v_lshrrev_b32_e32 v53, 16, v14
-; GFX11-NEXT: v_lshrrev_b32_e32 v54, 16, v13
-; GFX11-NEXT: v_lshrrev_b32_e32 v55, 16, v19
-; GFX11-NEXT: v_lshrrev_b32_e32 v64, 16, v18
-; GFX11-NEXT: v_lshrrev_b32_e32 v65, 16, v10
-; GFX11-NEXT: v_lshrrev_b32_e32 v8, 16, v9
-; GFX11-NEXT: v_lshrrev_b32_e32 v31, 16, v21
-; GFX11-NEXT: v_lshrrev_b32_e32 v30, 16, v20
-; GFX11-NEXT: v_lshrrev_b32_e32 v29, 16, v23
-; GFX11-NEXT: v_lshrrev_b32_e32 v28, 16, v22
-; GFX11-NEXT: v_lshrrev_b32_e32 v66, 16, v25
-; GFX11-NEXT: v_lshrrev_b32_e32 v67, 16, v24
-; GFX11-NEXT: v_lshrrev_b32_e32 v33, 16, v27
-; GFX11-NEXT: v_lshrrev_b32_e32 v32, 16, v26
-; GFX11-NEXT: .LBB53_3: ; %end
-; GFX11-NEXT: v_and_b32_e32 v21, 0xffff, v21
-; GFX11-NEXT: v_and_b32_e32 v9, 0xffff, v9
-; GFX11-NEXT: v_and_b32_e32 v10, 0xffff, v10
-; GFX11-NEXT: v_and_b32_e32 v18, 0xffff, v18
-; GFX11-NEXT: v_and_b32_e32 v27, 0xffff, v27
-; GFX11-NEXT: v_and_b32_e32 v25, 0xffff, v25
-; GFX11-NEXT: v_and_b32_e32 v23, 0xffff, v23
-; GFX11-NEXT: v_lshl_or_b32 v31, v31, 16, v21
-; GFX11-NEXT: v_and_b32_e32 v20, 0xffff, v20
-; GFX11-NEXT: v_lshl_or_b32 v8, v8, 16, v9
-; GFX11-NEXT: v_lshl_or_b32 v9, v65, 16, v10
-; GFX11-NEXT: v_lshl_or_b32 v10, v64, 16, v18
-; GFX11-NEXT: v_and_b32_e32 v18, 0xffff, v19
-; GFX11-NEXT: v_and_b32_e32 v13, 0xffff, v13
-; GFX11-NEXT: v_and_b32_e32 v14, 0xffff, v14
-; GFX11-NEXT: v_and_b32_e32 v19, 0xffff, v11
-; GFX11-NEXT: v_and_b32_e32 v1, 0xffff, v1
-; GFX11-NEXT: v_lshl_or_b32 v33, v33, 16, v27
-; GFX11-NEXT: v_and_b32_e32 v26, 0xffff, v26
-; GFX11-NEXT: v_lshl_or_b32 v27, v66, 16, v25
-; GFX11-NEXT: v_and_b32_e32 v24, 0xffff, v24
-; GFX11-NEXT: v_lshl_or_b32 v29, v29, 16, v23
-; GFX11-NEXT: v_and_b32_e32 v22, 0xffff, v22
-; GFX11-NEXT: v_lshl_or_b32 v30, v30, 16, v20
-; GFX11-NEXT: v_and_b32_e32 v20, 0xffff, v12
-; GFX11-NEXT: v_lshl_or_b32 v12, v54, 16, v13
-; GFX11-NEXT: v_lshl_or_b32 v13, v53, 16, v14
-; GFX11-NEXT: v_lshl_or_b32 v14, v52, 16, v19
-; GFX11-NEXT: v_and_b32_e32 v0, 0xffff, v0
-; GFX11-NEXT: v_and_b32_e32 v2, 0xffff, v2
-; GFX11-NEXT: v_lshl_or_b32 v19, v48, 16, v1
-; GFX11-NEXT: v_and_b32_e32 v1, 0xffff, v4
-; GFX11-NEXT: v_lshl_or_b32 v11, v55, 16, v18
-; GFX11-NEXT: v_lshl_or_b32 v15, v15, 16, v20
-; GFX11-NEXT: v_and_b32_e32 v16, 0xffff, v16
-; GFX11-NEXT: v_and_b32_e32 v17, 0xffff, v17
-; GFX11-NEXT: v_lshl_or_b32 v18, v49, 16, v0
-; GFX11-NEXT: v_lshl_or_b32 v20, v39, 16, v2
-; GFX11-NEXT: v_and_b32_e32 v0, 0xffff, v3
-; GFX11-NEXT: v_and_b32_e32 v2, 0xffff, v5
-; GFX11-NEXT: v_and_b32_e32 v3, 0xffff, v6
-; GFX11-NEXT: v_dual_mov_b32 v5, v29 :: v_dual_and_b32 v4, 0xffff, v7
-; GFX11-NEXT: v_lshl_or_b32 v28, v28, 16, v22
-; GFX11-NEXT: v_lshl_or_b32 v22, v37, 16, v1
-; GFX11-NEXT: v_mov_b32_e32 v1, v33
-; GFX11-NEXT: v_lshl_or_b32 v32, v32, 16, v26
-; GFX11-NEXT: v_lshl_or_b32 v26, v67, 16, v24
-; GFX11-NEXT: v_lshl_or_b32 v16, v51, 16, v16
-; GFX11-NEXT: v_lshl_or_b32 v17, v50, 16, v17
-; GFX11-NEXT: v_lshl_or_b32 v21, v38, 16, v0
-; GFX11-NEXT: v_lshl_or_b32 v23, v36, 16, v2
-; GFX11-NEXT: v_lshl_or_b32 v24, v35, 16, v3
-; GFX11-NEXT: v_lshl_or_b32 v25, v34, 16, v4
-; GFX11-NEXT: v_mov_b32_e32 v0, v32
-; GFX11-NEXT: v_dual_mov_b32 v2, v26 :: v_dual_mov_b32 v3, v27
-; GFX11-NEXT: v_mov_b32_e32 v4, v28
-; GFX11-NEXT: v_dual_mov_b32 v6, v30 :: v_dual_mov_b32 v7, v31
-; GFX11-NEXT: s_setpc_b64 s[30:31]
-; GFX11-NEXT: .LBB53_4:
-; GFX11-NEXT: ; implicit-def: $vgpr32
-; GFX11-NEXT: ; implicit-def: $vgpr33
-; GFX11-NEXT: ; implicit-def: $vgpr67
-; GFX11-NEXT: ; implicit-def: $vgpr66
-; GFX11-NEXT: ; implicit-def: $vgpr28
-; GFX11-NEXT: ; implicit-def: $vgpr29
-; GFX11-NEXT: ; implicit-def: $vgpr30
-; GFX11-NEXT: ; implicit-def: $vgpr31
-; GFX11-NEXT: ; implicit-def: $vgpr8
-; GFX11-NEXT: ; implicit-def: $vgpr65
-; GFX11-NEXT: ; implicit-def: $vgpr64
-; GFX11-NEXT: ; implicit-def: $vgpr55
-; GFX11-NEXT: ; implicit-def: $vgpr54
-; GFX11-NEXT: ; implicit-def: $vgpr53
-; GFX11-NEXT: ; implicit-def: $vgpr52
-; GFX11-NEXT: ; implicit-def: $vgpr15
-; GFX11-NEXT: ; implicit-def: $vgpr51
-; GFX11-NEXT: ; implicit-def: $vgpr50
-; GFX11-NEXT: ; implicit-def: $vgpr49
-; GFX11-NEXT: ; implicit-def: $vgpr48
-; GFX11-NEXT: ; implicit-def: $vgpr39
-; GFX11-NEXT: ; implicit-def: $vgpr38
-; GFX11-NEXT: ; implicit-def: $vgpr37
-; GFX11-NEXT: ; implicit-def: $vgpr36
-; GFX11-NEXT: ; implicit-def: $vgpr35
-; GFX11-NEXT: ; implicit-def: $vgpr34
-; GFX11-NEXT: s_branch .LBB53_2
+; GFX11-TRUE16-LABEL: bitcast_v13f64_to_v52f16_scalar:
+; GFX11-TRUE16: ; %bb.0:
+; GFX11-TRUE16-NEXT: s_waitcnt vmcnt(0) expcnt(0) lgkmcnt(0)
+; GFX11-TRUE16-NEXT: v_dual_mov_b32 v16, v8 :: v_dual_mov_b32 v25, v7
+; GFX11-TRUE16-NEXT: v_dual_mov_b32 v24, v6 :: v_dual_mov_b32 v23, v5
+; GFX11-TRUE16-NEXT: v_dual_mov_b32 v22, v4 :: v_dual_mov_b32 v21, v3
+; GFX11-TRUE16-NEXT: s_delay_alu instid0(VALU_DEP_3)
+; GFX11-TRUE16-NEXT: v_cmp_ne_u32_e32 vcc_lo, 0, v16
+; GFX11-TRUE16-NEXT: v_dual_mov_b32 v20, v2 :: v_dual_mov_b32 v19, v1
+; GFX11-TRUE16-NEXT: v_dual_mov_b32 v18, v0 :: v_dual_mov_b32 v1, s1
+; GFX11-TRUE16-NEXT: v_dual_mov_b32 v0, s0 :: v_dual_mov_b32 v3, s3
+; GFX11-TRUE16-NEXT: v_dual_mov_b32 v2, s2 :: v_dual_mov_b32 v5, s17
+; GFX11-TRUE16-NEXT: v_dual_mov_b32 v4, s16 :: v_dual_mov_b32 v7, s19
+; GFX11-TRUE16-NEXT: v_dual_mov_b32 v6, s18 :: v_dual_mov_b32 v9, s21
+; GFX11-TRUE16-NEXT: v_dual_mov_b32 v8, s20 :: v_dual_mov_b32 v11, s23
+; GFX11-TRUE16-NEXT: v_dual_mov_b32 v10, s22 :: v_dual_mov_b32 v13, s25
+; GFX11-TRUE16-NEXT: v_dual_mov_b32 v12, s24 :: v_dual_mov_b32 v15, s27
+; GFX11-TRUE16-NEXT: v_dual_mov_b32 v14, s26 :: v_dual_mov_b32 v17, s29
+; GFX11-TRUE16-NEXT: v_mov_b32_e32 v16, s28
+; GFX11-TRUE16-NEXT: s_mov_b32 s0, 0
+; GFX11-TRUE16-NEXT: s_and_b32 s1, vcc_lo, exec_lo
+; GFX11-TRUE16-NEXT: s_cbranch_scc0 .LBB53_4
+; GFX11-TRUE16-NEXT: ; %bb.1: ; %cmp.false
+; GFX11-TRUE16-NEXT: v_lshrrev_b32_e32 v26, 16, v25
+; GFX11-TRUE16-NEXT: v_lshrrev_b32_e32 v27, 16, v24
+; GFX11-TRUE16-NEXT: v_lshrrev_b32_e32 v28, 16, v23
+; GFX11-TRUE16-NEXT: v_lshrrev_b32_e32 v29, 16, v22
+; GFX11-TRUE16-NEXT: v_lshrrev_b32_e32 v30, 16, v21
+; GFX11-TRUE16-NEXT: v_lshrrev_b32_e32 v31, 16, v20
+; GFX11-TRUE16-NEXT: v_lshrrev_b32_e32 v32, 16, v19
+; GFX11-TRUE16-NEXT: v_lshrrev_b32_e32 v33, 16, v18
+; GFX11-TRUE16-NEXT: v_lshrrev_b32_e32 v34, 16, v17
+; GFX11-TRUE16-NEXT: v_lshrrev_b32_e32 v35, 16, v16
+; GFX11-TRUE16-NEXT: v_lshrrev_b32_e32 v36, 16, v15
+; GFX11-TRUE16-NEXT: v_lshrrev_b32_e32 v37, 16, v14
+; GFX11-TRUE16-NEXT: v_lshrrev_b32_e32 v38, 16, v13
+; GFX11-TRUE16-NEXT: v_lshrrev_b32_e32 v39, 16, v12
+; GFX11-TRUE16-NEXT: v_lshrrev_b32_e32 v48, 16, v11
+; GFX11-TRUE16-NEXT: v_lshrrev_b32_e32 v49, 16, v10
+; GFX11-TRUE16-NEXT: v_lshrrev_b32_e32 v50, 16, v9
+; GFX11-TRUE16-NEXT: v_lshrrev_b32_e32 v51, 16, v8
+; GFX11-TRUE16-NEXT: v_lshrrev_b32_e32 v52, 16, v7
+; GFX11-TRUE16-NEXT: v_lshrrev_b32_e32 v53, 16, v6
+; GFX11-TRUE16-NEXT: v_lshrrev_b32_e32 v54, 16, v5
+; GFX11-TRUE16-NEXT: v_lshrrev_b32_e32 v55, 16, v4
+; GFX11-TRUE16-NEXT: v_lshrrev_b32_e32 v64, 16, v3
+; GFX11-TRUE16-NEXT: v_lshrrev_b32_e32 v65, 16, v2
+; GFX11-TRUE16-NEXT: v_lshrrev_b32_e32 v66, 16, v1
+; GFX11-TRUE16-NEXT: v_lshrrev_b32_e32 v67, 16, v0
+; GFX11-TRUE16-NEXT: s_and_not1_b32 vcc_lo, exec_lo, s0
+; GFX11-TRUE16-NEXT: s_cbranch_vccnz .LBB53_3
+; GFX11-TRUE16-NEXT: .LBB53_2: ; %cmp.true
+; GFX11-TRUE16-NEXT: v_add_f64 v[24:25], v[24:25], 1.0
+; GFX11-TRUE16-NEXT: v_add_f64 v[22:23], v[22:23], 1.0
+; GFX11-TRUE16-NEXT: v_add_f64 v[20:21], v[20:21], 1.0
+; GFX11-TRUE16-NEXT: v_add_f64 v[18:19], v[18:19], 1.0
+; GFX11-TRUE16-NEXT: v_add_f64 v[16:17], v[16:17], 1.0
+; GFX11-TRUE16-NEXT: v_add_f64 v[14:15], v[14:15], 1.0
+; GFX11-TRUE16-NEXT: v_add_f64 v[12:13], v[12:13], 1.0
+; GFX11-TRUE16-NEXT: v_add_f64 v[10:11], v[10:11], 1.0
+; GFX11-TRUE16-NEXT: v_add_f64 v[8:9], v[8:9], 1.0
+; GFX11-TRUE16-NEXT: v_add_f64 v[6:7], v[6:7], 1.0
+; GFX11-TRUE16-NEXT: v_add_f64 v[4:5], v[4:5], 1.0
+; GFX11-TRUE16-NEXT: v_add_f64 v[2:3], v[2:3], 1.0
+; GFX11-TRUE16-NEXT: v_add_f64 v[0:1], v[0:1], 1.0
+; GFX11-TRUE16-NEXT: v_lshrrev_b32_e32 v26, 16, v25
+; GFX11-TRUE16-NEXT: v_lshrrev_b32_e32 v27, 16, v24
+; GFX11-TRUE16-NEXT: v_lshrrev_b32_e32 v28, 16, v23
+; GFX11-TRUE16-NEXT: v_lshrrev_b32_e32 v29, 16, v22
+; GFX11-TRUE16-NEXT: v_lshrrev_b32_e32 v30, 16, v21
+; GFX11-TRUE16-NEXT: v_lshrrev_b32_e32 v31, 16, v20
+; GFX11-TRUE16-NEXT: v_lshrrev_b32_e32 v32, 16, v19
+; GFX11-TRUE16-NEXT: v_lshrrev_b32_e32 v33, 16, v18
+; GFX11-TRUE16-NEXT: v_lshrrev_b32_e32 v34, 16, v17
+; GFX11-TRUE16-NEXT: v_lshrrev_b32_e32 v35, 16, v16
+; GFX11-TRUE16-NEXT: v_lshrrev_b32_e32 v36, 16, v15
+; GFX11-TRUE16-NEXT: v_lshrrev_b32_e32 v37, 16, v14
+; GFX11-TRUE16-NEXT: v_lshrrev_b32_e32 v38, 16, v13
+; GFX11-TRUE16-NEXT: v_lshrrev_b32_e32 v39, 16, v12
+; GFX11-TRUE16-NEXT: v_lshrrev_b32_e32 v48, 16, v11
+; GFX11-TRUE16-NEXT: v_lshrrev_b32_e32 v49, 16, v10
+; GFX11-TRUE16-NEXT: v_lshrrev_b32_e32 v50, 16, v9
+; GFX11-TRUE16-NEXT: v_lshrrev_b32_e32 v51, 16, v8
+; GFX11-TRUE16-NEXT: v_lshrrev_b32_e32 v52, 16, v7
+; GFX11-TRUE16-NEXT: v_lshrrev_b32_e32 v53, 16, v6
+; GFX11-TRUE16-NEXT: v_lshrrev_b32_e32 v54, 16, v5
+; GFX11-TRUE16-NEXT: v_lshrrev_b32_e32 v55, 16, v4
+; GFX11-TRUE16-NEXT: v_lshrrev_b32_e32 v64, 16, v3
+; GFX11-TRUE16-NEXT: v_lshrrev_b32_e32 v65, 16, v2
+; GFX11-TRUE16-NEXT: v_lshrrev_b32_e32 v66, 16, v1
+; GFX11-TRUE16-NEXT: v_lshrrev_b32_e32 v67, 16, v0
+; GFX11-TRUE16-NEXT: .LBB53_3: ; %end
+; GFX11-TRUE16-NEXT: s_delay_alu instid0(VALU_DEP_1) | instskip(NEXT) | instid1(VALU_DEP_4)
+; GFX11-TRUE16-NEXT: v_dual_mov_b32 v67, v67 :: v_dual_mov_b32 v66, v66
+; GFX11-TRUE16-NEXT: v_dual_mov_b32 v65, v65 :: v_dual_mov_b32 v64, v64
+; GFX11-TRUE16-NEXT: v_dual_mov_b32 v55, v55 :: v_dual_mov_b32 v54, v54
+; GFX11-TRUE16-NEXT: v_dual_mov_b32 v53, v53 :: v_dual_mov_b32 v52, v52
+; GFX11-TRUE16-NEXT: v_dual_mov_b32 v51, v51 :: v_dual_mov_b32 v50, v50
+; GFX11-TRUE16-NEXT: v_dual_mov_b32 v49, v49 :: v_dual_mov_b32 v48, v48
+; GFX11-TRUE16-NEXT: v_dual_mov_b32 v39, v39 :: v_dual_mov_b32 v38, v38
+; GFX11-TRUE16-NEXT: v_dual_mov_b32 v37, v37 :: v_dual_mov_b32 v36, v36
+; GFX11-TRUE16-NEXT: v_dual_mov_b32 v35, v35 :: v_dual_mov_b32 v34, v34
+; GFX11-TRUE16-NEXT: v_dual_mov_b32 v33, v33 :: v_dual_mov_b32 v32, v32
+; GFX11-TRUE16-NEXT: v_dual_mov_b32 v31, v31 :: v_dual_mov_b32 v30, v30
+; GFX11-TRUE16-NEXT: v_dual_mov_b32 v29, v29 :: v_dual_mov_b32 v28, v28
+; GFX11-TRUE16-NEXT: v_dual_mov_b32 v27, v27 :: v_dual_mov_b32 v26, v26
+; GFX11-TRUE16-NEXT: v_mov_b16_e32 v0.h, v67.l
+; GFX11-TRUE16-NEXT: v_mov_b16_e32 v1.h, v66.l
+; GFX11-TRUE16-NEXT: v_mov_b16_e32 v2.h, v65.l
+; GFX11-TRUE16-NEXT: v_mov_b16_e32 v3.h, v64.l
+; GFX11-TRUE16-NEXT: v_mov_b16_e32 v4.h, v55.l
+; GFX11-TRUE16-NEXT: v_mov_b16_e32 v5.h, v54.l
+; GFX11-TRUE16-NEXT: v_mov_b16_e32 v6.h, v53.l
+; GFX11-TRUE16-NEXT: v_mov_b16_e32 v7.h, v52.l
+; GFX11-TRUE16-NEXT: v_mov_b16_e32 v8.h, v51.l
+; GFX11-TRUE16-NEXT: v_mov_b16_e32 v9.h, v50.l
+; GFX11-TRUE16-NEXT: v_mov_b16_e32 v10.h, v49.l
+; GFX11-TRUE16-NEXT: v_mov_b16_e32 v11.h, v48.l
+; GFX11-TRUE16-NEXT: v_mov_b16_e32 v12.h, v39.l
+; GFX11-TRUE16-NEXT: v_mov_b16_e32 v13.h, v38.l
+; GFX11-TRUE16-NEXT: v_mov_b16_e32 v14.h, v37.l
+; GFX11-TRUE16-NEXT: v_mov_b16_e32 v15.h, v36.l
+; GFX11-TRUE16-NEXT: v_mov_b16_e32 v16.h, v35.l
+; GFX11-TRUE16-NEXT: v_mov_b16_e32 v17.h, v34.l
+; GFX11-TRUE16-NEXT: v_mov_b16_e32 v18.h, v33.l
+; GFX11-TRUE16-NEXT: v_mov_b16_e32 v19.h, v32.l
+; GFX11-TRUE16-NEXT: v_mov_b16_e32 v20.h, v31.l
+; GFX11-TRUE16-NEXT: v_mov_b16_e32 v21.h, v30.l
+; GFX11-TRUE16-NEXT: v_mov_b16_e32 v22.h, v29.l
+; GFX11-TRUE16-NEXT: v_mov_b16_e32 v23.h, v28.l
+; GFX11-TRUE16-NEXT: v_mov_b16_e32 v24.h, v27.l
+; GFX11-TRUE16-NEXT: v_mov_b16_e32 v25.h, v26.l
+; GFX11-TRUE16-NEXT: s_setpc_b64 s[30:31]
+; GFX11-TRUE16-NEXT: .LBB53_4:
+; GFX11-TRUE16-NEXT: ; implicit-def: $vgpr67
+; GFX11-TRUE16-NEXT: ; implicit-def: $vgpr66
+; GFX11-TRUE16-NEXT: ; implicit-def: $vgpr65
+; GFX11-TRUE16-NEXT: ; implicit-def: $vgpr64
+; GFX11-TRUE16-NEXT: ; implicit-def: $vgpr55
+; GFX11-TRUE16-NEXT: ; implicit-def: $vgpr54
+; GFX11-TRUE16-NEXT: ; implicit-def: $vgpr53
+; GFX11-TRUE16-NEXT: ; implicit-def: $vgpr52
+; GFX11-TRUE16-NEXT: ; implicit-def: $vgpr51
+; GFX11-TRUE16-NEXT: ; implicit-def: $vgpr50
+; GFX11-TRUE16-NEXT: ; implicit-def: $vgpr49
+; GFX11-TRUE16-NEXT: ; implicit-def: $vgpr48
+; GFX11-TRUE16-NEXT: ; implicit-def: $vgpr39
+; GFX11-TRUE16-NEXT: ; implicit-def: $vgpr38
+; GFX11-TRUE16-NEXT: ; implicit-def: $vgpr37
+; GFX11-TRUE16-NEXT: ; implicit-def: $vgpr36
+; GFX11-TRUE16-NEXT: ; implicit-def: $vgpr35
+; GFX11-TRUE16-NEXT: ; implicit-def: $vgpr34
+; GFX11-TRUE16-NEXT: ; implicit-def: $vgpr33
+; GFX11-TRUE16-NEXT: ; implicit-def: $vgpr32
+; GFX11-TRUE16-NEXT: ; implicit-def: $vgpr31
+; GFX11-TRUE16-NEXT: ; implicit-def: $vgpr30
+; GFX11-TRUE16-NEXT: ; implicit-def: $vgpr29
+; GFX11-TRUE16-NEXT: ; implicit-def: $vgpr28
+; GFX11-TRUE16-NEXT: ; implicit-def: $vgpr27
+; GFX11-TRUE16-NEXT: ; implicit-def: $vgpr26
+; GFX11-TRUE16-NEXT: s_branch .LBB53_2
+;
+; GFX11-FAKE16-LABEL: bitcast_v13f64_to_v52f16_scalar:
+; GFX11-FAKE16: ; %bb.0:
+; GFX11-FAKE16-NEXT: s_waitcnt vmcnt(0) expcnt(0) lgkmcnt(0)
+; GFX11-FAKE16-NEXT: v_cmp_ne_u32_e32 vcc_lo, 0, v8
+; GFX11-FAKE16-NEXT: v_dual_mov_b32 v26, s0 :: v_dual_mov_b32 v27, s1
+; GFX11-FAKE16-NEXT: v_dual_mov_b32 v24, s2 :: v_dual_mov_b32 v25, s3
+; GFX11-FAKE16-NEXT: v_dual_mov_b32 v22, s16 :: v_dual_mov_b32 v23, s17
+; GFX11-FAKE16-NEXT: v_dual_mov_b32 v20, s18 :: v_dual_mov_b32 v21, s19
+; GFX11-FAKE16-NEXT: v_dual_mov_b32 v9, s20 :: v_dual_mov_b32 v10, s21
+; GFX11-FAKE16-NEXT: v_dual_mov_b32 v18, s22 :: v_dual_mov_b32 v19, s23
+; GFX11-FAKE16-NEXT: v_dual_mov_b32 v13, s24 :: v_dual_mov_b32 v14, s25
+; GFX11-FAKE16-NEXT: v_dual_mov_b32 v11, s26 :: v_dual_mov_b32 v12, s27
+; GFX11-FAKE16-NEXT: v_dual_mov_b32 v16, s28 :: v_dual_mov_b32 v17, s29
+; GFX11-FAKE16-NEXT: s_mov_b32 s0, 0
+; GFX11-FAKE16-NEXT: s_and_b32 s1, vcc_lo, exec_lo
+; GFX11-FAKE16-NEXT: s_cbranch_scc0 .LBB53_4
+; GFX11-FAKE16-NEXT: ; %bb.1: ; %cmp.false
+; GFX11-FAKE16-NEXT: v_lshrrev_b32_e32 v34, 16, v7
+; GFX11-FAKE16-NEXT: v_lshrrev_b32_e32 v35, 16, v6
+; GFX11-FAKE16-NEXT: v_lshrrev_b32_e32 v36, 16, v5
+; GFX11-FAKE16-NEXT: v_lshrrev_b32_e32 v37, 16, v4
+; GFX11-FAKE16-NEXT: v_lshrrev_b32_e32 v38, 16, v3
+; GFX11-FAKE16-NEXT: v_lshrrev_b32_e32 v39, 16, v2
+; GFX11-FAKE16-NEXT: v_lshrrev_b32_e32 v48, 16, v1
+; GFX11-FAKE16-NEXT: v_lshrrev_b32_e32 v49, 16, v0
+; GFX11-FAKE16-NEXT: v_lshrrev_b32_e32 v50, 16, v17
+; GFX11-FAKE16-NEXT: v_lshrrev_b32_e32 v51, 16, v16
+; GFX11-FAKE16-NEXT: v_lshrrev_b32_e32 v15, 16, v12
+; GFX11-FAKE16-NEXT: v_lshrrev_b32_e32 v52, 16, v11
+; GFX11-FAKE16-NEXT: v_lshrrev_b32_e32 v53, 16, v14
+; GFX11-FAKE16-NEXT: v_lshrrev_b32_e32 v54, 16, v13
+; GFX11-FAKE16-NEXT: v_lshrrev_b32_e32 v55, 16, v19
+; GFX11-FAKE16-NEXT: v_lshrrev_b32_e32 v64, 16, v18
+; GFX11-FAKE16-NEXT: v_lshrrev_b32_e32 v65, 16, v10
+; GFX11-FAKE16-NEXT: v_lshrrev_b32_e32 v8, 16, v9
+; GFX11-FAKE16-NEXT: v_lshrrev_b32_e32 v31, 16, v21
+; GFX11-FAKE16-NEXT: v_lshrrev_b32_e32 v30, 16, v20
+; GFX11-FAKE16-NEXT: v_lshrrev_b32_e32 v29, 16, v23
+; GFX11-FAKE16-NEXT: v_lshrrev_b32_e32 v28, 16, v22
+; GFX11-FAKE16-NEXT: v_lshrrev_b32_e32 v66, 16, v25
+; GFX11-FAKE16-NEXT: v_lshrrev_b32_e32 v67, 16, v24
+; GFX11-FAKE16-NEXT: v_lshrrev_b32_e32 v33, 16, v27
+; GFX11-FAKE16-NEXT: v_lshrrev_b32_e32 v32, 16, v26
+; GFX11-FAKE16-NEXT: s_and_not1_b32 vcc_lo, exec_lo, s0
+; GFX11-FAKE16-NEXT: s_cbranch_vccnz .LBB53_3
+; GFX11-FAKE16-NEXT: .LBB53_2: ; %cmp.true
+; GFX11-FAKE16-NEXT: v_add_f64 v[6:7], v[6:7], 1.0
+; GFX11-FAKE16-NEXT: v_add_f64 v[4:5], v[4:5], 1.0
+; GFX11-FAKE16-NEXT: v_add_f64 v[2:3], v[2:3], 1.0
+; GFX11-FAKE16-NEXT: v_add_f64 v[0:1], v[0:1], 1.0
+; GFX11-FAKE16-NEXT: v_add_f64 v[16:17], v[16:17], 1.0
+; GFX11-FAKE16-NEXT: v_add_f64 v[11:12], v[11:12], 1.0
+; GFX11-FAKE16-NEXT: v_add_f64 v[13:14], v[13:14], 1.0
+; GFX11-FAKE16-NEXT: v_add_f64 v[18:19], v[18:19], 1.0
+; GFX11-FAKE16-NEXT: v_add_f64 v[9:10], v[9:10], 1.0
+; GFX11-FAKE16-NEXT: v_add_f64 v[20:21], v[20:21], 1.0
+; GFX11-FAKE16-NEXT: v_add_f64 v[22:23], v[22:23], 1.0
+; GFX11-FAKE16-NEXT: v_add_f64 v[24:25], v[24:25], 1.0
+; GFX11-FAKE16-NEXT: v_add_f64 v[26:27], v[26:27], 1.0
+; GFX11-FAKE16-NEXT: v_lshrrev_b32_e32 v34, 16, v7
+; GFX11-FAKE16-NEXT: v_lshrrev_b32_e32 v35, 16, v6
+; GFX11-FAKE16-NEXT: v_lshrrev_b32_e32 v36, 16, v5
+; GFX11-FAKE16-NEXT: v_lshrrev_b32_e32 v37, 16, v4
+; GFX11-FAKE16-NEXT: v_lshrrev_b32_e32 v38, 16, v3
+; GFX11-FAKE16-NEXT: v_lshrrev_b32_e32 v39, 16, v2
+; GFX11-FAKE16-NEXT: v_lshrrev_b32_e32 v48, 16, v1
+; GFX11-FAKE16-NEXT: v_lshrrev_b32_e32 v49, 16, v0
+; GFX11-FAKE16-NEXT: v_lshrrev_b32_e32 v50, 16, v17
+; GFX11-FAKE16-NEXT: v_lshrrev_b32_e32 v51, 16, v16
+; GFX11-FAKE16-NEXT: v_lshrrev_b32_e32 v15, 16, v12
+; GFX11-FAKE16-NEXT: v_lshrrev_b32_e32 v52, 16, v11
+; GFX11-FAKE16-NEXT: v_lshrrev_b32_e32 v53, 16, v14
+; GFX11-FAKE16-NEXT: v_lshrrev_b32_e32 v54, 16, v13
+; GFX11-FAKE16-NEXT: v_lshrrev_b32_e32 v55, 16, v19
+; GFX11-FAKE16-NEXT: v_lshrrev_b32_e32 v64, 16, v18
+; GFX11-FAKE16-NEXT: v_lshrrev_b32_e32 v65, 16, v10
+; GFX11-FAKE16-NEXT: v_lshrrev_b32_e32 v8, 16, v9
+; GFX11-FAKE16-NEXT: v_lshrrev_b32_e32 v31, 16, v21
+; GFX11-FAKE16-NEXT: v_lshrrev_b32_e32 v30, 16, v20
+; GFX11-FAKE16-NEXT: v_lshrrev_b32_e32 v29, 16, v23
+; GFX11-FAKE16-NEXT: v_lshrrev_b32_e32 v28, 16, v22
+; GFX11-FAKE16-NEXT: v_lshrrev_b32_e32 v66, 16, v25
+; GFX11-FAKE16-NEXT: v_lshrrev_b32_e32 v67, 16, v24
+; GFX11-FAKE16-NEXT: v_lshrrev_b32_e32 v33, 16, v27
+; GFX11-FAKE16-NEXT: v_lshrrev_b32_e32 v32, 16, v26
+; GFX11-FAKE16-NEXT: .LBB53_3: ; %end
+; GFX11-FAKE16-NEXT: v_and_b32_e32 v21, 0xffff, v21
+; GFX11-FAKE16-NEXT: v_and_b32_e32 v9, 0xffff, v9
+; GFX11-FAKE16-NEXT: v_and_b32_e32 v10, 0xffff, v10
+; GFX11-FAKE16-NEXT: v_and_b32_e32 v18, 0xffff, v18
+; GFX11-FAKE16-NEXT: v_and_b32_e32 v27, 0xffff, v27
+; GFX11-FAKE16-NEXT: v_and_b32_e32 v25, 0xffff, v25
+; GFX11-FAKE16-NEXT: v_and_b32_e32 v23, 0xffff, v23
+; GFX11-FAKE16-NEXT: v_lshl_or_b32 v31, v31, 16, v21
+; GFX11-FAKE16-NEXT: v_and_b32_e32 v20, 0xffff, v20
+; GFX11-FAKE16-NEXT: v_lshl_or_b32 v8, v8, 16, v9
+; GFX11-FAKE16-NEXT: v_lshl_or_b32 v9, v65, 16, v10
+; GFX11-FAKE16-NEXT: v_lshl_or_b32 v10, v64, 16, v18
+; GFX11-FAKE16-NEXT: v_and_b32_e32 v18, 0xffff, v19
+; GFX11-FAKE16-NEXT: v_and_b32_e32 v13, 0xffff, v13
+; GFX11-FAKE16-NEXT: v_and_b32_e32 v14, 0xffff, v14
+; GFX11-FAKE16-NEXT: v_and_b32_e32 v19, 0xffff, v11
+; GFX11-FAKE16-NEXT: v_and_b32_e32 v1, 0xffff, v1
+; GFX11-FAKE16-NEXT: v_lshl_or_b32 v33, v33, 16, v27
+; GFX11-FAKE16-NEXT: v_and_b32_e32 v26, 0xffff, v26
+; GFX11-FAKE16-NEXT: v_lshl_or_b32 v27, v66, 16, v25
+; GFX11-FAKE16-NEXT: v_and_b32_e32 v24, 0xffff, v24
+; GFX11-FAKE16-NEXT: v_lshl_or_b32 v29, v29, 16, v23
+; GFX11-FAKE16-NEXT: v_and_b32_e32 v22, 0xffff, v22
+; GFX11-FAKE16-NEXT: v_lshl_or_b32 v30, v30, 16, v20
+; GFX11-FAKE16-NEXT: v_and_b32_e32 v20, 0xffff, v12
+; GFX11-FAKE16-NEXT: v_lshl_or_b32 v12, v54, 16, v13
+; GFX11-FAKE16-NEXT: v_lshl_or_b32 v13, v53, 16, v14
+; GFX11-FAKE16-NEXT: v_lshl_or_b32 v14, v52, 16, v19
+; GFX11-FAKE16-NEXT: v_and_b32_e32 v0, 0xffff, v0
+; GFX11-FAKE16-NEXT: v_and_b32_e32 v2, 0xffff, v2
+; GFX11-FAKE16-NEXT: v_lshl_or_b32 v19, v48, 16, v1
+; GFX11-FAKE16-NEXT: v_and_b32_e32 v1, 0xffff, v4
+; GFX11-FAKE16-NEXT: v_lshl_or_b32 v11, v55, 16, v18
+; GFX11-FAKE16-NEXT: v_lshl_or_b32 v15, v15, 16, v20
+; GFX11-FAKE16-NEXT: v_and_b32_e32 v16, 0xffff, v16
+; GFX11-FAKE16-NEXT: v_and_b32_e32 v17, 0xffff, v17
+; GFX11-FAKE16-NEXT: v_lshl_or_b32 v18, v49, 16, v0
+; GFX11-FAKE16-NEXT: v_lshl_or_b32 v20, v39, 16, v2
+; GFX11-FAKE16-NEXT: v_and_b32_e32 v0, 0xffff, v3
+; GFX11-FAKE16-NEXT: v_and_b32_e32 v2, 0xffff, v5
+; GFX11-FAKE16-NEXT: v_and_b32_e32 v3, 0xffff, v6
+; GFX11-FAKE16-NEXT: v_dual_mov_b32 v5, v29 :: v_dual_and_b32 v4, 0xffff, v7
+; GFX11-FAKE16-NEXT: v_lshl_or_b32 v28, v28, 16, v22
+; GFX11-FAKE16-NEXT: v_lshl_or_b32 v22, v37, 16, v1
+; GFX11-FAKE16-NEXT: v_mov_b32_e32 v1, v33
+; GFX11-FAKE16-NEXT: v_lshl_or_b32 v32, v32, 16, v26
+; GFX11-FAKE16-NEXT: v_lshl_or_b32 v26, v67, 16, v24
+; GFX11-FAKE16-NEXT: v_lshl_or_b32 v16, v51, 16, v16
+; GFX11-FAKE16-NEXT: v_lshl_or_b32 v17, v50, 16, v17
+; GFX11-FAKE16-NEXT: v_lshl_or_b32 v21, v38, 16, v0
+; GFX11-FAKE16-NEXT: v_lshl_or_b32 v23, v36, 16, v2
+; GFX11-FAKE16-NEXT: v_lshl_or_b32 v24, v35, 16, v3
+; GFX11-FAKE16-NEXT: v_lshl_or_b32 v25, v34, 16, v4
+; GFX11-FAKE16-NEXT: v_mov_b32_e32 v0, v32
+; GFX11-FAKE16-NEXT: v_dual_mov_b32 v2, v26 :: v_dual_mov_b32 v3, v27
+; GFX11-FAKE16-NEXT: v_mov_b32_e32 v4, v28
+; GFX11-FAKE16-NEXT: v_dual_mov_b32 v6, v30 :: v_dual_mov_b32 v7, v31
+; GFX11-FAKE16-NEXT: s_setpc_b64 s[30:31]
+; GFX11-FAKE16-NEXT: .LBB53_4:
+; GFX11-FAKE16-NEXT: ; implicit-def: $vgpr32
+; GFX11-FAKE16-NEXT: ; implicit-def: $vgpr33
+; GFX11-FAKE16-NEXT: ; implicit-def: $vgpr67
+; GFX11-FAKE16-NEXT: ; implicit-def: $vgpr66
+; GFX11-FAKE16-NEXT: ; implicit-def: $vgpr28
+; GFX11-FAKE16-NEXT: ; implicit-def: $vgpr29
+; GFX11-FAKE16-NEXT: ; implicit-def: $vgpr30
+; GFX11-FAKE16-NEXT: ; implicit-def: $vgpr31
+; GFX11-FAKE16-NEXT: ; implicit-def: $vgpr8
+; GFX11-FAKE16-NEXT: ; implicit-def: $vgpr65
+; GFX11-FAKE16-NEXT: ; implicit-def: $vgpr64
+; GFX11-FAKE16-NEXT: ; implicit-def: $vgpr55
+; GFX11-FAKE16-NEXT: ; implicit-def: $vgpr54
+; GFX11-FAKE16-NEXT: ; implicit-def: $vgpr53
+; GFX11-FAKE16-NEXT: ; implicit-def: $vgpr52
+; GFX11-FAKE16-NEXT: ; implicit-def: $vgpr15
+; GFX11-FAKE16-NEXT: ; implicit-def: $vgpr51
+; GFX11-FAKE16-NEXT: ; implicit-def: $vgpr50
+; GFX11-FAKE16-NEXT: ; implicit-def: $vgpr49
+; GFX11-FAKE16-NEXT: ; implicit-def: $vgpr48
+; GFX11-FAKE16-NEXT: ; implicit-def: $vgpr39
+; GFX11-FAKE16-NEXT: ; implicit-def: $vgpr38
+; GFX11-FAKE16-NEXT: ; implicit-def: $vgpr37
+; GFX11-FAKE16-NEXT: ; implicit-def: $vgpr36
+; GFX11-FAKE16-NEXT: ; implicit-def: $vgpr35
+; GFX11-FAKE16-NEXT: ; implicit-def: $vgpr34
+; GFX11-FAKE16-NEXT: s_branch .LBB53_2
%cmp = icmp eq i32 %b, 0
br i1 %cmp, label %cmp.true, label %cmp.false
@@ -39081,129 +40891,295 @@ define inreg <13 x double> @bitcast_v52f16_to_v13f64_scalar(<52 x half> inreg %a
; GFX11-TRUE16-LABEL: bitcast_v52f16_to_v13f64_scalar:
; GFX11-TRUE16: ; %bb.0:
; GFX11-TRUE16-NEXT: s_waitcnt vmcnt(0) expcnt(0) lgkmcnt(0)
-; GFX11-TRUE16-NEXT: v_mov_b16_e32 v32.h, 0
; GFX11-TRUE16-NEXT: v_cmp_ne_u32_e32 vcc_lo, 0, v8
-; GFX11-TRUE16-NEXT: v_mov_b16_e32 v32.l, v7.h
-; GFX11-TRUE16-NEXT: v_mov_b16_e32 v33.l, v6.h
-; GFX11-TRUE16-NEXT: v_mov_b16_e32 v34.l, v5.h
-; GFX11-TRUE16-NEXT: v_mov_b16_e32 v33.h, v32.h
-; GFX11-TRUE16-NEXT: v_mov_b16_e32 v34.h, v32.h
-; GFX11-TRUE16-NEXT: v_mov_b16_e32 v35.l, v4.h
-; GFX11-TRUE16-NEXT: v_mov_b16_e32 v35.h, v32.h
-; GFX11-TRUE16-NEXT: v_mov_b16_e32 v36.l, v3.h
-; GFX11-TRUE16-NEXT: v_mov_b16_e32 v36.h, v32.h
-; GFX11-TRUE16-NEXT: v_mov_b16_e32 v37.l, v2.h
-; GFX11-TRUE16-NEXT: v_mov_b16_e32 v37.h, v32.h
-; GFX11-TRUE16-NEXT: v_mov_b16_e32 v38.l, v1.h
-; GFX11-TRUE16-NEXT: v_mov_b16_e32 v38.h, v32.h
-; GFX11-TRUE16-NEXT: v_mov_b16_e32 v39.l, v0.h
-; GFX11-TRUE16-NEXT: v_mov_b16_e32 v39.h, v32.h
-; GFX11-TRUE16-NEXT: v_and_b32_e32 v55, 0xffff, v0
-; GFX11-TRUE16-NEXT: v_and_b32_e32 v54, 0xffff, v1
-; GFX11-TRUE16-NEXT: v_and_b32_e32 v53, 0xffff, v2
-; GFX11-TRUE16-NEXT: v_and_b32_e32 v52, 0xffff, v3
-; GFX11-TRUE16-NEXT: v_and_b32_e32 v51, 0xffff, v4
-; GFX11-TRUE16-NEXT: v_and_b32_e32 v50, 0xffff, v5
-; GFX11-TRUE16-NEXT: v_and_b32_e32 v49, 0xffff, v6
-; GFX11-TRUE16-NEXT: v_and_b32_e32 v48, 0xffff, v7
-; GFX11-TRUE16-NEXT: s_lshr_b32 s41, s29, 16
-; GFX11-TRUE16-NEXT: s_lshr_b32 s15, s28, 16
-; GFX11-TRUE16-NEXT: s_lshr_b32 s42, s27, 16
-; GFX11-TRUE16-NEXT: s_lshr_b32 s14, s26, 16
-; GFX11-TRUE16-NEXT: s_lshr_b32 s13, s25, 16
-; GFX11-TRUE16-NEXT: s_lshr_b32 s12, s24, 16
-; GFX11-TRUE16-NEXT: s_lshr_b32 s11, s23, 16
-; GFX11-TRUE16-NEXT: s_lshr_b32 s10, s22, 16
-; GFX11-TRUE16-NEXT: s_lshr_b32 s9, s21, 16
-; GFX11-TRUE16-NEXT: s_lshr_b32 s8, s20, 16
-; GFX11-TRUE16-NEXT: s_lshr_b32 s7, s19, 16
-; GFX11-TRUE16-NEXT: s_lshr_b32 s6, s18, 16
-; GFX11-TRUE16-NEXT: s_lshr_b32 s5, s17, 16
-; GFX11-TRUE16-NEXT: s_lshr_b32 s4, s16, 16
-; GFX11-TRUE16-NEXT: s_lshr_b32 s43, s3, 16
-; GFX11-TRUE16-NEXT: s_lshr_b32 s44, s2, 16
-; GFX11-TRUE16-NEXT: s_lshr_b32 s45, s1, 16
-; GFX11-TRUE16-NEXT: s_lshr_b32 s46, s0, 16
-; GFX11-TRUE16-NEXT: s_mov_b32 s40, 0
-; GFX11-TRUE16-NEXT: s_pack_ll_b32_b16 s0, s0, s46
-; GFX11-TRUE16-NEXT: s_pack_ll_b32_b16 s1, s1, s45
-; GFX11-TRUE16-NEXT: s_pack_ll_b32_b16 s2, s2, s44
-; GFX11-TRUE16-NEXT: s_pack_ll_b32_b16 s3, s3, s43
-; GFX11-TRUE16-NEXT: s_pack_ll_b32_b16 s4, s16, s4
-; GFX11-TRUE16-NEXT: s_pack_ll_b32_b16 s5, s17, s5
-; GFX11-TRUE16-NEXT: s_pack_ll_b32_b16 s6, s18, s6
-; GFX11-TRUE16-NEXT: s_pack_ll_b32_b16 s7, s19, s7
-; GFX11-TRUE16-NEXT: s_pack_ll_b32_b16 s8, s20, s8
-; GFX11-TRUE16-NEXT: s_pack_ll_b32_b16 s9, s21, s9
-; GFX11-TRUE16-NEXT: s_pack_ll_b32_b16 s10, s22, s10
-; GFX11-TRUE16-NEXT: s_pack_ll_b32_b16 s11, s23, s11
-; GFX11-TRUE16-NEXT: s_pack_ll_b32_b16 s12, s24, s12
-; GFX11-TRUE16-NEXT: s_pack_ll_b32_b16 s13, s25, s13
-; GFX11-TRUE16-NEXT: s_pack_ll_b32_b16 s14, s26, s14
-; GFX11-TRUE16-NEXT: s_pack_ll_b32_b16 s17, s27, s42
-; GFX11-TRUE16-NEXT: s_pack_ll_b32_b16 s15, s28, s15
-; GFX11-TRUE16-NEXT: s_pack_ll_b32_b16 s16, s29, s41
+; GFX11-TRUE16-NEXT: s_clause 0x1f
+; GFX11-TRUE16-NEXT: scratch_store_b32 off, v40, s32 offset:316
+; GFX11-TRUE16-NEXT: scratch_store_b32 off, v41, s32 offset:312
+; GFX11-TRUE16-NEXT: scratch_store_b32 off, v42, s32 offset:308
+; GFX11-TRUE16-NEXT: scratch_store_b32 off, v43, s32 offset:304
+; GFX11-TRUE16-NEXT: scratch_store_b32 off, v44, s32 offset:300
+; GFX11-TRUE16-NEXT: scratch_store_b32 off, v45, s32 offset:296
+; GFX11-TRUE16-NEXT: scratch_store_b32 off, v46, s32 offset:292
+; GFX11-TRUE16-NEXT: scratch_store_b32 off, v47, s32 offset:288
+; GFX11-TRUE16-NEXT: scratch_store_b32 off, v56, s32 offset:284
+; GFX11-TRUE16-NEXT: scratch_store_b32 off, v57, s32 offset:280
+; GFX11-TRUE16-NEXT: scratch_store_b32 off, v58, s32 offset:276
+; GFX11-TRUE16-NEXT: scratch_store_b32 off, v59, s32 offset:272
+; GFX11-TRUE16-NEXT: scratch_store_b32 off, v60, s32 offset:268
+; GFX11-TRUE16-NEXT: scratch_store_b32 off, v61, s32 offset:264
+; GFX11-TRUE16-NEXT: scratch_store_b32 off, v62, s32 offset:260
+; GFX11-TRUE16-NEXT: scratch_store_b32 off, v63, s32 offset:256
+; GFX11-TRUE16-NEXT: scratch_store_b32 off, v72, s32 offset:252
+; GFX11-TRUE16-NEXT: scratch_store_b32 off, v73, s32 offset:248
+; GFX11-TRUE16-NEXT: scratch_store_b32 off, v74, s32 offset:244
+; GFX11-TRUE16-NEXT: scratch_store_b32 off, v75, s32 offset:240
+; GFX11-TRUE16-NEXT: scratch_store_b32 off, v76, s32 offset:236
+; GFX11-TRUE16-NEXT: scratch_store_b32 off, v77, s32 offset:232
+; GFX11-TRUE16-NEXT: scratch_store_b32 off, v78, s32 offset:228
+; GFX11-TRUE16-NEXT: scratch_store_b32 off, v79, s32 offset:224
+; GFX11-TRUE16-NEXT: scratch_store_b32 off, v88, s32 offset:220
+; GFX11-TRUE16-NEXT: scratch_store_b32 off, v89, s32 offset:216
+; GFX11-TRUE16-NEXT: scratch_store_b32 off, v90, s32 offset:212
+; GFX11-TRUE16-NEXT: scratch_store_b32 off, v91, s32 offset:208
+; GFX11-TRUE16-NEXT: scratch_store_b32 off, v92, s32 offset:204
+; GFX11-TRUE16-NEXT: scratch_store_b32 off, v93, s32 offset:200
+; GFX11-TRUE16-NEXT: scratch_store_b32 off, v94, s32 offset:196
+; GFX11-TRUE16-NEXT: scratch_store_b32 off, v95, s32 offset:192
+; GFX11-TRUE16-NEXT: s_clause 0x1f
+; GFX11-TRUE16-NEXT: scratch_store_b32 off, v104, s32 offset:188
+; GFX11-TRUE16-NEXT: scratch_store_b32 off, v105, s32 offset:184
+; GFX11-TRUE16-NEXT: scratch_store_b32 off, v106, s32 offset:180
+; GFX11-TRUE16-NEXT: scratch_store_b32 off, v107, s32 offset:176
+; GFX11-TRUE16-NEXT: scratch_store_b32 off, v108, s32 offset:172
+; GFX11-TRUE16-NEXT: scratch_store_b32 off, v109, s32 offset:168
+; GFX11-TRUE16-NEXT: scratch_store_b32 off, v110, s32 offset:164
+; GFX11-TRUE16-NEXT: scratch_store_b32 off, v111, s32 offset:160
+; GFX11-TRUE16-NEXT: scratch_store_b32 off, v120, s32 offset:156
+; GFX11-TRUE16-NEXT: scratch_store_b32 off, v121, s32 offset:152
+; GFX11-TRUE16-NEXT: scratch_store_b32 off, v122, s32 offset:148
+; GFX11-TRUE16-NEXT: scratch_store_b32 off, v123, s32 offset:144
+; GFX11-TRUE16-NEXT: scratch_store_b32 off, v124, s32 offset:140
+; GFX11-TRUE16-NEXT: scratch_store_b32 off, v125, s32 offset:136
+; GFX11-TRUE16-NEXT: scratch_store_b32 off, v126, s32 offset:132
+; GFX11-TRUE16-NEXT: scratch_store_b32 off, v127, s32 offset:128
+; GFX11-TRUE16-NEXT: scratch_store_b32 off, v136, s32 offset:124
+; GFX11-TRUE16-NEXT: scratch_store_b32 off, v137, s32 offset:120
+; GFX11-TRUE16-NEXT: scratch_store_b32 off, v138, s32 offset:116
+; GFX11-TRUE16-NEXT: scratch_store_b32 off, v139, s32 offset:112
+; GFX11-TRUE16-NEXT: scratch_store_b32 off, v140, s32 offset:108
+; GFX11-TRUE16-NEXT: scratch_store_b32 off, v141, s32 offset:104
+; GFX11-TRUE16-NEXT: scratch_store_b32 off, v142, s32 offset:100
+; GFX11-TRUE16-NEXT: scratch_store_b32 off, v143, s32 offset:96
+; GFX11-TRUE16-NEXT: scratch_store_b32 off, v152, s32 offset:92
+; GFX11-TRUE16-NEXT: scratch_store_b32 off, v153, s32 offset:88
+; GFX11-TRUE16-NEXT: scratch_store_b32 off, v154, s32 offset:84
+; GFX11-TRUE16-NEXT: scratch_store_b32 off, v155, s32 offset:80
+; GFX11-TRUE16-NEXT: scratch_store_b32 off, v156, s32 offset:76
+; GFX11-TRUE16-NEXT: scratch_store_b32 off, v157, s32 offset:72
+; GFX11-TRUE16-NEXT: scratch_store_b32 off, v158, s32 offset:68
+; GFX11-TRUE16-NEXT: scratch_store_b32 off, v159, s32 offset:64
+; GFX11-TRUE16-NEXT: s_clause 0xf
+; GFX11-TRUE16-NEXT: scratch_store_b32 off, v168, s32 offset:60
+; GFX11-TRUE16-NEXT: scratch_store_b32 off, v169, s32 offset:56
+; GFX11-TRUE16-NEXT: scratch_store_b32 off, v170, s32 offset:52
+; GFX11-TRUE16-NEXT: scratch_store_b32 off, v171, s32 offset:48
+; GFX11-TRUE16-NEXT: scratch_store_b32 off, v172, s32 offset:44
+; GFX11-TRUE16-NEXT: scratch_store_b32 off, v173, s32 offset:40
+; GFX11-TRUE16-NEXT: scratch_store_b32 off, v174, s32 offset:36
+; GFX11-TRUE16-NEXT: scratch_store_b32 off, v175, s32 offset:32
+; GFX11-TRUE16-NEXT: scratch_store_b32 off, v184, s32 offset:28
+; GFX11-TRUE16-NEXT: scratch_store_b32 off, v185, s32 offset:24
+; GFX11-TRUE16-NEXT: scratch_store_b32 off, v186, s32 offset:20
+; GFX11-TRUE16-NEXT: scratch_store_b32 off, v187, s32 offset:16
+; GFX11-TRUE16-NEXT: scratch_store_b32 off, v188, s32 offset:12
+; GFX11-TRUE16-NEXT: scratch_store_b32 off, v189, s32 offset:8
+; GFX11-TRUE16-NEXT: scratch_store_b32 off, v190, s32 offset:4
+; GFX11-TRUE16-NEXT: scratch_store_b32 off, v191, s32
+; GFX11-TRUE16-NEXT: v_dual_mov_b32 v25, v7 :: v_dual_mov_b32 v186, v6
+; GFX11-TRUE16-NEXT: v_dual_mov_b32 v187, v5 :: v_dual_mov_b32 v188, v4
+; GFX11-TRUE16-NEXT: v_dual_mov_b32 v189, v3 :: v_dual_mov_b32 v190, v2
+; GFX11-TRUE16-NEXT: v_mov_b32_e32 v191, v1
+; GFX11-TRUE16-NEXT: v_mov_b32_e32 v185, v0
+; GFX11-TRUE16-NEXT: s_lshr_b32 s15, s29, 16
+; GFX11-TRUE16-NEXT: s_lshr_b32 s14, s28, 16
+; GFX11-TRUE16-NEXT: s_lshr_b32 s13, s27, 16
+; GFX11-TRUE16-NEXT: s_lshr_b32 s12, s26, 16
+; GFX11-TRUE16-NEXT: s_lshr_b32 s11, s25, 16
+; GFX11-TRUE16-NEXT: s_lshr_b32 s10, s24, 16
+; GFX11-TRUE16-NEXT: s_lshr_b32 s9, s23, 16
+; GFX11-TRUE16-NEXT: s_lshr_b32 s8, s22, 16
+; GFX11-TRUE16-NEXT: s_lshr_b32 s7, s21, 16
+; GFX11-TRUE16-NEXT: s_lshr_b32 s6, s20, 16
+; GFX11-TRUE16-NEXT: s_lshr_b32 s5, s19, 16
+; GFX11-TRUE16-NEXT: s_lshr_b32 s4, s18, 16
+; GFX11-TRUE16-NEXT: s_lshr_b32 s43, s17, 16
+; GFX11-TRUE16-NEXT: s_lshr_b32 s44, s16, 16
+; GFX11-TRUE16-NEXT: s_lshr_b32 s45, s3, 16
+; GFX11-TRUE16-NEXT: s_lshr_b32 s46, s2, 16
+; GFX11-TRUE16-NEXT: s_lshr_b32 s41, s1, 16
+; GFX11-TRUE16-NEXT: s_lshr_b32 s40, s0, 16
+; GFX11-TRUE16-NEXT: s_mov_b32 s42, 0
+; GFX11-TRUE16-NEXT: s_pack_ll_b32_b16 s40, s0, s40
+; GFX11-TRUE16-NEXT: s_pack_ll_b32_b16 s41, s1, s41
+; GFX11-TRUE16-NEXT: s_pack_ll_b32_b16 s0, s2, s46
+; GFX11-TRUE16-NEXT: s_pack_ll_b32_b16 s1, s3, s45
+; GFX11-TRUE16-NEXT: s_pack_ll_b32_b16 s2, s16, s44
+; GFX11-TRUE16-NEXT: s_pack_ll_b32_b16 s3, s17, s43
+; GFX11-TRUE16-NEXT: s_pack_ll_b32_b16 s4, s18, s4
+; GFX11-TRUE16-NEXT: s_pack_ll_b32_b16 s5, s19, s5
+; GFX11-TRUE16-NEXT: s_pack_ll_b32_b16 s6, s20, s6
+; GFX11-TRUE16-NEXT: s_pack_ll_b32_b16 s7, s21, s7
+; GFX11-TRUE16-NEXT: s_pack_ll_b32_b16 s8, s22, s8
+; GFX11-TRUE16-NEXT: s_pack_ll_b32_b16 s9, s23, s9
+; GFX11-TRUE16-NEXT: s_pack_ll_b32_b16 s10, s24, s10
+; GFX11-TRUE16-NEXT: s_pack_ll_b32_b16 s11, s25, s11
+; GFX11-TRUE16-NEXT: s_pack_ll_b32_b16 s12, s26, s12
+; GFX11-TRUE16-NEXT: s_pack_ll_b32_b16 s13, s27, s13
+; GFX11-TRUE16-NEXT: s_pack_ll_b32_b16 s14, s28, s14
+; GFX11-TRUE16-NEXT: s_pack_ll_b32_b16 s15, s29, s15
; GFX11-TRUE16-NEXT: s_and_b32 s47, vcc_lo, exec_lo
; GFX11-TRUE16-NEXT: s_cbranch_scc0 .LBB55_4
; GFX11-TRUE16-NEXT: ; %bb.1: ; %cmp.false
-; GFX11-TRUE16-NEXT: v_lshl_or_b32 v18, v39, 16, v55
-; GFX11-TRUE16-NEXT: v_lshl_or_b32 v19, v38, 16, v54
-; GFX11-TRUE16-NEXT: v_lshl_or_b32 v20, v37, 16, v53
-; GFX11-TRUE16-NEXT: v_lshl_or_b32 v21, v36, 16, v52
-; GFX11-TRUE16-NEXT: v_lshl_or_b32 v22, v35, 16, v51
-; GFX11-TRUE16-NEXT: v_lshl_or_b32 v23, v34, 16, v50
-; GFX11-TRUE16-NEXT: v_lshl_or_b32 v24, v33, 16, v49
-; GFX11-TRUE16-NEXT: v_lshl_or_b32 v25, v32, 16, v48
-; GFX11-TRUE16-NEXT: v_dual_mov_b32 v0, s0 :: v_dual_mov_b32 v1, s1
-; GFX11-TRUE16-NEXT: v_dual_mov_b32 v2, s2 :: v_dual_mov_b32 v3, s3
-; GFX11-TRUE16-NEXT: v_dual_mov_b32 v4, s4 :: v_dual_mov_b32 v5, s5
-; GFX11-TRUE16-NEXT: v_dual_mov_b32 v6, s6 :: v_dual_mov_b32 v7, s7
-; GFX11-TRUE16-NEXT: v_dual_mov_b32 v8, s8 :: v_dual_mov_b32 v9, s9
-; GFX11-TRUE16-NEXT: v_dual_mov_b32 v10, s10 :: v_dual_mov_b32 v11, s11
-; GFX11-TRUE16-NEXT: v_dual_mov_b32 v12, s12 :: v_dual_mov_b32 v13, s13
-; GFX11-TRUE16-NEXT: v_dual_mov_b32 v14, s14 :: v_dual_mov_b32 v15, s17
-; GFX11-TRUE16-NEXT: v_dual_mov_b32 v16, s15 :: v_dual_mov_b32 v17, s16
-; GFX11-TRUE16-NEXT: s_and_not1_b32 vcc_lo, exec_lo, s40
+; GFX11-TRUE16-NEXT: v_dual_mov_b32 v0, s40 :: v_dual_mov_b32 v5, s0
+; GFX11-TRUE16-NEXT: v_dual_mov_b32 v2, s41 :: v_dual_mov_b32 v9, s1
+; GFX11-TRUE16-NEXT: v_dual_mov_b32 v14, s2 :: v_dual_mov_b32 v27, s4
+; GFX11-TRUE16-NEXT: v_dual_mov_b32 v20, s3 :: v_dual_mov_b32 v35, s5
+; GFX11-TRUE16-NEXT: v_dual_mov_b32 v44, s6 :: v_dual_mov_b32 v65, s8
+; GFX11-TRUE16-NEXT: v_dual_mov_b32 v54, s7 :: v_dual_mov_b32 v77, s9
+; GFX11-TRUE16-NEXT: v_dual_mov_b32 v90, s10 :: v_dual_mov_b32 v119, s12
+; GFX11-TRUE16-NEXT: v_dual_mov_b32 v104, s11 :: v_dual_mov_b32 v135, s13
+; GFX11-TRUE16-NEXT: v_mov_b32_e32 v152, s14
+; GFX11-TRUE16-NEXT: v_mov_b32_e32 v170, s15
+; GFX11-TRUE16-NEXT: s_and_not1_b32 vcc_lo, exec_lo, s42
; GFX11-TRUE16-NEXT: s_cbranch_vccnz .LBB55_3
; GFX11-TRUE16-NEXT: .LBB55_2: ; %cmp.true
-; GFX11-TRUE16-NEXT: v_lshl_or_b32 v18, v39, 16, v55
-; GFX11-TRUE16-NEXT: v_lshl_or_b32 v19, v38, 16, v54
-; GFX11-TRUE16-NEXT: v_lshl_or_b32 v20, v37, 16, v53
-; GFX11-TRUE16-NEXT: v_lshl_or_b32 v21, v36, 16, v52
-; GFX11-TRUE16-NEXT: v_lshl_or_b32 v22, v35, 16, v51
-; GFX11-TRUE16-NEXT: v_lshl_or_b32 v23, v34, 16, v50
-; GFX11-TRUE16-NEXT: v_lshl_or_b32 v24, v33, 16, v49
-; GFX11-TRUE16-NEXT: v_lshl_or_b32 v25, v32, 16, v48
-; GFX11-TRUE16-NEXT: v_pk_add_f16 v0, 0x200, s0 op_sel_hi:[0,1]
-; GFX11-TRUE16-NEXT: v_pk_add_f16 v1, 0x200, s1 op_sel_hi:[0,1]
-; GFX11-TRUE16-NEXT: v_pk_add_f16 v2, 0x200, s2 op_sel_hi:[0,1]
-; GFX11-TRUE16-NEXT: v_pk_add_f16 v3, 0x200, s3 op_sel_hi:[0,1]
-; GFX11-TRUE16-NEXT: v_pk_add_f16 v4, 0x200, s4 op_sel_hi:[0,1]
-; GFX11-TRUE16-NEXT: v_pk_add_f16 v5, 0x200, s5 op_sel_hi:[0,1]
-; GFX11-TRUE16-NEXT: v_pk_add_f16 v6, 0x200, s6 op_sel_hi:[0,1]
-; GFX11-TRUE16-NEXT: v_pk_add_f16 v7, 0x200, s7 op_sel_hi:[0,1]
-; GFX11-TRUE16-NEXT: v_pk_add_f16 v8, 0x200, s8 op_sel_hi:[0,1]
-; GFX11-TRUE16-NEXT: v_pk_add_f16 v9, 0x200, s9 op_sel_hi:[0,1]
-; GFX11-TRUE16-NEXT: v_pk_add_f16 v10, 0x200, s10 op_sel_hi:[0,1]
-; GFX11-TRUE16-NEXT: v_pk_add_f16 v11, 0x200, s11 op_sel_hi:[0,1]
-; GFX11-TRUE16-NEXT: v_pk_add_f16 v12, 0x200, s12 op_sel_hi:[0,1]
-; GFX11-TRUE16-NEXT: v_pk_add_f16 v13, 0x200, s13 op_sel_hi:[0,1]
-; GFX11-TRUE16-NEXT: v_pk_add_f16 v14, 0x200, s14 op_sel_hi:[0,1]
-; GFX11-TRUE16-NEXT: v_pk_add_f16 v15, 0x200, s17 op_sel_hi:[0,1]
-; GFX11-TRUE16-NEXT: v_pk_add_f16 v16, 0x200, s15 op_sel_hi:[0,1]
-; GFX11-TRUE16-NEXT: v_pk_add_f16 v17, 0x200, s16 op_sel_hi:[0,1]
-; GFX11-TRUE16-NEXT: v_pk_add_f16 v18, 0x200, v18 op_sel_hi:[0,1]
-; GFX11-TRUE16-NEXT: v_pk_add_f16 v19, 0x200, v19 op_sel_hi:[0,1]
-; GFX11-TRUE16-NEXT: v_pk_add_f16 v20, 0x200, v20 op_sel_hi:[0,1]
-; GFX11-TRUE16-NEXT: v_pk_add_f16 v21, 0x200, v21 op_sel_hi:[0,1]
-; GFX11-TRUE16-NEXT: v_pk_add_f16 v22, 0x200, v22 op_sel_hi:[0,1]
-; GFX11-TRUE16-NEXT: v_pk_add_f16 v23, 0x200, v23 op_sel_hi:[0,1]
-; GFX11-TRUE16-NEXT: v_pk_add_f16 v24, 0x200, v24 op_sel_hi:[0,1]
+; GFX11-TRUE16-NEXT: v_pk_add_f16 v0, 0x200, s40 op_sel_hi:[0,1]
+; GFX11-TRUE16-NEXT: v_pk_add_f16 v2, 0x200, s41 op_sel_hi:[0,1]
+; GFX11-TRUE16-NEXT: v_pk_add_f16 v185, 0x200, v185 op_sel_hi:[0,1]
+; GFX11-TRUE16-NEXT: v_pk_add_f16 v191, 0x200, v191 op_sel_hi:[0,1]
+; GFX11-TRUE16-NEXT: v_pk_add_f16 v190, 0x200, v190 op_sel_hi:[0,1]
+; GFX11-TRUE16-NEXT: v_pk_add_f16 v189, 0x200, v189 op_sel_hi:[0,1]
+; GFX11-TRUE16-NEXT: v_pk_add_f16 v188, 0x200, v188 op_sel_hi:[0,1]
+; GFX11-TRUE16-NEXT: v_pk_add_f16 v187, 0x200, v187 op_sel_hi:[0,1]
+; GFX11-TRUE16-NEXT: v_pk_add_f16 v186, 0x200, v186 op_sel_hi:[0,1]
; GFX11-TRUE16-NEXT: v_pk_add_f16 v25, 0x200, v25 op_sel_hi:[0,1]
+; GFX11-TRUE16-NEXT: v_pk_add_f16 v5, 0x200, s0 op_sel_hi:[0,1]
+; GFX11-TRUE16-NEXT: v_pk_add_f16 v9, 0x200, s1 op_sel_hi:[0,1]
+; GFX11-TRUE16-NEXT: v_pk_add_f16 v14, 0x200, s2 op_sel_hi:[0,1]
+; GFX11-TRUE16-NEXT: v_pk_add_f16 v20, 0x200, s3 op_sel_hi:[0,1]
+; GFX11-TRUE16-NEXT: v_pk_add_f16 v27, 0x200, s4 op_sel_hi:[0,1]
+; GFX11-TRUE16-NEXT: v_pk_add_f16 v35, 0x200, s5 op_sel_hi:[0,1]
+; GFX11-TRUE16-NEXT: v_pk_add_f16 v44, 0x200, s6 op_sel_hi:[0,1]
+; GFX11-TRUE16-NEXT: v_pk_add_f16 v54, 0x200, s7 op_sel_hi:[0,1]
+; GFX11-TRUE16-NEXT: v_pk_add_f16 v65, 0x200, s8 op_sel_hi:[0,1]
+; GFX11-TRUE16-NEXT: v_pk_add_f16 v77, 0x200, s9 op_sel_hi:[0,1]
+; GFX11-TRUE16-NEXT: v_pk_add_f16 v90, 0x200, s10 op_sel_hi:[0,1]
+; GFX11-TRUE16-NEXT: v_pk_add_f16 v104, 0x200, s11 op_sel_hi:[0,1]
+; GFX11-TRUE16-NEXT: v_pk_add_f16 v119, 0x200, s12 op_sel_hi:[0,1]
+; GFX11-TRUE16-NEXT: v_pk_add_f16 v135, 0x200, s13 op_sel_hi:[0,1]
+; GFX11-TRUE16-NEXT: v_pk_add_f16 v152, 0x200, s14 op_sel_hi:[0,1]
+; GFX11-TRUE16-NEXT: v_pk_add_f16 v170, 0x200, s15 op_sel_hi:[0,1]
; GFX11-TRUE16-NEXT: .LBB55_3: ; %end
+; GFX11-TRUE16-NEXT: v_dual_mov_b32 v1, v2 :: v_dual_mov_b32 v2, v5
+; GFX11-TRUE16-NEXT: v_dual_mov_b32 v5, v20 :: v_dual_mov_b32 v6, v27
+; GFX11-TRUE16-NEXT: v_dual_mov_b32 v7, v35 :: v_dual_mov_b32 v8, v44
+; GFX11-TRUE16-NEXT: v_dual_mov_b32 v11, v77 :: v_dual_mov_b32 v12, v90
+; GFX11-TRUE16-NEXT: v_mov_b32_e32 v13, v104
+; GFX11-TRUE16-NEXT: v_dual_mov_b32 v15, v135 :: v_dual_mov_b32 v16, v152
+; GFX11-TRUE16-NEXT: v_dual_mov_b32 v17, v170 :: v_dual_mov_b32 v18, v185
+; GFX11-TRUE16-NEXT: v_dual_mov_b32 v19, v191 :: v_dual_mov_b32 v20, v190
+; GFX11-TRUE16-NEXT: v_dual_mov_b32 v21, v189 :: v_dual_mov_b32 v22, v188
+; GFX11-TRUE16-NEXT: v_dual_mov_b32 v23, v187 :: v_dual_mov_b32 v24, v186
+; GFX11-TRUE16-NEXT: s_clause 0x1f
+; GFX11-TRUE16-NEXT: scratch_load_b32 v191, off, s32
+; GFX11-TRUE16-NEXT: scratch_load_b32 v190, off, s32 offset:4
+; GFX11-TRUE16-NEXT: scratch_load_b32 v189, off, s32 offset:8
+; GFX11-TRUE16-NEXT: scratch_load_b32 v188, off, s32 offset:12
+; GFX11-TRUE16-NEXT: scratch_load_b32 v187, off, s32 offset:16
+; GFX11-TRUE16-NEXT: scratch_load_b32 v186, off, s32 offset:20
+; GFX11-TRUE16-NEXT: scratch_load_b32 v185, off, s32 offset:24
+; GFX11-TRUE16-NEXT: scratch_load_b32 v184, off, s32 offset:28
+; GFX11-TRUE16-NEXT: scratch_load_b32 v175, off, s32 offset:32
+; GFX11-TRUE16-NEXT: scratch_load_b32 v174, off, s32 offset:36
+; GFX11-TRUE16-NEXT: scratch_load_b32 v173, off, s32 offset:40
+; GFX11-TRUE16-NEXT: scratch_load_b32 v172, off, s32 offset:44
+; GFX11-TRUE16-NEXT: scratch_load_b32 v171, off, s32 offset:48
+; GFX11-TRUE16-NEXT: scratch_load_b32 v170, off, s32 offset:52
+; GFX11-TRUE16-NEXT: scratch_load_b32 v169, off, s32 offset:56
+; GFX11-TRUE16-NEXT: scratch_load_b32 v168, off, s32 offset:60
+; GFX11-TRUE16-NEXT: scratch_load_b32 v159, off, s32 offset:64
+; GFX11-TRUE16-NEXT: scratch_load_b32 v158, off, s32 offset:68
+; GFX11-TRUE16-NEXT: scratch_load_b32 v157, off, s32 offset:72
+; GFX11-TRUE16-NEXT: scratch_load_b32 v156, off, s32 offset:76
+; GFX11-TRUE16-NEXT: scratch_load_b32 v155, off, s32 offset:80
+; GFX11-TRUE16-NEXT: scratch_load_b32 v154, off, s32 offset:84
+; GFX11-TRUE16-NEXT: scratch_load_b32 v153, off, s32 offset:88
+; GFX11-TRUE16-NEXT: scratch_load_b32 v152, off, s32 offset:92
+; GFX11-TRUE16-NEXT: scratch_load_b32 v143, off, s32 offset:96
+; GFX11-TRUE16-NEXT: scratch_load_b32 v142, off, s32 offset:100
+; GFX11-TRUE16-NEXT: scratch_load_b32 v141, off, s32 offset:104
+; GFX11-TRUE16-NEXT: scratch_load_b32 v140, off, s32 offset:108
+; GFX11-TRUE16-NEXT: scratch_load_b32 v139, off, s32 offset:112
+; GFX11-TRUE16-NEXT: scratch_load_b32 v138, off, s32 offset:116
+; GFX11-TRUE16-NEXT: scratch_load_b32 v137, off, s32 offset:120
+; GFX11-TRUE16-NEXT: scratch_load_b32 v136, off, s32 offset:124
+; GFX11-TRUE16-NEXT: s_clause 0x1f
+; GFX11-TRUE16-NEXT: scratch_load_b32 v127, off, s32 offset:128
+; GFX11-TRUE16-NEXT: scratch_load_b32 v126, off, s32 offset:132
+; GFX11-TRUE16-NEXT: scratch_load_b32 v125, off, s32 offset:136
+; GFX11-TRUE16-NEXT: scratch_load_b32 v124, off, s32 offset:140
+; GFX11-TRUE16-NEXT: scratch_load_b32 v123, off, s32 offset:144
+; GFX11-TRUE16-NEXT: scratch_load_b32 v122, off, s32 offset:148
+; GFX11-TRUE16-NEXT: scratch_load_b32 v121, off, s32 offset:152
+; GFX11-TRUE16-NEXT: scratch_load_b32 v120, off, s32 offset:156
+; GFX11-TRUE16-NEXT: scratch_load_b32 v111, off, s32 offset:160
+; GFX11-TRUE16-NEXT: scratch_load_b32 v110, off, s32 offset:164
+; GFX11-TRUE16-NEXT: scratch_load_b32 v109, off, s32 offset:168
+; GFX11-TRUE16-NEXT: scratch_load_b32 v108, off, s32 offset:172
+; GFX11-TRUE16-NEXT: scratch_load_b32 v107, off, s32 offset:176
+; GFX11-TRUE16-NEXT: scratch_load_b32 v106, off, s32 offset:180
+; GFX11-TRUE16-NEXT: scratch_load_b32 v105, off, s32 offset:184
+; GFX11-TRUE16-NEXT: scratch_load_b32 v104, off, s32 offset:188
+; GFX11-TRUE16-NEXT: scratch_load_b32 v95, off, s32 offset:192
+; GFX11-TRUE16-NEXT: scratch_load_b32 v94, off, s32 offset:196
+; GFX11-TRUE16-NEXT: scratch_load_b32 v93, off, s32 offset:200
+; GFX11-TRUE16-NEXT: scratch_load_b32 v92, off, s32 offset:204
+; GFX11-TRUE16-NEXT: scratch_load_b32 v91, off, s32 offset:208
+; GFX11-TRUE16-NEXT: scratch_load_b32 v90, off, s32 offset:212
+; GFX11-TRUE16-NEXT: scratch_load_b32 v89, off, s32 offset:216
+; GFX11-TRUE16-NEXT: scratch_load_b32 v88, off, s32 offset:220
+; GFX11-TRUE16-NEXT: scratch_load_b32 v79, off, s32 offset:224
+; GFX11-TRUE16-NEXT: scratch_load_b32 v78, off, s32 offset:228
+; GFX11-TRUE16-NEXT: scratch_load_b32 v77, off, s32 offset:232
+; GFX11-TRUE16-NEXT: scratch_load_b32 v76, off, s32 offset:236
+; GFX11-TRUE16-NEXT: scratch_load_b32 v75, off, s32 offset:240
+; GFX11-TRUE16-NEXT: scratch_load_b32 v74, off, s32 offset:244
+; GFX11-TRUE16-NEXT: scratch_load_b32 v73, off, s32 offset:248
+; GFX11-TRUE16-NEXT: scratch_load_b32 v72, off, s32 offset:252
+; GFX11-TRUE16-NEXT: s_clause 0xf
+; GFX11-TRUE16-NEXT: scratch_load_b32 v63, off, s32 offset:256
+; GFX11-TRUE16-NEXT: scratch_load_b32 v62, off, s32 offset:260
+; GFX11-TRUE16-NEXT: scratch_load_b32 v61, off, s32 offset:264
+; GFX11-TRUE16-NEXT: scratch_load_b32 v60, off, s32 offset:268
+; GFX11-TRUE16-NEXT: scratch_load_b32 v59, off, s32 offset:272
+; GFX11-TRUE16-NEXT: scratch_load_b32 v58, off, s32 offset:276
+; GFX11-TRUE16-NEXT: scratch_load_b32 v57, off, s32 offset:280
+; GFX11-TRUE16-NEXT: scratch_load_b32 v56, off, s32 offset:284
+; GFX11-TRUE16-NEXT: scratch_load_b32 v47, off, s32 offset:288
+; GFX11-TRUE16-NEXT: scratch_load_b32 v46, off, s32 offset:292
+; GFX11-TRUE16-NEXT: scratch_load_b32 v45, off, s32 offset:296
+; GFX11-TRUE16-NEXT: scratch_load_b32 v44, off, s32 offset:300
+; GFX11-TRUE16-NEXT: scratch_load_b32 v43, off, s32 offset:304
+; GFX11-TRUE16-NEXT: scratch_load_b32 v42, off, s32 offset:308
+; GFX11-TRUE16-NEXT: scratch_load_b32 v41, off, s32 offset:312
+; GFX11-TRUE16-NEXT: scratch_load_b32 v40, off, s32 offset:316
+; GFX11-TRUE16-NEXT: v_dual_mov_b32 v3, v9 :: v_dual_mov_b32 v4, v14
+; GFX11-TRUE16-NEXT: v_dual_mov_b32 v9, v54 :: v_dual_mov_b32 v10, v65
+; GFX11-TRUE16-NEXT: v_mov_b32_e32 v14, v119
+; GFX11-TRUE16-NEXT: s_waitcnt vmcnt(0)
; GFX11-TRUE16-NEXT: s_setpc_b64 s[30:31]
; GFX11-TRUE16-NEXT: .LBB55_4:
+; GFX11-TRUE16-NEXT: v_mov_b32_e32 v53, v25
; GFX11-TRUE16-NEXT: ; implicit-def: $vgpr0_vgpr1_vgpr2_vgpr3_vgpr4_vgpr5_vgpr6_vgpr7_vgpr8_vgpr9_vgpr10_vgpr11_vgpr12_vgpr13_vgpr14_vgpr15_vgpr16_vgpr17_vgpr18_vgpr19_vgpr20_vgpr21_vgpr22_vgpr23_vgpr24_vgpr25_vgpr26_vgpr27_vgpr28_vgpr29_vgpr30_vgpr31
+; GFX11-TRUE16-NEXT: ; implicit-def: $vgpr1_vgpr2_vgpr3_vgpr4_vgpr5_vgpr6_vgpr7_vgpr8_vgpr9_vgpr10_vgpr11_vgpr12_vgpr13_vgpr14_vgpr15_vgpr16_vgpr17_vgpr18_vgpr19_vgpr20_vgpr21_vgpr22_vgpr23_vgpr24_vgpr25_vgpr26_vgpr27_vgpr28_vgpr29_vgpr30_vgpr31_vgpr32
+; GFX11-TRUE16-NEXT: ; implicit-def: $vgpr3_vgpr4_vgpr5_vgpr6_vgpr7_vgpr8_vgpr9_vgpr10_vgpr11_vgpr12_vgpr13_vgpr14_vgpr15_vgpr16_vgpr17_vgpr18_vgpr19_vgpr20_vgpr21_vgpr22_vgpr23_vgpr24_vgpr25_vgpr26_vgpr27_vgpr28_vgpr29_vgpr30_vgpr31_vgpr32_vgpr33_vgpr34
+; GFX11-TRUE16-NEXT: ; implicit-def: $vgpr6_vgpr7_vgpr8_vgpr9_vgpr10_vgpr11_vgpr12_vgpr13_vgpr14_vgpr15_vgpr16_vgpr17_vgpr18_vgpr19_vgpr20_vgpr21_vgpr22_vgpr23_vgpr24_vgpr25_vgpr26_vgpr27_vgpr28_vgpr29_vgpr30_vgpr31_vgpr32_vgpr33_vgpr34_vgpr35_vgpr36_vgpr37
+; GFX11-TRUE16-NEXT: ; implicit-def: $vgpr10_vgpr11_vgpr12_vgpr13_vgpr14_vgpr15_vgpr16_vgpr17_vgpr18_vgpr19_vgpr20_vgpr21_vgpr22_vgpr23_vgpr24_vgpr25_vgpr26_vgpr27_vgpr28_vgpr29_vgpr30_vgpr31_vgpr32_vgpr33_vgpr34_vgpr35_vgpr36_vgpr37_vgpr38_vgpr39_vgpr40_vgpr41
+; GFX11-TRUE16-NEXT: ; implicit-def: $vgpr15_vgpr16_vgpr17_vgpr18_vgpr19_vgpr20_vgpr21_vgpr22_vgpr23_vgpr24_vgpr25_vgpr26_vgpr27_vgpr28_vgpr29_vgpr30_vgpr31_vgpr32_vgpr33_vgpr34_vgpr35_vgpr36_vgpr37_vgpr38_vgpr39_vgpr40_vgpr41_vgpr42_vgpr43_vgpr44_vgpr45_vgpr46
+; GFX11-TRUE16-NEXT: ; implicit-def: $vgpr21_vgpr22_vgpr23_vgpr24_vgpr25_vgpr26_vgpr27_vgpr28_vgpr29_vgpr30_vgpr31_vgpr32_vgpr33_vgpr34_vgpr35_vgpr36_vgpr37_vgpr38_vgpr39_vgpr40_vgpr41_vgpr42_vgpr43_vgpr44_vgpr45_vgpr46_vgpr47_vgpr48_vgpr49_vgpr50_vgpr51_vgpr52
+; GFX11-TRUE16-NEXT: s_delay_alu instid0(VALU_DEP_1)
+; GFX11-TRUE16-NEXT: v_mov_b32_e32 v25, v53
+; GFX11-TRUE16-NEXT: ; implicit-def: $vgpr28_vgpr29_vgpr30_vgpr31_vgpr32_vgpr33_vgpr34_vgpr35_vgpr36_vgpr37_vgpr38_vgpr39_vgpr40_vgpr41_vgpr42_vgpr43_vgpr44_vgpr45_vgpr46_vgpr47_vgpr48_vgpr49_vgpr50_vgpr51_vgpr52_vgpr53_vgpr54_vgpr55_vgpr56_vgpr57_vgpr58_vgpr59
+; GFX11-TRUE16-NEXT: ; implicit-def: $vgpr36_vgpr37_vgpr38_vgpr39_vgpr40_vgpr41_vgpr42_vgpr43_vgpr44_vgpr45_vgpr46_vgpr47_vgpr48_vgpr49_vgpr50_vgpr51_vgpr52_vgpr53_vgpr54_vgpr55_vgpr56_vgpr57_vgpr58_vgpr59_vgpr60_vgpr61_vgpr62_vgpr63_vgpr64_vgpr65_vgpr66_vgpr67
+; GFX11-TRUE16-NEXT: ; implicit-def: $vgpr45_vgpr46_vgpr47_vgpr48_vgpr49_vgpr50_vgpr51_vgpr52_vgpr53_vgpr54_vgpr55_vgpr56_vgpr57_vgpr58_vgpr59_vgpr60_vgpr61_vgpr62_vgpr63_vgpr64_vgpr65_vgpr66_vgpr67_vgpr68_vgpr69_vgpr70_vgpr71_vgpr72_vgpr73_vgpr74_vgpr75_vgpr76
+; GFX11-TRUE16-NEXT: ; implicit-def: $vgpr55_vgpr56_vgpr57_vgpr58_vgpr59_vgpr60_vgpr61_vgpr62_vgpr63_vgpr64_vgpr65_vgpr66_vgpr67_vgpr68_vgpr69_vgpr70_vgpr71_vgpr72_vgpr73_vgpr74_vgpr75_vgpr76_vgpr77_vgpr78_vgpr79_vgpr80_vgpr81_vgpr82_vgpr83_vgpr84_vgpr85_vgpr86
+; GFX11-TRUE16-NEXT: ; implicit-def: $vgpr66_vgpr67_vgpr68_vgpr69_vgpr70_vgpr71_vgpr72_vgpr73_vgpr74_vgpr75_vgpr76_vgpr77_vgpr78_vgpr79_vgpr80_vgpr81_vgpr82_vgpr83_vgpr84_vgpr85_vgpr86_vgpr87_vgpr88_vgpr89_vgpr90_vgpr91_vgpr92_vgpr93_vgpr94_vgpr95_vgpr96_vgpr97
+; GFX11-TRUE16-NEXT: ; implicit-def: $vgpr78_vgpr79_vgpr80_vgpr81_vgpr82_vgpr83_vgpr84_vgpr85_vgpr86_vgpr87_vgpr88_vgpr89_vgpr90_vgpr91_vgpr92_vgpr93_vgpr94_vgpr95_vgpr96_vgpr97_vgpr98_vgpr99_vgpr100_vgpr101_vgpr102_vgpr103_vgpr104_vgpr105_vgpr106_vgpr107_vgpr108_vgpr109
+; GFX11-TRUE16-NEXT: ; implicit-def: $vgpr91_vgpr92_vgpr93_vgpr94_vgpr95_vgpr96_vgpr97_vgpr98_vgpr99_vgpr100_vgpr101_vgpr102_vgpr103_vgpr104_vgpr105_vgpr106_vgpr107_vgpr108_vgpr109_vgpr110_vgpr111_vgpr112_vgpr113_vgpr114_vgpr115_vgpr116_vgpr117_vgpr118_vgpr119_vgpr120_vgpr121_vgpr122
+; GFX11-TRUE16-NEXT: ; implicit-def: $vgpr105_vgpr106_vgpr107_vgpr108_vgpr109_vgpr110_vgpr111_vgpr112_vgpr113_vgpr114_vgpr115_vgpr116_vgpr117_vgpr118_vgpr119_vgpr120_vgpr121_vgpr122_vgpr123_vgpr124_vgpr125_vgpr126_vgpr127_vgpr128_vgpr129_vgpr130_vgpr131_vgpr132_vgpr133_vgpr134_vgpr135_vgpr136
+; GFX11-TRUE16-NEXT: ; implicit-def: $vgpr120_vgpr121_vgpr122_vgpr123_vgpr124_vgpr125_vgpr126_vgpr127_vgpr128_vgpr129_vgpr130_vgpr131_vgpr132_vgpr133_vgpr134_vgpr135_vgpr136_vgpr137_vgpr138_vgpr139_vgpr140_vgpr141_vgpr142_vgpr143_vgpr144_vgpr145_vgpr146_vgpr147_vgpr148_vgpr149_vgpr150_vgpr151
+; GFX11-TRUE16-NEXT: ; implicit-def: $vgpr136_vgpr137_vgpr138_vgpr139_vgpr140_vgpr141_vgpr142_vgpr143_vgpr144_vgpr145_vgpr146_vgpr147_vgpr148_vgpr149_vgpr150_vgpr151_vgpr152_vgpr153_vgpr154_vgpr155_vgpr156_vgpr157_vgpr158_vgpr159_vgpr160_vgpr161_vgpr162_vgpr163_vgpr164_vgpr165_vgpr166_vgpr167
+; GFX11-TRUE16-NEXT: ; implicit-def: $vgpr153_vgpr154_vgpr155_vgpr156_vgpr157_vgpr158_vgpr159_vgpr160_vgpr161_vgpr162_vgpr163_vgpr164_vgpr165_vgpr166_vgpr167_vgpr168_vgpr169_vgpr170_vgpr171_vgpr172_vgpr173_vgpr174_vgpr175_vgpr176_vgpr177_vgpr178_vgpr179_vgpr180_vgpr181_vgpr182_vgpr183_vgpr184
; GFX11-TRUE16-NEXT: s_branch .LBB55_2
;
; GFX11-FAKE16-LABEL: bitcast_v52f16_to_v13f64_scalar:
@@ -41806,23 +43782,12 @@ define inreg <52 x half> @bitcast_v52i16_to_v52f16_scalar(<52 x i16> inreg %a, i
; GFX11-TRUE16-LABEL: bitcast_v52i16_to_v52f16_scalar:
; GFX11-TRUE16: ; %bb.0:
; GFX11-TRUE16-NEXT: s_waitcnt vmcnt(0) expcnt(0) lgkmcnt(0)
-; GFX11-TRUE16-NEXT: v_mov_b16_e32 v25.h, 0
; GFX11-TRUE16-NEXT: v_cmp_ne_u32_e32 vcc_lo, 0, v8
-; GFX11-TRUE16-NEXT: v_mov_b16_e32 v25.l, v7.h
-; GFX11-TRUE16-NEXT: v_mov_b16_e32 v24.l, v6.h
-; GFX11-TRUE16-NEXT: v_mov_b16_e32 v23.l, v5.h
-; GFX11-TRUE16-NEXT: v_mov_b16_e32 v24.h, v25.h
-; GFX11-TRUE16-NEXT: v_mov_b16_e32 v23.h, v25.h
-; GFX11-TRUE16-NEXT: v_mov_b16_e32 v22.l, v4.h
-; GFX11-TRUE16-NEXT: v_mov_b16_e32 v22.h, v25.h
-; GFX11-TRUE16-NEXT: v_mov_b16_e32 v21.l, v3.h
-; GFX11-TRUE16-NEXT: v_mov_b16_e32 v21.h, v25.h
-; GFX11-TRUE16-NEXT: v_mov_b16_e32 v20.l, v2.h
-; GFX11-TRUE16-NEXT: v_mov_b16_e32 v20.h, v25.h
-; GFX11-TRUE16-NEXT: v_mov_b16_e32 v19.l, v1.h
-; GFX11-TRUE16-NEXT: v_mov_b16_e32 v19.h, v25.h
-; GFX11-TRUE16-NEXT: v_mov_b16_e32 v18.l, v0.h
-; GFX11-TRUE16-NEXT: v_mov_b16_e32 v18.h, v25.h
+; GFX11-TRUE16-NEXT: v_dual_mov_b32 v25, v7 :: v_dual_mov_b32 v24, v6
+; GFX11-TRUE16-NEXT: v_dual_mov_b32 v23, v5 :: v_dual_mov_b32 v22, v4
+; GFX11-TRUE16-NEXT: v_dual_mov_b32 v21, v3 :: v_dual_mov_b32 v20, v2
+; GFX11-TRUE16-NEXT: v_dual_mov_b32 v19, v1 :: v_dual_mov_b32 v18, v0
+; GFX11-TRUE16-NEXT: v_mov_b16_e32 v26.h, 0
; GFX11-TRUE16-NEXT: s_lshr_b32 s45, s29, 16
; GFX11-TRUE16-NEXT: s_lshr_b32 s44, s28, 16
; GFX11-TRUE16-NEXT: s_lshr_b32 s43, s27, 16
@@ -41844,26 +43809,25 @@ define inreg <52 x half> @bitcast_v52i16_to_v52f16_scalar(<52 x i16> inreg %a, i
; GFX11-TRUE16-NEXT: s_mov_b32 s46, 0
; GFX11-TRUE16-NEXT: s_and_b32 s47, vcc_lo, exec_lo
; GFX11-TRUE16-NEXT: s_cbranch_scc0 .LBB57_3
-; GFX11-TRUE16-NEXT: ; %bb.1: ; %Flow
+; GFX11-TRUE16-NEXT: ; %bb.1: ; %cmp.false
+; GFX11-TRUE16-NEXT: v_mov_b16_e32 v26.l, v25.h
+; GFX11-TRUE16-NEXT: v_mov_b16_e32 v27.l, v24.h
+; GFX11-TRUE16-NEXT: v_mov_b16_e32 v27.h, v26.h
+; GFX11-TRUE16-NEXT: v_mov_b16_e32 v28.l, v23.h
+; GFX11-TRUE16-NEXT: v_mov_b16_e32 v28.h, v26.h
+; GFX11-TRUE16-NEXT: v_mov_b16_e32 v29.l, v22.h
+; GFX11-TRUE16-NEXT: v_mov_b16_e32 v29.h, v26.h
+; GFX11-TRUE16-NEXT: v_mov_b16_e32 v30.l, v21.h
+; GFX11-TRUE16-NEXT: v_mov_b16_e32 v30.h, v26.h
+; GFX11-TRUE16-NEXT: v_mov_b16_e32 v31.l, v20.h
+; GFX11-TRUE16-NEXT: v_mov_b16_e32 v31.h, v26.h
+; GFX11-TRUE16-NEXT: v_mov_b16_e32 v32.l, v19.h
+; GFX11-TRUE16-NEXT: v_mov_b16_e32 v32.h, v26.h
+; GFX11-TRUE16-NEXT: v_mov_b16_e32 v33.l, v18.h
+; GFX11-TRUE16-NEXT: v_mov_b16_e32 v33.h, v26.h
; GFX11-TRUE16-NEXT: s_and_not1_b32 vcc_lo, exec_lo, s46
; GFX11-TRUE16-NEXT: s_cbranch_vccnz .LBB57_4
; GFX11-TRUE16-NEXT: .LBB57_2: ; %cmp.true
-; GFX11-TRUE16-NEXT: v_and_b32_e32 v7, 0xffff, v7
-; GFX11-TRUE16-NEXT: v_and_b32_e32 v6, 0xffff, v6
-; GFX11-TRUE16-NEXT: v_and_b32_e32 v5, 0xffff, v5
-; GFX11-TRUE16-NEXT: v_and_b32_e32 v4, 0xffff, v4
-; GFX11-TRUE16-NEXT: v_and_b32_e32 v3, 0xffff, v3
-; GFX11-TRUE16-NEXT: v_and_b32_e32 v2, 0xffff, v2
-; GFX11-TRUE16-NEXT: v_and_b32_e32 v1, 0xffff, v1
-; GFX11-TRUE16-NEXT: v_and_b32_e32 v0, 0xffff, v0
-; GFX11-TRUE16-NEXT: v_lshl_or_b32 v7, v25, 16, v7
-; GFX11-TRUE16-NEXT: v_lshl_or_b32 v6, v24, 16, v6
-; GFX11-TRUE16-NEXT: v_lshl_or_b32 v5, v23, 16, v5
-; GFX11-TRUE16-NEXT: v_lshl_or_b32 v4, v22, 16, v4
-; GFX11-TRUE16-NEXT: v_lshl_or_b32 v3, v21, 16, v3
-; GFX11-TRUE16-NEXT: v_lshl_or_b32 v2, v20, 16, v2
-; GFX11-TRUE16-NEXT: v_lshl_or_b32 v1, v19, 16, v1
-; GFX11-TRUE16-NEXT: v_lshl_or_b32 v0, v18, 16, v0
; GFX11-TRUE16-NEXT: s_pack_ll_b32_b16 s29, s29, s45
; GFX11-TRUE16-NEXT: s_pack_ll_b32_b16 s28, s28, s44
; GFX11-TRUE16-NEXT: s_pack_ll_b32_b16 s27, s27, s43
@@ -41882,71 +43846,79 @@ define inreg <52 x half> @bitcast_v52i16_to_v52f16_scalar(<52 x i16> inreg %a, i
; GFX11-TRUE16-NEXT: s_pack_ll_b32_b16 s2, s2, s7
; GFX11-TRUE16-NEXT: s_pack_ll_b32_b16 s0, s0, s41
; GFX11-TRUE16-NEXT: s_pack_ll_b32_b16 s1, s1, s4
-; GFX11-TRUE16-NEXT: v_pk_add_u16 v7, v7, 3 op_sel_hi:[1,0]
-; GFX11-TRUE16-NEXT: v_pk_add_u16 v6, v6, 3 op_sel_hi:[1,0]
-; GFX11-TRUE16-NEXT: v_pk_add_u16 v5, v5, 3 op_sel_hi:[1,0]
-; GFX11-TRUE16-NEXT: v_pk_add_u16 v4, v4, 3 op_sel_hi:[1,0]
-; GFX11-TRUE16-NEXT: v_pk_add_u16 v3, v3, 3 op_sel_hi:[1,0]
-; GFX11-TRUE16-NEXT: v_pk_add_u16 v2, v2, 3 op_sel_hi:[1,0]
-; GFX11-TRUE16-NEXT: v_pk_add_u16 v1, v1, 3 op_sel_hi:[1,0]
-; GFX11-TRUE16-NEXT: v_pk_add_u16 v0, v0, 3 op_sel_hi:[1,0]
-; GFX11-TRUE16-NEXT: v_pk_add_u16 v16, s29, 3 op_sel_hi:[1,0]
-; GFX11-TRUE16-NEXT: v_pk_add_u16 v17, s28, 3 op_sel_hi:[1,0]
-; GFX11-TRUE16-NEXT: v_pk_add_u16 v11, s27, 3 op_sel_hi:[1,0]
-; GFX11-TRUE16-NEXT: v_pk_add_u16 v12, s26, 3 op_sel_hi:[1,0]
+; GFX11-TRUE16-NEXT: v_pk_add_u16 v25, v25, 3 op_sel_hi:[1,0]
+; GFX11-TRUE16-NEXT: v_pk_add_u16 v24, v24, 3 op_sel_hi:[1,0]
+; GFX11-TRUE16-NEXT: v_pk_add_u16 v23, v23, 3 op_sel_hi:[1,0]
+; GFX11-TRUE16-NEXT: v_pk_add_u16 v22, v22, 3 op_sel_hi:[1,0]
+; GFX11-TRUE16-NEXT: v_pk_add_u16 v21, v21, 3 op_sel_hi:[1,0]
+; GFX11-TRUE16-NEXT: v_pk_add_u16 v20, v20, 3 op_sel_hi:[1,0]
+; GFX11-TRUE16-NEXT: v_pk_add_u16 v19, v19, 3 op_sel_hi:[1,0]
+; GFX11-TRUE16-NEXT: v_pk_add_u16 v18, v18, 3 op_sel_hi:[1,0]
+; GFX11-TRUE16-NEXT: v_pk_add_u16 v17, s29, 3 op_sel_hi:[1,0]
+; GFX11-TRUE16-NEXT: v_pk_add_u16 v16, s28, 3 op_sel_hi:[1,0]
+; GFX11-TRUE16-NEXT: v_pk_add_u16 v15, s27, 3 op_sel_hi:[1,0]
+; GFX11-TRUE16-NEXT: v_pk_add_u16 v14, s26, 3 op_sel_hi:[1,0]
; GFX11-TRUE16-NEXT: v_pk_add_u16 v13, s25, 3 op_sel_hi:[1,0]
-; GFX11-TRUE16-NEXT: v_pk_add_u16 v14, s15, 3 op_sel_hi:[1,0]
-; GFX11-TRUE16-NEXT: v_pk_add_u16 v15, s14, 3 op_sel_hi:[1,0]
-; GFX11-TRUE16-NEXT: v_pk_add_u16 v8, s13, 3 op_sel_hi:[1,0]
+; GFX11-TRUE16-NEXT: v_pk_add_u16 v12, s15, 3 op_sel_hi:[1,0]
+; GFX11-TRUE16-NEXT: v_pk_add_u16 v11, s14, 3 op_sel_hi:[1,0]
+; GFX11-TRUE16-NEXT: v_pk_add_u16 v10, s13, 3 op_sel_hi:[1,0]
; GFX11-TRUE16-NEXT: v_pk_add_u16 v9, s12, 3 op_sel_hi:[1,0]
-; GFX11-TRUE16-NEXT: v_pk_add_u16 v10, s11, 3 op_sel_hi:[1,0]
-; GFX11-TRUE16-NEXT: v_pk_add_u16 v30, s10, 3 op_sel_hi:[1,0]
-; GFX11-TRUE16-NEXT: v_pk_add_u16 v31, s9, 3 op_sel_hi:[1,0]
-; GFX11-TRUE16-NEXT: v_pk_add_u16 v26, s8, 3 op_sel_hi:[1,0]
-; GFX11-TRUE16-NEXT: v_pk_add_u16 v33, s0, 3 op_sel_hi:[1,0]
-; GFX11-TRUE16-NEXT: v_pk_add_u16 v32, s1, 3 op_sel_hi:[1,0]
-; GFX11-TRUE16-NEXT: v_pk_add_u16 v29, s2, 3 op_sel_hi:[1,0]
-; GFX11-TRUE16-NEXT: v_pk_add_u16 v28, s3, 3 op_sel_hi:[1,0]
-; GFX11-TRUE16-NEXT: v_pk_add_u16 v27, s6, 3 op_sel_hi:[1,0]
-; GFX11-TRUE16-NEXT: v_lshrrev_b32_e32 v67, 16, v33
-; GFX11-TRUE16-NEXT: v_lshrrev_b32_e32 v66, 16, v32
-; GFX11-TRUE16-NEXT: v_lshrrev_b32_e32 v65, 16, v29
-; GFX11-TRUE16-NEXT: v_lshrrev_b32_e32 v64, 16, v28
-; GFX11-TRUE16-NEXT: v_lshrrev_b32_e32 v55, 16, v27
-; GFX11-TRUE16-NEXT: v_lshrrev_b32_e32 v54, 16, v26
-; GFX11-TRUE16-NEXT: v_lshrrev_b32_e32 v53, 16, v31
-; GFX11-TRUE16-NEXT: v_lshrrev_b32_e32 v52, 16, v30
-; GFX11-TRUE16-NEXT: v_lshrrev_b32_e32 v51, 16, v10
+; GFX11-TRUE16-NEXT: v_pk_add_u16 v8, s11, 3 op_sel_hi:[1,0]
+; GFX11-TRUE16-NEXT: v_pk_add_u16 v7, s10, 3 op_sel_hi:[1,0]
+; GFX11-TRUE16-NEXT: v_pk_add_u16 v6, s9, 3 op_sel_hi:[1,0]
+; GFX11-TRUE16-NEXT: v_pk_add_u16 v5, s8, 3 op_sel_hi:[1,0]
+; GFX11-TRUE16-NEXT: v_pk_add_u16 v0, s0, 3 op_sel_hi:[1,0]
+; GFX11-TRUE16-NEXT: v_pk_add_u16 v1, s1, 3 op_sel_hi:[1,0]
+; GFX11-TRUE16-NEXT: v_pk_add_u16 v2, s2, 3 op_sel_hi:[1,0]
+; GFX11-TRUE16-NEXT: v_pk_add_u16 v3, s3, 3 op_sel_hi:[1,0]
+; GFX11-TRUE16-NEXT: v_pk_add_u16 v4, s6, 3 op_sel_hi:[1,0]
+; GFX11-TRUE16-NEXT: v_lshrrev_b32_e32 v67, 16, v0
+; GFX11-TRUE16-NEXT: v_lshrrev_b32_e32 v66, 16, v1
+; GFX11-TRUE16-NEXT: v_lshrrev_b32_e32 v65, 16, v2
+; GFX11-TRUE16-NEXT: v_lshrrev_b32_e32 v64, 16, v3
+; GFX11-TRUE16-NEXT: v_lshrrev_b32_e32 v55, 16, v4
+; GFX11-TRUE16-NEXT: v_lshrrev_b32_e32 v54, 16, v5
+; GFX11-TRUE16-NEXT: v_lshrrev_b32_e32 v53, 16, v6
+; GFX11-TRUE16-NEXT: v_lshrrev_b32_e32 v52, 16, v7
+; GFX11-TRUE16-NEXT: v_lshrrev_b32_e32 v51, 16, v8
; GFX11-TRUE16-NEXT: v_lshrrev_b32_e32 v50, 16, v9
-; GFX11-TRUE16-NEXT: v_lshrrev_b32_e32 v49, 16, v8
-; GFX11-TRUE16-NEXT: v_lshrrev_b32_e32 v48, 16, v15
-; GFX11-TRUE16-NEXT: v_lshrrev_b32_e32 v39, 16, v14
+; GFX11-TRUE16-NEXT: v_lshrrev_b32_e32 v49, 16, v10
+; GFX11-TRUE16-NEXT: v_lshrrev_b32_e32 v48, 16, v11
+; GFX11-TRUE16-NEXT: v_lshrrev_b32_e32 v39, 16, v12
; GFX11-TRUE16-NEXT: v_lshrrev_b32_e32 v38, 16, v13
-; GFX11-TRUE16-NEXT: v_lshrrev_b32_e32 v37, 16, v12
-; GFX11-TRUE16-NEXT: v_lshrrev_b32_e32 v36, 16, v11
-; GFX11-TRUE16-NEXT: v_lshrrev_b32_e32 v35, 16, v17
-; GFX11-TRUE16-NEXT: v_lshrrev_b32_e32 v34, 16, v16
-; GFX11-TRUE16-NEXT: v_lshrrev_b32_e32 v18, 16, v0
-; GFX11-TRUE16-NEXT: v_lshrrev_b32_e32 v19, 16, v1
-; GFX11-TRUE16-NEXT: v_lshrrev_b32_e32 v20, 16, v2
-; GFX11-TRUE16-NEXT: v_lshrrev_b32_e32 v21, 16, v3
-; GFX11-TRUE16-NEXT: v_lshrrev_b32_e32 v22, 16, v4
-; GFX11-TRUE16-NEXT: v_lshrrev_b32_e32 v23, 16, v5
-; GFX11-TRUE16-NEXT: v_lshrrev_b32_e32 v24, 16, v6
-; GFX11-TRUE16-NEXT: v_lshrrev_b32_e32 v25, 16, v7
+; GFX11-TRUE16-NEXT: v_lshrrev_b32_e32 v37, 16, v14
+; GFX11-TRUE16-NEXT: v_lshrrev_b32_e32 v36, 16, v15
+; GFX11-TRUE16-NEXT: v_lshrrev_b32_e32 v35, 16, v16
+; GFX11-TRUE16-NEXT: v_lshrrev_b32_e32 v34, 16, v17
+; GFX11-TRUE16-NEXT: v_lshrrev_b32_e32 v33, 16, v18
+; GFX11-TRUE16-NEXT: v_lshrrev_b32_e32 v32, 16, v19
+; GFX11-TRUE16-NEXT: v_lshrrev_b32_e32 v31, 16, v20
+; GFX11-TRUE16-NEXT: v_lshrrev_b32_e32 v30, 16, v21
+; GFX11-TRUE16-NEXT: v_lshrrev_b32_e32 v29, 16, v22
+; GFX11-TRUE16-NEXT: v_lshrrev_b32_e32 v28, 16, v23
+; GFX11-TRUE16-NEXT: v_lshrrev_b32_e32 v27, 16, v24
+; GFX11-TRUE16-NEXT: v_lshrrev_b32_e32 v26, 16, v25
; GFX11-TRUE16-NEXT: s_branch .LBB57_5
; GFX11-TRUE16-NEXT: .LBB57_3:
+; GFX11-TRUE16-NEXT: ; implicit-def: $vgpr33
+; GFX11-TRUE16-NEXT: ; implicit-def: $vgpr32
+; GFX11-TRUE16-NEXT: ; implicit-def: $vgpr31
+; GFX11-TRUE16-NEXT: ; implicit-def: $vgpr30
+; GFX11-TRUE16-NEXT: ; implicit-def: $vgpr29
+; GFX11-TRUE16-NEXT: ; implicit-def: $vgpr28
+; GFX11-TRUE16-NEXT: ; implicit-def: $vgpr27
+; GFX11-TRUE16-NEXT: ; implicit-def: $vgpr26
; GFX11-TRUE16-NEXT: s_branch .LBB57_2
; GFX11-TRUE16-NEXT: .LBB57_4:
-; GFX11-TRUE16-NEXT: v_dual_mov_b32 v16, s29 :: v_dual_mov_b32 v17, s28
-; GFX11-TRUE16-NEXT: v_dual_mov_b32 v11, s27 :: v_dual_mov_b32 v12, s26
-; GFX11-TRUE16-NEXT: v_dual_mov_b32 v13, s25 :: v_dual_mov_b32 v14, s24
-; GFX11-TRUE16-NEXT: v_dual_mov_b32 v15, s23 :: v_dual_mov_b32 v8, s22
-; GFX11-TRUE16-NEXT: v_dual_mov_b32 v9, s21 :: v_dual_mov_b32 v10, s20
-; GFX11-TRUE16-NEXT: v_dual_mov_b32 v30, s19 :: v_dual_mov_b32 v31, s18
-; GFX11-TRUE16-NEXT: v_dual_mov_b32 v26, s17 :: v_dual_mov_b32 v27, s16
-; GFX11-TRUE16-NEXT: v_dual_mov_b32 v28, s3 :: v_dual_mov_b32 v29, s2
-; GFX11-TRUE16-NEXT: v_dual_mov_b32 v32, s1 :: v_dual_mov_b32 v33, s0
+; GFX11-TRUE16-NEXT: v_dual_mov_b32 v17, s29 :: v_dual_mov_b32 v16, s28
+; GFX11-TRUE16-NEXT: v_dual_mov_b32 v15, s27 :: v_dual_mov_b32 v14, s26
+; GFX11-TRUE16-NEXT: v_dual_mov_b32 v13, s25 :: v_dual_mov_b32 v12, s24
+; GFX11-TRUE16-NEXT: v_dual_mov_b32 v11, s23 :: v_dual_mov_b32 v10, s22
+; GFX11-TRUE16-NEXT: v_dual_mov_b32 v9, s21 :: v_dual_mov_b32 v8, s20
+; GFX11-TRUE16-NEXT: v_dual_mov_b32 v7, s19 :: v_dual_mov_b32 v6, s18
+; GFX11-TRUE16-NEXT: v_dual_mov_b32 v5, s17 :: v_dual_mov_b32 v4, s16
+; GFX11-TRUE16-NEXT: v_dual_mov_b32 v3, s3 :: v_dual_mov_b32 v2, s2
+; GFX11-TRUE16-NEXT: v_dual_mov_b32 v1, s1 :: v_dual_mov_b32 v0, s0
; GFX11-TRUE16-NEXT: v_dual_mov_b32 v34, s45 :: v_dual_mov_b32 v35, s44
; GFX11-TRUE16-NEXT: v_dual_mov_b32 v36, s43 :: v_dual_mov_b32 v37, s42
; GFX11-TRUE16-NEXT: v_dual_mov_b32 v38, s40 :: v_dual_mov_b32 v39, s15
@@ -41957,62 +43929,46 @@ define inreg <52 x half> @bitcast_v52i16_to_v52f16_scalar(<52 x i16> inreg %a, i
; GFX11-TRUE16-NEXT: v_dual_mov_b32 v64, s5 :: v_dual_mov_b32 v65, s7
; GFX11-TRUE16-NEXT: v_dual_mov_b32 v66, s4 :: v_dual_mov_b32 v67, s41
; GFX11-TRUE16-NEXT: .LBB57_5: ; %end
-; GFX11-TRUE16-NEXT: v_and_b32_e32 v33, 0xffff, v33
-; GFX11-TRUE16-NEXT: v_and_b32_e32 v68, 0xffff, v32
-; GFX11-TRUE16-NEXT: v_and_b32_e32 v29, 0xffff, v29
-; GFX11-TRUE16-NEXT: v_and_b32_e32 v28, 0xffff, v28
-; GFX11-TRUE16-NEXT: v_and_b32_e32 v69, 0xffff, v27
-; GFX11-TRUE16-NEXT: v_lshl_or_b32 v32, v67, 16, v33
-; GFX11-TRUE16-NEXT: v_and_b32_e32 v67, 0xffff, v26
-; GFX11-TRUE16-NEXT: v_lshl_or_b32 v26, v65, 16, v29
-; GFX11-TRUE16-NEXT: v_and_b32_e32 v31, 0xffff, v31
-; GFX11-TRUE16-NEXT: v_and_b32_e32 v9, 0xffff, v9
-; GFX11-TRUE16-NEXT: v_and_b32_e32 v2, 0xffff, v2
-; GFX11-TRUE16-NEXT: v_lshl_or_b32 v29, v54, 16, v67
-; GFX11-TRUE16-NEXT: v_and_b32_e32 v54, 0xffff, v30
-; GFX11-TRUE16-NEXT: v_lshl_or_b32 v27, v64, 16, v28
-; GFX11-TRUE16-NEXT: v_lshl_or_b32 v28, v55, 16, v69
-; GFX11-TRUE16-NEXT: v_and_b32_e32 v55, 0xffff, v8
-; GFX11-TRUE16-NEXT: v_lshl_or_b32 v30, v53, 16, v31
-; GFX11-TRUE16-NEXT: v_lshl_or_b32 v9, v50, 16, v9
-; GFX11-TRUE16-NEXT: v_and_b32_e32 v15, 0xffff, v15
-; GFX11-TRUE16-NEXT: v_and_b32_e32 v50, 0xffff, v11
-; GFX11-TRUE16-NEXT: v_and_b32_e32 v0, 0xffff, v0
-; GFX11-TRUE16-NEXT: v_and_b32_e32 v1, 0xffff, v1
-; GFX11-TRUE16-NEXT: v_lshl_or_b32 v20, v20, 16, v2
-; GFX11-TRUE16-NEXT: v_dual_mov_b32 v5, v29 :: v_dual_and_b32 v2, 0xffff, v5
-; GFX11-TRUE16-NEXT: v_lshl_or_b32 v31, v52, 16, v54
-; GFX11-TRUE16-NEXT: v_and_b32_e32 v10, 0xffff, v10
-; GFX11-TRUE16-NEXT: v_lshl_or_b32 v33, v66, 16, v68
-; GFX11-TRUE16-NEXT: v_and_b32_e32 v14, 0xffff, v14
-; GFX11-TRUE16-NEXT: v_and_b32_e32 v13, 0xffff, v13
-; GFX11-TRUE16-NEXT: v_lshl_or_b32 v11, v48, 16, v15
-; GFX11-TRUE16-NEXT: v_lshl_or_b32 v15, v36, 16, v50
-; GFX11-TRUE16-NEXT: v_and_b32_e32 v17, 0xffff, v17
-; GFX11-TRUE16-NEXT: v_and_b32_e32 v36, 0xffff, v16
-; GFX11-TRUE16-NEXT: v_lshl_or_b32 v18, v18, 16, v0
-; GFX11-TRUE16-NEXT: v_lshl_or_b32 v19, v19, 16, v1
-; GFX11-TRUE16-NEXT: v_and_b32_e32 v0, 0xffff, v3
-; GFX11-TRUE16-NEXT: v_and_b32_e32 v1, 0xffff, v4
-; GFX11-TRUE16-NEXT: v_and_b32_e32 v3, 0xffff, v6
-; GFX11-TRUE16-NEXT: v_dual_mov_b32 v7, v31 :: v_dual_and_b32 v4, 0xffff, v7
-; GFX11-TRUE16-NEXT: v_lshl_or_b32 v8, v51, 16, v10
-; GFX11-TRUE16-NEXT: v_lshl_or_b32 v10, v49, 16, v55
-; GFX11-TRUE16-NEXT: v_and_b32_e32 v49, 0xffff, v12
-; GFX11-TRUE16-NEXT: v_lshl_or_b32 v12, v39, 16, v14
-; GFX11-TRUE16-NEXT: v_lshl_or_b32 v13, v38, 16, v13
-; GFX11-TRUE16-NEXT: v_lshl_or_b32 v16, v35, 16, v17
-; GFX11-TRUE16-NEXT: v_lshl_or_b32 v17, v34, 16, v36
-; GFX11-TRUE16-NEXT: v_lshl_or_b32 v14, v37, 16, v49
-; GFX11-TRUE16-NEXT: v_lshl_or_b32 v21, v21, 16, v0
-; GFX11-TRUE16-NEXT: v_lshl_or_b32 v22, v22, 16, v1
-; GFX11-TRUE16-NEXT: v_lshl_or_b32 v23, v23, 16, v2
-; GFX11-TRUE16-NEXT: v_lshl_or_b32 v24, v24, 16, v3
-; GFX11-TRUE16-NEXT: v_lshl_or_b32 v25, v25, 16, v4
-; GFX11-TRUE16-NEXT: v_dual_mov_b32 v0, v32 :: v_dual_mov_b32 v1, v33
-; GFX11-TRUE16-NEXT: v_dual_mov_b32 v2, v26 :: v_dual_mov_b32 v3, v27
-; GFX11-TRUE16-NEXT: v_mov_b32_e32 v4, v28
-; GFX11-TRUE16-NEXT: v_mov_b32_e32 v6, v30
+; GFX11-TRUE16-NEXT: s_delay_alu instid0(VALU_DEP_1) | instskip(NEXT) | instid1(VALU_DEP_3)
+; GFX11-TRUE16-NEXT: v_dual_mov_b32 v67, v67 :: v_dual_mov_b32 v66, v66
+; GFX11-TRUE16-NEXT: v_dual_mov_b32 v65, v65 :: v_dual_mov_b32 v64, v64
+; GFX11-TRUE16-NEXT: v_dual_mov_b32 v55, v55 :: v_dual_mov_b32 v54, v54
+; GFX11-TRUE16-NEXT: v_dual_mov_b32 v53, v53 :: v_dual_mov_b32 v52, v52
+; GFX11-TRUE16-NEXT: v_dual_mov_b32 v51, v51 :: v_dual_mov_b32 v50, v50
+; GFX11-TRUE16-NEXT: v_dual_mov_b32 v49, v49 :: v_dual_mov_b32 v48, v48
+; GFX11-TRUE16-NEXT: v_dual_mov_b32 v39, v39 :: v_dual_mov_b32 v38, v38
+; GFX11-TRUE16-NEXT: v_dual_mov_b32 v37, v37 :: v_dual_mov_b32 v36, v36
+; GFX11-TRUE16-NEXT: v_dual_mov_b32 v35, v35 :: v_dual_mov_b32 v34, v34
+; GFX11-TRUE16-NEXT: v_dual_mov_b32 v33, v33 :: v_dual_mov_b32 v32, v32
+; GFX11-TRUE16-NEXT: v_dual_mov_b32 v31, v31 :: v_dual_mov_b32 v30, v30
+; GFX11-TRUE16-NEXT: v_dual_mov_b32 v29, v29 :: v_dual_mov_b32 v28, v28
+; GFX11-TRUE16-NEXT: v_dual_mov_b32 v27, v27 :: v_dual_mov_b32 v26, v26
+; GFX11-TRUE16-NEXT: v_mov_b16_e32 v0.h, v67.l
+; GFX11-TRUE16-NEXT: v_mov_b16_e32 v1.h, v66.l
+; GFX11-TRUE16-NEXT: v_mov_b16_e32 v2.h, v65.l
+; GFX11-TRUE16-NEXT: v_mov_b16_e32 v3.h, v64.l
+; GFX11-TRUE16-NEXT: v_mov_b16_e32 v4.h, v55.l
+; GFX11-TRUE16-NEXT: v_mov_b16_e32 v5.h, v54.l
+; GFX11-TRUE16-NEXT: v_mov_b16_e32 v6.h, v53.l
+; GFX11-TRUE16-NEXT: v_mov_b16_e32 v7.h, v52.l
+; GFX11-TRUE16-NEXT: v_mov_b16_e32 v8.h, v51.l
+; GFX11-TRUE16-NEXT: v_mov_b16_e32 v9.h, v50.l
+; GFX11-TRUE16-NEXT: v_mov_b16_e32 v10.h, v49.l
+; GFX11-TRUE16-NEXT: v_mov_b16_e32 v11.h, v48.l
+; GFX11-TRUE16-NEXT: v_mov_b16_e32 v12.h, v39.l
+; GFX11-TRUE16-NEXT: v_mov_b16_e32 v13.h, v38.l
+; GFX11-TRUE16-NEXT: v_mov_b16_e32 v14.h, v37.l
+; GFX11-TRUE16-NEXT: v_mov_b16_e32 v15.h, v36.l
+; GFX11-TRUE16-NEXT: v_mov_b16_e32 v16.h, v35.l
+; GFX11-TRUE16-NEXT: v_mov_b16_e32 v17.h, v34.l
+; GFX11-TRUE16-NEXT: v_mov_b16_e32 v18.h, v33.l
+; GFX11-TRUE16-NEXT: v_mov_b16_e32 v19.h, v32.l
+; GFX11-TRUE16-NEXT: v_mov_b16_e32 v20.h, v31.l
+; GFX11-TRUE16-NEXT: v_mov_b16_e32 v21.h, v30.l
+; GFX11-TRUE16-NEXT: v_mov_b16_e32 v22.h, v29.l
+; GFX11-TRUE16-NEXT: v_mov_b16_e32 v23.h, v28.l
+; GFX11-TRUE16-NEXT: v_mov_b16_e32 v24.h, v27.l
+; GFX11-TRUE16-NEXT: v_mov_b16_e32 v25.h, v26.l
; GFX11-TRUE16-NEXT: s_setpc_b64 s[30:31]
;
; GFX11-FAKE16-LABEL: bitcast_v52i16_to_v52f16_scalar:
@@ -44258,23 +46214,12 @@ define inreg <52 x i16> @bitcast_v52f16_to_v52i16_scalar(<52 x half> inreg %a, i
; GFX11-TRUE16-LABEL: bitcast_v52f16_to_v52i16_scalar:
; GFX11-TRUE16: ; %bb.0:
; GFX11-TRUE16-NEXT: s_waitcnt vmcnt(0) expcnt(0) lgkmcnt(0)
-; GFX11-TRUE16-NEXT: v_mov_b16_e32 v25.h, 0
; GFX11-TRUE16-NEXT: v_cmp_ne_u32_e32 vcc_lo, 0, v8
-; GFX11-TRUE16-NEXT: v_mov_b16_e32 v25.l, v7.h
-; GFX11-TRUE16-NEXT: v_mov_b16_e32 v24.l, v6.h
-; GFX11-TRUE16-NEXT: v_mov_b16_e32 v23.l, v5.h
-; GFX11-TRUE16-NEXT: v_mov_b16_e32 v24.h, v25.h
-; GFX11-TRUE16-NEXT: v_mov_b16_e32 v23.h, v25.h
-; GFX11-TRUE16-NEXT: v_mov_b16_e32 v22.l, v4.h
-; GFX11-TRUE16-NEXT: v_mov_b16_e32 v22.h, v25.h
-; GFX11-TRUE16-NEXT: v_mov_b16_e32 v21.l, v3.h
-; GFX11-TRUE16-NEXT: v_mov_b16_e32 v21.h, v25.h
-; GFX11-TRUE16-NEXT: v_mov_b16_e32 v20.l, v2.h
-; GFX11-TRUE16-NEXT: v_mov_b16_e32 v20.h, v25.h
-; GFX11-TRUE16-NEXT: v_mov_b16_e32 v19.l, v1.h
-; GFX11-TRUE16-NEXT: v_mov_b16_e32 v19.h, v25.h
-; GFX11-TRUE16-NEXT: v_mov_b16_e32 v18.l, v0.h
-; GFX11-TRUE16-NEXT: v_mov_b16_e32 v18.h, v25.h
+; GFX11-TRUE16-NEXT: v_dual_mov_b32 v25, v7 :: v_dual_mov_b32 v24, v6
+; GFX11-TRUE16-NEXT: v_dual_mov_b32 v23, v5 :: v_dual_mov_b32 v22, v4
+; GFX11-TRUE16-NEXT: v_dual_mov_b32 v21, v3 :: v_dual_mov_b32 v20, v2
+; GFX11-TRUE16-NEXT: v_dual_mov_b32 v19, v1 :: v_dual_mov_b32 v18, v0
+; GFX11-TRUE16-NEXT: v_mov_b16_e32 v26.h, 0
; GFX11-TRUE16-NEXT: s_lshr_b32 s45, s29, 16
; GFX11-TRUE16-NEXT: s_lshr_b32 s44, s28, 16
; GFX11-TRUE16-NEXT: s_lshr_b32 s43, s27, 16
@@ -44296,26 +46241,25 @@ define inreg <52 x i16> @bitcast_v52f16_to_v52i16_scalar(<52 x half> inreg %a, i
; GFX11-TRUE16-NEXT: s_mov_b32 s46, 0
; GFX11-TRUE16-NEXT: s_and_b32 s47, vcc_lo, exec_lo
; GFX11-TRUE16-NEXT: s_cbranch_scc0 .LBB59_3
-; GFX11-TRUE16-NEXT: ; %bb.1: ; %Flow
+; GFX11-TRUE16-NEXT: ; %bb.1: ; %cmp.false
+; GFX11-TRUE16-NEXT: v_mov_b16_e32 v26.l, v25.h
+; GFX11-TRUE16-NEXT: v_mov_b16_e32 v27.l, v24.h
+; GFX11-TRUE16-NEXT: v_mov_b16_e32 v27.h, v26.h
+; GFX11-TRUE16-NEXT: v_mov_b16_e32 v28.l, v23.h
+; GFX11-TRUE16-NEXT: v_mov_b16_e32 v28.h, v26.h
+; GFX11-TRUE16-NEXT: v_mov_b16_e32 v29.l, v22.h
+; GFX11-TRUE16-NEXT: v_mov_b16_e32 v29.h, v26.h
+; GFX11-TRUE16-NEXT: v_mov_b16_e32 v30.l, v21.h
+; GFX11-TRUE16-NEXT: v_mov_b16_e32 v30.h, v26.h
+; GFX11-TRUE16-NEXT: v_mov_b16_e32 v31.l, v20.h
+; GFX11-TRUE16-NEXT: v_mov_b16_e32 v31.h, v26.h
+; GFX11-TRUE16-NEXT: v_mov_b16_e32 v32.l, v19.h
+; GFX11-TRUE16-NEXT: v_mov_b16_e32 v32.h, v26.h
+; GFX11-TRUE16-NEXT: v_mov_b16_e32 v33.l, v18.h
+; GFX11-TRUE16-NEXT: v_mov_b16_e32 v33.h, v26.h
; GFX11-TRUE16-NEXT: s_and_not1_b32 vcc_lo, exec_lo, s46
; GFX11-TRUE16-NEXT: s_cbranch_vccnz .LBB59_4
; GFX11-TRUE16-NEXT: .LBB59_2: ; %cmp.true
-; GFX11-TRUE16-NEXT: v_and_b32_e32 v7, 0xffff, v7
-; GFX11-TRUE16-NEXT: v_and_b32_e32 v6, 0xffff, v6
-; GFX11-TRUE16-NEXT: v_and_b32_e32 v5, 0xffff, v5
-; GFX11-TRUE16-NEXT: v_and_b32_e32 v4, 0xffff, v4
-; GFX11-TRUE16-NEXT: v_and_b32_e32 v3, 0xffff, v3
-; GFX11-TRUE16-NEXT: v_and_b32_e32 v2, 0xffff, v2
-; GFX11-TRUE16-NEXT: v_and_b32_e32 v1, 0xffff, v1
-; GFX11-TRUE16-NEXT: v_and_b32_e32 v0, 0xffff, v0
-; GFX11-TRUE16-NEXT: v_lshl_or_b32 v7, v25, 16, v7
-; GFX11-TRUE16-NEXT: v_lshl_or_b32 v6, v24, 16, v6
-; GFX11-TRUE16-NEXT: v_lshl_or_b32 v5, v23, 16, v5
-; GFX11-TRUE16-NEXT: v_lshl_or_b32 v4, v22, 16, v4
-; GFX11-TRUE16-NEXT: v_lshl_or_b32 v3, v21, 16, v3
-; GFX11-TRUE16-NEXT: v_lshl_or_b32 v2, v20, 16, v2
-; GFX11-TRUE16-NEXT: v_lshl_or_b32 v1, v19, 16, v1
-; GFX11-TRUE16-NEXT: v_lshl_or_b32 v0, v18, 16, v0
; GFX11-TRUE16-NEXT: s_pack_ll_b32_b16 s29, s29, s45
; GFX11-TRUE16-NEXT: s_pack_ll_b32_b16 s28, s28, s44
; GFX11-TRUE16-NEXT: s_pack_ll_b32_b16 s27, s27, s43
@@ -44334,71 +46278,79 @@ define inreg <52 x i16> @bitcast_v52f16_to_v52i16_scalar(<52 x half> inreg %a, i
; GFX11-TRUE16-NEXT: s_pack_ll_b32_b16 s2, s2, s7
; GFX11-TRUE16-NEXT: s_pack_ll_b32_b16 s0, s0, s41
; GFX11-TRUE16-NEXT: s_pack_ll_b32_b16 s1, s1, s4
-; GFX11-TRUE16-NEXT: v_pk_add_f16 v7, 0x200, v7 op_sel_hi:[0,1]
-; GFX11-TRUE16-NEXT: v_pk_add_f16 v6, 0x200, v6 op_sel_hi:[0,1]
-; GFX11-TRUE16-NEXT: v_pk_add_f16 v5, 0x200, v5 op_sel_hi:[0,1]
-; GFX11-TRUE16-NEXT: v_pk_add_f16 v4, 0x200, v4 op_sel_hi:[0,1]
-; GFX11-TRUE16-NEXT: v_pk_add_f16 v3, 0x200, v3 op_sel_hi:[0,1]
-; GFX11-TRUE16-NEXT: v_pk_add_f16 v2, 0x200, v2 op_sel_hi:[0,1]
-; GFX11-TRUE16-NEXT: v_pk_add_f16 v1, 0x200, v1 op_sel_hi:[0,1]
-; GFX11-TRUE16-NEXT: v_pk_add_f16 v0, 0x200, v0 op_sel_hi:[0,1]
-; GFX11-TRUE16-NEXT: v_pk_add_f16 v16, 0x200, s29 op_sel_hi:[0,1]
-; GFX11-TRUE16-NEXT: v_pk_add_f16 v17, 0x200, s28 op_sel_hi:[0,1]
-; GFX11-TRUE16-NEXT: v_pk_add_f16 v11, 0x200, s27 op_sel_hi:[0,1]
-; GFX11-TRUE16-NEXT: v_pk_add_f16 v12, 0x200, s26 op_sel_hi:[0,1]
+; GFX11-TRUE16-NEXT: v_pk_add_f16 v25, 0x200, v25 op_sel_hi:[0,1]
+; GFX11-TRUE16-NEXT: v_pk_add_f16 v24, 0x200, v24 op_sel_hi:[0,1]
+; GFX11-TRUE16-NEXT: v_pk_add_f16 v23, 0x200, v23 op_sel_hi:[0,1]
+; GFX11-TRUE16-NEXT: v_pk_add_f16 v22, 0x200, v22 op_sel_hi:[0,1]
+; GFX11-TRUE16-NEXT: v_pk_add_f16 v21, 0x200, v21 op_sel_hi:[0,1]
+; GFX11-TRUE16-NEXT: v_pk_add_f16 v20, 0x200, v20 op_sel_hi:[0,1]
+; GFX11-TRUE16-NEXT: v_pk_add_f16 v19, 0x200, v19 op_sel_hi:[0,1]
+; GFX11-TRUE16-NEXT: v_pk_add_f16 v18, 0x200, v18 op_sel_hi:[0,1]
+; GFX11-TRUE16-NEXT: v_pk_add_f16 v17, 0x200, s29 op_sel_hi:[0,1]
+; GFX11-TRUE16-NEXT: v_pk_add_f16 v16, 0x200, s28 op_sel_hi:[0,1]
+; GFX11-TRUE16-NEXT: v_pk_add_f16 v15, 0x200, s27 op_sel_hi:[0,1]
+; GFX11-TRUE16-NEXT: v_pk_add_f16 v14, 0x200, s26 op_sel_hi:[0,1]
; GFX11-TRUE16-NEXT: v_pk_add_f16 v13, 0x200, s25 op_sel_hi:[0,1]
-; GFX11-TRUE16-NEXT: v_pk_add_f16 v14, 0x200, s15 op_sel_hi:[0,1]
-; GFX11-TRUE16-NEXT: v_pk_add_f16 v15, 0x200, s14 op_sel_hi:[0,1]
-; GFX11-TRUE16-NEXT: v_pk_add_f16 v8, 0x200, s13 op_sel_hi:[0,1]
+; GFX11-TRUE16-NEXT: v_pk_add_f16 v12, 0x200, s15 op_sel_hi:[0,1]
+; GFX11-TRUE16-NEXT: v_pk_add_f16 v11, 0x200, s14 op_sel_hi:[0,1]
+; GFX11-TRUE16-NEXT: v_pk_add_f16 v10, 0x200, s13 op_sel_hi:[0,1]
; GFX11-TRUE16-NEXT: v_pk_add_f16 v9, 0x200, s12 op_sel_hi:[0,1]
-; GFX11-TRUE16-NEXT: v_pk_add_f16 v10, 0x200, s11 op_sel_hi:[0,1]
-; GFX11-TRUE16-NEXT: v_pk_add_f16 v30, 0x200, s10 op_sel_hi:[0,1]
-; GFX11-TRUE16-NEXT: v_pk_add_f16 v31, 0x200, s9 op_sel_hi:[0,1]
-; GFX11-TRUE16-NEXT: v_pk_add_f16 v26, 0x200, s8 op_sel_hi:[0,1]
-; GFX11-TRUE16-NEXT: v_pk_add_f16 v33, 0x200, s0 op_sel_hi:[0,1]
-; GFX11-TRUE16-NEXT: v_pk_add_f16 v32, 0x200, s1 op_sel_hi:[0,1]
-; GFX11-TRUE16-NEXT: v_pk_add_f16 v29, 0x200, s2 op_sel_hi:[0,1]
-; GFX11-TRUE16-NEXT: v_pk_add_f16 v28, 0x200, s3 op_sel_hi:[0,1]
-; GFX11-TRUE16-NEXT: v_pk_add_f16 v27, 0x200, s6 op_sel_hi:[0,1]
-; GFX11-TRUE16-NEXT: v_lshrrev_b32_e32 v67, 16, v33
-; GFX11-TRUE16-NEXT: v_lshrrev_b32_e32 v66, 16, v32
-; GFX11-TRUE16-NEXT: v_lshrrev_b32_e32 v65, 16, v29
-; GFX11-TRUE16-NEXT: v_lshrrev_b32_e32 v64, 16, v28
-; GFX11-TRUE16-NEXT: v_lshrrev_b32_e32 v55, 16, v27
-; GFX11-TRUE16-NEXT: v_lshrrev_b32_e32 v54, 16, v26
-; GFX11-TRUE16-NEXT: v_lshrrev_b32_e32 v53, 16, v31
-; GFX11-TRUE16-NEXT: v_lshrrev_b32_e32 v52, 16, v30
-; GFX11-TRUE16-NEXT: v_lshrrev_b32_e32 v51, 16, v10
+; GFX11-TRUE16-NEXT: v_pk_add_f16 v8, 0x200, s11 op_sel_hi:[0,1]
+; GFX11-TRUE16-NEXT: v_pk_add_f16 v7, 0x200, s10 op_sel_hi:[0,1]
+; GFX11-TRUE16-NEXT: v_pk_add_f16 v6, 0x200, s9 op_sel_hi:[0,1]
+; GFX11-TRUE16-NEXT: v_pk_add_f16 v5, 0x200, s8 op_sel_hi:[0,1]
+; GFX11-TRUE16-NEXT: v_pk_add_f16 v0, 0x200, s0 op_sel_hi:[0,1]
+; GFX11-TRUE16-NEXT: v_pk_add_f16 v1, 0x200, s1 op_sel_hi:[0,1]
+; GFX11-TRUE16-NEXT: v_pk_add_f16 v2, 0x200, s2 op_sel_hi:[0,1]
+; GFX11-TRUE16-NEXT: v_pk_add_f16 v3, 0x200, s3 op_sel_hi:[0,1]
+; GFX11-TRUE16-NEXT: v_pk_add_f16 v4, 0x200, s6 op_sel_hi:[0,1]
+; GFX11-TRUE16-NEXT: v_lshrrev_b32_e32 v67, 16, v0
+; GFX11-TRUE16-NEXT: v_lshrrev_b32_e32 v66, 16, v1
+; GFX11-TRUE16-NEXT: v_lshrrev_b32_e32 v65, 16, v2
+; GFX11-TRUE16-NEXT: v_lshrrev_b32_e32 v64, 16, v3
+; GFX11-TRUE16-NEXT: v_lshrrev_b32_e32 v55, 16, v4
+; GFX11-TRUE16-NEXT: v_lshrrev_b32_e32 v54, 16, v5
+; GFX11-TRUE16-NEXT: v_lshrrev_b32_e32 v53, 16, v6
+; GFX11-TRUE16-NEXT: v_lshrrev_b32_e32 v52, 16, v7
+; GFX11-TRUE16-NEXT: v_lshrrev_b32_e32 v51, 16, v8
; GFX11-TRUE16-NEXT: v_lshrrev_b32_e32 v50, 16, v9
-; GFX11-TRUE16-NEXT: v_lshrrev_b32_e32 v49, 16, v8
-; GFX11-TRUE16-NEXT: v_lshrrev_b32_e32 v48, 16, v15
-; GFX11-TRUE16-NEXT: v_lshrrev_b32_e32 v39, 16, v14
+; GFX11-TRUE16-NEXT: v_lshrrev_b32_e32 v49, 16, v10
+; GFX11-TRUE16-NEXT: v_lshrrev_b32_e32 v48, 16, v11
+; GFX11-TRUE16-NEXT: v_lshrrev_b32_e32 v39, 16, v12
; GFX11-TRUE16-NEXT: v_lshrrev_b32_e32 v38, 16, v13
-; GFX11-TRUE16-NEXT: v_lshrrev_b32_e32 v37, 16, v12
-; GFX11-TRUE16-NEXT: v_lshrrev_b32_e32 v36, 16, v11
-; GFX11-TRUE16-NEXT: v_lshrrev_b32_e32 v35, 16, v17
-; GFX11-TRUE16-NEXT: v_lshrrev_b32_e32 v34, 16, v16
-; GFX11-TRUE16-NEXT: v_lshrrev_b32_e32 v18, 16, v0
-; GFX11-TRUE16-NEXT: v_lshrrev_b32_e32 v19, 16, v1
-; GFX11-TRUE16-NEXT: v_lshrrev_b32_e32 v20, 16, v2
-; GFX11-TRUE16-NEXT: v_lshrrev_b32_e32 v21, 16, v3
-; GFX11-TRUE16-NEXT: v_lshrrev_b32_e32 v22, 16, v4
-; GFX11-TRUE16-NEXT: v_lshrrev_b32_e32 v23, 16, v5
-; GFX11-TRUE16-NEXT: v_lshrrev_b32_e32 v24, 16, v6
-; GFX11-TRUE16-NEXT: v_lshrrev_b32_e32 v25, 16, v7
+; GFX11-TRUE16-NEXT: v_lshrrev_b32_e32 v37, 16, v14
+; GFX11-TRUE16-NEXT: v_lshrrev_b32_e32 v36, 16, v15
+; GFX11-TRUE16-NEXT: v_lshrrev_b32_e32 v35, 16, v16
+; GFX11-TRUE16-NEXT: v_lshrrev_b32_e32 v34, 16, v17
+; GFX11-TRUE16-NEXT: v_lshrrev_b32_e32 v33, 16, v18
+; GFX11-TRUE16-NEXT: v_lshrrev_b32_e32 v32, 16, v19
+; GFX11-TRUE16-NEXT: v_lshrrev_b32_e32 v31, 16, v20
+; GFX11-TRUE16-NEXT: v_lshrrev_b32_e32 v30, 16, v21
+; GFX11-TRUE16-NEXT: v_lshrrev_b32_e32 v29, 16, v22
+; GFX11-TRUE16-NEXT: v_lshrrev_b32_e32 v28, 16, v23
+; GFX11-TRUE16-NEXT: v_lshrrev_b32_e32 v27, 16, v24
+; GFX11-TRUE16-NEXT: v_lshrrev_b32_e32 v26, 16, v25
; GFX11-TRUE16-NEXT: s_branch .LBB59_5
; GFX11-TRUE16-NEXT: .LBB59_3:
+; GFX11-TRUE16-NEXT: ; implicit-def: $vgpr33
+; GFX11-TRUE16-NEXT: ; implicit-def: $vgpr32
+; GFX11-TRUE16-NEXT: ; implicit-def: $vgpr31
+; GFX11-TRUE16-NEXT: ; implicit-def: $vgpr30
+; GFX11-TRUE16-NEXT: ; implicit-def: $vgpr29
+; GFX11-TRUE16-NEXT: ; implicit-def: $vgpr28
+; GFX11-TRUE16-NEXT: ; implicit-def: $vgpr27
+; GFX11-TRUE16-NEXT: ; implicit-def: $vgpr26
; GFX11-TRUE16-NEXT: s_branch .LBB59_2
; GFX11-TRUE16-NEXT: .LBB59_4:
-; GFX11-TRUE16-NEXT: v_dual_mov_b32 v16, s29 :: v_dual_mov_b32 v17, s28
-; GFX11-TRUE16-NEXT: v_dual_mov_b32 v11, s27 :: v_dual_mov_b32 v12, s26
-; GFX11-TRUE16-NEXT: v_dual_mov_b32 v13, s25 :: v_dual_mov_b32 v14, s24
-; GFX11-TRUE16-NEXT: v_dual_mov_b32 v15, s23 :: v_dual_mov_b32 v8, s22
-; GFX11-TRUE16-NEXT: v_dual_mov_b32 v9, s21 :: v_dual_mov_b32 v10, s20
-; GFX11-TRUE16-NEXT: v_dual_mov_b32 v30, s19 :: v_dual_mov_b32 v31, s18
-; GFX11-TRUE16-NEXT: v_dual_mov_b32 v26, s17 :: v_dual_mov_b32 v27, s16
-; GFX11-TRUE16-NEXT: v_dual_mov_b32 v28, s3 :: v_dual_mov_b32 v29, s2
-; GFX11-TRUE16-NEXT: v_dual_mov_b32 v32, s1 :: v_dual_mov_b32 v33, s0
+; GFX11-TRUE16-NEXT: v_dual_mov_b32 v17, s29 :: v_dual_mov_b32 v16, s28
+; GFX11-TRUE16-NEXT: v_dual_mov_b32 v15, s27 :: v_dual_mov_b32 v14, s26
+; GFX11-TRUE16-NEXT: v_dual_mov_b32 v13, s25 :: v_dual_mov_b32 v12, s24
+; GFX11-TRUE16-NEXT: v_dual_mov_b32 v11, s23 :: v_dual_mov_b32 v10, s22
+; GFX11-TRUE16-NEXT: v_dual_mov_b32 v9, s21 :: v_dual_mov_b32 v8, s20
+; GFX11-TRUE16-NEXT: v_dual_mov_b32 v7, s19 :: v_dual_mov_b32 v6, s18
+; GFX11-TRUE16-NEXT: v_dual_mov_b32 v5, s17 :: v_dual_mov_b32 v4, s16
+; GFX11-TRUE16-NEXT: v_dual_mov_b32 v3, s3 :: v_dual_mov_b32 v2, s2
+; GFX11-TRUE16-NEXT: v_dual_mov_b32 v1, s1 :: v_dual_mov_b32 v0, s0
; GFX11-TRUE16-NEXT: v_dual_mov_b32 v34, s45 :: v_dual_mov_b32 v35, s44
; GFX11-TRUE16-NEXT: v_dual_mov_b32 v36, s43 :: v_dual_mov_b32 v37, s42
; GFX11-TRUE16-NEXT: v_dual_mov_b32 v38, s40 :: v_dual_mov_b32 v39, s15
@@ -44409,62 +46361,46 @@ define inreg <52 x i16> @bitcast_v52f16_to_v52i16_scalar(<52 x half> inreg %a, i
; GFX11-TRUE16-NEXT: v_dual_mov_b32 v64, s5 :: v_dual_mov_b32 v65, s7
; GFX11-TRUE16-NEXT: v_dual_mov_b32 v66, s4 :: v_dual_mov_b32 v67, s41
; GFX11-TRUE16-NEXT: .LBB59_5: ; %end
-; GFX11-TRUE16-NEXT: v_and_b32_e32 v33, 0xffff, v33
-; GFX11-TRUE16-NEXT: v_and_b32_e32 v68, 0xffff, v32
-; GFX11-TRUE16-NEXT: v_and_b32_e32 v29, 0xffff, v29
-; GFX11-TRUE16-NEXT: v_and_b32_e32 v28, 0xffff, v28
-; GFX11-TRUE16-NEXT: v_and_b32_e32 v69, 0xffff, v27
-; GFX11-TRUE16-NEXT: v_lshl_or_b32 v32, v67, 16, v33
-; GFX11-TRUE16-NEXT: v_and_b32_e32 v67, 0xffff, v26
-; GFX11-TRUE16-NEXT: v_lshl_or_b32 v26, v65, 16, v29
-; GFX11-TRUE16-NEXT: v_and_b32_e32 v31, 0xffff, v31
-; GFX11-TRUE16-NEXT: v_and_b32_e32 v9, 0xffff, v9
-; GFX11-TRUE16-NEXT: v_and_b32_e32 v2, 0xffff, v2
-; GFX11-TRUE16-NEXT: v_lshl_or_b32 v29, v54, 16, v67
-; GFX11-TRUE16-NEXT: v_and_b32_e32 v54, 0xffff, v30
-; GFX11-TRUE16-NEXT: v_lshl_or_b32 v27, v64, 16, v28
-; GFX11-TRUE16-NEXT: v_lshl_or_b32 v28, v55, 16, v69
-; GFX11-TRUE16-NEXT: v_and_b32_e32 v55, 0xffff, v8
-; GFX11-TRUE16-NEXT: v_lshl_or_b32 v30, v53, 16, v31
-; GFX11-TRUE16-NEXT: v_lshl_or_b32 v9, v50, 16, v9
-; GFX11-TRUE16-NEXT: v_and_b32_e32 v15, 0xffff, v15
-; GFX11-TRUE16-NEXT: v_and_b32_e32 v50, 0xffff, v11
-; GFX11-TRUE16-NEXT: v_and_b32_e32 v0, 0xffff, v0
-; GFX11-TRUE16-NEXT: v_and_b32_e32 v1, 0xffff, v1
-; GFX11-TRUE16-NEXT: v_lshl_or_b32 v20, v20, 16, v2
-; GFX11-TRUE16-NEXT: v_dual_mov_b32 v5, v29 :: v_dual_and_b32 v2, 0xffff, v5
-; GFX11-TRUE16-NEXT: v_lshl_or_b32 v31, v52, 16, v54
-; GFX11-TRUE16-NEXT: v_and_b32_e32 v10, 0xffff, v10
-; GFX11-TRUE16-NEXT: v_lshl_or_b32 v33, v66, 16, v68
-; GFX11-TRUE16-NEXT: v_and_b32_e32 v14, 0xffff, v14
-; GFX11-TRUE16-NEXT: v_and_b32_e32 v13, 0xffff, v13
-; GFX11-TRUE16-NEXT: v_lshl_or_b32 v11, v48, 16, v15
-; GFX11-TRUE16-NEXT: v_lshl_or_b32 v15, v36, 16, v50
-; GFX11-TRUE16-NEXT: v_and_b32_e32 v17, 0xffff, v17
-; GFX11-TRUE16-NEXT: v_and_b32_e32 v36, 0xffff, v16
-; GFX11-TRUE16-NEXT: v_lshl_or_b32 v18, v18, 16, v0
-; GFX11-TRUE16-NEXT: v_lshl_or_b32 v19, v19, 16, v1
-; GFX11-TRUE16-NEXT: v_and_b32_e32 v0, 0xffff, v3
-; GFX11-TRUE16-NEXT: v_and_b32_e32 v1, 0xffff, v4
-; GFX11-TRUE16-NEXT: v_and_b32_e32 v3, 0xffff, v6
-; GFX11-TRUE16-NEXT: v_dual_mov_b32 v7, v31 :: v_dual_and_b32 v4, 0xffff, v7
-; GFX11-TRUE16-NEXT: v_lshl_or_b32 v8, v51, 16, v10
-; GFX11-TRUE16-NEXT: v_lshl_or_b32 v10, v49, 16, v55
-; GFX11-TRUE16-NEXT: v_and_b32_e32 v49, 0xffff, v12
-; GFX11-TRUE16-NEXT: v_lshl_or_b32 v12, v39, 16, v14
-; GFX11-TRUE16-NEXT: v_lshl_or_b32 v13, v38, 16, v13
-; GFX11-TRUE16-NEXT: v_lshl_or_b32 v16, v35, 16, v17
-; GFX11-TRUE16-NEXT: v_lshl_or_b32 v17, v34, 16, v36
-; GFX11-TRUE16-NEXT: v_lshl_or_b32 v14, v37, 16, v49
-; GFX11-TRUE16-NEXT: v_lshl_or_b32 v21, v21, 16, v0
-; GFX11-TRUE16-NEXT: v_lshl_or_b32 v22, v22, 16, v1
-; GFX11-TRUE16-NEXT: v_lshl_or_b32 v23, v23, 16, v2
-; GFX11-TRUE16-NEXT: v_lshl_or_b32 v24, v24, 16, v3
-; GFX11-TRUE16-NEXT: v_lshl_or_b32 v25, v25, 16, v4
-; GFX11-TRUE16-NEXT: v_dual_mov_b32 v0, v32 :: v_dual_mov_b32 v1, v33
-; GFX11-TRUE16-NEXT: v_dual_mov_b32 v2, v26 :: v_dual_mov_b32 v3, v27
-; GFX11-TRUE16-NEXT: v_mov_b32_e32 v4, v28
-; GFX11-TRUE16-NEXT: v_mov_b32_e32 v6, v30
+; GFX11-TRUE16-NEXT: s_delay_alu instid0(VALU_DEP_1) | instskip(NEXT) | instid1(VALU_DEP_3)
+; GFX11-TRUE16-NEXT: v_dual_mov_b32 v67, v67 :: v_dual_mov_b32 v66, v66
+; GFX11-TRUE16-NEXT: v_dual_mov_b32 v65, v65 :: v_dual_mov_b32 v64, v64
+; GFX11-TRUE16-NEXT: v_dual_mov_b32 v55, v55 :: v_dual_mov_b32 v54, v54
+; GFX11-TRUE16-NEXT: v_dual_mov_b32 v53, v53 :: v_dual_mov_b32 v52, v52
+; GFX11-TRUE16-NEXT: v_dual_mov_b32 v51, v51 :: v_dual_mov_b32 v50, v50
+; GFX11-TRUE16-NEXT: v_dual_mov_b32 v49, v49 :: v_dual_mov_b32 v48, v48
+; GFX11-TRUE16-NEXT: v_dual_mov_b32 v39, v39 :: v_dual_mov_b32 v38, v38
+; GFX11-TRUE16-NEXT: v_dual_mov_b32 v37, v37 :: v_dual_mov_b32 v36, v36
+; GFX11-TRUE16-NEXT: v_dual_mov_b32 v35, v35 :: v_dual_mov_b32 v34, v34
+; GFX11-TRUE16-NEXT: v_dual_mov_b32 v33, v33 :: v_dual_mov_b32 v32, v32
+; GFX11-TRUE16-NEXT: v_dual_mov_b32 v31, v31 :: v_dual_mov_b32 v30, v30
+; GFX11-TRUE16-NEXT: v_dual_mov_b32 v29, v29 :: v_dual_mov_b32 v28, v28
+; GFX11-TRUE16-NEXT: v_dual_mov_b32 v27, v27 :: v_dual_mov_b32 v26, v26
+; GFX11-TRUE16-NEXT: v_mov_b16_e32 v0.h, v67.l
+; GFX11-TRUE16-NEXT: v_mov_b16_e32 v1.h, v66.l
+; GFX11-TRUE16-NEXT: v_mov_b16_e32 v2.h, v65.l
+; GFX11-TRUE16-NEXT: v_mov_b16_e32 v3.h, v64.l
+; GFX11-TRUE16-NEXT: v_mov_b16_e32 v4.h, v55.l
+; GFX11-TRUE16-NEXT: v_mov_b16_e32 v5.h, v54.l
+; GFX11-TRUE16-NEXT: v_mov_b16_e32 v6.h, v53.l
+; GFX11-TRUE16-NEXT: v_mov_b16_e32 v7.h, v52.l
+; GFX11-TRUE16-NEXT: v_mov_b16_e32 v8.h, v51.l
+; GFX11-TRUE16-NEXT: v_mov_b16_e32 v9.h, v50.l
+; GFX11-TRUE16-NEXT: v_mov_b16_e32 v10.h, v49.l
+; GFX11-TRUE16-NEXT: v_mov_b16_e32 v11.h, v48.l
+; GFX11-TRUE16-NEXT: v_mov_b16_e32 v12.h, v39.l
+; GFX11-TRUE16-NEXT: v_mov_b16_e32 v13.h, v38.l
+; GFX11-TRUE16-NEXT: v_mov_b16_e32 v14.h, v37.l
+; GFX11-TRUE16-NEXT: v_mov_b16_e32 v15.h, v36.l
+; GFX11-TRUE16-NEXT: v_mov_b16_e32 v16.h, v35.l
+; GFX11-TRUE16-NEXT: v_mov_b16_e32 v17.h, v34.l
+; GFX11-TRUE16-NEXT: v_mov_b16_e32 v18.h, v33.l
+; GFX11-TRUE16-NEXT: v_mov_b16_e32 v19.h, v32.l
+; GFX11-TRUE16-NEXT: v_mov_b16_e32 v20.h, v31.l
+; GFX11-TRUE16-NEXT: v_mov_b16_e32 v21.h, v30.l
+; GFX11-TRUE16-NEXT: v_mov_b16_e32 v22.h, v29.l
+; GFX11-TRUE16-NEXT: v_mov_b16_e32 v23.h, v28.l
+; GFX11-TRUE16-NEXT: v_mov_b16_e32 v24.h, v27.l
+; GFX11-TRUE16-NEXT: v_mov_b16_e32 v25.h, v26.l
; GFX11-TRUE16-NEXT: s_setpc_b64 s[30:31]
;
; GFX11-FAKE16-LABEL: bitcast_v52f16_to_v52i16_scalar:
diff --git a/llvm/test/CodeGen/AMDGPU/amdgcn.bitcast.896bit.ll b/llvm/test/CodeGen/AMDGPU/amdgcn.bitcast.896bit.ll
index 8ee5b96..8eb71e9 100644
--- a/llvm/test/CodeGen/AMDGPU/amdgcn.bitcast.896bit.ll
+++ b/llvm/test/CodeGen/AMDGPU/amdgcn.bitcast.896bit.ll
@@ -6779,141 +6779,299 @@ define inreg <28 x i32> @bitcast_v56i16_to_v28i32_scalar(<56 x i16> inreg %a, i3
; GFX11-TRUE16-LABEL: bitcast_v56i16_to_v28i32_scalar:
; GFX11-TRUE16: ; %bb.0:
; GFX11-TRUE16-NEXT: s_waitcnt vmcnt(0) expcnt(0) lgkmcnt(0)
-; GFX11-TRUE16-NEXT: v_mov_b16_e32 v32.h, 0
; GFX11-TRUE16-NEXT: v_cmp_ne_u32_e32 vcc_lo, 0, v10
-; GFX11-TRUE16-NEXT: v_mov_b16_e32 v32.l, v9.h
-; GFX11-TRUE16-NEXT: v_mov_b16_e32 v33.l, v8.h
-; GFX11-TRUE16-NEXT: v_mov_b16_e32 v34.l, v7.h
-; GFX11-TRUE16-NEXT: v_mov_b16_e32 v33.h, v32.h
-; GFX11-TRUE16-NEXT: v_mov_b16_e32 v34.h, v32.h
-; GFX11-TRUE16-NEXT: v_mov_b16_e32 v35.l, v6.h
-; GFX11-TRUE16-NEXT: v_mov_b16_e32 v35.h, v32.h
-; GFX11-TRUE16-NEXT: v_mov_b16_e32 v36.l, v5.h
-; GFX11-TRUE16-NEXT: v_mov_b16_e32 v36.h, v32.h
-; GFX11-TRUE16-NEXT: v_mov_b16_e32 v37.l, v4.h
-; GFX11-TRUE16-NEXT: v_mov_b16_e32 v37.h, v32.h
-; GFX11-TRUE16-NEXT: v_mov_b16_e32 v38.l, v3.h
-; GFX11-TRUE16-NEXT: v_mov_b16_e32 v38.h, v32.h
-; GFX11-TRUE16-NEXT: v_mov_b16_e32 v39.l, v2.h
-; GFX11-TRUE16-NEXT: v_mov_b16_e32 v39.h, v32.h
-; GFX11-TRUE16-NEXT: v_mov_b16_e32 v48.l, v1.h
-; GFX11-TRUE16-NEXT: v_mov_b16_e32 v48.h, v32.h
-; GFX11-TRUE16-NEXT: v_mov_b16_e32 v49.l, v0.h
-; GFX11-TRUE16-NEXT: v_mov_b16_e32 v49.h, v32.h
-; GFX11-TRUE16-NEXT: v_and_b32_e32 v67, 0xffff, v0
-; GFX11-TRUE16-NEXT: v_and_b32_e32 v66, 0xffff, v1
-; GFX11-TRUE16-NEXT: v_and_b32_e32 v65, 0xffff, v2
-; GFX11-TRUE16-NEXT: v_and_b32_e32 v64, 0xffff, v3
-; GFX11-TRUE16-NEXT: v_and_b32_e32 v55, 0xffff, v4
-; GFX11-TRUE16-NEXT: v_and_b32_e32 v54, 0xffff, v5
-; GFX11-TRUE16-NEXT: v_and_b32_e32 v53, 0xffff, v6
-; GFX11-TRUE16-NEXT: v_and_b32_e32 v52, 0xffff, v7
-; GFX11-TRUE16-NEXT: v_and_b32_e32 v51, 0xffff, v8
-; GFX11-TRUE16-NEXT: v_and_b32_e32 v50, 0xffff, v9
-; GFX11-TRUE16-NEXT: s_lshr_b32 s40, s29, 16
-; GFX11-TRUE16-NEXT: s_lshr_b32 s41, s28, 16
-; GFX11-TRUE16-NEXT: s_lshr_b32 s42, s27, 16
-; GFX11-TRUE16-NEXT: s_lshr_b32 s14, s26, 16
-; GFX11-TRUE16-NEXT: s_lshr_b32 s13, s25, 16
-; GFX11-TRUE16-NEXT: s_lshr_b32 s12, s24, 16
-; GFX11-TRUE16-NEXT: s_lshr_b32 s11, s23, 16
-; GFX11-TRUE16-NEXT: s_lshr_b32 s10, s22, 16
-; GFX11-TRUE16-NEXT: s_lshr_b32 s9, s21, 16
-; GFX11-TRUE16-NEXT: s_lshr_b32 s8, s20, 16
-; GFX11-TRUE16-NEXT: s_lshr_b32 s7, s19, 16
-; GFX11-TRUE16-NEXT: s_lshr_b32 s6, s18, 16
-; GFX11-TRUE16-NEXT: s_lshr_b32 s5, s17, 16
-; GFX11-TRUE16-NEXT: s_lshr_b32 s4, s16, 16
-; GFX11-TRUE16-NEXT: s_lshr_b32 s43, s3, 16
-; GFX11-TRUE16-NEXT: s_lshr_b32 s44, s2, 16
-; GFX11-TRUE16-NEXT: s_lshr_b32 s45, s1, 16
-; GFX11-TRUE16-NEXT: s_lshr_b32 s46, s0, 16
-; GFX11-TRUE16-NEXT: s_mov_b32 s15, 0
-; GFX11-TRUE16-NEXT: s_pack_ll_b32_b16 s0, s0, s46
-; GFX11-TRUE16-NEXT: s_pack_ll_b32_b16 s1, s1, s45
-; GFX11-TRUE16-NEXT: s_pack_ll_b32_b16 s2, s2, s44
-; GFX11-TRUE16-NEXT: s_pack_ll_b32_b16 s3, s3, s43
-; GFX11-TRUE16-NEXT: s_pack_ll_b32_b16 s4, s16, s4
-; GFX11-TRUE16-NEXT: s_pack_ll_b32_b16 s5, s17, s5
-; GFX11-TRUE16-NEXT: s_pack_ll_b32_b16 s6, s18, s6
-; GFX11-TRUE16-NEXT: s_pack_ll_b32_b16 s7, s19, s7
-; GFX11-TRUE16-NEXT: s_pack_ll_b32_b16 s8, s20, s8
-; GFX11-TRUE16-NEXT: s_pack_ll_b32_b16 s9, s21, s9
-; GFX11-TRUE16-NEXT: s_pack_ll_b32_b16 s10, s22, s10
-; GFX11-TRUE16-NEXT: s_pack_ll_b32_b16 s11, s23, s11
-; GFX11-TRUE16-NEXT: s_pack_ll_b32_b16 s12, s24, s12
-; GFX11-TRUE16-NEXT: s_pack_ll_b32_b16 s13, s25, s13
-; GFX11-TRUE16-NEXT: s_pack_ll_b32_b16 s14, s26, s14
-; GFX11-TRUE16-NEXT: s_pack_ll_b32_b16 s16, s27, s42
-; GFX11-TRUE16-NEXT: s_pack_ll_b32_b16 s17, s28, s41
-; GFX11-TRUE16-NEXT: s_pack_ll_b32_b16 s18, s29, s40
+; GFX11-TRUE16-NEXT: s_clause 0x1f
+; GFX11-TRUE16-NEXT: scratch_store_b32 off, v40, s32 offset:316
+; GFX11-TRUE16-NEXT: scratch_store_b32 off, v41, s32 offset:312
+; GFX11-TRUE16-NEXT: scratch_store_b32 off, v42, s32 offset:308
+; GFX11-TRUE16-NEXT: scratch_store_b32 off, v43, s32 offset:304
+; GFX11-TRUE16-NEXT: scratch_store_b32 off, v44, s32 offset:300
+; GFX11-TRUE16-NEXT: scratch_store_b32 off, v45, s32 offset:296
+; GFX11-TRUE16-NEXT: scratch_store_b32 off, v46, s32 offset:292
+; GFX11-TRUE16-NEXT: scratch_store_b32 off, v47, s32 offset:288
+; GFX11-TRUE16-NEXT: scratch_store_b32 off, v56, s32 offset:284
+; GFX11-TRUE16-NEXT: scratch_store_b32 off, v57, s32 offset:280
+; GFX11-TRUE16-NEXT: scratch_store_b32 off, v58, s32 offset:276
+; GFX11-TRUE16-NEXT: scratch_store_b32 off, v59, s32 offset:272
+; GFX11-TRUE16-NEXT: scratch_store_b32 off, v60, s32 offset:268
+; GFX11-TRUE16-NEXT: scratch_store_b32 off, v61, s32 offset:264
+; GFX11-TRUE16-NEXT: scratch_store_b32 off, v62, s32 offset:260
+; GFX11-TRUE16-NEXT: scratch_store_b32 off, v63, s32 offset:256
+; GFX11-TRUE16-NEXT: scratch_store_b32 off, v72, s32 offset:252
+; GFX11-TRUE16-NEXT: scratch_store_b32 off, v73, s32 offset:248
+; GFX11-TRUE16-NEXT: scratch_store_b32 off, v74, s32 offset:244
+; GFX11-TRUE16-NEXT: scratch_store_b32 off, v75, s32 offset:240
+; GFX11-TRUE16-NEXT: scratch_store_b32 off, v76, s32 offset:236
+; GFX11-TRUE16-NEXT: scratch_store_b32 off, v77, s32 offset:232
+; GFX11-TRUE16-NEXT: scratch_store_b32 off, v78, s32 offset:228
+; GFX11-TRUE16-NEXT: scratch_store_b32 off, v79, s32 offset:224
+; GFX11-TRUE16-NEXT: scratch_store_b32 off, v88, s32 offset:220
+; GFX11-TRUE16-NEXT: scratch_store_b32 off, v89, s32 offset:216
+; GFX11-TRUE16-NEXT: scratch_store_b32 off, v90, s32 offset:212
+; GFX11-TRUE16-NEXT: scratch_store_b32 off, v91, s32 offset:208
+; GFX11-TRUE16-NEXT: scratch_store_b32 off, v92, s32 offset:204
+; GFX11-TRUE16-NEXT: scratch_store_b32 off, v93, s32 offset:200
+; GFX11-TRUE16-NEXT: scratch_store_b32 off, v94, s32 offset:196
+; GFX11-TRUE16-NEXT: scratch_store_b32 off, v95, s32 offset:192
+; GFX11-TRUE16-NEXT: s_clause 0x1f
+; GFX11-TRUE16-NEXT: scratch_store_b32 off, v104, s32 offset:188
+; GFX11-TRUE16-NEXT: scratch_store_b32 off, v105, s32 offset:184
+; GFX11-TRUE16-NEXT: scratch_store_b32 off, v106, s32 offset:180
+; GFX11-TRUE16-NEXT: scratch_store_b32 off, v107, s32 offset:176
+; GFX11-TRUE16-NEXT: scratch_store_b32 off, v108, s32 offset:172
+; GFX11-TRUE16-NEXT: scratch_store_b32 off, v109, s32 offset:168
+; GFX11-TRUE16-NEXT: scratch_store_b32 off, v110, s32 offset:164
+; GFX11-TRUE16-NEXT: scratch_store_b32 off, v111, s32 offset:160
+; GFX11-TRUE16-NEXT: scratch_store_b32 off, v120, s32 offset:156
+; GFX11-TRUE16-NEXT: scratch_store_b32 off, v121, s32 offset:152
+; GFX11-TRUE16-NEXT: scratch_store_b32 off, v122, s32 offset:148
+; GFX11-TRUE16-NEXT: scratch_store_b32 off, v123, s32 offset:144
+; GFX11-TRUE16-NEXT: scratch_store_b32 off, v124, s32 offset:140
+; GFX11-TRUE16-NEXT: scratch_store_b32 off, v125, s32 offset:136
+; GFX11-TRUE16-NEXT: scratch_store_b32 off, v126, s32 offset:132
+; GFX11-TRUE16-NEXT: scratch_store_b32 off, v127, s32 offset:128
+; GFX11-TRUE16-NEXT: scratch_store_b32 off, v136, s32 offset:124
+; GFX11-TRUE16-NEXT: scratch_store_b32 off, v137, s32 offset:120
+; GFX11-TRUE16-NEXT: scratch_store_b32 off, v138, s32 offset:116
+; GFX11-TRUE16-NEXT: scratch_store_b32 off, v139, s32 offset:112
+; GFX11-TRUE16-NEXT: scratch_store_b32 off, v140, s32 offset:108
+; GFX11-TRUE16-NEXT: scratch_store_b32 off, v141, s32 offset:104
+; GFX11-TRUE16-NEXT: scratch_store_b32 off, v142, s32 offset:100
+; GFX11-TRUE16-NEXT: scratch_store_b32 off, v143, s32 offset:96
+; GFX11-TRUE16-NEXT: scratch_store_b32 off, v152, s32 offset:92
+; GFX11-TRUE16-NEXT: scratch_store_b32 off, v153, s32 offset:88
+; GFX11-TRUE16-NEXT: scratch_store_b32 off, v154, s32 offset:84
+; GFX11-TRUE16-NEXT: scratch_store_b32 off, v155, s32 offset:80
+; GFX11-TRUE16-NEXT: scratch_store_b32 off, v156, s32 offset:76
+; GFX11-TRUE16-NEXT: scratch_store_b32 off, v157, s32 offset:72
+; GFX11-TRUE16-NEXT: scratch_store_b32 off, v158, s32 offset:68
+; GFX11-TRUE16-NEXT: scratch_store_b32 off, v159, s32 offset:64
+; GFX11-TRUE16-NEXT: s_clause 0xf
+; GFX11-TRUE16-NEXT: scratch_store_b32 off, v168, s32 offset:60
+; GFX11-TRUE16-NEXT: scratch_store_b32 off, v169, s32 offset:56
+; GFX11-TRUE16-NEXT: scratch_store_b32 off, v170, s32 offset:52
+; GFX11-TRUE16-NEXT: scratch_store_b32 off, v171, s32 offset:48
+; GFX11-TRUE16-NEXT: scratch_store_b32 off, v172, s32 offset:44
+; GFX11-TRUE16-NEXT: scratch_store_b32 off, v173, s32 offset:40
+; GFX11-TRUE16-NEXT: scratch_store_b32 off, v174, s32 offset:36
+; GFX11-TRUE16-NEXT: scratch_store_b32 off, v175, s32 offset:32
+; GFX11-TRUE16-NEXT: scratch_store_b32 off, v184, s32 offset:28
+; GFX11-TRUE16-NEXT: scratch_store_b32 off, v185, s32 offset:24
+; GFX11-TRUE16-NEXT: scratch_store_b32 off, v186, s32 offset:20
+; GFX11-TRUE16-NEXT: scratch_store_b32 off, v187, s32 offset:16
+; GFX11-TRUE16-NEXT: scratch_store_b32 off, v188, s32 offset:12
+; GFX11-TRUE16-NEXT: scratch_store_b32 off, v189, s32 offset:8
+; GFX11-TRUE16-NEXT: scratch_store_b32 off, v190, s32 offset:4
+; GFX11-TRUE16-NEXT: scratch_store_b32 off, v191, s32
+; GFX11-TRUE16-NEXT: v_dual_mov_b32 v28, v9 :: v_dual_mov_b32 v25, v7
+; GFX11-TRUE16-NEXT: v_dual_mov_b32 v26, v8 :: v_dual_mov_b32 v189, v5
+; GFX11-TRUE16-NEXT: v_dual_mov_b32 v188, v6 :: v_dual_mov_b32 v191, v3
+; GFX11-TRUE16-NEXT: v_dual_mov_b32 v190, v4 :: v_dual_mov_b32 v185, v2
+; GFX11-TRUE16-NEXT: v_dual_mov_b32 v186, v1 :: v_dual_mov_b32 v187, v0
+; GFX11-TRUE16-NEXT: s_lshr_b32 s15, s29, 16
+; GFX11-TRUE16-NEXT: s_lshr_b32 s14, s28, 16
+; GFX11-TRUE16-NEXT: s_lshr_b32 s13, s27, 16
+; GFX11-TRUE16-NEXT: s_lshr_b32 s12, s26, 16
+; GFX11-TRUE16-NEXT: s_lshr_b32 s11, s25, 16
+; GFX11-TRUE16-NEXT: s_lshr_b32 s10, s24, 16
+; GFX11-TRUE16-NEXT: s_lshr_b32 s9, s23, 16
+; GFX11-TRUE16-NEXT: s_lshr_b32 s8, s22, 16
+; GFX11-TRUE16-NEXT: s_lshr_b32 s7, s21, 16
+; GFX11-TRUE16-NEXT: s_lshr_b32 s6, s20, 16
+; GFX11-TRUE16-NEXT: s_lshr_b32 s5, s19, 16
+; GFX11-TRUE16-NEXT: s_lshr_b32 s4, s18, 16
+; GFX11-TRUE16-NEXT: s_lshr_b32 s43, s17, 16
+; GFX11-TRUE16-NEXT: s_lshr_b32 s44, s16, 16
+; GFX11-TRUE16-NEXT: s_lshr_b32 s45, s3, 16
+; GFX11-TRUE16-NEXT: s_lshr_b32 s46, s2, 16
+; GFX11-TRUE16-NEXT: s_lshr_b32 s41, s1, 16
+; GFX11-TRUE16-NEXT: s_lshr_b32 s40, s0, 16
+; GFX11-TRUE16-NEXT: s_mov_b32 s42, 0
+; GFX11-TRUE16-NEXT: s_pack_ll_b32_b16 s40, s0, s40
+; GFX11-TRUE16-NEXT: s_pack_ll_b32_b16 s41, s1, s41
+; GFX11-TRUE16-NEXT: s_pack_ll_b32_b16 s0, s2, s46
+; GFX11-TRUE16-NEXT: s_pack_ll_b32_b16 s1, s3, s45
+; GFX11-TRUE16-NEXT: s_pack_ll_b32_b16 s2, s16, s44
+; GFX11-TRUE16-NEXT: s_pack_ll_b32_b16 s3, s17, s43
+; GFX11-TRUE16-NEXT: s_pack_ll_b32_b16 s4, s18, s4
+; GFX11-TRUE16-NEXT: s_pack_ll_b32_b16 s5, s19, s5
+; GFX11-TRUE16-NEXT: s_pack_ll_b32_b16 s6, s20, s6
+; GFX11-TRUE16-NEXT: s_pack_ll_b32_b16 s7, s21, s7
+; GFX11-TRUE16-NEXT: s_pack_ll_b32_b16 s8, s22, s8
+; GFX11-TRUE16-NEXT: s_pack_ll_b32_b16 s9, s23, s9
+; GFX11-TRUE16-NEXT: s_pack_ll_b32_b16 s10, s24, s10
+; GFX11-TRUE16-NEXT: s_pack_ll_b32_b16 s11, s25, s11
+; GFX11-TRUE16-NEXT: s_pack_ll_b32_b16 s12, s26, s12
+; GFX11-TRUE16-NEXT: s_pack_ll_b32_b16 s13, s27, s13
+; GFX11-TRUE16-NEXT: s_pack_ll_b32_b16 s14, s28, s14
+; GFX11-TRUE16-NEXT: s_pack_ll_b32_b16 s15, s29, s15
; GFX11-TRUE16-NEXT: s_and_b32 s47, vcc_lo, exec_lo
; GFX11-TRUE16-NEXT: s_cbranch_scc0 .LBB15_4
; GFX11-TRUE16-NEXT: ; %bb.1: ; %cmp.false
-; GFX11-TRUE16-NEXT: v_lshl_or_b32 v18, v49, 16, v67
-; GFX11-TRUE16-NEXT: v_lshl_or_b32 v19, v48, 16, v66
-; GFX11-TRUE16-NEXT: v_lshl_or_b32 v20, v39, 16, v65
-; GFX11-TRUE16-NEXT: v_lshl_or_b32 v21, v38, 16, v64
-; GFX11-TRUE16-NEXT: v_lshl_or_b32 v22, v37, 16, v55
-; GFX11-TRUE16-NEXT: v_lshl_or_b32 v23, v36, 16, v54
-; GFX11-TRUE16-NEXT: v_lshl_or_b32 v24, v35, 16, v53
-; GFX11-TRUE16-NEXT: v_lshl_or_b32 v25, v34, 16, v52
-; GFX11-TRUE16-NEXT: v_lshl_or_b32 v26, v33, 16, v51
-; GFX11-TRUE16-NEXT: v_lshl_or_b32 v27, v32, 16, v50
-; GFX11-TRUE16-NEXT: v_dual_mov_b32 v0, s0 :: v_dual_mov_b32 v1, s1
-; GFX11-TRUE16-NEXT: v_dual_mov_b32 v2, s2 :: v_dual_mov_b32 v3, s3
-; GFX11-TRUE16-NEXT: v_dual_mov_b32 v4, s4 :: v_dual_mov_b32 v5, s5
-; GFX11-TRUE16-NEXT: v_dual_mov_b32 v6, s6 :: v_dual_mov_b32 v7, s7
-; GFX11-TRUE16-NEXT: v_dual_mov_b32 v8, s8 :: v_dual_mov_b32 v9, s9
-; GFX11-TRUE16-NEXT: v_dual_mov_b32 v10, s10 :: v_dual_mov_b32 v11, s11
-; GFX11-TRUE16-NEXT: v_dual_mov_b32 v12, s12 :: v_dual_mov_b32 v13, s13
-; GFX11-TRUE16-NEXT: v_dual_mov_b32 v14, s14 :: v_dual_mov_b32 v15, s16
-; GFX11-TRUE16-NEXT: v_dual_mov_b32 v16, s17 :: v_dual_mov_b32 v17, s18
-; GFX11-TRUE16-NEXT: s_and_not1_b32 vcc_lo, exec_lo, s15
+; GFX11-TRUE16-NEXT: v_dual_mov_b32 v0, s40 :: v_dual_mov_b32 v5, s0
+; GFX11-TRUE16-NEXT: v_dual_mov_b32 v2, s41 :: v_dual_mov_b32 v9, s1
+; GFX11-TRUE16-NEXT: v_dual_mov_b32 v14, s2 :: v_dual_mov_b32 v27, s4
+; GFX11-TRUE16-NEXT: v_dual_mov_b32 v20, s3 :: v_dual_mov_b32 v35, s5
+; GFX11-TRUE16-NEXT: v_dual_mov_b32 v44, s6 :: v_dual_mov_b32 v65, s8
+; GFX11-TRUE16-NEXT: v_dual_mov_b32 v54, s7 :: v_dual_mov_b32 v77, s9
+; GFX11-TRUE16-NEXT: v_dual_mov_b32 v90, s10 :: v_dual_mov_b32 v119, s12
+; GFX11-TRUE16-NEXT: v_dual_mov_b32 v104, s11 :: v_dual_mov_b32 v135, s13
+; GFX11-TRUE16-NEXT: v_mov_b32_e32 v152, s14
+; GFX11-TRUE16-NEXT: v_mov_b32_e32 v170, s15
+; GFX11-TRUE16-NEXT: s_and_not1_b32 vcc_lo, exec_lo, s42
; GFX11-TRUE16-NEXT: s_cbranch_vccnz .LBB15_3
; GFX11-TRUE16-NEXT: .LBB15_2: ; %cmp.true
-; GFX11-TRUE16-NEXT: v_lshl_or_b32 v18, v49, 16, v67
-; GFX11-TRUE16-NEXT: v_lshl_or_b32 v19, v48, 16, v66
-; GFX11-TRUE16-NEXT: v_lshl_or_b32 v20, v39, 16, v65
-; GFX11-TRUE16-NEXT: v_lshl_or_b32 v21, v38, 16, v64
-; GFX11-TRUE16-NEXT: v_lshl_or_b32 v22, v37, 16, v55
-; GFX11-TRUE16-NEXT: v_lshl_or_b32 v23, v36, 16, v54
-; GFX11-TRUE16-NEXT: v_lshl_or_b32 v24, v35, 16, v53
-; GFX11-TRUE16-NEXT: v_lshl_or_b32 v25, v34, 16, v52
-; GFX11-TRUE16-NEXT: v_lshl_or_b32 v26, v33, 16, v51
-; GFX11-TRUE16-NEXT: v_lshl_or_b32 v27, v32, 16, v50
-; GFX11-TRUE16-NEXT: v_pk_add_u16 v0, s0, 3 op_sel_hi:[1,0]
-; GFX11-TRUE16-NEXT: v_pk_add_u16 v1, s1, 3 op_sel_hi:[1,0]
-; GFX11-TRUE16-NEXT: v_pk_add_u16 v2, s2, 3 op_sel_hi:[1,0]
-; GFX11-TRUE16-NEXT: v_pk_add_u16 v3, s3, 3 op_sel_hi:[1,0]
-; GFX11-TRUE16-NEXT: v_pk_add_u16 v4, s4, 3 op_sel_hi:[1,0]
-; GFX11-TRUE16-NEXT: v_pk_add_u16 v5, s5, 3 op_sel_hi:[1,0]
-; GFX11-TRUE16-NEXT: v_pk_add_u16 v6, s6, 3 op_sel_hi:[1,0]
-; GFX11-TRUE16-NEXT: v_pk_add_u16 v7, s7, 3 op_sel_hi:[1,0]
-; GFX11-TRUE16-NEXT: v_pk_add_u16 v8, s8, 3 op_sel_hi:[1,0]
-; GFX11-TRUE16-NEXT: v_pk_add_u16 v9, s9, 3 op_sel_hi:[1,0]
-; GFX11-TRUE16-NEXT: v_pk_add_u16 v10, s10, 3 op_sel_hi:[1,0]
-; GFX11-TRUE16-NEXT: v_pk_add_u16 v11, s11, 3 op_sel_hi:[1,0]
-; GFX11-TRUE16-NEXT: v_pk_add_u16 v12, s12, 3 op_sel_hi:[1,0]
-; GFX11-TRUE16-NEXT: v_pk_add_u16 v13, s13, 3 op_sel_hi:[1,0]
-; GFX11-TRUE16-NEXT: v_pk_add_u16 v14, s14, 3 op_sel_hi:[1,0]
-; GFX11-TRUE16-NEXT: v_pk_add_u16 v15, s16, 3 op_sel_hi:[1,0]
-; GFX11-TRUE16-NEXT: v_pk_add_u16 v16, s17, 3 op_sel_hi:[1,0]
-; GFX11-TRUE16-NEXT: v_pk_add_u16 v17, s18, 3 op_sel_hi:[1,0]
-; GFX11-TRUE16-NEXT: v_pk_add_u16 v18, v18, 3 op_sel_hi:[1,0]
-; GFX11-TRUE16-NEXT: v_pk_add_u16 v19, v19, 3 op_sel_hi:[1,0]
-; GFX11-TRUE16-NEXT: v_pk_add_u16 v20, v20, 3 op_sel_hi:[1,0]
-; GFX11-TRUE16-NEXT: v_pk_add_u16 v21, v21, 3 op_sel_hi:[1,0]
-; GFX11-TRUE16-NEXT: v_pk_add_u16 v22, v22, 3 op_sel_hi:[1,0]
-; GFX11-TRUE16-NEXT: v_pk_add_u16 v23, v23, 3 op_sel_hi:[1,0]
-; GFX11-TRUE16-NEXT: v_pk_add_u16 v24, v24, 3 op_sel_hi:[1,0]
+; GFX11-TRUE16-NEXT: v_pk_add_u16 v0, s40, 3 op_sel_hi:[1,0]
+; GFX11-TRUE16-NEXT: v_pk_add_u16 v2, s41, 3 op_sel_hi:[1,0]
+; GFX11-TRUE16-NEXT: v_pk_add_u16 v187, v187, 3 op_sel_hi:[1,0]
+; GFX11-TRUE16-NEXT: v_pk_add_u16 v186, v186, 3 op_sel_hi:[1,0]
+; GFX11-TRUE16-NEXT: v_pk_add_u16 v185, v185, 3 op_sel_hi:[1,0]
+; GFX11-TRUE16-NEXT: v_pk_add_u16 v191, v191, 3 op_sel_hi:[1,0]
+; GFX11-TRUE16-NEXT: v_pk_add_u16 v190, v190, 3 op_sel_hi:[1,0]
+; GFX11-TRUE16-NEXT: v_pk_add_u16 v189, v189, 3 op_sel_hi:[1,0]
+; GFX11-TRUE16-NEXT: v_pk_add_u16 v188, v188, 3 op_sel_hi:[1,0]
; GFX11-TRUE16-NEXT: v_pk_add_u16 v25, v25, 3 op_sel_hi:[1,0]
; GFX11-TRUE16-NEXT: v_pk_add_u16 v26, v26, 3 op_sel_hi:[1,0]
-; GFX11-TRUE16-NEXT: v_pk_add_u16 v27, v27, 3 op_sel_hi:[1,0]
+; GFX11-TRUE16-NEXT: v_pk_add_u16 v28, v28, 3 op_sel_hi:[1,0]
+; GFX11-TRUE16-NEXT: v_pk_add_u16 v5, s0, 3 op_sel_hi:[1,0]
+; GFX11-TRUE16-NEXT: v_pk_add_u16 v9, s1, 3 op_sel_hi:[1,0]
+; GFX11-TRUE16-NEXT: v_pk_add_u16 v14, s2, 3 op_sel_hi:[1,0]
+; GFX11-TRUE16-NEXT: v_pk_add_u16 v20, s3, 3 op_sel_hi:[1,0]
+; GFX11-TRUE16-NEXT: v_pk_add_u16 v27, s4, 3 op_sel_hi:[1,0]
+; GFX11-TRUE16-NEXT: v_pk_add_u16 v35, s5, 3 op_sel_hi:[1,0]
+; GFX11-TRUE16-NEXT: v_pk_add_u16 v44, s6, 3 op_sel_hi:[1,0]
+; GFX11-TRUE16-NEXT: v_pk_add_u16 v54, s7, 3 op_sel_hi:[1,0]
+; GFX11-TRUE16-NEXT: v_pk_add_u16 v65, s8, 3 op_sel_hi:[1,0]
+; GFX11-TRUE16-NEXT: v_pk_add_u16 v77, s9, 3 op_sel_hi:[1,0]
+; GFX11-TRUE16-NEXT: v_pk_add_u16 v90, s10, 3 op_sel_hi:[1,0]
+; GFX11-TRUE16-NEXT: v_pk_add_u16 v104, s11, 3 op_sel_hi:[1,0]
+; GFX11-TRUE16-NEXT: v_pk_add_u16 v119, s12, 3 op_sel_hi:[1,0]
+; GFX11-TRUE16-NEXT: v_pk_add_u16 v135, s13, 3 op_sel_hi:[1,0]
+; GFX11-TRUE16-NEXT: v_pk_add_u16 v152, s14, 3 op_sel_hi:[1,0]
+; GFX11-TRUE16-NEXT: v_pk_add_u16 v170, s15, 3 op_sel_hi:[1,0]
; GFX11-TRUE16-NEXT: .LBB15_3: ; %end
+; GFX11-TRUE16-NEXT: v_dual_mov_b32 v1, v2 :: v_dual_mov_b32 v2, v5
+; GFX11-TRUE16-NEXT: v_dual_mov_b32 v5, v20 :: v_dual_mov_b32 v6, v27
+; GFX11-TRUE16-NEXT: v_dual_mov_b32 v7, v35 :: v_dual_mov_b32 v8, v44
+; GFX11-TRUE16-NEXT: v_dual_mov_b32 v11, v77 :: v_dual_mov_b32 v12, v90
+; GFX11-TRUE16-NEXT: v_mov_b32_e32 v13, v104
+; GFX11-TRUE16-NEXT: v_dual_mov_b32 v15, v135 :: v_dual_mov_b32 v16, v152
+; GFX11-TRUE16-NEXT: v_dual_mov_b32 v17, v170 :: v_dual_mov_b32 v18, v187
+; GFX11-TRUE16-NEXT: v_dual_mov_b32 v19, v186 :: v_dual_mov_b32 v20, v185
+; GFX11-TRUE16-NEXT: v_dual_mov_b32 v21, v191 :: v_dual_mov_b32 v22, v190
+; GFX11-TRUE16-NEXT: v_dual_mov_b32 v23, v189 :: v_dual_mov_b32 v24, v188
+; GFX11-TRUE16-NEXT: s_clause 0x1f
+; GFX11-TRUE16-NEXT: scratch_load_b32 v191, off, s32
+; GFX11-TRUE16-NEXT: scratch_load_b32 v190, off, s32 offset:4
+; GFX11-TRUE16-NEXT: scratch_load_b32 v189, off, s32 offset:8
+; GFX11-TRUE16-NEXT: scratch_load_b32 v188, off, s32 offset:12
+; GFX11-TRUE16-NEXT: scratch_load_b32 v187, off, s32 offset:16
+; GFX11-TRUE16-NEXT: scratch_load_b32 v186, off, s32 offset:20
+; GFX11-TRUE16-NEXT: scratch_load_b32 v185, off, s32 offset:24
+; GFX11-TRUE16-NEXT: scratch_load_b32 v184, off, s32 offset:28
+; GFX11-TRUE16-NEXT: scratch_load_b32 v175, off, s32 offset:32
+; GFX11-TRUE16-NEXT: scratch_load_b32 v174, off, s32 offset:36
+; GFX11-TRUE16-NEXT: scratch_load_b32 v173, off, s32 offset:40
+; GFX11-TRUE16-NEXT: scratch_load_b32 v172, off, s32 offset:44
+; GFX11-TRUE16-NEXT: scratch_load_b32 v171, off, s32 offset:48
+; GFX11-TRUE16-NEXT: scratch_load_b32 v170, off, s32 offset:52
+; GFX11-TRUE16-NEXT: scratch_load_b32 v169, off, s32 offset:56
+; GFX11-TRUE16-NEXT: scratch_load_b32 v168, off, s32 offset:60
+; GFX11-TRUE16-NEXT: scratch_load_b32 v159, off, s32 offset:64
+; GFX11-TRUE16-NEXT: scratch_load_b32 v158, off, s32 offset:68
+; GFX11-TRUE16-NEXT: scratch_load_b32 v157, off, s32 offset:72
+; GFX11-TRUE16-NEXT: scratch_load_b32 v156, off, s32 offset:76
+; GFX11-TRUE16-NEXT: scratch_load_b32 v155, off, s32 offset:80
+; GFX11-TRUE16-NEXT: scratch_load_b32 v154, off, s32 offset:84
+; GFX11-TRUE16-NEXT: scratch_load_b32 v153, off, s32 offset:88
+; GFX11-TRUE16-NEXT: scratch_load_b32 v152, off, s32 offset:92
+; GFX11-TRUE16-NEXT: scratch_load_b32 v143, off, s32 offset:96
+; GFX11-TRUE16-NEXT: scratch_load_b32 v142, off, s32 offset:100
+; GFX11-TRUE16-NEXT: scratch_load_b32 v141, off, s32 offset:104
+; GFX11-TRUE16-NEXT: scratch_load_b32 v140, off, s32 offset:108
+; GFX11-TRUE16-NEXT: scratch_load_b32 v139, off, s32 offset:112
+; GFX11-TRUE16-NEXT: scratch_load_b32 v138, off, s32 offset:116
+; GFX11-TRUE16-NEXT: scratch_load_b32 v137, off, s32 offset:120
+; GFX11-TRUE16-NEXT: scratch_load_b32 v136, off, s32 offset:124
+; GFX11-TRUE16-NEXT: s_clause 0x1f
+; GFX11-TRUE16-NEXT: scratch_load_b32 v127, off, s32 offset:128
+; GFX11-TRUE16-NEXT: scratch_load_b32 v126, off, s32 offset:132
+; GFX11-TRUE16-NEXT: scratch_load_b32 v125, off, s32 offset:136
+; GFX11-TRUE16-NEXT: scratch_load_b32 v124, off, s32 offset:140
+; GFX11-TRUE16-NEXT: scratch_load_b32 v123, off, s32 offset:144
+; GFX11-TRUE16-NEXT: scratch_load_b32 v122, off, s32 offset:148
+; GFX11-TRUE16-NEXT: scratch_load_b32 v121, off, s32 offset:152
+; GFX11-TRUE16-NEXT: scratch_load_b32 v120, off, s32 offset:156
+; GFX11-TRUE16-NEXT: scratch_load_b32 v111, off, s32 offset:160
+; GFX11-TRUE16-NEXT: scratch_load_b32 v110, off, s32 offset:164
+; GFX11-TRUE16-NEXT: scratch_load_b32 v109, off, s32 offset:168
+; GFX11-TRUE16-NEXT: scratch_load_b32 v108, off, s32 offset:172
+; GFX11-TRUE16-NEXT: scratch_load_b32 v107, off, s32 offset:176
+; GFX11-TRUE16-NEXT: scratch_load_b32 v106, off, s32 offset:180
+; GFX11-TRUE16-NEXT: scratch_load_b32 v105, off, s32 offset:184
+; GFX11-TRUE16-NEXT: scratch_load_b32 v104, off, s32 offset:188
+; GFX11-TRUE16-NEXT: scratch_load_b32 v95, off, s32 offset:192
+; GFX11-TRUE16-NEXT: scratch_load_b32 v94, off, s32 offset:196
+; GFX11-TRUE16-NEXT: scratch_load_b32 v93, off, s32 offset:200
+; GFX11-TRUE16-NEXT: scratch_load_b32 v92, off, s32 offset:204
+; GFX11-TRUE16-NEXT: scratch_load_b32 v91, off, s32 offset:208
+; GFX11-TRUE16-NEXT: scratch_load_b32 v90, off, s32 offset:212
+; GFX11-TRUE16-NEXT: scratch_load_b32 v89, off, s32 offset:216
+; GFX11-TRUE16-NEXT: scratch_load_b32 v88, off, s32 offset:220
+; GFX11-TRUE16-NEXT: scratch_load_b32 v79, off, s32 offset:224
+; GFX11-TRUE16-NEXT: scratch_load_b32 v78, off, s32 offset:228
+; GFX11-TRUE16-NEXT: scratch_load_b32 v77, off, s32 offset:232
+; GFX11-TRUE16-NEXT: scratch_load_b32 v76, off, s32 offset:236
+; GFX11-TRUE16-NEXT: scratch_load_b32 v75, off, s32 offset:240
+; GFX11-TRUE16-NEXT: scratch_load_b32 v74, off, s32 offset:244
+; GFX11-TRUE16-NEXT: scratch_load_b32 v73, off, s32 offset:248
+; GFX11-TRUE16-NEXT: scratch_load_b32 v72, off, s32 offset:252
+; GFX11-TRUE16-NEXT: s_clause 0xf
+; GFX11-TRUE16-NEXT: scratch_load_b32 v63, off, s32 offset:256
+; GFX11-TRUE16-NEXT: scratch_load_b32 v62, off, s32 offset:260
+; GFX11-TRUE16-NEXT: scratch_load_b32 v61, off, s32 offset:264
+; GFX11-TRUE16-NEXT: scratch_load_b32 v60, off, s32 offset:268
+; GFX11-TRUE16-NEXT: scratch_load_b32 v59, off, s32 offset:272
+; GFX11-TRUE16-NEXT: scratch_load_b32 v58, off, s32 offset:276
+; GFX11-TRUE16-NEXT: scratch_load_b32 v57, off, s32 offset:280
+; GFX11-TRUE16-NEXT: scratch_load_b32 v56, off, s32 offset:284
+; GFX11-TRUE16-NEXT: scratch_load_b32 v47, off, s32 offset:288
+; GFX11-TRUE16-NEXT: scratch_load_b32 v46, off, s32 offset:292
+; GFX11-TRUE16-NEXT: scratch_load_b32 v45, off, s32 offset:296
+; GFX11-TRUE16-NEXT: scratch_load_b32 v44, off, s32 offset:300
+; GFX11-TRUE16-NEXT: scratch_load_b32 v43, off, s32 offset:304
+; GFX11-TRUE16-NEXT: scratch_load_b32 v42, off, s32 offset:308
+; GFX11-TRUE16-NEXT: scratch_load_b32 v41, off, s32 offset:312
+; GFX11-TRUE16-NEXT: scratch_load_b32 v40, off, s32 offset:316
+; GFX11-TRUE16-NEXT: v_dual_mov_b32 v3, v9 :: v_dual_mov_b32 v4, v14
+; GFX11-TRUE16-NEXT: v_dual_mov_b32 v9, v54 :: v_dual_mov_b32 v10, v65
+; GFX11-TRUE16-NEXT: v_dual_mov_b32 v14, v119 :: v_dual_mov_b32 v27, v28
+; GFX11-TRUE16-NEXT: s_waitcnt vmcnt(0)
; GFX11-TRUE16-NEXT: s_setpc_b64 s[30:31]
; GFX11-TRUE16-NEXT: .LBB15_4:
+; GFX11-TRUE16-NEXT: v_dual_mov_b32 v64, v28 :: v_dual_mov_b32 v53, v26
+; GFX11-TRUE16-NEXT: v_mov_b32_e32 v54, v25
; GFX11-TRUE16-NEXT: ; implicit-def: $vgpr0_vgpr1_vgpr2_vgpr3_vgpr4_vgpr5_vgpr6_vgpr7_vgpr8_vgpr9_vgpr10_vgpr11_vgpr12_vgpr13_vgpr14_vgpr15_vgpr16_vgpr17_vgpr18_vgpr19_vgpr20_vgpr21_vgpr22_vgpr23_vgpr24_vgpr25_vgpr26_vgpr27_vgpr28_vgpr29_vgpr30_vgpr31
+; GFX11-TRUE16-NEXT: ; implicit-def: $vgpr1_vgpr2_vgpr3_vgpr4_vgpr5_vgpr6_vgpr7_vgpr8_vgpr9_vgpr10_vgpr11_vgpr12_vgpr13_vgpr14_vgpr15_vgpr16_vgpr17_vgpr18_vgpr19_vgpr20_vgpr21_vgpr22_vgpr23_vgpr24_vgpr25_vgpr26_vgpr27_vgpr28_vgpr29_vgpr30_vgpr31_vgpr32
+; GFX11-TRUE16-NEXT: ; implicit-def: $vgpr3_vgpr4_vgpr5_vgpr6_vgpr7_vgpr8_vgpr9_vgpr10_vgpr11_vgpr12_vgpr13_vgpr14_vgpr15_vgpr16_vgpr17_vgpr18_vgpr19_vgpr20_vgpr21_vgpr22_vgpr23_vgpr24_vgpr25_vgpr26_vgpr27_vgpr28_vgpr29_vgpr30_vgpr31_vgpr32_vgpr33_vgpr34
+; GFX11-TRUE16-NEXT: ; implicit-def: $vgpr6_vgpr7_vgpr8_vgpr9_vgpr10_vgpr11_vgpr12_vgpr13_vgpr14_vgpr15_vgpr16_vgpr17_vgpr18_vgpr19_vgpr20_vgpr21_vgpr22_vgpr23_vgpr24_vgpr25_vgpr26_vgpr27_vgpr28_vgpr29_vgpr30_vgpr31_vgpr32_vgpr33_vgpr34_vgpr35_vgpr36_vgpr37
+; GFX11-TRUE16-NEXT: ; implicit-def: $vgpr10_vgpr11_vgpr12_vgpr13_vgpr14_vgpr15_vgpr16_vgpr17_vgpr18_vgpr19_vgpr20_vgpr21_vgpr22_vgpr23_vgpr24_vgpr25_vgpr26_vgpr27_vgpr28_vgpr29_vgpr30_vgpr31_vgpr32_vgpr33_vgpr34_vgpr35_vgpr36_vgpr37_vgpr38_vgpr39_vgpr40_vgpr41
+; GFX11-TRUE16-NEXT: ; implicit-def: $vgpr15_vgpr16_vgpr17_vgpr18_vgpr19_vgpr20_vgpr21_vgpr22_vgpr23_vgpr24_vgpr25_vgpr26_vgpr27_vgpr28_vgpr29_vgpr30_vgpr31_vgpr32_vgpr33_vgpr34_vgpr35_vgpr36_vgpr37_vgpr38_vgpr39_vgpr40_vgpr41_vgpr42_vgpr43_vgpr44_vgpr45_vgpr46
+; GFX11-TRUE16-NEXT: ; implicit-def: $vgpr21_vgpr22_vgpr23_vgpr24_vgpr25_vgpr26_vgpr27_vgpr28_vgpr29_vgpr30_vgpr31_vgpr32_vgpr33_vgpr34_vgpr35_vgpr36_vgpr37_vgpr38_vgpr39_vgpr40_vgpr41_vgpr42_vgpr43_vgpr44_vgpr45_vgpr46_vgpr47_vgpr48_vgpr49_vgpr50_vgpr51_vgpr52
+; GFX11-TRUE16-NEXT: s_delay_alu instid0(VALU_DEP_1) | instskip(NEXT) | instid1(VALU_DEP_3)
+; GFX11-TRUE16-NEXT: v_dual_mov_b32 v25, v54 :: v_dual_mov_b32 v26, v53
+; GFX11-TRUE16-NEXT: ; implicit-def: $vgpr28_vgpr29_vgpr30_vgpr31_vgpr32_vgpr33_vgpr34_vgpr35_vgpr36_vgpr37_vgpr38_vgpr39_vgpr40_vgpr41_vgpr42_vgpr43_vgpr44_vgpr45_vgpr46_vgpr47_vgpr48_vgpr49_vgpr50_vgpr51_vgpr52_vgpr53_vgpr54_vgpr55_vgpr56_vgpr57_vgpr58_vgpr59
+; GFX11-TRUE16-NEXT: v_mov_b32_e32 v28, v64
+; GFX11-TRUE16-NEXT: ; implicit-def: $vgpr36_vgpr37_vgpr38_vgpr39_vgpr40_vgpr41_vgpr42_vgpr43_vgpr44_vgpr45_vgpr46_vgpr47_vgpr48_vgpr49_vgpr50_vgpr51_vgpr52_vgpr53_vgpr54_vgpr55_vgpr56_vgpr57_vgpr58_vgpr59_vgpr60_vgpr61_vgpr62_vgpr63_vgpr64_vgpr65_vgpr66_vgpr67
+; GFX11-TRUE16-NEXT: ; implicit-def: $vgpr45_vgpr46_vgpr47_vgpr48_vgpr49_vgpr50_vgpr51_vgpr52_vgpr53_vgpr54_vgpr55_vgpr56_vgpr57_vgpr58_vgpr59_vgpr60_vgpr61_vgpr62_vgpr63_vgpr64_vgpr65_vgpr66_vgpr67_vgpr68_vgpr69_vgpr70_vgpr71_vgpr72_vgpr73_vgpr74_vgpr75_vgpr76
+; GFX11-TRUE16-NEXT: ; implicit-def: $vgpr55_vgpr56_vgpr57_vgpr58_vgpr59_vgpr60_vgpr61_vgpr62_vgpr63_vgpr64_vgpr65_vgpr66_vgpr67_vgpr68_vgpr69_vgpr70_vgpr71_vgpr72_vgpr73_vgpr74_vgpr75_vgpr76_vgpr77_vgpr78_vgpr79_vgpr80_vgpr81_vgpr82_vgpr83_vgpr84_vgpr85_vgpr86
+; GFX11-TRUE16-NEXT: ; implicit-def: $vgpr66_vgpr67_vgpr68_vgpr69_vgpr70_vgpr71_vgpr72_vgpr73_vgpr74_vgpr75_vgpr76_vgpr77_vgpr78_vgpr79_vgpr80_vgpr81_vgpr82_vgpr83_vgpr84_vgpr85_vgpr86_vgpr87_vgpr88_vgpr89_vgpr90_vgpr91_vgpr92_vgpr93_vgpr94_vgpr95_vgpr96_vgpr97
+; GFX11-TRUE16-NEXT: ; implicit-def: $vgpr78_vgpr79_vgpr80_vgpr81_vgpr82_vgpr83_vgpr84_vgpr85_vgpr86_vgpr87_vgpr88_vgpr89_vgpr90_vgpr91_vgpr92_vgpr93_vgpr94_vgpr95_vgpr96_vgpr97_vgpr98_vgpr99_vgpr100_vgpr101_vgpr102_vgpr103_vgpr104_vgpr105_vgpr106_vgpr107_vgpr108_vgpr109
+; GFX11-TRUE16-NEXT: ; implicit-def: $vgpr91_vgpr92_vgpr93_vgpr94_vgpr95_vgpr96_vgpr97_vgpr98_vgpr99_vgpr100_vgpr101_vgpr102_vgpr103_vgpr104_vgpr105_vgpr106_vgpr107_vgpr108_vgpr109_vgpr110_vgpr111_vgpr112_vgpr113_vgpr114_vgpr115_vgpr116_vgpr117_vgpr118_vgpr119_vgpr120_vgpr121_vgpr122
+; GFX11-TRUE16-NEXT: ; implicit-def: $vgpr105_vgpr106_vgpr107_vgpr108_vgpr109_vgpr110_vgpr111_vgpr112_vgpr113_vgpr114_vgpr115_vgpr116_vgpr117_vgpr118_vgpr119_vgpr120_vgpr121_vgpr122_vgpr123_vgpr124_vgpr125_vgpr126_vgpr127_vgpr128_vgpr129_vgpr130_vgpr131_vgpr132_vgpr133_vgpr134_vgpr135_vgpr136
+; GFX11-TRUE16-NEXT: ; implicit-def: $vgpr120_vgpr121_vgpr122_vgpr123_vgpr124_vgpr125_vgpr126_vgpr127_vgpr128_vgpr129_vgpr130_vgpr131_vgpr132_vgpr133_vgpr134_vgpr135_vgpr136_vgpr137_vgpr138_vgpr139_vgpr140_vgpr141_vgpr142_vgpr143_vgpr144_vgpr145_vgpr146_vgpr147_vgpr148_vgpr149_vgpr150_vgpr151
+; GFX11-TRUE16-NEXT: ; implicit-def: $vgpr136_vgpr137_vgpr138_vgpr139_vgpr140_vgpr141_vgpr142_vgpr143_vgpr144_vgpr145_vgpr146_vgpr147_vgpr148_vgpr149_vgpr150_vgpr151_vgpr152_vgpr153_vgpr154_vgpr155_vgpr156_vgpr157_vgpr158_vgpr159_vgpr160_vgpr161_vgpr162_vgpr163_vgpr164_vgpr165_vgpr166_vgpr167
+; GFX11-TRUE16-NEXT: ; implicit-def: $vgpr153_vgpr154_vgpr155_vgpr156_vgpr157_vgpr158_vgpr159_vgpr160_vgpr161_vgpr162_vgpr163_vgpr164_vgpr165_vgpr166_vgpr167_vgpr168_vgpr169_vgpr170_vgpr171_vgpr172_vgpr173_vgpr174_vgpr175_vgpr176_vgpr177_vgpr178_vgpr179_vgpr180_vgpr181_vgpr182_vgpr183_vgpr184
; GFX11-TRUE16-NEXT: s_branch .LBB15_2
;
; GFX11-FAKE16-LABEL: bitcast_v56i16_to_v28i32_scalar:
@@ -11885,141 +12043,299 @@ define inreg <28 x i32> @bitcast_v56f16_to_v28i32_scalar(<56 x half> inreg %a, i
; GFX11-TRUE16-LABEL: bitcast_v56f16_to_v28i32_scalar:
; GFX11-TRUE16: ; %bb.0:
; GFX11-TRUE16-NEXT: s_waitcnt vmcnt(0) expcnt(0) lgkmcnt(0)
-; GFX11-TRUE16-NEXT: v_mov_b16_e32 v32.h, 0
; GFX11-TRUE16-NEXT: v_cmp_ne_u32_e32 vcc_lo, 0, v10
-; GFX11-TRUE16-NEXT: v_mov_b16_e32 v32.l, v9.h
-; GFX11-TRUE16-NEXT: v_mov_b16_e32 v33.l, v8.h
-; GFX11-TRUE16-NEXT: v_mov_b16_e32 v34.l, v7.h
-; GFX11-TRUE16-NEXT: v_mov_b16_e32 v33.h, v32.h
-; GFX11-TRUE16-NEXT: v_mov_b16_e32 v34.h, v32.h
-; GFX11-TRUE16-NEXT: v_mov_b16_e32 v35.l, v6.h
-; GFX11-TRUE16-NEXT: v_mov_b16_e32 v35.h, v32.h
-; GFX11-TRUE16-NEXT: v_mov_b16_e32 v36.l, v5.h
-; GFX11-TRUE16-NEXT: v_mov_b16_e32 v36.h, v32.h
-; GFX11-TRUE16-NEXT: v_mov_b16_e32 v37.l, v4.h
-; GFX11-TRUE16-NEXT: v_mov_b16_e32 v37.h, v32.h
-; GFX11-TRUE16-NEXT: v_mov_b16_e32 v38.l, v3.h
-; GFX11-TRUE16-NEXT: v_mov_b16_e32 v38.h, v32.h
-; GFX11-TRUE16-NEXT: v_mov_b16_e32 v39.l, v2.h
-; GFX11-TRUE16-NEXT: v_mov_b16_e32 v39.h, v32.h
-; GFX11-TRUE16-NEXT: v_mov_b16_e32 v48.l, v1.h
-; GFX11-TRUE16-NEXT: v_mov_b16_e32 v48.h, v32.h
-; GFX11-TRUE16-NEXT: v_mov_b16_e32 v49.l, v0.h
-; GFX11-TRUE16-NEXT: v_mov_b16_e32 v49.h, v32.h
-; GFX11-TRUE16-NEXT: v_and_b32_e32 v67, 0xffff, v0
-; GFX11-TRUE16-NEXT: v_and_b32_e32 v66, 0xffff, v1
-; GFX11-TRUE16-NEXT: v_and_b32_e32 v65, 0xffff, v2
-; GFX11-TRUE16-NEXT: v_and_b32_e32 v64, 0xffff, v3
-; GFX11-TRUE16-NEXT: v_and_b32_e32 v55, 0xffff, v4
-; GFX11-TRUE16-NEXT: v_and_b32_e32 v54, 0xffff, v5
-; GFX11-TRUE16-NEXT: v_and_b32_e32 v53, 0xffff, v6
-; GFX11-TRUE16-NEXT: v_and_b32_e32 v52, 0xffff, v7
-; GFX11-TRUE16-NEXT: v_and_b32_e32 v51, 0xffff, v8
-; GFX11-TRUE16-NEXT: v_and_b32_e32 v50, 0xffff, v9
-; GFX11-TRUE16-NEXT: s_lshr_b32 s40, s29, 16
-; GFX11-TRUE16-NEXT: s_lshr_b32 s41, s28, 16
-; GFX11-TRUE16-NEXT: s_lshr_b32 s42, s27, 16
-; GFX11-TRUE16-NEXT: s_lshr_b32 s14, s26, 16
-; GFX11-TRUE16-NEXT: s_lshr_b32 s13, s25, 16
-; GFX11-TRUE16-NEXT: s_lshr_b32 s12, s24, 16
-; GFX11-TRUE16-NEXT: s_lshr_b32 s11, s23, 16
-; GFX11-TRUE16-NEXT: s_lshr_b32 s10, s22, 16
-; GFX11-TRUE16-NEXT: s_lshr_b32 s9, s21, 16
-; GFX11-TRUE16-NEXT: s_lshr_b32 s8, s20, 16
-; GFX11-TRUE16-NEXT: s_lshr_b32 s7, s19, 16
-; GFX11-TRUE16-NEXT: s_lshr_b32 s6, s18, 16
-; GFX11-TRUE16-NEXT: s_lshr_b32 s5, s17, 16
-; GFX11-TRUE16-NEXT: s_lshr_b32 s4, s16, 16
-; GFX11-TRUE16-NEXT: s_lshr_b32 s43, s3, 16
-; GFX11-TRUE16-NEXT: s_lshr_b32 s44, s2, 16
-; GFX11-TRUE16-NEXT: s_lshr_b32 s45, s1, 16
-; GFX11-TRUE16-NEXT: s_lshr_b32 s46, s0, 16
-; GFX11-TRUE16-NEXT: s_mov_b32 s15, 0
-; GFX11-TRUE16-NEXT: s_pack_ll_b32_b16 s0, s0, s46
-; GFX11-TRUE16-NEXT: s_pack_ll_b32_b16 s1, s1, s45
-; GFX11-TRUE16-NEXT: s_pack_ll_b32_b16 s2, s2, s44
-; GFX11-TRUE16-NEXT: s_pack_ll_b32_b16 s3, s3, s43
-; GFX11-TRUE16-NEXT: s_pack_ll_b32_b16 s4, s16, s4
-; GFX11-TRUE16-NEXT: s_pack_ll_b32_b16 s5, s17, s5
-; GFX11-TRUE16-NEXT: s_pack_ll_b32_b16 s6, s18, s6
-; GFX11-TRUE16-NEXT: s_pack_ll_b32_b16 s7, s19, s7
-; GFX11-TRUE16-NEXT: s_pack_ll_b32_b16 s8, s20, s8
-; GFX11-TRUE16-NEXT: s_pack_ll_b32_b16 s9, s21, s9
-; GFX11-TRUE16-NEXT: s_pack_ll_b32_b16 s10, s22, s10
-; GFX11-TRUE16-NEXT: s_pack_ll_b32_b16 s11, s23, s11
-; GFX11-TRUE16-NEXT: s_pack_ll_b32_b16 s12, s24, s12
-; GFX11-TRUE16-NEXT: s_pack_ll_b32_b16 s13, s25, s13
-; GFX11-TRUE16-NEXT: s_pack_ll_b32_b16 s14, s26, s14
-; GFX11-TRUE16-NEXT: s_pack_ll_b32_b16 s16, s27, s42
-; GFX11-TRUE16-NEXT: s_pack_ll_b32_b16 s17, s28, s41
-; GFX11-TRUE16-NEXT: s_pack_ll_b32_b16 s18, s29, s40
+; GFX11-TRUE16-NEXT: s_clause 0x1f
+; GFX11-TRUE16-NEXT: scratch_store_b32 off, v40, s32 offset:316
+; GFX11-TRUE16-NEXT: scratch_store_b32 off, v41, s32 offset:312
+; GFX11-TRUE16-NEXT: scratch_store_b32 off, v42, s32 offset:308
+; GFX11-TRUE16-NEXT: scratch_store_b32 off, v43, s32 offset:304
+; GFX11-TRUE16-NEXT: scratch_store_b32 off, v44, s32 offset:300
+; GFX11-TRUE16-NEXT: scratch_store_b32 off, v45, s32 offset:296
+; GFX11-TRUE16-NEXT: scratch_store_b32 off, v46, s32 offset:292
+; GFX11-TRUE16-NEXT: scratch_store_b32 off, v47, s32 offset:288
+; GFX11-TRUE16-NEXT: scratch_store_b32 off, v56, s32 offset:284
+; GFX11-TRUE16-NEXT: scratch_store_b32 off, v57, s32 offset:280
+; GFX11-TRUE16-NEXT: scratch_store_b32 off, v58, s32 offset:276
+; GFX11-TRUE16-NEXT: scratch_store_b32 off, v59, s32 offset:272
+; GFX11-TRUE16-NEXT: scratch_store_b32 off, v60, s32 offset:268
+; GFX11-TRUE16-NEXT: scratch_store_b32 off, v61, s32 offset:264
+; GFX11-TRUE16-NEXT: scratch_store_b32 off, v62, s32 offset:260
+; GFX11-TRUE16-NEXT: scratch_store_b32 off, v63, s32 offset:256
+; GFX11-TRUE16-NEXT: scratch_store_b32 off, v72, s32 offset:252
+; GFX11-TRUE16-NEXT: scratch_store_b32 off, v73, s32 offset:248
+; GFX11-TRUE16-NEXT: scratch_store_b32 off, v74, s32 offset:244
+; GFX11-TRUE16-NEXT: scratch_store_b32 off, v75, s32 offset:240
+; GFX11-TRUE16-NEXT: scratch_store_b32 off, v76, s32 offset:236
+; GFX11-TRUE16-NEXT: scratch_store_b32 off, v77, s32 offset:232
+; GFX11-TRUE16-NEXT: scratch_store_b32 off, v78, s32 offset:228
+; GFX11-TRUE16-NEXT: scratch_store_b32 off, v79, s32 offset:224
+; GFX11-TRUE16-NEXT: scratch_store_b32 off, v88, s32 offset:220
+; GFX11-TRUE16-NEXT: scratch_store_b32 off, v89, s32 offset:216
+; GFX11-TRUE16-NEXT: scratch_store_b32 off, v90, s32 offset:212
+; GFX11-TRUE16-NEXT: scratch_store_b32 off, v91, s32 offset:208
+; GFX11-TRUE16-NEXT: scratch_store_b32 off, v92, s32 offset:204
+; GFX11-TRUE16-NEXT: scratch_store_b32 off, v93, s32 offset:200
+; GFX11-TRUE16-NEXT: scratch_store_b32 off, v94, s32 offset:196
+; GFX11-TRUE16-NEXT: scratch_store_b32 off, v95, s32 offset:192
+; GFX11-TRUE16-NEXT: s_clause 0x1f
+; GFX11-TRUE16-NEXT: scratch_store_b32 off, v104, s32 offset:188
+; GFX11-TRUE16-NEXT: scratch_store_b32 off, v105, s32 offset:184
+; GFX11-TRUE16-NEXT: scratch_store_b32 off, v106, s32 offset:180
+; GFX11-TRUE16-NEXT: scratch_store_b32 off, v107, s32 offset:176
+; GFX11-TRUE16-NEXT: scratch_store_b32 off, v108, s32 offset:172
+; GFX11-TRUE16-NEXT: scratch_store_b32 off, v109, s32 offset:168
+; GFX11-TRUE16-NEXT: scratch_store_b32 off, v110, s32 offset:164
+; GFX11-TRUE16-NEXT: scratch_store_b32 off, v111, s32 offset:160
+; GFX11-TRUE16-NEXT: scratch_store_b32 off, v120, s32 offset:156
+; GFX11-TRUE16-NEXT: scratch_store_b32 off, v121, s32 offset:152
+; GFX11-TRUE16-NEXT: scratch_store_b32 off, v122, s32 offset:148
+; GFX11-TRUE16-NEXT: scratch_store_b32 off, v123, s32 offset:144
+; GFX11-TRUE16-NEXT: scratch_store_b32 off, v124, s32 offset:140
+; GFX11-TRUE16-NEXT: scratch_store_b32 off, v125, s32 offset:136
+; GFX11-TRUE16-NEXT: scratch_store_b32 off, v126, s32 offset:132
+; GFX11-TRUE16-NEXT: scratch_store_b32 off, v127, s32 offset:128
+; GFX11-TRUE16-NEXT: scratch_store_b32 off, v136, s32 offset:124
+; GFX11-TRUE16-NEXT: scratch_store_b32 off, v137, s32 offset:120
+; GFX11-TRUE16-NEXT: scratch_store_b32 off, v138, s32 offset:116
+; GFX11-TRUE16-NEXT: scratch_store_b32 off, v139, s32 offset:112
+; GFX11-TRUE16-NEXT: scratch_store_b32 off, v140, s32 offset:108
+; GFX11-TRUE16-NEXT: scratch_store_b32 off, v141, s32 offset:104
+; GFX11-TRUE16-NEXT: scratch_store_b32 off, v142, s32 offset:100
+; GFX11-TRUE16-NEXT: scratch_store_b32 off, v143, s32 offset:96
+; GFX11-TRUE16-NEXT: scratch_store_b32 off, v152, s32 offset:92
+; GFX11-TRUE16-NEXT: scratch_store_b32 off, v153, s32 offset:88
+; GFX11-TRUE16-NEXT: scratch_store_b32 off, v154, s32 offset:84
+; GFX11-TRUE16-NEXT: scratch_store_b32 off, v155, s32 offset:80
+; GFX11-TRUE16-NEXT: scratch_store_b32 off, v156, s32 offset:76
+; GFX11-TRUE16-NEXT: scratch_store_b32 off, v157, s32 offset:72
+; GFX11-TRUE16-NEXT: scratch_store_b32 off, v158, s32 offset:68
+; GFX11-TRUE16-NEXT: scratch_store_b32 off, v159, s32 offset:64
+; GFX11-TRUE16-NEXT: s_clause 0xf
+; GFX11-TRUE16-NEXT: scratch_store_b32 off, v168, s32 offset:60
+; GFX11-TRUE16-NEXT: scratch_store_b32 off, v169, s32 offset:56
+; GFX11-TRUE16-NEXT: scratch_store_b32 off, v170, s32 offset:52
+; GFX11-TRUE16-NEXT: scratch_store_b32 off, v171, s32 offset:48
+; GFX11-TRUE16-NEXT: scratch_store_b32 off, v172, s32 offset:44
+; GFX11-TRUE16-NEXT: scratch_store_b32 off, v173, s32 offset:40
+; GFX11-TRUE16-NEXT: scratch_store_b32 off, v174, s32 offset:36
+; GFX11-TRUE16-NEXT: scratch_store_b32 off, v175, s32 offset:32
+; GFX11-TRUE16-NEXT: scratch_store_b32 off, v184, s32 offset:28
+; GFX11-TRUE16-NEXT: scratch_store_b32 off, v185, s32 offset:24
+; GFX11-TRUE16-NEXT: scratch_store_b32 off, v186, s32 offset:20
+; GFX11-TRUE16-NEXT: scratch_store_b32 off, v187, s32 offset:16
+; GFX11-TRUE16-NEXT: scratch_store_b32 off, v188, s32 offset:12
+; GFX11-TRUE16-NEXT: scratch_store_b32 off, v189, s32 offset:8
+; GFX11-TRUE16-NEXT: scratch_store_b32 off, v190, s32 offset:4
+; GFX11-TRUE16-NEXT: scratch_store_b32 off, v191, s32
+; GFX11-TRUE16-NEXT: v_dual_mov_b32 v28, v9 :: v_dual_mov_b32 v25, v7
+; GFX11-TRUE16-NEXT: v_dual_mov_b32 v26, v8 :: v_dual_mov_b32 v189, v5
+; GFX11-TRUE16-NEXT: v_dual_mov_b32 v188, v6 :: v_dual_mov_b32 v191, v3
+; GFX11-TRUE16-NEXT: v_dual_mov_b32 v190, v4 :: v_dual_mov_b32 v185, v2
+; GFX11-TRUE16-NEXT: v_dual_mov_b32 v186, v1 :: v_dual_mov_b32 v187, v0
+; GFX11-TRUE16-NEXT: s_lshr_b32 s15, s29, 16
+; GFX11-TRUE16-NEXT: s_lshr_b32 s14, s28, 16
+; GFX11-TRUE16-NEXT: s_lshr_b32 s13, s27, 16
+; GFX11-TRUE16-NEXT: s_lshr_b32 s12, s26, 16
+; GFX11-TRUE16-NEXT: s_lshr_b32 s11, s25, 16
+; GFX11-TRUE16-NEXT: s_lshr_b32 s10, s24, 16
+; GFX11-TRUE16-NEXT: s_lshr_b32 s9, s23, 16
+; GFX11-TRUE16-NEXT: s_lshr_b32 s8, s22, 16
+; GFX11-TRUE16-NEXT: s_lshr_b32 s7, s21, 16
+; GFX11-TRUE16-NEXT: s_lshr_b32 s6, s20, 16
+; GFX11-TRUE16-NEXT: s_lshr_b32 s5, s19, 16
+; GFX11-TRUE16-NEXT: s_lshr_b32 s4, s18, 16
+; GFX11-TRUE16-NEXT: s_lshr_b32 s43, s17, 16
+; GFX11-TRUE16-NEXT: s_lshr_b32 s44, s16, 16
+; GFX11-TRUE16-NEXT: s_lshr_b32 s45, s3, 16
+; GFX11-TRUE16-NEXT: s_lshr_b32 s46, s2, 16
+; GFX11-TRUE16-NEXT: s_lshr_b32 s41, s1, 16
+; GFX11-TRUE16-NEXT: s_lshr_b32 s40, s0, 16
+; GFX11-TRUE16-NEXT: s_mov_b32 s42, 0
+; GFX11-TRUE16-NEXT: s_pack_ll_b32_b16 s40, s0, s40
+; GFX11-TRUE16-NEXT: s_pack_ll_b32_b16 s41, s1, s41
+; GFX11-TRUE16-NEXT: s_pack_ll_b32_b16 s0, s2, s46
+; GFX11-TRUE16-NEXT: s_pack_ll_b32_b16 s1, s3, s45
+; GFX11-TRUE16-NEXT: s_pack_ll_b32_b16 s2, s16, s44
+; GFX11-TRUE16-NEXT: s_pack_ll_b32_b16 s3, s17, s43
+; GFX11-TRUE16-NEXT: s_pack_ll_b32_b16 s4, s18, s4
+; GFX11-TRUE16-NEXT: s_pack_ll_b32_b16 s5, s19, s5
+; GFX11-TRUE16-NEXT: s_pack_ll_b32_b16 s6, s20, s6
+; GFX11-TRUE16-NEXT: s_pack_ll_b32_b16 s7, s21, s7
+; GFX11-TRUE16-NEXT: s_pack_ll_b32_b16 s8, s22, s8
+; GFX11-TRUE16-NEXT: s_pack_ll_b32_b16 s9, s23, s9
+; GFX11-TRUE16-NEXT: s_pack_ll_b32_b16 s10, s24, s10
+; GFX11-TRUE16-NEXT: s_pack_ll_b32_b16 s11, s25, s11
+; GFX11-TRUE16-NEXT: s_pack_ll_b32_b16 s12, s26, s12
+; GFX11-TRUE16-NEXT: s_pack_ll_b32_b16 s13, s27, s13
+; GFX11-TRUE16-NEXT: s_pack_ll_b32_b16 s14, s28, s14
+; GFX11-TRUE16-NEXT: s_pack_ll_b32_b16 s15, s29, s15
; GFX11-TRUE16-NEXT: s_and_b32 s47, vcc_lo, exec_lo
; GFX11-TRUE16-NEXT: s_cbranch_scc0 .LBB19_4
; GFX11-TRUE16-NEXT: ; %bb.1: ; %cmp.false
-; GFX11-TRUE16-NEXT: v_lshl_or_b32 v18, v49, 16, v67
-; GFX11-TRUE16-NEXT: v_lshl_or_b32 v19, v48, 16, v66
-; GFX11-TRUE16-NEXT: v_lshl_or_b32 v20, v39, 16, v65
-; GFX11-TRUE16-NEXT: v_lshl_or_b32 v21, v38, 16, v64
-; GFX11-TRUE16-NEXT: v_lshl_or_b32 v22, v37, 16, v55
-; GFX11-TRUE16-NEXT: v_lshl_or_b32 v23, v36, 16, v54
-; GFX11-TRUE16-NEXT: v_lshl_or_b32 v24, v35, 16, v53
-; GFX11-TRUE16-NEXT: v_lshl_or_b32 v25, v34, 16, v52
-; GFX11-TRUE16-NEXT: v_lshl_or_b32 v26, v33, 16, v51
-; GFX11-TRUE16-NEXT: v_lshl_or_b32 v27, v32, 16, v50
-; GFX11-TRUE16-NEXT: v_dual_mov_b32 v0, s0 :: v_dual_mov_b32 v1, s1
-; GFX11-TRUE16-NEXT: v_dual_mov_b32 v2, s2 :: v_dual_mov_b32 v3, s3
-; GFX11-TRUE16-NEXT: v_dual_mov_b32 v4, s4 :: v_dual_mov_b32 v5, s5
-; GFX11-TRUE16-NEXT: v_dual_mov_b32 v6, s6 :: v_dual_mov_b32 v7, s7
-; GFX11-TRUE16-NEXT: v_dual_mov_b32 v8, s8 :: v_dual_mov_b32 v9, s9
-; GFX11-TRUE16-NEXT: v_dual_mov_b32 v10, s10 :: v_dual_mov_b32 v11, s11
-; GFX11-TRUE16-NEXT: v_dual_mov_b32 v12, s12 :: v_dual_mov_b32 v13, s13
-; GFX11-TRUE16-NEXT: v_dual_mov_b32 v14, s14 :: v_dual_mov_b32 v15, s16
-; GFX11-TRUE16-NEXT: v_dual_mov_b32 v16, s17 :: v_dual_mov_b32 v17, s18
-; GFX11-TRUE16-NEXT: s_and_not1_b32 vcc_lo, exec_lo, s15
+; GFX11-TRUE16-NEXT: v_dual_mov_b32 v0, s40 :: v_dual_mov_b32 v5, s0
+; GFX11-TRUE16-NEXT: v_dual_mov_b32 v2, s41 :: v_dual_mov_b32 v9, s1
+; GFX11-TRUE16-NEXT: v_dual_mov_b32 v14, s2 :: v_dual_mov_b32 v27, s4
+; GFX11-TRUE16-NEXT: v_dual_mov_b32 v20, s3 :: v_dual_mov_b32 v35, s5
+; GFX11-TRUE16-NEXT: v_dual_mov_b32 v44, s6 :: v_dual_mov_b32 v65, s8
+; GFX11-TRUE16-NEXT: v_dual_mov_b32 v54, s7 :: v_dual_mov_b32 v77, s9
+; GFX11-TRUE16-NEXT: v_dual_mov_b32 v90, s10 :: v_dual_mov_b32 v119, s12
+; GFX11-TRUE16-NEXT: v_dual_mov_b32 v104, s11 :: v_dual_mov_b32 v135, s13
+; GFX11-TRUE16-NEXT: v_mov_b32_e32 v152, s14
+; GFX11-TRUE16-NEXT: v_mov_b32_e32 v170, s15
+; GFX11-TRUE16-NEXT: s_and_not1_b32 vcc_lo, exec_lo, s42
; GFX11-TRUE16-NEXT: s_cbranch_vccnz .LBB19_3
; GFX11-TRUE16-NEXT: .LBB19_2: ; %cmp.true
-; GFX11-TRUE16-NEXT: v_lshl_or_b32 v18, v49, 16, v67
-; GFX11-TRUE16-NEXT: v_lshl_or_b32 v19, v48, 16, v66
-; GFX11-TRUE16-NEXT: v_lshl_or_b32 v20, v39, 16, v65
-; GFX11-TRUE16-NEXT: v_lshl_or_b32 v21, v38, 16, v64
-; GFX11-TRUE16-NEXT: v_lshl_or_b32 v22, v37, 16, v55
-; GFX11-TRUE16-NEXT: v_lshl_or_b32 v23, v36, 16, v54
-; GFX11-TRUE16-NEXT: v_lshl_or_b32 v24, v35, 16, v53
-; GFX11-TRUE16-NEXT: v_lshl_or_b32 v25, v34, 16, v52
-; GFX11-TRUE16-NEXT: v_lshl_or_b32 v26, v33, 16, v51
-; GFX11-TRUE16-NEXT: v_lshl_or_b32 v27, v32, 16, v50
-; GFX11-TRUE16-NEXT: v_pk_add_f16 v0, 0x200, s0 op_sel_hi:[0,1]
-; GFX11-TRUE16-NEXT: v_pk_add_f16 v1, 0x200, s1 op_sel_hi:[0,1]
-; GFX11-TRUE16-NEXT: v_pk_add_f16 v2, 0x200, s2 op_sel_hi:[0,1]
-; GFX11-TRUE16-NEXT: v_pk_add_f16 v3, 0x200, s3 op_sel_hi:[0,1]
-; GFX11-TRUE16-NEXT: v_pk_add_f16 v4, 0x200, s4 op_sel_hi:[0,1]
-; GFX11-TRUE16-NEXT: v_pk_add_f16 v5, 0x200, s5 op_sel_hi:[0,1]
-; GFX11-TRUE16-NEXT: v_pk_add_f16 v6, 0x200, s6 op_sel_hi:[0,1]
-; GFX11-TRUE16-NEXT: v_pk_add_f16 v7, 0x200, s7 op_sel_hi:[0,1]
-; GFX11-TRUE16-NEXT: v_pk_add_f16 v8, 0x200, s8 op_sel_hi:[0,1]
-; GFX11-TRUE16-NEXT: v_pk_add_f16 v9, 0x200, s9 op_sel_hi:[0,1]
-; GFX11-TRUE16-NEXT: v_pk_add_f16 v10, 0x200, s10 op_sel_hi:[0,1]
-; GFX11-TRUE16-NEXT: v_pk_add_f16 v11, 0x200, s11 op_sel_hi:[0,1]
-; GFX11-TRUE16-NEXT: v_pk_add_f16 v12, 0x200, s12 op_sel_hi:[0,1]
-; GFX11-TRUE16-NEXT: v_pk_add_f16 v13, 0x200, s13 op_sel_hi:[0,1]
-; GFX11-TRUE16-NEXT: v_pk_add_f16 v14, 0x200, s14 op_sel_hi:[0,1]
-; GFX11-TRUE16-NEXT: v_pk_add_f16 v15, 0x200, s16 op_sel_hi:[0,1]
-; GFX11-TRUE16-NEXT: v_pk_add_f16 v16, 0x200, s17 op_sel_hi:[0,1]
-; GFX11-TRUE16-NEXT: v_pk_add_f16 v17, 0x200, s18 op_sel_hi:[0,1]
-; GFX11-TRUE16-NEXT: v_pk_add_f16 v18, 0x200, v18 op_sel_hi:[0,1]
-; GFX11-TRUE16-NEXT: v_pk_add_f16 v19, 0x200, v19 op_sel_hi:[0,1]
-; GFX11-TRUE16-NEXT: v_pk_add_f16 v20, 0x200, v20 op_sel_hi:[0,1]
-; GFX11-TRUE16-NEXT: v_pk_add_f16 v21, 0x200, v21 op_sel_hi:[0,1]
-; GFX11-TRUE16-NEXT: v_pk_add_f16 v22, 0x200, v22 op_sel_hi:[0,1]
-; GFX11-TRUE16-NEXT: v_pk_add_f16 v23, 0x200, v23 op_sel_hi:[0,1]
-; GFX11-TRUE16-NEXT: v_pk_add_f16 v24, 0x200, v24 op_sel_hi:[0,1]
+; GFX11-TRUE16-NEXT: v_pk_add_f16 v0, 0x200, s40 op_sel_hi:[0,1]
+; GFX11-TRUE16-NEXT: v_pk_add_f16 v2, 0x200, s41 op_sel_hi:[0,1]
+; GFX11-TRUE16-NEXT: v_pk_add_f16 v187, 0x200, v187 op_sel_hi:[0,1]
+; GFX11-TRUE16-NEXT: v_pk_add_f16 v186, 0x200, v186 op_sel_hi:[0,1]
+; GFX11-TRUE16-NEXT: v_pk_add_f16 v185, 0x200, v185 op_sel_hi:[0,1]
+; GFX11-TRUE16-NEXT: v_pk_add_f16 v191, 0x200, v191 op_sel_hi:[0,1]
+; GFX11-TRUE16-NEXT: v_pk_add_f16 v190, 0x200, v190 op_sel_hi:[0,1]
+; GFX11-TRUE16-NEXT: v_pk_add_f16 v189, 0x200, v189 op_sel_hi:[0,1]
+; GFX11-TRUE16-NEXT: v_pk_add_f16 v188, 0x200, v188 op_sel_hi:[0,1]
; GFX11-TRUE16-NEXT: v_pk_add_f16 v25, 0x200, v25 op_sel_hi:[0,1]
; GFX11-TRUE16-NEXT: v_pk_add_f16 v26, 0x200, v26 op_sel_hi:[0,1]
-; GFX11-TRUE16-NEXT: v_pk_add_f16 v27, 0x200, v27 op_sel_hi:[0,1]
+; GFX11-TRUE16-NEXT: v_pk_add_f16 v28, 0x200, v28 op_sel_hi:[0,1]
+; GFX11-TRUE16-NEXT: v_pk_add_f16 v5, 0x200, s0 op_sel_hi:[0,1]
+; GFX11-TRUE16-NEXT: v_pk_add_f16 v9, 0x200, s1 op_sel_hi:[0,1]
+; GFX11-TRUE16-NEXT: v_pk_add_f16 v14, 0x200, s2 op_sel_hi:[0,1]
+; GFX11-TRUE16-NEXT: v_pk_add_f16 v20, 0x200, s3 op_sel_hi:[0,1]
+; GFX11-TRUE16-NEXT: v_pk_add_f16 v27, 0x200, s4 op_sel_hi:[0,1]
+; GFX11-TRUE16-NEXT: v_pk_add_f16 v35, 0x200, s5 op_sel_hi:[0,1]
+; GFX11-TRUE16-NEXT: v_pk_add_f16 v44, 0x200, s6 op_sel_hi:[0,1]
+; GFX11-TRUE16-NEXT: v_pk_add_f16 v54, 0x200, s7 op_sel_hi:[0,1]
+; GFX11-TRUE16-NEXT: v_pk_add_f16 v65, 0x200, s8 op_sel_hi:[0,1]
+; GFX11-TRUE16-NEXT: v_pk_add_f16 v77, 0x200, s9 op_sel_hi:[0,1]
+; GFX11-TRUE16-NEXT: v_pk_add_f16 v90, 0x200, s10 op_sel_hi:[0,1]
+; GFX11-TRUE16-NEXT: v_pk_add_f16 v104, 0x200, s11 op_sel_hi:[0,1]
+; GFX11-TRUE16-NEXT: v_pk_add_f16 v119, 0x200, s12 op_sel_hi:[0,1]
+; GFX11-TRUE16-NEXT: v_pk_add_f16 v135, 0x200, s13 op_sel_hi:[0,1]
+; GFX11-TRUE16-NEXT: v_pk_add_f16 v152, 0x200, s14 op_sel_hi:[0,1]
+; GFX11-TRUE16-NEXT: v_pk_add_f16 v170, 0x200, s15 op_sel_hi:[0,1]
; GFX11-TRUE16-NEXT: .LBB19_3: ; %end
+; GFX11-TRUE16-NEXT: v_dual_mov_b32 v1, v2 :: v_dual_mov_b32 v2, v5
+; GFX11-TRUE16-NEXT: v_dual_mov_b32 v5, v20 :: v_dual_mov_b32 v6, v27
+; GFX11-TRUE16-NEXT: v_dual_mov_b32 v7, v35 :: v_dual_mov_b32 v8, v44
+; GFX11-TRUE16-NEXT: v_dual_mov_b32 v11, v77 :: v_dual_mov_b32 v12, v90
+; GFX11-TRUE16-NEXT: v_mov_b32_e32 v13, v104
+; GFX11-TRUE16-NEXT: v_dual_mov_b32 v15, v135 :: v_dual_mov_b32 v16, v152
+; GFX11-TRUE16-NEXT: v_dual_mov_b32 v17, v170 :: v_dual_mov_b32 v18, v187
+; GFX11-TRUE16-NEXT: v_dual_mov_b32 v19, v186 :: v_dual_mov_b32 v20, v185
+; GFX11-TRUE16-NEXT: v_dual_mov_b32 v21, v191 :: v_dual_mov_b32 v22, v190
+; GFX11-TRUE16-NEXT: v_dual_mov_b32 v23, v189 :: v_dual_mov_b32 v24, v188
+; GFX11-TRUE16-NEXT: s_clause 0x1f
+; GFX11-TRUE16-NEXT: scratch_load_b32 v191, off, s32
+; GFX11-TRUE16-NEXT: scratch_load_b32 v190, off, s32 offset:4
+; GFX11-TRUE16-NEXT: scratch_load_b32 v189, off, s32 offset:8
+; GFX11-TRUE16-NEXT: scratch_load_b32 v188, off, s32 offset:12
+; GFX11-TRUE16-NEXT: scratch_load_b32 v187, off, s32 offset:16
+; GFX11-TRUE16-NEXT: scratch_load_b32 v186, off, s32 offset:20
+; GFX11-TRUE16-NEXT: scratch_load_b32 v185, off, s32 offset:24
+; GFX11-TRUE16-NEXT: scratch_load_b32 v184, off, s32 offset:28
+; GFX11-TRUE16-NEXT: scratch_load_b32 v175, off, s32 offset:32
+; GFX11-TRUE16-NEXT: scratch_load_b32 v174, off, s32 offset:36
+; GFX11-TRUE16-NEXT: scratch_load_b32 v173, off, s32 offset:40
+; GFX11-TRUE16-NEXT: scratch_load_b32 v172, off, s32 offset:44
+; GFX11-TRUE16-NEXT: scratch_load_b32 v171, off, s32 offset:48
+; GFX11-TRUE16-NEXT: scratch_load_b32 v170, off, s32 offset:52
+; GFX11-TRUE16-NEXT: scratch_load_b32 v169, off, s32 offset:56
+; GFX11-TRUE16-NEXT: scratch_load_b32 v168, off, s32 offset:60
+; GFX11-TRUE16-NEXT: scratch_load_b32 v159, off, s32 offset:64
+; GFX11-TRUE16-NEXT: scratch_load_b32 v158, off, s32 offset:68
+; GFX11-TRUE16-NEXT: scratch_load_b32 v157, off, s32 offset:72
+; GFX11-TRUE16-NEXT: scratch_load_b32 v156, off, s32 offset:76
+; GFX11-TRUE16-NEXT: scratch_load_b32 v155, off, s32 offset:80
+; GFX11-TRUE16-NEXT: scratch_load_b32 v154, off, s32 offset:84
+; GFX11-TRUE16-NEXT: scratch_load_b32 v153, off, s32 offset:88
+; GFX11-TRUE16-NEXT: scratch_load_b32 v152, off, s32 offset:92
+; GFX11-TRUE16-NEXT: scratch_load_b32 v143, off, s32 offset:96
+; GFX11-TRUE16-NEXT: scratch_load_b32 v142, off, s32 offset:100
+; GFX11-TRUE16-NEXT: scratch_load_b32 v141, off, s32 offset:104
+; GFX11-TRUE16-NEXT: scratch_load_b32 v140, off, s32 offset:108
+; GFX11-TRUE16-NEXT: scratch_load_b32 v139, off, s32 offset:112
+; GFX11-TRUE16-NEXT: scratch_load_b32 v138, off, s32 offset:116
+; GFX11-TRUE16-NEXT: scratch_load_b32 v137, off, s32 offset:120
+; GFX11-TRUE16-NEXT: scratch_load_b32 v136, off, s32 offset:124
+; GFX11-TRUE16-NEXT: s_clause 0x1f
+; GFX11-TRUE16-NEXT: scratch_load_b32 v127, off, s32 offset:128
+; GFX11-TRUE16-NEXT: scratch_load_b32 v126, off, s32 offset:132
+; GFX11-TRUE16-NEXT: scratch_load_b32 v125, off, s32 offset:136
+; GFX11-TRUE16-NEXT: scratch_load_b32 v124, off, s32 offset:140
+; GFX11-TRUE16-NEXT: scratch_load_b32 v123, off, s32 offset:144
+; GFX11-TRUE16-NEXT: scratch_load_b32 v122, off, s32 offset:148
+; GFX11-TRUE16-NEXT: scratch_load_b32 v121, off, s32 offset:152
+; GFX11-TRUE16-NEXT: scratch_load_b32 v120, off, s32 offset:156
+; GFX11-TRUE16-NEXT: scratch_load_b32 v111, off, s32 offset:160
+; GFX11-TRUE16-NEXT: scratch_load_b32 v110, off, s32 offset:164
+; GFX11-TRUE16-NEXT: scratch_load_b32 v109, off, s32 offset:168
+; GFX11-TRUE16-NEXT: scratch_load_b32 v108, off, s32 offset:172
+; GFX11-TRUE16-NEXT: scratch_load_b32 v107, off, s32 offset:176
+; GFX11-TRUE16-NEXT: scratch_load_b32 v106, off, s32 offset:180
+; GFX11-TRUE16-NEXT: scratch_load_b32 v105, off, s32 offset:184
+; GFX11-TRUE16-NEXT: scratch_load_b32 v104, off, s32 offset:188
+; GFX11-TRUE16-NEXT: scratch_load_b32 v95, off, s32 offset:192
+; GFX11-TRUE16-NEXT: scratch_load_b32 v94, off, s32 offset:196
+; GFX11-TRUE16-NEXT: scratch_load_b32 v93, off, s32 offset:200
+; GFX11-TRUE16-NEXT: scratch_load_b32 v92, off, s32 offset:204
+; GFX11-TRUE16-NEXT: scratch_load_b32 v91, off, s32 offset:208
+; GFX11-TRUE16-NEXT: scratch_load_b32 v90, off, s32 offset:212
+; GFX11-TRUE16-NEXT: scratch_load_b32 v89, off, s32 offset:216
+; GFX11-TRUE16-NEXT: scratch_load_b32 v88, off, s32 offset:220
+; GFX11-TRUE16-NEXT: scratch_load_b32 v79, off, s32 offset:224
+; GFX11-TRUE16-NEXT: scratch_load_b32 v78, off, s32 offset:228
+; GFX11-TRUE16-NEXT: scratch_load_b32 v77, off, s32 offset:232
+; GFX11-TRUE16-NEXT: scratch_load_b32 v76, off, s32 offset:236
+; GFX11-TRUE16-NEXT: scratch_load_b32 v75, off, s32 offset:240
+; GFX11-TRUE16-NEXT: scratch_load_b32 v74, off, s32 offset:244
+; GFX11-TRUE16-NEXT: scratch_load_b32 v73, off, s32 offset:248
+; GFX11-TRUE16-NEXT: scratch_load_b32 v72, off, s32 offset:252
+; GFX11-TRUE16-NEXT: s_clause 0xf
+; GFX11-TRUE16-NEXT: scratch_load_b32 v63, off, s32 offset:256
+; GFX11-TRUE16-NEXT: scratch_load_b32 v62, off, s32 offset:260
+; GFX11-TRUE16-NEXT: scratch_load_b32 v61, off, s32 offset:264
+; GFX11-TRUE16-NEXT: scratch_load_b32 v60, off, s32 offset:268
+; GFX11-TRUE16-NEXT: scratch_load_b32 v59, off, s32 offset:272
+; GFX11-TRUE16-NEXT: scratch_load_b32 v58, off, s32 offset:276
+; GFX11-TRUE16-NEXT: scratch_load_b32 v57, off, s32 offset:280
+; GFX11-TRUE16-NEXT: scratch_load_b32 v56, off, s32 offset:284
+; GFX11-TRUE16-NEXT: scratch_load_b32 v47, off, s32 offset:288
+; GFX11-TRUE16-NEXT: scratch_load_b32 v46, off, s32 offset:292
+; GFX11-TRUE16-NEXT: scratch_load_b32 v45, off, s32 offset:296
+; GFX11-TRUE16-NEXT: scratch_load_b32 v44, off, s32 offset:300
+; GFX11-TRUE16-NEXT: scratch_load_b32 v43, off, s32 offset:304
+; GFX11-TRUE16-NEXT: scratch_load_b32 v42, off, s32 offset:308
+; GFX11-TRUE16-NEXT: scratch_load_b32 v41, off, s32 offset:312
+; GFX11-TRUE16-NEXT: scratch_load_b32 v40, off, s32 offset:316
+; GFX11-TRUE16-NEXT: v_dual_mov_b32 v3, v9 :: v_dual_mov_b32 v4, v14
+; GFX11-TRUE16-NEXT: v_dual_mov_b32 v9, v54 :: v_dual_mov_b32 v10, v65
+; GFX11-TRUE16-NEXT: v_dual_mov_b32 v14, v119 :: v_dual_mov_b32 v27, v28
+; GFX11-TRUE16-NEXT: s_waitcnt vmcnt(0)
; GFX11-TRUE16-NEXT: s_setpc_b64 s[30:31]
; GFX11-TRUE16-NEXT: .LBB19_4:
+; GFX11-TRUE16-NEXT: v_dual_mov_b32 v64, v28 :: v_dual_mov_b32 v53, v26
+; GFX11-TRUE16-NEXT: v_mov_b32_e32 v54, v25
; GFX11-TRUE16-NEXT: ; implicit-def: $vgpr0_vgpr1_vgpr2_vgpr3_vgpr4_vgpr5_vgpr6_vgpr7_vgpr8_vgpr9_vgpr10_vgpr11_vgpr12_vgpr13_vgpr14_vgpr15_vgpr16_vgpr17_vgpr18_vgpr19_vgpr20_vgpr21_vgpr22_vgpr23_vgpr24_vgpr25_vgpr26_vgpr27_vgpr28_vgpr29_vgpr30_vgpr31
+; GFX11-TRUE16-NEXT: ; implicit-def: $vgpr1_vgpr2_vgpr3_vgpr4_vgpr5_vgpr6_vgpr7_vgpr8_vgpr9_vgpr10_vgpr11_vgpr12_vgpr13_vgpr14_vgpr15_vgpr16_vgpr17_vgpr18_vgpr19_vgpr20_vgpr21_vgpr22_vgpr23_vgpr24_vgpr25_vgpr26_vgpr27_vgpr28_vgpr29_vgpr30_vgpr31_vgpr32
+; GFX11-TRUE16-NEXT: ; implicit-def: $vgpr3_vgpr4_vgpr5_vgpr6_vgpr7_vgpr8_vgpr9_vgpr10_vgpr11_vgpr12_vgpr13_vgpr14_vgpr15_vgpr16_vgpr17_vgpr18_vgpr19_vgpr20_vgpr21_vgpr22_vgpr23_vgpr24_vgpr25_vgpr26_vgpr27_vgpr28_vgpr29_vgpr30_vgpr31_vgpr32_vgpr33_vgpr34
+; GFX11-TRUE16-NEXT: ; implicit-def: $vgpr6_vgpr7_vgpr8_vgpr9_vgpr10_vgpr11_vgpr12_vgpr13_vgpr14_vgpr15_vgpr16_vgpr17_vgpr18_vgpr19_vgpr20_vgpr21_vgpr22_vgpr23_vgpr24_vgpr25_vgpr26_vgpr27_vgpr28_vgpr29_vgpr30_vgpr31_vgpr32_vgpr33_vgpr34_vgpr35_vgpr36_vgpr37
+; GFX11-TRUE16-NEXT: ; implicit-def: $vgpr10_vgpr11_vgpr12_vgpr13_vgpr14_vgpr15_vgpr16_vgpr17_vgpr18_vgpr19_vgpr20_vgpr21_vgpr22_vgpr23_vgpr24_vgpr25_vgpr26_vgpr27_vgpr28_vgpr29_vgpr30_vgpr31_vgpr32_vgpr33_vgpr34_vgpr35_vgpr36_vgpr37_vgpr38_vgpr39_vgpr40_vgpr41
+; GFX11-TRUE16-NEXT: ; implicit-def: $vgpr15_vgpr16_vgpr17_vgpr18_vgpr19_vgpr20_vgpr21_vgpr22_vgpr23_vgpr24_vgpr25_vgpr26_vgpr27_vgpr28_vgpr29_vgpr30_vgpr31_vgpr32_vgpr33_vgpr34_vgpr35_vgpr36_vgpr37_vgpr38_vgpr39_vgpr40_vgpr41_vgpr42_vgpr43_vgpr44_vgpr45_vgpr46
+; GFX11-TRUE16-NEXT: ; implicit-def: $vgpr21_vgpr22_vgpr23_vgpr24_vgpr25_vgpr26_vgpr27_vgpr28_vgpr29_vgpr30_vgpr31_vgpr32_vgpr33_vgpr34_vgpr35_vgpr36_vgpr37_vgpr38_vgpr39_vgpr40_vgpr41_vgpr42_vgpr43_vgpr44_vgpr45_vgpr46_vgpr47_vgpr48_vgpr49_vgpr50_vgpr51_vgpr52
+; GFX11-TRUE16-NEXT: s_delay_alu instid0(VALU_DEP_1) | instskip(NEXT) | instid1(VALU_DEP_3)
+; GFX11-TRUE16-NEXT: v_dual_mov_b32 v25, v54 :: v_dual_mov_b32 v26, v53
+; GFX11-TRUE16-NEXT: ; implicit-def: $vgpr28_vgpr29_vgpr30_vgpr31_vgpr32_vgpr33_vgpr34_vgpr35_vgpr36_vgpr37_vgpr38_vgpr39_vgpr40_vgpr41_vgpr42_vgpr43_vgpr44_vgpr45_vgpr46_vgpr47_vgpr48_vgpr49_vgpr50_vgpr51_vgpr52_vgpr53_vgpr54_vgpr55_vgpr56_vgpr57_vgpr58_vgpr59
+; GFX11-TRUE16-NEXT: v_mov_b32_e32 v28, v64
+; GFX11-TRUE16-NEXT: ; implicit-def: $vgpr36_vgpr37_vgpr38_vgpr39_vgpr40_vgpr41_vgpr42_vgpr43_vgpr44_vgpr45_vgpr46_vgpr47_vgpr48_vgpr49_vgpr50_vgpr51_vgpr52_vgpr53_vgpr54_vgpr55_vgpr56_vgpr57_vgpr58_vgpr59_vgpr60_vgpr61_vgpr62_vgpr63_vgpr64_vgpr65_vgpr66_vgpr67
+; GFX11-TRUE16-NEXT: ; implicit-def: $vgpr45_vgpr46_vgpr47_vgpr48_vgpr49_vgpr50_vgpr51_vgpr52_vgpr53_vgpr54_vgpr55_vgpr56_vgpr57_vgpr58_vgpr59_vgpr60_vgpr61_vgpr62_vgpr63_vgpr64_vgpr65_vgpr66_vgpr67_vgpr68_vgpr69_vgpr70_vgpr71_vgpr72_vgpr73_vgpr74_vgpr75_vgpr76
+; GFX11-TRUE16-NEXT: ; implicit-def: $vgpr55_vgpr56_vgpr57_vgpr58_vgpr59_vgpr60_vgpr61_vgpr62_vgpr63_vgpr64_vgpr65_vgpr66_vgpr67_vgpr68_vgpr69_vgpr70_vgpr71_vgpr72_vgpr73_vgpr74_vgpr75_vgpr76_vgpr77_vgpr78_vgpr79_vgpr80_vgpr81_vgpr82_vgpr83_vgpr84_vgpr85_vgpr86
+; GFX11-TRUE16-NEXT: ; implicit-def: $vgpr66_vgpr67_vgpr68_vgpr69_vgpr70_vgpr71_vgpr72_vgpr73_vgpr74_vgpr75_vgpr76_vgpr77_vgpr78_vgpr79_vgpr80_vgpr81_vgpr82_vgpr83_vgpr84_vgpr85_vgpr86_vgpr87_vgpr88_vgpr89_vgpr90_vgpr91_vgpr92_vgpr93_vgpr94_vgpr95_vgpr96_vgpr97
+; GFX11-TRUE16-NEXT: ; implicit-def: $vgpr78_vgpr79_vgpr80_vgpr81_vgpr82_vgpr83_vgpr84_vgpr85_vgpr86_vgpr87_vgpr88_vgpr89_vgpr90_vgpr91_vgpr92_vgpr93_vgpr94_vgpr95_vgpr96_vgpr97_vgpr98_vgpr99_vgpr100_vgpr101_vgpr102_vgpr103_vgpr104_vgpr105_vgpr106_vgpr107_vgpr108_vgpr109
+; GFX11-TRUE16-NEXT: ; implicit-def: $vgpr91_vgpr92_vgpr93_vgpr94_vgpr95_vgpr96_vgpr97_vgpr98_vgpr99_vgpr100_vgpr101_vgpr102_vgpr103_vgpr104_vgpr105_vgpr106_vgpr107_vgpr108_vgpr109_vgpr110_vgpr111_vgpr112_vgpr113_vgpr114_vgpr115_vgpr116_vgpr117_vgpr118_vgpr119_vgpr120_vgpr121_vgpr122
+; GFX11-TRUE16-NEXT: ; implicit-def: $vgpr105_vgpr106_vgpr107_vgpr108_vgpr109_vgpr110_vgpr111_vgpr112_vgpr113_vgpr114_vgpr115_vgpr116_vgpr117_vgpr118_vgpr119_vgpr120_vgpr121_vgpr122_vgpr123_vgpr124_vgpr125_vgpr126_vgpr127_vgpr128_vgpr129_vgpr130_vgpr131_vgpr132_vgpr133_vgpr134_vgpr135_vgpr136
+; GFX11-TRUE16-NEXT: ; implicit-def: $vgpr120_vgpr121_vgpr122_vgpr123_vgpr124_vgpr125_vgpr126_vgpr127_vgpr128_vgpr129_vgpr130_vgpr131_vgpr132_vgpr133_vgpr134_vgpr135_vgpr136_vgpr137_vgpr138_vgpr139_vgpr140_vgpr141_vgpr142_vgpr143_vgpr144_vgpr145_vgpr146_vgpr147_vgpr148_vgpr149_vgpr150_vgpr151
+; GFX11-TRUE16-NEXT: ; implicit-def: $vgpr136_vgpr137_vgpr138_vgpr139_vgpr140_vgpr141_vgpr142_vgpr143_vgpr144_vgpr145_vgpr146_vgpr147_vgpr148_vgpr149_vgpr150_vgpr151_vgpr152_vgpr153_vgpr154_vgpr155_vgpr156_vgpr157_vgpr158_vgpr159_vgpr160_vgpr161_vgpr162_vgpr163_vgpr164_vgpr165_vgpr166_vgpr167
+; GFX11-TRUE16-NEXT: ; implicit-def: $vgpr153_vgpr154_vgpr155_vgpr156_vgpr157_vgpr158_vgpr159_vgpr160_vgpr161_vgpr162_vgpr163_vgpr164_vgpr165_vgpr166_vgpr167_vgpr168_vgpr169_vgpr170_vgpr171_vgpr172_vgpr173_vgpr174_vgpr175_vgpr176_vgpr177_vgpr178_vgpr179_vgpr180_vgpr181_vgpr182_vgpr183_vgpr184
; GFX11-TRUE16-NEXT: s_branch .LBB19_2
;
; GFX11-FAKE16-LABEL: bitcast_v56f16_to_v28i32_scalar:
@@ -15595,191 +15911,364 @@ define inreg <56 x i16> @bitcast_v28f32_to_v56i16_scalar(<28 x float> inreg %a,
; GFX9-NEXT: ; implicit-def: $vgpr50
; GFX9-NEXT: s_branch .LBB29_2
;
-; GFX11-LABEL: bitcast_v28f32_to_v56i16_scalar:
-; GFX11: ; %bb.0:
-; GFX11-NEXT: s_waitcnt vmcnt(0) expcnt(0) lgkmcnt(0)
-; GFX11-NEXT: v_cmp_ne_u32_e32 vcc_lo, 0, v10
-; GFX11-NEXT: v_dual_mov_b32 v28, s0 :: v_dual_mov_b32 v27, s1
-; GFX11-NEXT: v_dual_mov_b32 v26, s2 :: v_dual_mov_b32 v25, s3
-; GFX11-NEXT: v_dual_mov_b32 v24, s16 :: v_dual_mov_b32 v23, s17
-; GFX11-NEXT: v_dual_mov_b32 v22, s18 :: v_dual_mov_b32 v21, s19
-; GFX11-NEXT: v_dual_mov_b32 v20, s20 :: v_dual_mov_b32 v19, s21
-; GFX11-NEXT: v_dual_mov_b32 v18, s22 :: v_dual_mov_b32 v11, s24
-; GFX11-NEXT: v_dual_mov_b32 v12, s23 :: v_dual_mov_b32 v15, s25
-; GFX11-NEXT: v_dual_mov_b32 v14, s26 :: v_dual_mov_b32 v13, s27
-; GFX11-NEXT: v_dual_mov_b32 v17, s28 :: v_dual_mov_b32 v16, s29
-; GFX11-NEXT: s_mov_b32 s0, 0
-; GFX11-NEXT: s_and_b32 s1, vcc_lo, exec_lo
-; GFX11-NEXT: s_cbranch_scc0 .LBB29_4
-; GFX11-NEXT: ; %bb.1: ; %cmp.false
-; GFX11-NEXT: v_lshrrev_b32_e32 v38, 16, v9
-; GFX11-NEXT: v_lshrrev_b32_e32 v39, 16, v8
-; GFX11-NEXT: v_lshrrev_b32_e32 v48, 16, v7
-; GFX11-NEXT: v_lshrrev_b32_e32 v49, 16, v6
-; GFX11-NEXT: v_lshrrev_b32_e32 v50, 16, v5
-; GFX11-NEXT: v_lshrrev_b32_e32 v51, 16, v4
-; GFX11-NEXT: v_lshrrev_b32_e32 v52, 16, v3
-; GFX11-NEXT: v_lshrrev_b32_e32 v53, 16, v2
-; GFX11-NEXT: v_lshrrev_b32_e32 v54, 16, v1
-; GFX11-NEXT: v_lshrrev_b32_e32 v55, 16, v0
-; GFX11-NEXT: v_lshrrev_b32_e32 v64, 16, v16
-; GFX11-NEXT: v_lshrrev_b32_e32 v65, 16, v17
-; GFX11-NEXT: v_lshrrev_b32_e32 v66, 16, v13
-; GFX11-NEXT: v_lshrrev_b32_e32 v67, 16, v14
-; GFX11-NEXT: v_lshrrev_b32_e32 v68, 16, v15
-; GFX11-NEXT: v_lshrrev_b32_e32 v69, 16, v11
-; GFX11-NEXT: v_lshrrev_b32_e32 v70, 16, v12
-; GFX11-NEXT: v_lshrrev_b32_e32 v10, 16, v18
-; GFX11-NEXT: v_lshrrev_b32_e32 v29, 16, v19
-; GFX11-NEXT: v_lshrrev_b32_e32 v71, 16, v20
-; GFX11-NEXT: v_lshrrev_b32_e32 v37, 16, v21
-; GFX11-NEXT: v_lshrrev_b32_e32 v36, 16, v22
-; GFX11-NEXT: v_lshrrev_b32_e32 v35, 16, v23
-; GFX11-NEXT: v_lshrrev_b32_e32 v34, 16, v24
-; GFX11-NEXT: v_lshrrev_b32_e32 v33, 16, v25
-; GFX11-NEXT: v_lshrrev_b32_e32 v32, 16, v26
-; GFX11-NEXT: v_lshrrev_b32_e32 v31, 16, v27
-; GFX11-NEXT: v_lshrrev_b32_e32 v30, 16, v28
-; GFX11-NEXT: s_and_not1_b32 vcc_lo, exec_lo, s0
-; GFX11-NEXT: s_cbranch_vccnz .LBB29_3
-; GFX11-NEXT: .LBB29_2: ; %cmp.true
-; GFX11-NEXT: v_dual_add_f32 v9, 1.0, v9 :: v_dual_add_f32 v8, 1.0, v8
-; GFX11-NEXT: v_dual_add_f32 v7, 1.0, v7 :: v_dual_add_f32 v6, 1.0, v6
-; GFX11-NEXT: v_dual_add_f32 v5, 1.0, v5 :: v_dual_add_f32 v4, 1.0, v4
-; GFX11-NEXT: v_dual_add_f32 v3, 1.0, v3 :: v_dual_add_f32 v2, 1.0, v2
-; GFX11-NEXT: v_dual_add_f32 v1, 1.0, v1 :: v_dual_add_f32 v0, 1.0, v0
-; GFX11-NEXT: v_dual_add_f32 v16, 1.0, v16 :: v_dual_add_f32 v17, 1.0, v17
-; GFX11-NEXT: v_dual_add_f32 v13, 1.0, v13 :: v_dual_add_f32 v14, 1.0, v14
-; GFX11-NEXT: v_dual_add_f32 v15, 1.0, v15 :: v_dual_add_f32 v12, 1.0, v12
-; GFX11-NEXT: v_dual_add_f32 v11, 1.0, v11 :: v_dual_add_f32 v18, 1.0, v18
-; GFX11-NEXT: v_dual_add_f32 v19, 1.0, v19 :: v_dual_add_f32 v20, 1.0, v20
-; GFX11-NEXT: v_dual_add_f32 v21, 1.0, v21 :: v_dual_add_f32 v22, 1.0, v22
-; GFX11-NEXT: v_dual_add_f32 v23, 1.0, v23 :: v_dual_add_f32 v24, 1.0, v24
-; GFX11-NEXT: v_dual_add_f32 v25, 1.0, v25 :: v_dual_add_f32 v26, 1.0, v26
-; GFX11-NEXT: v_dual_add_f32 v27, 1.0, v27 :: v_dual_add_f32 v28, 1.0, v28
-; GFX11-NEXT: v_lshrrev_b32_e32 v38, 16, v9
-; GFX11-NEXT: v_lshrrev_b32_e32 v39, 16, v8
-; GFX11-NEXT: v_lshrrev_b32_e32 v48, 16, v7
-; GFX11-NEXT: v_lshrrev_b32_e32 v49, 16, v6
-; GFX11-NEXT: v_lshrrev_b32_e32 v50, 16, v5
-; GFX11-NEXT: v_lshrrev_b32_e32 v51, 16, v4
-; GFX11-NEXT: v_lshrrev_b32_e32 v52, 16, v3
-; GFX11-NEXT: v_lshrrev_b32_e32 v53, 16, v2
-; GFX11-NEXT: v_lshrrev_b32_e32 v54, 16, v1
-; GFX11-NEXT: v_lshrrev_b32_e32 v55, 16, v0
-; GFX11-NEXT: v_lshrrev_b32_e32 v64, 16, v16
-; GFX11-NEXT: v_lshrrev_b32_e32 v65, 16, v17
-; GFX11-NEXT: v_lshrrev_b32_e32 v66, 16, v13
-; GFX11-NEXT: v_lshrrev_b32_e32 v67, 16, v14
-; GFX11-NEXT: v_lshrrev_b32_e32 v68, 16, v15
-; GFX11-NEXT: v_lshrrev_b32_e32 v69, 16, v11
-; GFX11-NEXT: v_lshrrev_b32_e32 v70, 16, v12
-; GFX11-NEXT: v_lshrrev_b32_e32 v10, 16, v18
-; GFX11-NEXT: v_lshrrev_b32_e32 v29, 16, v19
-; GFX11-NEXT: v_lshrrev_b32_e32 v71, 16, v20
-; GFX11-NEXT: v_lshrrev_b32_e32 v37, 16, v21
-; GFX11-NEXT: v_lshrrev_b32_e32 v36, 16, v22
-; GFX11-NEXT: v_lshrrev_b32_e32 v35, 16, v23
-; GFX11-NEXT: v_lshrrev_b32_e32 v34, 16, v24
-; GFX11-NEXT: v_lshrrev_b32_e32 v33, 16, v25
-; GFX11-NEXT: v_lshrrev_b32_e32 v32, 16, v26
-; GFX11-NEXT: v_lshrrev_b32_e32 v31, 16, v27
-; GFX11-NEXT: v_lshrrev_b32_e32 v30, 16, v28
-; GFX11-NEXT: .LBB29_3: ; %end
-; GFX11-NEXT: v_and_b32_e32 v19, 0xffff, v19
-; GFX11-NEXT: v_and_b32_e32 v27, 0xffff, v27
-; GFX11-NEXT: v_and_b32_e32 v25, 0xffff, v25
-; GFX11-NEXT: v_and_b32_e32 v23, 0xffff, v23
-; GFX11-NEXT: v_and_b32_e32 v21, 0xffff, v21
-; GFX11-NEXT: v_and_b32_e32 v18, 0xffff, v18
-; GFX11-NEXT: v_lshl_or_b32 v29, v29, 16, v19
-; GFX11-NEXT: v_and_b32_e32 v17, 0xffff, v17
-; GFX11-NEXT: v_and_b32_e32 v19, 0xffff, v16
-; GFX11-NEXT: v_and_b32_e32 v1, 0xffff, v1
-; GFX11-NEXT: v_lshl_or_b32 v31, v31, 16, v27
-; GFX11-NEXT: v_and_b32_e32 v28, 0xffff, v28
-; GFX11-NEXT: v_lshl_or_b32 v33, v33, 16, v25
-; GFX11-NEXT: v_and_b32_e32 v26, 0xffff, v26
-; GFX11-NEXT: v_lshl_or_b32 v35, v35, 16, v23
-; GFX11-NEXT: v_and_b32_e32 v24, 0xffff, v24
-; GFX11-NEXT: v_lshl_or_b32 v37, v37, 16, v21
-; GFX11-NEXT: v_and_b32_e32 v22, 0xffff, v22
-; GFX11-NEXT: v_and_b32_e32 v20, 0xffff, v20
-; GFX11-NEXT: v_and_b32_e32 v12, 0xffff, v12
-; GFX11-NEXT: v_and_b32_e32 v21, 0xffff, v11
-; GFX11-NEXT: v_lshl_or_b32 v10, v10, 16, v18
-; GFX11-NEXT: v_and_b32_e32 v15, 0xffff, v15
-; GFX11-NEXT: v_and_b32_e32 v18, 0xffff, v13
-; GFX11-NEXT: v_lshl_or_b32 v16, v65, 16, v17
-; GFX11-NEXT: v_lshl_or_b32 v17, v64, 16, v19
-; GFX11-NEXT: v_and_b32_e32 v0, 0xffff, v0
-; GFX11-NEXT: v_and_b32_e32 v2, 0xffff, v2
-; GFX11-NEXT: v_and_b32_e32 v3, 0xffff, v3
-; GFX11-NEXT: v_and_b32_e32 v4, 0xffff, v4
-; GFX11-NEXT: v_lshl_or_b32 v19, v54, 16, v1
-; GFX11-NEXT: v_and_b32_e32 v1, 0xffff, v6
-; GFX11-NEXT: v_lshl_or_b32 v32, v32, 16, v26
-; GFX11-NEXT: v_lshl_or_b32 v11, v70, 16, v12
-; GFX11-NEXT: v_lshl_or_b32 v12, v69, 16, v21
-; GFX11-NEXT: v_and_b32_e32 v14, 0xffff, v14
-; GFX11-NEXT: v_lshl_or_b32 v13, v68, 16, v15
-; GFX11-NEXT: v_lshl_or_b32 v15, v66, 16, v18
-; GFX11-NEXT: v_lshl_or_b32 v18, v55, 16, v0
-; GFX11-NEXT: v_lshl_or_b32 v21, v52, 16, v3
-; GFX11-NEXT: v_and_b32_e32 v0, 0xffff, v5
-; GFX11-NEXT: v_and_b32_e32 v3, 0xffff, v8
-; GFX11-NEXT: v_mov_b32_e32 v5, v35
-; GFX11-NEXT: v_lshl_or_b32 v34, v34, 16, v24
-; GFX11-NEXT: v_lshl_or_b32 v24, v49, 16, v1
-; GFX11-NEXT: v_mov_b32_e32 v1, v31
-; GFX11-NEXT: v_lshl_or_b32 v30, v30, 16, v28
-; GFX11-NEXT: v_lshl_or_b32 v28, v71, 16, v20
-; GFX11-NEXT: v_lshl_or_b32 v20, v53, 16, v2
-; GFX11-NEXT: v_dual_mov_b32 v7, v37 :: v_dual_and_b32 v2, 0xffff, v7
-; GFX11-NEXT: v_lshl_or_b32 v36, v36, 16, v22
-; GFX11-NEXT: v_lshl_or_b32 v22, v51, 16, v4
-; GFX11-NEXT: v_and_b32_e32 v4, 0xffff, v9
-; GFX11-NEXT: v_lshl_or_b32 v14, v67, 16, v14
-; GFX11-NEXT: v_lshl_or_b32 v23, v50, 16, v0
-; GFX11-NEXT: v_lshl_or_b32 v25, v48, 16, v2
-; GFX11-NEXT: v_lshl_or_b32 v26, v39, 16, v3
-; GFX11-NEXT: v_lshl_or_b32 v27, v38, 16, v4
-; GFX11-NEXT: v_mov_b32_e32 v0, v30
-; GFX11-NEXT: v_dual_mov_b32 v2, v32 :: v_dual_mov_b32 v3, v33
-; GFX11-NEXT: v_mov_b32_e32 v4, v34
-; GFX11-NEXT: v_mov_b32_e32 v6, v36
-; GFX11-NEXT: v_dual_mov_b32 v8, v28 :: v_dual_mov_b32 v9, v29
-; GFX11-NEXT: s_setpc_b64 s[30:31]
-; GFX11-NEXT: .LBB29_4:
-; GFX11-NEXT: ; implicit-def: $vgpr30
-; GFX11-NEXT: ; implicit-def: $vgpr31
-; GFX11-NEXT: ; implicit-def: $vgpr32
-; GFX11-NEXT: ; implicit-def: $vgpr33
-; GFX11-NEXT: ; implicit-def: $vgpr34
-; GFX11-NEXT: ; implicit-def: $vgpr35
-; GFX11-NEXT: ; implicit-def: $vgpr36
-; GFX11-NEXT: ; implicit-def: $vgpr37
-; GFX11-NEXT: ; implicit-def: $vgpr71
-; GFX11-NEXT: ; implicit-def: $vgpr29
-; GFX11-NEXT: ; implicit-def: $vgpr10
-; GFX11-NEXT: ; implicit-def: $vgpr70
-; GFX11-NEXT: ; implicit-def: $vgpr69
-; GFX11-NEXT: ; implicit-def: $vgpr68
-; GFX11-NEXT: ; implicit-def: $vgpr67
-; GFX11-NEXT: ; implicit-def: $vgpr66
-; GFX11-NEXT: ; implicit-def: $vgpr65
-; GFX11-NEXT: ; implicit-def: $vgpr64
-; GFX11-NEXT: ; implicit-def: $vgpr55
-; GFX11-NEXT: ; implicit-def: $vgpr54
-; GFX11-NEXT: ; implicit-def: $vgpr53
-; GFX11-NEXT: ; implicit-def: $vgpr52
-; GFX11-NEXT: ; implicit-def: $vgpr51
-; GFX11-NEXT: ; implicit-def: $vgpr50
-; GFX11-NEXT: ; implicit-def: $vgpr49
-; GFX11-NEXT: ; implicit-def: $vgpr48
-; GFX11-NEXT: ; implicit-def: $vgpr39
-; GFX11-NEXT: ; implicit-def: $vgpr38
-; GFX11-NEXT: s_branch .LBB29_2
+; GFX11-TRUE16-LABEL: bitcast_v28f32_to_v56i16_scalar:
+; GFX11-TRUE16: ; %bb.0:
+; GFX11-TRUE16-NEXT: s_waitcnt vmcnt(0) expcnt(0) lgkmcnt(0)
+; GFX11-TRUE16-NEXT: v_dual_mov_b32 v16, v10 :: v_dual_mov_b32 v27, v9
+; GFX11-TRUE16-NEXT: v_dual_mov_b32 v26, v8 :: v_dual_mov_b32 v25, v7
+; GFX11-TRUE16-NEXT: v_dual_mov_b32 v24, v6 :: v_dual_mov_b32 v23, v5
+; GFX11-TRUE16-NEXT: s_delay_alu instid0(VALU_DEP_3)
+; GFX11-TRUE16-NEXT: v_cmp_ne_u32_e32 vcc_lo, 0, v16
+; GFX11-TRUE16-NEXT: v_dual_mov_b32 v22, v4 :: v_dual_mov_b32 v21, v3
+; GFX11-TRUE16-NEXT: v_dual_mov_b32 v20, v2 :: v_dual_mov_b32 v19, v1
+; GFX11-TRUE16-NEXT: v_dual_mov_b32 v18, v0 :: v_dual_mov_b32 v1, s1
+; GFX11-TRUE16-NEXT: v_dual_mov_b32 v0, s0 :: v_dual_mov_b32 v3, s3
+; GFX11-TRUE16-NEXT: v_dual_mov_b32 v2, s2 :: v_dual_mov_b32 v5, s17
+; GFX11-TRUE16-NEXT: v_dual_mov_b32 v4, s16 :: v_dual_mov_b32 v7, s19
+; GFX11-TRUE16-NEXT: v_dual_mov_b32 v6, s18 :: v_dual_mov_b32 v9, s21
+; GFX11-TRUE16-NEXT: v_dual_mov_b32 v8, s20 :: v_dual_mov_b32 v11, s23
+; GFX11-TRUE16-NEXT: v_dual_mov_b32 v10, s22 :: v_dual_mov_b32 v13, s25
+; GFX11-TRUE16-NEXT: v_dual_mov_b32 v12, s24 :: v_dual_mov_b32 v15, s27
+; GFX11-TRUE16-NEXT: v_dual_mov_b32 v14, s26 :: v_dual_mov_b32 v17, s29
+; GFX11-TRUE16-NEXT: v_mov_b32_e32 v16, s28
+; GFX11-TRUE16-NEXT: s_mov_b32 s0, 0
+; GFX11-TRUE16-NEXT: s_and_b32 s1, vcc_lo, exec_lo
+; GFX11-TRUE16-NEXT: s_cbranch_scc0 .LBB29_4
+; GFX11-TRUE16-NEXT: ; %bb.1: ; %cmp.false
+; GFX11-TRUE16-NEXT: v_lshrrev_b32_e32 v28, 16, v27
+; GFX11-TRUE16-NEXT: v_lshrrev_b32_e32 v29, 16, v26
+; GFX11-TRUE16-NEXT: v_lshrrev_b32_e32 v30, 16, v25
+; GFX11-TRUE16-NEXT: v_lshrrev_b32_e32 v31, 16, v24
+; GFX11-TRUE16-NEXT: v_lshrrev_b32_e32 v32, 16, v23
+; GFX11-TRUE16-NEXT: v_lshrrev_b32_e32 v33, 16, v22
+; GFX11-TRUE16-NEXT: v_lshrrev_b32_e32 v34, 16, v21
+; GFX11-TRUE16-NEXT: v_lshrrev_b32_e32 v35, 16, v20
+; GFX11-TRUE16-NEXT: v_lshrrev_b32_e32 v36, 16, v19
+; GFX11-TRUE16-NEXT: v_lshrrev_b32_e32 v37, 16, v18
+; GFX11-TRUE16-NEXT: v_lshrrev_b32_e32 v38, 16, v17
+; GFX11-TRUE16-NEXT: v_lshrrev_b32_e32 v39, 16, v16
+; GFX11-TRUE16-NEXT: v_lshrrev_b32_e32 v48, 16, v15
+; GFX11-TRUE16-NEXT: v_lshrrev_b32_e32 v49, 16, v14
+; GFX11-TRUE16-NEXT: v_lshrrev_b32_e32 v50, 16, v13
+; GFX11-TRUE16-NEXT: v_lshrrev_b32_e32 v51, 16, v12
+; GFX11-TRUE16-NEXT: v_lshrrev_b32_e32 v52, 16, v11
+; GFX11-TRUE16-NEXT: v_lshrrev_b32_e32 v53, 16, v10
+; GFX11-TRUE16-NEXT: v_lshrrev_b32_e32 v54, 16, v9
+; GFX11-TRUE16-NEXT: v_lshrrev_b32_e32 v55, 16, v8
+; GFX11-TRUE16-NEXT: v_lshrrev_b32_e32 v64, 16, v7
+; GFX11-TRUE16-NEXT: v_lshrrev_b32_e32 v65, 16, v6
+; GFX11-TRUE16-NEXT: v_lshrrev_b32_e32 v66, 16, v5
+; GFX11-TRUE16-NEXT: v_lshrrev_b32_e32 v67, 16, v4
+; GFX11-TRUE16-NEXT: v_lshrrev_b32_e32 v68, 16, v3
+; GFX11-TRUE16-NEXT: v_lshrrev_b32_e32 v69, 16, v2
+; GFX11-TRUE16-NEXT: v_lshrrev_b32_e32 v70, 16, v1
+; GFX11-TRUE16-NEXT: v_lshrrev_b32_e32 v71, 16, v0
+; GFX11-TRUE16-NEXT: s_and_not1_b32 vcc_lo, exec_lo, s0
+; GFX11-TRUE16-NEXT: s_cbranch_vccnz .LBB29_3
+; GFX11-TRUE16-NEXT: .LBB29_2: ; %cmp.true
+; GFX11-TRUE16-NEXT: v_dual_add_f32 v27, 1.0, v27 :: v_dual_add_f32 v26, 1.0, v26
+; GFX11-TRUE16-NEXT: v_dual_add_f32 v25, 1.0, v25 :: v_dual_add_f32 v24, 1.0, v24
+; GFX11-TRUE16-NEXT: v_dual_add_f32 v23, 1.0, v23 :: v_dual_add_f32 v22, 1.0, v22
+; GFX11-TRUE16-NEXT: v_dual_add_f32 v21, 1.0, v21 :: v_dual_add_f32 v20, 1.0, v20
+; GFX11-TRUE16-NEXT: v_dual_add_f32 v19, 1.0, v19 :: v_dual_add_f32 v18, 1.0, v18
+; GFX11-TRUE16-NEXT: v_dual_add_f32 v17, 1.0, v17 :: v_dual_add_f32 v16, 1.0, v16
+; GFX11-TRUE16-NEXT: v_dual_add_f32 v15, 1.0, v15 :: v_dual_add_f32 v14, 1.0, v14
+; GFX11-TRUE16-NEXT: v_dual_add_f32 v13, 1.0, v13 :: v_dual_add_f32 v12, 1.0, v12
+; GFX11-TRUE16-NEXT: v_dual_add_f32 v11, 1.0, v11 :: v_dual_add_f32 v10, 1.0, v10
+; GFX11-TRUE16-NEXT: v_dual_add_f32 v9, 1.0, v9 :: v_dual_add_f32 v8, 1.0, v8
+; GFX11-TRUE16-NEXT: v_dual_add_f32 v7, 1.0, v7 :: v_dual_add_f32 v6, 1.0, v6
+; GFX11-TRUE16-NEXT: v_dual_add_f32 v5, 1.0, v5 :: v_dual_add_f32 v4, 1.0, v4
+; GFX11-TRUE16-NEXT: v_dual_add_f32 v3, 1.0, v3 :: v_dual_add_f32 v2, 1.0, v2
+; GFX11-TRUE16-NEXT: v_dual_add_f32 v1, 1.0, v1 :: v_dual_add_f32 v0, 1.0, v0
+; GFX11-TRUE16-NEXT: v_lshrrev_b32_e32 v28, 16, v27
+; GFX11-TRUE16-NEXT: v_lshrrev_b32_e32 v29, 16, v26
+; GFX11-TRUE16-NEXT: v_lshrrev_b32_e32 v30, 16, v25
+; GFX11-TRUE16-NEXT: v_lshrrev_b32_e32 v31, 16, v24
+; GFX11-TRUE16-NEXT: v_lshrrev_b32_e32 v32, 16, v23
+; GFX11-TRUE16-NEXT: v_lshrrev_b32_e32 v33, 16, v22
+; GFX11-TRUE16-NEXT: v_lshrrev_b32_e32 v34, 16, v21
+; GFX11-TRUE16-NEXT: v_lshrrev_b32_e32 v35, 16, v20
+; GFX11-TRUE16-NEXT: v_lshrrev_b32_e32 v36, 16, v19
+; GFX11-TRUE16-NEXT: v_lshrrev_b32_e32 v37, 16, v18
+; GFX11-TRUE16-NEXT: v_lshrrev_b32_e32 v38, 16, v17
+; GFX11-TRUE16-NEXT: v_lshrrev_b32_e32 v39, 16, v16
+; GFX11-TRUE16-NEXT: v_lshrrev_b32_e32 v48, 16, v15
+; GFX11-TRUE16-NEXT: v_lshrrev_b32_e32 v49, 16, v14
+; GFX11-TRUE16-NEXT: v_lshrrev_b32_e32 v50, 16, v13
+; GFX11-TRUE16-NEXT: v_lshrrev_b32_e32 v51, 16, v12
+; GFX11-TRUE16-NEXT: v_lshrrev_b32_e32 v52, 16, v11
+; GFX11-TRUE16-NEXT: v_lshrrev_b32_e32 v53, 16, v10
+; GFX11-TRUE16-NEXT: v_lshrrev_b32_e32 v54, 16, v9
+; GFX11-TRUE16-NEXT: v_lshrrev_b32_e32 v55, 16, v8
+; GFX11-TRUE16-NEXT: v_lshrrev_b32_e32 v64, 16, v7
+; GFX11-TRUE16-NEXT: v_lshrrev_b32_e32 v65, 16, v6
+; GFX11-TRUE16-NEXT: v_lshrrev_b32_e32 v66, 16, v5
+; GFX11-TRUE16-NEXT: v_lshrrev_b32_e32 v67, 16, v4
+; GFX11-TRUE16-NEXT: v_lshrrev_b32_e32 v68, 16, v3
+; GFX11-TRUE16-NEXT: v_lshrrev_b32_e32 v69, 16, v2
+; GFX11-TRUE16-NEXT: v_lshrrev_b32_e32 v70, 16, v1
+; GFX11-TRUE16-NEXT: v_lshrrev_b32_e32 v71, 16, v0
+; GFX11-TRUE16-NEXT: .LBB29_3: ; %end
+; GFX11-TRUE16-NEXT: s_delay_alu instid0(VALU_DEP_1) | instskip(NEXT) | instid1(VALU_DEP_4)
+; GFX11-TRUE16-NEXT: v_dual_mov_b32 v71, v71 :: v_dual_mov_b32 v70, v70
+; GFX11-TRUE16-NEXT: v_dual_mov_b32 v69, v69 :: v_dual_mov_b32 v68, v68
+; GFX11-TRUE16-NEXT: v_dual_mov_b32 v67, v67 :: v_dual_mov_b32 v66, v66
+; GFX11-TRUE16-NEXT: v_dual_mov_b32 v65, v65 :: v_dual_mov_b32 v64, v64
+; GFX11-TRUE16-NEXT: v_dual_mov_b32 v55, v55 :: v_dual_mov_b32 v54, v54
+; GFX11-TRUE16-NEXT: v_dual_mov_b32 v53, v53 :: v_dual_mov_b32 v52, v52
+; GFX11-TRUE16-NEXT: v_dual_mov_b32 v51, v51 :: v_dual_mov_b32 v50, v50
+; GFX11-TRUE16-NEXT: v_dual_mov_b32 v49, v49 :: v_dual_mov_b32 v48, v48
+; GFX11-TRUE16-NEXT: v_dual_mov_b32 v39, v39 :: v_dual_mov_b32 v38, v38
+; GFX11-TRUE16-NEXT: v_dual_mov_b32 v37, v37 :: v_dual_mov_b32 v36, v36
+; GFX11-TRUE16-NEXT: v_dual_mov_b32 v35, v35 :: v_dual_mov_b32 v34, v34
+; GFX11-TRUE16-NEXT: v_dual_mov_b32 v33, v33 :: v_dual_mov_b32 v32, v32
+; GFX11-TRUE16-NEXT: v_dual_mov_b32 v31, v31 :: v_dual_mov_b32 v30, v30
+; GFX11-TRUE16-NEXT: v_dual_mov_b32 v29, v29 :: v_dual_mov_b32 v28, v28
+; GFX11-TRUE16-NEXT: v_mov_b16_e32 v0.h, v71.l
+; GFX11-TRUE16-NEXT: v_mov_b16_e32 v1.h, v70.l
+; GFX11-TRUE16-NEXT: v_mov_b16_e32 v2.h, v69.l
+; GFX11-TRUE16-NEXT: v_mov_b16_e32 v3.h, v68.l
+; GFX11-TRUE16-NEXT: v_mov_b16_e32 v4.h, v67.l
+; GFX11-TRUE16-NEXT: v_mov_b16_e32 v5.h, v66.l
+; GFX11-TRUE16-NEXT: v_mov_b16_e32 v6.h, v65.l
+; GFX11-TRUE16-NEXT: v_mov_b16_e32 v7.h, v64.l
+; GFX11-TRUE16-NEXT: v_mov_b16_e32 v8.h, v55.l
+; GFX11-TRUE16-NEXT: v_mov_b16_e32 v9.h, v54.l
+; GFX11-TRUE16-NEXT: v_mov_b16_e32 v10.h, v53.l
+; GFX11-TRUE16-NEXT: v_mov_b16_e32 v11.h, v52.l
+; GFX11-TRUE16-NEXT: v_mov_b16_e32 v12.h, v51.l
+; GFX11-TRUE16-NEXT: v_mov_b16_e32 v13.h, v50.l
+; GFX11-TRUE16-NEXT: v_mov_b16_e32 v14.h, v49.l
+; GFX11-TRUE16-NEXT: v_mov_b16_e32 v15.h, v48.l
+; GFX11-TRUE16-NEXT: v_mov_b16_e32 v16.h, v39.l
+; GFX11-TRUE16-NEXT: v_mov_b16_e32 v17.h, v38.l
+; GFX11-TRUE16-NEXT: v_mov_b16_e32 v18.h, v37.l
+; GFX11-TRUE16-NEXT: v_mov_b16_e32 v19.h, v36.l
+; GFX11-TRUE16-NEXT: v_mov_b16_e32 v20.h, v35.l
+; GFX11-TRUE16-NEXT: v_mov_b16_e32 v21.h, v34.l
+; GFX11-TRUE16-NEXT: v_mov_b16_e32 v22.h, v33.l
+; GFX11-TRUE16-NEXT: v_mov_b16_e32 v23.h, v32.l
+; GFX11-TRUE16-NEXT: v_mov_b16_e32 v24.h, v31.l
+; GFX11-TRUE16-NEXT: v_mov_b16_e32 v25.h, v30.l
+; GFX11-TRUE16-NEXT: v_mov_b16_e32 v26.h, v29.l
+; GFX11-TRUE16-NEXT: v_mov_b16_e32 v27.h, v28.l
+; GFX11-TRUE16-NEXT: s_setpc_b64 s[30:31]
+; GFX11-TRUE16-NEXT: .LBB29_4:
+; GFX11-TRUE16-NEXT: ; implicit-def: $vgpr71
+; GFX11-TRUE16-NEXT: ; implicit-def: $vgpr70
+; GFX11-TRUE16-NEXT: ; implicit-def: $vgpr69
+; GFX11-TRUE16-NEXT: ; implicit-def: $vgpr68
+; GFX11-TRUE16-NEXT: ; implicit-def: $vgpr67
+; GFX11-TRUE16-NEXT: ; implicit-def: $vgpr66
+; GFX11-TRUE16-NEXT: ; implicit-def: $vgpr65
+; GFX11-TRUE16-NEXT: ; implicit-def: $vgpr64
+; GFX11-TRUE16-NEXT: ; implicit-def: $vgpr55
+; GFX11-TRUE16-NEXT: ; implicit-def: $vgpr54
+; GFX11-TRUE16-NEXT: ; implicit-def: $vgpr53
+; GFX11-TRUE16-NEXT: ; implicit-def: $vgpr52
+; GFX11-TRUE16-NEXT: ; implicit-def: $vgpr51
+; GFX11-TRUE16-NEXT: ; implicit-def: $vgpr50
+; GFX11-TRUE16-NEXT: ; implicit-def: $vgpr49
+; GFX11-TRUE16-NEXT: ; implicit-def: $vgpr48
+; GFX11-TRUE16-NEXT: ; implicit-def: $vgpr39
+; GFX11-TRUE16-NEXT: ; implicit-def: $vgpr38
+; GFX11-TRUE16-NEXT: ; implicit-def: $vgpr37
+; GFX11-TRUE16-NEXT: ; implicit-def: $vgpr36
+; GFX11-TRUE16-NEXT: ; implicit-def: $vgpr35
+; GFX11-TRUE16-NEXT: ; implicit-def: $vgpr34
+; GFX11-TRUE16-NEXT: ; implicit-def: $vgpr33
+; GFX11-TRUE16-NEXT: ; implicit-def: $vgpr32
+; GFX11-TRUE16-NEXT: ; implicit-def: $vgpr31
+; GFX11-TRUE16-NEXT: ; implicit-def: $vgpr30
+; GFX11-TRUE16-NEXT: ; implicit-def: $vgpr29
+; GFX11-TRUE16-NEXT: ; implicit-def: $vgpr28
+; GFX11-TRUE16-NEXT: s_branch .LBB29_2
+;
+; GFX11-FAKE16-LABEL: bitcast_v28f32_to_v56i16_scalar:
+; GFX11-FAKE16: ; %bb.0:
+; GFX11-FAKE16-NEXT: s_waitcnt vmcnt(0) expcnt(0) lgkmcnt(0)
+; GFX11-FAKE16-NEXT: v_cmp_ne_u32_e32 vcc_lo, 0, v10
+; GFX11-FAKE16-NEXT: v_dual_mov_b32 v28, s0 :: v_dual_mov_b32 v27, s1
+; GFX11-FAKE16-NEXT: v_dual_mov_b32 v26, s2 :: v_dual_mov_b32 v25, s3
+; GFX11-FAKE16-NEXT: v_dual_mov_b32 v24, s16 :: v_dual_mov_b32 v23, s17
+; GFX11-FAKE16-NEXT: v_dual_mov_b32 v22, s18 :: v_dual_mov_b32 v21, s19
+; GFX11-FAKE16-NEXT: v_dual_mov_b32 v20, s20 :: v_dual_mov_b32 v19, s21
+; GFX11-FAKE16-NEXT: v_dual_mov_b32 v18, s22 :: v_dual_mov_b32 v11, s24
+; GFX11-FAKE16-NEXT: v_dual_mov_b32 v12, s23 :: v_dual_mov_b32 v15, s25
+; GFX11-FAKE16-NEXT: v_dual_mov_b32 v14, s26 :: v_dual_mov_b32 v13, s27
+; GFX11-FAKE16-NEXT: v_dual_mov_b32 v17, s28 :: v_dual_mov_b32 v16, s29
+; GFX11-FAKE16-NEXT: s_mov_b32 s0, 0
+; GFX11-FAKE16-NEXT: s_and_b32 s1, vcc_lo, exec_lo
+; GFX11-FAKE16-NEXT: s_cbranch_scc0 .LBB29_4
+; GFX11-FAKE16-NEXT: ; %bb.1: ; %cmp.false
+; GFX11-FAKE16-NEXT: v_lshrrev_b32_e32 v38, 16, v9
+; GFX11-FAKE16-NEXT: v_lshrrev_b32_e32 v39, 16, v8
+; GFX11-FAKE16-NEXT: v_lshrrev_b32_e32 v48, 16, v7
+; GFX11-FAKE16-NEXT: v_lshrrev_b32_e32 v49, 16, v6
+; GFX11-FAKE16-NEXT: v_lshrrev_b32_e32 v50, 16, v5
+; GFX11-FAKE16-NEXT: v_lshrrev_b32_e32 v51, 16, v4
+; GFX11-FAKE16-NEXT: v_lshrrev_b32_e32 v52, 16, v3
+; GFX11-FAKE16-NEXT: v_lshrrev_b32_e32 v53, 16, v2
+; GFX11-FAKE16-NEXT: v_lshrrev_b32_e32 v54, 16, v1
+; GFX11-FAKE16-NEXT: v_lshrrev_b32_e32 v55, 16, v0
+; GFX11-FAKE16-NEXT: v_lshrrev_b32_e32 v64, 16, v16
+; GFX11-FAKE16-NEXT: v_lshrrev_b32_e32 v65, 16, v17
+; GFX11-FAKE16-NEXT: v_lshrrev_b32_e32 v66, 16, v13
+; GFX11-FAKE16-NEXT: v_lshrrev_b32_e32 v67, 16, v14
+; GFX11-FAKE16-NEXT: v_lshrrev_b32_e32 v68, 16, v15
+; GFX11-FAKE16-NEXT: v_lshrrev_b32_e32 v69, 16, v11
+; GFX11-FAKE16-NEXT: v_lshrrev_b32_e32 v70, 16, v12
+; GFX11-FAKE16-NEXT: v_lshrrev_b32_e32 v10, 16, v18
+; GFX11-FAKE16-NEXT: v_lshrrev_b32_e32 v29, 16, v19
+; GFX11-FAKE16-NEXT: v_lshrrev_b32_e32 v71, 16, v20
+; GFX11-FAKE16-NEXT: v_lshrrev_b32_e32 v37, 16, v21
+; GFX11-FAKE16-NEXT: v_lshrrev_b32_e32 v36, 16, v22
+; GFX11-FAKE16-NEXT: v_lshrrev_b32_e32 v35, 16, v23
+; GFX11-FAKE16-NEXT: v_lshrrev_b32_e32 v34, 16, v24
+; GFX11-FAKE16-NEXT: v_lshrrev_b32_e32 v33, 16, v25
+; GFX11-FAKE16-NEXT: v_lshrrev_b32_e32 v32, 16, v26
+; GFX11-FAKE16-NEXT: v_lshrrev_b32_e32 v31, 16, v27
+; GFX11-FAKE16-NEXT: v_lshrrev_b32_e32 v30, 16, v28
+; GFX11-FAKE16-NEXT: s_and_not1_b32 vcc_lo, exec_lo, s0
+; GFX11-FAKE16-NEXT: s_cbranch_vccnz .LBB29_3
+; GFX11-FAKE16-NEXT: .LBB29_2: ; %cmp.true
+; GFX11-FAKE16-NEXT: v_dual_add_f32 v9, 1.0, v9 :: v_dual_add_f32 v8, 1.0, v8
+; GFX11-FAKE16-NEXT: v_dual_add_f32 v7, 1.0, v7 :: v_dual_add_f32 v6, 1.0, v6
+; GFX11-FAKE16-NEXT: v_dual_add_f32 v5, 1.0, v5 :: v_dual_add_f32 v4, 1.0, v4
+; GFX11-FAKE16-NEXT: v_dual_add_f32 v3, 1.0, v3 :: v_dual_add_f32 v2, 1.0, v2
+; GFX11-FAKE16-NEXT: v_dual_add_f32 v1, 1.0, v1 :: v_dual_add_f32 v0, 1.0, v0
+; GFX11-FAKE16-NEXT: v_dual_add_f32 v16, 1.0, v16 :: v_dual_add_f32 v17, 1.0, v17
+; GFX11-FAKE16-NEXT: v_dual_add_f32 v13, 1.0, v13 :: v_dual_add_f32 v14, 1.0, v14
+; GFX11-FAKE16-NEXT: v_dual_add_f32 v15, 1.0, v15 :: v_dual_add_f32 v12, 1.0, v12
+; GFX11-FAKE16-NEXT: v_dual_add_f32 v11, 1.0, v11 :: v_dual_add_f32 v18, 1.0, v18
+; GFX11-FAKE16-NEXT: v_dual_add_f32 v19, 1.0, v19 :: v_dual_add_f32 v20, 1.0, v20
+; GFX11-FAKE16-NEXT: v_dual_add_f32 v21, 1.0, v21 :: v_dual_add_f32 v22, 1.0, v22
+; GFX11-FAKE16-NEXT: v_dual_add_f32 v23, 1.0, v23 :: v_dual_add_f32 v24, 1.0, v24
+; GFX11-FAKE16-NEXT: v_dual_add_f32 v25, 1.0, v25 :: v_dual_add_f32 v26, 1.0, v26
+; GFX11-FAKE16-NEXT: v_dual_add_f32 v27, 1.0, v27 :: v_dual_add_f32 v28, 1.0, v28
+; GFX11-FAKE16-NEXT: v_lshrrev_b32_e32 v38, 16, v9
+; GFX11-FAKE16-NEXT: v_lshrrev_b32_e32 v39, 16, v8
+; GFX11-FAKE16-NEXT: v_lshrrev_b32_e32 v48, 16, v7
+; GFX11-FAKE16-NEXT: v_lshrrev_b32_e32 v49, 16, v6
+; GFX11-FAKE16-NEXT: v_lshrrev_b32_e32 v50, 16, v5
+; GFX11-FAKE16-NEXT: v_lshrrev_b32_e32 v51, 16, v4
+; GFX11-FAKE16-NEXT: v_lshrrev_b32_e32 v52, 16, v3
+; GFX11-FAKE16-NEXT: v_lshrrev_b32_e32 v53, 16, v2
+; GFX11-FAKE16-NEXT: v_lshrrev_b32_e32 v54, 16, v1
+; GFX11-FAKE16-NEXT: v_lshrrev_b32_e32 v55, 16, v0
+; GFX11-FAKE16-NEXT: v_lshrrev_b32_e32 v64, 16, v16
+; GFX11-FAKE16-NEXT: v_lshrrev_b32_e32 v65, 16, v17
+; GFX11-FAKE16-NEXT: v_lshrrev_b32_e32 v66, 16, v13
+; GFX11-FAKE16-NEXT: v_lshrrev_b32_e32 v67, 16, v14
+; GFX11-FAKE16-NEXT: v_lshrrev_b32_e32 v68, 16, v15
+; GFX11-FAKE16-NEXT: v_lshrrev_b32_e32 v69, 16, v11
+; GFX11-FAKE16-NEXT: v_lshrrev_b32_e32 v70, 16, v12
+; GFX11-FAKE16-NEXT: v_lshrrev_b32_e32 v10, 16, v18
+; GFX11-FAKE16-NEXT: v_lshrrev_b32_e32 v29, 16, v19
+; GFX11-FAKE16-NEXT: v_lshrrev_b32_e32 v71, 16, v20
+; GFX11-FAKE16-NEXT: v_lshrrev_b32_e32 v37, 16, v21
+; GFX11-FAKE16-NEXT: v_lshrrev_b32_e32 v36, 16, v22
+; GFX11-FAKE16-NEXT: v_lshrrev_b32_e32 v35, 16, v23
+; GFX11-FAKE16-NEXT: v_lshrrev_b32_e32 v34, 16, v24
+; GFX11-FAKE16-NEXT: v_lshrrev_b32_e32 v33, 16, v25
+; GFX11-FAKE16-NEXT: v_lshrrev_b32_e32 v32, 16, v26
+; GFX11-FAKE16-NEXT: v_lshrrev_b32_e32 v31, 16, v27
+; GFX11-FAKE16-NEXT: v_lshrrev_b32_e32 v30, 16, v28
+; GFX11-FAKE16-NEXT: .LBB29_3: ; %end
+; GFX11-FAKE16-NEXT: v_and_b32_e32 v19, 0xffff, v19
+; GFX11-FAKE16-NEXT: v_and_b32_e32 v27, 0xffff, v27
+; GFX11-FAKE16-NEXT: v_and_b32_e32 v25, 0xffff, v25
+; GFX11-FAKE16-NEXT: v_and_b32_e32 v23, 0xffff, v23
+; GFX11-FAKE16-NEXT: v_and_b32_e32 v21, 0xffff, v21
+; GFX11-FAKE16-NEXT: v_and_b32_e32 v18, 0xffff, v18
+; GFX11-FAKE16-NEXT: v_lshl_or_b32 v29, v29, 16, v19
+; GFX11-FAKE16-NEXT: v_and_b32_e32 v17, 0xffff, v17
+; GFX11-FAKE16-NEXT: v_and_b32_e32 v19, 0xffff, v16
+; GFX11-FAKE16-NEXT: v_and_b32_e32 v1, 0xffff, v1
+; GFX11-FAKE16-NEXT: v_lshl_or_b32 v31, v31, 16, v27
+; GFX11-FAKE16-NEXT: v_and_b32_e32 v28, 0xffff, v28
+; GFX11-FAKE16-NEXT: v_lshl_or_b32 v33, v33, 16, v25
+; GFX11-FAKE16-NEXT: v_and_b32_e32 v26, 0xffff, v26
+; GFX11-FAKE16-NEXT: v_lshl_or_b32 v35, v35, 16, v23
+; GFX11-FAKE16-NEXT: v_and_b32_e32 v24, 0xffff, v24
+; GFX11-FAKE16-NEXT: v_lshl_or_b32 v37, v37, 16, v21
+; GFX11-FAKE16-NEXT: v_and_b32_e32 v22, 0xffff, v22
+; GFX11-FAKE16-NEXT: v_and_b32_e32 v20, 0xffff, v20
+; GFX11-FAKE16-NEXT: v_and_b32_e32 v12, 0xffff, v12
+; GFX11-FAKE16-NEXT: v_and_b32_e32 v21, 0xffff, v11
+; GFX11-FAKE16-NEXT: v_lshl_or_b32 v10, v10, 16, v18
+; GFX11-FAKE16-NEXT: v_and_b32_e32 v15, 0xffff, v15
+; GFX11-FAKE16-NEXT: v_and_b32_e32 v18, 0xffff, v13
+; GFX11-FAKE16-NEXT: v_lshl_or_b32 v16, v65, 16, v17
+; GFX11-FAKE16-NEXT: v_lshl_or_b32 v17, v64, 16, v19
+; GFX11-FAKE16-NEXT: v_and_b32_e32 v0, 0xffff, v0
+; GFX11-FAKE16-NEXT: v_and_b32_e32 v2, 0xffff, v2
+; GFX11-FAKE16-NEXT: v_and_b32_e32 v3, 0xffff, v3
+; GFX11-FAKE16-NEXT: v_and_b32_e32 v4, 0xffff, v4
+; GFX11-FAKE16-NEXT: v_lshl_or_b32 v19, v54, 16, v1
+; GFX11-FAKE16-NEXT: v_and_b32_e32 v1, 0xffff, v6
+; GFX11-FAKE16-NEXT: v_lshl_or_b32 v32, v32, 16, v26
+; GFX11-FAKE16-NEXT: v_lshl_or_b32 v11, v70, 16, v12
+; GFX11-FAKE16-NEXT: v_lshl_or_b32 v12, v69, 16, v21
+; GFX11-FAKE16-NEXT: v_and_b32_e32 v14, 0xffff, v14
+; GFX11-FAKE16-NEXT: v_lshl_or_b32 v13, v68, 16, v15
+; GFX11-FAKE16-NEXT: v_lshl_or_b32 v15, v66, 16, v18
+; GFX11-FAKE16-NEXT: v_lshl_or_b32 v18, v55, 16, v0
+; GFX11-FAKE16-NEXT: v_lshl_or_b32 v21, v52, 16, v3
+; GFX11-FAKE16-NEXT: v_and_b32_e32 v0, 0xffff, v5
+; GFX11-FAKE16-NEXT: v_and_b32_e32 v3, 0xffff, v8
+; GFX11-FAKE16-NEXT: v_mov_b32_e32 v5, v35
+; GFX11-FAKE16-NEXT: v_lshl_or_b32 v34, v34, 16, v24
+; GFX11-FAKE16-NEXT: v_lshl_or_b32 v24, v49, 16, v1
+; GFX11-FAKE16-NEXT: v_mov_b32_e32 v1, v31
+; GFX11-FAKE16-NEXT: v_lshl_or_b32 v30, v30, 16, v28
+; GFX11-FAKE16-NEXT: v_lshl_or_b32 v28, v71, 16, v20
+; GFX11-FAKE16-NEXT: v_lshl_or_b32 v20, v53, 16, v2
+; GFX11-FAKE16-NEXT: v_dual_mov_b32 v7, v37 :: v_dual_and_b32 v2, 0xffff, v7
+; GFX11-FAKE16-NEXT: v_lshl_or_b32 v36, v36, 16, v22
+; GFX11-FAKE16-NEXT: v_lshl_or_b32 v22, v51, 16, v4
+; GFX11-FAKE16-NEXT: v_and_b32_e32 v4, 0xffff, v9
+; GFX11-FAKE16-NEXT: v_lshl_or_b32 v14, v67, 16, v14
+; GFX11-FAKE16-NEXT: v_lshl_or_b32 v23, v50, 16, v0
+; GFX11-FAKE16-NEXT: v_lshl_or_b32 v25, v48, 16, v2
+; GFX11-FAKE16-NEXT: v_lshl_or_b32 v26, v39, 16, v3
+; GFX11-FAKE16-NEXT: v_lshl_or_b32 v27, v38, 16, v4
+; GFX11-FAKE16-NEXT: v_mov_b32_e32 v0, v30
+; GFX11-FAKE16-NEXT: v_dual_mov_b32 v2, v32 :: v_dual_mov_b32 v3, v33
+; GFX11-FAKE16-NEXT: v_mov_b32_e32 v4, v34
+; GFX11-FAKE16-NEXT: v_mov_b32_e32 v6, v36
+; GFX11-FAKE16-NEXT: v_dual_mov_b32 v8, v28 :: v_dual_mov_b32 v9, v29
+; GFX11-FAKE16-NEXT: s_setpc_b64 s[30:31]
+; GFX11-FAKE16-NEXT: .LBB29_4:
+; GFX11-FAKE16-NEXT: ; implicit-def: $vgpr30
+; GFX11-FAKE16-NEXT: ; implicit-def: $vgpr31
+; GFX11-FAKE16-NEXT: ; implicit-def: $vgpr32
+; GFX11-FAKE16-NEXT: ; implicit-def: $vgpr33
+; GFX11-FAKE16-NEXT: ; implicit-def: $vgpr34
+; GFX11-FAKE16-NEXT: ; implicit-def: $vgpr35
+; GFX11-FAKE16-NEXT: ; implicit-def: $vgpr36
+; GFX11-FAKE16-NEXT: ; implicit-def: $vgpr37
+; GFX11-FAKE16-NEXT: ; implicit-def: $vgpr71
+; GFX11-FAKE16-NEXT: ; implicit-def: $vgpr29
+; GFX11-FAKE16-NEXT: ; implicit-def: $vgpr10
+; GFX11-FAKE16-NEXT: ; implicit-def: $vgpr70
+; GFX11-FAKE16-NEXT: ; implicit-def: $vgpr69
+; GFX11-FAKE16-NEXT: ; implicit-def: $vgpr68
+; GFX11-FAKE16-NEXT: ; implicit-def: $vgpr67
+; GFX11-FAKE16-NEXT: ; implicit-def: $vgpr66
+; GFX11-FAKE16-NEXT: ; implicit-def: $vgpr65
+; GFX11-FAKE16-NEXT: ; implicit-def: $vgpr64
+; GFX11-FAKE16-NEXT: ; implicit-def: $vgpr55
+; GFX11-FAKE16-NEXT: ; implicit-def: $vgpr54
+; GFX11-FAKE16-NEXT: ; implicit-def: $vgpr53
+; GFX11-FAKE16-NEXT: ; implicit-def: $vgpr52
+; GFX11-FAKE16-NEXT: ; implicit-def: $vgpr51
+; GFX11-FAKE16-NEXT: ; implicit-def: $vgpr50
+; GFX11-FAKE16-NEXT: ; implicit-def: $vgpr49
+; GFX11-FAKE16-NEXT: ; implicit-def: $vgpr48
+; GFX11-FAKE16-NEXT: ; implicit-def: $vgpr39
+; GFX11-FAKE16-NEXT: ; implicit-def: $vgpr38
+; GFX11-FAKE16-NEXT: s_branch .LBB29_2
%cmp = icmp eq i32 %b, 0
br i1 %cmp, label %cmp.true, label %cmp.false
@@ -17915,141 +18404,299 @@ define inreg <28 x float> @bitcast_v56i16_to_v28f32_scalar(<56 x i16> inreg %a,
; GFX11-TRUE16-LABEL: bitcast_v56i16_to_v28f32_scalar:
; GFX11-TRUE16: ; %bb.0:
; GFX11-TRUE16-NEXT: s_waitcnt vmcnt(0) expcnt(0) lgkmcnt(0)
-; GFX11-TRUE16-NEXT: v_mov_b16_e32 v32.h, 0
; GFX11-TRUE16-NEXT: v_cmp_ne_u32_e32 vcc_lo, 0, v10
-; GFX11-TRUE16-NEXT: v_mov_b16_e32 v32.l, v9.h
-; GFX11-TRUE16-NEXT: v_mov_b16_e32 v33.l, v8.h
-; GFX11-TRUE16-NEXT: v_mov_b16_e32 v34.l, v7.h
-; GFX11-TRUE16-NEXT: v_mov_b16_e32 v33.h, v32.h
-; GFX11-TRUE16-NEXT: v_mov_b16_e32 v34.h, v32.h
-; GFX11-TRUE16-NEXT: v_mov_b16_e32 v35.l, v6.h
-; GFX11-TRUE16-NEXT: v_mov_b16_e32 v35.h, v32.h
-; GFX11-TRUE16-NEXT: v_mov_b16_e32 v36.l, v5.h
-; GFX11-TRUE16-NEXT: v_mov_b16_e32 v36.h, v32.h
-; GFX11-TRUE16-NEXT: v_mov_b16_e32 v37.l, v4.h
-; GFX11-TRUE16-NEXT: v_mov_b16_e32 v37.h, v32.h
-; GFX11-TRUE16-NEXT: v_mov_b16_e32 v38.l, v3.h
-; GFX11-TRUE16-NEXT: v_mov_b16_e32 v38.h, v32.h
-; GFX11-TRUE16-NEXT: v_mov_b16_e32 v39.l, v2.h
-; GFX11-TRUE16-NEXT: v_mov_b16_e32 v39.h, v32.h
-; GFX11-TRUE16-NEXT: v_mov_b16_e32 v48.l, v1.h
-; GFX11-TRUE16-NEXT: v_mov_b16_e32 v48.h, v32.h
-; GFX11-TRUE16-NEXT: v_mov_b16_e32 v49.l, v0.h
-; GFX11-TRUE16-NEXT: v_mov_b16_e32 v49.h, v32.h
-; GFX11-TRUE16-NEXT: v_and_b32_e32 v67, 0xffff, v0
-; GFX11-TRUE16-NEXT: v_and_b32_e32 v66, 0xffff, v1
-; GFX11-TRUE16-NEXT: v_and_b32_e32 v65, 0xffff, v2
-; GFX11-TRUE16-NEXT: v_and_b32_e32 v64, 0xffff, v3
-; GFX11-TRUE16-NEXT: v_and_b32_e32 v55, 0xffff, v4
-; GFX11-TRUE16-NEXT: v_and_b32_e32 v54, 0xffff, v5
-; GFX11-TRUE16-NEXT: v_and_b32_e32 v53, 0xffff, v6
-; GFX11-TRUE16-NEXT: v_and_b32_e32 v52, 0xffff, v7
-; GFX11-TRUE16-NEXT: v_and_b32_e32 v51, 0xffff, v8
-; GFX11-TRUE16-NEXT: v_and_b32_e32 v50, 0xffff, v9
-; GFX11-TRUE16-NEXT: s_lshr_b32 s40, s29, 16
-; GFX11-TRUE16-NEXT: s_lshr_b32 s41, s28, 16
-; GFX11-TRUE16-NEXT: s_lshr_b32 s42, s27, 16
-; GFX11-TRUE16-NEXT: s_lshr_b32 s14, s26, 16
-; GFX11-TRUE16-NEXT: s_lshr_b32 s13, s25, 16
-; GFX11-TRUE16-NEXT: s_lshr_b32 s12, s24, 16
-; GFX11-TRUE16-NEXT: s_lshr_b32 s11, s23, 16
-; GFX11-TRUE16-NEXT: s_lshr_b32 s10, s22, 16
-; GFX11-TRUE16-NEXT: s_lshr_b32 s9, s21, 16
-; GFX11-TRUE16-NEXT: s_lshr_b32 s8, s20, 16
-; GFX11-TRUE16-NEXT: s_lshr_b32 s7, s19, 16
-; GFX11-TRUE16-NEXT: s_lshr_b32 s6, s18, 16
-; GFX11-TRUE16-NEXT: s_lshr_b32 s5, s17, 16
-; GFX11-TRUE16-NEXT: s_lshr_b32 s4, s16, 16
-; GFX11-TRUE16-NEXT: s_lshr_b32 s43, s3, 16
-; GFX11-TRUE16-NEXT: s_lshr_b32 s44, s2, 16
-; GFX11-TRUE16-NEXT: s_lshr_b32 s45, s1, 16
-; GFX11-TRUE16-NEXT: s_lshr_b32 s46, s0, 16
-; GFX11-TRUE16-NEXT: s_mov_b32 s15, 0
-; GFX11-TRUE16-NEXT: s_pack_ll_b32_b16 s0, s0, s46
-; GFX11-TRUE16-NEXT: s_pack_ll_b32_b16 s1, s1, s45
-; GFX11-TRUE16-NEXT: s_pack_ll_b32_b16 s2, s2, s44
-; GFX11-TRUE16-NEXT: s_pack_ll_b32_b16 s3, s3, s43
-; GFX11-TRUE16-NEXT: s_pack_ll_b32_b16 s4, s16, s4
-; GFX11-TRUE16-NEXT: s_pack_ll_b32_b16 s5, s17, s5
-; GFX11-TRUE16-NEXT: s_pack_ll_b32_b16 s6, s18, s6
-; GFX11-TRUE16-NEXT: s_pack_ll_b32_b16 s7, s19, s7
-; GFX11-TRUE16-NEXT: s_pack_ll_b32_b16 s8, s20, s8
-; GFX11-TRUE16-NEXT: s_pack_ll_b32_b16 s9, s21, s9
-; GFX11-TRUE16-NEXT: s_pack_ll_b32_b16 s10, s22, s10
-; GFX11-TRUE16-NEXT: s_pack_ll_b32_b16 s11, s23, s11
-; GFX11-TRUE16-NEXT: s_pack_ll_b32_b16 s12, s24, s12
-; GFX11-TRUE16-NEXT: s_pack_ll_b32_b16 s13, s25, s13
-; GFX11-TRUE16-NEXT: s_pack_ll_b32_b16 s14, s26, s14
-; GFX11-TRUE16-NEXT: s_pack_ll_b32_b16 s16, s27, s42
-; GFX11-TRUE16-NEXT: s_pack_ll_b32_b16 s17, s28, s41
-; GFX11-TRUE16-NEXT: s_pack_ll_b32_b16 s18, s29, s40
+; GFX11-TRUE16-NEXT: s_clause 0x1f
+; GFX11-TRUE16-NEXT: scratch_store_b32 off, v40, s32 offset:316
+; GFX11-TRUE16-NEXT: scratch_store_b32 off, v41, s32 offset:312
+; GFX11-TRUE16-NEXT: scratch_store_b32 off, v42, s32 offset:308
+; GFX11-TRUE16-NEXT: scratch_store_b32 off, v43, s32 offset:304
+; GFX11-TRUE16-NEXT: scratch_store_b32 off, v44, s32 offset:300
+; GFX11-TRUE16-NEXT: scratch_store_b32 off, v45, s32 offset:296
+; GFX11-TRUE16-NEXT: scratch_store_b32 off, v46, s32 offset:292
+; GFX11-TRUE16-NEXT: scratch_store_b32 off, v47, s32 offset:288
+; GFX11-TRUE16-NEXT: scratch_store_b32 off, v56, s32 offset:284
+; GFX11-TRUE16-NEXT: scratch_store_b32 off, v57, s32 offset:280
+; GFX11-TRUE16-NEXT: scratch_store_b32 off, v58, s32 offset:276
+; GFX11-TRUE16-NEXT: scratch_store_b32 off, v59, s32 offset:272
+; GFX11-TRUE16-NEXT: scratch_store_b32 off, v60, s32 offset:268
+; GFX11-TRUE16-NEXT: scratch_store_b32 off, v61, s32 offset:264
+; GFX11-TRUE16-NEXT: scratch_store_b32 off, v62, s32 offset:260
+; GFX11-TRUE16-NEXT: scratch_store_b32 off, v63, s32 offset:256
+; GFX11-TRUE16-NEXT: scratch_store_b32 off, v72, s32 offset:252
+; GFX11-TRUE16-NEXT: scratch_store_b32 off, v73, s32 offset:248
+; GFX11-TRUE16-NEXT: scratch_store_b32 off, v74, s32 offset:244
+; GFX11-TRUE16-NEXT: scratch_store_b32 off, v75, s32 offset:240
+; GFX11-TRUE16-NEXT: scratch_store_b32 off, v76, s32 offset:236
+; GFX11-TRUE16-NEXT: scratch_store_b32 off, v77, s32 offset:232
+; GFX11-TRUE16-NEXT: scratch_store_b32 off, v78, s32 offset:228
+; GFX11-TRUE16-NEXT: scratch_store_b32 off, v79, s32 offset:224
+; GFX11-TRUE16-NEXT: scratch_store_b32 off, v88, s32 offset:220
+; GFX11-TRUE16-NEXT: scratch_store_b32 off, v89, s32 offset:216
+; GFX11-TRUE16-NEXT: scratch_store_b32 off, v90, s32 offset:212
+; GFX11-TRUE16-NEXT: scratch_store_b32 off, v91, s32 offset:208
+; GFX11-TRUE16-NEXT: scratch_store_b32 off, v92, s32 offset:204
+; GFX11-TRUE16-NEXT: scratch_store_b32 off, v93, s32 offset:200
+; GFX11-TRUE16-NEXT: scratch_store_b32 off, v94, s32 offset:196
+; GFX11-TRUE16-NEXT: scratch_store_b32 off, v95, s32 offset:192
+; GFX11-TRUE16-NEXT: s_clause 0x1f
+; GFX11-TRUE16-NEXT: scratch_store_b32 off, v104, s32 offset:188
+; GFX11-TRUE16-NEXT: scratch_store_b32 off, v105, s32 offset:184
+; GFX11-TRUE16-NEXT: scratch_store_b32 off, v106, s32 offset:180
+; GFX11-TRUE16-NEXT: scratch_store_b32 off, v107, s32 offset:176
+; GFX11-TRUE16-NEXT: scratch_store_b32 off, v108, s32 offset:172
+; GFX11-TRUE16-NEXT: scratch_store_b32 off, v109, s32 offset:168
+; GFX11-TRUE16-NEXT: scratch_store_b32 off, v110, s32 offset:164
+; GFX11-TRUE16-NEXT: scratch_store_b32 off, v111, s32 offset:160
+; GFX11-TRUE16-NEXT: scratch_store_b32 off, v120, s32 offset:156
+; GFX11-TRUE16-NEXT: scratch_store_b32 off, v121, s32 offset:152
+; GFX11-TRUE16-NEXT: scratch_store_b32 off, v122, s32 offset:148
+; GFX11-TRUE16-NEXT: scratch_store_b32 off, v123, s32 offset:144
+; GFX11-TRUE16-NEXT: scratch_store_b32 off, v124, s32 offset:140
+; GFX11-TRUE16-NEXT: scratch_store_b32 off, v125, s32 offset:136
+; GFX11-TRUE16-NEXT: scratch_store_b32 off, v126, s32 offset:132
+; GFX11-TRUE16-NEXT: scratch_store_b32 off, v127, s32 offset:128
+; GFX11-TRUE16-NEXT: scratch_store_b32 off, v136, s32 offset:124
+; GFX11-TRUE16-NEXT: scratch_store_b32 off, v137, s32 offset:120
+; GFX11-TRUE16-NEXT: scratch_store_b32 off, v138, s32 offset:116
+; GFX11-TRUE16-NEXT: scratch_store_b32 off, v139, s32 offset:112
+; GFX11-TRUE16-NEXT: scratch_store_b32 off, v140, s32 offset:108
+; GFX11-TRUE16-NEXT: scratch_store_b32 off, v141, s32 offset:104
+; GFX11-TRUE16-NEXT: scratch_store_b32 off, v142, s32 offset:100
+; GFX11-TRUE16-NEXT: scratch_store_b32 off, v143, s32 offset:96
+; GFX11-TRUE16-NEXT: scratch_store_b32 off, v152, s32 offset:92
+; GFX11-TRUE16-NEXT: scratch_store_b32 off, v153, s32 offset:88
+; GFX11-TRUE16-NEXT: scratch_store_b32 off, v154, s32 offset:84
+; GFX11-TRUE16-NEXT: scratch_store_b32 off, v155, s32 offset:80
+; GFX11-TRUE16-NEXT: scratch_store_b32 off, v156, s32 offset:76
+; GFX11-TRUE16-NEXT: scratch_store_b32 off, v157, s32 offset:72
+; GFX11-TRUE16-NEXT: scratch_store_b32 off, v158, s32 offset:68
+; GFX11-TRUE16-NEXT: scratch_store_b32 off, v159, s32 offset:64
+; GFX11-TRUE16-NEXT: s_clause 0xf
+; GFX11-TRUE16-NEXT: scratch_store_b32 off, v168, s32 offset:60
+; GFX11-TRUE16-NEXT: scratch_store_b32 off, v169, s32 offset:56
+; GFX11-TRUE16-NEXT: scratch_store_b32 off, v170, s32 offset:52
+; GFX11-TRUE16-NEXT: scratch_store_b32 off, v171, s32 offset:48
+; GFX11-TRUE16-NEXT: scratch_store_b32 off, v172, s32 offset:44
+; GFX11-TRUE16-NEXT: scratch_store_b32 off, v173, s32 offset:40
+; GFX11-TRUE16-NEXT: scratch_store_b32 off, v174, s32 offset:36
+; GFX11-TRUE16-NEXT: scratch_store_b32 off, v175, s32 offset:32
+; GFX11-TRUE16-NEXT: scratch_store_b32 off, v184, s32 offset:28
+; GFX11-TRUE16-NEXT: scratch_store_b32 off, v185, s32 offset:24
+; GFX11-TRUE16-NEXT: scratch_store_b32 off, v186, s32 offset:20
+; GFX11-TRUE16-NEXT: scratch_store_b32 off, v187, s32 offset:16
+; GFX11-TRUE16-NEXT: scratch_store_b32 off, v188, s32 offset:12
+; GFX11-TRUE16-NEXT: scratch_store_b32 off, v189, s32 offset:8
+; GFX11-TRUE16-NEXT: scratch_store_b32 off, v190, s32 offset:4
+; GFX11-TRUE16-NEXT: scratch_store_b32 off, v191, s32
+; GFX11-TRUE16-NEXT: v_dual_mov_b32 v28, v9 :: v_dual_mov_b32 v25, v7
+; GFX11-TRUE16-NEXT: v_dual_mov_b32 v26, v8 :: v_dual_mov_b32 v189, v5
+; GFX11-TRUE16-NEXT: v_dual_mov_b32 v188, v6 :: v_dual_mov_b32 v191, v3
+; GFX11-TRUE16-NEXT: v_dual_mov_b32 v190, v4 :: v_dual_mov_b32 v185, v2
+; GFX11-TRUE16-NEXT: v_dual_mov_b32 v186, v1 :: v_dual_mov_b32 v187, v0
+; GFX11-TRUE16-NEXT: s_lshr_b32 s15, s29, 16
+; GFX11-TRUE16-NEXT: s_lshr_b32 s14, s28, 16
+; GFX11-TRUE16-NEXT: s_lshr_b32 s13, s27, 16
+; GFX11-TRUE16-NEXT: s_lshr_b32 s12, s26, 16
+; GFX11-TRUE16-NEXT: s_lshr_b32 s11, s25, 16
+; GFX11-TRUE16-NEXT: s_lshr_b32 s10, s24, 16
+; GFX11-TRUE16-NEXT: s_lshr_b32 s9, s23, 16
+; GFX11-TRUE16-NEXT: s_lshr_b32 s8, s22, 16
+; GFX11-TRUE16-NEXT: s_lshr_b32 s7, s21, 16
+; GFX11-TRUE16-NEXT: s_lshr_b32 s6, s20, 16
+; GFX11-TRUE16-NEXT: s_lshr_b32 s5, s19, 16
+; GFX11-TRUE16-NEXT: s_lshr_b32 s4, s18, 16
+; GFX11-TRUE16-NEXT: s_lshr_b32 s43, s17, 16
+; GFX11-TRUE16-NEXT: s_lshr_b32 s44, s16, 16
+; GFX11-TRUE16-NEXT: s_lshr_b32 s45, s3, 16
+; GFX11-TRUE16-NEXT: s_lshr_b32 s46, s2, 16
+; GFX11-TRUE16-NEXT: s_lshr_b32 s41, s1, 16
+; GFX11-TRUE16-NEXT: s_lshr_b32 s40, s0, 16
+; GFX11-TRUE16-NEXT: s_mov_b32 s42, 0
+; GFX11-TRUE16-NEXT: s_pack_ll_b32_b16 s40, s0, s40
+; GFX11-TRUE16-NEXT: s_pack_ll_b32_b16 s41, s1, s41
+; GFX11-TRUE16-NEXT: s_pack_ll_b32_b16 s0, s2, s46
+; GFX11-TRUE16-NEXT: s_pack_ll_b32_b16 s1, s3, s45
+; GFX11-TRUE16-NEXT: s_pack_ll_b32_b16 s2, s16, s44
+; GFX11-TRUE16-NEXT: s_pack_ll_b32_b16 s3, s17, s43
+; GFX11-TRUE16-NEXT: s_pack_ll_b32_b16 s4, s18, s4
+; GFX11-TRUE16-NEXT: s_pack_ll_b32_b16 s5, s19, s5
+; GFX11-TRUE16-NEXT: s_pack_ll_b32_b16 s6, s20, s6
+; GFX11-TRUE16-NEXT: s_pack_ll_b32_b16 s7, s21, s7
+; GFX11-TRUE16-NEXT: s_pack_ll_b32_b16 s8, s22, s8
+; GFX11-TRUE16-NEXT: s_pack_ll_b32_b16 s9, s23, s9
+; GFX11-TRUE16-NEXT: s_pack_ll_b32_b16 s10, s24, s10
+; GFX11-TRUE16-NEXT: s_pack_ll_b32_b16 s11, s25, s11
+; GFX11-TRUE16-NEXT: s_pack_ll_b32_b16 s12, s26, s12
+; GFX11-TRUE16-NEXT: s_pack_ll_b32_b16 s13, s27, s13
+; GFX11-TRUE16-NEXT: s_pack_ll_b32_b16 s14, s28, s14
+; GFX11-TRUE16-NEXT: s_pack_ll_b32_b16 s15, s29, s15
; GFX11-TRUE16-NEXT: s_and_b32 s47, vcc_lo, exec_lo
; GFX11-TRUE16-NEXT: s_cbranch_scc0 .LBB31_4
; GFX11-TRUE16-NEXT: ; %bb.1: ; %cmp.false
-; GFX11-TRUE16-NEXT: v_lshl_or_b32 v18, v49, 16, v67
-; GFX11-TRUE16-NEXT: v_lshl_or_b32 v19, v48, 16, v66
-; GFX11-TRUE16-NEXT: v_lshl_or_b32 v20, v39, 16, v65
-; GFX11-TRUE16-NEXT: v_lshl_or_b32 v21, v38, 16, v64
-; GFX11-TRUE16-NEXT: v_lshl_or_b32 v22, v37, 16, v55
-; GFX11-TRUE16-NEXT: v_lshl_or_b32 v23, v36, 16, v54
-; GFX11-TRUE16-NEXT: v_lshl_or_b32 v24, v35, 16, v53
-; GFX11-TRUE16-NEXT: v_lshl_or_b32 v25, v34, 16, v52
-; GFX11-TRUE16-NEXT: v_lshl_or_b32 v26, v33, 16, v51
-; GFX11-TRUE16-NEXT: v_lshl_or_b32 v27, v32, 16, v50
-; GFX11-TRUE16-NEXT: v_dual_mov_b32 v0, s0 :: v_dual_mov_b32 v1, s1
-; GFX11-TRUE16-NEXT: v_dual_mov_b32 v2, s2 :: v_dual_mov_b32 v3, s3
-; GFX11-TRUE16-NEXT: v_dual_mov_b32 v4, s4 :: v_dual_mov_b32 v5, s5
-; GFX11-TRUE16-NEXT: v_dual_mov_b32 v6, s6 :: v_dual_mov_b32 v7, s7
-; GFX11-TRUE16-NEXT: v_dual_mov_b32 v8, s8 :: v_dual_mov_b32 v9, s9
-; GFX11-TRUE16-NEXT: v_dual_mov_b32 v10, s10 :: v_dual_mov_b32 v11, s11
-; GFX11-TRUE16-NEXT: v_dual_mov_b32 v12, s12 :: v_dual_mov_b32 v13, s13
-; GFX11-TRUE16-NEXT: v_dual_mov_b32 v14, s14 :: v_dual_mov_b32 v15, s16
-; GFX11-TRUE16-NEXT: v_dual_mov_b32 v16, s17 :: v_dual_mov_b32 v17, s18
-; GFX11-TRUE16-NEXT: s_and_not1_b32 vcc_lo, exec_lo, s15
+; GFX11-TRUE16-NEXT: v_dual_mov_b32 v0, s40 :: v_dual_mov_b32 v5, s0
+; GFX11-TRUE16-NEXT: v_dual_mov_b32 v2, s41 :: v_dual_mov_b32 v9, s1
+; GFX11-TRUE16-NEXT: v_dual_mov_b32 v14, s2 :: v_dual_mov_b32 v27, s4
+; GFX11-TRUE16-NEXT: v_dual_mov_b32 v20, s3 :: v_dual_mov_b32 v35, s5
+; GFX11-TRUE16-NEXT: v_dual_mov_b32 v44, s6 :: v_dual_mov_b32 v65, s8
+; GFX11-TRUE16-NEXT: v_dual_mov_b32 v54, s7 :: v_dual_mov_b32 v77, s9
+; GFX11-TRUE16-NEXT: v_dual_mov_b32 v90, s10 :: v_dual_mov_b32 v119, s12
+; GFX11-TRUE16-NEXT: v_dual_mov_b32 v104, s11 :: v_dual_mov_b32 v135, s13
+; GFX11-TRUE16-NEXT: v_mov_b32_e32 v152, s14
+; GFX11-TRUE16-NEXT: v_mov_b32_e32 v170, s15
+; GFX11-TRUE16-NEXT: s_and_not1_b32 vcc_lo, exec_lo, s42
; GFX11-TRUE16-NEXT: s_cbranch_vccnz .LBB31_3
; GFX11-TRUE16-NEXT: .LBB31_2: ; %cmp.true
-; GFX11-TRUE16-NEXT: v_lshl_or_b32 v18, v49, 16, v67
-; GFX11-TRUE16-NEXT: v_lshl_or_b32 v19, v48, 16, v66
-; GFX11-TRUE16-NEXT: v_lshl_or_b32 v20, v39, 16, v65
-; GFX11-TRUE16-NEXT: v_lshl_or_b32 v21, v38, 16, v64
-; GFX11-TRUE16-NEXT: v_lshl_or_b32 v22, v37, 16, v55
-; GFX11-TRUE16-NEXT: v_lshl_or_b32 v23, v36, 16, v54
-; GFX11-TRUE16-NEXT: v_lshl_or_b32 v24, v35, 16, v53
-; GFX11-TRUE16-NEXT: v_lshl_or_b32 v25, v34, 16, v52
-; GFX11-TRUE16-NEXT: v_lshl_or_b32 v26, v33, 16, v51
-; GFX11-TRUE16-NEXT: v_lshl_or_b32 v27, v32, 16, v50
-; GFX11-TRUE16-NEXT: v_pk_add_u16 v0, s0, 3 op_sel_hi:[1,0]
-; GFX11-TRUE16-NEXT: v_pk_add_u16 v1, s1, 3 op_sel_hi:[1,0]
-; GFX11-TRUE16-NEXT: v_pk_add_u16 v2, s2, 3 op_sel_hi:[1,0]
-; GFX11-TRUE16-NEXT: v_pk_add_u16 v3, s3, 3 op_sel_hi:[1,0]
-; GFX11-TRUE16-NEXT: v_pk_add_u16 v4, s4, 3 op_sel_hi:[1,0]
-; GFX11-TRUE16-NEXT: v_pk_add_u16 v5, s5, 3 op_sel_hi:[1,0]
-; GFX11-TRUE16-NEXT: v_pk_add_u16 v6, s6, 3 op_sel_hi:[1,0]
-; GFX11-TRUE16-NEXT: v_pk_add_u16 v7, s7, 3 op_sel_hi:[1,0]
-; GFX11-TRUE16-NEXT: v_pk_add_u16 v8, s8, 3 op_sel_hi:[1,0]
-; GFX11-TRUE16-NEXT: v_pk_add_u16 v9, s9, 3 op_sel_hi:[1,0]
-; GFX11-TRUE16-NEXT: v_pk_add_u16 v10, s10, 3 op_sel_hi:[1,0]
-; GFX11-TRUE16-NEXT: v_pk_add_u16 v11, s11, 3 op_sel_hi:[1,0]
-; GFX11-TRUE16-NEXT: v_pk_add_u16 v12, s12, 3 op_sel_hi:[1,0]
-; GFX11-TRUE16-NEXT: v_pk_add_u16 v13, s13, 3 op_sel_hi:[1,0]
-; GFX11-TRUE16-NEXT: v_pk_add_u16 v14, s14, 3 op_sel_hi:[1,0]
-; GFX11-TRUE16-NEXT: v_pk_add_u16 v15, s16, 3 op_sel_hi:[1,0]
-; GFX11-TRUE16-NEXT: v_pk_add_u16 v16, s17, 3 op_sel_hi:[1,0]
-; GFX11-TRUE16-NEXT: v_pk_add_u16 v17, s18, 3 op_sel_hi:[1,0]
-; GFX11-TRUE16-NEXT: v_pk_add_u16 v18, v18, 3 op_sel_hi:[1,0]
-; GFX11-TRUE16-NEXT: v_pk_add_u16 v19, v19, 3 op_sel_hi:[1,0]
-; GFX11-TRUE16-NEXT: v_pk_add_u16 v20, v20, 3 op_sel_hi:[1,0]
-; GFX11-TRUE16-NEXT: v_pk_add_u16 v21, v21, 3 op_sel_hi:[1,0]
-; GFX11-TRUE16-NEXT: v_pk_add_u16 v22, v22, 3 op_sel_hi:[1,0]
-; GFX11-TRUE16-NEXT: v_pk_add_u16 v23, v23, 3 op_sel_hi:[1,0]
-; GFX11-TRUE16-NEXT: v_pk_add_u16 v24, v24, 3 op_sel_hi:[1,0]
+; GFX11-TRUE16-NEXT: v_pk_add_u16 v0, s40, 3 op_sel_hi:[1,0]
+; GFX11-TRUE16-NEXT: v_pk_add_u16 v2, s41, 3 op_sel_hi:[1,0]
+; GFX11-TRUE16-NEXT: v_pk_add_u16 v187, v187, 3 op_sel_hi:[1,0]
+; GFX11-TRUE16-NEXT: v_pk_add_u16 v186, v186, 3 op_sel_hi:[1,0]
+; GFX11-TRUE16-NEXT: v_pk_add_u16 v185, v185, 3 op_sel_hi:[1,0]
+; GFX11-TRUE16-NEXT: v_pk_add_u16 v191, v191, 3 op_sel_hi:[1,0]
+; GFX11-TRUE16-NEXT: v_pk_add_u16 v190, v190, 3 op_sel_hi:[1,0]
+; GFX11-TRUE16-NEXT: v_pk_add_u16 v189, v189, 3 op_sel_hi:[1,0]
+; GFX11-TRUE16-NEXT: v_pk_add_u16 v188, v188, 3 op_sel_hi:[1,0]
; GFX11-TRUE16-NEXT: v_pk_add_u16 v25, v25, 3 op_sel_hi:[1,0]
; GFX11-TRUE16-NEXT: v_pk_add_u16 v26, v26, 3 op_sel_hi:[1,0]
-; GFX11-TRUE16-NEXT: v_pk_add_u16 v27, v27, 3 op_sel_hi:[1,0]
+; GFX11-TRUE16-NEXT: v_pk_add_u16 v28, v28, 3 op_sel_hi:[1,0]
+; GFX11-TRUE16-NEXT: v_pk_add_u16 v5, s0, 3 op_sel_hi:[1,0]
+; GFX11-TRUE16-NEXT: v_pk_add_u16 v9, s1, 3 op_sel_hi:[1,0]
+; GFX11-TRUE16-NEXT: v_pk_add_u16 v14, s2, 3 op_sel_hi:[1,0]
+; GFX11-TRUE16-NEXT: v_pk_add_u16 v20, s3, 3 op_sel_hi:[1,0]
+; GFX11-TRUE16-NEXT: v_pk_add_u16 v27, s4, 3 op_sel_hi:[1,0]
+; GFX11-TRUE16-NEXT: v_pk_add_u16 v35, s5, 3 op_sel_hi:[1,0]
+; GFX11-TRUE16-NEXT: v_pk_add_u16 v44, s6, 3 op_sel_hi:[1,0]
+; GFX11-TRUE16-NEXT: v_pk_add_u16 v54, s7, 3 op_sel_hi:[1,0]
+; GFX11-TRUE16-NEXT: v_pk_add_u16 v65, s8, 3 op_sel_hi:[1,0]
+; GFX11-TRUE16-NEXT: v_pk_add_u16 v77, s9, 3 op_sel_hi:[1,0]
+; GFX11-TRUE16-NEXT: v_pk_add_u16 v90, s10, 3 op_sel_hi:[1,0]
+; GFX11-TRUE16-NEXT: v_pk_add_u16 v104, s11, 3 op_sel_hi:[1,0]
+; GFX11-TRUE16-NEXT: v_pk_add_u16 v119, s12, 3 op_sel_hi:[1,0]
+; GFX11-TRUE16-NEXT: v_pk_add_u16 v135, s13, 3 op_sel_hi:[1,0]
+; GFX11-TRUE16-NEXT: v_pk_add_u16 v152, s14, 3 op_sel_hi:[1,0]
+; GFX11-TRUE16-NEXT: v_pk_add_u16 v170, s15, 3 op_sel_hi:[1,0]
; GFX11-TRUE16-NEXT: .LBB31_3: ; %end
+; GFX11-TRUE16-NEXT: v_dual_mov_b32 v1, v2 :: v_dual_mov_b32 v2, v5
+; GFX11-TRUE16-NEXT: v_dual_mov_b32 v5, v20 :: v_dual_mov_b32 v6, v27
+; GFX11-TRUE16-NEXT: v_dual_mov_b32 v7, v35 :: v_dual_mov_b32 v8, v44
+; GFX11-TRUE16-NEXT: v_dual_mov_b32 v11, v77 :: v_dual_mov_b32 v12, v90
+; GFX11-TRUE16-NEXT: v_mov_b32_e32 v13, v104
+; GFX11-TRUE16-NEXT: v_dual_mov_b32 v15, v135 :: v_dual_mov_b32 v16, v152
+; GFX11-TRUE16-NEXT: v_dual_mov_b32 v17, v170 :: v_dual_mov_b32 v18, v187
+; GFX11-TRUE16-NEXT: v_dual_mov_b32 v19, v186 :: v_dual_mov_b32 v20, v185
+; GFX11-TRUE16-NEXT: v_dual_mov_b32 v21, v191 :: v_dual_mov_b32 v22, v190
+; GFX11-TRUE16-NEXT: v_dual_mov_b32 v23, v189 :: v_dual_mov_b32 v24, v188
+; GFX11-TRUE16-NEXT: s_clause 0x1f
+; GFX11-TRUE16-NEXT: scratch_load_b32 v191, off, s32
+; GFX11-TRUE16-NEXT: scratch_load_b32 v190, off, s32 offset:4
+; GFX11-TRUE16-NEXT: scratch_load_b32 v189, off, s32 offset:8
+; GFX11-TRUE16-NEXT: scratch_load_b32 v188, off, s32 offset:12
+; GFX11-TRUE16-NEXT: scratch_load_b32 v187, off, s32 offset:16
+; GFX11-TRUE16-NEXT: scratch_load_b32 v186, off, s32 offset:20
+; GFX11-TRUE16-NEXT: scratch_load_b32 v185, off, s32 offset:24
+; GFX11-TRUE16-NEXT: scratch_load_b32 v184, off, s32 offset:28
+; GFX11-TRUE16-NEXT: scratch_load_b32 v175, off, s32 offset:32
+; GFX11-TRUE16-NEXT: scratch_load_b32 v174, off, s32 offset:36
+; GFX11-TRUE16-NEXT: scratch_load_b32 v173, off, s32 offset:40
+; GFX11-TRUE16-NEXT: scratch_load_b32 v172, off, s32 offset:44
+; GFX11-TRUE16-NEXT: scratch_load_b32 v171, off, s32 offset:48
+; GFX11-TRUE16-NEXT: scratch_load_b32 v170, off, s32 offset:52
+; GFX11-TRUE16-NEXT: scratch_load_b32 v169, off, s32 offset:56
+; GFX11-TRUE16-NEXT: scratch_load_b32 v168, off, s32 offset:60
+; GFX11-TRUE16-NEXT: scratch_load_b32 v159, off, s32 offset:64
+; GFX11-TRUE16-NEXT: scratch_load_b32 v158, off, s32 offset:68
+; GFX11-TRUE16-NEXT: scratch_load_b32 v157, off, s32 offset:72
+; GFX11-TRUE16-NEXT: scratch_load_b32 v156, off, s32 offset:76
+; GFX11-TRUE16-NEXT: scratch_load_b32 v155, off, s32 offset:80
+; GFX11-TRUE16-NEXT: scratch_load_b32 v154, off, s32 offset:84
+; GFX11-TRUE16-NEXT: scratch_load_b32 v153, off, s32 offset:88
+; GFX11-TRUE16-NEXT: scratch_load_b32 v152, off, s32 offset:92
+; GFX11-TRUE16-NEXT: scratch_load_b32 v143, off, s32 offset:96
+; GFX11-TRUE16-NEXT: scratch_load_b32 v142, off, s32 offset:100
+; GFX11-TRUE16-NEXT: scratch_load_b32 v141, off, s32 offset:104
+; GFX11-TRUE16-NEXT: scratch_load_b32 v140, off, s32 offset:108
+; GFX11-TRUE16-NEXT: scratch_load_b32 v139, off, s32 offset:112
+; GFX11-TRUE16-NEXT: scratch_load_b32 v138, off, s32 offset:116
+; GFX11-TRUE16-NEXT: scratch_load_b32 v137, off, s32 offset:120
+; GFX11-TRUE16-NEXT: scratch_load_b32 v136, off, s32 offset:124
+; GFX11-TRUE16-NEXT: s_clause 0x1f
+; GFX11-TRUE16-NEXT: scratch_load_b32 v127, off, s32 offset:128
+; GFX11-TRUE16-NEXT: scratch_load_b32 v126, off, s32 offset:132
+; GFX11-TRUE16-NEXT: scratch_load_b32 v125, off, s32 offset:136
+; GFX11-TRUE16-NEXT: scratch_load_b32 v124, off, s32 offset:140
+; GFX11-TRUE16-NEXT: scratch_load_b32 v123, off, s32 offset:144
+; GFX11-TRUE16-NEXT: scratch_load_b32 v122, off, s32 offset:148
+; GFX11-TRUE16-NEXT: scratch_load_b32 v121, off, s32 offset:152
+; GFX11-TRUE16-NEXT: scratch_load_b32 v120, off, s32 offset:156
+; GFX11-TRUE16-NEXT: scratch_load_b32 v111, off, s32 offset:160
+; GFX11-TRUE16-NEXT: scratch_load_b32 v110, off, s32 offset:164
+; GFX11-TRUE16-NEXT: scratch_load_b32 v109, off, s32 offset:168
+; GFX11-TRUE16-NEXT: scratch_load_b32 v108, off, s32 offset:172
+; GFX11-TRUE16-NEXT: scratch_load_b32 v107, off, s32 offset:176
+; GFX11-TRUE16-NEXT: scratch_load_b32 v106, off, s32 offset:180
+; GFX11-TRUE16-NEXT: scratch_load_b32 v105, off, s32 offset:184
+; GFX11-TRUE16-NEXT: scratch_load_b32 v104, off, s32 offset:188
+; GFX11-TRUE16-NEXT: scratch_load_b32 v95, off, s32 offset:192
+; GFX11-TRUE16-NEXT: scratch_load_b32 v94, off, s32 offset:196
+; GFX11-TRUE16-NEXT: scratch_load_b32 v93, off, s32 offset:200
+; GFX11-TRUE16-NEXT: scratch_load_b32 v92, off, s32 offset:204
+; GFX11-TRUE16-NEXT: scratch_load_b32 v91, off, s32 offset:208
+; GFX11-TRUE16-NEXT: scratch_load_b32 v90, off, s32 offset:212
+; GFX11-TRUE16-NEXT: scratch_load_b32 v89, off, s32 offset:216
+; GFX11-TRUE16-NEXT: scratch_load_b32 v88, off, s32 offset:220
+; GFX11-TRUE16-NEXT: scratch_load_b32 v79, off, s32 offset:224
+; GFX11-TRUE16-NEXT: scratch_load_b32 v78, off, s32 offset:228
+; GFX11-TRUE16-NEXT: scratch_load_b32 v77, off, s32 offset:232
+; GFX11-TRUE16-NEXT: scratch_load_b32 v76, off, s32 offset:236
+; GFX11-TRUE16-NEXT: scratch_load_b32 v75, off, s32 offset:240
+; GFX11-TRUE16-NEXT: scratch_load_b32 v74, off, s32 offset:244
+; GFX11-TRUE16-NEXT: scratch_load_b32 v73, off, s32 offset:248
+; GFX11-TRUE16-NEXT: scratch_load_b32 v72, off, s32 offset:252
+; GFX11-TRUE16-NEXT: s_clause 0xf
+; GFX11-TRUE16-NEXT: scratch_load_b32 v63, off, s32 offset:256
+; GFX11-TRUE16-NEXT: scratch_load_b32 v62, off, s32 offset:260
+; GFX11-TRUE16-NEXT: scratch_load_b32 v61, off, s32 offset:264
+; GFX11-TRUE16-NEXT: scratch_load_b32 v60, off, s32 offset:268
+; GFX11-TRUE16-NEXT: scratch_load_b32 v59, off, s32 offset:272
+; GFX11-TRUE16-NEXT: scratch_load_b32 v58, off, s32 offset:276
+; GFX11-TRUE16-NEXT: scratch_load_b32 v57, off, s32 offset:280
+; GFX11-TRUE16-NEXT: scratch_load_b32 v56, off, s32 offset:284
+; GFX11-TRUE16-NEXT: scratch_load_b32 v47, off, s32 offset:288
+; GFX11-TRUE16-NEXT: scratch_load_b32 v46, off, s32 offset:292
+; GFX11-TRUE16-NEXT: scratch_load_b32 v45, off, s32 offset:296
+; GFX11-TRUE16-NEXT: scratch_load_b32 v44, off, s32 offset:300
+; GFX11-TRUE16-NEXT: scratch_load_b32 v43, off, s32 offset:304
+; GFX11-TRUE16-NEXT: scratch_load_b32 v42, off, s32 offset:308
+; GFX11-TRUE16-NEXT: scratch_load_b32 v41, off, s32 offset:312
+; GFX11-TRUE16-NEXT: scratch_load_b32 v40, off, s32 offset:316
+; GFX11-TRUE16-NEXT: v_dual_mov_b32 v3, v9 :: v_dual_mov_b32 v4, v14
+; GFX11-TRUE16-NEXT: v_dual_mov_b32 v9, v54 :: v_dual_mov_b32 v10, v65
+; GFX11-TRUE16-NEXT: v_dual_mov_b32 v14, v119 :: v_dual_mov_b32 v27, v28
+; GFX11-TRUE16-NEXT: s_waitcnt vmcnt(0)
; GFX11-TRUE16-NEXT: s_setpc_b64 s[30:31]
; GFX11-TRUE16-NEXT: .LBB31_4:
+; GFX11-TRUE16-NEXT: v_dual_mov_b32 v64, v28 :: v_dual_mov_b32 v53, v26
+; GFX11-TRUE16-NEXT: v_mov_b32_e32 v54, v25
; GFX11-TRUE16-NEXT: ; implicit-def: $vgpr0_vgpr1_vgpr2_vgpr3_vgpr4_vgpr5_vgpr6_vgpr7_vgpr8_vgpr9_vgpr10_vgpr11_vgpr12_vgpr13_vgpr14_vgpr15_vgpr16_vgpr17_vgpr18_vgpr19_vgpr20_vgpr21_vgpr22_vgpr23_vgpr24_vgpr25_vgpr26_vgpr27_vgpr28_vgpr29_vgpr30_vgpr31
+; GFX11-TRUE16-NEXT: ; implicit-def: $vgpr1_vgpr2_vgpr3_vgpr4_vgpr5_vgpr6_vgpr7_vgpr8_vgpr9_vgpr10_vgpr11_vgpr12_vgpr13_vgpr14_vgpr15_vgpr16_vgpr17_vgpr18_vgpr19_vgpr20_vgpr21_vgpr22_vgpr23_vgpr24_vgpr25_vgpr26_vgpr27_vgpr28_vgpr29_vgpr30_vgpr31_vgpr32
+; GFX11-TRUE16-NEXT: ; implicit-def: $vgpr3_vgpr4_vgpr5_vgpr6_vgpr7_vgpr8_vgpr9_vgpr10_vgpr11_vgpr12_vgpr13_vgpr14_vgpr15_vgpr16_vgpr17_vgpr18_vgpr19_vgpr20_vgpr21_vgpr22_vgpr23_vgpr24_vgpr25_vgpr26_vgpr27_vgpr28_vgpr29_vgpr30_vgpr31_vgpr32_vgpr33_vgpr34
+; GFX11-TRUE16-NEXT: ; implicit-def: $vgpr6_vgpr7_vgpr8_vgpr9_vgpr10_vgpr11_vgpr12_vgpr13_vgpr14_vgpr15_vgpr16_vgpr17_vgpr18_vgpr19_vgpr20_vgpr21_vgpr22_vgpr23_vgpr24_vgpr25_vgpr26_vgpr27_vgpr28_vgpr29_vgpr30_vgpr31_vgpr32_vgpr33_vgpr34_vgpr35_vgpr36_vgpr37
+; GFX11-TRUE16-NEXT: ; implicit-def: $vgpr10_vgpr11_vgpr12_vgpr13_vgpr14_vgpr15_vgpr16_vgpr17_vgpr18_vgpr19_vgpr20_vgpr21_vgpr22_vgpr23_vgpr24_vgpr25_vgpr26_vgpr27_vgpr28_vgpr29_vgpr30_vgpr31_vgpr32_vgpr33_vgpr34_vgpr35_vgpr36_vgpr37_vgpr38_vgpr39_vgpr40_vgpr41
+; GFX11-TRUE16-NEXT: ; implicit-def: $vgpr15_vgpr16_vgpr17_vgpr18_vgpr19_vgpr20_vgpr21_vgpr22_vgpr23_vgpr24_vgpr25_vgpr26_vgpr27_vgpr28_vgpr29_vgpr30_vgpr31_vgpr32_vgpr33_vgpr34_vgpr35_vgpr36_vgpr37_vgpr38_vgpr39_vgpr40_vgpr41_vgpr42_vgpr43_vgpr44_vgpr45_vgpr46
+; GFX11-TRUE16-NEXT: ; implicit-def: $vgpr21_vgpr22_vgpr23_vgpr24_vgpr25_vgpr26_vgpr27_vgpr28_vgpr29_vgpr30_vgpr31_vgpr32_vgpr33_vgpr34_vgpr35_vgpr36_vgpr37_vgpr38_vgpr39_vgpr40_vgpr41_vgpr42_vgpr43_vgpr44_vgpr45_vgpr46_vgpr47_vgpr48_vgpr49_vgpr50_vgpr51_vgpr52
+; GFX11-TRUE16-NEXT: s_delay_alu instid0(VALU_DEP_1) | instskip(NEXT) | instid1(VALU_DEP_3)
+; GFX11-TRUE16-NEXT: v_dual_mov_b32 v25, v54 :: v_dual_mov_b32 v26, v53
+; GFX11-TRUE16-NEXT: ; implicit-def: $vgpr28_vgpr29_vgpr30_vgpr31_vgpr32_vgpr33_vgpr34_vgpr35_vgpr36_vgpr37_vgpr38_vgpr39_vgpr40_vgpr41_vgpr42_vgpr43_vgpr44_vgpr45_vgpr46_vgpr47_vgpr48_vgpr49_vgpr50_vgpr51_vgpr52_vgpr53_vgpr54_vgpr55_vgpr56_vgpr57_vgpr58_vgpr59
+; GFX11-TRUE16-NEXT: v_mov_b32_e32 v28, v64
+; GFX11-TRUE16-NEXT: ; implicit-def: $vgpr36_vgpr37_vgpr38_vgpr39_vgpr40_vgpr41_vgpr42_vgpr43_vgpr44_vgpr45_vgpr46_vgpr47_vgpr48_vgpr49_vgpr50_vgpr51_vgpr52_vgpr53_vgpr54_vgpr55_vgpr56_vgpr57_vgpr58_vgpr59_vgpr60_vgpr61_vgpr62_vgpr63_vgpr64_vgpr65_vgpr66_vgpr67
+; GFX11-TRUE16-NEXT: ; implicit-def: $vgpr45_vgpr46_vgpr47_vgpr48_vgpr49_vgpr50_vgpr51_vgpr52_vgpr53_vgpr54_vgpr55_vgpr56_vgpr57_vgpr58_vgpr59_vgpr60_vgpr61_vgpr62_vgpr63_vgpr64_vgpr65_vgpr66_vgpr67_vgpr68_vgpr69_vgpr70_vgpr71_vgpr72_vgpr73_vgpr74_vgpr75_vgpr76
+; GFX11-TRUE16-NEXT: ; implicit-def: $vgpr55_vgpr56_vgpr57_vgpr58_vgpr59_vgpr60_vgpr61_vgpr62_vgpr63_vgpr64_vgpr65_vgpr66_vgpr67_vgpr68_vgpr69_vgpr70_vgpr71_vgpr72_vgpr73_vgpr74_vgpr75_vgpr76_vgpr77_vgpr78_vgpr79_vgpr80_vgpr81_vgpr82_vgpr83_vgpr84_vgpr85_vgpr86
+; GFX11-TRUE16-NEXT: ; implicit-def: $vgpr66_vgpr67_vgpr68_vgpr69_vgpr70_vgpr71_vgpr72_vgpr73_vgpr74_vgpr75_vgpr76_vgpr77_vgpr78_vgpr79_vgpr80_vgpr81_vgpr82_vgpr83_vgpr84_vgpr85_vgpr86_vgpr87_vgpr88_vgpr89_vgpr90_vgpr91_vgpr92_vgpr93_vgpr94_vgpr95_vgpr96_vgpr97
+; GFX11-TRUE16-NEXT: ; implicit-def: $vgpr78_vgpr79_vgpr80_vgpr81_vgpr82_vgpr83_vgpr84_vgpr85_vgpr86_vgpr87_vgpr88_vgpr89_vgpr90_vgpr91_vgpr92_vgpr93_vgpr94_vgpr95_vgpr96_vgpr97_vgpr98_vgpr99_vgpr100_vgpr101_vgpr102_vgpr103_vgpr104_vgpr105_vgpr106_vgpr107_vgpr108_vgpr109
+; GFX11-TRUE16-NEXT: ; implicit-def: $vgpr91_vgpr92_vgpr93_vgpr94_vgpr95_vgpr96_vgpr97_vgpr98_vgpr99_vgpr100_vgpr101_vgpr102_vgpr103_vgpr104_vgpr105_vgpr106_vgpr107_vgpr108_vgpr109_vgpr110_vgpr111_vgpr112_vgpr113_vgpr114_vgpr115_vgpr116_vgpr117_vgpr118_vgpr119_vgpr120_vgpr121_vgpr122
+; GFX11-TRUE16-NEXT: ; implicit-def: $vgpr105_vgpr106_vgpr107_vgpr108_vgpr109_vgpr110_vgpr111_vgpr112_vgpr113_vgpr114_vgpr115_vgpr116_vgpr117_vgpr118_vgpr119_vgpr120_vgpr121_vgpr122_vgpr123_vgpr124_vgpr125_vgpr126_vgpr127_vgpr128_vgpr129_vgpr130_vgpr131_vgpr132_vgpr133_vgpr134_vgpr135_vgpr136
+; GFX11-TRUE16-NEXT: ; implicit-def: $vgpr120_vgpr121_vgpr122_vgpr123_vgpr124_vgpr125_vgpr126_vgpr127_vgpr128_vgpr129_vgpr130_vgpr131_vgpr132_vgpr133_vgpr134_vgpr135_vgpr136_vgpr137_vgpr138_vgpr139_vgpr140_vgpr141_vgpr142_vgpr143_vgpr144_vgpr145_vgpr146_vgpr147_vgpr148_vgpr149_vgpr150_vgpr151
+; GFX11-TRUE16-NEXT: ; implicit-def: $vgpr136_vgpr137_vgpr138_vgpr139_vgpr140_vgpr141_vgpr142_vgpr143_vgpr144_vgpr145_vgpr146_vgpr147_vgpr148_vgpr149_vgpr150_vgpr151_vgpr152_vgpr153_vgpr154_vgpr155_vgpr156_vgpr157_vgpr158_vgpr159_vgpr160_vgpr161_vgpr162_vgpr163_vgpr164_vgpr165_vgpr166_vgpr167
+; GFX11-TRUE16-NEXT: ; implicit-def: $vgpr153_vgpr154_vgpr155_vgpr156_vgpr157_vgpr158_vgpr159_vgpr160_vgpr161_vgpr162_vgpr163_vgpr164_vgpr165_vgpr166_vgpr167_vgpr168_vgpr169_vgpr170_vgpr171_vgpr172_vgpr173_vgpr174_vgpr175_vgpr176_vgpr177_vgpr178_vgpr179_vgpr180_vgpr181_vgpr182_vgpr183_vgpr184
; GFX11-TRUE16-NEXT: s_branch .LBB31_2
;
; GFX11-FAKE16-LABEL: bitcast_v56i16_to_v28f32_scalar:
@@ -20379,191 +21026,364 @@ define inreg <56 x half> @bitcast_v28f32_to_v56f16_scalar(<28 x float> inreg %a,
; GFX9-NEXT: ; implicit-def: $vgpr50
; GFX9-NEXT: s_branch .LBB33_2
;
-; GFX11-LABEL: bitcast_v28f32_to_v56f16_scalar:
-; GFX11: ; %bb.0:
-; GFX11-NEXT: s_waitcnt vmcnt(0) expcnt(0) lgkmcnt(0)
-; GFX11-NEXT: v_cmp_ne_u32_e32 vcc_lo, 0, v10
-; GFX11-NEXT: v_dual_mov_b32 v28, s0 :: v_dual_mov_b32 v27, s1
-; GFX11-NEXT: v_dual_mov_b32 v26, s2 :: v_dual_mov_b32 v25, s3
-; GFX11-NEXT: v_dual_mov_b32 v24, s16 :: v_dual_mov_b32 v23, s17
-; GFX11-NEXT: v_dual_mov_b32 v22, s18 :: v_dual_mov_b32 v21, s19
-; GFX11-NEXT: v_dual_mov_b32 v20, s20 :: v_dual_mov_b32 v19, s21
-; GFX11-NEXT: v_dual_mov_b32 v18, s22 :: v_dual_mov_b32 v11, s24
-; GFX11-NEXT: v_dual_mov_b32 v12, s23 :: v_dual_mov_b32 v15, s25
-; GFX11-NEXT: v_dual_mov_b32 v14, s26 :: v_dual_mov_b32 v13, s27
-; GFX11-NEXT: v_dual_mov_b32 v17, s28 :: v_dual_mov_b32 v16, s29
-; GFX11-NEXT: s_mov_b32 s0, 0
-; GFX11-NEXT: s_and_b32 s1, vcc_lo, exec_lo
-; GFX11-NEXT: s_cbranch_scc0 .LBB33_4
-; GFX11-NEXT: ; %bb.1: ; %cmp.false
-; GFX11-NEXT: v_lshrrev_b32_e32 v38, 16, v9
-; GFX11-NEXT: v_lshrrev_b32_e32 v39, 16, v8
-; GFX11-NEXT: v_lshrrev_b32_e32 v48, 16, v7
-; GFX11-NEXT: v_lshrrev_b32_e32 v49, 16, v6
-; GFX11-NEXT: v_lshrrev_b32_e32 v50, 16, v5
-; GFX11-NEXT: v_lshrrev_b32_e32 v51, 16, v4
-; GFX11-NEXT: v_lshrrev_b32_e32 v52, 16, v3
-; GFX11-NEXT: v_lshrrev_b32_e32 v53, 16, v2
-; GFX11-NEXT: v_lshrrev_b32_e32 v54, 16, v1
-; GFX11-NEXT: v_lshrrev_b32_e32 v55, 16, v0
-; GFX11-NEXT: v_lshrrev_b32_e32 v64, 16, v16
-; GFX11-NEXT: v_lshrrev_b32_e32 v65, 16, v17
-; GFX11-NEXT: v_lshrrev_b32_e32 v66, 16, v13
-; GFX11-NEXT: v_lshrrev_b32_e32 v67, 16, v14
-; GFX11-NEXT: v_lshrrev_b32_e32 v68, 16, v15
-; GFX11-NEXT: v_lshrrev_b32_e32 v69, 16, v11
-; GFX11-NEXT: v_lshrrev_b32_e32 v70, 16, v12
-; GFX11-NEXT: v_lshrrev_b32_e32 v10, 16, v18
-; GFX11-NEXT: v_lshrrev_b32_e32 v29, 16, v19
-; GFX11-NEXT: v_lshrrev_b32_e32 v71, 16, v20
-; GFX11-NEXT: v_lshrrev_b32_e32 v37, 16, v21
-; GFX11-NEXT: v_lshrrev_b32_e32 v36, 16, v22
-; GFX11-NEXT: v_lshrrev_b32_e32 v35, 16, v23
-; GFX11-NEXT: v_lshrrev_b32_e32 v34, 16, v24
-; GFX11-NEXT: v_lshrrev_b32_e32 v33, 16, v25
-; GFX11-NEXT: v_lshrrev_b32_e32 v32, 16, v26
-; GFX11-NEXT: v_lshrrev_b32_e32 v31, 16, v27
-; GFX11-NEXT: v_lshrrev_b32_e32 v30, 16, v28
-; GFX11-NEXT: s_and_not1_b32 vcc_lo, exec_lo, s0
-; GFX11-NEXT: s_cbranch_vccnz .LBB33_3
-; GFX11-NEXT: .LBB33_2: ; %cmp.true
-; GFX11-NEXT: v_dual_add_f32 v9, 1.0, v9 :: v_dual_add_f32 v8, 1.0, v8
-; GFX11-NEXT: v_dual_add_f32 v7, 1.0, v7 :: v_dual_add_f32 v6, 1.0, v6
-; GFX11-NEXT: v_dual_add_f32 v5, 1.0, v5 :: v_dual_add_f32 v4, 1.0, v4
-; GFX11-NEXT: v_dual_add_f32 v3, 1.0, v3 :: v_dual_add_f32 v2, 1.0, v2
-; GFX11-NEXT: v_dual_add_f32 v1, 1.0, v1 :: v_dual_add_f32 v0, 1.0, v0
-; GFX11-NEXT: v_dual_add_f32 v16, 1.0, v16 :: v_dual_add_f32 v17, 1.0, v17
-; GFX11-NEXT: v_dual_add_f32 v13, 1.0, v13 :: v_dual_add_f32 v14, 1.0, v14
-; GFX11-NEXT: v_dual_add_f32 v15, 1.0, v15 :: v_dual_add_f32 v12, 1.0, v12
-; GFX11-NEXT: v_dual_add_f32 v11, 1.0, v11 :: v_dual_add_f32 v18, 1.0, v18
-; GFX11-NEXT: v_dual_add_f32 v19, 1.0, v19 :: v_dual_add_f32 v20, 1.0, v20
-; GFX11-NEXT: v_dual_add_f32 v21, 1.0, v21 :: v_dual_add_f32 v22, 1.0, v22
-; GFX11-NEXT: v_dual_add_f32 v23, 1.0, v23 :: v_dual_add_f32 v24, 1.0, v24
-; GFX11-NEXT: v_dual_add_f32 v25, 1.0, v25 :: v_dual_add_f32 v26, 1.0, v26
-; GFX11-NEXT: v_dual_add_f32 v27, 1.0, v27 :: v_dual_add_f32 v28, 1.0, v28
-; GFX11-NEXT: v_lshrrev_b32_e32 v38, 16, v9
-; GFX11-NEXT: v_lshrrev_b32_e32 v39, 16, v8
-; GFX11-NEXT: v_lshrrev_b32_e32 v48, 16, v7
-; GFX11-NEXT: v_lshrrev_b32_e32 v49, 16, v6
-; GFX11-NEXT: v_lshrrev_b32_e32 v50, 16, v5
-; GFX11-NEXT: v_lshrrev_b32_e32 v51, 16, v4
-; GFX11-NEXT: v_lshrrev_b32_e32 v52, 16, v3
-; GFX11-NEXT: v_lshrrev_b32_e32 v53, 16, v2
-; GFX11-NEXT: v_lshrrev_b32_e32 v54, 16, v1
-; GFX11-NEXT: v_lshrrev_b32_e32 v55, 16, v0
-; GFX11-NEXT: v_lshrrev_b32_e32 v64, 16, v16
-; GFX11-NEXT: v_lshrrev_b32_e32 v65, 16, v17
-; GFX11-NEXT: v_lshrrev_b32_e32 v66, 16, v13
-; GFX11-NEXT: v_lshrrev_b32_e32 v67, 16, v14
-; GFX11-NEXT: v_lshrrev_b32_e32 v68, 16, v15
-; GFX11-NEXT: v_lshrrev_b32_e32 v69, 16, v11
-; GFX11-NEXT: v_lshrrev_b32_e32 v70, 16, v12
-; GFX11-NEXT: v_lshrrev_b32_e32 v10, 16, v18
-; GFX11-NEXT: v_lshrrev_b32_e32 v29, 16, v19
-; GFX11-NEXT: v_lshrrev_b32_e32 v71, 16, v20
-; GFX11-NEXT: v_lshrrev_b32_e32 v37, 16, v21
-; GFX11-NEXT: v_lshrrev_b32_e32 v36, 16, v22
-; GFX11-NEXT: v_lshrrev_b32_e32 v35, 16, v23
-; GFX11-NEXT: v_lshrrev_b32_e32 v34, 16, v24
-; GFX11-NEXT: v_lshrrev_b32_e32 v33, 16, v25
-; GFX11-NEXT: v_lshrrev_b32_e32 v32, 16, v26
-; GFX11-NEXT: v_lshrrev_b32_e32 v31, 16, v27
-; GFX11-NEXT: v_lshrrev_b32_e32 v30, 16, v28
-; GFX11-NEXT: .LBB33_3: ; %end
-; GFX11-NEXT: v_and_b32_e32 v19, 0xffff, v19
-; GFX11-NEXT: v_and_b32_e32 v27, 0xffff, v27
-; GFX11-NEXT: v_and_b32_e32 v25, 0xffff, v25
-; GFX11-NEXT: v_and_b32_e32 v23, 0xffff, v23
-; GFX11-NEXT: v_and_b32_e32 v21, 0xffff, v21
-; GFX11-NEXT: v_and_b32_e32 v18, 0xffff, v18
-; GFX11-NEXT: v_lshl_or_b32 v29, v29, 16, v19
-; GFX11-NEXT: v_and_b32_e32 v17, 0xffff, v17
-; GFX11-NEXT: v_and_b32_e32 v19, 0xffff, v16
-; GFX11-NEXT: v_and_b32_e32 v1, 0xffff, v1
-; GFX11-NEXT: v_lshl_or_b32 v31, v31, 16, v27
-; GFX11-NEXT: v_and_b32_e32 v28, 0xffff, v28
-; GFX11-NEXT: v_lshl_or_b32 v33, v33, 16, v25
-; GFX11-NEXT: v_and_b32_e32 v26, 0xffff, v26
-; GFX11-NEXT: v_lshl_or_b32 v35, v35, 16, v23
-; GFX11-NEXT: v_and_b32_e32 v24, 0xffff, v24
-; GFX11-NEXT: v_lshl_or_b32 v37, v37, 16, v21
-; GFX11-NEXT: v_and_b32_e32 v22, 0xffff, v22
-; GFX11-NEXT: v_and_b32_e32 v20, 0xffff, v20
-; GFX11-NEXT: v_and_b32_e32 v12, 0xffff, v12
-; GFX11-NEXT: v_and_b32_e32 v21, 0xffff, v11
-; GFX11-NEXT: v_lshl_or_b32 v10, v10, 16, v18
-; GFX11-NEXT: v_and_b32_e32 v15, 0xffff, v15
-; GFX11-NEXT: v_and_b32_e32 v18, 0xffff, v13
-; GFX11-NEXT: v_lshl_or_b32 v16, v65, 16, v17
-; GFX11-NEXT: v_lshl_or_b32 v17, v64, 16, v19
-; GFX11-NEXT: v_and_b32_e32 v0, 0xffff, v0
-; GFX11-NEXT: v_and_b32_e32 v2, 0xffff, v2
-; GFX11-NEXT: v_and_b32_e32 v3, 0xffff, v3
-; GFX11-NEXT: v_and_b32_e32 v4, 0xffff, v4
-; GFX11-NEXT: v_lshl_or_b32 v19, v54, 16, v1
-; GFX11-NEXT: v_and_b32_e32 v1, 0xffff, v6
-; GFX11-NEXT: v_lshl_or_b32 v32, v32, 16, v26
-; GFX11-NEXT: v_lshl_or_b32 v11, v70, 16, v12
-; GFX11-NEXT: v_lshl_or_b32 v12, v69, 16, v21
-; GFX11-NEXT: v_and_b32_e32 v14, 0xffff, v14
-; GFX11-NEXT: v_lshl_or_b32 v13, v68, 16, v15
-; GFX11-NEXT: v_lshl_or_b32 v15, v66, 16, v18
-; GFX11-NEXT: v_lshl_or_b32 v18, v55, 16, v0
-; GFX11-NEXT: v_lshl_or_b32 v21, v52, 16, v3
-; GFX11-NEXT: v_and_b32_e32 v0, 0xffff, v5
-; GFX11-NEXT: v_and_b32_e32 v3, 0xffff, v8
-; GFX11-NEXT: v_mov_b32_e32 v5, v35
-; GFX11-NEXT: v_lshl_or_b32 v34, v34, 16, v24
-; GFX11-NEXT: v_lshl_or_b32 v24, v49, 16, v1
-; GFX11-NEXT: v_mov_b32_e32 v1, v31
-; GFX11-NEXT: v_lshl_or_b32 v30, v30, 16, v28
-; GFX11-NEXT: v_lshl_or_b32 v28, v71, 16, v20
-; GFX11-NEXT: v_lshl_or_b32 v20, v53, 16, v2
-; GFX11-NEXT: v_dual_mov_b32 v7, v37 :: v_dual_and_b32 v2, 0xffff, v7
-; GFX11-NEXT: v_lshl_or_b32 v36, v36, 16, v22
-; GFX11-NEXT: v_lshl_or_b32 v22, v51, 16, v4
-; GFX11-NEXT: v_and_b32_e32 v4, 0xffff, v9
-; GFX11-NEXT: v_lshl_or_b32 v14, v67, 16, v14
-; GFX11-NEXT: v_lshl_or_b32 v23, v50, 16, v0
-; GFX11-NEXT: v_lshl_or_b32 v25, v48, 16, v2
-; GFX11-NEXT: v_lshl_or_b32 v26, v39, 16, v3
-; GFX11-NEXT: v_lshl_or_b32 v27, v38, 16, v4
-; GFX11-NEXT: v_mov_b32_e32 v0, v30
-; GFX11-NEXT: v_dual_mov_b32 v2, v32 :: v_dual_mov_b32 v3, v33
-; GFX11-NEXT: v_mov_b32_e32 v4, v34
-; GFX11-NEXT: v_mov_b32_e32 v6, v36
-; GFX11-NEXT: v_dual_mov_b32 v8, v28 :: v_dual_mov_b32 v9, v29
-; GFX11-NEXT: s_setpc_b64 s[30:31]
-; GFX11-NEXT: .LBB33_4:
-; GFX11-NEXT: ; implicit-def: $vgpr30
-; GFX11-NEXT: ; implicit-def: $vgpr31
-; GFX11-NEXT: ; implicit-def: $vgpr32
-; GFX11-NEXT: ; implicit-def: $vgpr33
-; GFX11-NEXT: ; implicit-def: $vgpr34
-; GFX11-NEXT: ; implicit-def: $vgpr35
-; GFX11-NEXT: ; implicit-def: $vgpr36
-; GFX11-NEXT: ; implicit-def: $vgpr37
-; GFX11-NEXT: ; implicit-def: $vgpr71
-; GFX11-NEXT: ; implicit-def: $vgpr29
-; GFX11-NEXT: ; implicit-def: $vgpr10
-; GFX11-NEXT: ; implicit-def: $vgpr70
-; GFX11-NEXT: ; implicit-def: $vgpr69
-; GFX11-NEXT: ; implicit-def: $vgpr68
-; GFX11-NEXT: ; implicit-def: $vgpr67
-; GFX11-NEXT: ; implicit-def: $vgpr66
-; GFX11-NEXT: ; implicit-def: $vgpr65
-; GFX11-NEXT: ; implicit-def: $vgpr64
-; GFX11-NEXT: ; implicit-def: $vgpr55
-; GFX11-NEXT: ; implicit-def: $vgpr54
-; GFX11-NEXT: ; implicit-def: $vgpr53
-; GFX11-NEXT: ; implicit-def: $vgpr52
-; GFX11-NEXT: ; implicit-def: $vgpr51
-; GFX11-NEXT: ; implicit-def: $vgpr50
-; GFX11-NEXT: ; implicit-def: $vgpr49
-; GFX11-NEXT: ; implicit-def: $vgpr48
-; GFX11-NEXT: ; implicit-def: $vgpr39
-; GFX11-NEXT: ; implicit-def: $vgpr38
-; GFX11-NEXT: s_branch .LBB33_2
+; GFX11-TRUE16-LABEL: bitcast_v28f32_to_v56f16_scalar:
+; GFX11-TRUE16: ; %bb.0:
+; GFX11-TRUE16-NEXT: s_waitcnt vmcnt(0) expcnt(0) lgkmcnt(0)
+; GFX11-TRUE16-NEXT: v_dual_mov_b32 v16, v10 :: v_dual_mov_b32 v27, v9
+; GFX11-TRUE16-NEXT: v_dual_mov_b32 v26, v8 :: v_dual_mov_b32 v25, v7
+; GFX11-TRUE16-NEXT: v_dual_mov_b32 v24, v6 :: v_dual_mov_b32 v23, v5
+; GFX11-TRUE16-NEXT: s_delay_alu instid0(VALU_DEP_3)
+; GFX11-TRUE16-NEXT: v_cmp_ne_u32_e32 vcc_lo, 0, v16
+; GFX11-TRUE16-NEXT: v_dual_mov_b32 v22, v4 :: v_dual_mov_b32 v21, v3
+; GFX11-TRUE16-NEXT: v_dual_mov_b32 v20, v2 :: v_dual_mov_b32 v19, v1
+; GFX11-TRUE16-NEXT: v_dual_mov_b32 v18, v0 :: v_dual_mov_b32 v1, s1
+; GFX11-TRUE16-NEXT: v_dual_mov_b32 v0, s0 :: v_dual_mov_b32 v3, s3
+; GFX11-TRUE16-NEXT: v_dual_mov_b32 v2, s2 :: v_dual_mov_b32 v5, s17
+; GFX11-TRUE16-NEXT: v_dual_mov_b32 v4, s16 :: v_dual_mov_b32 v7, s19
+; GFX11-TRUE16-NEXT: v_dual_mov_b32 v6, s18 :: v_dual_mov_b32 v9, s21
+; GFX11-TRUE16-NEXT: v_dual_mov_b32 v8, s20 :: v_dual_mov_b32 v11, s23
+; GFX11-TRUE16-NEXT: v_dual_mov_b32 v10, s22 :: v_dual_mov_b32 v13, s25
+; GFX11-TRUE16-NEXT: v_dual_mov_b32 v12, s24 :: v_dual_mov_b32 v15, s27
+; GFX11-TRUE16-NEXT: v_dual_mov_b32 v14, s26 :: v_dual_mov_b32 v17, s29
+; GFX11-TRUE16-NEXT: v_mov_b32_e32 v16, s28
+; GFX11-TRUE16-NEXT: s_mov_b32 s0, 0
+; GFX11-TRUE16-NEXT: s_and_b32 s1, vcc_lo, exec_lo
+; GFX11-TRUE16-NEXT: s_cbranch_scc0 .LBB33_4
+; GFX11-TRUE16-NEXT: ; %bb.1: ; %cmp.false
+; GFX11-TRUE16-NEXT: v_lshrrev_b32_e32 v28, 16, v27
+; GFX11-TRUE16-NEXT: v_lshrrev_b32_e32 v29, 16, v26
+; GFX11-TRUE16-NEXT: v_lshrrev_b32_e32 v30, 16, v25
+; GFX11-TRUE16-NEXT: v_lshrrev_b32_e32 v31, 16, v24
+; GFX11-TRUE16-NEXT: v_lshrrev_b32_e32 v32, 16, v23
+; GFX11-TRUE16-NEXT: v_lshrrev_b32_e32 v33, 16, v22
+; GFX11-TRUE16-NEXT: v_lshrrev_b32_e32 v34, 16, v21
+; GFX11-TRUE16-NEXT: v_lshrrev_b32_e32 v35, 16, v20
+; GFX11-TRUE16-NEXT: v_lshrrev_b32_e32 v36, 16, v19
+; GFX11-TRUE16-NEXT: v_lshrrev_b32_e32 v37, 16, v18
+; GFX11-TRUE16-NEXT: v_lshrrev_b32_e32 v38, 16, v17
+; GFX11-TRUE16-NEXT: v_lshrrev_b32_e32 v39, 16, v16
+; GFX11-TRUE16-NEXT: v_lshrrev_b32_e32 v48, 16, v15
+; GFX11-TRUE16-NEXT: v_lshrrev_b32_e32 v49, 16, v14
+; GFX11-TRUE16-NEXT: v_lshrrev_b32_e32 v50, 16, v13
+; GFX11-TRUE16-NEXT: v_lshrrev_b32_e32 v51, 16, v12
+; GFX11-TRUE16-NEXT: v_lshrrev_b32_e32 v52, 16, v11
+; GFX11-TRUE16-NEXT: v_lshrrev_b32_e32 v53, 16, v10
+; GFX11-TRUE16-NEXT: v_lshrrev_b32_e32 v54, 16, v9
+; GFX11-TRUE16-NEXT: v_lshrrev_b32_e32 v55, 16, v8
+; GFX11-TRUE16-NEXT: v_lshrrev_b32_e32 v64, 16, v7
+; GFX11-TRUE16-NEXT: v_lshrrev_b32_e32 v65, 16, v6
+; GFX11-TRUE16-NEXT: v_lshrrev_b32_e32 v66, 16, v5
+; GFX11-TRUE16-NEXT: v_lshrrev_b32_e32 v67, 16, v4
+; GFX11-TRUE16-NEXT: v_lshrrev_b32_e32 v68, 16, v3
+; GFX11-TRUE16-NEXT: v_lshrrev_b32_e32 v69, 16, v2
+; GFX11-TRUE16-NEXT: v_lshrrev_b32_e32 v70, 16, v1
+; GFX11-TRUE16-NEXT: v_lshrrev_b32_e32 v71, 16, v0
+; GFX11-TRUE16-NEXT: s_and_not1_b32 vcc_lo, exec_lo, s0
+; GFX11-TRUE16-NEXT: s_cbranch_vccnz .LBB33_3
+; GFX11-TRUE16-NEXT: .LBB33_2: ; %cmp.true
+; GFX11-TRUE16-NEXT: v_dual_add_f32 v27, 1.0, v27 :: v_dual_add_f32 v26, 1.0, v26
+; GFX11-TRUE16-NEXT: v_dual_add_f32 v25, 1.0, v25 :: v_dual_add_f32 v24, 1.0, v24
+; GFX11-TRUE16-NEXT: v_dual_add_f32 v23, 1.0, v23 :: v_dual_add_f32 v22, 1.0, v22
+; GFX11-TRUE16-NEXT: v_dual_add_f32 v21, 1.0, v21 :: v_dual_add_f32 v20, 1.0, v20
+; GFX11-TRUE16-NEXT: v_dual_add_f32 v19, 1.0, v19 :: v_dual_add_f32 v18, 1.0, v18
+; GFX11-TRUE16-NEXT: v_dual_add_f32 v17, 1.0, v17 :: v_dual_add_f32 v16, 1.0, v16
+; GFX11-TRUE16-NEXT: v_dual_add_f32 v15, 1.0, v15 :: v_dual_add_f32 v14, 1.0, v14
+; GFX11-TRUE16-NEXT: v_dual_add_f32 v13, 1.0, v13 :: v_dual_add_f32 v12, 1.0, v12
+; GFX11-TRUE16-NEXT: v_dual_add_f32 v11, 1.0, v11 :: v_dual_add_f32 v10, 1.0, v10
+; GFX11-TRUE16-NEXT: v_dual_add_f32 v9, 1.0, v9 :: v_dual_add_f32 v8, 1.0, v8
+; GFX11-TRUE16-NEXT: v_dual_add_f32 v7, 1.0, v7 :: v_dual_add_f32 v6, 1.0, v6
+; GFX11-TRUE16-NEXT: v_dual_add_f32 v5, 1.0, v5 :: v_dual_add_f32 v4, 1.0, v4
+; GFX11-TRUE16-NEXT: v_dual_add_f32 v3, 1.0, v3 :: v_dual_add_f32 v2, 1.0, v2
+; GFX11-TRUE16-NEXT: v_dual_add_f32 v1, 1.0, v1 :: v_dual_add_f32 v0, 1.0, v0
+; GFX11-TRUE16-NEXT: v_lshrrev_b32_e32 v28, 16, v27
+; GFX11-TRUE16-NEXT: v_lshrrev_b32_e32 v29, 16, v26
+; GFX11-TRUE16-NEXT: v_lshrrev_b32_e32 v30, 16, v25
+; GFX11-TRUE16-NEXT: v_lshrrev_b32_e32 v31, 16, v24
+; GFX11-TRUE16-NEXT: v_lshrrev_b32_e32 v32, 16, v23
+; GFX11-TRUE16-NEXT: v_lshrrev_b32_e32 v33, 16, v22
+; GFX11-TRUE16-NEXT: v_lshrrev_b32_e32 v34, 16, v21
+; GFX11-TRUE16-NEXT: v_lshrrev_b32_e32 v35, 16, v20
+; GFX11-TRUE16-NEXT: v_lshrrev_b32_e32 v36, 16, v19
+; GFX11-TRUE16-NEXT: v_lshrrev_b32_e32 v37, 16, v18
+; GFX11-TRUE16-NEXT: v_lshrrev_b32_e32 v38, 16, v17
+; GFX11-TRUE16-NEXT: v_lshrrev_b32_e32 v39, 16, v16
+; GFX11-TRUE16-NEXT: v_lshrrev_b32_e32 v48, 16, v15
+; GFX11-TRUE16-NEXT: v_lshrrev_b32_e32 v49, 16, v14
+; GFX11-TRUE16-NEXT: v_lshrrev_b32_e32 v50, 16, v13
+; GFX11-TRUE16-NEXT: v_lshrrev_b32_e32 v51, 16, v12
+; GFX11-TRUE16-NEXT: v_lshrrev_b32_e32 v52, 16, v11
+; GFX11-TRUE16-NEXT: v_lshrrev_b32_e32 v53, 16, v10
+; GFX11-TRUE16-NEXT: v_lshrrev_b32_e32 v54, 16, v9
+; GFX11-TRUE16-NEXT: v_lshrrev_b32_e32 v55, 16, v8
+; GFX11-TRUE16-NEXT: v_lshrrev_b32_e32 v64, 16, v7
+; GFX11-TRUE16-NEXT: v_lshrrev_b32_e32 v65, 16, v6
+; GFX11-TRUE16-NEXT: v_lshrrev_b32_e32 v66, 16, v5
+; GFX11-TRUE16-NEXT: v_lshrrev_b32_e32 v67, 16, v4
+; GFX11-TRUE16-NEXT: v_lshrrev_b32_e32 v68, 16, v3
+; GFX11-TRUE16-NEXT: v_lshrrev_b32_e32 v69, 16, v2
+; GFX11-TRUE16-NEXT: v_lshrrev_b32_e32 v70, 16, v1
+; GFX11-TRUE16-NEXT: v_lshrrev_b32_e32 v71, 16, v0
+; GFX11-TRUE16-NEXT: .LBB33_3: ; %end
+; GFX11-TRUE16-NEXT: s_delay_alu instid0(VALU_DEP_1) | instskip(NEXT) | instid1(VALU_DEP_4)
+; GFX11-TRUE16-NEXT: v_dual_mov_b32 v71, v71 :: v_dual_mov_b32 v70, v70
+; GFX11-TRUE16-NEXT: v_dual_mov_b32 v69, v69 :: v_dual_mov_b32 v68, v68
+; GFX11-TRUE16-NEXT: v_dual_mov_b32 v67, v67 :: v_dual_mov_b32 v66, v66
+; GFX11-TRUE16-NEXT: v_dual_mov_b32 v65, v65 :: v_dual_mov_b32 v64, v64
+; GFX11-TRUE16-NEXT: v_dual_mov_b32 v55, v55 :: v_dual_mov_b32 v54, v54
+; GFX11-TRUE16-NEXT: v_dual_mov_b32 v53, v53 :: v_dual_mov_b32 v52, v52
+; GFX11-TRUE16-NEXT: v_dual_mov_b32 v51, v51 :: v_dual_mov_b32 v50, v50
+; GFX11-TRUE16-NEXT: v_dual_mov_b32 v49, v49 :: v_dual_mov_b32 v48, v48
+; GFX11-TRUE16-NEXT: v_dual_mov_b32 v39, v39 :: v_dual_mov_b32 v38, v38
+; GFX11-TRUE16-NEXT: v_dual_mov_b32 v37, v37 :: v_dual_mov_b32 v36, v36
+; GFX11-TRUE16-NEXT: v_dual_mov_b32 v35, v35 :: v_dual_mov_b32 v34, v34
+; GFX11-TRUE16-NEXT: v_dual_mov_b32 v33, v33 :: v_dual_mov_b32 v32, v32
+; GFX11-TRUE16-NEXT: v_dual_mov_b32 v31, v31 :: v_dual_mov_b32 v30, v30
+; GFX11-TRUE16-NEXT: v_dual_mov_b32 v29, v29 :: v_dual_mov_b32 v28, v28
+; GFX11-TRUE16-NEXT: v_mov_b16_e32 v0.h, v71.l
+; GFX11-TRUE16-NEXT: v_mov_b16_e32 v1.h, v70.l
+; GFX11-TRUE16-NEXT: v_mov_b16_e32 v2.h, v69.l
+; GFX11-TRUE16-NEXT: v_mov_b16_e32 v3.h, v68.l
+; GFX11-TRUE16-NEXT: v_mov_b16_e32 v4.h, v67.l
+; GFX11-TRUE16-NEXT: v_mov_b16_e32 v5.h, v66.l
+; GFX11-TRUE16-NEXT: v_mov_b16_e32 v6.h, v65.l
+; GFX11-TRUE16-NEXT: v_mov_b16_e32 v7.h, v64.l
+; GFX11-TRUE16-NEXT: v_mov_b16_e32 v8.h, v55.l
+; GFX11-TRUE16-NEXT: v_mov_b16_e32 v9.h, v54.l
+; GFX11-TRUE16-NEXT: v_mov_b16_e32 v10.h, v53.l
+; GFX11-TRUE16-NEXT: v_mov_b16_e32 v11.h, v52.l
+; GFX11-TRUE16-NEXT: v_mov_b16_e32 v12.h, v51.l
+; GFX11-TRUE16-NEXT: v_mov_b16_e32 v13.h, v50.l
+; GFX11-TRUE16-NEXT: v_mov_b16_e32 v14.h, v49.l
+; GFX11-TRUE16-NEXT: v_mov_b16_e32 v15.h, v48.l
+; GFX11-TRUE16-NEXT: v_mov_b16_e32 v16.h, v39.l
+; GFX11-TRUE16-NEXT: v_mov_b16_e32 v17.h, v38.l
+; GFX11-TRUE16-NEXT: v_mov_b16_e32 v18.h, v37.l
+; GFX11-TRUE16-NEXT: v_mov_b16_e32 v19.h, v36.l
+; GFX11-TRUE16-NEXT: v_mov_b16_e32 v20.h, v35.l
+; GFX11-TRUE16-NEXT: v_mov_b16_e32 v21.h, v34.l
+; GFX11-TRUE16-NEXT: v_mov_b16_e32 v22.h, v33.l
+; GFX11-TRUE16-NEXT: v_mov_b16_e32 v23.h, v32.l
+; GFX11-TRUE16-NEXT: v_mov_b16_e32 v24.h, v31.l
+; GFX11-TRUE16-NEXT: v_mov_b16_e32 v25.h, v30.l
+; GFX11-TRUE16-NEXT: v_mov_b16_e32 v26.h, v29.l
+; GFX11-TRUE16-NEXT: v_mov_b16_e32 v27.h, v28.l
+; GFX11-TRUE16-NEXT: s_setpc_b64 s[30:31]
+; GFX11-TRUE16-NEXT: .LBB33_4:
+; GFX11-TRUE16-NEXT: ; implicit-def: $vgpr71
+; GFX11-TRUE16-NEXT: ; implicit-def: $vgpr70
+; GFX11-TRUE16-NEXT: ; implicit-def: $vgpr69
+; GFX11-TRUE16-NEXT: ; implicit-def: $vgpr68
+; GFX11-TRUE16-NEXT: ; implicit-def: $vgpr67
+; GFX11-TRUE16-NEXT: ; implicit-def: $vgpr66
+; GFX11-TRUE16-NEXT: ; implicit-def: $vgpr65
+; GFX11-TRUE16-NEXT: ; implicit-def: $vgpr64
+; GFX11-TRUE16-NEXT: ; implicit-def: $vgpr55
+; GFX11-TRUE16-NEXT: ; implicit-def: $vgpr54
+; GFX11-TRUE16-NEXT: ; implicit-def: $vgpr53
+; GFX11-TRUE16-NEXT: ; implicit-def: $vgpr52
+; GFX11-TRUE16-NEXT: ; implicit-def: $vgpr51
+; GFX11-TRUE16-NEXT: ; implicit-def: $vgpr50
+; GFX11-TRUE16-NEXT: ; implicit-def: $vgpr49
+; GFX11-TRUE16-NEXT: ; implicit-def: $vgpr48
+; GFX11-TRUE16-NEXT: ; implicit-def: $vgpr39
+; GFX11-TRUE16-NEXT: ; implicit-def: $vgpr38
+; GFX11-TRUE16-NEXT: ; implicit-def: $vgpr37
+; GFX11-TRUE16-NEXT: ; implicit-def: $vgpr36
+; GFX11-TRUE16-NEXT: ; implicit-def: $vgpr35
+; GFX11-TRUE16-NEXT: ; implicit-def: $vgpr34
+; GFX11-TRUE16-NEXT: ; implicit-def: $vgpr33
+; GFX11-TRUE16-NEXT: ; implicit-def: $vgpr32
+; GFX11-TRUE16-NEXT: ; implicit-def: $vgpr31
+; GFX11-TRUE16-NEXT: ; implicit-def: $vgpr30
+; GFX11-TRUE16-NEXT: ; implicit-def: $vgpr29
+; GFX11-TRUE16-NEXT: ; implicit-def: $vgpr28
+; GFX11-TRUE16-NEXT: s_branch .LBB33_2
+;
+; GFX11-FAKE16-LABEL: bitcast_v28f32_to_v56f16_scalar:
+; GFX11-FAKE16: ; %bb.0:
+; GFX11-FAKE16-NEXT: s_waitcnt vmcnt(0) expcnt(0) lgkmcnt(0)
+; GFX11-FAKE16-NEXT: v_cmp_ne_u32_e32 vcc_lo, 0, v10
+; GFX11-FAKE16-NEXT: v_dual_mov_b32 v28, s0 :: v_dual_mov_b32 v27, s1
+; GFX11-FAKE16-NEXT: v_dual_mov_b32 v26, s2 :: v_dual_mov_b32 v25, s3
+; GFX11-FAKE16-NEXT: v_dual_mov_b32 v24, s16 :: v_dual_mov_b32 v23, s17
+; GFX11-FAKE16-NEXT: v_dual_mov_b32 v22, s18 :: v_dual_mov_b32 v21, s19
+; GFX11-FAKE16-NEXT: v_dual_mov_b32 v20, s20 :: v_dual_mov_b32 v19, s21
+; GFX11-FAKE16-NEXT: v_dual_mov_b32 v18, s22 :: v_dual_mov_b32 v11, s24
+; GFX11-FAKE16-NEXT: v_dual_mov_b32 v12, s23 :: v_dual_mov_b32 v15, s25
+; GFX11-FAKE16-NEXT: v_dual_mov_b32 v14, s26 :: v_dual_mov_b32 v13, s27
+; GFX11-FAKE16-NEXT: v_dual_mov_b32 v17, s28 :: v_dual_mov_b32 v16, s29
+; GFX11-FAKE16-NEXT: s_mov_b32 s0, 0
+; GFX11-FAKE16-NEXT: s_and_b32 s1, vcc_lo, exec_lo
+; GFX11-FAKE16-NEXT: s_cbranch_scc0 .LBB33_4
+; GFX11-FAKE16-NEXT: ; %bb.1: ; %cmp.false
+; GFX11-FAKE16-NEXT: v_lshrrev_b32_e32 v38, 16, v9
+; GFX11-FAKE16-NEXT: v_lshrrev_b32_e32 v39, 16, v8
+; GFX11-FAKE16-NEXT: v_lshrrev_b32_e32 v48, 16, v7
+; GFX11-FAKE16-NEXT: v_lshrrev_b32_e32 v49, 16, v6
+; GFX11-FAKE16-NEXT: v_lshrrev_b32_e32 v50, 16, v5
+; GFX11-FAKE16-NEXT: v_lshrrev_b32_e32 v51, 16, v4
+; GFX11-FAKE16-NEXT: v_lshrrev_b32_e32 v52, 16, v3
+; GFX11-FAKE16-NEXT: v_lshrrev_b32_e32 v53, 16, v2
+; GFX11-FAKE16-NEXT: v_lshrrev_b32_e32 v54, 16, v1
+; GFX11-FAKE16-NEXT: v_lshrrev_b32_e32 v55, 16, v0
+; GFX11-FAKE16-NEXT: v_lshrrev_b32_e32 v64, 16, v16
+; GFX11-FAKE16-NEXT: v_lshrrev_b32_e32 v65, 16, v17
+; GFX11-FAKE16-NEXT: v_lshrrev_b32_e32 v66, 16, v13
+; GFX11-FAKE16-NEXT: v_lshrrev_b32_e32 v67, 16, v14
+; GFX11-FAKE16-NEXT: v_lshrrev_b32_e32 v68, 16, v15
+; GFX11-FAKE16-NEXT: v_lshrrev_b32_e32 v69, 16, v11
+; GFX11-FAKE16-NEXT: v_lshrrev_b32_e32 v70, 16, v12
+; GFX11-FAKE16-NEXT: v_lshrrev_b32_e32 v10, 16, v18
+; GFX11-FAKE16-NEXT: v_lshrrev_b32_e32 v29, 16, v19
+; GFX11-FAKE16-NEXT: v_lshrrev_b32_e32 v71, 16, v20
+; GFX11-FAKE16-NEXT: v_lshrrev_b32_e32 v37, 16, v21
+; GFX11-FAKE16-NEXT: v_lshrrev_b32_e32 v36, 16, v22
+; GFX11-FAKE16-NEXT: v_lshrrev_b32_e32 v35, 16, v23
+; GFX11-FAKE16-NEXT: v_lshrrev_b32_e32 v34, 16, v24
+; GFX11-FAKE16-NEXT: v_lshrrev_b32_e32 v33, 16, v25
+; GFX11-FAKE16-NEXT: v_lshrrev_b32_e32 v32, 16, v26
+; GFX11-FAKE16-NEXT: v_lshrrev_b32_e32 v31, 16, v27
+; GFX11-FAKE16-NEXT: v_lshrrev_b32_e32 v30, 16, v28
+; GFX11-FAKE16-NEXT: s_and_not1_b32 vcc_lo, exec_lo, s0
+; GFX11-FAKE16-NEXT: s_cbranch_vccnz .LBB33_3
+; GFX11-FAKE16-NEXT: .LBB33_2: ; %cmp.true
+; GFX11-FAKE16-NEXT: v_dual_add_f32 v9, 1.0, v9 :: v_dual_add_f32 v8, 1.0, v8
+; GFX11-FAKE16-NEXT: v_dual_add_f32 v7, 1.0, v7 :: v_dual_add_f32 v6, 1.0, v6
+; GFX11-FAKE16-NEXT: v_dual_add_f32 v5, 1.0, v5 :: v_dual_add_f32 v4, 1.0, v4
+; GFX11-FAKE16-NEXT: v_dual_add_f32 v3, 1.0, v3 :: v_dual_add_f32 v2, 1.0, v2
+; GFX11-FAKE16-NEXT: v_dual_add_f32 v1, 1.0, v1 :: v_dual_add_f32 v0, 1.0, v0
+; GFX11-FAKE16-NEXT: v_dual_add_f32 v16, 1.0, v16 :: v_dual_add_f32 v17, 1.0, v17
+; GFX11-FAKE16-NEXT: v_dual_add_f32 v13, 1.0, v13 :: v_dual_add_f32 v14, 1.0, v14
+; GFX11-FAKE16-NEXT: v_dual_add_f32 v15, 1.0, v15 :: v_dual_add_f32 v12, 1.0, v12
+; GFX11-FAKE16-NEXT: v_dual_add_f32 v11, 1.0, v11 :: v_dual_add_f32 v18, 1.0, v18
+; GFX11-FAKE16-NEXT: v_dual_add_f32 v19, 1.0, v19 :: v_dual_add_f32 v20, 1.0, v20
+; GFX11-FAKE16-NEXT: v_dual_add_f32 v21, 1.0, v21 :: v_dual_add_f32 v22, 1.0, v22
+; GFX11-FAKE16-NEXT: v_dual_add_f32 v23, 1.0, v23 :: v_dual_add_f32 v24, 1.0, v24
+; GFX11-FAKE16-NEXT: v_dual_add_f32 v25, 1.0, v25 :: v_dual_add_f32 v26, 1.0, v26
+; GFX11-FAKE16-NEXT: v_dual_add_f32 v27, 1.0, v27 :: v_dual_add_f32 v28, 1.0, v28
+; GFX11-FAKE16-NEXT: v_lshrrev_b32_e32 v38, 16, v9
+; GFX11-FAKE16-NEXT: v_lshrrev_b32_e32 v39, 16, v8
+; GFX11-FAKE16-NEXT: v_lshrrev_b32_e32 v48, 16, v7
+; GFX11-FAKE16-NEXT: v_lshrrev_b32_e32 v49, 16, v6
+; GFX11-FAKE16-NEXT: v_lshrrev_b32_e32 v50, 16, v5
+; GFX11-FAKE16-NEXT: v_lshrrev_b32_e32 v51, 16, v4
+; GFX11-FAKE16-NEXT: v_lshrrev_b32_e32 v52, 16, v3
+; GFX11-FAKE16-NEXT: v_lshrrev_b32_e32 v53, 16, v2
+; GFX11-FAKE16-NEXT: v_lshrrev_b32_e32 v54, 16, v1
+; GFX11-FAKE16-NEXT: v_lshrrev_b32_e32 v55, 16, v0
+; GFX11-FAKE16-NEXT: v_lshrrev_b32_e32 v64, 16, v16
+; GFX11-FAKE16-NEXT: v_lshrrev_b32_e32 v65, 16, v17
+; GFX11-FAKE16-NEXT: v_lshrrev_b32_e32 v66, 16, v13
+; GFX11-FAKE16-NEXT: v_lshrrev_b32_e32 v67, 16, v14
+; GFX11-FAKE16-NEXT: v_lshrrev_b32_e32 v68, 16, v15
+; GFX11-FAKE16-NEXT: v_lshrrev_b32_e32 v69, 16, v11
+; GFX11-FAKE16-NEXT: v_lshrrev_b32_e32 v70, 16, v12
+; GFX11-FAKE16-NEXT: v_lshrrev_b32_e32 v10, 16, v18
+; GFX11-FAKE16-NEXT: v_lshrrev_b32_e32 v29, 16, v19
+; GFX11-FAKE16-NEXT: v_lshrrev_b32_e32 v71, 16, v20
+; GFX11-FAKE16-NEXT: v_lshrrev_b32_e32 v37, 16, v21
+; GFX11-FAKE16-NEXT: v_lshrrev_b32_e32 v36, 16, v22
+; GFX11-FAKE16-NEXT: v_lshrrev_b32_e32 v35, 16, v23
+; GFX11-FAKE16-NEXT: v_lshrrev_b32_e32 v34, 16, v24
+; GFX11-FAKE16-NEXT: v_lshrrev_b32_e32 v33, 16, v25
+; GFX11-FAKE16-NEXT: v_lshrrev_b32_e32 v32, 16, v26
+; GFX11-FAKE16-NEXT: v_lshrrev_b32_e32 v31, 16, v27
+; GFX11-FAKE16-NEXT: v_lshrrev_b32_e32 v30, 16, v28
+; GFX11-FAKE16-NEXT: .LBB33_3: ; %end
+; GFX11-FAKE16-NEXT: v_and_b32_e32 v19, 0xffff, v19
+; GFX11-FAKE16-NEXT: v_and_b32_e32 v27, 0xffff, v27
+; GFX11-FAKE16-NEXT: v_and_b32_e32 v25, 0xffff, v25
+; GFX11-FAKE16-NEXT: v_and_b32_e32 v23, 0xffff, v23
+; GFX11-FAKE16-NEXT: v_and_b32_e32 v21, 0xffff, v21
+; GFX11-FAKE16-NEXT: v_and_b32_e32 v18, 0xffff, v18
+; GFX11-FAKE16-NEXT: v_lshl_or_b32 v29, v29, 16, v19
+; GFX11-FAKE16-NEXT: v_and_b32_e32 v17, 0xffff, v17
+; GFX11-FAKE16-NEXT: v_and_b32_e32 v19, 0xffff, v16
+; GFX11-FAKE16-NEXT: v_and_b32_e32 v1, 0xffff, v1
+; GFX11-FAKE16-NEXT: v_lshl_or_b32 v31, v31, 16, v27
+; GFX11-FAKE16-NEXT: v_and_b32_e32 v28, 0xffff, v28
+; GFX11-FAKE16-NEXT: v_lshl_or_b32 v33, v33, 16, v25
+; GFX11-FAKE16-NEXT: v_and_b32_e32 v26, 0xffff, v26
+; GFX11-FAKE16-NEXT: v_lshl_or_b32 v35, v35, 16, v23
+; GFX11-FAKE16-NEXT: v_and_b32_e32 v24, 0xffff, v24
+; GFX11-FAKE16-NEXT: v_lshl_or_b32 v37, v37, 16, v21
+; GFX11-FAKE16-NEXT: v_and_b32_e32 v22, 0xffff, v22
+; GFX11-FAKE16-NEXT: v_and_b32_e32 v20, 0xffff, v20
+; GFX11-FAKE16-NEXT: v_and_b32_e32 v12, 0xffff, v12
+; GFX11-FAKE16-NEXT: v_and_b32_e32 v21, 0xffff, v11
+; GFX11-FAKE16-NEXT: v_lshl_or_b32 v10, v10, 16, v18
+; GFX11-FAKE16-NEXT: v_and_b32_e32 v15, 0xffff, v15
+; GFX11-FAKE16-NEXT: v_and_b32_e32 v18, 0xffff, v13
+; GFX11-FAKE16-NEXT: v_lshl_or_b32 v16, v65, 16, v17
+; GFX11-FAKE16-NEXT: v_lshl_or_b32 v17, v64, 16, v19
+; GFX11-FAKE16-NEXT: v_and_b32_e32 v0, 0xffff, v0
+; GFX11-FAKE16-NEXT: v_and_b32_e32 v2, 0xffff, v2
+; GFX11-FAKE16-NEXT: v_and_b32_e32 v3, 0xffff, v3
+; GFX11-FAKE16-NEXT: v_and_b32_e32 v4, 0xffff, v4
+; GFX11-FAKE16-NEXT: v_lshl_or_b32 v19, v54, 16, v1
+; GFX11-FAKE16-NEXT: v_and_b32_e32 v1, 0xffff, v6
+; GFX11-FAKE16-NEXT: v_lshl_or_b32 v32, v32, 16, v26
+; GFX11-FAKE16-NEXT: v_lshl_or_b32 v11, v70, 16, v12
+; GFX11-FAKE16-NEXT: v_lshl_or_b32 v12, v69, 16, v21
+; GFX11-FAKE16-NEXT: v_and_b32_e32 v14, 0xffff, v14
+; GFX11-FAKE16-NEXT: v_lshl_or_b32 v13, v68, 16, v15
+; GFX11-FAKE16-NEXT: v_lshl_or_b32 v15, v66, 16, v18
+; GFX11-FAKE16-NEXT: v_lshl_or_b32 v18, v55, 16, v0
+; GFX11-FAKE16-NEXT: v_lshl_or_b32 v21, v52, 16, v3
+; GFX11-FAKE16-NEXT: v_and_b32_e32 v0, 0xffff, v5
+; GFX11-FAKE16-NEXT: v_and_b32_e32 v3, 0xffff, v8
+; GFX11-FAKE16-NEXT: v_mov_b32_e32 v5, v35
+; GFX11-FAKE16-NEXT: v_lshl_or_b32 v34, v34, 16, v24
+; GFX11-FAKE16-NEXT: v_lshl_or_b32 v24, v49, 16, v1
+; GFX11-FAKE16-NEXT: v_mov_b32_e32 v1, v31
+; GFX11-FAKE16-NEXT: v_lshl_or_b32 v30, v30, 16, v28
+; GFX11-FAKE16-NEXT: v_lshl_or_b32 v28, v71, 16, v20
+; GFX11-FAKE16-NEXT: v_lshl_or_b32 v20, v53, 16, v2
+; GFX11-FAKE16-NEXT: v_dual_mov_b32 v7, v37 :: v_dual_and_b32 v2, 0xffff, v7
+; GFX11-FAKE16-NEXT: v_lshl_or_b32 v36, v36, 16, v22
+; GFX11-FAKE16-NEXT: v_lshl_or_b32 v22, v51, 16, v4
+; GFX11-FAKE16-NEXT: v_and_b32_e32 v4, 0xffff, v9
+; GFX11-FAKE16-NEXT: v_lshl_or_b32 v14, v67, 16, v14
+; GFX11-FAKE16-NEXT: v_lshl_or_b32 v23, v50, 16, v0
+; GFX11-FAKE16-NEXT: v_lshl_or_b32 v25, v48, 16, v2
+; GFX11-FAKE16-NEXT: v_lshl_or_b32 v26, v39, 16, v3
+; GFX11-FAKE16-NEXT: v_lshl_or_b32 v27, v38, 16, v4
+; GFX11-FAKE16-NEXT: v_mov_b32_e32 v0, v30
+; GFX11-FAKE16-NEXT: v_dual_mov_b32 v2, v32 :: v_dual_mov_b32 v3, v33
+; GFX11-FAKE16-NEXT: v_mov_b32_e32 v4, v34
+; GFX11-FAKE16-NEXT: v_mov_b32_e32 v6, v36
+; GFX11-FAKE16-NEXT: v_dual_mov_b32 v8, v28 :: v_dual_mov_b32 v9, v29
+; GFX11-FAKE16-NEXT: s_setpc_b64 s[30:31]
+; GFX11-FAKE16-NEXT: .LBB33_4:
+; GFX11-FAKE16-NEXT: ; implicit-def: $vgpr30
+; GFX11-FAKE16-NEXT: ; implicit-def: $vgpr31
+; GFX11-FAKE16-NEXT: ; implicit-def: $vgpr32
+; GFX11-FAKE16-NEXT: ; implicit-def: $vgpr33
+; GFX11-FAKE16-NEXT: ; implicit-def: $vgpr34
+; GFX11-FAKE16-NEXT: ; implicit-def: $vgpr35
+; GFX11-FAKE16-NEXT: ; implicit-def: $vgpr36
+; GFX11-FAKE16-NEXT: ; implicit-def: $vgpr37
+; GFX11-FAKE16-NEXT: ; implicit-def: $vgpr71
+; GFX11-FAKE16-NEXT: ; implicit-def: $vgpr29
+; GFX11-FAKE16-NEXT: ; implicit-def: $vgpr10
+; GFX11-FAKE16-NEXT: ; implicit-def: $vgpr70
+; GFX11-FAKE16-NEXT: ; implicit-def: $vgpr69
+; GFX11-FAKE16-NEXT: ; implicit-def: $vgpr68
+; GFX11-FAKE16-NEXT: ; implicit-def: $vgpr67
+; GFX11-FAKE16-NEXT: ; implicit-def: $vgpr66
+; GFX11-FAKE16-NEXT: ; implicit-def: $vgpr65
+; GFX11-FAKE16-NEXT: ; implicit-def: $vgpr64
+; GFX11-FAKE16-NEXT: ; implicit-def: $vgpr55
+; GFX11-FAKE16-NEXT: ; implicit-def: $vgpr54
+; GFX11-FAKE16-NEXT: ; implicit-def: $vgpr53
+; GFX11-FAKE16-NEXT: ; implicit-def: $vgpr52
+; GFX11-FAKE16-NEXT: ; implicit-def: $vgpr51
+; GFX11-FAKE16-NEXT: ; implicit-def: $vgpr50
+; GFX11-FAKE16-NEXT: ; implicit-def: $vgpr49
+; GFX11-FAKE16-NEXT: ; implicit-def: $vgpr48
+; GFX11-FAKE16-NEXT: ; implicit-def: $vgpr39
+; GFX11-FAKE16-NEXT: ; implicit-def: $vgpr38
+; GFX11-FAKE16-NEXT: s_branch .LBB33_2
%cmp = icmp eq i32 %b, 0
br i1 %cmp, label %cmp.true, label %cmp.false
@@ -23006,141 +23826,299 @@ define inreg <28 x float> @bitcast_v56f16_to_v28f32_scalar(<56 x half> inreg %a,
; GFX11-TRUE16-LABEL: bitcast_v56f16_to_v28f32_scalar:
; GFX11-TRUE16: ; %bb.0:
; GFX11-TRUE16-NEXT: s_waitcnt vmcnt(0) expcnt(0) lgkmcnt(0)
-; GFX11-TRUE16-NEXT: v_mov_b16_e32 v32.h, 0
; GFX11-TRUE16-NEXT: v_cmp_ne_u32_e32 vcc_lo, 0, v10
-; GFX11-TRUE16-NEXT: v_mov_b16_e32 v32.l, v9.h
-; GFX11-TRUE16-NEXT: v_mov_b16_e32 v33.l, v8.h
-; GFX11-TRUE16-NEXT: v_mov_b16_e32 v34.l, v7.h
-; GFX11-TRUE16-NEXT: v_mov_b16_e32 v33.h, v32.h
-; GFX11-TRUE16-NEXT: v_mov_b16_e32 v34.h, v32.h
-; GFX11-TRUE16-NEXT: v_mov_b16_e32 v35.l, v6.h
-; GFX11-TRUE16-NEXT: v_mov_b16_e32 v35.h, v32.h
-; GFX11-TRUE16-NEXT: v_mov_b16_e32 v36.l, v5.h
-; GFX11-TRUE16-NEXT: v_mov_b16_e32 v36.h, v32.h
-; GFX11-TRUE16-NEXT: v_mov_b16_e32 v37.l, v4.h
-; GFX11-TRUE16-NEXT: v_mov_b16_e32 v37.h, v32.h
-; GFX11-TRUE16-NEXT: v_mov_b16_e32 v38.l, v3.h
-; GFX11-TRUE16-NEXT: v_mov_b16_e32 v38.h, v32.h
-; GFX11-TRUE16-NEXT: v_mov_b16_e32 v39.l, v2.h
-; GFX11-TRUE16-NEXT: v_mov_b16_e32 v39.h, v32.h
-; GFX11-TRUE16-NEXT: v_mov_b16_e32 v48.l, v1.h
-; GFX11-TRUE16-NEXT: v_mov_b16_e32 v48.h, v32.h
-; GFX11-TRUE16-NEXT: v_mov_b16_e32 v49.l, v0.h
-; GFX11-TRUE16-NEXT: v_mov_b16_e32 v49.h, v32.h
-; GFX11-TRUE16-NEXT: v_and_b32_e32 v67, 0xffff, v0
-; GFX11-TRUE16-NEXT: v_and_b32_e32 v66, 0xffff, v1
-; GFX11-TRUE16-NEXT: v_and_b32_e32 v65, 0xffff, v2
-; GFX11-TRUE16-NEXT: v_and_b32_e32 v64, 0xffff, v3
-; GFX11-TRUE16-NEXT: v_and_b32_e32 v55, 0xffff, v4
-; GFX11-TRUE16-NEXT: v_and_b32_e32 v54, 0xffff, v5
-; GFX11-TRUE16-NEXT: v_and_b32_e32 v53, 0xffff, v6
-; GFX11-TRUE16-NEXT: v_and_b32_e32 v52, 0xffff, v7
-; GFX11-TRUE16-NEXT: v_and_b32_e32 v51, 0xffff, v8
-; GFX11-TRUE16-NEXT: v_and_b32_e32 v50, 0xffff, v9
-; GFX11-TRUE16-NEXT: s_lshr_b32 s40, s29, 16
-; GFX11-TRUE16-NEXT: s_lshr_b32 s41, s28, 16
-; GFX11-TRUE16-NEXT: s_lshr_b32 s42, s27, 16
-; GFX11-TRUE16-NEXT: s_lshr_b32 s14, s26, 16
-; GFX11-TRUE16-NEXT: s_lshr_b32 s13, s25, 16
-; GFX11-TRUE16-NEXT: s_lshr_b32 s12, s24, 16
-; GFX11-TRUE16-NEXT: s_lshr_b32 s11, s23, 16
-; GFX11-TRUE16-NEXT: s_lshr_b32 s10, s22, 16
-; GFX11-TRUE16-NEXT: s_lshr_b32 s9, s21, 16
-; GFX11-TRUE16-NEXT: s_lshr_b32 s8, s20, 16
-; GFX11-TRUE16-NEXT: s_lshr_b32 s7, s19, 16
-; GFX11-TRUE16-NEXT: s_lshr_b32 s6, s18, 16
-; GFX11-TRUE16-NEXT: s_lshr_b32 s5, s17, 16
-; GFX11-TRUE16-NEXT: s_lshr_b32 s4, s16, 16
-; GFX11-TRUE16-NEXT: s_lshr_b32 s43, s3, 16
-; GFX11-TRUE16-NEXT: s_lshr_b32 s44, s2, 16
-; GFX11-TRUE16-NEXT: s_lshr_b32 s45, s1, 16
-; GFX11-TRUE16-NEXT: s_lshr_b32 s46, s0, 16
-; GFX11-TRUE16-NEXT: s_mov_b32 s15, 0
-; GFX11-TRUE16-NEXT: s_pack_ll_b32_b16 s0, s0, s46
-; GFX11-TRUE16-NEXT: s_pack_ll_b32_b16 s1, s1, s45
-; GFX11-TRUE16-NEXT: s_pack_ll_b32_b16 s2, s2, s44
-; GFX11-TRUE16-NEXT: s_pack_ll_b32_b16 s3, s3, s43
-; GFX11-TRUE16-NEXT: s_pack_ll_b32_b16 s4, s16, s4
-; GFX11-TRUE16-NEXT: s_pack_ll_b32_b16 s5, s17, s5
-; GFX11-TRUE16-NEXT: s_pack_ll_b32_b16 s6, s18, s6
-; GFX11-TRUE16-NEXT: s_pack_ll_b32_b16 s7, s19, s7
-; GFX11-TRUE16-NEXT: s_pack_ll_b32_b16 s8, s20, s8
-; GFX11-TRUE16-NEXT: s_pack_ll_b32_b16 s9, s21, s9
-; GFX11-TRUE16-NEXT: s_pack_ll_b32_b16 s10, s22, s10
-; GFX11-TRUE16-NEXT: s_pack_ll_b32_b16 s11, s23, s11
-; GFX11-TRUE16-NEXT: s_pack_ll_b32_b16 s12, s24, s12
-; GFX11-TRUE16-NEXT: s_pack_ll_b32_b16 s13, s25, s13
-; GFX11-TRUE16-NEXT: s_pack_ll_b32_b16 s14, s26, s14
-; GFX11-TRUE16-NEXT: s_pack_ll_b32_b16 s16, s27, s42
-; GFX11-TRUE16-NEXT: s_pack_ll_b32_b16 s17, s28, s41
-; GFX11-TRUE16-NEXT: s_pack_ll_b32_b16 s18, s29, s40
+; GFX11-TRUE16-NEXT: s_clause 0x1f
+; GFX11-TRUE16-NEXT: scratch_store_b32 off, v40, s32 offset:316
+; GFX11-TRUE16-NEXT: scratch_store_b32 off, v41, s32 offset:312
+; GFX11-TRUE16-NEXT: scratch_store_b32 off, v42, s32 offset:308
+; GFX11-TRUE16-NEXT: scratch_store_b32 off, v43, s32 offset:304
+; GFX11-TRUE16-NEXT: scratch_store_b32 off, v44, s32 offset:300
+; GFX11-TRUE16-NEXT: scratch_store_b32 off, v45, s32 offset:296
+; GFX11-TRUE16-NEXT: scratch_store_b32 off, v46, s32 offset:292
+; GFX11-TRUE16-NEXT: scratch_store_b32 off, v47, s32 offset:288
+; GFX11-TRUE16-NEXT: scratch_store_b32 off, v56, s32 offset:284
+; GFX11-TRUE16-NEXT: scratch_store_b32 off, v57, s32 offset:280
+; GFX11-TRUE16-NEXT: scratch_store_b32 off, v58, s32 offset:276
+; GFX11-TRUE16-NEXT: scratch_store_b32 off, v59, s32 offset:272
+; GFX11-TRUE16-NEXT: scratch_store_b32 off, v60, s32 offset:268
+; GFX11-TRUE16-NEXT: scratch_store_b32 off, v61, s32 offset:264
+; GFX11-TRUE16-NEXT: scratch_store_b32 off, v62, s32 offset:260
+; GFX11-TRUE16-NEXT: scratch_store_b32 off, v63, s32 offset:256
+; GFX11-TRUE16-NEXT: scratch_store_b32 off, v72, s32 offset:252
+; GFX11-TRUE16-NEXT: scratch_store_b32 off, v73, s32 offset:248
+; GFX11-TRUE16-NEXT: scratch_store_b32 off, v74, s32 offset:244
+; GFX11-TRUE16-NEXT: scratch_store_b32 off, v75, s32 offset:240
+; GFX11-TRUE16-NEXT: scratch_store_b32 off, v76, s32 offset:236
+; GFX11-TRUE16-NEXT: scratch_store_b32 off, v77, s32 offset:232
+; GFX11-TRUE16-NEXT: scratch_store_b32 off, v78, s32 offset:228
+; GFX11-TRUE16-NEXT: scratch_store_b32 off, v79, s32 offset:224
+; GFX11-TRUE16-NEXT: scratch_store_b32 off, v88, s32 offset:220
+; GFX11-TRUE16-NEXT: scratch_store_b32 off, v89, s32 offset:216
+; GFX11-TRUE16-NEXT: scratch_store_b32 off, v90, s32 offset:212
+; GFX11-TRUE16-NEXT: scratch_store_b32 off, v91, s32 offset:208
+; GFX11-TRUE16-NEXT: scratch_store_b32 off, v92, s32 offset:204
+; GFX11-TRUE16-NEXT: scratch_store_b32 off, v93, s32 offset:200
+; GFX11-TRUE16-NEXT: scratch_store_b32 off, v94, s32 offset:196
+; GFX11-TRUE16-NEXT: scratch_store_b32 off, v95, s32 offset:192
+; GFX11-TRUE16-NEXT: s_clause 0x1f
+; GFX11-TRUE16-NEXT: scratch_store_b32 off, v104, s32 offset:188
+; GFX11-TRUE16-NEXT: scratch_store_b32 off, v105, s32 offset:184
+; GFX11-TRUE16-NEXT: scratch_store_b32 off, v106, s32 offset:180
+; GFX11-TRUE16-NEXT: scratch_store_b32 off, v107, s32 offset:176
+; GFX11-TRUE16-NEXT: scratch_store_b32 off, v108, s32 offset:172
+; GFX11-TRUE16-NEXT: scratch_store_b32 off, v109, s32 offset:168
+; GFX11-TRUE16-NEXT: scratch_store_b32 off, v110, s32 offset:164
+; GFX11-TRUE16-NEXT: scratch_store_b32 off, v111, s32 offset:160
+; GFX11-TRUE16-NEXT: scratch_store_b32 off, v120, s32 offset:156
+; GFX11-TRUE16-NEXT: scratch_store_b32 off, v121, s32 offset:152
+; GFX11-TRUE16-NEXT: scratch_store_b32 off, v122, s32 offset:148
+; GFX11-TRUE16-NEXT: scratch_store_b32 off, v123, s32 offset:144
+; GFX11-TRUE16-NEXT: scratch_store_b32 off, v124, s32 offset:140
+; GFX11-TRUE16-NEXT: scratch_store_b32 off, v125, s32 offset:136
+; GFX11-TRUE16-NEXT: scratch_store_b32 off, v126, s32 offset:132
+; GFX11-TRUE16-NEXT: scratch_store_b32 off, v127, s32 offset:128
+; GFX11-TRUE16-NEXT: scratch_store_b32 off, v136, s32 offset:124
+; GFX11-TRUE16-NEXT: scratch_store_b32 off, v137, s32 offset:120
+; GFX11-TRUE16-NEXT: scratch_store_b32 off, v138, s32 offset:116
+; GFX11-TRUE16-NEXT: scratch_store_b32 off, v139, s32 offset:112
+; GFX11-TRUE16-NEXT: scratch_store_b32 off, v140, s32 offset:108
+; GFX11-TRUE16-NEXT: scratch_store_b32 off, v141, s32 offset:104
+; GFX11-TRUE16-NEXT: scratch_store_b32 off, v142, s32 offset:100
+; GFX11-TRUE16-NEXT: scratch_store_b32 off, v143, s32 offset:96
+; GFX11-TRUE16-NEXT: scratch_store_b32 off, v152, s32 offset:92
+; GFX11-TRUE16-NEXT: scratch_store_b32 off, v153, s32 offset:88
+; GFX11-TRUE16-NEXT: scratch_store_b32 off, v154, s32 offset:84
+; GFX11-TRUE16-NEXT: scratch_store_b32 off, v155, s32 offset:80
+; GFX11-TRUE16-NEXT: scratch_store_b32 off, v156, s32 offset:76
+; GFX11-TRUE16-NEXT: scratch_store_b32 off, v157, s32 offset:72
+; GFX11-TRUE16-NEXT: scratch_store_b32 off, v158, s32 offset:68
+; GFX11-TRUE16-NEXT: scratch_store_b32 off, v159, s32 offset:64
+; GFX11-TRUE16-NEXT: s_clause 0xf
+; GFX11-TRUE16-NEXT: scratch_store_b32 off, v168, s32 offset:60
+; GFX11-TRUE16-NEXT: scratch_store_b32 off, v169, s32 offset:56
+; GFX11-TRUE16-NEXT: scratch_store_b32 off, v170, s32 offset:52
+; GFX11-TRUE16-NEXT: scratch_store_b32 off, v171, s32 offset:48
+; GFX11-TRUE16-NEXT: scratch_store_b32 off, v172, s32 offset:44
+; GFX11-TRUE16-NEXT: scratch_store_b32 off, v173, s32 offset:40
+; GFX11-TRUE16-NEXT: scratch_store_b32 off, v174, s32 offset:36
+; GFX11-TRUE16-NEXT: scratch_store_b32 off, v175, s32 offset:32
+; GFX11-TRUE16-NEXT: scratch_store_b32 off, v184, s32 offset:28
+; GFX11-TRUE16-NEXT: scratch_store_b32 off, v185, s32 offset:24
+; GFX11-TRUE16-NEXT: scratch_store_b32 off, v186, s32 offset:20
+; GFX11-TRUE16-NEXT: scratch_store_b32 off, v187, s32 offset:16
+; GFX11-TRUE16-NEXT: scratch_store_b32 off, v188, s32 offset:12
+; GFX11-TRUE16-NEXT: scratch_store_b32 off, v189, s32 offset:8
+; GFX11-TRUE16-NEXT: scratch_store_b32 off, v190, s32 offset:4
+; GFX11-TRUE16-NEXT: scratch_store_b32 off, v191, s32
+; GFX11-TRUE16-NEXT: v_dual_mov_b32 v28, v9 :: v_dual_mov_b32 v25, v7
+; GFX11-TRUE16-NEXT: v_dual_mov_b32 v26, v8 :: v_dual_mov_b32 v189, v5
+; GFX11-TRUE16-NEXT: v_dual_mov_b32 v188, v6 :: v_dual_mov_b32 v191, v3
+; GFX11-TRUE16-NEXT: v_dual_mov_b32 v190, v4 :: v_dual_mov_b32 v185, v2
+; GFX11-TRUE16-NEXT: v_dual_mov_b32 v186, v1 :: v_dual_mov_b32 v187, v0
+; GFX11-TRUE16-NEXT: s_lshr_b32 s15, s29, 16
+; GFX11-TRUE16-NEXT: s_lshr_b32 s14, s28, 16
+; GFX11-TRUE16-NEXT: s_lshr_b32 s13, s27, 16
+; GFX11-TRUE16-NEXT: s_lshr_b32 s12, s26, 16
+; GFX11-TRUE16-NEXT: s_lshr_b32 s11, s25, 16
+; GFX11-TRUE16-NEXT: s_lshr_b32 s10, s24, 16
+; GFX11-TRUE16-NEXT: s_lshr_b32 s9, s23, 16
+; GFX11-TRUE16-NEXT: s_lshr_b32 s8, s22, 16
+; GFX11-TRUE16-NEXT: s_lshr_b32 s7, s21, 16
+; GFX11-TRUE16-NEXT: s_lshr_b32 s6, s20, 16
+; GFX11-TRUE16-NEXT: s_lshr_b32 s5, s19, 16
+; GFX11-TRUE16-NEXT: s_lshr_b32 s4, s18, 16
+; GFX11-TRUE16-NEXT: s_lshr_b32 s43, s17, 16
+; GFX11-TRUE16-NEXT: s_lshr_b32 s44, s16, 16
+; GFX11-TRUE16-NEXT: s_lshr_b32 s45, s3, 16
+; GFX11-TRUE16-NEXT: s_lshr_b32 s46, s2, 16
+; GFX11-TRUE16-NEXT: s_lshr_b32 s41, s1, 16
+; GFX11-TRUE16-NEXT: s_lshr_b32 s40, s0, 16
+; GFX11-TRUE16-NEXT: s_mov_b32 s42, 0
+; GFX11-TRUE16-NEXT: s_pack_ll_b32_b16 s40, s0, s40
+; GFX11-TRUE16-NEXT: s_pack_ll_b32_b16 s41, s1, s41
+; GFX11-TRUE16-NEXT: s_pack_ll_b32_b16 s0, s2, s46
+; GFX11-TRUE16-NEXT: s_pack_ll_b32_b16 s1, s3, s45
+; GFX11-TRUE16-NEXT: s_pack_ll_b32_b16 s2, s16, s44
+; GFX11-TRUE16-NEXT: s_pack_ll_b32_b16 s3, s17, s43
+; GFX11-TRUE16-NEXT: s_pack_ll_b32_b16 s4, s18, s4
+; GFX11-TRUE16-NEXT: s_pack_ll_b32_b16 s5, s19, s5
+; GFX11-TRUE16-NEXT: s_pack_ll_b32_b16 s6, s20, s6
+; GFX11-TRUE16-NEXT: s_pack_ll_b32_b16 s7, s21, s7
+; GFX11-TRUE16-NEXT: s_pack_ll_b32_b16 s8, s22, s8
+; GFX11-TRUE16-NEXT: s_pack_ll_b32_b16 s9, s23, s9
+; GFX11-TRUE16-NEXT: s_pack_ll_b32_b16 s10, s24, s10
+; GFX11-TRUE16-NEXT: s_pack_ll_b32_b16 s11, s25, s11
+; GFX11-TRUE16-NEXT: s_pack_ll_b32_b16 s12, s26, s12
+; GFX11-TRUE16-NEXT: s_pack_ll_b32_b16 s13, s27, s13
+; GFX11-TRUE16-NEXT: s_pack_ll_b32_b16 s14, s28, s14
+; GFX11-TRUE16-NEXT: s_pack_ll_b32_b16 s15, s29, s15
; GFX11-TRUE16-NEXT: s_and_b32 s47, vcc_lo, exec_lo
; GFX11-TRUE16-NEXT: s_cbranch_scc0 .LBB35_4
; GFX11-TRUE16-NEXT: ; %bb.1: ; %cmp.false
-; GFX11-TRUE16-NEXT: v_lshl_or_b32 v18, v49, 16, v67
-; GFX11-TRUE16-NEXT: v_lshl_or_b32 v19, v48, 16, v66
-; GFX11-TRUE16-NEXT: v_lshl_or_b32 v20, v39, 16, v65
-; GFX11-TRUE16-NEXT: v_lshl_or_b32 v21, v38, 16, v64
-; GFX11-TRUE16-NEXT: v_lshl_or_b32 v22, v37, 16, v55
-; GFX11-TRUE16-NEXT: v_lshl_or_b32 v23, v36, 16, v54
-; GFX11-TRUE16-NEXT: v_lshl_or_b32 v24, v35, 16, v53
-; GFX11-TRUE16-NEXT: v_lshl_or_b32 v25, v34, 16, v52
-; GFX11-TRUE16-NEXT: v_lshl_or_b32 v26, v33, 16, v51
-; GFX11-TRUE16-NEXT: v_lshl_or_b32 v27, v32, 16, v50
-; GFX11-TRUE16-NEXT: v_dual_mov_b32 v0, s0 :: v_dual_mov_b32 v1, s1
-; GFX11-TRUE16-NEXT: v_dual_mov_b32 v2, s2 :: v_dual_mov_b32 v3, s3
-; GFX11-TRUE16-NEXT: v_dual_mov_b32 v4, s4 :: v_dual_mov_b32 v5, s5
-; GFX11-TRUE16-NEXT: v_dual_mov_b32 v6, s6 :: v_dual_mov_b32 v7, s7
-; GFX11-TRUE16-NEXT: v_dual_mov_b32 v8, s8 :: v_dual_mov_b32 v9, s9
-; GFX11-TRUE16-NEXT: v_dual_mov_b32 v10, s10 :: v_dual_mov_b32 v11, s11
-; GFX11-TRUE16-NEXT: v_dual_mov_b32 v12, s12 :: v_dual_mov_b32 v13, s13
-; GFX11-TRUE16-NEXT: v_dual_mov_b32 v14, s14 :: v_dual_mov_b32 v15, s16
-; GFX11-TRUE16-NEXT: v_dual_mov_b32 v16, s17 :: v_dual_mov_b32 v17, s18
-; GFX11-TRUE16-NEXT: s_and_not1_b32 vcc_lo, exec_lo, s15
+; GFX11-TRUE16-NEXT: v_dual_mov_b32 v0, s40 :: v_dual_mov_b32 v5, s0
+; GFX11-TRUE16-NEXT: v_dual_mov_b32 v2, s41 :: v_dual_mov_b32 v9, s1
+; GFX11-TRUE16-NEXT: v_dual_mov_b32 v14, s2 :: v_dual_mov_b32 v27, s4
+; GFX11-TRUE16-NEXT: v_dual_mov_b32 v20, s3 :: v_dual_mov_b32 v35, s5
+; GFX11-TRUE16-NEXT: v_dual_mov_b32 v44, s6 :: v_dual_mov_b32 v65, s8
+; GFX11-TRUE16-NEXT: v_dual_mov_b32 v54, s7 :: v_dual_mov_b32 v77, s9
+; GFX11-TRUE16-NEXT: v_dual_mov_b32 v90, s10 :: v_dual_mov_b32 v119, s12
+; GFX11-TRUE16-NEXT: v_dual_mov_b32 v104, s11 :: v_dual_mov_b32 v135, s13
+; GFX11-TRUE16-NEXT: v_mov_b32_e32 v152, s14
+; GFX11-TRUE16-NEXT: v_mov_b32_e32 v170, s15
+; GFX11-TRUE16-NEXT: s_and_not1_b32 vcc_lo, exec_lo, s42
; GFX11-TRUE16-NEXT: s_cbranch_vccnz .LBB35_3
; GFX11-TRUE16-NEXT: .LBB35_2: ; %cmp.true
-; GFX11-TRUE16-NEXT: v_lshl_or_b32 v18, v49, 16, v67
-; GFX11-TRUE16-NEXT: v_lshl_or_b32 v19, v48, 16, v66
-; GFX11-TRUE16-NEXT: v_lshl_or_b32 v20, v39, 16, v65
-; GFX11-TRUE16-NEXT: v_lshl_or_b32 v21, v38, 16, v64
-; GFX11-TRUE16-NEXT: v_lshl_or_b32 v22, v37, 16, v55
-; GFX11-TRUE16-NEXT: v_lshl_or_b32 v23, v36, 16, v54
-; GFX11-TRUE16-NEXT: v_lshl_or_b32 v24, v35, 16, v53
-; GFX11-TRUE16-NEXT: v_lshl_or_b32 v25, v34, 16, v52
-; GFX11-TRUE16-NEXT: v_lshl_or_b32 v26, v33, 16, v51
-; GFX11-TRUE16-NEXT: v_lshl_or_b32 v27, v32, 16, v50
-; GFX11-TRUE16-NEXT: v_pk_add_f16 v0, 0x200, s0 op_sel_hi:[0,1]
-; GFX11-TRUE16-NEXT: v_pk_add_f16 v1, 0x200, s1 op_sel_hi:[0,1]
-; GFX11-TRUE16-NEXT: v_pk_add_f16 v2, 0x200, s2 op_sel_hi:[0,1]
-; GFX11-TRUE16-NEXT: v_pk_add_f16 v3, 0x200, s3 op_sel_hi:[0,1]
-; GFX11-TRUE16-NEXT: v_pk_add_f16 v4, 0x200, s4 op_sel_hi:[0,1]
-; GFX11-TRUE16-NEXT: v_pk_add_f16 v5, 0x200, s5 op_sel_hi:[0,1]
-; GFX11-TRUE16-NEXT: v_pk_add_f16 v6, 0x200, s6 op_sel_hi:[0,1]
-; GFX11-TRUE16-NEXT: v_pk_add_f16 v7, 0x200, s7 op_sel_hi:[0,1]
-; GFX11-TRUE16-NEXT: v_pk_add_f16 v8, 0x200, s8 op_sel_hi:[0,1]
-; GFX11-TRUE16-NEXT: v_pk_add_f16 v9, 0x200, s9 op_sel_hi:[0,1]
-; GFX11-TRUE16-NEXT: v_pk_add_f16 v10, 0x200, s10 op_sel_hi:[0,1]
-; GFX11-TRUE16-NEXT: v_pk_add_f16 v11, 0x200, s11 op_sel_hi:[0,1]
-; GFX11-TRUE16-NEXT: v_pk_add_f16 v12, 0x200, s12 op_sel_hi:[0,1]
-; GFX11-TRUE16-NEXT: v_pk_add_f16 v13, 0x200, s13 op_sel_hi:[0,1]
-; GFX11-TRUE16-NEXT: v_pk_add_f16 v14, 0x200, s14 op_sel_hi:[0,1]
-; GFX11-TRUE16-NEXT: v_pk_add_f16 v15, 0x200, s16 op_sel_hi:[0,1]
-; GFX11-TRUE16-NEXT: v_pk_add_f16 v16, 0x200, s17 op_sel_hi:[0,1]
-; GFX11-TRUE16-NEXT: v_pk_add_f16 v17, 0x200, s18 op_sel_hi:[0,1]
-; GFX11-TRUE16-NEXT: v_pk_add_f16 v18, 0x200, v18 op_sel_hi:[0,1]
-; GFX11-TRUE16-NEXT: v_pk_add_f16 v19, 0x200, v19 op_sel_hi:[0,1]
-; GFX11-TRUE16-NEXT: v_pk_add_f16 v20, 0x200, v20 op_sel_hi:[0,1]
-; GFX11-TRUE16-NEXT: v_pk_add_f16 v21, 0x200, v21 op_sel_hi:[0,1]
-; GFX11-TRUE16-NEXT: v_pk_add_f16 v22, 0x200, v22 op_sel_hi:[0,1]
-; GFX11-TRUE16-NEXT: v_pk_add_f16 v23, 0x200, v23 op_sel_hi:[0,1]
-; GFX11-TRUE16-NEXT: v_pk_add_f16 v24, 0x200, v24 op_sel_hi:[0,1]
+; GFX11-TRUE16-NEXT: v_pk_add_f16 v0, 0x200, s40 op_sel_hi:[0,1]
+; GFX11-TRUE16-NEXT: v_pk_add_f16 v2, 0x200, s41 op_sel_hi:[0,1]
+; GFX11-TRUE16-NEXT: v_pk_add_f16 v187, 0x200, v187 op_sel_hi:[0,1]
+; GFX11-TRUE16-NEXT: v_pk_add_f16 v186, 0x200, v186 op_sel_hi:[0,1]
+; GFX11-TRUE16-NEXT: v_pk_add_f16 v185, 0x200, v185 op_sel_hi:[0,1]
+; GFX11-TRUE16-NEXT: v_pk_add_f16 v191, 0x200, v191 op_sel_hi:[0,1]
+; GFX11-TRUE16-NEXT: v_pk_add_f16 v190, 0x200, v190 op_sel_hi:[0,1]
+; GFX11-TRUE16-NEXT: v_pk_add_f16 v189, 0x200, v189 op_sel_hi:[0,1]
+; GFX11-TRUE16-NEXT: v_pk_add_f16 v188, 0x200, v188 op_sel_hi:[0,1]
; GFX11-TRUE16-NEXT: v_pk_add_f16 v25, 0x200, v25 op_sel_hi:[0,1]
; GFX11-TRUE16-NEXT: v_pk_add_f16 v26, 0x200, v26 op_sel_hi:[0,1]
-; GFX11-TRUE16-NEXT: v_pk_add_f16 v27, 0x200, v27 op_sel_hi:[0,1]
+; GFX11-TRUE16-NEXT: v_pk_add_f16 v28, 0x200, v28 op_sel_hi:[0,1]
+; GFX11-TRUE16-NEXT: v_pk_add_f16 v5, 0x200, s0 op_sel_hi:[0,1]
+; GFX11-TRUE16-NEXT: v_pk_add_f16 v9, 0x200, s1 op_sel_hi:[0,1]
+; GFX11-TRUE16-NEXT: v_pk_add_f16 v14, 0x200, s2 op_sel_hi:[0,1]
+; GFX11-TRUE16-NEXT: v_pk_add_f16 v20, 0x200, s3 op_sel_hi:[0,1]
+; GFX11-TRUE16-NEXT: v_pk_add_f16 v27, 0x200, s4 op_sel_hi:[0,1]
+; GFX11-TRUE16-NEXT: v_pk_add_f16 v35, 0x200, s5 op_sel_hi:[0,1]
+; GFX11-TRUE16-NEXT: v_pk_add_f16 v44, 0x200, s6 op_sel_hi:[0,1]
+; GFX11-TRUE16-NEXT: v_pk_add_f16 v54, 0x200, s7 op_sel_hi:[0,1]
+; GFX11-TRUE16-NEXT: v_pk_add_f16 v65, 0x200, s8 op_sel_hi:[0,1]
+; GFX11-TRUE16-NEXT: v_pk_add_f16 v77, 0x200, s9 op_sel_hi:[0,1]
+; GFX11-TRUE16-NEXT: v_pk_add_f16 v90, 0x200, s10 op_sel_hi:[0,1]
+; GFX11-TRUE16-NEXT: v_pk_add_f16 v104, 0x200, s11 op_sel_hi:[0,1]
+; GFX11-TRUE16-NEXT: v_pk_add_f16 v119, 0x200, s12 op_sel_hi:[0,1]
+; GFX11-TRUE16-NEXT: v_pk_add_f16 v135, 0x200, s13 op_sel_hi:[0,1]
+; GFX11-TRUE16-NEXT: v_pk_add_f16 v152, 0x200, s14 op_sel_hi:[0,1]
+; GFX11-TRUE16-NEXT: v_pk_add_f16 v170, 0x200, s15 op_sel_hi:[0,1]
; GFX11-TRUE16-NEXT: .LBB35_3: ; %end
+; GFX11-TRUE16-NEXT: v_dual_mov_b32 v1, v2 :: v_dual_mov_b32 v2, v5
+; GFX11-TRUE16-NEXT: v_dual_mov_b32 v5, v20 :: v_dual_mov_b32 v6, v27
+; GFX11-TRUE16-NEXT: v_dual_mov_b32 v7, v35 :: v_dual_mov_b32 v8, v44
+; GFX11-TRUE16-NEXT: v_dual_mov_b32 v11, v77 :: v_dual_mov_b32 v12, v90
+; GFX11-TRUE16-NEXT: v_mov_b32_e32 v13, v104
+; GFX11-TRUE16-NEXT: v_dual_mov_b32 v15, v135 :: v_dual_mov_b32 v16, v152
+; GFX11-TRUE16-NEXT: v_dual_mov_b32 v17, v170 :: v_dual_mov_b32 v18, v187
+; GFX11-TRUE16-NEXT: v_dual_mov_b32 v19, v186 :: v_dual_mov_b32 v20, v185
+; GFX11-TRUE16-NEXT: v_dual_mov_b32 v21, v191 :: v_dual_mov_b32 v22, v190
+; GFX11-TRUE16-NEXT: v_dual_mov_b32 v23, v189 :: v_dual_mov_b32 v24, v188
+; GFX11-TRUE16-NEXT: s_clause 0x1f
+; GFX11-TRUE16-NEXT: scratch_load_b32 v191, off, s32
+; GFX11-TRUE16-NEXT: scratch_load_b32 v190, off, s32 offset:4
+; GFX11-TRUE16-NEXT: scratch_load_b32 v189, off, s32 offset:8
+; GFX11-TRUE16-NEXT: scratch_load_b32 v188, off, s32 offset:12
+; GFX11-TRUE16-NEXT: scratch_load_b32 v187, off, s32 offset:16
+; GFX11-TRUE16-NEXT: scratch_load_b32 v186, off, s32 offset:20
+; GFX11-TRUE16-NEXT: scratch_load_b32 v185, off, s32 offset:24
+; GFX11-TRUE16-NEXT: scratch_load_b32 v184, off, s32 offset:28
+; GFX11-TRUE16-NEXT: scratch_load_b32 v175, off, s32 offset:32
+; GFX11-TRUE16-NEXT: scratch_load_b32 v174, off, s32 offset:36
+; GFX11-TRUE16-NEXT: scratch_load_b32 v173, off, s32 offset:40
+; GFX11-TRUE16-NEXT: scratch_load_b32 v172, off, s32 offset:44
+; GFX11-TRUE16-NEXT: scratch_load_b32 v171, off, s32 offset:48
+; GFX11-TRUE16-NEXT: scratch_load_b32 v170, off, s32 offset:52
+; GFX11-TRUE16-NEXT: scratch_load_b32 v169, off, s32 offset:56
+; GFX11-TRUE16-NEXT: scratch_load_b32 v168, off, s32 offset:60
+; GFX11-TRUE16-NEXT: scratch_load_b32 v159, off, s32 offset:64
+; GFX11-TRUE16-NEXT: scratch_load_b32 v158, off, s32 offset:68
+; GFX11-TRUE16-NEXT: scratch_load_b32 v157, off, s32 offset:72
+; GFX11-TRUE16-NEXT: scratch_load_b32 v156, off, s32 offset:76
+; GFX11-TRUE16-NEXT: scratch_load_b32 v155, off, s32 offset:80
+; GFX11-TRUE16-NEXT: scratch_load_b32 v154, off, s32 offset:84
+; GFX11-TRUE16-NEXT: scratch_load_b32 v153, off, s32 offset:88
+; GFX11-TRUE16-NEXT: scratch_load_b32 v152, off, s32 offset:92
+; GFX11-TRUE16-NEXT: scratch_load_b32 v143, off, s32 offset:96
+; GFX11-TRUE16-NEXT: scratch_load_b32 v142, off, s32 offset:100
+; GFX11-TRUE16-NEXT: scratch_load_b32 v141, off, s32 offset:104
+; GFX11-TRUE16-NEXT: scratch_load_b32 v140, off, s32 offset:108
+; GFX11-TRUE16-NEXT: scratch_load_b32 v139, off, s32 offset:112
+; GFX11-TRUE16-NEXT: scratch_load_b32 v138, off, s32 offset:116
+; GFX11-TRUE16-NEXT: scratch_load_b32 v137, off, s32 offset:120
+; GFX11-TRUE16-NEXT: scratch_load_b32 v136, off, s32 offset:124
+; GFX11-TRUE16-NEXT: s_clause 0x1f
+; GFX11-TRUE16-NEXT: scratch_load_b32 v127, off, s32 offset:128
+; GFX11-TRUE16-NEXT: scratch_load_b32 v126, off, s32 offset:132
+; GFX11-TRUE16-NEXT: scratch_load_b32 v125, off, s32 offset:136
+; GFX11-TRUE16-NEXT: scratch_load_b32 v124, off, s32 offset:140
+; GFX11-TRUE16-NEXT: scratch_load_b32 v123, off, s32 offset:144
+; GFX11-TRUE16-NEXT: scratch_load_b32 v122, off, s32 offset:148
+; GFX11-TRUE16-NEXT: scratch_load_b32 v121, off, s32 offset:152
+; GFX11-TRUE16-NEXT: scratch_load_b32 v120, off, s32 offset:156
+; GFX11-TRUE16-NEXT: scratch_load_b32 v111, off, s32 offset:160
+; GFX11-TRUE16-NEXT: scratch_load_b32 v110, off, s32 offset:164
+; GFX11-TRUE16-NEXT: scratch_load_b32 v109, off, s32 offset:168
+; GFX11-TRUE16-NEXT: scratch_load_b32 v108, off, s32 offset:172
+; GFX11-TRUE16-NEXT: scratch_load_b32 v107, off, s32 offset:176
+; GFX11-TRUE16-NEXT: scratch_load_b32 v106, off, s32 offset:180
+; GFX11-TRUE16-NEXT: scratch_load_b32 v105, off, s32 offset:184
+; GFX11-TRUE16-NEXT: scratch_load_b32 v104, off, s32 offset:188
+; GFX11-TRUE16-NEXT: scratch_load_b32 v95, off, s32 offset:192
+; GFX11-TRUE16-NEXT: scratch_load_b32 v94, off, s32 offset:196
+; GFX11-TRUE16-NEXT: scratch_load_b32 v93, off, s32 offset:200
+; GFX11-TRUE16-NEXT: scratch_load_b32 v92, off, s32 offset:204
+; GFX11-TRUE16-NEXT: scratch_load_b32 v91, off, s32 offset:208
+; GFX11-TRUE16-NEXT: scratch_load_b32 v90, off, s32 offset:212
+; GFX11-TRUE16-NEXT: scratch_load_b32 v89, off, s32 offset:216
+; GFX11-TRUE16-NEXT: scratch_load_b32 v88, off, s32 offset:220
+; GFX11-TRUE16-NEXT: scratch_load_b32 v79, off, s32 offset:224
+; GFX11-TRUE16-NEXT: scratch_load_b32 v78, off, s32 offset:228
+; GFX11-TRUE16-NEXT: scratch_load_b32 v77, off, s32 offset:232
+; GFX11-TRUE16-NEXT: scratch_load_b32 v76, off, s32 offset:236
+; GFX11-TRUE16-NEXT: scratch_load_b32 v75, off, s32 offset:240
+; GFX11-TRUE16-NEXT: scratch_load_b32 v74, off, s32 offset:244
+; GFX11-TRUE16-NEXT: scratch_load_b32 v73, off, s32 offset:248
+; GFX11-TRUE16-NEXT: scratch_load_b32 v72, off, s32 offset:252
+; GFX11-TRUE16-NEXT: s_clause 0xf
+; GFX11-TRUE16-NEXT: scratch_load_b32 v63, off, s32 offset:256
+; GFX11-TRUE16-NEXT: scratch_load_b32 v62, off, s32 offset:260
+; GFX11-TRUE16-NEXT: scratch_load_b32 v61, off, s32 offset:264
+; GFX11-TRUE16-NEXT: scratch_load_b32 v60, off, s32 offset:268
+; GFX11-TRUE16-NEXT: scratch_load_b32 v59, off, s32 offset:272
+; GFX11-TRUE16-NEXT: scratch_load_b32 v58, off, s32 offset:276
+; GFX11-TRUE16-NEXT: scratch_load_b32 v57, off, s32 offset:280
+; GFX11-TRUE16-NEXT: scratch_load_b32 v56, off, s32 offset:284
+; GFX11-TRUE16-NEXT: scratch_load_b32 v47, off, s32 offset:288
+; GFX11-TRUE16-NEXT: scratch_load_b32 v46, off, s32 offset:292
+; GFX11-TRUE16-NEXT: scratch_load_b32 v45, off, s32 offset:296
+; GFX11-TRUE16-NEXT: scratch_load_b32 v44, off, s32 offset:300
+; GFX11-TRUE16-NEXT: scratch_load_b32 v43, off, s32 offset:304
+; GFX11-TRUE16-NEXT: scratch_load_b32 v42, off, s32 offset:308
+; GFX11-TRUE16-NEXT: scratch_load_b32 v41, off, s32 offset:312
+; GFX11-TRUE16-NEXT: scratch_load_b32 v40, off, s32 offset:316
+; GFX11-TRUE16-NEXT: v_dual_mov_b32 v3, v9 :: v_dual_mov_b32 v4, v14
+; GFX11-TRUE16-NEXT: v_dual_mov_b32 v9, v54 :: v_dual_mov_b32 v10, v65
+; GFX11-TRUE16-NEXT: v_dual_mov_b32 v14, v119 :: v_dual_mov_b32 v27, v28
+; GFX11-TRUE16-NEXT: s_waitcnt vmcnt(0)
; GFX11-TRUE16-NEXT: s_setpc_b64 s[30:31]
; GFX11-TRUE16-NEXT: .LBB35_4:
+; GFX11-TRUE16-NEXT: v_dual_mov_b32 v64, v28 :: v_dual_mov_b32 v53, v26
+; GFX11-TRUE16-NEXT: v_mov_b32_e32 v54, v25
; GFX11-TRUE16-NEXT: ; implicit-def: $vgpr0_vgpr1_vgpr2_vgpr3_vgpr4_vgpr5_vgpr6_vgpr7_vgpr8_vgpr9_vgpr10_vgpr11_vgpr12_vgpr13_vgpr14_vgpr15_vgpr16_vgpr17_vgpr18_vgpr19_vgpr20_vgpr21_vgpr22_vgpr23_vgpr24_vgpr25_vgpr26_vgpr27_vgpr28_vgpr29_vgpr30_vgpr31
+; GFX11-TRUE16-NEXT: ; implicit-def: $vgpr1_vgpr2_vgpr3_vgpr4_vgpr5_vgpr6_vgpr7_vgpr8_vgpr9_vgpr10_vgpr11_vgpr12_vgpr13_vgpr14_vgpr15_vgpr16_vgpr17_vgpr18_vgpr19_vgpr20_vgpr21_vgpr22_vgpr23_vgpr24_vgpr25_vgpr26_vgpr27_vgpr28_vgpr29_vgpr30_vgpr31_vgpr32
+; GFX11-TRUE16-NEXT: ; implicit-def: $vgpr3_vgpr4_vgpr5_vgpr6_vgpr7_vgpr8_vgpr9_vgpr10_vgpr11_vgpr12_vgpr13_vgpr14_vgpr15_vgpr16_vgpr17_vgpr18_vgpr19_vgpr20_vgpr21_vgpr22_vgpr23_vgpr24_vgpr25_vgpr26_vgpr27_vgpr28_vgpr29_vgpr30_vgpr31_vgpr32_vgpr33_vgpr34
+; GFX11-TRUE16-NEXT: ; implicit-def: $vgpr6_vgpr7_vgpr8_vgpr9_vgpr10_vgpr11_vgpr12_vgpr13_vgpr14_vgpr15_vgpr16_vgpr17_vgpr18_vgpr19_vgpr20_vgpr21_vgpr22_vgpr23_vgpr24_vgpr25_vgpr26_vgpr27_vgpr28_vgpr29_vgpr30_vgpr31_vgpr32_vgpr33_vgpr34_vgpr35_vgpr36_vgpr37
+; GFX11-TRUE16-NEXT: ; implicit-def: $vgpr10_vgpr11_vgpr12_vgpr13_vgpr14_vgpr15_vgpr16_vgpr17_vgpr18_vgpr19_vgpr20_vgpr21_vgpr22_vgpr23_vgpr24_vgpr25_vgpr26_vgpr27_vgpr28_vgpr29_vgpr30_vgpr31_vgpr32_vgpr33_vgpr34_vgpr35_vgpr36_vgpr37_vgpr38_vgpr39_vgpr40_vgpr41
+; GFX11-TRUE16-NEXT: ; implicit-def: $vgpr15_vgpr16_vgpr17_vgpr18_vgpr19_vgpr20_vgpr21_vgpr22_vgpr23_vgpr24_vgpr25_vgpr26_vgpr27_vgpr28_vgpr29_vgpr30_vgpr31_vgpr32_vgpr33_vgpr34_vgpr35_vgpr36_vgpr37_vgpr38_vgpr39_vgpr40_vgpr41_vgpr42_vgpr43_vgpr44_vgpr45_vgpr46
+; GFX11-TRUE16-NEXT: ; implicit-def: $vgpr21_vgpr22_vgpr23_vgpr24_vgpr25_vgpr26_vgpr27_vgpr28_vgpr29_vgpr30_vgpr31_vgpr32_vgpr33_vgpr34_vgpr35_vgpr36_vgpr37_vgpr38_vgpr39_vgpr40_vgpr41_vgpr42_vgpr43_vgpr44_vgpr45_vgpr46_vgpr47_vgpr48_vgpr49_vgpr50_vgpr51_vgpr52
+; GFX11-TRUE16-NEXT: s_delay_alu instid0(VALU_DEP_1) | instskip(NEXT) | instid1(VALU_DEP_3)
+; GFX11-TRUE16-NEXT: v_dual_mov_b32 v25, v54 :: v_dual_mov_b32 v26, v53
+; GFX11-TRUE16-NEXT: ; implicit-def: $vgpr28_vgpr29_vgpr30_vgpr31_vgpr32_vgpr33_vgpr34_vgpr35_vgpr36_vgpr37_vgpr38_vgpr39_vgpr40_vgpr41_vgpr42_vgpr43_vgpr44_vgpr45_vgpr46_vgpr47_vgpr48_vgpr49_vgpr50_vgpr51_vgpr52_vgpr53_vgpr54_vgpr55_vgpr56_vgpr57_vgpr58_vgpr59
+; GFX11-TRUE16-NEXT: v_mov_b32_e32 v28, v64
+; GFX11-TRUE16-NEXT: ; implicit-def: $vgpr36_vgpr37_vgpr38_vgpr39_vgpr40_vgpr41_vgpr42_vgpr43_vgpr44_vgpr45_vgpr46_vgpr47_vgpr48_vgpr49_vgpr50_vgpr51_vgpr52_vgpr53_vgpr54_vgpr55_vgpr56_vgpr57_vgpr58_vgpr59_vgpr60_vgpr61_vgpr62_vgpr63_vgpr64_vgpr65_vgpr66_vgpr67
+; GFX11-TRUE16-NEXT: ; implicit-def: $vgpr45_vgpr46_vgpr47_vgpr48_vgpr49_vgpr50_vgpr51_vgpr52_vgpr53_vgpr54_vgpr55_vgpr56_vgpr57_vgpr58_vgpr59_vgpr60_vgpr61_vgpr62_vgpr63_vgpr64_vgpr65_vgpr66_vgpr67_vgpr68_vgpr69_vgpr70_vgpr71_vgpr72_vgpr73_vgpr74_vgpr75_vgpr76
+; GFX11-TRUE16-NEXT: ; implicit-def: $vgpr55_vgpr56_vgpr57_vgpr58_vgpr59_vgpr60_vgpr61_vgpr62_vgpr63_vgpr64_vgpr65_vgpr66_vgpr67_vgpr68_vgpr69_vgpr70_vgpr71_vgpr72_vgpr73_vgpr74_vgpr75_vgpr76_vgpr77_vgpr78_vgpr79_vgpr80_vgpr81_vgpr82_vgpr83_vgpr84_vgpr85_vgpr86
+; GFX11-TRUE16-NEXT: ; implicit-def: $vgpr66_vgpr67_vgpr68_vgpr69_vgpr70_vgpr71_vgpr72_vgpr73_vgpr74_vgpr75_vgpr76_vgpr77_vgpr78_vgpr79_vgpr80_vgpr81_vgpr82_vgpr83_vgpr84_vgpr85_vgpr86_vgpr87_vgpr88_vgpr89_vgpr90_vgpr91_vgpr92_vgpr93_vgpr94_vgpr95_vgpr96_vgpr97
+; GFX11-TRUE16-NEXT: ; implicit-def: $vgpr78_vgpr79_vgpr80_vgpr81_vgpr82_vgpr83_vgpr84_vgpr85_vgpr86_vgpr87_vgpr88_vgpr89_vgpr90_vgpr91_vgpr92_vgpr93_vgpr94_vgpr95_vgpr96_vgpr97_vgpr98_vgpr99_vgpr100_vgpr101_vgpr102_vgpr103_vgpr104_vgpr105_vgpr106_vgpr107_vgpr108_vgpr109
+; GFX11-TRUE16-NEXT: ; implicit-def: $vgpr91_vgpr92_vgpr93_vgpr94_vgpr95_vgpr96_vgpr97_vgpr98_vgpr99_vgpr100_vgpr101_vgpr102_vgpr103_vgpr104_vgpr105_vgpr106_vgpr107_vgpr108_vgpr109_vgpr110_vgpr111_vgpr112_vgpr113_vgpr114_vgpr115_vgpr116_vgpr117_vgpr118_vgpr119_vgpr120_vgpr121_vgpr122
+; GFX11-TRUE16-NEXT: ; implicit-def: $vgpr105_vgpr106_vgpr107_vgpr108_vgpr109_vgpr110_vgpr111_vgpr112_vgpr113_vgpr114_vgpr115_vgpr116_vgpr117_vgpr118_vgpr119_vgpr120_vgpr121_vgpr122_vgpr123_vgpr124_vgpr125_vgpr126_vgpr127_vgpr128_vgpr129_vgpr130_vgpr131_vgpr132_vgpr133_vgpr134_vgpr135_vgpr136
+; GFX11-TRUE16-NEXT: ; implicit-def: $vgpr120_vgpr121_vgpr122_vgpr123_vgpr124_vgpr125_vgpr126_vgpr127_vgpr128_vgpr129_vgpr130_vgpr131_vgpr132_vgpr133_vgpr134_vgpr135_vgpr136_vgpr137_vgpr138_vgpr139_vgpr140_vgpr141_vgpr142_vgpr143_vgpr144_vgpr145_vgpr146_vgpr147_vgpr148_vgpr149_vgpr150_vgpr151
+; GFX11-TRUE16-NEXT: ; implicit-def: $vgpr136_vgpr137_vgpr138_vgpr139_vgpr140_vgpr141_vgpr142_vgpr143_vgpr144_vgpr145_vgpr146_vgpr147_vgpr148_vgpr149_vgpr150_vgpr151_vgpr152_vgpr153_vgpr154_vgpr155_vgpr156_vgpr157_vgpr158_vgpr159_vgpr160_vgpr161_vgpr162_vgpr163_vgpr164_vgpr165_vgpr166_vgpr167
+; GFX11-TRUE16-NEXT: ; implicit-def: $vgpr153_vgpr154_vgpr155_vgpr156_vgpr157_vgpr158_vgpr159_vgpr160_vgpr161_vgpr162_vgpr163_vgpr164_vgpr165_vgpr166_vgpr167_vgpr168_vgpr169_vgpr170_vgpr171_vgpr172_vgpr173_vgpr174_vgpr175_vgpr176_vgpr177_vgpr178_vgpr179_vgpr180_vgpr181_vgpr182_vgpr183_vgpr184
; GFX11-TRUE16-NEXT: s_branch .LBB35_2
;
; GFX11-FAKE16-LABEL: bitcast_v56f16_to_v28f32_scalar:
@@ -28216,141 +29194,299 @@ define inreg <14 x i64> @bitcast_v56i16_to_v14i64_scalar(<56 x i16> inreg %a, i3
; GFX11-TRUE16-LABEL: bitcast_v56i16_to_v14i64_scalar:
; GFX11-TRUE16: ; %bb.0:
; GFX11-TRUE16-NEXT: s_waitcnt vmcnt(0) expcnt(0) lgkmcnt(0)
-; GFX11-TRUE16-NEXT: v_mov_b16_e32 v32.h, 0
; GFX11-TRUE16-NEXT: v_cmp_ne_u32_e32 vcc_lo, 0, v10
-; GFX11-TRUE16-NEXT: v_mov_b16_e32 v32.l, v9.h
-; GFX11-TRUE16-NEXT: v_mov_b16_e32 v33.l, v8.h
-; GFX11-TRUE16-NEXT: v_mov_b16_e32 v34.l, v7.h
-; GFX11-TRUE16-NEXT: v_mov_b16_e32 v33.h, v32.h
-; GFX11-TRUE16-NEXT: v_mov_b16_e32 v34.h, v32.h
-; GFX11-TRUE16-NEXT: v_mov_b16_e32 v35.l, v6.h
-; GFX11-TRUE16-NEXT: v_mov_b16_e32 v35.h, v32.h
-; GFX11-TRUE16-NEXT: v_mov_b16_e32 v36.l, v5.h
-; GFX11-TRUE16-NEXT: v_mov_b16_e32 v36.h, v32.h
-; GFX11-TRUE16-NEXT: v_mov_b16_e32 v37.l, v4.h
-; GFX11-TRUE16-NEXT: v_mov_b16_e32 v37.h, v32.h
-; GFX11-TRUE16-NEXT: v_mov_b16_e32 v38.l, v3.h
-; GFX11-TRUE16-NEXT: v_mov_b16_e32 v38.h, v32.h
-; GFX11-TRUE16-NEXT: v_mov_b16_e32 v39.l, v2.h
-; GFX11-TRUE16-NEXT: v_mov_b16_e32 v39.h, v32.h
-; GFX11-TRUE16-NEXT: v_mov_b16_e32 v48.l, v1.h
-; GFX11-TRUE16-NEXT: v_mov_b16_e32 v48.h, v32.h
-; GFX11-TRUE16-NEXT: v_mov_b16_e32 v49.l, v0.h
-; GFX11-TRUE16-NEXT: v_mov_b16_e32 v49.h, v32.h
-; GFX11-TRUE16-NEXT: v_and_b32_e32 v67, 0xffff, v0
-; GFX11-TRUE16-NEXT: v_and_b32_e32 v66, 0xffff, v1
-; GFX11-TRUE16-NEXT: v_and_b32_e32 v65, 0xffff, v2
-; GFX11-TRUE16-NEXT: v_and_b32_e32 v64, 0xffff, v3
-; GFX11-TRUE16-NEXT: v_and_b32_e32 v55, 0xffff, v4
-; GFX11-TRUE16-NEXT: v_and_b32_e32 v54, 0xffff, v5
-; GFX11-TRUE16-NEXT: v_and_b32_e32 v53, 0xffff, v6
-; GFX11-TRUE16-NEXT: v_and_b32_e32 v52, 0xffff, v7
-; GFX11-TRUE16-NEXT: v_and_b32_e32 v51, 0xffff, v8
-; GFX11-TRUE16-NEXT: v_and_b32_e32 v50, 0xffff, v9
-; GFX11-TRUE16-NEXT: s_lshr_b32 s40, s29, 16
-; GFX11-TRUE16-NEXT: s_lshr_b32 s41, s28, 16
-; GFX11-TRUE16-NEXT: s_lshr_b32 s42, s27, 16
-; GFX11-TRUE16-NEXT: s_lshr_b32 s14, s26, 16
-; GFX11-TRUE16-NEXT: s_lshr_b32 s13, s25, 16
-; GFX11-TRUE16-NEXT: s_lshr_b32 s12, s24, 16
-; GFX11-TRUE16-NEXT: s_lshr_b32 s11, s23, 16
-; GFX11-TRUE16-NEXT: s_lshr_b32 s10, s22, 16
-; GFX11-TRUE16-NEXT: s_lshr_b32 s9, s21, 16
-; GFX11-TRUE16-NEXT: s_lshr_b32 s8, s20, 16
-; GFX11-TRUE16-NEXT: s_lshr_b32 s7, s19, 16
-; GFX11-TRUE16-NEXT: s_lshr_b32 s6, s18, 16
-; GFX11-TRUE16-NEXT: s_lshr_b32 s5, s17, 16
-; GFX11-TRUE16-NEXT: s_lshr_b32 s4, s16, 16
-; GFX11-TRUE16-NEXT: s_lshr_b32 s43, s3, 16
-; GFX11-TRUE16-NEXT: s_lshr_b32 s44, s2, 16
-; GFX11-TRUE16-NEXT: s_lshr_b32 s45, s1, 16
-; GFX11-TRUE16-NEXT: s_lshr_b32 s46, s0, 16
-; GFX11-TRUE16-NEXT: s_mov_b32 s15, 0
-; GFX11-TRUE16-NEXT: s_pack_ll_b32_b16 s0, s0, s46
-; GFX11-TRUE16-NEXT: s_pack_ll_b32_b16 s1, s1, s45
-; GFX11-TRUE16-NEXT: s_pack_ll_b32_b16 s2, s2, s44
-; GFX11-TRUE16-NEXT: s_pack_ll_b32_b16 s3, s3, s43
-; GFX11-TRUE16-NEXT: s_pack_ll_b32_b16 s4, s16, s4
-; GFX11-TRUE16-NEXT: s_pack_ll_b32_b16 s5, s17, s5
-; GFX11-TRUE16-NEXT: s_pack_ll_b32_b16 s6, s18, s6
-; GFX11-TRUE16-NEXT: s_pack_ll_b32_b16 s7, s19, s7
-; GFX11-TRUE16-NEXT: s_pack_ll_b32_b16 s8, s20, s8
-; GFX11-TRUE16-NEXT: s_pack_ll_b32_b16 s9, s21, s9
-; GFX11-TRUE16-NEXT: s_pack_ll_b32_b16 s10, s22, s10
-; GFX11-TRUE16-NEXT: s_pack_ll_b32_b16 s11, s23, s11
-; GFX11-TRUE16-NEXT: s_pack_ll_b32_b16 s12, s24, s12
-; GFX11-TRUE16-NEXT: s_pack_ll_b32_b16 s13, s25, s13
-; GFX11-TRUE16-NEXT: s_pack_ll_b32_b16 s14, s26, s14
-; GFX11-TRUE16-NEXT: s_pack_ll_b32_b16 s16, s27, s42
-; GFX11-TRUE16-NEXT: s_pack_ll_b32_b16 s17, s28, s41
-; GFX11-TRUE16-NEXT: s_pack_ll_b32_b16 s18, s29, s40
+; GFX11-TRUE16-NEXT: s_clause 0x1f
+; GFX11-TRUE16-NEXT: scratch_store_b32 off, v40, s32 offset:316
+; GFX11-TRUE16-NEXT: scratch_store_b32 off, v41, s32 offset:312
+; GFX11-TRUE16-NEXT: scratch_store_b32 off, v42, s32 offset:308
+; GFX11-TRUE16-NEXT: scratch_store_b32 off, v43, s32 offset:304
+; GFX11-TRUE16-NEXT: scratch_store_b32 off, v44, s32 offset:300
+; GFX11-TRUE16-NEXT: scratch_store_b32 off, v45, s32 offset:296
+; GFX11-TRUE16-NEXT: scratch_store_b32 off, v46, s32 offset:292
+; GFX11-TRUE16-NEXT: scratch_store_b32 off, v47, s32 offset:288
+; GFX11-TRUE16-NEXT: scratch_store_b32 off, v56, s32 offset:284
+; GFX11-TRUE16-NEXT: scratch_store_b32 off, v57, s32 offset:280
+; GFX11-TRUE16-NEXT: scratch_store_b32 off, v58, s32 offset:276
+; GFX11-TRUE16-NEXT: scratch_store_b32 off, v59, s32 offset:272
+; GFX11-TRUE16-NEXT: scratch_store_b32 off, v60, s32 offset:268
+; GFX11-TRUE16-NEXT: scratch_store_b32 off, v61, s32 offset:264
+; GFX11-TRUE16-NEXT: scratch_store_b32 off, v62, s32 offset:260
+; GFX11-TRUE16-NEXT: scratch_store_b32 off, v63, s32 offset:256
+; GFX11-TRUE16-NEXT: scratch_store_b32 off, v72, s32 offset:252
+; GFX11-TRUE16-NEXT: scratch_store_b32 off, v73, s32 offset:248
+; GFX11-TRUE16-NEXT: scratch_store_b32 off, v74, s32 offset:244
+; GFX11-TRUE16-NEXT: scratch_store_b32 off, v75, s32 offset:240
+; GFX11-TRUE16-NEXT: scratch_store_b32 off, v76, s32 offset:236
+; GFX11-TRUE16-NEXT: scratch_store_b32 off, v77, s32 offset:232
+; GFX11-TRUE16-NEXT: scratch_store_b32 off, v78, s32 offset:228
+; GFX11-TRUE16-NEXT: scratch_store_b32 off, v79, s32 offset:224
+; GFX11-TRUE16-NEXT: scratch_store_b32 off, v88, s32 offset:220
+; GFX11-TRUE16-NEXT: scratch_store_b32 off, v89, s32 offset:216
+; GFX11-TRUE16-NEXT: scratch_store_b32 off, v90, s32 offset:212
+; GFX11-TRUE16-NEXT: scratch_store_b32 off, v91, s32 offset:208
+; GFX11-TRUE16-NEXT: scratch_store_b32 off, v92, s32 offset:204
+; GFX11-TRUE16-NEXT: scratch_store_b32 off, v93, s32 offset:200
+; GFX11-TRUE16-NEXT: scratch_store_b32 off, v94, s32 offset:196
+; GFX11-TRUE16-NEXT: scratch_store_b32 off, v95, s32 offset:192
+; GFX11-TRUE16-NEXT: s_clause 0x1f
+; GFX11-TRUE16-NEXT: scratch_store_b32 off, v104, s32 offset:188
+; GFX11-TRUE16-NEXT: scratch_store_b32 off, v105, s32 offset:184
+; GFX11-TRUE16-NEXT: scratch_store_b32 off, v106, s32 offset:180
+; GFX11-TRUE16-NEXT: scratch_store_b32 off, v107, s32 offset:176
+; GFX11-TRUE16-NEXT: scratch_store_b32 off, v108, s32 offset:172
+; GFX11-TRUE16-NEXT: scratch_store_b32 off, v109, s32 offset:168
+; GFX11-TRUE16-NEXT: scratch_store_b32 off, v110, s32 offset:164
+; GFX11-TRUE16-NEXT: scratch_store_b32 off, v111, s32 offset:160
+; GFX11-TRUE16-NEXT: scratch_store_b32 off, v120, s32 offset:156
+; GFX11-TRUE16-NEXT: scratch_store_b32 off, v121, s32 offset:152
+; GFX11-TRUE16-NEXT: scratch_store_b32 off, v122, s32 offset:148
+; GFX11-TRUE16-NEXT: scratch_store_b32 off, v123, s32 offset:144
+; GFX11-TRUE16-NEXT: scratch_store_b32 off, v124, s32 offset:140
+; GFX11-TRUE16-NEXT: scratch_store_b32 off, v125, s32 offset:136
+; GFX11-TRUE16-NEXT: scratch_store_b32 off, v126, s32 offset:132
+; GFX11-TRUE16-NEXT: scratch_store_b32 off, v127, s32 offset:128
+; GFX11-TRUE16-NEXT: scratch_store_b32 off, v136, s32 offset:124
+; GFX11-TRUE16-NEXT: scratch_store_b32 off, v137, s32 offset:120
+; GFX11-TRUE16-NEXT: scratch_store_b32 off, v138, s32 offset:116
+; GFX11-TRUE16-NEXT: scratch_store_b32 off, v139, s32 offset:112
+; GFX11-TRUE16-NEXT: scratch_store_b32 off, v140, s32 offset:108
+; GFX11-TRUE16-NEXT: scratch_store_b32 off, v141, s32 offset:104
+; GFX11-TRUE16-NEXT: scratch_store_b32 off, v142, s32 offset:100
+; GFX11-TRUE16-NEXT: scratch_store_b32 off, v143, s32 offset:96
+; GFX11-TRUE16-NEXT: scratch_store_b32 off, v152, s32 offset:92
+; GFX11-TRUE16-NEXT: scratch_store_b32 off, v153, s32 offset:88
+; GFX11-TRUE16-NEXT: scratch_store_b32 off, v154, s32 offset:84
+; GFX11-TRUE16-NEXT: scratch_store_b32 off, v155, s32 offset:80
+; GFX11-TRUE16-NEXT: scratch_store_b32 off, v156, s32 offset:76
+; GFX11-TRUE16-NEXT: scratch_store_b32 off, v157, s32 offset:72
+; GFX11-TRUE16-NEXT: scratch_store_b32 off, v158, s32 offset:68
+; GFX11-TRUE16-NEXT: scratch_store_b32 off, v159, s32 offset:64
+; GFX11-TRUE16-NEXT: s_clause 0xf
+; GFX11-TRUE16-NEXT: scratch_store_b32 off, v168, s32 offset:60
+; GFX11-TRUE16-NEXT: scratch_store_b32 off, v169, s32 offset:56
+; GFX11-TRUE16-NEXT: scratch_store_b32 off, v170, s32 offset:52
+; GFX11-TRUE16-NEXT: scratch_store_b32 off, v171, s32 offset:48
+; GFX11-TRUE16-NEXT: scratch_store_b32 off, v172, s32 offset:44
+; GFX11-TRUE16-NEXT: scratch_store_b32 off, v173, s32 offset:40
+; GFX11-TRUE16-NEXT: scratch_store_b32 off, v174, s32 offset:36
+; GFX11-TRUE16-NEXT: scratch_store_b32 off, v175, s32 offset:32
+; GFX11-TRUE16-NEXT: scratch_store_b32 off, v184, s32 offset:28
+; GFX11-TRUE16-NEXT: scratch_store_b32 off, v185, s32 offset:24
+; GFX11-TRUE16-NEXT: scratch_store_b32 off, v186, s32 offset:20
+; GFX11-TRUE16-NEXT: scratch_store_b32 off, v187, s32 offset:16
+; GFX11-TRUE16-NEXT: scratch_store_b32 off, v188, s32 offset:12
+; GFX11-TRUE16-NEXT: scratch_store_b32 off, v189, s32 offset:8
+; GFX11-TRUE16-NEXT: scratch_store_b32 off, v190, s32 offset:4
+; GFX11-TRUE16-NEXT: scratch_store_b32 off, v191, s32
+; GFX11-TRUE16-NEXT: v_dual_mov_b32 v28, v9 :: v_dual_mov_b32 v25, v7
+; GFX11-TRUE16-NEXT: v_dual_mov_b32 v26, v8 :: v_dual_mov_b32 v189, v5
+; GFX11-TRUE16-NEXT: v_dual_mov_b32 v188, v6 :: v_dual_mov_b32 v191, v3
+; GFX11-TRUE16-NEXT: v_dual_mov_b32 v190, v4 :: v_dual_mov_b32 v185, v2
+; GFX11-TRUE16-NEXT: v_dual_mov_b32 v186, v1 :: v_dual_mov_b32 v187, v0
+; GFX11-TRUE16-NEXT: s_lshr_b32 s15, s29, 16
+; GFX11-TRUE16-NEXT: s_lshr_b32 s14, s28, 16
+; GFX11-TRUE16-NEXT: s_lshr_b32 s13, s27, 16
+; GFX11-TRUE16-NEXT: s_lshr_b32 s12, s26, 16
+; GFX11-TRUE16-NEXT: s_lshr_b32 s11, s25, 16
+; GFX11-TRUE16-NEXT: s_lshr_b32 s10, s24, 16
+; GFX11-TRUE16-NEXT: s_lshr_b32 s9, s23, 16
+; GFX11-TRUE16-NEXT: s_lshr_b32 s8, s22, 16
+; GFX11-TRUE16-NEXT: s_lshr_b32 s7, s21, 16
+; GFX11-TRUE16-NEXT: s_lshr_b32 s6, s20, 16
+; GFX11-TRUE16-NEXT: s_lshr_b32 s5, s19, 16
+; GFX11-TRUE16-NEXT: s_lshr_b32 s4, s18, 16
+; GFX11-TRUE16-NEXT: s_lshr_b32 s43, s17, 16
+; GFX11-TRUE16-NEXT: s_lshr_b32 s44, s16, 16
+; GFX11-TRUE16-NEXT: s_lshr_b32 s45, s3, 16
+; GFX11-TRUE16-NEXT: s_lshr_b32 s46, s2, 16
+; GFX11-TRUE16-NEXT: s_lshr_b32 s41, s1, 16
+; GFX11-TRUE16-NEXT: s_lshr_b32 s40, s0, 16
+; GFX11-TRUE16-NEXT: s_mov_b32 s42, 0
+; GFX11-TRUE16-NEXT: s_pack_ll_b32_b16 s40, s0, s40
+; GFX11-TRUE16-NEXT: s_pack_ll_b32_b16 s41, s1, s41
+; GFX11-TRUE16-NEXT: s_pack_ll_b32_b16 s0, s2, s46
+; GFX11-TRUE16-NEXT: s_pack_ll_b32_b16 s1, s3, s45
+; GFX11-TRUE16-NEXT: s_pack_ll_b32_b16 s2, s16, s44
+; GFX11-TRUE16-NEXT: s_pack_ll_b32_b16 s3, s17, s43
+; GFX11-TRUE16-NEXT: s_pack_ll_b32_b16 s4, s18, s4
+; GFX11-TRUE16-NEXT: s_pack_ll_b32_b16 s5, s19, s5
+; GFX11-TRUE16-NEXT: s_pack_ll_b32_b16 s6, s20, s6
+; GFX11-TRUE16-NEXT: s_pack_ll_b32_b16 s7, s21, s7
+; GFX11-TRUE16-NEXT: s_pack_ll_b32_b16 s8, s22, s8
+; GFX11-TRUE16-NEXT: s_pack_ll_b32_b16 s9, s23, s9
+; GFX11-TRUE16-NEXT: s_pack_ll_b32_b16 s10, s24, s10
+; GFX11-TRUE16-NEXT: s_pack_ll_b32_b16 s11, s25, s11
+; GFX11-TRUE16-NEXT: s_pack_ll_b32_b16 s12, s26, s12
+; GFX11-TRUE16-NEXT: s_pack_ll_b32_b16 s13, s27, s13
+; GFX11-TRUE16-NEXT: s_pack_ll_b32_b16 s14, s28, s14
+; GFX11-TRUE16-NEXT: s_pack_ll_b32_b16 s15, s29, s15
; GFX11-TRUE16-NEXT: s_and_b32 s47, vcc_lo, exec_lo
; GFX11-TRUE16-NEXT: s_cbranch_scc0 .LBB43_4
; GFX11-TRUE16-NEXT: ; %bb.1: ; %cmp.false
-; GFX11-TRUE16-NEXT: v_lshl_or_b32 v18, v49, 16, v67
-; GFX11-TRUE16-NEXT: v_lshl_or_b32 v19, v48, 16, v66
-; GFX11-TRUE16-NEXT: v_lshl_or_b32 v20, v39, 16, v65
-; GFX11-TRUE16-NEXT: v_lshl_or_b32 v21, v38, 16, v64
-; GFX11-TRUE16-NEXT: v_lshl_or_b32 v22, v37, 16, v55
-; GFX11-TRUE16-NEXT: v_lshl_or_b32 v23, v36, 16, v54
-; GFX11-TRUE16-NEXT: v_lshl_or_b32 v24, v35, 16, v53
-; GFX11-TRUE16-NEXT: v_lshl_or_b32 v25, v34, 16, v52
-; GFX11-TRUE16-NEXT: v_lshl_or_b32 v26, v33, 16, v51
-; GFX11-TRUE16-NEXT: v_lshl_or_b32 v27, v32, 16, v50
-; GFX11-TRUE16-NEXT: v_dual_mov_b32 v0, s0 :: v_dual_mov_b32 v1, s1
-; GFX11-TRUE16-NEXT: v_dual_mov_b32 v2, s2 :: v_dual_mov_b32 v3, s3
-; GFX11-TRUE16-NEXT: v_dual_mov_b32 v4, s4 :: v_dual_mov_b32 v5, s5
-; GFX11-TRUE16-NEXT: v_dual_mov_b32 v6, s6 :: v_dual_mov_b32 v7, s7
-; GFX11-TRUE16-NEXT: v_dual_mov_b32 v8, s8 :: v_dual_mov_b32 v9, s9
-; GFX11-TRUE16-NEXT: v_dual_mov_b32 v10, s10 :: v_dual_mov_b32 v11, s11
-; GFX11-TRUE16-NEXT: v_dual_mov_b32 v12, s12 :: v_dual_mov_b32 v13, s13
-; GFX11-TRUE16-NEXT: v_dual_mov_b32 v14, s14 :: v_dual_mov_b32 v15, s16
-; GFX11-TRUE16-NEXT: v_dual_mov_b32 v16, s17 :: v_dual_mov_b32 v17, s18
-; GFX11-TRUE16-NEXT: s_and_not1_b32 vcc_lo, exec_lo, s15
+; GFX11-TRUE16-NEXT: v_dual_mov_b32 v0, s40 :: v_dual_mov_b32 v5, s0
+; GFX11-TRUE16-NEXT: v_dual_mov_b32 v2, s41 :: v_dual_mov_b32 v9, s1
+; GFX11-TRUE16-NEXT: v_dual_mov_b32 v14, s2 :: v_dual_mov_b32 v27, s4
+; GFX11-TRUE16-NEXT: v_dual_mov_b32 v20, s3 :: v_dual_mov_b32 v35, s5
+; GFX11-TRUE16-NEXT: v_dual_mov_b32 v44, s6 :: v_dual_mov_b32 v65, s8
+; GFX11-TRUE16-NEXT: v_dual_mov_b32 v54, s7 :: v_dual_mov_b32 v77, s9
+; GFX11-TRUE16-NEXT: v_dual_mov_b32 v90, s10 :: v_dual_mov_b32 v119, s12
+; GFX11-TRUE16-NEXT: v_dual_mov_b32 v104, s11 :: v_dual_mov_b32 v135, s13
+; GFX11-TRUE16-NEXT: v_mov_b32_e32 v152, s14
+; GFX11-TRUE16-NEXT: v_mov_b32_e32 v170, s15
+; GFX11-TRUE16-NEXT: s_and_not1_b32 vcc_lo, exec_lo, s42
; GFX11-TRUE16-NEXT: s_cbranch_vccnz .LBB43_3
; GFX11-TRUE16-NEXT: .LBB43_2: ; %cmp.true
-; GFX11-TRUE16-NEXT: v_lshl_or_b32 v18, v49, 16, v67
-; GFX11-TRUE16-NEXT: v_lshl_or_b32 v19, v48, 16, v66
-; GFX11-TRUE16-NEXT: v_lshl_or_b32 v20, v39, 16, v65
-; GFX11-TRUE16-NEXT: v_lshl_or_b32 v21, v38, 16, v64
-; GFX11-TRUE16-NEXT: v_lshl_or_b32 v22, v37, 16, v55
-; GFX11-TRUE16-NEXT: v_lshl_or_b32 v23, v36, 16, v54
-; GFX11-TRUE16-NEXT: v_lshl_or_b32 v24, v35, 16, v53
-; GFX11-TRUE16-NEXT: v_lshl_or_b32 v25, v34, 16, v52
-; GFX11-TRUE16-NEXT: v_lshl_or_b32 v26, v33, 16, v51
-; GFX11-TRUE16-NEXT: v_lshl_or_b32 v27, v32, 16, v50
-; GFX11-TRUE16-NEXT: v_pk_add_u16 v0, s0, 3 op_sel_hi:[1,0]
-; GFX11-TRUE16-NEXT: v_pk_add_u16 v1, s1, 3 op_sel_hi:[1,0]
-; GFX11-TRUE16-NEXT: v_pk_add_u16 v2, s2, 3 op_sel_hi:[1,0]
-; GFX11-TRUE16-NEXT: v_pk_add_u16 v3, s3, 3 op_sel_hi:[1,0]
-; GFX11-TRUE16-NEXT: v_pk_add_u16 v4, s4, 3 op_sel_hi:[1,0]
-; GFX11-TRUE16-NEXT: v_pk_add_u16 v5, s5, 3 op_sel_hi:[1,0]
-; GFX11-TRUE16-NEXT: v_pk_add_u16 v6, s6, 3 op_sel_hi:[1,0]
-; GFX11-TRUE16-NEXT: v_pk_add_u16 v7, s7, 3 op_sel_hi:[1,0]
-; GFX11-TRUE16-NEXT: v_pk_add_u16 v8, s8, 3 op_sel_hi:[1,0]
-; GFX11-TRUE16-NEXT: v_pk_add_u16 v9, s9, 3 op_sel_hi:[1,0]
-; GFX11-TRUE16-NEXT: v_pk_add_u16 v10, s10, 3 op_sel_hi:[1,0]
-; GFX11-TRUE16-NEXT: v_pk_add_u16 v11, s11, 3 op_sel_hi:[1,0]
-; GFX11-TRUE16-NEXT: v_pk_add_u16 v12, s12, 3 op_sel_hi:[1,0]
-; GFX11-TRUE16-NEXT: v_pk_add_u16 v13, s13, 3 op_sel_hi:[1,0]
-; GFX11-TRUE16-NEXT: v_pk_add_u16 v14, s14, 3 op_sel_hi:[1,0]
-; GFX11-TRUE16-NEXT: v_pk_add_u16 v15, s16, 3 op_sel_hi:[1,0]
-; GFX11-TRUE16-NEXT: v_pk_add_u16 v16, s17, 3 op_sel_hi:[1,0]
-; GFX11-TRUE16-NEXT: v_pk_add_u16 v17, s18, 3 op_sel_hi:[1,0]
-; GFX11-TRUE16-NEXT: v_pk_add_u16 v18, v18, 3 op_sel_hi:[1,0]
-; GFX11-TRUE16-NEXT: v_pk_add_u16 v19, v19, 3 op_sel_hi:[1,0]
-; GFX11-TRUE16-NEXT: v_pk_add_u16 v20, v20, 3 op_sel_hi:[1,0]
-; GFX11-TRUE16-NEXT: v_pk_add_u16 v21, v21, 3 op_sel_hi:[1,0]
-; GFX11-TRUE16-NEXT: v_pk_add_u16 v22, v22, 3 op_sel_hi:[1,0]
-; GFX11-TRUE16-NEXT: v_pk_add_u16 v23, v23, 3 op_sel_hi:[1,0]
-; GFX11-TRUE16-NEXT: v_pk_add_u16 v24, v24, 3 op_sel_hi:[1,0]
+; GFX11-TRUE16-NEXT: v_pk_add_u16 v0, s40, 3 op_sel_hi:[1,0]
+; GFX11-TRUE16-NEXT: v_pk_add_u16 v2, s41, 3 op_sel_hi:[1,0]
+; GFX11-TRUE16-NEXT: v_pk_add_u16 v187, v187, 3 op_sel_hi:[1,0]
+; GFX11-TRUE16-NEXT: v_pk_add_u16 v186, v186, 3 op_sel_hi:[1,0]
+; GFX11-TRUE16-NEXT: v_pk_add_u16 v185, v185, 3 op_sel_hi:[1,0]
+; GFX11-TRUE16-NEXT: v_pk_add_u16 v191, v191, 3 op_sel_hi:[1,0]
+; GFX11-TRUE16-NEXT: v_pk_add_u16 v190, v190, 3 op_sel_hi:[1,0]
+; GFX11-TRUE16-NEXT: v_pk_add_u16 v189, v189, 3 op_sel_hi:[1,0]
+; GFX11-TRUE16-NEXT: v_pk_add_u16 v188, v188, 3 op_sel_hi:[1,0]
; GFX11-TRUE16-NEXT: v_pk_add_u16 v25, v25, 3 op_sel_hi:[1,0]
; GFX11-TRUE16-NEXT: v_pk_add_u16 v26, v26, 3 op_sel_hi:[1,0]
-; GFX11-TRUE16-NEXT: v_pk_add_u16 v27, v27, 3 op_sel_hi:[1,0]
+; GFX11-TRUE16-NEXT: v_pk_add_u16 v28, v28, 3 op_sel_hi:[1,0]
+; GFX11-TRUE16-NEXT: v_pk_add_u16 v5, s0, 3 op_sel_hi:[1,0]
+; GFX11-TRUE16-NEXT: v_pk_add_u16 v9, s1, 3 op_sel_hi:[1,0]
+; GFX11-TRUE16-NEXT: v_pk_add_u16 v14, s2, 3 op_sel_hi:[1,0]
+; GFX11-TRUE16-NEXT: v_pk_add_u16 v20, s3, 3 op_sel_hi:[1,0]
+; GFX11-TRUE16-NEXT: v_pk_add_u16 v27, s4, 3 op_sel_hi:[1,0]
+; GFX11-TRUE16-NEXT: v_pk_add_u16 v35, s5, 3 op_sel_hi:[1,0]
+; GFX11-TRUE16-NEXT: v_pk_add_u16 v44, s6, 3 op_sel_hi:[1,0]
+; GFX11-TRUE16-NEXT: v_pk_add_u16 v54, s7, 3 op_sel_hi:[1,0]
+; GFX11-TRUE16-NEXT: v_pk_add_u16 v65, s8, 3 op_sel_hi:[1,0]
+; GFX11-TRUE16-NEXT: v_pk_add_u16 v77, s9, 3 op_sel_hi:[1,0]
+; GFX11-TRUE16-NEXT: v_pk_add_u16 v90, s10, 3 op_sel_hi:[1,0]
+; GFX11-TRUE16-NEXT: v_pk_add_u16 v104, s11, 3 op_sel_hi:[1,0]
+; GFX11-TRUE16-NEXT: v_pk_add_u16 v119, s12, 3 op_sel_hi:[1,0]
+; GFX11-TRUE16-NEXT: v_pk_add_u16 v135, s13, 3 op_sel_hi:[1,0]
+; GFX11-TRUE16-NEXT: v_pk_add_u16 v152, s14, 3 op_sel_hi:[1,0]
+; GFX11-TRUE16-NEXT: v_pk_add_u16 v170, s15, 3 op_sel_hi:[1,0]
; GFX11-TRUE16-NEXT: .LBB43_3: ; %end
+; GFX11-TRUE16-NEXT: v_dual_mov_b32 v1, v2 :: v_dual_mov_b32 v2, v5
+; GFX11-TRUE16-NEXT: v_dual_mov_b32 v5, v20 :: v_dual_mov_b32 v6, v27
+; GFX11-TRUE16-NEXT: v_dual_mov_b32 v7, v35 :: v_dual_mov_b32 v8, v44
+; GFX11-TRUE16-NEXT: v_dual_mov_b32 v11, v77 :: v_dual_mov_b32 v12, v90
+; GFX11-TRUE16-NEXT: v_mov_b32_e32 v13, v104
+; GFX11-TRUE16-NEXT: v_dual_mov_b32 v15, v135 :: v_dual_mov_b32 v16, v152
+; GFX11-TRUE16-NEXT: v_dual_mov_b32 v17, v170 :: v_dual_mov_b32 v18, v187
+; GFX11-TRUE16-NEXT: v_dual_mov_b32 v19, v186 :: v_dual_mov_b32 v20, v185
+; GFX11-TRUE16-NEXT: v_dual_mov_b32 v21, v191 :: v_dual_mov_b32 v22, v190
+; GFX11-TRUE16-NEXT: v_dual_mov_b32 v23, v189 :: v_dual_mov_b32 v24, v188
+; GFX11-TRUE16-NEXT: s_clause 0x1f
+; GFX11-TRUE16-NEXT: scratch_load_b32 v191, off, s32
+; GFX11-TRUE16-NEXT: scratch_load_b32 v190, off, s32 offset:4
+; GFX11-TRUE16-NEXT: scratch_load_b32 v189, off, s32 offset:8
+; GFX11-TRUE16-NEXT: scratch_load_b32 v188, off, s32 offset:12
+; GFX11-TRUE16-NEXT: scratch_load_b32 v187, off, s32 offset:16
+; GFX11-TRUE16-NEXT: scratch_load_b32 v186, off, s32 offset:20
+; GFX11-TRUE16-NEXT: scratch_load_b32 v185, off, s32 offset:24
+; GFX11-TRUE16-NEXT: scratch_load_b32 v184, off, s32 offset:28
+; GFX11-TRUE16-NEXT: scratch_load_b32 v175, off, s32 offset:32
+; GFX11-TRUE16-NEXT: scratch_load_b32 v174, off, s32 offset:36
+; GFX11-TRUE16-NEXT: scratch_load_b32 v173, off, s32 offset:40
+; GFX11-TRUE16-NEXT: scratch_load_b32 v172, off, s32 offset:44
+; GFX11-TRUE16-NEXT: scratch_load_b32 v171, off, s32 offset:48
+; GFX11-TRUE16-NEXT: scratch_load_b32 v170, off, s32 offset:52
+; GFX11-TRUE16-NEXT: scratch_load_b32 v169, off, s32 offset:56
+; GFX11-TRUE16-NEXT: scratch_load_b32 v168, off, s32 offset:60
+; GFX11-TRUE16-NEXT: scratch_load_b32 v159, off, s32 offset:64
+; GFX11-TRUE16-NEXT: scratch_load_b32 v158, off, s32 offset:68
+; GFX11-TRUE16-NEXT: scratch_load_b32 v157, off, s32 offset:72
+; GFX11-TRUE16-NEXT: scratch_load_b32 v156, off, s32 offset:76
+; GFX11-TRUE16-NEXT: scratch_load_b32 v155, off, s32 offset:80
+; GFX11-TRUE16-NEXT: scratch_load_b32 v154, off, s32 offset:84
+; GFX11-TRUE16-NEXT: scratch_load_b32 v153, off, s32 offset:88
+; GFX11-TRUE16-NEXT: scratch_load_b32 v152, off, s32 offset:92
+; GFX11-TRUE16-NEXT: scratch_load_b32 v143, off, s32 offset:96
+; GFX11-TRUE16-NEXT: scratch_load_b32 v142, off, s32 offset:100
+; GFX11-TRUE16-NEXT: scratch_load_b32 v141, off, s32 offset:104
+; GFX11-TRUE16-NEXT: scratch_load_b32 v140, off, s32 offset:108
+; GFX11-TRUE16-NEXT: scratch_load_b32 v139, off, s32 offset:112
+; GFX11-TRUE16-NEXT: scratch_load_b32 v138, off, s32 offset:116
+; GFX11-TRUE16-NEXT: scratch_load_b32 v137, off, s32 offset:120
+; GFX11-TRUE16-NEXT: scratch_load_b32 v136, off, s32 offset:124
+; GFX11-TRUE16-NEXT: s_clause 0x1f
+; GFX11-TRUE16-NEXT: scratch_load_b32 v127, off, s32 offset:128
+; GFX11-TRUE16-NEXT: scratch_load_b32 v126, off, s32 offset:132
+; GFX11-TRUE16-NEXT: scratch_load_b32 v125, off, s32 offset:136
+; GFX11-TRUE16-NEXT: scratch_load_b32 v124, off, s32 offset:140
+; GFX11-TRUE16-NEXT: scratch_load_b32 v123, off, s32 offset:144
+; GFX11-TRUE16-NEXT: scratch_load_b32 v122, off, s32 offset:148
+; GFX11-TRUE16-NEXT: scratch_load_b32 v121, off, s32 offset:152
+; GFX11-TRUE16-NEXT: scratch_load_b32 v120, off, s32 offset:156
+; GFX11-TRUE16-NEXT: scratch_load_b32 v111, off, s32 offset:160
+; GFX11-TRUE16-NEXT: scratch_load_b32 v110, off, s32 offset:164
+; GFX11-TRUE16-NEXT: scratch_load_b32 v109, off, s32 offset:168
+; GFX11-TRUE16-NEXT: scratch_load_b32 v108, off, s32 offset:172
+; GFX11-TRUE16-NEXT: scratch_load_b32 v107, off, s32 offset:176
+; GFX11-TRUE16-NEXT: scratch_load_b32 v106, off, s32 offset:180
+; GFX11-TRUE16-NEXT: scratch_load_b32 v105, off, s32 offset:184
+; GFX11-TRUE16-NEXT: scratch_load_b32 v104, off, s32 offset:188
+; GFX11-TRUE16-NEXT: scratch_load_b32 v95, off, s32 offset:192
+; GFX11-TRUE16-NEXT: scratch_load_b32 v94, off, s32 offset:196
+; GFX11-TRUE16-NEXT: scratch_load_b32 v93, off, s32 offset:200
+; GFX11-TRUE16-NEXT: scratch_load_b32 v92, off, s32 offset:204
+; GFX11-TRUE16-NEXT: scratch_load_b32 v91, off, s32 offset:208
+; GFX11-TRUE16-NEXT: scratch_load_b32 v90, off, s32 offset:212
+; GFX11-TRUE16-NEXT: scratch_load_b32 v89, off, s32 offset:216
+; GFX11-TRUE16-NEXT: scratch_load_b32 v88, off, s32 offset:220
+; GFX11-TRUE16-NEXT: scratch_load_b32 v79, off, s32 offset:224
+; GFX11-TRUE16-NEXT: scratch_load_b32 v78, off, s32 offset:228
+; GFX11-TRUE16-NEXT: scratch_load_b32 v77, off, s32 offset:232
+; GFX11-TRUE16-NEXT: scratch_load_b32 v76, off, s32 offset:236
+; GFX11-TRUE16-NEXT: scratch_load_b32 v75, off, s32 offset:240
+; GFX11-TRUE16-NEXT: scratch_load_b32 v74, off, s32 offset:244
+; GFX11-TRUE16-NEXT: scratch_load_b32 v73, off, s32 offset:248
+; GFX11-TRUE16-NEXT: scratch_load_b32 v72, off, s32 offset:252
+; GFX11-TRUE16-NEXT: s_clause 0xf
+; GFX11-TRUE16-NEXT: scratch_load_b32 v63, off, s32 offset:256
+; GFX11-TRUE16-NEXT: scratch_load_b32 v62, off, s32 offset:260
+; GFX11-TRUE16-NEXT: scratch_load_b32 v61, off, s32 offset:264
+; GFX11-TRUE16-NEXT: scratch_load_b32 v60, off, s32 offset:268
+; GFX11-TRUE16-NEXT: scratch_load_b32 v59, off, s32 offset:272
+; GFX11-TRUE16-NEXT: scratch_load_b32 v58, off, s32 offset:276
+; GFX11-TRUE16-NEXT: scratch_load_b32 v57, off, s32 offset:280
+; GFX11-TRUE16-NEXT: scratch_load_b32 v56, off, s32 offset:284
+; GFX11-TRUE16-NEXT: scratch_load_b32 v47, off, s32 offset:288
+; GFX11-TRUE16-NEXT: scratch_load_b32 v46, off, s32 offset:292
+; GFX11-TRUE16-NEXT: scratch_load_b32 v45, off, s32 offset:296
+; GFX11-TRUE16-NEXT: scratch_load_b32 v44, off, s32 offset:300
+; GFX11-TRUE16-NEXT: scratch_load_b32 v43, off, s32 offset:304
+; GFX11-TRUE16-NEXT: scratch_load_b32 v42, off, s32 offset:308
+; GFX11-TRUE16-NEXT: scratch_load_b32 v41, off, s32 offset:312
+; GFX11-TRUE16-NEXT: scratch_load_b32 v40, off, s32 offset:316
+; GFX11-TRUE16-NEXT: v_dual_mov_b32 v3, v9 :: v_dual_mov_b32 v4, v14
+; GFX11-TRUE16-NEXT: v_dual_mov_b32 v9, v54 :: v_dual_mov_b32 v10, v65
+; GFX11-TRUE16-NEXT: v_dual_mov_b32 v14, v119 :: v_dual_mov_b32 v27, v28
+; GFX11-TRUE16-NEXT: s_waitcnt vmcnt(0)
; GFX11-TRUE16-NEXT: s_setpc_b64 s[30:31]
; GFX11-TRUE16-NEXT: .LBB43_4:
+; GFX11-TRUE16-NEXT: v_dual_mov_b32 v64, v28 :: v_dual_mov_b32 v53, v26
+; GFX11-TRUE16-NEXT: v_mov_b32_e32 v54, v25
; GFX11-TRUE16-NEXT: ; implicit-def: $vgpr0_vgpr1_vgpr2_vgpr3_vgpr4_vgpr5_vgpr6_vgpr7_vgpr8_vgpr9_vgpr10_vgpr11_vgpr12_vgpr13_vgpr14_vgpr15_vgpr16_vgpr17_vgpr18_vgpr19_vgpr20_vgpr21_vgpr22_vgpr23_vgpr24_vgpr25_vgpr26_vgpr27_vgpr28_vgpr29_vgpr30_vgpr31
+; GFX11-TRUE16-NEXT: ; implicit-def: $vgpr1_vgpr2_vgpr3_vgpr4_vgpr5_vgpr6_vgpr7_vgpr8_vgpr9_vgpr10_vgpr11_vgpr12_vgpr13_vgpr14_vgpr15_vgpr16_vgpr17_vgpr18_vgpr19_vgpr20_vgpr21_vgpr22_vgpr23_vgpr24_vgpr25_vgpr26_vgpr27_vgpr28_vgpr29_vgpr30_vgpr31_vgpr32
+; GFX11-TRUE16-NEXT: ; implicit-def: $vgpr3_vgpr4_vgpr5_vgpr6_vgpr7_vgpr8_vgpr9_vgpr10_vgpr11_vgpr12_vgpr13_vgpr14_vgpr15_vgpr16_vgpr17_vgpr18_vgpr19_vgpr20_vgpr21_vgpr22_vgpr23_vgpr24_vgpr25_vgpr26_vgpr27_vgpr28_vgpr29_vgpr30_vgpr31_vgpr32_vgpr33_vgpr34
+; GFX11-TRUE16-NEXT: ; implicit-def: $vgpr6_vgpr7_vgpr8_vgpr9_vgpr10_vgpr11_vgpr12_vgpr13_vgpr14_vgpr15_vgpr16_vgpr17_vgpr18_vgpr19_vgpr20_vgpr21_vgpr22_vgpr23_vgpr24_vgpr25_vgpr26_vgpr27_vgpr28_vgpr29_vgpr30_vgpr31_vgpr32_vgpr33_vgpr34_vgpr35_vgpr36_vgpr37
+; GFX11-TRUE16-NEXT: ; implicit-def: $vgpr10_vgpr11_vgpr12_vgpr13_vgpr14_vgpr15_vgpr16_vgpr17_vgpr18_vgpr19_vgpr20_vgpr21_vgpr22_vgpr23_vgpr24_vgpr25_vgpr26_vgpr27_vgpr28_vgpr29_vgpr30_vgpr31_vgpr32_vgpr33_vgpr34_vgpr35_vgpr36_vgpr37_vgpr38_vgpr39_vgpr40_vgpr41
+; GFX11-TRUE16-NEXT: ; implicit-def: $vgpr15_vgpr16_vgpr17_vgpr18_vgpr19_vgpr20_vgpr21_vgpr22_vgpr23_vgpr24_vgpr25_vgpr26_vgpr27_vgpr28_vgpr29_vgpr30_vgpr31_vgpr32_vgpr33_vgpr34_vgpr35_vgpr36_vgpr37_vgpr38_vgpr39_vgpr40_vgpr41_vgpr42_vgpr43_vgpr44_vgpr45_vgpr46
+; GFX11-TRUE16-NEXT: ; implicit-def: $vgpr21_vgpr22_vgpr23_vgpr24_vgpr25_vgpr26_vgpr27_vgpr28_vgpr29_vgpr30_vgpr31_vgpr32_vgpr33_vgpr34_vgpr35_vgpr36_vgpr37_vgpr38_vgpr39_vgpr40_vgpr41_vgpr42_vgpr43_vgpr44_vgpr45_vgpr46_vgpr47_vgpr48_vgpr49_vgpr50_vgpr51_vgpr52
+; GFX11-TRUE16-NEXT: s_delay_alu instid0(VALU_DEP_1) | instskip(NEXT) | instid1(VALU_DEP_3)
+; GFX11-TRUE16-NEXT: v_dual_mov_b32 v25, v54 :: v_dual_mov_b32 v26, v53
+; GFX11-TRUE16-NEXT: ; implicit-def: $vgpr28_vgpr29_vgpr30_vgpr31_vgpr32_vgpr33_vgpr34_vgpr35_vgpr36_vgpr37_vgpr38_vgpr39_vgpr40_vgpr41_vgpr42_vgpr43_vgpr44_vgpr45_vgpr46_vgpr47_vgpr48_vgpr49_vgpr50_vgpr51_vgpr52_vgpr53_vgpr54_vgpr55_vgpr56_vgpr57_vgpr58_vgpr59
+; GFX11-TRUE16-NEXT: v_mov_b32_e32 v28, v64
+; GFX11-TRUE16-NEXT: ; implicit-def: $vgpr36_vgpr37_vgpr38_vgpr39_vgpr40_vgpr41_vgpr42_vgpr43_vgpr44_vgpr45_vgpr46_vgpr47_vgpr48_vgpr49_vgpr50_vgpr51_vgpr52_vgpr53_vgpr54_vgpr55_vgpr56_vgpr57_vgpr58_vgpr59_vgpr60_vgpr61_vgpr62_vgpr63_vgpr64_vgpr65_vgpr66_vgpr67
+; GFX11-TRUE16-NEXT: ; implicit-def: $vgpr45_vgpr46_vgpr47_vgpr48_vgpr49_vgpr50_vgpr51_vgpr52_vgpr53_vgpr54_vgpr55_vgpr56_vgpr57_vgpr58_vgpr59_vgpr60_vgpr61_vgpr62_vgpr63_vgpr64_vgpr65_vgpr66_vgpr67_vgpr68_vgpr69_vgpr70_vgpr71_vgpr72_vgpr73_vgpr74_vgpr75_vgpr76
+; GFX11-TRUE16-NEXT: ; implicit-def: $vgpr55_vgpr56_vgpr57_vgpr58_vgpr59_vgpr60_vgpr61_vgpr62_vgpr63_vgpr64_vgpr65_vgpr66_vgpr67_vgpr68_vgpr69_vgpr70_vgpr71_vgpr72_vgpr73_vgpr74_vgpr75_vgpr76_vgpr77_vgpr78_vgpr79_vgpr80_vgpr81_vgpr82_vgpr83_vgpr84_vgpr85_vgpr86
+; GFX11-TRUE16-NEXT: ; implicit-def: $vgpr66_vgpr67_vgpr68_vgpr69_vgpr70_vgpr71_vgpr72_vgpr73_vgpr74_vgpr75_vgpr76_vgpr77_vgpr78_vgpr79_vgpr80_vgpr81_vgpr82_vgpr83_vgpr84_vgpr85_vgpr86_vgpr87_vgpr88_vgpr89_vgpr90_vgpr91_vgpr92_vgpr93_vgpr94_vgpr95_vgpr96_vgpr97
+; GFX11-TRUE16-NEXT: ; implicit-def: $vgpr78_vgpr79_vgpr80_vgpr81_vgpr82_vgpr83_vgpr84_vgpr85_vgpr86_vgpr87_vgpr88_vgpr89_vgpr90_vgpr91_vgpr92_vgpr93_vgpr94_vgpr95_vgpr96_vgpr97_vgpr98_vgpr99_vgpr100_vgpr101_vgpr102_vgpr103_vgpr104_vgpr105_vgpr106_vgpr107_vgpr108_vgpr109
+; GFX11-TRUE16-NEXT: ; implicit-def: $vgpr91_vgpr92_vgpr93_vgpr94_vgpr95_vgpr96_vgpr97_vgpr98_vgpr99_vgpr100_vgpr101_vgpr102_vgpr103_vgpr104_vgpr105_vgpr106_vgpr107_vgpr108_vgpr109_vgpr110_vgpr111_vgpr112_vgpr113_vgpr114_vgpr115_vgpr116_vgpr117_vgpr118_vgpr119_vgpr120_vgpr121_vgpr122
+; GFX11-TRUE16-NEXT: ; implicit-def: $vgpr105_vgpr106_vgpr107_vgpr108_vgpr109_vgpr110_vgpr111_vgpr112_vgpr113_vgpr114_vgpr115_vgpr116_vgpr117_vgpr118_vgpr119_vgpr120_vgpr121_vgpr122_vgpr123_vgpr124_vgpr125_vgpr126_vgpr127_vgpr128_vgpr129_vgpr130_vgpr131_vgpr132_vgpr133_vgpr134_vgpr135_vgpr136
+; GFX11-TRUE16-NEXT: ; implicit-def: $vgpr120_vgpr121_vgpr122_vgpr123_vgpr124_vgpr125_vgpr126_vgpr127_vgpr128_vgpr129_vgpr130_vgpr131_vgpr132_vgpr133_vgpr134_vgpr135_vgpr136_vgpr137_vgpr138_vgpr139_vgpr140_vgpr141_vgpr142_vgpr143_vgpr144_vgpr145_vgpr146_vgpr147_vgpr148_vgpr149_vgpr150_vgpr151
+; GFX11-TRUE16-NEXT: ; implicit-def: $vgpr136_vgpr137_vgpr138_vgpr139_vgpr140_vgpr141_vgpr142_vgpr143_vgpr144_vgpr145_vgpr146_vgpr147_vgpr148_vgpr149_vgpr150_vgpr151_vgpr152_vgpr153_vgpr154_vgpr155_vgpr156_vgpr157_vgpr158_vgpr159_vgpr160_vgpr161_vgpr162_vgpr163_vgpr164_vgpr165_vgpr166_vgpr167
+; GFX11-TRUE16-NEXT: ; implicit-def: $vgpr153_vgpr154_vgpr155_vgpr156_vgpr157_vgpr158_vgpr159_vgpr160_vgpr161_vgpr162_vgpr163_vgpr164_vgpr165_vgpr166_vgpr167_vgpr168_vgpr169_vgpr170_vgpr171_vgpr172_vgpr173_vgpr174_vgpr175_vgpr176_vgpr177_vgpr178_vgpr179_vgpr180_vgpr181_vgpr182_vgpr183_vgpr184
; GFX11-TRUE16-NEXT: s_branch .LBB43_2
;
; GFX11-FAKE16-LABEL: bitcast_v56i16_to_v14i64_scalar:
@@ -33336,141 +34472,299 @@ define inreg <14 x i64> @bitcast_v56f16_to_v14i64_scalar(<56 x half> inreg %a, i
; GFX11-TRUE16-LABEL: bitcast_v56f16_to_v14i64_scalar:
; GFX11-TRUE16: ; %bb.0:
; GFX11-TRUE16-NEXT: s_waitcnt vmcnt(0) expcnt(0) lgkmcnt(0)
-; GFX11-TRUE16-NEXT: v_mov_b16_e32 v32.h, 0
; GFX11-TRUE16-NEXT: v_cmp_ne_u32_e32 vcc_lo, 0, v10
-; GFX11-TRUE16-NEXT: v_mov_b16_e32 v32.l, v9.h
-; GFX11-TRUE16-NEXT: v_mov_b16_e32 v33.l, v8.h
-; GFX11-TRUE16-NEXT: v_mov_b16_e32 v34.l, v7.h
-; GFX11-TRUE16-NEXT: v_mov_b16_e32 v33.h, v32.h
-; GFX11-TRUE16-NEXT: v_mov_b16_e32 v34.h, v32.h
-; GFX11-TRUE16-NEXT: v_mov_b16_e32 v35.l, v6.h
-; GFX11-TRUE16-NEXT: v_mov_b16_e32 v35.h, v32.h
-; GFX11-TRUE16-NEXT: v_mov_b16_e32 v36.l, v5.h
-; GFX11-TRUE16-NEXT: v_mov_b16_e32 v36.h, v32.h
-; GFX11-TRUE16-NEXT: v_mov_b16_e32 v37.l, v4.h
-; GFX11-TRUE16-NEXT: v_mov_b16_e32 v37.h, v32.h
-; GFX11-TRUE16-NEXT: v_mov_b16_e32 v38.l, v3.h
-; GFX11-TRUE16-NEXT: v_mov_b16_e32 v38.h, v32.h
-; GFX11-TRUE16-NEXT: v_mov_b16_e32 v39.l, v2.h
-; GFX11-TRUE16-NEXT: v_mov_b16_e32 v39.h, v32.h
-; GFX11-TRUE16-NEXT: v_mov_b16_e32 v48.l, v1.h
-; GFX11-TRUE16-NEXT: v_mov_b16_e32 v48.h, v32.h
-; GFX11-TRUE16-NEXT: v_mov_b16_e32 v49.l, v0.h
-; GFX11-TRUE16-NEXT: v_mov_b16_e32 v49.h, v32.h
-; GFX11-TRUE16-NEXT: v_and_b32_e32 v67, 0xffff, v0
-; GFX11-TRUE16-NEXT: v_and_b32_e32 v66, 0xffff, v1
-; GFX11-TRUE16-NEXT: v_and_b32_e32 v65, 0xffff, v2
-; GFX11-TRUE16-NEXT: v_and_b32_e32 v64, 0xffff, v3
-; GFX11-TRUE16-NEXT: v_and_b32_e32 v55, 0xffff, v4
-; GFX11-TRUE16-NEXT: v_and_b32_e32 v54, 0xffff, v5
-; GFX11-TRUE16-NEXT: v_and_b32_e32 v53, 0xffff, v6
-; GFX11-TRUE16-NEXT: v_and_b32_e32 v52, 0xffff, v7
-; GFX11-TRUE16-NEXT: v_and_b32_e32 v51, 0xffff, v8
-; GFX11-TRUE16-NEXT: v_and_b32_e32 v50, 0xffff, v9
-; GFX11-TRUE16-NEXT: s_lshr_b32 s40, s29, 16
-; GFX11-TRUE16-NEXT: s_lshr_b32 s41, s28, 16
-; GFX11-TRUE16-NEXT: s_lshr_b32 s42, s27, 16
-; GFX11-TRUE16-NEXT: s_lshr_b32 s14, s26, 16
-; GFX11-TRUE16-NEXT: s_lshr_b32 s13, s25, 16
-; GFX11-TRUE16-NEXT: s_lshr_b32 s12, s24, 16
-; GFX11-TRUE16-NEXT: s_lshr_b32 s11, s23, 16
-; GFX11-TRUE16-NEXT: s_lshr_b32 s10, s22, 16
-; GFX11-TRUE16-NEXT: s_lshr_b32 s9, s21, 16
-; GFX11-TRUE16-NEXT: s_lshr_b32 s8, s20, 16
-; GFX11-TRUE16-NEXT: s_lshr_b32 s7, s19, 16
-; GFX11-TRUE16-NEXT: s_lshr_b32 s6, s18, 16
-; GFX11-TRUE16-NEXT: s_lshr_b32 s5, s17, 16
-; GFX11-TRUE16-NEXT: s_lshr_b32 s4, s16, 16
-; GFX11-TRUE16-NEXT: s_lshr_b32 s43, s3, 16
-; GFX11-TRUE16-NEXT: s_lshr_b32 s44, s2, 16
-; GFX11-TRUE16-NEXT: s_lshr_b32 s45, s1, 16
-; GFX11-TRUE16-NEXT: s_lshr_b32 s46, s0, 16
-; GFX11-TRUE16-NEXT: s_mov_b32 s15, 0
-; GFX11-TRUE16-NEXT: s_pack_ll_b32_b16 s0, s0, s46
-; GFX11-TRUE16-NEXT: s_pack_ll_b32_b16 s1, s1, s45
-; GFX11-TRUE16-NEXT: s_pack_ll_b32_b16 s2, s2, s44
-; GFX11-TRUE16-NEXT: s_pack_ll_b32_b16 s3, s3, s43
-; GFX11-TRUE16-NEXT: s_pack_ll_b32_b16 s4, s16, s4
-; GFX11-TRUE16-NEXT: s_pack_ll_b32_b16 s5, s17, s5
-; GFX11-TRUE16-NEXT: s_pack_ll_b32_b16 s6, s18, s6
-; GFX11-TRUE16-NEXT: s_pack_ll_b32_b16 s7, s19, s7
-; GFX11-TRUE16-NEXT: s_pack_ll_b32_b16 s8, s20, s8
-; GFX11-TRUE16-NEXT: s_pack_ll_b32_b16 s9, s21, s9
-; GFX11-TRUE16-NEXT: s_pack_ll_b32_b16 s10, s22, s10
-; GFX11-TRUE16-NEXT: s_pack_ll_b32_b16 s11, s23, s11
-; GFX11-TRUE16-NEXT: s_pack_ll_b32_b16 s12, s24, s12
-; GFX11-TRUE16-NEXT: s_pack_ll_b32_b16 s13, s25, s13
-; GFX11-TRUE16-NEXT: s_pack_ll_b32_b16 s14, s26, s14
-; GFX11-TRUE16-NEXT: s_pack_ll_b32_b16 s16, s27, s42
-; GFX11-TRUE16-NEXT: s_pack_ll_b32_b16 s17, s28, s41
-; GFX11-TRUE16-NEXT: s_pack_ll_b32_b16 s18, s29, s40
+; GFX11-TRUE16-NEXT: s_clause 0x1f
+; GFX11-TRUE16-NEXT: scratch_store_b32 off, v40, s32 offset:316
+; GFX11-TRUE16-NEXT: scratch_store_b32 off, v41, s32 offset:312
+; GFX11-TRUE16-NEXT: scratch_store_b32 off, v42, s32 offset:308
+; GFX11-TRUE16-NEXT: scratch_store_b32 off, v43, s32 offset:304
+; GFX11-TRUE16-NEXT: scratch_store_b32 off, v44, s32 offset:300
+; GFX11-TRUE16-NEXT: scratch_store_b32 off, v45, s32 offset:296
+; GFX11-TRUE16-NEXT: scratch_store_b32 off, v46, s32 offset:292
+; GFX11-TRUE16-NEXT: scratch_store_b32 off, v47, s32 offset:288
+; GFX11-TRUE16-NEXT: scratch_store_b32 off, v56, s32 offset:284
+; GFX11-TRUE16-NEXT: scratch_store_b32 off, v57, s32 offset:280
+; GFX11-TRUE16-NEXT: scratch_store_b32 off, v58, s32 offset:276
+; GFX11-TRUE16-NEXT: scratch_store_b32 off, v59, s32 offset:272
+; GFX11-TRUE16-NEXT: scratch_store_b32 off, v60, s32 offset:268
+; GFX11-TRUE16-NEXT: scratch_store_b32 off, v61, s32 offset:264
+; GFX11-TRUE16-NEXT: scratch_store_b32 off, v62, s32 offset:260
+; GFX11-TRUE16-NEXT: scratch_store_b32 off, v63, s32 offset:256
+; GFX11-TRUE16-NEXT: scratch_store_b32 off, v72, s32 offset:252
+; GFX11-TRUE16-NEXT: scratch_store_b32 off, v73, s32 offset:248
+; GFX11-TRUE16-NEXT: scratch_store_b32 off, v74, s32 offset:244
+; GFX11-TRUE16-NEXT: scratch_store_b32 off, v75, s32 offset:240
+; GFX11-TRUE16-NEXT: scratch_store_b32 off, v76, s32 offset:236
+; GFX11-TRUE16-NEXT: scratch_store_b32 off, v77, s32 offset:232
+; GFX11-TRUE16-NEXT: scratch_store_b32 off, v78, s32 offset:228
+; GFX11-TRUE16-NEXT: scratch_store_b32 off, v79, s32 offset:224
+; GFX11-TRUE16-NEXT: scratch_store_b32 off, v88, s32 offset:220
+; GFX11-TRUE16-NEXT: scratch_store_b32 off, v89, s32 offset:216
+; GFX11-TRUE16-NEXT: scratch_store_b32 off, v90, s32 offset:212
+; GFX11-TRUE16-NEXT: scratch_store_b32 off, v91, s32 offset:208
+; GFX11-TRUE16-NEXT: scratch_store_b32 off, v92, s32 offset:204
+; GFX11-TRUE16-NEXT: scratch_store_b32 off, v93, s32 offset:200
+; GFX11-TRUE16-NEXT: scratch_store_b32 off, v94, s32 offset:196
+; GFX11-TRUE16-NEXT: scratch_store_b32 off, v95, s32 offset:192
+; GFX11-TRUE16-NEXT: s_clause 0x1f
+; GFX11-TRUE16-NEXT: scratch_store_b32 off, v104, s32 offset:188
+; GFX11-TRUE16-NEXT: scratch_store_b32 off, v105, s32 offset:184
+; GFX11-TRUE16-NEXT: scratch_store_b32 off, v106, s32 offset:180
+; GFX11-TRUE16-NEXT: scratch_store_b32 off, v107, s32 offset:176
+; GFX11-TRUE16-NEXT: scratch_store_b32 off, v108, s32 offset:172
+; GFX11-TRUE16-NEXT: scratch_store_b32 off, v109, s32 offset:168
+; GFX11-TRUE16-NEXT: scratch_store_b32 off, v110, s32 offset:164
+; GFX11-TRUE16-NEXT: scratch_store_b32 off, v111, s32 offset:160
+; GFX11-TRUE16-NEXT: scratch_store_b32 off, v120, s32 offset:156
+; GFX11-TRUE16-NEXT: scratch_store_b32 off, v121, s32 offset:152
+; GFX11-TRUE16-NEXT: scratch_store_b32 off, v122, s32 offset:148
+; GFX11-TRUE16-NEXT: scratch_store_b32 off, v123, s32 offset:144
+; GFX11-TRUE16-NEXT: scratch_store_b32 off, v124, s32 offset:140
+; GFX11-TRUE16-NEXT: scratch_store_b32 off, v125, s32 offset:136
+; GFX11-TRUE16-NEXT: scratch_store_b32 off, v126, s32 offset:132
+; GFX11-TRUE16-NEXT: scratch_store_b32 off, v127, s32 offset:128
+; GFX11-TRUE16-NEXT: scratch_store_b32 off, v136, s32 offset:124
+; GFX11-TRUE16-NEXT: scratch_store_b32 off, v137, s32 offset:120
+; GFX11-TRUE16-NEXT: scratch_store_b32 off, v138, s32 offset:116
+; GFX11-TRUE16-NEXT: scratch_store_b32 off, v139, s32 offset:112
+; GFX11-TRUE16-NEXT: scratch_store_b32 off, v140, s32 offset:108
+; GFX11-TRUE16-NEXT: scratch_store_b32 off, v141, s32 offset:104
+; GFX11-TRUE16-NEXT: scratch_store_b32 off, v142, s32 offset:100
+; GFX11-TRUE16-NEXT: scratch_store_b32 off, v143, s32 offset:96
+; GFX11-TRUE16-NEXT: scratch_store_b32 off, v152, s32 offset:92
+; GFX11-TRUE16-NEXT: scratch_store_b32 off, v153, s32 offset:88
+; GFX11-TRUE16-NEXT: scratch_store_b32 off, v154, s32 offset:84
+; GFX11-TRUE16-NEXT: scratch_store_b32 off, v155, s32 offset:80
+; GFX11-TRUE16-NEXT: scratch_store_b32 off, v156, s32 offset:76
+; GFX11-TRUE16-NEXT: scratch_store_b32 off, v157, s32 offset:72
+; GFX11-TRUE16-NEXT: scratch_store_b32 off, v158, s32 offset:68
+; GFX11-TRUE16-NEXT: scratch_store_b32 off, v159, s32 offset:64
+; GFX11-TRUE16-NEXT: s_clause 0xf
+; GFX11-TRUE16-NEXT: scratch_store_b32 off, v168, s32 offset:60
+; GFX11-TRUE16-NEXT: scratch_store_b32 off, v169, s32 offset:56
+; GFX11-TRUE16-NEXT: scratch_store_b32 off, v170, s32 offset:52
+; GFX11-TRUE16-NEXT: scratch_store_b32 off, v171, s32 offset:48
+; GFX11-TRUE16-NEXT: scratch_store_b32 off, v172, s32 offset:44
+; GFX11-TRUE16-NEXT: scratch_store_b32 off, v173, s32 offset:40
+; GFX11-TRUE16-NEXT: scratch_store_b32 off, v174, s32 offset:36
+; GFX11-TRUE16-NEXT: scratch_store_b32 off, v175, s32 offset:32
+; GFX11-TRUE16-NEXT: scratch_store_b32 off, v184, s32 offset:28
+; GFX11-TRUE16-NEXT: scratch_store_b32 off, v185, s32 offset:24
+; GFX11-TRUE16-NEXT: scratch_store_b32 off, v186, s32 offset:20
+; GFX11-TRUE16-NEXT: scratch_store_b32 off, v187, s32 offset:16
+; GFX11-TRUE16-NEXT: scratch_store_b32 off, v188, s32 offset:12
+; GFX11-TRUE16-NEXT: scratch_store_b32 off, v189, s32 offset:8
+; GFX11-TRUE16-NEXT: scratch_store_b32 off, v190, s32 offset:4
+; GFX11-TRUE16-NEXT: scratch_store_b32 off, v191, s32
+; GFX11-TRUE16-NEXT: v_dual_mov_b32 v28, v9 :: v_dual_mov_b32 v25, v7
+; GFX11-TRUE16-NEXT: v_dual_mov_b32 v26, v8 :: v_dual_mov_b32 v189, v5
+; GFX11-TRUE16-NEXT: v_dual_mov_b32 v188, v6 :: v_dual_mov_b32 v191, v3
+; GFX11-TRUE16-NEXT: v_dual_mov_b32 v190, v4 :: v_dual_mov_b32 v185, v2
+; GFX11-TRUE16-NEXT: v_dual_mov_b32 v186, v1 :: v_dual_mov_b32 v187, v0
+; GFX11-TRUE16-NEXT: s_lshr_b32 s15, s29, 16
+; GFX11-TRUE16-NEXT: s_lshr_b32 s14, s28, 16
+; GFX11-TRUE16-NEXT: s_lshr_b32 s13, s27, 16
+; GFX11-TRUE16-NEXT: s_lshr_b32 s12, s26, 16
+; GFX11-TRUE16-NEXT: s_lshr_b32 s11, s25, 16
+; GFX11-TRUE16-NEXT: s_lshr_b32 s10, s24, 16
+; GFX11-TRUE16-NEXT: s_lshr_b32 s9, s23, 16
+; GFX11-TRUE16-NEXT: s_lshr_b32 s8, s22, 16
+; GFX11-TRUE16-NEXT: s_lshr_b32 s7, s21, 16
+; GFX11-TRUE16-NEXT: s_lshr_b32 s6, s20, 16
+; GFX11-TRUE16-NEXT: s_lshr_b32 s5, s19, 16
+; GFX11-TRUE16-NEXT: s_lshr_b32 s4, s18, 16
+; GFX11-TRUE16-NEXT: s_lshr_b32 s43, s17, 16
+; GFX11-TRUE16-NEXT: s_lshr_b32 s44, s16, 16
+; GFX11-TRUE16-NEXT: s_lshr_b32 s45, s3, 16
+; GFX11-TRUE16-NEXT: s_lshr_b32 s46, s2, 16
+; GFX11-TRUE16-NEXT: s_lshr_b32 s41, s1, 16
+; GFX11-TRUE16-NEXT: s_lshr_b32 s40, s0, 16
+; GFX11-TRUE16-NEXT: s_mov_b32 s42, 0
+; GFX11-TRUE16-NEXT: s_pack_ll_b32_b16 s40, s0, s40
+; GFX11-TRUE16-NEXT: s_pack_ll_b32_b16 s41, s1, s41
+; GFX11-TRUE16-NEXT: s_pack_ll_b32_b16 s0, s2, s46
+; GFX11-TRUE16-NEXT: s_pack_ll_b32_b16 s1, s3, s45
+; GFX11-TRUE16-NEXT: s_pack_ll_b32_b16 s2, s16, s44
+; GFX11-TRUE16-NEXT: s_pack_ll_b32_b16 s3, s17, s43
+; GFX11-TRUE16-NEXT: s_pack_ll_b32_b16 s4, s18, s4
+; GFX11-TRUE16-NEXT: s_pack_ll_b32_b16 s5, s19, s5
+; GFX11-TRUE16-NEXT: s_pack_ll_b32_b16 s6, s20, s6
+; GFX11-TRUE16-NEXT: s_pack_ll_b32_b16 s7, s21, s7
+; GFX11-TRUE16-NEXT: s_pack_ll_b32_b16 s8, s22, s8
+; GFX11-TRUE16-NEXT: s_pack_ll_b32_b16 s9, s23, s9
+; GFX11-TRUE16-NEXT: s_pack_ll_b32_b16 s10, s24, s10
+; GFX11-TRUE16-NEXT: s_pack_ll_b32_b16 s11, s25, s11
+; GFX11-TRUE16-NEXT: s_pack_ll_b32_b16 s12, s26, s12
+; GFX11-TRUE16-NEXT: s_pack_ll_b32_b16 s13, s27, s13
+; GFX11-TRUE16-NEXT: s_pack_ll_b32_b16 s14, s28, s14
+; GFX11-TRUE16-NEXT: s_pack_ll_b32_b16 s15, s29, s15
; GFX11-TRUE16-NEXT: s_and_b32 s47, vcc_lo, exec_lo
; GFX11-TRUE16-NEXT: s_cbranch_scc0 .LBB47_4
; GFX11-TRUE16-NEXT: ; %bb.1: ; %cmp.false
-; GFX11-TRUE16-NEXT: v_lshl_or_b32 v18, v49, 16, v67
-; GFX11-TRUE16-NEXT: v_lshl_or_b32 v19, v48, 16, v66
-; GFX11-TRUE16-NEXT: v_lshl_or_b32 v20, v39, 16, v65
-; GFX11-TRUE16-NEXT: v_lshl_or_b32 v21, v38, 16, v64
-; GFX11-TRUE16-NEXT: v_lshl_or_b32 v22, v37, 16, v55
-; GFX11-TRUE16-NEXT: v_lshl_or_b32 v23, v36, 16, v54
-; GFX11-TRUE16-NEXT: v_lshl_or_b32 v24, v35, 16, v53
-; GFX11-TRUE16-NEXT: v_lshl_or_b32 v25, v34, 16, v52
-; GFX11-TRUE16-NEXT: v_lshl_or_b32 v26, v33, 16, v51
-; GFX11-TRUE16-NEXT: v_lshl_or_b32 v27, v32, 16, v50
-; GFX11-TRUE16-NEXT: v_dual_mov_b32 v0, s0 :: v_dual_mov_b32 v1, s1
-; GFX11-TRUE16-NEXT: v_dual_mov_b32 v2, s2 :: v_dual_mov_b32 v3, s3
-; GFX11-TRUE16-NEXT: v_dual_mov_b32 v4, s4 :: v_dual_mov_b32 v5, s5
-; GFX11-TRUE16-NEXT: v_dual_mov_b32 v6, s6 :: v_dual_mov_b32 v7, s7
-; GFX11-TRUE16-NEXT: v_dual_mov_b32 v8, s8 :: v_dual_mov_b32 v9, s9
-; GFX11-TRUE16-NEXT: v_dual_mov_b32 v10, s10 :: v_dual_mov_b32 v11, s11
-; GFX11-TRUE16-NEXT: v_dual_mov_b32 v12, s12 :: v_dual_mov_b32 v13, s13
-; GFX11-TRUE16-NEXT: v_dual_mov_b32 v14, s14 :: v_dual_mov_b32 v15, s16
-; GFX11-TRUE16-NEXT: v_dual_mov_b32 v16, s17 :: v_dual_mov_b32 v17, s18
-; GFX11-TRUE16-NEXT: s_and_not1_b32 vcc_lo, exec_lo, s15
+; GFX11-TRUE16-NEXT: v_dual_mov_b32 v0, s40 :: v_dual_mov_b32 v5, s0
+; GFX11-TRUE16-NEXT: v_dual_mov_b32 v2, s41 :: v_dual_mov_b32 v9, s1
+; GFX11-TRUE16-NEXT: v_dual_mov_b32 v14, s2 :: v_dual_mov_b32 v27, s4
+; GFX11-TRUE16-NEXT: v_dual_mov_b32 v20, s3 :: v_dual_mov_b32 v35, s5
+; GFX11-TRUE16-NEXT: v_dual_mov_b32 v44, s6 :: v_dual_mov_b32 v65, s8
+; GFX11-TRUE16-NEXT: v_dual_mov_b32 v54, s7 :: v_dual_mov_b32 v77, s9
+; GFX11-TRUE16-NEXT: v_dual_mov_b32 v90, s10 :: v_dual_mov_b32 v119, s12
+; GFX11-TRUE16-NEXT: v_dual_mov_b32 v104, s11 :: v_dual_mov_b32 v135, s13
+; GFX11-TRUE16-NEXT: v_mov_b32_e32 v152, s14
+; GFX11-TRUE16-NEXT: v_mov_b32_e32 v170, s15
+; GFX11-TRUE16-NEXT: s_and_not1_b32 vcc_lo, exec_lo, s42
; GFX11-TRUE16-NEXT: s_cbranch_vccnz .LBB47_3
; GFX11-TRUE16-NEXT: .LBB47_2: ; %cmp.true
-; GFX11-TRUE16-NEXT: v_lshl_or_b32 v18, v49, 16, v67
-; GFX11-TRUE16-NEXT: v_lshl_or_b32 v19, v48, 16, v66
-; GFX11-TRUE16-NEXT: v_lshl_or_b32 v20, v39, 16, v65
-; GFX11-TRUE16-NEXT: v_lshl_or_b32 v21, v38, 16, v64
-; GFX11-TRUE16-NEXT: v_lshl_or_b32 v22, v37, 16, v55
-; GFX11-TRUE16-NEXT: v_lshl_or_b32 v23, v36, 16, v54
-; GFX11-TRUE16-NEXT: v_lshl_or_b32 v24, v35, 16, v53
-; GFX11-TRUE16-NEXT: v_lshl_or_b32 v25, v34, 16, v52
-; GFX11-TRUE16-NEXT: v_lshl_or_b32 v26, v33, 16, v51
-; GFX11-TRUE16-NEXT: v_lshl_or_b32 v27, v32, 16, v50
-; GFX11-TRUE16-NEXT: v_pk_add_f16 v0, 0x200, s0 op_sel_hi:[0,1]
-; GFX11-TRUE16-NEXT: v_pk_add_f16 v1, 0x200, s1 op_sel_hi:[0,1]
-; GFX11-TRUE16-NEXT: v_pk_add_f16 v2, 0x200, s2 op_sel_hi:[0,1]
-; GFX11-TRUE16-NEXT: v_pk_add_f16 v3, 0x200, s3 op_sel_hi:[0,1]
-; GFX11-TRUE16-NEXT: v_pk_add_f16 v4, 0x200, s4 op_sel_hi:[0,1]
-; GFX11-TRUE16-NEXT: v_pk_add_f16 v5, 0x200, s5 op_sel_hi:[0,1]
-; GFX11-TRUE16-NEXT: v_pk_add_f16 v6, 0x200, s6 op_sel_hi:[0,1]
-; GFX11-TRUE16-NEXT: v_pk_add_f16 v7, 0x200, s7 op_sel_hi:[0,1]
-; GFX11-TRUE16-NEXT: v_pk_add_f16 v8, 0x200, s8 op_sel_hi:[0,1]
-; GFX11-TRUE16-NEXT: v_pk_add_f16 v9, 0x200, s9 op_sel_hi:[0,1]
-; GFX11-TRUE16-NEXT: v_pk_add_f16 v10, 0x200, s10 op_sel_hi:[0,1]
-; GFX11-TRUE16-NEXT: v_pk_add_f16 v11, 0x200, s11 op_sel_hi:[0,1]
-; GFX11-TRUE16-NEXT: v_pk_add_f16 v12, 0x200, s12 op_sel_hi:[0,1]
-; GFX11-TRUE16-NEXT: v_pk_add_f16 v13, 0x200, s13 op_sel_hi:[0,1]
-; GFX11-TRUE16-NEXT: v_pk_add_f16 v14, 0x200, s14 op_sel_hi:[0,1]
-; GFX11-TRUE16-NEXT: v_pk_add_f16 v15, 0x200, s16 op_sel_hi:[0,1]
-; GFX11-TRUE16-NEXT: v_pk_add_f16 v16, 0x200, s17 op_sel_hi:[0,1]
-; GFX11-TRUE16-NEXT: v_pk_add_f16 v17, 0x200, s18 op_sel_hi:[0,1]
-; GFX11-TRUE16-NEXT: v_pk_add_f16 v18, 0x200, v18 op_sel_hi:[0,1]
-; GFX11-TRUE16-NEXT: v_pk_add_f16 v19, 0x200, v19 op_sel_hi:[0,1]
-; GFX11-TRUE16-NEXT: v_pk_add_f16 v20, 0x200, v20 op_sel_hi:[0,1]
-; GFX11-TRUE16-NEXT: v_pk_add_f16 v21, 0x200, v21 op_sel_hi:[0,1]
-; GFX11-TRUE16-NEXT: v_pk_add_f16 v22, 0x200, v22 op_sel_hi:[0,1]
-; GFX11-TRUE16-NEXT: v_pk_add_f16 v23, 0x200, v23 op_sel_hi:[0,1]
-; GFX11-TRUE16-NEXT: v_pk_add_f16 v24, 0x200, v24 op_sel_hi:[0,1]
+; GFX11-TRUE16-NEXT: v_pk_add_f16 v0, 0x200, s40 op_sel_hi:[0,1]
+; GFX11-TRUE16-NEXT: v_pk_add_f16 v2, 0x200, s41 op_sel_hi:[0,1]
+; GFX11-TRUE16-NEXT: v_pk_add_f16 v187, 0x200, v187 op_sel_hi:[0,1]
+; GFX11-TRUE16-NEXT: v_pk_add_f16 v186, 0x200, v186 op_sel_hi:[0,1]
+; GFX11-TRUE16-NEXT: v_pk_add_f16 v185, 0x200, v185 op_sel_hi:[0,1]
+; GFX11-TRUE16-NEXT: v_pk_add_f16 v191, 0x200, v191 op_sel_hi:[0,1]
+; GFX11-TRUE16-NEXT: v_pk_add_f16 v190, 0x200, v190 op_sel_hi:[0,1]
+; GFX11-TRUE16-NEXT: v_pk_add_f16 v189, 0x200, v189 op_sel_hi:[0,1]
+; GFX11-TRUE16-NEXT: v_pk_add_f16 v188, 0x200, v188 op_sel_hi:[0,1]
; GFX11-TRUE16-NEXT: v_pk_add_f16 v25, 0x200, v25 op_sel_hi:[0,1]
; GFX11-TRUE16-NEXT: v_pk_add_f16 v26, 0x200, v26 op_sel_hi:[0,1]
-; GFX11-TRUE16-NEXT: v_pk_add_f16 v27, 0x200, v27 op_sel_hi:[0,1]
+; GFX11-TRUE16-NEXT: v_pk_add_f16 v28, 0x200, v28 op_sel_hi:[0,1]
+; GFX11-TRUE16-NEXT: v_pk_add_f16 v5, 0x200, s0 op_sel_hi:[0,1]
+; GFX11-TRUE16-NEXT: v_pk_add_f16 v9, 0x200, s1 op_sel_hi:[0,1]
+; GFX11-TRUE16-NEXT: v_pk_add_f16 v14, 0x200, s2 op_sel_hi:[0,1]
+; GFX11-TRUE16-NEXT: v_pk_add_f16 v20, 0x200, s3 op_sel_hi:[0,1]
+; GFX11-TRUE16-NEXT: v_pk_add_f16 v27, 0x200, s4 op_sel_hi:[0,1]
+; GFX11-TRUE16-NEXT: v_pk_add_f16 v35, 0x200, s5 op_sel_hi:[0,1]
+; GFX11-TRUE16-NEXT: v_pk_add_f16 v44, 0x200, s6 op_sel_hi:[0,1]
+; GFX11-TRUE16-NEXT: v_pk_add_f16 v54, 0x200, s7 op_sel_hi:[0,1]
+; GFX11-TRUE16-NEXT: v_pk_add_f16 v65, 0x200, s8 op_sel_hi:[0,1]
+; GFX11-TRUE16-NEXT: v_pk_add_f16 v77, 0x200, s9 op_sel_hi:[0,1]
+; GFX11-TRUE16-NEXT: v_pk_add_f16 v90, 0x200, s10 op_sel_hi:[0,1]
+; GFX11-TRUE16-NEXT: v_pk_add_f16 v104, 0x200, s11 op_sel_hi:[0,1]
+; GFX11-TRUE16-NEXT: v_pk_add_f16 v119, 0x200, s12 op_sel_hi:[0,1]
+; GFX11-TRUE16-NEXT: v_pk_add_f16 v135, 0x200, s13 op_sel_hi:[0,1]
+; GFX11-TRUE16-NEXT: v_pk_add_f16 v152, 0x200, s14 op_sel_hi:[0,1]
+; GFX11-TRUE16-NEXT: v_pk_add_f16 v170, 0x200, s15 op_sel_hi:[0,1]
; GFX11-TRUE16-NEXT: .LBB47_3: ; %end
+; GFX11-TRUE16-NEXT: v_dual_mov_b32 v1, v2 :: v_dual_mov_b32 v2, v5
+; GFX11-TRUE16-NEXT: v_dual_mov_b32 v5, v20 :: v_dual_mov_b32 v6, v27
+; GFX11-TRUE16-NEXT: v_dual_mov_b32 v7, v35 :: v_dual_mov_b32 v8, v44
+; GFX11-TRUE16-NEXT: v_dual_mov_b32 v11, v77 :: v_dual_mov_b32 v12, v90
+; GFX11-TRUE16-NEXT: v_mov_b32_e32 v13, v104
+; GFX11-TRUE16-NEXT: v_dual_mov_b32 v15, v135 :: v_dual_mov_b32 v16, v152
+; GFX11-TRUE16-NEXT: v_dual_mov_b32 v17, v170 :: v_dual_mov_b32 v18, v187
+; GFX11-TRUE16-NEXT: v_dual_mov_b32 v19, v186 :: v_dual_mov_b32 v20, v185
+; GFX11-TRUE16-NEXT: v_dual_mov_b32 v21, v191 :: v_dual_mov_b32 v22, v190
+; GFX11-TRUE16-NEXT: v_dual_mov_b32 v23, v189 :: v_dual_mov_b32 v24, v188
+; GFX11-TRUE16-NEXT: s_clause 0x1f
+; GFX11-TRUE16-NEXT: scratch_load_b32 v191, off, s32
+; GFX11-TRUE16-NEXT: scratch_load_b32 v190, off, s32 offset:4
+; GFX11-TRUE16-NEXT: scratch_load_b32 v189, off, s32 offset:8
+; GFX11-TRUE16-NEXT: scratch_load_b32 v188, off, s32 offset:12
+; GFX11-TRUE16-NEXT: scratch_load_b32 v187, off, s32 offset:16
+; GFX11-TRUE16-NEXT: scratch_load_b32 v186, off, s32 offset:20
+; GFX11-TRUE16-NEXT: scratch_load_b32 v185, off, s32 offset:24
+; GFX11-TRUE16-NEXT: scratch_load_b32 v184, off, s32 offset:28
+; GFX11-TRUE16-NEXT: scratch_load_b32 v175, off, s32 offset:32
+; GFX11-TRUE16-NEXT: scratch_load_b32 v174, off, s32 offset:36
+; GFX11-TRUE16-NEXT: scratch_load_b32 v173, off, s32 offset:40
+; GFX11-TRUE16-NEXT: scratch_load_b32 v172, off, s32 offset:44
+; GFX11-TRUE16-NEXT: scratch_load_b32 v171, off, s32 offset:48
+; GFX11-TRUE16-NEXT: scratch_load_b32 v170, off, s32 offset:52
+; GFX11-TRUE16-NEXT: scratch_load_b32 v169, off, s32 offset:56
+; GFX11-TRUE16-NEXT: scratch_load_b32 v168, off, s32 offset:60
+; GFX11-TRUE16-NEXT: scratch_load_b32 v159, off, s32 offset:64
+; GFX11-TRUE16-NEXT: scratch_load_b32 v158, off, s32 offset:68
+; GFX11-TRUE16-NEXT: scratch_load_b32 v157, off, s32 offset:72
+; GFX11-TRUE16-NEXT: scratch_load_b32 v156, off, s32 offset:76
+; GFX11-TRUE16-NEXT: scratch_load_b32 v155, off, s32 offset:80
+; GFX11-TRUE16-NEXT: scratch_load_b32 v154, off, s32 offset:84
+; GFX11-TRUE16-NEXT: scratch_load_b32 v153, off, s32 offset:88
+; GFX11-TRUE16-NEXT: scratch_load_b32 v152, off, s32 offset:92
+; GFX11-TRUE16-NEXT: scratch_load_b32 v143, off, s32 offset:96
+; GFX11-TRUE16-NEXT: scratch_load_b32 v142, off, s32 offset:100
+; GFX11-TRUE16-NEXT: scratch_load_b32 v141, off, s32 offset:104
+; GFX11-TRUE16-NEXT: scratch_load_b32 v140, off, s32 offset:108
+; GFX11-TRUE16-NEXT: scratch_load_b32 v139, off, s32 offset:112
+; GFX11-TRUE16-NEXT: scratch_load_b32 v138, off, s32 offset:116
+; GFX11-TRUE16-NEXT: scratch_load_b32 v137, off, s32 offset:120
+; GFX11-TRUE16-NEXT: scratch_load_b32 v136, off, s32 offset:124
+; GFX11-TRUE16-NEXT: s_clause 0x1f
+; GFX11-TRUE16-NEXT: scratch_load_b32 v127, off, s32 offset:128
+; GFX11-TRUE16-NEXT: scratch_load_b32 v126, off, s32 offset:132
+; GFX11-TRUE16-NEXT: scratch_load_b32 v125, off, s32 offset:136
+; GFX11-TRUE16-NEXT: scratch_load_b32 v124, off, s32 offset:140
+; GFX11-TRUE16-NEXT: scratch_load_b32 v123, off, s32 offset:144
+; GFX11-TRUE16-NEXT: scratch_load_b32 v122, off, s32 offset:148
+; GFX11-TRUE16-NEXT: scratch_load_b32 v121, off, s32 offset:152
+; GFX11-TRUE16-NEXT: scratch_load_b32 v120, off, s32 offset:156
+; GFX11-TRUE16-NEXT: scratch_load_b32 v111, off, s32 offset:160
+; GFX11-TRUE16-NEXT: scratch_load_b32 v110, off, s32 offset:164
+; GFX11-TRUE16-NEXT: scratch_load_b32 v109, off, s32 offset:168
+; GFX11-TRUE16-NEXT: scratch_load_b32 v108, off, s32 offset:172
+; GFX11-TRUE16-NEXT: scratch_load_b32 v107, off, s32 offset:176
+; GFX11-TRUE16-NEXT: scratch_load_b32 v106, off, s32 offset:180
+; GFX11-TRUE16-NEXT: scratch_load_b32 v105, off, s32 offset:184
+; GFX11-TRUE16-NEXT: scratch_load_b32 v104, off, s32 offset:188
+; GFX11-TRUE16-NEXT: scratch_load_b32 v95, off, s32 offset:192
+; GFX11-TRUE16-NEXT: scratch_load_b32 v94, off, s32 offset:196
+; GFX11-TRUE16-NEXT: scratch_load_b32 v93, off, s32 offset:200
+; GFX11-TRUE16-NEXT: scratch_load_b32 v92, off, s32 offset:204
+; GFX11-TRUE16-NEXT: scratch_load_b32 v91, off, s32 offset:208
+; GFX11-TRUE16-NEXT: scratch_load_b32 v90, off, s32 offset:212
+; GFX11-TRUE16-NEXT: scratch_load_b32 v89, off, s32 offset:216
+; GFX11-TRUE16-NEXT: scratch_load_b32 v88, off, s32 offset:220
+; GFX11-TRUE16-NEXT: scratch_load_b32 v79, off, s32 offset:224
+; GFX11-TRUE16-NEXT: scratch_load_b32 v78, off, s32 offset:228
+; GFX11-TRUE16-NEXT: scratch_load_b32 v77, off, s32 offset:232
+; GFX11-TRUE16-NEXT: scratch_load_b32 v76, off, s32 offset:236
+; GFX11-TRUE16-NEXT: scratch_load_b32 v75, off, s32 offset:240
+; GFX11-TRUE16-NEXT: scratch_load_b32 v74, off, s32 offset:244
+; GFX11-TRUE16-NEXT: scratch_load_b32 v73, off, s32 offset:248
+; GFX11-TRUE16-NEXT: scratch_load_b32 v72, off, s32 offset:252
+; GFX11-TRUE16-NEXT: s_clause 0xf
+; GFX11-TRUE16-NEXT: scratch_load_b32 v63, off, s32 offset:256
+; GFX11-TRUE16-NEXT: scratch_load_b32 v62, off, s32 offset:260
+; GFX11-TRUE16-NEXT: scratch_load_b32 v61, off, s32 offset:264
+; GFX11-TRUE16-NEXT: scratch_load_b32 v60, off, s32 offset:268
+; GFX11-TRUE16-NEXT: scratch_load_b32 v59, off, s32 offset:272
+; GFX11-TRUE16-NEXT: scratch_load_b32 v58, off, s32 offset:276
+; GFX11-TRUE16-NEXT: scratch_load_b32 v57, off, s32 offset:280
+; GFX11-TRUE16-NEXT: scratch_load_b32 v56, off, s32 offset:284
+; GFX11-TRUE16-NEXT: scratch_load_b32 v47, off, s32 offset:288
+; GFX11-TRUE16-NEXT: scratch_load_b32 v46, off, s32 offset:292
+; GFX11-TRUE16-NEXT: scratch_load_b32 v45, off, s32 offset:296
+; GFX11-TRUE16-NEXT: scratch_load_b32 v44, off, s32 offset:300
+; GFX11-TRUE16-NEXT: scratch_load_b32 v43, off, s32 offset:304
+; GFX11-TRUE16-NEXT: scratch_load_b32 v42, off, s32 offset:308
+; GFX11-TRUE16-NEXT: scratch_load_b32 v41, off, s32 offset:312
+; GFX11-TRUE16-NEXT: scratch_load_b32 v40, off, s32 offset:316
+; GFX11-TRUE16-NEXT: v_dual_mov_b32 v3, v9 :: v_dual_mov_b32 v4, v14
+; GFX11-TRUE16-NEXT: v_dual_mov_b32 v9, v54 :: v_dual_mov_b32 v10, v65
+; GFX11-TRUE16-NEXT: v_dual_mov_b32 v14, v119 :: v_dual_mov_b32 v27, v28
+; GFX11-TRUE16-NEXT: s_waitcnt vmcnt(0)
; GFX11-TRUE16-NEXT: s_setpc_b64 s[30:31]
; GFX11-TRUE16-NEXT: .LBB47_4:
+; GFX11-TRUE16-NEXT: v_dual_mov_b32 v64, v28 :: v_dual_mov_b32 v53, v26
+; GFX11-TRUE16-NEXT: v_mov_b32_e32 v54, v25
; GFX11-TRUE16-NEXT: ; implicit-def: $vgpr0_vgpr1_vgpr2_vgpr3_vgpr4_vgpr5_vgpr6_vgpr7_vgpr8_vgpr9_vgpr10_vgpr11_vgpr12_vgpr13_vgpr14_vgpr15_vgpr16_vgpr17_vgpr18_vgpr19_vgpr20_vgpr21_vgpr22_vgpr23_vgpr24_vgpr25_vgpr26_vgpr27_vgpr28_vgpr29_vgpr30_vgpr31
+; GFX11-TRUE16-NEXT: ; implicit-def: $vgpr1_vgpr2_vgpr3_vgpr4_vgpr5_vgpr6_vgpr7_vgpr8_vgpr9_vgpr10_vgpr11_vgpr12_vgpr13_vgpr14_vgpr15_vgpr16_vgpr17_vgpr18_vgpr19_vgpr20_vgpr21_vgpr22_vgpr23_vgpr24_vgpr25_vgpr26_vgpr27_vgpr28_vgpr29_vgpr30_vgpr31_vgpr32
+; GFX11-TRUE16-NEXT: ; implicit-def: $vgpr3_vgpr4_vgpr5_vgpr6_vgpr7_vgpr8_vgpr9_vgpr10_vgpr11_vgpr12_vgpr13_vgpr14_vgpr15_vgpr16_vgpr17_vgpr18_vgpr19_vgpr20_vgpr21_vgpr22_vgpr23_vgpr24_vgpr25_vgpr26_vgpr27_vgpr28_vgpr29_vgpr30_vgpr31_vgpr32_vgpr33_vgpr34
+; GFX11-TRUE16-NEXT: ; implicit-def: $vgpr6_vgpr7_vgpr8_vgpr9_vgpr10_vgpr11_vgpr12_vgpr13_vgpr14_vgpr15_vgpr16_vgpr17_vgpr18_vgpr19_vgpr20_vgpr21_vgpr22_vgpr23_vgpr24_vgpr25_vgpr26_vgpr27_vgpr28_vgpr29_vgpr30_vgpr31_vgpr32_vgpr33_vgpr34_vgpr35_vgpr36_vgpr37
+; GFX11-TRUE16-NEXT: ; implicit-def: $vgpr10_vgpr11_vgpr12_vgpr13_vgpr14_vgpr15_vgpr16_vgpr17_vgpr18_vgpr19_vgpr20_vgpr21_vgpr22_vgpr23_vgpr24_vgpr25_vgpr26_vgpr27_vgpr28_vgpr29_vgpr30_vgpr31_vgpr32_vgpr33_vgpr34_vgpr35_vgpr36_vgpr37_vgpr38_vgpr39_vgpr40_vgpr41
+; GFX11-TRUE16-NEXT: ; implicit-def: $vgpr15_vgpr16_vgpr17_vgpr18_vgpr19_vgpr20_vgpr21_vgpr22_vgpr23_vgpr24_vgpr25_vgpr26_vgpr27_vgpr28_vgpr29_vgpr30_vgpr31_vgpr32_vgpr33_vgpr34_vgpr35_vgpr36_vgpr37_vgpr38_vgpr39_vgpr40_vgpr41_vgpr42_vgpr43_vgpr44_vgpr45_vgpr46
+; GFX11-TRUE16-NEXT: ; implicit-def: $vgpr21_vgpr22_vgpr23_vgpr24_vgpr25_vgpr26_vgpr27_vgpr28_vgpr29_vgpr30_vgpr31_vgpr32_vgpr33_vgpr34_vgpr35_vgpr36_vgpr37_vgpr38_vgpr39_vgpr40_vgpr41_vgpr42_vgpr43_vgpr44_vgpr45_vgpr46_vgpr47_vgpr48_vgpr49_vgpr50_vgpr51_vgpr52
+; GFX11-TRUE16-NEXT: s_delay_alu instid0(VALU_DEP_1) | instskip(NEXT) | instid1(VALU_DEP_3)
+; GFX11-TRUE16-NEXT: v_dual_mov_b32 v25, v54 :: v_dual_mov_b32 v26, v53
+; GFX11-TRUE16-NEXT: ; implicit-def: $vgpr28_vgpr29_vgpr30_vgpr31_vgpr32_vgpr33_vgpr34_vgpr35_vgpr36_vgpr37_vgpr38_vgpr39_vgpr40_vgpr41_vgpr42_vgpr43_vgpr44_vgpr45_vgpr46_vgpr47_vgpr48_vgpr49_vgpr50_vgpr51_vgpr52_vgpr53_vgpr54_vgpr55_vgpr56_vgpr57_vgpr58_vgpr59
+; GFX11-TRUE16-NEXT: v_mov_b32_e32 v28, v64
+; GFX11-TRUE16-NEXT: ; implicit-def: $vgpr36_vgpr37_vgpr38_vgpr39_vgpr40_vgpr41_vgpr42_vgpr43_vgpr44_vgpr45_vgpr46_vgpr47_vgpr48_vgpr49_vgpr50_vgpr51_vgpr52_vgpr53_vgpr54_vgpr55_vgpr56_vgpr57_vgpr58_vgpr59_vgpr60_vgpr61_vgpr62_vgpr63_vgpr64_vgpr65_vgpr66_vgpr67
+; GFX11-TRUE16-NEXT: ; implicit-def: $vgpr45_vgpr46_vgpr47_vgpr48_vgpr49_vgpr50_vgpr51_vgpr52_vgpr53_vgpr54_vgpr55_vgpr56_vgpr57_vgpr58_vgpr59_vgpr60_vgpr61_vgpr62_vgpr63_vgpr64_vgpr65_vgpr66_vgpr67_vgpr68_vgpr69_vgpr70_vgpr71_vgpr72_vgpr73_vgpr74_vgpr75_vgpr76
+; GFX11-TRUE16-NEXT: ; implicit-def: $vgpr55_vgpr56_vgpr57_vgpr58_vgpr59_vgpr60_vgpr61_vgpr62_vgpr63_vgpr64_vgpr65_vgpr66_vgpr67_vgpr68_vgpr69_vgpr70_vgpr71_vgpr72_vgpr73_vgpr74_vgpr75_vgpr76_vgpr77_vgpr78_vgpr79_vgpr80_vgpr81_vgpr82_vgpr83_vgpr84_vgpr85_vgpr86
+; GFX11-TRUE16-NEXT: ; implicit-def: $vgpr66_vgpr67_vgpr68_vgpr69_vgpr70_vgpr71_vgpr72_vgpr73_vgpr74_vgpr75_vgpr76_vgpr77_vgpr78_vgpr79_vgpr80_vgpr81_vgpr82_vgpr83_vgpr84_vgpr85_vgpr86_vgpr87_vgpr88_vgpr89_vgpr90_vgpr91_vgpr92_vgpr93_vgpr94_vgpr95_vgpr96_vgpr97
+; GFX11-TRUE16-NEXT: ; implicit-def: $vgpr78_vgpr79_vgpr80_vgpr81_vgpr82_vgpr83_vgpr84_vgpr85_vgpr86_vgpr87_vgpr88_vgpr89_vgpr90_vgpr91_vgpr92_vgpr93_vgpr94_vgpr95_vgpr96_vgpr97_vgpr98_vgpr99_vgpr100_vgpr101_vgpr102_vgpr103_vgpr104_vgpr105_vgpr106_vgpr107_vgpr108_vgpr109
+; GFX11-TRUE16-NEXT: ; implicit-def: $vgpr91_vgpr92_vgpr93_vgpr94_vgpr95_vgpr96_vgpr97_vgpr98_vgpr99_vgpr100_vgpr101_vgpr102_vgpr103_vgpr104_vgpr105_vgpr106_vgpr107_vgpr108_vgpr109_vgpr110_vgpr111_vgpr112_vgpr113_vgpr114_vgpr115_vgpr116_vgpr117_vgpr118_vgpr119_vgpr120_vgpr121_vgpr122
+; GFX11-TRUE16-NEXT: ; implicit-def: $vgpr105_vgpr106_vgpr107_vgpr108_vgpr109_vgpr110_vgpr111_vgpr112_vgpr113_vgpr114_vgpr115_vgpr116_vgpr117_vgpr118_vgpr119_vgpr120_vgpr121_vgpr122_vgpr123_vgpr124_vgpr125_vgpr126_vgpr127_vgpr128_vgpr129_vgpr130_vgpr131_vgpr132_vgpr133_vgpr134_vgpr135_vgpr136
+; GFX11-TRUE16-NEXT: ; implicit-def: $vgpr120_vgpr121_vgpr122_vgpr123_vgpr124_vgpr125_vgpr126_vgpr127_vgpr128_vgpr129_vgpr130_vgpr131_vgpr132_vgpr133_vgpr134_vgpr135_vgpr136_vgpr137_vgpr138_vgpr139_vgpr140_vgpr141_vgpr142_vgpr143_vgpr144_vgpr145_vgpr146_vgpr147_vgpr148_vgpr149_vgpr150_vgpr151
+; GFX11-TRUE16-NEXT: ; implicit-def: $vgpr136_vgpr137_vgpr138_vgpr139_vgpr140_vgpr141_vgpr142_vgpr143_vgpr144_vgpr145_vgpr146_vgpr147_vgpr148_vgpr149_vgpr150_vgpr151_vgpr152_vgpr153_vgpr154_vgpr155_vgpr156_vgpr157_vgpr158_vgpr159_vgpr160_vgpr161_vgpr162_vgpr163_vgpr164_vgpr165_vgpr166_vgpr167
+; GFX11-TRUE16-NEXT: ; implicit-def: $vgpr153_vgpr154_vgpr155_vgpr156_vgpr157_vgpr158_vgpr159_vgpr160_vgpr161_vgpr162_vgpr163_vgpr164_vgpr165_vgpr166_vgpr167_vgpr168_vgpr169_vgpr170_vgpr171_vgpr172_vgpr173_vgpr174_vgpr175_vgpr176_vgpr177_vgpr178_vgpr179_vgpr180_vgpr181_vgpr182_vgpr183_vgpr184
; GFX11-TRUE16-NEXT: s_branch .LBB47_2
;
; GFX11-FAKE16-LABEL: bitcast_v56f16_to_v14i64_scalar:
@@ -35225,191 +36519,364 @@ define inreg <56 x i16> @bitcast_v14f64_to_v56i16_scalar(<14 x double> inreg %a,
; GFX9-NEXT: ; implicit-def: $vgpr27
; GFX9-NEXT: s_branch .LBB49_2
;
-; GFX11-LABEL: bitcast_v14f64_to_v56i16_scalar:
-; GFX11: ; %bb.0:
-; GFX11-NEXT: s_waitcnt vmcnt(0) expcnt(0) lgkmcnt(0)
-; GFX11-NEXT: v_cmp_ne_u32_e32 vcc_lo, 0, v10
-; GFX11-NEXT: v_dual_mov_b32 v27, s0 :: v_dual_mov_b32 v28, s1
-; GFX11-NEXT: v_dual_mov_b32 v25, s2 :: v_dual_mov_b32 v26, s3
-; GFX11-NEXT: v_dual_mov_b32 v23, s16 :: v_dual_mov_b32 v24, s17
-; GFX11-NEXT: v_dual_mov_b32 v21, s18 :: v_dual_mov_b32 v22, s19
-; GFX11-NEXT: v_dual_mov_b32 v19, s20 :: v_dual_mov_b32 v20, s21
-; GFX11-NEXT: v_dual_mov_b32 v11, s22 :: v_dual_mov_b32 v12, s23
-; GFX11-NEXT: v_dual_mov_b32 v17, s24 :: v_dual_mov_b32 v18, s25
-; GFX11-NEXT: v_dual_mov_b32 v13, s26 :: v_dual_mov_b32 v14, s27
-; GFX11-NEXT: v_dual_mov_b32 v15, s28 :: v_dual_mov_b32 v16, s29
-; GFX11-NEXT: s_mov_b32 s0, 0
-; GFX11-NEXT: s_and_b32 s1, vcc_lo, exec_lo
-; GFX11-NEXT: s_cbranch_scc0 .LBB49_4
-; GFX11-NEXT: ; %bb.1: ; %cmp.false
-; GFX11-NEXT: v_lshrrev_b32_e32 v38, 16, v9
-; GFX11-NEXT: v_lshrrev_b32_e32 v39, 16, v8
-; GFX11-NEXT: v_lshrrev_b32_e32 v48, 16, v7
-; GFX11-NEXT: v_lshrrev_b32_e32 v49, 16, v6
-; GFX11-NEXT: v_lshrrev_b32_e32 v50, 16, v5
-; GFX11-NEXT: v_lshrrev_b32_e32 v51, 16, v4
-; GFX11-NEXT: v_lshrrev_b32_e32 v52, 16, v3
-; GFX11-NEXT: v_lshrrev_b32_e32 v53, 16, v2
-; GFX11-NEXT: v_lshrrev_b32_e32 v54, 16, v1
-; GFX11-NEXT: v_lshrrev_b32_e32 v55, 16, v0
-; GFX11-NEXT: v_lshrrev_b32_e32 v64, 16, v16
-; GFX11-NEXT: v_lshrrev_b32_e32 v65, 16, v15
-; GFX11-NEXT: v_lshrrev_b32_e32 v66, 16, v14
-; GFX11-NEXT: v_lshrrev_b32_e32 v67, 16, v13
-; GFX11-NEXT: v_lshrrev_b32_e32 v68, 16, v18
-; GFX11-NEXT: v_lshrrev_b32_e32 v69, 16, v17
-; GFX11-NEXT: v_lshrrev_b32_e32 v70, 16, v12
-; GFX11-NEXT: v_lshrrev_b32_e32 v10, 16, v11
-; GFX11-NEXT: v_lshrrev_b32_e32 v29, 16, v20
-; GFX11-NEXT: v_lshrrev_b32_e32 v71, 16, v19
-; GFX11-NEXT: v_lshrrev_b32_e32 v37, 16, v22
-; GFX11-NEXT: v_lshrrev_b32_e32 v36, 16, v21
-; GFX11-NEXT: v_lshrrev_b32_e32 v35, 16, v24
-; GFX11-NEXT: v_lshrrev_b32_e32 v34, 16, v23
-; GFX11-NEXT: v_lshrrev_b32_e32 v33, 16, v26
-; GFX11-NEXT: v_lshrrev_b32_e32 v32, 16, v25
-; GFX11-NEXT: v_lshrrev_b32_e32 v31, 16, v28
-; GFX11-NEXT: v_lshrrev_b32_e32 v30, 16, v27
-; GFX11-NEXT: s_and_not1_b32 vcc_lo, exec_lo, s0
-; GFX11-NEXT: s_cbranch_vccnz .LBB49_3
-; GFX11-NEXT: .LBB49_2: ; %cmp.true
-; GFX11-NEXT: v_add_f64 v[8:9], v[8:9], 1.0
-; GFX11-NEXT: v_add_f64 v[6:7], v[6:7], 1.0
-; GFX11-NEXT: v_add_f64 v[4:5], v[4:5], 1.0
-; GFX11-NEXT: v_add_f64 v[2:3], v[2:3], 1.0
-; GFX11-NEXT: v_add_f64 v[0:1], v[0:1], 1.0
-; GFX11-NEXT: v_add_f64 v[15:16], v[15:16], 1.0
-; GFX11-NEXT: v_add_f64 v[13:14], v[13:14], 1.0
-; GFX11-NEXT: v_add_f64 v[17:18], v[17:18], 1.0
-; GFX11-NEXT: v_add_f64 v[11:12], v[11:12], 1.0
-; GFX11-NEXT: v_add_f64 v[19:20], v[19:20], 1.0
-; GFX11-NEXT: v_add_f64 v[21:22], v[21:22], 1.0
-; GFX11-NEXT: v_add_f64 v[23:24], v[23:24], 1.0
-; GFX11-NEXT: v_add_f64 v[25:26], v[25:26], 1.0
-; GFX11-NEXT: v_add_f64 v[27:28], v[27:28], 1.0
-; GFX11-NEXT: v_lshrrev_b32_e32 v38, 16, v9
-; GFX11-NEXT: v_lshrrev_b32_e32 v39, 16, v8
-; GFX11-NEXT: v_lshrrev_b32_e32 v48, 16, v7
-; GFX11-NEXT: v_lshrrev_b32_e32 v49, 16, v6
-; GFX11-NEXT: v_lshrrev_b32_e32 v50, 16, v5
-; GFX11-NEXT: v_lshrrev_b32_e32 v51, 16, v4
-; GFX11-NEXT: v_lshrrev_b32_e32 v52, 16, v3
-; GFX11-NEXT: v_lshrrev_b32_e32 v53, 16, v2
-; GFX11-NEXT: v_lshrrev_b32_e32 v54, 16, v1
-; GFX11-NEXT: v_lshrrev_b32_e32 v55, 16, v0
-; GFX11-NEXT: v_lshrrev_b32_e32 v64, 16, v16
-; GFX11-NEXT: v_lshrrev_b32_e32 v65, 16, v15
-; GFX11-NEXT: v_lshrrev_b32_e32 v66, 16, v14
-; GFX11-NEXT: v_lshrrev_b32_e32 v67, 16, v13
-; GFX11-NEXT: v_lshrrev_b32_e32 v68, 16, v18
-; GFX11-NEXT: v_lshrrev_b32_e32 v69, 16, v17
-; GFX11-NEXT: v_lshrrev_b32_e32 v70, 16, v12
-; GFX11-NEXT: v_lshrrev_b32_e32 v10, 16, v11
-; GFX11-NEXT: v_lshrrev_b32_e32 v29, 16, v20
-; GFX11-NEXT: v_lshrrev_b32_e32 v71, 16, v19
-; GFX11-NEXT: v_lshrrev_b32_e32 v37, 16, v22
-; GFX11-NEXT: v_lshrrev_b32_e32 v36, 16, v21
-; GFX11-NEXT: v_lshrrev_b32_e32 v35, 16, v24
-; GFX11-NEXT: v_lshrrev_b32_e32 v34, 16, v23
-; GFX11-NEXT: v_lshrrev_b32_e32 v33, 16, v26
-; GFX11-NEXT: v_lshrrev_b32_e32 v32, 16, v25
-; GFX11-NEXT: v_lshrrev_b32_e32 v31, 16, v28
-; GFX11-NEXT: v_lshrrev_b32_e32 v30, 16, v27
-; GFX11-NEXT: .LBB49_3: ; %end
-; GFX11-NEXT: v_and_b32_e32 v28, 0xffff, v28
-; GFX11-NEXT: v_and_b32_e32 v22, 0xffff, v22
-; GFX11-NEXT: v_and_b32_e32 v19, 0xffff, v19
-; GFX11-NEXT: v_and_b32_e32 v25, 0xffff, v25
-; GFX11-NEXT: v_and_b32_e32 v11, 0xffff, v11
-; GFX11-NEXT: v_lshl_or_b32 v31, v31, 16, v28
-; GFX11-NEXT: v_lshl_or_b32 v37, v37, 16, v22
-; GFX11-NEXT: v_and_b32_e32 v20, 0xffff, v20
-; GFX11-NEXT: v_lshl_or_b32 v28, v71, 16, v19
-; GFX11-NEXT: v_and_b32_e32 v21, 0xffff, v21
-; GFX11-NEXT: v_and_b32_e32 v17, 0xffff, v17
-; GFX11-NEXT: v_and_b32_e32 v19, 0xffff, v14
-; GFX11-NEXT: v_lshl_or_b32 v29, v29, 16, v20
-; GFX11-NEXT: v_and_b32_e32 v12, 0xffff, v12
-; GFX11-NEXT: v_lshl_or_b32 v36, v36, 16, v21
-; GFX11-NEXT: v_and_b32_e32 v23, 0xffff, v23
-; GFX11-NEXT: v_and_b32_e32 v1, 0xffff, v1
-; GFX11-NEXT: v_and_b32_e32 v4, 0xffff, v4
-; GFX11-NEXT: v_and_b32_e32 v27, 0xffff, v27
-; GFX11-NEXT: v_lshl_or_b32 v32, v32, 16, v25
-; GFX11-NEXT: v_lshl_or_b32 v10, v10, 16, v11
-; GFX11-NEXT: v_and_b32_e32 v20, 0xffff, v15
-; GFX11-NEXT: v_and_b32_e32 v21, 0xffff, v16
-; GFX11-NEXT: v_lshl_or_b32 v15, v66, 16, v19
-; GFX11-NEXT: v_and_b32_e32 v3, 0xffff, v3
-; GFX11-NEXT: v_lshl_or_b32 v19, v54, 16, v1
-; GFX11-NEXT: v_lshl_or_b32 v22, v51, 16, v4
-; GFX11-NEXT: v_and_b32_e32 v1, 0xffff, v6
-; GFX11-NEXT: v_and_b32_e32 v4, 0xffff, v9
-; GFX11-NEXT: v_mov_b32_e32 v6, v36
-; GFX11-NEXT: v_lshl_or_b32 v34, v34, 16, v23
-; GFX11-NEXT: v_and_b32_e32 v25, 0xffff, v26
-; GFX11-NEXT: v_mov_b32_e32 v9, v29
-; GFX11-NEXT: v_lshl_or_b32 v11, v70, 16, v12
-; GFX11-NEXT: v_lshl_or_b32 v12, v69, 16, v17
-; GFX11-NEXT: v_and_b32_e32 v17, 0xffff, v18
-; GFX11-NEXT: v_lshl_or_b32 v30, v30, 16, v27
-; GFX11-NEXT: v_and_b32_e32 v18, 0xffff, v13
-; GFX11-NEXT: v_and_b32_e32 v0, 0xffff, v0
-; GFX11-NEXT: v_and_b32_e32 v2, 0xffff, v2
-; GFX11-NEXT: v_lshl_or_b32 v13, v68, 16, v17
-; GFX11-NEXT: v_lshl_or_b32 v17, v64, 16, v21
-; GFX11-NEXT: v_lshl_or_b32 v21, v52, 16, v3
-; GFX11-NEXT: v_and_b32_e32 v3, 0xffff, v8
-; GFX11-NEXT: v_lshl_or_b32 v27, v38, 16, v4
-; GFX11-NEXT: v_mov_b32_e32 v4, v34
-; GFX11-NEXT: v_lshl_or_b32 v33, v33, 16, v25
-; GFX11-NEXT: v_and_b32_e32 v24, 0xffff, v24
-; GFX11-NEXT: v_lshl_or_b32 v14, v67, 16, v18
-; GFX11-NEXT: v_lshl_or_b32 v16, v65, 16, v20
-; GFX11-NEXT: v_lshl_or_b32 v18, v55, 16, v0
-; GFX11-NEXT: v_lshl_or_b32 v20, v53, 16, v2
-; GFX11-NEXT: v_and_b32_e32 v0, 0xffff, v5
-; GFX11-NEXT: v_and_b32_e32 v2, 0xffff, v7
-; GFX11-NEXT: v_lshl_or_b32 v26, v39, 16, v3
-; GFX11-NEXT: v_mov_b32_e32 v3, v33
-; GFX11-NEXT: v_lshl_or_b32 v35, v35, 16, v24
-; GFX11-NEXT: v_lshl_or_b32 v23, v50, 16, v0
-; GFX11-NEXT: v_lshl_or_b32 v24, v49, 16, v1
-; GFX11-NEXT: v_lshl_or_b32 v25, v48, 16, v2
-; GFX11-NEXT: v_dual_mov_b32 v0, v30 :: v_dual_mov_b32 v1, v31
-; GFX11-NEXT: v_dual_mov_b32 v2, v32 :: v_dual_mov_b32 v5, v35
-; GFX11-NEXT: v_dual_mov_b32 v7, v37 :: v_dual_mov_b32 v8, v28
-; GFX11-NEXT: s_setpc_b64 s[30:31]
-; GFX11-NEXT: .LBB49_4:
-; GFX11-NEXT: ; implicit-def: $vgpr30
-; GFX11-NEXT: ; implicit-def: $vgpr31
-; GFX11-NEXT: ; implicit-def: $vgpr32
-; GFX11-NEXT: ; implicit-def: $vgpr33
-; GFX11-NEXT: ; implicit-def: $vgpr34
-; GFX11-NEXT: ; implicit-def: $vgpr35
-; GFX11-NEXT: ; implicit-def: $vgpr36
-; GFX11-NEXT: ; implicit-def: $vgpr37
-; GFX11-NEXT: ; implicit-def: $vgpr71
-; GFX11-NEXT: ; implicit-def: $vgpr29
-; GFX11-NEXT: ; implicit-def: $vgpr10
-; GFX11-NEXT: ; implicit-def: $vgpr70
-; GFX11-NEXT: ; implicit-def: $vgpr69
-; GFX11-NEXT: ; implicit-def: $vgpr68
-; GFX11-NEXT: ; implicit-def: $vgpr67
-; GFX11-NEXT: ; implicit-def: $vgpr66
-; GFX11-NEXT: ; implicit-def: $vgpr65
-; GFX11-NEXT: ; implicit-def: $vgpr64
-; GFX11-NEXT: ; implicit-def: $vgpr55
-; GFX11-NEXT: ; implicit-def: $vgpr54
-; GFX11-NEXT: ; implicit-def: $vgpr53
-; GFX11-NEXT: ; implicit-def: $vgpr52
-; GFX11-NEXT: ; implicit-def: $vgpr51
-; GFX11-NEXT: ; implicit-def: $vgpr50
-; GFX11-NEXT: ; implicit-def: $vgpr49
-; GFX11-NEXT: ; implicit-def: $vgpr48
-; GFX11-NEXT: ; implicit-def: $vgpr39
-; GFX11-NEXT: ; implicit-def: $vgpr38
-; GFX11-NEXT: s_branch .LBB49_2
+; GFX11-TRUE16-LABEL: bitcast_v14f64_to_v56i16_scalar:
+; GFX11-TRUE16: ; %bb.0:
+; GFX11-TRUE16-NEXT: s_waitcnt vmcnt(0) expcnt(0) lgkmcnt(0)
+; GFX11-TRUE16-NEXT: v_dual_mov_b32 v16, v10 :: v_dual_mov_b32 v27, v9
+; GFX11-TRUE16-NEXT: v_dual_mov_b32 v26, v8 :: v_dual_mov_b32 v25, v7
+; GFX11-TRUE16-NEXT: v_dual_mov_b32 v24, v6 :: v_dual_mov_b32 v23, v5
+; GFX11-TRUE16-NEXT: s_delay_alu instid0(VALU_DEP_3)
+; GFX11-TRUE16-NEXT: v_cmp_ne_u32_e32 vcc_lo, 0, v16
+; GFX11-TRUE16-NEXT: v_dual_mov_b32 v22, v4 :: v_dual_mov_b32 v21, v3
+; GFX11-TRUE16-NEXT: v_dual_mov_b32 v20, v2 :: v_dual_mov_b32 v19, v1
+; GFX11-TRUE16-NEXT: v_dual_mov_b32 v18, v0 :: v_dual_mov_b32 v1, s1
+; GFX11-TRUE16-NEXT: v_dual_mov_b32 v0, s0 :: v_dual_mov_b32 v3, s3
+; GFX11-TRUE16-NEXT: v_dual_mov_b32 v2, s2 :: v_dual_mov_b32 v5, s17
+; GFX11-TRUE16-NEXT: v_dual_mov_b32 v4, s16 :: v_dual_mov_b32 v7, s19
+; GFX11-TRUE16-NEXT: v_dual_mov_b32 v6, s18 :: v_dual_mov_b32 v9, s21
+; GFX11-TRUE16-NEXT: v_dual_mov_b32 v8, s20 :: v_dual_mov_b32 v11, s23
+; GFX11-TRUE16-NEXT: v_dual_mov_b32 v10, s22 :: v_dual_mov_b32 v13, s25
+; GFX11-TRUE16-NEXT: v_dual_mov_b32 v12, s24 :: v_dual_mov_b32 v15, s27
+; GFX11-TRUE16-NEXT: v_dual_mov_b32 v14, s26 :: v_dual_mov_b32 v17, s29
+; GFX11-TRUE16-NEXT: v_mov_b32_e32 v16, s28
+; GFX11-TRUE16-NEXT: s_mov_b32 s0, 0
+; GFX11-TRUE16-NEXT: s_and_b32 s1, vcc_lo, exec_lo
+; GFX11-TRUE16-NEXT: s_cbranch_scc0 .LBB49_4
+; GFX11-TRUE16-NEXT: ; %bb.1: ; %cmp.false
+; GFX11-TRUE16-NEXT: v_lshrrev_b32_e32 v28, 16, v27
+; GFX11-TRUE16-NEXT: v_lshrrev_b32_e32 v29, 16, v26
+; GFX11-TRUE16-NEXT: v_lshrrev_b32_e32 v30, 16, v25
+; GFX11-TRUE16-NEXT: v_lshrrev_b32_e32 v31, 16, v24
+; GFX11-TRUE16-NEXT: v_lshrrev_b32_e32 v32, 16, v23
+; GFX11-TRUE16-NEXT: v_lshrrev_b32_e32 v33, 16, v22
+; GFX11-TRUE16-NEXT: v_lshrrev_b32_e32 v34, 16, v21
+; GFX11-TRUE16-NEXT: v_lshrrev_b32_e32 v35, 16, v20
+; GFX11-TRUE16-NEXT: v_lshrrev_b32_e32 v36, 16, v19
+; GFX11-TRUE16-NEXT: v_lshrrev_b32_e32 v37, 16, v18
+; GFX11-TRUE16-NEXT: v_lshrrev_b32_e32 v38, 16, v17
+; GFX11-TRUE16-NEXT: v_lshrrev_b32_e32 v39, 16, v16
+; GFX11-TRUE16-NEXT: v_lshrrev_b32_e32 v48, 16, v15
+; GFX11-TRUE16-NEXT: v_lshrrev_b32_e32 v49, 16, v14
+; GFX11-TRUE16-NEXT: v_lshrrev_b32_e32 v50, 16, v13
+; GFX11-TRUE16-NEXT: v_lshrrev_b32_e32 v51, 16, v12
+; GFX11-TRUE16-NEXT: v_lshrrev_b32_e32 v52, 16, v11
+; GFX11-TRUE16-NEXT: v_lshrrev_b32_e32 v53, 16, v10
+; GFX11-TRUE16-NEXT: v_lshrrev_b32_e32 v54, 16, v9
+; GFX11-TRUE16-NEXT: v_lshrrev_b32_e32 v55, 16, v8
+; GFX11-TRUE16-NEXT: v_lshrrev_b32_e32 v64, 16, v7
+; GFX11-TRUE16-NEXT: v_lshrrev_b32_e32 v65, 16, v6
+; GFX11-TRUE16-NEXT: v_lshrrev_b32_e32 v66, 16, v5
+; GFX11-TRUE16-NEXT: v_lshrrev_b32_e32 v67, 16, v4
+; GFX11-TRUE16-NEXT: v_lshrrev_b32_e32 v68, 16, v3
+; GFX11-TRUE16-NEXT: v_lshrrev_b32_e32 v69, 16, v2
+; GFX11-TRUE16-NEXT: v_lshrrev_b32_e32 v70, 16, v1
+; GFX11-TRUE16-NEXT: v_lshrrev_b32_e32 v71, 16, v0
+; GFX11-TRUE16-NEXT: s_and_not1_b32 vcc_lo, exec_lo, s0
+; GFX11-TRUE16-NEXT: s_cbranch_vccnz .LBB49_3
+; GFX11-TRUE16-NEXT: .LBB49_2: ; %cmp.true
+; GFX11-TRUE16-NEXT: v_add_f64 v[26:27], v[26:27], 1.0
+; GFX11-TRUE16-NEXT: v_add_f64 v[24:25], v[24:25], 1.0
+; GFX11-TRUE16-NEXT: v_add_f64 v[22:23], v[22:23], 1.0
+; GFX11-TRUE16-NEXT: v_add_f64 v[20:21], v[20:21], 1.0
+; GFX11-TRUE16-NEXT: v_add_f64 v[18:19], v[18:19], 1.0
+; GFX11-TRUE16-NEXT: v_add_f64 v[16:17], v[16:17], 1.0
+; GFX11-TRUE16-NEXT: v_add_f64 v[14:15], v[14:15], 1.0
+; GFX11-TRUE16-NEXT: v_add_f64 v[12:13], v[12:13], 1.0
+; GFX11-TRUE16-NEXT: v_add_f64 v[10:11], v[10:11], 1.0
+; GFX11-TRUE16-NEXT: v_add_f64 v[8:9], v[8:9], 1.0
+; GFX11-TRUE16-NEXT: v_add_f64 v[6:7], v[6:7], 1.0
+; GFX11-TRUE16-NEXT: v_add_f64 v[4:5], v[4:5], 1.0
+; GFX11-TRUE16-NEXT: v_add_f64 v[2:3], v[2:3], 1.0
+; GFX11-TRUE16-NEXT: v_add_f64 v[0:1], v[0:1], 1.0
+; GFX11-TRUE16-NEXT: v_lshrrev_b32_e32 v28, 16, v27
+; GFX11-TRUE16-NEXT: v_lshrrev_b32_e32 v29, 16, v26
+; GFX11-TRUE16-NEXT: v_lshrrev_b32_e32 v30, 16, v25
+; GFX11-TRUE16-NEXT: v_lshrrev_b32_e32 v31, 16, v24
+; GFX11-TRUE16-NEXT: v_lshrrev_b32_e32 v32, 16, v23
+; GFX11-TRUE16-NEXT: v_lshrrev_b32_e32 v33, 16, v22
+; GFX11-TRUE16-NEXT: v_lshrrev_b32_e32 v34, 16, v21
+; GFX11-TRUE16-NEXT: v_lshrrev_b32_e32 v35, 16, v20
+; GFX11-TRUE16-NEXT: v_lshrrev_b32_e32 v36, 16, v19
+; GFX11-TRUE16-NEXT: v_lshrrev_b32_e32 v37, 16, v18
+; GFX11-TRUE16-NEXT: v_lshrrev_b32_e32 v38, 16, v17
+; GFX11-TRUE16-NEXT: v_lshrrev_b32_e32 v39, 16, v16
+; GFX11-TRUE16-NEXT: v_lshrrev_b32_e32 v48, 16, v15
+; GFX11-TRUE16-NEXT: v_lshrrev_b32_e32 v49, 16, v14
+; GFX11-TRUE16-NEXT: v_lshrrev_b32_e32 v50, 16, v13
+; GFX11-TRUE16-NEXT: v_lshrrev_b32_e32 v51, 16, v12
+; GFX11-TRUE16-NEXT: v_lshrrev_b32_e32 v52, 16, v11
+; GFX11-TRUE16-NEXT: v_lshrrev_b32_e32 v53, 16, v10
+; GFX11-TRUE16-NEXT: v_lshrrev_b32_e32 v54, 16, v9
+; GFX11-TRUE16-NEXT: v_lshrrev_b32_e32 v55, 16, v8
+; GFX11-TRUE16-NEXT: v_lshrrev_b32_e32 v64, 16, v7
+; GFX11-TRUE16-NEXT: v_lshrrev_b32_e32 v65, 16, v6
+; GFX11-TRUE16-NEXT: v_lshrrev_b32_e32 v66, 16, v5
+; GFX11-TRUE16-NEXT: v_lshrrev_b32_e32 v67, 16, v4
+; GFX11-TRUE16-NEXT: v_lshrrev_b32_e32 v68, 16, v3
+; GFX11-TRUE16-NEXT: v_lshrrev_b32_e32 v69, 16, v2
+; GFX11-TRUE16-NEXT: v_lshrrev_b32_e32 v70, 16, v1
+; GFX11-TRUE16-NEXT: v_lshrrev_b32_e32 v71, 16, v0
+; GFX11-TRUE16-NEXT: .LBB49_3: ; %end
+; GFX11-TRUE16-NEXT: s_delay_alu instid0(VALU_DEP_1) | instskip(NEXT) | instid1(VALU_DEP_4)
+; GFX11-TRUE16-NEXT: v_dual_mov_b32 v71, v71 :: v_dual_mov_b32 v70, v70
+; GFX11-TRUE16-NEXT: v_dual_mov_b32 v69, v69 :: v_dual_mov_b32 v68, v68
+; GFX11-TRUE16-NEXT: v_dual_mov_b32 v67, v67 :: v_dual_mov_b32 v66, v66
+; GFX11-TRUE16-NEXT: v_dual_mov_b32 v65, v65 :: v_dual_mov_b32 v64, v64
+; GFX11-TRUE16-NEXT: v_dual_mov_b32 v55, v55 :: v_dual_mov_b32 v54, v54
+; GFX11-TRUE16-NEXT: v_dual_mov_b32 v53, v53 :: v_dual_mov_b32 v52, v52
+; GFX11-TRUE16-NEXT: v_dual_mov_b32 v51, v51 :: v_dual_mov_b32 v50, v50
+; GFX11-TRUE16-NEXT: v_dual_mov_b32 v49, v49 :: v_dual_mov_b32 v48, v48
+; GFX11-TRUE16-NEXT: v_dual_mov_b32 v39, v39 :: v_dual_mov_b32 v38, v38
+; GFX11-TRUE16-NEXT: v_dual_mov_b32 v37, v37 :: v_dual_mov_b32 v36, v36
+; GFX11-TRUE16-NEXT: v_dual_mov_b32 v35, v35 :: v_dual_mov_b32 v34, v34
+; GFX11-TRUE16-NEXT: v_dual_mov_b32 v33, v33 :: v_dual_mov_b32 v32, v32
+; GFX11-TRUE16-NEXT: v_dual_mov_b32 v31, v31 :: v_dual_mov_b32 v30, v30
+; GFX11-TRUE16-NEXT: v_dual_mov_b32 v29, v29 :: v_dual_mov_b32 v28, v28
+; GFX11-TRUE16-NEXT: v_mov_b16_e32 v0.h, v71.l
+; GFX11-TRUE16-NEXT: v_mov_b16_e32 v1.h, v70.l
+; GFX11-TRUE16-NEXT: v_mov_b16_e32 v2.h, v69.l
+; GFX11-TRUE16-NEXT: v_mov_b16_e32 v3.h, v68.l
+; GFX11-TRUE16-NEXT: v_mov_b16_e32 v4.h, v67.l
+; GFX11-TRUE16-NEXT: v_mov_b16_e32 v5.h, v66.l
+; GFX11-TRUE16-NEXT: v_mov_b16_e32 v6.h, v65.l
+; GFX11-TRUE16-NEXT: v_mov_b16_e32 v7.h, v64.l
+; GFX11-TRUE16-NEXT: v_mov_b16_e32 v8.h, v55.l
+; GFX11-TRUE16-NEXT: v_mov_b16_e32 v9.h, v54.l
+; GFX11-TRUE16-NEXT: v_mov_b16_e32 v10.h, v53.l
+; GFX11-TRUE16-NEXT: v_mov_b16_e32 v11.h, v52.l
+; GFX11-TRUE16-NEXT: v_mov_b16_e32 v12.h, v51.l
+; GFX11-TRUE16-NEXT: v_mov_b16_e32 v13.h, v50.l
+; GFX11-TRUE16-NEXT: v_mov_b16_e32 v14.h, v49.l
+; GFX11-TRUE16-NEXT: v_mov_b16_e32 v15.h, v48.l
+; GFX11-TRUE16-NEXT: v_mov_b16_e32 v16.h, v39.l
+; GFX11-TRUE16-NEXT: v_mov_b16_e32 v17.h, v38.l
+; GFX11-TRUE16-NEXT: v_mov_b16_e32 v18.h, v37.l
+; GFX11-TRUE16-NEXT: v_mov_b16_e32 v19.h, v36.l
+; GFX11-TRUE16-NEXT: v_mov_b16_e32 v20.h, v35.l
+; GFX11-TRUE16-NEXT: v_mov_b16_e32 v21.h, v34.l
+; GFX11-TRUE16-NEXT: v_mov_b16_e32 v22.h, v33.l
+; GFX11-TRUE16-NEXT: v_mov_b16_e32 v23.h, v32.l
+; GFX11-TRUE16-NEXT: v_mov_b16_e32 v24.h, v31.l
+; GFX11-TRUE16-NEXT: v_mov_b16_e32 v25.h, v30.l
+; GFX11-TRUE16-NEXT: v_mov_b16_e32 v26.h, v29.l
+; GFX11-TRUE16-NEXT: v_mov_b16_e32 v27.h, v28.l
+; GFX11-TRUE16-NEXT: s_setpc_b64 s[30:31]
+; GFX11-TRUE16-NEXT: .LBB49_4:
+; GFX11-TRUE16-NEXT: ; implicit-def: $vgpr71
+; GFX11-TRUE16-NEXT: ; implicit-def: $vgpr70
+; GFX11-TRUE16-NEXT: ; implicit-def: $vgpr69
+; GFX11-TRUE16-NEXT: ; implicit-def: $vgpr68
+; GFX11-TRUE16-NEXT: ; implicit-def: $vgpr67
+; GFX11-TRUE16-NEXT: ; implicit-def: $vgpr66
+; GFX11-TRUE16-NEXT: ; implicit-def: $vgpr65
+; GFX11-TRUE16-NEXT: ; implicit-def: $vgpr64
+; GFX11-TRUE16-NEXT: ; implicit-def: $vgpr55
+; GFX11-TRUE16-NEXT: ; implicit-def: $vgpr54
+; GFX11-TRUE16-NEXT: ; implicit-def: $vgpr53
+; GFX11-TRUE16-NEXT: ; implicit-def: $vgpr52
+; GFX11-TRUE16-NEXT: ; implicit-def: $vgpr51
+; GFX11-TRUE16-NEXT: ; implicit-def: $vgpr50
+; GFX11-TRUE16-NEXT: ; implicit-def: $vgpr49
+; GFX11-TRUE16-NEXT: ; implicit-def: $vgpr48
+; GFX11-TRUE16-NEXT: ; implicit-def: $vgpr39
+; GFX11-TRUE16-NEXT: ; implicit-def: $vgpr38
+; GFX11-TRUE16-NEXT: ; implicit-def: $vgpr37
+; GFX11-TRUE16-NEXT: ; implicit-def: $vgpr36
+; GFX11-TRUE16-NEXT: ; implicit-def: $vgpr35
+; GFX11-TRUE16-NEXT: ; implicit-def: $vgpr34
+; GFX11-TRUE16-NEXT: ; implicit-def: $vgpr33
+; GFX11-TRUE16-NEXT: ; implicit-def: $vgpr32
+; GFX11-TRUE16-NEXT: ; implicit-def: $vgpr31
+; GFX11-TRUE16-NEXT: ; implicit-def: $vgpr30
+; GFX11-TRUE16-NEXT: ; implicit-def: $vgpr29
+; GFX11-TRUE16-NEXT: ; implicit-def: $vgpr28
+; GFX11-TRUE16-NEXT: s_branch .LBB49_2
+;
+; GFX11-FAKE16-LABEL: bitcast_v14f64_to_v56i16_scalar:
+; GFX11-FAKE16: ; %bb.0:
+; GFX11-FAKE16-NEXT: s_waitcnt vmcnt(0) expcnt(0) lgkmcnt(0)
+; GFX11-FAKE16-NEXT: v_cmp_ne_u32_e32 vcc_lo, 0, v10
+; GFX11-FAKE16-NEXT: v_dual_mov_b32 v27, s0 :: v_dual_mov_b32 v28, s1
+; GFX11-FAKE16-NEXT: v_dual_mov_b32 v25, s2 :: v_dual_mov_b32 v26, s3
+; GFX11-FAKE16-NEXT: v_dual_mov_b32 v23, s16 :: v_dual_mov_b32 v24, s17
+; GFX11-FAKE16-NEXT: v_dual_mov_b32 v21, s18 :: v_dual_mov_b32 v22, s19
+; GFX11-FAKE16-NEXT: v_dual_mov_b32 v19, s20 :: v_dual_mov_b32 v20, s21
+; GFX11-FAKE16-NEXT: v_dual_mov_b32 v11, s22 :: v_dual_mov_b32 v12, s23
+; GFX11-FAKE16-NEXT: v_dual_mov_b32 v17, s24 :: v_dual_mov_b32 v18, s25
+; GFX11-FAKE16-NEXT: v_dual_mov_b32 v13, s26 :: v_dual_mov_b32 v14, s27
+; GFX11-FAKE16-NEXT: v_dual_mov_b32 v15, s28 :: v_dual_mov_b32 v16, s29
+; GFX11-FAKE16-NEXT: s_mov_b32 s0, 0
+; GFX11-FAKE16-NEXT: s_and_b32 s1, vcc_lo, exec_lo
+; GFX11-FAKE16-NEXT: s_cbranch_scc0 .LBB49_4
+; GFX11-FAKE16-NEXT: ; %bb.1: ; %cmp.false
+; GFX11-FAKE16-NEXT: v_lshrrev_b32_e32 v38, 16, v9
+; GFX11-FAKE16-NEXT: v_lshrrev_b32_e32 v39, 16, v8
+; GFX11-FAKE16-NEXT: v_lshrrev_b32_e32 v48, 16, v7
+; GFX11-FAKE16-NEXT: v_lshrrev_b32_e32 v49, 16, v6
+; GFX11-FAKE16-NEXT: v_lshrrev_b32_e32 v50, 16, v5
+; GFX11-FAKE16-NEXT: v_lshrrev_b32_e32 v51, 16, v4
+; GFX11-FAKE16-NEXT: v_lshrrev_b32_e32 v52, 16, v3
+; GFX11-FAKE16-NEXT: v_lshrrev_b32_e32 v53, 16, v2
+; GFX11-FAKE16-NEXT: v_lshrrev_b32_e32 v54, 16, v1
+; GFX11-FAKE16-NEXT: v_lshrrev_b32_e32 v55, 16, v0
+; GFX11-FAKE16-NEXT: v_lshrrev_b32_e32 v64, 16, v16
+; GFX11-FAKE16-NEXT: v_lshrrev_b32_e32 v65, 16, v15
+; GFX11-FAKE16-NEXT: v_lshrrev_b32_e32 v66, 16, v14
+; GFX11-FAKE16-NEXT: v_lshrrev_b32_e32 v67, 16, v13
+; GFX11-FAKE16-NEXT: v_lshrrev_b32_e32 v68, 16, v18
+; GFX11-FAKE16-NEXT: v_lshrrev_b32_e32 v69, 16, v17
+; GFX11-FAKE16-NEXT: v_lshrrev_b32_e32 v70, 16, v12
+; GFX11-FAKE16-NEXT: v_lshrrev_b32_e32 v10, 16, v11
+; GFX11-FAKE16-NEXT: v_lshrrev_b32_e32 v29, 16, v20
+; GFX11-FAKE16-NEXT: v_lshrrev_b32_e32 v71, 16, v19
+; GFX11-FAKE16-NEXT: v_lshrrev_b32_e32 v37, 16, v22
+; GFX11-FAKE16-NEXT: v_lshrrev_b32_e32 v36, 16, v21
+; GFX11-FAKE16-NEXT: v_lshrrev_b32_e32 v35, 16, v24
+; GFX11-FAKE16-NEXT: v_lshrrev_b32_e32 v34, 16, v23
+; GFX11-FAKE16-NEXT: v_lshrrev_b32_e32 v33, 16, v26
+; GFX11-FAKE16-NEXT: v_lshrrev_b32_e32 v32, 16, v25
+; GFX11-FAKE16-NEXT: v_lshrrev_b32_e32 v31, 16, v28
+; GFX11-FAKE16-NEXT: v_lshrrev_b32_e32 v30, 16, v27
+; GFX11-FAKE16-NEXT: s_and_not1_b32 vcc_lo, exec_lo, s0
+; GFX11-FAKE16-NEXT: s_cbranch_vccnz .LBB49_3
+; GFX11-FAKE16-NEXT: .LBB49_2: ; %cmp.true
+; GFX11-FAKE16-NEXT: v_add_f64 v[8:9], v[8:9], 1.0
+; GFX11-FAKE16-NEXT: v_add_f64 v[6:7], v[6:7], 1.0
+; GFX11-FAKE16-NEXT: v_add_f64 v[4:5], v[4:5], 1.0
+; GFX11-FAKE16-NEXT: v_add_f64 v[2:3], v[2:3], 1.0
+; GFX11-FAKE16-NEXT: v_add_f64 v[0:1], v[0:1], 1.0
+; GFX11-FAKE16-NEXT: v_add_f64 v[15:16], v[15:16], 1.0
+; GFX11-FAKE16-NEXT: v_add_f64 v[13:14], v[13:14], 1.0
+; GFX11-FAKE16-NEXT: v_add_f64 v[17:18], v[17:18], 1.0
+; GFX11-FAKE16-NEXT: v_add_f64 v[11:12], v[11:12], 1.0
+; GFX11-FAKE16-NEXT: v_add_f64 v[19:20], v[19:20], 1.0
+; GFX11-FAKE16-NEXT: v_add_f64 v[21:22], v[21:22], 1.0
+; GFX11-FAKE16-NEXT: v_add_f64 v[23:24], v[23:24], 1.0
+; GFX11-FAKE16-NEXT: v_add_f64 v[25:26], v[25:26], 1.0
+; GFX11-FAKE16-NEXT: v_add_f64 v[27:28], v[27:28], 1.0
+; GFX11-FAKE16-NEXT: v_lshrrev_b32_e32 v38, 16, v9
+; GFX11-FAKE16-NEXT: v_lshrrev_b32_e32 v39, 16, v8
+; GFX11-FAKE16-NEXT: v_lshrrev_b32_e32 v48, 16, v7
+; GFX11-FAKE16-NEXT: v_lshrrev_b32_e32 v49, 16, v6
+; GFX11-FAKE16-NEXT: v_lshrrev_b32_e32 v50, 16, v5
+; GFX11-FAKE16-NEXT: v_lshrrev_b32_e32 v51, 16, v4
+; GFX11-FAKE16-NEXT: v_lshrrev_b32_e32 v52, 16, v3
+; GFX11-FAKE16-NEXT: v_lshrrev_b32_e32 v53, 16, v2
+; GFX11-FAKE16-NEXT: v_lshrrev_b32_e32 v54, 16, v1
+; GFX11-FAKE16-NEXT: v_lshrrev_b32_e32 v55, 16, v0
+; GFX11-FAKE16-NEXT: v_lshrrev_b32_e32 v64, 16, v16
+; GFX11-FAKE16-NEXT: v_lshrrev_b32_e32 v65, 16, v15
+; GFX11-FAKE16-NEXT: v_lshrrev_b32_e32 v66, 16, v14
+; GFX11-FAKE16-NEXT: v_lshrrev_b32_e32 v67, 16, v13
+; GFX11-FAKE16-NEXT: v_lshrrev_b32_e32 v68, 16, v18
+; GFX11-FAKE16-NEXT: v_lshrrev_b32_e32 v69, 16, v17
+; GFX11-FAKE16-NEXT: v_lshrrev_b32_e32 v70, 16, v12
+; GFX11-FAKE16-NEXT: v_lshrrev_b32_e32 v10, 16, v11
+; GFX11-FAKE16-NEXT: v_lshrrev_b32_e32 v29, 16, v20
+; GFX11-FAKE16-NEXT: v_lshrrev_b32_e32 v71, 16, v19
+; GFX11-FAKE16-NEXT: v_lshrrev_b32_e32 v37, 16, v22
+; GFX11-FAKE16-NEXT: v_lshrrev_b32_e32 v36, 16, v21
+; GFX11-FAKE16-NEXT: v_lshrrev_b32_e32 v35, 16, v24
+; GFX11-FAKE16-NEXT: v_lshrrev_b32_e32 v34, 16, v23
+; GFX11-FAKE16-NEXT: v_lshrrev_b32_e32 v33, 16, v26
+; GFX11-FAKE16-NEXT: v_lshrrev_b32_e32 v32, 16, v25
+; GFX11-FAKE16-NEXT: v_lshrrev_b32_e32 v31, 16, v28
+; GFX11-FAKE16-NEXT: v_lshrrev_b32_e32 v30, 16, v27
+; GFX11-FAKE16-NEXT: .LBB49_3: ; %end
+; GFX11-FAKE16-NEXT: v_and_b32_e32 v28, 0xffff, v28
+; GFX11-FAKE16-NEXT: v_and_b32_e32 v22, 0xffff, v22
+; GFX11-FAKE16-NEXT: v_and_b32_e32 v19, 0xffff, v19
+; GFX11-FAKE16-NEXT: v_and_b32_e32 v25, 0xffff, v25
+; GFX11-FAKE16-NEXT: v_and_b32_e32 v11, 0xffff, v11
+; GFX11-FAKE16-NEXT: v_lshl_or_b32 v31, v31, 16, v28
+; GFX11-FAKE16-NEXT: v_lshl_or_b32 v37, v37, 16, v22
+; GFX11-FAKE16-NEXT: v_and_b32_e32 v20, 0xffff, v20
+; GFX11-FAKE16-NEXT: v_lshl_or_b32 v28, v71, 16, v19
+; GFX11-FAKE16-NEXT: v_and_b32_e32 v21, 0xffff, v21
+; GFX11-FAKE16-NEXT: v_and_b32_e32 v17, 0xffff, v17
+; GFX11-FAKE16-NEXT: v_and_b32_e32 v19, 0xffff, v14
+; GFX11-FAKE16-NEXT: v_lshl_or_b32 v29, v29, 16, v20
+; GFX11-FAKE16-NEXT: v_and_b32_e32 v12, 0xffff, v12
+; GFX11-FAKE16-NEXT: v_lshl_or_b32 v36, v36, 16, v21
+; GFX11-FAKE16-NEXT: v_and_b32_e32 v23, 0xffff, v23
+; GFX11-FAKE16-NEXT: v_and_b32_e32 v1, 0xffff, v1
+; GFX11-FAKE16-NEXT: v_and_b32_e32 v4, 0xffff, v4
+; GFX11-FAKE16-NEXT: v_and_b32_e32 v27, 0xffff, v27
+; GFX11-FAKE16-NEXT: v_lshl_or_b32 v32, v32, 16, v25
+; GFX11-FAKE16-NEXT: v_lshl_or_b32 v10, v10, 16, v11
+; GFX11-FAKE16-NEXT: v_and_b32_e32 v20, 0xffff, v15
+; GFX11-FAKE16-NEXT: v_and_b32_e32 v21, 0xffff, v16
+; GFX11-FAKE16-NEXT: v_lshl_or_b32 v15, v66, 16, v19
+; GFX11-FAKE16-NEXT: v_and_b32_e32 v3, 0xffff, v3
+; GFX11-FAKE16-NEXT: v_lshl_or_b32 v19, v54, 16, v1
+; GFX11-FAKE16-NEXT: v_lshl_or_b32 v22, v51, 16, v4
+; GFX11-FAKE16-NEXT: v_and_b32_e32 v1, 0xffff, v6
+; GFX11-FAKE16-NEXT: v_and_b32_e32 v4, 0xffff, v9
+; GFX11-FAKE16-NEXT: v_mov_b32_e32 v6, v36
+; GFX11-FAKE16-NEXT: v_lshl_or_b32 v34, v34, 16, v23
+; GFX11-FAKE16-NEXT: v_and_b32_e32 v25, 0xffff, v26
+; GFX11-FAKE16-NEXT: v_mov_b32_e32 v9, v29
+; GFX11-FAKE16-NEXT: v_lshl_or_b32 v11, v70, 16, v12
+; GFX11-FAKE16-NEXT: v_lshl_or_b32 v12, v69, 16, v17
+; GFX11-FAKE16-NEXT: v_and_b32_e32 v17, 0xffff, v18
+; GFX11-FAKE16-NEXT: v_lshl_or_b32 v30, v30, 16, v27
+; GFX11-FAKE16-NEXT: v_and_b32_e32 v18, 0xffff, v13
+; GFX11-FAKE16-NEXT: v_and_b32_e32 v0, 0xffff, v0
+; GFX11-FAKE16-NEXT: v_and_b32_e32 v2, 0xffff, v2
+; GFX11-FAKE16-NEXT: v_lshl_or_b32 v13, v68, 16, v17
+; GFX11-FAKE16-NEXT: v_lshl_or_b32 v17, v64, 16, v21
+; GFX11-FAKE16-NEXT: v_lshl_or_b32 v21, v52, 16, v3
+; GFX11-FAKE16-NEXT: v_and_b32_e32 v3, 0xffff, v8
+; GFX11-FAKE16-NEXT: v_lshl_or_b32 v27, v38, 16, v4
+; GFX11-FAKE16-NEXT: v_mov_b32_e32 v4, v34
+; GFX11-FAKE16-NEXT: v_lshl_or_b32 v33, v33, 16, v25
+; GFX11-FAKE16-NEXT: v_and_b32_e32 v24, 0xffff, v24
+; GFX11-FAKE16-NEXT: v_lshl_or_b32 v14, v67, 16, v18
+; GFX11-FAKE16-NEXT: v_lshl_or_b32 v16, v65, 16, v20
+; GFX11-FAKE16-NEXT: v_lshl_or_b32 v18, v55, 16, v0
+; GFX11-FAKE16-NEXT: v_lshl_or_b32 v20, v53, 16, v2
+; GFX11-FAKE16-NEXT: v_and_b32_e32 v0, 0xffff, v5
+; GFX11-FAKE16-NEXT: v_and_b32_e32 v2, 0xffff, v7
+; GFX11-FAKE16-NEXT: v_lshl_or_b32 v26, v39, 16, v3
+; GFX11-FAKE16-NEXT: v_mov_b32_e32 v3, v33
+; GFX11-FAKE16-NEXT: v_lshl_or_b32 v35, v35, 16, v24
+; GFX11-FAKE16-NEXT: v_lshl_or_b32 v23, v50, 16, v0
+; GFX11-FAKE16-NEXT: v_lshl_or_b32 v24, v49, 16, v1
+; GFX11-FAKE16-NEXT: v_lshl_or_b32 v25, v48, 16, v2
+; GFX11-FAKE16-NEXT: v_dual_mov_b32 v0, v30 :: v_dual_mov_b32 v1, v31
+; GFX11-FAKE16-NEXT: v_dual_mov_b32 v2, v32 :: v_dual_mov_b32 v5, v35
+; GFX11-FAKE16-NEXT: v_dual_mov_b32 v7, v37 :: v_dual_mov_b32 v8, v28
+; GFX11-FAKE16-NEXT: s_setpc_b64 s[30:31]
+; GFX11-FAKE16-NEXT: .LBB49_4:
+; GFX11-FAKE16-NEXT: ; implicit-def: $vgpr30
+; GFX11-FAKE16-NEXT: ; implicit-def: $vgpr31
+; GFX11-FAKE16-NEXT: ; implicit-def: $vgpr32
+; GFX11-FAKE16-NEXT: ; implicit-def: $vgpr33
+; GFX11-FAKE16-NEXT: ; implicit-def: $vgpr34
+; GFX11-FAKE16-NEXT: ; implicit-def: $vgpr35
+; GFX11-FAKE16-NEXT: ; implicit-def: $vgpr36
+; GFX11-FAKE16-NEXT: ; implicit-def: $vgpr37
+; GFX11-FAKE16-NEXT: ; implicit-def: $vgpr71
+; GFX11-FAKE16-NEXT: ; implicit-def: $vgpr29
+; GFX11-FAKE16-NEXT: ; implicit-def: $vgpr10
+; GFX11-FAKE16-NEXT: ; implicit-def: $vgpr70
+; GFX11-FAKE16-NEXT: ; implicit-def: $vgpr69
+; GFX11-FAKE16-NEXT: ; implicit-def: $vgpr68
+; GFX11-FAKE16-NEXT: ; implicit-def: $vgpr67
+; GFX11-FAKE16-NEXT: ; implicit-def: $vgpr66
+; GFX11-FAKE16-NEXT: ; implicit-def: $vgpr65
+; GFX11-FAKE16-NEXT: ; implicit-def: $vgpr64
+; GFX11-FAKE16-NEXT: ; implicit-def: $vgpr55
+; GFX11-FAKE16-NEXT: ; implicit-def: $vgpr54
+; GFX11-FAKE16-NEXT: ; implicit-def: $vgpr53
+; GFX11-FAKE16-NEXT: ; implicit-def: $vgpr52
+; GFX11-FAKE16-NEXT: ; implicit-def: $vgpr51
+; GFX11-FAKE16-NEXT: ; implicit-def: $vgpr50
+; GFX11-FAKE16-NEXT: ; implicit-def: $vgpr49
+; GFX11-FAKE16-NEXT: ; implicit-def: $vgpr48
+; GFX11-FAKE16-NEXT: ; implicit-def: $vgpr39
+; GFX11-FAKE16-NEXT: ; implicit-def: $vgpr38
+; GFX11-FAKE16-NEXT: s_branch .LBB49_2
%cmp = icmp eq i32 %b, 0
br i1 %cmp, label %cmp.true, label %cmp.false
@@ -37545,141 +39012,299 @@ define inreg <14 x double> @bitcast_v56i16_to_v14f64_scalar(<56 x i16> inreg %a,
; GFX11-TRUE16-LABEL: bitcast_v56i16_to_v14f64_scalar:
; GFX11-TRUE16: ; %bb.0:
; GFX11-TRUE16-NEXT: s_waitcnt vmcnt(0) expcnt(0) lgkmcnt(0)
-; GFX11-TRUE16-NEXT: v_mov_b16_e32 v32.h, 0
; GFX11-TRUE16-NEXT: v_cmp_ne_u32_e32 vcc_lo, 0, v10
-; GFX11-TRUE16-NEXT: v_mov_b16_e32 v32.l, v9.h
-; GFX11-TRUE16-NEXT: v_mov_b16_e32 v33.l, v8.h
-; GFX11-TRUE16-NEXT: v_mov_b16_e32 v34.l, v7.h
-; GFX11-TRUE16-NEXT: v_mov_b16_e32 v33.h, v32.h
-; GFX11-TRUE16-NEXT: v_mov_b16_e32 v34.h, v32.h
-; GFX11-TRUE16-NEXT: v_mov_b16_e32 v35.l, v6.h
-; GFX11-TRUE16-NEXT: v_mov_b16_e32 v35.h, v32.h
-; GFX11-TRUE16-NEXT: v_mov_b16_e32 v36.l, v5.h
-; GFX11-TRUE16-NEXT: v_mov_b16_e32 v36.h, v32.h
-; GFX11-TRUE16-NEXT: v_mov_b16_e32 v37.l, v4.h
-; GFX11-TRUE16-NEXT: v_mov_b16_e32 v37.h, v32.h
-; GFX11-TRUE16-NEXT: v_mov_b16_e32 v38.l, v3.h
-; GFX11-TRUE16-NEXT: v_mov_b16_e32 v38.h, v32.h
-; GFX11-TRUE16-NEXT: v_mov_b16_e32 v39.l, v2.h
-; GFX11-TRUE16-NEXT: v_mov_b16_e32 v39.h, v32.h
-; GFX11-TRUE16-NEXT: v_mov_b16_e32 v48.l, v1.h
-; GFX11-TRUE16-NEXT: v_mov_b16_e32 v48.h, v32.h
-; GFX11-TRUE16-NEXT: v_mov_b16_e32 v49.l, v0.h
-; GFX11-TRUE16-NEXT: v_mov_b16_e32 v49.h, v32.h
-; GFX11-TRUE16-NEXT: v_and_b32_e32 v67, 0xffff, v0
-; GFX11-TRUE16-NEXT: v_and_b32_e32 v66, 0xffff, v1
-; GFX11-TRUE16-NEXT: v_and_b32_e32 v65, 0xffff, v2
-; GFX11-TRUE16-NEXT: v_and_b32_e32 v64, 0xffff, v3
-; GFX11-TRUE16-NEXT: v_and_b32_e32 v55, 0xffff, v4
-; GFX11-TRUE16-NEXT: v_and_b32_e32 v54, 0xffff, v5
-; GFX11-TRUE16-NEXT: v_and_b32_e32 v53, 0xffff, v6
-; GFX11-TRUE16-NEXT: v_and_b32_e32 v52, 0xffff, v7
-; GFX11-TRUE16-NEXT: v_and_b32_e32 v51, 0xffff, v8
-; GFX11-TRUE16-NEXT: v_and_b32_e32 v50, 0xffff, v9
-; GFX11-TRUE16-NEXT: s_lshr_b32 s40, s29, 16
-; GFX11-TRUE16-NEXT: s_lshr_b32 s41, s28, 16
-; GFX11-TRUE16-NEXT: s_lshr_b32 s42, s27, 16
-; GFX11-TRUE16-NEXT: s_lshr_b32 s14, s26, 16
-; GFX11-TRUE16-NEXT: s_lshr_b32 s13, s25, 16
-; GFX11-TRUE16-NEXT: s_lshr_b32 s12, s24, 16
-; GFX11-TRUE16-NEXT: s_lshr_b32 s11, s23, 16
-; GFX11-TRUE16-NEXT: s_lshr_b32 s10, s22, 16
-; GFX11-TRUE16-NEXT: s_lshr_b32 s9, s21, 16
-; GFX11-TRUE16-NEXT: s_lshr_b32 s8, s20, 16
-; GFX11-TRUE16-NEXT: s_lshr_b32 s7, s19, 16
-; GFX11-TRUE16-NEXT: s_lshr_b32 s6, s18, 16
-; GFX11-TRUE16-NEXT: s_lshr_b32 s5, s17, 16
-; GFX11-TRUE16-NEXT: s_lshr_b32 s4, s16, 16
-; GFX11-TRUE16-NEXT: s_lshr_b32 s43, s3, 16
-; GFX11-TRUE16-NEXT: s_lshr_b32 s44, s2, 16
-; GFX11-TRUE16-NEXT: s_lshr_b32 s45, s1, 16
-; GFX11-TRUE16-NEXT: s_lshr_b32 s46, s0, 16
-; GFX11-TRUE16-NEXT: s_mov_b32 s15, 0
-; GFX11-TRUE16-NEXT: s_pack_ll_b32_b16 s0, s0, s46
-; GFX11-TRUE16-NEXT: s_pack_ll_b32_b16 s1, s1, s45
-; GFX11-TRUE16-NEXT: s_pack_ll_b32_b16 s2, s2, s44
-; GFX11-TRUE16-NEXT: s_pack_ll_b32_b16 s3, s3, s43
-; GFX11-TRUE16-NEXT: s_pack_ll_b32_b16 s4, s16, s4
-; GFX11-TRUE16-NEXT: s_pack_ll_b32_b16 s5, s17, s5
-; GFX11-TRUE16-NEXT: s_pack_ll_b32_b16 s6, s18, s6
-; GFX11-TRUE16-NEXT: s_pack_ll_b32_b16 s7, s19, s7
-; GFX11-TRUE16-NEXT: s_pack_ll_b32_b16 s8, s20, s8
-; GFX11-TRUE16-NEXT: s_pack_ll_b32_b16 s9, s21, s9
-; GFX11-TRUE16-NEXT: s_pack_ll_b32_b16 s10, s22, s10
-; GFX11-TRUE16-NEXT: s_pack_ll_b32_b16 s11, s23, s11
-; GFX11-TRUE16-NEXT: s_pack_ll_b32_b16 s12, s24, s12
-; GFX11-TRUE16-NEXT: s_pack_ll_b32_b16 s13, s25, s13
-; GFX11-TRUE16-NEXT: s_pack_ll_b32_b16 s14, s26, s14
-; GFX11-TRUE16-NEXT: s_pack_ll_b32_b16 s16, s27, s42
-; GFX11-TRUE16-NEXT: s_pack_ll_b32_b16 s17, s28, s41
-; GFX11-TRUE16-NEXT: s_pack_ll_b32_b16 s18, s29, s40
+; GFX11-TRUE16-NEXT: s_clause 0x1f
+; GFX11-TRUE16-NEXT: scratch_store_b32 off, v40, s32 offset:316
+; GFX11-TRUE16-NEXT: scratch_store_b32 off, v41, s32 offset:312
+; GFX11-TRUE16-NEXT: scratch_store_b32 off, v42, s32 offset:308
+; GFX11-TRUE16-NEXT: scratch_store_b32 off, v43, s32 offset:304
+; GFX11-TRUE16-NEXT: scratch_store_b32 off, v44, s32 offset:300
+; GFX11-TRUE16-NEXT: scratch_store_b32 off, v45, s32 offset:296
+; GFX11-TRUE16-NEXT: scratch_store_b32 off, v46, s32 offset:292
+; GFX11-TRUE16-NEXT: scratch_store_b32 off, v47, s32 offset:288
+; GFX11-TRUE16-NEXT: scratch_store_b32 off, v56, s32 offset:284
+; GFX11-TRUE16-NEXT: scratch_store_b32 off, v57, s32 offset:280
+; GFX11-TRUE16-NEXT: scratch_store_b32 off, v58, s32 offset:276
+; GFX11-TRUE16-NEXT: scratch_store_b32 off, v59, s32 offset:272
+; GFX11-TRUE16-NEXT: scratch_store_b32 off, v60, s32 offset:268
+; GFX11-TRUE16-NEXT: scratch_store_b32 off, v61, s32 offset:264
+; GFX11-TRUE16-NEXT: scratch_store_b32 off, v62, s32 offset:260
+; GFX11-TRUE16-NEXT: scratch_store_b32 off, v63, s32 offset:256
+; GFX11-TRUE16-NEXT: scratch_store_b32 off, v72, s32 offset:252
+; GFX11-TRUE16-NEXT: scratch_store_b32 off, v73, s32 offset:248
+; GFX11-TRUE16-NEXT: scratch_store_b32 off, v74, s32 offset:244
+; GFX11-TRUE16-NEXT: scratch_store_b32 off, v75, s32 offset:240
+; GFX11-TRUE16-NEXT: scratch_store_b32 off, v76, s32 offset:236
+; GFX11-TRUE16-NEXT: scratch_store_b32 off, v77, s32 offset:232
+; GFX11-TRUE16-NEXT: scratch_store_b32 off, v78, s32 offset:228
+; GFX11-TRUE16-NEXT: scratch_store_b32 off, v79, s32 offset:224
+; GFX11-TRUE16-NEXT: scratch_store_b32 off, v88, s32 offset:220
+; GFX11-TRUE16-NEXT: scratch_store_b32 off, v89, s32 offset:216
+; GFX11-TRUE16-NEXT: scratch_store_b32 off, v90, s32 offset:212
+; GFX11-TRUE16-NEXT: scratch_store_b32 off, v91, s32 offset:208
+; GFX11-TRUE16-NEXT: scratch_store_b32 off, v92, s32 offset:204
+; GFX11-TRUE16-NEXT: scratch_store_b32 off, v93, s32 offset:200
+; GFX11-TRUE16-NEXT: scratch_store_b32 off, v94, s32 offset:196
+; GFX11-TRUE16-NEXT: scratch_store_b32 off, v95, s32 offset:192
+; GFX11-TRUE16-NEXT: s_clause 0x1f
+; GFX11-TRUE16-NEXT: scratch_store_b32 off, v104, s32 offset:188
+; GFX11-TRUE16-NEXT: scratch_store_b32 off, v105, s32 offset:184
+; GFX11-TRUE16-NEXT: scratch_store_b32 off, v106, s32 offset:180
+; GFX11-TRUE16-NEXT: scratch_store_b32 off, v107, s32 offset:176
+; GFX11-TRUE16-NEXT: scratch_store_b32 off, v108, s32 offset:172
+; GFX11-TRUE16-NEXT: scratch_store_b32 off, v109, s32 offset:168
+; GFX11-TRUE16-NEXT: scratch_store_b32 off, v110, s32 offset:164
+; GFX11-TRUE16-NEXT: scratch_store_b32 off, v111, s32 offset:160
+; GFX11-TRUE16-NEXT: scratch_store_b32 off, v120, s32 offset:156
+; GFX11-TRUE16-NEXT: scratch_store_b32 off, v121, s32 offset:152
+; GFX11-TRUE16-NEXT: scratch_store_b32 off, v122, s32 offset:148
+; GFX11-TRUE16-NEXT: scratch_store_b32 off, v123, s32 offset:144
+; GFX11-TRUE16-NEXT: scratch_store_b32 off, v124, s32 offset:140
+; GFX11-TRUE16-NEXT: scratch_store_b32 off, v125, s32 offset:136
+; GFX11-TRUE16-NEXT: scratch_store_b32 off, v126, s32 offset:132
+; GFX11-TRUE16-NEXT: scratch_store_b32 off, v127, s32 offset:128
+; GFX11-TRUE16-NEXT: scratch_store_b32 off, v136, s32 offset:124
+; GFX11-TRUE16-NEXT: scratch_store_b32 off, v137, s32 offset:120
+; GFX11-TRUE16-NEXT: scratch_store_b32 off, v138, s32 offset:116
+; GFX11-TRUE16-NEXT: scratch_store_b32 off, v139, s32 offset:112
+; GFX11-TRUE16-NEXT: scratch_store_b32 off, v140, s32 offset:108
+; GFX11-TRUE16-NEXT: scratch_store_b32 off, v141, s32 offset:104
+; GFX11-TRUE16-NEXT: scratch_store_b32 off, v142, s32 offset:100
+; GFX11-TRUE16-NEXT: scratch_store_b32 off, v143, s32 offset:96
+; GFX11-TRUE16-NEXT: scratch_store_b32 off, v152, s32 offset:92
+; GFX11-TRUE16-NEXT: scratch_store_b32 off, v153, s32 offset:88
+; GFX11-TRUE16-NEXT: scratch_store_b32 off, v154, s32 offset:84
+; GFX11-TRUE16-NEXT: scratch_store_b32 off, v155, s32 offset:80
+; GFX11-TRUE16-NEXT: scratch_store_b32 off, v156, s32 offset:76
+; GFX11-TRUE16-NEXT: scratch_store_b32 off, v157, s32 offset:72
+; GFX11-TRUE16-NEXT: scratch_store_b32 off, v158, s32 offset:68
+; GFX11-TRUE16-NEXT: scratch_store_b32 off, v159, s32 offset:64
+; GFX11-TRUE16-NEXT: s_clause 0xf
+; GFX11-TRUE16-NEXT: scratch_store_b32 off, v168, s32 offset:60
+; GFX11-TRUE16-NEXT: scratch_store_b32 off, v169, s32 offset:56
+; GFX11-TRUE16-NEXT: scratch_store_b32 off, v170, s32 offset:52
+; GFX11-TRUE16-NEXT: scratch_store_b32 off, v171, s32 offset:48
+; GFX11-TRUE16-NEXT: scratch_store_b32 off, v172, s32 offset:44
+; GFX11-TRUE16-NEXT: scratch_store_b32 off, v173, s32 offset:40
+; GFX11-TRUE16-NEXT: scratch_store_b32 off, v174, s32 offset:36
+; GFX11-TRUE16-NEXT: scratch_store_b32 off, v175, s32 offset:32
+; GFX11-TRUE16-NEXT: scratch_store_b32 off, v184, s32 offset:28
+; GFX11-TRUE16-NEXT: scratch_store_b32 off, v185, s32 offset:24
+; GFX11-TRUE16-NEXT: scratch_store_b32 off, v186, s32 offset:20
+; GFX11-TRUE16-NEXT: scratch_store_b32 off, v187, s32 offset:16
+; GFX11-TRUE16-NEXT: scratch_store_b32 off, v188, s32 offset:12
+; GFX11-TRUE16-NEXT: scratch_store_b32 off, v189, s32 offset:8
+; GFX11-TRUE16-NEXT: scratch_store_b32 off, v190, s32 offset:4
+; GFX11-TRUE16-NEXT: scratch_store_b32 off, v191, s32
+; GFX11-TRUE16-NEXT: v_dual_mov_b32 v28, v9 :: v_dual_mov_b32 v25, v7
+; GFX11-TRUE16-NEXT: v_dual_mov_b32 v26, v8 :: v_dual_mov_b32 v189, v5
+; GFX11-TRUE16-NEXT: v_dual_mov_b32 v188, v6 :: v_dual_mov_b32 v191, v3
+; GFX11-TRUE16-NEXT: v_dual_mov_b32 v190, v4 :: v_dual_mov_b32 v185, v2
+; GFX11-TRUE16-NEXT: v_dual_mov_b32 v186, v1 :: v_dual_mov_b32 v187, v0
+; GFX11-TRUE16-NEXT: s_lshr_b32 s15, s29, 16
+; GFX11-TRUE16-NEXT: s_lshr_b32 s14, s28, 16
+; GFX11-TRUE16-NEXT: s_lshr_b32 s13, s27, 16
+; GFX11-TRUE16-NEXT: s_lshr_b32 s12, s26, 16
+; GFX11-TRUE16-NEXT: s_lshr_b32 s11, s25, 16
+; GFX11-TRUE16-NEXT: s_lshr_b32 s10, s24, 16
+; GFX11-TRUE16-NEXT: s_lshr_b32 s9, s23, 16
+; GFX11-TRUE16-NEXT: s_lshr_b32 s8, s22, 16
+; GFX11-TRUE16-NEXT: s_lshr_b32 s7, s21, 16
+; GFX11-TRUE16-NEXT: s_lshr_b32 s6, s20, 16
+; GFX11-TRUE16-NEXT: s_lshr_b32 s5, s19, 16
+; GFX11-TRUE16-NEXT: s_lshr_b32 s4, s18, 16
+; GFX11-TRUE16-NEXT: s_lshr_b32 s43, s17, 16
+; GFX11-TRUE16-NEXT: s_lshr_b32 s44, s16, 16
+; GFX11-TRUE16-NEXT: s_lshr_b32 s45, s3, 16
+; GFX11-TRUE16-NEXT: s_lshr_b32 s46, s2, 16
+; GFX11-TRUE16-NEXT: s_lshr_b32 s41, s1, 16
+; GFX11-TRUE16-NEXT: s_lshr_b32 s40, s0, 16
+; GFX11-TRUE16-NEXT: s_mov_b32 s42, 0
+; GFX11-TRUE16-NEXT: s_pack_ll_b32_b16 s40, s0, s40
+; GFX11-TRUE16-NEXT: s_pack_ll_b32_b16 s41, s1, s41
+; GFX11-TRUE16-NEXT: s_pack_ll_b32_b16 s0, s2, s46
+; GFX11-TRUE16-NEXT: s_pack_ll_b32_b16 s1, s3, s45
+; GFX11-TRUE16-NEXT: s_pack_ll_b32_b16 s2, s16, s44
+; GFX11-TRUE16-NEXT: s_pack_ll_b32_b16 s3, s17, s43
+; GFX11-TRUE16-NEXT: s_pack_ll_b32_b16 s4, s18, s4
+; GFX11-TRUE16-NEXT: s_pack_ll_b32_b16 s5, s19, s5
+; GFX11-TRUE16-NEXT: s_pack_ll_b32_b16 s6, s20, s6
+; GFX11-TRUE16-NEXT: s_pack_ll_b32_b16 s7, s21, s7
+; GFX11-TRUE16-NEXT: s_pack_ll_b32_b16 s8, s22, s8
+; GFX11-TRUE16-NEXT: s_pack_ll_b32_b16 s9, s23, s9
+; GFX11-TRUE16-NEXT: s_pack_ll_b32_b16 s10, s24, s10
+; GFX11-TRUE16-NEXT: s_pack_ll_b32_b16 s11, s25, s11
+; GFX11-TRUE16-NEXT: s_pack_ll_b32_b16 s12, s26, s12
+; GFX11-TRUE16-NEXT: s_pack_ll_b32_b16 s13, s27, s13
+; GFX11-TRUE16-NEXT: s_pack_ll_b32_b16 s14, s28, s14
+; GFX11-TRUE16-NEXT: s_pack_ll_b32_b16 s15, s29, s15
; GFX11-TRUE16-NEXT: s_and_b32 s47, vcc_lo, exec_lo
; GFX11-TRUE16-NEXT: s_cbranch_scc0 .LBB51_4
; GFX11-TRUE16-NEXT: ; %bb.1: ; %cmp.false
-; GFX11-TRUE16-NEXT: v_lshl_or_b32 v18, v49, 16, v67
-; GFX11-TRUE16-NEXT: v_lshl_or_b32 v19, v48, 16, v66
-; GFX11-TRUE16-NEXT: v_lshl_or_b32 v20, v39, 16, v65
-; GFX11-TRUE16-NEXT: v_lshl_or_b32 v21, v38, 16, v64
-; GFX11-TRUE16-NEXT: v_lshl_or_b32 v22, v37, 16, v55
-; GFX11-TRUE16-NEXT: v_lshl_or_b32 v23, v36, 16, v54
-; GFX11-TRUE16-NEXT: v_lshl_or_b32 v24, v35, 16, v53
-; GFX11-TRUE16-NEXT: v_lshl_or_b32 v25, v34, 16, v52
-; GFX11-TRUE16-NEXT: v_lshl_or_b32 v26, v33, 16, v51
-; GFX11-TRUE16-NEXT: v_lshl_or_b32 v27, v32, 16, v50
-; GFX11-TRUE16-NEXT: v_dual_mov_b32 v0, s0 :: v_dual_mov_b32 v1, s1
-; GFX11-TRUE16-NEXT: v_dual_mov_b32 v2, s2 :: v_dual_mov_b32 v3, s3
-; GFX11-TRUE16-NEXT: v_dual_mov_b32 v4, s4 :: v_dual_mov_b32 v5, s5
-; GFX11-TRUE16-NEXT: v_dual_mov_b32 v6, s6 :: v_dual_mov_b32 v7, s7
-; GFX11-TRUE16-NEXT: v_dual_mov_b32 v8, s8 :: v_dual_mov_b32 v9, s9
-; GFX11-TRUE16-NEXT: v_dual_mov_b32 v10, s10 :: v_dual_mov_b32 v11, s11
-; GFX11-TRUE16-NEXT: v_dual_mov_b32 v12, s12 :: v_dual_mov_b32 v13, s13
-; GFX11-TRUE16-NEXT: v_dual_mov_b32 v14, s14 :: v_dual_mov_b32 v15, s16
-; GFX11-TRUE16-NEXT: v_dual_mov_b32 v16, s17 :: v_dual_mov_b32 v17, s18
-; GFX11-TRUE16-NEXT: s_and_not1_b32 vcc_lo, exec_lo, s15
+; GFX11-TRUE16-NEXT: v_dual_mov_b32 v0, s40 :: v_dual_mov_b32 v5, s0
+; GFX11-TRUE16-NEXT: v_dual_mov_b32 v2, s41 :: v_dual_mov_b32 v9, s1
+; GFX11-TRUE16-NEXT: v_dual_mov_b32 v14, s2 :: v_dual_mov_b32 v27, s4
+; GFX11-TRUE16-NEXT: v_dual_mov_b32 v20, s3 :: v_dual_mov_b32 v35, s5
+; GFX11-TRUE16-NEXT: v_dual_mov_b32 v44, s6 :: v_dual_mov_b32 v65, s8
+; GFX11-TRUE16-NEXT: v_dual_mov_b32 v54, s7 :: v_dual_mov_b32 v77, s9
+; GFX11-TRUE16-NEXT: v_dual_mov_b32 v90, s10 :: v_dual_mov_b32 v119, s12
+; GFX11-TRUE16-NEXT: v_dual_mov_b32 v104, s11 :: v_dual_mov_b32 v135, s13
+; GFX11-TRUE16-NEXT: v_mov_b32_e32 v152, s14
+; GFX11-TRUE16-NEXT: v_mov_b32_e32 v170, s15
+; GFX11-TRUE16-NEXT: s_and_not1_b32 vcc_lo, exec_lo, s42
; GFX11-TRUE16-NEXT: s_cbranch_vccnz .LBB51_3
; GFX11-TRUE16-NEXT: .LBB51_2: ; %cmp.true
-; GFX11-TRUE16-NEXT: v_lshl_or_b32 v18, v49, 16, v67
-; GFX11-TRUE16-NEXT: v_lshl_or_b32 v19, v48, 16, v66
-; GFX11-TRUE16-NEXT: v_lshl_or_b32 v20, v39, 16, v65
-; GFX11-TRUE16-NEXT: v_lshl_or_b32 v21, v38, 16, v64
-; GFX11-TRUE16-NEXT: v_lshl_or_b32 v22, v37, 16, v55
-; GFX11-TRUE16-NEXT: v_lshl_or_b32 v23, v36, 16, v54
-; GFX11-TRUE16-NEXT: v_lshl_or_b32 v24, v35, 16, v53
-; GFX11-TRUE16-NEXT: v_lshl_or_b32 v25, v34, 16, v52
-; GFX11-TRUE16-NEXT: v_lshl_or_b32 v26, v33, 16, v51
-; GFX11-TRUE16-NEXT: v_lshl_or_b32 v27, v32, 16, v50
-; GFX11-TRUE16-NEXT: v_pk_add_u16 v0, s0, 3 op_sel_hi:[1,0]
-; GFX11-TRUE16-NEXT: v_pk_add_u16 v1, s1, 3 op_sel_hi:[1,0]
-; GFX11-TRUE16-NEXT: v_pk_add_u16 v2, s2, 3 op_sel_hi:[1,0]
-; GFX11-TRUE16-NEXT: v_pk_add_u16 v3, s3, 3 op_sel_hi:[1,0]
-; GFX11-TRUE16-NEXT: v_pk_add_u16 v4, s4, 3 op_sel_hi:[1,0]
-; GFX11-TRUE16-NEXT: v_pk_add_u16 v5, s5, 3 op_sel_hi:[1,0]
-; GFX11-TRUE16-NEXT: v_pk_add_u16 v6, s6, 3 op_sel_hi:[1,0]
-; GFX11-TRUE16-NEXT: v_pk_add_u16 v7, s7, 3 op_sel_hi:[1,0]
-; GFX11-TRUE16-NEXT: v_pk_add_u16 v8, s8, 3 op_sel_hi:[1,0]
-; GFX11-TRUE16-NEXT: v_pk_add_u16 v9, s9, 3 op_sel_hi:[1,0]
-; GFX11-TRUE16-NEXT: v_pk_add_u16 v10, s10, 3 op_sel_hi:[1,0]
-; GFX11-TRUE16-NEXT: v_pk_add_u16 v11, s11, 3 op_sel_hi:[1,0]
-; GFX11-TRUE16-NEXT: v_pk_add_u16 v12, s12, 3 op_sel_hi:[1,0]
-; GFX11-TRUE16-NEXT: v_pk_add_u16 v13, s13, 3 op_sel_hi:[1,0]
-; GFX11-TRUE16-NEXT: v_pk_add_u16 v14, s14, 3 op_sel_hi:[1,0]
-; GFX11-TRUE16-NEXT: v_pk_add_u16 v15, s16, 3 op_sel_hi:[1,0]
-; GFX11-TRUE16-NEXT: v_pk_add_u16 v16, s17, 3 op_sel_hi:[1,0]
-; GFX11-TRUE16-NEXT: v_pk_add_u16 v17, s18, 3 op_sel_hi:[1,0]
-; GFX11-TRUE16-NEXT: v_pk_add_u16 v18, v18, 3 op_sel_hi:[1,0]
-; GFX11-TRUE16-NEXT: v_pk_add_u16 v19, v19, 3 op_sel_hi:[1,0]
-; GFX11-TRUE16-NEXT: v_pk_add_u16 v20, v20, 3 op_sel_hi:[1,0]
-; GFX11-TRUE16-NEXT: v_pk_add_u16 v21, v21, 3 op_sel_hi:[1,0]
-; GFX11-TRUE16-NEXT: v_pk_add_u16 v22, v22, 3 op_sel_hi:[1,0]
-; GFX11-TRUE16-NEXT: v_pk_add_u16 v23, v23, 3 op_sel_hi:[1,0]
-; GFX11-TRUE16-NEXT: v_pk_add_u16 v24, v24, 3 op_sel_hi:[1,0]
+; GFX11-TRUE16-NEXT: v_pk_add_u16 v0, s40, 3 op_sel_hi:[1,0]
+; GFX11-TRUE16-NEXT: v_pk_add_u16 v2, s41, 3 op_sel_hi:[1,0]
+; GFX11-TRUE16-NEXT: v_pk_add_u16 v187, v187, 3 op_sel_hi:[1,0]
+; GFX11-TRUE16-NEXT: v_pk_add_u16 v186, v186, 3 op_sel_hi:[1,0]
+; GFX11-TRUE16-NEXT: v_pk_add_u16 v185, v185, 3 op_sel_hi:[1,0]
+; GFX11-TRUE16-NEXT: v_pk_add_u16 v191, v191, 3 op_sel_hi:[1,0]
+; GFX11-TRUE16-NEXT: v_pk_add_u16 v190, v190, 3 op_sel_hi:[1,0]
+; GFX11-TRUE16-NEXT: v_pk_add_u16 v189, v189, 3 op_sel_hi:[1,0]
+; GFX11-TRUE16-NEXT: v_pk_add_u16 v188, v188, 3 op_sel_hi:[1,0]
; GFX11-TRUE16-NEXT: v_pk_add_u16 v25, v25, 3 op_sel_hi:[1,0]
; GFX11-TRUE16-NEXT: v_pk_add_u16 v26, v26, 3 op_sel_hi:[1,0]
-; GFX11-TRUE16-NEXT: v_pk_add_u16 v27, v27, 3 op_sel_hi:[1,0]
+; GFX11-TRUE16-NEXT: v_pk_add_u16 v28, v28, 3 op_sel_hi:[1,0]
+; GFX11-TRUE16-NEXT: v_pk_add_u16 v5, s0, 3 op_sel_hi:[1,0]
+; GFX11-TRUE16-NEXT: v_pk_add_u16 v9, s1, 3 op_sel_hi:[1,0]
+; GFX11-TRUE16-NEXT: v_pk_add_u16 v14, s2, 3 op_sel_hi:[1,0]
+; GFX11-TRUE16-NEXT: v_pk_add_u16 v20, s3, 3 op_sel_hi:[1,0]
+; GFX11-TRUE16-NEXT: v_pk_add_u16 v27, s4, 3 op_sel_hi:[1,0]
+; GFX11-TRUE16-NEXT: v_pk_add_u16 v35, s5, 3 op_sel_hi:[1,0]
+; GFX11-TRUE16-NEXT: v_pk_add_u16 v44, s6, 3 op_sel_hi:[1,0]
+; GFX11-TRUE16-NEXT: v_pk_add_u16 v54, s7, 3 op_sel_hi:[1,0]
+; GFX11-TRUE16-NEXT: v_pk_add_u16 v65, s8, 3 op_sel_hi:[1,0]
+; GFX11-TRUE16-NEXT: v_pk_add_u16 v77, s9, 3 op_sel_hi:[1,0]
+; GFX11-TRUE16-NEXT: v_pk_add_u16 v90, s10, 3 op_sel_hi:[1,0]
+; GFX11-TRUE16-NEXT: v_pk_add_u16 v104, s11, 3 op_sel_hi:[1,0]
+; GFX11-TRUE16-NEXT: v_pk_add_u16 v119, s12, 3 op_sel_hi:[1,0]
+; GFX11-TRUE16-NEXT: v_pk_add_u16 v135, s13, 3 op_sel_hi:[1,0]
+; GFX11-TRUE16-NEXT: v_pk_add_u16 v152, s14, 3 op_sel_hi:[1,0]
+; GFX11-TRUE16-NEXT: v_pk_add_u16 v170, s15, 3 op_sel_hi:[1,0]
; GFX11-TRUE16-NEXT: .LBB51_3: ; %end
+; GFX11-TRUE16-NEXT: v_dual_mov_b32 v1, v2 :: v_dual_mov_b32 v2, v5
+; GFX11-TRUE16-NEXT: v_dual_mov_b32 v5, v20 :: v_dual_mov_b32 v6, v27
+; GFX11-TRUE16-NEXT: v_dual_mov_b32 v7, v35 :: v_dual_mov_b32 v8, v44
+; GFX11-TRUE16-NEXT: v_dual_mov_b32 v11, v77 :: v_dual_mov_b32 v12, v90
+; GFX11-TRUE16-NEXT: v_mov_b32_e32 v13, v104
+; GFX11-TRUE16-NEXT: v_dual_mov_b32 v15, v135 :: v_dual_mov_b32 v16, v152
+; GFX11-TRUE16-NEXT: v_dual_mov_b32 v17, v170 :: v_dual_mov_b32 v18, v187
+; GFX11-TRUE16-NEXT: v_dual_mov_b32 v19, v186 :: v_dual_mov_b32 v20, v185
+; GFX11-TRUE16-NEXT: v_dual_mov_b32 v21, v191 :: v_dual_mov_b32 v22, v190
+; GFX11-TRUE16-NEXT: v_dual_mov_b32 v23, v189 :: v_dual_mov_b32 v24, v188
+; GFX11-TRUE16-NEXT: s_clause 0x1f
+; GFX11-TRUE16-NEXT: scratch_load_b32 v191, off, s32
+; GFX11-TRUE16-NEXT: scratch_load_b32 v190, off, s32 offset:4
+; GFX11-TRUE16-NEXT: scratch_load_b32 v189, off, s32 offset:8
+; GFX11-TRUE16-NEXT: scratch_load_b32 v188, off, s32 offset:12
+; GFX11-TRUE16-NEXT: scratch_load_b32 v187, off, s32 offset:16
+; GFX11-TRUE16-NEXT: scratch_load_b32 v186, off, s32 offset:20
+; GFX11-TRUE16-NEXT: scratch_load_b32 v185, off, s32 offset:24
+; GFX11-TRUE16-NEXT: scratch_load_b32 v184, off, s32 offset:28
+; GFX11-TRUE16-NEXT: scratch_load_b32 v175, off, s32 offset:32
+; GFX11-TRUE16-NEXT: scratch_load_b32 v174, off, s32 offset:36
+; GFX11-TRUE16-NEXT: scratch_load_b32 v173, off, s32 offset:40
+; GFX11-TRUE16-NEXT: scratch_load_b32 v172, off, s32 offset:44
+; GFX11-TRUE16-NEXT: scratch_load_b32 v171, off, s32 offset:48
+; GFX11-TRUE16-NEXT: scratch_load_b32 v170, off, s32 offset:52
+; GFX11-TRUE16-NEXT: scratch_load_b32 v169, off, s32 offset:56
+; GFX11-TRUE16-NEXT: scratch_load_b32 v168, off, s32 offset:60
+; GFX11-TRUE16-NEXT: scratch_load_b32 v159, off, s32 offset:64
+; GFX11-TRUE16-NEXT: scratch_load_b32 v158, off, s32 offset:68
+; GFX11-TRUE16-NEXT: scratch_load_b32 v157, off, s32 offset:72
+; GFX11-TRUE16-NEXT: scratch_load_b32 v156, off, s32 offset:76
+; GFX11-TRUE16-NEXT: scratch_load_b32 v155, off, s32 offset:80
+; GFX11-TRUE16-NEXT: scratch_load_b32 v154, off, s32 offset:84
+; GFX11-TRUE16-NEXT: scratch_load_b32 v153, off, s32 offset:88
+; GFX11-TRUE16-NEXT: scratch_load_b32 v152, off, s32 offset:92
+; GFX11-TRUE16-NEXT: scratch_load_b32 v143, off, s32 offset:96
+; GFX11-TRUE16-NEXT: scratch_load_b32 v142, off, s32 offset:100
+; GFX11-TRUE16-NEXT: scratch_load_b32 v141, off, s32 offset:104
+; GFX11-TRUE16-NEXT: scratch_load_b32 v140, off, s32 offset:108
+; GFX11-TRUE16-NEXT: scratch_load_b32 v139, off, s32 offset:112
+; GFX11-TRUE16-NEXT: scratch_load_b32 v138, off, s32 offset:116
+; GFX11-TRUE16-NEXT: scratch_load_b32 v137, off, s32 offset:120
+; GFX11-TRUE16-NEXT: scratch_load_b32 v136, off, s32 offset:124
+; GFX11-TRUE16-NEXT: s_clause 0x1f
+; GFX11-TRUE16-NEXT: scratch_load_b32 v127, off, s32 offset:128
+; GFX11-TRUE16-NEXT: scratch_load_b32 v126, off, s32 offset:132
+; GFX11-TRUE16-NEXT: scratch_load_b32 v125, off, s32 offset:136
+; GFX11-TRUE16-NEXT: scratch_load_b32 v124, off, s32 offset:140
+; GFX11-TRUE16-NEXT: scratch_load_b32 v123, off, s32 offset:144
+; GFX11-TRUE16-NEXT: scratch_load_b32 v122, off, s32 offset:148
+; GFX11-TRUE16-NEXT: scratch_load_b32 v121, off, s32 offset:152
+; GFX11-TRUE16-NEXT: scratch_load_b32 v120, off, s32 offset:156
+; GFX11-TRUE16-NEXT: scratch_load_b32 v111, off, s32 offset:160
+; GFX11-TRUE16-NEXT: scratch_load_b32 v110, off, s32 offset:164
+; GFX11-TRUE16-NEXT: scratch_load_b32 v109, off, s32 offset:168
+; GFX11-TRUE16-NEXT: scratch_load_b32 v108, off, s32 offset:172
+; GFX11-TRUE16-NEXT: scratch_load_b32 v107, off, s32 offset:176
+; GFX11-TRUE16-NEXT: scratch_load_b32 v106, off, s32 offset:180
+; GFX11-TRUE16-NEXT: scratch_load_b32 v105, off, s32 offset:184
+; GFX11-TRUE16-NEXT: scratch_load_b32 v104, off, s32 offset:188
+; GFX11-TRUE16-NEXT: scratch_load_b32 v95, off, s32 offset:192
+; GFX11-TRUE16-NEXT: scratch_load_b32 v94, off, s32 offset:196
+; GFX11-TRUE16-NEXT: scratch_load_b32 v93, off, s32 offset:200
+; GFX11-TRUE16-NEXT: scratch_load_b32 v92, off, s32 offset:204
+; GFX11-TRUE16-NEXT: scratch_load_b32 v91, off, s32 offset:208
+; GFX11-TRUE16-NEXT: scratch_load_b32 v90, off, s32 offset:212
+; GFX11-TRUE16-NEXT: scratch_load_b32 v89, off, s32 offset:216
+; GFX11-TRUE16-NEXT: scratch_load_b32 v88, off, s32 offset:220
+; GFX11-TRUE16-NEXT: scratch_load_b32 v79, off, s32 offset:224
+; GFX11-TRUE16-NEXT: scratch_load_b32 v78, off, s32 offset:228
+; GFX11-TRUE16-NEXT: scratch_load_b32 v77, off, s32 offset:232
+; GFX11-TRUE16-NEXT: scratch_load_b32 v76, off, s32 offset:236
+; GFX11-TRUE16-NEXT: scratch_load_b32 v75, off, s32 offset:240
+; GFX11-TRUE16-NEXT: scratch_load_b32 v74, off, s32 offset:244
+; GFX11-TRUE16-NEXT: scratch_load_b32 v73, off, s32 offset:248
+; GFX11-TRUE16-NEXT: scratch_load_b32 v72, off, s32 offset:252
+; GFX11-TRUE16-NEXT: s_clause 0xf
+; GFX11-TRUE16-NEXT: scratch_load_b32 v63, off, s32 offset:256
+; GFX11-TRUE16-NEXT: scratch_load_b32 v62, off, s32 offset:260
+; GFX11-TRUE16-NEXT: scratch_load_b32 v61, off, s32 offset:264
+; GFX11-TRUE16-NEXT: scratch_load_b32 v60, off, s32 offset:268
+; GFX11-TRUE16-NEXT: scratch_load_b32 v59, off, s32 offset:272
+; GFX11-TRUE16-NEXT: scratch_load_b32 v58, off, s32 offset:276
+; GFX11-TRUE16-NEXT: scratch_load_b32 v57, off, s32 offset:280
+; GFX11-TRUE16-NEXT: scratch_load_b32 v56, off, s32 offset:284
+; GFX11-TRUE16-NEXT: scratch_load_b32 v47, off, s32 offset:288
+; GFX11-TRUE16-NEXT: scratch_load_b32 v46, off, s32 offset:292
+; GFX11-TRUE16-NEXT: scratch_load_b32 v45, off, s32 offset:296
+; GFX11-TRUE16-NEXT: scratch_load_b32 v44, off, s32 offset:300
+; GFX11-TRUE16-NEXT: scratch_load_b32 v43, off, s32 offset:304
+; GFX11-TRUE16-NEXT: scratch_load_b32 v42, off, s32 offset:308
+; GFX11-TRUE16-NEXT: scratch_load_b32 v41, off, s32 offset:312
+; GFX11-TRUE16-NEXT: scratch_load_b32 v40, off, s32 offset:316
+; GFX11-TRUE16-NEXT: v_dual_mov_b32 v3, v9 :: v_dual_mov_b32 v4, v14
+; GFX11-TRUE16-NEXT: v_dual_mov_b32 v9, v54 :: v_dual_mov_b32 v10, v65
+; GFX11-TRUE16-NEXT: v_dual_mov_b32 v14, v119 :: v_dual_mov_b32 v27, v28
+; GFX11-TRUE16-NEXT: s_waitcnt vmcnt(0)
; GFX11-TRUE16-NEXT: s_setpc_b64 s[30:31]
; GFX11-TRUE16-NEXT: .LBB51_4:
+; GFX11-TRUE16-NEXT: v_dual_mov_b32 v64, v28 :: v_dual_mov_b32 v53, v26
+; GFX11-TRUE16-NEXT: v_mov_b32_e32 v54, v25
; GFX11-TRUE16-NEXT: ; implicit-def: $vgpr0_vgpr1_vgpr2_vgpr3_vgpr4_vgpr5_vgpr6_vgpr7_vgpr8_vgpr9_vgpr10_vgpr11_vgpr12_vgpr13_vgpr14_vgpr15_vgpr16_vgpr17_vgpr18_vgpr19_vgpr20_vgpr21_vgpr22_vgpr23_vgpr24_vgpr25_vgpr26_vgpr27_vgpr28_vgpr29_vgpr30_vgpr31
+; GFX11-TRUE16-NEXT: ; implicit-def: $vgpr1_vgpr2_vgpr3_vgpr4_vgpr5_vgpr6_vgpr7_vgpr8_vgpr9_vgpr10_vgpr11_vgpr12_vgpr13_vgpr14_vgpr15_vgpr16_vgpr17_vgpr18_vgpr19_vgpr20_vgpr21_vgpr22_vgpr23_vgpr24_vgpr25_vgpr26_vgpr27_vgpr28_vgpr29_vgpr30_vgpr31_vgpr32
+; GFX11-TRUE16-NEXT: ; implicit-def: $vgpr3_vgpr4_vgpr5_vgpr6_vgpr7_vgpr8_vgpr9_vgpr10_vgpr11_vgpr12_vgpr13_vgpr14_vgpr15_vgpr16_vgpr17_vgpr18_vgpr19_vgpr20_vgpr21_vgpr22_vgpr23_vgpr24_vgpr25_vgpr26_vgpr27_vgpr28_vgpr29_vgpr30_vgpr31_vgpr32_vgpr33_vgpr34
+; GFX11-TRUE16-NEXT: ; implicit-def: $vgpr6_vgpr7_vgpr8_vgpr9_vgpr10_vgpr11_vgpr12_vgpr13_vgpr14_vgpr15_vgpr16_vgpr17_vgpr18_vgpr19_vgpr20_vgpr21_vgpr22_vgpr23_vgpr24_vgpr25_vgpr26_vgpr27_vgpr28_vgpr29_vgpr30_vgpr31_vgpr32_vgpr33_vgpr34_vgpr35_vgpr36_vgpr37
+; GFX11-TRUE16-NEXT: ; implicit-def: $vgpr10_vgpr11_vgpr12_vgpr13_vgpr14_vgpr15_vgpr16_vgpr17_vgpr18_vgpr19_vgpr20_vgpr21_vgpr22_vgpr23_vgpr24_vgpr25_vgpr26_vgpr27_vgpr28_vgpr29_vgpr30_vgpr31_vgpr32_vgpr33_vgpr34_vgpr35_vgpr36_vgpr37_vgpr38_vgpr39_vgpr40_vgpr41
+; GFX11-TRUE16-NEXT: ; implicit-def: $vgpr15_vgpr16_vgpr17_vgpr18_vgpr19_vgpr20_vgpr21_vgpr22_vgpr23_vgpr24_vgpr25_vgpr26_vgpr27_vgpr28_vgpr29_vgpr30_vgpr31_vgpr32_vgpr33_vgpr34_vgpr35_vgpr36_vgpr37_vgpr38_vgpr39_vgpr40_vgpr41_vgpr42_vgpr43_vgpr44_vgpr45_vgpr46
+; GFX11-TRUE16-NEXT: ; implicit-def: $vgpr21_vgpr22_vgpr23_vgpr24_vgpr25_vgpr26_vgpr27_vgpr28_vgpr29_vgpr30_vgpr31_vgpr32_vgpr33_vgpr34_vgpr35_vgpr36_vgpr37_vgpr38_vgpr39_vgpr40_vgpr41_vgpr42_vgpr43_vgpr44_vgpr45_vgpr46_vgpr47_vgpr48_vgpr49_vgpr50_vgpr51_vgpr52
+; GFX11-TRUE16-NEXT: s_delay_alu instid0(VALU_DEP_1) | instskip(NEXT) | instid1(VALU_DEP_3)
+; GFX11-TRUE16-NEXT: v_dual_mov_b32 v25, v54 :: v_dual_mov_b32 v26, v53
+; GFX11-TRUE16-NEXT: ; implicit-def: $vgpr28_vgpr29_vgpr30_vgpr31_vgpr32_vgpr33_vgpr34_vgpr35_vgpr36_vgpr37_vgpr38_vgpr39_vgpr40_vgpr41_vgpr42_vgpr43_vgpr44_vgpr45_vgpr46_vgpr47_vgpr48_vgpr49_vgpr50_vgpr51_vgpr52_vgpr53_vgpr54_vgpr55_vgpr56_vgpr57_vgpr58_vgpr59
+; GFX11-TRUE16-NEXT: v_mov_b32_e32 v28, v64
+; GFX11-TRUE16-NEXT: ; implicit-def: $vgpr36_vgpr37_vgpr38_vgpr39_vgpr40_vgpr41_vgpr42_vgpr43_vgpr44_vgpr45_vgpr46_vgpr47_vgpr48_vgpr49_vgpr50_vgpr51_vgpr52_vgpr53_vgpr54_vgpr55_vgpr56_vgpr57_vgpr58_vgpr59_vgpr60_vgpr61_vgpr62_vgpr63_vgpr64_vgpr65_vgpr66_vgpr67
+; GFX11-TRUE16-NEXT: ; implicit-def: $vgpr45_vgpr46_vgpr47_vgpr48_vgpr49_vgpr50_vgpr51_vgpr52_vgpr53_vgpr54_vgpr55_vgpr56_vgpr57_vgpr58_vgpr59_vgpr60_vgpr61_vgpr62_vgpr63_vgpr64_vgpr65_vgpr66_vgpr67_vgpr68_vgpr69_vgpr70_vgpr71_vgpr72_vgpr73_vgpr74_vgpr75_vgpr76
+; GFX11-TRUE16-NEXT: ; implicit-def: $vgpr55_vgpr56_vgpr57_vgpr58_vgpr59_vgpr60_vgpr61_vgpr62_vgpr63_vgpr64_vgpr65_vgpr66_vgpr67_vgpr68_vgpr69_vgpr70_vgpr71_vgpr72_vgpr73_vgpr74_vgpr75_vgpr76_vgpr77_vgpr78_vgpr79_vgpr80_vgpr81_vgpr82_vgpr83_vgpr84_vgpr85_vgpr86
+; GFX11-TRUE16-NEXT: ; implicit-def: $vgpr66_vgpr67_vgpr68_vgpr69_vgpr70_vgpr71_vgpr72_vgpr73_vgpr74_vgpr75_vgpr76_vgpr77_vgpr78_vgpr79_vgpr80_vgpr81_vgpr82_vgpr83_vgpr84_vgpr85_vgpr86_vgpr87_vgpr88_vgpr89_vgpr90_vgpr91_vgpr92_vgpr93_vgpr94_vgpr95_vgpr96_vgpr97
+; GFX11-TRUE16-NEXT: ; implicit-def: $vgpr78_vgpr79_vgpr80_vgpr81_vgpr82_vgpr83_vgpr84_vgpr85_vgpr86_vgpr87_vgpr88_vgpr89_vgpr90_vgpr91_vgpr92_vgpr93_vgpr94_vgpr95_vgpr96_vgpr97_vgpr98_vgpr99_vgpr100_vgpr101_vgpr102_vgpr103_vgpr104_vgpr105_vgpr106_vgpr107_vgpr108_vgpr109
+; GFX11-TRUE16-NEXT: ; implicit-def: $vgpr91_vgpr92_vgpr93_vgpr94_vgpr95_vgpr96_vgpr97_vgpr98_vgpr99_vgpr100_vgpr101_vgpr102_vgpr103_vgpr104_vgpr105_vgpr106_vgpr107_vgpr108_vgpr109_vgpr110_vgpr111_vgpr112_vgpr113_vgpr114_vgpr115_vgpr116_vgpr117_vgpr118_vgpr119_vgpr120_vgpr121_vgpr122
+; GFX11-TRUE16-NEXT: ; implicit-def: $vgpr105_vgpr106_vgpr107_vgpr108_vgpr109_vgpr110_vgpr111_vgpr112_vgpr113_vgpr114_vgpr115_vgpr116_vgpr117_vgpr118_vgpr119_vgpr120_vgpr121_vgpr122_vgpr123_vgpr124_vgpr125_vgpr126_vgpr127_vgpr128_vgpr129_vgpr130_vgpr131_vgpr132_vgpr133_vgpr134_vgpr135_vgpr136
+; GFX11-TRUE16-NEXT: ; implicit-def: $vgpr120_vgpr121_vgpr122_vgpr123_vgpr124_vgpr125_vgpr126_vgpr127_vgpr128_vgpr129_vgpr130_vgpr131_vgpr132_vgpr133_vgpr134_vgpr135_vgpr136_vgpr137_vgpr138_vgpr139_vgpr140_vgpr141_vgpr142_vgpr143_vgpr144_vgpr145_vgpr146_vgpr147_vgpr148_vgpr149_vgpr150_vgpr151
+; GFX11-TRUE16-NEXT: ; implicit-def: $vgpr136_vgpr137_vgpr138_vgpr139_vgpr140_vgpr141_vgpr142_vgpr143_vgpr144_vgpr145_vgpr146_vgpr147_vgpr148_vgpr149_vgpr150_vgpr151_vgpr152_vgpr153_vgpr154_vgpr155_vgpr156_vgpr157_vgpr158_vgpr159_vgpr160_vgpr161_vgpr162_vgpr163_vgpr164_vgpr165_vgpr166_vgpr167
+; GFX11-TRUE16-NEXT: ; implicit-def: $vgpr153_vgpr154_vgpr155_vgpr156_vgpr157_vgpr158_vgpr159_vgpr160_vgpr161_vgpr162_vgpr163_vgpr164_vgpr165_vgpr166_vgpr167_vgpr168_vgpr169_vgpr170_vgpr171_vgpr172_vgpr173_vgpr174_vgpr175_vgpr176_vgpr177_vgpr178_vgpr179_vgpr180_vgpr181_vgpr182_vgpr183_vgpr184
; GFX11-TRUE16-NEXT: s_branch .LBB51_2
;
; GFX11-FAKE16-LABEL: bitcast_v56i16_to_v14f64_scalar:
@@ -39918,191 +41543,364 @@ define inreg <56 x half> @bitcast_v14f64_to_v56f16_scalar(<14 x double> inreg %a
; GFX9-NEXT: ; implicit-def: $vgpr27
; GFX9-NEXT: s_branch .LBB53_2
;
-; GFX11-LABEL: bitcast_v14f64_to_v56f16_scalar:
-; GFX11: ; %bb.0:
-; GFX11-NEXT: s_waitcnt vmcnt(0) expcnt(0) lgkmcnt(0)
-; GFX11-NEXT: v_cmp_ne_u32_e32 vcc_lo, 0, v10
-; GFX11-NEXT: v_dual_mov_b32 v27, s0 :: v_dual_mov_b32 v28, s1
-; GFX11-NEXT: v_dual_mov_b32 v25, s2 :: v_dual_mov_b32 v26, s3
-; GFX11-NEXT: v_dual_mov_b32 v23, s16 :: v_dual_mov_b32 v24, s17
-; GFX11-NEXT: v_dual_mov_b32 v21, s18 :: v_dual_mov_b32 v22, s19
-; GFX11-NEXT: v_dual_mov_b32 v19, s20 :: v_dual_mov_b32 v20, s21
-; GFX11-NEXT: v_dual_mov_b32 v11, s22 :: v_dual_mov_b32 v12, s23
-; GFX11-NEXT: v_dual_mov_b32 v17, s24 :: v_dual_mov_b32 v18, s25
-; GFX11-NEXT: v_dual_mov_b32 v13, s26 :: v_dual_mov_b32 v14, s27
-; GFX11-NEXT: v_dual_mov_b32 v15, s28 :: v_dual_mov_b32 v16, s29
-; GFX11-NEXT: s_mov_b32 s0, 0
-; GFX11-NEXT: s_and_b32 s1, vcc_lo, exec_lo
-; GFX11-NEXT: s_cbranch_scc0 .LBB53_4
-; GFX11-NEXT: ; %bb.1: ; %cmp.false
-; GFX11-NEXT: v_lshrrev_b32_e32 v38, 16, v9
-; GFX11-NEXT: v_lshrrev_b32_e32 v39, 16, v8
-; GFX11-NEXT: v_lshrrev_b32_e32 v48, 16, v7
-; GFX11-NEXT: v_lshrrev_b32_e32 v49, 16, v6
-; GFX11-NEXT: v_lshrrev_b32_e32 v50, 16, v5
-; GFX11-NEXT: v_lshrrev_b32_e32 v51, 16, v4
-; GFX11-NEXT: v_lshrrev_b32_e32 v52, 16, v3
-; GFX11-NEXT: v_lshrrev_b32_e32 v53, 16, v2
-; GFX11-NEXT: v_lshrrev_b32_e32 v54, 16, v1
-; GFX11-NEXT: v_lshrrev_b32_e32 v55, 16, v0
-; GFX11-NEXT: v_lshrrev_b32_e32 v64, 16, v16
-; GFX11-NEXT: v_lshrrev_b32_e32 v65, 16, v15
-; GFX11-NEXT: v_lshrrev_b32_e32 v66, 16, v14
-; GFX11-NEXT: v_lshrrev_b32_e32 v67, 16, v13
-; GFX11-NEXT: v_lshrrev_b32_e32 v68, 16, v18
-; GFX11-NEXT: v_lshrrev_b32_e32 v69, 16, v17
-; GFX11-NEXT: v_lshrrev_b32_e32 v70, 16, v12
-; GFX11-NEXT: v_lshrrev_b32_e32 v10, 16, v11
-; GFX11-NEXT: v_lshrrev_b32_e32 v29, 16, v20
-; GFX11-NEXT: v_lshrrev_b32_e32 v71, 16, v19
-; GFX11-NEXT: v_lshrrev_b32_e32 v37, 16, v22
-; GFX11-NEXT: v_lshrrev_b32_e32 v36, 16, v21
-; GFX11-NEXT: v_lshrrev_b32_e32 v35, 16, v24
-; GFX11-NEXT: v_lshrrev_b32_e32 v34, 16, v23
-; GFX11-NEXT: v_lshrrev_b32_e32 v33, 16, v26
-; GFX11-NEXT: v_lshrrev_b32_e32 v32, 16, v25
-; GFX11-NEXT: v_lshrrev_b32_e32 v31, 16, v28
-; GFX11-NEXT: v_lshrrev_b32_e32 v30, 16, v27
-; GFX11-NEXT: s_and_not1_b32 vcc_lo, exec_lo, s0
-; GFX11-NEXT: s_cbranch_vccnz .LBB53_3
-; GFX11-NEXT: .LBB53_2: ; %cmp.true
-; GFX11-NEXT: v_add_f64 v[8:9], v[8:9], 1.0
-; GFX11-NEXT: v_add_f64 v[6:7], v[6:7], 1.0
-; GFX11-NEXT: v_add_f64 v[4:5], v[4:5], 1.0
-; GFX11-NEXT: v_add_f64 v[2:3], v[2:3], 1.0
-; GFX11-NEXT: v_add_f64 v[0:1], v[0:1], 1.0
-; GFX11-NEXT: v_add_f64 v[15:16], v[15:16], 1.0
-; GFX11-NEXT: v_add_f64 v[13:14], v[13:14], 1.0
-; GFX11-NEXT: v_add_f64 v[17:18], v[17:18], 1.0
-; GFX11-NEXT: v_add_f64 v[11:12], v[11:12], 1.0
-; GFX11-NEXT: v_add_f64 v[19:20], v[19:20], 1.0
-; GFX11-NEXT: v_add_f64 v[21:22], v[21:22], 1.0
-; GFX11-NEXT: v_add_f64 v[23:24], v[23:24], 1.0
-; GFX11-NEXT: v_add_f64 v[25:26], v[25:26], 1.0
-; GFX11-NEXT: v_add_f64 v[27:28], v[27:28], 1.0
-; GFX11-NEXT: v_lshrrev_b32_e32 v38, 16, v9
-; GFX11-NEXT: v_lshrrev_b32_e32 v39, 16, v8
-; GFX11-NEXT: v_lshrrev_b32_e32 v48, 16, v7
-; GFX11-NEXT: v_lshrrev_b32_e32 v49, 16, v6
-; GFX11-NEXT: v_lshrrev_b32_e32 v50, 16, v5
-; GFX11-NEXT: v_lshrrev_b32_e32 v51, 16, v4
-; GFX11-NEXT: v_lshrrev_b32_e32 v52, 16, v3
-; GFX11-NEXT: v_lshrrev_b32_e32 v53, 16, v2
-; GFX11-NEXT: v_lshrrev_b32_e32 v54, 16, v1
-; GFX11-NEXT: v_lshrrev_b32_e32 v55, 16, v0
-; GFX11-NEXT: v_lshrrev_b32_e32 v64, 16, v16
-; GFX11-NEXT: v_lshrrev_b32_e32 v65, 16, v15
-; GFX11-NEXT: v_lshrrev_b32_e32 v66, 16, v14
-; GFX11-NEXT: v_lshrrev_b32_e32 v67, 16, v13
-; GFX11-NEXT: v_lshrrev_b32_e32 v68, 16, v18
-; GFX11-NEXT: v_lshrrev_b32_e32 v69, 16, v17
-; GFX11-NEXT: v_lshrrev_b32_e32 v70, 16, v12
-; GFX11-NEXT: v_lshrrev_b32_e32 v10, 16, v11
-; GFX11-NEXT: v_lshrrev_b32_e32 v29, 16, v20
-; GFX11-NEXT: v_lshrrev_b32_e32 v71, 16, v19
-; GFX11-NEXT: v_lshrrev_b32_e32 v37, 16, v22
-; GFX11-NEXT: v_lshrrev_b32_e32 v36, 16, v21
-; GFX11-NEXT: v_lshrrev_b32_e32 v35, 16, v24
-; GFX11-NEXT: v_lshrrev_b32_e32 v34, 16, v23
-; GFX11-NEXT: v_lshrrev_b32_e32 v33, 16, v26
-; GFX11-NEXT: v_lshrrev_b32_e32 v32, 16, v25
-; GFX11-NEXT: v_lshrrev_b32_e32 v31, 16, v28
-; GFX11-NEXT: v_lshrrev_b32_e32 v30, 16, v27
-; GFX11-NEXT: .LBB53_3: ; %end
-; GFX11-NEXT: v_and_b32_e32 v28, 0xffff, v28
-; GFX11-NEXT: v_and_b32_e32 v22, 0xffff, v22
-; GFX11-NEXT: v_and_b32_e32 v19, 0xffff, v19
-; GFX11-NEXT: v_and_b32_e32 v25, 0xffff, v25
-; GFX11-NEXT: v_and_b32_e32 v11, 0xffff, v11
-; GFX11-NEXT: v_lshl_or_b32 v31, v31, 16, v28
-; GFX11-NEXT: v_lshl_or_b32 v37, v37, 16, v22
-; GFX11-NEXT: v_and_b32_e32 v20, 0xffff, v20
-; GFX11-NEXT: v_lshl_or_b32 v28, v71, 16, v19
-; GFX11-NEXT: v_and_b32_e32 v21, 0xffff, v21
-; GFX11-NEXT: v_and_b32_e32 v17, 0xffff, v17
-; GFX11-NEXT: v_and_b32_e32 v19, 0xffff, v14
-; GFX11-NEXT: v_lshl_or_b32 v29, v29, 16, v20
-; GFX11-NEXT: v_and_b32_e32 v12, 0xffff, v12
-; GFX11-NEXT: v_lshl_or_b32 v36, v36, 16, v21
-; GFX11-NEXT: v_and_b32_e32 v23, 0xffff, v23
-; GFX11-NEXT: v_and_b32_e32 v1, 0xffff, v1
-; GFX11-NEXT: v_and_b32_e32 v4, 0xffff, v4
-; GFX11-NEXT: v_and_b32_e32 v27, 0xffff, v27
-; GFX11-NEXT: v_lshl_or_b32 v32, v32, 16, v25
-; GFX11-NEXT: v_lshl_or_b32 v10, v10, 16, v11
-; GFX11-NEXT: v_and_b32_e32 v20, 0xffff, v15
-; GFX11-NEXT: v_and_b32_e32 v21, 0xffff, v16
-; GFX11-NEXT: v_lshl_or_b32 v15, v66, 16, v19
-; GFX11-NEXT: v_and_b32_e32 v3, 0xffff, v3
-; GFX11-NEXT: v_lshl_or_b32 v19, v54, 16, v1
-; GFX11-NEXT: v_lshl_or_b32 v22, v51, 16, v4
-; GFX11-NEXT: v_and_b32_e32 v1, 0xffff, v6
-; GFX11-NEXT: v_and_b32_e32 v4, 0xffff, v9
-; GFX11-NEXT: v_mov_b32_e32 v6, v36
-; GFX11-NEXT: v_lshl_or_b32 v34, v34, 16, v23
-; GFX11-NEXT: v_and_b32_e32 v25, 0xffff, v26
-; GFX11-NEXT: v_mov_b32_e32 v9, v29
-; GFX11-NEXT: v_lshl_or_b32 v11, v70, 16, v12
-; GFX11-NEXT: v_lshl_or_b32 v12, v69, 16, v17
-; GFX11-NEXT: v_and_b32_e32 v17, 0xffff, v18
-; GFX11-NEXT: v_lshl_or_b32 v30, v30, 16, v27
-; GFX11-NEXT: v_and_b32_e32 v18, 0xffff, v13
-; GFX11-NEXT: v_and_b32_e32 v0, 0xffff, v0
-; GFX11-NEXT: v_and_b32_e32 v2, 0xffff, v2
-; GFX11-NEXT: v_lshl_or_b32 v13, v68, 16, v17
-; GFX11-NEXT: v_lshl_or_b32 v17, v64, 16, v21
-; GFX11-NEXT: v_lshl_or_b32 v21, v52, 16, v3
-; GFX11-NEXT: v_and_b32_e32 v3, 0xffff, v8
-; GFX11-NEXT: v_lshl_or_b32 v27, v38, 16, v4
-; GFX11-NEXT: v_mov_b32_e32 v4, v34
-; GFX11-NEXT: v_lshl_or_b32 v33, v33, 16, v25
-; GFX11-NEXT: v_and_b32_e32 v24, 0xffff, v24
-; GFX11-NEXT: v_lshl_or_b32 v14, v67, 16, v18
-; GFX11-NEXT: v_lshl_or_b32 v16, v65, 16, v20
-; GFX11-NEXT: v_lshl_or_b32 v18, v55, 16, v0
-; GFX11-NEXT: v_lshl_or_b32 v20, v53, 16, v2
-; GFX11-NEXT: v_and_b32_e32 v0, 0xffff, v5
-; GFX11-NEXT: v_and_b32_e32 v2, 0xffff, v7
-; GFX11-NEXT: v_lshl_or_b32 v26, v39, 16, v3
-; GFX11-NEXT: v_mov_b32_e32 v3, v33
-; GFX11-NEXT: v_lshl_or_b32 v35, v35, 16, v24
-; GFX11-NEXT: v_lshl_or_b32 v23, v50, 16, v0
-; GFX11-NEXT: v_lshl_or_b32 v24, v49, 16, v1
-; GFX11-NEXT: v_lshl_or_b32 v25, v48, 16, v2
-; GFX11-NEXT: v_dual_mov_b32 v0, v30 :: v_dual_mov_b32 v1, v31
-; GFX11-NEXT: v_dual_mov_b32 v2, v32 :: v_dual_mov_b32 v5, v35
-; GFX11-NEXT: v_dual_mov_b32 v7, v37 :: v_dual_mov_b32 v8, v28
-; GFX11-NEXT: s_setpc_b64 s[30:31]
-; GFX11-NEXT: .LBB53_4:
-; GFX11-NEXT: ; implicit-def: $vgpr30
-; GFX11-NEXT: ; implicit-def: $vgpr31
-; GFX11-NEXT: ; implicit-def: $vgpr32
-; GFX11-NEXT: ; implicit-def: $vgpr33
-; GFX11-NEXT: ; implicit-def: $vgpr34
-; GFX11-NEXT: ; implicit-def: $vgpr35
-; GFX11-NEXT: ; implicit-def: $vgpr36
-; GFX11-NEXT: ; implicit-def: $vgpr37
-; GFX11-NEXT: ; implicit-def: $vgpr71
-; GFX11-NEXT: ; implicit-def: $vgpr29
-; GFX11-NEXT: ; implicit-def: $vgpr10
-; GFX11-NEXT: ; implicit-def: $vgpr70
-; GFX11-NEXT: ; implicit-def: $vgpr69
-; GFX11-NEXT: ; implicit-def: $vgpr68
-; GFX11-NEXT: ; implicit-def: $vgpr67
-; GFX11-NEXT: ; implicit-def: $vgpr66
-; GFX11-NEXT: ; implicit-def: $vgpr65
-; GFX11-NEXT: ; implicit-def: $vgpr64
-; GFX11-NEXT: ; implicit-def: $vgpr55
-; GFX11-NEXT: ; implicit-def: $vgpr54
-; GFX11-NEXT: ; implicit-def: $vgpr53
-; GFX11-NEXT: ; implicit-def: $vgpr52
-; GFX11-NEXT: ; implicit-def: $vgpr51
-; GFX11-NEXT: ; implicit-def: $vgpr50
-; GFX11-NEXT: ; implicit-def: $vgpr49
-; GFX11-NEXT: ; implicit-def: $vgpr48
-; GFX11-NEXT: ; implicit-def: $vgpr39
-; GFX11-NEXT: ; implicit-def: $vgpr38
-; GFX11-NEXT: s_branch .LBB53_2
+; GFX11-TRUE16-LABEL: bitcast_v14f64_to_v56f16_scalar:
+; GFX11-TRUE16: ; %bb.0:
+; GFX11-TRUE16-NEXT: s_waitcnt vmcnt(0) expcnt(0) lgkmcnt(0)
+; GFX11-TRUE16-NEXT: v_dual_mov_b32 v16, v10 :: v_dual_mov_b32 v27, v9
+; GFX11-TRUE16-NEXT: v_dual_mov_b32 v26, v8 :: v_dual_mov_b32 v25, v7
+; GFX11-TRUE16-NEXT: v_dual_mov_b32 v24, v6 :: v_dual_mov_b32 v23, v5
+; GFX11-TRUE16-NEXT: s_delay_alu instid0(VALU_DEP_3)
+; GFX11-TRUE16-NEXT: v_cmp_ne_u32_e32 vcc_lo, 0, v16
+; GFX11-TRUE16-NEXT: v_dual_mov_b32 v22, v4 :: v_dual_mov_b32 v21, v3
+; GFX11-TRUE16-NEXT: v_dual_mov_b32 v20, v2 :: v_dual_mov_b32 v19, v1
+; GFX11-TRUE16-NEXT: v_dual_mov_b32 v18, v0 :: v_dual_mov_b32 v1, s1
+; GFX11-TRUE16-NEXT: v_dual_mov_b32 v0, s0 :: v_dual_mov_b32 v3, s3
+; GFX11-TRUE16-NEXT: v_dual_mov_b32 v2, s2 :: v_dual_mov_b32 v5, s17
+; GFX11-TRUE16-NEXT: v_dual_mov_b32 v4, s16 :: v_dual_mov_b32 v7, s19
+; GFX11-TRUE16-NEXT: v_dual_mov_b32 v6, s18 :: v_dual_mov_b32 v9, s21
+; GFX11-TRUE16-NEXT: v_dual_mov_b32 v8, s20 :: v_dual_mov_b32 v11, s23
+; GFX11-TRUE16-NEXT: v_dual_mov_b32 v10, s22 :: v_dual_mov_b32 v13, s25
+; GFX11-TRUE16-NEXT: v_dual_mov_b32 v12, s24 :: v_dual_mov_b32 v15, s27
+; GFX11-TRUE16-NEXT: v_dual_mov_b32 v14, s26 :: v_dual_mov_b32 v17, s29
+; GFX11-TRUE16-NEXT: v_mov_b32_e32 v16, s28
+; GFX11-TRUE16-NEXT: s_mov_b32 s0, 0
+; GFX11-TRUE16-NEXT: s_and_b32 s1, vcc_lo, exec_lo
+; GFX11-TRUE16-NEXT: s_cbranch_scc0 .LBB53_4
+; GFX11-TRUE16-NEXT: ; %bb.1: ; %cmp.false
+; GFX11-TRUE16-NEXT: v_lshrrev_b32_e32 v28, 16, v27
+; GFX11-TRUE16-NEXT: v_lshrrev_b32_e32 v29, 16, v26
+; GFX11-TRUE16-NEXT: v_lshrrev_b32_e32 v30, 16, v25
+; GFX11-TRUE16-NEXT: v_lshrrev_b32_e32 v31, 16, v24
+; GFX11-TRUE16-NEXT: v_lshrrev_b32_e32 v32, 16, v23
+; GFX11-TRUE16-NEXT: v_lshrrev_b32_e32 v33, 16, v22
+; GFX11-TRUE16-NEXT: v_lshrrev_b32_e32 v34, 16, v21
+; GFX11-TRUE16-NEXT: v_lshrrev_b32_e32 v35, 16, v20
+; GFX11-TRUE16-NEXT: v_lshrrev_b32_e32 v36, 16, v19
+; GFX11-TRUE16-NEXT: v_lshrrev_b32_e32 v37, 16, v18
+; GFX11-TRUE16-NEXT: v_lshrrev_b32_e32 v38, 16, v17
+; GFX11-TRUE16-NEXT: v_lshrrev_b32_e32 v39, 16, v16
+; GFX11-TRUE16-NEXT: v_lshrrev_b32_e32 v48, 16, v15
+; GFX11-TRUE16-NEXT: v_lshrrev_b32_e32 v49, 16, v14
+; GFX11-TRUE16-NEXT: v_lshrrev_b32_e32 v50, 16, v13
+; GFX11-TRUE16-NEXT: v_lshrrev_b32_e32 v51, 16, v12
+; GFX11-TRUE16-NEXT: v_lshrrev_b32_e32 v52, 16, v11
+; GFX11-TRUE16-NEXT: v_lshrrev_b32_e32 v53, 16, v10
+; GFX11-TRUE16-NEXT: v_lshrrev_b32_e32 v54, 16, v9
+; GFX11-TRUE16-NEXT: v_lshrrev_b32_e32 v55, 16, v8
+; GFX11-TRUE16-NEXT: v_lshrrev_b32_e32 v64, 16, v7
+; GFX11-TRUE16-NEXT: v_lshrrev_b32_e32 v65, 16, v6
+; GFX11-TRUE16-NEXT: v_lshrrev_b32_e32 v66, 16, v5
+; GFX11-TRUE16-NEXT: v_lshrrev_b32_e32 v67, 16, v4
+; GFX11-TRUE16-NEXT: v_lshrrev_b32_e32 v68, 16, v3
+; GFX11-TRUE16-NEXT: v_lshrrev_b32_e32 v69, 16, v2
+; GFX11-TRUE16-NEXT: v_lshrrev_b32_e32 v70, 16, v1
+; GFX11-TRUE16-NEXT: v_lshrrev_b32_e32 v71, 16, v0
+; GFX11-TRUE16-NEXT: s_and_not1_b32 vcc_lo, exec_lo, s0
+; GFX11-TRUE16-NEXT: s_cbranch_vccnz .LBB53_3
+; GFX11-TRUE16-NEXT: .LBB53_2: ; %cmp.true
+; GFX11-TRUE16-NEXT: v_add_f64 v[26:27], v[26:27], 1.0
+; GFX11-TRUE16-NEXT: v_add_f64 v[24:25], v[24:25], 1.0
+; GFX11-TRUE16-NEXT: v_add_f64 v[22:23], v[22:23], 1.0
+; GFX11-TRUE16-NEXT: v_add_f64 v[20:21], v[20:21], 1.0
+; GFX11-TRUE16-NEXT: v_add_f64 v[18:19], v[18:19], 1.0
+; GFX11-TRUE16-NEXT: v_add_f64 v[16:17], v[16:17], 1.0
+; GFX11-TRUE16-NEXT: v_add_f64 v[14:15], v[14:15], 1.0
+; GFX11-TRUE16-NEXT: v_add_f64 v[12:13], v[12:13], 1.0
+; GFX11-TRUE16-NEXT: v_add_f64 v[10:11], v[10:11], 1.0
+; GFX11-TRUE16-NEXT: v_add_f64 v[8:9], v[8:9], 1.0
+; GFX11-TRUE16-NEXT: v_add_f64 v[6:7], v[6:7], 1.0
+; GFX11-TRUE16-NEXT: v_add_f64 v[4:5], v[4:5], 1.0
+; GFX11-TRUE16-NEXT: v_add_f64 v[2:3], v[2:3], 1.0
+; GFX11-TRUE16-NEXT: v_add_f64 v[0:1], v[0:1], 1.0
+; GFX11-TRUE16-NEXT: v_lshrrev_b32_e32 v28, 16, v27
+; GFX11-TRUE16-NEXT: v_lshrrev_b32_e32 v29, 16, v26
+; GFX11-TRUE16-NEXT: v_lshrrev_b32_e32 v30, 16, v25
+; GFX11-TRUE16-NEXT: v_lshrrev_b32_e32 v31, 16, v24
+; GFX11-TRUE16-NEXT: v_lshrrev_b32_e32 v32, 16, v23
+; GFX11-TRUE16-NEXT: v_lshrrev_b32_e32 v33, 16, v22
+; GFX11-TRUE16-NEXT: v_lshrrev_b32_e32 v34, 16, v21
+; GFX11-TRUE16-NEXT: v_lshrrev_b32_e32 v35, 16, v20
+; GFX11-TRUE16-NEXT: v_lshrrev_b32_e32 v36, 16, v19
+; GFX11-TRUE16-NEXT: v_lshrrev_b32_e32 v37, 16, v18
+; GFX11-TRUE16-NEXT: v_lshrrev_b32_e32 v38, 16, v17
+; GFX11-TRUE16-NEXT: v_lshrrev_b32_e32 v39, 16, v16
+; GFX11-TRUE16-NEXT: v_lshrrev_b32_e32 v48, 16, v15
+; GFX11-TRUE16-NEXT: v_lshrrev_b32_e32 v49, 16, v14
+; GFX11-TRUE16-NEXT: v_lshrrev_b32_e32 v50, 16, v13
+; GFX11-TRUE16-NEXT: v_lshrrev_b32_e32 v51, 16, v12
+; GFX11-TRUE16-NEXT: v_lshrrev_b32_e32 v52, 16, v11
+; GFX11-TRUE16-NEXT: v_lshrrev_b32_e32 v53, 16, v10
+; GFX11-TRUE16-NEXT: v_lshrrev_b32_e32 v54, 16, v9
+; GFX11-TRUE16-NEXT: v_lshrrev_b32_e32 v55, 16, v8
+; GFX11-TRUE16-NEXT: v_lshrrev_b32_e32 v64, 16, v7
+; GFX11-TRUE16-NEXT: v_lshrrev_b32_e32 v65, 16, v6
+; GFX11-TRUE16-NEXT: v_lshrrev_b32_e32 v66, 16, v5
+; GFX11-TRUE16-NEXT: v_lshrrev_b32_e32 v67, 16, v4
+; GFX11-TRUE16-NEXT: v_lshrrev_b32_e32 v68, 16, v3
+; GFX11-TRUE16-NEXT: v_lshrrev_b32_e32 v69, 16, v2
+; GFX11-TRUE16-NEXT: v_lshrrev_b32_e32 v70, 16, v1
+; GFX11-TRUE16-NEXT: v_lshrrev_b32_e32 v71, 16, v0
+; GFX11-TRUE16-NEXT: .LBB53_3: ; %end
+; GFX11-TRUE16-NEXT: s_delay_alu instid0(VALU_DEP_1) | instskip(NEXT) | instid1(VALU_DEP_4)
+; GFX11-TRUE16-NEXT: v_dual_mov_b32 v71, v71 :: v_dual_mov_b32 v70, v70
+; GFX11-TRUE16-NEXT: v_dual_mov_b32 v69, v69 :: v_dual_mov_b32 v68, v68
+; GFX11-TRUE16-NEXT: v_dual_mov_b32 v67, v67 :: v_dual_mov_b32 v66, v66
+; GFX11-TRUE16-NEXT: v_dual_mov_b32 v65, v65 :: v_dual_mov_b32 v64, v64
+; GFX11-TRUE16-NEXT: v_dual_mov_b32 v55, v55 :: v_dual_mov_b32 v54, v54
+; GFX11-TRUE16-NEXT: v_dual_mov_b32 v53, v53 :: v_dual_mov_b32 v52, v52
+; GFX11-TRUE16-NEXT: v_dual_mov_b32 v51, v51 :: v_dual_mov_b32 v50, v50
+; GFX11-TRUE16-NEXT: v_dual_mov_b32 v49, v49 :: v_dual_mov_b32 v48, v48
+; GFX11-TRUE16-NEXT: v_dual_mov_b32 v39, v39 :: v_dual_mov_b32 v38, v38
+; GFX11-TRUE16-NEXT: v_dual_mov_b32 v37, v37 :: v_dual_mov_b32 v36, v36
+; GFX11-TRUE16-NEXT: v_dual_mov_b32 v35, v35 :: v_dual_mov_b32 v34, v34
+; GFX11-TRUE16-NEXT: v_dual_mov_b32 v33, v33 :: v_dual_mov_b32 v32, v32
+; GFX11-TRUE16-NEXT: v_dual_mov_b32 v31, v31 :: v_dual_mov_b32 v30, v30
+; GFX11-TRUE16-NEXT: v_dual_mov_b32 v29, v29 :: v_dual_mov_b32 v28, v28
+; GFX11-TRUE16-NEXT: v_mov_b16_e32 v0.h, v71.l
+; GFX11-TRUE16-NEXT: v_mov_b16_e32 v1.h, v70.l
+; GFX11-TRUE16-NEXT: v_mov_b16_e32 v2.h, v69.l
+; GFX11-TRUE16-NEXT: v_mov_b16_e32 v3.h, v68.l
+; GFX11-TRUE16-NEXT: v_mov_b16_e32 v4.h, v67.l
+; GFX11-TRUE16-NEXT: v_mov_b16_e32 v5.h, v66.l
+; GFX11-TRUE16-NEXT: v_mov_b16_e32 v6.h, v65.l
+; GFX11-TRUE16-NEXT: v_mov_b16_e32 v7.h, v64.l
+; GFX11-TRUE16-NEXT: v_mov_b16_e32 v8.h, v55.l
+; GFX11-TRUE16-NEXT: v_mov_b16_e32 v9.h, v54.l
+; GFX11-TRUE16-NEXT: v_mov_b16_e32 v10.h, v53.l
+; GFX11-TRUE16-NEXT: v_mov_b16_e32 v11.h, v52.l
+; GFX11-TRUE16-NEXT: v_mov_b16_e32 v12.h, v51.l
+; GFX11-TRUE16-NEXT: v_mov_b16_e32 v13.h, v50.l
+; GFX11-TRUE16-NEXT: v_mov_b16_e32 v14.h, v49.l
+; GFX11-TRUE16-NEXT: v_mov_b16_e32 v15.h, v48.l
+; GFX11-TRUE16-NEXT: v_mov_b16_e32 v16.h, v39.l
+; GFX11-TRUE16-NEXT: v_mov_b16_e32 v17.h, v38.l
+; GFX11-TRUE16-NEXT: v_mov_b16_e32 v18.h, v37.l
+; GFX11-TRUE16-NEXT: v_mov_b16_e32 v19.h, v36.l
+; GFX11-TRUE16-NEXT: v_mov_b16_e32 v20.h, v35.l
+; GFX11-TRUE16-NEXT: v_mov_b16_e32 v21.h, v34.l
+; GFX11-TRUE16-NEXT: v_mov_b16_e32 v22.h, v33.l
+; GFX11-TRUE16-NEXT: v_mov_b16_e32 v23.h, v32.l
+; GFX11-TRUE16-NEXT: v_mov_b16_e32 v24.h, v31.l
+; GFX11-TRUE16-NEXT: v_mov_b16_e32 v25.h, v30.l
+; GFX11-TRUE16-NEXT: v_mov_b16_e32 v26.h, v29.l
+; GFX11-TRUE16-NEXT: v_mov_b16_e32 v27.h, v28.l
+; GFX11-TRUE16-NEXT: s_setpc_b64 s[30:31]
+; GFX11-TRUE16-NEXT: .LBB53_4:
+; GFX11-TRUE16-NEXT: ; implicit-def: $vgpr71
+; GFX11-TRUE16-NEXT: ; implicit-def: $vgpr70
+; GFX11-TRUE16-NEXT: ; implicit-def: $vgpr69
+; GFX11-TRUE16-NEXT: ; implicit-def: $vgpr68
+; GFX11-TRUE16-NEXT: ; implicit-def: $vgpr67
+; GFX11-TRUE16-NEXT: ; implicit-def: $vgpr66
+; GFX11-TRUE16-NEXT: ; implicit-def: $vgpr65
+; GFX11-TRUE16-NEXT: ; implicit-def: $vgpr64
+; GFX11-TRUE16-NEXT: ; implicit-def: $vgpr55
+; GFX11-TRUE16-NEXT: ; implicit-def: $vgpr54
+; GFX11-TRUE16-NEXT: ; implicit-def: $vgpr53
+; GFX11-TRUE16-NEXT: ; implicit-def: $vgpr52
+; GFX11-TRUE16-NEXT: ; implicit-def: $vgpr51
+; GFX11-TRUE16-NEXT: ; implicit-def: $vgpr50
+; GFX11-TRUE16-NEXT: ; implicit-def: $vgpr49
+; GFX11-TRUE16-NEXT: ; implicit-def: $vgpr48
+; GFX11-TRUE16-NEXT: ; implicit-def: $vgpr39
+; GFX11-TRUE16-NEXT: ; implicit-def: $vgpr38
+; GFX11-TRUE16-NEXT: ; implicit-def: $vgpr37
+; GFX11-TRUE16-NEXT: ; implicit-def: $vgpr36
+; GFX11-TRUE16-NEXT: ; implicit-def: $vgpr35
+; GFX11-TRUE16-NEXT: ; implicit-def: $vgpr34
+; GFX11-TRUE16-NEXT: ; implicit-def: $vgpr33
+; GFX11-TRUE16-NEXT: ; implicit-def: $vgpr32
+; GFX11-TRUE16-NEXT: ; implicit-def: $vgpr31
+; GFX11-TRUE16-NEXT: ; implicit-def: $vgpr30
+; GFX11-TRUE16-NEXT: ; implicit-def: $vgpr29
+; GFX11-TRUE16-NEXT: ; implicit-def: $vgpr28
+; GFX11-TRUE16-NEXT: s_branch .LBB53_2
+;
+; GFX11-FAKE16-LABEL: bitcast_v14f64_to_v56f16_scalar:
+; GFX11-FAKE16: ; %bb.0:
+; GFX11-FAKE16-NEXT: s_waitcnt vmcnt(0) expcnt(0) lgkmcnt(0)
+; GFX11-FAKE16-NEXT: v_cmp_ne_u32_e32 vcc_lo, 0, v10
+; GFX11-FAKE16-NEXT: v_dual_mov_b32 v27, s0 :: v_dual_mov_b32 v28, s1
+; GFX11-FAKE16-NEXT: v_dual_mov_b32 v25, s2 :: v_dual_mov_b32 v26, s3
+; GFX11-FAKE16-NEXT: v_dual_mov_b32 v23, s16 :: v_dual_mov_b32 v24, s17
+; GFX11-FAKE16-NEXT: v_dual_mov_b32 v21, s18 :: v_dual_mov_b32 v22, s19
+; GFX11-FAKE16-NEXT: v_dual_mov_b32 v19, s20 :: v_dual_mov_b32 v20, s21
+; GFX11-FAKE16-NEXT: v_dual_mov_b32 v11, s22 :: v_dual_mov_b32 v12, s23
+; GFX11-FAKE16-NEXT: v_dual_mov_b32 v17, s24 :: v_dual_mov_b32 v18, s25
+; GFX11-FAKE16-NEXT: v_dual_mov_b32 v13, s26 :: v_dual_mov_b32 v14, s27
+; GFX11-FAKE16-NEXT: v_dual_mov_b32 v15, s28 :: v_dual_mov_b32 v16, s29
+; GFX11-FAKE16-NEXT: s_mov_b32 s0, 0
+; GFX11-FAKE16-NEXT: s_and_b32 s1, vcc_lo, exec_lo
+; GFX11-FAKE16-NEXT: s_cbranch_scc0 .LBB53_4
+; GFX11-FAKE16-NEXT: ; %bb.1: ; %cmp.false
+; GFX11-FAKE16-NEXT: v_lshrrev_b32_e32 v38, 16, v9
+; GFX11-FAKE16-NEXT: v_lshrrev_b32_e32 v39, 16, v8
+; GFX11-FAKE16-NEXT: v_lshrrev_b32_e32 v48, 16, v7
+; GFX11-FAKE16-NEXT: v_lshrrev_b32_e32 v49, 16, v6
+; GFX11-FAKE16-NEXT: v_lshrrev_b32_e32 v50, 16, v5
+; GFX11-FAKE16-NEXT: v_lshrrev_b32_e32 v51, 16, v4
+; GFX11-FAKE16-NEXT: v_lshrrev_b32_e32 v52, 16, v3
+; GFX11-FAKE16-NEXT: v_lshrrev_b32_e32 v53, 16, v2
+; GFX11-FAKE16-NEXT: v_lshrrev_b32_e32 v54, 16, v1
+; GFX11-FAKE16-NEXT: v_lshrrev_b32_e32 v55, 16, v0
+; GFX11-FAKE16-NEXT: v_lshrrev_b32_e32 v64, 16, v16
+; GFX11-FAKE16-NEXT: v_lshrrev_b32_e32 v65, 16, v15
+; GFX11-FAKE16-NEXT: v_lshrrev_b32_e32 v66, 16, v14
+; GFX11-FAKE16-NEXT: v_lshrrev_b32_e32 v67, 16, v13
+; GFX11-FAKE16-NEXT: v_lshrrev_b32_e32 v68, 16, v18
+; GFX11-FAKE16-NEXT: v_lshrrev_b32_e32 v69, 16, v17
+; GFX11-FAKE16-NEXT: v_lshrrev_b32_e32 v70, 16, v12
+; GFX11-FAKE16-NEXT: v_lshrrev_b32_e32 v10, 16, v11
+; GFX11-FAKE16-NEXT: v_lshrrev_b32_e32 v29, 16, v20
+; GFX11-FAKE16-NEXT: v_lshrrev_b32_e32 v71, 16, v19
+; GFX11-FAKE16-NEXT: v_lshrrev_b32_e32 v37, 16, v22
+; GFX11-FAKE16-NEXT: v_lshrrev_b32_e32 v36, 16, v21
+; GFX11-FAKE16-NEXT: v_lshrrev_b32_e32 v35, 16, v24
+; GFX11-FAKE16-NEXT: v_lshrrev_b32_e32 v34, 16, v23
+; GFX11-FAKE16-NEXT: v_lshrrev_b32_e32 v33, 16, v26
+; GFX11-FAKE16-NEXT: v_lshrrev_b32_e32 v32, 16, v25
+; GFX11-FAKE16-NEXT: v_lshrrev_b32_e32 v31, 16, v28
+; GFX11-FAKE16-NEXT: v_lshrrev_b32_e32 v30, 16, v27
+; GFX11-FAKE16-NEXT: s_and_not1_b32 vcc_lo, exec_lo, s0
+; GFX11-FAKE16-NEXT: s_cbranch_vccnz .LBB53_3
+; GFX11-FAKE16-NEXT: .LBB53_2: ; %cmp.true
+; GFX11-FAKE16-NEXT: v_add_f64 v[8:9], v[8:9], 1.0
+; GFX11-FAKE16-NEXT: v_add_f64 v[6:7], v[6:7], 1.0
+; GFX11-FAKE16-NEXT: v_add_f64 v[4:5], v[4:5], 1.0
+; GFX11-FAKE16-NEXT: v_add_f64 v[2:3], v[2:3], 1.0
+; GFX11-FAKE16-NEXT: v_add_f64 v[0:1], v[0:1], 1.0
+; GFX11-FAKE16-NEXT: v_add_f64 v[15:16], v[15:16], 1.0
+; GFX11-FAKE16-NEXT: v_add_f64 v[13:14], v[13:14], 1.0
+; GFX11-FAKE16-NEXT: v_add_f64 v[17:18], v[17:18], 1.0
+; GFX11-FAKE16-NEXT: v_add_f64 v[11:12], v[11:12], 1.0
+; GFX11-FAKE16-NEXT: v_add_f64 v[19:20], v[19:20], 1.0
+; GFX11-FAKE16-NEXT: v_add_f64 v[21:22], v[21:22], 1.0
+; GFX11-FAKE16-NEXT: v_add_f64 v[23:24], v[23:24], 1.0
+; GFX11-FAKE16-NEXT: v_add_f64 v[25:26], v[25:26], 1.0
+; GFX11-FAKE16-NEXT: v_add_f64 v[27:28], v[27:28], 1.0
+; GFX11-FAKE16-NEXT: v_lshrrev_b32_e32 v38, 16, v9
+; GFX11-FAKE16-NEXT: v_lshrrev_b32_e32 v39, 16, v8
+; GFX11-FAKE16-NEXT: v_lshrrev_b32_e32 v48, 16, v7
+; GFX11-FAKE16-NEXT: v_lshrrev_b32_e32 v49, 16, v6
+; GFX11-FAKE16-NEXT: v_lshrrev_b32_e32 v50, 16, v5
+; GFX11-FAKE16-NEXT: v_lshrrev_b32_e32 v51, 16, v4
+; GFX11-FAKE16-NEXT: v_lshrrev_b32_e32 v52, 16, v3
+; GFX11-FAKE16-NEXT: v_lshrrev_b32_e32 v53, 16, v2
+; GFX11-FAKE16-NEXT: v_lshrrev_b32_e32 v54, 16, v1
+; GFX11-FAKE16-NEXT: v_lshrrev_b32_e32 v55, 16, v0
+; GFX11-FAKE16-NEXT: v_lshrrev_b32_e32 v64, 16, v16
+; GFX11-FAKE16-NEXT: v_lshrrev_b32_e32 v65, 16, v15
+; GFX11-FAKE16-NEXT: v_lshrrev_b32_e32 v66, 16, v14
+; GFX11-FAKE16-NEXT: v_lshrrev_b32_e32 v67, 16, v13
+; GFX11-FAKE16-NEXT: v_lshrrev_b32_e32 v68, 16, v18
+; GFX11-FAKE16-NEXT: v_lshrrev_b32_e32 v69, 16, v17
+; GFX11-FAKE16-NEXT: v_lshrrev_b32_e32 v70, 16, v12
+; GFX11-FAKE16-NEXT: v_lshrrev_b32_e32 v10, 16, v11
+; GFX11-FAKE16-NEXT: v_lshrrev_b32_e32 v29, 16, v20
+; GFX11-FAKE16-NEXT: v_lshrrev_b32_e32 v71, 16, v19
+; GFX11-FAKE16-NEXT: v_lshrrev_b32_e32 v37, 16, v22
+; GFX11-FAKE16-NEXT: v_lshrrev_b32_e32 v36, 16, v21
+; GFX11-FAKE16-NEXT: v_lshrrev_b32_e32 v35, 16, v24
+; GFX11-FAKE16-NEXT: v_lshrrev_b32_e32 v34, 16, v23
+; GFX11-FAKE16-NEXT: v_lshrrev_b32_e32 v33, 16, v26
+; GFX11-FAKE16-NEXT: v_lshrrev_b32_e32 v32, 16, v25
+; GFX11-FAKE16-NEXT: v_lshrrev_b32_e32 v31, 16, v28
+; GFX11-FAKE16-NEXT: v_lshrrev_b32_e32 v30, 16, v27
+; GFX11-FAKE16-NEXT: .LBB53_3: ; %end
+; GFX11-FAKE16-NEXT: v_and_b32_e32 v28, 0xffff, v28
+; GFX11-FAKE16-NEXT: v_and_b32_e32 v22, 0xffff, v22
+; GFX11-FAKE16-NEXT: v_and_b32_e32 v19, 0xffff, v19
+; GFX11-FAKE16-NEXT: v_and_b32_e32 v25, 0xffff, v25
+; GFX11-FAKE16-NEXT: v_and_b32_e32 v11, 0xffff, v11
+; GFX11-FAKE16-NEXT: v_lshl_or_b32 v31, v31, 16, v28
+; GFX11-FAKE16-NEXT: v_lshl_or_b32 v37, v37, 16, v22
+; GFX11-FAKE16-NEXT: v_and_b32_e32 v20, 0xffff, v20
+; GFX11-FAKE16-NEXT: v_lshl_or_b32 v28, v71, 16, v19
+; GFX11-FAKE16-NEXT: v_and_b32_e32 v21, 0xffff, v21
+; GFX11-FAKE16-NEXT: v_and_b32_e32 v17, 0xffff, v17
+; GFX11-FAKE16-NEXT: v_and_b32_e32 v19, 0xffff, v14
+; GFX11-FAKE16-NEXT: v_lshl_or_b32 v29, v29, 16, v20
+; GFX11-FAKE16-NEXT: v_and_b32_e32 v12, 0xffff, v12
+; GFX11-FAKE16-NEXT: v_lshl_or_b32 v36, v36, 16, v21
+; GFX11-FAKE16-NEXT: v_and_b32_e32 v23, 0xffff, v23
+; GFX11-FAKE16-NEXT: v_and_b32_e32 v1, 0xffff, v1
+; GFX11-FAKE16-NEXT: v_and_b32_e32 v4, 0xffff, v4
+; GFX11-FAKE16-NEXT: v_and_b32_e32 v27, 0xffff, v27
+; GFX11-FAKE16-NEXT: v_lshl_or_b32 v32, v32, 16, v25
+; GFX11-FAKE16-NEXT: v_lshl_or_b32 v10, v10, 16, v11
+; GFX11-FAKE16-NEXT: v_and_b32_e32 v20, 0xffff, v15
+; GFX11-FAKE16-NEXT: v_and_b32_e32 v21, 0xffff, v16
+; GFX11-FAKE16-NEXT: v_lshl_or_b32 v15, v66, 16, v19
+; GFX11-FAKE16-NEXT: v_and_b32_e32 v3, 0xffff, v3
+; GFX11-FAKE16-NEXT: v_lshl_or_b32 v19, v54, 16, v1
+; GFX11-FAKE16-NEXT: v_lshl_or_b32 v22, v51, 16, v4
+; GFX11-FAKE16-NEXT: v_and_b32_e32 v1, 0xffff, v6
+; GFX11-FAKE16-NEXT: v_and_b32_e32 v4, 0xffff, v9
+; GFX11-FAKE16-NEXT: v_mov_b32_e32 v6, v36
+; GFX11-FAKE16-NEXT: v_lshl_or_b32 v34, v34, 16, v23
+; GFX11-FAKE16-NEXT: v_and_b32_e32 v25, 0xffff, v26
+; GFX11-FAKE16-NEXT: v_mov_b32_e32 v9, v29
+; GFX11-FAKE16-NEXT: v_lshl_or_b32 v11, v70, 16, v12
+; GFX11-FAKE16-NEXT: v_lshl_or_b32 v12, v69, 16, v17
+; GFX11-FAKE16-NEXT: v_and_b32_e32 v17, 0xffff, v18
+; GFX11-FAKE16-NEXT: v_lshl_or_b32 v30, v30, 16, v27
+; GFX11-FAKE16-NEXT: v_and_b32_e32 v18, 0xffff, v13
+; GFX11-FAKE16-NEXT: v_and_b32_e32 v0, 0xffff, v0
+; GFX11-FAKE16-NEXT: v_and_b32_e32 v2, 0xffff, v2
+; GFX11-FAKE16-NEXT: v_lshl_or_b32 v13, v68, 16, v17
+; GFX11-FAKE16-NEXT: v_lshl_or_b32 v17, v64, 16, v21
+; GFX11-FAKE16-NEXT: v_lshl_or_b32 v21, v52, 16, v3
+; GFX11-FAKE16-NEXT: v_and_b32_e32 v3, 0xffff, v8
+; GFX11-FAKE16-NEXT: v_lshl_or_b32 v27, v38, 16, v4
+; GFX11-FAKE16-NEXT: v_mov_b32_e32 v4, v34
+; GFX11-FAKE16-NEXT: v_lshl_or_b32 v33, v33, 16, v25
+; GFX11-FAKE16-NEXT: v_and_b32_e32 v24, 0xffff, v24
+; GFX11-FAKE16-NEXT: v_lshl_or_b32 v14, v67, 16, v18
+; GFX11-FAKE16-NEXT: v_lshl_or_b32 v16, v65, 16, v20
+; GFX11-FAKE16-NEXT: v_lshl_or_b32 v18, v55, 16, v0
+; GFX11-FAKE16-NEXT: v_lshl_or_b32 v20, v53, 16, v2
+; GFX11-FAKE16-NEXT: v_and_b32_e32 v0, 0xffff, v5
+; GFX11-FAKE16-NEXT: v_and_b32_e32 v2, 0xffff, v7
+; GFX11-FAKE16-NEXT: v_lshl_or_b32 v26, v39, 16, v3
+; GFX11-FAKE16-NEXT: v_mov_b32_e32 v3, v33
+; GFX11-FAKE16-NEXT: v_lshl_or_b32 v35, v35, 16, v24
+; GFX11-FAKE16-NEXT: v_lshl_or_b32 v23, v50, 16, v0
+; GFX11-FAKE16-NEXT: v_lshl_or_b32 v24, v49, 16, v1
+; GFX11-FAKE16-NEXT: v_lshl_or_b32 v25, v48, 16, v2
+; GFX11-FAKE16-NEXT: v_dual_mov_b32 v0, v30 :: v_dual_mov_b32 v1, v31
+; GFX11-FAKE16-NEXT: v_dual_mov_b32 v2, v32 :: v_dual_mov_b32 v5, v35
+; GFX11-FAKE16-NEXT: v_dual_mov_b32 v7, v37 :: v_dual_mov_b32 v8, v28
+; GFX11-FAKE16-NEXT: s_setpc_b64 s[30:31]
+; GFX11-FAKE16-NEXT: .LBB53_4:
+; GFX11-FAKE16-NEXT: ; implicit-def: $vgpr30
+; GFX11-FAKE16-NEXT: ; implicit-def: $vgpr31
+; GFX11-FAKE16-NEXT: ; implicit-def: $vgpr32
+; GFX11-FAKE16-NEXT: ; implicit-def: $vgpr33
+; GFX11-FAKE16-NEXT: ; implicit-def: $vgpr34
+; GFX11-FAKE16-NEXT: ; implicit-def: $vgpr35
+; GFX11-FAKE16-NEXT: ; implicit-def: $vgpr36
+; GFX11-FAKE16-NEXT: ; implicit-def: $vgpr37
+; GFX11-FAKE16-NEXT: ; implicit-def: $vgpr71
+; GFX11-FAKE16-NEXT: ; implicit-def: $vgpr29
+; GFX11-FAKE16-NEXT: ; implicit-def: $vgpr10
+; GFX11-FAKE16-NEXT: ; implicit-def: $vgpr70
+; GFX11-FAKE16-NEXT: ; implicit-def: $vgpr69
+; GFX11-FAKE16-NEXT: ; implicit-def: $vgpr68
+; GFX11-FAKE16-NEXT: ; implicit-def: $vgpr67
+; GFX11-FAKE16-NEXT: ; implicit-def: $vgpr66
+; GFX11-FAKE16-NEXT: ; implicit-def: $vgpr65
+; GFX11-FAKE16-NEXT: ; implicit-def: $vgpr64
+; GFX11-FAKE16-NEXT: ; implicit-def: $vgpr55
+; GFX11-FAKE16-NEXT: ; implicit-def: $vgpr54
+; GFX11-FAKE16-NEXT: ; implicit-def: $vgpr53
+; GFX11-FAKE16-NEXT: ; implicit-def: $vgpr52
+; GFX11-FAKE16-NEXT: ; implicit-def: $vgpr51
+; GFX11-FAKE16-NEXT: ; implicit-def: $vgpr50
+; GFX11-FAKE16-NEXT: ; implicit-def: $vgpr49
+; GFX11-FAKE16-NEXT: ; implicit-def: $vgpr48
+; GFX11-FAKE16-NEXT: ; implicit-def: $vgpr39
+; GFX11-FAKE16-NEXT: ; implicit-def: $vgpr38
+; GFX11-FAKE16-NEXT: s_branch .LBB53_2
%cmp = icmp eq i32 %b, 0
br i1 %cmp, label %cmp.true, label %cmp.false
@@ -42545,141 +44343,299 @@ define inreg <14 x double> @bitcast_v56f16_to_v14f64_scalar(<56 x half> inreg %a
; GFX11-TRUE16-LABEL: bitcast_v56f16_to_v14f64_scalar:
; GFX11-TRUE16: ; %bb.0:
; GFX11-TRUE16-NEXT: s_waitcnt vmcnt(0) expcnt(0) lgkmcnt(0)
-; GFX11-TRUE16-NEXT: v_mov_b16_e32 v32.h, 0
; GFX11-TRUE16-NEXT: v_cmp_ne_u32_e32 vcc_lo, 0, v10
-; GFX11-TRUE16-NEXT: v_mov_b16_e32 v32.l, v9.h
-; GFX11-TRUE16-NEXT: v_mov_b16_e32 v33.l, v8.h
-; GFX11-TRUE16-NEXT: v_mov_b16_e32 v34.l, v7.h
-; GFX11-TRUE16-NEXT: v_mov_b16_e32 v33.h, v32.h
-; GFX11-TRUE16-NEXT: v_mov_b16_e32 v34.h, v32.h
-; GFX11-TRUE16-NEXT: v_mov_b16_e32 v35.l, v6.h
-; GFX11-TRUE16-NEXT: v_mov_b16_e32 v35.h, v32.h
-; GFX11-TRUE16-NEXT: v_mov_b16_e32 v36.l, v5.h
-; GFX11-TRUE16-NEXT: v_mov_b16_e32 v36.h, v32.h
-; GFX11-TRUE16-NEXT: v_mov_b16_e32 v37.l, v4.h
-; GFX11-TRUE16-NEXT: v_mov_b16_e32 v37.h, v32.h
-; GFX11-TRUE16-NEXT: v_mov_b16_e32 v38.l, v3.h
-; GFX11-TRUE16-NEXT: v_mov_b16_e32 v38.h, v32.h
-; GFX11-TRUE16-NEXT: v_mov_b16_e32 v39.l, v2.h
-; GFX11-TRUE16-NEXT: v_mov_b16_e32 v39.h, v32.h
-; GFX11-TRUE16-NEXT: v_mov_b16_e32 v48.l, v1.h
-; GFX11-TRUE16-NEXT: v_mov_b16_e32 v48.h, v32.h
-; GFX11-TRUE16-NEXT: v_mov_b16_e32 v49.l, v0.h
-; GFX11-TRUE16-NEXT: v_mov_b16_e32 v49.h, v32.h
-; GFX11-TRUE16-NEXT: v_and_b32_e32 v67, 0xffff, v0
-; GFX11-TRUE16-NEXT: v_and_b32_e32 v66, 0xffff, v1
-; GFX11-TRUE16-NEXT: v_and_b32_e32 v65, 0xffff, v2
-; GFX11-TRUE16-NEXT: v_and_b32_e32 v64, 0xffff, v3
-; GFX11-TRUE16-NEXT: v_and_b32_e32 v55, 0xffff, v4
-; GFX11-TRUE16-NEXT: v_and_b32_e32 v54, 0xffff, v5
-; GFX11-TRUE16-NEXT: v_and_b32_e32 v53, 0xffff, v6
-; GFX11-TRUE16-NEXT: v_and_b32_e32 v52, 0xffff, v7
-; GFX11-TRUE16-NEXT: v_and_b32_e32 v51, 0xffff, v8
-; GFX11-TRUE16-NEXT: v_and_b32_e32 v50, 0xffff, v9
-; GFX11-TRUE16-NEXT: s_lshr_b32 s40, s29, 16
-; GFX11-TRUE16-NEXT: s_lshr_b32 s41, s28, 16
-; GFX11-TRUE16-NEXT: s_lshr_b32 s42, s27, 16
-; GFX11-TRUE16-NEXT: s_lshr_b32 s14, s26, 16
-; GFX11-TRUE16-NEXT: s_lshr_b32 s13, s25, 16
-; GFX11-TRUE16-NEXT: s_lshr_b32 s12, s24, 16
-; GFX11-TRUE16-NEXT: s_lshr_b32 s11, s23, 16
-; GFX11-TRUE16-NEXT: s_lshr_b32 s10, s22, 16
-; GFX11-TRUE16-NEXT: s_lshr_b32 s9, s21, 16
-; GFX11-TRUE16-NEXT: s_lshr_b32 s8, s20, 16
-; GFX11-TRUE16-NEXT: s_lshr_b32 s7, s19, 16
-; GFX11-TRUE16-NEXT: s_lshr_b32 s6, s18, 16
-; GFX11-TRUE16-NEXT: s_lshr_b32 s5, s17, 16
-; GFX11-TRUE16-NEXT: s_lshr_b32 s4, s16, 16
-; GFX11-TRUE16-NEXT: s_lshr_b32 s43, s3, 16
-; GFX11-TRUE16-NEXT: s_lshr_b32 s44, s2, 16
-; GFX11-TRUE16-NEXT: s_lshr_b32 s45, s1, 16
-; GFX11-TRUE16-NEXT: s_lshr_b32 s46, s0, 16
-; GFX11-TRUE16-NEXT: s_mov_b32 s15, 0
-; GFX11-TRUE16-NEXT: s_pack_ll_b32_b16 s0, s0, s46
-; GFX11-TRUE16-NEXT: s_pack_ll_b32_b16 s1, s1, s45
-; GFX11-TRUE16-NEXT: s_pack_ll_b32_b16 s2, s2, s44
-; GFX11-TRUE16-NEXT: s_pack_ll_b32_b16 s3, s3, s43
-; GFX11-TRUE16-NEXT: s_pack_ll_b32_b16 s4, s16, s4
-; GFX11-TRUE16-NEXT: s_pack_ll_b32_b16 s5, s17, s5
-; GFX11-TRUE16-NEXT: s_pack_ll_b32_b16 s6, s18, s6
-; GFX11-TRUE16-NEXT: s_pack_ll_b32_b16 s7, s19, s7
-; GFX11-TRUE16-NEXT: s_pack_ll_b32_b16 s8, s20, s8
-; GFX11-TRUE16-NEXT: s_pack_ll_b32_b16 s9, s21, s9
-; GFX11-TRUE16-NEXT: s_pack_ll_b32_b16 s10, s22, s10
-; GFX11-TRUE16-NEXT: s_pack_ll_b32_b16 s11, s23, s11
-; GFX11-TRUE16-NEXT: s_pack_ll_b32_b16 s12, s24, s12
-; GFX11-TRUE16-NEXT: s_pack_ll_b32_b16 s13, s25, s13
-; GFX11-TRUE16-NEXT: s_pack_ll_b32_b16 s14, s26, s14
-; GFX11-TRUE16-NEXT: s_pack_ll_b32_b16 s16, s27, s42
-; GFX11-TRUE16-NEXT: s_pack_ll_b32_b16 s17, s28, s41
-; GFX11-TRUE16-NEXT: s_pack_ll_b32_b16 s18, s29, s40
+; GFX11-TRUE16-NEXT: s_clause 0x1f
+; GFX11-TRUE16-NEXT: scratch_store_b32 off, v40, s32 offset:316
+; GFX11-TRUE16-NEXT: scratch_store_b32 off, v41, s32 offset:312
+; GFX11-TRUE16-NEXT: scratch_store_b32 off, v42, s32 offset:308
+; GFX11-TRUE16-NEXT: scratch_store_b32 off, v43, s32 offset:304
+; GFX11-TRUE16-NEXT: scratch_store_b32 off, v44, s32 offset:300
+; GFX11-TRUE16-NEXT: scratch_store_b32 off, v45, s32 offset:296
+; GFX11-TRUE16-NEXT: scratch_store_b32 off, v46, s32 offset:292
+; GFX11-TRUE16-NEXT: scratch_store_b32 off, v47, s32 offset:288
+; GFX11-TRUE16-NEXT: scratch_store_b32 off, v56, s32 offset:284
+; GFX11-TRUE16-NEXT: scratch_store_b32 off, v57, s32 offset:280
+; GFX11-TRUE16-NEXT: scratch_store_b32 off, v58, s32 offset:276
+; GFX11-TRUE16-NEXT: scratch_store_b32 off, v59, s32 offset:272
+; GFX11-TRUE16-NEXT: scratch_store_b32 off, v60, s32 offset:268
+; GFX11-TRUE16-NEXT: scratch_store_b32 off, v61, s32 offset:264
+; GFX11-TRUE16-NEXT: scratch_store_b32 off, v62, s32 offset:260
+; GFX11-TRUE16-NEXT: scratch_store_b32 off, v63, s32 offset:256
+; GFX11-TRUE16-NEXT: scratch_store_b32 off, v72, s32 offset:252
+; GFX11-TRUE16-NEXT: scratch_store_b32 off, v73, s32 offset:248
+; GFX11-TRUE16-NEXT: scratch_store_b32 off, v74, s32 offset:244
+; GFX11-TRUE16-NEXT: scratch_store_b32 off, v75, s32 offset:240
+; GFX11-TRUE16-NEXT: scratch_store_b32 off, v76, s32 offset:236
+; GFX11-TRUE16-NEXT: scratch_store_b32 off, v77, s32 offset:232
+; GFX11-TRUE16-NEXT: scratch_store_b32 off, v78, s32 offset:228
+; GFX11-TRUE16-NEXT: scratch_store_b32 off, v79, s32 offset:224
+; GFX11-TRUE16-NEXT: scratch_store_b32 off, v88, s32 offset:220
+; GFX11-TRUE16-NEXT: scratch_store_b32 off, v89, s32 offset:216
+; GFX11-TRUE16-NEXT: scratch_store_b32 off, v90, s32 offset:212
+; GFX11-TRUE16-NEXT: scratch_store_b32 off, v91, s32 offset:208
+; GFX11-TRUE16-NEXT: scratch_store_b32 off, v92, s32 offset:204
+; GFX11-TRUE16-NEXT: scratch_store_b32 off, v93, s32 offset:200
+; GFX11-TRUE16-NEXT: scratch_store_b32 off, v94, s32 offset:196
+; GFX11-TRUE16-NEXT: scratch_store_b32 off, v95, s32 offset:192
+; GFX11-TRUE16-NEXT: s_clause 0x1f
+; GFX11-TRUE16-NEXT: scratch_store_b32 off, v104, s32 offset:188
+; GFX11-TRUE16-NEXT: scratch_store_b32 off, v105, s32 offset:184
+; GFX11-TRUE16-NEXT: scratch_store_b32 off, v106, s32 offset:180
+; GFX11-TRUE16-NEXT: scratch_store_b32 off, v107, s32 offset:176
+; GFX11-TRUE16-NEXT: scratch_store_b32 off, v108, s32 offset:172
+; GFX11-TRUE16-NEXT: scratch_store_b32 off, v109, s32 offset:168
+; GFX11-TRUE16-NEXT: scratch_store_b32 off, v110, s32 offset:164
+; GFX11-TRUE16-NEXT: scratch_store_b32 off, v111, s32 offset:160
+; GFX11-TRUE16-NEXT: scratch_store_b32 off, v120, s32 offset:156
+; GFX11-TRUE16-NEXT: scratch_store_b32 off, v121, s32 offset:152
+; GFX11-TRUE16-NEXT: scratch_store_b32 off, v122, s32 offset:148
+; GFX11-TRUE16-NEXT: scratch_store_b32 off, v123, s32 offset:144
+; GFX11-TRUE16-NEXT: scratch_store_b32 off, v124, s32 offset:140
+; GFX11-TRUE16-NEXT: scratch_store_b32 off, v125, s32 offset:136
+; GFX11-TRUE16-NEXT: scratch_store_b32 off, v126, s32 offset:132
+; GFX11-TRUE16-NEXT: scratch_store_b32 off, v127, s32 offset:128
+; GFX11-TRUE16-NEXT: scratch_store_b32 off, v136, s32 offset:124
+; GFX11-TRUE16-NEXT: scratch_store_b32 off, v137, s32 offset:120
+; GFX11-TRUE16-NEXT: scratch_store_b32 off, v138, s32 offset:116
+; GFX11-TRUE16-NEXT: scratch_store_b32 off, v139, s32 offset:112
+; GFX11-TRUE16-NEXT: scratch_store_b32 off, v140, s32 offset:108
+; GFX11-TRUE16-NEXT: scratch_store_b32 off, v141, s32 offset:104
+; GFX11-TRUE16-NEXT: scratch_store_b32 off, v142, s32 offset:100
+; GFX11-TRUE16-NEXT: scratch_store_b32 off, v143, s32 offset:96
+; GFX11-TRUE16-NEXT: scratch_store_b32 off, v152, s32 offset:92
+; GFX11-TRUE16-NEXT: scratch_store_b32 off, v153, s32 offset:88
+; GFX11-TRUE16-NEXT: scratch_store_b32 off, v154, s32 offset:84
+; GFX11-TRUE16-NEXT: scratch_store_b32 off, v155, s32 offset:80
+; GFX11-TRUE16-NEXT: scratch_store_b32 off, v156, s32 offset:76
+; GFX11-TRUE16-NEXT: scratch_store_b32 off, v157, s32 offset:72
+; GFX11-TRUE16-NEXT: scratch_store_b32 off, v158, s32 offset:68
+; GFX11-TRUE16-NEXT: scratch_store_b32 off, v159, s32 offset:64
+; GFX11-TRUE16-NEXT: s_clause 0xf
+; GFX11-TRUE16-NEXT: scratch_store_b32 off, v168, s32 offset:60
+; GFX11-TRUE16-NEXT: scratch_store_b32 off, v169, s32 offset:56
+; GFX11-TRUE16-NEXT: scratch_store_b32 off, v170, s32 offset:52
+; GFX11-TRUE16-NEXT: scratch_store_b32 off, v171, s32 offset:48
+; GFX11-TRUE16-NEXT: scratch_store_b32 off, v172, s32 offset:44
+; GFX11-TRUE16-NEXT: scratch_store_b32 off, v173, s32 offset:40
+; GFX11-TRUE16-NEXT: scratch_store_b32 off, v174, s32 offset:36
+; GFX11-TRUE16-NEXT: scratch_store_b32 off, v175, s32 offset:32
+; GFX11-TRUE16-NEXT: scratch_store_b32 off, v184, s32 offset:28
+; GFX11-TRUE16-NEXT: scratch_store_b32 off, v185, s32 offset:24
+; GFX11-TRUE16-NEXT: scratch_store_b32 off, v186, s32 offset:20
+; GFX11-TRUE16-NEXT: scratch_store_b32 off, v187, s32 offset:16
+; GFX11-TRUE16-NEXT: scratch_store_b32 off, v188, s32 offset:12
+; GFX11-TRUE16-NEXT: scratch_store_b32 off, v189, s32 offset:8
+; GFX11-TRUE16-NEXT: scratch_store_b32 off, v190, s32 offset:4
+; GFX11-TRUE16-NEXT: scratch_store_b32 off, v191, s32
+; GFX11-TRUE16-NEXT: v_dual_mov_b32 v28, v9 :: v_dual_mov_b32 v25, v7
+; GFX11-TRUE16-NEXT: v_dual_mov_b32 v26, v8 :: v_dual_mov_b32 v189, v5
+; GFX11-TRUE16-NEXT: v_dual_mov_b32 v188, v6 :: v_dual_mov_b32 v191, v3
+; GFX11-TRUE16-NEXT: v_dual_mov_b32 v190, v4 :: v_dual_mov_b32 v185, v2
+; GFX11-TRUE16-NEXT: v_dual_mov_b32 v186, v1 :: v_dual_mov_b32 v187, v0
+; GFX11-TRUE16-NEXT: s_lshr_b32 s15, s29, 16
+; GFX11-TRUE16-NEXT: s_lshr_b32 s14, s28, 16
+; GFX11-TRUE16-NEXT: s_lshr_b32 s13, s27, 16
+; GFX11-TRUE16-NEXT: s_lshr_b32 s12, s26, 16
+; GFX11-TRUE16-NEXT: s_lshr_b32 s11, s25, 16
+; GFX11-TRUE16-NEXT: s_lshr_b32 s10, s24, 16
+; GFX11-TRUE16-NEXT: s_lshr_b32 s9, s23, 16
+; GFX11-TRUE16-NEXT: s_lshr_b32 s8, s22, 16
+; GFX11-TRUE16-NEXT: s_lshr_b32 s7, s21, 16
+; GFX11-TRUE16-NEXT: s_lshr_b32 s6, s20, 16
+; GFX11-TRUE16-NEXT: s_lshr_b32 s5, s19, 16
+; GFX11-TRUE16-NEXT: s_lshr_b32 s4, s18, 16
+; GFX11-TRUE16-NEXT: s_lshr_b32 s43, s17, 16
+; GFX11-TRUE16-NEXT: s_lshr_b32 s44, s16, 16
+; GFX11-TRUE16-NEXT: s_lshr_b32 s45, s3, 16
+; GFX11-TRUE16-NEXT: s_lshr_b32 s46, s2, 16
+; GFX11-TRUE16-NEXT: s_lshr_b32 s41, s1, 16
+; GFX11-TRUE16-NEXT: s_lshr_b32 s40, s0, 16
+; GFX11-TRUE16-NEXT: s_mov_b32 s42, 0
+; GFX11-TRUE16-NEXT: s_pack_ll_b32_b16 s40, s0, s40
+; GFX11-TRUE16-NEXT: s_pack_ll_b32_b16 s41, s1, s41
+; GFX11-TRUE16-NEXT: s_pack_ll_b32_b16 s0, s2, s46
+; GFX11-TRUE16-NEXT: s_pack_ll_b32_b16 s1, s3, s45
+; GFX11-TRUE16-NEXT: s_pack_ll_b32_b16 s2, s16, s44
+; GFX11-TRUE16-NEXT: s_pack_ll_b32_b16 s3, s17, s43
+; GFX11-TRUE16-NEXT: s_pack_ll_b32_b16 s4, s18, s4
+; GFX11-TRUE16-NEXT: s_pack_ll_b32_b16 s5, s19, s5
+; GFX11-TRUE16-NEXT: s_pack_ll_b32_b16 s6, s20, s6
+; GFX11-TRUE16-NEXT: s_pack_ll_b32_b16 s7, s21, s7
+; GFX11-TRUE16-NEXT: s_pack_ll_b32_b16 s8, s22, s8
+; GFX11-TRUE16-NEXT: s_pack_ll_b32_b16 s9, s23, s9
+; GFX11-TRUE16-NEXT: s_pack_ll_b32_b16 s10, s24, s10
+; GFX11-TRUE16-NEXT: s_pack_ll_b32_b16 s11, s25, s11
+; GFX11-TRUE16-NEXT: s_pack_ll_b32_b16 s12, s26, s12
+; GFX11-TRUE16-NEXT: s_pack_ll_b32_b16 s13, s27, s13
+; GFX11-TRUE16-NEXT: s_pack_ll_b32_b16 s14, s28, s14
+; GFX11-TRUE16-NEXT: s_pack_ll_b32_b16 s15, s29, s15
; GFX11-TRUE16-NEXT: s_and_b32 s47, vcc_lo, exec_lo
; GFX11-TRUE16-NEXT: s_cbranch_scc0 .LBB55_4
; GFX11-TRUE16-NEXT: ; %bb.1: ; %cmp.false
-; GFX11-TRUE16-NEXT: v_lshl_or_b32 v18, v49, 16, v67
-; GFX11-TRUE16-NEXT: v_lshl_or_b32 v19, v48, 16, v66
-; GFX11-TRUE16-NEXT: v_lshl_or_b32 v20, v39, 16, v65
-; GFX11-TRUE16-NEXT: v_lshl_or_b32 v21, v38, 16, v64
-; GFX11-TRUE16-NEXT: v_lshl_or_b32 v22, v37, 16, v55
-; GFX11-TRUE16-NEXT: v_lshl_or_b32 v23, v36, 16, v54
-; GFX11-TRUE16-NEXT: v_lshl_or_b32 v24, v35, 16, v53
-; GFX11-TRUE16-NEXT: v_lshl_or_b32 v25, v34, 16, v52
-; GFX11-TRUE16-NEXT: v_lshl_or_b32 v26, v33, 16, v51
-; GFX11-TRUE16-NEXT: v_lshl_or_b32 v27, v32, 16, v50
-; GFX11-TRUE16-NEXT: v_dual_mov_b32 v0, s0 :: v_dual_mov_b32 v1, s1
-; GFX11-TRUE16-NEXT: v_dual_mov_b32 v2, s2 :: v_dual_mov_b32 v3, s3
-; GFX11-TRUE16-NEXT: v_dual_mov_b32 v4, s4 :: v_dual_mov_b32 v5, s5
-; GFX11-TRUE16-NEXT: v_dual_mov_b32 v6, s6 :: v_dual_mov_b32 v7, s7
-; GFX11-TRUE16-NEXT: v_dual_mov_b32 v8, s8 :: v_dual_mov_b32 v9, s9
-; GFX11-TRUE16-NEXT: v_dual_mov_b32 v10, s10 :: v_dual_mov_b32 v11, s11
-; GFX11-TRUE16-NEXT: v_dual_mov_b32 v12, s12 :: v_dual_mov_b32 v13, s13
-; GFX11-TRUE16-NEXT: v_dual_mov_b32 v14, s14 :: v_dual_mov_b32 v15, s16
-; GFX11-TRUE16-NEXT: v_dual_mov_b32 v16, s17 :: v_dual_mov_b32 v17, s18
-; GFX11-TRUE16-NEXT: s_and_not1_b32 vcc_lo, exec_lo, s15
+; GFX11-TRUE16-NEXT: v_dual_mov_b32 v0, s40 :: v_dual_mov_b32 v5, s0
+; GFX11-TRUE16-NEXT: v_dual_mov_b32 v2, s41 :: v_dual_mov_b32 v9, s1
+; GFX11-TRUE16-NEXT: v_dual_mov_b32 v14, s2 :: v_dual_mov_b32 v27, s4
+; GFX11-TRUE16-NEXT: v_dual_mov_b32 v20, s3 :: v_dual_mov_b32 v35, s5
+; GFX11-TRUE16-NEXT: v_dual_mov_b32 v44, s6 :: v_dual_mov_b32 v65, s8
+; GFX11-TRUE16-NEXT: v_dual_mov_b32 v54, s7 :: v_dual_mov_b32 v77, s9
+; GFX11-TRUE16-NEXT: v_dual_mov_b32 v90, s10 :: v_dual_mov_b32 v119, s12
+; GFX11-TRUE16-NEXT: v_dual_mov_b32 v104, s11 :: v_dual_mov_b32 v135, s13
+; GFX11-TRUE16-NEXT: v_mov_b32_e32 v152, s14
+; GFX11-TRUE16-NEXT: v_mov_b32_e32 v170, s15
+; GFX11-TRUE16-NEXT: s_and_not1_b32 vcc_lo, exec_lo, s42
; GFX11-TRUE16-NEXT: s_cbranch_vccnz .LBB55_3
; GFX11-TRUE16-NEXT: .LBB55_2: ; %cmp.true
-; GFX11-TRUE16-NEXT: v_lshl_or_b32 v18, v49, 16, v67
-; GFX11-TRUE16-NEXT: v_lshl_or_b32 v19, v48, 16, v66
-; GFX11-TRUE16-NEXT: v_lshl_or_b32 v20, v39, 16, v65
-; GFX11-TRUE16-NEXT: v_lshl_or_b32 v21, v38, 16, v64
-; GFX11-TRUE16-NEXT: v_lshl_or_b32 v22, v37, 16, v55
-; GFX11-TRUE16-NEXT: v_lshl_or_b32 v23, v36, 16, v54
-; GFX11-TRUE16-NEXT: v_lshl_or_b32 v24, v35, 16, v53
-; GFX11-TRUE16-NEXT: v_lshl_or_b32 v25, v34, 16, v52
-; GFX11-TRUE16-NEXT: v_lshl_or_b32 v26, v33, 16, v51
-; GFX11-TRUE16-NEXT: v_lshl_or_b32 v27, v32, 16, v50
-; GFX11-TRUE16-NEXT: v_pk_add_f16 v0, 0x200, s0 op_sel_hi:[0,1]
-; GFX11-TRUE16-NEXT: v_pk_add_f16 v1, 0x200, s1 op_sel_hi:[0,1]
-; GFX11-TRUE16-NEXT: v_pk_add_f16 v2, 0x200, s2 op_sel_hi:[0,1]
-; GFX11-TRUE16-NEXT: v_pk_add_f16 v3, 0x200, s3 op_sel_hi:[0,1]
-; GFX11-TRUE16-NEXT: v_pk_add_f16 v4, 0x200, s4 op_sel_hi:[0,1]
-; GFX11-TRUE16-NEXT: v_pk_add_f16 v5, 0x200, s5 op_sel_hi:[0,1]
-; GFX11-TRUE16-NEXT: v_pk_add_f16 v6, 0x200, s6 op_sel_hi:[0,1]
-; GFX11-TRUE16-NEXT: v_pk_add_f16 v7, 0x200, s7 op_sel_hi:[0,1]
-; GFX11-TRUE16-NEXT: v_pk_add_f16 v8, 0x200, s8 op_sel_hi:[0,1]
-; GFX11-TRUE16-NEXT: v_pk_add_f16 v9, 0x200, s9 op_sel_hi:[0,1]
-; GFX11-TRUE16-NEXT: v_pk_add_f16 v10, 0x200, s10 op_sel_hi:[0,1]
-; GFX11-TRUE16-NEXT: v_pk_add_f16 v11, 0x200, s11 op_sel_hi:[0,1]
-; GFX11-TRUE16-NEXT: v_pk_add_f16 v12, 0x200, s12 op_sel_hi:[0,1]
-; GFX11-TRUE16-NEXT: v_pk_add_f16 v13, 0x200, s13 op_sel_hi:[0,1]
-; GFX11-TRUE16-NEXT: v_pk_add_f16 v14, 0x200, s14 op_sel_hi:[0,1]
-; GFX11-TRUE16-NEXT: v_pk_add_f16 v15, 0x200, s16 op_sel_hi:[0,1]
-; GFX11-TRUE16-NEXT: v_pk_add_f16 v16, 0x200, s17 op_sel_hi:[0,1]
-; GFX11-TRUE16-NEXT: v_pk_add_f16 v17, 0x200, s18 op_sel_hi:[0,1]
-; GFX11-TRUE16-NEXT: v_pk_add_f16 v18, 0x200, v18 op_sel_hi:[0,1]
-; GFX11-TRUE16-NEXT: v_pk_add_f16 v19, 0x200, v19 op_sel_hi:[0,1]
-; GFX11-TRUE16-NEXT: v_pk_add_f16 v20, 0x200, v20 op_sel_hi:[0,1]
-; GFX11-TRUE16-NEXT: v_pk_add_f16 v21, 0x200, v21 op_sel_hi:[0,1]
-; GFX11-TRUE16-NEXT: v_pk_add_f16 v22, 0x200, v22 op_sel_hi:[0,1]
-; GFX11-TRUE16-NEXT: v_pk_add_f16 v23, 0x200, v23 op_sel_hi:[0,1]
-; GFX11-TRUE16-NEXT: v_pk_add_f16 v24, 0x200, v24 op_sel_hi:[0,1]
+; GFX11-TRUE16-NEXT: v_pk_add_f16 v0, 0x200, s40 op_sel_hi:[0,1]
+; GFX11-TRUE16-NEXT: v_pk_add_f16 v2, 0x200, s41 op_sel_hi:[0,1]
+; GFX11-TRUE16-NEXT: v_pk_add_f16 v187, 0x200, v187 op_sel_hi:[0,1]
+; GFX11-TRUE16-NEXT: v_pk_add_f16 v186, 0x200, v186 op_sel_hi:[0,1]
+; GFX11-TRUE16-NEXT: v_pk_add_f16 v185, 0x200, v185 op_sel_hi:[0,1]
+; GFX11-TRUE16-NEXT: v_pk_add_f16 v191, 0x200, v191 op_sel_hi:[0,1]
+; GFX11-TRUE16-NEXT: v_pk_add_f16 v190, 0x200, v190 op_sel_hi:[0,1]
+; GFX11-TRUE16-NEXT: v_pk_add_f16 v189, 0x200, v189 op_sel_hi:[0,1]
+; GFX11-TRUE16-NEXT: v_pk_add_f16 v188, 0x200, v188 op_sel_hi:[0,1]
; GFX11-TRUE16-NEXT: v_pk_add_f16 v25, 0x200, v25 op_sel_hi:[0,1]
; GFX11-TRUE16-NEXT: v_pk_add_f16 v26, 0x200, v26 op_sel_hi:[0,1]
-; GFX11-TRUE16-NEXT: v_pk_add_f16 v27, 0x200, v27 op_sel_hi:[0,1]
+; GFX11-TRUE16-NEXT: v_pk_add_f16 v28, 0x200, v28 op_sel_hi:[0,1]
+; GFX11-TRUE16-NEXT: v_pk_add_f16 v5, 0x200, s0 op_sel_hi:[0,1]
+; GFX11-TRUE16-NEXT: v_pk_add_f16 v9, 0x200, s1 op_sel_hi:[0,1]
+; GFX11-TRUE16-NEXT: v_pk_add_f16 v14, 0x200, s2 op_sel_hi:[0,1]
+; GFX11-TRUE16-NEXT: v_pk_add_f16 v20, 0x200, s3 op_sel_hi:[0,1]
+; GFX11-TRUE16-NEXT: v_pk_add_f16 v27, 0x200, s4 op_sel_hi:[0,1]
+; GFX11-TRUE16-NEXT: v_pk_add_f16 v35, 0x200, s5 op_sel_hi:[0,1]
+; GFX11-TRUE16-NEXT: v_pk_add_f16 v44, 0x200, s6 op_sel_hi:[0,1]
+; GFX11-TRUE16-NEXT: v_pk_add_f16 v54, 0x200, s7 op_sel_hi:[0,1]
+; GFX11-TRUE16-NEXT: v_pk_add_f16 v65, 0x200, s8 op_sel_hi:[0,1]
+; GFX11-TRUE16-NEXT: v_pk_add_f16 v77, 0x200, s9 op_sel_hi:[0,1]
+; GFX11-TRUE16-NEXT: v_pk_add_f16 v90, 0x200, s10 op_sel_hi:[0,1]
+; GFX11-TRUE16-NEXT: v_pk_add_f16 v104, 0x200, s11 op_sel_hi:[0,1]
+; GFX11-TRUE16-NEXT: v_pk_add_f16 v119, 0x200, s12 op_sel_hi:[0,1]
+; GFX11-TRUE16-NEXT: v_pk_add_f16 v135, 0x200, s13 op_sel_hi:[0,1]
+; GFX11-TRUE16-NEXT: v_pk_add_f16 v152, 0x200, s14 op_sel_hi:[0,1]
+; GFX11-TRUE16-NEXT: v_pk_add_f16 v170, 0x200, s15 op_sel_hi:[0,1]
; GFX11-TRUE16-NEXT: .LBB55_3: ; %end
+; GFX11-TRUE16-NEXT: v_dual_mov_b32 v1, v2 :: v_dual_mov_b32 v2, v5
+; GFX11-TRUE16-NEXT: v_dual_mov_b32 v5, v20 :: v_dual_mov_b32 v6, v27
+; GFX11-TRUE16-NEXT: v_dual_mov_b32 v7, v35 :: v_dual_mov_b32 v8, v44
+; GFX11-TRUE16-NEXT: v_dual_mov_b32 v11, v77 :: v_dual_mov_b32 v12, v90
+; GFX11-TRUE16-NEXT: v_mov_b32_e32 v13, v104
+; GFX11-TRUE16-NEXT: v_dual_mov_b32 v15, v135 :: v_dual_mov_b32 v16, v152
+; GFX11-TRUE16-NEXT: v_dual_mov_b32 v17, v170 :: v_dual_mov_b32 v18, v187
+; GFX11-TRUE16-NEXT: v_dual_mov_b32 v19, v186 :: v_dual_mov_b32 v20, v185
+; GFX11-TRUE16-NEXT: v_dual_mov_b32 v21, v191 :: v_dual_mov_b32 v22, v190
+; GFX11-TRUE16-NEXT: v_dual_mov_b32 v23, v189 :: v_dual_mov_b32 v24, v188
+; GFX11-TRUE16-NEXT: s_clause 0x1f
+; GFX11-TRUE16-NEXT: scratch_load_b32 v191, off, s32
+; GFX11-TRUE16-NEXT: scratch_load_b32 v190, off, s32 offset:4
+; GFX11-TRUE16-NEXT: scratch_load_b32 v189, off, s32 offset:8
+; GFX11-TRUE16-NEXT: scratch_load_b32 v188, off, s32 offset:12
+; GFX11-TRUE16-NEXT: scratch_load_b32 v187, off, s32 offset:16
+; GFX11-TRUE16-NEXT: scratch_load_b32 v186, off, s32 offset:20
+; GFX11-TRUE16-NEXT: scratch_load_b32 v185, off, s32 offset:24
+; GFX11-TRUE16-NEXT: scratch_load_b32 v184, off, s32 offset:28
+; GFX11-TRUE16-NEXT: scratch_load_b32 v175, off, s32 offset:32
+; GFX11-TRUE16-NEXT: scratch_load_b32 v174, off, s32 offset:36
+; GFX11-TRUE16-NEXT: scratch_load_b32 v173, off, s32 offset:40
+; GFX11-TRUE16-NEXT: scratch_load_b32 v172, off, s32 offset:44
+; GFX11-TRUE16-NEXT: scratch_load_b32 v171, off, s32 offset:48
+; GFX11-TRUE16-NEXT: scratch_load_b32 v170, off, s32 offset:52
+; GFX11-TRUE16-NEXT: scratch_load_b32 v169, off, s32 offset:56
+; GFX11-TRUE16-NEXT: scratch_load_b32 v168, off, s32 offset:60
+; GFX11-TRUE16-NEXT: scratch_load_b32 v159, off, s32 offset:64
+; GFX11-TRUE16-NEXT: scratch_load_b32 v158, off, s32 offset:68
+; GFX11-TRUE16-NEXT: scratch_load_b32 v157, off, s32 offset:72
+; GFX11-TRUE16-NEXT: scratch_load_b32 v156, off, s32 offset:76
+; GFX11-TRUE16-NEXT: scratch_load_b32 v155, off, s32 offset:80
+; GFX11-TRUE16-NEXT: scratch_load_b32 v154, off, s32 offset:84
+; GFX11-TRUE16-NEXT: scratch_load_b32 v153, off, s32 offset:88
+; GFX11-TRUE16-NEXT: scratch_load_b32 v152, off, s32 offset:92
+; GFX11-TRUE16-NEXT: scratch_load_b32 v143, off, s32 offset:96
+; GFX11-TRUE16-NEXT: scratch_load_b32 v142, off, s32 offset:100
+; GFX11-TRUE16-NEXT: scratch_load_b32 v141, off, s32 offset:104
+; GFX11-TRUE16-NEXT: scratch_load_b32 v140, off, s32 offset:108
+; GFX11-TRUE16-NEXT: scratch_load_b32 v139, off, s32 offset:112
+; GFX11-TRUE16-NEXT: scratch_load_b32 v138, off, s32 offset:116
+; GFX11-TRUE16-NEXT: scratch_load_b32 v137, off, s32 offset:120
+; GFX11-TRUE16-NEXT: scratch_load_b32 v136, off, s32 offset:124
+; GFX11-TRUE16-NEXT: s_clause 0x1f
+; GFX11-TRUE16-NEXT: scratch_load_b32 v127, off, s32 offset:128
+; GFX11-TRUE16-NEXT: scratch_load_b32 v126, off, s32 offset:132
+; GFX11-TRUE16-NEXT: scratch_load_b32 v125, off, s32 offset:136
+; GFX11-TRUE16-NEXT: scratch_load_b32 v124, off, s32 offset:140
+; GFX11-TRUE16-NEXT: scratch_load_b32 v123, off, s32 offset:144
+; GFX11-TRUE16-NEXT: scratch_load_b32 v122, off, s32 offset:148
+; GFX11-TRUE16-NEXT: scratch_load_b32 v121, off, s32 offset:152
+; GFX11-TRUE16-NEXT: scratch_load_b32 v120, off, s32 offset:156
+; GFX11-TRUE16-NEXT: scratch_load_b32 v111, off, s32 offset:160
+; GFX11-TRUE16-NEXT: scratch_load_b32 v110, off, s32 offset:164
+; GFX11-TRUE16-NEXT: scratch_load_b32 v109, off, s32 offset:168
+; GFX11-TRUE16-NEXT: scratch_load_b32 v108, off, s32 offset:172
+; GFX11-TRUE16-NEXT: scratch_load_b32 v107, off, s32 offset:176
+; GFX11-TRUE16-NEXT: scratch_load_b32 v106, off, s32 offset:180
+; GFX11-TRUE16-NEXT: scratch_load_b32 v105, off, s32 offset:184
+; GFX11-TRUE16-NEXT: scratch_load_b32 v104, off, s32 offset:188
+; GFX11-TRUE16-NEXT: scratch_load_b32 v95, off, s32 offset:192
+; GFX11-TRUE16-NEXT: scratch_load_b32 v94, off, s32 offset:196
+; GFX11-TRUE16-NEXT: scratch_load_b32 v93, off, s32 offset:200
+; GFX11-TRUE16-NEXT: scratch_load_b32 v92, off, s32 offset:204
+; GFX11-TRUE16-NEXT: scratch_load_b32 v91, off, s32 offset:208
+; GFX11-TRUE16-NEXT: scratch_load_b32 v90, off, s32 offset:212
+; GFX11-TRUE16-NEXT: scratch_load_b32 v89, off, s32 offset:216
+; GFX11-TRUE16-NEXT: scratch_load_b32 v88, off, s32 offset:220
+; GFX11-TRUE16-NEXT: scratch_load_b32 v79, off, s32 offset:224
+; GFX11-TRUE16-NEXT: scratch_load_b32 v78, off, s32 offset:228
+; GFX11-TRUE16-NEXT: scratch_load_b32 v77, off, s32 offset:232
+; GFX11-TRUE16-NEXT: scratch_load_b32 v76, off, s32 offset:236
+; GFX11-TRUE16-NEXT: scratch_load_b32 v75, off, s32 offset:240
+; GFX11-TRUE16-NEXT: scratch_load_b32 v74, off, s32 offset:244
+; GFX11-TRUE16-NEXT: scratch_load_b32 v73, off, s32 offset:248
+; GFX11-TRUE16-NEXT: scratch_load_b32 v72, off, s32 offset:252
+; GFX11-TRUE16-NEXT: s_clause 0xf
+; GFX11-TRUE16-NEXT: scratch_load_b32 v63, off, s32 offset:256
+; GFX11-TRUE16-NEXT: scratch_load_b32 v62, off, s32 offset:260
+; GFX11-TRUE16-NEXT: scratch_load_b32 v61, off, s32 offset:264
+; GFX11-TRUE16-NEXT: scratch_load_b32 v60, off, s32 offset:268
+; GFX11-TRUE16-NEXT: scratch_load_b32 v59, off, s32 offset:272
+; GFX11-TRUE16-NEXT: scratch_load_b32 v58, off, s32 offset:276
+; GFX11-TRUE16-NEXT: scratch_load_b32 v57, off, s32 offset:280
+; GFX11-TRUE16-NEXT: scratch_load_b32 v56, off, s32 offset:284
+; GFX11-TRUE16-NEXT: scratch_load_b32 v47, off, s32 offset:288
+; GFX11-TRUE16-NEXT: scratch_load_b32 v46, off, s32 offset:292
+; GFX11-TRUE16-NEXT: scratch_load_b32 v45, off, s32 offset:296
+; GFX11-TRUE16-NEXT: scratch_load_b32 v44, off, s32 offset:300
+; GFX11-TRUE16-NEXT: scratch_load_b32 v43, off, s32 offset:304
+; GFX11-TRUE16-NEXT: scratch_load_b32 v42, off, s32 offset:308
+; GFX11-TRUE16-NEXT: scratch_load_b32 v41, off, s32 offset:312
+; GFX11-TRUE16-NEXT: scratch_load_b32 v40, off, s32 offset:316
+; GFX11-TRUE16-NEXT: v_dual_mov_b32 v3, v9 :: v_dual_mov_b32 v4, v14
+; GFX11-TRUE16-NEXT: v_dual_mov_b32 v9, v54 :: v_dual_mov_b32 v10, v65
+; GFX11-TRUE16-NEXT: v_dual_mov_b32 v14, v119 :: v_dual_mov_b32 v27, v28
+; GFX11-TRUE16-NEXT: s_waitcnt vmcnt(0)
; GFX11-TRUE16-NEXT: s_setpc_b64 s[30:31]
; GFX11-TRUE16-NEXT: .LBB55_4:
+; GFX11-TRUE16-NEXT: v_dual_mov_b32 v64, v28 :: v_dual_mov_b32 v53, v26
+; GFX11-TRUE16-NEXT: v_mov_b32_e32 v54, v25
; GFX11-TRUE16-NEXT: ; implicit-def: $vgpr0_vgpr1_vgpr2_vgpr3_vgpr4_vgpr5_vgpr6_vgpr7_vgpr8_vgpr9_vgpr10_vgpr11_vgpr12_vgpr13_vgpr14_vgpr15_vgpr16_vgpr17_vgpr18_vgpr19_vgpr20_vgpr21_vgpr22_vgpr23_vgpr24_vgpr25_vgpr26_vgpr27_vgpr28_vgpr29_vgpr30_vgpr31
+; GFX11-TRUE16-NEXT: ; implicit-def: $vgpr1_vgpr2_vgpr3_vgpr4_vgpr5_vgpr6_vgpr7_vgpr8_vgpr9_vgpr10_vgpr11_vgpr12_vgpr13_vgpr14_vgpr15_vgpr16_vgpr17_vgpr18_vgpr19_vgpr20_vgpr21_vgpr22_vgpr23_vgpr24_vgpr25_vgpr26_vgpr27_vgpr28_vgpr29_vgpr30_vgpr31_vgpr32
+; GFX11-TRUE16-NEXT: ; implicit-def: $vgpr3_vgpr4_vgpr5_vgpr6_vgpr7_vgpr8_vgpr9_vgpr10_vgpr11_vgpr12_vgpr13_vgpr14_vgpr15_vgpr16_vgpr17_vgpr18_vgpr19_vgpr20_vgpr21_vgpr22_vgpr23_vgpr24_vgpr25_vgpr26_vgpr27_vgpr28_vgpr29_vgpr30_vgpr31_vgpr32_vgpr33_vgpr34
+; GFX11-TRUE16-NEXT: ; implicit-def: $vgpr6_vgpr7_vgpr8_vgpr9_vgpr10_vgpr11_vgpr12_vgpr13_vgpr14_vgpr15_vgpr16_vgpr17_vgpr18_vgpr19_vgpr20_vgpr21_vgpr22_vgpr23_vgpr24_vgpr25_vgpr26_vgpr27_vgpr28_vgpr29_vgpr30_vgpr31_vgpr32_vgpr33_vgpr34_vgpr35_vgpr36_vgpr37
+; GFX11-TRUE16-NEXT: ; implicit-def: $vgpr10_vgpr11_vgpr12_vgpr13_vgpr14_vgpr15_vgpr16_vgpr17_vgpr18_vgpr19_vgpr20_vgpr21_vgpr22_vgpr23_vgpr24_vgpr25_vgpr26_vgpr27_vgpr28_vgpr29_vgpr30_vgpr31_vgpr32_vgpr33_vgpr34_vgpr35_vgpr36_vgpr37_vgpr38_vgpr39_vgpr40_vgpr41
+; GFX11-TRUE16-NEXT: ; implicit-def: $vgpr15_vgpr16_vgpr17_vgpr18_vgpr19_vgpr20_vgpr21_vgpr22_vgpr23_vgpr24_vgpr25_vgpr26_vgpr27_vgpr28_vgpr29_vgpr30_vgpr31_vgpr32_vgpr33_vgpr34_vgpr35_vgpr36_vgpr37_vgpr38_vgpr39_vgpr40_vgpr41_vgpr42_vgpr43_vgpr44_vgpr45_vgpr46
+; GFX11-TRUE16-NEXT: ; implicit-def: $vgpr21_vgpr22_vgpr23_vgpr24_vgpr25_vgpr26_vgpr27_vgpr28_vgpr29_vgpr30_vgpr31_vgpr32_vgpr33_vgpr34_vgpr35_vgpr36_vgpr37_vgpr38_vgpr39_vgpr40_vgpr41_vgpr42_vgpr43_vgpr44_vgpr45_vgpr46_vgpr47_vgpr48_vgpr49_vgpr50_vgpr51_vgpr52
+; GFX11-TRUE16-NEXT: s_delay_alu instid0(VALU_DEP_1) | instskip(NEXT) | instid1(VALU_DEP_3)
+; GFX11-TRUE16-NEXT: v_dual_mov_b32 v25, v54 :: v_dual_mov_b32 v26, v53
+; GFX11-TRUE16-NEXT: ; implicit-def: $vgpr28_vgpr29_vgpr30_vgpr31_vgpr32_vgpr33_vgpr34_vgpr35_vgpr36_vgpr37_vgpr38_vgpr39_vgpr40_vgpr41_vgpr42_vgpr43_vgpr44_vgpr45_vgpr46_vgpr47_vgpr48_vgpr49_vgpr50_vgpr51_vgpr52_vgpr53_vgpr54_vgpr55_vgpr56_vgpr57_vgpr58_vgpr59
+; GFX11-TRUE16-NEXT: v_mov_b32_e32 v28, v64
+; GFX11-TRUE16-NEXT: ; implicit-def: $vgpr36_vgpr37_vgpr38_vgpr39_vgpr40_vgpr41_vgpr42_vgpr43_vgpr44_vgpr45_vgpr46_vgpr47_vgpr48_vgpr49_vgpr50_vgpr51_vgpr52_vgpr53_vgpr54_vgpr55_vgpr56_vgpr57_vgpr58_vgpr59_vgpr60_vgpr61_vgpr62_vgpr63_vgpr64_vgpr65_vgpr66_vgpr67
+; GFX11-TRUE16-NEXT: ; implicit-def: $vgpr45_vgpr46_vgpr47_vgpr48_vgpr49_vgpr50_vgpr51_vgpr52_vgpr53_vgpr54_vgpr55_vgpr56_vgpr57_vgpr58_vgpr59_vgpr60_vgpr61_vgpr62_vgpr63_vgpr64_vgpr65_vgpr66_vgpr67_vgpr68_vgpr69_vgpr70_vgpr71_vgpr72_vgpr73_vgpr74_vgpr75_vgpr76
+; GFX11-TRUE16-NEXT: ; implicit-def: $vgpr55_vgpr56_vgpr57_vgpr58_vgpr59_vgpr60_vgpr61_vgpr62_vgpr63_vgpr64_vgpr65_vgpr66_vgpr67_vgpr68_vgpr69_vgpr70_vgpr71_vgpr72_vgpr73_vgpr74_vgpr75_vgpr76_vgpr77_vgpr78_vgpr79_vgpr80_vgpr81_vgpr82_vgpr83_vgpr84_vgpr85_vgpr86
+; GFX11-TRUE16-NEXT: ; implicit-def: $vgpr66_vgpr67_vgpr68_vgpr69_vgpr70_vgpr71_vgpr72_vgpr73_vgpr74_vgpr75_vgpr76_vgpr77_vgpr78_vgpr79_vgpr80_vgpr81_vgpr82_vgpr83_vgpr84_vgpr85_vgpr86_vgpr87_vgpr88_vgpr89_vgpr90_vgpr91_vgpr92_vgpr93_vgpr94_vgpr95_vgpr96_vgpr97
+; GFX11-TRUE16-NEXT: ; implicit-def: $vgpr78_vgpr79_vgpr80_vgpr81_vgpr82_vgpr83_vgpr84_vgpr85_vgpr86_vgpr87_vgpr88_vgpr89_vgpr90_vgpr91_vgpr92_vgpr93_vgpr94_vgpr95_vgpr96_vgpr97_vgpr98_vgpr99_vgpr100_vgpr101_vgpr102_vgpr103_vgpr104_vgpr105_vgpr106_vgpr107_vgpr108_vgpr109
+; GFX11-TRUE16-NEXT: ; implicit-def: $vgpr91_vgpr92_vgpr93_vgpr94_vgpr95_vgpr96_vgpr97_vgpr98_vgpr99_vgpr100_vgpr101_vgpr102_vgpr103_vgpr104_vgpr105_vgpr106_vgpr107_vgpr108_vgpr109_vgpr110_vgpr111_vgpr112_vgpr113_vgpr114_vgpr115_vgpr116_vgpr117_vgpr118_vgpr119_vgpr120_vgpr121_vgpr122
+; GFX11-TRUE16-NEXT: ; implicit-def: $vgpr105_vgpr106_vgpr107_vgpr108_vgpr109_vgpr110_vgpr111_vgpr112_vgpr113_vgpr114_vgpr115_vgpr116_vgpr117_vgpr118_vgpr119_vgpr120_vgpr121_vgpr122_vgpr123_vgpr124_vgpr125_vgpr126_vgpr127_vgpr128_vgpr129_vgpr130_vgpr131_vgpr132_vgpr133_vgpr134_vgpr135_vgpr136
+; GFX11-TRUE16-NEXT: ; implicit-def: $vgpr120_vgpr121_vgpr122_vgpr123_vgpr124_vgpr125_vgpr126_vgpr127_vgpr128_vgpr129_vgpr130_vgpr131_vgpr132_vgpr133_vgpr134_vgpr135_vgpr136_vgpr137_vgpr138_vgpr139_vgpr140_vgpr141_vgpr142_vgpr143_vgpr144_vgpr145_vgpr146_vgpr147_vgpr148_vgpr149_vgpr150_vgpr151
+; GFX11-TRUE16-NEXT: ; implicit-def: $vgpr136_vgpr137_vgpr138_vgpr139_vgpr140_vgpr141_vgpr142_vgpr143_vgpr144_vgpr145_vgpr146_vgpr147_vgpr148_vgpr149_vgpr150_vgpr151_vgpr152_vgpr153_vgpr154_vgpr155_vgpr156_vgpr157_vgpr158_vgpr159_vgpr160_vgpr161_vgpr162_vgpr163_vgpr164_vgpr165_vgpr166_vgpr167
+; GFX11-TRUE16-NEXT: ; implicit-def: $vgpr153_vgpr154_vgpr155_vgpr156_vgpr157_vgpr158_vgpr159_vgpr160_vgpr161_vgpr162_vgpr163_vgpr164_vgpr165_vgpr166_vgpr167_vgpr168_vgpr169_vgpr170_vgpr171_vgpr172_vgpr173_vgpr174_vgpr175_vgpr176_vgpr177_vgpr178_vgpr179_vgpr180_vgpr181_vgpr182_vgpr183_vgpr184
; GFX11-TRUE16-NEXT: s_branch .LBB55_2
;
; GFX11-FAKE16-LABEL: bitcast_v56f16_to_v14f64_scalar:
@@ -45566,27 +47522,13 @@ define inreg <56 x half> @bitcast_v56i16_to_v56f16_scalar(<56 x i16> inreg %a, i
; GFX11-TRUE16-LABEL: bitcast_v56i16_to_v56f16_scalar:
; GFX11-TRUE16: ; %bb.0:
; GFX11-TRUE16-NEXT: s_waitcnt vmcnt(0) expcnt(0) lgkmcnt(0)
-; GFX11-TRUE16-NEXT: v_mov_b16_e32 v27.h, 0
; GFX11-TRUE16-NEXT: v_cmp_ne_u32_e32 vcc_lo, 0, v10
-; GFX11-TRUE16-NEXT: v_mov_b16_e32 v27.l, v9.h
-; GFX11-TRUE16-NEXT: v_mov_b16_e32 v26.l, v8.h
-; GFX11-TRUE16-NEXT: v_mov_b16_e32 v25.l, v7.h
-; GFX11-TRUE16-NEXT: v_mov_b16_e32 v26.h, v27.h
-; GFX11-TRUE16-NEXT: v_mov_b16_e32 v25.h, v27.h
-; GFX11-TRUE16-NEXT: v_mov_b16_e32 v24.l, v6.h
-; GFX11-TRUE16-NEXT: v_mov_b16_e32 v24.h, v27.h
-; GFX11-TRUE16-NEXT: v_mov_b16_e32 v23.l, v5.h
-; GFX11-TRUE16-NEXT: v_mov_b16_e32 v23.h, v27.h
-; GFX11-TRUE16-NEXT: v_mov_b16_e32 v22.l, v4.h
-; GFX11-TRUE16-NEXT: v_mov_b16_e32 v22.h, v27.h
-; GFX11-TRUE16-NEXT: v_mov_b16_e32 v21.l, v3.h
-; GFX11-TRUE16-NEXT: v_mov_b16_e32 v21.h, v27.h
-; GFX11-TRUE16-NEXT: v_mov_b16_e32 v20.l, v2.h
-; GFX11-TRUE16-NEXT: v_mov_b16_e32 v20.h, v27.h
-; GFX11-TRUE16-NEXT: v_mov_b16_e32 v19.l, v1.h
-; GFX11-TRUE16-NEXT: v_mov_b16_e32 v19.h, v27.h
-; GFX11-TRUE16-NEXT: v_mov_b16_e32 v18.l, v0.h
-; GFX11-TRUE16-NEXT: v_mov_b16_e32 v18.h, v27.h
+; GFX11-TRUE16-NEXT: v_dual_mov_b32 v27, v9 :: v_dual_mov_b32 v26, v8
+; GFX11-TRUE16-NEXT: v_dual_mov_b32 v25, v7 :: v_dual_mov_b32 v24, v6
+; GFX11-TRUE16-NEXT: v_dual_mov_b32 v23, v5 :: v_dual_mov_b32 v22, v4
+; GFX11-TRUE16-NEXT: v_dual_mov_b32 v21, v3 :: v_dual_mov_b32 v20, v2
+; GFX11-TRUE16-NEXT: v_dual_mov_b32 v19, v1 :: v_dual_mov_b32 v18, v0
+; GFX11-TRUE16-NEXT: v_mov_b16_e32 v28.h, 0
; GFX11-TRUE16-NEXT: s_lshr_b32 s45, s29, 16
; GFX11-TRUE16-NEXT: s_lshr_b32 s44, s28, 16
; GFX11-TRUE16-NEXT: s_lshr_b32 s42, s27, 16
@@ -45608,30 +47550,29 @@ define inreg <56 x half> @bitcast_v56i16_to_v56f16_scalar(<56 x i16> inreg %a, i
; GFX11-TRUE16-NEXT: s_mov_b32 s46, 0
; GFX11-TRUE16-NEXT: s_and_b32 s47, vcc_lo, exec_lo
; GFX11-TRUE16-NEXT: s_cbranch_scc0 .LBB57_3
-; GFX11-TRUE16-NEXT: ; %bb.1: ; %Flow
+; GFX11-TRUE16-NEXT: ; %bb.1: ; %cmp.false
+; GFX11-TRUE16-NEXT: v_mov_b16_e32 v28.l, v27.h
+; GFX11-TRUE16-NEXT: v_mov_b16_e32 v29.l, v26.h
+; GFX11-TRUE16-NEXT: v_mov_b16_e32 v29.h, v28.h
+; GFX11-TRUE16-NEXT: v_mov_b16_e32 v30.l, v25.h
+; GFX11-TRUE16-NEXT: v_mov_b16_e32 v30.h, v28.h
+; GFX11-TRUE16-NEXT: v_mov_b16_e32 v31.l, v24.h
+; GFX11-TRUE16-NEXT: v_mov_b16_e32 v31.h, v28.h
+; GFX11-TRUE16-NEXT: v_mov_b16_e32 v32.l, v23.h
+; GFX11-TRUE16-NEXT: v_mov_b16_e32 v32.h, v28.h
+; GFX11-TRUE16-NEXT: v_mov_b16_e32 v33.l, v22.h
+; GFX11-TRUE16-NEXT: v_mov_b16_e32 v33.h, v28.h
+; GFX11-TRUE16-NEXT: v_mov_b16_e32 v34.l, v21.h
+; GFX11-TRUE16-NEXT: v_mov_b16_e32 v34.h, v28.h
+; GFX11-TRUE16-NEXT: v_mov_b16_e32 v35.l, v20.h
+; GFX11-TRUE16-NEXT: v_mov_b16_e32 v35.h, v28.h
+; GFX11-TRUE16-NEXT: v_mov_b16_e32 v36.l, v19.h
+; GFX11-TRUE16-NEXT: v_mov_b16_e32 v36.h, v28.h
+; GFX11-TRUE16-NEXT: v_mov_b16_e32 v37.l, v18.h
+; GFX11-TRUE16-NEXT: v_mov_b16_e32 v37.h, v28.h
; GFX11-TRUE16-NEXT: s_and_not1_b32 vcc_lo, exec_lo, s46
; GFX11-TRUE16-NEXT: s_cbranch_vccnz .LBB57_4
; GFX11-TRUE16-NEXT: .LBB57_2: ; %cmp.true
-; GFX11-TRUE16-NEXT: v_and_b32_e32 v9, 0xffff, v9
-; GFX11-TRUE16-NEXT: v_and_b32_e32 v8, 0xffff, v8
-; GFX11-TRUE16-NEXT: v_and_b32_e32 v7, 0xffff, v7
-; GFX11-TRUE16-NEXT: v_and_b32_e32 v6, 0xffff, v6
-; GFX11-TRUE16-NEXT: v_and_b32_e32 v5, 0xffff, v5
-; GFX11-TRUE16-NEXT: v_and_b32_e32 v4, 0xffff, v4
-; GFX11-TRUE16-NEXT: v_and_b32_e32 v3, 0xffff, v3
-; GFX11-TRUE16-NEXT: v_and_b32_e32 v2, 0xffff, v2
-; GFX11-TRUE16-NEXT: v_and_b32_e32 v1, 0xffff, v1
-; GFX11-TRUE16-NEXT: v_and_b32_e32 v0, 0xffff, v0
-; GFX11-TRUE16-NEXT: v_lshl_or_b32 v9, v27, 16, v9
-; GFX11-TRUE16-NEXT: v_lshl_or_b32 v8, v26, 16, v8
-; GFX11-TRUE16-NEXT: v_lshl_or_b32 v7, v25, 16, v7
-; GFX11-TRUE16-NEXT: v_lshl_or_b32 v6, v24, 16, v6
-; GFX11-TRUE16-NEXT: v_lshl_or_b32 v5, v23, 16, v5
-; GFX11-TRUE16-NEXT: v_lshl_or_b32 v4, v22, 16, v4
-; GFX11-TRUE16-NEXT: v_lshl_or_b32 v3, v21, 16, v3
-; GFX11-TRUE16-NEXT: v_lshl_or_b32 v2, v20, 16, v2
-; GFX11-TRUE16-NEXT: v_lshl_or_b32 v1, v19, 16, v1
-; GFX11-TRUE16-NEXT: v_lshl_or_b32 v0, v18, 16, v0
; GFX11-TRUE16-NEXT: s_pack_ll_b32_b16 s29, s29, s45
; GFX11-TRUE16-NEXT: s_pack_ll_b32_b16 s28, s28, s44
; GFX11-TRUE16-NEXT: s_pack_ll_b32_b16 s27, s27, s42
@@ -45650,75 +47591,85 @@ define inreg <56 x half> @bitcast_v56i16_to_v56f16_scalar(<56 x i16> inreg %a, i
; GFX11-TRUE16-NEXT: s_pack_ll_b32_b16 s2, s2, s7
; GFX11-TRUE16-NEXT: s_pack_ll_b32_b16 s0, s0, s43
; GFX11-TRUE16-NEXT: s_pack_ll_b32_b16 s1, s1, s4
-; GFX11-TRUE16-NEXT: v_pk_add_u16 v9, v9, 3 op_sel_hi:[1,0]
-; GFX11-TRUE16-NEXT: v_pk_add_u16 v8, v8, 3 op_sel_hi:[1,0]
-; GFX11-TRUE16-NEXT: v_pk_add_u16 v7, v7, 3 op_sel_hi:[1,0]
-; GFX11-TRUE16-NEXT: v_pk_add_u16 v6, v6, 3 op_sel_hi:[1,0]
-; GFX11-TRUE16-NEXT: v_pk_add_u16 v5, v5, 3 op_sel_hi:[1,0]
-; GFX11-TRUE16-NEXT: v_pk_add_u16 v4, v4, 3 op_sel_hi:[1,0]
-; GFX11-TRUE16-NEXT: v_pk_add_u16 v3, v3, 3 op_sel_hi:[1,0]
-; GFX11-TRUE16-NEXT: v_pk_add_u16 v2, v2, 3 op_sel_hi:[1,0]
-; GFX11-TRUE16-NEXT: v_pk_add_u16 v1, v1, 3 op_sel_hi:[1,0]
-; GFX11-TRUE16-NEXT: v_pk_add_u16 v0, v0, 3 op_sel_hi:[1,0]
-; GFX11-TRUE16-NEXT: v_pk_add_u16 v13, s29, 3 op_sel_hi:[1,0]
-; GFX11-TRUE16-NEXT: v_pk_add_u16 v14, s28, 3 op_sel_hi:[1,0]
+; GFX11-TRUE16-NEXT: v_pk_add_u16 v27, v27, 3 op_sel_hi:[1,0]
+; GFX11-TRUE16-NEXT: v_pk_add_u16 v26, v26, 3 op_sel_hi:[1,0]
+; GFX11-TRUE16-NEXT: v_pk_add_u16 v25, v25, 3 op_sel_hi:[1,0]
+; GFX11-TRUE16-NEXT: v_pk_add_u16 v24, v24, 3 op_sel_hi:[1,0]
+; GFX11-TRUE16-NEXT: v_pk_add_u16 v23, v23, 3 op_sel_hi:[1,0]
+; GFX11-TRUE16-NEXT: v_pk_add_u16 v22, v22, 3 op_sel_hi:[1,0]
+; GFX11-TRUE16-NEXT: v_pk_add_u16 v21, v21, 3 op_sel_hi:[1,0]
+; GFX11-TRUE16-NEXT: v_pk_add_u16 v20, v20, 3 op_sel_hi:[1,0]
+; GFX11-TRUE16-NEXT: v_pk_add_u16 v19, v19, 3 op_sel_hi:[1,0]
+; GFX11-TRUE16-NEXT: v_pk_add_u16 v18, v18, 3 op_sel_hi:[1,0]
+; GFX11-TRUE16-NEXT: v_pk_add_u16 v17, s29, 3 op_sel_hi:[1,0]
+; GFX11-TRUE16-NEXT: v_pk_add_u16 v16, s28, 3 op_sel_hi:[1,0]
; GFX11-TRUE16-NEXT: v_pk_add_u16 v15, s27, 3 op_sel_hi:[1,0]
-; GFX11-TRUE16-NEXT: v_pk_add_u16 v16, s26, 3 op_sel_hi:[1,0]
-; GFX11-TRUE16-NEXT: v_pk_add_u16 v17, s25, 3 op_sel_hi:[1,0]
-; GFX11-TRUE16-NEXT: v_pk_add_u16 v10, s15, 3 op_sel_hi:[1,0]
+; GFX11-TRUE16-NEXT: v_pk_add_u16 v14, s26, 3 op_sel_hi:[1,0]
+; GFX11-TRUE16-NEXT: v_pk_add_u16 v13, s25, 3 op_sel_hi:[1,0]
+; GFX11-TRUE16-NEXT: v_pk_add_u16 v12, s15, 3 op_sel_hi:[1,0]
; GFX11-TRUE16-NEXT: v_pk_add_u16 v11, s14, 3 op_sel_hi:[1,0]
-; GFX11-TRUE16-NEXT: v_pk_add_u16 v12, s13, 3 op_sel_hi:[1,0]
-; GFX11-TRUE16-NEXT: v_pk_add_u16 v28, s12, 3 op_sel_hi:[1,0]
-; GFX11-TRUE16-NEXT: v_pk_add_u16 v29, s11, 3 op_sel_hi:[1,0]
-; GFX11-TRUE16-NEXT: v_pk_add_u16 v33, s10, 3 op_sel_hi:[1,0]
-; GFX11-TRUE16-NEXT: v_pk_add_u16 v34, s9, 3 op_sel_hi:[1,0]
-; GFX11-TRUE16-NEXT: v_pk_add_u16 v35, s8, 3 op_sel_hi:[1,0]
-; GFX11-TRUE16-NEXT: v_pk_add_u16 v32, s0, 3 op_sel_hi:[1,0]
-; GFX11-TRUE16-NEXT: v_pk_add_u16 v31, s1, 3 op_sel_hi:[1,0]
-; GFX11-TRUE16-NEXT: v_pk_add_u16 v30, s2, 3 op_sel_hi:[1,0]
-; GFX11-TRUE16-NEXT: v_pk_add_u16 v37, s3, 3 op_sel_hi:[1,0]
-; GFX11-TRUE16-NEXT: v_pk_add_u16 v36, s6, 3 op_sel_hi:[1,0]
-; GFX11-TRUE16-NEXT: v_lshrrev_b32_e32 v71, 16, v32
-; GFX11-TRUE16-NEXT: v_lshrrev_b32_e32 v70, 16, v31
-; GFX11-TRUE16-NEXT: v_lshrrev_b32_e32 v69, 16, v30
-; GFX11-TRUE16-NEXT: v_lshrrev_b32_e32 v68, 16, v37
-; GFX11-TRUE16-NEXT: v_lshrrev_b32_e32 v67, 16, v36
-; GFX11-TRUE16-NEXT: v_lshrrev_b32_e32 v66, 16, v35
-; GFX11-TRUE16-NEXT: v_lshrrev_b32_e32 v65, 16, v34
-; GFX11-TRUE16-NEXT: v_lshrrev_b32_e32 v64, 16, v33
-; GFX11-TRUE16-NEXT: v_lshrrev_b32_e32 v55, 16, v29
-; GFX11-TRUE16-NEXT: v_lshrrev_b32_e32 v54, 16, v28
-; GFX11-TRUE16-NEXT: v_lshrrev_b32_e32 v53, 16, v12
+; GFX11-TRUE16-NEXT: v_pk_add_u16 v10, s13, 3 op_sel_hi:[1,0]
+; GFX11-TRUE16-NEXT: v_pk_add_u16 v9, s12, 3 op_sel_hi:[1,0]
+; GFX11-TRUE16-NEXT: v_pk_add_u16 v8, s11, 3 op_sel_hi:[1,0]
+; GFX11-TRUE16-NEXT: v_pk_add_u16 v7, s10, 3 op_sel_hi:[1,0]
+; GFX11-TRUE16-NEXT: v_pk_add_u16 v6, s9, 3 op_sel_hi:[1,0]
+; GFX11-TRUE16-NEXT: v_pk_add_u16 v5, s8, 3 op_sel_hi:[1,0]
+; GFX11-TRUE16-NEXT: v_pk_add_u16 v0, s0, 3 op_sel_hi:[1,0]
+; GFX11-TRUE16-NEXT: v_pk_add_u16 v1, s1, 3 op_sel_hi:[1,0]
+; GFX11-TRUE16-NEXT: v_pk_add_u16 v2, s2, 3 op_sel_hi:[1,0]
+; GFX11-TRUE16-NEXT: v_pk_add_u16 v3, s3, 3 op_sel_hi:[1,0]
+; GFX11-TRUE16-NEXT: v_pk_add_u16 v4, s6, 3 op_sel_hi:[1,0]
+; GFX11-TRUE16-NEXT: v_lshrrev_b32_e32 v71, 16, v0
+; GFX11-TRUE16-NEXT: v_lshrrev_b32_e32 v70, 16, v1
+; GFX11-TRUE16-NEXT: v_lshrrev_b32_e32 v69, 16, v2
+; GFX11-TRUE16-NEXT: v_lshrrev_b32_e32 v68, 16, v3
+; GFX11-TRUE16-NEXT: v_lshrrev_b32_e32 v67, 16, v4
+; GFX11-TRUE16-NEXT: v_lshrrev_b32_e32 v66, 16, v5
+; GFX11-TRUE16-NEXT: v_lshrrev_b32_e32 v65, 16, v6
+; GFX11-TRUE16-NEXT: v_lshrrev_b32_e32 v64, 16, v7
+; GFX11-TRUE16-NEXT: v_lshrrev_b32_e32 v55, 16, v8
+; GFX11-TRUE16-NEXT: v_lshrrev_b32_e32 v54, 16, v9
+; GFX11-TRUE16-NEXT: v_lshrrev_b32_e32 v53, 16, v10
; GFX11-TRUE16-NEXT: v_lshrrev_b32_e32 v52, 16, v11
-; GFX11-TRUE16-NEXT: v_lshrrev_b32_e32 v51, 16, v10
-; GFX11-TRUE16-NEXT: v_lshrrev_b32_e32 v50, 16, v17
-; GFX11-TRUE16-NEXT: v_lshrrev_b32_e32 v49, 16, v16
+; GFX11-TRUE16-NEXT: v_lshrrev_b32_e32 v51, 16, v12
+; GFX11-TRUE16-NEXT: v_lshrrev_b32_e32 v50, 16, v13
+; GFX11-TRUE16-NEXT: v_lshrrev_b32_e32 v49, 16, v14
; GFX11-TRUE16-NEXT: v_lshrrev_b32_e32 v48, 16, v15
-; GFX11-TRUE16-NEXT: v_lshrrev_b32_e32 v39, 16, v14
-; GFX11-TRUE16-NEXT: v_lshrrev_b32_e32 v38, 16, v13
-; GFX11-TRUE16-NEXT: v_lshrrev_b32_e32 v18, 16, v0
-; GFX11-TRUE16-NEXT: v_lshrrev_b32_e32 v19, 16, v1
-; GFX11-TRUE16-NEXT: v_lshrrev_b32_e32 v20, 16, v2
-; GFX11-TRUE16-NEXT: v_lshrrev_b32_e32 v21, 16, v3
-; GFX11-TRUE16-NEXT: v_lshrrev_b32_e32 v22, 16, v4
-; GFX11-TRUE16-NEXT: v_lshrrev_b32_e32 v23, 16, v5
-; GFX11-TRUE16-NEXT: v_lshrrev_b32_e32 v24, 16, v6
-; GFX11-TRUE16-NEXT: v_lshrrev_b32_e32 v25, 16, v7
-; GFX11-TRUE16-NEXT: v_lshrrev_b32_e32 v26, 16, v8
-; GFX11-TRUE16-NEXT: v_lshrrev_b32_e32 v27, 16, v9
+; GFX11-TRUE16-NEXT: v_lshrrev_b32_e32 v39, 16, v16
+; GFX11-TRUE16-NEXT: v_lshrrev_b32_e32 v38, 16, v17
+; GFX11-TRUE16-NEXT: v_lshrrev_b32_e32 v37, 16, v18
+; GFX11-TRUE16-NEXT: v_lshrrev_b32_e32 v36, 16, v19
+; GFX11-TRUE16-NEXT: v_lshrrev_b32_e32 v35, 16, v20
+; GFX11-TRUE16-NEXT: v_lshrrev_b32_e32 v34, 16, v21
+; GFX11-TRUE16-NEXT: v_lshrrev_b32_e32 v33, 16, v22
+; GFX11-TRUE16-NEXT: v_lshrrev_b32_e32 v32, 16, v23
+; GFX11-TRUE16-NEXT: v_lshrrev_b32_e32 v31, 16, v24
+; GFX11-TRUE16-NEXT: v_lshrrev_b32_e32 v30, 16, v25
+; GFX11-TRUE16-NEXT: v_lshrrev_b32_e32 v29, 16, v26
+; GFX11-TRUE16-NEXT: v_lshrrev_b32_e32 v28, 16, v27
; GFX11-TRUE16-NEXT: s_branch .LBB57_5
; GFX11-TRUE16-NEXT: .LBB57_3:
+; GFX11-TRUE16-NEXT: ; implicit-def: $vgpr37
+; GFX11-TRUE16-NEXT: ; implicit-def: $vgpr36
+; GFX11-TRUE16-NEXT: ; implicit-def: $vgpr35
+; GFX11-TRUE16-NEXT: ; implicit-def: $vgpr34
+; GFX11-TRUE16-NEXT: ; implicit-def: $vgpr33
+; GFX11-TRUE16-NEXT: ; implicit-def: $vgpr32
+; GFX11-TRUE16-NEXT: ; implicit-def: $vgpr31
+; GFX11-TRUE16-NEXT: ; implicit-def: $vgpr30
+; GFX11-TRUE16-NEXT: ; implicit-def: $vgpr29
+; GFX11-TRUE16-NEXT: ; implicit-def: $vgpr28
; GFX11-TRUE16-NEXT: s_branch .LBB57_2
; GFX11-TRUE16-NEXT: .LBB57_4:
-; GFX11-TRUE16-NEXT: v_dual_mov_b32 v13, s29 :: v_dual_mov_b32 v14, s28
-; GFX11-TRUE16-NEXT: v_dual_mov_b32 v15, s27 :: v_dual_mov_b32 v16, s26
-; GFX11-TRUE16-NEXT: v_dual_mov_b32 v17, s25 :: v_dual_mov_b32 v10, s24
-; GFX11-TRUE16-NEXT: v_dual_mov_b32 v11, s23 :: v_dual_mov_b32 v12, s22
-; GFX11-TRUE16-NEXT: v_dual_mov_b32 v28, s21 :: v_dual_mov_b32 v29, s20
-; GFX11-TRUE16-NEXT: v_dual_mov_b32 v33, s19 :: v_dual_mov_b32 v34, s18
-; GFX11-TRUE16-NEXT: v_dual_mov_b32 v35, s17 :: v_dual_mov_b32 v36, s16
-; GFX11-TRUE16-NEXT: v_dual_mov_b32 v37, s3 :: v_dual_mov_b32 v30, s2
-; GFX11-TRUE16-NEXT: v_dual_mov_b32 v31, s1 :: v_dual_mov_b32 v32, s0
+; GFX11-TRUE16-NEXT: v_dual_mov_b32 v17, s29 :: v_dual_mov_b32 v16, s28
+; GFX11-TRUE16-NEXT: v_dual_mov_b32 v15, s27 :: v_dual_mov_b32 v14, s26
+; GFX11-TRUE16-NEXT: v_dual_mov_b32 v13, s25 :: v_dual_mov_b32 v12, s24
+; GFX11-TRUE16-NEXT: v_dual_mov_b32 v11, s23 :: v_dual_mov_b32 v10, s22
+; GFX11-TRUE16-NEXT: v_dual_mov_b32 v9, s21 :: v_dual_mov_b32 v8, s20
+; GFX11-TRUE16-NEXT: v_dual_mov_b32 v7, s19 :: v_dual_mov_b32 v6, s18
+; GFX11-TRUE16-NEXT: v_dual_mov_b32 v5, s17 :: v_dual_mov_b32 v4, s16
+; GFX11-TRUE16-NEXT: v_dual_mov_b32 v3, s3 :: v_dual_mov_b32 v2, s2
+; GFX11-TRUE16-NEXT: v_dual_mov_b32 v1, s1 :: v_dual_mov_b32 v0, s0
; GFX11-TRUE16-NEXT: v_dual_mov_b32 v38, s45 :: v_dual_mov_b32 v39, s44
; GFX11-TRUE16-NEXT: v_dual_mov_b32 v48, s42 :: v_dual_mov_b32 v49, s41
; GFX11-TRUE16-NEXT: v_dual_mov_b32 v50, s40 :: v_dual_mov_b32 v51, s15
@@ -45729,69 +47680,49 @@ define inreg <56 x half> @bitcast_v56i16_to_v56f16_scalar(<56 x i16> inreg %a, i
; GFX11-TRUE16-NEXT: v_dual_mov_b32 v68, s5 :: v_dual_mov_b32 v69, s7
; GFX11-TRUE16-NEXT: v_dual_mov_b32 v70, s4 :: v_dual_mov_b32 v71, s43
; GFX11-TRUE16-NEXT: .LBB57_5: ; %end
-; GFX11-TRUE16-NEXT: v_and_b32_e32 v31, 0xffff, v31
-; GFX11-TRUE16-NEXT: v_and_b32_e32 v37, 0xffff, v37
-; GFX11-TRUE16-NEXT: v_and_b32_e32 v1, 0xffff, v1
-; GFX11-TRUE16-NEXT: v_and_b32_e32 v3, 0xffff, v3
-; GFX11-TRUE16-NEXT: v_and_b32_e32 v35, 0xffff, v35
-; GFX11-TRUE16-NEXT: v_lshl_or_b32 v31, v70, 16, v31
-; GFX11-TRUE16-NEXT: v_and_b32_e32 v32, 0xffff, v32
-; GFX11-TRUE16-NEXT: v_and_b32_e32 v70, 0xffff, v33
-; GFX11-TRUE16-NEXT: v_lshl_or_b32 v33, v68, 16, v37
-; GFX11-TRUE16-NEXT: v_and_b32_e32 v80, 0xffff, v30
-; GFX11-TRUE16-NEXT: v_lshl_or_b32 v19, v19, 16, v1
-; GFX11-TRUE16-NEXT: v_lshl_or_b32 v21, v21, 16, v3
-; GFX11-TRUE16-NEXT: v_and_b32_e32 v1, 0xffff, v6
-; GFX11-TRUE16-NEXT: v_and_b32_e32 v3, 0xffff, v8
-; GFX11-TRUE16-NEXT: v_lshl_or_b32 v35, v66, 16, v35
-; GFX11-TRUE16-NEXT: v_and_b32_e32 v36, 0xffff, v36
-; GFX11-TRUE16-NEXT: v_and_b32_e32 v0, 0xffff, v0
-; GFX11-TRUE16-NEXT: v_lshl_or_b32 v24, v24, 16, v1
-; GFX11-TRUE16-NEXT: v_lshl_or_b32 v26, v26, 16, v3
-; GFX11-TRUE16-NEXT: v_mov_b32_e32 v1, v31
-; GFX11-TRUE16-NEXT: v_lshl_or_b32 v30, v71, 16, v32
-; GFX11-TRUE16-NEXT: v_mov_b32_e32 v3, v33
-; GFX11-TRUE16-NEXT: v_lshl_or_b32 v32, v69, 16, v80
-; GFX11-TRUE16-NEXT: v_and_b32_e32 v69, 0xffff, v34
-; GFX11-TRUE16-NEXT: v_lshl_or_b32 v37, v64, 16, v70
-; GFX11-TRUE16-NEXT: v_and_b32_e32 v29, 0xffff, v29
-; GFX11-TRUE16-NEXT: v_and_b32_e32 v64, 0xffff, v28
-; GFX11-TRUE16-NEXT: v_and_b32_e32 v12, 0xffff, v12
-; GFX11-TRUE16-NEXT: v_and_b32_e32 v11, 0xffff, v11
-; GFX11-TRUE16-NEXT: v_and_b32_e32 v2, 0xffff, v2
-; GFX11-TRUE16-NEXT: v_and_b32_e32 v4, 0xffff, v4
-; GFX11-TRUE16-NEXT: v_lshl_or_b32 v18, v18, 16, v0
-; GFX11-TRUE16-NEXT: v_dual_mov_b32 v5, v35 :: v_dual_and_b32 v0, 0xffff, v5
-; GFX11-TRUE16-NEXT: v_lshl_or_b32 v34, v67, 16, v36
-; GFX11-TRUE16-NEXT: v_lshl_or_b32 v36, v65, 16, v69
-; GFX11-TRUE16-NEXT: v_and_b32_e32 v65, 0xffff, v10
-; GFX11-TRUE16-NEXT: v_lshl_or_b32 v28, v55, 16, v29
-; GFX11-TRUE16-NEXT: v_lshl_or_b32 v29, v54, 16, v64
-; GFX11-TRUE16-NEXT: v_lshl_or_b32 v10, v53, 16, v12
-; GFX11-TRUE16-NEXT: v_lshl_or_b32 v11, v52, 16, v11
-; GFX11-TRUE16-NEXT: v_lshl_or_b32 v12, v51, 16, v65
-; GFX11-TRUE16-NEXT: v_and_b32_e32 v17, 0xffff, v17
-; GFX11-TRUE16-NEXT: v_and_b32_e32 v16, 0xffff, v16
-; GFX11-TRUE16-NEXT: v_and_b32_e32 v15, 0xffff, v15
-; GFX11-TRUE16-NEXT: v_and_b32_e32 v51, 0xffff, v14
-; GFX11-TRUE16-NEXT: v_and_b32_e32 v52, 0xffff, v13
-; GFX11-TRUE16-NEXT: v_lshl_or_b32 v20, v20, 16, v2
-; GFX11-TRUE16-NEXT: v_lshl_or_b32 v22, v22, 16, v4
-; GFX11-TRUE16-NEXT: v_and_b32_e32 v2, 0xffff, v7
-; GFX11-TRUE16-NEXT: v_and_b32_e32 v4, 0xffff, v9
-; GFX11-TRUE16-NEXT: v_lshl_or_b32 v13, v50, 16, v17
-; GFX11-TRUE16-NEXT: v_lshl_or_b32 v14, v49, 16, v16
-; GFX11-TRUE16-NEXT: v_lshl_or_b32 v15, v48, 16, v15
-; GFX11-TRUE16-NEXT: v_lshl_or_b32 v16, v39, 16, v51
-; GFX11-TRUE16-NEXT: v_lshl_or_b32 v17, v38, 16, v52
-; GFX11-TRUE16-NEXT: v_lshl_or_b32 v23, v23, 16, v0
-; GFX11-TRUE16-NEXT: v_lshl_or_b32 v25, v25, 16, v2
-; GFX11-TRUE16-NEXT: v_lshl_or_b32 v27, v27, 16, v4
-; GFX11-TRUE16-NEXT: v_mov_b32_e32 v0, v30
-; GFX11-TRUE16-NEXT: v_mov_b32_e32 v2, v32
-; GFX11-TRUE16-NEXT: v_mov_b32_e32 v4, v34
-; GFX11-TRUE16-NEXT: v_dual_mov_b32 v6, v36 :: v_dual_mov_b32 v7, v37
-; GFX11-TRUE16-NEXT: v_dual_mov_b32 v8, v28 :: v_dual_mov_b32 v9, v29
+; GFX11-TRUE16-NEXT: s_delay_alu instid0(VALU_DEP_1) | instskip(NEXT) | instid1(VALU_DEP_3)
+; GFX11-TRUE16-NEXT: v_dual_mov_b32 v71, v71 :: v_dual_mov_b32 v70, v70
+; GFX11-TRUE16-NEXT: v_dual_mov_b32 v69, v69 :: v_dual_mov_b32 v68, v68
+; GFX11-TRUE16-NEXT: v_dual_mov_b32 v67, v67 :: v_dual_mov_b32 v66, v66
+; GFX11-TRUE16-NEXT: v_dual_mov_b32 v65, v65 :: v_dual_mov_b32 v64, v64
+; GFX11-TRUE16-NEXT: v_dual_mov_b32 v55, v55 :: v_dual_mov_b32 v54, v54
+; GFX11-TRUE16-NEXT: v_dual_mov_b32 v53, v53 :: v_dual_mov_b32 v52, v52
+; GFX11-TRUE16-NEXT: v_dual_mov_b32 v51, v51 :: v_dual_mov_b32 v50, v50
+; GFX11-TRUE16-NEXT: v_dual_mov_b32 v49, v49 :: v_dual_mov_b32 v48, v48
+; GFX11-TRUE16-NEXT: v_dual_mov_b32 v39, v39 :: v_dual_mov_b32 v38, v38
+; GFX11-TRUE16-NEXT: v_dual_mov_b32 v37, v37 :: v_dual_mov_b32 v36, v36
+; GFX11-TRUE16-NEXT: v_dual_mov_b32 v35, v35 :: v_dual_mov_b32 v34, v34
+; GFX11-TRUE16-NEXT: v_dual_mov_b32 v33, v33 :: v_dual_mov_b32 v32, v32
+; GFX11-TRUE16-NEXT: v_dual_mov_b32 v31, v31 :: v_dual_mov_b32 v30, v30
+; GFX11-TRUE16-NEXT: v_dual_mov_b32 v29, v29 :: v_dual_mov_b32 v28, v28
+; GFX11-TRUE16-NEXT: v_mov_b16_e32 v0.h, v71.l
+; GFX11-TRUE16-NEXT: v_mov_b16_e32 v1.h, v70.l
+; GFX11-TRUE16-NEXT: v_mov_b16_e32 v2.h, v69.l
+; GFX11-TRUE16-NEXT: v_mov_b16_e32 v3.h, v68.l
+; GFX11-TRUE16-NEXT: v_mov_b16_e32 v4.h, v67.l
+; GFX11-TRUE16-NEXT: v_mov_b16_e32 v5.h, v66.l
+; GFX11-TRUE16-NEXT: v_mov_b16_e32 v6.h, v65.l
+; GFX11-TRUE16-NEXT: v_mov_b16_e32 v7.h, v64.l
+; GFX11-TRUE16-NEXT: v_mov_b16_e32 v8.h, v55.l
+; GFX11-TRUE16-NEXT: v_mov_b16_e32 v9.h, v54.l
+; GFX11-TRUE16-NEXT: v_mov_b16_e32 v10.h, v53.l
+; GFX11-TRUE16-NEXT: v_mov_b16_e32 v11.h, v52.l
+; GFX11-TRUE16-NEXT: v_mov_b16_e32 v12.h, v51.l
+; GFX11-TRUE16-NEXT: v_mov_b16_e32 v13.h, v50.l
+; GFX11-TRUE16-NEXT: v_mov_b16_e32 v14.h, v49.l
+; GFX11-TRUE16-NEXT: v_mov_b16_e32 v15.h, v48.l
+; GFX11-TRUE16-NEXT: v_mov_b16_e32 v16.h, v39.l
+; GFX11-TRUE16-NEXT: v_mov_b16_e32 v17.h, v38.l
+; GFX11-TRUE16-NEXT: v_mov_b16_e32 v18.h, v37.l
+; GFX11-TRUE16-NEXT: v_mov_b16_e32 v19.h, v36.l
+; GFX11-TRUE16-NEXT: v_mov_b16_e32 v20.h, v35.l
+; GFX11-TRUE16-NEXT: v_mov_b16_e32 v21.h, v34.l
+; GFX11-TRUE16-NEXT: v_mov_b16_e32 v22.h, v33.l
+; GFX11-TRUE16-NEXT: v_mov_b16_e32 v23.h, v32.l
+; GFX11-TRUE16-NEXT: v_mov_b16_e32 v24.h, v31.l
+; GFX11-TRUE16-NEXT: v_mov_b16_e32 v25.h, v30.l
+; GFX11-TRUE16-NEXT: v_mov_b16_e32 v26.h, v29.l
+; GFX11-TRUE16-NEXT: v_mov_b16_e32 v27.h, v28.l
; GFX11-TRUE16-NEXT: s_setpc_b64 s[30:31]
;
; GFX11-FAKE16-LABEL: bitcast_v56i16_to_v56f16_scalar:
@@ -48280,27 +50211,13 @@ define inreg <56 x i16> @bitcast_v56f16_to_v56i16_scalar(<56 x half> inreg %a, i
; GFX11-TRUE16-LABEL: bitcast_v56f16_to_v56i16_scalar:
; GFX11-TRUE16: ; %bb.0:
; GFX11-TRUE16-NEXT: s_waitcnt vmcnt(0) expcnt(0) lgkmcnt(0)
-; GFX11-TRUE16-NEXT: v_mov_b16_e32 v27.h, 0
; GFX11-TRUE16-NEXT: v_cmp_ne_u32_e32 vcc_lo, 0, v10
-; GFX11-TRUE16-NEXT: v_mov_b16_e32 v27.l, v9.h
-; GFX11-TRUE16-NEXT: v_mov_b16_e32 v26.l, v8.h
-; GFX11-TRUE16-NEXT: v_mov_b16_e32 v25.l, v7.h
-; GFX11-TRUE16-NEXT: v_mov_b16_e32 v26.h, v27.h
-; GFX11-TRUE16-NEXT: v_mov_b16_e32 v25.h, v27.h
-; GFX11-TRUE16-NEXT: v_mov_b16_e32 v24.l, v6.h
-; GFX11-TRUE16-NEXT: v_mov_b16_e32 v24.h, v27.h
-; GFX11-TRUE16-NEXT: v_mov_b16_e32 v23.l, v5.h
-; GFX11-TRUE16-NEXT: v_mov_b16_e32 v23.h, v27.h
-; GFX11-TRUE16-NEXT: v_mov_b16_e32 v22.l, v4.h
-; GFX11-TRUE16-NEXT: v_mov_b16_e32 v22.h, v27.h
-; GFX11-TRUE16-NEXT: v_mov_b16_e32 v21.l, v3.h
-; GFX11-TRUE16-NEXT: v_mov_b16_e32 v21.h, v27.h
-; GFX11-TRUE16-NEXT: v_mov_b16_e32 v20.l, v2.h
-; GFX11-TRUE16-NEXT: v_mov_b16_e32 v20.h, v27.h
-; GFX11-TRUE16-NEXT: v_mov_b16_e32 v19.l, v1.h
-; GFX11-TRUE16-NEXT: v_mov_b16_e32 v19.h, v27.h
-; GFX11-TRUE16-NEXT: v_mov_b16_e32 v18.l, v0.h
-; GFX11-TRUE16-NEXT: v_mov_b16_e32 v18.h, v27.h
+; GFX11-TRUE16-NEXT: v_dual_mov_b32 v27, v9 :: v_dual_mov_b32 v26, v8
+; GFX11-TRUE16-NEXT: v_dual_mov_b32 v25, v7 :: v_dual_mov_b32 v24, v6
+; GFX11-TRUE16-NEXT: v_dual_mov_b32 v23, v5 :: v_dual_mov_b32 v22, v4
+; GFX11-TRUE16-NEXT: v_dual_mov_b32 v21, v3 :: v_dual_mov_b32 v20, v2
+; GFX11-TRUE16-NEXT: v_dual_mov_b32 v19, v1 :: v_dual_mov_b32 v18, v0
+; GFX11-TRUE16-NEXT: v_mov_b16_e32 v28.h, 0
; GFX11-TRUE16-NEXT: s_lshr_b32 s45, s29, 16
; GFX11-TRUE16-NEXT: s_lshr_b32 s44, s28, 16
; GFX11-TRUE16-NEXT: s_lshr_b32 s42, s27, 16
@@ -48322,30 +50239,29 @@ define inreg <56 x i16> @bitcast_v56f16_to_v56i16_scalar(<56 x half> inreg %a, i
; GFX11-TRUE16-NEXT: s_mov_b32 s46, 0
; GFX11-TRUE16-NEXT: s_and_b32 s47, vcc_lo, exec_lo
; GFX11-TRUE16-NEXT: s_cbranch_scc0 .LBB59_3
-; GFX11-TRUE16-NEXT: ; %bb.1: ; %Flow
+; GFX11-TRUE16-NEXT: ; %bb.1: ; %cmp.false
+; GFX11-TRUE16-NEXT: v_mov_b16_e32 v28.l, v27.h
+; GFX11-TRUE16-NEXT: v_mov_b16_e32 v29.l, v26.h
+; GFX11-TRUE16-NEXT: v_mov_b16_e32 v29.h, v28.h
+; GFX11-TRUE16-NEXT: v_mov_b16_e32 v30.l, v25.h
+; GFX11-TRUE16-NEXT: v_mov_b16_e32 v30.h, v28.h
+; GFX11-TRUE16-NEXT: v_mov_b16_e32 v31.l, v24.h
+; GFX11-TRUE16-NEXT: v_mov_b16_e32 v31.h, v28.h
+; GFX11-TRUE16-NEXT: v_mov_b16_e32 v32.l, v23.h
+; GFX11-TRUE16-NEXT: v_mov_b16_e32 v32.h, v28.h
+; GFX11-TRUE16-NEXT: v_mov_b16_e32 v33.l, v22.h
+; GFX11-TRUE16-NEXT: v_mov_b16_e32 v33.h, v28.h
+; GFX11-TRUE16-NEXT: v_mov_b16_e32 v34.l, v21.h
+; GFX11-TRUE16-NEXT: v_mov_b16_e32 v34.h, v28.h
+; GFX11-TRUE16-NEXT: v_mov_b16_e32 v35.l, v20.h
+; GFX11-TRUE16-NEXT: v_mov_b16_e32 v35.h, v28.h
+; GFX11-TRUE16-NEXT: v_mov_b16_e32 v36.l, v19.h
+; GFX11-TRUE16-NEXT: v_mov_b16_e32 v36.h, v28.h
+; GFX11-TRUE16-NEXT: v_mov_b16_e32 v37.l, v18.h
+; GFX11-TRUE16-NEXT: v_mov_b16_e32 v37.h, v28.h
; GFX11-TRUE16-NEXT: s_and_not1_b32 vcc_lo, exec_lo, s46
; GFX11-TRUE16-NEXT: s_cbranch_vccnz .LBB59_4
; GFX11-TRUE16-NEXT: .LBB59_2: ; %cmp.true
-; GFX11-TRUE16-NEXT: v_and_b32_e32 v9, 0xffff, v9
-; GFX11-TRUE16-NEXT: v_and_b32_e32 v8, 0xffff, v8
-; GFX11-TRUE16-NEXT: v_and_b32_e32 v7, 0xffff, v7
-; GFX11-TRUE16-NEXT: v_and_b32_e32 v6, 0xffff, v6
-; GFX11-TRUE16-NEXT: v_and_b32_e32 v5, 0xffff, v5
-; GFX11-TRUE16-NEXT: v_and_b32_e32 v4, 0xffff, v4
-; GFX11-TRUE16-NEXT: v_and_b32_e32 v3, 0xffff, v3
-; GFX11-TRUE16-NEXT: v_and_b32_e32 v2, 0xffff, v2
-; GFX11-TRUE16-NEXT: v_and_b32_e32 v1, 0xffff, v1
-; GFX11-TRUE16-NEXT: v_and_b32_e32 v0, 0xffff, v0
-; GFX11-TRUE16-NEXT: v_lshl_or_b32 v9, v27, 16, v9
-; GFX11-TRUE16-NEXT: v_lshl_or_b32 v8, v26, 16, v8
-; GFX11-TRUE16-NEXT: v_lshl_or_b32 v7, v25, 16, v7
-; GFX11-TRUE16-NEXT: v_lshl_or_b32 v6, v24, 16, v6
-; GFX11-TRUE16-NEXT: v_lshl_or_b32 v5, v23, 16, v5
-; GFX11-TRUE16-NEXT: v_lshl_or_b32 v4, v22, 16, v4
-; GFX11-TRUE16-NEXT: v_lshl_or_b32 v3, v21, 16, v3
-; GFX11-TRUE16-NEXT: v_lshl_or_b32 v2, v20, 16, v2
-; GFX11-TRUE16-NEXT: v_lshl_or_b32 v1, v19, 16, v1
-; GFX11-TRUE16-NEXT: v_lshl_or_b32 v0, v18, 16, v0
; GFX11-TRUE16-NEXT: s_pack_ll_b32_b16 s29, s29, s45
; GFX11-TRUE16-NEXT: s_pack_ll_b32_b16 s28, s28, s44
; GFX11-TRUE16-NEXT: s_pack_ll_b32_b16 s27, s27, s42
@@ -48364,75 +50280,85 @@ define inreg <56 x i16> @bitcast_v56f16_to_v56i16_scalar(<56 x half> inreg %a, i
; GFX11-TRUE16-NEXT: s_pack_ll_b32_b16 s2, s2, s7
; GFX11-TRUE16-NEXT: s_pack_ll_b32_b16 s0, s0, s43
; GFX11-TRUE16-NEXT: s_pack_ll_b32_b16 s1, s1, s4
-; GFX11-TRUE16-NEXT: v_pk_add_f16 v9, 0x200, v9 op_sel_hi:[0,1]
-; GFX11-TRUE16-NEXT: v_pk_add_f16 v8, 0x200, v8 op_sel_hi:[0,1]
-; GFX11-TRUE16-NEXT: v_pk_add_f16 v7, 0x200, v7 op_sel_hi:[0,1]
-; GFX11-TRUE16-NEXT: v_pk_add_f16 v6, 0x200, v6 op_sel_hi:[0,1]
-; GFX11-TRUE16-NEXT: v_pk_add_f16 v5, 0x200, v5 op_sel_hi:[0,1]
-; GFX11-TRUE16-NEXT: v_pk_add_f16 v4, 0x200, v4 op_sel_hi:[0,1]
-; GFX11-TRUE16-NEXT: v_pk_add_f16 v3, 0x200, v3 op_sel_hi:[0,1]
-; GFX11-TRUE16-NEXT: v_pk_add_f16 v2, 0x200, v2 op_sel_hi:[0,1]
-; GFX11-TRUE16-NEXT: v_pk_add_f16 v1, 0x200, v1 op_sel_hi:[0,1]
-; GFX11-TRUE16-NEXT: v_pk_add_f16 v0, 0x200, v0 op_sel_hi:[0,1]
-; GFX11-TRUE16-NEXT: v_pk_add_f16 v13, 0x200, s29 op_sel_hi:[0,1]
-; GFX11-TRUE16-NEXT: v_pk_add_f16 v14, 0x200, s28 op_sel_hi:[0,1]
+; GFX11-TRUE16-NEXT: v_pk_add_f16 v27, 0x200, v27 op_sel_hi:[0,1]
+; GFX11-TRUE16-NEXT: v_pk_add_f16 v26, 0x200, v26 op_sel_hi:[0,1]
+; GFX11-TRUE16-NEXT: v_pk_add_f16 v25, 0x200, v25 op_sel_hi:[0,1]
+; GFX11-TRUE16-NEXT: v_pk_add_f16 v24, 0x200, v24 op_sel_hi:[0,1]
+; GFX11-TRUE16-NEXT: v_pk_add_f16 v23, 0x200, v23 op_sel_hi:[0,1]
+; GFX11-TRUE16-NEXT: v_pk_add_f16 v22, 0x200, v22 op_sel_hi:[0,1]
+; GFX11-TRUE16-NEXT: v_pk_add_f16 v21, 0x200, v21 op_sel_hi:[0,1]
+; GFX11-TRUE16-NEXT: v_pk_add_f16 v20, 0x200, v20 op_sel_hi:[0,1]
+; GFX11-TRUE16-NEXT: v_pk_add_f16 v19, 0x200, v19 op_sel_hi:[0,1]
+; GFX11-TRUE16-NEXT: v_pk_add_f16 v18, 0x200, v18 op_sel_hi:[0,1]
+; GFX11-TRUE16-NEXT: v_pk_add_f16 v17, 0x200, s29 op_sel_hi:[0,1]
+; GFX11-TRUE16-NEXT: v_pk_add_f16 v16, 0x200, s28 op_sel_hi:[0,1]
; GFX11-TRUE16-NEXT: v_pk_add_f16 v15, 0x200, s27 op_sel_hi:[0,1]
-; GFX11-TRUE16-NEXT: v_pk_add_f16 v16, 0x200, s26 op_sel_hi:[0,1]
-; GFX11-TRUE16-NEXT: v_pk_add_f16 v17, 0x200, s25 op_sel_hi:[0,1]
-; GFX11-TRUE16-NEXT: v_pk_add_f16 v10, 0x200, s15 op_sel_hi:[0,1]
+; GFX11-TRUE16-NEXT: v_pk_add_f16 v14, 0x200, s26 op_sel_hi:[0,1]
+; GFX11-TRUE16-NEXT: v_pk_add_f16 v13, 0x200, s25 op_sel_hi:[0,1]
+; GFX11-TRUE16-NEXT: v_pk_add_f16 v12, 0x200, s15 op_sel_hi:[0,1]
; GFX11-TRUE16-NEXT: v_pk_add_f16 v11, 0x200, s14 op_sel_hi:[0,1]
-; GFX11-TRUE16-NEXT: v_pk_add_f16 v12, 0x200, s13 op_sel_hi:[0,1]
-; GFX11-TRUE16-NEXT: v_pk_add_f16 v28, 0x200, s12 op_sel_hi:[0,1]
-; GFX11-TRUE16-NEXT: v_pk_add_f16 v29, 0x200, s11 op_sel_hi:[0,1]
-; GFX11-TRUE16-NEXT: v_pk_add_f16 v33, 0x200, s10 op_sel_hi:[0,1]
-; GFX11-TRUE16-NEXT: v_pk_add_f16 v34, 0x200, s9 op_sel_hi:[0,1]
-; GFX11-TRUE16-NEXT: v_pk_add_f16 v35, 0x200, s8 op_sel_hi:[0,1]
-; GFX11-TRUE16-NEXT: v_pk_add_f16 v32, 0x200, s0 op_sel_hi:[0,1]
-; GFX11-TRUE16-NEXT: v_pk_add_f16 v31, 0x200, s1 op_sel_hi:[0,1]
-; GFX11-TRUE16-NEXT: v_pk_add_f16 v30, 0x200, s2 op_sel_hi:[0,1]
-; GFX11-TRUE16-NEXT: v_pk_add_f16 v37, 0x200, s3 op_sel_hi:[0,1]
-; GFX11-TRUE16-NEXT: v_pk_add_f16 v36, 0x200, s6 op_sel_hi:[0,1]
-; GFX11-TRUE16-NEXT: v_lshrrev_b32_e32 v71, 16, v32
-; GFX11-TRUE16-NEXT: v_lshrrev_b32_e32 v70, 16, v31
-; GFX11-TRUE16-NEXT: v_lshrrev_b32_e32 v69, 16, v30
-; GFX11-TRUE16-NEXT: v_lshrrev_b32_e32 v68, 16, v37
-; GFX11-TRUE16-NEXT: v_lshrrev_b32_e32 v67, 16, v36
-; GFX11-TRUE16-NEXT: v_lshrrev_b32_e32 v66, 16, v35
-; GFX11-TRUE16-NEXT: v_lshrrev_b32_e32 v65, 16, v34
-; GFX11-TRUE16-NEXT: v_lshrrev_b32_e32 v64, 16, v33
-; GFX11-TRUE16-NEXT: v_lshrrev_b32_e32 v55, 16, v29
-; GFX11-TRUE16-NEXT: v_lshrrev_b32_e32 v54, 16, v28
-; GFX11-TRUE16-NEXT: v_lshrrev_b32_e32 v53, 16, v12
+; GFX11-TRUE16-NEXT: v_pk_add_f16 v10, 0x200, s13 op_sel_hi:[0,1]
+; GFX11-TRUE16-NEXT: v_pk_add_f16 v9, 0x200, s12 op_sel_hi:[0,1]
+; GFX11-TRUE16-NEXT: v_pk_add_f16 v8, 0x200, s11 op_sel_hi:[0,1]
+; GFX11-TRUE16-NEXT: v_pk_add_f16 v7, 0x200, s10 op_sel_hi:[0,1]
+; GFX11-TRUE16-NEXT: v_pk_add_f16 v6, 0x200, s9 op_sel_hi:[0,1]
+; GFX11-TRUE16-NEXT: v_pk_add_f16 v5, 0x200, s8 op_sel_hi:[0,1]
+; GFX11-TRUE16-NEXT: v_pk_add_f16 v0, 0x200, s0 op_sel_hi:[0,1]
+; GFX11-TRUE16-NEXT: v_pk_add_f16 v1, 0x200, s1 op_sel_hi:[0,1]
+; GFX11-TRUE16-NEXT: v_pk_add_f16 v2, 0x200, s2 op_sel_hi:[0,1]
+; GFX11-TRUE16-NEXT: v_pk_add_f16 v3, 0x200, s3 op_sel_hi:[0,1]
+; GFX11-TRUE16-NEXT: v_pk_add_f16 v4, 0x200, s6 op_sel_hi:[0,1]
+; GFX11-TRUE16-NEXT: v_lshrrev_b32_e32 v71, 16, v0
+; GFX11-TRUE16-NEXT: v_lshrrev_b32_e32 v70, 16, v1
+; GFX11-TRUE16-NEXT: v_lshrrev_b32_e32 v69, 16, v2
+; GFX11-TRUE16-NEXT: v_lshrrev_b32_e32 v68, 16, v3
+; GFX11-TRUE16-NEXT: v_lshrrev_b32_e32 v67, 16, v4
+; GFX11-TRUE16-NEXT: v_lshrrev_b32_e32 v66, 16, v5
+; GFX11-TRUE16-NEXT: v_lshrrev_b32_e32 v65, 16, v6
+; GFX11-TRUE16-NEXT: v_lshrrev_b32_e32 v64, 16, v7
+; GFX11-TRUE16-NEXT: v_lshrrev_b32_e32 v55, 16, v8
+; GFX11-TRUE16-NEXT: v_lshrrev_b32_e32 v54, 16, v9
+; GFX11-TRUE16-NEXT: v_lshrrev_b32_e32 v53, 16, v10
; GFX11-TRUE16-NEXT: v_lshrrev_b32_e32 v52, 16, v11
-; GFX11-TRUE16-NEXT: v_lshrrev_b32_e32 v51, 16, v10
-; GFX11-TRUE16-NEXT: v_lshrrev_b32_e32 v50, 16, v17
-; GFX11-TRUE16-NEXT: v_lshrrev_b32_e32 v49, 16, v16
+; GFX11-TRUE16-NEXT: v_lshrrev_b32_e32 v51, 16, v12
+; GFX11-TRUE16-NEXT: v_lshrrev_b32_e32 v50, 16, v13
+; GFX11-TRUE16-NEXT: v_lshrrev_b32_e32 v49, 16, v14
; GFX11-TRUE16-NEXT: v_lshrrev_b32_e32 v48, 16, v15
-; GFX11-TRUE16-NEXT: v_lshrrev_b32_e32 v39, 16, v14
-; GFX11-TRUE16-NEXT: v_lshrrev_b32_e32 v38, 16, v13
-; GFX11-TRUE16-NEXT: v_lshrrev_b32_e32 v18, 16, v0
-; GFX11-TRUE16-NEXT: v_lshrrev_b32_e32 v19, 16, v1
-; GFX11-TRUE16-NEXT: v_lshrrev_b32_e32 v20, 16, v2
-; GFX11-TRUE16-NEXT: v_lshrrev_b32_e32 v21, 16, v3
-; GFX11-TRUE16-NEXT: v_lshrrev_b32_e32 v22, 16, v4
-; GFX11-TRUE16-NEXT: v_lshrrev_b32_e32 v23, 16, v5
-; GFX11-TRUE16-NEXT: v_lshrrev_b32_e32 v24, 16, v6
-; GFX11-TRUE16-NEXT: v_lshrrev_b32_e32 v25, 16, v7
-; GFX11-TRUE16-NEXT: v_lshrrev_b32_e32 v26, 16, v8
-; GFX11-TRUE16-NEXT: v_lshrrev_b32_e32 v27, 16, v9
+; GFX11-TRUE16-NEXT: v_lshrrev_b32_e32 v39, 16, v16
+; GFX11-TRUE16-NEXT: v_lshrrev_b32_e32 v38, 16, v17
+; GFX11-TRUE16-NEXT: v_lshrrev_b32_e32 v37, 16, v18
+; GFX11-TRUE16-NEXT: v_lshrrev_b32_e32 v36, 16, v19
+; GFX11-TRUE16-NEXT: v_lshrrev_b32_e32 v35, 16, v20
+; GFX11-TRUE16-NEXT: v_lshrrev_b32_e32 v34, 16, v21
+; GFX11-TRUE16-NEXT: v_lshrrev_b32_e32 v33, 16, v22
+; GFX11-TRUE16-NEXT: v_lshrrev_b32_e32 v32, 16, v23
+; GFX11-TRUE16-NEXT: v_lshrrev_b32_e32 v31, 16, v24
+; GFX11-TRUE16-NEXT: v_lshrrev_b32_e32 v30, 16, v25
+; GFX11-TRUE16-NEXT: v_lshrrev_b32_e32 v29, 16, v26
+; GFX11-TRUE16-NEXT: v_lshrrev_b32_e32 v28, 16, v27
; GFX11-TRUE16-NEXT: s_branch .LBB59_5
; GFX11-TRUE16-NEXT: .LBB59_3:
+; GFX11-TRUE16-NEXT: ; implicit-def: $vgpr37
+; GFX11-TRUE16-NEXT: ; implicit-def: $vgpr36
+; GFX11-TRUE16-NEXT: ; implicit-def: $vgpr35
+; GFX11-TRUE16-NEXT: ; implicit-def: $vgpr34
+; GFX11-TRUE16-NEXT: ; implicit-def: $vgpr33
+; GFX11-TRUE16-NEXT: ; implicit-def: $vgpr32
+; GFX11-TRUE16-NEXT: ; implicit-def: $vgpr31
+; GFX11-TRUE16-NEXT: ; implicit-def: $vgpr30
+; GFX11-TRUE16-NEXT: ; implicit-def: $vgpr29
+; GFX11-TRUE16-NEXT: ; implicit-def: $vgpr28
; GFX11-TRUE16-NEXT: s_branch .LBB59_2
; GFX11-TRUE16-NEXT: .LBB59_4:
-; GFX11-TRUE16-NEXT: v_dual_mov_b32 v13, s29 :: v_dual_mov_b32 v14, s28
-; GFX11-TRUE16-NEXT: v_dual_mov_b32 v15, s27 :: v_dual_mov_b32 v16, s26
-; GFX11-TRUE16-NEXT: v_dual_mov_b32 v17, s25 :: v_dual_mov_b32 v10, s24
-; GFX11-TRUE16-NEXT: v_dual_mov_b32 v11, s23 :: v_dual_mov_b32 v12, s22
-; GFX11-TRUE16-NEXT: v_dual_mov_b32 v28, s21 :: v_dual_mov_b32 v29, s20
-; GFX11-TRUE16-NEXT: v_dual_mov_b32 v33, s19 :: v_dual_mov_b32 v34, s18
-; GFX11-TRUE16-NEXT: v_dual_mov_b32 v35, s17 :: v_dual_mov_b32 v36, s16
-; GFX11-TRUE16-NEXT: v_dual_mov_b32 v37, s3 :: v_dual_mov_b32 v30, s2
-; GFX11-TRUE16-NEXT: v_dual_mov_b32 v31, s1 :: v_dual_mov_b32 v32, s0
+; GFX11-TRUE16-NEXT: v_dual_mov_b32 v17, s29 :: v_dual_mov_b32 v16, s28
+; GFX11-TRUE16-NEXT: v_dual_mov_b32 v15, s27 :: v_dual_mov_b32 v14, s26
+; GFX11-TRUE16-NEXT: v_dual_mov_b32 v13, s25 :: v_dual_mov_b32 v12, s24
+; GFX11-TRUE16-NEXT: v_dual_mov_b32 v11, s23 :: v_dual_mov_b32 v10, s22
+; GFX11-TRUE16-NEXT: v_dual_mov_b32 v9, s21 :: v_dual_mov_b32 v8, s20
+; GFX11-TRUE16-NEXT: v_dual_mov_b32 v7, s19 :: v_dual_mov_b32 v6, s18
+; GFX11-TRUE16-NEXT: v_dual_mov_b32 v5, s17 :: v_dual_mov_b32 v4, s16
+; GFX11-TRUE16-NEXT: v_dual_mov_b32 v3, s3 :: v_dual_mov_b32 v2, s2
+; GFX11-TRUE16-NEXT: v_dual_mov_b32 v1, s1 :: v_dual_mov_b32 v0, s0
; GFX11-TRUE16-NEXT: v_dual_mov_b32 v38, s45 :: v_dual_mov_b32 v39, s44
; GFX11-TRUE16-NEXT: v_dual_mov_b32 v48, s42 :: v_dual_mov_b32 v49, s41
; GFX11-TRUE16-NEXT: v_dual_mov_b32 v50, s40 :: v_dual_mov_b32 v51, s15
@@ -48443,69 +50369,49 @@ define inreg <56 x i16> @bitcast_v56f16_to_v56i16_scalar(<56 x half> inreg %a, i
; GFX11-TRUE16-NEXT: v_dual_mov_b32 v68, s5 :: v_dual_mov_b32 v69, s7
; GFX11-TRUE16-NEXT: v_dual_mov_b32 v70, s4 :: v_dual_mov_b32 v71, s43
; GFX11-TRUE16-NEXT: .LBB59_5: ; %end
-; GFX11-TRUE16-NEXT: v_and_b32_e32 v31, 0xffff, v31
-; GFX11-TRUE16-NEXT: v_and_b32_e32 v37, 0xffff, v37
-; GFX11-TRUE16-NEXT: v_and_b32_e32 v1, 0xffff, v1
-; GFX11-TRUE16-NEXT: v_and_b32_e32 v3, 0xffff, v3
-; GFX11-TRUE16-NEXT: v_and_b32_e32 v35, 0xffff, v35
-; GFX11-TRUE16-NEXT: v_lshl_or_b32 v31, v70, 16, v31
-; GFX11-TRUE16-NEXT: v_and_b32_e32 v32, 0xffff, v32
-; GFX11-TRUE16-NEXT: v_and_b32_e32 v70, 0xffff, v33
-; GFX11-TRUE16-NEXT: v_lshl_or_b32 v33, v68, 16, v37
-; GFX11-TRUE16-NEXT: v_and_b32_e32 v80, 0xffff, v30
-; GFX11-TRUE16-NEXT: v_lshl_or_b32 v19, v19, 16, v1
-; GFX11-TRUE16-NEXT: v_lshl_or_b32 v21, v21, 16, v3
-; GFX11-TRUE16-NEXT: v_and_b32_e32 v1, 0xffff, v6
-; GFX11-TRUE16-NEXT: v_and_b32_e32 v3, 0xffff, v8
-; GFX11-TRUE16-NEXT: v_lshl_or_b32 v35, v66, 16, v35
-; GFX11-TRUE16-NEXT: v_and_b32_e32 v36, 0xffff, v36
-; GFX11-TRUE16-NEXT: v_and_b32_e32 v0, 0xffff, v0
-; GFX11-TRUE16-NEXT: v_lshl_or_b32 v24, v24, 16, v1
-; GFX11-TRUE16-NEXT: v_lshl_or_b32 v26, v26, 16, v3
-; GFX11-TRUE16-NEXT: v_mov_b32_e32 v1, v31
-; GFX11-TRUE16-NEXT: v_lshl_or_b32 v30, v71, 16, v32
-; GFX11-TRUE16-NEXT: v_mov_b32_e32 v3, v33
-; GFX11-TRUE16-NEXT: v_lshl_or_b32 v32, v69, 16, v80
-; GFX11-TRUE16-NEXT: v_and_b32_e32 v69, 0xffff, v34
-; GFX11-TRUE16-NEXT: v_lshl_or_b32 v37, v64, 16, v70
-; GFX11-TRUE16-NEXT: v_and_b32_e32 v29, 0xffff, v29
-; GFX11-TRUE16-NEXT: v_and_b32_e32 v64, 0xffff, v28
-; GFX11-TRUE16-NEXT: v_and_b32_e32 v12, 0xffff, v12
-; GFX11-TRUE16-NEXT: v_and_b32_e32 v11, 0xffff, v11
-; GFX11-TRUE16-NEXT: v_and_b32_e32 v2, 0xffff, v2
-; GFX11-TRUE16-NEXT: v_and_b32_e32 v4, 0xffff, v4
-; GFX11-TRUE16-NEXT: v_lshl_or_b32 v18, v18, 16, v0
-; GFX11-TRUE16-NEXT: v_dual_mov_b32 v5, v35 :: v_dual_and_b32 v0, 0xffff, v5
-; GFX11-TRUE16-NEXT: v_lshl_or_b32 v34, v67, 16, v36
-; GFX11-TRUE16-NEXT: v_lshl_or_b32 v36, v65, 16, v69
-; GFX11-TRUE16-NEXT: v_and_b32_e32 v65, 0xffff, v10
-; GFX11-TRUE16-NEXT: v_lshl_or_b32 v28, v55, 16, v29
-; GFX11-TRUE16-NEXT: v_lshl_or_b32 v29, v54, 16, v64
-; GFX11-TRUE16-NEXT: v_lshl_or_b32 v10, v53, 16, v12
-; GFX11-TRUE16-NEXT: v_lshl_or_b32 v11, v52, 16, v11
-; GFX11-TRUE16-NEXT: v_lshl_or_b32 v12, v51, 16, v65
-; GFX11-TRUE16-NEXT: v_and_b32_e32 v17, 0xffff, v17
-; GFX11-TRUE16-NEXT: v_and_b32_e32 v16, 0xffff, v16
-; GFX11-TRUE16-NEXT: v_and_b32_e32 v15, 0xffff, v15
-; GFX11-TRUE16-NEXT: v_and_b32_e32 v51, 0xffff, v14
-; GFX11-TRUE16-NEXT: v_and_b32_e32 v52, 0xffff, v13
-; GFX11-TRUE16-NEXT: v_lshl_or_b32 v20, v20, 16, v2
-; GFX11-TRUE16-NEXT: v_lshl_or_b32 v22, v22, 16, v4
-; GFX11-TRUE16-NEXT: v_and_b32_e32 v2, 0xffff, v7
-; GFX11-TRUE16-NEXT: v_and_b32_e32 v4, 0xffff, v9
-; GFX11-TRUE16-NEXT: v_lshl_or_b32 v13, v50, 16, v17
-; GFX11-TRUE16-NEXT: v_lshl_or_b32 v14, v49, 16, v16
-; GFX11-TRUE16-NEXT: v_lshl_or_b32 v15, v48, 16, v15
-; GFX11-TRUE16-NEXT: v_lshl_or_b32 v16, v39, 16, v51
-; GFX11-TRUE16-NEXT: v_lshl_or_b32 v17, v38, 16, v52
-; GFX11-TRUE16-NEXT: v_lshl_or_b32 v23, v23, 16, v0
-; GFX11-TRUE16-NEXT: v_lshl_or_b32 v25, v25, 16, v2
-; GFX11-TRUE16-NEXT: v_lshl_or_b32 v27, v27, 16, v4
-; GFX11-TRUE16-NEXT: v_mov_b32_e32 v0, v30
-; GFX11-TRUE16-NEXT: v_mov_b32_e32 v2, v32
-; GFX11-TRUE16-NEXT: v_mov_b32_e32 v4, v34
-; GFX11-TRUE16-NEXT: v_dual_mov_b32 v6, v36 :: v_dual_mov_b32 v7, v37
-; GFX11-TRUE16-NEXT: v_dual_mov_b32 v8, v28 :: v_dual_mov_b32 v9, v29
+; GFX11-TRUE16-NEXT: s_delay_alu instid0(VALU_DEP_1) | instskip(NEXT) | instid1(VALU_DEP_3)
+; GFX11-TRUE16-NEXT: v_dual_mov_b32 v71, v71 :: v_dual_mov_b32 v70, v70
+; GFX11-TRUE16-NEXT: v_dual_mov_b32 v69, v69 :: v_dual_mov_b32 v68, v68
+; GFX11-TRUE16-NEXT: v_dual_mov_b32 v67, v67 :: v_dual_mov_b32 v66, v66
+; GFX11-TRUE16-NEXT: v_dual_mov_b32 v65, v65 :: v_dual_mov_b32 v64, v64
+; GFX11-TRUE16-NEXT: v_dual_mov_b32 v55, v55 :: v_dual_mov_b32 v54, v54
+; GFX11-TRUE16-NEXT: v_dual_mov_b32 v53, v53 :: v_dual_mov_b32 v52, v52
+; GFX11-TRUE16-NEXT: v_dual_mov_b32 v51, v51 :: v_dual_mov_b32 v50, v50
+; GFX11-TRUE16-NEXT: v_dual_mov_b32 v49, v49 :: v_dual_mov_b32 v48, v48
+; GFX11-TRUE16-NEXT: v_dual_mov_b32 v39, v39 :: v_dual_mov_b32 v38, v38
+; GFX11-TRUE16-NEXT: v_dual_mov_b32 v37, v37 :: v_dual_mov_b32 v36, v36
+; GFX11-TRUE16-NEXT: v_dual_mov_b32 v35, v35 :: v_dual_mov_b32 v34, v34
+; GFX11-TRUE16-NEXT: v_dual_mov_b32 v33, v33 :: v_dual_mov_b32 v32, v32
+; GFX11-TRUE16-NEXT: v_dual_mov_b32 v31, v31 :: v_dual_mov_b32 v30, v30
+; GFX11-TRUE16-NEXT: v_dual_mov_b32 v29, v29 :: v_dual_mov_b32 v28, v28
+; GFX11-TRUE16-NEXT: v_mov_b16_e32 v0.h, v71.l
+; GFX11-TRUE16-NEXT: v_mov_b16_e32 v1.h, v70.l
+; GFX11-TRUE16-NEXT: v_mov_b16_e32 v2.h, v69.l
+; GFX11-TRUE16-NEXT: v_mov_b16_e32 v3.h, v68.l
+; GFX11-TRUE16-NEXT: v_mov_b16_e32 v4.h, v67.l
+; GFX11-TRUE16-NEXT: v_mov_b16_e32 v5.h, v66.l
+; GFX11-TRUE16-NEXT: v_mov_b16_e32 v6.h, v65.l
+; GFX11-TRUE16-NEXT: v_mov_b16_e32 v7.h, v64.l
+; GFX11-TRUE16-NEXT: v_mov_b16_e32 v8.h, v55.l
+; GFX11-TRUE16-NEXT: v_mov_b16_e32 v9.h, v54.l
+; GFX11-TRUE16-NEXT: v_mov_b16_e32 v10.h, v53.l
+; GFX11-TRUE16-NEXT: v_mov_b16_e32 v11.h, v52.l
+; GFX11-TRUE16-NEXT: v_mov_b16_e32 v12.h, v51.l
+; GFX11-TRUE16-NEXT: v_mov_b16_e32 v13.h, v50.l
+; GFX11-TRUE16-NEXT: v_mov_b16_e32 v14.h, v49.l
+; GFX11-TRUE16-NEXT: v_mov_b16_e32 v15.h, v48.l
+; GFX11-TRUE16-NEXT: v_mov_b16_e32 v16.h, v39.l
+; GFX11-TRUE16-NEXT: v_mov_b16_e32 v17.h, v38.l
+; GFX11-TRUE16-NEXT: v_mov_b16_e32 v18.h, v37.l
+; GFX11-TRUE16-NEXT: v_mov_b16_e32 v19.h, v36.l
+; GFX11-TRUE16-NEXT: v_mov_b16_e32 v20.h, v35.l
+; GFX11-TRUE16-NEXT: v_mov_b16_e32 v21.h, v34.l
+; GFX11-TRUE16-NEXT: v_mov_b16_e32 v22.h, v33.l
+; GFX11-TRUE16-NEXT: v_mov_b16_e32 v23.h, v32.l
+; GFX11-TRUE16-NEXT: v_mov_b16_e32 v24.h, v31.l
+; GFX11-TRUE16-NEXT: v_mov_b16_e32 v25.h, v30.l
+; GFX11-TRUE16-NEXT: v_mov_b16_e32 v26.h, v29.l
+; GFX11-TRUE16-NEXT: v_mov_b16_e32 v27.h, v28.l
; GFX11-TRUE16-NEXT: s_setpc_b64 s[30:31]
;
; GFX11-FAKE16-LABEL: bitcast_v56f16_to_v56i16_scalar:
diff --git a/llvm/test/CodeGen/AMDGPU/amdgcn.bitcast.960bit.ll b/llvm/test/CodeGen/AMDGPU/amdgcn.bitcast.960bit.ll
index 967f1a9..93c11f1 100644
--- a/llvm/test/CodeGen/AMDGPU/amdgcn.bitcast.960bit.ll
+++ b/llvm/test/CodeGen/AMDGPU/amdgcn.bitcast.960bit.ll
@@ -7240,153 +7240,305 @@ define inreg <30 x i32> @bitcast_v60i16_to_v30i32_scalar(<60 x i16> inreg %a, i3
; GFX11-TRUE16-LABEL: bitcast_v60i16_to_v30i32_scalar:
; GFX11-TRUE16: ; %bb.0:
; GFX11-TRUE16-NEXT: s_waitcnt vmcnt(0) expcnt(0) lgkmcnt(0)
-; GFX11-TRUE16-NEXT: v_mov_b16_e32 v32.h, 0
; GFX11-TRUE16-NEXT: v_cmp_ne_u32_e32 vcc_lo, 0, v12
-; GFX11-TRUE16-NEXT: v_mov_b16_e32 v32.l, v11.h
-; GFX11-TRUE16-NEXT: v_mov_b16_e32 v33.l, v10.h
-; GFX11-TRUE16-NEXT: v_mov_b16_e32 v34.l, v9.h
-; GFX11-TRUE16-NEXT: v_mov_b16_e32 v33.h, v32.h
-; GFX11-TRUE16-NEXT: v_mov_b16_e32 v34.h, v32.h
-; GFX11-TRUE16-NEXT: v_mov_b16_e32 v35.l, v8.h
-; GFX11-TRUE16-NEXT: v_mov_b16_e32 v35.h, v32.h
-; GFX11-TRUE16-NEXT: v_mov_b16_e32 v36.l, v7.h
-; GFX11-TRUE16-NEXT: v_mov_b16_e32 v36.h, v32.h
-; GFX11-TRUE16-NEXT: v_mov_b16_e32 v37.l, v6.h
-; GFX11-TRUE16-NEXT: v_mov_b16_e32 v37.h, v32.h
-; GFX11-TRUE16-NEXT: v_mov_b16_e32 v38.l, v5.h
-; GFX11-TRUE16-NEXT: v_mov_b16_e32 v38.h, v32.h
-; GFX11-TRUE16-NEXT: v_mov_b16_e32 v39.l, v4.h
-; GFX11-TRUE16-NEXT: v_mov_b16_e32 v39.h, v32.h
-; GFX11-TRUE16-NEXT: v_mov_b16_e32 v48.l, v3.h
-; GFX11-TRUE16-NEXT: v_mov_b16_e32 v48.h, v32.h
-; GFX11-TRUE16-NEXT: v_mov_b16_e32 v49.l, v2.h
-; GFX11-TRUE16-NEXT: v_mov_b16_e32 v49.h, v32.h
-; GFX11-TRUE16-NEXT: v_mov_b16_e32 v50.l, v1.h
-; GFX11-TRUE16-NEXT: v_mov_b16_e32 v50.h, v32.h
-; GFX11-TRUE16-NEXT: v_mov_b16_e32 v51.l, v0.h
-; GFX11-TRUE16-NEXT: v_mov_b16_e32 v51.h, v32.h
-; GFX11-TRUE16-NEXT: v_and_b32_e32 v71, 0xffff, v0
-; GFX11-TRUE16-NEXT: v_and_b32_e32 v70, 0xffff, v1
-; GFX11-TRUE16-NEXT: v_and_b32_e32 v69, 0xffff, v2
-; GFX11-TRUE16-NEXT: v_and_b32_e32 v68, 0xffff, v3
-; GFX11-TRUE16-NEXT: v_and_b32_e32 v67, 0xffff, v4
-; GFX11-TRUE16-NEXT: v_and_b32_e32 v66, 0xffff, v5
-; GFX11-TRUE16-NEXT: v_and_b32_e32 v65, 0xffff, v6
-; GFX11-TRUE16-NEXT: v_and_b32_e32 v64, 0xffff, v7
-; GFX11-TRUE16-NEXT: v_and_b32_e32 v55, 0xffff, v8
-; GFX11-TRUE16-NEXT: v_and_b32_e32 v54, 0xffff, v9
-; GFX11-TRUE16-NEXT: v_and_b32_e32 v53, 0xffff, v10
-; GFX11-TRUE16-NEXT: v_and_b32_e32 v52, 0xffff, v11
-; GFX11-TRUE16-NEXT: s_lshr_b32 s40, s29, 16
-; GFX11-TRUE16-NEXT: s_lshr_b32 s41, s28, 16
-; GFX11-TRUE16-NEXT: s_lshr_b32 s42, s27, 16
-; GFX11-TRUE16-NEXT: s_lshr_b32 s14, s26, 16
-; GFX11-TRUE16-NEXT: s_lshr_b32 s13, s25, 16
-; GFX11-TRUE16-NEXT: s_lshr_b32 s12, s24, 16
-; GFX11-TRUE16-NEXT: s_lshr_b32 s11, s23, 16
-; GFX11-TRUE16-NEXT: s_lshr_b32 s10, s22, 16
-; GFX11-TRUE16-NEXT: s_lshr_b32 s9, s21, 16
-; GFX11-TRUE16-NEXT: s_lshr_b32 s8, s20, 16
-; GFX11-TRUE16-NEXT: s_lshr_b32 s7, s19, 16
-; GFX11-TRUE16-NEXT: s_lshr_b32 s6, s18, 16
-; GFX11-TRUE16-NEXT: s_lshr_b32 s5, s17, 16
-; GFX11-TRUE16-NEXT: s_lshr_b32 s4, s16, 16
-; GFX11-TRUE16-NEXT: s_lshr_b32 s43, s3, 16
-; GFX11-TRUE16-NEXT: s_lshr_b32 s44, s2, 16
-; GFX11-TRUE16-NEXT: s_lshr_b32 s45, s1, 16
-; GFX11-TRUE16-NEXT: s_lshr_b32 s46, s0, 16
-; GFX11-TRUE16-NEXT: s_mov_b32 s15, 0
-; GFX11-TRUE16-NEXT: s_pack_ll_b32_b16 s0, s0, s46
-; GFX11-TRUE16-NEXT: s_pack_ll_b32_b16 s1, s1, s45
-; GFX11-TRUE16-NEXT: s_pack_ll_b32_b16 s2, s2, s44
-; GFX11-TRUE16-NEXT: s_pack_ll_b32_b16 s3, s3, s43
-; GFX11-TRUE16-NEXT: s_pack_ll_b32_b16 s4, s16, s4
-; GFX11-TRUE16-NEXT: s_pack_ll_b32_b16 s5, s17, s5
-; GFX11-TRUE16-NEXT: s_pack_ll_b32_b16 s6, s18, s6
-; GFX11-TRUE16-NEXT: s_pack_ll_b32_b16 s7, s19, s7
-; GFX11-TRUE16-NEXT: s_pack_ll_b32_b16 s8, s20, s8
-; GFX11-TRUE16-NEXT: s_pack_ll_b32_b16 s9, s21, s9
-; GFX11-TRUE16-NEXT: s_pack_ll_b32_b16 s10, s22, s10
-; GFX11-TRUE16-NEXT: s_pack_ll_b32_b16 s11, s23, s11
-; GFX11-TRUE16-NEXT: s_pack_ll_b32_b16 s12, s24, s12
-; GFX11-TRUE16-NEXT: s_pack_ll_b32_b16 s13, s25, s13
-; GFX11-TRUE16-NEXT: s_pack_ll_b32_b16 s14, s26, s14
-; GFX11-TRUE16-NEXT: s_pack_ll_b32_b16 s16, s27, s42
-; GFX11-TRUE16-NEXT: s_pack_ll_b32_b16 s17, s28, s41
-; GFX11-TRUE16-NEXT: s_pack_ll_b32_b16 s18, s29, s40
+; GFX11-TRUE16-NEXT: s_clause 0x1f
+; GFX11-TRUE16-NEXT: scratch_store_b32 off, v40, s32 offset:316
+; GFX11-TRUE16-NEXT: scratch_store_b32 off, v41, s32 offset:312
+; GFX11-TRUE16-NEXT: scratch_store_b32 off, v42, s32 offset:308
+; GFX11-TRUE16-NEXT: scratch_store_b32 off, v43, s32 offset:304
+; GFX11-TRUE16-NEXT: scratch_store_b32 off, v44, s32 offset:300
+; GFX11-TRUE16-NEXT: scratch_store_b32 off, v45, s32 offset:296
+; GFX11-TRUE16-NEXT: scratch_store_b32 off, v46, s32 offset:292
+; GFX11-TRUE16-NEXT: scratch_store_b32 off, v47, s32 offset:288
+; GFX11-TRUE16-NEXT: scratch_store_b32 off, v56, s32 offset:284
+; GFX11-TRUE16-NEXT: scratch_store_b32 off, v57, s32 offset:280
+; GFX11-TRUE16-NEXT: scratch_store_b32 off, v58, s32 offset:276
+; GFX11-TRUE16-NEXT: scratch_store_b32 off, v59, s32 offset:272
+; GFX11-TRUE16-NEXT: scratch_store_b32 off, v60, s32 offset:268
+; GFX11-TRUE16-NEXT: scratch_store_b32 off, v61, s32 offset:264
+; GFX11-TRUE16-NEXT: scratch_store_b32 off, v62, s32 offset:260
+; GFX11-TRUE16-NEXT: scratch_store_b32 off, v63, s32 offset:256
+; GFX11-TRUE16-NEXT: scratch_store_b32 off, v72, s32 offset:252
+; GFX11-TRUE16-NEXT: scratch_store_b32 off, v73, s32 offset:248
+; GFX11-TRUE16-NEXT: scratch_store_b32 off, v74, s32 offset:244
+; GFX11-TRUE16-NEXT: scratch_store_b32 off, v75, s32 offset:240
+; GFX11-TRUE16-NEXT: scratch_store_b32 off, v76, s32 offset:236
+; GFX11-TRUE16-NEXT: scratch_store_b32 off, v77, s32 offset:232
+; GFX11-TRUE16-NEXT: scratch_store_b32 off, v78, s32 offset:228
+; GFX11-TRUE16-NEXT: scratch_store_b32 off, v79, s32 offset:224
+; GFX11-TRUE16-NEXT: scratch_store_b32 off, v88, s32 offset:220
+; GFX11-TRUE16-NEXT: scratch_store_b32 off, v89, s32 offset:216
+; GFX11-TRUE16-NEXT: scratch_store_b32 off, v90, s32 offset:212
+; GFX11-TRUE16-NEXT: scratch_store_b32 off, v91, s32 offset:208
+; GFX11-TRUE16-NEXT: scratch_store_b32 off, v92, s32 offset:204
+; GFX11-TRUE16-NEXT: scratch_store_b32 off, v93, s32 offset:200
+; GFX11-TRUE16-NEXT: scratch_store_b32 off, v94, s32 offset:196
+; GFX11-TRUE16-NEXT: scratch_store_b32 off, v95, s32 offset:192
+; GFX11-TRUE16-NEXT: s_clause 0x1f
+; GFX11-TRUE16-NEXT: scratch_store_b32 off, v104, s32 offset:188
+; GFX11-TRUE16-NEXT: scratch_store_b32 off, v105, s32 offset:184
+; GFX11-TRUE16-NEXT: scratch_store_b32 off, v106, s32 offset:180
+; GFX11-TRUE16-NEXT: scratch_store_b32 off, v107, s32 offset:176
+; GFX11-TRUE16-NEXT: scratch_store_b32 off, v108, s32 offset:172
+; GFX11-TRUE16-NEXT: scratch_store_b32 off, v109, s32 offset:168
+; GFX11-TRUE16-NEXT: scratch_store_b32 off, v110, s32 offset:164
+; GFX11-TRUE16-NEXT: scratch_store_b32 off, v111, s32 offset:160
+; GFX11-TRUE16-NEXT: scratch_store_b32 off, v120, s32 offset:156
+; GFX11-TRUE16-NEXT: scratch_store_b32 off, v121, s32 offset:152
+; GFX11-TRUE16-NEXT: scratch_store_b32 off, v122, s32 offset:148
+; GFX11-TRUE16-NEXT: scratch_store_b32 off, v123, s32 offset:144
+; GFX11-TRUE16-NEXT: scratch_store_b32 off, v124, s32 offset:140
+; GFX11-TRUE16-NEXT: scratch_store_b32 off, v125, s32 offset:136
+; GFX11-TRUE16-NEXT: scratch_store_b32 off, v126, s32 offset:132
+; GFX11-TRUE16-NEXT: scratch_store_b32 off, v127, s32 offset:128
+; GFX11-TRUE16-NEXT: scratch_store_b32 off, v136, s32 offset:124
+; GFX11-TRUE16-NEXT: scratch_store_b32 off, v137, s32 offset:120
+; GFX11-TRUE16-NEXT: scratch_store_b32 off, v138, s32 offset:116
+; GFX11-TRUE16-NEXT: scratch_store_b32 off, v139, s32 offset:112
+; GFX11-TRUE16-NEXT: scratch_store_b32 off, v140, s32 offset:108
+; GFX11-TRUE16-NEXT: scratch_store_b32 off, v141, s32 offset:104
+; GFX11-TRUE16-NEXT: scratch_store_b32 off, v142, s32 offset:100
+; GFX11-TRUE16-NEXT: scratch_store_b32 off, v143, s32 offset:96
+; GFX11-TRUE16-NEXT: scratch_store_b32 off, v152, s32 offset:92
+; GFX11-TRUE16-NEXT: scratch_store_b32 off, v153, s32 offset:88
+; GFX11-TRUE16-NEXT: scratch_store_b32 off, v154, s32 offset:84
+; GFX11-TRUE16-NEXT: scratch_store_b32 off, v155, s32 offset:80
+; GFX11-TRUE16-NEXT: scratch_store_b32 off, v156, s32 offset:76
+; GFX11-TRUE16-NEXT: scratch_store_b32 off, v157, s32 offset:72
+; GFX11-TRUE16-NEXT: scratch_store_b32 off, v158, s32 offset:68
+; GFX11-TRUE16-NEXT: scratch_store_b32 off, v159, s32 offset:64
+; GFX11-TRUE16-NEXT: s_clause 0xf
+; GFX11-TRUE16-NEXT: scratch_store_b32 off, v168, s32 offset:60
+; GFX11-TRUE16-NEXT: scratch_store_b32 off, v169, s32 offset:56
+; GFX11-TRUE16-NEXT: scratch_store_b32 off, v170, s32 offset:52
+; GFX11-TRUE16-NEXT: scratch_store_b32 off, v171, s32 offset:48
+; GFX11-TRUE16-NEXT: scratch_store_b32 off, v172, s32 offset:44
+; GFX11-TRUE16-NEXT: scratch_store_b32 off, v173, s32 offset:40
+; GFX11-TRUE16-NEXT: scratch_store_b32 off, v174, s32 offset:36
+; GFX11-TRUE16-NEXT: scratch_store_b32 off, v175, s32 offset:32
+; GFX11-TRUE16-NEXT: scratch_store_b32 off, v184, s32 offset:28
+; GFX11-TRUE16-NEXT: scratch_store_b32 off, v185, s32 offset:24
+; GFX11-TRUE16-NEXT: scratch_store_b32 off, v186, s32 offset:20
+; GFX11-TRUE16-NEXT: scratch_store_b32 off, v187, s32 offset:16
+; GFX11-TRUE16-NEXT: scratch_store_b32 off, v188, s32 offset:12
+; GFX11-TRUE16-NEXT: scratch_store_b32 off, v189, s32 offset:8
+; GFX11-TRUE16-NEXT: scratch_store_b32 off, v190, s32 offset:4
+; GFX11-TRUE16-NEXT: scratch_store_b32 off, v191, s32
+; GFX11-TRUE16-NEXT: v_dual_mov_b32 v29, v11 :: v_dual_mov_b32 v28, v10
+; GFX11-TRUE16-NEXT: v_dual_mov_b32 v30, v9 :: v_dual_mov_b32 v25, v7
+; GFX11-TRUE16-NEXT: v_dual_mov_b32 v26, v8 :: v_dual_mov_b32 v191, v5
+; GFX11-TRUE16-NEXT: v_dual_mov_b32 v190, v6 :: v_dual_mov_b32 v185, v4
+; GFX11-TRUE16-NEXT: v_dual_mov_b32 v186, v3 :: v_dual_mov_b32 v187, v2
+; GFX11-TRUE16-NEXT: v_dual_mov_b32 v188, v1 :: v_dual_mov_b32 v189, v0
+; GFX11-TRUE16-NEXT: s_lshr_b32 s15, s29, 16
+; GFX11-TRUE16-NEXT: s_lshr_b32 s14, s28, 16
+; GFX11-TRUE16-NEXT: s_lshr_b32 s13, s27, 16
+; GFX11-TRUE16-NEXT: s_lshr_b32 s12, s26, 16
+; GFX11-TRUE16-NEXT: s_lshr_b32 s11, s25, 16
+; GFX11-TRUE16-NEXT: s_lshr_b32 s10, s24, 16
+; GFX11-TRUE16-NEXT: s_lshr_b32 s9, s23, 16
+; GFX11-TRUE16-NEXT: s_lshr_b32 s8, s22, 16
+; GFX11-TRUE16-NEXT: s_lshr_b32 s7, s21, 16
+; GFX11-TRUE16-NEXT: s_lshr_b32 s6, s20, 16
+; GFX11-TRUE16-NEXT: s_lshr_b32 s5, s19, 16
+; GFX11-TRUE16-NEXT: s_lshr_b32 s4, s18, 16
+; GFX11-TRUE16-NEXT: s_lshr_b32 s43, s17, 16
+; GFX11-TRUE16-NEXT: s_lshr_b32 s44, s16, 16
+; GFX11-TRUE16-NEXT: s_lshr_b32 s45, s3, 16
+; GFX11-TRUE16-NEXT: s_lshr_b32 s46, s2, 16
+; GFX11-TRUE16-NEXT: s_lshr_b32 s41, s1, 16
+; GFX11-TRUE16-NEXT: s_lshr_b32 s40, s0, 16
+; GFX11-TRUE16-NEXT: s_mov_b32 s42, 0
+; GFX11-TRUE16-NEXT: s_pack_ll_b32_b16 s40, s0, s40
+; GFX11-TRUE16-NEXT: s_pack_ll_b32_b16 s41, s1, s41
+; GFX11-TRUE16-NEXT: s_pack_ll_b32_b16 s0, s2, s46
+; GFX11-TRUE16-NEXT: s_pack_ll_b32_b16 s1, s3, s45
+; GFX11-TRUE16-NEXT: s_pack_ll_b32_b16 s2, s16, s44
+; GFX11-TRUE16-NEXT: s_pack_ll_b32_b16 s3, s17, s43
+; GFX11-TRUE16-NEXT: s_pack_ll_b32_b16 s4, s18, s4
+; GFX11-TRUE16-NEXT: s_pack_ll_b32_b16 s5, s19, s5
+; GFX11-TRUE16-NEXT: s_pack_ll_b32_b16 s6, s20, s6
+; GFX11-TRUE16-NEXT: s_pack_ll_b32_b16 s7, s21, s7
+; GFX11-TRUE16-NEXT: s_pack_ll_b32_b16 s8, s22, s8
+; GFX11-TRUE16-NEXT: s_pack_ll_b32_b16 s9, s23, s9
+; GFX11-TRUE16-NEXT: s_pack_ll_b32_b16 s10, s24, s10
+; GFX11-TRUE16-NEXT: s_pack_ll_b32_b16 s11, s25, s11
+; GFX11-TRUE16-NEXT: s_pack_ll_b32_b16 s12, s26, s12
+; GFX11-TRUE16-NEXT: s_pack_ll_b32_b16 s13, s27, s13
+; GFX11-TRUE16-NEXT: s_pack_ll_b32_b16 s14, s28, s14
+; GFX11-TRUE16-NEXT: s_pack_ll_b32_b16 s15, s29, s15
; GFX11-TRUE16-NEXT: s_and_b32 s47, vcc_lo, exec_lo
; GFX11-TRUE16-NEXT: s_cbranch_scc0 .LBB15_4
; GFX11-TRUE16-NEXT: ; %bb.1: ; %cmp.false
-; GFX11-TRUE16-NEXT: v_lshl_or_b32 v18, v51, 16, v71
-; GFX11-TRUE16-NEXT: v_lshl_or_b32 v19, v50, 16, v70
-; GFX11-TRUE16-NEXT: v_lshl_or_b32 v20, v49, 16, v69
-; GFX11-TRUE16-NEXT: v_lshl_or_b32 v21, v48, 16, v68
-; GFX11-TRUE16-NEXT: v_lshl_or_b32 v22, v39, 16, v67
-; GFX11-TRUE16-NEXT: v_lshl_or_b32 v23, v38, 16, v66
-; GFX11-TRUE16-NEXT: v_lshl_or_b32 v24, v37, 16, v65
-; GFX11-TRUE16-NEXT: v_lshl_or_b32 v25, v36, 16, v64
-; GFX11-TRUE16-NEXT: v_lshl_or_b32 v26, v35, 16, v55
-; GFX11-TRUE16-NEXT: v_lshl_or_b32 v27, v34, 16, v54
-; GFX11-TRUE16-NEXT: v_lshl_or_b32 v28, v33, 16, v53
-; GFX11-TRUE16-NEXT: v_lshl_or_b32 v29, v32, 16, v52
-; GFX11-TRUE16-NEXT: v_dual_mov_b32 v0, s0 :: v_dual_mov_b32 v1, s1
-; GFX11-TRUE16-NEXT: v_dual_mov_b32 v2, s2 :: v_dual_mov_b32 v3, s3
-; GFX11-TRUE16-NEXT: v_dual_mov_b32 v4, s4 :: v_dual_mov_b32 v5, s5
-; GFX11-TRUE16-NEXT: v_dual_mov_b32 v6, s6 :: v_dual_mov_b32 v7, s7
-; GFX11-TRUE16-NEXT: v_dual_mov_b32 v8, s8 :: v_dual_mov_b32 v9, s9
-; GFX11-TRUE16-NEXT: v_dual_mov_b32 v10, s10 :: v_dual_mov_b32 v11, s11
-; GFX11-TRUE16-NEXT: v_dual_mov_b32 v12, s12 :: v_dual_mov_b32 v13, s13
-; GFX11-TRUE16-NEXT: v_dual_mov_b32 v14, s14 :: v_dual_mov_b32 v15, s16
-; GFX11-TRUE16-NEXT: v_dual_mov_b32 v16, s17 :: v_dual_mov_b32 v17, s18
-; GFX11-TRUE16-NEXT: s_and_not1_b32 vcc_lo, exec_lo, s15
+; GFX11-TRUE16-NEXT: v_dual_mov_b32 v0, s40 :: v_dual_mov_b32 v5, s0
+; GFX11-TRUE16-NEXT: v_dual_mov_b32 v2, s41 :: v_dual_mov_b32 v9, s1
+; GFX11-TRUE16-NEXT: v_dual_mov_b32 v14, s2 :: v_dual_mov_b32 v27, s4
+; GFX11-TRUE16-NEXT: v_dual_mov_b32 v20, s3 :: v_dual_mov_b32 v35, s5
+; GFX11-TRUE16-NEXT: v_dual_mov_b32 v44, s6 :: v_dual_mov_b32 v65, s8
+; GFX11-TRUE16-NEXT: v_dual_mov_b32 v54, s7 :: v_dual_mov_b32 v77, s9
+; GFX11-TRUE16-NEXT: v_dual_mov_b32 v90, s10 :: v_dual_mov_b32 v119, s12
+; GFX11-TRUE16-NEXT: v_dual_mov_b32 v104, s11 :: v_dual_mov_b32 v135, s13
+; GFX11-TRUE16-NEXT: v_mov_b32_e32 v152, s14
+; GFX11-TRUE16-NEXT: v_mov_b32_e32 v170, s15
+; GFX11-TRUE16-NEXT: s_and_not1_b32 vcc_lo, exec_lo, s42
; GFX11-TRUE16-NEXT: s_cbranch_vccnz .LBB15_3
; GFX11-TRUE16-NEXT: .LBB15_2: ; %cmp.true
-; GFX11-TRUE16-NEXT: v_lshl_or_b32 v18, v51, 16, v71
-; GFX11-TRUE16-NEXT: v_lshl_or_b32 v19, v50, 16, v70
-; GFX11-TRUE16-NEXT: v_lshl_or_b32 v20, v49, 16, v69
-; GFX11-TRUE16-NEXT: v_lshl_or_b32 v21, v48, 16, v68
-; GFX11-TRUE16-NEXT: v_lshl_or_b32 v22, v39, 16, v67
-; GFX11-TRUE16-NEXT: v_lshl_or_b32 v23, v38, 16, v66
-; GFX11-TRUE16-NEXT: v_lshl_or_b32 v24, v37, 16, v65
-; GFX11-TRUE16-NEXT: v_lshl_or_b32 v25, v36, 16, v64
-; GFX11-TRUE16-NEXT: v_lshl_or_b32 v26, v35, 16, v55
-; GFX11-TRUE16-NEXT: v_lshl_or_b32 v27, v34, 16, v54
-; GFX11-TRUE16-NEXT: v_lshl_or_b32 v28, v33, 16, v53
-; GFX11-TRUE16-NEXT: v_lshl_or_b32 v29, v32, 16, v52
-; GFX11-TRUE16-NEXT: v_pk_add_u16 v0, s0, 3 op_sel_hi:[1,0]
-; GFX11-TRUE16-NEXT: v_pk_add_u16 v1, s1, 3 op_sel_hi:[1,0]
-; GFX11-TRUE16-NEXT: v_pk_add_u16 v2, s2, 3 op_sel_hi:[1,0]
-; GFX11-TRUE16-NEXT: v_pk_add_u16 v3, s3, 3 op_sel_hi:[1,0]
-; GFX11-TRUE16-NEXT: v_pk_add_u16 v4, s4, 3 op_sel_hi:[1,0]
-; GFX11-TRUE16-NEXT: v_pk_add_u16 v5, s5, 3 op_sel_hi:[1,0]
-; GFX11-TRUE16-NEXT: v_pk_add_u16 v6, s6, 3 op_sel_hi:[1,0]
-; GFX11-TRUE16-NEXT: v_pk_add_u16 v7, s7, 3 op_sel_hi:[1,0]
-; GFX11-TRUE16-NEXT: v_pk_add_u16 v8, s8, 3 op_sel_hi:[1,0]
-; GFX11-TRUE16-NEXT: v_pk_add_u16 v9, s9, 3 op_sel_hi:[1,0]
-; GFX11-TRUE16-NEXT: v_pk_add_u16 v10, s10, 3 op_sel_hi:[1,0]
-; GFX11-TRUE16-NEXT: v_pk_add_u16 v11, s11, 3 op_sel_hi:[1,0]
-; GFX11-TRUE16-NEXT: v_pk_add_u16 v12, s12, 3 op_sel_hi:[1,0]
-; GFX11-TRUE16-NEXT: v_pk_add_u16 v13, s13, 3 op_sel_hi:[1,0]
-; GFX11-TRUE16-NEXT: v_pk_add_u16 v14, s14, 3 op_sel_hi:[1,0]
-; GFX11-TRUE16-NEXT: v_pk_add_u16 v15, s16, 3 op_sel_hi:[1,0]
-; GFX11-TRUE16-NEXT: v_pk_add_u16 v16, s17, 3 op_sel_hi:[1,0]
-; GFX11-TRUE16-NEXT: v_pk_add_u16 v17, s18, 3 op_sel_hi:[1,0]
-; GFX11-TRUE16-NEXT: v_pk_add_u16 v18, v18, 3 op_sel_hi:[1,0]
-; GFX11-TRUE16-NEXT: v_pk_add_u16 v19, v19, 3 op_sel_hi:[1,0]
-; GFX11-TRUE16-NEXT: v_pk_add_u16 v20, v20, 3 op_sel_hi:[1,0]
-; GFX11-TRUE16-NEXT: v_pk_add_u16 v21, v21, 3 op_sel_hi:[1,0]
-; GFX11-TRUE16-NEXT: v_pk_add_u16 v22, v22, 3 op_sel_hi:[1,0]
-; GFX11-TRUE16-NEXT: v_pk_add_u16 v23, v23, 3 op_sel_hi:[1,0]
-; GFX11-TRUE16-NEXT: v_pk_add_u16 v24, v24, 3 op_sel_hi:[1,0]
+; GFX11-TRUE16-NEXT: v_pk_add_u16 v0, s40, 3 op_sel_hi:[1,0]
+; GFX11-TRUE16-NEXT: v_pk_add_u16 v2, s41, 3 op_sel_hi:[1,0]
+; GFX11-TRUE16-NEXT: v_pk_add_u16 v189, v189, 3 op_sel_hi:[1,0]
+; GFX11-TRUE16-NEXT: v_pk_add_u16 v188, v188, 3 op_sel_hi:[1,0]
+; GFX11-TRUE16-NEXT: v_pk_add_u16 v187, v187, 3 op_sel_hi:[1,0]
+; GFX11-TRUE16-NEXT: v_pk_add_u16 v186, v186, 3 op_sel_hi:[1,0]
+; GFX11-TRUE16-NEXT: v_pk_add_u16 v185, v185, 3 op_sel_hi:[1,0]
+; GFX11-TRUE16-NEXT: v_pk_add_u16 v191, v191, 3 op_sel_hi:[1,0]
+; GFX11-TRUE16-NEXT: v_pk_add_u16 v190, v190, 3 op_sel_hi:[1,0]
; GFX11-TRUE16-NEXT: v_pk_add_u16 v25, v25, 3 op_sel_hi:[1,0]
; GFX11-TRUE16-NEXT: v_pk_add_u16 v26, v26, 3 op_sel_hi:[1,0]
-; GFX11-TRUE16-NEXT: v_pk_add_u16 v27, v27, 3 op_sel_hi:[1,0]
+; GFX11-TRUE16-NEXT: v_pk_add_u16 v30, v30, 3 op_sel_hi:[1,0]
; GFX11-TRUE16-NEXT: v_pk_add_u16 v28, v28, 3 op_sel_hi:[1,0]
; GFX11-TRUE16-NEXT: v_pk_add_u16 v29, v29, 3 op_sel_hi:[1,0]
+; GFX11-TRUE16-NEXT: v_pk_add_u16 v5, s0, 3 op_sel_hi:[1,0]
+; GFX11-TRUE16-NEXT: v_pk_add_u16 v9, s1, 3 op_sel_hi:[1,0]
+; GFX11-TRUE16-NEXT: v_pk_add_u16 v14, s2, 3 op_sel_hi:[1,0]
+; GFX11-TRUE16-NEXT: v_pk_add_u16 v20, s3, 3 op_sel_hi:[1,0]
+; GFX11-TRUE16-NEXT: v_pk_add_u16 v27, s4, 3 op_sel_hi:[1,0]
+; GFX11-TRUE16-NEXT: v_pk_add_u16 v35, s5, 3 op_sel_hi:[1,0]
+; GFX11-TRUE16-NEXT: v_pk_add_u16 v44, s6, 3 op_sel_hi:[1,0]
+; GFX11-TRUE16-NEXT: v_pk_add_u16 v54, s7, 3 op_sel_hi:[1,0]
+; GFX11-TRUE16-NEXT: v_pk_add_u16 v65, s8, 3 op_sel_hi:[1,0]
+; GFX11-TRUE16-NEXT: v_pk_add_u16 v77, s9, 3 op_sel_hi:[1,0]
+; GFX11-TRUE16-NEXT: v_pk_add_u16 v90, s10, 3 op_sel_hi:[1,0]
+; GFX11-TRUE16-NEXT: v_pk_add_u16 v104, s11, 3 op_sel_hi:[1,0]
+; GFX11-TRUE16-NEXT: v_pk_add_u16 v119, s12, 3 op_sel_hi:[1,0]
+; GFX11-TRUE16-NEXT: v_pk_add_u16 v135, s13, 3 op_sel_hi:[1,0]
+; GFX11-TRUE16-NEXT: v_pk_add_u16 v152, s14, 3 op_sel_hi:[1,0]
+; GFX11-TRUE16-NEXT: v_pk_add_u16 v170, s15, 3 op_sel_hi:[1,0]
; GFX11-TRUE16-NEXT: .LBB15_3: ; %end
+; GFX11-TRUE16-NEXT: v_dual_mov_b32 v1, v2 :: v_dual_mov_b32 v2, v5
+; GFX11-TRUE16-NEXT: v_dual_mov_b32 v5, v20 :: v_dual_mov_b32 v6, v27
+; GFX11-TRUE16-NEXT: v_dual_mov_b32 v7, v35 :: v_dual_mov_b32 v8, v44
+; GFX11-TRUE16-NEXT: v_dual_mov_b32 v11, v77 :: v_dual_mov_b32 v12, v90
+; GFX11-TRUE16-NEXT: v_mov_b32_e32 v13, v104
+; GFX11-TRUE16-NEXT: v_dual_mov_b32 v15, v135 :: v_dual_mov_b32 v16, v152
+; GFX11-TRUE16-NEXT: v_dual_mov_b32 v17, v170 :: v_dual_mov_b32 v18, v189
+; GFX11-TRUE16-NEXT: v_dual_mov_b32 v19, v188 :: v_dual_mov_b32 v20, v187
+; GFX11-TRUE16-NEXT: v_dual_mov_b32 v21, v186 :: v_dual_mov_b32 v22, v185
+; GFX11-TRUE16-NEXT: v_dual_mov_b32 v23, v191 :: v_dual_mov_b32 v24, v190
+; GFX11-TRUE16-NEXT: s_clause 0x1f
+; GFX11-TRUE16-NEXT: scratch_load_b32 v191, off, s32
+; GFX11-TRUE16-NEXT: scratch_load_b32 v190, off, s32 offset:4
+; GFX11-TRUE16-NEXT: scratch_load_b32 v189, off, s32 offset:8
+; GFX11-TRUE16-NEXT: scratch_load_b32 v188, off, s32 offset:12
+; GFX11-TRUE16-NEXT: scratch_load_b32 v187, off, s32 offset:16
+; GFX11-TRUE16-NEXT: scratch_load_b32 v186, off, s32 offset:20
+; GFX11-TRUE16-NEXT: scratch_load_b32 v185, off, s32 offset:24
+; GFX11-TRUE16-NEXT: scratch_load_b32 v184, off, s32 offset:28
+; GFX11-TRUE16-NEXT: scratch_load_b32 v175, off, s32 offset:32
+; GFX11-TRUE16-NEXT: scratch_load_b32 v174, off, s32 offset:36
+; GFX11-TRUE16-NEXT: scratch_load_b32 v173, off, s32 offset:40
+; GFX11-TRUE16-NEXT: scratch_load_b32 v172, off, s32 offset:44
+; GFX11-TRUE16-NEXT: scratch_load_b32 v171, off, s32 offset:48
+; GFX11-TRUE16-NEXT: scratch_load_b32 v170, off, s32 offset:52
+; GFX11-TRUE16-NEXT: scratch_load_b32 v169, off, s32 offset:56
+; GFX11-TRUE16-NEXT: scratch_load_b32 v168, off, s32 offset:60
+; GFX11-TRUE16-NEXT: scratch_load_b32 v159, off, s32 offset:64
+; GFX11-TRUE16-NEXT: scratch_load_b32 v158, off, s32 offset:68
+; GFX11-TRUE16-NEXT: scratch_load_b32 v157, off, s32 offset:72
+; GFX11-TRUE16-NEXT: scratch_load_b32 v156, off, s32 offset:76
+; GFX11-TRUE16-NEXT: scratch_load_b32 v155, off, s32 offset:80
+; GFX11-TRUE16-NEXT: scratch_load_b32 v154, off, s32 offset:84
+; GFX11-TRUE16-NEXT: scratch_load_b32 v153, off, s32 offset:88
+; GFX11-TRUE16-NEXT: scratch_load_b32 v152, off, s32 offset:92
+; GFX11-TRUE16-NEXT: scratch_load_b32 v143, off, s32 offset:96
+; GFX11-TRUE16-NEXT: scratch_load_b32 v142, off, s32 offset:100
+; GFX11-TRUE16-NEXT: scratch_load_b32 v141, off, s32 offset:104
+; GFX11-TRUE16-NEXT: scratch_load_b32 v140, off, s32 offset:108
+; GFX11-TRUE16-NEXT: scratch_load_b32 v139, off, s32 offset:112
+; GFX11-TRUE16-NEXT: scratch_load_b32 v138, off, s32 offset:116
+; GFX11-TRUE16-NEXT: scratch_load_b32 v137, off, s32 offset:120
+; GFX11-TRUE16-NEXT: scratch_load_b32 v136, off, s32 offset:124
+; GFX11-TRUE16-NEXT: s_clause 0x1f
+; GFX11-TRUE16-NEXT: scratch_load_b32 v127, off, s32 offset:128
+; GFX11-TRUE16-NEXT: scratch_load_b32 v126, off, s32 offset:132
+; GFX11-TRUE16-NEXT: scratch_load_b32 v125, off, s32 offset:136
+; GFX11-TRUE16-NEXT: scratch_load_b32 v124, off, s32 offset:140
+; GFX11-TRUE16-NEXT: scratch_load_b32 v123, off, s32 offset:144
+; GFX11-TRUE16-NEXT: scratch_load_b32 v122, off, s32 offset:148
+; GFX11-TRUE16-NEXT: scratch_load_b32 v121, off, s32 offset:152
+; GFX11-TRUE16-NEXT: scratch_load_b32 v120, off, s32 offset:156
+; GFX11-TRUE16-NEXT: scratch_load_b32 v111, off, s32 offset:160
+; GFX11-TRUE16-NEXT: scratch_load_b32 v110, off, s32 offset:164
+; GFX11-TRUE16-NEXT: scratch_load_b32 v109, off, s32 offset:168
+; GFX11-TRUE16-NEXT: scratch_load_b32 v108, off, s32 offset:172
+; GFX11-TRUE16-NEXT: scratch_load_b32 v107, off, s32 offset:176
+; GFX11-TRUE16-NEXT: scratch_load_b32 v106, off, s32 offset:180
+; GFX11-TRUE16-NEXT: scratch_load_b32 v105, off, s32 offset:184
+; GFX11-TRUE16-NEXT: scratch_load_b32 v104, off, s32 offset:188
+; GFX11-TRUE16-NEXT: scratch_load_b32 v95, off, s32 offset:192
+; GFX11-TRUE16-NEXT: scratch_load_b32 v94, off, s32 offset:196
+; GFX11-TRUE16-NEXT: scratch_load_b32 v93, off, s32 offset:200
+; GFX11-TRUE16-NEXT: scratch_load_b32 v92, off, s32 offset:204
+; GFX11-TRUE16-NEXT: scratch_load_b32 v91, off, s32 offset:208
+; GFX11-TRUE16-NEXT: scratch_load_b32 v90, off, s32 offset:212
+; GFX11-TRUE16-NEXT: scratch_load_b32 v89, off, s32 offset:216
+; GFX11-TRUE16-NEXT: scratch_load_b32 v88, off, s32 offset:220
+; GFX11-TRUE16-NEXT: scratch_load_b32 v79, off, s32 offset:224
+; GFX11-TRUE16-NEXT: scratch_load_b32 v78, off, s32 offset:228
+; GFX11-TRUE16-NEXT: scratch_load_b32 v77, off, s32 offset:232
+; GFX11-TRUE16-NEXT: scratch_load_b32 v76, off, s32 offset:236
+; GFX11-TRUE16-NEXT: scratch_load_b32 v75, off, s32 offset:240
+; GFX11-TRUE16-NEXT: scratch_load_b32 v74, off, s32 offset:244
+; GFX11-TRUE16-NEXT: scratch_load_b32 v73, off, s32 offset:248
+; GFX11-TRUE16-NEXT: scratch_load_b32 v72, off, s32 offset:252
+; GFX11-TRUE16-NEXT: s_clause 0xf
+; GFX11-TRUE16-NEXT: scratch_load_b32 v63, off, s32 offset:256
+; GFX11-TRUE16-NEXT: scratch_load_b32 v62, off, s32 offset:260
+; GFX11-TRUE16-NEXT: scratch_load_b32 v61, off, s32 offset:264
+; GFX11-TRUE16-NEXT: scratch_load_b32 v60, off, s32 offset:268
+; GFX11-TRUE16-NEXT: scratch_load_b32 v59, off, s32 offset:272
+; GFX11-TRUE16-NEXT: scratch_load_b32 v58, off, s32 offset:276
+; GFX11-TRUE16-NEXT: scratch_load_b32 v57, off, s32 offset:280
+; GFX11-TRUE16-NEXT: scratch_load_b32 v56, off, s32 offset:284
+; GFX11-TRUE16-NEXT: scratch_load_b32 v47, off, s32 offset:288
+; GFX11-TRUE16-NEXT: scratch_load_b32 v46, off, s32 offset:292
+; GFX11-TRUE16-NEXT: scratch_load_b32 v45, off, s32 offset:296
+; GFX11-TRUE16-NEXT: scratch_load_b32 v44, off, s32 offset:300
+; GFX11-TRUE16-NEXT: scratch_load_b32 v43, off, s32 offset:304
+; GFX11-TRUE16-NEXT: scratch_load_b32 v42, off, s32 offset:308
+; GFX11-TRUE16-NEXT: scratch_load_b32 v41, off, s32 offset:312
+; GFX11-TRUE16-NEXT: scratch_load_b32 v40, off, s32 offset:316
+; GFX11-TRUE16-NEXT: v_dual_mov_b32 v3, v9 :: v_dual_mov_b32 v4, v14
+; GFX11-TRUE16-NEXT: v_dual_mov_b32 v9, v54 :: v_dual_mov_b32 v10, v65
+; GFX11-TRUE16-NEXT: v_dual_mov_b32 v14, v119 :: v_dual_mov_b32 v27, v30
+; GFX11-TRUE16-NEXT: s_waitcnt vmcnt(0)
; GFX11-TRUE16-NEXT: s_setpc_b64 s[30:31]
; GFX11-TRUE16-NEXT: .LBB15_4:
+; GFX11-TRUE16-NEXT: v_dual_mov_b32 v64, v29 :: v_dual_mov_b32 v65, v28
+; GFX11-TRUE16-NEXT: v_mov_b32_e32 v66, v30
+; GFX11-TRUE16-NEXT: v_dual_mov_b32 v53, v26 :: v_dual_mov_b32 v54, v25
; GFX11-TRUE16-NEXT: ; implicit-def: $vgpr0_vgpr1_vgpr2_vgpr3_vgpr4_vgpr5_vgpr6_vgpr7_vgpr8_vgpr9_vgpr10_vgpr11_vgpr12_vgpr13_vgpr14_vgpr15_vgpr16_vgpr17_vgpr18_vgpr19_vgpr20_vgpr21_vgpr22_vgpr23_vgpr24_vgpr25_vgpr26_vgpr27_vgpr28_vgpr29_vgpr30_vgpr31
+; GFX11-TRUE16-NEXT: ; implicit-def: $vgpr1_vgpr2_vgpr3_vgpr4_vgpr5_vgpr6_vgpr7_vgpr8_vgpr9_vgpr10_vgpr11_vgpr12_vgpr13_vgpr14_vgpr15_vgpr16_vgpr17_vgpr18_vgpr19_vgpr20_vgpr21_vgpr22_vgpr23_vgpr24_vgpr25_vgpr26_vgpr27_vgpr28_vgpr29_vgpr30_vgpr31_vgpr32
+; GFX11-TRUE16-NEXT: ; implicit-def: $vgpr3_vgpr4_vgpr5_vgpr6_vgpr7_vgpr8_vgpr9_vgpr10_vgpr11_vgpr12_vgpr13_vgpr14_vgpr15_vgpr16_vgpr17_vgpr18_vgpr19_vgpr20_vgpr21_vgpr22_vgpr23_vgpr24_vgpr25_vgpr26_vgpr27_vgpr28_vgpr29_vgpr30_vgpr31_vgpr32_vgpr33_vgpr34
+; GFX11-TRUE16-NEXT: ; implicit-def: $vgpr6_vgpr7_vgpr8_vgpr9_vgpr10_vgpr11_vgpr12_vgpr13_vgpr14_vgpr15_vgpr16_vgpr17_vgpr18_vgpr19_vgpr20_vgpr21_vgpr22_vgpr23_vgpr24_vgpr25_vgpr26_vgpr27_vgpr28_vgpr29_vgpr30_vgpr31_vgpr32_vgpr33_vgpr34_vgpr35_vgpr36_vgpr37
+; GFX11-TRUE16-NEXT: ; implicit-def: $vgpr10_vgpr11_vgpr12_vgpr13_vgpr14_vgpr15_vgpr16_vgpr17_vgpr18_vgpr19_vgpr20_vgpr21_vgpr22_vgpr23_vgpr24_vgpr25_vgpr26_vgpr27_vgpr28_vgpr29_vgpr30_vgpr31_vgpr32_vgpr33_vgpr34_vgpr35_vgpr36_vgpr37_vgpr38_vgpr39_vgpr40_vgpr41
+; GFX11-TRUE16-NEXT: ; implicit-def: $vgpr15_vgpr16_vgpr17_vgpr18_vgpr19_vgpr20_vgpr21_vgpr22_vgpr23_vgpr24_vgpr25_vgpr26_vgpr27_vgpr28_vgpr29_vgpr30_vgpr31_vgpr32_vgpr33_vgpr34_vgpr35_vgpr36_vgpr37_vgpr38_vgpr39_vgpr40_vgpr41_vgpr42_vgpr43_vgpr44_vgpr45_vgpr46
+; GFX11-TRUE16-NEXT: ; implicit-def: $vgpr21_vgpr22_vgpr23_vgpr24_vgpr25_vgpr26_vgpr27_vgpr28_vgpr29_vgpr30_vgpr31_vgpr32_vgpr33_vgpr34_vgpr35_vgpr36_vgpr37_vgpr38_vgpr39_vgpr40_vgpr41_vgpr42_vgpr43_vgpr44_vgpr45_vgpr46_vgpr47_vgpr48_vgpr49_vgpr50_vgpr51_vgpr52
+; GFX11-TRUE16-NEXT: s_delay_alu instid0(VALU_DEP_1) | instskip(NEXT) | instid1(VALU_DEP_4)
+; GFX11-TRUE16-NEXT: v_dual_mov_b32 v25, v54 :: v_dual_mov_b32 v26, v53
+; GFX11-TRUE16-NEXT: ; implicit-def: $vgpr28_vgpr29_vgpr30_vgpr31_vgpr32_vgpr33_vgpr34_vgpr35_vgpr36_vgpr37_vgpr38_vgpr39_vgpr40_vgpr41_vgpr42_vgpr43_vgpr44_vgpr45_vgpr46_vgpr47_vgpr48_vgpr49_vgpr50_vgpr51_vgpr52_vgpr53_vgpr54_vgpr55_vgpr56_vgpr57_vgpr58_vgpr59
+; GFX11-TRUE16-NEXT: v_dual_mov_b32 v28, v65 :: v_dual_mov_b32 v29, v64
+; GFX11-TRUE16-NEXT: s_delay_alu instid0(VALU_DEP_4)
+; GFX11-TRUE16-NEXT: v_mov_b32_e32 v30, v66
+; GFX11-TRUE16-NEXT: ; implicit-def: $vgpr36_vgpr37_vgpr38_vgpr39_vgpr40_vgpr41_vgpr42_vgpr43_vgpr44_vgpr45_vgpr46_vgpr47_vgpr48_vgpr49_vgpr50_vgpr51_vgpr52_vgpr53_vgpr54_vgpr55_vgpr56_vgpr57_vgpr58_vgpr59_vgpr60_vgpr61_vgpr62_vgpr63_vgpr64_vgpr65_vgpr66_vgpr67
+; GFX11-TRUE16-NEXT: ; implicit-def: $vgpr45_vgpr46_vgpr47_vgpr48_vgpr49_vgpr50_vgpr51_vgpr52_vgpr53_vgpr54_vgpr55_vgpr56_vgpr57_vgpr58_vgpr59_vgpr60_vgpr61_vgpr62_vgpr63_vgpr64_vgpr65_vgpr66_vgpr67_vgpr68_vgpr69_vgpr70_vgpr71_vgpr72_vgpr73_vgpr74_vgpr75_vgpr76
+; GFX11-TRUE16-NEXT: ; implicit-def: $vgpr55_vgpr56_vgpr57_vgpr58_vgpr59_vgpr60_vgpr61_vgpr62_vgpr63_vgpr64_vgpr65_vgpr66_vgpr67_vgpr68_vgpr69_vgpr70_vgpr71_vgpr72_vgpr73_vgpr74_vgpr75_vgpr76_vgpr77_vgpr78_vgpr79_vgpr80_vgpr81_vgpr82_vgpr83_vgpr84_vgpr85_vgpr86
+; GFX11-TRUE16-NEXT: ; implicit-def: $vgpr66_vgpr67_vgpr68_vgpr69_vgpr70_vgpr71_vgpr72_vgpr73_vgpr74_vgpr75_vgpr76_vgpr77_vgpr78_vgpr79_vgpr80_vgpr81_vgpr82_vgpr83_vgpr84_vgpr85_vgpr86_vgpr87_vgpr88_vgpr89_vgpr90_vgpr91_vgpr92_vgpr93_vgpr94_vgpr95_vgpr96_vgpr97
+; GFX11-TRUE16-NEXT: ; implicit-def: $vgpr78_vgpr79_vgpr80_vgpr81_vgpr82_vgpr83_vgpr84_vgpr85_vgpr86_vgpr87_vgpr88_vgpr89_vgpr90_vgpr91_vgpr92_vgpr93_vgpr94_vgpr95_vgpr96_vgpr97_vgpr98_vgpr99_vgpr100_vgpr101_vgpr102_vgpr103_vgpr104_vgpr105_vgpr106_vgpr107_vgpr108_vgpr109
+; GFX11-TRUE16-NEXT: ; implicit-def: $vgpr91_vgpr92_vgpr93_vgpr94_vgpr95_vgpr96_vgpr97_vgpr98_vgpr99_vgpr100_vgpr101_vgpr102_vgpr103_vgpr104_vgpr105_vgpr106_vgpr107_vgpr108_vgpr109_vgpr110_vgpr111_vgpr112_vgpr113_vgpr114_vgpr115_vgpr116_vgpr117_vgpr118_vgpr119_vgpr120_vgpr121_vgpr122
+; GFX11-TRUE16-NEXT: ; implicit-def: $vgpr105_vgpr106_vgpr107_vgpr108_vgpr109_vgpr110_vgpr111_vgpr112_vgpr113_vgpr114_vgpr115_vgpr116_vgpr117_vgpr118_vgpr119_vgpr120_vgpr121_vgpr122_vgpr123_vgpr124_vgpr125_vgpr126_vgpr127_vgpr128_vgpr129_vgpr130_vgpr131_vgpr132_vgpr133_vgpr134_vgpr135_vgpr136
+; GFX11-TRUE16-NEXT: ; implicit-def: $vgpr120_vgpr121_vgpr122_vgpr123_vgpr124_vgpr125_vgpr126_vgpr127_vgpr128_vgpr129_vgpr130_vgpr131_vgpr132_vgpr133_vgpr134_vgpr135_vgpr136_vgpr137_vgpr138_vgpr139_vgpr140_vgpr141_vgpr142_vgpr143_vgpr144_vgpr145_vgpr146_vgpr147_vgpr148_vgpr149_vgpr150_vgpr151
+; GFX11-TRUE16-NEXT: ; implicit-def: $vgpr136_vgpr137_vgpr138_vgpr139_vgpr140_vgpr141_vgpr142_vgpr143_vgpr144_vgpr145_vgpr146_vgpr147_vgpr148_vgpr149_vgpr150_vgpr151_vgpr152_vgpr153_vgpr154_vgpr155_vgpr156_vgpr157_vgpr158_vgpr159_vgpr160_vgpr161_vgpr162_vgpr163_vgpr164_vgpr165_vgpr166_vgpr167
+; GFX11-TRUE16-NEXT: ; implicit-def: $vgpr153_vgpr154_vgpr155_vgpr156_vgpr157_vgpr158_vgpr159_vgpr160_vgpr161_vgpr162_vgpr163_vgpr164_vgpr165_vgpr166_vgpr167_vgpr168_vgpr169_vgpr170_vgpr171_vgpr172_vgpr173_vgpr174_vgpr175_vgpr176_vgpr177_vgpr178_vgpr179_vgpr180_vgpr181_vgpr182_vgpr183_vgpr184
; GFX11-TRUE16-NEXT: s_branch .LBB15_2
;
; GFX11-FAKE16-LABEL: bitcast_v60i16_to_v30i32_scalar:
@@ -12840,153 +12992,305 @@ define inreg <30 x i32> @bitcast_v60f16_to_v30i32_scalar(<60 x half> inreg %a, i
; GFX11-TRUE16-LABEL: bitcast_v60f16_to_v30i32_scalar:
; GFX11-TRUE16: ; %bb.0:
; GFX11-TRUE16-NEXT: s_waitcnt vmcnt(0) expcnt(0) lgkmcnt(0)
-; GFX11-TRUE16-NEXT: v_mov_b16_e32 v32.h, 0
; GFX11-TRUE16-NEXT: v_cmp_ne_u32_e32 vcc_lo, 0, v12
-; GFX11-TRUE16-NEXT: v_mov_b16_e32 v32.l, v11.h
-; GFX11-TRUE16-NEXT: v_mov_b16_e32 v33.l, v10.h
-; GFX11-TRUE16-NEXT: v_mov_b16_e32 v34.l, v9.h
-; GFX11-TRUE16-NEXT: v_mov_b16_e32 v33.h, v32.h
-; GFX11-TRUE16-NEXT: v_mov_b16_e32 v34.h, v32.h
-; GFX11-TRUE16-NEXT: v_mov_b16_e32 v35.l, v8.h
-; GFX11-TRUE16-NEXT: v_mov_b16_e32 v35.h, v32.h
-; GFX11-TRUE16-NEXT: v_mov_b16_e32 v36.l, v7.h
-; GFX11-TRUE16-NEXT: v_mov_b16_e32 v36.h, v32.h
-; GFX11-TRUE16-NEXT: v_mov_b16_e32 v37.l, v6.h
-; GFX11-TRUE16-NEXT: v_mov_b16_e32 v37.h, v32.h
-; GFX11-TRUE16-NEXT: v_mov_b16_e32 v38.l, v5.h
-; GFX11-TRUE16-NEXT: v_mov_b16_e32 v38.h, v32.h
-; GFX11-TRUE16-NEXT: v_mov_b16_e32 v39.l, v4.h
-; GFX11-TRUE16-NEXT: v_mov_b16_e32 v39.h, v32.h
-; GFX11-TRUE16-NEXT: v_mov_b16_e32 v48.l, v3.h
-; GFX11-TRUE16-NEXT: v_mov_b16_e32 v48.h, v32.h
-; GFX11-TRUE16-NEXT: v_mov_b16_e32 v49.l, v2.h
-; GFX11-TRUE16-NEXT: v_mov_b16_e32 v49.h, v32.h
-; GFX11-TRUE16-NEXT: v_mov_b16_e32 v50.l, v1.h
-; GFX11-TRUE16-NEXT: v_mov_b16_e32 v50.h, v32.h
-; GFX11-TRUE16-NEXT: v_mov_b16_e32 v51.l, v0.h
-; GFX11-TRUE16-NEXT: v_mov_b16_e32 v51.h, v32.h
-; GFX11-TRUE16-NEXT: v_and_b32_e32 v71, 0xffff, v0
-; GFX11-TRUE16-NEXT: v_and_b32_e32 v70, 0xffff, v1
-; GFX11-TRUE16-NEXT: v_and_b32_e32 v69, 0xffff, v2
-; GFX11-TRUE16-NEXT: v_and_b32_e32 v68, 0xffff, v3
-; GFX11-TRUE16-NEXT: v_and_b32_e32 v67, 0xffff, v4
-; GFX11-TRUE16-NEXT: v_and_b32_e32 v66, 0xffff, v5
-; GFX11-TRUE16-NEXT: v_and_b32_e32 v65, 0xffff, v6
-; GFX11-TRUE16-NEXT: v_and_b32_e32 v64, 0xffff, v7
-; GFX11-TRUE16-NEXT: v_and_b32_e32 v55, 0xffff, v8
-; GFX11-TRUE16-NEXT: v_and_b32_e32 v54, 0xffff, v9
-; GFX11-TRUE16-NEXT: v_and_b32_e32 v53, 0xffff, v10
-; GFX11-TRUE16-NEXT: v_and_b32_e32 v52, 0xffff, v11
-; GFX11-TRUE16-NEXT: s_lshr_b32 s40, s29, 16
-; GFX11-TRUE16-NEXT: s_lshr_b32 s41, s28, 16
-; GFX11-TRUE16-NEXT: s_lshr_b32 s42, s27, 16
-; GFX11-TRUE16-NEXT: s_lshr_b32 s14, s26, 16
-; GFX11-TRUE16-NEXT: s_lshr_b32 s13, s25, 16
-; GFX11-TRUE16-NEXT: s_lshr_b32 s12, s24, 16
-; GFX11-TRUE16-NEXT: s_lshr_b32 s11, s23, 16
-; GFX11-TRUE16-NEXT: s_lshr_b32 s10, s22, 16
-; GFX11-TRUE16-NEXT: s_lshr_b32 s9, s21, 16
-; GFX11-TRUE16-NEXT: s_lshr_b32 s8, s20, 16
-; GFX11-TRUE16-NEXT: s_lshr_b32 s7, s19, 16
-; GFX11-TRUE16-NEXT: s_lshr_b32 s6, s18, 16
-; GFX11-TRUE16-NEXT: s_lshr_b32 s5, s17, 16
-; GFX11-TRUE16-NEXT: s_lshr_b32 s4, s16, 16
-; GFX11-TRUE16-NEXT: s_lshr_b32 s43, s3, 16
-; GFX11-TRUE16-NEXT: s_lshr_b32 s44, s2, 16
-; GFX11-TRUE16-NEXT: s_lshr_b32 s45, s1, 16
-; GFX11-TRUE16-NEXT: s_lshr_b32 s46, s0, 16
-; GFX11-TRUE16-NEXT: s_mov_b32 s15, 0
-; GFX11-TRUE16-NEXT: s_pack_ll_b32_b16 s0, s0, s46
-; GFX11-TRUE16-NEXT: s_pack_ll_b32_b16 s1, s1, s45
-; GFX11-TRUE16-NEXT: s_pack_ll_b32_b16 s2, s2, s44
-; GFX11-TRUE16-NEXT: s_pack_ll_b32_b16 s3, s3, s43
-; GFX11-TRUE16-NEXT: s_pack_ll_b32_b16 s4, s16, s4
-; GFX11-TRUE16-NEXT: s_pack_ll_b32_b16 s5, s17, s5
-; GFX11-TRUE16-NEXT: s_pack_ll_b32_b16 s6, s18, s6
-; GFX11-TRUE16-NEXT: s_pack_ll_b32_b16 s7, s19, s7
-; GFX11-TRUE16-NEXT: s_pack_ll_b32_b16 s8, s20, s8
-; GFX11-TRUE16-NEXT: s_pack_ll_b32_b16 s9, s21, s9
-; GFX11-TRUE16-NEXT: s_pack_ll_b32_b16 s10, s22, s10
-; GFX11-TRUE16-NEXT: s_pack_ll_b32_b16 s11, s23, s11
-; GFX11-TRUE16-NEXT: s_pack_ll_b32_b16 s12, s24, s12
-; GFX11-TRUE16-NEXT: s_pack_ll_b32_b16 s13, s25, s13
-; GFX11-TRUE16-NEXT: s_pack_ll_b32_b16 s14, s26, s14
-; GFX11-TRUE16-NEXT: s_pack_ll_b32_b16 s16, s27, s42
-; GFX11-TRUE16-NEXT: s_pack_ll_b32_b16 s17, s28, s41
-; GFX11-TRUE16-NEXT: s_pack_ll_b32_b16 s18, s29, s40
+; GFX11-TRUE16-NEXT: s_clause 0x1f
+; GFX11-TRUE16-NEXT: scratch_store_b32 off, v40, s32 offset:316
+; GFX11-TRUE16-NEXT: scratch_store_b32 off, v41, s32 offset:312
+; GFX11-TRUE16-NEXT: scratch_store_b32 off, v42, s32 offset:308
+; GFX11-TRUE16-NEXT: scratch_store_b32 off, v43, s32 offset:304
+; GFX11-TRUE16-NEXT: scratch_store_b32 off, v44, s32 offset:300
+; GFX11-TRUE16-NEXT: scratch_store_b32 off, v45, s32 offset:296
+; GFX11-TRUE16-NEXT: scratch_store_b32 off, v46, s32 offset:292
+; GFX11-TRUE16-NEXT: scratch_store_b32 off, v47, s32 offset:288
+; GFX11-TRUE16-NEXT: scratch_store_b32 off, v56, s32 offset:284
+; GFX11-TRUE16-NEXT: scratch_store_b32 off, v57, s32 offset:280
+; GFX11-TRUE16-NEXT: scratch_store_b32 off, v58, s32 offset:276
+; GFX11-TRUE16-NEXT: scratch_store_b32 off, v59, s32 offset:272
+; GFX11-TRUE16-NEXT: scratch_store_b32 off, v60, s32 offset:268
+; GFX11-TRUE16-NEXT: scratch_store_b32 off, v61, s32 offset:264
+; GFX11-TRUE16-NEXT: scratch_store_b32 off, v62, s32 offset:260
+; GFX11-TRUE16-NEXT: scratch_store_b32 off, v63, s32 offset:256
+; GFX11-TRUE16-NEXT: scratch_store_b32 off, v72, s32 offset:252
+; GFX11-TRUE16-NEXT: scratch_store_b32 off, v73, s32 offset:248
+; GFX11-TRUE16-NEXT: scratch_store_b32 off, v74, s32 offset:244
+; GFX11-TRUE16-NEXT: scratch_store_b32 off, v75, s32 offset:240
+; GFX11-TRUE16-NEXT: scratch_store_b32 off, v76, s32 offset:236
+; GFX11-TRUE16-NEXT: scratch_store_b32 off, v77, s32 offset:232
+; GFX11-TRUE16-NEXT: scratch_store_b32 off, v78, s32 offset:228
+; GFX11-TRUE16-NEXT: scratch_store_b32 off, v79, s32 offset:224
+; GFX11-TRUE16-NEXT: scratch_store_b32 off, v88, s32 offset:220
+; GFX11-TRUE16-NEXT: scratch_store_b32 off, v89, s32 offset:216
+; GFX11-TRUE16-NEXT: scratch_store_b32 off, v90, s32 offset:212
+; GFX11-TRUE16-NEXT: scratch_store_b32 off, v91, s32 offset:208
+; GFX11-TRUE16-NEXT: scratch_store_b32 off, v92, s32 offset:204
+; GFX11-TRUE16-NEXT: scratch_store_b32 off, v93, s32 offset:200
+; GFX11-TRUE16-NEXT: scratch_store_b32 off, v94, s32 offset:196
+; GFX11-TRUE16-NEXT: scratch_store_b32 off, v95, s32 offset:192
+; GFX11-TRUE16-NEXT: s_clause 0x1f
+; GFX11-TRUE16-NEXT: scratch_store_b32 off, v104, s32 offset:188
+; GFX11-TRUE16-NEXT: scratch_store_b32 off, v105, s32 offset:184
+; GFX11-TRUE16-NEXT: scratch_store_b32 off, v106, s32 offset:180
+; GFX11-TRUE16-NEXT: scratch_store_b32 off, v107, s32 offset:176
+; GFX11-TRUE16-NEXT: scratch_store_b32 off, v108, s32 offset:172
+; GFX11-TRUE16-NEXT: scratch_store_b32 off, v109, s32 offset:168
+; GFX11-TRUE16-NEXT: scratch_store_b32 off, v110, s32 offset:164
+; GFX11-TRUE16-NEXT: scratch_store_b32 off, v111, s32 offset:160
+; GFX11-TRUE16-NEXT: scratch_store_b32 off, v120, s32 offset:156
+; GFX11-TRUE16-NEXT: scratch_store_b32 off, v121, s32 offset:152
+; GFX11-TRUE16-NEXT: scratch_store_b32 off, v122, s32 offset:148
+; GFX11-TRUE16-NEXT: scratch_store_b32 off, v123, s32 offset:144
+; GFX11-TRUE16-NEXT: scratch_store_b32 off, v124, s32 offset:140
+; GFX11-TRUE16-NEXT: scratch_store_b32 off, v125, s32 offset:136
+; GFX11-TRUE16-NEXT: scratch_store_b32 off, v126, s32 offset:132
+; GFX11-TRUE16-NEXT: scratch_store_b32 off, v127, s32 offset:128
+; GFX11-TRUE16-NEXT: scratch_store_b32 off, v136, s32 offset:124
+; GFX11-TRUE16-NEXT: scratch_store_b32 off, v137, s32 offset:120
+; GFX11-TRUE16-NEXT: scratch_store_b32 off, v138, s32 offset:116
+; GFX11-TRUE16-NEXT: scratch_store_b32 off, v139, s32 offset:112
+; GFX11-TRUE16-NEXT: scratch_store_b32 off, v140, s32 offset:108
+; GFX11-TRUE16-NEXT: scratch_store_b32 off, v141, s32 offset:104
+; GFX11-TRUE16-NEXT: scratch_store_b32 off, v142, s32 offset:100
+; GFX11-TRUE16-NEXT: scratch_store_b32 off, v143, s32 offset:96
+; GFX11-TRUE16-NEXT: scratch_store_b32 off, v152, s32 offset:92
+; GFX11-TRUE16-NEXT: scratch_store_b32 off, v153, s32 offset:88
+; GFX11-TRUE16-NEXT: scratch_store_b32 off, v154, s32 offset:84
+; GFX11-TRUE16-NEXT: scratch_store_b32 off, v155, s32 offset:80
+; GFX11-TRUE16-NEXT: scratch_store_b32 off, v156, s32 offset:76
+; GFX11-TRUE16-NEXT: scratch_store_b32 off, v157, s32 offset:72
+; GFX11-TRUE16-NEXT: scratch_store_b32 off, v158, s32 offset:68
+; GFX11-TRUE16-NEXT: scratch_store_b32 off, v159, s32 offset:64
+; GFX11-TRUE16-NEXT: s_clause 0xf
+; GFX11-TRUE16-NEXT: scratch_store_b32 off, v168, s32 offset:60
+; GFX11-TRUE16-NEXT: scratch_store_b32 off, v169, s32 offset:56
+; GFX11-TRUE16-NEXT: scratch_store_b32 off, v170, s32 offset:52
+; GFX11-TRUE16-NEXT: scratch_store_b32 off, v171, s32 offset:48
+; GFX11-TRUE16-NEXT: scratch_store_b32 off, v172, s32 offset:44
+; GFX11-TRUE16-NEXT: scratch_store_b32 off, v173, s32 offset:40
+; GFX11-TRUE16-NEXT: scratch_store_b32 off, v174, s32 offset:36
+; GFX11-TRUE16-NEXT: scratch_store_b32 off, v175, s32 offset:32
+; GFX11-TRUE16-NEXT: scratch_store_b32 off, v184, s32 offset:28
+; GFX11-TRUE16-NEXT: scratch_store_b32 off, v185, s32 offset:24
+; GFX11-TRUE16-NEXT: scratch_store_b32 off, v186, s32 offset:20
+; GFX11-TRUE16-NEXT: scratch_store_b32 off, v187, s32 offset:16
+; GFX11-TRUE16-NEXT: scratch_store_b32 off, v188, s32 offset:12
+; GFX11-TRUE16-NEXT: scratch_store_b32 off, v189, s32 offset:8
+; GFX11-TRUE16-NEXT: scratch_store_b32 off, v190, s32 offset:4
+; GFX11-TRUE16-NEXT: scratch_store_b32 off, v191, s32
+; GFX11-TRUE16-NEXT: v_dual_mov_b32 v29, v11 :: v_dual_mov_b32 v28, v10
+; GFX11-TRUE16-NEXT: v_dual_mov_b32 v30, v9 :: v_dual_mov_b32 v25, v7
+; GFX11-TRUE16-NEXT: v_dual_mov_b32 v26, v8 :: v_dual_mov_b32 v191, v5
+; GFX11-TRUE16-NEXT: v_dual_mov_b32 v190, v6 :: v_dual_mov_b32 v185, v4
+; GFX11-TRUE16-NEXT: v_dual_mov_b32 v186, v3 :: v_dual_mov_b32 v187, v2
+; GFX11-TRUE16-NEXT: v_dual_mov_b32 v188, v1 :: v_dual_mov_b32 v189, v0
+; GFX11-TRUE16-NEXT: s_lshr_b32 s15, s29, 16
+; GFX11-TRUE16-NEXT: s_lshr_b32 s14, s28, 16
+; GFX11-TRUE16-NEXT: s_lshr_b32 s13, s27, 16
+; GFX11-TRUE16-NEXT: s_lshr_b32 s12, s26, 16
+; GFX11-TRUE16-NEXT: s_lshr_b32 s11, s25, 16
+; GFX11-TRUE16-NEXT: s_lshr_b32 s10, s24, 16
+; GFX11-TRUE16-NEXT: s_lshr_b32 s9, s23, 16
+; GFX11-TRUE16-NEXT: s_lshr_b32 s8, s22, 16
+; GFX11-TRUE16-NEXT: s_lshr_b32 s7, s21, 16
+; GFX11-TRUE16-NEXT: s_lshr_b32 s6, s20, 16
+; GFX11-TRUE16-NEXT: s_lshr_b32 s5, s19, 16
+; GFX11-TRUE16-NEXT: s_lshr_b32 s4, s18, 16
+; GFX11-TRUE16-NEXT: s_lshr_b32 s43, s17, 16
+; GFX11-TRUE16-NEXT: s_lshr_b32 s44, s16, 16
+; GFX11-TRUE16-NEXT: s_lshr_b32 s45, s3, 16
+; GFX11-TRUE16-NEXT: s_lshr_b32 s46, s2, 16
+; GFX11-TRUE16-NEXT: s_lshr_b32 s41, s1, 16
+; GFX11-TRUE16-NEXT: s_lshr_b32 s40, s0, 16
+; GFX11-TRUE16-NEXT: s_mov_b32 s42, 0
+; GFX11-TRUE16-NEXT: s_pack_ll_b32_b16 s40, s0, s40
+; GFX11-TRUE16-NEXT: s_pack_ll_b32_b16 s41, s1, s41
+; GFX11-TRUE16-NEXT: s_pack_ll_b32_b16 s0, s2, s46
+; GFX11-TRUE16-NEXT: s_pack_ll_b32_b16 s1, s3, s45
+; GFX11-TRUE16-NEXT: s_pack_ll_b32_b16 s2, s16, s44
+; GFX11-TRUE16-NEXT: s_pack_ll_b32_b16 s3, s17, s43
+; GFX11-TRUE16-NEXT: s_pack_ll_b32_b16 s4, s18, s4
+; GFX11-TRUE16-NEXT: s_pack_ll_b32_b16 s5, s19, s5
+; GFX11-TRUE16-NEXT: s_pack_ll_b32_b16 s6, s20, s6
+; GFX11-TRUE16-NEXT: s_pack_ll_b32_b16 s7, s21, s7
+; GFX11-TRUE16-NEXT: s_pack_ll_b32_b16 s8, s22, s8
+; GFX11-TRUE16-NEXT: s_pack_ll_b32_b16 s9, s23, s9
+; GFX11-TRUE16-NEXT: s_pack_ll_b32_b16 s10, s24, s10
+; GFX11-TRUE16-NEXT: s_pack_ll_b32_b16 s11, s25, s11
+; GFX11-TRUE16-NEXT: s_pack_ll_b32_b16 s12, s26, s12
+; GFX11-TRUE16-NEXT: s_pack_ll_b32_b16 s13, s27, s13
+; GFX11-TRUE16-NEXT: s_pack_ll_b32_b16 s14, s28, s14
+; GFX11-TRUE16-NEXT: s_pack_ll_b32_b16 s15, s29, s15
; GFX11-TRUE16-NEXT: s_and_b32 s47, vcc_lo, exec_lo
; GFX11-TRUE16-NEXT: s_cbranch_scc0 .LBB19_4
; GFX11-TRUE16-NEXT: ; %bb.1: ; %cmp.false
-; GFX11-TRUE16-NEXT: v_lshl_or_b32 v18, v51, 16, v71
-; GFX11-TRUE16-NEXT: v_lshl_or_b32 v19, v50, 16, v70
-; GFX11-TRUE16-NEXT: v_lshl_or_b32 v20, v49, 16, v69
-; GFX11-TRUE16-NEXT: v_lshl_or_b32 v21, v48, 16, v68
-; GFX11-TRUE16-NEXT: v_lshl_or_b32 v22, v39, 16, v67
-; GFX11-TRUE16-NEXT: v_lshl_or_b32 v23, v38, 16, v66
-; GFX11-TRUE16-NEXT: v_lshl_or_b32 v24, v37, 16, v65
-; GFX11-TRUE16-NEXT: v_lshl_or_b32 v25, v36, 16, v64
-; GFX11-TRUE16-NEXT: v_lshl_or_b32 v26, v35, 16, v55
-; GFX11-TRUE16-NEXT: v_lshl_or_b32 v27, v34, 16, v54
-; GFX11-TRUE16-NEXT: v_lshl_or_b32 v28, v33, 16, v53
-; GFX11-TRUE16-NEXT: v_lshl_or_b32 v29, v32, 16, v52
-; GFX11-TRUE16-NEXT: v_dual_mov_b32 v0, s0 :: v_dual_mov_b32 v1, s1
-; GFX11-TRUE16-NEXT: v_dual_mov_b32 v2, s2 :: v_dual_mov_b32 v3, s3
-; GFX11-TRUE16-NEXT: v_dual_mov_b32 v4, s4 :: v_dual_mov_b32 v5, s5
-; GFX11-TRUE16-NEXT: v_dual_mov_b32 v6, s6 :: v_dual_mov_b32 v7, s7
-; GFX11-TRUE16-NEXT: v_dual_mov_b32 v8, s8 :: v_dual_mov_b32 v9, s9
-; GFX11-TRUE16-NEXT: v_dual_mov_b32 v10, s10 :: v_dual_mov_b32 v11, s11
-; GFX11-TRUE16-NEXT: v_dual_mov_b32 v12, s12 :: v_dual_mov_b32 v13, s13
-; GFX11-TRUE16-NEXT: v_dual_mov_b32 v14, s14 :: v_dual_mov_b32 v15, s16
-; GFX11-TRUE16-NEXT: v_dual_mov_b32 v16, s17 :: v_dual_mov_b32 v17, s18
-; GFX11-TRUE16-NEXT: s_and_not1_b32 vcc_lo, exec_lo, s15
+; GFX11-TRUE16-NEXT: v_dual_mov_b32 v0, s40 :: v_dual_mov_b32 v5, s0
+; GFX11-TRUE16-NEXT: v_dual_mov_b32 v2, s41 :: v_dual_mov_b32 v9, s1
+; GFX11-TRUE16-NEXT: v_dual_mov_b32 v14, s2 :: v_dual_mov_b32 v27, s4
+; GFX11-TRUE16-NEXT: v_dual_mov_b32 v20, s3 :: v_dual_mov_b32 v35, s5
+; GFX11-TRUE16-NEXT: v_dual_mov_b32 v44, s6 :: v_dual_mov_b32 v65, s8
+; GFX11-TRUE16-NEXT: v_dual_mov_b32 v54, s7 :: v_dual_mov_b32 v77, s9
+; GFX11-TRUE16-NEXT: v_dual_mov_b32 v90, s10 :: v_dual_mov_b32 v119, s12
+; GFX11-TRUE16-NEXT: v_dual_mov_b32 v104, s11 :: v_dual_mov_b32 v135, s13
+; GFX11-TRUE16-NEXT: v_mov_b32_e32 v152, s14
+; GFX11-TRUE16-NEXT: v_mov_b32_e32 v170, s15
+; GFX11-TRUE16-NEXT: s_and_not1_b32 vcc_lo, exec_lo, s42
; GFX11-TRUE16-NEXT: s_cbranch_vccnz .LBB19_3
; GFX11-TRUE16-NEXT: .LBB19_2: ; %cmp.true
-; GFX11-TRUE16-NEXT: v_lshl_or_b32 v18, v51, 16, v71
-; GFX11-TRUE16-NEXT: v_lshl_or_b32 v19, v50, 16, v70
-; GFX11-TRUE16-NEXT: v_lshl_or_b32 v20, v49, 16, v69
-; GFX11-TRUE16-NEXT: v_lshl_or_b32 v21, v48, 16, v68
-; GFX11-TRUE16-NEXT: v_lshl_or_b32 v22, v39, 16, v67
-; GFX11-TRUE16-NEXT: v_lshl_or_b32 v23, v38, 16, v66
-; GFX11-TRUE16-NEXT: v_lshl_or_b32 v24, v37, 16, v65
-; GFX11-TRUE16-NEXT: v_lshl_or_b32 v25, v36, 16, v64
-; GFX11-TRUE16-NEXT: v_lshl_or_b32 v26, v35, 16, v55
-; GFX11-TRUE16-NEXT: v_lshl_or_b32 v27, v34, 16, v54
-; GFX11-TRUE16-NEXT: v_lshl_or_b32 v28, v33, 16, v53
-; GFX11-TRUE16-NEXT: v_lshl_or_b32 v29, v32, 16, v52
-; GFX11-TRUE16-NEXT: v_pk_add_f16 v0, 0x200, s0 op_sel_hi:[0,1]
-; GFX11-TRUE16-NEXT: v_pk_add_f16 v1, 0x200, s1 op_sel_hi:[0,1]
-; GFX11-TRUE16-NEXT: v_pk_add_f16 v2, 0x200, s2 op_sel_hi:[0,1]
-; GFX11-TRUE16-NEXT: v_pk_add_f16 v3, 0x200, s3 op_sel_hi:[0,1]
-; GFX11-TRUE16-NEXT: v_pk_add_f16 v4, 0x200, s4 op_sel_hi:[0,1]
-; GFX11-TRUE16-NEXT: v_pk_add_f16 v5, 0x200, s5 op_sel_hi:[0,1]
-; GFX11-TRUE16-NEXT: v_pk_add_f16 v6, 0x200, s6 op_sel_hi:[0,1]
-; GFX11-TRUE16-NEXT: v_pk_add_f16 v7, 0x200, s7 op_sel_hi:[0,1]
-; GFX11-TRUE16-NEXT: v_pk_add_f16 v8, 0x200, s8 op_sel_hi:[0,1]
-; GFX11-TRUE16-NEXT: v_pk_add_f16 v9, 0x200, s9 op_sel_hi:[0,1]
-; GFX11-TRUE16-NEXT: v_pk_add_f16 v10, 0x200, s10 op_sel_hi:[0,1]
-; GFX11-TRUE16-NEXT: v_pk_add_f16 v11, 0x200, s11 op_sel_hi:[0,1]
-; GFX11-TRUE16-NEXT: v_pk_add_f16 v12, 0x200, s12 op_sel_hi:[0,1]
-; GFX11-TRUE16-NEXT: v_pk_add_f16 v13, 0x200, s13 op_sel_hi:[0,1]
-; GFX11-TRUE16-NEXT: v_pk_add_f16 v14, 0x200, s14 op_sel_hi:[0,1]
-; GFX11-TRUE16-NEXT: v_pk_add_f16 v15, 0x200, s16 op_sel_hi:[0,1]
-; GFX11-TRUE16-NEXT: v_pk_add_f16 v16, 0x200, s17 op_sel_hi:[0,1]
-; GFX11-TRUE16-NEXT: v_pk_add_f16 v17, 0x200, s18 op_sel_hi:[0,1]
-; GFX11-TRUE16-NEXT: v_pk_add_f16 v18, 0x200, v18 op_sel_hi:[0,1]
-; GFX11-TRUE16-NEXT: v_pk_add_f16 v19, 0x200, v19 op_sel_hi:[0,1]
-; GFX11-TRUE16-NEXT: v_pk_add_f16 v20, 0x200, v20 op_sel_hi:[0,1]
-; GFX11-TRUE16-NEXT: v_pk_add_f16 v21, 0x200, v21 op_sel_hi:[0,1]
-; GFX11-TRUE16-NEXT: v_pk_add_f16 v22, 0x200, v22 op_sel_hi:[0,1]
-; GFX11-TRUE16-NEXT: v_pk_add_f16 v23, 0x200, v23 op_sel_hi:[0,1]
-; GFX11-TRUE16-NEXT: v_pk_add_f16 v24, 0x200, v24 op_sel_hi:[0,1]
+; GFX11-TRUE16-NEXT: v_pk_add_f16 v0, 0x200, s40 op_sel_hi:[0,1]
+; GFX11-TRUE16-NEXT: v_pk_add_f16 v2, 0x200, s41 op_sel_hi:[0,1]
+; GFX11-TRUE16-NEXT: v_pk_add_f16 v189, 0x200, v189 op_sel_hi:[0,1]
+; GFX11-TRUE16-NEXT: v_pk_add_f16 v188, 0x200, v188 op_sel_hi:[0,1]
+; GFX11-TRUE16-NEXT: v_pk_add_f16 v187, 0x200, v187 op_sel_hi:[0,1]
+; GFX11-TRUE16-NEXT: v_pk_add_f16 v186, 0x200, v186 op_sel_hi:[0,1]
+; GFX11-TRUE16-NEXT: v_pk_add_f16 v185, 0x200, v185 op_sel_hi:[0,1]
+; GFX11-TRUE16-NEXT: v_pk_add_f16 v191, 0x200, v191 op_sel_hi:[0,1]
+; GFX11-TRUE16-NEXT: v_pk_add_f16 v190, 0x200, v190 op_sel_hi:[0,1]
; GFX11-TRUE16-NEXT: v_pk_add_f16 v25, 0x200, v25 op_sel_hi:[0,1]
; GFX11-TRUE16-NEXT: v_pk_add_f16 v26, 0x200, v26 op_sel_hi:[0,1]
-; GFX11-TRUE16-NEXT: v_pk_add_f16 v27, 0x200, v27 op_sel_hi:[0,1]
+; GFX11-TRUE16-NEXT: v_pk_add_f16 v30, 0x200, v30 op_sel_hi:[0,1]
; GFX11-TRUE16-NEXT: v_pk_add_f16 v28, 0x200, v28 op_sel_hi:[0,1]
; GFX11-TRUE16-NEXT: v_pk_add_f16 v29, 0x200, v29 op_sel_hi:[0,1]
+; GFX11-TRUE16-NEXT: v_pk_add_f16 v5, 0x200, s0 op_sel_hi:[0,1]
+; GFX11-TRUE16-NEXT: v_pk_add_f16 v9, 0x200, s1 op_sel_hi:[0,1]
+; GFX11-TRUE16-NEXT: v_pk_add_f16 v14, 0x200, s2 op_sel_hi:[0,1]
+; GFX11-TRUE16-NEXT: v_pk_add_f16 v20, 0x200, s3 op_sel_hi:[0,1]
+; GFX11-TRUE16-NEXT: v_pk_add_f16 v27, 0x200, s4 op_sel_hi:[0,1]
+; GFX11-TRUE16-NEXT: v_pk_add_f16 v35, 0x200, s5 op_sel_hi:[0,1]
+; GFX11-TRUE16-NEXT: v_pk_add_f16 v44, 0x200, s6 op_sel_hi:[0,1]
+; GFX11-TRUE16-NEXT: v_pk_add_f16 v54, 0x200, s7 op_sel_hi:[0,1]
+; GFX11-TRUE16-NEXT: v_pk_add_f16 v65, 0x200, s8 op_sel_hi:[0,1]
+; GFX11-TRUE16-NEXT: v_pk_add_f16 v77, 0x200, s9 op_sel_hi:[0,1]
+; GFX11-TRUE16-NEXT: v_pk_add_f16 v90, 0x200, s10 op_sel_hi:[0,1]
+; GFX11-TRUE16-NEXT: v_pk_add_f16 v104, 0x200, s11 op_sel_hi:[0,1]
+; GFX11-TRUE16-NEXT: v_pk_add_f16 v119, 0x200, s12 op_sel_hi:[0,1]
+; GFX11-TRUE16-NEXT: v_pk_add_f16 v135, 0x200, s13 op_sel_hi:[0,1]
+; GFX11-TRUE16-NEXT: v_pk_add_f16 v152, 0x200, s14 op_sel_hi:[0,1]
+; GFX11-TRUE16-NEXT: v_pk_add_f16 v170, 0x200, s15 op_sel_hi:[0,1]
; GFX11-TRUE16-NEXT: .LBB19_3: ; %end
+; GFX11-TRUE16-NEXT: v_dual_mov_b32 v1, v2 :: v_dual_mov_b32 v2, v5
+; GFX11-TRUE16-NEXT: v_dual_mov_b32 v5, v20 :: v_dual_mov_b32 v6, v27
+; GFX11-TRUE16-NEXT: v_dual_mov_b32 v7, v35 :: v_dual_mov_b32 v8, v44
+; GFX11-TRUE16-NEXT: v_dual_mov_b32 v11, v77 :: v_dual_mov_b32 v12, v90
+; GFX11-TRUE16-NEXT: v_mov_b32_e32 v13, v104
+; GFX11-TRUE16-NEXT: v_dual_mov_b32 v15, v135 :: v_dual_mov_b32 v16, v152
+; GFX11-TRUE16-NEXT: v_dual_mov_b32 v17, v170 :: v_dual_mov_b32 v18, v189
+; GFX11-TRUE16-NEXT: v_dual_mov_b32 v19, v188 :: v_dual_mov_b32 v20, v187
+; GFX11-TRUE16-NEXT: v_dual_mov_b32 v21, v186 :: v_dual_mov_b32 v22, v185
+; GFX11-TRUE16-NEXT: v_dual_mov_b32 v23, v191 :: v_dual_mov_b32 v24, v190
+; GFX11-TRUE16-NEXT: s_clause 0x1f
+; GFX11-TRUE16-NEXT: scratch_load_b32 v191, off, s32
+; GFX11-TRUE16-NEXT: scratch_load_b32 v190, off, s32 offset:4
+; GFX11-TRUE16-NEXT: scratch_load_b32 v189, off, s32 offset:8
+; GFX11-TRUE16-NEXT: scratch_load_b32 v188, off, s32 offset:12
+; GFX11-TRUE16-NEXT: scratch_load_b32 v187, off, s32 offset:16
+; GFX11-TRUE16-NEXT: scratch_load_b32 v186, off, s32 offset:20
+; GFX11-TRUE16-NEXT: scratch_load_b32 v185, off, s32 offset:24
+; GFX11-TRUE16-NEXT: scratch_load_b32 v184, off, s32 offset:28
+; GFX11-TRUE16-NEXT: scratch_load_b32 v175, off, s32 offset:32
+; GFX11-TRUE16-NEXT: scratch_load_b32 v174, off, s32 offset:36
+; GFX11-TRUE16-NEXT: scratch_load_b32 v173, off, s32 offset:40
+; GFX11-TRUE16-NEXT: scratch_load_b32 v172, off, s32 offset:44
+; GFX11-TRUE16-NEXT: scratch_load_b32 v171, off, s32 offset:48
+; GFX11-TRUE16-NEXT: scratch_load_b32 v170, off, s32 offset:52
+; GFX11-TRUE16-NEXT: scratch_load_b32 v169, off, s32 offset:56
+; GFX11-TRUE16-NEXT: scratch_load_b32 v168, off, s32 offset:60
+; GFX11-TRUE16-NEXT: scratch_load_b32 v159, off, s32 offset:64
+; GFX11-TRUE16-NEXT: scratch_load_b32 v158, off, s32 offset:68
+; GFX11-TRUE16-NEXT: scratch_load_b32 v157, off, s32 offset:72
+; GFX11-TRUE16-NEXT: scratch_load_b32 v156, off, s32 offset:76
+; GFX11-TRUE16-NEXT: scratch_load_b32 v155, off, s32 offset:80
+; GFX11-TRUE16-NEXT: scratch_load_b32 v154, off, s32 offset:84
+; GFX11-TRUE16-NEXT: scratch_load_b32 v153, off, s32 offset:88
+; GFX11-TRUE16-NEXT: scratch_load_b32 v152, off, s32 offset:92
+; GFX11-TRUE16-NEXT: scratch_load_b32 v143, off, s32 offset:96
+; GFX11-TRUE16-NEXT: scratch_load_b32 v142, off, s32 offset:100
+; GFX11-TRUE16-NEXT: scratch_load_b32 v141, off, s32 offset:104
+; GFX11-TRUE16-NEXT: scratch_load_b32 v140, off, s32 offset:108
+; GFX11-TRUE16-NEXT: scratch_load_b32 v139, off, s32 offset:112
+; GFX11-TRUE16-NEXT: scratch_load_b32 v138, off, s32 offset:116
+; GFX11-TRUE16-NEXT: scratch_load_b32 v137, off, s32 offset:120
+; GFX11-TRUE16-NEXT: scratch_load_b32 v136, off, s32 offset:124
+; GFX11-TRUE16-NEXT: s_clause 0x1f
+; GFX11-TRUE16-NEXT: scratch_load_b32 v127, off, s32 offset:128
+; GFX11-TRUE16-NEXT: scratch_load_b32 v126, off, s32 offset:132
+; GFX11-TRUE16-NEXT: scratch_load_b32 v125, off, s32 offset:136
+; GFX11-TRUE16-NEXT: scratch_load_b32 v124, off, s32 offset:140
+; GFX11-TRUE16-NEXT: scratch_load_b32 v123, off, s32 offset:144
+; GFX11-TRUE16-NEXT: scratch_load_b32 v122, off, s32 offset:148
+; GFX11-TRUE16-NEXT: scratch_load_b32 v121, off, s32 offset:152
+; GFX11-TRUE16-NEXT: scratch_load_b32 v120, off, s32 offset:156
+; GFX11-TRUE16-NEXT: scratch_load_b32 v111, off, s32 offset:160
+; GFX11-TRUE16-NEXT: scratch_load_b32 v110, off, s32 offset:164
+; GFX11-TRUE16-NEXT: scratch_load_b32 v109, off, s32 offset:168
+; GFX11-TRUE16-NEXT: scratch_load_b32 v108, off, s32 offset:172
+; GFX11-TRUE16-NEXT: scratch_load_b32 v107, off, s32 offset:176
+; GFX11-TRUE16-NEXT: scratch_load_b32 v106, off, s32 offset:180
+; GFX11-TRUE16-NEXT: scratch_load_b32 v105, off, s32 offset:184
+; GFX11-TRUE16-NEXT: scratch_load_b32 v104, off, s32 offset:188
+; GFX11-TRUE16-NEXT: scratch_load_b32 v95, off, s32 offset:192
+; GFX11-TRUE16-NEXT: scratch_load_b32 v94, off, s32 offset:196
+; GFX11-TRUE16-NEXT: scratch_load_b32 v93, off, s32 offset:200
+; GFX11-TRUE16-NEXT: scratch_load_b32 v92, off, s32 offset:204
+; GFX11-TRUE16-NEXT: scratch_load_b32 v91, off, s32 offset:208
+; GFX11-TRUE16-NEXT: scratch_load_b32 v90, off, s32 offset:212
+; GFX11-TRUE16-NEXT: scratch_load_b32 v89, off, s32 offset:216
+; GFX11-TRUE16-NEXT: scratch_load_b32 v88, off, s32 offset:220
+; GFX11-TRUE16-NEXT: scratch_load_b32 v79, off, s32 offset:224
+; GFX11-TRUE16-NEXT: scratch_load_b32 v78, off, s32 offset:228
+; GFX11-TRUE16-NEXT: scratch_load_b32 v77, off, s32 offset:232
+; GFX11-TRUE16-NEXT: scratch_load_b32 v76, off, s32 offset:236
+; GFX11-TRUE16-NEXT: scratch_load_b32 v75, off, s32 offset:240
+; GFX11-TRUE16-NEXT: scratch_load_b32 v74, off, s32 offset:244
+; GFX11-TRUE16-NEXT: scratch_load_b32 v73, off, s32 offset:248
+; GFX11-TRUE16-NEXT: scratch_load_b32 v72, off, s32 offset:252
+; GFX11-TRUE16-NEXT: s_clause 0xf
+; GFX11-TRUE16-NEXT: scratch_load_b32 v63, off, s32 offset:256
+; GFX11-TRUE16-NEXT: scratch_load_b32 v62, off, s32 offset:260
+; GFX11-TRUE16-NEXT: scratch_load_b32 v61, off, s32 offset:264
+; GFX11-TRUE16-NEXT: scratch_load_b32 v60, off, s32 offset:268
+; GFX11-TRUE16-NEXT: scratch_load_b32 v59, off, s32 offset:272
+; GFX11-TRUE16-NEXT: scratch_load_b32 v58, off, s32 offset:276
+; GFX11-TRUE16-NEXT: scratch_load_b32 v57, off, s32 offset:280
+; GFX11-TRUE16-NEXT: scratch_load_b32 v56, off, s32 offset:284
+; GFX11-TRUE16-NEXT: scratch_load_b32 v47, off, s32 offset:288
+; GFX11-TRUE16-NEXT: scratch_load_b32 v46, off, s32 offset:292
+; GFX11-TRUE16-NEXT: scratch_load_b32 v45, off, s32 offset:296
+; GFX11-TRUE16-NEXT: scratch_load_b32 v44, off, s32 offset:300
+; GFX11-TRUE16-NEXT: scratch_load_b32 v43, off, s32 offset:304
+; GFX11-TRUE16-NEXT: scratch_load_b32 v42, off, s32 offset:308
+; GFX11-TRUE16-NEXT: scratch_load_b32 v41, off, s32 offset:312
+; GFX11-TRUE16-NEXT: scratch_load_b32 v40, off, s32 offset:316
+; GFX11-TRUE16-NEXT: v_dual_mov_b32 v3, v9 :: v_dual_mov_b32 v4, v14
+; GFX11-TRUE16-NEXT: v_dual_mov_b32 v9, v54 :: v_dual_mov_b32 v10, v65
+; GFX11-TRUE16-NEXT: v_dual_mov_b32 v14, v119 :: v_dual_mov_b32 v27, v30
+; GFX11-TRUE16-NEXT: s_waitcnt vmcnt(0)
; GFX11-TRUE16-NEXT: s_setpc_b64 s[30:31]
; GFX11-TRUE16-NEXT: .LBB19_4:
+; GFX11-TRUE16-NEXT: v_dual_mov_b32 v64, v29 :: v_dual_mov_b32 v65, v28
+; GFX11-TRUE16-NEXT: v_mov_b32_e32 v66, v30
+; GFX11-TRUE16-NEXT: v_dual_mov_b32 v53, v26 :: v_dual_mov_b32 v54, v25
; GFX11-TRUE16-NEXT: ; implicit-def: $vgpr0_vgpr1_vgpr2_vgpr3_vgpr4_vgpr5_vgpr6_vgpr7_vgpr8_vgpr9_vgpr10_vgpr11_vgpr12_vgpr13_vgpr14_vgpr15_vgpr16_vgpr17_vgpr18_vgpr19_vgpr20_vgpr21_vgpr22_vgpr23_vgpr24_vgpr25_vgpr26_vgpr27_vgpr28_vgpr29_vgpr30_vgpr31
+; GFX11-TRUE16-NEXT: ; implicit-def: $vgpr1_vgpr2_vgpr3_vgpr4_vgpr5_vgpr6_vgpr7_vgpr8_vgpr9_vgpr10_vgpr11_vgpr12_vgpr13_vgpr14_vgpr15_vgpr16_vgpr17_vgpr18_vgpr19_vgpr20_vgpr21_vgpr22_vgpr23_vgpr24_vgpr25_vgpr26_vgpr27_vgpr28_vgpr29_vgpr30_vgpr31_vgpr32
+; GFX11-TRUE16-NEXT: ; implicit-def: $vgpr3_vgpr4_vgpr5_vgpr6_vgpr7_vgpr8_vgpr9_vgpr10_vgpr11_vgpr12_vgpr13_vgpr14_vgpr15_vgpr16_vgpr17_vgpr18_vgpr19_vgpr20_vgpr21_vgpr22_vgpr23_vgpr24_vgpr25_vgpr26_vgpr27_vgpr28_vgpr29_vgpr30_vgpr31_vgpr32_vgpr33_vgpr34
+; GFX11-TRUE16-NEXT: ; implicit-def: $vgpr6_vgpr7_vgpr8_vgpr9_vgpr10_vgpr11_vgpr12_vgpr13_vgpr14_vgpr15_vgpr16_vgpr17_vgpr18_vgpr19_vgpr20_vgpr21_vgpr22_vgpr23_vgpr24_vgpr25_vgpr26_vgpr27_vgpr28_vgpr29_vgpr30_vgpr31_vgpr32_vgpr33_vgpr34_vgpr35_vgpr36_vgpr37
+; GFX11-TRUE16-NEXT: ; implicit-def: $vgpr10_vgpr11_vgpr12_vgpr13_vgpr14_vgpr15_vgpr16_vgpr17_vgpr18_vgpr19_vgpr20_vgpr21_vgpr22_vgpr23_vgpr24_vgpr25_vgpr26_vgpr27_vgpr28_vgpr29_vgpr30_vgpr31_vgpr32_vgpr33_vgpr34_vgpr35_vgpr36_vgpr37_vgpr38_vgpr39_vgpr40_vgpr41
+; GFX11-TRUE16-NEXT: ; implicit-def: $vgpr15_vgpr16_vgpr17_vgpr18_vgpr19_vgpr20_vgpr21_vgpr22_vgpr23_vgpr24_vgpr25_vgpr26_vgpr27_vgpr28_vgpr29_vgpr30_vgpr31_vgpr32_vgpr33_vgpr34_vgpr35_vgpr36_vgpr37_vgpr38_vgpr39_vgpr40_vgpr41_vgpr42_vgpr43_vgpr44_vgpr45_vgpr46
+; GFX11-TRUE16-NEXT: ; implicit-def: $vgpr21_vgpr22_vgpr23_vgpr24_vgpr25_vgpr26_vgpr27_vgpr28_vgpr29_vgpr30_vgpr31_vgpr32_vgpr33_vgpr34_vgpr35_vgpr36_vgpr37_vgpr38_vgpr39_vgpr40_vgpr41_vgpr42_vgpr43_vgpr44_vgpr45_vgpr46_vgpr47_vgpr48_vgpr49_vgpr50_vgpr51_vgpr52
+; GFX11-TRUE16-NEXT: s_delay_alu instid0(VALU_DEP_1) | instskip(NEXT) | instid1(VALU_DEP_4)
+; GFX11-TRUE16-NEXT: v_dual_mov_b32 v25, v54 :: v_dual_mov_b32 v26, v53
+; GFX11-TRUE16-NEXT: ; implicit-def: $vgpr28_vgpr29_vgpr30_vgpr31_vgpr32_vgpr33_vgpr34_vgpr35_vgpr36_vgpr37_vgpr38_vgpr39_vgpr40_vgpr41_vgpr42_vgpr43_vgpr44_vgpr45_vgpr46_vgpr47_vgpr48_vgpr49_vgpr50_vgpr51_vgpr52_vgpr53_vgpr54_vgpr55_vgpr56_vgpr57_vgpr58_vgpr59
+; GFX11-TRUE16-NEXT: v_dual_mov_b32 v28, v65 :: v_dual_mov_b32 v29, v64
+; GFX11-TRUE16-NEXT: s_delay_alu instid0(VALU_DEP_4)
+; GFX11-TRUE16-NEXT: v_mov_b32_e32 v30, v66
+; GFX11-TRUE16-NEXT: ; implicit-def: $vgpr36_vgpr37_vgpr38_vgpr39_vgpr40_vgpr41_vgpr42_vgpr43_vgpr44_vgpr45_vgpr46_vgpr47_vgpr48_vgpr49_vgpr50_vgpr51_vgpr52_vgpr53_vgpr54_vgpr55_vgpr56_vgpr57_vgpr58_vgpr59_vgpr60_vgpr61_vgpr62_vgpr63_vgpr64_vgpr65_vgpr66_vgpr67
+; GFX11-TRUE16-NEXT: ; implicit-def: $vgpr45_vgpr46_vgpr47_vgpr48_vgpr49_vgpr50_vgpr51_vgpr52_vgpr53_vgpr54_vgpr55_vgpr56_vgpr57_vgpr58_vgpr59_vgpr60_vgpr61_vgpr62_vgpr63_vgpr64_vgpr65_vgpr66_vgpr67_vgpr68_vgpr69_vgpr70_vgpr71_vgpr72_vgpr73_vgpr74_vgpr75_vgpr76
+; GFX11-TRUE16-NEXT: ; implicit-def: $vgpr55_vgpr56_vgpr57_vgpr58_vgpr59_vgpr60_vgpr61_vgpr62_vgpr63_vgpr64_vgpr65_vgpr66_vgpr67_vgpr68_vgpr69_vgpr70_vgpr71_vgpr72_vgpr73_vgpr74_vgpr75_vgpr76_vgpr77_vgpr78_vgpr79_vgpr80_vgpr81_vgpr82_vgpr83_vgpr84_vgpr85_vgpr86
+; GFX11-TRUE16-NEXT: ; implicit-def: $vgpr66_vgpr67_vgpr68_vgpr69_vgpr70_vgpr71_vgpr72_vgpr73_vgpr74_vgpr75_vgpr76_vgpr77_vgpr78_vgpr79_vgpr80_vgpr81_vgpr82_vgpr83_vgpr84_vgpr85_vgpr86_vgpr87_vgpr88_vgpr89_vgpr90_vgpr91_vgpr92_vgpr93_vgpr94_vgpr95_vgpr96_vgpr97
+; GFX11-TRUE16-NEXT: ; implicit-def: $vgpr78_vgpr79_vgpr80_vgpr81_vgpr82_vgpr83_vgpr84_vgpr85_vgpr86_vgpr87_vgpr88_vgpr89_vgpr90_vgpr91_vgpr92_vgpr93_vgpr94_vgpr95_vgpr96_vgpr97_vgpr98_vgpr99_vgpr100_vgpr101_vgpr102_vgpr103_vgpr104_vgpr105_vgpr106_vgpr107_vgpr108_vgpr109
+; GFX11-TRUE16-NEXT: ; implicit-def: $vgpr91_vgpr92_vgpr93_vgpr94_vgpr95_vgpr96_vgpr97_vgpr98_vgpr99_vgpr100_vgpr101_vgpr102_vgpr103_vgpr104_vgpr105_vgpr106_vgpr107_vgpr108_vgpr109_vgpr110_vgpr111_vgpr112_vgpr113_vgpr114_vgpr115_vgpr116_vgpr117_vgpr118_vgpr119_vgpr120_vgpr121_vgpr122
+; GFX11-TRUE16-NEXT: ; implicit-def: $vgpr105_vgpr106_vgpr107_vgpr108_vgpr109_vgpr110_vgpr111_vgpr112_vgpr113_vgpr114_vgpr115_vgpr116_vgpr117_vgpr118_vgpr119_vgpr120_vgpr121_vgpr122_vgpr123_vgpr124_vgpr125_vgpr126_vgpr127_vgpr128_vgpr129_vgpr130_vgpr131_vgpr132_vgpr133_vgpr134_vgpr135_vgpr136
+; GFX11-TRUE16-NEXT: ; implicit-def: $vgpr120_vgpr121_vgpr122_vgpr123_vgpr124_vgpr125_vgpr126_vgpr127_vgpr128_vgpr129_vgpr130_vgpr131_vgpr132_vgpr133_vgpr134_vgpr135_vgpr136_vgpr137_vgpr138_vgpr139_vgpr140_vgpr141_vgpr142_vgpr143_vgpr144_vgpr145_vgpr146_vgpr147_vgpr148_vgpr149_vgpr150_vgpr151
+; GFX11-TRUE16-NEXT: ; implicit-def: $vgpr136_vgpr137_vgpr138_vgpr139_vgpr140_vgpr141_vgpr142_vgpr143_vgpr144_vgpr145_vgpr146_vgpr147_vgpr148_vgpr149_vgpr150_vgpr151_vgpr152_vgpr153_vgpr154_vgpr155_vgpr156_vgpr157_vgpr158_vgpr159_vgpr160_vgpr161_vgpr162_vgpr163_vgpr164_vgpr165_vgpr166_vgpr167
+; GFX11-TRUE16-NEXT: ; implicit-def: $vgpr153_vgpr154_vgpr155_vgpr156_vgpr157_vgpr158_vgpr159_vgpr160_vgpr161_vgpr162_vgpr163_vgpr164_vgpr165_vgpr166_vgpr167_vgpr168_vgpr169_vgpr170_vgpr171_vgpr172_vgpr173_vgpr174_vgpr175_vgpr176_vgpr177_vgpr178_vgpr179_vgpr180_vgpr181_vgpr182_vgpr183_vgpr184
; GFX11-TRUE16-NEXT: s_branch .LBB19_2
;
; GFX11-FAKE16-LABEL: bitcast_v60f16_to_v30i32_scalar:
@@ -16802,204 +17106,388 @@ define inreg <60 x i16> @bitcast_v30f32_to_v60i16_scalar(<30 x float> inreg %a,
; GFX9-NEXT: ; implicit-def: $vgpr54
; GFX9-NEXT: s_branch .LBB29_2
;
-; GFX11-LABEL: bitcast_v30f32_to_v60i16_scalar:
-; GFX11: ; %bb.0:
-; GFX11-NEXT: s_waitcnt vmcnt(0) expcnt(0) lgkmcnt(0)
-; GFX11-NEXT: v_cmp_ne_u32_e32 vcc_lo, 0, v12
-; GFX11-NEXT: v_dual_mov_b32 v30, s0 :: v_dual_mov_b32 v29, s1
-; GFX11-NEXT: v_dual_mov_b32 v28, s2 :: v_dual_mov_b32 v27, s3
-; GFX11-NEXT: v_dual_mov_b32 v26, s16 :: v_dual_mov_b32 v25, s17
-; GFX11-NEXT: v_dual_mov_b32 v24, s18 :: v_dual_mov_b32 v23, s19
-; GFX11-NEXT: v_dual_mov_b32 v22, s20 :: v_dual_mov_b32 v21, s21
-; GFX11-NEXT: v_dual_mov_b32 v20, s22 :: v_dual_mov_b32 v19, s23
-; GFX11-NEXT: v_dual_mov_b32 v18, s24 :: v_dual_mov_b32 v13, s26
-; GFX11-NEXT: v_dual_mov_b32 v14, s25 :: v_dual_mov_b32 v15, s27
-; GFX11-NEXT: v_dual_mov_b32 v17, s28 :: v_dual_mov_b32 v16, s29
-; GFX11-NEXT: s_mov_b32 s0, 0
-; GFX11-NEXT: s_and_b32 s1, vcc_lo, exec_lo
-; GFX11-NEXT: s_cbranch_scc0 .LBB29_4
-; GFX11-NEXT: ; %bb.1: ; %cmp.false
-; GFX11-NEXT: v_lshrrev_b32_e32 v50, 16, v11
-; GFX11-NEXT: v_lshrrev_b32_e32 v51, 16, v10
-; GFX11-NEXT: v_lshrrev_b32_e32 v52, 16, v9
-; GFX11-NEXT: v_lshrrev_b32_e32 v53, 16, v8
-; GFX11-NEXT: v_lshrrev_b32_e32 v54, 16, v7
-; GFX11-NEXT: v_lshrrev_b32_e32 v55, 16, v6
-; GFX11-NEXT: v_lshrrev_b32_e32 v64, 16, v5
-; GFX11-NEXT: v_lshrrev_b32_e32 v65, 16, v4
-; GFX11-NEXT: v_lshrrev_b32_e32 v66, 16, v3
-; GFX11-NEXT: v_lshrrev_b32_e32 v67, 16, v2
-; GFX11-NEXT: v_lshrrev_b32_e32 v68, 16, v1
-; GFX11-NEXT: v_lshrrev_b32_e32 v69, 16, v0
-; GFX11-NEXT: v_lshrrev_b32_e32 v70, 16, v16
-; GFX11-NEXT: v_lshrrev_b32_e32 v71, 16, v17
-; GFX11-NEXT: v_lshrrev_b32_e32 v80, 16, v15
-; GFX11-NEXT: v_lshrrev_b32_e32 v81, 16, v13
-; GFX11-NEXT: v_lshrrev_b32_e32 v82, 16, v14
-; GFX11-NEXT: v_lshrrev_b32_e32 v12, 16, v18
-; GFX11-NEXT: v_lshrrev_b32_e32 v35, 16, v19
-; GFX11-NEXT: v_lshrrev_b32_e32 v34, 16, v20
-; GFX11-NEXT: v_lshrrev_b32_e32 v33, 16, v21
-; GFX11-NEXT: v_lshrrev_b32_e32 v32, 16, v22
-; GFX11-NEXT: v_lshrrev_b32_e32 v31, 16, v23
-; GFX11-NEXT: v_lshrrev_b32_e32 v83, 16, v24
-; GFX11-NEXT: v_lshrrev_b32_e32 v49, 16, v25
-; GFX11-NEXT: v_lshrrev_b32_e32 v48, 16, v26
-; GFX11-NEXT: v_lshrrev_b32_e32 v39, 16, v27
-; GFX11-NEXT: v_lshrrev_b32_e32 v38, 16, v28
-; GFX11-NEXT: v_lshrrev_b32_e32 v37, 16, v29
-; GFX11-NEXT: v_lshrrev_b32_e32 v36, 16, v30
-; GFX11-NEXT: s_and_not1_b32 vcc_lo, exec_lo, s0
-; GFX11-NEXT: s_cbranch_vccnz .LBB29_3
-; GFX11-NEXT: .LBB29_2: ; %cmp.true
-; GFX11-NEXT: v_dual_add_f32 v11, 1.0, v11 :: v_dual_add_f32 v10, 1.0, v10
-; GFX11-NEXT: v_dual_add_f32 v9, 1.0, v9 :: v_dual_add_f32 v8, 1.0, v8
-; GFX11-NEXT: v_dual_add_f32 v7, 1.0, v7 :: v_dual_add_f32 v6, 1.0, v6
-; GFX11-NEXT: v_dual_add_f32 v5, 1.0, v5 :: v_dual_add_f32 v4, 1.0, v4
-; GFX11-NEXT: v_dual_add_f32 v3, 1.0, v3 :: v_dual_add_f32 v2, 1.0, v2
-; GFX11-NEXT: v_dual_add_f32 v1, 1.0, v1 :: v_dual_add_f32 v0, 1.0, v0
-; GFX11-NEXT: v_dual_add_f32 v16, 1.0, v16 :: v_dual_add_f32 v17, 1.0, v17
-; GFX11-NEXT: v_dual_add_f32 v15, 1.0, v15 :: v_dual_add_f32 v14, 1.0, v14
-; GFX11-NEXT: v_dual_add_f32 v13, 1.0, v13 :: v_dual_add_f32 v18, 1.0, v18
-; GFX11-NEXT: v_dual_add_f32 v19, 1.0, v19 :: v_dual_add_f32 v20, 1.0, v20
-; GFX11-NEXT: v_dual_add_f32 v21, 1.0, v21 :: v_dual_add_f32 v22, 1.0, v22
-; GFX11-NEXT: v_dual_add_f32 v23, 1.0, v23 :: v_dual_add_f32 v24, 1.0, v24
-; GFX11-NEXT: v_dual_add_f32 v25, 1.0, v25 :: v_dual_add_f32 v26, 1.0, v26
-; GFX11-NEXT: v_dual_add_f32 v27, 1.0, v27 :: v_dual_add_f32 v28, 1.0, v28
-; GFX11-NEXT: v_dual_add_f32 v29, 1.0, v29 :: v_dual_add_f32 v30, 1.0, v30
-; GFX11-NEXT: v_lshrrev_b32_e32 v50, 16, v11
-; GFX11-NEXT: v_lshrrev_b32_e32 v51, 16, v10
-; GFX11-NEXT: v_lshrrev_b32_e32 v52, 16, v9
-; GFX11-NEXT: v_lshrrev_b32_e32 v53, 16, v8
-; GFX11-NEXT: v_lshrrev_b32_e32 v54, 16, v7
-; GFX11-NEXT: v_lshrrev_b32_e32 v55, 16, v6
-; GFX11-NEXT: v_lshrrev_b32_e32 v64, 16, v5
-; GFX11-NEXT: v_lshrrev_b32_e32 v65, 16, v4
-; GFX11-NEXT: v_lshrrev_b32_e32 v66, 16, v3
-; GFX11-NEXT: v_lshrrev_b32_e32 v67, 16, v2
-; GFX11-NEXT: v_lshrrev_b32_e32 v68, 16, v1
-; GFX11-NEXT: v_lshrrev_b32_e32 v69, 16, v0
-; GFX11-NEXT: v_lshrrev_b32_e32 v70, 16, v16
-; GFX11-NEXT: v_lshrrev_b32_e32 v71, 16, v17
-; GFX11-NEXT: v_lshrrev_b32_e32 v80, 16, v15
-; GFX11-NEXT: v_lshrrev_b32_e32 v81, 16, v13
-; GFX11-NEXT: v_lshrrev_b32_e32 v82, 16, v14
-; GFX11-NEXT: v_lshrrev_b32_e32 v12, 16, v18
-; GFX11-NEXT: v_lshrrev_b32_e32 v35, 16, v19
-; GFX11-NEXT: v_lshrrev_b32_e32 v34, 16, v20
-; GFX11-NEXT: v_lshrrev_b32_e32 v33, 16, v21
-; GFX11-NEXT: v_lshrrev_b32_e32 v32, 16, v22
-; GFX11-NEXT: v_lshrrev_b32_e32 v31, 16, v23
-; GFX11-NEXT: v_lshrrev_b32_e32 v83, 16, v24
-; GFX11-NEXT: v_lshrrev_b32_e32 v49, 16, v25
-; GFX11-NEXT: v_lshrrev_b32_e32 v48, 16, v26
-; GFX11-NEXT: v_lshrrev_b32_e32 v39, 16, v27
-; GFX11-NEXT: v_lshrrev_b32_e32 v38, 16, v28
-; GFX11-NEXT: v_lshrrev_b32_e32 v37, 16, v29
-; GFX11-NEXT: v_lshrrev_b32_e32 v36, 16, v30
-; GFX11-NEXT: .LBB29_3: ; %end
-; GFX11-NEXT: v_and_b32_e32 v21, 0xffff, v21
-; GFX11-NEXT: v_and_b32_e32 v19, 0xffff, v19
-; GFX11-NEXT: v_and_b32_e32 v18, 0xffff, v18
-; GFX11-NEXT: v_and_b32_e32 v1, 0xffff, v1
-; GFX11-NEXT: v_and_b32_e32 v29, 0xffff, v29
-; GFX11-NEXT: v_and_b32_e32 v27, 0xffff, v27
-; GFX11-NEXT: v_and_b32_e32 v25, 0xffff, v25
-; GFX11-NEXT: v_and_b32_e32 v23, 0xffff, v23
-; GFX11-NEXT: v_lshl_or_b32 v33, v33, 16, v21
-; GFX11-NEXT: v_and_b32_e32 v14, 0xffff, v14
-; GFX11-NEXT: v_and_b32_e32 v21, 0xffff, v13
-; GFX11-NEXT: v_lshl_or_b32 v35, v35, 16, v19
-; GFX11-NEXT: v_lshl_or_b32 v12, v12, 16, v18
-; GFX11-NEXT: v_and_b32_e32 v17, 0xffff, v17
-; GFX11-NEXT: v_and_b32_e32 v18, 0xffff, v16
-; GFX11-NEXT: v_and_b32_e32 v0, 0xffff, v0
-; GFX11-NEXT: v_lshl_or_b32 v19, v68, 16, v1
-; GFX11-NEXT: v_and_b32_e32 v1, 0xffff, v3
-; GFX11-NEXT: v_lshl_or_b32 v37, v37, 16, v29
-; GFX11-NEXT: v_and_b32_e32 v30, 0xffff, v30
-; GFX11-NEXT: v_lshl_or_b32 v39, v39, 16, v27
-; GFX11-NEXT: v_and_b32_e32 v28, 0xffff, v28
-; GFX11-NEXT: v_and_b32_e32 v22, 0xffff, v22
-; GFX11-NEXT: v_lshl_or_b32 v49, v49, 16, v25
-; GFX11-NEXT: v_and_b32_e32 v26, 0xffff, v26
-; GFX11-NEXT: v_lshl_or_b32 v31, v31, 16, v23
-; GFX11-NEXT: v_and_b32_e32 v24, 0xffff, v24
-; GFX11-NEXT: v_and_b32_e32 v20, 0xffff, v20
-; GFX11-NEXT: v_lshl_or_b32 v13, v82, 16, v14
-; GFX11-NEXT: v_lshl_or_b32 v14, v81, 16, v21
-; GFX11-NEXT: v_lshl_or_b32 v16, v71, 16, v17
-; GFX11-NEXT: v_lshl_or_b32 v17, v70, 16, v18
-; GFX11-NEXT: v_lshl_or_b32 v18, v69, 16, v0
-; GFX11-NEXT: v_and_b32_e32 v0, 0xffff, v2
-; GFX11-NEXT: v_and_b32_e32 v2, 0xffff, v4
-; GFX11-NEXT: v_and_b32_e32 v3, 0xffff, v5
-; GFX11-NEXT: v_and_b32_e32 v4, 0xffff, v6
-; GFX11-NEXT: v_lshl_or_b32 v21, v66, 16, v1
-; GFX11-NEXT: v_and_b32_e32 v1, 0xffff, v8
-; GFX11-NEXT: v_lshl_or_b32 v38, v38, 16, v28
-; GFX11-NEXT: v_lshl_or_b32 v32, v32, 16, v22
-; GFX11-NEXT: v_lshl_or_b32 v34, v34, 16, v20
-; GFX11-NEXT: v_and_b32_e32 v15, 0xffff, v15
-; GFX11-NEXT: v_lshl_or_b32 v20, v67, 16, v0
-; GFX11-NEXT: v_lshl_or_b32 v22, v65, 16, v2
-; GFX11-NEXT: v_lshl_or_b32 v23, v64, 16, v3
-; GFX11-NEXT: v_and_b32_e32 v0, 0xffff, v7
-; GFX11-NEXT: v_and_b32_e32 v2, 0xffff, v9
-; GFX11-NEXT: v_and_b32_e32 v3, 0xffff, v10
-; GFX11-NEXT: v_mov_b32_e32 v5, v49
-; GFX11-NEXT: v_lshl_or_b32 v48, v48, 16, v26
-; GFX11-NEXT: v_lshl_or_b32 v26, v53, 16, v1
-; GFX11-NEXT: v_mov_b32_e32 v1, v37
-; GFX11-NEXT: v_lshl_or_b32 v36, v36, 16, v30
-; GFX11-NEXT: v_mov_b32_e32 v7, v31
-; GFX11-NEXT: v_lshl_or_b32 v30, v83, 16, v24
-; GFX11-NEXT: v_lshl_or_b32 v24, v55, 16, v4
-; GFX11-NEXT: v_and_b32_e32 v4, 0xffff, v11
-; GFX11-NEXT: v_lshl_or_b32 v15, v80, 16, v15
-; GFX11-NEXT: v_lshl_or_b32 v25, v54, 16, v0
-; GFX11-NEXT: v_lshl_or_b32 v27, v52, 16, v2
-; GFX11-NEXT: v_lshl_or_b32 v28, v51, 16, v3
-; GFX11-NEXT: v_lshl_or_b32 v29, v50, 16, v4
-; GFX11-NEXT: v_mov_b32_e32 v0, v36
-; GFX11-NEXT: v_dual_mov_b32 v2, v38 :: v_dual_mov_b32 v3, v39
-; GFX11-NEXT: v_mov_b32_e32 v4, v48
-; GFX11-NEXT: v_mov_b32_e32 v6, v30
-; GFX11-NEXT: v_dual_mov_b32 v8, v32 :: v_dual_mov_b32 v9, v33
-; GFX11-NEXT: v_dual_mov_b32 v10, v34 :: v_dual_mov_b32 v11, v35
-; GFX11-NEXT: s_setpc_b64 s[30:31]
-; GFX11-NEXT: .LBB29_4:
-; GFX11-NEXT: ; implicit-def: $vgpr36
-; GFX11-NEXT: ; implicit-def: $vgpr37
-; GFX11-NEXT: ; implicit-def: $vgpr38
-; GFX11-NEXT: ; implicit-def: $vgpr39
-; GFX11-NEXT: ; implicit-def: $vgpr48
-; GFX11-NEXT: ; implicit-def: $vgpr49
-; GFX11-NEXT: ; implicit-def: $vgpr83
-; GFX11-NEXT: ; implicit-def: $vgpr31
-; GFX11-NEXT: ; implicit-def: $vgpr32
-; GFX11-NEXT: ; implicit-def: $vgpr33
-; GFX11-NEXT: ; implicit-def: $vgpr34
-; GFX11-NEXT: ; implicit-def: $vgpr35
-; GFX11-NEXT: ; implicit-def: $vgpr12
-; GFX11-NEXT: ; implicit-def: $vgpr82
-; GFX11-NEXT: ; implicit-def: $vgpr81
-; GFX11-NEXT: ; implicit-def: $vgpr80
-; GFX11-NEXT: ; implicit-def: $vgpr71
-; GFX11-NEXT: ; implicit-def: $vgpr70
-; GFX11-NEXT: ; implicit-def: $vgpr69
-; GFX11-NEXT: ; implicit-def: $vgpr68
-; GFX11-NEXT: ; implicit-def: $vgpr67
-; GFX11-NEXT: ; implicit-def: $vgpr66
-; GFX11-NEXT: ; implicit-def: $vgpr65
-; GFX11-NEXT: ; implicit-def: $vgpr64
-; GFX11-NEXT: ; implicit-def: $vgpr55
-; GFX11-NEXT: ; implicit-def: $vgpr54
-; GFX11-NEXT: ; implicit-def: $vgpr53
-; GFX11-NEXT: ; implicit-def: $vgpr52
-; GFX11-NEXT: ; implicit-def: $vgpr51
-; GFX11-NEXT: ; implicit-def: $vgpr50
-; GFX11-NEXT: s_branch .LBB29_2
+; GFX11-TRUE16-LABEL: bitcast_v30f32_to_v60i16_scalar:
+; GFX11-TRUE16: ; %bb.0:
+; GFX11-TRUE16-NEXT: s_waitcnt vmcnt(0) expcnt(0) lgkmcnt(0)
+; GFX11-TRUE16-NEXT: v_dual_mov_b32 v16, v12 :: v_dual_mov_b32 v29, v11
+; GFX11-TRUE16-NEXT: v_dual_mov_b32 v28, v10 :: v_dual_mov_b32 v27, v9
+; GFX11-TRUE16-NEXT: v_dual_mov_b32 v26, v8 :: v_dual_mov_b32 v25, v7
+; GFX11-TRUE16-NEXT: s_delay_alu instid0(VALU_DEP_3)
+; GFX11-TRUE16-NEXT: v_cmp_ne_u32_e32 vcc_lo, 0, v16
+; GFX11-TRUE16-NEXT: v_dual_mov_b32 v24, v6 :: v_dual_mov_b32 v23, v5
+; GFX11-TRUE16-NEXT: v_dual_mov_b32 v22, v4 :: v_dual_mov_b32 v21, v3
+; GFX11-TRUE16-NEXT: v_dual_mov_b32 v20, v2 :: v_dual_mov_b32 v19, v1
+; GFX11-TRUE16-NEXT: v_dual_mov_b32 v18, v0 :: v_dual_mov_b32 v1, s1
+; GFX11-TRUE16-NEXT: v_dual_mov_b32 v0, s0 :: v_dual_mov_b32 v3, s3
+; GFX11-TRUE16-NEXT: v_dual_mov_b32 v2, s2 :: v_dual_mov_b32 v5, s17
+; GFX11-TRUE16-NEXT: v_dual_mov_b32 v4, s16 :: v_dual_mov_b32 v7, s19
+; GFX11-TRUE16-NEXT: v_dual_mov_b32 v6, s18 :: v_dual_mov_b32 v9, s21
+; GFX11-TRUE16-NEXT: v_dual_mov_b32 v8, s20 :: v_dual_mov_b32 v11, s23
+; GFX11-TRUE16-NEXT: v_dual_mov_b32 v10, s22 :: v_dual_mov_b32 v13, s25
+; GFX11-TRUE16-NEXT: v_dual_mov_b32 v12, s24 :: v_dual_mov_b32 v15, s27
+; GFX11-TRUE16-NEXT: v_dual_mov_b32 v14, s26 :: v_dual_mov_b32 v17, s29
+; GFX11-TRUE16-NEXT: v_mov_b32_e32 v16, s28
+; GFX11-TRUE16-NEXT: s_mov_b32 s0, 0
+; GFX11-TRUE16-NEXT: s_and_b32 s1, vcc_lo, exec_lo
+; GFX11-TRUE16-NEXT: s_cbranch_scc0 .LBB29_4
+; GFX11-TRUE16-NEXT: ; %bb.1: ; %cmp.false
+; GFX11-TRUE16-NEXT: v_lshrrev_b32_e32 v30, 16, v29
+; GFX11-TRUE16-NEXT: v_lshrrev_b32_e32 v31, 16, v28
+; GFX11-TRUE16-NEXT: v_lshrrev_b32_e32 v32, 16, v27
+; GFX11-TRUE16-NEXT: v_lshrrev_b32_e32 v33, 16, v26
+; GFX11-TRUE16-NEXT: v_lshrrev_b32_e32 v34, 16, v25
+; GFX11-TRUE16-NEXT: v_lshrrev_b32_e32 v35, 16, v24
+; GFX11-TRUE16-NEXT: v_lshrrev_b32_e32 v36, 16, v23
+; GFX11-TRUE16-NEXT: v_lshrrev_b32_e32 v37, 16, v22
+; GFX11-TRUE16-NEXT: v_lshrrev_b32_e32 v38, 16, v21
+; GFX11-TRUE16-NEXT: v_lshrrev_b32_e32 v39, 16, v20
+; GFX11-TRUE16-NEXT: v_lshrrev_b32_e32 v48, 16, v19
+; GFX11-TRUE16-NEXT: v_lshrrev_b32_e32 v49, 16, v18
+; GFX11-TRUE16-NEXT: v_lshrrev_b32_e32 v50, 16, v17
+; GFX11-TRUE16-NEXT: v_lshrrev_b32_e32 v51, 16, v16
+; GFX11-TRUE16-NEXT: v_lshrrev_b32_e32 v52, 16, v15
+; GFX11-TRUE16-NEXT: v_lshrrev_b32_e32 v53, 16, v14
+; GFX11-TRUE16-NEXT: v_lshrrev_b32_e32 v54, 16, v13
+; GFX11-TRUE16-NEXT: v_lshrrev_b32_e32 v55, 16, v12
+; GFX11-TRUE16-NEXT: v_lshrrev_b32_e32 v64, 16, v11
+; GFX11-TRUE16-NEXT: v_lshrrev_b32_e32 v65, 16, v10
+; GFX11-TRUE16-NEXT: v_lshrrev_b32_e32 v66, 16, v9
+; GFX11-TRUE16-NEXT: v_lshrrev_b32_e32 v67, 16, v8
+; GFX11-TRUE16-NEXT: v_lshrrev_b32_e32 v68, 16, v7
+; GFX11-TRUE16-NEXT: v_lshrrev_b32_e32 v69, 16, v6
+; GFX11-TRUE16-NEXT: v_lshrrev_b32_e32 v70, 16, v5
+; GFX11-TRUE16-NEXT: v_lshrrev_b32_e32 v71, 16, v4
+; GFX11-TRUE16-NEXT: v_lshrrev_b32_e32 v80, 16, v3
+; GFX11-TRUE16-NEXT: v_lshrrev_b32_e32 v81, 16, v2
+; GFX11-TRUE16-NEXT: v_lshrrev_b32_e32 v82, 16, v1
+; GFX11-TRUE16-NEXT: v_lshrrev_b32_e32 v83, 16, v0
+; GFX11-TRUE16-NEXT: s_and_not1_b32 vcc_lo, exec_lo, s0
+; GFX11-TRUE16-NEXT: s_cbranch_vccnz .LBB29_3
+; GFX11-TRUE16-NEXT: .LBB29_2: ; %cmp.true
+; GFX11-TRUE16-NEXT: v_dual_add_f32 v29, 1.0, v29 :: v_dual_add_f32 v28, 1.0, v28
+; GFX11-TRUE16-NEXT: v_dual_add_f32 v27, 1.0, v27 :: v_dual_add_f32 v26, 1.0, v26
+; GFX11-TRUE16-NEXT: v_dual_add_f32 v25, 1.0, v25 :: v_dual_add_f32 v24, 1.0, v24
+; GFX11-TRUE16-NEXT: v_dual_add_f32 v23, 1.0, v23 :: v_dual_add_f32 v22, 1.0, v22
+; GFX11-TRUE16-NEXT: v_dual_add_f32 v21, 1.0, v21 :: v_dual_add_f32 v20, 1.0, v20
+; GFX11-TRUE16-NEXT: v_dual_add_f32 v19, 1.0, v19 :: v_dual_add_f32 v18, 1.0, v18
+; GFX11-TRUE16-NEXT: v_dual_add_f32 v17, 1.0, v17 :: v_dual_add_f32 v16, 1.0, v16
+; GFX11-TRUE16-NEXT: v_dual_add_f32 v15, 1.0, v15 :: v_dual_add_f32 v14, 1.0, v14
+; GFX11-TRUE16-NEXT: v_dual_add_f32 v13, 1.0, v13 :: v_dual_add_f32 v12, 1.0, v12
+; GFX11-TRUE16-NEXT: v_dual_add_f32 v11, 1.0, v11 :: v_dual_add_f32 v10, 1.0, v10
+; GFX11-TRUE16-NEXT: v_dual_add_f32 v9, 1.0, v9 :: v_dual_add_f32 v8, 1.0, v8
+; GFX11-TRUE16-NEXT: v_dual_add_f32 v7, 1.0, v7 :: v_dual_add_f32 v6, 1.0, v6
+; GFX11-TRUE16-NEXT: v_dual_add_f32 v5, 1.0, v5 :: v_dual_add_f32 v4, 1.0, v4
+; GFX11-TRUE16-NEXT: v_dual_add_f32 v3, 1.0, v3 :: v_dual_add_f32 v2, 1.0, v2
+; GFX11-TRUE16-NEXT: v_dual_add_f32 v1, 1.0, v1 :: v_dual_add_f32 v0, 1.0, v0
+; GFX11-TRUE16-NEXT: v_lshrrev_b32_e32 v30, 16, v29
+; GFX11-TRUE16-NEXT: v_lshrrev_b32_e32 v31, 16, v28
+; GFX11-TRUE16-NEXT: v_lshrrev_b32_e32 v32, 16, v27
+; GFX11-TRUE16-NEXT: v_lshrrev_b32_e32 v33, 16, v26
+; GFX11-TRUE16-NEXT: v_lshrrev_b32_e32 v34, 16, v25
+; GFX11-TRUE16-NEXT: v_lshrrev_b32_e32 v35, 16, v24
+; GFX11-TRUE16-NEXT: v_lshrrev_b32_e32 v36, 16, v23
+; GFX11-TRUE16-NEXT: v_lshrrev_b32_e32 v37, 16, v22
+; GFX11-TRUE16-NEXT: v_lshrrev_b32_e32 v38, 16, v21
+; GFX11-TRUE16-NEXT: v_lshrrev_b32_e32 v39, 16, v20
+; GFX11-TRUE16-NEXT: v_lshrrev_b32_e32 v48, 16, v19
+; GFX11-TRUE16-NEXT: v_lshrrev_b32_e32 v49, 16, v18
+; GFX11-TRUE16-NEXT: v_lshrrev_b32_e32 v50, 16, v17
+; GFX11-TRUE16-NEXT: v_lshrrev_b32_e32 v51, 16, v16
+; GFX11-TRUE16-NEXT: v_lshrrev_b32_e32 v52, 16, v15
+; GFX11-TRUE16-NEXT: v_lshrrev_b32_e32 v53, 16, v14
+; GFX11-TRUE16-NEXT: v_lshrrev_b32_e32 v54, 16, v13
+; GFX11-TRUE16-NEXT: v_lshrrev_b32_e32 v55, 16, v12
+; GFX11-TRUE16-NEXT: v_lshrrev_b32_e32 v64, 16, v11
+; GFX11-TRUE16-NEXT: v_lshrrev_b32_e32 v65, 16, v10
+; GFX11-TRUE16-NEXT: v_lshrrev_b32_e32 v66, 16, v9
+; GFX11-TRUE16-NEXT: v_lshrrev_b32_e32 v67, 16, v8
+; GFX11-TRUE16-NEXT: v_lshrrev_b32_e32 v68, 16, v7
+; GFX11-TRUE16-NEXT: v_lshrrev_b32_e32 v69, 16, v6
+; GFX11-TRUE16-NEXT: v_lshrrev_b32_e32 v70, 16, v5
+; GFX11-TRUE16-NEXT: v_lshrrev_b32_e32 v71, 16, v4
+; GFX11-TRUE16-NEXT: v_lshrrev_b32_e32 v80, 16, v3
+; GFX11-TRUE16-NEXT: v_lshrrev_b32_e32 v81, 16, v2
+; GFX11-TRUE16-NEXT: v_lshrrev_b32_e32 v82, 16, v1
+; GFX11-TRUE16-NEXT: v_lshrrev_b32_e32 v83, 16, v0
+; GFX11-TRUE16-NEXT: .LBB29_3: ; %end
+; GFX11-TRUE16-NEXT: s_delay_alu instid0(VALU_DEP_1) | instskip(NEXT) | instid1(VALU_DEP_4)
+; GFX11-TRUE16-NEXT: v_dual_mov_b32 v83, v83 :: v_dual_mov_b32 v82, v82
+; GFX11-TRUE16-NEXT: v_dual_mov_b32 v81, v81 :: v_dual_mov_b32 v80, v80
+; GFX11-TRUE16-NEXT: v_dual_mov_b32 v71, v71 :: v_dual_mov_b32 v70, v70
+; GFX11-TRUE16-NEXT: v_dual_mov_b32 v69, v69 :: v_dual_mov_b32 v68, v68
+; GFX11-TRUE16-NEXT: v_dual_mov_b32 v67, v67 :: v_dual_mov_b32 v66, v66
+; GFX11-TRUE16-NEXT: v_dual_mov_b32 v65, v65 :: v_dual_mov_b32 v64, v64
+; GFX11-TRUE16-NEXT: v_dual_mov_b32 v55, v55 :: v_dual_mov_b32 v54, v54
+; GFX11-TRUE16-NEXT: v_dual_mov_b32 v53, v53 :: v_dual_mov_b32 v52, v52
+; GFX11-TRUE16-NEXT: v_dual_mov_b32 v51, v51 :: v_dual_mov_b32 v50, v50
+; GFX11-TRUE16-NEXT: v_dual_mov_b32 v49, v49 :: v_dual_mov_b32 v48, v48
+; GFX11-TRUE16-NEXT: v_dual_mov_b32 v39, v39 :: v_dual_mov_b32 v38, v38
+; GFX11-TRUE16-NEXT: v_dual_mov_b32 v37, v37 :: v_dual_mov_b32 v36, v36
+; GFX11-TRUE16-NEXT: v_dual_mov_b32 v35, v35 :: v_dual_mov_b32 v34, v34
+; GFX11-TRUE16-NEXT: v_dual_mov_b32 v33, v33 :: v_dual_mov_b32 v32, v32
+; GFX11-TRUE16-NEXT: v_dual_mov_b32 v31, v31 :: v_dual_mov_b32 v30, v30
+; GFX11-TRUE16-NEXT: v_mov_b16_e32 v0.h, v83.l
+; GFX11-TRUE16-NEXT: v_mov_b16_e32 v1.h, v82.l
+; GFX11-TRUE16-NEXT: v_mov_b16_e32 v2.h, v81.l
+; GFX11-TRUE16-NEXT: v_mov_b16_e32 v3.h, v80.l
+; GFX11-TRUE16-NEXT: v_mov_b16_e32 v4.h, v71.l
+; GFX11-TRUE16-NEXT: v_mov_b16_e32 v5.h, v70.l
+; GFX11-TRUE16-NEXT: v_mov_b16_e32 v6.h, v69.l
+; GFX11-TRUE16-NEXT: v_mov_b16_e32 v7.h, v68.l
+; GFX11-TRUE16-NEXT: v_mov_b16_e32 v8.h, v67.l
+; GFX11-TRUE16-NEXT: v_mov_b16_e32 v9.h, v66.l
+; GFX11-TRUE16-NEXT: v_mov_b16_e32 v10.h, v65.l
+; GFX11-TRUE16-NEXT: v_mov_b16_e32 v11.h, v64.l
+; GFX11-TRUE16-NEXT: v_mov_b16_e32 v12.h, v55.l
+; GFX11-TRUE16-NEXT: v_mov_b16_e32 v13.h, v54.l
+; GFX11-TRUE16-NEXT: v_mov_b16_e32 v14.h, v53.l
+; GFX11-TRUE16-NEXT: v_mov_b16_e32 v15.h, v52.l
+; GFX11-TRUE16-NEXT: v_mov_b16_e32 v16.h, v51.l
+; GFX11-TRUE16-NEXT: v_mov_b16_e32 v17.h, v50.l
+; GFX11-TRUE16-NEXT: v_mov_b16_e32 v18.h, v49.l
+; GFX11-TRUE16-NEXT: v_mov_b16_e32 v19.h, v48.l
+; GFX11-TRUE16-NEXT: v_mov_b16_e32 v20.h, v39.l
+; GFX11-TRUE16-NEXT: v_mov_b16_e32 v21.h, v38.l
+; GFX11-TRUE16-NEXT: v_mov_b16_e32 v22.h, v37.l
+; GFX11-TRUE16-NEXT: v_mov_b16_e32 v23.h, v36.l
+; GFX11-TRUE16-NEXT: v_mov_b16_e32 v24.h, v35.l
+; GFX11-TRUE16-NEXT: v_mov_b16_e32 v25.h, v34.l
+; GFX11-TRUE16-NEXT: v_mov_b16_e32 v26.h, v33.l
+; GFX11-TRUE16-NEXT: v_mov_b16_e32 v27.h, v32.l
+; GFX11-TRUE16-NEXT: v_mov_b16_e32 v28.h, v31.l
+; GFX11-TRUE16-NEXT: v_mov_b16_e32 v29.h, v30.l
+; GFX11-TRUE16-NEXT: s_setpc_b64 s[30:31]
+; GFX11-TRUE16-NEXT: .LBB29_4:
+; GFX11-TRUE16-NEXT: ; implicit-def: $vgpr83
+; GFX11-TRUE16-NEXT: ; implicit-def: $vgpr82
+; GFX11-TRUE16-NEXT: ; implicit-def: $vgpr81
+; GFX11-TRUE16-NEXT: ; implicit-def: $vgpr80
+; GFX11-TRUE16-NEXT: ; implicit-def: $vgpr71
+; GFX11-TRUE16-NEXT: ; implicit-def: $vgpr70
+; GFX11-TRUE16-NEXT: ; implicit-def: $vgpr69
+; GFX11-TRUE16-NEXT: ; implicit-def: $vgpr68
+; GFX11-TRUE16-NEXT: ; implicit-def: $vgpr67
+; GFX11-TRUE16-NEXT: ; implicit-def: $vgpr66
+; GFX11-TRUE16-NEXT: ; implicit-def: $vgpr65
+; GFX11-TRUE16-NEXT: ; implicit-def: $vgpr64
+; GFX11-TRUE16-NEXT: ; implicit-def: $vgpr55
+; GFX11-TRUE16-NEXT: ; implicit-def: $vgpr54
+; GFX11-TRUE16-NEXT: ; implicit-def: $vgpr53
+; GFX11-TRUE16-NEXT: ; implicit-def: $vgpr52
+; GFX11-TRUE16-NEXT: ; implicit-def: $vgpr51
+; GFX11-TRUE16-NEXT: ; implicit-def: $vgpr50
+; GFX11-TRUE16-NEXT: ; implicit-def: $vgpr49
+; GFX11-TRUE16-NEXT: ; implicit-def: $vgpr48
+; GFX11-TRUE16-NEXT: ; implicit-def: $vgpr39
+; GFX11-TRUE16-NEXT: ; implicit-def: $vgpr38
+; GFX11-TRUE16-NEXT: ; implicit-def: $vgpr37
+; GFX11-TRUE16-NEXT: ; implicit-def: $vgpr36
+; GFX11-TRUE16-NEXT: ; implicit-def: $vgpr35
+; GFX11-TRUE16-NEXT: ; implicit-def: $vgpr34
+; GFX11-TRUE16-NEXT: ; implicit-def: $vgpr33
+; GFX11-TRUE16-NEXT: ; implicit-def: $vgpr32
+; GFX11-TRUE16-NEXT: ; implicit-def: $vgpr31
+; GFX11-TRUE16-NEXT: ; implicit-def: $vgpr30
+; GFX11-TRUE16-NEXT: s_branch .LBB29_2
+;
+; GFX11-FAKE16-LABEL: bitcast_v30f32_to_v60i16_scalar:
+; GFX11-FAKE16: ; %bb.0:
+; GFX11-FAKE16-NEXT: s_waitcnt vmcnt(0) expcnt(0) lgkmcnt(0)
+; GFX11-FAKE16-NEXT: v_cmp_ne_u32_e32 vcc_lo, 0, v12
+; GFX11-FAKE16-NEXT: v_dual_mov_b32 v30, s0 :: v_dual_mov_b32 v29, s1
+; GFX11-FAKE16-NEXT: v_dual_mov_b32 v28, s2 :: v_dual_mov_b32 v27, s3
+; GFX11-FAKE16-NEXT: v_dual_mov_b32 v26, s16 :: v_dual_mov_b32 v25, s17
+; GFX11-FAKE16-NEXT: v_dual_mov_b32 v24, s18 :: v_dual_mov_b32 v23, s19
+; GFX11-FAKE16-NEXT: v_dual_mov_b32 v22, s20 :: v_dual_mov_b32 v21, s21
+; GFX11-FAKE16-NEXT: v_dual_mov_b32 v20, s22 :: v_dual_mov_b32 v19, s23
+; GFX11-FAKE16-NEXT: v_dual_mov_b32 v18, s24 :: v_dual_mov_b32 v13, s26
+; GFX11-FAKE16-NEXT: v_dual_mov_b32 v14, s25 :: v_dual_mov_b32 v15, s27
+; GFX11-FAKE16-NEXT: v_dual_mov_b32 v17, s28 :: v_dual_mov_b32 v16, s29
+; GFX11-FAKE16-NEXT: s_mov_b32 s0, 0
+; GFX11-FAKE16-NEXT: s_and_b32 s1, vcc_lo, exec_lo
+; GFX11-FAKE16-NEXT: s_cbranch_scc0 .LBB29_4
+; GFX11-FAKE16-NEXT: ; %bb.1: ; %cmp.false
+; GFX11-FAKE16-NEXT: v_lshrrev_b32_e32 v50, 16, v11
+; GFX11-FAKE16-NEXT: v_lshrrev_b32_e32 v51, 16, v10
+; GFX11-FAKE16-NEXT: v_lshrrev_b32_e32 v52, 16, v9
+; GFX11-FAKE16-NEXT: v_lshrrev_b32_e32 v53, 16, v8
+; GFX11-FAKE16-NEXT: v_lshrrev_b32_e32 v54, 16, v7
+; GFX11-FAKE16-NEXT: v_lshrrev_b32_e32 v55, 16, v6
+; GFX11-FAKE16-NEXT: v_lshrrev_b32_e32 v64, 16, v5
+; GFX11-FAKE16-NEXT: v_lshrrev_b32_e32 v65, 16, v4
+; GFX11-FAKE16-NEXT: v_lshrrev_b32_e32 v66, 16, v3
+; GFX11-FAKE16-NEXT: v_lshrrev_b32_e32 v67, 16, v2
+; GFX11-FAKE16-NEXT: v_lshrrev_b32_e32 v68, 16, v1
+; GFX11-FAKE16-NEXT: v_lshrrev_b32_e32 v69, 16, v0
+; GFX11-FAKE16-NEXT: v_lshrrev_b32_e32 v70, 16, v16
+; GFX11-FAKE16-NEXT: v_lshrrev_b32_e32 v71, 16, v17
+; GFX11-FAKE16-NEXT: v_lshrrev_b32_e32 v80, 16, v15
+; GFX11-FAKE16-NEXT: v_lshrrev_b32_e32 v81, 16, v13
+; GFX11-FAKE16-NEXT: v_lshrrev_b32_e32 v82, 16, v14
+; GFX11-FAKE16-NEXT: v_lshrrev_b32_e32 v12, 16, v18
+; GFX11-FAKE16-NEXT: v_lshrrev_b32_e32 v35, 16, v19
+; GFX11-FAKE16-NEXT: v_lshrrev_b32_e32 v34, 16, v20
+; GFX11-FAKE16-NEXT: v_lshrrev_b32_e32 v33, 16, v21
+; GFX11-FAKE16-NEXT: v_lshrrev_b32_e32 v32, 16, v22
+; GFX11-FAKE16-NEXT: v_lshrrev_b32_e32 v31, 16, v23
+; GFX11-FAKE16-NEXT: v_lshrrev_b32_e32 v83, 16, v24
+; GFX11-FAKE16-NEXT: v_lshrrev_b32_e32 v49, 16, v25
+; GFX11-FAKE16-NEXT: v_lshrrev_b32_e32 v48, 16, v26
+; GFX11-FAKE16-NEXT: v_lshrrev_b32_e32 v39, 16, v27
+; GFX11-FAKE16-NEXT: v_lshrrev_b32_e32 v38, 16, v28
+; GFX11-FAKE16-NEXT: v_lshrrev_b32_e32 v37, 16, v29
+; GFX11-FAKE16-NEXT: v_lshrrev_b32_e32 v36, 16, v30
+; GFX11-FAKE16-NEXT: s_and_not1_b32 vcc_lo, exec_lo, s0
+; GFX11-FAKE16-NEXT: s_cbranch_vccnz .LBB29_3
+; GFX11-FAKE16-NEXT: .LBB29_2: ; %cmp.true
+; GFX11-FAKE16-NEXT: v_dual_add_f32 v11, 1.0, v11 :: v_dual_add_f32 v10, 1.0, v10
+; GFX11-FAKE16-NEXT: v_dual_add_f32 v9, 1.0, v9 :: v_dual_add_f32 v8, 1.0, v8
+; GFX11-FAKE16-NEXT: v_dual_add_f32 v7, 1.0, v7 :: v_dual_add_f32 v6, 1.0, v6
+; GFX11-FAKE16-NEXT: v_dual_add_f32 v5, 1.0, v5 :: v_dual_add_f32 v4, 1.0, v4
+; GFX11-FAKE16-NEXT: v_dual_add_f32 v3, 1.0, v3 :: v_dual_add_f32 v2, 1.0, v2
+; GFX11-FAKE16-NEXT: v_dual_add_f32 v1, 1.0, v1 :: v_dual_add_f32 v0, 1.0, v0
+; GFX11-FAKE16-NEXT: v_dual_add_f32 v16, 1.0, v16 :: v_dual_add_f32 v17, 1.0, v17
+; GFX11-FAKE16-NEXT: v_dual_add_f32 v15, 1.0, v15 :: v_dual_add_f32 v14, 1.0, v14
+; GFX11-FAKE16-NEXT: v_dual_add_f32 v13, 1.0, v13 :: v_dual_add_f32 v18, 1.0, v18
+; GFX11-FAKE16-NEXT: v_dual_add_f32 v19, 1.0, v19 :: v_dual_add_f32 v20, 1.0, v20
+; GFX11-FAKE16-NEXT: v_dual_add_f32 v21, 1.0, v21 :: v_dual_add_f32 v22, 1.0, v22
+; GFX11-FAKE16-NEXT: v_dual_add_f32 v23, 1.0, v23 :: v_dual_add_f32 v24, 1.0, v24
+; GFX11-FAKE16-NEXT: v_dual_add_f32 v25, 1.0, v25 :: v_dual_add_f32 v26, 1.0, v26
+; GFX11-FAKE16-NEXT: v_dual_add_f32 v27, 1.0, v27 :: v_dual_add_f32 v28, 1.0, v28
+; GFX11-FAKE16-NEXT: v_dual_add_f32 v29, 1.0, v29 :: v_dual_add_f32 v30, 1.0, v30
+; GFX11-FAKE16-NEXT: v_lshrrev_b32_e32 v50, 16, v11
+; GFX11-FAKE16-NEXT: v_lshrrev_b32_e32 v51, 16, v10
+; GFX11-FAKE16-NEXT: v_lshrrev_b32_e32 v52, 16, v9
+; GFX11-FAKE16-NEXT: v_lshrrev_b32_e32 v53, 16, v8
+; GFX11-FAKE16-NEXT: v_lshrrev_b32_e32 v54, 16, v7
+; GFX11-FAKE16-NEXT: v_lshrrev_b32_e32 v55, 16, v6
+; GFX11-FAKE16-NEXT: v_lshrrev_b32_e32 v64, 16, v5
+; GFX11-FAKE16-NEXT: v_lshrrev_b32_e32 v65, 16, v4
+; GFX11-FAKE16-NEXT: v_lshrrev_b32_e32 v66, 16, v3
+; GFX11-FAKE16-NEXT: v_lshrrev_b32_e32 v67, 16, v2
+; GFX11-FAKE16-NEXT: v_lshrrev_b32_e32 v68, 16, v1
+; GFX11-FAKE16-NEXT: v_lshrrev_b32_e32 v69, 16, v0
+; GFX11-FAKE16-NEXT: v_lshrrev_b32_e32 v70, 16, v16
+; GFX11-FAKE16-NEXT: v_lshrrev_b32_e32 v71, 16, v17
+; GFX11-FAKE16-NEXT: v_lshrrev_b32_e32 v80, 16, v15
+; GFX11-FAKE16-NEXT: v_lshrrev_b32_e32 v81, 16, v13
+; GFX11-FAKE16-NEXT: v_lshrrev_b32_e32 v82, 16, v14
+; GFX11-FAKE16-NEXT: v_lshrrev_b32_e32 v12, 16, v18
+; GFX11-FAKE16-NEXT: v_lshrrev_b32_e32 v35, 16, v19
+; GFX11-FAKE16-NEXT: v_lshrrev_b32_e32 v34, 16, v20
+; GFX11-FAKE16-NEXT: v_lshrrev_b32_e32 v33, 16, v21
+; GFX11-FAKE16-NEXT: v_lshrrev_b32_e32 v32, 16, v22
+; GFX11-FAKE16-NEXT: v_lshrrev_b32_e32 v31, 16, v23
+; GFX11-FAKE16-NEXT: v_lshrrev_b32_e32 v83, 16, v24
+; GFX11-FAKE16-NEXT: v_lshrrev_b32_e32 v49, 16, v25
+; GFX11-FAKE16-NEXT: v_lshrrev_b32_e32 v48, 16, v26
+; GFX11-FAKE16-NEXT: v_lshrrev_b32_e32 v39, 16, v27
+; GFX11-FAKE16-NEXT: v_lshrrev_b32_e32 v38, 16, v28
+; GFX11-FAKE16-NEXT: v_lshrrev_b32_e32 v37, 16, v29
+; GFX11-FAKE16-NEXT: v_lshrrev_b32_e32 v36, 16, v30
+; GFX11-FAKE16-NEXT: .LBB29_3: ; %end
+; GFX11-FAKE16-NEXT: v_and_b32_e32 v21, 0xffff, v21
+; GFX11-FAKE16-NEXT: v_and_b32_e32 v19, 0xffff, v19
+; GFX11-FAKE16-NEXT: v_and_b32_e32 v18, 0xffff, v18
+; GFX11-FAKE16-NEXT: v_and_b32_e32 v1, 0xffff, v1
+; GFX11-FAKE16-NEXT: v_and_b32_e32 v29, 0xffff, v29
+; GFX11-FAKE16-NEXT: v_and_b32_e32 v27, 0xffff, v27
+; GFX11-FAKE16-NEXT: v_and_b32_e32 v25, 0xffff, v25
+; GFX11-FAKE16-NEXT: v_and_b32_e32 v23, 0xffff, v23
+; GFX11-FAKE16-NEXT: v_lshl_or_b32 v33, v33, 16, v21
+; GFX11-FAKE16-NEXT: v_and_b32_e32 v14, 0xffff, v14
+; GFX11-FAKE16-NEXT: v_and_b32_e32 v21, 0xffff, v13
+; GFX11-FAKE16-NEXT: v_lshl_or_b32 v35, v35, 16, v19
+; GFX11-FAKE16-NEXT: v_lshl_or_b32 v12, v12, 16, v18
+; GFX11-FAKE16-NEXT: v_and_b32_e32 v17, 0xffff, v17
+; GFX11-FAKE16-NEXT: v_and_b32_e32 v18, 0xffff, v16
+; GFX11-FAKE16-NEXT: v_and_b32_e32 v0, 0xffff, v0
+; GFX11-FAKE16-NEXT: v_lshl_or_b32 v19, v68, 16, v1
+; GFX11-FAKE16-NEXT: v_and_b32_e32 v1, 0xffff, v3
+; GFX11-FAKE16-NEXT: v_lshl_or_b32 v37, v37, 16, v29
+; GFX11-FAKE16-NEXT: v_and_b32_e32 v30, 0xffff, v30
+; GFX11-FAKE16-NEXT: v_lshl_or_b32 v39, v39, 16, v27
+; GFX11-FAKE16-NEXT: v_and_b32_e32 v28, 0xffff, v28
+; GFX11-FAKE16-NEXT: v_and_b32_e32 v22, 0xffff, v22
+; GFX11-FAKE16-NEXT: v_lshl_or_b32 v49, v49, 16, v25
+; GFX11-FAKE16-NEXT: v_and_b32_e32 v26, 0xffff, v26
+; GFX11-FAKE16-NEXT: v_lshl_or_b32 v31, v31, 16, v23
+; GFX11-FAKE16-NEXT: v_and_b32_e32 v24, 0xffff, v24
+; GFX11-FAKE16-NEXT: v_and_b32_e32 v20, 0xffff, v20
+; GFX11-FAKE16-NEXT: v_lshl_or_b32 v13, v82, 16, v14
+; GFX11-FAKE16-NEXT: v_lshl_or_b32 v14, v81, 16, v21
+; GFX11-FAKE16-NEXT: v_lshl_or_b32 v16, v71, 16, v17
+; GFX11-FAKE16-NEXT: v_lshl_or_b32 v17, v70, 16, v18
+; GFX11-FAKE16-NEXT: v_lshl_or_b32 v18, v69, 16, v0
+; GFX11-FAKE16-NEXT: v_and_b32_e32 v0, 0xffff, v2
+; GFX11-FAKE16-NEXT: v_and_b32_e32 v2, 0xffff, v4
+; GFX11-FAKE16-NEXT: v_and_b32_e32 v3, 0xffff, v5
+; GFX11-FAKE16-NEXT: v_and_b32_e32 v4, 0xffff, v6
+; GFX11-FAKE16-NEXT: v_lshl_or_b32 v21, v66, 16, v1
+; GFX11-FAKE16-NEXT: v_and_b32_e32 v1, 0xffff, v8
+; GFX11-FAKE16-NEXT: v_lshl_or_b32 v38, v38, 16, v28
+; GFX11-FAKE16-NEXT: v_lshl_or_b32 v32, v32, 16, v22
+; GFX11-FAKE16-NEXT: v_lshl_or_b32 v34, v34, 16, v20
+; GFX11-FAKE16-NEXT: v_and_b32_e32 v15, 0xffff, v15
+; GFX11-FAKE16-NEXT: v_lshl_or_b32 v20, v67, 16, v0
+; GFX11-FAKE16-NEXT: v_lshl_or_b32 v22, v65, 16, v2
+; GFX11-FAKE16-NEXT: v_lshl_or_b32 v23, v64, 16, v3
+; GFX11-FAKE16-NEXT: v_and_b32_e32 v0, 0xffff, v7
+; GFX11-FAKE16-NEXT: v_and_b32_e32 v2, 0xffff, v9
+; GFX11-FAKE16-NEXT: v_and_b32_e32 v3, 0xffff, v10
+; GFX11-FAKE16-NEXT: v_mov_b32_e32 v5, v49
+; GFX11-FAKE16-NEXT: v_lshl_or_b32 v48, v48, 16, v26
+; GFX11-FAKE16-NEXT: v_lshl_or_b32 v26, v53, 16, v1
+; GFX11-FAKE16-NEXT: v_mov_b32_e32 v1, v37
+; GFX11-FAKE16-NEXT: v_lshl_or_b32 v36, v36, 16, v30
+; GFX11-FAKE16-NEXT: v_mov_b32_e32 v7, v31
+; GFX11-FAKE16-NEXT: v_lshl_or_b32 v30, v83, 16, v24
+; GFX11-FAKE16-NEXT: v_lshl_or_b32 v24, v55, 16, v4
+; GFX11-FAKE16-NEXT: v_and_b32_e32 v4, 0xffff, v11
+; GFX11-FAKE16-NEXT: v_lshl_or_b32 v15, v80, 16, v15
+; GFX11-FAKE16-NEXT: v_lshl_or_b32 v25, v54, 16, v0
+; GFX11-FAKE16-NEXT: v_lshl_or_b32 v27, v52, 16, v2
+; GFX11-FAKE16-NEXT: v_lshl_or_b32 v28, v51, 16, v3
+; GFX11-FAKE16-NEXT: v_lshl_or_b32 v29, v50, 16, v4
+; GFX11-FAKE16-NEXT: v_mov_b32_e32 v0, v36
+; GFX11-FAKE16-NEXT: v_dual_mov_b32 v2, v38 :: v_dual_mov_b32 v3, v39
+; GFX11-FAKE16-NEXT: v_mov_b32_e32 v4, v48
+; GFX11-FAKE16-NEXT: v_mov_b32_e32 v6, v30
+; GFX11-FAKE16-NEXT: v_dual_mov_b32 v8, v32 :: v_dual_mov_b32 v9, v33
+; GFX11-FAKE16-NEXT: v_dual_mov_b32 v10, v34 :: v_dual_mov_b32 v11, v35
+; GFX11-FAKE16-NEXT: s_setpc_b64 s[30:31]
+; GFX11-FAKE16-NEXT: .LBB29_4:
+; GFX11-FAKE16-NEXT: ; implicit-def: $vgpr36
+; GFX11-FAKE16-NEXT: ; implicit-def: $vgpr37
+; GFX11-FAKE16-NEXT: ; implicit-def: $vgpr38
+; GFX11-FAKE16-NEXT: ; implicit-def: $vgpr39
+; GFX11-FAKE16-NEXT: ; implicit-def: $vgpr48
+; GFX11-FAKE16-NEXT: ; implicit-def: $vgpr49
+; GFX11-FAKE16-NEXT: ; implicit-def: $vgpr83
+; GFX11-FAKE16-NEXT: ; implicit-def: $vgpr31
+; GFX11-FAKE16-NEXT: ; implicit-def: $vgpr32
+; GFX11-FAKE16-NEXT: ; implicit-def: $vgpr33
+; GFX11-FAKE16-NEXT: ; implicit-def: $vgpr34
+; GFX11-FAKE16-NEXT: ; implicit-def: $vgpr35
+; GFX11-FAKE16-NEXT: ; implicit-def: $vgpr12
+; GFX11-FAKE16-NEXT: ; implicit-def: $vgpr82
+; GFX11-FAKE16-NEXT: ; implicit-def: $vgpr81
+; GFX11-FAKE16-NEXT: ; implicit-def: $vgpr80
+; GFX11-FAKE16-NEXT: ; implicit-def: $vgpr71
+; GFX11-FAKE16-NEXT: ; implicit-def: $vgpr70
+; GFX11-FAKE16-NEXT: ; implicit-def: $vgpr69
+; GFX11-FAKE16-NEXT: ; implicit-def: $vgpr68
+; GFX11-FAKE16-NEXT: ; implicit-def: $vgpr67
+; GFX11-FAKE16-NEXT: ; implicit-def: $vgpr66
+; GFX11-FAKE16-NEXT: ; implicit-def: $vgpr65
+; GFX11-FAKE16-NEXT: ; implicit-def: $vgpr64
+; GFX11-FAKE16-NEXT: ; implicit-def: $vgpr55
+; GFX11-FAKE16-NEXT: ; implicit-def: $vgpr54
+; GFX11-FAKE16-NEXT: ; implicit-def: $vgpr53
+; GFX11-FAKE16-NEXT: ; implicit-def: $vgpr52
+; GFX11-FAKE16-NEXT: ; implicit-def: $vgpr51
+; GFX11-FAKE16-NEXT: ; implicit-def: $vgpr50
+; GFX11-FAKE16-NEXT: s_branch .LBB29_2
%cmp = icmp eq i32 %b, 0
br i1 %cmp, label %cmp.true, label %cmp.false
@@ -19290,153 +19778,305 @@ define inreg <30 x float> @bitcast_v60i16_to_v30f32_scalar(<60 x i16> inreg %a,
; GFX11-TRUE16-LABEL: bitcast_v60i16_to_v30f32_scalar:
; GFX11-TRUE16: ; %bb.0:
; GFX11-TRUE16-NEXT: s_waitcnt vmcnt(0) expcnt(0) lgkmcnt(0)
-; GFX11-TRUE16-NEXT: v_mov_b16_e32 v32.h, 0
; GFX11-TRUE16-NEXT: v_cmp_ne_u32_e32 vcc_lo, 0, v12
-; GFX11-TRUE16-NEXT: v_mov_b16_e32 v32.l, v11.h
-; GFX11-TRUE16-NEXT: v_mov_b16_e32 v33.l, v10.h
-; GFX11-TRUE16-NEXT: v_mov_b16_e32 v34.l, v9.h
-; GFX11-TRUE16-NEXT: v_mov_b16_e32 v33.h, v32.h
-; GFX11-TRUE16-NEXT: v_mov_b16_e32 v34.h, v32.h
-; GFX11-TRUE16-NEXT: v_mov_b16_e32 v35.l, v8.h
-; GFX11-TRUE16-NEXT: v_mov_b16_e32 v35.h, v32.h
-; GFX11-TRUE16-NEXT: v_mov_b16_e32 v36.l, v7.h
-; GFX11-TRUE16-NEXT: v_mov_b16_e32 v36.h, v32.h
-; GFX11-TRUE16-NEXT: v_mov_b16_e32 v37.l, v6.h
-; GFX11-TRUE16-NEXT: v_mov_b16_e32 v37.h, v32.h
-; GFX11-TRUE16-NEXT: v_mov_b16_e32 v38.l, v5.h
-; GFX11-TRUE16-NEXT: v_mov_b16_e32 v38.h, v32.h
-; GFX11-TRUE16-NEXT: v_mov_b16_e32 v39.l, v4.h
-; GFX11-TRUE16-NEXT: v_mov_b16_e32 v39.h, v32.h
-; GFX11-TRUE16-NEXT: v_mov_b16_e32 v48.l, v3.h
-; GFX11-TRUE16-NEXT: v_mov_b16_e32 v48.h, v32.h
-; GFX11-TRUE16-NEXT: v_mov_b16_e32 v49.l, v2.h
-; GFX11-TRUE16-NEXT: v_mov_b16_e32 v49.h, v32.h
-; GFX11-TRUE16-NEXT: v_mov_b16_e32 v50.l, v1.h
-; GFX11-TRUE16-NEXT: v_mov_b16_e32 v50.h, v32.h
-; GFX11-TRUE16-NEXT: v_mov_b16_e32 v51.l, v0.h
-; GFX11-TRUE16-NEXT: v_mov_b16_e32 v51.h, v32.h
-; GFX11-TRUE16-NEXT: v_and_b32_e32 v71, 0xffff, v0
-; GFX11-TRUE16-NEXT: v_and_b32_e32 v70, 0xffff, v1
-; GFX11-TRUE16-NEXT: v_and_b32_e32 v69, 0xffff, v2
-; GFX11-TRUE16-NEXT: v_and_b32_e32 v68, 0xffff, v3
-; GFX11-TRUE16-NEXT: v_and_b32_e32 v67, 0xffff, v4
-; GFX11-TRUE16-NEXT: v_and_b32_e32 v66, 0xffff, v5
-; GFX11-TRUE16-NEXT: v_and_b32_e32 v65, 0xffff, v6
-; GFX11-TRUE16-NEXT: v_and_b32_e32 v64, 0xffff, v7
-; GFX11-TRUE16-NEXT: v_and_b32_e32 v55, 0xffff, v8
-; GFX11-TRUE16-NEXT: v_and_b32_e32 v54, 0xffff, v9
-; GFX11-TRUE16-NEXT: v_and_b32_e32 v53, 0xffff, v10
-; GFX11-TRUE16-NEXT: v_and_b32_e32 v52, 0xffff, v11
-; GFX11-TRUE16-NEXT: s_lshr_b32 s40, s29, 16
-; GFX11-TRUE16-NEXT: s_lshr_b32 s41, s28, 16
-; GFX11-TRUE16-NEXT: s_lshr_b32 s42, s27, 16
-; GFX11-TRUE16-NEXT: s_lshr_b32 s14, s26, 16
-; GFX11-TRUE16-NEXT: s_lshr_b32 s13, s25, 16
-; GFX11-TRUE16-NEXT: s_lshr_b32 s12, s24, 16
-; GFX11-TRUE16-NEXT: s_lshr_b32 s11, s23, 16
-; GFX11-TRUE16-NEXT: s_lshr_b32 s10, s22, 16
-; GFX11-TRUE16-NEXT: s_lshr_b32 s9, s21, 16
-; GFX11-TRUE16-NEXT: s_lshr_b32 s8, s20, 16
-; GFX11-TRUE16-NEXT: s_lshr_b32 s7, s19, 16
-; GFX11-TRUE16-NEXT: s_lshr_b32 s6, s18, 16
-; GFX11-TRUE16-NEXT: s_lshr_b32 s5, s17, 16
-; GFX11-TRUE16-NEXT: s_lshr_b32 s4, s16, 16
-; GFX11-TRUE16-NEXT: s_lshr_b32 s43, s3, 16
-; GFX11-TRUE16-NEXT: s_lshr_b32 s44, s2, 16
-; GFX11-TRUE16-NEXT: s_lshr_b32 s45, s1, 16
-; GFX11-TRUE16-NEXT: s_lshr_b32 s46, s0, 16
-; GFX11-TRUE16-NEXT: s_mov_b32 s15, 0
-; GFX11-TRUE16-NEXT: s_pack_ll_b32_b16 s0, s0, s46
-; GFX11-TRUE16-NEXT: s_pack_ll_b32_b16 s1, s1, s45
-; GFX11-TRUE16-NEXT: s_pack_ll_b32_b16 s2, s2, s44
-; GFX11-TRUE16-NEXT: s_pack_ll_b32_b16 s3, s3, s43
-; GFX11-TRUE16-NEXT: s_pack_ll_b32_b16 s4, s16, s4
-; GFX11-TRUE16-NEXT: s_pack_ll_b32_b16 s5, s17, s5
-; GFX11-TRUE16-NEXT: s_pack_ll_b32_b16 s6, s18, s6
-; GFX11-TRUE16-NEXT: s_pack_ll_b32_b16 s7, s19, s7
-; GFX11-TRUE16-NEXT: s_pack_ll_b32_b16 s8, s20, s8
-; GFX11-TRUE16-NEXT: s_pack_ll_b32_b16 s9, s21, s9
-; GFX11-TRUE16-NEXT: s_pack_ll_b32_b16 s10, s22, s10
-; GFX11-TRUE16-NEXT: s_pack_ll_b32_b16 s11, s23, s11
-; GFX11-TRUE16-NEXT: s_pack_ll_b32_b16 s12, s24, s12
-; GFX11-TRUE16-NEXT: s_pack_ll_b32_b16 s13, s25, s13
-; GFX11-TRUE16-NEXT: s_pack_ll_b32_b16 s14, s26, s14
-; GFX11-TRUE16-NEXT: s_pack_ll_b32_b16 s16, s27, s42
-; GFX11-TRUE16-NEXT: s_pack_ll_b32_b16 s17, s28, s41
-; GFX11-TRUE16-NEXT: s_pack_ll_b32_b16 s18, s29, s40
+; GFX11-TRUE16-NEXT: s_clause 0x1f
+; GFX11-TRUE16-NEXT: scratch_store_b32 off, v40, s32 offset:316
+; GFX11-TRUE16-NEXT: scratch_store_b32 off, v41, s32 offset:312
+; GFX11-TRUE16-NEXT: scratch_store_b32 off, v42, s32 offset:308
+; GFX11-TRUE16-NEXT: scratch_store_b32 off, v43, s32 offset:304
+; GFX11-TRUE16-NEXT: scratch_store_b32 off, v44, s32 offset:300
+; GFX11-TRUE16-NEXT: scratch_store_b32 off, v45, s32 offset:296
+; GFX11-TRUE16-NEXT: scratch_store_b32 off, v46, s32 offset:292
+; GFX11-TRUE16-NEXT: scratch_store_b32 off, v47, s32 offset:288
+; GFX11-TRUE16-NEXT: scratch_store_b32 off, v56, s32 offset:284
+; GFX11-TRUE16-NEXT: scratch_store_b32 off, v57, s32 offset:280
+; GFX11-TRUE16-NEXT: scratch_store_b32 off, v58, s32 offset:276
+; GFX11-TRUE16-NEXT: scratch_store_b32 off, v59, s32 offset:272
+; GFX11-TRUE16-NEXT: scratch_store_b32 off, v60, s32 offset:268
+; GFX11-TRUE16-NEXT: scratch_store_b32 off, v61, s32 offset:264
+; GFX11-TRUE16-NEXT: scratch_store_b32 off, v62, s32 offset:260
+; GFX11-TRUE16-NEXT: scratch_store_b32 off, v63, s32 offset:256
+; GFX11-TRUE16-NEXT: scratch_store_b32 off, v72, s32 offset:252
+; GFX11-TRUE16-NEXT: scratch_store_b32 off, v73, s32 offset:248
+; GFX11-TRUE16-NEXT: scratch_store_b32 off, v74, s32 offset:244
+; GFX11-TRUE16-NEXT: scratch_store_b32 off, v75, s32 offset:240
+; GFX11-TRUE16-NEXT: scratch_store_b32 off, v76, s32 offset:236
+; GFX11-TRUE16-NEXT: scratch_store_b32 off, v77, s32 offset:232
+; GFX11-TRUE16-NEXT: scratch_store_b32 off, v78, s32 offset:228
+; GFX11-TRUE16-NEXT: scratch_store_b32 off, v79, s32 offset:224
+; GFX11-TRUE16-NEXT: scratch_store_b32 off, v88, s32 offset:220
+; GFX11-TRUE16-NEXT: scratch_store_b32 off, v89, s32 offset:216
+; GFX11-TRUE16-NEXT: scratch_store_b32 off, v90, s32 offset:212
+; GFX11-TRUE16-NEXT: scratch_store_b32 off, v91, s32 offset:208
+; GFX11-TRUE16-NEXT: scratch_store_b32 off, v92, s32 offset:204
+; GFX11-TRUE16-NEXT: scratch_store_b32 off, v93, s32 offset:200
+; GFX11-TRUE16-NEXT: scratch_store_b32 off, v94, s32 offset:196
+; GFX11-TRUE16-NEXT: scratch_store_b32 off, v95, s32 offset:192
+; GFX11-TRUE16-NEXT: s_clause 0x1f
+; GFX11-TRUE16-NEXT: scratch_store_b32 off, v104, s32 offset:188
+; GFX11-TRUE16-NEXT: scratch_store_b32 off, v105, s32 offset:184
+; GFX11-TRUE16-NEXT: scratch_store_b32 off, v106, s32 offset:180
+; GFX11-TRUE16-NEXT: scratch_store_b32 off, v107, s32 offset:176
+; GFX11-TRUE16-NEXT: scratch_store_b32 off, v108, s32 offset:172
+; GFX11-TRUE16-NEXT: scratch_store_b32 off, v109, s32 offset:168
+; GFX11-TRUE16-NEXT: scratch_store_b32 off, v110, s32 offset:164
+; GFX11-TRUE16-NEXT: scratch_store_b32 off, v111, s32 offset:160
+; GFX11-TRUE16-NEXT: scratch_store_b32 off, v120, s32 offset:156
+; GFX11-TRUE16-NEXT: scratch_store_b32 off, v121, s32 offset:152
+; GFX11-TRUE16-NEXT: scratch_store_b32 off, v122, s32 offset:148
+; GFX11-TRUE16-NEXT: scratch_store_b32 off, v123, s32 offset:144
+; GFX11-TRUE16-NEXT: scratch_store_b32 off, v124, s32 offset:140
+; GFX11-TRUE16-NEXT: scratch_store_b32 off, v125, s32 offset:136
+; GFX11-TRUE16-NEXT: scratch_store_b32 off, v126, s32 offset:132
+; GFX11-TRUE16-NEXT: scratch_store_b32 off, v127, s32 offset:128
+; GFX11-TRUE16-NEXT: scratch_store_b32 off, v136, s32 offset:124
+; GFX11-TRUE16-NEXT: scratch_store_b32 off, v137, s32 offset:120
+; GFX11-TRUE16-NEXT: scratch_store_b32 off, v138, s32 offset:116
+; GFX11-TRUE16-NEXT: scratch_store_b32 off, v139, s32 offset:112
+; GFX11-TRUE16-NEXT: scratch_store_b32 off, v140, s32 offset:108
+; GFX11-TRUE16-NEXT: scratch_store_b32 off, v141, s32 offset:104
+; GFX11-TRUE16-NEXT: scratch_store_b32 off, v142, s32 offset:100
+; GFX11-TRUE16-NEXT: scratch_store_b32 off, v143, s32 offset:96
+; GFX11-TRUE16-NEXT: scratch_store_b32 off, v152, s32 offset:92
+; GFX11-TRUE16-NEXT: scratch_store_b32 off, v153, s32 offset:88
+; GFX11-TRUE16-NEXT: scratch_store_b32 off, v154, s32 offset:84
+; GFX11-TRUE16-NEXT: scratch_store_b32 off, v155, s32 offset:80
+; GFX11-TRUE16-NEXT: scratch_store_b32 off, v156, s32 offset:76
+; GFX11-TRUE16-NEXT: scratch_store_b32 off, v157, s32 offset:72
+; GFX11-TRUE16-NEXT: scratch_store_b32 off, v158, s32 offset:68
+; GFX11-TRUE16-NEXT: scratch_store_b32 off, v159, s32 offset:64
+; GFX11-TRUE16-NEXT: s_clause 0xf
+; GFX11-TRUE16-NEXT: scratch_store_b32 off, v168, s32 offset:60
+; GFX11-TRUE16-NEXT: scratch_store_b32 off, v169, s32 offset:56
+; GFX11-TRUE16-NEXT: scratch_store_b32 off, v170, s32 offset:52
+; GFX11-TRUE16-NEXT: scratch_store_b32 off, v171, s32 offset:48
+; GFX11-TRUE16-NEXT: scratch_store_b32 off, v172, s32 offset:44
+; GFX11-TRUE16-NEXT: scratch_store_b32 off, v173, s32 offset:40
+; GFX11-TRUE16-NEXT: scratch_store_b32 off, v174, s32 offset:36
+; GFX11-TRUE16-NEXT: scratch_store_b32 off, v175, s32 offset:32
+; GFX11-TRUE16-NEXT: scratch_store_b32 off, v184, s32 offset:28
+; GFX11-TRUE16-NEXT: scratch_store_b32 off, v185, s32 offset:24
+; GFX11-TRUE16-NEXT: scratch_store_b32 off, v186, s32 offset:20
+; GFX11-TRUE16-NEXT: scratch_store_b32 off, v187, s32 offset:16
+; GFX11-TRUE16-NEXT: scratch_store_b32 off, v188, s32 offset:12
+; GFX11-TRUE16-NEXT: scratch_store_b32 off, v189, s32 offset:8
+; GFX11-TRUE16-NEXT: scratch_store_b32 off, v190, s32 offset:4
+; GFX11-TRUE16-NEXT: scratch_store_b32 off, v191, s32
+; GFX11-TRUE16-NEXT: v_dual_mov_b32 v29, v11 :: v_dual_mov_b32 v28, v10
+; GFX11-TRUE16-NEXT: v_dual_mov_b32 v30, v9 :: v_dual_mov_b32 v25, v7
+; GFX11-TRUE16-NEXT: v_dual_mov_b32 v26, v8 :: v_dual_mov_b32 v191, v5
+; GFX11-TRUE16-NEXT: v_dual_mov_b32 v190, v6 :: v_dual_mov_b32 v185, v4
+; GFX11-TRUE16-NEXT: v_dual_mov_b32 v186, v3 :: v_dual_mov_b32 v187, v2
+; GFX11-TRUE16-NEXT: v_dual_mov_b32 v188, v1 :: v_dual_mov_b32 v189, v0
+; GFX11-TRUE16-NEXT: s_lshr_b32 s15, s29, 16
+; GFX11-TRUE16-NEXT: s_lshr_b32 s14, s28, 16
+; GFX11-TRUE16-NEXT: s_lshr_b32 s13, s27, 16
+; GFX11-TRUE16-NEXT: s_lshr_b32 s12, s26, 16
+; GFX11-TRUE16-NEXT: s_lshr_b32 s11, s25, 16
+; GFX11-TRUE16-NEXT: s_lshr_b32 s10, s24, 16
+; GFX11-TRUE16-NEXT: s_lshr_b32 s9, s23, 16
+; GFX11-TRUE16-NEXT: s_lshr_b32 s8, s22, 16
+; GFX11-TRUE16-NEXT: s_lshr_b32 s7, s21, 16
+; GFX11-TRUE16-NEXT: s_lshr_b32 s6, s20, 16
+; GFX11-TRUE16-NEXT: s_lshr_b32 s5, s19, 16
+; GFX11-TRUE16-NEXT: s_lshr_b32 s4, s18, 16
+; GFX11-TRUE16-NEXT: s_lshr_b32 s43, s17, 16
+; GFX11-TRUE16-NEXT: s_lshr_b32 s44, s16, 16
+; GFX11-TRUE16-NEXT: s_lshr_b32 s45, s3, 16
+; GFX11-TRUE16-NEXT: s_lshr_b32 s46, s2, 16
+; GFX11-TRUE16-NEXT: s_lshr_b32 s41, s1, 16
+; GFX11-TRUE16-NEXT: s_lshr_b32 s40, s0, 16
+; GFX11-TRUE16-NEXT: s_mov_b32 s42, 0
+; GFX11-TRUE16-NEXT: s_pack_ll_b32_b16 s40, s0, s40
+; GFX11-TRUE16-NEXT: s_pack_ll_b32_b16 s41, s1, s41
+; GFX11-TRUE16-NEXT: s_pack_ll_b32_b16 s0, s2, s46
+; GFX11-TRUE16-NEXT: s_pack_ll_b32_b16 s1, s3, s45
+; GFX11-TRUE16-NEXT: s_pack_ll_b32_b16 s2, s16, s44
+; GFX11-TRUE16-NEXT: s_pack_ll_b32_b16 s3, s17, s43
+; GFX11-TRUE16-NEXT: s_pack_ll_b32_b16 s4, s18, s4
+; GFX11-TRUE16-NEXT: s_pack_ll_b32_b16 s5, s19, s5
+; GFX11-TRUE16-NEXT: s_pack_ll_b32_b16 s6, s20, s6
+; GFX11-TRUE16-NEXT: s_pack_ll_b32_b16 s7, s21, s7
+; GFX11-TRUE16-NEXT: s_pack_ll_b32_b16 s8, s22, s8
+; GFX11-TRUE16-NEXT: s_pack_ll_b32_b16 s9, s23, s9
+; GFX11-TRUE16-NEXT: s_pack_ll_b32_b16 s10, s24, s10
+; GFX11-TRUE16-NEXT: s_pack_ll_b32_b16 s11, s25, s11
+; GFX11-TRUE16-NEXT: s_pack_ll_b32_b16 s12, s26, s12
+; GFX11-TRUE16-NEXT: s_pack_ll_b32_b16 s13, s27, s13
+; GFX11-TRUE16-NEXT: s_pack_ll_b32_b16 s14, s28, s14
+; GFX11-TRUE16-NEXT: s_pack_ll_b32_b16 s15, s29, s15
; GFX11-TRUE16-NEXT: s_and_b32 s47, vcc_lo, exec_lo
; GFX11-TRUE16-NEXT: s_cbranch_scc0 .LBB31_4
; GFX11-TRUE16-NEXT: ; %bb.1: ; %cmp.false
-; GFX11-TRUE16-NEXT: v_lshl_or_b32 v18, v51, 16, v71
-; GFX11-TRUE16-NEXT: v_lshl_or_b32 v19, v50, 16, v70
-; GFX11-TRUE16-NEXT: v_lshl_or_b32 v20, v49, 16, v69
-; GFX11-TRUE16-NEXT: v_lshl_or_b32 v21, v48, 16, v68
-; GFX11-TRUE16-NEXT: v_lshl_or_b32 v22, v39, 16, v67
-; GFX11-TRUE16-NEXT: v_lshl_or_b32 v23, v38, 16, v66
-; GFX11-TRUE16-NEXT: v_lshl_or_b32 v24, v37, 16, v65
-; GFX11-TRUE16-NEXT: v_lshl_or_b32 v25, v36, 16, v64
-; GFX11-TRUE16-NEXT: v_lshl_or_b32 v26, v35, 16, v55
-; GFX11-TRUE16-NEXT: v_lshl_or_b32 v27, v34, 16, v54
-; GFX11-TRUE16-NEXT: v_lshl_or_b32 v28, v33, 16, v53
-; GFX11-TRUE16-NEXT: v_lshl_or_b32 v29, v32, 16, v52
-; GFX11-TRUE16-NEXT: v_dual_mov_b32 v0, s0 :: v_dual_mov_b32 v1, s1
-; GFX11-TRUE16-NEXT: v_dual_mov_b32 v2, s2 :: v_dual_mov_b32 v3, s3
-; GFX11-TRUE16-NEXT: v_dual_mov_b32 v4, s4 :: v_dual_mov_b32 v5, s5
-; GFX11-TRUE16-NEXT: v_dual_mov_b32 v6, s6 :: v_dual_mov_b32 v7, s7
-; GFX11-TRUE16-NEXT: v_dual_mov_b32 v8, s8 :: v_dual_mov_b32 v9, s9
-; GFX11-TRUE16-NEXT: v_dual_mov_b32 v10, s10 :: v_dual_mov_b32 v11, s11
-; GFX11-TRUE16-NEXT: v_dual_mov_b32 v12, s12 :: v_dual_mov_b32 v13, s13
-; GFX11-TRUE16-NEXT: v_dual_mov_b32 v14, s14 :: v_dual_mov_b32 v15, s16
-; GFX11-TRUE16-NEXT: v_dual_mov_b32 v16, s17 :: v_dual_mov_b32 v17, s18
-; GFX11-TRUE16-NEXT: s_and_not1_b32 vcc_lo, exec_lo, s15
+; GFX11-TRUE16-NEXT: v_dual_mov_b32 v0, s40 :: v_dual_mov_b32 v5, s0
+; GFX11-TRUE16-NEXT: v_dual_mov_b32 v2, s41 :: v_dual_mov_b32 v9, s1
+; GFX11-TRUE16-NEXT: v_dual_mov_b32 v14, s2 :: v_dual_mov_b32 v27, s4
+; GFX11-TRUE16-NEXT: v_dual_mov_b32 v20, s3 :: v_dual_mov_b32 v35, s5
+; GFX11-TRUE16-NEXT: v_dual_mov_b32 v44, s6 :: v_dual_mov_b32 v65, s8
+; GFX11-TRUE16-NEXT: v_dual_mov_b32 v54, s7 :: v_dual_mov_b32 v77, s9
+; GFX11-TRUE16-NEXT: v_dual_mov_b32 v90, s10 :: v_dual_mov_b32 v119, s12
+; GFX11-TRUE16-NEXT: v_dual_mov_b32 v104, s11 :: v_dual_mov_b32 v135, s13
+; GFX11-TRUE16-NEXT: v_mov_b32_e32 v152, s14
+; GFX11-TRUE16-NEXT: v_mov_b32_e32 v170, s15
+; GFX11-TRUE16-NEXT: s_and_not1_b32 vcc_lo, exec_lo, s42
; GFX11-TRUE16-NEXT: s_cbranch_vccnz .LBB31_3
; GFX11-TRUE16-NEXT: .LBB31_2: ; %cmp.true
-; GFX11-TRUE16-NEXT: v_lshl_or_b32 v18, v51, 16, v71
-; GFX11-TRUE16-NEXT: v_lshl_or_b32 v19, v50, 16, v70
-; GFX11-TRUE16-NEXT: v_lshl_or_b32 v20, v49, 16, v69
-; GFX11-TRUE16-NEXT: v_lshl_or_b32 v21, v48, 16, v68
-; GFX11-TRUE16-NEXT: v_lshl_or_b32 v22, v39, 16, v67
-; GFX11-TRUE16-NEXT: v_lshl_or_b32 v23, v38, 16, v66
-; GFX11-TRUE16-NEXT: v_lshl_or_b32 v24, v37, 16, v65
-; GFX11-TRUE16-NEXT: v_lshl_or_b32 v25, v36, 16, v64
-; GFX11-TRUE16-NEXT: v_lshl_or_b32 v26, v35, 16, v55
-; GFX11-TRUE16-NEXT: v_lshl_or_b32 v27, v34, 16, v54
-; GFX11-TRUE16-NEXT: v_lshl_or_b32 v28, v33, 16, v53
-; GFX11-TRUE16-NEXT: v_lshl_or_b32 v29, v32, 16, v52
-; GFX11-TRUE16-NEXT: v_pk_add_u16 v0, s0, 3 op_sel_hi:[1,0]
-; GFX11-TRUE16-NEXT: v_pk_add_u16 v1, s1, 3 op_sel_hi:[1,0]
-; GFX11-TRUE16-NEXT: v_pk_add_u16 v2, s2, 3 op_sel_hi:[1,0]
-; GFX11-TRUE16-NEXT: v_pk_add_u16 v3, s3, 3 op_sel_hi:[1,0]
-; GFX11-TRUE16-NEXT: v_pk_add_u16 v4, s4, 3 op_sel_hi:[1,0]
-; GFX11-TRUE16-NEXT: v_pk_add_u16 v5, s5, 3 op_sel_hi:[1,0]
-; GFX11-TRUE16-NEXT: v_pk_add_u16 v6, s6, 3 op_sel_hi:[1,0]
-; GFX11-TRUE16-NEXT: v_pk_add_u16 v7, s7, 3 op_sel_hi:[1,0]
-; GFX11-TRUE16-NEXT: v_pk_add_u16 v8, s8, 3 op_sel_hi:[1,0]
-; GFX11-TRUE16-NEXT: v_pk_add_u16 v9, s9, 3 op_sel_hi:[1,0]
-; GFX11-TRUE16-NEXT: v_pk_add_u16 v10, s10, 3 op_sel_hi:[1,0]
-; GFX11-TRUE16-NEXT: v_pk_add_u16 v11, s11, 3 op_sel_hi:[1,0]
-; GFX11-TRUE16-NEXT: v_pk_add_u16 v12, s12, 3 op_sel_hi:[1,0]
-; GFX11-TRUE16-NEXT: v_pk_add_u16 v13, s13, 3 op_sel_hi:[1,0]
-; GFX11-TRUE16-NEXT: v_pk_add_u16 v14, s14, 3 op_sel_hi:[1,0]
-; GFX11-TRUE16-NEXT: v_pk_add_u16 v15, s16, 3 op_sel_hi:[1,0]
-; GFX11-TRUE16-NEXT: v_pk_add_u16 v16, s17, 3 op_sel_hi:[1,0]
-; GFX11-TRUE16-NEXT: v_pk_add_u16 v17, s18, 3 op_sel_hi:[1,0]
-; GFX11-TRUE16-NEXT: v_pk_add_u16 v18, v18, 3 op_sel_hi:[1,0]
-; GFX11-TRUE16-NEXT: v_pk_add_u16 v19, v19, 3 op_sel_hi:[1,0]
-; GFX11-TRUE16-NEXT: v_pk_add_u16 v20, v20, 3 op_sel_hi:[1,0]
-; GFX11-TRUE16-NEXT: v_pk_add_u16 v21, v21, 3 op_sel_hi:[1,0]
-; GFX11-TRUE16-NEXT: v_pk_add_u16 v22, v22, 3 op_sel_hi:[1,0]
-; GFX11-TRUE16-NEXT: v_pk_add_u16 v23, v23, 3 op_sel_hi:[1,0]
-; GFX11-TRUE16-NEXT: v_pk_add_u16 v24, v24, 3 op_sel_hi:[1,0]
+; GFX11-TRUE16-NEXT: v_pk_add_u16 v0, s40, 3 op_sel_hi:[1,0]
+; GFX11-TRUE16-NEXT: v_pk_add_u16 v2, s41, 3 op_sel_hi:[1,0]
+; GFX11-TRUE16-NEXT: v_pk_add_u16 v189, v189, 3 op_sel_hi:[1,0]
+; GFX11-TRUE16-NEXT: v_pk_add_u16 v188, v188, 3 op_sel_hi:[1,0]
+; GFX11-TRUE16-NEXT: v_pk_add_u16 v187, v187, 3 op_sel_hi:[1,0]
+; GFX11-TRUE16-NEXT: v_pk_add_u16 v186, v186, 3 op_sel_hi:[1,0]
+; GFX11-TRUE16-NEXT: v_pk_add_u16 v185, v185, 3 op_sel_hi:[1,0]
+; GFX11-TRUE16-NEXT: v_pk_add_u16 v191, v191, 3 op_sel_hi:[1,0]
+; GFX11-TRUE16-NEXT: v_pk_add_u16 v190, v190, 3 op_sel_hi:[1,0]
; GFX11-TRUE16-NEXT: v_pk_add_u16 v25, v25, 3 op_sel_hi:[1,0]
; GFX11-TRUE16-NEXT: v_pk_add_u16 v26, v26, 3 op_sel_hi:[1,0]
-; GFX11-TRUE16-NEXT: v_pk_add_u16 v27, v27, 3 op_sel_hi:[1,0]
+; GFX11-TRUE16-NEXT: v_pk_add_u16 v30, v30, 3 op_sel_hi:[1,0]
; GFX11-TRUE16-NEXT: v_pk_add_u16 v28, v28, 3 op_sel_hi:[1,0]
; GFX11-TRUE16-NEXT: v_pk_add_u16 v29, v29, 3 op_sel_hi:[1,0]
+; GFX11-TRUE16-NEXT: v_pk_add_u16 v5, s0, 3 op_sel_hi:[1,0]
+; GFX11-TRUE16-NEXT: v_pk_add_u16 v9, s1, 3 op_sel_hi:[1,0]
+; GFX11-TRUE16-NEXT: v_pk_add_u16 v14, s2, 3 op_sel_hi:[1,0]
+; GFX11-TRUE16-NEXT: v_pk_add_u16 v20, s3, 3 op_sel_hi:[1,0]
+; GFX11-TRUE16-NEXT: v_pk_add_u16 v27, s4, 3 op_sel_hi:[1,0]
+; GFX11-TRUE16-NEXT: v_pk_add_u16 v35, s5, 3 op_sel_hi:[1,0]
+; GFX11-TRUE16-NEXT: v_pk_add_u16 v44, s6, 3 op_sel_hi:[1,0]
+; GFX11-TRUE16-NEXT: v_pk_add_u16 v54, s7, 3 op_sel_hi:[1,0]
+; GFX11-TRUE16-NEXT: v_pk_add_u16 v65, s8, 3 op_sel_hi:[1,0]
+; GFX11-TRUE16-NEXT: v_pk_add_u16 v77, s9, 3 op_sel_hi:[1,0]
+; GFX11-TRUE16-NEXT: v_pk_add_u16 v90, s10, 3 op_sel_hi:[1,0]
+; GFX11-TRUE16-NEXT: v_pk_add_u16 v104, s11, 3 op_sel_hi:[1,0]
+; GFX11-TRUE16-NEXT: v_pk_add_u16 v119, s12, 3 op_sel_hi:[1,0]
+; GFX11-TRUE16-NEXT: v_pk_add_u16 v135, s13, 3 op_sel_hi:[1,0]
+; GFX11-TRUE16-NEXT: v_pk_add_u16 v152, s14, 3 op_sel_hi:[1,0]
+; GFX11-TRUE16-NEXT: v_pk_add_u16 v170, s15, 3 op_sel_hi:[1,0]
; GFX11-TRUE16-NEXT: .LBB31_3: ; %end
+; GFX11-TRUE16-NEXT: v_dual_mov_b32 v1, v2 :: v_dual_mov_b32 v2, v5
+; GFX11-TRUE16-NEXT: v_dual_mov_b32 v5, v20 :: v_dual_mov_b32 v6, v27
+; GFX11-TRUE16-NEXT: v_dual_mov_b32 v7, v35 :: v_dual_mov_b32 v8, v44
+; GFX11-TRUE16-NEXT: v_dual_mov_b32 v11, v77 :: v_dual_mov_b32 v12, v90
+; GFX11-TRUE16-NEXT: v_mov_b32_e32 v13, v104
+; GFX11-TRUE16-NEXT: v_dual_mov_b32 v15, v135 :: v_dual_mov_b32 v16, v152
+; GFX11-TRUE16-NEXT: v_dual_mov_b32 v17, v170 :: v_dual_mov_b32 v18, v189
+; GFX11-TRUE16-NEXT: v_dual_mov_b32 v19, v188 :: v_dual_mov_b32 v20, v187
+; GFX11-TRUE16-NEXT: v_dual_mov_b32 v21, v186 :: v_dual_mov_b32 v22, v185
+; GFX11-TRUE16-NEXT: v_dual_mov_b32 v23, v191 :: v_dual_mov_b32 v24, v190
+; GFX11-TRUE16-NEXT: s_clause 0x1f
+; GFX11-TRUE16-NEXT: scratch_load_b32 v191, off, s32
+; GFX11-TRUE16-NEXT: scratch_load_b32 v190, off, s32 offset:4
+; GFX11-TRUE16-NEXT: scratch_load_b32 v189, off, s32 offset:8
+; GFX11-TRUE16-NEXT: scratch_load_b32 v188, off, s32 offset:12
+; GFX11-TRUE16-NEXT: scratch_load_b32 v187, off, s32 offset:16
+; GFX11-TRUE16-NEXT: scratch_load_b32 v186, off, s32 offset:20
+; GFX11-TRUE16-NEXT: scratch_load_b32 v185, off, s32 offset:24
+; GFX11-TRUE16-NEXT: scratch_load_b32 v184, off, s32 offset:28
+; GFX11-TRUE16-NEXT: scratch_load_b32 v175, off, s32 offset:32
+; GFX11-TRUE16-NEXT: scratch_load_b32 v174, off, s32 offset:36
+; GFX11-TRUE16-NEXT: scratch_load_b32 v173, off, s32 offset:40
+; GFX11-TRUE16-NEXT: scratch_load_b32 v172, off, s32 offset:44
+; GFX11-TRUE16-NEXT: scratch_load_b32 v171, off, s32 offset:48
+; GFX11-TRUE16-NEXT: scratch_load_b32 v170, off, s32 offset:52
+; GFX11-TRUE16-NEXT: scratch_load_b32 v169, off, s32 offset:56
+; GFX11-TRUE16-NEXT: scratch_load_b32 v168, off, s32 offset:60
+; GFX11-TRUE16-NEXT: scratch_load_b32 v159, off, s32 offset:64
+; GFX11-TRUE16-NEXT: scratch_load_b32 v158, off, s32 offset:68
+; GFX11-TRUE16-NEXT: scratch_load_b32 v157, off, s32 offset:72
+; GFX11-TRUE16-NEXT: scratch_load_b32 v156, off, s32 offset:76
+; GFX11-TRUE16-NEXT: scratch_load_b32 v155, off, s32 offset:80
+; GFX11-TRUE16-NEXT: scratch_load_b32 v154, off, s32 offset:84
+; GFX11-TRUE16-NEXT: scratch_load_b32 v153, off, s32 offset:88
+; GFX11-TRUE16-NEXT: scratch_load_b32 v152, off, s32 offset:92
+; GFX11-TRUE16-NEXT: scratch_load_b32 v143, off, s32 offset:96
+; GFX11-TRUE16-NEXT: scratch_load_b32 v142, off, s32 offset:100
+; GFX11-TRUE16-NEXT: scratch_load_b32 v141, off, s32 offset:104
+; GFX11-TRUE16-NEXT: scratch_load_b32 v140, off, s32 offset:108
+; GFX11-TRUE16-NEXT: scratch_load_b32 v139, off, s32 offset:112
+; GFX11-TRUE16-NEXT: scratch_load_b32 v138, off, s32 offset:116
+; GFX11-TRUE16-NEXT: scratch_load_b32 v137, off, s32 offset:120
+; GFX11-TRUE16-NEXT: scratch_load_b32 v136, off, s32 offset:124
+; GFX11-TRUE16-NEXT: s_clause 0x1f
+; GFX11-TRUE16-NEXT: scratch_load_b32 v127, off, s32 offset:128
+; GFX11-TRUE16-NEXT: scratch_load_b32 v126, off, s32 offset:132
+; GFX11-TRUE16-NEXT: scratch_load_b32 v125, off, s32 offset:136
+; GFX11-TRUE16-NEXT: scratch_load_b32 v124, off, s32 offset:140
+; GFX11-TRUE16-NEXT: scratch_load_b32 v123, off, s32 offset:144
+; GFX11-TRUE16-NEXT: scratch_load_b32 v122, off, s32 offset:148
+; GFX11-TRUE16-NEXT: scratch_load_b32 v121, off, s32 offset:152
+; GFX11-TRUE16-NEXT: scratch_load_b32 v120, off, s32 offset:156
+; GFX11-TRUE16-NEXT: scratch_load_b32 v111, off, s32 offset:160
+; GFX11-TRUE16-NEXT: scratch_load_b32 v110, off, s32 offset:164
+; GFX11-TRUE16-NEXT: scratch_load_b32 v109, off, s32 offset:168
+; GFX11-TRUE16-NEXT: scratch_load_b32 v108, off, s32 offset:172
+; GFX11-TRUE16-NEXT: scratch_load_b32 v107, off, s32 offset:176
+; GFX11-TRUE16-NEXT: scratch_load_b32 v106, off, s32 offset:180
+; GFX11-TRUE16-NEXT: scratch_load_b32 v105, off, s32 offset:184
+; GFX11-TRUE16-NEXT: scratch_load_b32 v104, off, s32 offset:188
+; GFX11-TRUE16-NEXT: scratch_load_b32 v95, off, s32 offset:192
+; GFX11-TRUE16-NEXT: scratch_load_b32 v94, off, s32 offset:196
+; GFX11-TRUE16-NEXT: scratch_load_b32 v93, off, s32 offset:200
+; GFX11-TRUE16-NEXT: scratch_load_b32 v92, off, s32 offset:204
+; GFX11-TRUE16-NEXT: scratch_load_b32 v91, off, s32 offset:208
+; GFX11-TRUE16-NEXT: scratch_load_b32 v90, off, s32 offset:212
+; GFX11-TRUE16-NEXT: scratch_load_b32 v89, off, s32 offset:216
+; GFX11-TRUE16-NEXT: scratch_load_b32 v88, off, s32 offset:220
+; GFX11-TRUE16-NEXT: scratch_load_b32 v79, off, s32 offset:224
+; GFX11-TRUE16-NEXT: scratch_load_b32 v78, off, s32 offset:228
+; GFX11-TRUE16-NEXT: scratch_load_b32 v77, off, s32 offset:232
+; GFX11-TRUE16-NEXT: scratch_load_b32 v76, off, s32 offset:236
+; GFX11-TRUE16-NEXT: scratch_load_b32 v75, off, s32 offset:240
+; GFX11-TRUE16-NEXT: scratch_load_b32 v74, off, s32 offset:244
+; GFX11-TRUE16-NEXT: scratch_load_b32 v73, off, s32 offset:248
+; GFX11-TRUE16-NEXT: scratch_load_b32 v72, off, s32 offset:252
+; GFX11-TRUE16-NEXT: s_clause 0xf
+; GFX11-TRUE16-NEXT: scratch_load_b32 v63, off, s32 offset:256
+; GFX11-TRUE16-NEXT: scratch_load_b32 v62, off, s32 offset:260
+; GFX11-TRUE16-NEXT: scratch_load_b32 v61, off, s32 offset:264
+; GFX11-TRUE16-NEXT: scratch_load_b32 v60, off, s32 offset:268
+; GFX11-TRUE16-NEXT: scratch_load_b32 v59, off, s32 offset:272
+; GFX11-TRUE16-NEXT: scratch_load_b32 v58, off, s32 offset:276
+; GFX11-TRUE16-NEXT: scratch_load_b32 v57, off, s32 offset:280
+; GFX11-TRUE16-NEXT: scratch_load_b32 v56, off, s32 offset:284
+; GFX11-TRUE16-NEXT: scratch_load_b32 v47, off, s32 offset:288
+; GFX11-TRUE16-NEXT: scratch_load_b32 v46, off, s32 offset:292
+; GFX11-TRUE16-NEXT: scratch_load_b32 v45, off, s32 offset:296
+; GFX11-TRUE16-NEXT: scratch_load_b32 v44, off, s32 offset:300
+; GFX11-TRUE16-NEXT: scratch_load_b32 v43, off, s32 offset:304
+; GFX11-TRUE16-NEXT: scratch_load_b32 v42, off, s32 offset:308
+; GFX11-TRUE16-NEXT: scratch_load_b32 v41, off, s32 offset:312
+; GFX11-TRUE16-NEXT: scratch_load_b32 v40, off, s32 offset:316
+; GFX11-TRUE16-NEXT: v_dual_mov_b32 v3, v9 :: v_dual_mov_b32 v4, v14
+; GFX11-TRUE16-NEXT: v_dual_mov_b32 v9, v54 :: v_dual_mov_b32 v10, v65
+; GFX11-TRUE16-NEXT: v_dual_mov_b32 v14, v119 :: v_dual_mov_b32 v27, v30
+; GFX11-TRUE16-NEXT: s_waitcnt vmcnt(0)
; GFX11-TRUE16-NEXT: s_setpc_b64 s[30:31]
; GFX11-TRUE16-NEXT: .LBB31_4:
+; GFX11-TRUE16-NEXT: v_dual_mov_b32 v64, v29 :: v_dual_mov_b32 v65, v28
+; GFX11-TRUE16-NEXT: v_mov_b32_e32 v66, v30
+; GFX11-TRUE16-NEXT: v_dual_mov_b32 v53, v26 :: v_dual_mov_b32 v54, v25
; GFX11-TRUE16-NEXT: ; implicit-def: $vgpr0_vgpr1_vgpr2_vgpr3_vgpr4_vgpr5_vgpr6_vgpr7_vgpr8_vgpr9_vgpr10_vgpr11_vgpr12_vgpr13_vgpr14_vgpr15_vgpr16_vgpr17_vgpr18_vgpr19_vgpr20_vgpr21_vgpr22_vgpr23_vgpr24_vgpr25_vgpr26_vgpr27_vgpr28_vgpr29_vgpr30_vgpr31
+; GFX11-TRUE16-NEXT: ; implicit-def: $vgpr1_vgpr2_vgpr3_vgpr4_vgpr5_vgpr6_vgpr7_vgpr8_vgpr9_vgpr10_vgpr11_vgpr12_vgpr13_vgpr14_vgpr15_vgpr16_vgpr17_vgpr18_vgpr19_vgpr20_vgpr21_vgpr22_vgpr23_vgpr24_vgpr25_vgpr26_vgpr27_vgpr28_vgpr29_vgpr30_vgpr31_vgpr32
+; GFX11-TRUE16-NEXT: ; implicit-def: $vgpr3_vgpr4_vgpr5_vgpr6_vgpr7_vgpr8_vgpr9_vgpr10_vgpr11_vgpr12_vgpr13_vgpr14_vgpr15_vgpr16_vgpr17_vgpr18_vgpr19_vgpr20_vgpr21_vgpr22_vgpr23_vgpr24_vgpr25_vgpr26_vgpr27_vgpr28_vgpr29_vgpr30_vgpr31_vgpr32_vgpr33_vgpr34
+; GFX11-TRUE16-NEXT: ; implicit-def: $vgpr6_vgpr7_vgpr8_vgpr9_vgpr10_vgpr11_vgpr12_vgpr13_vgpr14_vgpr15_vgpr16_vgpr17_vgpr18_vgpr19_vgpr20_vgpr21_vgpr22_vgpr23_vgpr24_vgpr25_vgpr26_vgpr27_vgpr28_vgpr29_vgpr30_vgpr31_vgpr32_vgpr33_vgpr34_vgpr35_vgpr36_vgpr37
+; GFX11-TRUE16-NEXT: ; implicit-def: $vgpr10_vgpr11_vgpr12_vgpr13_vgpr14_vgpr15_vgpr16_vgpr17_vgpr18_vgpr19_vgpr20_vgpr21_vgpr22_vgpr23_vgpr24_vgpr25_vgpr26_vgpr27_vgpr28_vgpr29_vgpr30_vgpr31_vgpr32_vgpr33_vgpr34_vgpr35_vgpr36_vgpr37_vgpr38_vgpr39_vgpr40_vgpr41
+; GFX11-TRUE16-NEXT: ; implicit-def: $vgpr15_vgpr16_vgpr17_vgpr18_vgpr19_vgpr20_vgpr21_vgpr22_vgpr23_vgpr24_vgpr25_vgpr26_vgpr27_vgpr28_vgpr29_vgpr30_vgpr31_vgpr32_vgpr33_vgpr34_vgpr35_vgpr36_vgpr37_vgpr38_vgpr39_vgpr40_vgpr41_vgpr42_vgpr43_vgpr44_vgpr45_vgpr46
+; GFX11-TRUE16-NEXT: ; implicit-def: $vgpr21_vgpr22_vgpr23_vgpr24_vgpr25_vgpr26_vgpr27_vgpr28_vgpr29_vgpr30_vgpr31_vgpr32_vgpr33_vgpr34_vgpr35_vgpr36_vgpr37_vgpr38_vgpr39_vgpr40_vgpr41_vgpr42_vgpr43_vgpr44_vgpr45_vgpr46_vgpr47_vgpr48_vgpr49_vgpr50_vgpr51_vgpr52
+; GFX11-TRUE16-NEXT: s_delay_alu instid0(VALU_DEP_1) | instskip(NEXT) | instid1(VALU_DEP_4)
+; GFX11-TRUE16-NEXT: v_dual_mov_b32 v25, v54 :: v_dual_mov_b32 v26, v53
+; GFX11-TRUE16-NEXT: ; implicit-def: $vgpr28_vgpr29_vgpr30_vgpr31_vgpr32_vgpr33_vgpr34_vgpr35_vgpr36_vgpr37_vgpr38_vgpr39_vgpr40_vgpr41_vgpr42_vgpr43_vgpr44_vgpr45_vgpr46_vgpr47_vgpr48_vgpr49_vgpr50_vgpr51_vgpr52_vgpr53_vgpr54_vgpr55_vgpr56_vgpr57_vgpr58_vgpr59
+; GFX11-TRUE16-NEXT: v_dual_mov_b32 v28, v65 :: v_dual_mov_b32 v29, v64
+; GFX11-TRUE16-NEXT: s_delay_alu instid0(VALU_DEP_4)
+; GFX11-TRUE16-NEXT: v_mov_b32_e32 v30, v66
+; GFX11-TRUE16-NEXT: ; implicit-def: $vgpr36_vgpr37_vgpr38_vgpr39_vgpr40_vgpr41_vgpr42_vgpr43_vgpr44_vgpr45_vgpr46_vgpr47_vgpr48_vgpr49_vgpr50_vgpr51_vgpr52_vgpr53_vgpr54_vgpr55_vgpr56_vgpr57_vgpr58_vgpr59_vgpr60_vgpr61_vgpr62_vgpr63_vgpr64_vgpr65_vgpr66_vgpr67
+; GFX11-TRUE16-NEXT: ; implicit-def: $vgpr45_vgpr46_vgpr47_vgpr48_vgpr49_vgpr50_vgpr51_vgpr52_vgpr53_vgpr54_vgpr55_vgpr56_vgpr57_vgpr58_vgpr59_vgpr60_vgpr61_vgpr62_vgpr63_vgpr64_vgpr65_vgpr66_vgpr67_vgpr68_vgpr69_vgpr70_vgpr71_vgpr72_vgpr73_vgpr74_vgpr75_vgpr76
+; GFX11-TRUE16-NEXT: ; implicit-def: $vgpr55_vgpr56_vgpr57_vgpr58_vgpr59_vgpr60_vgpr61_vgpr62_vgpr63_vgpr64_vgpr65_vgpr66_vgpr67_vgpr68_vgpr69_vgpr70_vgpr71_vgpr72_vgpr73_vgpr74_vgpr75_vgpr76_vgpr77_vgpr78_vgpr79_vgpr80_vgpr81_vgpr82_vgpr83_vgpr84_vgpr85_vgpr86
+; GFX11-TRUE16-NEXT: ; implicit-def: $vgpr66_vgpr67_vgpr68_vgpr69_vgpr70_vgpr71_vgpr72_vgpr73_vgpr74_vgpr75_vgpr76_vgpr77_vgpr78_vgpr79_vgpr80_vgpr81_vgpr82_vgpr83_vgpr84_vgpr85_vgpr86_vgpr87_vgpr88_vgpr89_vgpr90_vgpr91_vgpr92_vgpr93_vgpr94_vgpr95_vgpr96_vgpr97
+; GFX11-TRUE16-NEXT: ; implicit-def: $vgpr78_vgpr79_vgpr80_vgpr81_vgpr82_vgpr83_vgpr84_vgpr85_vgpr86_vgpr87_vgpr88_vgpr89_vgpr90_vgpr91_vgpr92_vgpr93_vgpr94_vgpr95_vgpr96_vgpr97_vgpr98_vgpr99_vgpr100_vgpr101_vgpr102_vgpr103_vgpr104_vgpr105_vgpr106_vgpr107_vgpr108_vgpr109
+; GFX11-TRUE16-NEXT: ; implicit-def: $vgpr91_vgpr92_vgpr93_vgpr94_vgpr95_vgpr96_vgpr97_vgpr98_vgpr99_vgpr100_vgpr101_vgpr102_vgpr103_vgpr104_vgpr105_vgpr106_vgpr107_vgpr108_vgpr109_vgpr110_vgpr111_vgpr112_vgpr113_vgpr114_vgpr115_vgpr116_vgpr117_vgpr118_vgpr119_vgpr120_vgpr121_vgpr122
+; GFX11-TRUE16-NEXT: ; implicit-def: $vgpr105_vgpr106_vgpr107_vgpr108_vgpr109_vgpr110_vgpr111_vgpr112_vgpr113_vgpr114_vgpr115_vgpr116_vgpr117_vgpr118_vgpr119_vgpr120_vgpr121_vgpr122_vgpr123_vgpr124_vgpr125_vgpr126_vgpr127_vgpr128_vgpr129_vgpr130_vgpr131_vgpr132_vgpr133_vgpr134_vgpr135_vgpr136
+; GFX11-TRUE16-NEXT: ; implicit-def: $vgpr120_vgpr121_vgpr122_vgpr123_vgpr124_vgpr125_vgpr126_vgpr127_vgpr128_vgpr129_vgpr130_vgpr131_vgpr132_vgpr133_vgpr134_vgpr135_vgpr136_vgpr137_vgpr138_vgpr139_vgpr140_vgpr141_vgpr142_vgpr143_vgpr144_vgpr145_vgpr146_vgpr147_vgpr148_vgpr149_vgpr150_vgpr151
+; GFX11-TRUE16-NEXT: ; implicit-def: $vgpr136_vgpr137_vgpr138_vgpr139_vgpr140_vgpr141_vgpr142_vgpr143_vgpr144_vgpr145_vgpr146_vgpr147_vgpr148_vgpr149_vgpr150_vgpr151_vgpr152_vgpr153_vgpr154_vgpr155_vgpr156_vgpr157_vgpr158_vgpr159_vgpr160_vgpr161_vgpr162_vgpr163_vgpr164_vgpr165_vgpr166_vgpr167
+; GFX11-TRUE16-NEXT: ; implicit-def: $vgpr153_vgpr154_vgpr155_vgpr156_vgpr157_vgpr158_vgpr159_vgpr160_vgpr161_vgpr162_vgpr163_vgpr164_vgpr165_vgpr166_vgpr167_vgpr168_vgpr169_vgpr170_vgpr171_vgpr172_vgpr173_vgpr174_vgpr175_vgpr176_vgpr177_vgpr178_vgpr179_vgpr180_vgpr181_vgpr182_vgpr183_vgpr184
; GFX11-TRUE16-NEXT: s_branch .LBB31_2
;
; GFX11-FAKE16-LABEL: bitcast_v60i16_to_v30f32_scalar:
@@ -21985,204 +22625,388 @@ define inreg <60 x half> @bitcast_v30f32_to_v60f16_scalar(<30 x float> inreg %a,
; GFX9-NEXT: ; implicit-def: $vgpr54
; GFX9-NEXT: s_branch .LBB33_2
;
-; GFX11-LABEL: bitcast_v30f32_to_v60f16_scalar:
-; GFX11: ; %bb.0:
-; GFX11-NEXT: s_waitcnt vmcnt(0) expcnt(0) lgkmcnt(0)
-; GFX11-NEXT: v_cmp_ne_u32_e32 vcc_lo, 0, v12
-; GFX11-NEXT: v_dual_mov_b32 v30, s0 :: v_dual_mov_b32 v29, s1
-; GFX11-NEXT: v_dual_mov_b32 v28, s2 :: v_dual_mov_b32 v27, s3
-; GFX11-NEXT: v_dual_mov_b32 v26, s16 :: v_dual_mov_b32 v25, s17
-; GFX11-NEXT: v_dual_mov_b32 v24, s18 :: v_dual_mov_b32 v23, s19
-; GFX11-NEXT: v_dual_mov_b32 v22, s20 :: v_dual_mov_b32 v21, s21
-; GFX11-NEXT: v_dual_mov_b32 v20, s22 :: v_dual_mov_b32 v19, s23
-; GFX11-NEXT: v_dual_mov_b32 v18, s24 :: v_dual_mov_b32 v13, s26
-; GFX11-NEXT: v_dual_mov_b32 v14, s25 :: v_dual_mov_b32 v15, s27
-; GFX11-NEXT: v_dual_mov_b32 v17, s28 :: v_dual_mov_b32 v16, s29
-; GFX11-NEXT: s_mov_b32 s0, 0
-; GFX11-NEXT: s_and_b32 s1, vcc_lo, exec_lo
-; GFX11-NEXT: s_cbranch_scc0 .LBB33_4
-; GFX11-NEXT: ; %bb.1: ; %cmp.false
-; GFX11-NEXT: v_lshrrev_b32_e32 v50, 16, v11
-; GFX11-NEXT: v_lshrrev_b32_e32 v51, 16, v10
-; GFX11-NEXT: v_lshrrev_b32_e32 v52, 16, v9
-; GFX11-NEXT: v_lshrrev_b32_e32 v53, 16, v8
-; GFX11-NEXT: v_lshrrev_b32_e32 v54, 16, v7
-; GFX11-NEXT: v_lshrrev_b32_e32 v55, 16, v6
-; GFX11-NEXT: v_lshrrev_b32_e32 v64, 16, v5
-; GFX11-NEXT: v_lshrrev_b32_e32 v65, 16, v4
-; GFX11-NEXT: v_lshrrev_b32_e32 v66, 16, v3
-; GFX11-NEXT: v_lshrrev_b32_e32 v67, 16, v2
-; GFX11-NEXT: v_lshrrev_b32_e32 v68, 16, v1
-; GFX11-NEXT: v_lshrrev_b32_e32 v69, 16, v0
-; GFX11-NEXT: v_lshrrev_b32_e32 v70, 16, v16
-; GFX11-NEXT: v_lshrrev_b32_e32 v71, 16, v17
-; GFX11-NEXT: v_lshrrev_b32_e32 v80, 16, v15
-; GFX11-NEXT: v_lshrrev_b32_e32 v81, 16, v13
-; GFX11-NEXT: v_lshrrev_b32_e32 v82, 16, v14
-; GFX11-NEXT: v_lshrrev_b32_e32 v12, 16, v18
-; GFX11-NEXT: v_lshrrev_b32_e32 v35, 16, v19
-; GFX11-NEXT: v_lshrrev_b32_e32 v34, 16, v20
-; GFX11-NEXT: v_lshrrev_b32_e32 v33, 16, v21
-; GFX11-NEXT: v_lshrrev_b32_e32 v32, 16, v22
-; GFX11-NEXT: v_lshrrev_b32_e32 v31, 16, v23
-; GFX11-NEXT: v_lshrrev_b32_e32 v83, 16, v24
-; GFX11-NEXT: v_lshrrev_b32_e32 v49, 16, v25
-; GFX11-NEXT: v_lshrrev_b32_e32 v48, 16, v26
-; GFX11-NEXT: v_lshrrev_b32_e32 v39, 16, v27
-; GFX11-NEXT: v_lshrrev_b32_e32 v38, 16, v28
-; GFX11-NEXT: v_lshrrev_b32_e32 v37, 16, v29
-; GFX11-NEXT: v_lshrrev_b32_e32 v36, 16, v30
-; GFX11-NEXT: s_and_not1_b32 vcc_lo, exec_lo, s0
-; GFX11-NEXT: s_cbranch_vccnz .LBB33_3
-; GFX11-NEXT: .LBB33_2: ; %cmp.true
-; GFX11-NEXT: v_dual_add_f32 v11, 1.0, v11 :: v_dual_add_f32 v10, 1.0, v10
-; GFX11-NEXT: v_dual_add_f32 v9, 1.0, v9 :: v_dual_add_f32 v8, 1.0, v8
-; GFX11-NEXT: v_dual_add_f32 v7, 1.0, v7 :: v_dual_add_f32 v6, 1.0, v6
-; GFX11-NEXT: v_dual_add_f32 v5, 1.0, v5 :: v_dual_add_f32 v4, 1.0, v4
-; GFX11-NEXT: v_dual_add_f32 v3, 1.0, v3 :: v_dual_add_f32 v2, 1.0, v2
-; GFX11-NEXT: v_dual_add_f32 v1, 1.0, v1 :: v_dual_add_f32 v0, 1.0, v0
-; GFX11-NEXT: v_dual_add_f32 v16, 1.0, v16 :: v_dual_add_f32 v17, 1.0, v17
-; GFX11-NEXT: v_dual_add_f32 v15, 1.0, v15 :: v_dual_add_f32 v14, 1.0, v14
-; GFX11-NEXT: v_dual_add_f32 v13, 1.0, v13 :: v_dual_add_f32 v18, 1.0, v18
-; GFX11-NEXT: v_dual_add_f32 v19, 1.0, v19 :: v_dual_add_f32 v20, 1.0, v20
-; GFX11-NEXT: v_dual_add_f32 v21, 1.0, v21 :: v_dual_add_f32 v22, 1.0, v22
-; GFX11-NEXT: v_dual_add_f32 v23, 1.0, v23 :: v_dual_add_f32 v24, 1.0, v24
-; GFX11-NEXT: v_dual_add_f32 v25, 1.0, v25 :: v_dual_add_f32 v26, 1.0, v26
-; GFX11-NEXT: v_dual_add_f32 v27, 1.0, v27 :: v_dual_add_f32 v28, 1.0, v28
-; GFX11-NEXT: v_dual_add_f32 v29, 1.0, v29 :: v_dual_add_f32 v30, 1.0, v30
-; GFX11-NEXT: v_lshrrev_b32_e32 v50, 16, v11
-; GFX11-NEXT: v_lshrrev_b32_e32 v51, 16, v10
-; GFX11-NEXT: v_lshrrev_b32_e32 v52, 16, v9
-; GFX11-NEXT: v_lshrrev_b32_e32 v53, 16, v8
-; GFX11-NEXT: v_lshrrev_b32_e32 v54, 16, v7
-; GFX11-NEXT: v_lshrrev_b32_e32 v55, 16, v6
-; GFX11-NEXT: v_lshrrev_b32_e32 v64, 16, v5
-; GFX11-NEXT: v_lshrrev_b32_e32 v65, 16, v4
-; GFX11-NEXT: v_lshrrev_b32_e32 v66, 16, v3
-; GFX11-NEXT: v_lshrrev_b32_e32 v67, 16, v2
-; GFX11-NEXT: v_lshrrev_b32_e32 v68, 16, v1
-; GFX11-NEXT: v_lshrrev_b32_e32 v69, 16, v0
-; GFX11-NEXT: v_lshrrev_b32_e32 v70, 16, v16
-; GFX11-NEXT: v_lshrrev_b32_e32 v71, 16, v17
-; GFX11-NEXT: v_lshrrev_b32_e32 v80, 16, v15
-; GFX11-NEXT: v_lshrrev_b32_e32 v81, 16, v13
-; GFX11-NEXT: v_lshrrev_b32_e32 v82, 16, v14
-; GFX11-NEXT: v_lshrrev_b32_e32 v12, 16, v18
-; GFX11-NEXT: v_lshrrev_b32_e32 v35, 16, v19
-; GFX11-NEXT: v_lshrrev_b32_e32 v34, 16, v20
-; GFX11-NEXT: v_lshrrev_b32_e32 v33, 16, v21
-; GFX11-NEXT: v_lshrrev_b32_e32 v32, 16, v22
-; GFX11-NEXT: v_lshrrev_b32_e32 v31, 16, v23
-; GFX11-NEXT: v_lshrrev_b32_e32 v83, 16, v24
-; GFX11-NEXT: v_lshrrev_b32_e32 v49, 16, v25
-; GFX11-NEXT: v_lshrrev_b32_e32 v48, 16, v26
-; GFX11-NEXT: v_lshrrev_b32_e32 v39, 16, v27
-; GFX11-NEXT: v_lshrrev_b32_e32 v38, 16, v28
-; GFX11-NEXT: v_lshrrev_b32_e32 v37, 16, v29
-; GFX11-NEXT: v_lshrrev_b32_e32 v36, 16, v30
-; GFX11-NEXT: .LBB33_3: ; %end
-; GFX11-NEXT: v_and_b32_e32 v21, 0xffff, v21
-; GFX11-NEXT: v_and_b32_e32 v19, 0xffff, v19
-; GFX11-NEXT: v_and_b32_e32 v18, 0xffff, v18
-; GFX11-NEXT: v_and_b32_e32 v1, 0xffff, v1
-; GFX11-NEXT: v_and_b32_e32 v29, 0xffff, v29
-; GFX11-NEXT: v_and_b32_e32 v27, 0xffff, v27
-; GFX11-NEXT: v_and_b32_e32 v25, 0xffff, v25
-; GFX11-NEXT: v_and_b32_e32 v23, 0xffff, v23
-; GFX11-NEXT: v_lshl_or_b32 v33, v33, 16, v21
-; GFX11-NEXT: v_and_b32_e32 v14, 0xffff, v14
-; GFX11-NEXT: v_and_b32_e32 v21, 0xffff, v13
-; GFX11-NEXT: v_lshl_or_b32 v35, v35, 16, v19
-; GFX11-NEXT: v_lshl_or_b32 v12, v12, 16, v18
-; GFX11-NEXT: v_and_b32_e32 v17, 0xffff, v17
-; GFX11-NEXT: v_and_b32_e32 v18, 0xffff, v16
-; GFX11-NEXT: v_and_b32_e32 v0, 0xffff, v0
-; GFX11-NEXT: v_lshl_or_b32 v19, v68, 16, v1
-; GFX11-NEXT: v_and_b32_e32 v1, 0xffff, v3
-; GFX11-NEXT: v_lshl_or_b32 v37, v37, 16, v29
-; GFX11-NEXT: v_and_b32_e32 v30, 0xffff, v30
-; GFX11-NEXT: v_lshl_or_b32 v39, v39, 16, v27
-; GFX11-NEXT: v_and_b32_e32 v28, 0xffff, v28
-; GFX11-NEXT: v_and_b32_e32 v22, 0xffff, v22
-; GFX11-NEXT: v_lshl_or_b32 v49, v49, 16, v25
-; GFX11-NEXT: v_and_b32_e32 v26, 0xffff, v26
-; GFX11-NEXT: v_lshl_or_b32 v31, v31, 16, v23
-; GFX11-NEXT: v_and_b32_e32 v24, 0xffff, v24
-; GFX11-NEXT: v_and_b32_e32 v20, 0xffff, v20
-; GFX11-NEXT: v_lshl_or_b32 v13, v82, 16, v14
-; GFX11-NEXT: v_lshl_or_b32 v14, v81, 16, v21
-; GFX11-NEXT: v_lshl_or_b32 v16, v71, 16, v17
-; GFX11-NEXT: v_lshl_or_b32 v17, v70, 16, v18
-; GFX11-NEXT: v_lshl_or_b32 v18, v69, 16, v0
-; GFX11-NEXT: v_and_b32_e32 v0, 0xffff, v2
-; GFX11-NEXT: v_and_b32_e32 v2, 0xffff, v4
-; GFX11-NEXT: v_and_b32_e32 v3, 0xffff, v5
-; GFX11-NEXT: v_and_b32_e32 v4, 0xffff, v6
-; GFX11-NEXT: v_lshl_or_b32 v21, v66, 16, v1
-; GFX11-NEXT: v_and_b32_e32 v1, 0xffff, v8
-; GFX11-NEXT: v_lshl_or_b32 v38, v38, 16, v28
-; GFX11-NEXT: v_lshl_or_b32 v32, v32, 16, v22
-; GFX11-NEXT: v_lshl_or_b32 v34, v34, 16, v20
-; GFX11-NEXT: v_and_b32_e32 v15, 0xffff, v15
-; GFX11-NEXT: v_lshl_or_b32 v20, v67, 16, v0
-; GFX11-NEXT: v_lshl_or_b32 v22, v65, 16, v2
-; GFX11-NEXT: v_lshl_or_b32 v23, v64, 16, v3
-; GFX11-NEXT: v_and_b32_e32 v0, 0xffff, v7
-; GFX11-NEXT: v_and_b32_e32 v2, 0xffff, v9
-; GFX11-NEXT: v_and_b32_e32 v3, 0xffff, v10
-; GFX11-NEXT: v_mov_b32_e32 v5, v49
-; GFX11-NEXT: v_lshl_or_b32 v48, v48, 16, v26
-; GFX11-NEXT: v_lshl_or_b32 v26, v53, 16, v1
-; GFX11-NEXT: v_mov_b32_e32 v1, v37
-; GFX11-NEXT: v_lshl_or_b32 v36, v36, 16, v30
-; GFX11-NEXT: v_mov_b32_e32 v7, v31
-; GFX11-NEXT: v_lshl_or_b32 v30, v83, 16, v24
-; GFX11-NEXT: v_lshl_or_b32 v24, v55, 16, v4
-; GFX11-NEXT: v_and_b32_e32 v4, 0xffff, v11
-; GFX11-NEXT: v_lshl_or_b32 v15, v80, 16, v15
-; GFX11-NEXT: v_lshl_or_b32 v25, v54, 16, v0
-; GFX11-NEXT: v_lshl_or_b32 v27, v52, 16, v2
-; GFX11-NEXT: v_lshl_or_b32 v28, v51, 16, v3
-; GFX11-NEXT: v_lshl_or_b32 v29, v50, 16, v4
-; GFX11-NEXT: v_mov_b32_e32 v0, v36
-; GFX11-NEXT: v_dual_mov_b32 v2, v38 :: v_dual_mov_b32 v3, v39
-; GFX11-NEXT: v_mov_b32_e32 v4, v48
-; GFX11-NEXT: v_mov_b32_e32 v6, v30
-; GFX11-NEXT: v_dual_mov_b32 v8, v32 :: v_dual_mov_b32 v9, v33
-; GFX11-NEXT: v_dual_mov_b32 v10, v34 :: v_dual_mov_b32 v11, v35
-; GFX11-NEXT: s_setpc_b64 s[30:31]
-; GFX11-NEXT: .LBB33_4:
-; GFX11-NEXT: ; implicit-def: $vgpr36
-; GFX11-NEXT: ; implicit-def: $vgpr37
-; GFX11-NEXT: ; implicit-def: $vgpr38
-; GFX11-NEXT: ; implicit-def: $vgpr39
-; GFX11-NEXT: ; implicit-def: $vgpr48
-; GFX11-NEXT: ; implicit-def: $vgpr49
-; GFX11-NEXT: ; implicit-def: $vgpr83
-; GFX11-NEXT: ; implicit-def: $vgpr31
-; GFX11-NEXT: ; implicit-def: $vgpr32
-; GFX11-NEXT: ; implicit-def: $vgpr33
-; GFX11-NEXT: ; implicit-def: $vgpr34
-; GFX11-NEXT: ; implicit-def: $vgpr35
-; GFX11-NEXT: ; implicit-def: $vgpr12
-; GFX11-NEXT: ; implicit-def: $vgpr82
-; GFX11-NEXT: ; implicit-def: $vgpr81
-; GFX11-NEXT: ; implicit-def: $vgpr80
-; GFX11-NEXT: ; implicit-def: $vgpr71
-; GFX11-NEXT: ; implicit-def: $vgpr70
-; GFX11-NEXT: ; implicit-def: $vgpr69
-; GFX11-NEXT: ; implicit-def: $vgpr68
-; GFX11-NEXT: ; implicit-def: $vgpr67
-; GFX11-NEXT: ; implicit-def: $vgpr66
-; GFX11-NEXT: ; implicit-def: $vgpr65
-; GFX11-NEXT: ; implicit-def: $vgpr64
-; GFX11-NEXT: ; implicit-def: $vgpr55
-; GFX11-NEXT: ; implicit-def: $vgpr54
-; GFX11-NEXT: ; implicit-def: $vgpr53
-; GFX11-NEXT: ; implicit-def: $vgpr52
-; GFX11-NEXT: ; implicit-def: $vgpr51
-; GFX11-NEXT: ; implicit-def: $vgpr50
-; GFX11-NEXT: s_branch .LBB33_2
+; GFX11-TRUE16-LABEL: bitcast_v30f32_to_v60f16_scalar:
+; GFX11-TRUE16: ; %bb.0:
+; GFX11-TRUE16-NEXT: s_waitcnt vmcnt(0) expcnt(0) lgkmcnt(0)
+; GFX11-TRUE16-NEXT: v_dual_mov_b32 v16, v12 :: v_dual_mov_b32 v29, v11
+; GFX11-TRUE16-NEXT: v_dual_mov_b32 v28, v10 :: v_dual_mov_b32 v27, v9
+; GFX11-TRUE16-NEXT: v_dual_mov_b32 v26, v8 :: v_dual_mov_b32 v25, v7
+; GFX11-TRUE16-NEXT: s_delay_alu instid0(VALU_DEP_3)
+; GFX11-TRUE16-NEXT: v_cmp_ne_u32_e32 vcc_lo, 0, v16
+; GFX11-TRUE16-NEXT: v_dual_mov_b32 v24, v6 :: v_dual_mov_b32 v23, v5
+; GFX11-TRUE16-NEXT: v_dual_mov_b32 v22, v4 :: v_dual_mov_b32 v21, v3
+; GFX11-TRUE16-NEXT: v_dual_mov_b32 v20, v2 :: v_dual_mov_b32 v19, v1
+; GFX11-TRUE16-NEXT: v_dual_mov_b32 v18, v0 :: v_dual_mov_b32 v1, s1
+; GFX11-TRUE16-NEXT: v_dual_mov_b32 v0, s0 :: v_dual_mov_b32 v3, s3
+; GFX11-TRUE16-NEXT: v_dual_mov_b32 v2, s2 :: v_dual_mov_b32 v5, s17
+; GFX11-TRUE16-NEXT: v_dual_mov_b32 v4, s16 :: v_dual_mov_b32 v7, s19
+; GFX11-TRUE16-NEXT: v_dual_mov_b32 v6, s18 :: v_dual_mov_b32 v9, s21
+; GFX11-TRUE16-NEXT: v_dual_mov_b32 v8, s20 :: v_dual_mov_b32 v11, s23
+; GFX11-TRUE16-NEXT: v_dual_mov_b32 v10, s22 :: v_dual_mov_b32 v13, s25
+; GFX11-TRUE16-NEXT: v_dual_mov_b32 v12, s24 :: v_dual_mov_b32 v15, s27
+; GFX11-TRUE16-NEXT: v_dual_mov_b32 v14, s26 :: v_dual_mov_b32 v17, s29
+; GFX11-TRUE16-NEXT: v_mov_b32_e32 v16, s28
+; GFX11-TRUE16-NEXT: s_mov_b32 s0, 0
+; GFX11-TRUE16-NEXT: s_and_b32 s1, vcc_lo, exec_lo
+; GFX11-TRUE16-NEXT: s_cbranch_scc0 .LBB33_4
+; GFX11-TRUE16-NEXT: ; %bb.1: ; %cmp.false
+; GFX11-TRUE16-NEXT: v_lshrrev_b32_e32 v30, 16, v29
+; GFX11-TRUE16-NEXT: v_lshrrev_b32_e32 v31, 16, v28
+; GFX11-TRUE16-NEXT: v_lshrrev_b32_e32 v32, 16, v27
+; GFX11-TRUE16-NEXT: v_lshrrev_b32_e32 v33, 16, v26
+; GFX11-TRUE16-NEXT: v_lshrrev_b32_e32 v34, 16, v25
+; GFX11-TRUE16-NEXT: v_lshrrev_b32_e32 v35, 16, v24
+; GFX11-TRUE16-NEXT: v_lshrrev_b32_e32 v36, 16, v23
+; GFX11-TRUE16-NEXT: v_lshrrev_b32_e32 v37, 16, v22
+; GFX11-TRUE16-NEXT: v_lshrrev_b32_e32 v38, 16, v21
+; GFX11-TRUE16-NEXT: v_lshrrev_b32_e32 v39, 16, v20
+; GFX11-TRUE16-NEXT: v_lshrrev_b32_e32 v48, 16, v19
+; GFX11-TRUE16-NEXT: v_lshrrev_b32_e32 v49, 16, v18
+; GFX11-TRUE16-NEXT: v_lshrrev_b32_e32 v50, 16, v17
+; GFX11-TRUE16-NEXT: v_lshrrev_b32_e32 v51, 16, v16
+; GFX11-TRUE16-NEXT: v_lshrrev_b32_e32 v52, 16, v15
+; GFX11-TRUE16-NEXT: v_lshrrev_b32_e32 v53, 16, v14
+; GFX11-TRUE16-NEXT: v_lshrrev_b32_e32 v54, 16, v13
+; GFX11-TRUE16-NEXT: v_lshrrev_b32_e32 v55, 16, v12
+; GFX11-TRUE16-NEXT: v_lshrrev_b32_e32 v64, 16, v11
+; GFX11-TRUE16-NEXT: v_lshrrev_b32_e32 v65, 16, v10
+; GFX11-TRUE16-NEXT: v_lshrrev_b32_e32 v66, 16, v9
+; GFX11-TRUE16-NEXT: v_lshrrev_b32_e32 v67, 16, v8
+; GFX11-TRUE16-NEXT: v_lshrrev_b32_e32 v68, 16, v7
+; GFX11-TRUE16-NEXT: v_lshrrev_b32_e32 v69, 16, v6
+; GFX11-TRUE16-NEXT: v_lshrrev_b32_e32 v70, 16, v5
+; GFX11-TRUE16-NEXT: v_lshrrev_b32_e32 v71, 16, v4
+; GFX11-TRUE16-NEXT: v_lshrrev_b32_e32 v80, 16, v3
+; GFX11-TRUE16-NEXT: v_lshrrev_b32_e32 v81, 16, v2
+; GFX11-TRUE16-NEXT: v_lshrrev_b32_e32 v82, 16, v1
+; GFX11-TRUE16-NEXT: v_lshrrev_b32_e32 v83, 16, v0
+; GFX11-TRUE16-NEXT: s_and_not1_b32 vcc_lo, exec_lo, s0
+; GFX11-TRUE16-NEXT: s_cbranch_vccnz .LBB33_3
+; GFX11-TRUE16-NEXT: .LBB33_2: ; %cmp.true
+; GFX11-TRUE16-NEXT: v_dual_add_f32 v29, 1.0, v29 :: v_dual_add_f32 v28, 1.0, v28
+; GFX11-TRUE16-NEXT: v_dual_add_f32 v27, 1.0, v27 :: v_dual_add_f32 v26, 1.0, v26
+; GFX11-TRUE16-NEXT: v_dual_add_f32 v25, 1.0, v25 :: v_dual_add_f32 v24, 1.0, v24
+; GFX11-TRUE16-NEXT: v_dual_add_f32 v23, 1.0, v23 :: v_dual_add_f32 v22, 1.0, v22
+; GFX11-TRUE16-NEXT: v_dual_add_f32 v21, 1.0, v21 :: v_dual_add_f32 v20, 1.0, v20
+; GFX11-TRUE16-NEXT: v_dual_add_f32 v19, 1.0, v19 :: v_dual_add_f32 v18, 1.0, v18
+; GFX11-TRUE16-NEXT: v_dual_add_f32 v17, 1.0, v17 :: v_dual_add_f32 v16, 1.0, v16
+; GFX11-TRUE16-NEXT: v_dual_add_f32 v15, 1.0, v15 :: v_dual_add_f32 v14, 1.0, v14
+; GFX11-TRUE16-NEXT: v_dual_add_f32 v13, 1.0, v13 :: v_dual_add_f32 v12, 1.0, v12
+; GFX11-TRUE16-NEXT: v_dual_add_f32 v11, 1.0, v11 :: v_dual_add_f32 v10, 1.0, v10
+; GFX11-TRUE16-NEXT: v_dual_add_f32 v9, 1.0, v9 :: v_dual_add_f32 v8, 1.0, v8
+; GFX11-TRUE16-NEXT: v_dual_add_f32 v7, 1.0, v7 :: v_dual_add_f32 v6, 1.0, v6
+; GFX11-TRUE16-NEXT: v_dual_add_f32 v5, 1.0, v5 :: v_dual_add_f32 v4, 1.0, v4
+; GFX11-TRUE16-NEXT: v_dual_add_f32 v3, 1.0, v3 :: v_dual_add_f32 v2, 1.0, v2
+; GFX11-TRUE16-NEXT: v_dual_add_f32 v1, 1.0, v1 :: v_dual_add_f32 v0, 1.0, v0
+; GFX11-TRUE16-NEXT: v_lshrrev_b32_e32 v30, 16, v29
+; GFX11-TRUE16-NEXT: v_lshrrev_b32_e32 v31, 16, v28
+; GFX11-TRUE16-NEXT: v_lshrrev_b32_e32 v32, 16, v27
+; GFX11-TRUE16-NEXT: v_lshrrev_b32_e32 v33, 16, v26
+; GFX11-TRUE16-NEXT: v_lshrrev_b32_e32 v34, 16, v25
+; GFX11-TRUE16-NEXT: v_lshrrev_b32_e32 v35, 16, v24
+; GFX11-TRUE16-NEXT: v_lshrrev_b32_e32 v36, 16, v23
+; GFX11-TRUE16-NEXT: v_lshrrev_b32_e32 v37, 16, v22
+; GFX11-TRUE16-NEXT: v_lshrrev_b32_e32 v38, 16, v21
+; GFX11-TRUE16-NEXT: v_lshrrev_b32_e32 v39, 16, v20
+; GFX11-TRUE16-NEXT: v_lshrrev_b32_e32 v48, 16, v19
+; GFX11-TRUE16-NEXT: v_lshrrev_b32_e32 v49, 16, v18
+; GFX11-TRUE16-NEXT: v_lshrrev_b32_e32 v50, 16, v17
+; GFX11-TRUE16-NEXT: v_lshrrev_b32_e32 v51, 16, v16
+; GFX11-TRUE16-NEXT: v_lshrrev_b32_e32 v52, 16, v15
+; GFX11-TRUE16-NEXT: v_lshrrev_b32_e32 v53, 16, v14
+; GFX11-TRUE16-NEXT: v_lshrrev_b32_e32 v54, 16, v13
+; GFX11-TRUE16-NEXT: v_lshrrev_b32_e32 v55, 16, v12
+; GFX11-TRUE16-NEXT: v_lshrrev_b32_e32 v64, 16, v11
+; GFX11-TRUE16-NEXT: v_lshrrev_b32_e32 v65, 16, v10
+; GFX11-TRUE16-NEXT: v_lshrrev_b32_e32 v66, 16, v9
+; GFX11-TRUE16-NEXT: v_lshrrev_b32_e32 v67, 16, v8
+; GFX11-TRUE16-NEXT: v_lshrrev_b32_e32 v68, 16, v7
+; GFX11-TRUE16-NEXT: v_lshrrev_b32_e32 v69, 16, v6
+; GFX11-TRUE16-NEXT: v_lshrrev_b32_e32 v70, 16, v5
+; GFX11-TRUE16-NEXT: v_lshrrev_b32_e32 v71, 16, v4
+; GFX11-TRUE16-NEXT: v_lshrrev_b32_e32 v80, 16, v3
+; GFX11-TRUE16-NEXT: v_lshrrev_b32_e32 v81, 16, v2
+; GFX11-TRUE16-NEXT: v_lshrrev_b32_e32 v82, 16, v1
+; GFX11-TRUE16-NEXT: v_lshrrev_b32_e32 v83, 16, v0
+; GFX11-TRUE16-NEXT: .LBB33_3: ; %end
+; GFX11-TRUE16-NEXT: s_delay_alu instid0(VALU_DEP_1) | instskip(NEXT) | instid1(VALU_DEP_4)
+; GFX11-TRUE16-NEXT: v_dual_mov_b32 v83, v83 :: v_dual_mov_b32 v82, v82
+; GFX11-TRUE16-NEXT: v_dual_mov_b32 v81, v81 :: v_dual_mov_b32 v80, v80
+; GFX11-TRUE16-NEXT: v_dual_mov_b32 v71, v71 :: v_dual_mov_b32 v70, v70
+; GFX11-TRUE16-NEXT: v_dual_mov_b32 v69, v69 :: v_dual_mov_b32 v68, v68
+; GFX11-TRUE16-NEXT: v_dual_mov_b32 v67, v67 :: v_dual_mov_b32 v66, v66
+; GFX11-TRUE16-NEXT: v_dual_mov_b32 v65, v65 :: v_dual_mov_b32 v64, v64
+; GFX11-TRUE16-NEXT: v_dual_mov_b32 v55, v55 :: v_dual_mov_b32 v54, v54
+; GFX11-TRUE16-NEXT: v_dual_mov_b32 v53, v53 :: v_dual_mov_b32 v52, v52
+; GFX11-TRUE16-NEXT: v_dual_mov_b32 v51, v51 :: v_dual_mov_b32 v50, v50
+; GFX11-TRUE16-NEXT: v_dual_mov_b32 v49, v49 :: v_dual_mov_b32 v48, v48
+; GFX11-TRUE16-NEXT: v_dual_mov_b32 v39, v39 :: v_dual_mov_b32 v38, v38
+; GFX11-TRUE16-NEXT: v_dual_mov_b32 v37, v37 :: v_dual_mov_b32 v36, v36
+; GFX11-TRUE16-NEXT: v_dual_mov_b32 v35, v35 :: v_dual_mov_b32 v34, v34
+; GFX11-TRUE16-NEXT: v_dual_mov_b32 v33, v33 :: v_dual_mov_b32 v32, v32
+; GFX11-TRUE16-NEXT: v_dual_mov_b32 v31, v31 :: v_dual_mov_b32 v30, v30
+; GFX11-TRUE16-NEXT: v_mov_b16_e32 v0.h, v83.l
+; GFX11-TRUE16-NEXT: v_mov_b16_e32 v1.h, v82.l
+; GFX11-TRUE16-NEXT: v_mov_b16_e32 v2.h, v81.l
+; GFX11-TRUE16-NEXT: v_mov_b16_e32 v3.h, v80.l
+; GFX11-TRUE16-NEXT: v_mov_b16_e32 v4.h, v71.l
+; GFX11-TRUE16-NEXT: v_mov_b16_e32 v5.h, v70.l
+; GFX11-TRUE16-NEXT: v_mov_b16_e32 v6.h, v69.l
+; GFX11-TRUE16-NEXT: v_mov_b16_e32 v7.h, v68.l
+; GFX11-TRUE16-NEXT: v_mov_b16_e32 v8.h, v67.l
+; GFX11-TRUE16-NEXT: v_mov_b16_e32 v9.h, v66.l
+; GFX11-TRUE16-NEXT: v_mov_b16_e32 v10.h, v65.l
+; GFX11-TRUE16-NEXT: v_mov_b16_e32 v11.h, v64.l
+; GFX11-TRUE16-NEXT: v_mov_b16_e32 v12.h, v55.l
+; GFX11-TRUE16-NEXT: v_mov_b16_e32 v13.h, v54.l
+; GFX11-TRUE16-NEXT: v_mov_b16_e32 v14.h, v53.l
+; GFX11-TRUE16-NEXT: v_mov_b16_e32 v15.h, v52.l
+; GFX11-TRUE16-NEXT: v_mov_b16_e32 v16.h, v51.l
+; GFX11-TRUE16-NEXT: v_mov_b16_e32 v17.h, v50.l
+; GFX11-TRUE16-NEXT: v_mov_b16_e32 v18.h, v49.l
+; GFX11-TRUE16-NEXT: v_mov_b16_e32 v19.h, v48.l
+; GFX11-TRUE16-NEXT: v_mov_b16_e32 v20.h, v39.l
+; GFX11-TRUE16-NEXT: v_mov_b16_e32 v21.h, v38.l
+; GFX11-TRUE16-NEXT: v_mov_b16_e32 v22.h, v37.l
+; GFX11-TRUE16-NEXT: v_mov_b16_e32 v23.h, v36.l
+; GFX11-TRUE16-NEXT: v_mov_b16_e32 v24.h, v35.l
+; GFX11-TRUE16-NEXT: v_mov_b16_e32 v25.h, v34.l
+; GFX11-TRUE16-NEXT: v_mov_b16_e32 v26.h, v33.l
+; GFX11-TRUE16-NEXT: v_mov_b16_e32 v27.h, v32.l
+; GFX11-TRUE16-NEXT: v_mov_b16_e32 v28.h, v31.l
+; GFX11-TRUE16-NEXT: v_mov_b16_e32 v29.h, v30.l
+; GFX11-TRUE16-NEXT: s_setpc_b64 s[30:31]
+; GFX11-TRUE16-NEXT: .LBB33_4:
+; GFX11-TRUE16-NEXT: ; implicit-def: $vgpr83
+; GFX11-TRUE16-NEXT: ; implicit-def: $vgpr82
+; GFX11-TRUE16-NEXT: ; implicit-def: $vgpr81
+; GFX11-TRUE16-NEXT: ; implicit-def: $vgpr80
+; GFX11-TRUE16-NEXT: ; implicit-def: $vgpr71
+; GFX11-TRUE16-NEXT: ; implicit-def: $vgpr70
+; GFX11-TRUE16-NEXT: ; implicit-def: $vgpr69
+; GFX11-TRUE16-NEXT: ; implicit-def: $vgpr68
+; GFX11-TRUE16-NEXT: ; implicit-def: $vgpr67
+; GFX11-TRUE16-NEXT: ; implicit-def: $vgpr66
+; GFX11-TRUE16-NEXT: ; implicit-def: $vgpr65
+; GFX11-TRUE16-NEXT: ; implicit-def: $vgpr64
+; GFX11-TRUE16-NEXT: ; implicit-def: $vgpr55
+; GFX11-TRUE16-NEXT: ; implicit-def: $vgpr54
+; GFX11-TRUE16-NEXT: ; implicit-def: $vgpr53
+; GFX11-TRUE16-NEXT: ; implicit-def: $vgpr52
+; GFX11-TRUE16-NEXT: ; implicit-def: $vgpr51
+; GFX11-TRUE16-NEXT: ; implicit-def: $vgpr50
+; GFX11-TRUE16-NEXT: ; implicit-def: $vgpr49
+; GFX11-TRUE16-NEXT: ; implicit-def: $vgpr48
+; GFX11-TRUE16-NEXT: ; implicit-def: $vgpr39
+; GFX11-TRUE16-NEXT: ; implicit-def: $vgpr38
+; GFX11-TRUE16-NEXT: ; implicit-def: $vgpr37
+; GFX11-TRUE16-NEXT: ; implicit-def: $vgpr36
+; GFX11-TRUE16-NEXT: ; implicit-def: $vgpr35
+; GFX11-TRUE16-NEXT: ; implicit-def: $vgpr34
+; GFX11-TRUE16-NEXT: ; implicit-def: $vgpr33
+; GFX11-TRUE16-NEXT: ; implicit-def: $vgpr32
+; GFX11-TRUE16-NEXT: ; implicit-def: $vgpr31
+; GFX11-TRUE16-NEXT: ; implicit-def: $vgpr30
+; GFX11-TRUE16-NEXT: s_branch .LBB33_2
+;
+; GFX11-FAKE16-LABEL: bitcast_v30f32_to_v60f16_scalar:
+; GFX11-FAKE16: ; %bb.0:
+; GFX11-FAKE16-NEXT: s_waitcnt vmcnt(0) expcnt(0) lgkmcnt(0)
+; GFX11-FAKE16-NEXT: v_cmp_ne_u32_e32 vcc_lo, 0, v12
+; GFX11-FAKE16-NEXT: v_dual_mov_b32 v30, s0 :: v_dual_mov_b32 v29, s1
+; GFX11-FAKE16-NEXT: v_dual_mov_b32 v28, s2 :: v_dual_mov_b32 v27, s3
+; GFX11-FAKE16-NEXT: v_dual_mov_b32 v26, s16 :: v_dual_mov_b32 v25, s17
+; GFX11-FAKE16-NEXT: v_dual_mov_b32 v24, s18 :: v_dual_mov_b32 v23, s19
+; GFX11-FAKE16-NEXT: v_dual_mov_b32 v22, s20 :: v_dual_mov_b32 v21, s21
+; GFX11-FAKE16-NEXT: v_dual_mov_b32 v20, s22 :: v_dual_mov_b32 v19, s23
+; GFX11-FAKE16-NEXT: v_dual_mov_b32 v18, s24 :: v_dual_mov_b32 v13, s26
+; GFX11-FAKE16-NEXT: v_dual_mov_b32 v14, s25 :: v_dual_mov_b32 v15, s27
+; GFX11-FAKE16-NEXT: v_dual_mov_b32 v17, s28 :: v_dual_mov_b32 v16, s29
+; GFX11-FAKE16-NEXT: s_mov_b32 s0, 0
+; GFX11-FAKE16-NEXT: s_and_b32 s1, vcc_lo, exec_lo
+; GFX11-FAKE16-NEXT: s_cbranch_scc0 .LBB33_4
+; GFX11-FAKE16-NEXT: ; %bb.1: ; %cmp.false
+; GFX11-FAKE16-NEXT: v_lshrrev_b32_e32 v50, 16, v11
+; GFX11-FAKE16-NEXT: v_lshrrev_b32_e32 v51, 16, v10
+; GFX11-FAKE16-NEXT: v_lshrrev_b32_e32 v52, 16, v9
+; GFX11-FAKE16-NEXT: v_lshrrev_b32_e32 v53, 16, v8
+; GFX11-FAKE16-NEXT: v_lshrrev_b32_e32 v54, 16, v7
+; GFX11-FAKE16-NEXT: v_lshrrev_b32_e32 v55, 16, v6
+; GFX11-FAKE16-NEXT: v_lshrrev_b32_e32 v64, 16, v5
+; GFX11-FAKE16-NEXT: v_lshrrev_b32_e32 v65, 16, v4
+; GFX11-FAKE16-NEXT: v_lshrrev_b32_e32 v66, 16, v3
+; GFX11-FAKE16-NEXT: v_lshrrev_b32_e32 v67, 16, v2
+; GFX11-FAKE16-NEXT: v_lshrrev_b32_e32 v68, 16, v1
+; GFX11-FAKE16-NEXT: v_lshrrev_b32_e32 v69, 16, v0
+; GFX11-FAKE16-NEXT: v_lshrrev_b32_e32 v70, 16, v16
+; GFX11-FAKE16-NEXT: v_lshrrev_b32_e32 v71, 16, v17
+; GFX11-FAKE16-NEXT: v_lshrrev_b32_e32 v80, 16, v15
+; GFX11-FAKE16-NEXT: v_lshrrev_b32_e32 v81, 16, v13
+; GFX11-FAKE16-NEXT: v_lshrrev_b32_e32 v82, 16, v14
+; GFX11-FAKE16-NEXT: v_lshrrev_b32_e32 v12, 16, v18
+; GFX11-FAKE16-NEXT: v_lshrrev_b32_e32 v35, 16, v19
+; GFX11-FAKE16-NEXT: v_lshrrev_b32_e32 v34, 16, v20
+; GFX11-FAKE16-NEXT: v_lshrrev_b32_e32 v33, 16, v21
+; GFX11-FAKE16-NEXT: v_lshrrev_b32_e32 v32, 16, v22
+; GFX11-FAKE16-NEXT: v_lshrrev_b32_e32 v31, 16, v23
+; GFX11-FAKE16-NEXT: v_lshrrev_b32_e32 v83, 16, v24
+; GFX11-FAKE16-NEXT: v_lshrrev_b32_e32 v49, 16, v25
+; GFX11-FAKE16-NEXT: v_lshrrev_b32_e32 v48, 16, v26
+; GFX11-FAKE16-NEXT: v_lshrrev_b32_e32 v39, 16, v27
+; GFX11-FAKE16-NEXT: v_lshrrev_b32_e32 v38, 16, v28
+; GFX11-FAKE16-NEXT: v_lshrrev_b32_e32 v37, 16, v29
+; GFX11-FAKE16-NEXT: v_lshrrev_b32_e32 v36, 16, v30
+; GFX11-FAKE16-NEXT: s_and_not1_b32 vcc_lo, exec_lo, s0
+; GFX11-FAKE16-NEXT: s_cbranch_vccnz .LBB33_3
+; GFX11-FAKE16-NEXT: .LBB33_2: ; %cmp.true
+; GFX11-FAKE16-NEXT: v_dual_add_f32 v11, 1.0, v11 :: v_dual_add_f32 v10, 1.0, v10
+; GFX11-FAKE16-NEXT: v_dual_add_f32 v9, 1.0, v9 :: v_dual_add_f32 v8, 1.0, v8
+; GFX11-FAKE16-NEXT: v_dual_add_f32 v7, 1.0, v7 :: v_dual_add_f32 v6, 1.0, v6
+; GFX11-FAKE16-NEXT: v_dual_add_f32 v5, 1.0, v5 :: v_dual_add_f32 v4, 1.0, v4
+; GFX11-FAKE16-NEXT: v_dual_add_f32 v3, 1.0, v3 :: v_dual_add_f32 v2, 1.0, v2
+; GFX11-FAKE16-NEXT: v_dual_add_f32 v1, 1.0, v1 :: v_dual_add_f32 v0, 1.0, v0
+; GFX11-FAKE16-NEXT: v_dual_add_f32 v16, 1.0, v16 :: v_dual_add_f32 v17, 1.0, v17
+; GFX11-FAKE16-NEXT: v_dual_add_f32 v15, 1.0, v15 :: v_dual_add_f32 v14, 1.0, v14
+; GFX11-FAKE16-NEXT: v_dual_add_f32 v13, 1.0, v13 :: v_dual_add_f32 v18, 1.0, v18
+; GFX11-FAKE16-NEXT: v_dual_add_f32 v19, 1.0, v19 :: v_dual_add_f32 v20, 1.0, v20
+; GFX11-FAKE16-NEXT: v_dual_add_f32 v21, 1.0, v21 :: v_dual_add_f32 v22, 1.0, v22
+; GFX11-FAKE16-NEXT: v_dual_add_f32 v23, 1.0, v23 :: v_dual_add_f32 v24, 1.0, v24
+; GFX11-FAKE16-NEXT: v_dual_add_f32 v25, 1.0, v25 :: v_dual_add_f32 v26, 1.0, v26
+; GFX11-FAKE16-NEXT: v_dual_add_f32 v27, 1.0, v27 :: v_dual_add_f32 v28, 1.0, v28
+; GFX11-FAKE16-NEXT: v_dual_add_f32 v29, 1.0, v29 :: v_dual_add_f32 v30, 1.0, v30
+; GFX11-FAKE16-NEXT: v_lshrrev_b32_e32 v50, 16, v11
+; GFX11-FAKE16-NEXT: v_lshrrev_b32_e32 v51, 16, v10
+; GFX11-FAKE16-NEXT: v_lshrrev_b32_e32 v52, 16, v9
+; GFX11-FAKE16-NEXT: v_lshrrev_b32_e32 v53, 16, v8
+; GFX11-FAKE16-NEXT: v_lshrrev_b32_e32 v54, 16, v7
+; GFX11-FAKE16-NEXT: v_lshrrev_b32_e32 v55, 16, v6
+; GFX11-FAKE16-NEXT: v_lshrrev_b32_e32 v64, 16, v5
+; GFX11-FAKE16-NEXT: v_lshrrev_b32_e32 v65, 16, v4
+; GFX11-FAKE16-NEXT: v_lshrrev_b32_e32 v66, 16, v3
+; GFX11-FAKE16-NEXT: v_lshrrev_b32_e32 v67, 16, v2
+; GFX11-FAKE16-NEXT: v_lshrrev_b32_e32 v68, 16, v1
+; GFX11-FAKE16-NEXT: v_lshrrev_b32_e32 v69, 16, v0
+; GFX11-FAKE16-NEXT: v_lshrrev_b32_e32 v70, 16, v16
+; GFX11-FAKE16-NEXT: v_lshrrev_b32_e32 v71, 16, v17
+; GFX11-FAKE16-NEXT: v_lshrrev_b32_e32 v80, 16, v15
+; GFX11-FAKE16-NEXT: v_lshrrev_b32_e32 v81, 16, v13
+; GFX11-FAKE16-NEXT: v_lshrrev_b32_e32 v82, 16, v14
+; GFX11-FAKE16-NEXT: v_lshrrev_b32_e32 v12, 16, v18
+; GFX11-FAKE16-NEXT: v_lshrrev_b32_e32 v35, 16, v19
+; GFX11-FAKE16-NEXT: v_lshrrev_b32_e32 v34, 16, v20
+; GFX11-FAKE16-NEXT: v_lshrrev_b32_e32 v33, 16, v21
+; GFX11-FAKE16-NEXT: v_lshrrev_b32_e32 v32, 16, v22
+; GFX11-FAKE16-NEXT: v_lshrrev_b32_e32 v31, 16, v23
+; GFX11-FAKE16-NEXT: v_lshrrev_b32_e32 v83, 16, v24
+; GFX11-FAKE16-NEXT: v_lshrrev_b32_e32 v49, 16, v25
+; GFX11-FAKE16-NEXT: v_lshrrev_b32_e32 v48, 16, v26
+; GFX11-FAKE16-NEXT: v_lshrrev_b32_e32 v39, 16, v27
+; GFX11-FAKE16-NEXT: v_lshrrev_b32_e32 v38, 16, v28
+; GFX11-FAKE16-NEXT: v_lshrrev_b32_e32 v37, 16, v29
+; GFX11-FAKE16-NEXT: v_lshrrev_b32_e32 v36, 16, v30
+; GFX11-FAKE16-NEXT: .LBB33_3: ; %end
+; GFX11-FAKE16-NEXT: v_and_b32_e32 v21, 0xffff, v21
+; GFX11-FAKE16-NEXT: v_and_b32_e32 v19, 0xffff, v19
+; GFX11-FAKE16-NEXT: v_and_b32_e32 v18, 0xffff, v18
+; GFX11-FAKE16-NEXT: v_and_b32_e32 v1, 0xffff, v1
+; GFX11-FAKE16-NEXT: v_and_b32_e32 v29, 0xffff, v29
+; GFX11-FAKE16-NEXT: v_and_b32_e32 v27, 0xffff, v27
+; GFX11-FAKE16-NEXT: v_and_b32_e32 v25, 0xffff, v25
+; GFX11-FAKE16-NEXT: v_and_b32_e32 v23, 0xffff, v23
+; GFX11-FAKE16-NEXT: v_lshl_or_b32 v33, v33, 16, v21
+; GFX11-FAKE16-NEXT: v_and_b32_e32 v14, 0xffff, v14
+; GFX11-FAKE16-NEXT: v_and_b32_e32 v21, 0xffff, v13
+; GFX11-FAKE16-NEXT: v_lshl_or_b32 v35, v35, 16, v19
+; GFX11-FAKE16-NEXT: v_lshl_or_b32 v12, v12, 16, v18
+; GFX11-FAKE16-NEXT: v_and_b32_e32 v17, 0xffff, v17
+; GFX11-FAKE16-NEXT: v_and_b32_e32 v18, 0xffff, v16
+; GFX11-FAKE16-NEXT: v_and_b32_e32 v0, 0xffff, v0
+; GFX11-FAKE16-NEXT: v_lshl_or_b32 v19, v68, 16, v1
+; GFX11-FAKE16-NEXT: v_and_b32_e32 v1, 0xffff, v3
+; GFX11-FAKE16-NEXT: v_lshl_or_b32 v37, v37, 16, v29
+; GFX11-FAKE16-NEXT: v_and_b32_e32 v30, 0xffff, v30
+; GFX11-FAKE16-NEXT: v_lshl_or_b32 v39, v39, 16, v27
+; GFX11-FAKE16-NEXT: v_and_b32_e32 v28, 0xffff, v28
+; GFX11-FAKE16-NEXT: v_and_b32_e32 v22, 0xffff, v22
+; GFX11-FAKE16-NEXT: v_lshl_or_b32 v49, v49, 16, v25
+; GFX11-FAKE16-NEXT: v_and_b32_e32 v26, 0xffff, v26
+; GFX11-FAKE16-NEXT: v_lshl_or_b32 v31, v31, 16, v23
+; GFX11-FAKE16-NEXT: v_and_b32_e32 v24, 0xffff, v24
+; GFX11-FAKE16-NEXT: v_and_b32_e32 v20, 0xffff, v20
+; GFX11-FAKE16-NEXT: v_lshl_or_b32 v13, v82, 16, v14
+; GFX11-FAKE16-NEXT: v_lshl_or_b32 v14, v81, 16, v21
+; GFX11-FAKE16-NEXT: v_lshl_or_b32 v16, v71, 16, v17
+; GFX11-FAKE16-NEXT: v_lshl_or_b32 v17, v70, 16, v18
+; GFX11-FAKE16-NEXT: v_lshl_or_b32 v18, v69, 16, v0
+; GFX11-FAKE16-NEXT: v_and_b32_e32 v0, 0xffff, v2
+; GFX11-FAKE16-NEXT: v_and_b32_e32 v2, 0xffff, v4
+; GFX11-FAKE16-NEXT: v_and_b32_e32 v3, 0xffff, v5
+; GFX11-FAKE16-NEXT: v_and_b32_e32 v4, 0xffff, v6
+; GFX11-FAKE16-NEXT: v_lshl_or_b32 v21, v66, 16, v1
+; GFX11-FAKE16-NEXT: v_and_b32_e32 v1, 0xffff, v8
+; GFX11-FAKE16-NEXT: v_lshl_or_b32 v38, v38, 16, v28
+; GFX11-FAKE16-NEXT: v_lshl_or_b32 v32, v32, 16, v22
+; GFX11-FAKE16-NEXT: v_lshl_or_b32 v34, v34, 16, v20
+; GFX11-FAKE16-NEXT: v_and_b32_e32 v15, 0xffff, v15
+; GFX11-FAKE16-NEXT: v_lshl_or_b32 v20, v67, 16, v0
+; GFX11-FAKE16-NEXT: v_lshl_or_b32 v22, v65, 16, v2
+; GFX11-FAKE16-NEXT: v_lshl_or_b32 v23, v64, 16, v3
+; GFX11-FAKE16-NEXT: v_and_b32_e32 v0, 0xffff, v7
+; GFX11-FAKE16-NEXT: v_and_b32_e32 v2, 0xffff, v9
+; GFX11-FAKE16-NEXT: v_and_b32_e32 v3, 0xffff, v10
+; GFX11-FAKE16-NEXT: v_mov_b32_e32 v5, v49
+; GFX11-FAKE16-NEXT: v_lshl_or_b32 v48, v48, 16, v26
+; GFX11-FAKE16-NEXT: v_lshl_or_b32 v26, v53, 16, v1
+; GFX11-FAKE16-NEXT: v_mov_b32_e32 v1, v37
+; GFX11-FAKE16-NEXT: v_lshl_or_b32 v36, v36, 16, v30
+; GFX11-FAKE16-NEXT: v_mov_b32_e32 v7, v31
+; GFX11-FAKE16-NEXT: v_lshl_or_b32 v30, v83, 16, v24
+; GFX11-FAKE16-NEXT: v_lshl_or_b32 v24, v55, 16, v4
+; GFX11-FAKE16-NEXT: v_and_b32_e32 v4, 0xffff, v11
+; GFX11-FAKE16-NEXT: v_lshl_or_b32 v15, v80, 16, v15
+; GFX11-FAKE16-NEXT: v_lshl_or_b32 v25, v54, 16, v0
+; GFX11-FAKE16-NEXT: v_lshl_or_b32 v27, v52, 16, v2
+; GFX11-FAKE16-NEXT: v_lshl_or_b32 v28, v51, 16, v3
+; GFX11-FAKE16-NEXT: v_lshl_or_b32 v29, v50, 16, v4
+; GFX11-FAKE16-NEXT: v_mov_b32_e32 v0, v36
+; GFX11-FAKE16-NEXT: v_dual_mov_b32 v2, v38 :: v_dual_mov_b32 v3, v39
+; GFX11-FAKE16-NEXT: v_mov_b32_e32 v4, v48
+; GFX11-FAKE16-NEXT: v_mov_b32_e32 v6, v30
+; GFX11-FAKE16-NEXT: v_dual_mov_b32 v8, v32 :: v_dual_mov_b32 v9, v33
+; GFX11-FAKE16-NEXT: v_dual_mov_b32 v10, v34 :: v_dual_mov_b32 v11, v35
+; GFX11-FAKE16-NEXT: s_setpc_b64 s[30:31]
+; GFX11-FAKE16-NEXT: .LBB33_4:
+; GFX11-FAKE16-NEXT: ; implicit-def: $vgpr36
+; GFX11-FAKE16-NEXT: ; implicit-def: $vgpr37
+; GFX11-FAKE16-NEXT: ; implicit-def: $vgpr38
+; GFX11-FAKE16-NEXT: ; implicit-def: $vgpr39
+; GFX11-FAKE16-NEXT: ; implicit-def: $vgpr48
+; GFX11-FAKE16-NEXT: ; implicit-def: $vgpr49
+; GFX11-FAKE16-NEXT: ; implicit-def: $vgpr83
+; GFX11-FAKE16-NEXT: ; implicit-def: $vgpr31
+; GFX11-FAKE16-NEXT: ; implicit-def: $vgpr32
+; GFX11-FAKE16-NEXT: ; implicit-def: $vgpr33
+; GFX11-FAKE16-NEXT: ; implicit-def: $vgpr34
+; GFX11-FAKE16-NEXT: ; implicit-def: $vgpr35
+; GFX11-FAKE16-NEXT: ; implicit-def: $vgpr12
+; GFX11-FAKE16-NEXT: ; implicit-def: $vgpr82
+; GFX11-FAKE16-NEXT: ; implicit-def: $vgpr81
+; GFX11-FAKE16-NEXT: ; implicit-def: $vgpr80
+; GFX11-FAKE16-NEXT: ; implicit-def: $vgpr71
+; GFX11-FAKE16-NEXT: ; implicit-def: $vgpr70
+; GFX11-FAKE16-NEXT: ; implicit-def: $vgpr69
+; GFX11-FAKE16-NEXT: ; implicit-def: $vgpr68
+; GFX11-FAKE16-NEXT: ; implicit-def: $vgpr67
+; GFX11-FAKE16-NEXT: ; implicit-def: $vgpr66
+; GFX11-FAKE16-NEXT: ; implicit-def: $vgpr65
+; GFX11-FAKE16-NEXT: ; implicit-def: $vgpr64
+; GFX11-FAKE16-NEXT: ; implicit-def: $vgpr55
+; GFX11-FAKE16-NEXT: ; implicit-def: $vgpr54
+; GFX11-FAKE16-NEXT: ; implicit-def: $vgpr53
+; GFX11-FAKE16-NEXT: ; implicit-def: $vgpr52
+; GFX11-FAKE16-NEXT: ; implicit-def: $vgpr51
+; GFX11-FAKE16-NEXT: ; implicit-def: $vgpr50
+; GFX11-FAKE16-NEXT: s_branch .LBB33_2
%cmp = icmp eq i32 %b, 0
br i1 %cmp, label %cmp.true, label %cmp.false
@@ -24867,153 +25691,305 @@ define inreg <30 x float> @bitcast_v60f16_to_v30f32_scalar(<60 x half> inreg %a,
; GFX11-TRUE16-LABEL: bitcast_v60f16_to_v30f32_scalar:
; GFX11-TRUE16: ; %bb.0:
; GFX11-TRUE16-NEXT: s_waitcnt vmcnt(0) expcnt(0) lgkmcnt(0)
-; GFX11-TRUE16-NEXT: v_mov_b16_e32 v32.h, 0
; GFX11-TRUE16-NEXT: v_cmp_ne_u32_e32 vcc_lo, 0, v12
-; GFX11-TRUE16-NEXT: v_mov_b16_e32 v32.l, v11.h
-; GFX11-TRUE16-NEXT: v_mov_b16_e32 v33.l, v10.h
-; GFX11-TRUE16-NEXT: v_mov_b16_e32 v34.l, v9.h
-; GFX11-TRUE16-NEXT: v_mov_b16_e32 v33.h, v32.h
-; GFX11-TRUE16-NEXT: v_mov_b16_e32 v34.h, v32.h
-; GFX11-TRUE16-NEXT: v_mov_b16_e32 v35.l, v8.h
-; GFX11-TRUE16-NEXT: v_mov_b16_e32 v35.h, v32.h
-; GFX11-TRUE16-NEXT: v_mov_b16_e32 v36.l, v7.h
-; GFX11-TRUE16-NEXT: v_mov_b16_e32 v36.h, v32.h
-; GFX11-TRUE16-NEXT: v_mov_b16_e32 v37.l, v6.h
-; GFX11-TRUE16-NEXT: v_mov_b16_e32 v37.h, v32.h
-; GFX11-TRUE16-NEXT: v_mov_b16_e32 v38.l, v5.h
-; GFX11-TRUE16-NEXT: v_mov_b16_e32 v38.h, v32.h
-; GFX11-TRUE16-NEXT: v_mov_b16_e32 v39.l, v4.h
-; GFX11-TRUE16-NEXT: v_mov_b16_e32 v39.h, v32.h
-; GFX11-TRUE16-NEXT: v_mov_b16_e32 v48.l, v3.h
-; GFX11-TRUE16-NEXT: v_mov_b16_e32 v48.h, v32.h
-; GFX11-TRUE16-NEXT: v_mov_b16_e32 v49.l, v2.h
-; GFX11-TRUE16-NEXT: v_mov_b16_e32 v49.h, v32.h
-; GFX11-TRUE16-NEXT: v_mov_b16_e32 v50.l, v1.h
-; GFX11-TRUE16-NEXT: v_mov_b16_e32 v50.h, v32.h
-; GFX11-TRUE16-NEXT: v_mov_b16_e32 v51.l, v0.h
-; GFX11-TRUE16-NEXT: v_mov_b16_e32 v51.h, v32.h
-; GFX11-TRUE16-NEXT: v_and_b32_e32 v71, 0xffff, v0
-; GFX11-TRUE16-NEXT: v_and_b32_e32 v70, 0xffff, v1
-; GFX11-TRUE16-NEXT: v_and_b32_e32 v69, 0xffff, v2
-; GFX11-TRUE16-NEXT: v_and_b32_e32 v68, 0xffff, v3
-; GFX11-TRUE16-NEXT: v_and_b32_e32 v67, 0xffff, v4
-; GFX11-TRUE16-NEXT: v_and_b32_e32 v66, 0xffff, v5
-; GFX11-TRUE16-NEXT: v_and_b32_e32 v65, 0xffff, v6
-; GFX11-TRUE16-NEXT: v_and_b32_e32 v64, 0xffff, v7
-; GFX11-TRUE16-NEXT: v_and_b32_e32 v55, 0xffff, v8
-; GFX11-TRUE16-NEXT: v_and_b32_e32 v54, 0xffff, v9
-; GFX11-TRUE16-NEXT: v_and_b32_e32 v53, 0xffff, v10
-; GFX11-TRUE16-NEXT: v_and_b32_e32 v52, 0xffff, v11
-; GFX11-TRUE16-NEXT: s_lshr_b32 s40, s29, 16
-; GFX11-TRUE16-NEXT: s_lshr_b32 s41, s28, 16
-; GFX11-TRUE16-NEXT: s_lshr_b32 s42, s27, 16
-; GFX11-TRUE16-NEXT: s_lshr_b32 s14, s26, 16
-; GFX11-TRUE16-NEXT: s_lshr_b32 s13, s25, 16
-; GFX11-TRUE16-NEXT: s_lshr_b32 s12, s24, 16
-; GFX11-TRUE16-NEXT: s_lshr_b32 s11, s23, 16
-; GFX11-TRUE16-NEXT: s_lshr_b32 s10, s22, 16
-; GFX11-TRUE16-NEXT: s_lshr_b32 s9, s21, 16
-; GFX11-TRUE16-NEXT: s_lshr_b32 s8, s20, 16
-; GFX11-TRUE16-NEXT: s_lshr_b32 s7, s19, 16
-; GFX11-TRUE16-NEXT: s_lshr_b32 s6, s18, 16
-; GFX11-TRUE16-NEXT: s_lshr_b32 s5, s17, 16
-; GFX11-TRUE16-NEXT: s_lshr_b32 s4, s16, 16
-; GFX11-TRUE16-NEXT: s_lshr_b32 s43, s3, 16
-; GFX11-TRUE16-NEXT: s_lshr_b32 s44, s2, 16
-; GFX11-TRUE16-NEXT: s_lshr_b32 s45, s1, 16
-; GFX11-TRUE16-NEXT: s_lshr_b32 s46, s0, 16
-; GFX11-TRUE16-NEXT: s_mov_b32 s15, 0
-; GFX11-TRUE16-NEXT: s_pack_ll_b32_b16 s0, s0, s46
-; GFX11-TRUE16-NEXT: s_pack_ll_b32_b16 s1, s1, s45
-; GFX11-TRUE16-NEXT: s_pack_ll_b32_b16 s2, s2, s44
-; GFX11-TRUE16-NEXT: s_pack_ll_b32_b16 s3, s3, s43
-; GFX11-TRUE16-NEXT: s_pack_ll_b32_b16 s4, s16, s4
-; GFX11-TRUE16-NEXT: s_pack_ll_b32_b16 s5, s17, s5
-; GFX11-TRUE16-NEXT: s_pack_ll_b32_b16 s6, s18, s6
-; GFX11-TRUE16-NEXT: s_pack_ll_b32_b16 s7, s19, s7
-; GFX11-TRUE16-NEXT: s_pack_ll_b32_b16 s8, s20, s8
-; GFX11-TRUE16-NEXT: s_pack_ll_b32_b16 s9, s21, s9
-; GFX11-TRUE16-NEXT: s_pack_ll_b32_b16 s10, s22, s10
-; GFX11-TRUE16-NEXT: s_pack_ll_b32_b16 s11, s23, s11
-; GFX11-TRUE16-NEXT: s_pack_ll_b32_b16 s12, s24, s12
-; GFX11-TRUE16-NEXT: s_pack_ll_b32_b16 s13, s25, s13
-; GFX11-TRUE16-NEXT: s_pack_ll_b32_b16 s14, s26, s14
-; GFX11-TRUE16-NEXT: s_pack_ll_b32_b16 s16, s27, s42
-; GFX11-TRUE16-NEXT: s_pack_ll_b32_b16 s17, s28, s41
-; GFX11-TRUE16-NEXT: s_pack_ll_b32_b16 s18, s29, s40
+; GFX11-TRUE16-NEXT: s_clause 0x1f
+; GFX11-TRUE16-NEXT: scratch_store_b32 off, v40, s32 offset:316
+; GFX11-TRUE16-NEXT: scratch_store_b32 off, v41, s32 offset:312
+; GFX11-TRUE16-NEXT: scratch_store_b32 off, v42, s32 offset:308
+; GFX11-TRUE16-NEXT: scratch_store_b32 off, v43, s32 offset:304
+; GFX11-TRUE16-NEXT: scratch_store_b32 off, v44, s32 offset:300
+; GFX11-TRUE16-NEXT: scratch_store_b32 off, v45, s32 offset:296
+; GFX11-TRUE16-NEXT: scratch_store_b32 off, v46, s32 offset:292
+; GFX11-TRUE16-NEXT: scratch_store_b32 off, v47, s32 offset:288
+; GFX11-TRUE16-NEXT: scratch_store_b32 off, v56, s32 offset:284
+; GFX11-TRUE16-NEXT: scratch_store_b32 off, v57, s32 offset:280
+; GFX11-TRUE16-NEXT: scratch_store_b32 off, v58, s32 offset:276
+; GFX11-TRUE16-NEXT: scratch_store_b32 off, v59, s32 offset:272
+; GFX11-TRUE16-NEXT: scratch_store_b32 off, v60, s32 offset:268
+; GFX11-TRUE16-NEXT: scratch_store_b32 off, v61, s32 offset:264
+; GFX11-TRUE16-NEXT: scratch_store_b32 off, v62, s32 offset:260
+; GFX11-TRUE16-NEXT: scratch_store_b32 off, v63, s32 offset:256
+; GFX11-TRUE16-NEXT: scratch_store_b32 off, v72, s32 offset:252
+; GFX11-TRUE16-NEXT: scratch_store_b32 off, v73, s32 offset:248
+; GFX11-TRUE16-NEXT: scratch_store_b32 off, v74, s32 offset:244
+; GFX11-TRUE16-NEXT: scratch_store_b32 off, v75, s32 offset:240
+; GFX11-TRUE16-NEXT: scratch_store_b32 off, v76, s32 offset:236
+; GFX11-TRUE16-NEXT: scratch_store_b32 off, v77, s32 offset:232
+; GFX11-TRUE16-NEXT: scratch_store_b32 off, v78, s32 offset:228
+; GFX11-TRUE16-NEXT: scratch_store_b32 off, v79, s32 offset:224
+; GFX11-TRUE16-NEXT: scratch_store_b32 off, v88, s32 offset:220
+; GFX11-TRUE16-NEXT: scratch_store_b32 off, v89, s32 offset:216
+; GFX11-TRUE16-NEXT: scratch_store_b32 off, v90, s32 offset:212
+; GFX11-TRUE16-NEXT: scratch_store_b32 off, v91, s32 offset:208
+; GFX11-TRUE16-NEXT: scratch_store_b32 off, v92, s32 offset:204
+; GFX11-TRUE16-NEXT: scratch_store_b32 off, v93, s32 offset:200
+; GFX11-TRUE16-NEXT: scratch_store_b32 off, v94, s32 offset:196
+; GFX11-TRUE16-NEXT: scratch_store_b32 off, v95, s32 offset:192
+; GFX11-TRUE16-NEXT: s_clause 0x1f
+; GFX11-TRUE16-NEXT: scratch_store_b32 off, v104, s32 offset:188
+; GFX11-TRUE16-NEXT: scratch_store_b32 off, v105, s32 offset:184
+; GFX11-TRUE16-NEXT: scratch_store_b32 off, v106, s32 offset:180
+; GFX11-TRUE16-NEXT: scratch_store_b32 off, v107, s32 offset:176
+; GFX11-TRUE16-NEXT: scratch_store_b32 off, v108, s32 offset:172
+; GFX11-TRUE16-NEXT: scratch_store_b32 off, v109, s32 offset:168
+; GFX11-TRUE16-NEXT: scratch_store_b32 off, v110, s32 offset:164
+; GFX11-TRUE16-NEXT: scratch_store_b32 off, v111, s32 offset:160
+; GFX11-TRUE16-NEXT: scratch_store_b32 off, v120, s32 offset:156
+; GFX11-TRUE16-NEXT: scratch_store_b32 off, v121, s32 offset:152
+; GFX11-TRUE16-NEXT: scratch_store_b32 off, v122, s32 offset:148
+; GFX11-TRUE16-NEXT: scratch_store_b32 off, v123, s32 offset:144
+; GFX11-TRUE16-NEXT: scratch_store_b32 off, v124, s32 offset:140
+; GFX11-TRUE16-NEXT: scratch_store_b32 off, v125, s32 offset:136
+; GFX11-TRUE16-NEXT: scratch_store_b32 off, v126, s32 offset:132
+; GFX11-TRUE16-NEXT: scratch_store_b32 off, v127, s32 offset:128
+; GFX11-TRUE16-NEXT: scratch_store_b32 off, v136, s32 offset:124
+; GFX11-TRUE16-NEXT: scratch_store_b32 off, v137, s32 offset:120
+; GFX11-TRUE16-NEXT: scratch_store_b32 off, v138, s32 offset:116
+; GFX11-TRUE16-NEXT: scratch_store_b32 off, v139, s32 offset:112
+; GFX11-TRUE16-NEXT: scratch_store_b32 off, v140, s32 offset:108
+; GFX11-TRUE16-NEXT: scratch_store_b32 off, v141, s32 offset:104
+; GFX11-TRUE16-NEXT: scratch_store_b32 off, v142, s32 offset:100
+; GFX11-TRUE16-NEXT: scratch_store_b32 off, v143, s32 offset:96
+; GFX11-TRUE16-NEXT: scratch_store_b32 off, v152, s32 offset:92
+; GFX11-TRUE16-NEXT: scratch_store_b32 off, v153, s32 offset:88
+; GFX11-TRUE16-NEXT: scratch_store_b32 off, v154, s32 offset:84
+; GFX11-TRUE16-NEXT: scratch_store_b32 off, v155, s32 offset:80
+; GFX11-TRUE16-NEXT: scratch_store_b32 off, v156, s32 offset:76
+; GFX11-TRUE16-NEXT: scratch_store_b32 off, v157, s32 offset:72
+; GFX11-TRUE16-NEXT: scratch_store_b32 off, v158, s32 offset:68
+; GFX11-TRUE16-NEXT: scratch_store_b32 off, v159, s32 offset:64
+; GFX11-TRUE16-NEXT: s_clause 0xf
+; GFX11-TRUE16-NEXT: scratch_store_b32 off, v168, s32 offset:60
+; GFX11-TRUE16-NEXT: scratch_store_b32 off, v169, s32 offset:56
+; GFX11-TRUE16-NEXT: scratch_store_b32 off, v170, s32 offset:52
+; GFX11-TRUE16-NEXT: scratch_store_b32 off, v171, s32 offset:48
+; GFX11-TRUE16-NEXT: scratch_store_b32 off, v172, s32 offset:44
+; GFX11-TRUE16-NEXT: scratch_store_b32 off, v173, s32 offset:40
+; GFX11-TRUE16-NEXT: scratch_store_b32 off, v174, s32 offset:36
+; GFX11-TRUE16-NEXT: scratch_store_b32 off, v175, s32 offset:32
+; GFX11-TRUE16-NEXT: scratch_store_b32 off, v184, s32 offset:28
+; GFX11-TRUE16-NEXT: scratch_store_b32 off, v185, s32 offset:24
+; GFX11-TRUE16-NEXT: scratch_store_b32 off, v186, s32 offset:20
+; GFX11-TRUE16-NEXT: scratch_store_b32 off, v187, s32 offset:16
+; GFX11-TRUE16-NEXT: scratch_store_b32 off, v188, s32 offset:12
+; GFX11-TRUE16-NEXT: scratch_store_b32 off, v189, s32 offset:8
+; GFX11-TRUE16-NEXT: scratch_store_b32 off, v190, s32 offset:4
+; GFX11-TRUE16-NEXT: scratch_store_b32 off, v191, s32
+; GFX11-TRUE16-NEXT: v_dual_mov_b32 v29, v11 :: v_dual_mov_b32 v28, v10
+; GFX11-TRUE16-NEXT: v_dual_mov_b32 v30, v9 :: v_dual_mov_b32 v25, v7
+; GFX11-TRUE16-NEXT: v_dual_mov_b32 v26, v8 :: v_dual_mov_b32 v191, v5
+; GFX11-TRUE16-NEXT: v_dual_mov_b32 v190, v6 :: v_dual_mov_b32 v185, v4
+; GFX11-TRUE16-NEXT: v_dual_mov_b32 v186, v3 :: v_dual_mov_b32 v187, v2
+; GFX11-TRUE16-NEXT: v_dual_mov_b32 v188, v1 :: v_dual_mov_b32 v189, v0
+; GFX11-TRUE16-NEXT: s_lshr_b32 s15, s29, 16
+; GFX11-TRUE16-NEXT: s_lshr_b32 s14, s28, 16
+; GFX11-TRUE16-NEXT: s_lshr_b32 s13, s27, 16
+; GFX11-TRUE16-NEXT: s_lshr_b32 s12, s26, 16
+; GFX11-TRUE16-NEXT: s_lshr_b32 s11, s25, 16
+; GFX11-TRUE16-NEXT: s_lshr_b32 s10, s24, 16
+; GFX11-TRUE16-NEXT: s_lshr_b32 s9, s23, 16
+; GFX11-TRUE16-NEXT: s_lshr_b32 s8, s22, 16
+; GFX11-TRUE16-NEXT: s_lshr_b32 s7, s21, 16
+; GFX11-TRUE16-NEXT: s_lshr_b32 s6, s20, 16
+; GFX11-TRUE16-NEXT: s_lshr_b32 s5, s19, 16
+; GFX11-TRUE16-NEXT: s_lshr_b32 s4, s18, 16
+; GFX11-TRUE16-NEXT: s_lshr_b32 s43, s17, 16
+; GFX11-TRUE16-NEXT: s_lshr_b32 s44, s16, 16
+; GFX11-TRUE16-NEXT: s_lshr_b32 s45, s3, 16
+; GFX11-TRUE16-NEXT: s_lshr_b32 s46, s2, 16
+; GFX11-TRUE16-NEXT: s_lshr_b32 s41, s1, 16
+; GFX11-TRUE16-NEXT: s_lshr_b32 s40, s0, 16
+; GFX11-TRUE16-NEXT: s_mov_b32 s42, 0
+; GFX11-TRUE16-NEXT: s_pack_ll_b32_b16 s40, s0, s40
+; GFX11-TRUE16-NEXT: s_pack_ll_b32_b16 s41, s1, s41
+; GFX11-TRUE16-NEXT: s_pack_ll_b32_b16 s0, s2, s46
+; GFX11-TRUE16-NEXT: s_pack_ll_b32_b16 s1, s3, s45
+; GFX11-TRUE16-NEXT: s_pack_ll_b32_b16 s2, s16, s44
+; GFX11-TRUE16-NEXT: s_pack_ll_b32_b16 s3, s17, s43
+; GFX11-TRUE16-NEXT: s_pack_ll_b32_b16 s4, s18, s4
+; GFX11-TRUE16-NEXT: s_pack_ll_b32_b16 s5, s19, s5
+; GFX11-TRUE16-NEXT: s_pack_ll_b32_b16 s6, s20, s6
+; GFX11-TRUE16-NEXT: s_pack_ll_b32_b16 s7, s21, s7
+; GFX11-TRUE16-NEXT: s_pack_ll_b32_b16 s8, s22, s8
+; GFX11-TRUE16-NEXT: s_pack_ll_b32_b16 s9, s23, s9
+; GFX11-TRUE16-NEXT: s_pack_ll_b32_b16 s10, s24, s10
+; GFX11-TRUE16-NEXT: s_pack_ll_b32_b16 s11, s25, s11
+; GFX11-TRUE16-NEXT: s_pack_ll_b32_b16 s12, s26, s12
+; GFX11-TRUE16-NEXT: s_pack_ll_b32_b16 s13, s27, s13
+; GFX11-TRUE16-NEXT: s_pack_ll_b32_b16 s14, s28, s14
+; GFX11-TRUE16-NEXT: s_pack_ll_b32_b16 s15, s29, s15
; GFX11-TRUE16-NEXT: s_and_b32 s47, vcc_lo, exec_lo
; GFX11-TRUE16-NEXT: s_cbranch_scc0 .LBB35_4
; GFX11-TRUE16-NEXT: ; %bb.1: ; %cmp.false
-; GFX11-TRUE16-NEXT: v_lshl_or_b32 v18, v51, 16, v71
-; GFX11-TRUE16-NEXT: v_lshl_or_b32 v19, v50, 16, v70
-; GFX11-TRUE16-NEXT: v_lshl_or_b32 v20, v49, 16, v69
-; GFX11-TRUE16-NEXT: v_lshl_or_b32 v21, v48, 16, v68
-; GFX11-TRUE16-NEXT: v_lshl_or_b32 v22, v39, 16, v67
-; GFX11-TRUE16-NEXT: v_lshl_or_b32 v23, v38, 16, v66
-; GFX11-TRUE16-NEXT: v_lshl_or_b32 v24, v37, 16, v65
-; GFX11-TRUE16-NEXT: v_lshl_or_b32 v25, v36, 16, v64
-; GFX11-TRUE16-NEXT: v_lshl_or_b32 v26, v35, 16, v55
-; GFX11-TRUE16-NEXT: v_lshl_or_b32 v27, v34, 16, v54
-; GFX11-TRUE16-NEXT: v_lshl_or_b32 v28, v33, 16, v53
-; GFX11-TRUE16-NEXT: v_lshl_or_b32 v29, v32, 16, v52
-; GFX11-TRUE16-NEXT: v_dual_mov_b32 v0, s0 :: v_dual_mov_b32 v1, s1
-; GFX11-TRUE16-NEXT: v_dual_mov_b32 v2, s2 :: v_dual_mov_b32 v3, s3
-; GFX11-TRUE16-NEXT: v_dual_mov_b32 v4, s4 :: v_dual_mov_b32 v5, s5
-; GFX11-TRUE16-NEXT: v_dual_mov_b32 v6, s6 :: v_dual_mov_b32 v7, s7
-; GFX11-TRUE16-NEXT: v_dual_mov_b32 v8, s8 :: v_dual_mov_b32 v9, s9
-; GFX11-TRUE16-NEXT: v_dual_mov_b32 v10, s10 :: v_dual_mov_b32 v11, s11
-; GFX11-TRUE16-NEXT: v_dual_mov_b32 v12, s12 :: v_dual_mov_b32 v13, s13
-; GFX11-TRUE16-NEXT: v_dual_mov_b32 v14, s14 :: v_dual_mov_b32 v15, s16
-; GFX11-TRUE16-NEXT: v_dual_mov_b32 v16, s17 :: v_dual_mov_b32 v17, s18
-; GFX11-TRUE16-NEXT: s_and_not1_b32 vcc_lo, exec_lo, s15
+; GFX11-TRUE16-NEXT: v_dual_mov_b32 v0, s40 :: v_dual_mov_b32 v5, s0
+; GFX11-TRUE16-NEXT: v_dual_mov_b32 v2, s41 :: v_dual_mov_b32 v9, s1
+; GFX11-TRUE16-NEXT: v_dual_mov_b32 v14, s2 :: v_dual_mov_b32 v27, s4
+; GFX11-TRUE16-NEXT: v_dual_mov_b32 v20, s3 :: v_dual_mov_b32 v35, s5
+; GFX11-TRUE16-NEXT: v_dual_mov_b32 v44, s6 :: v_dual_mov_b32 v65, s8
+; GFX11-TRUE16-NEXT: v_dual_mov_b32 v54, s7 :: v_dual_mov_b32 v77, s9
+; GFX11-TRUE16-NEXT: v_dual_mov_b32 v90, s10 :: v_dual_mov_b32 v119, s12
+; GFX11-TRUE16-NEXT: v_dual_mov_b32 v104, s11 :: v_dual_mov_b32 v135, s13
+; GFX11-TRUE16-NEXT: v_mov_b32_e32 v152, s14
+; GFX11-TRUE16-NEXT: v_mov_b32_e32 v170, s15
+; GFX11-TRUE16-NEXT: s_and_not1_b32 vcc_lo, exec_lo, s42
; GFX11-TRUE16-NEXT: s_cbranch_vccnz .LBB35_3
; GFX11-TRUE16-NEXT: .LBB35_2: ; %cmp.true
-; GFX11-TRUE16-NEXT: v_lshl_or_b32 v18, v51, 16, v71
-; GFX11-TRUE16-NEXT: v_lshl_or_b32 v19, v50, 16, v70
-; GFX11-TRUE16-NEXT: v_lshl_or_b32 v20, v49, 16, v69
-; GFX11-TRUE16-NEXT: v_lshl_or_b32 v21, v48, 16, v68
-; GFX11-TRUE16-NEXT: v_lshl_or_b32 v22, v39, 16, v67
-; GFX11-TRUE16-NEXT: v_lshl_or_b32 v23, v38, 16, v66
-; GFX11-TRUE16-NEXT: v_lshl_or_b32 v24, v37, 16, v65
-; GFX11-TRUE16-NEXT: v_lshl_or_b32 v25, v36, 16, v64
-; GFX11-TRUE16-NEXT: v_lshl_or_b32 v26, v35, 16, v55
-; GFX11-TRUE16-NEXT: v_lshl_or_b32 v27, v34, 16, v54
-; GFX11-TRUE16-NEXT: v_lshl_or_b32 v28, v33, 16, v53
-; GFX11-TRUE16-NEXT: v_lshl_or_b32 v29, v32, 16, v52
-; GFX11-TRUE16-NEXT: v_pk_add_f16 v0, 0x200, s0 op_sel_hi:[0,1]
-; GFX11-TRUE16-NEXT: v_pk_add_f16 v1, 0x200, s1 op_sel_hi:[0,1]
-; GFX11-TRUE16-NEXT: v_pk_add_f16 v2, 0x200, s2 op_sel_hi:[0,1]
-; GFX11-TRUE16-NEXT: v_pk_add_f16 v3, 0x200, s3 op_sel_hi:[0,1]
-; GFX11-TRUE16-NEXT: v_pk_add_f16 v4, 0x200, s4 op_sel_hi:[0,1]
-; GFX11-TRUE16-NEXT: v_pk_add_f16 v5, 0x200, s5 op_sel_hi:[0,1]
-; GFX11-TRUE16-NEXT: v_pk_add_f16 v6, 0x200, s6 op_sel_hi:[0,1]
-; GFX11-TRUE16-NEXT: v_pk_add_f16 v7, 0x200, s7 op_sel_hi:[0,1]
-; GFX11-TRUE16-NEXT: v_pk_add_f16 v8, 0x200, s8 op_sel_hi:[0,1]
-; GFX11-TRUE16-NEXT: v_pk_add_f16 v9, 0x200, s9 op_sel_hi:[0,1]
-; GFX11-TRUE16-NEXT: v_pk_add_f16 v10, 0x200, s10 op_sel_hi:[0,1]
-; GFX11-TRUE16-NEXT: v_pk_add_f16 v11, 0x200, s11 op_sel_hi:[0,1]
-; GFX11-TRUE16-NEXT: v_pk_add_f16 v12, 0x200, s12 op_sel_hi:[0,1]
-; GFX11-TRUE16-NEXT: v_pk_add_f16 v13, 0x200, s13 op_sel_hi:[0,1]
-; GFX11-TRUE16-NEXT: v_pk_add_f16 v14, 0x200, s14 op_sel_hi:[0,1]
-; GFX11-TRUE16-NEXT: v_pk_add_f16 v15, 0x200, s16 op_sel_hi:[0,1]
-; GFX11-TRUE16-NEXT: v_pk_add_f16 v16, 0x200, s17 op_sel_hi:[0,1]
-; GFX11-TRUE16-NEXT: v_pk_add_f16 v17, 0x200, s18 op_sel_hi:[0,1]
-; GFX11-TRUE16-NEXT: v_pk_add_f16 v18, 0x200, v18 op_sel_hi:[0,1]
-; GFX11-TRUE16-NEXT: v_pk_add_f16 v19, 0x200, v19 op_sel_hi:[0,1]
-; GFX11-TRUE16-NEXT: v_pk_add_f16 v20, 0x200, v20 op_sel_hi:[0,1]
-; GFX11-TRUE16-NEXT: v_pk_add_f16 v21, 0x200, v21 op_sel_hi:[0,1]
-; GFX11-TRUE16-NEXT: v_pk_add_f16 v22, 0x200, v22 op_sel_hi:[0,1]
-; GFX11-TRUE16-NEXT: v_pk_add_f16 v23, 0x200, v23 op_sel_hi:[0,1]
-; GFX11-TRUE16-NEXT: v_pk_add_f16 v24, 0x200, v24 op_sel_hi:[0,1]
+; GFX11-TRUE16-NEXT: v_pk_add_f16 v0, 0x200, s40 op_sel_hi:[0,1]
+; GFX11-TRUE16-NEXT: v_pk_add_f16 v2, 0x200, s41 op_sel_hi:[0,1]
+; GFX11-TRUE16-NEXT: v_pk_add_f16 v189, 0x200, v189 op_sel_hi:[0,1]
+; GFX11-TRUE16-NEXT: v_pk_add_f16 v188, 0x200, v188 op_sel_hi:[0,1]
+; GFX11-TRUE16-NEXT: v_pk_add_f16 v187, 0x200, v187 op_sel_hi:[0,1]
+; GFX11-TRUE16-NEXT: v_pk_add_f16 v186, 0x200, v186 op_sel_hi:[0,1]
+; GFX11-TRUE16-NEXT: v_pk_add_f16 v185, 0x200, v185 op_sel_hi:[0,1]
+; GFX11-TRUE16-NEXT: v_pk_add_f16 v191, 0x200, v191 op_sel_hi:[0,1]
+; GFX11-TRUE16-NEXT: v_pk_add_f16 v190, 0x200, v190 op_sel_hi:[0,1]
; GFX11-TRUE16-NEXT: v_pk_add_f16 v25, 0x200, v25 op_sel_hi:[0,1]
; GFX11-TRUE16-NEXT: v_pk_add_f16 v26, 0x200, v26 op_sel_hi:[0,1]
-; GFX11-TRUE16-NEXT: v_pk_add_f16 v27, 0x200, v27 op_sel_hi:[0,1]
+; GFX11-TRUE16-NEXT: v_pk_add_f16 v30, 0x200, v30 op_sel_hi:[0,1]
; GFX11-TRUE16-NEXT: v_pk_add_f16 v28, 0x200, v28 op_sel_hi:[0,1]
; GFX11-TRUE16-NEXT: v_pk_add_f16 v29, 0x200, v29 op_sel_hi:[0,1]
+; GFX11-TRUE16-NEXT: v_pk_add_f16 v5, 0x200, s0 op_sel_hi:[0,1]
+; GFX11-TRUE16-NEXT: v_pk_add_f16 v9, 0x200, s1 op_sel_hi:[0,1]
+; GFX11-TRUE16-NEXT: v_pk_add_f16 v14, 0x200, s2 op_sel_hi:[0,1]
+; GFX11-TRUE16-NEXT: v_pk_add_f16 v20, 0x200, s3 op_sel_hi:[0,1]
+; GFX11-TRUE16-NEXT: v_pk_add_f16 v27, 0x200, s4 op_sel_hi:[0,1]
+; GFX11-TRUE16-NEXT: v_pk_add_f16 v35, 0x200, s5 op_sel_hi:[0,1]
+; GFX11-TRUE16-NEXT: v_pk_add_f16 v44, 0x200, s6 op_sel_hi:[0,1]
+; GFX11-TRUE16-NEXT: v_pk_add_f16 v54, 0x200, s7 op_sel_hi:[0,1]
+; GFX11-TRUE16-NEXT: v_pk_add_f16 v65, 0x200, s8 op_sel_hi:[0,1]
+; GFX11-TRUE16-NEXT: v_pk_add_f16 v77, 0x200, s9 op_sel_hi:[0,1]
+; GFX11-TRUE16-NEXT: v_pk_add_f16 v90, 0x200, s10 op_sel_hi:[0,1]
+; GFX11-TRUE16-NEXT: v_pk_add_f16 v104, 0x200, s11 op_sel_hi:[0,1]
+; GFX11-TRUE16-NEXT: v_pk_add_f16 v119, 0x200, s12 op_sel_hi:[0,1]
+; GFX11-TRUE16-NEXT: v_pk_add_f16 v135, 0x200, s13 op_sel_hi:[0,1]
+; GFX11-TRUE16-NEXT: v_pk_add_f16 v152, 0x200, s14 op_sel_hi:[0,1]
+; GFX11-TRUE16-NEXT: v_pk_add_f16 v170, 0x200, s15 op_sel_hi:[0,1]
; GFX11-TRUE16-NEXT: .LBB35_3: ; %end
+; GFX11-TRUE16-NEXT: v_dual_mov_b32 v1, v2 :: v_dual_mov_b32 v2, v5
+; GFX11-TRUE16-NEXT: v_dual_mov_b32 v5, v20 :: v_dual_mov_b32 v6, v27
+; GFX11-TRUE16-NEXT: v_dual_mov_b32 v7, v35 :: v_dual_mov_b32 v8, v44
+; GFX11-TRUE16-NEXT: v_dual_mov_b32 v11, v77 :: v_dual_mov_b32 v12, v90
+; GFX11-TRUE16-NEXT: v_mov_b32_e32 v13, v104
+; GFX11-TRUE16-NEXT: v_dual_mov_b32 v15, v135 :: v_dual_mov_b32 v16, v152
+; GFX11-TRUE16-NEXT: v_dual_mov_b32 v17, v170 :: v_dual_mov_b32 v18, v189
+; GFX11-TRUE16-NEXT: v_dual_mov_b32 v19, v188 :: v_dual_mov_b32 v20, v187
+; GFX11-TRUE16-NEXT: v_dual_mov_b32 v21, v186 :: v_dual_mov_b32 v22, v185
+; GFX11-TRUE16-NEXT: v_dual_mov_b32 v23, v191 :: v_dual_mov_b32 v24, v190
+; GFX11-TRUE16-NEXT: s_clause 0x1f
+; GFX11-TRUE16-NEXT: scratch_load_b32 v191, off, s32
+; GFX11-TRUE16-NEXT: scratch_load_b32 v190, off, s32 offset:4
+; GFX11-TRUE16-NEXT: scratch_load_b32 v189, off, s32 offset:8
+; GFX11-TRUE16-NEXT: scratch_load_b32 v188, off, s32 offset:12
+; GFX11-TRUE16-NEXT: scratch_load_b32 v187, off, s32 offset:16
+; GFX11-TRUE16-NEXT: scratch_load_b32 v186, off, s32 offset:20
+; GFX11-TRUE16-NEXT: scratch_load_b32 v185, off, s32 offset:24
+; GFX11-TRUE16-NEXT: scratch_load_b32 v184, off, s32 offset:28
+; GFX11-TRUE16-NEXT: scratch_load_b32 v175, off, s32 offset:32
+; GFX11-TRUE16-NEXT: scratch_load_b32 v174, off, s32 offset:36
+; GFX11-TRUE16-NEXT: scratch_load_b32 v173, off, s32 offset:40
+; GFX11-TRUE16-NEXT: scratch_load_b32 v172, off, s32 offset:44
+; GFX11-TRUE16-NEXT: scratch_load_b32 v171, off, s32 offset:48
+; GFX11-TRUE16-NEXT: scratch_load_b32 v170, off, s32 offset:52
+; GFX11-TRUE16-NEXT: scratch_load_b32 v169, off, s32 offset:56
+; GFX11-TRUE16-NEXT: scratch_load_b32 v168, off, s32 offset:60
+; GFX11-TRUE16-NEXT: scratch_load_b32 v159, off, s32 offset:64
+; GFX11-TRUE16-NEXT: scratch_load_b32 v158, off, s32 offset:68
+; GFX11-TRUE16-NEXT: scratch_load_b32 v157, off, s32 offset:72
+; GFX11-TRUE16-NEXT: scratch_load_b32 v156, off, s32 offset:76
+; GFX11-TRUE16-NEXT: scratch_load_b32 v155, off, s32 offset:80
+; GFX11-TRUE16-NEXT: scratch_load_b32 v154, off, s32 offset:84
+; GFX11-TRUE16-NEXT: scratch_load_b32 v153, off, s32 offset:88
+; GFX11-TRUE16-NEXT: scratch_load_b32 v152, off, s32 offset:92
+; GFX11-TRUE16-NEXT: scratch_load_b32 v143, off, s32 offset:96
+; GFX11-TRUE16-NEXT: scratch_load_b32 v142, off, s32 offset:100
+; GFX11-TRUE16-NEXT: scratch_load_b32 v141, off, s32 offset:104
+; GFX11-TRUE16-NEXT: scratch_load_b32 v140, off, s32 offset:108
+; GFX11-TRUE16-NEXT: scratch_load_b32 v139, off, s32 offset:112
+; GFX11-TRUE16-NEXT: scratch_load_b32 v138, off, s32 offset:116
+; GFX11-TRUE16-NEXT: scratch_load_b32 v137, off, s32 offset:120
+; GFX11-TRUE16-NEXT: scratch_load_b32 v136, off, s32 offset:124
+; GFX11-TRUE16-NEXT: s_clause 0x1f
+; GFX11-TRUE16-NEXT: scratch_load_b32 v127, off, s32 offset:128
+; GFX11-TRUE16-NEXT: scratch_load_b32 v126, off, s32 offset:132
+; GFX11-TRUE16-NEXT: scratch_load_b32 v125, off, s32 offset:136
+; GFX11-TRUE16-NEXT: scratch_load_b32 v124, off, s32 offset:140
+; GFX11-TRUE16-NEXT: scratch_load_b32 v123, off, s32 offset:144
+; GFX11-TRUE16-NEXT: scratch_load_b32 v122, off, s32 offset:148
+; GFX11-TRUE16-NEXT: scratch_load_b32 v121, off, s32 offset:152
+; GFX11-TRUE16-NEXT: scratch_load_b32 v120, off, s32 offset:156
+; GFX11-TRUE16-NEXT: scratch_load_b32 v111, off, s32 offset:160
+; GFX11-TRUE16-NEXT: scratch_load_b32 v110, off, s32 offset:164
+; GFX11-TRUE16-NEXT: scratch_load_b32 v109, off, s32 offset:168
+; GFX11-TRUE16-NEXT: scratch_load_b32 v108, off, s32 offset:172
+; GFX11-TRUE16-NEXT: scratch_load_b32 v107, off, s32 offset:176
+; GFX11-TRUE16-NEXT: scratch_load_b32 v106, off, s32 offset:180
+; GFX11-TRUE16-NEXT: scratch_load_b32 v105, off, s32 offset:184
+; GFX11-TRUE16-NEXT: scratch_load_b32 v104, off, s32 offset:188
+; GFX11-TRUE16-NEXT: scratch_load_b32 v95, off, s32 offset:192
+; GFX11-TRUE16-NEXT: scratch_load_b32 v94, off, s32 offset:196
+; GFX11-TRUE16-NEXT: scratch_load_b32 v93, off, s32 offset:200
+; GFX11-TRUE16-NEXT: scratch_load_b32 v92, off, s32 offset:204
+; GFX11-TRUE16-NEXT: scratch_load_b32 v91, off, s32 offset:208
+; GFX11-TRUE16-NEXT: scratch_load_b32 v90, off, s32 offset:212
+; GFX11-TRUE16-NEXT: scratch_load_b32 v89, off, s32 offset:216
+; GFX11-TRUE16-NEXT: scratch_load_b32 v88, off, s32 offset:220
+; GFX11-TRUE16-NEXT: scratch_load_b32 v79, off, s32 offset:224
+; GFX11-TRUE16-NEXT: scratch_load_b32 v78, off, s32 offset:228
+; GFX11-TRUE16-NEXT: scratch_load_b32 v77, off, s32 offset:232
+; GFX11-TRUE16-NEXT: scratch_load_b32 v76, off, s32 offset:236
+; GFX11-TRUE16-NEXT: scratch_load_b32 v75, off, s32 offset:240
+; GFX11-TRUE16-NEXT: scratch_load_b32 v74, off, s32 offset:244
+; GFX11-TRUE16-NEXT: scratch_load_b32 v73, off, s32 offset:248
+; GFX11-TRUE16-NEXT: scratch_load_b32 v72, off, s32 offset:252
+; GFX11-TRUE16-NEXT: s_clause 0xf
+; GFX11-TRUE16-NEXT: scratch_load_b32 v63, off, s32 offset:256
+; GFX11-TRUE16-NEXT: scratch_load_b32 v62, off, s32 offset:260
+; GFX11-TRUE16-NEXT: scratch_load_b32 v61, off, s32 offset:264
+; GFX11-TRUE16-NEXT: scratch_load_b32 v60, off, s32 offset:268
+; GFX11-TRUE16-NEXT: scratch_load_b32 v59, off, s32 offset:272
+; GFX11-TRUE16-NEXT: scratch_load_b32 v58, off, s32 offset:276
+; GFX11-TRUE16-NEXT: scratch_load_b32 v57, off, s32 offset:280
+; GFX11-TRUE16-NEXT: scratch_load_b32 v56, off, s32 offset:284
+; GFX11-TRUE16-NEXT: scratch_load_b32 v47, off, s32 offset:288
+; GFX11-TRUE16-NEXT: scratch_load_b32 v46, off, s32 offset:292
+; GFX11-TRUE16-NEXT: scratch_load_b32 v45, off, s32 offset:296
+; GFX11-TRUE16-NEXT: scratch_load_b32 v44, off, s32 offset:300
+; GFX11-TRUE16-NEXT: scratch_load_b32 v43, off, s32 offset:304
+; GFX11-TRUE16-NEXT: scratch_load_b32 v42, off, s32 offset:308
+; GFX11-TRUE16-NEXT: scratch_load_b32 v41, off, s32 offset:312
+; GFX11-TRUE16-NEXT: scratch_load_b32 v40, off, s32 offset:316
+; GFX11-TRUE16-NEXT: v_dual_mov_b32 v3, v9 :: v_dual_mov_b32 v4, v14
+; GFX11-TRUE16-NEXT: v_dual_mov_b32 v9, v54 :: v_dual_mov_b32 v10, v65
+; GFX11-TRUE16-NEXT: v_dual_mov_b32 v14, v119 :: v_dual_mov_b32 v27, v30
+; GFX11-TRUE16-NEXT: s_waitcnt vmcnt(0)
; GFX11-TRUE16-NEXT: s_setpc_b64 s[30:31]
; GFX11-TRUE16-NEXT: .LBB35_4:
+; GFX11-TRUE16-NEXT: v_dual_mov_b32 v64, v29 :: v_dual_mov_b32 v65, v28
+; GFX11-TRUE16-NEXT: v_mov_b32_e32 v66, v30
+; GFX11-TRUE16-NEXT: v_dual_mov_b32 v53, v26 :: v_dual_mov_b32 v54, v25
; GFX11-TRUE16-NEXT: ; implicit-def: $vgpr0_vgpr1_vgpr2_vgpr3_vgpr4_vgpr5_vgpr6_vgpr7_vgpr8_vgpr9_vgpr10_vgpr11_vgpr12_vgpr13_vgpr14_vgpr15_vgpr16_vgpr17_vgpr18_vgpr19_vgpr20_vgpr21_vgpr22_vgpr23_vgpr24_vgpr25_vgpr26_vgpr27_vgpr28_vgpr29_vgpr30_vgpr31
+; GFX11-TRUE16-NEXT: ; implicit-def: $vgpr1_vgpr2_vgpr3_vgpr4_vgpr5_vgpr6_vgpr7_vgpr8_vgpr9_vgpr10_vgpr11_vgpr12_vgpr13_vgpr14_vgpr15_vgpr16_vgpr17_vgpr18_vgpr19_vgpr20_vgpr21_vgpr22_vgpr23_vgpr24_vgpr25_vgpr26_vgpr27_vgpr28_vgpr29_vgpr30_vgpr31_vgpr32
+; GFX11-TRUE16-NEXT: ; implicit-def: $vgpr3_vgpr4_vgpr5_vgpr6_vgpr7_vgpr8_vgpr9_vgpr10_vgpr11_vgpr12_vgpr13_vgpr14_vgpr15_vgpr16_vgpr17_vgpr18_vgpr19_vgpr20_vgpr21_vgpr22_vgpr23_vgpr24_vgpr25_vgpr26_vgpr27_vgpr28_vgpr29_vgpr30_vgpr31_vgpr32_vgpr33_vgpr34
+; GFX11-TRUE16-NEXT: ; implicit-def: $vgpr6_vgpr7_vgpr8_vgpr9_vgpr10_vgpr11_vgpr12_vgpr13_vgpr14_vgpr15_vgpr16_vgpr17_vgpr18_vgpr19_vgpr20_vgpr21_vgpr22_vgpr23_vgpr24_vgpr25_vgpr26_vgpr27_vgpr28_vgpr29_vgpr30_vgpr31_vgpr32_vgpr33_vgpr34_vgpr35_vgpr36_vgpr37
+; GFX11-TRUE16-NEXT: ; implicit-def: $vgpr10_vgpr11_vgpr12_vgpr13_vgpr14_vgpr15_vgpr16_vgpr17_vgpr18_vgpr19_vgpr20_vgpr21_vgpr22_vgpr23_vgpr24_vgpr25_vgpr26_vgpr27_vgpr28_vgpr29_vgpr30_vgpr31_vgpr32_vgpr33_vgpr34_vgpr35_vgpr36_vgpr37_vgpr38_vgpr39_vgpr40_vgpr41
+; GFX11-TRUE16-NEXT: ; implicit-def: $vgpr15_vgpr16_vgpr17_vgpr18_vgpr19_vgpr20_vgpr21_vgpr22_vgpr23_vgpr24_vgpr25_vgpr26_vgpr27_vgpr28_vgpr29_vgpr30_vgpr31_vgpr32_vgpr33_vgpr34_vgpr35_vgpr36_vgpr37_vgpr38_vgpr39_vgpr40_vgpr41_vgpr42_vgpr43_vgpr44_vgpr45_vgpr46
+; GFX11-TRUE16-NEXT: ; implicit-def: $vgpr21_vgpr22_vgpr23_vgpr24_vgpr25_vgpr26_vgpr27_vgpr28_vgpr29_vgpr30_vgpr31_vgpr32_vgpr33_vgpr34_vgpr35_vgpr36_vgpr37_vgpr38_vgpr39_vgpr40_vgpr41_vgpr42_vgpr43_vgpr44_vgpr45_vgpr46_vgpr47_vgpr48_vgpr49_vgpr50_vgpr51_vgpr52
+; GFX11-TRUE16-NEXT: s_delay_alu instid0(VALU_DEP_1) | instskip(NEXT) | instid1(VALU_DEP_4)
+; GFX11-TRUE16-NEXT: v_dual_mov_b32 v25, v54 :: v_dual_mov_b32 v26, v53
+; GFX11-TRUE16-NEXT: ; implicit-def: $vgpr28_vgpr29_vgpr30_vgpr31_vgpr32_vgpr33_vgpr34_vgpr35_vgpr36_vgpr37_vgpr38_vgpr39_vgpr40_vgpr41_vgpr42_vgpr43_vgpr44_vgpr45_vgpr46_vgpr47_vgpr48_vgpr49_vgpr50_vgpr51_vgpr52_vgpr53_vgpr54_vgpr55_vgpr56_vgpr57_vgpr58_vgpr59
+; GFX11-TRUE16-NEXT: v_dual_mov_b32 v28, v65 :: v_dual_mov_b32 v29, v64
+; GFX11-TRUE16-NEXT: s_delay_alu instid0(VALU_DEP_4)
+; GFX11-TRUE16-NEXT: v_mov_b32_e32 v30, v66
+; GFX11-TRUE16-NEXT: ; implicit-def: $vgpr36_vgpr37_vgpr38_vgpr39_vgpr40_vgpr41_vgpr42_vgpr43_vgpr44_vgpr45_vgpr46_vgpr47_vgpr48_vgpr49_vgpr50_vgpr51_vgpr52_vgpr53_vgpr54_vgpr55_vgpr56_vgpr57_vgpr58_vgpr59_vgpr60_vgpr61_vgpr62_vgpr63_vgpr64_vgpr65_vgpr66_vgpr67
+; GFX11-TRUE16-NEXT: ; implicit-def: $vgpr45_vgpr46_vgpr47_vgpr48_vgpr49_vgpr50_vgpr51_vgpr52_vgpr53_vgpr54_vgpr55_vgpr56_vgpr57_vgpr58_vgpr59_vgpr60_vgpr61_vgpr62_vgpr63_vgpr64_vgpr65_vgpr66_vgpr67_vgpr68_vgpr69_vgpr70_vgpr71_vgpr72_vgpr73_vgpr74_vgpr75_vgpr76
+; GFX11-TRUE16-NEXT: ; implicit-def: $vgpr55_vgpr56_vgpr57_vgpr58_vgpr59_vgpr60_vgpr61_vgpr62_vgpr63_vgpr64_vgpr65_vgpr66_vgpr67_vgpr68_vgpr69_vgpr70_vgpr71_vgpr72_vgpr73_vgpr74_vgpr75_vgpr76_vgpr77_vgpr78_vgpr79_vgpr80_vgpr81_vgpr82_vgpr83_vgpr84_vgpr85_vgpr86
+; GFX11-TRUE16-NEXT: ; implicit-def: $vgpr66_vgpr67_vgpr68_vgpr69_vgpr70_vgpr71_vgpr72_vgpr73_vgpr74_vgpr75_vgpr76_vgpr77_vgpr78_vgpr79_vgpr80_vgpr81_vgpr82_vgpr83_vgpr84_vgpr85_vgpr86_vgpr87_vgpr88_vgpr89_vgpr90_vgpr91_vgpr92_vgpr93_vgpr94_vgpr95_vgpr96_vgpr97
+; GFX11-TRUE16-NEXT: ; implicit-def: $vgpr78_vgpr79_vgpr80_vgpr81_vgpr82_vgpr83_vgpr84_vgpr85_vgpr86_vgpr87_vgpr88_vgpr89_vgpr90_vgpr91_vgpr92_vgpr93_vgpr94_vgpr95_vgpr96_vgpr97_vgpr98_vgpr99_vgpr100_vgpr101_vgpr102_vgpr103_vgpr104_vgpr105_vgpr106_vgpr107_vgpr108_vgpr109
+; GFX11-TRUE16-NEXT: ; implicit-def: $vgpr91_vgpr92_vgpr93_vgpr94_vgpr95_vgpr96_vgpr97_vgpr98_vgpr99_vgpr100_vgpr101_vgpr102_vgpr103_vgpr104_vgpr105_vgpr106_vgpr107_vgpr108_vgpr109_vgpr110_vgpr111_vgpr112_vgpr113_vgpr114_vgpr115_vgpr116_vgpr117_vgpr118_vgpr119_vgpr120_vgpr121_vgpr122
+; GFX11-TRUE16-NEXT: ; implicit-def: $vgpr105_vgpr106_vgpr107_vgpr108_vgpr109_vgpr110_vgpr111_vgpr112_vgpr113_vgpr114_vgpr115_vgpr116_vgpr117_vgpr118_vgpr119_vgpr120_vgpr121_vgpr122_vgpr123_vgpr124_vgpr125_vgpr126_vgpr127_vgpr128_vgpr129_vgpr130_vgpr131_vgpr132_vgpr133_vgpr134_vgpr135_vgpr136
+; GFX11-TRUE16-NEXT: ; implicit-def: $vgpr120_vgpr121_vgpr122_vgpr123_vgpr124_vgpr125_vgpr126_vgpr127_vgpr128_vgpr129_vgpr130_vgpr131_vgpr132_vgpr133_vgpr134_vgpr135_vgpr136_vgpr137_vgpr138_vgpr139_vgpr140_vgpr141_vgpr142_vgpr143_vgpr144_vgpr145_vgpr146_vgpr147_vgpr148_vgpr149_vgpr150_vgpr151
+; GFX11-TRUE16-NEXT: ; implicit-def: $vgpr136_vgpr137_vgpr138_vgpr139_vgpr140_vgpr141_vgpr142_vgpr143_vgpr144_vgpr145_vgpr146_vgpr147_vgpr148_vgpr149_vgpr150_vgpr151_vgpr152_vgpr153_vgpr154_vgpr155_vgpr156_vgpr157_vgpr158_vgpr159_vgpr160_vgpr161_vgpr162_vgpr163_vgpr164_vgpr165_vgpr166_vgpr167
+; GFX11-TRUE16-NEXT: ; implicit-def: $vgpr153_vgpr154_vgpr155_vgpr156_vgpr157_vgpr158_vgpr159_vgpr160_vgpr161_vgpr162_vgpr163_vgpr164_vgpr165_vgpr166_vgpr167_vgpr168_vgpr169_vgpr170_vgpr171_vgpr172_vgpr173_vgpr174_vgpr175_vgpr176_vgpr177_vgpr178_vgpr179_vgpr180_vgpr181_vgpr182_vgpr183_vgpr184
; GFX11-TRUE16-NEXT: s_branch .LBB35_2
;
; GFX11-FAKE16-LABEL: bitcast_v60f16_to_v30f32_scalar:
@@ -30472,153 +31448,305 @@ define inreg <15 x i64> @bitcast_v60i16_to_v15i64_scalar(<60 x i16> inreg %a, i3
; GFX11-TRUE16-LABEL: bitcast_v60i16_to_v15i64_scalar:
; GFX11-TRUE16: ; %bb.0:
; GFX11-TRUE16-NEXT: s_waitcnt vmcnt(0) expcnt(0) lgkmcnt(0)
-; GFX11-TRUE16-NEXT: v_mov_b16_e32 v32.h, 0
; GFX11-TRUE16-NEXT: v_cmp_ne_u32_e32 vcc_lo, 0, v12
-; GFX11-TRUE16-NEXT: v_mov_b16_e32 v32.l, v11.h
-; GFX11-TRUE16-NEXT: v_mov_b16_e32 v33.l, v10.h
-; GFX11-TRUE16-NEXT: v_mov_b16_e32 v34.l, v9.h
-; GFX11-TRUE16-NEXT: v_mov_b16_e32 v33.h, v32.h
-; GFX11-TRUE16-NEXT: v_mov_b16_e32 v34.h, v32.h
-; GFX11-TRUE16-NEXT: v_mov_b16_e32 v35.l, v8.h
-; GFX11-TRUE16-NEXT: v_mov_b16_e32 v35.h, v32.h
-; GFX11-TRUE16-NEXT: v_mov_b16_e32 v36.l, v7.h
-; GFX11-TRUE16-NEXT: v_mov_b16_e32 v36.h, v32.h
-; GFX11-TRUE16-NEXT: v_mov_b16_e32 v37.l, v6.h
-; GFX11-TRUE16-NEXT: v_mov_b16_e32 v37.h, v32.h
-; GFX11-TRUE16-NEXT: v_mov_b16_e32 v38.l, v5.h
-; GFX11-TRUE16-NEXT: v_mov_b16_e32 v38.h, v32.h
-; GFX11-TRUE16-NEXT: v_mov_b16_e32 v39.l, v4.h
-; GFX11-TRUE16-NEXT: v_mov_b16_e32 v39.h, v32.h
-; GFX11-TRUE16-NEXT: v_mov_b16_e32 v48.l, v3.h
-; GFX11-TRUE16-NEXT: v_mov_b16_e32 v48.h, v32.h
-; GFX11-TRUE16-NEXT: v_mov_b16_e32 v49.l, v2.h
-; GFX11-TRUE16-NEXT: v_mov_b16_e32 v49.h, v32.h
-; GFX11-TRUE16-NEXT: v_mov_b16_e32 v50.l, v1.h
-; GFX11-TRUE16-NEXT: v_mov_b16_e32 v50.h, v32.h
-; GFX11-TRUE16-NEXT: v_mov_b16_e32 v51.l, v0.h
-; GFX11-TRUE16-NEXT: v_mov_b16_e32 v51.h, v32.h
-; GFX11-TRUE16-NEXT: v_and_b32_e32 v71, 0xffff, v0
-; GFX11-TRUE16-NEXT: v_and_b32_e32 v70, 0xffff, v1
-; GFX11-TRUE16-NEXT: v_and_b32_e32 v69, 0xffff, v2
-; GFX11-TRUE16-NEXT: v_and_b32_e32 v68, 0xffff, v3
-; GFX11-TRUE16-NEXT: v_and_b32_e32 v67, 0xffff, v4
-; GFX11-TRUE16-NEXT: v_and_b32_e32 v66, 0xffff, v5
-; GFX11-TRUE16-NEXT: v_and_b32_e32 v65, 0xffff, v6
-; GFX11-TRUE16-NEXT: v_and_b32_e32 v64, 0xffff, v7
-; GFX11-TRUE16-NEXT: v_and_b32_e32 v55, 0xffff, v8
-; GFX11-TRUE16-NEXT: v_and_b32_e32 v54, 0xffff, v9
-; GFX11-TRUE16-NEXT: v_and_b32_e32 v53, 0xffff, v10
-; GFX11-TRUE16-NEXT: v_and_b32_e32 v52, 0xffff, v11
-; GFX11-TRUE16-NEXT: s_lshr_b32 s40, s29, 16
-; GFX11-TRUE16-NEXT: s_lshr_b32 s41, s28, 16
-; GFX11-TRUE16-NEXT: s_lshr_b32 s42, s27, 16
-; GFX11-TRUE16-NEXT: s_lshr_b32 s14, s26, 16
-; GFX11-TRUE16-NEXT: s_lshr_b32 s13, s25, 16
-; GFX11-TRUE16-NEXT: s_lshr_b32 s12, s24, 16
-; GFX11-TRUE16-NEXT: s_lshr_b32 s11, s23, 16
-; GFX11-TRUE16-NEXT: s_lshr_b32 s10, s22, 16
-; GFX11-TRUE16-NEXT: s_lshr_b32 s9, s21, 16
-; GFX11-TRUE16-NEXT: s_lshr_b32 s8, s20, 16
-; GFX11-TRUE16-NEXT: s_lshr_b32 s7, s19, 16
-; GFX11-TRUE16-NEXT: s_lshr_b32 s6, s18, 16
-; GFX11-TRUE16-NEXT: s_lshr_b32 s5, s17, 16
-; GFX11-TRUE16-NEXT: s_lshr_b32 s4, s16, 16
-; GFX11-TRUE16-NEXT: s_lshr_b32 s43, s3, 16
-; GFX11-TRUE16-NEXT: s_lshr_b32 s44, s2, 16
-; GFX11-TRUE16-NEXT: s_lshr_b32 s45, s1, 16
-; GFX11-TRUE16-NEXT: s_lshr_b32 s46, s0, 16
-; GFX11-TRUE16-NEXT: s_mov_b32 s15, 0
-; GFX11-TRUE16-NEXT: s_pack_ll_b32_b16 s0, s0, s46
-; GFX11-TRUE16-NEXT: s_pack_ll_b32_b16 s1, s1, s45
-; GFX11-TRUE16-NEXT: s_pack_ll_b32_b16 s2, s2, s44
-; GFX11-TRUE16-NEXT: s_pack_ll_b32_b16 s3, s3, s43
-; GFX11-TRUE16-NEXT: s_pack_ll_b32_b16 s4, s16, s4
-; GFX11-TRUE16-NEXT: s_pack_ll_b32_b16 s5, s17, s5
-; GFX11-TRUE16-NEXT: s_pack_ll_b32_b16 s6, s18, s6
-; GFX11-TRUE16-NEXT: s_pack_ll_b32_b16 s7, s19, s7
-; GFX11-TRUE16-NEXT: s_pack_ll_b32_b16 s8, s20, s8
-; GFX11-TRUE16-NEXT: s_pack_ll_b32_b16 s9, s21, s9
-; GFX11-TRUE16-NEXT: s_pack_ll_b32_b16 s10, s22, s10
-; GFX11-TRUE16-NEXT: s_pack_ll_b32_b16 s11, s23, s11
-; GFX11-TRUE16-NEXT: s_pack_ll_b32_b16 s12, s24, s12
-; GFX11-TRUE16-NEXT: s_pack_ll_b32_b16 s13, s25, s13
-; GFX11-TRUE16-NEXT: s_pack_ll_b32_b16 s14, s26, s14
-; GFX11-TRUE16-NEXT: s_pack_ll_b32_b16 s16, s27, s42
-; GFX11-TRUE16-NEXT: s_pack_ll_b32_b16 s17, s28, s41
-; GFX11-TRUE16-NEXT: s_pack_ll_b32_b16 s18, s29, s40
+; GFX11-TRUE16-NEXT: s_clause 0x1f
+; GFX11-TRUE16-NEXT: scratch_store_b32 off, v40, s32 offset:316
+; GFX11-TRUE16-NEXT: scratch_store_b32 off, v41, s32 offset:312
+; GFX11-TRUE16-NEXT: scratch_store_b32 off, v42, s32 offset:308
+; GFX11-TRUE16-NEXT: scratch_store_b32 off, v43, s32 offset:304
+; GFX11-TRUE16-NEXT: scratch_store_b32 off, v44, s32 offset:300
+; GFX11-TRUE16-NEXT: scratch_store_b32 off, v45, s32 offset:296
+; GFX11-TRUE16-NEXT: scratch_store_b32 off, v46, s32 offset:292
+; GFX11-TRUE16-NEXT: scratch_store_b32 off, v47, s32 offset:288
+; GFX11-TRUE16-NEXT: scratch_store_b32 off, v56, s32 offset:284
+; GFX11-TRUE16-NEXT: scratch_store_b32 off, v57, s32 offset:280
+; GFX11-TRUE16-NEXT: scratch_store_b32 off, v58, s32 offset:276
+; GFX11-TRUE16-NEXT: scratch_store_b32 off, v59, s32 offset:272
+; GFX11-TRUE16-NEXT: scratch_store_b32 off, v60, s32 offset:268
+; GFX11-TRUE16-NEXT: scratch_store_b32 off, v61, s32 offset:264
+; GFX11-TRUE16-NEXT: scratch_store_b32 off, v62, s32 offset:260
+; GFX11-TRUE16-NEXT: scratch_store_b32 off, v63, s32 offset:256
+; GFX11-TRUE16-NEXT: scratch_store_b32 off, v72, s32 offset:252
+; GFX11-TRUE16-NEXT: scratch_store_b32 off, v73, s32 offset:248
+; GFX11-TRUE16-NEXT: scratch_store_b32 off, v74, s32 offset:244
+; GFX11-TRUE16-NEXT: scratch_store_b32 off, v75, s32 offset:240
+; GFX11-TRUE16-NEXT: scratch_store_b32 off, v76, s32 offset:236
+; GFX11-TRUE16-NEXT: scratch_store_b32 off, v77, s32 offset:232
+; GFX11-TRUE16-NEXT: scratch_store_b32 off, v78, s32 offset:228
+; GFX11-TRUE16-NEXT: scratch_store_b32 off, v79, s32 offset:224
+; GFX11-TRUE16-NEXT: scratch_store_b32 off, v88, s32 offset:220
+; GFX11-TRUE16-NEXT: scratch_store_b32 off, v89, s32 offset:216
+; GFX11-TRUE16-NEXT: scratch_store_b32 off, v90, s32 offset:212
+; GFX11-TRUE16-NEXT: scratch_store_b32 off, v91, s32 offset:208
+; GFX11-TRUE16-NEXT: scratch_store_b32 off, v92, s32 offset:204
+; GFX11-TRUE16-NEXT: scratch_store_b32 off, v93, s32 offset:200
+; GFX11-TRUE16-NEXT: scratch_store_b32 off, v94, s32 offset:196
+; GFX11-TRUE16-NEXT: scratch_store_b32 off, v95, s32 offset:192
+; GFX11-TRUE16-NEXT: s_clause 0x1f
+; GFX11-TRUE16-NEXT: scratch_store_b32 off, v104, s32 offset:188
+; GFX11-TRUE16-NEXT: scratch_store_b32 off, v105, s32 offset:184
+; GFX11-TRUE16-NEXT: scratch_store_b32 off, v106, s32 offset:180
+; GFX11-TRUE16-NEXT: scratch_store_b32 off, v107, s32 offset:176
+; GFX11-TRUE16-NEXT: scratch_store_b32 off, v108, s32 offset:172
+; GFX11-TRUE16-NEXT: scratch_store_b32 off, v109, s32 offset:168
+; GFX11-TRUE16-NEXT: scratch_store_b32 off, v110, s32 offset:164
+; GFX11-TRUE16-NEXT: scratch_store_b32 off, v111, s32 offset:160
+; GFX11-TRUE16-NEXT: scratch_store_b32 off, v120, s32 offset:156
+; GFX11-TRUE16-NEXT: scratch_store_b32 off, v121, s32 offset:152
+; GFX11-TRUE16-NEXT: scratch_store_b32 off, v122, s32 offset:148
+; GFX11-TRUE16-NEXT: scratch_store_b32 off, v123, s32 offset:144
+; GFX11-TRUE16-NEXT: scratch_store_b32 off, v124, s32 offset:140
+; GFX11-TRUE16-NEXT: scratch_store_b32 off, v125, s32 offset:136
+; GFX11-TRUE16-NEXT: scratch_store_b32 off, v126, s32 offset:132
+; GFX11-TRUE16-NEXT: scratch_store_b32 off, v127, s32 offset:128
+; GFX11-TRUE16-NEXT: scratch_store_b32 off, v136, s32 offset:124
+; GFX11-TRUE16-NEXT: scratch_store_b32 off, v137, s32 offset:120
+; GFX11-TRUE16-NEXT: scratch_store_b32 off, v138, s32 offset:116
+; GFX11-TRUE16-NEXT: scratch_store_b32 off, v139, s32 offset:112
+; GFX11-TRUE16-NEXT: scratch_store_b32 off, v140, s32 offset:108
+; GFX11-TRUE16-NEXT: scratch_store_b32 off, v141, s32 offset:104
+; GFX11-TRUE16-NEXT: scratch_store_b32 off, v142, s32 offset:100
+; GFX11-TRUE16-NEXT: scratch_store_b32 off, v143, s32 offset:96
+; GFX11-TRUE16-NEXT: scratch_store_b32 off, v152, s32 offset:92
+; GFX11-TRUE16-NEXT: scratch_store_b32 off, v153, s32 offset:88
+; GFX11-TRUE16-NEXT: scratch_store_b32 off, v154, s32 offset:84
+; GFX11-TRUE16-NEXT: scratch_store_b32 off, v155, s32 offset:80
+; GFX11-TRUE16-NEXT: scratch_store_b32 off, v156, s32 offset:76
+; GFX11-TRUE16-NEXT: scratch_store_b32 off, v157, s32 offset:72
+; GFX11-TRUE16-NEXT: scratch_store_b32 off, v158, s32 offset:68
+; GFX11-TRUE16-NEXT: scratch_store_b32 off, v159, s32 offset:64
+; GFX11-TRUE16-NEXT: s_clause 0xf
+; GFX11-TRUE16-NEXT: scratch_store_b32 off, v168, s32 offset:60
+; GFX11-TRUE16-NEXT: scratch_store_b32 off, v169, s32 offset:56
+; GFX11-TRUE16-NEXT: scratch_store_b32 off, v170, s32 offset:52
+; GFX11-TRUE16-NEXT: scratch_store_b32 off, v171, s32 offset:48
+; GFX11-TRUE16-NEXT: scratch_store_b32 off, v172, s32 offset:44
+; GFX11-TRUE16-NEXT: scratch_store_b32 off, v173, s32 offset:40
+; GFX11-TRUE16-NEXT: scratch_store_b32 off, v174, s32 offset:36
+; GFX11-TRUE16-NEXT: scratch_store_b32 off, v175, s32 offset:32
+; GFX11-TRUE16-NEXT: scratch_store_b32 off, v184, s32 offset:28
+; GFX11-TRUE16-NEXT: scratch_store_b32 off, v185, s32 offset:24
+; GFX11-TRUE16-NEXT: scratch_store_b32 off, v186, s32 offset:20
+; GFX11-TRUE16-NEXT: scratch_store_b32 off, v187, s32 offset:16
+; GFX11-TRUE16-NEXT: scratch_store_b32 off, v188, s32 offset:12
+; GFX11-TRUE16-NEXT: scratch_store_b32 off, v189, s32 offset:8
+; GFX11-TRUE16-NEXT: scratch_store_b32 off, v190, s32 offset:4
+; GFX11-TRUE16-NEXT: scratch_store_b32 off, v191, s32
+; GFX11-TRUE16-NEXT: v_dual_mov_b32 v29, v11 :: v_dual_mov_b32 v28, v10
+; GFX11-TRUE16-NEXT: v_dual_mov_b32 v30, v9 :: v_dual_mov_b32 v25, v7
+; GFX11-TRUE16-NEXT: v_dual_mov_b32 v26, v8 :: v_dual_mov_b32 v191, v5
+; GFX11-TRUE16-NEXT: v_dual_mov_b32 v190, v6 :: v_dual_mov_b32 v185, v4
+; GFX11-TRUE16-NEXT: v_dual_mov_b32 v186, v3 :: v_dual_mov_b32 v187, v2
+; GFX11-TRUE16-NEXT: v_dual_mov_b32 v188, v1 :: v_dual_mov_b32 v189, v0
+; GFX11-TRUE16-NEXT: s_lshr_b32 s15, s29, 16
+; GFX11-TRUE16-NEXT: s_lshr_b32 s14, s28, 16
+; GFX11-TRUE16-NEXT: s_lshr_b32 s13, s27, 16
+; GFX11-TRUE16-NEXT: s_lshr_b32 s12, s26, 16
+; GFX11-TRUE16-NEXT: s_lshr_b32 s11, s25, 16
+; GFX11-TRUE16-NEXT: s_lshr_b32 s10, s24, 16
+; GFX11-TRUE16-NEXT: s_lshr_b32 s9, s23, 16
+; GFX11-TRUE16-NEXT: s_lshr_b32 s8, s22, 16
+; GFX11-TRUE16-NEXT: s_lshr_b32 s7, s21, 16
+; GFX11-TRUE16-NEXT: s_lshr_b32 s6, s20, 16
+; GFX11-TRUE16-NEXT: s_lshr_b32 s5, s19, 16
+; GFX11-TRUE16-NEXT: s_lshr_b32 s4, s18, 16
+; GFX11-TRUE16-NEXT: s_lshr_b32 s43, s17, 16
+; GFX11-TRUE16-NEXT: s_lshr_b32 s44, s16, 16
+; GFX11-TRUE16-NEXT: s_lshr_b32 s45, s3, 16
+; GFX11-TRUE16-NEXT: s_lshr_b32 s46, s2, 16
+; GFX11-TRUE16-NEXT: s_lshr_b32 s41, s1, 16
+; GFX11-TRUE16-NEXT: s_lshr_b32 s40, s0, 16
+; GFX11-TRUE16-NEXT: s_mov_b32 s42, 0
+; GFX11-TRUE16-NEXT: s_pack_ll_b32_b16 s40, s0, s40
+; GFX11-TRUE16-NEXT: s_pack_ll_b32_b16 s41, s1, s41
+; GFX11-TRUE16-NEXT: s_pack_ll_b32_b16 s0, s2, s46
+; GFX11-TRUE16-NEXT: s_pack_ll_b32_b16 s1, s3, s45
+; GFX11-TRUE16-NEXT: s_pack_ll_b32_b16 s2, s16, s44
+; GFX11-TRUE16-NEXT: s_pack_ll_b32_b16 s3, s17, s43
+; GFX11-TRUE16-NEXT: s_pack_ll_b32_b16 s4, s18, s4
+; GFX11-TRUE16-NEXT: s_pack_ll_b32_b16 s5, s19, s5
+; GFX11-TRUE16-NEXT: s_pack_ll_b32_b16 s6, s20, s6
+; GFX11-TRUE16-NEXT: s_pack_ll_b32_b16 s7, s21, s7
+; GFX11-TRUE16-NEXT: s_pack_ll_b32_b16 s8, s22, s8
+; GFX11-TRUE16-NEXT: s_pack_ll_b32_b16 s9, s23, s9
+; GFX11-TRUE16-NEXT: s_pack_ll_b32_b16 s10, s24, s10
+; GFX11-TRUE16-NEXT: s_pack_ll_b32_b16 s11, s25, s11
+; GFX11-TRUE16-NEXT: s_pack_ll_b32_b16 s12, s26, s12
+; GFX11-TRUE16-NEXT: s_pack_ll_b32_b16 s13, s27, s13
+; GFX11-TRUE16-NEXT: s_pack_ll_b32_b16 s14, s28, s14
+; GFX11-TRUE16-NEXT: s_pack_ll_b32_b16 s15, s29, s15
; GFX11-TRUE16-NEXT: s_and_b32 s47, vcc_lo, exec_lo
; GFX11-TRUE16-NEXT: s_cbranch_scc0 .LBB43_4
; GFX11-TRUE16-NEXT: ; %bb.1: ; %cmp.false
-; GFX11-TRUE16-NEXT: v_lshl_or_b32 v18, v51, 16, v71
-; GFX11-TRUE16-NEXT: v_lshl_or_b32 v19, v50, 16, v70
-; GFX11-TRUE16-NEXT: v_lshl_or_b32 v20, v49, 16, v69
-; GFX11-TRUE16-NEXT: v_lshl_or_b32 v21, v48, 16, v68
-; GFX11-TRUE16-NEXT: v_lshl_or_b32 v22, v39, 16, v67
-; GFX11-TRUE16-NEXT: v_lshl_or_b32 v23, v38, 16, v66
-; GFX11-TRUE16-NEXT: v_lshl_or_b32 v24, v37, 16, v65
-; GFX11-TRUE16-NEXT: v_lshl_or_b32 v25, v36, 16, v64
-; GFX11-TRUE16-NEXT: v_lshl_or_b32 v26, v35, 16, v55
-; GFX11-TRUE16-NEXT: v_lshl_or_b32 v27, v34, 16, v54
-; GFX11-TRUE16-NEXT: v_lshl_or_b32 v28, v33, 16, v53
-; GFX11-TRUE16-NEXT: v_lshl_or_b32 v29, v32, 16, v52
-; GFX11-TRUE16-NEXT: v_dual_mov_b32 v0, s0 :: v_dual_mov_b32 v1, s1
-; GFX11-TRUE16-NEXT: v_dual_mov_b32 v2, s2 :: v_dual_mov_b32 v3, s3
-; GFX11-TRUE16-NEXT: v_dual_mov_b32 v4, s4 :: v_dual_mov_b32 v5, s5
-; GFX11-TRUE16-NEXT: v_dual_mov_b32 v6, s6 :: v_dual_mov_b32 v7, s7
-; GFX11-TRUE16-NEXT: v_dual_mov_b32 v8, s8 :: v_dual_mov_b32 v9, s9
-; GFX11-TRUE16-NEXT: v_dual_mov_b32 v10, s10 :: v_dual_mov_b32 v11, s11
-; GFX11-TRUE16-NEXT: v_dual_mov_b32 v12, s12 :: v_dual_mov_b32 v13, s13
-; GFX11-TRUE16-NEXT: v_dual_mov_b32 v14, s14 :: v_dual_mov_b32 v15, s16
-; GFX11-TRUE16-NEXT: v_dual_mov_b32 v16, s17 :: v_dual_mov_b32 v17, s18
-; GFX11-TRUE16-NEXT: s_and_not1_b32 vcc_lo, exec_lo, s15
+; GFX11-TRUE16-NEXT: v_dual_mov_b32 v0, s40 :: v_dual_mov_b32 v5, s0
+; GFX11-TRUE16-NEXT: v_dual_mov_b32 v2, s41 :: v_dual_mov_b32 v9, s1
+; GFX11-TRUE16-NEXT: v_dual_mov_b32 v14, s2 :: v_dual_mov_b32 v27, s4
+; GFX11-TRUE16-NEXT: v_dual_mov_b32 v20, s3 :: v_dual_mov_b32 v35, s5
+; GFX11-TRUE16-NEXT: v_dual_mov_b32 v44, s6 :: v_dual_mov_b32 v65, s8
+; GFX11-TRUE16-NEXT: v_dual_mov_b32 v54, s7 :: v_dual_mov_b32 v77, s9
+; GFX11-TRUE16-NEXT: v_dual_mov_b32 v90, s10 :: v_dual_mov_b32 v119, s12
+; GFX11-TRUE16-NEXT: v_dual_mov_b32 v104, s11 :: v_dual_mov_b32 v135, s13
+; GFX11-TRUE16-NEXT: v_mov_b32_e32 v152, s14
+; GFX11-TRUE16-NEXT: v_mov_b32_e32 v170, s15
+; GFX11-TRUE16-NEXT: s_and_not1_b32 vcc_lo, exec_lo, s42
; GFX11-TRUE16-NEXT: s_cbranch_vccnz .LBB43_3
; GFX11-TRUE16-NEXT: .LBB43_2: ; %cmp.true
-; GFX11-TRUE16-NEXT: v_lshl_or_b32 v18, v51, 16, v71
-; GFX11-TRUE16-NEXT: v_lshl_or_b32 v19, v50, 16, v70
-; GFX11-TRUE16-NEXT: v_lshl_or_b32 v20, v49, 16, v69
-; GFX11-TRUE16-NEXT: v_lshl_or_b32 v21, v48, 16, v68
-; GFX11-TRUE16-NEXT: v_lshl_or_b32 v22, v39, 16, v67
-; GFX11-TRUE16-NEXT: v_lshl_or_b32 v23, v38, 16, v66
-; GFX11-TRUE16-NEXT: v_lshl_or_b32 v24, v37, 16, v65
-; GFX11-TRUE16-NEXT: v_lshl_or_b32 v25, v36, 16, v64
-; GFX11-TRUE16-NEXT: v_lshl_or_b32 v26, v35, 16, v55
-; GFX11-TRUE16-NEXT: v_lshl_or_b32 v27, v34, 16, v54
-; GFX11-TRUE16-NEXT: v_lshl_or_b32 v28, v33, 16, v53
-; GFX11-TRUE16-NEXT: v_lshl_or_b32 v29, v32, 16, v52
-; GFX11-TRUE16-NEXT: v_pk_add_u16 v0, s0, 3 op_sel_hi:[1,0]
-; GFX11-TRUE16-NEXT: v_pk_add_u16 v1, s1, 3 op_sel_hi:[1,0]
-; GFX11-TRUE16-NEXT: v_pk_add_u16 v2, s2, 3 op_sel_hi:[1,0]
-; GFX11-TRUE16-NEXT: v_pk_add_u16 v3, s3, 3 op_sel_hi:[1,0]
-; GFX11-TRUE16-NEXT: v_pk_add_u16 v4, s4, 3 op_sel_hi:[1,0]
-; GFX11-TRUE16-NEXT: v_pk_add_u16 v5, s5, 3 op_sel_hi:[1,0]
-; GFX11-TRUE16-NEXT: v_pk_add_u16 v6, s6, 3 op_sel_hi:[1,0]
-; GFX11-TRUE16-NEXT: v_pk_add_u16 v7, s7, 3 op_sel_hi:[1,0]
-; GFX11-TRUE16-NEXT: v_pk_add_u16 v8, s8, 3 op_sel_hi:[1,0]
-; GFX11-TRUE16-NEXT: v_pk_add_u16 v9, s9, 3 op_sel_hi:[1,0]
-; GFX11-TRUE16-NEXT: v_pk_add_u16 v10, s10, 3 op_sel_hi:[1,0]
-; GFX11-TRUE16-NEXT: v_pk_add_u16 v11, s11, 3 op_sel_hi:[1,0]
-; GFX11-TRUE16-NEXT: v_pk_add_u16 v12, s12, 3 op_sel_hi:[1,0]
-; GFX11-TRUE16-NEXT: v_pk_add_u16 v13, s13, 3 op_sel_hi:[1,0]
-; GFX11-TRUE16-NEXT: v_pk_add_u16 v14, s14, 3 op_sel_hi:[1,0]
-; GFX11-TRUE16-NEXT: v_pk_add_u16 v15, s16, 3 op_sel_hi:[1,0]
-; GFX11-TRUE16-NEXT: v_pk_add_u16 v16, s17, 3 op_sel_hi:[1,0]
-; GFX11-TRUE16-NEXT: v_pk_add_u16 v17, s18, 3 op_sel_hi:[1,0]
-; GFX11-TRUE16-NEXT: v_pk_add_u16 v18, v18, 3 op_sel_hi:[1,0]
-; GFX11-TRUE16-NEXT: v_pk_add_u16 v19, v19, 3 op_sel_hi:[1,0]
-; GFX11-TRUE16-NEXT: v_pk_add_u16 v20, v20, 3 op_sel_hi:[1,0]
-; GFX11-TRUE16-NEXT: v_pk_add_u16 v21, v21, 3 op_sel_hi:[1,0]
-; GFX11-TRUE16-NEXT: v_pk_add_u16 v22, v22, 3 op_sel_hi:[1,0]
-; GFX11-TRUE16-NEXT: v_pk_add_u16 v23, v23, 3 op_sel_hi:[1,0]
-; GFX11-TRUE16-NEXT: v_pk_add_u16 v24, v24, 3 op_sel_hi:[1,0]
+; GFX11-TRUE16-NEXT: v_pk_add_u16 v0, s40, 3 op_sel_hi:[1,0]
+; GFX11-TRUE16-NEXT: v_pk_add_u16 v2, s41, 3 op_sel_hi:[1,0]
+; GFX11-TRUE16-NEXT: v_pk_add_u16 v189, v189, 3 op_sel_hi:[1,0]
+; GFX11-TRUE16-NEXT: v_pk_add_u16 v188, v188, 3 op_sel_hi:[1,0]
+; GFX11-TRUE16-NEXT: v_pk_add_u16 v187, v187, 3 op_sel_hi:[1,0]
+; GFX11-TRUE16-NEXT: v_pk_add_u16 v186, v186, 3 op_sel_hi:[1,0]
+; GFX11-TRUE16-NEXT: v_pk_add_u16 v185, v185, 3 op_sel_hi:[1,0]
+; GFX11-TRUE16-NEXT: v_pk_add_u16 v191, v191, 3 op_sel_hi:[1,0]
+; GFX11-TRUE16-NEXT: v_pk_add_u16 v190, v190, 3 op_sel_hi:[1,0]
; GFX11-TRUE16-NEXT: v_pk_add_u16 v25, v25, 3 op_sel_hi:[1,0]
; GFX11-TRUE16-NEXT: v_pk_add_u16 v26, v26, 3 op_sel_hi:[1,0]
-; GFX11-TRUE16-NEXT: v_pk_add_u16 v27, v27, 3 op_sel_hi:[1,0]
+; GFX11-TRUE16-NEXT: v_pk_add_u16 v30, v30, 3 op_sel_hi:[1,0]
; GFX11-TRUE16-NEXT: v_pk_add_u16 v28, v28, 3 op_sel_hi:[1,0]
; GFX11-TRUE16-NEXT: v_pk_add_u16 v29, v29, 3 op_sel_hi:[1,0]
+; GFX11-TRUE16-NEXT: v_pk_add_u16 v5, s0, 3 op_sel_hi:[1,0]
+; GFX11-TRUE16-NEXT: v_pk_add_u16 v9, s1, 3 op_sel_hi:[1,0]
+; GFX11-TRUE16-NEXT: v_pk_add_u16 v14, s2, 3 op_sel_hi:[1,0]
+; GFX11-TRUE16-NEXT: v_pk_add_u16 v20, s3, 3 op_sel_hi:[1,0]
+; GFX11-TRUE16-NEXT: v_pk_add_u16 v27, s4, 3 op_sel_hi:[1,0]
+; GFX11-TRUE16-NEXT: v_pk_add_u16 v35, s5, 3 op_sel_hi:[1,0]
+; GFX11-TRUE16-NEXT: v_pk_add_u16 v44, s6, 3 op_sel_hi:[1,0]
+; GFX11-TRUE16-NEXT: v_pk_add_u16 v54, s7, 3 op_sel_hi:[1,0]
+; GFX11-TRUE16-NEXT: v_pk_add_u16 v65, s8, 3 op_sel_hi:[1,0]
+; GFX11-TRUE16-NEXT: v_pk_add_u16 v77, s9, 3 op_sel_hi:[1,0]
+; GFX11-TRUE16-NEXT: v_pk_add_u16 v90, s10, 3 op_sel_hi:[1,0]
+; GFX11-TRUE16-NEXT: v_pk_add_u16 v104, s11, 3 op_sel_hi:[1,0]
+; GFX11-TRUE16-NEXT: v_pk_add_u16 v119, s12, 3 op_sel_hi:[1,0]
+; GFX11-TRUE16-NEXT: v_pk_add_u16 v135, s13, 3 op_sel_hi:[1,0]
+; GFX11-TRUE16-NEXT: v_pk_add_u16 v152, s14, 3 op_sel_hi:[1,0]
+; GFX11-TRUE16-NEXT: v_pk_add_u16 v170, s15, 3 op_sel_hi:[1,0]
; GFX11-TRUE16-NEXT: .LBB43_3: ; %end
+; GFX11-TRUE16-NEXT: v_dual_mov_b32 v1, v2 :: v_dual_mov_b32 v2, v5
+; GFX11-TRUE16-NEXT: v_dual_mov_b32 v5, v20 :: v_dual_mov_b32 v6, v27
+; GFX11-TRUE16-NEXT: v_dual_mov_b32 v7, v35 :: v_dual_mov_b32 v8, v44
+; GFX11-TRUE16-NEXT: v_dual_mov_b32 v11, v77 :: v_dual_mov_b32 v12, v90
+; GFX11-TRUE16-NEXT: v_mov_b32_e32 v13, v104
+; GFX11-TRUE16-NEXT: v_dual_mov_b32 v15, v135 :: v_dual_mov_b32 v16, v152
+; GFX11-TRUE16-NEXT: v_dual_mov_b32 v17, v170 :: v_dual_mov_b32 v18, v189
+; GFX11-TRUE16-NEXT: v_dual_mov_b32 v19, v188 :: v_dual_mov_b32 v20, v187
+; GFX11-TRUE16-NEXT: v_dual_mov_b32 v21, v186 :: v_dual_mov_b32 v22, v185
+; GFX11-TRUE16-NEXT: v_dual_mov_b32 v23, v191 :: v_dual_mov_b32 v24, v190
+; GFX11-TRUE16-NEXT: s_clause 0x1f
+; GFX11-TRUE16-NEXT: scratch_load_b32 v191, off, s32
+; GFX11-TRUE16-NEXT: scratch_load_b32 v190, off, s32 offset:4
+; GFX11-TRUE16-NEXT: scratch_load_b32 v189, off, s32 offset:8
+; GFX11-TRUE16-NEXT: scratch_load_b32 v188, off, s32 offset:12
+; GFX11-TRUE16-NEXT: scratch_load_b32 v187, off, s32 offset:16
+; GFX11-TRUE16-NEXT: scratch_load_b32 v186, off, s32 offset:20
+; GFX11-TRUE16-NEXT: scratch_load_b32 v185, off, s32 offset:24
+; GFX11-TRUE16-NEXT: scratch_load_b32 v184, off, s32 offset:28
+; GFX11-TRUE16-NEXT: scratch_load_b32 v175, off, s32 offset:32
+; GFX11-TRUE16-NEXT: scratch_load_b32 v174, off, s32 offset:36
+; GFX11-TRUE16-NEXT: scratch_load_b32 v173, off, s32 offset:40
+; GFX11-TRUE16-NEXT: scratch_load_b32 v172, off, s32 offset:44
+; GFX11-TRUE16-NEXT: scratch_load_b32 v171, off, s32 offset:48
+; GFX11-TRUE16-NEXT: scratch_load_b32 v170, off, s32 offset:52
+; GFX11-TRUE16-NEXT: scratch_load_b32 v169, off, s32 offset:56
+; GFX11-TRUE16-NEXT: scratch_load_b32 v168, off, s32 offset:60
+; GFX11-TRUE16-NEXT: scratch_load_b32 v159, off, s32 offset:64
+; GFX11-TRUE16-NEXT: scratch_load_b32 v158, off, s32 offset:68
+; GFX11-TRUE16-NEXT: scratch_load_b32 v157, off, s32 offset:72
+; GFX11-TRUE16-NEXT: scratch_load_b32 v156, off, s32 offset:76
+; GFX11-TRUE16-NEXT: scratch_load_b32 v155, off, s32 offset:80
+; GFX11-TRUE16-NEXT: scratch_load_b32 v154, off, s32 offset:84
+; GFX11-TRUE16-NEXT: scratch_load_b32 v153, off, s32 offset:88
+; GFX11-TRUE16-NEXT: scratch_load_b32 v152, off, s32 offset:92
+; GFX11-TRUE16-NEXT: scratch_load_b32 v143, off, s32 offset:96
+; GFX11-TRUE16-NEXT: scratch_load_b32 v142, off, s32 offset:100
+; GFX11-TRUE16-NEXT: scratch_load_b32 v141, off, s32 offset:104
+; GFX11-TRUE16-NEXT: scratch_load_b32 v140, off, s32 offset:108
+; GFX11-TRUE16-NEXT: scratch_load_b32 v139, off, s32 offset:112
+; GFX11-TRUE16-NEXT: scratch_load_b32 v138, off, s32 offset:116
+; GFX11-TRUE16-NEXT: scratch_load_b32 v137, off, s32 offset:120
+; GFX11-TRUE16-NEXT: scratch_load_b32 v136, off, s32 offset:124
+; GFX11-TRUE16-NEXT: s_clause 0x1f
+; GFX11-TRUE16-NEXT: scratch_load_b32 v127, off, s32 offset:128
+; GFX11-TRUE16-NEXT: scratch_load_b32 v126, off, s32 offset:132
+; GFX11-TRUE16-NEXT: scratch_load_b32 v125, off, s32 offset:136
+; GFX11-TRUE16-NEXT: scratch_load_b32 v124, off, s32 offset:140
+; GFX11-TRUE16-NEXT: scratch_load_b32 v123, off, s32 offset:144
+; GFX11-TRUE16-NEXT: scratch_load_b32 v122, off, s32 offset:148
+; GFX11-TRUE16-NEXT: scratch_load_b32 v121, off, s32 offset:152
+; GFX11-TRUE16-NEXT: scratch_load_b32 v120, off, s32 offset:156
+; GFX11-TRUE16-NEXT: scratch_load_b32 v111, off, s32 offset:160
+; GFX11-TRUE16-NEXT: scratch_load_b32 v110, off, s32 offset:164
+; GFX11-TRUE16-NEXT: scratch_load_b32 v109, off, s32 offset:168
+; GFX11-TRUE16-NEXT: scratch_load_b32 v108, off, s32 offset:172
+; GFX11-TRUE16-NEXT: scratch_load_b32 v107, off, s32 offset:176
+; GFX11-TRUE16-NEXT: scratch_load_b32 v106, off, s32 offset:180
+; GFX11-TRUE16-NEXT: scratch_load_b32 v105, off, s32 offset:184
+; GFX11-TRUE16-NEXT: scratch_load_b32 v104, off, s32 offset:188
+; GFX11-TRUE16-NEXT: scratch_load_b32 v95, off, s32 offset:192
+; GFX11-TRUE16-NEXT: scratch_load_b32 v94, off, s32 offset:196
+; GFX11-TRUE16-NEXT: scratch_load_b32 v93, off, s32 offset:200
+; GFX11-TRUE16-NEXT: scratch_load_b32 v92, off, s32 offset:204
+; GFX11-TRUE16-NEXT: scratch_load_b32 v91, off, s32 offset:208
+; GFX11-TRUE16-NEXT: scratch_load_b32 v90, off, s32 offset:212
+; GFX11-TRUE16-NEXT: scratch_load_b32 v89, off, s32 offset:216
+; GFX11-TRUE16-NEXT: scratch_load_b32 v88, off, s32 offset:220
+; GFX11-TRUE16-NEXT: scratch_load_b32 v79, off, s32 offset:224
+; GFX11-TRUE16-NEXT: scratch_load_b32 v78, off, s32 offset:228
+; GFX11-TRUE16-NEXT: scratch_load_b32 v77, off, s32 offset:232
+; GFX11-TRUE16-NEXT: scratch_load_b32 v76, off, s32 offset:236
+; GFX11-TRUE16-NEXT: scratch_load_b32 v75, off, s32 offset:240
+; GFX11-TRUE16-NEXT: scratch_load_b32 v74, off, s32 offset:244
+; GFX11-TRUE16-NEXT: scratch_load_b32 v73, off, s32 offset:248
+; GFX11-TRUE16-NEXT: scratch_load_b32 v72, off, s32 offset:252
+; GFX11-TRUE16-NEXT: s_clause 0xf
+; GFX11-TRUE16-NEXT: scratch_load_b32 v63, off, s32 offset:256
+; GFX11-TRUE16-NEXT: scratch_load_b32 v62, off, s32 offset:260
+; GFX11-TRUE16-NEXT: scratch_load_b32 v61, off, s32 offset:264
+; GFX11-TRUE16-NEXT: scratch_load_b32 v60, off, s32 offset:268
+; GFX11-TRUE16-NEXT: scratch_load_b32 v59, off, s32 offset:272
+; GFX11-TRUE16-NEXT: scratch_load_b32 v58, off, s32 offset:276
+; GFX11-TRUE16-NEXT: scratch_load_b32 v57, off, s32 offset:280
+; GFX11-TRUE16-NEXT: scratch_load_b32 v56, off, s32 offset:284
+; GFX11-TRUE16-NEXT: scratch_load_b32 v47, off, s32 offset:288
+; GFX11-TRUE16-NEXT: scratch_load_b32 v46, off, s32 offset:292
+; GFX11-TRUE16-NEXT: scratch_load_b32 v45, off, s32 offset:296
+; GFX11-TRUE16-NEXT: scratch_load_b32 v44, off, s32 offset:300
+; GFX11-TRUE16-NEXT: scratch_load_b32 v43, off, s32 offset:304
+; GFX11-TRUE16-NEXT: scratch_load_b32 v42, off, s32 offset:308
+; GFX11-TRUE16-NEXT: scratch_load_b32 v41, off, s32 offset:312
+; GFX11-TRUE16-NEXT: scratch_load_b32 v40, off, s32 offset:316
+; GFX11-TRUE16-NEXT: v_dual_mov_b32 v3, v9 :: v_dual_mov_b32 v4, v14
+; GFX11-TRUE16-NEXT: v_dual_mov_b32 v9, v54 :: v_dual_mov_b32 v10, v65
+; GFX11-TRUE16-NEXT: v_dual_mov_b32 v14, v119 :: v_dual_mov_b32 v27, v30
+; GFX11-TRUE16-NEXT: s_waitcnt vmcnt(0)
; GFX11-TRUE16-NEXT: s_setpc_b64 s[30:31]
; GFX11-TRUE16-NEXT: .LBB43_4:
+; GFX11-TRUE16-NEXT: v_dual_mov_b32 v64, v29 :: v_dual_mov_b32 v65, v28
+; GFX11-TRUE16-NEXT: v_mov_b32_e32 v66, v30
+; GFX11-TRUE16-NEXT: v_dual_mov_b32 v53, v26 :: v_dual_mov_b32 v54, v25
; GFX11-TRUE16-NEXT: ; implicit-def: $vgpr0_vgpr1_vgpr2_vgpr3_vgpr4_vgpr5_vgpr6_vgpr7_vgpr8_vgpr9_vgpr10_vgpr11_vgpr12_vgpr13_vgpr14_vgpr15_vgpr16_vgpr17_vgpr18_vgpr19_vgpr20_vgpr21_vgpr22_vgpr23_vgpr24_vgpr25_vgpr26_vgpr27_vgpr28_vgpr29_vgpr30_vgpr31
+; GFX11-TRUE16-NEXT: ; implicit-def: $vgpr1_vgpr2_vgpr3_vgpr4_vgpr5_vgpr6_vgpr7_vgpr8_vgpr9_vgpr10_vgpr11_vgpr12_vgpr13_vgpr14_vgpr15_vgpr16_vgpr17_vgpr18_vgpr19_vgpr20_vgpr21_vgpr22_vgpr23_vgpr24_vgpr25_vgpr26_vgpr27_vgpr28_vgpr29_vgpr30_vgpr31_vgpr32
+; GFX11-TRUE16-NEXT: ; implicit-def: $vgpr3_vgpr4_vgpr5_vgpr6_vgpr7_vgpr8_vgpr9_vgpr10_vgpr11_vgpr12_vgpr13_vgpr14_vgpr15_vgpr16_vgpr17_vgpr18_vgpr19_vgpr20_vgpr21_vgpr22_vgpr23_vgpr24_vgpr25_vgpr26_vgpr27_vgpr28_vgpr29_vgpr30_vgpr31_vgpr32_vgpr33_vgpr34
+; GFX11-TRUE16-NEXT: ; implicit-def: $vgpr6_vgpr7_vgpr8_vgpr9_vgpr10_vgpr11_vgpr12_vgpr13_vgpr14_vgpr15_vgpr16_vgpr17_vgpr18_vgpr19_vgpr20_vgpr21_vgpr22_vgpr23_vgpr24_vgpr25_vgpr26_vgpr27_vgpr28_vgpr29_vgpr30_vgpr31_vgpr32_vgpr33_vgpr34_vgpr35_vgpr36_vgpr37
+; GFX11-TRUE16-NEXT: ; implicit-def: $vgpr10_vgpr11_vgpr12_vgpr13_vgpr14_vgpr15_vgpr16_vgpr17_vgpr18_vgpr19_vgpr20_vgpr21_vgpr22_vgpr23_vgpr24_vgpr25_vgpr26_vgpr27_vgpr28_vgpr29_vgpr30_vgpr31_vgpr32_vgpr33_vgpr34_vgpr35_vgpr36_vgpr37_vgpr38_vgpr39_vgpr40_vgpr41
+; GFX11-TRUE16-NEXT: ; implicit-def: $vgpr15_vgpr16_vgpr17_vgpr18_vgpr19_vgpr20_vgpr21_vgpr22_vgpr23_vgpr24_vgpr25_vgpr26_vgpr27_vgpr28_vgpr29_vgpr30_vgpr31_vgpr32_vgpr33_vgpr34_vgpr35_vgpr36_vgpr37_vgpr38_vgpr39_vgpr40_vgpr41_vgpr42_vgpr43_vgpr44_vgpr45_vgpr46
+; GFX11-TRUE16-NEXT: ; implicit-def: $vgpr21_vgpr22_vgpr23_vgpr24_vgpr25_vgpr26_vgpr27_vgpr28_vgpr29_vgpr30_vgpr31_vgpr32_vgpr33_vgpr34_vgpr35_vgpr36_vgpr37_vgpr38_vgpr39_vgpr40_vgpr41_vgpr42_vgpr43_vgpr44_vgpr45_vgpr46_vgpr47_vgpr48_vgpr49_vgpr50_vgpr51_vgpr52
+; GFX11-TRUE16-NEXT: s_delay_alu instid0(VALU_DEP_1) | instskip(NEXT) | instid1(VALU_DEP_4)
+; GFX11-TRUE16-NEXT: v_dual_mov_b32 v25, v54 :: v_dual_mov_b32 v26, v53
+; GFX11-TRUE16-NEXT: ; implicit-def: $vgpr28_vgpr29_vgpr30_vgpr31_vgpr32_vgpr33_vgpr34_vgpr35_vgpr36_vgpr37_vgpr38_vgpr39_vgpr40_vgpr41_vgpr42_vgpr43_vgpr44_vgpr45_vgpr46_vgpr47_vgpr48_vgpr49_vgpr50_vgpr51_vgpr52_vgpr53_vgpr54_vgpr55_vgpr56_vgpr57_vgpr58_vgpr59
+; GFX11-TRUE16-NEXT: v_dual_mov_b32 v28, v65 :: v_dual_mov_b32 v29, v64
+; GFX11-TRUE16-NEXT: s_delay_alu instid0(VALU_DEP_4)
+; GFX11-TRUE16-NEXT: v_mov_b32_e32 v30, v66
+; GFX11-TRUE16-NEXT: ; implicit-def: $vgpr36_vgpr37_vgpr38_vgpr39_vgpr40_vgpr41_vgpr42_vgpr43_vgpr44_vgpr45_vgpr46_vgpr47_vgpr48_vgpr49_vgpr50_vgpr51_vgpr52_vgpr53_vgpr54_vgpr55_vgpr56_vgpr57_vgpr58_vgpr59_vgpr60_vgpr61_vgpr62_vgpr63_vgpr64_vgpr65_vgpr66_vgpr67
+; GFX11-TRUE16-NEXT: ; implicit-def: $vgpr45_vgpr46_vgpr47_vgpr48_vgpr49_vgpr50_vgpr51_vgpr52_vgpr53_vgpr54_vgpr55_vgpr56_vgpr57_vgpr58_vgpr59_vgpr60_vgpr61_vgpr62_vgpr63_vgpr64_vgpr65_vgpr66_vgpr67_vgpr68_vgpr69_vgpr70_vgpr71_vgpr72_vgpr73_vgpr74_vgpr75_vgpr76
+; GFX11-TRUE16-NEXT: ; implicit-def: $vgpr55_vgpr56_vgpr57_vgpr58_vgpr59_vgpr60_vgpr61_vgpr62_vgpr63_vgpr64_vgpr65_vgpr66_vgpr67_vgpr68_vgpr69_vgpr70_vgpr71_vgpr72_vgpr73_vgpr74_vgpr75_vgpr76_vgpr77_vgpr78_vgpr79_vgpr80_vgpr81_vgpr82_vgpr83_vgpr84_vgpr85_vgpr86
+; GFX11-TRUE16-NEXT: ; implicit-def: $vgpr66_vgpr67_vgpr68_vgpr69_vgpr70_vgpr71_vgpr72_vgpr73_vgpr74_vgpr75_vgpr76_vgpr77_vgpr78_vgpr79_vgpr80_vgpr81_vgpr82_vgpr83_vgpr84_vgpr85_vgpr86_vgpr87_vgpr88_vgpr89_vgpr90_vgpr91_vgpr92_vgpr93_vgpr94_vgpr95_vgpr96_vgpr97
+; GFX11-TRUE16-NEXT: ; implicit-def: $vgpr78_vgpr79_vgpr80_vgpr81_vgpr82_vgpr83_vgpr84_vgpr85_vgpr86_vgpr87_vgpr88_vgpr89_vgpr90_vgpr91_vgpr92_vgpr93_vgpr94_vgpr95_vgpr96_vgpr97_vgpr98_vgpr99_vgpr100_vgpr101_vgpr102_vgpr103_vgpr104_vgpr105_vgpr106_vgpr107_vgpr108_vgpr109
+; GFX11-TRUE16-NEXT: ; implicit-def: $vgpr91_vgpr92_vgpr93_vgpr94_vgpr95_vgpr96_vgpr97_vgpr98_vgpr99_vgpr100_vgpr101_vgpr102_vgpr103_vgpr104_vgpr105_vgpr106_vgpr107_vgpr108_vgpr109_vgpr110_vgpr111_vgpr112_vgpr113_vgpr114_vgpr115_vgpr116_vgpr117_vgpr118_vgpr119_vgpr120_vgpr121_vgpr122
+; GFX11-TRUE16-NEXT: ; implicit-def: $vgpr105_vgpr106_vgpr107_vgpr108_vgpr109_vgpr110_vgpr111_vgpr112_vgpr113_vgpr114_vgpr115_vgpr116_vgpr117_vgpr118_vgpr119_vgpr120_vgpr121_vgpr122_vgpr123_vgpr124_vgpr125_vgpr126_vgpr127_vgpr128_vgpr129_vgpr130_vgpr131_vgpr132_vgpr133_vgpr134_vgpr135_vgpr136
+; GFX11-TRUE16-NEXT: ; implicit-def: $vgpr120_vgpr121_vgpr122_vgpr123_vgpr124_vgpr125_vgpr126_vgpr127_vgpr128_vgpr129_vgpr130_vgpr131_vgpr132_vgpr133_vgpr134_vgpr135_vgpr136_vgpr137_vgpr138_vgpr139_vgpr140_vgpr141_vgpr142_vgpr143_vgpr144_vgpr145_vgpr146_vgpr147_vgpr148_vgpr149_vgpr150_vgpr151
+; GFX11-TRUE16-NEXT: ; implicit-def: $vgpr136_vgpr137_vgpr138_vgpr139_vgpr140_vgpr141_vgpr142_vgpr143_vgpr144_vgpr145_vgpr146_vgpr147_vgpr148_vgpr149_vgpr150_vgpr151_vgpr152_vgpr153_vgpr154_vgpr155_vgpr156_vgpr157_vgpr158_vgpr159_vgpr160_vgpr161_vgpr162_vgpr163_vgpr164_vgpr165_vgpr166_vgpr167
+; GFX11-TRUE16-NEXT: ; implicit-def: $vgpr153_vgpr154_vgpr155_vgpr156_vgpr157_vgpr158_vgpr159_vgpr160_vgpr161_vgpr162_vgpr163_vgpr164_vgpr165_vgpr166_vgpr167_vgpr168_vgpr169_vgpr170_vgpr171_vgpr172_vgpr173_vgpr174_vgpr175_vgpr176_vgpr177_vgpr178_vgpr179_vgpr180_vgpr181_vgpr182_vgpr183_vgpr184
; GFX11-TRUE16-NEXT: s_branch .LBB43_2
;
; GFX11-FAKE16-LABEL: bitcast_v60i16_to_v15i64_scalar:
@@ -36089,153 +37217,305 @@ define inreg <15 x i64> @bitcast_v60f16_to_v15i64_scalar(<60 x half> inreg %a, i
; GFX11-TRUE16-LABEL: bitcast_v60f16_to_v15i64_scalar:
; GFX11-TRUE16: ; %bb.0:
; GFX11-TRUE16-NEXT: s_waitcnt vmcnt(0) expcnt(0) lgkmcnt(0)
-; GFX11-TRUE16-NEXT: v_mov_b16_e32 v32.h, 0
; GFX11-TRUE16-NEXT: v_cmp_ne_u32_e32 vcc_lo, 0, v12
-; GFX11-TRUE16-NEXT: v_mov_b16_e32 v32.l, v11.h
-; GFX11-TRUE16-NEXT: v_mov_b16_e32 v33.l, v10.h
-; GFX11-TRUE16-NEXT: v_mov_b16_e32 v34.l, v9.h
-; GFX11-TRUE16-NEXT: v_mov_b16_e32 v33.h, v32.h
-; GFX11-TRUE16-NEXT: v_mov_b16_e32 v34.h, v32.h
-; GFX11-TRUE16-NEXT: v_mov_b16_e32 v35.l, v8.h
-; GFX11-TRUE16-NEXT: v_mov_b16_e32 v35.h, v32.h
-; GFX11-TRUE16-NEXT: v_mov_b16_e32 v36.l, v7.h
-; GFX11-TRUE16-NEXT: v_mov_b16_e32 v36.h, v32.h
-; GFX11-TRUE16-NEXT: v_mov_b16_e32 v37.l, v6.h
-; GFX11-TRUE16-NEXT: v_mov_b16_e32 v37.h, v32.h
-; GFX11-TRUE16-NEXT: v_mov_b16_e32 v38.l, v5.h
-; GFX11-TRUE16-NEXT: v_mov_b16_e32 v38.h, v32.h
-; GFX11-TRUE16-NEXT: v_mov_b16_e32 v39.l, v4.h
-; GFX11-TRUE16-NEXT: v_mov_b16_e32 v39.h, v32.h
-; GFX11-TRUE16-NEXT: v_mov_b16_e32 v48.l, v3.h
-; GFX11-TRUE16-NEXT: v_mov_b16_e32 v48.h, v32.h
-; GFX11-TRUE16-NEXT: v_mov_b16_e32 v49.l, v2.h
-; GFX11-TRUE16-NEXT: v_mov_b16_e32 v49.h, v32.h
-; GFX11-TRUE16-NEXT: v_mov_b16_e32 v50.l, v1.h
-; GFX11-TRUE16-NEXT: v_mov_b16_e32 v50.h, v32.h
-; GFX11-TRUE16-NEXT: v_mov_b16_e32 v51.l, v0.h
-; GFX11-TRUE16-NEXT: v_mov_b16_e32 v51.h, v32.h
-; GFX11-TRUE16-NEXT: v_and_b32_e32 v71, 0xffff, v0
-; GFX11-TRUE16-NEXT: v_and_b32_e32 v70, 0xffff, v1
-; GFX11-TRUE16-NEXT: v_and_b32_e32 v69, 0xffff, v2
-; GFX11-TRUE16-NEXT: v_and_b32_e32 v68, 0xffff, v3
-; GFX11-TRUE16-NEXT: v_and_b32_e32 v67, 0xffff, v4
-; GFX11-TRUE16-NEXT: v_and_b32_e32 v66, 0xffff, v5
-; GFX11-TRUE16-NEXT: v_and_b32_e32 v65, 0xffff, v6
-; GFX11-TRUE16-NEXT: v_and_b32_e32 v64, 0xffff, v7
-; GFX11-TRUE16-NEXT: v_and_b32_e32 v55, 0xffff, v8
-; GFX11-TRUE16-NEXT: v_and_b32_e32 v54, 0xffff, v9
-; GFX11-TRUE16-NEXT: v_and_b32_e32 v53, 0xffff, v10
-; GFX11-TRUE16-NEXT: v_and_b32_e32 v52, 0xffff, v11
-; GFX11-TRUE16-NEXT: s_lshr_b32 s40, s29, 16
-; GFX11-TRUE16-NEXT: s_lshr_b32 s41, s28, 16
-; GFX11-TRUE16-NEXT: s_lshr_b32 s42, s27, 16
-; GFX11-TRUE16-NEXT: s_lshr_b32 s14, s26, 16
-; GFX11-TRUE16-NEXT: s_lshr_b32 s13, s25, 16
-; GFX11-TRUE16-NEXT: s_lshr_b32 s12, s24, 16
-; GFX11-TRUE16-NEXT: s_lshr_b32 s11, s23, 16
-; GFX11-TRUE16-NEXT: s_lshr_b32 s10, s22, 16
-; GFX11-TRUE16-NEXT: s_lshr_b32 s9, s21, 16
-; GFX11-TRUE16-NEXT: s_lshr_b32 s8, s20, 16
-; GFX11-TRUE16-NEXT: s_lshr_b32 s7, s19, 16
-; GFX11-TRUE16-NEXT: s_lshr_b32 s6, s18, 16
-; GFX11-TRUE16-NEXT: s_lshr_b32 s5, s17, 16
-; GFX11-TRUE16-NEXT: s_lshr_b32 s4, s16, 16
-; GFX11-TRUE16-NEXT: s_lshr_b32 s43, s3, 16
-; GFX11-TRUE16-NEXT: s_lshr_b32 s44, s2, 16
-; GFX11-TRUE16-NEXT: s_lshr_b32 s45, s1, 16
-; GFX11-TRUE16-NEXT: s_lshr_b32 s46, s0, 16
-; GFX11-TRUE16-NEXT: s_mov_b32 s15, 0
-; GFX11-TRUE16-NEXT: s_pack_ll_b32_b16 s0, s0, s46
-; GFX11-TRUE16-NEXT: s_pack_ll_b32_b16 s1, s1, s45
-; GFX11-TRUE16-NEXT: s_pack_ll_b32_b16 s2, s2, s44
-; GFX11-TRUE16-NEXT: s_pack_ll_b32_b16 s3, s3, s43
-; GFX11-TRUE16-NEXT: s_pack_ll_b32_b16 s4, s16, s4
-; GFX11-TRUE16-NEXT: s_pack_ll_b32_b16 s5, s17, s5
-; GFX11-TRUE16-NEXT: s_pack_ll_b32_b16 s6, s18, s6
-; GFX11-TRUE16-NEXT: s_pack_ll_b32_b16 s7, s19, s7
-; GFX11-TRUE16-NEXT: s_pack_ll_b32_b16 s8, s20, s8
-; GFX11-TRUE16-NEXT: s_pack_ll_b32_b16 s9, s21, s9
-; GFX11-TRUE16-NEXT: s_pack_ll_b32_b16 s10, s22, s10
-; GFX11-TRUE16-NEXT: s_pack_ll_b32_b16 s11, s23, s11
-; GFX11-TRUE16-NEXT: s_pack_ll_b32_b16 s12, s24, s12
-; GFX11-TRUE16-NEXT: s_pack_ll_b32_b16 s13, s25, s13
-; GFX11-TRUE16-NEXT: s_pack_ll_b32_b16 s14, s26, s14
-; GFX11-TRUE16-NEXT: s_pack_ll_b32_b16 s16, s27, s42
-; GFX11-TRUE16-NEXT: s_pack_ll_b32_b16 s17, s28, s41
-; GFX11-TRUE16-NEXT: s_pack_ll_b32_b16 s18, s29, s40
+; GFX11-TRUE16-NEXT: s_clause 0x1f
+; GFX11-TRUE16-NEXT: scratch_store_b32 off, v40, s32 offset:316
+; GFX11-TRUE16-NEXT: scratch_store_b32 off, v41, s32 offset:312
+; GFX11-TRUE16-NEXT: scratch_store_b32 off, v42, s32 offset:308
+; GFX11-TRUE16-NEXT: scratch_store_b32 off, v43, s32 offset:304
+; GFX11-TRUE16-NEXT: scratch_store_b32 off, v44, s32 offset:300
+; GFX11-TRUE16-NEXT: scratch_store_b32 off, v45, s32 offset:296
+; GFX11-TRUE16-NEXT: scratch_store_b32 off, v46, s32 offset:292
+; GFX11-TRUE16-NEXT: scratch_store_b32 off, v47, s32 offset:288
+; GFX11-TRUE16-NEXT: scratch_store_b32 off, v56, s32 offset:284
+; GFX11-TRUE16-NEXT: scratch_store_b32 off, v57, s32 offset:280
+; GFX11-TRUE16-NEXT: scratch_store_b32 off, v58, s32 offset:276
+; GFX11-TRUE16-NEXT: scratch_store_b32 off, v59, s32 offset:272
+; GFX11-TRUE16-NEXT: scratch_store_b32 off, v60, s32 offset:268
+; GFX11-TRUE16-NEXT: scratch_store_b32 off, v61, s32 offset:264
+; GFX11-TRUE16-NEXT: scratch_store_b32 off, v62, s32 offset:260
+; GFX11-TRUE16-NEXT: scratch_store_b32 off, v63, s32 offset:256
+; GFX11-TRUE16-NEXT: scratch_store_b32 off, v72, s32 offset:252
+; GFX11-TRUE16-NEXT: scratch_store_b32 off, v73, s32 offset:248
+; GFX11-TRUE16-NEXT: scratch_store_b32 off, v74, s32 offset:244
+; GFX11-TRUE16-NEXT: scratch_store_b32 off, v75, s32 offset:240
+; GFX11-TRUE16-NEXT: scratch_store_b32 off, v76, s32 offset:236
+; GFX11-TRUE16-NEXT: scratch_store_b32 off, v77, s32 offset:232
+; GFX11-TRUE16-NEXT: scratch_store_b32 off, v78, s32 offset:228
+; GFX11-TRUE16-NEXT: scratch_store_b32 off, v79, s32 offset:224
+; GFX11-TRUE16-NEXT: scratch_store_b32 off, v88, s32 offset:220
+; GFX11-TRUE16-NEXT: scratch_store_b32 off, v89, s32 offset:216
+; GFX11-TRUE16-NEXT: scratch_store_b32 off, v90, s32 offset:212
+; GFX11-TRUE16-NEXT: scratch_store_b32 off, v91, s32 offset:208
+; GFX11-TRUE16-NEXT: scratch_store_b32 off, v92, s32 offset:204
+; GFX11-TRUE16-NEXT: scratch_store_b32 off, v93, s32 offset:200
+; GFX11-TRUE16-NEXT: scratch_store_b32 off, v94, s32 offset:196
+; GFX11-TRUE16-NEXT: scratch_store_b32 off, v95, s32 offset:192
+; GFX11-TRUE16-NEXT: s_clause 0x1f
+; GFX11-TRUE16-NEXT: scratch_store_b32 off, v104, s32 offset:188
+; GFX11-TRUE16-NEXT: scratch_store_b32 off, v105, s32 offset:184
+; GFX11-TRUE16-NEXT: scratch_store_b32 off, v106, s32 offset:180
+; GFX11-TRUE16-NEXT: scratch_store_b32 off, v107, s32 offset:176
+; GFX11-TRUE16-NEXT: scratch_store_b32 off, v108, s32 offset:172
+; GFX11-TRUE16-NEXT: scratch_store_b32 off, v109, s32 offset:168
+; GFX11-TRUE16-NEXT: scratch_store_b32 off, v110, s32 offset:164
+; GFX11-TRUE16-NEXT: scratch_store_b32 off, v111, s32 offset:160
+; GFX11-TRUE16-NEXT: scratch_store_b32 off, v120, s32 offset:156
+; GFX11-TRUE16-NEXT: scratch_store_b32 off, v121, s32 offset:152
+; GFX11-TRUE16-NEXT: scratch_store_b32 off, v122, s32 offset:148
+; GFX11-TRUE16-NEXT: scratch_store_b32 off, v123, s32 offset:144
+; GFX11-TRUE16-NEXT: scratch_store_b32 off, v124, s32 offset:140
+; GFX11-TRUE16-NEXT: scratch_store_b32 off, v125, s32 offset:136
+; GFX11-TRUE16-NEXT: scratch_store_b32 off, v126, s32 offset:132
+; GFX11-TRUE16-NEXT: scratch_store_b32 off, v127, s32 offset:128
+; GFX11-TRUE16-NEXT: scratch_store_b32 off, v136, s32 offset:124
+; GFX11-TRUE16-NEXT: scratch_store_b32 off, v137, s32 offset:120
+; GFX11-TRUE16-NEXT: scratch_store_b32 off, v138, s32 offset:116
+; GFX11-TRUE16-NEXT: scratch_store_b32 off, v139, s32 offset:112
+; GFX11-TRUE16-NEXT: scratch_store_b32 off, v140, s32 offset:108
+; GFX11-TRUE16-NEXT: scratch_store_b32 off, v141, s32 offset:104
+; GFX11-TRUE16-NEXT: scratch_store_b32 off, v142, s32 offset:100
+; GFX11-TRUE16-NEXT: scratch_store_b32 off, v143, s32 offset:96
+; GFX11-TRUE16-NEXT: scratch_store_b32 off, v152, s32 offset:92
+; GFX11-TRUE16-NEXT: scratch_store_b32 off, v153, s32 offset:88
+; GFX11-TRUE16-NEXT: scratch_store_b32 off, v154, s32 offset:84
+; GFX11-TRUE16-NEXT: scratch_store_b32 off, v155, s32 offset:80
+; GFX11-TRUE16-NEXT: scratch_store_b32 off, v156, s32 offset:76
+; GFX11-TRUE16-NEXT: scratch_store_b32 off, v157, s32 offset:72
+; GFX11-TRUE16-NEXT: scratch_store_b32 off, v158, s32 offset:68
+; GFX11-TRUE16-NEXT: scratch_store_b32 off, v159, s32 offset:64
+; GFX11-TRUE16-NEXT: s_clause 0xf
+; GFX11-TRUE16-NEXT: scratch_store_b32 off, v168, s32 offset:60
+; GFX11-TRUE16-NEXT: scratch_store_b32 off, v169, s32 offset:56
+; GFX11-TRUE16-NEXT: scratch_store_b32 off, v170, s32 offset:52
+; GFX11-TRUE16-NEXT: scratch_store_b32 off, v171, s32 offset:48
+; GFX11-TRUE16-NEXT: scratch_store_b32 off, v172, s32 offset:44
+; GFX11-TRUE16-NEXT: scratch_store_b32 off, v173, s32 offset:40
+; GFX11-TRUE16-NEXT: scratch_store_b32 off, v174, s32 offset:36
+; GFX11-TRUE16-NEXT: scratch_store_b32 off, v175, s32 offset:32
+; GFX11-TRUE16-NEXT: scratch_store_b32 off, v184, s32 offset:28
+; GFX11-TRUE16-NEXT: scratch_store_b32 off, v185, s32 offset:24
+; GFX11-TRUE16-NEXT: scratch_store_b32 off, v186, s32 offset:20
+; GFX11-TRUE16-NEXT: scratch_store_b32 off, v187, s32 offset:16
+; GFX11-TRUE16-NEXT: scratch_store_b32 off, v188, s32 offset:12
+; GFX11-TRUE16-NEXT: scratch_store_b32 off, v189, s32 offset:8
+; GFX11-TRUE16-NEXT: scratch_store_b32 off, v190, s32 offset:4
+; GFX11-TRUE16-NEXT: scratch_store_b32 off, v191, s32
+; GFX11-TRUE16-NEXT: v_dual_mov_b32 v29, v11 :: v_dual_mov_b32 v28, v10
+; GFX11-TRUE16-NEXT: v_dual_mov_b32 v30, v9 :: v_dual_mov_b32 v25, v7
+; GFX11-TRUE16-NEXT: v_dual_mov_b32 v26, v8 :: v_dual_mov_b32 v191, v5
+; GFX11-TRUE16-NEXT: v_dual_mov_b32 v190, v6 :: v_dual_mov_b32 v185, v4
+; GFX11-TRUE16-NEXT: v_dual_mov_b32 v186, v3 :: v_dual_mov_b32 v187, v2
+; GFX11-TRUE16-NEXT: v_dual_mov_b32 v188, v1 :: v_dual_mov_b32 v189, v0
+; GFX11-TRUE16-NEXT: s_lshr_b32 s15, s29, 16
+; GFX11-TRUE16-NEXT: s_lshr_b32 s14, s28, 16
+; GFX11-TRUE16-NEXT: s_lshr_b32 s13, s27, 16
+; GFX11-TRUE16-NEXT: s_lshr_b32 s12, s26, 16
+; GFX11-TRUE16-NEXT: s_lshr_b32 s11, s25, 16
+; GFX11-TRUE16-NEXT: s_lshr_b32 s10, s24, 16
+; GFX11-TRUE16-NEXT: s_lshr_b32 s9, s23, 16
+; GFX11-TRUE16-NEXT: s_lshr_b32 s8, s22, 16
+; GFX11-TRUE16-NEXT: s_lshr_b32 s7, s21, 16
+; GFX11-TRUE16-NEXT: s_lshr_b32 s6, s20, 16
+; GFX11-TRUE16-NEXT: s_lshr_b32 s5, s19, 16
+; GFX11-TRUE16-NEXT: s_lshr_b32 s4, s18, 16
+; GFX11-TRUE16-NEXT: s_lshr_b32 s43, s17, 16
+; GFX11-TRUE16-NEXT: s_lshr_b32 s44, s16, 16
+; GFX11-TRUE16-NEXT: s_lshr_b32 s45, s3, 16
+; GFX11-TRUE16-NEXT: s_lshr_b32 s46, s2, 16
+; GFX11-TRUE16-NEXT: s_lshr_b32 s41, s1, 16
+; GFX11-TRUE16-NEXT: s_lshr_b32 s40, s0, 16
+; GFX11-TRUE16-NEXT: s_mov_b32 s42, 0
+; GFX11-TRUE16-NEXT: s_pack_ll_b32_b16 s40, s0, s40
+; GFX11-TRUE16-NEXT: s_pack_ll_b32_b16 s41, s1, s41
+; GFX11-TRUE16-NEXT: s_pack_ll_b32_b16 s0, s2, s46
+; GFX11-TRUE16-NEXT: s_pack_ll_b32_b16 s1, s3, s45
+; GFX11-TRUE16-NEXT: s_pack_ll_b32_b16 s2, s16, s44
+; GFX11-TRUE16-NEXT: s_pack_ll_b32_b16 s3, s17, s43
+; GFX11-TRUE16-NEXT: s_pack_ll_b32_b16 s4, s18, s4
+; GFX11-TRUE16-NEXT: s_pack_ll_b32_b16 s5, s19, s5
+; GFX11-TRUE16-NEXT: s_pack_ll_b32_b16 s6, s20, s6
+; GFX11-TRUE16-NEXT: s_pack_ll_b32_b16 s7, s21, s7
+; GFX11-TRUE16-NEXT: s_pack_ll_b32_b16 s8, s22, s8
+; GFX11-TRUE16-NEXT: s_pack_ll_b32_b16 s9, s23, s9
+; GFX11-TRUE16-NEXT: s_pack_ll_b32_b16 s10, s24, s10
+; GFX11-TRUE16-NEXT: s_pack_ll_b32_b16 s11, s25, s11
+; GFX11-TRUE16-NEXT: s_pack_ll_b32_b16 s12, s26, s12
+; GFX11-TRUE16-NEXT: s_pack_ll_b32_b16 s13, s27, s13
+; GFX11-TRUE16-NEXT: s_pack_ll_b32_b16 s14, s28, s14
+; GFX11-TRUE16-NEXT: s_pack_ll_b32_b16 s15, s29, s15
; GFX11-TRUE16-NEXT: s_and_b32 s47, vcc_lo, exec_lo
; GFX11-TRUE16-NEXT: s_cbranch_scc0 .LBB47_4
; GFX11-TRUE16-NEXT: ; %bb.1: ; %cmp.false
-; GFX11-TRUE16-NEXT: v_lshl_or_b32 v18, v51, 16, v71
-; GFX11-TRUE16-NEXT: v_lshl_or_b32 v19, v50, 16, v70
-; GFX11-TRUE16-NEXT: v_lshl_or_b32 v20, v49, 16, v69
-; GFX11-TRUE16-NEXT: v_lshl_or_b32 v21, v48, 16, v68
-; GFX11-TRUE16-NEXT: v_lshl_or_b32 v22, v39, 16, v67
-; GFX11-TRUE16-NEXT: v_lshl_or_b32 v23, v38, 16, v66
-; GFX11-TRUE16-NEXT: v_lshl_or_b32 v24, v37, 16, v65
-; GFX11-TRUE16-NEXT: v_lshl_or_b32 v25, v36, 16, v64
-; GFX11-TRUE16-NEXT: v_lshl_or_b32 v26, v35, 16, v55
-; GFX11-TRUE16-NEXT: v_lshl_or_b32 v27, v34, 16, v54
-; GFX11-TRUE16-NEXT: v_lshl_or_b32 v28, v33, 16, v53
-; GFX11-TRUE16-NEXT: v_lshl_or_b32 v29, v32, 16, v52
-; GFX11-TRUE16-NEXT: v_dual_mov_b32 v0, s0 :: v_dual_mov_b32 v1, s1
-; GFX11-TRUE16-NEXT: v_dual_mov_b32 v2, s2 :: v_dual_mov_b32 v3, s3
-; GFX11-TRUE16-NEXT: v_dual_mov_b32 v4, s4 :: v_dual_mov_b32 v5, s5
-; GFX11-TRUE16-NEXT: v_dual_mov_b32 v6, s6 :: v_dual_mov_b32 v7, s7
-; GFX11-TRUE16-NEXT: v_dual_mov_b32 v8, s8 :: v_dual_mov_b32 v9, s9
-; GFX11-TRUE16-NEXT: v_dual_mov_b32 v10, s10 :: v_dual_mov_b32 v11, s11
-; GFX11-TRUE16-NEXT: v_dual_mov_b32 v12, s12 :: v_dual_mov_b32 v13, s13
-; GFX11-TRUE16-NEXT: v_dual_mov_b32 v14, s14 :: v_dual_mov_b32 v15, s16
-; GFX11-TRUE16-NEXT: v_dual_mov_b32 v16, s17 :: v_dual_mov_b32 v17, s18
-; GFX11-TRUE16-NEXT: s_and_not1_b32 vcc_lo, exec_lo, s15
+; GFX11-TRUE16-NEXT: v_dual_mov_b32 v0, s40 :: v_dual_mov_b32 v5, s0
+; GFX11-TRUE16-NEXT: v_dual_mov_b32 v2, s41 :: v_dual_mov_b32 v9, s1
+; GFX11-TRUE16-NEXT: v_dual_mov_b32 v14, s2 :: v_dual_mov_b32 v27, s4
+; GFX11-TRUE16-NEXT: v_dual_mov_b32 v20, s3 :: v_dual_mov_b32 v35, s5
+; GFX11-TRUE16-NEXT: v_dual_mov_b32 v44, s6 :: v_dual_mov_b32 v65, s8
+; GFX11-TRUE16-NEXT: v_dual_mov_b32 v54, s7 :: v_dual_mov_b32 v77, s9
+; GFX11-TRUE16-NEXT: v_dual_mov_b32 v90, s10 :: v_dual_mov_b32 v119, s12
+; GFX11-TRUE16-NEXT: v_dual_mov_b32 v104, s11 :: v_dual_mov_b32 v135, s13
+; GFX11-TRUE16-NEXT: v_mov_b32_e32 v152, s14
+; GFX11-TRUE16-NEXT: v_mov_b32_e32 v170, s15
+; GFX11-TRUE16-NEXT: s_and_not1_b32 vcc_lo, exec_lo, s42
; GFX11-TRUE16-NEXT: s_cbranch_vccnz .LBB47_3
; GFX11-TRUE16-NEXT: .LBB47_2: ; %cmp.true
-; GFX11-TRUE16-NEXT: v_lshl_or_b32 v18, v51, 16, v71
-; GFX11-TRUE16-NEXT: v_lshl_or_b32 v19, v50, 16, v70
-; GFX11-TRUE16-NEXT: v_lshl_or_b32 v20, v49, 16, v69
-; GFX11-TRUE16-NEXT: v_lshl_or_b32 v21, v48, 16, v68
-; GFX11-TRUE16-NEXT: v_lshl_or_b32 v22, v39, 16, v67
-; GFX11-TRUE16-NEXT: v_lshl_or_b32 v23, v38, 16, v66
-; GFX11-TRUE16-NEXT: v_lshl_or_b32 v24, v37, 16, v65
-; GFX11-TRUE16-NEXT: v_lshl_or_b32 v25, v36, 16, v64
-; GFX11-TRUE16-NEXT: v_lshl_or_b32 v26, v35, 16, v55
-; GFX11-TRUE16-NEXT: v_lshl_or_b32 v27, v34, 16, v54
-; GFX11-TRUE16-NEXT: v_lshl_or_b32 v28, v33, 16, v53
-; GFX11-TRUE16-NEXT: v_lshl_or_b32 v29, v32, 16, v52
-; GFX11-TRUE16-NEXT: v_pk_add_f16 v0, 0x200, s0 op_sel_hi:[0,1]
-; GFX11-TRUE16-NEXT: v_pk_add_f16 v1, 0x200, s1 op_sel_hi:[0,1]
-; GFX11-TRUE16-NEXT: v_pk_add_f16 v2, 0x200, s2 op_sel_hi:[0,1]
-; GFX11-TRUE16-NEXT: v_pk_add_f16 v3, 0x200, s3 op_sel_hi:[0,1]
-; GFX11-TRUE16-NEXT: v_pk_add_f16 v4, 0x200, s4 op_sel_hi:[0,1]
-; GFX11-TRUE16-NEXT: v_pk_add_f16 v5, 0x200, s5 op_sel_hi:[0,1]
-; GFX11-TRUE16-NEXT: v_pk_add_f16 v6, 0x200, s6 op_sel_hi:[0,1]
-; GFX11-TRUE16-NEXT: v_pk_add_f16 v7, 0x200, s7 op_sel_hi:[0,1]
-; GFX11-TRUE16-NEXT: v_pk_add_f16 v8, 0x200, s8 op_sel_hi:[0,1]
-; GFX11-TRUE16-NEXT: v_pk_add_f16 v9, 0x200, s9 op_sel_hi:[0,1]
-; GFX11-TRUE16-NEXT: v_pk_add_f16 v10, 0x200, s10 op_sel_hi:[0,1]
-; GFX11-TRUE16-NEXT: v_pk_add_f16 v11, 0x200, s11 op_sel_hi:[0,1]
-; GFX11-TRUE16-NEXT: v_pk_add_f16 v12, 0x200, s12 op_sel_hi:[0,1]
-; GFX11-TRUE16-NEXT: v_pk_add_f16 v13, 0x200, s13 op_sel_hi:[0,1]
-; GFX11-TRUE16-NEXT: v_pk_add_f16 v14, 0x200, s14 op_sel_hi:[0,1]
-; GFX11-TRUE16-NEXT: v_pk_add_f16 v15, 0x200, s16 op_sel_hi:[0,1]
-; GFX11-TRUE16-NEXT: v_pk_add_f16 v16, 0x200, s17 op_sel_hi:[0,1]
-; GFX11-TRUE16-NEXT: v_pk_add_f16 v17, 0x200, s18 op_sel_hi:[0,1]
-; GFX11-TRUE16-NEXT: v_pk_add_f16 v18, 0x200, v18 op_sel_hi:[0,1]
-; GFX11-TRUE16-NEXT: v_pk_add_f16 v19, 0x200, v19 op_sel_hi:[0,1]
-; GFX11-TRUE16-NEXT: v_pk_add_f16 v20, 0x200, v20 op_sel_hi:[0,1]
-; GFX11-TRUE16-NEXT: v_pk_add_f16 v21, 0x200, v21 op_sel_hi:[0,1]
-; GFX11-TRUE16-NEXT: v_pk_add_f16 v22, 0x200, v22 op_sel_hi:[0,1]
-; GFX11-TRUE16-NEXT: v_pk_add_f16 v23, 0x200, v23 op_sel_hi:[0,1]
-; GFX11-TRUE16-NEXT: v_pk_add_f16 v24, 0x200, v24 op_sel_hi:[0,1]
+; GFX11-TRUE16-NEXT: v_pk_add_f16 v0, 0x200, s40 op_sel_hi:[0,1]
+; GFX11-TRUE16-NEXT: v_pk_add_f16 v2, 0x200, s41 op_sel_hi:[0,1]
+; GFX11-TRUE16-NEXT: v_pk_add_f16 v189, 0x200, v189 op_sel_hi:[0,1]
+; GFX11-TRUE16-NEXT: v_pk_add_f16 v188, 0x200, v188 op_sel_hi:[0,1]
+; GFX11-TRUE16-NEXT: v_pk_add_f16 v187, 0x200, v187 op_sel_hi:[0,1]
+; GFX11-TRUE16-NEXT: v_pk_add_f16 v186, 0x200, v186 op_sel_hi:[0,1]
+; GFX11-TRUE16-NEXT: v_pk_add_f16 v185, 0x200, v185 op_sel_hi:[0,1]
+; GFX11-TRUE16-NEXT: v_pk_add_f16 v191, 0x200, v191 op_sel_hi:[0,1]
+; GFX11-TRUE16-NEXT: v_pk_add_f16 v190, 0x200, v190 op_sel_hi:[0,1]
; GFX11-TRUE16-NEXT: v_pk_add_f16 v25, 0x200, v25 op_sel_hi:[0,1]
; GFX11-TRUE16-NEXT: v_pk_add_f16 v26, 0x200, v26 op_sel_hi:[0,1]
-; GFX11-TRUE16-NEXT: v_pk_add_f16 v27, 0x200, v27 op_sel_hi:[0,1]
+; GFX11-TRUE16-NEXT: v_pk_add_f16 v30, 0x200, v30 op_sel_hi:[0,1]
; GFX11-TRUE16-NEXT: v_pk_add_f16 v28, 0x200, v28 op_sel_hi:[0,1]
; GFX11-TRUE16-NEXT: v_pk_add_f16 v29, 0x200, v29 op_sel_hi:[0,1]
+; GFX11-TRUE16-NEXT: v_pk_add_f16 v5, 0x200, s0 op_sel_hi:[0,1]
+; GFX11-TRUE16-NEXT: v_pk_add_f16 v9, 0x200, s1 op_sel_hi:[0,1]
+; GFX11-TRUE16-NEXT: v_pk_add_f16 v14, 0x200, s2 op_sel_hi:[0,1]
+; GFX11-TRUE16-NEXT: v_pk_add_f16 v20, 0x200, s3 op_sel_hi:[0,1]
+; GFX11-TRUE16-NEXT: v_pk_add_f16 v27, 0x200, s4 op_sel_hi:[0,1]
+; GFX11-TRUE16-NEXT: v_pk_add_f16 v35, 0x200, s5 op_sel_hi:[0,1]
+; GFX11-TRUE16-NEXT: v_pk_add_f16 v44, 0x200, s6 op_sel_hi:[0,1]
+; GFX11-TRUE16-NEXT: v_pk_add_f16 v54, 0x200, s7 op_sel_hi:[0,1]
+; GFX11-TRUE16-NEXT: v_pk_add_f16 v65, 0x200, s8 op_sel_hi:[0,1]
+; GFX11-TRUE16-NEXT: v_pk_add_f16 v77, 0x200, s9 op_sel_hi:[0,1]
+; GFX11-TRUE16-NEXT: v_pk_add_f16 v90, 0x200, s10 op_sel_hi:[0,1]
+; GFX11-TRUE16-NEXT: v_pk_add_f16 v104, 0x200, s11 op_sel_hi:[0,1]
+; GFX11-TRUE16-NEXT: v_pk_add_f16 v119, 0x200, s12 op_sel_hi:[0,1]
+; GFX11-TRUE16-NEXT: v_pk_add_f16 v135, 0x200, s13 op_sel_hi:[0,1]
+; GFX11-TRUE16-NEXT: v_pk_add_f16 v152, 0x200, s14 op_sel_hi:[0,1]
+; GFX11-TRUE16-NEXT: v_pk_add_f16 v170, 0x200, s15 op_sel_hi:[0,1]
; GFX11-TRUE16-NEXT: .LBB47_3: ; %end
+; GFX11-TRUE16-NEXT: v_dual_mov_b32 v1, v2 :: v_dual_mov_b32 v2, v5
+; GFX11-TRUE16-NEXT: v_dual_mov_b32 v5, v20 :: v_dual_mov_b32 v6, v27
+; GFX11-TRUE16-NEXT: v_dual_mov_b32 v7, v35 :: v_dual_mov_b32 v8, v44
+; GFX11-TRUE16-NEXT: v_dual_mov_b32 v11, v77 :: v_dual_mov_b32 v12, v90
+; GFX11-TRUE16-NEXT: v_mov_b32_e32 v13, v104
+; GFX11-TRUE16-NEXT: v_dual_mov_b32 v15, v135 :: v_dual_mov_b32 v16, v152
+; GFX11-TRUE16-NEXT: v_dual_mov_b32 v17, v170 :: v_dual_mov_b32 v18, v189
+; GFX11-TRUE16-NEXT: v_dual_mov_b32 v19, v188 :: v_dual_mov_b32 v20, v187
+; GFX11-TRUE16-NEXT: v_dual_mov_b32 v21, v186 :: v_dual_mov_b32 v22, v185
+; GFX11-TRUE16-NEXT: v_dual_mov_b32 v23, v191 :: v_dual_mov_b32 v24, v190
+; GFX11-TRUE16-NEXT: s_clause 0x1f
+; GFX11-TRUE16-NEXT: scratch_load_b32 v191, off, s32
+; GFX11-TRUE16-NEXT: scratch_load_b32 v190, off, s32 offset:4
+; GFX11-TRUE16-NEXT: scratch_load_b32 v189, off, s32 offset:8
+; GFX11-TRUE16-NEXT: scratch_load_b32 v188, off, s32 offset:12
+; GFX11-TRUE16-NEXT: scratch_load_b32 v187, off, s32 offset:16
+; GFX11-TRUE16-NEXT: scratch_load_b32 v186, off, s32 offset:20
+; GFX11-TRUE16-NEXT: scratch_load_b32 v185, off, s32 offset:24
+; GFX11-TRUE16-NEXT: scratch_load_b32 v184, off, s32 offset:28
+; GFX11-TRUE16-NEXT: scratch_load_b32 v175, off, s32 offset:32
+; GFX11-TRUE16-NEXT: scratch_load_b32 v174, off, s32 offset:36
+; GFX11-TRUE16-NEXT: scratch_load_b32 v173, off, s32 offset:40
+; GFX11-TRUE16-NEXT: scratch_load_b32 v172, off, s32 offset:44
+; GFX11-TRUE16-NEXT: scratch_load_b32 v171, off, s32 offset:48
+; GFX11-TRUE16-NEXT: scratch_load_b32 v170, off, s32 offset:52
+; GFX11-TRUE16-NEXT: scratch_load_b32 v169, off, s32 offset:56
+; GFX11-TRUE16-NEXT: scratch_load_b32 v168, off, s32 offset:60
+; GFX11-TRUE16-NEXT: scratch_load_b32 v159, off, s32 offset:64
+; GFX11-TRUE16-NEXT: scratch_load_b32 v158, off, s32 offset:68
+; GFX11-TRUE16-NEXT: scratch_load_b32 v157, off, s32 offset:72
+; GFX11-TRUE16-NEXT: scratch_load_b32 v156, off, s32 offset:76
+; GFX11-TRUE16-NEXT: scratch_load_b32 v155, off, s32 offset:80
+; GFX11-TRUE16-NEXT: scratch_load_b32 v154, off, s32 offset:84
+; GFX11-TRUE16-NEXT: scratch_load_b32 v153, off, s32 offset:88
+; GFX11-TRUE16-NEXT: scratch_load_b32 v152, off, s32 offset:92
+; GFX11-TRUE16-NEXT: scratch_load_b32 v143, off, s32 offset:96
+; GFX11-TRUE16-NEXT: scratch_load_b32 v142, off, s32 offset:100
+; GFX11-TRUE16-NEXT: scratch_load_b32 v141, off, s32 offset:104
+; GFX11-TRUE16-NEXT: scratch_load_b32 v140, off, s32 offset:108
+; GFX11-TRUE16-NEXT: scratch_load_b32 v139, off, s32 offset:112
+; GFX11-TRUE16-NEXT: scratch_load_b32 v138, off, s32 offset:116
+; GFX11-TRUE16-NEXT: scratch_load_b32 v137, off, s32 offset:120
+; GFX11-TRUE16-NEXT: scratch_load_b32 v136, off, s32 offset:124
+; GFX11-TRUE16-NEXT: s_clause 0x1f
+; GFX11-TRUE16-NEXT: scratch_load_b32 v127, off, s32 offset:128
+; GFX11-TRUE16-NEXT: scratch_load_b32 v126, off, s32 offset:132
+; GFX11-TRUE16-NEXT: scratch_load_b32 v125, off, s32 offset:136
+; GFX11-TRUE16-NEXT: scratch_load_b32 v124, off, s32 offset:140
+; GFX11-TRUE16-NEXT: scratch_load_b32 v123, off, s32 offset:144
+; GFX11-TRUE16-NEXT: scratch_load_b32 v122, off, s32 offset:148
+; GFX11-TRUE16-NEXT: scratch_load_b32 v121, off, s32 offset:152
+; GFX11-TRUE16-NEXT: scratch_load_b32 v120, off, s32 offset:156
+; GFX11-TRUE16-NEXT: scratch_load_b32 v111, off, s32 offset:160
+; GFX11-TRUE16-NEXT: scratch_load_b32 v110, off, s32 offset:164
+; GFX11-TRUE16-NEXT: scratch_load_b32 v109, off, s32 offset:168
+; GFX11-TRUE16-NEXT: scratch_load_b32 v108, off, s32 offset:172
+; GFX11-TRUE16-NEXT: scratch_load_b32 v107, off, s32 offset:176
+; GFX11-TRUE16-NEXT: scratch_load_b32 v106, off, s32 offset:180
+; GFX11-TRUE16-NEXT: scratch_load_b32 v105, off, s32 offset:184
+; GFX11-TRUE16-NEXT: scratch_load_b32 v104, off, s32 offset:188
+; GFX11-TRUE16-NEXT: scratch_load_b32 v95, off, s32 offset:192
+; GFX11-TRUE16-NEXT: scratch_load_b32 v94, off, s32 offset:196
+; GFX11-TRUE16-NEXT: scratch_load_b32 v93, off, s32 offset:200
+; GFX11-TRUE16-NEXT: scratch_load_b32 v92, off, s32 offset:204
+; GFX11-TRUE16-NEXT: scratch_load_b32 v91, off, s32 offset:208
+; GFX11-TRUE16-NEXT: scratch_load_b32 v90, off, s32 offset:212
+; GFX11-TRUE16-NEXT: scratch_load_b32 v89, off, s32 offset:216
+; GFX11-TRUE16-NEXT: scratch_load_b32 v88, off, s32 offset:220
+; GFX11-TRUE16-NEXT: scratch_load_b32 v79, off, s32 offset:224
+; GFX11-TRUE16-NEXT: scratch_load_b32 v78, off, s32 offset:228
+; GFX11-TRUE16-NEXT: scratch_load_b32 v77, off, s32 offset:232
+; GFX11-TRUE16-NEXT: scratch_load_b32 v76, off, s32 offset:236
+; GFX11-TRUE16-NEXT: scratch_load_b32 v75, off, s32 offset:240
+; GFX11-TRUE16-NEXT: scratch_load_b32 v74, off, s32 offset:244
+; GFX11-TRUE16-NEXT: scratch_load_b32 v73, off, s32 offset:248
+; GFX11-TRUE16-NEXT: scratch_load_b32 v72, off, s32 offset:252
+; GFX11-TRUE16-NEXT: s_clause 0xf
+; GFX11-TRUE16-NEXT: scratch_load_b32 v63, off, s32 offset:256
+; GFX11-TRUE16-NEXT: scratch_load_b32 v62, off, s32 offset:260
+; GFX11-TRUE16-NEXT: scratch_load_b32 v61, off, s32 offset:264
+; GFX11-TRUE16-NEXT: scratch_load_b32 v60, off, s32 offset:268
+; GFX11-TRUE16-NEXT: scratch_load_b32 v59, off, s32 offset:272
+; GFX11-TRUE16-NEXT: scratch_load_b32 v58, off, s32 offset:276
+; GFX11-TRUE16-NEXT: scratch_load_b32 v57, off, s32 offset:280
+; GFX11-TRUE16-NEXT: scratch_load_b32 v56, off, s32 offset:284
+; GFX11-TRUE16-NEXT: scratch_load_b32 v47, off, s32 offset:288
+; GFX11-TRUE16-NEXT: scratch_load_b32 v46, off, s32 offset:292
+; GFX11-TRUE16-NEXT: scratch_load_b32 v45, off, s32 offset:296
+; GFX11-TRUE16-NEXT: scratch_load_b32 v44, off, s32 offset:300
+; GFX11-TRUE16-NEXT: scratch_load_b32 v43, off, s32 offset:304
+; GFX11-TRUE16-NEXT: scratch_load_b32 v42, off, s32 offset:308
+; GFX11-TRUE16-NEXT: scratch_load_b32 v41, off, s32 offset:312
+; GFX11-TRUE16-NEXT: scratch_load_b32 v40, off, s32 offset:316
+; GFX11-TRUE16-NEXT: v_dual_mov_b32 v3, v9 :: v_dual_mov_b32 v4, v14
+; GFX11-TRUE16-NEXT: v_dual_mov_b32 v9, v54 :: v_dual_mov_b32 v10, v65
+; GFX11-TRUE16-NEXT: v_dual_mov_b32 v14, v119 :: v_dual_mov_b32 v27, v30
+; GFX11-TRUE16-NEXT: s_waitcnt vmcnt(0)
; GFX11-TRUE16-NEXT: s_setpc_b64 s[30:31]
; GFX11-TRUE16-NEXT: .LBB47_4:
+; GFX11-TRUE16-NEXT: v_dual_mov_b32 v64, v29 :: v_dual_mov_b32 v65, v28
+; GFX11-TRUE16-NEXT: v_mov_b32_e32 v66, v30
+; GFX11-TRUE16-NEXT: v_dual_mov_b32 v53, v26 :: v_dual_mov_b32 v54, v25
; GFX11-TRUE16-NEXT: ; implicit-def: $vgpr0_vgpr1_vgpr2_vgpr3_vgpr4_vgpr5_vgpr6_vgpr7_vgpr8_vgpr9_vgpr10_vgpr11_vgpr12_vgpr13_vgpr14_vgpr15_vgpr16_vgpr17_vgpr18_vgpr19_vgpr20_vgpr21_vgpr22_vgpr23_vgpr24_vgpr25_vgpr26_vgpr27_vgpr28_vgpr29_vgpr30_vgpr31
+; GFX11-TRUE16-NEXT: ; implicit-def: $vgpr1_vgpr2_vgpr3_vgpr4_vgpr5_vgpr6_vgpr7_vgpr8_vgpr9_vgpr10_vgpr11_vgpr12_vgpr13_vgpr14_vgpr15_vgpr16_vgpr17_vgpr18_vgpr19_vgpr20_vgpr21_vgpr22_vgpr23_vgpr24_vgpr25_vgpr26_vgpr27_vgpr28_vgpr29_vgpr30_vgpr31_vgpr32
+; GFX11-TRUE16-NEXT: ; implicit-def: $vgpr3_vgpr4_vgpr5_vgpr6_vgpr7_vgpr8_vgpr9_vgpr10_vgpr11_vgpr12_vgpr13_vgpr14_vgpr15_vgpr16_vgpr17_vgpr18_vgpr19_vgpr20_vgpr21_vgpr22_vgpr23_vgpr24_vgpr25_vgpr26_vgpr27_vgpr28_vgpr29_vgpr30_vgpr31_vgpr32_vgpr33_vgpr34
+; GFX11-TRUE16-NEXT: ; implicit-def: $vgpr6_vgpr7_vgpr8_vgpr9_vgpr10_vgpr11_vgpr12_vgpr13_vgpr14_vgpr15_vgpr16_vgpr17_vgpr18_vgpr19_vgpr20_vgpr21_vgpr22_vgpr23_vgpr24_vgpr25_vgpr26_vgpr27_vgpr28_vgpr29_vgpr30_vgpr31_vgpr32_vgpr33_vgpr34_vgpr35_vgpr36_vgpr37
+; GFX11-TRUE16-NEXT: ; implicit-def: $vgpr10_vgpr11_vgpr12_vgpr13_vgpr14_vgpr15_vgpr16_vgpr17_vgpr18_vgpr19_vgpr20_vgpr21_vgpr22_vgpr23_vgpr24_vgpr25_vgpr26_vgpr27_vgpr28_vgpr29_vgpr30_vgpr31_vgpr32_vgpr33_vgpr34_vgpr35_vgpr36_vgpr37_vgpr38_vgpr39_vgpr40_vgpr41
+; GFX11-TRUE16-NEXT: ; implicit-def: $vgpr15_vgpr16_vgpr17_vgpr18_vgpr19_vgpr20_vgpr21_vgpr22_vgpr23_vgpr24_vgpr25_vgpr26_vgpr27_vgpr28_vgpr29_vgpr30_vgpr31_vgpr32_vgpr33_vgpr34_vgpr35_vgpr36_vgpr37_vgpr38_vgpr39_vgpr40_vgpr41_vgpr42_vgpr43_vgpr44_vgpr45_vgpr46
+; GFX11-TRUE16-NEXT: ; implicit-def: $vgpr21_vgpr22_vgpr23_vgpr24_vgpr25_vgpr26_vgpr27_vgpr28_vgpr29_vgpr30_vgpr31_vgpr32_vgpr33_vgpr34_vgpr35_vgpr36_vgpr37_vgpr38_vgpr39_vgpr40_vgpr41_vgpr42_vgpr43_vgpr44_vgpr45_vgpr46_vgpr47_vgpr48_vgpr49_vgpr50_vgpr51_vgpr52
+; GFX11-TRUE16-NEXT: s_delay_alu instid0(VALU_DEP_1) | instskip(NEXT) | instid1(VALU_DEP_4)
+; GFX11-TRUE16-NEXT: v_dual_mov_b32 v25, v54 :: v_dual_mov_b32 v26, v53
+; GFX11-TRUE16-NEXT: ; implicit-def: $vgpr28_vgpr29_vgpr30_vgpr31_vgpr32_vgpr33_vgpr34_vgpr35_vgpr36_vgpr37_vgpr38_vgpr39_vgpr40_vgpr41_vgpr42_vgpr43_vgpr44_vgpr45_vgpr46_vgpr47_vgpr48_vgpr49_vgpr50_vgpr51_vgpr52_vgpr53_vgpr54_vgpr55_vgpr56_vgpr57_vgpr58_vgpr59
+; GFX11-TRUE16-NEXT: v_dual_mov_b32 v28, v65 :: v_dual_mov_b32 v29, v64
+; GFX11-TRUE16-NEXT: s_delay_alu instid0(VALU_DEP_4)
+; GFX11-TRUE16-NEXT: v_mov_b32_e32 v30, v66
+; GFX11-TRUE16-NEXT: ; implicit-def: $vgpr36_vgpr37_vgpr38_vgpr39_vgpr40_vgpr41_vgpr42_vgpr43_vgpr44_vgpr45_vgpr46_vgpr47_vgpr48_vgpr49_vgpr50_vgpr51_vgpr52_vgpr53_vgpr54_vgpr55_vgpr56_vgpr57_vgpr58_vgpr59_vgpr60_vgpr61_vgpr62_vgpr63_vgpr64_vgpr65_vgpr66_vgpr67
+; GFX11-TRUE16-NEXT: ; implicit-def: $vgpr45_vgpr46_vgpr47_vgpr48_vgpr49_vgpr50_vgpr51_vgpr52_vgpr53_vgpr54_vgpr55_vgpr56_vgpr57_vgpr58_vgpr59_vgpr60_vgpr61_vgpr62_vgpr63_vgpr64_vgpr65_vgpr66_vgpr67_vgpr68_vgpr69_vgpr70_vgpr71_vgpr72_vgpr73_vgpr74_vgpr75_vgpr76
+; GFX11-TRUE16-NEXT: ; implicit-def: $vgpr55_vgpr56_vgpr57_vgpr58_vgpr59_vgpr60_vgpr61_vgpr62_vgpr63_vgpr64_vgpr65_vgpr66_vgpr67_vgpr68_vgpr69_vgpr70_vgpr71_vgpr72_vgpr73_vgpr74_vgpr75_vgpr76_vgpr77_vgpr78_vgpr79_vgpr80_vgpr81_vgpr82_vgpr83_vgpr84_vgpr85_vgpr86
+; GFX11-TRUE16-NEXT: ; implicit-def: $vgpr66_vgpr67_vgpr68_vgpr69_vgpr70_vgpr71_vgpr72_vgpr73_vgpr74_vgpr75_vgpr76_vgpr77_vgpr78_vgpr79_vgpr80_vgpr81_vgpr82_vgpr83_vgpr84_vgpr85_vgpr86_vgpr87_vgpr88_vgpr89_vgpr90_vgpr91_vgpr92_vgpr93_vgpr94_vgpr95_vgpr96_vgpr97
+; GFX11-TRUE16-NEXT: ; implicit-def: $vgpr78_vgpr79_vgpr80_vgpr81_vgpr82_vgpr83_vgpr84_vgpr85_vgpr86_vgpr87_vgpr88_vgpr89_vgpr90_vgpr91_vgpr92_vgpr93_vgpr94_vgpr95_vgpr96_vgpr97_vgpr98_vgpr99_vgpr100_vgpr101_vgpr102_vgpr103_vgpr104_vgpr105_vgpr106_vgpr107_vgpr108_vgpr109
+; GFX11-TRUE16-NEXT: ; implicit-def: $vgpr91_vgpr92_vgpr93_vgpr94_vgpr95_vgpr96_vgpr97_vgpr98_vgpr99_vgpr100_vgpr101_vgpr102_vgpr103_vgpr104_vgpr105_vgpr106_vgpr107_vgpr108_vgpr109_vgpr110_vgpr111_vgpr112_vgpr113_vgpr114_vgpr115_vgpr116_vgpr117_vgpr118_vgpr119_vgpr120_vgpr121_vgpr122
+; GFX11-TRUE16-NEXT: ; implicit-def: $vgpr105_vgpr106_vgpr107_vgpr108_vgpr109_vgpr110_vgpr111_vgpr112_vgpr113_vgpr114_vgpr115_vgpr116_vgpr117_vgpr118_vgpr119_vgpr120_vgpr121_vgpr122_vgpr123_vgpr124_vgpr125_vgpr126_vgpr127_vgpr128_vgpr129_vgpr130_vgpr131_vgpr132_vgpr133_vgpr134_vgpr135_vgpr136
+; GFX11-TRUE16-NEXT: ; implicit-def: $vgpr120_vgpr121_vgpr122_vgpr123_vgpr124_vgpr125_vgpr126_vgpr127_vgpr128_vgpr129_vgpr130_vgpr131_vgpr132_vgpr133_vgpr134_vgpr135_vgpr136_vgpr137_vgpr138_vgpr139_vgpr140_vgpr141_vgpr142_vgpr143_vgpr144_vgpr145_vgpr146_vgpr147_vgpr148_vgpr149_vgpr150_vgpr151
+; GFX11-TRUE16-NEXT: ; implicit-def: $vgpr136_vgpr137_vgpr138_vgpr139_vgpr140_vgpr141_vgpr142_vgpr143_vgpr144_vgpr145_vgpr146_vgpr147_vgpr148_vgpr149_vgpr150_vgpr151_vgpr152_vgpr153_vgpr154_vgpr155_vgpr156_vgpr157_vgpr158_vgpr159_vgpr160_vgpr161_vgpr162_vgpr163_vgpr164_vgpr165_vgpr166_vgpr167
+; GFX11-TRUE16-NEXT: ; implicit-def: $vgpr153_vgpr154_vgpr155_vgpr156_vgpr157_vgpr158_vgpr159_vgpr160_vgpr161_vgpr162_vgpr163_vgpr164_vgpr165_vgpr166_vgpr167_vgpr168_vgpr169_vgpr170_vgpr171_vgpr172_vgpr173_vgpr174_vgpr175_vgpr176_vgpr177_vgpr178_vgpr179_vgpr180_vgpr181_vgpr182_vgpr183_vgpr184
; GFX11-TRUE16-NEXT: s_branch .LBB47_2
;
; GFX11-FAKE16-LABEL: bitcast_v60f16_to_v15i64_scalar:
@@ -38144,204 +39424,388 @@ define inreg <60 x i16> @bitcast_v15f64_to_v60i16_scalar(<15 x double> inreg %a,
; GFX9-NEXT: ; implicit-def: $vgpr54
; GFX9-NEXT: s_branch .LBB49_2
;
-; GFX11-LABEL: bitcast_v15f64_to_v60i16_scalar:
-; GFX11: ; %bb.0:
-; GFX11-NEXT: s_waitcnt vmcnt(0) expcnt(0) lgkmcnt(0)
-; GFX11-NEXT: v_cmp_ne_u32_e32 vcc_lo, 0, v12
-; GFX11-NEXT: v_dual_mov_b32 v30, s0 :: v_dual_mov_b32 v31, s1
-; GFX11-NEXT: v_dual_mov_b32 v28, s2 :: v_dual_mov_b32 v29, s3
-; GFX11-NEXT: v_dual_mov_b32 v26, s16 :: v_dual_mov_b32 v27, s17
-; GFX11-NEXT: v_dual_mov_b32 v24, s18 :: v_dual_mov_b32 v25, s19
-; GFX11-NEXT: v_dual_mov_b32 v22, s20 :: v_dual_mov_b32 v23, s21
-; GFX11-NEXT: v_dual_mov_b32 v20, s22 :: v_dual_mov_b32 v21, s23
-; GFX11-NEXT: v_dual_mov_b32 v18, s24 :: v_dual_mov_b32 v19, s25
-; GFX11-NEXT: v_dual_mov_b32 v14, s26 :: v_dual_mov_b32 v15, s27
-; GFX11-NEXT: v_dual_mov_b32 v16, s28 :: v_dual_mov_b32 v17, s29
-; GFX11-NEXT: s_mov_b32 s0, 0
-; GFX11-NEXT: s_and_b32 s1, vcc_lo, exec_lo
-; GFX11-NEXT: s_cbranch_scc0 .LBB49_4
-; GFX11-NEXT: ; %bb.1: ; %cmp.false
-; GFX11-NEXT: v_lshrrev_b32_e32 v50, 16, v11
-; GFX11-NEXT: v_lshrrev_b32_e32 v51, 16, v10
-; GFX11-NEXT: v_lshrrev_b32_e32 v52, 16, v9
-; GFX11-NEXT: v_lshrrev_b32_e32 v53, 16, v8
-; GFX11-NEXT: v_lshrrev_b32_e32 v54, 16, v7
-; GFX11-NEXT: v_lshrrev_b32_e32 v55, 16, v6
-; GFX11-NEXT: v_lshrrev_b32_e32 v64, 16, v5
-; GFX11-NEXT: v_lshrrev_b32_e32 v65, 16, v4
-; GFX11-NEXT: v_lshrrev_b32_e32 v66, 16, v3
-; GFX11-NEXT: v_lshrrev_b32_e32 v67, 16, v2
-; GFX11-NEXT: v_lshrrev_b32_e32 v68, 16, v1
-; GFX11-NEXT: v_lshrrev_b32_e32 v69, 16, v0
-; GFX11-NEXT: v_lshrrev_b32_e32 v70, 16, v17
-; GFX11-NEXT: v_lshrrev_b32_e32 v71, 16, v16
-; GFX11-NEXT: v_lshrrev_b32_e32 v80, 16, v15
-; GFX11-NEXT: v_lshrrev_b32_e32 v81, 16, v14
-; GFX11-NEXT: v_lshrrev_b32_e32 v13, 16, v19
-; GFX11-NEXT: v_lshrrev_b32_e32 v12, 16, v18
-; GFX11-NEXT: v_lshrrev_b32_e32 v35, 16, v21
-; GFX11-NEXT: v_lshrrev_b32_e32 v34, 16, v20
-; GFX11-NEXT: v_lshrrev_b32_e32 v33, 16, v23
-; GFX11-NEXT: v_lshrrev_b32_e32 v32, 16, v22
-; GFX11-NEXT: v_lshrrev_b32_e32 v82, 16, v25
-; GFX11-NEXT: v_lshrrev_b32_e32 v83, 16, v24
-; GFX11-NEXT: v_lshrrev_b32_e32 v49, 16, v27
-; GFX11-NEXT: v_lshrrev_b32_e32 v48, 16, v26
-; GFX11-NEXT: v_lshrrev_b32_e32 v39, 16, v29
-; GFX11-NEXT: v_lshrrev_b32_e32 v38, 16, v28
-; GFX11-NEXT: v_lshrrev_b32_e32 v37, 16, v31
-; GFX11-NEXT: v_lshrrev_b32_e32 v36, 16, v30
-; GFX11-NEXT: s_and_not1_b32 vcc_lo, exec_lo, s0
-; GFX11-NEXT: s_cbranch_vccnz .LBB49_3
-; GFX11-NEXT: .LBB49_2: ; %cmp.true
-; GFX11-NEXT: v_add_f64 v[10:11], v[10:11], 1.0
-; GFX11-NEXT: v_add_f64 v[8:9], v[8:9], 1.0
-; GFX11-NEXT: v_add_f64 v[6:7], v[6:7], 1.0
-; GFX11-NEXT: v_add_f64 v[4:5], v[4:5], 1.0
-; GFX11-NEXT: v_add_f64 v[2:3], v[2:3], 1.0
-; GFX11-NEXT: v_add_f64 v[0:1], v[0:1], 1.0
-; GFX11-NEXT: v_add_f64 v[16:17], v[16:17], 1.0
-; GFX11-NEXT: v_add_f64 v[14:15], v[14:15], 1.0
-; GFX11-NEXT: v_add_f64 v[18:19], v[18:19], 1.0
-; GFX11-NEXT: v_add_f64 v[20:21], v[20:21], 1.0
-; GFX11-NEXT: v_add_f64 v[22:23], v[22:23], 1.0
-; GFX11-NEXT: v_add_f64 v[24:25], v[24:25], 1.0
-; GFX11-NEXT: v_add_f64 v[26:27], v[26:27], 1.0
-; GFX11-NEXT: v_add_f64 v[28:29], v[28:29], 1.0
-; GFX11-NEXT: v_add_f64 v[30:31], v[30:31], 1.0
-; GFX11-NEXT: v_lshrrev_b32_e32 v50, 16, v11
-; GFX11-NEXT: v_lshrrev_b32_e32 v51, 16, v10
-; GFX11-NEXT: v_lshrrev_b32_e32 v52, 16, v9
-; GFX11-NEXT: v_lshrrev_b32_e32 v53, 16, v8
-; GFX11-NEXT: v_lshrrev_b32_e32 v54, 16, v7
-; GFX11-NEXT: v_lshrrev_b32_e32 v55, 16, v6
-; GFX11-NEXT: v_lshrrev_b32_e32 v64, 16, v5
-; GFX11-NEXT: v_lshrrev_b32_e32 v65, 16, v4
-; GFX11-NEXT: v_lshrrev_b32_e32 v66, 16, v3
-; GFX11-NEXT: v_lshrrev_b32_e32 v67, 16, v2
-; GFX11-NEXT: v_lshrrev_b32_e32 v68, 16, v1
-; GFX11-NEXT: v_lshrrev_b32_e32 v69, 16, v0
-; GFX11-NEXT: v_lshrrev_b32_e32 v70, 16, v17
-; GFX11-NEXT: v_lshrrev_b32_e32 v71, 16, v16
-; GFX11-NEXT: v_lshrrev_b32_e32 v80, 16, v15
-; GFX11-NEXT: v_lshrrev_b32_e32 v81, 16, v14
-; GFX11-NEXT: v_lshrrev_b32_e32 v13, 16, v19
-; GFX11-NEXT: v_lshrrev_b32_e32 v12, 16, v18
-; GFX11-NEXT: v_lshrrev_b32_e32 v35, 16, v21
-; GFX11-NEXT: v_lshrrev_b32_e32 v34, 16, v20
-; GFX11-NEXT: v_lshrrev_b32_e32 v33, 16, v23
-; GFX11-NEXT: v_lshrrev_b32_e32 v32, 16, v22
-; GFX11-NEXT: v_lshrrev_b32_e32 v82, 16, v25
-; GFX11-NEXT: v_lshrrev_b32_e32 v83, 16, v24
-; GFX11-NEXT: v_lshrrev_b32_e32 v49, 16, v27
-; GFX11-NEXT: v_lshrrev_b32_e32 v48, 16, v26
-; GFX11-NEXT: v_lshrrev_b32_e32 v39, 16, v29
-; GFX11-NEXT: v_lshrrev_b32_e32 v38, 16, v28
-; GFX11-NEXT: v_lshrrev_b32_e32 v37, 16, v31
-; GFX11-NEXT: v_lshrrev_b32_e32 v36, 16, v30
-; GFX11-NEXT: .LBB49_3: ; %end
-; GFX11-NEXT: v_and_b32_e32 v20, 0xffff, v20
-; GFX11-NEXT: v_and_b32_e32 v26, 0xffff, v26
-; GFX11-NEXT: v_and_b32_e32 v1, 0xffff, v1
-; GFX11-NEXT: v_and_b32_e32 v31, 0xffff, v31
-; GFX11-NEXT: v_and_b32_e32 v29, 0xffff, v29
-; GFX11-NEXT: v_lshl_or_b32 v34, v34, 16, v20
-; GFX11-NEXT: v_and_b32_e32 v19, 0xffff, v19
-; GFX11-NEXT: v_lshl_or_b32 v48, v48, 16, v26
-; GFX11-NEXT: v_and_b32_e32 v25, 0xffff, v25
-; GFX11-NEXT: v_and_b32_e32 v23, 0xffff, v23
-; GFX11-NEXT: v_and_b32_e32 v21, 0xffff, v21
-; GFX11-NEXT: v_and_b32_e32 v18, 0xffff, v18
-; GFX11-NEXT: v_lshl_or_b32 v13, v13, 16, v19
-; GFX11-NEXT: v_and_b32_e32 v0, 0xffff, v0
-; GFX11-NEXT: v_lshl_or_b32 v19, v68, 16, v1
-; GFX11-NEXT: v_and_b32_e32 v1, 0xffff, v3
-; GFX11-NEXT: v_lshl_or_b32 v37, v37, 16, v31
-; GFX11-NEXT: v_and_b32_e32 v30, 0xffff, v30
-; GFX11-NEXT: v_lshl_or_b32 v39, v39, 16, v29
-; GFX11-NEXT: v_and_b32_e32 v28, 0xffff, v28
-; GFX11-NEXT: v_lshl_or_b32 v31, v82, 16, v25
-; GFX11-NEXT: v_and_b32_e32 v26, 0xffff, v27
-; GFX11-NEXT: v_lshl_or_b32 v33, v33, 16, v23
-; GFX11-NEXT: v_and_b32_e32 v24, 0xffff, v24
-; GFX11-NEXT: v_lshl_or_b32 v35, v35, 16, v21
-; GFX11-NEXT: v_and_b32_e32 v22, 0xffff, v22
-; GFX11-NEXT: v_lshl_or_b32 v12, v12, 16, v18
-; GFX11-NEXT: v_lshl_or_b32 v18, v69, 16, v0
-; GFX11-NEXT: v_and_b32_e32 v0, 0xffff, v2
-; GFX11-NEXT: v_and_b32_e32 v2, 0xffff, v4
-; GFX11-NEXT: v_and_b32_e32 v3, 0xffff, v5
-; GFX11-NEXT: v_and_b32_e32 v4, 0xffff, v6
-; GFX11-NEXT: v_lshl_or_b32 v21, v66, 16, v1
-; GFX11-NEXT: v_and_b32_e32 v1, 0xffff, v8
-; GFX11-NEXT: v_lshl_or_b32 v38, v38, 16, v28
-; GFX11-NEXT: v_lshl_or_b32 v32, v32, 16, v22
-; GFX11-NEXT: v_and_b32_e32 v14, 0xffff, v14
-; GFX11-NEXT: v_and_b32_e32 v15, 0xffff, v15
-; GFX11-NEXT: v_and_b32_e32 v16, 0xffff, v16
-; GFX11-NEXT: v_and_b32_e32 v17, 0xffff, v17
-; GFX11-NEXT: v_lshl_or_b32 v20, v67, 16, v0
-; GFX11-NEXT: v_lshl_or_b32 v22, v65, 16, v2
-; GFX11-NEXT: v_lshl_or_b32 v23, v64, 16, v3
-; GFX11-NEXT: v_and_b32_e32 v0, 0xffff, v7
-; GFX11-NEXT: v_and_b32_e32 v2, 0xffff, v9
-; GFX11-NEXT: v_and_b32_e32 v3, 0xffff, v10
-; GFX11-NEXT: v_mov_b32_e32 v7, v31
-; GFX11-NEXT: v_lshl_or_b32 v49, v49, 16, v26
-; GFX11-NEXT: v_lshl_or_b32 v26, v53, 16, v1
-; GFX11-NEXT: v_mov_b32_e32 v1, v37
-; GFX11-NEXT: v_lshl_or_b32 v36, v36, 16, v30
-; GFX11-NEXT: v_mov_b32_e32 v9, v33
-; GFX11-NEXT: v_lshl_or_b32 v30, v83, 16, v24
-; GFX11-NEXT: v_lshl_or_b32 v24, v55, 16, v4
-; GFX11-NEXT: v_and_b32_e32 v4, 0xffff, v11
-; GFX11-NEXT: v_lshl_or_b32 v14, v81, 16, v14
-; GFX11-NEXT: v_lshl_or_b32 v15, v80, 16, v15
-; GFX11-NEXT: v_lshl_or_b32 v16, v71, 16, v16
-; GFX11-NEXT: v_lshl_or_b32 v17, v70, 16, v17
-; GFX11-NEXT: v_lshl_or_b32 v25, v54, 16, v0
-; GFX11-NEXT: v_lshl_or_b32 v27, v52, 16, v2
-; GFX11-NEXT: v_lshl_or_b32 v28, v51, 16, v3
-; GFX11-NEXT: v_lshl_or_b32 v29, v50, 16, v4
-; GFX11-NEXT: v_mov_b32_e32 v0, v36
-; GFX11-NEXT: v_dual_mov_b32 v2, v38 :: v_dual_mov_b32 v3, v39
-; GFX11-NEXT: v_dual_mov_b32 v4, v48 :: v_dual_mov_b32 v5, v49
-; GFX11-NEXT: v_mov_b32_e32 v6, v30
-; GFX11-NEXT: v_mov_b32_e32 v8, v32
-; GFX11-NEXT: v_dual_mov_b32 v10, v34 :: v_dual_mov_b32 v11, v35
-; GFX11-NEXT: s_setpc_b64 s[30:31]
-; GFX11-NEXT: .LBB49_4:
-; GFX11-NEXT: ; implicit-def: $vgpr36
-; GFX11-NEXT: ; implicit-def: $vgpr37
-; GFX11-NEXT: ; implicit-def: $vgpr38
-; GFX11-NEXT: ; implicit-def: $vgpr39
-; GFX11-NEXT: ; implicit-def: $vgpr48
-; GFX11-NEXT: ; implicit-def: $vgpr49
-; GFX11-NEXT: ; implicit-def: $vgpr83
-; GFX11-NEXT: ; implicit-def: $vgpr82
-; GFX11-NEXT: ; implicit-def: $vgpr32
-; GFX11-NEXT: ; implicit-def: $vgpr33
-; GFX11-NEXT: ; implicit-def: $vgpr34
-; GFX11-NEXT: ; implicit-def: $vgpr35
-; GFX11-NEXT: ; implicit-def: $vgpr12
-; GFX11-NEXT: ; implicit-def: $vgpr13
-; GFX11-NEXT: ; implicit-def: $vgpr81
-; GFX11-NEXT: ; implicit-def: $vgpr80
-; GFX11-NEXT: ; implicit-def: $vgpr71
-; GFX11-NEXT: ; implicit-def: $vgpr70
-; GFX11-NEXT: ; implicit-def: $vgpr69
-; GFX11-NEXT: ; implicit-def: $vgpr68
-; GFX11-NEXT: ; implicit-def: $vgpr67
-; GFX11-NEXT: ; implicit-def: $vgpr66
-; GFX11-NEXT: ; implicit-def: $vgpr65
-; GFX11-NEXT: ; implicit-def: $vgpr64
-; GFX11-NEXT: ; implicit-def: $vgpr55
-; GFX11-NEXT: ; implicit-def: $vgpr54
-; GFX11-NEXT: ; implicit-def: $vgpr53
-; GFX11-NEXT: ; implicit-def: $vgpr52
-; GFX11-NEXT: ; implicit-def: $vgpr51
-; GFX11-NEXT: ; implicit-def: $vgpr50
-; GFX11-NEXT: s_branch .LBB49_2
+; GFX11-TRUE16-LABEL: bitcast_v15f64_to_v60i16_scalar:
+; GFX11-TRUE16: ; %bb.0:
+; GFX11-TRUE16-NEXT: s_waitcnt vmcnt(0) expcnt(0) lgkmcnt(0)
+; GFX11-TRUE16-NEXT: v_dual_mov_b32 v16, v12 :: v_dual_mov_b32 v29, v11
+; GFX11-TRUE16-NEXT: v_dual_mov_b32 v28, v10 :: v_dual_mov_b32 v27, v9
+; GFX11-TRUE16-NEXT: v_dual_mov_b32 v26, v8 :: v_dual_mov_b32 v25, v7
+; GFX11-TRUE16-NEXT: s_delay_alu instid0(VALU_DEP_3)
+; GFX11-TRUE16-NEXT: v_cmp_ne_u32_e32 vcc_lo, 0, v16
+; GFX11-TRUE16-NEXT: v_dual_mov_b32 v24, v6 :: v_dual_mov_b32 v23, v5
+; GFX11-TRUE16-NEXT: v_dual_mov_b32 v22, v4 :: v_dual_mov_b32 v21, v3
+; GFX11-TRUE16-NEXT: v_dual_mov_b32 v20, v2 :: v_dual_mov_b32 v19, v1
+; GFX11-TRUE16-NEXT: v_dual_mov_b32 v18, v0 :: v_dual_mov_b32 v1, s1
+; GFX11-TRUE16-NEXT: v_dual_mov_b32 v0, s0 :: v_dual_mov_b32 v3, s3
+; GFX11-TRUE16-NEXT: v_dual_mov_b32 v2, s2 :: v_dual_mov_b32 v5, s17
+; GFX11-TRUE16-NEXT: v_dual_mov_b32 v4, s16 :: v_dual_mov_b32 v7, s19
+; GFX11-TRUE16-NEXT: v_dual_mov_b32 v6, s18 :: v_dual_mov_b32 v9, s21
+; GFX11-TRUE16-NEXT: v_dual_mov_b32 v8, s20 :: v_dual_mov_b32 v11, s23
+; GFX11-TRUE16-NEXT: v_dual_mov_b32 v10, s22 :: v_dual_mov_b32 v13, s25
+; GFX11-TRUE16-NEXT: v_dual_mov_b32 v12, s24 :: v_dual_mov_b32 v15, s27
+; GFX11-TRUE16-NEXT: v_dual_mov_b32 v14, s26 :: v_dual_mov_b32 v17, s29
+; GFX11-TRUE16-NEXT: v_mov_b32_e32 v16, s28
+; GFX11-TRUE16-NEXT: s_mov_b32 s0, 0
+; GFX11-TRUE16-NEXT: s_and_b32 s1, vcc_lo, exec_lo
+; GFX11-TRUE16-NEXT: s_cbranch_scc0 .LBB49_4
+; GFX11-TRUE16-NEXT: ; %bb.1: ; %cmp.false
+; GFX11-TRUE16-NEXT: v_lshrrev_b32_e32 v30, 16, v29
+; GFX11-TRUE16-NEXT: v_lshrrev_b32_e32 v31, 16, v28
+; GFX11-TRUE16-NEXT: v_lshrrev_b32_e32 v32, 16, v27
+; GFX11-TRUE16-NEXT: v_lshrrev_b32_e32 v33, 16, v26
+; GFX11-TRUE16-NEXT: v_lshrrev_b32_e32 v34, 16, v25
+; GFX11-TRUE16-NEXT: v_lshrrev_b32_e32 v35, 16, v24
+; GFX11-TRUE16-NEXT: v_lshrrev_b32_e32 v36, 16, v23
+; GFX11-TRUE16-NEXT: v_lshrrev_b32_e32 v37, 16, v22
+; GFX11-TRUE16-NEXT: v_lshrrev_b32_e32 v38, 16, v21
+; GFX11-TRUE16-NEXT: v_lshrrev_b32_e32 v39, 16, v20
+; GFX11-TRUE16-NEXT: v_lshrrev_b32_e32 v48, 16, v19
+; GFX11-TRUE16-NEXT: v_lshrrev_b32_e32 v49, 16, v18
+; GFX11-TRUE16-NEXT: v_lshrrev_b32_e32 v50, 16, v17
+; GFX11-TRUE16-NEXT: v_lshrrev_b32_e32 v51, 16, v16
+; GFX11-TRUE16-NEXT: v_lshrrev_b32_e32 v52, 16, v15
+; GFX11-TRUE16-NEXT: v_lshrrev_b32_e32 v53, 16, v14
+; GFX11-TRUE16-NEXT: v_lshrrev_b32_e32 v54, 16, v13
+; GFX11-TRUE16-NEXT: v_lshrrev_b32_e32 v55, 16, v12
+; GFX11-TRUE16-NEXT: v_lshrrev_b32_e32 v64, 16, v11
+; GFX11-TRUE16-NEXT: v_lshrrev_b32_e32 v65, 16, v10
+; GFX11-TRUE16-NEXT: v_lshrrev_b32_e32 v66, 16, v9
+; GFX11-TRUE16-NEXT: v_lshrrev_b32_e32 v67, 16, v8
+; GFX11-TRUE16-NEXT: v_lshrrev_b32_e32 v68, 16, v7
+; GFX11-TRUE16-NEXT: v_lshrrev_b32_e32 v69, 16, v6
+; GFX11-TRUE16-NEXT: v_lshrrev_b32_e32 v70, 16, v5
+; GFX11-TRUE16-NEXT: v_lshrrev_b32_e32 v71, 16, v4
+; GFX11-TRUE16-NEXT: v_lshrrev_b32_e32 v80, 16, v3
+; GFX11-TRUE16-NEXT: v_lshrrev_b32_e32 v81, 16, v2
+; GFX11-TRUE16-NEXT: v_lshrrev_b32_e32 v82, 16, v1
+; GFX11-TRUE16-NEXT: v_lshrrev_b32_e32 v83, 16, v0
+; GFX11-TRUE16-NEXT: s_and_not1_b32 vcc_lo, exec_lo, s0
+; GFX11-TRUE16-NEXT: s_cbranch_vccnz .LBB49_3
+; GFX11-TRUE16-NEXT: .LBB49_2: ; %cmp.true
+; GFX11-TRUE16-NEXT: v_add_f64 v[28:29], v[28:29], 1.0
+; GFX11-TRUE16-NEXT: v_add_f64 v[26:27], v[26:27], 1.0
+; GFX11-TRUE16-NEXT: v_add_f64 v[24:25], v[24:25], 1.0
+; GFX11-TRUE16-NEXT: v_add_f64 v[22:23], v[22:23], 1.0
+; GFX11-TRUE16-NEXT: v_add_f64 v[20:21], v[20:21], 1.0
+; GFX11-TRUE16-NEXT: v_add_f64 v[18:19], v[18:19], 1.0
+; GFX11-TRUE16-NEXT: v_add_f64 v[16:17], v[16:17], 1.0
+; GFX11-TRUE16-NEXT: v_add_f64 v[14:15], v[14:15], 1.0
+; GFX11-TRUE16-NEXT: v_add_f64 v[12:13], v[12:13], 1.0
+; GFX11-TRUE16-NEXT: v_add_f64 v[10:11], v[10:11], 1.0
+; GFX11-TRUE16-NEXT: v_add_f64 v[8:9], v[8:9], 1.0
+; GFX11-TRUE16-NEXT: v_add_f64 v[6:7], v[6:7], 1.0
+; GFX11-TRUE16-NEXT: v_add_f64 v[4:5], v[4:5], 1.0
+; GFX11-TRUE16-NEXT: v_add_f64 v[2:3], v[2:3], 1.0
+; GFX11-TRUE16-NEXT: v_add_f64 v[0:1], v[0:1], 1.0
+; GFX11-TRUE16-NEXT: v_lshrrev_b32_e32 v30, 16, v29
+; GFX11-TRUE16-NEXT: v_lshrrev_b32_e32 v31, 16, v28
+; GFX11-TRUE16-NEXT: v_lshrrev_b32_e32 v32, 16, v27
+; GFX11-TRUE16-NEXT: v_lshrrev_b32_e32 v33, 16, v26
+; GFX11-TRUE16-NEXT: v_lshrrev_b32_e32 v34, 16, v25
+; GFX11-TRUE16-NEXT: v_lshrrev_b32_e32 v35, 16, v24
+; GFX11-TRUE16-NEXT: v_lshrrev_b32_e32 v36, 16, v23
+; GFX11-TRUE16-NEXT: v_lshrrev_b32_e32 v37, 16, v22
+; GFX11-TRUE16-NEXT: v_lshrrev_b32_e32 v38, 16, v21
+; GFX11-TRUE16-NEXT: v_lshrrev_b32_e32 v39, 16, v20
+; GFX11-TRUE16-NEXT: v_lshrrev_b32_e32 v48, 16, v19
+; GFX11-TRUE16-NEXT: v_lshrrev_b32_e32 v49, 16, v18
+; GFX11-TRUE16-NEXT: v_lshrrev_b32_e32 v50, 16, v17
+; GFX11-TRUE16-NEXT: v_lshrrev_b32_e32 v51, 16, v16
+; GFX11-TRUE16-NEXT: v_lshrrev_b32_e32 v52, 16, v15
+; GFX11-TRUE16-NEXT: v_lshrrev_b32_e32 v53, 16, v14
+; GFX11-TRUE16-NEXT: v_lshrrev_b32_e32 v54, 16, v13
+; GFX11-TRUE16-NEXT: v_lshrrev_b32_e32 v55, 16, v12
+; GFX11-TRUE16-NEXT: v_lshrrev_b32_e32 v64, 16, v11
+; GFX11-TRUE16-NEXT: v_lshrrev_b32_e32 v65, 16, v10
+; GFX11-TRUE16-NEXT: v_lshrrev_b32_e32 v66, 16, v9
+; GFX11-TRUE16-NEXT: v_lshrrev_b32_e32 v67, 16, v8
+; GFX11-TRUE16-NEXT: v_lshrrev_b32_e32 v68, 16, v7
+; GFX11-TRUE16-NEXT: v_lshrrev_b32_e32 v69, 16, v6
+; GFX11-TRUE16-NEXT: v_lshrrev_b32_e32 v70, 16, v5
+; GFX11-TRUE16-NEXT: v_lshrrev_b32_e32 v71, 16, v4
+; GFX11-TRUE16-NEXT: v_lshrrev_b32_e32 v80, 16, v3
+; GFX11-TRUE16-NEXT: v_lshrrev_b32_e32 v81, 16, v2
+; GFX11-TRUE16-NEXT: v_lshrrev_b32_e32 v82, 16, v1
+; GFX11-TRUE16-NEXT: v_lshrrev_b32_e32 v83, 16, v0
+; GFX11-TRUE16-NEXT: .LBB49_3: ; %end
+; GFX11-TRUE16-NEXT: s_delay_alu instid0(VALU_DEP_1) | instskip(NEXT) | instid1(VALU_DEP_4)
+; GFX11-TRUE16-NEXT: v_dual_mov_b32 v83, v83 :: v_dual_mov_b32 v82, v82
+; GFX11-TRUE16-NEXT: v_dual_mov_b32 v81, v81 :: v_dual_mov_b32 v80, v80
+; GFX11-TRUE16-NEXT: v_dual_mov_b32 v71, v71 :: v_dual_mov_b32 v70, v70
+; GFX11-TRUE16-NEXT: v_dual_mov_b32 v69, v69 :: v_dual_mov_b32 v68, v68
+; GFX11-TRUE16-NEXT: v_dual_mov_b32 v67, v67 :: v_dual_mov_b32 v66, v66
+; GFX11-TRUE16-NEXT: v_dual_mov_b32 v65, v65 :: v_dual_mov_b32 v64, v64
+; GFX11-TRUE16-NEXT: v_dual_mov_b32 v55, v55 :: v_dual_mov_b32 v54, v54
+; GFX11-TRUE16-NEXT: v_dual_mov_b32 v53, v53 :: v_dual_mov_b32 v52, v52
+; GFX11-TRUE16-NEXT: v_dual_mov_b32 v51, v51 :: v_dual_mov_b32 v50, v50
+; GFX11-TRUE16-NEXT: v_dual_mov_b32 v49, v49 :: v_dual_mov_b32 v48, v48
+; GFX11-TRUE16-NEXT: v_dual_mov_b32 v39, v39 :: v_dual_mov_b32 v38, v38
+; GFX11-TRUE16-NEXT: v_dual_mov_b32 v37, v37 :: v_dual_mov_b32 v36, v36
+; GFX11-TRUE16-NEXT: v_dual_mov_b32 v35, v35 :: v_dual_mov_b32 v34, v34
+; GFX11-TRUE16-NEXT: v_dual_mov_b32 v33, v33 :: v_dual_mov_b32 v32, v32
+; GFX11-TRUE16-NEXT: v_dual_mov_b32 v31, v31 :: v_dual_mov_b32 v30, v30
+; GFX11-TRUE16-NEXT: v_mov_b16_e32 v0.h, v83.l
+; GFX11-TRUE16-NEXT: v_mov_b16_e32 v1.h, v82.l
+; GFX11-TRUE16-NEXT: v_mov_b16_e32 v2.h, v81.l
+; GFX11-TRUE16-NEXT: v_mov_b16_e32 v3.h, v80.l
+; GFX11-TRUE16-NEXT: v_mov_b16_e32 v4.h, v71.l
+; GFX11-TRUE16-NEXT: v_mov_b16_e32 v5.h, v70.l
+; GFX11-TRUE16-NEXT: v_mov_b16_e32 v6.h, v69.l
+; GFX11-TRUE16-NEXT: v_mov_b16_e32 v7.h, v68.l
+; GFX11-TRUE16-NEXT: v_mov_b16_e32 v8.h, v67.l
+; GFX11-TRUE16-NEXT: v_mov_b16_e32 v9.h, v66.l
+; GFX11-TRUE16-NEXT: v_mov_b16_e32 v10.h, v65.l
+; GFX11-TRUE16-NEXT: v_mov_b16_e32 v11.h, v64.l
+; GFX11-TRUE16-NEXT: v_mov_b16_e32 v12.h, v55.l
+; GFX11-TRUE16-NEXT: v_mov_b16_e32 v13.h, v54.l
+; GFX11-TRUE16-NEXT: v_mov_b16_e32 v14.h, v53.l
+; GFX11-TRUE16-NEXT: v_mov_b16_e32 v15.h, v52.l
+; GFX11-TRUE16-NEXT: v_mov_b16_e32 v16.h, v51.l
+; GFX11-TRUE16-NEXT: v_mov_b16_e32 v17.h, v50.l
+; GFX11-TRUE16-NEXT: v_mov_b16_e32 v18.h, v49.l
+; GFX11-TRUE16-NEXT: v_mov_b16_e32 v19.h, v48.l
+; GFX11-TRUE16-NEXT: v_mov_b16_e32 v20.h, v39.l
+; GFX11-TRUE16-NEXT: v_mov_b16_e32 v21.h, v38.l
+; GFX11-TRUE16-NEXT: v_mov_b16_e32 v22.h, v37.l
+; GFX11-TRUE16-NEXT: v_mov_b16_e32 v23.h, v36.l
+; GFX11-TRUE16-NEXT: v_mov_b16_e32 v24.h, v35.l
+; GFX11-TRUE16-NEXT: v_mov_b16_e32 v25.h, v34.l
+; GFX11-TRUE16-NEXT: v_mov_b16_e32 v26.h, v33.l
+; GFX11-TRUE16-NEXT: v_mov_b16_e32 v27.h, v32.l
+; GFX11-TRUE16-NEXT: v_mov_b16_e32 v28.h, v31.l
+; GFX11-TRUE16-NEXT: v_mov_b16_e32 v29.h, v30.l
+; GFX11-TRUE16-NEXT: s_setpc_b64 s[30:31]
+; GFX11-TRUE16-NEXT: .LBB49_4:
+; GFX11-TRUE16-NEXT: ; implicit-def: $vgpr83
+; GFX11-TRUE16-NEXT: ; implicit-def: $vgpr82
+; GFX11-TRUE16-NEXT: ; implicit-def: $vgpr81
+; GFX11-TRUE16-NEXT: ; implicit-def: $vgpr80
+; GFX11-TRUE16-NEXT: ; implicit-def: $vgpr71
+; GFX11-TRUE16-NEXT: ; implicit-def: $vgpr70
+; GFX11-TRUE16-NEXT: ; implicit-def: $vgpr69
+; GFX11-TRUE16-NEXT: ; implicit-def: $vgpr68
+; GFX11-TRUE16-NEXT: ; implicit-def: $vgpr67
+; GFX11-TRUE16-NEXT: ; implicit-def: $vgpr66
+; GFX11-TRUE16-NEXT: ; implicit-def: $vgpr65
+; GFX11-TRUE16-NEXT: ; implicit-def: $vgpr64
+; GFX11-TRUE16-NEXT: ; implicit-def: $vgpr55
+; GFX11-TRUE16-NEXT: ; implicit-def: $vgpr54
+; GFX11-TRUE16-NEXT: ; implicit-def: $vgpr53
+; GFX11-TRUE16-NEXT: ; implicit-def: $vgpr52
+; GFX11-TRUE16-NEXT: ; implicit-def: $vgpr51
+; GFX11-TRUE16-NEXT: ; implicit-def: $vgpr50
+; GFX11-TRUE16-NEXT: ; implicit-def: $vgpr49
+; GFX11-TRUE16-NEXT: ; implicit-def: $vgpr48
+; GFX11-TRUE16-NEXT: ; implicit-def: $vgpr39
+; GFX11-TRUE16-NEXT: ; implicit-def: $vgpr38
+; GFX11-TRUE16-NEXT: ; implicit-def: $vgpr37
+; GFX11-TRUE16-NEXT: ; implicit-def: $vgpr36
+; GFX11-TRUE16-NEXT: ; implicit-def: $vgpr35
+; GFX11-TRUE16-NEXT: ; implicit-def: $vgpr34
+; GFX11-TRUE16-NEXT: ; implicit-def: $vgpr33
+; GFX11-TRUE16-NEXT: ; implicit-def: $vgpr32
+; GFX11-TRUE16-NEXT: ; implicit-def: $vgpr31
+; GFX11-TRUE16-NEXT: ; implicit-def: $vgpr30
+; GFX11-TRUE16-NEXT: s_branch .LBB49_2
+;
+; GFX11-FAKE16-LABEL: bitcast_v15f64_to_v60i16_scalar:
+; GFX11-FAKE16: ; %bb.0:
+; GFX11-FAKE16-NEXT: s_waitcnt vmcnt(0) expcnt(0) lgkmcnt(0)
+; GFX11-FAKE16-NEXT: v_cmp_ne_u32_e32 vcc_lo, 0, v12
+; GFX11-FAKE16-NEXT: v_dual_mov_b32 v30, s0 :: v_dual_mov_b32 v31, s1
+; GFX11-FAKE16-NEXT: v_dual_mov_b32 v28, s2 :: v_dual_mov_b32 v29, s3
+; GFX11-FAKE16-NEXT: v_dual_mov_b32 v26, s16 :: v_dual_mov_b32 v27, s17
+; GFX11-FAKE16-NEXT: v_dual_mov_b32 v24, s18 :: v_dual_mov_b32 v25, s19
+; GFX11-FAKE16-NEXT: v_dual_mov_b32 v22, s20 :: v_dual_mov_b32 v23, s21
+; GFX11-FAKE16-NEXT: v_dual_mov_b32 v20, s22 :: v_dual_mov_b32 v21, s23
+; GFX11-FAKE16-NEXT: v_dual_mov_b32 v18, s24 :: v_dual_mov_b32 v19, s25
+; GFX11-FAKE16-NEXT: v_dual_mov_b32 v14, s26 :: v_dual_mov_b32 v15, s27
+; GFX11-FAKE16-NEXT: v_dual_mov_b32 v16, s28 :: v_dual_mov_b32 v17, s29
+; GFX11-FAKE16-NEXT: s_mov_b32 s0, 0
+; GFX11-FAKE16-NEXT: s_and_b32 s1, vcc_lo, exec_lo
+; GFX11-FAKE16-NEXT: s_cbranch_scc0 .LBB49_4
+; GFX11-FAKE16-NEXT: ; %bb.1: ; %cmp.false
+; GFX11-FAKE16-NEXT: v_lshrrev_b32_e32 v50, 16, v11
+; GFX11-FAKE16-NEXT: v_lshrrev_b32_e32 v51, 16, v10
+; GFX11-FAKE16-NEXT: v_lshrrev_b32_e32 v52, 16, v9
+; GFX11-FAKE16-NEXT: v_lshrrev_b32_e32 v53, 16, v8
+; GFX11-FAKE16-NEXT: v_lshrrev_b32_e32 v54, 16, v7
+; GFX11-FAKE16-NEXT: v_lshrrev_b32_e32 v55, 16, v6
+; GFX11-FAKE16-NEXT: v_lshrrev_b32_e32 v64, 16, v5
+; GFX11-FAKE16-NEXT: v_lshrrev_b32_e32 v65, 16, v4
+; GFX11-FAKE16-NEXT: v_lshrrev_b32_e32 v66, 16, v3
+; GFX11-FAKE16-NEXT: v_lshrrev_b32_e32 v67, 16, v2
+; GFX11-FAKE16-NEXT: v_lshrrev_b32_e32 v68, 16, v1
+; GFX11-FAKE16-NEXT: v_lshrrev_b32_e32 v69, 16, v0
+; GFX11-FAKE16-NEXT: v_lshrrev_b32_e32 v70, 16, v17
+; GFX11-FAKE16-NEXT: v_lshrrev_b32_e32 v71, 16, v16
+; GFX11-FAKE16-NEXT: v_lshrrev_b32_e32 v80, 16, v15
+; GFX11-FAKE16-NEXT: v_lshrrev_b32_e32 v81, 16, v14
+; GFX11-FAKE16-NEXT: v_lshrrev_b32_e32 v13, 16, v19
+; GFX11-FAKE16-NEXT: v_lshrrev_b32_e32 v12, 16, v18
+; GFX11-FAKE16-NEXT: v_lshrrev_b32_e32 v35, 16, v21
+; GFX11-FAKE16-NEXT: v_lshrrev_b32_e32 v34, 16, v20
+; GFX11-FAKE16-NEXT: v_lshrrev_b32_e32 v33, 16, v23
+; GFX11-FAKE16-NEXT: v_lshrrev_b32_e32 v32, 16, v22
+; GFX11-FAKE16-NEXT: v_lshrrev_b32_e32 v82, 16, v25
+; GFX11-FAKE16-NEXT: v_lshrrev_b32_e32 v83, 16, v24
+; GFX11-FAKE16-NEXT: v_lshrrev_b32_e32 v49, 16, v27
+; GFX11-FAKE16-NEXT: v_lshrrev_b32_e32 v48, 16, v26
+; GFX11-FAKE16-NEXT: v_lshrrev_b32_e32 v39, 16, v29
+; GFX11-FAKE16-NEXT: v_lshrrev_b32_e32 v38, 16, v28
+; GFX11-FAKE16-NEXT: v_lshrrev_b32_e32 v37, 16, v31
+; GFX11-FAKE16-NEXT: v_lshrrev_b32_e32 v36, 16, v30
+; GFX11-FAKE16-NEXT: s_and_not1_b32 vcc_lo, exec_lo, s0
+; GFX11-FAKE16-NEXT: s_cbranch_vccnz .LBB49_3
+; GFX11-FAKE16-NEXT: .LBB49_2: ; %cmp.true
+; GFX11-FAKE16-NEXT: v_add_f64 v[10:11], v[10:11], 1.0
+; GFX11-FAKE16-NEXT: v_add_f64 v[8:9], v[8:9], 1.0
+; GFX11-FAKE16-NEXT: v_add_f64 v[6:7], v[6:7], 1.0
+; GFX11-FAKE16-NEXT: v_add_f64 v[4:5], v[4:5], 1.0
+; GFX11-FAKE16-NEXT: v_add_f64 v[2:3], v[2:3], 1.0
+; GFX11-FAKE16-NEXT: v_add_f64 v[0:1], v[0:1], 1.0
+; GFX11-FAKE16-NEXT: v_add_f64 v[16:17], v[16:17], 1.0
+; GFX11-FAKE16-NEXT: v_add_f64 v[14:15], v[14:15], 1.0
+; GFX11-FAKE16-NEXT: v_add_f64 v[18:19], v[18:19], 1.0
+; GFX11-FAKE16-NEXT: v_add_f64 v[20:21], v[20:21], 1.0
+; GFX11-FAKE16-NEXT: v_add_f64 v[22:23], v[22:23], 1.0
+; GFX11-FAKE16-NEXT: v_add_f64 v[24:25], v[24:25], 1.0
+; GFX11-FAKE16-NEXT: v_add_f64 v[26:27], v[26:27], 1.0
+; GFX11-FAKE16-NEXT: v_add_f64 v[28:29], v[28:29], 1.0
+; GFX11-FAKE16-NEXT: v_add_f64 v[30:31], v[30:31], 1.0
+; GFX11-FAKE16-NEXT: v_lshrrev_b32_e32 v50, 16, v11
+; GFX11-FAKE16-NEXT: v_lshrrev_b32_e32 v51, 16, v10
+; GFX11-FAKE16-NEXT: v_lshrrev_b32_e32 v52, 16, v9
+; GFX11-FAKE16-NEXT: v_lshrrev_b32_e32 v53, 16, v8
+; GFX11-FAKE16-NEXT: v_lshrrev_b32_e32 v54, 16, v7
+; GFX11-FAKE16-NEXT: v_lshrrev_b32_e32 v55, 16, v6
+; GFX11-FAKE16-NEXT: v_lshrrev_b32_e32 v64, 16, v5
+; GFX11-FAKE16-NEXT: v_lshrrev_b32_e32 v65, 16, v4
+; GFX11-FAKE16-NEXT: v_lshrrev_b32_e32 v66, 16, v3
+; GFX11-FAKE16-NEXT: v_lshrrev_b32_e32 v67, 16, v2
+; GFX11-FAKE16-NEXT: v_lshrrev_b32_e32 v68, 16, v1
+; GFX11-FAKE16-NEXT: v_lshrrev_b32_e32 v69, 16, v0
+; GFX11-FAKE16-NEXT: v_lshrrev_b32_e32 v70, 16, v17
+; GFX11-FAKE16-NEXT: v_lshrrev_b32_e32 v71, 16, v16
+; GFX11-FAKE16-NEXT: v_lshrrev_b32_e32 v80, 16, v15
+; GFX11-FAKE16-NEXT: v_lshrrev_b32_e32 v81, 16, v14
+; GFX11-FAKE16-NEXT: v_lshrrev_b32_e32 v13, 16, v19
+; GFX11-FAKE16-NEXT: v_lshrrev_b32_e32 v12, 16, v18
+; GFX11-FAKE16-NEXT: v_lshrrev_b32_e32 v35, 16, v21
+; GFX11-FAKE16-NEXT: v_lshrrev_b32_e32 v34, 16, v20
+; GFX11-FAKE16-NEXT: v_lshrrev_b32_e32 v33, 16, v23
+; GFX11-FAKE16-NEXT: v_lshrrev_b32_e32 v32, 16, v22
+; GFX11-FAKE16-NEXT: v_lshrrev_b32_e32 v82, 16, v25
+; GFX11-FAKE16-NEXT: v_lshrrev_b32_e32 v83, 16, v24
+; GFX11-FAKE16-NEXT: v_lshrrev_b32_e32 v49, 16, v27
+; GFX11-FAKE16-NEXT: v_lshrrev_b32_e32 v48, 16, v26
+; GFX11-FAKE16-NEXT: v_lshrrev_b32_e32 v39, 16, v29
+; GFX11-FAKE16-NEXT: v_lshrrev_b32_e32 v38, 16, v28
+; GFX11-FAKE16-NEXT: v_lshrrev_b32_e32 v37, 16, v31
+; GFX11-FAKE16-NEXT: v_lshrrev_b32_e32 v36, 16, v30
+; GFX11-FAKE16-NEXT: .LBB49_3: ; %end
+; GFX11-FAKE16-NEXT: v_and_b32_e32 v20, 0xffff, v20
+; GFX11-FAKE16-NEXT: v_and_b32_e32 v26, 0xffff, v26
+; GFX11-FAKE16-NEXT: v_and_b32_e32 v1, 0xffff, v1
+; GFX11-FAKE16-NEXT: v_and_b32_e32 v31, 0xffff, v31
+; GFX11-FAKE16-NEXT: v_and_b32_e32 v29, 0xffff, v29
+; GFX11-FAKE16-NEXT: v_lshl_or_b32 v34, v34, 16, v20
+; GFX11-FAKE16-NEXT: v_and_b32_e32 v19, 0xffff, v19
+; GFX11-FAKE16-NEXT: v_lshl_or_b32 v48, v48, 16, v26
+; GFX11-FAKE16-NEXT: v_and_b32_e32 v25, 0xffff, v25
+; GFX11-FAKE16-NEXT: v_and_b32_e32 v23, 0xffff, v23
+; GFX11-FAKE16-NEXT: v_and_b32_e32 v21, 0xffff, v21
+; GFX11-FAKE16-NEXT: v_and_b32_e32 v18, 0xffff, v18
+; GFX11-FAKE16-NEXT: v_lshl_or_b32 v13, v13, 16, v19
+; GFX11-FAKE16-NEXT: v_and_b32_e32 v0, 0xffff, v0
+; GFX11-FAKE16-NEXT: v_lshl_or_b32 v19, v68, 16, v1
+; GFX11-FAKE16-NEXT: v_and_b32_e32 v1, 0xffff, v3
+; GFX11-FAKE16-NEXT: v_lshl_or_b32 v37, v37, 16, v31
+; GFX11-FAKE16-NEXT: v_and_b32_e32 v30, 0xffff, v30
+; GFX11-FAKE16-NEXT: v_lshl_or_b32 v39, v39, 16, v29
+; GFX11-FAKE16-NEXT: v_and_b32_e32 v28, 0xffff, v28
+; GFX11-FAKE16-NEXT: v_lshl_or_b32 v31, v82, 16, v25
+; GFX11-FAKE16-NEXT: v_and_b32_e32 v26, 0xffff, v27
+; GFX11-FAKE16-NEXT: v_lshl_or_b32 v33, v33, 16, v23
+; GFX11-FAKE16-NEXT: v_and_b32_e32 v24, 0xffff, v24
+; GFX11-FAKE16-NEXT: v_lshl_or_b32 v35, v35, 16, v21
+; GFX11-FAKE16-NEXT: v_and_b32_e32 v22, 0xffff, v22
+; GFX11-FAKE16-NEXT: v_lshl_or_b32 v12, v12, 16, v18
+; GFX11-FAKE16-NEXT: v_lshl_or_b32 v18, v69, 16, v0
+; GFX11-FAKE16-NEXT: v_and_b32_e32 v0, 0xffff, v2
+; GFX11-FAKE16-NEXT: v_and_b32_e32 v2, 0xffff, v4
+; GFX11-FAKE16-NEXT: v_and_b32_e32 v3, 0xffff, v5
+; GFX11-FAKE16-NEXT: v_and_b32_e32 v4, 0xffff, v6
+; GFX11-FAKE16-NEXT: v_lshl_or_b32 v21, v66, 16, v1
+; GFX11-FAKE16-NEXT: v_and_b32_e32 v1, 0xffff, v8
+; GFX11-FAKE16-NEXT: v_lshl_or_b32 v38, v38, 16, v28
+; GFX11-FAKE16-NEXT: v_lshl_or_b32 v32, v32, 16, v22
+; GFX11-FAKE16-NEXT: v_and_b32_e32 v14, 0xffff, v14
+; GFX11-FAKE16-NEXT: v_and_b32_e32 v15, 0xffff, v15
+; GFX11-FAKE16-NEXT: v_and_b32_e32 v16, 0xffff, v16
+; GFX11-FAKE16-NEXT: v_and_b32_e32 v17, 0xffff, v17
+; GFX11-FAKE16-NEXT: v_lshl_or_b32 v20, v67, 16, v0
+; GFX11-FAKE16-NEXT: v_lshl_or_b32 v22, v65, 16, v2
+; GFX11-FAKE16-NEXT: v_lshl_or_b32 v23, v64, 16, v3
+; GFX11-FAKE16-NEXT: v_and_b32_e32 v0, 0xffff, v7
+; GFX11-FAKE16-NEXT: v_and_b32_e32 v2, 0xffff, v9
+; GFX11-FAKE16-NEXT: v_and_b32_e32 v3, 0xffff, v10
+; GFX11-FAKE16-NEXT: v_mov_b32_e32 v7, v31
+; GFX11-FAKE16-NEXT: v_lshl_or_b32 v49, v49, 16, v26
+; GFX11-FAKE16-NEXT: v_lshl_or_b32 v26, v53, 16, v1
+; GFX11-FAKE16-NEXT: v_mov_b32_e32 v1, v37
+; GFX11-FAKE16-NEXT: v_lshl_or_b32 v36, v36, 16, v30
+; GFX11-FAKE16-NEXT: v_mov_b32_e32 v9, v33
+; GFX11-FAKE16-NEXT: v_lshl_or_b32 v30, v83, 16, v24
+; GFX11-FAKE16-NEXT: v_lshl_or_b32 v24, v55, 16, v4
+; GFX11-FAKE16-NEXT: v_and_b32_e32 v4, 0xffff, v11
+; GFX11-FAKE16-NEXT: v_lshl_or_b32 v14, v81, 16, v14
+; GFX11-FAKE16-NEXT: v_lshl_or_b32 v15, v80, 16, v15
+; GFX11-FAKE16-NEXT: v_lshl_or_b32 v16, v71, 16, v16
+; GFX11-FAKE16-NEXT: v_lshl_or_b32 v17, v70, 16, v17
+; GFX11-FAKE16-NEXT: v_lshl_or_b32 v25, v54, 16, v0
+; GFX11-FAKE16-NEXT: v_lshl_or_b32 v27, v52, 16, v2
+; GFX11-FAKE16-NEXT: v_lshl_or_b32 v28, v51, 16, v3
+; GFX11-FAKE16-NEXT: v_lshl_or_b32 v29, v50, 16, v4
+; GFX11-FAKE16-NEXT: v_mov_b32_e32 v0, v36
+; GFX11-FAKE16-NEXT: v_dual_mov_b32 v2, v38 :: v_dual_mov_b32 v3, v39
+; GFX11-FAKE16-NEXT: v_dual_mov_b32 v4, v48 :: v_dual_mov_b32 v5, v49
+; GFX11-FAKE16-NEXT: v_mov_b32_e32 v6, v30
+; GFX11-FAKE16-NEXT: v_mov_b32_e32 v8, v32
+; GFX11-FAKE16-NEXT: v_dual_mov_b32 v10, v34 :: v_dual_mov_b32 v11, v35
+; GFX11-FAKE16-NEXT: s_setpc_b64 s[30:31]
+; GFX11-FAKE16-NEXT: .LBB49_4:
+; GFX11-FAKE16-NEXT: ; implicit-def: $vgpr36
+; GFX11-FAKE16-NEXT: ; implicit-def: $vgpr37
+; GFX11-FAKE16-NEXT: ; implicit-def: $vgpr38
+; GFX11-FAKE16-NEXT: ; implicit-def: $vgpr39
+; GFX11-FAKE16-NEXT: ; implicit-def: $vgpr48
+; GFX11-FAKE16-NEXT: ; implicit-def: $vgpr49
+; GFX11-FAKE16-NEXT: ; implicit-def: $vgpr83
+; GFX11-FAKE16-NEXT: ; implicit-def: $vgpr82
+; GFX11-FAKE16-NEXT: ; implicit-def: $vgpr32
+; GFX11-FAKE16-NEXT: ; implicit-def: $vgpr33
+; GFX11-FAKE16-NEXT: ; implicit-def: $vgpr34
+; GFX11-FAKE16-NEXT: ; implicit-def: $vgpr35
+; GFX11-FAKE16-NEXT: ; implicit-def: $vgpr12
+; GFX11-FAKE16-NEXT: ; implicit-def: $vgpr13
+; GFX11-FAKE16-NEXT: ; implicit-def: $vgpr81
+; GFX11-FAKE16-NEXT: ; implicit-def: $vgpr80
+; GFX11-FAKE16-NEXT: ; implicit-def: $vgpr71
+; GFX11-FAKE16-NEXT: ; implicit-def: $vgpr70
+; GFX11-FAKE16-NEXT: ; implicit-def: $vgpr69
+; GFX11-FAKE16-NEXT: ; implicit-def: $vgpr68
+; GFX11-FAKE16-NEXT: ; implicit-def: $vgpr67
+; GFX11-FAKE16-NEXT: ; implicit-def: $vgpr66
+; GFX11-FAKE16-NEXT: ; implicit-def: $vgpr65
+; GFX11-FAKE16-NEXT: ; implicit-def: $vgpr64
+; GFX11-FAKE16-NEXT: ; implicit-def: $vgpr55
+; GFX11-FAKE16-NEXT: ; implicit-def: $vgpr54
+; GFX11-FAKE16-NEXT: ; implicit-def: $vgpr53
+; GFX11-FAKE16-NEXT: ; implicit-def: $vgpr52
+; GFX11-FAKE16-NEXT: ; implicit-def: $vgpr51
+; GFX11-FAKE16-NEXT: ; implicit-def: $vgpr50
+; GFX11-FAKE16-NEXT: s_branch .LBB49_2
%cmp = icmp eq i32 %b, 0
br i1 %cmp, label %cmp.true, label %cmp.false
@@ -40632,153 +42096,305 @@ define inreg <15 x double> @bitcast_v60i16_to_v15f64_scalar(<60 x i16> inreg %a,
; GFX11-TRUE16-LABEL: bitcast_v60i16_to_v15f64_scalar:
; GFX11-TRUE16: ; %bb.0:
; GFX11-TRUE16-NEXT: s_waitcnt vmcnt(0) expcnt(0) lgkmcnt(0)
-; GFX11-TRUE16-NEXT: v_mov_b16_e32 v32.h, 0
; GFX11-TRUE16-NEXT: v_cmp_ne_u32_e32 vcc_lo, 0, v12
-; GFX11-TRUE16-NEXT: v_mov_b16_e32 v32.l, v11.h
-; GFX11-TRUE16-NEXT: v_mov_b16_e32 v33.l, v10.h
-; GFX11-TRUE16-NEXT: v_mov_b16_e32 v34.l, v9.h
-; GFX11-TRUE16-NEXT: v_mov_b16_e32 v33.h, v32.h
-; GFX11-TRUE16-NEXT: v_mov_b16_e32 v34.h, v32.h
-; GFX11-TRUE16-NEXT: v_mov_b16_e32 v35.l, v8.h
-; GFX11-TRUE16-NEXT: v_mov_b16_e32 v35.h, v32.h
-; GFX11-TRUE16-NEXT: v_mov_b16_e32 v36.l, v7.h
-; GFX11-TRUE16-NEXT: v_mov_b16_e32 v36.h, v32.h
-; GFX11-TRUE16-NEXT: v_mov_b16_e32 v37.l, v6.h
-; GFX11-TRUE16-NEXT: v_mov_b16_e32 v37.h, v32.h
-; GFX11-TRUE16-NEXT: v_mov_b16_e32 v38.l, v5.h
-; GFX11-TRUE16-NEXT: v_mov_b16_e32 v38.h, v32.h
-; GFX11-TRUE16-NEXT: v_mov_b16_e32 v39.l, v4.h
-; GFX11-TRUE16-NEXT: v_mov_b16_e32 v39.h, v32.h
-; GFX11-TRUE16-NEXT: v_mov_b16_e32 v48.l, v3.h
-; GFX11-TRUE16-NEXT: v_mov_b16_e32 v48.h, v32.h
-; GFX11-TRUE16-NEXT: v_mov_b16_e32 v49.l, v2.h
-; GFX11-TRUE16-NEXT: v_mov_b16_e32 v49.h, v32.h
-; GFX11-TRUE16-NEXT: v_mov_b16_e32 v50.l, v1.h
-; GFX11-TRUE16-NEXT: v_mov_b16_e32 v50.h, v32.h
-; GFX11-TRUE16-NEXT: v_mov_b16_e32 v51.l, v0.h
-; GFX11-TRUE16-NEXT: v_mov_b16_e32 v51.h, v32.h
-; GFX11-TRUE16-NEXT: v_and_b32_e32 v71, 0xffff, v0
-; GFX11-TRUE16-NEXT: v_and_b32_e32 v70, 0xffff, v1
-; GFX11-TRUE16-NEXT: v_and_b32_e32 v69, 0xffff, v2
-; GFX11-TRUE16-NEXT: v_and_b32_e32 v68, 0xffff, v3
-; GFX11-TRUE16-NEXT: v_and_b32_e32 v67, 0xffff, v4
-; GFX11-TRUE16-NEXT: v_and_b32_e32 v66, 0xffff, v5
-; GFX11-TRUE16-NEXT: v_and_b32_e32 v65, 0xffff, v6
-; GFX11-TRUE16-NEXT: v_and_b32_e32 v64, 0xffff, v7
-; GFX11-TRUE16-NEXT: v_and_b32_e32 v55, 0xffff, v8
-; GFX11-TRUE16-NEXT: v_and_b32_e32 v54, 0xffff, v9
-; GFX11-TRUE16-NEXT: v_and_b32_e32 v53, 0xffff, v10
-; GFX11-TRUE16-NEXT: v_and_b32_e32 v52, 0xffff, v11
-; GFX11-TRUE16-NEXT: s_lshr_b32 s40, s29, 16
-; GFX11-TRUE16-NEXT: s_lshr_b32 s41, s28, 16
-; GFX11-TRUE16-NEXT: s_lshr_b32 s42, s27, 16
-; GFX11-TRUE16-NEXT: s_lshr_b32 s14, s26, 16
-; GFX11-TRUE16-NEXT: s_lshr_b32 s13, s25, 16
-; GFX11-TRUE16-NEXT: s_lshr_b32 s12, s24, 16
-; GFX11-TRUE16-NEXT: s_lshr_b32 s11, s23, 16
-; GFX11-TRUE16-NEXT: s_lshr_b32 s10, s22, 16
-; GFX11-TRUE16-NEXT: s_lshr_b32 s9, s21, 16
-; GFX11-TRUE16-NEXT: s_lshr_b32 s8, s20, 16
-; GFX11-TRUE16-NEXT: s_lshr_b32 s7, s19, 16
-; GFX11-TRUE16-NEXT: s_lshr_b32 s6, s18, 16
-; GFX11-TRUE16-NEXT: s_lshr_b32 s5, s17, 16
-; GFX11-TRUE16-NEXT: s_lshr_b32 s4, s16, 16
-; GFX11-TRUE16-NEXT: s_lshr_b32 s43, s3, 16
-; GFX11-TRUE16-NEXT: s_lshr_b32 s44, s2, 16
-; GFX11-TRUE16-NEXT: s_lshr_b32 s45, s1, 16
-; GFX11-TRUE16-NEXT: s_lshr_b32 s46, s0, 16
-; GFX11-TRUE16-NEXT: s_mov_b32 s15, 0
-; GFX11-TRUE16-NEXT: s_pack_ll_b32_b16 s0, s0, s46
-; GFX11-TRUE16-NEXT: s_pack_ll_b32_b16 s1, s1, s45
-; GFX11-TRUE16-NEXT: s_pack_ll_b32_b16 s2, s2, s44
-; GFX11-TRUE16-NEXT: s_pack_ll_b32_b16 s3, s3, s43
-; GFX11-TRUE16-NEXT: s_pack_ll_b32_b16 s4, s16, s4
-; GFX11-TRUE16-NEXT: s_pack_ll_b32_b16 s5, s17, s5
-; GFX11-TRUE16-NEXT: s_pack_ll_b32_b16 s6, s18, s6
-; GFX11-TRUE16-NEXT: s_pack_ll_b32_b16 s7, s19, s7
-; GFX11-TRUE16-NEXT: s_pack_ll_b32_b16 s8, s20, s8
-; GFX11-TRUE16-NEXT: s_pack_ll_b32_b16 s9, s21, s9
-; GFX11-TRUE16-NEXT: s_pack_ll_b32_b16 s10, s22, s10
-; GFX11-TRUE16-NEXT: s_pack_ll_b32_b16 s11, s23, s11
-; GFX11-TRUE16-NEXT: s_pack_ll_b32_b16 s12, s24, s12
-; GFX11-TRUE16-NEXT: s_pack_ll_b32_b16 s13, s25, s13
-; GFX11-TRUE16-NEXT: s_pack_ll_b32_b16 s14, s26, s14
-; GFX11-TRUE16-NEXT: s_pack_ll_b32_b16 s16, s27, s42
-; GFX11-TRUE16-NEXT: s_pack_ll_b32_b16 s17, s28, s41
-; GFX11-TRUE16-NEXT: s_pack_ll_b32_b16 s18, s29, s40
+; GFX11-TRUE16-NEXT: s_clause 0x1f
+; GFX11-TRUE16-NEXT: scratch_store_b32 off, v40, s32 offset:316
+; GFX11-TRUE16-NEXT: scratch_store_b32 off, v41, s32 offset:312
+; GFX11-TRUE16-NEXT: scratch_store_b32 off, v42, s32 offset:308
+; GFX11-TRUE16-NEXT: scratch_store_b32 off, v43, s32 offset:304
+; GFX11-TRUE16-NEXT: scratch_store_b32 off, v44, s32 offset:300
+; GFX11-TRUE16-NEXT: scratch_store_b32 off, v45, s32 offset:296
+; GFX11-TRUE16-NEXT: scratch_store_b32 off, v46, s32 offset:292
+; GFX11-TRUE16-NEXT: scratch_store_b32 off, v47, s32 offset:288
+; GFX11-TRUE16-NEXT: scratch_store_b32 off, v56, s32 offset:284
+; GFX11-TRUE16-NEXT: scratch_store_b32 off, v57, s32 offset:280
+; GFX11-TRUE16-NEXT: scratch_store_b32 off, v58, s32 offset:276
+; GFX11-TRUE16-NEXT: scratch_store_b32 off, v59, s32 offset:272
+; GFX11-TRUE16-NEXT: scratch_store_b32 off, v60, s32 offset:268
+; GFX11-TRUE16-NEXT: scratch_store_b32 off, v61, s32 offset:264
+; GFX11-TRUE16-NEXT: scratch_store_b32 off, v62, s32 offset:260
+; GFX11-TRUE16-NEXT: scratch_store_b32 off, v63, s32 offset:256
+; GFX11-TRUE16-NEXT: scratch_store_b32 off, v72, s32 offset:252
+; GFX11-TRUE16-NEXT: scratch_store_b32 off, v73, s32 offset:248
+; GFX11-TRUE16-NEXT: scratch_store_b32 off, v74, s32 offset:244
+; GFX11-TRUE16-NEXT: scratch_store_b32 off, v75, s32 offset:240
+; GFX11-TRUE16-NEXT: scratch_store_b32 off, v76, s32 offset:236
+; GFX11-TRUE16-NEXT: scratch_store_b32 off, v77, s32 offset:232
+; GFX11-TRUE16-NEXT: scratch_store_b32 off, v78, s32 offset:228
+; GFX11-TRUE16-NEXT: scratch_store_b32 off, v79, s32 offset:224
+; GFX11-TRUE16-NEXT: scratch_store_b32 off, v88, s32 offset:220
+; GFX11-TRUE16-NEXT: scratch_store_b32 off, v89, s32 offset:216
+; GFX11-TRUE16-NEXT: scratch_store_b32 off, v90, s32 offset:212
+; GFX11-TRUE16-NEXT: scratch_store_b32 off, v91, s32 offset:208
+; GFX11-TRUE16-NEXT: scratch_store_b32 off, v92, s32 offset:204
+; GFX11-TRUE16-NEXT: scratch_store_b32 off, v93, s32 offset:200
+; GFX11-TRUE16-NEXT: scratch_store_b32 off, v94, s32 offset:196
+; GFX11-TRUE16-NEXT: scratch_store_b32 off, v95, s32 offset:192
+; GFX11-TRUE16-NEXT: s_clause 0x1f
+; GFX11-TRUE16-NEXT: scratch_store_b32 off, v104, s32 offset:188
+; GFX11-TRUE16-NEXT: scratch_store_b32 off, v105, s32 offset:184
+; GFX11-TRUE16-NEXT: scratch_store_b32 off, v106, s32 offset:180
+; GFX11-TRUE16-NEXT: scratch_store_b32 off, v107, s32 offset:176
+; GFX11-TRUE16-NEXT: scratch_store_b32 off, v108, s32 offset:172
+; GFX11-TRUE16-NEXT: scratch_store_b32 off, v109, s32 offset:168
+; GFX11-TRUE16-NEXT: scratch_store_b32 off, v110, s32 offset:164
+; GFX11-TRUE16-NEXT: scratch_store_b32 off, v111, s32 offset:160
+; GFX11-TRUE16-NEXT: scratch_store_b32 off, v120, s32 offset:156
+; GFX11-TRUE16-NEXT: scratch_store_b32 off, v121, s32 offset:152
+; GFX11-TRUE16-NEXT: scratch_store_b32 off, v122, s32 offset:148
+; GFX11-TRUE16-NEXT: scratch_store_b32 off, v123, s32 offset:144
+; GFX11-TRUE16-NEXT: scratch_store_b32 off, v124, s32 offset:140
+; GFX11-TRUE16-NEXT: scratch_store_b32 off, v125, s32 offset:136
+; GFX11-TRUE16-NEXT: scratch_store_b32 off, v126, s32 offset:132
+; GFX11-TRUE16-NEXT: scratch_store_b32 off, v127, s32 offset:128
+; GFX11-TRUE16-NEXT: scratch_store_b32 off, v136, s32 offset:124
+; GFX11-TRUE16-NEXT: scratch_store_b32 off, v137, s32 offset:120
+; GFX11-TRUE16-NEXT: scratch_store_b32 off, v138, s32 offset:116
+; GFX11-TRUE16-NEXT: scratch_store_b32 off, v139, s32 offset:112
+; GFX11-TRUE16-NEXT: scratch_store_b32 off, v140, s32 offset:108
+; GFX11-TRUE16-NEXT: scratch_store_b32 off, v141, s32 offset:104
+; GFX11-TRUE16-NEXT: scratch_store_b32 off, v142, s32 offset:100
+; GFX11-TRUE16-NEXT: scratch_store_b32 off, v143, s32 offset:96
+; GFX11-TRUE16-NEXT: scratch_store_b32 off, v152, s32 offset:92
+; GFX11-TRUE16-NEXT: scratch_store_b32 off, v153, s32 offset:88
+; GFX11-TRUE16-NEXT: scratch_store_b32 off, v154, s32 offset:84
+; GFX11-TRUE16-NEXT: scratch_store_b32 off, v155, s32 offset:80
+; GFX11-TRUE16-NEXT: scratch_store_b32 off, v156, s32 offset:76
+; GFX11-TRUE16-NEXT: scratch_store_b32 off, v157, s32 offset:72
+; GFX11-TRUE16-NEXT: scratch_store_b32 off, v158, s32 offset:68
+; GFX11-TRUE16-NEXT: scratch_store_b32 off, v159, s32 offset:64
+; GFX11-TRUE16-NEXT: s_clause 0xf
+; GFX11-TRUE16-NEXT: scratch_store_b32 off, v168, s32 offset:60
+; GFX11-TRUE16-NEXT: scratch_store_b32 off, v169, s32 offset:56
+; GFX11-TRUE16-NEXT: scratch_store_b32 off, v170, s32 offset:52
+; GFX11-TRUE16-NEXT: scratch_store_b32 off, v171, s32 offset:48
+; GFX11-TRUE16-NEXT: scratch_store_b32 off, v172, s32 offset:44
+; GFX11-TRUE16-NEXT: scratch_store_b32 off, v173, s32 offset:40
+; GFX11-TRUE16-NEXT: scratch_store_b32 off, v174, s32 offset:36
+; GFX11-TRUE16-NEXT: scratch_store_b32 off, v175, s32 offset:32
+; GFX11-TRUE16-NEXT: scratch_store_b32 off, v184, s32 offset:28
+; GFX11-TRUE16-NEXT: scratch_store_b32 off, v185, s32 offset:24
+; GFX11-TRUE16-NEXT: scratch_store_b32 off, v186, s32 offset:20
+; GFX11-TRUE16-NEXT: scratch_store_b32 off, v187, s32 offset:16
+; GFX11-TRUE16-NEXT: scratch_store_b32 off, v188, s32 offset:12
+; GFX11-TRUE16-NEXT: scratch_store_b32 off, v189, s32 offset:8
+; GFX11-TRUE16-NEXT: scratch_store_b32 off, v190, s32 offset:4
+; GFX11-TRUE16-NEXT: scratch_store_b32 off, v191, s32
+; GFX11-TRUE16-NEXT: v_dual_mov_b32 v29, v11 :: v_dual_mov_b32 v28, v10
+; GFX11-TRUE16-NEXT: v_dual_mov_b32 v30, v9 :: v_dual_mov_b32 v25, v7
+; GFX11-TRUE16-NEXT: v_dual_mov_b32 v26, v8 :: v_dual_mov_b32 v191, v5
+; GFX11-TRUE16-NEXT: v_dual_mov_b32 v190, v6 :: v_dual_mov_b32 v185, v4
+; GFX11-TRUE16-NEXT: v_dual_mov_b32 v186, v3 :: v_dual_mov_b32 v187, v2
+; GFX11-TRUE16-NEXT: v_dual_mov_b32 v188, v1 :: v_dual_mov_b32 v189, v0
+; GFX11-TRUE16-NEXT: s_lshr_b32 s15, s29, 16
+; GFX11-TRUE16-NEXT: s_lshr_b32 s14, s28, 16
+; GFX11-TRUE16-NEXT: s_lshr_b32 s13, s27, 16
+; GFX11-TRUE16-NEXT: s_lshr_b32 s12, s26, 16
+; GFX11-TRUE16-NEXT: s_lshr_b32 s11, s25, 16
+; GFX11-TRUE16-NEXT: s_lshr_b32 s10, s24, 16
+; GFX11-TRUE16-NEXT: s_lshr_b32 s9, s23, 16
+; GFX11-TRUE16-NEXT: s_lshr_b32 s8, s22, 16
+; GFX11-TRUE16-NEXT: s_lshr_b32 s7, s21, 16
+; GFX11-TRUE16-NEXT: s_lshr_b32 s6, s20, 16
+; GFX11-TRUE16-NEXT: s_lshr_b32 s5, s19, 16
+; GFX11-TRUE16-NEXT: s_lshr_b32 s4, s18, 16
+; GFX11-TRUE16-NEXT: s_lshr_b32 s43, s17, 16
+; GFX11-TRUE16-NEXT: s_lshr_b32 s44, s16, 16
+; GFX11-TRUE16-NEXT: s_lshr_b32 s45, s3, 16
+; GFX11-TRUE16-NEXT: s_lshr_b32 s46, s2, 16
+; GFX11-TRUE16-NEXT: s_lshr_b32 s41, s1, 16
+; GFX11-TRUE16-NEXT: s_lshr_b32 s40, s0, 16
+; GFX11-TRUE16-NEXT: s_mov_b32 s42, 0
+; GFX11-TRUE16-NEXT: s_pack_ll_b32_b16 s40, s0, s40
+; GFX11-TRUE16-NEXT: s_pack_ll_b32_b16 s41, s1, s41
+; GFX11-TRUE16-NEXT: s_pack_ll_b32_b16 s0, s2, s46
+; GFX11-TRUE16-NEXT: s_pack_ll_b32_b16 s1, s3, s45
+; GFX11-TRUE16-NEXT: s_pack_ll_b32_b16 s2, s16, s44
+; GFX11-TRUE16-NEXT: s_pack_ll_b32_b16 s3, s17, s43
+; GFX11-TRUE16-NEXT: s_pack_ll_b32_b16 s4, s18, s4
+; GFX11-TRUE16-NEXT: s_pack_ll_b32_b16 s5, s19, s5
+; GFX11-TRUE16-NEXT: s_pack_ll_b32_b16 s6, s20, s6
+; GFX11-TRUE16-NEXT: s_pack_ll_b32_b16 s7, s21, s7
+; GFX11-TRUE16-NEXT: s_pack_ll_b32_b16 s8, s22, s8
+; GFX11-TRUE16-NEXT: s_pack_ll_b32_b16 s9, s23, s9
+; GFX11-TRUE16-NEXT: s_pack_ll_b32_b16 s10, s24, s10
+; GFX11-TRUE16-NEXT: s_pack_ll_b32_b16 s11, s25, s11
+; GFX11-TRUE16-NEXT: s_pack_ll_b32_b16 s12, s26, s12
+; GFX11-TRUE16-NEXT: s_pack_ll_b32_b16 s13, s27, s13
+; GFX11-TRUE16-NEXT: s_pack_ll_b32_b16 s14, s28, s14
+; GFX11-TRUE16-NEXT: s_pack_ll_b32_b16 s15, s29, s15
; GFX11-TRUE16-NEXT: s_and_b32 s47, vcc_lo, exec_lo
; GFX11-TRUE16-NEXT: s_cbranch_scc0 .LBB51_4
; GFX11-TRUE16-NEXT: ; %bb.1: ; %cmp.false
-; GFX11-TRUE16-NEXT: v_lshl_or_b32 v18, v51, 16, v71
-; GFX11-TRUE16-NEXT: v_lshl_or_b32 v19, v50, 16, v70
-; GFX11-TRUE16-NEXT: v_lshl_or_b32 v20, v49, 16, v69
-; GFX11-TRUE16-NEXT: v_lshl_or_b32 v21, v48, 16, v68
-; GFX11-TRUE16-NEXT: v_lshl_or_b32 v22, v39, 16, v67
-; GFX11-TRUE16-NEXT: v_lshl_or_b32 v23, v38, 16, v66
-; GFX11-TRUE16-NEXT: v_lshl_or_b32 v24, v37, 16, v65
-; GFX11-TRUE16-NEXT: v_lshl_or_b32 v25, v36, 16, v64
-; GFX11-TRUE16-NEXT: v_lshl_or_b32 v26, v35, 16, v55
-; GFX11-TRUE16-NEXT: v_lshl_or_b32 v27, v34, 16, v54
-; GFX11-TRUE16-NEXT: v_lshl_or_b32 v28, v33, 16, v53
-; GFX11-TRUE16-NEXT: v_lshl_or_b32 v29, v32, 16, v52
-; GFX11-TRUE16-NEXT: v_dual_mov_b32 v0, s0 :: v_dual_mov_b32 v1, s1
-; GFX11-TRUE16-NEXT: v_dual_mov_b32 v2, s2 :: v_dual_mov_b32 v3, s3
-; GFX11-TRUE16-NEXT: v_dual_mov_b32 v4, s4 :: v_dual_mov_b32 v5, s5
-; GFX11-TRUE16-NEXT: v_dual_mov_b32 v6, s6 :: v_dual_mov_b32 v7, s7
-; GFX11-TRUE16-NEXT: v_dual_mov_b32 v8, s8 :: v_dual_mov_b32 v9, s9
-; GFX11-TRUE16-NEXT: v_dual_mov_b32 v10, s10 :: v_dual_mov_b32 v11, s11
-; GFX11-TRUE16-NEXT: v_dual_mov_b32 v12, s12 :: v_dual_mov_b32 v13, s13
-; GFX11-TRUE16-NEXT: v_dual_mov_b32 v14, s14 :: v_dual_mov_b32 v15, s16
-; GFX11-TRUE16-NEXT: v_dual_mov_b32 v16, s17 :: v_dual_mov_b32 v17, s18
-; GFX11-TRUE16-NEXT: s_and_not1_b32 vcc_lo, exec_lo, s15
+; GFX11-TRUE16-NEXT: v_dual_mov_b32 v0, s40 :: v_dual_mov_b32 v5, s0
+; GFX11-TRUE16-NEXT: v_dual_mov_b32 v2, s41 :: v_dual_mov_b32 v9, s1
+; GFX11-TRUE16-NEXT: v_dual_mov_b32 v14, s2 :: v_dual_mov_b32 v27, s4
+; GFX11-TRUE16-NEXT: v_dual_mov_b32 v20, s3 :: v_dual_mov_b32 v35, s5
+; GFX11-TRUE16-NEXT: v_dual_mov_b32 v44, s6 :: v_dual_mov_b32 v65, s8
+; GFX11-TRUE16-NEXT: v_dual_mov_b32 v54, s7 :: v_dual_mov_b32 v77, s9
+; GFX11-TRUE16-NEXT: v_dual_mov_b32 v90, s10 :: v_dual_mov_b32 v119, s12
+; GFX11-TRUE16-NEXT: v_dual_mov_b32 v104, s11 :: v_dual_mov_b32 v135, s13
+; GFX11-TRUE16-NEXT: v_mov_b32_e32 v152, s14
+; GFX11-TRUE16-NEXT: v_mov_b32_e32 v170, s15
+; GFX11-TRUE16-NEXT: s_and_not1_b32 vcc_lo, exec_lo, s42
; GFX11-TRUE16-NEXT: s_cbranch_vccnz .LBB51_3
; GFX11-TRUE16-NEXT: .LBB51_2: ; %cmp.true
-; GFX11-TRUE16-NEXT: v_lshl_or_b32 v18, v51, 16, v71
-; GFX11-TRUE16-NEXT: v_lshl_or_b32 v19, v50, 16, v70
-; GFX11-TRUE16-NEXT: v_lshl_or_b32 v20, v49, 16, v69
-; GFX11-TRUE16-NEXT: v_lshl_or_b32 v21, v48, 16, v68
-; GFX11-TRUE16-NEXT: v_lshl_or_b32 v22, v39, 16, v67
-; GFX11-TRUE16-NEXT: v_lshl_or_b32 v23, v38, 16, v66
-; GFX11-TRUE16-NEXT: v_lshl_or_b32 v24, v37, 16, v65
-; GFX11-TRUE16-NEXT: v_lshl_or_b32 v25, v36, 16, v64
-; GFX11-TRUE16-NEXT: v_lshl_or_b32 v26, v35, 16, v55
-; GFX11-TRUE16-NEXT: v_lshl_or_b32 v27, v34, 16, v54
-; GFX11-TRUE16-NEXT: v_lshl_or_b32 v28, v33, 16, v53
-; GFX11-TRUE16-NEXT: v_lshl_or_b32 v29, v32, 16, v52
-; GFX11-TRUE16-NEXT: v_pk_add_u16 v0, s0, 3 op_sel_hi:[1,0]
-; GFX11-TRUE16-NEXT: v_pk_add_u16 v1, s1, 3 op_sel_hi:[1,0]
-; GFX11-TRUE16-NEXT: v_pk_add_u16 v2, s2, 3 op_sel_hi:[1,0]
-; GFX11-TRUE16-NEXT: v_pk_add_u16 v3, s3, 3 op_sel_hi:[1,0]
-; GFX11-TRUE16-NEXT: v_pk_add_u16 v4, s4, 3 op_sel_hi:[1,0]
-; GFX11-TRUE16-NEXT: v_pk_add_u16 v5, s5, 3 op_sel_hi:[1,0]
-; GFX11-TRUE16-NEXT: v_pk_add_u16 v6, s6, 3 op_sel_hi:[1,0]
-; GFX11-TRUE16-NEXT: v_pk_add_u16 v7, s7, 3 op_sel_hi:[1,0]
-; GFX11-TRUE16-NEXT: v_pk_add_u16 v8, s8, 3 op_sel_hi:[1,0]
-; GFX11-TRUE16-NEXT: v_pk_add_u16 v9, s9, 3 op_sel_hi:[1,0]
-; GFX11-TRUE16-NEXT: v_pk_add_u16 v10, s10, 3 op_sel_hi:[1,0]
-; GFX11-TRUE16-NEXT: v_pk_add_u16 v11, s11, 3 op_sel_hi:[1,0]
-; GFX11-TRUE16-NEXT: v_pk_add_u16 v12, s12, 3 op_sel_hi:[1,0]
-; GFX11-TRUE16-NEXT: v_pk_add_u16 v13, s13, 3 op_sel_hi:[1,0]
-; GFX11-TRUE16-NEXT: v_pk_add_u16 v14, s14, 3 op_sel_hi:[1,0]
-; GFX11-TRUE16-NEXT: v_pk_add_u16 v15, s16, 3 op_sel_hi:[1,0]
-; GFX11-TRUE16-NEXT: v_pk_add_u16 v16, s17, 3 op_sel_hi:[1,0]
-; GFX11-TRUE16-NEXT: v_pk_add_u16 v17, s18, 3 op_sel_hi:[1,0]
-; GFX11-TRUE16-NEXT: v_pk_add_u16 v18, v18, 3 op_sel_hi:[1,0]
-; GFX11-TRUE16-NEXT: v_pk_add_u16 v19, v19, 3 op_sel_hi:[1,0]
-; GFX11-TRUE16-NEXT: v_pk_add_u16 v20, v20, 3 op_sel_hi:[1,0]
-; GFX11-TRUE16-NEXT: v_pk_add_u16 v21, v21, 3 op_sel_hi:[1,0]
-; GFX11-TRUE16-NEXT: v_pk_add_u16 v22, v22, 3 op_sel_hi:[1,0]
-; GFX11-TRUE16-NEXT: v_pk_add_u16 v23, v23, 3 op_sel_hi:[1,0]
-; GFX11-TRUE16-NEXT: v_pk_add_u16 v24, v24, 3 op_sel_hi:[1,0]
+; GFX11-TRUE16-NEXT: v_pk_add_u16 v0, s40, 3 op_sel_hi:[1,0]
+; GFX11-TRUE16-NEXT: v_pk_add_u16 v2, s41, 3 op_sel_hi:[1,0]
+; GFX11-TRUE16-NEXT: v_pk_add_u16 v189, v189, 3 op_sel_hi:[1,0]
+; GFX11-TRUE16-NEXT: v_pk_add_u16 v188, v188, 3 op_sel_hi:[1,0]
+; GFX11-TRUE16-NEXT: v_pk_add_u16 v187, v187, 3 op_sel_hi:[1,0]
+; GFX11-TRUE16-NEXT: v_pk_add_u16 v186, v186, 3 op_sel_hi:[1,0]
+; GFX11-TRUE16-NEXT: v_pk_add_u16 v185, v185, 3 op_sel_hi:[1,0]
+; GFX11-TRUE16-NEXT: v_pk_add_u16 v191, v191, 3 op_sel_hi:[1,0]
+; GFX11-TRUE16-NEXT: v_pk_add_u16 v190, v190, 3 op_sel_hi:[1,0]
; GFX11-TRUE16-NEXT: v_pk_add_u16 v25, v25, 3 op_sel_hi:[1,0]
; GFX11-TRUE16-NEXT: v_pk_add_u16 v26, v26, 3 op_sel_hi:[1,0]
-; GFX11-TRUE16-NEXT: v_pk_add_u16 v27, v27, 3 op_sel_hi:[1,0]
+; GFX11-TRUE16-NEXT: v_pk_add_u16 v30, v30, 3 op_sel_hi:[1,0]
; GFX11-TRUE16-NEXT: v_pk_add_u16 v28, v28, 3 op_sel_hi:[1,0]
; GFX11-TRUE16-NEXT: v_pk_add_u16 v29, v29, 3 op_sel_hi:[1,0]
+; GFX11-TRUE16-NEXT: v_pk_add_u16 v5, s0, 3 op_sel_hi:[1,0]
+; GFX11-TRUE16-NEXT: v_pk_add_u16 v9, s1, 3 op_sel_hi:[1,0]
+; GFX11-TRUE16-NEXT: v_pk_add_u16 v14, s2, 3 op_sel_hi:[1,0]
+; GFX11-TRUE16-NEXT: v_pk_add_u16 v20, s3, 3 op_sel_hi:[1,0]
+; GFX11-TRUE16-NEXT: v_pk_add_u16 v27, s4, 3 op_sel_hi:[1,0]
+; GFX11-TRUE16-NEXT: v_pk_add_u16 v35, s5, 3 op_sel_hi:[1,0]
+; GFX11-TRUE16-NEXT: v_pk_add_u16 v44, s6, 3 op_sel_hi:[1,0]
+; GFX11-TRUE16-NEXT: v_pk_add_u16 v54, s7, 3 op_sel_hi:[1,0]
+; GFX11-TRUE16-NEXT: v_pk_add_u16 v65, s8, 3 op_sel_hi:[1,0]
+; GFX11-TRUE16-NEXT: v_pk_add_u16 v77, s9, 3 op_sel_hi:[1,0]
+; GFX11-TRUE16-NEXT: v_pk_add_u16 v90, s10, 3 op_sel_hi:[1,0]
+; GFX11-TRUE16-NEXT: v_pk_add_u16 v104, s11, 3 op_sel_hi:[1,0]
+; GFX11-TRUE16-NEXT: v_pk_add_u16 v119, s12, 3 op_sel_hi:[1,0]
+; GFX11-TRUE16-NEXT: v_pk_add_u16 v135, s13, 3 op_sel_hi:[1,0]
+; GFX11-TRUE16-NEXT: v_pk_add_u16 v152, s14, 3 op_sel_hi:[1,0]
+; GFX11-TRUE16-NEXT: v_pk_add_u16 v170, s15, 3 op_sel_hi:[1,0]
; GFX11-TRUE16-NEXT: .LBB51_3: ; %end
+; GFX11-TRUE16-NEXT: v_dual_mov_b32 v1, v2 :: v_dual_mov_b32 v2, v5
+; GFX11-TRUE16-NEXT: v_dual_mov_b32 v5, v20 :: v_dual_mov_b32 v6, v27
+; GFX11-TRUE16-NEXT: v_dual_mov_b32 v7, v35 :: v_dual_mov_b32 v8, v44
+; GFX11-TRUE16-NEXT: v_dual_mov_b32 v11, v77 :: v_dual_mov_b32 v12, v90
+; GFX11-TRUE16-NEXT: v_mov_b32_e32 v13, v104
+; GFX11-TRUE16-NEXT: v_dual_mov_b32 v15, v135 :: v_dual_mov_b32 v16, v152
+; GFX11-TRUE16-NEXT: v_dual_mov_b32 v17, v170 :: v_dual_mov_b32 v18, v189
+; GFX11-TRUE16-NEXT: v_dual_mov_b32 v19, v188 :: v_dual_mov_b32 v20, v187
+; GFX11-TRUE16-NEXT: v_dual_mov_b32 v21, v186 :: v_dual_mov_b32 v22, v185
+; GFX11-TRUE16-NEXT: v_dual_mov_b32 v23, v191 :: v_dual_mov_b32 v24, v190
+; GFX11-TRUE16-NEXT: s_clause 0x1f
+; GFX11-TRUE16-NEXT: scratch_load_b32 v191, off, s32
+; GFX11-TRUE16-NEXT: scratch_load_b32 v190, off, s32 offset:4
+; GFX11-TRUE16-NEXT: scratch_load_b32 v189, off, s32 offset:8
+; GFX11-TRUE16-NEXT: scratch_load_b32 v188, off, s32 offset:12
+; GFX11-TRUE16-NEXT: scratch_load_b32 v187, off, s32 offset:16
+; GFX11-TRUE16-NEXT: scratch_load_b32 v186, off, s32 offset:20
+; GFX11-TRUE16-NEXT: scratch_load_b32 v185, off, s32 offset:24
+; GFX11-TRUE16-NEXT: scratch_load_b32 v184, off, s32 offset:28
+; GFX11-TRUE16-NEXT: scratch_load_b32 v175, off, s32 offset:32
+; GFX11-TRUE16-NEXT: scratch_load_b32 v174, off, s32 offset:36
+; GFX11-TRUE16-NEXT: scratch_load_b32 v173, off, s32 offset:40
+; GFX11-TRUE16-NEXT: scratch_load_b32 v172, off, s32 offset:44
+; GFX11-TRUE16-NEXT: scratch_load_b32 v171, off, s32 offset:48
+; GFX11-TRUE16-NEXT: scratch_load_b32 v170, off, s32 offset:52
+; GFX11-TRUE16-NEXT: scratch_load_b32 v169, off, s32 offset:56
+; GFX11-TRUE16-NEXT: scratch_load_b32 v168, off, s32 offset:60
+; GFX11-TRUE16-NEXT: scratch_load_b32 v159, off, s32 offset:64
+; GFX11-TRUE16-NEXT: scratch_load_b32 v158, off, s32 offset:68
+; GFX11-TRUE16-NEXT: scratch_load_b32 v157, off, s32 offset:72
+; GFX11-TRUE16-NEXT: scratch_load_b32 v156, off, s32 offset:76
+; GFX11-TRUE16-NEXT: scratch_load_b32 v155, off, s32 offset:80
+; GFX11-TRUE16-NEXT: scratch_load_b32 v154, off, s32 offset:84
+; GFX11-TRUE16-NEXT: scratch_load_b32 v153, off, s32 offset:88
+; GFX11-TRUE16-NEXT: scratch_load_b32 v152, off, s32 offset:92
+; GFX11-TRUE16-NEXT: scratch_load_b32 v143, off, s32 offset:96
+; GFX11-TRUE16-NEXT: scratch_load_b32 v142, off, s32 offset:100
+; GFX11-TRUE16-NEXT: scratch_load_b32 v141, off, s32 offset:104
+; GFX11-TRUE16-NEXT: scratch_load_b32 v140, off, s32 offset:108
+; GFX11-TRUE16-NEXT: scratch_load_b32 v139, off, s32 offset:112
+; GFX11-TRUE16-NEXT: scratch_load_b32 v138, off, s32 offset:116
+; GFX11-TRUE16-NEXT: scratch_load_b32 v137, off, s32 offset:120
+; GFX11-TRUE16-NEXT: scratch_load_b32 v136, off, s32 offset:124
+; GFX11-TRUE16-NEXT: s_clause 0x1f
+; GFX11-TRUE16-NEXT: scratch_load_b32 v127, off, s32 offset:128
+; GFX11-TRUE16-NEXT: scratch_load_b32 v126, off, s32 offset:132
+; GFX11-TRUE16-NEXT: scratch_load_b32 v125, off, s32 offset:136
+; GFX11-TRUE16-NEXT: scratch_load_b32 v124, off, s32 offset:140
+; GFX11-TRUE16-NEXT: scratch_load_b32 v123, off, s32 offset:144
+; GFX11-TRUE16-NEXT: scratch_load_b32 v122, off, s32 offset:148
+; GFX11-TRUE16-NEXT: scratch_load_b32 v121, off, s32 offset:152
+; GFX11-TRUE16-NEXT: scratch_load_b32 v120, off, s32 offset:156
+; GFX11-TRUE16-NEXT: scratch_load_b32 v111, off, s32 offset:160
+; GFX11-TRUE16-NEXT: scratch_load_b32 v110, off, s32 offset:164
+; GFX11-TRUE16-NEXT: scratch_load_b32 v109, off, s32 offset:168
+; GFX11-TRUE16-NEXT: scratch_load_b32 v108, off, s32 offset:172
+; GFX11-TRUE16-NEXT: scratch_load_b32 v107, off, s32 offset:176
+; GFX11-TRUE16-NEXT: scratch_load_b32 v106, off, s32 offset:180
+; GFX11-TRUE16-NEXT: scratch_load_b32 v105, off, s32 offset:184
+; GFX11-TRUE16-NEXT: scratch_load_b32 v104, off, s32 offset:188
+; GFX11-TRUE16-NEXT: scratch_load_b32 v95, off, s32 offset:192
+; GFX11-TRUE16-NEXT: scratch_load_b32 v94, off, s32 offset:196
+; GFX11-TRUE16-NEXT: scratch_load_b32 v93, off, s32 offset:200
+; GFX11-TRUE16-NEXT: scratch_load_b32 v92, off, s32 offset:204
+; GFX11-TRUE16-NEXT: scratch_load_b32 v91, off, s32 offset:208
+; GFX11-TRUE16-NEXT: scratch_load_b32 v90, off, s32 offset:212
+; GFX11-TRUE16-NEXT: scratch_load_b32 v89, off, s32 offset:216
+; GFX11-TRUE16-NEXT: scratch_load_b32 v88, off, s32 offset:220
+; GFX11-TRUE16-NEXT: scratch_load_b32 v79, off, s32 offset:224
+; GFX11-TRUE16-NEXT: scratch_load_b32 v78, off, s32 offset:228
+; GFX11-TRUE16-NEXT: scratch_load_b32 v77, off, s32 offset:232
+; GFX11-TRUE16-NEXT: scratch_load_b32 v76, off, s32 offset:236
+; GFX11-TRUE16-NEXT: scratch_load_b32 v75, off, s32 offset:240
+; GFX11-TRUE16-NEXT: scratch_load_b32 v74, off, s32 offset:244
+; GFX11-TRUE16-NEXT: scratch_load_b32 v73, off, s32 offset:248
+; GFX11-TRUE16-NEXT: scratch_load_b32 v72, off, s32 offset:252
+; GFX11-TRUE16-NEXT: s_clause 0xf
+; GFX11-TRUE16-NEXT: scratch_load_b32 v63, off, s32 offset:256
+; GFX11-TRUE16-NEXT: scratch_load_b32 v62, off, s32 offset:260
+; GFX11-TRUE16-NEXT: scratch_load_b32 v61, off, s32 offset:264
+; GFX11-TRUE16-NEXT: scratch_load_b32 v60, off, s32 offset:268
+; GFX11-TRUE16-NEXT: scratch_load_b32 v59, off, s32 offset:272
+; GFX11-TRUE16-NEXT: scratch_load_b32 v58, off, s32 offset:276
+; GFX11-TRUE16-NEXT: scratch_load_b32 v57, off, s32 offset:280
+; GFX11-TRUE16-NEXT: scratch_load_b32 v56, off, s32 offset:284
+; GFX11-TRUE16-NEXT: scratch_load_b32 v47, off, s32 offset:288
+; GFX11-TRUE16-NEXT: scratch_load_b32 v46, off, s32 offset:292
+; GFX11-TRUE16-NEXT: scratch_load_b32 v45, off, s32 offset:296
+; GFX11-TRUE16-NEXT: scratch_load_b32 v44, off, s32 offset:300
+; GFX11-TRUE16-NEXT: scratch_load_b32 v43, off, s32 offset:304
+; GFX11-TRUE16-NEXT: scratch_load_b32 v42, off, s32 offset:308
+; GFX11-TRUE16-NEXT: scratch_load_b32 v41, off, s32 offset:312
+; GFX11-TRUE16-NEXT: scratch_load_b32 v40, off, s32 offset:316
+; GFX11-TRUE16-NEXT: v_dual_mov_b32 v3, v9 :: v_dual_mov_b32 v4, v14
+; GFX11-TRUE16-NEXT: v_dual_mov_b32 v9, v54 :: v_dual_mov_b32 v10, v65
+; GFX11-TRUE16-NEXT: v_dual_mov_b32 v14, v119 :: v_dual_mov_b32 v27, v30
+; GFX11-TRUE16-NEXT: s_waitcnt vmcnt(0)
; GFX11-TRUE16-NEXT: s_setpc_b64 s[30:31]
; GFX11-TRUE16-NEXT: .LBB51_4:
+; GFX11-TRUE16-NEXT: v_dual_mov_b32 v64, v29 :: v_dual_mov_b32 v65, v28
+; GFX11-TRUE16-NEXT: v_mov_b32_e32 v66, v30
+; GFX11-TRUE16-NEXT: v_dual_mov_b32 v53, v26 :: v_dual_mov_b32 v54, v25
; GFX11-TRUE16-NEXT: ; implicit-def: $vgpr0_vgpr1_vgpr2_vgpr3_vgpr4_vgpr5_vgpr6_vgpr7_vgpr8_vgpr9_vgpr10_vgpr11_vgpr12_vgpr13_vgpr14_vgpr15_vgpr16_vgpr17_vgpr18_vgpr19_vgpr20_vgpr21_vgpr22_vgpr23_vgpr24_vgpr25_vgpr26_vgpr27_vgpr28_vgpr29_vgpr30_vgpr31
+; GFX11-TRUE16-NEXT: ; implicit-def: $vgpr1_vgpr2_vgpr3_vgpr4_vgpr5_vgpr6_vgpr7_vgpr8_vgpr9_vgpr10_vgpr11_vgpr12_vgpr13_vgpr14_vgpr15_vgpr16_vgpr17_vgpr18_vgpr19_vgpr20_vgpr21_vgpr22_vgpr23_vgpr24_vgpr25_vgpr26_vgpr27_vgpr28_vgpr29_vgpr30_vgpr31_vgpr32
+; GFX11-TRUE16-NEXT: ; implicit-def: $vgpr3_vgpr4_vgpr5_vgpr6_vgpr7_vgpr8_vgpr9_vgpr10_vgpr11_vgpr12_vgpr13_vgpr14_vgpr15_vgpr16_vgpr17_vgpr18_vgpr19_vgpr20_vgpr21_vgpr22_vgpr23_vgpr24_vgpr25_vgpr26_vgpr27_vgpr28_vgpr29_vgpr30_vgpr31_vgpr32_vgpr33_vgpr34
+; GFX11-TRUE16-NEXT: ; implicit-def: $vgpr6_vgpr7_vgpr8_vgpr9_vgpr10_vgpr11_vgpr12_vgpr13_vgpr14_vgpr15_vgpr16_vgpr17_vgpr18_vgpr19_vgpr20_vgpr21_vgpr22_vgpr23_vgpr24_vgpr25_vgpr26_vgpr27_vgpr28_vgpr29_vgpr30_vgpr31_vgpr32_vgpr33_vgpr34_vgpr35_vgpr36_vgpr37
+; GFX11-TRUE16-NEXT: ; implicit-def: $vgpr10_vgpr11_vgpr12_vgpr13_vgpr14_vgpr15_vgpr16_vgpr17_vgpr18_vgpr19_vgpr20_vgpr21_vgpr22_vgpr23_vgpr24_vgpr25_vgpr26_vgpr27_vgpr28_vgpr29_vgpr30_vgpr31_vgpr32_vgpr33_vgpr34_vgpr35_vgpr36_vgpr37_vgpr38_vgpr39_vgpr40_vgpr41
+; GFX11-TRUE16-NEXT: ; implicit-def: $vgpr15_vgpr16_vgpr17_vgpr18_vgpr19_vgpr20_vgpr21_vgpr22_vgpr23_vgpr24_vgpr25_vgpr26_vgpr27_vgpr28_vgpr29_vgpr30_vgpr31_vgpr32_vgpr33_vgpr34_vgpr35_vgpr36_vgpr37_vgpr38_vgpr39_vgpr40_vgpr41_vgpr42_vgpr43_vgpr44_vgpr45_vgpr46
+; GFX11-TRUE16-NEXT: ; implicit-def: $vgpr21_vgpr22_vgpr23_vgpr24_vgpr25_vgpr26_vgpr27_vgpr28_vgpr29_vgpr30_vgpr31_vgpr32_vgpr33_vgpr34_vgpr35_vgpr36_vgpr37_vgpr38_vgpr39_vgpr40_vgpr41_vgpr42_vgpr43_vgpr44_vgpr45_vgpr46_vgpr47_vgpr48_vgpr49_vgpr50_vgpr51_vgpr52
+; GFX11-TRUE16-NEXT: s_delay_alu instid0(VALU_DEP_1) | instskip(NEXT) | instid1(VALU_DEP_4)
+; GFX11-TRUE16-NEXT: v_dual_mov_b32 v25, v54 :: v_dual_mov_b32 v26, v53
+; GFX11-TRUE16-NEXT: ; implicit-def: $vgpr28_vgpr29_vgpr30_vgpr31_vgpr32_vgpr33_vgpr34_vgpr35_vgpr36_vgpr37_vgpr38_vgpr39_vgpr40_vgpr41_vgpr42_vgpr43_vgpr44_vgpr45_vgpr46_vgpr47_vgpr48_vgpr49_vgpr50_vgpr51_vgpr52_vgpr53_vgpr54_vgpr55_vgpr56_vgpr57_vgpr58_vgpr59
+; GFX11-TRUE16-NEXT: v_dual_mov_b32 v28, v65 :: v_dual_mov_b32 v29, v64
+; GFX11-TRUE16-NEXT: s_delay_alu instid0(VALU_DEP_4)
+; GFX11-TRUE16-NEXT: v_mov_b32_e32 v30, v66
+; GFX11-TRUE16-NEXT: ; implicit-def: $vgpr36_vgpr37_vgpr38_vgpr39_vgpr40_vgpr41_vgpr42_vgpr43_vgpr44_vgpr45_vgpr46_vgpr47_vgpr48_vgpr49_vgpr50_vgpr51_vgpr52_vgpr53_vgpr54_vgpr55_vgpr56_vgpr57_vgpr58_vgpr59_vgpr60_vgpr61_vgpr62_vgpr63_vgpr64_vgpr65_vgpr66_vgpr67
+; GFX11-TRUE16-NEXT: ; implicit-def: $vgpr45_vgpr46_vgpr47_vgpr48_vgpr49_vgpr50_vgpr51_vgpr52_vgpr53_vgpr54_vgpr55_vgpr56_vgpr57_vgpr58_vgpr59_vgpr60_vgpr61_vgpr62_vgpr63_vgpr64_vgpr65_vgpr66_vgpr67_vgpr68_vgpr69_vgpr70_vgpr71_vgpr72_vgpr73_vgpr74_vgpr75_vgpr76
+; GFX11-TRUE16-NEXT: ; implicit-def: $vgpr55_vgpr56_vgpr57_vgpr58_vgpr59_vgpr60_vgpr61_vgpr62_vgpr63_vgpr64_vgpr65_vgpr66_vgpr67_vgpr68_vgpr69_vgpr70_vgpr71_vgpr72_vgpr73_vgpr74_vgpr75_vgpr76_vgpr77_vgpr78_vgpr79_vgpr80_vgpr81_vgpr82_vgpr83_vgpr84_vgpr85_vgpr86
+; GFX11-TRUE16-NEXT: ; implicit-def: $vgpr66_vgpr67_vgpr68_vgpr69_vgpr70_vgpr71_vgpr72_vgpr73_vgpr74_vgpr75_vgpr76_vgpr77_vgpr78_vgpr79_vgpr80_vgpr81_vgpr82_vgpr83_vgpr84_vgpr85_vgpr86_vgpr87_vgpr88_vgpr89_vgpr90_vgpr91_vgpr92_vgpr93_vgpr94_vgpr95_vgpr96_vgpr97
+; GFX11-TRUE16-NEXT: ; implicit-def: $vgpr78_vgpr79_vgpr80_vgpr81_vgpr82_vgpr83_vgpr84_vgpr85_vgpr86_vgpr87_vgpr88_vgpr89_vgpr90_vgpr91_vgpr92_vgpr93_vgpr94_vgpr95_vgpr96_vgpr97_vgpr98_vgpr99_vgpr100_vgpr101_vgpr102_vgpr103_vgpr104_vgpr105_vgpr106_vgpr107_vgpr108_vgpr109
+; GFX11-TRUE16-NEXT: ; implicit-def: $vgpr91_vgpr92_vgpr93_vgpr94_vgpr95_vgpr96_vgpr97_vgpr98_vgpr99_vgpr100_vgpr101_vgpr102_vgpr103_vgpr104_vgpr105_vgpr106_vgpr107_vgpr108_vgpr109_vgpr110_vgpr111_vgpr112_vgpr113_vgpr114_vgpr115_vgpr116_vgpr117_vgpr118_vgpr119_vgpr120_vgpr121_vgpr122
+; GFX11-TRUE16-NEXT: ; implicit-def: $vgpr105_vgpr106_vgpr107_vgpr108_vgpr109_vgpr110_vgpr111_vgpr112_vgpr113_vgpr114_vgpr115_vgpr116_vgpr117_vgpr118_vgpr119_vgpr120_vgpr121_vgpr122_vgpr123_vgpr124_vgpr125_vgpr126_vgpr127_vgpr128_vgpr129_vgpr130_vgpr131_vgpr132_vgpr133_vgpr134_vgpr135_vgpr136
+; GFX11-TRUE16-NEXT: ; implicit-def: $vgpr120_vgpr121_vgpr122_vgpr123_vgpr124_vgpr125_vgpr126_vgpr127_vgpr128_vgpr129_vgpr130_vgpr131_vgpr132_vgpr133_vgpr134_vgpr135_vgpr136_vgpr137_vgpr138_vgpr139_vgpr140_vgpr141_vgpr142_vgpr143_vgpr144_vgpr145_vgpr146_vgpr147_vgpr148_vgpr149_vgpr150_vgpr151
+; GFX11-TRUE16-NEXT: ; implicit-def: $vgpr136_vgpr137_vgpr138_vgpr139_vgpr140_vgpr141_vgpr142_vgpr143_vgpr144_vgpr145_vgpr146_vgpr147_vgpr148_vgpr149_vgpr150_vgpr151_vgpr152_vgpr153_vgpr154_vgpr155_vgpr156_vgpr157_vgpr158_vgpr159_vgpr160_vgpr161_vgpr162_vgpr163_vgpr164_vgpr165_vgpr166_vgpr167
+; GFX11-TRUE16-NEXT: ; implicit-def: $vgpr153_vgpr154_vgpr155_vgpr156_vgpr157_vgpr158_vgpr159_vgpr160_vgpr161_vgpr162_vgpr163_vgpr164_vgpr165_vgpr166_vgpr167_vgpr168_vgpr169_vgpr170_vgpr171_vgpr172_vgpr173_vgpr174_vgpr175_vgpr176_vgpr177_vgpr178_vgpr179_vgpr180_vgpr181_vgpr182_vgpr183_vgpr184
; GFX11-TRUE16-NEXT: s_branch .LBB51_2
;
; GFX11-FAKE16-LABEL: bitcast_v60i16_to_v15f64_scalar:
@@ -43227,204 +44843,388 @@ define inreg <60 x half> @bitcast_v15f64_to_v60f16_scalar(<15 x double> inreg %a
; GFX9-NEXT: ; implicit-def: $vgpr54
; GFX9-NEXT: s_branch .LBB53_2
;
-; GFX11-LABEL: bitcast_v15f64_to_v60f16_scalar:
-; GFX11: ; %bb.0:
-; GFX11-NEXT: s_waitcnt vmcnt(0) expcnt(0) lgkmcnt(0)
-; GFX11-NEXT: v_cmp_ne_u32_e32 vcc_lo, 0, v12
-; GFX11-NEXT: v_dual_mov_b32 v30, s0 :: v_dual_mov_b32 v31, s1
-; GFX11-NEXT: v_dual_mov_b32 v28, s2 :: v_dual_mov_b32 v29, s3
-; GFX11-NEXT: v_dual_mov_b32 v26, s16 :: v_dual_mov_b32 v27, s17
-; GFX11-NEXT: v_dual_mov_b32 v24, s18 :: v_dual_mov_b32 v25, s19
-; GFX11-NEXT: v_dual_mov_b32 v22, s20 :: v_dual_mov_b32 v23, s21
-; GFX11-NEXT: v_dual_mov_b32 v20, s22 :: v_dual_mov_b32 v21, s23
-; GFX11-NEXT: v_dual_mov_b32 v18, s24 :: v_dual_mov_b32 v19, s25
-; GFX11-NEXT: v_dual_mov_b32 v14, s26 :: v_dual_mov_b32 v15, s27
-; GFX11-NEXT: v_dual_mov_b32 v16, s28 :: v_dual_mov_b32 v17, s29
-; GFX11-NEXT: s_mov_b32 s0, 0
-; GFX11-NEXT: s_and_b32 s1, vcc_lo, exec_lo
-; GFX11-NEXT: s_cbranch_scc0 .LBB53_4
-; GFX11-NEXT: ; %bb.1: ; %cmp.false
-; GFX11-NEXT: v_lshrrev_b32_e32 v50, 16, v11
-; GFX11-NEXT: v_lshrrev_b32_e32 v51, 16, v10
-; GFX11-NEXT: v_lshrrev_b32_e32 v52, 16, v9
-; GFX11-NEXT: v_lshrrev_b32_e32 v53, 16, v8
-; GFX11-NEXT: v_lshrrev_b32_e32 v54, 16, v7
-; GFX11-NEXT: v_lshrrev_b32_e32 v55, 16, v6
-; GFX11-NEXT: v_lshrrev_b32_e32 v64, 16, v5
-; GFX11-NEXT: v_lshrrev_b32_e32 v65, 16, v4
-; GFX11-NEXT: v_lshrrev_b32_e32 v66, 16, v3
-; GFX11-NEXT: v_lshrrev_b32_e32 v67, 16, v2
-; GFX11-NEXT: v_lshrrev_b32_e32 v68, 16, v1
-; GFX11-NEXT: v_lshrrev_b32_e32 v69, 16, v0
-; GFX11-NEXT: v_lshrrev_b32_e32 v70, 16, v17
-; GFX11-NEXT: v_lshrrev_b32_e32 v71, 16, v16
-; GFX11-NEXT: v_lshrrev_b32_e32 v80, 16, v15
-; GFX11-NEXT: v_lshrrev_b32_e32 v81, 16, v14
-; GFX11-NEXT: v_lshrrev_b32_e32 v13, 16, v19
-; GFX11-NEXT: v_lshrrev_b32_e32 v12, 16, v18
-; GFX11-NEXT: v_lshrrev_b32_e32 v35, 16, v21
-; GFX11-NEXT: v_lshrrev_b32_e32 v34, 16, v20
-; GFX11-NEXT: v_lshrrev_b32_e32 v33, 16, v23
-; GFX11-NEXT: v_lshrrev_b32_e32 v32, 16, v22
-; GFX11-NEXT: v_lshrrev_b32_e32 v82, 16, v25
-; GFX11-NEXT: v_lshrrev_b32_e32 v83, 16, v24
-; GFX11-NEXT: v_lshrrev_b32_e32 v49, 16, v27
-; GFX11-NEXT: v_lshrrev_b32_e32 v48, 16, v26
-; GFX11-NEXT: v_lshrrev_b32_e32 v39, 16, v29
-; GFX11-NEXT: v_lshrrev_b32_e32 v38, 16, v28
-; GFX11-NEXT: v_lshrrev_b32_e32 v37, 16, v31
-; GFX11-NEXT: v_lshrrev_b32_e32 v36, 16, v30
-; GFX11-NEXT: s_and_not1_b32 vcc_lo, exec_lo, s0
-; GFX11-NEXT: s_cbranch_vccnz .LBB53_3
-; GFX11-NEXT: .LBB53_2: ; %cmp.true
-; GFX11-NEXT: v_add_f64 v[10:11], v[10:11], 1.0
-; GFX11-NEXT: v_add_f64 v[8:9], v[8:9], 1.0
-; GFX11-NEXT: v_add_f64 v[6:7], v[6:7], 1.0
-; GFX11-NEXT: v_add_f64 v[4:5], v[4:5], 1.0
-; GFX11-NEXT: v_add_f64 v[2:3], v[2:3], 1.0
-; GFX11-NEXT: v_add_f64 v[0:1], v[0:1], 1.0
-; GFX11-NEXT: v_add_f64 v[16:17], v[16:17], 1.0
-; GFX11-NEXT: v_add_f64 v[14:15], v[14:15], 1.0
-; GFX11-NEXT: v_add_f64 v[18:19], v[18:19], 1.0
-; GFX11-NEXT: v_add_f64 v[20:21], v[20:21], 1.0
-; GFX11-NEXT: v_add_f64 v[22:23], v[22:23], 1.0
-; GFX11-NEXT: v_add_f64 v[24:25], v[24:25], 1.0
-; GFX11-NEXT: v_add_f64 v[26:27], v[26:27], 1.0
-; GFX11-NEXT: v_add_f64 v[28:29], v[28:29], 1.0
-; GFX11-NEXT: v_add_f64 v[30:31], v[30:31], 1.0
-; GFX11-NEXT: v_lshrrev_b32_e32 v50, 16, v11
-; GFX11-NEXT: v_lshrrev_b32_e32 v51, 16, v10
-; GFX11-NEXT: v_lshrrev_b32_e32 v52, 16, v9
-; GFX11-NEXT: v_lshrrev_b32_e32 v53, 16, v8
-; GFX11-NEXT: v_lshrrev_b32_e32 v54, 16, v7
-; GFX11-NEXT: v_lshrrev_b32_e32 v55, 16, v6
-; GFX11-NEXT: v_lshrrev_b32_e32 v64, 16, v5
-; GFX11-NEXT: v_lshrrev_b32_e32 v65, 16, v4
-; GFX11-NEXT: v_lshrrev_b32_e32 v66, 16, v3
-; GFX11-NEXT: v_lshrrev_b32_e32 v67, 16, v2
-; GFX11-NEXT: v_lshrrev_b32_e32 v68, 16, v1
-; GFX11-NEXT: v_lshrrev_b32_e32 v69, 16, v0
-; GFX11-NEXT: v_lshrrev_b32_e32 v70, 16, v17
-; GFX11-NEXT: v_lshrrev_b32_e32 v71, 16, v16
-; GFX11-NEXT: v_lshrrev_b32_e32 v80, 16, v15
-; GFX11-NEXT: v_lshrrev_b32_e32 v81, 16, v14
-; GFX11-NEXT: v_lshrrev_b32_e32 v13, 16, v19
-; GFX11-NEXT: v_lshrrev_b32_e32 v12, 16, v18
-; GFX11-NEXT: v_lshrrev_b32_e32 v35, 16, v21
-; GFX11-NEXT: v_lshrrev_b32_e32 v34, 16, v20
-; GFX11-NEXT: v_lshrrev_b32_e32 v33, 16, v23
-; GFX11-NEXT: v_lshrrev_b32_e32 v32, 16, v22
-; GFX11-NEXT: v_lshrrev_b32_e32 v82, 16, v25
-; GFX11-NEXT: v_lshrrev_b32_e32 v83, 16, v24
-; GFX11-NEXT: v_lshrrev_b32_e32 v49, 16, v27
-; GFX11-NEXT: v_lshrrev_b32_e32 v48, 16, v26
-; GFX11-NEXT: v_lshrrev_b32_e32 v39, 16, v29
-; GFX11-NEXT: v_lshrrev_b32_e32 v38, 16, v28
-; GFX11-NEXT: v_lshrrev_b32_e32 v37, 16, v31
-; GFX11-NEXT: v_lshrrev_b32_e32 v36, 16, v30
-; GFX11-NEXT: .LBB53_3: ; %end
-; GFX11-NEXT: v_and_b32_e32 v20, 0xffff, v20
-; GFX11-NEXT: v_and_b32_e32 v26, 0xffff, v26
-; GFX11-NEXT: v_and_b32_e32 v1, 0xffff, v1
-; GFX11-NEXT: v_and_b32_e32 v31, 0xffff, v31
-; GFX11-NEXT: v_and_b32_e32 v29, 0xffff, v29
-; GFX11-NEXT: v_lshl_or_b32 v34, v34, 16, v20
-; GFX11-NEXT: v_and_b32_e32 v19, 0xffff, v19
-; GFX11-NEXT: v_lshl_or_b32 v48, v48, 16, v26
-; GFX11-NEXT: v_and_b32_e32 v25, 0xffff, v25
-; GFX11-NEXT: v_and_b32_e32 v23, 0xffff, v23
-; GFX11-NEXT: v_and_b32_e32 v21, 0xffff, v21
-; GFX11-NEXT: v_and_b32_e32 v18, 0xffff, v18
-; GFX11-NEXT: v_lshl_or_b32 v13, v13, 16, v19
-; GFX11-NEXT: v_and_b32_e32 v0, 0xffff, v0
-; GFX11-NEXT: v_lshl_or_b32 v19, v68, 16, v1
-; GFX11-NEXT: v_and_b32_e32 v1, 0xffff, v3
-; GFX11-NEXT: v_lshl_or_b32 v37, v37, 16, v31
-; GFX11-NEXT: v_and_b32_e32 v30, 0xffff, v30
-; GFX11-NEXT: v_lshl_or_b32 v39, v39, 16, v29
-; GFX11-NEXT: v_and_b32_e32 v28, 0xffff, v28
-; GFX11-NEXT: v_lshl_or_b32 v31, v82, 16, v25
-; GFX11-NEXT: v_and_b32_e32 v26, 0xffff, v27
-; GFX11-NEXT: v_lshl_or_b32 v33, v33, 16, v23
-; GFX11-NEXT: v_and_b32_e32 v24, 0xffff, v24
-; GFX11-NEXT: v_lshl_or_b32 v35, v35, 16, v21
-; GFX11-NEXT: v_and_b32_e32 v22, 0xffff, v22
-; GFX11-NEXT: v_lshl_or_b32 v12, v12, 16, v18
-; GFX11-NEXT: v_lshl_or_b32 v18, v69, 16, v0
-; GFX11-NEXT: v_and_b32_e32 v0, 0xffff, v2
-; GFX11-NEXT: v_and_b32_e32 v2, 0xffff, v4
-; GFX11-NEXT: v_and_b32_e32 v3, 0xffff, v5
-; GFX11-NEXT: v_and_b32_e32 v4, 0xffff, v6
-; GFX11-NEXT: v_lshl_or_b32 v21, v66, 16, v1
-; GFX11-NEXT: v_and_b32_e32 v1, 0xffff, v8
-; GFX11-NEXT: v_lshl_or_b32 v38, v38, 16, v28
-; GFX11-NEXT: v_lshl_or_b32 v32, v32, 16, v22
-; GFX11-NEXT: v_and_b32_e32 v14, 0xffff, v14
-; GFX11-NEXT: v_and_b32_e32 v15, 0xffff, v15
-; GFX11-NEXT: v_and_b32_e32 v16, 0xffff, v16
-; GFX11-NEXT: v_and_b32_e32 v17, 0xffff, v17
-; GFX11-NEXT: v_lshl_or_b32 v20, v67, 16, v0
-; GFX11-NEXT: v_lshl_or_b32 v22, v65, 16, v2
-; GFX11-NEXT: v_lshl_or_b32 v23, v64, 16, v3
-; GFX11-NEXT: v_and_b32_e32 v0, 0xffff, v7
-; GFX11-NEXT: v_and_b32_e32 v2, 0xffff, v9
-; GFX11-NEXT: v_and_b32_e32 v3, 0xffff, v10
-; GFX11-NEXT: v_mov_b32_e32 v7, v31
-; GFX11-NEXT: v_lshl_or_b32 v49, v49, 16, v26
-; GFX11-NEXT: v_lshl_or_b32 v26, v53, 16, v1
-; GFX11-NEXT: v_mov_b32_e32 v1, v37
-; GFX11-NEXT: v_lshl_or_b32 v36, v36, 16, v30
-; GFX11-NEXT: v_mov_b32_e32 v9, v33
-; GFX11-NEXT: v_lshl_or_b32 v30, v83, 16, v24
-; GFX11-NEXT: v_lshl_or_b32 v24, v55, 16, v4
-; GFX11-NEXT: v_and_b32_e32 v4, 0xffff, v11
-; GFX11-NEXT: v_lshl_or_b32 v14, v81, 16, v14
-; GFX11-NEXT: v_lshl_or_b32 v15, v80, 16, v15
-; GFX11-NEXT: v_lshl_or_b32 v16, v71, 16, v16
-; GFX11-NEXT: v_lshl_or_b32 v17, v70, 16, v17
-; GFX11-NEXT: v_lshl_or_b32 v25, v54, 16, v0
-; GFX11-NEXT: v_lshl_or_b32 v27, v52, 16, v2
-; GFX11-NEXT: v_lshl_or_b32 v28, v51, 16, v3
-; GFX11-NEXT: v_lshl_or_b32 v29, v50, 16, v4
-; GFX11-NEXT: v_mov_b32_e32 v0, v36
-; GFX11-NEXT: v_dual_mov_b32 v2, v38 :: v_dual_mov_b32 v3, v39
-; GFX11-NEXT: v_dual_mov_b32 v4, v48 :: v_dual_mov_b32 v5, v49
-; GFX11-NEXT: v_mov_b32_e32 v6, v30
-; GFX11-NEXT: v_mov_b32_e32 v8, v32
-; GFX11-NEXT: v_dual_mov_b32 v10, v34 :: v_dual_mov_b32 v11, v35
-; GFX11-NEXT: s_setpc_b64 s[30:31]
-; GFX11-NEXT: .LBB53_4:
-; GFX11-NEXT: ; implicit-def: $vgpr36
-; GFX11-NEXT: ; implicit-def: $vgpr37
-; GFX11-NEXT: ; implicit-def: $vgpr38
-; GFX11-NEXT: ; implicit-def: $vgpr39
-; GFX11-NEXT: ; implicit-def: $vgpr48
-; GFX11-NEXT: ; implicit-def: $vgpr49
-; GFX11-NEXT: ; implicit-def: $vgpr83
-; GFX11-NEXT: ; implicit-def: $vgpr82
-; GFX11-NEXT: ; implicit-def: $vgpr32
-; GFX11-NEXT: ; implicit-def: $vgpr33
-; GFX11-NEXT: ; implicit-def: $vgpr34
-; GFX11-NEXT: ; implicit-def: $vgpr35
-; GFX11-NEXT: ; implicit-def: $vgpr12
-; GFX11-NEXT: ; implicit-def: $vgpr13
-; GFX11-NEXT: ; implicit-def: $vgpr81
-; GFX11-NEXT: ; implicit-def: $vgpr80
-; GFX11-NEXT: ; implicit-def: $vgpr71
-; GFX11-NEXT: ; implicit-def: $vgpr70
-; GFX11-NEXT: ; implicit-def: $vgpr69
-; GFX11-NEXT: ; implicit-def: $vgpr68
-; GFX11-NEXT: ; implicit-def: $vgpr67
-; GFX11-NEXT: ; implicit-def: $vgpr66
-; GFX11-NEXT: ; implicit-def: $vgpr65
-; GFX11-NEXT: ; implicit-def: $vgpr64
-; GFX11-NEXT: ; implicit-def: $vgpr55
-; GFX11-NEXT: ; implicit-def: $vgpr54
-; GFX11-NEXT: ; implicit-def: $vgpr53
-; GFX11-NEXT: ; implicit-def: $vgpr52
-; GFX11-NEXT: ; implicit-def: $vgpr51
-; GFX11-NEXT: ; implicit-def: $vgpr50
-; GFX11-NEXT: s_branch .LBB53_2
+; GFX11-TRUE16-LABEL: bitcast_v15f64_to_v60f16_scalar:
+; GFX11-TRUE16: ; %bb.0:
+; GFX11-TRUE16-NEXT: s_waitcnt vmcnt(0) expcnt(0) lgkmcnt(0)
+; GFX11-TRUE16-NEXT: v_dual_mov_b32 v16, v12 :: v_dual_mov_b32 v29, v11
+; GFX11-TRUE16-NEXT: v_dual_mov_b32 v28, v10 :: v_dual_mov_b32 v27, v9
+; GFX11-TRUE16-NEXT: v_dual_mov_b32 v26, v8 :: v_dual_mov_b32 v25, v7
+; GFX11-TRUE16-NEXT: s_delay_alu instid0(VALU_DEP_3)
+; GFX11-TRUE16-NEXT: v_cmp_ne_u32_e32 vcc_lo, 0, v16
+; GFX11-TRUE16-NEXT: v_dual_mov_b32 v24, v6 :: v_dual_mov_b32 v23, v5
+; GFX11-TRUE16-NEXT: v_dual_mov_b32 v22, v4 :: v_dual_mov_b32 v21, v3
+; GFX11-TRUE16-NEXT: v_dual_mov_b32 v20, v2 :: v_dual_mov_b32 v19, v1
+; GFX11-TRUE16-NEXT: v_dual_mov_b32 v18, v0 :: v_dual_mov_b32 v1, s1
+; GFX11-TRUE16-NEXT: v_dual_mov_b32 v0, s0 :: v_dual_mov_b32 v3, s3
+; GFX11-TRUE16-NEXT: v_dual_mov_b32 v2, s2 :: v_dual_mov_b32 v5, s17
+; GFX11-TRUE16-NEXT: v_dual_mov_b32 v4, s16 :: v_dual_mov_b32 v7, s19
+; GFX11-TRUE16-NEXT: v_dual_mov_b32 v6, s18 :: v_dual_mov_b32 v9, s21
+; GFX11-TRUE16-NEXT: v_dual_mov_b32 v8, s20 :: v_dual_mov_b32 v11, s23
+; GFX11-TRUE16-NEXT: v_dual_mov_b32 v10, s22 :: v_dual_mov_b32 v13, s25
+; GFX11-TRUE16-NEXT: v_dual_mov_b32 v12, s24 :: v_dual_mov_b32 v15, s27
+; GFX11-TRUE16-NEXT: v_dual_mov_b32 v14, s26 :: v_dual_mov_b32 v17, s29
+; GFX11-TRUE16-NEXT: v_mov_b32_e32 v16, s28
+; GFX11-TRUE16-NEXT: s_mov_b32 s0, 0
+; GFX11-TRUE16-NEXT: s_and_b32 s1, vcc_lo, exec_lo
+; GFX11-TRUE16-NEXT: s_cbranch_scc0 .LBB53_4
+; GFX11-TRUE16-NEXT: ; %bb.1: ; %cmp.false
+; GFX11-TRUE16-NEXT: v_lshrrev_b32_e32 v30, 16, v29
+; GFX11-TRUE16-NEXT: v_lshrrev_b32_e32 v31, 16, v28
+; GFX11-TRUE16-NEXT: v_lshrrev_b32_e32 v32, 16, v27
+; GFX11-TRUE16-NEXT: v_lshrrev_b32_e32 v33, 16, v26
+; GFX11-TRUE16-NEXT: v_lshrrev_b32_e32 v34, 16, v25
+; GFX11-TRUE16-NEXT: v_lshrrev_b32_e32 v35, 16, v24
+; GFX11-TRUE16-NEXT: v_lshrrev_b32_e32 v36, 16, v23
+; GFX11-TRUE16-NEXT: v_lshrrev_b32_e32 v37, 16, v22
+; GFX11-TRUE16-NEXT: v_lshrrev_b32_e32 v38, 16, v21
+; GFX11-TRUE16-NEXT: v_lshrrev_b32_e32 v39, 16, v20
+; GFX11-TRUE16-NEXT: v_lshrrev_b32_e32 v48, 16, v19
+; GFX11-TRUE16-NEXT: v_lshrrev_b32_e32 v49, 16, v18
+; GFX11-TRUE16-NEXT: v_lshrrev_b32_e32 v50, 16, v17
+; GFX11-TRUE16-NEXT: v_lshrrev_b32_e32 v51, 16, v16
+; GFX11-TRUE16-NEXT: v_lshrrev_b32_e32 v52, 16, v15
+; GFX11-TRUE16-NEXT: v_lshrrev_b32_e32 v53, 16, v14
+; GFX11-TRUE16-NEXT: v_lshrrev_b32_e32 v54, 16, v13
+; GFX11-TRUE16-NEXT: v_lshrrev_b32_e32 v55, 16, v12
+; GFX11-TRUE16-NEXT: v_lshrrev_b32_e32 v64, 16, v11
+; GFX11-TRUE16-NEXT: v_lshrrev_b32_e32 v65, 16, v10
+; GFX11-TRUE16-NEXT: v_lshrrev_b32_e32 v66, 16, v9
+; GFX11-TRUE16-NEXT: v_lshrrev_b32_e32 v67, 16, v8
+; GFX11-TRUE16-NEXT: v_lshrrev_b32_e32 v68, 16, v7
+; GFX11-TRUE16-NEXT: v_lshrrev_b32_e32 v69, 16, v6
+; GFX11-TRUE16-NEXT: v_lshrrev_b32_e32 v70, 16, v5
+; GFX11-TRUE16-NEXT: v_lshrrev_b32_e32 v71, 16, v4
+; GFX11-TRUE16-NEXT: v_lshrrev_b32_e32 v80, 16, v3
+; GFX11-TRUE16-NEXT: v_lshrrev_b32_e32 v81, 16, v2
+; GFX11-TRUE16-NEXT: v_lshrrev_b32_e32 v82, 16, v1
+; GFX11-TRUE16-NEXT: v_lshrrev_b32_e32 v83, 16, v0
+; GFX11-TRUE16-NEXT: s_and_not1_b32 vcc_lo, exec_lo, s0
+; GFX11-TRUE16-NEXT: s_cbranch_vccnz .LBB53_3
+; GFX11-TRUE16-NEXT: .LBB53_2: ; %cmp.true
+; GFX11-TRUE16-NEXT: v_add_f64 v[28:29], v[28:29], 1.0
+; GFX11-TRUE16-NEXT: v_add_f64 v[26:27], v[26:27], 1.0
+; GFX11-TRUE16-NEXT: v_add_f64 v[24:25], v[24:25], 1.0
+; GFX11-TRUE16-NEXT: v_add_f64 v[22:23], v[22:23], 1.0
+; GFX11-TRUE16-NEXT: v_add_f64 v[20:21], v[20:21], 1.0
+; GFX11-TRUE16-NEXT: v_add_f64 v[18:19], v[18:19], 1.0
+; GFX11-TRUE16-NEXT: v_add_f64 v[16:17], v[16:17], 1.0
+; GFX11-TRUE16-NEXT: v_add_f64 v[14:15], v[14:15], 1.0
+; GFX11-TRUE16-NEXT: v_add_f64 v[12:13], v[12:13], 1.0
+; GFX11-TRUE16-NEXT: v_add_f64 v[10:11], v[10:11], 1.0
+; GFX11-TRUE16-NEXT: v_add_f64 v[8:9], v[8:9], 1.0
+; GFX11-TRUE16-NEXT: v_add_f64 v[6:7], v[6:7], 1.0
+; GFX11-TRUE16-NEXT: v_add_f64 v[4:5], v[4:5], 1.0
+; GFX11-TRUE16-NEXT: v_add_f64 v[2:3], v[2:3], 1.0
+; GFX11-TRUE16-NEXT: v_add_f64 v[0:1], v[0:1], 1.0
+; GFX11-TRUE16-NEXT: v_lshrrev_b32_e32 v30, 16, v29
+; GFX11-TRUE16-NEXT: v_lshrrev_b32_e32 v31, 16, v28
+; GFX11-TRUE16-NEXT: v_lshrrev_b32_e32 v32, 16, v27
+; GFX11-TRUE16-NEXT: v_lshrrev_b32_e32 v33, 16, v26
+; GFX11-TRUE16-NEXT: v_lshrrev_b32_e32 v34, 16, v25
+; GFX11-TRUE16-NEXT: v_lshrrev_b32_e32 v35, 16, v24
+; GFX11-TRUE16-NEXT: v_lshrrev_b32_e32 v36, 16, v23
+; GFX11-TRUE16-NEXT: v_lshrrev_b32_e32 v37, 16, v22
+; GFX11-TRUE16-NEXT: v_lshrrev_b32_e32 v38, 16, v21
+; GFX11-TRUE16-NEXT: v_lshrrev_b32_e32 v39, 16, v20
+; GFX11-TRUE16-NEXT: v_lshrrev_b32_e32 v48, 16, v19
+; GFX11-TRUE16-NEXT: v_lshrrev_b32_e32 v49, 16, v18
+; GFX11-TRUE16-NEXT: v_lshrrev_b32_e32 v50, 16, v17
+; GFX11-TRUE16-NEXT: v_lshrrev_b32_e32 v51, 16, v16
+; GFX11-TRUE16-NEXT: v_lshrrev_b32_e32 v52, 16, v15
+; GFX11-TRUE16-NEXT: v_lshrrev_b32_e32 v53, 16, v14
+; GFX11-TRUE16-NEXT: v_lshrrev_b32_e32 v54, 16, v13
+; GFX11-TRUE16-NEXT: v_lshrrev_b32_e32 v55, 16, v12
+; GFX11-TRUE16-NEXT: v_lshrrev_b32_e32 v64, 16, v11
+; GFX11-TRUE16-NEXT: v_lshrrev_b32_e32 v65, 16, v10
+; GFX11-TRUE16-NEXT: v_lshrrev_b32_e32 v66, 16, v9
+; GFX11-TRUE16-NEXT: v_lshrrev_b32_e32 v67, 16, v8
+; GFX11-TRUE16-NEXT: v_lshrrev_b32_e32 v68, 16, v7
+; GFX11-TRUE16-NEXT: v_lshrrev_b32_e32 v69, 16, v6
+; GFX11-TRUE16-NEXT: v_lshrrev_b32_e32 v70, 16, v5
+; GFX11-TRUE16-NEXT: v_lshrrev_b32_e32 v71, 16, v4
+; GFX11-TRUE16-NEXT: v_lshrrev_b32_e32 v80, 16, v3
+; GFX11-TRUE16-NEXT: v_lshrrev_b32_e32 v81, 16, v2
+; GFX11-TRUE16-NEXT: v_lshrrev_b32_e32 v82, 16, v1
+; GFX11-TRUE16-NEXT: v_lshrrev_b32_e32 v83, 16, v0
+; GFX11-TRUE16-NEXT: .LBB53_3: ; %end
+; GFX11-TRUE16-NEXT: s_delay_alu instid0(VALU_DEP_1) | instskip(NEXT) | instid1(VALU_DEP_4)
+; GFX11-TRUE16-NEXT: v_dual_mov_b32 v83, v83 :: v_dual_mov_b32 v82, v82
+; GFX11-TRUE16-NEXT: v_dual_mov_b32 v81, v81 :: v_dual_mov_b32 v80, v80
+; GFX11-TRUE16-NEXT: v_dual_mov_b32 v71, v71 :: v_dual_mov_b32 v70, v70
+; GFX11-TRUE16-NEXT: v_dual_mov_b32 v69, v69 :: v_dual_mov_b32 v68, v68
+; GFX11-TRUE16-NEXT: v_dual_mov_b32 v67, v67 :: v_dual_mov_b32 v66, v66
+; GFX11-TRUE16-NEXT: v_dual_mov_b32 v65, v65 :: v_dual_mov_b32 v64, v64
+; GFX11-TRUE16-NEXT: v_dual_mov_b32 v55, v55 :: v_dual_mov_b32 v54, v54
+; GFX11-TRUE16-NEXT: v_dual_mov_b32 v53, v53 :: v_dual_mov_b32 v52, v52
+; GFX11-TRUE16-NEXT: v_dual_mov_b32 v51, v51 :: v_dual_mov_b32 v50, v50
+; GFX11-TRUE16-NEXT: v_dual_mov_b32 v49, v49 :: v_dual_mov_b32 v48, v48
+; GFX11-TRUE16-NEXT: v_dual_mov_b32 v39, v39 :: v_dual_mov_b32 v38, v38
+; GFX11-TRUE16-NEXT: v_dual_mov_b32 v37, v37 :: v_dual_mov_b32 v36, v36
+; GFX11-TRUE16-NEXT: v_dual_mov_b32 v35, v35 :: v_dual_mov_b32 v34, v34
+; GFX11-TRUE16-NEXT: v_dual_mov_b32 v33, v33 :: v_dual_mov_b32 v32, v32
+; GFX11-TRUE16-NEXT: v_dual_mov_b32 v31, v31 :: v_dual_mov_b32 v30, v30
+; GFX11-TRUE16-NEXT: v_mov_b16_e32 v0.h, v83.l
+; GFX11-TRUE16-NEXT: v_mov_b16_e32 v1.h, v82.l
+; GFX11-TRUE16-NEXT: v_mov_b16_e32 v2.h, v81.l
+; GFX11-TRUE16-NEXT: v_mov_b16_e32 v3.h, v80.l
+; GFX11-TRUE16-NEXT: v_mov_b16_e32 v4.h, v71.l
+; GFX11-TRUE16-NEXT: v_mov_b16_e32 v5.h, v70.l
+; GFX11-TRUE16-NEXT: v_mov_b16_e32 v6.h, v69.l
+; GFX11-TRUE16-NEXT: v_mov_b16_e32 v7.h, v68.l
+; GFX11-TRUE16-NEXT: v_mov_b16_e32 v8.h, v67.l
+; GFX11-TRUE16-NEXT: v_mov_b16_e32 v9.h, v66.l
+; GFX11-TRUE16-NEXT: v_mov_b16_e32 v10.h, v65.l
+; GFX11-TRUE16-NEXT: v_mov_b16_e32 v11.h, v64.l
+; GFX11-TRUE16-NEXT: v_mov_b16_e32 v12.h, v55.l
+; GFX11-TRUE16-NEXT: v_mov_b16_e32 v13.h, v54.l
+; GFX11-TRUE16-NEXT: v_mov_b16_e32 v14.h, v53.l
+; GFX11-TRUE16-NEXT: v_mov_b16_e32 v15.h, v52.l
+; GFX11-TRUE16-NEXT: v_mov_b16_e32 v16.h, v51.l
+; GFX11-TRUE16-NEXT: v_mov_b16_e32 v17.h, v50.l
+; GFX11-TRUE16-NEXT: v_mov_b16_e32 v18.h, v49.l
+; GFX11-TRUE16-NEXT: v_mov_b16_e32 v19.h, v48.l
+; GFX11-TRUE16-NEXT: v_mov_b16_e32 v20.h, v39.l
+; GFX11-TRUE16-NEXT: v_mov_b16_e32 v21.h, v38.l
+; GFX11-TRUE16-NEXT: v_mov_b16_e32 v22.h, v37.l
+; GFX11-TRUE16-NEXT: v_mov_b16_e32 v23.h, v36.l
+; GFX11-TRUE16-NEXT: v_mov_b16_e32 v24.h, v35.l
+; GFX11-TRUE16-NEXT: v_mov_b16_e32 v25.h, v34.l
+; GFX11-TRUE16-NEXT: v_mov_b16_e32 v26.h, v33.l
+; GFX11-TRUE16-NEXT: v_mov_b16_e32 v27.h, v32.l
+; GFX11-TRUE16-NEXT: v_mov_b16_e32 v28.h, v31.l
+; GFX11-TRUE16-NEXT: v_mov_b16_e32 v29.h, v30.l
+; GFX11-TRUE16-NEXT: s_setpc_b64 s[30:31]
+; GFX11-TRUE16-NEXT: .LBB53_4:
+; GFX11-TRUE16-NEXT: ; implicit-def: $vgpr83
+; GFX11-TRUE16-NEXT: ; implicit-def: $vgpr82
+; GFX11-TRUE16-NEXT: ; implicit-def: $vgpr81
+; GFX11-TRUE16-NEXT: ; implicit-def: $vgpr80
+; GFX11-TRUE16-NEXT: ; implicit-def: $vgpr71
+; GFX11-TRUE16-NEXT: ; implicit-def: $vgpr70
+; GFX11-TRUE16-NEXT: ; implicit-def: $vgpr69
+; GFX11-TRUE16-NEXT: ; implicit-def: $vgpr68
+; GFX11-TRUE16-NEXT: ; implicit-def: $vgpr67
+; GFX11-TRUE16-NEXT: ; implicit-def: $vgpr66
+; GFX11-TRUE16-NEXT: ; implicit-def: $vgpr65
+; GFX11-TRUE16-NEXT: ; implicit-def: $vgpr64
+; GFX11-TRUE16-NEXT: ; implicit-def: $vgpr55
+; GFX11-TRUE16-NEXT: ; implicit-def: $vgpr54
+; GFX11-TRUE16-NEXT: ; implicit-def: $vgpr53
+; GFX11-TRUE16-NEXT: ; implicit-def: $vgpr52
+; GFX11-TRUE16-NEXT: ; implicit-def: $vgpr51
+; GFX11-TRUE16-NEXT: ; implicit-def: $vgpr50
+; GFX11-TRUE16-NEXT: ; implicit-def: $vgpr49
+; GFX11-TRUE16-NEXT: ; implicit-def: $vgpr48
+; GFX11-TRUE16-NEXT: ; implicit-def: $vgpr39
+; GFX11-TRUE16-NEXT: ; implicit-def: $vgpr38
+; GFX11-TRUE16-NEXT: ; implicit-def: $vgpr37
+; GFX11-TRUE16-NEXT: ; implicit-def: $vgpr36
+; GFX11-TRUE16-NEXT: ; implicit-def: $vgpr35
+; GFX11-TRUE16-NEXT: ; implicit-def: $vgpr34
+; GFX11-TRUE16-NEXT: ; implicit-def: $vgpr33
+; GFX11-TRUE16-NEXT: ; implicit-def: $vgpr32
+; GFX11-TRUE16-NEXT: ; implicit-def: $vgpr31
+; GFX11-TRUE16-NEXT: ; implicit-def: $vgpr30
+; GFX11-TRUE16-NEXT: s_branch .LBB53_2
+;
+; GFX11-FAKE16-LABEL: bitcast_v15f64_to_v60f16_scalar:
+; GFX11-FAKE16: ; %bb.0:
+; GFX11-FAKE16-NEXT: s_waitcnt vmcnt(0) expcnt(0) lgkmcnt(0)
+; GFX11-FAKE16-NEXT: v_cmp_ne_u32_e32 vcc_lo, 0, v12
+; GFX11-FAKE16-NEXT: v_dual_mov_b32 v30, s0 :: v_dual_mov_b32 v31, s1
+; GFX11-FAKE16-NEXT: v_dual_mov_b32 v28, s2 :: v_dual_mov_b32 v29, s3
+; GFX11-FAKE16-NEXT: v_dual_mov_b32 v26, s16 :: v_dual_mov_b32 v27, s17
+; GFX11-FAKE16-NEXT: v_dual_mov_b32 v24, s18 :: v_dual_mov_b32 v25, s19
+; GFX11-FAKE16-NEXT: v_dual_mov_b32 v22, s20 :: v_dual_mov_b32 v23, s21
+; GFX11-FAKE16-NEXT: v_dual_mov_b32 v20, s22 :: v_dual_mov_b32 v21, s23
+; GFX11-FAKE16-NEXT: v_dual_mov_b32 v18, s24 :: v_dual_mov_b32 v19, s25
+; GFX11-FAKE16-NEXT: v_dual_mov_b32 v14, s26 :: v_dual_mov_b32 v15, s27
+; GFX11-FAKE16-NEXT: v_dual_mov_b32 v16, s28 :: v_dual_mov_b32 v17, s29
+; GFX11-FAKE16-NEXT: s_mov_b32 s0, 0
+; GFX11-FAKE16-NEXT: s_and_b32 s1, vcc_lo, exec_lo
+; GFX11-FAKE16-NEXT: s_cbranch_scc0 .LBB53_4
+; GFX11-FAKE16-NEXT: ; %bb.1: ; %cmp.false
+; GFX11-FAKE16-NEXT: v_lshrrev_b32_e32 v50, 16, v11
+; GFX11-FAKE16-NEXT: v_lshrrev_b32_e32 v51, 16, v10
+; GFX11-FAKE16-NEXT: v_lshrrev_b32_e32 v52, 16, v9
+; GFX11-FAKE16-NEXT: v_lshrrev_b32_e32 v53, 16, v8
+; GFX11-FAKE16-NEXT: v_lshrrev_b32_e32 v54, 16, v7
+; GFX11-FAKE16-NEXT: v_lshrrev_b32_e32 v55, 16, v6
+; GFX11-FAKE16-NEXT: v_lshrrev_b32_e32 v64, 16, v5
+; GFX11-FAKE16-NEXT: v_lshrrev_b32_e32 v65, 16, v4
+; GFX11-FAKE16-NEXT: v_lshrrev_b32_e32 v66, 16, v3
+; GFX11-FAKE16-NEXT: v_lshrrev_b32_e32 v67, 16, v2
+; GFX11-FAKE16-NEXT: v_lshrrev_b32_e32 v68, 16, v1
+; GFX11-FAKE16-NEXT: v_lshrrev_b32_e32 v69, 16, v0
+; GFX11-FAKE16-NEXT: v_lshrrev_b32_e32 v70, 16, v17
+; GFX11-FAKE16-NEXT: v_lshrrev_b32_e32 v71, 16, v16
+; GFX11-FAKE16-NEXT: v_lshrrev_b32_e32 v80, 16, v15
+; GFX11-FAKE16-NEXT: v_lshrrev_b32_e32 v81, 16, v14
+; GFX11-FAKE16-NEXT: v_lshrrev_b32_e32 v13, 16, v19
+; GFX11-FAKE16-NEXT: v_lshrrev_b32_e32 v12, 16, v18
+; GFX11-FAKE16-NEXT: v_lshrrev_b32_e32 v35, 16, v21
+; GFX11-FAKE16-NEXT: v_lshrrev_b32_e32 v34, 16, v20
+; GFX11-FAKE16-NEXT: v_lshrrev_b32_e32 v33, 16, v23
+; GFX11-FAKE16-NEXT: v_lshrrev_b32_e32 v32, 16, v22
+; GFX11-FAKE16-NEXT: v_lshrrev_b32_e32 v82, 16, v25
+; GFX11-FAKE16-NEXT: v_lshrrev_b32_e32 v83, 16, v24
+; GFX11-FAKE16-NEXT: v_lshrrev_b32_e32 v49, 16, v27
+; GFX11-FAKE16-NEXT: v_lshrrev_b32_e32 v48, 16, v26
+; GFX11-FAKE16-NEXT: v_lshrrev_b32_e32 v39, 16, v29
+; GFX11-FAKE16-NEXT: v_lshrrev_b32_e32 v38, 16, v28
+; GFX11-FAKE16-NEXT: v_lshrrev_b32_e32 v37, 16, v31
+; GFX11-FAKE16-NEXT: v_lshrrev_b32_e32 v36, 16, v30
+; GFX11-FAKE16-NEXT: s_and_not1_b32 vcc_lo, exec_lo, s0
+; GFX11-FAKE16-NEXT: s_cbranch_vccnz .LBB53_3
+; GFX11-FAKE16-NEXT: .LBB53_2: ; %cmp.true
+; GFX11-FAKE16-NEXT: v_add_f64 v[10:11], v[10:11], 1.0
+; GFX11-FAKE16-NEXT: v_add_f64 v[8:9], v[8:9], 1.0
+; GFX11-FAKE16-NEXT: v_add_f64 v[6:7], v[6:7], 1.0
+; GFX11-FAKE16-NEXT: v_add_f64 v[4:5], v[4:5], 1.0
+; GFX11-FAKE16-NEXT: v_add_f64 v[2:3], v[2:3], 1.0
+; GFX11-FAKE16-NEXT: v_add_f64 v[0:1], v[0:1], 1.0
+; GFX11-FAKE16-NEXT: v_add_f64 v[16:17], v[16:17], 1.0
+; GFX11-FAKE16-NEXT: v_add_f64 v[14:15], v[14:15], 1.0
+; GFX11-FAKE16-NEXT: v_add_f64 v[18:19], v[18:19], 1.0
+; GFX11-FAKE16-NEXT: v_add_f64 v[20:21], v[20:21], 1.0
+; GFX11-FAKE16-NEXT: v_add_f64 v[22:23], v[22:23], 1.0
+; GFX11-FAKE16-NEXT: v_add_f64 v[24:25], v[24:25], 1.0
+; GFX11-FAKE16-NEXT: v_add_f64 v[26:27], v[26:27], 1.0
+; GFX11-FAKE16-NEXT: v_add_f64 v[28:29], v[28:29], 1.0
+; GFX11-FAKE16-NEXT: v_add_f64 v[30:31], v[30:31], 1.0
+; GFX11-FAKE16-NEXT: v_lshrrev_b32_e32 v50, 16, v11
+; GFX11-FAKE16-NEXT: v_lshrrev_b32_e32 v51, 16, v10
+; GFX11-FAKE16-NEXT: v_lshrrev_b32_e32 v52, 16, v9
+; GFX11-FAKE16-NEXT: v_lshrrev_b32_e32 v53, 16, v8
+; GFX11-FAKE16-NEXT: v_lshrrev_b32_e32 v54, 16, v7
+; GFX11-FAKE16-NEXT: v_lshrrev_b32_e32 v55, 16, v6
+; GFX11-FAKE16-NEXT: v_lshrrev_b32_e32 v64, 16, v5
+; GFX11-FAKE16-NEXT: v_lshrrev_b32_e32 v65, 16, v4
+; GFX11-FAKE16-NEXT: v_lshrrev_b32_e32 v66, 16, v3
+; GFX11-FAKE16-NEXT: v_lshrrev_b32_e32 v67, 16, v2
+; GFX11-FAKE16-NEXT: v_lshrrev_b32_e32 v68, 16, v1
+; GFX11-FAKE16-NEXT: v_lshrrev_b32_e32 v69, 16, v0
+; GFX11-FAKE16-NEXT: v_lshrrev_b32_e32 v70, 16, v17
+; GFX11-FAKE16-NEXT: v_lshrrev_b32_e32 v71, 16, v16
+; GFX11-FAKE16-NEXT: v_lshrrev_b32_e32 v80, 16, v15
+; GFX11-FAKE16-NEXT: v_lshrrev_b32_e32 v81, 16, v14
+; GFX11-FAKE16-NEXT: v_lshrrev_b32_e32 v13, 16, v19
+; GFX11-FAKE16-NEXT: v_lshrrev_b32_e32 v12, 16, v18
+; GFX11-FAKE16-NEXT: v_lshrrev_b32_e32 v35, 16, v21
+; GFX11-FAKE16-NEXT: v_lshrrev_b32_e32 v34, 16, v20
+; GFX11-FAKE16-NEXT: v_lshrrev_b32_e32 v33, 16, v23
+; GFX11-FAKE16-NEXT: v_lshrrev_b32_e32 v32, 16, v22
+; GFX11-FAKE16-NEXT: v_lshrrev_b32_e32 v82, 16, v25
+; GFX11-FAKE16-NEXT: v_lshrrev_b32_e32 v83, 16, v24
+; GFX11-FAKE16-NEXT: v_lshrrev_b32_e32 v49, 16, v27
+; GFX11-FAKE16-NEXT: v_lshrrev_b32_e32 v48, 16, v26
+; GFX11-FAKE16-NEXT: v_lshrrev_b32_e32 v39, 16, v29
+; GFX11-FAKE16-NEXT: v_lshrrev_b32_e32 v38, 16, v28
+; GFX11-FAKE16-NEXT: v_lshrrev_b32_e32 v37, 16, v31
+; GFX11-FAKE16-NEXT: v_lshrrev_b32_e32 v36, 16, v30
+; GFX11-FAKE16-NEXT: .LBB53_3: ; %end
+; GFX11-FAKE16-NEXT: v_and_b32_e32 v20, 0xffff, v20
+; GFX11-FAKE16-NEXT: v_and_b32_e32 v26, 0xffff, v26
+; GFX11-FAKE16-NEXT: v_and_b32_e32 v1, 0xffff, v1
+; GFX11-FAKE16-NEXT: v_and_b32_e32 v31, 0xffff, v31
+; GFX11-FAKE16-NEXT: v_and_b32_e32 v29, 0xffff, v29
+; GFX11-FAKE16-NEXT: v_lshl_or_b32 v34, v34, 16, v20
+; GFX11-FAKE16-NEXT: v_and_b32_e32 v19, 0xffff, v19
+; GFX11-FAKE16-NEXT: v_lshl_or_b32 v48, v48, 16, v26
+; GFX11-FAKE16-NEXT: v_and_b32_e32 v25, 0xffff, v25
+; GFX11-FAKE16-NEXT: v_and_b32_e32 v23, 0xffff, v23
+; GFX11-FAKE16-NEXT: v_and_b32_e32 v21, 0xffff, v21
+; GFX11-FAKE16-NEXT: v_and_b32_e32 v18, 0xffff, v18
+; GFX11-FAKE16-NEXT: v_lshl_or_b32 v13, v13, 16, v19
+; GFX11-FAKE16-NEXT: v_and_b32_e32 v0, 0xffff, v0
+; GFX11-FAKE16-NEXT: v_lshl_or_b32 v19, v68, 16, v1
+; GFX11-FAKE16-NEXT: v_and_b32_e32 v1, 0xffff, v3
+; GFX11-FAKE16-NEXT: v_lshl_or_b32 v37, v37, 16, v31
+; GFX11-FAKE16-NEXT: v_and_b32_e32 v30, 0xffff, v30
+; GFX11-FAKE16-NEXT: v_lshl_or_b32 v39, v39, 16, v29
+; GFX11-FAKE16-NEXT: v_and_b32_e32 v28, 0xffff, v28
+; GFX11-FAKE16-NEXT: v_lshl_or_b32 v31, v82, 16, v25
+; GFX11-FAKE16-NEXT: v_and_b32_e32 v26, 0xffff, v27
+; GFX11-FAKE16-NEXT: v_lshl_or_b32 v33, v33, 16, v23
+; GFX11-FAKE16-NEXT: v_and_b32_e32 v24, 0xffff, v24
+; GFX11-FAKE16-NEXT: v_lshl_or_b32 v35, v35, 16, v21
+; GFX11-FAKE16-NEXT: v_and_b32_e32 v22, 0xffff, v22
+; GFX11-FAKE16-NEXT: v_lshl_or_b32 v12, v12, 16, v18
+; GFX11-FAKE16-NEXT: v_lshl_or_b32 v18, v69, 16, v0
+; GFX11-FAKE16-NEXT: v_and_b32_e32 v0, 0xffff, v2
+; GFX11-FAKE16-NEXT: v_and_b32_e32 v2, 0xffff, v4
+; GFX11-FAKE16-NEXT: v_and_b32_e32 v3, 0xffff, v5
+; GFX11-FAKE16-NEXT: v_and_b32_e32 v4, 0xffff, v6
+; GFX11-FAKE16-NEXT: v_lshl_or_b32 v21, v66, 16, v1
+; GFX11-FAKE16-NEXT: v_and_b32_e32 v1, 0xffff, v8
+; GFX11-FAKE16-NEXT: v_lshl_or_b32 v38, v38, 16, v28
+; GFX11-FAKE16-NEXT: v_lshl_or_b32 v32, v32, 16, v22
+; GFX11-FAKE16-NEXT: v_and_b32_e32 v14, 0xffff, v14
+; GFX11-FAKE16-NEXT: v_and_b32_e32 v15, 0xffff, v15
+; GFX11-FAKE16-NEXT: v_and_b32_e32 v16, 0xffff, v16
+; GFX11-FAKE16-NEXT: v_and_b32_e32 v17, 0xffff, v17
+; GFX11-FAKE16-NEXT: v_lshl_or_b32 v20, v67, 16, v0
+; GFX11-FAKE16-NEXT: v_lshl_or_b32 v22, v65, 16, v2
+; GFX11-FAKE16-NEXT: v_lshl_or_b32 v23, v64, 16, v3
+; GFX11-FAKE16-NEXT: v_and_b32_e32 v0, 0xffff, v7
+; GFX11-FAKE16-NEXT: v_and_b32_e32 v2, 0xffff, v9
+; GFX11-FAKE16-NEXT: v_and_b32_e32 v3, 0xffff, v10
+; GFX11-FAKE16-NEXT: v_mov_b32_e32 v7, v31
+; GFX11-FAKE16-NEXT: v_lshl_or_b32 v49, v49, 16, v26
+; GFX11-FAKE16-NEXT: v_lshl_or_b32 v26, v53, 16, v1
+; GFX11-FAKE16-NEXT: v_mov_b32_e32 v1, v37
+; GFX11-FAKE16-NEXT: v_lshl_or_b32 v36, v36, 16, v30
+; GFX11-FAKE16-NEXT: v_mov_b32_e32 v9, v33
+; GFX11-FAKE16-NEXT: v_lshl_or_b32 v30, v83, 16, v24
+; GFX11-FAKE16-NEXT: v_lshl_or_b32 v24, v55, 16, v4
+; GFX11-FAKE16-NEXT: v_and_b32_e32 v4, 0xffff, v11
+; GFX11-FAKE16-NEXT: v_lshl_or_b32 v14, v81, 16, v14
+; GFX11-FAKE16-NEXT: v_lshl_or_b32 v15, v80, 16, v15
+; GFX11-FAKE16-NEXT: v_lshl_or_b32 v16, v71, 16, v16
+; GFX11-FAKE16-NEXT: v_lshl_or_b32 v17, v70, 16, v17
+; GFX11-FAKE16-NEXT: v_lshl_or_b32 v25, v54, 16, v0
+; GFX11-FAKE16-NEXT: v_lshl_or_b32 v27, v52, 16, v2
+; GFX11-FAKE16-NEXT: v_lshl_or_b32 v28, v51, 16, v3
+; GFX11-FAKE16-NEXT: v_lshl_or_b32 v29, v50, 16, v4
+; GFX11-FAKE16-NEXT: v_mov_b32_e32 v0, v36
+; GFX11-FAKE16-NEXT: v_dual_mov_b32 v2, v38 :: v_dual_mov_b32 v3, v39
+; GFX11-FAKE16-NEXT: v_dual_mov_b32 v4, v48 :: v_dual_mov_b32 v5, v49
+; GFX11-FAKE16-NEXT: v_mov_b32_e32 v6, v30
+; GFX11-FAKE16-NEXT: v_mov_b32_e32 v8, v32
+; GFX11-FAKE16-NEXT: v_dual_mov_b32 v10, v34 :: v_dual_mov_b32 v11, v35
+; GFX11-FAKE16-NEXT: s_setpc_b64 s[30:31]
+; GFX11-FAKE16-NEXT: .LBB53_4:
+; GFX11-FAKE16-NEXT: ; implicit-def: $vgpr36
+; GFX11-FAKE16-NEXT: ; implicit-def: $vgpr37
+; GFX11-FAKE16-NEXT: ; implicit-def: $vgpr38
+; GFX11-FAKE16-NEXT: ; implicit-def: $vgpr39
+; GFX11-FAKE16-NEXT: ; implicit-def: $vgpr48
+; GFX11-FAKE16-NEXT: ; implicit-def: $vgpr49
+; GFX11-FAKE16-NEXT: ; implicit-def: $vgpr83
+; GFX11-FAKE16-NEXT: ; implicit-def: $vgpr82
+; GFX11-FAKE16-NEXT: ; implicit-def: $vgpr32
+; GFX11-FAKE16-NEXT: ; implicit-def: $vgpr33
+; GFX11-FAKE16-NEXT: ; implicit-def: $vgpr34
+; GFX11-FAKE16-NEXT: ; implicit-def: $vgpr35
+; GFX11-FAKE16-NEXT: ; implicit-def: $vgpr12
+; GFX11-FAKE16-NEXT: ; implicit-def: $vgpr13
+; GFX11-FAKE16-NEXT: ; implicit-def: $vgpr81
+; GFX11-FAKE16-NEXT: ; implicit-def: $vgpr80
+; GFX11-FAKE16-NEXT: ; implicit-def: $vgpr71
+; GFX11-FAKE16-NEXT: ; implicit-def: $vgpr70
+; GFX11-FAKE16-NEXT: ; implicit-def: $vgpr69
+; GFX11-FAKE16-NEXT: ; implicit-def: $vgpr68
+; GFX11-FAKE16-NEXT: ; implicit-def: $vgpr67
+; GFX11-FAKE16-NEXT: ; implicit-def: $vgpr66
+; GFX11-FAKE16-NEXT: ; implicit-def: $vgpr65
+; GFX11-FAKE16-NEXT: ; implicit-def: $vgpr64
+; GFX11-FAKE16-NEXT: ; implicit-def: $vgpr55
+; GFX11-FAKE16-NEXT: ; implicit-def: $vgpr54
+; GFX11-FAKE16-NEXT: ; implicit-def: $vgpr53
+; GFX11-FAKE16-NEXT: ; implicit-def: $vgpr52
+; GFX11-FAKE16-NEXT: ; implicit-def: $vgpr51
+; GFX11-FAKE16-NEXT: ; implicit-def: $vgpr50
+; GFX11-FAKE16-NEXT: s_branch .LBB53_2
%cmp = icmp eq i32 %b, 0
br i1 %cmp, label %cmp.true, label %cmp.false
@@ -46109,153 +47909,305 @@ define inreg <15 x double> @bitcast_v60f16_to_v15f64_scalar(<60 x half> inreg %a
; GFX11-TRUE16-LABEL: bitcast_v60f16_to_v15f64_scalar:
; GFX11-TRUE16: ; %bb.0:
; GFX11-TRUE16-NEXT: s_waitcnt vmcnt(0) expcnt(0) lgkmcnt(0)
-; GFX11-TRUE16-NEXT: v_mov_b16_e32 v32.h, 0
; GFX11-TRUE16-NEXT: v_cmp_ne_u32_e32 vcc_lo, 0, v12
-; GFX11-TRUE16-NEXT: v_mov_b16_e32 v32.l, v11.h
-; GFX11-TRUE16-NEXT: v_mov_b16_e32 v33.l, v10.h
-; GFX11-TRUE16-NEXT: v_mov_b16_e32 v34.l, v9.h
-; GFX11-TRUE16-NEXT: v_mov_b16_e32 v33.h, v32.h
-; GFX11-TRUE16-NEXT: v_mov_b16_e32 v34.h, v32.h
-; GFX11-TRUE16-NEXT: v_mov_b16_e32 v35.l, v8.h
-; GFX11-TRUE16-NEXT: v_mov_b16_e32 v35.h, v32.h
-; GFX11-TRUE16-NEXT: v_mov_b16_e32 v36.l, v7.h
-; GFX11-TRUE16-NEXT: v_mov_b16_e32 v36.h, v32.h
-; GFX11-TRUE16-NEXT: v_mov_b16_e32 v37.l, v6.h
-; GFX11-TRUE16-NEXT: v_mov_b16_e32 v37.h, v32.h
-; GFX11-TRUE16-NEXT: v_mov_b16_e32 v38.l, v5.h
-; GFX11-TRUE16-NEXT: v_mov_b16_e32 v38.h, v32.h
-; GFX11-TRUE16-NEXT: v_mov_b16_e32 v39.l, v4.h
-; GFX11-TRUE16-NEXT: v_mov_b16_e32 v39.h, v32.h
-; GFX11-TRUE16-NEXT: v_mov_b16_e32 v48.l, v3.h
-; GFX11-TRUE16-NEXT: v_mov_b16_e32 v48.h, v32.h
-; GFX11-TRUE16-NEXT: v_mov_b16_e32 v49.l, v2.h
-; GFX11-TRUE16-NEXT: v_mov_b16_e32 v49.h, v32.h
-; GFX11-TRUE16-NEXT: v_mov_b16_e32 v50.l, v1.h
-; GFX11-TRUE16-NEXT: v_mov_b16_e32 v50.h, v32.h
-; GFX11-TRUE16-NEXT: v_mov_b16_e32 v51.l, v0.h
-; GFX11-TRUE16-NEXT: v_mov_b16_e32 v51.h, v32.h
-; GFX11-TRUE16-NEXT: v_and_b32_e32 v71, 0xffff, v0
-; GFX11-TRUE16-NEXT: v_and_b32_e32 v70, 0xffff, v1
-; GFX11-TRUE16-NEXT: v_and_b32_e32 v69, 0xffff, v2
-; GFX11-TRUE16-NEXT: v_and_b32_e32 v68, 0xffff, v3
-; GFX11-TRUE16-NEXT: v_and_b32_e32 v67, 0xffff, v4
-; GFX11-TRUE16-NEXT: v_and_b32_e32 v66, 0xffff, v5
-; GFX11-TRUE16-NEXT: v_and_b32_e32 v65, 0xffff, v6
-; GFX11-TRUE16-NEXT: v_and_b32_e32 v64, 0xffff, v7
-; GFX11-TRUE16-NEXT: v_and_b32_e32 v55, 0xffff, v8
-; GFX11-TRUE16-NEXT: v_and_b32_e32 v54, 0xffff, v9
-; GFX11-TRUE16-NEXT: v_and_b32_e32 v53, 0xffff, v10
-; GFX11-TRUE16-NEXT: v_and_b32_e32 v52, 0xffff, v11
-; GFX11-TRUE16-NEXT: s_lshr_b32 s40, s29, 16
-; GFX11-TRUE16-NEXT: s_lshr_b32 s41, s28, 16
-; GFX11-TRUE16-NEXT: s_lshr_b32 s42, s27, 16
-; GFX11-TRUE16-NEXT: s_lshr_b32 s14, s26, 16
-; GFX11-TRUE16-NEXT: s_lshr_b32 s13, s25, 16
-; GFX11-TRUE16-NEXT: s_lshr_b32 s12, s24, 16
-; GFX11-TRUE16-NEXT: s_lshr_b32 s11, s23, 16
-; GFX11-TRUE16-NEXT: s_lshr_b32 s10, s22, 16
-; GFX11-TRUE16-NEXT: s_lshr_b32 s9, s21, 16
-; GFX11-TRUE16-NEXT: s_lshr_b32 s8, s20, 16
-; GFX11-TRUE16-NEXT: s_lshr_b32 s7, s19, 16
-; GFX11-TRUE16-NEXT: s_lshr_b32 s6, s18, 16
-; GFX11-TRUE16-NEXT: s_lshr_b32 s5, s17, 16
-; GFX11-TRUE16-NEXT: s_lshr_b32 s4, s16, 16
-; GFX11-TRUE16-NEXT: s_lshr_b32 s43, s3, 16
-; GFX11-TRUE16-NEXT: s_lshr_b32 s44, s2, 16
-; GFX11-TRUE16-NEXT: s_lshr_b32 s45, s1, 16
-; GFX11-TRUE16-NEXT: s_lshr_b32 s46, s0, 16
-; GFX11-TRUE16-NEXT: s_mov_b32 s15, 0
-; GFX11-TRUE16-NEXT: s_pack_ll_b32_b16 s0, s0, s46
-; GFX11-TRUE16-NEXT: s_pack_ll_b32_b16 s1, s1, s45
-; GFX11-TRUE16-NEXT: s_pack_ll_b32_b16 s2, s2, s44
-; GFX11-TRUE16-NEXT: s_pack_ll_b32_b16 s3, s3, s43
-; GFX11-TRUE16-NEXT: s_pack_ll_b32_b16 s4, s16, s4
-; GFX11-TRUE16-NEXT: s_pack_ll_b32_b16 s5, s17, s5
-; GFX11-TRUE16-NEXT: s_pack_ll_b32_b16 s6, s18, s6
-; GFX11-TRUE16-NEXT: s_pack_ll_b32_b16 s7, s19, s7
-; GFX11-TRUE16-NEXT: s_pack_ll_b32_b16 s8, s20, s8
-; GFX11-TRUE16-NEXT: s_pack_ll_b32_b16 s9, s21, s9
-; GFX11-TRUE16-NEXT: s_pack_ll_b32_b16 s10, s22, s10
-; GFX11-TRUE16-NEXT: s_pack_ll_b32_b16 s11, s23, s11
-; GFX11-TRUE16-NEXT: s_pack_ll_b32_b16 s12, s24, s12
-; GFX11-TRUE16-NEXT: s_pack_ll_b32_b16 s13, s25, s13
-; GFX11-TRUE16-NEXT: s_pack_ll_b32_b16 s14, s26, s14
-; GFX11-TRUE16-NEXT: s_pack_ll_b32_b16 s16, s27, s42
-; GFX11-TRUE16-NEXT: s_pack_ll_b32_b16 s17, s28, s41
-; GFX11-TRUE16-NEXT: s_pack_ll_b32_b16 s18, s29, s40
+; GFX11-TRUE16-NEXT: s_clause 0x1f
+; GFX11-TRUE16-NEXT: scratch_store_b32 off, v40, s32 offset:316
+; GFX11-TRUE16-NEXT: scratch_store_b32 off, v41, s32 offset:312
+; GFX11-TRUE16-NEXT: scratch_store_b32 off, v42, s32 offset:308
+; GFX11-TRUE16-NEXT: scratch_store_b32 off, v43, s32 offset:304
+; GFX11-TRUE16-NEXT: scratch_store_b32 off, v44, s32 offset:300
+; GFX11-TRUE16-NEXT: scratch_store_b32 off, v45, s32 offset:296
+; GFX11-TRUE16-NEXT: scratch_store_b32 off, v46, s32 offset:292
+; GFX11-TRUE16-NEXT: scratch_store_b32 off, v47, s32 offset:288
+; GFX11-TRUE16-NEXT: scratch_store_b32 off, v56, s32 offset:284
+; GFX11-TRUE16-NEXT: scratch_store_b32 off, v57, s32 offset:280
+; GFX11-TRUE16-NEXT: scratch_store_b32 off, v58, s32 offset:276
+; GFX11-TRUE16-NEXT: scratch_store_b32 off, v59, s32 offset:272
+; GFX11-TRUE16-NEXT: scratch_store_b32 off, v60, s32 offset:268
+; GFX11-TRUE16-NEXT: scratch_store_b32 off, v61, s32 offset:264
+; GFX11-TRUE16-NEXT: scratch_store_b32 off, v62, s32 offset:260
+; GFX11-TRUE16-NEXT: scratch_store_b32 off, v63, s32 offset:256
+; GFX11-TRUE16-NEXT: scratch_store_b32 off, v72, s32 offset:252
+; GFX11-TRUE16-NEXT: scratch_store_b32 off, v73, s32 offset:248
+; GFX11-TRUE16-NEXT: scratch_store_b32 off, v74, s32 offset:244
+; GFX11-TRUE16-NEXT: scratch_store_b32 off, v75, s32 offset:240
+; GFX11-TRUE16-NEXT: scratch_store_b32 off, v76, s32 offset:236
+; GFX11-TRUE16-NEXT: scratch_store_b32 off, v77, s32 offset:232
+; GFX11-TRUE16-NEXT: scratch_store_b32 off, v78, s32 offset:228
+; GFX11-TRUE16-NEXT: scratch_store_b32 off, v79, s32 offset:224
+; GFX11-TRUE16-NEXT: scratch_store_b32 off, v88, s32 offset:220
+; GFX11-TRUE16-NEXT: scratch_store_b32 off, v89, s32 offset:216
+; GFX11-TRUE16-NEXT: scratch_store_b32 off, v90, s32 offset:212
+; GFX11-TRUE16-NEXT: scratch_store_b32 off, v91, s32 offset:208
+; GFX11-TRUE16-NEXT: scratch_store_b32 off, v92, s32 offset:204
+; GFX11-TRUE16-NEXT: scratch_store_b32 off, v93, s32 offset:200
+; GFX11-TRUE16-NEXT: scratch_store_b32 off, v94, s32 offset:196
+; GFX11-TRUE16-NEXT: scratch_store_b32 off, v95, s32 offset:192
+; GFX11-TRUE16-NEXT: s_clause 0x1f
+; GFX11-TRUE16-NEXT: scratch_store_b32 off, v104, s32 offset:188
+; GFX11-TRUE16-NEXT: scratch_store_b32 off, v105, s32 offset:184
+; GFX11-TRUE16-NEXT: scratch_store_b32 off, v106, s32 offset:180
+; GFX11-TRUE16-NEXT: scratch_store_b32 off, v107, s32 offset:176
+; GFX11-TRUE16-NEXT: scratch_store_b32 off, v108, s32 offset:172
+; GFX11-TRUE16-NEXT: scratch_store_b32 off, v109, s32 offset:168
+; GFX11-TRUE16-NEXT: scratch_store_b32 off, v110, s32 offset:164
+; GFX11-TRUE16-NEXT: scratch_store_b32 off, v111, s32 offset:160
+; GFX11-TRUE16-NEXT: scratch_store_b32 off, v120, s32 offset:156
+; GFX11-TRUE16-NEXT: scratch_store_b32 off, v121, s32 offset:152
+; GFX11-TRUE16-NEXT: scratch_store_b32 off, v122, s32 offset:148
+; GFX11-TRUE16-NEXT: scratch_store_b32 off, v123, s32 offset:144
+; GFX11-TRUE16-NEXT: scratch_store_b32 off, v124, s32 offset:140
+; GFX11-TRUE16-NEXT: scratch_store_b32 off, v125, s32 offset:136
+; GFX11-TRUE16-NEXT: scratch_store_b32 off, v126, s32 offset:132
+; GFX11-TRUE16-NEXT: scratch_store_b32 off, v127, s32 offset:128
+; GFX11-TRUE16-NEXT: scratch_store_b32 off, v136, s32 offset:124
+; GFX11-TRUE16-NEXT: scratch_store_b32 off, v137, s32 offset:120
+; GFX11-TRUE16-NEXT: scratch_store_b32 off, v138, s32 offset:116
+; GFX11-TRUE16-NEXT: scratch_store_b32 off, v139, s32 offset:112
+; GFX11-TRUE16-NEXT: scratch_store_b32 off, v140, s32 offset:108
+; GFX11-TRUE16-NEXT: scratch_store_b32 off, v141, s32 offset:104
+; GFX11-TRUE16-NEXT: scratch_store_b32 off, v142, s32 offset:100
+; GFX11-TRUE16-NEXT: scratch_store_b32 off, v143, s32 offset:96
+; GFX11-TRUE16-NEXT: scratch_store_b32 off, v152, s32 offset:92
+; GFX11-TRUE16-NEXT: scratch_store_b32 off, v153, s32 offset:88
+; GFX11-TRUE16-NEXT: scratch_store_b32 off, v154, s32 offset:84
+; GFX11-TRUE16-NEXT: scratch_store_b32 off, v155, s32 offset:80
+; GFX11-TRUE16-NEXT: scratch_store_b32 off, v156, s32 offset:76
+; GFX11-TRUE16-NEXT: scratch_store_b32 off, v157, s32 offset:72
+; GFX11-TRUE16-NEXT: scratch_store_b32 off, v158, s32 offset:68
+; GFX11-TRUE16-NEXT: scratch_store_b32 off, v159, s32 offset:64
+; GFX11-TRUE16-NEXT: s_clause 0xf
+; GFX11-TRUE16-NEXT: scratch_store_b32 off, v168, s32 offset:60
+; GFX11-TRUE16-NEXT: scratch_store_b32 off, v169, s32 offset:56
+; GFX11-TRUE16-NEXT: scratch_store_b32 off, v170, s32 offset:52
+; GFX11-TRUE16-NEXT: scratch_store_b32 off, v171, s32 offset:48
+; GFX11-TRUE16-NEXT: scratch_store_b32 off, v172, s32 offset:44
+; GFX11-TRUE16-NEXT: scratch_store_b32 off, v173, s32 offset:40
+; GFX11-TRUE16-NEXT: scratch_store_b32 off, v174, s32 offset:36
+; GFX11-TRUE16-NEXT: scratch_store_b32 off, v175, s32 offset:32
+; GFX11-TRUE16-NEXT: scratch_store_b32 off, v184, s32 offset:28
+; GFX11-TRUE16-NEXT: scratch_store_b32 off, v185, s32 offset:24
+; GFX11-TRUE16-NEXT: scratch_store_b32 off, v186, s32 offset:20
+; GFX11-TRUE16-NEXT: scratch_store_b32 off, v187, s32 offset:16
+; GFX11-TRUE16-NEXT: scratch_store_b32 off, v188, s32 offset:12
+; GFX11-TRUE16-NEXT: scratch_store_b32 off, v189, s32 offset:8
+; GFX11-TRUE16-NEXT: scratch_store_b32 off, v190, s32 offset:4
+; GFX11-TRUE16-NEXT: scratch_store_b32 off, v191, s32
+; GFX11-TRUE16-NEXT: v_dual_mov_b32 v29, v11 :: v_dual_mov_b32 v28, v10
+; GFX11-TRUE16-NEXT: v_dual_mov_b32 v30, v9 :: v_dual_mov_b32 v25, v7
+; GFX11-TRUE16-NEXT: v_dual_mov_b32 v26, v8 :: v_dual_mov_b32 v191, v5
+; GFX11-TRUE16-NEXT: v_dual_mov_b32 v190, v6 :: v_dual_mov_b32 v185, v4
+; GFX11-TRUE16-NEXT: v_dual_mov_b32 v186, v3 :: v_dual_mov_b32 v187, v2
+; GFX11-TRUE16-NEXT: v_dual_mov_b32 v188, v1 :: v_dual_mov_b32 v189, v0
+; GFX11-TRUE16-NEXT: s_lshr_b32 s15, s29, 16
+; GFX11-TRUE16-NEXT: s_lshr_b32 s14, s28, 16
+; GFX11-TRUE16-NEXT: s_lshr_b32 s13, s27, 16
+; GFX11-TRUE16-NEXT: s_lshr_b32 s12, s26, 16
+; GFX11-TRUE16-NEXT: s_lshr_b32 s11, s25, 16
+; GFX11-TRUE16-NEXT: s_lshr_b32 s10, s24, 16
+; GFX11-TRUE16-NEXT: s_lshr_b32 s9, s23, 16
+; GFX11-TRUE16-NEXT: s_lshr_b32 s8, s22, 16
+; GFX11-TRUE16-NEXT: s_lshr_b32 s7, s21, 16
+; GFX11-TRUE16-NEXT: s_lshr_b32 s6, s20, 16
+; GFX11-TRUE16-NEXT: s_lshr_b32 s5, s19, 16
+; GFX11-TRUE16-NEXT: s_lshr_b32 s4, s18, 16
+; GFX11-TRUE16-NEXT: s_lshr_b32 s43, s17, 16
+; GFX11-TRUE16-NEXT: s_lshr_b32 s44, s16, 16
+; GFX11-TRUE16-NEXT: s_lshr_b32 s45, s3, 16
+; GFX11-TRUE16-NEXT: s_lshr_b32 s46, s2, 16
+; GFX11-TRUE16-NEXT: s_lshr_b32 s41, s1, 16
+; GFX11-TRUE16-NEXT: s_lshr_b32 s40, s0, 16
+; GFX11-TRUE16-NEXT: s_mov_b32 s42, 0
+; GFX11-TRUE16-NEXT: s_pack_ll_b32_b16 s40, s0, s40
+; GFX11-TRUE16-NEXT: s_pack_ll_b32_b16 s41, s1, s41
+; GFX11-TRUE16-NEXT: s_pack_ll_b32_b16 s0, s2, s46
+; GFX11-TRUE16-NEXT: s_pack_ll_b32_b16 s1, s3, s45
+; GFX11-TRUE16-NEXT: s_pack_ll_b32_b16 s2, s16, s44
+; GFX11-TRUE16-NEXT: s_pack_ll_b32_b16 s3, s17, s43
+; GFX11-TRUE16-NEXT: s_pack_ll_b32_b16 s4, s18, s4
+; GFX11-TRUE16-NEXT: s_pack_ll_b32_b16 s5, s19, s5
+; GFX11-TRUE16-NEXT: s_pack_ll_b32_b16 s6, s20, s6
+; GFX11-TRUE16-NEXT: s_pack_ll_b32_b16 s7, s21, s7
+; GFX11-TRUE16-NEXT: s_pack_ll_b32_b16 s8, s22, s8
+; GFX11-TRUE16-NEXT: s_pack_ll_b32_b16 s9, s23, s9
+; GFX11-TRUE16-NEXT: s_pack_ll_b32_b16 s10, s24, s10
+; GFX11-TRUE16-NEXT: s_pack_ll_b32_b16 s11, s25, s11
+; GFX11-TRUE16-NEXT: s_pack_ll_b32_b16 s12, s26, s12
+; GFX11-TRUE16-NEXT: s_pack_ll_b32_b16 s13, s27, s13
+; GFX11-TRUE16-NEXT: s_pack_ll_b32_b16 s14, s28, s14
+; GFX11-TRUE16-NEXT: s_pack_ll_b32_b16 s15, s29, s15
; GFX11-TRUE16-NEXT: s_and_b32 s47, vcc_lo, exec_lo
; GFX11-TRUE16-NEXT: s_cbranch_scc0 .LBB55_4
; GFX11-TRUE16-NEXT: ; %bb.1: ; %cmp.false
-; GFX11-TRUE16-NEXT: v_lshl_or_b32 v18, v51, 16, v71
-; GFX11-TRUE16-NEXT: v_lshl_or_b32 v19, v50, 16, v70
-; GFX11-TRUE16-NEXT: v_lshl_or_b32 v20, v49, 16, v69
-; GFX11-TRUE16-NEXT: v_lshl_or_b32 v21, v48, 16, v68
-; GFX11-TRUE16-NEXT: v_lshl_or_b32 v22, v39, 16, v67
-; GFX11-TRUE16-NEXT: v_lshl_or_b32 v23, v38, 16, v66
-; GFX11-TRUE16-NEXT: v_lshl_or_b32 v24, v37, 16, v65
-; GFX11-TRUE16-NEXT: v_lshl_or_b32 v25, v36, 16, v64
-; GFX11-TRUE16-NEXT: v_lshl_or_b32 v26, v35, 16, v55
-; GFX11-TRUE16-NEXT: v_lshl_or_b32 v27, v34, 16, v54
-; GFX11-TRUE16-NEXT: v_lshl_or_b32 v28, v33, 16, v53
-; GFX11-TRUE16-NEXT: v_lshl_or_b32 v29, v32, 16, v52
-; GFX11-TRUE16-NEXT: v_dual_mov_b32 v0, s0 :: v_dual_mov_b32 v1, s1
-; GFX11-TRUE16-NEXT: v_dual_mov_b32 v2, s2 :: v_dual_mov_b32 v3, s3
-; GFX11-TRUE16-NEXT: v_dual_mov_b32 v4, s4 :: v_dual_mov_b32 v5, s5
-; GFX11-TRUE16-NEXT: v_dual_mov_b32 v6, s6 :: v_dual_mov_b32 v7, s7
-; GFX11-TRUE16-NEXT: v_dual_mov_b32 v8, s8 :: v_dual_mov_b32 v9, s9
-; GFX11-TRUE16-NEXT: v_dual_mov_b32 v10, s10 :: v_dual_mov_b32 v11, s11
-; GFX11-TRUE16-NEXT: v_dual_mov_b32 v12, s12 :: v_dual_mov_b32 v13, s13
-; GFX11-TRUE16-NEXT: v_dual_mov_b32 v14, s14 :: v_dual_mov_b32 v15, s16
-; GFX11-TRUE16-NEXT: v_dual_mov_b32 v16, s17 :: v_dual_mov_b32 v17, s18
-; GFX11-TRUE16-NEXT: s_and_not1_b32 vcc_lo, exec_lo, s15
+; GFX11-TRUE16-NEXT: v_dual_mov_b32 v0, s40 :: v_dual_mov_b32 v5, s0
+; GFX11-TRUE16-NEXT: v_dual_mov_b32 v2, s41 :: v_dual_mov_b32 v9, s1
+; GFX11-TRUE16-NEXT: v_dual_mov_b32 v14, s2 :: v_dual_mov_b32 v27, s4
+; GFX11-TRUE16-NEXT: v_dual_mov_b32 v20, s3 :: v_dual_mov_b32 v35, s5
+; GFX11-TRUE16-NEXT: v_dual_mov_b32 v44, s6 :: v_dual_mov_b32 v65, s8
+; GFX11-TRUE16-NEXT: v_dual_mov_b32 v54, s7 :: v_dual_mov_b32 v77, s9
+; GFX11-TRUE16-NEXT: v_dual_mov_b32 v90, s10 :: v_dual_mov_b32 v119, s12
+; GFX11-TRUE16-NEXT: v_dual_mov_b32 v104, s11 :: v_dual_mov_b32 v135, s13
+; GFX11-TRUE16-NEXT: v_mov_b32_e32 v152, s14
+; GFX11-TRUE16-NEXT: v_mov_b32_e32 v170, s15
+; GFX11-TRUE16-NEXT: s_and_not1_b32 vcc_lo, exec_lo, s42
; GFX11-TRUE16-NEXT: s_cbranch_vccnz .LBB55_3
; GFX11-TRUE16-NEXT: .LBB55_2: ; %cmp.true
-; GFX11-TRUE16-NEXT: v_lshl_or_b32 v18, v51, 16, v71
-; GFX11-TRUE16-NEXT: v_lshl_or_b32 v19, v50, 16, v70
-; GFX11-TRUE16-NEXT: v_lshl_or_b32 v20, v49, 16, v69
-; GFX11-TRUE16-NEXT: v_lshl_or_b32 v21, v48, 16, v68
-; GFX11-TRUE16-NEXT: v_lshl_or_b32 v22, v39, 16, v67
-; GFX11-TRUE16-NEXT: v_lshl_or_b32 v23, v38, 16, v66
-; GFX11-TRUE16-NEXT: v_lshl_or_b32 v24, v37, 16, v65
-; GFX11-TRUE16-NEXT: v_lshl_or_b32 v25, v36, 16, v64
-; GFX11-TRUE16-NEXT: v_lshl_or_b32 v26, v35, 16, v55
-; GFX11-TRUE16-NEXT: v_lshl_or_b32 v27, v34, 16, v54
-; GFX11-TRUE16-NEXT: v_lshl_or_b32 v28, v33, 16, v53
-; GFX11-TRUE16-NEXT: v_lshl_or_b32 v29, v32, 16, v52
-; GFX11-TRUE16-NEXT: v_pk_add_f16 v0, 0x200, s0 op_sel_hi:[0,1]
-; GFX11-TRUE16-NEXT: v_pk_add_f16 v1, 0x200, s1 op_sel_hi:[0,1]
-; GFX11-TRUE16-NEXT: v_pk_add_f16 v2, 0x200, s2 op_sel_hi:[0,1]
-; GFX11-TRUE16-NEXT: v_pk_add_f16 v3, 0x200, s3 op_sel_hi:[0,1]
-; GFX11-TRUE16-NEXT: v_pk_add_f16 v4, 0x200, s4 op_sel_hi:[0,1]
-; GFX11-TRUE16-NEXT: v_pk_add_f16 v5, 0x200, s5 op_sel_hi:[0,1]
-; GFX11-TRUE16-NEXT: v_pk_add_f16 v6, 0x200, s6 op_sel_hi:[0,1]
-; GFX11-TRUE16-NEXT: v_pk_add_f16 v7, 0x200, s7 op_sel_hi:[0,1]
-; GFX11-TRUE16-NEXT: v_pk_add_f16 v8, 0x200, s8 op_sel_hi:[0,1]
-; GFX11-TRUE16-NEXT: v_pk_add_f16 v9, 0x200, s9 op_sel_hi:[0,1]
-; GFX11-TRUE16-NEXT: v_pk_add_f16 v10, 0x200, s10 op_sel_hi:[0,1]
-; GFX11-TRUE16-NEXT: v_pk_add_f16 v11, 0x200, s11 op_sel_hi:[0,1]
-; GFX11-TRUE16-NEXT: v_pk_add_f16 v12, 0x200, s12 op_sel_hi:[0,1]
-; GFX11-TRUE16-NEXT: v_pk_add_f16 v13, 0x200, s13 op_sel_hi:[0,1]
-; GFX11-TRUE16-NEXT: v_pk_add_f16 v14, 0x200, s14 op_sel_hi:[0,1]
-; GFX11-TRUE16-NEXT: v_pk_add_f16 v15, 0x200, s16 op_sel_hi:[0,1]
-; GFX11-TRUE16-NEXT: v_pk_add_f16 v16, 0x200, s17 op_sel_hi:[0,1]
-; GFX11-TRUE16-NEXT: v_pk_add_f16 v17, 0x200, s18 op_sel_hi:[0,1]
-; GFX11-TRUE16-NEXT: v_pk_add_f16 v18, 0x200, v18 op_sel_hi:[0,1]
-; GFX11-TRUE16-NEXT: v_pk_add_f16 v19, 0x200, v19 op_sel_hi:[0,1]
-; GFX11-TRUE16-NEXT: v_pk_add_f16 v20, 0x200, v20 op_sel_hi:[0,1]
-; GFX11-TRUE16-NEXT: v_pk_add_f16 v21, 0x200, v21 op_sel_hi:[0,1]
-; GFX11-TRUE16-NEXT: v_pk_add_f16 v22, 0x200, v22 op_sel_hi:[0,1]
-; GFX11-TRUE16-NEXT: v_pk_add_f16 v23, 0x200, v23 op_sel_hi:[0,1]
-; GFX11-TRUE16-NEXT: v_pk_add_f16 v24, 0x200, v24 op_sel_hi:[0,1]
+; GFX11-TRUE16-NEXT: v_pk_add_f16 v0, 0x200, s40 op_sel_hi:[0,1]
+; GFX11-TRUE16-NEXT: v_pk_add_f16 v2, 0x200, s41 op_sel_hi:[0,1]
+; GFX11-TRUE16-NEXT: v_pk_add_f16 v189, 0x200, v189 op_sel_hi:[0,1]
+; GFX11-TRUE16-NEXT: v_pk_add_f16 v188, 0x200, v188 op_sel_hi:[0,1]
+; GFX11-TRUE16-NEXT: v_pk_add_f16 v187, 0x200, v187 op_sel_hi:[0,1]
+; GFX11-TRUE16-NEXT: v_pk_add_f16 v186, 0x200, v186 op_sel_hi:[0,1]
+; GFX11-TRUE16-NEXT: v_pk_add_f16 v185, 0x200, v185 op_sel_hi:[0,1]
+; GFX11-TRUE16-NEXT: v_pk_add_f16 v191, 0x200, v191 op_sel_hi:[0,1]
+; GFX11-TRUE16-NEXT: v_pk_add_f16 v190, 0x200, v190 op_sel_hi:[0,1]
; GFX11-TRUE16-NEXT: v_pk_add_f16 v25, 0x200, v25 op_sel_hi:[0,1]
; GFX11-TRUE16-NEXT: v_pk_add_f16 v26, 0x200, v26 op_sel_hi:[0,1]
-; GFX11-TRUE16-NEXT: v_pk_add_f16 v27, 0x200, v27 op_sel_hi:[0,1]
+; GFX11-TRUE16-NEXT: v_pk_add_f16 v30, 0x200, v30 op_sel_hi:[0,1]
; GFX11-TRUE16-NEXT: v_pk_add_f16 v28, 0x200, v28 op_sel_hi:[0,1]
; GFX11-TRUE16-NEXT: v_pk_add_f16 v29, 0x200, v29 op_sel_hi:[0,1]
+; GFX11-TRUE16-NEXT: v_pk_add_f16 v5, 0x200, s0 op_sel_hi:[0,1]
+; GFX11-TRUE16-NEXT: v_pk_add_f16 v9, 0x200, s1 op_sel_hi:[0,1]
+; GFX11-TRUE16-NEXT: v_pk_add_f16 v14, 0x200, s2 op_sel_hi:[0,1]
+; GFX11-TRUE16-NEXT: v_pk_add_f16 v20, 0x200, s3 op_sel_hi:[0,1]
+; GFX11-TRUE16-NEXT: v_pk_add_f16 v27, 0x200, s4 op_sel_hi:[0,1]
+; GFX11-TRUE16-NEXT: v_pk_add_f16 v35, 0x200, s5 op_sel_hi:[0,1]
+; GFX11-TRUE16-NEXT: v_pk_add_f16 v44, 0x200, s6 op_sel_hi:[0,1]
+; GFX11-TRUE16-NEXT: v_pk_add_f16 v54, 0x200, s7 op_sel_hi:[0,1]
+; GFX11-TRUE16-NEXT: v_pk_add_f16 v65, 0x200, s8 op_sel_hi:[0,1]
+; GFX11-TRUE16-NEXT: v_pk_add_f16 v77, 0x200, s9 op_sel_hi:[0,1]
+; GFX11-TRUE16-NEXT: v_pk_add_f16 v90, 0x200, s10 op_sel_hi:[0,1]
+; GFX11-TRUE16-NEXT: v_pk_add_f16 v104, 0x200, s11 op_sel_hi:[0,1]
+; GFX11-TRUE16-NEXT: v_pk_add_f16 v119, 0x200, s12 op_sel_hi:[0,1]
+; GFX11-TRUE16-NEXT: v_pk_add_f16 v135, 0x200, s13 op_sel_hi:[0,1]
+; GFX11-TRUE16-NEXT: v_pk_add_f16 v152, 0x200, s14 op_sel_hi:[0,1]
+; GFX11-TRUE16-NEXT: v_pk_add_f16 v170, 0x200, s15 op_sel_hi:[0,1]
; GFX11-TRUE16-NEXT: .LBB55_3: ; %end
+; GFX11-TRUE16-NEXT: v_dual_mov_b32 v1, v2 :: v_dual_mov_b32 v2, v5
+; GFX11-TRUE16-NEXT: v_dual_mov_b32 v5, v20 :: v_dual_mov_b32 v6, v27
+; GFX11-TRUE16-NEXT: v_dual_mov_b32 v7, v35 :: v_dual_mov_b32 v8, v44
+; GFX11-TRUE16-NEXT: v_dual_mov_b32 v11, v77 :: v_dual_mov_b32 v12, v90
+; GFX11-TRUE16-NEXT: v_mov_b32_e32 v13, v104
+; GFX11-TRUE16-NEXT: v_dual_mov_b32 v15, v135 :: v_dual_mov_b32 v16, v152
+; GFX11-TRUE16-NEXT: v_dual_mov_b32 v17, v170 :: v_dual_mov_b32 v18, v189
+; GFX11-TRUE16-NEXT: v_dual_mov_b32 v19, v188 :: v_dual_mov_b32 v20, v187
+; GFX11-TRUE16-NEXT: v_dual_mov_b32 v21, v186 :: v_dual_mov_b32 v22, v185
+; GFX11-TRUE16-NEXT: v_dual_mov_b32 v23, v191 :: v_dual_mov_b32 v24, v190
+; GFX11-TRUE16-NEXT: s_clause 0x1f
+; GFX11-TRUE16-NEXT: scratch_load_b32 v191, off, s32
+; GFX11-TRUE16-NEXT: scratch_load_b32 v190, off, s32 offset:4
+; GFX11-TRUE16-NEXT: scratch_load_b32 v189, off, s32 offset:8
+; GFX11-TRUE16-NEXT: scratch_load_b32 v188, off, s32 offset:12
+; GFX11-TRUE16-NEXT: scratch_load_b32 v187, off, s32 offset:16
+; GFX11-TRUE16-NEXT: scratch_load_b32 v186, off, s32 offset:20
+; GFX11-TRUE16-NEXT: scratch_load_b32 v185, off, s32 offset:24
+; GFX11-TRUE16-NEXT: scratch_load_b32 v184, off, s32 offset:28
+; GFX11-TRUE16-NEXT: scratch_load_b32 v175, off, s32 offset:32
+; GFX11-TRUE16-NEXT: scratch_load_b32 v174, off, s32 offset:36
+; GFX11-TRUE16-NEXT: scratch_load_b32 v173, off, s32 offset:40
+; GFX11-TRUE16-NEXT: scratch_load_b32 v172, off, s32 offset:44
+; GFX11-TRUE16-NEXT: scratch_load_b32 v171, off, s32 offset:48
+; GFX11-TRUE16-NEXT: scratch_load_b32 v170, off, s32 offset:52
+; GFX11-TRUE16-NEXT: scratch_load_b32 v169, off, s32 offset:56
+; GFX11-TRUE16-NEXT: scratch_load_b32 v168, off, s32 offset:60
+; GFX11-TRUE16-NEXT: scratch_load_b32 v159, off, s32 offset:64
+; GFX11-TRUE16-NEXT: scratch_load_b32 v158, off, s32 offset:68
+; GFX11-TRUE16-NEXT: scratch_load_b32 v157, off, s32 offset:72
+; GFX11-TRUE16-NEXT: scratch_load_b32 v156, off, s32 offset:76
+; GFX11-TRUE16-NEXT: scratch_load_b32 v155, off, s32 offset:80
+; GFX11-TRUE16-NEXT: scratch_load_b32 v154, off, s32 offset:84
+; GFX11-TRUE16-NEXT: scratch_load_b32 v153, off, s32 offset:88
+; GFX11-TRUE16-NEXT: scratch_load_b32 v152, off, s32 offset:92
+; GFX11-TRUE16-NEXT: scratch_load_b32 v143, off, s32 offset:96
+; GFX11-TRUE16-NEXT: scratch_load_b32 v142, off, s32 offset:100
+; GFX11-TRUE16-NEXT: scratch_load_b32 v141, off, s32 offset:104
+; GFX11-TRUE16-NEXT: scratch_load_b32 v140, off, s32 offset:108
+; GFX11-TRUE16-NEXT: scratch_load_b32 v139, off, s32 offset:112
+; GFX11-TRUE16-NEXT: scratch_load_b32 v138, off, s32 offset:116
+; GFX11-TRUE16-NEXT: scratch_load_b32 v137, off, s32 offset:120
+; GFX11-TRUE16-NEXT: scratch_load_b32 v136, off, s32 offset:124
+; GFX11-TRUE16-NEXT: s_clause 0x1f
+; GFX11-TRUE16-NEXT: scratch_load_b32 v127, off, s32 offset:128
+; GFX11-TRUE16-NEXT: scratch_load_b32 v126, off, s32 offset:132
+; GFX11-TRUE16-NEXT: scratch_load_b32 v125, off, s32 offset:136
+; GFX11-TRUE16-NEXT: scratch_load_b32 v124, off, s32 offset:140
+; GFX11-TRUE16-NEXT: scratch_load_b32 v123, off, s32 offset:144
+; GFX11-TRUE16-NEXT: scratch_load_b32 v122, off, s32 offset:148
+; GFX11-TRUE16-NEXT: scratch_load_b32 v121, off, s32 offset:152
+; GFX11-TRUE16-NEXT: scratch_load_b32 v120, off, s32 offset:156
+; GFX11-TRUE16-NEXT: scratch_load_b32 v111, off, s32 offset:160
+; GFX11-TRUE16-NEXT: scratch_load_b32 v110, off, s32 offset:164
+; GFX11-TRUE16-NEXT: scratch_load_b32 v109, off, s32 offset:168
+; GFX11-TRUE16-NEXT: scratch_load_b32 v108, off, s32 offset:172
+; GFX11-TRUE16-NEXT: scratch_load_b32 v107, off, s32 offset:176
+; GFX11-TRUE16-NEXT: scratch_load_b32 v106, off, s32 offset:180
+; GFX11-TRUE16-NEXT: scratch_load_b32 v105, off, s32 offset:184
+; GFX11-TRUE16-NEXT: scratch_load_b32 v104, off, s32 offset:188
+; GFX11-TRUE16-NEXT: scratch_load_b32 v95, off, s32 offset:192
+; GFX11-TRUE16-NEXT: scratch_load_b32 v94, off, s32 offset:196
+; GFX11-TRUE16-NEXT: scratch_load_b32 v93, off, s32 offset:200
+; GFX11-TRUE16-NEXT: scratch_load_b32 v92, off, s32 offset:204
+; GFX11-TRUE16-NEXT: scratch_load_b32 v91, off, s32 offset:208
+; GFX11-TRUE16-NEXT: scratch_load_b32 v90, off, s32 offset:212
+; GFX11-TRUE16-NEXT: scratch_load_b32 v89, off, s32 offset:216
+; GFX11-TRUE16-NEXT: scratch_load_b32 v88, off, s32 offset:220
+; GFX11-TRUE16-NEXT: scratch_load_b32 v79, off, s32 offset:224
+; GFX11-TRUE16-NEXT: scratch_load_b32 v78, off, s32 offset:228
+; GFX11-TRUE16-NEXT: scratch_load_b32 v77, off, s32 offset:232
+; GFX11-TRUE16-NEXT: scratch_load_b32 v76, off, s32 offset:236
+; GFX11-TRUE16-NEXT: scratch_load_b32 v75, off, s32 offset:240
+; GFX11-TRUE16-NEXT: scratch_load_b32 v74, off, s32 offset:244
+; GFX11-TRUE16-NEXT: scratch_load_b32 v73, off, s32 offset:248
+; GFX11-TRUE16-NEXT: scratch_load_b32 v72, off, s32 offset:252
+; GFX11-TRUE16-NEXT: s_clause 0xf
+; GFX11-TRUE16-NEXT: scratch_load_b32 v63, off, s32 offset:256
+; GFX11-TRUE16-NEXT: scratch_load_b32 v62, off, s32 offset:260
+; GFX11-TRUE16-NEXT: scratch_load_b32 v61, off, s32 offset:264
+; GFX11-TRUE16-NEXT: scratch_load_b32 v60, off, s32 offset:268
+; GFX11-TRUE16-NEXT: scratch_load_b32 v59, off, s32 offset:272
+; GFX11-TRUE16-NEXT: scratch_load_b32 v58, off, s32 offset:276
+; GFX11-TRUE16-NEXT: scratch_load_b32 v57, off, s32 offset:280
+; GFX11-TRUE16-NEXT: scratch_load_b32 v56, off, s32 offset:284
+; GFX11-TRUE16-NEXT: scratch_load_b32 v47, off, s32 offset:288
+; GFX11-TRUE16-NEXT: scratch_load_b32 v46, off, s32 offset:292
+; GFX11-TRUE16-NEXT: scratch_load_b32 v45, off, s32 offset:296
+; GFX11-TRUE16-NEXT: scratch_load_b32 v44, off, s32 offset:300
+; GFX11-TRUE16-NEXT: scratch_load_b32 v43, off, s32 offset:304
+; GFX11-TRUE16-NEXT: scratch_load_b32 v42, off, s32 offset:308
+; GFX11-TRUE16-NEXT: scratch_load_b32 v41, off, s32 offset:312
+; GFX11-TRUE16-NEXT: scratch_load_b32 v40, off, s32 offset:316
+; GFX11-TRUE16-NEXT: v_dual_mov_b32 v3, v9 :: v_dual_mov_b32 v4, v14
+; GFX11-TRUE16-NEXT: v_dual_mov_b32 v9, v54 :: v_dual_mov_b32 v10, v65
+; GFX11-TRUE16-NEXT: v_dual_mov_b32 v14, v119 :: v_dual_mov_b32 v27, v30
+; GFX11-TRUE16-NEXT: s_waitcnt vmcnt(0)
; GFX11-TRUE16-NEXT: s_setpc_b64 s[30:31]
; GFX11-TRUE16-NEXT: .LBB55_4:
+; GFX11-TRUE16-NEXT: v_dual_mov_b32 v64, v29 :: v_dual_mov_b32 v65, v28
+; GFX11-TRUE16-NEXT: v_mov_b32_e32 v66, v30
+; GFX11-TRUE16-NEXT: v_dual_mov_b32 v53, v26 :: v_dual_mov_b32 v54, v25
; GFX11-TRUE16-NEXT: ; implicit-def: $vgpr0_vgpr1_vgpr2_vgpr3_vgpr4_vgpr5_vgpr6_vgpr7_vgpr8_vgpr9_vgpr10_vgpr11_vgpr12_vgpr13_vgpr14_vgpr15_vgpr16_vgpr17_vgpr18_vgpr19_vgpr20_vgpr21_vgpr22_vgpr23_vgpr24_vgpr25_vgpr26_vgpr27_vgpr28_vgpr29_vgpr30_vgpr31
+; GFX11-TRUE16-NEXT: ; implicit-def: $vgpr1_vgpr2_vgpr3_vgpr4_vgpr5_vgpr6_vgpr7_vgpr8_vgpr9_vgpr10_vgpr11_vgpr12_vgpr13_vgpr14_vgpr15_vgpr16_vgpr17_vgpr18_vgpr19_vgpr20_vgpr21_vgpr22_vgpr23_vgpr24_vgpr25_vgpr26_vgpr27_vgpr28_vgpr29_vgpr30_vgpr31_vgpr32
+; GFX11-TRUE16-NEXT: ; implicit-def: $vgpr3_vgpr4_vgpr5_vgpr6_vgpr7_vgpr8_vgpr9_vgpr10_vgpr11_vgpr12_vgpr13_vgpr14_vgpr15_vgpr16_vgpr17_vgpr18_vgpr19_vgpr20_vgpr21_vgpr22_vgpr23_vgpr24_vgpr25_vgpr26_vgpr27_vgpr28_vgpr29_vgpr30_vgpr31_vgpr32_vgpr33_vgpr34
+; GFX11-TRUE16-NEXT: ; implicit-def: $vgpr6_vgpr7_vgpr8_vgpr9_vgpr10_vgpr11_vgpr12_vgpr13_vgpr14_vgpr15_vgpr16_vgpr17_vgpr18_vgpr19_vgpr20_vgpr21_vgpr22_vgpr23_vgpr24_vgpr25_vgpr26_vgpr27_vgpr28_vgpr29_vgpr30_vgpr31_vgpr32_vgpr33_vgpr34_vgpr35_vgpr36_vgpr37
+; GFX11-TRUE16-NEXT: ; implicit-def: $vgpr10_vgpr11_vgpr12_vgpr13_vgpr14_vgpr15_vgpr16_vgpr17_vgpr18_vgpr19_vgpr20_vgpr21_vgpr22_vgpr23_vgpr24_vgpr25_vgpr26_vgpr27_vgpr28_vgpr29_vgpr30_vgpr31_vgpr32_vgpr33_vgpr34_vgpr35_vgpr36_vgpr37_vgpr38_vgpr39_vgpr40_vgpr41
+; GFX11-TRUE16-NEXT: ; implicit-def: $vgpr15_vgpr16_vgpr17_vgpr18_vgpr19_vgpr20_vgpr21_vgpr22_vgpr23_vgpr24_vgpr25_vgpr26_vgpr27_vgpr28_vgpr29_vgpr30_vgpr31_vgpr32_vgpr33_vgpr34_vgpr35_vgpr36_vgpr37_vgpr38_vgpr39_vgpr40_vgpr41_vgpr42_vgpr43_vgpr44_vgpr45_vgpr46
+; GFX11-TRUE16-NEXT: ; implicit-def: $vgpr21_vgpr22_vgpr23_vgpr24_vgpr25_vgpr26_vgpr27_vgpr28_vgpr29_vgpr30_vgpr31_vgpr32_vgpr33_vgpr34_vgpr35_vgpr36_vgpr37_vgpr38_vgpr39_vgpr40_vgpr41_vgpr42_vgpr43_vgpr44_vgpr45_vgpr46_vgpr47_vgpr48_vgpr49_vgpr50_vgpr51_vgpr52
+; GFX11-TRUE16-NEXT: s_delay_alu instid0(VALU_DEP_1) | instskip(NEXT) | instid1(VALU_DEP_4)
+; GFX11-TRUE16-NEXT: v_dual_mov_b32 v25, v54 :: v_dual_mov_b32 v26, v53
+; GFX11-TRUE16-NEXT: ; implicit-def: $vgpr28_vgpr29_vgpr30_vgpr31_vgpr32_vgpr33_vgpr34_vgpr35_vgpr36_vgpr37_vgpr38_vgpr39_vgpr40_vgpr41_vgpr42_vgpr43_vgpr44_vgpr45_vgpr46_vgpr47_vgpr48_vgpr49_vgpr50_vgpr51_vgpr52_vgpr53_vgpr54_vgpr55_vgpr56_vgpr57_vgpr58_vgpr59
+; GFX11-TRUE16-NEXT: v_dual_mov_b32 v28, v65 :: v_dual_mov_b32 v29, v64
+; GFX11-TRUE16-NEXT: s_delay_alu instid0(VALU_DEP_4)
+; GFX11-TRUE16-NEXT: v_mov_b32_e32 v30, v66
+; GFX11-TRUE16-NEXT: ; implicit-def: $vgpr36_vgpr37_vgpr38_vgpr39_vgpr40_vgpr41_vgpr42_vgpr43_vgpr44_vgpr45_vgpr46_vgpr47_vgpr48_vgpr49_vgpr50_vgpr51_vgpr52_vgpr53_vgpr54_vgpr55_vgpr56_vgpr57_vgpr58_vgpr59_vgpr60_vgpr61_vgpr62_vgpr63_vgpr64_vgpr65_vgpr66_vgpr67
+; GFX11-TRUE16-NEXT: ; implicit-def: $vgpr45_vgpr46_vgpr47_vgpr48_vgpr49_vgpr50_vgpr51_vgpr52_vgpr53_vgpr54_vgpr55_vgpr56_vgpr57_vgpr58_vgpr59_vgpr60_vgpr61_vgpr62_vgpr63_vgpr64_vgpr65_vgpr66_vgpr67_vgpr68_vgpr69_vgpr70_vgpr71_vgpr72_vgpr73_vgpr74_vgpr75_vgpr76
+; GFX11-TRUE16-NEXT: ; implicit-def: $vgpr55_vgpr56_vgpr57_vgpr58_vgpr59_vgpr60_vgpr61_vgpr62_vgpr63_vgpr64_vgpr65_vgpr66_vgpr67_vgpr68_vgpr69_vgpr70_vgpr71_vgpr72_vgpr73_vgpr74_vgpr75_vgpr76_vgpr77_vgpr78_vgpr79_vgpr80_vgpr81_vgpr82_vgpr83_vgpr84_vgpr85_vgpr86
+; GFX11-TRUE16-NEXT: ; implicit-def: $vgpr66_vgpr67_vgpr68_vgpr69_vgpr70_vgpr71_vgpr72_vgpr73_vgpr74_vgpr75_vgpr76_vgpr77_vgpr78_vgpr79_vgpr80_vgpr81_vgpr82_vgpr83_vgpr84_vgpr85_vgpr86_vgpr87_vgpr88_vgpr89_vgpr90_vgpr91_vgpr92_vgpr93_vgpr94_vgpr95_vgpr96_vgpr97
+; GFX11-TRUE16-NEXT: ; implicit-def: $vgpr78_vgpr79_vgpr80_vgpr81_vgpr82_vgpr83_vgpr84_vgpr85_vgpr86_vgpr87_vgpr88_vgpr89_vgpr90_vgpr91_vgpr92_vgpr93_vgpr94_vgpr95_vgpr96_vgpr97_vgpr98_vgpr99_vgpr100_vgpr101_vgpr102_vgpr103_vgpr104_vgpr105_vgpr106_vgpr107_vgpr108_vgpr109
+; GFX11-TRUE16-NEXT: ; implicit-def: $vgpr91_vgpr92_vgpr93_vgpr94_vgpr95_vgpr96_vgpr97_vgpr98_vgpr99_vgpr100_vgpr101_vgpr102_vgpr103_vgpr104_vgpr105_vgpr106_vgpr107_vgpr108_vgpr109_vgpr110_vgpr111_vgpr112_vgpr113_vgpr114_vgpr115_vgpr116_vgpr117_vgpr118_vgpr119_vgpr120_vgpr121_vgpr122
+; GFX11-TRUE16-NEXT: ; implicit-def: $vgpr105_vgpr106_vgpr107_vgpr108_vgpr109_vgpr110_vgpr111_vgpr112_vgpr113_vgpr114_vgpr115_vgpr116_vgpr117_vgpr118_vgpr119_vgpr120_vgpr121_vgpr122_vgpr123_vgpr124_vgpr125_vgpr126_vgpr127_vgpr128_vgpr129_vgpr130_vgpr131_vgpr132_vgpr133_vgpr134_vgpr135_vgpr136
+; GFX11-TRUE16-NEXT: ; implicit-def: $vgpr120_vgpr121_vgpr122_vgpr123_vgpr124_vgpr125_vgpr126_vgpr127_vgpr128_vgpr129_vgpr130_vgpr131_vgpr132_vgpr133_vgpr134_vgpr135_vgpr136_vgpr137_vgpr138_vgpr139_vgpr140_vgpr141_vgpr142_vgpr143_vgpr144_vgpr145_vgpr146_vgpr147_vgpr148_vgpr149_vgpr150_vgpr151
+; GFX11-TRUE16-NEXT: ; implicit-def: $vgpr136_vgpr137_vgpr138_vgpr139_vgpr140_vgpr141_vgpr142_vgpr143_vgpr144_vgpr145_vgpr146_vgpr147_vgpr148_vgpr149_vgpr150_vgpr151_vgpr152_vgpr153_vgpr154_vgpr155_vgpr156_vgpr157_vgpr158_vgpr159_vgpr160_vgpr161_vgpr162_vgpr163_vgpr164_vgpr165_vgpr166_vgpr167
+; GFX11-TRUE16-NEXT: ; implicit-def: $vgpr153_vgpr154_vgpr155_vgpr156_vgpr157_vgpr158_vgpr159_vgpr160_vgpr161_vgpr162_vgpr163_vgpr164_vgpr165_vgpr166_vgpr167_vgpr168_vgpr169_vgpr170_vgpr171_vgpr172_vgpr173_vgpr174_vgpr175_vgpr176_vgpr177_vgpr178_vgpr179_vgpr180_vgpr181_vgpr182_vgpr183_vgpr184
; GFX11-TRUE16-NEXT: s_branch .LBB55_2
;
; GFX11-FAKE16-LABEL: bitcast_v60f16_to_v15f64_scalar:
@@ -49421,31 +51373,14 @@ define inreg <60 x half> @bitcast_v60i16_to_v60f16_scalar(<60 x i16> inreg %a, i
; GFX11-TRUE16-LABEL: bitcast_v60i16_to_v60f16_scalar:
; GFX11-TRUE16: ; %bb.0:
; GFX11-TRUE16-NEXT: s_waitcnt vmcnt(0) expcnt(0) lgkmcnt(0)
-; GFX11-TRUE16-NEXT: v_mov_b16_e32 v29.h, 0
; GFX11-TRUE16-NEXT: v_cmp_ne_u32_e32 vcc_lo, 0, v12
-; GFX11-TRUE16-NEXT: v_mov_b16_e32 v29.l, v11.h
-; GFX11-TRUE16-NEXT: v_mov_b16_e32 v28.l, v10.h
-; GFX11-TRUE16-NEXT: v_mov_b16_e32 v27.l, v9.h
-; GFX11-TRUE16-NEXT: v_mov_b16_e32 v28.h, v29.h
-; GFX11-TRUE16-NEXT: v_mov_b16_e32 v27.h, v29.h
-; GFX11-TRUE16-NEXT: v_mov_b16_e32 v26.l, v8.h
-; GFX11-TRUE16-NEXT: v_mov_b16_e32 v26.h, v29.h
-; GFX11-TRUE16-NEXT: v_mov_b16_e32 v25.l, v7.h
-; GFX11-TRUE16-NEXT: v_mov_b16_e32 v25.h, v29.h
-; GFX11-TRUE16-NEXT: v_mov_b16_e32 v24.l, v6.h
-; GFX11-TRUE16-NEXT: v_mov_b16_e32 v24.h, v29.h
-; GFX11-TRUE16-NEXT: v_mov_b16_e32 v23.l, v5.h
-; GFX11-TRUE16-NEXT: v_mov_b16_e32 v23.h, v29.h
-; GFX11-TRUE16-NEXT: v_mov_b16_e32 v22.l, v4.h
-; GFX11-TRUE16-NEXT: v_mov_b16_e32 v22.h, v29.h
-; GFX11-TRUE16-NEXT: v_mov_b16_e32 v21.l, v3.h
-; GFX11-TRUE16-NEXT: v_mov_b16_e32 v21.h, v29.h
-; GFX11-TRUE16-NEXT: v_mov_b16_e32 v20.l, v2.h
-; GFX11-TRUE16-NEXT: v_mov_b16_e32 v20.h, v29.h
-; GFX11-TRUE16-NEXT: v_mov_b16_e32 v19.l, v1.h
-; GFX11-TRUE16-NEXT: v_mov_b16_e32 v19.h, v29.h
-; GFX11-TRUE16-NEXT: v_mov_b16_e32 v18.l, v0.h
-; GFX11-TRUE16-NEXT: v_mov_b16_e32 v18.h, v29.h
+; GFX11-TRUE16-NEXT: v_dual_mov_b32 v29, v11 :: v_dual_mov_b32 v28, v10
+; GFX11-TRUE16-NEXT: v_dual_mov_b32 v27, v9 :: v_dual_mov_b32 v26, v8
+; GFX11-TRUE16-NEXT: v_dual_mov_b32 v25, v7 :: v_dual_mov_b32 v24, v6
+; GFX11-TRUE16-NEXT: v_dual_mov_b32 v23, v5 :: v_dual_mov_b32 v22, v4
+; GFX11-TRUE16-NEXT: v_dual_mov_b32 v21, v3 :: v_dual_mov_b32 v20, v2
+; GFX11-TRUE16-NEXT: v_dual_mov_b32 v19, v1 :: v_dual_mov_b32 v18, v0
+; GFX11-TRUE16-NEXT: v_mov_b16_e32 v30.h, 0
; GFX11-TRUE16-NEXT: s_lshr_b32 s44, s29, 16
; GFX11-TRUE16-NEXT: s_lshr_b32 s43, s28, 16
; GFX11-TRUE16-NEXT: s_lshr_b32 s42, s27, 16
@@ -49467,34 +51402,33 @@ define inreg <60 x half> @bitcast_v60i16_to_v60f16_scalar(<60 x i16> inreg %a, i
; GFX11-TRUE16-NEXT: s_mov_b32 s46, 0
; GFX11-TRUE16-NEXT: s_and_b32 s47, vcc_lo, exec_lo
; GFX11-TRUE16-NEXT: s_cbranch_scc0 .LBB57_3
-; GFX11-TRUE16-NEXT: ; %bb.1: ; %Flow
+; GFX11-TRUE16-NEXT: ; %bb.1: ; %cmp.false
+; GFX11-TRUE16-NEXT: v_mov_b16_e32 v30.l, v29.h
+; GFX11-TRUE16-NEXT: v_mov_b16_e32 v31.l, v28.h
+; GFX11-TRUE16-NEXT: v_mov_b16_e32 v31.h, v30.h
+; GFX11-TRUE16-NEXT: v_mov_b16_e32 v32.l, v27.h
+; GFX11-TRUE16-NEXT: v_mov_b16_e32 v32.h, v30.h
+; GFX11-TRUE16-NEXT: v_mov_b16_e32 v33.l, v26.h
+; GFX11-TRUE16-NEXT: v_mov_b16_e32 v33.h, v30.h
+; GFX11-TRUE16-NEXT: v_mov_b16_e32 v34.l, v25.h
+; GFX11-TRUE16-NEXT: v_mov_b16_e32 v34.h, v30.h
+; GFX11-TRUE16-NEXT: v_mov_b16_e32 v35.l, v24.h
+; GFX11-TRUE16-NEXT: v_mov_b16_e32 v35.h, v30.h
+; GFX11-TRUE16-NEXT: v_mov_b16_e32 v36.l, v23.h
+; GFX11-TRUE16-NEXT: v_mov_b16_e32 v36.h, v30.h
+; GFX11-TRUE16-NEXT: v_mov_b16_e32 v37.l, v22.h
+; GFX11-TRUE16-NEXT: v_mov_b16_e32 v37.h, v30.h
+; GFX11-TRUE16-NEXT: v_mov_b16_e32 v38.l, v21.h
+; GFX11-TRUE16-NEXT: v_mov_b16_e32 v38.h, v30.h
+; GFX11-TRUE16-NEXT: v_mov_b16_e32 v39.l, v20.h
+; GFX11-TRUE16-NEXT: v_mov_b16_e32 v39.h, v30.h
+; GFX11-TRUE16-NEXT: v_mov_b16_e32 v48.l, v19.h
+; GFX11-TRUE16-NEXT: v_mov_b16_e32 v48.h, v30.h
+; GFX11-TRUE16-NEXT: v_mov_b16_e32 v49.l, v18.h
+; GFX11-TRUE16-NEXT: v_mov_b16_e32 v49.h, v30.h
; GFX11-TRUE16-NEXT: s_and_not1_b32 vcc_lo, exec_lo, s46
; GFX11-TRUE16-NEXT: s_cbranch_vccnz .LBB57_4
; GFX11-TRUE16-NEXT: .LBB57_2: ; %cmp.true
-; GFX11-TRUE16-NEXT: v_and_b32_e32 v11, 0xffff, v11
-; GFX11-TRUE16-NEXT: v_and_b32_e32 v10, 0xffff, v10
-; GFX11-TRUE16-NEXT: v_and_b32_e32 v9, 0xffff, v9
-; GFX11-TRUE16-NEXT: v_and_b32_e32 v8, 0xffff, v8
-; GFX11-TRUE16-NEXT: v_and_b32_e32 v7, 0xffff, v7
-; GFX11-TRUE16-NEXT: v_and_b32_e32 v6, 0xffff, v6
-; GFX11-TRUE16-NEXT: v_and_b32_e32 v5, 0xffff, v5
-; GFX11-TRUE16-NEXT: v_and_b32_e32 v4, 0xffff, v4
-; GFX11-TRUE16-NEXT: v_and_b32_e32 v3, 0xffff, v3
-; GFX11-TRUE16-NEXT: v_and_b32_e32 v2, 0xffff, v2
-; GFX11-TRUE16-NEXT: v_and_b32_e32 v1, 0xffff, v1
-; GFX11-TRUE16-NEXT: v_and_b32_e32 v0, 0xffff, v0
-; GFX11-TRUE16-NEXT: v_lshl_or_b32 v11, v29, 16, v11
-; GFX11-TRUE16-NEXT: v_lshl_or_b32 v10, v28, 16, v10
-; GFX11-TRUE16-NEXT: v_lshl_or_b32 v9, v27, 16, v9
-; GFX11-TRUE16-NEXT: v_lshl_or_b32 v8, v26, 16, v8
-; GFX11-TRUE16-NEXT: v_lshl_or_b32 v7, v25, 16, v7
-; GFX11-TRUE16-NEXT: v_lshl_or_b32 v6, v24, 16, v6
-; GFX11-TRUE16-NEXT: v_lshl_or_b32 v5, v23, 16, v5
-; GFX11-TRUE16-NEXT: v_lshl_or_b32 v4, v22, 16, v4
-; GFX11-TRUE16-NEXT: v_lshl_or_b32 v3, v21, 16, v3
-; GFX11-TRUE16-NEXT: v_lshl_or_b32 v2, v20, 16, v2
-; GFX11-TRUE16-NEXT: v_lshl_or_b32 v1, v19, 16, v1
-; GFX11-TRUE16-NEXT: v_lshl_or_b32 v0, v18, 16, v0
; GFX11-TRUE16-NEXT: s_pack_ll_b32_b16 s29, s29, s44
; GFX11-TRUE16-NEXT: s_pack_ll_b32_b16 s28, s28, s43
; GFX11-TRUE16-NEXT: s_pack_ll_b32_b16 s27, s27, s42
@@ -49513,79 +51447,91 @@ define inreg <60 x half> @bitcast_v60i16_to_v60f16_scalar(<60 x i16> inreg %a, i
; GFX11-TRUE16-NEXT: s_pack_ll_b32_b16 s2, s2, s7
; GFX11-TRUE16-NEXT: s_pack_ll_b32_b16 s0, s0, s45
; GFX11-TRUE16-NEXT: s_pack_ll_b32_b16 s1, s1, s4
-; GFX11-TRUE16-NEXT: v_pk_add_u16 v11, v11, 3 op_sel_hi:[1,0]
-; GFX11-TRUE16-NEXT: v_pk_add_u16 v10, v10, 3 op_sel_hi:[1,0]
-; GFX11-TRUE16-NEXT: v_pk_add_u16 v9, v9, 3 op_sel_hi:[1,0]
-; GFX11-TRUE16-NEXT: v_pk_add_u16 v8, v8, 3 op_sel_hi:[1,0]
-; GFX11-TRUE16-NEXT: v_pk_add_u16 v7, v7, 3 op_sel_hi:[1,0]
-; GFX11-TRUE16-NEXT: v_pk_add_u16 v6, v6, 3 op_sel_hi:[1,0]
-; GFX11-TRUE16-NEXT: v_pk_add_u16 v5, v5, 3 op_sel_hi:[1,0]
-; GFX11-TRUE16-NEXT: v_pk_add_u16 v4, v4, 3 op_sel_hi:[1,0]
-; GFX11-TRUE16-NEXT: v_pk_add_u16 v3, v3, 3 op_sel_hi:[1,0]
-; GFX11-TRUE16-NEXT: v_pk_add_u16 v2, v2, 3 op_sel_hi:[1,0]
-; GFX11-TRUE16-NEXT: v_pk_add_u16 v1, v1, 3 op_sel_hi:[1,0]
-; GFX11-TRUE16-NEXT: v_pk_add_u16 v0, v0, 3 op_sel_hi:[1,0]
-; GFX11-TRUE16-NEXT: v_pk_add_u16 v15, s29, 3 op_sel_hi:[1,0]
+; GFX11-TRUE16-NEXT: v_pk_add_u16 v29, v29, 3 op_sel_hi:[1,0]
+; GFX11-TRUE16-NEXT: v_pk_add_u16 v28, v28, 3 op_sel_hi:[1,0]
+; GFX11-TRUE16-NEXT: v_pk_add_u16 v27, v27, 3 op_sel_hi:[1,0]
+; GFX11-TRUE16-NEXT: v_pk_add_u16 v26, v26, 3 op_sel_hi:[1,0]
+; GFX11-TRUE16-NEXT: v_pk_add_u16 v25, v25, 3 op_sel_hi:[1,0]
+; GFX11-TRUE16-NEXT: v_pk_add_u16 v24, v24, 3 op_sel_hi:[1,0]
+; GFX11-TRUE16-NEXT: v_pk_add_u16 v23, v23, 3 op_sel_hi:[1,0]
+; GFX11-TRUE16-NEXT: v_pk_add_u16 v22, v22, 3 op_sel_hi:[1,0]
+; GFX11-TRUE16-NEXT: v_pk_add_u16 v21, v21, 3 op_sel_hi:[1,0]
+; GFX11-TRUE16-NEXT: v_pk_add_u16 v20, v20, 3 op_sel_hi:[1,0]
+; GFX11-TRUE16-NEXT: v_pk_add_u16 v19, v19, 3 op_sel_hi:[1,0]
+; GFX11-TRUE16-NEXT: v_pk_add_u16 v18, v18, 3 op_sel_hi:[1,0]
+; GFX11-TRUE16-NEXT: v_pk_add_u16 v17, s29, 3 op_sel_hi:[1,0]
; GFX11-TRUE16-NEXT: v_pk_add_u16 v16, s28, 3 op_sel_hi:[1,0]
-; GFX11-TRUE16-NEXT: v_pk_add_u16 v17, s27, 3 op_sel_hi:[1,0]
-; GFX11-TRUE16-NEXT: v_pk_add_u16 v12, s26, 3 op_sel_hi:[1,0]
+; GFX11-TRUE16-NEXT: v_pk_add_u16 v15, s27, 3 op_sel_hi:[1,0]
+; GFX11-TRUE16-NEXT: v_pk_add_u16 v14, s26, 3 op_sel_hi:[1,0]
; GFX11-TRUE16-NEXT: v_pk_add_u16 v13, s25, 3 op_sel_hi:[1,0]
-; GFX11-TRUE16-NEXT: v_pk_add_u16 v14, s15, 3 op_sel_hi:[1,0]
-; GFX11-TRUE16-NEXT: v_pk_add_u16 v34, s14, 3 op_sel_hi:[1,0]
-; GFX11-TRUE16-NEXT: v_pk_add_u16 v35, s13, 3 op_sel_hi:[1,0]
-; GFX11-TRUE16-NEXT: v_pk_add_u16 v30, s12, 3 op_sel_hi:[1,0]
-; GFX11-TRUE16-NEXT: v_pk_add_u16 v31, s11, 3 op_sel_hi:[1,0]
-; GFX11-TRUE16-NEXT: v_pk_add_u16 v32, s10, 3 op_sel_hi:[1,0]
-; GFX11-TRUE16-NEXT: v_pk_add_u16 v33, s9, 3 op_sel_hi:[1,0]
-; GFX11-TRUE16-NEXT: v_pk_add_u16 v49, s8, 3 op_sel_hi:[1,0]
-; GFX11-TRUE16-NEXT: v_pk_add_u16 v48, s0, 3 op_sel_hi:[1,0]
-; GFX11-TRUE16-NEXT: v_pk_add_u16 v39, s1, 3 op_sel_hi:[1,0]
-; GFX11-TRUE16-NEXT: v_pk_add_u16 v38, s2, 3 op_sel_hi:[1,0]
-; GFX11-TRUE16-NEXT: v_pk_add_u16 v37, s3, 3 op_sel_hi:[1,0]
-; GFX11-TRUE16-NEXT: v_pk_add_u16 v36, s6, 3 op_sel_hi:[1,0]
-; GFX11-TRUE16-NEXT: v_lshrrev_b32_e32 v83, 16, v48
-; GFX11-TRUE16-NEXT: v_lshrrev_b32_e32 v82, 16, v39
-; GFX11-TRUE16-NEXT: v_lshrrev_b32_e32 v81, 16, v38
-; GFX11-TRUE16-NEXT: v_lshrrev_b32_e32 v80, 16, v37
-; GFX11-TRUE16-NEXT: v_lshrrev_b32_e32 v71, 16, v36
-; GFX11-TRUE16-NEXT: v_lshrrev_b32_e32 v70, 16, v49
-; GFX11-TRUE16-NEXT: v_lshrrev_b32_e32 v69, 16, v33
-; GFX11-TRUE16-NEXT: v_lshrrev_b32_e32 v68, 16, v32
-; GFX11-TRUE16-NEXT: v_lshrrev_b32_e32 v67, 16, v31
-; GFX11-TRUE16-NEXT: v_lshrrev_b32_e32 v66, 16, v30
-; GFX11-TRUE16-NEXT: v_lshrrev_b32_e32 v65, 16, v35
-; GFX11-TRUE16-NEXT: v_lshrrev_b32_e32 v64, 16, v34
-; GFX11-TRUE16-NEXT: v_lshrrev_b32_e32 v55, 16, v14
+; GFX11-TRUE16-NEXT: v_pk_add_u16 v12, s15, 3 op_sel_hi:[1,0]
+; GFX11-TRUE16-NEXT: v_pk_add_u16 v11, s14, 3 op_sel_hi:[1,0]
+; GFX11-TRUE16-NEXT: v_pk_add_u16 v10, s13, 3 op_sel_hi:[1,0]
+; GFX11-TRUE16-NEXT: v_pk_add_u16 v9, s12, 3 op_sel_hi:[1,0]
+; GFX11-TRUE16-NEXT: v_pk_add_u16 v8, s11, 3 op_sel_hi:[1,0]
+; GFX11-TRUE16-NEXT: v_pk_add_u16 v7, s10, 3 op_sel_hi:[1,0]
+; GFX11-TRUE16-NEXT: v_pk_add_u16 v6, s9, 3 op_sel_hi:[1,0]
+; GFX11-TRUE16-NEXT: v_pk_add_u16 v5, s8, 3 op_sel_hi:[1,0]
+; GFX11-TRUE16-NEXT: v_pk_add_u16 v0, s0, 3 op_sel_hi:[1,0]
+; GFX11-TRUE16-NEXT: v_pk_add_u16 v1, s1, 3 op_sel_hi:[1,0]
+; GFX11-TRUE16-NEXT: v_pk_add_u16 v2, s2, 3 op_sel_hi:[1,0]
+; GFX11-TRUE16-NEXT: v_pk_add_u16 v3, s3, 3 op_sel_hi:[1,0]
+; GFX11-TRUE16-NEXT: v_pk_add_u16 v4, s6, 3 op_sel_hi:[1,0]
+; GFX11-TRUE16-NEXT: v_lshrrev_b32_e32 v83, 16, v0
+; GFX11-TRUE16-NEXT: v_lshrrev_b32_e32 v82, 16, v1
+; GFX11-TRUE16-NEXT: v_lshrrev_b32_e32 v81, 16, v2
+; GFX11-TRUE16-NEXT: v_lshrrev_b32_e32 v80, 16, v3
+; GFX11-TRUE16-NEXT: v_lshrrev_b32_e32 v71, 16, v4
+; GFX11-TRUE16-NEXT: v_lshrrev_b32_e32 v70, 16, v5
+; GFX11-TRUE16-NEXT: v_lshrrev_b32_e32 v69, 16, v6
+; GFX11-TRUE16-NEXT: v_lshrrev_b32_e32 v68, 16, v7
+; GFX11-TRUE16-NEXT: v_lshrrev_b32_e32 v67, 16, v8
+; GFX11-TRUE16-NEXT: v_lshrrev_b32_e32 v66, 16, v9
+; GFX11-TRUE16-NEXT: v_lshrrev_b32_e32 v65, 16, v10
+; GFX11-TRUE16-NEXT: v_lshrrev_b32_e32 v64, 16, v11
+; GFX11-TRUE16-NEXT: v_lshrrev_b32_e32 v55, 16, v12
; GFX11-TRUE16-NEXT: v_lshrrev_b32_e32 v54, 16, v13
-; GFX11-TRUE16-NEXT: v_lshrrev_b32_e32 v53, 16, v12
-; GFX11-TRUE16-NEXT: v_lshrrev_b32_e32 v52, 16, v17
+; GFX11-TRUE16-NEXT: v_lshrrev_b32_e32 v53, 16, v14
+; GFX11-TRUE16-NEXT: v_lshrrev_b32_e32 v52, 16, v15
; GFX11-TRUE16-NEXT: v_lshrrev_b32_e32 v51, 16, v16
-; GFX11-TRUE16-NEXT: v_lshrrev_b32_e32 v50, 16, v15
-; GFX11-TRUE16-NEXT: v_lshrrev_b32_e32 v18, 16, v0
-; GFX11-TRUE16-NEXT: v_lshrrev_b32_e32 v19, 16, v1
-; GFX11-TRUE16-NEXT: v_lshrrev_b32_e32 v20, 16, v2
-; GFX11-TRUE16-NEXT: v_lshrrev_b32_e32 v21, 16, v3
-; GFX11-TRUE16-NEXT: v_lshrrev_b32_e32 v22, 16, v4
-; GFX11-TRUE16-NEXT: v_lshrrev_b32_e32 v23, 16, v5
-; GFX11-TRUE16-NEXT: v_lshrrev_b32_e32 v24, 16, v6
-; GFX11-TRUE16-NEXT: v_lshrrev_b32_e32 v25, 16, v7
-; GFX11-TRUE16-NEXT: v_lshrrev_b32_e32 v26, 16, v8
-; GFX11-TRUE16-NEXT: v_lshrrev_b32_e32 v27, 16, v9
-; GFX11-TRUE16-NEXT: v_lshrrev_b32_e32 v28, 16, v10
-; GFX11-TRUE16-NEXT: v_lshrrev_b32_e32 v29, 16, v11
+; GFX11-TRUE16-NEXT: v_lshrrev_b32_e32 v50, 16, v17
+; GFX11-TRUE16-NEXT: v_lshrrev_b32_e32 v49, 16, v18
+; GFX11-TRUE16-NEXT: v_lshrrev_b32_e32 v48, 16, v19
+; GFX11-TRUE16-NEXT: v_lshrrev_b32_e32 v39, 16, v20
+; GFX11-TRUE16-NEXT: v_lshrrev_b32_e32 v38, 16, v21
+; GFX11-TRUE16-NEXT: v_lshrrev_b32_e32 v37, 16, v22
+; GFX11-TRUE16-NEXT: v_lshrrev_b32_e32 v36, 16, v23
+; GFX11-TRUE16-NEXT: v_lshrrev_b32_e32 v35, 16, v24
+; GFX11-TRUE16-NEXT: v_lshrrev_b32_e32 v34, 16, v25
+; GFX11-TRUE16-NEXT: v_lshrrev_b32_e32 v33, 16, v26
+; GFX11-TRUE16-NEXT: v_lshrrev_b32_e32 v32, 16, v27
+; GFX11-TRUE16-NEXT: v_lshrrev_b32_e32 v31, 16, v28
+; GFX11-TRUE16-NEXT: v_lshrrev_b32_e32 v30, 16, v29
; GFX11-TRUE16-NEXT: s_branch .LBB57_5
; GFX11-TRUE16-NEXT: .LBB57_3:
+; GFX11-TRUE16-NEXT: ; implicit-def: $vgpr49
+; GFX11-TRUE16-NEXT: ; implicit-def: $vgpr48
+; GFX11-TRUE16-NEXT: ; implicit-def: $vgpr39
+; GFX11-TRUE16-NEXT: ; implicit-def: $vgpr38
+; GFX11-TRUE16-NEXT: ; implicit-def: $vgpr37
+; GFX11-TRUE16-NEXT: ; implicit-def: $vgpr36
+; GFX11-TRUE16-NEXT: ; implicit-def: $vgpr35
+; GFX11-TRUE16-NEXT: ; implicit-def: $vgpr34
+; GFX11-TRUE16-NEXT: ; implicit-def: $vgpr33
+; GFX11-TRUE16-NEXT: ; implicit-def: $vgpr32
+; GFX11-TRUE16-NEXT: ; implicit-def: $vgpr31
+; GFX11-TRUE16-NEXT: ; implicit-def: $vgpr30
; GFX11-TRUE16-NEXT: s_branch .LBB57_2
; GFX11-TRUE16-NEXT: .LBB57_4:
-; GFX11-TRUE16-NEXT: v_dual_mov_b32 v15, s29 :: v_dual_mov_b32 v16, s28
-; GFX11-TRUE16-NEXT: v_dual_mov_b32 v17, s27 :: v_dual_mov_b32 v12, s26
-; GFX11-TRUE16-NEXT: v_dual_mov_b32 v13, s25 :: v_dual_mov_b32 v14, s24
-; GFX11-TRUE16-NEXT: v_dual_mov_b32 v34, s23 :: v_dual_mov_b32 v35, s22
-; GFX11-TRUE16-NEXT: v_dual_mov_b32 v30, s21 :: v_dual_mov_b32 v31, s20
-; GFX11-TRUE16-NEXT: v_dual_mov_b32 v32, s19 :: v_dual_mov_b32 v33, s18
-; GFX11-TRUE16-NEXT: v_dual_mov_b32 v49, s17 :: v_dual_mov_b32 v36, s16
-; GFX11-TRUE16-NEXT: v_dual_mov_b32 v37, s3 :: v_dual_mov_b32 v38, s2
-; GFX11-TRUE16-NEXT: v_dual_mov_b32 v39, s1 :: v_dual_mov_b32 v48, s0
+; GFX11-TRUE16-NEXT: v_dual_mov_b32 v17, s29 :: v_dual_mov_b32 v16, s28
+; GFX11-TRUE16-NEXT: v_dual_mov_b32 v15, s27 :: v_dual_mov_b32 v14, s26
+; GFX11-TRUE16-NEXT: v_dual_mov_b32 v13, s25 :: v_dual_mov_b32 v12, s24
+; GFX11-TRUE16-NEXT: v_dual_mov_b32 v11, s23 :: v_dual_mov_b32 v10, s22
+; GFX11-TRUE16-NEXT: v_dual_mov_b32 v9, s21 :: v_dual_mov_b32 v8, s20
+; GFX11-TRUE16-NEXT: v_dual_mov_b32 v7, s19 :: v_dual_mov_b32 v6, s18
+; GFX11-TRUE16-NEXT: v_dual_mov_b32 v5, s17 :: v_dual_mov_b32 v4, s16
+; GFX11-TRUE16-NEXT: v_dual_mov_b32 v3, s3 :: v_dual_mov_b32 v2, s2
+; GFX11-TRUE16-NEXT: v_dual_mov_b32 v1, s1 :: v_dual_mov_b32 v0, s0
; GFX11-TRUE16-NEXT: v_dual_mov_b32 v50, s44 :: v_dual_mov_b32 v51, s43
; GFX11-TRUE16-NEXT: v_dual_mov_b32 v52, s42 :: v_dual_mov_b32 v53, s41
; GFX11-TRUE16-NEXT: v_dual_mov_b32 v54, s40 :: v_dual_mov_b32 v55, s15
@@ -49596,75 +51542,52 @@ define inreg <60 x half> @bitcast_v60i16_to_v60f16_scalar(<60 x i16> inreg %a, i
; GFX11-TRUE16-NEXT: v_dual_mov_b32 v80, s5 :: v_dual_mov_b32 v81, s7
; GFX11-TRUE16-NEXT: v_dual_mov_b32 v82, s4 :: v_dual_mov_b32 v83, s45
; GFX11-TRUE16-NEXT: .LBB57_5: ; %end
-; GFX11-TRUE16-NEXT: v_and_b32_e32 v49, 0xffff, v49
-; GFX11-TRUE16-NEXT: v_and_b32_e32 v1, 0xffff, v1
-; GFX11-TRUE16-NEXT: v_and_b32_e32 v39, 0xffff, v39
-; GFX11-TRUE16-NEXT: v_and_b32_e32 v0, 0xffff, v0
-; GFX11-TRUE16-NEXT: v_and_b32_e32 v33, 0xffff, v33
-; GFX11-TRUE16-NEXT: v_lshl_or_b32 v49, v70, 16, v49
-; GFX11-TRUE16-NEXT: v_and_b32_e32 v84, 0xffff, v37
-; GFX11-TRUE16-NEXT: v_lshl_or_b32 v19, v19, 16, v1
-; GFX11-TRUE16-NEXT: v_and_b32_e32 v1, 0xffff, v3
-; GFX11-TRUE16-NEXT: v_and_b32_e32 v3, 0xffff, v5
-; GFX11-TRUE16-NEXT: v_lshl_or_b32 v37, v82, 16, v39
-; GFX11-TRUE16-NEXT: v_lshl_or_b32 v18, v18, 16, v0
-; GFX11-TRUE16-NEXT: v_and_b32_e32 v0, 0xffff, v2
-; GFX11-TRUE16-NEXT: v_and_b32_e32 v2, 0xffff, v4
-; GFX11-TRUE16-NEXT: v_lshl_or_b32 v23, v23, 16, v3
-; GFX11-TRUE16-NEXT: v_and_b32_e32 v3, 0xffff, v10
-; GFX11-TRUE16-NEXT: v_mov_b32_e32 v5, v49
-; GFX11-TRUE16-NEXT: v_lshl_or_b32 v39, v80, 16, v84
-; GFX11-TRUE16-NEXT: v_and_b32_e32 v38, 0xffff, v38
-; GFX11-TRUE16-NEXT: v_and_b32_e32 v48, 0xffff, v48
-; GFX11-TRUE16-NEXT: v_lshl_or_b32 v21, v21, 16, v1
-; GFX11-TRUE16-NEXT: v_lshl_or_b32 v22, v22, 16, v2
-; GFX11-TRUE16-NEXT: v_and_b32_e32 v1, 0xffff, v8
-; GFX11-TRUE16-NEXT: v_and_b32_e32 v2, 0xffff, v9
-; GFX11-TRUE16-NEXT: v_lshl_or_b32 v28, v28, 16, v3
-; GFX11-TRUE16-NEXT: v_mov_b32_e32 v3, v39
-; GFX11-TRUE16-NEXT: v_lshl_or_b32 v38, v81, 16, v38
-; GFX11-TRUE16-NEXT: v_and_b32_e32 v85, 0xffff, v36
-; GFX11-TRUE16-NEXT: v_and_b32_e32 v32, 0xffff, v32
-; GFX11-TRUE16-NEXT: v_and_b32_e32 v80, 0xffff, v30
-; GFX11-TRUE16-NEXT: v_lshl_or_b32 v26, v26, 16, v1
-; GFX11-TRUE16-NEXT: v_lshl_or_b32 v27, v27, 16, v2
-; GFX11-TRUE16-NEXT: v_mov_b32_e32 v1, v37
-; GFX11-TRUE16-NEXT: v_lshl_or_b32 v36, v83, 16, v48
-; GFX11-TRUE16-NEXT: v_mov_b32_e32 v2, v38
-; GFX11-TRUE16-NEXT: v_lshl_or_b32 v48, v71, 16, v85
-; GFX11-TRUE16-NEXT: v_and_b32_e32 v71, 0xffff, v31
-; GFX11-TRUE16-NEXT: v_lshl_or_b32 v30, v69, 16, v33
-; GFX11-TRUE16-NEXT: v_lshl_or_b32 v31, v68, 16, v32
-; GFX11-TRUE16-NEXT: v_lshl_or_b32 v33, v66, 16, v80
-; GFX11-TRUE16-NEXT: v_and_b32_e32 v35, 0xffff, v35
-; GFX11-TRUE16-NEXT: v_lshl_or_b32 v32, v67, 16, v71
-; GFX11-TRUE16-NEXT: v_and_b32_e32 v66, 0xffff, v34
-; GFX11-TRUE16-NEXT: v_and_b32_e32 v14, 0xffff, v14
-; GFX11-TRUE16-NEXT: v_and_b32_e32 v67, 0xffff, v12
-; GFX11-TRUE16-NEXT: v_and_b32_e32 v4, 0xffff, v6
-; GFX11-TRUE16-NEXT: v_and_b32_e32 v13, 0xffff, v13
-; GFX11-TRUE16-NEXT: v_lshl_or_b32 v34, v65, 16, v35
-; GFX11-TRUE16-NEXT: v_lshl_or_b32 v35, v64, 16, v66
-; GFX11-TRUE16-NEXT: v_lshl_or_b32 v12, v55, 16, v14
-; GFX11-TRUE16-NEXT: v_lshl_or_b32 v14, v53, 16, v67
-; GFX11-TRUE16-NEXT: v_and_b32_e32 v17, 0xffff, v17
-; GFX11-TRUE16-NEXT: v_and_b32_e32 v16, 0xffff, v16
-; GFX11-TRUE16-NEXT: v_and_b32_e32 v53, 0xffff, v15
-; GFX11-TRUE16-NEXT: v_lshl_or_b32 v20, v20, 16, v0
-; GFX11-TRUE16-NEXT: v_lshl_or_b32 v24, v24, 16, v4
-; GFX11-TRUE16-NEXT: v_and_b32_e32 v0, 0xffff, v7
-; GFX11-TRUE16-NEXT: v_and_b32_e32 v4, 0xffff, v11
-; GFX11-TRUE16-NEXT: v_lshl_or_b32 v13, v54, 16, v13
-; GFX11-TRUE16-NEXT: v_lshl_or_b32 v15, v52, 16, v17
-; GFX11-TRUE16-NEXT: v_lshl_or_b32 v16, v51, 16, v16
-; GFX11-TRUE16-NEXT: v_lshl_or_b32 v17, v50, 16, v53
-; GFX11-TRUE16-NEXT: v_lshl_or_b32 v25, v25, 16, v0
-; GFX11-TRUE16-NEXT: v_lshl_or_b32 v29, v29, 16, v4
-; GFX11-TRUE16-NEXT: v_mov_b32_e32 v0, v36
-; GFX11-TRUE16-NEXT: v_mov_b32_e32 v4, v48
-; GFX11-TRUE16-NEXT: v_dual_mov_b32 v6, v30 :: v_dual_mov_b32 v7, v31
-; GFX11-TRUE16-NEXT: v_dual_mov_b32 v8, v32 :: v_dual_mov_b32 v9, v33
-; GFX11-TRUE16-NEXT: v_dual_mov_b32 v10, v34 :: v_dual_mov_b32 v11, v35
+; GFX11-TRUE16-NEXT: s_delay_alu instid0(VALU_DEP_1) | instskip(NEXT) | instid1(VALU_DEP_3)
+; GFX11-TRUE16-NEXT: v_dual_mov_b32 v83, v83 :: v_dual_mov_b32 v82, v82
+; GFX11-TRUE16-NEXT: v_dual_mov_b32 v81, v81 :: v_dual_mov_b32 v80, v80
+; GFX11-TRUE16-NEXT: v_dual_mov_b32 v71, v71 :: v_dual_mov_b32 v70, v70
+; GFX11-TRUE16-NEXT: v_dual_mov_b32 v69, v69 :: v_dual_mov_b32 v68, v68
+; GFX11-TRUE16-NEXT: v_dual_mov_b32 v67, v67 :: v_dual_mov_b32 v66, v66
+; GFX11-TRUE16-NEXT: v_dual_mov_b32 v65, v65 :: v_dual_mov_b32 v64, v64
+; GFX11-TRUE16-NEXT: v_dual_mov_b32 v55, v55 :: v_dual_mov_b32 v54, v54
+; GFX11-TRUE16-NEXT: v_dual_mov_b32 v53, v53 :: v_dual_mov_b32 v52, v52
+; GFX11-TRUE16-NEXT: v_dual_mov_b32 v51, v51 :: v_dual_mov_b32 v50, v50
+; GFX11-TRUE16-NEXT: v_dual_mov_b32 v49, v49 :: v_dual_mov_b32 v48, v48
+; GFX11-TRUE16-NEXT: v_dual_mov_b32 v39, v39 :: v_dual_mov_b32 v38, v38
+; GFX11-TRUE16-NEXT: v_dual_mov_b32 v37, v37 :: v_dual_mov_b32 v36, v36
+; GFX11-TRUE16-NEXT: v_dual_mov_b32 v35, v35 :: v_dual_mov_b32 v34, v34
+; GFX11-TRUE16-NEXT: v_dual_mov_b32 v33, v33 :: v_dual_mov_b32 v32, v32
+; GFX11-TRUE16-NEXT: v_dual_mov_b32 v31, v31 :: v_dual_mov_b32 v30, v30
+; GFX11-TRUE16-NEXT: v_mov_b16_e32 v0.h, v83.l
+; GFX11-TRUE16-NEXT: v_mov_b16_e32 v1.h, v82.l
+; GFX11-TRUE16-NEXT: v_mov_b16_e32 v2.h, v81.l
+; GFX11-TRUE16-NEXT: v_mov_b16_e32 v3.h, v80.l
+; GFX11-TRUE16-NEXT: v_mov_b16_e32 v4.h, v71.l
+; GFX11-TRUE16-NEXT: v_mov_b16_e32 v5.h, v70.l
+; GFX11-TRUE16-NEXT: v_mov_b16_e32 v6.h, v69.l
+; GFX11-TRUE16-NEXT: v_mov_b16_e32 v7.h, v68.l
+; GFX11-TRUE16-NEXT: v_mov_b16_e32 v8.h, v67.l
+; GFX11-TRUE16-NEXT: v_mov_b16_e32 v9.h, v66.l
+; GFX11-TRUE16-NEXT: v_mov_b16_e32 v10.h, v65.l
+; GFX11-TRUE16-NEXT: v_mov_b16_e32 v11.h, v64.l
+; GFX11-TRUE16-NEXT: v_mov_b16_e32 v12.h, v55.l
+; GFX11-TRUE16-NEXT: v_mov_b16_e32 v13.h, v54.l
+; GFX11-TRUE16-NEXT: v_mov_b16_e32 v14.h, v53.l
+; GFX11-TRUE16-NEXT: v_mov_b16_e32 v15.h, v52.l
+; GFX11-TRUE16-NEXT: v_mov_b16_e32 v16.h, v51.l
+; GFX11-TRUE16-NEXT: v_mov_b16_e32 v17.h, v50.l
+; GFX11-TRUE16-NEXT: v_mov_b16_e32 v18.h, v49.l
+; GFX11-TRUE16-NEXT: v_mov_b16_e32 v19.h, v48.l
+; GFX11-TRUE16-NEXT: v_mov_b16_e32 v20.h, v39.l
+; GFX11-TRUE16-NEXT: v_mov_b16_e32 v21.h, v38.l
+; GFX11-TRUE16-NEXT: v_mov_b16_e32 v22.h, v37.l
+; GFX11-TRUE16-NEXT: v_mov_b16_e32 v23.h, v36.l
+; GFX11-TRUE16-NEXT: v_mov_b16_e32 v24.h, v35.l
+; GFX11-TRUE16-NEXT: v_mov_b16_e32 v25.h, v34.l
+; GFX11-TRUE16-NEXT: v_mov_b16_e32 v26.h, v33.l
+; GFX11-TRUE16-NEXT: v_mov_b16_e32 v27.h, v32.l
+; GFX11-TRUE16-NEXT: v_mov_b16_e32 v28.h, v31.l
+; GFX11-TRUE16-NEXT: v_mov_b16_e32 v29.h, v30.l
; GFX11-TRUE16-NEXT: s_setpc_b64 s[30:31]
;
; GFX11-FAKE16-LABEL: bitcast_v60i16_to_v60f16_scalar:
@@ -52368,31 +54291,14 @@ define inreg <60 x i16> @bitcast_v60f16_to_v60i16_scalar(<60 x half> inreg %a, i
; GFX11-TRUE16-LABEL: bitcast_v60f16_to_v60i16_scalar:
; GFX11-TRUE16: ; %bb.0:
; GFX11-TRUE16-NEXT: s_waitcnt vmcnt(0) expcnt(0) lgkmcnt(0)
-; GFX11-TRUE16-NEXT: v_mov_b16_e32 v29.h, 0
; GFX11-TRUE16-NEXT: v_cmp_ne_u32_e32 vcc_lo, 0, v12
-; GFX11-TRUE16-NEXT: v_mov_b16_e32 v29.l, v11.h
-; GFX11-TRUE16-NEXT: v_mov_b16_e32 v28.l, v10.h
-; GFX11-TRUE16-NEXT: v_mov_b16_e32 v27.l, v9.h
-; GFX11-TRUE16-NEXT: v_mov_b16_e32 v28.h, v29.h
-; GFX11-TRUE16-NEXT: v_mov_b16_e32 v27.h, v29.h
-; GFX11-TRUE16-NEXT: v_mov_b16_e32 v26.l, v8.h
-; GFX11-TRUE16-NEXT: v_mov_b16_e32 v26.h, v29.h
-; GFX11-TRUE16-NEXT: v_mov_b16_e32 v25.l, v7.h
-; GFX11-TRUE16-NEXT: v_mov_b16_e32 v25.h, v29.h
-; GFX11-TRUE16-NEXT: v_mov_b16_e32 v24.l, v6.h
-; GFX11-TRUE16-NEXT: v_mov_b16_e32 v24.h, v29.h
-; GFX11-TRUE16-NEXT: v_mov_b16_e32 v23.l, v5.h
-; GFX11-TRUE16-NEXT: v_mov_b16_e32 v23.h, v29.h
-; GFX11-TRUE16-NEXT: v_mov_b16_e32 v22.l, v4.h
-; GFX11-TRUE16-NEXT: v_mov_b16_e32 v22.h, v29.h
-; GFX11-TRUE16-NEXT: v_mov_b16_e32 v21.l, v3.h
-; GFX11-TRUE16-NEXT: v_mov_b16_e32 v21.h, v29.h
-; GFX11-TRUE16-NEXT: v_mov_b16_e32 v20.l, v2.h
-; GFX11-TRUE16-NEXT: v_mov_b16_e32 v20.h, v29.h
-; GFX11-TRUE16-NEXT: v_mov_b16_e32 v19.l, v1.h
-; GFX11-TRUE16-NEXT: v_mov_b16_e32 v19.h, v29.h
-; GFX11-TRUE16-NEXT: v_mov_b16_e32 v18.l, v0.h
-; GFX11-TRUE16-NEXT: v_mov_b16_e32 v18.h, v29.h
+; GFX11-TRUE16-NEXT: v_dual_mov_b32 v29, v11 :: v_dual_mov_b32 v28, v10
+; GFX11-TRUE16-NEXT: v_dual_mov_b32 v27, v9 :: v_dual_mov_b32 v26, v8
+; GFX11-TRUE16-NEXT: v_dual_mov_b32 v25, v7 :: v_dual_mov_b32 v24, v6
+; GFX11-TRUE16-NEXT: v_dual_mov_b32 v23, v5 :: v_dual_mov_b32 v22, v4
+; GFX11-TRUE16-NEXT: v_dual_mov_b32 v21, v3 :: v_dual_mov_b32 v20, v2
+; GFX11-TRUE16-NEXT: v_dual_mov_b32 v19, v1 :: v_dual_mov_b32 v18, v0
+; GFX11-TRUE16-NEXT: v_mov_b16_e32 v30.h, 0
; GFX11-TRUE16-NEXT: s_lshr_b32 s44, s29, 16
; GFX11-TRUE16-NEXT: s_lshr_b32 s43, s28, 16
; GFX11-TRUE16-NEXT: s_lshr_b32 s42, s27, 16
@@ -52414,34 +54320,33 @@ define inreg <60 x i16> @bitcast_v60f16_to_v60i16_scalar(<60 x half> inreg %a, i
; GFX11-TRUE16-NEXT: s_mov_b32 s46, 0
; GFX11-TRUE16-NEXT: s_and_b32 s47, vcc_lo, exec_lo
; GFX11-TRUE16-NEXT: s_cbranch_scc0 .LBB59_3
-; GFX11-TRUE16-NEXT: ; %bb.1: ; %Flow
+; GFX11-TRUE16-NEXT: ; %bb.1: ; %cmp.false
+; GFX11-TRUE16-NEXT: v_mov_b16_e32 v30.l, v29.h
+; GFX11-TRUE16-NEXT: v_mov_b16_e32 v31.l, v28.h
+; GFX11-TRUE16-NEXT: v_mov_b16_e32 v31.h, v30.h
+; GFX11-TRUE16-NEXT: v_mov_b16_e32 v32.l, v27.h
+; GFX11-TRUE16-NEXT: v_mov_b16_e32 v32.h, v30.h
+; GFX11-TRUE16-NEXT: v_mov_b16_e32 v33.l, v26.h
+; GFX11-TRUE16-NEXT: v_mov_b16_e32 v33.h, v30.h
+; GFX11-TRUE16-NEXT: v_mov_b16_e32 v34.l, v25.h
+; GFX11-TRUE16-NEXT: v_mov_b16_e32 v34.h, v30.h
+; GFX11-TRUE16-NEXT: v_mov_b16_e32 v35.l, v24.h
+; GFX11-TRUE16-NEXT: v_mov_b16_e32 v35.h, v30.h
+; GFX11-TRUE16-NEXT: v_mov_b16_e32 v36.l, v23.h
+; GFX11-TRUE16-NEXT: v_mov_b16_e32 v36.h, v30.h
+; GFX11-TRUE16-NEXT: v_mov_b16_e32 v37.l, v22.h
+; GFX11-TRUE16-NEXT: v_mov_b16_e32 v37.h, v30.h
+; GFX11-TRUE16-NEXT: v_mov_b16_e32 v38.l, v21.h
+; GFX11-TRUE16-NEXT: v_mov_b16_e32 v38.h, v30.h
+; GFX11-TRUE16-NEXT: v_mov_b16_e32 v39.l, v20.h
+; GFX11-TRUE16-NEXT: v_mov_b16_e32 v39.h, v30.h
+; GFX11-TRUE16-NEXT: v_mov_b16_e32 v48.l, v19.h
+; GFX11-TRUE16-NEXT: v_mov_b16_e32 v48.h, v30.h
+; GFX11-TRUE16-NEXT: v_mov_b16_e32 v49.l, v18.h
+; GFX11-TRUE16-NEXT: v_mov_b16_e32 v49.h, v30.h
; GFX11-TRUE16-NEXT: s_and_not1_b32 vcc_lo, exec_lo, s46
; GFX11-TRUE16-NEXT: s_cbranch_vccnz .LBB59_4
; GFX11-TRUE16-NEXT: .LBB59_2: ; %cmp.true
-; GFX11-TRUE16-NEXT: v_and_b32_e32 v11, 0xffff, v11
-; GFX11-TRUE16-NEXT: v_and_b32_e32 v10, 0xffff, v10
-; GFX11-TRUE16-NEXT: v_and_b32_e32 v9, 0xffff, v9
-; GFX11-TRUE16-NEXT: v_and_b32_e32 v8, 0xffff, v8
-; GFX11-TRUE16-NEXT: v_and_b32_e32 v7, 0xffff, v7
-; GFX11-TRUE16-NEXT: v_and_b32_e32 v6, 0xffff, v6
-; GFX11-TRUE16-NEXT: v_and_b32_e32 v5, 0xffff, v5
-; GFX11-TRUE16-NEXT: v_and_b32_e32 v4, 0xffff, v4
-; GFX11-TRUE16-NEXT: v_and_b32_e32 v3, 0xffff, v3
-; GFX11-TRUE16-NEXT: v_and_b32_e32 v2, 0xffff, v2
-; GFX11-TRUE16-NEXT: v_and_b32_e32 v1, 0xffff, v1
-; GFX11-TRUE16-NEXT: v_and_b32_e32 v0, 0xffff, v0
-; GFX11-TRUE16-NEXT: v_lshl_or_b32 v11, v29, 16, v11
-; GFX11-TRUE16-NEXT: v_lshl_or_b32 v10, v28, 16, v10
-; GFX11-TRUE16-NEXT: v_lshl_or_b32 v9, v27, 16, v9
-; GFX11-TRUE16-NEXT: v_lshl_or_b32 v8, v26, 16, v8
-; GFX11-TRUE16-NEXT: v_lshl_or_b32 v7, v25, 16, v7
-; GFX11-TRUE16-NEXT: v_lshl_or_b32 v6, v24, 16, v6
-; GFX11-TRUE16-NEXT: v_lshl_or_b32 v5, v23, 16, v5
-; GFX11-TRUE16-NEXT: v_lshl_or_b32 v4, v22, 16, v4
-; GFX11-TRUE16-NEXT: v_lshl_or_b32 v3, v21, 16, v3
-; GFX11-TRUE16-NEXT: v_lshl_or_b32 v2, v20, 16, v2
-; GFX11-TRUE16-NEXT: v_lshl_or_b32 v1, v19, 16, v1
-; GFX11-TRUE16-NEXT: v_lshl_or_b32 v0, v18, 16, v0
; GFX11-TRUE16-NEXT: s_pack_ll_b32_b16 s29, s29, s44
; GFX11-TRUE16-NEXT: s_pack_ll_b32_b16 s28, s28, s43
; GFX11-TRUE16-NEXT: s_pack_ll_b32_b16 s27, s27, s42
@@ -52460,79 +54365,91 @@ define inreg <60 x i16> @bitcast_v60f16_to_v60i16_scalar(<60 x half> inreg %a, i
; GFX11-TRUE16-NEXT: s_pack_ll_b32_b16 s2, s2, s7
; GFX11-TRUE16-NEXT: s_pack_ll_b32_b16 s0, s0, s45
; GFX11-TRUE16-NEXT: s_pack_ll_b32_b16 s1, s1, s4
-; GFX11-TRUE16-NEXT: v_pk_add_f16 v11, 0x200, v11 op_sel_hi:[0,1]
-; GFX11-TRUE16-NEXT: v_pk_add_f16 v10, 0x200, v10 op_sel_hi:[0,1]
-; GFX11-TRUE16-NEXT: v_pk_add_f16 v9, 0x200, v9 op_sel_hi:[0,1]
-; GFX11-TRUE16-NEXT: v_pk_add_f16 v8, 0x200, v8 op_sel_hi:[0,1]
-; GFX11-TRUE16-NEXT: v_pk_add_f16 v7, 0x200, v7 op_sel_hi:[0,1]
-; GFX11-TRUE16-NEXT: v_pk_add_f16 v6, 0x200, v6 op_sel_hi:[0,1]
-; GFX11-TRUE16-NEXT: v_pk_add_f16 v5, 0x200, v5 op_sel_hi:[0,1]
-; GFX11-TRUE16-NEXT: v_pk_add_f16 v4, 0x200, v4 op_sel_hi:[0,1]
-; GFX11-TRUE16-NEXT: v_pk_add_f16 v3, 0x200, v3 op_sel_hi:[0,1]
-; GFX11-TRUE16-NEXT: v_pk_add_f16 v2, 0x200, v2 op_sel_hi:[0,1]
-; GFX11-TRUE16-NEXT: v_pk_add_f16 v1, 0x200, v1 op_sel_hi:[0,1]
-; GFX11-TRUE16-NEXT: v_pk_add_f16 v0, 0x200, v0 op_sel_hi:[0,1]
-; GFX11-TRUE16-NEXT: v_pk_add_f16 v15, 0x200, s29 op_sel_hi:[0,1]
+; GFX11-TRUE16-NEXT: v_pk_add_f16 v29, 0x200, v29 op_sel_hi:[0,1]
+; GFX11-TRUE16-NEXT: v_pk_add_f16 v28, 0x200, v28 op_sel_hi:[0,1]
+; GFX11-TRUE16-NEXT: v_pk_add_f16 v27, 0x200, v27 op_sel_hi:[0,1]
+; GFX11-TRUE16-NEXT: v_pk_add_f16 v26, 0x200, v26 op_sel_hi:[0,1]
+; GFX11-TRUE16-NEXT: v_pk_add_f16 v25, 0x200, v25 op_sel_hi:[0,1]
+; GFX11-TRUE16-NEXT: v_pk_add_f16 v24, 0x200, v24 op_sel_hi:[0,1]
+; GFX11-TRUE16-NEXT: v_pk_add_f16 v23, 0x200, v23 op_sel_hi:[0,1]
+; GFX11-TRUE16-NEXT: v_pk_add_f16 v22, 0x200, v22 op_sel_hi:[0,1]
+; GFX11-TRUE16-NEXT: v_pk_add_f16 v21, 0x200, v21 op_sel_hi:[0,1]
+; GFX11-TRUE16-NEXT: v_pk_add_f16 v20, 0x200, v20 op_sel_hi:[0,1]
+; GFX11-TRUE16-NEXT: v_pk_add_f16 v19, 0x200, v19 op_sel_hi:[0,1]
+; GFX11-TRUE16-NEXT: v_pk_add_f16 v18, 0x200, v18 op_sel_hi:[0,1]
+; GFX11-TRUE16-NEXT: v_pk_add_f16 v17, 0x200, s29 op_sel_hi:[0,1]
; GFX11-TRUE16-NEXT: v_pk_add_f16 v16, 0x200, s28 op_sel_hi:[0,1]
-; GFX11-TRUE16-NEXT: v_pk_add_f16 v17, 0x200, s27 op_sel_hi:[0,1]
-; GFX11-TRUE16-NEXT: v_pk_add_f16 v12, 0x200, s26 op_sel_hi:[0,1]
+; GFX11-TRUE16-NEXT: v_pk_add_f16 v15, 0x200, s27 op_sel_hi:[0,1]
+; GFX11-TRUE16-NEXT: v_pk_add_f16 v14, 0x200, s26 op_sel_hi:[0,1]
; GFX11-TRUE16-NEXT: v_pk_add_f16 v13, 0x200, s25 op_sel_hi:[0,1]
-; GFX11-TRUE16-NEXT: v_pk_add_f16 v14, 0x200, s15 op_sel_hi:[0,1]
-; GFX11-TRUE16-NEXT: v_pk_add_f16 v34, 0x200, s14 op_sel_hi:[0,1]
-; GFX11-TRUE16-NEXT: v_pk_add_f16 v35, 0x200, s13 op_sel_hi:[0,1]
-; GFX11-TRUE16-NEXT: v_pk_add_f16 v30, 0x200, s12 op_sel_hi:[0,1]
-; GFX11-TRUE16-NEXT: v_pk_add_f16 v31, 0x200, s11 op_sel_hi:[0,1]
-; GFX11-TRUE16-NEXT: v_pk_add_f16 v32, 0x200, s10 op_sel_hi:[0,1]
-; GFX11-TRUE16-NEXT: v_pk_add_f16 v33, 0x200, s9 op_sel_hi:[0,1]
-; GFX11-TRUE16-NEXT: v_pk_add_f16 v49, 0x200, s8 op_sel_hi:[0,1]
-; GFX11-TRUE16-NEXT: v_pk_add_f16 v48, 0x200, s0 op_sel_hi:[0,1]
-; GFX11-TRUE16-NEXT: v_pk_add_f16 v39, 0x200, s1 op_sel_hi:[0,1]
-; GFX11-TRUE16-NEXT: v_pk_add_f16 v38, 0x200, s2 op_sel_hi:[0,1]
-; GFX11-TRUE16-NEXT: v_pk_add_f16 v37, 0x200, s3 op_sel_hi:[0,1]
-; GFX11-TRUE16-NEXT: v_pk_add_f16 v36, 0x200, s6 op_sel_hi:[0,1]
-; GFX11-TRUE16-NEXT: v_lshrrev_b32_e32 v83, 16, v48
-; GFX11-TRUE16-NEXT: v_lshrrev_b32_e32 v82, 16, v39
-; GFX11-TRUE16-NEXT: v_lshrrev_b32_e32 v81, 16, v38
-; GFX11-TRUE16-NEXT: v_lshrrev_b32_e32 v80, 16, v37
-; GFX11-TRUE16-NEXT: v_lshrrev_b32_e32 v71, 16, v36
-; GFX11-TRUE16-NEXT: v_lshrrev_b32_e32 v70, 16, v49
-; GFX11-TRUE16-NEXT: v_lshrrev_b32_e32 v69, 16, v33
-; GFX11-TRUE16-NEXT: v_lshrrev_b32_e32 v68, 16, v32
-; GFX11-TRUE16-NEXT: v_lshrrev_b32_e32 v67, 16, v31
-; GFX11-TRUE16-NEXT: v_lshrrev_b32_e32 v66, 16, v30
-; GFX11-TRUE16-NEXT: v_lshrrev_b32_e32 v65, 16, v35
-; GFX11-TRUE16-NEXT: v_lshrrev_b32_e32 v64, 16, v34
-; GFX11-TRUE16-NEXT: v_lshrrev_b32_e32 v55, 16, v14
+; GFX11-TRUE16-NEXT: v_pk_add_f16 v12, 0x200, s15 op_sel_hi:[0,1]
+; GFX11-TRUE16-NEXT: v_pk_add_f16 v11, 0x200, s14 op_sel_hi:[0,1]
+; GFX11-TRUE16-NEXT: v_pk_add_f16 v10, 0x200, s13 op_sel_hi:[0,1]
+; GFX11-TRUE16-NEXT: v_pk_add_f16 v9, 0x200, s12 op_sel_hi:[0,1]
+; GFX11-TRUE16-NEXT: v_pk_add_f16 v8, 0x200, s11 op_sel_hi:[0,1]
+; GFX11-TRUE16-NEXT: v_pk_add_f16 v7, 0x200, s10 op_sel_hi:[0,1]
+; GFX11-TRUE16-NEXT: v_pk_add_f16 v6, 0x200, s9 op_sel_hi:[0,1]
+; GFX11-TRUE16-NEXT: v_pk_add_f16 v5, 0x200, s8 op_sel_hi:[0,1]
+; GFX11-TRUE16-NEXT: v_pk_add_f16 v0, 0x200, s0 op_sel_hi:[0,1]
+; GFX11-TRUE16-NEXT: v_pk_add_f16 v1, 0x200, s1 op_sel_hi:[0,1]
+; GFX11-TRUE16-NEXT: v_pk_add_f16 v2, 0x200, s2 op_sel_hi:[0,1]
+; GFX11-TRUE16-NEXT: v_pk_add_f16 v3, 0x200, s3 op_sel_hi:[0,1]
+; GFX11-TRUE16-NEXT: v_pk_add_f16 v4, 0x200, s6 op_sel_hi:[0,1]
+; GFX11-TRUE16-NEXT: v_lshrrev_b32_e32 v83, 16, v0
+; GFX11-TRUE16-NEXT: v_lshrrev_b32_e32 v82, 16, v1
+; GFX11-TRUE16-NEXT: v_lshrrev_b32_e32 v81, 16, v2
+; GFX11-TRUE16-NEXT: v_lshrrev_b32_e32 v80, 16, v3
+; GFX11-TRUE16-NEXT: v_lshrrev_b32_e32 v71, 16, v4
+; GFX11-TRUE16-NEXT: v_lshrrev_b32_e32 v70, 16, v5
+; GFX11-TRUE16-NEXT: v_lshrrev_b32_e32 v69, 16, v6
+; GFX11-TRUE16-NEXT: v_lshrrev_b32_e32 v68, 16, v7
+; GFX11-TRUE16-NEXT: v_lshrrev_b32_e32 v67, 16, v8
+; GFX11-TRUE16-NEXT: v_lshrrev_b32_e32 v66, 16, v9
+; GFX11-TRUE16-NEXT: v_lshrrev_b32_e32 v65, 16, v10
+; GFX11-TRUE16-NEXT: v_lshrrev_b32_e32 v64, 16, v11
+; GFX11-TRUE16-NEXT: v_lshrrev_b32_e32 v55, 16, v12
; GFX11-TRUE16-NEXT: v_lshrrev_b32_e32 v54, 16, v13
-; GFX11-TRUE16-NEXT: v_lshrrev_b32_e32 v53, 16, v12
-; GFX11-TRUE16-NEXT: v_lshrrev_b32_e32 v52, 16, v17
+; GFX11-TRUE16-NEXT: v_lshrrev_b32_e32 v53, 16, v14
+; GFX11-TRUE16-NEXT: v_lshrrev_b32_e32 v52, 16, v15
; GFX11-TRUE16-NEXT: v_lshrrev_b32_e32 v51, 16, v16
-; GFX11-TRUE16-NEXT: v_lshrrev_b32_e32 v50, 16, v15
-; GFX11-TRUE16-NEXT: v_lshrrev_b32_e32 v18, 16, v0
-; GFX11-TRUE16-NEXT: v_lshrrev_b32_e32 v19, 16, v1
-; GFX11-TRUE16-NEXT: v_lshrrev_b32_e32 v20, 16, v2
-; GFX11-TRUE16-NEXT: v_lshrrev_b32_e32 v21, 16, v3
-; GFX11-TRUE16-NEXT: v_lshrrev_b32_e32 v22, 16, v4
-; GFX11-TRUE16-NEXT: v_lshrrev_b32_e32 v23, 16, v5
-; GFX11-TRUE16-NEXT: v_lshrrev_b32_e32 v24, 16, v6
-; GFX11-TRUE16-NEXT: v_lshrrev_b32_e32 v25, 16, v7
-; GFX11-TRUE16-NEXT: v_lshrrev_b32_e32 v26, 16, v8
-; GFX11-TRUE16-NEXT: v_lshrrev_b32_e32 v27, 16, v9
-; GFX11-TRUE16-NEXT: v_lshrrev_b32_e32 v28, 16, v10
-; GFX11-TRUE16-NEXT: v_lshrrev_b32_e32 v29, 16, v11
+; GFX11-TRUE16-NEXT: v_lshrrev_b32_e32 v50, 16, v17
+; GFX11-TRUE16-NEXT: v_lshrrev_b32_e32 v49, 16, v18
+; GFX11-TRUE16-NEXT: v_lshrrev_b32_e32 v48, 16, v19
+; GFX11-TRUE16-NEXT: v_lshrrev_b32_e32 v39, 16, v20
+; GFX11-TRUE16-NEXT: v_lshrrev_b32_e32 v38, 16, v21
+; GFX11-TRUE16-NEXT: v_lshrrev_b32_e32 v37, 16, v22
+; GFX11-TRUE16-NEXT: v_lshrrev_b32_e32 v36, 16, v23
+; GFX11-TRUE16-NEXT: v_lshrrev_b32_e32 v35, 16, v24
+; GFX11-TRUE16-NEXT: v_lshrrev_b32_e32 v34, 16, v25
+; GFX11-TRUE16-NEXT: v_lshrrev_b32_e32 v33, 16, v26
+; GFX11-TRUE16-NEXT: v_lshrrev_b32_e32 v32, 16, v27
+; GFX11-TRUE16-NEXT: v_lshrrev_b32_e32 v31, 16, v28
+; GFX11-TRUE16-NEXT: v_lshrrev_b32_e32 v30, 16, v29
; GFX11-TRUE16-NEXT: s_branch .LBB59_5
; GFX11-TRUE16-NEXT: .LBB59_3:
+; GFX11-TRUE16-NEXT: ; implicit-def: $vgpr49
+; GFX11-TRUE16-NEXT: ; implicit-def: $vgpr48
+; GFX11-TRUE16-NEXT: ; implicit-def: $vgpr39
+; GFX11-TRUE16-NEXT: ; implicit-def: $vgpr38
+; GFX11-TRUE16-NEXT: ; implicit-def: $vgpr37
+; GFX11-TRUE16-NEXT: ; implicit-def: $vgpr36
+; GFX11-TRUE16-NEXT: ; implicit-def: $vgpr35
+; GFX11-TRUE16-NEXT: ; implicit-def: $vgpr34
+; GFX11-TRUE16-NEXT: ; implicit-def: $vgpr33
+; GFX11-TRUE16-NEXT: ; implicit-def: $vgpr32
+; GFX11-TRUE16-NEXT: ; implicit-def: $vgpr31
+; GFX11-TRUE16-NEXT: ; implicit-def: $vgpr30
; GFX11-TRUE16-NEXT: s_branch .LBB59_2
; GFX11-TRUE16-NEXT: .LBB59_4:
-; GFX11-TRUE16-NEXT: v_dual_mov_b32 v15, s29 :: v_dual_mov_b32 v16, s28
-; GFX11-TRUE16-NEXT: v_dual_mov_b32 v17, s27 :: v_dual_mov_b32 v12, s26
-; GFX11-TRUE16-NEXT: v_dual_mov_b32 v13, s25 :: v_dual_mov_b32 v14, s24
-; GFX11-TRUE16-NEXT: v_dual_mov_b32 v34, s23 :: v_dual_mov_b32 v35, s22
-; GFX11-TRUE16-NEXT: v_dual_mov_b32 v30, s21 :: v_dual_mov_b32 v31, s20
-; GFX11-TRUE16-NEXT: v_dual_mov_b32 v32, s19 :: v_dual_mov_b32 v33, s18
-; GFX11-TRUE16-NEXT: v_dual_mov_b32 v49, s17 :: v_dual_mov_b32 v36, s16
-; GFX11-TRUE16-NEXT: v_dual_mov_b32 v37, s3 :: v_dual_mov_b32 v38, s2
-; GFX11-TRUE16-NEXT: v_dual_mov_b32 v39, s1 :: v_dual_mov_b32 v48, s0
+; GFX11-TRUE16-NEXT: v_dual_mov_b32 v17, s29 :: v_dual_mov_b32 v16, s28
+; GFX11-TRUE16-NEXT: v_dual_mov_b32 v15, s27 :: v_dual_mov_b32 v14, s26
+; GFX11-TRUE16-NEXT: v_dual_mov_b32 v13, s25 :: v_dual_mov_b32 v12, s24
+; GFX11-TRUE16-NEXT: v_dual_mov_b32 v11, s23 :: v_dual_mov_b32 v10, s22
+; GFX11-TRUE16-NEXT: v_dual_mov_b32 v9, s21 :: v_dual_mov_b32 v8, s20
+; GFX11-TRUE16-NEXT: v_dual_mov_b32 v7, s19 :: v_dual_mov_b32 v6, s18
+; GFX11-TRUE16-NEXT: v_dual_mov_b32 v5, s17 :: v_dual_mov_b32 v4, s16
+; GFX11-TRUE16-NEXT: v_dual_mov_b32 v3, s3 :: v_dual_mov_b32 v2, s2
+; GFX11-TRUE16-NEXT: v_dual_mov_b32 v1, s1 :: v_dual_mov_b32 v0, s0
; GFX11-TRUE16-NEXT: v_dual_mov_b32 v50, s44 :: v_dual_mov_b32 v51, s43
; GFX11-TRUE16-NEXT: v_dual_mov_b32 v52, s42 :: v_dual_mov_b32 v53, s41
; GFX11-TRUE16-NEXT: v_dual_mov_b32 v54, s40 :: v_dual_mov_b32 v55, s15
@@ -52543,75 +54460,52 @@ define inreg <60 x i16> @bitcast_v60f16_to_v60i16_scalar(<60 x half> inreg %a, i
; GFX11-TRUE16-NEXT: v_dual_mov_b32 v80, s5 :: v_dual_mov_b32 v81, s7
; GFX11-TRUE16-NEXT: v_dual_mov_b32 v82, s4 :: v_dual_mov_b32 v83, s45
; GFX11-TRUE16-NEXT: .LBB59_5: ; %end
-; GFX11-TRUE16-NEXT: v_and_b32_e32 v49, 0xffff, v49
-; GFX11-TRUE16-NEXT: v_and_b32_e32 v1, 0xffff, v1
-; GFX11-TRUE16-NEXT: v_and_b32_e32 v39, 0xffff, v39
-; GFX11-TRUE16-NEXT: v_and_b32_e32 v0, 0xffff, v0
-; GFX11-TRUE16-NEXT: v_and_b32_e32 v33, 0xffff, v33
-; GFX11-TRUE16-NEXT: v_lshl_or_b32 v49, v70, 16, v49
-; GFX11-TRUE16-NEXT: v_and_b32_e32 v84, 0xffff, v37
-; GFX11-TRUE16-NEXT: v_lshl_or_b32 v19, v19, 16, v1
-; GFX11-TRUE16-NEXT: v_and_b32_e32 v1, 0xffff, v3
-; GFX11-TRUE16-NEXT: v_and_b32_e32 v3, 0xffff, v5
-; GFX11-TRUE16-NEXT: v_lshl_or_b32 v37, v82, 16, v39
-; GFX11-TRUE16-NEXT: v_lshl_or_b32 v18, v18, 16, v0
-; GFX11-TRUE16-NEXT: v_and_b32_e32 v0, 0xffff, v2
-; GFX11-TRUE16-NEXT: v_and_b32_e32 v2, 0xffff, v4
-; GFX11-TRUE16-NEXT: v_lshl_or_b32 v23, v23, 16, v3
-; GFX11-TRUE16-NEXT: v_and_b32_e32 v3, 0xffff, v10
-; GFX11-TRUE16-NEXT: v_mov_b32_e32 v5, v49
-; GFX11-TRUE16-NEXT: v_lshl_or_b32 v39, v80, 16, v84
-; GFX11-TRUE16-NEXT: v_and_b32_e32 v38, 0xffff, v38
-; GFX11-TRUE16-NEXT: v_and_b32_e32 v48, 0xffff, v48
-; GFX11-TRUE16-NEXT: v_lshl_or_b32 v21, v21, 16, v1
-; GFX11-TRUE16-NEXT: v_lshl_or_b32 v22, v22, 16, v2
-; GFX11-TRUE16-NEXT: v_and_b32_e32 v1, 0xffff, v8
-; GFX11-TRUE16-NEXT: v_and_b32_e32 v2, 0xffff, v9
-; GFX11-TRUE16-NEXT: v_lshl_or_b32 v28, v28, 16, v3
-; GFX11-TRUE16-NEXT: v_mov_b32_e32 v3, v39
-; GFX11-TRUE16-NEXT: v_lshl_or_b32 v38, v81, 16, v38
-; GFX11-TRUE16-NEXT: v_and_b32_e32 v85, 0xffff, v36
-; GFX11-TRUE16-NEXT: v_and_b32_e32 v32, 0xffff, v32
-; GFX11-TRUE16-NEXT: v_and_b32_e32 v80, 0xffff, v30
-; GFX11-TRUE16-NEXT: v_lshl_or_b32 v26, v26, 16, v1
-; GFX11-TRUE16-NEXT: v_lshl_or_b32 v27, v27, 16, v2
-; GFX11-TRUE16-NEXT: v_mov_b32_e32 v1, v37
-; GFX11-TRUE16-NEXT: v_lshl_or_b32 v36, v83, 16, v48
-; GFX11-TRUE16-NEXT: v_mov_b32_e32 v2, v38
-; GFX11-TRUE16-NEXT: v_lshl_or_b32 v48, v71, 16, v85
-; GFX11-TRUE16-NEXT: v_and_b32_e32 v71, 0xffff, v31
-; GFX11-TRUE16-NEXT: v_lshl_or_b32 v30, v69, 16, v33
-; GFX11-TRUE16-NEXT: v_lshl_or_b32 v31, v68, 16, v32
-; GFX11-TRUE16-NEXT: v_lshl_or_b32 v33, v66, 16, v80
-; GFX11-TRUE16-NEXT: v_and_b32_e32 v35, 0xffff, v35
-; GFX11-TRUE16-NEXT: v_lshl_or_b32 v32, v67, 16, v71
-; GFX11-TRUE16-NEXT: v_and_b32_e32 v66, 0xffff, v34
-; GFX11-TRUE16-NEXT: v_and_b32_e32 v14, 0xffff, v14
-; GFX11-TRUE16-NEXT: v_and_b32_e32 v67, 0xffff, v12
-; GFX11-TRUE16-NEXT: v_and_b32_e32 v4, 0xffff, v6
-; GFX11-TRUE16-NEXT: v_and_b32_e32 v13, 0xffff, v13
-; GFX11-TRUE16-NEXT: v_lshl_or_b32 v34, v65, 16, v35
-; GFX11-TRUE16-NEXT: v_lshl_or_b32 v35, v64, 16, v66
-; GFX11-TRUE16-NEXT: v_lshl_or_b32 v12, v55, 16, v14
-; GFX11-TRUE16-NEXT: v_lshl_or_b32 v14, v53, 16, v67
-; GFX11-TRUE16-NEXT: v_and_b32_e32 v17, 0xffff, v17
-; GFX11-TRUE16-NEXT: v_and_b32_e32 v16, 0xffff, v16
-; GFX11-TRUE16-NEXT: v_and_b32_e32 v53, 0xffff, v15
-; GFX11-TRUE16-NEXT: v_lshl_or_b32 v20, v20, 16, v0
-; GFX11-TRUE16-NEXT: v_lshl_or_b32 v24, v24, 16, v4
-; GFX11-TRUE16-NEXT: v_and_b32_e32 v0, 0xffff, v7
-; GFX11-TRUE16-NEXT: v_and_b32_e32 v4, 0xffff, v11
-; GFX11-TRUE16-NEXT: v_lshl_or_b32 v13, v54, 16, v13
-; GFX11-TRUE16-NEXT: v_lshl_or_b32 v15, v52, 16, v17
-; GFX11-TRUE16-NEXT: v_lshl_or_b32 v16, v51, 16, v16
-; GFX11-TRUE16-NEXT: v_lshl_or_b32 v17, v50, 16, v53
-; GFX11-TRUE16-NEXT: v_lshl_or_b32 v25, v25, 16, v0
-; GFX11-TRUE16-NEXT: v_lshl_or_b32 v29, v29, 16, v4
-; GFX11-TRUE16-NEXT: v_mov_b32_e32 v0, v36
-; GFX11-TRUE16-NEXT: v_mov_b32_e32 v4, v48
-; GFX11-TRUE16-NEXT: v_dual_mov_b32 v6, v30 :: v_dual_mov_b32 v7, v31
-; GFX11-TRUE16-NEXT: v_dual_mov_b32 v8, v32 :: v_dual_mov_b32 v9, v33
-; GFX11-TRUE16-NEXT: v_dual_mov_b32 v10, v34 :: v_dual_mov_b32 v11, v35
+; GFX11-TRUE16-NEXT: s_delay_alu instid0(VALU_DEP_1) | instskip(NEXT) | instid1(VALU_DEP_3)
+; GFX11-TRUE16-NEXT: v_dual_mov_b32 v83, v83 :: v_dual_mov_b32 v82, v82
+; GFX11-TRUE16-NEXT: v_dual_mov_b32 v81, v81 :: v_dual_mov_b32 v80, v80
+; GFX11-TRUE16-NEXT: v_dual_mov_b32 v71, v71 :: v_dual_mov_b32 v70, v70
+; GFX11-TRUE16-NEXT: v_dual_mov_b32 v69, v69 :: v_dual_mov_b32 v68, v68
+; GFX11-TRUE16-NEXT: v_dual_mov_b32 v67, v67 :: v_dual_mov_b32 v66, v66
+; GFX11-TRUE16-NEXT: v_dual_mov_b32 v65, v65 :: v_dual_mov_b32 v64, v64
+; GFX11-TRUE16-NEXT: v_dual_mov_b32 v55, v55 :: v_dual_mov_b32 v54, v54
+; GFX11-TRUE16-NEXT: v_dual_mov_b32 v53, v53 :: v_dual_mov_b32 v52, v52
+; GFX11-TRUE16-NEXT: v_dual_mov_b32 v51, v51 :: v_dual_mov_b32 v50, v50
+; GFX11-TRUE16-NEXT: v_dual_mov_b32 v49, v49 :: v_dual_mov_b32 v48, v48
+; GFX11-TRUE16-NEXT: v_dual_mov_b32 v39, v39 :: v_dual_mov_b32 v38, v38
+; GFX11-TRUE16-NEXT: v_dual_mov_b32 v37, v37 :: v_dual_mov_b32 v36, v36
+; GFX11-TRUE16-NEXT: v_dual_mov_b32 v35, v35 :: v_dual_mov_b32 v34, v34
+; GFX11-TRUE16-NEXT: v_dual_mov_b32 v33, v33 :: v_dual_mov_b32 v32, v32
+; GFX11-TRUE16-NEXT: v_dual_mov_b32 v31, v31 :: v_dual_mov_b32 v30, v30
+; GFX11-TRUE16-NEXT: v_mov_b16_e32 v0.h, v83.l
+; GFX11-TRUE16-NEXT: v_mov_b16_e32 v1.h, v82.l
+; GFX11-TRUE16-NEXT: v_mov_b16_e32 v2.h, v81.l
+; GFX11-TRUE16-NEXT: v_mov_b16_e32 v3.h, v80.l
+; GFX11-TRUE16-NEXT: v_mov_b16_e32 v4.h, v71.l
+; GFX11-TRUE16-NEXT: v_mov_b16_e32 v5.h, v70.l
+; GFX11-TRUE16-NEXT: v_mov_b16_e32 v6.h, v69.l
+; GFX11-TRUE16-NEXT: v_mov_b16_e32 v7.h, v68.l
+; GFX11-TRUE16-NEXT: v_mov_b16_e32 v8.h, v67.l
+; GFX11-TRUE16-NEXT: v_mov_b16_e32 v9.h, v66.l
+; GFX11-TRUE16-NEXT: v_mov_b16_e32 v10.h, v65.l
+; GFX11-TRUE16-NEXT: v_mov_b16_e32 v11.h, v64.l
+; GFX11-TRUE16-NEXT: v_mov_b16_e32 v12.h, v55.l
+; GFX11-TRUE16-NEXT: v_mov_b16_e32 v13.h, v54.l
+; GFX11-TRUE16-NEXT: v_mov_b16_e32 v14.h, v53.l
+; GFX11-TRUE16-NEXT: v_mov_b16_e32 v15.h, v52.l
+; GFX11-TRUE16-NEXT: v_mov_b16_e32 v16.h, v51.l
+; GFX11-TRUE16-NEXT: v_mov_b16_e32 v17.h, v50.l
+; GFX11-TRUE16-NEXT: v_mov_b16_e32 v18.h, v49.l
+; GFX11-TRUE16-NEXT: v_mov_b16_e32 v19.h, v48.l
+; GFX11-TRUE16-NEXT: v_mov_b16_e32 v20.h, v39.l
+; GFX11-TRUE16-NEXT: v_mov_b16_e32 v21.h, v38.l
+; GFX11-TRUE16-NEXT: v_mov_b16_e32 v22.h, v37.l
+; GFX11-TRUE16-NEXT: v_mov_b16_e32 v23.h, v36.l
+; GFX11-TRUE16-NEXT: v_mov_b16_e32 v24.h, v35.l
+; GFX11-TRUE16-NEXT: v_mov_b16_e32 v25.h, v34.l
+; GFX11-TRUE16-NEXT: v_mov_b16_e32 v26.h, v33.l
+; GFX11-TRUE16-NEXT: v_mov_b16_e32 v27.h, v32.l
+; GFX11-TRUE16-NEXT: v_mov_b16_e32 v28.h, v31.l
+; GFX11-TRUE16-NEXT: v_mov_b16_e32 v29.h, v30.l
; GFX11-TRUE16-NEXT: s_setpc_b64 s[30:31]
;
; GFX11-FAKE16-LABEL: bitcast_v60f16_to_v60i16_scalar:
diff --git a/llvm/test/CodeGen/AMDGPU/amdgcn.bitcast.96bit.ll b/llvm/test/CodeGen/AMDGPU/amdgcn.bitcast.96bit.ll
index 9a6ea1b..6ada0cb 100644
--- a/llvm/test/CodeGen/AMDGPU/amdgcn.bitcast.96bit.ll
+++ b/llvm/test/CodeGen/AMDGPU/amdgcn.bitcast.96bit.ll
@@ -2402,89 +2402,171 @@ define inreg <3 x i32> @bitcast_v6bf16_to_v3i32_scalar(<6 x bfloat> inreg %a, i3
; GFX9-NEXT: v_mov_b32_e32 v2, s18
; GFX9-NEXT: s_setpc_b64 s[30:31]
;
-; GFX11-LABEL: bitcast_v6bf16_to_v3i32_scalar:
-; GFX11: ; %bb.0:
-; GFX11-NEXT: s_waitcnt vmcnt(0) expcnt(0) lgkmcnt(0)
-; GFX11-NEXT: s_cmp_lg_u32 s3, 0
-; GFX11-NEXT: s_mov_b32 s3, 0
-; GFX11-NEXT: s_cbranch_scc0 .LBB11_3
-; GFX11-NEXT: ; %bb.1: ; %Flow
-; GFX11-NEXT: s_and_not1_b32 vcc_lo, exec_lo, s3
-; GFX11-NEXT: s_cbranch_vccnz .LBB11_4
-; GFX11-NEXT: .LBB11_2: ; %cmp.true
-; GFX11-NEXT: s_pack_lh_b32_b16 s3, 0, s2
-; GFX11-NEXT: s_lshl_b32 s2, s2, 16
-; GFX11-NEXT: v_add_f32_e64 v0, 0x40c00000, s3
-; GFX11-NEXT: v_add_f32_e64 v1, 0x40c00000, s2
-; GFX11-NEXT: s_pack_lh_b32_b16 s2, 0, s0
-; GFX11-NEXT: s_lshl_b32 s0, s0, 16
-; GFX11-NEXT: s_pack_lh_b32_b16 s3, 0, s1
-; GFX11-NEXT: v_bfe_u32 v2, v0, 16, 1
-; GFX11-NEXT: v_bfe_u32 v3, v1, 16, 1
-; GFX11-NEXT: v_or_b32_e32 v8, 0x400000, v0
-; GFX11-NEXT: v_cmp_u_f32_e32 vcc_lo, v0, v0
-; GFX11-NEXT: v_add_f32_e64 v7, 0x40c00000, s0
-; GFX11-NEXT: v_or_b32_e32 v9, 0x400000, v1
-; GFX11-NEXT: v_add_nc_u32_e32 v3, v3, v1
-; GFX11-NEXT: v_add_f32_e64 v4, 0x40c00000, s3
-; GFX11-NEXT: s_lshl_b32 s1, s1, 16
-; GFX11-NEXT: v_or_b32_e32 v12, 0x400000, v7
-; GFX11-NEXT: v_add_f32_e64 v5, 0x40c00000, s1
-; GFX11-NEXT: v_add_nc_u32_e32 v3, 0x7fff, v3
-; GFX11-NEXT: v_add_nc_u32_e32 v2, v2, v0
-; GFX11-NEXT: v_bfe_u32 v10, v4, 16, 1
-; GFX11-NEXT: v_add_f32_e64 v6, 0x40c00000, s2
-; GFX11-NEXT: v_or_b32_e32 v11, 0x400000, v5
-; GFX11-NEXT: s_delay_alu instid0(VALU_DEP_4) | instskip(NEXT) | instid1(VALU_DEP_1)
-; GFX11-NEXT: v_add_nc_u32_e32 v2, 0x7fff, v2
-; GFX11-NEXT: v_cndmask_b32_e32 v0, v2, v8, vcc_lo
-; GFX11-NEXT: v_cmp_u_f32_e32 vcc_lo, v1, v1
-; GFX11-NEXT: v_bfe_u32 v2, v5, 16, 1
-; GFX11-NEXT: v_bfe_u32 v8, v7, 16, 1
-; GFX11-NEXT: s_delay_alu instid0(VALU_DEP_4)
-; GFX11-NEXT: v_lshrrev_b32_e32 v0, 16, v0
-; GFX11-NEXT: v_cndmask_b32_e32 v1, v3, v9, vcc_lo
-; GFX11-NEXT: v_add_nc_u32_e32 v3, v10, v4
-; GFX11-NEXT: v_or_b32_e32 v10, 0x400000, v4
-; GFX11-NEXT: v_bfe_u32 v9, v6, 16, 1
-; GFX11-NEXT: v_add_nc_u32_e32 v8, v8, v7
-; GFX11-NEXT: v_cmp_u_f32_e32 vcc_lo, v5, v5
-; GFX11-NEXT: v_add_nc_u32_e32 v3, 0x7fff, v3
-; GFX11-NEXT: v_add_nc_u32_e32 v2, v2, v5
-; GFX11-NEXT: v_lshrrev_b32_e32 v1, 16, v1
-; GFX11-NEXT: v_add_nc_u32_e32 v8, 0x7fff, v8
-; GFX11-NEXT: s_delay_alu instid0(VALU_DEP_3) | instskip(SKIP_1) | instid1(VALU_DEP_2)
-; GFX11-NEXT: v_add_nc_u32_e32 v2, 0x7fff, v2
-; GFX11-NEXT: v_add_nc_u32_e32 v9, v9, v6
-; GFX11-NEXT: v_dual_cndmask_b32 v2, v2, v11 :: v_dual_and_b32 v1, 0xffff, v1
-; GFX11-NEXT: v_cmp_u_f32_e32 vcc_lo, v7, v7
-; GFX11-NEXT: s_delay_alu instid0(VALU_DEP_3) | instskip(SKIP_1) | instid1(VALU_DEP_4)
-; GFX11-NEXT: v_add_nc_u32_e32 v5, 0x7fff, v9
-; GFX11-NEXT: v_or_b32_e32 v9, 0x400000, v6
-; GFX11-NEXT: v_lshrrev_b32_e32 v2, 16, v2
-; GFX11-NEXT: v_cndmask_b32_e32 v7, v8, v12, vcc_lo
-; GFX11-NEXT: v_cmp_u_f32_e32 vcc_lo, v4, v4
-; GFX11-NEXT: v_cndmask_b32_e32 v3, v3, v10, vcc_lo
-; GFX11-NEXT: v_cmp_u_f32_e32 vcc_lo, v6, v6
-; GFX11-NEXT: v_and_b32_e32 v6, 0xffff, v2
-; GFX11-NEXT: v_lshl_or_b32 v2, v0, 16, v1
-; GFX11-NEXT: s_delay_alu instid0(VALU_DEP_4) | instskip(SKIP_2) | instid1(VALU_DEP_3)
-; GFX11-NEXT: v_lshrrev_b32_e32 v3, 16, v3
-; GFX11-NEXT: v_cndmask_b32_e32 v4, v5, v9, vcc_lo
-; GFX11-NEXT: v_lshrrev_b32_e32 v5, 16, v7
-; GFX11-NEXT: v_lshl_or_b32 v1, v3, 16, v6
-; GFX11-NEXT: s_delay_alu instid0(VALU_DEP_3) | instskip(NEXT) | instid1(VALU_DEP_3)
-; GFX11-NEXT: v_lshrrev_b32_e32 v4, 16, v4
-; GFX11-NEXT: v_and_b32_e32 v5, 0xffff, v5
-; GFX11-NEXT: s_delay_alu instid0(VALU_DEP_1)
-; GFX11-NEXT: v_lshl_or_b32 v0, v4, 16, v5
-; GFX11-NEXT: s_setpc_b64 s[30:31]
-; GFX11-NEXT: .LBB11_3:
-; GFX11-NEXT: s_branch .LBB11_2
-; GFX11-NEXT: .LBB11_4:
-; GFX11-NEXT: v_dual_mov_b32 v0, s0 :: v_dual_mov_b32 v1, s1
-; GFX11-NEXT: v_mov_b32_e32 v2, s2
-; GFX11-NEXT: s_setpc_b64 s[30:31]
+; GFX11-TRUE16-LABEL: bitcast_v6bf16_to_v3i32_scalar:
+; GFX11-TRUE16: ; %bb.0:
+; GFX11-TRUE16-NEXT: s_waitcnt vmcnt(0) expcnt(0) lgkmcnt(0)
+; GFX11-TRUE16-NEXT: s_cmp_lg_u32 s3, 0
+; GFX11-TRUE16-NEXT: s_mov_b32 s3, 0
+; GFX11-TRUE16-NEXT: s_cbranch_scc0 .LBB11_3
+; GFX11-TRUE16-NEXT: ; %bb.1: ; %Flow
+; GFX11-TRUE16-NEXT: s_and_not1_b32 vcc_lo, exec_lo, s3
+; GFX11-TRUE16-NEXT: s_cbranch_vccnz .LBB11_4
+; GFX11-TRUE16-NEXT: .LBB11_2: ; %cmp.true
+; GFX11-TRUE16-NEXT: s_pack_lh_b32_b16 s3, 0, s2
+; GFX11-TRUE16-NEXT: s_lshl_b32 s2, s2, 16
+; GFX11-TRUE16-NEXT: v_add_f32_e64 v0, 0x40c00000, s3
+; GFX11-TRUE16-NEXT: s_pack_lh_b32_b16 s3, 0, s1
+; GFX11-TRUE16-NEXT: v_add_f32_e64 v1, 0x40c00000, s2
+; GFX11-TRUE16-NEXT: v_add_f32_e64 v3, 0x40c00000, s3
+; GFX11-TRUE16-NEXT: s_lshl_b32 s1, s1, 16
+; GFX11-TRUE16-NEXT: v_or_b32_e32 v8, 0x400000, v0
+; GFX11-TRUE16-NEXT: v_cmp_u_f32_e32 vcc_lo, v0, v0
+; GFX11-TRUE16-NEXT: v_bfe_u32 v4, v1, 16, 1
+; GFX11-TRUE16-NEXT: v_bfe_u32 v7, v3, 16, 1
+; GFX11-TRUE16-NEXT: v_bfe_u32 v2, v0, 16, 1
+; GFX11-TRUE16-NEXT: v_add_f32_e64 v5, 0x40c00000, s1
+; GFX11-TRUE16-NEXT: v_or_b32_e32 v9, 0x400000, v1
+; GFX11-TRUE16-NEXT: s_pack_lh_b32_b16 s2, 0, s0
+; GFX11-TRUE16-NEXT: v_add_nc_u32_e32 v7, v7, v3
+; GFX11-TRUE16-NEXT: s_lshl_b32 s0, s0, 16
+; GFX11-TRUE16-NEXT: v_or_b32_e32 v10, 0x400000, v5
+; GFX11-TRUE16-NEXT: v_add_f32_e64 v6, 0x40c00000, s2
+; GFX11-TRUE16-NEXT: s_delay_alu instid0(VALU_DEP_3) | instskip(SKIP_1) | instid1(VALU_DEP_1)
+; GFX11-TRUE16-NEXT: v_add_nc_u32_e32 v7, 0x7fff, v7
+; GFX11-TRUE16-NEXT: v_add_nc_u32_e32 v4, v4, v1
+; GFX11-TRUE16-NEXT: v_add_nc_u32_e32 v4, 0x7fff, v4
+; GFX11-TRUE16-NEXT: v_add_nc_u32_e32 v2, v2, v0
+; GFX11-TRUE16-NEXT: s_delay_alu instid0(VALU_DEP_1) | instskip(NEXT) | instid1(VALU_DEP_1)
+; GFX11-TRUE16-NEXT: v_add_nc_u32_e32 v2, 0x7fff, v2
+; GFX11-TRUE16-NEXT: v_cndmask_b32_e32 v0, v2, v8, vcc_lo
+; GFX11-TRUE16-NEXT: v_cmp_u_f32_e32 vcc_lo, v1, v1
+; GFX11-TRUE16-NEXT: v_or_b32_e32 v2, 0x400000, v3
+; GFX11-TRUE16-NEXT: v_bfe_u32 v8, v5, 16, 1
+; GFX11-TRUE16-NEXT: s_delay_alu instid0(VALU_DEP_4)
+; GFX11-TRUE16-NEXT: v_lshrrev_b32_e32 v0, 16, v0
+; GFX11-TRUE16-NEXT: v_cndmask_b32_e32 v1, v4, v9, vcc_lo
+; GFX11-TRUE16-NEXT: v_add_f32_e64 v4, 0x40c00000, s0
+; GFX11-TRUE16-NEXT: v_cmp_u_f32_e32 vcc_lo, v3, v3
+; GFX11-TRUE16-NEXT: v_bfe_u32 v9, v6, 16, 1
+; GFX11-TRUE16-NEXT: v_add_nc_u32_e32 v8, v8, v5
+; GFX11-TRUE16-NEXT: s_delay_alu instid0(VALU_DEP_4)
+; GFX11-TRUE16-NEXT: v_or_b32_e32 v11, 0x400000, v4
+; GFX11-TRUE16-NEXT: v_cndmask_b32_e32 v3, v7, v2, vcc_lo
+; GFX11-TRUE16-NEXT: v_bfe_u32 v7, v4, 16, 1
+; GFX11-TRUE16-NEXT: v_add_nc_u32_e32 v9, v9, v6
+; GFX11-TRUE16-NEXT: v_lshrrev_b32_e32 v2, 16, v1
+; GFX11-TRUE16-NEXT: v_cmp_u_f32_e32 vcc_lo, v6, v6
+; GFX11-TRUE16-NEXT: v_lshrrev_b32_e32 v3, 16, v3
+; GFX11-TRUE16-NEXT: v_add_nc_u32_e32 v1, v7, v4
+; GFX11-TRUE16-NEXT: v_mov_b16_e32 v2.h, v0.l
+; GFX11-TRUE16-NEXT: s_delay_alu instid0(VALU_DEP_2) | instskip(SKIP_3) | instid1(VALU_DEP_1)
+; GFX11-TRUE16-NEXT: v_add_nc_u32_e32 v1, 0x7fff, v1
+; GFX11-TRUE16-NEXT: v_add_nc_u32_e32 v7, 0x7fff, v8
+; GFX11-TRUE16-NEXT: v_add_nc_u32_e32 v8, 0x7fff, v9
+; GFX11-TRUE16-NEXT: v_or_b32_e32 v9, 0x400000, v6
+; GFX11-TRUE16-NEXT: v_cndmask_b32_e32 v6, v8, v9, vcc_lo
+; GFX11-TRUE16-NEXT: v_cmp_u_f32_e32 vcc_lo, v5, v5
+; GFX11-TRUE16-NEXT: s_delay_alu instid0(VALU_DEP_2) | instskip(SKIP_3) | instid1(VALU_DEP_3)
+; GFX11-TRUE16-NEXT: v_lshrrev_b32_e32 v6, 16, v6
+; GFX11-TRUE16-NEXT: v_cndmask_b32_e32 v5, v7, v10, vcc_lo
+; GFX11-TRUE16-NEXT: v_cmp_u_f32_e32 vcc_lo, v4, v4
+; GFX11-TRUE16-NEXT: v_cndmask_b32_e32 v4, v1, v11, vcc_lo
+; GFX11-TRUE16-NEXT: v_lshrrev_b32_e32 v1, 16, v5
+; GFX11-TRUE16-NEXT: v_mov_b16_e32 v1.h, v3.l
+; GFX11-TRUE16-NEXT: s_delay_alu instid0(VALU_DEP_3)
+; GFX11-TRUE16-NEXT: v_lshrrev_b32_e32 v0, 16, v4
+; GFX11-TRUE16-NEXT: v_mov_b16_e32 v0.h, v6.l
+; GFX11-TRUE16-NEXT: s_setpc_b64 s[30:31]
+; GFX11-TRUE16-NEXT: .LBB11_3:
+; GFX11-TRUE16-NEXT: s_branch .LBB11_2
+; GFX11-TRUE16-NEXT: .LBB11_4:
+; GFX11-TRUE16-NEXT: v_dual_mov_b32 v0, s0 :: v_dual_mov_b32 v1, s1
+; GFX11-TRUE16-NEXT: v_mov_b32_e32 v2, s2
+; GFX11-TRUE16-NEXT: s_setpc_b64 s[30:31]
+;
+; GFX11-FAKE16-LABEL: bitcast_v6bf16_to_v3i32_scalar:
+; GFX11-FAKE16: ; %bb.0:
+; GFX11-FAKE16-NEXT: s_waitcnt vmcnt(0) expcnt(0) lgkmcnt(0)
+; GFX11-FAKE16-NEXT: s_cmp_lg_u32 s3, 0
+; GFX11-FAKE16-NEXT: s_mov_b32 s3, 0
+; GFX11-FAKE16-NEXT: s_cbranch_scc0 .LBB11_3
+; GFX11-FAKE16-NEXT: ; %bb.1: ; %Flow
+; GFX11-FAKE16-NEXT: s_and_not1_b32 vcc_lo, exec_lo, s3
+; GFX11-FAKE16-NEXT: s_cbranch_vccnz .LBB11_4
+; GFX11-FAKE16-NEXT: .LBB11_2: ; %cmp.true
+; GFX11-FAKE16-NEXT: s_pack_lh_b32_b16 s3, 0, s2
+; GFX11-FAKE16-NEXT: s_lshl_b32 s2, s2, 16
+; GFX11-FAKE16-NEXT: v_add_f32_e64 v0, 0x40c00000, s3
+; GFX11-FAKE16-NEXT: v_add_f32_e64 v1, 0x40c00000, s2
+; GFX11-FAKE16-NEXT: s_pack_lh_b32_b16 s2, 0, s0
+; GFX11-FAKE16-NEXT: s_lshl_b32 s0, s0, 16
+; GFX11-FAKE16-NEXT: s_pack_lh_b32_b16 s3, 0, s1
+; GFX11-FAKE16-NEXT: v_bfe_u32 v2, v0, 16, 1
+; GFX11-FAKE16-NEXT: v_bfe_u32 v3, v1, 16, 1
+; GFX11-FAKE16-NEXT: v_or_b32_e32 v8, 0x400000, v0
+; GFX11-FAKE16-NEXT: v_cmp_u_f32_e32 vcc_lo, v0, v0
+; GFX11-FAKE16-NEXT: v_add_f32_e64 v7, 0x40c00000, s0
+; GFX11-FAKE16-NEXT: v_or_b32_e32 v9, 0x400000, v1
+; GFX11-FAKE16-NEXT: v_add_nc_u32_e32 v3, v3, v1
+; GFX11-FAKE16-NEXT: v_add_f32_e64 v4, 0x40c00000, s3
+; GFX11-FAKE16-NEXT: s_lshl_b32 s1, s1, 16
+; GFX11-FAKE16-NEXT: v_or_b32_e32 v12, 0x400000, v7
+; GFX11-FAKE16-NEXT: v_add_f32_e64 v5, 0x40c00000, s1
+; GFX11-FAKE16-NEXT: v_add_nc_u32_e32 v3, 0x7fff, v3
+; GFX11-FAKE16-NEXT: v_add_nc_u32_e32 v2, v2, v0
+; GFX11-FAKE16-NEXT: v_bfe_u32 v10, v4, 16, 1
+; GFX11-FAKE16-NEXT: v_add_f32_e64 v6, 0x40c00000, s2
+; GFX11-FAKE16-NEXT: v_or_b32_e32 v11, 0x400000, v5
+; GFX11-FAKE16-NEXT: s_delay_alu instid0(VALU_DEP_4) | instskip(NEXT) | instid1(VALU_DEP_1)
+; GFX11-FAKE16-NEXT: v_add_nc_u32_e32 v2, 0x7fff, v2
+; GFX11-FAKE16-NEXT: v_cndmask_b32_e32 v0, v2, v8, vcc_lo
+; GFX11-FAKE16-NEXT: v_cmp_u_f32_e32 vcc_lo, v1, v1
+; GFX11-FAKE16-NEXT: v_bfe_u32 v2, v5, 16, 1
+; GFX11-FAKE16-NEXT: v_bfe_u32 v8, v7, 16, 1
+; GFX11-FAKE16-NEXT: s_delay_alu instid0(VALU_DEP_4)
+; GFX11-FAKE16-NEXT: v_lshrrev_b32_e32 v0, 16, v0
+; GFX11-FAKE16-NEXT: v_cndmask_b32_e32 v1, v3, v9, vcc_lo
+; GFX11-FAKE16-NEXT: v_add_nc_u32_e32 v3, v10, v4
+; GFX11-FAKE16-NEXT: v_or_b32_e32 v10, 0x400000, v4
+; GFX11-FAKE16-NEXT: v_bfe_u32 v9, v6, 16, 1
+; GFX11-FAKE16-NEXT: v_add_nc_u32_e32 v8, v8, v7
+; GFX11-FAKE16-NEXT: v_cmp_u_f32_e32 vcc_lo, v5, v5
+; GFX11-FAKE16-NEXT: v_add_nc_u32_e32 v3, 0x7fff, v3
+; GFX11-FAKE16-NEXT: v_add_nc_u32_e32 v2, v2, v5
+; GFX11-FAKE16-NEXT: v_lshrrev_b32_e32 v1, 16, v1
+; GFX11-FAKE16-NEXT: v_add_nc_u32_e32 v8, 0x7fff, v8
+; GFX11-FAKE16-NEXT: s_delay_alu instid0(VALU_DEP_3) | instskip(SKIP_1) | instid1(VALU_DEP_2)
+; GFX11-FAKE16-NEXT: v_add_nc_u32_e32 v2, 0x7fff, v2
+; GFX11-FAKE16-NEXT: v_add_nc_u32_e32 v9, v9, v6
+; GFX11-FAKE16-NEXT: v_dual_cndmask_b32 v2, v2, v11 :: v_dual_and_b32 v1, 0xffff, v1
+; GFX11-FAKE16-NEXT: v_cmp_u_f32_e32 vcc_lo, v7, v7
+; GFX11-FAKE16-NEXT: s_delay_alu instid0(VALU_DEP_3) | instskip(SKIP_1) | instid1(VALU_DEP_4)
+; GFX11-FAKE16-NEXT: v_add_nc_u32_e32 v5, 0x7fff, v9
+; GFX11-FAKE16-NEXT: v_or_b32_e32 v9, 0x400000, v6
+; GFX11-FAKE16-NEXT: v_lshrrev_b32_e32 v2, 16, v2
+; GFX11-FAKE16-NEXT: v_cndmask_b32_e32 v7, v8, v12, vcc_lo
+; GFX11-FAKE16-NEXT: v_cmp_u_f32_e32 vcc_lo, v4, v4
+; GFX11-FAKE16-NEXT: v_cndmask_b32_e32 v3, v3, v10, vcc_lo
+; GFX11-FAKE16-NEXT: v_cmp_u_f32_e32 vcc_lo, v6, v6
+; GFX11-FAKE16-NEXT: v_and_b32_e32 v6, 0xffff, v2
+; GFX11-FAKE16-NEXT: v_lshl_or_b32 v2, v0, 16, v1
+; GFX11-FAKE16-NEXT: s_delay_alu instid0(VALU_DEP_4) | instskip(SKIP_2) | instid1(VALU_DEP_3)
+; GFX11-FAKE16-NEXT: v_lshrrev_b32_e32 v3, 16, v3
+; GFX11-FAKE16-NEXT: v_cndmask_b32_e32 v4, v5, v9, vcc_lo
+; GFX11-FAKE16-NEXT: v_lshrrev_b32_e32 v5, 16, v7
+; GFX11-FAKE16-NEXT: v_lshl_or_b32 v1, v3, 16, v6
+; GFX11-FAKE16-NEXT: s_delay_alu instid0(VALU_DEP_3) | instskip(NEXT) | instid1(VALU_DEP_3)
+; GFX11-FAKE16-NEXT: v_lshrrev_b32_e32 v4, 16, v4
+; GFX11-FAKE16-NEXT: v_and_b32_e32 v5, 0xffff, v5
+; GFX11-FAKE16-NEXT: s_delay_alu instid0(VALU_DEP_1)
+; GFX11-FAKE16-NEXT: v_lshl_or_b32 v0, v4, 16, v5
+; GFX11-FAKE16-NEXT: s_setpc_b64 s[30:31]
+; GFX11-FAKE16-NEXT: .LBB11_3:
+; GFX11-FAKE16-NEXT: s_branch .LBB11_2
+; GFX11-FAKE16-NEXT: .LBB11_4:
+; GFX11-FAKE16-NEXT: v_dual_mov_b32 v0, s0 :: v_dual_mov_b32 v1, s1
+; GFX11-FAKE16-NEXT: v_mov_b32_e32 v2, s2
+; GFX11-FAKE16-NEXT: s_setpc_b64 s[30:31]
%cmp = icmp eq i32 %b, 0
br i1 %cmp, label %cmp.true, label %cmp.false
@@ -5536,89 +5618,171 @@ define inreg <3 x float> @bitcast_v6bf16_to_v3f32_scalar(<6 x bfloat> inreg %a,
; GFX9-NEXT: v_mov_b32_e32 v2, s18
; GFX9-NEXT: s_setpc_b64 s[30:31]
;
-; GFX11-LABEL: bitcast_v6bf16_to_v3f32_scalar:
-; GFX11: ; %bb.0:
-; GFX11-NEXT: s_waitcnt vmcnt(0) expcnt(0) lgkmcnt(0)
-; GFX11-NEXT: s_cmp_lg_u32 s3, 0
-; GFX11-NEXT: s_mov_b32 s3, 0
-; GFX11-NEXT: s_cbranch_scc0 .LBB27_3
-; GFX11-NEXT: ; %bb.1: ; %Flow
-; GFX11-NEXT: s_and_not1_b32 vcc_lo, exec_lo, s3
-; GFX11-NEXT: s_cbranch_vccnz .LBB27_4
-; GFX11-NEXT: .LBB27_2: ; %cmp.true
-; GFX11-NEXT: s_pack_lh_b32_b16 s3, 0, s2
-; GFX11-NEXT: s_lshl_b32 s2, s2, 16
-; GFX11-NEXT: v_add_f32_e64 v0, 0x40c00000, s3
-; GFX11-NEXT: v_add_f32_e64 v1, 0x40c00000, s2
-; GFX11-NEXT: s_pack_lh_b32_b16 s2, 0, s0
-; GFX11-NEXT: s_lshl_b32 s0, s0, 16
-; GFX11-NEXT: s_pack_lh_b32_b16 s3, 0, s1
-; GFX11-NEXT: v_bfe_u32 v2, v0, 16, 1
-; GFX11-NEXT: v_bfe_u32 v3, v1, 16, 1
-; GFX11-NEXT: v_or_b32_e32 v8, 0x400000, v0
-; GFX11-NEXT: v_cmp_u_f32_e32 vcc_lo, v0, v0
-; GFX11-NEXT: v_add_f32_e64 v7, 0x40c00000, s0
-; GFX11-NEXT: v_or_b32_e32 v9, 0x400000, v1
-; GFX11-NEXT: v_add_nc_u32_e32 v3, v3, v1
-; GFX11-NEXT: v_add_f32_e64 v4, 0x40c00000, s3
-; GFX11-NEXT: s_lshl_b32 s1, s1, 16
-; GFX11-NEXT: v_or_b32_e32 v12, 0x400000, v7
-; GFX11-NEXT: v_add_f32_e64 v5, 0x40c00000, s1
-; GFX11-NEXT: v_add_nc_u32_e32 v3, 0x7fff, v3
-; GFX11-NEXT: v_add_nc_u32_e32 v2, v2, v0
-; GFX11-NEXT: v_bfe_u32 v10, v4, 16, 1
-; GFX11-NEXT: v_add_f32_e64 v6, 0x40c00000, s2
-; GFX11-NEXT: v_or_b32_e32 v11, 0x400000, v5
-; GFX11-NEXT: s_delay_alu instid0(VALU_DEP_4) | instskip(NEXT) | instid1(VALU_DEP_1)
-; GFX11-NEXT: v_add_nc_u32_e32 v2, 0x7fff, v2
-; GFX11-NEXT: v_cndmask_b32_e32 v0, v2, v8, vcc_lo
-; GFX11-NEXT: v_cmp_u_f32_e32 vcc_lo, v1, v1
-; GFX11-NEXT: v_bfe_u32 v2, v5, 16, 1
-; GFX11-NEXT: v_bfe_u32 v8, v7, 16, 1
-; GFX11-NEXT: s_delay_alu instid0(VALU_DEP_4)
-; GFX11-NEXT: v_lshrrev_b32_e32 v0, 16, v0
-; GFX11-NEXT: v_cndmask_b32_e32 v1, v3, v9, vcc_lo
-; GFX11-NEXT: v_add_nc_u32_e32 v3, v10, v4
-; GFX11-NEXT: v_or_b32_e32 v10, 0x400000, v4
-; GFX11-NEXT: v_bfe_u32 v9, v6, 16, 1
-; GFX11-NEXT: v_add_nc_u32_e32 v8, v8, v7
-; GFX11-NEXT: v_cmp_u_f32_e32 vcc_lo, v5, v5
-; GFX11-NEXT: v_add_nc_u32_e32 v3, 0x7fff, v3
-; GFX11-NEXT: v_add_nc_u32_e32 v2, v2, v5
-; GFX11-NEXT: v_lshrrev_b32_e32 v1, 16, v1
-; GFX11-NEXT: v_add_nc_u32_e32 v8, 0x7fff, v8
-; GFX11-NEXT: s_delay_alu instid0(VALU_DEP_3) | instskip(SKIP_1) | instid1(VALU_DEP_2)
-; GFX11-NEXT: v_add_nc_u32_e32 v2, 0x7fff, v2
-; GFX11-NEXT: v_add_nc_u32_e32 v9, v9, v6
-; GFX11-NEXT: v_dual_cndmask_b32 v2, v2, v11 :: v_dual_and_b32 v1, 0xffff, v1
-; GFX11-NEXT: v_cmp_u_f32_e32 vcc_lo, v7, v7
-; GFX11-NEXT: s_delay_alu instid0(VALU_DEP_3) | instskip(SKIP_1) | instid1(VALU_DEP_4)
-; GFX11-NEXT: v_add_nc_u32_e32 v5, 0x7fff, v9
-; GFX11-NEXT: v_or_b32_e32 v9, 0x400000, v6
-; GFX11-NEXT: v_lshrrev_b32_e32 v2, 16, v2
-; GFX11-NEXT: v_cndmask_b32_e32 v7, v8, v12, vcc_lo
-; GFX11-NEXT: v_cmp_u_f32_e32 vcc_lo, v4, v4
-; GFX11-NEXT: v_cndmask_b32_e32 v3, v3, v10, vcc_lo
-; GFX11-NEXT: v_cmp_u_f32_e32 vcc_lo, v6, v6
-; GFX11-NEXT: v_and_b32_e32 v6, 0xffff, v2
-; GFX11-NEXT: v_lshl_or_b32 v2, v0, 16, v1
-; GFX11-NEXT: s_delay_alu instid0(VALU_DEP_4) | instskip(SKIP_2) | instid1(VALU_DEP_3)
-; GFX11-NEXT: v_lshrrev_b32_e32 v3, 16, v3
-; GFX11-NEXT: v_cndmask_b32_e32 v4, v5, v9, vcc_lo
-; GFX11-NEXT: v_lshrrev_b32_e32 v5, 16, v7
-; GFX11-NEXT: v_lshl_or_b32 v1, v3, 16, v6
-; GFX11-NEXT: s_delay_alu instid0(VALU_DEP_3) | instskip(NEXT) | instid1(VALU_DEP_3)
-; GFX11-NEXT: v_lshrrev_b32_e32 v4, 16, v4
-; GFX11-NEXT: v_and_b32_e32 v5, 0xffff, v5
-; GFX11-NEXT: s_delay_alu instid0(VALU_DEP_1)
-; GFX11-NEXT: v_lshl_or_b32 v0, v4, 16, v5
-; GFX11-NEXT: s_setpc_b64 s[30:31]
-; GFX11-NEXT: .LBB27_3:
-; GFX11-NEXT: s_branch .LBB27_2
-; GFX11-NEXT: .LBB27_4:
-; GFX11-NEXT: v_dual_mov_b32 v0, s0 :: v_dual_mov_b32 v1, s1
-; GFX11-NEXT: v_mov_b32_e32 v2, s2
-; GFX11-NEXT: s_setpc_b64 s[30:31]
+; GFX11-TRUE16-LABEL: bitcast_v6bf16_to_v3f32_scalar:
+; GFX11-TRUE16: ; %bb.0:
+; GFX11-TRUE16-NEXT: s_waitcnt vmcnt(0) expcnt(0) lgkmcnt(0)
+; GFX11-TRUE16-NEXT: s_cmp_lg_u32 s3, 0
+; GFX11-TRUE16-NEXT: s_mov_b32 s3, 0
+; GFX11-TRUE16-NEXT: s_cbranch_scc0 .LBB27_3
+; GFX11-TRUE16-NEXT: ; %bb.1: ; %Flow
+; GFX11-TRUE16-NEXT: s_and_not1_b32 vcc_lo, exec_lo, s3
+; GFX11-TRUE16-NEXT: s_cbranch_vccnz .LBB27_4
+; GFX11-TRUE16-NEXT: .LBB27_2: ; %cmp.true
+; GFX11-TRUE16-NEXT: s_pack_lh_b32_b16 s3, 0, s2
+; GFX11-TRUE16-NEXT: s_lshl_b32 s2, s2, 16
+; GFX11-TRUE16-NEXT: v_add_f32_e64 v0, 0x40c00000, s3
+; GFX11-TRUE16-NEXT: s_pack_lh_b32_b16 s3, 0, s1
+; GFX11-TRUE16-NEXT: v_add_f32_e64 v1, 0x40c00000, s2
+; GFX11-TRUE16-NEXT: v_add_f32_e64 v3, 0x40c00000, s3
+; GFX11-TRUE16-NEXT: s_lshl_b32 s1, s1, 16
+; GFX11-TRUE16-NEXT: v_or_b32_e32 v8, 0x400000, v0
+; GFX11-TRUE16-NEXT: v_cmp_u_f32_e32 vcc_lo, v0, v0
+; GFX11-TRUE16-NEXT: v_bfe_u32 v4, v1, 16, 1
+; GFX11-TRUE16-NEXT: v_bfe_u32 v7, v3, 16, 1
+; GFX11-TRUE16-NEXT: v_bfe_u32 v2, v0, 16, 1
+; GFX11-TRUE16-NEXT: v_add_f32_e64 v5, 0x40c00000, s1
+; GFX11-TRUE16-NEXT: v_or_b32_e32 v9, 0x400000, v1
+; GFX11-TRUE16-NEXT: s_pack_lh_b32_b16 s2, 0, s0
+; GFX11-TRUE16-NEXT: v_add_nc_u32_e32 v7, v7, v3
+; GFX11-TRUE16-NEXT: s_lshl_b32 s0, s0, 16
+; GFX11-TRUE16-NEXT: v_or_b32_e32 v10, 0x400000, v5
+; GFX11-TRUE16-NEXT: v_add_f32_e64 v6, 0x40c00000, s2
+; GFX11-TRUE16-NEXT: s_delay_alu instid0(VALU_DEP_3) | instskip(SKIP_1) | instid1(VALU_DEP_1)
+; GFX11-TRUE16-NEXT: v_add_nc_u32_e32 v7, 0x7fff, v7
+; GFX11-TRUE16-NEXT: v_add_nc_u32_e32 v4, v4, v1
+; GFX11-TRUE16-NEXT: v_add_nc_u32_e32 v4, 0x7fff, v4
+; GFX11-TRUE16-NEXT: v_add_nc_u32_e32 v2, v2, v0
+; GFX11-TRUE16-NEXT: s_delay_alu instid0(VALU_DEP_1) | instskip(NEXT) | instid1(VALU_DEP_1)
+; GFX11-TRUE16-NEXT: v_add_nc_u32_e32 v2, 0x7fff, v2
+; GFX11-TRUE16-NEXT: v_cndmask_b32_e32 v0, v2, v8, vcc_lo
+; GFX11-TRUE16-NEXT: v_cmp_u_f32_e32 vcc_lo, v1, v1
+; GFX11-TRUE16-NEXT: v_or_b32_e32 v2, 0x400000, v3
+; GFX11-TRUE16-NEXT: v_bfe_u32 v8, v5, 16, 1
+; GFX11-TRUE16-NEXT: s_delay_alu instid0(VALU_DEP_4)
+; GFX11-TRUE16-NEXT: v_lshrrev_b32_e32 v0, 16, v0
+; GFX11-TRUE16-NEXT: v_cndmask_b32_e32 v1, v4, v9, vcc_lo
+; GFX11-TRUE16-NEXT: v_add_f32_e64 v4, 0x40c00000, s0
+; GFX11-TRUE16-NEXT: v_cmp_u_f32_e32 vcc_lo, v3, v3
+; GFX11-TRUE16-NEXT: v_bfe_u32 v9, v6, 16, 1
+; GFX11-TRUE16-NEXT: v_add_nc_u32_e32 v8, v8, v5
+; GFX11-TRUE16-NEXT: s_delay_alu instid0(VALU_DEP_4)
+; GFX11-TRUE16-NEXT: v_or_b32_e32 v11, 0x400000, v4
+; GFX11-TRUE16-NEXT: v_cndmask_b32_e32 v3, v7, v2, vcc_lo
+; GFX11-TRUE16-NEXT: v_bfe_u32 v7, v4, 16, 1
+; GFX11-TRUE16-NEXT: v_add_nc_u32_e32 v9, v9, v6
+; GFX11-TRUE16-NEXT: v_lshrrev_b32_e32 v2, 16, v1
+; GFX11-TRUE16-NEXT: v_cmp_u_f32_e32 vcc_lo, v6, v6
+; GFX11-TRUE16-NEXT: v_lshrrev_b32_e32 v3, 16, v3
+; GFX11-TRUE16-NEXT: v_add_nc_u32_e32 v1, v7, v4
+; GFX11-TRUE16-NEXT: v_mov_b16_e32 v2.h, v0.l
+; GFX11-TRUE16-NEXT: s_delay_alu instid0(VALU_DEP_2) | instskip(SKIP_3) | instid1(VALU_DEP_1)
+; GFX11-TRUE16-NEXT: v_add_nc_u32_e32 v1, 0x7fff, v1
+; GFX11-TRUE16-NEXT: v_add_nc_u32_e32 v7, 0x7fff, v8
+; GFX11-TRUE16-NEXT: v_add_nc_u32_e32 v8, 0x7fff, v9
+; GFX11-TRUE16-NEXT: v_or_b32_e32 v9, 0x400000, v6
+; GFX11-TRUE16-NEXT: v_cndmask_b32_e32 v6, v8, v9, vcc_lo
+; GFX11-TRUE16-NEXT: v_cmp_u_f32_e32 vcc_lo, v5, v5
+; GFX11-TRUE16-NEXT: s_delay_alu instid0(VALU_DEP_2) | instskip(SKIP_3) | instid1(VALU_DEP_3)
+; GFX11-TRUE16-NEXT: v_lshrrev_b32_e32 v6, 16, v6
+; GFX11-TRUE16-NEXT: v_cndmask_b32_e32 v5, v7, v10, vcc_lo
+; GFX11-TRUE16-NEXT: v_cmp_u_f32_e32 vcc_lo, v4, v4
+; GFX11-TRUE16-NEXT: v_cndmask_b32_e32 v4, v1, v11, vcc_lo
+; GFX11-TRUE16-NEXT: v_lshrrev_b32_e32 v1, 16, v5
+; GFX11-TRUE16-NEXT: v_mov_b16_e32 v1.h, v3.l
+; GFX11-TRUE16-NEXT: s_delay_alu instid0(VALU_DEP_3)
+; GFX11-TRUE16-NEXT: v_lshrrev_b32_e32 v0, 16, v4
+; GFX11-TRUE16-NEXT: v_mov_b16_e32 v0.h, v6.l
+; GFX11-TRUE16-NEXT: s_setpc_b64 s[30:31]
+; GFX11-TRUE16-NEXT: .LBB27_3:
+; GFX11-TRUE16-NEXT: s_branch .LBB27_2
+; GFX11-TRUE16-NEXT: .LBB27_4:
+; GFX11-TRUE16-NEXT: v_dual_mov_b32 v0, s0 :: v_dual_mov_b32 v1, s1
+; GFX11-TRUE16-NEXT: v_mov_b32_e32 v2, s2
+; GFX11-TRUE16-NEXT: s_setpc_b64 s[30:31]
+;
+; GFX11-FAKE16-LABEL: bitcast_v6bf16_to_v3f32_scalar:
+; GFX11-FAKE16: ; %bb.0:
+; GFX11-FAKE16-NEXT: s_waitcnt vmcnt(0) expcnt(0) lgkmcnt(0)
+; GFX11-FAKE16-NEXT: s_cmp_lg_u32 s3, 0
+; GFX11-FAKE16-NEXT: s_mov_b32 s3, 0
+; GFX11-FAKE16-NEXT: s_cbranch_scc0 .LBB27_3
+; GFX11-FAKE16-NEXT: ; %bb.1: ; %Flow
+; GFX11-FAKE16-NEXT: s_and_not1_b32 vcc_lo, exec_lo, s3
+; GFX11-FAKE16-NEXT: s_cbranch_vccnz .LBB27_4
+; GFX11-FAKE16-NEXT: .LBB27_2: ; %cmp.true
+; GFX11-FAKE16-NEXT: s_pack_lh_b32_b16 s3, 0, s2
+; GFX11-FAKE16-NEXT: s_lshl_b32 s2, s2, 16
+; GFX11-FAKE16-NEXT: v_add_f32_e64 v0, 0x40c00000, s3
+; GFX11-FAKE16-NEXT: v_add_f32_e64 v1, 0x40c00000, s2
+; GFX11-FAKE16-NEXT: s_pack_lh_b32_b16 s2, 0, s0
+; GFX11-FAKE16-NEXT: s_lshl_b32 s0, s0, 16
+; GFX11-FAKE16-NEXT: s_pack_lh_b32_b16 s3, 0, s1
+; GFX11-FAKE16-NEXT: v_bfe_u32 v2, v0, 16, 1
+; GFX11-FAKE16-NEXT: v_bfe_u32 v3, v1, 16, 1
+; GFX11-FAKE16-NEXT: v_or_b32_e32 v8, 0x400000, v0
+; GFX11-FAKE16-NEXT: v_cmp_u_f32_e32 vcc_lo, v0, v0
+; GFX11-FAKE16-NEXT: v_add_f32_e64 v7, 0x40c00000, s0
+; GFX11-FAKE16-NEXT: v_or_b32_e32 v9, 0x400000, v1
+; GFX11-FAKE16-NEXT: v_add_nc_u32_e32 v3, v3, v1
+; GFX11-FAKE16-NEXT: v_add_f32_e64 v4, 0x40c00000, s3
+; GFX11-FAKE16-NEXT: s_lshl_b32 s1, s1, 16
+; GFX11-FAKE16-NEXT: v_or_b32_e32 v12, 0x400000, v7
+; GFX11-FAKE16-NEXT: v_add_f32_e64 v5, 0x40c00000, s1
+; GFX11-FAKE16-NEXT: v_add_nc_u32_e32 v3, 0x7fff, v3
+; GFX11-FAKE16-NEXT: v_add_nc_u32_e32 v2, v2, v0
+; GFX11-FAKE16-NEXT: v_bfe_u32 v10, v4, 16, 1
+; GFX11-FAKE16-NEXT: v_add_f32_e64 v6, 0x40c00000, s2
+; GFX11-FAKE16-NEXT: v_or_b32_e32 v11, 0x400000, v5
+; GFX11-FAKE16-NEXT: s_delay_alu instid0(VALU_DEP_4) | instskip(NEXT) | instid1(VALU_DEP_1)
+; GFX11-FAKE16-NEXT: v_add_nc_u32_e32 v2, 0x7fff, v2
+; GFX11-FAKE16-NEXT: v_cndmask_b32_e32 v0, v2, v8, vcc_lo
+; GFX11-FAKE16-NEXT: v_cmp_u_f32_e32 vcc_lo, v1, v1
+; GFX11-FAKE16-NEXT: v_bfe_u32 v2, v5, 16, 1
+; GFX11-FAKE16-NEXT: v_bfe_u32 v8, v7, 16, 1
+; GFX11-FAKE16-NEXT: s_delay_alu instid0(VALU_DEP_4)
+; GFX11-FAKE16-NEXT: v_lshrrev_b32_e32 v0, 16, v0
+; GFX11-FAKE16-NEXT: v_cndmask_b32_e32 v1, v3, v9, vcc_lo
+; GFX11-FAKE16-NEXT: v_add_nc_u32_e32 v3, v10, v4
+; GFX11-FAKE16-NEXT: v_or_b32_e32 v10, 0x400000, v4
+; GFX11-FAKE16-NEXT: v_bfe_u32 v9, v6, 16, 1
+; GFX11-FAKE16-NEXT: v_add_nc_u32_e32 v8, v8, v7
+; GFX11-FAKE16-NEXT: v_cmp_u_f32_e32 vcc_lo, v5, v5
+; GFX11-FAKE16-NEXT: v_add_nc_u32_e32 v3, 0x7fff, v3
+; GFX11-FAKE16-NEXT: v_add_nc_u32_e32 v2, v2, v5
+; GFX11-FAKE16-NEXT: v_lshrrev_b32_e32 v1, 16, v1
+; GFX11-FAKE16-NEXT: v_add_nc_u32_e32 v8, 0x7fff, v8
+; GFX11-FAKE16-NEXT: s_delay_alu instid0(VALU_DEP_3) | instskip(SKIP_1) | instid1(VALU_DEP_2)
+; GFX11-FAKE16-NEXT: v_add_nc_u32_e32 v2, 0x7fff, v2
+; GFX11-FAKE16-NEXT: v_add_nc_u32_e32 v9, v9, v6
+; GFX11-FAKE16-NEXT: v_dual_cndmask_b32 v2, v2, v11 :: v_dual_and_b32 v1, 0xffff, v1
+; GFX11-FAKE16-NEXT: v_cmp_u_f32_e32 vcc_lo, v7, v7
+; GFX11-FAKE16-NEXT: s_delay_alu instid0(VALU_DEP_3) | instskip(SKIP_1) | instid1(VALU_DEP_4)
+; GFX11-FAKE16-NEXT: v_add_nc_u32_e32 v5, 0x7fff, v9
+; GFX11-FAKE16-NEXT: v_or_b32_e32 v9, 0x400000, v6
+; GFX11-FAKE16-NEXT: v_lshrrev_b32_e32 v2, 16, v2
+; GFX11-FAKE16-NEXT: v_cndmask_b32_e32 v7, v8, v12, vcc_lo
+; GFX11-FAKE16-NEXT: v_cmp_u_f32_e32 vcc_lo, v4, v4
+; GFX11-FAKE16-NEXT: v_cndmask_b32_e32 v3, v3, v10, vcc_lo
+; GFX11-FAKE16-NEXT: v_cmp_u_f32_e32 vcc_lo, v6, v6
+; GFX11-FAKE16-NEXT: v_and_b32_e32 v6, 0xffff, v2
+; GFX11-FAKE16-NEXT: v_lshl_or_b32 v2, v0, 16, v1
+; GFX11-FAKE16-NEXT: s_delay_alu instid0(VALU_DEP_4) | instskip(SKIP_2) | instid1(VALU_DEP_3)
+; GFX11-FAKE16-NEXT: v_lshrrev_b32_e32 v3, 16, v3
+; GFX11-FAKE16-NEXT: v_cndmask_b32_e32 v4, v5, v9, vcc_lo
+; GFX11-FAKE16-NEXT: v_lshrrev_b32_e32 v5, 16, v7
+; GFX11-FAKE16-NEXT: v_lshl_or_b32 v1, v3, 16, v6
+; GFX11-FAKE16-NEXT: s_delay_alu instid0(VALU_DEP_3) | instskip(NEXT) | instid1(VALU_DEP_3)
+; GFX11-FAKE16-NEXT: v_lshrrev_b32_e32 v4, 16, v4
+; GFX11-FAKE16-NEXT: v_and_b32_e32 v5, 0xffff, v5
+; GFX11-FAKE16-NEXT: s_delay_alu instid0(VALU_DEP_1)
+; GFX11-FAKE16-NEXT: v_lshl_or_b32 v0, v4, 16, v5
+; GFX11-FAKE16-NEXT: s_setpc_b64 s[30:31]
+; GFX11-FAKE16-NEXT: .LBB27_3:
+; GFX11-FAKE16-NEXT: s_branch .LBB27_2
+; GFX11-FAKE16-NEXT: .LBB27_4:
+; GFX11-FAKE16-NEXT: v_dual_mov_b32 v0, s0 :: v_dual_mov_b32 v1, s1
+; GFX11-FAKE16-NEXT: v_mov_b32_e32 v2, s2
+; GFX11-FAKE16-NEXT: s_setpc_b64 s[30:31]
%cmp = icmp eq i32 %b, 0
br i1 %cmp, label %cmp.true, label %cmp.false
@@ -8229,124 +8393,243 @@ define inreg <12 x i8> @bitcast_v6bf16_to_v12i8_scalar(<6 x bfloat> inreg %a, i3
; GFX9-NEXT: v_mov_b32_e32 v4, s17
; GFX9-NEXT: s_setpc_b64 s[30:31]
;
-; GFX11-LABEL: bitcast_v6bf16_to_v12i8_scalar:
-; GFX11: ; %bb.0:
-; GFX11-NEXT: s_waitcnt vmcnt(0) expcnt(0) lgkmcnt(0)
-; GFX11-NEXT: s_cmp_lg_u32 s3, 0
-; GFX11-NEXT: s_mov_b32 s3, 0
-; GFX11-NEXT: s_cbranch_scc0 .LBB39_3
-; GFX11-NEXT: ; %bb.1: ; %cmp.false
-; GFX11-NEXT: s_lshr_b32 s13, s2, 16
-; GFX11-NEXT: s_lshr_b32 s12, s2, 8
-; GFX11-NEXT: s_lshr_b32 s8, s1, 24
-; GFX11-NEXT: s_lshr_b32 s14, s1, 16
-; GFX11-NEXT: s_lshr_b32 s9, s1, 8
-; GFX11-NEXT: s_lshr_b32 s11, s0, 16
-; GFX11-NEXT: s_lshr_b32 s10, s0, 8
-; GFX11-NEXT: s_lshr_b64 s[6:7], s[2:3], 24
-; GFX11-NEXT: s_lshr_b64 s[4:5], s[0:1], 24
-; GFX11-NEXT: s_and_not1_b32 vcc_lo, exec_lo, s3
-; GFX11-NEXT: s_cbranch_vccnz .LBB39_4
-; GFX11-NEXT: .LBB39_2: ; %cmp.true
-; GFX11-NEXT: s_pack_lh_b32_b16 s3, 0, s1
-; GFX11-NEXT: s_lshl_b32 s1, s1, 16
-; GFX11-NEXT: v_add_f32_e64 v0, 0x40c00000, s3
-; GFX11-NEXT: v_add_f32_e64 v1, 0x40c00000, s1
-; GFX11-NEXT: s_pack_lh_b32_b16 s3, 0, s0
-; GFX11-NEXT: s_lshl_b32 s0, s0, 16
-; GFX11-NEXT: s_pack_lh_b32_b16 s1, 0, s2
-; GFX11-NEXT: v_bfe_u32 v2, v0, 16, 1
-; GFX11-NEXT: v_bfe_u32 v3, v1, 16, 1
-; GFX11-NEXT: v_or_b32_e32 v6, 0x400000, v0
-; GFX11-NEXT: v_cmp_u_f32_e32 vcc_lo, v0, v0
-; GFX11-NEXT: v_or_b32_e32 v9, 0x400000, v1
-; GFX11-NEXT: v_add_f32_e64 v5, 0x40c00000, s0
-; GFX11-NEXT: v_add_nc_u32_e32 v3, v3, v1
-; GFX11-NEXT: s_lshl_b32 s2, s2, 16
-; GFX11-NEXT: v_add_f32_e64 v4, 0x40c00000, s3
-; GFX11-NEXT: v_add_f32_e64 v8, 0x40c00000, s2
-; GFX11-NEXT: v_add_f32_e64 v7, 0x40c00000, s1
-; GFX11-NEXT: v_add_nc_u32_e32 v3, 0x7fff, v3
-; GFX11-NEXT: v_add_nc_u32_e32 v2, v2, v0
-; GFX11-NEXT: v_bfe_u32 v10, v4, 16, 1
-; GFX11-NEXT: v_or_b32_e32 v11, 0x400000, v8
-; GFX11-NEXT: v_mov_b32_e32 v12, 0x7fc07fc0
-; GFX11-NEXT: s_delay_alu instid0(VALU_DEP_4) | instskip(NEXT) | instid1(VALU_DEP_1)
-; GFX11-NEXT: v_add_nc_u32_e32 v2, 0x7fff, v2
-; GFX11-NEXT: v_cndmask_b32_e32 v0, v2, v6, vcc_lo
-; GFX11-NEXT: v_cmp_u_f32_e32 vcc_lo, v1, v1
-; GFX11-NEXT: v_bfe_u32 v2, v5, 16, 1
-; GFX11-NEXT: s_delay_alu instid0(VALU_DEP_3) | instskip(SKIP_4) | instid1(VALU_DEP_4)
-; GFX11-NEXT: v_lshrrev_b32_e32 v6, 16, v0
-; GFX11-NEXT: v_cndmask_b32_e32 v1, v3, v9, vcc_lo
-; GFX11-NEXT: v_bfe_u32 v0, v8, 16, 1
-; GFX11-NEXT: v_cmp_u_f32_e32 vcc_lo, v5, v5
-; GFX11-NEXT: v_or_b32_e32 v9, 0x400000, v4
-; GFX11-NEXT: v_lshrrev_b32_e32 v13, 16, v1
-; GFX11-NEXT: v_add_nc_u32_e32 v1, v2, v5
-; GFX11-NEXT: s_delay_alu instid0(VALU_DEP_1) | instskip(SKIP_1) | instid1(VALU_DEP_1)
-; GFX11-NEXT: v_add_nc_u32_e32 v1, 0x7fff, v1
-; GFX11-NEXT: v_add_nc_u32_e32 v0, v0, v8
-; GFX11-NEXT: v_add_nc_u32_e32 v0, 0x7fff, v0
-; GFX11-NEXT: v_add_nc_u32_e32 v3, v10, v4
-; GFX11-NEXT: v_or_b32_e32 v10, 0x400000, v5
-; GFX11-NEXT: v_or_b32_e32 v5, 0x400000, v7
-; GFX11-NEXT: s_delay_alu instid0(VALU_DEP_2)
-; GFX11-NEXT: v_cndmask_b32_e32 v1, v1, v10, vcc_lo
-; GFX11-NEXT: v_cmp_u_f32_e32 vcc_lo, v8, v8
-; GFX11-NEXT: v_cndmask_b32_e32 v8, v0, v11, vcc_lo
-; GFX11-NEXT: v_add_nc_u32_e32 v2, 0x7fff, v3
-; GFX11-NEXT: v_bfe_u32 v3, v7, 16, 1
-; GFX11-NEXT: v_cmp_u_f32_e32 vcc_lo, v4, v4
-; GFX11-NEXT: v_lshrrev_b32_e32 v0, 16, v1
-; GFX11-NEXT: v_lshrrev_b32_e32 v8, 16, v8
-; GFX11-NEXT: s_delay_alu instid0(VALU_DEP_4) | instskip(SKIP_1) | instid1(VALU_DEP_3)
-; GFX11-NEXT: v_dual_cndmask_b32 v2, v2, v9 :: v_dual_add_nc_u32 v3, v3, v7
-; GFX11-NEXT: v_cmp_u_f32_e32 vcc_lo, v7, v7
-; GFX11-NEXT: v_and_b32_e32 v9, 0xffff, v8
-; GFX11-NEXT: s_delay_alu instid0(VALU_DEP_3) | instskip(NEXT) | instid1(VALU_DEP_4)
-; GFX11-NEXT: v_add_nc_u32_e32 v3, 0x7fff, v3
-; GFX11-NEXT: v_lshrrev_b32_e32 v4, 16, v2
-; GFX11-NEXT: s_delay_alu instid0(VALU_DEP_2) | instskip(SKIP_2) | instid1(VALU_DEP_3)
-; GFX11-NEXT: v_cndmask_b32_e32 v1, v3, v5, vcc_lo
-; GFX11-NEXT: v_and_b32_e32 v3, 0xffff, v13
-; GFX11-NEXT: v_and_b32_e32 v5, 0xffff, v0
-; GFX11-NEXT: v_lshrrev_b32_e32 v7, 16, v1
-; GFX11-NEXT: s_delay_alu instid0(VALU_DEP_3) | instskip(NEXT) | instid1(VALU_DEP_3)
-; GFX11-NEXT: v_lshl_or_b32 v2, v6, 16, v3
-; GFX11-NEXT: v_lshl_or_b32 v1, v4, 16, v5
-; GFX11-NEXT: s_delay_alu instid0(VALU_DEP_3) | instskip(NEXT) | instid1(VALU_DEP_3)
-; GFX11-NEXT: v_lshl_or_b32 v11, v7, 16, v9
-; GFX11-NEXT: v_lshrrev_b32_e32 v7, 24, v2
-; GFX11-NEXT: s_delay_alu instid0(VALU_DEP_3)
-; GFX11-NEXT: v_lshrrev_b64 v[3:4], 24, v[1:2]
-; GFX11-NEXT: v_lshrrev_b32_e32 v5, 8, v2
-; GFX11-NEXT: v_lshrrev_b32_e32 v2, 16, v1
-; GFX11-NEXT: v_lshrrev_b32_e32 v10, 16, v11
-; GFX11-NEXT: v_lshrrev_b32_e32 v9, 8, v11
-; GFX11-NEXT: v_lshrrev_b64 v[11:12], 24, v[11:12]
-; GFX11-NEXT: v_lshrrev_b32_e32 v1, 8, v1
-; GFX11-NEXT: v_mov_b32_e32 v4, v13
-; GFX11-NEXT: s_setpc_b64 s[30:31]
-; GFX11-NEXT: .LBB39_3:
-; GFX11-NEXT: ; implicit-def: $sgpr10
-; GFX11-NEXT: ; implicit-def: $sgpr11
-; GFX11-NEXT: ; implicit-def: $sgpr4
-; GFX11-NEXT: ; implicit-def: $sgpr9
-; GFX11-NEXT: ; implicit-def: $sgpr14
-; GFX11-NEXT: ; implicit-def: $sgpr8
-; GFX11-NEXT: ; implicit-def: $sgpr12
-; GFX11-NEXT: ; implicit-def: $sgpr13
-; GFX11-NEXT: ; implicit-def: $sgpr6
-; GFX11-NEXT: s_branch .LBB39_2
-; GFX11-NEXT: .LBB39_4:
-; GFX11-NEXT: v_dual_mov_b32 v8, s2 :: v_dual_mov_b32 v9, s12
-; GFX11-NEXT: v_dual_mov_b32 v0, s0 :: v_dual_mov_b32 v1, s10
-; GFX11-NEXT: v_dual_mov_b32 v6, s14 :: v_dual_mov_b32 v7, s8
-; GFX11-NEXT: v_dual_mov_b32 v10, s13 :: v_dual_mov_b32 v5, s9
-; GFX11-NEXT: v_dual_mov_b32 v2, s11 :: v_dual_mov_b32 v11, s6
-; GFX11-NEXT: v_dual_mov_b32 v3, s4 :: v_dual_mov_b32 v4, s1
-; GFX11-NEXT: s_setpc_b64 s[30:31]
+; GFX11-TRUE16-LABEL: bitcast_v6bf16_to_v12i8_scalar:
+; GFX11-TRUE16: ; %bb.0:
+; GFX11-TRUE16-NEXT: s_waitcnt vmcnt(0) expcnt(0) lgkmcnt(0)
+; GFX11-TRUE16-NEXT: s_cmp_lg_u32 s3, 0
+; GFX11-TRUE16-NEXT: s_mov_b32 s3, 0
+; GFX11-TRUE16-NEXT: s_cbranch_scc0 .LBB39_3
+; GFX11-TRUE16-NEXT: ; %bb.1: ; %cmp.false
+; GFX11-TRUE16-NEXT: s_lshr_b32 s13, s2, 16
+; GFX11-TRUE16-NEXT: s_lshr_b32 s12, s2, 8
+; GFX11-TRUE16-NEXT: s_lshr_b32 s8, s1, 24
+; GFX11-TRUE16-NEXT: s_lshr_b32 s14, s1, 16
+; GFX11-TRUE16-NEXT: s_lshr_b32 s9, s1, 8
+; GFX11-TRUE16-NEXT: s_lshr_b32 s11, s0, 16
+; GFX11-TRUE16-NEXT: s_lshr_b32 s10, s0, 8
+; GFX11-TRUE16-NEXT: s_lshr_b64 s[6:7], s[2:3], 24
+; GFX11-TRUE16-NEXT: s_lshr_b64 s[4:5], s[0:1], 24
+; GFX11-TRUE16-NEXT: s_and_not1_b32 vcc_lo, exec_lo, s3
+; GFX11-TRUE16-NEXT: s_cbranch_vccnz .LBB39_4
+; GFX11-TRUE16-NEXT: .LBB39_2: ; %cmp.true
+; GFX11-TRUE16-NEXT: s_pack_lh_b32_b16 s3, 0, s1
+; GFX11-TRUE16-NEXT: s_lshl_b32 s1, s1, 16
+; GFX11-TRUE16-NEXT: v_add_f32_e64 v0, 0x40c00000, s3
+; GFX11-TRUE16-NEXT: v_add_f32_e64 v1, 0x40c00000, s1
+; GFX11-TRUE16-NEXT: s_pack_lh_b32_b16 s1, 0, s0
+; GFX11-TRUE16-NEXT: s_pack_lh_b32_b16 s3, 0, s2
+; GFX11-TRUE16-NEXT: v_add_f32_e64 v4, 0x40c00000, s1
+; GFX11-TRUE16-NEXT: v_bfe_u32 v2, v0, 16, 1
+; GFX11-TRUE16-NEXT: v_bfe_u32 v3, v1, 16, 1
+; GFX11-TRUE16-NEXT: v_or_b32_e32 v6, 0x400000, v0
+; GFX11-TRUE16-NEXT: v_cmp_u_f32_e32 vcc_lo, v0, v0
+; GFX11-TRUE16-NEXT: v_or_b32_e32 v7, 0x400000, v1
+; GFX11-TRUE16-NEXT: v_or_b32_e32 v9, 0x400000, v4
+; GFX11-TRUE16-NEXT: v_add_nc_u32_e32 v3, v3, v1
+; GFX11-TRUE16-NEXT: v_bfe_u32 v8, v4, 16, 1
+; GFX11-TRUE16-NEXT: s_lshl_b32 s0, s0, 16
+; GFX11-TRUE16-NEXT: s_lshl_b32 s2, s2, 16
+; GFX11-TRUE16-NEXT: v_add_f32_e64 v5, 0x40c00000, s0
+; GFX11-TRUE16-NEXT: v_add_nc_u32_e32 v3, 0x7fff, v3
+; GFX11-TRUE16-NEXT: v_add_nc_u32_e32 v2, v2, v0
+; GFX11-TRUE16-NEXT: v_mov_b32_e32 v12, 0x7fc07fc0
+; GFX11-TRUE16-NEXT: s_delay_alu instid0(VALU_DEP_2) | instskip(NEXT) | instid1(VALU_DEP_1)
+; GFX11-TRUE16-NEXT: v_add_nc_u32_e32 v2, 0x7fff, v2
+; GFX11-TRUE16-NEXT: v_cndmask_b32_e32 v0, v2, v6, vcc_lo
+; GFX11-TRUE16-NEXT: v_cmp_u_f32_e32 vcc_lo, v1, v1
+; GFX11-TRUE16-NEXT: v_add_f32_e64 v2, 0x40c00000, s3
+; GFX11-TRUE16-NEXT: s_delay_alu instid0(VALU_DEP_3)
+; GFX11-TRUE16-NEXT: v_lshrrev_b32_e32 v6, 16, v0
+; GFX11-TRUE16-NEXT: v_cndmask_b32_e32 v1, v3, v7, vcc_lo
+; GFX11-TRUE16-NEXT: v_add_nc_u32_e32 v7, v8, v4
+; GFX11-TRUE16-NEXT: v_cmp_u_f32_e32 vcc_lo, v4, v4
+; GFX11-TRUE16-NEXT: v_add_f32_e64 v3, 0x40c00000, s2
+; GFX11-TRUE16-NEXT: v_bfe_u32 v0, v5, 16, 1
+; GFX11-TRUE16-NEXT: v_lshrrev_b32_e32 v13, 16, v1
+; GFX11-TRUE16-NEXT: v_bfe_u32 v1, v2, 16, 1
+; GFX11-TRUE16-NEXT: v_add_nc_u32_e32 v7, 0x7fff, v7
+; GFX11-TRUE16-NEXT: v_bfe_u32 v8, v3, 16, 1
+; GFX11-TRUE16-NEXT: v_or_b32_e32 v10, 0x400000, v2
+; GFX11-TRUE16-NEXT: s_delay_alu instid0(VALU_DEP_3)
+; GFX11-TRUE16-NEXT: v_cndmask_b32_e32 v4, v7, v9, vcc_lo
+; GFX11-TRUE16-NEXT: v_or_b32_e32 v7, 0x400000, v5
+; GFX11-TRUE16-NEXT: v_cmp_u_f32_e32 vcc_lo, v5, v5
+; GFX11-TRUE16-NEXT: v_add_nc_u32_e32 v1, v1, v2
+; GFX11-TRUE16-NEXT: v_add_nc_u32_e32 v8, v8, v3
+; GFX11-TRUE16-NEXT: v_or_b32_e32 v9, 0x400000, v3
+; GFX11-TRUE16-NEXT: v_lshrrev_b32_e32 v4, 16, v4
+; GFX11-TRUE16-NEXT: s_delay_alu instid0(VALU_DEP_4) | instskip(SKIP_2) | instid1(VALU_DEP_2)
+; GFX11-TRUE16-NEXT: v_add_nc_u32_e32 v1, 0x7fff, v1
+; GFX11-TRUE16-NEXT: v_add_nc_u32_e32 v0, v0, v5
+; GFX11-TRUE16-NEXT: v_add_nc_u32_e32 v8, 0x7fff, v8
+; GFX11-TRUE16-NEXT: v_add_nc_u32_e32 v0, 0x7fff, v0
+; GFX11-TRUE16-NEXT: s_delay_alu instid0(VALU_DEP_1) | instskip(SKIP_1) | instid1(VALU_DEP_2)
+; GFX11-TRUE16-NEXT: v_cndmask_b32_e32 v0, v0, v7, vcc_lo
+; GFX11-TRUE16-NEXT: v_cmp_u_f32_e32 vcc_lo, v3, v3
+; GFX11-TRUE16-NEXT: v_lshrrev_b32_e32 v0, 16, v0
+; GFX11-TRUE16-NEXT: v_cndmask_b32_e32 v3, v8, v9, vcc_lo
+; GFX11-TRUE16-NEXT: v_cmp_u_f32_e32 vcc_lo, v2, v2
+; GFX11-TRUE16-NEXT: v_mov_b16_e32 v2.l, v13.l
+; GFX11-TRUE16-NEXT: v_mov_b16_e32 v2.h, v6.l
+; GFX11-TRUE16-NEXT: s_delay_alu instid0(VALU_DEP_4) | instskip(SKIP_1) | instid1(VALU_DEP_3)
+; GFX11-TRUE16-NEXT: v_lshrrev_b32_e32 v8, 16, v3
+; GFX11-TRUE16-NEXT: v_cndmask_b32_e32 v1, v1, v10, vcc_lo
+; GFX11-TRUE16-NEXT: v_lshrrev_b32_e32 v7, 24, v2
+; GFX11-TRUE16-NEXT: v_lshrrev_b32_e32 v5, 8, v2
+; GFX11-TRUE16-NEXT: s_delay_alu instid0(VALU_DEP_4) | instskip(NEXT) | instid1(VALU_DEP_4)
+; GFX11-TRUE16-NEXT: v_mov_b16_e32 v11.l, v8.l
+; GFX11-TRUE16-NEXT: v_lshrrev_b32_e32 v3, 16, v1
+; GFX11-TRUE16-NEXT: v_mov_b16_e32 v1.l, v0.l
+; GFX11-TRUE16-NEXT: v_mov_b16_e32 v1.h, v4.l
+; GFX11-TRUE16-NEXT: s_delay_alu instid0(VALU_DEP_3) | instskip(NEXT) | instid1(VALU_DEP_2)
+; GFX11-TRUE16-NEXT: v_mov_b16_e32 v11.h, v3.l
+; GFX11-TRUE16-NEXT: v_lshrrev_b64 v[3:4], 24, v[1:2]
+; GFX11-TRUE16-NEXT: v_lshrrev_b32_e32 v2, 16, v1
+; GFX11-TRUE16-NEXT: v_lshrrev_b32_e32 v1, 8, v1
+; GFX11-TRUE16-NEXT: s_delay_alu instid0(VALU_DEP_4)
+; GFX11-TRUE16-NEXT: v_lshrrev_b32_e32 v10, 16, v11
+; GFX11-TRUE16-NEXT: v_lshrrev_b32_e32 v9, 8, v11
+; GFX11-TRUE16-NEXT: v_lshrrev_b64 v[11:12], 24, v[11:12]
+; GFX11-TRUE16-NEXT: v_mov_b32_e32 v4, v13
+; GFX11-TRUE16-NEXT: s_setpc_b64 s[30:31]
+; GFX11-TRUE16-NEXT: .LBB39_3:
+; GFX11-TRUE16-NEXT: ; implicit-def: $sgpr10
+; GFX11-TRUE16-NEXT: ; implicit-def: $sgpr11
+; GFX11-TRUE16-NEXT: ; implicit-def: $sgpr4
+; GFX11-TRUE16-NEXT: ; implicit-def: $sgpr9
+; GFX11-TRUE16-NEXT: ; implicit-def: $sgpr14
+; GFX11-TRUE16-NEXT: ; implicit-def: $sgpr8
+; GFX11-TRUE16-NEXT: ; implicit-def: $sgpr12
+; GFX11-TRUE16-NEXT: ; implicit-def: $sgpr13
+; GFX11-TRUE16-NEXT: ; implicit-def: $sgpr6
+; GFX11-TRUE16-NEXT: s_branch .LBB39_2
+; GFX11-TRUE16-NEXT: .LBB39_4:
+; GFX11-TRUE16-NEXT: v_dual_mov_b32 v8, s2 :: v_dual_mov_b32 v9, s12
+; GFX11-TRUE16-NEXT: v_dual_mov_b32 v0, s0 :: v_dual_mov_b32 v1, s10
+; GFX11-TRUE16-NEXT: v_dual_mov_b32 v6, s14 :: v_dual_mov_b32 v7, s8
+; GFX11-TRUE16-NEXT: v_dual_mov_b32 v10, s13 :: v_dual_mov_b32 v5, s9
+; GFX11-TRUE16-NEXT: v_dual_mov_b32 v2, s11 :: v_dual_mov_b32 v11, s6
+; GFX11-TRUE16-NEXT: v_dual_mov_b32 v3, s4 :: v_dual_mov_b32 v4, s1
+; GFX11-TRUE16-NEXT: s_setpc_b64 s[30:31]
+;
+; GFX11-FAKE16-LABEL: bitcast_v6bf16_to_v12i8_scalar:
+; GFX11-FAKE16: ; %bb.0:
+; GFX11-FAKE16-NEXT: s_waitcnt vmcnt(0) expcnt(0) lgkmcnt(0)
+; GFX11-FAKE16-NEXT: s_cmp_lg_u32 s3, 0
+; GFX11-FAKE16-NEXT: s_mov_b32 s3, 0
+; GFX11-FAKE16-NEXT: s_cbranch_scc0 .LBB39_3
+; GFX11-FAKE16-NEXT: ; %bb.1: ; %cmp.false
+; GFX11-FAKE16-NEXT: s_lshr_b32 s13, s2, 16
+; GFX11-FAKE16-NEXT: s_lshr_b32 s12, s2, 8
+; GFX11-FAKE16-NEXT: s_lshr_b32 s8, s1, 24
+; GFX11-FAKE16-NEXT: s_lshr_b32 s14, s1, 16
+; GFX11-FAKE16-NEXT: s_lshr_b32 s9, s1, 8
+; GFX11-FAKE16-NEXT: s_lshr_b32 s11, s0, 16
+; GFX11-FAKE16-NEXT: s_lshr_b32 s10, s0, 8
+; GFX11-FAKE16-NEXT: s_lshr_b64 s[6:7], s[2:3], 24
+; GFX11-FAKE16-NEXT: s_lshr_b64 s[4:5], s[0:1], 24
+; GFX11-FAKE16-NEXT: s_and_not1_b32 vcc_lo, exec_lo, s3
+; GFX11-FAKE16-NEXT: s_cbranch_vccnz .LBB39_4
+; GFX11-FAKE16-NEXT: .LBB39_2: ; %cmp.true
+; GFX11-FAKE16-NEXT: s_pack_lh_b32_b16 s3, 0, s1
+; GFX11-FAKE16-NEXT: s_lshl_b32 s1, s1, 16
+; GFX11-FAKE16-NEXT: v_add_f32_e64 v0, 0x40c00000, s3
+; GFX11-FAKE16-NEXT: v_add_f32_e64 v1, 0x40c00000, s1
+; GFX11-FAKE16-NEXT: s_pack_lh_b32_b16 s3, 0, s0
+; GFX11-FAKE16-NEXT: s_lshl_b32 s0, s0, 16
+; GFX11-FAKE16-NEXT: s_pack_lh_b32_b16 s1, 0, s2
+; GFX11-FAKE16-NEXT: v_bfe_u32 v2, v0, 16, 1
+; GFX11-FAKE16-NEXT: v_bfe_u32 v3, v1, 16, 1
+; GFX11-FAKE16-NEXT: v_or_b32_e32 v6, 0x400000, v0
+; GFX11-FAKE16-NEXT: v_cmp_u_f32_e32 vcc_lo, v0, v0
+; GFX11-FAKE16-NEXT: v_or_b32_e32 v9, 0x400000, v1
+; GFX11-FAKE16-NEXT: v_add_f32_e64 v5, 0x40c00000, s0
+; GFX11-FAKE16-NEXT: v_add_nc_u32_e32 v3, v3, v1
+; GFX11-FAKE16-NEXT: s_lshl_b32 s2, s2, 16
+; GFX11-FAKE16-NEXT: v_add_f32_e64 v4, 0x40c00000, s3
+; GFX11-FAKE16-NEXT: v_add_f32_e64 v8, 0x40c00000, s2
+; GFX11-FAKE16-NEXT: v_add_f32_e64 v7, 0x40c00000, s1
+; GFX11-FAKE16-NEXT: v_add_nc_u32_e32 v3, 0x7fff, v3
+; GFX11-FAKE16-NEXT: v_add_nc_u32_e32 v2, v2, v0
+; GFX11-FAKE16-NEXT: v_bfe_u32 v10, v4, 16, 1
+; GFX11-FAKE16-NEXT: v_or_b32_e32 v11, 0x400000, v8
+; GFX11-FAKE16-NEXT: v_mov_b32_e32 v12, 0x7fc07fc0
+; GFX11-FAKE16-NEXT: s_delay_alu instid0(VALU_DEP_4) | instskip(NEXT) | instid1(VALU_DEP_1)
+; GFX11-FAKE16-NEXT: v_add_nc_u32_e32 v2, 0x7fff, v2
+; GFX11-FAKE16-NEXT: v_cndmask_b32_e32 v0, v2, v6, vcc_lo
+; GFX11-FAKE16-NEXT: v_cmp_u_f32_e32 vcc_lo, v1, v1
+; GFX11-FAKE16-NEXT: v_bfe_u32 v2, v5, 16, 1
+; GFX11-FAKE16-NEXT: s_delay_alu instid0(VALU_DEP_3) | instskip(SKIP_4) | instid1(VALU_DEP_4)
+; GFX11-FAKE16-NEXT: v_lshrrev_b32_e32 v6, 16, v0
+; GFX11-FAKE16-NEXT: v_cndmask_b32_e32 v1, v3, v9, vcc_lo
+; GFX11-FAKE16-NEXT: v_bfe_u32 v0, v8, 16, 1
+; GFX11-FAKE16-NEXT: v_cmp_u_f32_e32 vcc_lo, v5, v5
+; GFX11-FAKE16-NEXT: v_or_b32_e32 v9, 0x400000, v4
+; GFX11-FAKE16-NEXT: v_lshrrev_b32_e32 v13, 16, v1
+; GFX11-FAKE16-NEXT: v_add_nc_u32_e32 v1, v2, v5
+; GFX11-FAKE16-NEXT: s_delay_alu instid0(VALU_DEP_1) | instskip(SKIP_1) | instid1(VALU_DEP_1)
+; GFX11-FAKE16-NEXT: v_add_nc_u32_e32 v1, 0x7fff, v1
+; GFX11-FAKE16-NEXT: v_add_nc_u32_e32 v0, v0, v8
+; GFX11-FAKE16-NEXT: v_add_nc_u32_e32 v0, 0x7fff, v0
+; GFX11-FAKE16-NEXT: v_add_nc_u32_e32 v3, v10, v4
+; GFX11-FAKE16-NEXT: v_or_b32_e32 v10, 0x400000, v5
+; GFX11-FAKE16-NEXT: v_or_b32_e32 v5, 0x400000, v7
+; GFX11-FAKE16-NEXT: s_delay_alu instid0(VALU_DEP_2)
+; GFX11-FAKE16-NEXT: v_cndmask_b32_e32 v1, v1, v10, vcc_lo
+; GFX11-FAKE16-NEXT: v_cmp_u_f32_e32 vcc_lo, v8, v8
+; GFX11-FAKE16-NEXT: v_cndmask_b32_e32 v8, v0, v11, vcc_lo
+; GFX11-FAKE16-NEXT: v_add_nc_u32_e32 v2, 0x7fff, v3
+; GFX11-FAKE16-NEXT: v_bfe_u32 v3, v7, 16, 1
+; GFX11-FAKE16-NEXT: v_cmp_u_f32_e32 vcc_lo, v4, v4
+; GFX11-FAKE16-NEXT: v_lshrrev_b32_e32 v0, 16, v1
+; GFX11-FAKE16-NEXT: v_lshrrev_b32_e32 v8, 16, v8
+; GFX11-FAKE16-NEXT: s_delay_alu instid0(VALU_DEP_4) | instskip(SKIP_1) | instid1(VALU_DEP_3)
+; GFX11-FAKE16-NEXT: v_dual_cndmask_b32 v2, v2, v9 :: v_dual_add_nc_u32 v3, v3, v7
+; GFX11-FAKE16-NEXT: v_cmp_u_f32_e32 vcc_lo, v7, v7
+; GFX11-FAKE16-NEXT: v_and_b32_e32 v9, 0xffff, v8
+; GFX11-FAKE16-NEXT: s_delay_alu instid0(VALU_DEP_3) | instskip(NEXT) | instid1(VALU_DEP_4)
+; GFX11-FAKE16-NEXT: v_add_nc_u32_e32 v3, 0x7fff, v3
+; GFX11-FAKE16-NEXT: v_lshrrev_b32_e32 v4, 16, v2
+; GFX11-FAKE16-NEXT: s_delay_alu instid0(VALU_DEP_2) | instskip(SKIP_2) | instid1(VALU_DEP_3)
+; GFX11-FAKE16-NEXT: v_cndmask_b32_e32 v1, v3, v5, vcc_lo
+; GFX11-FAKE16-NEXT: v_and_b32_e32 v3, 0xffff, v13
+; GFX11-FAKE16-NEXT: v_and_b32_e32 v5, 0xffff, v0
+; GFX11-FAKE16-NEXT: v_lshrrev_b32_e32 v7, 16, v1
+; GFX11-FAKE16-NEXT: s_delay_alu instid0(VALU_DEP_3) | instskip(NEXT) | instid1(VALU_DEP_3)
+; GFX11-FAKE16-NEXT: v_lshl_or_b32 v2, v6, 16, v3
+; GFX11-FAKE16-NEXT: v_lshl_or_b32 v1, v4, 16, v5
+; GFX11-FAKE16-NEXT: s_delay_alu instid0(VALU_DEP_3) | instskip(NEXT) | instid1(VALU_DEP_3)
+; GFX11-FAKE16-NEXT: v_lshl_or_b32 v11, v7, 16, v9
+; GFX11-FAKE16-NEXT: v_lshrrev_b32_e32 v7, 24, v2
+; GFX11-FAKE16-NEXT: s_delay_alu instid0(VALU_DEP_3)
+; GFX11-FAKE16-NEXT: v_lshrrev_b64 v[3:4], 24, v[1:2]
+; GFX11-FAKE16-NEXT: v_lshrrev_b32_e32 v5, 8, v2
+; GFX11-FAKE16-NEXT: v_lshrrev_b32_e32 v2, 16, v1
+; GFX11-FAKE16-NEXT: v_lshrrev_b32_e32 v10, 16, v11
+; GFX11-FAKE16-NEXT: v_lshrrev_b32_e32 v9, 8, v11
+; GFX11-FAKE16-NEXT: v_lshrrev_b64 v[11:12], 24, v[11:12]
+; GFX11-FAKE16-NEXT: v_lshrrev_b32_e32 v1, 8, v1
+; GFX11-FAKE16-NEXT: v_mov_b32_e32 v4, v13
+; GFX11-FAKE16-NEXT: s_setpc_b64 s[30:31]
+; GFX11-FAKE16-NEXT: .LBB39_3:
+; GFX11-FAKE16-NEXT: ; implicit-def: $sgpr10
+; GFX11-FAKE16-NEXT: ; implicit-def: $sgpr11
+; GFX11-FAKE16-NEXT: ; implicit-def: $sgpr4
+; GFX11-FAKE16-NEXT: ; implicit-def: $sgpr9
+; GFX11-FAKE16-NEXT: ; implicit-def: $sgpr14
+; GFX11-FAKE16-NEXT: ; implicit-def: $sgpr8
+; GFX11-FAKE16-NEXT: ; implicit-def: $sgpr12
+; GFX11-FAKE16-NEXT: ; implicit-def: $sgpr13
+; GFX11-FAKE16-NEXT: ; implicit-def: $sgpr6
+; GFX11-FAKE16-NEXT: s_branch .LBB39_2
+; GFX11-FAKE16-NEXT: .LBB39_4:
+; GFX11-FAKE16-NEXT: v_dual_mov_b32 v8, s2 :: v_dual_mov_b32 v9, s12
+; GFX11-FAKE16-NEXT: v_dual_mov_b32 v0, s0 :: v_dual_mov_b32 v1, s10
+; GFX11-FAKE16-NEXT: v_dual_mov_b32 v6, s14 :: v_dual_mov_b32 v7, s8
+; GFX11-FAKE16-NEXT: v_dual_mov_b32 v10, s13 :: v_dual_mov_b32 v5, s9
+; GFX11-FAKE16-NEXT: v_dual_mov_b32 v2, s11 :: v_dual_mov_b32 v11, s6
+; GFX11-FAKE16-NEXT: v_dual_mov_b32 v3, s4 :: v_dual_mov_b32 v4, s1
+; GFX11-FAKE16-NEXT: s_setpc_b64 s[30:31]
%cmp = icmp eq i32 %b, 0
br i1 %cmp, label %cmp.true, label %cmp.false
@@ -11712,89 +11995,169 @@ define inreg <6 x half> @bitcast_v6bf16_to_v6f16_scalar(<6 x bfloat> inreg %a, i
; GFX9-NEXT: v_mov_b32_e32 v3, s19
; GFX9-NEXT: s_setpc_b64 s[30:31]
;
-; GFX11-LABEL: bitcast_v6bf16_to_v6f16_scalar:
-; GFX11: ; %bb.0:
-; GFX11-NEXT: s_waitcnt vmcnt(0) expcnt(0) lgkmcnt(0)
-; GFX11-NEXT: s_cmp_lg_u32 s3, 0
-; GFX11-NEXT: s_mov_b32 s4, 0
-; GFX11-NEXT: s_cbranch_scc0 .LBB49_3
-; GFX11-NEXT: ; %bb.1: ; %Flow
-; GFX11-NEXT: s_and_not1_b32 vcc_lo, exec_lo, s4
-; GFX11-NEXT: s_cbranch_vccnz .LBB49_4
-; GFX11-NEXT: .LBB49_2: ; %cmp.true
-; GFX11-NEXT: s_pack_lh_b32_b16 s3, 0, s0
-; GFX11-NEXT: s_lshl_b32 s0, s0, 16
-; GFX11-NEXT: v_add_f32_e64 v0, 0x40c00000, s3
-; GFX11-NEXT: v_add_f32_e64 v2, 0x40c00000, s0
-; GFX11-NEXT: s_pack_lh_b32_b16 s3, 0, s1
-; GFX11-NEXT: s_lshl_b32 s1, s1, 16
-; GFX11-NEXT: s_pack_lh_b32_b16 s4, 0, s2
-; GFX11-NEXT: v_add_f32_e64 v4, 0x40c00000, s1
-; GFX11-NEXT: v_bfe_u32 v6, v2, 16, 1
-; GFX11-NEXT: s_lshl_b32 s2, s2, 16
-; GFX11-NEXT: v_bfe_u32 v1, v0, 16, 1
-; GFX11-NEXT: v_add_f32_e64 v7, 0x40c00000, s2
-; GFX11-NEXT: v_or_b32_e32 v11, 0x400000, v4
-; GFX11-NEXT: v_add_nc_u32_e32 v6, v6, v2
-; GFX11-NEXT: v_or_b32_e32 v9, 0x400000, v2
-; GFX11-NEXT: v_add_f32_e64 v3, 0x40c00000, s3
-; GFX11-NEXT: v_or_b32_e32 v12, 0x400000, v7
-; GFX11-NEXT: v_or_b32_e32 v8, 0x400000, v0
-; GFX11-NEXT: v_add_nc_u32_e32 v6, 0x7fff, v6
-; GFX11-NEXT: v_add_nc_u32_e32 v1, v1, v0
-; GFX11-NEXT: v_cmp_u_f32_e32 vcc_lo, v0, v0
-; GFX11-NEXT: v_bfe_u32 v10, v3, 16, 1
-; GFX11-NEXT: v_add_f32_e64 v5, 0x40c00000, s4
-; GFX11-NEXT: s_delay_alu instid0(VALU_DEP_4) | instskip(NEXT) | instid1(VALU_DEP_3)
-; GFX11-NEXT: v_add_nc_u32_e32 v1, 0x7fff, v1
-; GFX11-NEXT: v_add_nc_u32_e32 v10, v10, v3
-; GFX11-NEXT: s_delay_alu instid0(VALU_DEP_2) | instskip(SKIP_3) | instid1(VALU_DEP_3)
-; GFX11-NEXT: v_cndmask_b32_e32 v0, v1, v8, vcc_lo
-; GFX11-NEXT: v_bfe_u32 v1, v4, 16, 1
-; GFX11-NEXT: v_bfe_u32 v8, v7, 16, 1
-; GFX11-NEXT: v_cmp_u_f32_e32 vcc_lo, v2, v2
-; GFX11-NEXT: v_dual_cndmask_b32 v2, v6, v9 :: v_dual_add_nc_u32 v1, v1, v4
-; GFX11-NEXT: v_add_nc_u32_e32 v6, 0x7fff, v10
-; GFX11-NEXT: v_or_b32_e32 v10, 0x400000, v3
-; GFX11-NEXT: v_add_nc_u32_e32 v8, v8, v7
-; GFX11-NEXT: s_delay_alu instid0(VALU_DEP_4)
-; GFX11-NEXT: v_add_nc_u32_e32 v1, 0x7fff, v1
-; GFX11-NEXT: v_cmp_u_f32_e32 vcc_lo, v4, v4
-; GFX11-NEXT: v_bfe_u32 v9, v5, 16, 1
-; GFX11-NEXT: v_lshrrev_b32_e32 v2, 16, v2
-; GFX11-NEXT: v_add_nc_u32_e32 v8, 0x7fff, v8
-; GFX11-NEXT: v_lshrrev_b32_e32 v0, 16, v0
-; GFX11-NEXT: v_cndmask_b32_e32 v1, v1, v11, vcc_lo
-; GFX11-NEXT: v_cmp_u_f32_e32 vcc_lo, v7, v7
-; GFX11-NEXT: v_add_nc_u32_e32 v9, v9, v5
-; GFX11-NEXT: s_delay_alu instid0(VALU_DEP_3) | instskip(SKIP_2) | instid1(VALU_DEP_4)
-; GFX11-NEXT: v_lshrrev_b32_e32 v1, 16, v1
-; GFX11-NEXT: v_cndmask_b32_e32 v7, v8, v12, vcc_lo
-; GFX11-NEXT: v_cmp_u_f32_e32 vcc_lo, v3, v3
-; GFX11-NEXT: v_add_nc_u32_e32 v4, 0x7fff, v9
-; GFX11-NEXT: v_or_b32_e32 v9, 0x400000, v5
-; GFX11-NEXT: v_and_b32_e32 v1, 0xffff, v1
-; GFX11-NEXT: v_cndmask_b32_e32 v3, v6, v10, vcc_lo
-; GFX11-NEXT: v_cmp_u_f32_e32 vcc_lo, v5, v5
-; GFX11-NEXT: v_lshrrev_b32_e32 v5, 16, v7
-; GFX11-NEXT: v_and_b32_e32 v6, 0xffff, v2
-; GFX11-NEXT: s_delay_alu instid0(VALU_DEP_4) | instskip(SKIP_1) | instid1(VALU_DEP_4)
-; GFX11-NEXT: v_lshrrev_b32_e32 v3, 16, v3
-; GFX11-NEXT: v_cndmask_b32_e32 v4, v4, v9, vcc_lo
-; GFX11-NEXT: v_and_b32_e32 v5, 0xffff, v5
-; GFX11-NEXT: s_delay_alu instid0(VALU_DEP_4) | instskip(NEXT) | instid1(VALU_DEP_4)
-; GFX11-NEXT: v_lshl_or_b32 v0, v0, 16, v6
-; GFX11-NEXT: v_lshl_or_b32 v1, v3, 16, v1
-; GFX11-NEXT: s_delay_alu instid0(VALU_DEP_4) | instskip(NEXT) | instid1(VALU_DEP_1)
-; GFX11-NEXT: v_lshrrev_b32_e32 v4, 16, v4
-; GFX11-NEXT: v_lshl_or_b32 v2, v4, 16, v5
-; GFX11-NEXT: s_setpc_b64 s[30:31]
-; GFX11-NEXT: .LBB49_3:
-; GFX11-NEXT: s_branch .LBB49_2
-; GFX11-NEXT: .LBB49_4:
-; GFX11-NEXT: v_dual_mov_b32 v0, s0 :: v_dual_mov_b32 v1, s1
-; GFX11-NEXT: v_dual_mov_b32 v2, s2 :: v_dual_mov_b32 v3, s3
-; GFX11-NEXT: s_setpc_b64 s[30:31]
+; GFX11-TRUE16-LABEL: bitcast_v6bf16_to_v6f16_scalar:
+; GFX11-TRUE16: ; %bb.0:
+; GFX11-TRUE16-NEXT: s_waitcnt vmcnt(0) expcnt(0) lgkmcnt(0)
+; GFX11-TRUE16-NEXT: s_cmp_lg_u32 s3, 0
+; GFX11-TRUE16-NEXT: s_mov_b32 s4, 0
+; GFX11-TRUE16-NEXT: s_cbranch_scc0 .LBB49_3
+; GFX11-TRUE16-NEXT: ; %bb.1: ; %Flow
+; GFX11-TRUE16-NEXT: s_and_not1_b32 vcc_lo, exec_lo, s4
+; GFX11-TRUE16-NEXT: s_cbranch_vccnz .LBB49_4
+; GFX11-TRUE16-NEXT: .LBB49_2: ; %cmp.true
+; GFX11-TRUE16-NEXT: s_pack_lh_b32_b16 s3, 0, s0
+; GFX11-TRUE16-NEXT: s_lshl_b32 s0, s0, 16
+; GFX11-TRUE16-NEXT: v_add_f32_e64 v0, 0x40c00000, s3
+; GFX11-TRUE16-NEXT: v_add_f32_e64 v1, 0x40c00000, s0
+; GFX11-TRUE16-NEXT: s_pack_lh_b32_b16 s0, 0, s2
+; GFX11-TRUE16-NEXT: s_lshl_b32 s2, s2, 16
+; GFX11-TRUE16-NEXT: s_pack_lh_b32_b16 s3, 0, s1
+; GFX11-TRUE16-NEXT: v_bfe_u32 v2, v0, 16, 1
+; GFX11-TRUE16-NEXT: v_bfe_u32 v4, v1, 16, 1
+; GFX11-TRUE16-NEXT: v_add_f32_e64 v7, 0x40c00000, s2
+; GFX11-TRUE16-NEXT: v_or_b32_e32 v9, 0x400000, v1
+; GFX11-TRUE16-NEXT: v_add_f32_e64 v3, 0x40c00000, s3
+; GFX11-TRUE16-NEXT: v_or_b32_e32 v8, 0x400000, v0
+; GFX11-TRUE16-NEXT: v_add_nc_u32_e32 v4, v4, v1
+; GFX11-TRUE16-NEXT: v_or_b32_e32 v11, 0x400000, v7
+; GFX11-TRUE16-NEXT: v_cmp_u_f32_e32 vcc_lo, v0, v0
+; GFX11-TRUE16-NEXT: v_add_f32_e64 v6, 0x40c00000, s0
+; GFX11-TRUE16-NEXT: v_bfe_u32 v10, v3, 16, 1
+; GFX11-TRUE16-NEXT: v_add_nc_u32_e32 v4, 0x7fff, v4
+; GFX11-TRUE16-NEXT: v_add_nc_u32_e32 v2, v2, v0
+; GFX11-TRUE16-NEXT: s_lshl_b32 s1, s1, 16
+; GFX11-TRUE16-NEXT: s_delay_alu instid0(SALU_CYCLE_1) | instskip(NEXT) | instid1(VALU_DEP_2)
+; GFX11-TRUE16-NEXT: v_add_f32_e64 v5, 0x40c00000, s1
+; GFX11-TRUE16-NEXT: v_add_nc_u32_e32 v2, 0x7fff, v2
+; GFX11-TRUE16-NEXT: s_delay_alu instid0(VALU_DEP_1)
+; GFX11-TRUE16-NEXT: v_cndmask_b32_e32 v0, v2, v8, vcc_lo
+; GFX11-TRUE16-NEXT: v_cmp_u_f32_e32 vcc_lo, v1, v1
+; GFX11-TRUE16-NEXT: v_add_nc_u32_e32 v8, v10, v3
+; GFX11-TRUE16-NEXT: v_bfe_u32 v2, v5, 16, 1
+; GFX11-TRUE16-NEXT: v_bfe_u32 v10, v7, 16, 1
+; GFX11-TRUE16-NEXT: v_cndmask_b32_e32 v1, v4, v9, vcc_lo
+; GFX11-TRUE16-NEXT: v_lshrrev_b32_e32 v4, 16, v0
+; GFX11-TRUE16-NEXT: v_bfe_u32 v0, v6, 16, 1
+; GFX11-TRUE16-NEXT: v_add_nc_u32_e32 v8, 0x7fff, v8
+; GFX11-TRUE16-NEXT: v_or_b32_e32 v9, 0x400000, v3
+; GFX11-TRUE16-NEXT: v_cmp_u_f32_e32 vcc_lo, v3, v3
+; GFX11-TRUE16-NEXT: v_add_nc_u32_e32 v2, v2, v5
+; GFX11-TRUE16-NEXT: v_add_nc_u32_e32 v0, v0, v6
+; GFX11-TRUE16-NEXT: s_delay_alu instid0(VALU_DEP_4) | instskip(SKIP_1) | instid1(VALU_DEP_3)
+; GFX11-TRUE16-NEXT: v_cndmask_b32_e32 v3, v8, v9, vcc_lo
+; GFX11-TRUE16-NEXT: v_or_b32_e32 v9, 0x400000, v6
+; GFX11-TRUE16-NEXT: v_add_nc_u32_e32 v0, 0x7fff, v0
+; GFX11-TRUE16-NEXT: v_cmp_u_f32_e32 vcc_lo, v6, v6
+; GFX11-TRUE16-NEXT: v_add_nc_u32_e32 v8, v10, v7
+; GFX11-TRUE16-NEXT: v_add_nc_u32_e32 v2, 0x7fff, v2
+; GFX11-TRUE16-NEXT: v_or_b32_e32 v10, 0x400000, v5
+; GFX11-TRUE16-NEXT: v_lshrrev_b32_e32 v3, 16, v3
+; GFX11-TRUE16-NEXT: v_cndmask_b32_e32 v0, v0, v9, vcc_lo
+; GFX11-TRUE16-NEXT: v_cmp_u_f32_e32 vcc_lo, v5, v5
+; GFX11-TRUE16-NEXT: v_add_nc_u32_e32 v8, 0x7fff, v8
+; GFX11-TRUE16-NEXT: s_delay_alu instid0(VALU_DEP_3) | instskip(SKIP_4) | instid1(VALU_DEP_4)
+; GFX11-TRUE16-NEXT: v_lshrrev_b32_e32 v6, 16, v0
+; GFX11-TRUE16-NEXT: v_cndmask_b32_e32 v2, v2, v10, vcc_lo
+; GFX11-TRUE16-NEXT: v_cmp_u_f32_e32 vcc_lo, v7, v7
+; GFX11-TRUE16-NEXT: v_lshrrev_b32_e32 v0, 16, v1
+; GFX11-TRUE16-NEXT: v_mov_b16_e32 v0.h, v4.l
+; GFX11-TRUE16-NEXT: v_lshrrev_b32_e32 v1, 16, v2
+; GFX11-TRUE16-NEXT: v_cndmask_b32_e32 v5, v8, v11, vcc_lo
+; GFX11-TRUE16-NEXT: v_mov_b16_e32 v1.h, v3.l
+; GFX11-TRUE16-NEXT: s_delay_alu instid0(VALU_DEP_2)
+; GFX11-TRUE16-NEXT: v_lshrrev_b32_e32 v2, 16, v5
+; GFX11-TRUE16-NEXT: v_mov_b16_e32 v2.h, v6.l
+; GFX11-TRUE16-NEXT: s_setpc_b64 s[30:31]
+; GFX11-TRUE16-NEXT: .LBB49_3:
+; GFX11-TRUE16-NEXT: s_branch .LBB49_2
+; GFX11-TRUE16-NEXT: .LBB49_4:
+; GFX11-TRUE16-NEXT: v_dual_mov_b32 v0, s0 :: v_dual_mov_b32 v1, s1
+; GFX11-TRUE16-NEXT: v_dual_mov_b32 v2, s2 :: v_dual_mov_b32 v3, s3
+; GFX11-TRUE16-NEXT: s_setpc_b64 s[30:31]
+;
+; GFX11-FAKE16-LABEL: bitcast_v6bf16_to_v6f16_scalar:
+; GFX11-FAKE16: ; %bb.0:
+; GFX11-FAKE16-NEXT: s_waitcnt vmcnt(0) expcnt(0) lgkmcnt(0)
+; GFX11-FAKE16-NEXT: s_cmp_lg_u32 s3, 0
+; GFX11-FAKE16-NEXT: s_mov_b32 s4, 0
+; GFX11-FAKE16-NEXT: s_cbranch_scc0 .LBB49_3
+; GFX11-FAKE16-NEXT: ; %bb.1: ; %Flow
+; GFX11-FAKE16-NEXT: s_and_not1_b32 vcc_lo, exec_lo, s4
+; GFX11-FAKE16-NEXT: s_cbranch_vccnz .LBB49_4
+; GFX11-FAKE16-NEXT: .LBB49_2: ; %cmp.true
+; GFX11-FAKE16-NEXT: s_pack_lh_b32_b16 s3, 0, s0
+; GFX11-FAKE16-NEXT: s_lshl_b32 s0, s0, 16
+; GFX11-FAKE16-NEXT: v_add_f32_e64 v0, 0x40c00000, s3
+; GFX11-FAKE16-NEXT: v_add_f32_e64 v2, 0x40c00000, s0
+; GFX11-FAKE16-NEXT: s_pack_lh_b32_b16 s3, 0, s1
+; GFX11-FAKE16-NEXT: s_lshl_b32 s1, s1, 16
+; GFX11-FAKE16-NEXT: s_pack_lh_b32_b16 s4, 0, s2
+; GFX11-FAKE16-NEXT: v_add_f32_e64 v4, 0x40c00000, s1
+; GFX11-FAKE16-NEXT: v_bfe_u32 v6, v2, 16, 1
+; GFX11-FAKE16-NEXT: s_lshl_b32 s2, s2, 16
+; GFX11-FAKE16-NEXT: v_bfe_u32 v1, v0, 16, 1
+; GFX11-FAKE16-NEXT: v_add_f32_e64 v7, 0x40c00000, s2
+; GFX11-FAKE16-NEXT: v_or_b32_e32 v11, 0x400000, v4
+; GFX11-FAKE16-NEXT: v_add_nc_u32_e32 v6, v6, v2
+; GFX11-FAKE16-NEXT: v_or_b32_e32 v9, 0x400000, v2
+; GFX11-FAKE16-NEXT: v_add_f32_e64 v3, 0x40c00000, s3
+; GFX11-FAKE16-NEXT: v_or_b32_e32 v12, 0x400000, v7
+; GFX11-FAKE16-NEXT: v_or_b32_e32 v8, 0x400000, v0
+; GFX11-FAKE16-NEXT: v_add_nc_u32_e32 v6, 0x7fff, v6
+; GFX11-FAKE16-NEXT: v_add_nc_u32_e32 v1, v1, v0
+; GFX11-FAKE16-NEXT: v_cmp_u_f32_e32 vcc_lo, v0, v0
+; GFX11-FAKE16-NEXT: v_bfe_u32 v10, v3, 16, 1
+; GFX11-FAKE16-NEXT: v_add_f32_e64 v5, 0x40c00000, s4
+; GFX11-FAKE16-NEXT: s_delay_alu instid0(VALU_DEP_4) | instskip(NEXT) | instid1(VALU_DEP_3)
+; GFX11-FAKE16-NEXT: v_add_nc_u32_e32 v1, 0x7fff, v1
+; GFX11-FAKE16-NEXT: v_add_nc_u32_e32 v10, v10, v3
+; GFX11-FAKE16-NEXT: s_delay_alu instid0(VALU_DEP_2) | instskip(SKIP_3) | instid1(VALU_DEP_3)
+; GFX11-FAKE16-NEXT: v_cndmask_b32_e32 v0, v1, v8, vcc_lo
+; GFX11-FAKE16-NEXT: v_bfe_u32 v1, v4, 16, 1
+; GFX11-FAKE16-NEXT: v_bfe_u32 v8, v7, 16, 1
+; GFX11-FAKE16-NEXT: v_cmp_u_f32_e32 vcc_lo, v2, v2
+; GFX11-FAKE16-NEXT: v_dual_cndmask_b32 v2, v6, v9 :: v_dual_add_nc_u32 v1, v1, v4
+; GFX11-FAKE16-NEXT: v_add_nc_u32_e32 v6, 0x7fff, v10
+; GFX11-FAKE16-NEXT: v_or_b32_e32 v10, 0x400000, v3
+; GFX11-FAKE16-NEXT: v_add_nc_u32_e32 v8, v8, v7
+; GFX11-FAKE16-NEXT: s_delay_alu instid0(VALU_DEP_4)
+; GFX11-FAKE16-NEXT: v_add_nc_u32_e32 v1, 0x7fff, v1
+; GFX11-FAKE16-NEXT: v_cmp_u_f32_e32 vcc_lo, v4, v4
+; GFX11-FAKE16-NEXT: v_bfe_u32 v9, v5, 16, 1
+; GFX11-FAKE16-NEXT: v_lshrrev_b32_e32 v2, 16, v2
+; GFX11-FAKE16-NEXT: v_add_nc_u32_e32 v8, 0x7fff, v8
+; GFX11-FAKE16-NEXT: v_lshrrev_b32_e32 v0, 16, v0
+; GFX11-FAKE16-NEXT: v_cndmask_b32_e32 v1, v1, v11, vcc_lo
+; GFX11-FAKE16-NEXT: v_cmp_u_f32_e32 vcc_lo, v7, v7
+; GFX11-FAKE16-NEXT: v_add_nc_u32_e32 v9, v9, v5
+; GFX11-FAKE16-NEXT: s_delay_alu instid0(VALU_DEP_3) | instskip(SKIP_2) | instid1(VALU_DEP_4)
+; GFX11-FAKE16-NEXT: v_lshrrev_b32_e32 v1, 16, v1
+; GFX11-FAKE16-NEXT: v_cndmask_b32_e32 v7, v8, v12, vcc_lo
+; GFX11-FAKE16-NEXT: v_cmp_u_f32_e32 vcc_lo, v3, v3
+; GFX11-FAKE16-NEXT: v_add_nc_u32_e32 v4, 0x7fff, v9
+; GFX11-FAKE16-NEXT: v_or_b32_e32 v9, 0x400000, v5
+; GFX11-FAKE16-NEXT: v_and_b32_e32 v1, 0xffff, v1
+; GFX11-FAKE16-NEXT: v_cndmask_b32_e32 v3, v6, v10, vcc_lo
+; GFX11-FAKE16-NEXT: v_cmp_u_f32_e32 vcc_lo, v5, v5
+; GFX11-FAKE16-NEXT: v_lshrrev_b32_e32 v5, 16, v7
+; GFX11-FAKE16-NEXT: v_and_b32_e32 v6, 0xffff, v2
+; GFX11-FAKE16-NEXT: s_delay_alu instid0(VALU_DEP_4) | instskip(SKIP_1) | instid1(VALU_DEP_4)
+; GFX11-FAKE16-NEXT: v_lshrrev_b32_e32 v3, 16, v3
+; GFX11-FAKE16-NEXT: v_cndmask_b32_e32 v4, v4, v9, vcc_lo
+; GFX11-FAKE16-NEXT: v_and_b32_e32 v5, 0xffff, v5
+; GFX11-FAKE16-NEXT: s_delay_alu instid0(VALU_DEP_4) | instskip(NEXT) | instid1(VALU_DEP_4)
+; GFX11-FAKE16-NEXT: v_lshl_or_b32 v0, v0, 16, v6
+; GFX11-FAKE16-NEXT: v_lshl_or_b32 v1, v3, 16, v1
+; GFX11-FAKE16-NEXT: s_delay_alu instid0(VALU_DEP_4) | instskip(NEXT) | instid1(VALU_DEP_1)
+; GFX11-FAKE16-NEXT: v_lshrrev_b32_e32 v4, 16, v4
+; GFX11-FAKE16-NEXT: v_lshl_or_b32 v2, v4, 16, v5
+; GFX11-FAKE16-NEXT: s_setpc_b64 s[30:31]
+; GFX11-FAKE16-NEXT: .LBB49_3:
+; GFX11-FAKE16-NEXT: s_branch .LBB49_2
+; GFX11-FAKE16-NEXT: .LBB49_4:
+; GFX11-FAKE16-NEXT: v_dual_mov_b32 v0, s0 :: v_dual_mov_b32 v1, s1
+; GFX11-FAKE16-NEXT: v_dual_mov_b32 v2, s2 :: v_dual_mov_b32 v3, s3
+; GFX11-FAKE16-NEXT: s_setpc_b64 s[30:31]
%cmp = icmp eq i32 %b, 0
br i1 %cmp, label %cmp.true, label %cmp.false
@@ -12306,64 +12669,57 @@ define <6 x i16> @bitcast_v6bf16_to_v6i16(<6 x bfloat> %a, i32 %b) {
; GFX11-TRUE16-NEXT: ; %bb.1: ; %cmp.true
; GFX11-TRUE16-NEXT: v_mov_b16_e32 v3.l, 0
; GFX11-TRUE16-NEXT: v_mov_b16_e32 v3.h, v0.l
-; GFX11-TRUE16-NEXT: v_and_b32_e32 v4, 0xffff0000, v1
-; GFX11-TRUE16-NEXT: s_delay_alu instid0(VALU_DEP_2) | instskip(SKIP_1) | instid1(VALU_DEP_2)
-; GFX11-TRUE16-NEXT: v_add_f32_e32 v5, 0x40c00000, v3
+; GFX11-TRUE16-NEXT: s_delay_alu instid0(VALU_DEP_1) | instskip(SKIP_1) | instid1(VALU_DEP_2)
+; GFX11-TRUE16-NEXT: v_add_f32_e32 v4, 0x40c00000, v3
; GFX11-TRUE16-NEXT: v_mov_b16_e32 v3.h, v1.l
-; GFX11-TRUE16-NEXT: v_or_b32_e32 v7, 0x400000, v5
-; GFX11-TRUE16-NEXT: v_cmp_u_f32_e32 vcc_lo, v5, v5
-; GFX11-TRUE16-NEXT: v_add_f32_e32 v1, 0x40c00000, v4
-; GFX11-TRUE16-NEXT: v_bfe_u32 v4, v5, 16, 1
+; GFX11-TRUE16-NEXT: v_bfe_u32 v6, v4, 16, 1
+; GFX11-TRUE16-NEXT: v_or_b32_e32 v7, 0x400000, v4
+; GFX11-TRUE16-NEXT: s_delay_alu instid0(VALU_DEP_3)
; GFX11-TRUE16-NEXT: v_add_f32_e32 v9, 0x40c00000, v3
+; GFX11-TRUE16-NEXT: v_cmp_u_f32_e32 vcc_lo, v4, v4
; GFX11-TRUE16-NEXT: v_mov_b16_e32 v3.h, v2.l
-; GFX11-TRUE16-NEXT: s_delay_alu instid0(VALU_DEP_3) | instskip(NEXT) | instid1(VALU_DEP_3)
-; GFX11-TRUE16-NEXT: v_add3_u32 v4, v4, v5, 0x7fff
-; GFX11-TRUE16-NEXT: v_bfe_u32 v10, v9, 16, 1
-; GFX11-TRUE16-NEXT: s_delay_alu instid0(VALU_DEP_3) | instskip(NEXT) | instid1(VALU_DEP_3)
+; GFX11-TRUE16-NEXT: v_add3_u32 v6, v6, v4, 0x7fff
+; GFX11-TRUE16-NEXT: v_and_b32_e32 v5, 0xffff0000, v1
+; GFX11-TRUE16-NEXT: v_and_b32_e32 v1, 0xffff0000, v2
+; GFX11-TRUE16-NEXT: s_delay_alu instid0(VALU_DEP_4) | instskip(NEXT) | instid1(VALU_DEP_4)
; GFX11-TRUE16-NEXT: v_add_f32_e32 v3, 0x40c00000, v3
-; GFX11-TRUE16-NEXT: v_cndmask_b32_e32 v4, v4, v7, vcc_lo
-; GFX11-TRUE16-NEXT: v_or_b32_e32 v13, 0x400000, v1
-; GFX11-TRUE16-NEXT: v_and_b32_e32 v6, 0xffff0000, v2
-; GFX11-TRUE16-NEXT: v_or_b32_e32 v7, 0x400000, v9
-; GFX11-TRUE16-NEXT: v_bfe_u32 v11, v3, 16, 1
-; GFX11-TRUE16-NEXT: v_cmp_u_f32_e32 vcc_lo, v9, v9
-; GFX11-TRUE16-NEXT: v_lshrrev_b32_e32 v4, 16, v4
-; GFX11-TRUE16-NEXT: v_add_f32_e32 v5, 0x40c00000, v6
-; GFX11-TRUE16-NEXT: v_add3_u32 v6, v10, v9, 0x7fff
-; GFX11-TRUE16-NEXT: v_bfe_u32 v10, v1, 16, 1
-; GFX11-TRUE16-NEXT: v_add3_u32 v9, v11, v3, 0x7fff
-; GFX11-TRUE16-NEXT: s_delay_alu instid0(VALU_DEP_3) | instskip(SKIP_1) | instid1(VALU_DEP_4)
-; GFX11-TRUE16-NEXT: v_cndmask_b32_e32 v6, v6, v7, vcc_lo
-; GFX11-TRUE16-NEXT: v_bfe_u32 v12, v5, 16, 1
-; GFX11-TRUE16-NEXT: v_add3_u32 v7, v10, v1, 0x7fff
-; GFX11-TRUE16-NEXT: v_or_b32_e32 v10, 0x400000, v3
-; GFX11-TRUE16-NEXT: v_cmp_u_f32_e32 vcc_lo, v3, v3
+; GFX11-TRUE16-NEXT: v_cndmask_b32_e32 v4, v6, v7, vcc_lo
+; GFX11-TRUE16-NEXT: v_or_b32_e32 v6, 0x400000, v9
; GFX11-TRUE16-NEXT: v_and_b32_e32 v0, 0xffff0000, v0
-; GFX11-TRUE16-NEXT: v_add3_u32 v11, v12, v5, 0x7fff
-; GFX11-TRUE16-NEXT: v_or_b32_e32 v12, 0x400000, v5
-; GFX11-TRUE16-NEXT: s_delay_alu instid0(VALU_DEP_3) | instskip(SKIP_1) | instid1(VALU_DEP_2)
-; GFX11-TRUE16-NEXT: v_dual_cndmask_b32 v3, v9, v10 :: v_dual_add_f32 v0, 0x40c00000, v0
-; GFX11-TRUE16-NEXT: v_cmp_u_f32_e32 vcc_lo, v5, v5
+; GFX11-TRUE16-NEXT: s_delay_alu instid0(VALU_DEP_1) | instskip(NEXT) | instid1(VALU_DEP_1)
+; GFX11-TRUE16-NEXT: v_dual_add_f32 v7, 0x40c00000, v1 :: v_dual_add_f32 v0, 0x40c00000, v0
+; GFX11-TRUE16-NEXT: v_or_b32_e32 v11, 0x400000, v7
+; GFX11-TRUE16-NEXT: s_delay_alu instid0(VALU_DEP_2) | instskip(SKIP_2) | instid1(VALU_DEP_3)
; GFX11-TRUE16-NEXT: v_bfe_u32 v8, v0, 16, 1
-; GFX11-TRUE16-NEXT: s_delay_alu instid0(VALU_DEP_4) | instskip(SKIP_2) | instid1(VALU_DEP_4)
-; GFX11-TRUE16-NEXT: v_cndmask_b32_e32 v5, v11, v12, vcc_lo
-; GFX11-TRUE16-NEXT: v_cmp_u_f32_e32 vcc_lo, v1, v1
-; GFX11-TRUE16-NEXT: v_or_b32_e32 v2, 0x400000, v0
-; GFX11-TRUE16-NEXT: v_add3_u32 v8, v8, v0, 0x7fff
-; GFX11-TRUE16-NEXT: v_cndmask_b32_e32 v1, v7, v13, vcc_lo
+; GFX11-TRUE16-NEXT: v_or_b32_e32 v10, 0x400000, v0
; GFX11-TRUE16-NEXT: v_cmp_u_f32_e32 vcc_lo, v0, v0
-; GFX11-TRUE16-NEXT: s_delay_alu instid0(VALU_DEP_2) | instskip(NEXT) | instid1(VALU_DEP_4)
-; GFX11-TRUE16-NEXT: v_mov_b16_e32 v1.l, v1.h
-; GFX11-TRUE16-NEXT: v_cndmask_b32_e32 v0, v8, v2, vcc_lo
-; GFX11-TRUE16-NEXT: v_mov_b16_e32 v0.l, v5.h
-; GFX11-TRUE16-NEXT: v_lshrrev_b32_e32 v2, 16, v3
-; GFX11-TRUE16-NEXT: v_lshrrev_b32_e32 v3, 16, v6
-; GFX11-TRUE16-NEXT: s_delay_alu instid0(VALU_DEP_4) | instskip(NEXT) | instid1(VALU_DEP_3)
-; GFX11-TRUE16-NEXT: v_mov_b16_e32 v5.l, v0.h
-; GFX11-TRUE16-NEXT: v_lshl_or_b32 v2, v0, 16, v2
-; GFX11-TRUE16-NEXT: s_delay_alu instid0(VALU_DEP_3) | instskip(NEXT) | instid1(VALU_DEP_3)
-; GFX11-TRUE16-NEXT: v_lshl_or_b32 v1, v1, 16, v3
-; GFX11-TRUE16-NEXT: v_lshl_or_b32 v0, v5, 16, v4
+; GFX11-TRUE16-NEXT: v_add3_u32 v2, v8, v0, 0x7fff
+; GFX11-TRUE16-NEXT: s_delay_alu instid0(VALU_DEP_1) | instskip(SKIP_4) | instid1(VALU_DEP_4)
+; GFX11-TRUE16-NEXT: v_cndmask_b32_e32 v0, v2, v10, vcc_lo
+; GFX11-TRUE16-NEXT: v_bfe_u32 v8, v9, 16, 1
+; GFX11-TRUE16-NEXT: v_add_f32_e32 v2, 0x40c00000, v5
+; GFX11-TRUE16-NEXT: v_cmp_u_f32_e32 vcc_lo, v9, v9
+; GFX11-TRUE16-NEXT: v_mov_b16_e32 v0.l, v4.h
+; GFX11-TRUE16-NEXT: v_add3_u32 v5, v8, v9, 0x7fff
+; GFX11-TRUE16-NEXT: v_bfe_u32 v8, v3, 16, 1
+; GFX11-TRUE16-NEXT: v_bfe_u32 v1, v2, 16, 1
+; GFX11-TRUE16-NEXT: v_or_b32_e32 v9, 0x400000, v3
+; GFX11-TRUE16-NEXT: v_or_b32_e32 v10, 0x400000, v2
+; GFX11-TRUE16-NEXT: v_cndmask_b32_e32 v5, v5, v6, vcc_lo
+; GFX11-TRUE16-NEXT: v_add3_u32 v8, v8, v3, 0x7fff
+; GFX11-TRUE16-NEXT: v_cmp_u_f32_e32 vcc_lo, v3, v3
+; GFX11-TRUE16-NEXT: v_bfe_u32 v6, v7, 16, 1
+; GFX11-TRUE16-NEXT: v_add3_u32 v1, v1, v2, 0x7fff
+; GFX11-TRUE16-NEXT: s_delay_alu instid0(VALU_DEP_4) | instskip(SKIP_1) | instid1(VALU_DEP_4)
+; GFX11-TRUE16-NEXT: v_cndmask_b32_e32 v3, v8, v9, vcc_lo
+; GFX11-TRUE16-NEXT: v_cmp_u_f32_e32 vcc_lo, v2, v2
+; GFX11-TRUE16-NEXT: v_add3_u32 v6, v6, v7, 0x7fff
+; GFX11-TRUE16-NEXT: s_delay_alu instid0(VALU_DEP_4) | instskip(SKIP_2) | instid1(VALU_DEP_4)
+; GFX11-TRUE16-NEXT: v_cndmask_b32_e32 v1, v1, v10, vcc_lo
+; GFX11-TRUE16-NEXT: v_cmp_u_f32_e32 vcc_lo, v7, v7
+; GFX11-TRUE16-NEXT: v_mov_b16_e32 v1.l, v5.h
+; GFX11-TRUE16-NEXT: v_cndmask_b32_e32 v2, v6, v11, vcc_lo
+; GFX11-TRUE16-NEXT: v_mov_b16_e32 v2.l, v3.h
; GFX11-TRUE16-NEXT: .LBB52_2: ; %end
; GFX11-TRUE16-NEXT: s_or_b32 exec_lo, exec_lo, s0
; GFX11-TRUE16-NEXT: s_setpc_b64 s[30:31]
@@ -12651,80 +13007,151 @@ define inreg <6 x i16> @bitcast_v6bf16_to_v6i16_scalar(<6 x bfloat> inreg %a, i3
; GFX9-NEXT: v_mov_b32_e32 v3, s19
; GFX9-NEXT: s_setpc_b64 s[30:31]
;
-; GFX11-LABEL: bitcast_v6bf16_to_v6i16_scalar:
-; GFX11: ; %bb.0:
-; GFX11-NEXT: s_waitcnt vmcnt(0) expcnt(0) lgkmcnt(0)
-; GFX11-NEXT: s_cmp_lg_u32 s3, 0
-; GFX11-NEXT: s_mov_b32 s4, 0
-; GFX11-NEXT: s_cbranch_scc0 .LBB53_3
-; GFX11-NEXT: ; %bb.1: ; %Flow
-; GFX11-NEXT: s_and_not1_b32 vcc_lo, exec_lo, s4
-; GFX11-NEXT: s_cbranch_vccnz .LBB53_4
-; GFX11-NEXT: .LBB53_2: ; %cmp.true
-; GFX11-NEXT: s_pack_lh_b32_b16 s3, 0, s0
-; GFX11-NEXT: s_lshl_b32 s0, s0, 16
-; GFX11-NEXT: v_add_f32_e64 v0, 0x40c00000, s3
-; GFX11-NEXT: v_add_f32_e64 v1, 0x40c00000, s0
-; GFX11-NEXT: s_pack_lh_b32_b16 s3, 0, s1
-; GFX11-NEXT: s_lshl_b32 s1, s1, 16
-; GFX11-NEXT: v_add_f32_e64 v3, 0x40c00000, s3
-; GFX11-NEXT: v_bfe_u32 v2, v0, 16, 1
-; GFX11-NEXT: v_bfe_u32 v4, v1, 16, 1
-; GFX11-NEXT: v_or_b32_e32 v9, 0x400000, v1
-; GFX11-NEXT: s_pack_lh_b32_b16 s0, 0, s2
-; GFX11-NEXT: s_lshl_b32 s2, s2, 16
-; GFX11-NEXT: v_add_f32_e64 v5, 0x40c00000, s1
-; GFX11-NEXT: v_add_nc_u32_e32 v4, v4, v1
-; GFX11-NEXT: v_add_f32_e64 v6, 0x40c00000, s2
-; GFX11-NEXT: v_bfe_u32 v7, v3, 16, 1
-; GFX11-NEXT: v_or_b32_e32 v8, 0x400000, v0
-; GFX11-NEXT: v_cmp_u_f32_e32 vcc_lo, v0, v0
-; GFX11-NEXT: v_add_nc_u32_e32 v4, 0x7fff, v4
-; GFX11-NEXT: v_add_nc_u32_e32 v2, v2, v0
-; GFX11-NEXT: v_bfe_u32 v10, v6, 16, 1
-; GFX11-NEXT: v_or_b32_e32 v11, 0x400000, v5
-; GFX11-NEXT: v_or_b32_e32 v12, 0x400000, v6
-; GFX11-NEXT: s_delay_alu instid0(VALU_DEP_4) | instskip(NEXT) | instid1(VALU_DEP_1)
-; GFX11-NEXT: v_add_nc_u32_e32 v2, 0x7fff, v2
-; GFX11-NEXT: v_dual_cndmask_b32 v0, v2, v8 :: v_dual_add_nc_u32 v7, v7, v3
-; GFX11-NEXT: v_bfe_u32 v2, v5, 16, 1
-; GFX11-NEXT: v_cmp_u_f32_e32 vcc_lo, v1, v1
-; GFX11-NEXT: v_add_f32_e64 v8, 0x40c00000, s0
-; GFX11-NEXT: s_delay_alu instid0(VALU_DEP_4)
-; GFX11-NEXT: v_dual_cndmask_b32 v1, v4, v9 :: v_dual_add_nc_u32 v4, 0x7fff, v7
-; GFX11-NEXT: v_add_nc_u32_e32 v9, v10, v6
-; GFX11-NEXT: v_or_b32_e32 v10, 0x400000, v3
-; GFX11-NEXT: v_add_nc_u32_e32 v2, v2, v5
-; GFX11-NEXT: v_bfe_u32 v7, v8, 16, 1
-; GFX11-NEXT: v_cmp_u_f32_e32 vcc_lo, v5, v5
-; GFX11-NEXT: v_add_nc_u32_e32 v9, 0x7fff, v9
-; GFX11-NEXT: s_delay_alu instid0(VALU_DEP_4) | instskip(NEXT) | instid1(VALU_DEP_1)
-; GFX11-NEXT: v_add_nc_u32_e32 v2, 0x7fff, v2
-; GFX11-NEXT: v_dual_cndmask_b32 v2, v2, v11 :: v_dual_add_nc_u32 v7, v7, v8
-; GFX11-NEXT: v_cmp_u_f32_e32 vcc_lo, v6, v6
-; GFX11-NEXT: s_delay_alu instid0(VALU_DEP_2)
-; GFX11-NEXT: v_add_nc_u32_e32 v5, 0x7fff, v7
-; GFX11-NEXT: v_or_b32_e32 v7, 0x400000, v8
-; GFX11-NEXT: v_cndmask_b32_e32 v6, v9, v12, vcc_lo
-; GFX11-NEXT: v_cmp_u_f32_e32 vcc_lo, v3, v3
-; GFX11-NEXT: v_cndmask_b32_e32 v3, v4, v10, vcc_lo
-; GFX11-NEXT: v_cmp_u_f32_e32 vcc_lo, v8, v8
-; GFX11-NEXT: v_cndmask_b32_e32 v4, v5, v7, vcc_lo
-; GFX11-NEXT: v_lshrrev_b32_e32 v5, 16, v6
-; GFX11-NEXT: v_lshrrev_b32_e32 v6, 16, v2
-; GFX11-NEXT: v_lshrrev_b32_e32 v7, 16, v1
-; GFX11-NEXT: s_delay_alu instid0(VALU_DEP_3) | instskip(NEXT) | instid1(VALU_DEP_3)
-; GFX11-NEXT: v_and_or_b32 v2, 0xffff0000, v4, v5
-; GFX11-NEXT: v_and_or_b32 v1, 0xffff0000, v3, v6
-; GFX11-NEXT: s_delay_alu instid0(VALU_DEP_3)
-; GFX11-NEXT: v_and_or_b32 v0, 0xffff0000, v0, v7
-; GFX11-NEXT: s_setpc_b64 s[30:31]
-; GFX11-NEXT: .LBB53_3:
-; GFX11-NEXT: s_branch .LBB53_2
-; GFX11-NEXT: .LBB53_4:
-; GFX11-NEXT: v_dual_mov_b32 v0, s0 :: v_dual_mov_b32 v1, s1
-; GFX11-NEXT: v_dual_mov_b32 v2, s2 :: v_dual_mov_b32 v3, s3
-; GFX11-NEXT: s_setpc_b64 s[30:31]
+; GFX11-TRUE16-LABEL: bitcast_v6bf16_to_v6i16_scalar:
+; GFX11-TRUE16: ; %bb.0:
+; GFX11-TRUE16-NEXT: s_waitcnt vmcnt(0) expcnt(0) lgkmcnt(0)
+; GFX11-TRUE16-NEXT: s_cmp_lg_u32 s3, 0
+; GFX11-TRUE16-NEXT: s_mov_b32 s4, 0
+; GFX11-TRUE16-NEXT: s_cbranch_scc0 .LBB53_3
+; GFX11-TRUE16-NEXT: ; %bb.1: ; %Flow
+; GFX11-TRUE16-NEXT: s_and_not1_b32 vcc_lo, exec_lo, s4
+; GFX11-TRUE16-NEXT: s_cbranch_vccnz .LBB53_4
+; GFX11-TRUE16-NEXT: .LBB53_2: ; %cmp.true
+; GFX11-TRUE16-NEXT: s_pack_lh_b32_b16 s3, 0, s0
+; GFX11-TRUE16-NEXT: s_lshl_b32 s0, s0, 16
+; GFX11-TRUE16-NEXT: v_add_f32_e64 v0, 0x40c00000, s3
+; GFX11-TRUE16-NEXT: s_pack_lh_b32_b16 s3, 0, s1
+; GFX11-TRUE16-NEXT: s_lshl_b32 s1, s1, 16
+; GFX11-TRUE16-NEXT: v_add_f32_e64 v1, 0x40c00000, s0
+; GFX11-TRUE16-NEXT: v_add_f32_e64 v5, 0x40c00000, s1
+; GFX11-TRUE16-NEXT: v_bfe_u32 v2, v0, 16, 1
+; GFX11-TRUE16-NEXT: v_add_f32_e64 v3, 0x40c00000, s3
+; GFX11-TRUE16-NEXT: s_pack_lh_b32_b16 s0, 0, s2
+; GFX11-TRUE16-NEXT: v_bfe_u32 v4, v1, 16, 1
+; GFX11-TRUE16-NEXT: v_or_b32_e32 v11, 0x400000, v5
+; GFX11-TRUE16-NEXT: v_add_nc_u32_e32 v2, v2, v0
+; GFX11-TRUE16-NEXT: s_lshl_b32 s2, s2, 16
+; GFX11-TRUE16-NEXT: v_bfe_u32 v7, v3, 16, 1
+; GFX11-TRUE16-NEXT: v_add_f32_e64 v6, 0x40c00000, s2
+; GFX11-TRUE16-NEXT: v_add_nc_u32_e32 v4, v4, v1
+; GFX11-TRUE16-NEXT: v_or_b32_e32 v8, 0x400000, v0
+; GFX11-TRUE16-NEXT: v_add_nc_u32_e32 v2, 0x7fff, v2
+; GFX11-TRUE16-NEXT: v_cmp_u_f32_e32 vcc_lo, v0, v0
+; GFX11-TRUE16-NEXT: v_or_b32_e32 v9, 0x400000, v1
+; GFX11-TRUE16-NEXT: v_add_nc_u32_e32 v7, v7, v3
+; GFX11-TRUE16-NEXT: v_add_nc_u32_e32 v4, 0x7fff, v4
+; GFX11-TRUE16-NEXT: v_bfe_u32 v10, v6, 16, 1
+; GFX11-TRUE16-NEXT: v_cndmask_b32_e32 v0, v2, v8, vcc_lo
+; GFX11-TRUE16-NEXT: v_cmp_u_f32_e32 vcc_lo, v1, v1
+; GFX11-TRUE16-NEXT: v_bfe_u32 v2, v5, 16, 1
+; GFX11-TRUE16-NEXT: v_add_f32_e64 v8, 0x40c00000, s0
+; GFX11-TRUE16-NEXT: v_or_b32_e32 v12, 0x400000, v6
+; GFX11-TRUE16-NEXT: v_dual_cndmask_b32 v4, v4, v9 :: v_dual_add_nc_u32 v9, v10, v6
+; GFX11-TRUE16-NEXT: v_cmp_u_f32_e32 vcc_lo, v5, v5
+; GFX11-TRUE16-NEXT: v_or_b32_e32 v10, 0x400000, v3
+; GFX11-TRUE16-NEXT: s_delay_alu instid0(VALU_DEP_3) | instskip(NEXT) | instid1(VALU_DEP_4)
+; GFX11-TRUE16-NEXT: v_mov_b16_e32 v0.l, v4.h
+; GFX11-TRUE16-NEXT: v_add_nc_u32_e32 v9, 0x7fff, v9
+; GFX11-TRUE16-NEXT: v_add_nc_u32_e32 v1, v2, v5
+; GFX11-TRUE16-NEXT: v_add_nc_u32_e32 v2, 0x7fff, v7
+; GFX11-TRUE16-NEXT: v_bfe_u32 v7, v8, 16, 1
+; GFX11-TRUE16-NEXT: s_delay_alu instid0(VALU_DEP_1) | instskip(NEXT) | instid1(VALU_DEP_1)
+; GFX11-TRUE16-NEXT: v_add_nc_u32_e32 v7, v7, v8
+; GFX11-TRUE16-NEXT: v_add_nc_u32_e32 v7, 0x7fff, v7
+; GFX11-TRUE16-NEXT: v_add_nc_u32_e32 v1, 0x7fff, v1
+; GFX11-TRUE16-NEXT: s_delay_alu instid0(VALU_DEP_1)
+; GFX11-TRUE16-NEXT: v_cndmask_b32_e32 v5, v1, v11, vcc_lo
+; GFX11-TRUE16-NEXT: v_cmp_u_f32_e32 vcc_lo, v6, v6
+; GFX11-TRUE16-NEXT: v_or_b32_e32 v11, 0x400000, v8
+; GFX11-TRUE16-NEXT: v_cndmask_b32_e32 v6, v9, v12, vcc_lo
+; GFX11-TRUE16-NEXT: v_cmp_u_f32_e32 vcc_lo, v3, v3
+; GFX11-TRUE16-NEXT: v_cndmask_b32_e32 v1, v2, v10, vcc_lo
+; GFX11-TRUE16-NEXT: v_cmp_u_f32_e32 vcc_lo, v8, v8
+; GFX11-TRUE16-NEXT: v_mov_b16_e32 v1.l, v5.h
+; GFX11-TRUE16-NEXT: v_cndmask_b32_e32 v2, v7, v11, vcc_lo
+; GFX11-TRUE16-NEXT: v_mov_b16_e32 v2.l, v6.h
+; GFX11-TRUE16-NEXT: s_setpc_b64 s[30:31]
+; GFX11-TRUE16-NEXT: .LBB53_3:
+; GFX11-TRUE16-NEXT: s_branch .LBB53_2
+; GFX11-TRUE16-NEXT: .LBB53_4:
+; GFX11-TRUE16-NEXT: v_dual_mov_b32 v0, s0 :: v_dual_mov_b32 v1, s1
+; GFX11-TRUE16-NEXT: v_dual_mov_b32 v2, s2 :: v_dual_mov_b32 v3, s3
+; GFX11-TRUE16-NEXT: s_setpc_b64 s[30:31]
+;
+; GFX11-FAKE16-LABEL: bitcast_v6bf16_to_v6i16_scalar:
+; GFX11-FAKE16: ; %bb.0:
+; GFX11-FAKE16-NEXT: s_waitcnt vmcnt(0) expcnt(0) lgkmcnt(0)
+; GFX11-FAKE16-NEXT: s_cmp_lg_u32 s3, 0
+; GFX11-FAKE16-NEXT: s_mov_b32 s4, 0
+; GFX11-FAKE16-NEXT: s_cbranch_scc0 .LBB53_3
+; GFX11-FAKE16-NEXT: ; %bb.1: ; %Flow
+; GFX11-FAKE16-NEXT: s_and_not1_b32 vcc_lo, exec_lo, s4
+; GFX11-FAKE16-NEXT: s_cbranch_vccnz .LBB53_4
+; GFX11-FAKE16-NEXT: .LBB53_2: ; %cmp.true
+; GFX11-FAKE16-NEXT: s_pack_lh_b32_b16 s3, 0, s0
+; GFX11-FAKE16-NEXT: s_lshl_b32 s0, s0, 16
+; GFX11-FAKE16-NEXT: v_add_f32_e64 v0, 0x40c00000, s3
+; GFX11-FAKE16-NEXT: v_add_f32_e64 v1, 0x40c00000, s0
+; GFX11-FAKE16-NEXT: s_pack_lh_b32_b16 s3, 0, s1
+; GFX11-FAKE16-NEXT: s_lshl_b32 s1, s1, 16
+; GFX11-FAKE16-NEXT: v_add_f32_e64 v3, 0x40c00000, s3
+; GFX11-FAKE16-NEXT: v_bfe_u32 v2, v0, 16, 1
+; GFX11-FAKE16-NEXT: v_bfe_u32 v4, v1, 16, 1
+; GFX11-FAKE16-NEXT: v_or_b32_e32 v9, 0x400000, v1
+; GFX11-FAKE16-NEXT: s_pack_lh_b32_b16 s0, 0, s2
+; GFX11-FAKE16-NEXT: s_lshl_b32 s2, s2, 16
+; GFX11-FAKE16-NEXT: v_add_f32_e64 v5, 0x40c00000, s1
+; GFX11-FAKE16-NEXT: v_add_nc_u32_e32 v4, v4, v1
+; GFX11-FAKE16-NEXT: v_add_f32_e64 v6, 0x40c00000, s2
+; GFX11-FAKE16-NEXT: v_bfe_u32 v7, v3, 16, 1
+; GFX11-FAKE16-NEXT: v_or_b32_e32 v8, 0x400000, v0
+; GFX11-FAKE16-NEXT: v_cmp_u_f32_e32 vcc_lo, v0, v0
+; GFX11-FAKE16-NEXT: v_add_nc_u32_e32 v4, 0x7fff, v4
+; GFX11-FAKE16-NEXT: v_add_nc_u32_e32 v2, v2, v0
+; GFX11-FAKE16-NEXT: v_bfe_u32 v10, v6, 16, 1
+; GFX11-FAKE16-NEXT: v_or_b32_e32 v11, 0x400000, v5
+; GFX11-FAKE16-NEXT: v_or_b32_e32 v12, 0x400000, v6
+; GFX11-FAKE16-NEXT: s_delay_alu instid0(VALU_DEP_4) | instskip(NEXT) | instid1(VALU_DEP_1)
+; GFX11-FAKE16-NEXT: v_add_nc_u32_e32 v2, 0x7fff, v2
+; GFX11-FAKE16-NEXT: v_dual_cndmask_b32 v0, v2, v8 :: v_dual_add_nc_u32 v7, v7, v3
+; GFX11-FAKE16-NEXT: v_bfe_u32 v2, v5, 16, 1
+; GFX11-FAKE16-NEXT: v_cmp_u_f32_e32 vcc_lo, v1, v1
+; GFX11-FAKE16-NEXT: v_add_f32_e64 v8, 0x40c00000, s0
+; GFX11-FAKE16-NEXT: s_delay_alu instid0(VALU_DEP_4)
+; GFX11-FAKE16-NEXT: v_dual_cndmask_b32 v1, v4, v9 :: v_dual_add_nc_u32 v4, 0x7fff, v7
+; GFX11-FAKE16-NEXT: v_add_nc_u32_e32 v9, v10, v6
+; GFX11-FAKE16-NEXT: v_or_b32_e32 v10, 0x400000, v3
+; GFX11-FAKE16-NEXT: v_add_nc_u32_e32 v2, v2, v5
+; GFX11-FAKE16-NEXT: v_bfe_u32 v7, v8, 16, 1
+; GFX11-FAKE16-NEXT: v_cmp_u_f32_e32 vcc_lo, v5, v5
+; GFX11-FAKE16-NEXT: v_add_nc_u32_e32 v9, 0x7fff, v9
+; GFX11-FAKE16-NEXT: s_delay_alu instid0(VALU_DEP_4) | instskip(NEXT) | instid1(VALU_DEP_1)
+; GFX11-FAKE16-NEXT: v_add_nc_u32_e32 v2, 0x7fff, v2
+; GFX11-FAKE16-NEXT: v_dual_cndmask_b32 v2, v2, v11 :: v_dual_add_nc_u32 v7, v7, v8
+; GFX11-FAKE16-NEXT: v_cmp_u_f32_e32 vcc_lo, v6, v6
+; GFX11-FAKE16-NEXT: s_delay_alu instid0(VALU_DEP_2)
+; GFX11-FAKE16-NEXT: v_add_nc_u32_e32 v5, 0x7fff, v7
+; GFX11-FAKE16-NEXT: v_or_b32_e32 v7, 0x400000, v8
+; GFX11-FAKE16-NEXT: v_cndmask_b32_e32 v6, v9, v12, vcc_lo
+; GFX11-FAKE16-NEXT: v_cmp_u_f32_e32 vcc_lo, v3, v3
+; GFX11-FAKE16-NEXT: v_cndmask_b32_e32 v3, v4, v10, vcc_lo
+; GFX11-FAKE16-NEXT: v_cmp_u_f32_e32 vcc_lo, v8, v8
+; GFX11-FAKE16-NEXT: v_cndmask_b32_e32 v4, v5, v7, vcc_lo
+; GFX11-FAKE16-NEXT: v_lshrrev_b32_e32 v5, 16, v6
+; GFX11-FAKE16-NEXT: v_lshrrev_b32_e32 v6, 16, v2
+; GFX11-FAKE16-NEXT: v_lshrrev_b32_e32 v7, 16, v1
+; GFX11-FAKE16-NEXT: s_delay_alu instid0(VALU_DEP_3) | instskip(NEXT) | instid1(VALU_DEP_3)
+; GFX11-FAKE16-NEXT: v_and_or_b32 v2, 0xffff0000, v4, v5
+; GFX11-FAKE16-NEXT: v_and_or_b32 v1, 0xffff0000, v3, v6
+; GFX11-FAKE16-NEXT: s_delay_alu instid0(VALU_DEP_3)
+; GFX11-FAKE16-NEXT: v_and_or_b32 v0, 0xffff0000, v0, v7
+; GFX11-FAKE16-NEXT: s_setpc_b64 s[30:31]
+; GFX11-FAKE16-NEXT: .LBB53_3:
+; GFX11-FAKE16-NEXT: s_branch .LBB53_2
+; GFX11-FAKE16-NEXT: .LBB53_4:
+; GFX11-FAKE16-NEXT: v_dual_mov_b32 v0, s0 :: v_dual_mov_b32 v1, s1
+; GFX11-FAKE16-NEXT: v_dual_mov_b32 v2, s2 :: v_dual_mov_b32 v3, s3
+; GFX11-FAKE16-NEXT: s_setpc_b64 s[30:31]
%cmp = icmp eq i32 %b, 0
br i1 %cmp, label %cmp.true, label %cmp.false
diff --git a/llvm/test/CodeGen/AMDGPU/amdgpu-codegenprepare-fold-binop-select.ll b/llvm/test/CodeGen/AMDGPU/amdgpu-codegenprepare-fold-binop-select.ll
index e71bf15..e34aaf20 100644
--- a/llvm/test/CodeGen/AMDGPU/amdgpu-codegenprepare-fold-binop-select.ll
+++ b/llvm/test/CodeGen/AMDGPU/amdgpu-codegenprepare-fold-binop-select.ll
@@ -136,7 +136,7 @@ define i32 @select_sdiv_lhs_opaque_const0_i32(i1 %cond) {
; GCN-NEXT: v_mov_b32_e32 v1, s4
; GCN-NEXT: v_cndmask_b32_e32 v0, 5, v1, vcc
; GCN-NEXT: v_sub_u32_e32 v1, vcc, 0, v0
-; GCN-NEXT: v_max_i32_e32 v1, v0, v1
+; GCN-NEXT: v_max_i32_e32 v1, v1, v0
; GCN-NEXT: v_cvt_f32_u32_e32 v2, v1
; GCN-NEXT: v_sub_u32_e32 v3, vcc, 0, v1
; GCN-NEXT: s_mov_b32 s4, 0xf4240
@@ -218,7 +218,7 @@ define i32 @select_sdiv_lhs_opaque_const1_i32(i1 %cond) {
; GCN-NEXT: v_mov_b32_e32 v1, s4
; GCN-NEXT: v_cndmask_b32_e64 v0, v1, 5, vcc
; GCN-NEXT: v_sub_u32_e32 v1, vcc, 0, v0
-; GCN-NEXT: v_max_i32_e32 v1, v0, v1
+; GCN-NEXT: v_max_i32_e32 v1, v1, v0
; GCN-NEXT: v_cvt_f32_u32_e32 v2, v1
; GCN-NEXT: v_sub_u32_e32 v3, vcc, 0, v1
; GCN-NEXT: s_mov_b32 s4, 0xf4240
diff --git a/llvm/test/CodeGen/AMDGPU/amdgpu-codegenprepare-idiv.ll b/llvm/test/CodeGen/AMDGPU/amdgpu-codegenprepare-idiv.ll
index e27164c..948811e 100644
--- a/llvm/test/CodeGen/AMDGPU/amdgpu-codegenprepare-idiv.ll
+++ b/llvm/test/CodeGen/AMDGPU/amdgpu-codegenprepare-idiv.ll
@@ -6191,37 +6191,34 @@ define amdgpu_kernel void @sdiv_i32_pow2_shl_denom(ptr addrspace(1) %out, i32 %x
; GFX6-NEXT: s_mov_b32 s6, -1
; GFX6-NEXT: s_waitcnt lgkmcnt(0)
; GFX6-NEXT: s_lshl_b32 s3, 0x1000, s3
-; GFX6-NEXT: s_ashr_i32 s8, s3, 31
-; GFX6-NEXT: s_add_i32 s3, s3, s8
-; GFX6-NEXT: s_xor_b32 s3, s3, s8
-; GFX6-NEXT: v_cvt_f32_u32_e32 v0, s3
-; GFX6-NEXT: s_sub_i32 s4, 0, s3
-; GFX6-NEXT: s_ashr_i32 s9, s2, 31
-; GFX6-NEXT: s_add_i32 s2, s2, s9
-; GFX6-NEXT: v_rcp_iflag_f32_e32 v0, v0
-; GFX6-NEXT: s_xor_b32 s2, s2, s9
+; GFX6-NEXT: s_abs_i32 s8, s3
+; GFX6-NEXT: v_cvt_f32_u32_e32 v0, s8
+; GFX6-NEXT: s_sub_i32 s4, 0, s8
+; GFX6-NEXT: s_abs_i32 s9, s2
; GFX6-NEXT: s_mov_b32 s5, s1
+; GFX6-NEXT: v_rcp_iflag_f32_e32 v0, v0
; GFX6-NEXT: v_mul_f32_e32 v0, 0x4f7ffffe, v0
; GFX6-NEXT: v_cvt_u32_f32_e32 v0, v0
; GFX6-NEXT: v_mul_lo_u32 v1, s4, v0
; GFX6-NEXT: s_mov_b32 s4, s0
; GFX6-NEXT: v_mul_hi_u32 v1, v0, v1
; GFX6-NEXT: v_add_i32_e32 v0, vcc, v0, v1
-; GFX6-NEXT: v_mul_hi_u32 v0, s2, v0
+; GFX6-NEXT: v_mul_hi_u32 v0, s9, v0
; GFX6-NEXT: v_readfirstlane_b32 s0, v0
-; GFX6-NEXT: s_mul_i32 s0, s0, s3
-; GFX6-NEXT: s_sub_i32 s0, s2, s0
-; GFX6-NEXT: s_sub_i32 s1, s0, s3
+; GFX6-NEXT: s_mul_i32 s0, s0, s8
+; GFX6-NEXT: s_sub_i32 s0, s9, s0
+; GFX6-NEXT: s_sub_i32 s1, s0, s8
; GFX6-NEXT: v_add_i32_e32 v1, vcc, 1, v0
-; GFX6-NEXT: s_cmp_ge_u32 s0, s3
+; GFX6-NEXT: s_cmp_ge_u32 s0, s8
; GFX6-NEXT: s_cselect_b64 vcc, -1, 0
; GFX6-NEXT: s_cselect_b32 s0, s1, s0
; GFX6-NEXT: v_cndmask_b32_e32 v0, v0, v1, vcc
; GFX6-NEXT: v_add_i32_e32 v1, vcc, 1, v0
-; GFX6-NEXT: s_cmp_ge_u32 s0, s3
+; GFX6-NEXT: s_cmp_ge_u32 s0, s8
; GFX6-NEXT: s_cselect_b64 vcc, -1, 0
+; GFX6-NEXT: s_xor_b32 s0, s2, s3
; GFX6-NEXT: v_cndmask_b32_e32 v0, v0, v1, vcc
-; GFX6-NEXT: s_xor_b32 s0, s9, s8
+; GFX6-NEXT: s_ashr_i32 s0, s0, 31
; GFX6-NEXT: v_xor_b32_e32 v0, s0, v0
; GFX6-NEXT: v_subrev_i32_e32 v0, vcc, s0, v0
; GFX6-NEXT: buffer_store_dword v0, off, s[4:7], 0
@@ -6233,35 +6230,32 @@ define amdgpu_kernel void @sdiv_i32_pow2_shl_denom(ptr addrspace(1) %out, i32 %x
; GFX9-NEXT: v_mov_b32_e32 v1, 0
; GFX9-NEXT: s_waitcnt lgkmcnt(0)
; GFX9-NEXT: s_lshl_b32 s3, 0x1000, s3
-; GFX9-NEXT: s_ashr_i32 s4, s3, 31
-; GFX9-NEXT: s_add_i32 s3, s3, s4
-; GFX9-NEXT: s_xor_b32 s3, s3, s4
-; GFX9-NEXT: v_cvt_f32_u32_e32 v0, s3
-; GFX9-NEXT: s_sub_i32 s6, 0, s3
-; GFX9-NEXT: s_ashr_i32 s5, s2, 31
-; GFX9-NEXT: s_add_i32 s2, s2, s5
+; GFX9-NEXT: s_abs_i32 s4, s3
+; GFX9-NEXT: v_cvt_f32_u32_e32 v0, s4
+; GFX9-NEXT: s_sub_i32 s6, 0, s4
+; GFX9-NEXT: s_abs_i32 s5, s2
; GFX9-NEXT: v_rcp_iflag_f32_e32 v0, v0
-; GFX9-NEXT: s_xor_b32 s2, s2, s5
; GFX9-NEXT: v_mul_f32_e32 v0, 0x4f7ffffe, v0
; GFX9-NEXT: v_cvt_u32_f32_e32 v0, v0
; GFX9-NEXT: v_readfirstlane_b32 s7, v0
; GFX9-NEXT: s_mul_i32 s6, s6, s7
; GFX9-NEXT: s_mul_hi_u32 s6, s7, s6
; GFX9-NEXT: s_add_i32 s7, s7, s6
-; GFX9-NEXT: s_mul_hi_u32 s6, s2, s7
-; GFX9-NEXT: s_mul_i32 s8, s6, s3
-; GFX9-NEXT: s_sub_i32 s2, s2, s8
+; GFX9-NEXT: s_mul_hi_u32 s6, s5, s7
+; GFX9-NEXT: s_mul_i32 s8, s6, s4
+; GFX9-NEXT: s_sub_i32 s5, s5, s8
; GFX9-NEXT: s_add_i32 s7, s6, 1
-; GFX9-NEXT: s_sub_i32 s8, s2, s3
-; GFX9-NEXT: s_cmp_ge_u32 s2, s3
+; GFX9-NEXT: s_sub_i32 s8, s5, s4
+; GFX9-NEXT: s_cmp_ge_u32 s5, s4
; GFX9-NEXT: s_cselect_b32 s6, s7, s6
-; GFX9-NEXT: s_cselect_b32 s2, s8, s2
+; GFX9-NEXT: s_cselect_b32 s5, s8, s5
; GFX9-NEXT: s_add_i32 s7, s6, 1
-; GFX9-NEXT: s_cmp_ge_u32 s2, s3
-; GFX9-NEXT: s_cselect_b32 s2, s7, s6
-; GFX9-NEXT: s_xor_b32 s3, s5, s4
+; GFX9-NEXT: s_cmp_ge_u32 s5, s4
+; GFX9-NEXT: s_cselect_b32 s4, s7, s6
; GFX9-NEXT: s_xor_b32 s2, s2, s3
-; GFX9-NEXT: s_sub_i32 s2, s2, s3
+; GFX9-NEXT: s_ashr_i32 s2, s2, 31
+; GFX9-NEXT: s_xor_b32 s3, s4, s2
+; GFX9-NEXT: s_sub_i32 s2, s3, s2
; GFX9-NEXT: v_mov_b32_e32 v0, s2
; GFX9-NEXT: global_store_dword v1, v0, s[0:1]
; GFX9-NEXT: s_endpgm
@@ -6706,38 +6700,37 @@ define amdgpu_kernel void @srem_i32_pow2_shl_denom(ptr addrspace(1) %out, i32 %x
; GFX6-LABEL: srem_i32_pow2_shl_denom:
; GFX6: ; %bb.0:
; GFX6-NEXT: s_load_dwordx4 s[0:3], s[4:5], 0x9
+; GFX6-NEXT: s_mov_b32 s7, 0xf000
+; GFX6-NEXT: s_mov_b32 s6, -1
; GFX6-NEXT: s_waitcnt lgkmcnt(0)
; GFX6-NEXT: s_lshl_b32 s3, 0x1000, s3
-; GFX6-NEXT: s_ashr_i32 s4, s3, 31
-; GFX6-NEXT: s_add_i32 s3, s3, s4
-; GFX6-NEXT: s_xor_b32 s4, s3, s4
-; GFX6-NEXT: v_cvt_f32_u32_e32 v0, s4
-; GFX6-NEXT: s_sub_i32 s3, 0, s4
-; GFX6-NEXT: s_ashr_i32 s5, s2, 31
-; GFX6-NEXT: s_add_i32 s2, s2, s5
+; GFX6-NEXT: s_abs_i32 s3, s3
+; GFX6-NEXT: v_cvt_f32_u32_e32 v0, s3
+; GFX6-NEXT: s_sub_i32 s4, 0, s3
+; GFX6-NEXT: s_abs_i32 s8, s2
+; GFX6-NEXT: s_mov_b32 s5, s1
; GFX6-NEXT: v_rcp_iflag_f32_e32 v0, v0
-; GFX6-NEXT: s_xor_b32 s6, s2, s5
-; GFX6-NEXT: s_mov_b32 s2, -1
; GFX6-NEXT: v_mul_f32_e32 v0, 0x4f7ffffe, v0
; GFX6-NEXT: v_cvt_u32_f32_e32 v0, v0
-; GFX6-NEXT: v_mul_lo_u32 v1, s3, v0
-; GFX6-NEXT: s_mov_b32 s3, 0xf000
+; GFX6-NEXT: v_mul_lo_u32 v1, s4, v0
+; GFX6-NEXT: s_mov_b32 s4, s0
; GFX6-NEXT: v_mul_hi_u32 v1, v0, v1
; GFX6-NEXT: v_add_i32_e32 v0, vcc, v0, v1
-; GFX6-NEXT: v_mul_hi_u32 v0, s6, v0
-; GFX6-NEXT: v_readfirstlane_b32 s7, v0
-; GFX6-NEXT: s_mul_i32 s7, s7, s4
-; GFX6-NEXT: s_sub_i32 s6, s6, s7
-; GFX6-NEXT: s_sub_i32 s7, s6, s4
-; GFX6-NEXT: s_cmp_ge_u32 s6, s4
-; GFX6-NEXT: s_cselect_b32 s6, s7, s6
-; GFX6-NEXT: s_sub_i32 s7, s6, s4
-; GFX6-NEXT: s_cmp_ge_u32 s6, s4
-; GFX6-NEXT: s_cselect_b32 s4, s7, s6
-; GFX6-NEXT: s_xor_b32 s4, s4, s5
-; GFX6-NEXT: s_sub_i32 s4, s4, s5
-; GFX6-NEXT: v_mov_b32_e32 v0, s4
-; GFX6-NEXT: buffer_store_dword v0, off, s[0:3], 0
+; GFX6-NEXT: v_mul_hi_u32 v0, s8, v0
+; GFX6-NEXT: v_readfirstlane_b32 s0, v0
+; GFX6-NEXT: s_mul_i32 s0, s0, s3
+; GFX6-NEXT: s_sub_i32 s0, s8, s0
+; GFX6-NEXT: s_sub_i32 s1, s0, s3
+; GFX6-NEXT: s_cmp_ge_u32 s0, s3
+; GFX6-NEXT: s_cselect_b32 s0, s1, s0
+; GFX6-NEXT: s_sub_i32 s1, s0, s3
+; GFX6-NEXT: s_cmp_ge_u32 s0, s3
+; GFX6-NEXT: s_cselect_b32 s0, s1, s0
+; GFX6-NEXT: s_ashr_i32 s1, s2, 31
+; GFX6-NEXT: s_xor_b32 s0, s0, s1
+; GFX6-NEXT: s_sub_i32 s0, s0, s1
+; GFX6-NEXT: v_mov_b32_e32 v0, s0
+; GFX6-NEXT: buffer_store_dword v0, off, s[4:7], 0
; GFX6-NEXT: s_endpgm
;
; GFX9-LABEL: srem_i32_pow2_shl_denom:
@@ -6746,32 +6739,29 @@ define amdgpu_kernel void @srem_i32_pow2_shl_denom(ptr addrspace(1) %out, i32 %x
; GFX9-NEXT: v_mov_b32_e32 v1, 0
; GFX9-NEXT: s_waitcnt lgkmcnt(0)
; GFX9-NEXT: s_lshl_b32 s3, 0x1000, s3
-; GFX9-NEXT: s_ashr_i32 s4, s3, 31
-; GFX9-NEXT: s_add_i32 s3, s3, s4
-; GFX9-NEXT: s_xor_b32 s3, s3, s4
+; GFX9-NEXT: s_abs_i32 s3, s3
; GFX9-NEXT: v_cvt_f32_u32_e32 v0, s3
; GFX9-NEXT: s_sub_i32 s5, 0, s3
-; GFX9-NEXT: s_ashr_i32 s4, s2, 31
-; GFX9-NEXT: s_add_i32 s2, s2, s4
+; GFX9-NEXT: s_abs_i32 s4, s2
; GFX9-NEXT: v_rcp_iflag_f32_e32 v0, v0
-; GFX9-NEXT: s_xor_b32 s2, s2, s4
; GFX9-NEXT: v_mul_f32_e32 v0, 0x4f7ffffe, v0
; GFX9-NEXT: v_cvt_u32_f32_e32 v0, v0
; GFX9-NEXT: v_readfirstlane_b32 s6, v0
; GFX9-NEXT: s_mul_i32 s5, s5, s6
; GFX9-NEXT: s_mul_hi_u32 s5, s6, s5
; GFX9-NEXT: s_add_i32 s6, s6, s5
-; GFX9-NEXT: s_mul_hi_u32 s5, s2, s6
+; GFX9-NEXT: s_mul_hi_u32 s5, s4, s6
; GFX9-NEXT: s_mul_i32 s5, s5, s3
-; GFX9-NEXT: s_sub_i32 s2, s2, s5
-; GFX9-NEXT: s_sub_i32 s5, s2, s3
-; GFX9-NEXT: s_cmp_ge_u32 s2, s3
-; GFX9-NEXT: s_cselect_b32 s2, s5, s2
-; GFX9-NEXT: s_sub_i32 s5, s2, s3
-; GFX9-NEXT: s_cmp_ge_u32 s2, s3
-; GFX9-NEXT: s_cselect_b32 s2, s5, s2
-; GFX9-NEXT: s_xor_b32 s2, s2, s4
-; GFX9-NEXT: s_sub_i32 s2, s2, s4
+; GFX9-NEXT: s_sub_i32 s4, s4, s5
+; GFX9-NEXT: s_sub_i32 s5, s4, s3
+; GFX9-NEXT: s_cmp_ge_u32 s4, s3
+; GFX9-NEXT: s_cselect_b32 s4, s5, s4
+; GFX9-NEXT: s_sub_i32 s5, s4, s3
+; GFX9-NEXT: s_cmp_ge_u32 s4, s3
+; GFX9-NEXT: s_cselect_b32 s3, s5, s4
+; GFX9-NEXT: s_ashr_i32 s2, s2, 31
+; GFX9-NEXT: s_xor_b32 s3, s3, s2
+; GFX9-NEXT: s_sub_i32 s2, s3, s2
; GFX9-NEXT: v_mov_b32_e32 v0, s2
; GFX9-NEXT: global_store_dword v1, v0, s[0:1]
; GFX9-NEXT: s_endpgm
diff --git a/llvm/test/CodeGen/AMDGPU/build-vector-packed-partial-undef.ll b/llvm/test/CodeGen/AMDGPU/build-vector-packed-partial-undef.ll
index 861621b..c1b8bc6 100644
--- a/llvm/test/CodeGen/AMDGPU/build-vector-packed-partial-undef.ll
+++ b/llvm/test/CodeGen/AMDGPU/build-vector-packed-partial-undef.ll
@@ -410,26 +410,14 @@ define void @undef_lo2_v4i16(<2 x i16> %arg0) {
; GFX11-FAKE16-NEXT: ;;#ASMEND
; GFX11-FAKE16-NEXT: s_setpc_b64 s[30:31]
;
-; GFX11-TRUE16-SDAG-LABEL: undef_lo2_v4i16:
-; GFX11-TRUE16-SDAG: ; %bb.0:
-; GFX11-TRUE16-SDAG-NEXT: s_waitcnt vmcnt(0) expcnt(0) lgkmcnt(0)
-; GFX11-TRUE16-SDAG-NEXT: v_mov_b16_e32 v1.l, v0.h
-; GFX11-TRUE16-SDAG-NEXT: v_lshrrev_b32_e32 v0, 16, v0
-; GFX11-TRUE16-SDAG-NEXT: s_delay_alu instid0(VALU_DEP_1)
-; GFX11-TRUE16-SDAG-NEXT: v_lshl_or_b32 v0, v1, 16, v0
-; GFX11-TRUE16-SDAG-NEXT: ;;#ASMSTART
-; GFX11-TRUE16-SDAG-NEXT: ; use v[0:1]
-; GFX11-TRUE16-SDAG-NEXT: ;;#ASMEND
-; GFX11-TRUE16-SDAG-NEXT: s_setpc_b64 s[30:31]
-;
-; GFX11-TRUE16-GISEL-LABEL: undef_lo2_v4i16:
-; GFX11-TRUE16-GISEL: ; %bb.0:
-; GFX11-TRUE16-GISEL-NEXT: s_waitcnt vmcnt(0) expcnt(0) lgkmcnt(0)
-; GFX11-TRUE16-GISEL-NEXT: v_mov_b16_e32 v0.l, v0.h
-; GFX11-TRUE16-GISEL-NEXT: ;;#ASMSTART
-; GFX11-TRUE16-GISEL-NEXT: ; use v[0:1]
-; GFX11-TRUE16-GISEL-NEXT: ;;#ASMEND
-; GFX11-TRUE16-GISEL-NEXT: s_setpc_b64 s[30:31]
+; GFX11-TRUE16-LABEL: undef_lo2_v4i16:
+; GFX11-TRUE16: ; %bb.0:
+; GFX11-TRUE16-NEXT: s_waitcnt vmcnt(0) expcnt(0) lgkmcnt(0)
+; GFX11-TRUE16-NEXT: v_mov_b16_e32 v0.l, v0.h
+; GFX11-TRUE16-NEXT: ;;#ASMSTART
+; GFX11-TRUE16-NEXT: ; use v[0:1]
+; GFX11-TRUE16-NEXT: ;;#ASMEND
+; GFX11-TRUE16-NEXT: s_setpc_b64 s[30:31]
%undef.lo = shufflevector <2 x i16> %arg0, <2 x i16> poison, <4 x i32> <i32 1, i32 1, i32 2, i32 3>
call void asm sideeffect "; use $0", "v"(<4 x i16> %undef.lo);
ret void
diff --git a/llvm/test/CodeGen/AMDGPU/bypass-div.ll b/llvm/test/CodeGen/AMDGPU/bypass-div.ll
index 3cf70c4..d7d697e 100644
--- a/llvm/test/CodeGen/AMDGPU/bypass-div.ll
+++ b/llvm/test/CodeGen/AMDGPU/bypass-div.ll
@@ -576,11 +576,11 @@ define i32 @sdiv32(i32 %a, i32 %b) {
; GFX9: ; %bb.0:
; GFX9-NEXT: s_waitcnt vmcnt(0) expcnt(0) lgkmcnt(0)
; GFX9-NEXT: v_sub_u32_e32 v2, 0, v1
-; GFX9-NEXT: v_max_i32_e32 v2, v1, v2
+; GFX9-NEXT: v_max_i32_e32 v2, v2, v1
; GFX9-NEXT: v_cvt_f32_u32_e32 v3, v2
; GFX9-NEXT: v_sub_u32_e32 v4, 0, v2
; GFX9-NEXT: v_sub_u32_e32 v5, 0, v0
-; GFX9-NEXT: v_max_i32_e32 v5, v0, v5
+; GFX9-NEXT: v_max_i32_e32 v5, v5, v0
; GFX9-NEXT: v_rcp_iflag_f32_e32 v3, v3
; GFX9-NEXT: v_xor_b32_e32 v0, v0, v1
; GFX9-NEXT: v_ashrrev_i32_e32 v0, 31, v0
@@ -640,11 +640,11 @@ define i32 @srem32(i32 %a, i32 %b) {
; GFX9: ; %bb.0:
; GFX9-NEXT: s_waitcnt vmcnt(0) expcnt(0) lgkmcnt(0)
; GFX9-NEXT: v_sub_u32_e32 v2, 0, v1
-; GFX9-NEXT: v_max_i32_e32 v1, v1, v2
+; GFX9-NEXT: v_max_i32_e32 v1, v2, v1
; GFX9-NEXT: v_cvt_f32_u32_e32 v2, v1
; GFX9-NEXT: v_sub_u32_e32 v3, 0, v1
; GFX9-NEXT: v_sub_u32_e32 v4, 0, v0
-; GFX9-NEXT: v_max_i32_e32 v4, v0, v4
+; GFX9-NEXT: v_max_i32_e32 v4, v4, v0
; GFX9-NEXT: v_rcp_iflag_f32_e32 v2, v2
; GFX9-NEXT: v_ashrrev_i32_e32 v0, 31, v0
; GFX9-NEXT: v_mul_f32_e32 v2, 0x4f7ffffe, v2
diff --git a/llvm/test/CodeGen/AMDGPU/divergence-driven-buildvector.ll b/llvm/test/CodeGen/AMDGPU/divergence-driven-buildvector.ll
index 9c59b42..ab96dcf 100644
--- a/llvm/test/CodeGen/AMDGPU/divergence-driven-buildvector.ll
+++ b/llvm/test/CodeGen/AMDGPU/divergence-driven-buildvector.ll
@@ -563,10 +563,9 @@ define i32 @divergent_vec_i16_HH(i32 %a, i32 %b) {
; GFX11-TRUE16-LABEL: divergent_vec_i16_HH:
; GFX11-TRUE16: ; %bb.0:
; GFX11-TRUE16-NEXT: s_waitcnt vmcnt(0) expcnt(0) lgkmcnt(0)
-; GFX11-TRUE16-NEXT: v_mov_b16_e32 v1.l, v1.h
-; GFX11-TRUE16-NEXT: v_lshrrev_b32_e32 v0, 16, v0
+; GFX11-TRUE16-NEXT: v_mov_b16_e32 v1.l, v0.h
; GFX11-TRUE16-NEXT: s_delay_alu instid0(VALU_DEP_1)
-; GFX11-TRUE16-NEXT: v_lshl_or_b32 v0, v1, 16, v0
+; GFX11-TRUE16-NEXT: v_mov_b32_e32 v0, v1
; GFX11-TRUE16-NEXT: s_setpc_b64 s[30:31]
;
; GFX11-FAKE16-LABEL: divergent_vec_i16_HH:
diff --git a/llvm/test/CodeGen/AMDGPU/fix-sgpr-copies-f16-true16.mir b/llvm/test/CodeGen/AMDGPU/fix-sgpr-copies-f16-true16.mir
index 043bcc3..f64615d 100644
--- a/llvm/test/CodeGen/AMDGPU/fix-sgpr-copies-f16-true16.mir
+++ b/llvm/test/CodeGen/AMDGPU/fix-sgpr-copies-f16-true16.mir
@@ -264,3 +264,90 @@ body: |
$sgpr0 = COPY %16:sreg_32
SI_RETURN_TO_EPILOG $sgpr0
...
+
+---
+name: s_pack_ll_b32_b16
+body: |
+ bb.0:
+ ; GCN-LABEL: name: s_pack_ll_b32_b16
+ ; GCN: [[DEF:%[0-9]+]]:sreg_32 = IMPLICIT_DEF
+ ; GCN-NEXT: [[DEF1:%[0-9]+]]:vgpr_32 = IMPLICIT_DEF
+ ; GCN-NEXT: [[V_MOV_B32_e32_:%[0-9]+]]:vgpr_32 = V_MOV_B32_e32 [[DEF]], implicit $exec
+ ; GCN-NEXT: [[REG_SEQUENCE:%[0-9]+]]:vgpr_32 = REG_SEQUENCE [[V_MOV_B32_e32_]].lo16, %subreg.lo16, [[DEF1]].lo16, %subreg.hi16
+ %0:sreg_32 = IMPLICIT_DEF
+ %1:vgpr_32 = IMPLICIT_DEF
+ %2:sreg_32 = COPY %1:vgpr_32
+ %3:sreg_32 = S_PACK_LL_B32_B16 %0:sreg_32, %2:sreg_32, implicit-def dead $scc
+...
+
+---
+name: s_pack_lh_b32_b16
+body: |
+ bb.0:
+ ; GCN-LABEL: name: s_pack_lh_b32_b16
+ ; GCN: [[DEF:%[0-9]+]]:sreg_32 = IMPLICIT_DEF
+ ; GCN-NEXT: [[DEF1:%[0-9]+]]:vgpr_32 = IMPLICIT_DEF
+ ; GCN-NEXT: [[V_MOV_B32_e32_:%[0-9]+]]:vgpr_32 = V_MOV_B32_e32 [[DEF]], implicit $exec
+ ; GCN-NEXT: [[REG_SEQUENCE:%[0-9]+]]:vgpr_32 = REG_SEQUENCE [[V_MOV_B32_e32_]].lo16, %subreg.lo16, [[DEF1]].hi16, %subreg.hi16
+ %0:sreg_32 = IMPLICIT_DEF
+ %1:vgpr_32 = IMPLICIT_DEF
+ %2:sreg_32 = COPY %1:vgpr_32
+ %3:sreg_32 = S_PACK_LH_B32_B16 %0:sreg_32, %2:sreg_32, implicit-def dead $scc
+...
+
+---
+name: s_pack_hl_b32_b16
+body: |
+ bb.0:
+ ; GCN-LABEL: name: s_pack_hl_b32_b16
+ ; GCN: [[DEF:%[0-9]+]]:sreg_32 = IMPLICIT_DEF
+ ; GCN-NEXT: [[DEF1:%[0-9]+]]:vgpr_32 = IMPLICIT_DEF
+ ; GCN-NEXT: [[V_MOV_B32_e32_:%[0-9]+]]:vgpr_32 = V_MOV_B32_e32 [[DEF]], implicit $exec
+ ; GCN-NEXT: [[REG_SEQUENCE:%[0-9]+]]:vgpr_32 = REG_SEQUENCE [[V_MOV_B32_e32_]].hi16, %subreg.lo16, [[DEF1]].lo16, %subreg.hi16
+ %0:sreg_32 = IMPLICIT_DEF
+ %1:vgpr_32 = IMPLICIT_DEF
+ %2:sreg_32 = COPY %1:vgpr_32
+ %3:sreg_32 = S_PACK_HL_B32_B16 %0:sreg_32, %2:sreg_32, implicit-def dead $scc
+...
+
+---
+name: s_pack_hh_b32_b16
+body: |
+ bb.0:
+ ; GCN-LABEL: name: s_pack_hh_b32_b16
+ ; GCN: [[DEF:%[0-9]+]]:sreg_32 = IMPLICIT_DEF
+ ; GCN-NEXT: [[DEF1:%[0-9]+]]:vgpr_32 = IMPLICIT_DEF
+ ; GCN-NEXT: [[V_MOV_B32_e32_:%[0-9]+]]:vgpr_32 = V_MOV_B32_e32 [[DEF]], implicit $exec
+ ; GCN-NEXT: [[REG_SEQUENCE:%[0-9]+]]:vgpr_32 = REG_SEQUENCE [[V_MOV_B32_e32_]].hi16, %subreg.lo16, [[DEF1]].hi16, %subreg.hi16
+ %0:sreg_32 = IMPLICIT_DEF
+ %1:vgpr_32 = IMPLICIT_DEF
+ %2:sreg_32 = COPY %1:vgpr_32
+ %3:sreg_32 = S_PACK_HH_B32_B16 %0:sreg_32, %2:sreg_32, implicit-def dead $scc
+...
+
+---
+name: s_pack_ll_b32_b16_use_SALU16
+body: |
+ bb.0:
+ ; GCN-LABEL: name: s_pack_ll_b32_b16_use_SALU16
+ ; GCN: [[DEF:%[0-9]+]]:vgpr_32 = IMPLICIT_DEF
+ ; GCN-NEXT: [[V_FMAC_F16_t16_e64_:%[0-9]+]]:vgpr_16 = V_FMAC_F16_t16_e64 0, [[DEF]].lo16, 0, [[DEF]].lo16, 0, [[DEF]].lo16, 0, 0, 0, implicit $mode, implicit $exec
+ ; GCN-NEXT: [[REG_SEQUENCE:%[0-9]+]]:vgpr_32 = REG_SEQUENCE [[V_FMAC_F16_t16_e64_]], %subreg.lo16, [[DEF]].lo16, %subreg.hi16
+ %0:vgpr_32 = IMPLICIT_DEF
+ %1:sreg_32 = COPY %0:vgpr_32
+ %2:sreg_32 = S_FMAC_F16 %1:sreg_32, %1:sreg_32, %1:sreg_32, implicit $mode
+ %3:sreg_32 = S_PACK_LL_B32_B16 %2:sreg_32, %1:sreg_32, implicit-def dead $scc
+...
+
+---
+name: s_pack_ll_b32_b16_use_imm
+body: |
+ bb.0:
+ ; GCN-LABEL: name: s_pack_ll_b32_b16_use_imm
+ ; GCN: [[DEF:%[0-9]+]]:vgpr_32 = IMPLICIT_DEF
+ ; GCN-NEXT: [[V_MOV_B32_e32_:%[0-9]+]]:vgpr_32 = V_MOV_B32_e32 1, implicit $exec
+ ; GCN-NEXT: [[REG_SEQUENCE:%[0-9]+]]:vgpr_32 = REG_SEQUENCE [[V_MOV_B32_e32_]].lo16, %subreg.lo16, [[DEF]].lo16, %subreg.hi16
+ %0:vgpr_32 = IMPLICIT_DEF
+ %1:sreg_32 = COPY %0:vgpr_32
+ %2:sreg_32 = S_PACK_LL_B32_B16 1, %1:sreg_32, implicit-def dead $scc
+...
diff --git a/llvm/test/CodeGen/AMDGPU/fneg-fabs.bf16.ll b/llvm/test/CodeGen/AMDGPU/fneg-fabs.bf16.ll
index 76da0aa..10c60df 100644
--- a/llvm/test/CodeGen/AMDGPU/fneg-fabs.bf16.ll
+++ b/llvm/test/CodeGen/AMDGPU/fneg-fabs.bf16.ll
@@ -478,41 +478,76 @@ define amdgpu_kernel void @s_fneg_fabs_v2bf16_non_bc_src(ptr addrspace(1) %out,
; GFX9-NEXT: global_store_dword v0, v1, s[0:1]
; GFX9-NEXT: s_endpgm
;
-; GFX11-LABEL: s_fneg_fabs_v2bf16_non_bc_src:
-; GFX11: ; %bb.0:
-; GFX11-NEXT: s_load_b32 s0, s[4:5], 0x8
-; GFX11-NEXT: s_waitcnt lgkmcnt(0)
-; GFX11-NEXT: s_lshl_b32 s1, s0, 16
-; GFX11-NEXT: s_and_b32 s0, s0, 0xffff0000
-; GFX11-NEXT: v_add_f32_e64 v0, s1, 1.0
-; GFX11-NEXT: v_add_f32_e64 v1, s0, 2.0
-; GFX11-NEXT: s_load_b64 s[0:1], s[4:5], 0x0
-; GFX11-NEXT: s_delay_alu instid0(VALU_DEP_2) | instskip(NEXT) | instid1(VALU_DEP_2)
-; GFX11-NEXT: v_bfe_u32 v2, v0, 16, 1
-; GFX11-NEXT: v_bfe_u32 v3, v1, 16, 1
-; GFX11-NEXT: v_or_b32_e32 v4, 0x400000, v0
-; GFX11-NEXT: v_cmp_u_f32_e32 vcc_lo, v0, v0
-; GFX11-NEXT: v_or_b32_e32 v5, 0x400000, v1
-; GFX11-NEXT: s_delay_alu instid0(VALU_DEP_4) | instskip(NEXT) | instid1(VALU_DEP_1)
-; GFX11-NEXT: v_add_nc_u32_e32 v3, v3, v1
-; GFX11-NEXT: v_add_nc_u32_e32 v3, 0x7fff, v3
-; GFX11-NEXT: v_add_nc_u32_e32 v2, v2, v0
-; GFX11-NEXT: s_delay_alu instid0(VALU_DEP_1) | instskip(NEXT) | instid1(VALU_DEP_1)
-; GFX11-NEXT: v_add_nc_u32_e32 v2, 0x7fff, v2
-; GFX11-NEXT: v_cndmask_b32_e32 v0, v2, v4, vcc_lo
-; GFX11-NEXT: v_cmp_u_f32_e32 vcc_lo, v1, v1
-; GFX11-NEXT: s_delay_alu instid0(VALU_DEP_2) | instskip(NEXT) | instid1(VALU_DEP_1)
-; GFX11-NEXT: v_lshrrev_b32_e32 v0, 16, v0
-; GFX11-NEXT: v_dual_cndmask_b32 v1, v3, v5 :: v_dual_and_b32 v0, 0xffff, v0
-; GFX11-NEXT: s_delay_alu instid0(VALU_DEP_1) | instskip(NEXT) | instid1(VALU_DEP_1)
-; GFX11-NEXT: v_lshrrev_b32_e32 v1, 16, v1
-; GFX11-NEXT: v_lshl_or_b32 v0, v1, 16, v0
-; GFX11-NEXT: v_mov_b32_e32 v1, 0
-; GFX11-NEXT: s_delay_alu instid0(VALU_DEP_2)
-; GFX11-NEXT: v_or_b32_e32 v0, 0x80008000, v0
-; GFX11-NEXT: s_waitcnt lgkmcnt(0)
-; GFX11-NEXT: global_store_b32 v1, v0, s[0:1]
-; GFX11-NEXT: s_endpgm
+; GFX11-TRUE16-LABEL: s_fneg_fabs_v2bf16_non_bc_src:
+; GFX11-TRUE16: ; %bb.0:
+; GFX11-TRUE16-NEXT: s_load_b32 s0, s[4:5], 0x8
+; GFX11-TRUE16-NEXT: s_waitcnt lgkmcnt(0)
+; GFX11-TRUE16-NEXT: s_and_b32 s1, s0, 0xffff0000
+; GFX11-TRUE16-NEXT: s_lshl_b32 s0, s0, 16
+; GFX11-TRUE16-NEXT: v_add_f32_e64 v0, s1, 2.0
+; GFX11-TRUE16-NEXT: v_add_f32_e64 v1, s0, 1.0
+; GFX11-TRUE16-NEXT: s_load_b64 s[0:1], s[4:5], 0x0
+; GFX11-TRUE16-NEXT: s_delay_alu instid0(VALU_DEP_2) | instskip(NEXT) | instid1(VALU_DEP_2)
+; GFX11-TRUE16-NEXT: v_bfe_u32 v2, v0, 16, 1
+; GFX11-TRUE16-NEXT: v_bfe_u32 v3, v1, 16, 1
+; GFX11-TRUE16-NEXT: v_or_b32_e32 v4, 0x400000, v0
+; GFX11-TRUE16-NEXT: v_cmp_u_f32_e32 vcc_lo, v0, v0
+; GFX11-TRUE16-NEXT: v_or_b32_e32 v5, 0x400000, v1
+; GFX11-TRUE16-NEXT: s_delay_alu instid0(VALU_DEP_4) | instskip(NEXT) | instid1(VALU_DEP_1)
+; GFX11-TRUE16-NEXT: v_add_nc_u32_e32 v3, v3, v1
+; GFX11-TRUE16-NEXT: v_add_nc_u32_e32 v3, 0x7fff, v3
+; GFX11-TRUE16-NEXT: v_add_nc_u32_e32 v2, v2, v0
+; GFX11-TRUE16-NEXT: s_delay_alu instid0(VALU_DEP_1) | instskip(NEXT) | instid1(VALU_DEP_1)
+; GFX11-TRUE16-NEXT: v_add_nc_u32_e32 v2, 0x7fff, v2
+; GFX11-TRUE16-NEXT: v_cndmask_b32_e32 v0, v2, v4, vcc_lo
+; GFX11-TRUE16-NEXT: v_cmp_u_f32_e32 vcc_lo, v1, v1
+; GFX11-TRUE16-NEXT: s_delay_alu instid0(VALU_DEP_2) | instskip(SKIP_1) | instid1(VALU_DEP_1)
+; GFX11-TRUE16-NEXT: v_lshrrev_b32_e32 v0, 16, v0
+; GFX11-TRUE16-NEXT: v_cndmask_b32_e32 v1, v3, v5, vcc_lo
+; GFX11-TRUE16-NEXT: v_lshrrev_b32_e32 v1, 16, v1
+; GFX11-TRUE16-NEXT: s_delay_alu instid0(VALU_DEP_3) | instskip(SKIP_1) | instid1(VALU_DEP_2)
+; GFX11-TRUE16-NEXT: v_mov_b16_e32 v1.h, v0.l
+; GFX11-TRUE16-NEXT: v_mov_b32_e32 v0, 0
+; GFX11-TRUE16-NEXT: v_or_b32_e32 v1, 0x80008000, v1
+; GFX11-TRUE16-NEXT: s_waitcnt lgkmcnt(0)
+; GFX11-TRUE16-NEXT: global_store_b32 v0, v1, s[0:1]
+; GFX11-TRUE16-NEXT: s_endpgm
+;
+; GFX11-FAKE16-LABEL: s_fneg_fabs_v2bf16_non_bc_src:
+; GFX11-FAKE16: ; %bb.0:
+; GFX11-FAKE16-NEXT: s_load_b32 s0, s[4:5], 0x8
+; GFX11-FAKE16-NEXT: s_waitcnt lgkmcnt(0)
+; GFX11-FAKE16-NEXT: s_lshl_b32 s1, s0, 16
+; GFX11-FAKE16-NEXT: s_and_b32 s0, s0, 0xffff0000
+; GFX11-FAKE16-NEXT: v_add_f32_e64 v0, s1, 1.0
+; GFX11-FAKE16-NEXT: v_add_f32_e64 v1, s0, 2.0
+; GFX11-FAKE16-NEXT: s_load_b64 s[0:1], s[4:5], 0x0
+; GFX11-FAKE16-NEXT: s_delay_alu instid0(VALU_DEP_2) | instskip(NEXT) | instid1(VALU_DEP_2)
+; GFX11-FAKE16-NEXT: v_bfe_u32 v2, v0, 16, 1
+; GFX11-FAKE16-NEXT: v_bfe_u32 v3, v1, 16, 1
+; GFX11-FAKE16-NEXT: v_or_b32_e32 v4, 0x400000, v0
+; GFX11-FAKE16-NEXT: v_cmp_u_f32_e32 vcc_lo, v0, v0
+; GFX11-FAKE16-NEXT: v_or_b32_e32 v5, 0x400000, v1
+; GFX11-FAKE16-NEXT: s_delay_alu instid0(VALU_DEP_4) | instskip(NEXT) | instid1(VALU_DEP_1)
+; GFX11-FAKE16-NEXT: v_add_nc_u32_e32 v3, v3, v1
+; GFX11-FAKE16-NEXT: v_add_nc_u32_e32 v3, 0x7fff, v3
+; GFX11-FAKE16-NEXT: v_add_nc_u32_e32 v2, v2, v0
+; GFX11-FAKE16-NEXT: s_delay_alu instid0(VALU_DEP_1) | instskip(NEXT) | instid1(VALU_DEP_1)
+; GFX11-FAKE16-NEXT: v_add_nc_u32_e32 v2, 0x7fff, v2
+; GFX11-FAKE16-NEXT: v_cndmask_b32_e32 v0, v2, v4, vcc_lo
+; GFX11-FAKE16-NEXT: v_cmp_u_f32_e32 vcc_lo, v1, v1
+; GFX11-FAKE16-NEXT: s_delay_alu instid0(VALU_DEP_2) | instskip(NEXT) | instid1(VALU_DEP_1)
+; GFX11-FAKE16-NEXT: v_lshrrev_b32_e32 v0, 16, v0
+; GFX11-FAKE16-NEXT: v_dual_cndmask_b32 v1, v3, v5 :: v_dual_and_b32 v0, 0xffff, v0
+; GFX11-FAKE16-NEXT: s_delay_alu instid0(VALU_DEP_1) | instskip(NEXT) | instid1(VALU_DEP_1)
+; GFX11-FAKE16-NEXT: v_lshrrev_b32_e32 v1, 16, v1
+; GFX11-FAKE16-NEXT: v_lshl_or_b32 v0, v1, 16, v0
+; GFX11-FAKE16-NEXT: v_mov_b32_e32 v1, 0
+; GFX11-FAKE16-NEXT: s_delay_alu instid0(VALU_DEP_2)
+; GFX11-FAKE16-NEXT: v_or_b32_e32 v0, 0x80008000, v0
+; GFX11-FAKE16-NEXT: s_waitcnt lgkmcnt(0)
+; GFX11-FAKE16-NEXT: global_store_b32 v1, v0, s[0:1]
+; GFX11-FAKE16-NEXT: s_endpgm
%add = fadd <2 x bfloat> %in, <bfloat 1.0, bfloat 2.0>
%fabs = call <2 x bfloat> @llvm.fabs.v2bf16(<2 x bfloat> %add)
%fneg.fabs = fsub <2 x bfloat> <bfloat -0.0, bfloat -0.0>, %fabs
@@ -752,42 +787,78 @@ define amdgpu_kernel void @fold_user_fneg_fabs_v2bf16(ptr addrspace(1) %out, <2
; GFX9-NEXT: global_store_dword v0, v1, s[0:1]
; GFX9-NEXT: s_endpgm
;
-; GFX11-LABEL: fold_user_fneg_fabs_v2bf16:
-; GFX11: ; %bb.0:
-; GFX11-NEXT: s_load_b32 s0, s[4:5], 0x8
-; GFX11-NEXT: s_waitcnt lgkmcnt(0)
-; GFX11-NEXT: s_and_b32 s1, s0, 0x7fff
-; GFX11-NEXT: s_lshr_b32 s0, s0, 16
-; GFX11-NEXT: s_lshl_b32 s1, s1, 16
-; GFX11-NEXT: s_and_b32 s0, s0, 0x7fff
-; GFX11-NEXT: v_mul_f32_e64 v0, s1, -4.0
-; GFX11-NEXT: s_lshl_b32 s0, s0, 16
-; GFX11-NEXT: s_delay_alu instid0(SALU_CYCLE_1) | instskip(SKIP_1) | instid1(VALU_DEP_2)
-; GFX11-NEXT: v_mul_f32_e64 v1, s0, -4.0
-; GFX11-NEXT: s_load_b64 s[0:1], s[4:5], 0x0
-; GFX11-NEXT: v_bfe_u32 v2, v0, 16, 1
-; GFX11-NEXT: v_or_b32_e32 v4, 0x400000, v0
-; GFX11-NEXT: v_cmp_u_f32_e32 vcc_lo, v0, v0
-; GFX11-NEXT: v_bfe_u32 v3, v1, 16, 1
-; GFX11-NEXT: v_or_b32_e32 v5, 0x400000, v1
-; GFX11-NEXT: s_delay_alu instid0(VALU_DEP_2) | instskip(NEXT) | instid1(VALU_DEP_1)
-; GFX11-NEXT: v_add_nc_u32_e32 v3, v3, v1
-; GFX11-NEXT: v_add_nc_u32_e32 v3, 0x7fff, v3
-; GFX11-NEXT: v_add_nc_u32_e32 v2, v2, v0
-; GFX11-NEXT: s_delay_alu instid0(VALU_DEP_1) | instskip(NEXT) | instid1(VALU_DEP_1)
-; GFX11-NEXT: v_add_nc_u32_e32 v2, 0x7fff, v2
-; GFX11-NEXT: v_cndmask_b32_e32 v0, v2, v4, vcc_lo
-; GFX11-NEXT: v_cmp_u_f32_e32 vcc_lo, v1, v1
-; GFX11-NEXT: v_mov_b32_e32 v2, 0
-; GFX11-NEXT: s_delay_alu instid0(VALU_DEP_3) | instskip(NEXT) | instid1(VALU_DEP_1)
-; GFX11-NEXT: v_lshrrev_b32_e32 v0, 16, v0
-; GFX11-NEXT: v_dual_cndmask_b32 v1, v3, v5 :: v_dual_and_b32 v0, 0xffff, v0
-; GFX11-NEXT: s_delay_alu instid0(VALU_DEP_1) | instskip(NEXT) | instid1(VALU_DEP_1)
-; GFX11-NEXT: v_lshrrev_b32_e32 v1, 16, v1
-; GFX11-NEXT: v_lshl_or_b32 v0, v1, 16, v0
-; GFX11-NEXT: s_waitcnt lgkmcnt(0)
-; GFX11-NEXT: global_store_b32 v2, v0, s[0:1]
-; GFX11-NEXT: s_endpgm
+; GFX11-TRUE16-LABEL: fold_user_fneg_fabs_v2bf16:
+; GFX11-TRUE16: ; %bb.0:
+; GFX11-TRUE16-NEXT: s_load_b32 s0, s[4:5], 0x8
+; GFX11-TRUE16-NEXT: s_waitcnt lgkmcnt(0)
+; GFX11-TRUE16-NEXT: s_lshr_b32 s1, s0, 16
+; GFX11-TRUE16-NEXT: s_and_b32 s0, s0, 0x7fff
+; GFX11-TRUE16-NEXT: s_and_b32 s1, s1, 0x7fff
+; GFX11-TRUE16-NEXT: s_lshl_b32 s0, s0, 16
+; GFX11-TRUE16-NEXT: s_lshl_b32 s1, s1, 16
+; GFX11-TRUE16-NEXT: v_mul_f32_e64 v1, s0, -4.0
+; GFX11-TRUE16-NEXT: v_mul_f32_e64 v0, s1, -4.0
+; GFX11-TRUE16-NEXT: s_load_b64 s[0:1], s[4:5], 0x0
+; GFX11-TRUE16-NEXT: s_delay_alu instid0(VALU_DEP_2) | instskip(NEXT) | instid1(VALU_DEP_2)
+; GFX11-TRUE16-NEXT: v_bfe_u32 v3, v1, 16, 1
+; GFX11-TRUE16-NEXT: v_bfe_u32 v2, v0, 16, 1
+; GFX11-TRUE16-NEXT: v_or_b32_e32 v4, 0x400000, v0
+; GFX11-TRUE16-NEXT: v_cmp_u_f32_e32 vcc_lo, v0, v0
+; GFX11-TRUE16-NEXT: v_or_b32_e32 v5, 0x400000, v1
+; GFX11-TRUE16-NEXT: v_add_nc_u32_e32 v3, v3, v1
+; GFX11-TRUE16-NEXT: s_delay_alu instid0(VALU_DEP_1) | instskip(SKIP_1) | instid1(VALU_DEP_1)
+; GFX11-TRUE16-NEXT: v_add_nc_u32_e32 v3, 0x7fff, v3
+; GFX11-TRUE16-NEXT: v_add_nc_u32_e32 v2, v2, v0
+; GFX11-TRUE16-NEXT: v_add_nc_u32_e32 v2, 0x7fff, v2
+; GFX11-TRUE16-NEXT: s_delay_alu instid0(VALU_DEP_1) | instskip(SKIP_2) | instid1(VALU_DEP_3)
+; GFX11-TRUE16-NEXT: v_cndmask_b32_e32 v0, v2, v4, vcc_lo
+; GFX11-TRUE16-NEXT: v_cmp_u_f32_e32 vcc_lo, v1, v1
+; GFX11-TRUE16-NEXT: v_mov_b32_e32 v2, 0
+; GFX11-TRUE16-NEXT: v_lshrrev_b32_e32 v0, 16, v0
+; GFX11-TRUE16-NEXT: v_cndmask_b32_e32 v1, v3, v5, vcc_lo
+; GFX11-TRUE16-NEXT: s_delay_alu instid0(VALU_DEP_1) | instskip(NEXT) | instid1(VALU_DEP_3)
+; GFX11-TRUE16-NEXT: v_lshrrev_b32_e32 v1, 16, v1
+; GFX11-TRUE16-NEXT: v_mov_b16_e32 v1.h, v0.l
+; GFX11-TRUE16-NEXT: s_waitcnt lgkmcnt(0)
+; GFX11-TRUE16-NEXT: global_store_b32 v2, v1, s[0:1]
+; GFX11-TRUE16-NEXT: s_endpgm
+;
+; GFX11-FAKE16-LABEL: fold_user_fneg_fabs_v2bf16:
+; GFX11-FAKE16: ; %bb.0:
+; GFX11-FAKE16-NEXT: s_load_b32 s0, s[4:5], 0x8
+; GFX11-FAKE16-NEXT: s_waitcnt lgkmcnt(0)
+; GFX11-FAKE16-NEXT: s_and_b32 s1, s0, 0x7fff
+; GFX11-FAKE16-NEXT: s_lshr_b32 s0, s0, 16
+; GFX11-FAKE16-NEXT: s_lshl_b32 s1, s1, 16
+; GFX11-FAKE16-NEXT: s_and_b32 s0, s0, 0x7fff
+; GFX11-FAKE16-NEXT: v_mul_f32_e64 v0, s1, -4.0
+; GFX11-FAKE16-NEXT: s_lshl_b32 s0, s0, 16
+; GFX11-FAKE16-NEXT: s_delay_alu instid0(SALU_CYCLE_1) | instskip(SKIP_1) | instid1(VALU_DEP_2)
+; GFX11-FAKE16-NEXT: v_mul_f32_e64 v1, s0, -4.0
+; GFX11-FAKE16-NEXT: s_load_b64 s[0:1], s[4:5], 0x0
+; GFX11-FAKE16-NEXT: v_bfe_u32 v2, v0, 16, 1
+; GFX11-FAKE16-NEXT: v_or_b32_e32 v4, 0x400000, v0
+; GFX11-FAKE16-NEXT: v_cmp_u_f32_e32 vcc_lo, v0, v0
+; GFX11-FAKE16-NEXT: v_bfe_u32 v3, v1, 16, 1
+; GFX11-FAKE16-NEXT: v_or_b32_e32 v5, 0x400000, v1
+; GFX11-FAKE16-NEXT: s_delay_alu instid0(VALU_DEP_2) | instskip(NEXT) | instid1(VALU_DEP_1)
+; GFX11-FAKE16-NEXT: v_add_nc_u32_e32 v3, v3, v1
+; GFX11-FAKE16-NEXT: v_add_nc_u32_e32 v3, 0x7fff, v3
+; GFX11-FAKE16-NEXT: v_add_nc_u32_e32 v2, v2, v0
+; GFX11-FAKE16-NEXT: s_delay_alu instid0(VALU_DEP_1) | instskip(NEXT) | instid1(VALU_DEP_1)
+; GFX11-FAKE16-NEXT: v_add_nc_u32_e32 v2, 0x7fff, v2
+; GFX11-FAKE16-NEXT: v_cndmask_b32_e32 v0, v2, v4, vcc_lo
+; GFX11-FAKE16-NEXT: v_cmp_u_f32_e32 vcc_lo, v1, v1
+; GFX11-FAKE16-NEXT: v_mov_b32_e32 v2, 0
+; GFX11-FAKE16-NEXT: s_delay_alu instid0(VALU_DEP_3) | instskip(NEXT) | instid1(VALU_DEP_1)
+; GFX11-FAKE16-NEXT: v_lshrrev_b32_e32 v0, 16, v0
+; GFX11-FAKE16-NEXT: v_dual_cndmask_b32 v1, v3, v5 :: v_dual_and_b32 v0, 0xffff, v0
+; GFX11-FAKE16-NEXT: s_delay_alu instid0(VALU_DEP_1) | instskip(NEXT) | instid1(VALU_DEP_1)
+; GFX11-FAKE16-NEXT: v_lshrrev_b32_e32 v1, 16, v1
+; GFX11-FAKE16-NEXT: v_lshl_or_b32 v0, v1, 16, v0
+; GFX11-FAKE16-NEXT: s_waitcnt lgkmcnt(0)
+; GFX11-FAKE16-NEXT: global_store_b32 v2, v0, s[0:1]
+; GFX11-FAKE16-NEXT: s_endpgm
%fabs = call <2 x bfloat> @llvm.fabs.v2bf16(<2 x bfloat> %in)
%fneg.fabs = fsub <2 x bfloat> <bfloat -0.0, bfloat -0.0>, %fabs
%mul = fmul <2 x bfloat> %fneg.fabs, <bfloat 4.0, bfloat 4.0>
@@ -975,46 +1046,88 @@ define amdgpu_kernel void @s_fneg_multi_use_fabs_foldable_neg_v2bf16(ptr addrspa
; GFX9-NEXT: global_store_dword v0, v1, s[2:3]
; GFX9-NEXT: s_endpgm
;
-; GFX11-LABEL: s_fneg_multi_use_fabs_foldable_neg_v2bf16:
-; GFX11: ; %bb.0:
-; GFX11-NEXT: s_load_b32 s6, s[4:5], 0x10
-; GFX11-NEXT: s_waitcnt lgkmcnt(0)
-; GFX11-NEXT: s_and_b32 s0, s6, 0x7fff
-; GFX11-NEXT: s_lshr_b32 s1, s6, 16
-; GFX11-NEXT: s_lshl_b32 s0, s0, 16
-; GFX11-NEXT: s_and_b32 s1, s1, 0x7fff
-; GFX11-NEXT: v_mul_f32_e64 v0, s0, -4.0
-; GFX11-NEXT: s_lshl_b32 s0, s1, 16
-; GFX11-NEXT: s_delay_alu instid0(SALU_CYCLE_1) | instskip(SKIP_1) | instid1(VALU_DEP_2)
-; GFX11-NEXT: v_mul_f32_e64 v1, s0, -4.0
-; GFX11-NEXT: s_load_b128 s[0:3], s[4:5], 0x0
-; GFX11-NEXT: v_bfe_u32 v2, v0, 16, 1
-; GFX11-NEXT: v_or_b32_e32 v4, 0x400000, v0
-; GFX11-NEXT: v_cmp_u_f32_e32 vcc_lo, v0, v0
-; GFX11-NEXT: v_bfe_u32 v3, v1, 16, 1
-; GFX11-NEXT: v_or_b32_e32 v5, 0x400000, v1
-; GFX11-NEXT: s_and_b32 s4, s6, 0x7fff7fff
-; GFX11-NEXT: s_delay_alu instid0(VALU_DEP_2) | instskip(NEXT) | instid1(VALU_DEP_1)
-; GFX11-NEXT: v_add_nc_u32_e32 v3, v3, v1
-; GFX11-NEXT: v_add_nc_u32_e32 v3, 0x7fff, v3
-; GFX11-NEXT: v_add_nc_u32_e32 v2, v2, v0
-; GFX11-NEXT: s_delay_alu instid0(VALU_DEP_1) | instskip(NEXT) | instid1(VALU_DEP_1)
-; GFX11-NEXT: v_add_nc_u32_e32 v2, 0x7fff, v2
-; GFX11-NEXT: v_cndmask_b32_e32 v0, v2, v4, vcc_lo
-; GFX11-NEXT: v_cmp_u_f32_e32 vcc_lo, v1, v1
-; GFX11-NEXT: v_mov_b32_e32 v2, 0
-; GFX11-NEXT: s_delay_alu instid0(VALU_DEP_3) | instskip(SKIP_1) | instid1(VALU_DEP_2)
-; GFX11-NEXT: v_lshrrev_b32_e32 v0, 16, v0
-; GFX11-NEXT: v_cndmask_b32_e32 v1, v3, v5, vcc_lo
-; GFX11-NEXT: v_dual_mov_b32 v3, s4 :: v_dual_and_b32 v0, 0xffff, v0
-; GFX11-NEXT: s_delay_alu instid0(VALU_DEP_2) | instskip(NEXT) | instid1(VALU_DEP_1)
-; GFX11-NEXT: v_lshrrev_b32_e32 v1, 16, v1
-; GFX11-NEXT: v_lshl_or_b32 v0, v1, 16, v0
-; GFX11-NEXT: s_waitcnt lgkmcnt(0)
-; GFX11-NEXT: s_clause 0x1
-; GFX11-NEXT: global_store_b32 v2, v3, s[0:1]
-; GFX11-NEXT: global_store_b32 v2, v0, s[2:3]
-; GFX11-NEXT: s_endpgm
+; GFX11-TRUE16-LABEL: s_fneg_multi_use_fabs_foldable_neg_v2bf16:
+; GFX11-TRUE16: ; %bb.0:
+; GFX11-TRUE16-NEXT: s_load_b32 s6, s[4:5], 0x10
+; GFX11-TRUE16-NEXT: s_waitcnt lgkmcnt(0)
+; GFX11-TRUE16-NEXT: s_lshr_b32 s0, s6, 16
+; GFX11-TRUE16-NEXT: s_and_b32 s1, s6, 0x7fff
+; GFX11-TRUE16-NEXT: s_and_b32 s0, s0, 0x7fff
+; GFX11-TRUE16-NEXT: s_delay_alu instid0(SALU_CYCLE_1) | instskip(NEXT) | instid1(SALU_CYCLE_1)
+; GFX11-TRUE16-NEXT: s_lshl_b32 s0, s0, 16
+; GFX11-TRUE16-NEXT: v_mul_f32_e64 v0, s0, -4.0
+; GFX11-TRUE16-NEXT: s_lshl_b32 s0, s1, 16
+; GFX11-TRUE16-NEXT: s_delay_alu instid0(SALU_CYCLE_1) | instskip(SKIP_1) | instid1(VALU_DEP_2)
+; GFX11-TRUE16-NEXT: v_mul_f32_e64 v1, s0, -4.0
+; GFX11-TRUE16-NEXT: s_load_b128 s[0:3], s[4:5], 0x0
+; GFX11-TRUE16-NEXT: v_bfe_u32 v2, v0, 16, 1
+; GFX11-TRUE16-NEXT: v_or_b32_e32 v4, 0x400000, v0
+; GFX11-TRUE16-NEXT: v_cmp_u_f32_e32 vcc_lo, v0, v0
+; GFX11-TRUE16-NEXT: v_bfe_u32 v3, v1, 16, 1
+; GFX11-TRUE16-NEXT: v_or_b32_e32 v5, 0x400000, v1
+; GFX11-TRUE16-NEXT: s_and_b32 s4, s6, 0x7fff7fff
+; GFX11-TRUE16-NEXT: s_delay_alu instid0(VALU_DEP_2) | instskip(NEXT) | instid1(VALU_DEP_1)
+; GFX11-TRUE16-NEXT: v_add_nc_u32_e32 v3, v3, v1
+; GFX11-TRUE16-NEXT: v_add_nc_u32_e32 v3, 0x7fff, v3
+; GFX11-TRUE16-NEXT: v_add_nc_u32_e32 v2, v2, v0
+; GFX11-TRUE16-NEXT: s_delay_alu instid0(VALU_DEP_1) | instskip(NEXT) | instid1(VALU_DEP_1)
+; GFX11-TRUE16-NEXT: v_add_nc_u32_e32 v2, 0x7fff, v2
+; GFX11-TRUE16-NEXT: v_cndmask_b32_e32 v0, v2, v4, vcc_lo
+; GFX11-TRUE16-NEXT: v_cmp_u_f32_e32 vcc_lo, v1, v1
+; GFX11-TRUE16-NEXT: v_mov_b32_e32 v2, 0
+; GFX11-TRUE16-NEXT: s_delay_alu instid0(VALU_DEP_3) | instskip(SKIP_2) | instid1(VALU_DEP_2)
+; GFX11-TRUE16-NEXT: v_lshrrev_b32_e32 v0, 16, v0
+; GFX11-TRUE16-NEXT: v_cndmask_b32_e32 v1, v3, v5, vcc_lo
+; GFX11-TRUE16-NEXT: v_mov_b32_e32 v3, s4
+; GFX11-TRUE16-NEXT: v_lshrrev_b32_e32 v1, 16, v1
+; GFX11-TRUE16-NEXT: s_delay_alu instid0(VALU_DEP_4)
+; GFX11-TRUE16-NEXT: v_mov_b16_e32 v1.h, v0.l
+; GFX11-TRUE16-NEXT: s_waitcnt lgkmcnt(0)
+; GFX11-TRUE16-NEXT: s_clause 0x1
+; GFX11-TRUE16-NEXT: global_store_b32 v2, v3, s[0:1]
+; GFX11-TRUE16-NEXT: global_store_b32 v2, v1, s[2:3]
+; GFX11-TRUE16-NEXT: s_endpgm
+;
+; GFX11-FAKE16-LABEL: s_fneg_multi_use_fabs_foldable_neg_v2bf16:
+; GFX11-FAKE16: ; %bb.0:
+; GFX11-FAKE16-NEXT: s_load_b32 s6, s[4:5], 0x10
+; GFX11-FAKE16-NEXT: s_waitcnt lgkmcnt(0)
+; GFX11-FAKE16-NEXT: s_and_b32 s0, s6, 0x7fff
+; GFX11-FAKE16-NEXT: s_lshr_b32 s1, s6, 16
+; GFX11-FAKE16-NEXT: s_lshl_b32 s0, s0, 16
+; GFX11-FAKE16-NEXT: s_and_b32 s1, s1, 0x7fff
+; GFX11-FAKE16-NEXT: v_mul_f32_e64 v0, s0, -4.0
+; GFX11-FAKE16-NEXT: s_lshl_b32 s0, s1, 16
+; GFX11-FAKE16-NEXT: s_delay_alu instid0(SALU_CYCLE_1) | instskip(SKIP_1) | instid1(VALU_DEP_2)
+; GFX11-FAKE16-NEXT: v_mul_f32_e64 v1, s0, -4.0
+; GFX11-FAKE16-NEXT: s_load_b128 s[0:3], s[4:5], 0x0
+; GFX11-FAKE16-NEXT: v_bfe_u32 v2, v0, 16, 1
+; GFX11-FAKE16-NEXT: v_or_b32_e32 v4, 0x400000, v0
+; GFX11-FAKE16-NEXT: v_cmp_u_f32_e32 vcc_lo, v0, v0
+; GFX11-FAKE16-NEXT: v_bfe_u32 v3, v1, 16, 1
+; GFX11-FAKE16-NEXT: v_or_b32_e32 v5, 0x400000, v1
+; GFX11-FAKE16-NEXT: s_and_b32 s4, s6, 0x7fff7fff
+; GFX11-FAKE16-NEXT: s_delay_alu instid0(VALU_DEP_2) | instskip(NEXT) | instid1(VALU_DEP_1)
+; GFX11-FAKE16-NEXT: v_add_nc_u32_e32 v3, v3, v1
+; GFX11-FAKE16-NEXT: v_add_nc_u32_e32 v3, 0x7fff, v3
+; GFX11-FAKE16-NEXT: v_add_nc_u32_e32 v2, v2, v0
+; GFX11-FAKE16-NEXT: s_delay_alu instid0(VALU_DEP_1) | instskip(NEXT) | instid1(VALU_DEP_1)
+; GFX11-FAKE16-NEXT: v_add_nc_u32_e32 v2, 0x7fff, v2
+; GFX11-FAKE16-NEXT: v_cndmask_b32_e32 v0, v2, v4, vcc_lo
+; GFX11-FAKE16-NEXT: v_cmp_u_f32_e32 vcc_lo, v1, v1
+; GFX11-FAKE16-NEXT: v_mov_b32_e32 v2, 0
+; GFX11-FAKE16-NEXT: s_delay_alu instid0(VALU_DEP_3) | instskip(SKIP_1) | instid1(VALU_DEP_2)
+; GFX11-FAKE16-NEXT: v_lshrrev_b32_e32 v0, 16, v0
+; GFX11-FAKE16-NEXT: v_cndmask_b32_e32 v1, v3, v5, vcc_lo
+; GFX11-FAKE16-NEXT: v_dual_mov_b32 v3, s4 :: v_dual_and_b32 v0, 0xffff, v0
+; GFX11-FAKE16-NEXT: s_delay_alu instid0(VALU_DEP_2) | instskip(NEXT) | instid1(VALU_DEP_1)
+; GFX11-FAKE16-NEXT: v_lshrrev_b32_e32 v1, 16, v1
+; GFX11-FAKE16-NEXT: v_lshl_or_b32 v0, v1, 16, v0
+; GFX11-FAKE16-NEXT: s_waitcnt lgkmcnt(0)
+; GFX11-FAKE16-NEXT: s_clause 0x1
+; GFX11-FAKE16-NEXT: global_store_b32 v2, v3, s[0:1]
+; GFX11-FAKE16-NEXT: global_store_b32 v2, v0, s[2:3]
+; GFX11-FAKE16-NEXT: s_endpgm
%fabs = call <2 x bfloat> @llvm.fabs.v2bf16(<2 x bfloat> %in)
%fneg = fsub <2 x bfloat> <bfloat -0.0, bfloat -0.0>, %fabs
%mul = fmul <2 x bfloat> %fneg, <bfloat 4.0, bfloat 4.0>
diff --git a/llvm/test/CodeGen/AMDGPU/fneg.bf16.ll b/llvm/test/CodeGen/AMDGPU/fneg.bf16.ll
index 98044a7..84b904f 100644
--- a/llvm/test/CodeGen/AMDGPU/fneg.bf16.ll
+++ b/llvm/test/CodeGen/AMDGPU/fneg.bf16.ll
@@ -712,47 +712,88 @@ define amdgpu_kernel void @v_fneg_fold_v2bf16(ptr addrspace(1) %out, ptr addrspa
; GFX9-NEXT: global_store_dword v0, v1, s[0:1]
; GFX9-NEXT: s_endpgm
;
-; GFX11-LABEL: v_fneg_fold_v2bf16:
-; GFX11: ; %bb.0:
-; GFX11-NEXT: s_load_b128 s[0:3], s[4:5], 0x0
-; GFX11-NEXT: v_mov_b32_e32 v0, 0
-; GFX11-NEXT: s_waitcnt lgkmcnt(0)
-; GFX11-NEXT: global_load_b32 v1, v0, s[2:3]
-; GFX11-NEXT: s_waitcnt vmcnt(0)
-; GFX11-NEXT: v_lshrrev_b32_e32 v2, 16, v1
-; GFX11-NEXT: v_xor_b32_e32 v3, 0x8000, v1
-; GFX11-NEXT: v_lshlrev_b32_e32 v4, 16, v1
-; GFX11-NEXT: v_and_b32_e32 v1, 0xffff0000, v1
-; GFX11-NEXT: s_delay_alu instid0(VALU_DEP_4) | instskip(NEXT) | instid1(VALU_DEP_4)
-; GFX11-NEXT: v_xor_b32_e32 v2, 0x8000, v2
-; GFX11-NEXT: v_lshlrev_b32_e32 v3, 16, v3
-; GFX11-NEXT: s_delay_alu instid0(VALU_DEP_1) | instskip(NEXT) | instid1(VALU_DEP_1)
-; GFX11-NEXT: v_dual_mul_f32 v3, v3, v4 :: v_dual_lshlrev_b32 v2, 16, v2
-; GFX11-NEXT: v_mul_f32_e32 v1, v2, v1
-; GFX11-NEXT: s_delay_alu instid0(VALU_DEP_2) | instskip(SKIP_2) | instid1(VALU_DEP_3)
-; GFX11-NEXT: v_bfe_u32 v2, v3, 16, 1
-; GFX11-NEXT: v_or_b32_e32 v5, 0x400000, v3
-; GFX11-NEXT: v_cmp_u_f32_e32 vcc_lo, v3, v3
-; GFX11-NEXT: v_add_nc_u32_e32 v2, v2, v3
-; GFX11-NEXT: s_delay_alu instid0(VALU_DEP_1) | instskip(SKIP_2) | instid1(VALU_DEP_3)
-; GFX11-NEXT: v_add_nc_u32_e32 v2, 0x7fff, v2
-; GFX11-NEXT: v_bfe_u32 v4, v1, 16, 1
-; GFX11-NEXT: v_or_b32_e32 v6, 0x400000, v1
-; GFX11-NEXT: v_cndmask_b32_e32 v2, v2, v5, vcc_lo
-; GFX11-NEXT: s_delay_alu instid0(VALU_DEP_3) | instskip(SKIP_1) | instid1(VALU_DEP_3)
-; GFX11-NEXT: v_add_nc_u32_e32 v4, v4, v1
-; GFX11-NEXT: v_cmp_u_f32_e32 vcc_lo, v1, v1
-; GFX11-NEXT: v_lshrrev_b32_e32 v2, 16, v2
-; GFX11-NEXT: s_delay_alu instid0(VALU_DEP_3) | instskip(NEXT) | instid1(VALU_DEP_2)
-; GFX11-NEXT: v_add_nc_u32_e32 v4, 0x7fff, v4
-; GFX11-NEXT: v_and_b32_e32 v2, 0xffff, v2
-; GFX11-NEXT: s_delay_alu instid0(VALU_DEP_2) | instskip(NEXT) | instid1(VALU_DEP_1)
-; GFX11-NEXT: v_cndmask_b32_e32 v1, v4, v6, vcc_lo
-; GFX11-NEXT: v_lshrrev_b32_e32 v1, 16, v1
-; GFX11-NEXT: s_delay_alu instid0(VALU_DEP_1)
-; GFX11-NEXT: v_lshl_or_b32 v1, v1, 16, v2
-; GFX11-NEXT: global_store_b32 v0, v1, s[0:1]
-; GFX11-NEXT: s_endpgm
+; GFX11-TRUE16-LABEL: v_fneg_fold_v2bf16:
+; GFX11-TRUE16: ; %bb.0:
+; GFX11-TRUE16-NEXT: s_load_b128 s[0:3], s[4:5], 0x0
+; GFX11-TRUE16-NEXT: v_mov_b32_e32 v0, 0
+; GFX11-TRUE16-NEXT: s_waitcnt lgkmcnt(0)
+; GFX11-TRUE16-NEXT: global_load_b32 v1, v0, s[2:3]
+; GFX11-TRUE16-NEXT: s_waitcnt vmcnt(0)
+; GFX11-TRUE16-NEXT: v_lshrrev_b32_e32 v2, 16, v1
+; GFX11-TRUE16-NEXT: v_xor_b32_e32 v3, 0x8000, v1
+; GFX11-TRUE16-NEXT: v_and_b32_e32 v4, 0xffff0000, v1
+; GFX11-TRUE16-NEXT: v_lshlrev_b32_e32 v1, 16, v1
+; GFX11-TRUE16-NEXT: s_delay_alu instid0(VALU_DEP_4) | instskip(NEXT) | instid1(VALU_DEP_4)
+; GFX11-TRUE16-NEXT: v_xor_b32_e32 v2, 0x8000, v2
+; GFX11-TRUE16-NEXT: v_lshlrev_b32_e32 v3, 16, v3
+; GFX11-TRUE16-NEXT: s_delay_alu instid0(VALU_DEP_1) | instskip(NEXT) | instid1(VALU_DEP_1)
+; GFX11-TRUE16-NEXT: v_dual_mul_f32 v1, v3, v1 :: v_dual_lshlrev_b32 v2, 16, v2
+; GFX11-TRUE16-NEXT: v_mul_f32_e32 v2, v2, v4
+; GFX11-TRUE16-NEXT: s_delay_alu instid0(VALU_DEP_2) | instskip(SKIP_1) | instid1(VALU_DEP_3)
+; GFX11-TRUE16-NEXT: v_bfe_u32 v4, v1, 16, 1
+; GFX11-TRUE16-NEXT: v_or_b32_e32 v6, 0x400000, v1
+; GFX11-TRUE16-NEXT: v_bfe_u32 v3, v2, 16, 1
+; GFX11-TRUE16-NEXT: s_delay_alu instid0(VALU_DEP_3) | instskip(SKIP_2) | instid1(VALU_DEP_4)
+; GFX11-TRUE16-NEXT: v_add_nc_u32_e32 v4, v4, v1
+; GFX11-TRUE16-NEXT: v_or_b32_e32 v5, 0x400000, v2
+; GFX11-TRUE16-NEXT: v_cmp_u_f32_e32 vcc_lo, v2, v2
+; GFX11-TRUE16-NEXT: v_add_nc_u32_e32 v3, v3, v2
+; GFX11-TRUE16-NEXT: s_delay_alu instid0(VALU_DEP_4) | instskip(NEXT) | instid1(VALU_DEP_2)
+; GFX11-TRUE16-NEXT: v_add_nc_u32_e32 v4, 0x7fff, v4
+; GFX11-TRUE16-NEXT: v_add_nc_u32_e32 v3, 0x7fff, v3
+; GFX11-TRUE16-NEXT: s_delay_alu instid0(VALU_DEP_1) | instskip(SKIP_1) | instid1(VALU_DEP_4)
+; GFX11-TRUE16-NEXT: v_cndmask_b32_e32 v2, v3, v5, vcc_lo
+; GFX11-TRUE16-NEXT: v_cmp_u_f32_e32 vcc_lo, v1, v1
+; GFX11-TRUE16-NEXT: v_cndmask_b32_e32 v1, v4, v6, vcc_lo
+; GFX11-TRUE16-NEXT: s_delay_alu instid0(VALU_DEP_3) | instskip(NEXT) | instid1(VALU_DEP_2)
+; GFX11-TRUE16-NEXT: v_lshrrev_b32_e32 v2, 16, v2
+; GFX11-TRUE16-NEXT: v_lshrrev_b32_e32 v1, 16, v1
+; GFX11-TRUE16-NEXT: s_delay_alu instid0(VALU_DEP_2)
+; GFX11-TRUE16-NEXT: v_mov_b16_e32 v1.h, v2.l
+; GFX11-TRUE16-NEXT: global_store_b32 v0, v1, s[0:1]
+; GFX11-TRUE16-NEXT: s_endpgm
+;
+; GFX11-FAKE16-LABEL: v_fneg_fold_v2bf16:
+; GFX11-FAKE16: ; %bb.0:
+; GFX11-FAKE16-NEXT: s_load_b128 s[0:3], s[4:5], 0x0
+; GFX11-FAKE16-NEXT: v_mov_b32_e32 v0, 0
+; GFX11-FAKE16-NEXT: s_waitcnt lgkmcnt(0)
+; GFX11-FAKE16-NEXT: global_load_b32 v1, v0, s[2:3]
+; GFX11-FAKE16-NEXT: s_waitcnt vmcnt(0)
+; GFX11-FAKE16-NEXT: v_lshrrev_b32_e32 v2, 16, v1
+; GFX11-FAKE16-NEXT: v_xor_b32_e32 v3, 0x8000, v1
+; GFX11-FAKE16-NEXT: v_lshlrev_b32_e32 v4, 16, v1
+; GFX11-FAKE16-NEXT: v_and_b32_e32 v1, 0xffff0000, v1
+; GFX11-FAKE16-NEXT: s_delay_alu instid0(VALU_DEP_4) | instskip(NEXT) | instid1(VALU_DEP_4)
+; GFX11-FAKE16-NEXT: v_xor_b32_e32 v2, 0x8000, v2
+; GFX11-FAKE16-NEXT: v_lshlrev_b32_e32 v3, 16, v3
+; GFX11-FAKE16-NEXT: s_delay_alu instid0(VALU_DEP_1) | instskip(NEXT) | instid1(VALU_DEP_1)
+; GFX11-FAKE16-NEXT: v_dual_mul_f32 v3, v3, v4 :: v_dual_lshlrev_b32 v2, 16, v2
+; GFX11-FAKE16-NEXT: v_mul_f32_e32 v1, v2, v1
+; GFX11-FAKE16-NEXT: s_delay_alu instid0(VALU_DEP_2) | instskip(SKIP_2) | instid1(VALU_DEP_3)
+; GFX11-FAKE16-NEXT: v_bfe_u32 v2, v3, 16, 1
+; GFX11-FAKE16-NEXT: v_or_b32_e32 v5, 0x400000, v3
+; GFX11-FAKE16-NEXT: v_cmp_u_f32_e32 vcc_lo, v3, v3
+; GFX11-FAKE16-NEXT: v_add_nc_u32_e32 v2, v2, v3
+; GFX11-FAKE16-NEXT: s_delay_alu instid0(VALU_DEP_1) | instskip(SKIP_2) | instid1(VALU_DEP_3)
+; GFX11-FAKE16-NEXT: v_add_nc_u32_e32 v2, 0x7fff, v2
+; GFX11-FAKE16-NEXT: v_bfe_u32 v4, v1, 16, 1
+; GFX11-FAKE16-NEXT: v_or_b32_e32 v6, 0x400000, v1
+; GFX11-FAKE16-NEXT: v_cndmask_b32_e32 v2, v2, v5, vcc_lo
+; GFX11-FAKE16-NEXT: s_delay_alu instid0(VALU_DEP_3) | instskip(SKIP_1) | instid1(VALU_DEP_3)
+; GFX11-FAKE16-NEXT: v_add_nc_u32_e32 v4, v4, v1
+; GFX11-FAKE16-NEXT: v_cmp_u_f32_e32 vcc_lo, v1, v1
+; GFX11-FAKE16-NEXT: v_lshrrev_b32_e32 v2, 16, v2
+; GFX11-FAKE16-NEXT: s_delay_alu instid0(VALU_DEP_3) | instskip(NEXT) | instid1(VALU_DEP_2)
+; GFX11-FAKE16-NEXT: v_add_nc_u32_e32 v4, 0x7fff, v4
+; GFX11-FAKE16-NEXT: v_and_b32_e32 v2, 0xffff, v2
+; GFX11-FAKE16-NEXT: s_delay_alu instid0(VALU_DEP_2) | instskip(NEXT) | instid1(VALU_DEP_1)
+; GFX11-FAKE16-NEXT: v_cndmask_b32_e32 v1, v4, v6, vcc_lo
+; GFX11-FAKE16-NEXT: v_lshrrev_b32_e32 v1, 16, v1
+; GFX11-FAKE16-NEXT: s_delay_alu instid0(VALU_DEP_1)
+; GFX11-FAKE16-NEXT: v_lshl_or_b32 v1, v1, 16, v2
+; GFX11-FAKE16-NEXT: global_store_b32 v0, v1, s[0:1]
+; GFX11-FAKE16-NEXT: s_endpgm
%val = load <2 x bfloat>, ptr addrspace(1) %in
%fsub = fsub <2 x bfloat> <bfloat -0.0, bfloat -0.0>, %val
%fmul = fmul <2 x bfloat> %fsub, %val
diff --git a/llvm/test/CodeGen/AMDGPU/fptosi.f16.ll b/llvm/test/CodeGen/AMDGPU/fptosi.f16.ll
index f048dc5..a43292d 100644
--- a/llvm/test/CodeGen/AMDGPU/fptosi.f16.ll
+++ b/llvm/test/CodeGen/AMDGPU/fptosi.f16.ll
@@ -330,11 +330,8 @@ define amdgpu_kernel void @fptosi_v2f16_to_v2i16(
; GFX11-TRUE16-NEXT: s_waitcnt vmcnt(0)
; GFX11-TRUE16-NEXT: v_lshrrev_b32_e32 v1, 16, v0
; GFX11-TRUE16-NEXT: v_cvt_i16_f16_e32 v0.l, v0.l
-; GFX11-TRUE16-NEXT: s_delay_alu instid0(VALU_DEP_2) | instskip(NEXT) | instid1(VALU_DEP_2)
-; GFX11-TRUE16-NEXT: v_cvt_i16_f16_e32 v1.l, v1.l
-; GFX11-TRUE16-NEXT: v_and_b32_e32 v0, 0xffff, v0
-; GFX11-TRUE16-NEXT: s_delay_alu instid0(VALU_DEP_1)
-; GFX11-TRUE16-NEXT: v_lshl_or_b32 v0, v1, 16, v0
+; GFX11-TRUE16-NEXT: s_delay_alu instid0(VALU_DEP_2)
+; GFX11-TRUE16-NEXT: v_cvt_i16_f16_e32 v0.h, v1.l
; GFX11-TRUE16-NEXT: buffer_store_b32 v0, off, s[4:7], 0
; GFX11-TRUE16-NEXT: s_endpgm
;
diff --git a/llvm/test/CodeGen/AMDGPU/fptoui.f16.ll b/llvm/test/CodeGen/AMDGPU/fptoui.f16.ll
index 96abb3a..96cb621 100644
--- a/llvm/test/CodeGen/AMDGPU/fptoui.f16.ll
+++ b/llvm/test/CodeGen/AMDGPU/fptoui.f16.ll
@@ -329,11 +329,8 @@ define amdgpu_kernel void @fptoui_v2f16_to_v2i16(
; GFX11-TRUE16-NEXT: s_waitcnt vmcnt(0)
; GFX11-TRUE16-NEXT: v_lshrrev_b32_e32 v1, 16, v0
; GFX11-TRUE16-NEXT: v_cvt_u16_f16_e32 v0.l, v0.l
-; GFX11-TRUE16-NEXT: s_delay_alu instid0(VALU_DEP_2) | instskip(NEXT) | instid1(VALU_DEP_2)
-; GFX11-TRUE16-NEXT: v_cvt_u16_f16_e32 v1.l, v1.l
-; GFX11-TRUE16-NEXT: v_and_b32_e32 v0, 0xffff, v0
-; GFX11-TRUE16-NEXT: s_delay_alu instid0(VALU_DEP_1)
-; GFX11-TRUE16-NEXT: v_lshl_or_b32 v0, v1, 16, v0
+; GFX11-TRUE16-NEXT: s_delay_alu instid0(VALU_DEP_2)
+; GFX11-TRUE16-NEXT: v_cvt_u16_f16_e32 v0.h, v1.l
; GFX11-TRUE16-NEXT: buffer_store_b32 v0, off, s[4:7], 0
; GFX11-TRUE16-NEXT: s_endpgm
;
diff --git a/llvm/test/CodeGen/AMDGPU/frem.ll b/llvm/test/CodeGen/AMDGPU/frem.ll
index 415828f..35d178c 100644
--- a/llvm/test/CodeGen/AMDGPU/frem.ll
+++ b/llvm/test/CodeGen/AMDGPU/frem.ll
@@ -5972,16 +5972,14 @@ define amdgpu_kernel void @frem_v2f16(ptr addrspace(1) %out, ptr addrspace(1) %i
; GFX11-TRUE16-NEXT: .LBB9_16: ; %Flow54
; GFX11-TRUE16-NEXT: v_cmp_lg_f16_e32 vcc_lo, 0, v1.l
; GFX11-TRUE16-NEXT: v_cmp_nle_f16_e64 s2, 0x7c00, |v0.l|
+; GFX11-TRUE16-NEXT: v_mov_b32_e32 v1, 0
; GFX11-TRUE16-NEXT: s_and_b32 s2, s2, vcc_lo
; GFX11-TRUE16-NEXT: v_cmp_lg_f16_e32 vcc_lo, 0, v4.l
; GFX11-TRUE16-NEXT: v_cndmask_b16 v0.l, 0x7e00, v2.l, s2
; GFX11-TRUE16-NEXT: v_cmp_nle_f16_e64 s2, 0x7c00, |v3.l|
-; GFX11-TRUE16-NEXT: s_delay_alu instid0(VALU_DEP_2) | instskip(SKIP_1) | instid1(SALU_CYCLE_1)
-; GFX11-TRUE16-NEXT: v_dual_mov_b32 v1, 0 :: v_dual_and_b32 v0, 0xffff, v0
; GFX11-TRUE16-NEXT: s_and_b32 s2, s2, vcc_lo
-; GFX11-TRUE16-NEXT: v_cndmask_b16 v2.l, 0x7e00, v7.l, s2
-; GFX11-TRUE16-NEXT: s_delay_alu instid0(VALU_DEP_1)
-; GFX11-TRUE16-NEXT: v_lshl_or_b32 v0, v2, 16, v0
+; GFX11-TRUE16-NEXT: s_delay_alu instid0(SALU_CYCLE_1)
+; GFX11-TRUE16-NEXT: v_cndmask_b16 v0.h, 0x7e00, v7.l, s2
; GFX11-TRUE16-NEXT: global_store_b32 v1, v0, s[0:1]
; GFX11-TRUE16-NEXT: s_endpgm
;
@@ -6422,19 +6420,16 @@ define amdgpu_kernel void @frem_v2f16(ptr addrspace(1) %out, ptr addrspace(1) %i
; GFX1150-TRUE16-NEXT: s_cselect_b32 s3, -1, 0
; GFX1150-TRUE16-NEXT: s_cmp_nge_f16 s2, 0x7c00
; GFX1150-TRUE16-NEXT: s_cselect_b32 s2, -1, 0
-; GFX1150-TRUE16-NEXT: s_delay_alu instid0(SALU_CYCLE_1) | instskip(SKIP_4) | instid1(VALU_DEP_1)
+; GFX1150-TRUE16-NEXT: s_delay_alu instid0(SALU_CYCLE_1)
; GFX1150-TRUE16-NEXT: s_and_b32 s2, s2, s3
; GFX1150-TRUE16-NEXT: s_cmp_lg_f16 s5, 0
; GFX1150-TRUE16-NEXT: v_cndmask_b16 v0.l, 0x7e00, v0.l, s2
; GFX1150-TRUE16-NEXT: s_cselect_b32 s2, -1, 0
; GFX1150-TRUE16-NEXT: s_cmp_nge_f16 s4, 0x7c00
-; GFX1150-TRUE16-NEXT: v_and_b32_e32 v0, 0xffff, v0
; GFX1150-TRUE16-NEXT: s_cselect_b32 s3, -1, 0
; GFX1150-TRUE16-NEXT: s_delay_alu instid0(SALU_CYCLE_1) | instskip(NEXT) | instid1(SALU_CYCLE_1)
; GFX1150-TRUE16-NEXT: s_and_b32 s2, s3, s2
-; GFX1150-TRUE16-NEXT: v_cndmask_b16 v1.l, 0x7e00, v1.l, s2
-; GFX1150-TRUE16-NEXT: s_delay_alu instid0(VALU_DEP_1)
-; GFX1150-TRUE16-NEXT: v_lshl_or_b32 v0, v1, 16, v0
+; GFX1150-TRUE16-NEXT: v_cndmask_b16 v0.h, 0x7e00, v1.l, s2
; GFX1150-TRUE16-NEXT: global_store_b32 v2, v0, s[0:1]
; GFX1150-TRUE16-NEXT: s_endpgm
;
@@ -6902,20 +6897,17 @@ define amdgpu_kernel void @frem_v2f16(ptr addrspace(1) %out, ptr addrspace(1) %i
; GFX1200-TRUE16-NEXT: s_cselect_b32 s3, -1, 0
; GFX1200-TRUE16-NEXT: s_cmp_nge_f16 s2, 0x7c00
; GFX1200-TRUE16-NEXT: s_cselect_b32 s2, -1, 0
-; GFX1200-TRUE16-NEXT: s_delay_alu instid0(SALU_CYCLE_1) | instskip(SKIP_4) | instid1(VALU_DEP_1)
+; GFX1200-TRUE16-NEXT: s_delay_alu instid0(SALU_CYCLE_1)
; GFX1200-TRUE16-NEXT: s_and_b32 s2, s2, s3
; GFX1200-TRUE16-NEXT: s_cmp_lg_f16 s5, 0
; GFX1200-TRUE16-NEXT: v_cndmask_b16 v0.l, 0x7e00, v0.l, s2
; GFX1200-TRUE16-NEXT: s_cselect_b32 s2, -1, 0
; GFX1200-TRUE16-NEXT: s_cmp_nge_f16 s4, 0x7c00
-; GFX1200-TRUE16-NEXT: v_and_b32_e32 v0, 0xffff, v0
; GFX1200-TRUE16-NEXT: s_cselect_b32 s3, -1, 0
; GFX1200-TRUE16-NEXT: s_wait_alu 0xfffe
; GFX1200-TRUE16-NEXT: s_and_b32 s2, s3, s2
; GFX1200-TRUE16-NEXT: s_wait_alu 0xfffe
-; GFX1200-TRUE16-NEXT: v_cndmask_b16 v1.l, 0x7e00, v1.l, s2
-; GFX1200-TRUE16-NEXT: s_delay_alu instid0(VALU_DEP_1)
-; GFX1200-TRUE16-NEXT: v_lshl_or_b32 v0, v1, 16, v0
+; GFX1200-TRUE16-NEXT: v_cndmask_b16 v0.h, 0x7e00, v1.l, s2
; GFX1200-TRUE16-NEXT: global_store_b32 v2, v0, s[0:1]
; GFX1200-TRUE16-NEXT: s_endpgm
;
@@ -9346,29 +9338,23 @@ define amdgpu_kernel void @frem_v4f16(ptr addrspace(1) %out, ptr addrspace(1) %i
; GFX11-TRUE16-NEXT: .LBB10_32: ; %Flow124
; GFX11-TRUE16-NEXT: v_cmp_lg_f16_e32 vcc_lo, 0, v2.l
; GFX11-TRUE16-NEXT: v_cmp_nle_f16_e64 s2, 0x7c00, |v0.l|
+; GFX11-TRUE16-NEXT: v_mov_b32_e32 v2, 0
; GFX11-TRUE16-NEXT: s_and_b32 s2, s2, vcc_lo
; GFX11-TRUE16-NEXT: v_cmp_lg_f16_e32 vcc_lo, 0, v6.l
; GFX11-TRUE16-NEXT: v_cndmask_b16 v0.l, 0x7e00, v4.l, s2
; GFX11-TRUE16-NEXT: v_cmp_nle_f16_e64 s2, 0x7c00, |v5.l|
-; GFX11-TRUE16-NEXT: s_delay_alu instid0(VALU_DEP_2)
-; GFX11-TRUE16-NEXT: v_and_b32_e32 v0, 0xffff, v0
; GFX11-TRUE16-NEXT: s_and_b32 s2, s2, vcc_lo
; GFX11-TRUE16-NEXT: v_cmp_lg_f16_e32 vcc_lo, 0, v3.l
-; GFX11-TRUE16-NEXT: v_cndmask_b16 v2.l, 0x7e00, v7.l, s2
+; GFX11-TRUE16-NEXT: v_cndmask_b16 v0.h, 0x7e00, v7.l, s2
; GFX11-TRUE16-NEXT: v_cmp_nle_f16_e64 s2, 0x7c00, |v1.l|
-; GFX11-TRUE16-NEXT: v_mov_b32_e32 v3, 0
-; GFX11-TRUE16-NEXT: s_delay_alu instid0(VALU_DEP_3) | instskip(SKIP_4) | instid1(VALU_DEP_2)
-; GFX11-TRUE16-NEXT: v_lshl_or_b32 v0, v2, 16, v0
; GFX11-TRUE16-NEXT: s_and_b32 s2, s2, vcc_lo
; GFX11-TRUE16-NEXT: v_cmp_lg_f16_e32 vcc_lo, 0, v10.l
; GFX11-TRUE16-NEXT: v_cndmask_b16 v1.l, 0x7e00, v8.l, s2
; GFX11-TRUE16-NEXT: v_cmp_nle_f16_e64 s2, 0x7c00, |v9.l|
-; GFX11-TRUE16-NEXT: v_and_b32_e32 v1, 0xffff, v1
; GFX11-TRUE16-NEXT: s_and_b32 s2, s2, vcc_lo
-; GFX11-TRUE16-NEXT: s_delay_alu instid0(SALU_CYCLE_1) | instskip(NEXT) | instid1(VALU_DEP_1)
-; GFX11-TRUE16-NEXT: v_cndmask_b16 v4.l, 0x7e00, v11.l, s2
-; GFX11-TRUE16-NEXT: v_lshl_or_b32 v1, v4, 16, v1
-; GFX11-TRUE16-NEXT: global_store_b64 v3, v[0:1], s[0:1]
+; GFX11-TRUE16-NEXT: s_delay_alu instid0(SALU_CYCLE_1)
+; GFX11-TRUE16-NEXT: v_cndmask_b16 v1.h, 0x7e00, v11.l, s2
+; GFX11-TRUE16-NEXT: global_store_b64 v2, v[0:1], s[0:1]
; GFX11-TRUE16-NEXT: s_endpgm
;
; GFX11-FAKE16-LABEL: frem_v4f16:
@@ -10209,21 +10195,19 @@ define amdgpu_kernel void @frem_v4f16(ptr addrspace(1) %out, ptr addrspace(1) %i
; GFX1150-TRUE16-NEXT: s_cselect_b32 s4, -1, 0
; GFX1150-TRUE16-NEXT: s_cmp_nge_f16 s3, 0x7c00
; GFX1150-TRUE16-NEXT: s_cselect_b32 s3, -1, 0
-; GFX1150-TRUE16-NEXT: s_delay_alu instid0(SALU_CYCLE_1) | instskip(SKIP_4) | instid1(VALU_DEP_1)
+; GFX1150-TRUE16-NEXT: s_delay_alu instid0(SALU_CYCLE_1)
; GFX1150-TRUE16-NEXT: s_and_b32 s3, s3, s4
; GFX1150-TRUE16-NEXT: s_cmp_lg_f16 s6, 0
; GFX1150-TRUE16-NEXT: v_cndmask_b16 v0.l, 0x7e00, v0.l, s3
; GFX1150-TRUE16-NEXT: s_cselect_b32 s3, -1, 0
; GFX1150-TRUE16-NEXT: s_cmp_nge_f16 s5, 0x7c00
-; GFX1150-TRUE16-NEXT: v_and_b32_e32 v0, 0xffff, v0
; GFX1150-TRUE16-NEXT: s_cselect_b32 s4, -1, 0
-; GFX1150-TRUE16-NEXT: s_delay_alu instid0(SALU_CYCLE_1) | instskip(SKIP_4) | instid1(VALU_DEP_1)
+; GFX1150-TRUE16-NEXT: s_delay_alu instid0(SALU_CYCLE_1)
; GFX1150-TRUE16-NEXT: s_and_b32 s3, s4, s3
; GFX1150-TRUE16-NEXT: s_cmp_lg_f16 s2, 0
-; GFX1150-TRUE16-NEXT: v_cndmask_b16 v4.l, 0x7e00, v1.l, s3
+; GFX1150-TRUE16-NEXT: v_cndmask_b16 v0.h, 0x7e00, v1.l, s3
; GFX1150-TRUE16-NEXT: s_cselect_b32 s2, -1, 0
; GFX1150-TRUE16-NEXT: s_cmp_nge_f16 s8, 0x7c00
-; GFX1150-TRUE16-NEXT: v_lshl_or_b32 v0, v4, 16, v0
; GFX1150-TRUE16-NEXT: s_cselect_b32 s3, -1, 0
; GFX1150-TRUE16-NEXT: s_delay_alu instid0(SALU_CYCLE_1)
; GFX1150-TRUE16-NEXT: s_and_b32 s2, s3, s2
@@ -10232,13 +10216,10 @@ define amdgpu_kernel void @frem_v4f16(ptr addrspace(1) %out, ptr addrspace(1) %i
; GFX1150-TRUE16-NEXT: v_mov_b32_e32 v2, 0
; GFX1150-TRUE16-NEXT: s_cselect_b32 s2, -1, 0
; GFX1150-TRUE16-NEXT: s_cmp_nge_f16 s7, 0x7c00
-; GFX1150-TRUE16-NEXT: s_delay_alu instid0(VALU_DEP_2) | instskip(SKIP_1) | instid1(SALU_CYCLE_1)
-; GFX1150-TRUE16-NEXT: v_and_b32_e32 v1, 0xffff, v1
; GFX1150-TRUE16-NEXT: s_cselect_b32 s3, -1, 0
+; GFX1150-TRUE16-NEXT: s_delay_alu instid0(SALU_CYCLE_1) | instskip(NEXT) | instid1(SALU_CYCLE_1)
; GFX1150-TRUE16-NEXT: s_and_b32 s2, s3, s2
-; GFX1150-TRUE16-NEXT: s_delay_alu instid0(SALU_CYCLE_1) | instskip(NEXT) | instid1(VALU_DEP_1)
-; GFX1150-TRUE16-NEXT: v_cndmask_b16 v3.l, 0x7e00, v3.l, s2
-; GFX1150-TRUE16-NEXT: v_lshl_or_b32 v1, v3, 16, v1
+; GFX1150-TRUE16-NEXT: v_cndmask_b16 v1.h, 0x7e00, v3.l, s2
; GFX1150-TRUE16-NEXT: global_store_b64 v2, v[0:1], s[0:1]
; GFX1150-TRUE16-NEXT: s_endpgm
;
@@ -11147,18 +11128,14 @@ define amdgpu_kernel void @frem_v4f16(ptr addrspace(1) %out, ptr addrspace(1) %i
; GFX1200-TRUE16-NEXT: v_cndmask_b16 v0.l, 0x7e00, v0.l, s3
; GFX1200-TRUE16-NEXT: s_cselect_b32 s3, -1, 0
; GFX1200-TRUE16-NEXT: s_cmp_nge_f16 s5, 0x7c00
-; GFX1200-TRUE16-NEXT: s_delay_alu instid0(VALU_DEP_1)
-; GFX1200-TRUE16-NEXT: v_and_b32_e32 v0, 0xffff, v0
; GFX1200-TRUE16-NEXT: s_cselect_b32 s4, -1, 0
; GFX1200-TRUE16-NEXT: s_wait_alu 0xfffe
; GFX1200-TRUE16-NEXT: s_and_b32 s3, s4, s3
; GFX1200-TRUE16-NEXT: s_cmp_lg_f16 s2, 0
; GFX1200-TRUE16-NEXT: s_wait_alu 0xfffe
-; GFX1200-TRUE16-NEXT: v_cndmask_b16 v4.l, 0x7e00, v1.l, s3
+; GFX1200-TRUE16-NEXT: v_cndmask_b16 v0.h, 0x7e00, v1.l, s3
; GFX1200-TRUE16-NEXT: s_cselect_b32 s2, -1, 0
; GFX1200-TRUE16-NEXT: s_cmp_nge_f16 s8, 0x7c00
-; GFX1200-TRUE16-NEXT: s_delay_alu instid0(VALU_DEP_1)
-; GFX1200-TRUE16-NEXT: v_lshl_or_b32 v0, v4, 16, v0
; GFX1200-TRUE16-NEXT: s_cselect_b32 s3, -1, 0
; GFX1200-TRUE16-NEXT: s_wait_alu 0xfffe
; GFX1200-TRUE16-NEXT: s_and_b32 s2, s3, s2
@@ -11168,15 +11145,11 @@ define amdgpu_kernel void @frem_v4f16(ptr addrspace(1) %out, ptr addrspace(1) %i
; GFX1200-TRUE16-NEXT: v_mov_b32_e32 v2, 0
; GFX1200-TRUE16-NEXT: s_cselect_b32 s2, -1, 0
; GFX1200-TRUE16-NEXT: s_cmp_nge_f16 s7, 0x7c00
-; GFX1200-TRUE16-NEXT: s_delay_alu instid0(VALU_DEP_2)
-; GFX1200-TRUE16-NEXT: v_and_b32_e32 v1, 0xffff, v1
; GFX1200-TRUE16-NEXT: s_cselect_b32 s3, -1, 0
; GFX1200-TRUE16-NEXT: s_wait_alu 0xfffe
; GFX1200-TRUE16-NEXT: s_and_b32 s2, s3, s2
; GFX1200-TRUE16-NEXT: s_wait_alu 0xfffe
-; GFX1200-TRUE16-NEXT: v_cndmask_b16 v3.l, 0x7e00, v3.l, s2
-; GFX1200-TRUE16-NEXT: s_delay_alu instid0(VALU_DEP_1)
-; GFX1200-TRUE16-NEXT: v_lshl_or_b32 v1, v3, 16, v1
+; GFX1200-TRUE16-NEXT: v_cndmask_b16 v1.h, 0x7e00, v3.l, s2
; GFX1200-TRUE16-NEXT: global_store_b64 v2, v[0:1], s[0:1]
; GFX1200-TRUE16-NEXT: s_endpgm
;
diff --git a/llvm/test/CodeGen/AMDGPU/insert_vector_elt.v2i16.ll b/llvm/test/CodeGen/AMDGPU/insert_vector_elt.v2i16.ll
index 792d7db..76016e4 100644
--- a/llvm/test/CodeGen/AMDGPU/insert_vector_elt.v2i16.ll
+++ b/llvm/test/CodeGen/AMDGPU/insert_vector_elt.v2i16.ll
@@ -850,15 +850,13 @@ define amdgpu_kernel void @v_insertelement_v2i16_0_reghi(ptr addrspace(1) %out,
; GFX11-TRUE16-NEXT: s_load_b128 s[0:3], s[4:5], 0x0
; GFX11-TRUE16-NEXT: v_and_b32_e32 v0, 0x3ff, v0
; GFX11-TRUE16-NEXT: s_load_b32 s4, s[4:5], 0x10
-; GFX11-TRUE16-NEXT: s_delay_alu instid0(VALU_DEP_1)
+; GFX11-TRUE16-NEXT: s_delay_alu instid0(VALU_DEP_1) | instskip(SKIP_4) | instid1(VALU_DEP_1)
; GFX11-TRUE16-NEXT: v_lshlrev_b32_e32 v0, 2, v0
; GFX11-TRUE16-NEXT: s_waitcnt lgkmcnt(0)
; GFX11-TRUE16-NEXT: global_load_b32 v1, v0, s[2:3]
-; GFX11-TRUE16-NEXT: v_lshrrev_b32_e64 v2, 16, s4
+; GFX11-TRUE16-NEXT: v_mov_b32_e32 v2, s4
; GFX11-TRUE16-NEXT: s_waitcnt vmcnt(0)
-; GFX11-TRUE16-NEXT: v_mov_b16_e32 v1.l, v1.h
-; GFX11-TRUE16-NEXT: s_delay_alu instid0(VALU_DEP_1)
-; GFX11-TRUE16-NEXT: v_lshl_or_b32 v1, v1, 16, v2
+; GFX11-TRUE16-NEXT: v_mov_b16_e32 v1.l, v2.h
; GFX11-TRUE16-NEXT: global_store_b32 v0, v1, s[0:1]
; GFX11-TRUE16-NEXT: s_endpgm
;
diff --git a/llvm/test/CodeGen/AMDGPU/llvm.amdgcn.raw.atomic.buffer.load.ll b/llvm/test/CodeGen/AMDGPU/llvm.amdgcn.raw.atomic.buffer.load.ll
index a2c1545..447a5f2 100644
--- a/llvm/test/CodeGen/AMDGPU/llvm.amdgcn.raw.atomic.buffer.load.ll
+++ b/llvm/test/CodeGen/AMDGPU/llvm.amdgcn.raw.atomic.buffer.load.ll
@@ -361,12 +361,10 @@ define amdgpu_kernel void @raw_atomic_buffer_load_v4i16(<4 x i32> %addr) {
; GFX11-SDAG-TRUE16-NEXT: s_waitcnt lgkmcnt(0)
; GFX11-SDAG-TRUE16-NEXT: buffer_load_b64 v[1:2], off, s[0:3], 0 offset:4 glc
; GFX11-SDAG-TRUE16-NEXT: s_waitcnt vmcnt(0)
-; GFX11-SDAG-TRUE16-NEXT: v_and_b32_e32 v1, 0xffff, v1
-; GFX11-SDAG-TRUE16-NEXT: s_delay_alu instid0(VALU_DEP_1) | instskip(NEXT) | instid1(VALU_DEP_1)
-; GFX11-SDAG-TRUE16-NEXT: v_lshl_or_b32 v1, v2, 16, v1
+; GFX11-SDAG-TRUE16-NEXT: v_mov_b16_e32 v1.h, v2.l
+; GFX11-SDAG-TRUE16-NEXT: s_delay_alu instid0(VALU_DEP_1) | instskip(SKIP_1) | instid1(SALU_CYCLE_1)
; GFX11-SDAG-TRUE16-NEXT: v_cmp_ne_u32_e32 vcc_lo, v1, v0
; GFX11-SDAG-TRUE16-NEXT: s_or_b32 s4, vcc_lo, s4
-; GFX11-SDAG-TRUE16-NEXT: s_delay_alu instid0(SALU_CYCLE_1)
; GFX11-SDAG-TRUE16-NEXT: s_and_not1_b32 exec_lo, exec_lo, s4
; GFX11-SDAG-TRUE16-NEXT: s_cbranch_execnz .LBB7_1
; GFX11-SDAG-TRUE16-NEXT: ; %bb.2: ; %bb2
@@ -444,12 +442,10 @@ define amdgpu_kernel void @raw_atomic_buffer_load_v4i16(<4 x i32> %addr) {
; GFX12-SDAG-TRUE16-NEXT: s_wait_kmcnt 0x0
; GFX12-SDAG-TRUE16-NEXT: buffer_load_b64 v[2:3], off, s[0:3], null offset:4 th:TH_LOAD_NT
; GFX12-SDAG-TRUE16-NEXT: s_wait_loadcnt 0x0
-; GFX12-SDAG-TRUE16-NEXT: v_and_b32_e32 v1, 0xffff, v2
-; GFX12-SDAG-TRUE16-NEXT: s_delay_alu instid0(VALU_DEP_1) | instskip(NEXT) | instid1(VALU_DEP_1)
-; GFX12-SDAG-TRUE16-NEXT: v_lshl_or_b32 v1, v3, 16, v1
-; GFX12-SDAG-TRUE16-NEXT: v_cmp_ne_u32_e32 vcc_lo, v1, v0
+; GFX12-SDAG-TRUE16-NEXT: v_mov_b16_e32 v2.h, v3.l
+; GFX12-SDAG-TRUE16-NEXT: s_delay_alu instid0(VALU_DEP_1) | instskip(SKIP_1) | instid1(SALU_CYCLE_1)
+; GFX12-SDAG-TRUE16-NEXT: v_cmp_ne_u32_e32 vcc_lo, v2, v0
; GFX12-SDAG-TRUE16-NEXT: s_or_b32 s4, vcc_lo, s4
-; GFX12-SDAG-TRUE16-NEXT: s_delay_alu instid0(SALU_CYCLE_1)
; GFX12-SDAG-TRUE16-NEXT: s_and_not1_b32 exec_lo, exec_lo, s4
; GFX12-SDAG-TRUE16-NEXT: s_cbranch_execnz .LBB7_1
; GFX12-SDAG-TRUE16-NEXT: ; %bb.2: ; %bb2
diff --git a/llvm/test/CodeGen/AMDGPU/llvm.amdgcn.raw.ptr.atomic.buffer.load.ll b/llvm/test/CodeGen/AMDGPU/llvm.amdgcn.raw.ptr.atomic.buffer.load.ll
index 6f7c001..2e0e420 100644
--- a/llvm/test/CodeGen/AMDGPU/llvm.amdgcn.raw.ptr.atomic.buffer.load.ll
+++ b/llvm/test/CodeGen/AMDGPU/llvm.amdgcn.raw.ptr.atomic.buffer.load.ll
@@ -361,12 +361,10 @@ define amdgpu_kernel void @raw_ptr_atomic_buffer_load_v4i16(ptr addrspace(8) %pt
; GFX11-SDAG-TRUE16-NEXT: s_waitcnt lgkmcnt(0)
; GFX11-SDAG-TRUE16-NEXT: buffer_load_b64 v[1:2], off, s[0:3], 0 offset:4 glc
; GFX11-SDAG-TRUE16-NEXT: s_waitcnt vmcnt(0)
-; GFX11-SDAG-TRUE16-NEXT: v_and_b32_e32 v1, 0xffff, v1
-; GFX11-SDAG-TRUE16-NEXT: s_delay_alu instid0(VALU_DEP_1) | instskip(NEXT) | instid1(VALU_DEP_1)
-; GFX11-SDAG-TRUE16-NEXT: v_lshl_or_b32 v1, v2, 16, v1
+; GFX11-SDAG-TRUE16-NEXT: v_mov_b16_e32 v1.h, v2.l
+; GFX11-SDAG-TRUE16-NEXT: s_delay_alu instid0(VALU_DEP_1) | instskip(SKIP_1) | instid1(SALU_CYCLE_1)
; GFX11-SDAG-TRUE16-NEXT: v_cmp_ne_u32_e32 vcc_lo, v1, v0
; GFX11-SDAG-TRUE16-NEXT: s_or_b32 s4, vcc_lo, s4
-; GFX11-SDAG-TRUE16-NEXT: s_delay_alu instid0(SALU_CYCLE_1)
; GFX11-SDAG-TRUE16-NEXT: s_and_not1_b32 exec_lo, exec_lo, s4
; GFX11-SDAG-TRUE16-NEXT: s_cbranch_execnz .LBB7_1
; GFX11-SDAG-TRUE16-NEXT: ; %bb.2: ; %bb2
@@ -444,12 +442,10 @@ define amdgpu_kernel void @raw_ptr_atomic_buffer_load_v4i16(ptr addrspace(8) %pt
; GFX12-SDAG-TRUE16-NEXT: s_wait_kmcnt 0x0
; GFX12-SDAG-TRUE16-NEXT: buffer_load_b64 v[2:3], off, s[0:3], null offset:4 th:TH_LOAD_NT
; GFX12-SDAG-TRUE16-NEXT: s_wait_loadcnt 0x0
-; GFX12-SDAG-TRUE16-NEXT: v_and_b32_e32 v1, 0xffff, v2
-; GFX12-SDAG-TRUE16-NEXT: s_delay_alu instid0(VALU_DEP_1) | instskip(NEXT) | instid1(VALU_DEP_1)
-; GFX12-SDAG-TRUE16-NEXT: v_lshl_or_b32 v1, v3, 16, v1
-; GFX12-SDAG-TRUE16-NEXT: v_cmp_ne_u32_e32 vcc_lo, v1, v0
+; GFX12-SDAG-TRUE16-NEXT: v_mov_b16_e32 v2.h, v3.l
+; GFX12-SDAG-TRUE16-NEXT: s_delay_alu instid0(VALU_DEP_1) | instskip(SKIP_1) | instid1(SALU_CYCLE_1)
+; GFX12-SDAG-TRUE16-NEXT: v_cmp_ne_u32_e32 vcc_lo, v2, v0
; GFX12-SDAG-TRUE16-NEXT: s_or_b32 s4, vcc_lo, s4
-; GFX12-SDAG-TRUE16-NEXT: s_delay_alu instid0(SALU_CYCLE_1)
; GFX12-SDAG-TRUE16-NEXT: s_and_not1_b32 exec_lo, exec_lo, s4
; GFX12-SDAG-TRUE16-NEXT: s_cbranch_execnz .LBB7_1
; GFX12-SDAG-TRUE16-NEXT: ; %bb.2: ; %bb2
diff --git a/llvm/test/CodeGen/AMDGPU/llvm.amdgcn.struct.atomic.buffer.load.ll b/llvm/test/CodeGen/AMDGPU/llvm.amdgcn.struct.atomic.buffer.load.ll
index 8896364..ebb33684 100644
--- a/llvm/test/CodeGen/AMDGPU/llvm.amdgcn.struct.atomic.buffer.load.ll
+++ b/llvm/test/CodeGen/AMDGPU/llvm.amdgcn.struct.atomic.buffer.load.ll
@@ -455,12 +455,10 @@ define amdgpu_kernel void @struct_atomic_buffer_load_v4i16(<4 x i32> %addr, i32
; GFX11-SDAG-TRUE16-NEXT: ; =>This Inner Loop Header: Depth=1
; GFX11-SDAG-TRUE16-NEXT: buffer_load_b64 v[2:3], v1, s[0:3], 0 idxen offset:4 glc
; GFX11-SDAG-TRUE16-NEXT: s_waitcnt vmcnt(0)
-; GFX11-SDAG-TRUE16-NEXT: v_and_b32_e32 v2, 0xffff, v2
-; GFX11-SDAG-TRUE16-NEXT: s_delay_alu instid0(VALU_DEP_1) | instskip(NEXT) | instid1(VALU_DEP_1)
-; GFX11-SDAG-TRUE16-NEXT: v_lshl_or_b32 v2, v3, 16, v2
+; GFX11-SDAG-TRUE16-NEXT: v_mov_b16_e32 v2.h, v3.l
+; GFX11-SDAG-TRUE16-NEXT: s_delay_alu instid0(VALU_DEP_1) | instskip(SKIP_1) | instid1(SALU_CYCLE_1)
; GFX11-SDAG-TRUE16-NEXT: v_cmp_ne_u32_e32 vcc_lo, v2, v0
; GFX11-SDAG-TRUE16-NEXT: s_or_b32 s4, vcc_lo, s4
-; GFX11-SDAG-TRUE16-NEXT: s_delay_alu instid0(SALU_CYCLE_1)
; GFX11-SDAG-TRUE16-NEXT: s_and_not1_b32 exec_lo, exec_lo, s4
; GFX11-SDAG-TRUE16-NEXT: s_cbranch_execnz .LBB8_1
; GFX11-SDAG-TRUE16-NEXT: ; %bb.2: ; %bb2
@@ -550,12 +548,10 @@ define amdgpu_kernel void @struct_atomic_buffer_load_v4i16(<4 x i32> %addr, i32
; GFX12-SDAG-TRUE16-NEXT: ; =>This Inner Loop Header: Depth=1
; GFX12-SDAG-TRUE16-NEXT: buffer_load_b64 v[2:3], v1, s[0:3], null idxen offset:4 th:TH_LOAD_NT
; GFX12-SDAG-TRUE16-NEXT: s_wait_loadcnt 0x0
-; GFX12-SDAG-TRUE16-NEXT: v_and_b32_e32 v2, 0xffff, v2
-; GFX12-SDAG-TRUE16-NEXT: s_delay_alu instid0(VALU_DEP_1) | instskip(NEXT) | instid1(VALU_DEP_1)
-; GFX12-SDAG-TRUE16-NEXT: v_lshl_or_b32 v2, v3, 16, v2
+; GFX12-SDAG-TRUE16-NEXT: v_mov_b16_e32 v2.h, v3.l
+; GFX12-SDAG-TRUE16-NEXT: s_delay_alu instid0(VALU_DEP_1) | instskip(SKIP_1) | instid1(SALU_CYCLE_1)
; GFX12-SDAG-TRUE16-NEXT: v_cmp_ne_u32_e32 vcc_lo, v2, v0
; GFX12-SDAG-TRUE16-NEXT: s_or_b32 s4, vcc_lo, s4
-; GFX12-SDAG-TRUE16-NEXT: s_delay_alu instid0(SALU_CYCLE_1)
; GFX12-SDAG-TRUE16-NEXT: s_and_not1_b32 exec_lo, exec_lo, s4
; GFX12-SDAG-TRUE16-NEXT: s_cbranch_execnz .LBB8_1
; GFX12-SDAG-TRUE16-NEXT: ; %bb.2: ; %bb2
diff --git a/llvm/test/CodeGen/AMDGPU/llvm.amdgcn.struct.ptr.atomic.buffer.load.ll b/llvm/test/CodeGen/AMDGPU/llvm.amdgcn.struct.ptr.atomic.buffer.load.ll
index 23db247..40be567 100644
--- a/llvm/test/CodeGen/AMDGPU/llvm.amdgcn.struct.ptr.atomic.buffer.load.ll
+++ b/llvm/test/CodeGen/AMDGPU/llvm.amdgcn.struct.ptr.atomic.buffer.load.ll
@@ -455,12 +455,10 @@ define amdgpu_kernel void @struct_ptr_atomic_buffer_load_v4i16(ptr addrspace(8)
; GFX11-SDAG-TRUE16-NEXT: ; =>This Inner Loop Header: Depth=1
; GFX11-SDAG-TRUE16-NEXT: buffer_load_b64 v[2:3], v1, s[0:3], 0 idxen offset:4 glc
; GFX11-SDAG-TRUE16-NEXT: s_waitcnt vmcnt(0)
-; GFX11-SDAG-TRUE16-NEXT: v_and_b32_e32 v2, 0xffff, v2
-; GFX11-SDAG-TRUE16-NEXT: s_delay_alu instid0(VALU_DEP_1) | instskip(NEXT) | instid1(VALU_DEP_1)
-; GFX11-SDAG-TRUE16-NEXT: v_lshl_or_b32 v2, v3, 16, v2
+; GFX11-SDAG-TRUE16-NEXT: v_mov_b16_e32 v2.h, v3.l
+; GFX11-SDAG-TRUE16-NEXT: s_delay_alu instid0(VALU_DEP_1) | instskip(SKIP_1) | instid1(SALU_CYCLE_1)
; GFX11-SDAG-TRUE16-NEXT: v_cmp_ne_u32_e32 vcc_lo, v2, v0
; GFX11-SDAG-TRUE16-NEXT: s_or_b32 s4, vcc_lo, s4
-; GFX11-SDAG-TRUE16-NEXT: s_delay_alu instid0(SALU_CYCLE_1)
; GFX11-SDAG-TRUE16-NEXT: s_and_not1_b32 exec_lo, exec_lo, s4
; GFX11-SDAG-TRUE16-NEXT: s_cbranch_execnz .LBB8_1
; GFX11-SDAG-TRUE16-NEXT: ; %bb.2: ; %bb2
@@ -550,12 +548,10 @@ define amdgpu_kernel void @struct_ptr_atomic_buffer_load_v4i16(ptr addrspace(8)
; GFX12-SDAG-TRUE16-NEXT: ; =>This Inner Loop Header: Depth=1
; GFX12-SDAG-TRUE16-NEXT: buffer_load_b64 v[2:3], v1, s[0:3], null idxen offset:4 th:TH_LOAD_NT
; GFX12-SDAG-TRUE16-NEXT: s_wait_loadcnt 0x0
-; GFX12-SDAG-TRUE16-NEXT: v_and_b32_e32 v2, 0xffff, v2
-; GFX12-SDAG-TRUE16-NEXT: s_delay_alu instid0(VALU_DEP_1) | instskip(NEXT) | instid1(VALU_DEP_1)
-; GFX12-SDAG-TRUE16-NEXT: v_lshl_or_b32 v2, v3, 16, v2
+; GFX12-SDAG-TRUE16-NEXT: v_mov_b16_e32 v2.h, v3.l
+; GFX12-SDAG-TRUE16-NEXT: s_delay_alu instid0(VALU_DEP_1) | instskip(SKIP_1) | instid1(SALU_CYCLE_1)
; GFX12-SDAG-TRUE16-NEXT: v_cmp_ne_u32_e32 vcc_lo, v2, v0
; GFX12-SDAG-TRUE16-NEXT: s_or_b32 s4, vcc_lo, s4
-; GFX12-SDAG-TRUE16-NEXT: s_delay_alu instid0(SALU_CYCLE_1)
; GFX12-SDAG-TRUE16-NEXT: s_and_not1_b32 exec_lo, exec_lo, s4
; GFX12-SDAG-TRUE16-NEXT: s_cbranch_execnz .LBB8_1
; GFX12-SDAG-TRUE16-NEXT: ; %bb.2: ; %bb2
diff --git a/llvm/test/CodeGen/AMDGPU/llvm.maximum.f16.ll b/llvm/test/CodeGen/AMDGPU/llvm.maximum.f16.ll
index 92a2f54..068a989 100644
--- a/llvm/test/CodeGen/AMDGPU/llvm.maximum.f16.ll
+++ b/llvm/test/CodeGen/AMDGPU/llvm.maximum.f16.ll
@@ -1053,19 +1053,15 @@ define void @s_maximum_v2f16(<2 x half> inreg %src0, <2 x half> inreg %src1) {
; GFX11-TRUE16-LABEL: s_maximum_v2f16:
; GFX11-TRUE16: ; %bb.0:
; GFX11-TRUE16-NEXT: s_waitcnt vmcnt(0) expcnt(0) lgkmcnt(0)
-; GFX11-TRUE16-NEXT: v_cmp_o_f16_e64 s2, s0, s1
; GFX11-TRUE16-NEXT: v_pk_max_f16 v0, s0, s1
-; GFX11-TRUE16-NEXT: s_lshr_b32 s1, s1, 16
-; GFX11-TRUE16-NEXT: s_lshr_b32 s0, s0, 16
-; GFX11-TRUE16-NEXT: s_delay_alu instid0(SALU_CYCLE_1) | instskip(NEXT) | instid1(VALU_DEP_2)
+; GFX11-TRUE16-NEXT: s_lshr_b32 s2, s1, 16
+; GFX11-TRUE16-NEXT: s_lshr_b32 s3, s0, 16
; GFX11-TRUE16-NEXT: v_cmp_o_f16_e64 s0, s0, s1
+; GFX11-TRUE16-NEXT: v_cmp_o_f16_e64 s1, s3, s2
; GFX11-TRUE16-NEXT: v_lshrrev_b32_e32 v1, 16, v0
-; GFX11-TRUE16-NEXT: v_cndmask_b16 v0.l, 0x7e00, v0.l, s2
-; GFX11-TRUE16-NEXT: s_delay_alu instid0(VALU_DEP_2) | instskip(NEXT) | instid1(VALU_DEP_2)
-; GFX11-TRUE16-NEXT: v_cndmask_b16 v1.l, 0x7e00, v1.l, s0
-; GFX11-TRUE16-NEXT: v_and_b32_e32 v0, 0xffff, v0
-; GFX11-TRUE16-NEXT: s_delay_alu instid0(VALU_DEP_1)
-; GFX11-TRUE16-NEXT: v_lshl_or_b32 v0, v1, 16, v0
+; GFX11-TRUE16-NEXT: s_delay_alu instid0(VALU_DEP_3) | instskip(NEXT) | instid1(VALU_DEP_2)
+; GFX11-TRUE16-NEXT: v_cndmask_b16 v0.l, 0x7e00, v0.l, s0
+; GFX11-TRUE16-NEXT: v_cndmask_b16 v0.h, 0x7e00, v1.l, s1
; GFX11-TRUE16-NEXT: ;;#ASMSTART
; GFX11-TRUE16-NEXT: ; use v0
; GFX11-TRUE16-NEXT: ;;#ASMEND
diff --git a/llvm/test/CodeGen/AMDGPU/llvm.minimum.f16.ll b/llvm/test/CodeGen/AMDGPU/llvm.minimum.f16.ll
index 9e82b41..2482d10 100644
--- a/llvm/test/CodeGen/AMDGPU/llvm.minimum.f16.ll
+++ b/llvm/test/CodeGen/AMDGPU/llvm.minimum.f16.ll
@@ -866,19 +866,15 @@ define void @s_minimum_v2f16(<2 x half> inreg %src0, <2 x half> inreg %src1) {
; GFX11-TRUE16-LABEL: s_minimum_v2f16:
; GFX11-TRUE16: ; %bb.0:
; GFX11-TRUE16-NEXT: s_waitcnt vmcnt(0) expcnt(0) lgkmcnt(0)
-; GFX11-TRUE16-NEXT: v_cmp_o_f16_e64 s2, s0, s1
; GFX11-TRUE16-NEXT: v_pk_min_f16 v0, s0, s1
-; GFX11-TRUE16-NEXT: s_lshr_b32 s1, s1, 16
-; GFX11-TRUE16-NEXT: s_lshr_b32 s0, s0, 16
-; GFX11-TRUE16-NEXT: s_delay_alu instid0(SALU_CYCLE_1) | instskip(NEXT) | instid1(VALU_DEP_2)
+; GFX11-TRUE16-NEXT: s_lshr_b32 s2, s1, 16
+; GFX11-TRUE16-NEXT: s_lshr_b32 s3, s0, 16
; GFX11-TRUE16-NEXT: v_cmp_o_f16_e64 s0, s0, s1
+; GFX11-TRUE16-NEXT: v_cmp_o_f16_e64 s1, s3, s2
; GFX11-TRUE16-NEXT: v_lshrrev_b32_e32 v1, 16, v0
-; GFX11-TRUE16-NEXT: v_cndmask_b16 v0.l, 0x7e00, v0.l, s2
-; GFX11-TRUE16-NEXT: s_delay_alu instid0(VALU_DEP_2) | instskip(NEXT) | instid1(VALU_DEP_2)
-; GFX11-TRUE16-NEXT: v_cndmask_b16 v1.l, 0x7e00, v1.l, s0
-; GFX11-TRUE16-NEXT: v_and_b32_e32 v0, 0xffff, v0
-; GFX11-TRUE16-NEXT: s_delay_alu instid0(VALU_DEP_1)
-; GFX11-TRUE16-NEXT: v_lshl_or_b32 v0, v1, 16, v0
+; GFX11-TRUE16-NEXT: s_delay_alu instid0(VALU_DEP_3) | instskip(NEXT) | instid1(VALU_DEP_2)
+; GFX11-TRUE16-NEXT: v_cndmask_b16 v0.l, 0x7e00, v0.l, s0
+; GFX11-TRUE16-NEXT: v_cndmask_b16 v0.h, 0x7e00, v1.l, s1
; GFX11-TRUE16-NEXT: ;;#ASMSTART
; GFX11-TRUE16-NEXT: ; use v0
; GFX11-TRUE16-NEXT: ;;#ASMEND
diff --git a/llvm/test/CodeGen/AMDGPU/llvm.sqrt.bf16.ll b/llvm/test/CodeGen/AMDGPU/llvm.sqrt.bf16.ll
index dcf01f7..818dff4 100644
--- a/llvm/test/CodeGen/AMDGPU/llvm.sqrt.bf16.ll
+++ b/llvm/test/CodeGen/AMDGPU/llvm.sqrt.bf16.ll
@@ -63,14 +63,10 @@ define amdgpu_kernel void @sqrt_v2bf16(ptr addrspace(1) %r, ptr addrspace(1) %a)
; GFX12-TRUE16-NEXT: buffer_load_b32 v0, off, s[8:11], null
; GFX12-TRUE16-NEXT: s_mov_b32 s5, s1
; GFX12-TRUE16-NEXT: s_wait_loadcnt 0x0
-; GFX12-TRUE16-NEXT: v_sqrt_bf16_e32 v1.l, v0.l
-; GFX12-TRUE16-NEXT: v_nop
-; GFX12-TRUE16-NEXT: v_lshrrev_b32_e32 v0, 16, v0
-; GFX12-TRUE16-NEXT: s_delay_alu instid0(VALU_DEP_1) | instskip(NEXT) | instid1(TRANS32_DEP_2)
+; GFX12-TRUE16-NEXT: v_lshrrev_b32_e32 v1, 16, v0
; GFX12-TRUE16-NEXT: v_sqrt_bf16_e32 v0.l, v0.l
-; GFX12-TRUE16-NEXT: v_and_b32_e32 v1, 0xffff, v1
-; GFX12-TRUE16-NEXT: s_delay_alu instid0(TRANS32_DEP_1) | instid1(VALU_DEP_1)
-; GFX12-TRUE16-NEXT: v_lshl_or_b32 v0, v0, 16, v1
+; GFX12-TRUE16-NEXT: s_delay_alu instid0(VALU_DEP_1)
+; GFX12-TRUE16-NEXT: v_sqrt_bf16_e32 v0.h, v1.l
; GFX12-TRUE16-NEXT: buffer_store_b32 v0, off, s[4:7], null
; GFX12-TRUE16-NEXT: s_endpgm
;
diff --git a/llvm/test/CodeGen/AMDGPU/load-constant-i8.ll b/llvm/test/CodeGen/AMDGPU/load-constant-i8.ll
index b534c2c..6f63384 100644
--- a/llvm/test/CodeGen/AMDGPU/load-constant-i8.ll
+++ b/llvm/test/CodeGen/AMDGPU/load-constant-i8.ll
@@ -9604,11 +9604,11 @@ define amdgpu_kernel void @constant_zextload_v2i8_to_v2i16(ptr addrspace(1) %out
; GFX12-TRUE16-NEXT: s_wait_loadcnt 0x0
; GFX12-TRUE16-NEXT: v_and_b32_e32 v2, 0xffff, v1
; GFX12-TRUE16-NEXT: v_and_b32_e32 v1, 0xff, v1
-; GFX12-TRUE16-NEXT: s_delay_alu instid0(VALU_DEP_2) | instskip(NEXT) | instid1(VALU_DEP_2)
+; GFX12-TRUE16-NEXT: s_delay_alu instid0(VALU_DEP_2) | instskip(NEXT) | instid1(VALU_DEP_1)
; GFX12-TRUE16-NEXT: v_lshrrev_b32_e32 v2, 8, v2
-; GFX12-TRUE16-NEXT: v_and_b32_e32 v1, 0xffff, v1
+; GFX12-TRUE16-NEXT: v_mov_b32_e32 v2, v2
; GFX12-TRUE16-NEXT: s_delay_alu instid0(VALU_DEP_1)
-; GFX12-TRUE16-NEXT: v_lshl_or_b32 v1, v2, 16, v1
+; GFX12-TRUE16-NEXT: v_mov_b16_e32 v1.h, v2.l
; GFX12-TRUE16-NEXT: global_store_b32 v0, v1, s[0:1]
; GFX12-TRUE16-NEXT: s_endpgm
;
@@ -9738,11 +9738,11 @@ define amdgpu_kernel void @constant_sextload_v2i8_to_v2i16(ptr addrspace(1) %out
; GFX12-TRUE16-NEXT: s_wait_loadcnt 0x0
; GFX12-TRUE16-NEXT: v_bfe_i32 v2, v1, 0, 16
; GFX12-TRUE16-NEXT: v_bfe_i32 v1, v1, 0, 8
-; GFX12-TRUE16-NEXT: s_delay_alu instid0(VALU_DEP_2) | instskip(NEXT) | instid1(VALU_DEP_2)
+; GFX12-TRUE16-NEXT: s_delay_alu instid0(VALU_DEP_2) | instskip(NEXT) | instid1(VALU_DEP_1)
; GFX12-TRUE16-NEXT: v_lshrrev_b32_e32 v2, 8, v2
-; GFX12-TRUE16-NEXT: v_and_b32_e32 v1, 0xffff, v1
+; GFX12-TRUE16-NEXT: v_mov_b32_e32 v2, v2
; GFX12-TRUE16-NEXT: s_delay_alu instid0(VALU_DEP_1)
-; GFX12-TRUE16-NEXT: v_lshl_or_b32 v1, v2, 16, v1
+; GFX12-TRUE16-NEXT: v_mov_b16_e32 v1.h, v2.l
; GFX12-TRUE16-NEXT: global_store_b32 v0, v1, s[0:1]
; GFX12-TRUE16-NEXT: s_endpgm
;
diff --git a/llvm/test/CodeGen/AMDGPU/sdiv.ll b/llvm/test/CodeGen/AMDGPU/sdiv.ll
index 5c0f813..441509b 100644
--- a/llvm/test/CodeGen/AMDGPU/sdiv.ll
+++ b/llvm/test/CodeGen/AMDGPU/sdiv.ll
@@ -391,156 +391,144 @@ define amdgpu_kernel void @slow_sdiv_i32_3435(ptr addrspace(1) %out, ptr addrspa
define amdgpu_kernel void @sdiv_v2i32(ptr addrspace(1) %out, ptr addrspace(1) %in) {
; GCN-LABEL: sdiv_v2i32:
; GCN: ; %bb.0:
-; GCN-NEXT: s_load_dwordx4 s[4:7], s[4:5], 0x9
-; GCN-NEXT: s_mov_b32 s3, 0xf000
-; GCN-NEXT: s_mov_b32 s2, -1
-; GCN-NEXT: s_mov_b32 s10, s2
-; GCN-NEXT: s_mov_b32 s11, s3
+; GCN-NEXT: s_load_dwordx4 s[0:3], s[4:5], 0x9
+; GCN-NEXT: s_mov_b32 s7, 0xf000
+; GCN-NEXT: s_mov_b32 s6, -1
+; GCN-NEXT: s_mov_b32 s10, s6
+; GCN-NEXT: s_mov_b32 s11, s7
; GCN-NEXT: s_waitcnt lgkmcnt(0)
-; GCN-NEXT: s_mov_b32 s8, s6
-; GCN-NEXT: s_mov_b32 s9, s7
+; GCN-NEXT: s_mov_b32 s8, s2
+; GCN-NEXT: s_mov_b32 s9, s3
; GCN-NEXT: buffer_load_dwordx4 v[0:3], off, s[8:11], 0
+; GCN-NEXT: s_mov_b32 s4, s0
+; GCN-NEXT: s_mov_b32 s5, s1
; GCN-NEXT: s_waitcnt vmcnt(0)
-; GCN-NEXT: v_readfirstlane_b32 s0, v2
-; GCN-NEXT: s_abs_i32 s1, s0
-; GCN-NEXT: v_cvt_f32_u32_e32 v2, s1
-; GCN-NEXT: s_sub_i32 s6, 0, s1
-; GCN-NEXT: v_readfirstlane_b32 s8, v3
-; GCN-NEXT: v_rcp_iflag_f32_e32 v2, v2
-; GCN-NEXT: v_mul_f32_e32 v2, 0x4f7ffffe, v2
-; GCN-NEXT: v_cvt_u32_f32_e32 v2, v2
-; GCN-NEXT: v_mul_lo_u32 v4, s6, v2
-; GCN-NEXT: v_readfirstlane_b32 s6, v0
-; GCN-NEXT: s_abs_i32 s7, s6
-; GCN-NEXT: s_xor_b32 s0, s6, s0
-; GCN-NEXT: v_mul_hi_u32 v4, v2, v4
-; GCN-NEXT: s_ashr_i32 s6, s0, 31
-; GCN-NEXT: v_add_i32_e32 v0, vcc, v2, v4
-; GCN-NEXT: v_mul_hi_u32 v0, s7, v0
-; GCN-NEXT: v_readfirstlane_b32 s0, v0
-; GCN-NEXT: s_mul_i32 s0, s0, s1
-; GCN-NEXT: s_sub_i32 s0, s7, s0
-; GCN-NEXT: s_sub_i32 s7, s0, s1
-; GCN-NEXT: v_add_i32_e32 v2, vcc, 1, v0
-; GCN-NEXT: s_cmp_ge_u32 s0, s1
-; GCN-NEXT: s_cselect_b64 vcc, -1, 0
-; GCN-NEXT: v_cndmask_b32_e32 v0, v0, v2, vcc
-; GCN-NEXT: s_cselect_b32 s0, s7, s0
-; GCN-NEXT: v_add_i32_e32 v2, vcc, 1, v0
-; GCN-NEXT: s_cmp_ge_u32 s0, s1
-; GCN-NEXT: s_cselect_b64 vcc, -1, 0
-; GCN-NEXT: s_abs_i32 s7, s8
-; GCN-NEXT: v_cvt_f32_u32_e32 v3, s7
-; GCN-NEXT: s_mov_b32 s0, s4
-; GCN-NEXT: s_sub_i32 s4, 0, s7
-; GCN-NEXT: s_mov_b32 s1, s5
-; GCN-NEXT: v_rcp_iflag_f32_e32 v3, v3
-; GCN-NEXT: v_cndmask_b32_e32 v0, v0, v2, vcc
-; GCN-NEXT: v_xor_b32_e32 v0, s6, v0
-; GCN-NEXT: v_subrev_i32_e32 v0, vcc, s6, v0
-; GCN-NEXT: v_mul_f32_e32 v3, 0x4f7ffffe, v3
-; GCN-NEXT: v_cvt_u32_f32_e32 v3, v3
-; GCN-NEXT: v_mul_lo_u32 v4, s4, v3
-; GCN-NEXT: v_readfirstlane_b32 s4, v1
-; GCN-NEXT: s_xor_b32 s5, s4, s8
-; GCN-NEXT: s_abs_i32 s4, s4
-; GCN-NEXT: v_mul_hi_u32 v1, v3, v4
-; GCN-NEXT: s_ashr_i32 s5, s5, 31
-; GCN-NEXT: v_add_i32_e32 v1, vcc, v3, v1
-; GCN-NEXT: v_mul_hi_u32 v1, s4, v1
-; GCN-NEXT: v_readfirstlane_b32 s6, v1
-; GCN-NEXT: s_mul_i32 s6, s6, s7
-; GCN-NEXT: s_sub_i32 s4, s4, s6
-; GCN-NEXT: s_sub_i32 s6, s4, s7
-; GCN-NEXT: v_add_i32_e32 v2, vcc, 1, v1
-; GCN-NEXT: s_cmp_ge_u32 s4, s7
-; GCN-NEXT: s_cselect_b64 vcc, -1, 0
-; GCN-NEXT: v_cndmask_b32_e32 v1, v1, v2, vcc
-; GCN-NEXT: s_cselect_b32 s4, s6, s4
-; GCN-NEXT: v_add_i32_e32 v2, vcc, 1, v1
-; GCN-NEXT: s_cmp_ge_u32 s4, s7
-; GCN-NEXT: s_cselect_b64 vcc, -1, 0
-; GCN-NEXT: v_cndmask_b32_e32 v1, v1, v2, vcc
-; GCN-NEXT: v_xor_b32_e32 v1, s5, v1
-; GCN-NEXT: v_subrev_i32_e32 v1, vcc, s5, v1
-; GCN-NEXT: buffer_store_dwordx2 v[0:1], off, s[0:3], 0
+; GCN-NEXT: v_sub_i32_e32 v6, vcc, 0, v2
+; GCN-NEXT: v_sub_i32_e32 v9, vcc, 0, v3
+; GCN-NEXT: v_xor_b32_e32 v4, v0, v2
+; GCN-NEXT: v_xor_b32_e32 v7, v1, v3
+; GCN-NEXT: v_max_i32_e32 v2, v2, v6
+; GCN-NEXT: v_max_i32_e32 v3, v3, v9
+; GCN-NEXT: v_cvt_f32_u32_e32 v6, v2
+; GCN-NEXT: v_cvt_f32_u32_e32 v9, v3
+; GCN-NEXT: v_sub_i32_e32 v5, vcc, 0, v0
+; GCN-NEXT: v_rcp_iflag_f32_e32 v6, v6
+; GCN-NEXT: v_max_i32_e32 v0, v0, v5
+; GCN-NEXT: v_rcp_iflag_f32_e32 v5, v9
+; GCN-NEXT: v_sub_i32_e32 v9, vcc, 0, v2
+; GCN-NEXT: v_mul_f32_e32 v6, 0x4f7ffffe, v6
+; GCN-NEXT: v_mul_f32_e32 v5, 0x4f7ffffe, v5
+; GCN-NEXT: v_cvt_u32_f32_e32 v6, v6
+; GCN-NEXT: v_cvt_u32_f32_e32 v5, v5
+; GCN-NEXT: v_sub_i32_e32 v10, vcc, 0, v3
+; GCN-NEXT: v_mul_lo_u32 v9, v9, v6
+; GCN-NEXT: v_mul_lo_u32 v10, v10, v5
+; GCN-NEXT: v_sub_i32_e32 v8, vcc, 0, v1
+; GCN-NEXT: v_mul_hi_u32 v9, v6, v9
+; GCN-NEXT: v_max_i32_e32 v1, v1, v8
+; GCN-NEXT: v_mul_hi_u32 v8, v5, v10
+; GCN-NEXT: v_ashrrev_i32_e32 v4, 31, v4
+; GCN-NEXT: v_add_i32_e32 v6, vcc, v6, v9
+; GCN-NEXT: v_add_i32_e32 v5, vcc, v5, v8
+; GCN-NEXT: v_mul_hi_u32 v6, v0, v6
+; GCN-NEXT: v_mul_hi_u32 v5, v1, v5
+; GCN-NEXT: v_ashrrev_i32_e32 v7, 31, v7
+; GCN-NEXT: v_mul_lo_u32 v8, v6, v2
+; GCN-NEXT: v_mul_lo_u32 v10, v5, v3
+; GCN-NEXT: v_add_i32_e32 v9, vcc, 1, v6
+; GCN-NEXT: v_sub_i32_e32 v0, vcc, v0, v8
+; GCN-NEXT: v_sub_i32_e32 v1, vcc, v1, v10
+; GCN-NEXT: v_add_i32_e32 v11, vcc, 1, v5
+; GCN-NEXT: v_cmp_ge_u32_e64 s[0:1], v0, v2
+; GCN-NEXT: v_cmp_ge_u32_e64 s[2:3], v1, v3
+; GCN-NEXT: v_sub_i32_e32 v8, vcc, v0, v2
+; GCN-NEXT: v_cndmask_b32_e64 v6, v6, v9, s[0:1]
+; GCN-NEXT: v_sub_i32_e32 v9, vcc, v1, v3
+; GCN-NEXT: v_cndmask_b32_e64 v5, v5, v11, s[2:3]
+; GCN-NEXT: v_cndmask_b32_e64 v0, v0, v8, s[0:1]
+; GCN-NEXT: v_add_i32_e32 v8, vcc, 1, v6
+; GCN-NEXT: v_cndmask_b32_e64 v1, v1, v9, s[2:3]
+; GCN-NEXT: v_add_i32_e32 v9, vcc, 1, v5
+; GCN-NEXT: v_cmp_ge_u32_e32 vcc, v0, v2
+; GCN-NEXT: v_cndmask_b32_e32 v0, v6, v8, vcc
+; GCN-NEXT: v_cmp_ge_u32_e32 vcc, v1, v3
+; GCN-NEXT: v_cndmask_b32_e32 v1, v5, v9, vcc
+; GCN-NEXT: v_xor_b32_e32 v0, v0, v4
+; GCN-NEXT: v_xor_b32_e32 v1, v1, v7
+; GCN-NEXT: v_sub_i32_e32 v0, vcc, v0, v4
+; GCN-NEXT: v_sub_i32_e32 v1, vcc, v1, v7
+; GCN-NEXT: buffer_store_dwordx2 v[0:1], off, s[4:7], 0
; GCN-NEXT: s_endpgm
;
; TONGA-LABEL: sdiv_v2i32:
; TONGA: ; %bb.0:
-; TONGA-NEXT: s_load_dwordx4 s[4:7], s[4:5], 0x24
-; TONGA-NEXT: s_mov_b32 s3, 0xf000
-; TONGA-NEXT: s_mov_b32 s2, -1
-; TONGA-NEXT: s_mov_b32 s10, s2
-; TONGA-NEXT: s_mov_b32 s11, s3
+; TONGA-NEXT: s_load_dwordx4 s[0:3], s[4:5], 0x24
+; TONGA-NEXT: s_mov_b32 s7, 0xf000
+; TONGA-NEXT: s_mov_b32 s6, -1
+; TONGA-NEXT: s_mov_b32 s10, s6
+; TONGA-NEXT: s_mov_b32 s11, s7
; TONGA-NEXT: s_waitcnt lgkmcnt(0)
-; TONGA-NEXT: s_mov_b32 s8, s6
-; TONGA-NEXT: s_mov_b32 s9, s7
+; TONGA-NEXT: s_mov_b32 s8, s2
+; TONGA-NEXT: s_mov_b32 s9, s3
; TONGA-NEXT: buffer_load_dwordx4 v[0:3], off, s[8:11], 0
+; TONGA-NEXT: s_mov_b32 s4, s0
+; TONGA-NEXT: s_mov_b32 s5, s1
; TONGA-NEXT: s_waitcnt vmcnt(0)
-; TONGA-NEXT: v_readfirstlane_b32 s0, v2
-; TONGA-NEXT: s_abs_i32 s1, s0
-; TONGA-NEXT: v_cvt_f32_u32_e32 v2, s1
-; TONGA-NEXT: s_sub_i32 s6, 0, s1
-; TONGA-NEXT: v_readfirstlane_b32 s8, v3
-; TONGA-NEXT: v_rcp_iflag_f32_e32 v2, v2
-; TONGA-NEXT: v_mul_f32_e32 v2, 0x4f7ffffe, v2
-; TONGA-NEXT: v_cvt_u32_f32_e32 v2, v2
-; TONGA-NEXT: v_mul_lo_u32 v4, s6, v2
-; TONGA-NEXT: v_readfirstlane_b32 s6, v0
-; TONGA-NEXT: s_abs_i32 s7, s6
-; TONGA-NEXT: s_xor_b32 s0, s6, s0
-; TONGA-NEXT: v_mul_hi_u32 v4, v2, v4
-; TONGA-NEXT: s_ashr_i32 s6, s0, 31
-; TONGA-NEXT: v_add_u32_e32 v0, vcc, v2, v4
-; TONGA-NEXT: v_mul_hi_u32 v0, s7, v0
-; TONGA-NEXT: v_readfirstlane_b32 s0, v0
-; TONGA-NEXT: s_mul_i32 s0, s0, s1
-; TONGA-NEXT: s_sub_i32 s0, s7, s0
-; TONGA-NEXT: s_sub_i32 s7, s0, s1
-; TONGA-NEXT: v_add_u32_e32 v2, vcc, 1, v0
-; TONGA-NEXT: s_cmp_ge_u32 s0, s1
-; TONGA-NEXT: s_cselect_b64 vcc, -1, 0
-; TONGA-NEXT: v_cndmask_b32_e32 v0, v0, v2, vcc
-; TONGA-NEXT: s_cselect_b32 s0, s7, s0
-; TONGA-NEXT: v_add_u32_e32 v2, vcc, 1, v0
-; TONGA-NEXT: s_cmp_ge_u32 s0, s1
-; TONGA-NEXT: s_cselect_b64 vcc, -1, 0
-; TONGA-NEXT: s_abs_i32 s7, s8
-; TONGA-NEXT: v_cvt_f32_u32_e32 v3, s7
-; TONGA-NEXT: s_mov_b32 s0, s4
-; TONGA-NEXT: s_sub_i32 s4, 0, s7
-; TONGA-NEXT: s_mov_b32 s1, s5
-; TONGA-NEXT: v_rcp_iflag_f32_e32 v3, v3
-; TONGA-NEXT: v_cndmask_b32_e32 v0, v0, v2, vcc
-; TONGA-NEXT: v_xor_b32_e32 v0, s6, v0
-; TONGA-NEXT: v_subrev_u32_e32 v0, vcc, s6, v0
-; TONGA-NEXT: v_mul_f32_e32 v3, 0x4f7ffffe, v3
-; TONGA-NEXT: v_cvt_u32_f32_e32 v3, v3
-; TONGA-NEXT: v_mul_lo_u32 v4, s4, v3
-; TONGA-NEXT: v_readfirstlane_b32 s4, v1
-; TONGA-NEXT: s_xor_b32 s5, s4, s8
-; TONGA-NEXT: s_abs_i32 s4, s4
-; TONGA-NEXT: v_mul_hi_u32 v1, v3, v4
-; TONGA-NEXT: s_ashr_i32 s5, s5, 31
-; TONGA-NEXT: v_add_u32_e32 v1, vcc, v3, v1
-; TONGA-NEXT: v_mul_hi_u32 v1, s4, v1
-; TONGA-NEXT: v_readfirstlane_b32 s6, v1
-; TONGA-NEXT: s_mul_i32 s6, s6, s7
-; TONGA-NEXT: s_sub_i32 s4, s4, s6
-; TONGA-NEXT: s_sub_i32 s6, s4, s7
-; TONGA-NEXT: v_add_u32_e32 v2, vcc, 1, v1
-; TONGA-NEXT: s_cmp_ge_u32 s4, s7
-; TONGA-NEXT: s_cselect_b64 vcc, -1, 0
-; TONGA-NEXT: v_cndmask_b32_e32 v1, v1, v2, vcc
-; TONGA-NEXT: s_cselect_b32 s4, s6, s4
-; TONGA-NEXT: v_add_u32_e32 v2, vcc, 1, v1
-; TONGA-NEXT: s_cmp_ge_u32 s4, s7
-; TONGA-NEXT: s_cselect_b64 vcc, -1, 0
-; TONGA-NEXT: v_cndmask_b32_e32 v1, v1, v2, vcc
-; TONGA-NEXT: v_xor_b32_e32 v1, s5, v1
-; TONGA-NEXT: v_subrev_u32_e32 v1, vcc, s5, v1
-; TONGA-NEXT: buffer_store_dwordx2 v[0:1], off, s[0:3], 0
+; TONGA-NEXT: v_sub_u32_e32 v6, vcc, 0, v2
+; TONGA-NEXT: v_sub_u32_e32 v9, vcc, 0, v3
+; TONGA-NEXT: v_xor_b32_e32 v4, v0, v2
+; TONGA-NEXT: v_xor_b32_e32 v7, v1, v3
+; TONGA-NEXT: v_max_i32_e32 v2, v2, v6
+; TONGA-NEXT: v_max_i32_e32 v3, v3, v9
+; TONGA-NEXT: v_cvt_f32_u32_e32 v6, v2
+; TONGA-NEXT: v_cvt_f32_u32_e32 v9, v3
+; TONGA-NEXT: v_sub_u32_e32 v5, vcc, 0, v0
+; TONGA-NEXT: v_rcp_iflag_f32_e32 v6, v6
+; TONGA-NEXT: v_max_i32_e32 v0, v0, v5
+; TONGA-NEXT: v_rcp_iflag_f32_e32 v5, v9
+; TONGA-NEXT: v_sub_u32_e32 v9, vcc, 0, v2
+; TONGA-NEXT: v_mul_f32_e32 v6, 0x4f7ffffe, v6
+; TONGA-NEXT: v_mul_f32_e32 v5, 0x4f7ffffe, v5
+; TONGA-NEXT: v_cvt_u32_f32_e32 v6, v6
+; TONGA-NEXT: v_cvt_u32_f32_e32 v5, v5
+; TONGA-NEXT: v_sub_u32_e32 v10, vcc, 0, v3
+; TONGA-NEXT: v_mul_lo_u32 v9, v9, v6
+; TONGA-NEXT: v_mul_lo_u32 v10, v10, v5
+; TONGA-NEXT: v_sub_u32_e32 v8, vcc, 0, v1
+; TONGA-NEXT: v_mul_hi_u32 v9, v6, v9
+; TONGA-NEXT: v_max_i32_e32 v1, v1, v8
+; TONGA-NEXT: v_mul_hi_u32 v8, v5, v10
+; TONGA-NEXT: v_ashrrev_i32_e32 v4, 31, v4
+; TONGA-NEXT: v_add_u32_e32 v6, vcc, v6, v9
+; TONGA-NEXT: v_add_u32_e32 v5, vcc, v5, v8
+; TONGA-NEXT: v_mul_hi_u32 v6, v0, v6
+; TONGA-NEXT: v_mul_hi_u32 v5, v1, v5
+; TONGA-NEXT: v_ashrrev_i32_e32 v7, 31, v7
+; TONGA-NEXT: v_mul_lo_u32 v8, v6, v2
+; TONGA-NEXT: v_mul_lo_u32 v10, v5, v3
+; TONGA-NEXT: v_add_u32_e32 v9, vcc, 1, v6
+; TONGA-NEXT: v_sub_u32_e32 v0, vcc, v0, v8
+; TONGA-NEXT: v_sub_u32_e32 v1, vcc, v1, v10
+; TONGA-NEXT: v_add_u32_e32 v11, vcc, 1, v5
+; TONGA-NEXT: v_cmp_ge_u32_e64 s[0:1], v0, v2
+; TONGA-NEXT: v_cmp_ge_u32_e64 s[2:3], v1, v3
+; TONGA-NEXT: v_sub_u32_e32 v8, vcc, v0, v2
+; TONGA-NEXT: v_cndmask_b32_e64 v6, v6, v9, s[0:1]
+; TONGA-NEXT: v_sub_u32_e32 v9, vcc, v1, v3
+; TONGA-NEXT: v_cndmask_b32_e64 v5, v5, v11, s[2:3]
+; TONGA-NEXT: v_cndmask_b32_e64 v0, v0, v8, s[0:1]
+; TONGA-NEXT: v_add_u32_e32 v8, vcc, 1, v6
+; TONGA-NEXT: v_cndmask_b32_e64 v1, v1, v9, s[2:3]
+; TONGA-NEXT: v_add_u32_e32 v9, vcc, 1, v5
+; TONGA-NEXT: v_cmp_ge_u32_e32 vcc, v0, v2
+; TONGA-NEXT: v_cndmask_b32_e32 v0, v6, v8, vcc
+; TONGA-NEXT: v_cmp_ge_u32_e32 vcc, v1, v3
+; TONGA-NEXT: v_cndmask_b32_e32 v1, v5, v9, vcc
+; TONGA-NEXT: v_xor_b32_e32 v0, v0, v4
+; TONGA-NEXT: v_xor_b32_e32 v1, v1, v7
+; TONGA-NEXT: v_sub_u32_e32 v0, vcc, v0, v4
+; TONGA-NEXT: v_sub_u32_e32 v1, vcc, v1, v7
+; TONGA-NEXT: buffer_store_dwordx2 v[0:1], off, s[4:7], 0
; TONGA-NEXT: s_endpgm
;
; GFX9-LABEL: sdiv_v2i32:
@@ -558,44 +546,44 @@ define amdgpu_kernel void @sdiv_v2i32(ptr addrspace(1) %out, ptr addrspace(1) %i
; GFX9-NEXT: v_readfirstlane_b32 s0, v2
; GFX9-NEXT: s_abs_i32 s1, s0
; GFX9-NEXT: v_cvt_f32_u32_e32 v2, s1
-; GFX9-NEXT: v_readfirstlane_b32 s4, v0
-; GFX9-NEXT: s_xor_b32 s0, s4, s0
+; GFX9-NEXT: v_readfirstlane_b32 s5, v0
+; GFX9-NEXT: s_xor_b32 s0, s5, s0
; GFX9-NEXT: s_ashr_i32 s6, s0, 31
; GFX9-NEXT: v_rcp_iflag_f32_e32 v2, v2
; GFX9-NEXT: s_sub_i32 s0, 0, s1
-; GFX9-NEXT: s_abs_i32 s4, s4
-; GFX9-NEXT: v_readfirstlane_b32 s5, v3
+; GFX9-NEXT: s_abs_i32 s5, s5
+; GFX9-NEXT: v_readfirstlane_b32 s4, v3
; GFX9-NEXT: v_mul_f32_e32 v0, 0x4f7ffffe, v2
; GFX9-NEXT: v_cvt_u32_f32_e32 v0, v0
; GFX9-NEXT: v_readfirstlane_b32 s7, v0
; GFX9-NEXT: s_mul_i32 s0, s0, s7
; GFX9-NEXT: s_mul_hi_u32 s0, s7, s0
; GFX9-NEXT: s_add_i32 s7, s7, s0
-; GFX9-NEXT: s_mul_hi_u32 s0, s4, s7
+; GFX9-NEXT: s_mul_hi_u32 s0, s5, s7
; GFX9-NEXT: s_mul_i32 s7, s0, s1
-; GFX9-NEXT: s_sub_i32 s4, s4, s7
+; GFX9-NEXT: s_sub_i32 s5, s5, s7
; GFX9-NEXT: s_add_i32 s10, s0, 1
-; GFX9-NEXT: s_sub_i32 s7, s4, s1
-; GFX9-NEXT: s_cmp_ge_u32 s4, s1
+; GFX9-NEXT: s_sub_i32 s7, s5, s1
+; GFX9-NEXT: s_cmp_ge_u32 s5, s1
; GFX9-NEXT: s_cselect_b32 s0, s10, s0
-; GFX9-NEXT: s_cselect_b32 s4, s7, s4
+; GFX9-NEXT: s_cselect_b32 s5, s7, s5
; GFX9-NEXT: s_add_i32 s7, s0, 1
-; GFX9-NEXT: s_cmp_ge_u32 s4, s1
-; GFX9-NEXT: s_cselect_b32 s4, s7, s0
-; GFX9-NEXT: s_abs_i32 s7, s5
+; GFX9-NEXT: s_cmp_ge_u32 s5, s1
+; GFX9-NEXT: s_cselect_b32 s5, s7, s0
+; GFX9-NEXT: s_abs_i32 s7, s4
; GFX9-NEXT: v_cvt_f32_u32_e32 v0, s7
-; GFX9-NEXT: s_xor_b32 s4, s4, s6
+; GFX9-NEXT: s_xor_b32 s5, s5, s6
; GFX9-NEXT: s_mov_b32 s1, s9
; GFX9-NEXT: s_sub_i32 s9, 0, s7
; GFX9-NEXT: v_rcp_iflag_f32_e32 v0, v0
-; GFX9-NEXT: s_sub_i32 s4, s4, s6
+; GFX9-NEXT: s_sub_i32 s5, s5, s6
; GFX9-NEXT: s_mov_b32 s0, s8
; GFX9-NEXT: v_readfirstlane_b32 s8, v1
; GFX9-NEXT: v_mul_f32_e32 v0, 0x4f7ffffe, v0
; GFX9-NEXT: v_cvt_u32_f32_e32 v0, v0
-; GFX9-NEXT: s_xor_b32 s5, s8, s5
+; GFX9-NEXT: s_xor_b32 s4, s8, s4
; GFX9-NEXT: s_abs_i32 s8, s8
-; GFX9-NEXT: s_ashr_i32 s5, s5, 31
+; GFX9-NEXT: s_ashr_i32 s4, s4, 31
; GFX9-NEXT: v_readfirstlane_b32 s6, v0
; GFX9-NEXT: s_mul_i32 s9, s9, s6
; GFX9-NEXT: s_mul_hi_u32 s9, s6, s9
@@ -611,10 +599,10 @@ define amdgpu_kernel void @sdiv_v2i32(ptr addrspace(1) %out, ptr addrspace(1) %i
; GFX9-NEXT: s_add_i32 s9, s6, 1
; GFX9-NEXT: s_cmp_ge_u32 s8, s7
; GFX9-NEXT: s_cselect_b32 s6, s9, s6
-; GFX9-NEXT: s_xor_b32 s6, s6, s5
-; GFX9-NEXT: s_sub_i32 s5, s6, s5
-; GFX9-NEXT: v_mov_b32_e32 v0, s4
-; GFX9-NEXT: v_mov_b32_e32 v1, s5
+; GFX9-NEXT: s_xor_b32 s6, s6, s4
+; GFX9-NEXT: s_sub_i32 s4, s6, s4
+; GFX9-NEXT: v_mov_b32_e32 v0, s5
+; GFX9-NEXT: v_mov_b32_e32 v1, s4
; GFX9-NEXT: buffer_store_dwordx2 v[0:1], off, s[0:3], 0
; GFX9-NEXT: s_endpgm
;
@@ -804,255 +792,255 @@ define amdgpu_kernel void @sdiv_v4i32(ptr addrspace(1) %out, ptr addrspace(1) %i
; GCN-LABEL: sdiv_v4i32:
; GCN: ; %bb.0:
; GCN-NEXT: s_load_dwordx4 s[0:3], s[4:5], 0x9
-; GCN-NEXT: s_mov_b32 s11, 0xf000
-; GCN-NEXT: s_mov_b32 s10, -1
-; GCN-NEXT: s_mov_b32 s6, s10
-; GCN-NEXT: s_mov_b32 s7, s11
+; GCN-NEXT: s_mov_b32 s7, 0xf000
+; GCN-NEXT: s_mov_b32 s6, -1
+; GCN-NEXT: s_mov_b32 s10, s6
+; GCN-NEXT: s_mov_b32 s11, s7
; GCN-NEXT: s_waitcnt lgkmcnt(0)
-; GCN-NEXT: s_mov_b32 s4, s2
-; GCN-NEXT: s_mov_b32 s5, s3
-; GCN-NEXT: buffer_load_dwordx4 v[0:3], off, s[4:7], 0 offset:16
-; GCN-NEXT: buffer_load_dwordx4 v[4:7], off, s[4:7], 0
-; GCN-NEXT: s_mov_b32 s8, s0
-; GCN-NEXT: s_mov_b32 s9, s1
+; GCN-NEXT: s_mov_b32 s8, s2
+; GCN-NEXT: s_mov_b32 s9, s3
+; GCN-NEXT: buffer_load_dwordx4 v[0:3], off, s[8:11], 0
+; GCN-NEXT: buffer_load_dwordx4 v[4:7], off, s[8:11], 0 offset:16
+; GCN-NEXT: s_mov_b32 s4, s0
+; GCN-NEXT: s_mov_b32 s5, s1
; GCN-NEXT: s_waitcnt vmcnt(1)
-; GCN-NEXT: v_readfirstlane_b32 s0, v0
-; GCN-NEXT: v_readfirstlane_b32 s1, v1
-; GCN-NEXT: v_readfirstlane_b32 s2, v2
-; GCN-NEXT: s_abs_i32 s13, s0
-; GCN-NEXT: s_abs_i32 s14, s1
-; GCN-NEXT: s_abs_i32 s15, s2
-; GCN-NEXT: v_cvt_f32_u32_e32 v0, s13
-; GCN-NEXT: v_cvt_f32_u32_e32 v1, s14
-; GCN-NEXT: v_cvt_f32_u32_e32 v2, s15
-; GCN-NEXT: v_readfirstlane_b32 s6, v3
+; GCN-NEXT: v_sub_i32_e32 v9, vcc, 0, v0
+; GCN-NEXT: s_waitcnt vmcnt(0)
+; GCN-NEXT: v_sub_i32_e32 v10, vcc, 0, v4
+; GCN-NEXT: v_xor_b32_e32 v8, v0, v4
+; GCN-NEXT: v_max_i32_e32 v4, v4, v10
+; GCN-NEXT: v_cvt_f32_u32_e32 v10, v4
+; GCN-NEXT: v_sub_i32_e32 v13, vcc, 0, v5
+; GCN-NEXT: v_xor_b32_e32 v11, v1, v5
+; GCN-NEXT: v_rcp_iflag_f32_e32 v10, v10
+; GCN-NEXT: v_max_i32_e32 v5, v5, v13
+; GCN-NEXT: v_cvt_f32_u32_e32 v13, v5
+; GCN-NEXT: v_sub_i32_e32 v16, vcc, 0, v4
+; GCN-NEXT: v_mul_f32_e32 v10, 0x4f7ffffe, v10
+; GCN-NEXT: v_cvt_u32_f32_e32 v10, v10
+; GCN-NEXT: v_rcp_iflag_f32_e32 v13, v13
+; GCN-NEXT: v_sub_i32_e32 v12, vcc, 0, v1
+; GCN-NEXT: v_mul_lo_u32 v16, v16, v10
+; GCN-NEXT: v_mul_f32_e32 v13, 0x4f7ffffe, v13
+; GCN-NEXT: v_cvt_u32_f32_e32 v13, v13
+; GCN-NEXT: v_max_i32_e32 v0, v0, v9
+; GCN-NEXT: v_mul_hi_u32 v16, v10, v16
+; GCN-NEXT: v_max_i32_e32 v1, v1, v12
+; GCN-NEXT: v_sub_i32_e32 v15, vcc, 0, v6
+; GCN-NEXT: v_add_i32_e32 v10, vcc, v10, v16
+; GCN-NEXT: v_sub_i32_e32 v16, vcc, 0, v5
+; GCN-NEXT: v_mul_lo_u32 v16, v16, v13
+; GCN-NEXT: v_mul_hi_u32 v10, v0, v10
+; GCN-NEXT: v_xor_b32_e32 v14, v2, v6
+; GCN-NEXT: v_max_i32_e32 v6, v6, v15
+; GCN-NEXT: v_mul_hi_u32 v12, v13, v16
+; GCN-NEXT: v_cvt_f32_u32_e32 v15, v6
+; GCN-NEXT: v_ashrrev_i32_e32 v8, 31, v8
+; GCN-NEXT: v_ashrrev_i32_e32 v11, 31, v11
+; GCN-NEXT: v_add_i32_e32 v12, vcc, v13, v12
+; GCN-NEXT: v_mul_lo_u32 v13, v10, v4
+; GCN-NEXT: v_mul_hi_u32 v12, v1, v12
+; GCN-NEXT: v_rcp_iflag_f32_e32 v9, v15
+; GCN-NEXT: v_ashrrev_i32_e32 v14, 31, v14
+; GCN-NEXT: v_sub_i32_e32 v0, vcc, v0, v13
+; GCN-NEXT: v_add_i32_e32 v13, vcc, 1, v10
+; GCN-NEXT: v_cmp_ge_u32_e64 s[0:1], v0, v4
+; GCN-NEXT: v_cndmask_b32_e64 v10, v10, v13, s[0:1]
+; GCN-NEXT: v_sub_i32_e32 v13, vcc, v0, v4
+; GCN-NEXT: v_cndmask_b32_e64 v0, v0, v13, s[0:1]
+; GCN-NEXT: v_cmp_ge_u32_e64 s[0:1], v0, v4
+; GCN-NEXT: v_mul_lo_u32 v0, v12, v5
+; GCN-NEXT: v_mul_f32_e32 v9, 0x4f7ffffe, v9
+; GCN-NEXT: v_cvt_u32_f32_e32 v9, v9
+; GCN-NEXT: v_sub_i32_e32 v4, vcc, 0, v6
+; GCN-NEXT: v_sub_i32_e32 v0, vcc, v1, v0
+; GCN-NEXT: v_add_i32_e32 v1, vcc, 1, v12
+; GCN-NEXT: v_cmp_ge_u32_e64 s[2:3], v0, v5
+; GCN-NEXT: v_cndmask_b32_e64 v1, v12, v1, s[2:3]
+; GCN-NEXT: v_sub_i32_e32 v12, vcc, v0, v5
+; GCN-NEXT: v_mul_lo_u32 v4, v4, v9
+; GCN-NEXT: v_cndmask_b32_e64 v0, v0, v12, s[2:3]
+; GCN-NEXT: v_cmp_ge_u32_e64 s[2:3], v0, v5
+; GCN-NEXT: v_sub_i32_e32 v0, vcc, 0, v7
+; GCN-NEXT: v_max_i32_e32 v5, v7, v0
+; GCN-NEXT: v_cvt_f32_u32_e32 v0, v5
+; GCN-NEXT: v_mul_hi_u32 v4, v9, v4
+; GCN-NEXT: v_add_i32_e32 v13, vcc, 1, v10
; GCN-NEXT: v_rcp_iflag_f32_e32 v0, v0
-; GCN-NEXT: v_rcp_iflag_f32_e32 v1, v1
-; GCN-NEXT: v_rcp_iflag_f32_e32 v2, v2
-; GCN-NEXT: s_abs_i32 s17, s6
-; GCN-NEXT: v_cvt_f32_u32_e32 v3, s17
+; GCN-NEXT: v_add_i32_e32 v4, vcc, v9, v4
+; GCN-NEXT: v_sub_i32_e32 v9, vcc, 0, v2
+; GCN-NEXT: v_max_i32_e32 v2, v2, v9
+; GCN-NEXT: v_mul_hi_u32 v4, v2, v4
; GCN-NEXT: v_mul_f32_e32 v0, 0x4f7ffffe, v0
-; GCN-NEXT: v_mul_f32_e32 v1, 0x4f7ffffe, v1
-; GCN-NEXT: v_mul_f32_e32 v2, 0x4f7ffffe, v2
-; GCN-NEXT: v_cvt_u32_f32_e32 v0, v0
-; GCN-NEXT: v_cvt_u32_f32_e32 v1, v1
-; GCN-NEXT: v_cvt_u32_f32_e32 v2, v2
-; GCN-NEXT: s_waitcnt vmcnt(0)
-; GCN-NEXT: v_readfirstlane_b32 s3, v4
-; GCN-NEXT: v_readfirstlane_b32 s4, v5
-; GCN-NEXT: v_readfirstlane_b32 s5, v6
-; GCN-NEXT: s_xor_b32 s12, s3, s0
-; GCN-NEXT: s_xor_b32 s0, s4, s1
-; GCN-NEXT: s_xor_b32 s1, s5, s2
-; GCN-NEXT: s_sub_i32 s2, 0, s13
-; GCN-NEXT: s_ashr_i32 s18, s0, 31
-; GCN-NEXT: s_sub_i32 s0, 0, s14
-; GCN-NEXT: s_ashr_i32 s19, s1, 31
-; GCN-NEXT: s_sub_i32 s1, 0, s15
-; GCN-NEXT: v_rcp_iflag_f32_e32 v3, v3
-; GCN-NEXT: v_mul_lo_u32 v4, s2, v0
-; GCN-NEXT: v_mul_lo_u32 v5, s0, v1
-; GCN-NEXT: v_mul_lo_u32 v6, s1, v2
-; GCN-NEXT: v_mul_f32_e32 v3, 0x4f7ffffe, v3
-; GCN-NEXT: v_cvt_u32_f32_e32 v3, v3
-; GCN-NEXT: v_mul_hi_u32 v4, v0, v4
-; GCN-NEXT: v_mul_hi_u32 v5, v1, v5
-; GCN-NEXT: v_mul_hi_u32 v6, v2, v6
-; GCN-NEXT: s_sub_i32 s20, 0, s17
-; GCN-NEXT: v_readfirstlane_b32 s7, v7
-; GCN-NEXT: s_abs_i32 s3, s3
-; GCN-NEXT: s_abs_i32 s4, s4
-; GCN-NEXT: s_abs_i32 s5, s5
-; GCN-NEXT: v_mul_lo_u32 v7, s20, v3
-; GCN-NEXT: v_add_i32_e32 v0, vcc, v0, v4
-; GCN-NEXT: v_add_i32_e32 v1, vcc, v1, v5
-; GCN-NEXT: v_add_i32_e32 v2, vcc, v2, v6
-; GCN-NEXT: v_mul_hi_u32 v0, s3, v0
-; GCN-NEXT: v_mul_hi_u32 v1, s4, v1
-; GCN-NEXT: v_mul_hi_u32 v2, s5, v2
-; GCN-NEXT: v_mul_hi_u32 v7, v3, v7
-; GCN-NEXT: v_mul_lo_u32 v4, v0, s13
-; GCN-NEXT: v_mul_lo_u32 v6, v1, s14
-; GCN-NEXT: v_mul_lo_u32 v8, v2, s15
-; GCN-NEXT: s_abs_i32 s16, s7
-; GCN-NEXT: v_add_i32_e32 v3, vcc, v3, v7
-; GCN-NEXT: v_mul_hi_u32 v3, s16, v3
-; GCN-NEXT: v_sub_i32_e32 v4, vcc, s3, v4
-; GCN-NEXT: v_sub_i32_e32 v6, vcc, s4, v6
-; GCN-NEXT: v_sub_i32_e32 v8, vcc, s5, v8
-; GCN-NEXT: v_add_i32_e32 v5, vcc, 1, v0
-; GCN-NEXT: v_add_i32_e32 v7, vcc, 1, v1
-; GCN-NEXT: v_add_i32_e32 v9, vcc, 1, v2
-; GCN-NEXT: v_cmp_le_u32_e64 s[0:1], s13, v4
-; GCN-NEXT: v_cmp_le_u32_e64 s[2:3], s14, v6
-; GCN-NEXT: v_cmp_le_u32_e64 s[4:5], s15, v8
-; GCN-NEXT: v_subrev_i32_e32 v10, vcc, s13, v4
-; GCN-NEXT: v_cndmask_b32_e64 v0, v0, v5, s[0:1]
-; GCN-NEXT: v_subrev_i32_e32 v5, vcc, s14, v6
-; GCN-NEXT: v_cndmask_b32_e64 v1, v1, v7, s[2:3]
-; GCN-NEXT: v_subrev_i32_e32 v7, vcc, s15, v8
-; GCN-NEXT: v_cndmask_b32_e64 v2, v2, v9, s[4:5]
-; GCN-NEXT: v_cndmask_b32_e64 v4, v4, v10, s[0:1]
-; GCN-NEXT: v_add_i32_e32 v9, vcc, 1, v0
-; GCN-NEXT: v_cndmask_b32_e64 v5, v6, v5, s[2:3]
-; GCN-NEXT: v_add_i32_e32 v6, vcc, 1, v1
-; GCN-NEXT: v_cndmask_b32_e64 v7, v8, v7, s[4:5]
-; GCN-NEXT: v_add_i32_e32 v8, vcc, 1, v2
-; GCN-NEXT: v_cmp_le_u32_e32 vcc, s13, v4
-; GCN-NEXT: v_mul_lo_u32 v4, v3, s17
-; GCN-NEXT: v_cndmask_b32_e32 v0, v0, v9, vcc
-; GCN-NEXT: v_cmp_le_u32_e32 vcc, s14, v5
-; GCN-NEXT: v_cndmask_b32_e32 v1, v1, v6, vcc
-; GCN-NEXT: v_cmp_le_u32_e32 vcc, s15, v7
-; GCN-NEXT: s_ashr_i32 s12, s12, 31
-; GCN-NEXT: v_cndmask_b32_e32 v2, v2, v8, vcc
-; GCN-NEXT: v_xor_b32_e32 v0, s12, v0
-; GCN-NEXT: v_xor_b32_e32 v1, s18, v1
-; GCN-NEXT: v_xor_b32_e32 v2, s19, v2
-; GCN-NEXT: v_sub_i32_e32 v4, vcc, s16, v4
-; GCN-NEXT: v_subrev_i32_e32 v0, vcc, s12, v0
-; GCN-NEXT: v_subrev_i32_e32 v1, vcc, s18, v1
-; GCN-NEXT: v_subrev_i32_e32 v2, vcc, s19, v2
-; GCN-NEXT: v_add_i32_e32 v5, vcc, 1, v3
-; GCN-NEXT: v_subrev_i32_e32 v6, vcc, s17, v4
-; GCN-NEXT: v_cmp_le_u32_e32 vcc, s17, v4
-; GCN-NEXT: v_cndmask_b32_e32 v3, v3, v5, vcc
-; GCN-NEXT: v_cndmask_b32_e32 v4, v4, v6, vcc
-; GCN-NEXT: v_add_i32_e32 v5, vcc, 1, v3
-; GCN-NEXT: s_xor_b32 s0, s7, s6
-; GCN-NEXT: v_cmp_le_u32_e32 vcc, s17, v4
-; GCN-NEXT: s_ashr_i32 s0, s0, 31
-; GCN-NEXT: v_cndmask_b32_e32 v3, v3, v5, vcc
-; GCN-NEXT: v_xor_b32_e32 v3, s0, v3
-; GCN-NEXT: v_subrev_i32_e32 v3, vcc, s0, v3
-; GCN-NEXT: buffer_store_dwordx4 v[0:3], off, s[8:11], 0
+; GCN-NEXT: v_cvt_u32_f32_e32 v9, v0
+; GCN-NEXT: v_cndmask_b32_e64 v0, v10, v13, s[0:1]
+; GCN-NEXT: v_xor_b32_e32 v0, v0, v8
+; GCN-NEXT: v_sub_i32_e32 v0, vcc, v0, v8
+; GCN-NEXT: v_mul_lo_u32 v8, v4, v6
+; GCN-NEXT: v_add_i32_e32 v12, vcc, 1, v1
+; GCN-NEXT: v_sub_i32_e32 v10, vcc, 0, v5
+; GCN-NEXT: v_sub_i32_e32 v2, vcc, v2, v8
+; GCN-NEXT: v_cndmask_b32_e64 v1, v1, v12, s[2:3]
+; GCN-NEXT: v_mul_lo_u32 v10, v10, v9
+; GCN-NEXT: v_add_i32_e32 v8, vcc, 1, v4
+; GCN-NEXT: v_cmp_ge_u32_e64 s[0:1], v2, v6
+; GCN-NEXT: v_xor_b32_e32 v1, v1, v11
+; GCN-NEXT: v_cndmask_b32_e64 v4, v4, v8, s[0:1]
+; GCN-NEXT: v_sub_i32_e32 v8, vcc, v2, v6
+; GCN-NEXT: v_sub_i32_e32 v1, vcc, v1, v11
+; GCN-NEXT: v_cndmask_b32_e64 v2, v2, v8, s[0:1]
+; GCN-NEXT: v_add_i32_e32 v8, vcc, 1, v4
+; GCN-NEXT: v_cmp_ge_u32_e32 vcc, v2, v6
+; GCN-NEXT: v_cndmask_b32_e32 v2, v4, v8, vcc
+; GCN-NEXT: v_mul_hi_u32 v4, v9, v10
+; GCN-NEXT: v_sub_i32_e32 v6, vcc, 0, v3
+; GCN-NEXT: v_max_i32_e32 v6, v3, v6
+; GCN-NEXT: v_add_i32_e32 v4, vcc, v9, v4
+; GCN-NEXT: v_mul_hi_u32 v4, v6, v4
+; GCN-NEXT: v_xor_b32_e32 v2, v2, v14
+; GCN-NEXT: v_sub_i32_e32 v2, vcc, v2, v14
+; GCN-NEXT: v_mul_lo_u32 v8, v4, v5
+; GCN-NEXT: v_xor_b32_e32 v3, v3, v7
+; GCN-NEXT: v_add_i32_e32 v7, vcc, 1, v4
+; GCN-NEXT: v_sub_i32_e32 v6, vcc, v6, v8
+; GCN-NEXT: v_sub_i32_e32 v8, vcc, v6, v5
+; GCN-NEXT: v_cmp_ge_u32_e32 vcc, v6, v5
+; GCN-NEXT: v_cndmask_b32_e32 v4, v4, v7, vcc
+; GCN-NEXT: v_cndmask_b32_e32 v6, v6, v8, vcc
+; GCN-NEXT: v_add_i32_e32 v7, vcc, 1, v4
+; GCN-NEXT: v_cmp_ge_u32_e32 vcc, v6, v5
+; GCN-NEXT: v_ashrrev_i32_e32 v3, 31, v3
+; GCN-NEXT: v_cndmask_b32_e32 v4, v4, v7, vcc
+; GCN-NEXT: v_xor_b32_e32 v4, v4, v3
+; GCN-NEXT: v_sub_i32_e32 v3, vcc, v4, v3
+; GCN-NEXT: buffer_store_dwordx4 v[0:3], off, s[4:7], 0
; GCN-NEXT: s_endpgm
;
; TONGA-LABEL: sdiv_v4i32:
; TONGA: ; %bb.0:
; TONGA-NEXT: s_load_dwordx4 s[0:3], s[4:5], 0x24
-; TONGA-NEXT: s_mov_b32 s11, 0xf000
-; TONGA-NEXT: s_mov_b32 s10, -1
-; TONGA-NEXT: s_mov_b32 s6, s10
-; TONGA-NEXT: s_mov_b32 s7, s11
+; TONGA-NEXT: s_mov_b32 s7, 0xf000
+; TONGA-NEXT: s_mov_b32 s6, -1
+; TONGA-NEXT: s_mov_b32 s10, s6
+; TONGA-NEXT: s_mov_b32 s11, s7
; TONGA-NEXT: s_waitcnt lgkmcnt(0)
-; TONGA-NEXT: s_mov_b32 s4, s2
-; TONGA-NEXT: s_mov_b32 s5, s3
-; TONGA-NEXT: buffer_load_dwordx4 v[0:3], off, s[4:7], 0 offset:16
-; TONGA-NEXT: buffer_load_dwordx4 v[4:7], off, s[4:7], 0
-; TONGA-NEXT: s_mov_b32 s8, s0
-; TONGA-NEXT: s_mov_b32 s9, s1
+; TONGA-NEXT: s_mov_b32 s8, s2
+; TONGA-NEXT: s_mov_b32 s9, s3
+; TONGA-NEXT: buffer_load_dwordx4 v[0:3], off, s[8:11], 0
+; TONGA-NEXT: buffer_load_dwordx4 v[4:7], off, s[8:11], 0 offset:16
+; TONGA-NEXT: s_mov_b32 s4, s0
+; TONGA-NEXT: s_mov_b32 s5, s1
; TONGA-NEXT: s_waitcnt vmcnt(1)
-; TONGA-NEXT: v_readfirstlane_b32 s0, v0
-; TONGA-NEXT: v_readfirstlane_b32 s1, v1
-; TONGA-NEXT: v_readfirstlane_b32 s2, v2
-; TONGA-NEXT: s_abs_i32 s13, s0
-; TONGA-NEXT: s_abs_i32 s14, s1
-; TONGA-NEXT: s_abs_i32 s15, s2
-; TONGA-NEXT: v_cvt_f32_u32_e32 v0, s13
-; TONGA-NEXT: v_cvt_f32_u32_e32 v1, s14
-; TONGA-NEXT: v_cvt_f32_u32_e32 v2, s15
-; TONGA-NEXT: v_readfirstlane_b32 s6, v3
+; TONGA-NEXT: v_sub_u32_e32 v9, vcc, 0, v0
+; TONGA-NEXT: s_waitcnt vmcnt(0)
+; TONGA-NEXT: v_sub_u32_e32 v10, vcc, 0, v4
+; TONGA-NEXT: v_xor_b32_e32 v8, v0, v4
+; TONGA-NEXT: v_max_i32_e32 v4, v4, v10
+; TONGA-NEXT: v_cvt_f32_u32_e32 v10, v4
+; TONGA-NEXT: v_sub_u32_e32 v13, vcc, 0, v5
+; TONGA-NEXT: v_xor_b32_e32 v11, v1, v5
+; TONGA-NEXT: v_rcp_iflag_f32_e32 v10, v10
+; TONGA-NEXT: v_max_i32_e32 v5, v5, v13
+; TONGA-NEXT: v_cvt_f32_u32_e32 v13, v5
+; TONGA-NEXT: v_sub_u32_e32 v16, vcc, 0, v4
+; TONGA-NEXT: v_mul_f32_e32 v10, 0x4f7ffffe, v10
+; TONGA-NEXT: v_cvt_u32_f32_e32 v10, v10
+; TONGA-NEXT: v_rcp_iflag_f32_e32 v13, v13
+; TONGA-NEXT: v_sub_u32_e32 v12, vcc, 0, v1
+; TONGA-NEXT: v_mul_lo_u32 v16, v16, v10
+; TONGA-NEXT: v_mul_f32_e32 v13, 0x4f7ffffe, v13
+; TONGA-NEXT: v_cvt_u32_f32_e32 v13, v13
+; TONGA-NEXT: v_max_i32_e32 v0, v0, v9
+; TONGA-NEXT: v_mul_hi_u32 v16, v10, v16
+; TONGA-NEXT: v_max_i32_e32 v1, v1, v12
+; TONGA-NEXT: v_sub_u32_e32 v15, vcc, 0, v6
+; TONGA-NEXT: v_add_u32_e32 v10, vcc, v10, v16
+; TONGA-NEXT: v_sub_u32_e32 v16, vcc, 0, v5
+; TONGA-NEXT: v_mul_lo_u32 v16, v16, v13
+; TONGA-NEXT: v_mul_hi_u32 v10, v0, v10
+; TONGA-NEXT: v_xor_b32_e32 v14, v2, v6
+; TONGA-NEXT: v_max_i32_e32 v6, v6, v15
+; TONGA-NEXT: v_mul_hi_u32 v12, v13, v16
+; TONGA-NEXT: v_cvt_f32_u32_e32 v15, v6
+; TONGA-NEXT: v_ashrrev_i32_e32 v8, 31, v8
+; TONGA-NEXT: v_ashrrev_i32_e32 v11, 31, v11
+; TONGA-NEXT: v_add_u32_e32 v12, vcc, v13, v12
+; TONGA-NEXT: v_mul_lo_u32 v13, v10, v4
+; TONGA-NEXT: v_mul_hi_u32 v12, v1, v12
+; TONGA-NEXT: v_rcp_iflag_f32_e32 v9, v15
+; TONGA-NEXT: v_ashrrev_i32_e32 v14, 31, v14
+; TONGA-NEXT: v_sub_u32_e32 v0, vcc, v0, v13
+; TONGA-NEXT: v_add_u32_e32 v13, vcc, 1, v10
+; TONGA-NEXT: v_cmp_ge_u32_e64 s[0:1], v0, v4
+; TONGA-NEXT: v_cndmask_b32_e64 v10, v10, v13, s[0:1]
+; TONGA-NEXT: v_sub_u32_e32 v13, vcc, v0, v4
+; TONGA-NEXT: v_cndmask_b32_e64 v0, v0, v13, s[0:1]
+; TONGA-NEXT: v_cmp_ge_u32_e64 s[0:1], v0, v4
+; TONGA-NEXT: v_mul_lo_u32 v0, v12, v5
+; TONGA-NEXT: v_mul_f32_e32 v9, 0x4f7ffffe, v9
+; TONGA-NEXT: v_cvt_u32_f32_e32 v9, v9
+; TONGA-NEXT: v_sub_u32_e32 v4, vcc, 0, v6
+; TONGA-NEXT: v_sub_u32_e32 v0, vcc, v1, v0
+; TONGA-NEXT: v_add_u32_e32 v1, vcc, 1, v12
+; TONGA-NEXT: v_cmp_ge_u32_e64 s[2:3], v0, v5
+; TONGA-NEXT: v_cndmask_b32_e64 v1, v12, v1, s[2:3]
+; TONGA-NEXT: v_sub_u32_e32 v12, vcc, v0, v5
+; TONGA-NEXT: v_mul_lo_u32 v4, v4, v9
+; TONGA-NEXT: v_cndmask_b32_e64 v0, v0, v12, s[2:3]
+; TONGA-NEXT: v_cmp_ge_u32_e64 s[2:3], v0, v5
+; TONGA-NEXT: v_sub_u32_e32 v0, vcc, 0, v7
+; TONGA-NEXT: v_max_i32_e32 v5, v7, v0
+; TONGA-NEXT: v_cvt_f32_u32_e32 v0, v5
+; TONGA-NEXT: v_mul_hi_u32 v4, v9, v4
+; TONGA-NEXT: v_add_u32_e32 v13, vcc, 1, v10
; TONGA-NEXT: v_rcp_iflag_f32_e32 v0, v0
-; TONGA-NEXT: v_rcp_iflag_f32_e32 v1, v1
-; TONGA-NEXT: v_rcp_iflag_f32_e32 v2, v2
-; TONGA-NEXT: s_abs_i32 s17, s6
-; TONGA-NEXT: v_cvt_f32_u32_e32 v3, s17
+; TONGA-NEXT: v_add_u32_e32 v4, vcc, v9, v4
+; TONGA-NEXT: v_sub_u32_e32 v9, vcc, 0, v2
+; TONGA-NEXT: v_max_i32_e32 v2, v2, v9
+; TONGA-NEXT: v_mul_hi_u32 v4, v2, v4
; TONGA-NEXT: v_mul_f32_e32 v0, 0x4f7ffffe, v0
-; TONGA-NEXT: v_mul_f32_e32 v1, 0x4f7ffffe, v1
-; TONGA-NEXT: v_mul_f32_e32 v2, 0x4f7ffffe, v2
-; TONGA-NEXT: v_cvt_u32_f32_e32 v0, v0
-; TONGA-NEXT: v_cvt_u32_f32_e32 v1, v1
-; TONGA-NEXT: v_cvt_u32_f32_e32 v2, v2
-; TONGA-NEXT: s_waitcnt vmcnt(0)
-; TONGA-NEXT: v_readfirstlane_b32 s3, v4
-; TONGA-NEXT: v_readfirstlane_b32 s4, v5
-; TONGA-NEXT: v_readfirstlane_b32 s5, v6
-; TONGA-NEXT: s_xor_b32 s12, s3, s0
-; TONGA-NEXT: s_xor_b32 s0, s4, s1
-; TONGA-NEXT: s_xor_b32 s1, s5, s2
-; TONGA-NEXT: s_sub_i32 s2, 0, s13
-; TONGA-NEXT: s_ashr_i32 s18, s0, 31
-; TONGA-NEXT: s_sub_i32 s0, 0, s14
-; TONGA-NEXT: s_ashr_i32 s19, s1, 31
-; TONGA-NEXT: s_sub_i32 s1, 0, s15
-; TONGA-NEXT: v_rcp_iflag_f32_e32 v3, v3
-; TONGA-NEXT: v_mul_lo_u32 v4, s2, v0
-; TONGA-NEXT: v_mul_lo_u32 v5, s0, v1
-; TONGA-NEXT: v_mul_lo_u32 v6, s1, v2
-; TONGA-NEXT: v_mul_f32_e32 v3, 0x4f7ffffe, v3
-; TONGA-NEXT: v_cvt_u32_f32_e32 v3, v3
-; TONGA-NEXT: v_mul_hi_u32 v4, v0, v4
-; TONGA-NEXT: v_mul_hi_u32 v5, v1, v5
-; TONGA-NEXT: v_mul_hi_u32 v6, v2, v6
-; TONGA-NEXT: s_sub_i32 s20, 0, s17
-; TONGA-NEXT: v_readfirstlane_b32 s7, v7
-; TONGA-NEXT: s_abs_i32 s3, s3
-; TONGA-NEXT: s_abs_i32 s4, s4
-; TONGA-NEXT: s_abs_i32 s5, s5
-; TONGA-NEXT: v_mul_lo_u32 v7, s20, v3
-; TONGA-NEXT: v_add_u32_e32 v0, vcc, v0, v4
-; TONGA-NEXT: v_add_u32_e32 v1, vcc, v1, v5
-; TONGA-NEXT: v_add_u32_e32 v2, vcc, v2, v6
-; TONGA-NEXT: v_mul_hi_u32 v0, s3, v0
-; TONGA-NEXT: v_mul_hi_u32 v1, s4, v1
-; TONGA-NEXT: v_mul_hi_u32 v2, s5, v2
-; TONGA-NEXT: v_mul_hi_u32 v7, v3, v7
-; TONGA-NEXT: v_mul_lo_u32 v4, v0, s13
-; TONGA-NEXT: v_mul_lo_u32 v6, v1, s14
-; TONGA-NEXT: v_mul_lo_u32 v8, v2, s15
-; TONGA-NEXT: s_abs_i32 s16, s7
-; TONGA-NEXT: v_add_u32_e32 v3, vcc, v3, v7
-; TONGA-NEXT: v_mul_hi_u32 v3, s16, v3
-; TONGA-NEXT: v_sub_u32_e32 v4, vcc, s3, v4
-; TONGA-NEXT: v_sub_u32_e32 v6, vcc, s4, v6
-; TONGA-NEXT: v_sub_u32_e32 v8, vcc, s5, v8
-; TONGA-NEXT: v_add_u32_e32 v5, vcc, 1, v0
-; TONGA-NEXT: v_add_u32_e32 v7, vcc, 1, v1
-; TONGA-NEXT: v_add_u32_e32 v9, vcc, 1, v2
-; TONGA-NEXT: v_cmp_le_u32_e64 s[0:1], s13, v4
-; TONGA-NEXT: v_cmp_le_u32_e64 s[2:3], s14, v6
-; TONGA-NEXT: v_cmp_le_u32_e64 s[4:5], s15, v8
-; TONGA-NEXT: v_subrev_u32_e32 v10, vcc, s13, v4
-; TONGA-NEXT: v_cndmask_b32_e64 v0, v0, v5, s[0:1]
-; TONGA-NEXT: v_subrev_u32_e32 v5, vcc, s14, v6
-; TONGA-NEXT: v_cndmask_b32_e64 v1, v1, v7, s[2:3]
-; TONGA-NEXT: v_subrev_u32_e32 v7, vcc, s15, v8
-; TONGA-NEXT: v_cndmask_b32_e64 v2, v2, v9, s[4:5]
-; TONGA-NEXT: v_cndmask_b32_e64 v4, v4, v10, s[0:1]
-; TONGA-NEXT: v_add_u32_e32 v9, vcc, 1, v0
-; TONGA-NEXT: v_cndmask_b32_e64 v5, v6, v5, s[2:3]
-; TONGA-NEXT: v_add_u32_e32 v6, vcc, 1, v1
-; TONGA-NEXT: v_cndmask_b32_e64 v7, v8, v7, s[4:5]
-; TONGA-NEXT: v_add_u32_e32 v8, vcc, 1, v2
-; TONGA-NEXT: v_cmp_le_u32_e32 vcc, s13, v4
-; TONGA-NEXT: v_mul_lo_u32 v4, v3, s17
-; TONGA-NEXT: v_cndmask_b32_e32 v0, v0, v9, vcc
-; TONGA-NEXT: v_cmp_le_u32_e32 vcc, s14, v5
-; TONGA-NEXT: v_cndmask_b32_e32 v1, v1, v6, vcc
-; TONGA-NEXT: v_cmp_le_u32_e32 vcc, s15, v7
-; TONGA-NEXT: s_ashr_i32 s12, s12, 31
-; TONGA-NEXT: v_cndmask_b32_e32 v2, v2, v8, vcc
-; TONGA-NEXT: v_xor_b32_e32 v0, s12, v0
-; TONGA-NEXT: v_xor_b32_e32 v1, s18, v1
-; TONGA-NEXT: v_xor_b32_e32 v2, s19, v2
-; TONGA-NEXT: v_sub_u32_e32 v4, vcc, s16, v4
-; TONGA-NEXT: v_subrev_u32_e32 v0, vcc, s12, v0
-; TONGA-NEXT: v_subrev_u32_e32 v1, vcc, s18, v1
-; TONGA-NEXT: v_subrev_u32_e32 v2, vcc, s19, v2
-; TONGA-NEXT: v_add_u32_e32 v5, vcc, 1, v3
-; TONGA-NEXT: v_subrev_u32_e32 v6, vcc, s17, v4
-; TONGA-NEXT: v_cmp_le_u32_e32 vcc, s17, v4
-; TONGA-NEXT: v_cndmask_b32_e32 v3, v3, v5, vcc
-; TONGA-NEXT: v_cndmask_b32_e32 v4, v4, v6, vcc
-; TONGA-NEXT: v_add_u32_e32 v5, vcc, 1, v3
-; TONGA-NEXT: s_xor_b32 s0, s7, s6
-; TONGA-NEXT: v_cmp_le_u32_e32 vcc, s17, v4
-; TONGA-NEXT: s_ashr_i32 s0, s0, 31
-; TONGA-NEXT: v_cndmask_b32_e32 v3, v3, v5, vcc
-; TONGA-NEXT: v_xor_b32_e32 v3, s0, v3
-; TONGA-NEXT: v_subrev_u32_e32 v3, vcc, s0, v3
-; TONGA-NEXT: buffer_store_dwordx4 v[0:3], off, s[8:11], 0
+; TONGA-NEXT: v_cvt_u32_f32_e32 v9, v0
+; TONGA-NEXT: v_cndmask_b32_e64 v0, v10, v13, s[0:1]
+; TONGA-NEXT: v_xor_b32_e32 v0, v0, v8
+; TONGA-NEXT: v_sub_u32_e32 v0, vcc, v0, v8
+; TONGA-NEXT: v_mul_lo_u32 v8, v4, v6
+; TONGA-NEXT: v_add_u32_e32 v12, vcc, 1, v1
+; TONGA-NEXT: v_sub_u32_e32 v10, vcc, 0, v5
+; TONGA-NEXT: v_sub_u32_e32 v2, vcc, v2, v8
+; TONGA-NEXT: v_cndmask_b32_e64 v1, v1, v12, s[2:3]
+; TONGA-NEXT: v_mul_lo_u32 v10, v10, v9
+; TONGA-NEXT: v_add_u32_e32 v8, vcc, 1, v4
+; TONGA-NEXT: v_cmp_ge_u32_e64 s[0:1], v2, v6
+; TONGA-NEXT: v_xor_b32_e32 v1, v1, v11
+; TONGA-NEXT: v_cndmask_b32_e64 v4, v4, v8, s[0:1]
+; TONGA-NEXT: v_sub_u32_e32 v8, vcc, v2, v6
+; TONGA-NEXT: v_sub_u32_e32 v1, vcc, v1, v11
+; TONGA-NEXT: v_cndmask_b32_e64 v2, v2, v8, s[0:1]
+; TONGA-NEXT: v_add_u32_e32 v8, vcc, 1, v4
+; TONGA-NEXT: v_cmp_ge_u32_e32 vcc, v2, v6
+; TONGA-NEXT: v_cndmask_b32_e32 v2, v4, v8, vcc
+; TONGA-NEXT: v_mul_hi_u32 v4, v9, v10
+; TONGA-NEXT: v_sub_u32_e32 v6, vcc, 0, v3
+; TONGA-NEXT: v_max_i32_e32 v6, v3, v6
+; TONGA-NEXT: v_add_u32_e32 v4, vcc, v9, v4
+; TONGA-NEXT: v_mul_hi_u32 v4, v6, v4
+; TONGA-NEXT: v_xor_b32_e32 v2, v2, v14
+; TONGA-NEXT: v_sub_u32_e32 v2, vcc, v2, v14
+; TONGA-NEXT: v_mul_lo_u32 v8, v4, v5
+; TONGA-NEXT: v_xor_b32_e32 v3, v3, v7
+; TONGA-NEXT: v_add_u32_e32 v7, vcc, 1, v4
+; TONGA-NEXT: v_sub_u32_e32 v6, vcc, v6, v8
+; TONGA-NEXT: v_sub_u32_e32 v8, vcc, v6, v5
+; TONGA-NEXT: v_cmp_ge_u32_e32 vcc, v6, v5
+; TONGA-NEXT: v_cndmask_b32_e32 v4, v4, v7, vcc
+; TONGA-NEXT: v_cndmask_b32_e32 v6, v6, v8, vcc
+; TONGA-NEXT: v_add_u32_e32 v7, vcc, 1, v4
+; TONGA-NEXT: v_cmp_ge_u32_e32 vcc, v6, v5
+; TONGA-NEXT: v_ashrrev_i32_e32 v3, 31, v3
+; TONGA-NEXT: v_cndmask_b32_e32 v4, v4, v7, vcc
+; TONGA-NEXT: v_xor_b32_e32 v4, v4, v3
+; TONGA-NEXT: v_sub_u32_e32 v3, vcc, v4, v3
+; TONGA-NEXT: buffer_store_dwordx4 v[0:3], off, s[4:7], 0
; TONGA-NEXT: s_endpgm
;
; GFX9-LABEL: sdiv_v4i32:
@@ -2006,7 +1994,7 @@ define amdgpu_kernel void @v_sdiv_i25(ptr addrspace(1) %out, ptr addrspace(1) %i
; GCN-NEXT: v_mul_lo_u32 v1, v3, v2
; GCN-NEXT: v_add_i32_e32 v4, vcc, 1, v3
; GCN-NEXT: v_sub_i32_e32 v1, vcc, v5, v1
-; GCN-NEXT: v_subrev_i32_e32 v5, vcc, v2, v1
+; GCN-NEXT: v_sub_i32_e32 v5, vcc, v1, v2
; GCN-NEXT: v_cmp_ge_u32_e32 vcc, v1, v2
; GCN-NEXT: v_cndmask_b32_e32 v3, v3, v4, vcc
; GCN-NEXT: v_cndmask_b32_e32 v1, v1, v5, vcc
@@ -2014,7 +2002,7 @@ define amdgpu_kernel void @v_sdiv_i25(ptr addrspace(1) %out, ptr addrspace(1) %i
; GCN-NEXT: v_cmp_ge_u32_e32 vcc, v1, v2
; GCN-NEXT: v_cndmask_b32_e32 v1, v3, v4, vcc
; GCN-NEXT: v_xor_b32_e32 v1, v1, v0
-; GCN-NEXT: v_subrev_i32_e32 v0, vcc, v0, v1
+; GCN-NEXT: v_sub_i32_e32 v0, vcc, v1, v0
; GCN-NEXT: v_bfe_i32 v0, v0, 0, 25
; GCN-NEXT: buffer_store_dword v0, off, s[0:3], 0
; GCN-NEXT: s_endpgm
@@ -2053,7 +2041,7 @@ define amdgpu_kernel void @v_sdiv_i25(ptr addrspace(1) %out, ptr addrspace(1) %i
; TONGA-NEXT: v_mul_lo_u32 v1, v3, v2
; TONGA-NEXT: v_add_u32_e32 v4, vcc, 1, v3
; TONGA-NEXT: v_sub_u32_e32 v1, vcc, v5, v1
-; TONGA-NEXT: v_subrev_u32_e32 v5, vcc, v2, v1
+; TONGA-NEXT: v_sub_u32_e32 v5, vcc, v1, v2
; TONGA-NEXT: v_cmp_ge_u32_e32 vcc, v1, v2
; TONGA-NEXT: v_cndmask_b32_e32 v3, v3, v4, vcc
; TONGA-NEXT: v_cndmask_b32_e32 v1, v1, v5, vcc
@@ -2061,7 +2049,7 @@ define amdgpu_kernel void @v_sdiv_i25(ptr addrspace(1) %out, ptr addrspace(1) %i
; TONGA-NEXT: v_cmp_ge_u32_e32 vcc, v1, v2
; TONGA-NEXT: v_cndmask_b32_e32 v1, v3, v4, vcc
; TONGA-NEXT: v_xor_b32_e32 v1, v1, v0
-; TONGA-NEXT: v_subrev_u32_e32 v0, vcc, v0, v1
+; TONGA-NEXT: v_sub_u32_e32 v0, vcc, v1, v0
; TONGA-NEXT: v_bfe_i32 v0, v0, 0, 25
; TONGA-NEXT: buffer_store_dword v0, off, s[0:3], 0
; TONGA-NEXT: s_endpgm
diff --git a/llvm/test/CodeGen/AMDGPU/select.f16.ll b/llvm/test/CodeGen/AMDGPU/select.f16.ll
index bbdfc76..da454ee 100644
--- a/llvm/test/CodeGen/AMDGPU/select.f16.ll
+++ b/llvm/test/CodeGen/AMDGPU/select.f16.ll
@@ -852,19 +852,19 @@ define amdgpu_kernel void @select_v2f16(
; GFX11-TRUE16-NEXT: s_mov_b32 s3, 0x31016000
; GFX11-TRUE16-NEXT: s_mov_b32 s22, s2
; GFX11-TRUE16-NEXT: s_mov_b32 s23, s3
-; GFX11-TRUE16-NEXT: s_mov_b32 s26, s2
-; GFX11-TRUE16-NEXT: s_mov_b32 s27, s3
; GFX11-TRUE16-NEXT: s_mov_b32 s18, s2
; GFX11-TRUE16-NEXT: s_mov_b32 s19, s3
+; GFX11-TRUE16-NEXT: s_mov_b32 s26, s2
+; GFX11-TRUE16-NEXT: s_mov_b32 s27, s3
; GFX11-TRUE16-NEXT: s_mov_b32 s6, s2
; GFX11-TRUE16-NEXT: s_mov_b32 s7, s3
; GFX11-TRUE16-NEXT: s_waitcnt lgkmcnt(0)
; GFX11-TRUE16-NEXT: s_mov_b32 s20, s12
; GFX11-TRUE16-NEXT: s_mov_b32 s21, s13
-; GFX11-TRUE16-NEXT: s_mov_b32 s24, s14
-; GFX11-TRUE16-NEXT: s_mov_b32 s25, s15
; GFX11-TRUE16-NEXT: s_mov_b32 s16, s10
; GFX11-TRUE16-NEXT: s_mov_b32 s17, s11
+; GFX11-TRUE16-NEXT: s_mov_b32 s24, s14
+; GFX11-TRUE16-NEXT: s_mov_b32 s25, s15
; GFX11-TRUE16-NEXT: buffer_load_b32 v0, off, s[20:23], 0
; GFX11-TRUE16-NEXT: buffer_load_b32 v1, off, s[16:19], 0
; GFX11-TRUE16-NEXT: buffer_load_b32 v2, off, s[24:27], 0
@@ -874,20 +874,18 @@ define amdgpu_kernel void @select_v2f16(
; GFX11-TRUE16-NEXT: v_lshrrev_b32_e32 v4, 16, v0
; GFX11-TRUE16-NEXT: s_waitcnt vmcnt(2)
; GFX11-TRUE16-NEXT: v_lshrrev_b32_e32 v5, 16, v1
-; GFX11-TRUE16-NEXT: v_cmp_lt_f16_e32 vcc_lo, v1.l, v0.l
; GFX11-TRUE16-NEXT: s_waitcnt vmcnt(1)
-; GFX11-TRUE16-NEXT: v_mov_b16_e32 v0.l, v2.l
-; GFX11-TRUE16-NEXT: v_lshrrev_b32_e32 v1, 16, v2
+; GFX11-TRUE16-NEXT: v_lshrrev_b32_e32 v6, 16, v2
; GFX11-TRUE16-NEXT: s_waitcnt vmcnt(0)
-; GFX11-TRUE16-NEXT: v_lshrrev_b32_e32 v2, 16, v3
+; GFX11-TRUE16-NEXT: v_lshrrev_b32_e32 v7, 16, v3
+; GFX11-TRUE16-NEXT: v_cmp_lt_f16_e32 vcc_lo, v1.l, v0.l
+; GFX11-TRUE16-NEXT: v_mov_b16_e32 v0.l, v2.l
; GFX11-TRUE16-NEXT: v_cmp_lt_f16_e64 s0, v5.l, v4.l
+; GFX11-TRUE16-NEXT: v_mov_b16_e32 v1.l, v6.l
+; GFX11-TRUE16-NEXT: s_delay_alu instid0(VALU_DEP_3) | instskip(NEXT) | instid1(VALU_DEP_2)
; GFX11-TRUE16-NEXT: v_cndmask_b16 v0.l, v3.l, v0.l, vcc_lo
-; GFX11-TRUE16-NEXT: s_delay_alu instid0(VALU_DEP_2) | instskip(SKIP_1) | instid1(VALU_DEP_2)
-; GFX11-TRUE16-NEXT: v_cndmask_b16 v1.l, v2.l, v1.l, s0
+; GFX11-TRUE16-NEXT: v_cndmask_b16 v0.h, v7.l, v1.l, s0
; GFX11-TRUE16-NEXT: s_mov_b32 s0, s8
-; GFX11-TRUE16-NEXT: v_and_b32_e32 v0, 0xffff, v0
-; GFX11-TRUE16-NEXT: s_delay_alu instid0(VALU_DEP_1)
-; GFX11-TRUE16-NEXT: v_lshl_or_b32 v0, v1, 16, v0
; GFX11-TRUE16-NEXT: buffer_store_b32 v0, off, s[0:3], 0
; GFX11-TRUE16-NEXT: s_endpgm
;
@@ -1058,21 +1056,18 @@ define amdgpu_kernel void @select_v2f16_imm_a(
; GFX11-TRUE16-NEXT: buffer_load_b32 v2, off, s[20:23], 0
; GFX11-TRUE16-NEXT: s_waitcnt vmcnt(2)
; GFX11-TRUE16-NEXT: v_lshrrev_b32_e32 v3, 16, v0
-; GFX11-TRUE16-NEXT: v_cmp_lt_f16_e32 vcc_lo, 0.5, v0.l
; GFX11-TRUE16-NEXT: s_waitcnt vmcnt(1)
; GFX11-TRUE16-NEXT: v_lshrrev_b32_e32 v4, 16, v1
+; GFX11-TRUE16-NEXT: v_cmp_lt_f16_e32 vcc_lo, 0.5, v0.l
; GFX11-TRUE16-NEXT: v_mov_b16_e32 v0.l, v1.l
; GFX11-TRUE16-NEXT: s_waitcnt vmcnt(0)
; GFX11-TRUE16-NEXT: v_lshrrev_b32_e32 v5, 16, v2
; GFX11-TRUE16-NEXT: v_cmp_lt_f16_e64 s0, 0x3900, v3.l
; GFX11-TRUE16-NEXT: v_mov_b16_e32 v1.l, v4.l
; GFX11-TRUE16-NEXT: v_cndmask_b16 v0.l, v2.l, v0.l, vcc_lo
-; GFX11-TRUE16-NEXT: s_delay_alu instid0(VALU_DEP_2) | instskip(NEXT) | instid1(VALU_DEP_2)
-; GFX11-TRUE16-NEXT: v_cndmask_b16 v1.l, v5.l, v1.l, s0
-; GFX11-TRUE16-NEXT: v_and_b32_e32 v0, 0xffff, v0
+; GFX11-TRUE16-NEXT: s_delay_alu instid0(VALU_DEP_2)
+; GFX11-TRUE16-NEXT: v_cndmask_b16 v0.h, v5.l, v1.l, s0
; GFX11-TRUE16-NEXT: s_mov_b32 s0, s4
-; GFX11-TRUE16-NEXT: s_delay_alu instid0(VALU_DEP_1)
-; GFX11-TRUE16-NEXT: v_lshl_or_b32 v0, v1, 16, v0
; GFX11-TRUE16-NEXT: buffer_store_b32 v0, off, s[0:3], 0
; GFX11-TRUE16-NEXT: s_endpgm
;
@@ -1236,21 +1231,18 @@ define amdgpu_kernel void @select_v2f16_imm_b(
; GFX11-TRUE16-NEXT: buffer_load_b32 v2, off, s[20:23], 0
; GFX11-TRUE16-NEXT: s_waitcnt vmcnt(2)
; GFX11-TRUE16-NEXT: v_lshrrev_b32_e32 v3, 16, v0
-; GFX11-TRUE16-NEXT: v_cmp_gt_f16_e32 vcc_lo, 0.5, v0.l
; GFX11-TRUE16-NEXT: s_waitcnt vmcnt(1)
; GFX11-TRUE16-NEXT: v_lshrrev_b32_e32 v4, 16, v1
+; GFX11-TRUE16-NEXT: v_cmp_gt_f16_e32 vcc_lo, 0.5, v0.l
; GFX11-TRUE16-NEXT: v_mov_b16_e32 v0.l, v1.l
; GFX11-TRUE16-NEXT: s_waitcnt vmcnt(0)
; GFX11-TRUE16-NEXT: v_lshrrev_b32_e32 v5, 16, v2
; GFX11-TRUE16-NEXT: v_cmp_gt_f16_e64 s0, 0x3900, v3.l
; GFX11-TRUE16-NEXT: v_mov_b16_e32 v1.l, v4.l
; GFX11-TRUE16-NEXT: v_cndmask_b16 v0.l, v2.l, v0.l, vcc_lo
-; GFX11-TRUE16-NEXT: s_delay_alu instid0(VALU_DEP_2) | instskip(NEXT) | instid1(VALU_DEP_2)
-; GFX11-TRUE16-NEXT: v_cndmask_b16 v1.l, v5.l, v1.l, s0
-; GFX11-TRUE16-NEXT: v_and_b32_e32 v0, 0xffff, v0
+; GFX11-TRUE16-NEXT: s_delay_alu instid0(VALU_DEP_2)
+; GFX11-TRUE16-NEXT: v_cndmask_b16 v0.h, v5.l, v1.l, s0
; GFX11-TRUE16-NEXT: s_mov_b32 s0, s4
-; GFX11-TRUE16-NEXT: s_delay_alu instid0(VALU_DEP_1)
-; GFX11-TRUE16-NEXT: v_lshl_or_b32 v0, v1, 16, v0
; GFX11-TRUE16-NEXT: buffer_store_b32 v0, off, s[0:3], 0
; GFX11-TRUE16-NEXT: s_endpgm
;
@@ -1402,8 +1394,6 @@ define amdgpu_kernel void @select_v2f16_imm_c(
; GFX11-TRUE16-NEXT: s_mov_b32 s19, s3
; GFX11-TRUE16-NEXT: s_mov_b32 s14, s2
; GFX11-TRUE16-NEXT: s_mov_b32 s15, s3
-; GFX11-TRUE16-NEXT: s_mov_b32 s22, s2
-; GFX11-TRUE16-NEXT: s_mov_b32 s23, s3
; GFX11-TRUE16-NEXT: s_waitcnt lgkmcnt(0)
; GFX11-TRUE16-NEXT: s_mov_b32 s16, s8
; GFX11-TRUE16-NEXT: s_mov_b32 s17, s9
@@ -1411,10 +1401,10 @@ define amdgpu_kernel void @select_v2f16_imm_c(
; GFX11-TRUE16-NEXT: s_mov_b32 s13, s7
; GFX11-TRUE16-NEXT: buffer_load_b32 v0, off, s[16:19], 0
; GFX11-TRUE16-NEXT: buffer_load_b32 v1, off, s[12:15], 0
-; GFX11-TRUE16-NEXT: s_mov_b32 s20, s10
-; GFX11-TRUE16-NEXT: s_mov_b32 s21, s11
+; GFX11-TRUE16-NEXT: s_mov_b32 s12, s10
+; GFX11-TRUE16-NEXT: s_mov_b32 s13, s11
; GFX11-TRUE16-NEXT: s_mov_b32 s1, s5
-; GFX11-TRUE16-NEXT: buffer_load_b32 v2, off, s[20:23], 0
+; GFX11-TRUE16-NEXT: buffer_load_b32 v2, off, s[12:15], 0
; GFX11-TRUE16-NEXT: s_waitcnt vmcnt(2)
; GFX11-TRUE16-NEXT: v_lshrrev_b32_e32 v3, 16, v0
; GFX11-TRUE16-NEXT: s_waitcnt vmcnt(1)
@@ -1425,12 +1415,9 @@ define amdgpu_kernel void @select_v2f16_imm_c(
; GFX11-TRUE16-NEXT: s_delay_alu instid0(VALU_DEP_3) | instskip(SKIP_1) | instid1(VALU_DEP_2)
; GFX11-TRUE16-NEXT: v_cmp_nlt_f16_e64 s0, v4.l, v3.l
; GFX11-TRUE16-NEXT: v_cndmask_b16 v1.l, 0x3800, v2.l, vcc_lo
-; GFX11-TRUE16-NEXT: v_cndmask_b16 v0.l, 0x3900, v0.l, s0
-; GFX11-TRUE16-NEXT: s_delay_alu instid0(VALU_DEP_2) | instskip(SKIP_1) | instid1(VALU_DEP_1)
-; GFX11-TRUE16-NEXT: v_and_b32_e32 v1, 0xffff, v1
+; GFX11-TRUE16-NEXT: v_cndmask_b16 v1.h, 0x3900, v0.l, s0
; GFX11-TRUE16-NEXT: s_mov_b32 s0, s4
-; GFX11-TRUE16-NEXT: v_lshl_or_b32 v0, v0, 16, v1
-; GFX11-TRUE16-NEXT: buffer_store_b32 v0, off, s[0:3], 0
+; GFX11-TRUE16-NEXT: buffer_store_b32 v1, off, s[0:3], 0
; GFX11-TRUE16-NEXT: s_endpgm
;
; GFX11-FAKE16-LABEL: select_v2f16_imm_c:
@@ -1581,8 +1568,6 @@ define amdgpu_kernel void @select_v2f16_imm_d(
; GFX11-TRUE16-NEXT: s_mov_b32 s19, s3
; GFX11-TRUE16-NEXT: s_mov_b32 s14, s2
; GFX11-TRUE16-NEXT: s_mov_b32 s15, s3
-; GFX11-TRUE16-NEXT: s_mov_b32 s22, s2
-; GFX11-TRUE16-NEXT: s_mov_b32 s23, s3
; GFX11-TRUE16-NEXT: s_waitcnt lgkmcnt(0)
; GFX11-TRUE16-NEXT: s_mov_b32 s16, s8
; GFX11-TRUE16-NEXT: s_mov_b32 s17, s9
@@ -1590,10 +1575,10 @@ define amdgpu_kernel void @select_v2f16_imm_d(
; GFX11-TRUE16-NEXT: s_mov_b32 s13, s7
; GFX11-TRUE16-NEXT: buffer_load_b32 v0, off, s[16:19], 0
; GFX11-TRUE16-NEXT: buffer_load_b32 v1, off, s[12:15], 0
-; GFX11-TRUE16-NEXT: s_mov_b32 s20, s10
-; GFX11-TRUE16-NEXT: s_mov_b32 s21, s11
+; GFX11-TRUE16-NEXT: s_mov_b32 s12, s10
+; GFX11-TRUE16-NEXT: s_mov_b32 s13, s11
; GFX11-TRUE16-NEXT: s_mov_b32 s1, s5
-; GFX11-TRUE16-NEXT: buffer_load_b32 v2, off, s[20:23], 0
+; GFX11-TRUE16-NEXT: buffer_load_b32 v2, off, s[12:15], 0
; GFX11-TRUE16-NEXT: s_waitcnt vmcnt(2)
; GFX11-TRUE16-NEXT: v_lshrrev_b32_e32 v3, 16, v0
; GFX11-TRUE16-NEXT: s_waitcnt vmcnt(1)
@@ -1604,12 +1589,9 @@ define amdgpu_kernel void @select_v2f16_imm_d(
; GFX11-TRUE16-NEXT: s_delay_alu instid0(VALU_DEP_3) | instskip(SKIP_1) | instid1(VALU_DEP_2)
; GFX11-TRUE16-NEXT: v_cmp_lt_f16_e64 s0, v4.l, v3.l
; GFX11-TRUE16-NEXT: v_cndmask_b16 v1.l, 0x3800, v2.l, vcc_lo
-; GFX11-TRUE16-NEXT: v_cndmask_b16 v0.l, 0x3900, v0.l, s0
-; GFX11-TRUE16-NEXT: s_delay_alu instid0(VALU_DEP_2) | instskip(SKIP_1) | instid1(VALU_DEP_1)
-; GFX11-TRUE16-NEXT: v_and_b32_e32 v1, 0xffff, v1
+; GFX11-TRUE16-NEXT: v_cndmask_b16 v1.h, 0x3900, v0.l, s0
; GFX11-TRUE16-NEXT: s_mov_b32 s0, s4
-; GFX11-TRUE16-NEXT: v_lshl_or_b32 v0, v0, 16, v1
-; GFX11-TRUE16-NEXT: buffer_store_b32 v0, off, s[0:3], 0
+; GFX11-TRUE16-NEXT: buffer_store_b32 v1, off, s[0:3], 0
; GFX11-TRUE16-NEXT: s_endpgm
;
; GFX11-FAKE16-LABEL: select_v2f16_imm_d:
diff --git a/llvm/test/CodeGen/AMDGPU/srem.ll b/llvm/test/CodeGen/AMDGPU/srem.ll
index 5944342..bbd1793 100644
--- a/llvm/test/CodeGen/AMDGPU/srem.ll
+++ b/llvm/test/CodeGen/AMDGPU/srem.ll
@@ -467,28 +467,28 @@ define amdgpu_kernel void @srem_v2i32(ptr addrspace(1) %out, ptr addrspace(1) %i
; GCN-NEXT: v_readfirstlane_b32 s2, v2
; GCN-NEXT: s_abs_i32 s2, s2
; GCN-NEXT: v_cvt_f32_u32_e32 v2, s2
-; GCN-NEXT: v_readfirstlane_b32 s3, v0
+; GCN-NEXT: v_readfirstlane_b32 s4, v0
; GCN-NEXT: s_sub_i32 s6, 0, s2
-; GCN-NEXT: s_ashr_i32 s5, s3, 31
+; GCN-NEXT: s_ashr_i32 s5, s4, 31
; GCN-NEXT: v_rcp_iflag_f32_e32 v2, v2
-; GCN-NEXT: s_abs_i32 s3, s3
-; GCN-NEXT: v_readfirstlane_b32 s4, v3
+; GCN-NEXT: s_abs_i32 s4, s4
+; GCN-NEXT: v_readfirstlane_b32 s3, v3
; GCN-NEXT: v_mul_f32_e32 v0, 0x4f7ffffe, v2
; GCN-NEXT: v_cvt_u32_f32_e32 v0, v0
; GCN-NEXT: v_readfirstlane_b32 s7, v0
; GCN-NEXT: s_mul_i32 s6, s6, s7
; GCN-NEXT: s_mul_hi_u32 s6, s7, s6
; GCN-NEXT: s_add_i32 s7, s7, s6
-; GCN-NEXT: s_mul_hi_u32 s6, s3, s7
+; GCN-NEXT: s_mul_hi_u32 s6, s4, s7
; GCN-NEXT: s_mul_i32 s6, s6, s2
-; GCN-NEXT: s_sub_i32 s3, s3, s6
-; GCN-NEXT: s_sub_i32 s6, s3, s2
-; GCN-NEXT: s_cmp_ge_u32 s3, s2
-; GCN-NEXT: s_cselect_b32 s3, s6, s3
-; GCN-NEXT: s_sub_i32 s6, s3, s2
-; GCN-NEXT: s_cmp_ge_u32 s3, s2
-; GCN-NEXT: s_cselect_b32 s2, s6, s3
-; GCN-NEXT: s_abs_i32 s3, s4
+; GCN-NEXT: s_sub_i32 s4, s4, s6
+; GCN-NEXT: s_sub_i32 s6, s4, s2
+; GCN-NEXT: s_cmp_ge_u32 s4, s2
+; GCN-NEXT: s_cselect_b32 s4, s6, s4
+; GCN-NEXT: s_sub_i32 s6, s4, s2
+; GCN-NEXT: s_cmp_ge_u32 s4, s2
+; GCN-NEXT: s_cselect_b32 s2, s6, s4
+; GCN-NEXT: s_abs_i32 s3, s3
; GCN-NEXT: v_cvt_f32_u32_e32 v0, s3
; GCN-NEXT: s_xor_b32 s2, s2, s5
; GCN-NEXT: s_sub_i32 s7, 0, s3
diff --git a/llvm/test/CodeGen/AMDGPU/strict_fsub.f16.ll b/llvm/test/CodeGen/AMDGPU/strict_fsub.f16.ll
index 4a6202ea..6daea57 100644
--- a/llvm/test/CodeGen/AMDGPU/strict_fsub.f16.ll
+++ b/llvm/test/CodeGen/AMDGPU/strict_fsub.f16.ll
@@ -788,12 +788,10 @@ define amdgpu_ps <2 x half> @s_constained_fsub_v2f16_fpexcept_strict(<2 x half>
;
; GFX11-SDAG-TRUE16-LABEL: s_constained_fsub_v2f16_fpexcept_strict:
; GFX11-SDAG-TRUE16: ; %bb.0:
-; GFX11-SDAG-TRUE16-NEXT: v_sub_f16_e64 v0.l, s2, s3
; GFX11-SDAG-TRUE16-NEXT: s_lshr_b32 s0, s3, 16
; GFX11-SDAG-TRUE16-NEXT: s_lshr_b32 s1, s2, 16
-; GFX11-SDAG-TRUE16-NEXT: v_sub_f16_e64 v1.l, s1, s0
-; GFX11-SDAG-TRUE16-NEXT: v_and_b32_e32 v0, 0xffff, v0
-; GFX11-SDAG-TRUE16-NEXT: v_lshl_or_b32 v0, v1, 16, v0
+; GFX11-SDAG-TRUE16-NEXT: v_sub_f16_e64 v0.l, s2, s3
+; GFX11-SDAG-TRUE16-NEXT: v_sub_f16_e64 v0.h, s1, s0
; GFX11-SDAG-TRUE16-NEXT: ; return to shader part epilog
;
; GFX11-SDAG-FAKE16-LABEL: s_constained_fsub_v2f16_fpexcept_strict:
diff --git a/llvm/test/CodeGen/AMDGPU/sub.v2i16.ll b/llvm/test/CodeGen/AMDGPU/sub.v2i16.ll
index cd1c532..6a273e5 100644
--- a/llvm/test/CodeGen/AMDGPU/sub.v2i16.ll
+++ b/llvm/test/CodeGen/AMDGPU/sub.v2i16.ll
@@ -813,7 +813,7 @@ define amdgpu_kernel void @v_test_sub_v2i16_zext_to_v2i64(ptr addrspace(1) %out,
; GFX11-TRUE16-NEXT: s_load_b128 s[0:3], s[4:5], 0x24
; GFX11-TRUE16-NEXT: s_load_b64 s[4:5], s[4:5], 0x34
; GFX11-TRUE16-NEXT: v_and_b32_e32 v0, 0x3ff, v0
-; GFX11-TRUE16-NEXT: v_mov_b16_e32 v2.l, 0
+; GFX11-TRUE16-NEXT: v_mov_b16_e32 v2.h, 0
; GFX11-TRUE16-NEXT: s_delay_alu instid0(VALU_DEP_2)
; GFX11-TRUE16-NEXT: v_lshlrev_b32_e32 v0, 2, v0
; GFX11-TRUE16-NEXT: s_waitcnt lgkmcnt(0)
@@ -825,11 +825,9 @@ define amdgpu_kernel void @v_test_sub_v2i16_zext_to_v2i64(ptr addrspace(1) %out,
; GFX11-TRUE16-NEXT: s_mov_b32 s2, -1
; GFX11-TRUE16-NEXT: v_pk_sub_i16 v0, v1, v0
; GFX11-TRUE16-NEXT: v_mov_b32_e32 v1, 0
-; GFX11-TRUE16-NEXT: s_delay_alu instid0(VALU_DEP_2) | instskip(SKIP_1) | instid1(VALU_DEP_2)
-; GFX11-TRUE16-NEXT: v_lshrrev_b32_e32 v3, 16, v0
-; GFX11-TRUE16-NEXT: v_mov_b16_e32 v0.h, v2.l
-; GFX11-TRUE16-NEXT: v_lshl_or_b32 v2, v2, 16, v3
-; GFX11-TRUE16-NEXT: s_delay_alu instid0(VALU_DEP_4)
+; GFX11-TRUE16-NEXT: s_delay_alu instid0(VALU_DEP_2) | instskip(SKIP_1) | instid1(VALU_DEP_3)
+; GFX11-TRUE16-NEXT: v_mov_b16_e32 v2.l, v0.h
+; GFX11-TRUE16-NEXT: v_mov_b16_e32 v0.h, v2.h
; GFX11-TRUE16-NEXT: v_mov_b32_e32 v3, v1
; GFX11-TRUE16-NEXT: buffer_store_b128 v[0:3], off, s[0:3], 0
; GFX11-TRUE16-NEXT: s_endpgm
diff --git a/llvm/test/CodeGen/AMDGPU/v_sat_pk_u8_i16.ll b/llvm/test/CodeGen/AMDGPU/v_sat_pk_u8_i16.ll
index c9b94e0..99b6ab7 100644
--- a/llvm/test/CodeGen/AMDGPU/v_sat_pk_u8_i16.ll
+++ b/llvm/test/CodeGen/AMDGPU/v_sat_pk_u8_i16.ll
@@ -189,14 +189,11 @@ define amdgpu_kernel void @basic_smax_smin_sgpr(ptr addrspace(1) %out, i32 inreg
; SDAG-GFX11-TRUE16-LABEL: basic_smax_smin_sgpr:
; SDAG-GFX11-TRUE16: ; %bb.0:
; SDAG-GFX11-TRUE16-NEXT: s_load_b128 s[0:3], s[4:5], 0x24
-; SDAG-GFX11-TRUE16-NEXT: v_mov_b32_e32 v2, 0
+; SDAG-GFX11-TRUE16-NEXT: v_mov_b32_e32 v0, 0
; SDAG-GFX11-TRUE16-NEXT: s_waitcnt lgkmcnt(0)
-; SDAG-GFX11-TRUE16-NEXT: v_med3_i16 v0.l, s2, 0, 0xff
-; SDAG-GFX11-TRUE16-NEXT: v_med3_i16 v1.l, s3, 0, 0xff
-; SDAG-GFX11-TRUE16-NEXT: s_delay_alu instid0(VALU_DEP_2) | instskip(NEXT) | instid1(VALU_DEP_1)
-; SDAG-GFX11-TRUE16-NEXT: v_and_b32_e32 v0, 0xffff, v0
-; SDAG-GFX11-TRUE16-NEXT: v_lshl_or_b32 v0, v1, 16, v0
-; SDAG-GFX11-TRUE16-NEXT: global_store_b32 v2, v0, s[0:1]
+; SDAG-GFX11-TRUE16-NEXT: v_med3_i16 v1.l, s2, 0, 0xff
+; SDAG-GFX11-TRUE16-NEXT: v_med3_i16 v1.h, s3, 0, 0xff
+; SDAG-GFX11-TRUE16-NEXT: global_store_b32 v0, v1, s[0:1]
; SDAG-GFX11-TRUE16-NEXT: s_endpgm
;
; SDAG-GFX11-FAKE16-LABEL: basic_smax_smin_sgpr:
@@ -215,14 +212,11 @@ define amdgpu_kernel void @basic_smax_smin_sgpr(ptr addrspace(1) %out, i32 inreg
; SDAG-GFX12-TRUE16-LABEL: basic_smax_smin_sgpr:
; SDAG-GFX12-TRUE16: ; %bb.0:
; SDAG-GFX12-TRUE16-NEXT: s_load_b128 s[0:3], s[4:5], 0x24
-; SDAG-GFX12-TRUE16-NEXT: v_mov_b32_e32 v2, 0
+; SDAG-GFX12-TRUE16-NEXT: v_mov_b32_e32 v0, 0
; SDAG-GFX12-TRUE16-NEXT: s_wait_kmcnt 0x0
-; SDAG-GFX12-TRUE16-NEXT: v_med3_i16 v0.l, s2, 0, 0xff
-; SDAG-GFX12-TRUE16-NEXT: v_med3_i16 v1.l, s3, 0, 0xff
-; SDAG-GFX12-TRUE16-NEXT: s_delay_alu instid0(VALU_DEP_2) | instskip(NEXT) | instid1(VALU_DEP_1)
-; SDAG-GFX12-TRUE16-NEXT: v_and_b32_e32 v0, 0xffff, v0
-; SDAG-GFX12-TRUE16-NEXT: v_lshl_or_b32 v0, v1, 16, v0
-; SDAG-GFX12-TRUE16-NEXT: global_store_b32 v2, v0, s[0:1]
+; SDAG-GFX12-TRUE16-NEXT: v_med3_i16 v1.l, s2, 0, 0xff
+; SDAG-GFX12-TRUE16-NEXT: v_med3_i16 v1.h, s3, 0, 0xff
+; SDAG-GFX12-TRUE16-NEXT: global_store_b32 v0, v1, s[0:1]
; SDAG-GFX12-TRUE16-NEXT: s_endpgm
;
; SDAG-GFX12-FAKE16-LABEL: basic_smax_smin_sgpr:
diff --git a/llvm/test/CodeGen/AMDGPU/vector-reduce-and.ll b/llvm/test/CodeGen/AMDGPU/vector-reduce-and.ll
index 801324e..dfc59f6 100644
--- a/llvm/test/CodeGen/AMDGPU/vector-reduce-and.ll
+++ b/llvm/test/CodeGen/AMDGPU/vector-reduce-and.ll
@@ -1023,10 +1023,11 @@ define i16 @test_vector_reduce_and_v2i16(<2 x i16> %v) {
; GFX11-SDAG-TRUE16-LABEL: test_vector_reduce_and_v2i16:
; GFX11-SDAG-TRUE16: ; %bb.0: ; %entry
; GFX11-SDAG-TRUE16-NEXT: s_waitcnt vmcnt(0) expcnt(0) lgkmcnt(0)
-; GFX11-SDAG-TRUE16-NEXT: v_lshrrev_b32_e32 v1, 16, v0
+; GFX11-SDAG-TRUE16-NEXT: v_mov_b16_e32 v2.l, v0.h
+; GFX11-SDAG-TRUE16-NEXT: v_mov_b32_e32 v1, s0
; GFX11-SDAG-TRUE16-NEXT: s_delay_alu instid0(VALU_DEP_1) | instskip(NEXT) | instid1(VALU_DEP_1)
-; GFX11-SDAG-TRUE16-NEXT: v_lshl_or_b32 v1, s0, 16, v1
-; GFX11-SDAG-TRUE16-NEXT: v_and_b32_e32 v0, v0, v1
+; GFX11-SDAG-TRUE16-NEXT: v_mov_b16_e32 v2.h, v1.l
+; GFX11-SDAG-TRUE16-NEXT: v_and_b32_e32 v0, v0, v2
; GFX11-SDAG-TRUE16-NEXT: s_setpc_b64 s[30:31]
;
; GFX11-SDAG-FAKE16-LABEL: test_vector_reduce_and_v2i16:
@@ -1052,10 +1053,11 @@ define i16 @test_vector_reduce_and_v2i16(<2 x i16> %v) {
; GFX12-SDAG-TRUE16-NEXT: s_wait_samplecnt 0x0
; GFX12-SDAG-TRUE16-NEXT: s_wait_bvhcnt 0x0
; GFX12-SDAG-TRUE16-NEXT: s_wait_kmcnt 0x0
-; GFX12-SDAG-TRUE16-NEXT: v_lshrrev_b32_e32 v1, 16, v0
+; GFX12-SDAG-TRUE16-NEXT: v_mov_b16_e32 v2.l, v0.h
+; GFX12-SDAG-TRUE16-NEXT: v_mov_b32_e32 v1, s0
; GFX12-SDAG-TRUE16-NEXT: s_delay_alu instid0(VALU_DEP_1) | instskip(NEXT) | instid1(VALU_DEP_1)
-; GFX12-SDAG-TRUE16-NEXT: v_lshl_or_b32 v1, s0, 16, v1
-; GFX12-SDAG-TRUE16-NEXT: v_and_b32_e32 v0, v0, v1
+; GFX12-SDAG-TRUE16-NEXT: v_mov_b16_e32 v2.h, v1.l
+; GFX12-SDAG-TRUE16-NEXT: v_and_b32_e32 v0, v0, v2
; GFX12-SDAG-TRUE16-NEXT: s_setpc_b64 s[30:31]
;
; GFX12-SDAG-FAKE16-LABEL: test_vector_reduce_and_v2i16:
diff --git a/llvm/test/CodeGen/AMDGPU/vector-reduce-mul.ll b/llvm/test/CodeGen/AMDGPU/vector-reduce-mul.ll
index 98919f5..4d5ade4 100644
--- a/llvm/test/CodeGen/AMDGPU/vector-reduce-mul.ll
+++ b/llvm/test/CodeGen/AMDGPU/vector-reduce-mul.ll
@@ -1024,10 +1024,11 @@ define i16 @test_vector_reduce_mul_v2i16(<2 x i16> %v) {
; GFX11-SDAG-TRUE16-LABEL: test_vector_reduce_mul_v2i16:
; GFX11-SDAG-TRUE16: ; %bb.0: ; %entry
; GFX11-SDAG-TRUE16-NEXT: s_waitcnt vmcnt(0) expcnt(0) lgkmcnt(0)
-; GFX11-SDAG-TRUE16-NEXT: v_lshrrev_b32_e32 v1, 16, v0
-; GFX11-SDAG-TRUE16-NEXT: s_delay_alu instid0(VALU_DEP_1) | instskip(NEXT) | instid1(VALU_DEP_1)
-; GFX11-SDAG-TRUE16-NEXT: v_lshl_or_b32 v1, s0, 16, v1
-; GFX11-SDAG-TRUE16-NEXT: v_pk_mul_lo_u16 v0, v0, v1
+; GFX11-SDAG-TRUE16-NEXT: v_mov_b32_e32 v1, s0
+; GFX11-SDAG-TRUE16-NEXT: v_mov_b16_e32 v2.l, v0.h
+; GFX11-SDAG-TRUE16-NEXT: s_delay_alu instid0(VALU_DEP_2) | instskip(NEXT) | instid1(VALU_DEP_1)
+; GFX11-SDAG-TRUE16-NEXT: v_mov_b16_e32 v2.h, v1.l
+; GFX11-SDAG-TRUE16-NEXT: v_pk_mul_lo_u16 v0, v0, v2
; GFX11-SDAG-TRUE16-NEXT: s_setpc_b64 s[30:31]
;
; GFX11-SDAG-FAKE16-LABEL: test_vector_reduce_mul_v2i16:
@@ -1053,10 +1054,11 @@ define i16 @test_vector_reduce_mul_v2i16(<2 x i16> %v) {
; GFX12-SDAG-TRUE16-NEXT: s_wait_samplecnt 0x0
; GFX12-SDAG-TRUE16-NEXT: s_wait_bvhcnt 0x0
; GFX12-SDAG-TRUE16-NEXT: s_wait_kmcnt 0x0
-; GFX12-SDAG-TRUE16-NEXT: v_lshrrev_b32_e32 v1, 16, v0
-; GFX12-SDAG-TRUE16-NEXT: s_delay_alu instid0(VALU_DEP_1) | instskip(NEXT) | instid1(VALU_DEP_1)
-; GFX12-SDAG-TRUE16-NEXT: v_lshl_or_b32 v1, s0, 16, v1
-; GFX12-SDAG-TRUE16-NEXT: v_pk_mul_lo_u16 v0, v0, v1
+; GFX12-SDAG-TRUE16-NEXT: v_mov_b32_e32 v1, s0
+; GFX12-SDAG-TRUE16-NEXT: v_mov_b16_e32 v2.l, v0.h
+; GFX12-SDAG-TRUE16-NEXT: s_delay_alu instid0(VALU_DEP_2) | instskip(NEXT) | instid1(VALU_DEP_1)
+; GFX12-SDAG-TRUE16-NEXT: v_mov_b16_e32 v2.h, v1.l
+; GFX12-SDAG-TRUE16-NEXT: v_pk_mul_lo_u16 v0, v0, v2
; GFX12-SDAG-TRUE16-NEXT: s_setpc_b64 s[30:31]
;
; GFX12-SDAG-FAKE16-LABEL: test_vector_reduce_mul_v2i16:
@@ -1298,11 +1300,12 @@ define i16 @test_vector_reduce_mul_v4i16(<4 x i16> %v) {
; GFX11-SDAG-TRUE16: ; %bb.0: ; %entry
; GFX11-SDAG-TRUE16-NEXT: s_waitcnt vmcnt(0) expcnt(0) lgkmcnt(0)
; GFX11-SDAG-TRUE16-NEXT: v_pk_mul_lo_u16 v0, v0, v1
-; GFX11-SDAG-TRUE16-NEXT: s_delay_alu instid0(VALU_DEP_1) | instskip(NEXT) | instid1(VALU_DEP_1)
-; GFX11-SDAG-TRUE16-NEXT: v_lshrrev_b32_e32 v1, 16, v0
-; GFX11-SDAG-TRUE16-NEXT: v_lshl_or_b32 v1, s0, 16, v1
+; GFX11-SDAG-TRUE16-NEXT: v_mov_b32_e32 v1, s0
+; GFX11-SDAG-TRUE16-NEXT: s_delay_alu instid0(VALU_DEP_2) | instskip(NEXT) | instid1(VALU_DEP_2)
+; GFX11-SDAG-TRUE16-NEXT: v_mov_b16_e32 v2.l, v0.h
+; GFX11-SDAG-TRUE16-NEXT: v_mov_b16_e32 v2.h, v1.l
; GFX11-SDAG-TRUE16-NEXT: s_delay_alu instid0(VALU_DEP_1)
-; GFX11-SDAG-TRUE16-NEXT: v_pk_mul_lo_u16 v0, v0, v1
+; GFX11-SDAG-TRUE16-NEXT: v_pk_mul_lo_u16 v0, v0, v2
; GFX11-SDAG-TRUE16-NEXT: s_setpc_b64 s[30:31]
;
; GFX11-SDAG-FAKE16-LABEL: test_vector_reduce_mul_v4i16:
@@ -1331,11 +1334,12 @@ define i16 @test_vector_reduce_mul_v4i16(<4 x i16> %v) {
; GFX12-SDAG-TRUE16-NEXT: s_wait_bvhcnt 0x0
; GFX12-SDAG-TRUE16-NEXT: s_wait_kmcnt 0x0
; GFX12-SDAG-TRUE16-NEXT: v_pk_mul_lo_u16 v0, v0, v1
-; GFX12-SDAG-TRUE16-NEXT: s_delay_alu instid0(VALU_DEP_1) | instskip(NEXT) | instid1(VALU_DEP_1)
-; GFX12-SDAG-TRUE16-NEXT: v_lshrrev_b32_e32 v1, 16, v0
-; GFX12-SDAG-TRUE16-NEXT: v_lshl_or_b32 v1, s0, 16, v1
+; GFX12-SDAG-TRUE16-NEXT: v_mov_b32_e32 v1, s0
+; GFX12-SDAG-TRUE16-NEXT: s_delay_alu instid0(VALU_DEP_2) | instskip(NEXT) | instid1(VALU_DEP_2)
+; GFX12-SDAG-TRUE16-NEXT: v_mov_b16_e32 v2.l, v0.h
+; GFX12-SDAG-TRUE16-NEXT: v_mov_b16_e32 v2.h, v1.l
; GFX12-SDAG-TRUE16-NEXT: s_delay_alu instid0(VALU_DEP_1)
-; GFX12-SDAG-TRUE16-NEXT: v_pk_mul_lo_u16 v0, v0, v1
+; GFX12-SDAG-TRUE16-NEXT: v_pk_mul_lo_u16 v0, v0, v2
; GFX12-SDAG-TRUE16-NEXT: s_setpc_b64 s[30:31]
;
; GFX12-SDAG-FAKE16-LABEL: test_vector_reduce_mul_v4i16:
@@ -1468,12 +1472,13 @@ define i16 @test_vector_reduce_mul_v8i16(<8 x i16> %v) {
; GFX11-SDAG-TRUE16-NEXT: s_waitcnt vmcnt(0) expcnt(0) lgkmcnt(0)
; GFX11-SDAG-TRUE16-NEXT: v_pk_mul_lo_u16 v1, v1, v3
; GFX11-SDAG-TRUE16-NEXT: v_pk_mul_lo_u16 v0, v0, v2
-; GFX11-SDAG-TRUE16-NEXT: s_delay_alu instid0(VALU_DEP_1) | instskip(NEXT) | instid1(VALU_DEP_1)
-; GFX11-SDAG-TRUE16-NEXT: v_pk_mul_lo_u16 v0, v0, v1
-; GFX11-SDAG-TRUE16-NEXT: v_lshrrev_b32_e32 v1, 16, v0
-; GFX11-SDAG-TRUE16-NEXT: s_delay_alu instid0(VALU_DEP_1) | instskip(NEXT) | instid1(VALU_DEP_1)
-; GFX11-SDAG-TRUE16-NEXT: v_lshl_or_b32 v1, s0, 16, v1
+; GFX11-SDAG-TRUE16-NEXT: s_delay_alu instid0(VALU_DEP_1) | instskip(SKIP_1) | instid1(VALU_DEP_2)
; GFX11-SDAG-TRUE16-NEXT: v_pk_mul_lo_u16 v0, v0, v1
+; GFX11-SDAG-TRUE16-NEXT: v_mov_b32_e32 v1, s0
+; GFX11-SDAG-TRUE16-NEXT: v_mov_b16_e32 v2.l, v0.h
+; GFX11-SDAG-TRUE16-NEXT: s_delay_alu instid0(VALU_DEP_2) | instskip(NEXT) | instid1(VALU_DEP_1)
+; GFX11-SDAG-TRUE16-NEXT: v_mov_b16_e32 v2.h, v1.l
+; GFX11-SDAG-TRUE16-NEXT: v_pk_mul_lo_u16 v0, v0, v2
; GFX11-SDAG-TRUE16-NEXT: s_setpc_b64 s[30:31]
;
; GFX11-SDAG-FAKE16-LABEL: test_vector_reduce_mul_v8i16:
@@ -1509,12 +1514,13 @@ define i16 @test_vector_reduce_mul_v8i16(<8 x i16> %v) {
; GFX12-SDAG-TRUE16-NEXT: s_wait_kmcnt 0x0
; GFX12-SDAG-TRUE16-NEXT: v_pk_mul_lo_u16 v1, v1, v3
; GFX12-SDAG-TRUE16-NEXT: v_pk_mul_lo_u16 v0, v0, v2
-; GFX12-SDAG-TRUE16-NEXT: s_delay_alu instid0(VALU_DEP_1) | instskip(NEXT) | instid1(VALU_DEP_1)
-; GFX12-SDAG-TRUE16-NEXT: v_pk_mul_lo_u16 v0, v0, v1
-; GFX12-SDAG-TRUE16-NEXT: v_lshrrev_b32_e32 v1, 16, v0
-; GFX12-SDAG-TRUE16-NEXT: s_delay_alu instid0(VALU_DEP_1) | instskip(NEXT) | instid1(VALU_DEP_1)
-; GFX12-SDAG-TRUE16-NEXT: v_lshl_or_b32 v1, s0, 16, v1
+; GFX12-SDAG-TRUE16-NEXT: s_delay_alu instid0(VALU_DEP_1) | instskip(SKIP_1) | instid1(VALU_DEP_2)
; GFX12-SDAG-TRUE16-NEXT: v_pk_mul_lo_u16 v0, v0, v1
+; GFX12-SDAG-TRUE16-NEXT: v_mov_b32_e32 v1, s0
+; GFX12-SDAG-TRUE16-NEXT: v_mov_b16_e32 v2.l, v0.h
+; GFX12-SDAG-TRUE16-NEXT: s_delay_alu instid0(VALU_DEP_2) | instskip(NEXT) | instid1(VALU_DEP_1)
+; GFX12-SDAG-TRUE16-NEXT: v_mov_b16_e32 v2.h, v1.l
+; GFX12-SDAG-TRUE16-NEXT: v_pk_mul_lo_u16 v0, v0, v2
; GFX12-SDAG-TRUE16-NEXT: s_setpc_b64 s[30:31]
;
; GFX12-SDAG-FAKE16-LABEL: test_vector_reduce_mul_v8i16:
@@ -1706,12 +1712,13 @@ define i16 @test_vector_reduce_mul_v16i16(<16 x i16> %v) {
; GFX11-SDAG-TRUE16-NEXT: s_delay_alu instid0(VALU_DEP_2) | instskip(NEXT) | instid1(VALU_DEP_2)
; GFX11-SDAG-TRUE16-NEXT: v_pk_mul_lo_u16 v1, v1, v3
; GFX11-SDAG-TRUE16-NEXT: v_pk_mul_lo_u16 v0, v0, v2
-; GFX11-SDAG-TRUE16-NEXT: s_delay_alu instid0(VALU_DEP_1) | instskip(NEXT) | instid1(VALU_DEP_1)
-; GFX11-SDAG-TRUE16-NEXT: v_pk_mul_lo_u16 v0, v0, v1
-; GFX11-SDAG-TRUE16-NEXT: v_lshrrev_b32_e32 v1, 16, v0
-; GFX11-SDAG-TRUE16-NEXT: s_delay_alu instid0(VALU_DEP_1) | instskip(NEXT) | instid1(VALU_DEP_1)
-; GFX11-SDAG-TRUE16-NEXT: v_lshl_or_b32 v1, s0, 16, v1
+; GFX11-SDAG-TRUE16-NEXT: s_delay_alu instid0(VALU_DEP_1) | instskip(SKIP_1) | instid1(VALU_DEP_2)
; GFX11-SDAG-TRUE16-NEXT: v_pk_mul_lo_u16 v0, v0, v1
+; GFX11-SDAG-TRUE16-NEXT: v_mov_b32_e32 v1, s0
+; GFX11-SDAG-TRUE16-NEXT: v_mov_b16_e32 v2.l, v0.h
+; GFX11-SDAG-TRUE16-NEXT: s_delay_alu instid0(VALU_DEP_2) | instskip(NEXT) | instid1(VALU_DEP_1)
+; GFX11-SDAG-TRUE16-NEXT: v_mov_b16_e32 v2.h, v1.l
+; GFX11-SDAG-TRUE16-NEXT: v_pk_mul_lo_u16 v0, v0, v2
; GFX11-SDAG-TRUE16-NEXT: s_setpc_b64 s[30:31]
;
; GFX11-SDAG-FAKE16-LABEL: test_vector_reduce_mul_v16i16:
@@ -1762,12 +1769,13 @@ define i16 @test_vector_reduce_mul_v16i16(<16 x i16> %v) {
; GFX12-SDAG-TRUE16-NEXT: s_delay_alu instid0(VALU_DEP_2) | instskip(NEXT) | instid1(VALU_DEP_2)
; GFX12-SDAG-TRUE16-NEXT: v_pk_mul_lo_u16 v1, v1, v3
; GFX12-SDAG-TRUE16-NEXT: v_pk_mul_lo_u16 v0, v0, v2
-; GFX12-SDAG-TRUE16-NEXT: s_delay_alu instid0(VALU_DEP_1) | instskip(NEXT) | instid1(VALU_DEP_1)
-; GFX12-SDAG-TRUE16-NEXT: v_pk_mul_lo_u16 v0, v0, v1
-; GFX12-SDAG-TRUE16-NEXT: v_lshrrev_b32_e32 v1, 16, v0
-; GFX12-SDAG-TRUE16-NEXT: s_delay_alu instid0(VALU_DEP_1) | instskip(NEXT) | instid1(VALU_DEP_1)
-; GFX12-SDAG-TRUE16-NEXT: v_lshl_or_b32 v1, s0, 16, v1
+; GFX12-SDAG-TRUE16-NEXT: s_delay_alu instid0(VALU_DEP_1) | instskip(SKIP_1) | instid1(VALU_DEP_2)
; GFX12-SDAG-TRUE16-NEXT: v_pk_mul_lo_u16 v0, v0, v1
+; GFX12-SDAG-TRUE16-NEXT: v_mov_b32_e32 v1, s0
+; GFX12-SDAG-TRUE16-NEXT: v_mov_b16_e32 v2.l, v0.h
+; GFX12-SDAG-TRUE16-NEXT: s_delay_alu instid0(VALU_DEP_2) | instskip(NEXT) | instid1(VALU_DEP_1)
+; GFX12-SDAG-TRUE16-NEXT: v_mov_b16_e32 v2.h, v1.l
+; GFX12-SDAG-TRUE16-NEXT: v_pk_mul_lo_u16 v0, v0, v2
; GFX12-SDAG-TRUE16-NEXT: s_setpc_b64 s[30:31]
;
; GFX12-SDAG-FAKE16-LABEL: test_vector_reduce_mul_v16i16:
diff --git a/llvm/test/CodeGen/AMDGPU/vector-reduce-or.ll b/llvm/test/CodeGen/AMDGPU/vector-reduce-or.ll
index bdb1c22..9e033f5 100644
--- a/llvm/test/CodeGen/AMDGPU/vector-reduce-or.ll
+++ b/llvm/test/CodeGen/AMDGPU/vector-reduce-or.ll
@@ -1046,10 +1046,11 @@ define i16 @test_vector_reduce_or_v2i16(<2 x i16> %v) {
; GFX11-SDAG-TRUE16-LABEL: test_vector_reduce_or_v2i16:
; GFX11-SDAG-TRUE16: ; %bb.0: ; %entry
; GFX11-SDAG-TRUE16-NEXT: s_waitcnt vmcnt(0) expcnt(0) lgkmcnt(0)
-; GFX11-SDAG-TRUE16-NEXT: v_lshrrev_b32_e32 v1, 16, v0
-; GFX11-SDAG-TRUE16-NEXT: s_delay_alu instid0(VALU_DEP_1) | instskip(NEXT) | instid1(VALU_DEP_1)
-; GFX11-SDAG-TRUE16-NEXT: v_lshl_or_b32 v1, s0, 16, v1
-; GFX11-SDAG-TRUE16-NEXT: v_or_b32_e32 v0, v0, v1
+; GFX11-SDAG-TRUE16-NEXT: v_mov_b32_e32 v1, s0
+; GFX11-SDAG-TRUE16-NEXT: v_mov_b16_e32 v2.l, v0.h
+; GFX11-SDAG-TRUE16-NEXT: s_delay_alu instid0(VALU_DEP_2) | instskip(NEXT) | instid1(VALU_DEP_1)
+; GFX11-SDAG-TRUE16-NEXT: v_mov_b16_e32 v2.h, v1.l
+; GFX11-SDAG-TRUE16-NEXT: v_or_b32_e32 v0, v0, v2
; GFX11-SDAG-TRUE16-NEXT: s_setpc_b64 s[30:31]
;
; GFX11-SDAG-FAKE16-LABEL: test_vector_reduce_or_v2i16:
@@ -1075,10 +1076,11 @@ define i16 @test_vector_reduce_or_v2i16(<2 x i16> %v) {
; GFX12-SDAG-TRUE16-NEXT: s_wait_samplecnt 0x0
; GFX12-SDAG-TRUE16-NEXT: s_wait_bvhcnt 0x0
; GFX12-SDAG-TRUE16-NEXT: s_wait_kmcnt 0x0
-; GFX12-SDAG-TRUE16-NEXT: v_lshrrev_b32_e32 v1, 16, v0
-; GFX12-SDAG-TRUE16-NEXT: s_delay_alu instid0(VALU_DEP_1) | instskip(NEXT) | instid1(VALU_DEP_1)
-; GFX12-SDAG-TRUE16-NEXT: v_lshl_or_b32 v1, s0, 16, v1
-; GFX12-SDAG-TRUE16-NEXT: v_or_b32_e32 v0, v0, v1
+; GFX12-SDAG-TRUE16-NEXT: v_mov_b32_e32 v1, s0
+; GFX12-SDAG-TRUE16-NEXT: v_mov_b16_e32 v2.l, v0.h
+; GFX12-SDAG-TRUE16-NEXT: s_delay_alu instid0(VALU_DEP_2) | instskip(NEXT) | instid1(VALU_DEP_1)
+; GFX12-SDAG-TRUE16-NEXT: v_mov_b16_e32 v2.h, v1.l
+; GFX12-SDAG-TRUE16-NEXT: v_or_b32_e32 v0, v0, v2
; GFX12-SDAG-TRUE16-NEXT: s_setpc_b64 s[30:31]
;
; GFX12-SDAG-FAKE16-LABEL: test_vector_reduce_or_v2i16:
diff --git a/llvm/test/CodeGen/AMDGPU/vector-reduce-xor.ll b/llvm/test/CodeGen/AMDGPU/vector-reduce-xor.ll
index cf344ea..166e6c4 100644
--- a/llvm/test/CodeGen/AMDGPU/vector-reduce-xor.ll
+++ b/llvm/test/CodeGen/AMDGPU/vector-reduce-xor.ll
@@ -992,10 +992,11 @@ define i16 @test_vector_reduce_xor_v2i16(<2 x i16> %v) {
; GFX11-SDAG-TRUE16-LABEL: test_vector_reduce_xor_v2i16:
; GFX11-SDAG-TRUE16: ; %bb.0: ; %entry
; GFX11-SDAG-TRUE16-NEXT: s_waitcnt vmcnt(0) expcnt(0) lgkmcnt(0)
-; GFX11-SDAG-TRUE16-NEXT: v_lshrrev_b32_e32 v1, 16, v0
-; GFX11-SDAG-TRUE16-NEXT: s_delay_alu instid0(VALU_DEP_1) | instskip(NEXT) | instid1(VALU_DEP_1)
-; GFX11-SDAG-TRUE16-NEXT: v_lshl_or_b32 v1, s0, 16, v1
-; GFX11-SDAG-TRUE16-NEXT: v_xor_b32_e32 v0, v0, v1
+; GFX11-SDAG-TRUE16-NEXT: v_mov_b32_e32 v1, s0
+; GFX11-SDAG-TRUE16-NEXT: v_mov_b16_e32 v2.l, v0.h
+; GFX11-SDAG-TRUE16-NEXT: s_delay_alu instid0(VALU_DEP_2) | instskip(NEXT) | instid1(VALU_DEP_1)
+; GFX11-SDAG-TRUE16-NEXT: v_mov_b16_e32 v2.h, v1.l
+; GFX11-SDAG-TRUE16-NEXT: v_xor_b32_e32 v0, v0, v2
; GFX11-SDAG-TRUE16-NEXT: s_setpc_b64 s[30:31]
;
; GFX11-SDAG-FAKE16-LABEL: test_vector_reduce_xor_v2i16:
@@ -1021,10 +1022,11 @@ define i16 @test_vector_reduce_xor_v2i16(<2 x i16> %v) {
; GFX12-SDAG-TRUE16-NEXT: s_wait_samplecnt 0x0
; GFX12-SDAG-TRUE16-NEXT: s_wait_bvhcnt 0x0
; GFX12-SDAG-TRUE16-NEXT: s_wait_kmcnt 0x0
-; GFX12-SDAG-TRUE16-NEXT: v_lshrrev_b32_e32 v1, 16, v0
-; GFX12-SDAG-TRUE16-NEXT: s_delay_alu instid0(VALU_DEP_1) | instskip(NEXT) | instid1(VALU_DEP_1)
-; GFX12-SDAG-TRUE16-NEXT: v_lshl_or_b32 v1, s0, 16, v1
-; GFX12-SDAG-TRUE16-NEXT: v_xor_b32_e32 v0, v0, v1
+; GFX12-SDAG-TRUE16-NEXT: v_mov_b32_e32 v1, s0
+; GFX12-SDAG-TRUE16-NEXT: v_mov_b16_e32 v2.l, v0.h
+; GFX12-SDAG-TRUE16-NEXT: s_delay_alu instid0(VALU_DEP_2) | instskip(NEXT) | instid1(VALU_DEP_1)
+; GFX12-SDAG-TRUE16-NEXT: v_mov_b16_e32 v2.h, v1.l
+; GFX12-SDAG-TRUE16-NEXT: v_xor_b32_e32 v0, v0, v2
; GFX12-SDAG-TRUE16-NEXT: s_setpc_b64 s[30:31]
;
; GFX12-SDAG-FAKE16-LABEL: test_vector_reduce_xor_v2i16:
diff --git a/llvm/test/CodeGen/AMDGPU/vector_rebroadcast.ll b/llvm/test/CodeGen/AMDGPU/vector_rebroadcast.ll
index 07e9325..5045540 100644
--- a/llvm/test/CodeGen/AMDGPU/vector_rebroadcast.ll
+++ b/llvm/test/CodeGen/AMDGPU/vector_rebroadcast.ll
@@ -455,10 +455,7 @@ define <2 x i16> @shuffle_v2i16_rebroadcast(ptr addrspace(1) %arg0) {
; GFX11-TRUE16-NEXT: s_waitcnt vmcnt(0) expcnt(0) lgkmcnt(0)
; GFX11-TRUE16-NEXT: global_load_b32 v0, v[0:1], off
; GFX11-TRUE16-NEXT: s_waitcnt vmcnt(0)
-; GFX11-TRUE16-NEXT: v_mov_b16_e32 v1.l, v0.h
-; GFX11-TRUE16-NEXT: v_lshrrev_b32_e32 v0, 16, v0
-; GFX11-TRUE16-NEXT: s_delay_alu instid0(VALU_DEP_1)
-; GFX11-TRUE16-NEXT: v_lshl_or_b32 v0, v1, 16, v0
+; GFX11-TRUE16-NEXT: v_mov_b16_e32 v0.l, v0.h
; GFX11-TRUE16-NEXT: s_setpc_b64 s[30:31]
;
; GFX11-FAKE16-LABEL: shuffle_v2i16_rebroadcast:
@@ -499,10 +496,8 @@ define <4 x i16> @shuffle_v4i16_rebroadcast(ptr addrspace(1) %arg0) {
; GFX11-TRUE16-NEXT: s_waitcnt vmcnt(0) expcnt(0) lgkmcnt(0)
; GFX11-TRUE16-NEXT: global_load_b32 v0, v[0:1], off
; GFX11-TRUE16-NEXT: s_waitcnt vmcnt(0)
-; GFX11-TRUE16-NEXT: v_mov_b16_e32 v1.l, v0.h
-; GFX11-TRUE16-NEXT: v_lshrrev_b32_e32 v0, 16, v0
-; GFX11-TRUE16-NEXT: s_delay_alu instid0(VALU_DEP_1) | instskip(NEXT) | instid1(VALU_DEP_1)
-; GFX11-TRUE16-NEXT: v_lshl_or_b32 v0, v1, 16, v0
+; GFX11-TRUE16-NEXT: v_mov_b16_e32 v0.l, v0.h
+; GFX11-TRUE16-NEXT: s_delay_alu instid0(VALU_DEP_1)
; GFX11-TRUE16-NEXT: v_mov_b32_e32 v1, v0
; GFX11-TRUE16-NEXT: s_setpc_b64 s[30:31]
;
@@ -550,10 +545,8 @@ define <8 x i16> @shuffle_v8i16_rebroadcast(ptr addrspace(1) %arg0) {
; GFX11-TRUE16-NEXT: s_waitcnt vmcnt(0) expcnt(0) lgkmcnt(0)
; GFX11-TRUE16-NEXT: global_load_b32 v0, v[0:1], off
; GFX11-TRUE16-NEXT: s_waitcnt vmcnt(0)
-; GFX11-TRUE16-NEXT: v_mov_b16_e32 v1.l, v0.h
-; GFX11-TRUE16-NEXT: v_lshrrev_b32_e32 v0, 16, v0
-; GFX11-TRUE16-NEXT: s_delay_alu instid0(VALU_DEP_1) | instskip(NEXT) | instid1(VALU_DEP_1)
-; GFX11-TRUE16-NEXT: v_lshl_or_b32 v0, v1, 16, v0
+; GFX11-TRUE16-NEXT: v_mov_b16_e32 v0.l, v0.h
+; GFX11-TRUE16-NEXT: s_delay_alu instid0(VALU_DEP_1)
; GFX11-TRUE16-NEXT: v_mov_b32_e32 v1, v0
; GFX11-TRUE16-NEXT: v_mov_b32_e32 v2, v0
; GFX11-TRUE16-NEXT: v_mov_b32_e32 v3, v0
@@ -613,10 +606,8 @@ define <16 x i16> @shuffle_v16i16_rebroadcast(ptr addrspace(1) %arg0) {
; GFX11-TRUE16-NEXT: s_waitcnt vmcnt(0) expcnt(0) lgkmcnt(0)
; GFX11-TRUE16-NEXT: global_load_b32 v0, v[0:1], off
; GFX11-TRUE16-NEXT: s_waitcnt vmcnt(0)
-; GFX11-TRUE16-NEXT: v_mov_b16_e32 v1.l, v0.h
-; GFX11-TRUE16-NEXT: v_lshrrev_b32_e32 v0, 16, v0
-; GFX11-TRUE16-NEXT: s_delay_alu instid0(VALU_DEP_1) | instskip(NEXT) | instid1(VALU_DEP_1)
-; GFX11-TRUE16-NEXT: v_lshl_or_b32 v0, v1, 16, v0
+; GFX11-TRUE16-NEXT: v_mov_b16_e32 v0.l, v0.h
+; GFX11-TRUE16-NEXT: s_delay_alu instid0(VALU_DEP_1)
; GFX11-TRUE16-NEXT: v_mov_b32_e32 v1, v0
; GFX11-TRUE16-NEXT: v_mov_b32_e32 v2, v0
; GFX11-TRUE16-NEXT: v_mov_b32_e32 v3, v0
@@ -700,10 +691,8 @@ define <32 x i16> @shuffle_v32i16_rebroadcast(ptr addrspace(1) %arg0) {
; GFX11-TRUE16-NEXT: s_waitcnt vmcnt(0) expcnt(0) lgkmcnt(0)
; GFX11-TRUE16-NEXT: global_load_b32 v0, v[0:1], off
; GFX11-TRUE16-NEXT: s_waitcnt vmcnt(0)
-; GFX11-TRUE16-NEXT: v_mov_b16_e32 v1.l, v0.h
-; GFX11-TRUE16-NEXT: v_lshrrev_b32_e32 v0, 16, v0
-; GFX11-TRUE16-NEXT: s_delay_alu instid0(VALU_DEP_1) | instskip(NEXT) | instid1(VALU_DEP_1)
-; GFX11-TRUE16-NEXT: v_lshl_or_b32 v0, v1, 16, v0
+; GFX11-TRUE16-NEXT: v_mov_b16_e32 v0.l, v0.h
+; GFX11-TRUE16-NEXT: s_delay_alu instid0(VALU_DEP_1)
; GFX11-TRUE16-NEXT: v_mov_b32_e32 v1, v0
; GFX11-TRUE16-NEXT: v_mov_b32_e32 v2, v0
; GFX11-TRUE16-NEXT: v_mov_b32_e32 v3, v0
diff --git a/llvm/test/CodeGen/AMDGPU/vector_shuffle.packed.ll b/llvm/test/CodeGen/AMDGPU/vector_shuffle.packed.ll
index b01e92d..6bf6d54 100644
--- a/llvm/test/CodeGen/AMDGPU/vector_shuffle.packed.ll
+++ b/llvm/test/CodeGen/AMDGPU/vector_shuffle.packed.ll
@@ -1288,9 +1288,8 @@ define <4 x i16> @shuffle_v4i16_2356(ptr addrspace(1) %arg0, ptr addrspace(1) %a
; GFX11-TRUE16-NEXT: global_load_b64 v[2:3], v[2:3], off
; GFX11-TRUE16-NEXT: global_load_b32 v0, v[0:1], off offset:4
; GFX11-TRUE16-NEXT: s_waitcnt vmcnt(1)
-; GFX11-TRUE16-NEXT: v_lshrrev_b32_e32 v1, 16, v2
-; GFX11-TRUE16-NEXT: s_delay_alu instid0(VALU_DEP_1)
-; GFX11-TRUE16-NEXT: v_lshl_or_b32 v1, v3, 16, v1
+; GFX11-TRUE16-NEXT: v_mov_b16_e32 v1.l, v2.h
+; GFX11-TRUE16-NEXT: v_mov_b16_e32 v1.h, v3.l
; GFX11-TRUE16-NEXT: s_waitcnt vmcnt(0)
; GFX11-TRUE16-NEXT: s_setpc_b64 s[30:31]
;
@@ -2571,10 +2570,9 @@ define <2 x i16> @i16_hi16low16bits(ptr addrspace(1) %x0, ptr addrspace(1) %x1)
; GFX11-TRUE16-NEXT: global_load_b32 v0, v[0:1], off
; GFX11-TRUE16-NEXT: global_load_b32 v1, v[2:3], off
; GFX11-TRUE16-NEXT: s_waitcnt vmcnt(1)
-; GFX11-TRUE16-NEXT: v_lshrrev_b32_e32 v0, 16, v0
+; GFX11-TRUE16-NEXT: v_mov_b16_e32 v0.l, v0.h
; GFX11-TRUE16-NEXT: s_waitcnt vmcnt(0)
-; GFX11-TRUE16-NEXT: s_delay_alu instid0(VALU_DEP_1)
-; GFX11-TRUE16-NEXT: v_lshl_or_b32 v0, v1, 16, v0
+; GFX11-TRUE16-NEXT: v_mov_b16_e32 v0.h, v1.l
; GFX11-TRUE16-NEXT: s_setpc_b64 s[30:31]
;
; GFX11-FAKE16-LABEL: i16_hi16low16bits:
@@ -2626,14 +2624,10 @@ define <2 x i16> @i16_hi16bits(ptr addrspace(1) %x0, ptr addrspace(1) %x1) {
; GFX11-TRUE16-LABEL: i16_hi16bits:
; GFX11-TRUE16: ; %bb.0: ; %entry
; GFX11-TRUE16-NEXT: s_waitcnt vmcnt(0) expcnt(0) lgkmcnt(0)
-; GFX11-TRUE16-NEXT: global_load_b32 v2, v[2:3], off
-; GFX11-TRUE16-NEXT: global_load_b32 v0, v[0:1], off
-; GFX11-TRUE16-NEXT: s_waitcnt vmcnt(1)
-; GFX11-TRUE16-NEXT: v_mov_b16_e32 v1.l, v2.h
+; GFX11-TRUE16-NEXT: global_load_b32 v1, v[0:1], off
+; GFX11-TRUE16-NEXT: global_load_b32 v0, v[2:3], off
; GFX11-TRUE16-NEXT: s_waitcnt vmcnt(0)
-; GFX11-TRUE16-NEXT: v_lshrrev_b32_e32 v0, 16, v0
-; GFX11-TRUE16-NEXT: s_delay_alu instid0(VALU_DEP_1)
-; GFX11-TRUE16-NEXT: v_lshl_or_b32 v0, v1, 16, v0
+; GFX11-TRUE16-NEXT: v_mov_b16_e32 v0.l, v1.h
; GFX11-TRUE16-NEXT: s_setpc_b64 s[30:31]
;
; GFX11-FAKE16-LABEL: i16_hi16bits:
diff --git a/llvm/test/CodeGen/RISCV/div_minsize.ll b/llvm/test/CodeGen/RISCV/div_minsize.ll
index 601821b..794af2f 100644
--- a/llvm/test/CodeGen/RISCV/div_minsize.ll
+++ b/llvm/test/CodeGen/RISCV/div_minsize.ll
@@ -68,3 +68,151 @@ define i32 @testsize4(i32 %x) minsize nounwind {
%div = udiv i32 %x, 33
ret i32 %div
}
+
+define i128 @i128_sdiv(i128 %arg0) minsize nounwind {
+; RV32IM-LABEL: i128_sdiv:
+; RV32IM: # %bb.0:
+; RV32IM-NEXT: lw a2, 12(a1)
+; RV32IM-NEXT: lw a3, 8(a1)
+; RV32IM-NEXT: lw a4, 0(a1)
+; RV32IM-NEXT: lw a1, 4(a1)
+; RV32IM-NEXT: srai a5, a2, 31
+; RV32IM-NEXT: srli a5, a5, 30
+; RV32IM-NEXT: add a5, a4, a5
+; RV32IM-NEXT: sltu a4, a5, a4
+; RV32IM-NEXT: srli a5, a5, 2
+; RV32IM-NEXT: add a6, a1, a4
+; RV32IM-NEXT: sltu a1, a6, a1
+; RV32IM-NEXT: and a1, a4, a1
+; RV32IM-NEXT: srli a4, a6, 2
+; RV32IM-NEXT: slli a6, a6, 30
+; RV32IM-NEXT: or a5, a5, a6
+; RV32IM-NEXT: add a1, a3, a1
+; RV32IM-NEXT: srli a6, a1, 2
+; RV32IM-NEXT: sltu a3, a1, a3
+; RV32IM-NEXT: slli a1, a1, 30
+; RV32IM-NEXT: add a2, a2, a3
+; RV32IM-NEXT: or a1, a4, a1
+; RV32IM-NEXT: slli a3, a2, 30
+; RV32IM-NEXT: srai a2, a2, 2
+; RV32IM-NEXT: or a3, a6, a3
+; RV32IM-NEXT: sw a5, 0(a0)
+; RV32IM-NEXT: sw a1, 4(a0)
+; RV32IM-NEXT: sw a3, 8(a0)
+; RV32IM-NEXT: sw a2, 12(a0)
+; RV32IM-NEXT: ret
+;
+; RV64IM-LABEL: i128_sdiv:
+; RV64IM: # %bb.0:
+; RV64IM-NEXT: addi sp, sp, -16
+; RV64IM-NEXT: sd ra, 8(sp) # 8-byte Folded Spill
+; RV64IM-NEXT: li a2, 4
+; RV64IM-NEXT: li a3, 0
+; RV64IM-NEXT: call __divti3
+; RV64IM-NEXT: ld ra, 8(sp) # 8-byte Folded Reload
+; RV64IM-NEXT: addi sp, sp, 16
+; RV64IM-NEXT: ret
+ %div = sdiv i128 %arg0, 4
+ ret i128 %div
+}
+
+define i256 @i256_sdiv(i256 %arg0) minsize nounwind {
+; RV32IM-LABEL: i256_sdiv:
+; RV32IM: # %bb.0:
+; RV32IM-NEXT: lw a5, 16(a1)
+; RV32IM-NEXT: lw a4, 20(a1)
+; RV32IM-NEXT: lw a2, 24(a1)
+; RV32IM-NEXT: lw a3, 28(a1)
+; RV32IM-NEXT: lw a6, 0(a1)
+; RV32IM-NEXT: lw a7, 4(a1)
+; RV32IM-NEXT: lw t0, 8(a1)
+; RV32IM-NEXT: lw t1, 12(a1)
+; RV32IM-NEXT: srai a1, a3, 31
+; RV32IM-NEXT: srli a1, a1, 30
+; RV32IM-NEXT: add a1, a6, a1
+; RV32IM-NEXT: sltu t2, a1, a6
+; RV32IM-NEXT: add a6, a7, t2
+; RV32IM-NEXT: sltu a7, a6, a7
+; RV32IM-NEXT: and t2, t2, a7
+; RV32IM-NEXT: add a7, t0, t2
+; RV32IM-NEXT: sltu t3, a7, t0
+; RV32IM-NEXT: add t0, t1, t3
+; RV32IM-NEXT: beqz t2, .LBB5_2
+; RV32IM-NEXT: # %bb.1:
+; RV32IM-NEXT: sltu t1, t0, t1
+; RV32IM-NEXT: and t2, t3, t1
+; RV32IM-NEXT: .LBB5_2:
+; RV32IM-NEXT: add t2, a5, t2
+; RV32IM-NEXT: srli t1, t0, 2
+; RV32IM-NEXT: srli t3, a7, 2
+; RV32IM-NEXT: slli t0, t0, 30
+; RV32IM-NEXT: slli a7, a7, 30
+; RV32IM-NEXT: or t0, t3, t0
+; RV32IM-NEXT: srli t3, a6, 2
+; RV32IM-NEXT: srli a1, a1, 2
+; RV32IM-NEXT: slli a6, a6, 30
+; RV32IM-NEXT: sltu a5, t2, a5
+; RV32IM-NEXT: or a7, t3, a7
+; RV32IM-NEXT: srli t3, t2, 2
+; RV32IM-NEXT: slli t2, t2, 30
+; RV32IM-NEXT: or a1, a1, a6
+; RV32IM-NEXT: add a6, a4, a5
+; RV32IM-NEXT: or t1, t1, t2
+; RV32IM-NEXT: sltu a4, a6, a4
+; RV32IM-NEXT: srli t2, a6, 2
+; RV32IM-NEXT: slli a6, a6, 30
+; RV32IM-NEXT: sw a1, 0(a0)
+; RV32IM-NEXT: sw a7, 4(a0)
+; RV32IM-NEXT: sw t0, 8(a0)
+; RV32IM-NEXT: sw t1, 12(a0)
+; RV32IM-NEXT: and a4, a5, a4
+; RV32IM-NEXT: or a1, t3, a6
+; RV32IM-NEXT: add a4, a2, a4
+; RV32IM-NEXT: srli a5, a4, 2
+; RV32IM-NEXT: sltu a2, a4, a2
+; RV32IM-NEXT: slli a4, a4, 30
+; RV32IM-NEXT: add a2, a3, a2
+; RV32IM-NEXT: or a3, t2, a4
+; RV32IM-NEXT: slli a4, a2, 30
+; RV32IM-NEXT: srai a2, a2, 2
+; RV32IM-NEXT: or a4, a5, a4
+; RV32IM-NEXT: sw a1, 16(a0)
+; RV32IM-NEXT: sw a3, 20(a0)
+; RV32IM-NEXT: sw a4, 24(a0)
+; RV32IM-NEXT: sw a2, 28(a0)
+; RV32IM-NEXT: ret
+;
+; RV64IM-LABEL: i256_sdiv:
+; RV64IM: # %bb.0:
+; RV64IM-NEXT: ld a2, 24(a1)
+; RV64IM-NEXT: ld a3, 16(a1)
+; RV64IM-NEXT: ld a4, 0(a1)
+; RV64IM-NEXT: ld a1, 8(a1)
+; RV64IM-NEXT: srai a5, a2, 63
+; RV64IM-NEXT: srli a5, a5, 62
+; RV64IM-NEXT: add a5, a4, a5
+; RV64IM-NEXT: sltu a4, a5, a4
+; RV64IM-NEXT: srli a5, a5, 2
+; RV64IM-NEXT: add a6, a1, a4
+; RV64IM-NEXT: sltu a1, a6, a1
+; RV64IM-NEXT: and a1, a4, a1
+; RV64IM-NEXT: srli a4, a6, 2
+; RV64IM-NEXT: slli a6, a6, 62
+; RV64IM-NEXT: or a5, a5, a6
+; RV64IM-NEXT: add a1, a3, a1
+; RV64IM-NEXT: srli a6, a1, 2
+; RV64IM-NEXT: sltu a3, a1, a3
+; RV64IM-NEXT: slli a1, a1, 62
+; RV64IM-NEXT: add a2, a2, a3
+; RV64IM-NEXT: or a1, a4, a1
+; RV64IM-NEXT: slli a3, a2, 62
+; RV64IM-NEXT: srai a2, a2, 2
+; RV64IM-NEXT: or a3, a6, a3
+; RV64IM-NEXT: sd a5, 0(a0)
+; RV64IM-NEXT: sd a1, 8(a0)
+; RV64IM-NEXT: sd a3, 16(a0)
+; RV64IM-NEXT: sd a2, 24(a0)
+; RV64IM-NEXT: ret
+ %div = sdiv i256 %arg0, 4
+ ret i256 %div
+}
diff --git a/llvm/test/CodeGen/RISCV/rvv/mixed-float-bf16-arith.ll b/llvm/test/CodeGen/RISCV/rvv/mixed-float-bf16-arith.ll
new file mode 100644
index 0000000..489323b
--- /dev/null
+++ b/llvm/test/CodeGen/RISCV/rvv/mixed-float-bf16-arith.ll
@@ -0,0 +1,186 @@
+; NOTE: Assertions have been autogenerated by utils/update_llc_test_checks.py
+; RUN: sed 's/iXLen/i32/g' %s | llc -mtriple=riscv32 -mattr=+v,+zvfh,+experimental-zvfbfa \
+; RUN: -verify-machineinstrs -target-abi=ilp32d | FileCheck %s
+; RUN: sed 's/iXLen/i64/g' %s | llc -mtriple=riscv64 -mattr=+v,+zvfh,+experimental-zvfbfa \
+; RUN: -verify-machineinstrs -target-abi=lp64d | FileCheck %s
+
+declare <vscale x 1 x half> @llvm.riscv.vfadd.nxv1f16.nxv1f16(
+ <vscale x 1 x half>,
+ <vscale x 1 x half>,
+ <vscale x 1 x half>,
+ iXLen, iXLen);
+
+declare <vscale x 1 x i32> @llvm.riscv.vadd.nxv1i32.nxv1i32(
+ <vscale x 1 x i32>,
+ <vscale x 1 x i32>,
+ <vscale x 1 x i32>,
+ iXLen);
+
+declare <vscale x 1 x bfloat> @llvm.riscv.vfadd.nxv1bf16.nxv1bf16(
+ <vscale x 1 x bfloat>,
+ <vscale x 1 x bfloat>,
+ <vscale x 1 x bfloat>,
+ iXLen, iXLen);
+
+define <vscale x 1 x bfloat> @test_half_bf16(<vscale x 1 x bfloat> %0, <vscale x 1 x bfloat> %1, iXLen %2, <vscale x 1 x half> %3, <vscale x 1 x half> %4, ptr %ptr) nounwind {
+; CHECK-LABEL: test_half_bf16:
+; CHECK: # %bb.0: # %entry
+; CHECK-NEXT: fsrmi a2, 0
+; CHECK-NEXT: vsetvli zero, a0, e16, mf4, ta, ma
+; CHECK-NEXT: vfadd.vv v10, v10, v11
+; CHECK-NEXT: vsetvli zero, zero, e16alt, mf4, ta, ma
+; CHECK-NEXT: vfadd.vv v8, v8, v9
+; CHECK-NEXT: fsrm a2
+; CHECK-NEXT: vse16.v v10, (a1)
+; CHECK-NEXT: ret
+entry:
+ %a = call <vscale x 1 x half> @llvm.riscv.vfadd.nxv1f16.nxv1f16(
+ <vscale x 1 x half> poison,
+ <vscale x 1 x half> %3,
+ <vscale x 1 x half> %4,
+ iXLen 0, iXLen %2)
+
+ %b = call <vscale x 1 x bfloat> @llvm.riscv.vfadd.nxv1bf16.nxv1bf16(
+ <vscale x 1 x bfloat> poison,
+ <vscale x 1 x bfloat> %0,
+ <vscale x 1 x bfloat> %1,
+ iXLen 0, iXLen %2)
+
+ call void @llvm.riscv.vse(<vscale x 1 x half> %a, ptr %ptr, iXLen %2)
+
+ ret <vscale x 1 x bfloat> %b
+}
+
+define <vscale x 1 x bfloat> @test_i32_bf16(<vscale x 1 x bfloat> %0, <vscale x 1 x bfloat> %1, iXLen %2, <vscale x 1 x i32> %3, <vscale x 1 x i32> %4, ptr %ptr) nounwind {
+; CHECK-LABEL: test_i32_bf16:
+; CHECK: # %bb.0: # %entry
+; CHECK-NEXT: vsetvli zero, a0, e32, mf2, ta, ma
+; CHECK-NEXT: vadd.vv v10, v10, v11
+; CHECK-NEXT: fsrmi a0, 0
+; CHECK-NEXT: vsetvli zero, zero, e16alt, mf4, ta, ma
+; CHECK-NEXT: vfadd.vv v8, v8, v9
+; CHECK-NEXT: fsrm a0
+; CHECK-NEXT: vse32.v v10, (a1)
+; CHECK-NEXT: ret
+entry:
+ %a = call <vscale x 1 x i32> @llvm.riscv.vadd.nxv1i32.nxv1i32(
+ <vscale x 1 x i32> poison,
+ <vscale x 1 x i32> %3,
+ <vscale x 1 x i32> %4,
+ iXLen %2)
+
+ %b = call <vscale x 1 x bfloat> @llvm.riscv.vfadd.nxv1bf16.nxv1bf16(
+ <vscale x 1 x bfloat> poison,
+ <vscale x 1 x bfloat> %0,
+ <vscale x 1 x bfloat> %1,
+ iXLen 0, iXLen %2)
+
+ call void @llvm.riscv.vse(<vscale x 1 x i32> %a, ptr %ptr, iXLen %2)
+
+ ret <vscale x 1 x bfloat> %b
+}
+
+define <vscale x 1 x bfloat> @test_half_bf16_half(<vscale x 1 x bfloat> %0, <vscale x 1 x bfloat> %1, iXLen %2, <vscale x 1 x half> %3, <vscale x 1 x half> %4, ptr %ptr) nounwind {
+; CHECK-LABEL: test_half_bf16_half:
+; CHECK: # %bb.0: # %entry
+; CHECK-NEXT: fsrmi a2, 0
+; CHECK-NEXT: vsetvli zero, a0, e16, mf4, ta, ma
+; CHECK-NEXT: vfadd.vv v10, v10, v11
+; CHECK-NEXT: vsetvli zero, zero, e16alt, mf4, ta, ma
+; CHECK-NEXT: vfadd.vv v8, v8, v9
+; CHECK-NEXT: vsetvli zero, zero, e16, mf4, ta, ma
+; CHECK-NEXT: vfadd.vv v9, v10, v11
+; CHECK-NEXT: fsrm a2
+; CHECK-NEXT: vsetvli a0, zero, e16, mf4, ta, ma
+; CHECK-NEXT: vse16.v v9, (a1)
+; CHECK-NEXT: ret
+entry:
+ %a = call <vscale x 1 x half> @llvm.riscv.vfadd.nxv1f16.nxv1f16(
+ <vscale x 1 x half> poison,
+ <vscale x 1 x half> %3,
+ <vscale x 1 x half> %4,
+ iXLen 0, iXLen %2)
+
+ %b = call <vscale x 1 x bfloat> @llvm.riscv.vfadd.nxv1bf16.nxv1bf16(
+ <vscale x 1 x bfloat> poison,
+ <vscale x 1 x bfloat> %0,
+ <vscale x 1 x bfloat> %1,
+ iXLen 0, iXLen %2)
+
+ %c = call <vscale x 1 x half> @llvm.riscv.vfadd.nxv1f16.nxv1f16(
+ <vscale x 1 x half> poison,
+ <vscale x 1 x half> %a,
+ <vscale x 1 x half> %4,
+ iXLen 0, iXLen %2)
+
+ store <vscale x 1 x half> %c, ptr %ptr
+
+ ret <vscale x 1 x bfloat> %b
+}
+
+define <vscale x 1 x bfloat> @test_bf16_half_bf16(<vscale x 1 x bfloat> %0, <vscale x 1 x bfloat> %1, iXLen %2, <vscale x 1 x half> %3, <vscale x 1 x half> %4, ptr %ptr) nounwind {
+; CHECK-LABEL: test_bf16_half_bf16:
+; CHECK: # %bb.0: # %entry
+; CHECK-NEXT: fsrmi a2, 0
+; CHECK-NEXT: vsetvli zero, a0, e16alt, mf4, ta, ma
+; CHECK-NEXT: vfadd.vv v8, v8, v9
+; CHECK-NEXT: vsetvli zero, zero, e16, mf4, ta, ma
+; CHECK-NEXT: vfadd.vv v10, v10, v11
+; CHECK-NEXT: vsetvli zero, zero, e16alt, mf4, ta, ma
+; CHECK-NEXT: vfadd.vv v8, v8, v9
+; CHECK-NEXT: fsrm a2
+; CHECK-NEXT: vsetvli a0, zero, e16alt, mf4, ta, ma
+; CHECK-NEXT: vse16.v v10, (a1)
+; CHECK-NEXT: ret
+entry:
+ %a = call <vscale x 1 x bfloat> @llvm.riscv.vfadd.nxv1bf16.nxv1bf16(
+ <vscale x 1 x bfloat> poison,
+ <vscale x 1 x bfloat> %0,
+ <vscale x 1 x bfloat> %1,
+ iXLen 0, iXLen %2)
+
+ %b = call <vscale x 1 x half> @llvm.riscv.vfadd.nxv1f16.nxv1f16(
+ <vscale x 1 x half> poison,
+ <vscale x 1 x half> %3,
+ <vscale x 1 x half> %4,
+ iXLen 0, iXLen %2)
+
+ %c = call <vscale x 1 x bfloat> @llvm.riscv.vfadd.nxv1bf16.nxv1bf16(
+ <vscale x 1 x bfloat> poison,
+ <vscale x 1 x bfloat> %a,
+ <vscale x 1 x bfloat> %1,
+ iXLen 0, iXLen %2)
+
+ store <vscale x 1 x half> %b, ptr %ptr
+
+ ret <vscale x 1 x bfloat> %c
+}
+
+define <vscale x 1 x bfloat> @test_bf16_i16(<vscale x 1 x bfloat> %0, <vscale x 1 x bfloat> %1, iXLen %2, <vscale x 1 x i16> %3, <vscale x 1 x i16> %4, ptr %ptr) nounwind {
+; CHECK-LABEL: test_bf16_i16:
+; CHECK: # %bb.0: # %entry
+; CHECK-NEXT: fsrmi a2, 0
+; CHECK-NEXT: vsetvli zero, a0, e16alt, mf4, ta, ma
+; CHECK-NEXT: vfadd.vv v8, v8, v9
+; CHECK-NEXT: vadd.vv v9, v10, v11
+; CHECK-NEXT: fsrm a2
+; CHECK-NEXT: vsetvli a0, zero, e16alt, mf4, ta, ma
+; CHECK-NEXT: vse16.v v9, (a1)
+; CHECK-NEXT: ret
+entry:
+ %a = call <vscale x 1 x bfloat> @llvm.riscv.vfadd.nxv1bf16.nxv1bf16(
+ <vscale x 1 x bfloat> poison,
+ <vscale x 1 x bfloat> %0,
+ <vscale x 1 x bfloat> %1,
+ iXLen 0, iXLen %2)
+
+ %b = call <vscale x 1 x i16> @llvm.riscv.vadd.nxv1i16.nxv1i16(
+ <vscale x 1 x i16> poison,
+ <vscale x 1 x i16> %3,
+ <vscale x 1 x i16> %4,
+ iXLen %2)
+
+ store <vscale x 1 x i16> %b, ptr %ptr
+
+ ret <vscale x 1 x bfloat> %a
+}
diff --git a/llvm/test/CodeGen/RISCV/rvv/vfadd-bf.ll b/llvm/test/CodeGen/RISCV/rvv/vfadd-bf.ll
new file mode 100644
index 0000000..db1b081
--- /dev/null
+++ b/llvm/test/CodeGen/RISCV/rvv/vfadd-bf.ll
@@ -0,0 +1,607 @@
+; NOTE: Assertions have been autogenerated by utils/update_llc_test_checks.py
+; RUN: sed 's/iXLen/i32/g' %s | llc -mtriple=riscv32 -mattr=+v,+experimental-zvfbfa \
+; RUN: -verify-machineinstrs -target-abi=ilp32d | FileCheck %s
+; RUN: sed 's/iXLen/i64/g' %s | llc -mtriple=riscv64 -mattr=+v,+experimental-zvfbfa \
+; RUN: -verify-machineinstrs -target-abi=lp64d | FileCheck %s
+
+declare <vscale x 1 x bfloat> @llvm.riscv.vfadd.nxv1bf16.nxv1bf16(
+ <vscale x 1 x bfloat>,
+ <vscale x 1 x bfloat>,
+ <vscale x 1 x bfloat>,
+ iXLen, iXLen);
+
+define <vscale x 1 x bfloat> @intrinsic_vfadd_vv_nxv1bf16_nxv1bf16_nxv1bf16(<vscale x 1 x bfloat> %0, <vscale x 1 x bfloat> %1, iXLen %2) nounwind {
+; CHECK-LABEL: intrinsic_vfadd_vv_nxv1bf16_nxv1bf16_nxv1bf16:
+; CHECK: # %bb.0: # %entry
+; CHECK-NEXT: fsrmi a1, 0
+; CHECK-NEXT: vsetvli zero, a0, e16alt, mf4, ta, ma
+; CHECK-NEXT: vfadd.vv v8, v8, v9
+; CHECK-NEXT: fsrm a1
+; CHECK-NEXT: ret
+entry:
+ %a = call <vscale x 1 x bfloat> @llvm.riscv.vfadd.nxv1bf16.nxv1bf16(
+ <vscale x 1 x bfloat> poison,
+ <vscale x 1 x bfloat> %0,
+ <vscale x 1 x bfloat> %1,
+ iXLen 0, iXLen %2)
+
+ ret <vscale x 1 x bfloat> %a
+}
+
+declare <vscale x 1 x bfloat> @llvm.riscv.vfadd.mask.nxv1bf16.nxv1bf16(
+ <vscale x 1 x bfloat>,
+ <vscale x 1 x bfloat>,
+ <vscale x 1 x bfloat>,
+ <vscale x 1 x i1>,
+ iXLen, iXLen, iXLen);
+
+define <vscale x 1 x bfloat> @intrinsic_vfadd_mask_vv_nxv1bf16_nxv1bf16_nxv1bf16(<vscale x 1 x bfloat> %0, <vscale x 1 x bfloat> %1, <vscale x 1 x bfloat> %2, <vscale x 1 x i1> %3, iXLen %4) nounwind {
+; CHECK-LABEL: intrinsic_vfadd_mask_vv_nxv1bf16_nxv1bf16_nxv1bf16:
+; CHECK: # %bb.0: # %entry
+; CHECK-NEXT: fsrmi a1, 0
+; CHECK-NEXT: vsetvli zero, a0, e16alt, mf4, ta, mu
+; CHECK-NEXT: vfadd.vv v8, v9, v10, v0.t
+; CHECK-NEXT: fsrm a1
+; CHECK-NEXT: ret
+entry:
+ %a = call <vscale x 1 x bfloat> @llvm.riscv.vfadd.mask.nxv1bf16.nxv1bf16(
+ <vscale x 1 x bfloat> %0,
+ <vscale x 1 x bfloat> %1,
+ <vscale x 1 x bfloat> %2,
+ <vscale x 1 x i1> %3,
+ iXLen 0, iXLen %4, iXLen 1)
+
+ ret <vscale x 1 x bfloat> %a
+}
+
+declare <vscale x 2 x bfloat> @llvm.riscv.vfadd.nxv2bf16.nxv2bf16(
+ <vscale x 2 x bfloat>,
+ <vscale x 2 x bfloat>,
+ <vscale x 2 x bfloat>,
+ iXLen, iXLen);
+
+define <vscale x 2 x bfloat> @intrinsic_vfadd_vv_nxv2bf16_nxv2bf16_nxv2bf16(<vscale x 2 x bfloat> %0, <vscale x 2 x bfloat> %1, iXLen %2) nounwind {
+; CHECK-LABEL: intrinsic_vfadd_vv_nxv2bf16_nxv2bf16_nxv2bf16:
+; CHECK: # %bb.0: # %entry
+; CHECK-NEXT: fsrmi a1, 0
+; CHECK-NEXT: vsetvli zero, a0, e16alt, mf2, ta, ma
+; CHECK-NEXT: vfadd.vv v8, v8, v9
+; CHECK-NEXT: fsrm a1
+; CHECK-NEXT: ret
+entry:
+ %a = call <vscale x 2 x bfloat> @llvm.riscv.vfadd.nxv2bf16.nxv2bf16(
+ <vscale x 2 x bfloat> poison,
+ <vscale x 2 x bfloat> %0,
+ <vscale x 2 x bfloat> %1,
+ iXLen 0, iXLen %2)
+
+ ret <vscale x 2 x bfloat> %a
+}
+
+declare <vscale x 2 x bfloat> @llvm.riscv.vfadd.mask.nxv2bf16.nxv2bf16(
+ <vscale x 2 x bfloat>,
+ <vscale x 2 x bfloat>,
+ <vscale x 2 x bfloat>,
+ <vscale x 2 x i1>,
+ iXLen, iXLen, iXLen);
+
+define <vscale x 2 x bfloat> @intrinsic_vfadd_mask_vv_nxv2bf16_nxv2bf16_nxv2bf16(<vscale x 2 x bfloat> %0, <vscale x 2 x bfloat> %1, <vscale x 2 x bfloat> %2, <vscale x 2 x i1> %3, iXLen %4) nounwind {
+; CHECK-LABEL: intrinsic_vfadd_mask_vv_nxv2bf16_nxv2bf16_nxv2bf16:
+; CHECK: # %bb.0: # %entry
+; CHECK-NEXT: fsrmi a1, 0
+; CHECK-NEXT: vsetvli zero, a0, e16alt, mf2, ta, mu
+; CHECK-NEXT: vfadd.vv v8, v9, v10, v0.t
+; CHECK-NEXT: fsrm a1
+; CHECK-NEXT: ret
+entry:
+ %a = call <vscale x 2 x bfloat> @llvm.riscv.vfadd.mask.nxv2bf16.nxv2bf16(
+ <vscale x 2 x bfloat> %0,
+ <vscale x 2 x bfloat> %1,
+ <vscale x 2 x bfloat> %2,
+ <vscale x 2 x i1> %3,
+ iXLen 0, iXLen %4, iXLen 1)
+
+ ret <vscale x 2 x bfloat> %a
+}
+
+declare <vscale x 4 x bfloat> @llvm.riscv.vfadd.nxv4bf16.nxv4bf16(
+ <vscale x 4 x bfloat>,
+ <vscale x 4 x bfloat>,
+ <vscale x 4 x bfloat>,
+ iXLen, iXLen);
+
+define <vscale x 4 x bfloat> @intrinsic_vfadd_vv_nxv4bf16_nxv4bf16_nxv4bf16(<vscale x 4 x bfloat> %0, <vscale x 4 x bfloat> %1, iXLen %2) nounwind {
+; CHECK-LABEL: intrinsic_vfadd_vv_nxv4bf16_nxv4bf16_nxv4bf16:
+; CHECK: # %bb.0: # %entry
+; CHECK-NEXT: fsrmi a1, 0
+; CHECK-NEXT: vsetvli zero, a0, e16alt, m1, ta, ma
+; CHECK-NEXT: vfadd.vv v8, v8, v9
+; CHECK-NEXT: fsrm a1
+; CHECK-NEXT: ret
+entry:
+ %a = call <vscale x 4 x bfloat> @llvm.riscv.vfadd.nxv4bf16.nxv4bf16(
+ <vscale x 4 x bfloat> poison,
+ <vscale x 4 x bfloat> %0,
+ <vscale x 4 x bfloat> %1,
+ iXLen 0, iXLen %2)
+
+ ret <vscale x 4 x bfloat> %a
+}
+
+declare <vscale x 4 x bfloat> @llvm.riscv.vfadd.mask.nxv4bf16.nxv4bf16(
+ <vscale x 4 x bfloat>,
+ <vscale x 4 x bfloat>,
+ <vscale x 4 x bfloat>,
+ <vscale x 4 x i1>,
+ iXLen, iXLen, iXLen);
+
+define <vscale x 4 x bfloat> @intrinsic_vfadd_mask_vv_nxv4bf16_nxv4bf16_nxv4bf16(<vscale x 4 x bfloat> %0, <vscale x 4 x bfloat> %1, <vscale x 4 x bfloat> %2, <vscale x 4 x i1> %3, iXLen %4) nounwind {
+; CHECK-LABEL: intrinsic_vfadd_mask_vv_nxv4bf16_nxv4bf16_nxv4bf16:
+; CHECK: # %bb.0: # %entry
+; CHECK-NEXT: fsrmi a1, 0
+; CHECK-NEXT: vsetvli zero, a0, e16alt, m1, ta, mu
+; CHECK-NEXT: vfadd.vv v8, v9, v10, v0.t
+; CHECK-NEXT: fsrm a1
+; CHECK-NEXT: ret
+entry:
+ %a = call <vscale x 4 x bfloat> @llvm.riscv.vfadd.mask.nxv4bf16.nxv4bf16(
+ <vscale x 4 x bfloat> %0,
+ <vscale x 4 x bfloat> %1,
+ <vscale x 4 x bfloat> %2,
+ <vscale x 4 x i1> %3,
+ iXLen 0, iXLen %4, iXLen 1)
+
+ ret <vscale x 4 x bfloat> %a
+}
+
+declare <vscale x 8 x bfloat> @llvm.riscv.vfadd.nxv8bf16.nxv8bf16(
+ <vscale x 8 x bfloat>,
+ <vscale x 8 x bfloat>,
+ <vscale x 8 x bfloat>,
+ iXLen, iXLen);
+
+define <vscale x 8 x bfloat> @intrinsic_vfadd_vv_nxv8bf16_nxv8bf16_nxv8bf16(<vscale x 8 x bfloat> %0, <vscale x 8 x bfloat> %1, iXLen %2) nounwind {
+; CHECK-LABEL: intrinsic_vfadd_vv_nxv8bf16_nxv8bf16_nxv8bf16:
+; CHECK: # %bb.0: # %entry
+; CHECK-NEXT: fsrmi a1, 0
+; CHECK-NEXT: vsetvli zero, a0, e16alt, m2, ta, ma
+; CHECK-NEXT: vfadd.vv v8, v8, v10
+; CHECK-NEXT: fsrm a1
+; CHECK-NEXT: ret
+entry:
+ %a = call <vscale x 8 x bfloat> @llvm.riscv.vfadd.nxv8bf16.nxv8bf16(
+ <vscale x 8 x bfloat> poison,
+ <vscale x 8 x bfloat> %0,
+ <vscale x 8 x bfloat> %1,
+ iXLen 0, iXLen %2)
+
+ ret <vscale x 8 x bfloat> %a
+}
+
+declare <vscale x 8 x bfloat> @llvm.riscv.vfadd.mask.nxv8bf16.nxv8bf16(
+ <vscale x 8 x bfloat>,
+ <vscale x 8 x bfloat>,
+ <vscale x 8 x bfloat>,
+ <vscale x 8 x i1>,
+ iXLen, iXLen, iXLen);
+
+define <vscale x 8 x bfloat> @intrinsic_vfadd_mask_vv_nxv8bf16_nxv8bf16_nxv8bf16(<vscale x 8 x bfloat> %0, <vscale x 8 x bfloat> %1, <vscale x 8 x bfloat> %2, <vscale x 8 x i1> %3, iXLen %4) nounwind {
+; CHECK-LABEL: intrinsic_vfadd_mask_vv_nxv8bf16_nxv8bf16_nxv8bf16:
+; CHECK: # %bb.0: # %entry
+; CHECK-NEXT: fsrmi a1, 0
+; CHECK-NEXT: vsetvli zero, a0, e16alt, m2, ta, mu
+; CHECK-NEXT: vfadd.vv v8, v10, v12, v0.t
+; CHECK-NEXT: fsrm a1
+; CHECK-NEXT: ret
+entry:
+ %a = call <vscale x 8 x bfloat> @llvm.riscv.vfadd.mask.nxv8bf16.nxv8bf16(
+ <vscale x 8 x bfloat> %0,
+ <vscale x 8 x bfloat> %1,
+ <vscale x 8 x bfloat> %2,
+ <vscale x 8 x i1> %3,
+ iXLen 0, iXLen %4, iXLen 1)
+
+ ret <vscale x 8 x bfloat> %a
+}
+
+declare <vscale x 16 x bfloat> @llvm.riscv.vfadd.nxv16bf16.nxv16bf16(
+ <vscale x 16 x bfloat>,
+ <vscale x 16 x bfloat>,
+ <vscale x 16 x bfloat>,
+ iXLen, iXLen);
+
+define <vscale x 16 x bfloat> @intrinsic_vfadd_vv_nxv16bf16_nxv16bf16_nxv16bf16(<vscale x 16 x bfloat> %0, <vscale x 16 x bfloat> %1, iXLen %2) nounwind {
+; CHECK-LABEL: intrinsic_vfadd_vv_nxv16bf16_nxv16bf16_nxv16bf16:
+; CHECK: # %bb.0: # %entry
+; CHECK-NEXT: fsrmi a1, 0
+; CHECK-NEXT: vsetvli zero, a0, e16alt, m4, ta, ma
+; CHECK-NEXT: vfadd.vv v8, v8, v12
+; CHECK-NEXT: fsrm a1
+; CHECK-NEXT: ret
+entry:
+ %a = call <vscale x 16 x bfloat> @llvm.riscv.vfadd.nxv16bf16.nxv16bf16(
+ <vscale x 16 x bfloat> poison,
+ <vscale x 16 x bfloat> %0,
+ <vscale x 16 x bfloat> %1,
+ iXLen 0, iXLen %2)
+
+ ret <vscale x 16 x bfloat> %a
+}
+
+declare <vscale x 16 x bfloat> @llvm.riscv.vfadd.mask.nxv16bf16.nxv16bf16(
+ <vscale x 16 x bfloat>,
+ <vscale x 16 x bfloat>,
+ <vscale x 16 x bfloat>,
+ <vscale x 16 x i1>,
+ iXLen, iXLen, iXLen);
+
+define <vscale x 16 x bfloat> @intrinsic_vfadd_mask_vv_nxv16bf16_nxv16bf16_nxv16bf16(<vscale x 16 x bfloat> %0, <vscale x 16 x bfloat> %1, <vscale x 16 x bfloat> %2, <vscale x 16 x i1> %3, iXLen %4) nounwind {
+; CHECK-LABEL: intrinsic_vfadd_mask_vv_nxv16bf16_nxv16bf16_nxv16bf16:
+; CHECK: # %bb.0: # %entry
+; CHECK-NEXT: fsrmi a1, 0
+; CHECK-NEXT: vsetvli zero, a0, e16alt, m4, ta, mu
+; CHECK-NEXT: vfadd.vv v8, v12, v16, v0.t
+; CHECK-NEXT: fsrm a1
+; CHECK-NEXT: ret
+entry:
+ %a = call <vscale x 16 x bfloat> @llvm.riscv.vfadd.mask.nxv16bf16.nxv16bf16(
+ <vscale x 16 x bfloat> %0,
+ <vscale x 16 x bfloat> %1,
+ <vscale x 16 x bfloat> %2,
+ <vscale x 16 x i1> %3,
+ iXLen 0, iXLen %4, iXLen 1)
+
+ ret <vscale x 16 x bfloat> %a
+}
+
+declare <vscale x 32 x bfloat> @llvm.riscv.vfadd.nxv32bf16.nxv32bf16(
+ <vscale x 32 x bfloat>,
+ <vscale x 32 x bfloat>,
+ <vscale x 32 x bfloat>,
+ iXLen, iXLen);
+
+define <vscale x 32 x bfloat> @intrinsic_vfadd_vv_nxv32bf16_nxv32bf16_nxv32bf16(<vscale x 32 x bfloat> %0, <vscale x 32 x bfloat> %1, iXLen %2) nounwind {
+; CHECK-LABEL: intrinsic_vfadd_vv_nxv32bf16_nxv32bf16_nxv32bf16:
+; CHECK: # %bb.0: # %entry
+; CHECK-NEXT: fsrmi a1, 0
+; CHECK-NEXT: vsetvli zero, a0, e16alt, m8, ta, ma
+; CHECK-NEXT: vfadd.vv v8, v8, v16
+; CHECK-NEXT: fsrm a1
+; CHECK-NEXT: ret
+entry:
+ %a = call <vscale x 32 x bfloat> @llvm.riscv.vfadd.nxv32bf16.nxv32bf16(
+ <vscale x 32 x bfloat> poison,
+ <vscale x 32 x bfloat> %0,
+ <vscale x 32 x bfloat> %1,
+ iXLen 0, iXLen %2)
+
+ ret <vscale x 32 x bfloat> %a
+}
+
+declare <vscale x 32 x bfloat> @llvm.riscv.vfadd.mask.nxv32bf16.nxv32bf16(
+ <vscale x 32 x bfloat>,
+ <vscale x 32 x bfloat>,
+ <vscale x 32 x bfloat>,
+ <vscale x 32 x i1>,
+ iXLen, iXLen, iXLen);
+
+define <vscale x 32 x bfloat> @intrinsic_vfadd_mask_vv_nxv32bf16_nxv32bf16_nxv32bf16(<vscale x 32 x bfloat> %0, <vscale x 32 x bfloat> %1, <vscale x 32 x bfloat> %2, <vscale x 32 x i1> %3, iXLen %4) nounwind {
+; CHECK-LABEL: intrinsic_vfadd_mask_vv_nxv32bf16_nxv32bf16_nxv32bf16:
+; CHECK: # %bb.0: # %entry
+; CHECK-NEXT: vl8re16.v v24, (a0)
+; CHECK-NEXT: fsrmi a0, 0
+; CHECK-NEXT: vsetvli zero, a1, e16alt, m8, ta, mu
+; CHECK-NEXT: vfadd.vv v8, v16, v24, v0.t
+; CHECK-NEXT: fsrm a0
+; CHECK-NEXT: ret
+entry:
+ %a = call <vscale x 32 x bfloat> @llvm.riscv.vfadd.mask.nxv32bf16.nxv32bf16(
+ <vscale x 32 x bfloat> %0,
+ <vscale x 32 x bfloat> %1,
+ <vscale x 32 x bfloat> %2,
+ <vscale x 32 x i1> %3,
+ iXLen 0, iXLen %4, iXLen 1)
+
+ ret <vscale x 32 x bfloat> %a
+}
+
+declare <vscale x 1 x bfloat> @llvm.riscv.vfadd.nxv1bf16.bf16(
+ <vscale x 1 x bfloat>,
+ <vscale x 1 x bfloat>,
+ bfloat,
+ iXLen, iXLen);
+
+define <vscale x 1 x bfloat> @intrinsic_vfadd_vf_nxv1bf16_nxv1bf16_bf16(<vscale x 1 x bfloat> %0, bfloat %1, iXLen %2) nounwind {
+; CHECK-LABEL: intrinsic_vfadd_vf_nxv1bf16_nxv1bf16_bf16:
+; CHECK: # %bb.0: # %entry
+; CHECK-NEXT: fsrmi a1, 0
+; CHECK-NEXT: vsetvli zero, a0, e16alt, mf4, ta, ma
+; CHECK-NEXT: vfadd.vf v8, v8, fa0
+; CHECK-NEXT: fsrm a1
+; CHECK-NEXT: ret
+entry:
+ %a = call <vscale x 1 x bfloat> @llvm.riscv.vfadd.nxv1bf16.bf16(
+ <vscale x 1 x bfloat> poison,
+ <vscale x 1 x bfloat> %0,
+ bfloat %1,
+ iXLen 0, iXLen %2)
+
+ ret <vscale x 1 x bfloat> %a
+}
+
+declare <vscale x 1 x bfloat> @llvm.riscv.vfadd.mask.nxv1bf16.bf16(
+ <vscale x 1 x bfloat>,
+ <vscale x 1 x bfloat>,
+ bfloat,
+ <vscale x 1 x i1>,
+ iXLen, iXLen, iXLen);
+
+define <vscale x 1 x bfloat> @intrinsic_vfadd_mask_vf_nxv1bf16_nxv1bf16_bf16(<vscale x 1 x bfloat> %0, <vscale x 1 x bfloat> %1, bfloat %2, <vscale x 1 x i1> %3, iXLen %4) nounwind {
+; CHECK-LABEL: intrinsic_vfadd_mask_vf_nxv1bf16_nxv1bf16_bf16:
+; CHECK: # %bb.0: # %entry
+; CHECK-NEXT: fsrmi a1, 0
+; CHECK-NEXT: vsetvli zero, a0, e16alt, mf4, ta, mu
+; CHECK-NEXT: vfadd.vf v8, v9, fa0, v0.t
+; CHECK-NEXT: fsrm a1
+; CHECK-NEXT: ret
+entry:
+ %a = call <vscale x 1 x bfloat> @llvm.riscv.vfadd.mask.nxv1bf16.bf16(
+ <vscale x 1 x bfloat> %0,
+ <vscale x 1 x bfloat> %1,
+ bfloat %2,
+ <vscale x 1 x i1> %3,
+ iXLen 0, iXLen %4, iXLen 1)
+
+ ret <vscale x 1 x bfloat> %a
+}
+
+declare <vscale x 2 x bfloat> @llvm.riscv.vfadd.nxv2bf16.bf16(
+ <vscale x 2 x bfloat>,
+ <vscale x 2 x bfloat>,
+ bfloat,
+ iXLen, iXLen);
+
+define <vscale x 2 x bfloat> @intrinsic_vfadd_vf_nxv2bf16_nxv2bf16_bf16(<vscale x 2 x bfloat> %0, bfloat %1, iXLen %2) nounwind {
+; CHECK-LABEL: intrinsic_vfadd_vf_nxv2bf16_nxv2bf16_bf16:
+; CHECK: # %bb.0: # %entry
+; CHECK-NEXT: fsrmi a1, 0
+; CHECK-NEXT: vsetvli zero, a0, e16alt, mf2, ta, ma
+; CHECK-NEXT: vfadd.vf v8, v8, fa0
+; CHECK-NEXT: fsrm a1
+; CHECK-NEXT: ret
+entry:
+ %a = call <vscale x 2 x bfloat> @llvm.riscv.vfadd.nxv2bf16.bf16(
+ <vscale x 2 x bfloat> poison,
+ <vscale x 2 x bfloat> %0,
+ bfloat %1,
+ iXLen 0, iXLen %2)
+
+ ret <vscale x 2 x bfloat> %a
+}
+
+declare <vscale x 2 x bfloat> @llvm.riscv.vfadd.mask.nxv2bf16.bf16(
+ <vscale x 2 x bfloat>,
+ <vscale x 2 x bfloat>,
+ bfloat,
+ <vscale x 2 x i1>,
+ iXLen, iXLen, iXLen);
+
+define <vscale x 2 x bfloat> @intrinsic_vfadd_mask_vf_nxv2bf16_nxv2bf16_bf16(<vscale x 2 x bfloat> %0, <vscale x 2 x bfloat> %1, bfloat %2, <vscale x 2 x i1> %3, iXLen %4) nounwind {
+; CHECK-LABEL: intrinsic_vfadd_mask_vf_nxv2bf16_nxv2bf16_bf16:
+; CHECK: # %bb.0: # %entry
+; CHECK-NEXT: fsrmi a1, 0
+; CHECK-NEXT: vsetvli zero, a0, e16alt, mf2, ta, mu
+; CHECK-NEXT: vfadd.vf v8, v9, fa0, v0.t
+; CHECK-NEXT: fsrm a1
+; CHECK-NEXT: ret
+entry:
+ %a = call <vscale x 2 x bfloat> @llvm.riscv.vfadd.mask.nxv2bf16.bf16(
+ <vscale x 2 x bfloat> %0,
+ <vscale x 2 x bfloat> %1,
+ bfloat %2,
+ <vscale x 2 x i1> %3,
+ iXLen 0, iXLen %4, iXLen 1)
+
+ ret <vscale x 2 x bfloat> %a
+}
+
+declare <vscale x 4 x bfloat> @llvm.riscv.vfadd.nxv4bf16.bf16(
+ <vscale x 4 x bfloat>,
+ <vscale x 4 x bfloat>,
+ bfloat,
+ iXLen, iXLen);
+
+define <vscale x 4 x bfloat> @intrinsic_vfadd_vf_nxv4bf16_nxv4bf16_bf16(<vscale x 4 x bfloat> %0, bfloat %1, iXLen %2) nounwind {
+; CHECK-LABEL: intrinsic_vfadd_vf_nxv4bf16_nxv4bf16_bf16:
+; CHECK: # %bb.0: # %entry
+; CHECK-NEXT: fsrmi a1, 0
+; CHECK-NEXT: vsetvli zero, a0, e16alt, m1, ta, ma
+; CHECK-NEXT: vfadd.vf v8, v8, fa0
+; CHECK-NEXT: fsrm a1
+; CHECK-NEXT: ret
+entry:
+ %a = call <vscale x 4 x bfloat> @llvm.riscv.vfadd.nxv4bf16.bf16(
+ <vscale x 4 x bfloat> poison,
+ <vscale x 4 x bfloat> %0,
+ bfloat %1,
+ iXLen 0, iXLen %2)
+
+ ret <vscale x 4 x bfloat> %a
+}
+
+declare <vscale x 4 x bfloat> @llvm.riscv.vfadd.mask.nxv4bf16.bf16(
+ <vscale x 4 x bfloat>,
+ <vscale x 4 x bfloat>,
+ bfloat,
+ <vscale x 4 x i1>,
+ iXLen, iXLen, iXLen);
+
+define <vscale x 4 x bfloat> @intrinsic_vfadd_mask_vf_nxv4bf16_nxv4bf16_bf16(<vscale x 4 x bfloat> %0, <vscale x 4 x bfloat> %1, bfloat %2, <vscale x 4 x i1> %3, iXLen %4) nounwind {
+; CHECK-LABEL: intrinsic_vfadd_mask_vf_nxv4bf16_nxv4bf16_bf16:
+; CHECK: # %bb.0: # %entry
+; CHECK-NEXT: fsrmi a1, 0
+; CHECK-NEXT: vsetvli zero, a0, e16alt, m1, ta, mu
+; CHECK-NEXT: vfadd.vf v8, v9, fa0, v0.t
+; CHECK-NEXT: fsrm a1
+; CHECK-NEXT: ret
+entry:
+ %a = call <vscale x 4 x bfloat> @llvm.riscv.vfadd.mask.nxv4bf16.bf16(
+ <vscale x 4 x bfloat> %0,
+ <vscale x 4 x bfloat> %1,
+ bfloat %2,
+ <vscale x 4 x i1> %3,
+ iXLen 0, iXLen %4, iXLen 1)
+
+ ret <vscale x 4 x bfloat> %a
+}
+
+declare <vscale x 8 x bfloat> @llvm.riscv.vfadd.nxv8bf16.bf16(
+ <vscale x 8 x bfloat>,
+ <vscale x 8 x bfloat>,
+ bfloat,
+ iXLen, iXLen);
+
+define <vscale x 8 x bfloat> @intrinsic_vfadd_vf_nxv8bf16_nxv8bf16_bf16(<vscale x 8 x bfloat> %0, bfloat %1, iXLen %2) nounwind {
+; CHECK-LABEL: intrinsic_vfadd_vf_nxv8bf16_nxv8bf16_bf16:
+; CHECK: # %bb.0: # %entry
+; CHECK-NEXT: fsrmi a1, 0
+; CHECK-NEXT: vsetvli zero, a0, e16alt, m2, ta, ma
+; CHECK-NEXT: vfadd.vf v8, v8, fa0
+; CHECK-NEXT: fsrm a1
+; CHECK-NEXT: ret
+entry:
+ %a = call <vscale x 8 x bfloat> @llvm.riscv.vfadd.nxv8bf16.bf16(
+ <vscale x 8 x bfloat> poison,
+ <vscale x 8 x bfloat> %0,
+ bfloat %1,
+ iXLen 0, iXLen %2)
+
+ ret <vscale x 8 x bfloat> %a
+}
+
+declare <vscale x 8 x bfloat> @llvm.riscv.vfadd.mask.nxv8bf16.bf16(
+ <vscale x 8 x bfloat>,
+ <vscale x 8 x bfloat>,
+ bfloat,
+ <vscale x 8 x i1>,
+ iXLen, iXLen, iXLen);
+
+define <vscale x 8 x bfloat> @intrinsic_vfadd_mask_vf_nxv8bf16_nxv8bf16_bf16(<vscale x 8 x bfloat> %0, <vscale x 8 x bfloat> %1, bfloat %2, <vscale x 8 x i1> %3, iXLen %4) nounwind {
+; CHECK-LABEL: intrinsic_vfadd_mask_vf_nxv8bf16_nxv8bf16_bf16:
+; CHECK: # %bb.0: # %entry
+; CHECK-NEXT: fsrmi a1, 0
+; CHECK-NEXT: vsetvli zero, a0, e16alt, m2, ta, mu
+; CHECK-NEXT: vfadd.vf v8, v10, fa0, v0.t
+; CHECK-NEXT: fsrm a1
+; CHECK-NEXT: ret
+entry:
+ %a = call <vscale x 8 x bfloat> @llvm.riscv.vfadd.mask.nxv8bf16.bf16(
+ <vscale x 8 x bfloat> %0,
+ <vscale x 8 x bfloat> %1,
+ bfloat %2,
+ <vscale x 8 x i1> %3,
+ iXLen 0, iXLen %4, iXLen 1)
+
+ ret <vscale x 8 x bfloat> %a
+}
+
+declare <vscale x 16 x bfloat> @llvm.riscv.vfadd.nxv16bf16.bf16(
+ <vscale x 16 x bfloat>,
+ <vscale x 16 x bfloat>,
+ bfloat,
+ iXLen, iXLen);
+
+define <vscale x 16 x bfloat> @intrinsic_vfadd_vf_nxv16bf16_nxv16bf16_bf16(<vscale x 16 x bfloat> %0, bfloat %1, iXLen %2) nounwind {
+; CHECK-LABEL: intrinsic_vfadd_vf_nxv16bf16_nxv16bf16_bf16:
+; CHECK: # %bb.0: # %entry
+; CHECK-NEXT: fsrmi a1, 0
+; CHECK-NEXT: vsetvli zero, a0, e16alt, m4, ta, ma
+; CHECK-NEXT: vfadd.vf v8, v8, fa0
+; CHECK-NEXT: fsrm a1
+; CHECK-NEXT: ret
+entry:
+ %a = call <vscale x 16 x bfloat> @llvm.riscv.vfadd.nxv16bf16.bf16(
+ <vscale x 16 x bfloat> poison,
+ <vscale x 16 x bfloat> %0,
+ bfloat %1,
+ iXLen 0, iXLen %2)
+
+ ret <vscale x 16 x bfloat> %a
+}
+
+declare <vscale x 16 x bfloat> @llvm.riscv.vfadd.mask.nxv16bf16.bf16(
+ <vscale x 16 x bfloat>,
+ <vscale x 16 x bfloat>,
+ bfloat,
+ <vscale x 16 x i1>,
+ iXLen, iXLen, iXLen);
+
+define <vscale x 16 x bfloat> @intrinsic_vfadd_mask_vf_nxv16bf16_nxv16bf16_bf16(<vscale x 16 x bfloat> %0, <vscale x 16 x bfloat> %1, bfloat %2, <vscale x 16 x i1> %3, iXLen %4) nounwind {
+; CHECK-LABEL: intrinsic_vfadd_mask_vf_nxv16bf16_nxv16bf16_bf16:
+; CHECK: # %bb.0: # %entry
+; CHECK-NEXT: fsrmi a1, 0
+; CHECK-NEXT: vsetvli zero, a0, e16alt, m4, ta, mu
+; CHECK-NEXT: vfadd.vf v8, v12, fa0, v0.t
+; CHECK-NEXT: fsrm a1
+; CHECK-NEXT: ret
+entry:
+ %a = call <vscale x 16 x bfloat> @llvm.riscv.vfadd.mask.nxv16bf16.bf16(
+ <vscale x 16 x bfloat> %0,
+ <vscale x 16 x bfloat> %1,
+ bfloat %2,
+ <vscale x 16 x i1> %3,
+ iXLen 0, iXLen %4, iXLen 1)
+
+ ret <vscale x 16 x bfloat> %a
+}
+
+declare <vscale x 32 x bfloat> @llvm.riscv.vfadd.nxv32bf16.bf16(
+ <vscale x 32 x bfloat>,
+ <vscale x 32 x bfloat>,
+ bfloat,
+ iXLen, iXLen);
+
+define <vscale x 32 x bfloat> @intrinsic_vfadd_vf_nxv32bf16_nxv32bf16_bf16(<vscale x 32 x bfloat> %0, bfloat %1, iXLen %2) nounwind {
+; CHECK-LABEL: intrinsic_vfadd_vf_nxv32bf16_nxv32bf16_bf16:
+; CHECK: # %bb.0: # %entry
+; CHECK-NEXT: fsrmi a1, 0
+; CHECK-NEXT: vsetvli zero, a0, e16alt, m8, ta, ma
+; CHECK-NEXT: vfadd.vf v8, v8, fa0
+; CHECK-NEXT: fsrm a1
+; CHECK-NEXT: ret
+entry:
+ %a = call <vscale x 32 x bfloat> @llvm.riscv.vfadd.nxv32bf16.bf16(
+ <vscale x 32 x bfloat> poison,
+ <vscale x 32 x bfloat> %0,
+ bfloat %1,
+ iXLen 0, iXLen %2)
+
+ ret <vscale x 32 x bfloat> %a
+}
+
+declare <vscale x 32 x bfloat> @llvm.riscv.vfadd.mask.nxv32bf16.bf16(
+ <vscale x 32 x bfloat>,
+ <vscale x 32 x bfloat>,
+ bfloat,
+ <vscale x 32 x i1>,
+ iXLen, iXLen, iXLen);
+
+define <vscale x 32 x bfloat> @intrinsic_vfadd_mask_vf_nxv32bf16_nxv32bf16_bf16(<vscale x 32 x bfloat> %0, <vscale x 32 x bfloat> %1, bfloat %2, <vscale x 32 x i1> %3, iXLen %4) nounwind {
+; CHECK-LABEL: intrinsic_vfadd_mask_vf_nxv32bf16_nxv32bf16_bf16:
+; CHECK: # %bb.0: # %entry
+; CHECK-NEXT: fsrmi a1, 0
+; CHECK-NEXT: vsetvli zero, a0, e16alt, m8, ta, mu
+; CHECK-NEXT: vfadd.vf v8, v16, fa0, v0.t
+; CHECK-NEXT: fsrm a1
+; CHECK-NEXT: ret
+entry:
+ %a = call <vscale x 32 x bfloat> @llvm.riscv.vfadd.mask.nxv32bf16.bf16(
+ <vscale x 32 x bfloat> %0,
+ <vscale x 32 x bfloat> %1,
+ bfloat %2,
+ <vscale x 32 x i1> %3,
+ iXLen 0, iXLen %4, iXLen 1)
+
+ ret <vscale x 32 x bfloat> %a
+}
+
diff --git a/llvm/test/CodeGen/RISCV/rvv/vfclass-bf.ll b/llvm/test/CodeGen/RISCV/rvv/vfclass-bf.ll
new file mode 100644
index 0000000..d7d49b3
--- /dev/null
+++ b/llvm/test/CodeGen/RISCV/rvv/vfclass-bf.ll
@@ -0,0 +1,294 @@
+; NOTE: Assertions have been autogenerated by utils/update_llc_test_checks.py
+; RUN: sed 's/iXLen/i32/g' %s | llc -mtriple=riscv32 -mattr=+v,+experimental-zvfbfa \
+; RUN: -verify-machineinstrs -target-abi=ilp32d | FileCheck %s
+; RUN: sed 's/iXLen/i64/g' %s | llc -mtriple=riscv64 -mattr=+v,+experimental-zvfbfa \
+; RUN: -verify-machineinstrs -target-abi=lp64d | FileCheck %s
+
+declare <vscale x 1 x i16> @llvm.riscv.vfclass.nxv1i16.nxv1bf16(
+ <vscale x 1 x i16>,
+ <vscale x 1 x bfloat>,
+ iXLen);
+
+define <vscale x 1 x i16> @intrinsic_vfclass_v_nxv1i16_nxv1bf16(
+; CHECK-LABEL: intrinsic_vfclass_v_nxv1i16_nxv1bf16:
+; CHECK: # %bb.0: # %entry
+; CHECK-NEXT: vsetvli zero, a0, e16alt, mf4, ta, ma
+; CHECK-NEXT: vfclass.v v8, v8
+; CHECK-NEXT: ret
+ <vscale x 1 x bfloat> %0,
+ iXLen %1) nounwind {
+entry:
+ %a = call <vscale x 1 x i16> @llvm.riscv.vfclass.nxv1i16.nxv1bf16(
+ <vscale x 1 x i16> poison,
+ <vscale x 1 x bfloat> %0,
+ iXLen %1)
+
+ ret <vscale x 1 x i16> %a
+}
+
+declare <vscale x 1 x i16> @llvm.riscv.vfclass.mask.nxv1i16.nxv1bf16(
+ <vscale x 1 x i16>,
+ <vscale x 1 x bfloat>,
+ <vscale x 1 x i1>,
+ iXLen, iXLen);
+
+define <vscale x 1 x i16> @intrinsic_vfclass_mask_v_nxv1i16_nxv1bf16(
+; CHECK-LABEL: intrinsic_vfclass_mask_v_nxv1i16_nxv1bf16:
+; CHECK: # %bb.0: # %entry
+; CHECK-NEXT: vsetvli zero, a0, e16alt, mf4, tu, mu
+; CHECK-NEXT: vfclass.v v8, v9, v0.t
+; CHECK-NEXT: ret
+ <vscale x 1 x i16> %0,
+ <vscale x 1 x bfloat> %1,
+ <vscale x 1 x i1> %2,
+ iXLen %3) nounwind {
+entry:
+ %a = call <vscale x 1 x i16> @llvm.riscv.vfclass.mask.nxv1i16.nxv1bf16(
+ <vscale x 1 x i16> %0,
+ <vscale x 1 x bfloat> %1,
+ <vscale x 1 x i1> %2,
+ iXLen %3, iXLen 0)
+
+ ret <vscale x 1 x i16> %a
+}
+
+declare <vscale x 2 x i16> @llvm.riscv.vfclass.nxv2i16.nxv2bf16(
+ <vscale x 2 x i16>,
+ <vscale x 2 x bfloat>,
+ iXLen);
+
+define <vscale x 2 x i16> @intrinsic_vfclass_v_nxv2i16_nxv2bf16(
+; CHECK-LABEL: intrinsic_vfclass_v_nxv2i16_nxv2bf16:
+; CHECK: # %bb.0: # %entry
+; CHECK-NEXT: vsetvli zero, a0, e16alt, mf2, ta, ma
+; CHECK-NEXT: vfclass.v v8, v8
+; CHECK-NEXT: ret
+ <vscale x 2 x bfloat> %0,
+ iXLen %1) nounwind {
+entry:
+ %a = call <vscale x 2 x i16> @llvm.riscv.vfclass.nxv2i16.nxv2bf16(
+ <vscale x 2 x i16> poison,
+ <vscale x 2 x bfloat> %0,
+ iXLen %1)
+
+ ret <vscale x 2 x i16> %a
+}
+
+declare <vscale x 2 x i16> @llvm.riscv.vfclass.mask.nxv2i16.nxv2bf16(
+ <vscale x 2 x i16>,
+ <vscale x 2 x bfloat>,
+ <vscale x 2 x i1>,
+ iXLen, iXLen);
+
+define <vscale x 2 x i16> @intrinsic_vfclass_mask_v_nxv2i16_nxv2bf16(
+; CHECK-LABEL: intrinsic_vfclass_mask_v_nxv2i16_nxv2bf16:
+; CHECK: # %bb.0: # %entry
+; CHECK-NEXT: vsetvli zero, a0, e16alt, mf2, tu, mu
+; CHECK-NEXT: vfclass.v v8, v9, v0.t
+; CHECK-NEXT: ret
+ <vscale x 2 x i16> %0,
+ <vscale x 2 x bfloat> %1,
+ <vscale x 2 x i1> %2,
+ iXLen %3) nounwind {
+entry:
+ %a = call <vscale x 2 x i16> @llvm.riscv.vfclass.mask.nxv2i16.nxv2bf16(
+ <vscale x 2 x i16> %0,
+ <vscale x 2 x bfloat> %1,
+ <vscale x 2 x i1> %2,
+ iXLen %3, iXLen 0)
+
+ ret <vscale x 2 x i16> %a
+}
+
+declare <vscale x 4 x i16> @llvm.riscv.vfclass.nxv4i16.nxv4bf16(
+ <vscale x 4 x i16>,
+ <vscale x 4 x bfloat>,
+ iXLen);
+
+define <vscale x 4 x i16> @intrinsic_vfclass_v_nxv4i16_nxv4bf16(
+; CHECK-LABEL: intrinsic_vfclass_v_nxv4i16_nxv4bf16:
+; CHECK: # %bb.0: # %entry
+; CHECK-NEXT: vsetvli zero, a0, e16alt, m1, ta, ma
+; CHECK-NEXT: vfclass.v v8, v8
+; CHECK-NEXT: ret
+ <vscale x 4 x bfloat> %0,
+ iXLen %1) nounwind {
+entry:
+ %a = call <vscale x 4 x i16> @llvm.riscv.vfclass.nxv4i16.nxv4bf16(
+ <vscale x 4 x i16> poison,
+ <vscale x 4 x bfloat> %0,
+ iXLen %1)
+
+ ret <vscale x 4 x i16> %a
+}
+
+declare <vscale x 4 x i16> @llvm.riscv.vfclass.mask.nxv4i16.nxv4bf16(
+ <vscale x 4 x i16>,
+ <vscale x 4 x bfloat>,
+ <vscale x 4 x i1>,
+ iXLen, iXLen);
+
+define <vscale x 4 x i16> @intrinsic_vfclass_mask_v_nxv4i16_nxv4bf16(
+; CHECK-LABEL: intrinsic_vfclass_mask_v_nxv4i16_nxv4bf16:
+; CHECK: # %bb.0: # %entry
+; CHECK-NEXT: vsetvli zero, a0, e16alt, m1, tu, mu
+; CHECK-NEXT: vfclass.v v8, v9, v0.t
+; CHECK-NEXT: ret
+ <vscale x 4 x i16> %0,
+ <vscale x 4 x bfloat> %1,
+ <vscale x 4 x i1> %2,
+ iXLen %3) nounwind {
+entry:
+ %a = call <vscale x 4 x i16> @llvm.riscv.vfclass.mask.nxv4i16.nxv4bf16(
+ <vscale x 4 x i16> %0,
+ <vscale x 4 x bfloat> %1,
+ <vscale x 4 x i1> %2,
+ iXLen %3, iXLen 0)
+
+ ret <vscale x 4 x i16> %a
+}
+
+declare <vscale x 8 x i16> @llvm.riscv.vfclass.nxv8i16.nxv8bf16(
+ <vscale x 8 x i16>,
+ <vscale x 8 x bfloat>,
+ iXLen);
+
+define <vscale x 8 x i16> @intrinsic_vfclass_v_nxv8i16_nxv8bf16(
+; CHECK-LABEL: intrinsic_vfclass_v_nxv8i16_nxv8bf16:
+; CHECK: # %bb.0: # %entry
+; CHECK-NEXT: vsetvli zero, a0, e16alt, m2, ta, ma
+; CHECK-NEXT: vfclass.v v8, v8
+; CHECK-NEXT: ret
+ <vscale x 8 x bfloat> %0,
+ iXLen %1) nounwind {
+entry:
+ %a = call <vscale x 8 x i16> @llvm.riscv.vfclass.nxv8i16.nxv8bf16(
+ <vscale x 8 x i16> poison,
+ <vscale x 8 x bfloat> %0,
+ iXLen %1)
+
+ ret <vscale x 8 x i16> %a
+}
+
+declare <vscale x 8 x i16> @llvm.riscv.vfclass.mask.nxv8i16.nxv8bf16(
+ <vscale x 8 x i16>,
+ <vscale x 8 x bfloat>,
+ <vscale x 8 x i1>,
+ iXLen, iXLen);
+
+define <vscale x 8 x i16> @intrinsic_vfclass_mask_v_nxv8i16_nxv8bf16(
+; CHECK-LABEL: intrinsic_vfclass_mask_v_nxv8i16_nxv8bf16:
+; CHECK: # %bb.0: # %entry
+; CHECK-NEXT: vsetvli zero, a0, e16alt, m2, tu, mu
+; CHECK-NEXT: vfclass.v v8, v10, v0.t
+; CHECK-NEXT: ret
+ <vscale x 8 x i16> %0,
+ <vscale x 8 x bfloat> %1,
+ <vscale x 8 x i1> %2,
+ iXLen %3) nounwind {
+entry:
+ %a = call <vscale x 8 x i16> @llvm.riscv.vfclass.mask.nxv8i16.nxv8bf16(
+ <vscale x 8 x i16> %0,
+ <vscale x 8 x bfloat> %1,
+ <vscale x 8 x i1> %2,
+ iXLen %3, iXLen 0)
+
+ ret <vscale x 8 x i16> %a
+}
+
+declare <vscale x 16 x i16> @llvm.riscv.vfclass.nxv16i16.nxv16bf16(
+ <vscale x 16 x i16>,
+ <vscale x 16 x bfloat>,
+ iXLen);
+
+define <vscale x 16 x i16> @intrinsic_vfclass_v_nxv16i16_nxv16bf16(
+; CHECK-LABEL: intrinsic_vfclass_v_nxv16i16_nxv16bf16:
+; CHECK: # %bb.0: # %entry
+; CHECK-NEXT: vsetvli zero, a0, e16alt, m4, ta, ma
+; CHECK-NEXT: vfclass.v v8, v8
+; CHECK-NEXT: ret
+ <vscale x 16 x bfloat> %0,
+ iXLen %1) nounwind {
+entry:
+ %a = call <vscale x 16 x i16> @llvm.riscv.vfclass.nxv16i16.nxv16bf16(
+ <vscale x 16 x i16> poison,
+ <vscale x 16 x bfloat> %0,
+ iXLen %1)
+
+ ret <vscale x 16 x i16> %a
+}
+
+declare <vscale x 16 x i16> @llvm.riscv.vfclass.mask.nxv16i16.nxv16bf16(
+ <vscale x 16 x i16>,
+ <vscale x 16 x bfloat>,
+ <vscale x 16 x i1>,
+ iXLen, iXLen);
+
+define <vscale x 16 x i16> @intrinsic_vfclass_mask_v_nxv16i16_nxv16bf16(
+; CHECK-LABEL: intrinsic_vfclass_mask_v_nxv16i16_nxv16bf16:
+; CHECK: # %bb.0: # %entry
+; CHECK-NEXT: vsetvli zero, a0, e16alt, m4, tu, mu
+; CHECK-NEXT: vfclass.v v8, v12, v0.t
+; CHECK-NEXT: ret
+ <vscale x 16 x i16> %0,
+ <vscale x 16 x bfloat> %1,
+ <vscale x 16 x i1> %2,
+ iXLen %3) nounwind {
+entry:
+ %a = call <vscale x 16 x i16> @llvm.riscv.vfclass.mask.nxv16i16.nxv16bf16(
+ <vscale x 16 x i16> %0,
+ <vscale x 16 x bfloat> %1,
+ <vscale x 16 x i1> %2,
+ iXLen %3, iXLen 0)
+
+ ret <vscale x 16 x i16> %a
+}
+
+declare <vscale x 32 x i16> @llvm.riscv.vfclass.nxv32i16.nxv32bf16(
+ <vscale x 32 x i16>,
+ <vscale x 32 x bfloat>,
+ iXLen);
+
+define <vscale x 32 x i16> @intrinsic_vfclass_v_nxv32i16_nxv32bf16(
+; CHECK-LABEL: intrinsic_vfclass_v_nxv32i16_nxv32bf16:
+; CHECK: # %bb.0: # %entry
+; CHECK-NEXT: vsetvli zero, a0, e16alt, m8, ta, ma
+; CHECK-NEXT: vfclass.v v8, v8
+; CHECK-NEXT: ret
+ <vscale x 32 x bfloat> %0,
+ iXLen %1) nounwind {
+entry:
+ %a = call <vscale x 32 x i16> @llvm.riscv.vfclass.nxv32i16.nxv32bf16(
+ <vscale x 32 x i16> poison,
+ <vscale x 32 x bfloat> %0,
+ iXLen %1)
+
+ ret <vscale x 32 x i16> %a
+}
+
+declare <vscale x 32 x i16> @llvm.riscv.vfclass.mask.nxv32i16.nxv32bf16(
+ <vscale x 32 x i16>,
+ <vscale x 32 x bfloat>,
+ <vscale x 32 x i1>,
+ iXLen, iXLen);
+
+define <vscale x 32 x i16> @intrinsic_vfclass_mask_v_nxv32i16_nxv32bf16(
+; CHECK-LABEL: intrinsic_vfclass_mask_v_nxv32i16_nxv32bf16:
+; CHECK: # %bb.0: # %entry
+; CHECK-NEXT: vsetvli zero, a0, e16alt, m8, tu, mu
+; CHECK-NEXT: vfclass.v v8, v16, v0.t
+; CHECK-NEXT: ret
+ <vscale x 32 x i16> %0,
+ <vscale x 32 x bfloat> %1,
+ <vscale x 32 x i1> %2,
+ iXLen %3) nounwind {
+entry:
+ %a = call <vscale x 32 x i16> @llvm.riscv.vfclass.mask.nxv32i16.nxv32bf16(
+ <vscale x 32 x i16> %0,
+ <vscale x 32 x bfloat> %1,
+ <vscale x 32 x i1> %2,
+ iXLen %3, iXLen 0)
+
+ ret <vscale x 32 x i16> %a
+}
+
diff --git a/llvm/test/CodeGen/RISCV/rvv/vfmacc-bf.ll b/llvm/test/CodeGen/RISCV/rvv/vfmacc-bf.ll
new file mode 100644
index 0000000..13821d7
--- /dev/null
+++ b/llvm/test/CodeGen/RISCV/rvv/vfmacc-bf.ll
@@ -0,0 +1,553 @@
+; NOTE: Assertions have been autogenerated by utils/update_llc_test_checks.py
+; RUN: sed 's/iXLen/i32/g' %s | llc -mtriple=riscv32 -mattr=+v,+experimental-zvfbfa \
+; RUN: -verify-machineinstrs -target-abi=ilp32d | FileCheck %s
+; RUN: sed 's/iXLen/i64/g' %s | llc -mtriple=riscv64 -mattr=+v,+experimental-zvfbfa \
+; RUN: -verify-machineinstrs -target-abi=lp64d | FileCheck %s
+
+declare <vscale x 1 x bfloat> @llvm.riscv.vfmacc.nxv1bf16.nxv1bf16(
+ <vscale x 1 x bfloat>,
+ <vscale x 1 x bfloat>,
+ <vscale x 1 x bfloat>,
+ iXLen, iXLen, iXLen);
+
+define <vscale x 1 x bfloat> @intrinsic_vfmacc_vv_nxv1bf16_nxv1bf16_nxv1bf16(<vscale x 1 x bfloat> %0, <vscale x 1 x bfloat> %1, <vscale x 1 x bfloat> %2, iXLen %3) nounwind {
+; CHECK-LABEL: intrinsic_vfmacc_vv_nxv1bf16_nxv1bf16_nxv1bf16:
+; CHECK: # %bb.0: # %entry
+; CHECK-NEXT: fsrmi a1, 0
+; CHECK-NEXT: vsetvli zero, a0, e16alt, mf4, tu, ma
+; CHECK-NEXT: vfmacc.vv v8, v9, v10
+; CHECK-NEXT: fsrm a1
+; CHECK-NEXT: ret
+entry:
+ %a = call <vscale x 1 x bfloat> @llvm.riscv.vfmacc.nxv1bf16.nxv1bf16(
+ <vscale x 1 x bfloat> %0,
+ <vscale x 1 x bfloat> %1,
+ <vscale x 1 x bfloat> %2,
+ iXLen 0, iXLen %3, iXLen 0)
+
+ ret <vscale x 1 x bfloat> %a
+}
+
+declare <vscale x 1 x bfloat> @llvm.riscv.vfmacc.mask.nxv1bf16.nxv1bf16(
+ <vscale x 1 x bfloat>,
+ <vscale x 1 x bfloat>,
+ <vscale x 1 x bfloat>,
+ <vscale x 1 x i1>,
+ iXLen, iXLen, iXLen);
+
+define <vscale x 1 x bfloat> @intrinsic_vfmacc_mask_vv_nxv1bf16_nxv1bf16_nxv1bf16(<vscale x 1 x bfloat> %0, <vscale x 1 x bfloat> %1, <vscale x 1 x bfloat> %2, <vscale x 1 x i1> %3, iXLen %4) nounwind {
+; CHECK-LABEL: intrinsic_vfmacc_mask_vv_nxv1bf16_nxv1bf16_nxv1bf16:
+; CHECK: # %bb.0: # %entry
+; CHECK-NEXT: fsrmi a1, 0
+; CHECK-NEXT: vsetvli zero, a0, e16alt, mf4, tu, mu
+; CHECK-NEXT: vfmacc.vv v8, v9, v10, v0.t
+; CHECK-NEXT: fsrm a1
+; CHECK-NEXT: ret
+entry:
+ %a = call <vscale x 1 x bfloat> @llvm.riscv.vfmacc.mask.nxv1bf16.nxv1bf16(
+ <vscale x 1 x bfloat> %0,
+ <vscale x 1 x bfloat> %1,
+ <vscale x 1 x bfloat> %2,
+ <vscale x 1 x i1> %3,
+ iXLen 0, iXLen %4, iXLen 0)
+
+ ret <vscale x 1 x bfloat> %a
+}
+
+declare <vscale x 2 x bfloat> @llvm.riscv.vfmacc.nxv2bf16.nxv2bf16(
+ <vscale x 2 x bfloat>,
+ <vscale x 2 x bfloat>,
+ <vscale x 2 x bfloat>,
+ iXLen, iXLen, iXLen);
+
+define <vscale x 2 x bfloat> @intrinsic_vfmacc_vv_nxv2bf16_nxv2bf16_nxv2bf16(<vscale x 2 x bfloat> %0, <vscale x 2 x bfloat> %1, <vscale x 2 x bfloat> %2, iXLen %3) nounwind {
+; CHECK-LABEL: intrinsic_vfmacc_vv_nxv2bf16_nxv2bf16_nxv2bf16:
+; CHECK: # %bb.0: # %entry
+; CHECK-NEXT: fsrmi a1, 0
+; CHECK-NEXT: vsetvli zero, a0, e16alt, mf2, tu, ma
+; CHECK-NEXT: vfmacc.vv v8, v9, v10
+; CHECK-NEXT: fsrm a1
+; CHECK-NEXT: ret
+entry:
+ %a = call <vscale x 2 x bfloat> @llvm.riscv.vfmacc.nxv2bf16.nxv2bf16(
+ <vscale x 2 x bfloat> %0,
+ <vscale x 2 x bfloat> %1,
+ <vscale x 2 x bfloat> %2,
+ iXLen 0, iXLen %3, iXLen 0)
+
+ ret <vscale x 2 x bfloat> %a
+}
+
+declare <vscale x 2 x bfloat> @llvm.riscv.vfmacc.mask.nxv2bf16.nxv2bf16(
+ <vscale x 2 x bfloat>,
+ <vscale x 2 x bfloat>,
+ <vscale x 2 x bfloat>,
+ <vscale x 2 x i1>,
+ iXLen, iXLen, iXLen);
+
+define <vscale x 2 x bfloat> @intrinsic_vfmacc_mask_vv_nxv2bf16_nxv2bf16_nxv2bf16(<vscale x 2 x bfloat> %0, <vscale x 2 x bfloat> %1, <vscale x 2 x bfloat> %2, <vscale x 2 x i1> %3, iXLen %4) nounwind {
+; CHECK-LABEL: intrinsic_vfmacc_mask_vv_nxv2bf16_nxv2bf16_nxv2bf16:
+; CHECK: # %bb.0: # %entry
+; CHECK-NEXT: fsrmi a1, 0
+; CHECK-NEXT: vsetvli zero, a0, e16alt, mf2, tu, mu
+; CHECK-NEXT: vfmacc.vv v8, v9, v10, v0.t
+; CHECK-NEXT: fsrm a1
+; CHECK-NEXT: ret
+entry:
+ %a = call <vscale x 2 x bfloat> @llvm.riscv.vfmacc.mask.nxv2bf16.nxv2bf16(
+ <vscale x 2 x bfloat> %0,
+ <vscale x 2 x bfloat> %1,
+ <vscale x 2 x bfloat> %2,
+ <vscale x 2 x i1> %3,
+ iXLen 0, iXLen %4, iXLen 0)
+
+ ret <vscale x 2 x bfloat> %a
+}
+
+declare <vscale x 4 x bfloat> @llvm.riscv.vfmacc.nxv4bf16.nxv4bf16(
+ <vscale x 4 x bfloat>,
+ <vscale x 4 x bfloat>,
+ <vscale x 4 x bfloat>,
+ iXLen, iXLen, iXLen);
+
+define <vscale x 4 x bfloat> @intrinsic_vfmacc_vv_nxv4bf16_nxv4bf16_nxv4bf16(<vscale x 4 x bfloat> %0, <vscale x 4 x bfloat> %1, <vscale x 4 x bfloat> %2, iXLen %3) nounwind {
+; CHECK-LABEL: intrinsic_vfmacc_vv_nxv4bf16_nxv4bf16_nxv4bf16:
+; CHECK: # %bb.0: # %entry
+; CHECK-NEXT: fsrmi a1, 0
+; CHECK-NEXT: vsetvli zero, a0, e16alt, m1, tu, ma
+; CHECK-NEXT: vfmacc.vv v8, v9, v10
+; CHECK-NEXT: fsrm a1
+; CHECK-NEXT: ret
+entry:
+ %a = call <vscale x 4 x bfloat> @llvm.riscv.vfmacc.nxv4bf16.nxv4bf16(
+ <vscale x 4 x bfloat> %0,
+ <vscale x 4 x bfloat> %1,
+ <vscale x 4 x bfloat> %2,
+ iXLen 0, iXLen %3, iXLen 0)
+
+ ret <vscale x 4 x bfloat> %a
+}
+
+declare <vscale x 4 x bfloat> @llvm.riscv.vfmacc.mask.nxv4bf16.nxv4bf16(
+ <vscale x 4 x bfloat>,
+ <vscale x 4 x bfloat>,
+ <vscale x 4 x bfloat>,
+ <vscale x 4 x i1>,
+ iXLen, iXLen, iXLen);
+
+define <vscale x 4 x bfloat> @intrinsic_vfmacc_mask_vv_nxv4bf16_nxv4bf16_nxv4bf16(<vscale x 4 x bfloat> %0, <vscale x 4 x bfloat> %1, <vscale x 4 x bfloat> %2, <vscale x 4 x i1> %3, iXLen %4) nounwind {
+; CHECK-LABEL: intrinsic_vfmacc_mask_vv_nxv4bf16_nxv4bf16_nxv4bf16:
+; CHECK: # %bb.0: # %entry
+; CHECK-NEXT: fsrmi a1, 0
+; CHECK-NEXT: vsetvli zero, a0, e16alt, m1, tu, mu
+; CHECK-NEXT: vfmacc.vv v8, v9, v10, v0.t
+; CHECK-NEXT: fsrm a1
+; CHECK-NEXT: ret
+entry:
+ %a = call <vscale x 4 x bfloat> @llvm.riscv.vfmacc.mask.nxv4bf16.nxv4bf16(
+ <vscale x 4 x bfloat> %0,
+ <vscale x 4 x bfloat> %1,
+ <vscale x 4 x bfloat> %2,
+ <vscale x 4 x i1> %3,
+ iXLen 0, iXLen %4, iXLen 0)
+
+ ret <vscale x 4 x bfloat> %a
+}
+
+declare <vscale x 8 x bfloat> @llvm.riscv.vfmacc.nxv8bf16.nxv8bf16(
+ <vscale x 8 x bfloat>,
+ <vscale x 8 x bfloat>,
+ <vscale x 8 x bfloat>,
+ iXLen, iXLen, iXLen);
+
+define <vscale x 8 x bfloat> @intrinsic_vfmacc_vv_nxv8bf16_nxv8bf16_nxv8bf16(<vscale x 8 x bfloat> %0, <vscale x 8 x bfloat> %1, <vscale x 8 x bfloat> %2, iXLen %3) nounwind {
+; CHECK-LABEL: intrinsic_vfmacc_vv_nxv8bf16_nxv8bf16_nxv8bf16:
+; CHECK: # %bb.0: # %entry
+; CHECK-NEXT: fsrmi a1, 0
+; CHECK-NEXT: vsetvli zero, a0, e16alt, m2, tu, ma
+; CHECK-NEXT: vfmacc.vv v8, v10, v12
+; CHECK-NEXT: fsrm a1
+; CHECK-NEXT: ret
+entry:
+ %a = call <vscale x 8 x bfloat> @llvm.riscv.vfmacc.nxv8bf16.nxv8bf16(
+ <vscale x 8 x bfloat> %0,
+ <vscale x 8 x bfloat> %1,
+ <vscale x 8 x bfloat> %2,
+ iXLen 0, iXLen %3, iXLen 0)
+
+ ret <vscale x 8 x bfloat> %a
+}
+
+declare <vscale x 8 x bfloat> @llvm.riscv.vfmacc.mask.nxv8bf16.nxv8bf16(
+ <vscale x 8 x bfloat>,
+ <vscale x 8 x bfloat>,
+ <vscale x 8 x bfloat>,
+ <vscale x 8 x i1>,
+ iXLen, iXLen, iXLen);
+
+define <vscale x 8 x bfloat> @intrinsic_vfmacc_mask_vv_nxv8bf16_nxv8bf16_nxv8bf16(<vscale x 8 x bfloat> %0, <vscale x 8 x bfloat> %1, <vscale x 8 x bfloat> %2, <vscale x 8 x i1> %3, iXLen %4) nounwind {
+; CHECK-LABEL: intrinsic_vfmacc_mask_vv_nxv8bf16_nxv8bf16_nxv8bf16:
+; CHECK: # %bb.0: # %entry
+; CHECK-NEXT: fsrmi a1, 0
+; CHECK-NEXT: vsetvli zero, a0, e16alt, m2, tu, mu
+; CHECK-NEXT: vfmacc.vv v8, v10, v12, v0.t
+; CHECK-NEXT: fsrm a1
+; CHECK-NEXT: ret
+entry:
+ %a = call <vscale x 8 x bfloat> @llvm.riscv.vfmacc.mask.nxv8bf16.nxv8bf16(
+ <vscale x 8 x bfloat> %0,
+ <vscale x 8 x bfloat> %1,
+ <vscale x 8 x bfloat> %2,
+ <vscale x 8 x i1> %3,
+ iXLen 0, iXLen %4, iXLen 0)
+
+ ret <vscale x 8 x bfloat> %a
+}
+
+declare <vscale x 16 x bfloat> @llvm.riscv.vfmacc.nxv16bf16.nxv16bf16(
+ <vscale x 16 x bfloat>,
+ <vscale x 16 x bfloat>,
+ <vscale x 16 x bfloat>,
+ iXLen, iXLen, iXLen);
+
+define <vscale x 16 x bfloat> @intrinsic_vfmacc_vv_nxv16bf16_nxv16bf16_nxv16bf16(<vscale x 16 x bfloat> %0, <vscale x 16 x bfloat> %1, <vscale x 16 x bfloat> %2, iXLen %3) nounwind {
+; CHECK-LABEL: intrinsic_vfmacc_vv_nxv16bf16_nxv16bf16_nxv16bf16:
+; CHECK: # %bb.0: # %entry
+; CHECK-NEXT: fsrmi a1, 0
+; CHECK-NEXT: vsetvli zero, a0, e16alt, m4, tu, ma
+; CHECK-NEXT: vfmacc.vv v8, v12, v16
+; CHECK-NEXT: fsrm a1
+; CHECK-NEXT: ret
+entry:
+ %a = call <vscale x 16 x bfloat> @llvm.riscv.vfmacc.nxv16bf16.nxv16bf16(
+ <vscale x 16 x bfloat> %0,
+ <vscale x 16 x bfloat> %1,
+ <vscale x 16 x bfloat> %2,
+ iXLen 0, iXLen %3, iXLen 0)
+
+ ret <vscale x 16 x bfloat> %a
+}
+
+declare <vscale x 16 x bfloat> @llvm.riscv.vfmacc.mask.nxv16bf16.nxv16bf16(
+ <vscale x 16 x bfloat>,
+ <vscale x 16 x bfloat>,
+ <vscale x 16 x bfloat>,
+ <vscale x 16 x i1>,
+ iXLen, iXLen, iXLen);
+
+define <vscale x 16 x bfloat> @intrinsic_vfmacc_mask_vv_nxv16bf16_nxv16bf16_nxv16bf16(<vscale x 16 x bfloat> %0, <vscale x 16 x bfloat> %1, <vscale x 16 x bfloat> %2, <vscale x 16 x i1> %3, iXLen %4) nounwind {
+; CHECK-LABEL: intrinsic_vfmacc_mask_vv_nxv16bf16_nxv16bf16_nxv16bf16:
+; CHECK: # %bb.0: # %entry
+; CHECK-NEXT: fsrmi a1, 0
+; CHECK-NEXT: vsetvli zero, a0, e16alt, m4, tu, mu
+; CHECK-NEXT: vfmacc.vv v8, v12, v16, v0.t
+; CHECK-NEXT: fsrm a1
+; CHECK-NEXT: ret
+entry:
+ %a = call <vscale x 16 x bfloat> @llvm.riscv.vfmacc.mask.nxv16bf16.nxv16bf16(
+ <vscale x 16 x bfloat> %0,
+ <vscale x 16 x bfloat> %1,
+ <vscale x 16 x bfloat> %2,
+ <vscale x 16 x i1> %3,
+ iXLen 0, iXLen %4, iXLen 0)
+
+ ret <vscale x 16 x bfloat> %a
+}
+
+declare <vscale x 1 x bfloat> @llvm.riscv.vfmacc.nxv1bf16.bf16(
+ <vscale x 1 x bfloat>,
+ bfloat,
+ <vscale x 1 x bfloat>,
+ iXLen, iXLen, iXLen);
+
+define <vscale x 1 x bfloat> @intrinsic_vfmacc_vf_nxv1bf16_bf16_nxv1bf16(<vscale x 1 x bfloat> %0, bfloat %1, <vscale x 1 x bfloat> %2, iXLen %3) nounwind {
+; CHECK-LABEL: intrinsic_vfmacc_vf_nxv1bf16_bf16_nxv1bf16:
+; CHECK: # %bb.0: # %entry
+; CHECK-NEXT: fsrmi a1, 0
+; CHECK-NEXT: vsetvli zero, a0, e16alt, mf4, tu, ma
+; CHECK-NEXT: vfmacc.vf v8, fa0, v9
+; CHECK-NEXT: fsrm a1
+; CHECK-NEXT: ret
+entry:
+ %a = call <vscale x 1 x bfloat> @llvm.riscv.vfmacc.nxv1bf16.bf16(
+ <vscale x 1 x bfloat> %0,
+ bfloat %1,
+ <vscale x 1 x bfloat> %2,
+ iXLen 0, iXLen %3, iXLen 0)
+
+ ret <vscale x 1 x bfloat> %a
+}
+
+declare <vscale x 1 x bfloat> @llvm.riscv.vfmacc.mask.nxv1bf16.bf16(
+ <vscale x 1 x bfloat>,
+ bfloat,
+ <vscale x 1 x bfloat>,
+ <vscale x 1 x i1>,
+ iXLen, iXLen, iXLen);
+
+define <vscale x 1 x bfloat> @intrinsic_vfmacc_mask_vf_nxv1bf16_bf16_nxv1bf16(<vscale x 1 x bfloat> %0, bfloat %1, <vscale x 1 x bfloat> %2, <vscale x 1 x i1> %3, iXLen %4) nounwind {
+; CHECK-LABEL: intrinsic_vfmacc_mask_vf_nxv1bf16_bf16_nxv1bf16:
+; CHECK: # %bb.0: # %entry
+; CHECK-NEXT: fsrmi a1, 0
+; CHECK-NEXT: vsetvli zero, a0, e16alt, mf4, tu, mu
+; CHECK-NEXT: vfmacc.vf v8, fa0, v9, v0.t
+; CHECK-NEXT: fsrm a1
+; CHECK-NEXT: ret
+entry:
+ %a = call <vscale x 1 x bfloat> @llvm.riscv.vfmacc.mask.nxv1bf16.bf16(
+ <vscale x 1 x bfloat> %0,
+ bfloat %1,
+ <vscale x 1 x bfloat> %2,
+ <vscale x 1 x i1> %3,
+ iXLen 0, iXLen %4, iXLen 0)
+
+ ret <vscale x 1 x bfloat> %a
+}
+
+declare <vscale x 2 x bfloat> @llvm.riscv.vfmacc.nxv2bf16.bf16(
+ <vscale x 2 x bfloat>,
+ bfloat,
+ <vscale x 2 x bfloat>,
+ iXLen, iXLen, iXLen);
+
+define <vscale x 2 x bfloat> @intrinsic_vfmacc_vf_nxv2bf16_bf16_nxv2bf16(<vscale x 2 x bfloat> %0, bfloat %1, <vscale x 2 x bfloat> %2, iXLen %3) nounwind {
+; CHECK-LABEL: intrinsic_vfmacc_vf_nxv2bf16_bf16_nxv2bf16:
+; CHECK: # %bb.0: # %entry
+; CHECK-NEXT: fsrmi a1, 0
+; CHECK-NEXT: vsetvli zero, a0, e16alt, mf2, tu, ma
+; CHECK-NEXT: vfmacc.vf v8, fa0, v9
+; CHECK-NEXT: fsrm a1
+; CHECK-NEXT: ret
+entry:
+ %a = call <vscale x 2 x bfloat> @llvm.riscv.vfmacc.nxv2bf16.bf16(
+ <vscale x 2 x bfloat> %0,
+ bfloat %1,
+ <vscale x 2 x bfloat> %2,
+ iXLen 0, iXLen %3, iXLen 0)
+
+ ret <vscale x 2 x bfloat> %a
+}
+
+declare <vscale x 2 x bfloat> @llvm.riscv.vfmacc.mask.nxv2bf16.bf16(
+ <vscale x 2 x bfloat>,
+ bfloat,
+ <vscale x 2 x bfloat>,
+ <vscale x 2 x i1>,
+ iXLen, iXLen, iXLen);
+
+define <vscale x 2 x bfloat> @intrinsic_vfmacc_mask_vf_nxv2bf16_bf16_nxv2bf16(<vscale x 2 x bfloat> %0, bfloat %1, <vscale x 2 x bfloat> %2, <vscale x 2 x i1> %3, iXLen %4) nounwind {
+; CHECK-LABEL: intrinsic_vfmacc_mask_vf_nxv2bf16_bf16_nxv2bf16:
+; CHECK: # %bb.0: # %entry
+; CHECK-NEXT: fsrmi a1, 0
+; CHECK-NEXT: vsetvli zero, a0, e16alt, mf2, tu, mu
+; CHECK-NEXT: vfmacc.vf v8, fa0, v9, v0.t
+; CHECK-NEXT: fsrm a1
+; CHECK-NEXT: ret
+entry:
+ %a = call <vscale x 2 x bfloat> @llvm.riscv.vfmacc.mask.nxv2bf16.bf16(
+ <vscale x 2 x bfloat> %0,
+ bfloat %1,
+ <vscale x 2 x bfloat> %2,
+ <vscale x 2 x i1> %3,
+ iXLen 0, iXLen %4, iXLen 0)
+
+ ret <vscale x 2 x bfloat> %a
+}
+
+declare <vscale x 4 x bfloat> @llvm.riscv.vfmacc.nxv4bf16.bf16(
+ <vscale x 4 x bfloat>,
+ bfloat,
+ <vscale x 4 x bfloat>,
+ iXLen, iXLen, iXLen);
+
+define <vscale x 4 x bfloat> @intrinsic_vfmacc_vf_nxv4bf16_bf16_nxv4bf16(<vscale x 4 x bfloat> %0, bfloat %1, <vscale x 4 x bfloat> %2, iXLen %3) nounwind {
+; CHECK-LABEL: intrinsic_vfmacc_vf_nxv4bf16_bf16_nxv4bf16:
+; CHECK: # %bb.0: # %entry
+; CHECK-NEXT: fsrmi a1, 0
+; CHECK-NEXT: vsetvli zero, a0, e16alt, m1, tu, ma
+; CHECK-NEXT: vfmacc.vf v8, fa0, v9
+; CHECK-NEXT: fsrm a1
+; CHECK-NEXT: ret
+entry:
+ %a = call <vscale x 4 x bfloat> @llvm.riscv.vfmacc.nxv4bf16.bf16(
+ <vscale x 4 x bfloat> %0,
+ bfloat %1,
+ <vscale x 4 x bfloat> %2,
+ iXLen 0, iXLen %3, iXLen 0)
+
+ ret <vscale x 4 x bfloat> %a
+}
+
+declare <vscale x 4 x bfloat> @llvm.riscv.vfmacc.mask.nxv4bf16.bf16(
+ <vscale x 4 x bfloat>,
+ bfloat,
+ <vscale x 4 x bfloat>,
+ <vscale x 4 x i1>,
+ iXLen, iXLen, iXLen);
+
+define <vscale x 4 x bfloat> @intrinsic_vfmacc_mask_vf_nxv4bf16_bf16_nxv4bf16(<vscale x 4 x bfloat> %0, bfloat %1, <vscale x 4 x bfloat> %2, <vscale x 4 x i1> %3, iXLen %4) nounwind {
+; CHECK-LABEL: intrinsic_vfmacc_mask_vf_nxv4bf16_bf16_nxv4bf16:
+; CHECK: # %bb.0: # %entry
+; CHECK-NEXT: fsrmi a1, 0
+; CHECK-NEXT: vsetvli zero, a0, e16alt, m1, tu, mu
+; CHECK-NEXT: vfmacc.vf v8, fa0, v9, v0.t
+; CHECK-NEXT: fsrm a1
+; CHECK-NEXT: ret
+entry:
+ %a = call <vscale x 4 x bfloat> @llvm.riscv.vfmacc.mask.nxv4bf16.bf16(
+ <vscale x 4 x bfloat> %0,
+ bfloat %1,
+ <vscale x 4 x bfloat> %2,
+ <vscale x 4 x i1> %3,
+ iXLen 0, iXLen %4, iXLen 0)
+
+ ret <vscale x 4 x bfloat> %a
+}
+
+declare <vscale x 8 x bfloat> @llvm.riscv.vfmacc.nxv8bf16.bf16(
+ <vscale x 8 x bfloat>,
+ bfloat,
+ <vscale x 8 x bfloat>,
+ iXLen, iXLen, iXLen);
+
+define <vscale x 8 x bfloat> @intrinsic_vfmacc_vf_nxv8bf16_bf16_nxv8bf16(<vscale x 8 x bfloat> %0, bfloat %1, <vscale x 8 x bfloat> %2, iXLen %3) nounwind {
+; CHECK-LABEL: intrinsic_vfmacc_vf_nxv8bf16_bf16_nxv8bf16:
+; CHECK: # %bb.0: # %entry
+; CHECK-NEXT: fsrmi a1, 0
+; CHECK-NEXT: vsetvli zero, a0, e16alt, m2, tu, ma
+; CHECK-NEXT: vfmacc.vf v8, fa0, v10
+; CHECK-NEXT: fsrm a1
+; CHECK-NEXT: ret
+entry:
+ %a = call <vscale x 8 x bfloat> @llvm.riscv.vfmacc.nxv8bf16.bf16(
+ <vscale x 8 x bfloat> %0,
+ bfloat %1,
+ <vscale x 8 x bfloat> %2,
+ iXLen 0, iXLen %3, iXLen 0)
+
+ ret <vscale x 8 x bfloat> %a
+}
+
+declare <vscale x 8 x bfloat> @llvm.riscv.vfmacc.mask.nxv8bf16.bf16(
+ <vscale x 8 x bfloat>,
+ bfloat,
+ <vscale x 8 x bfloat>,
+ <vscale x 8 x i1>,
+ iXLen, iXLen, iXLen);
+
+define <vscale x 8 x bfloat> @intrinsic_vfmacc_mask_vf_nxv8bf16_bf16_nxv8bf16(<vscale x 8 x bfloat> %0, bfloat %1, <vscale x 8 x bfloat> %2, <vscale x 8 x i1> %3, iXLen %4) nounwind {
+; CHECK-LABEL: intrinsic_vfmacc_mask_vf_nxv8bf16_bf16_nxv8bf16:
+; CHECK: # %bb.0: # %entry
+; CHECK-NEXT: fsrmi a1, 0
+; CHECK-NEXT: vsetvli zero, a0, e16alt, m2, tu, mu
+; CHECK-NEXT: vfmacc.vf v8, fa0, v10, v0.t
+; CHECK-NEXT: fsrm a1
+; CHECK-NEXT: ret
+entry:
+ %a = call <vscale x 8 x bfloat> @llvm.riscv.vfmacc.mask.nxv8bf16.bf16(
+ <vscale x 8 x bfloat> %0,
+ bfloat %1,
+ <vscale x 8 x bfloat> %2,
+ <vscale x 8 x i1> %3,
+ iXLen 0, iXLen %4, iXLen 0)
+
+ ret <vscale x 8 x bfloat> %a
+}
+
+declare <vscale x 16 x bfloat> @llvm.riscv.vfmacc.nxv16bf16.bf16(
+ <vscale x 16 x bfloat>,
+ bfloat,
+ <vscale x 16 x bfloat>,
+ iXLen, iXLen, iXLen);
+
+define <vscale x 16 x bfloat> @intrinsic_vfmacc_vf_nxv16bf16_bf16_nxv16bf16(<vscale x 16 x bfloat> %0, bfloat %1, <vscale x 16 x bfloat> %2, iXLen %3) nounwind {
+; CHECK-LABEL: intrinsic_vfmacc_vf_nxv16bf16_bf16_nxv16bf16:
+; CHECK: # %bb.0: # %entry
+; CHECK-NEXT: fsrmi a1, 0
+; CHECK-NEXT: vsetvli zero, a0, e16alt, m4, tu, ma
+; CHECK-NEXT: vfmacc.vf v8, fa0, v12
+; CHECK-NEXT: fsrm a1
+; CHECK-NEXT: ret
+entry:
+ %a = call <vscale x 16 x bfloat> @llvm.riscv.vfmacc.nxv16bf16.bf16(
+ <vscale x 16 x bfloat> %0,
+ bfloat %1,
+ <vscale x 16 x bfloat> %2,
+ iXLen 0, iXLen %3, iXLen 0)
+
+ ret <vscale x 16 x bfloat> %a
+}
+
+declare <vscale x 16 x bfloat> @llvm.riscv.vfmacc.mask.nxv16bf16.bf16(
+ <vscale x 16 x bfloat>,
+ bfloat,
+ <vscale x 16 x bfloat>,
+ <vscale x 16 x i1>,
+ iXLen, iXLen, iXLen);
+
+define <vscale x 16 x bfloat> @intrinsic_vfmacc_mask_vf_nxv16bf16_bf16_nxv16bf16(<vscale x 16 x bfloat> %0, bfloat %1, <vscale x 16 x bfloat> %2, <vscale x 16 x i1> %3, iXLen %4) nounwind {
+; CHECK-LABEL: intrinsic_vfmacc_mask_vf_nxv16bf16_bf16_nxv16bf16:
+; CHECK: # %bb.0: # %entry
+; CHECK-NEXT: fsrmi a1, 0
+; CHECK-NEXT: vsetvli zero, a0, e16alt, m4, tu, mu
+; CHECK-NEXT: vfmacc.vf v8, fa0, v12, v0.t
+; CHECK-NEXT: fsrm a1
+; CHECK-NEXT: ret
+entry:
+ %a = call <vscale x 16 x bfloat> @llvm.riscv.vfmacc.mask.nxv16bf16.bf16(
+ <vscale x 16 x bfloat> %0,
+ bfloat %1,
+ <vscale x 16 x bfloat> %2,
+ <vscale x 16 x i1> %3,
+ iXLen 0, iXLen %4, iXLen 0)
+
+ ret <vscale x 16 x bfloat> %a
+}
+
+define <vscale x 1 x bfloat> @intrinsic_vfmacc_vv_nxv1bf16_nxv1bf16_nxv1bf16_commute(<vscale x 1 x bfloat> %0, <vscale x 1 x bfloat> %1, <vscale x 1 x bfloat> %2, iXLen %3) nounwind {
+; CHECK-LABEL: intrinsic_vfmacc_vv_nxv1bf16_nxv1bf16_nxv1bf16_commute:
+; CHECK: # %bb.0: # %entry
+; CHECK-NEXT: vsetvli zero, a0, e16alt, mf4, ta, ma
+; CHECK-NEXT: vfmadd.vv v8, v10, v9
+; CHECK-NEXT: ret
+entry:
+ %a = call <vscale x 1 x bfloat> @llvm.riscv.vfmacc.nxv1bf16.nxv1bf16(
+ <vscale x 1 x bfloat> %1,
+ <vscale x 1 x bfloat> %0,
+ <vscale x 1 x bfloat> %2,
+ iXLen 7, iXLen %3, iXLen 3)
+
+ ret <vscale x 1 x bfloat> %a
+}
+
+define <vscale x 1 x bfloat> @intrinsic_vfmacc_vv_nxv1bf16_nxv1bf16_nxv1bf16_commute2(<vscale x 1 x bfloat> %0, <vscale x 1 x bfloat> %1, <vscale x 1 x bfloat> %2, iXLen %3) nounwind {
+; CHECK-LABEL: intrinsic_vfmacc_vv_nxv1bf16_nxv1bf16_nxv1bf16_commute2:
+; CHECK: # %bb.0: # %entry
+; CHECK-NEXT: vsetvli zero, a0, e16alt, mf4, ta, ma
+; CHECK-NEXT: vfmadd.vv v8, v10, v9
+; CHECK-NEXT: ret
+entry:
+ %a = call <vscale x 1 x bfloat> @llvm.riscv.vfmacc.nxv1bf16.nxv1bf16(
+ <vscale x 1 x bfloat> %1,
+ <vscale x 1 x bfloat> %2,
+ <vscale x 1 x bfloat> %0,
+ iXLen 7, iXLen %3, iXLen 3)
+
+ ret <vscale x 1 x bfloat> %a
+}
+
+define <vscale x 1 x bfloat> @intrinsic_vfmacc_vf_nxv1bf16_bf16_nxv1bf16_commute(<vscale x 1 x bfloat> %0, bfloat %1, <vscale x 1 x bfloat> %2, iXLen %3) nounwind {
+; CHECK-LABEL: intrinsic_vfmacc_vf_nxv1bf16_bf16_nxv1bf16_commute:
+; CHECK: # %bb.0: # %entry
+; CHECK-NEXT: vsetvli zero, a0, e16alt, mf4, ta, ma
+; CHECK-NEXT: vfmadd.vf v8, fa0, v9
+; CHECK-NEXT: ret
+entry:
+ %a = call <vscale x 1 x bfloat> @llvm.riscv.vfmacc.nxv1bf16.bf16(
+ <vscale x 1 x bfloat> %2,
+ bfloat %1,
+ <vscale x 1 x bfloat> %0,
+ iXLen 7, iXLen %3, iXLen 3)
+
+ ret <vscale x 1 x bfloat> %a
+}
diff --git a/llvm/test/CodeGen/RISCV/rvv/vfmadd-bf.ll b/llvm/test/CodeGen/RISCV/rvv/vfmadd-bf.ll
new file mode 100644
index 0000000..09fc199
--- /dev/null
+++ b/llvm/test/CodeGen/RISCV/rvv/vfmadd-bf.ll
@@ -0,0 +1,553 @@
+; NOTE: Assertions have been autogenerated by utils/update_llc_test_checks.py
+; RUN: sed 's/iXLen/i32/g' %s | llc -mtriple=riscv32 -mattr=+v,+experimental-zvfbfa \
+; RUN: -verify-machineinstrs -target-abi=ilp32d | FileCheck %s
+; RUN: sed 's/iXLen/i64/g' %s | llc -mtriple=riscv64 -mattr=+v,+experimental-zvfbfa \
+; RUN: -verify-machineinstrs -target-abi=lp64d | FileCheck %s
+
+declare <vscale x 1 x bfloat> @llvm.riscv.vfmadd.nxv1bf16.nxv1bf16(
+ <vscale x 1 x bfloat>,
+ <vscale x 1 x bfloat>,
+ <vscale x 1 x bfloat>,
+ iXLen, iXLen, iXLen);
+
+define <vscale x 1 x bfloat> @intrinsic_vfmadd_vv_nxv1bf16_nxv1bf16_nxv1bf16(<vscale x 1 x bfloat> %0, <vscale x 1 x bfloat> %1, <vscale x 1 x bfloat> %2, iXLen %3) nounwind {
+; CHECK-LABEL: intrinsic_vfmadd_vv_nxv1bf16_nxv1bf16_nxv1bf16:
+; CHECK: # %bb.0: # %entry
+; CHECK-NEXT: fsrmi a1, 0
+; CHECK-NEXT: vsetvli zero, a0, e16alt, mf4, tu, ma
+; CHECK-NEXT: vfmadd.vv v8, v9, v10
+; CHECK-NEXT: fsrm a1
+; CHECK-NEXT: ret
+entry:
+ %a = call <vscale x 1 x bfloat> @llvm.riscv.vfmadd.nxv1bf16.nxv1bf16(
+ <vscale x 1 x bfloat> %0,
+ <vscale x 1 x bfloat> %1,
+ <vscale x 1 x bfloat> %2,
+ iXLen 0, iXLen %3, iXLen 0)
+
+ ret <vscale x 1 x bfloat> %a
+}
+
+declare <vscale x 1 x bfloat> @llvm.riscv.vfmadd.mask.nxv1bf16.nxv1bf16(
+ <vscale x 1 x bfloat>,
+ <vscale x 1 x bfloat>,
+ <vscale x 1 x bfloat>,
+ <vscale x 1 x i1>,
+ iXLen, iXLen, iXLen);
+
+define <vscale x 1 x bfloat> @intrinsic_vfmadd_mask_vv_nxv1bf16_nxv1bf16_nxv1bf16(<vscale x 1 x bfloat> %0, <vscale x 1 x bfloat> %1, <vscale x 1 x bfloat> %2, <vscale x 1 x i1> %3, iXLen %4) nounwind {
+; CHECK-LABEL: intrinsic_vfmadd_mask_vv_nxv1bf16_nxv1bf16_nxv1bf16:
+; CHECK: # %bb.0: # %entry
+; CHECK-NEXT: fsrmi a1, 0
+; CHECK-NEXT: vsetvli zero, a0, e16alt, mf4, tu, mu
+; CHECK-NEXT: vfmadd.vv v8, v9, v10, v0.t
+; CHECK-NEXT: fsrm a1
+; CHECK-NEXT: ret
+entry:
+ %a = call <vscale x 1 x bfloat> @llvm.riscv.vfmadd.mask.nxv1bf16.nxv1bf16(
+ <vscale x 1 x bfloat> %0,
+ <vscale x 1 x bfloat> %1,
+ <vscale x 1 x bfloat> %2,
+ <vscale x 1 x i1> %3,
+ iXLen 0, iXLen %4, iXLen 0);
+
+ ret <vscale x 1 x bfloat> %a
+}
+
+declare <vscale x 2 x bfloat> @llvm.riscv.vfmadd.nxv2bf16.nxv2bf16(
+ <vscale x 2 x bfloat>,
+ <vscale x 2 x bfloat>,
+ <vscale x 2 x bfloat>,
+ iXLen, iXLen, iXLen);
+
+define <vscale x 2 x bfloat> @intrinsic_vfmadd_vv_nxv2bf16_nxv2bf16_nxv2bf16(<vscale x 2 x bfloat> %0, <vscale x 2 x bfloat> %1, <vscale x 2 x bfloat> %2, iXLen %3) nounwind {
+; CHECK-LABEL: intrinsic_vfmadd_vv_nxv2bf16_nxv2bf16_nxv2bf16:
+; CHECK: # %bb.0: # %entry
+; CHECK-NEXT: fsrmi a1, 0
+; CHECK-NEXT: vsetvli zero, a0, e16alt, mf2, tu, ma
+; CHECK-NEXT: vfmadd.vv v8, v9, v10
+; CHECK-NEXT: fsrm a1
+; CHECK-NEXT: ret
+entry:
+ %a = call <vscale x 2 x bfloat> @llvm.riscv.vfmadd.nxv2bf16.nxv2bf16(
+ <vscale x 2 x bfloat> %0,
+ <vscale x 2 x bfloat> %1,
+ <vscale x 2 x bfloat> %2,
+ iXLen 0, iXLen %3, iXLen 0)
+
+ ret <vscale x 2 x bfloat> %a
+}
+
+declare <vscale x 2 x bfloat> @llvm.riscv.vfmadd.mask.nxv2bf16.nxv2bf16(
+ <vscale x 2 x bfloat>,
+ <vscale x 2 x bfloat>,
+ <vscale x 2 x bfloat>,
+ <vscale x 2 x i1>,
+ iXLen, iXLen, iXLen);
+
+define <vscale x 2 x bfloat> @intrinsic_vfmadd_mask_vv_nxv2bf16_nxv2bf16_nxv2bf16(<vscale x 2 x bfloat> %0, <vscale x 2 x bfloat> %1, <vscale x 2 x bfloat> %2, <vscale x 2 x i1> %3, iXLen %4) nounwind {
+; CHECK-LABEL: intrinsic_vfmadd_mask_vv_nxv2bf16_nxv2bf16_nxv2bf16:
+; CHECK: # %bb.0: # %entry
+; CHECK-NEXT: fsrmi a1, 0
+; CHECK-NEXT: vsetvli zero, a0, e16alt, mf2, tu, mu
+; CHECK-NEXT: vfmadd.vv v8, v9, v10, v0.t
+; CHECK-NEXT: fsrm a1
+; CHECK-NEXT: ret
+entry:
+ %a = call <vscale x 2 x bfloat> @llvm.riscv.vfmadd.mask.nxv2bf16.nxv2bf16(
+ <vscale x 2 x bfloat> %0,
+ <vscale x 2 x bfloat> %1,
+ <vscale x 2 x bfloat> %2,
+ <vscale x 2 x i1> %3,
+ iXLen 0, iXLen %4, iXLen 0);
+
+ ret <vscale x 2 x bfloat> %a
+}
+
+declare <vscale x 4 x bfloat> @llvm.riscv.vfmadd.nxv4bf16.nxv4bf16(
+ <vscale x 4 x bfloat>,
+ <vscale x 4 x bfloat>,
+ <vscale x 4 x bfloat>,
+ iXLen, iXLen, iXLen);
+
+define <vscale x 4 x bfloat> @intrinsic_vfmadd_vv_nxv4bf16_nxv4bf16_nxv4bf16(<vscale x 4 x bfloat> %0, <vscale x 4 x bfloat> %1, <vscale x 4 x bfloat> %2, iXLen %3) nounwind {
+; CHECK-LABEL: intrinsic_vfmadd_vv_nxv4bf16_nxv4bf16_nxv4bf16:
+; CHECK: # %bb.0: # %entry
+; CHECK-NEXT: fsrmi a1, 0
+; CHECK-NEXT: vsetvli zero, a0, e16alt, m1, tu, ma
+; CHECK-NEXT: vfmadd.vv v8, v9, v10
+; CHECK-NEXT: fsrm a1
+; CHECK-NEXT: ret
+entry:
+ %a = call <vscale x 4 x bfloat> @llvm.riscv.vfmadd.nxv4bf16.nxv4bf16(
+ <vscale x 4 x bfloat> %0,
+ <vscale x 4 x bfloat> %1,
+ <vscale x 4 x bfloat> %2,
+ iXLen 0, iXLen %3, iXLen 0)
+
+ ret <vscale x 4 x bfloat> %a
+}
+
+declare <vscale x 4 x bfloat> @llvm.riscv.vfmadd.mask.nxv4bf16.nxv4bf16(
+ <vscale x 4 x bfloat>,
+ <vscale x 4 x bfloat>,
+ <vscale x 4 x bfloat>,
+ <vscale x 4 x i1>,
+ iXLen, iXLen, iXLen);
+
+define <vscale x 4 x bfloat> @intrinsic_vfmadd_mask_vv_nxv4bf16_nxv4bf16_nxv4bf16(<vscale x 4 x bfloat> %0, <vscale x 4 x bfloat> %1, <vscale x 4 x bfloat> %2, <vscale x 4 x i1> %3, iXLen %4) nounwind {
+; CHECK-LABEL: intrinsic_vfmadd_mask_vv_nxv4bf16_nxv4bf16_nxv4bf16:
+; CHECK: # %bb.0: # %entry
+; CHECK-NEXT: fsrmi a1, 0
+; CHECK-NEXT: vsetvli zero, a0, e16alt, m1, tu, mu
+; CHECK-NEXT: vfmadd.vv v8, v9, v10, v0.t
+; CHECK-NEXT: fsrm a1
+; CHECK-NEXT: ret
+entry:
+ %a = call <vscale x 4 x bfloat> @llvm.riscv.vfmadd.mask.nxv4bf16.nxv4bf16(
+ <vscale x 4 x bfloat> %0,
+ <vscale x 4 x bfloat> %1,
+ <vscale x 4 x bfloat> %2,
+ <vscale x 4 x i1> %3,
+ iXLen 0, iXLen %4, iXLen 0);
+
+ ret <vscale x 4 x bfloat> %a
+}
+
+declare <vscale x 8 x bfloat> @llvm.riscv.vfmadd.nxv8bf16.nxv8bf16(
+ <vscale x 8 x bfloat>,
+ <vscale x 8 x bfloat>,
+ <vscale x 8 x bfloat>,
+ iXLen, iXLen, iXLen);
+
+define <vscale x 8 x bfloat> @intrinsic_vfmadd_vv_nxv8bf16_nxv8bf16_nxv8bf16(<vscale x 8 x bfloat> %0, <vscale x 8 x bfloat> %1, <vscale x 8 x bfloat> %2, iXLen %3) nounwind {
+; CHECK-LABEL: intrinsic_vfmadd_vv_nxv8bf16_nxv8bf16_nxv8bf16:
+; CHECK: # %bb.0: # %entry
+; CHECK-NEXT: fsrmi a1, 0
+; CHECK-NEXT: vsetvli zero, a0, e16alt, m2, tu, ma
+; CHECK-NEXT: vfmadd.vv v8, v10, v12
+; CHECK-NEXT: fsrm a1
+; CHECK-NEXT: ret
+entry:
+ %a = call <vscale x 8 x bfloat> @llvm.riscv.vfmadd.nxv8bf16.nxv8bf16(
+ <vscale x 8 x bfloat> %0,
+ <vscale x 8 x bfloat> %1,
+ <vscale x 8 x bfloat> %2,
+ iXLen 0, iXLen %3, iXLen 0)
+
+ ret <vscale x 8 x bfloat> %a
+}
+
+declare <vscale x 8 x bfloat> @llvm.riscv.vfmadd.mask.nxv8bf16.nxv8bf16(
+ <vscale x 8 x bfloat>,
+ <vscale x 8 x bfloat>,
+ <vscale x 8 x bfloat>,
+ <vscale x 8 x i1>,
+ iXLen, iXLen, iXLen);
+
+define <vscale x 8 x bfloat> @intrinsic_vfmadd_mask_vv_nxv8bf16_nxv8bf16_nxv8bf16(<vscale x 8 x bfloat> %0, <vscale x 8 x bfloat> %1, <vscale x 8 x bfloat> %2, <vscale x 8 x i1> %3, iXLen %4) nounwind {
+; CHECK-LABEL: intrinsic_vfmadd_mask_vv_nxv8bf16_nxv8bf16_nxv8bf16:
+; CHECK: # %bb.0: # %entry
+; CHECK-NEXT: fsrmi a1, 0
+; CHECK-NEXT: vsetvli zero, a0, e16alt, m2, tu, mu
+; CHECK-NEXT: vfmadd.vv v8, v10, v12, v0.t
+; CHECK-NEXT: fsrm a1
+; CHECK-NEXT: ret
+entry:
+ %a = call <vscale x 8 x bfloat> @llvm.riscv.vfmadd.mask.nxv8bf16.nxv8bf16(
+ <vscale x 8 x bfloat> %0,
+ <vscale x 8 x bfloat> %1,
+ <vscale x 8 x bfloat> %2,
+ <vscale x 8 x i1> %3,
+ iXLen 0, iXLen %4, iXLen 0);
+
+ ret <vscale x 8 x bfloat> %a
+}
+
+declare <vscale x 16 x bfloat> @llvm.riscv.vfmadd.nxv16bf16.nxv16bf16(
+ <vscale x 16 x bfloat>,
+ <vscale x 16 x bfloat>,
+ <vscale x 16 x bfloat>,
+ iXLen, iXLen, iXLen);
+
+define <vscale x 16 x bfloat> @intrinsic_vfmadd_vv_nxv16bf16_nxv16bf16_nxv16bf16(<vscale x 16 x bfloat> %0, <vscale x 16 x bfloat> %1, <vscale x 16 x bfloat> %2, iXLen %3) nounwind {
+; CHECK-LABEL: intrinsic_vfmadd_vv_nxv16bf16_nxv16bf16_nxv16bf16:
+; CHECK: # %bb.0: # %entry
+; CHECK-NEXT: fsrmi a1, 0
+; CHECK-NEXT: vsetvli zero, a0, e16alt, m4, tu, ma
+; CHECK-NEXT: vfmadd.vv v8, v12, v16
+; CHECK-NEXT: fsrm a1
+; CHECK-NEXT: ret
+entry:
+ %a = call <vscale x 16 x bfloat> @llvm.riscv.vfmadd.nxv16bf16.nxv16bf16(
+ <vscale x 16 x bfloat> %0,
+ <vscale x 16 x bfloat> %1,
+ <vscale x 16 x bfloat> %2,
+ iXLen 0, iXLen %3, iXLen 0)
+
+ ret <vscale x 16 x bfloat> %a
+}
+
+declare <vscale x 16 x bfloat> @llvm.riscv.vfmadd.mask.nxv16bf16.nxv16bf16(
+ <vscale x 16 x bfloat>,
+ <vscale x 16 x bfloat>,
+ <vscale x 16 x bfloat>,
+ <vscale x 16 x i1>,
+ iXLen, iXLen, iXLen);
+
+define <vscale x 16 x bfloat> @intrinsic_vfmadd_mask_vv_nxv16bf16_nxv16bf16_nxv16bf16(<vscale x 16 x bfloat> %0, <vscale x 16 x bfloat> %1, <vscale x 16 x bfloat> %2, <vscale x 16 x i1> %3, iXLen %4) nounwind {
+; CHECK-LABEL: intrinsic_vfmadd_mask_vv_nxv16bf16_nxv16bf16_nxv16bf16:
+; CHECK: # %bb.0: # %entry
+; CHECK-NEXT: fsrmi a1, 0
+; CHECK-NEXT: vsetvli zero, a0, e16alt, m4, tu, mu
+; CHECK-NEXT: vfmadd.vv v8, v12, v16, v0.t
+; CHECK-NEXT: fsrm a1
+; CHECK-NEXT: ret
+entry:
+ %a = call <vscale x 16 x bfloat> @llvm.riscv.vfmadd.mask.nxv16bf16.nxv16bf16(
+ <vscale x 16 x bfloat> %0,
+ <vscale x 16 x bfloat> %1,
+ <vscale x 16 x bfloat> %2,
+ <vscale x 16 x i1> %3,
+ iXLen 0, iXLen %4, iXLen 0);
+
+ ret <vscale x 16 x bfloat> %a
+}
+
+declare <vscale x 1 x bfloat> @llvm.riscv.vfmadd.nxv1bf16.bf16(
+ <vscale x 1 x bfloat>,
+ bfloat,
+ <vscale x 1 x bfloat>,
+ iXLen, iXLen, iXLen);
+
+define <vscale x 1 x bfloat> @intrinsic_vfmadd_vf_nxv1bf16_bf16_nxv1bf16(<vscale x 1 x bfloat> %0, bfloat %1, <vscale x 1 x bfloat> %2, iXLen %3) nounwind {
+; CHECK-LABEL: intrinsic_vfmadd_vf_nxv1bf16_bf16_nxv1bf16:
+; CHECK: # %bb.0: # %entry
+; CHECK-NEXT: fsrmi a1, 0
+; CHECK-NEXT: vsetvli zero, a0, e16alt, mf4, tu, ma
+; CHECK-NEXT: vfmadd.vf v8, fa0, v9
+; CHECK-NEXT: fsrm a1
+; CHECK-NEXT: ret
+entry:
+ %a = call <vscale x 1 x bfloat> @llvm.riscv.vfmadd.nxv1bf16.bf16(
+ <vscale x 1 x bfloat> %0,
+ bfloat %1,
+ <vscale x 1 x bfloat> %2,
+ iXLen 0, iXLen %3, iXLen 0)
+
+ ret <vscale x 1 x bfloat> %a
+}
+
+declare <vscale x 1 x bfloat> @llvm.riscv.vfmadd.mask.nxv1bf16.bf16(
+ <vscale x 1 x bfloat>,
+ bfloat,
+ <vscale x 1 x bfloat>,
+ <vscale x 1 x i1>,
+ iXLen, iXLen, iXLen);
+
+define <vscale x 1 x bfloat> @intrinsic_vfmadd_mask_vf_nxv1bf16_bf16_nxv1bf16(<vscale x 1 x bfloat> %0, bfloat %1, <vscale x 1 x bfloat> %2, <vscale x 1 x i1> %3, iXLen %4) nounwind {
+; CHECK-LABEL: intrinsic_vfmadd_mask_vf_nxv1bf16_bf16_nxv1bf16:
+; CHECK: # %bb.0: # %entry
+; CHECK-NEXT: fsrmi a1, 0
+; CHECK-NEXT: vsetvli zero, a0, e16alt, mf4, tu, mu
+; CHECK-NEXT: vfmadd.vf v8, fa0, v9, v0.t
+; CHECK-NEXT: fsrm a1
+; CHECK-NEXT: ret
+entry:
+ %a = call <vscale x 1 x bfloat> @llvm.riscv.vfmadd.mask.nxv1bf16.bf16(
+ <vscale x 1 x bfloat> %0,
+ bfloat %1,
+ <vscale x 1 x bfloat> %2,
+ <vscale x 1 x i1> %3,
+ iXLen 0, iXLen %4, iXLen 0);
+
+ ret <vscale x 1 x bfloat> %a
+}
+
+declare <vscale x 2 x bfloat> @llvm.riscv.vfmadd.nxv2bf16.bf16(
+ <vscale x 2 x bfloat>,
+ bfloat,
+ <vscale x 2 x bfloat>,
+ iXLen, iXLen, iXLen);
+
+define <vscale x 2 x bfloat> @intrinsic_vfmadd_vf_nxv2bf16_bf16_nxv2bf16(<vscale x 2 x bfloat> %0, bfloat %1, <vscale x 2 x bfloat> %2, iXLen %3) nounwind {
+; CHECK-LABEL: intrinsic_vfmadd_vf_nxv2bf16_bf16_nxv2bf16:
+; CHECK: # %bb.0: # %entry
+; CHECK-NEXT: fsrmi a1, 0
+; CHECK-NEXT: vsetvli zero, a0, e16alt, mf2, tu, ma
+; CHECK-NEXT: vfmadd.vf v8, fa0, v9
+; CHECK-NEXT: fsrm a1
+; CHECK-NEXT: ret
+entry:
+ %a = call <vscale x 2 x bfloat> @llvm.riscv.vfmadd.nxv2bf16.bf16(
+ <vscale x 2 x bfloat> %0,
+ bfloat %1,
+ <vscale x 2 x bfloat> %2,
+ iXLen 0, iXLen %3, iXLen 0)
+
+ ret <vscale x 2 x bfloat> %a
+}
+
+declare <vscale x 2 x bfloat> @llvm.riscv.vfmadd.mask.nxv2bf16.bf16(
+ <vscale x 2 x bfloat>,
+ bfloat,
+ <vscale x 2 x bfloat>,
+ <vscale x 2 x i1>,
+ iXLen, iXLen, iXLen);
+
+define <vscale x 2 x bfloat> @intrinsic_vfmadd_mask_vf_nxv2bf16_bf16_nxv2bf16(<vscale x 2 x bfloat> %0, bfloat %1, <vscale x 2 x bfloat> %2, <vscale x 2 x i1> %3, iXLen %4) nounwind {
+; CHECK-LABEL: intrinsic_vfmadd_mask_vf_nxv2bf16_bf16_nxv2bf16:
+; CHECK: # %bb.0: # %entry
+; CHECK-NEXT: fsrmi a1, 0
+; CHECK-NEXT: vsetvli zero, a0, e16alt, mf2, tu, mu
+; CHECK-NEXT: vfmadd.vf v8, fa0, v9, v0.t
+; CHECK-NEXT: fsrm a1
+; CHECK-NEXT: ret
+entry:
+ %a = call <vscale x 2 x bfloat> @llvm.riscv.vfmadd.mask.nxv2bf16.bf16(
+ <vscale x 2 x bfloat> %0,
+ bfloat %1,
+ <vscale x 2 x bfloat> %2,
+ <vscale x 2 x i1> %3,
+ iXLen 0, iXLen %4, iXLen 0);
+
+ ret <vscale x 2 x bfloat> %a
+}
+
+declare <vscale x 4 x bfloat> @llvm.riscv.vfmadd.nxv4bf16.bf16(
+ <vscale x 4 x bfloat>,
+ bfloat,
+ <vscale x 4 x bfloat>,
+ iXLen, iXLen, iXLen);
+
+define <vscale x 4 x bfloat> @intrinsic_vfmadd_vf_nxv4bf16_bf16_nxv4bf16(<vscale x 4 x bfloat> %0, bfloat %1, <vscale x 4 x bfloat> %2, iXLen %3) nounwind {
+; CHECK-LABEL: intrinsic_vfmadd_vf_nxv4bf16_bf16_nxv4bf16:
+; CHECK: # %bb.0: # %entry
+; CHECK-NEXT: fsrmi a1, 0
+; CHECK-NEXT: vsetvli zero, a0, e16alt, m1, tu, ma
+; CHECK-NEXT: vfmadd.vf v8, fa0, v9
+; CHECK-NEXT: fsrm a1
+; CHECK-NEXT: ret
+entry:
+ %a = call <vscale x 4 x bfloat> @llvm.riscv.vfmadd.nxv4bf16.bf16(
+ <vscale x 4 x bfloat> %0,
+ bfloat %1,
+ <vscale x 4 x bfloat> %2,
+ iXLen 0, iXLen %3, iXLen 0)
+
+ ret <vscale x 4 x bfloat> %a
+}
+
+declare <vscale x 4 x bfloat> @llvm.riscv.vfmadd.mask.nxv4bf16.bf16(
+ <vscale x 4 x bfloat>,
+ bfloat,
+ <vscale x 4 x bfloat>,
+ <vscale x 4 x i1>,
+ iXLen, iXLen, iXLen);
+
+define <vscale x 4 x bfloat> @intrinsic_vfmadd_mask_vf_nxv4bf16_bf16_nxv4bf16(<vscale x 4 x bfloat> %0, bfloat %1, <vscale x 4 x bfloat> %2, <vscale x 4 x i1> %3, iXLen %4) nounwind {
+; CHECK-LABEL: intrinsic_vfmadd_mask_vf_nxv4bf16_bf16_nxv4bf16:
+; CHECK: # %bb.0: # %entry
+; CHECK-NEXT: fsrmi a1, 0
+; CHECK-NEXT: vsetvli zero, a0, e16alt, m1, tu, mu
+; CHECK-NEXT: vfmadd.vf v8, fa0, v9, v0.t
+; CHECK-NEXT: fsrm a1
+; CHECK-NEXT: ret
+entry:
+ %a = call <vscale x 4 x bfloat> @llvm.riscv.vfmadd.mask.nxv4bf16.bf16(
+ <vscale x 4 x bfloat> %0,
+ bfloat %1,
+ <vscale x 4 x bfloat> %2,
+ <vscale x 4 x i1> %3,
+ iXLen 0, iXLen %4, iXLen 0);
+
+ ret <vscale x 4 x bfloat> %a
+}
+
+declare <vscale x 8 x bfloat> @llvm.riscv.vfmadd.nxv8bf16.bf16(
+ <vscale x 8 x bfloat>,
+ bfloat,
+ <vscale x 8 x bfloat>,
+ iXLen, iXLen, iXLen);
+
+define <vscale x 8 x bfloat> @intrinsic_vfmadd_vf_nxv8bf16_bf16_nxv8bf16(<vscale x 8 x bfloat> %0, bfloat %1, <vscale x 8 x bfloat> %2, iXLen %3) nounwind {
+; CHECK-LABEL: intrinsic_vfmadd_vf_nxv8bf16_bf16_nxv8bf16:
+; CHECK: # %bb.0: # %entry
+; CHECK-NEXT: fsrmi a1, 0
+; CHECK-NEXT: vsetvli zero, a0, e16alt, m2, tu, ma
+; CHECK-NEXT: vfmadd.vf v8, fa0, v10
+; CHECK-NEXT: fsrm a1
+; CHECK-NEXT: ret
+entry:
+ %a = call <vscale x 8 x bfloat> @llvm.riscv.vfmadd.nxv8bf16.bf16(
+ <vscale x 8 x bfloat> %0,
+ bfloat %1,
+ <vscale x 8 x bfloat> %2,
+ iXLen 0, iXLen %3, iXLen 0)
+
+ ret <vscale x 8 x bfloat> %a
+}
+
+declare <vscale x 8 x bfloat> @llvm.riscv.vfmadd.mask.nxv8bf16.bf16(
+ <vscale x 8 x bfloat>,
+ bfloat,
+ <vscale x 8 x bfloat>,
+ <vscale x 8 x i1>,
+ iXLen, iXLen, iXLen);
+
+define <vscale x 8 x bfloat> @intrinsic_vfmadd_mask_vf_nxv8bf16_bf16_nxv8bf16(<vscale x 8 x bfloat> %0, bfloat %1, <vscale x 8 x bfloat> %2, <vscale x 8 x i1> %3, iXLen %4) nounwind {
+; CHECK-LABEL: intrinsic_vfmadd_mask_vf_nxv8bf16_bf16_nxv8bf16:
+; CHECK: # %bb.0: # %entry
+; CHECK-NEXT: fsrmi a1, 0
+; CHECK-NEXT: vsetvli zero, a0, e16alt, m2, tu, mu
+; CHECK-NEXT: vfmadd.vf v8, fa0, v10, v0.t
+; CHECK-NEXT: fsrm a1
+; CHECK-NEXT: ret
+entry:
+ %a = call <vscale x 8 x bfloat> @llvm.riscv.vfmadd.mask.nxv8bf16.bf16(
+ <vscale x 8 x bfloat> %0,
+ bfloat %1,
+ <vscale x 8 x bfloat> %2,
+ <vscale x 8 x i1> %3,
+ iXLen 0, iXLen %4, iXLen 0);
+
+ ret <vscale x 8 x bfloat> %a
+}
+
+declare <vscale x 16 x bfloat> @llvm.riscv.vfmadd.nxv16bf16.bf16(
+ <vscale x 16 x bfloat>,
+ bfloat,
+ <vscale x 16 x bfloat>,
+ iXLen, iXLen, iXLen);
+
+define <vscale x 16 x bfloat> @intrinsic_vfmadd_vf_nxv16bf16_bf16_nxv16bf16(<vscale x 16 x bfloat> %0, bfloat %1, <vscale x 16 x bfloat> %2, iXLen %3) nounwind {
+; CHECK-LABEL: intrinsic_vfmadd_vf_nxv16bf16_bf16_nxv16bf16:
+; CHECK: # %bb.0: # %entry
+; CHECK-NEXT: fsrmi a1, 0
+; CHECK-NEXT: vsetvli zero, a0, e16alt, m4, tu, ma
+; CHECK-NEXT: vfmadd.vf v8, fa0, v12
+; CHECK-NEXT: fsrm a1
+; CHECK-NEXT: ret
+entry:
+ %a = call <vscale x 16 x bfloat> @llvm.riscv.vfmadd.nxv16bf16.bf16(
+ <vscale x 16 x bfloat> %0,
+ bfloat %1,
+ <vscale x 16 x bfloat> %2,
+ iXLen 0, iXLen %3, iXLen 0)
+
+ ret <vscale x 16 x bfloat> %a
+}
+
+declare <vscale x 16 x bfloat> @llvm.riscv.vfmadd.mask.nxv16bf16.bf16(
+ <vscale x 16 x bfloat>,
+ bfloat,
+ <vscale x 16 x bfloat>,
+ <vscale x 16 x i1>,
+ iXLen, iXLen, iXLen);
+
+define <vscale x 16 x bfloat> @intrinsic_vfmadd_mask_vf_nxv16bf16_bf16_nxv16bf16(<vscale x 16 x bfloat> %0, bfloat %1, <vscale x 16 x bfloat> %2, <vscale x 16 x i1> %3, iXLen %4) nounwind {
+; CHECK-LABEL: intrinsic_vfmadd_mask_vf_nxv16bf16_bf16_nxv16bf16:
+; CHECK: # %bb.0: # %entry
+; CHECK-NEXT: fsrmi a1, 0
+; CHECK-NEXT: vsetvli zero, a0, e16alt, m4, tu, mu
+; CHECK-NEXT: vfmadd.vf v8, fa0, v12, v0.t
+; CHECK-NEXT: fsrm a1
+; CHECK-NEXT: ret
+entry:
+ %a = call <vscale x 16 x bfloat> @llvm.riscv.vfmadd.mask.nxv16bf16.bf16(
+ <vscale x 16 x bfloat> %0,
+ bfloat %1,
+ <vscale x 16 x bfloat> %2,
+ <vscale x 16 x i1> %3,
+ iXLen 0, iXLen %4, iXLen 0);
+
+ ret <vscale x 16 x bfloat> %a
+}
+
+define <vscale x 1 x bfloat> @intrinsic_vfmadd_vv_nxv1bf16_nxv1bf16_nxv1bf16_commute(<vscale x 1 x bfloat> %0, <vscale x 1 x bfloat> %1, <vscale x 1 x bfloat> %2, iXLen %3) nounwind {
+; CHECK-LABEL: intrinsic_vfmadd_vv_nxv1bf16_nxv1bf16_nxv1bf16_commute:
+; CHECK: # %bb.0: # %entry
+; CHECK-NEXT: vsetvli zero, a0, e16alt, mf4, ta, ma
+; CHECK-NEXT: vfmadd.vv v8, v9, v10
+; CHECK-NEXT: ret
+entry:
+ %a = call <vscale x 1 x bfloat> @llvm.riscv.vfmadd.nxv1bf16.nxv1bf16(
+ <vscale x 1 x bfloat> %1,
+ <vscale x 1 x bfloat> %0,
+ <vscale x 1 x bfloat> %2,
+ iXLen 7, iXLen %3, iXLen 3)
+
+ ret <vscale x 1 x bfloat> %a
+}
+
+define <vscale x 1 x bfloat> @intrinsic_vfmadd_vv_nxv1bf16_nxv1bf16_nxv1bf16_commute2(<vscale x 1 x bfloat> %0, <vscale x 1 x bfloat> %1, <vscale x 1 x bfloat> %2, iXLen %3) nounwind {
+; CHECK-LABEL: intrinsic_vfmadd_vv_nxv1bf16_nxv1bf16_nxv1bf16_commute2:
+; CHECK: # %bb.0: # %entry
+; CHECK-NEXT: vsetvli zero, a0, e16alt, mf4, ta, ma
+; CHECK-NEXT: vfmacc.vv v8, v10, v9
+; CHECK-NEXT: ret
+entry:
+ %a = call <vscale x 1 x bfloat> @llvm.riscv.vfmadd.nxv1bf16.nxv1bf16(
+ <vscale x 1 x bfloat> %1,
+ <vscale x 1 x bfloat> %2,
+ <vscale x 1 x bfloat> %0,
+ iXLen 7, iXLen %3, iXLen 3)
+
+ ret <vscale x 1 x bfloat> %a
+}
+
+define <vscale x 1 x bfloat> @intrinsic_vfmadd_vf_nxv1bf16_bf16_nxv1bf16_commute(<vscale x 1 x bfloat> %0, bfloat %1, <vscale x 1 x bfloat> %2, iXLen %3) nounwind {
+; CHECK-LABEL: intrinsic_vfmadd_vf_nxv1bf16_bf16_nxv1bf16_commute:
+; CHECK: # %bb.0: # %entry
+; CHECK-NEXT: vsetvli zero, a0, e16alt, mf4, ta, ma
+; CHECK-NEXT: vfmacc.vf v8, fa0, v9
+; CHECK-NEXT: ret
+entry:
+ %a = call <vscale x 1 x bfloat> @llvm.riscv.vfmadd.nxv1bf16.bf16(
+ <vscale x 1 x bfloat> %2,
+ bfloat %1,
+ <vscale x 1 x bfloat> %0,
+ iXLen 7, iXLen %3, iXLen 3)
+
+ ret <vscale x 1 x bfloat> %a
+}
diff --git a/llvm/test/CodeGen/RISCV/rvv/vfmax-bf.ll b/llvm/test/CodeGen/RISCV/rvv/vfmax-bf.ll
new file mode 100644
index 0000000..a337d30
--- /dev/null
+++ b/llvm/test/CodeGen/RISCV/rvv/vfmax-bf.ll
@@ -0,0 +1,571 @@
+; NOTE: Assertions have been autogenerated by utils/update_llc_test_checks.py
+; RUN: sed 's/iXLen/i32/g' %s | llc -mtriple=riscv32 -mattr=+v,+experimental-zvfbfa \
+; RUN: -verify-machineinstrs -target-abi=ilp32d | FileCheck %s
+; RUN: sed 's/iXLen/i64/g' %s | llc -mtriple=riscv64 -mattr=+v,+experimental-zvfbfa \
+; RUN: -verify-machineinstrs -target-abi=lp64d | FileCheck %s
+
+declare <vscale x 1 x bfloat> @llvm.riscv.vfmax.nxv1bf16.nxv1bf16(
+ <vscale x 1 x bfloat>,
+ <vscale x 1 x bfloat>,
+ <vscale x 1 x bfloat>,
+ iXLen);
+
+define <vscale x 1 x bfloat> @intrinsic_vfmax_vv_nxv1bf16_nxv1bf16_nxv1bf16(<vscale x 1 x bfloat> %0, <vscale x 1 x bfloat> %1, iXLen %2) nounwind {
+; CHECK-LABEL: intrinsic_vfmax_vv_nxv1bf16_nxv1bf16_nxv1bf16:
+; CHECK: # %bb.0: # %entry
+; CHECK-NEXT: vsetvli zero, a0, e16alt, mf4, ta, ma
+; CHECK-NEXT: vfmax.vv v8, v8, v9
+; CHECK-NEXT: ret
+entry:
+ %a = call <vscale x 1 x bfloat> @llvm.riscv.vfmax.nxv1bf16.nxv1bf16(
+ <vscale x 1 x bfloat> poison,
+ <vscale x 1 x bfloat> %0,
+ <vscale x 1 x bfloat> %1,
+ iXLen %2)
+
+ ret <vscale x 1 x bfloat> %a
+}
+
+declare <vscale x 1 x bfloat> @llvm.riscv.vfmax.mask.nxv1bf16.nxv1bf16(
+ <vscale x 1 x bfloat>,
+ <vscale x 1 x bfloat>,
+ <vscale x 1 x bfloat>,
+ <vscale x 1 x i1>,
+ iXLen,
+ iXLen);
+
+define <vscale x 1 x bfloat> @intrinsic_vfmax_mask_vv_nxv1bf16_nxv1bf16_nxv1bf16(<vscale x 1 x bfloat> %0, <vscale x 1 x bfloat> %1, <vscale x 1 x bfloat> %2, <vscale x 1 x i1> %3, iXLen %4) nounwind {
+; CHECK-LABEL: intrinsic_vfmax_mask_vv_nxv1bf16_nxv1bf16_nxv1bf16:
+; CHECK: # %bb.0: # %entry
+; CHECK-NEXT: vsetvli zero, a0, e16alt, mf4, ta, mu
+; CHECK-NEXT: vfmax.vv v8, v9, v10, v0.t
+; CHECK-NEXT: ret
+entry:
+ %a = call <vscale x 1 x bfloat> @llvm.riscv.vfmax.mask.nxv1bf16.nxv1bf16(
+ <vscale x 1 x bfloat> %0,
+ <vscale x 1 x bfloat> %1,
+ <vscale x 1 x bfloat> %2,
+ <vscale x 1 x i1> %3,
+ iXLen %4, iXLen 1)
+
+ ret <vscale x 1 x bfloat> %a
+}
+
+declare <vscale x 2 x bfloat> @llvm.riscv.vfmax.nxv2bf16.nxv2bf16(
+ <vscale x 2 x bfloat>,
+ <vscale x 2 x bfloat>,
+ <vscale x 2 x bfloat>,
+ iXLen);
+
+define <vscale x 2 x bfloat> @intrinsic_vfmax_vv_nxv2bf16_nxv2bf16_nxv2bf16(<vscale x 2 x bfloat> %0, <vscale x 2 x bfloat> %1, iXLen %2) nounwind {
+; CHECK-LABEL: intrinsic_vfmax_vv_nxv2bf16_nxv2bf16_nxv2bf16:
+; CHECK: # %bb.0: # %entry
+; CHECK-NEXT: vsetvli zero, a0, e16alt, mf2, ta, ma
+; CHECK-NEXT: vfmax.vv v8, v8, v9
+; CHECK-NEXT: ret
+entry:
+ %a = call <vscale x 2 x bfloat> @llvm.riscv.vfmax.nxv2bf16.nxv2bf16(
+ <vscale x 2 x bfloat> poison,
+ <vscale x 2 x bfloat> %0,
+ <vscale x 2 x bfloat> %1,
+ iXLen %2)
+
+ ret <vscale x 2 x bfloat> %a
+}
+
+declare <vscale x 2 x bfloat> @llvm.riscv.vfmax.mask.nxv2bf16.nxv2bf16(
+ <vscale x 2 x bfloat>,
+ <vscale x 2 x bfloat>,
+ <vscale x 2 x bfloat>,
+ <vscale x 2 x i1>,
+ iXLen,
+ iXLen);
+
+define <vscale x 2 x bfloat> @intrinsic_vfmax_mask_vv_nxv2bf16_nxv2bf16_nxv2bf16(<vscale x 2 x bfloat> %0, <vscale x 2 x bfloat> %1, <vscale x 2 x bfloat> %2, <vscale x 2 x i1> %3, iXLen %4) nounwind {
+; CHECK-LABEL: intrinsic_vfmax_mask_vv_nxv2bf16_nxv2bf16_nxv2bf16:
+; CHECK: # %bb.0: # %entry
+; CHECK-NEXT: vsetvli zero, a0, e16alt, mf2, ta, mu
+; CHECK-NEXT: vfmax.vv v8, v9, v10, v0.t
+; CHECK-NEXT: ret
+entry:
+ %a = call <vscale x 2 x bfloat> @llvm.riscv.vfmax.mask.nxv2bf16.nxv2bf16(
+ <vscale x 2 x bfloat> %0,
+ <vscale x 2 x bfloat> %1,
+ <vscale x 2 x bfloat> %2,
+ <vscale x 2 x i1> %3,
+ iXLen %4, iXLen 1)
+
+ ret <vscale x 2 x bfloat> %a
+}
+
+declare <vscale x 4 x bfloat> @llvm.riscv.vfmax.nxv4bf16.nxv4bf16(
+ <vscale x 4 x bfloat>,
+ <vscale x 4 x bfloat>,
+ <vscale x 4 x bfloat>,
+ iXLen);
+
+define <vscale x 4 x bfloat> @intrinsic_vfmax_vv_nxv4bf16_nxv4bf16_nxv4bf16(<vscale x 4 x bfloat> %0, <vscale x 4 x bfloat> %1, iXLen %2) nounwind {
+; CHECK-LABEL: intrinsic_vfmax_vv_nxv4bf16_nxv4bf16_nxv4bf16:
+; CHECK: # %bb.0: # %entry
+; CHECK-NEXT: vsetvli zero, a0, e16alt, m1, ta, ma
+; CHECK-NEXT: vfmax.vv v8, v8, v9
+; CHECK-NEXT: ret
+entry:
+ %a = call <vscale x 4 x bfloat> @llvm.riscv.vfmax.nxv4bf16.nxv4bf16(
+ <vscale x 4 x bfloat> poison,
+ <vscale x 4 x bfloat> %0,
+ <vscale x 4 x bfloat> %1,
+ iXLen %2)
+
+ ret <vscale x 4 x bfloat> %a
+}
+
+declare <vscale x 4 x bfloat> @llvm.riscv.vfmax.mask.nxv4bf16.nxv4bf16(
+ <vscale x 4 x bfloat>,
+ <vscale x 4 x bfloat>,
+ <vscale x 4 x bfloat>,
+ <vscale x 4 x i1>,
+ iXLen,
+ iXLen);
+
+define <vscale x 4 x bfloat> @intrinsic_vfmax_mask_vv_nxv4bf16_nxv4bf16_nxv4bf16(<vscale x 4 x bfloat> %0, <vscale x 4 x bfloat> %1, <vscale x 4 x bfloat> %2, <vscale x 4 x i1> %3, iXLen %4) nounwind {
+; CHECK-LABEL: intrinsic_vfmax_mask_vv_nxv4bf16_nxv4bf16_nxv4bf16:
+; CHECK: # %bb.0: # %entry
+; CHECK-NEXT: vsetvli zero, a0, e16alt, m1, ta, mu
+; CHECK-NEXT: vfmax.vv v8, v9, v10, v0.t
+; CHECK-NEXT: ret
+entry:
+ %a = call <vscale x 4 x bfloat> @llvm.riscv.vfmax.mask.nxv4bf16.nxv4bf16(
+ <vscale x 4 x bfloat> %0,
+ <vscale x 4 x bfloat> %1,
+ <vscale x 4 x bfloat> %2,
+ <vscale x 4 x i1> %3,
+ iXLen %4, iXLen 1)
+
+ ret <vscale x 4 x bfloat> %a
+}
+
+declare <vscale x 8 x bfloat> @llvm.riscv.vfmax.nxv8bf16.nxv8bf16(
+ <vscale x 8 x bfloat>,
+ <vscale x 8 x bfloat>,
+ <vscale x 8 x bfloat>,
+ iXLen);
+
+define <vscale x 8 x bfloat> @intrinsic_vfmax_vv_nxv8bf16_nxv8bf16_nxv8bf16(<vscale x 8 x bfloat> %0, <vscale x 8 x bfloat> %1, iXLen %2) nounwind {
+; CHECK-LABEL: intrinsic_vfmax_vv_nxv8bf16_nxv8bf16_nxv8bf16:
+; CHECK: # %bb.0: # %entry
+; CHECK-NEXT: vsetvli zero, a0, e16alt, m2, ta, ma
+; CHECK-NEXT: vfmax.vv v8, v8, v10
+; CHECK-NEXT: ret
+entry:
+ %a = call <vscale x 8 x bfloat> @llvm.riscv.vfmax.nxv8bf16.nxv8bf16(
+ <vscale x 8 x bfloat> poison,
+ <vscale x 8 x bfloat> %0,
+ <vscale x 8 x bfloat> %1,
+ iXLen %2)
+
+ ret <vscale x 8 x bfloat> %a
+}
+
+declare <vscale x 8 x bfloat> @llvm.riscv.vfmax.mask.nxv8bf16.nxv8bf16(
+ <vscale x 8 x bfloat>,
+ <vscale x 8 x bfloat>,
+ <vscale x 8 x bfloat>,
+ <vscale x 8 x i1>,
+ iXLen,
+ iXLen);
+
+define <vscale x 8 x bfloat> @intrinsic_vfmax_mask_vv_nxv8bf16_nxv8bf16_nxv8bf16(<vscale x 8 x bfloat> %0, <vscale x 8 x bfloat> %1, <vscale x 8 x bfloat> %2, <vscale x 8 x i1> %3, iXLen %4) nounwind {
+; CHECK-LABEL: intrinsic_vfmax_mask_vv_nxv8bf16_nxv8bf16_nxv8bf16:
+; CHECK: # %bb.0: # %entry
+; CHECK-NEXT: vsetvli zero, a0, e16alt, m2, ta, mu
+; CHECK-NEXT: vfmax.vv v8, v10, v12, v0.t
+; CHECK-NEXT: ret
+entry:
+ %a = call <vscale x 8 x bfloat> @llvm.riscv.vfmax.mask.nxv8bf16.nxv8bf16(
+ <vscale x 8 x bfloat> %0,
+ <vscale x 8 x bfloat> %1,
+ <vscale x 8 x bfloat> %2,
+ <vscale x 8 x i1> %3,
+ iXLen %4, iXLen 1)
+
+ ret <vscale x 8 x bfloat> %a
+}
+
+declare <vscale x 16 x bfloat> @llvm.riscv.vfmax.nxv16bf16.nxv16bf16(
+ <vscale x 16 x bfloat>,
+ <vscale x 16 x bfloat>,
+ <vscale x 16 x bfloat>,
+ iXLen);
+
+define <vscale x 16 x bfloat> @intrinsic_vfmax_vv_nxv16bf16_nxv16bf16_nxv16bf16(<vscale x 16 x bfloat> %0, <vscale x 16 x bfloat> %1, iXLen %2) nounwind {
+; CHECK-LABEL: intrinsic_vfmax_vv_nxv16bf16_nxv16bf16_nxv16bf16:
+; CHECK: # %bb.0: # %entry
+; CHECK-NEXT: vsetvli zero, a0, e16alt, m4, ta, ma
+; CHECK-NEXT: vfmax.vv v8, v8, v12
+; CHECK-NEXT: ret
+entry:
+ %a = call <vscale x 16 x bfloat> @llvm.riscv.vfmax.nxv16bf16.nxv16bf16(
+ <vscale x 16 x bfloat> poison,
+ <vscale x 16 x bfloat> %0,
+ <vscale x 16 x bfloat> %1,
+ iXLen %2)
+
+ ret <vscale x 16 x bfloat> %a
+}
+
+declare <vscale x 16 x bfloat> @llvm.riscv.vfmax.mask.nxv16bf16.nxv16bf16(
+ <vscale x 16 x bfloat>,
+ <vscale x 16 x bfloat>,
+ <vscale x 16 x bfloat>,
+ <vscale x 16 x i1>,
+ iXLen,
+ iXLen);
+
+define <vscale x 16 x bfloat> @intrinsic_vfmax_mask_vv_nxv16bf16_nxv16bf16_nxv16bf16(<vscale x 16 x bfloat> %0, <vscale x 16 x bfloat> %1, <vscale x 16 x bfloat> %2, <vscale x 16 x i1> %3, iXLen %4) nounwind {
+; CHECK-LABEL: intrinsic_vfmax_mask_vv_nxv16bf16_nxv16bf16_nxv16bf16:
+; CHECK: # %bb.0: # %entry
+; CHECK-NEXT: vsetvli zero, a0, e16alt, m4, ta, mu
+; CHECK-NEXT: vfmax.vv v8, v12, v16, v0.t
+; CHECK-NEXT: ret
+entry:
+ %a = call <vscale x 16 x bfloat> @llvm.riscv.vfmax.mask.nxv16bf16.nxv16bf16(
+ <vscale x 16 x bfloat> %0,
+ <vscale x 16 x bfloat> %1,
+ <vscale x 16 x bfloat> %2,
+ <vscale x 16 x i1> %3,
+ iXLen %4, iXLen 1)
+
+ ret <vscale x 16 x bfloat> %a
+}
+
+declare <vscale x 32 x bfloat> @llvm.riscv.vfmax.nxv32bf16.nxv32bf16(
+ <vscale x 32 x bfloat>,
+ <vscale x 32 x bfloat>,
+ <vscale x 32 x bfloat>,
+ iXLen);
+
+define <vscale x 32 x bfloat> @intrinsic_vfmax_vv_nxv32bf16_nxv32bf16_nxv32bf16(<vscale x 32 x bfloat> %0, <vscale x 32 x bfloat> %1, iXLen %2) nounwind {
+; CHECK-LABEL: intrinsic_vfmax_vv_nxv32bf16_nxv32bf16_nxv32bf16:
+; CHECK: # %bb.0: # %entry
+; CHECK-NEXT: vsetvli zero, a0, e16alt, m8, ta, ma
+; CHECK-NEXT: vfmax.vv v8, v8, v16
+; CHECK-NEXT: ret
+entry:
+ %a = call <vscale x 32 x bfloat> @llvm.riscv.vfmax.nxv32bf16.nxv32bf16(
+ <vscale x 32 x bfloat> poison,
+ <vscale x 32 x bfloat> %0,
+ <vscale x 32 x bfloat> %1,
+ iXLen %2)
+
+ ret <vscale x 32 x bfloat> %a
+}
+
+declare <vscale x 32 x bfloat> @llvm.riscv.vfmax.mask.nxv32bf16.nxv32bf16(
+ <vscale x 32 x bfloat>,
+ <vscale x 32 x bfloat>,
+ <vscale x 32 x bfloat>,
+ <vscale x 32 x i1>,
+ iXLen,
+ iXLen);
+
+define <vscale x 32 x bfloat> @intrinsic_vfmax_mask_vv_nxv32bf16_nxv32bf16_nxv32bf16(<vscale x 32 x bfloat> %0, <vscale x 32 x bfloat> %1, <vscale x 32 x bfloat> %2, <vscale x 32 x i1> %3, iXLen %4) nounwind {
+; CHECK-LABEL: intrinsic_vfmax_mask_vv_nxv32bf16_nxv32bf16_nxv32bf16:
+; CHECK: # %bb.0: # %entry
+; CHECK-NEXT: vl8re16.v v24, (a0)
+; CHECK-NEXT: vsetvli zero, a1, e16alt, m8, ta, mu
+; CHECK-NEXT: vfmax.vv v8, v16, v24, v0.t
+; CHECK-NEXT: ret
+entry:
+ %a = call <vscale x 32 x bfloat> @llvm.riscv.vfmax.mask.nxv32bf16.nxv32bf16(
+ <vscale x 32 x bfloat> %0,
+ <vscale x 32 x bfloat> %1,
+ <vscale x 32 x bfloat> %2,
+ <vscale x 32 x i1> %3,
+ iXLen %4, iXLen 1)
+
+ ret <vscale x 32 x bfloat> %a
+}
+
+declare <vscale x 1 x bfloat> @llvm.riscv.vfmax.nxv1bf16.bf16(
+ <vscale x 1 x bfloat>,
+ <vscale x 1 x bfloat>,
+ bfloat,
+ iXLen);
+
+define <vscale x 1 x bfloat> @intrinsic_vfmax_vf_nxv1bf16_nxv1bf16_bf16(<vscale x 1 x bfloat> %0, bfloat %1, iXLen %2) nounwind {
+; CHECK-LABEL: intrinsic_vfmax_vf_nxv1bf16_nxv1bf16_bf16:
+; CHECK: # %bb.0: # %entry
+; CHECK-NEXT: vsetvli zero, a0, e16alt, mf4, ta, ma
+; CHECK-NEXT: vfmax.vf v8, v8, fa0
+; CHECK-NEXT: ret
+entry:
+ %a = call <vscale x 1 x bfloat> @llvm.riscv.vfmax.nxv1bf16.bf16(
+ <vscale x 1 x bfloat> poison,
+ <vscale x 1 x bfloat> %0,
+ bfloat %1,
+ iXLen %2)
+
+ ret <vscale x 1 x bfloat> %a
+}
+
+declare <vscale x 1 x bfloat> @llvm.riscv.vfmax.mask.nxv1bf16.bf16(
+ <vscale x 1 x bfloat>,
+ <vscale x 1 x bfloat>,
+ bfloat,
+ <vscale x 1 x i1>,
+ iXLen,
+ iXLen);
+
+define <vscale x 1 x bfloat> @intrinsic_vfmax_mask_vf_nxv1bf16_nxv1bf16_bf16(<vscale x 1 x bfloat> %0, <vscale x 1 x bfloat> %1, bfloat %2, <vscale x 1 x i1> %3, iXLen %4) nounwind {
+; CHECK-LABEL: intrinsic_vfmax_mask_vf_nxv1bf16_nxv1bf16_bf16:
+; CHECK: # %bb.0: # %entry
+; CHECK-NEXT: vsetvli zero, a0, e16alt, mf4, ta, mu
+; CHECK-NEXT: vfmax.vf v8, v9, fa0, v0.t
+; CHECK-NEXT: ret
+entry:
+ %a = call <vscale x 1 x bfloat> @llvm.riscv.vfmax.mask.nxv1bf16.bf16(
+ <vscale x 1 x bfloat> %0,
+ <vscale x 1 x bfloat> %1,
+ bfloat %2,
+ <vscale x 1 x i1> %3,
+ iXLen %4, iXLen 1)
+
+ ret <vscale x 1 x bfloat> %a
+}
+
+declare <vscale x 2 x bfloat> @llvm.riscv.vfmax.nxv2bf16.bf16(
+ <vscale x 2 x bfloat>,
+ <vscale x 2 x bfloat>,
+ bfloat,
+ iXLen);
+
+define <vscale x 2 x bfloat> @intrinsic_vfmax_vf_nxv2bf16_nxv2bf16_bf16(<vscale x 2 x bfloat> %0, bfloat %1, iXLen %2) nounwind {
+; CHECK-LABEL: intrinsic_vfmax_vf_nxv2bf16_nxv2bf16_bf16:
+; CHECK: # %bb.0: # %entry
+; CHECK-NEXT: vsetvli zero, a0, e16alt, mf2, ta, ma
+; CHECK-NEXT: vfmax.vf v8, v8, fa0
+; CHECK-NEXT: ret
+entry:
+ %a = call <vscale x 2 x bfloat> @llvm.riscv.vfmax.nxv2bf16.bf16(
+ <vscale x 2 x bfloat> poison,
+ <vscale x 2 x bfloat> %0,
+ bfloat %1,
+ iXLen %2)
+
+ ret <vscale x 2 x bfloat> %a
+}
+
+declare <vscale x 2 x bfloat> @llvm.riscv.vfmax.mask.nxv2bf16.bf16(
+ <vscale x 2 x bfloat>,
+ <vscale x 2 x bfloat>,
+ bfloat,
+ <vscale x 2 x i1>,
+ iXLen,
+ iXLen);
+
+define <vscale x 2 x bfloat> @intrinsic_vfmax_mask_vf_nxv2bf16_nxv2bf16_bf16(<vscale x 2 x bfloat> %0, <vscale x 2 x bfloat> %1, bfloat %2, <vscale x 2 x i1> %3, iXLen %4) nounwind {
+; CHECK-LABEL: intrinsic_vfmax_mask_vf_nxv2bf16_nxv2bf16_bf16:
+; CHECK: # %bb.0: # %entry
+; CHECK-NEXT: vsetvli zero, a0, e16alt, mf2, ta, mu
+; CHECK-NEXT: vfmax.vf v8, v9, fa0, v0.t
+; CHECK-NEXT: ret
+entry:
+ %a = call <vscale x 2 x bfloat> @llvm.riscv.vfmax.mask.nxv2bf16.bf16(
+ <vscale x 2 x bfloat> %0,
+ <vscale x 2 x bfloat> %1,
+ bfloat %2,
+ <vscale x 2 x i1> %3,
+ iXLen %4, iXLen 1)
+
+ ret <vscale x 2 x bfloat> %a
+}
+
+declare <vscale x 4 x bfloat> @llvm.riscv.vfmax.nxv4bf16.bf16(
+ <vscale x 4 x bfloat>,
+ <vscale x 4 x bfloat>,
+ bfloat,
+ iXLen);
+
+define <vscale x 4 x bfloat> @intrinsic_vfmax_vf_nxv4bf16_nxv4bf16_bf16(<vscale x 4 x bfloat> %0, bfloat %1, iXLen %2) nounwind {
+; CHECK-LABEL: intrinsic_vfmax_vf_nxv4bf16_nxv4bf16_bf16:
+; CHECK: # %bb.0: # %entry
+; CHECK-NEXT: vsetvli zero, a0, e16alt, m1, ta, ma
+; CHECK-NEXT: vfmax.vf v8, v8, fa0
+; CHECK-NEXT: ret
+entry:
+ %a = call <vscale x 4 x bfloat> @llvm.riscv.vfmax.nxv4bf16.bf16(
+ <vscale x 4 x bfloat> poison,
+ <vscale x 4 x bfloat> %0,
+ bfloat %1,
+ iXLen %2)
+
+ ret <vscale x 4 x bfloat> %a
+}
+
+declare <vscale x 4 x bfloat> @llvm.riscv.vfmax.mask.nxv4bf16.bf16(
+ <vscale x 4 x bfloat>,
+ <vscale x 4 x bfloat>,
+ bfloat,
+ <vscale x 4 x i1>,
+ iXLen,
+ iXLen);
+
+define <vscale x 4 x bfloat> @intrinsic_vfmax_mask_vf_nxv4bf16_nxv4bf16_bf16(<vscale x 4 x bfloat> %0, <vscale x 4 x bfloat> %1, bfloat %2, <vscale x 4 x i1> %3, iXLen %4) nounwind {
+; CHECK-LABEL: intrinsic_vfmax_mask_vf_nxv4bf16_nxv4bf16_bf16:
+; CHECK: # %bb.0: # %entry
+; CHECK-NEXT: vsetvli zero, a0, e16alt, m1, ta, mu
+; CHECK-NEXT: vfmax.vf v8, v9, fa0, v0.t
+; CHECK-NEXT: ret
+entry:
+ %a = call <vscale x 4 x bfloat> @llvm.riscv.vfmax.mask.nxv4bf16.bf16(
+ <vscale x 4 x bfloat> %0,
+ <vscale x 4 x bfloat> %1,
+ bfloat %2,
+ <vscale x 4 x i1> %3,
+ iXLen %4, iXLen 1)
+
+ ret <vscale x 4 x bfloat> %a
+}
+
+declare <vscale x 8 x bfloat> @llvm.riscv.vfmax.nxv8bf16.bf16(
+ <vscale x 8 x bfloat>,
+ <vscale x 8 x bfloat>,
+ bfloat,
+ iXLen);
+
+define <vscale x 8 x bfloat> @intrinsic_vfmax_vf_nxv8bf16_nxv8bf16_bf16(<vscale x 8 x bfloat> %0, bfloat %1, iXLen %2) nounwind {
+; CHECK-LABEL: intrinsic_vfmax_vf_nxv8bf16_nxv8bf16_bf16:
+; CHECK: # %bb.0: # %entry
+; CHECK-NEXT: vsetvli zero, a0, e16alt, m2, ta, ma
+; CHECK-NEXT: vfmax.vf v8, v8, fa0
+; CHECK-NEXT: ret
+entry:
+ %a = call <vscale x 8 x bfloat> @llvm.riscv.vfmax.nxv8bf16.bf16(
+ <vscale x 8 x bfloat> poison,
+ <vscale x 8 x bfloat> %0,
+ bfloat %1,
+ iXLen %2)
+
+ ret <vscale x 8 x bfloat> %a
+}
+
+declare <vscale x 8 x bfloat> @llvm.riscv.vfmax.mask.nxv8bf16.bf16(
+ <vscale x 8 x bfloat>,
+ <vscale x 8 x bfloat>,
+ bfloat,
+ <vscale x 8 x i1>,
+ iXLen,
+ iXLen);
+
+define <vscale x 8 x bfloat> @intrinsic_vfmax_mask_vf_nxv8bf16_nxv8bf16_bf16(<vscale x 8 x bfloat> %0, <vscale x 8 x bfloat> %1, bfloat %2, <vscale x 8 x i1> %3, iXLen %4) nounwind {
+; CHECK-LABEL: intrinsic_vfmax_mask_vf_nxv8bf16_nxv8bf16_bf16:
+; CHECK: # %bb.0: # %entry
+; CHECK-NEXT: vsetvli zero, a0, e16alt, m2, ta, mu
+; CHECK-NEXT: vfmax.vf v8, v10, fa0, v0.t
+; CHECK-NEXT: ret
+entry:
+ %a = call <vscale x 8 x bfloat> @llvm.riscv.vfmax.mask.nxv8bf16.bf16(
+ <vscale x 8 x bfloat> %0,
+ <vscale x 8 x bfloat> %1,
+ bfloat %2,
+ <vscale x 8 x i1> %3,
+ iXLen %4, iXLen 1)
+
+ ret <vscale x 8 x bfloat> %a
+}
+
+declare <vscale x 16 x bfloat> @llvm.riscv.vfmax.nxv16bf16.bf16(
+ <vscale x 16 x bfloat>,
+ <vscale x 16 x bfloat>,
+ bfloat,
+ iXLen);
+
+define <vscale x 16 x bfloat> @intrinsic_vfmax_vf_nxv16bf16_nxv16bf16_bf16(<vscale x 16 x bfloat> %0, bfloat %1, iXLen %2) nounwind {
+; CHECK-LABEL: intrinsic_vfmax_vf_nxv16bf16_nxv16bf16_bf16:
+; CHECK: # %bb.0: # %entry
+; CHECK-NEXT: vsetvli zero, a0, e16alt, m4, ta, ma
+; CHECK-NEXT: vfmax.vf v8, v8, fa0
+; CHECK-NEXT: ret
+entry:
+ %a = call <vscale x 16 x bfloat> @llvm.riscv.vfmax.nxv16bf16.bf16(
+ <vscale x 16 x bfloat> poison,
+ <vscale x 16 x bfloat> %0,
+ bfloat %1,
+ iXLen %2)
+
+ ret <vscale x 16 x bfloat> %a
+}
+
+declare <vscale x 16 x bfloat> @llvm.riscv.vfmax.mask.nxv16bf16.bf16(
+ <vscale x 16 x bfloat>,
+ <vscale x 16 x bfloat>,
+ bfloat,
+ <vscale x 16 x i1>,
+ iXLen,
+ iXLen);
+
+define <vscale x 16 x bfloat> @intrinsic_vfmax_mask_vf_nxv16bf16_nxv16bf16_bf16(<vscale x 16 x bfloat> %0, <vscale x 16 x bfloat> %1, bfloat %2, <vscale x 16 x i1> %3, iXLen %4) nounwind {
+; CHECK-LABEL: intrinsic_vfmax_mask_vf_nxv16bf16_nxv16bf16_bf16:
+; CHECK: # %bb.0: # %entry
+; CHECK-NEXT: vsetvli zero, a0, e16alt, m4, ta, mu
+; CHECK-NEXT: vfmax.vf v8, v12, fa0, v0.t
+; CHECK-NEXT: ret
+entry:
+ %a = call <vscale x 16 x bfloat> @llvm.riscv.vfmax.mask.nxv16bf16.bf16(
+ <vscale x 16 x bfloat> %0,
+ <vscale x 16 x bfloat> %1,
+ bfloat %2,
+ <vscale x 16 x i1> %3,
+ iXLen %4, iXLen 1)
+
+ ret <vscale x 16 x bfloat> %a
+}
+
+declare <vscale x 32 x bfloat> @llvm.riscv.vfmax.nxv32bf16.bf16(
+ <vscale x 32 x bfloat>,
+ <vscale x 32 x bfloat>,
+ bfloat,
+ iXLen);
+
+define <vscale x 32 x bfloat> @intrinsic_vfmax_vf_nxv32bf16_nxv32bf16_bf16(<vscale x 32 x bfloat> %0, bfloat %1, iXLen %2) nounwind {
+; CHECK-LABEL: intrinsic_vfmax_vf_nxv32bf16_nxv32bf16_bf16:
+; CHECK: # %bb.0: # %entry
+; CHECK-NEXT: vsetvli zero, a0, e16alt, m8, ta, ma
+; CHECK-NEXT: vfmax.vf v8, v8, fa0
+; CHECK-NEXT: ret
+entry:
+ %a = call <vscale x 32 x bfloat> @llvm.riscv.vfmax.nxv32bf16.bf16(
+ <vscale x 32 x bfloat> poison,
+ <vscale x 32 x bfloat> %0,
+ bfloat %1,
+ iXLen %2)
+
+ ret <vscale x 32 x bfloat> %a
+}
+
+declare <vscale x 32 x bfloat> @llvm.riscv.vfmax.mask.nxv32bf16.bf16(
+ <vscale x 32 x bfloat>,
+ <vscale x 32 x bfloat>,
+ bfloat,
+ <vscale x 32 x i1>,
+ iXLen,
+ iXLen);
+
+define <vscale x 32 x bfloat> @intrinsic_vfmax_mask_vf_nxv32bf16_nxv32bf16_bf16(<vscale x 32 x bfloat> %0, <vscale x 32 x bfloat> %1, bfloat %2, <vscale x 32 x i1> %3, iXLen %4) nounwind {
+; CHECK-LABEL: intrinsic_vfmax_mask_vf_nxv32bf16_nxv32bf16_bf16:
+; CHECK: # %bb.0: # %entry
+; CHECK-NEXT: vsetvli zero, a0, e16alt, m8, ta, mu
+; CHECK-NEXT: vfmax.vf v8, v16, fa0, v0.t
+; CHECK-NEXT: ret
+entry:
+ %a = call <vscale x 32 x bfloat> @llvm.riscv.vfmax.mask.nxv32bf16.bf16(
+ <vscale x 32 x bfloat> %0,
+ <vscale x 32 x bfloat> %1,
+ bfloat %2,
+ <vscale x 32 x i1> %3,
+ iXLen %4, iXLen 1)
+
+ ret <vscale x 32 x bfloat> %a
+}
+
diff --git a/llvm/test/CodeGen/RISCV/rvv/vfmerge-bf.ll b/llvm/test/CodeGen/RISCV/rvv/vfmerge-bf.ll
new file mode 100644
index 0000000..86ba7c7
--- /dev/null
+++ b/llvm/test/CodeGen/RISCV/rvv/vfmerge-bf.ll
@@ -0,0 +1,258 @@
+; NOTE: Assertions have been autogenerated by utils/update_llc_test_checks.py
+; RUN: sed 's/iXLen/i32/g' %s | llc -mtriple=riscv32 -mattr=+v,+experimental-zvfbfa \
+; RUN: -verify-machineinstrs -target-abi=ilp32d | FileCheck %s
+; RUN: sed 's/iXLen/i64/g' %s | llc -mtriple=riscv64 -mattr=+v,+experimental-zvfbfa \
+; RUN: -verify-machineinstrs -target-abi=lp64d | FileCheck %s
+
+declare <vscale x 1 x bfloat> @llvm.riscv.vfmerge.nxv1bf16.bf16(
+ <vscale x 1 x bfloat>,
+ <vscale x 1 x bfloat>,
+ bfloat,
+ <vscale x 1 x i1>,
+ iXLen);
+
+define <vscale x 1 x bfloat> @intrinsic_vfmerge_vfm_nxv1bf16_nxv1bf16_bf16(<vscale x 1 x bfloat> %0, bfloat %1, <vscale x 1 x i1> %2, iXLen %3) nounwind {
+; CHECK-LABEL: intrinsic_vfmerge_vfm_nxv1bf16_nxv1bf16_bf16:
+; CHECK: # %bb.0: # %entry
+; CHECK-NEXT: vsetvli zero, a0, e16alt, mf4, ta, ma
+; CHECK-NEXT: vfmerge.vfm v8, v8, fa0, v0
+; CHECK-NEXT: ret
+entry:
+ %a = call <vscale x 1 x bfloat> @llvm.riscv.vfmerge.nxv1bf16.bf16(
+ <vscale x 1 x bfloat> poison,
+ <vscale x 1 x bfloat> %0,
+ bfloat %1,
+ <vscale x 1 x i1> %2,
+ iXLen %3)
+
+ ret <vscale x 1 x bfloat> %a
+}
+
+declare <vscale x 2 x bfloat> @llvm.riscv.vfmerge.nxv2bf16.bf16(
+ <vscale x 2 x bfloat>,
+ <vscale x 2 x bfloat>,
+ bfloat,
+ <vscale x 2 x i1>,
+ iXLen);
+
+define <vscale x 2 x bfloat> @intrinsic_vfmerge_vfm_nxv2bf16_nxv2bf16_bf16(<vscale x 2 x bfloat> %0, bfloat %1, <vscale x 2 x i1> %2, iXLen %3) nounwind {
+; CHECK-LABEL: intrinsic_vfmerge_vfm_nxv2bf16_nxv2bf16_bf16:
+; CHECK: # %bb.0: # %entry
+; CHECK-NEXT: vsetvli zero, a0, e16alt, mf2, ta, ma
+; CHECK-NEXT: vfmerge.vfm v8, v8, fa0, v0
+; CHECK-NEXT: ret
+entry:
+ %a = call <vscale x 2 x bfloat> @llvm.riscv.vfmerge.nxv2bf16.bf16(
+ <vscale x 2 x bfloat> poison,
+ <vscale x 2 x bfloat> %0,
+ bfloat %1,
+ <vscale x 2 x i1> %2,
+ iXLen %3)
+
+ ret <vscale x 2 x bfloat> %a
+}
+
+declare <vscale x 4 x bfloat> @llvm.riscv.vfmerge.nxv4bf16.bf16(
+ <vscale x 4 x bfloat>,
+ <vscale x 4 x bfloat>,
+ bfloat,
+ <vscale x 4 x i1>,
+ iXLen);
+
+define <vscale x 4 x bfloat> @intrinsic_vfmerge_vfm_nxv4bf16_nxv4bf16_bf16(<vscale x 4 x bfloat> %0, bfloat %1, <vscale x 4 x i1> %2, iXLen %3) nounwind {
+; CHECK-LABEL: intrinsic_vfmerge_vfm_nxv4bf16_nxv4bf16_bf16:
+; CHECK: # %bb.0: # %entry
+; CHECK-NEXT: vsetvli zero, a0, e16alt, m1, ta, ma
+; CHECK-NEXT: vfmerge.vfm v8, v8, fa0, v0
+; CHECK-NEXT: ret
+entry:
+ %a = call <vscale x 4 x bfloat> @llvm.riscv.vfmerge.nxv4bf16.bf16(
+ <vscale x 4 x bfloat> poison,
+ <vscale x 4 x bfloat> %0,
+ bfloat %1,
+ <vscale x 4 x i1> %2,
+ iXLen %3)
+
+ ret <vscale x 4 x bfloat> %a
+}
+
+declare <vscale x 8 x bfloat> @llvm.riscv.vfmerge.nxv8bf16.bf16(
+ <vscale x 8 x bfloat>,
+ <vscale x 8 x bfloat>,
+ bfloat,
+ <vscale x 8 x i1>,
+ iXLen);
+
+define <vscale x 8 x bfloat> @intrinsic_vfmerge_vfm_nxv8bf16_nxv8bf16_bf16(<vscale x 8 x bfloat> %0, bfloat %1, <vscale x 8 x i1> %2, iXLen %3) nounwind {
+; CHECK-LABEL: intrinsic_vfmerge_vfm_nxv8bf16_nxv8bf16_bf16:
+; CHECK: # %bb.0: # %entry
+; CHECK-NEXT: vsetvli zero, a0, e16alt, m2, ta, ma
+; CHECK-NEXT: vfmerge.vfm v8, v8, fa0, v0
+; CHECK-NEXT: ret
+entry:
+ %a = call <vscale x 8 x bfloat> @llvm.riscv.vfmerge.nxv8bf16.bf16(
+ <vscale x 8 x bfloat> poison,
+ <vscale x 8 x bfloat> %0,
+ bfloat %1,
+ <vscale x 8 x i1> %2,
+ iXLen %3)
+
+ ret <vscale x 8 x bfloat> %a
+}
+
+declare <vscale x 16 x bfloat> @llvm.riscv.vfmerge.nxv16bf16.bf16(
+ <vscale x 16 x bfloat>,
+ <vscale x 16 x bfloat>,
+ bfloat,
+ <vscale x 16 x i1>,
+ iXLen);
+
+define <vscale x 16 x bfloat> @intrinsic_vfmerge_vfm_nxv16bf16_nxv16bf16_bf16(<vscale x 16 x bfloat> %0, bfloat %1, <vscale x 16 x i1> %2, iXLen %3) nounwind {
+; CHECK-LABEL: intrinsic_vfmerge_vfm_nxv16bf16_nxv16bf16_bf16:
+; CHECK: # %bb.0: # %entry
+; CHECK-NEXT: vsetvli zero, a0, e16alt, m4, ta, ma
+; CHECK-NEXT: vfmerge.vfm v8, v8, fa0, v0
+; CHECK-NEXT: ret
+entry:
+ %a = call <vscale x 16 x bfloat> @llvm.riscv.vfmerge.nxv16bf16.bf16(
+ <vscale x 16 x bfloat> poison,
+ <vscale x 16 x bfloat> %0,
+ bfloat %1,
+ <vscale x 16 x i1> %2,
+ iXLen %3)
+
+ ret <vscale x 16 x bfloat> %a
+}
+
+declare <vscale x 32 x bfloat> @llvm.riscv.vfmerge.nxv32bf16.bf16(
+ <vscale x 32 x bfloat>,
+ <vscale x 32 x bfloat>,
+ bfloat,
+ <vscale x 32 x i1>,
+ iXLen);
+
+define <vscale x 32 x bfloat> @intrinsic_vfmerge_vfm_nxv32bf16_nxv32bf16_bf16(<vscale x 32 x bfloat> %0, bfloat %1, <vscale x 32 x i1> %2, iXLen %3) nounwind {
+; CHECK-LABEL: intrinsic_vfmerge_vfm_nxv32bf16_nxv32bf16_bf16:
+; CHECK: # %bb.0: # %entry
+; CHECK-NEXT: vsetvli zero, a0, e16alt, m8, ta, ma
+; CHECK-NEXT: vfmerge.vfm v8, v8, fa0, v0
+; CHECK-NEXT: ret
+entry:
+ %a = call <vscale x 32 x bfloat> @llvm.riscv.vfmerge.nxv32bf16.bf16(
+ <vscale x 32 x bfloat> poison,
+ <vscale x 32 x bfloat> %0,
+ bfloat %1,
+ <vscale x 32 x i1> %2,
+ iXLen %3)
+
+ ret <vscale x 32 x bfloat> %a
+}
+
+define <vscale x 1 x bfloat> @intrinsic_vfmerge_vzm_nxv1bf16_nxv1bf16_bf16(<vscale x 1 x bfloat> %0, <vscale x 1 x i1> %1, iXLen %2) nounwind {
+; CHECK-LABEL: intrinsic_vfmerge_vzm_nxv1bf16_nxv1bf16_bf16:
+; CHECK: # %bb.0: # %entry
+; CHECK-NEXT: fmv.h.x fa5, zero
+; CHECK-NEXT: vsetvli zero, a0, e16alt, mf4, ta, ma
+; CHECK-NEXT: vfmerge.vfm v8, v8, fa5, v0
+; CHECK-NEXT: ret
+entry:
+ %a = call <vscale x 1 x bfloat> @llvm.riscv.vfmerge.nxv1bf16.bf16(
+ <vscale x 1 x bfloat> poison,
+ <vscale x 1 x bfloat> %0,
+ bfloat zeroinitializer,
+ <vscale x 1 x i1> %1,
+ iXLen %2)
+
+ ret <vscale x 1 x bfloat> %a
+}
+
+define <vscale x 2 x bfloat> @intrinsic_vfmerge_vzm_nxv2bf16_nxv2bf16_bf16(<vscale x 2 x bfloat> %0, <vscale x 2 x i1> %1, iXLen %2) nounwind {
+; CHECK-LABEL: intrinsic_vfmerge_vzm_nxv2bf16_nxv2bf16_bf16:
+; CHECK: # %bb.0: # %entry
+; CHECK-NEXT: fmv.h.x fa5, zero
+; CHECK-NEXT: vsetvli zero, a0, e16alt, mf2, ta, ma
+; CHECK-NEXT: vfmerge.vfm v8, v8, fa5, v0
+; CHECK-NEXT: ret
+entry:
+ %a = call <vscale x 2 x bfloat> @llvm.riscv.vfmerge.nxv2bf16.bf16(
+ <vscale x 2 x bfloat> poison,
+ <vscale x 2 x bfloat> %0,
+ bfloat zeroinitializer,
+ <vscale x 2 x i1> %1,
+ iXLen %2)
+
+ ret <vscale x 2 x bfloat> %a
+}
+
+define <vscale x 4 x bfloat> @intrinsic_vfmerge_vzm_nxv4bf16_nxv4bf16_bf16(<vscale x 4 x bfloat> %0, <vscale x 4 x i1> %1, iXLen %2) nounwind {
+; CHECK-LABEL: intrinsic_vfmerge_vzm_nxv4bf16_nxv4bf16_bf16:
+; CHECK: # %bb.0: # %entry
+; CHECK-NEXT: fmv.h.x fa5, zero
+; CHECK-NEXT: vsetvli zero, a0, e16alt, m1, ta, ma
+; CHECK-NEXT: vfmerge.vfm v8, v8, fa5, v0
+; CHECK-NEXT: ret
+entry:
+ %a = call <vscale x 4 x bfloat> @llvm.riscv.vfmerge.nxv4bf16.bf16(
+ <vscale x 4 x bfloat> poison,
+ <vscale x 4 x bfloat> %0,
+ bfloat zeroinitializer,
+ <vscale x 4 x i1> %1,
+ iXLen %2)
+
+ ret <vscale x 4 x bfloat> %a
+}
+
+define <vscale x 8 x bfloat> @intrinsic_vfmerge_vzm_nxv8bf16_nxv8bf16_bf16(<vscale x 8 x bfloat> %0, <vscale x 8 x i1> %1, iXLen %2) nounwind {
+; CHECK-LABEL: intrinsic_vfmerge_vzm_nxv8bf16_nxv8bf16_bf16:
+; CHECK: # %bb.0: # %entry
+; CHECK-NEXT: fmv.h.x fa5, zero
+; CHECK-NEXT: vsetvli zero, a0, e16alt, m2, ta, ma
+; CHECK-NEXT: vfmerge.vfm v8, v8, fa5, v0
+; CHECK-NEXT: ret
+entry:
+ %a = call <vscale x 8 x bfloat> @llvm.riscv.vfmerge.nxv8bf16.bf16(
+ <vscale x 8 x bfloat> poison,
+ <vscale x 8 x bfloat> %0,
+ bfloat zeroinitializer,
+ <vscale x 8 x i1> %1,
+ iXLen %2)
+
+ ret <vscale x 8 x bfloat> %a
+}
+
+define <vscale x 16 x bfloat> @intrinsic_vfmerge_vzm_nxv16bf16_nxv16bf16_bf16(<vscale x 16 x bfloat> %0, <vscale x 16 x i1> %1, iXLen %2) nounwind {
+; CHECK-LABEL: intrinsic_vfmerge_vzm_nxv16bf16_nxv16bf16_bf16:
+; CHECK: # %bb.0: # %entry
+; CHECK-NEXT: fmv.h.x fa5, zero
+; CHECK-NEXT: vsetvli zero, a0, e16alt, m4, ta, ma
+; CHECK-NEXT: vfmerge.vfm v8, v8, fa5, v0
+; CHECK-NEXT: ret
+entry:
+ %a = call <vscale x 16 x bfloat> @llvm.riscv.vfmerge.nxv16bf16.bf16(
+ <vscale x 16 x bfloat> poison,
+ <vscale x 16 x bfloat> %0,
+ bfloat zeroinitializer,
+ <vscale x 16 x i1> %1,
+ iXLen %2)
+
+ ret <vscale x 16 x bfloat> %a
+}
+
+define <vscale x 32 x bfloat> @intrinsic_vfmerge_vzm_nxv32bf16_nxv32bf16_bf16(<vscale x 32 x bfloat> %0, <vscale x 32 x i1> %1, iXLen %2) nounwind {
+; CHECK-LABEL: intrinsic_vfmerge_vzm_nxv32bf16_nxv32bf16_bf16:
+; CHECK: # %bb.0: # %entry
+; CHECK-NEXT: fmv.h.x fa5, zero
+; CHECK-NEXT: vsetvli zero, a0, e16alt, m8, ta, ma
+; CHECK-NEXT: vfmerge.vfm v8, v8, fa5, v0
+; CHECK-NEXT: ret
+entry:
+ %a = call <vscale x 32 x bfloat> @llvm.riscv.vfmerge.nxv32bf16.bf16(
+ <vscale x 32 x bfloat> poison,
+ <vscale x 32 x bfloat> %0,
+ bfloat zeroinitializer,
+ <vscale x 32 x i1> %1,
+ iXLen %2)
+
+ ret <vscale x 32 x bfloat> %a
+}
+
diff --git a/llvm/test/CodeGen/RISCV/rvv/vfmin-bf.ll b/llvm/test/CodeGen/RISCV/rvv/vfmin-bf.ll
new file mode 100644
index 0000000..37c0cf5
--- /dev/null
+++ b/llvm/test/CodeGen/RISCV/rvv/vfmin-bf.ll
@@ -0,0 +1,571 @@
+; NOTE: Assertions have been autogenerated by utils/update_llc_test_checks.py
+; RUN: sed 's/iXLen/i32/g' %s | llc -mtriple=riscv32 -mattr=+v,+experimental-zvfbfa \
+; RUN: -verify-machineinstrs -target-abi=ilp32d | FileCheck %s
+; RUN: sed 's/iXLen/i64/g' %s | llc -mtriple=riscv64 -mattr=+v,+experimental-zvfbfa \
+; RUN: -verify-machineinstrs -target-abi=lp64d | FileCheck %s
+
+declare <vscale x 1 x bfloat> @llvm.riscv.vfmin.nxv1bf16.nxv1bf16(
+ <vscale x 1 x bfloat>,
+ <vscale x 1 x bfloat>,
+ <vscale x 1 x bfloat>,
+ iXLen);
+
+define <vscale x 1 x bfloat> @intrinsic_vfmin_vv_nxv1bf16_nxv1bf16_nxv1bf16(<vscale x 1 x bfloat> %0, <vscale x 1 x bfloat> %1, iXLen %2) nounwind {
+; CHECK-LABEL: intrinsic_vfmin_vv_nxv1bf16_nxv1bf16_nxv1bf16:
+; CHECK: # %bb.0: # %entry
+; CHECK-NEXT: vsetvli zero, a0, e16alt, mf4, ta, ma
+; CHECK-NEXT: vfmin.vv v8, v8, v9
+; CHECK-NEXT: ret
+entry:
+ %a = call <vscale x 1 x bfloat> @llvm.riscv.vfmin.nxv1bf16.nxv1bf16(
+ <vscale x 1 x bfloat> poison,
+ <vscale x 1 x bfloat> %0,
+ <vscale x 1 x bfloat> %1,
+ iXLen %2)
+
+ ret <vscale x 1 x bfloat> %a
+}
+
+declare <vscale x 1 x bfloat> @llvm.riscv.vfmin.mask.nxv1bf16.nxv1bf16(
+ <vscale x 1 x bfloat>,
+ <vscale x 1 x bfloat>,
+ <vscale x 1 x bfloat>,
+ <vscale x 1 x i1>,
+ iXLen,
+ iXLen);
+
+define <vscale x 1 x bfloat> @intrinsic_vfmin_mask_vv_nxv1bf16_nxv1bf16_nxv1bf16(<vscale x 1 x bfloat> %0, <vscale x 1 x bfloat> %1, <vscale x 1 x bfloat> %2, <vscale x 1 x i1> %3, iXLen %4) nounwind {
+; CHECK-LABEL: intrinsic_vfmin_mask_vv_nxv1bf16_nxv1bf16_nxv1bf16:
+; CHECK: # %bb.0: # %entry
+; CHECK-NEXT: vsetvli zero, a0, e16alt, mf4, ta, mu
+; CHECK-NEXT: vfmin.vv v8, v9, v10, v0.t
+; CHECK-NEXT: ret
+entry:
+ %a = call <vscale x 1 x bfloat> @llvm.riscv.vfmin.mask.nxv1bf16.nxv1bf16(
+ <vscale x 1 x bfloat> %0,
+ <vscale x 1 x bfloat> %1,
+ <vscale x 1 x bfloat> %2,
+ <vscale x 1 x i1> %3,
+ iXLen %4, iXLen 1)
+
+ ret <vscale x 1 x bfloat> %a
+}
+
+declare <vscale x 2 x bfloat> @llvm.riscv.vfmin.nxv2bf16.nxv2bf16(
+ <vscale x 2 x bfloat>,
+ <vscale x 2 x bfloat>,
+ <vscale x 2 x bfloat>,
+ iXLen);
+
+define <vscale x 2 x bfloat> @intrinsic_vfmin_vv_nxv2bf16_nxv2bf16_nxv2bf16(<vscale x 2 x bfloat> %0, <vscale x 2 x bfloat> %1, iXLen %2) nounwind {
+; CHECK-LABEL: intrinsic_vfmin_vv_nxv2bf16_nxv2bf16_nxv2bf16:
+; CHECK: # %bb.0: # %entry
+; CHECK-NEXT: vsetvli zero, a0, e16alt, mf2, ta, ma
+; CHECK-NEXT: vfmin.vv v8, v8, v9
+; CHECK-NEXT: ret
+entry:
+ %a = call <vscale x 2 x bfloat> @llvm.riscv.vfmin.nxv2bf16.nxv2bf16(
+ <vscale x 2 x bfloat> poison,
+ <vscale x 2 x bfloat> %0,
+ <vscale x 2 x bfloat> %1,
+ iXLen %2)
+
+ ret <vscale x 2 x bfloat> %a
+}
+
+declare <vscale x 2 x bfloat> @llvm.riscv.vfmin.mask.nxv2bf16.nxv2bf16(
+ <vscale x 2 x bfloat>,
+ <vscale x 2 x bfloat>,
+ <vscale x 2 x bfloat>,
+ <vscale x 2 x i1>,
+ iXLen,
+ iXLen);
+
+define <vscale x 2 x bfloat> @intrinsic_vfmin_mask_vv_nxv2bf16_nxv2bf16_nxv2bf16(<vscale x 2 x bfloat> %0, <vscale x 2 x bfloat> %1, <vscale x 2 x bfloat> %2, <vscale x 2 x i1> %3, iXLen %4) nounwind {
+; CHECK-LABEL: intrinsic_vfmin_mask_vv_nxv2bf16_nxv2bf16_nxv2bf16:
+; CHECK: # %bb.0: # %entry
+; CHECK-NEXT: vsetvli zero, a0, e16alt, mf2, ta, mu
+; CHECK-NEXT: vfmin.vv v8, v9, v10, v0.t
+; CHECK-NEXT: ret
+entry:
+ %a = call <vscale x 2 x bfloat> @llvm.riscv.vfmin.mask.nxv2bf16.nxv2bf16(
+ <vscale x 2 x bfloat> %0,
+ <vscale x 2 x bfloat> %1,
+ <vscale x 2 x bfloat> %2,
+ <vscale x 2 x i1> %3,
+ iXLen %4, iXLen 1)
+
+ ret <vscale x 2 x bfloat> %a
+}
+
+declare <vscale x 4 x bfloat> @llvm.riscv.vfmin.nxv4bf16.nxv4bf16(
+ <vscale x 4 x bfloat>,
+ <vscale x 4 x bfloat>,
+ <vscale x 4 x bfloat>,
+ iXLen);
+
+define <vscale x 4 x bfloat> @intrinsic_vfmin_vv_nxv4bf16_nxv4bf16_nxv4bf16(<vscale x 4 x bfloat> %0, <vscale x 4 x bfloat> %1, iXLen %2) nounwind {
+; CHECK-LABEL: intrinsic_vfmin_vv_nxv4bf16_nxv4bf16_nxv4bf16:
+; CHECK: # %bb.0: # %entry
+; CHECK-NEXT: vsetvli zero, a0, e16alt, m1, ta, ma
+; CHECK-NEXT: vfmin.vv v8, v8, v9
+; CHECK-NEXT: ret
+entry:
+ %a = call <vscale x 4 x bfloat> @llvm.riscv.vfmin.nxv4bf16.nxv4bf16(
+ <vscale x 4 x bfloat> poison,
+ <vscale x 4 x bfloat> %0,
+ <vscale x 4 x bfloat> %1,
+ iXLen %2)
+
+ ret <vscale x 4 x bfloat> %a
+}
+
+declare <vscale x 4 x bfloat> @llvm.riscv.vfmin.mask.nxv4bf16.nxv4bf16(
+ <vscale x 4 x bfloat>,
+ <vscale x 4 x bfloat>,
+ <vscale x 4 x bfloat>,
+ <vscale x 4 x i1>,
+ iXLen,
+ iXLen);
+
+define <vscale x 4 x bfloat> @intrinsic_vfmin_mask_vv_nxv4bf16_nxv4bf16_nxv4bf16(<vscale x 4 x bfloat> %0, <vscale x 4 x bfloat> %1, <vscale x 4 x bfloat> %2, <vscale x 4 x i1> %3, iXLen %4) nounwind {
+; CHECK-LABEL: intrinsic_vfmin_mask_vv_nxv4bf16_nxv4bf16_nxv4bf16:
+; CHECK: # %bb.0: # %entry
+; CHECK-NEXT: vsetvli zero, a0, e16alt, m1, ta, mu
+; CHECK-NEXT: vfmin.vv v8, v9, v10, v0.t
+; CHECK-NEXT: ret
+entry:
+ %a = call <vscale x 4 x bfloat> @llvm.riscv.vfmin.mask.nxv4bf16.nxv4bf16(
+ <vscale x 4 x bfloat> %0,
+ <vscale x 4 x bfloat> %1,
+ <vscale x 4 x bfloat> %2,
+ <vscale x 4 x i1> %3,
+ iXLen %4, iXLen 1)
+
+ ret <vscale x 4 x bfloat> %a
+}
+
+declare <vscale x 8 x bfloat> @llvm.riscv.vfmin.nxv8bf16.nxv8bf16(
+ <vscale x 8 x bfloat>,
+ <vscale x 8 x bfloat>,
+ <vscale x 8 x bfloat>,
+ iXLen);
+
+define <vscale x 8 x bfloat> @intrinsic_vfmin_vv_nxv8bf16_nxv8bf16_nxv8bf16(<vscale x 8 x bfloat> %0, <vscale x 8 x bfloat> %1, iXLen %2) nounwind {
+; CHECK-LABEL: intrinsic_vfmin_vv_nxv8bf16_nxv8bf16_nxv8bf16:
+; CHECK: # %bb.0: # %entry
+; CHECK-NEXT: vsetvli zero, a0, e16alt, m2, ta, ma
+; CHECK-NEXT: vfmin.vv v8, v8, v10
+; CHECK-NEXT: ret
+entry:
+ %a = call <vscale x 8 x bfloat> @llvm.riscv.vfmin.nxv8bf16.nxv8bf16(
+ <vscale x 8 x bfloat> poison,
+ <vscale x 8 x bfloat> %0,
+ <vscale x 8 x bfloat> %1,
+ iXLen %2)
+
+ ret <vscale x 8 x bfloat> %a
+}
+
+declare <vscale x 8 x bfloat> @llvm.riscv.vfmin.mask.nxv8bf16.nxv8bf16(
+ <vscale x 8 x bfloat>,
+ <vscale x 8 x bfloat>,
+ <vscale x 8 x bfloat>,
+ <vscale x 8 x i1>,
+ iXLen,
+ iXLen);
+
+define <vscale x 8 x bfloat> @intrinsic_vfmin_mask_vv_nxv8bf16_nxv8bf16_nxv8bf16(<vscale x 8 x bfloat> %0, <vscale x 8 x bfloat> %1, <vscale x 8 x bfloat> %2, <vscale x 8 x i1> %3, iXLen %4) nounwind {
+; CHECK-LABEL: intrinsic_vfmin_mask_vv_nxv8bf16_nxv8bf16_nxv8bf16:
+; CHECK: # %bb.0: # %entry
+; CHECK-NEXT: vsetvli zero, a0, e16alt, m2, ta, mu
+; CHECK-NEXT: vfmin.vv v8, v10, v12, v0.t
+; CHECK-NEXT: ret
+entry:
+ %a = call <vscale x 8 x bfloat> @llvm.riscv.vfmin.mask.nxv8bf16.nxv8bf16(
+ <vscale x 8 x bfloat> %0,
+ <vscale x 8 x bfloat> %1,
+ <vscale x 8 x bfloat> %2,
+ <vscale x 8 x i1> %3,
+ iXLen %4, iXLen 1)
+
+ ret <vscale x 8 x bfloat> %a
+}
+
+declare <vscale x 16 x bfloat> @llvm.riscv.vfmin.nxv16bf16.nxv16bf16(
+ <vscale x 16 x bfloat>,
+ <vscale x 16 x bfloat>,
+ <vscale x 16 x bfloat>,
+ iXLen);
+
+define <vscale x 16 x bfloat> @intrinsic_vfmin_vv_nxv16bf16_nxv16bf16_nxv16bf16(<vscale x 16 x bfloat> %0, <vscale x 16 x bfloat> %1, iXLen %2) nounwind {
+; CHECK-LABEL: intrinsic_vfmin_vv_nxv16bf16_nxv16bf16_nxv16bf16:
+; CHECK: # %bb.0: # %entry
+; CHECK-NEXT: vsetvli zero, a0, e16alt, m4, ta, ma
+; CHECK-NEXT: vfmin.vv v8, v8, v12
+; CHECK-NEXT: ret
+entry:
+ %a = call <vscale x 16 x bfloat> @llvm.riscv.vfmin.nxv16bf16.nxv16bf16(
+ <vscale x 16 x bfloat> poison,
+ <vscale x 16 x bfloat> %0,
+ <vscale x 16 x bfloat> %1,
+ iXLen %2)
+
+ ret <vscale x 16 x bfloat> %a
+}
+
+declare <vscale x 16 x bfloat> @llvm.riscv.vfmin.mask.nxv16bf16.nxv16bf16(
+ <vscale x 16 x bfloat>,
+ <vscale x 16 x bfloat>,
+ <vscale x 16 x bfloat>,
+ <vscale x 16 x i1>,
+ iXLen,
+ iXLen);
+
+define <vscale x 16 x bfloat> @intrinsic_vfmin_mask_vv_nxv16bf16_nxv16bf16_nxv16bf16(<vscale x 16 x bfloat> %0, <vscale x 16 x bfloat> %1, <vscale x 16 x bfloat> %2, <vscale x 16 x i1> %3, iXLen %4) nounwind {
+; CHECK-LABEL: intrinsic_vfmin_mask_vv_nxv16bf16_nxv16bf16_nxv16bf16:
+; CHECK: # %bb.0: # %entry
+; CHECK-NEXT: vsetvli zero, a0, e16alt, m4, ta, mu
+; CHECK-NEXT: vfmin.vv v8, v12, v16, v0.t
+; CHECK-NEXT: ret
+entry:
+ %a = call <vscale x 16 x bfloat> @llvm.riscv.vfmin.mask.nxv16bf16.nxv16bf16(
+ <vscale x 16 x bfloat> %0,
+ <vscale x 16 x bfloat> %1,
+ <vscale x 16 x bfloat> %2,
+ <vscale x 16 x i1> %3,
+ iXLen %4, iXLen 1)
+
+ ret <vscale x 16 x bfloat> %a
+}
+
+declare <vscale x 32 x bfloat> @llvm.riscv.vfmin.nxv32bf16.nxv32bf16(
+ <vscale x 32 x bfloat>,
+ <vscale x 32 x bfloat>,
+ <vscale x 32 x bfloat>,
+ iXLen);
+
+define <vscale x 32 x bfloat> @intrinsic_vfmin_vv_nxv32bf16_nxv32bf16_nxv32bf16(<vscale x 32 x bfloat> %0, <vscale x 32 x bfloat> %1, iXLen %2) nounwind {
+; CHECK-LABEL: intrinsic_vfmin_vv_nxv32bf16_nxv32bf16_nxv32bf16:
+; CHECK: # %bb.0: # %entry
+; CHECK-NEXT: vsetvli zero, a0, e16alt, m8, ta, ma
+; CHECK-NEXT: vfmin.vv v8, v8, v16
+; CHECK-NEXT: ret
+entry:
+ %a = call <vscale x 32 x bfloat> @llvm.riscv.vfmin.nxv32bf16.nxv32bf16(
+ <vscale x 32 x bfloat> poison,
+ <vscale x 32 x bfloat> %0,
+ <vscale x 32 x bfloat> %1,
+ iXLen %2)
+
+ ret <vscale x 32 x bfloat> %a
+}
+
+declare <vscale x 32 x bfloat> @llvm.riscv.vfmin.mask.nxv32bf16.nxv32bf16(
+ <vscale x 32 x bfloat>,
+ <vscale x 32 x bfloat>,
+ <vscale x 32 x bfloat>,
+ <vscale x 32 x i1>,
+ iXLen,
+ iXLen);
+
+define <vscale x 32 x bfloat> @intrinsic_vfmin_mask_vv_nxv32bf16_nxv32bf16_nxv32bf16(<vscale x 32 x bfloat> %0, <vscale x 32 x bfloat> %1, <vscale x 32 x bfloat> %2, <vscale x 32 x i1> %3, iXLen %4) nounwind {
+; CHECK-LABEL: intrinsic_vfmin_mask_vv_nxv32bf16_nxv32bf16_nxv32bf16:
+; CHECK: # %bb.0: # %entry
+; CHECK-NEXT: vl8re16.v v24, (a0)
+; CHECK-NEXT: vsetvli zero, a1, e16alt, m8, ta, mu
+; CHECK-NEXT: vfmin.vv v8, v16, v24, v0.t
+; CHECK-NEXT: ret
+entry:
+ %a = call <vscale x 32 x bfloat> @llvm.riscv.vfmin.mask.nxv32bf16.nxv32bf16(
+ <vscale x 32 x bfloat> %0,
+ <vscale x 32 x bfloat> %1,
+ <vscale x 32 x bfloat> %2,
+ <vscale x 32 x i1> %3,
+ iXLen %4, iXLen 1)
+
+ ret <vscale x 32 x bfloat> %a
+}
+
+declare <vscale x 1 x bfloat> @llvm.riscv.vfmin.nxv1bf16.bf16(
+ <vscale x 1 x bfloat>,
+ <vscale x 1 x bfloat>,
+ bfloat,
+ iXLen);
+
+define <vscale x 1 x bfloat> @intrinsic_vfmin_vf_nxv1bf16_nxv1bf16_bf16(<vscale x 1 x bfloat> %0, bfloat %1, iXLen %2) nounwind {
+; CHECK-LABEL: intrinsic_vfmin_vf_nxv1bf16_nxv1bf16_bf16:
+; CHECK: # %bb.0: # %entry
+; CHECK-NEXT: vsetvli zero, a0, e16alt, mf4, ta, ma
+; CHECK-NEXT: vfmin.vf v8, v8, fa0
+; CHECK-NEXT: ret
+entry:
+ %a = call <vscale x 1 x bfloat> @llvm.riscv.vfmin.nxv1bf16.bf16(
+ <vscale x 1 x bfloat> poison,
+ <vscale x 1 x bfloat> %0,
+ bfloat %1,
+ iXLen %2)
+
+ ret <vscale x 1 x bfloat> %a
+}
+
+declare <vscale x 1 x bfloat> @llvm.riscv.vfmin.mask.nxv1bf16.bf16(
+ <vscale x 1 x bfloat>,
+ <vscale x 1 x bfloat>,
+ bfloat,
+ <vscale x 1 x i1>,
+ iXLen,
+ iXLen);
+
+define <vscale x 1 x bfloat> @intrinsic_vfmin_mask_vf_nxv1bf16_nxv1bf16_bf16(<vscale x 1 x bfloat> %0, <vscale x 1 x bfloat> %1, bfloat %2, <vscale x 1 x i1> %3, iXLen %4) nounwind {
+; CHECK-LABEL: intrinsic_vfmin_mask_vf_nxv1bf16_nxv1bf16_bf16:
+; CHECK: # %bb.0: # %entry
+; CHECK-NEXT: vsetvli zero, a0, e16alt, mf4, ta, mu
+; CHECK-NEXT: vfmin.vf v8, v9, fa0, v0.t
+; CHECK-NEXT: ret
+entry:
+ %a = call <vscale x 1 x bfloat> @llvm.riscv.vfmin.mask.nxv1bf16.bf16(
+ <vscale x 1 x bfloat> %0,
+ <vscale x 1 x bfloat> %1,
+ bfloat %2,
+ <vscale x 1 x i1> %3,
+ iXLen %4, iXLen 1)
+
+ ret <vscale x 1 x bfloat> %a
+}
+
+declare <vscale x 2 x bfloat> @llvm.riscv.vfmin.nxv2bf16.bf16(
+ <vscale x 2 x bfloat>,
+ <vscale x 2 x bfloat>,
+ bfloat,
+ iXLen);
+
+define <vscale x 2 x bfloat> @intrinsic_vfmin_vf_nxv2bf16_nxv2bf16_bf16(<vscale x 2 x bfloat> %0, bfloat %1, iXLen %2) nounwind {
+; CHECK-LABEL: intrinsic_vfmin_vf_nxv2bf16_nxv2bf16_bf16:
+; CHECK: # %bb.0: # %entry
+; CHECK-NEXT: vsetvli zero, a0, e16alt, mf2, ta, ma
+; CHECK-NEXT: vfmin.vf v8, v8, fa0
+; CHECK-NEXT: ret
+entry:
+ %a = call <vscale x 2 x bfloat> @llvm.riscv.vfmin.nxv2bf16.bf16(
+ <vscale x 2 x bfloat> poison,
+ <vscale x 2 x bfloat> %0,
+ bfloat %1,
+ iXLen %2)
+
+ ret <vscale x 2 x bfloat> %a
+}
+
+declare <vscale x 2 x bfloat> @llvm.riscv.vfmin.mask.nxv2bf16.bf16(
+ <vscale x 2 x bfloat>,
+ <vscale x 2 x bfloat>,
+ bfloat,
+ <vscale x 2 x i1>,
+ iXLen,
+ iXLen);
+
+define <vscale x 2 x bfloat> @intrinsic_vfmin_mask_vf_nxv2bf16_nxv2bf16_bf16(<vscale x 2 x bfloat> %0, <vscale x 2 x bfloat> %1, bfloat %2, <vscale x 2 x i1> %3, iXLen %4) nounwind {
+; CHECK-LABEL: intrinsic_vfmin_mask_vf_nxv2bf16_nxv2bf16_bf16:
+; CHECK: # %bb.0: # %entry
+; CHECK-NEXT: vsetvli zero, a0, e16alt, mf2, ta, mu
+; CHECK-NEXT: vfmin.vf v8, v9, fa0, v0.t
+; CHECK-NEXT: ret
+entry:
+ %a = call <vscale x 2 x bfloat> @llvm.riscv.vfmin.mask.nxv2bf16.bf16(
+ <vscale x 2 x bfloat> %0,
+ <vscale x 2 x bfloat> %1,
+ bfloat %2,
+ <vscale x 2 x i1> %3,
+ iXLen %4, iXLen 1)
+
+ ret <vscale x 2 x bfloat> %a
+}
+
+declare <vscale x 4 x bfloat> @llvm.riscv.vfmin.nxv4bf16.bf16(
+ <vscale x 4 x bfloat>,
+ <vscale x 4 x bfloat>,
+ bfloat,
+ iXLen);
+
+define <vscale x 4 x bfloat> @intrinsic_vfmin_vf_nxv4bf16_nxv4bf16_bf16(<vscale x 4 x bfloat> %0, bfloat %1, iXLen %2) nounwind {
+; CHECK-LABEL: intrinsic_vfmin_vf_nxv4bf16_nxv4bf16_bf16:
+; CHECK: # %bb.0: # %entry
+; CHECK-NEXT: vsetvli zero, a0, e16alt, m1, ta, ma
+; CHECK-NEXT: vfmin.vf v8, v8, fa0
+; CHECK-NEXT: ret
+entry:
+ %a = call <vscale x 4 x bfloat> @llvm.riscv.vfmin.nxv4bf16.bf16(
+ <vscale x 4 x bfloat> poison,
+ <vscale x 4 x bfloat> %0,
+ bfloat %1,
+ iXLen %2)
+
+ ret <vscale x 4 x bfloat> %a
+}
+
+declare <vscale x 4 x bfloat> @llvm.riscv.vfmin.mask.nxv4bf16.bf16(
+ <vscale x 4 x bfloat>,
+ <vscale x 4 x bfloat>,
+ bfloat,
+ <vscale x 4 x i1>,
+ iXLen,
+ iXLen);
+
+define <vscale x 4 x bfloat> @intrinsic_vfmin_mask_vf_nxv4bf16_nxv4bf16_bf16(<vscale x 4 x bfloat> %0, <vscale x 4 x bfloat> %1, bfloat %2, <vscale x 4 x i1> %3, iXLen %4) nounwind {
+; CHECK-LABEL: intrinsic_vfmin_mask_vf_nxv4bf16_nxv4bf16_bf16:
+; CHECK: # %bb.0: # %entry
+; CHECK-NEXT: vsetvli zero, a0, e16alt, m1, ta, mu
+; CHECK-NEXT: vfmin.vf v8, v9, fa0, v0.t
+; CHECK-NEXT: ret
+entry:
+ %a = call <vscale x 4 x bfloat> @llvm.riscv.vfmin.mask.nxv4bf16.bf16(
+ <vscale x 4 x bfloat> %0,
+ <vscale x 4 x bfloat> %1,
+ bfloat %2,
+ <vscale x 4 x i1> %3,
+ iXLen %4, iXLen 1)
+
+ ret <vscale x 4 x bfloat> %a
+}
+
+declare <vscale x 8 x bfloat> @llvm.riscv.vfmin.nxv8bf16.bf16(
+ <vscale x 8 x bfloat>,
+ <vscale x 8 x bfloat>,
+ bfloat,
+ iXLen);
+
+define <vscale x 8 x bfloat> @intrinsic_vfmin_vf_nxv8bf16_nxv8bf16_bf16(<vscale x 8 x bfloat> %0, bfloat %1, iXLen %2) nounwind {
+; CHECK-LABEL: intrinsic_vfmin_vf_nxv8bf16_nxv8bf16_bf16:
+; CHECK: # %bb.0: # %entry
+; CHECK-NEXT: vsetvli zero, a0, e16alt, m2, ta, ma
+; CHECK-NEXT: vfmin.vf v8, v8, fa0
+; CHECK-NEXT: ret
+entry:
+ %a = call <vscale x 8 x bfloat> @llvm.riscv.vfmin.nxv8bf16.bf16(
+ <vscale x 8 x bfloat> poison,
+ <vscale x 8 x bfloat> %0,
+ bfloat %1,
+ iXLen %2)
+
+ ret <vscale x 8 x bfloat> %a
+}
+
+declare <vscale x 8 x bfloat> @llvm.riscv.vfmin.mask.nxv8bf16.bf16(
+ <vscale x 8 x bfloat>,
+ <vscale x 8 x bfloat>,
+ bfloat,
+ <vscale x 8 x i1>,
+ iXLen,
+ iXLen);
+
+define <vscale x 8 x bfloat> @intrinsic_vfmin_mask_vf_nxv8bf16_nxv8bf16_bf16(<vscale x 8 x bfloat> %0, <vscale x 8 x bfloat> %1, bfloat %2, <vscale x 8 x i1> %3, iXLen %4) nounwind {
+; CHECK-LABEL: intrinsic_vfmin_mask_vf_nxv8bf16_nxv8bf16_bf16:
+; CHECK: # %bb.0: # %entry
+; CHECK-NEXT: vsetvli zero, a0, e16alt, m2, ta, mu
+; CHECK-NEXT: vfmin.vf v8, v10, fa0, v0.t
+; CHECK-NEXT: ret
+entry:
+ %a = call <vscale x 8 x bfloat> @llvm.riscv.vfmin.mask.nxv8bf16.bf16(
+ <vscale x 8 x bfloat> %0,
+ <vscale x 8 x bfloat> %1,
+ bfloat %2,
+ <vscale x 8 x i1> %3,
+ iXLen %4, iXLen 1)
+
+ ret <vscale x 8 x bfloat> %a
+}
+
+declare <vscale x 16 x bfloat> @llvm.riscv.vfmin.nxv16bf16.bf16(
+ <vscale x 16 x bfloat>,
+ <vscale x 16 x bfloat>,
+ bfloat,
+ iXLen);
+
+define <vscale x 16 x bfloat> @intrinsic_vfmin_vf_nxv16bf16_nxv16bf16_bf16(<vscale x 16 x bfloat> %0, bfloat %1, iXLen %2) nounwind {
+; CHECK-LABEL: intrinsic_vfmin_vf_nxv16bf16_nxv16bf16_bf16:
+; CHECK: # %bb.0: # %entry
+; CHECK-NEXT: vsetvli zero, a0, e16alt, m4, ta, ma
+; CHECK-NEXT: vfmin.vf v8, v8, fa0
+; CHECK-NEXT: ret
+entry:
+ %a = call <vscale x 16 x bfloat> @llvm.riscv.vfmin.nxv16bf16.bf16(
+ <vscale x 16 x bfloat> poison,
+ <vscale x 16 x bfloat> %0,
+ bfloat %1,
+ iXLen %2)
+
+ ret <vscale x 16 x bfloat> %a
+}
+
+declare <vscale x 16 x bfloat> @llvm.riscv.vfmin.mask.nxv16bf16.bf16(
+ <vscale x 16 x bfloat>,
+ <vscale x 16 x bfloat>,
+ bfloat,
+ <vscale x 16 x i1>,
+ iXLen,
+ iXLen);
+
+define <vscale x 16 x bfloat> @intrinsic_vfmin_mask_vf_nxv16bf16_nxv16bf16_bf16(<vscale x 16 x bfloat> %0, <vscale x 16 x bfloat> %1, bfloat %2, <vscale x 16 x i1> %3, iXLen %4) nounwind {
+; CHECK-LABEL: intrinsic_vfmin_mask_vf_nxv16bf16_nxv16bf16_bf16:
+; CHECK: # %bb.0: # %entry
+; CHECK-NEXT: vsetvli zero, a0, e16alt, m4, ta, mu
+; CHECK-NEXT: vfmin.vf v8, v12, fa0, v0.t
+; CHECK-NEXT: ret
+entry:
+ %a = call <vscale x 16 x bfloat> @llvm.riscv.vfmin.mask.nxv16bf16.bf16(
+ <vscale x 16 x bfloat> %0,
+ <vscale x 16 x bfloat> %1,
+ bfloat %2,
+ <vscale x 16 x i1> %3,
+ iXLen %4, iXLen 1)
+
+ ret <vscale x 16 x bfloat> %a
+}
+
+declare <vscale x 32 x bfloat> @llvm.riscv.vfmin.nxv32bf16.bf16(
+ <vscale x 32 x bfloat>,
+ <vscale x 32 x bfloat>,
+ bfloat,
+ iXLen);
+
+define <vscale x 32 x bfloat> @intrinsic_vfmin_vf_nxv32bf16_nxv32bf16_bf16(<vscale x 32 x bfloat> %0, bfloat %1, iXLen %2) nounwind {
+; CHECK-LABEL: intrinsic_vfmin_vf_nxv32bf16_nxv32bf16_bf16:
+; CHECK: # %bb.0: # %entry
+; CHECK-NEXT: vsetvli zero, a0, e16alt, m8, ta, ma
+; CHECK-NEXT: vfmin.vf v8, v8, fa0
+; CHECK-NEXT: ret
+entry:
+ %a = call <vscale x 32 x bfloat> @llvm.riscv.vfmin.nxv32bf16.bf16(
+ <vscale x 32 x bfloat> poison,
+ <vscale x 32 x bfloat> %0,
+ bfloat %1,
+ iXLen %2)
+
+ ret <vscale x 32 x bfloat> %a
+}
+
+declare <vscale x 32 x bfloat> @llvm.riscv.vfmin.mask.nxv32bf16.bf16(
+ <vscale x 32 x bfloat>,
+ <vscale x 32 x bfloat>,
+ bfloat,
+ <vscale x 32 x i1>,
+ iXLen,
+ iXLen);
+
+define <vscale x 32 x bfloat> @intrinsic_vfmin_mask_vf_nxv32bf16_nxv32bf16_bf16(<vscale x 32 x bfloat> %0, <vscale x 32 x bfloat> %1, bfloat %2, <vscale x 32 x i1> %3, iXLen %4) nounwind {
+; CHECK-LABEL: intrinsic_vfmin_mask_vf_nxv32bf16_nxv32bf16_bf16:
+; CHECK: # %bb.0: # %entry
+; CHECK-NEXT: vsetvli zero, a0, e16alt, m8, ta, mu
+; CHECK-NEXT: vfmin.vf v8, v16, fa0, v0.t
+; CHECK-NEXT: ret
+entry:
+ %a = call <vscale x 32 x bfloat> @llvm.riscv.vfmin.mask.nxv32bf16.bf16(
+ <vscale x 32 x bfloat> %0,
+ <vscale x 32 x bfloat> %1,
+ bfloat %2,
+ <vscale x 32 x i1> %3,
+ iXLen %4, iXLen 1)
+
+ ret <vscale x 32 x bfloat> %a
+}
+
diff --git a/llvm/test/CodeGen/RISCV/rvv/vfmsac-bf.ll b/llvm/test/CodeGen/RISCV/rvv/vfmsac-bf.ll
new file mode 100644
index 0000000..948d219
--- /dev/null
+++ b/llvm/test/CodeGen/RISCV/rvv/vfmsac-bf.ll
@@ -0,0 +1,553 @@
+; NOTE: Assertions have been autogenerated by utils/update_llc_test_checks.py
+; RUN: sed 's/iXLen/i32/g' %s | llc -mtriple=riscv32 -mattr=+v,+experimental-zvfbfa \
+; RUN: -verify-machineinstrs -target-abi=ilp32d | FileCheck %s
+; RUN: sed 's/iXLen/i64/g' %s | llc -mtriple=riscv64 -mattr=+v,+experimental-zvfbfa \
+; RUN: -verify-machineinstrs -target-abi=lp64d | FileCheck %s
+
+declare <vscale x 1 x bfloat> @llvm.riscv.vfmsac.nxv1bf16.nxv1bf16(
+ <vscale x 1 x bfloat>,
+ <vscale x 1 x bfloat>,
+ <vscale x 1 x bfloat>,
+ iXLen, iXLen, iXLen);
+
+define <vscale x 1 x bfloat> @intrinsic_vfmsac_vv_nxv1bf16_nxv1bf16_nxv1bf16(<vscale x 1 x bfloat> %0, <vscale x 1 x bfloat> %1, <vscale x 1 x bfloat> %2, iXLen %3) nounwind {
+; CHECK-LABEL: intrinsic_vfmsac_vv_nxv1bf16_nxv1bf16_nxv1bf16:
+; CHECK: # %bb.0: # %entry
+; CHECK-NEXT: fsrmi a1, 0
+; CHECK-NEXT: vsetvli zero, a0, e16alt, mf4, tu, ma
+; CHECK-NEXT: vfmsac.vv v8, v9, v10
+; CHECK-NEXT: fsrm a1
+; CHECK-NEXT: ret
+entry:
+ %a = call <vscale x 1 x bfloat> @llvm.riscv.vfmsac.nxv1bf16.nxv1bf16(
+ <vscale x 1 x bfloat> %0,
+ <vscale x 1 x bfloat> %1,
+ <vscale x 1 x bfloat> %2,
+ iXLen 0, iXLen %3, iXLen 0)
+
+ ret <vscale x 1 x bfloat> %a
+}
+
+declare <vscale x 1 x bfloat> @llvm.riscv.vfmsac.mask.nxv1bf16.nxv1bf16(
+ <vscale x 1 x bfloat>,
+ <vscale x 1 x bfloat>,
+ <vscale x 1 x bfloat>,
+ <vscale x 1 x i1>,
+ iXLen, iXLen, iXLen);
+
+define <vscale x 1 x bfloat> @intrinsic_vfmsac_mask_vv_nxv1bf16_nxv1bf16_nxv1bf16(<vscale x 1 x bfloat> %0, <vscale x 1 x bfloat> %1, <vscale x 1 x bfloat> %2, <vscale x 1 x i1> %3, iXLen %4) nounwind {
+; CHECK-LABEL: intrinsic_vfmsac_mask_vv_nxv1bf16_nxv1bf16_nxv1bf16:
+; CHECK: # %bb.0: # %entry
+; CHECK-NEXT: fsrmi a1, 0
+; CHECK-NEXT: vsetvli zero, a0, e16alt, mf4, tu, mu
+; CHECK-NEXT: vfmsac.vv v8, v9, v10, v0.t
+; CHECK-NEXT: fsrm a1
+; CHECK-NEXT: ret
+entry:
+ %a = call <vscale x 1 x bfloat> @llvm.riscv.vfmsac.mask.nxv1bf16.nxv1bf16(
+ <vscale x 1 x bfloat> %0,
+ <vscale x 1 x bfloat> %1,
+ <vscale x 1 x bfloat> %2,
+ <vscale x 1 x i1> %3,
+ iXLen 0, iXLen %4, iXLen 0);
+
+ ret <vscale x 1 x bfloat> %a
+}
+
+declare <vscale x 2 x bfloat> @llvm.riscv.vfmsac.nxv2bf16.nxv2bf16(
+ <vscale x 2 x bfloat>,
+ <vscale x 2 x bfloat>,
+ <vscale x 2 x bfloat>,
+ iXLen, iXLen, iXLen);
+
+define <vscale x 2 x bfloat> @intrinsic_vfmsac_vv_nxv2bf16_nxv2bf16_nxv2bf16(<vscale x 2 x bfloat> %0, <vscale x 2 x bfloat> %1, <vscale x 2 x bfloat> %2, iXLen %3) nounwind {
+; CHECK-LABEL: intrinsic_vfmsac_vv_nxv2bf16_nxv2bf16_nxv2bf16:
+; CHECK: # %bb.0: # %entry
+; CHECK-NEXT: fsrmi a1, 0
+; CHECK-NEXT: vsetvli zero, a0, e16alt, mf2, tu, ma
+; CHECK-NEXT: vfmsac.vv v8, v9, v10
+; CHECK-NEXT: fsrm a1
+; CHECK-NEXT: ret
+entry:
+ %a = call <vscale x 2 x bfloat> @llvm.riscv.vfmsac.nxv2bf16.nxv2bf16(
+ <vscale x 2 x bfloat> %0,
+ <vscale x 2 x bfloat> %1,
+ <vscale x 2 x bfloat> %2,
+ iXLen 0, iXLen %3, iXLen 0)
+
+ ret <vscale x 2 x bfloat> %a
+}
+
+declare <vscale x 2 x bfloat> @llvm.riscv.vfmsac.mask.nxv2bf16.nxv2bf16(
+ <vscale x 2 x bfloat>,
+ <vscale x 2 x bfloat>,
+ <vscale x 2 x bfloat>,
+ <vscale x 2 x i1>,
+ iXLen, iXLen, iXLen);
+
+define <vscale x 2 x bfloat> @intrinsic_vfmsac_mask_vv_nxv2bf16_nxv2bf16_nxv2bf16(<vscale x 2 x bfloat> %0, <vscale x 2 x bfloat> %1, <vscale x 2 x bfloat> %2, <vscale x 2 x i1> %3, iXLen %4) nounwind {
+; CHECK-LABEL: intrinsic_vfmsac_mask_vv_nxv2bf16_nxv2bf16_nxv2bf16:
+; CHECK: # %bb.0: # %entry
+; CHECK-NEXT: fsrmi a1, 0
+; CHECK-NEXT: vsetvli zero, a0, e16alt, mf2, tu, mu
+; CHECK-NEXT: vfmsac.vv v8, v9, v10, v0.t
+; CHECK-NEXT: fsrm a1
+; CHECK-NEXT: ret
+entry:
+ %a = call <vscale x 2 x bfloat> @llvm.riscv.vfmsac.mask.nxv2bf16.nxv2bf16(
+ <vscale x 2 x bfloat> %0,
+ <vscale x 2 x bfloat> %1,
+ <vscale x 2 x bfloat> %2,
+ <vscale x 2 x i1> %3,
+ iXLen 0, iXLen %4, iXLen 0);
+
+ ret <vscale x 2 x bfloat> %a
+}
+
+declare <vscale x 4 x bfloat> @llvm.riscv.vfmsac.nxv4bf16.nxv4bf16(
+ <vscale x 4 x bfloat>,
+ <vscale x 4 x bfloat>,
+ <vscale x 4 x bfloat>,
+ iXLen, iXLen, iXLen);
+
+define <vscale x 4 x bfloat> @intrinsic_vfmsac_vv_nxv4bf16_nxv4bf16_nxv4bf16(<vscale x 4 x bfloat> %0, <vscale x 4 x bfloat> %1, <vscale x 4 x bfloat> %2, iXLen %3) nounwind {
+; CHECK-LABEL: intrinsic_vfmsac_vv_nxv4bf16_nxv4bf16_nxv4bf16:
+; CHECK: # %bb.0: # %entry
+; CHECK-NEXT: fsrmi a1, 0
+; CHECK-NEXT: vsetvli zero, a0, e16alt, m1, tu, ma
+; CHECK-NEXT: vfmsac.vv v8, v9, v10
+; CHECK-NEXT: fsrm a1
+; CHECK-NEXT: ret
+entry:
+ %a = call <vscale x 4 x bfloat> @llvm.riscv.vfmsac.nxv4bf16.nxv4bf16(
+ <vscale x 4 x bfloat> %0,
+ <vscale x 4 x bfloat> %1,
+ <vscale x 4 x bfloat> %2,
+ iXLen 0, iXLen %3, iXLen 0)
+
+ ret <vscale x 4 x bfloat> %a
+}
+
+declare <vscale x 4 x bfloat> @llvm.riscv.vfmsac.mask.nxv4bf16.nxv4bf16(
+ <vscale x 4 x bfloat>,
+ <vscale x 4 x bfloat>,
+ <vscale x 4 x bfloat>,
+ <vscale x 4 x i1>,
+ iXLen, iXLen, iXLen);
+
+define <vscale x 4 x bfloat> @intrinsic_vfmsac_mask_vv_nxv4bf16_nxv4bf16_nxv4bf16(<vscale x 4 x bfloat> %0, <vscale x 4 x bfloat> %1, <vscale x 4 x bfloat> %2, <vscale x 4 x i1> %3, iXLen %4) nounwind {
+; CHECK-LABEL: intrinsic_vfmsac_mask_vv_nxv4bf16_nxv4bf16_nxv4bf16:
+; CHECK: # %bb.0: # %entry
+; CHECK-NEXT: fsrmi a1, 0
+; CHECK-NEXT: vsetvli zero, a0, e16alt, m1, tu, mu
+; CHECK-NEXT: vfmsac.vv v8, v9, v10, v0.t
+; CHECK-NEXT: fsrm a1
+; CHECK-NEXT: ret
+entry:
+ %a = call <vscale x 4 x bfloat> @llvm.riscv.vfmsac.mask.nxv4bf16.nxv4bf16(
+ <vscale x 4 x bfloat> %0,
+ <vscale x 4 x bfloat> %1,
+ <vscale x 4 x bfloat> %2,
+ <vscale x 4 x i1> %3,
+ iXLen 0, iXLen %4, iXLen 0);
+
+ ret <vscale x 4 x bfloat> %a
+}
+
+declare <vscale x 8 x bfloat> @llvm.riscv.vfmsac.nxv8bf16.nxv8bf16(
+ <vscale x 8 x bfloat>,
+ <vscale x 8 x bfloat>,
+ <vscale x 8 x bfloat>,
+ iXLen, iXLen, iXLen);
+
+define <vscale x 8 x bfloat> @intrinsic_vfmsac_vv_nxv8bf16_nxv8bf16_nxv8bf16(<vscale x 8 x bfloat> %0, <vscale x 8 x bfloat> %1, <vscale x 8 x bfloat> %2, iXLen %3) nounwind {
+; CHECK-LABEL: intrinsic_vfmsac_vv_nxv8bf16_nxv8bf16_nxv8bf16:
+; CHECK: # %bb.0: # %entry
+; CHECK-NEXT: fsrmi a1, 0
+; CHECK-NEXT: vsetvli zero, a0, e16alt, m2, tu, ma
+; CHECK-NEXT: vfmsac.vv v8, v10, v12
+; CHECK-NEXT: fsrm a1
+; CHECK-NEXT: ret
+entry:
+ %a = call <vscale x 8 x bfloat> @llvm.riscv.vfmsac.nxv8bf16.nxv8bf16(
+ <vscale x 8 x bfloat> %0,
+ <vscale x 8 x bfloat> %1,
+ <vscale x 8 x bfloat> %2,
+ iXLen 0, iXLen %3, iXLen 0)
+
+ ret <vscale x 8 x bfloat> %a
+}
+
+declare <vscale x 8 x bfloat> @llvm.riscv.vfmsac.mask.nxv8bf16.nxv8bf16(
+ <vscale x 8 x bfloat>,
+ <vscale x 8 x bfloat>,
+ <vscale x 8 x bfloat>,
+ <vscale x 8 x i1>,
+ iXLen, iXLen, iXLen);
+
+define <vscale x 8 x bfloat> @intrinsic_vfmsac_mask_vv_nxv8bf16_nxv8bf16_nxv8bf16(<vscale x 8 x bfloat> %0, <vscale x 8 x bfloat> %1, <vscale x 8 x bfloat> %2, <vscale x 8 x i1> %3, iXLen %4) nounwind {
+; CHECK-LABEL: intrinsic_vfmsac_mask_vv_nxv8bf16_nxv8bf16_nxv8bf16:
+; CHECK: # %bb.0: # %entry
+; CHECK-NEXT: fsrmi a1, 0
+; CHECK-NEXT: vsetvli zero, a0, e16alt, m2, tu, mu
+; CHECK-NEXT: vfmsac.vv v8, v10, v12, v0.t
+; CHECK-NEXT: fsrm a1
+; CHECK-NEXT: ret
+entry:
+ %a = call <vscale x 8 x bfloat> @llvm.riscv.vfmsac.mask.nxv8bf16.nxv8bf16(
+ <vscale x 8 x bfloat> %0,
+ <vscale x 8 x bfloat> %1,
+ <vscale x 8 x bfloat> %2,
+ <vscale x 8 x i1> %3,
+ iXLen 0, iXLen %4, iXLen 0);
+
+ ret <vscale x 8 x bfloat> %a
+}
+
+declare <vscale x 16 x bfloat> @llvm.riscv.vfmsac.nxv16bf16.nxv16bf16(
+ <vscale x 16 x bfloat>,
+ <vscale x 16 x bfloat>,
+ <vscale x 16 x bfloat>,
+ iXLen, iXLen, iXLen);
+
+define <vscale x 16 x bfloat> @intrinsic_vfmsac_vv_nxv16bf16_nxv16bf16_nxv16bf16(<vscale x 16 x bfloat> %0, <vscale x 16 x bfloat> %1, <vscale x 16 x bfloat> %2, iXLen %3) nounwind {
+; CHECK-LABEL: intrinsic_vfmsac_vv_nxv16bf16_nxv16bf16_nxv16bf16:
+; CHECK: # %bb.0: # %entry
+; CHECK-NEXT: fsrmi a1, 0
+; CHECK-NEXT: vsetvli zero, a0, e16alt, m4, tu, ma
+; CHECK-NEXT: vfmsac.vv v8, v12, v16
+; CHECK-NEXT: fsrm a1
+; CHECK-NEXT: ret
+entry:
+ %a = call <vscale x 16 x bfloat> @llvm.riscv.vfmsac.nxv16bf16.nxv16bf16(
+ <vscale x 16 x bfloat> %0,
+ <vscale x 16 x bfloat> %1,
+ <vscale x 16 x bfloat> %2,
+ iXLen 0, iXLen %3, iXLen 0)
+
+ ret <vscale x 16 x bfloat> %a
+}
+
+declare <vscale x 16 x bfloat> @llvm.riscv.vfmsac.mask.nxv16bf16.nxv16bf16(
+ <vscale x 16 x bfloat>,
+ <vscale x 16 x bfloat>,
+ <vscale x 16 x bfloat>,
+ <vscale x 16 x i1>,
+ iXLen, iXLen, iXLen);
+
+define <vscale x 16 x bfloat> @intrinsic_vfmsac_mask_vv_nxv16bf16_nxv16bf16_nxv16bf16(<vscale x 16 x bfloat> %0, <vscale x 16 x bfloat> %1, <vscale x 16 x bfloat> %2, <vscale x 16 x i1> %3, iXLen %4) nounwind {
+; CHECK-LABEL: intrinsic_vfmsac_mask_vv_nxv16bf16_nxv16bf16_nxv16bf16:
+; CHECK: # %bb.0: # %entry
+; CHECK-NEXT: fsrmi a1, 0
+; CHECK-NEXT: vsetvli zero, a0, e16alt, m4, tu, mu
+; CHECK-NEXT: vfmsac.vv v8, v12, v16, v0.t
+; CHECK-NEXT: fsrm a1
+; CHECK-NEXT: ret
+entry:
+ %a = call <vscale x 16 x bfloat> @llvm.riscv.vfmsac.mask.nxv16bf16.nxv16bf16(
+ <vscale x 16 x bfloat> %0,
+ <vscale x 16 x bfloat> %1,
+ <vscale x 16 x bfloat> %2,
+ <vscale x 16 x i1> %3,
+ iXLen 0, iXLen %4, iXLen 0);
+
+ ret <vscale x 16 x bfloat> %a
+}
+
+declare <vscale x 1 x bfloat> @llvm.riscv.vfmsac.nxv1bf16.bf16(
+ <vscale x 1 x bfloat>,
+ bfloat,
+ <vscale x 1 x bfloat>,
+ iXLen, iXLen, iXLen);
+
+define <vscale x 1 x bfloat> @intrinsic_vfmsac_vf_nxv1bf16_bf16_nxv1bf16(<vscale x 1 x bfloat> %0, bfloat %1, <vscale x 1 x bfloat> %2, iXLen %3) nounwind {
+; CHECK-LABEL: intrinsic_vfmsac_vf_nxv1bf16_bf16_nxv1bf16:
+; CHECK: # %bb.0: # %entry
+; CHECK-NEXT: fsrmi a1, 0
+; CHECK-NEXT: vsetvli zero, a0, e16alt, mf4, tu, ma
+; CHECK-NEXT: vfmsac.vf v8, fa0, v9
+; CHECK-NEXT: fsrm a1
+; CHECK-NEXT: ret
+entry:
+ %a = call <vscale x 1 x bfloat> @llvm.riscv.vfmsac.nxv1bf16.bf16(
+ <vscale x 1 x bfloat> %0,
+ bfloat %1,
+ <vscale x 1 x bfloat> %2,
+ iXLen 0, iXLen %3, iXLen 0)
+
+ ret <vscale x 1 x bfloat> %a
+}
+
+declare <vscale x 1 x bfloat> @llvm.riscv.vfmsac.mask.nxv1bf16.bf16(
+ <vscale x 1 x bfloat>,
+ bfloat,
+ <vscale x 1 x bfloat>,
+ <vscale x 1 x i1>,
+ iXLen, iXLen, iXLen);
+
+define <vscale x 1 x bfloat> @intrinsic_vfmsac_mask_vf_nxv1bf16_bf16_nxv1bf16(<vscale x 1 x bfloat> %0, bfloat %1, <vscale x 1 x bfloat> %2, <vscale x 1 x i1> %3, iXLen %4) nounwind {
+; CHECK-LABEL: intrinsic_vfmsac_mask_vf_nxv1bf16_bf16_nxv1bf16:
+; CHECK: # %bb.0: # %entry
+; CHECK-NEXT: fsrmi a1, 0
+; CHECK-NEXT: vsetvli zero, a0, e16alt, mf4, tu, mu
+; CHECK-NEXT: vfmsac.vf v8, fa0, v9, v0.t
+; CHECK-NEXT: fsrm a1
+; CHECK-NEXT: ret
+entry:
+ %a = call <vscale x 1 x bfloat> @llvm.riscv.vfmsac.mask.nxv1bf16.bf16(
+ <vscale x 1 x bfloat> %0,
+ bfloat %1,
+ <vscale x 1 x bfloat> %2,
+ <vscale x 1 x i1> %3,
+ iXLen 0, iXLen %4, iXLen 0);
+
+ ret <vscale x 1 x bfloat> %a
+}
+
+declare <vscale x 2 x bfloat> @llvm.riscv.vfmsac.nxv2bf16.bf16(
+ <vscale x 2 x bfloat>,
+ bfloat,
+ <vscale x 2 x bfloat>,
+ iXLen, iXLen, iXLen);
+
+define <vscale x 2 x bfloat> @intrinsic_vfmsac_vf_nxv2bf16_bf16_nxv2bf16(<vscale x 2 x bfloat> %0, bfloat %1, <vscale x 2 x bfloat> %2, iXLen %3) nounwind {
+; CHECK-LABEL: intrinsic_vfmsac_vf_nxv2bf16_bf16_nxv2bf16:
+; CHECK: # %bb.0: # %entry
+; CHECK-NEXT: fsrmi a1, 0
+; CHECK-NEXT: vsetvli zero, a0, e16alt, mf2, tu, ma
+; CHECK-NEXT: vfmsac.vf v8, fa0, v9
+; CHECK-NEXT: fsrm a1
+; CHECK-NEXT: ret
+entry:
+ %a = call <vscale x 2 x bfloat> @llvm.riscv.vfmsac.nxv2bf16.bf16(
+ <vscale x 2 x bfloat> %0,
+ bfloat %1,
+ <vscale x 2 x bfloat> %2,
+ iXLen 0, iXLen %3, iXLen 0)
+
+ ret <vscale x 2 x bfloat> %a
+}
+
+declare <vscale x 2 x bfloat> @llvm.riscv.vfmsac.mask.nxv2bf16.bf16(
+ <vscale x 2 x bfloat>,
+ bfloat,
+ <vscale x 2 x bfloat>,
+ <vscale x 2 x i1>,
+ iXLen, iXLen, iXLen);
+
+define <vscale x 2 x bfloat> @intrinsic_vfmsac_mask_vf_nxv2bf16_bf16_nxv2bf16(<vscale x 2 x bfloat> %0, bfloat %1, <vscale x 2 x bfloat> %2, <vscale x 2 x i1> %3, iXLen %4) nounwind {
+; CHECK-LABEL: intrinsic_vfmsac_mask_vf_nxv2bf16_bf16_nxv2bf16:
+; CHECK: # %bb.0: # %entry
+; CHECK-NEXT: fsrmi a1, 0
+; CHECK-NEXT: vsetvli zero, a0, e16alt, mf2, tu, mu
+; CHECK-NEXT: vfmsac.vf v8, fa0, v9, v0.t
+; CHECK-NEXT: fsrm a1
+; CHECK-NEXT: ret
+entry:
+ %a = call <vscale x 2 x bfloat> @llvm.riscv.vfmsac.mask.nxv2bf16.bf16(
+ <vscale x 2 x bfloat> %0,
+ bfloat %1,
+ <vscale x 2 x bfloat> %2,
+ <vscale x 2 x i1> %3,
+ iXLen 0, iXLen %4, iXLen 0);
+
+ ret <vscale x 2 x bfloat> %a
+}
+
+declare <vscale x 4 x bfloat> @llvm.riscv.vfmsac.nxv4bf16.bf16(
+ <vscale x 4 x bfloat>,
+ bfloat,
+ <vscale x 4 x bfloat>,
+ iXLen, iXLen, iXLen);
+
+define <vscale x 4 x bfloat> @intrinsic_vfmsac_vf_nxv4bf16_bf16_nxv4bf16(<vscale x 4 x bfloat> %0, bfloat %1, <vscale x 4 x bfloat> %2, iXLen %3) nounwind {
+; CHECK-LABEL: intrinsic_vfmsac_vf_nxv4bf16_bf16_nxv4bf16:
+; CHECK: # %bb.0: # %entry
+; CHECK-NEXT: fsrmi a1, 0
+; CHECK-NEXT: vsetvli zero, a0, e16alt, m1, tu, ma
+; CHECK-NEXT: vfmsac.vf v8, fa0, v9
+; CHECK-NEXT: fsrm a1
+; CHECK-NEXT: ret
+entry:
+ %a = call <vscale x 4 x bfloat> @llvm.riscv.vfmsac.nxv4bf16.bf16(
+ <vscale x 4 x bfloat> %0,
+ bfloat %1,
+ <vscale x 4 x bfloat> %2,
+ iXLen 0, iXLen %3, iXLen 0)
+
+ ret <vscale x 4 x bfloat> %a
+}
+
+declare <vscale x 4 x bfloat> @llvm.riscv.vfmsac.mask.nxv4bf16.bf16(
+ <vscale x 4 x bfloat>,
+ bfloat,
+ <vscale x 4 x bfloat>,
+ <vscale x 4 x i1>,
+ iXLen, iXLen, iXLen);
+
+define <vscale x 4 x bfloat> @intrinsic_vfmsac_mask_vf_nxv4bf16_bf16_nxv4bf16(<vscale x 4 x bfloat> %0, bfloat %1, <vscale x 4 x bfloat> %2, <vscale x 4 x i1> %3, iXLen %4) nounwind {
+; CHECK-LABEL: intrinsic_vfmsac_mask_vf_nxv4bf16_bf16_nxv4bf16:
+; CHECK: # %bb.0: # %entry
+; CHECK-NEXT: fsrmi a1, 0
+; CHECK-NEXT: vsetvli zero, a0, e16alt, m1, tu, mu
+; CHECK-NEXT: vfmsac.vf v8, fa0, v9, v0.t
+; CHECK-NEXT: fsrm a1
+; CHECK-NEXT: ret
+entry:
+ %a = call <vscale x 4 x bfloat> @llvm.riscv.vfmsac.mask.nxv4bf16.bf16(
+ <vscale x 4 x bfloat> %0,
+ bfloat %1,
+ <vscale x 4 x bfloat> %2,
+ <vscale x 4 x i1> %3,
+ iXLen 0, iXLen %4, iXLen 0);
+
+ ret <vscale x 4 x bfloat> %a
+}
+
+declare <vscale x 8 x bfloat> @llvm.riscv.vfmsac.nxv8bf16.bf16(
+ <vscale x 8 x bfloat>,
+ bfloat,
+ <vscale x 8 x bfloat>,
+ iXLen, iXLen, iXLen);
+
+define <vscale x 8 x bfloat> @intrinsic_vfmsac_vf_nxv8bf16_bf16_nxv8bf16(<vscale x 8 x bfloat> %0, bfloat %1, <vscale x 8 x bfloat> %2, iXLen %3) nounwind {
+; CHECK-LABEL: intrinsic_vfmsac_vf_nxv8bf16_bf16_nxv8bf16:
+; CHECK: # %bb.0: # %entry
+; CHECK-NEXT: fsrmi a1, 0
+; CHECK-NEXT: vsetvli zero, a0, e16alt, m2, tu, ma
+; CHECK-NEXT: vfmsac.vf v8, fa0, v10
+; CHECK-NEXT: fsrm a1
+; CHECK-NEXT: ret
+entry:
+ %a = call <vscale x 8 x bfloat> @llvm.riscv.vfmsac.nxv8bf16.bf16(
+ <vscale x 8 x bfloat> %0,
+ bfloat %1,
+ <vscale x 8 x bfloat> %2,
+ iXLen 0, iXLen %3, iXLen 0)
+
+ ret <vscale x 8 x bfloat> %a
+}
+
+declare <vscale x 8 x bfloat> @llvm.riscv.vfmsac.mask.nxv8bf16.bf16(
+ <vscale x 8 x bfloat>,
+ bfloat,
+ <vscale x 8 x bfloat>,
+ <vscale x 8 x i1>,
+ iXLen, iXLen, iXLen);
+
+define <vscale x 8 x bfloat> @intrinsic_vfmsac_mask_vf_nxv8bf16_bf16_nxv8bf16(<vscale x 8 x bfloat> %0, bfloat %1, <vscale x 8 x bfloat> %2, <vscale x 8 x i1> %3, iXLen %4) nounwind {
+; CHECK-LABEL: intrinsic_vfmsac_mask_vf_nxv8bf16_bf16_nxv8bf16:
+; CHECK: # %bb.0: # %entry
+; CHECK-NEXT: fsrmi a1, 0
+; CHECK-NEXT: vsetvli zero, a0, e16alt, m2, tu, mu
+; CHECK-NEXT: vfmsac.vf v8, fa0, v10, v0.t
+; CHECK-NEXT: fsrm a1
+; CHECK-NEXT: ret
+entry:
+ %a = call <vscale x 8 x bfloat> @llvm.riscv.vfmsac.mask.nxv8bf16.bf16(
+ <vscale x 8 x bfloat> %0,
+ bfloat %1,
+ <vscale x 8 x bfloat> %2,
+ <vscale x 8 x i1> %3,
+ iXLen 0, iXLen %4, iXLen 0);
+
+ ret <vscale x 8 x bfloat> %a
+}
+
+declare <vscale x 16 x bfloat> @llvm.riscv.vfmsac.nxv16bf16.bf16(
+ <vscale x 16 x bfloat>,
+ bfloat,
+ <vscale x 16 x bfloat>,
+ iXLen, iXLen, iXLen);
+
+define <vscale x 16 x bfloat> @intrinsic_vfmsac_vf_nxv16bf16_bf16_nxv16bf16(<vscale x 16 x bfloat> %0, bfloat %1, <vscale x 16 x bfloat> %2, iXLen %3) nounwind {
+; CHECK-LABEL: intrinsic_vfmsac_vf_nxv16bf16_bf16_nxv16bf16:
+; CHECK: # %bb.0: # %entry
+; CHECK-NEXT: fsrmi a1, 0
+; CHECK-NEXT: vsetvli zero, a0, e16alt, m4, tu, ma
+; CHECK-NEXT: vfmsac.vf v8, fa0, v12
+; CHECK-NEXT: fsrm a1
+; CHECK-NEXT: ret
+entry:
+ %a = call <vscale x 16 x bfloat> @llvm.riscv.vfmsac.nxv16bf16.bf16(
+ <vscale x 16 x bfloat> %0,
+ bfloat %1,
+ <vscale x 16 x bfloat> %2,
+ iXLen 0, iXLen %3, iXLen 0)
+
+ ret <vscale x 16 x bfloat> %a
+}
+
+declare <vscale x 16 x bfloat> @llvm.riscv.vfmsac.mask.nxv16bf16.bf16(
+ <vscale x 16 x bfloat>,
+ bfloat,
+ <vscale x 16 x bfloat>,
+ <vscale x 16 x i1>,
+ iXLen, iXLen, iXLen);
+
+define <vscale x 16 x bfloat> @intrinsic_vfmsac_mask_vf_nxv16bf16_bf16_nxv16bf16(<vscale x 16 x bfloat> %0, bfloat %1, <vscale x 16 x bfloat> %2, <vscale x 16 x i1> %3, iXLen %4) nounwind {
+; CHECK-LABEL: intrinsic_vfmsac_mask_vf_nxv16bf16_bf16_nxv16bf16:
+; CHECK: # %bb.0: # %entry
+; CHECK-NEXT: fsrmi a1, 0
+; CHECK-NEXT: vsetvli zero, a0, e16alt, m4, tu, mu
+; CHECK-NEXT: vfmsac.vf v8, fa0, v12, v0.t
+; CHECK-NEXT: fsrm a1
+; CHECK-NEXT: ret
+entry:
+ %a = call <vscale x 16 x bfloat> @llvm.riscv.vfmsac.mask.nxv16bf16.bf16(
+ <vscale x 16 x bfloat> %0,
+ bfloat %1,
+ <vscale x 16 x bfloat> %2,
+ <vscale x 16 x i1> %3,
+ iXLen 0, iXLen %4, iXLen 0);
+
+ ret <vscale x 16 x bfloat> %a
+}
+
+define <vscale x 1 x bfloat> @intrinsic_vfmsac_vv_nxv1bf16_nxv1bf16_nxv1bf16_commute(<vscale x 1 x bfloat> %0, <vscale x 1 x bfloat> %1, <vscale x 1 x bfloat> %2, iXLen %3) nounwind {
+; CHECK-LABEL: intrinsic_vfmsac_vv_nxv1bf16_nxv1bf16_nxv1bf16_commute:
+; CHECK: # %bb.0: # %entry
+; CHECK-NEXT: vsetvli zero, a0, e16alt, mf4, ta, ma
+; CHECK-NEXT: vfmsub.vv v8, v10, v9
+; CHECK-NEXT: ret
+entry:
+ %a = call <vscale x 1 x bfloat> @llvm.riscv.vfmsac.nxv1bf16.nxv1bf16(
+ <vscale x 1 x bfloat> %1,
+ <vscale x 1 x bfloat> %0,
+ <vscale x 1 x bfloat> %2,
+ iXLen 7, iXLen %3, iXLen 3)
+
+ ret <vscale x 1 x bfloat> %a
+}
+
+define <vscale x 1 x bfloat> @intrinsic_vfmsac_vv_nxv1bf16_nxv1bf16_nxv1bf16_commute2(<vscale x 1 x bfloat> %0, <vscale x 1 x bfloat> %1, <vscale x 1 x bfloat> %2, iXLen %3) nounwind {
+; CHECK-LABEL: intrinsic_vfmsac_vv_nxv1bf16_nxv1bf16_nxv1bf16_commute2:
+; CHECK: # %bb.0: # %entry
+; CHECK-NEXT: vsetvli zero, a0, e16alt, mf4, ta, ma
+; CHECK-NEXT: vfmsub.vv v8, v10, v9
+; CHECK-NEXT: ret
+entry:
+ %a = call <vscale x 1 x bfloat> @llvm.riscv.vfmsac.nxv1bf16.nxv1bf16(
+ <vscale x 1 x bfloat> %1,
+ <vscale x 1 x bfloat> %2,
+ <vscale x 1 x bfloat> %0,
+ iXLen 7, iXLen %3, iXLen 3)
+
+ ret <vscale x 1 x bfloat> %a
+}
+
+define <vscale x 1 x bfloat> @intrinsic_vfmsac_vf_nxv1bf16_bf16_nxv1bf16_commute(<vscale x 1 x bfloat> %0, bfloat %1, <vscale x 1 x bfloat> %2, iXLen %3) nounwind {
+; CHECK-LABEL: intrinsic_vfmsac_vf_nxv1bf16_bf16_nxv1bf16_commute:
+; CHECK: # %bb.0: # %entry
+; CHECK-NEXT: vsetvli zero, a0, e16alt, mf4, ta, ma
+; CHECK-NEXT: vfmsub.vf v8, fa0, v9
+; CHECK-NEXT: ret
+entry:
+ %a = call <vscale x 1 x bfloat> @llvm.riscv.vfmsac.nxv1bf16.bf16(
+ <vscale x 1 x bfloat> %2,
+ bfloat %1,
+ <vscale x 1 x bfloat> %0,
+ iXLen 7, iXLen %3, iXLen 3)
+
+ ret <vscale x 1 x bfloat> %a
+}
diff --git a/llvm/test/CodeGen/RISCV/rvv/vfmsub-bf.ll b/llvm/test/CodeGen/RISCV/rvv/vfmsub-bf.ll
new file mode 100644
index 0000000..6838f37
--- /dev/null
+++ b/llvm/test/CodeGen/RISCV/rvv/vfmsub-bf.ll
@@ -0,0 +1,553 @@
+; NOTE: Assertions have been autogenerated by utils/update_llc_test_checks.py
+; RUN: sed 's/iXLen/i32/g' %s | llc -mtriple=riscv32 -mattr=+v,+experimental-zvfbfa \
+; RUN: -verify-machineinstrs -target-abi=ilp32d | FileCheck %s
+; RUN: sed 's/iXLen/i64/g' %s | llc -mtriple=riscv64 -mattr=+v,+experimental-zvfbfa \
+; RUN: -verify-machineinstrs -target-abi=lp64d | FileCheck %s
+
+declare <vscale x 1 x bfloat> @llvm.riscv.vfmsub.nxv1bf16.nxv1bf16(
+ <vscale x 1 x bfloat>,
+ <vscale x 1 x bfloat>,
+ <vscale x 1 x bfloat>,
+ iXLen, iXLen, iXLen);
+
+define <vscale x 1 x bfloat> @intrinsic_vfmsub_vv_nxv1bf16_nxv1bf16_nxv1bf16(<vscale x 1 x bfloat> %0, <vscale x 1 x bfloat> %1, <vscale x 1 x bfloat> %2, iXLen %3) nounwind {
+; CHECK-LABEL: intrinsic_vfmsub_vv_nxv1bf16_nxv1bf16_nxv1bf16:
+; CHECK: # %bb.0: # %entry
+; CHECK-NEXT: fsrmi a1, 0
+; CHECK-NEXT: vsetvli zero, a0, e16alt, mf4, tu, ma
+; CHECK-NEXT: vfmsub.vv v8, v9, v10
+; CHECK-NEXT: fsrm a1
+; CHECK-NEXT: ret
+entry:
+ %a = call <vscale x 1 x bfloat> @llvm.riscv.vfmsub.nxv1bf16.nxv1bf16(
+ <vscale x 1 x bfloat> %0,
+ <vscale x 1 x bfloat> %1,
+ <vscale x 1 x bfloat> %2,
+ iXLen 0, iXLen %3, iXLen 0)
+
+ ret <vscale x 1 x bfloat> %a
+}
+
+declare <vscale x 1 x bfloat> @llvm.riscv.vfmsub.mask.nxv1bf16.nxv1bf16(
+ <vscale x 1 x bfloat>,
+ <vscale x 1 x bfloat>,
+ <vscale x 1 x bfloat>,
+ <vscale x 1 x i1>,
+ iXLen, iXLen, iXLen);
+
+define <vscale x 1 x bfloat> @intrinsic_vfmsub_mask_vv_nxv1bf16_nxv1bf16_nxv1bf16(<vscale x 1 x bfloat> %0, <vscale x 1 x bfloat> %1, <vscale x 1 x bfloat> %2, <vscale x 1 x i1> %3, iXLen %4) nounwind {
+; CHECK-LABEL: intrinsic_vfmsub_mask_vv_nxv1bf16_nxv1bf16_nxv1bf16:
+; CHECK: # %bb.0: # %entry
+; CHECK-NEXT: fsrmi a1, 0
+; CHECK-NEXT: vsetvli zero, a0, e16alt, mf4, tu, mu
+; CHECK-NEXT: vfmsub.vv v8, v9, v10, v0.t
+; CHECK-NEXT: fsrm a1
+; CHECK-NEXT: ret
+entry:
+ %a = call <vscale x 1 x bfloat> @llvm.riscv.vfmsub.mask.nxv1bf16.nxv1bf16(
+ <vscale x 1 x bfloat> %0,
+ <vscale x 1 x bfloat> %1,
+ <vscale x 1 x bfloat> %2,
+ <vscale x 1 x i1> %3,
+ iXLen 0, iXLen %4, iXLen 0);
+
+ ret <vscale x 1 x bfloat> %a
+}
+
+declare <vscale x 2 x bfloat> @llvm.riscv.vfmsub.nxv2bf16.nxv2bf16(
+ <vscale x 2 x bfloat>,
+ <vscale x 2 x bfloat>,
+ <vscale x 2 x bfloat>,
+ iXLen, iXLen, iXLen);
+
+define <vscale x 2 x bfloat> @intrinsic_vfmsub_vv_nxv2bf16_nxv2bf16_nxv2bf16(<vscale x 2 x bfloat> %0, <vscale x 2 x bfloat> %1, <vscale x 2 x bfloat> %2, iXLen %3) nounwind {
+; CHECK-LABEL: intrinsic_vfmsub_vv_nxv2bf16_nxv2bf16_nxv2bf16:
+; CHECK: # %bb.0: # %entry
+; CHECK-NEXT: fsrmi a1, 0
+; CHECK-NEXT: vsetvli zero, a0, e16alt, mf2, tu, ma
+; CHECK-NEXT: vfmsub.vv v8, v9, v10
+; CHECK-NEXT: fsrm a1
+; CHECK-NEXT: ret
+entry:
+ %a = call <vscale x 2 x bfloat> @llvm.riscv.vfmsub.nxv2bf16.nxv2bf16(
+ <vscale x 2 x bfloat> %0,
+ <vscale x 2 x bfloat> %1,
+ <vscale x 2 x bfloat> %2,
+ iXLen 0, iXLen %3, iXLen 0)
+
+ ret <vscale x 2 x bfloat> %a
+}
+
+declare <vscale x 2 x bfloat> @llvm.riscv.vfmsub.mask.nxv2bf16.nxv2bf16(
+ <vscale x 2 x bfloat>,
+ <vscale x 2 x bfloat>,
+ <vscale x 2 x bfloat>,
+ <vscale x 2 x i1>,
+ iXLen, iXLen, iXLen);
+
+define <vscale x 2 x bfloat> @intrinsic_vfmsub_mask_vv_nxv2bf16_nxv2bf16_nxv2bf16(<vscale x 2 x bfloat> %0, <vscale x 2 x bfloat> %1, <vscale x 2 x bfloat> %2, <vscale x 2 x i1> %3, iXLen %4) nounwind {
+; CHECK-LABEL: intrinsic_vfmsub_mask_vv_nxv2bf16_nxv2bf16_nxv2bf16:
+; CHECK: # %bb.0: # %entry
+; CHECK-NEXT: fsrmi a1, 0
+; CHECK-NEXT: vsetvli zero, a0, e16alt, mf2, tu, mu
+; CHECK-NEXT: vfmsub.vv v8, v9, v10, v0.t
+; CHECK-NEXT: fsrm a1
+; CHECK-NEXT: ret
+entry:
+ %a = call <vscale x 2 x bfloat> @llvm.riscv.vfmsub.mask.nxv2bf16.nxv2bf16(
+ <vscale x 2 x bfloat> %0,
+ <vscale x 2 x bfloat> %1,
+ <vscale x 2 x bfloat> %2,
+ <vscale x 2 x i1> %3,
+ iXLen 0, iXLen %4, iXLen 0);
+
+ ret <vscale x 2 x bfloat> %a
+}
+
+declare <vscale x 4 x bfloat> @llvm.riscv.vfmsub.nxv4bf16.nxv4bf16(
+ <vscale x 4 x bfloat>,
+ <vscale x 4 x bfloat>,
+ <vscale x 4 x bfloat>,
+ iXLen, iXLen, iXLen);
+
+define <vscale x 4 x bfloat> @intrinsic_vfmsub_vv_nxv4bf16_nxv4bf16_nxv4bf16(<vscale x 4 x bfloat> %0, <vscale x 4 x bfloat> %1, <vscale x 4 x bfloat> %2, iXLen %3) nounwind {
+; CHECK-LABEL: intrinsic_vfmsub_vv_nxv4bf16_nxv4bf16_nxv4bf16:
+; CHECK: # %bb.0: # %entry
+; CHECK-NEXT: fsrmi a1, 0
+; CHECK-NEXT: vsetvli zero, a0, e16alt, m1, tu, ma
+; CHECK-NEXT: vfmsub.vv v8, v9, v10
+; CHECK-NEXT: fsrm a1
+; CHECK-NEXT: ret
+entry:
+ %a = call <vscale x 4 x bfloat> @llvm.riscv.vfmsub.nxv4bf16.nxv4bf16(
+ <vscale x 4 x bfloat> %0,
+ <vscale x 4 x bfloat> %1,
+ <vscale x 4 x bfloat> %2,
+ iXLen 0, iXLen %3, iXLen 0)
+
+ ret <vscale x 4 x bfloat> %a
+}
+
+declare <vscale x 4 x bfloat> @llvm.riscv.vfmsub.mask.nxv4bf16.nxv4bf16(
+ <vscale x 4 x bfloat>,
+ <vscale x 4 x bfloat>,
+ <vscale x 4 x bfloat>,
+ <vscale x 4 x i1>,
+ iXLen, iXLen, iXLen);
+
+define <vscale x 4 x bfloat> @intrinsic_vfmsub_mask_vv_nxv4bf16_nxv4bf16_nxv4bf16(<vscale x 4 x bfloat> %0, <vscale x 4 x bfloat> %1, <vscale x 4 x bfloat> %2, <vscale x 4 x i1> %3, iXLen %4) nounwind {
+; CHECK-LABEL: intrinsic_vfmsub_mask_vv_nxv4bf16_nxv4bf16_nxv4bf16:
+; CHECK: # %bb.0: # %entry
+; CHECK-NEXT: fsrmi a1, 0
+; CHECK-NEXT: vsetvli zero, a0, e16alt, m1, tu, mu
+; CHECK-NEXT: vfmsub.vv v8, v9, v10, v0.t
+; CHECK-NEXT: fsrm a1
+; CHECK-NEXT: ret
+entry:
+ %a = call <vscale x 4 x bfloat> @llvm.riscv.vfmsub.mask.nxv4bf16.nxv4bf16(
+ <vscale x 4 x bfloat> %0,
+ <vscale x 4 x bfloat> %1,
+ <vscale x 4 x bfloat> %2,
+ <vscale x 4 x i1> %3,
+ iXLen 0, iXLen %4, iXLen 0);
+
+ ret <vscale x 4 x bfloat> %a
+}
+
+declare <vscale x 8 x bfloat> @llvm.riscv.vfmsub.nxv8bf16.nxv8bf16(
+ <vscale x 8 x bfloat>,
+ <vscale x 8 x bfloat>,
+ <vscale x 8 x bfloat>,
+ iXLen, iXLen, iXLen);
+
+define <vscale x 8 x bfloat> @intrinsic_vfmsub_vv_nxv8bf16_nxv8bf16_nxv8bf16(<vscale x 8 x bfloat> %0, <vscale x 8 x bfloat> %1, <vscale x 8 x bfloat> %2, iXLen %3) nounwind {
+; CHECK-LABEL: intrinsic_vfmsub_vv_nxv8bf16_nxv8bf16_nxv8bf16:
+; CHECK: # %bb.0: # %entry
+; CHECK-NEXT: fsrmi a1, 0
+; CHECK-NEXT: vsetvli zero, a0, e16alt, m2, tu, ma
+; CHECK-NEXT: vfmsub.vv v8, v10, v12
+; CHECK-NEXT: fsrm a1
+; CHECK-NEXT: ret
+entry:
+ %a = call <vscale x 8 x bfloat> @llvm.riscv.vfmsub.nxv8bf16.nxv8bf16(
+ <vscale x 8 x bfloat> %0,
+ <vscale x 8 x bfloat> %1,
+ <vscale x 8 x bfloat> %2,
+ iXLen 0, iXLen %3, iXLen 0)
+
+ ret <vscale x 8 x bfloat> %a
+}
+
+declare <vscale x 8 x bfloat> @llvm.riscv.vfmsub.mask.nxv8bf16.nxv8bf16(
+ <vscale x 8 x bfloat>,
+ <vscale x 8 x bfloat>,
+ <vscale x 8 x bfloat>,
+ <vscale x 8 x i1>,
+ iXLen, iXLen, iXLen);
+
+define <vscale x 8 x bfloat> @intrinsic_vfmsub_mask_vv_nxv8bf16_nxv8bf16_nxv8bf16(<vscale x 8 x bfloat> %0, <vscale x 8 x bfloat> %1, <vscale x 8 x bfloat> %2, <vscale x 8 x i1> %3, iXLen %4) nounwind {
+; CHECK-LABEL: intrinsic_vfmsub_mask_vv_nxv8bf16_nxv8bf16_nxv8bf16:
+; CHECK: # %bb.0: # %entry
+; CHECK-NEXT: fsrmi a1, 0
+; CHECK-NEXT: vsetvli zero, a0, e16alt, m2, tu, mu
+; CHECK-NEXT: vfmsub.vv v8, v10, v12, v0.t
+; CHECK-NEXT: fsrm a1
+; CHECK-NEXT: ret
+entry:
+ %a = call <vscale x 8 x bfloat> @llvm.riscv.vfmsub.mask.nxv8bf16.nxv8bf16(
+ <vscale x 8 x bfloat> %0,
+ <vscale x 8 x bfloat> %1,
+ <vscale x 8 x bfloat> %2,
+ <vscale x 8 x i1> %3,
+ iXLen 0, iXLen %4, iXLen 0);
+
+ ret <vscale x 8 x bfloat> %a
+}
+
+declare <vscale x 16 x bfloat> @llvm.riscv.vfmsub.nxv16bf16.nxv16bf16(
+ <vscale x 16 x bfloat>,
+ <vscale x 16 x bfloat>,
+ <vscale x 16 x bfloat>,
+ iXLen, iXLen, iXLen);
+
+define <vscale x 16 x bfloat> @intrinsic_vfmsub_vv_nxv16bf16_nxv16bf16_nxv16bf16(<vscale x 16 x bfloat> %0, <vscale x 16 x bfloat> %1, <vscale x 16 x bfloat> %2, iXLen %3) nounwind {
+; CHECK-LABEL: intrinsic_vfmsub_vv_nxv16bf16_nxv16bf16_nxv16bf16:
+; CHECK: # %bb.0: # %entry
+; CHECK-NEXT: fsrmi a1, 0
+; CHECK-NEXT: vsetvli zero, a0, e16alt, m4, tu, ma
+; CHECK-NEXT: vfmsub.vv v8, v12, v16
+; CHECK-NEXT: fsrm a1
+; CHECK-NEXT: ret
+entry:
+ %a = call <vscale x 16 x bfloat> @llvm.riscv.vfmsub.nxv16bf16.nxv16bf16(
+ <vscale x 16 x bfloat> %0,
+ <vscale x 16 x bfloat> %1,
+ <vscale x 16 x bfloat> %2,
+ iXLen 0, iXLen %3, iXLen 0)
+
+ ret <vscale x 16 x bfloat> %a
+}
+
+declare <vscale x 16 x bfloat> @llvm.riscv.vfmsub.mask.nxv16bf16.nxv16bf16(
+ <vscale x 16 x bfloat>,
+ <vscale x 16 x bfloat>,
+ <vscale x 16 x bfloat>,
+ <vscale x 16 x i1>,
+ iXLen, iXLen, iXLen);
+
+define <vscale x 16 x bfloat> @intrinsic_vfmsub_mask_vv_nxv16bf16_nxv16bf16_nxv16bf16(<vscale x 16 x bfloat> %0, <vscale x 16 x bfloat> %1, <vscale x 16 x bfloat> %2, <vscale x 16 x i1> %3, iXLen %4) nounwind {
+; CHECK-LABEL: intrinsic_vfmsub_mask_vv_nxv16bf16_nxv16bf16_nxv16bf16:
+; CHECK: # %bb.0: # %entry
+; CHECK-NEXT: fsrmi a1, 0
+; CHECK-NEXT: vsetvli zero, a0, e16alt, m4, tu, mu
+; CHECK-NEXT: vfmsub.vv v8, v12, v16, v0.t
+; CHECK-NEXT: fsrm a1
+; CHECK-NEXT: ret
+entry:
+ %a = call <vscale x 16 x bfloat> @llvm.riscv.vfmsub.mask.nxv16bf16.nxv16bf16(
+ <vscale x 16 x bfloat> %0,
+ <vscale x 16 x bfloat> %1,
+ <vscale x 16 x bfloat> %2,
+ <vscale x 16 x i1> %3,
+ iXLen 0, iXLen %4, iXLen 0);
+
+ ret <vscale x 16 x bfloat> %a
+}
+
+declare <vscale x 1 x bfloat> @llvm.riscv.vfmsub.nxv1bf16.bf16(
+ <vscale x 1 x bfloat>,
+ bfloat,
+ <vscale x 1 x bfloat>,
+ iXLen, iXLen, iXLen);
+
+define <vscale x 1 x bfloat> @intrinsic_vfmsub_vf_nxv1bf16_bf16_nxv1bf16(<vscale x 1 x bfloat> %0, bfloat %1, <vscale x 1 x bfloat> %2, iXLen %3) nounwind {
+; CHECK-LABEL: intrinsic_vfmsub_vf_nxv1bf16_bf16_nxv1bf16:
+; CHECK: # %bb.0: # %entry
+; CHECK-NEXT: fsrmi a1, 0
+; CHECK-NEXT: vsetvli zero, a0, e16alt, mf4, tu, ma
+; CHECK-NEXT: vfmsub.vf v8, fa0, v9
+; CHECK-NEXT: fsrm a1
+; CHECK-NEXT: ret
+entry:
+ %a = call <vscale x 1 x bfloat> @llvm.riscv.vfmsub.nxv1bf16.bf16(
+ <vscale x 1 x bfloat> %0,
+ bfloat %1,
+ <vscale x 1 x bfloat> %2,
+ iXLen 0, iXLen %3, iXLen 0)
+
+ ret <vscale x 1 x bfloat> %a
+}
+
+declare <vscale x 1 x bfloat> @llvm.riscv.vfmsub.mask.nxv1bf16.bf16(
+ <vscale x 1 x bfloat>,
+ bfloat,
+ <vscale x 1 x bfloat>,
+ <vscale x 1 x i1>,
+ iXLen, iXLen, iXLen);
+
+define <vscale x 1 x bfloat> @intrinsic_vfmsub_mask_vf_nxv1bf16_bf16_nxv1bf16(<vscale x 1 x bfloat> %0, bfloat %1, <vscale x 1 x bfloat> %2, <vscale x 1 x i1> %3, iXLen %4) nounwind {
+; CHECK-LABEL: intrinsic_vfmsub_mask_vf_nxv1bf16_bf16_nxv1bf16:
+; CHECK: # %bb.0: # %entry
+; CHECK-NEXT: fsrmi a1, 0
+; CHECK-NEXT: vsetvli zero, a0, e16alt, mf4, tu, mu
+; CHECK-NEXT: vfmsub.vf v8, fa0, v9, v0.t
+; CHECK-NEXT: fsrm a1
+; CHECK-NEXT: ret
+entry:
+ %a = call <vscale x 1 x bfloat> @llvm.riscv.vfmsub.mask.nxv1bf16.bf16(
+ <vscale x 1 x bfloat> %0,
+ bfloat %1,
+ <vscale x 1 x bfloat> %2,
+ <vscale x 1 x i1> %3,
+ iXLen 0, iXLen %4, iXLen 0);
+
+ ret <vscale x 1 x bfloat> %a
+}
+
+declare <vscale x 2 x bfloat> @llvm.riscv.vfmsub.nxv2bf16.bf16(
+ <vscale x 2 x bfloat>,
+ bfloat,
+ <vscale x 2 x bfloat>,
+ iXLen, iXLen, iXLen);
+
+define <vscale x 2 x bfloat> @intrinsic_vfmsub_vf_nxv2bf16_bf16_nxv2bf16(<vscale x 2 x bfloat> %0, bfloat %1, <vscale x 2 x bfloat> %2, iXLen %3) nounwind {
+; CHECK-LABEL: intrinsic_vfmsub_vf_nxv2bf16_bf16_nxv2bf16:
+; CHECK: # %bb.0: # %entry
+; CHECK-NEXT: fsrmi a1, 0
+; CHECK-NEXT: vsetvli zero, a0, e16alt, mf2, tu, ma
+; CHECK-NEXT: vfmsub.vf v8, fa0, v9
+; CHECK-NEXT: fsrm a1
+; CHECK-NEXT: ret
+entry:
+ %a = call <vscale x 2 x bfloat> @llvm.riscv.vfmsub.nxv2bf16.bf16(
+ <vscale x 2 x bfloat> %0,
+ bfloat %1,
+ <vscale x 2 x bfloat> %2,
+ iXLen 0, iXLen %3, iXLen 0)
+
+ ret <vscale x 2 x bfloat> %a
+}
+
+declare <vscale x 2 x bfloat> @llvm.riscv.vfmsub.mask.nxv2bf16.bf16(
+ <vscale x 2 x bfloat>,
+ bfloat,
+ <vscale x 2 x bfloat>,
+ <vscale x 2 x i1>,
+ iXLen, iXLen, iXLen);
+
+define <vscale x 2 x bfloat> @intrinsic_vfmsub_mask_vf_nxv2bf16_bf16_nxv2bf16(<vscale x 2 x bfloat> %0, bfloat %1, <vscale x 2 x bfloat> %2, <vscale x 2 x i1> %3, iXLen %4) nounwind {
+; CHECK-LABEL: intrinsic_vfmsub_mask_vf_nxv2bf16_bf16_nxv2bf16:
+; CHECK: # %bb.0: # %entry
+; CHECK-NEXT: fsrmi a1, 0
+; CHECK-NEXT: vsetvli zero, a0, e16alt, mf2, tu, mu
+; CHECK-NEXT: vfmsub.vf v8, fa0, v9, v0.t
+; CHECK-NEXT: fsrm a1
+; CHECK-NEXT: ret
+entry:
+ %a = call <vscale x 2 x bfloat> @llvm.riscv.vfmsub.mask.nxv2bf16.bf16(
+ <vscale x 2 x bfloat> %0,
+ bfloat %1,
+ <vscale x 2 x bfloat> %2,
+ <vscale x 2 x i1> %3,
+ iXLen 0, iXLen %4, iXLen 0);
+
+ ret <vscale x 2 x bfloat> %a
+}
+
+declare <vscale x 4 x bfloat> @llvm.riscv.vfmsub.nxv4bf16.bf16(
+ <vscale x 4 x bfloat>,
+ bfloat,
+ <vscale x 4 x bfloat>,
+ iXLen, iXLen, iXLen);
+
+define <vscale x 4 x bfloat> @intrinsic_vfmsub_vf_nxv4bf16_bf16_nxv4bf16(<vscale x 4 x bfloat> %0, bfloat %1, <vscale x 4 x bfloat> %2, iXLen %3) nounwind {
+; CHECK-LABEL: intrinsic_vfmsub_vf_nxv4bf16_bf16_nxv4bf16:
+; CHECK: # %bb.0: # %entry
+; CHECK-NEXT: fsrmi a1, 0
+; CHECK-NEXT: vsetvli zero, a0, e16alt, m1, tu, ma
+; CHECK-NEXT: vfmsub.vf v8, fa0, v9
+; CHECK-NEXT: fsrm a1
+; CHECK-NEXT: ret
+entry:
+ %a = call <vscale x 4 x bfloat> @llvm.riscv.vfmsub.nxv4bf16.bf16(
+ <vscale x 4 x bfloat> %0,
+ bfloat %1,
+ <vscale x 4 x bfloat> %2,
+ iXLen 0, iXLen %3, iXLen 0)
+
+ ret <vscale x 4 x bfloat> %a
+}
+
+declare <vscale x 4 x bfloat> @llvm.riscv.vfmsub.mask.nxv4bf16.bf16(
+ <vscale x 4 x bfloat>,
+ bfloat,
+ <vscale x 4 x bfloat>,
+ <vscale x 4 x i1>,
+ iXLen, iXLen, iXLen);
+
+define <vscale x 4 x bfloat> @intrinsic_vfmsub_mask_vf_nxv4bf16_bf16_nxv4bf16(<vscale x 4 x bfloat> %0, bfloat %1, <vscale x 4 x bfloat> %2, <vscale x 4 x i1> %3, iXLen %4) nounwind {
+; CHECK-LABEL: intrinsic_vfmsub_mask_vf_nxv4bf16_bf16_nxv4bf16:
+; CHECK: # %bb.0: # %entry
+; CHECK-NEXT: fsrmi a1, 0
+; CHECK-NEXT: vsetvli zero, a0, e16alt, m1, tu, mu
+; CHECK-NEXT: vfmsub.vf v8, fa0, v9, v0.t
+; CHECK-NEXT: fsrm a1
+; CHECK-NEXT: ret
+entry:
+ %a = call <vscale x 4 x bfloat> @llvm.riscv.vfmsub.mask.nxv4bf16.bf16(
+ <vscale x 4 x bfloat> %0,
+ bfloat %1,
+ <vscale x 4 x bfloat> %2,
+ <vscale x 4 x i1> %3,
+ iXLen 0, iXLen %4, iXLen 0);
+
+ ret <vscale x 4 x bfloat> %a
+}
+
+declare <vscale x 8 x bfloat> @llvm.riscv.vfmsub.nxv8bf16.bf16(
+ <vscale x 8 x bfloat>,
+ bfloat,
+ <vscale x 8 x bfloat>,
+ iXLen, iXLen, iXLen);
+
+define <vscale x 8 x bfloat> @intrinsic_vfmsub_vf_nxv8bf16_bf16_nxv8bf16(<vscale x 8 x bfloat> %0, bfloat %1, <vscale x 8 x bfloat> %2, iXLen %3) nounwind {
+; CHECK-LABEL: intrinsic_vfmsub_vf_nxv8bf16_bf16_nxv8bf16:
+; CHECK: # %bb.0: # %entry
+; CHECK-NEXT: fsrmi a1, 0
+; CHECK-NEXT: vsetvli zero, a0, e16alt, m2, tu, ma
+; CHECK-NEXT: vfmsub.vf v8, fa0, v10
+; CHECK-NEXT: fsrm a1
+; CHECK-NEXT: ret
+entry:
+ %a = call <vscale x 8 x bfloat> @llvm.riscv.vfmsub.nxv8bf16.bf16(
+ <vscale x 8 x bfloat> %0,
+ bfloat %1,
+ <vscale x 8 x bfloat> %2,
+ iXLen 0, iXLen %3, iXLen 0)
+
+ ret <vscale x 8 x bfloat> %a
+}
+
+declare <vscale x 8 x bfloat> @llvm.riscv.vfmsub.mask.nxv8bf16.bf16(
+ <vscale x 8 x bfloat>,
+ bfloat,
+ <vscale x 8 x bfloat>,
+ <vscale x 8 x i1>,
+ iXLen, iXLen, iXLen);
+
+define <vscale x 8 x bfloat> @intrinsic_vfmsub_mask_vf_nxv8bf16_bf16_nxv8bf16(<vscale x 8 x bfloat> %0, bfloat %1, <vscale x 8 x bfloat> %2, <vscale x 8 x i1> %3, iXLen %4) nounwind {
+; CHECK-LABEL: intrinsic_vfmsub_mask_vf_nxv8bf16_bf16_nxv8bf16:
+; CHECK: # %bb.0: # %entry
+; CHECK-NEXT: fsrmi a1, 0
+; CHECK-NEXT: vsetvli zero, a0, e16alt, m2, tu, mu
+; CHECK-NEXT: vfmsub.vf v8, fa0, v10, v0.t
+; CHECK-NEXT: fsrm a1
+; CHECK-NEXT: ret
+entry:
+ %a = call <vscale x 8 x bfloat> @llvm.riscv.vfmsub.mask.nxv8bf16.bf16(
+ <vscale x 8 x bfloat> %0,
+ bfloat %1,
+ <vscale x 8 x bfloat> %2,
+ <vscale x 8 x i1> %3,
+ iXLen 0, iXLen %4, iXLen 0);
+
+ ret <vscale x 8 x bfloat> %a
+}
+
+declare <vscale x 16 x bfloat> @llvm.riscv.vfmsub.nxv16bf16.bf16(
+ <vscale x 16 x bfloat>,
+ bfloat,
+ <vscale x 16 x bfloat>,
+ iXLen, iXLen, iXLen);
+
+define <vscale x 16 x bfloat> @intrinsic_vfmsub_vf_nxv16bf16_bf16_nxv16bf16(<vscale x 16 x bfloat> %0, bfloat %1, <vscale x 16 x bfloat> %2, iXLen %3) nounwind {
+; CHECK-LABEL: intrinsic_vfmsub_vf_nxv16bf16_bf16_nxv16bf16:
+; CHECK: # %bb.0: # %entry
+; CHECK-NEXT: fsrmi a1, 0
+; CHECK-NEXT: vsetvli zero, a0, e16alt, m4, tu, ma
+; CHECK-NEXT: vfmsub.vf v8, fa0, v12
+; CHECK-NEXT: fsrm a1
+; CHECK-NEXT: ret
+entry:
+ %a = call <vscale x 16 x bfloat> @llvm.riscv.vfmsub.nxv16bf16.bf16(
+ <vscale x 16 x bfloat> %0,
+ bfloat %1,
+ <vscale x 16 x bfloat> %2,
+ iXLen 0, iXLen %3, iXLen 0)
+
+ ret <vscale x 16 x bfloat> %a
+}
+
+declare <vscale x 16 x bfloat> @llvm.riscv.vfmsub.mask.nxv16bf16.bf16(
+ <vscale x 16 x bfloat>,
+ bfloat,
+ <vscale x 16 x bfloat>,
+ <vscale x 16 x i1>,
+ iXLen, iXLen, iXLen);
+
+define <vscale x 16 x bfloat> @intrinsic_vfmsub_mask_vf_nxv16bf16_bf16_nxv16bf16(<vscale x 16 x bfloat> %0, bfloat %1, <vscale x 16 x bfloat> %2, <vscale x 16 x i1> %3, iXLen %4) nounwind {
+; CHECK-LABEL: intrinsic_vfmsub_mask_vf_nxv16bf16_bf16_nxv16bf16:
+; CHECK: # %bb.0: # %entry
+; CHECK-NEXT: fsrmi a1, 0
+; CHECK-NEXT: vsetvli zero, a0, e16alt, m4, tu, mu
+; CHECK-NEXT: vfmsub.vf v8, fa0, v12, v0.t
+; CHECK-NEXT: fsrm a1
+; CHECK-NEXT: ret
+entry:
+ %a = call <vscale x 16 x bfloat> @llvm.riscv.vfmsub.mask.nxv16bf16.bf16(
+ <vscale x 16 x bfloat> %0,
+ bfloat %1,
+ <vscale x 16 x bfloat> %2,
+ <vscale x 16 x i1> %3,
+ iXLen 0, iXLen %4, iXLen 0);
+
+ ret <vscale x 16 x bfloat> %a
+}
+
+define <vscale x 1 x bfloat> @intrinsic_vfmsub_vv_nxv1bf16_nxv1bf16_nxv1bf16_commute(<vscale x 1 x bfloat> %0, <vscale x 1 x bfloat> %1, <vscale x 1 x bfloat> %2, iXLen %3) nounwind {
+; CHECK-LABEL: intrinsic_vfmsub_vv_nxv1bf16_nxv1bf16_nxv1bf16_commute:
+; CHECK: # %bb.0: # %entry
+; CHECK-NEXT: vsetvli zero, a0, e16alt, mf4, ta, ma
+; CHECK-NEXT: vfmsub.vv v8, v9, v10
+; CHECK-NEXT: ret
+entry:
+ %a = call <vscale x 1 x bfloat> @llvm.riscv.vfmsub.nxv1bf16.nxv1bf16(
+ <vscale x 1 x bfloat> %1,
+ <vscale x 1 x bfloat> %0,
+ <vscale x 1 x bfloat> %2,
+ iXLen 7, iXLen %3, iXLen 3)
+
+ ret <vscale x 1 x bfloat> %a
+}
+
+define <vscale x 1 x bfloat> @intrinsic_vfmsub_vv_nxv1bf16_nxv1bf16_nxv1bf16_commute2(<vscale x 1 x bfloat> %0, <vscale x 1 x bfloat> %1, <vscale x 1 x bfloat> %2, iXLen %3) nounwind {
+; CHECK-LABEL: intrinsic_vfmsub_vv_nxv1bf16_nxv1bf16_nxv1bf16_commute2:
+; CHECK: # %bb.0: # %entry
+; CHECK-NEXT: vsetvli zero, a0, e16alt, mf4, ta, ma
+; CHECK-NEXT: vfmsac.vv v8, v10, v9
+; CHECK-NEXT: ret
+entry:
+ %a = call <vscale x 1 x bfloat> @llvm.riscv.vfmsub.nxv1bf16.nxv1bf16(
+ <vscale x 1 x bfloat> %1,
+ <vscale x 1 x bfloat> %2,
+ <vscale x 1 x bfloat> %0,
+ iXLen 7, iXLen %3, iXLen 3)
+
+ ret <vscale x 1 x bfloat> %a
+}
+
+define <vscale x 1 x bfloat> @intrinsic_vfmsub_vf_nxv1bf16_bf16_nxv1bf16_commute(<vscale x 1 x bfloat> %0, bfloat %1, <vscale x 1 x bfloat> %2, iXLen %3) nounwind {
+; CHECK-LABEL: intrinsic_vfmsub_vf_nxv1bf16_bf16_nxv1bf16_commute:
+; CHECK: # %bb.0: # %entry
+; CHECK-NEXT: vsetvli zero, a0, e16alt, mf4, ta, ma
+; CHECK-NEXT: vfmsac.vf v8, fa0, v9
+; CHECK-NEXT: ret
+entry:
+ %a = call <vscale x 1 x bfloat> @llvm.riscv.vfmsub.nxv1bf16.bf16(
+ <vscale x 1 x bfloat> %2,
+ bfloat %1,
+ <vscale x 1 x bfloat> %0,
+ iXLen 7, iXLen %3, iXLen 3)
+
+ ret <vscale x 1 x bfloat> %a
+}
diff --git a/llvm/test/CodeGen/RISCV/rvv/vfmul-bf.ll b/llvm/test/CodeGen/RISCV/rvv/vfmul-bf.ll
new file mode 100644
index 0000000..44bce72
--- /dev/null
+++ b/llvm/test/CodeGen/RISCV/rvv/vfmul-bf.ll
@@ -0,0 +1,607 @@
+; NOTE: Assertions have been autogenerated by utils/update_llc_test_checks.py
+; RUN: sed 's/iXLen/i32/g' %s | llc -mtriple=riscv32 -mattr=+v,+experimental-zvfbfa \
+; RUN: -verify-machineinstrs -target-abi=ilp32d | FileCheck %s
+; RUN: sed 's/iXLen/i64/g' %s | llc -mtriple=riscv64 -mattr=+v,+experimental-zvfbfa \
+; RUN: -verify-machineinstrs -target-abi=lp64d | FileCheck %s
+
+declare <vscale x 1 x bfloat> @llvm.riscv.vfmul.nxv1bf16.nxv1bf16(
+ <vscale x 1 x bfloat>,
+ <vscale x 1 x bfloat>,
+ <vscale x 1 x bfloat>,
+ iXLen, iXLen);
+
+define <vscale x 1 x bfloat> @intrinsic_vfmul_vv_nxv1bf16_nxv1bf16_nxv1bf16(<vscale x 1 x bfloat> %0, <vscale x 1 x bfloat> %1, iXLen %2) nounwind {
+; CHECK-LABEL: intrinsic_vfmul_vv_nxv1bf16_nxv1bf16_nxv1bf16:
+; CHECK: # %bb.0: # %entry
+; CHECK-NEXT: fsrmi a1, 0
+; CHECK-NEXT: vsetvli zero, a0, e16alt, mf4, ta, ma
+; CHECK-NEXT: vfmul.vv v8, v8, v9
+; CHECK-NEXT: fsrm a1
+; CHECK-NEXT: ret
+entry:
+ %a = call <vscale x 1 x bfloat> @llvm.riscv.vfmul.nxv1bf16.nxv1bf16(
+ <vscale x 1 x bfloat> poison,
+ <vscale x 1 x bfloat> %0,
+ <vscale x 1 x bfloat> %1,
+ iXLen 0, iXLen %2)
+
+ ret <vscale x 1 x bfloat> %a
+}
+
+declare <vscale x 1 x bfloat> @llvm.riscv.vfmul.mask.nxv1bf16.nxv1bf16(
+ <vscale x 1 x bfloat>,
+ <vscale x 1 x bfloat>,
+ <vscale x 1 x bfloat>,
+ <vscale x 1 x i1>,
+ iXLen, iXLen, iXLen);
+
+define <vscale x 1 x bfloat> @intrinsic_vfmul_mask_vv_nxv1bf16_nxv1bf16_nxv1bf16(<vscale x 1 x bfloat> %0, <vscale x 1 x bfloat> %1, <vscale x 1 x bfloat> %2, <vscale x 1 x i1> %3, iXLen %4) nounwind {
+; CHECK-LABEL: intrinsic_vfmul_mask_vv_nxv1bf16_nxv1bf16_nxv1bf16:
+; CHECK: # %bb.0: # %entry
+; CHECK-NEXT: fsrmi a1, 0
+; CHECK-NEXT: vsetvli zero, a0, e16alt, mf4, ta, mu
+; CHECK-NEXT: vfmul.vv v8, v9, v10, v0.t
+; CHECK-NEXT: fsrm a1
+; CHECK-NEXT: ret
+entry:
+ %a = call <vscale x 1 x bfloat> @llvm.riscv.vfmul.mask.nxv1bf16.nxv1bf16(
+ <vscale x 1 x bfloat> %0,
+ <vscale x 1 x bfloat> %1,
+ <vscale x 1 x bfloat> %2,
+ <vscale x 1 x i1> %3,
+ iXLen 0, iXLen %4, iXLen 1)
+
+ ret <vscale x 1 x bfloat> %a
+}
+
+declare <vscale x 2 x bfloat> @llvm.riscv.vfmul.nxv2bf16.nxv2bf16(
+ <vscale x 2 x bfloat>,
+ <vscale x 2 x bfloat>,
+ <vscale x 2 x bfloat>,
+ iXLen, iXLen);
+
+define <vscale x 2 x bfloat> @intrinsic_vfmul_vv_nxv2bf16_nxv2bf16_nxv2bf16(<vscale x 2 x bfloat> %0, <vscale x 2 x bfloat> %1, iXLen %2) nounwind {
+; CHECK-LABEL: intrinsic_vfmul_vv_nxv2bf16_nxv2bf16_nxv2bf16:
+; CHECK: # %bb.0: # %entry
+; CHECK-NEXT: fsrmi a1, 0
+; CHECK-NEXT: vsetvli zero, a0, e16alt, mf2, ta, ma
+; CHECK-NEXT: vfmul.vv v8, v8, v9
+; CHECK-NEXT: fsrm a1
+; CHECK-NEXT: ret
+entry:
+ %a = call <vscale x 2 x bfloat> @llvm.riscv.vfmul.nxv2bf16.nxv2bf16(
+ <vscale x 2 x bfloat> poison,
+ <vscale x 2 x bfloat> %0,
+ <vscale x 2 x bfloat> %1,
+ iXLen 0, iXLen %2)
+
+ ret <vscale x 2 x bfloat> %a
+}
+
+declare <vscale x 2 x bfloat> @llvm.riscv.vfmul.mask.nxv2bf16.nxv2bf16(
+ <vscale x 2 x bfloat>,
+ <vscale x 2 x bfloat>,
+ <vscale x 2 x bfloat>,
+ <vscale x 2 x i1>,
+ iXLen, iXLen, iXLen);
+
+define <vscale x 2 x bfloat> @intrinsic_vfmul_mask_vv_nxv2bf16_nxv2bf16_nxv2bf16(<vscale x 2 x bfloat> %0, <vscale x 2 x bfloat> %1, <vscale x 2 x bfloat> %2, <vscale x 2 x i1> %3, iXLen %4) nounwind {
+; CHECK-LABEL: intrinsic_vfmul_mask_vv_nxv2bf16_nxv2bf16_nxv2bf16:
+; CHECK: # %bb.0: # %entry
+; CHECK-NEXT: fsrmi a1, 0
+; CHECK-NEXT: vsetvli zero, a0, e16alt, mf2, ta, mu
+; CHECK-NEXT: vfmul.vv v8, v9, v10, v0.t
+; CHECK-NEXT: fsrm a1
+; CHECK-NEXT: ret
+entry:
+ %a = call <vscale x 2 x bfloat> @llvm.riscv.vfmul.mask.nxv2bf16.nxv2bf16(
+ <vscale x 2 x bfloat> %0,
+ <vscale x 2 x bfloat> %1,
+ <vscale x 2 x bfloat> %2,
+ <vscale x 2 x i1> %3,
+ iXLen 0, iXLen %4, iXLen 1)
+
+ ret <vscale x 2 x bfloat> %a
+}
+
+declare <vscale x 4 x bfloat> @llvm.riscv.vfmul.nxv4bf16.nxv4bf16(
+ <vscale x 4 x bfloat>,
+ <vscale x 4 x bfloat>,
+ <vscale x 4 x bfloat>,
+ iXLen, iXLen);
+
+define <vscale x 4 x bfloat> @intrinsic_vfmul_vv_nxv4bf16_nxv4bf16_nxv4bf16(<vscale x 4 x bfloat> %0, <vscale x 4 x bfloat> %1, iXLen %2) nounwind {
+; CHECK-LABEL: intrinsic_vfmul_vv_nxv4bf16_nxv4bf16_nxv4bf16:
+; CHECK: # %bb.0: # %entry
+; CHECK-NEXT: fsrmi a1, 0
+; CHECK-NEXT: vsetvli zero, a0, e16alt, m1, ta, ma
+; CHECK-NEXT: vfmul.vv v8, v8, v9
+; CHECK-NEXT: fsrm a1
+; CHECK-NEXT: ret
+entry:
+ %a = call <vscale x 4 x bfloat> @llvm.riscv.vfmul.nxv4bf16.nxv4bf16(
+ <vscale x 4 x bfloat> poison,
+ <vscale x 4 x bfloat> %0,
+ <vscale x 4 x bfloat> %1,
+ iXLen 0, iXLen %2)
+
+ ret <vscale x 4 x bfloat> %a
+}
+
+declare <vscale x 4 x bfloat> @llvm.riscv.vfmul.mask.nxv4bf16.nxv4bf16(
+ <vscale x 4 x bfloat>,
+ <vscale x 4 x bfloat>,
+ <vscale x 4 x bfloat>,
+ <vscale x 4 x i1>,
+ iXLen, iXLen, iXLen);
+
+define <vscale x 4 x bfloat> @intrinsic_vfmul_mask_vv_nxv4bf16_nxv4bf16_nxv4bf16(<vscale x 4 x bfloat> %0, <vscale x 4 x bfloat> %1, <vscale x 4 x bfloat> %2, <vscale x 4 x i1> %3, iXLen %4) nounwind {
+; CHECK-LABEL: intrinsic_vfmul_mask_vv_nxv4bf16_nxv4bf16_nxv4bf16:
+; CHECK: # %bb.0: # %entry
+; CHECK-NEXT: fsrmi a1, 0
+; CHECK-NEXT: vsetvli zero, a0, e16alt, m1, ta, mu
+; CHECK-NEXT: vfmul.vv v8, v9, v10, v0.t
+; CHECK-NEXT: fsrm a1
+; CHECK-NEXT: ret
+entry:
+ %a = call <vscale x 4 x bfloat> @llvm.riscv.vfmul.mask.nxv4bf16.nxv4bf16(
+ <vscale x 4 x bfloat> %0,
+ <vscale x 4 x bfloat> %1,
+ <vscale x 4 x bfloat> %2,
+ <vscale x 4 x i1> %3,
+ iXLen 0, iXLen %4, iXLen 1)
+
+ ret <vscale x 4 x bfloat> %a
+}
+
+declare <vscale x 8 x bfloat> @llvm.riscv.vfmul.nxv8bf16.nxv8bf16(
+ <vscale x 8 x bfloat>,
+ <vscale x 8 x bfloat>,
+ <vscale x 8 x bfloat>,
+ iXLen, iXLen);
+
+define <vscale x 8 x bfloat> @intrinsic_vfmul_vv_nxv8bf16_nxv8bf16_nxv8bf16(<vscale x 8 x bfloat> %0, <vscale x 8 x bfloat> %1, iXLen %2) nounwind {
+; CHECK-LABEL: intrinsic_vfmul_vv_nxv8bf16_nxv8bf16_nxv8bf16:
+; CHECK: # %bb.0: # %entry
+; CHECK-NEXT: fsrmi a1, 0
+; CHECK-NEXT: vsetvli zero, a0, e16alt, m2, ta, ma
+; CHECK-NEXT: vfmul.vv v8, v8, v10
+; CHECK-NEXT: fsrm a1
+; CHECK-NEXT: ret
+entry:
+ %a = call <vscale x 8 x bfloat> @llvm.riscv.vfmul.nxv8bf16.nxv8bf16(
+ <vscale x 8 x bfloat> poison,
+ <vscale x 8 x bfloat> %0,
+ <vscale x 8 x bfloat> %1,
+ iXLen 0, iXLen %2)
+
+ ret <vscale x 8 x bfloat> %a
+}
+
+declare <vscale x 8 x bfloat> @llvm.riscv.vfmul.mask.nxv8bf16.nxv8bf16(
+ <vscale x 8 x bfloat>,
+ <vscale x 8 x bfloat>,
+ <vscale x 8 x bfloat>,
+ <vscale x 8 x i1>,
+ iXLen, iXLen, iXLen);
+
+define <vscale x 8 x bfloat> @intrinsic_vfmul_mask_vv_nxv8bf16_nxv8bf16_nxv8bf16(<vscale x 8 x bfloat> %0, <vscale x 8 x bfloat> %1, <vscale x 8 x bfloat> %2, <vscale x 8 x i1> %3, iXLen %4) nounwind {
+; CHECK-LABEL: intrinsic_vfmul_mask_vv_nxv8bf16_nxv8bf16_nxv8bf16:
+; CHECK: # %bb.0: # %entry
+; CHECK-NEXT: fsrmi a1, 0
+; CHECK-NEXT: vsetvli zero, a0, e16alt, m2, ta, mu
+; CHECK-NEXT: vfmul.vv v8, v10, v12, v0.t
+; CHECK-NEXT: fsrm a1
+; CHECK-NEXT: ret
+entry:
+ %a = call <vscale x 8 x bfloat> @llvm.riscv.vfmul.mask.nxv8bf16.nxv8bf16(
+ <vscale x 8 x bfloat> %0,
+ <vscale x 8 x bfloat> %1,
+ <vscale x 8 x bfloat> %2,
+ <vscale x 8 x i1> %3,
+ iXLen 0, iXLen %4, iXLen 1)
+
+ ret <vscale x 8 x bfloat> %a
+}
+
+declare <vscale x 16 x bfloat> @llvm.riscv.vfmul.nxv16bf16.nxv16bf16(
+ <vscale x 16 x bfloat>,
+ <vscale x 16 x bfloat>,
+ <vscale x 16 x bfloat>,
+ iXLen, iXLen);
+
+define <vscale x 16 x bfloat> @intrinsic_vfmul_vv_nxv16bf16_nxv16bf16_nxv16bf16(<vscale x 16 x bfloat> %0, <vscale x 16 x bfloat> %1, iXLen %2) nounwind {
+; CHECK-LABEL: intrinsic_vfmul_vv_nxv16bf16_nxv16bf16_nxv16bf16:
+; CHECK: # %bb.0: # %entry
+; CHECK-NEXT: fsrmi a1, 0
+; CHECK-NEXT: vsetvli zero, a0, e16alt, m4, ta, ma
+; CHECK-NEXT: vfmul.vv v8, v8, v12
+; CHECK-NEXT: fsrm a1
+; CHECK-NEXT: ret
+entry:
+ %a = call <vscale x 16 x bfloat> @llvm.riscv.vfmul.nxv16bf16.nxv16bf16(
+ <vscale x 16 x bfloat> poison,
+ <vscale x 16 x bfloat> %0,
+ <vscale x 16 x bfloat> %1,
+ iXLen 0, iXLen %2)
+
+ ret <vscale x 16 x bfloat> %a
+}
+
+declare <vscale x 16 x bfloat> @llvm.riscv.vfmul.mask.nxv16bf16.nxv16bf16(
+ <vscale x 16 x bfloat>,
+ <vscale x 16 x bfloat>,
+ <vscale x 16 x bfloat>,
+ <vscale x 16 x i1>,
+ iXLen, iXLen, iXLen);
+
+define <vscale x 16 x bfloat> @intrinsic_vfmul_mask_vv_nxv16bf16_nxv16bf16_nxv16bf16(<vscale x 16 x bfloat> %0, <vscale x 16 x bfloat> %1, <vscale x 16 x bfloat> %2, <vscale x 16 x i1> %3, iXLen %4) nounwind {
+; CHECK-LABEL: intrinsic_vfmul_mask_vv_nxv16bf16_nxv16bf16_nxv16bf16:
+; CHECK: # %bb.0: # %entry
+; CHECK-NEXT: fsrmi a1, 0
+; CHECK-NEXT: vsetvli zero, a0, e16alt, m4, ta, mu
+; CHECK-NEXT: vfmul.vv v8, v12, v16, v0.t
+; CHECK-NEXT: fsrm a1
+; CHECK-NEXT: ret
+entry:
+ %a = call <vscale x 16 x bfloat> @llvm.riscv.vfmul.mask.nxv16bf16.nxv16bf16(
+ <vscale x 16 x bfloat> %0,
+ <vscale x 16 x bfloat> %1,
+ <vscale x 16 x bfloat> %2,
+ <vscale x 16 x i1> %3,
+ iXLen 0, iXLen %4, iXLen 1)
+
+ ret <vscale x 16 x bfloat> %a
+}
+
+declare <vscale x 32 x bfloat> @llvm.riscv.vfmul.nxv32bf16.nxv32bf16(
+ <vscale x 32 x bfloat>,
+ <vscale x 32 x bfloat>,
+ <vscale x 32 x bfloat>,
+ iXLen, iXLen);
+
+define <vscale x 32 x bfloat> @intrinsic_vfmul_vv_nxv32bf16_nxv32bf16_nxv32bf16(<vscale x 32 x bfloat> %0, <vscale x 32 x bfloat> %1, iXLen %2) nounwind {
+; CHECK-LABEL: intrinsic_vfmul_vv_nxv32bf16_nxv32bf16_nxv32bf16:
+; CHECK: # %bb.0: # %entry
+; CHECK-NEXT: fsrmi a1, 0
+; CHECK-NEXT: vsetvli zero, a0, e16alt, m8, ta, ma
+; CHECK-NEXT: vfmul.vv v8, v8, v16
+; CHECK-NEXT: fsrm a1
+; CHECK-NEXT: ret
+entry:
+ %a = call <vscale x 32 x bfloat> @llvm.riscv.vfmul.nxv32bf16.nxv32bf16(
+ <vscale x 32 x bfloat> poison,
+ <vscale x 32 x bfloat> %0,
+ <vscale x 32 x bfloat> %1,
+ iXLen 0, iXLen %2)
+
+ ret <vscale x 32 x bfloat> %a
+}
+
+declare <vscale x 32 x bfloat> @llvm.riscv.vfmul.mask.nxv32bf16.nxv32bf16(
+ <vscale x 32 x bfloat>,
+ <vscale x 32 x bfloat>,
+ <vscale x 32 x bfloat>,
+ <vscale x 32 x i1>,
+ iXLen, iXLen, iXLen);
+
+define <vscale x 32 x bfloat> @intrinsic_vfmul_mask_vv_nxv32bf16_nxv32bf16_nxv32bf16(<vscale x 32 x bfloat> %0, <vscale x 32 x bfloat> %1, <vscale x 32 x bfloat> %2, <vscale x 32 x i1> %3, iXLen %4) nounwind {
+; CHECK-LABEL: intrinsic_vfmul_mask_vv_nxv32bf16_nxv32bf16_nxv32bf16:
+; CHECK: # %bb.0: # %entry
+; CHECK-NEXT: vl8re16.v v24, (a0)
+; CHECK-NEXT: fsrmi a0, 0
+; CHECK-NEXT: vsetvli zero, a1, e16alt, m8, ta, mu
+; CHECK-NEXT: vfmul.vv v8, v16, v24, v0.t
+; CHECK-NEXT: fsrm a0
+; CHECK-NEXT: ret
+entry:
+ %a = call <vscale x 32 x bfloat> @llvm.riscv.vfmul.mask.nxv32bf16.nxv32bf16(
+ <vscale x 32 x bfloat> %0,
+ <vscale x 32 x bfloat> %1,
+ <vscale x 32 x bfloat> %2,
+ <vscale x 32 x i1> %3,
+ iXLen 0, iXLen %4, iXLen 1)
+
+ ret <vscale x 32 x bfloat> %a
+}
+
+declare <vscale x 1 x bfloat> @llvm.riscv.vfmul.nxv1bf16.bf16(
+ <vscale x 1 x bfloat>,
+ <vscale x 1 x bfloat>,
+ bfloat,
+ iXLen, iXLen);
+
+define <vscale x 1 x bfloat> @intrinsic_vfmul_vf_nxv1bf16_nxv1bf16_bf16(<vscale x 1 x bfloat> %0, bfloat %1, iXLen %2) nounwind {
+; CHECK-LABEL: intrinsic_vfmul_vf_nxv1bf16_nxv1bf16_bf16:
+; CHECK: # %bb.0: # %entry
+; CHECK-NEXT: fsrmi a1, 0
+; CHECK-NEXT: vsetvli zero, a0, e16alt, mf4, ta, ma
+; CHECK-NEXT: vfmul.vf v8, v8, fa0
+; CHECK-NEXT: fsrm a1
+; CHECK-NEXT: ret
+entry:
+ %a = call <vscale x 1 x bfloat> @llvm.riscv.vfmul.nxv1bf16.bf16(
+ <vscale x 1 x bfloat> poison,
+ <vscale x 1 x bfloat> %0,
+ bfloat %1,
+ iXLen 0, iXLen %2)
+
+ ret <vscale x 1 x bfloat> %a
+}
+
+declare <vscale x 1 x bfloat> @llvm.riscv.vfmul.mask.nxv1bf16.bf16(
+ <vscale x 1 x bfloat>,
+ <vscale x 1 x bfloat>,
+ bfloat,
+ <vscale x 1 x i1>,
+ iXLen, iXLen, iXLen);
+
+define <vscale x 1 x bfloat> @intrinsic_vfmul_mask_vf_nxv1bf16_nxv1bf16_bf16(<vscale x 1 x bfloat> %0, <vscale x 1 x bfloat> %1, bfloat %2, <vscale x 1 x i1> %3, iXLen %4) nounwind {
+; CHECK-LABEL: intrinsic_vfmul_mask_vf_nxv1bf16_nxv1bf16_bf16:
+; CHECK: # %bb.0: # %entry
+; CHECK-NEXT: fsrmi a1, 0
+; CHECK-NEXT: vsetvli zero, a0, e16alt, mf4, ta, mu
+; CHECK-NEXT: vfmul.vf v8, v9, fa0, v0.t
+; CHECK-NEXT: fsrm a1
+; CHECK-NEXT: ret
+entry:
+ %a = call <vscale x 1 x bfloat> @llvm.riscv.vfmul.mask.nxv1bf16.bf16(
+ <vscale x 1 x bfloat> %0,
+ <vscale x 1 x bfloat> %1,
+ bfloat %2,
+ <vscale x 1 x i1> %3,
+ iXLen 0, iXLen %4, iXLen 1)
+
+ ret <vscale x 1 x bfloat> %a
+}
+
+declare <vscale x 2 x bfloat> @llvm.riscv.vfmul.nxv2bf16.bf16(
+ <vscale x 2 x bfloat>,
+ <vscale x 2 x bfloat>,
+ bfloat,
+ iXLen, iXLen);
+
+define <vscale x 2 x bfloat> @intrinsic_vfmul_vf_nxv2bf16_nxv2bf16_bf16(<vscale x 2 x bfloat> %0, bfloat %1, iXLen %2) nounwind {
+; CHECK-LABEL: intrinsic_vfmul_vf_nxv2bf16_nxv2bf16_bf16:
+; CHECK: # %bb.0: # %entry
+; CHECK-NEXT: fsrmi a1, 0
+; CHECK-NEXT: vsetvli zero, a0, e16alt, mf2, ta, ma
+; CHECK-NEXT: vfmul.vf v8, v8, fa0
+; CHECK-NEXT: fsrm a1
+; CHECK-NEXT: ret
+entry:
+ %a = call <vscale x 2 x bfloat> @llvm.riscv.vfmul.nxv2bf16.bf16(
+ <vscale x 2 x bfloat> poison,
+ <vscale x 2 x bfloat> %0,
+ bfloat %1,
+ iXLen 0, iXLen %2)
+
+ ret <vscale x 2 x bfloat> %a
+}
+
+declare <vscale x 2 x bfloat> @llvm.riscv.vfmul.mask.nxv2bf16.bf16(
+ <vscale x 2 x bfloat>,
+ <vscale x 2 x bfloat>,
+ bfloat,
+ <vscale x 2 x i1>,
+ iXLen, iXLen, iXLen);
+
+define <vscale x 2 x bfloat> @intrinsic_vfmul_mask_vf_nxv2bf16_nxv2bf16_bf16(<vscale x 2 x bfloat> %0, <vscale x 2 x bfloat> %1, bfloat %2, <vscale x 2 x i1> %3, iXLen %4) nounwind {
+; CHECK-LABEL: intrinsic_vfmul_mask_vf_nxv2bf16_nxv2bf16_bf16:
+; CHECK: # %bb.0: # %entry
+; CHECK-NEXT: fsrmi a1, 0
+; CHECK-NEXT: vsetvli zero, a0, e16alt, mf2, ta, mu
+; CHECK-NEXT: vfmul.vf v8, v9, fa0, v0.t
+; CHECK-NEXT: fsrm a1
+; CHECK-NEXT: ret
+entry:
+ %a = call <vscale x 2 x bfloat> @llvm.riscv.vfmul.mask.nxv2bf16.bf16(
+ <vscale x 2 x bfloat> %0,
+ <vscale x 2 x bfloat> %1,
+ bfloat %2,
+ <vscale x 2 x i1> %3,
+ iXLen 0, iXLen %4, iXLen 1)
+
+ ret <vscale x 2 x bfloat> %a
+}
+
+declare <vscale x 4 x bfloat> @llvm.riscv.vfmul.nxv4bf16.bf16(
+ <vscale x 4 x bfloat>,
+ <vscale x 4 x bfloat>,
+ bfloat,
+ iXLen, iXLen);
+
+define <vscale x 4 x bfloat> @intrinsic_vfmul_vf_nxv4bf16_nxv4bf16_bf16(<vscale x 4 x bfloat> %0, bfloat %1, iXLen %2) nounwind {
+; CHECK-LABEL: intrinsic_vfmul_vf_nxv4bf16_nxv4bf16_bf16:
+; CHECK: # %bb.0: # %entry
+; CHECK-NEXT: fsrmi a1, 0
+; CHECK-NEXT: vsetvli zero, a0, e16alt, m1, ta, ma
+; CHECK-NEXT: vfmul.vf v8, v8, fa0
+; CHECK-NEXT: fsrm a1
+; CHECK-NEXT: ret
+entry:
+ %a = call <vscale x 4 x bfloat> @llvm.riscv.vfmul.nxv4bf16.bf16(
+ <vscale x 4 x bfloat> poison,
+ <vscale x 4 x bfloat> %0,
+ bfloat %1,
+ iXLen 0, iXLen %2)
+
+ ret <vscale x 4 x bfloat> %a
+}
+
+declare <vscale x 4 x bfloat> @llvm.riscv.vfmul.mask.nxv4bf16.bf16(
+ <vscale x 4 x bfloat>,
+ <vscale x 4 x bfloat>,
+ bfloat,
+ <vscale x 4 x i1>,
+ iXLen, iXLen, iXLen);
+
+define <vscale x 4 x bfloat> @intrinsic_vfmul_mask_vf_nxv4bf16_nxv4bf16_bf16(<vscale x 4 x bfloat> %0, <vscale x 4 x bfloat> %1, bfloat %2, <vscale x 4 x i1> %3, iXLen %4) nounwind {
+; CHECK-LABEL: intrinsic_vfmul_mask_vf_nxv4bf16_nxv4bf16_bf16:
+; CHECK: # %bb.0: # %entry
+; CHECK-NEXT: fsrmi a1, 0
+; CHECK-NEXT: vsetvli zero, a0, e16alt, m1, ta, mu
+; CHECK-NEXT: vfmul.vf v8, v9, fa0, v0.t
+; CHECK-NEXT: fsrm a1
+; CHECK-NEXT: ret
+entry:
+ %a = call <vscale x 4 x bfloat> @llvm.riscv.vfmul.mask.nxv4bf16.bf16(
+ <vscale x 4 x bfloat> %0,
+ <vscale x 4 x bfloat> %1,
+ bfloat %2,
+ <vscale x 4 x i1> %3,
+ iXLen 0, iXLen %4, iXLen 1)
+
+ ret <vscale x 4 x bfloat> %a
+}
+
+declare <vscale x 8 x bfloat> @llvm.riscv.vfmul.nxv8bf16.bf16(
+ <vscale x 8 x bfloat>,
+ <vscale x 8 x bfloat>,
+ bfloat,
+ iXLen, iXLen);
+
+define <vscale x 8 x bfloat> @intrinsic_vfmul_vf_nxv8bf16_nxv8bf16_bf16(<vscale x 8 x bfloat> %0, bfloat %1, iXLen %2) nounwind {
+; CHECK-LABEL: intrinsic_vfmul_vf_nxv8bf16_nxv8bf16_bf16:
+; CHECK: # %bb.0: # %entry
+; CHECK-NEXT: fsrmi a1, 0
+; CHECK-NEXT: vsetvli zero, a0, e16alt, m2, ta, ma
+; CHECK-NEXT: vfmul.vf v8, v8, fa0
+; CHECK-NEXT: fsrm a1
+; CHECK-NEXT: ret
+entry:
+ %a = call <vscale x 8 x bfloat> @llvm.riscv.vfmul.nxv8bf16.bf16(
+ <vscale x 8 x bfloat> poison,
+ <vscale x 8 x bfloat> %0,
+ bfloat %1,
+ iXLen 0, iXLen %2)
+
+ ret <vscale x 8 x bfloat> %a
+}
+
+declare <vscale x 8 x bfloat> @llvm.riscv.vfmul.mask.nxv8bf16.bf16(
+ <vscale x 8 x bfloat>,
+ <vscale x 8 x bfloat>,
+ bfloat,
+ <vscale x 8 x i1>,
+ iXLen, iXLen, iXLen);
+
+define <vscale x 8 x bfloat> @intrinsic_vfmul_mask_vf_nxv8bf16_nxv8bf16_bf16(<vscale x 8 x bfloat> %0, <vscale x 8 x bfloat> %1, bfloat %2, <vscale x 8 x i1> %3, iXLen %4) nounwind {
+; CHECK-LABEL: intrinsic_vfmul_mask_vf_nxv8bf16_nxv8bf16_bf16:
+; CHECK: # %bb.0: # %entry
+; CHECK-NEXT: fsrmi a1, 0
+; CHECK-NEXT: vsetvli zero, a0, e16alt, m2, ta, mu
+; CHECK-NEXT: vfmul.vf v8, v10, fa0, v0.t
+; CHECK-NEXT: fsrm a1
+; CHECK-NEXT: ret
+entry:
+ %a = call <vscale x 8 x bfloat> @llvm.riscv.vfmul.mask.nxv8bf16.bf16(
+ <vscale x 8 x bfloat> %0,
+ <vscale x 8 x bfloat> %1,
+ bfloat %2,
+ <vscale x 8 x i1> %3,
+ iXLen 0, iXLen %4, iXLen 1)
+
+ ret <vscale x 8 x bfloat> %a
+}
+
+declare <vscale x 16 x bfloat> @llvm.riscv.vfmul.nxv16bf16.bf16(
+ <vscale x 16 x bfloat>,
+ <vscale x 16 x bfloat>,
+ bfloat,
+ iXLen, iXLen);
+
+define <vscale x 16 x bfloat> @intrinsic_vfmul_vf_nxv16bf16_nxv16bf16_bf16(<vscale x 16 x bfloat> %0, bfloat %1, iXLen %2) nounwind {
+; CHECK-LABEL: intrinsic_vfmul_vf_nxv16bf16_nxv16bf16_bf16:
+; CHECK: # %bb.0: # %entry
+; CHECK-NEXT: fsrmi a1, 0
+; CHECK-NEXT: vsetvli zero, a0, e16alt, m4, ta, ma
+; CHECK-NEXT: vfmul.vf v8, v8, fa0
+; CHECK-NEXT: fsrm a1
+; CHECK-NEXT: ret
+entry:
+ %a = call <vscale x 16 x bfloat> @llvm.riscv.vfmul.nxv16bf16.bf16(
+ <vscale x 16 x bfloat> poison,
+ <vscale x 16 x bfloat> %0,
+ bfloat %1,
+ iXLen 0, iXLen %2)
+
+ ret <vscale x 16 x bfloat> %a
+}
+
+declare <vscale x 16 x bfloat> @llvm.riscv.vfmul.mask.nxv16bf16.bf16(
+ <vscale x 16 x bfloat>,
+ <vscale x 16 x bfloat>,
+ bfloat,
+ <vscale x 16 x i1>,
+ iXLen, iXLen, iXLen);
+
+define <vscale x 16 x bfloat> @intrinsic_vfmul_mask_vf_nxv16bf16_nxv16bf16_bf16(<vscale x 16 x bfloat> %0, <vscale x 16 x bfloat> %1, bfloat %2, <vscale x 16 x i1> %3, iXLen %4) nounwind {
+; CHECK-LABEL: intrinsic_vfmul_mask_vf_nxv16bf16_nxv16bf16_bf16:
+; CHECK: # %bb.0: # %entry
+; CHECK-NEXT: fsrmi a1, 0
+; CHECK-NEXT: vsetvli zero, a0, e16alt, m4, ta, mu
+; CHECK-NEXT: vfmul.vf v8, v12, fa0, v0.t
+; CHECK-NEXT: fsrm a1
+; CHECK-NEXT: ret
+entry:
+ %a = call <vscale x 16 x bfloat> @llvm.riscv.vfmul.mask.nxv16bf16.bf16(
+ <vscale x 16 x bfloat> %0,
+ <vscale x 16 x bfloat> %1,
+ bfloat %2,
+ <vscale x 16 x i1> %3,
+ iXLen 0, iXLen %4, iXLen 1)
+
+ ret <vscale x 16 x bfloat> %a
+}
+
+declare <vscale x 32 x bfloat> @llvm.riscv.vfmul.nxv32bf16.bf16(
+ <vscale x 32 x bfloat>,
+ <vscale x 32 x bfloat>,
+ bfloat,
+ iXLen, iXLen);
+
+define <vscale x 32 x bfloat> @intrinsic_vfmul_vf_nxv32bf16_nxv32bf16_bf16(<vscale x 32 x bfloat> %0, bfloat %1, iXLen %2) nounwind {
+; CHECK-LABEL: intrinsic_vfmul_vf_nxv32bf16_nxv32bf16_bf16:
+; CHECK: # %bb.0: # %entry
+; CHECK-NEXT: fsrmi a1, 0
+; CHECK-NEXT: vsetvli zero, a0, e16alt, m8, ta, ma
+; CHECK-NEXT: vfmul.vf v8, v8, fa0
+; CHECK-NEXT: fsrm a1
+; CHECK-NEXT: ret
+entry:
+ %a = call <vscale x 32 x bfloat> @llvm.riscv.vfmul.nxv32bf16.bf16(
+ <vscale x 32 x bfloat> poison,
+ <vscale x 32 x bfloat> %0,
+ bfloat %1,
+ iXLen 0, iXLen %2)
+
+ ret <vscale x 32 x bfloat> %a
+}
+
+declare <vscale x 32 x bfloat> @llvm.riscv.vfmul.mask.nxv32bf16.bf16(
+ <vscale x 32 x bfloat>,
+ <vscale x 32 x bfloat>,
+ bfloat,
+ <vscale x 32 x i1>,
+ iXLen, iXLen, iXLen);
+
+define <vscale x 32 x bfloat> @intrinsic_vfmul_mask_vf_nxv32bf16_nxv32bf16_bf16(<vscale x 32 x bfloat> %0, <vscale x 32 x bfloat> %1, bfloat %2, <vscale x 32 x i1> %3, iXLen %4) nounwind {
+; CHECK-LABEL: intrinsic_vfmul_mask_vf_nxv32bf16_nxv32bf16_bf16:
+; CHECK: # %bb.0: # %entry
+; CHECK-NEXT: fsrmi a1, 0
+; CHECK-NEXT: vsetvli zero, a0, e16alt, m8, ta, mu
+; CHECK-NEXT: vfmul.vf v8, v16, fa0, v0.t
+; CHECK-NEXT: fsrm a1
+; CHECK-NEXT: ret
+entry:
+ %a = call <vscale x 32 x bfloat> @llvm.riscv.vfmul.mask.nxv32bf16.bf16(
+ <vscale x 32 x bfloat> %0,
+ <vscale x 32 x bfloat> %1,
+ bfloat %2,
+ <vscale x 32 x i1> %3,
+ iXLen 0, iXLen %4, iXLen 1)
+
+ ret <vscale x 32 x bfloat> %a
+}
+
diff --git a/llvm/test/CodeGen/RISCV/rvv/vfmv-bf-s.ll b/llvm/test/CodeGen/RISCV/rvv/vfmv-bf-s.ll
new file mode 100644
index 0000000..fbc73119
--- /dev/null
+++ b/llvm/test/CodeGen/RISCV/rvv/vfmv-bf-s.ll
@@ -0,0 +1,88 @@
+; NOTE: Assertions have been autogenerated by utils/update_llc_test_checks.py
+; RUN: llc -mtriple=riscv64 -mattr=+d,+v,+experimental-zvfbfa -target-abi lp64d -verify-machineinstrs < %s | FileCheck %s
+; RUN: llc -mtriple=riscv32 -mattr=+d,+v,+experimental-zvfbfa -target-abi ilp32d -verify-machineinstrs < %s | FileCheck %s
+
+declare bfloat @llvm.riscv.vfmv.f.s.nxv1bf16(<vscale x 1 x bfloat>)
+
+define bfloat @intrinsic_vfmv.f.s_s_nxv1bf16(<vscale x 1 x bfloat> %0) nounwind {
+; CHECK-LABEL: intrinsic_vfmv.f.s_s_nxv1bf16:
+; CHECK: # %bb.0: # %entry
+; CHECK-NEXT: vsetivli zero, 1, e16, m1, ta, ma
+; CHECK-NEXT: vmv.x.s a0, v8
+; CHECK-NEXT: fmv.h.x fa0, a0
+; CHECK-NEXT: ret
+entry:
+ %a = call bfloat @llvm.riscv.vfmv.f.s.nxv1bf16(<vscale x 1 x bfloat> %0)
+ ret bfloat %a
+}
+
+declare bfloat @llvm.riscv.vfmv.f.s.nxv2bf16(<vscale x 2 x bfloat>)
+
+define bfloat @intrinsic_vfmv.f.s_s_nxv2bf16(<vscale x 2 x bfloat> %0) nounwind {
+; CHECK-LABEL: intrinsic_vfmv.f.s_s_nxv2bf16:
+; CHECK: # %bb.0: # %entry
+; CHECK-NEXT: vsetivli zero, 1, e16, m1, ta, ma
+; CHECK-NEXT: vmv.x.s a0, v8
+; CHECK-NEXT: fmv.h.x fa0, a0
+; CHECK-NEXT: ret
+entry:
+ %a = call bfloat @llvm.riscv.vfmv.f.s.nxv2bf16(<vscale x 2 x bfloat> %0)
+ ret bfloat %a
+}
+
+declare bfloat @llvm.riscv.vfmv.f.s.nxv4bf16(<vscale x 4 x bfloat>)
+
+define bfloat @intrinsic_vfmv.f.s_s_nxv4bf16(<vscale x 4 x bfloat> %0) nounwind {
+; CHECK-LABEL: intrinsic_vfmv.f.s_s_nxv4bf16:
+; CHECK: # %bb.0: # %entry
+; CHECK-NEXT: vsetivli zero, 1, e16, m1, ta, ma
+; CHECK-NEXT: vmv.x.s a0, v8
+; CHECK-NEXT: fmv.h.x fa0, a0
+; CHECK-NEXT: ret
+entry:
+ %a = call bfloat @llvm.riscv.vfmv.f.s.nxv4bf16(<vscale x 4 x bfloat> %0)
+ ret bfloat %a
+}
+
+declare bfloat @llvm.riscv.vfmv.f.s.nxv8bf16(<vscale x 8 x bfloat>)
+
+define bfloat @intrinsic_vfmv.f.s_s_nxv8bf16(<vscale x 8 x bfloat> %0) nounwind {
+; CHECK-LABEL: intrinsic_vfmv.f.s_s_nxv8bf16:
+; CHECK: # %bb.0: # %entry
+; CHECK-NEXT: vsetivli zero, 1, e16, m1, ta, ma
+; CHECK-NEXT: vmv.x.s a0, v8
+; CHECK-NEXT: fmv.h.x fa0, a0
+; CHECK-NEXT: ret
+entry:
+ %a = call bfloat @llvm.riscv.vfmv.f.s.nxv8bf16(<vscale x 8 x bfloat> %0)
+ ret bfloat %a
+}
+
+declare bfloat @llvm.riscv.vfmv.f.s.nxv16bf16(<vscale x 16 x bfloat>)
+
+define bfloat @intrinsic_vfmv.f.s_s_nxv16bf16(<vscale x 16 x bfloat> %0) nounwind {
+; CHECK-LABEL: intrinsic_vfmv.f.s_s_nxv16bf16:
+; CHECK: # %bb.0: # %entry
+; CHECK-NEXT: vsetivli zero, 1, e16, m1, ta, ma
+; CHECK-NEXT: vmv.x.s a0, v8
+; CHECK-NEXT: fmv.h.x fa0, a0
+; CHECK-NEXT: ret
+entry:
+ %a = call bfloat @llvm.riscv.vfmv.f.s.nxv16bf16(<vscale x 16 x bfloat> %0)
+ ret bfloat %a
+}
+
+declare bfloat @llvm.riscv.vfmv.f.s.nxv32bf16(<vscale x 32 x bfloat>)
+
+define bfloat @intrinsic_vfmv.f.s_s_nxv32bf16(<vscale x 32 x bfloat> %0) nounwind {
+; CHECK-LABEL: intrinsic_vfmv.f.s_s_nxv32bf16:
+; CHECK: # %bb.0: # %entry
+; CHECK-NEXT: vsetivli zero, 1, e16, m1, ta, ma
+; CHECK-NEXT: vmv.x.s a0, v8
+; CHECK-NEXT: fmv.h.x fa0, a0
+; CHECK-NEXT: ret
+entry:
+ %a = call bfloat @llvm.riscv.vfmv.f.s.nxv32bf16(<vscale x 32 x bfloat> %0)
+ ret bfloat %a
+}
+
diff --git a/llvm/test/CodeGen/RISCV/rvv/vfmv-s-bf.ll b/llvm/test/CodeGen/RISCV/rvv/vfmv-s-bf.ll
new file mode 100644
index 0000000..a810809
--- /dev/null
+++ b/llvm/test/CodeGen/RISCV/rvv/vfmv-s-bf.ll
@@ -0,0 +1,161 @@
+; NOTE: Assertions have been autogenerated by utils/update_llc_test_checks.py
+; RUN: sed 's/iXLen/i32/g' %s | llc -mtriple=riscv32 -mattr=+v,+experimental-zvfbfa \
+; RUN: -verify-machineinstrs -target-abi=ilp32d | FileCheck %s --check-prefixes=CHECK
+; RUN: sed 's/iXLen/i64/g' %s | llc -mtriple=riscv64 -mattr=+v,+experimental-zvfbfa \
+; RUN: -verify-machineinstrs -target-abi=lp64d | FileCheck %s --check-prefixes=CHECK
+
+declare <vscale x 1 x bfloat> @llvm.riscv.vfmv.s.f.nxv1bf16(<vscale x 1 x bfloat>, bfloat, iXLen)
+
+define <vscale x 1 x bfloat> @intrinsic_vfmv.s.f_f_nxv1bf16(<vscale x 1 x bfloat> %0, bfloat %1, iXLen %2) nounwind {
+; CHECK-LABEL: intrinsic_vfmv.s.f_f_nxv1bf16:
+; CHECK: # %bb.0: # %entry
+; CHECK-NEXT: vsetvli zero, a0, e16alt, m1, tu, ma
+; CHECK-NEXT: vfmv.s.f v8, fa0
+; CHECK-NEXT: ret
+entry:
+ %a = call <vscale x 1 x bfloat> @llvm.riscv.vfmv.s.f.nxv1bf16(<vscale x 1 x bfloat> %0, bfloat %1, iXLen %2)
+ ret <vscale x 1 x bfloat> %a
+}
+
+declare <vscale x 2 x bfloat> @llvm.riscv.vfmv.s.f.nxv2bf16(<vscale x 2 x bfloat>, bfloat, iXLen)
+
+define <vscale x 2 x bfloat> @intrinsic_vfmv.s.f_f_nxv2bf16(<vscale x 2 x bfloat> %0, bfloat %1, iXLen %2) nounwind {
+; CHECK-LABEL: intrinsic_vfmv.s.f_f_nxv2bf16:
+; CHECK: # %bb.0: # %entry
+; CHECK-NEXT: vsetvli zero, a0, e16alt, m1, tu, ma
+; CHECK-NEXT: vfmv.s.f v8, fa0
+; CHECK-NEXT: ret
+entry:
+ %a = call <vscale x 2 x bfloat> @llvm.riscv.vfmv.s.f.nxv2bf16(<vscale x 2 x bfloat> %0, bfloat %1, iXLen %2)
+ ret <vscale x 2 x bfloat> %a
+}
+
+declare <vscale x 4 x bfloat> @llvm.riscv.vfmv.s.f.nxv4bf16(<vscale x 4 x bfloat>, bfloat, iXLen)
+
+define <vscale x 4 x bfloat> @intrinsic_vfmv.s.f_f_nxv4bf16(<vscale x 4 x bfloat> %0, bfloat %1, iXLen %2) nounwind {
+; CHECK-LABEL: intrinsic_vfmv.s.f_f_nxv4bf16:
+; CHECK: # %bb.0: # %entry
+; CHECK-NEXT: vsetvli zero, a0, e16alt, m1, tu, ma
+; CHECK-NEXT: vfmv.s.f v8, fa0
+; CHECK-NEXT: ret
+entry:
+ %a = call <vscale x 4 x bfloat> @llvm.riscv.vfmv.s.f.nxv4bf16(<vscale x 4 x bfloat> %0, bfloat %1, iXLen %2)
+ ret <vscale x 4 x bfloat> %a
+}
+
+declare <vscale x 8 x bfloat> @llvm.riscv.vfmv.s.f.nxv8bf16(<vscale x 8 x bfloat>, bfloat, iXLen)
+
+define <vscale x 8 x bfloat> @intrinsic_vfmv.s.f_f_nxv8bf16(<vscale x 8 x bfloat> %0, bfloat %1, iXLen %2) nounwind {
+; CHECK-LABEL: intrinsic_vfmv.s.f_f_nxv8bf16:
+; CHECK: # %bb.0: # %entry
+; CHECK-NEXT: vsetvli zero, a0, e16alt, m1, tu, ma
+; CHECK-NEXT: vfmv.s.f v8, fa0
+; CHECK-NEXT: ret
+entry:
+ %a = call <vscale x 8 x bfloat> @llvm.riscv.vfmv.s.f.nxv8bf16(<vscale x 8 x bfloat> %0, bfloat %1, iXLen %2)
+ ret <vscale x 8 x bfloat> %a
+}
+
+declare <vscale x 16 x bfloat> @llvm.riscv.vfmv.s.f.nxv16bf16(<vscale x 16 x bfloat>, bfloat, iXLen)
+
+define <vscale x 16 x bfloat> @intrinsic_vfmv.s.f_f_nxv16bf16(<vscale x 16 x bfloat> %0, bfloat %1, iXLen %2) nounwind {
+; CHECK-LABEL: intrinsic_vfmv.s.f_f_nxv16bf16:
+; CHECK: # %bb.0: # %entry
+; CHECK-NEXT: vsetvli zero, a0, e16alt, m1, tu, ma
+; CHECK-NEXT: vfmv.s.f v8, fa0
+; CHECK-NEXT: ret
+entry:
+ %a = call <vscale x 16 x bfloat> @llvm.riscv.vfmv.s.f.nxv16bf16(<vscale x 16 x bfloat> %0, bfloat %1, iXLen %2)
+ ret <vscale x 16 x bfloat> %a
+}
+
+declare <vscale x 32 x bfloat> @llvm.riscv.vfmv.s.f.nxv32bf16(<vscale x 32 x bfloat>, bfloat, iXLen)
+
+define <vscale x 32 x bfloat> @intrinsic_vfmv.s.f_f_nxv32bf16(<vscale x 32 x bfloat> %0, bfloat %1, iXLen %2) nounwind {
+; CHECK-LABEL: intrinsic_vfmv.s.f_f_nxv32bf16:
+; CHECK: # %bb.0: # %entry
+; CHECK-NEXT: vsetvli zero, a0, e16alt, m1, tu, ma
+; CHECK-NEXT: vfmv.s.f v8, fa0
+; CHECK-NEXT: ret
+entry:
+ %a = call <vscale x 32 x bfloat> @llvm.riscv.vfmv.s.f.nxv32bf16(<vscale x 32 x bfloat> %0, bfloat %1, iXLen %2)
+ ret <vscale x 32 x bfloat> %a
+}
+
+define <vscale x 1 x bfloat> @intrinsic_vfmv.s.f_f_zero_nxv1bf16(<vscale x 1 x bfloat> %0, iXLen %1) nounwind {
+; CHECK-LABEL: intrinsic_vfmv.s.f_f_zero_nxv1bf16:
+; CHECK: # %bb.0: # %entry
+; CHECK-NEXT: vsetvli zero, a0, e16, m1, tu, ma
+; CHECK-NEXT: vmv.s.x v8, zero
+; CHECK-NEXT: ret
+entry:
+ %a = call <vscale x 1 x bfloat> @llvm.riscv.vfmv.s.f.nxv1bf16(<vscale x 1 x bfloat> %0, bfloat 0.0, iXLen %1)
+ ret <vscale x 1 x bfloat> %a
+}
+
+define <vscale x 2 x bfloat> @intrinsic_vfmv.s.f_f_zero_nxv2bf16(<vscale x 2 x bfloat> %0, iXLen %1) nounwind {
+; CHECK-LABEL: intrinsic_vfmv.s.f_f_zero_nxv2bf16:
+; CHECK: # %bb.0: # %entry
+; CHECK-NEXT: vsetvli zero, a0, e16, m1, tu, ma
+; CHECK-NEXT: vmv.s.x v8, zero
+; CHECK-NEXT: ret
+entry:
+ %a = call <vscale x 2 x bfloat> @llvm.riscv.vfmv.s.f.nxv2bf16(<vscale x 2 x bfloat> %0, bfloat 0.0, iXLen %1)
+ ret <vscale x 2 x bfloat> %a
+}
+
+define <vscale x 4 x bfloat> @intrinsic_vfmv.s.f_f_zero_nxv4bf16(<vscale x 4 x bfloat> %0, iXLen %1) nounwind {
+; CHECK-LABEL: intrinsic_vfmv.s.f_f_zero_nxv4bf16:
+; CHECK: # %bb.0: # %entry
+; CHECK-NEXT: vsetvli zero, a0, e16, m1, tu, ma
+; CHECK-NEXT: vmv.s.x v8, zero
+; CHECK-NEXT: ret
+entry:
+ %a = call <vscale x 4 x bfloat> @llvm.riscv.vfmv.s.f.nxv4bf16(<vscale x 4 x bfloat> %0, bfloat 0.0, iXLen %1)
+ ret <vscale x 4 x bfloat> %a
+}
+
+define <vscale x 8 x bfloat> @intrinsic_vfmv.s.f_f_zero_nxv8bf16(<vscale x 8 x bfloat> %0, iXLen %1) nounwind {
+; CHECK-LABEL: intrinsic_vfmv.s.f_f_zero_nxv8bf16:
+; CHECK: # %bb.0: # %entry
+; CHECK-NEXT: vsetvli zero, a0, e16, m1, tu, ma
+; CHECK-NEXT: vmv.s.x v8, zero
+; CHECK-NEXT: ret
+entry:
+ %a = call <vscale x 8 x bfloat> @llvm.riscv.vfmv.s.f.nxv8bf16(<vscale x 8 x bfloat> %0, bfloat 0.0, iXLen %1)
+ ret <vscale x 8 x bfloat> %a
+}
+
+define <vscale x 16 x bfloat> @intrinsic_vfmv.s.f_f_zero_nxv16bf16(<vscale x 16 x bfloat> %0, iXLen %1) nounwind {
+; CHECK-LABEL: intrinsic_vfmv.s.f_f_zero_nxv16bf16:
+; CHECK: # %bb.0: # %entry
+; CHECK-NEXT: vsetvli zero, a0, e16, m1, tu, ma
+; CHECK-NEXT: vmv.s.x v8, zero
+; CHECK-NEXT: ret
+entry:
+ %a = call <vscale x 16 x bfloat> @llvm.riscv.vfmv.s.f.nxv16bf16(<vscale x 16 x bfloat> %0, bfloat 0.0, iXLen %1)
+ ret <vscale x 16 x bfloat> %a
+}
+
+define <vscale x 32 x bfloat> @intrinsic_vfmv.s.f_f_zero_nxv32bf16(<vscale x 32 x bfloat> %0, iXLen %1) nounwind {
+; CHECK-LABEL: intrinsic_vfmv.s.f_f_zero_nxv32bf16:
+; CHECK: # %bb.0: # %entry
+; CHECK-NEXT: vsetvli zero, a0, e16, m1, tu, ma
+; CHECK-NEXT: vmv.s.x v8, zero
+; CHECK-NEXT: ret
+entry:
+ %a = call <vscale x 32 x bfloat> @llvm.riscv.vfmv.s.f.nxv32bf16(<vscale x 32 x bfloat> %0, bfloat 0.0, iXLen %1)
+ ret <vscale x 32 x bfloat> %a
+}
+
+define <vscale x 1 x bfloat> @intrinsic_vfmv.s.f_f_nxv1bf16_negzero(<vscale x 1 x bfloat> %0, iXLen %1) nounwind {
+; CHECK-LABEL: intrinsic_vfmv.s.f_f_nxv1bf16_negzero:
+; CHECK: # %bb.0: # %entry
+; CHECK-NEXT: lui a1, 1048568
+; CHECK-NEXT: vsetvli zero, a0, e16, m1, tu, ma
+; CHECK-NEXT: vmv.s.x v8, a1
+; CHECK-NEXT: ret
+entry:
+ %a = call <vscale x 1 x bfloat> @llvm.riscv.vfmv.s.f.nxv1bf16(<vscale x 1 x bfloat> %0, bfloat -0.0, iXLen %1)
+ ret <vscale x 1 x bfloat> %a
+}
diff --git a/llvm/test/CodeGen/RISCV/rvv/vfmv-v-bf.ll b/llvm/test/CodeGen/RISCV/rvv/vfmv-v-bf.ll
new file mode 100644
index 0000000..f3293dd
--- /dev/null
+++ b/llvm/test/CodeGen/RISCV/rvv/vfmv-v-bf.ll
@@ -0,0 +1,216 @@
+; NOTE: Assertions have been autogenerated by utils/update_llc_test_checks.py
+; RUN: sed 's/iXLen/i32/g' %s | llc -mtriple=riscv32 -mattr=+v,+experimental-zvfbfa \
+; RUN: -verify-machineinstrs -target-abi=ilp32d | FileCheck %s
+; RUN: sed 's/iXLen/i64/g' %s | llc -mtriple=riscv64 -mattr=+v,+experimental-zvfbfa \
+; RUN: -verify-machineinstrs -target-abi=lp64d | FileCheck %s
+
+declare <vscale x 1 x bfloat> @llvm.riscv.vfmv.v.f.nxv1bf16(
+ <vscale x 1 x bfloat>,
+ bfloat,
+ iXLen);
+
+define <vscale x 1 x bfloat> @intrinsic_vfmv.v.f_f_nxv1bf16(bfloat %0, iXLen %1) nounwind {
+; CHECK-LABEL: intrinsic_vfmv.v.f_f_nxv1bf16:
+; CHECK: # %bb.0: # %entry
+; CHECK-NEXT: vsetvli zero, a0, e16alt, mf4, ta, ma
+; CHECK-NEXT: vfmv.v.f v8, fa0
+; CHECK-NEXT: ret
+entry:
+ %a = call <vscale x 1 x bfloat> @llvm.riscv.vfmv.v.f.nxv1bf16(
+ <vscale x 1 x bfloat> poison,
+ bfloat %0,
+ iXLen %1)
+
+ ret <vscale x 1 x bfloat> %a
+}
+
+declare <vscale x 2 x bfloat> @llvm.riscv.vfmv.v.f.nxv2bf16(
+ <vscale x 2 x bfloat>,
+ bfloat,
+ iXLen);
+
+define <vscale x 2 x bfloat> @intrinsic_vfmv.v.f_f_nxv2bf16(bfloat %0, iXLen %1) nounwind {
+; CHECK-LABEL: intrinsic_vfmv.v.f_f_nxv2bf16:
+; CHECK: # %bb.0: # %entry
+; CHECK-NEXT: vsetvli zero, a0, e16alt, mf2, ta, ma
+; CHECK-NEXT: vfmv.v.f v8, fa0
+; CHECK-NEXT: ret
+entry:
+ %a = call <vscale x 2 x bfloat> @llvm.riscv.vfmv.v.f.nxv2bf16(
+ <vscale x 2 x bfloat> poison,
+ bfloat %0,
+ iXLen %1)
+
+ ret <vscale x 2 x bfloat> %a
+}
+
+declare <vscale x 4 x bfloat> @llvm.riscv.vfmv.v.f.nxv4bf16(
+ <vscale x 4 x bfloat>,
+ bfloat,
+ iXLen);
+
+define <vscale x 4 x bfloat> @intrinsic_vfmv.v.f_f_nxv4bf16(bfloat %0, iXLen %1) nounwind {
+; CHECK-LABEL: intrinsic_vfmv.v.f_f_nxv4bf16:
+; CHECK: # %bb.0: # %entry
+; CHECK-NEXT: vsetvli zero, a0, e16alt, m1, ta, ma
+; CHECK-NEXT: vfmv.v.f v8, fa0
+; CHECK-NEXT: ret
+entry:
+ %a = call <vscale x 4 x bfloat> @llvm.riscv.vfmv.v.f.nxv4bf16(
+ <vscale x 4 x bfloat> poison,
+ bfloat %0,
+ iXLen %1)
+
+ ret <vscale x 4 x bfloat> %a
+}
+
+declare <vscale x 8 x bfloat> @llvm.riscv.vfmv.v.f.nxv8bf16(
+ <vscale x 8 x bfloat>,
+ bfloat,
+ iXLen);
+
+define <vscale x 8 x bfloat> @intrinsic_vfmv.v.f_f_nxv8bf16(bfloat %0, iXLen %1) nounwind {
+; CHECK-LABEL: intrinsic_vfmv.v.f_f_nxv8bf16:
+; CHECK: # %bb.0: # %entry
+; CHECK-NEXT: vsetvli zero, a0, e16alt, m2, ta, ma
+; CHECK-NEXT: vfmv.v.f v8, fa0
+; CHECK-NEXT: ret
+entry:
+ %a = call <vscale x 8 x bfloat> @llvm.riscv.vfmv.v.f.nxv8bf16(
+ <vscale x 8 x bfloat> poison,
+ bfloat %0,
+ iXLen %1)
+
+ ret <vscale x 8 x bfloat> %a
+}
+
+declare <vscale x 16 x bfloat> @llvm.riscv.vfmv.v.f.nxv16bf16(
+ <vscale x 16 x bfloat>,
+ bfloat,
+ iXLen);
+
+define <vscale x 16 x bfloat> @intrinsic_vfmv.v.f_f_nxv16bf16(bfloat %0, iXLen %1) nounwind {
+; CHECK-LABEL: intrinsic_vfmv.v.f_f_nxv16bf16:
+; CHECK: # %bb.0: # %entry
+; CHECK-NEXT: vsetvli zero, a0, e16alt, m4, ta, ma
+; CHECK-NEXT: vfmv.v.f v8, fa0
+; CHECK-NEXT: ret
+entry:
+ %a = call <vscale x 16 x bfloat> @llvm.riscv.vfmv.v.f.nxv16bf16(
+ <vscale x 16 x bfloat> poison,
+ bfloat %0,
+ iXLen %1)
+
+ ret <vscale x 16 x bfloat> %a
+}
+
+declare <vscale x 32 x bfloat> @llvm.riscv.vfmv.v.f.nxv32bf16(
+ <vscale x 32 x bfloat>,
+ bfloat,
+ iXLen);
+
+define <vscale x 32 x bfloat> @intrinsic_vfmv.v.f_f_nxv32bf16(bfloat %0, iXLen %1) nounwind {
+; CHECK-LABEL: intrinsic_vfmv.v.f_f_nxv32bf16:
+; CHECK: # %bb.0: # %entry
+; CHECK-NEXT: vsetvli zero, a0, e16alt, m8, ta, ma
+; CHECK-NEXT: vfmv.v.f v8, fa0
+; CHECK-NEXT: ret
+entry:
+ %a = call <vscale x 32 x bfloat> @llvm.riscv.vfmv.v.f.nxv32bf16(
+ <vscale x 32 x bfloat> poison,
+ bfloat %0,
+ iXLen %1)
+
+ ret <vscale x 32 x bfloat> %a
+}
+
+define <vscale x 1 x bfloat> @intrinsic_vfmv.v.f_zero_nxv1bf16(iXLen %0) nounwind {
+; CHECK-LABEL: intrinsic_vfmv.v.f_zero_nxv1bf16:
+; CHECK: # %bb.0: # %entry
+; CHECK-NEXT: vsetvli zero, a0, e16, mf4, ta, ma
+; CHECK-NEXT: vmv.v.i v8, 0
+; CHECK-NEXT: ret
+entry:
+ %a = call <vscale x 1 x bfloat> @llvm.riscv.vfmv.v.f.nxv1bf16(
+ <vscale x 1 x bfloat> poison,
+ bfloat 0.0,
+ iXLen %0)
+
+ ret <vscale x 1 x bfloat> %a
+}
+
+define <vscale x 2 x bfloat> @intrinsic_vmv.v.i_zero_nxv2bf16(iXLen %0) nounwind {
+; CHECK-LABEL: intrinsic_vmv.v.i_zero_nxv2bf16:
+; CHECK: # %bb.0: # %entry
+; CHECK-NEXT: vsetvli zero, a0, e16, mf2, ta, ma
+; CHECK-NEXT: vmv.v.i v8, 0
+; CHECK-NEXT: ret
+entry:
+ %a = call <vscale x 2 x bfloat> @llvm.riscv.vfmv.v.f.nxv2bf16(
+ <vscale x 2 x bfloat> poison,
+ bfloat 0.0,
+ iXLen %0)
+
+ ret <vscale x 2 x bfloat> %a
+}
+
+define <vscale x 4 x bfloat> @intrinsic_vmv.v.i_zero_nxv4bf16(iXLen %0) nounwind {
+; CHECK-LABEL: intrinsic_vmv.v.i_zero_nxv4bf16:
+; CHECK: # %bb.0: # %entry
+; CHECK-NEXT: vsetvli zero, a0, e16, m1, ta, ma
+; CHECK-NEXT: vmv.v.i v8, 0
+; CHECK-NEXT: ret
+entry:
+ %a = call <vscale x 4 x bfloat> @llvm.riscv.vfmv.v.f.nxv4bf16(
+ <vscale x 4 x bfloat> poison,
+ bfloat 0.0,
+ iXLen %0)
+
+ ret <vscale x 4 x bfloat> %a
+}
+
+define <vscale x 8 x bfloat> @intrinsic_vmv.v.i_zero_nxv8bf16(iXLen %0) nounwind {
+; CHECK-LABEL: intrinsic_vmv.v.i_zero_nxv8bf16:
+; CHECK: # %bb.0: # %entry
+; CHECK-NEXT: vsetvli zero, a0, e16, m2, ta, ma
+; CHECK-NEXT: vmv.v.i v8, 0
+; CHECK-NEXT: ret
+entry:
+ %a = call <vscale x 8 x bfloat> @llvm.riscv.vfmv.v.f.nxv8bf16(
+ <vscale x 8 x bfloat> poison,
+ bfloat 0.0,
+ iXLen %0)
+
+ ret <vscale x 8 x bfloat> %a
+}
+
+define <vscale x 16 x bfloat> @intrinsic_vmv.v.i_zero_nxv16bf16(iXLen %0) nounwind {
+; CHECK-LABEL: intrinsic_vmv.v.i_zero_nxv16bf16:
+; CHECK: # %bb.0: # %entry
+; CHECK-NEXT: vsetvli zero, a0, e16, m4, ta, ma
+; CHECK-NEXT: vmv.v.i v8, 0
+; CHECK-NEXT: ret
+entry:
+ %a = call <vscale x 16 x bfloat> @llvm.riscv.vfmv.v.f.nxv16bf16(
+ <vscale x 16 x bfloat> poison,
+ bfloat 0.0,
+ iXLen %0)
+
+ ret <vscale x 16 x bfloat> %a
+}
+
+define <vscale x 32 x bfloat> @intrinsic_vmv.v.i_zero_nxv32bf16(iXLen %0) nounwind {
+; CHECK-LABEL: intrinsic_vmv.v.i_zero_nxv32bf16:
+; CHECK: # %bb.0: # %entry
+; CHECK-NEXT: vsetvli zero, a0, e16, m8, ta, ma
+; CHECK-NEXT: vmv.v.i v8, 0
+; CHECK-NEXT: ret
+entry:
+ %a = call <vscale x 32 x bfloat> @llvm.riscv.vfmv.v.f.nxv32bf16(
+ <vscale x 32 x bfloat> poison,
+ bfloat 0.0,
+ iXLen %0)
+
+ ret <vscale x 32 x bfloat> %a
+}
+
diff --git a/llvm/test/CodeGen/RISCV/rvv/vfncvt-rod-bf-f.ll b/llvm/test/CodeGen/RISCV/rvv/vfncvt-rod-bf-f.ll
new file mode 100644
index 0000000..7d587fd
--- /dev/null
+++ b/llvm/test/CodeGen/RISCV/rvv/vfncvt-rod-bf-f.ll
@@ -0,0 +1,226 @@
+; NOTE: Assertions have been autogenerated by utils/update_llc_test_checks.py
+; RUN: sed 's/iXLen/i32/g' %s | llc -mtriple=riscv32 -mattr=+v,+experimental-zvfbfa \
+; RUN: -verify-machineinstrs -target-abi=ilp32d | FileCheck %s
+; RUN: sed 's/iXLen/i64/g' %s | llc -mtriple=riscv64 -mattr=+v,+experimental-zvfbfa \
+; RUN: -verify-machineinstrs -target-abi=lp64d | FileCheck %s
+
+declare <vscale x 1 x bfloat> @llvm.riscv.vfncvt.rod.f.f.w.nxv1bf16.nxv1f32(
+ <vscale x 1 x bfloat>,
+ <vscale x 1 x float>,
+ iXLen);
+
+define <vscale x 1 x bfloat> @intrinsic_vfncvt_rod.f.f.w_nxv1bf16_nxv1f32(<vscale x 1 x float> %0, iXLen %1) nounwind {
+; CHECK-LABEL: intrinsic_vfncvt_rod.f.f.w_nxv1bf16_nxv1f32:
+; CHECK: # %bb.0: # %entry
+; CHECK-NEXT: vsetvli zero, a0, e16alt, mf4, ta, ma
+; CHECK-NEXT: vfncvt.rod.f.f.w v9, v8
+; CHECK-NEXT: vmv1r.v v8, v9
+; CHECK-NEXT: ret
+entry:
+ %a = call <vscale x 1 x bfloat> @llvm.riscv.vfncvt.rod.f.f.w.nxv1bf16.nxv1f32(
+ <vscale x 1 x bfloat> poison,
+ <vscale x 1 x float> %0,
+ iXLen %1)
+
+ ret <vscale x 1 x bfloat> %a
+}
+
+declare <vscale x 1 x bfloat> @llvm.riscv.vfncvt.rod.f.f.w.mask.nxv1bf16.nxv1f32(
+ <vscale x 1 x bfloat>,
+ <vscale x 1 x float>,
+ <vscale x 1 x i1>,
+ iXLen,
+ iXLen);
+
+define <vscale x 1 x bfloat> @intrinsic_vfncvt_mask_rod.f.f.w_nxv1bf16_nxv1f32(<vscale x 1 x bfloat> %0, <vscale x 1 x float> %1, <vscale x 1 x i1> %2, iXLen %3) nounwind {
+; CHECK-LABEL: intrinsic_vfncvt_mask_rod.f.f.w_nxv1bf16_nxv1f32:
+; CHECK: # %bb.0: # %entry
+; CHECK-NEXT: vsetvli zero, a0, e16alt, mf4, ta, mu
+; CHECK-NEXT: vfncvt.rod.f.f.w v8, v9, v0.t
+; CHECK-NEXT: ret
+entry:
+ %a = call <vscale x 1 x bfloat> @llvm.riscv.vfncvt.rod.f.f.w.mask.nxv1bf16.nxv1f32(
+ <vscale x 1 x bfloat> %0,
+ <vscale x 1 x float> %1,
+ <vscale x 1 x i1> %2,
+ iXLen %3, iXLen 1)
+
+ ret <vscale x 1 x bfloat> %a
+}
+
+declare <vscale x 2 x bfloat> @llvm.riscv.vfncvt.rod.f.f.w.nxv2bf16.nxv2f32(
+ <vscale x 2 x bfloat>,
+ <vscale x 2 x float>,
+ iXLen);
+
+define <vscale x 2 x bfloat> @intrinsic_vfncvt_rod.f.f.w_nxv2bf16_nxv2f32(<vscale x 2 x float> %0, iXLen %1) nounwind {
+; CHECK-LABEL: intrinsic_vfncvt_rod.f.f.w_nxv2bf16_nxv2f32:
+; CHECK: # %bb.0: # %entry
+; CHECK-NEXT: vsetvli zero, a0, e16alt, mf2, ta, ma
+; CHECK-NEXT: vfncvt.rod.f.f.w v9, v8
+; CHECK-NEXT: vmv1r.v v8, v9
+; CHECK-NEXT: ret
+entry:
+ %a = call <vscale x 2 x bfloat> @llvm.riscv.vfncvt.rod.f.f.w.nxv2bf16.nxv2f32(
+ <vscale x 2 x bfloat> poison,
+ <vscale x 2 x float> %0,
+ iXLen %1)
+
+ ret <vscale x 2 x bfloat> %a
+}
+
+declare <vscale x 2 x bfloat> @llvm.riscv.vfncvt.rod.f.f.w.mask.nxv2bf16.nxv2f32(
+ <vscale x 2 x bfloat>,
+ <vscale x 2 x float>,
+ <vscale x 2 x i1>,
+ iXLen,
+ iXLen);
+
+define <vscale x 2 x bfloat> @intrinsic_vfncvt_mask_rod.f.f.w_nxv2bf16_nxv2f32(<vscale x 2 x bfloat> %0, <vscale x 2 x float> %1, <vscale x 2 x i1> %2, iXLen %3) nounwind {
+; CHECK-LABEL: intrinsic_vfncvt_mask_rod.f.f.w_nxv2bf16_nxv2f32:
+; CHECK: # %bb.0: # %entry
+; CHECK-NEXT: vsetvli zero, a0, e16alt, mf2, ta, mu
+; CHECK-NEXT: vfncvt.rod.f.f.w v8, v9, v0.t
+; CHECK-NEXT: ret
+entry:
+ %a = call <vscale x 2 x bfloat> @llvm.riscv.vfncvt.rod.f.f.w.mask.nxv2bf16.nxv2f32(
+ <vscale x 2 x bfloat> %0,
+ <vscale x 2 x float> %1,
+ <vscale x 2 x i1> %2,
+ iXLen %3, iXLen 1)
+
+ ret <vscale x 2 x bfloat> %a
+}
+
+declare <vscale x 4 x bfloat> @llvm.riscv.vfncvt.rod.f.f.w.nxv4bf16.nxv4f32(
+ <vscale x 4 x bfloat>,
+ <vscale x 4 x float>,
+ iXLen);
+
+define <vscale x 4 x bfloat> @intrinsic_vfncvt_rod.f.f.w_nxv4bf16_nxv4f32(<vscale x 4 x float> %0, iXLen %1) nounwind {
+; CHECK-LABEL: intrinsic_vfncvt_rod.f.f.w_nxv4bf16_nxv4f32:
+; CHECK: # %bb.0: # %entry
+; CHECK-NEXT: vsetvli zero, a0, e16alt, m1, ta, ma
+; CHECK-NEXT: vfncvt.rod.f.f.w v10, v8
+; CHECK-NEXT: vmv.v.v v8, v10
+; CHECK-NEXT: ret
+entry:
+ %a = call <vscale x 4 x bfloat> @llvm.riscv.vfncvt.rod.f.f.w.nxv4bf16.nxv4f32(
+ <vscale x 4 x bfloat> poison,
+ <vscale x 4 x float> %0,
+ iXLen %1)
+
+ ret <vscale x 4 x bfloat> %a
+}
+
+declare <vscale x 4 x bfloat> @llvm.riscv.vfncvt.rod.f.f.w.mask.nxv4bf16.nxv4f32(
+ <vscale x 4 x bfloat>,
+ <vscale x 4 x float>,
+ <vscale x 4 x i1>,
+ iXLen,
+ iXLen);
+
+define <vscale x 4 x bfloat> @intrinsic_vfncvt_mask_rod.f.f.w_nxv4bf16_nxv4f32(<vscale x 4 x bfloat> %0, <vscale x 4 x float> %1, <vscale x 4 x i1> %2, iXLen %3) nounwind {
+; CHECK-LABEL: intrinsic_vfncvt_mask_rod.f.f.w_nxv4bf16_nxv4f32:
+; CHECK: # %bb.0: # %entry
+; CHECK-NEXT: vsetvli zero, a0, e16alt, m1, ta, mu
+; CHECK-NEXT: vfncvt.rod.f.f.w v8, v10, v0.t
+; CHECK-NEXT: ret
+entry:
+ %a = call <vscale x 4 x bfloat> @llvm.riscv.vfncvt.rod.f.f.w.mask.nxv4bf16.nxv4f32(
+ <vscale x 4 x bfloat> %0,
+ <vscale x 4 x float> %1,
+ <vscale x 4 x i1> %2,
+ iXLen %3, iXLen 1)
+
+ ret <vscale x 4 x bfloat> %a
+}
+
+declare <vscale x 8 x bfloat> @llvm.riscv.vfncvt.rod.f.f.w.nxv8bf16.nxv8f32(
+ <vscale x 8 x bfloat>,
+ <vscale x 8 x float>,
+ iXLen);
+
+define <vscale x 8 x bfloat> @intrinsic_vfncvt_rod.f.f.w_nxv8bf16_nxv8f32(<vscale x 8 x float> %0, iXLen %1) nounwind {
+; CHECK-LABEL: intrinsic_vfncvt_rod.f.f.w_nxv8bf16_nxv8f32:
+; CHECK: # %bb.0: # %entry
+; CHECK-NEXT: vsetvli zero, a0, e16alt, m2, ta, ma
+; CHECK-NEXT: vfncvt.rod.f.f.w v12, v8
+; CHECK-NEXT: vmv.v.v v8, v12
+; CHECK-NEXT: ret
+entry:
+ %a = call <vscale x 8 x bfloat> @llvm.riscv.vfncvt.rod.f.f.w.nxv8bf16.nxv8f32(
+ <vscale x 8 x bfloat> poison,
+ <vscale x 8 x float> %0,
+ iXLen %1)
+
+ ret <vscale x 8 x bfloat> %a
+}
+
+declare <vscale x 8 x bfloat> @llvm.riscv.vfncvt.rod.f.f.w.mask.nxv8bf16.nxv8f32(
+ <vscale x 8 x bfloat>,
+ <vscale x 8 x float>,
+ <vscale x 8 x i1>,
+ iXLen,
+ iXLen);
+
+define <vscale x 8 x bfloat> @intrinsic_vfncvt_mask_rod.f.f.w_nxv8bf16_nxv8f32(<vscale x 8 x bfloat> %0, <vscale x 8 x float> %1, <vscale x 8 x i1> %2, iXLen %3) nounwind {
+; CHECK-LABEL: intrinsic_vfncvt_mask_rod.f.f.w_nxv8bf16_nxv8f32:
+; CHECK: # %bb.0: # %entry
+; CHECK-NEXT: vsetvli zero, a0, e16alt, m2, ta, mu
+; CHECK-NEXT: vfncvt.rod.f.f.w v8, v12, v0.t
+; CHECK-NEXT: ret
+entry:
+ %a = call <vscale x 8 x bfloat> @llvm.riscv.vfncvt.rod.f.f.w.mask.nxv8bf16.nxv8f32(
+ <vscale x 8 x bfloat> %0,
+ <vscale x 8 x float> %1,
+ <vscale x 8 x i1> %2,
+ iXLen %3, iXLen 1)
+
+ ret <vscale x 8 x bfloat> %a
+}
+
+declare <vscale x 16 x bfloat> @llvm.riscv.vfncvt.rod.f.f.w.nxv16bf16.nxv16f32(
+ <vscale x 16 x bfloat>,
+ <vscale x 16 x float>,
+ iXLen);
+
+define <vscale x 16 x bfloat> @intrinsic_vfncvt_rod.f.f.w_nxv16bf16_nxv16f32(<vscale x 16 x float> %0, iXLen %1) nounwind {
+; CHECK-LABEL: intrinsic_vfncvt_rod.f.f.w_nxv16bf16_nxv16f32:
+; CHECK: # %bb.0: # %entry
+; CHECK-NEXT: vsetvli zero, a0, e16alt, m4, ta, ma
+; CHECK-NEXT: vfncvt.rod.f.f.w v16, v8
+; CHECK-NEXT: vmv.v.v v8, v16
+; CHECK-NEXT: ret
+entry:
+ %a = call <vscale x 16 x bfloat> @llvm.riscv.vfncvt.rod.f.f.w.nxv16bf16.nxv16f32(
+ <vscale x 16 x bfloat> poison,
+ <vscale x 16 x float> %0,
+ iXLen %1)
+
+ ret <vscale x 16 x bfloat> %a
+}
+
+declare <vscale x 16 x bfloat> @llvm.riscv.vfncvt.rod.f.f.w.mask.nxv16bf16.nxv16f32(
+ <vscale x 16 x bfloat>,
+ <vscale x 16 x float>,
+ <vscale x 16 x i1>,
+ iXLen,
+ iXLen);
+
+define <vscale x 16 x bfloat> @intrinsic_vfncvt_mask_rod.f.f.w_nxv16bf16_nxv16f32(<vscale x 16 x bfloat> %0, <vscale x 16 x float> %1, <vscale x 16 x i1> %2, iXLen %3) nounwind {
+; CHECK-LABEL: intrinsic_vfncvt_mask_rod.f.f.w_nxv16bf16_nxv16f32:
+; CHECK: # %bb.0: # %entry
+; CHECK-NEXT: vsetvli zero, a0, e16alt, m4, ta, mu
+; CHECK-NEXT: vfncvt.rod.f.f.w v8, v16, v0.t
+; CHECK-NEXT: ret
+entry:
+ %a = call <vscale x 16 x bfloat> @llvm.riscv.vfncvt.rod.f.f.w.mask.nxv16bf16.nxv16f32(
+ <vscale x 16 x bfloat> %0,
+ <vscale x 16 x float> %1,
+ <vscale x 16 x i1> %2,
+ iXLen %3, iXLen 1)
+
+ ret <vscale x 16 x bfloat> %a
+}
+
diff --git a/llvm/test/CodeGen/RISCV/rvv/vfncvt-rtz-x-bf.ll b/llvm/test/CodeGen/RISCV/rvv/vfncvt-rtz-x-bf.ll
new file mode 100644
index 0000000..ee9e3d1
--- /dev/null
+++ b/llvm/test/CodeGen/RISCV/rvv/vfncvt-rtz-x-bf.ll
@@ -0,0 +1,270 @@
+; NOTE: Assertions have been autogenerated by utils/update_llc_test_checks.py
+; RUN: sed 's/iXLen/i32/g' %s | llc -mtriple=riscv32 -mattr=+v,+experimental-zvfbfa \
+; RUN: -verify-machineinstrs -target-abi=ilp32d | FileCheck %s
+; RUN: sed 's/iXLen/i64/g' %s | llc -mtriple=riscv64 -mattr=+v,+experimental-zvfbfa \
+; RUN: -verify-machineinstrs -target-abi=lp64d | FileCheck %s
+
+declare <vscale x 1 x i8> @llvm.riscv.vfncvt.rtz.x.f.w.nxv1i8.nxv1bf16(
+ <vscale x 1 x i8>,
+ <vscale x 1 x bfloat>,
+ iXLen);
+
+define <vscale x 1 x i8> @intrinsic_vfncvt_rtz.x.f.w_nxv1i8_nxv1bf16(<vscale x 1 x bfloat> %0, iXLen %1) nounwind {
+; CHECK-LABEL: intrinsic_vfncvt_rtz.x.f.w_nxv1i8_nxv1bf16:
+; CHECK: # %bb.0: # %entry
+; CHECK-NEXT: vsetvli zero, a0, e8alt, mf8, ta, ma
+; CHECK-NEXT: vfncvt.rtz.x.f.w v9, v8
+; CHECK-NEXT: vmv1r.v v8, v9
+; CHECK-NEXT: ret
+entry:
+ %a = call <vscale x 1 x i8> @llvm.riscv.vfncvt.rtz.x.f.w.nxv1i8.nxv1bf16(
+ <vscale x 1 x i8> poison,
+ <vscale x 1 x bfloat> %0,
+ iXLen %1)
+
+ ret <vscale x 1 x i8> %a
+}
+
+declare <vscale x 1 x i8> @llvm.riscv.vfncvt.rtz.x.f.w.mask.nxv1i8.nxv1bf16(
+ <vscale x 1 x i8>,
+ <vscale x 1 x bfloat>,
+ <vscale x 1 x i1>,
+ iXLen,
+ iXLen);
+
+define <vscale x 1 x i8> @intrinsic_vfncvt_mask_rtz.x.f.w_nxv1i8_nxv1bf16(<vscale x 1 x i8> %0, <vscale x 1 x bfloat> %1, <vscale x 1 x i1> %2, iXLen %3) nounwind {
+; CHECK-LABEL: intrinsic_vfncvt_mask_rtz.x.f.w_nxv1i8_nxv1bf16:
+; CHECK: # %bb.0: # %entry
+; CHECK-NEXT: vsetvli zero, a0, e8alt, mf8, ta, mu
+; CHECK-NEXT: vfncvt.rtz.x.f.w v8, v9, v0.t
+; CHECK-NEXT: ret
+entry:
+ %a = call <vscale x 1 x i8> @llvm.riscv.vfncvt.rtz.x.f.w.mask.nxv1i8.nxv1bf16(
+ <vscale x 1 x i8> %0,
+ <vscale x 1 x bfloat> %1,
+ <vscale x 1 x i1> %2,
+ iXLen %3, iXLen 1)
+
+ ret <vscale x 1 x i8> %a
+}
+
+declare <vscale x 2 x i8> @llvm.riscv.vfncvt.rtz.x.f.w.nxv2i8.nxv2bf16(
+ <vscale x 2 x i8>,
+ <vscale x 2 x bfloat>,
+ iXLen);
+
+define <vscale x 2 x i8> @intrinsic_vfncvt_rtz.x.f.w_nxv2i8_nxv2bf16(<vscale x 2 x bfloat> %0, iXLen %1) nounwind {
+; CHECK-LABEL: intrinsic_vfncvt_rtz.x.f.w_nxv2i8_nxv2bf16:
+; CHECK: # %bb.0: # %entry
+; CHECK-NEXT: vsetvli zero, a0, e8alt, mf4, ta, ma
+; CHECK-NEXT: vfncvt.rtz.x.f.w v9, v8
+; CHECK-NEXT: vmv1r.v v8, v9
+; CHECK-NEXT: ret
+entry:
+ %a = call <vscale x 2 x i8> @llvm.riscv.vfncvt.rtz.x.f.w.nxv2i8.nxv2bf16(
+ <vscale x 2 x i8> poison,
+ <vscale x 2 x bfloat> %0,
+ iXLen %1)
+
+ ret <vscale x 2 x i8> %a
+}
+
+declare <vscale x 2 x i8> @llvm.riscv.vfncvt.rtz.x.f.w.mask.nxv2i8.nxv2bf16(
+ <vscale x 2 x i8>,
+ <vscale x 2 x bfloat>,
+ <vscale x 2 x i1>,
+ iXLen,
+ iXLen);
+
+define <vscale x 2 x i8> @intrinsic_vfncvt_mask_rtz.x.f.w_nxv2i8_nxv2bf16(<vscale x 2 x i8> %0, <vscale x 2 x bfloat> %1, <vscale x 2 x i1> %2, iXLen %3) nounwind {
+; CHECK-LABEL: intrinsic_vfncvt_mask_rtz.x.f.w_nxv2i8_nxv2bf16:
+; CHECK: # %bb.0: # %entry
+; CHECK-NEXT: vsetvli zero, a0, e8alt, mf4, ta, mu
+; CHECK-NEXT: vfncvt.rtz.x.f.w v8, v9, v0.t
+; CHECK-NEXT: ret
+entry:
+ %a = call <vscale x 2 x i8> @llvm.riscv.vfncvt.rtz.x.f.w.mask.nxv2i8.nxv2bf16(
+ <vscale x 2 x i8> %0,
+ <vscale x 2 x bfloat> %1,
+ <vscale x 2 x i1> %2,
+ iXLen %3, iXLen 1)
+
+ ret <vscale x 2 x i8> %a
+}
+
+declare <vscale x 4 x i8> @llvm.riscv.vfncvt.rtz.x.f.w.nxv4i8.nxv4bf16(
+ <vscale x 4 x i8>,
+ <vscale x 4 x bfloat>,
+ iXLen);
+
+define <vscale x 4 x i8> @intrinsic_vfncvt_rtz.x.f.w_nxv4i8_nxv4bf16(<vscale x 4 x bfloat> %0, iXLen %1) nounwind {
+; CHECK-LABEL: intrinsic_vfncvt_rtz.x.f.w_nxv4i8_nxv4bf16:
+; CHECK: # %bb.0: # %entry
+; CHECK-NEXT: vsetvli zero, a0, e8alt, mf2, ta, ma
+; CHECK-NEXT: vfncvt.rtz.x.f.w v9, v8
+; CHECK-NEXT: vmv1r.v v8, v9
+; CHECK-NEXT: ret
+entry:
+ %a = call <vscale x 4 x i8> @llvm.riscv.vfncvt.rtz.x.f.w.nxv4i8.nxv4bf16(
+ <vscale x 4 x i8> poison,
+ <vscale x 4 x bfloat> %0,
+ iXLen %1)
+
+ ret <vscale x 4 x i8> %a
+}
+
+declare <vscale x 4 x i8> @llvm.riscv.vfncvt.rtz.x.f.w.mask.nxv4i8.nxv4bf16(
+ <vscale x 4 x i8>,
+ <vscale x 4 x bfloat>,
+ <vscale x 4 x i1>,
+ iXLen,
+ iXLen);
+
+define <vscale x 4 x i8> @intrinsic_vfncvt_mask_rtz.x.f.w_nxv4i8_nxv4bf16(<vscale x 4 x i8> %0, <vscale x 4 x bfloat> %1, <vscale x 4 x i1> %2, iXLen %3) nounwind {
+; CHECK-LABEL: intrinsic_vfncvt_mask_rtz.x.f.w_nxv4i8_nxv4bf16:
+; CHECK: # %bb.0: # %entry
+; CHECK-NEXT: vsetvli zero, a0, e8alt, mf2, ta, mu
+; CHECK-NEXT: vfncvt.rtz.x.f.w v8, v9, v0.t
+; CHECK-NEXT: ret
+entry:
+ %a = call <vscale x 4 x i8> @llvm.riscv.vfncvt.rtz.x.f.w.mask.nxv4i8.nxv4bf16(
+ <vscale x 4 x i8> %0,
+ <vscale x 4 x bfloat> %1,
+ <vscale x 4 x i1> %2,
+ iXLen %3, iXLen 1)
+
+ ret <vscale x 4 x i8> %a
+}
+
+declare <vscale x 8 x i8> @llvm.riscv.vfncvt.rtz.x.f.w.nxv8i8.nxv8bf16(
+ <vscale x 8 x i8>,
+ <vscale x 8 x bfloat>,
+ iXLen);
+
+define <vscale x 8 x i8> @intrinsic_vfncvt_rtz.x.f.w_nxv8i8_nxv8bf16(<vscale x 8 x bfloat> %0, iXLen %1) nounwind {
+; CHECK-LABEL: intrinsic_vfncvt_rtz.x.f.w_nxv8i8_nxv8bf16:
+; CHECK: # %bb.0: # %entry
+; CHECK-NEXT: vsetvli zero, a0, e8alt, m1, ta, ma
+; CHECK-NEXT: vfncvt.rtz.x.f.w v10, v8
+; CHECK-NEXT: vmv.v.v v8, v10
+; CHECK-NEXT: ret
+entry:
+ %a = call <vscale x 8 x i8> @llvm.riscv.vfncvt.rtz.x.f.w.nxv8i8.nxv8bf16(
+ <vscale x 8 x i8> poison,
+ <vscale x 8 x bfloat> %0,
+ iXLen %1)
+
+ ret <vscale x 8 x i8> %a
+}
+
+declare <vscale x 8 x i8> @llvm.riscv.vfncvt.rtz.x.f.w.mask.nxv8i8.nxv8bf16(
+ <vscale x 8 x i8>,
+ <vscale x 8 x bfloat>,
+ <vscale x 8 x i1>,
+ iXLen,
+ iXLen);
+
+define <vscale x 8 x i8> @intrinsic_vfncvt_mask_rtz.x.f.w_nxv8i8_nxv8bf16(<vscale x 8 x i8> %0, <vscale x 8 x bfloat> %1, <vscale x 8 x i1> %2, iXLen %3) nounwind {
+; CHECK-LABEL: intrinsic_vfncvt_mask_rtz.x.f.w_nxv8i8_nxv8bf16:
+; CHECK: # %bb.0: # %entry
+; CHECK-NEXT: vsetvli zero, a0, e8alt, m1, ta, mu
+; CHECK-NEXT: vfncvt.rtz.x.f.w v8, v10, v0.t
+; CHECK-NEXT: ret
+entry:
+ %a = call <vscale x 8 x i8> @llvm.riscv.vfncvt.rtz.x.f.w.mask.nxv8i8.nxv8bf16(
+ <vscale x 8 x i8> %0,
+ <vscale x 8 x bfloat> %1,
+ <vscale x 8 x i1> %2,
+ iXLen %3, iXLen 1)
+
+ ret <vscale x 8 x i8> %a
+}
+
+declare <vscale x 16 x i8> @llvm.riscv.vfncvt.rtz.x.f.w.nxv16i8.nxv16bf16(
+ <vscale x 16 x i8>,
+ <vscale x 16 x bfloat>,
+ iXLen);
+
+define <vscale x 16 x i8> @intrinsic_vfncvt_rtz.x.f.w_nxv16i8_nxv16bf16(<vscale x 16 x bfloat> %0, iXLen %1) nounwind {
+; CHECK-LABEL: intrinsic_vfncvt_rtz.x.f.w_nxv16i8_nxv16bf16:
+; CHECK: # %bb.0: # %entry
+; CHECK-NEXT: vsetvli zero, a0, e8alt, m2, ta, ma
+; CHECK-NEXT: vfncvt.rtz.x.f.w v12, v8
+; CHECK-NEXT: vmv.v.v v8, v12
+; CHECK-NEXT: ret
+entry:
+ %a = call <vscale x 16 x i8> @llvm.riscv.vfncvt.rtz.x.f.w.nxv16i8.nxv16bf16(
+ <vscale x 16 x i8> poison,
+ <vscale x 16 x bfloat> %0,
+ iXLen %1)
+
+ ret <vscale x 16 x i8> %a
+}
+
+declare <vscale x 16 x i8> @llvm.riscv.vfncvt.rtz.x.f.w.mask.nxv16i8.nxv16bf16(
+ <vscale x 16 x i8>,
+ <vscale x 16 x bfloat>,
+ <vscale x 16 x i1>,
+ iXLen,
+ iXLen);
+
+define <vscale x 16 x i8> @intrinsic_vfncvt_mask_rtz.x.f.w_nxv16i8_nxv16bf16(<vscale x 16 x i8> %0, <vscale x 16 x bfloat> %1, <vscale x 16 x i1> %2, iXLen %3) nounwind {
+; CHECK-LABEL: intrinsic_vfncvt_mask_rtz.x.f.w_nxv16i8_nxv16bf16:
+; CHECK: # %bb.0: # %entry
+; CHECK-NEXT: vsetvli zero, a0, e8alt, m2, ta, mu
+; CHECK-NEXT: vfncvt.rtz.x.f.w v8, v12, v0.t
+; CHECK-NEXT: ret
+entry:
+ %a = call <vscale x 16 x i8> @llvm.riscv.vfncvt.rtz.x.f.w.mask.nxv16i8.nxv16bf16(
+ <vscale x 16 x i8> %0,
+ <vscale x 16 x bfloat> %1,
+ <vscale x 16 x i1> %2,
+ iXLen %3, iXLen 1)
+
+ ret <vscale x 16 x i8> %a
+}
+
+declare <vscale x 32 x i8> @llvm.riscv.vfncvt.rtz.x.f.w.nxv32i8.nxv32bf16(
+ <vscale x 32 x i8>,
+ <vscale x 32 x bfloat>,
+ iXLen);
+
+define <vscale x 32 x i8> @intrinsic_vfncvt_rtz.x.f.w_nxv32i8_nxv32bf16(<vscale x 32 x bfloat> %0, iXLen %1) nounwind {
+; CHECK-LABEL: intrinsic_vfncvt_rtz.x.f.w_nxv32i8_nxv32bf16:
+; CHECK: # %bb.0: # %entry
+; CHECK-NEXT: vsetvli zero, a0, e8alt, m4, ta, ma
+; CHECK-NEXT: vfncvt.rtz.x.f.w v16, v8
+; CHECK-NEXT: vmv.v.v v8, v16
+; CHECK-NEXT: ret
+entry:
+ %a = call <vscale x 32 x i8> @llvm.riscv.vfncvt.rtz.x.f.w.nxv32i8.nxv32bf16(
+ <vscale x 32 x i8> poison,
+ <vscale x 32 x bfloat> %0,
+ iXLen %1)
+
+ ret <vscale x 32 x i8> %a
+}
+
+declare <vscale x 32 x i8> @llvm.riscv.vfncvt.rtz.x.f.w.mask.nxv32i8.nxv32bf16(
+ <vscale x 32 x i8>,
+ <vscale x 32 x bfloat>,
+ <vscale x 32 x i1>,
+ iXLen,
+ iXLen);
+
+define <vscale x 32 x i8> @intrinsic_vfncvt_mask_rtz.x.f.w_nxv32i8_nxv32bf16(<vscale x 32 x i8> %0, <vscale x 32 x bfloat> %1, <vscale x 32 x i1> %2, iXLen %3) nounwind {
+; CHECK-LABEL: intrinsic_vfncvt_mask_rtz.x.f.w_nxv32i8_nxv32bf16:
+; CHECK: # %bb.0: # %entry
+; CHECK-NEXT: vsetvli zero, a0, e8alt, m4, ta, mu
+; CHECK-NEXT: vfncvt.rtz.x.f.w v8, v16, v0.t
+; CHECK-NEXT: ret
+entry:
+ %a = call <vscale x 32 x i8> @llvm.riscv.vfncvt.rtz.x.f.w.mask.nxv32i8.nxv32bf16(
+ <vscale x 32 x i8> %0,
+ <vscale x 32 x bfloat> %1,
+ <vscale x 32 x i1> %2,
+ iXLen %3, iXLen 1)
+
+ ret <vscale x 32 x i8> %a
+}
+
diff --git a/llvm/test/CodeGen/RISCV/rvv/vfncvt-rtz-xu-bf.ll b/llvm/test/CodeGen/RISCV/rvv/vfncvt-rtz-xu-bf.ll
new file mode 100644
index 0000000..521f727
--- /dev/null
+++ b/llvm/test/CodeGen/RISCV/rvv/vfncvt-rtz-xu-bf.ll
@@ -0,0 +1,270 @@
+; NOTE: Assertions have been autogenerated by utils/update_llc_test_checks.py
+; RUN: sed 's/iXLen/i32/g' %s | llc -mtriple=riscv32 -mattr=+v,+experimental-zvfbfa \
+; RUN: -verify-machineinstrs -target-abi=ilp32d | FileCheck %s
+; RUN: sed 's/iXLen/i64/g' %s | llc -mtriple=riscv64 -mattr=+v,+experimental-zvfbfa \
+; RUN: -verify-machineinstrs -target-abi=lp64d | FileCheck %s
+
+declare <vscale x 1 x i8> @llvm.riscv.vfncvt.rtz.xu.f.w.nxv1i8.nxv1bf16(
+ <vscale x 1 x i8>,
+ <vscale x 1 x bfloat>,
+ iXLen);
+
+define <vscale x 1 x i8> @intrinsic_vfncvt_rtz.xu.f.w_nxv1i8_nxv1bf16(<vscale x 1 x bfloat> %0, iXLen %1) nounwind {
+; CHECK-LABEL: intrinsic_vfncvt_rtz.xu.f.w_nxv1i8_nxv1bf16:
+; CHECK: # %bb.0: # %entry
+; CHECK-NEXT: vsetvli zero, a0, e8alt, mf8, ta, ma
+; CHECK-NEXT: vfncvt.rtz.xu.f.w v9, v8
+; CHECK-NEXT: vmv1r.v v8, v9
+; CHECK-NEXT: ret
+entry:
+ %a = call <vscale x 1 x i8> @llvm.riscv.vfncvt.rtz.xu.f.w.nxv1i8.nxv1bf16(
+ <vscale x 1 x i8> poison,
+ <vscale x 1 x bfloat> %0,
+ iXLen %1)
+
+ ret <vscale x 1 x i8> %a
+}
+
+declare <vscale x 1 x i8> @llvm.riscv.vfncvt.rtz.xu.f.w.mask.nxv1i8.nxv1bf16(
+ <vscale x 1 x i8>,
+ <vscale x 1 x bfloat>,
+ <vscale x 1 x i1>,
+ iXLen,
+ iXLen);
+
+define <vscale x 1 x i8> @intrinsic_vfncvt_mask_rtz.xu.f.w_nxv1i8_nxv1bf16(<vscale x 1 x i8> %0, <vscale x 1 x bfloat> %1, <vscale x 1 x i1> %2, iXLen %3) nounwind {
+; CHECK-LABEL: intrinsic_vfncvt_mask_rtz.xu.f.w_nxv1i8_nxv1bf16:
+; CHECK: # %bb.0: # %entry
+; CHECK-NEXT: vsetvli zero, a0, e8alt, mf8, ta, mu
+; CHECK-NEXT: vfncvt.rtz.xu.f.w v8, v9, v0.t
+; CHECK-NEXT: ret
+entry:
+ %a = call <vscale x 1 x i8> @llvm.riscv.vfncvt.rtz.xu.f.w.mask.nxv1i8.nxv1bf16(
+ <vscale x 1 x i8> %0,
+ <vscale x 1 x bfloat> %1,
+ <vscale x 1 x i1> %2,
+ iXLen %3, iXLen 1)
+
+ ret <vscale x 1 x i8> %a
+}
+
+declare <vscale x 2 x i8> @llvm.riscv.vfncvt.rtz.xu.f.w.nxv2i8.nxv2bf16(
+ <vscale x 2 x i8>,
+ <vscale x 2 x bfloat>,
+ iXLen);
+
+define <vscale x 2 x i8> @intrinsic_vfncvt_rtz.xu.f.w_nxv2i8_nxv2bf16(<vscale x 2 x bfloat> %0, iXLen %1) nounwind {
+; CHECK-LABEL: intrinsic_vfncvt_rtz.xu.f.w_nxv2i8_nxv2bf16:
+; CHECK: # %bb.0: # %entry
+; CHECK-NEXT: vsetvli zero, a0, e8alt, mf4, ta, ma
+; CHECK-NEXT: vfncvt.rtz.xu.f.w v9, v8
+; CHECK-NEXT: vmv1r.v v8, v9
+; CHECK-NEXT: ret
+entry:
+ %a = call <vscale x 2 x i8> @llvm.riscv.vfncvt.rtz.xu.f.w.nxv2i8.nxv2bf16(
+ <vscale x 2 x i8> poison,
+ <vscale x 2 x bfloat> %0,
+ iXLen %1)
+
+ ret <vscale x 2 x i8> %a
+}
+
+declare <vscale x 2 x i8> @llvm.riscv.vfncvt.rtz.xu.f.w.mask.nxv2i8.nxv2bf16(
+ <vscale x 2 x i8>,
+ <vscale x 2 x bfloat>,
+ <vscale x 2 x i1>,
+ iXLen,
+ iXLen);
+
+define <vscale x 2 x i8> @intrinsic_vfncvt_mask_rtz.xu.f.w_nxv2i8_nxv2bf16(<vscale x 2 x i8> %0, <vscale x 2 x bfloat> %1, <vscale x 2 x i1> %2, iXLen %3) nounwind {
+; CHECK-LABEL: intrinsic_vfncvt_mask_rtz.xu.f.w_nxv2i8_nxv2bf16:
+; CHECK: # %bb.0: # %entry
+; CHECK-NEXT: vsetvli zero, a0, e8alt, mf4, ta, mu
+; CHECK-NEXT: vfncvt.rtz.xu.f.w v8, v9, v0.t
+; CHECK-NEXT: ret
+entry:
+ %a = call <vscale x 2 x i8> @llvm.riscv.vfncvt.rtz.xu.f.w.mask.nxv2i8.nxv2bf16(
+ <vscale x 2 x i8> %0,
+ <vscale x 2 x bfloat> %1,
+ <vscale x 2 x i1> %2,
+ iXLen %3, iXLen 1)
+
+ ret <vscale x 2 x i8> %a
+}
+
+declare <vscale x 4 x i8> @llvm.riscv.vfncvt.rtz.xu.f.w.nxv4i8.nxv4bf16(
+ <vscale x 4 x i8>,
+ <vscale x 4 x bfloat>,
+ iXLen);
+
+define <vscale x 4 x i8> @intrinsic_vfncvt_rtz.xu.f.w_nxv4i8_nxv4bf16(<vscale x 4 x bfloat> %0, iXLen %1) nounwind {
+; CHECK-LABEL: intrinsic_vfncvt_rtz.xu.f.w_nxv4i8_nxv4bf16:
+; CHECK: # %bb.0: # %entry
+; CHECK-NEXT: vsetvli zero, a0, e8alt, mf2, ta, ma
+; CHECK-NEXT: vfncvt.rtz.xu.f.w v9, v8
+; CHECK-NEXT: vmv1r.v v8, v9
+; CHECK-NEXT: ret
+entry:
+ %a = call <vscale x 4 x i8> @llvm.riscv.vfncvt.rtz.xu.f.w.nxv4i8.nxv4bf16(
+ <vscale x 4 x i8> poison,
+ <vscale x 4 x bfloat> %0,
+ iXLen %1)
+
+ ret <vscale x 4 x i8> %a
+}
+
+declare <vscale x 4 x i8> @llvm.riscv.vfncvt.rtz.xu.f.w.mask.nxv4i8.nxv4bf16(
+ <vscale x 4 x i8>,
+ <vscale x 4 x bfloat>,
+ <vscale x 4 x i1>,
+ iXLen,
+ iXLen);
+
+define <vscale x 4 x i8> @intrinsic_vfncvt_mask_rtz.xu.f.w_nxv4i8_nxv4bf16(<vscale x 4 x i8> %0, <vscale x 4 x bfloat> %1, <vscale x 4 x i1> %2, iXLen %3) nounwind {
+; CHECK-LABEL: intrinsic_vfncvt_mask_rtz.xu.f.w_nxv4i8_nxv4bf16:
+; CHECK: # %bb.0: # %entry
+; CHECK-NEXT: vsetvli zero, a0, e8alt, mf2, ta, mu
+; CHECK-NEXT: vfncvt.rtz.xu.f.w v8, v9, v0.t
+; CHECK-NEXT: ret
+entry:
+ %a = call <vscale x 4 x i8> @llvm.riscv.vfncvt.rtz.xu.f.w.mask.nxv4i8.nxv4bf16(
+ <vscale x 4 x i8> %0,
+ <vscale x 4 x bfloat> %1,
+ <vscale x 4 x i1> %2,
+ iXLen %3, iXLen 1)
+
+ ret <vscale x 4 x i8> %a
+}
+
+declare <vscale x 8 x i8> @llvm.riscv.vfncvt.rtz.xu.f.w.nxv8i8.nxv8bf16(
+ <vscale x 8 x i8>,
+ <vscale x 8 x bfloat>,
+ iXLen);
+
+define <vscale x 8 x i8> @intrinsic_vfncvt_rtz.xu.f.w_nxv8i8_nxv8bf16(<vscale x 8 x bfloat> %0, iXLen %1) nounwind {
+; CHECK-LABEL: intrinsic_vfncvt_rtz.xu.f.w_nxv8i8_nxv8bf16:
+; CHECK: # %bb.0: # %entry
+; CHECK-NEXT: vsetvli zero, a0, e8alt, m1, ta, ma
+; CHECK-NEXT: vfncvt.rtz.xu.f.w v10, v8
+; CHECK-NEXT: vmv.v.v v8, v10
+; CHECK-NEXT: ret
+entry:
+ %a = call <vscale x 8 x i8> @llvm.riscv.vfncvt.rtz.xu.f.w.nxv8i8.nxv8bf16(
+ <vscale x 8 x i8> poison,
+ <vscale x 8 x bfloat> %0,
+ iXLen %1)
+
+ ret <vscale x 8 x i8> %a
+}
+
+declare <vscale x 8 x i8> @llvm.riscv.vfncvt.rtz.xu.f.w.mask.nxv8i8.nxv8bf16(
+ <vscale x 8 x i8>,
+ <vscale x 8 x bfloat>,
+ <vscale x 8 x i1>,
+ iXLen,
+ iXLen);
+
+define <vscale x 8 x i8> @intrinsic_vfncvt_mask_rtz.xu.f.w_nxv8i8_nxv8bf16(<vscale x 8 x i8> %0, <vscale x 8 x bfloat> %1, <vscale x 8 x i1> %2, iXLen %3) nounwind {
+; CHECK-LABEL: intrinsic_vfncvt_mask_rtz.xu.f.w_nxv8i8_nxv8bf16:
+; CHECK: # %bb.0: # %entry
+; CHECK-NEXT: vsetvli zero, a0, e8alt, m1, ta, mu
+; CHECK-NEXT: vfncvt.rtz.xu.f.w v8, v10, v0.t
+; CHECK-NEXT: ret
+entry:
+ %a = call <vscale x 8 x i8> @llvm.riscv.vfncvt.rtz.xu.f.w.mask.nxv8i8.nxv8bf16(
+ <vscale x 8 x i8> %0,
+ <vscale x 8 x bfloat> %1,
+ <vscale x 8 x i1> %2,
+ iXLen %3, iXLen 1)
+
+ ret <vscale x 8 x i8> %a
+}
+
+declare <vscale x 16 x i8> @llvm.riscv.vfncvt.rtz.xu.f.w.nxv16i8.nxv16bf16(
+ <vscale x 16 x i8>,
+ <vscale x 16 x bfloat>,
+ iXLen);
+
+define <vscale x 16 x i8> @intrinsic_vfncvt_rtz.xu.f.w_nxv16i8_nxv16bf16(<vscale x 16 x bfloat> %0, iXLen %1) nounwind {
+; CHECK-LABEL: intrinsic_vfncvt_rtz.xu.f.w_nxv16i8_nxv16bf16:
+; CHECK: # %bb.0: # %entry
+; CHECK-NEXT: vsetvli zero, a0, e8alt, m2, ta, ma
+; CHECK-NEXT: vfncvt.rtz.xu.f.w v12, v8
+; CHECK-NEXT: vmv.v.v v8, v12
+; CHECK-NEXT: ret
+entry:
+ %a = call <vscale x 16 x i8> @llvm.riscv.vfncvt.rtz.xu.f.w.nxv16i8.nxv16bf16(
+ <vscale x 16 x i8> poison,
+ <vscale x 16 x bfloat> %0,
+ iXLen %1)
+
+ ret <vscale x 16 x i8> %a
+}
+
+declare <vscale x 16 x i8> @llvm.riscv.vfncvt.rtz.xu.f.w.mask.nxv16i8.nxv16bf16(
+ <vscale x 16 x i8>,
+ <vscale x 16 x bfloat>,
+ <vscale x 16 x i1>,
+ iXLen,
+ iXLen);
+
+define <vscale x 16 x i8> @intrinsic_vfncvt_mask_rtz.xu.f.w_nxv16i8_nxv16bf16(<vscale x 16 x i8> %0, <vscale x 16 x bfloat> %1, <vscale x 16 x i1> %2, iXLen %3) nounwind {
+; CHECK-LABEL: intrinsic_vfncvt_mask_rtz.xu.f.w_nxv16i8_nxv16bf16:
+; CHECK: # %bb.0: # %entry
+; CHECK-NEXT: vsetvli zero, a0, e8alt, m2, ta, mu
+; CHECK-NEXT: vfncvt.rtz.xu.f.w v8, v12, v0.t
+; CHECK-NEXT: ret
+entry:
+ %a = call <vscale x 16 x i8> @llvm.riscv.vfncvt.rtz.xu.f.w.mask.nxv16i8.nxv16bf16(
+ <vscale x 16 x i8> %0,
+ <vscale x 16 x bfloat> %1,
+ <vscale x 16 x i1> %2,
+ iXLen %3, iXLen 1)
+
+ ret <vscale x 16 x i8> %a
+}
+
+declare <vscale x 32 x i8> @llvm.riscv.vfncvt.rtz.xu.f.w.nxv32i8.nxv32bf16(
+ <vscale x 32 x i8>,
+ <vscale x 32 x bfloat>,
+ iXLen);
+
+define <vscale x 32 x i8> @intrinsic_vfncvt_rtz.xu.f.w_nxv32i8_nxv32bf16(<vscale x 32 x bfloat> %0, iXLen %1) nounwind {
+; CHECK-LABEL: intrinsic_vfncvt_rtz.xu.f.w_nxv32i8_nxv32bf16:
+; CHECK: # %bb.0: # %entry
+; CHECK-NEXT: vsetvli zero, a0, e8alt, m4, ta, ma
+; CHECK-NEXT: vfncvt.rtz.xu.f.w v16, v8
+; CHECK-NEXT: vmv.v.v v8, v16
+; CHECK-NEXT: ret
+entry:
+ %a = call <vscale x 32 x i8> @llvm.riscv.vfncvt.rtz.xu.f.w.nxv32i8.nxv32bf16(
+ <vscale x 32 x i8> poison,
+ <vscale x 32 x bfloat> %0,
+ iXLen %1)
+
+ ret <vscale x 32 x i8> %a
+}
+
+declare <vscale x 32 x i8> @llvm.riscv.vfncvt.rtz.xu.f.w.mask.nxv32i8.nxv32bf16(
+ <vscale x 32 x i8>,
+ <vscale x 32 x bfloat>,
+ <vscale x 32 x i1>,
+ iXLen,
+ iXLen);
+
+define <vscale x 32 x i8> @intrinsic_vfncvt_mask_rtz.xu.f.w_nxv32i8_nxv32bf16(<vscale x 32 x i8> %0, <vscale x 32 x bfloat> %1, <vscale x 32 x i1> %2, iXLen %3) nounwind {
+; CHECK-LABEL: intrinsic_vfncvt_mask_rtz.xu.f.w_nxv32i8_nxv32bf16:
+; CHECK: # %bb.0: # %entry
+; CHECK-NEXT: vsetvli zero, a0, e8alt, m4, ta, mu
+; CHECK-NEXT: vfncvt.rtz.xu.f.w v8, v16, v0.t
+; CHECK-NEXT: ret
+entry:
+ %a = call <vscale x 32 x i8> @llvm.riscv.vfncvt.rtz.xu.f.w.mask.nxv32i8.nxv32bf16(
+ <vscale x 32 x i8> %0,
+ <vscale x 32 x bfloat> %1,
+ <vscale x 32 x i1> %2,
+ iXLen %3, iXLen 1)
+
+ ret <vscale x 32 x i8> %a
+}
+
diff --git a/llvm/test/CodeGen/RISCV/rvv/vfncvt-x-bf.ll b/llvm/test/CodeGen/RISCV/rvv/vfncvt-x-bf.ll
new file mode 100644
index 0000000..ab9ebad
--- /dev/null
+++ b/llvm/test/CodeGen/RISCV/rvv/vfncvt-x-bf.ll
@@ -0,0 +1,288 @@
+; NOTE: Assertions have been autogenerated by utils/update_llc_test_checks.py
+; RUN: sed 's/iXLen/i32/g' %s | llc -mtriple=riscv32 -mattr=+v,+experimental-zvfbfa \
+; RUN: -verify-machineinstrs -target-abi=ilp32d | FileCheck %s
+; RUN: sed 's/iXLen/i64/g' %s | llc -mtriple=riscv64 -mattr=+v,+experimental-zvfbfa \
+; RUN: -verify-machineinstrs -target-abi=lp64d | FileCheck %s
+
+declare <vscale x 1 x i8> @llvm.riscv.vfncvt.x.f.w.nxv1i8.nxv1bf16(
+ <vscale x 1 x i8>,
+ <vscale x 1 x bfloat>,
+ iXLen, iXLen);
+
+define <vscale x 1 x i8> @intrinsic_vfncvt_x.f.w_nxv1i8_nxv1bf16(<vscale x 1 x bfloat> %0, iXLen %1) nounwind {
+; CHECK-LABEL: intrinsic_vfncvt_x.f.w_nxv1i8_nxv1bf16:
+; CHECK: # %bb.0: # %entry
+; CHECK-NEXT: fsrmi a1, 0
+; CHECK-NEXT: vsetvli zero, a0, e8alt, mf8, ta, ma
+; CHECK-NEXT: vfncvt.x.f.w v9, v8
+; CHECK-NEXT: fsrm a1
+; CHECK-NEXT: vmv1r.v v8, v9
+; CHECK-NEXT: ret
+entry:
+ %a = call <vscale x 1 x i8> @llvm.riscv.vfncvt.x.f.w.nxv1i8.nxv1bf16(
+ <vscale x 1 x i8> poison,
+ <vscale x 1 x bfloat> %0,
+ iXLen 0, iXLen %1)
+
+ ret <vscale x 1 x i8> %a
+}
+
+declare <vscale x 1 x i8> @llvm.riscv.vfncvt.x.f.w.mask.nxv1i8.nxv1bf16(
+ <vscale x 1 x i8>,
+ <vscale x 1 x bfloat>,
+ <vscale x 1 x i1>,
+ iXLen, iXLen, iXLen);
+
+define <vscale x 1 x i8> @intrinsic_vfncvt_mask_x.f.w_nxv1i8_nxv1bf16(<vscale x 1 x i8> %0, <vscale x 1 x bfloat> %1, <vscale x 1 x i1> %2, iXLen %3) nounwind {
+; CHECK-LABEL: intrinsic_vfncvt_mask_x.f.w_nxv1i8_nxv1bf16:
+; CHECK: # %bb.0: # %entry
+; CHECK-NEXT: fsrmi a1, 0
+; CHECK-NEXT: vsetvli zero, a0, e8alt, mf8, ta, mu
+; CHECK-NEXT: vfncvt.x.f.w v8, v9, v0.t
+; CHECK-NEXT: fsrm a1
+; CHECK-NEXT: ret
+entry:
+ %a = call <vscale x 1 x i8> @llvm.riscv.vfncvt.x.f.w.mask.nxv1i8.nxv1bf16(
+ <vscale x 1 x i8> %0,
+ <vscale x 1 x bfloat> %1,
+ <vscale x 1 x i1> %2,
+ iXLen 0, iXLen %3, iXLen 1)
+
+ ret <vscale x 1 x i8> %a
+}
+
+declare <vscale x 2 x i8> @llvm.riscv.vfncvt.x.f.w.nxv2i8.nxv2bf16(
+ <vscale x 2 x i8>,
+ <vscale x 2 x bfloat>,
+ iXLen, iXLen);
+
+define <vscale x 2 x i8> @intrinsic_vfncvt_x.f.w_nxv2i8_nxv2bf16(<vscale x 2 x bfloat> %0, iXLen %1) nounwind {
+; CHECK-LABEL: intrinsic_vfncvt_x.f.w_nxv2i8_nxv2bf16:
+; CHECK: # %bb.0: # %entry
+; CHECK-NEXT: fsrmi a1, 0
+; CHECK-NEXT: vsetvli zero, a0, e8alt, mf4, ta, ma
+; CHECK-NEXT: vfncvt.x.f.w v9, v8
+; CHECK-NEXT: fsrm a1
+; CHECK-NEXT: vmv1r.v v8, v9
+; CHECK-NEXT: ret
+entry:
+ %a = call <vscale x 2 x i8> @llvm.riscv.vfncvt.x.f.w.nxv2i8.nxv2bf16(
+ <vscale x 2 x i8> poison,
+ <vscale x 2 x bfloat> %0,
+ iXLen 0, iXLen %1)
+
+ ret <vscale x 2 x i8> %a
+}
+
+declare <vscale x 2 x i8> @llvm.riscv.vfncvt.x.f.w.mask.nxv2i8.nxv2bf16(
+ <vscale x 2 x i8>,
+ <vscale x 2 x bfloat>,
+ <vscale x 2 x i1>,
+ iXLen, iXLen, iXLen);
+
+define <vscale x 2 x i8> @intrinsic_vfncvt_mask_x.f.w_nxv2i8_nxv2bf16(<vscale x 2 x i8> %0, <vscale x 2 x bfloat> %1, <vscale x 2 x i1> %2, iXLen %3) nounwind {
+; CHECK-LABEL: intrinsic_vfncvt_mask_x.f.w_nxv2i8_nxv2bf16:
+; CHECK: # %bb.0: # %entry
+; CHECK-NEXT: fsrmi a1, 0
+; CHECK-NEXT: vsetvli zero, a0, e8alt, mf4, ta, mu
+; CHECK-NEXT: vfncvt.x.f.w v8, v9, v0.t
+; CHECK-NEXT: fsrm a1
+; CHECK-NEXT: ret
+entry:
+ %a = call <vscale x 2 x i8> @llvm.riscv.vfncvt.x.f.w.mask.nxv2i8.nxv2bf16(
+ <vscale x 2 x i8> %0,
+ <vscale x 2 x bfloat> %1,
+ <vscale x 2 x i1> %2,
+ iXLen 0, iXLen %3, iXLen 1)
+
+ ret <vscale x 2 x i8> %a
+}
+
+declare <vscale x 4 x i8> @llvm.riscv.vfncvt.x.f.w.nxv4i8.nxv4bf16(
+ <vscale x 4 x i8>,
+ <vscale x 4 x bfloat>,
+ iXLen, iXLen);
+
+define <vscale x 4 x i8> @intrinsic_vfncvt_x.f.w_nxv4i8_nxv4bf16(<vscale x 4 x bfloat> %0, iXLen %1) nounwind {
+; CHECK-LABEL: intrinsic_vfncvt_x.f.w_nxv4i8_nxv4bf16:
+; CHECK: # %bb.0: # %entry
+; CHECK-NEXT: fsrmi a1, 0
+; CHECK-NEXT: vsetvli zero, a0, e8alt, mf2, ta, ma
+; CHECK-NEXT: vfncvt.x.f.w v9, v8
+; CHECK-NEXT: fsrm a1
+; CHECK-NEXT: vmv1r.v v8, v9
+; CHECK-NEXT: ret
+entry:
+ %a = call <vscale x 4 x i8> @llvm.riscv.vfncvt.x.f.w.nxv4i8.nxv4bf16(
+ <vscale x 4 x i8> poison,
+ <vscale x 4 x bfloat> %0,
+ iXLen 0, iXLen %1)
+
+ ret <vscale x 4 x i8> %a
+}
+
+declare <vscale x 4 x i8> @llvm.riscv.vfncvt.x.f.w.mask.nxv4i8.nxv4bf16(
+ <vscale x 4 x i8>,
+ <vscale x 4 x bfloat>,
+ <vscale x 4 x i1>,
+ iXLen, iXLen, iXLen);
+
+define <vscale x 4 x i8> @intrinsic_vfncvt_mask_x.f.w_nxv4i8_nxv4bf16(<vscale x 4 x i8> %0, <vscale x 4 x bfloat> %1, <vscale x 4 x i1> %2, iXLen %3) nounwind {
+; CHECK-LABEL: intrinsic_vfncvt_mask_x.f.w_nxv4i8_nxv4bf16:
+; CHECK: # %bb.0: # %entry
+; CHECK-NEXT: fsrmi a1, 0
+; CHECK-NEXT: vsetvli zero, a0, e8alt, mf2, ta, mu
+; CHECK-NEXT: vfncvt.x.f.w v8, v9, v0.t
+; CHECK-NEXT: fsrm a1
+; CHECK-NEXT: ret
+entry:
+ %a = call <vscale x 4 x i8> @llvm.riscv.vfncvt.x.f.w.mask.nxv4i8.nxv4bf16(
+ <vscale x 4 x i8> %0,
+ <vscale x 4 x bfloat> %1,
+ <vscale x 4 x i1> %2,
+ iXLen 0, iXLen %3, iXLen 1)
+
+ ret <vscale x 4 x i8> %a
+}
+
+declare <vscale x 8 x i8> @llvm.riscv.vfncvt.x.f.w.nxv8i8.nxv8bf16(
+ <vscale x 8 x i8>,
+ <vscale x 8 x bfloat>,
+ iXLen, iXLen);
+
+define <vscale x 8 x i8> @intrinsic_vfncvt_x.f.w_nxv8i8_nxv8bf16(<vscale x 8 x bfloat> %0, iXLen %1) nounwind {
+; CHECK-LABEL: intrinsic_vfncvt_x.f.w_nxv8i8_nxv8bf16:
+; CHECK: # %bb.0: # %entry
+; CHECK-NEXT: fsrmi a1, 0
+; CHECK-NEXT: vsetvli zero, a0, e8alt, m1, ta, ma
+; CHECK-NEXT: vfncvt.x.f.w v10, v8
+; CHECK-NEXT: fsrm a1
+; CHECK-NEXT: vmv.v.v v8, v10
+; CHECK-NEXT: ret
+entry:
+ %a = call <vscale x 8 x i8> @llvm.riscv.vfncvt.x.f.w.nxv8i8.nxv8bf16(
+ <vscale x 8 x i8> poison,
+ <vscale x 8 x bfloat> %0,
+ iXLen 0, iXLen %1)
+
+ ret <vscale x 8 x i8> %a
+}
+
+declare <vscale x 8 x i8> @llvm.riscv.vfncvt.x.f.w.mask.nxv8i8.nxv8bf16(
+ <vscale x 8 x i8>,
+ <vscale x 8 x bfloat>,
+ <vscale x 8 x i1>,
+ iXLen, iXLen, iXLen);
+
+define <vscale x 8 x i8> @intrinsic_vfncvt_mask_x.f.w_nxv8i8_nxv8bf16(<vscale x 8 x i8> %0, <vscale x 8 x bfloat> %1, <vscale x 8 x i1> %2, iXLen %3) nounwind {
+; CHECK-LABEL: intrinsic_vfncvt_mask_x.f.w_nxv8i8_nxv8bf16:
+; CHECK: # %bb.0: # %entry
+; CHECK-NEXT: fsrmi a1, 0
+; CHECK-NEXT: vsetvli zero, a0, e8alt, m1, ta, mu
+; CHECK-NEXT: vfncvt.x.f.w v8, v10, v0.t
+; CHECK-NEXT: fsrm a1
+; CHECK-NEXT: ret
+entry:
+ %a = call <vscale x 8 x i8> @llvm.riscv.vfncvt.x.f.w.mask.nxv8i8.nxv8bf16(
+ <vscale x 8 x i8> %0,
+ <vscale x 8 x bfloat> %1,
+ <vscale x 8 x i1> %2,
+ iXLen 0, iXLen %3, iXLen 1)
+
+ ret <vscale x 8 x i8> %a
+}
+
+declare <vscale x 16 x i8> @llvm.riscv.vfncvt.x.f.w.nxv16i8.nxv16bf16(
+ <vscale x 16 x i8>,
+ <vscale x 16 x bfloat>,
+ iXLen, iXLen);
+
+define <vscale x 16 x i8> @intrinsic_vfncvt_x.f.w_nxv16i8_nxv16bf16(<vscale x 16 x bfloat> %0, iXLen %1) nounwind {
+; CHECK-LABEL: intrinsic_vfncvt_x.f.w_nxv16i8_nxv16bf16:
+; CHECK: # %bb.0: # %entry
+; CHECK-NEXT: fsrmi a1, 0
+; CHECK-NEXT: vsetvli zero, a0, e8alt, m2, ta, ma
+; CHECK-NEXT: vfncvt.x.f.w v12, v8
+; CHECK-NEXT: fsrm a1
+; CHECK-NEXT: vmv.v.v v8, v12
+; CHECK-NEXT: ret
+entry:
+ %a = call <vscale x 16 x i8> @llvm.riscv.vfncvt.x.f.w.nxv16i8.nxv16bf16(
+ <vscale x 16 x i8> poison,
+ <vscale x 16 x bfloat> %0,
+ iXLen 0, iXLen %1)
+
+ ret <vscale x 16 x i8> %a
+}
+
+declare <vscale x 16 x i8> @llvm.riscv.vfncvt.x.f.w.mask.nxv16i8.nxv16bf16(
+ <vscale x 16 x i8>,
+ <vscale x 16 x bfloat>,
+ <vscale x 16 x i1>,
+ iXLen, iXLen, iXLen);
+
+define <vscale x 16 x i8> @intrinsic_vfncvt_mask_x.f.w_nxv16i8_nxv16bf16(<vscale x 16 x i8> %0, <vscale x 16 x bfloat> %1, <vscale x 16 x i1> %2, iXLen %3) nounwind {
+; CHECK-LABEL: intrinsic_vfncvt_mask_x.f.w_nxv16i8_nxv16bf16:
+; CHECK: # %bb.0: # %entry
+; CHECK-NEXT: fsrmi a1, 0
+; CHECK-NEXT: vsetvli zero, a0, e8alt, m2, ta, mu
+; CHECK-NEXT: vfncvt.x.f.w v8, v12, v0.t
+; CHECK-NEXT: fsrm a1
+; CHECK-NEXT: ret
+entry:
+ %a = call <vscale x 16 x i8> @llvm.riscv.vfncvt.x.f.w.mask.nxv16i8.nxv16bf16(
+ <vscale x 16 x i8> %0,
+ <vscale x 16 x bfloat> %1,
+ <vscale x 16 x i1> %2,
+ iXLen 0, iXLen %3, iXLen 1)
+
+ ret <vscale x 16 x i8> %a
+}
+
+declare <vscale x 32 x i8> @llvm.riscv.vfncvt.x.f.w.nxv32i8.nxv32bf16(
+ <vscale x 32 x i8>,
+ <vscale x 32 x bfloat>,
+ iXLen, iXLen);
+
+define <vscale x 32 x i8> @intrinsic_vfncvt_x.f.w_nxv32i8_nxv32bf16(<vscale x 32 x bfloat> %0, iXLen %1) nounwind {
+; CHECK-LABEL: intrinsic_vfncvt_x.f.w_nxv32i8_nxv32bf16:
+; CHECK: # %bb.0: # %entry
+; CHECK-NEXT: fsrmi a1, 0
+; CHECK-NEXT: vsetvli zero, a0, e8alt, m4, ta, ma
+; CHECK-NEXT: vfncvt.x.f.w v16, v8
+; CHECK-NEXT: fsrm a1
+; CHECK-NEXT: vmv.v.v v8, v16
+; CHECK-NEXT: ret
+entry:
+ %a = call <vscale x 32 x i8> @llvm.riscv.vfncvt.x.f.w.nxv32i8.nxv32bf16(
+ <vscale x 32 x i8> poison,
+ <vscale x 32 x bfloat> %0,
+ iXLen 0, iXLen %1)
+
+ ret <vscale x 32 x i8> %a
+}
+
+declare <vscale x 32 x i8> @llvm.riscv.vfncvt.x.f.w.mask.nxv32i8.nxv32bf16(
+ <vscale x 32 x i8>,
+ <vscale x 32 x bfloat>,
+ <vscale x 32 x i1>,
+ iXLen, iXLen, iXLen);
+
+define <vscale x 32 x i8> @intrinsic_vfncvt_mask_x.f.w_nxv32i8_nxv32bf16(<vscale x 32 x i8> %0, <vscale x 32 x bfloat> %1, <vscale x 32 x i1> %2, iXLen %3) nounwind {
+; CHECK-LABEL: intrinsic_vfncvt_mask_x.f.w_nxv32i8_nxv32bf16:
+; CHECK: # %bb.0: # %entry
+; CHECK-NEXT: fsrmi a1, 0
+; CHECK-NEXT: vsetvli zero, a0, e8alt, m4, ta, mu
+; CHECK-NEXT: vfncvt.x.f.w v8, v16, v0.t
+; CHECK-NEXT: fsrm a1
+; CHECK-NEXT: ret
+entry:
+ %a = call <vscale x 32 x i8> @llvm.riscv.vfncvt.x.f.w.mask.nxv32i8.nxv32bf16(
+ <vscale x 32 x i8> %0,
+ <vscale x 32 x bfloat> %1,
+ <vscale x 32 x i1> %2,
+ iXLen 0, iXLen %3, iXLen 1)
+
+ ret <vscale x 32 x i8> %a
+}
+
diff --git a/llvm/test/CodeGen/RISCV/rvv/vfncvt-xu-bf.ll b/llvm/test/CodeGen/RISCV/rvv/vfncvt-xu-bf.ll
new file mode 100644
index 0000000..61c6803
--- /dev/null
+++ b/llvm/test/CodeGen/RISCV/rvv/vfncvt-xu-bf.ll
@@ -0,0 +1,288 @@
+; NOTE: Assertions have been autogenerated by utils/update_llc_test_checks.py
+; RUN: sed 's/iXLen/i32/g' %s | llc -mtriple=riscv32 -mattr=+v,+experimental-zvfbfa \
+; RUN: -verify-machineinstrs -target-abi=ilp32d | FileCheck %s
+; RUN: sed 's/iXLen/i64/g' %s | llc -mtriple=riscv64 -mattr=+v,+experimental-zvfbfa \
+; RUN: -verify-machineinstrs -target-abi=lp64d | FileCheck %s
+
+declare <vscale x 1 x i8> @llvm.riscv.vfncvt.xu.f.w.nxv1i8.nxv1bf16(
+ <vscale x 1 x i8>,
+ <vscale x 1 x bfloat>,
+ iXLen, iXLen);
+
+define <vscale x 1 x i8> @intrinsic_vfncvt_xu.f.w_nxv1i8_nxv1bf16(<vscale x 1 x bfloat> %0, iXLen %1) nounwind {
+; CHECK-LABEL: intrinsic_vfncvt_xu.f.w_nxv1i8_nxv1bf16:
+; CHECK: # %bb.0: # %entry
+; CHECK-NEXT: fsrmi a1, 0
+; CHECK-NEXT: vsetvli zero, a0, e8alt, mf8, ta, ma
+; CHECK-NEXT: vfncvt.xu.f.w v9, v8
+; CHECK-NEXT: fsrm a1
+; CHECK-NEXT: vmv1r.v v8, v9
+; CHECK-NEXT: ret
+entry:
+ %a = call <vscale x 1 x i8> @llvm.riscv.vfncvt.xu.f.w.nxv1i8.nxv1bf16(
+ <vscale x 1 x i8> poison,
+ <vscale x 1 x bfloat> %0,
+ iXLen 0, iXLen %1)
+
+ ret <vscale x 1 x i8> %a
+}
+
+declare <vscale x 1 x i8> @llvm.riscv.vfncvt.xu.f.w.mask.nxv1i8.nxv1bf16(
+ <vscale x 1 x i8>,
+ <vscale x 1 x bfloat>,
+ <vscale x 1 x i1>,
+ iXLen, iXLen, iXLen);
+
+define <vscale x 1 x i8> @intrinsic_vfncvt_mask_xu.f.w_nxv1i8_nxv1bf16(<vscale x 1 x i8> %0, <vscale x 1 x bfloat> %1, <vscale x 1 x i1> %2, iXLen %3) nounwind {
+; CHECK-LABEL: intrinsic_vfncvt_mask_xu.f.w_nxv1i8_nxv1bf16:
+; CHECK: # %bb.0: # %entry
+; CHECK-NEXT: fsrmi a1, 0
+; CHECK-NEXT: vsetvli zero, a0, e8alt, mf8, ta, mu
+; CHECK-NEXT: vfncvt.xu.f.w v8, v9, v0.t
+; CHECK-NEXT: fsrm a1
+; CHECK-NEXT: ret
+entry:
+ %a = call <vscale x 1 x i8> @llvm.riscv.vfncvt.xu.f.w.mask.nxv1i8.nxv1bf16(
+ <vscale x 1 x i8> %0,
+ <vscale x 1 x bfloat> %1,
+ <vscale x 1 x i1> %2,
+ iXLen 0, iXLen %3, iXLen 1)
+
+ ret <vscale x 1 x i8> %a
+}
+
+declare <vscale x 2 x i8> @llvm.riscv.vfncvt.xu.f.w.nxv2i8.nxv2bf16(
+ <vscale x 2 x i8>,
+ <vscale x 2 x bfloat>,
+ iXLen, iXLen);
+
+define <vscale x 2 x i8> @intrinsic_vfncvt_xu.f.w_nxv2i8_nxv2bf16(<vscale x 2 x bfloat> %0, iXLen %1) nounwind {
+; CHECK-LABEL: intrinsic_vfncvt_xu.f.w_nxv2i8_nxv2bf16:
+; CHECK: # %bb.0: # %entry
+; CHECK-NEXT: fsrmi a1, 0
+; CHECK-NEXT: vsetvli zero, a0, e8alt, mf4, ta, ma
+; CHECK-NEXT: vfncvt.xu.f.w v9, v8
+; CHECK-NEXT: fsrm a1
+; CHECK-NEXT: vmv1r.v v8, v9
+; CHECK-NEXT: ret
+entry:
+ %a = call <vscale x 2 x i8> @llvm.riscv.vfncvt.xu.f.w.nxv2i8.nxv2bf16(
+ <vscale x 2 x i8> poison,
+ <vscale x 2 x bfloat> %0,
+ iXLen 0, iXLen %1)
+
+ ret <vscale x 2 x i8> %a
+}
+
+declare <vscale x 2 x i8> @llvm.riscv.vfncvt.xu.f.w.mask.nxv2i8.nxv2bf16(
+ <vscale x 2 x i8>,
+ <vscale x 2 x bfloat>,
+ <vscale x 2 x i1>,
+ iXLen, iXLen, iXLen);
+
+define <vscale x 2 x i8> @intrinsic_vfncvt_mask_xu.f.w_nxv2i8_nxv2bf16(<vscale x 2 x i8> %0, <vscale x 2 x bfloat> %1, <vscale x 2 x i1> %2, iXLen %3) nounwind {
+; CHECK-LABEL: intrinsic_vfncvt_mask_xu.f.w_nxv2i8_nxv2bf16:
+; CHECK: # %bb.0: # %entry
+; CHECK-NEXT: fsrmi a1, 0
+; CHECK-NEXT: vsetvli zero, a0, e8alt, mf4, ta, mu
+; CHECK-NEXT: vfncvt.xu.f.w v8, v9, v0.t
+; CHECK-NEXT: fsrm a1
+; CHECK-NEXT: ret
+entry:
+ %a = call <vscale x 2 x i8> @llvm.riscv.vfncvt.xu.f.w.mask.nxv2i8.nxv2bf16(
+ <vscale x 2 x i8> %0,
+ <vscale x 2 x bfloat> %1,
+ <vscale x 2 x i1> %2,
+ iXLen 0, iXLen %3, iXLen 1)
+
+ ret <vscale x 2 x i8> %a
+}
+
+declare <vscale x 4 x i8> @llvm.riscv.vfncvt.xu.f.w.nxv4i8.nxv4bf16(
+ <vscale x 4 x i8>,
+ <vscale x 4 x bfloat>,
+ iXLen, iXLen);
+
+define <vscale x 4 x i8> @intrinsic_vfncvt_xu.f.w_nxv4i8_nxv4bf16(<vscale x 4 x bfloat> %0, iXLen %1) nounwind {
+; CHECK-LABEL: intrinsic_vfncvt_xu.f.w_nxv4i8_nxv4bf16:
+; CHECK: # %bb.0: # %entry
+; CHECK-NEXT: fsrmi a1, 0
+; CHECK-NEXT: vsetvli zero, a0, e8alt, mf2, ta, ma
+; CHECK-NEXT: vfncvt.xu.f.w v9, v8
+; CHECK-NEXT: fsrm a1
+; CHECK-NEXT: vmv1r.v v8, v9
+; CHECK-NEXT: ret
+entry:
+ %a = call <vscale x 4 x i8> @llvm.riscv.vfncvt.xu.f.w.nxv4i8.nxv4bf16(
+ <vscale x 4 x i8> poison,
+ <vscale x 4 x bfloat> %0,
+ iXLen 0, iXLen %1)
+
+ ret <vscale x 4 x i8> %a
+}
+
+declare <vscale x 4 x i8> @llvm.riscv.vfncvt.xu.f.w.mask.nxv4i8.nxv4bf16(
+ <vscale x 4 x i8>,
+ <vscale x 4 x bfloat>,
+ <vscale x 4 x i1>,
+ iXLen, iXLen, iXLen);
+
+define <vscale x 4 x i8> @intrinsic_vfncvt_mask_xu.f.w_nxv4i8_nxv4bf16(<vscale x 4 x i8> %0, <vscale x 4 x bfloat> %1, <vscale x 4 x i1> %2, iXLen %3) nounwind {
+; CHECK-LABEL: intrinsic_vfncvt_mask_xu.f.w_nxv4i8_nxv4bf16:
+; CHECK: # %bb.0: # %entry
+; CHECK-NEXT: fsrmi a1, 0
+; CHECK-NEXT: vsetvli zero, a0, e8alt, mf2, ta, mu
+; CHECK-NEXT: vfncvt.xu.f.w v8, v9, v0.t
+; CHECK-NEXT: fsrm a1
+; CHECK-NEXT: ret
+entry:
+ %a = call <vscale x 4 x i8> @llvm.riscv.vfncvt.xu.f.w.mask.nxv4i8.nxv4bf16(
+ <vscale x 4 x i8> %0,
+ <vscale x 4 x bfloat> %1,
+ <vscale x 4 x i1> %2,
+ iXLen 0, iXLen %3, iXLen 1)
+
+ ret <vscale x 4 x i8> %a
+}
+
+declare <vscale x 8 x i8> @llvm.riscv.vfncvt.xu.f.w.nxv8i8.nxv8bf16(
+ <vscale x 8 x i8>,
+ <vscale x 8 x bfloat>,
+ iXLen, iXLen);
+
+define <vscale x 8 x i8> @intrinsic_vfncvt_xu.f.w_nxv8i8_nxv8bf16(<vscale x 8 x bfloat> %0, iXLen %1) nounwind {
+; CHECK-LABEL: intrinsic_vfncvt_xu.f.w_nxv8i8_nxv8bf16:
+; CHECK: # %bb.0: # %entry
+; CHECK-NEXT: fsrmi a1, 0
+; CHECK-NEXT: vsetvli zero, a0, e8alt, m1, ta, ma
+; CHECK-NEXT: vfncvt.xu.f.w v10, v8
+; CHECK-NEXT: fsrm a1
+; CHECK-NEXT: vmv.v.v v8, v10
+; CHECK-NEXT: ret
+entry:
+ %a = call <vscale x 8 x i8> @llvm.riscv.vfncvt.xu.f.w.nxv8i8.nxv8bf16(
+ <vscale x 8 x i8> poison,
+ <vscale x 8 x bfloat> %0,
+ iXLen 0, iXLen %1)
+
+ ret <vscale x 8 x i8> %a
+}
+
+declare <vscale x 8 x i8> @llvm.riscv.vfncvt.xu.f.w.mask.nxv8i8.nxv8bf16(
+ <vscale x 8 x i8>,
+ <vscale x 8 x bfloat>,
+ <vscale x 8 x i1>,
+ iXLen, iXLen, iXLen);
+
+define <vscale x 8 x i8> @intrinsic_vfncvt_mask_xu.f.w_nxv8i8_nxv8bf16(<vscale x 8 x i8> %0, <vscale x 8 x bfloat> %1, <vscale x 8 x i1> %2, iXLen %3) nounwind {
+; CHECK-LABEL: intrinsic_vfncvt_mask_xu.f.w_nxv8i8_nxv8bf16:
+; CHECK: # %bb.0: # %entry
+; CHECK-NEXT: fsrmi a1, 0
+; CHECK-NEXT: vsetvli zero, a0, e8alt, m1, ta, mu
+; CHECK-NEXT: vfncvt.xu.f.w v8, v10, v0.t
+; CHECK-NEXT: fsrm a1
+; CHECK-NEXT: ret
+entry:
+ %a = call <vscale x 8 x i8> @llvm.riscv.vfncvt.xu.f.w.mask.nxv8i8.nxv8bf16(
+ <vscale x 8 x i8> %0,
+ <vscale x 8 x bfloat> %1,
+ <vscale x 8 x i1> %2,
+ iXLen 0, iXLen %3, iXLen 1)
+
+ ret <vscale x 8 x i8> %a
+}
+
+declare <vscale x 16 x i8> @llvm.riscv.vfncvt.xu.f.w.nxv16i8.nxv16bf16(
+ <vscale x 16 x i8>,
+ <vscale x 16 x bfloat>,
+ iXLen, iXLen);
+
+define <vscale x 16 x i8> @intrinsic_vfncvt_xu.f.w_nxv16i8_nxv16bf16(<vscale x 16 x bfloat> %0, iXLen %1) nounwind {
+; CHECK-LABEL: intrinsic_vfncvt_xu.f.w_nxv16i8_nxv16bf16:
+; CHECK: # %bb.0: # %entry
+; CHECK-NEXT: fsrmi a1, 0
+; CHECK-NEXT: vsetvli zero, a0, e8alt, m2, ta, ma
+; CHECK-NEXT: vfncvt.xu.f.w v12, v8
+; CHECK-NEXT: fsrm a1
+; CHECK-NEXT: vmv.v.v v8, v12
+; CHECK-NEXT: ret
+entry:
+ %a = call <vscale x 16 x i8> @llvm.riscv.vfncvt.xu.f.w.nxv16i8.nxv16bf16(
+ <vscale x 16 x i8> poison,
+ <vscale x 16 x bfloat> %0,
+ iXLen 0, iXLen %1)
+
+ ret <vscale x 16 x i8> %a
+}
+
+declare <vscale x 16 x i8> @llvm.riscv.vfncvt.xu.f.w.mask.nxv16i8.nxv16bf16(
+ <vscale x 16 x i8>,
+ <vscale x 16 x bfloat>,
+ <vscale x 16 x i1>,
+ iXLen, iXLen, iXLen);
+
+define <vscale x 16 x i8> @intrinsic_vfncvt_mask_xu.f.w_nxv16i8_nxv16bf16(<vscale x 16 x i8> %0, <vscale x 16 x bfloat> %1, <vscale x 16 x i1> %2, iXLen %3) nounwind {
+; CHECK-LABEL: intrinsic_vfncvt_mask_xu.f.w_nxv16i8_nxv16bf16:
+; CHECK: # %bb.0: # %entry
+; CHECK-NEXT: fsrmi a1, 0
+; CHECK-NEXT: vsetvli zero, a0, e8alt, m2, ta, mu
+; CHECK-NEXT: vfncvt.xu.f.w v8, v12, v0.t
+; CHECK-NEXT: fsrm a1
+; CHECK-NEXT: ret
+entry:
+ %a = call <vscale x 16 x i8> @llvm.riscv.vfncvt.xu.f.w.mask.nxv16i8.nxv16bf16(
+ <vscale x 16 x i8> %0,
+ <vscale x 16 x bfloat> %1,
+ <vscale x 16 x i1> %2,
+ iXLen 0, iXLen %3, iXLen 1)
+
+ ret <vscale x 16 x i8> %a
+}
+
+declare <vscale x 32 x i8> @llvm.riscv.vfncvt.xu.f.w.nxv32i8.nxv32bf16(
+ <vscale x 32 x i8>,
+ <vscale x 32 x bfloat>,
+ iXLen, iXLen);
+
+define <vscale x 32 x i8> @intrinsic_vfncvt_xu.f.w_nxv32i8_nxv32bf16(<vscale x 32 x bfloat> %0, iXLen %1) nounwind {
+; CHECK-LABEL: intrinsic_vfncvt_xu.f.w_nxv32i8_nxv32bf16:
+; CHECK: # %bb.0: # %entry
+; CHECK-NEXT: fsrmi a1, 0
+; CHECK-NEXT: vsetvli zero, a0, e8alt, m4, ta, ma
+; CHECK-NEXT: vfncvt.xu.f.w v16, v8
+; CHECK-NEXT: fsrm a1
+; CHECK-NEXT: vmv.v.v v8, v16
+; CHECK-NEXT: ret
+entry:
+ %a = call <vscale x 32 x i8> @llvm.riscv.vfncvt.xu.f.w.nxv32i8.nxv32bf16(
+ <vscale x 32 x i8> poison,
+ <vscale x 32 x bfloat> %0,
+ iXLen 0, iXLen %1)
+
+ ret <vscale x 32 x i8> %a
+}
+
+declare <vscale x 32 x i8> @llvm.riscv.vfncvt.xu.f.w.mask.nxv32i8.nxv32bf16(
+ <vscale x 32 x i8>,
+ <vscale x 32 x bfloat>,
+ <vscale x 32 x i1>,
+ iXLen, iXLen, iXLen);
+
+define <vscale x 32 x i8> @intrinsic_vfncvt_mask_xu.f.w_nxv32i8_nxv32bf16(<vscale x 32 x i8> %0, <vscale x 32 x bfloat> %1, <vscale x 32 x i1> %2, iXLen %3) nounwind {
+; CHECK-LABEL: intrinsic_vfncvt_mask_xu.f.w_nxv32i8_nxv32bf16:
+; CHECK: # %bb.0: # %entry
+; CHECK-NEXT: fsrmi a1, 0
+; CHECK-NEXT: vsetvli zero, a0, e8alt, m4, ta, mu
+; CHECK-NEXT: vfncvt.xu.f.w v8, v16, v0.t
+; CHECK-NEXT: fsrm a1
+; CHECK-NEXT: ret
+entry:
+ %a = call <vscale x 32 x i8> @llvm.riscv.vfncvt.xu.f.w.mask.nxv32i8.nxv32bf16(
+ <vscale x 32 x i8> %0,
+ <vscale x 32 x bfloat> %1,
+ <vscale x 32 x i1> %2,
+ iXLen 0, iXLen %3, iXLen 1)
+
+ ret <vscale x 32 x i8> %a
+}
+
diff --git a/llvm/test/CodeGen/RISCV/rvv/vfnmacc-bf.ll b/llvm/test/CodeGen/RISCV/rvv/vfnmacc-bf.ll
new file mode 100644
index 0000000..4b4091b
--- /dev/null
+++ b/llvm/test/CodeGen/RISCV/rvv/vfnmacc-bf.ll
@@ -0,0 +1,553 @@
+; NOTE: Assertions have been autogenerated by utils/update_llc_test_checks.py
+; RUN: sed 's/iXLen/i32/g' %s | llc -mtriple=riscv32 -mattr=+v,+experimental-zvfbfa \
+; RUN: -verify-machineinstrs -target-abi=ilp32d | FileCheck %s
+; RUN: sed 's/iXLen/i64/g' %s | llc -mtriple=riscv64 -mattr=+v,+experimental-zvfbfa \
+; RUN: -verify-machineinstrs -target-abi=lp64d | FileCheck %s
+
+declare <vscale x 1 x bfloat> @llvm.riscv.vfnmacc.nxv1bf16.nxv1bf16(
+ <vscale x 1 x bfloat>,
+ <vscale x 1 x bfloat>,
+ <vscale x 1 x bfloat>,
+ iXLen, iXLen, iXLen);
+
+define <vscale x 1 x bfloat> @intrinsic_vfnmacc_vv_nxv1bf16_nxv1bf16_nxv1bf16(<vscale x 1 x bfloat> %0, <vscale x 1 x bfloat> %1, <vscale x 1 x bfloat> %2, iXLen %3) nounwind {
+; CHECK-LABEL: intrinsic_vfnmacc_vv_nxv1bf16_nxv1bf16_nxv1bf16:
+; CHECK: # %bb.0: # %entry
+; CHECK-NEXT: fsrmi a1, 0
+; CHECK-NEXT: vsetvli zero, a0, e16alt, mf4, tu, ma
+; CHECK-NEXT: vfnmacc.vv v8, v9, v10
+; CHECK-NEXT: fsrm a1
+; CHECK-NEXT: ret
+entry:
+ %a = call <vscale x 1 x bfloat> @llvm.riscv.vfnmacc.nxv1bf16.nxv1bf16(
+ <vscale x 1 x bfloat> %0,
+ <vscale x 1 x bfloat> %1,
+ <vscale x 1 x bfloat> %2,
+ iXLen 0, iXLen %3, iXLen 0)
+
+ ret <vscale x 1 x bfloat> %a
+}
+
+declare <vscale x 1 x bfloat> @llvm.riscv.vfnmacc.mask.nxv1bf16.nxv1bf16(
+ <vscale x 1 x bfloat>,
+ <vscale x 1 x bfloat>,
+ <vscale x 1 x bfloat>,
+ <vscale x 1 x i1>,
+ iXLen, iXLen, iXLen);
+
+define <vscale x 1 x bfloat> @intrinsic_vfnmacc_mask_vv_nxv1bf16_nxv1bf16_nxv1bf16(<vscale x 1 x bfloat> %0, <vscale x 1 x bfloat> %1, <vscale x 1 x bfloat> %2, <vscale x 1 x i1> %3, iXLen %4) nounwind {
+; CHECK-LABEL: intrinsic_vfnmacc_mask_vv_nxv1bf16_nxv1bf16_nxv1bf16:
+; CHECK: # %bb.0: # %entry
+; CHECK-NEXT: fsrmi a1, 0
+; CHECK-NEXT: vsetvli zero, a0, e16alt, mf4, tu, mu
+; CHECK-NEXT: vfnmacc.vv v8, v9, v10, v0.t
+; CHECK-NEXT: fsrm a1
+; CHECK-NEXT: ret
+entry:
+ %a = call <vscale x 1 x bfloat> @llvm.riscv.vfnmacc.mask.nxv1bf16.nxv1bf16(
+ <vscale x 1 x bfloat> %0,
+ <vscale x 1 x bfloat> %1,
+ <vscale x 1 x bfloat> %2,
+ <vscale x 1 x i1> %3,
+ iXLen 0, iXLen %4, iXLen 0);
+
+ ret <vscale x 1 x bfloat> %a
+}
+
+declare <vscale x 2 x bfloat> @llvm.riscv.vfnmacc.nxv2bf16.nxv2bf16(
+ <vscale x 2 x bfloat>,
+ <vscale x 2 x bfloat>,
+ <vscale x 2 x bfloat>,
+ iXLen, iXLen, iXLen);
+
+define <vscale x 2 x bfloat> @intrinsic_vfnmacc_vv_nxv2bf16_nxv2bf16_nxv2bf16(<vscale x 2 x bfloat> %0, <vscale x 2 x bfloat> %1, <vscale x 2 x bfloat> %2, iXLen %3) nounwind {
+; CHECK-LABEL: intrinsic_vfnmacc_vv_nxv2bf16_nxv2bf16_nxv2bf16:
+; CHECK: # %bb.0: # %entry
+; CHECK-NEXT: fsrmi a1, 0
+; CHECK-NEXT: vsetvli zero, a0, e16alt, mf2, tu, ma
+; CHECK-NEXT: vfnmacc.vv v8, v9, v10
+; CHECK-NEXT: fsrm a1
+; CHECK-NEXT: ret
+entry:
+ %a = call <vscale x 2 x bfloat> @llvm.riscv.vfnmacc.nxv2bf16.nxv2bf16(
+ <vscale x 2 x bfloat> %0,
+ <vscale x 2 x bfloat> %1,
+ <vscale x 2 x bfloat> %2,
+ iXLen 0, iXLen %3, iXLen 0)
+
+ ret <vscale x 2 x bfloat> %a
+}
+
+declare <vscale x 2 x bfloat> @llvm.riscv.vfnmacc.mask.nxv2bf16.nxv2bf16(
+ <vscale x 2 x bfloat>,
+ <vscale x 2 x bfloat>,
+ <vscale x 2 x bfloat>,
+ <vscale x 2 x i1>,
+ iXLen, iXLen, iXLen);
+
+define <vscale x 2 x bfloat> @intrinsic_vfnmacc_mask_vv_nxv2bf16_nxv2bf16_nxv2bf16(<vscale x 2 x bfloat> %0, <vscale x 2 x bfloat> %1, <vscale x 2 x bfloat> %2, <vscale x 2 x i1> %3, iXLen %4) nounwind {
+; CHECK-LABEL: intrinsic_vfnmacc_mask_vv_nxv2bf16_nxv2bf16_nxv2bf16:
+; CHECK: # %bb.0: # %entry
+; CHECK-NEXT: fsrmi a1, 0
+; CHECK-NEXT: vsetvli zero, a0, e16alt, mf2, tu, mu
+; CHECK-NEXT: vfnmacc.vv v8, v9, v10, v0.t
+; CHECK-NEXT: fsrm a1
+; CHECK-NEXT: ret
+entry:
+ %a = call <vscale x 2 x bfloat> @llvm.riscv.vfnmacc.mask.nxv2bf16.nxv2bf16(
+ <vscale x 2 x bfloat> %0,
+ <vscale x 2 x bfloat> %1,
+ <vscale x 2 x bfloat> %2,
+ <vscale x 2 x i1> %3,
+ iXLen 0, iXLen %4, iXLen 0);
+
+ ret <vscale x 2 x bfloat> %a
+}
+
+declare <vscale x 4 x bfloat> @llvm.riscv.vfnmacc.nxv4bf16.nxv4bf16(
+ <vscale x 4 x bfloat>,
+ <vscale x 4 x bfloat>,
+ <vscale x 4 x bfloat>,
+ iXLen, iXLen, iXLen);
+
+define <vscale x 4 x bfloat> @intrinsic_vfnmacc_vv_nxv4bf16_nxv4bf16_nxv4bf16(<vscale x 4 x bfloat> %0, <vscale x 4 x bfloat> %1, <vscale x 4 x bfloat> %2, iXLen %3) nounwind {
+; CHECK-LABEL: intrinsic_vfnmacc_vv_nxv4bf16_nxv4bf16_nxv4bf16:
+; CHECK: # %bb.0: # %entry
+; CHECK-NEXT: fsrmi a1, 0
+; CHECK-NEXT: vsetvli zero, a0, e16alt, m1, tu, ma
+; CHECK-NEXT: vfnmacc.vv v8, v9, v10
+; CHECK-NEXT: fsrm a1
+; CHECK-NEXT: ret
+entry:
+ %a = call <vscale x 4 x bfloat> @llvm.riscv.vfnmacc.nxv4bf16.nxv4bf16(
+ <vscale x 4 x bfloat> %0,
+ <vscale x 4 x bfloat> %1,
+ <vscale x 4 x bfloat> %2,
+ iXLen 0, iXLen %3, iXLen 0)
+
+ ret <vscale x 4 x bfloat> %a
+}
+
+declare <vscale x 4 x bfloat> @llvm.riscv.vfnmacc.mask.nxv4bf16.nxv4bf16(
+ <vscale x 4 x bfloat>,
+ <vscale x 4 x bfloat>,
+ <vscale x 4 x bfloat>,
+ <vscale x 4 x i1>,
+ iXLen, iXLen, iXLen);
+
+define <vscale x 4 x bfloat> @intrinsic_vfnmacc_mask_vv_nxv4bf16_nxv4bf16_nxv4bf16(<vscale x 4 x bfloat> %0, <vscale x 4 x bfloat> %1, <vscale x 4 x bfloat> %2, <vscale x 4 x i1> %3, iXLen %4) nounwind {
+; CHECK-LABEL: intrinsic_vfnmacc_mask_vv_nxv4bf16_nxv4bf16_nxv4bf16:
+; CHECK: # %bb.0: # %entry
+; CHECK-NEXT: fsrmi a1, 0
+; CHECK-NEXT: vsetvli zero, a0, e16alt, m1, tu, mu
+; CHECK-NEXT: vfnmacc.vv v8, v9, v10, v0.t
+; CHECK-NEXT: fsrm a1
+; CHECK-NEXT: ret
+entry:
+ %a = call <vscale x 4 x bfloat> @llvm.riscv.vfnmacc.mask.nxv4bf16.nxv4bf16(
+ <vscale x 4 x bfloat> %0,
+ <vscale x 4 x bfloat> %1,
+ <vscale x 4 x bfloat> %2,
+ <vscale x 4 x i1> %3,
+ iXLen 0, iXLen %4, iXLen 0);
+
+ ret <vscale x 4 x bfloat> %a
+}
+
+declare <vscale x 8 x bfloat> @llvm.riscv.vfnmacc.nxv8bf16.nxv8bf16(
+ <vscale x 8 x bfloat>,
+ <vscale x 8 x bfloat>,
+ <vscale x 8 x bfloat>,
+ iXLen, iXLen, iXLen);
+
+define <vscale x 8 x bfloat> @intrinsic_vfnmacc_vv_nxv8bf16_nxv8bf16_nxv8bf16(<vscale x 8 x bfloat> %0, <vscale x 8 x bfloat> %1, <vscale x 8 x bfloat> %2, iXLen %3) nounwind {
+; CHECK-LABEL: intrinsic_vfnmacc_vv_nxv8bf16_nxv8bf16_nxv8bf16:
+; CHECK: # %bb.0: # %entry
+; CHECK-NEXT: fsrmi a1, 0
+; CHECK-NEXT: vsetvli zero, a0, e16alt, m2, tu, ma
+; CHECK-NEXT: vfnmacc.vv v8, v10, v12
+; CHECK-NEXT: fsrm a1
+; CHECK-NEXT: ret
+entry:
+ %a = call <vscale x 8 x bfloat> @llvm.riscv.vfnmacc.nxv8bf16.nxv8bf16(
+ <vscale x 8 x bfloat> %0,
+ <vscale x 8 x bfloat> %1,
+ <vscale x 8 x bfloat> %2,
+ iXLen 0, iXLen %3, iXLen 0)
+
+ ret <vscale x 8 x bfloat> %a
+}
+
+declare <vscale x 8 x bfloat> @llvm.riscv.vfnmacc.mask.nxv8bf16.nxv8bf16(
+ <vscale x 8 x bfloat>,
+ <vscale x 8 x bfloat>,
+ <vscale x 8 x bfloat>,
+ <vscale x 8 x i1>,
+ iXLen, iXLen, iXLen);
+
+define <vscale x 8 x bfloat> @intrinsic_vfnmacc_mask_vv_nxv8bf16_nxv8bf16_nxv8bf16(<vscale x 8 x bfloat> %0, <vscale x 8 x bfloat> %1, <vscale x 8 x bfloat> %2, <vscale x 8 x i1> %3, iXLen %4) nounwind {
+; CHECK-LABEL: intrinsic_vfnmacc_mask_vv_nxv8bf16_nxv8bf16_nxv8bf16:
+; CHECK: # %bb.0: # %entry
+; CHECK-NEXT: fsrmi a1, 0
+; CHECK-NEXT: vsetvli zero, a0, e16alt, m2, tu, mu
+; CHECK-NEXT: vfnmacc.vv v8, v10, v12, v0.t
+; CHECK-NEXT: fsrm a1
+; CHECK-NEXT: ret
+entry:
+ %a = call <vscale x 8 x bfloat> @llvm.riscv.vfnmacc.mask.nxv8bf16.nxv8bf16(
+ <vscale x 8 x bfloat> %0,
+ <vscale x 8 x bfloat> %1,
+ <vscale x 8 x bfloat> %2,
+ <vscale x 8 x i1> %3,
+ iXLen 0, iXLen %4, iXLen 0);
+
+ ret <vscale x 8 x bfloat> %a
+}
+
+declare <vscale x 16 x bfloat> @llvm.riscv.vfnmacc.nxv16bf16.nxv16bf16(
+ <vscale x 16 x bfloat>,
+ <vscale x 16 x bfloat>,
+ <vscale x 16 x bfloat>,
+ iXLen, iXLen, iXLen);
+
+define <vscale x 16 x bfloat> @intrinsic_vfnmacc_vv_nxv16bf16_nxv16bf16_nxv16bf16(<vscale x 16 x bfloat> %0, <vscale x 16 x bfloat> %1, <vscale x 16 x bfloat> %2, iXLen %3) nounwind {
+; CHECK-LABEL: intrinsic_vfnmacc_vv_nxv16bf16_nxv16bf16_nxv16bf16:
+; CHECK: # %bb.0: # %entry
+; CHECK-NEXT: fsrmi a1, 0
+; CHECK-NEXT: vsetvli zero, a0, e16alt, m4, tu, ma
+; CHECK-NEXT: vfnmacc.vv v8, v12, v16
+; CHECK-NEXT: fsrm a1
+; CHECK-NEXT: ret
+entry:
+ %a = call <vscale x 16 x bfloat> @llvm.riscv.vfnmacc.nxv16bf16.nxv16bf16(
+ <vscale x 16 x bfloat> %0,
+ <vscale x 16 x bfloat> %1,
+ <vscale x 16 x bfloat> %2,
+ iXLen 0, iXLen %3, iXLen 0)
+
+ ret <vscale x 16 x bfloat> %a
+}
+
+declare <vscale x 16 x bfloat> @llvm.riscv.vfnmacc.mask.nxv16bf16.nxv16bf16(
+ <vscale x 16 x bfloat>,
+ <vscale x 16 x bfloat>,
+ <vscale x 16 x bfloat>,
+ <vscale x 16 x i1>,
+ iXLen, iXLen, iXLen);
+
+define <vscale x 16 x bfloat> @intrinsic_vfnmacc_mask_vv_nxv16bf16_nxv16bf16_nxv16bf16(<vscale x 16 x bfloat> %0, <vscale x 16 x bfloat> %1, <vscale x 16 x bfloat> %2, <vscale x 16 x i1> %3, iXLen %4) nounwind {
+; CHECK-LABEL: intrinsic_vfnmacc_mask_vv_nxv16bf16_nxv16bf16_nxv16bf16:
+; CHECK: # %bb.0: # %entry
+; CHECK-NEXT: fsrmi a1, 0
+; CHECK-NEXT: vsetvli zero, a0, e16alt, m4, tu, mu
+; CHECK-NEXT: vfnmacc.vv v8, v12, v16, v0.t
+; CHECK-NEXT: fsrm a1
+; CHECK-NEXT: ret
+entry:
+ %a = call <vscale x 16 x bfloat> @llvm.riscv.vfnmacc.mask.nxv16bf16.nxv16bf16(
+ <vscale x 16 x bfloat> %0,
+ <vscale x 16 x bfloat> %1,
+ <vscale x 16 x bfloat> %2,
+ <vscale x 16 x i1> %3,
+ iXLen 0, iXLen %4, iXLen 0);
+
+ ret <vscale x 16 x bfloat> %a
+}
+
+declare <vscale x 1 x bfloat> @llvm.riscv.vfnmacc.nxv1bf16.bf16(
+ <vscale x 1 x bfloat>,
+ bfloat,
+ <vscale x 1 x bfloat>,
+ iXLen, iXLen, iXLen);
+
+define <vscale x 1 x bfloat> @intrinsic_vfnmacc_vf_nxv1bf16_bf16_nxv1bf16(<vscale x 1 x bfloat> %0, bfloat %1, <vscale x 1 x bfloat> %2, iXLen %3) nounwind {
+; CHECK-LABEL: intrinsic_vfnmacc_vf_nxv1bf16_bf16_nxv1bf16:
+; CHECK: # %bb.0: # %entry
+; CHECK-NEXT: fsrmi a1, 0
+; CHECK-NEXT: vsetvli zero, a0, e16alt, mf4, tu, ma
+; CHECK-NEXT: vfnmacc.vf v8, fa0, v9
+; CHECK-NEXT: fsrm a1
+; CHECK-NEXT: ret
+entry:
+ %a = call <vscale x 1 x bfloat> @llvm.riscv.vfnmacc.nxv1bf16.bf16(
+ <vscale x 1 x bfloat> %0,
+ bfloat %1,
+ <vscale x 1 x bfloat> %2,
+ iXLen 0, iXLen %3, iXLen 0)
+
+ ret <vscale x 1 x bfloat> %a
+}
+
+declare <vscale x 1 x bfloat> @llvm.riscv.vfnmacc.mask.nxv1bf16.bf16(
+ <vscale x 1 x bfloat>,
+ bfloat,
+ <vscale x 1 x bfloat>,
+ <vscale x 1 x i1>,
+ iXLen, iXLen, iXLen);
+
+define <vscale x 1 x bfloat> @intrinsic_vfnmacc_mask_vf_nxv1bf16_bf16_nxv1bf16(<vscale x 1 x bfloat> %0, bfloat %1, <vscale x 1 x bfloat> %2, <vscale x 1 x i1> %3, iXLen %4) nounwind {
+; CHECK-LABEL: intrinsic_vfnmacc_mask_vf_nxv1bf16_bf16_nxv1bf16:
+; CHECK: # %bb.0: # %entry
+; CHECK-NEXT: fsrmi a1, 0
+; CHECK-NEXT: vsetvli zero, a0, e16alt, mf4, tu, mu
+; CHECK-NEXT: vfnmacc.vf v8, fa0, v9, v0.t
+; CHECK-NEXT: fsrm a1
+; CHECK-NEXT: ret
+entry:
+ %a = call <vscale x 1 x bfloat> @llvm.riscv.vfnmacc.mask.nxv1bf16.bf16(
+ <vscale x 1 x bfloat> %0,
+ bfloat %1,
+ <vscale x 1 x bfloat> %2,
+ <vscale x 1 x i1> %3,
+ iXLen 0, iXLen %4, iXLen 0);
+
+ ret <vscale x 1 x bfloat> %a
+}
+
+declare <vscale x 2 x bfloat> @llvm.riscv.vfnmacc.nxv2bf16.bf16(
+ <vscale x 2 x bfloat>,
+ bfloat,
+ <vscale x 2 x bfloat>,
+ iXLen, iXLen, iXLen);
+
+define <vscale x 2 x bfloat> @intrinsic_vfnmacc_vf_nxv2bf16_bf16_nxv2bf16(<vscale x 2 x bfloat> %0, bfloat %1, <vscale x 2 x bfloat> %2, iXLen %3) nounwind {
+; CHECK-LABEL: intrinsic_vfnmacc_vf_nxv2bf16_bf16_nxv2bf16:
+; CHECK: # %bb.0: # %entry
+; CHECK-NEXT: fsrmi a1, 0
+; CHECK-NEXT: vsetvli zero, a0, e16alt, mf2, tu, ma
+; CHECK-NEXT: vfnmacc.vf v8, fa0, v9
+; CHECK-NEXT: fsrm a1
+; CHECK-NEXT: ret
+entry:
+ %a = call <vscale x 2 x bfloat> @llvm.riscv.vfnmacc.nxv2bf16.bf16(
+ <vscale x 2 x bfloat> %0,
+ bfloat %1,
+ <vscale x 2 x bfloat> %2,
+ iXLen 0, iXLen %3, iXLen 0)
+
+ ret <vscale x 2 x bfloat> %a
+}
+
+declare <vscale x 2 x bfloat> @llvm.riscv.vfnmacc.mask.nxv2bf16.bf16(
+ <vscale x 2 x bfloat>,
+ bfloat,
+ <vscale x 2 x bfloat>,
+ <vscale x 2 x i1>,
+ iXLen, iXLen, iXLen);
+
+define <vscale x 2 x bfloat> @intrinsic_vfnmacc_mask_vf_nxv2bf16_bf16_nxv2bf16(<vscale x 2 x bfloat> %0, bfloat %1, <vscale x 2 x bfloat> %2, <vscale x 2 x i1> %3, iXLen %4) nounwind {
+; CHECK-LABEL: intrinsic_vfnmacc_mask_vf_nxv2bf16_bf16_nxv2bf16:
+; CHECK: # %bb.0: # %entry
+; CHECK-NEXT: fsrmi a1, 0
+; CHECK-NEXT: vsetvli zero, a0, e16alt, mf2, tu, mu
+; CHECK-NEXT: vfnmacc.vf v8, fa0, v9, v0.t
+; CHECK-NEXT: fsrm a1
+; CHECK-NEXT: ret
+entry:
+ %a = call <vscale x 2 x bfloat> @llvm.riscv.vfnmacc.mask.nxv2bf16.bf16(
+ <vscale x 2 x bfloat> %0,
+ bfloat %1,
+ <vscale x 2 x bfloat> %2,
+ <vscale x 2 x i1> %3,
+ iXLen 0, iXLen %4, iXLen 0);
+
+ ret <vscale x 2 x bfloat> %a
+}
+
+declare <vscale x 4 x bfloat> @llvm.riscv.vfnmacc.nxv4bf16.bf16(
+ <vscale x 4 x bfloat>,
+ bfloat,
+ <vscale x 4 x bfloat>,
+ iXLen, iXLen, iXLen);
+
+define <vscale x 4 x bfloat> @intrinsic_vfnmacc_vf_nxv4bf16_bf16_nxv4bf16(<vscale x 4 x bfloat> %0, bfloat %1, <vscale x 4 x bfloat> %2, iXLen %3) nounwind {
+; CHECK-LABEL: intrinsic_vfnmacc_vf_nxv4bf16_bf16_nxv4bf16:
+; CHECK: # %bb.0: # %entry
+; CHECK-NEXT: fsrmi a1, 0
+; CHECK-NEXT: vsetvli zero, a0, e16alt, m1, tu, ma
+; CHECK-NEXT: vfnmacc.vf v8, fa0, v9
+; CHECK-NEXT: fsrm a1
+; CHECK-NEXT: ret
+entry:
+ %a = call <vscale x 4 x bfloat> @llvm.riscv.vfnmacc.nxv4bf16.bf16(
+ <vscale x 4 x bfloat> %0,
+ bfloat %1,
+ <vscale x 4 x bfloat> %2,
+ iXLen 0, iXLen %3, iXLen 0)
+
+ ret <vscale x 4 x bfloat> %a
+}
+
+declare <vscale x 4 x bfloat> @llvm.riscv.vfnmacc.mask.nxv4bf16.bf16(
+ <vscale x 4 x bfloat>,
+ bfloat,
+ <vscale x 4 x bfloat>,
+ <vscale x 4 x i1>,
+ iXLen, iXLen, iXLen);
+
+define <vscale x 4 x bfloat> @intrinsic_vfnmacc_mask_vf_nxv4bf16_bf16_nxv4bf16(<vscale x 4 x bfloat> %0, bfloat %1, <vscale x 4 x bfloat> %2, <vscale x 4 x i1> %3, iXLen %4) nounwind {
+; CHECK-LABEL: intrinsic_vfnmacc_mask_vf_nxv4bf16_bf16_nxv4bf16:
+; CHECK: # %bb.0: # %entry
+; CHECK-NEXT: fsrmi a1, 0
+; CHECK-NEXT: vsetvli zero, a0, e16alt, m1, tu, mu
+; CHECK-NEXT: vfnmacc.vf v8, fa0, v9, v0.t
+; CHECK-NEXT: fsrm a1
+; CHECK-NEXT: ret
+entry:
+ %a = call <vscale x 4 x bfloat> @llvm.riscv.vfnmacc.mask.nxv4bf16.bf16(
+ <vscale x 4 x bfloat> %0,
+ bfloat %1,
+ <vscale x 4 x bfloat> %2,
+ <vscale x 4 x i1> %3,
+ iXLen 0, iXLen %4, iXLen 0);
+
+ ret <vscale x 4 x bfloat> %a
+}
+
+declare <vscale x 8 x bfloat> @llvm.riscv.vfnmacc.nxv8bf16.bf16(
+ <vscale x 8 x bfloat>,
+ bfloat,
+ <vscale x 8 x bfloat>,
+ iXLen, iXLen, iXLen);
+
+define <vscale x 8 x bfloat> @intrinsic_vfnmacc_vf_nxv8bf16_bf16_nxv8bf16(<vscale x 8 x bfloat> %0, bfloat %1, <vscale x 8 x bfloat> %2, iXLen %3) nounwind {
+; CHECK-LABEL: intrinsic_vfnmacc_vf_nxv8bf16_bf16_nxv8bf16:
+; CHECK: # %bb.0: # %entry
+; CHECK-NEXT: fsrmi a1, 0
+; CHECK-NEXT: vsetvli zero, a0, e16alt, m2, tu, ma
+; CHECK-NEXT: vfnmacc.vf v8, fa0, v10
+; CHECK-NEXT: fsrm a1
+; CHECK-NEXT: ret
+entry:
+ %a = call <vscale x 8 x bfloat> @llvm.riscv.vfnmacc.nxv8bf16.bf16(
+ <vscale x 8 x bfloat> %0,
+ bfloat %1,
+ <vscale x 8 x bfloat> %2,
+ iXLen 0, iXLen %3, iXLen 0)
+
+ ret <vscale x 8 x bfloat> %a
+}
+
+declare <vscale x 8 x bfloat> @llvm.riscv.vfnmacc.mask.nxv8bf16.bf16(
+ <vscale x 8 x bfloat>,
+ bfloat,
+ <vscale x 8 x bfloat>,
+ <vscale x 8 x i1>,
+ iXLen, iXLen, iXLen);
+
+define <vscale x 8 x bfloat> @intrinsic_vfnmacc_mask_vf_nxv8bf16_bf16_nxv8bf16(<vscale x 8 x bfloat> %0, bfloat %1, <vscale x 8 x bfloat> %2, <vscale x 8 x i1> %3, iXLen %4) nounwind {
+; CHECK-LABEL: intrinsic_vfnmacc_mask_vf_nxv8bf16_bf16_nxv8bf16:
+; CHECK: # %bb.0: # %entry
+; CHECK-NEXT: fsrmi a1, 0
+; CHECK-NEXT: vsetvli zero, a0, e16alt, m2, tu, mu
+; CHECK-NEXT: vfnmacc.vf v8, fa0, v10, v0.t
+; CHECK-NEXT: fsrm a1
+; CHECK-NEXT: ret
+entry:
+ %a = call <vscale x 8 x bfloat> @llvm.riscv.vfnmacc.mask.nxv8bf16.bf16(
+ <vscale x 8 x bfloat> %0,
+ bfloat %1,
+ <vscale x 8 x bfloat> %2,
+ <vscale x 8 x i1> %3,
+ iXLen 0, iXLen %4, iXLen 0);
+
+ ret <vscale x 8 x bfloat> %a
+}
+
+declare <vscale x 16 x bfloat> @llvm.riscv.vfnmacc.nxv16bf16.bf16(
+ <vscale x 16 x bfloat>,
+ bfloat,
+ <vscale x 16 x bfloat>,
+ iXLen, iXLen, iXLen);
+
+define <vscale x 16 x bfloat> @intrinsic_vfnmacc_vf_nxv16bf16_bf16_nxv16bf16(<vscale x 16 x bfloat> %0, bfloat %1, <vscale x 16 x bfloat> %2, iXLen %3) nounwind {
+; CHECK-LABEL: intrinsic_vfnmacc_vf_nxv16bf16_bf16_nxv16bf16:
+; CHECK: # %bb.0: # %entry
+; CHECK-NEXT: fsrmi a1, 0
+; CHECK-NEXT: vsetvli zero, a0, e16alt, m4, tu, ma
+; CHECK-NEXT: vfnmacc.vf v8, fa0, v12
+; CHECK-NEXT: fsrm a1
+; CHECK-NEXT: ret
+entry:
+ %a = call <vscale x 16 x bfloat> @llvm.riscv.vfnmacc.nxv16bf16.bf16(
+ <vscale x 16 x bfloat> %0,
+ bfloat %1,
+ <vscale x 16 x bfloat> %2,
+ iXLen 0, iXLen %3, iXLen 0)
+
+ ret <vscale x 16 x bfloat> %a
+}
+
+declare <vscale x 16 x bfloat> @llvm.riscv.vfnmacc.mask.nxv16bf16.bf16(
+ <vscale x 16 x bfloat>,
+ bfloat,
+ <vscale x 16 x bfloat>,
+ <vscale x 16 x i1>,
+ iXLen, iXLen, iXLen);
+
+define <vscale x 16 x bfloat> @intrinsic_vfnmacc_mask_vf_nxv16bf16_bf16_nxv16bf16(<vscale x 16 x bfloat> %0, bfloat %1, <vscale x 16 x bfloat> %2, <vscale x 16 x i1> %3, iXLen %4) nounwind {
+; CHECK-LABEL: intrinsic_vfnmacc_mask_vf_nxv16bf16_bf16_nxv16bf16:
+; CHECK: # %bb.0: # %entry
+; CHECK-NEXT: fsrmi a1, 0
+; CHECK-NEXT: vsetvli zero, a0, e16alt, m4, tu, mu
+; CHECK-NEXT: vfnmacc.vf v8, fa0, v12, v0.t
+; CHECK-NEXT: fsrm a1
+; CHECK-NEXT: ret
+entry:
+ %a = call <vscale x 16 x bfloat> @llvm.riscv.vfnmacc.mask.nxv16bf16.bf16(
+ <vscale x 16 x bfloat> %0,
+ bfloat %1,
+ <vscale x 16 x bfloat> %2,
+ <vscale x 16 x i1> %3,
+ iXLen 0, iXLen %4, iXLen 0);
+
+ ret <vscale x 16 x bfloat> %a
+}
+
+define <vscale x 1 x bfloat> @intrinsic_vfnmacc_vv_nxv1bf16_nxv1bf16_nxv1bf16_commute(<vscale x 1 x bfloat> %0, <vscale x 1 x bfloat> %1, <vscale x 1 x bfloat> %2, iXLen %3) nounwind {
+; CHECK-LABEL: intrinsic_vfnmacc_vv_nxv1bf16_nxv1bf16_nxv1bf16_commute:
+; CHECK: # %bb.0: # %entry
+; CHECK-NEXT: vsetvli zero, a0, e16alt, mf4, ta, ma
+; CHECK-NEXT: vfnmadd.vv v8, v10, v9
+; CHECK-NEXT: ret
+entry:
+ %a = call <vscale x 1 x bfloat> @llvm.riscv.vfnmacc.nxv1bf16.nxv1bf16(
+ <vscale x 1 x bfloat> %1,
+ <vscale x 1 x bfloat> %0,
+ <vscale x 1 x bfloat> %2,
+ iXLen 7, iXLen %3, iXLen 3)
+
+ ret <vscale x 1 x bfloat> %a
+}
+
+define <vscale x 1 x bfloat> @intrinsic_vfnmacc_vv_nxv1bf16_nxv1bf16_nxv1bf16_commute2(<vscale x 1 x bfloat> %0, <vscale x 1 x bfloat> %1, <vscale x 1 x bfloat> %2, iXLen %3) nounwind {
+; CHECK-LABEL: intrinsic_vfnmacc_vv_nxv1bf16_nxv1bf16_nxv1bf16_commute2:
+; CHECK: # %bb.0: # %entry
+; CHECK-NEXT: vsetvli zero, a0, e16alt, mf4, ta, ma
+; CHECK-NEXT: vfnmadd.vv v8, v10, v9
+; CHECK-NEXT: ret
+entry:
+ %a = call <vscale x 1 x bfloat> @llvm.riscv.vfnmacc.nxv1bf16.nxv1bf16(
+ <vscale x 1 x bfloat> %1,
+ <vscale x 1 x bfloat> %2,
+ <vscale x 1 x bfloat> %0,
+ iXLen 7, iXLen %3, iXLen 3)
+
+ ret <vscale x 1 x bfloat> %a
+}
+
+define <vscale x 1 x bfloat> @intrinsic_vfnmacc_vf_nxv1bf16_bf16_nxv1bf16_commute(<vscale x 1 x bfloat> %0, bfloat %1, <vscale x 1 x bfloat> %2, iXLen %3) nounwind {
+; CHECK-LABEL: intrinsic_vfnmacc_vf_nxv1bf16_bf16_nxv1bf16_commute:
+; CHECK: # %bb.0: # %entry
+; CHECK-NEXT: vsetvli zero, a0, e16alt, mf4, ta, ma
+; CHECK-NEXT: vfnmadd.vf v8, fa0, v9
+; CHECK-NEXT: ret
+entry:
+ %a = call <vscale x 1 x bfloat> @llvm.riscv.vfnmacc.nxv1bf16.bf16(
+ <vscale x 1 x bfloat> %2,
+ bfloat %1,
+ <vscale x 1 x bfloat> %0,
+ iXLen 7, iXLen %3, iXLen 3)
+
+ ret <vscale x 1 x bfloat> %a
+}
diff --git a/llvm/test/CodeGen/RISCV/rvv/vfnmadd-bf.ll b/llvm/test/CodeGen/RISCV/rvv/vfnmadd-bf.ll
new file mode 100644
index 0000000..2bb6bf5
--- /dev/null
+++ b/llvm/test/CodeGen/RISCV/rvv/vfnmadd-bf.ll
@@ -0,0 +1,553 @@
+; NOTE: Assertions have been autogenerated by utils/update_llc_test_checks.py
+; RUN: sed 's/iXLen/i32/g' %s | llc -mtriple=riscv32 -mattr=+v,+experimental-zvfbfa \
+; RUN: -verify-machineinstrs -target-abi=ilp32d | FileCheck %s
+; RUN: sed 's/iXLen/i64/g' %s | llc -mtriple=riscv64 -mattr=+v,+experimental-zvfbfa \
+; RUN: -verify-machineinstrs -target-abi=lp64d | FileCheck %s
+
+declare <vscale x 1 x bfloat> @llvm.riscv.vfnmadd.nxv1bf16.nxv1bf16(
+ <vscale x 1 x bfloat>,
+ <vscale x 1 x bfloat>,
+ <vscale x 1 x bfloat>,
+ iXLen, iXLen, iXLen);
+
+define <vscale x 1 x bfloat> @intrinsic_vfnmadd_vv_nxv1bf16_nxv1bf16_nxv1bf16(<vscale x 1 x bfloat> %0, <vscale x 1 x bfloat> %1, <vscale x 1 x bfloat> %2, iXLen %3) nounwind {
+; CHECK-LABEL: intrinsic_vfnmadd_vv_nxv1bf16_nxv1bf16_nxv1bf16:
+; CHECK: # %bb.0: # %entry
+; CHECK-NEXT: fsrmi a1, 0
+; CHECK-NEXT: vsetvli zero, a0, e16alt, mf4, tu, ma
+; CHECK-NEXT: vfnmadd.vv v8, v9, v10
+; CHECK-NEXT: fsrm a1
+; CHECK-NEXT: ret
+entry:
+ %a = call <vscale x 1 x bfloat> @llvm.riscv.vfnmadd.nxv1bf16.nxv1bf16(
+ <vscale x 1 x bfloat> %0,
+ <vscale x 1 x bfloat> %1,
+ <vscale x 1 x bfloat> %2,
+ iXLen 0, iXLen %3, iXLen 0)
+
+ ret <vscale x 1 x bfloat> %a
+}
+
+declare <vscale x 1 x bfloat> @llvm.riscv.vfnmadd.mask.nxv1bf16.nxv1bf16(
+ <vscale x 1 x bfloat>,
+ <vscale x 1 x bfloat>,
+ <vscale x 1 x bfloat>,
+ <vscale x 1 x i1>,
+ iXLen, iXLen, iXLen);
+
+define <vscale x 1 x bfloat> @intrinsic_vfnmadd_mask_vv_nxv1bf16_nxv1bf16_nxv1bf16(<vscale x 1 x bfloat> %0, <vscale x 1 x bfloat> %1, <vscale x 1 x bfloat> %2, <vscale x 1 x i1> %3, iXLen %4) nounwind {
+; CHECK-LABEL: intrinsic_vfnmadd_mask_vv_nxv1bf16_nxv1bf16_nxv1bf16:
+; CHECK: # %bb.0: # %entry
+; CHECK-NEXT: fsrmi a1, 0
+; CHECK-NEXT: vsetvli zero, a0, e16alt, mf4, tu, mu
+; CHECK-NEXT: vfnmadd.vv v8, v9, v10, v0.t
+; CHECK-NEXT: fsrm a1
+; CHECK-NEXT: ret
+entry:
+ %a = call <vscale x 1 x bfloat> @llvm.riscv.vfnmadd.mask.nxv1bf16.nxv1bf16(
+ <vscale x 1 x bfloat> %0,
+ <vscale x 1 x bfloat> %1,
+ <vscale x 1 x bfloat> %2,
+ <vscale x 1 x i1> %3,
+ iXLen 0, iXLen %4, iXLen 0);
+
+ ret <vscale x 1 x bfloat> %a
+}
+
+declare <vscale x 2 x bfloat> @llvm.riscv.vfnmadd.nxv2bf16.nxv2bf16(
+ <vscale x 2 x bfloat>,
+ <vscale x 2 x bfloat>,
+ <vscale x 2 x bfloat>,
+ iXLen, iXLen, iXLen);
+
+define <vscale x 2 x bfloat> @intrinsic_vfnmadd_vv_nxv2bf16_nxv2bf16_nxv2bf16(<vscale x 2 x bfloat> %0, <vscale x 2 x bfloat> %1, <vscale x 2 x bfloat> %2, iXLen %3) nounwind {
+; CHECK-LABEL: intrinsic_vfnmadd_vv_nxv2bf16_nxv2bf16_nxv2bf16:
+; CHECK: # %bb.0: # %entry
+; CHECK-NEXT: fsrmi a1, 0
+; CHECK-NEXT: vsetvli zero, a0, e16alt, mf2, tu, ma
+; CHECK-NEXT: vfnmadd.vv v8, v9, v10
+; CHECK-NEXT: fsrm a1
+; CHECK-NEXT: ret
+entry:
+ %a = call <vscale x 2 x bfloat> @llvm.riscv.vfnmadd.nxv2bf16.nxv2bf16(
+ <vscale x 2 x bfloat> %0,
+ <vscale x 2 x bfloat> %1,
+ <vscale x 2 x bfloat> %2,
+ iXLen 0, iXLen %3, iXLen 0)
+
+ ret <vscale x 2 x bfloat> %a
+}
+
+declare <vscale x 2 x bfloat> @llvm.riscv.vfnmadd.mask.nxv2bf16.nxv2bf16(
+ <vscale x 2 x bfloat>,
+ <vscale x 2 x bfloat>,
+ <vscale x 2 x bfloat>,
+ <vscale x 2 x i1>,
+ iXLen, iXLen, iXLen);
+
+define <vscale x 2 x bfloat> @intrinsic_vfnmadd_mask_vv_nxv2bf16_nxv2bf16_nxv2bf16(<vscale x 2 x bfloat> %0, <vscale x 2 x bfloat> %1, <vscale x 2 x bfloat> %2, <vscale x 2 x i1> %3, iXLen %4) nounwind {
+; CHECK-LABEL: intrinsic_vfnmadd_mask_vv_nxv2bf16_nxv2bf16_nxv2bf16:
+; CHECK: # %bb.0: # %entry
+; CHECK-NEXT: fsrmi a1, 0
+; CHECK-NEXT: vsetvli zero, a0, e16alt, mf2, tu, mu
+; CHECK-NEXT: vfnmadd.vv v8, v9, v10, v0.t
+; CHECK-NEXT: fsrm a1
+; CHECK-NEXT: ret
+entry:
+ %a = call <vscale x 2 x bfloat> @llvm.riscv.vfnmadd.mask.nxv2bf16.nxv2bf16(
+ <vscale x 2 x bfloat> %0,
+ <vscale x 2 x bfloat> %1,
+ <vscale x 2 x bfloat> %2,
+ <vscale x 2 x i1> %3,
+ iXLen 0, iXLen %4, iXLen 0);
+
+ ret <vscale x 2 x bfloat> %a
+}
+
+declare <vscale x 4 x bfloat> @llvm.riscv.vfnmadd.nxv4bf16.nxv4bf16(
+ <vscale x 4 x bfloat>,
+ <vscale x 4 x bfloat>,
+ <vscale x 4 x bfloat>,
+ iXLen, iXLen, iXLen);
+
+define <vscale x 4 x bfloat> @intrinsic_vfnmadd_vv_nxv4bf16_nxv4bf16_nxv4bf16(<vscale x 4 x bfloat> %0, <vscale x 4 x bfloat> %1, <vscale x 4 x bfloat> %2, iXLen %3) nounwind {
+; CHECK-LABEL: intrinsic_vfnmadd_vv_nxv4bf16_nxv4bf16_nxv4bf16:
+; CHECK: # %bb.0: # %entry
+; CHECK-NEXT: fsrmi a1, 0
+; CHECK-NEXT: vsetvli zero, a0, e16alt, m1, tu, ma
+; CHECK-NEXT: vfnmadd.vv v8, v9, v10
+; CHECK-NEXT: fsrm a1
+; CHECK-NEXT: ret
+entry:
+ %a = call <vscale x 4 x bfloat> @llvm.riscv.vfnmadd.nxv4bf16.nxv4bf16(
+ <vscale x 4 x bfloat> %0,
+ <vscale x 4 x bfloat> %1,
+ <vscale x 4 x bfloat> %2,
+ iXLen 0, iXLen %3, iXLen 0)
+
+ ret <vscale x 4 x bfloat> %a
+}
+
+declare <vscale x 4 x bfloat> @llvm.riscv.vfnmadd.mask.nxv4bf16.nxv4bf16(
+ <vscale x 4 x bfloat>,
+ <vscale x 4 x bfloat>,
+ <vscale x 4 x bfloat>,
+ <vscale x 4 x i1>,
+ iXLen, iXLen, iXLen);
+
+define <vscale x 4 x bfloat> @intrinsic_vfnmadd_mask_vv_nxv4bf16_nxv4bf16_nxv4bf16(<vscale x 4 x bfloat> %0, <vscale x 4 x bfloat> %1, <vscale x 4 x bfloat> %2, <vscale x 4 x i1> %3, iXLen %4) nounwind {
+; CHECK-LABEL: intrinsic_vfnmadd_mask_vv_nxv4bf16_nxv4bf16_nxv4bf16:
+; CHECK: # %bb.0: # %entry
+; CHECK-NEXT: fsrmi a1, 0
+; CHECK-NEXT: vsetvli zero, a0, e16alt, m1, tu, mu
+; CHECK-NEXT: vfnmadd.vv v8, v9, v10, v0.t
+; CHECK-NEXT: fsrm a1
+; CHECK-NEXT: ret
+entry:
+ %a = call <vscale x 4 x bfloat> @llvm.riscv.vfnmadd.mask.nxv4bf16.nxv4bf16(
+ <vscale x 4 x bfloat> %0,
+ <vscale x 4 x bfloat> %1,
+ <vscale x 4 x bfloat> %2,
+ <vscale x 4 x i1> %3,
+ iXLen 0, iXLen %4, iXLen 0);
+
+ ret <vscale x 4 x bfloat> %a
+}
+
+declare <vscale x 8 x bfloat> @llvm.riscv.vfnmadd.nxv8bf16.nxv8bf16(
+ <vscale x 8 x bfloat>,
+ <vscale x 8 x bfloat>,
+ <vscale x 8 x bfloat>,
+ iXLen, iXLen, iXLen);
+
+define <vscale x 8 x bfloat> @intrinsic_vfnmadd_vv_nxv8bf16_nxv8bf16_nxv8bf16(<vscale x 8 x bfloat> %0, <vscale x 8 x bfloat> %1, <vscale x 8 x bfloat> %2, iXLen %3) nounwind {
+; CHECK-LABEL: intrinsic_vfnmadd_vv_nxv8bf16_nxv8bf16_nxv8bf16:
+; CHECK: # %bb.0: # %entry
+; CHECK-NEXT: fsrmi a1, 0
+; CHECK-NEXT: vsetvli zero, a0, e16alt, m2, tu, ma
+; CHECK-NEXT: vfnmadd.vv v8, v10, v12
+; CHECK-NEXT: fsrm a1
+; CHECK-NEXT: ret
+entry:
+ %a = call <vscale x 8 x bfloat> @llvm.riscv.vfnmadd.nxv8bf16.nxv8bf16(
+ <vscale x 8 x bfloat> %0,
+ <vscale x 8 x bfloat> %1,
+ <vscale x 8 x bfloat> %2,
+ iXLen 0, iXLen %3, iXLen 0)
+
+ ret <vscale x 8 x bfloat> %a
+}
+
+declare <vscale x 8 x bfloat> @llvm.riscv.vfnmadd.mask.nxv8bf16.nxv8bf16(
+ <vscale x 8 x bfloat>,
+ <vscale x 8 x bfloat>,
+ <vscale x 8 x bfloat>,
+ <vscale x 8 x i1>,
+ iXLen, iXLen, iXLen);
+
+define <vscale x 8 x bfloat> @intrinsic_vfnmadd_mask_vv_nxv8bf16_nxv8bf16_nxv8bf16(<vscale x 8 x bfloat> %0, <vscale x 8 x bfloat> %1, <vscale x 8 x bfloat> %2, <vscale x 8 x i1> %3, iXLen %4) nounwind {
+; CHECK-LABEL: intrinsic_vfnmadd_mask_vv_nxv8bf16_nxv8bf16_nxv8bf16:
+; CHECK: # %bb.0: # %entry
+; CHECK-NEXT: fsrmi a1, 0
+; CHECK-NEXT: vsetvli zero, a0, e16alt, m2, tu, mu
+; CHECK-NEXT: vfnmadd.vv v8, v10, v12, v0.t
+; CHECK-NEXT: fsrm a1
+; CHECK-NEXT: ret
+entry:
+ %a = call <vscale x 8 x bfloat> @llvm.riscv.vfnmadd.mask.nxv8bf16.nxv8bf16(
+ <vscale x 8 x bfloat> %0,
+ <vscale x 8 x bfloat> %1,
+ <vscale x 8 x bfloat> %2,
+ <vscale x 8 x i1> %3,
+ iXLen 0, iXLen %4, iXLen 0);
+
+ ret <vscale x 8 x bfloat> %a
+}
+
+declare <vscale x 16 x bfloat> @llvm.riscv.vfnmadd.nxv16bf16.nxv16bf16(
+ <vscale x 16 x bfloat>,
+ <vscale x 16 x bfloat>,
+ <vscale x 16 x bfloat>,
+ iXLen, iXLen, iXLen);
+
+define <vscale x 16 x bfloat> @intrinsic_vfnmadd_vv_nxv16bf16_nxv16bf16_nxv16bf16(<vscale x 16 x bfloat> %0, <vscale x 16 x bfloat> %1, <vscale x 16 x bfloat> %2, iXLen %3) nounwind {
+; CHECK-LABEL: intrinsic_vfnmadd_vv_nxv16bf16_nxv16bf16_nxv16bf16:
+; CHECK: # %bb.0: # %entry
+; CHECK-NEXT: fsrmi a1, 0
+; CHECK-NEXT: vsetvli zero, a0, e16alt, m4, tu, ma
+; CHECK-NEXT: vfnmadd.vv v8, v12, v16
+; CHECK-NEXT: fsrm a1
+; CHECK-NEXT: ret
+entry:
+ %a = call <vscale x 16 x bfloat> @llvm.riscv.vfnmadd.nxv16bf16.nxv16bf16(
+ <vscale x 16 x bfloat> %0,
+ <vscale x 16 x bfloat> %1,
+ <vscale x 16 x bfloat> %2,
+ iXLen 0, iXLen %3, iXLen 0)
+
+ ret <vscale x 16 x bfloat> %a
+}
+
+declare <vscale x 16 x bfloat> @llvm.riscv.vfnmadd.mask.nxv16bf16.nxv16bf16(
+ <vscale x 16 x bfloat>,
+ <vscale x 16 x bfloat>,
+ <vscale x 16 x bfloat>,
+ <vscale x 16 x i1>,
+ iXLen, iXLen, iXLen);
+
+define <vscale x 16 x bfloat> @intrinsic_vfnmadd_mask_vv_nxv16bf16_nxv16bf16_nxv16bf16(<vscale x 16 x bfloat> %0, <vscale x 16 x bfloat> %1, <vscale x 16 x bfloat> %2, <vscale x 16 x i1> %3, iXLen %4) nounwind {
+; CHECK-LABEL: intrinsic_vfnmadd_mask_vv_nxv16bf16_nxv16bf16_nxv16bf16:
+; CHECK: # %bb.0: # %entry
+; CHECK-NEXT: fsrmi a1, 0
+; CHECK-NEXT: vsetvli zero, a0, e16alt, m4, tu, mu
+; CHECK-NEXT: vfnmadd.vv v8, v12, v16, v0.t
+; CHECK-NEXT: fsrm a1
+; CHECK-NEXT: ret
+entry:
+ %a = call <vscale x 16 x bfloat> @llvm.riscv.vfnmadd.mask.nxv16bf16.nxv16bf16(
+ <vscale x 16 x bfloat> %0,
+ <vscale x 16 x bfloat> %1,
+ <vscale x 16 x bfloat> %2,
+ <vscale x 16 x i1> %3,
+ iXLen 0, iXLen %4, iXLen 0);
+
+ ret <vscale x 16 x bfloat> %a
+}
+
+declare <vscale x 1 x bfloat> @llvm.riscv.vfnmadd.nxv1bf16.bf16(
+ <vscale x 1 x bfloat>,
+ bfloat,
+ <vscale x 1 x bfloat>,
+ iXLen, iXLen, iXLen);
+
+define <vscale x 1 x bfloat> @intrinsic_vfnmadd_vf_nxv1bf16_bf16_nxv1bf16(<vscale x 1 x bfloat> %0, bfloat %1, <vscale x 1 x bfloat> %2, iXLen %3) nounwind {
+; CHECK-LABEL: intrinsic_vfnmadd_vf_nxv1bf16_bf16_nxv1bf16:
+; CHECK: # %bb.0: # %entry
+; CHECK-NEXT: fsrmi a1, 0
+; CHECK-NEXT: vsetvli zero, a0, e16alt, mf4, tu, ma
+; CHECK-NEXT: vfnmadd.vf v8, fa0, v9
+; CHECK-NEXT: fsrm a1
+; CHECK-NEXT: ret
+entry:
+ %a = call <vscale x 1 x bfloat> @llvm.riscv.vfnmadd.nxv1bf16.bf16(
+ <vscale x 1 x bfloat> %0,
+ bfloat %1,
+ <vscale x 1 x bfloat> %2,
+ iXLen 0, iXLen %3, iXLen 0)
+
+ ret <vscale x 1 x bfloat> %a
+}
+
+declare <vscale x 1 x bfloat> @llvm.riscv.vfnmadd.mask.nxv1bf16.bf16(
+ <vscale x 1 x bfloat>,
+ bfloat,
+ <vscale x 1 x bfloat>,
+ <vscale x 1 x i1>,
+ iXLen, iXLen, iXLen);
+
+define <vscale x 1 x bfloat> @intrinsic_vfnmadd_mask_vf_nxv1bf16_bf16_nxv1bf16(<vscale x 1 x bfloat> %0, bfloat %1, <vscale x 1 x bfloat> %2, <vscale x 1 x i1> %3, iXLen %4) nounwind {
+; CHECK-LABEL: intrinsic_vfnmadd_mask_vf_nxv1bf16_bf16_nxv1bf16:
+; CHECK: # %bb.0: # %entry
+; CHECK-NEXT: fsrmi a1, 0
+; CHECK-NEXT: vsetvli zero, a0, e16alt, mf4, tu, mu
+; CHECK-NEXT: vfnmadd.vf v8, fa0, v9, v0.t
+; CHECK-NEXT: fsrm a1
+; CHECK-NEXT: ret
+entry:
+ %a = call <vscale x 1 x bfloat> @llvm.riscv.vfnmadd.mask.nxv1bf16.bf16(
+ <vscale x 1 x bfloat> %0,
+ bfloat %1,
+ <vscale x 1 x bfloat> %2,
+ <vscale x 1 x i1> %3,
+ iXLen 0, iXLen %4, iXLen 0);
+
+ ret <vscale x 1 x bfloat> %a
+}
+
+declare <vscale x 2 x bfloat> @llvm.riscv.vfnmadd.nxv2bf16.bf16(
+ <vscale x 2 x bfloat>,
+ bfloat,
+ <vscale x 2 x bfloat>,
+ iXLen, iXLen, iXLen);
+
+define <vscale x 2 x bfloat> @intrinsic_vfnmadd_vf_nxv2bf16_bf16_nxv2bf16(<vscale x 2 x bfloat> %0, bfloat %1, <vscale x 2 x bfloat> %2, iXLen %3) nounwind {
+; CHECK-LABEL: intrinsic_vfnmadd_vf_nxv2bf16_bf16_nxv2bf16:
+; CHECK: # %bb.0: # %entry
+; CHECK-NEXT: fsrmi a1, 0
+; CHECK-NEXT: vsetvli zero, a0, e16alt, mf2, tu, ma
+; CHECK-NEXT: vfnmadd.vf v8, fa0, v9
+; CHECK-NEXT: fsrm a1
+; CHECK-NEXT: ret
+entry:
+ %a = call <vscale x 2 x bfloat> @llvm.riscv.vfnmadd.nxv2bf16.bf16(
+ <vscale x 2 x bfloat> %0,
+ bfloat %1,
+ <vscale x 2 x bfloat> %2,
+ iXLen 0, iXLen %3, iXLen 0)
+
+ ret <vscale x 2 x bfloat> %a
+}
+
+declare <vscale x 2 x bfloat> @llvm.riscv.vfnmadd.mask.nxv2bf16.bf16(
+ <vscale x 2 x bfloat>,
+ bfloat,
+ <vscale x 2 x bfloat>,
+ <vscale x 2 x i1>,
+ iXLen, iXLen, iXLen);
+
+define <vscale x 2 x bfloat> @intrinsic_vfnmadd_mask_vf_nxv2bf16_bf16_nxv2bf16(<vscale x 2 x bfloat> %0, bfloat %1, <vscale x 2 x bfloat> %2, <vscale x 2 x i1> %3, iXLen %4) nounwind {
+; CHECK-LABEL: intrinsic_vfnmadd_mask_vf_nxv2bf16_bf16_nxv2bf16:
+; CHECK: # %bb.0: # %entry
+; CHECK-NEXT: fsrmi a1, 0
+; CHECK-NEXT: vsetvli zero, a0, e16alt, mf2, tu, mu
+; CHECK-NEXT: vfnmadd.vf v8, fa0, v9, v0.t
+; CHECK-NEXT: fsrm a1
+; CHECK-NEXT: ret
+entry:
+ %a = call <vscale x 2 x bfloat> @llvm.riscv.vfnmadd.mask.nxv2bf16.bf16(
+ <vscale x 2 x bfloat> %0,
+ bfloat %1,
+ <vscale x 2 x bfloat> %2,
+ <vscale x 2 x i1> %3,
+ iXLen 0, iXLen %4, iXLen 0);
+
+ ret <vscale x 2 x bfloat> %a
+}
+
+declare <vscale x 4 x bfloat> @llvm.riscv.vfnmadd.nxv4bf16.bf16(
+ <vscale x 4 x bfloat>,
+ bfloat,
+ <vscale x 4 x bfloat>,
+ iXLen, iXLen, iXLen);
+
+define <vscale x 4 x bfloat> @intrinsic_vfnmadd_vf_nxv4bf16_bf16_nxv4bf16(<vscale x 4 x bfloat> %0, bfloat %1, <vscale x 4 x bfloat> %2, iXLen %3) nounwind {
+; CHECK-LABEL: intrinsic_vfnmadd_vf_nxv4bf16_bf16_nxv4bf16:
+; CHECK: # %bb.0: # %entry
+; CHECK-NEXT: fsrmi a1, 0
+; CHECK-NEXT: vsetvli zero, a0, e16alt, m1, tu, ma
+; CHECK-NEXT: vfnmadd.vf v8, fa0, v9
+; CHECK-NEXT: fsrm a1
+; CHECK-NEXT: ret
+entry:
+ %a = call <vscale x 4 x bfloat> @llvm.riscv.vfnmadd.nxv4bf16.bf16(
+ <vscale x 4 x bfloat> %0,
+ bfloat %1,
+ <vscale x 4 x bfloat> %2,
+ iXLen 0, iXLen %3, iXLen 0)
+
+ ret <vscale x 4 x bfloat> %a
+}
+
+declare <vscale x 4 x bfloat> @llvm.riscv.vfnmadd.mask.nxv4bf16.bf16(
+ <vscale x 4 x bfloat>,
+ bfloat,
+ <vscale x 4 x bfloat>,
+ <vscale x 4 x i1>,
+ iXLen, iXLen, iXLen);
+
+define <vscale x 4 x bfloat> @intrinsic_vfnmadd_mask_vf_nxv4bf16_bf16_nxv4bf16(<vscale x 4 x bfloat> %0, bfloat %1, <vscale x 4 x bfloat> %2, <vscale x 4 x i1> %3, iXLen %4) nounwind {
+; CHECK-LABEL: intrinsic_vfnmadd_mask_vf_nxv4bf16_bf16_nxv4bf16:
+; CHECK: # %bb.0: # %entry
+; CHECK-NEXT: fsrmi a1, 0
+; CHECK-NEXT: vsetvli zero, a0, e16alt, m1, tu, mu
+; CHECK-NEXT: vfnmadd.vf v8, fa0, v9, v0.t
+; CHECK-NEXT: fsrm a1
+; CHECK-NEXT: ret
+entry:
+ %a = call <vscale x 4 x bfloat> @llvm.riscv.vfnmadd.mask.nxv4bf16.bf16(
+ <vscale x 4 x bfloat> %0,
+ bfloat %1,
+ <vscale x 4 x bfloat> %2,
+ <vscale x 4 x i1> %3,
+ iXLen 0, iXLen %4, iXLen 0);
+
+ ret <vscale x 4 x bfloat> %a
+}
+
+declare <vscale x 8 x bfloat> @llvm.riscv.vfnmadd.nxv8bf16.bf16(
+ <vscale x 8 x bfloat>,
+ bfloat,
+ <vscale x 8 x bfloat>,
+ iXLen, iXLen, iXLen);
+
+define <vscale x 8 x bfloat> @intrinsic_vfnmadd_vf_nxv8bf16_bf16_nxv8bf16(<vscale x 8 x bfloat> %0, bfloat %1, <vscale x 8 x bfloat> %2, iXLen %3) nounwind {
+; CHECK-LABEL: intrinsic_vfnmadd_vf_nxv8bf16_bf16_nxv8bf16:
+; CHECK: # %bb.0: # %entry
+; CHECK-NEXT: fsrmi a1, 0
+; CHECK-NEXT: vsetvli zero, a0, e16alt, m2, tu, ma
+; CHECK-NEXT: vfnmadd.vf v8, fa0, v10
+; CHECK-NEXT: fsrm a1
+; CHECK-NEXT: ret
+entry:
+ %a = call <vscale x 8 x bfloat> @llvm.riscv.vfnmadd.nxv8bf16.bf16(
+ <vscale x 8 x bfloat> %0,
+ bfloat %1,
+ <vscale x 8 x bfloat> %2,
+ iXLen 0, iXLen %3, iXLen 0)
+
+ ret <vscale x 8 x bfloat> %a
+}
+
+declare <vscale x 8 x bfloat> @llvm.riscv.vfnmadd.mask.nxv8bf16.bf16(
+ <vscale x 8 x bfloat>,
+ bfloat,
+ <vscale x 8 x bfloat>,
+ <vscale x 8 x i1>,
+ iXLen, iXLen, iXLen);
+
+define <vscale x 8 x bfloat> @intrinsic_vfnmadd_mask_vf_nxv8bf16_bf16_nxv8bf16(<vscale x 8 x bfloat> %0, bfloat %1, <vscale x 8 x bfloat> %2, <vscale x 8 x i1> %3, iXLen %4) nounwind {
+; CHECK-LABEL: intrinsic_vfnmadd_mask_vf_nxv8bf16_bf16_nxv8bf16:
+; CHECK: # %bb.0: # %entry
+; CHECK-NEXT: fsrmi a1, 0
+; CHECK-NEXT: vsetvli zero, a0, e16alt, m2, tu, mu
+; CHECK-NEXT: vfnmadd.vf v8, fa0, v10, v0.t
+; CHECK-NEXT: fsrm a1
+; CHECK-NEXT: ret
+entry:
+ %a = call <vscale x 8 x bfloat> @llvm.riscv.vfnmadd.mask.nxv8bf16.bf16(
+ <vscale x 8 x bfloat> %0,
+ bfloat %1,
+ <vscale x 8 x bfloat> %2,
+ <vscale x 8 x i1> %3,
+ iXLen 0, iXLen %4, iXLen 0);
+
+ ret <vscale x 8 x bfloat> %a
+}
+
+declare <vscale x 16 x bfloat> @llvm.riscv.vfnmadd.nxv16bf16.bf16(
+ <vscale x 16 x bfloat>,
+ bfloat,
+ <vscale x 16 x bfloat>,
+ iXLen, iXLen, iXLen);
+
+define <vscale x 16 x bfloat> @intrinsic_vfnmadd_vf_nxv16bf16_bf16_nxv16bf16(<vscale x 16 x bfloat> %0, bfloat %1, <vscale x 16 x bfloat> %2, iXLen %3) nounwind {
+; CHECK-LABEL: intrinsic_vfnmadd_vf_nxv16bf16_bf16_nxv16bf16:
+; CHECK: # %bb.0: # %entry
+; CHECK-NEXT: fsrmi a1, 0
+; CHECK-NEXT: vsetvli zero, a0, e16alt, m4, tu, ma
+; CHECK-NEXT: vfnmadd.vf v8, fa0, v12
+; CHECK-NEXT: fsrm a1
+; CHECK-NEXT: ret
+entry:
+ %a = call <vscale x 16 x bfloat> @llvm.riscv.vfnmadd.nxv16bf16.bf16(
+ <vscale x 16 x bfloat> %0,
+ bfloat %1,
+ <vscale x 16 x bfloat> %2,
+ iXLen 0, iXLen %3, iXLen 0)
+
+ ret <vscale x 16 x bfloat> %a
+}
+
+declare <vscale x 16 x bfloat> @llvm.riscv.vfnmadd.mask.nxv16bf16.bf16(
+ <vscale x 16 x bfloat>,
+ bfloat,
+ <vscale x 16 x bfloat>,
+ <vscale x 16 x i1>,
+ iXLen, iXLen, iXLen);
+
+define <vscale x 16 x bfloat> @intrinsic_vfnmadd_mask_vf_nxv16bf16_bf16_nxv16bf16(<vscale x 16 x bfloat> %0, bfloat %1, <vscale x 16 x bfloat> %2, <vscale x 16 x i1> %3, iXLen %4) nounwind {
+; CHECK-LABEL: intrinsic_vfnmadd_mask_vf_nxv16bf16_bf16_nxv16bf16:
+; CHECK: # %bb.0: # %entry
+; CHECK-NEXT: fsrmi a1, 0
+; CHECK-NEXT: vsetvli zero, a0, e16alt, m4, tu, mu
+; CHECK-NEXT: vfnmadd.vf v8, fa0, v12, v0.t
+; CHECK-NEXT: fsrm a1
+; CHECK-NEXT: ret
+entry:
+ %a = call <vscale x 16 x bfloat> @llvm.riscv.vfnmadd.mask.nxv16bf16.bf16(
+ <vscale x 16 x bfloat> %0,
+ bfloat %1,
+ <vscale x 16 x bfloat> %2,
+ <vscale x 16 x i1> %3,
+ iXLen 0, iXLen %4, iXLen 0);
+
+ ret <vscale x 16 x bfloat> %a
+}
+
+define <vscale x 1 x bfloat> @intrinsic_vfnmadd_vv_nxv1bf16_nxv1bf16_nxv1bf16_commute(<vscale x 1 x bfloat> %0, <vscale x 1 x bfloat> %1, <vscale x 1 x bfloat> %2, iXLen %3) nounwind {
+; CHECK-LABEL: intrinsic_vfnmadd_vv_nxv1bf16_nxv1bf16_nxv1bf16_commute:
+; CHECK: # %bb.0: # %entry
+; CHECK-NEXT: vsetvli zero, a0, e16alt, mf4, ta, ma
+; CHECK-NEXT: vfnmadd.vv v8, v9, v10
+; CHECK-NEXT: ret
+entry:
+ %a = call <vscale x 1 x bfloat> @llvm.riscv.vfnmadd.nxv1bf16.nxv1bf16(
+ <vscale x 1 x bfloat> %1,
+ <vscale x 1 x bfloat> %0,
+ <vscale x 1 x bfloat> %2,
+ iXLen 7, iXLen %3, iXLen 3)
+
+ ret <vscale x 1 x bfloat> %a
+}
+
+define <vscale x 1 x bfloat> @intrinsic_vfnmadd_vv_nxv1bf16_nxv1bf16_nxv1bf16_commute2(<vscale x 1 x bfloat> %0, <vscale x 1 x bfloat> %1, <vscale x 1 x bfloat> %2, iXLen %3) nounwind {
+; CHECK-LABEL: intrinsic_vfnmadd_vv_nxv1bf16_nxv1bf16_nxv1bf16_commute2:
+; CHECK: # %bb.0: # %entry
+; CHECK-NEXT: vsetvli zero, a0, e16alt, mf4, ta, ma
+; CHECK-NEXT: vfnmacc.vv v8, v10, v9
+; CHECK-NEXT: ret
+entry:
+ %a = call <vscale x 1 x bfloat> @llvm.riscv.vfnmadd.nxv1bf16.nxv1bf16(
+ <vscale x 1 x bfloat> %1,
+ <vscale x 1 x bfloat> %2,
+ <vscale x 1 x bfloat> %0,
+ iXLen 7, iXLen %3, iXLen 3)
+
+ ret <vscale x 1 x bfloat> %a
+}
+
+define <vscale x 1 x bfloat> @intrinsic_vfnmadd_vf_nxv1bf16_bf16_nxv1bf16_commute(<vscale x 1 x bfloat> %0, bfloat %1, <vscale x 1 x bfloat> %2, iXLen %3) nounwind {
+; CHECK-LABEL: intrinsic_vfnmadd_vf_nxv1bf16_bf16_nxv1bf16_commute:
+; CHECK: # %bb.0: # %entry
+; CHECK-NEXT: vsetvli zero, a0, e16alt, mf4, ta, ma
+; CHECK-NEXT: vfnmacc.vf v8, fa0, v9
+; CHECK-NEXT: ret
+entry:
+ %a = call <vscale x 1 x bfloat> @llvm.riscv.vfnmadd.nxv1bf16.bf16(
+ <vscale x 1 x bfloat> %2,
+ bfloat %1,
+ <vscale x 1 x bfloat> %0,
+ iXLen 7, iXLen %3, iXLen 3)
+
+ ret <vscale x 1 x bfloat> %a
+}
diff --git a/llvm/test/CodeGen/RISCV/rvv/vfnmsac-bf.ll b/llvm/test/CodeGen/RISCV/rvv/vfnmsac-bf.ll
new file mode 100644
index 0000000..cfbaafa
--- /dev/null
+++ b/llvm/test/CodeGen/RISCV/rvv/vfnmsac-bf.ll
@@ -0,0 +1,553 @@
+; NOTE: Assertions have been autogenerated by utils/update_llc_test_checks.py
+; RUN: sed 's/iXLen/i32/g' %s | llc -mtriple=riscv32 -mattr=+v,+experimental-zvfbfa \
+; RUN: -verify-machineinstrs -target-abi=ilp32d | FileCheck %s
+; RUN: sed 's/iXLen/i64/g' %s | llc -mtriple=riscv64 -mattr=+v,+experimental-zvfbfa \
+; RUN: -verify-machineinstrs -target-abi=lp64d | FileCheck %s
+
+declare <vscale x 1 x bfloat> @llvm.riscv.vfnmsac.nxv1bf16.nxv1bf16(
+ <vscale x 1 x bfloat>,
+ <vscale x 1 x bfloat>,
+ <vscale x 1 x bfloat>,
+ iXLen, iXLen, iXLen);
+
+define <vscale x 1 x bfloat> @intrinsic_vfnmsac_vv_nxv1bf16_nxv1bf16_nxv1bf16(<vscale x 1 x bfloat> %0, <vscale x 1 x bfloat> %1, <vscale x 1 x bfloat> %2, iXLen %3) nounwind {
+; CHECK-LABEL: intrinsic_vfnmsac_vv_nxv1bf16_nxv1bf16_nxv1bf16:
+; CHECK: # %bb.0: # %entry
+; CHECK-NEXT: fsrmi a1, 0
+; CHECK-NEXT: vsetvli zero, a0, e16alt, mf4, tu, ma
+; CHECK-NEXT: vfnmsac.vv v8, v9, v10
+; CHECK-NEXT: fsrm a1
+; CHECK-NEXT: ret
+entry:
+ %a = call <vscale x 1 x bfloat> @llvm.riscv.vfnmsac.nxv1bf16.nxv1bf16(
+ <vscale x 1 x bfloat> %0,
+ <vscale x 1 x bfloat> %1,
+ <vscale x 1 x bfloat> %2,
+ iXLen 0, iXLen %3, iXLen 0)
+
+ ret <vscale x 1 x bfloat> %a
+}
+
+declare <vscale x 1 x bfloat> @llvm.riscv.vfnmsac.mask.nxv1bf16.nxv1bf16(
+ <vscale x 1 x bfloat>,
+ <vscale x 1 x bfloat>,
+ <vscale x 1 x bfloat>,
+ <vscale x 1 x i1>,
+ iXLen, iXLen, iXLen);
+
+define <vscale x 1 x bfloat> @intrinsic_vfnmsac_mask_vv_nxv1bf16_nxv1bf16_nxv1bf16(<vscale x 1 x bfloat> %0, <vscale x 1 x bfloat> %1, <vscale x 1 x bfloat> %2, <vscale x 1 x i1> %3, iXLen %4) nounwind {
+; CHECK-LABEL: intrinsic_vfnmsac_mask_vv_nxv1bf16_nxv1bf16_nxv1bf16:
+; CHECK: # %bb.0: # %entry
+; CHECK-NEXT: fsrmi a1, 0
+; CHECK-NEXT: vsetvli zero, a0, e16alt, mf4, tu, mu
+; CHECK-NEXT: vfnmsac.vv v8, v9, v10, v0.t
+; CHECK-NEXT: fsrm a1
+; CHECK-NEXT: ret
+entry:
+ %a = call <vscale x 1 x bfloat> @llvm.riscv.vfnmsac.mask.nxv1bf16.nxv1bf16(
+ <vscale x 1 x bfloat> %0,
+ <vscale x 1 x bfloat> %1,
+ <vscale x 1 x bfloat> %2,
+ <vscale x 1 x i1> %3,
+ iXLen 0, iXLen %4, iXLen 0);
+
+ ret <vscale x 1 x bfloat> %a
+}
+
+declare <vscale x 2 x bfloat> @llvm.riscv.vfnmsac.nxv2bf16.nxv2bf16(
+ <vscale x 2 x bfloat>,
+ <vscale x 2 x bfloat>,
+ <vscale x 2 x bfloat>,
+ iXLen, iXLen, iXLen);
+
+define <vscale x 2 x bfloat> @intrinsic_vfnmsac_vv_nxv2bf16_nxv2bf16_nxv2bf16(<vscale x 2 x bfloat> %0, <vscale x 2 x bfloat> %1, <vscale x 2 x bfloat> %2, iXLen %3) nounwind {
+; CHECK-LABEL: intrinsic_vfnmsac_vv_nxv2bf16_nxv2bf16_nxv2bf16:
+; CHECK: # %bb.0: # %entry
+; CHECK-NEXT: fsrmi a1, 0
+; CHECK-NEXT: vsetvli zero, a0, e16alt, mf2, tu, ma
+; CHECK-NEXT: vfnmsac.vv v8, v9, v10
+; CHECK-NEXT: fsrm a1
+; CHECK-NEXT: ret
+entry:
+ %a = call <vscale x 2 x bfloat> @llvm.riscv.vfnmsac.nxv2bf16.nxv2bf16(
+ <vscale x 2 x bfloat> %0,
+ <vscale x 2 x bfloat> %1,
+ <vscale x 2 x bfloat> %2,
+ iXLen 0, iXLen %3, iXLen 0)
+
+ ret <vscale x 2 x bfloat> %a
+}
+
+declare <vscale x 2 x bfloat> @llvm.riscv.vfnmsac.mask.nxv2bf16.nxv2bf16(
+ <vscale x 2 x bfloat>,
+ <vscale x 2 x bfloat>,
+ <vscale x 2 x bfloat>,
+ <vscale x 2 x i1>,
+ iXLen, iXLen, iXLen);
+
+define <vscale x 2 x bfloat> @intrinsic_vfnmsac_mask_vv_nxv2bf16_nxv2bf16_nxv2bf16(<vscale x 2 x bfloat> %0, <vscale x 2 x bfloat> %1, <vscale x 2 x bfloat> %2, <vscale x 2 x i1> %3, iXLen %4) nounwind {
+; CHECK-LABEL: intrinsic_vfnmsac_mask_vv_nxv2bf16_nxv2bf16_nxv2bf16:
+; CHECK: # %bb.0: # %entry
+; CHECK-NEXT: fsrmi a1, 0
+; CHECK-NEXT: vsetvli zero, a0, e16alt, mf2, tu, mu
+; CHECK-NEXT: vfnmsac.vv v8, v9, v10, v0.t
+; CHECK-NEXT: fsrm a1
+; CHECK-NEXT: ret
+entry:
+ %a = call <vscale x 2 x bfloat> @llvm.riscv.vfnmsac.mask.nxv2bf16.nxv2bf16(
+ <vscale x 2 x bfloat> %0,
+ <vscale x 2 x bfloat> %1,
+ <vscale x 2 x bfloat> %2,
+ <vscale x 2 x i1> %3,
+ iXLen 0, iXLen %4, iXLen 0);
+
+ ret <vscale x 2 x bfloat> %a
+}
+
+declare <vscale x 4 x bfloat> @llvm.riscv.vfnmsac.nxv4bf16.nxv4bf16(
+ <vscale x 4 x bfloat>,
+ <vscale x 4 x bfloat>,
+ <vscale x 4 x bfloat>,
+ iXLen, iXLen, iXLen);
+
+define <vscale x 4 x bfloat> @intrinsic_vfnmsac_vv_nxv4bf16_nxv4bf16_nxv4bf16(<vscale x 4 x bfloat> %0, <vscale x 4 x bfloat> %1, <vscale x 4 x bfloat> %2, iXLen %3) nounwind {
+; CHECK-LABEL: intrinsic_vfnmsac_vv_nxv4bf16_nxv4bf16_nxv4bf16:
+; CHECK: # %bb.0: # %entry
+; CHECK-NEXT: fsrmi a1, 0
+; CHECK-NEXT: vsetvli zero, a0, e16alt, m1, tu, ma
+; CHECK-NEXT: vfnmsac.vv v8, v9, v10
+; CHECK-NEXT: fsrm a1
+; CHECK-NEXT: ret
+entry:
+ %a = call <vscale x 4 x bfloat> @llvm.riscv.vfnmsac.nxv4bf16.nxv4bf16(
+ <vscale x 4 x bfloat> %0,
+ <vscale x 4 x bfloat> %1,
+ <vscale x 4 x bfloat> %2,
+ iXLen 0, iXLen %3, iXLen 0)
+
+ ret <vscale x 4 x bfloat> %a
+}
+
+declare <vscale x 4 x bfloat> @llvm.riscv.vfnmsac.mask.nxv4bf16.nxv4bf16(
+ <vscale x 4 x bfloat>,
+ <vscale x 4 x bfloat>,
+ <vscale x 4 x bfloat>,
+ <vscale x 4 x i1>,
+ iXLen, iXLen, iXLen);
+
+define <vscale x 4 x bfloat> @intrinsic_vfnmsac_mask_vv_nxv4bf16_nxv4bf16_nxv4bf16(<vscale x 4 x bfloat> %0, <vscale x 4 x bfloat> %1, <vscale x 4 x bfloat> %2, <vscale x 4 x i1> %3, iXLen %4) nounwind {
+; CHECK-LABEL: intrinsic_vfnmsac_mask_vv_nxv4bf16_nxv4bf16_nxv4bf16:
+; CHECK: # %bb.0: # %entry
+; CHECK-NEXT: fsrmi a1, 0
+; CHECK-NEXT: vsetvli zero, a0, e16alt, m1, tu, mu
+; CHECK-NEXT: vfnmsac.vv v8, v9, v10, v0.t
+; CHECK-NEXT: fsrm a1
+; CHECK-NEXT: ret
+entry:
+ %a = call <vscale x 4 x bfloat> @llvm.riscv.vfnmsac.mask.nxv4bf16.nxv4bf16(
+ <vscale x 4 x bfloat> %0,
+ <vscale x 4 x bfloat> %1,
+ <vscale x 4 x bfloat> %2,
+ <vscale x 4 x i1> %3,
+ iXLen 0, iXLen %4, iXLen 0);
+
+ ret <vscale x 4 x bfloat> %a
+}
+
+declare <vscale x 8 x bfloat> @llvm.riscv.vfnmsac.nxv8bf16.nxv8bf16(
+ <vscale x 8 x bfloat>,
+ <vscale x 8 x bfloat>,
+ <vscale x 8 x bfloat>,
+ iXLen, iXLen, iXLen);
+
+define <vscale x 8 x bfloat> @intrinsic_vfnmsac_vv_nxv8bf16_nxv8bf16_nxv8bf16(<vscale x 8 x bfloat> %0, <vscale x 8 x bfloat> %1, <vscale x 8 x bfloat> %2, iXLen %3) nounwind {
+; CHECK-LABEL: intrinsic_vfnmsac_vv_nxv8bf16_nxv8bf16_nxv8bf16:
+; CHECK: # %bb.0: # %entry
+; CHECK-NEXT: fsrmi a1, 0
+; CHECK-NEXT: vsetvli zero, a0, e16alt, m2, tu, ma
+; CHECK-NEXT: vfnmsac.vv v8, v10, v12
+; CHECK-NEXT: fsrm a1
+; CHECK-NEXT: ret
+entry:
+ %a = call <vscale x 8 x bfloat> @llvm.riscv.vfnmsac.nxv8bf16.nxv8bf16(
+ <vscale x 8 x bfloat> %0,
+ <vscale x 8 x bfloat> %1,
+ <vscale x 8 x bfloat> %2,
+ iXLen 0, iXLen %3, iXLen 0)
+
+ ret <vscale x 8 x bfloat> %a
+}
+
+declare <vscale x 8 x bfloat> @llvm.riscv.vfnmsac.mask.nxv8bf16.nxv8bf16(
+ <vscale x 8 x bfloat>,
+ <vscale x 8 x bfloat>,
+ <vscale x 8 x bfloat>,
+ <vscale x 8 x i1>,
+ iXLen, iXLen, iXLen);
+
+define <vscale x 8 x bfloat> @intrinsic_vfnmsac_mask_vv_nxv8bf16_nxv8bf16_nxv8bf16(<vscale x 8 x bfloat> %0, <vscale x 8 x bfloat> %1, <vscale x 8 x bfloat> %2, <vscale x 8 x i1> %3, iXLen %4) nounwind {
+; CHECK-LABEL: intrinsic_vfnmsac_mask_vv_nxv8bf16_nxv8bf16_nxv8bf16:
+; CHECK: # %bb.0: # %entry
+; CHECK-NEXT: fsrmi a1, 0
+; CHECK-NEXT: vsetvli zero, a0, e16alt, m2, tu, mu
+; CHECK-NEXT: vfnmsac.vv v8, v10, v12, v0.t
+; CHECK-NEXT: fsrm a1
+; CHECK-NEXT: ret
+entry:
+ %a = call <vscale x 8 x bfloat> @llvm.riscv.vfnmsac.mask.nxv8bf16.nxv8bf16(
+ <vscale x 8 x bfloat> %0,
+ <vscale x 8 x bfloat> %1,
+ <vscale x 8 x bfloat> %2,
+ <vscale x 8 x i1> %3,
+ iXLen 0, iXLen %4, iXLen 0);
+
+ ret <vscale x 8 x bfloat> %a
+}
+
+declare <vscale x 16 x bfloat> @llvm.riscv.vfnmsac.nxv16bf16.nxv16bf16(
+ <vscale x 16 x bfloat>,
+ <vscale x 16 x bfloat>,
+ <vscale x 16 x bfloat>,
+ iXLen, iXLen, iXLen);
+
+define <vscale x 16 x bfloat> @intrinsic_vfnmsac_vv_nxv16bf16_nxv16bf16_nxv16bf16(<vscale x 16 x bfloat> %0, <vscale x 16 x bfloat> %1, <vscale x 16 x bfloat> %2, iXLen %3) nounwind {
+; CHECK-LABEL: intrinsic_vfnmsac_vv_nxv16bf16_nxv16bf16_nxv16bf16:
+; CHECK: # %bb.0: # %entry
+; CHECK-NEXT: fsrmi a1, 0
+; CHECK-NEXT: vsetvli zero, a0, e16alt, m4, tu, ma
+; CHECK-NEXT: vfnmsac.vv v8, v12, v16
+; CHECK-NEXT: fsrm a1
+; CHECK-NEXT: ret
+entry:
+ %a = call <vscale x 16 x bfloat> @llvm.riscv.vfnmsac.nxv16bf16.nxv16bf16(
+ <vscale x 16 x bfloat> %0,
+ <vscale x 16 x bfloat> %1,
+ <vscale x 16 x bfloat> %2,
+ iXLen 0, iXLen %3, iXLen 0)
+
+ ret <vscale x 16 x bfloat> %a
+}
+
+declare <vscale x 16 x bfloat> @llvm.riscv.vfnmsac.mask.nxv16bf16.nxv16bf16(
+ <vscale x 16 x bfloat>,
+ <vscale x 16 x bfloat>,
+ <vscale x 16 x bfloat>,
+ <vscale x 16 x i1>,
+ iXLen, iXLen, iXLen);
+
+define <vscale x 16 x bfloat> @intrinsic_vfnmsac_mask_vv_nxv16bf16_nxv16bf16_nxv16bf16(<vscale x 16 x bfloat> %0, <vscale x 16 x bfloat> %1, <vscale x 16 x bfloat> %2, <vscale x 16 x i1> %3, iXLen %4) nounwind {
+; CHECK-LABEL: intrinsic_vfnmsac_mask_vv_nxv16bf16_nxv16bf16_nxv16bf16:
+; CHECK: # %bb.0: # %entry
+; CHECK-NEXT: fsrmi a1, 0
+; CHECK-NEXT: vsetvli zero, a0, e16alt, m4, tu, mu
+; CHECK-NEXT: vfnmsac.vv v8, v12, v16, v0.t
+; CHECK-NEXT: fsrm a1
+; CHECK-NEXT: ret
+entry:
+ %a = call <vscale x 16 x bfloat> @llvm.riscv.vfnmsac.mask.nxv16bf16.nxv16bf16(
+ <vscale x 16 x bfloat> %0,
+ <vscale x 16 x bfloat> %1,
+ <vscale x 16 x bfloat> %2,
+ <vscale x 16 x i1> %3,
+ iXLen 0, iXLen %4, iXLen 0);
+
+ ret <vscale x 16 x bfloat> %a
+}
+
+declare <vscale x 1 x bfloat> @llvm.riscv.vfnmsac.nxv1bf16.bf16(
+ <vscale x 1 x bfloat>,
+ bfloat,
+ <vscale x 1 x bfloat>,
+ iXLen, iXLen, iXLen);
+
+define <vscale x 1 x bfloat> @intrinsic_vfnmsac_vf_nxv1bf16_bf16_nxv1bf16(<vscale x 1 x bfloat> %0, bfloat %1, <vscale x 1 x bfloat> %2, iXLen %3) nounwind {
+; CHECK-LABEL: intrinsic_vfnmsac_vf_nxv1bf16_bf16_nxv1bf16:
+; CHECK: # %bb.0: # %entry
+; CHECK-NEXT: fsrmi a1, 0
+; CHECK-NEXT: vsetvli zero, a0, e16alt, mf4, tu, ma
+; CHECK-NEXT: vfnmsac.vf v8, fa0, v9
+; CHECK-NEXT: fsrm a1
+; CHECK-NEXT: ret
+entry:
+ %a = call <vscale x 1 x bfloat> @llvm.riscv.vfnmsac.nxv1bf16.bf16(
+ <vscale x 1 x bfloat> %0,
+ bfloat %1,
+ <vscale x 1 x bfloat> %2,
+ iXLen 0, iXLen %3, iXLen 0)
+
+ ret <vscale x 1 x bfloat> %a
+}
+
+declare <vscale x 1 x bfloat> @llvm.riscv.vfnmsac.mask.nxv1bf16.bf16(
+ <vscale x 1 x bfloat>,
+ bfloat,
+ <vscale x 1 x bfloat>,
+ <vscale x 1 x i1>,
+ iXLen, iXLen, iXLen);
+
+define <vscale x 1 x bfloat> @intrinsic_vfnmsac_mask_vf_nxv1bf16_bf16_nxv1bf16(<vscale x 1 x bfloat> %0, bfloat %1, <vscale x 1 x bfloat> %2, <vscale x 1 x i1> %3, iXLen %4) nounwind {
+; CHECK-LABEL: intrinsic_vfnmsac_mask_vf_nxv1bf16_bf16_nxv1bf16:
+; CHECK: # %bb.0: # %entry
+; CHECK-NEXT: fsrmi a1, 0
+; CHECK-NEXT: vsetvli zero, a0, e16alt, mf4, tu, mu
+; CHECK-NEXT: vfnmsac.vf v8, fa0, v9, v0.t
+; CHECK-NEXT: fsrm a1
+; CHECK-NEXT: ret
+entry:
+ %a = call <vscale x 1 x bfloat> @llvm.riscv.vfnmsac.mask.nxv1bf16.bf16(
+ <vscale x 1 x bfloat> %0,
+ bfloat %1,
+ <vscale x 1 x bfloat> %2,
+ <vscale x 1 x i1> %3,
+ iXLen 0, iXLen %4, iXLen 0);
+
+ ret <vscale x 1 x bfloat> %a
+}
+
+declare <vscale x 2 x bfloat> @llvm.riscv.vfnmsac.nxv2bf16.bf16(
+ <vscale x 2 x bfloat>,
+ bfloat,
+ <vscale x 2 x bfloat>,
+ iXLen, iXLen, iXLen);
+
+define <vscale x 2 x bfloat> @intrinsic_vfnmsac_vf_nxv2bf16_bf16_nxv2bf16(<vscale x 2 x bfloat> %0, bfloat %1, <vscale x 2 x bfloat> %2, iXLen %3) nounwind {
+; CHECK-LABEL: intrinsic_vfnmsac_vf_nxv2bf16_bf16_nxv2bf16:
+; CHECK: # %bb.0: # %entry
+; CHECK-NEXT: fsrmi a1, 0
+; CHECK-NEXT: vsetvli zero, a0, e16alt, mf2, tu, ma
+; CHECK-NEXT: vfnmsac.vf v8, fa0, v9
+; CHECK-NEXT: fsrm a1
+; CHECK-NEXT: ret
+entry:
+ %a = call <vscale x 2 x bfloat> @llvm.riscv.vfnmsac.nxv2bf16.bf16(
+ <vscale x 2 x bfloat> %0,
+ bfloat %1,
+ <vscale x 2 x bfloat> %2,
+ iXLen 0, iXLen %3, iXLen 0)
+
+ ret <vscale x 2 x bfloat> %a
+}
+
+declare <vscale x 2 x bfloat> @llvm.riscv.vfnmsac.mask.nxv2bf16.bf16(
+ <vscale x 2 x bfloat>,
+ bfloat,
+ <vscale x 2 x bfloat>,
+ <vscale x 2 x i1>,
+ iXLen, iXLen, iXLen);
+
+define <vscale x 2 x bfloat> @intrinsic_vfnmsac_mask_vf_nxv2bf16_bf16_nxv2bf16(<vscale x 2 x bfloat> %0, bfloat %1, <vscale x 2 x bfloat> %2, <vscale x 2 x i1> %3, iXLen %4) nounwind {
+; CHECK-LABEL: intrinsic_vfnmsac_mask_vf_nxv2bf16_bf16_nxv2bf16:
+; CHECK: # %bb.0: # %entry
+; CHECK-NEXT: fsrmi a1, 0
+; CHECK-NEXT: vsetvli zero, a0, e16alt, mf2, tu, mu
+; CHECK-NEXT: vfnmsac.vf v8, fa0, v9, v0.t
+; CHECK-NEXT: fsrm a1
+; CHECK-NEXT: ret
+entry:
+ %a = call <vscale x 2 x bfloat> @llvm.riscv.vfnmsac.mask.nxv2bf16.bf16(
+ <vscale x 2 x bfloat> %0,
+ bfloat %1,
+ <vscale x 2 x bfloat> %2,
+ <vscale x 2 x i1> %3,
+ iXLen 0, iXLen %4, iXLen 0);
+
+ ret <vscale x 2 x bfloat> %a
+}
+
+declare <vscale x 4 x bfloat> @llvm.riscv.vfnmsac.nxv4bf16.bf16(
+ <vscale x 4 x bfloat>,
+ bfloat,
+ <vscale x 4 x bfloat>,
+ iXLen, iXLen, iXLen);
+
+define <vscale x 4 x bfloat> @intrinsic_vfnmsac_vf_nxv4bf16_bf16_nxv4bf16(<vscale x 4 x bfloat> %0, bfloat %1, <vscale x 4 x bfloat> %2, iXLen %3) nounwind {
+; CHECK-LABEL: intrinsic_vfnmsac_vf_nxv4bf16_bf16_nxv4bf16:
+; CHECK: # %bb.0: # %entry
+; CHECK-NEXT: fsrmi a1, 0
+; CHECK-NEXT: vsetvli zero, a0, e16alt, m1, tu, ma
+; CHECK-NEXT: vfnmsac.vf v8, fa0, v9
+; CHECK-NEXT: fsrm a1
+; CHECK-NEXT: ret
+entry:
+ %a = call <vscale x 4 x bfloat> @llvm.riscv.vfnmsac.nxv4bf16.bf16(
+ <vscale x 4 x bfloat> %0,
+ bfloat %1,
+ <vscale x 4 x bfloat> %2,
+ iXLen 0, iXLen %3, iXLen 0)
+
+ ret <vscale x 4 x bfloat> %a
+}
+
+declare <vscale x 4 x bfloat> @llvm.riscv.vfnmsac.mask.nxv4bf16.bf16(
+ <vscale x 4 x bfloat>,
+ bfloat,
+ <vscale x 4 x bfloat>,
+ <vscale x 4 x i1>,
+ iXLen, iXLen, iXLen);
+
+define <vscale x 4 x bfloat> @intrinsic_vfnmsac_mask_vf_nxv4bf16_bf16_nxv4bf16(<vscale x 4 x bfloat> %0, bfloat %1, <vscale x 4 x bfloat> %2, <vscale x 4 x i1> %3, iXLen %4) nounwind {
+; CHECK-LABEL: intrinsic_vfnmsac_mask_vf_nxv4bf16_bf16_nxv4bf16:
+; CHECK: # %bb.0: # %entry
+; CHECK-NEXT: fsrmi a1, 0
+; CHECK-NEXT: vsetvli zero, a0, e16alt, m1, tu, mu
+; CHECK-NEXT: vfnmsac.vf v8, fa0, v9, v0.t
+; CHECK-NEXT: fsrm a1
+; CHECK-NEXT: ret
+entry:
+ %a = call <vscale x 4 x bfloat> @llvm.riscv.vfnmsac.mask.nxv4bf16.bf16(
+ <vscale x 4 x bfloat> %0,
+ bfloat %1,
+ <vscale x 4 x bfloat> %2,
+ <vscale x 4 x i1> %3,
+ iXLen 0, iXLen %4, iXLen 0);
+
+ ret <vscale x 4 x bfloat> %a
+}
+
+declare <vscale x 8 x bfloat> @llvm.riscv.vfnmsac.nxv8bf16.bf16(
+ <vscale x 8 x bfloat>,
+ bfloat,
+ <vscale x 8 x bfloat>,
+ iXLen, iXLen, iXLen);
+
+define <vscale x 8 x bfloat> @intrinsic_vfnmsac_vf_nxv8bf16_bf16_nxv8bf16(<vscale x 8 x bfloat> %0, bfloat %1, <vscale x 8 x bfloat> %2, iXLen %3) nounwind {
+; CHECK-LABEL: intrinsic_vfnmsac_vf_nxv8bf16_bf16_nxv8bf16:
+; CHECK: # %bb.0: # %entry
+; CHECK-NEXT: fsrmi a1, 0
+; CHECK-NEXT: vsetvli zero, a0, e16alt, m2, tu, ma
+; CHECK-NEXT: vfnmsac.vf v8, fa0, v10
+; CHECK-NEXT: fsrm a1
+; CHECK-NEXT: ret
+entry:
+ %a = call <vscale x 8 x bfloat> @llvm.riscv.vfnmsac.nxv8bf16.bf16(
+ <vscale x 8 x bfloat> %0,
+ bfloat %1,
+ <vscale x 8 x bfloat> %2,
+ iXLen 0, iXLen %3, iXLen 0)
+
+ ret <vscale x 8 x bfloat> %a
+}
+
+declare <vscale x 8 x bfloat> @llvm.riscv.vfnmsac.mask.nxv8bf16.bf16(
+ <vscale x 8 x bfloat>,
+ bfloat,
+ <vscale x 8 x bfloat>,
+ <vscale x 8 x i1>,
+ iXLen, iXLen, iXLen);
+
+define <vscale x 8 x bfloat> @intrinsic_vfnmsac_mask_vf_nxv8bf16_bf16_nxv8bf16(<vscale x 8 x bfloat> %0, bfloat %1, <vscale x 8 x bfloat> %2, <vscale x 8 x i1> %3, iXLen %4) nounwind {
+; CHECK-LABEL: intrinsic_vfnmsac_mask_vf_nxv8bf16_bf16_nxv8bf16:
+; CHECK: # %bb.0: # %entry
+; CHECK-NEXT: fsrmi a1, 0
+; CHECK-NEXT: vsetvli zero, a0, e16alt, m2, tu, mu
+; CHECK-NEXT: vfnmsac.vf v8, fa0, v10, v0.t
+; CHECK-NEXT: fsrm a1
+; CHECK-NEXT: ret
+entry:
+ %a = call <vscale x 8 x bfloat> @llvm.riscv.vfnmsac.mask.nxv8bf16.bf16(
+ <vscale x 8 x bfloat> %0,
+ bfloat %1,
+ <vscale x 8 x bfloat> %2,
+ <vscale x 8 x i1> %3,
+ iXLen 0, iXLen %4, iXLen 0);
+
+ ret <vscale x 8 x bfloat> %a
+}
+
+declare <vscale x 16 x bfloat> @llvm.riscv.vfnmsac.nxv16bf16.bf16(
+ <vscale x 16 x bfloat>,
+ bfloat,
+ <vscale x 16 x bfloat>,
+ iXLen, iXLen, iXLen);
+
+define <vscale x 16 x bfloat> @intrinsic_vfnmsac_vf_nxv16bf16_bf16_nxv16bf16(<vscale x 16 x bfloat> %0, bfloat %1, <vscale x 16 x bfloat> %2, iXLen %3) nounwind {
+; CHECK-LABEL: intrinsic_vfnmsac_vf_nxv16bf16_bf16_nxv16bf16:
+; CHECK: # %bb.0: # %entry
+; CHECK-NEXT: fsrmi a1, 0
+; CHECK-NEXT: vsetvli zero, a0, e16alt, m4, tu, ma
+; CHECK-NEXT: vfnmsac.vf v8, fa0, v12
+; CHECK-NEXT: fsrm a1
+; CHECK-NEXT: ret
+entry:
+ %a = call <vscale x 16 x bfloat> @llvm.riscv.vfnmsac.nxv16bf16.bf16(
+ <vscale x 16 x bfloat> %0,
+ bfloat %1,
+ <vscale x 16 x bfloat> %2,
+ iXLen 0, iXLen %3, iXLen 0)
+
+ ret <vscale x 16 x bfloat> %a
+}
+
+declare <vscale x 16 x bfloat> @llvm.riscv.vfnmsac.mask.nxv16bf16.bf16(
+ <vscale x 16 x bfloat>,
+ bfloat,
+ <vscale x 16 x bfloat>,
+ <vscale x 16 x i1>,
+ iXLen, iXLen, iXLen);
+
+define <vscale x 16 x bfloat> @intrinsic_vfnmsac_mask_vf_nxv16bf16_bf16_nxv16bf16(<vscale x 16 x bfloat> %0, bfloat %1, <vscale x 16 x bfloat> %2, <vscale x 16 x i1> %3, iXLen %4) nounwind {
+; CHECK-LABEL: intrinsic_vfnmsac_mask_vf_nxv16bf16_bf16_nxv16bf16:
+; CHECK: # %bb.0: # %entry
+; CHECK-NEXT: fsrmi a1, 0
+; CHECK-NEXT: vsetvli zero, a0, e16alt, m4, tu, mu
+; CHECK-NEXT: vfnmsac.vf v8, fa0, v12, v0.t
+; CHECK-NEXT: fsrm a1
+; CHECK-NEXT: ret
+entry:
+ %a = call <vscale x 16 x bfloat> @llvm.riscv.vfnmsac.mask.nxv16bf16.bf16(
+ <vscale x 16 x bfloat> %0,
+ bfloat %1,
+ <vscale x 16 x bfloat> %2,
+ <vscale x 16 x i1> %3,
+ iXLen 0, iXLen %4, iXLen 0);
+
+ ret <vscale x 16 x bfloat> %a
+}
+
+define <vscale x 1 x bfloat> @intrinsic_vfnmsac_vv_nxv1bf16_nxv1bf16_nxv1bf16_commute(<vscale x 1 x bfloat> %0, <vscale x 1 x bfloat> %1, <vscale x 1 x bfloat> %2, iXLen %3) nounwind {
+; CHECK-LABEL: intrinsic_vfnmsac_vv_nxv1bf16_nxv1bf16_nxv1bf16_commute:
+; CHECK: # %bb.0: # %entry
+; CHECK-NEXT: vsetvli zero, a0, e16alt, mf4, ta, ma
+; CHECK-NEXT: vfnmsub.vv v8, v10, v9
+; CHECK-NEXT: ret
+entry:
+ %a = call <vscale x 1 x bfloat> @llvm.riscv.vfnmsac.nxv1bf16.nxv1bf16(
+ <vscale x 1 x bfloat> %1,
+ <vscale x 1 x bfloat> %0,
+ <vscale x 1 x bfloat> %2,
+ iXLen 7, iXLen %3, iXLen 3)
+
+ ret <vscale x 1 x bfloat> %a
+}
+
+define <vscale x 1 x bfloat> @intrinsic_vfnmsac_vv_nxv1bf16_nxv1bf16_nxv1bf16_commute2(<vscale x 1 x bfloat> %0, <vscale x 1 x bfloat> %1, <vscale x 1 x bfloat> %2, iXLen %3) nounwind {
+; CHECK-LABEL: intrinsic_vfnmsac_vv_nxv1bf16_nxv1bf16_nxv1bf16_commute2:
+; CHECK: # %bb.0: # %entry
+; CHECK-NEXT: vsetvli zero, a0, e16alt, mf4, ta, ma
+; CHECK-NEXT: vfnmsub.vv v8, v10, v9
+; CHECK-NEXT: ret
+entry:
+ %a = call <vscale x 1 x bfloat> @llvm.riscv.vfnmsac.nxv1bf16.nxv1bf16(
+ <vscale x 1 x bfloat> %1,
+ <vscale x 1 x bfloat> %2,
+ <vscale x 1 x bfloat> %0,
+ iXLen 7, iXLen %3, iXLen 3)
+
+ ret <vscale x 1 x bfloat> %a
+}
+
+define <vscale x 1 x bfloat> @intrinsic_vfnmsac_vf_nxv1bf16_bf16_nxv1bf16_commute(<vscale x 1 x bfloat> %0, bfloat %1, <vscale x 1 x bfloat> %2, iXLen %3) nounwind {
+; CHECK-LABEL: intrinsic_vfnmsac_vf_nxv1bf16_bf16_nxv1bf16_commute:
+; CHECK: # %bb.0: # %entry
+; CHECK-NEXT: vsetvli zero, a0, e16alt, mf4, ta, ma
+; CHECK-NEXT: vfnmsub.vf v8, fa0, v9
+; CHECK-NEXT: ret
+entry:
+ %a = call <vscale x 1 x bfloat> @llvm.riscv.vfnmsac.nxv1bf16.bf16(
+ <vscale x 1 x bfloat> %2,
+ bfloat %1,
+ <vscale x 1 x bfloat> %0,
+ iXLen 7, iXLen %3, iXLen 3)
+
+ ret <vscale x 1 x bfloat> %a
+}
diff --git a/llvm/test/CodeGen/RISCV/rvv/vfnmsub-bf.ll b/llvm/test/CodeGen/RISCV/rvv/vfnmsub-bf.ll
new file mode 100644
index 0000000..5ebbb90c
--- /dev/null
+++ b/llvm/test/CodeGen/RISCV/rvv/vfnmsub-bf.ll
@@ -0,0 +1,553 @@
+; NOTE: Assertions have been autogenerated by utils/update_llc_test_checks.py
+; RUN: sed 's/iXLen/i32/g' %s | llc -mtriple=riscv32 -mattr=+v,+experimental-zvfbfa \
+; RUN: -verify-machineinstrs -target-abi=ilp32d | FileCheck %s
+; RUN: sed 's/iXLen/i64/g' %s | llc -mtriple=riscv64 -mattr=+v,+experimental-zvfbfa \
+; RUN: -verify-machineinstrs -target-abi=lp64d | FileCheck %s
+
+declare <vscale x 1 x bfloat> @llvm.riscv.vfnmsub.nxv1bf16.nxv1bf16(
+ <vscale x 1 x bfloat>,
+ <vscale x 1 x bfloat>,
+ <vscale x 1 x bfloat>,
+ iXLen, iXLen, iXLen);
+
+define <vscale x 1 x bfloat> @intrinsic_vfnmsub_vv_nxv1bf16_nxv1bf16_nxv1bf16(<vscale x 1 x bfloat> %0, <vscale x 1 x bfloat> %1, <vscale x 1 x bfloat> %2, iXLen %3) nounwind {
+; CHECK-LABEL: intrinsic_vfnmsub_vv_nxv1bf16_nxv1bf16_nxv1bf16:
+; CHECK: # %bb.0: # %entry
+; CHECK-NEXT: fsrmi a1, 0
+; CHECK-NEXT: vsetvli zero, a0, e16alt, mf4, tu, ma
+; CHECK-NEXT: vfnmsub.vv v8, v9, v10
+; CHECK-NEXT: fsrm a1
+; CHECK-NEXT: ret
+entry:
+ %a = call <vscale x 1 x bfloat> @llvm.riscv.vfnmsub.nxv1bf16.nxv1bf16(
+ <vscale x 1 x bfloat> %0,
+ <vscale x 1 x bfloat> %1,
+ <vscale x 1 x bfloat> %2,
+ iXLen 0, iXLen %3, iXLen 0)
+
+ ret <vscale x 1 x bfloat> %a
+}
+
+declare <vscale x 1 x bfloat> @llvm.riscv.vfnmsub.mask.nxv1bf16.nxv1bf16(
+ <vscale x 1 x bfloat>,
+ <vscale x 1 x bfloat>,
+ <vscale x 1 x bfloat>,
+ <vscale x 1 x i1>,
+ iXLen, iXLen, iXLen);
+
+define <vscale x 1 x bfloat> @intrinsic_vfnmsub_mask_vv_nxv1bf16_nxv1bf16_nxv1bf16(<vscale x 1 x bfloat> %0, <vscale x 1 x bfloat> %1, <vscale x 1 x bfloat> %2, <vscale x 1 x i1> %3, iXLen %4) nounwind {
+; CHECK-LABEL: intrinsic_vfnmsub_mask_vv_nxv1bf16_nxv1bf16_nxv1bf16:
+; CHECK: # %bb.0: # %entry
+; CHECK-NEXT: fsrmi a1, 0
+; CHECK-NEXT: vsetvli zero, a0, e16alt, mf4, tu, mu
+; CHECK-NEXT: vfnmsub.vv v8, v9, v10, v0.t
+; CHECK-NEXT: fsrm a1
+; CHECK-NEXT: ret
+entry:
+ %a = call <vscale x 1 x bfloat> @llvm.riscv.vfnmsub.mask.nxv1bf16.nxv1bf16(
+ <vscale x 1 x bfloat> %0,
+ <vscale x 1 x bfloat> %1,
+ <vscale x 1 x bfloat> %2,
+ <vscale x 1 x i1> %3,
+ iXLen 0, iXLen %4, iXLen 0);
+
+ ret <vscale x 1 x bfloat> %a
+}
+
+declare <vscale x 2 x bfloat> @llvm.riscv.vfnmsub.nxv2bf16.nxv2bf16(
+ <vscale x 2 x bfloat>,
+ <vscale x 2 x bfloat>,
+ <vscale x 2 x bfloat>,
+ iXLen, iXLen, iXLen);
+
+define <vscale x 2 x bfloat> @intrinsic_vfnmsub_vv_nxv2bf16_nxv2bf16_nxv2bf16(<vscale x 2 x bfloat> %0, <vscale x 2 x bfloat> %1, <vscale x 2 x bfloat> %2, iXLen %3) nounwind {
+; CHECK-LABEL: intrinsic_vfnmsub_vv_nxv2bf16_nxv2bf16_nxv2bf16:
+; CHECK: # %bb.0: # %entry
+; CHECK-NEXT: fsrmi a1, 0
+; CHECK-NEXT: vsetvli zero, a0, e16alt, mf2, tu, ma
+; CHECK-NEXT: vfnmsub.vv v8, v9, v10
+; CHECK-NEXT: fsrm a1
+; CHECK-NEXT: ret
+entry:
+ %a = call <vscale x 2 x bfloat> @llvm.riscv.vfnmsub.nxv2bf16.nxv2bf16(
+ <vscale x 2 x bfloat> %0,
+ <vscale x 2 x bfloat> %1,
+ <vscale x 2 x bfloat> %2,
+ iXLen 0, iXLen %3, iXLen 0)
+
+ ret <vscale x 2 x bfloat> %a
+}
+
+declare <vscale x 2 x bfloat> @llvm.riscv.vfnmsub.mask.nxv2bf16.nxv2bf16(
+ <vscale x 2 x bfloat>,
+ <vscale x 2 x bfloat>,
+ <vscale x 2 x bfloat>,
+ <vscale x 2 x i1>,
+ iXLen, iXLen, iXLen);
+
+define <vscale x 2 x bfloat> @intrinsic_vfnmsub_mask_vv_nxv2bf16_nxv2bf16_nxv2bf16(<vscale x 2 x bfloat> %0, <vscale x 2 x bfloat> %1, <vscale x 2 x bfloat> %2, <vscale x 2 x i1> %3, iXLen %4) nounwind {
+; CHECK-LABEL: intrinsic_vfnmsub_mask_vv_nxv2bf16_nxv2bf16_nxv2bf16:
+; CHECK: # %bb.0: # %entry
+; CHECK-NEXT: fsrmi a1, 0
+; CHECK-NEXT: vsetvli zero, a0, e16alt, mf2, tu, mu
+; CHECK-NEXT: vfnmsub.vv v8, v9, v10, v0.t
+; CHECK-NEXT: fsrm a1
+; CHECK-NEXT: ret
+entry:
+ %a = call <vscale x 2 x bfloat> @llvm.riscv.vfnmsub.mask.nxv2bf16.nxv2bf16(
+ <vscale x 2 x bfloat> %0,
+ <vscale x 2 x bfloat> %1,
+ <vscale x 2 x bfloat> %2,
+ <vscale x 2 x i1> %3,
+ iXLen 0, iXLen %4, iXLen 0);
+
+ ret <vscale x 2 x bfloat> %a
+}
+
+declare <vscale x 4 x bfloat> @llvm.riscv.vfnmsub.nxv4bf16.nxv4bf16(
+ <vscale x 4 x bfloat>,
+ <vscale x 4 x bfloat>,
+ <vscale x 4 x bfloat>,
+ iXLen, iXLen, iXLen);
+
+define <vscale x 4 x bfloat> @intrinsic_vfnmsub_vv_nxv4bf16_nxv4bf16_nxv4bf16(<vscale x 4 x bfloat> %0, <vscale x 4 x bfloat> %1, <vscale x 4 x bfloat> %2, iXLen %3) nounwind {
+; CHECK-LABEL: intrinsic_vfnmsub_vv_nxv4bf16_nxv4bf16_nxv4bf16:
+; CHECK: # %bb.0: # %entry
+; CHECK-NEXT: fsrmi a1, 0
+; CHECK-NEXT: vsetvli zero, a0, e16alt, m1, tu, ma
+; CHECK-NEXT: vfnmsub.vv v8, v9, v10
+; CHECK-NEXT: fsrm a1
+; CHECK-NEXT: ret
+entry:
+ %a = call <vscale x 4 x bfloat> @llvm.riscv.vfnmsub.nxv4bf16.nxv4bf16(
+ <vscale x 4 x bfloat> %0,
+ <vscale x 4 x bfloat> %1,
+ <vscale x 4 x bfloat> %2,
+ iXLen 0, iXLen %3, iXLen 0)
+
+ ret <vscale x 4 x bfloat> %a
+}
+
+declare <vscale x 4 x bfloat> @llvm.riscv.vfnmsub.mask.nxv4bf16.nxv4bf16(
+ <vscale x 4 x bfloat>,
+ <vscale x 4 x bfloat>,
+ <vscale x 4 x bfloat>,
+ <vscale x 4 x i1>,
+ iXLen, iXLen, iXLen);
+
+define <vscale x 4 x bfloat> @intrinsic_vfnmsub_mask_vv_nxv4bf16_nxv4bf16_nxv4bf16(<vscale x 4 x bfloat> %0, <vscale x 4 x bfloat> %1, <vscale x 4 x bfloat> %2, <vscale x 4 x i1> %3, iXLen %4) nounwind {
+; CHECK-LABEL: intrinsic_vfnmsub_mask_vv_nxv4bf16_nxv4bf16_nxv4bf16:
+; CHECK: # %bb.0: # %entry
+; CHECK-NEXT: fsrmi a1, 0
+; CHECK-NEXT: vsetvli zero, a0, e16alt, m1, tu, mu
+; CHECK-NEXT: vfnmsub.vv v8, v9, v10, v0.t
+; CHECK-NEXT: fsrm a1
+; CHECK-NEXT: ret
+entry:
+ %a = call <vscale x 4 x bfloat> @llvm.riscv.vfnmsub.mask.nxv4bf16.nxv4bf16(
+ <vscale x 4 x bfloat> %0,
+ <vscale x 4 x bfloat> %1,
+ <vscale x 4 x bfloat> %2,
+ <vscale x 4 x i1> %3,
+ iXLen 0, iXLen %4, iXLen 0);
+
+ ret <vscale x 4 x bfloat> %a
+}
+
+declare <vscale x 8 x bfloat> @llvm.riscv.vfnmsub.nxv8bf16.nxv8bf16(
+ <vscale x 8 x bfloat>,
+ <vscale x 8 x bfloat>,
+ <vscale x 8 x bfloat>,
+ iXLen, iXLen, iXLen);
+
+define <vscale x 8 x bfloat> @intrinsic_vfnmsub_vv_nxv8bf16_nxv8bf16_nxv8bf16(<vscale x 8 x bfloat> %0, <vscale x 8 x bfloat> %1, <vscale x 8 x bfloat> %2, iXLen %3) nounwind {
+; CHECK-LABEL: intrinsic_vfnmsub_vv_nxv8bf16_nxv8bf16_nxv8bf16:
+; CHECK: # %bb.0: # %entry
+; CHECK-NEXT: fsrmi a1, 0
+; CHECK-NEXT: vsetvli zero, a0, e16alt, m2, tu, ma
+; CHECK-NEXT: vfnmsub.vv v8, v10, v12
+; CHECK-NEXT: fsrm a1
+; CHECK-NEXT: ret
+entry:
+ %a = call <vscale x 8 x bfloat> @llvm.riscv.vfnmsub.nxv8bf16.nxv8bf16(
+ <vscale x 8 x bfloat> %0,
+ <vscale x 8 x bfloat> %1,
+ <vscale x 8 x bfloat> %2,
+ iXLen 0, iXLen %3, iXLen 0)
+
+ ret <vscale x 8 x bfloat> %a
+}
+
+declare <vscale x 8 x bfloat> @llvm.riscv.vfnmsub.mask.nxv8bf16.nxv8bf16(
+ <vscale x 8 x bfloat>,
+ <vscale x 8 x bfloat>,
+ <vscale x 8 x bfloat>,
+ <vscale x 8 x i1>,
+ iXLen, iXLen, iXLen);
+
+define <vscale x 8 x bfloat> @intrinsic_vfnmsub_mask_vv_nxv8bf16_nxv8bf16_nxv8bf16(<vscale x 8 x bfloat> %0, <vscale x 8 x bfloat> %1, <vscale x 8 x bfloat> %2, <vscale x 8 x i1> %3, iXLen %4) nounwind {
+; CHECK-LABEL: intrinsic_vfnmsub_mask_vv_nxv8bf16_nxv8bf16_nxv8bf16:
+; CHECK: # %bb.0: # %entry
+; CHECK-NEXT: fsrmi a1, 0
+; CHECK-NEXT: vsetvli zero, a0, e16alt, m2, tu, mu
+; CHECK-NEXT: vfnmsub.vv v8, v10, v12, v0.t
+; CHECK-NEXT: fsrm a1
+; CHECK-NEXT: ret
+entry:
+ %a = call <vscale x 8 x bfloat> @llvm.riscv.vfnmsub.mask.nxv8bf16.nxv8bf16(
+ <vscale x 8 x bfloat> %0,
+ <vscale x 8 x bfloat> %1,
+ <vscale x 8 x bfloat> %2,
+ <vscale x 8 x i1> %3,
+ iXLen 0, iXLen %4, iXLen 0);
+
+ ret <vscale x 8 x bfloat> %a
+}
+
+declare <vscale x 16 x bfloat> @llvm.riscv.vfnmsub.nxv16bf16.nxv16bf16(
+ <vscale x 16 x bfloat>,
+ <vscale x 16 x bfloat>,
+ <vscale x 16 x bfloat>,
+ iXLen, iXLen, iXLen);
+
+define <vscale x 16 x bfloat> @intrinsic_vfnmsub_vv_nxv16bf16_nxv16bf16_nxv16bf16(<vscale x 16 x bfloat> %0, <vscale x 16 x bfloat> %1, <vscale x 16 x bfloat> %2, iXLen %3) nounwind {
+; CHECK-LABEL: intrinsic_vfnmsub_vv_nxv16bf16_nxv16bf16_nxv16bf16:
+; CHECK: # %bb.0: # %entry
+; CHECK-NEXT: fsrmi a1, 0
+; CHECK-NEXT: vsetvli zero, a0, e16alt, m4, tu, ma
+; CHECK-NEXT: vfnmsub.vv v8, v12, v16
+; CHECK-NEXT: fsrm a1
+; CHECK-NEXT: ret
+entry:
+ %a = call <vscale x 16 x bfloat> @llvm.riscv.vfnmsub.nxv16bf16.nxv16bf16(
+ <vscale x 16 x bfloat> %0,
+ <vscale x 16 x bfloat> %1,
+ <vscale x 16 x bfloat> %2,
+ iXLen 0, iXLen %3, iXLen 0)
+
+ ret <vscale x 16 x bfloat> %a
+}
+
+declare <vscale x 16 x bfloat> @llvm.riscv.vfnmsub.mask.nxv16bf16.nxv16bf16(
+ <vscale x 16 x bfloat>,
+ <vscale x 16 x bfloat>,
+ <vscale x 16 x bfloat>,
+ <vscale x 16 x i1>,
+ iXLen, iXLen, iXLen);
+
+define <vscale x 16 x bfloat> @intrinsic_vfnmsub_mask_vv_nxv16bf16_nxv16bf16_nxv16bf16(<vscale x 16 x bfloat> %0, <vscale x 16 x bfloat> %1, <vscale x 16 x bfloat> %2, <vscale x 16 x i1> %3, iXLen %4) nounwind {
+; CHECK-LABEL: intrinsic_vfnmsub_mask_vv_nxv16bf16_nxv16bf16_nxv16bf16:
+; CHECK: # %bb.0: # %entry
+; CHECK-NEXT: fsrmi a1, 0
+; CHECK-NEXT: vsetvli zero, a0, e16alt, m4, tu, mu
+; CHECK-NEXT: vfnmsub.vv v8, v12, v16, v0.t
+; CHECK-NEXT: fsrm a1
+; CHECK-NEXT: ret
+entry:
+ %a = call <vscale x 16 x bfloat> @llvm.riscv.vfnmsub.mask.nxv16bf16.nxv16bf16(
+ <vscale x 16 x bfloat> %0,
+ <vscale x 16 x bfloat> %1,
+ <vscale x 16 x bfloat> %2,
+ <vscale x 16 x i1> %3,
+ iXLen 0, iXLen %4, iXLen 0);
+
+ ret <vscale x 16 x bfloat> %a
+}
+
+declare <vscale x 1 x bfloat> @llvm.riscv.vfnmsub.nxv1bf16.bf16(
+ <vscale x 1 x bfloat>,
+ bfloat,
+ <vscale x 1 x bfloat>,
+ iXLen, iXLen, iXLen);
+
+define <vscale x 1 x bfloat> @intrinsic_vfnmsub_vf_nxv1bf16_bf16_nxv1bf16(<vscale x 1 x bfloat> %0, bfloat %1, <vscale x 1 x bfloat> %2, iXLen %3) nounwind {
+; CHECK-LABEL: intrinsic_vfnmsub_vf_nxv1bf16_bf16_nxv1bf16:
+; CHECK: # %bb.0: # %entry
+; CHECK-NEXT: fsrmi a1, 0
+; CHECK-NEXT: vsetvli zero, a0, e16alt, mf4, tu, ma
+; CHECK-NEXT: vfnmsub.vf v8, fa0, v9
+; CHECK-NEXT: fsrm a1
+; CHECK-NEXT: ret
+entry:
+ %a = call <vscale x 1 x bfloat> @llvm.riscv.vfnmsub.nxv1bf16.bf16(
+ <vscale x 1 x bfloat> %0,
+ bfloat %1,
+ <vscale x 1 x bfloat> %2,
+ iXLen 0, iXLen %3, iXLen 0)
+
+ ret <vscale x 1 x bfloat> %a
+}
+
+declare <vscale x 1 x bfloat> @llvm.riscv.vfnmsub.mask.nxv1bf16.bf16(
+ <vscale x 1 x bfloat>,
+ bfloat,
+ <vscale x 1 x bfloat>,
+ <vscale x 1 x i1>,
+ iXLen, iXLen, iXLen);
+
+define <vscale x 1 x bfloat> @intrinsic_vfnmsub_mask_vf_nxv1bf16_bf16_nxv1bf16(<vscale x 1 x bfloat> %0, bfloat %1, <vscale x 1 x bfloat> %2, <vscale x 1 x i1> %3, iXLen %4) nounwind {
+; CHECK-LABEL: intrinsic_vfnmsub_mask_vf_nxv1bf16_bf16_nxv1bf16:
+; CHECK: # %bb.0: # %entry
+; CHECK-NEXT: fsrmi a1, 0
+; CHECK-NEXT: vsetvli zero, a0, e16alt, mf4, tu, mu
+; CHECK-NEXT: vfnmsub.vf v8, fa0, v9, v0.t
+; CHECK-NEXT: fsrm a1
+; CHECK-NEXT: ret
+entry:
+ %a = call <vscale x 1 x bfloat> @llvm.riscv.vfnmsub.mask.nxv1bf16.bf16(
+ <vscale x 1 x bfloat> %0,
+ bfloat %1,
+ <vscale x 1 x bfloat> %2,
+ <vscale x 1 x i1> %3,
+ iXLen 0, iXLen %4, iXLen 0);
+
+ ret <vscale x 1 x bfloat> %a
+}
+
+declare <vscale x 2 x bfloat> @llvm.riscv.vfnmsub.nxv2bf16.bf16(
+ <vscale x 2 x bfloat>,
+ bfloat,
+ <vscale x 2 x bfloat>,
+ iXLen, iXLen, iXLen);
+
+define <vscale x 2 x bfloat> @intrinsic_vfnmsub_vf_nxv2bf16_bf16_nxv2bf16(<vscale x 2 x bfloat> %0, bfloat %1, <vscale x 2 x bfloat> %2, iXLen %3) nounwind {
+; CHECK-LABEL: intrinsic_vfnmsub_vf_nxv2bf16_bf16_nxv2bf16:
+; CHECK: # %bb.0: # %entry
+; CHECK-NEXT: fsrmi a1, 0
+; CHECK-NEXT: vsetvli zero, a0, e16alt, mf2, tu, ma
+; CHECK-NEXT: vfnmsub.vf v8, fa0, v9
+; CHECK-NEXT: fsrm a1
+; CHECK-NEXT: ret
+entry:
+ %a = call <vscale x 2 x bfloat> @llvm.riscv.vfnmsub.nxv2bf16.bf16(
+ <vscale x 2 x bfloat> %0,
+ bfloat %1,
+ <vscale x 2 x bfloat> %2,
+ iXLen 0, iXLen %3, iXLen 0)
+
+ ret <vscale x 2 x bfloat> %a
+}
+
+declare <vscale x 2 x bfloat> @llvm.riscv.vfnmsub.mask.nxv2bf16.bf16(
+ <vscale x 2 x bfloat>,
+ bfloat,
+ <vscale x 2 x bfloat>,
+ <vscale x 2 x i1>,
+ iXLen, iXLen, iXLen);
+
+define <vscale x 2 x bfloat> @intrinsic_vfnmsub_mask_vf_nxv2bf16_bf16_nxv2bf16(<vscale x 2 x bfloat> %0, bfloat %1, <vscale x 2 x bfloat> %2, <vscale x 2 x i1> %3, iXLen %4) nounwind {
+; CHECK-LABEL: intrinsic_vfnmsub_mask_vf_nxv2bf16_bf16_nxv2bf16:
+; CHECK: # %bb.0: # %entry
+; CHECK-NEXT: fsrmi a1, 0
+; CHECK-NEXT: vsetvli zero, a0, e16alt, mf2, tu, mu
+; CHECK-NEXT: vfnmsub.vf v8, fa0, v9, v0.t
+; CHECK-NEXT: fsrm a1
+; CHECK-NEXT: ret
+entry:
+ %a = call <vscale x 2 x bfloat> @llvm.riscv.vfnmsub.mask.nxv2bf16.bf16(
+ <vscale x 2 x bfloat> %0,
+ bfloat %1,
+ <vscale x 2 x bfloat> %2,
+ <vscale x 2 x i1> %3,
+ iXLen 0, iXLen %4, iXLen 0);
+
+ ret <vscale x 2 x bfloat> %a
+}
+
+declare <vscale x 4 x bfloat> @llvm.riscv.vfnmsub.nxv4bf16.bf16(
+ <vscale x 4 x bfloat>,
+ bfloat,
+ <vscale x 4 x bfloat>,
+ iXLen, iXLen, iXLen);
+
+define <vscale x 4 x bfloat> @intrinsic_vfnmsub_vf_nxv4bf16_bf16_nxv4bf16(<vscale x 4 x bfloat> %0, bfloat %1, <vscale x 4 x bfloat> %2, iXLen %3) nounwind {
+; CHECK-LABEL: intrinsic_vfnmsub_vf_nxv4bf16_bf16_nxv4bf16:
+; CHECK: # %bb.0: # %entry
+; CHECK-NEXT: fsrmi a1, 0
+; CHECK-NEXT: vsetvli zero, a0, e16alt, m1, tu, ma
+; CHECK-NEXT: vfnmsub.vf v8, fa0, v9
+; CHECK-NEXT: fsrm a1
+; CHECK-NEXT: ret
+entry:
+ %a = call <vscale x 4 x bfloat> @llvm.riscv.vfnmsub.nxv4bf16.bf16(
+ <vscale x 4 x bfloat> %0,
+ bfloat %1,
+ <vscale x 4 x bfloat> %2,
+ iXLen 0, iXLen %3, iXLen 0)
+
+ ret <vscale x 4 x bfloat> %a
+}
+
+declare <vscale x 4 x bfloat> @llvm.riscv.vfnmsub.mask.nxv4bf16.bf16(
+ <vscale x 4 x bfloat>,
+ bfloat,
+ <vscale x 4 x bfloat>,
+ <vscale x 4 x i1>,
+ iXLen, iXLen, iXLen);
+
+define <vscale x 4 x bfloat> @intrinsic_vfnmsub_mask_vf_nxv4bf16_bf16_nxv4bf16(<vscale x 4 x bfloat> %0, bfloat %1, <vscale x 4 x bfloat> %2, <vscale x 4 x i1> %3, iXLen %4) nounwind {
+; CHECK-LABEL: intrinsic_vfnmsub_mask_vf_nxv4bf16_bf16_nxv4bf16:
+; CHECK: # %bb.0: # %entry
+; CHECK-NEXT: fsrmi a1, 0
+; CHECK-NEXT: vsetvli zero, a0, e16alt, m1, tu, mu
+; CHECK-NEXT: vfnmsub.vf v8, fa0, v9, v0.t
+; CHECK-NEXT: fsrm a1
+; CHECK-NEXT: ret
+entry:
+ %a = call <vscale x 4 x bfloat> @llvm.riscv.vfnmsub.mask.nxv4bf16.bf16(
+ <vscale x 4 x bfloat> %0,
+ bfloat %1,
+ <vscale x 4 x bfloat> %2,
+ <vscale x 4 x i1> %3,
+ iXLen 0, iXLen %4, iXLen 0);
+
+ ret <vscale x 4 x bfloat> %a
+}
+
+declare <vscale x 8 x bfloat> @llvm.riscv.vfnmsub.nxv8bf16.bf16(
+ <vscale x 8 x bfloat>,
+ bfloat,
+ <vscale x 8 x bfloat>,
+ iXLen, iXLen, iXLen);
+
+define <vscale x 8 x bfloat> @intrinsic_vfnmsub_vf_nxv8bf16_bf16_nxv8bf16(<vscale x 8 x bfloat> %0, bfloat %1, <vscale x 8 x bfloat> %2, iXLen %3) nounwind {
+; CHECK-LABEL: intrinsic_vfnmsub_vf_nxv8bf16_bf16_nxv8bf16:
+; CHECK: # %bb.0: # %entry
+; CHECK-NEXT: fsrmi a1, 0
+; CHECK-NEXT: vsetvli zero, a0, e16alt, m2, tu, ma
+; CHECK-NEXT: vfnmsub.vf v8, fa0, v10
+; CHECK-NEXT: fsrm a1
+; CHECK-NEXT: ret
+entry:
+ %a = call <vscale x 8 x bfloat> @llvm.riscv.vfnmsub.nxv8bf16.bf16(
+ <vscale x 8 x bfloat> %0,
+ bfloat %1,
+ <vscale x 8 x bfloat> %2,
+ iXLen 0, iXLen %3, iXLen 0)
+
+ ret <vscale x 8 x bfloat> %a
+}
+
+declare <vscale x 8 x bfloat> @llvm.riscv.vfnmsub.mask.nxv8bf16.bf16(
+ <vscale x 8 x bfloat>,
+ bfloat,
+ <vscale x 8 x bfloat>,
+ <vscale x 8 x i1>,
+ iXLen, iXLen, iXLen);
+
+define <vscale x 8 x bfloat> @intrinsic_vfnmsub_mask_vf_nxv8bf16_bf16_nxv8bf16(<vscale x 8 x bfloat> %0, bfloat %1, <vscale x 8 x bfloat> %2, <vscale x 8 x i1> %3, iXLen %4) nounwind {
+; CHECK-LABEL: intrinsic_vfnmsub_mask_vf_nxv8bf16_bf16_nxv8bf16:
+; CHECK: # %bb.0: # %entry
+; CHECK-NEXT: fsrmi a1, 0
+; CHECK-NEXT: vsetvli zero, a0, e16alt, m2, tu, mu
+; CHECK-NEXT: vfnmsub.vf v8, fa0, v10, v0.t
+; CHECK-NEXT: fsrm a1
+; CHECK-NEXT: ret
+entry:
+ %a = call <vscale x 8 x bfloat> @llvm.riscv.vfnmsub.mask.nxv8bf16.bf16(
+ <vscale x 8 x bfloat> %0,
+ bfloat %1,
+ <vscale x 8 x bfloat> %2,
+ <vscale x 8 x i1> %3,
+ iXLen 0, iXLen %4, iXLen 0);
+
+ ret <vscale x 8 x bfloat> %a
+}
+
+declare <vscale x 16 x bfloat> @llvm.riscv.vfnmsub.nxv16bf16.bf16(
+ <vscale x 16 x bfloat>,
+ bfloat,
+ <vscale x 16 x bfloat>,
+ iXLen, iXLen, iXLen);
+
+define <vscale x 16 x bfloat> @intrinsic_vfnmsub_vf_nxv16bf16_bf16_nxv16bf16(<vscale x 16 x bfloat> %0, bfloat %1, <vscale x 16 x bfloat> %2, iXLen %3) nounwind {
+; CHECK-LABEL: intrinsic_vfnmsub_vf_nxv16bf16_bf16_nxv16bf16:
+; CHECK: # %bb.0: # %entry
+; CHECK-NEXT: fsrmi a1, 0
+; CHECK-NEXT: vsetvli zero, a0, e16alt, m4, tu, ma
+; CHECK-NEXT: vfnmsub.vf v8, fa0, v12
+; CHECK-NEXT: fsrm a1
+; CHECK-NEXT: ret
+entry:
+ %a = call <vscale x 16 x bfloat> @llvm.riscv.vfnmsub.nxv16bf16.bf16(
+ <vscale x 16 x bfloat> %0,
+ bfloat %1,
+ <vscale x 16 x bfloat> %2,
+ iXLen 0, iXLen %3, iXLen 0)
+
+ ret <vscale x 16 x bfloat> %a
+}
+
+declare <vscale x 16 x bfloat> @llvm.riscv.vfnmsub.mask.nxv16bf16.bf16(
+ <vscale x 16 x bfloat>,
+ bfloat,
+ <vscale x 16 x bfloat>,
+ <vscale x 16 x i1>,
+ iXLen, iXLen, iXLen);
+
+define <vscale x 16 x bfloat> @intrinsic_vfnmsub_mask_vf_nxv16bf16_bf16_nxv16bf16(<vscale x 16 x bfloat> %0, bfloat %1, <vscale x 16 x bfloat> %2, <vscale x 16 x i1> %3, iXLen %4) nounwind {
+; CHECK-LABEL: intrinsic_vfnmsub_mask_vf_nxv16bf16_bf16_nxv16bf16:
+; CHECK: # %bb.0: # %entry
+; CHECK-NEXT: fsrmi a1, 0
+; CHECK-NEXT: vsetvli zero, a0, e16alt, m4, tu, mu
+; CHECK-NEXT: vfnmsub.vf v8, fa0, v12, v0.t
+; CHECK-NEXT: fsrm a1
+; CHECK-NEXT: ret
+entry:
+ %a = call <vscale x 16 x bfloat> @llvm.riscv.vfnmsub.mask.nxv16bf16.bf16(
+ <vscale x 16 x bfloat> %0,
+ bfloat %1,
+ <vscale x 16 x bfloat> %2,
+ <vscale x 16 x i1> %3,
+ iXLen 0, iXLen %4, iXLen 0);
+
+ ret <vscale x 16 x bfloat> %a
+}
+
+define <vscale x 1 x bfloat> @intrinsic_vfnmsub_vv_nxv1bf16_nxv1bf16_nxv1bf16_commute(<vscale x 1 x bfloat> %0, <vscale x 1 x bfloat> %1, <vscale x 1 x bfloat> %2, iXLen %3) nounwind {
+; CHECK-LABEL: intrinsic_vfnmsub_vv_nxv1bf16_nxv1bf16_nxv1bf16_commute:
+; CHECK: # %bb.0: # %entry
+; CHECK-NEXT: vsetvli zero, a0, e16alt, mf4, ta, ma
+; CHECK-NEXT: vfnmsub.vv v8, v9, v10
+; CHECK-NEXT: ret
+entry:
+ %a = call <vscale x 1 x bfloat> @llvm.riscv.vfnmsub.nxv1bf16.nxv1bf16(
+ <vscale x 1 x bfloat> %1,
+ <vscale x 1 x bfloat> %0,
+ <vscale x 1 x bfloat> %2,
+ iXLen 7, iXLen %3, iXLen 3)
+
+ ret <vscale x 1 x bfloat> %a
+}
+
+define <vscale x 1 x bfloat> @intrinsic_vfnmsub_vv_nxv1bf16_nxv1bf16_nxv1bf16_commute2(<vscale x 1 x bfloat> %0, <vscale x 1 x bfloat> %1, <vscale x 1 x bfloat> %2, iXLen %3) nounwind {
+; CHECK-LABEL: intrinsic_vfnmsub_vv_nxv1bf16_nxv1bf16_nxv1bf16_commute2:
+; CHECK: # %bb.0: # %entry
+; CHECK-NEXT: vsetvli zero, a0, e16alt, mf4, ta, ma
+; CHECK-NEXT: vfnmsac.vv v8, v10, v9
+; CHECK-NEXT: ret
+entry:
+ %a = call <vscale x 1 x bfloat> @llvm.riscv.vfnmsub.nxv1bf16.nxv1bf16(
+ <vscale x 1 x bfloat> %1,
+ <vscale x 1 x bfloat> %2,
+ <vscale x 1 x bfloat> %0,
+ iXLen 7, iXLen %3, iXLen 3)
+
+ ret <vscale x 1 x bfloat> %a
+}
+
+define <vscale x 1 x bfloat> @intrinsic_vfnmsub_vf_nxv1bf16_bf16_nxv1bf16_commute(<vscale x 1 x bfloat> %0, bfloat %1, <vscale x 1 x bfloat> %2, iXLen %3) nounwind {
+; CHECK-LABEL: intrinsic_vfnmsub_vf_nxv1bf16_bf16_nxv1bf16_commute:
+; CHECK: # %bb.0: # %entry
+; CHECK-NEXT: vsetvli zero, a0, e16alt, mf4, ta, ma
+; CHECK-NEXT: vfnmsac.vf v8, fa0, v9
+; CHECK-NEXT: ret
+entry:
+ %a = call <vscale x 1 x bfloat> @llvm.riscv.vfnmsub.nxv1bf16.bf16(
+ <vscale x 1 x bfloat> %2,
+ bfloat %1,
+ <vscale x 1 x bfloat> %0,
+ iXLen 7, iXLen %3, iXLen 3)
+
+ ret <vscale x 1 x bfloat> %a
+}
diff --git a/llvm/test/CodeGen/RISCV/rvv/vfrec7-bf.ll b/llvm/test/CodeGen/RISCV/rvv/vfrec7-bf.ll
new file mode 100644
index 0000000..1211415
--- /dev/null
+++ b/llvm/test/CodeGen/RISCV/rvv/vfrec7-bf.ll
@@ -0,0 +1,282 @@
+; NOTE: Assertions have been autogenerated by utils/update_llc_test_checks.py
+; RUN: sed 's/iXLen/i32/g' %s | llc -mtriple=riscv32 -mattr=+v,+experimental-zvfbfa \
+; RUN: -verify-machineinstrs -target-abi=ilp32d | FileCheck %s
+; RUN: sed 's/iXLen/i64/g' %s | llc -mtriple=riscv64 -mattr=+v,+experimental-zvfbfa \
+; RUN: -verify-machineinstrs -target-abi=lp64d | FileCheck %s
+
+declare <vscale x 1 x bfloat> @llvm.riscv.vfrec7.nxv1bf16(
+ <vscale x 1 x bfloat>,
+ <vscale x 1 x bfloat>,
+ iXLen, iXLen);
+
+define <vscale x 1 x bfloat> @intrinsic_vfrec7_v_nxv1bf16_nxv1bf16(<vscale x 1 x bfloat> %0, iXLen %1) nounwind {
+; CHECK-LABEL: intrinsic_vfrec7_v_nxv1bf16_nxv1bf16:
+; CHECK: # %bb.0: # %entry
+; CHECK-NEXT: fsrmi a1, 0
+; CHECK-NEXT: vsetvli zero, a0, e16alt, mf4, ta, ma
+; CHECK-NEXT: vfrec7.v v8, v8
+; CHECK-NEXT: fsrm a1
+; CHECK-NEXT: ret
+entry:
+ %a = call <vscale x 1 x bfloat> @llvm.riscv.vfrec7.nxv1bf16(
+ <vscale x 1 x bfloat> poison,
+ <vscale x 1 x bfloat> %0,
+ iXLen 0, iXLen %1)
+
+ ret <vscale x 1 x bfloat> %a
+}
+
+declare <vscale x 1 x bfloat> @llvm.riscv.vfrec7.mask.nxv1bf16(
+ <vscale x 1 x bfloat>,
+ <vscale x 1 x bfloat>,
+ <vscale x 1 x i1>,
+ iXLen, iXLen, iXLen);
+
+define <vscale x 1 x bfloat> @intrinsic_vfrec7_mask_v_nxv1bf16_nxv1bf16(<vscale x 1 x i1> %0, <vscale x 1 x bfloat> %1, <vscale x 1 x bfloat> %2, iXLen %3) nounwind {
+; CHECK-LABEL: intrinsic_vfrec7_mask_v_nxv1bf16_nxv1bf16:
+; CHECK: # %bb.0: # %entry
+; CHECK-NEXT: fsrmi a1, 0
+; CHECK-NEXT: vsetvli zero, a0, e16alt, mf4, ta, mu
+; CHECK-NEXT: vfrec7.v v8, v9, v0.t
+; CHECK-NEXT: fsrm a1
+; CHECK-NEXT: ret
+entry:
+ %a = call <vscale x 1 x bfloat> @llvm.riscv.vfrec7.mask.nxv1bf16(
+ <vscale x 1 x bfloat> %1,
+ <vscale x 1 x bfloat> %2,
+ <vscale x 1 x i1> %0,
+ iXLen 0, iXLen %3, iXLen 1)
+
+ ret <vscale x 1 x bfloat> %a
+}
+
+declare <vscale x 2 x bfloat> @llvm.riscv.vfrec7.nxv2bf16(
+ <vscale x 2 x bfloat>,
+ <vscale x 2 x bfloat>,
+ iXLen, iXLen);
+
+define <vscale x 2 x bfloat> @intrinsic_vfrec7_v_nxv2bf16_nxv2bf16(<vscale x 2 x bfloat> %0, iXLen %1) nounwind {
+; CHECK-LABEL: intrinsic_vfrec7_v_nxv2bf16_nxv2bf16:
+; CHECK: # %bb.0: # %entry
+; CHECK-NEXT: fsrmi a1, 0
+; CHECK-NEXT: vsetvli zero, a0, e16alt, mf2, ta, ma
+; CHECK-NEXT: vfrec7.v v8, v8
+; CHECK-NEXT: fsrm a1
+; CHECK-NEXT: ret
+entry:
+ %a = call <vscale x 2 x bfloat> @llvm.riscv.vfrec7.nxv2bf16(
+ <vscale x 2 x bfloat> poison,
+ <vscale x 2 x bfloat> %0,
+ iXLen 0, iXLen %1)
+
+ ret <vscale x 2 x bfloat> %a
+}
+
+declare <vscale x 2 x bfloat> @llvm.riscv.vfrec7.mask.nxv2bf16(
+ <vscale x 2 x bfloat>,
+ <vscale x 2 x bfloat>,
+ <vscale x 2 x i1>,
+ iXLen, iXLen, iXLen);
+
+define <vscale x 2 x bfloat> @intrinsic_vfrec7_mask_v_nxv2bf16_nxv2bf16(<vscale x 2 x i1> %0, <vscale x 2 x bfloat> %1, <vscale x 2 x bfloat> %2, iXLen %3) nounwind {
+; CHECK-LABEL: intrinsic_vfrec7_mask_v_nxv2bf16_nxv2bf16:
+; CHECK: # %bb.0: # %entry
+; CHECK-NEXT: fsrmi a1, 0
+; CHECK-NEXT: vsetvli zero, a0, e16alt, mf2, ta, mu
+; CHECK-NEXT: vfrec7.v v8, v9, v0.t
+; CHECK-NEXT: fsrm a1
+; CHECK-NEXT: ret
+entry:
+ %a = call <vscale x 2 x bfloat> @llvm.riscv.vfrec7.mask.nxv2bf16(
+ <vscale x 2 x bfloat> %1,
+ <vscale x 2 x bfloat> %2,
+ <vscale x 2 x i1> %0,
+ iXLen 0, iXLen %3, iXLen 1)
+
+ ret <vscale x 2 x bfloat> %a
+}
+
+declare <vscale x 4 x bfloat> @llvm.riscv.vfrec7.nxv4bf16(
+ <vscale x 4 x bfloat>,
+ <vscale x 4 x bfloat>,
+ iXLen, iXLen);
+
+define <vscale x 4 x bfloat> @intrinsic_vfrec7_v_nxv4bf16_nxv4bf16(<vscale x 4 x bfloat> %0, iXLen %1) nounwind {
+; CHECK-LABEL: intrinsic_vfrec7_v_nxv4bf16_nxv4bf16:
+; CHECK: # %bb.0: # %entry
+; CHECK-NEXT: fsrmi a1, 0
+; CHECK-NEXT: vsetvli zero, a0, e16alt, m1, ta, ma
+; CHECK-NEXT: vfrec7.v v8, v8
+; CHECK-NEXT: fsrm a1
+; CHECK-NEXT: ret
+entry:
+ %a = call <vscale x 4 x bfloat> @llvm.riscv.vfrec7.nxv4bf16(
+ <vscale x 4 x bfloat> poison,
+ <vscale x 4 x bfloat> %0,
+ iXLen 0, iXLen %1)
+
+ ret <vscale x 4 x bfloat> %a
+}
+
+declare <vscale x 4 x bfloat> @llvm.riscv.vfrec7.mask.nxv4bf16(
+ <vscale x 4 x bfloat>,
+ <vscale x 4 x bfloat>,
+ <vscale x 4 x i1>,
+ iXLen, iXLen, iXLen);
+
+define <vscale x 4 x bfloat> @intrinsic_vfrec7_mask_v_nxv4bf16_nxv4bf16(<vscale x 4 x i1> %0, <vscale x 4 x bfloat> %1, <vscale x 4 x bfloat> %2, iXLen %3) nounwind {
+; CHECK-LABEL: intrinsic_vfrec7_mask_v_nxv4bf16_nxv4bf16:
+; CHECK: # %bb.0: # %entry
+; CHECK-NEXT: fsrmi a1, 0
+; CHECK-NEXT: vsetvli zero, a0, e16alt, m1, ta, mu
+; CHECK-NEXT: vfrec7.v v8, v9, v0.t
+; CHECK-NEXT: fsrm a1
+; CHECK-NEXT: ret
+entry:
+ %a = call <vscale x 4 x bfloat> @llvm.riscv.vfrec7.mask.nxv4bf16(
+ <vscale x 4 x bfloat> %1,
+ <vscale x 4 x bfloat> %2,
+ <vscale x 4 x i1> %0,
+ iXLen 0, iXLen %3, iXLen 1)
+
+ ret <vscale x 4 x bfloat> %a
+}
+
+declare <vscale x 8 x bfloat> @llvm.riscv.vfrec7.nxv8bf16(
+ <vscale x 8 x bfloat>,
+ <vscale x 8 x bfloat>,
+ iXLen, iXLen);
+
+define <vscale x 8 x bfloat> @intrinsic_vfrec7_v_nxv8bf16_nxv8bf16(<vscale x 8 x bfloat> %0, iXLen %1) nounwind {
+; CHECK-LABEL: intrinsic_vfrec7_v_nxv8bf16_nxv8bf16:
+; CHECK: # %bb.0: # %entry
+; CHECK-NEXT: fsrmi a1, 0
+; CHECK-NEXT: vsetvli zero, a0, e16alt, m2, ta, ma
+; CHECK-NEXT: vfrec7.v v8, v8
+; CHECK-NEXT: fsrm a1
+; CHECK-NEXT: ret
+entry:
+ %a = call <vscale x 8 x bfloat> @llvm.riscv.vfrec7.nxv8bf16(
+ <vscale x 8 x bfloat> poison,
+ <vscale x 8 x bfloat> %0,
+ iXLen 0, iXLen %1)
+
+ ret <vscale x 8 x bfloat> %a
+}
+
+declare <vscale x 8 x bfloat> @llvm.riscv.vfrec7.mask.nxv8bf16(
+ <vscale x 8 x bfloat>,
+ <vscale x 8 x bfloat>,
+ <vscale x 8 x i1>,
+ iXLen, iXLen, iXLen);
+
+define <vscale x 8 x bfloat> @intrinsic_vfrec7_mask_v_nxv8bf16_nxv8bf16(<vscale x 8 x i1> %0, <vscale x 8 x bfloat> %1, <vscale x 8 x bfloat> %2, iXLen %3) nounwind {
+; CHECK-LABEL: intrinsic_vfrec7_mask_v_nxv8bf16_nxv8bf16:
+; CHECK: # %bb.0: # %entry
+; CHECK-NEXT: fsrmi a1, 0
+; CHECK-NEXT: vsetvli zero, a0, e16alt, m2, ta, mu
+; CHECK-NEXT: vfrec7.v v8, v10, v0.t
+; CHECK-NEXT: fsrm a1
+; CHECK-NEXT: ret
+entry:
+ %a = call <vscale x 8 x bfloat> @llvm.riscv.vfrec7.mask.nxv8bf16(
+ <vscale x 8 x bfloat> %1,
+ <vscale x 8 x bfloat> %2,
+ <vscale x 8 x i1> %0,
+ iXLen 0, iXLen %3, iXLen 1)
+
+ ret <vscale x 8 x bfloat> %a
+}
+
+declare <vscale x 16 x bfloat> @llvm.riscv.vfrec7.nxv16bf16(
+ <vscale x 16 x bfloat>,
+ <vscale x 16 x bfloat>,
+ iXLen, iXLen);
+
+define <vscale x 16 x bfloat> @intrinsic_vfrec7_v_nxv16bf16_nxv16bf16(<vscale x 16 x bfloat> %0, iXLen %1) nounwind {
+; CHECK-LABEL: intrinsic_vfrec7_v_nxv16bf16_nxv16bf16:
+; CHECK: # %bb.0: # %entry
+; CHECK-NEXT: fsrmi a1, 0
+; CHECK-NEXT: vsetvli zero, a0, e16alt, m4, ta, ma
+; CHECK-NEXT: vfrec7.v v8, v8
+; CHECK-NEXT: fsrm a1
+; CHECK-NEXT: ret
+entry:
+ %a = call <vscale x 16 x bfloat> @llvm.riscv.vfrec7.nxv16bf16(
+ <vscale x 16 x bfloat> poison,
+ <vscale x 16 x bfloat> %0,
+ iXLen 0, iXLen %1)
+
+ ret <vscale x 16 x bfloat> %a
+}
+
+declare <vscale x 16 x bfloat> @llvm.riscv.vfrec7.mask.nxv16bf16(
+ <vscale x 16 x bfloat>,
+ <vscale x 16 x bfloat>,
+ <vscale x 16 x i1>,
+ iXLen, iXLen, iXLen);
+
+define <vscale x 16 x bfloat> @intrinsic_vfrec7_mask_v_nxv16bf16_nxv16bf16(<vscale x 16 x i1> %0, <vscale x 16 x bfloat> %1, <vscale x 16 x bfloat> %2, iXLen %3) nounwind {
+; CHECK-LABEL: intrinsic_vfrec7_mask_v_nxv16bf16_nxv16bf16:
+; CHECK: # %bb.0: # %entry
+; CHECK-NEXT: fsrmi a1, 0
+; CHECK-NEXT: vsetvli zero, a0, e16alt, m4, ta, mu
+; CHECK-NEXT: vfrec7.v v8, v12, v0.t
+; CHECK-NEXT: fsrm a1
+; CHECK-NEXT: ret
+entry:
+ %a = call <vscale x 16 x bfloat> @llvm.riscv.vfrec7.mask.nxv16bf16(
+ <vscale x 16 x bfloat> %1,
+ <vscale x 16 x bfloat> %2,
+ <vscale x 16 x i1> %0,
+ iXLen 0, iXLen %3, iXLen 1)
+
+ ret <vscale x 16 x bfloat> %a
+}
+
+declare <vscale x 32 x bfloat> @llvm.riscv.vfrec7.nxv32bf16(
+ <vscale x 32 x bfloat>,
+ <vscale x 32 x bfloat>,
+ iXLen, iXLen);
+
+define <vscale x 32 x bfloat> @intrinsic_vfrec7_v_nxv32bf16_nxv32bf16(<vscale x 32 x bfloat> %0, iXLen %1) nounwind {
+; CHECK-LABEL: intrinsic_vfrec7_v_nxv32bf16_nxv32bf16:
+; CHECK: # %bb.0: # %entry
+; CHECK-NEXT: fsrmi a1, 0
+; CHECK-NEXT: vsetvli zero, a0, e16alt, m8, ta, ma
+; CHECK-NEXT: vfrec7.v v8, v8
+; CHECK-NEXT: fsrm a1
+; CHECK-NEXT: ret
+entry:
+ %a = call <vscale x 32 x bfloat> @llvm.riscv.vfrec7.nxv32bf16(
+ <vscale x 32 x bfloat> poison,
+ <vscale x 32 x bfloat> %0,
+ iXLen 0, iXLen %1)
+
+ ret <vscale x 32 x bfloat> %a
+}
+
+declare <vscale x 32 x bfloat> @llvm.riscv.vfrec7.mask.nxv32bf16(
+ <vscale x 32 x bfloat>,
+ <vscale x 32 x bfloat>,
+ <vscale x 32 x i1>,
+ iXLen, iXLen, iXLen);
+
+define <vscale x 32 x bfloat> @intrinsic_vfrec7_mask_v_nxv32bf16_nxv32bf16(<vscale x 32 x i1> %0, <vscale x 32 x bfloat> %1, <vscale x 32 x bfloat> %2, iXLen %3) nounwind {
+; CHECK-LABEL: intrinsic_vfrec7_mask_v_nxv32bf16_nxv32bf16:
+; CHECK: # %bb.0: # %entry
+; CHECK-NEXT: fsrmi a1, 0
+; CHECK-NEXT: vsetvli zero, a0, e16alt, m8, ta, mu
+; CHECK-NEXT: vfrec7.v v8, v16, v0.t
+; CHECK-NEXT: fsrm a1
+; CHECK-NEXT: ret
+entry:
+ %a = call <vscale x 32 x bfloat> @llvm.riscv.vfrec7.mask.nxv32bf16(
+ <vscale x 32 x bfloat> %1,
+ <vscale x 32 x bfloat> %2,
+ <vscale x 32 x i1> %0,
+ iXLen 0, iXLen %3, iXLen 1)
+
+ ret <vscale x 32 x bfloat> %a
+}
+
diff --git a/llvm/test/CodeGen/RISCV/rvv/vfrsqrt7-bf16.ll b/llvm/test/CodeGen/RISCV/rvv/vfrsqrt7-bf16.ll
new file mode 100644
index 0000000..4626b86
--- /dev/null
+++ b/llvm/test/CodeGen/RISCV/rvv/vfrsqrt7-bf16.ll
@@ -0,0 +1,264 @@
+; NOTE: Assertions have been autogenerated by utils/update_llc_test_checks.py
+; RUN: sed 's/iXLen/i32/g' %s | llc -mtriple=riscv32 -mattr=+v,+experimental-zvfbfa \
+; RUN: -verify-machineinstrs -target-abi=ilp32d | FileCheck %s
+; RUN: sed 's/iXLen/i64/g' %s | llc -mtriple=riscv64 -mattr=+v,+experimental-zvfbfa \
+; RUN: -verify-machineinstrs -target-abi=lp64d | FileCheck %s
+
+declare <vscale x 1 x bfloat> @llvm.riscv.vfrsqrt7.nxv1bf16(
+ <vscale x 1 x bfloat>,
+ <vscale x 1 x bfloat>,
+ iXLen);
+
+define <vscale x 1 x bfloat> @intrinsic_vfrsqrt7_v_nxv1bf16_nxv1bf16(<vscale x 1 x bfloat> %0, iXLen %1) nounwind {
+; CHECK-LABEL: intrinsic_vfrsqrt7_v_nxv1bf16_nxv1bf16:
+; CHECK: # %bb.0: # %entry
+; CHECK-NEXT: vsetvli zero, a0, e16alt, mf4, ta, ma
+; CHECK-NEXT: vfrsqrt7.v v8, v8
+; CHECK-NEXT: ret
+entry:
+ %a = call <vscale x 1 x bfloat> @llvm.riscv.vfrsqrt7.nxv1bf16(
+ <vscale x 1 x bfloat> poison,
+ <vscale x 1 x bfloat> %0,
+ iXLen %1)
+
+ ret <vscale x 1 x bfloat> %a
+}
+
+declare <vscale x 1 x bfloat> @llvm.riscv.vfrsqrt7.mask.nxv1bf16(
+ <vscale x 1 x bfloat>,
+ <vscale x 1 x bfloat>,
+ <vscale x 1 x i1>,
+ iXLen,
+ iXLen);
+
+define <vscale x 1 x bfloat> @intrinsic_vfrsqrt7_mask_v_nxv1bf16_nxv1bf16(<vscale x 1 x i1> %0, <vscale x 1 x bfloat> %1, <vscale x 1 x bfloat> %2, iXLen %3) nounwind {
+; CHECK-LABEL: intrinsic_vfrsqrt7_mask_v_nxv1bf16_nxv1bf16:
+; CHECK: # %bb.0: # %entry
+; CHECK-NEXT: vsetvli zero, a0, e16alt, mf4, ta, mu
+; CHECK-NEXT: vfrsqrt7.v v8, v9, v0.t
+; CHECK-NEXT: ret
+entry:
+ %a = call <vscale x 1 x bfloat> @llvm.riscv.vfrsqrt7.mask.nxv1bf16(
+ <vscale x 1 x bfloat> %1,
+ <vscale x 1 x bfloat> %2,
+ <vscale x 1 x i1> %0,
+ iXLen %3, iXLen 1)
+
+ ret <vscale x 1 x bfloat> %a
+}
+
+declare <vscale x 2 x bfloat> @llvm.riscv.vfrsqrt7.nxv2bf16(
+ <vscale x 2 x bfloat>,
+ <vscale x 2 x bfloat>,
+ iXLen);
+
+define <vscale x 2 x bfloat> @intrinsic_vfrsqrt7_v_nxv2bf16_nxv2bf16(<vscale x 2 x bfloat> %0, iXLen %1) nounwind {
+; CHECK-LABEL: intrinsic_vfrsqrt7_v_nxv2bf16_nxv2bf16:
+; CHECK: # %bb.0: # %entry
+; CHECK-NEXT: vsetvli zero, a0, e16alt, mf2, ta, ma
+; CHECK-NEXT: vfrsqrt7.v v8, v8
+; CHECK-NEXT: ret
+entry:
+ %a = call <vscale x 2 x bfloat> @llvm.riscv.vfrsqrt7.nxv2bf16(
+ <vscale x 2 x bfloat> poison,
+ <vscale x 2 x bfloat> %0,
+ iXLen %1)
+
+ ret <vscale x 2 x bfloat> %a
+}
+
+declare <vscale x 2 x bfloat> @llvm.riscv.vfrsqrt7.mask.nxv2bf16(
+ <vscale x 2 x bfloat>,
+ <vscale x 2 x bfloat>,
+ <vscale x 2 x i1>,
+ iXLen,
+ iXLen);
+
+define <vscale x 2 x bfloat> @intrinsic_vfrsqrt7_mask_v_nxv2bf16_nxv2bf16(<vscale x 2 x i1> %0, <vscale x 2 x bfloat> %1, <vscale x 2 x bfloat> %2, iXLen %3) nounwind {
+; CHECK-LABEL: intrinsic_vfrsqrt7_mask_v_nxv2bf16_nxv2bf16:
+; CHECK: # %bb.0: # %entry
+; CHECK-NEXT: vsetvli zero, a0, e16alt, mf2, ta, mu
+; CHECK-NEXT: vfrsqrt7.v v8, v9, v0.t
+; CHECK-NEXT: ret
+entry:
+ %a = call <vscale x 2 x bfloat> @llvm.riscv.vfrsqrt7.mask.nxv2bf16(
+ <vscale x 2 x bfloat> %1,
+ <vscale x 2 x bfloat> %2,
+ <vscale x 2 x i1> %0,
+ iXLen %3, iXLen 1)
+
+ ret <vscale x 2 x bfloat> %a
+}
+
+declare <vscale x 4 x bfloat> @llvm.riscv.vfrsqrt7.nxv4bf16(
+ <vscale x 4 x bfloat>,
+ <vscale x 4 x bfloat>,
+ iXLen);
+
+define <vscale x 4 x bfloat> @intrinsic_vfrsqrt7_v_nxv4bf16_nxv4bf16(<vscale x 4 x bfloat> %0, iXLen %1) nounwind {
+; CHECK-LABEL: intrinsic_vfrsqrt7_v_nxv4bf16_nxv4bf16:
+; CHECK: # %bb.0: # %entry
+; CHECK-NEXT: vsetvli zero, a0, e16alt, m1, ta, ma
+; CHECK-NEXT: vfrsqrt7.v v8, v8
+; CHECK-NEXT: ret
+entry:
+ %a = call <vscale x 4 x bfloat> @llvm.riscv.vfrsqrt7.nxv4bf16(
+ <vscale x 4 x bfloat> poison,
+ <vscale x 4 x bfloat> %0,
+ iXLen %1)
+
+ ret <vscale x 4 x bfloat> %a
+}
+
+declare <vscale x 4 x bfloat> @llvm.riscv.vfrsqrt7.mask.nxv4bf16(
+ <vscale x 4 x bfloat>,
+ <vscale x 4 x bfloat>,
+ <vscale x 4 x i1>,
+ iXLen,
+ iXLen);
+
+define <vscale x 4 x bfloat> @intrinsic_vfrsqrt7_mask_v_nxv4bf16_nxv4bf16(<vscale x 4 x i1> %0, <vscale x 4 x bfloat> %1, <vscale x 4 x bfloat> %2, iXLen %3) nounwind {
+; CHECK-LABEL: intrinsic_vfrsqrt7_mask_v_nxv4bf16_nxv4bf16:
+; CHECK: # %bb.0: # %entry
+; CHECK-NEXT: vsetvli zero, a0, e16alt, m1, ta, mu
+; CHECK-NEXT: vfrsqrt7.v v8, v9, v0.t
+; CHECK-NEXT: ret
+entry:
+ %a = call <vscale x 4 x bfloat> @llvm.riscv.vfrsqrt7.mask.nxv4bf16(
+ <vscale x 4 x bfloat> %1,
+ <vscale x 4 x bfloat> %2,
+ <vscale x 4 x i1> %0,
+ iXLen %3, iXLen 1)
+
+ ret <vscale x 4 x bfloat> %a
+}
+
+declare <vscale x 8 x bfloat> @llvm.riscv.vfrsqrt7.nxv8bf16(
+ <vscale x 8 x bfloat>,
+ <vscale x 8 x bfloat>,
+ iXLen);
+
+define <vscale x 8 x bfloat> @intrinsic_vfrsqrt7_v_nxv8bf16_nxv8bf16(<vscale x 8 x bfloat> %0, iXLen %1) nounwind {
+; CHECK-LABEL: intrinsic_vfrsqrt7_v_nxv8bf16_nxv8bf16:
+; CHECK: # %bb.0: # %entry
+; CHECK-NEXT: vsetvli zero, a0, e16alt, m2, ta, ma
+; CHECK-NEXT: vfrsqrt7.v v8, v8
+; CHECK-NEXT: ret
+entry:
+ %a = call <vscale x 8 x bfloat> @llvm.riscv.vfrsqrt7.nxv8bf16(
+ <vscale x 8 x bfloat> poison,
+ <vscale x 8 x bfloat> %0,
+ iXLen %1)
+
+ ret <vscale x 8 x bfloat> %a
+}
+
+declare <vscale x 8 x bfloat> @llvm.riscv.vfrsqrt7.mask.nxv8bf16(
+ <vscale x 8 x bfloat>,
+ <vscale x 8 x bfloat>,
+ <vscale x 8 x i1>,
+ iXLen,
+ iXLen);
+
+define <vscale x 8 x bfloat> @intrinsic_vfrsqrt7_mask_v_nxv8bf16_nxv8bf16(<vscale x 8 x i1> %0, <vscale x 8 x bfloat> %1, <vscale x 8 x bfloat> %2, iXLen %3) nounwind {
+; CHECK-LABEL: intrinsic_vfrsqrt7_mask_v_nxv8bf16_nxv8bf16:
+; CHECK: # %bb.0: # %entry
+; CHECK-NEXT: vsetvli zero, a0, e16alt, m2, ta, mu
+; CHECK-NEXT: vfrsqrt7.v v8, v10, v0.t
+; CHECK-NEXT: ret
+entry:
+ %a = call <vscale x 8 x bfloat> @llvm.riscv.vfrsqrt7.mask.nxv8bf16(
+ <vscale x 8 x bfloat> %1,
+ <vscale x 8 x bfloat> %2,
+ <vscale x 8 x i1> %0,
+ iXLen %3, iXLen 1)
+
+ ret <vscale x 8 x bfloat> %a
+}
+
+declare <vscale x 16 x bfloat> @llvm.riscv.vfrsqrt7.nxv16bf16(
+ <vscale x 16 x bfloat>,
+ <vscale x 16 x bfloat>,
+ iXLen);
+
+define <vscale x 16 x bfloat> @intrinsic_vfrsqrt7_v_nxv16bf16_nxv16bf16(<vscale x 16 x bfloat> %0, iXLen %1) nounwind {
+; CHECK-LABEL: intrinsic_vfrsqrt7_v_nxv16bf16_nxv16bf16:
+; CHECK: # %bb.0: # %entry
+; CHECK-NEXT: vsetvli zero, a0, e16alt, m4, ta, ma
+; CHECK-NEXT: vfrsqrt7.v v8, v8
+; CHECK-NEXT: ret
+entry:
+ %a = call <vscale x 16 x bfloat> @llvm.riscv.vfrsqrt7.nxv16bf16(
+ <vscale x 16 x bfloat> poison,
+ <vscale x 16 x bfloat> %0,
+ iXLen %1)
+
+ ret <vscale x 16 x bfloat> %a
+}
+
+declare <vscale x 16 x bfloat> @llvm.riscv.vfrsqrt7.mask.nxv16bf16(
+ <vscale x 16 x bfloat>,
+ <vscale x 16 x bfloat>,
+ <vscale x 16 x i1>,
+ iXLen,
+ iXLen);
+
+define <vscale x 16 x bfloat> @intrinsic_vfrsqrt7_mask_v_nxv16bf16_nxv16bf16(<vscale x 16 x i1> %0, <vscale x 16 x bfloat> %1, <vscale x 16 x bfloat> %2, iXLen %3) nounwind {
+; CHECK-LABEL: intrinsic_vfrsqrt7_mask_v_nxv16bf16_nxv16bf16:
+; CHECK: # %bb.0: # %entry
+; CHECK-NEXT: vsetvli zero, a0, e16alt, m4, ta, mu
+; CHECK-NEXT: vfrsqrt7.v v8, v12, v0.t
+; CHECK-NEXT: ret
+entry:
+ %a = call <vscale x 16 x bfloat> @llvm.riscv.vfrsqrt7.mask.nxv16bf16(
+ <vscale x 16 x bfloat> %1,
+ <vscale x 16 x bfloat> %2,
+ <vscale x 16 x i1> %0,
+ iXLen %3, iXLen 1)
+
+ ret <vscale x 16 x bfloat> %a
+}
+
+declare <vscale x 32 x bfloat> @llvm.riscv.vfrsqrt7.nxv32bf16(
+ <vscale x 32 x bfloat>,
+ <vscale x 32 x bfloat>,
+ iXLen);
+
+define <vscale x 32 x bfloat> @intrinsic_vfrsqrt7_v_nxv32bf16_nxv32bf16(<vscale x 32 x bfloat> %0, iXLen %1) nounwind {
+; CHECK-LABEL: intrinsic_vfrsqrt7_v_nxv32bf16_nxv32bf16:
+; CHECK: # %bb.0: # %entry
+; CHECK-NEXT: vsetvli zero, a0, e16alt, m8, ta, ma
+; CHECK-NEXT: vfrsqrt7.v v8, v8
+; CHECK-NEXT: ret
+entry:
+ %a = call <vscale x 32 x bfloat> @llvm.riscv.vfrsqrt7.nxv32bf16(
+ <vscale x 32 x bfloat> poison,
+ <vscale x 32 x bfloat> %0,
+ iXLen %1)
+
+ ret <vscale x 32 x bfloat> %a
+}
+
+declare <vscale x 32 x bfloat> @llvm.riscv.vfrsqrt7.mask.nxv32bf16(
+ <vscale x 32 x bfloat>,
+ <vscale x 32 x bfloat>,
+ <vscale x 32 x i1>,
+ iXLen,
+ iXLen);
+
+define <vscale x 32 x bfloat> @intrinsic_vfrsqrt7_mask_v_nxv32bf16_nxv32bf16(<vscale x 32 x i1> %0, <vscale x 32 x bfloat> %1, <vscale x 32 x bfloat> %2, iXLen %3) nounwind {
+; CHECK-LABEL: intrinsic_vfrsqrt7_mask_v_nxv32bf16_nxv32bf16:
+; CHECK: # %bb.0: # %entry
+; CHECK-NEXT: vsetvli zero, a0, e16alt, m8, ta, mu
+; CHECK-NEXT: vfrsqrt7.v v8, v16, v0.t
+; CHECK-NEXT: ret
+entry:
+ %a = call <vscale x 32 x bfloat> @llvm.riscv.vfrsqrt7.mask.nxv32bf16(
+ <vscale x 32 x bfloat> %1,
+ <vscale x 32 x bfloat> %2,
+ <vscale x 32 x i1> %0,
+ iXLen %3, iXLen 1)
+
+ ret <vscale x 32 x bfloat> %a
+}
+
diff --git a/llvm/test/CodeGen/RISCV/rvv/vfrsub-bf.ll b/llvm/test/CodeGen/RISCV/rvv/vfrsub-bf.ll
new file mode 100644
index 0000000..54a6d48
--- /dev/null
+++ b/llvm/test/CodeGen/RISCV/rvv/vfrsub-bf.ll
@@ -0,0 +1,282 @@
+; NOTE: Assertions have been autogenerated by utils/update_llc_test_checks.py
+; RUN: sed 's/iXLen/i32/g' %s | llc -mtriple=riscv32 -mattr=+v,+experimental-zvfbfa \
+; RUN: -verify-machineinstrs -target-abi=ilp32d | FileCheck %s
+; RUN: sed 's/iXLen/i64/g' %s | llc -mtriple=riscv64 -mattr=+v,+experimental-zvfbfa \
+; RUN: -verify-machineinstrs -target-abi=lp64d | FileCheck %s
+
+declare <vscale x 1 x bfloat> @llvm.riscv.vfrsub.nxv1bf16.bf16(
+ <vscale x 1 x bfloat>,
+ <vscale x 1 x bfloat>,
+ bfloat,
+ iXLen, iXLen);
+
+define <vscale x 1 x bfloat> @intrinsic_vfrsub_vf_nxv1bf16_nxv1bf16_bf16(<vscale x 1 x bfloat> %0, bfloat %1, iXLen %2) nounwind {
+; CHECK-LABEL: intrinsic_vfrsub_vf_nxv1bf16_nxv1bf16_bf16:
+; CHECK: # %bb.0: # %entry
+; CHECK-NEXT: vsetvli zero, a0, e16alt, mf4, ta, ma
+; CHECK-NEXT: vfrsub.vf v8, v8, fa0
+; CHECK-NEXT: ret
+entry:
+ %a = call <vscale x 1 x bfloat> @llvm.riscv.vfrsub.nxv1bf16.bf16(
+ <vscale x 1 x bfloat> poison,
+ <vscale x 1 x bfloat> %0,
+ bfloat %1,
+ iXLen 7, iXLen %2)
+
+ ret <vscale x 1 x bfloat> %a
+}
+
+declare <vscale x 1 x bfloat> @llvm.riscv.vfrsub.mask.nxv1bf16.bf16(
+ <vscale x 1 x bfloat>,
+ <vscale x 1 x bfloat>,
+ bfloat,
+ <vscale x 1 x i1>,
+ iXLen, iXLen, iXLen);
+
+define <vscale x 1 x bfloat> @intrinsic_vfrsub_mask_vf_nxv1bf16_nxv1bf16_bf16(<vscale x 1 x bfloat> %0, <vscale x 1 x bfloat> %1, bfloat %2, <vscale x 1 x i1> %3, iXLen %4) nounwind {
+; CHECK-LABEL: intrinsic_vfrsub_mask_vf_nxv1bf16_nxv1bf16_bf16:
+; CHECK: # %bb.0: # %entry
+; CHECK-NEXT: vsetvli zero, a0, e16alt, mf4, ta, mu
+; CHECK-NEXT: vfrsub.vf v8, v9, fa0, v0.t
+; CHECK-NEXT: ret
+entry:
+ %a = call <vscale x 1 x bfloat> @llvm.riscv.vfrsub.mask.nxv1bf16.bf16(
+ <vscale x 1 x bfloat> %0,
+ <vscale x 1 x bfloat> %1,
+ bfloat %2,
+ <vscale x 1 x i1> %3,
+ iXLen 7, iXLen %4, iXLen 1)
+
+ ret <vscale x 1 x bfloat> %a
+}
+
+declare <vscale x 2 x bfloat> @llvm.riscv.vfrsub.nxv2bf16.bf16(
+ <vscale x 2 x bfloat>,
+ <vscale x 2 x bfloat>,
+ bfloat,
+ iXLen, iXLen);
+
+define <vscale x 2 x bfloat> @intrinsic_vfrsub_vf_nxv2bf16_nxv2bf16_bf16(<vscale x 2 x bfloat> %0, bfloat %1, iXLen %2) nounwind {
+; CHECK-LABEL: intrinsic_vfrsub_vf_nxv2bf16_nxv2bf16_bf16:
+; CHECK: # %bb.0: # %entry
+; CHECK-NEXT: vsetvli zero, a0, e16alt, mf2, ta, ma
+; CHECK-NEXT: vfrsub.vf v8, v8, fa0
+; CHECK-NEXT: ret
+entry:
+ %a = call <vscale x 2 x bfloat> @llvm.riscv.vfrsub.nxv2bf16.bf16(
+ <vscale x 2 x bfloat> poison,
+ <vscale x 2 x bfloat> %0,
+ bfloat %1,
+ iXLen 7, iXLen %2)
+
+ ret <vscale x 2 x bfloat> %a
+}
+
+declare <vscale x 2 x bfloat> @llvm.riscv.vfrsub.mask.nxv2bf16.bf16(
+ <vscale x 2 x bfloat>,
+ <vscale x 2 x bfloat>,
+ bfloat,
+ <vscale x 2 x i1>,
+ iXLen, iXLen, iXLen);
+
+define <vscale x 2 x bfloat> @intrinsic_vfrsub_mask_vf_nxv2bf16_nxv2bf16_bf16(<vscale x 2 x bfloat> %0, <vscale x 2 x bfloat> %1, bfloat %2, <vscale x 2 x i1> %3, iXLen %4) nounwind {
+; CHECK-LABEL: intrinsic_vfrsub_mask_vf_nxv2bf16_nxv2bf16_bf16:
+; CHECK: # %bb.0: # %entry
+; CHECK-NEXT: vsetvli zero, a0, e16alt, mf2, ta, mu
+; CHECK-NEXT: vfrsub.vf v8, v9, fa0, v0.t
+; CHECK-NEXT: ret
+entry:
+ %a = call <vscale x 2 x bfloat> @llvm.riscv.vfrsub.mask.nxv2bf16.bf16(
+ <vscale x 2 x bfloat> %0,
+ <vscale x 2 x bfloat> %1,
+ bfloat %2,
+ <vscale x 2 x i1> %3,
+ iXLen 7, iXLen %4, iXLen 1)
+
+ ret <vscale x 2 x bfloat> %a
+}
+
+declare <vscale x 4 x bfloat> @llvm.riscv.vfrsub.nxv4bf16.bf16(
+ <vscale x 4 x bfloat>,
+ <vscale x 4 x bfloat>,
+ bfloat,
+ iXLen, iXLen);
+
+define <vscale x 4 x bfloat> @intrinsic_vfrsub_vf_nxv4bf16_nxv4bf16_bf16(<vscale x 4 x bfloat> %0, bfloat %1, iXLen %2) nounwind {
+; CHECK-LABEL: intrinsic_vfrsub_vf_nxv4bf16_nxv4bf16_bf16:
+; CHECK: # %bb.0: # %entry
+; CHECK-NEXT: vsetvli zero, a0, e16alt, m1, ta, ma
+; CHECK-NEXT: vfrsub.vf v8, v8, fa0
+; CHECK-NEXT: ret
+entry:
+ %a = call <vscale x 4 x bfloat> @llvm.riscv.vfrsub.nxv4bf16.bf16(
+ <vscale x 4 x bfloat> poison,
+ <vscale x 4 x bfloat> %0,
+ bfloat %1,
+ iXLen 7, iXLen %2)
+
+ ret <vscale x 4 x bfloat> %a
+}
+
+declare <vscale x 4 x bfloat> @llvm.riscv.vfrsub.mask.nxv4bf16.bf16(
+ <vscale x 4 x bfloat>,
+ <vscale x 4 x bfloat>,
+ bfloat,
+ <vscale x 4 x i1>,
+ iXLen, iXLen, iXLen);
+
+define <vscale x 4 x bfloat> @intrinsic_vfrsub_mask_vf_nxv4bf16_nxv4bf16_bf16(<vscale x 4 x bfloat> %0, <vscale x 4 x bfloat> %1, bfloat %2, <vscale x 4 x i1> %3, iXLen %4) nounwind {
+; CHECK-LABEL: intrinsic_vfrsub_mask_vf_nxv4bf16_nxv4bf16_bf16:
+; CHECK: # %bb.0: # %entry
+; CHECK-NEXT: vsetvli zero, a0, e16alt, m1, ta, mu
+; CHECK-NEXT: vfrsub.vf v8, v9, fa0, v0.t
+; CHECK-NEXT: ret
+entry:
+ %a = call <vscale x 4 x bfloat> @llvm.riscv.vfrsub.mask.nxv4bf16.bf16(
+ <vscale x 4 x bfloat> %0,
+ <vscale x 4 x bfloat> %1,
+ bfloat %2,
+ <vscale x 4 x i1> %3,
+ iXLen 7, iXLen %4, iXLen 1)
+
+ ret <vscale x 4 x bfloat> %a
+}
+
+declare <vscale x 8 x bfloat> @llvm.riscv.vfrsub.nxv8bf16.bf16(
+ <vscale x 8 x bfloat>,
+ <vscale x 8 x bfloat>,
+ bfloat,
+ iXLen, iXLen);
+
+define <vscale x 8 x bfloat> @intrinsic_vfrsub_vf_nxv8bf16_nxv8bf16_bf16(<vscale x 8 x bfloat> %0, bfloat %1, iXLen %2) nounwind {
+; CHECK-LABEL: intrinsic_vfrsub_vf_nxv8bf16_nxv8bf16_bf16:
+; CHECK: # %bb.0: # %entry
+; CHECK-NEXT: vsetvli zero, a0, e16alt, m2, ta, ma
+; CHECK-NEXT: vfrsub.vf v8, v8, fa0
+; CHECK-NEXT: ret
+entry:
+ %a = call <vscale x 8 x bfloat> @llvm.riscv.vfrsub.nxv8bf16.bf16(
+ <vscale x 8 x bfloat> poison,
+ <vscale x 8 x bfloat> %0,
+ bfloat %1,
+ iXLen 7, iXLen %2)
+
+ ret <vscale x 8 x bfloat> %a
+}
+
+declare <vscale x 8 x bfloat> @llvm.riscv.vfrsub.mask.nxv8bf16.bf16(
+ <vscale x 8 x bfloat>,
+ <vscale x 8 x bfloat>,
+ bfloat,
+ <vscale x 8 x i1>,
+ iXLen, iXLen, iXLen);
+
+define <vscale x 8 x bfloat> @intrinsic_vfrsub_mask_vf_nxv8bf16_nxv8bf16_bf16(<vscale x 8 x bfloat> %0, <vscale x 8 x bfloat> %1, bfloat %2, <vscale x 8 x i1> %3, iXLen %4) nounwind {
+; CHECK-LABEL: intrinsic_vfrsub_mask_vf_nxv8bf16_nxv8bf16_bf16:
+; CHECK: # %bb.0: # %entry
+; CHECK-NEXT: vsetvli zero, a0, e16alt, m2, ta, mu
+; CHECK-NEXT: vfrsub.vf v8, v10, fa0, v0.t
+; CHECK-NEXT: ret
+entry:
+ %a = call <vscale x 8 x bfloat> @llvm.riscv.vfrsub.mask.nxv8bf16.bf16(
+ <vscale x 8 x bfloat> %0,
+ <vscale x 8 x bfloat> %1,
+ bfloat %2,
+ <vscale x 8 x i1> %3,
+ iXLen 7, iXLen %4, iXLen 1)
+
+ ret <vscale x 8 x bfloat> %a
+}
+
+declare <vscale x 16 x bfloat> @llvm.riscv.vfrsub.nxv16bf16.bf16(
+ <vscale x 16 x bfloat>,
+ <vscale x 16 x bfloat>,
+ bfloat,
+ iXLen, iXLen);
+
+define <vscale x 16 x bfloat> @intrinsic_vfrsub_vf_nxv16bf16_nxv16bf16_bf16(<vscale x 16 x bfloat> %0, bfloat %1, iXLen %2) nounwind {
+; CHECK-LABEL: intrinsic_vfrsub_vf_nxv16bf16_nxv16bf16_bf16:
+; CHECK: # %bb.0: # %entry
+; CHECK-NEXT: vsetvli zero, a0, e16alt, m4, ta, ma
+; CHECK-NEXT: vfrsub.vf v8, v8, fa0
+; CHECK-NEXT: ret
+entry:
+ %a = call <vscale x 16 x bfloat> @llvm.riscv.vfrsub.nxv16bf16.bf16(
+ <vscale x 16 x bfloat> poison,
+ <vscale x 16 x bfloat> %0,
+ bfloat %1,
+ iXLen 7, iXLen %2)
+
+ ret <vscale x 16 x bfloat> %a
+}
+
+declare <vscale x 16 x bfloat> @llvm.riscv.vfrsub.mask.nxv16bf16.bf16(
+ <vscale x 16 x bfloat>,
+ <vscale x 16 x bfloat>,
+ bfloat,
+ <vscale x 16 x i1>,
+ iXLen, iXLen, iXLen);
+
+define <vscale x 16 x bfloat> @intrinsic_vfrsub_mask_vf_nxv16bf16_nxv16bf16_bf16(<vscale x 16 x bfloat> %0, <vscale x 16 x bfloat> %1, bfloat %2, <vscale x 16 x i1> %3, iXLen %4) nounwind {
+; CHECK-LABEL: intrinsic_vfrsub_mask_vf_nxv16bf16_nxv16bf16_bf16:
+; CHECK: # %bb.0: # %entry
+; CHECK-NEXT: vsetvli zero, a0, e16alt, m4, ta, mu
+; CHECK-NEXT: vfrsub.vf v8, v12, fa0, v0.t
+; CHECK-NEXT: ret
+entry:
+ %a = call <vscale x 16 x bfloat> @llvm.riscv.vfrsub.mask.nxv16bf16.bf16(
+ <vscale x 16 x bfloat> %0,
+ <vscale x 16 x bfloat> %1,
+ bfloat %2,
+ <vscale x 16 x i1> %3,
+ iXLen 7, iXLen %4, iXLen 1)
+
+ ret <vscale x 16 x bfloat> %a
+}
+
+declare <vscale x 32 x bfloat> @llvm.riscv.vfrsub.nxv32bf16.bf16(
+ <vscale x 32 x bfloat>,
+ <vscale x 32 x bfloat>,
+ bfloat,
+ iXLen, iXLen);
+
+define <vscale x 32 x bfloat> @intrinsic_vfrsub_vf_nxv32bf16_nxv32bf16_bf16(<vscale x 32 x bfloat> %0, bfloat %1, iXLen %2) nounwind {
+; CHECK-LABEL: intrinsic_vfrsub_vf_nxv32bf16_nxv32bf16_bf16:
+; CHECK: # %bb.0: # %entry
+; CHECK-NEXT: vsetvli zero, a0, e16alt, m8, ta, ma
+; CHECK-NEXT: vfrsub.vf v8, v8, fa0
+; CHECK-NEXT: ret
+entry:
+ %a = call <vscale x 32 x bfloat> @llvm.riscv.vfrsub.nxv32bf16.bf16(
+ <vscale x 32 x bfloat> poison,
+ <vscale x 32 x bfloat> %0,
+ bfloat %1,
+ iXLen 7, iXLen %2)
+
+ ret <vscale x 32 x bfloat> %a
+}
+
+declare <vscale x 32 x bfloat> @llvm.riscv.vfrsub.mask.nxv32bf16.bf16(
+ <vscale x 32 x bfloat>,
+ <vscale x 32 x bfloat>,
+ bfloat,
+ <vscale x 32 x i1>,
+ iXLen, iXLen, iXLen);
+
+define <vscale x 32 x bfloat> @intrinsic_vfrsub_mask_vf_nxv32bf16_nxv32bf16_bf16(<vscale x 32 x bfloat> %0, <vscale x 32 x bfloat> %1, bfloat %2, <vscale x 32 x i1> %3, iXLen %4) nounwind {
+; CHECK-LABEL: intrinsic_vfrsub_mask_vf_nxv32bf16_nxv32bf16_bf16:
+; CHECK: # %bb.0: # %entry
+; CHECK-NEXT: vsetvli zero, a0, e16alt, m8, ta, mu
+; CHECK-NEXT: vfrsub.vf v8, v16, fa0, v0.t
+; CHECK-NEXT: ret
+entry:
+ %a = call <vscale x 32 x bfloat> @llvm.riscv.vfrsub.mask.nxv32bf16.bf16(
+ <vscale x 32 x bfloat> %0,
+ <vscale x 32 x bfloat> %1,
+ bfloat %2,
+ <vscale x 32 x i1> %3,
+ iXLen 7, iXLen %4, iXLen 1)
+
+ ret <vscale x 32 x bfloat> %a
+}
+
diff --git a/llvm/test/CodeGen/RISCV/rvv/vfsgnj-bf.ll b/llvm/test/CodeGen/RISCV/rvv/vfsgnj-bf.ll
new file mode 100644
index 0000000..2cd698d
--- /dev/null
+++ b/llvm/test/CodeGen/RISCV/rvv/vfsgnj-bf.ll
@@ -0,0 +1,571 @@
+; NOTE: Assertions have been autogenerated by utils/update_llc_test_checks.py
+; RUN: sed 's/iXLen/i32/g' %s | llc -mtriple=riscv32 -mattr=+v,+experimental-zvfbfa \
+; RUN: -verify-machineinstrs -target-abi=ilp32d | FileCheck %s
+; RUN: sed 's/iXLen/i64/g' %s | llc -mtriple=riscv64 -mattr=+v,+experimental-zvfbfa \
+; RUN: -verify-machineinstrs -target-abi=lp64d | FileCheck %s
+
+declare <vscale x 1 x bfloat> @llvm.riscv.vfsgnj.nxv1bf16.nxv1bf16(
+ <vscale x 1 x bfloat>,
+ <vscale x 1 x bfloat>,
+ <vscale x 1 x bfloat>,
+ iXLen);
+
+define <vscale x 1 x bfloat> @intrinsic_vfsgnj_vv_nxv1bf16_nxv1bf16_nxv1bf16(<vscale x 1 x bfloat> %0, <vscale x 1 x bfloat> %1, iXLen %2) nounwind {
+; CHECK-LABEL: intrinsic_vfsgnj_vv_nxv1bf16_nxv1bf16_nxv1bf16:
+; CHECK: # %bb.0: # %entry
+; CHECK-NEXT: vsetvli zero, a0, e16alt, mf4, ta, ma
+; CHECK-NEXT: vfsgnj.vv v8, v8, v9
+; CHECK-NEXT: ret
+entry:
+ %a = call <vscale x 1 x bfloat> @llvm.riscv.vfsgnj.nxv1bf16.nxv1bf16(
+ <vscale x 1 x bfloat> poison,
+ <vscale x 1 x bfloat> %0,
+ <vscale x 1 x bfloat> %1,
+ iXLen %2)
+
+ ret <vscale x 1 x bfloat> %a
+}
+
+declare <vscale x 1 x bfloat> @llvm.riscv.vfsgnj.mask.nxv1bf16.nxv1bf16(
+ <vscale x 1 x bfloat>,
+ <vscale x 1 x bfloat>,
+ <vscale x 1 x bfloat>,
+ <vscale x 1 x i1>,
+ iXLen,
+ iXLen);
+
+define <vscale x 1 x bfloat> @intrinsic_vfsgnj_mask_vv_nxv1bf16_nxv1bf16_nxv1bf16(<vscale x 1 x bfloat> %0, <vscale x 1 x bfloat> %1, <vscale x 1 x bfloat> %2, <vscale x 1 x i1> %3, iXLen %4) nounwind {
+; CHECK-LABEL: intrinsic_vfsgnj_mask_vv_nxv1bf16_nxv1bf16_nxv1bf16:
+; CHECK: # %bb.0: # %entry
+; CHECK-NEXT: vsetvli zero, a0, e16alt, mf4, ta, mu
+; CHECK-NEXT: vfsgnj.vv v8, v9, v10, v0.t
+; CHECK-NEXT: ret
+entry:
+ %a = call <vscale x 1 x bfloat> @llvm.riscv.vfsgnj.mask.nxv1bf16.nxv1bf16(
+ <vscale x 1 x bfloat> %0,
+ <vscale x 1 x bfloat> %1,
+ <vscale x 1 x bfloat> %2,
+ <vscale x 1 x i1> %3,
+ iXLen %4, iXLen 1)
+
+ ret <vscale x 1 x bfloat> %a
+}
+
+declare <vscale x 2 x bfloat> @llvm.riscv.vfsgnj.nxv2bf16.nxv2bf16(
+ <vscale x 2 x bfloat>,
+ <vscale x 2 x bfloat>,
+ <vscale x 2 x bfloat>,
+ iXLen);
+
+define <vscale x 2 x bfloat> @intrinsic_vfsgnj_vv_nxv2bf16_nxv2bf16_nxv2bf16(<vscale x 2 x bfloat> %0, <vscale x 2 x bfloat> %1, iXLen %2) nounwind {
+; CHECK-LABEL: intrinsic_vfsgnj_vv_nxv2bf16_nxv2bf16_nxv2bf16:
+; CHECK: # %bb.0: # %entry
+; CHECK-NEXT: vsetvli zero, a0, e16alt, mf2, ta, ma
+; CHECK-NEXT: vfsgnj.vv v8, v8, v9
+; CHECK-NEXT: ret
+entry:
+ %a = call <vscale x 2 x bfloat> @llvm.riscv.vfsgnj.nxv2bf16.nxv2bf16(
+ <vscale x 2 x bfloat> poison,
+ <vscale x 2 x bfloat> %0,
+ <vscale x 2 x bfloat> %1,
+ iXLen %2)
+
+ ret <vscale x 2 x bfloat> %a
+}
+
+declare <vscale x 2 x bfloat> @llvm.riscv.vfsgnj.mask.nxv2bf16.nxv2bf16(
+ <vscale x 2 x bfloat>,
+ <vscale x 2 x bfloat>,
+ <vscale x 2 x bfloat>,
+ <vscale x 2 x i1>,
+ iXLen,
+ iXLen);
+
+define <vscale x 2 x bfloat> @intrinsic_vfsgnj_mask_vv_nxv2bf16_nxv2bf16_nxv2bf16(<vscale x 2 x bfloat> %0, <vscale x 2 x bfloat> %1, <vscale x 2 x bfloat> %2, <vscale x 2 x i1> %3, iXLen %4) nounwind {
+; CHECK-LABEL: intrinsic_vfsgnj_mask_vv_nxv2bf16_nxv2bf16_nxv2bf16:
+; CHECK: # %bb.0: # %entry
+; CHECK-NEXT: vsetvli zero, a0, e16alt, mf2, ta, mu
+; CHECK-NEXT: vfsgnj.vv v8, v9, v10, v0.t
+; CHECK-NEXT: ret
+entry:
+ %a = call <vscale x 2 x bfloat> @llvm.riscv.vfsgnj.mask.nxv2bf16.nxv2bf16(
+ <vscale x 2 x bfloat> %0,
+ <vscale x 2 x bfloat> %1,
+ <vscale x 2 x bfloat> %2,
+ <vscale x 2 x i1> %3,
+ iXLen %4, iXLen 1)
+
+ ret <vscale x 2 x bfloat> %a
+}
+
+declare <vscale x 4 x bfloat> @llvm.riscv.vfsgnj.nxv4bf16.nxv4bf16(
+ <vscale x 4 x bfloat>,
+ <vscale x 4 x bfloat>,
+ <vscale x 4 x bfloat>,
+ iXLen);
+
+define <vscale x 4 x bfloat> @intrinsic_vfsgnj_vv_nxv4bf16_nxv4bf16_nxv4bf16(<vscale x 4 x bfloat> %0, <vscale x 4 x bfloat> %1, iXLen %2) nounwind {
+; CHECK-LABEL: intrinsic_vfsgnj_vv_nxv4bf16_nxv4bf16_nxv4bf16:
+; CHECK: # %bb.0: # %entry
+; CHECK-NEXT: vsetvli zero, a0, e16alt, m1, ta, ma
+; CHECK-NEXT: vfsgnj.vv v8, v8, v9
+; CHECK-NEXT: ret
+entry:
+ %a = call <vscale x 4 x bfloat> @llvm.riscv.vfsgnj.nxv4bf16.nxv4bf16(
+ <vscale x 4 x bfloat> poison,
+ <vscale x 4 x bfloat> %0,
+ <vscale x 4 x bfloat> %1,
+ iXLen %2)
+
+ ret <vscale x 4 x bfloat> %a
+}
+
+declare <vscale x 4 x bfloat> @llvm.riscv.vfsgnj.mask.nxv4bf16.nxv4bf16(
+ <vscale x 4 x bfloat>,
+ <vscale x 4 x bfloat>,
+ <vscale x 4 x bfloat>,
+ <vscale x 4 x i1>,
+ iXLen,
+ iXLen);
+
+define <vscale x 4 x bfloat> @intrinsic_vfsgnj_mask_vv_nxv4bf16_nxv4bf16_nxv4bf16(<vscale x 4 x bfloat> %0, <vscale x 4 x bfloat> %1, <vscale x 4 x bfloat> %2, <vscale x 4 x i1> %3, iXLen %4) nounwind {
+; CHECK-LABEL: intrinsic_vfsgnj_mask_vv_nxv4bf16_nxv4bf16_nxv4bf16:
+; CHECK: # %bb.0: # %entry
+; CHECK-NEXT: vsetvli zero, a0, e16alt, m1, ta, mu
+; CHECK-NEXT: vfsgnj.vv v8, v9, v10, v0.t
+; CHECK-NEXT: ret
+entry:
+ %a = call <vscale x 4 x bfloat> @llvm.riscv.vfsgnj.mask.nxv4bf16.nxv4bf16(
+ <vscale x 4 x bfloat> %0,
+ <vscale x 4 x bfloat> %1,
+ <vscale x 4 x bfloat> %2,
+ <vscale x 4 x i1> %3,
+ iXLen %4, iXLen 1)
+
+ ret <vscale x 4 x bfloat> %a
+}
+
+declare <vscale x 8 x bfloat> @llvm.riscv.vfsgnj.nxv8bf16.nxv8bf16(
+ <vscale x 8 x bfloat>,
+ <vscale x 8 x bfloat>,
+ <vscale x 8 x bfloat>,
+ iXLen);
+
+define <vscale x 8 x bfloat> @intrinsic_vfsgnj_vv_nxv8bf16_nxv8bf16_nxv8bf16(<vscale x 8 x bfloat> %0, <vscale x 8 x bfloat> %1, iXLen %2) nounwind {
+; CHECK-LABEL: intrinsic_vfsgnj_vv_nxv8bf16_nxv8bf16_nxv8bf16:
+; CHECK: # %bb.0: # %entry
+; CHECK-NEXT: vsetvli zero, a0, e16alt, m2, ta, ma
+; CHECK-NEXT: vfsgnj.vv v8, v8, v10
+; CHECK-NEXT: ret
+entry:
+ %a = call <vscale x 8 x bfloat> @llvm.riscv.vfsgnj.nxv8bf16.nxv8bf16(
+ <vscale x 8 x bfloat> poison,
+ <vscale x 8 x bfloat> %0,
+ <vscale x 8 x bfloat> %1,
+ iXLen %2)
+
+ ret <vscale x 8 x bfloat> %a
+}
+
+declare <vscale x 8 x bfloat> @llvm.riscv.vfsgnj.mask.nxv8bf16.nxv8bf16(
+ <vscale x 8 x bfloat>,
+ <vscale x 8 x bfloat>,
+ <vscale x 8 x bfloat>,
+ <vscale x 8 x i1>,
+ iXLen,
+ iXLen);
+
+define <vscale x 8 x bfloat> @intrinsic_vfsgnj_mask_vv_nxv8bf16_nxv8bf16_nxv8bf16(<vscale x 8 x bfloat> %0, <vscale x 8 x bfloat> %1, <vscale x 8 x bfloat> %2, <vscale x 8 x i1> %3, iXLen %4) nounwind {
+; CHECK-LABEL: intrinsic_vfsgnj_mask_vv_nxv8bf16_nxv8bf16_nxv8bf16:
+; CHECK: # %bb.0: # %entry
+; CHECK-NEXT: vsetvli zero, a0, e16alt, m2, ta, mu
+; CHECK-NEXT: vfsgnj.vv v8, v10, v12, v0.t
+; CHECK-NEXT: ret
+entry:
+ %a = call <vscale x 8 x bfloat> @llvm.riscv.vfsgnj.mask.nxv8bf16.nxv8bf16(
+ <vscale x 8 x bfloat> %0,
+ <vscale x 8 x bfloat> %1,
+ <vscale x 8 x bfloat> %2,
+ <vscale x 8 x i1> %3,
+ iXLen %4, iXLen 1)
+
+ ret <vscale x 8 x bfloat> %a
+}
+
+declare <vscale x 16 x bfloat> @llvm.riscv.vfsgnj.nxv16bf16.nxv16bf16(
+ <vscale x 16 x bfloat>,
+ <vscale x 16 x bfloat>,
+ <vscale x 16 x bfloat>,
+ iXLen);
+
+define <vscale x 16 x bfloat> @intrinsic_vfsgnj_vv_nxv16bf16_nxv16bf16_nxv16bf16(<vscale x 16 x bfloat> %0, <vscale x 16 x bfloat> %1, iXLen %2) nounwind {
+; CHECK-LABEL: intrinsic_vfsgnj_vv_nxv16bf16_nxv16bf16_nxv16bf16:
+; CHECK: # %bb.0: # %entry
+; CHECK-NEXT: vsetvli zero, a0, e16alt, m4, ta, ma
+; CHECK-NEXT: vfsgnj.vv v8, v8, v12
+; CHECK-NEXT: ret
+entry:
+ %a = call <vscale x 16 x bfloat> @llvm.riscv.vfsgnj.nxv16bf16.nxv16bf16(
+ <vscale x 16 x bfloat> poison,
+ <vscale x 16 x bfloat> %0,
+ <vscale x 16 x bfloat> %1,
+ iXLen %2)
+
+ ret <vscale x 16 x bfloat> %a
+}
+
+declare <vscale x 16 x bfloat> @llvm.riscv.vfsgnj.mask.nxv16bf16.nxv16bf16(
+ <vscale x 16 x bfloat>,
+ <vscale x 16 x bfloat>,
+ <vscale x 16 x bfloat>,
+ <vscale x 16 x i1>,
+ iXLen,
+ iXLen);
+
+define <vscale x 16 x bfloat> @intrinsic_vfsgnj_mask_vv_nxv16bf16_nxv16bf16_nxv16bf16(<vscale x 16 x bfloat> %0, <vscale x 16 x bfloat> %1, <vscale x 16 x bfloat> %2, <vscale x 16 x i1> %3, iXLen %4) nounwind {
+; CHECK-LABEL: intrinsic_vfsgnj_mask_vv_nxv16bf16_nxv16bf16_nxv16bf16:
+; CHECK: # %bb.0: # %entry
+; CHECK-NEXT: vsetvli zero, a0, e16alt, m4, ta, mu
+; CHECK-NEXT: vfsgnj.vv v8, v12, v16, v0.t
+; CHECK-NEXT: ret
+entry:
+ %a = call <vscale x 16 x bfloat> @llvm.riscv.vfsgnj.mask.nxv16bf16.nxv16bf16(
+ <vscale x 16 x bfloat> %0,
+ <vscale x 16 x bfloat> %1,
+ <vscale x 16 x bfloat> %2,
+ <vscale x 16 x i1> %3,
+ iXLen %4, iXLen 1)
+
+ ret <vscale x 16 x bfloat> %a
+}
+
+declare <vscale x 32 x bfloat> @llvm.riscv.vfsgnj.nxv32bf16.nxv32bf16(
+ <vscale x 32 x bfloat>,
+ <vscale x 32 x bfloat>,
+ <vscale x 32 x bfloat>,
+ iXLen);
+
+define <vscale x 32 x bfloat> @intrinsic_vfsgnj_vv_nxv32bf16_nxv32bf16_nxv32bf16(<vscale x 32 x bfloat> %0, <vscale x 32 x bfloat> %1, iXLen %2) nounwind {
+; CHECK-LABEL: intrinsic_vfsgnj_vv_nxv32bf16_nxv32bf16_nxv32bf16:
+; CHECK: # %bb.0: # %entry
+; CHECK-NEXT: vsetvli zero, a0, e16alt, m8, ta, ma
+; CHECK-NEXT: vfsgnj.vv v8, v8, v16
+; CHECK-NEXT: ret
+entry:
+ %a = call <vscale x 32 x bfloat> @llvm.riscv.vfsgnj.nxv32bf16.nxv32bf16(
+ <vscale x 32 x bfloat> poison,
+ <vscale x 32 x bfloat> %0,
+ <vscale x 32 x bfloat> %1,
+ iXLen %2)
+
+ ret <vscale x 32 x bfloat> %a
+}
+
+declare <vscale x 32 x bfloat> @llvm.riscv.vfsgnj.mask.nxv32bf16.nxv32bf16(
+ <vscale x 32 x bfloat>,
+ <vscale x 32 x bfloat>,
+ <vscale x 32 x bfloat>,
+ <vscale x 32 x i1>,
+ iXLen,
+ iXLen);
+
+define <vscale x 32 x bfloat> @intrinsic_vfsgnj_mask_vv_nxv32bf16_nxv32bf16_nxv32bf16(<vscale x 32 x bfloat> %0, <vscale x 32 x bfloat> %1, <vscale x 32 x bfloat> %2, <vscale x 32 x i1> %3, iXLen %4) nounwind {
+; CHECK-LABEL: intrinsic_vfsgnj_mask_vv_nxv32bf16_nxv32bf16_nxv32bf16:
+; CHECK: # %bb.0: # %entry
+; CHECK-NEXT: vl8re16.v v24, (a0)
+; CHECK-NEXT: vsetvli zero, a1, e16alt, m8, ta, mu
+; CHECK-NEXT: vfsgnj.vv v8, v16, v24, v0.t
+; CHECK-NEXT: ret
+entry:
+ %a = call <vscale x 32 x bfloat> @llvm.riscv.vfsgnj.mask.nxv32bf16.nxv32bf16(
+ <vscale x 32 x bfloat> %0,
+ <vscale x 32 x bfloat> %1,
+ <vscale x 32 x bfloat> %2,
+ <vscale x 32 x i1> %3,
+ iXLen %4, iXLen 1)
+
+ ret <vscale x 32 x bfloat> %a
+}
+
+declare <vscale x 1 x bfloat> @llvm.riscv.vfsgnj.nxv1bf16.bf16(
+ <vscale x 1 x bfloat>,
+ <vscale x 1 x bfloat>,
+ bfloat,
+ iXLen);
+
+define <vscale x 1 x bfloat> @intrinsic_vfsgnj_vf_nxv1bf16_nxv1bf16_bf16(<vscale x 1 x bfloat> %0, bfloat %1, iXLen %2) nounwind {
+; CHECK-LABEL: intrinsic_vfsgnj_vf_nxv1bf16_nxv1bf16_bf16:
+; CHECK: # %bb.0: # %entry
+; CHECK-NEXT: vsetvli zero, a0, e16alt, mf4, ta, ma
+; CHECK-NEXT: vfsgnj.vf v8, v8, fa0
+; CHECK-NEXT: ret
+entry:
+ %a = call <vscale x 1 x bfloat> @llvm.riscv.vfsgnj.nxv1bf16.bf16(
+ <vscale x 1 x bfloat> poison,
+ <vscale x 1 x bfloat> %0,
+ bfloat %1,
+ iXLen %2)
+
+ ret <vscale x 1 x bfloat> %a
+}
+
+declare <vscale x 1 x bfloat> @llvm.riscv.vfsgnj.mask.nxv1bf16.bf16(
+ <vscale x 1 x bfloat>,
+ <vscale x 1 x bfloat>,
+ bfloat,
+ <vscale x 1 x i1>,
+ iXLen,
+ iXLen);
+
+define <vscale x 1 x bfloat> @intrinsic_vfsgnj_mask_vf_nxv1bf16_nxv1bf16_bf16(<vscale x 1 x bfloat> %0, <vscale x 1 x bfloat> %1, bfloat %2, <vscale x 1 x i1> %3, iXLen %4) nounwind {
+; CHECK-LABEL: intrinsic_vfsgnj_mask_vf_nxv1bf16_nxv1bf16_bf16:
+; CHECK: # %bb.0: # %entry
+; CHECK-NEXT: vsetvli zero, a0, e16alt, mf4, ta, mu
+; CHECK-NEXT: vfsgnj.vf v8, v9, fa0, v0.t
+; CHECK-NEXT: ret
+entry:
+ %a = call <vscale x 1 x bfloat> @llvm.riscv.vfsgnj.mask.nxv1bf16.bf16(
+ <vscale x 1 x bfloat> %0,
+ <vscale x 1 x bfloat> %1,
+ bfloat %2,
+ <vscale x 1 x i1> %3,
+ iXLen %4, iXLen 1)
+
+ ret <vscale x 1 x bfloat> %a
+}
+
+declare <vscale x 2 x bfloat> @llvm.riscv.vfsgnj.nxv2bf16.bf16(
+ <vscale x 2 x bfloat>,
+ <vscale x 2 x bfloat>,
+ bfloat,
+ iXLen);
+
+define <vscale x 2 x bfloat> @intrinsic_vfsgnj_vf_nxv2bf16_nxv2bf16_bf16(<vscale x 2 x bfloat> %0, bfloat %1, iXLen %2) nounwind {
+; CHECK-LABEL: intrinsic_vfsgnj_vf_nxv2bf16_nxv2bf16_bf16:
+; CHECK: # %bb.0: # %entry
+; CHECK-NEXT: vsetvli zero, a0, e16alt, mf2, ta, ma
+; CHECK-NEXT: vfsgnj.vf v8, v8, fa0
+; CHECK-NEXT: ret
+entry:
+ %a = call <vscale x 2 x bfloat> @llvm.riscv.vfsgnj.nxv2bf16.bf16(
+ <vscale x 2 x bfloat> poison,
+ <vscale x 2 x bfloat> %0,
+ bfloat %1,
+ iXLen %2)
+
+ ret <vscale x 2 x bfloat> %a
+}
+
+declare <vscale x 2 x bfloat> @llvm.riscv.vfsgnj.mask.nxv2bf16.bf16(
+ <vscale x 2 x bfloat>,
+ <vscale x 2 x bfloat>,
+ bfloat,
+ <vscale x 2 x i1>,
+ iXLen,
+ iXLen);
+
+define <vscale x 2 x bfloat> @intrinsic_vfsgnj_mask_vf_nxv2bf16_nxv2bf16_bf16(<vscale x 2 x bfloat> %0, <vscale x 2 x bfloat> %1, bfloat %2, <vscale x 2 x i1> %3, iXLen %4) nounwind {
+; CHECK-LABEL: intrinsic_vfsgnj_mask_vf_nxv2bf16_nxv2bf16_bf16:
+; CHECK: # %bb.0: # %entry
+; CHECK-NEXT: vsetvli zero, a0, e16alt, mf2, ta, mu
+; CHECK-NEXT: vfsgnj.vf v8, v9, fa0, v0.t
+; CHECK-NEXT: ret
+entry:
+ %a = call <vscale x 2 x bfloat> @llvm.riscv.vfsgnj.mask.nxv2bf16.bf16(
+ <vscale x 2 x bfloat> %0,
+ <vscale x 2 x bfloat> %1,
+ bfloat %2,
+ <vscale x 2 x i1> %3,
+ iXLen %4, iXLen 1)
+
+ ret <vscale x 2 x bfloat> %a
+}
+
+declare <vscale x 4 x bfloat> @llvm.riscv.vfsgnj.nxv4bf16.bf16(
+ <vscale x 4 x bfloat>,
+ <vscale x 4 x bfloat>,
+ bfloat,
+ iXLen);
+
+define <vscale x 4 x bfloat> @intrinsic_vfsgnj_vf_nxv4bf16_nxv4bf16_bf16(<vscale x 4 x bfloat> %0, bfloat %1, iXLen %2) nounwind {
+; CHECK-LABEL: intrinsic_vfsgnj_vf_nxv4bf16_nxv4bf16_bf16:
+; CHECK: # %bb.0: # %entry
+; CHECK-NEXT: vsetvli zero, a0, e16alt, m1, ta, ma
+; CHECK-NEXT: vfsgnj.vf v8, v8, fa0
+; CHECK-NEXT: ret
+entry:
+ %a = call <vscale x 4 x bfloat> @llvm.riscv.vfsgnj.nxv4bf16.bf16(
+ <vscale x 4 x bfloat> poison,
+ <vscale x 4 x bfloat> %0,
+ bfloat %1,
+ iXLen %2)
+
+ ret <vscale x 4 x bfloat> %a
+}
+
+declare <vscale x 4 x bfloat> @llvm.riscv.vfsgnj.mask.nxv4bf16.bf16(
+ <vscale x 4 x bfloat>,
+ <vscale x 4 x bfloat>,
+ bfloat,
+ <vscale x 4 x i1>,
+ iXLen,
+ iXLen);
+
+define <vscale x 4 x bfloat> @intrinsic_vfsgnj_mask_vf_nxv4bf16_nxv4bf16_bf16(<vscale x 4 x bfloat> %0, <vscale x 4 x bfloat> %1, bfloat %2, <vscale x 4 x i1> %3, iXLen %4) nounwind {
+; CHECK-LABEL: intrinsic_vfsgnj_mask_vf_nxv4bf16_nxv4bf16_bf16:
+; CHECK: # %bb.0: # %entry
+; CHECK-NEXT: vsetvli zero, a0, e16alt, m1, ta, mu
+; CHECK-NEXT: vfsgnj.vf v8, v9, fa0, v0.t
+; CHECK-NEXT: ret
+entry:
+ %a = call <vscale x 4 x bfloat> @llvm.riscv.vfsgnj.mask.nxv4bf16.bf16(
+ <vscale x 4 x bfloat> %0,
+ <vscale x 4 x bfloat> %1,
+ bfloat %2,
+ <vscale x 4 x i1> %3,
+ iXLen %4, iXLen 1)
+
+ ret <vscale x 4 x bfloat> %a
+}
+
+declare <vscale x 8 x bfloat> @llvm.riscv.vfsgnj.nxv8bf16.bf16(
+ <vscale x 8 x bfloat>,
+ <vscale x 8 x bfloat>,
+ bfloat,
+ iXLen);
+
+define <vscale x 8 x bfloat> @intrinsic_vfsgnj_vf_nxv8bf16_nxv8bf16_bf16(<vscale x 8 x bfloat> %0, bfloat %1, iXLen %2) nounwind {
+; CHECK-LABEL: intrinsic_vfsgnj_vf_nxv8bf16_nxv8bf16_bf16:
+; CHECK: # %bb.0: # %entry
+; CHECK-NEXT: vsetvli zero, a0, e16alt, m2, ta, ma
+; CHECK-NEXT: vfsgnj.vf v8, v8, fa0
+; CHECK-NEXT: ret
+entry:
+ %a = call <vscale x 8 x bfloat> @llvm.riscv.vfsgnj.nxv8bf16.bf16(
+ <vscale x 8 x bfloat> poison,
+ <vscale x 8 x bfloat> %0,
+ bfloat %1,
+ iXLen %2)
+
+ ret <vscale x 8 x bfloat> %a
+}
+
+declare <vscale x 8 x bfloat> @llvm.riscv.vfsgnj.mask.nxv8bf16.bf16(
+ <vscale x 8 x bfloat>,
+ <vscale x 8 x bfloat>,
+ bfloat,
+ <vscale x 8 x i1>,
+ iXLen,
+ iXLen);
+
+define <vscale x 8 x bfloat> @intrinsic_vfsgnj_mask_vf_nxv8bf16_nxv8bf16_bf16(<vscale x 8 x bfloat> %0, <vscale x 8 x bfloat> %1, bfloat %2, <vscale x 8 x i1> %3, iXLen %4) nounwind {
+; CHECK-LABEL: intrinsic_vfsgnj_mask_vf_nxv8bf16_nxv8bf16_bf16:
+; CHECK: # %bb.0: # %entry
+; CHECK-NEXT: vsetvli zero, a0, e16alt, m2, ta, mu
+; CHECK-NEXT: vfsgnj.vf v8, v10, fa0, v0.t
+; CHECK-NEXT: ret
+entry:
+ %a = call <vscale x 8 x bfloat> @llvm.riscv.vfsgnj.mask.nxv8bf16.bf16(
+ <vscale x 8 x bfloat> %0,
+ <vscale x 8 x bfloat> %1,
+ bfloat %2,
+ <vscale x 8 x i1> %3,
+ iXLen %4, iXLen 1)
+
+ ret <vscale x 8 x bfloat> %a
+}
+
+declare <vscale x 16 x bfloat> @llvm.riscv.vfsgnj.nxv16bf16.bf16(
+ <vscale x 16 x bfloat>,
+ <vscale x 16 x bfloat>,
+ bfloat,
+ iXLen);
+
+define <vscale x 16 x bfloat> @intrinsic_vfsgnj_vf_nxv16bf16_nxv16bf16_bf16(<vscale x 16 x bfloat> %0, bfloat %1, iXLen %2) nounwind {
+; CHECK-LABEL: intrinsic_vfsgnj_vf_nxv16bf16_nxv16bf16_bf16:
+; CHECK: # %bb.0: # %entry
+; CHECK-NEXT: vsetvli zero, a0, e16alt, m4, ta, ma
+; CHECK-NEXT: vfsgnj.vf v8, v8, fa0
+; CHECK-NEXT: ret
+entry:
+ %a = call <vscale x 16 x bfloat> @llvm.riscv.vfsgnj.nxv16bf16.bf16(
+ <vscale x 16 x bfloat> poison,
+ <vscale x 16 x bfloat> %0,
+ bfloat %1,
+ iXLen %2)
+
+ ret <vscale x 16 x bfloat> %a
+}
+
+declare <vscale x 16 x bfloat> @llvm.riscv.vfsgnj.mask.nxv16bf16.bf16(
+ <vscale x 16 x bfloat>,
+ <vscale x 16 x bfloat>,
+ bfloat,
+ <vscale x 16 x i1>,
+ iXLen,
+ iXLen);
+
+define <vscale x 16 x bfloat> @intrinsic_vfsgnj_mask_vf_nxv16bf16_nxv16bf16_bf16(<vscale x 16 x bfloat> %0, <vscale x 16 x bfloat> %1, bfloat %2, <vscale x 16 x i1> %3, iXLen %4) nounwind {
+; CHECK-LABEL: intrinsic_vfsgnj_mask_vf_nxv16bf16_nxv16bf16_bf16:
+; CHECK: # %bb.0: # %entry
+; CHECK-NEXT: vsetvli zero, a0, e16alt, m4, ta, mu
+; CHECK-NEXT: vfsgnj.vf v8, v12, fa0, v0.t
+; CHECK-NEXT: ret
+entry:
+ %a = call <vscale x 16 x bfloat> @llvm.riscv.vfsgnj.mask.nxv16bf16.bf16(
+ <vscale x 16 x bfloat> %0,
+ <vscale x 16 x bfloat> %1,
+ bfloat %2,
+ <vscale x 16 x i1> %3,
+ iXLen %4, iXLen 1)
+
+ ret <vscale x 16 x bfloat> %a
+}
+
+declare <vscale x 32 x bfloat> @llvm.riscv.vfsgnj.nxv32bf16.bf16(
+ <vscale x 32 x bfloat>,
+ <vscale x 32 x bfloat>,
+ bfloat,
+ iXLen);
+
+define <vscale x 32 x bfloat> @intrinsic_vfsgnj_vf_nxv32bf16_nxv32bf16_bf16(<vscale x 32 x bfloat> %0, bfloat %1, iXLen %2) nounwind {
+; CHECK-LABEL: intrinsic_vfsgnj_vf_nxv32bf16_nxv32bf16_bf16:
+; CHECK: # %bb.0: # %entry
+; CHECK-NEXT: vsetvli zero, a0, e16alt, m8, ta, ma
+; CHECK-NEXT: vfsgnj.vf v8, v8, fa0
+; CHECK-NEXT: ret
+entry:
+ %a = call <vscale x 32 x bfloat> @llvm.riscv.vfsgnj.nxv32bf16.bf16(
+ <vscale x 32 x bfloat> poison,
+ <vscale x 32 x bfloat> %0,
+ bfloat %1,
+ iXLen %2)
+
+ ret <vscale x 32 x bfloat> %a
+}
+
+declare <vscale x 32 x bfloat> @llvm.riscv.vfsgnj.mask.nxv32bf16.bf16(
+ <vscale x 32 x bfloat>,
+ <vscale x 32 x bfloat>,
+ bfloat,
+ <vscale x 32 x i1>,
+ iXLen,
+ iXLen);
+
+define <vscale x 32 x bfloat> @intrinsic_vfsgnj_mask_vf_nxv32bf16_nxv32bf16_bf16(<vscale x 32 x bfloat> %0, <vscale x 32 x bfloat> %1, bfloat %2, <vscale x 32 x i1> %3, iXLen %4) nounwind {
+; CHECK-LABEL: intrinsic_vfsgnj_mask_vf_nxv32bf16_nxv32bf16_bf16:
+; CHECK: # %bb.0: # %entry
+; CHECK-NEXT: vsetvli zero, a0, e16alt, m8, ta, mu
+; CHECK-NEXT: vfsgnj.vf v8, v16, fa0, v0.t
+; CHECK-NEXT: ret
+entry:
+ %a = call <vscale x 32 x bfloat> @llvm.riscv.vfsgnj.mask.nxv32bf16.bf16(
+ <vscale x 32 x bfloat> %0,
+ <vscale x 32 x bfloat> %1,
+ bfloat %2,
+ <vscale x 32 x i1> %3,
+ iXLen %4, iXLen 1)
+
+ ret <vscale x 32 x bfloat> %a
+}
+
diff --git a/llvm/test/CodeGen/RISCV/rvv/vfsgnjn-bf.ll b/llvm/test/CodeGen/RISCV/rvv/vfsgnjn-bf.ll
new file mode 100644
index 0000000..08340be
--- /dev/null
+++ b/llvm/test/CodeGen/RISCV/rvv/vfsgnjn-bf.ll
@@ -0,0 +1,571 @@
+; NOTE: Assertions have been autogenerated by utils/update_llc_test_checks.py
+; RUN: sed 's/iXLen/i32/g' %s | llc -mtriple=riscv32 -mattr=+v,+experimental-zvfbfa \
+; RUN: -verify-machineinstrs -target-abi=ilp32d | FileCheck %s
+; RUN: sed 's/iXLen/i64/g' %s | llc -mtriple=riscv64 -mattr=+v,+experimental-zvfbfa \
+; RUN: -verify-machineinstrs -target-abi=lp64d | FileCheck %s
+
+declare <vscale x 1 x bfloat> @llvm.riscv.vfsgnjn.nxv1bf16.nxv1bf16(
+ <vscale x 1 x bfloat>,
+ <vscale x 1 x bfloat>,
+ <vscale x 1 x bfloat>,
+ iXLen);
+
+define <vscale x 1 x bfloat> @intrinsic_vfsgnjn_vv_nxv1bf16_nxv1bf16_nxv1bf16(<vscale x 1 x bfloat> %0, <vscale x 1 x bfloat> %1, iXLen %2) nounwind {
+; CHECK-LABEL: intrinsic_vfsgnjn_vv_nxv1bf16_nxv1bf16_nxv1bf16:
+; CHECK: # %bb.0: # %entry
+; CHECK-NEXT: vsetvli zero, a0, e16alt, mf4, ta, ma
+; CHECK-NEXT: vfsgnjn.vv v8, v8, v9
+; CHECK-NEXT: ret
+entry:
+ %a = call <vscale x 1 x bfloat> @llvm.riscv.vfsgnjn.nxv1bf16.nxv1bf16(
+ <vscale x 1 x bfloat> poison,
+ <vscale x 1 x bfloat> %0,
+ <vscale x 1 x bfloat> %1,
+ iXLen %2)
+
+ ret <vscale x 1 x bfloat> %a
+}
+
+declare <vscale x 1 x bfloat> @llvm.riscv.vfsgnjn.mask.nxv1bf16.nxv1bf16(
+ <vscale x 1 x bfloat>,
+ <vscale x 1 x bfloat>,
+ <vscale x 1 x bfloat>,
+ <vscale x 1 x i1>,
+ iXLen,
+ iXLen);
+
+define <vscale x 1 x bfloat> @intrinsic_vfsgnjn_mask_vv_nxv1bf16_nxv1bf16_nxv1bf16(<vscale x 1 x bfloat> %0, <vscale x 1 x bfloat> %1, <vscale x 1 x bfloat> %2, <vscale x 1 x i1> %3, iXLen %4) nounwind {
+; CHECK-LABEL: intrinsic_vfsgnjn_mask_vv_nxv1bf16_nxv1bf16_nxv1bf16:
+; CHECK: # %bb.0: # %entry
+; CHECK-NEXT: vsetvli zero, a0, e16alt, mf4, ta, mu
+; CHECK-NEXT: vfsgnjn.vv v8, v9, v10, v0.t
+; CHECK-NEXT: ret
+entry:
+ %a = call <vscale x 1 x bfloat> @llvm.riscv.vfsgnjn.mask.nxv1bf16.nxv1bf16(
+ <vscale x 1 x bfloat> %0,
+ <vscale x 1 x bfloat> %1,
+ <vscale x 1 x bfloat> %2,
+ <vscale x 1 x i1> %3,
+ iXLen %4, iXLen 1)
+
+ ret <vscale x 1 x bfloat> %a
+}
+
+declare <vscale x 2 x bfloat> @llvm.riscv.vfsgnjn.nxv2bf16.nxv2bf16(
+ <vscale x 2 x bfloat>,
+ <vscale x 2 x bfloat>,
+ <vscale x 2 x bfloat>,
+ iXLen);
+
+define <vscale x 2 x bfloat> @intrinsic_vfsgnjn_vv_nxv2bf16_nxv2bf16_nxv2bf16(<vscale x 2 x bfloat> %0, <vscale x 2 x bfloat> %1, iXLen %2) nounwind {
+; CHECK-LABEL: intrinsic_vfsgnjn_vv_nxv2bf16_nxv2bf16_nxv2bf16:
+; CHECK: # %bb.0: # %entry
+; CHECK-NEXT: vsetvli zero, a0, e16alt, mf2, ta, ma
+; CHECK-NEXT: vfsgnjn.vv v8, v8, v9
+; CHECK-NEXT: ret
+entry:
+ %a = call <vscale x 2 x bfloat> @llvm.riscv.vfsgnjn.nxv2bf16.nxv2bf16(
+ <vscale x 2 x bfloat> poison,
+ <vscale x 2 x bfloat> %0,
+ <vscale x 2 x bfloat> %1,
+ iXLen %2)
+
+ ret <vscale x 2 x bfloat> %a
+}
+
+declare <vscale x 2 x bfloat> @llvm.riscv.vfsgnjn.mask.nxv2bf16.nxv2bf16(
+ <vscale x 2 x bfloat>,
+ <vscale x 2 x bfloat>,
+ <vscale x 2 x bfloat>,
+ <vscale x 2 x i1>,
+ iXLen,
+ iXLen);
+
+define <vscale x 2 x bfloat> @intrinsic_vfsgnjn_mask_vv_nxv2bf16_nxv2bf16_nxv2bf16(<vscale x 2 x bfloat> %0, <vscale x 2 x bfloat> %1, <vscale x 2 x bfloat> %2, <vscale x 2 x i1> %3, iXLen %4) nounwind {
+; CHECK-LABEL: intrinsic_vfsgnjn_mask_vv_nxv2bf16_nxv2bf16_nxv2bf16:
+; CHECK: # %bb.0: # %entry
+; CHECK-NEXT: vsetvli zero, a0, e16alt, mf2, ta, mu
+; CHECK-NEXT: vfsgnjn.vv v8, v9, v10, v0.t
+; CHECK-NEXT: ret
+entry:
+ %a = call <vscale x 2 x bfloat> @llvm.riscv.vfsgnjn.mask.nxv2bf16.nxv2bf16(
+ <vscale x 2 x bfloat> %0,
+ <vscale x 2 x bfloat> %1,
+ <vscale x 2 x bfloat> %2,
+ <vscale x 2 x i1> %3,
+ iXLen %4, iXLen 1)
+
+ ret <vscale x 2 x bfloat> %a
+}
+
+declare <vscale x 4 x bfloat> @llvm.riscv.vfsgnjn.nxv4bf16.nxv4bf16(
+ <vscale x 4 x bfloat>,
+ <vscale x 4 x bfloat>,
+ <vscale x 4 x bfloat>,
+ iXLen);
+
+define <vscale x 4 x bfloat> @intrinsic_vfsgnjn_vv_nxv4bf16_nxv4bf16_nxv4bf16(<vscale x 4 x bfloat> %0, <vscale x 4 x bfloat> %1, iXLen %2) nounwind {
+; CHECK-LABEL: intrinsic_vfsgnjn_vv_nxv4bf16_nxv4bf16_nxv4bf16:
+; CHECK: # %bb.0: # %entry
+; CHECK-NEXT: vsetvli zero, a0, e16alt, m1, ta, ma
+; CHECK-NEXT: vfsgnjn.vv v8, v8, v9
+; CHECK-NEXT: ret
+entry:
+ %a = call <vscale x 4 x bfloat> @llvm.riscv.vfsgnjn.nxv4bf16.nxv4bf16(
+ <vscale x 4 x bfloat> poison,
+ <vscale x 4 x bfloat> %0,
+ <vscale x 4 x bfloat> %1,
+ iXLen %2)
+
+ ret <vscale x 4 x bfloat> %a
+}
+
+declare <vscale x 4 x bfloat> @llvm.riscv.vfsgnjn.mask.nxv4bf16.nxv4bf16(
+ <vscale x 4 x bfloat>,
+ <vscale x 4 x bfloat>,
+ <vscale x 4 x bfloat>,
+ <vscale x 4 x i1>,
+ iXLen,
+ iXLen);
+
+define <vscale x 4 x bfloat> @intrinsic_vfsgnjn_mask_vv_nxv4bf16_nxv4bf16_nxv4bf16(<vscale x 4 x bfloat> %0, <vscale x 4 x bfloat> %1, <vscale x 4 x bfloat> %2, <vscale x 4 x i1> %3, iXLen %4) nounwind {
+; CHECK-LABEL: intrinsic_vfsgnjn_mask_vv_nxv4bf16_nxv4bf16_nxv4bf16:
+; CHECK: # %bb.0: # %entry
+; CHECK-NEXT: vsetvli zero, a0, e16alt, m1, ta, mu
+; CHECK-NEXT: vfsgnjn.vv v8, v9, v10, v0.t
+; CHECK-NEXT: ret
+entry:
+ %a = call <vscale x 4 x bfloat> @llvm.riscv.vfsgnjn.mask.nxv4bf16.nxv4bf16(
+ <vscale x 4 x bfloat> %0,
+ <vscale x 4 x bfloat> %1,
+ <vscale x 4 x bfloat> %2,
+ <vscale x 4 x i1> %3,
+ iXLen %4, iXLen 1)
+
+ ret <vscale x 4 x bfloat> %a
+}
+
+declare <vscale x 8 x bfloat> @llvm.riscv.vfsgnjn.nxv8bf16.nxv8bf16(
+ <vscale x 8 x bfloat>,
+ <vscale x 8 x bfloat>,
+ <vscale x 8 x bfloat>,
+ iXLen);
+
+define <vscale x 8 x bfloat> @intrinsic_vfsgnjn_vv_nxv8bf16_nxv8bf16_nxv8bf16(<vscale x 8 x bfloat> %0, <vscale x 8 x bfloat> %1, iXLen %2) nounwind {
+; CHECK-LABEL: intrinsic_vfsgnjn_vv_nxv8bf16_nxv8bf16_nxv8bf16:
+; CHECK: # %bb.0: # %entry
+; CHECK-NEXT: vsetvli zero, a0, e16alt, m2, ta, ma
+; CHECK-NEXT: vfsgnjn.vv v8, v8, v10
+; CHECK-NEXT: ret
+entry:
+ %a = call <vscale x 8 x bfloat> @llvm.riscv.vfsgnjn.nxv8bf16.nxv8bf16(
+ <vscale x 8 x bfloat> poison,
+ <vscale x 8 x bfloat> %0,
+ <vscale x 8 x bfloat> %1,
+ iXLen %2)
+
+ ret <vscale x 8 x bfloat> %a
+}
+
+declare <vscale x 8 x bfloat> @llvm.riscv.vfsgnjn.mask.nxv8bf16.nxv8bf16(
+ <vscale x 8 x bfloat>,
+ <vscale x 8 x bfloat>,
+ <vscale x 8 x bfloat>,
+ <vscale x 8 x i1>,
+ iXLen,
+ iXLen);
+
+define <vscale x 8 x bfloat> @intrinsic_vfsgnjn_mask_vv_nxv8bf16_nxv8bf16_nxv8bf16(<vscale x 8 x bfloat> %0, <vscale x 8 x bfloat> %1, <vscale x 8 x bfloat> %2, <vscale x 8 x i1> %3, iXLen %4) nounwind {
+; CHECK-LABEL: intrinsic_vfsgnjn_mask_vv_nxv8bf16_nxv8bf16_nxv8bf16:
+; CHECK: # %bb.0: # %entry
+; CHECK-NEXT: vsetvli zero, a0, e16alt, m2, ta, mu
+; CHECK-NEXT: vfsgnjn.vv v8, v10, v12, v0.t
+; CHECK-NEXT: ret
+entry:
+ %a = call <vscale x 8 x bfloat> @llvm.riscv.vfsgnjn.mask.nxv8bf16.nxv8bf16(
+ <vscale x 8 x bfloat> %0,
+ <vscale x 8 x bfloat> %1,
+ <vscale x 8 x bfloat> %2,
+ <vscale x 8 x i1> %3,
+ iXLen %4, iXLen 1)
+
+ ret <vscale x 8 x bfloat> %a
+}
+
+declare <vscale x 16 x bfloat> @llvm.riscv.vfsgnjn.nxv16bf16.nxv16bf16(
+ <vscale x 16 x bfloat>,
+ <vscale x 16 x bfloat>,
+ <vscale x 16 x bfloat>,
+ iXLen);
+
+define <vscale x 16 x bfloat> @intrinsic_vfsgnjn_vv_nxv16bf16_nxv16bf16_nxv16bf16(<vscale x 16 x bfloat> %0, <vscale x 16 x bfloat> %1, iXLen %2) nounwind {
+; CHECK-LABEL: intrinsic_vfsgnjn_vv_nxv16bf16_nxv16bf16_nxv16bf16:
+; CHECK: # %bb.0: # %entry
+; CHECK-NEXT: vsetvli zero, a0, e16alt, m4, ta, ma
+; CHECK-NEXT: vfsgnjn.vv v8, v8, v12
+; CHECK-NEXT: ret
+entry:
+ %a = call <vscale x 16 x bfloat> @llvm.riscv.vfsgnjn.nxv16bf16.nxv16bf16(
+ <vscale x 16 x bfloat> poison,
+ <vscale x 16 x bfloat> %0,
+ <vscale x 16 x bfloat> %1,
+ iXLen %2)
+
+ ret <vscale x 16 x bfloat> %a
+}
+
+declare <vscale x 16 x bfloat> @llvm.riscv.vfsgnjn.mask.nxv16bf16.nxv16bf16(
+ <vscale x 16 x bfloat>,
+ <vscale x 16 x bfloat>,
+ <vscale x 16 x bfloat>,
+ <vscale x 16 x i1>,
+ iXLen,
+ iXLen);
+
+define <vscale x 16 x bfloat> @intrinsic_vfsgnjn_mask_vv_nxv16bf16_nxv16bf16_nxv16bf16(<vscale x 16 x bfloat> %0, <vscale x 16 x bfloat> %1, <vscale x 16 x bfloat> %2, <vscale x 16 x i1> %3, iXLen %4) nounwind {
+; CHECK-LABEL: intrinsic_vfsgnjn_mask_vv_nxv16bf16_nxv16bf16_nxv16bf16:
+; CHECK: # %bb.0: # %entry
+; CHECK-NEXT: vsetvli zero, a0, e16alt, m4, ta, mu
+; CHECK-NEXT: vfsgnjn.vv v8, v12, v16, v0.t
+; CHECK-NEXT: ret
+entry:
+ %a = call <vscale x 16 x bfloat> @llvm.riscv.vfsgnjn.mask.nxv16bf16.nxv16bf16(
+ <vscale x 16 x bfloat> %0,
+ <vscale x 16 x bfloat> %1,
+ <vscale x 16 x bfloat> %2,
+ <vscale x 16 x i1> %3,
+ iXLen %4, iXLen 1)
+
+ ret <vscale x 16 x bfloat> %a
+}
+
+declare <vscale x 32 x bfloat> @llvm.riscv.vfsgnjn.nxv32bf16.nxv32bf16(
+ <vscale x 32 x bfloat>,
+ <vscale x 32 x bfloat>,
+ <vscale x 32 x bfloat>,
+ iXLen);
+
+define <vscale x 32 x bfloat> @intrinsic_vfsgnjn_vv_nxv32bf16_nxv32bf16_nxv32bf16(<vscale x 32 x bfloat> %0, <vscale x 32 x bfloat> %1, iXLen %2) nounwind {
+; CHECK-LABEL: intrinsic_vfsgnjn_vv_nxv32bf16_nxv32bf16_nxv32bf16:
+; CHECK: # %bb.0: # %entry
+; CHECK-NEXT: vsetvli zero, a0, e16alt, m8, ta, ma
+; CHECK-NEXT: vfsgnjn.vv v8, v8, v16
+; CHECK-NEXT: ret
+entry:
+ %a = call <vscale x 32 x bfloat> @llvm.riscv.vfsgnjn.nxv32bf16.nxv32bf16(
+ <vscale x 32 x bfloat> poison,
+ <vscale x 32 x bfloat> %0,
+ <vscale x 32 x bfloat> %1,
+ iXLen %2)
+
+ ret <vscale x 32 x bfloat> %a
+}
+
+declare <vscale x 32 x bfloat> @llvm.riscv.vfsgnjn.mask.nxv32bf16.nxv32bf16(
+ <vscale x 32 x bfloat>,
+ <vscale x 32 x bfloat>,
+ <vscale x 32 x bfloat>,
+ <vscale x 32 x i1>,
+ iXLen,
+ iXLen);
+
+define <vscale x 32 x bfloat> @intrinsic_vfsgnjn_mask_vv_nxv32bf16_nxv32bf16_nxv32bf16(<vscale x 32 x bfloat> %0, <vscale x 32 x bfloat> %1, <vscale x 32 x bfloat> %2, <vscale x 32 x i1> %3, iXLen %4) nounwind {
+; CHECK-LABEL: intrinsic_vfsgnjn_mask_vv_nxv32bf16_nxv32bf16_nxv32bf16:
+; CHECK: # %bb.0: # %entry
+; CHECK-NEXT: vl8re16.v v24, (a0)
+; CHECK-NEXT: vsetvli zero, a1, e16alt, m8, ta, mu
+; CHECK-NEXT: vfsgnjn.vv v8, v16, v24, v0.t
+; CHECK-NEXT: ret
+entry:
+ %a = call <vscale x 32 x bfloat> @llvm.riscv.vfsgnjn.mask.nxv32bf16.nxv32bf16(
+ <vscale x 32 x bfloat> %0,
+ <vscale x 32 x bfloat> %1,
+ <vscale x 32 x bfloat> %2,
+ <vscale x 32 x i1> %3,
+ iXLen %4, iXLen 1)
+
+ ret <vscale x 32 x bfloat> %a
+}
+
+declare <vscale x 1 x bfloat> @llvm.riscv.vfsgnjn.nxv1bf16.bf16(
+ <vscale x 1 x bfloat>,
+ <vscale x 1 x bfloat>,
+ bfloat,
+ iXLen);
+
+define <vscale x 1 x bfloat> @intrinsic_vfsgnjn_vf_nxv1bf16_nxv1bf16_bf16(<vscale x 1 x bfloat> %0, bfloat %1, iXLen %2) nounwind {
+; CHECK-LABEL: intrinsic_vfsgnjn_vf_nxv1bf16_nxv1bf16_bf16:
+; CHECK: # %bb.0: # %entry
+; CHECK-NEXT: vsetvli zero, a0, e16alt, mf4, ta, ma
+; CHECK-NEXT: vfsgnjn.vf v8, v8, fa0
+; CHECK-NEXT: ret
+entry:
+ %a = call <vscale x 1 x bfloat> @llvm.riscv.vfsgnjn.nxv1bf16.bf16(
+ <vscale x 1 x bfloat> poison,
+ <vscale x 1 x bfloat> %0,
+ bfloat %1,
+ iXLen %2)
+
+ ret <vscale x 1 x bfloat> %a
+}
+
+declare <vscale x 1 x bfloat> @llvm.riscv.vfsgnjn.mask.nxv1bf16.bf16(
+ <vscale x 1 x bfloat>,
+ <vscale x 1 x bfloat>,
+ bfloat,
+ <vscale x 1 x i1>,
+ iXLen,
+ iXLen);
+
+define <vscale x 1 x bfloat> @intrinsic_vfsgnjn_mask_vf_nxv1bf16_nxv1bf16_bf16(<vscale x 1 x bfloat> %0, <vscale x 1 x bfloat> %1, bfloat %2, <vscale x 1 x i1> %3, iXLen %4) nounwind {
+; CHECK-LABEL: intrinsic_vfsgnjn_mask_vf_nxv1bf16_nxv1bf16_bf16:
+; CHECK: # %bb.0: # %entry
+; CHECK-NEXT: vsetvli zero, a0, e16alt, mf4, ta, mu
+; CHECK-NEXT: vfsgnjn.vf v8, v9, fa0, v0.t
+; CHECK-NEXT: ret
+entry:
+ %a = call <vscale x 1 x bfloat> @llvm.riscv.vfsgnjn.mask.nxv1bf16.bf16(
+ <vscale x 1 x bfloat> %0,
+ <vscale x 1 x bfloat> %1,
+ bfloat %2,
+ <vscale x 1 x i1> %3,
+ iXLen %4, iXLen 1)
+
+ ret <vscale x 1 x bfloat> %a
+}
+
+declare <vscale x 2 x bfloat> @llvm.riscv.vfsgnjn.nxv2bf16.bf16(
+ <vscale x 2 x bfloat>,
+ <vscale x 2 x bfloat>,
+ bfloat,
+ iXLen);
+
+define <vscale x 2 x bfloat> @intrinsic_vfsgnjn_vf_nxv2bf16_nxv2bf16_bf16(<vscale x 2 x bfloat> %0, bfloat %1, iXLen %2) nounwind {
+; CHECK-LABEL: intrinsic_vfsgnjn_vf_nxv2bf16_nxv2bf16_bf16:
+; CHECK: # %bb.0: # %entry
+; CHECK-NEXT: vsetvli zero, a0, e16alt, mf2, ta, ma
+; CHECK-NEXT: vfsgnjn.vf v8, v8, fa0
+; CHECK-NEXT: ret
+entry:
+ %a = call <vscale x 2 x bfloat> @llvm.riscv.vfsgnjn.nxv2bf16.bf16(
+ <vscale x 2 x bfloat> poison,
+ <vscale x 2 x bfloat> %0,
+ bfloat %1,
+ iXLen %2)
+
+ ret <vscale x 2 x bfloat> %a
+}
+
+declare <vscale x 2 x bfloat> @llvm.riscv.vfsgnjn.mask.nxv2bf16.bf16(
+ <vscale x 2 x bfloat>,
+ <vscale x 2 x bfloat>,
+ bfloat,
+ <vscale x 2 x i1>,
+ iXLen,
+ iXLen);
+
+define <vscale x 2 x bfloat> @intrinsic_vfsgnjn_mask_vf_nxv2bf16_nxv2bf16_bf16(<vscale x 2 x bfloat> %0, <vscale x 2 x bfloat> %1, bfloat %2, <vscale x 2 x i1> %3, iXLen %4) nounwind {
+; CHECK-LABEL: intrinsic_vfsgnjn_mask_vf_nxv2bf16_nxv2bf16_bf16:
+; CHECK: # %bb.0: # %entry
+; CHECK-NEXT: vsetvli zero, a0, e16alt, mf2, ta, mu
+; CHECK-NEXT: vfsgnjn.vf v8, v9, fa0, v0.t
+; CHECK-NEXT: ret
+entry:
+ %a = call <vscale x 2 x bfloat> @llvm.riscv.vfsgnjn.mask.nxv2bf16.bf16(
+ <vscale x 2 x bfloat> %0,
+ <vscale x 2 x bfloat> %1,
+ bfloat %2,
+ <vscale x 2 x i1> %3,
+ iXLen %4, iXLen 1)
+
+ ret <vscale x 2 x bfloat> %a
+}
+
+declare <vscale x 4 x bfloat> @llvm.riscv.vfsgnjn.nxv4bf16.bf16(
+ <vscale x 4 x bfloat>,
+ <vscale x 4 x bfloat>,
+ bfloat,
+ iXLen);
+
+define <vscale x 4 x bfloat> @intrinsic_vfsgnjn_vf_nxv4bf16_nxv4bf16_bf16(<vscale x 4 x bfloat> %0, bfloat %1, iXLen %2) nounwind {
+; CHECK-LABEL: intrinsic_vfsgnjn_vf_nxv4bf16_nxv4bf16_bf16:
+; CHECK: # %bb.0: # %entry
+; CHECK-NEXT: vsetvli zero, a0, e16alt, m1, ta, ma
+; CHECK-NEXT: vfsgnjn.vf v8, v8, fa0
+; CHECK-NEXT: ret
+entry:
+ %a = call <vscale x 4 x bfloat> @llvm.riscv.vfsgnjn.nxv4bf16.bf16(
+ <vscale x 4 x bfloat> poison,
+ <vscale x 4 x bfloat> %0,
+ bfloat %1,
+ iXLen %2)
+
+ ret <vscale x 4 x bfloat> %a
+}
+
+declare <vscale x 4 x bfloat> @llvm.riscv.vfsgnjn.mask.nxv4bf16.bf16(
+ <vscale x 4 x bfloat>,
+ <vscale x 4 x bfloat>,
+ bfloat,
+ <vscale x 4 x i1>,
+ iXLen,
+ iXLen);
+
+define <vscale x 4 x bfloat> @intrinsic_vfsgnjn_mask_vf_nxv4bf16_nxv4bf16_bf16(<vscale x 4 x bfloat> %0, <vscale x 4 x bfloat> %1, bfloat %2, <vscale x 4 x i1> %3, iXLen %4) nounwind {
+; CHECK-LABEL: intrinsic_vfsgnjn_mask_vf_nxv4bf16_nxv4bf16_bf16:
+; CHECK: # %bb.0: # %entry
+; CHECK-NEXT: vsetvli zero, a0, e16alt, m1, ta, mu
+; CHECK-NEXT: vfsgnjn.vf v8, v9, fa0, v0.t
+; CHECK-NEXT: ret
+entry:
+ %a = call <vscale x 4 x bfloat> @llvm.riscv.vfsgnjn.mask.nxv4bf16.bf16(
+ <vscale x 4 x bfloat> %0,
+ <vscale x 4 x bfloat> %1,
+ bfloat %2,
+ <vscale x 4 x i1> %3,
+ iXLen %4, iXLen 1)
+
+ ret <vscale x 4 x bfloat> %a
+}
+
+declare <vscale x 8 x bfloat> @llvm.riscv.vfsgnjn.nxv8bf16.bf16(
+ <vscale x 8 x bfloat>,
+ <vscale x 8 x bfloat>,
+ bfloat,
+ iXLen);
+
+define <vscale x 8 x bfloat> @intrinsic_vfsgnjn_vf_nxv8bf16_nxv8bf16_bf16(<vscale x 8 x bfloat> %0, bfloat %1, iXLen %2) nounwind {
+; CHECK-LABEL: intrinsic_vfsgnjn_vf_nxv8bf16_nxv8bf16_bf16:
+; CHECK: # %bb.0: # %entry
+; CHECK-NEXT: vsetvli zero, a0, e16alt, m2, ta, ma
+; CHECK-NEXT: vfsgnjn.vf v8, v8, fa0
+; CHECK-NEXT: ret
+entry:
+ %a = call <vscale x 8 x bfloat> @llvm.riscv.vfsgnjn.nxv8bf16.bf16(
+ <vscale x 8 x bfloat> poison,
+ <vscale x 8 x bfloat> %0,
+ bfloat %1,
+ iXLen %2)
+
+ ret <vscale x 8 x bfloat> %a
+}
+
+declare <vscale x 8 x bfloat> @llvm.riscv.vfsgnjn.mask.nxv8bf16.bf16(
+ <vscale x 8 x bfloat>,
+ <vscale x 8 x bfloat>,
+ bfloat,
+ <vscale x 8 x i1>,
+ iXLen,
+ iXLen);
+
+define <vscale x 8 x bfloat> @intrinsic_vfsgnjn_mask_vf_nxv8bf16_nxv8bf16_bf16(<vscale x 8 x bfloat> %0, <vscale x 8 x bfloat> %1, bfloat %2, <vscale x 8 x i1> %3, iXLen %4) nounwind {
+; CHECK-LABEL: intrinsic_vfsgnjn_mask_vf_nxv8bf16_nxv8bf16_bf16:
+; CHECK: # %bb.0: # %entry
+; CHECK-NEXT: vsetvli zero, a0, e16alt, m2, ta, mu
+; CHECK-NEXT: vfsgnjn.vf v8, v10, fa0, v0.t
+; CHECK-NEXT: ret
+entry:
+ %a = call <vscale x 8 x bfloat> @llvm.riscv.vfsgnjn.mask.nxv8bf16.bf16(
+ <vscale x 8 x bfloat> %0,
+ <vscale x 8 x bfloat> %1,
+ bfloat %2,
+ <vscale x 8 x i1> %3,
+ iXLen %4, iXLen 1)
+
+ ret <vscale x 8 x bfloat> %a
+}
+
+declare <vscale x 16 x bfloat> @llvm.riscv.vfsgnjn.nxv16bf16.bf16(
+ <vscale x 16 x bfloat>,
+ <vscale x 16 x bfloat>,
+ bfloat,
+ iXLen);
+
+define <vscale x 16 x bfloat> @intrinsic_vfsgnjn_vf_nxv16bf16_nxv16bf16_bf16(<vscale x 16 x bfloat> %0, bfloat %1, iXLen %2) nounwind {
+; CHECK-LABEL: intrinsic_vfsgnjn_vf_nxv16bf16_nxv16bf16_bf16:
+; CHECK: # %bb.0: # %entry
+; CHECK-NEXT: vsetvli zero, a0, e16alt, m4, ta, ma
+; CHECK-NEXT: vfsgnjn.vf v8, v8, fa0
+; CHECK-NEXT: ret
+entry:
+ %a = call <vscale x 16 x bfloat> @llvm.riscv.vfsgnjn.nxv16bf16.bf16(
+ <vscale x 16 x bfloat> poison,
+ <vscale x 16 x bfloat> %0,
+ bfloat %1,
+ iXLen %2)
+
+ ret <vscale x 16 x bfloat> %a
+}
+
+declare <vscale x 16 x bfloat> @llvm.riscv.vfsgnjn.mask.nxv16bf16.bf16(
+ <vscale x 16 x bfloat>,
+ <vscale x 16 x bfloat>,
+ bfloat,
+ <vscale x 16 x i1>,
+ iXLen,
+ iXLen);
+
+define <vscale x 16 x bfloat> @intrinsic_vfsgnjn_mask_vf_nxv16bf16_nxv16bf16_bf16(<vscale x 16 x bfloat> %0, <vscale x 16 x bfloat> %1, bfloat %2, <vscale x 16 x i1> %3, iXLen %4) nounwind {
+; CHECK-LABEL: intrinsic_vfsgnjn_mask_vf_nxv16bf16_nxv16bf16_bf16:
+; CHECK: # %bb.0: # %entry
+; CHECK-NEXT: vsetvli zero, a0, e16alt, m4, ta, mu
+; CHECK-NEXT: vfsgnjn.vf v8, v12, fa0, v0.t
+; CHECK-NEXT: ret
+entry:
+ %a = call <vscale x 16 x bfloat> @llvm.riscv.vfsgnjn.mask.nxv16bf16.bf16(
+ <vscale x 16 x bfloat> %0,
+ <vscale x 16 x bfloat> %1,
+ bfloat %2,
+ <vscale x 16 x i1> %3,
+ iXLen %4, iXLen 1)
+
+ ret <vscale x 16 x bfloat> %a
+}
+
+declare <vscale x 32 x bfloat> @llvm.riscv.vfsgnjn.nxv32bf16.bf16(
+ <vscale x 32 x bfloat>,
+ <vscale x 32 x bfloat>,
+ bfloat,
+ iXLen);
+
+define <vscale x 32 x bfloat> @intrinsic_vfsgnjn_vf_nxv32bf16_nxv32bf16_bf16(<vscale x 32 x bfloat> %0, bfloat %1, iXLen %2) nounwind {
+; CHECK-LABEL: intrinsic_vfsgnjn_vf_nxv32bf16_nxv32bf16_bf16:
+; CHECK: # %bb.0: # %entry
+; CHECK-NEXT: vsetvli zero, a0, e16alt, m8, ta, ma
+; CHECK-NEXT: vfsgnjn.vf v8, v8, fa0
+; CHECK-NEXT: ret
+entry:
+ %a = call <vscale x 32 x bfloat> @llvm.riscv.vfsgnjn.nxv32bf16.bf16(
+ <vscale x 32 x bfloat> poison,
+ <vscale x 32 x bfloat> %0,
+ bfloat %1,
+ iXLen %2)
+
+ ret <vscale x 32 x bfloat> %a
+}
+
+declare <vscale x 32 x bfloat> @llvm.riscv.vfsgnjn.mask.nxv32bf16.bf16(
+ <vscale x 32 x bfloat>,
+ <vscale x 32 x bfloat>,
+ bfloat,
+ <vscale x 32 x i1>,
+ iXLen,
+ iXLen);
+
+define <vscale x 32 x bfloat> @intrinsic_vfsgnjn_mask_vf_nxv32bf16_nxv32bf16_bf16(<vscale x 32 x bfloat> %0, <vscale x 32 x bfloat> %1, bfloat %2, <vscale x 32 x i1> %3, iXLen %4) nounwind {
+; CHECK-LABEL: intrinsic_vfsgnjn_mask_vf_nxv32bf16_nxv32bf16_bf16:
+; CHECK: # %bb.0: # %entry
+; CHECK-NEXT: vsetvli zero, a0, e16alt, m8, ta, mu
+; CHECK-NEXT: vfsgnjn.vf v8, v16, fa0, v0.t
+; CHECK-NEXT: ret
+entry:
+ %a = call <vscale x 32 x bfloat> @llvm.riscv.vfsgnjn.mask.nxv32bf16.bf16(
+ <vscale x 32 x bfloat> %0,
+ <vscale x 32 x bfloat> %1,
+ bfloat %2,
+ <vscale x 32 x i1> %3,
+ iXLen %4, iXLen 1)
+
+ ret <vscale x 32 x bfloat> %a
+}
+
diff --git a/llvm/test/CodeGen/RISCV/rvv/vfsgnjx-bf.ll b/llvm/test/CodeGen/RISCV/rvv/vfsgnjx-bf.ll
new file mode 100644
index 0000000..e51a42e
--- /dev/null
+++ b/llvm/test/CodeGen/RISCV/rvv/vfsgnjx-bf.ll
@@ -0,0 +1,571 @@
+; NOTE: Assertions have been autogenerated by utils/update_llc_test_checks.py
+; RUN: sed 's/iXLen/i32/g' %s | llc -mtriple=riscv32 -mattr=+v,+experimental-zvfbfa \
+; RUN: -verify-machineinstrs -target-abi=ilp32d | FileCheck %s
+; RUN: sed 's/iXLen/i64/g' %s | llc -mtriple=riscv64 -mattr=+v,+experimental-zvfbfa \
+; RUN: -verify-machineinstrs -target-abi=lp64d | FileCheck %s
+
+declare <vscale x 1 x bfloat> @llvm.riscv.vfsgnjx.nxv1bf16.nxv1bf16(
+ <vscale x 1 x bfloat>,
+ <vscale x 1 x bfloat>,
+ <vscale x 1 x bfloat>,
+ iXLen);
+
+define <vscale x 1 x bfloat> @intrinsic_vfsgnjx_vv_nxv1bf16_nxv1bf16_nxv1bf16(<vscale x 1 x bfloat> %0, <vscale x 1 x bfloat> %1, iXLen %2) nounwind {
+; CHECK-LABEL: intrinsic_vfsgnjx_vv_nxv1bf16_nxv1bf16_nxv1bf16:
+; CHECK: # %bb.0: # %entry
+; CHECK-NEXT: vsetvli zero, a0, e16alt, mf4, ta, ma
+; CHECK-NEXT: vfsgnjx.vv v8, v8, v9
+; CHECK-NEXT: ret
+entry:
+ %a = call <vscale x 1 x bfloat> @llvm.riscv.vfsgnjx.nxv1bf16.nxv1bf16(
+ <vscale x 1 x bfloat> poison,
+ <vscale x 1 x bfloat> %0,
+ <vscale x 1 x bfloat> %1,
+ iXLen %2)
+
+ ret <vscale x 1 x bfloat> %a
+}
+
+declare <vscale x 1 x bfloat> @llvm.riscv.vfsgnjx.mask.nxv1bf16.nxv1bf16(
+ <vscale x 1 x bfloat>,
+ <vscale x 1 x bfloat>,
+ <vscale x 1 x bfloat>,
+ <vscale x 1 x i1>,
+ iXLen,
+ iXLen);
+
+define <vscale x 1 x bfloat> @intrinsic_vfsgnjx_mask_vv_nxv1bf16_nxv1bf16_nxv1bf16(<vscale x 1 x bfloat> %0, <vscale x 1 x bfloat> %1, <vscale x 1 x bfloat> %2, <vscale x 1 x i1> %3, iXLen %4) nounwind {
+; CHECK-LABEL: intrinsic_vfsgnjx_mask_vv_nxv1bf16_nxv1bf16_nxv1bf16:
+; CHECK: # %bb.0: # %entry
+; CHECK-NEXT: vsetvli zero, a0, e16alt, mf4, ta, mu
+; CHECK-NEXT: vfsgnjx.vv v8, v9, v10, v0.t
+; CHECK-NEXT: ret
+entry:
+ %a = call <vscale x 1 x bfloat> @llvm.riscv.vfsgnjx.mask.nxv1bf16.nxv1bf16(
+ <vscale x 1 x bfloat> %0,
+ <vscale x 1 x bfloat> %1,
+ <vscale x 1 x bfloat> %2,
+ <vscale x 1 x i1> %3,
+ iXLen %4, iXLen 1)
+
+ ret <vscale x 1 x bfloat> %a
+}
+
+declare <vscale x 2 x bfloat> @llvm.riscv.vfsgnjx.nxv2bf16.nxv2bf16(
+ <vscale x 2 x bfloat>,
+ <vscale x 2 x bfloat>,
+ <vscale x 2 x bfloat>,
+ iXLen);
+
+define <vscale x 2 x bfloat> @intrinsic_vfsgnjx_vv_nxv2bf16_nxv2bf16_nxv2bf16(<vscale x 2 x bfloat> %0, <vscale x 2 x bfloat> %1, iXLen %2) nounwind {
+; CHECK-LABEL: intrinsic_vfsgnjx_vv_nxv2bf16_nxv2bf16_nxv2bf16:
+; CHECK: # %bb.0: # %entry
+; CHECK-NEXT: vsetvli zero, a0, e16alt, mf2, ta, ma
+; CHECK-NEXT: vfsgnjx.vv v8, v8, v9
+; CHECK-NEXT: ret
+entry:
+ %a = call <vscale x 2 x bfloat> @llvm.riscv.vfsgnjx.nxv2bf16.nxv2bf16(
+ <vscale x 2 x bfloat> poison,
+ <vscale x 2 x bfloat> %0,
+ <vscale x 2 x bfloat> %1,
+ iXLen %2)
+
+ ret <vscale x 2 x bfloat> %a
+}
+
+declare <vscale x 2 x bfloat> @llvm.riscv.vfsgnjx.mask.nxv2bf16.nxv2bf16(
+ <vscale x 2 x bfloat>,
+ <vscale x 2 x bfloat>,
+ <vscale x 2 x bfloat>,
+ <vscale x 2 x i1>,
+ iXLen,
+ iXLen);
+
+define <vscale x 2 x bfloat> @intrinsic_vfsgnjx_mask_vv_nxv2bf16_nxv2bf16_nxv2bf16(<vscale x 2 x bfloat> %0, <vscale x 2 x bfloat> %1, <vscale x 2 x bfloat> %2, <vscale x 2 x i1> %3, iXLen %4) nounwind {
+; CHECK-LABEL: intrinsic_vfsgnjx_mask_vv_nxv2bf16_nxv2bf16_nxv2bf16:
+; CHECK: # %bb.0: # %entry
+; CHECK-NEXT: vsetvli zero, a0, e16alt, mf2, ta, mu
+; CHECK-NEXT: vfsgnjx.vv v8, v9, v10, v0.t
+; CHECK-NEXT: ret
+entry:
+ %a = call <vscale x 2 x bfloat> @llvm.riscv.vfsgnjx.mask.nxv2bf16.nxv2bf16(
+ <vscale x 2 x bfloat> %0,
+ <vscale x 2 x bfloat> %1,
+ <vscale x 2 x bfloat> %2,
+ <vscale x 2 x i1> %3,
+ iXLen %4, iXLen 1)
+
+ ret <vscale x 2 x bfloat> %a
+}
+
+declare <vscale x 4 x bfloat> @llvm.riscv.vfsgnjx.nxv4bf16.nxv4bf16(
+ <vscale x 4 x bfloat>,
+ <vscale x 4 x bfloat>,
+ <vscale x 4 x bfloat>,
+ iXLen);
+
+define <vscale x 4 x bfloat> @intrinsic_vfsgnjx_vv_nxv4bf16_nxv4bf16_nxv4bf16(<vscale x 4 x bfloat> %0, <vscale x 4 x bfloat> %1, iXLen %2) nounwind {
+; CHECK-LABEL: intrinsic_vfsgnjx_vv_nxv4bf16_nxv4bf16_nxv4bf16:
+; CHECK: # %bb.0: # %entry
+; CHECK-NEXT: vsetvli zero, a0, e16alt, m1, ta, ma
+; CHECK-NEXT: vfsgnjx.vv v8, v8, v9
+; CHECK-NEXT: ret
+entry:
+ %a = call <vscale x 4 x bfloat> @llvm.riscv.vfsgnjx.nxv4bf16.nxv4bf16(
+ <vscale x 4 x bfloat> poison,
+ <vscale x 4 x bfloat> %0,
+ <vscale x 4 x bfloat> %1,
+ iXLen %2)
+
+ ret <vscale x 4 x bfloat> %a
+}
+
+declare <vscale x 4 x bfloat> @llvm.riscv.vfsgnjx.mask.nxv4bf16.nxv4bf16(
+ <vscale x 4 x bfloat>,
+ <vscale x 4 x bfloat>,
+ <vscale x 4 x bfloat>,
+ <vscale x 4 x i1>,
+ iXLen,
+ iXLen);
+
+define <vscale x 4 x bfloat> @intrinsic_vfsgnjx_mask_vv_nxv4bf16_nxv4bf16_nxv4bf16(<vscale x 4 x bfloat> %0, <vscale x 4 x bfloat> %1, <vscale x 4 x bfloat> %2, <vscale x 4 x i1> %3, iXLen %4) nounwind {
+; CHECK-LABEL: intrinsic_vfsgnjx_mask_vv_nxv4bf16_nxv4bf16_nxv4bf16:
+; CHECK: # %bb.0: # %entry
+; CHECK-NEXT: vsetvli zero, a0, e16alt, m1, ta, mu
+; CHECK-NEXT: vfsgnjx.vv v8, v9, v10, v0.t
+; CHECK-NEXT: ret
+entry:
+ %a = call <vscale x 4 x bfloat> @llvm.riscv.vfsgnjx.mask.nxv4bf16.nxv4bf16(
+ <vscale x 4 x bfloat> %0,
+ <vscale x 4 x bfloat> %1,
+ <vscale x 4 x bfloat> %2,
+ <vscale x 4 x i1> %3,
+ iXLen %4, iXLen 1)
+
+ ret <vscale x 4 x bfloat> %a
+}
+
+declare <vscale x 8 x bfloat> @llvm.riscv.vfsgnjx.nxv8bf16.nxv8bf16(
+ <vscale x 8 x bfloat>,
+ <vscale x 8 x bfloat>,
+ <vscale x 8 x bfloat>,
+ iXLen);
+
+define <vscale x 8 x bfloat> @intrinsic_vfsgnjx_vv_nxv8bf16_nxv8bf16_nxv8bf16(<vscale x 8 x bfloat> %0, <vscale x 8 x bfloat> %1, iXLen %2) nounwind {
+; CHECK-LABEL: intrinsic_vfsgnjx_vv_nxv8bf16_nxv8bf16_nxv8bf16:
+; CHECK: # %bb.0: # %entry
+; CHECK-NEXT: vsetvli zero, a0, e16alt, m2, ta, ma
+; CHECK-NEXT: vfsgnjx.vv v8, v8, v10
+; CHECK-NEXT: ret
+entry:
+ %a = call <vscale x 8 x bfloat> @llvm.riscv.vfsgnjx.nxv8bf16.nxv8bf16(
+ <vscale x 8 x bfloat> poison,
+ <vscale x 8 x bfloat> %0,
+ <vscale x 8 x bfloat> %1,
+ iXLen %2)
+
+ ret <vscale x 8 x bfloat> %a
+}
+
+declare <vscale x 8 x bfloat> @llvm.riscv.vfsgnjx.mask.nxv8bf16.nxv8bf16(
+ <vscale x 8 x bfloat>,
+ <vscale x 8 x bfloat>,
+ <vscale x 8 x bfloat>,
+ <vscale x 8 x i1>,
+ iXLen,
+ iXLen);
+
+define <vscale x 8 x bfloat> @intrinsic_vfsgnjx_mask_vv_nxv8bf16_nxv8bf16_nxv8bf16(<vscale x 8 x bfloat> %0, <vscale x 8 x bfloat> %1, <vscale x 8 x bfloat> %2, <vscale x 8 x i1> %3, iXLen %4) nounwind {
+; CHECK-LABEL: intrinsic_vfsgnjx_mask_vv_nxv8bf16_nxv8bf16_nxv8bf16:
+; CHECK: # %bb.0: # %entry
+; CHECK-NEXT: vsetvli zero, a0, e16alt, m2, ta, mu
+; CHECK-NEXT: vfsgnjx.vv v8, v10, v12, v0.t
+; CHECK-NEXT: ret
+entry:
+ %a = call <vscale x 8 x bfloat> @llvm.riscv.vfsgnjx.mask.nxv8bf16.nxv8bf16(
+ <vscale x 8 x bfloat> %0,
+ <vscale x 8 x bfloat> %1,
+ <vscale x 8 x bfloat> %2,
+ <vscale x 8 x i1> %3,
+ iXLen %4, iXLen 1)
+
+ ret <vscale x 8 x bfloat> %a
+}
+
+declare <vscale x 16 x bfloat> @llvm.riscv.vfsgnjx.nxv16bf16.nxv16bf16(
+ <vscale x 16 x bfloat>,
+ <vscale x 16 x bfloat>,
+ <vscale x 16 x bfloat>,
+ iXLen);
+
+define <vscale x 16 x bfloat> @intrinsic_vfsgnjx_vv_nxv16bf16_nxv16bf16_nxv16bf16(<vscale x 16 x bfloat> %0, <vscale x 16 x bfloat> %1, iXLen %2) nounwind {
+; CHECK-LABEL: intrinsic_vfsgnjx_vv_nxv16bf16_nxv16bf16_nxv16bf16:
+; CHECK: # %bb.0: # %entry
+; CHECK-NEXT: vsetvli zero, a0, e16alt, m4, ta, ma
+; CHECK-NEXT: vfsgnjx.vv v8, v8, v12
+; CHECK-NEXT: ret
+entry:
+ %a = call <vscale x 16 x bfloat> @llvm.riscv.vfsgnjx.nxv16bf16.nxv16bf16(
+ <vscale x 16 x bfloat> poison,
+ <vscale x 16 x bfloat> %0,
+ <vscale x 16 x bfloat> %1,
+ iXLen %2)
+
+ ret <vscale x 16 x bfloat> %a
+}
+
+declare <vscale x 16 x bfloat> @llvm.riscv.vfsgnjx.mask.nxv16bf16.nxv16bf16(
+ <vscale x 16 x bfloat>,
+ <vscale x 16 x bfloat>,
+ <vscale x 16 x bfloat>,
+ <vscale x 16 x i1>,
+ iXLen,
+ iXLen);
+
+define <vscale x 16 x bfloat> @intrinsic_vfsgnjx_mask_vv_nxv16bf16_nxv16bf16_nxv16bf16(<vscale x 16 x bfloat> %0, <vscale x 16 x bfloat> %1, <vscale x 16 x bfloat> %2, <vscale x 16 x i1> %3, iXLen %4) nounwind {
+; CHECK-LABEL: intrinsic_vfsgnjx_mask_vv_nxv16bf16_nxv16bf16_nxv16bf16:
+; CHECK: # %bb.0: # %entry
+; CHECK-NEXT: vsetvli zero, a0, e16alt, m4, ta, mu
+; CHECK-NEXT: vfsgnjx.vv v8, v12, v16, v0.t
+; CHECK-NEXT: ret
+entry:
+ %a = call <vscale x 16 x bfloat> @llvm.riscv.vfsgnjx.mask.nxv16bf16.nxv16bf16(
+ <vscale x 16 x bfloat> %0,
+ <vscale x 16 x bfloat> %1,
+ <vscale x 16 x bfloat> %2,
+ <vscale x 16 x i1> %3,
+ iXLen %4, iXLen 1)
+
+ ret <vscale x 16 x bfloat> %a
+}
+
+declare <vscale x 32 x bfloat> @llvm.riscv.vfsgnjx.nxv32bf16.nxv32bf16(
+ <vscale x 32 x bfloat>,
+ <vscale x 32 x bfloat>,
+ <vscale x 32 x bfloat>,
+ iXLen);
+
+define <vscale x 32 x bfloat> @intrinsic_vfsgnjx_vv_nxv32bf16_nxv32bf16_nxv32bf16(<vscale x 32 x bfloat> %0, <vscale x 32 x bfloat> %1, iXLen %2) nounwind {
+; CHECK-LABEL: intrinsic_vfsgnjx_vv_nxv32bf16_nxv32bf16_nxv32bf16:
+; CHECK: # %bb.0: # %entry
+; CHECK-NEXT: vsetvli zero, a0, e16alt, m8, ta, ma
+; CHECK-NEXT: vfsgnjx.vv v8, v8, v16
+; CHECK-NEXT: ret
+entry:
+ %a = call <vscale x 32 x bfloat> @llvm.riscv.vfsgnjx.nxv32bf16.nxv32bf16(
+ <vscale x 32 x bfloat> poison,
+ <vscale x 32 x bfloat> %0,
+ <vscale x 32 x bfloat> %1,
+ iXLen %2)
+
+ ret <vscale x 32 x bfloat> %a
+}
+
+declare <vscale x 32 x bfloat> @llvm.riscv.vfsgnjx.mask.nxv32bf16.nxv32bf16(
+ <vscale x 32 x bfloat>,
+ <vscale x 32 x bfloat>,
+ <vscale x 32 x bfloat>,
+ <vscale x 32 x i1>,
+ iXLen,
+ iXLen);
+
+define <vscale x 32 x bfloat> @intrinsic_vfsgnjx_mask_vv_nxv32bf16_nxv32bf16_nxv32bf16(<vscale x 32 x bfloat> %0, <vscale x 32 x bfloat> %1, <vscale x 32 x bfloat> %2, <vscale x 32 x i1> %3, iXLen %4) nounwind {
+; CHECK-LABEL: intrinsic_vfsgnjx_mask_vv_nxv32bf16_nxv32bf16_nxv32bf16:
+; CHECK: # %bb.0: # %entry
+; CHECK-NEXT: vl8re16.v v24, (a0)
+; CHECK-NEXT: vsetvli zero, a1, e16alt, m8, ta, mu
+; CHECK-NEXT: vfsgnjx.vv v8, v16, v24, v0.t
+; CHECK-NEXT: ret
+entry:
+ %a = call <vscale x 32 x bfloat> @llvm.riscv.vfsgnjx.mask.nxv32bf16.nxv32bf16(
+ <vscale x 32 x bfloat> %0,
+ <vscale x 32 x bfloat> %1,
+ <vscale x 32 x bfloat> %2,
+ <vscale x 32 x i1> %3,
+ iXLen %4, iXLen 1)
+
+ ret <vscale x 32 x bfloat> %a
+}
+
+declare <vscale x 1 x bfloat> @llvm.riscv.vfsgnjx.nxv1bf16.bf16(
+ <vscale x 1 x bfloat>,
+ <vscale x 1 x bfloat>,
+ bfloat,
+ iXLen);
+
+define <vscale x 1 x bfloat> @intrinsic_vfsgnjx_vf_nxv1bf16_nxv1bf16_bf16(<vscale x 1 x bfloat> %0, bfloat %1, iXLen %2) nounwind {
+; CHECK-LABEL: intrinsic_vfsgnjx_vf_nxv1bf16_nxv1bf16_bf16:
+; CHECK: # %bb.0: # %entry
+; CHECK-NEXT: vsetvli zero, a0, e16alt, mf4, ta, ma
+; CHECK-NEXT: vfsgnjx.vf v8, v8, fa0
+; CHECK-NEXT: ret
+entry:
+ %a = call <vscale x 1 x bfloat> @llvm.riscv.vfsgnjx.nxv1bf16.bf16(
+ <vscale x 1 x bfloat> poison,
+ <vscale x 1 x bfloat> %0,
+ bfloat %1,
+ iXLen %2)
+
+ ret <vscale x 1 x bfloat> %a
+}
+
+declare <vscale x 1 x bfloat> @llvm.riscv.vfsgnjx.mask.nxv1bf16.bf16(
+ <vscale x 1 x bfloat>,
+ <vscale x 1 x bfloat>,
+ bfloat,
+ <vscale x 1 x i1>,
+ iXLen,
+ iXLen);
+
+define <vscale x 1 x bfloat> @intrinsic_vfsgnjx_mask_vf_nxv1bf16_nxv1bf16_bf16(<vscale x 1 x bfloat> %0, <vscale x 1 x bfloat> %1, bfloat %2, <vscale x 1 x i1> %3, iXLen %4) nounwind {
+; CHECK-LABEL: intrinsic_vfsgnjx_mask_vf_nxv1bf16_nxv1bf16_bf16:
+; CHECK: # %bb.0: # %entry
+; CHECK-NEXT: vsetvli zero, a0, e16alt, mf4, ta, mu
+; CHECK-NEXT: vfsgnjx.vf v8, v9, fa0, v0.t
+; CHECK-NEXT: ret
+entry:
+ %a = call <vscale x 1 x bfloat> @llvm.riscv.vfsgnjx.mask.nxv1bf16.bf16(
+ <vscale x 1 x bfloat> %0,
+ <vscale x 1 x bfloat> %1,
+ bfloat %2,
+ <vscale x 1 x i1> %3,
+ iXLen %4, iXLen 1)
+
+ ret <vscale x 1 x bfloat> %a
+}
+
+declare <vscale x 2 x bfloat> @llvm.riscv.vfsgnjx.nxv2bf16.bf16(
+ <vscale x 2 x bfloat>,
+ <vscale x 2 x bfloat>,
+ bfloat,
+ iXLen);
+
+define <vscale x 2 x bfloat> @intrinsic_vfsgnjx_vf_nxv2bf16_nxv2bf16_bf16(<vscale x 2 x bfloat> %0, bfloat %1, iXLen %2) nounwind {
+; CHECK-LABEL: intrinsic_vfsgnjx_vf_nxv2bf16_nxv2bf16_bf16:
+; CHECK: # %bb.0: # %entry
+; CHECK-NEXT: vsetvli zero, a0, e16alt, mf2, ta, ma
+; CHECK-NEXT: vfsgnjx.vf v8, v8, fa0
+; CHECK-NEXT: ret
+entry:
+ %a = call <vscale x 2 x bfloat> @llvm.riscv.vfsgnjx.nxv2bf16.bf16(
+ <vscale x 2 x bfloat> poison,
+ <vscale x 2 x bfloat> %0,
+ bfloat %1,
+ iXLen %2)
+
+ ret <vscale x 2 x bfloat> %a
+}
+
+declare <vscale x 2 x bfloat> @llvm.riscv.vfsgnjx.mask.nxv2bf16.bf16(
+ <vscale x 2 x bfloat>,
+ <vscale x 2 x bfloat>,
+ bfloat,
+ <vscale x 2 x i1>,
+ iXLen,
+ iXLen);
+
+define <vscale x 2 x bfloat> @intrinsic_vfsgnjx_mask_vf_nxv2bf16_nxv2bf16_bf16(<vscale x 2 x bfloat> %0, <vscale x 2 x bfloat> %1, bfloat %2, <vscale x 2 x i1> %3, iXLen %4) nounwind {
+; CHECK-LABEL: intrinsic_vfsgnjx_mask_vf_nxv2bf16_nxv2bf16_bf16:
+; CHECK: # %bb.0: # %entry
+; CHECK-NEXT: vsetvli zero, a0, e16alt, mf2, ta, mu
+; CHECK-NEXT: vfsgnjx.vf v8, v9, fa0, v0.t
+; CHECK-NEXT: ret
+entry:
+ %a = call <vscale x 2 x bfloat> @llvm.riscv.vfsgnjx.mask.nxv2bf16.bf16(
+ <vscale x 2 x bfloat> %0,
+ <vscale x 2 x bfloat> %1,
+ bfloat %2,
+ <vscale x 2 x i1> %3,
+ iXLen %4, iXLen 1)
+
+ ret <vscale x 2 x bfloat> %a
+}
+
+declare <vscale x 4 x bfloat> @llvm.riscv.vfsgnjx.nxv4bf16.bf16(
+ <vscale x 4 x bfloat>,
+ <vscale x 4 x bfloat>,
+ bfloat,
+ iXLen);
+
+define <vscale x 4 x bfloat> @intrinsic_vfsgnjx_vf_nxv4bf16_nxv4bf16_bf16(<vscale x 4 x bfloat> %0, bfloat %1, iXLen %2) nounwind {
+; CHECK-LABEL: intrinsic_vfsgnjx_vf_nxv4bf16_nxv4bf16_bf16:
+; CHECK: # %bb.0: # %entry
+; CHECK-NEXT: vsetvli zero, a0, e16alt, m1, ta, ma
+; CHECK-NEXT: vfsgnjx.vf v8, v8, fa0
+; CHECK-NEXT: ret
+entry:
+ %a = call <vscale x 4 x bfloat> @llvm.riscv.vfsgnjx.nxv4bf16.bf16(
+ <vscale x 4 x bfloat> poison,
+ <vscale x 4 x bfloat> %0,
+ bfloat %1,
+ iXLen %2)
+
+ ret <vscale x 4 x bfloat> %a
+}
+
+declare <vscale x 4 x bfloat> @llvm.riscv.vfsgnjx.mask.nxv4bf16.bf16(
+ <vscale x 4 x bfloat>,
+ <vscale x 4 x bfloat>,
+ bfloat,
+ <vscale x 4 x i1>,
+ iXLen,
+ iXLen);
+
+define <vscale x 4 x bfloat> @intrinsic_vfsgnjx_mask_vf_nxv4bf16_nxv4bf16_bf16(<vscale x 4 x bfloat> %0, <vscale x 4 x bfloat> %1, bfloat %2, <vscale x 4 x i1> %3, iXLen %4) nounwind {
+; CHECK-LABEL: intrinsic_vfsgnjx_mask_vf_nxv4bf16_nxv4bf16_bf16:
+; CHECK: # %bb.0: # %entry
+; CHECK-NEXT: vsetvli zero, a0, e16alt, m1, ta, mu
+; CHECK-NEXT: vfsgnjx.vf v8, v9, fa0, v0.t
+; CHECK-NEXT: ret
+entry:
+ %a = call <vscale x 4 x bfloat> @llvm.riscv.vfsgnjx.mask.nxv4bf16.bf16(
+ <vscale x 4 x bfloat> %0,
+ <vscale x 4 x bfloat> %1,
+ bfloat %2,
+ <vscale x 4 x i1> %3,
+ iXLen %4, iXLen 1)
+
+ ret <vscale x 4 x bfloat> %a
+}
+
+declare <vscale x 8 x bfloat> @llvm.riscv.vfsgnjx.nxv8bf16.bf16(
+ <vscale x 8 x bfloat>,
+ <vscale x 8 x bfloat>,
+ bfloat,
+ iXLen);
+
+define <vscale x 8 x bfloat> @intrinsic_vfsgnjx_vf_nxv8bf16_nxv8bf16_bf16(<vscale x 8 x bfloat> %0, bfloat %1, iXLen %2) nounwind {
+; CHECK-LABEL: intrinsic_vfsgnjx_vf_nxv8bf16_nxv8bf16_bf16:
+; CHECK: # %bb.0: # %entry
+; CHECK-NEXT: vsetvli zero, a0, e16alt, m2, ta, ma
+; CHECK-NEXT: vfsgnjx.vf v8, v8, fa0
+; CHECK-NEXT: ret
+entry:
+ %a = call <vscale x 8 x bfloat> @llvm.riscv.vfsgnjx.nxv8bf16.bf16(
+ <vscale x 8 x bfloat> poison,
+ <vscale x 8 x bfloat> %0,
+ bfloat %1,
+ iXLen %2)
+
+ ret <vscale x 8 x bfloat> %a
+}
+
+declare <vscale x 8 x bfloat> @llvm.riscv.vfsgnjx.mask.nxv8bf16.bf16(
+ <vscale x 8 x bfloat>,
+ <vscale x 8 x bfloat>,
+ bfloat,
+ <vscale x 8 x i1>,
+ iXLen,
+ iXLen);
+
+define <vscale x 8 x bfloat> @intrinsic_vfsgnjx_mask_vf_nxv8bf16_nxv8bf16_bf16(<vscale x 8 x bfloat> %0, <vscale x 8 x bfloat> %1, bfloat %2, <vscale x 8 x i1> %3, iXLen %4) nounwind {
+; CHECK-LABEL: intrinsic_vfsgnjx_mask_vf_nxv8bf16_nxv8bf16_bf16:
+; CHECK: # %bb.0: # %entry
+; CHECK-NEXT: vsetvli zero, a0, e16alt, m2, ta, mu
+; CHECK-NEXT: vfsgnjx.vf v8, v10, fa0, v0.t
+; CHECK-NEXT: ret
+entry:
+ %a = call <vscale x 8 x bfloat> @llvm.riscv.vfsgnjx.mask.nxv8bf16.bf16(
+ <vscale x 8 x bfloat> %0,
+ <vscale x 8 x bfloat> %1,
+ bfloat %2,
+ <vscale x 8 x i1> %3,
+ iXLen %4, iXLen 1)
+
+ ret <vscale x 8 x bfloat> %a
+}
+
+declare <vscale x 16 x bfloat> @llvm.riscv.vfsgnjx.nxv16bf16.bf16(
+ <vscale x 16 x bfloat>,
+ <vscale x 16 x bfloat>,
+ bfloat,
+ iXLen);
+
+define <vscale x 16 x bfloat> @intrinsic_vfsgnjx_vf_nxv16bf16_nxv16bf16_bf16(<vscale x 16 x bfloat> %0, bfloat %1, iXLen %2) nounwind {
+; CHECK-LABEL: intrinsic_vfsgnjx_vf_nxv16bf16_nxv16bf16_bf16:
+; CHECK: # %bb.0: # %entry
+; CHECK-NEXT: vsetvli zero, a0, e16alt, m4, ta, ma
+; CHECK-NEXT: vfsgnjx.vf v8, v8, fa0
+; CHECK-NEXT: ret
+entry:
+ %a = call <vscale x 16 x bfloat> @llvm.riscv.vfsgnjx.nxv16bf16.bf16(
+ <vscale x 16 x bfloat> poison,
+ <vscale x 16 x bfloat> %0,
+ bfloat %1,
+ iXLen %2)
+
+ ret <vscale x 16 x bfloat> %a
+}
+
+declare <vscale x 16 x bfloat> @llvm.riscv.vfsgnjx.mask.nxv16bf16.bf16(
+ <vscale x 16 x bfloat>,
+ <vscale x 16 x bfloat>,
+ bfloat,
+ <vscale x 16 x i1>,
+ iXLen,
+ iXLen);
+
+define <vscale x 16 x bfloat> @intrinsic_vfsgnjx_mask_vf_nxv16bf16_nxv16bf16_bf16(<vscale x 16 x bfloat> %0, <vscale x 16 x bfloat> %1, bfloat %2, <vscale x 16 x i1> %3, iXLen %4) nounwind {
+; CHECK-LABEL: intrinsic_vfsgnjx_mask_vf_nxv16bf16_nxv16bf16_bf16:
+; CHECK: # %bb.0: # %entry
+; CHECK-NEXT: vsetvli zero, a0, e16alt, m4, ta, mu
+; CHECK-NEXT: vfsgnjx.vf v8, v12, fa0, v0.t
+; CHECK-NEXT: ret
+entry:
+ %a = call <vscale x 16 x bfloat> @llvm.riscv.vfsgnjx.mask.nxv16bf16.bf16(
+ <vscale x 16 x bfloat> %0,
+ <vscale x 16 x bfloat> %1,
+ bfloat %2,
+ <vscale x 16 x i1> %3,
+ iXLen %4, iXLen 1)
+
+ ret <vscale x 16 x bfloat> %a
+}
+
+declare <vscale x 32 x bfloat> @llvm.riscv.vfsgnjx.nxv32bf16.bf16(
+ <vscale x 32 x bfloat>,
+ <vscale x 32 x bfloat>,
+ bfloat,
+ iXLen);
+
+define <vscale x 32 x bfloat> @intrinsic_vfsgnjx_vf_nxv32bf16_nxv32bf16_bf16(<vscale x 32 x bfloat> %0, bfloat %1, iXLen %2) nounwind {
+; CHECK-LABEL: intrinsic_vfsgnjx_vf_nxv32bf16_nxv32bf16_bf16:
+; CHECK: # %bb.0: # %entry
+; CHECK-NEXT: vsetvli zero, a0, e16alt, m8, ta, ma
+; CHECK-NEXT: vfsgnjx.vf v8, v8, fa0
+; CHECK-NEXT: ret
+entry:
+ %a = call <vscale x 32 x bfloat> @llvm.riscv.vfsgnjx.nxv32bf16.bf16(
+ <vscale x 32 x bfloat> poison,
+ <vscale x 32 x bfloat> %0,
+ bfloat %1,
+ iXLen %2)
+
+ ret <vscale x 32 x bfloat> %a
+}
+
+declare <vscale x 32 x bfloat> @llvm.riscv.vfsgnjx.mask.nxv32bf16.bf16(
+ <vscale x 32 x bfloat>,
+ <vscale x 32 x bfloat>,
+ bfloat,
+ <vscale x 32 x i1>,
+ iXLen,
+ iXLen);
+
+define <vscale x 32 x bfloat> @intrinsic_vfsgnjx_mask_vf_nxv32bf16_nxv32bf16_bf16(<vscale x 32 x bfloat> %0, <vscale x 32 x bfloat> %1, bfloat %2, <vscale x 32 x i1> %3, iXLen %4) nounwind {
+; CHECK-LABEL: intrinsic_vfsgnjx_mask_vf_nxv32bf16_nxv32bf16_bf16:
+; CHECK: # %bb.0: # %entry
+; CHECK-NEXT: vsetvli zero, a0, e16alt, m8, ta, mu
+; CHECK-NEXT: vfsgnjx.vf v8, v16, fa0, v0.t
+; CHECK-NEXT: ret
+entry:
+ %a = call <vscale x 32 x bfloat> @llvm.riscv.vfsgnjx.mask.nxv32bf16.bf16(
+ <vscale x 32 x bfloat> %0,
+ <vscale x 32 x bfloat> %1,
+ bfloat %2,
+ <vscale x 32 x i1> %3,
+ iXLen %4, iXLen 1)
+
+ ret <vscale x 32 x bfloat> %a
+}
+
diff --git a/llvm/test/CodeGen/RISCV/rvv/vfslide1down-bf.ll b/llvm/test/CodeGen/RISCV/rvv/vfslide1down-bf.ll
new file mode 100644
index 0000000..c65719c
--- /dev/null
+++ b/llvm/test/CodeGen/RISCV/rvv/vfslide1down-bf.ll
@@ -0,0 +1,288 @@
+; NOTE: Assertions have been autogenerated by utils/update_llc_test_checks.py
+; RUN: sed 's/iXLen/i32/g' %s | llc -mtriple=riscv32 -mattr=+v,+experimental-zvfbfa \
+; RUN: -verify-machineinstrs -target-abi=ilp32d | FileCheck %s
+; RUN: sed 's/iXLen/i64/g' %s | llc -mtriple=riscv64 -mattr=+v,+experimental-zvfbfa \
+; RUN: -verify-machineinstrs -target-abi=lp64d | FileCheck %s
+
+declare <vscale x 1 x bfloat> @llvm.riscv.vfslide1down.nxv1bf16.bf16(
+ <vscale x 1 x bfloat>,
+ <vscale x 1 x bfloat>,
+ bfloat,
+ iXLen);
+
+define <vscale x 1 x bfloat> @intrinsic_vfslide1down_vf_nxv1bf16_nxv1bf16_bf16(<vscale x 1 x bfloat> %0, bfloat %1, iXLen %2) nounwind {
+; CHECK-LABEL: intrinsic_vfslide1down_vf_nxv1bf16_nxv1bf16_bf16:
+; CHECK: # %bb.0: # %entry
+; CHECK-NEXT: vsetvli zero, a0, e16alt, mf4, ta, ma
+; CHECK-NEXT: vfslide1down.vf v8, v8, fa0
+; CHECK-NEXT: ret
+entry:
+ %a = call <vscale x 1 x bfloat> @llvm.riscv.vfslide1down.nxv1bf16.bf16(
+ <vscale x 1 x bfloat> poison,
+ <vscale x 1 x bfloat> %0,
+ bfloat %1,
+ iXLen %2)
+
+ ret <vscale x 1 x bfloat> %a
+}
+
+declare <vscale x 1 x bfloat> @llvm.riscv.vfslide1down.mask.nxv1bf16.bf16(
+ <vscale x 1 x bfloat>,
+ <vscale x 1 x bfloat>,
+ bfloat,
+ <vscale x 1 x i1>,
+ iXLen,
+ iXLen);
+
+define <vscale x 1 x bfloat> @intrinsic_vfslide1down_mask_vf_nxv1bf16_nxv1bf16_bf16(<vscale x 1 x bfloat> %0, <vscale x 1 x bfloat> %1, bfloat %2, <vscale x 1 x i1> %3, iXLen %4) nounwind {
+; CHECK-LABEL: intrinsic_vfslide1down_mask_vf_nxv1bf16_nxv1bf16_bf16:
+; CHECK: # %bb.0: # %entry
+; CHECK-NEXT: vsetvli zero, a0, e16alt, mf4, ta, mu
+; CHECK-NEXT: vfslide1down.vf v8, v9, fa0, v0.t
+; CHECK-NEXT: ret
+entry:
+ %a = call <vscale x 1 x bfloat> @llvm.riscv.vfslide1down.mask.nxv1bf16.bf16(
+ <vscale x 1 x bfloat> %0,
+ <vscale x 1 x bfloat> %1,
+ bfloat %2,
+ <vscale x 1 x i1> %3,
+ iXLen %4, iXLen 1)
+
+ ret <vscale x 1 x bfloat> %a
+}
+
+declare <vscale x 2 x bfloat> @llvm.riscv.vfslide1down.nxv2bf16.bf16(
+ <vscale x 2 x bfloat>,
+ <vscale x 2 x bfloat>,
+ bfloat,
+ iXLen);
+
+define <vscale x 2 x bfloat> @intrinsic_vfslide1down_vf_nxv2bf16_nxv2bf16_bf16(<vscale x 2 x bfloat> %0, bfloat %1, iXLen %2) nounwind {
+; CHECK-LABEL: intrinsic_vfslide1down_vf_nxv2bf16_nxv2bf16_bf16:
+; CHECK: # %bb.0: # %entry
+; CHECK-NEXT: vsetvli zero, a0, e16alt, mf2, ta, ma
+; CHECK-NEXT: vfslide1down.vf v8, v8, fa0
+; CHECK-NEXT: ret
+entry:
+ %a = call <vscale x 2 x bfloat> @llvm.riscv.vfslide1down.nxv2bf16.bf16(
+ <vscale x 2 x bfloat> poison,
+ <vscale x 2 x bfloat> %0,
+ bfloat %1,
+ iXLen %2)
+
+ ret <vscale x 2 x bfloat> %a
+}
+
+declare <vscale x 2 x bfloat> @llvm.riscv.vfslide1down.mask.nxv2bf16.bf16(
+ <vscale x 2 x bfloat>,
+ <vscale x 2 x bfloat>,
+ bfloat,
+ <vscale x 2 x i1>,
+ iXLen,
+ iXLen);
+
+define <vscale x 2 x bfloat> @intrinsic_vfslide1down_mask_vf_nxv2bf16_nxv2bf16_bf16(<vscale x 2 x bfloat> %0, <vscale x 2 x bfloat> %1, bfloat %2, <vscale x 2 x i1> %3, iXLen %4) nounwind {
+; CHECK-LABEL: intrinsic_vfslide1down_mask_vf_nxv2bf16_nxv2bf16_bf16:
+; CHECK: # %bb.0: # %entry
+; CHECK-NEXT: vsetvli zero, a0, e16alt, mf2, ta, mu
+; CHECK-NEXT: vfslide1down.vf v8, v9, fa0, v0.t
+; CHECK-NEXT: ret
+entry:
+ %a = call <vscale x 2 x bfloat> @llvm.riscv.vfslide1down.mask.nxv2bf16.bf16(
+ <vscale x 2 x bfloat> %0,
+ <vscale x 2 x bfloat> %1,
+ bfloat %2,
+ <vscale x 2 x i1> %3,
+ iXLen %4, iXLen 1)
+
+ ret <vscale x 2 x bfloat> %a
+}
+
+declare <vscale x 4 x bfloat> @llvm.riscv.vfslide1down.nxv4bf16.bf16(
+ <vscale x 4 x bfloat>,
+ <vscale x 4 x bfloat>,
+ bfloat,
+ iXLen);
+
+define <vscale x 4 x bfloat> @intrinsic_vfslide1down_vf_nxv4bf16_nxv4bf16_bf16(<vscale x 4 x bfloat> %0, bfloat %1, iXLen %2) nounwind {
+; CHECK-LABEL: intrinsic_vfslide1down_vf_nxv4bf16_nxv4bf16_bf16:
+; CHECK: # %bb.0: # %entry
+; CHECK-NEXT: vsetvli zero, a0, e16alt, m1, ta, ma
+; CHECK-NEXT: vfslide1down.vf v8, v8, fa0
+; CHECK-NEXT: ret
+entry:
+ %a = call <vscale x 4 x bfloat> @llvm.riscv.vfslide1down.nxv4bf16.bf16(
+ <vscale x 4 x bfloat> poison,
+ <vscale x 4 x bfloat> %0,
+ bfloat %1,
+ iXLen %2)
+
+ ret <vscale x 4 x bfloat> %a
+}
+
+declare <vscale x 4 x bfloat> @llvm.riscv.vfslide1down.mask.nxv4bf16.bf16(
+ <vscale x 4 x bfloat>,
+ <vscale x 4 x bfloat>,
+ bfloat,
+ <vscale x 4 x i1>,
+ iXLen,
+ iXLen);
+
+define <vscale x 4 x bfloat> @intrinsic_vfslide1down_mask_vf_nxv4bf16_nxv4bf16_bf16(<vscale x 4 x bfloat> %0, <vscale x 4 x bfloat> %1, bfloat %2, <vscale x 4 x i1> %3, iXLen %4) nounwind {
+; CHECK-LABEL: intrinsic_vfslide1down_mask_vf_nxv4bf16_nxv4bf16_bf16:
+; CHECK: # %bb.0: # %entry
+; CHECK-NEXT: vsetvli zero, a0, e16alt, m1, ta, mu
+; CHECK-NEXT: vfslide1down.vf v8, v9, fa0, v0.t
+; CHECK-NEXT: ret
+entry:
+ %a = call <vscale x 4 x bfloat> @llvm.riscv.vfslide1down.mask.nxv4bf16.bf16(
+ <vscale x 4 x bfloat> %0,
+ <vscale x 4 x bfloat> %1,
+ bfloat %2,
+ <vscale x 4 x i1> %3,
+ iXLen %4, iXLen 1)
+
+ ret <vscale x 4 x bfloat> %a
+}
+
+declare <vscale x 8 x bfloat> @llvm.riscv.vfslide1down.nxv8bf16.bf16(
+ <vscale x 8 x bfloat>,
+ <vscale x 8 x bfloat>,
+ bfloat,
+ iXLen);
+
+define <vscale x 8 x bfloat> @intrinsic_vfslide1down_vf_nxv8bf16_nxv8bf16_bf16(<vscale x 8 x bfloat> %0, bfloat %1, iXLen %2) nounwind {
+; CHECK-LABEL: intrinsic_vfslide1down_vf_nxv8bf16_nxv8bf16_bf16:
+; CHECK: # %bb.0: # %entry
+; CHECK-NEXT: vsetvli zero, a0, e16alt, m2, ta, ma
+; CHECK-NEXT: vfslide1down.vf v8, v8, fa0
+; CHECK-NEXT: ret
+entry:
+ %a = call <vscale x 8 x bfloat> @llvm.riscv.vfslide1down.nxv8bf16.bf16(
+ <vscale x 8 x bfloat> poison,
+ <vscale x 8 x bfloat> %0,
+ bfloat %1,
+ iXLen %2)
+
+ ret <vscale x 8 x bfloat> %a
+}
+
+declare <vscale x 8 x bfloat> @llvm.riscv.vfslide1down.mask.nxv8bf16.bf16(
+ <vscale x 8 x bfloat>,
+ <vscale x 8 x bfloat>,
+ bfloat,
+ <vscale x 8 x i1>,
+ iXLen,
+ iXLen);
+
+define <vscale x 8 x bfloat> @intrinsic_vfslide1down_mask_vf_nxv8bf16_nxv8bf16_bf16(<vscale x 8 x bfloat> %0, <vscale x 8 x bfloat> %1, bfloat %2, <vscale x 8 x i1> %3, iXLen %4) nounwind {
+; CHECK-LABEL: intrinsic_vfslide1down_mask_vf_nxv8bf16_nxv8bf16_bf16:
+; CHECK: # %bb.0: # %entry
+; CHECK-NEXT: vsetvli zero, a0, e16alt, m2, ta, mu
+; CHECK-NEXT: vfslide1down.vf v8, v10, fa0, v0.t
+; CHECK-NEXT: ret
+entry:
+ %a = call <vscale x 8 x bfloat> @llvm.riscv.vfslide1down.mask.nxv8bf16.bf16(
+ <vscale x 8 x bfloat> %0,
+ <vscale x 8 x bfloat> %1,
+ bfloat %2,
+ <vscale x 8 x i1> %3,
+ iXLen %4, iXLen 1)
+
+ ret <vscale x 8 x bfloat> %a
+}
+
+declare <vscale x 16 x bfloat> @llvm.riscv.vfslide1down.nxv16bf16.bf16(
+ <vscale x 16 x bfloat>,
+ <vscale x 16 x bfloat>,
+ bfloat,
+ iXLen);
+
+define <vscale x 16 x bfloat> @intrinsic_vfslide1down_vf_nxv16bf16_nxv16bf16_bf16(<vscale x 16 x bfloat> %0, bfloat %1, iXLen %2) nounwind {
+; CHECK-LABEL: intrinsic_vfslide1down_vf_nxv16bf16_nxv16bf16_bf16:
+; CHECK: # %bb.0: # %entry
+; CHECK-NEXT: vsetvli zero, a0, e16alt, m4, ta, ma
+; CHECK-NEXT: vfslide1down.vf v8, v8, fa0
+; CHECK-NEXT: ret
+entry:
+ %a = call <vscale x 16 x bfloat> @llvm.riscv.vfslide1down.nxv16bf16.bf16(
+ <vscale x 16 x bfloat> poison,
+ <vscale x 16 x bfloat> %0,
+ bfloat %1,
+ iXLen %2)
+
+ ret <vscale x 16 x bfloat> %a
+}
+
+declare <vscale x 16 x bfloat> @llvm.riscv.vfslide1down.mask.nxv16bf16.bf16(
+ <vscale x 16 x bfloat>,
+ <vscale x 16 x bfloat>,
+ bfloat,
+ <vscale x 16 x i1>,
+ iXLen,
+ iXLen);
+
+define <vscale x 16 x bfloat> @intrinsic_vfslide1down_mask_vf_nxv16bf16_nxv16bf16_bf16(<vscale x 16 x bfloat> %0, <vscale x 16 x bfloat> %1, bfloat %2, <vscale x 16 x i1> %3, iXLen %4) nounwind {
+; CHECK-LABEL: intrinsic_vfslide1down_mask_vf_nxv16bf16_nxv16bf16_bf16:
+; CHECK: # %bb.0: # %entry
+; CHECK-NEXT: vsetvli zero, a0, e16alt, m4, ta, mu
+; CHECK-NEXT: vfslide1down.vf v8, v12, fa0, v0.t
+; CHECK-NEXT: ret
+entry:
+ %a = call <vscale x 16 x bfloat> @llvm.riscv.vfslide1down.mask.nxv16bf16.bf16(
+ <vscale x 16 x bfloat> %0,
+ <vscale x 16 x bfloat> %1,
+ bfloat %2,
+ <vscale x 16 x i1> %3,
+ iXLen %4, iXLen 1)
+
+ ret <vscale x 16 x bfloat> %a
+}
+
+declare <vscale x 32 x bfloat> @llvm.riscv.vfslide1down.nxv32bf16.bf16(
+ <vscale x 32 x bfloat>,
+ <vscale x 32 x bfloat>,
+ bfloat,
+ iXLen);
+
+define <vscale x 32 x bfloat> @intrinsic_vfslide1down_vf_nxv32bf16_nxv32bf16_bf16(<vscale x 32 x bfloat> %0, bfloat %1, iXLen %2) nounwind {
+; CHECK-LABEL: intrinsic_vfslide1down_vf_nxv32bf16_nxv32bf16_bf16:
+; CHECK: # %bb.0: # %entry
+; CHECK-NEXT: vsetvli zero, a0, e16alt, m8, ta, ma
+; CHECK-NEXT: vfslide1down.vf v8, v8, fa0
+; CHECK-NEXT: ret
+entry:
+ %a = call <vscale x 32 x bfloat> @llvm.riscv.vfslide1down.nxv32bf16.bf16(
+ <vscale x 32 x bfloat> poison,
+ <vscale x 32 x bfloat> %0,
+ bfloat %1,
+ iXLen %2)
+
+ ret <vscale x 32 x bfloat> %a
+}
+
+declare <vscale x 32 x bfloat> @llvm.riscv.vfslide1down.mask.nxv32bf16.bf16(
+ <vscale x 32 x bfloat>,
+ <vscale x 32 x bfloat>,
+ bfloat,
+ <vscale x 32 x i1>,
+ iXLen,
+ iXLen);
+
+define <vscale x 32 x bfloat> @intrinsic_vfslide1down_mask_vf_nxv32bf16_nxv32bf16_bf16(<vscale x 32 x bfloat> %0, <vscale x 32 x bfloat> %1, bfloat %2, <vscale x 32 x i1> %3, iXLen %4) nounwind {
+; CHECK-LABEL: intrinsic_vfslide1down_mask_vf_nxv32bf16_nxv32bf16_bf16:
+; CHECK: # %bb.0: # %entry
+; CHECK-NEXT: vsetvli zero, a0, e16alt, m8, ta, mu
+; CHECK-NEXT: vfslide1down.vf v8, v16, fa0, v0.t
+; CHECK-NEXT: ret
+entry:
+ %a = call <vscale x 32 x bfloat> @llvm.riscv.vfslide1down.mask.nxv32bf16.bf16(
+ <vscale x 32 x bfloat> %0,
+ <vscale x 32 x bfloat> %1,
+ bfloat %2,
+ <vscale x 32 x i1> %3,
+ iXLen %4, iXLen 1)
+
+ ret <vscale x 32 x bfloat> %a
+}
+
diff --git a/llvm/test/CodeGen/RISCV/rvv/vfslide1up-bf.ll b/llvm/test/CodeGen/RISCV/rvv/vfslide1up-bf.ll
new file mode 100644
index 0000000..57a4898
--- /dev/null
+++ b/llvm/test/CodeGen/RISCV/rvv/vfslide1up-bf.ll
@@ -0,0 +1,294 @@
+; NOTE: Assertions have been autogenerated by utils/update_llc_test_checks.py
+; RUN: sed 's/iXLen/i32/g' %s | llc -mtriple=riscv32 -mattr=+v,+experimental-zvfbfa \
+; RUN: -verify-machineinstrs -target-abi=ilp32d | FileCheck %s
+; RUN: sed 's/iXLen/i64/g' %s | llc -mtriple=riscv64 -mattr=+v,+experimental-zvfbfa \
+; RUN: -verify-machineinstrs -target-abi=lp64d | FileCheck %s
+
+declare <vscale x 1 x bfloat> @llvm.riscv.vfslide1up.nxv1bf16.bf16(
+ <vscale x 1 x bfloat>,
+ <vscale x 1 x bfloat>,
+ bfloat,
+ iXLen);
+
+define <vscale x 1 x bfloat> @intrinsic_vfslide1up_vf_nxv1bf16_nxv1bf16_bf16(<vscale x 1 x bfloat> %0, bfloat %1, iXLen %2) nounwind {
+; CHECK-LABEL: intrinsic_vfslide1up_vf_nxv1bf16_nxv1bf16_bf16:
+; CHECK: # %bb.0: # %entry
+; CHECK-NEXT: vsetvli zero, a0, e16alt, mf4, ta, ma
+; CHECK-NEXT: vfslide1up.vf v9, v8, fa0
+; CHECK-NEXT: vmv1r.v v8, v9
+; CHECK-NEXT: ret
+entry:
+ %a = call <vscale x 1 x bfloat> @llvm.riscv.vfslide1up.nxv1bf16.bf16(
+ <vscale x 1 x bfloat> poison,
+ <vscale x 1 x bfloat> %0,
+ bfloat %1,
+ iXLen %2)
+
+ ret <vscale x 1 x bfloat> %a
+}
+
+declare <vscale x 1 x bfloat> @llvm.riscv.vfslide1up.mask.nxv1bf16.bf16(
+ <vscale x 1 x bfloat>,
+ <vscale x 1 x bfloat>,
+ bfloat,
+ <vscale x 1 x i1>,
+ iXLen,
+ iXLen);
+
+define <vscale x 1 x bfloat> @intrinsic_vfslide1up_mask_vf_nxv1bf16_nxv1bf16_bf16(<vscale x 1 x bfloat> %0, <vscale x 1 x bfloat> %1, bfloat %2, <vscale x 1 x i1> %3, iXLen %4) nounwind {
+; CHECK-LABEL: intrinsic_vfslide1up_mask_vf_nxv1bf16_nxv1bf16_bf16:
+; CHECK: # %bb.0: # %entry
+; CHECK-NEXT: vsetvli zero, a0, e16alt, mf4, ta, mu
+; CHECK-NEXT: vfslide1up.vf v8, v9, fa0, v0.t
+; CHECK-NEXT: ret
+entry:
+ %a = call <vscale x 1 x bfloat> @llvm.riscv.vfslide1up.mask.nxv1bf16.bf16(
+ <vscale x 1 x bfloat> %0,
+ <vscale x 1 x bfloat> %1,
+ bfloat %2,
+ <vscale x 1 x i1> %3,
+ iXLen %4, iXLen 1)
+
+ ret <vscale x 1 x bfloat> %a
+}
+
+declare <vscale x 2 x bfloat> @llvm.riscv.vfslide1up.nxv2bf16.bf16(
+ <vscale x 2 x bfloat>,
+ <vscale x 2 x bfloat>,
+ bfloat,
+ iXLen);
+
+define <vscale x 2 x bfloat> @intrinsic_vfslide1up_vf_nxv2bf16_nxv2bf16_bf16(<vscale x 2 x bfloat> %0, bfloat %1, iXLen %2) nounwind {
+; CHECK-LABEL: intrinsic_vfslide1up_vf_nxv2bf16_nxv2bf16_bf16:
+; CHECK: # %bb.0: # %entry
+; CHECK-NEXT: vsetvli zero, a0, e16alt, mf2, ta, ma
+; CHECK-NEXT: vfslide1up.vf v9, v8, fa0
+; CHECK-NEXT: vmv1r.v v8, v9
+; CHECK-NEXT: ret
+entry:
+ %a = call <vscale x 2 x bfloat> @llvm.riscv.vfslide1up.nxv2bf16.bf16(
+ <vscale x 2 x bfloat> poison,
+ <vscale x 2 x bfloat> %0,
+ bfloat %1,
+ iXLen %2)
+
+ ret <vscale x 2 x bfloat> %a
+}
+
+declare <vscale x 2 x bfloat> @llvm.riscv.vfslide1up.mask.nxv2bf16.bf16(
+ <vscale x 2 x bfloat>,
+ <vscale x 2 x bfloat>,
+ bfloat,
+ <vscale x 2 x i1>,
+ iXLen,
+ iXLen);
+
+define <vscale x 2 x bfloat> @intrinsic_vfslide1up_mask_vf_nxv2bf16_nxv2bf16_bf16(<vscale x 2 x bfloat> %0, <vscale x 2 x bfloat> %1, bfloat %2, <vscale x 2 x i1> %3, iXLen %4) nounwind {
+; CHECK-LABEL: intrinsic_vfslide1up_mask_vf_nxv2bf16_nxv2bf16_bf16:
+; CHECK: # %bb.0: # %entry
+; CHECK-NEXT: vsetvli zero, a0, e16alt, mf2, ta, mu
+; CHECK-NEXT: vfslide1up.vf v8, v9, fa0, v0.t
+; CHECK-NEXT: ret
+entry:
+ %a = call <vscale x 2 x bfloat> @llvm.riscv.vfslide1up.mask.nxv2bf16.bf16(
+ <vscale x 2 x bfloat> %0,
+ <vscale x 2 x bfloat> %1,
+ bfloat %2,
+ <vscale x 2 x i1> %3,
+ iXLen %4, iXLen 1)
+
+ ret <vscale x 2 x bfloat> %a
+}
+
+declare <vscale x 4 x bfloat> @llvm.riscv.vfslide1up.nxv4bf16.bf16(
+ <vscale x 4 x bfloat>,
+ <vscale x 4 x bfloat>,
+ bfloat,
+ iXLen);
+
+define <vscale x 4 x bfloat> @intrinsic_vfslide1up_vf_nxv4bf16_nxv4bf16_bf16(<vscale x 4 x bfloat> %0, bfloat %1, iXLen %2) nounwind {
+; CHECK-LABEL: intrinsic_vfslide1up_vf_nxv4bf16_nxv4bf16_bf16:
+; CHECK: # %bb.0: # %entry
+; CHECK-NEXT: vsetvli zero, a0, e16alt, m1, ta, ma
+; CHECK-NEXT: vfslide1up.vf v9, v8, fa0
+; CHECK-NEXT: vmv.v.v v8, v9
+; CHECK-NEXT: ret
+entry:
+ %a = call <vscale x 4 x bfloat> @llvm.riscv.vfslide1up.nxv4bf16.bf16(
+ <vscale x 4 x bfloat> poison,
+ <vscale x 4 x bfloat> %0,
+ bfloat %1,
+ iXLen %2)
+
+ ret <vscale x 4 x bfloat> %a
+}
+
+declare <vscale x 4 x bfloat> @llvm.riscv.vfslide1up.mask.nxv4bf16.bf16(
+ <vscale x 4 x bfloat>,
+ <vscale x 4 x bfloat>,
+ bfloat,
+ <vscale x 4 x i1>,
+ iXLen,
+ iXLen);
+
+define <vscale x 4 x bfloat> @intrinsic_vfslide1up_mask_vf_nxv4bf16_nxv4bf16_bf16(<vscale x 4 x bfloat> %0, <vscale x 4 x bfloat> %1, bfloat %2, <vscale x 4 x i1> %3, iXLen %4) nounwind {
+; CHECK-LABEL: intrinsic_vfslide1up_mask_vf_nxv4bf16_nxv4bf16_bf16:
+; CHECK: # %bb.0: # %entry
+; CHECK-NEXT: vsetvli zero, a0, e16alt, m1, ta, mu
+; CHECK-NEXT: vfslide1up.vf v8, v9, fa0, v0.t
+; CHECK-NEXT: ret
+entry:
+ %a = call <vscale x 4 x bfloat> @llvm.riscv.vfslide1up.mask.nxv4bf16.bf16(
+ <vscale x 4 x bfloat> %0,
+ <vscale x 4 x bfloat> %1,
+ bfloat %2,
+ <vscale x 4 x i1> %3,
+ iXLen %4, iXLen 1)
+
+ ret <vscale x 4 x bfloat> %a
+}
+
+declare <vscale x 8 x bfloat> @llvm.riscv.vfslide1up.nxv8bf16.bf16(
+ <vscale x 8 x bfloat>,
+ <vscale x 8 x bfloat>,
+ bfloat,
+ iXLen);
+
+define <vscale x 8 x bfloat> @intrinsic_vfslide1up_vf_nxv8bf16_nxv8bf16_bf16(<vscale x 8 x bfloat> %0, bfloat %1, iXLen %2) nounwind {
+; CHECK-LABEL: intrinsic_vfslide1up_vf_nxv8bf16_nxv8bf16_bf16:
+; CHECK: # %bb.0: # %entry
+; CHECK-NEXT: vsetvli zero, a0, e16alt, m2, ta, ma
+; CHECK-NEXT: vfslide1up.vf v10, v8, fa0
+; CHECK-NEXT: vmv.v.v v8, v10
+; CHECK-NEXT: ret
+entry:
+ %a = call <vscale x 8 x bfloat> @llvm.riscv.vfslide1up.nxv8bf16.bf16(
+ <vscale x 8 x bfloat> poison,
+ <vscale x 8 x bfloat> %0,
+ bfloat %1,
+ iXLen %2)
+
+ ret <vscale x 8 x bfloat> %a
+}
+
+declare <vscale x 8 x bfloat> @llvm.riscv.vfslide1up.mask.nxv8bf16.bf16(
+ <vscale x 8 x bfloat>,
+ <vscale x 8 x bfloat>,
+ bfloat,
+ <vscale x 8 x i1>,
+ iXLen,
+ iXLen);
+
+define <vscale x 8 x bfloat> @intrinsic_vfslide1up_mask_vf_nxv8bf16_nxv8bf16_bf16(<vscale x 8 x bfloat> %0, <vscale x 8 x bfloat> %1, bfloat %2, <vscale x 8 x i1> %3, iXLen %4) nounwind {
+; CHECK-LABEL: intrinsic_vfslide1up_mask_vf_nxv8bf16_nxv8bf16_bf16:
+; CHECK: # %bb.0: # %entry
+; CHECK-NEXT: vsetvli zero, a0, e16alt, m2, ta, mu
+; CHECK-NEXT: vfslide1up.vf v8, v10, fa0, v0.t
+; CHECK-NEXT: ret
+entry:
+ %a = call <vscale x 8 x bfloat> @llvm.riscv.vfslide1up.mask.nxv8bf16.bf16(
+ <vscale x 8 x bfloat> %0,
+ <vscale x 8 x bfloat> %1,
+ bfloat %2,
+ <vscale x 8 x i1> %3,
+ iXLen %4, iXLen 1)
+
+ ret <vscale x 8 x bfloat> %a
+}
+
+declare <vscale x 16 x bfloat> @llvm.riscv.vfslide1up.nxv16bf16.bf16(
+ <vscale x 16 x bfloat>,
+ <vscale x 16 x bfloat>,
+ bfloat,
+ iXLen);
+
+define <vscale x 16 x bfloat> @intrinsic_vfslide1up_vf_nxv16bf16_nxv16bf16_bf16(<vscale x 16 x bfloat> %0, bfloat %1, iXLen %2) nounwind {
+; CHECK-LABEL: intrinsic_vfslide1up_vf_nxv16bf16_nxv16bf16_bf16:
+; CHECK: # %bb.0: # %entry
+; CHECK-NEXT: vsetvli zero, a0, e16alt, m4, ta, ma
+; CHECK-NEXT: vfslide1up.vf v12, v8, fa0
+; CHECK-NEXT: vmv.v.v v8, v12
+; CHECK-NEXT: ret
+entry:
+ %a = call <vscale x 16 x bfloat> @llvm.riscv.vfslide1up.nxv16bf16.bf16(
+ <vscale x 16 x bfloat> poison,
+ <vscale x 16 x bfloat> %0,
+ bfloat %1,
+ iXLen %2)
+
+ ret <vscale x 16 x bfloat> %a
+}
+
+declare <vscale x 16 x bfloat> @llvm.riscv.vfslide1up.mask.nxv16bf16.bf16(
+ <vscale x 16 x bfloat>,
+ <vscale x 16 x bfloat>,
+ bfloat,
+ <vscale x 16 x i1>,
+ iXLen,
+ iXLen);
+
+define <vscale x 16 x bfloat> @intrinsic_vfslide1up_mask_vf_nxv16bf16_nxv16bf16_bf16(<vscale x 16 x bfloat> %0, <vscale x 16 x bfloat> %1, bfloat %2, <vscale x 16 x i1> %3, iXLen %4) nounwind {
+; CHECK-LABEL: intrinsic_vfslide1up_mask_vf_nxv16bf16_nxv16bf16_bf16:
+; CHECK: # %bb.0: # %entry
+; CHECK-NEXT: vsetvli zero, a0, e16alt, m4, ta, mu
+; CHECK-NEXT: vfslide1up.vf v8, v12, fa0, v0.t
+; CHECK-NEXT: ret
+entry:
+ %a = call <vscale x 16 x bfloat> @llvm.riscv.vfslide1up.mask.nxv16bf16.bf16(
+ <vscale x 16 x bfloat> %0,
+ <vscale x 16 x bfloat> %1,
+ bfloat %2,
+ <vscale x 16 x i1> %3,
+ iXLen %4, iXLen 1)
+
+ ret <vscale x 16 x bfloat> %a
+}
+
+declare <vscale x 32 x bfloat> @llvm.riscv.vfslide1up.nxv32bf16.bf16(
+ <vscale x 32 x bfloat>,
+ <vscale x 32 x bfloat>,
+ bfloat,
+ iXLen);
+
+define <vscale x 32 x bfloat> @intrinsic_vfslide1up_vf_nxv32bf16_nxv32bf16_bf16(<vscale x 32 x bfloat> %0, bfloat %1, iXLen %2) nounwind {
+; CHECK-LABEL: intrinsic_vfslide1up_vf_nxv32bf16_nxv32bf16_bf16:
+; CHECK: # %bb.0: # %entry
+; CHECK-NEXT: vsetvli zero, a0, e16alt, m8, ta, ma
+; CHECK-NEXT: vfslide1up.vf v16, v8, fa0
+; CHECK-NEXT: vmv.v.v v8, v16
+; CHECK-NEXT: ret
+entry:
+ %a = call <vscale x 32 x bfloat> @llvm.riscv.vfslide1up.nxv32bf16.bf16(
+ <vscale x 32 x bfloat> poison,
+ <vscale x 32 x bfloat> %0,
+ bfloat %1,
+ iXLen %2)
+
+ ret <vscale x 32 x bfloat> %a
+}
+
+declare <vscale x 32 x bfloat> @llvm.riscv.vfslide1up.mask.nxv32bf16.bf16(
+ <vscale x 32 x bfloat>,
+ <vscale x 32 x bfloat>,
+ bfloat,
+ <vscale x 32 x i1>,
+ iXLen,
+ iXLen);
+
+define <vscale x 32 x bfloat> @intrinsic_vfslide1up_mask_vf_nxv32bf16_nxv32bf16_bf16(<vscale x 32 x bfloat> %0, <vscale x 32 x bfloat> %1, bfloat %2, <vscale x 32 x i1> %3, iXLen %4) nounwind {
+; CHECK-LABEL: intrinsic_vfslide1up_mask_vf_nxv32bf16_nxv32bf16_bf16:
+; CHECK: # %bb.0: # %entry
+; CHECK-NEXT: vsetvli zero, a0, e16alt, m8, ta, mu
+; CHECK-NEXT: vfslide1up.vf v8, v16, fa0, v0.t
+; CHECK-NEXT: ret
+entry:
+ %a = call <vscale x 32 x bfloat> @llvm.riscv.vfslide1up.mask.nxv32bf16.bf16(
+ <vscale x 32 x bfloat> %0,
+ <vscale x 32 x bfloat> %1,
+ bfloat %2,
+ <vscale x 32 x i1> %3,
+ iXLen %4, iXLen 1)
+
+ ret <vscale x 32 x bfloat> %a
+}
+
diff --git a/llvm/test/CodeGen/RISCV/rvv/vfsub-bf.ll b/llvm/test/CodeGen/RISCV/rvv/vfsub-bf.ll
new file mode 100644
index 0000000..aea7521
--- /dev/null
+++ b/llvm/test/CodeGen/RISCV/rvv/vfsub-bf.ll
@@ -0,0 +1,559 @@
+; NOTE: Assertions have been autogenerated by utils/update_llc_test_checks.py
+; RUN: sed 's/iXLen/i32/g' %s | llc -mtriple=riscv32 -mattr=+v,+experimental-zvfbfa \
+; RUN: -verify-machineinstrs -target-abi=ilp32d | FileCheck %s
+; RUN: sed 's/iXLen/i64/g' %s | llc -mtriple=riscv64 -mattr=+v,+experimental-zvfbfa \
+; RUN: -verify-machineinstrs -target-abi=lp64d | FileCheck %s
+
+declare <vscale x 1 x bfloat> @llvm.riscv.vfsub.nxv1bf16.nxv1bf16(
+ <vscale x 1 x bfloat>,
+ <vscale x 1 x bfloat>,
+ <vscale x 1 x bfloat>,
+ iXLen, iXLen);
+
+define <vscale x 1 x bfloat> @intrinsic_vfsub_vv_nxv1bf16_nxv1bf16_nxv1bf16(<vscale x 1 x bfloat> %0, <vscale x 1 x bfloat> %1, iXLen %2) nounwind {
+; CHECK-LABEL: intrinsic_vfsub_vv_nxv1bf16_nxv1bf16_nxv1bf16:
+; CHECK: # %bb.0: # %entry
+; CHECK-NEXT: vsetvli zero, a0, e16alt, mf4, ta, ma
+; CHECK-NEXT: vfsub.vv v8, v8, v9
+; CHECK-NEXT: ret
+entry:
+ %a = call <vscale x 1 x bfloat> @llvm.riscv.vfsub.nxv1bf16.nxv1bf16(
+ <vscale x 1 x bfloat> poison,
+ <vscale x 1 x bfloat> %0,
+ <vscale x 1 x bfloat> %1,
+ iXLen 7, iXLen %2)
+
+ ret <vscale x 1 x bfloat> %a
+}
+
+declare <vscale x 1 x bfloat> @llvm.riscv.vfsub.mask.nxv1bf16.nxv1bf16(
+ <vscale x 1 x bfloat>,
+ <vscale x 1 x bfloat>,
+ <vscale x 1 x bfloat>,
+ <vscale x 1 x i1>,
+ iXLen, iXLen, iXLen);
+
+define <vscale x 1 x bfloat> @intrinsic_vfsub_mask_vv_nxv1bf16_nxv1bf16_nxv1bf16(<vscale x 1 x bfloat> %0, <vscale x 1 x bfloat> %1, <vscale x 1 x bfloat> %2, <vscale x 1 x i1> %3, iXLen %4) nounwind {
+; CHECK-LABEL: intrinsic_vfsub_mask_vv_nxv1bf16_nxv1bf16_nxv1bf16:
+; CHECK: # %bb.0: # %entry
+; CHECK-NEXT: vsetvli zero, a0, e16alt, mf4, ta, mu
+; CHECK-NEXT: vfsub.vv v8, v9, v10, v0.t
+; CHECK-NEXT: ret
+entry:
+ %a = call <vscale x 1 x bfloat> @llvm.riscv.vfsub.mask.nxv1bf16.nxv1bf16(
+ <vscale x 1 x bfloat> %0,
+ <vscale x 1 x bfloat> %1,
+ <vscale x 1 x bfloat> %2,
+ <vscale x 1 x i1> %3,
+ iXLen 7, iXLen %4, iXLen 1)
+
+ ret <vscale x 1 x bfloat> %a
+}
+
+declare <vscale x 2 x bfloat> @llvm.riscv.vfsub.nxv2bf16.nxv2bf16(
+ <vscale x 2 x bfloat>,
+ <vscale x 2 x bfloat>,
+ <vscale x 2 x bfloat>,
+ iXLen, iXLen);
+
+define <vscale x 2 x bfloat> @intrinsic_vfsub_vv_nxv2bf16_nxv2bf16_nxv2bf16(<vscale x 2 x bfloat> %0, <vscale x 2 x bfloat> %1, iXLen %2) nounwind {
+; CHECK-LABEL: intrinsic_vfsub_vv_nxv2bf16_nxv2bf16_nxv2bf16:
+; CHECK: # %bb.0: # %entry
+; CHECK-NEXT: vsetvli zero, a0, e16alt, mf2, ta, ma
+; CHECK-NEXT: vfsub.vv v8, v8, v9
+; CHECK-NEXT: ret
+entry:
+ %a = call <vscale x 2 x bfloat> @llvm.riscv.vfsub.nxv2bf16.nxv2bf16(
+ <vscale x 2 x bfloat> poison,
+ <vscale x 2 x bfloat> %0,
+ <vscale x 2 x bfloat> %1,
+ iXLen 7, iXLen %2)
+
+ ret <vscale x 2 x bfloat> %a
+}
+
+declare <vscale x 2 x bfloat> @llvm.riscv.vfsub.mask.nxv2bf16.nxv2bf16(
+ <vscale x 2 x bfloat>,
+ <vscale x 2 x bfloat>,
+ <vscale x 2 x bfloat>,
+ <vscale x 2 x i1>,
+ iXLen, iXLen, iXLen);
+
+define <vscale x 2 x bfloat> @intrinsic_vfsub_mask_vv_nxv2bf16_nxv2bf16_nxv2bf16(<vscale x 2 x bfloat> %0, <vscale x 2 x bfloat> %1, <vscale x 2 x bfloat> %2, <vscale x 2 x i1> %3, iXLen %4) nounwind {
+; CHECK-LABEL: intrinsic_vfsub_mask_vv_nxv2bf16_nxv2bf16_nxv2bf16:
+; CHECK: # %bb.0: # %entry
+; CHECK-NEXT: vsetvli zero, a0, e16alt, mf2, ta, mu
+; CHECK-NEXT: vfsub.vv v8, v9, v10, v0.t
+; CHECK-NEXT: ret
+entry:
+ %a = call <vscale x 2 x bfloat> @llvm.riscv.vfsub.mask.nxv2bf16.nxv2bf16(
+ <vscale x 2 x bfloat> %0,
+ <vscale x 2 x bfloat> %1,
+ <vscale x 2 x bfloat> %2,
+ <vscale x 2 x i1> %3,
+ iXLen 7, iXLen %4, iXLen 1)
+
+ ret <vscale x 2 x bfloat> %a
+}
+
+declare <vscale x 4 x bfloat> @llvm.riscv.vfsub.nxv4bf16.nxv4bf16(
+ <vscale x 4 x bfloat>,
+ <vscale x 4 x bfloat>,
+ <vscale x 4 x bfloat>,
+ iXLen, iXLen);
+
+define <vscale x 4 x bfloat> @intrinsic_vfsub_vv_nxv4bf16_nxv4bf16_nxv4bf16(<vscale x 4 x bfloat> %0, <vscale x 4 x bfloat> %1, iXLen %2) nounwind {
+; CHECK-LABEL: intrinsic_vfsub_vv_nxv4bf16_nxv4bf16_nxv4bf16:
+; CHECK: # %bb.0: # %entry
+; CHECK-NEXT: vsetvli zero, a0, e16alt, m1, ta, ma
+; CHECK-NEXT: vfsub.vv v8, v8, v9
+; CHECK-NEXT: ret
+entry:
+ %a = call <vscale x 4 x bfloat> @llvm.riscv.vfsub.nxv4bf16.nxv4bf16(
+ <vscale x 4 x bfloat> poison,
+ <vscale x 4 x bfloat> %0,
+ <vscale x 4 x bfloat> %1,
+ iXLen 7, iXLen %2)
+
+ ret <vscale x 4 x bfloat> %a
+}
+
+declare <vscale x 4 x bfloat> @llvm.riscv.vfsub.mask.nxv4bf16.nxv4bf16(
+ <vscale x 4 x bfloat>,
+ <vscale x 4 x bfloat>,
+ <vscale x 4 x bfloat>,
+ <vscale x 4 x i1>,
+ iXLen, iXLen, iXLen);
+
+define <vscale x 4 x bfloat> @intrinsic_vfsub_mask_vv_nxv4bf16_nxv4bf16_nxv4bf16(<vscale x 4 x bfloat> %0, <vscale x 4 x bfloat> %1, <vscale x 4 x bfloat> %2, <vscale x 4 x i1> %3, iXLen %4) nounwind {
+; CHECK-LABEL: intrinsic_vfsub_mask_vv_nxv4bf16_nxv4bf16_nxv4bf16:
+; CHECK: # %bb.0: # %entry
+; CHECK-NEXT: vsetvli zero, a0, e16alt, m1, ta, mu
+; CHECK-NEXT: vfsub.vv v8, v9, v10, v0.t
+; CHECK-NEXT: ret
+entry:
+ %a = call <vscale x 4 x bfloat> @llvm.riscv.vfsub.mask.nxv4bf16.nxv4bf16(
+ <vscale x 4 x bfloat> %0,
+ <vscale x 4 x bfloat> %1,
+ <vscale x 4 x bfloat> %2,
+ <vscale x 4 x i1> %3,
+ iXLen 7, iXLen %4, iXLen 1)
+
+ ret <vscale x 4 x bfloat> %a
+}
+
+declare <vscale x 8 x bfloat> @llvm.riscv.vfsub.nxv8bf16.nxv8bf16(
+ <vscale x 8 x bfloat>,
+ <vscale x 8 x bfloat>,
+ <vscale x 8 x bfloat>,
+ iXLen, iXLen);
+
+define <vscale x 8 x bfloat> @intrinsic_vfsub_vv_nxv8bf16_nxv8bf16_nxv8bf16(<vscale x 8 x bfloat> %0, <vscale x 8 x bfloat> %1, iXLen %2) nounwind {
+; CHECK-LABEL: intrinsic_vfsub_vv_nxv8bf16_nxv8bf16_nxv8bf16:
+; CHECK: # %bb.0: # %entry
+; CHECK-NEXT: vsetvli zero, a0, e16alt, m2, ta, ma
+; CHECK-NEXT: vfsub.vv v8, v8, v10
+; CHECK-NEXT: ret
+entry:
+ %a = call <vscale x 8 x bfloat> @llvm.riscv.vfsub.nxv8bf16.nxv8bf16(
+ <vscale x 8 x bfloat> poison,
+ <vscale x 8 x bfloat> %0,
+ <vscale x 8 x bfloat> %1,
+ iXLen 7, iXLen %2)
+
+ ret <vscale x 8 x bfloat> %a
+}
+
+declare <vscale x 8 x bfloat> @llvm.riscv.vfsub.mask.nxv8bf16.nxv8bf16(
+ <vscale x 8 x bfloat>,
+ <vscale x 8 x bfloat>,
+ <vscale x 8 x bfloat>,
+ <vscale x 8 x i1>,
+ iXLen, iXLen, iXLen);
+
+define <vscale x 8 x bfloat> @intrinsic_vfsub_mask_vv_nxv8bf16_nxv8bf16_nxv8bf16(<vscale x 8 x bfloat> %0, <vscale x 8 x bfloat> %1, <vscale x 8 x bfloat> %2, <vscale x 8 x i1> %3, iXLen %4) nounwind {
+; CHECK-LABEL: intrinsic_vfsub_mask_vv_nxv8bf16_nxv8bf16_nxv8bf16:
+; CHECK: # %bb.0: # %entry
+; CHECK-NEXT: vsetvli zero, a0, e16alt, m2, ta, mu
+; CHECK-NEXT: vfsub.vv v8, v10, v12, v0.t
+; CHECK-NEXT: ret
+entry:
+ %a = call <vscale x 8 x bfloat> @llvm.riscv.vfsub.mask.nxv8bf16.nxv8bf16(
+ <vscale x 8 x bfloat> %0,
+ <vscale x 8 x bfloat> %1,
+ <vscale x 8 x bfloat> %2,
+ <vscale x 8 x i1> %3,
+ iXLen 7, iXLen %4, iXLen 1)
+
+ ret <vscale x 8 x bfloat> %a
+}
+
+declare <vscale x 16 x bfloat> @llvm.riscv.vfsub.nxv16bf16.nxv16bf16(
+ <vscale x 16 x bfloat>,
+ <vscale x 16 x bfloat>,
+ <vscale x 16 x bfloat>,
+ iXLen, iXLen);
+
+define <vscale x 16 x bfloat> @intrinsic_vfsub_vv_nxv16bf16_nxv16bf16_nxv16bf16(<vscale x 16 x bfloat> %0, <vscale x 16 x bfloat> %1, iXLen %2) nounwind {
+; CHECK-LABEL: intrinsic_vfsub_vv_nxv16bf16_nxv16bf16_nxv16bf16:
+; CHECK: # %bb.0: # %entry
+; CHECK-NEXT: vsetvli zero, a0, e16alt, m4, ta, ma
+; CHECK-NEXT: vfsub.vv v8, v8, v12
+; CHECK-NEXT: ret
+entry:
+ %a = call <vscale x 16 x bfloat> @llvm.riscv.vfsub.nxv16bf16.nxv16bf16(
+ <vscale x 16 x bfloat> poison,
+ <vscale x 16 x bfloat> %0,
+ <vscale x 16 x bfloat> %1,
+ iXLen 7, iXLen %2)
+
+ ret <vscale x 16 x bfloat> %a
+}
+
+declare <vscale x 16 x bfloat> @llvm.riscv.vfsub.mask.nxv16bf16.nxv16bf16(
+ <vscale x 16 x bfloat>,
+ <vscale x 16 x bfloat>,
+ <vscale x 16 x bfloat>,
+ <vscale x 16 x i1>,
+ iXLen, iXLen, iXLen);
+
+define <vscale x 16 x bfloat> @intrinsic_vfsub_mask_vv_nxv16bf16_nxv16bf16_nxv16bf16(<vscale x 16 x bfloat> %0, <vscale x 16 x bfloat> %1, <vscale x 16 x bfloat> %2, <vscale x 16 x i1> %3, iXLen %4) nounwind {
+; CHECK-LABEL: intrinsic_vfsub_mask_vv_nxv16bf16_nxv16bf16_nxv16bf16:
+; CHECK: # %bb.0: # %entry
+; CHECK-NEXT: vsetvli zero, a0, e16alt, m4, ta, mu
+; CHECK-NEXT: vfsub.vv v8, v12, v16, v0.t
+; CHECK-NEXT: ret
+entry:
+ %a = call <vscale x 16 x bfloat> @llvm.riscv.vfsub.mask.nxv16bf16.nxv16bf16(
+ <vscale x 16 x bfloat> %0,
+ <vscale x 16 x bfloat> %1,
+ <vscale x 16 x bfloat> %2,
+ <vscale x 16 x i1> %3,
+ iXLen 7, iXLen %4, iXLen 1)
+
+ ret <vscale x 16 x bfloat> %a
+}
+
+declare <vscale x 32 x bfloat> @llvm.riscv.vfsub.nxv32bf16.nxv32bf16(
+ <vscale x 32 x bfloat>,
+ <vscale x 32 x bfloat>,
+ <vscale x 32 x bfloat>,
+ iXLen, iXLen);
+
+define <vscale x 32 x bfloat> @intrinsic_vfsub_vv_nxv32bf16_nxv32bf16_nxv32bf16(<vscale x 32 x bfloat> %0, <vscale x 32 x bfloat> %1, iXLen %2) nounwind {
+; CHECK-LABEL: intrinsic_vfsub_vv_nxv32bf16_nxv32bf16_nxv32bf16:
+; CHECK: # %bb.0: # %entry
+; CHECK-NEXT: vsetvli zero, a0, e16alt, m8, ta, ma
+; CHECK-NEXT: vfsub.vv v8, v8, v16
+; CHECK-NEXT: ret
+entry:
+ %a = call <vscale x 32 x bfloat> @llvm.riscv.vfsub.nxv32bf16.nxv32bf16(
+ <vscale x 32 x bfloat> poison,
+ <vscale x 32 x bfloat> %0,
+ <vscale x 32 x bfloat> %1,
+ iXLen 7, iXLen %2)
+
+ ret <vscale x 32 x bfloat> %a
+}
+
+declare <vscale x 32 x bfloat> @llvm.riscv.vfsub.mask.nxv32bf16.nxv32bf16(
+ <vscale x 32 x bfloat>,
+ <vscale x 32 x bfloat>,
+ <vscale x 32 x bfloat>,
+ <vscale x 32 x i1>,
+ iXLen, iXLen, iXLen);
+
+define <vscale x 32 x bfloat> @intrinsic_vfsub_mask_vv_nxv32bf16_nxv32bf16_nxv32bf16(<vscale x 32 x bfloat> %0, <vscale x 32 x bfloat> %1, <vscale x 32 x bfloat> %2, <vscale x 32 x i1> %3, iXLen %4) nounwind {
+; CHECK-LABEL: intrinsic_vfsub_mask_vv_nxv32bf16_nxv32bf16_nxv32bf16:
+; CHECK: # %bb.0: # %entry
+; CHECK-NEXT: vl8re16.v v24, (a0)
+; CHECK-NEXT: vsetvli zero, a1, e16alt, m8, ta, mu
+; CHECK-NEXT: vfsub.vv v8, v16, v24, v0.t
+; CHECK-NEXT: ret
+entry:
+ %a = call <vscale x 32 x bfloat> @llvm.riscv.vfsub.mask.nxv32bf16.nxv32bf16(
+ <vscale x 32 x bfloat> %0,
+ <vscale x 32 x bfloat> %1,
+ <vscale x 32 x bfloat> %2,
+ <vscale x 32 x i1> %3,
+ iXLen 7, iXLen %4, iXLen 1)
+
+ ret <vscale x 32 x bfloat> %a
+}
+
+declare <vscale x 1 x bfloat> @llvm.riscv.vfsub.nxv1bf16.bf16(
+ <vscale x 1 x bfloat>,
+ <vscale x 1 x bfloat>,
+ bfloat,
+ iXLen, iXLen);
+
+define <vscale x 1 x bfloat> @intrinsic_vfsub_vf_nxv1bf16_nxv1bf16_bf16(<vscale x 1 x bfloat> %0, bfloat %1, iXLen %2) nounwind {
+; CHECK-LABEL: intrinsic_vfsub_vf_nxv1bf16_nxv1bf16_bf16:
+; CHECK: # %bb.0: # %entry
+; CHECK-NEXT: vsetvli zero, a0, e16alt, mf4, ta, ma
+; CHECK-NEXT: vfsub.vf v8, v8, fa0
+; CHECK-NEXT: ret
+entry:
+ %a = call <vscale x 1 x bfloat> @llvm.riscv.vfsub.nxv1bf16.bf16(
+ <vscale x 1 x bfloat> poison,
+ <vscale x 1 x bfloat> %0,
+ bfloat %1,
+ iXLen 7, iXLen %2)
+
+ ret <vscale x 1 x bfloat> %a
+}
+
+declare <vscale x 1 x bfloat> @llvm.riscv.vfsub.mask.nxv1bf16.bf16(
+ <vscale x 1 x bfloat>,
+ <vscale x 1 x bfloat>,
+ bfloat,
+ <vscale x 1 x i1>,
+ iXLen, iXLen, iXLen);
+
+define <vscale x 1 x bfloat> @intrinsic_vfsub_mask_vf_nxv1bf16_nxv1bf16_bf16(<vscale x 1 x bfloat> %0, <vscale x 1 x bfloat> %1, bfloat %2, <vscale x 1 x i1> %3, iXLen %4) nounwind {
+; CHECK-LABEL: intrinsic_vfsub_mask_vf_nxv1bf16_nxv1bf16_bf16:
+; CHECK: # %bb.0: # %entry
+; CHECK-NEXT: vsetvli zero, a0, e16alt, mf4, ta, mu
+; CHECK-NEXT: vfsub.vf v8, v9, fa0, v0.t
+; CHECK-NEXT: ret
+entry:
+ %a = call <vscale x 1 x bfloat> @llvm.riscv.vfsub.mask.nxv1bf16.bf16(
+ <vscale x 1 x bfloat> %0,
+ <vscale x 1 x bfloat> %1,
+ bfloat %2,
+ <vscale x 1 x i1> %3,
+ iXLen 7, iXLen %4, iXLen 1)
+
+ ret <vscale x 1 x bfloat> %a
+}
+
+declare <vscale x 2 x bfloat> @llvm.riscv.vfsub.nxv2bf16.bf16(
+ <vscale x 2 x bfloat>,
+ <vscale x 2 x bfloat>,
+ bfloat,
+ iXLen, iXLen);
+
+define <vscale x 2 x bfloat> @intrinsic_vfsub_vf_nxv2bf16_nxv2bf16_bf16(<vscale x 2 x bfloat> %0, bfloat %1, iXLen %2) nounwind {
+; CHECK-LABEL: intrinsic_vfsub_vf_nxv2bf16_nxv2bf16_bf16:
+; CHECK: # %bb.0: # %entry
+; CHECK-NEXT: vsetvli zero, a0, e16alt, mf2, ta, ma
+; CHECK-NEXT: vfsub.vf v8, v8, fa0
+; CHECK-NEXT: ret
+entry:
+ %a = call <vscale x 2 x bfloat> @llvm.riscv.vfsub.nxv2bf16.bf16(
+ <vscale x 2 x bfloat> poison,
+ <vscale x 2 x bfloat> %0,
+ bfloat %1,
+ iXLen 7, iXLen %2)
+
+ ret <vscale x 2 x bfloat> %a
+}
+
+declare <vscale x 2 x bfloat> @llvm.riscv.vfsub.mask.nxv2bf16.bf16(
+ <vscale x 2 x bfloat>,
+ <vscale x 2 x bfloat>,
+ bfloat,
+ <vscale x 2 x i1>,
+ iXLen, iXLen, iXLen);
+
+define <vscale x 2 x bfloat> @intrinsic_vfsub_mask_vf_nxv2bf16_nxv2bf16_bf16(<vscale x 2 x bfloat> %0, <vscale x 2 x bfloat> %1, bfloat %2, <vscale x 2 x i1> %3, iXLen %4) nounwind {
+; CHECK-LABEL: intrinsic_vfsub_mask_vf_nxv2bf16_nxv2bf16_bf16:
+; CHECK: # %bb.0: # %entry
+; CHECK-NEXT: vsetvli zero, a0, e16alt, mf2, ta, mu
+; CHECK-NEXT: vfsub.vf v8, v9, fa0, v0.t
+; CHECK-NEXT: ret
+entry:
+ %a = call <vscale x 2 x bfloat> @llvm.riscv.vfsub.mask.nxv2bf16.bf16(
+ <vscale x 2 x bfloat> %0,
+ <vscale x 2 x bfloat> %1,
+ bfloat %2,
+ <vscale x 2 x i1> %3,
+ iXLen 7, iXLen %4, iXLen 1)
+
+ ret <vscale x 2 x bfloat> %a
+}
+
+declare <vscale x 4 x bfloat> @llvm.riscv.vfsub.nxv4bf16.bf16(
+ <vscale x 4 x bfloat>,
+ <vscale x 4 x bfloat>,
+ bfloat,
+ iXLen, iXLen);
+
+define <vscale x 4 x bfloat> @intrinsic_vfsub_vf_nxv4bf16_nxv4bf16_bf16(<vscale x 4 x bfloat> %0, bfloat %1, iXLen %2) nounwind {
+; CHECK-LABEL: intrinsic_vfsub_vf_nxv4bf16_nxv4bf16_bf16:
+; CHECK: # %bb.0: # %entry
+; CHECK-NEXT: vsetvli zero, a0, e16alt, m1, ta, ma
+; CHECK-NEXT: vfsub.vf v8, v8, fa0
+; CHECK-NEXT: ret
+entry:
+ %a = call <vscale x 4 x bfloat> @llvm.riscv.vfsub.nxv4bf16.bf16(
+ <vscale x 4 x bfloat> poison,
+ <vscale x 4 x bfloat> %0,
+ bfloat %1,
+ iXLen 7, iXLen %2)
+
+ ret <vscale x 4 x bfloat> %a
+}
+
+declare <vscale x 4 x bfloat> @llvm.riscv.vfsub.mask.nxv4bf16.bf16(
+ <vscale x 4 x bfloat>,
+ <vscale x 4 x bfloat>,
+ bfloat,
+ <vscale x 4 x i1>,
+ iXLen, iXLen, iXLen);
+
+define <vscale x 4 x bfloat> @intrinsic_vfsub_mask_vf_nxv4bf16_nxv4bf16_bf16(<vscale x 4 x bfloat> %0, <vscale x 4 x bfloat> %1, bfloat %2, <vscale x 4 x i1> %3, iXLen %4) nounwind {
+; CHECK-LABEL: intrinsic_vfsub_mask_vf_nxv4bf16_nxv4bf16_bf16:
+; CHECK: # %bb.0: # %entry
+; CHECK-NEXT: vsetvli zero, a0, e16alt, m1, ta, mu
+; CHECK-NEXT: vfsub.vf v8, v9, fa0, v0.t
+; CHECK-NEXT: ret
+entry:
+ %a = call <vscale x 4 x bfloat> @llvm.riscv.vfsub.mask.nxv4bf16.bf16(
+ <vscale x 4 x bfloat> %0,
+ <vscale x 4 x bfloat> %1,
+ bfloat %2,
+ <vscale x 4 x i1> %3,
+ iXLen 7, iXLen %4, iXLen 1)
+
+ ret <vscale x 4 x bfloat> %a
+}
+
+declare <vscale x 8 x bfloat> @llvm.riscv.vfsub.nxv8bf16.bf16(
+ <vscale x 8 x bfloat>,
+ <vscale x 8 x bfloat>,
+ bfloat,
+ iXLen, iXLen);
+
+define <vscale x 8 x bfloat> @intrinsic_vfsub_vf_nxv8bf16_nxv8bf16_bf16(<vscale x 8 x bfloat> %0, bfloat %1, iXLen %2) nounwind {
+; CHECK-LABEL: intrinsic_vfsub_vf_nxv8bf16_nxv8bf16_bf16:
+; CHECK: # %bb.0: # %entry
+; CHECK-NEXT: vsetvli zero, a0, e16alt, m2, ta, ma
+; CHECK-NEXT: vfsub.vf v8, v8, fa0
+; CHECK-NEXT: ret
+entry:
+ %a = call <vscale x 8 x bfloat> @llvm.riscv.vfsub.nxv8bf16.bf16(
+ <vscale x 8 x bfloat> poison,
+ <vscale x 8 x bfloat> %0,
+ bfloat %1,
+ iXLen 7, iXLen %2)
+
+ ret <vscale x 8 x bfloat> %a
+}
+
+declare <vscale x 8 x bfloat> @llvm.riscv.vfsub.mask.nxv8bf16.bf16(
+ <vscale x 8 x bfloat>,
+ <vscale x 8 x bfloat>,
+ bfloat,
+ <vscale x 8 x i1>,
+ iXLen, iXLen, iXLen);
+
+define <vscale x 8 x bfloat> @intrinsic_vfsub_mask_vf_nxv8bf16_nxv8bf16_bf16(<vscale x 8 x bfloat> %0, <vscale x 8 x bfloat> %1, bfloat %2, <vscale x 8 x i1> %3, iXLen %4) nounwind {
+; CHECK-LABEL: intrinsic_vfsub_mask_vf_nxv8bf16_nxv8bf16_bf16:
+; CHECK: # %bb.0: # %entry
+; CHECK-NEXT: vsetvli zero, a0, e16alt, m2, ta, mu
+; CHECK-NEXT: vfsub.vf v8, v10, fa0, v0.t
+; CHECK-NEXT: ret
+entry:
+ %a = call <vscale x 8 x bfloat> @llvm.riscv.vfsub.mask.nxv8bf16.bf16(
+ <vscale x 8 x bfloat> %0,
+ <vscale x 8 x bfloat> %1,
+ bfloat %2,
+ <vscale x 8 x i1> %3,
+ iXLen 7, iXLen %4, iXLen 1)
+
+ ret <vscale x 8 x bfloat> %a
+}
+
+declare <vscale x 16 x bfloat> @llvm.riscv.vfsub.nxv16bf16.bf16(
+ <vscale x 16 x bfloat>,
+ <vscale x 16 x bfloat>,
+ bfloat,
+ iXLen, iXLen);
+
+define <vscale x 16 x bfloat> @intrinsic_vfsub_vf_nxv16bf16_nxv16bf16_bf16(<vscale x 16 x bfloat> %0, bfloat %1, iXLen %2) nounwind {
+; CHECK-LABEL: intrinsic_vfsub_vf_nxv16bf16_nxv16bf16_bf16:
+; CHECK: # %bb.0: # %entry
+; CHECK-NEXT: vsetvli zero, a0, e16alt, m4, ta, ma
+; CHECK-NEXT: vfsub.vf v8, v8, fa0
+; CHECK-NEXT: ret
+entry:
+ %a = call <vscale x 16 x bfloat> @llvm.riscv.vfsub.nxv16bf16.bf16(
+ <vscale x 16 x bfloat> poison,
+ <vscale x 16 x bfloat> %0,
+ bfloat %1,
+ iXLen 7, iXLen %2)
+
+ ret <vscale x 16 x bfloat> %a
+}
+
+declare <vscale x 16 x bfloat> @llvm.riscv.vfsub.mask.nxv16bf16.bf16(
+ <vscale x 16 x bfloat>,
+ <vscale x 16 x bfloat>,
+ bfloat,
+ <vscale x 16 x i1>,
+ iXLen, iXLen, iXLen);
+
+define <vscale x 16 x bfloat> @intrinsic_vfsub_mask_vf_nxv16bf16_nxv16bf16_bf16(<vscale x 16 x bfloat> %0, <vscale x 16 x bfloat> %1, bfloat %2, <vscale x 16 x i1> %3, iXLen %4) nounwind {
+; CHECK-LABEL: intrinsic_vfsub_mask_vf_nxv16bf16_nxv16bf16_bf16:
+; CHECK: # %bb.0: # %entry
+; CHECK-NEXT: vsetvli zero, a0, e16alt, m4, ta, mu
+; CHECK-NEXT: vfsub.vf v8, v12, fa0, v0.t
+; CHECK-NEXT: ret
+entry:
+ %a = call <vscale x 16 x bfloat> @llvm.riscv.vfsub.mask.nxv16bf16.bf16(
+ <vscale x 16 x bfloat> %0,
+ <vscale x 16 x bfloat> %1,
+ bfloat %2,
+ <vscale x 16 x i1> %3,
+ iXLen 7, iXLen %4, iXLen 1)
+
+ ret <vscale x 16 x bfloat> %a
+}
+
+declare <vscale x 32 x bfloat> @llvm.riscv.vfsub.nxv32bf16.bf16(
+ <vscale x 32 x bfloat>,
+ <vscale x 32 x bfloat>,
+ bfloat,
+ iXLen, iXLen);
+
+define <vscale x 32 x bfloat> @intrinsic_vfsub_vf_nxv32bf16_nxv32bf16_bf16(<vscale x 32 x bfloat> %0, bfloat %1, iXLen %2) nounwind {
+; CHECK-LABEL: intrinsic_vfsub_vf_nxv32bf16_nxv32bf16_bf16:
+; CHECK: # %bb.0: # %entry
+; CHECK-NEXT: vsetvli zero, a0, e16alt, m8, ta, ma
+; CHECK-NEXT: vfsub.vf v8, v8, fa0
+; CHECK-NEXT: ret
+entry:
+ %a = call <vscale x 32 x bfloat> @llvm.riscv.vfsub.nxv32bf16.bf16(
+ <vscale x 32 x bfloat> poison,
+ <vscale x 32 x bfloat> %0,
+ bfloat %1,
+ iXLen 7, iXLen %2)
+
+ ret <vscale x 32 x bfloat> %a
+}
+
+declare <vscale x 32 x bfloat> @llvm.riscv.vfsub.mask.nxv32bf16.bf16(
+ <vscale x 32 x bfloat>,
+ <vscale x 32 x bfloat>,
+ bfloat,
+ <vscale x 32 x i1>,
+ iXLen, iXLen, iXLen);
+
+define <vscale x 32 x bfloat> @intrinsic_vfsub_mask_vf_nxv32bf16_nxv32bf16_bf16(<vscale x 32 x bfloat> %0, <vscale x 32 x bfloat> %1, bfloat %2, <vscale x 32 x i1> %3, iXLen %4) nounwind {
+; CHECK-LABEL: intrinsic_vfsub_mask_vf_nxv32bf16_nxv32bf16_bf16:
+; CHECK: # %bb.0: # %entry
+; CHECK-NEXT: vsetvli zero, a0, e16alt, m8, ta, mu
+; CHECK-NEXT: vfsub.vf v8, v16, fa0, v0.t
+; CHECK-NEXT: ret
+entry:
+ %a = call <vscale x 32 x bfloat> @llvm.riscv.vfsub.mask.nxv32bf16.bf16(
+ <vscale x 32 x bfloat> %0,
+ <vscale x 32 x bfloat> %1,
+ bfloat %2,
+ <vscale x 32 x i1> %3,
+ iXLen 7, iXLen %4, iXLen 1)
+
+ ret <vscale x 32 x bfloat> %a
+}
+
diff --git a/llvm/test/CodeGen/RISCV/rvv/vfwadd-bf.ll b/llvm/test/CodeGen/RISCV/rvv/vfwadd-bf.ll
new file mode 100644
index 0000000..62feac8
--- /dev/null
+++ b/llvm/test/CodeGen/RISCV/rvv/vfwadd-bf.ll
@@ -0,0 +1,519 @@
+; NOTE: Assertions have been autogenerated by utils/update_llc_test_checks.py
+; RUN: sed 's/iXLen/i32/g' %s | llc -mtriple=riscv32 -mattr=+v,+experimental-zvfbfa \
+; RUN: -verify-machineinstrs -target-abi=ilp32d | FileCheck %s
+; RUN: sed 's/iXLen/i64/g' %s | llc -mtriple=riscv64 -mattr=+v,+experimental-zvfbfa \
+; RUN: -verify-machineinstrs -target-abi=lp64d | FileCheck %s
+
+declare <vscale x 1 x float> @llvm.riscv.vfwadd.nxv1f32.nxv1bf16.nxv1bf16(
+ <vscale x 1 x float>,
+ <vscale x 1 x bfloat>,
+ <vscale x 1 x bfloat>,
+ iXLen, iXLen);
+
+define <vscale x 1 x float> @intrinsic_vfwadd_vv_nxv1f32_nxv1bf16_nxv1bf16(<vscale x 1 x bfloat> %0, <vscale x 1 x bfloat> %1, iXLen %2) nounwind {
+; CHECK-LABEL: intrinsic_vfwadd_vv_nxv1f32_nxv1bf16_nxv1bf16:
+; CHECK: # %bb.0: # %entry
+; CHECK-NEXT: fsrmi a1, 0
+; CHECK-NEXT: vsetvli zero, a0, e16alt, mf4, ta, ma
+; CHECK-NEXT: vfwadd.vv v10, v8, v9
+; CHECK-NEXT: fsrm a1
+; CHECK-NEXT: vmv1r.v v8, v10
+; CHECK-NEXT: ret
+entry:
+ %a = call <vscale x 1 x float> @llvm.riscv.vfwadd.nxv1f32.nxv1bf16.nxv1bf16(
+ <vscale x 1 x float> poison,
+ <vscale x 1 x bfloat> %0,
+ <vscale x 1 x bfloat> %1,
+ iXLen 0, iXLen %2)
+
+ ret <vscale x 1 x float> %a
+}
+
+declare <vscale x 1 x float> @llvm.riscv.vfwadd.mask.nxv1f32.nxv1bf16.nxv1bf16(
+ <vscale x 1 x float>,
+ <vscale x 1 x bfloat>,
+ <vscale x 1 x bfloat>,
+ <vscale x 1 x i1>,
+ iXLen, iXLen, iXLen);
+
+define <vscale x 1 x float> @intrinsic_vfwadd_mask_vv_nxv1f32_nxv1bf16_nxv1bf16(<vscale x 1 x float> %0, <vscale x 1 x bfloat> %1, <vscale x 1 x bfloat> %2, <vscale x 1 x i1> %3, iXLen %4) nounwind {
+; CHECK-LABEL: intrinsic_vfwadd_mask_vv_nxv1f32_nxv1bf16_nxv1bf16:
+; CHECK: # %bb.0: # %entry
+; CHECK-NEXT: fsrmi a1, 0
+; CHECK-NEXT: vsetvli zero, a0, e16alt, mf4, ta, mu
+; CHECK-NEXT: vfwadd.vv v8, v9, v10, v0.t
+; CHECK-NEXT: fsrm a1
+; CHECK-NEXT: ret
+entry:
+ %a = call <vscale x 1 x float> @llvm.riscv.vfwadd.mask.nxv1f32.nxv1bf16.nxv1bf16(
+ <vscale x 1 x float> %0,
+ <vscale x 1 x bfloat> %1,
+ <vscale x 1 x bfloat> %2,
+ <vscale x 1 x i1> %3,
+ iXLen 0, iXLen %4, iXLen 1)
+
+ ret <vscale x 1 x float> %a
+}
+
+declare <vscale x 2 x float> @llvm.riscv.vfwadd.nxv2f32.nxv2bf16.nxv2bf16(
+ <vscale x 2 x float>,
+ <vscale x 2 x bfloat>,
+ <vscale x 2 x bfloat>,
+ iXLen, iXLen);
+
+define <vscale x 2 x float> @intrinsic_vfwadd_vv_nxv2f32_nxv2bf16_nxv2bf16(<vscale x 2 x bfloat> %0, <vscale x 2 x bfloat> %1, iXLen %2) nounwind {
+; CHECK-LABEL: intrinsic_vfwadd_vv_nxv2f32_nxv2bf16_nxv2bf16:
+; CHECK: # %bb.0: # %entry
+; CHECK-NEXT: fsrmi a1, 0
+; CHECK-NEXT: vsetvli zero, a0, e16alt, mf2, ta, ma
+; CHECK-NEXT: vfwadd.vv v10, v8, v9
+; CHECK-NEXT: fsrm a1
+; CHECK-NEXT: vmv1r.v v8, v10
+; CHECK-NEXT: ret
+entry:
+ %a = call <vscale x 2 x float> @llvm.riscv.vfwadd.nxv2f32.nxv2bf16.nxv2bf16(
+ <vscale x 2 x float> poison,
+ <vscale x 2 x bfloat> %0,
+ <vscale x 2 x bfloat> %1,
+ iXLen 0, iXLen %2)
+
+ ret <vscale x 2 x float> %a
+}
+
+declare <vscale x 2 x float> @llvm.riscv.vfwadd.mask.nxv2f32.nxv2bf16.nxv2bf16(
+ <vscale x 2 x float>,
+ <vscale x 2 x bfloat>,
+ <vscale x 2 x bfloat>,
+ <vscale x 2 x i1>,
+ iXLen, iXLen, iXLen);
+
+define <vscale x 2 x float> @intrinsic_vfwadd_mask_vv_nxv2f32_nxv2bf16_nxv2bf16(<vscale x 2 x float> %0, <vscale x 2 x bfloat> %1, <vscale x 2 x bfloat> %2, <vscale x 2 x i1> %3, iXLen %4) nounwind {
+; CHECK-LABEL: intrinsic_vfwadd_mask_vv_nxv2f32_nxv2bf16_nxv2bf16:
+; CHECK: # %bb.0: # %entry
+; CHECK-NEXT: fsrmi a1, 0
+; CHECK-NEXT: vsetvli zero, a0, e16alt, mf2, ta, mu
+; CHECK-NEXT: vfwadd.vv v8, v9, v10, v0.t
+; CHECK-NEXT: fsrm a1
+; CHECK-NEXT: ret
+entry:
+ %a = call <vscale x 2 x float> @llvm.riscv.vfwadd.mask.nxv2f32.nxv2bf16.nxv2bf16(
+ <vscale x 2 x float> %0,
+ <vscale x 2 x bfloat> %1,
+ <vscale x 2 x bfloat> %2,
+ <vscale x 2 x i1> %3,
+ iXLen 0, iXLen %4, iXLen 1)
+
+ ret <vscale x 2 x float> %a
+}
+
+declare <vscale x 4 x float> @llvm.riscv.vfwadd.nxv4f32.nxv4bf16.nxv4bf16(
+ <vscale x 4 x float>,
+ <vscale x 4 x bfloat>,
+ <vscale x 4 x bfloat>,
+ iXLen, iXLen);
+
+define <vscale x 4 x float> @intrinsic_vfwadd_vv_nxv4f32_nxv4bf16_nxv4bf16(<vscale x 4 x bfloat> %0, <vscale x 4 x bfloat> %1, iXLen %2) nounwind {
+; CHECK-LABEL: intrinsic_vfwadd_vv_nxv4f32_nxv4bf16_nxv4bf16:
+; CHECK: # %bb.0: # %entry
+; CHECK-NEXT: vsetvli zero, a0, e16alt, m1, ta, ma
+; CHECK-NEXT: vmv1r.v v10, v9
+; CHECK-NEXT: vmv1r.v v11, v8
+; CHECK-NEXT: fsrmi a0, 0
+; CHECK-NEXT: vfwadd.vv v8, v11, v10
+; CHECK-NEXT: fsrm a0
+; CHECK-NEXT: ret
+entry:
+ %a = call <vscale x 4 x float> @llvm.riscv.vfwadd.nxv4f32.nxv4bf16.nxv4bf16(
+ <vscale x 4 x float> poison,
+ <vscale x 4 x bfloat> %0,
+ <vscale x 4 x bfloat> %1,
+ iXLen 0, iXLen %2)
+
+ ret <vscale x 4 x float> %a
+}
+
+declare <vscale x 4 x float> @llvm.riscv.vfwadd.mask.nxv4f32.nxv4bf16.nxv4bf16(
+ <vscale x 4 x float>,
+ <vscale x 4 x bfloat>,
+ <vscale x 4 x bfloat>,
+ <vscale x 4 x i1>,
+ iXLen, iXLen, iXLen);
+
+define <vscale x 4 x float> @intrinsic_vfwadd_mask_vv_nxv4f32_nxv4bf16_nxv4bf16(<vscale x 4 x float> %0, <vscale x 4 x bfloat> %1, <vscale x 4 x bfloat> %2, <vscale x 4 x i1> %3, iXLen %4) nounwind {
+; CHECK-LABEL: intrinsic_vfwadd_mask_vv_nxv4f32_nxv4bf16_nxv4bf16:
+; CHECK: # %bb.0: # %entry
+; CHECK-NEXT: fsrmi a1, 0
+; CHECK-NEXT: vsetvli zero, a0, e16alt, m1, ta, mu
+; CHECK-NEXT: vfwadd.vv v8, v10, v11, v0.t
+; CHECK-NEXT: fsrm a1
+; CHECK-NEXT: ret
+entry:
+ %a = call <vscale x 4 x float> @llvm.riscv.vfwadd.mask.nxv4f32.nxv4bf16.nxv4bf16(
+ <vscale x 4 x float> %0,
+ <vscale x 4 x bfloat> %1,
+ <vscale x 4 x bfloat> %2,
+ <vscale x 4 x i1> %3,
+ iXLen 0, iXLen %4, iXLen 1)
+
+ ret <vscale x 4 x float> %a
+}
+
+declare <vscale x 8 x float> @llvm.riscv.vfwadd.nxv8f32.nxv8bf16.nxv8bf16(
+ <vscale x 8 x float>,
+ <vscale x 8 x bfloat>,
+ <vscale x 8 x bfloat>,
+ iXLen, iXLen);
+
+define <vscale x 8 x float> @intrinsic_vfwadd_vv_nxv8f32_nxv8bf16_nxv8bf16(<vscale x 8 x bfloat> %0, <vscale x 8 x bfloat> %1, iXLen %2) nounwind {
+; CHECK-LABEL: intrinsic_vfwadd_vv_nxv8f32_nxv8bf16_nxv8bf16:
+; CHECK: # %bb.0: # %entry
+; CHECK-NEXT: vsetvli zero, a0, e16alt, m2, ta, ma
+; CHECK-NEXT: vmv2r.v v12, v10
+; CHECK-NEXT: vmv2r.v v14, v8
+; CHECK-NEXT: fsrmi a0, 0
+; CHECK-NEXT: vfwadd.vv v8, v14, v12
+; CHECK-NEXT: fsrm a0
+; CHECK-NEXT: ret
+entry:
+ %a = call <vscale x 8 x float> @llvm.riscv.vfwadd.nxv8f32.nxv8bf16.nxv8bf16(
+ <vscale x 8 x float> poison,
+ <vscale x 8 x bfloat> %0,
+ <vscale x 8 x bfloat> %1,
+ iXLen 0, iXLen %2)
+
+ ret <vscale x 8 x float> %a
+}
+
+declare <vscale x 8 x float> @llvm.riscv.vfwadd.mask.nxv8f32.nxv8bf16.nxv8bf16(
+ <vscale x 8 x float>,
+ <vscale x 8 x bfloat>,
+ <vscale x 8 x bfloat>,
+ <vscale x 8 x i1>,
+ iXLen, iXLen, iXLen);
+
+define <vscale x 8 x float> @intrinsic_vfwadd_mask_vv_nxv8f32_nxv8bf16_nxv8bf16(<vscale x 8 x float> %0, <vscale x 8 x bfloat> %1, <vscale x 8 x bfloat> %2, <vscale x 8 x i1> %3, iXLen %4) nounwind {
+; CHECK-LABEL: intrinsic_vfwadd_mask_vv_nxv8f32_nxv8bf16_nxv8bf16:
+; CHECK: # %bb.0: # %entry
+; CHECK-NEXT: fsrmi a1, 0
+; CHECK-NEXT: vsetvli zero, a0, e16alt, m2, ta, mu
+; CHECK-NEXT: vfwadd.vv v8, v12, v14, v0.t
+; CHECK-NEXT: fsrm a1
+; CHECK-NEXT: ret
+entry:
+ %a = call <vscale x 8 x float> @llvm.riscv.vfwadd.mask.nxv8f32.nxv8bf16.nxv8bf16(
+ <vscale x 8 x float> %0,
+ <vscale x 8 x bfloat> %1,
+ <vscale x 8 x bfloat> %2,
+ <vscale x 8 x i1> %3,
+ iXLen 0, iXLen %4, iXLen 1)
+
+ ret <vscale x 8 x float> %a
+}
+
+declare <vscale x 16 x float> @llvm.riscv.vfwadd.nxv16f32.nxv16bf16.nxv16bf16(
+ <vscale x 16 x float>,
+ <vscale x 16 x bfloat>,
+ <vscale x 16 x bfloat>,
+ iXLen, iXLen);
+
+define <vscale x 16 x float> @intrinsic_vfwadd_vv_nxv16f32_nxv16bf16_nxv16bf16(<vscale x 16 x bfloat> %0, <vscale x 16 x bfloat> %1, iXLen %2) nounwind {
+; CHECK-LABEL: intrinsic_vfwadd_vv_nxv16f32_nxv16bf16_nxv16bf16:
+; CHECK: # %bb.0: # %entry
+; CHECK-NEXT: vsetvli zero, a0, e16alt, m4, ta, ma
+; CHECK-NEXT: vmv4r.v v16, v12
+; CHECK-NEXT: vmv4r.v v20, v8
+; CHECK-NEXT: fsrmi a0, 0
+; CHECK-NEXT: vfwadd.vv v8, v20, v16
+; CHECK-NEXT: fsrm a0
+; CHECK-NEXT: ret
+entry:
+ %a = call <vscale x 16 x float> @llvm.riscv.vfwadd.nxv16f32.nxv16bf16.nxv16bf16(
+ <vscale x 16 x float> poison,
+ <vscale x 16 x bfloat> %0,
+ <vscale x 16 x bfloat> %1,
+ iXLen 0, iXLen %2)
+
+ ret <vscale x 16 x float> %a
+}
+
+declare <vscale x 16 x float> @llvm.riscv.vfwadd.mask.nxv16f32.nxv16bf16.nxv16bf16(
+ <vscale x 16 x float>,
+ <vscale x 16 x bfloat>,
+ <vscale x 16 x bfloat>,
+ <vscale x 16 x i1>,
+ iXLen, iXLen, iXLen);
+
+define <vscale x 16 x float> @intrinsic_vfwadd_mask_vv_nxv16f32_nxv16bf16_nxv16bf16(<vscale x 16 x float> %0, <vscale x 16 x bfloat> %1, <vscale x 16 x bfloat> %2, <vscale x 16 x i1> %3, iXLen %4) nounwind {
+; CHECK-LABEL: intrinsic_vfwadd_mask_vv_nxv16f32_nxv16bf16_nxv16bf16:
+; CHECK: # %bb.0: # %entry
+; CHECK-NEXT: fsrmi a1, 0
+; CHECK-NEXT: vsetvli zero, a0, e16alt, m4, ta, mu
+; CHECK-NEXT: vfwadd.vv v8, v16, v20, v0.t
+; CHECK-NEXT: fsrm a1
+; CHECK-NEXT: ret
+entry:
+ %a = call <vscale x 16 x float> @llvm.riscv.vfwadd.mask.nxv16f32.nxv16bf16.nxv16bf16(
+ <vscale x 16 x float> %0,
+ <vscale x 16 x bfloat> %1,
+ <vscale x 16 x bfloat> %2,
+ <vscale x 16 x i1> %3,
+ iXLen 0, iXLen %4, iXLen 1)
+
+ ret <vscale x 16 x float> %a
+}
+
+declare <vscale x 1 x float> @llvm.riscv.vfwadd.nxv1f32.nxv1bf16.bf16(
+ <vscale x 1 x float>,
+ <vscale x 1 x bfloat>,
+ bfloat,
+ iXLen, iXLen);
+
+define <vscale x 1 x float> @intrinsic_vfwadd_vf_nxv1f32_nxv1bf16_bf16(<vscale x 1 x bfloat> %0, bfloat %1, iXLen %2) nounwind {
+; CHECK-LABEL: intrinsic_vfwadd_vf_nxv1f32_nxv1bf16_bf16:
+; CHECK: # %bb.0: # %entry
+; CHECK-NEXT: fsrmi a1, 0
+; CHECK-NEXT: vsetvli zero, a0, e16alt, mf4, ta, ma
+; CHECK-NEXT: vfwadd.vf v9, v8, fa0
+; CHECK-NEXT: fsrm a1
+; CHECK-NEXT: vmv1r.v v8, v9
+; CHECK-NEXT: ret
+entry:
+ %a = call <vscale x 1 x float> @llvm.riscv.vfwadd.nxv1f32.nxv1bf16.bf16(
+ <vscale x 1 x float> poison,
+ <vscale x 1 x bfloat> %0,
+ bfloat %1,
+ iXLen 0, iXLen %2)
+
+ ret <vscale x 1 x float> %a
+}
+
+declare <vscale x 1 x float> @llvm.riscv.vfwadd.mask.nxv1f32.nxv1bf16.bf16(
+ <vscale x 1 x float>,
+ <vscale x 1 x bfloat>,
+ bfloat,
+ <vscale x 1 x i1>,
+ iXLen, iXLen, iXLen);
+
+define <vscale x 1 x float> @intrinsic_vfwadd_mask_vf_nxv1f32_nxv1bf16_bf16(<vscale x 1 x float> %0, <vscale x 1 x bfloat> %1, bfloat %2, <vscale x 1 x i1> %3, iXLen %4) nounwind {
+; CHECK-LABEL: intrinsic_vfwadd_mask_vf_nxv1f32_nxv1bf16_bf16:
+; CHECK: # %bb.0: # %entry
+; CHECK-NEXT: fsrmi a1, 0
+; CHECK-NEXT: vsetvli zero, a0, e16alt, mf4, ta, mu
+; CHECK-NEXT: vfwadd.vf v8, v9, fa0, v0.t
+; CHECK-NEXT: fsrm a1
+; CHECK-NEXT: ret
+entry:
+ %a = call <vscale x 1 x float> @llvm.riscv.vfwadd.mask.nxv1f32.nxv1bf16.bf16(
+ <vscale x 1 x float> %0,
+ <vscale x 1 x bfloat> %1,
+ bfloat %2,
+ <vscale x 1 x i1> %3,
+ iXLen 0, iXLen %4, iXLen 1)
+
+ ret <vscale x 1 x float> %a
+}
+
+declare <vscale x 2 x float> @llvm.riscv.vfwadd.nxv2f32.nxv2bf16.bf16(
+ <vscale x 2 x float>,
+ <vscale x 2 x bfloat>,
+ bfloat,
+ iXLen, iXLen);
+
+define <vscale x 2 x float> @intrinsic_vfwadd_vf_nxv2f32_nxv2bf16_bf16(<vscale x 2 x bfloat> %0, bfloat %1, iXLen %2) nounwind {
+; CHECK-LABEL: intrinsic_vfwadd_vf_nxv2f32_nxv2bf16_bf16:
+; CHECK: # %bb.0: # %entry
+; CHECK-NEXT: fsrmi a1, 0
+; CHECK-NEXT: vsetvli zero, a0, e16alt, mf2, ta, ma
+; CHECK-NEXT: vfwadd.vf v9, v8, fa0
+; CHECK-NEXT: fsrm a1
+; CHECK-NEXT: vmv1r.v v8, v9
+; CHECK-NEXT: ret
+entry:
+ %a = call <vscale x 2 x float> @llvm.riscv.vfwadd.nxv2f32.nxv2bf16.bf16(
+ <vscale x 2 x float> poison,
+ <vscale x 2 x bfloat> %0,
+ bfloat %1,
+ iXLen 0, iXLen %2)
+
+ ret <vscale x 2 x float> %a
+}
+
+declare <vscale x 2 x float> @llvm.riscv.vfwadd.mask.nxv2f32.nxv2bf16.bf16(
+ <vscale x 2 x float>,
+ <vscale x 2 x bfloat>,
+ bfloat,
+ <vscale x 2 x i1>,
+ iXLen, iXLen, iXLen);
+
+define <vscale x 2 x float> @intrinsic_vfwadd_mask_vf_nxv2f32_nxv2bf16_bf16(<vscale x 2 x float> %0, <vscale x 2 x bfloat> %1, bfloat %2, <vscale x 2 x i1> %3, iXLen %4) nounwind {
+; CHECK-LABEL: intrinsic_vfwadd_mask_vf_nxv2f32_nxv2bf16_bf16:
+; CHECK: # %bb.0: # %entry
+; CHECK-NEXT: fsrmi a1, 0
+; CHECK-NEXT: vsetvli zero, a0, e16alt, mf2, ta, mu
+; CHECK-NEXT: vfwadd.vf v8, v9, fa0, v0.t
+; CHECK-NEXT: fsrm a1
+; CHECK-NEXT: ret
+entry:
+ %a = call <vscale x 2 x float> @llvm.riscv.vfwadd.mask.nxv2f32.nxv2bf16.bf16(
+ <vscale x 2 x float> %0,
+ <vscale x 2 x bfloat> %1,
+ bfloat %2,
+ <vscale x 2 x i1> %3,
+ iXLen 0, iXLen %4, iXLen 1)
+
+ ret <vscale x 2 x float> %a
+}
+
+declare <vscale x 4 x float> @llvm.riscv.vfwadd.nxv4f32.nxv4bf16.bf16(
+ <vscale x 4 x float>,
+ <vscale x 4 x bfloat>,
+ bfloat,
+ iXLen, iXLen);
+
+define <vscale x 4 x float> @intrinsic_vfwadd_vf_nxv4f32_nxv4bf16_bf16(<vscale x 4 x bfloat> %0, bfloat %1, iXLen %2) nounwind {
+; CHECK-LABEL: intrinsic_vfwadd_vf_nxv4f32_nxv4bf16_bf16:
+; CHECK: # %bb.0: # %entry
+; CHECK-NEXT: vsetvli zero, a0, e16alt, m1, ta, ma
+; CHECK-NEXT: vmv1r.v v10, v8
+; CHECK-NEXT: fsrmi a0, 0
+; CHECK-NEXT: vfwadd.vf v8, v10, fa0
+; CHECK-NEXT: fsrm a0
+; CHECK-NEXT: ret
+entry:
+ %a = call <vscale x 4 x float> @llvm.riscv.vfwadd.nxv4f32.nxv4bf16.bf16(
+ <vscale x 4 x float> poison,
+ <vscale x 4 x bfloat> %0,
+ bfloat %1,
+ iXLen 0, iXLen %2)
+
+ ret <vscale x 4 x float> %a
+}
+
+declare <vscale x 4 x float> @llvm.riscv.vfwadd.mask.nxv4f32.nxv4bf16.bf16(
+ <vscale x 4 x float>,
+ <vscale x 4 x bfloat>,
+ bfloat,
+ <vscale x 4 x i1>,
+ iXLen, iXLen, iXLen);
+
+define <vscale x 4 x float> @intrinsic_vfwadd_mask_vf_nxv4f32_nxv4bf16_bf16(<vscale x 4 x float> %0, <vscale x 4 x bfloat> %1, bfloat %2, <vscale x 4 x i1> %3, iXLen %4) nounwind {
+; CHECK-LABEL: intrinsic_vfwadd_mask_vf_nxv4f32_nxv4bf16_bf16:
+; CHECK: # %bb.0: # %entry
+; CHECK-NEXT: fsrmi a1, 0
+; CHECK-NEXT: vsetvli zero, a0, e16alt, m1, ta, mu
+; CHECK-NEXT: vfwadd.vf v8, v10, fa0, v0.t
+; CHECK-NEXT: fsrm a1
+; CHECK-NEXT: ret
+entry:
+ %a = call <vscale x 4 x float> @llvm.riscv.vfwadd.mask.nxv4f32.nxv4bf16.bf16(
+ <vscale x 4 x float> %0,
+ <vscale x 4 x bfloat> %1,
+ bfloat %2,
+ <vscale x 4 x i1> %3,
+ iXLen 0, iXLen %4, iXLen 1)
+
+ ret <vscale x 4 x float> %a
+}
+
+declare <vscale x 8 x float> @llvm.riscv.vfwadd.nxv8f32.nxv8bf16.bf16(
+ <vscale x 8 x float>,
+ <vscale x 8 x bfloat>,
+ bfloat,
+ iXLen, iXLen);
+
+define <vscale x 8 x float> @intrinsic_vfwadd_vf_nxv8f32_nxv8bf16_bf16(<vscale x 8 x bfloat> %0, bfloat %1, iXLen %2) nounwind {
+; CHECK-LABEL: intrinsic_vfwadd_vf_nxv8f32_nxv8bf16_bf16:
+; CHECK: # %bb.0: # %entry
+; CHECK-NEXT: vsetvli zero, a0, e16alt, m2, ta, ma
+; CHECK-NEXT: vmv2r.v v12, v8
+; CHECK-NEXT: fsrmi a0, 0
+; CHECK-NEXT: vfwadd.vf v8, v12, fa0
+; CHECK-NEXT: fsrm a0
+; CHECK-NEXT: ret
+entry:
+ %a = call <vscale x 8 x float> @llvm.riscv.vfwadd.nxv8f32.nxv8bf16.bf16(
+ <vscale x 8 x float> poison,
+ <vscale x 8 x bfloat> %0,
+ bfloat %1,
+ iXLen 0, iXLen %2)
+
+ ret <vscale x 8 x float> %a
+}
+
+declare <vscale x 8 x float> @llvm.riscv.vfwadd.mask.nxv8f32.nxv8bf16.bf16(
+ <vscale x 8 x float>,
+ <vscale x 8 x bfloat>,
+ bfloat,
+ <vscale x 8 x i1>,
+ iXLen, iXLen, iXLen);
+
+define <vscale x 8 x float> @intrinsic_vfwadd_mask_vf_nxv8f32_nxv8bf16_bf16(<vscale x 8 x float> %0, <vscale x 8 x bfloat> %1, bfloat %2, <vscale x 8 x i1> %3, iXLen %4) nounwind {
+; CHECK-LABEL: intrinsic_vfwadd_mask_vf_nxv8f32_nxv8bf16_bf16:
+; CHECK: # %bb.0: # %entry
+; CHECK-NEXT: fsrmi a1, 0
+; CHECK-NEXT: vsetvli zero, a0, e16alt, m2, ta, mu
+; CHECK-NEXT: vfwadd.vf v8, v12, fa0, v0.t
+; CHECK-NEXT: fsrm a1
+; CHECK-NEXT: ret
+entry:
+ %a = call <vscale x 8 x float> @llvm.riscv.vfwadd.mask.nxv8f32.nxv8bf16.bf16(
+ <vscale x 8 x float> %0,
+ <vscale x 8 x bfloat> %1,
+ bfloat %2,
+ <vscale x 8 x i1> %3,
+ iXLen 0, iXLen %4, iXLen 1)
+
+ ret <vscale x 8 x float> %a
+}
+
+declare <vscale x 16 x float> @llvm.riscv.vfwadd.nxv16f32.nxv16bf16.bf16(
+ <vscale x 16 x float>,
+ <vscale x 16 x bfloat>,
+ bfloat,
+ iXLen, iXLen);
+
+define <vscale x 16 x float> @intrinsic_vfwadd_vf_nxv16f32_nxv16bf16_bf16(<vscale x 16 x bfloat> %0, bfloat %1, iXLen %2) nounwind {
+; CHECK-LABEL: intrinsic_vfwadd_vf_nxv16f32_nxv16bf16_bf16:
+; CHECK: # %bb.0: # %entry
+; CHECK-NEXT: vsetvli zero, a0, e16alt, m4, ta, ma
+; CHECK-NEXT: vmv4r.v v16, v8
+; CHECK-NEXT: fsrmi a0, 0
+; CHECK-NEXT: vfwadd.vf v8, v16, fa0
+; CHECK-NEXT: fsrm a0
+; CHECK-NEXT: ret
+entry:
+ %a = call <vscale x 16 x float> @llvm.riscv.vfwadd.nxv16f32.nxv16bf16.bf16(
+ <vscale x 16 x float> poison,
+ <vscale x 16 x bfloat> %0,
+ bfloat %1,
+ iXLen 0, iXLen %2)
+
+ ret <vscale x 16 x float> %a
+}
+
+declare <vscale x 16 x float> @llvm.riscv.vfwadd.mask.nxv16f32.nxv16bf16.bf16(
+ <vscale x 16 x float>,
+ <vscale x 16 x bfloat>,
+ bfloat,
+ <vscale x 16 x i1>,
+ iXLen, iXLen, iXLen);
+
+define <vscale x 16 x float> @intrinsic_vfwadd_mask_vf_nxv16f32_nxv16bf16_bf16(<vscale x 16 x float> %0, <vscale x 16 x bfloat> %1, bfloat %2, <vscale x 16 x i1> %3, iXLen %4) nounwind {
+; CHECK-LABEL: intrinsic_vfwadd_mask_vf_nxv16f32_nxv16bf16_bf16:
+; CHECK: # %bb.0: # %entry
+; CHECK-NEXT: fsrmi a1, 0
+; CHECK-NEXT: vsetvli zero, a0, e16alt, m4, ta, mu
+; CHECK-NEXT: vfwadd.vf v8, v16, fa0, v0.t
+; CHECK-NEXT: fsrm a1
+; CHECK-NEXT: ret
+entry:
+ %a = call <vscale x 16 x float> @llvm.riscv.vfwadd.mask.nxv16f32.nxv16bf16.bf16(
+ <vscale x 16 x float> %0,
+ <vscale x 16 x bfloat> %1,
+ bfloat %2,
+ <vscale x 16 x i1> %3,
+ iXLen 0, iXLen %4, iXLen 1)
+
+ ret <vscale x 16 x float> %a
+}
+
diff --git a/llvm/test/CodeGen/RISCV/rvv/vfwadd-w-bf.ll b/llvm/test/CodeGen/RISCV/rvv/vfwadd-w-bf.ll
new file mode 100644
index 0000000..c5417e8
--- /dev/null
+++ b/llvm/test/CodeGen/RISCV/rvv/vfwadd-w-bf.ll
@@ -0,0 +1,773 @@
+; NOTE: Assertions have been autogenerated by utils/update_llc_test_checks.py
+; RUN: sed 's/iXLen/i32/g' %s | llc -mtriple=riscv32 -mattr=+v,+experimental-zvfbfa \
+; RUN: -verify-machineinstrs -target-abi=ilp32d | FileCheck %s
+; RUN: sed 's/iXLen/i64/g' %s | llc -mtriple=riscv64 -mattr=+v,+experimental-zvfbfa \
+; RUN: -verify-machineinstrs -target-abi=lp64d | FileCheck %s
+
+declare <vscale x 1 x float> @llvm.riscv.vfwadd.w.nxv1f32.nxv1bf16(
+ <vscale x 1 x float>,
+ <vscale x 1 x float>,
+ <vscale x 1 x bfloat>,
+ iXLen, iXLen);
+
+define <vscale x 1 x float> @intrinsic_vfwadd.w_wv_nxv1f32_nxv1f32_nxv1bf16(<vscale x 1 x float> %0, <vscale x 1 x bfloat> %1, iXLen %2) nounwind {
+; CHECK-LABEL: intrinsic_vfwadd.w_wv_nxv1f32_nxv1f32_nxv1bf16:
+; CHECK: # %bb.0: # %entry
+; CHECK-NEXT: fsrmi a1, 0
+; CHECK-NEXT: vsetvli zero, a0, e16alt, mf4, ta, ma
+; CHECK-NEXT: vfwadd.wv v8, v8, v9
+; CHECK-NEXT: fsrm a1
+; CHECK-NEXT: ret
+entry:
+ %a = call <vscale x 1 x float> @llvm.riscv.vfwadd.w.nxv1f32.nxv1bf16(
+ <vscale x 1 x float> poison,
+ <vscale x 1 x float> %0,
+ <vscale x 1 x bfloat> %1,
+ iXLen 0, iXLen %2)
+
+ ret <vscale x 1 x float> %a
+}
+
+declare <vscale x 1 x float> @llvm.riscv.vfwadd.w.mask.nxv1f32.nxv1bf16(
+ <vscale x 1 x float>,
+ <vscale x 1 x float>,
+ <vscale x 1 x bfloat>,
+ <vscale x 1 x i1>,
+ iXLen, iXLen, iXLen);
+
+define <vscale x 1 x float> @intrinsic_vfwadd.w_mask_wv_nxv1f32_nxv1f32_nxv1bf16(<vscale x 1 x float> %0, <vscale x 1 x float> %1, <vscale x 1 x bfloat> %2, <vscale x 1 x i1> %3, iXLen %4) nounwind {
+; CHECK-LABEL: intrinsic_vfwadd.w_mask_wv_nxv1f32_nxv1f32_nxv1bf16:
+; CHECK: # %bb.0: # %entry
+; CHECK-NEXT: fsrmi a1, 0
+; CHECK-NEXT: vsetvli zero, a0, e16alt, mf4, ta, mu
+; CHECK-NEXT: vfwadd.wv v8, v9, v10, v0.t
+; CHECK-NEXT: fsrm a1
+; CHECK-NEXT: ret
+entry:
+ %a = call <vscale x 1 x float> @llvm.riscv.vfwadd.w.mask.nxv1f32.nxv1bf16(
+ <vscale x 1 x float> %0,
+ <vscale x 1 x float> %1,
+ <vscale x 1 x bfloat> %2,
+ <vscale x 1 x i1> %3,
+ iXLen 0, iXLen %4, iXLen 1)
+
+ ret <vscale x 1 x float> %a
+}
+
+declare <vscale x 2 x float> @llvm.riscv.vfwadd.w.nxv2f32.nxv2bf16(
+ <vscale x 2 x float>,
+ <vscale x 2 x float>,
+ <vscale x 2 x bfloat>,
+ iXLen, iXLen);
+
+define <vscale x 2 x float> @intrinsic_vfwadd.w_wv_nxv2f32_nxv2f32_nxv2bf16(<vscale x 2 x float> %0, <vscale x 2 x bfloat> %1, iXLen %2) nounwind {
+; CHECK-LABEL: intrinsic_vfwadd.w_wv_nxv2f32_nxv2f32_nxv2bf16:
+; CHECK: # %bb.0: # %entry
+; CHECK-NEXT: fsrmi a1, 0
+; CHECK-NEXT: vsetvli zero, a0, e16alt, mf2, ta, ma
+; CHECK-NEXT: vfwadd.wv v8, v8, v9
+; CHECK-NEXT: fsrm a1
+; CHECK-NEXT: ret
+entry:
+ %a = call <vscale x 2 x float> @llvm.riscv.vfwadd.w.nxv2f32.nxv2bf16(
+ <vscale x 2 x float> poison,
+ <vscale x 2 x float> %0,
+ <vscale x 2 x bfloat> %1,
+ iXLen 0, iXLen %2)
+
+ ret <vscale x 2 x float> %a
+}
+
+declare <vscale x 2 x float> @llvm.riscv.vfwadd.w.mask.nxv2f32.nxv2bf16(
+ <vscale x 2 x float>,
+ <vscale x 2 x float>,
+ <vscale x 2 x bfloat>,
+ <vscale x 2 x i1>,
+ iXLen, iXLen, iXLen);
+
+define <vscale x 2 x float> @intrinsic_vfwadd.w_mask_wv_nxv2f32_nxv2f32_nxv2bf16(<vscale x 2 x float> %0, <vscale x 2 x float> %1, <vscale x 2 x bfloat> %2, <vscale x 2 x i1> %3, iXLen %4) nounwind {
+; CHECK-LABEL: intrinsic_vfwadd.w_mask_wv_nxv2f32_nxv2f32_nxv2bf16:
+; CHECK: # %bb.0: # %entry
+; CHECK-NEXT: fsrmi a1, 0
+; CHECK-NEXT: vsetvli zero, a0, e16alt, mf2, ta, mu
+; CHECK-NEXT: vfwadd.wv v8, v9, v10, v0.t
+; CHECK-NEXT: fsrm a1
+; CHECK-NEXT: ret
+entry:
+ %a = call <vscale x 2 x float> @llvm.riscv.vfwadd.w.mask.nxv2f32.nxv2bf16(
+ <vscale x 2 x float> %0,
+ <vscale x 2 x float> %1,
+ <vscale x 2 x bfloat> %2,
+ <vscale x 2 x i1> %3,
+ iXLen 0, iXLen %4, iXLen 1)
+
+ ret <vscale x 2 x float> %a
+}
+
+declare <vscale x 4 x float> @llvm.riscv.vfwadd.w.nxv4f32.nxv4bf16(
+ <vscale x 4 x float>,
+ <vscale x 4 x float>,
+ <vscale x 4 x bfloat>,
+ iXLen, iXLen);
+
+define <vscale x 4 x float> @intrinsic_vfwadd.w_wv_nxv4f32_nxv4f32_nxv4bf16(<vscale x 4 x float> %0, <vscale x 4 x bfloat> %1, iXLen %2) nounwind {
+; CHECK-LABEL: intrinsic_vfwadd.w_wv_nxv4f32_nxv4f32_nxv4bf16:
+; CHECK: # %bb.0: # %entry
+; CHECK-NEXT: fsrmi a1, 0
+; CHECK-NEXT: vsetvli zero, a0, e16alt, m1, ta, ma
+; CHECK-NEXT: vfwadd.wv v8, v8, v10
+; CHECK-NEXT: fsrm a1
+; CHECK-NEXT: ret
+entry:
+ %a = call <vscale x 4 x float> @llvm.riscv.vfwadd.w.nxv4f32.nxv4bf16(
+ <vscale x 4 x float> poison,
+ <vscale x 4 x float> %0,
+ <vscale x 4 x bfloat> %1,
+ iXLen 0, iXLen %2)
+
+ ret <vscale x 4 x float> %a
+}
+
+declare <vscale x 4 x float> @llvm.riscv.vfwadd.w.mask.nxv4f32.nxv4bf16(
+ <vscale x 4 x float>,
+ <vscale x 4 x float>,
+ <vscale x 4 x bfloat>,
+ <vscale x 4 x i1>,
+ iXLen, iXLen, iXLen);
+
+define <vscale x 4 x float> @intrinsic_vfwadd.w_mask_wv_nxv4f32_nxv4f32_nxv4bf16(<vscale x 4 x float> %0, <vscale x 4 x float> %1, <vscale x 4 x bfloat> %2, <vscale x 4 x i1> %3, iXLen %4) nounwind {
+; CHECK-LABEL: intrinsic_vfwadd.w_mask_wv_nxv4f32_nxv4f32_nxv4bf16:
+; CHECK: # %bb.0: # %entry
+; CHECK-NEXT: fsrmi a1, 0
+; CHECK-NEXT: vsetvli zero, a0, e16alt, m1, ta, mu
+; CHECK-NEXT: vfwadd.wv v8, v10, v12, v0.t
+; CHECK-NEXT: fsrm a1
+; CHECK-NEXT: ret
+entry:
+ %a = call <vscale x 4 x float> @llvm.riscv.vfwadd.w.mask.nxv4f32.nxv4bf16(
+ <vscale x 4 x float> %0,
+ <vscale x 4 x float> %1,
+ <vscale x 4 x bfloat> %2,
+ <vscale x 4 x i1> %3,
+ iXLen 0, iXLen %4, iXLen 1)
+
+ ret <vscale x 4 x float> %a
+}
+
+declare <vscale x 8 x float> @llvm.riscv.vfwadd.w.nxv8f32.nxv8bf16(
+ <vscale x 8 x float>,
+ <vscale x 8 x float>,
+ <vscale x 8 x bfloat>,
+ iXLen, iXLen);
+
+define <vscale x 8 x float> @intrinsic_vfwadd.w_wv_nxv8f32_nxv8f32_nxv8bf16(<vscale x 8 x float> %0, <vscale x 8 x bfloat> %1, iXLen %2) nounwind {
+; CHECK-LABEL: intrinsic_vfwadd.w_wv_nxv8f32_nxv8f32_nxv8bf16:
+; CHECK: # %bb.0: # %entry
+; CHECK-NEXT: fsrmi a1, 0
+; CHECK-NEXT: vsetvli zero, a0, e16alt, m2, ta, ma
+; CHECK-NEXT: vfwadd.wv v8, v8, v12
+; CHECK-NEXT: fsrm a1
+; CHECK-NEXT: ret
+entry:
+ %a = call <vscale x 8 x float> @llvm.riscv.vfwadd.w.nxv8f32.nxv8bf16(
+ <vscale x 8 x float> poison,
+ <vscale x 8 x float> %0,
+ <vscale x 8 x bfloat> %1,
+ iXLen 0, iXLen %2)
+
+ ret <vscale x 8 x float> %a
+}
+
+declare <vscale x 8 x float> @llvm.riscv.vfwadd.w.mask.nxv8f32.nxv8bf16(
+ <vscale x 8 x float>,
+ <vscale x 8 x float>,
+ <vscale x 8 x bfloat>,
+ <vscale x 8 x i1>,
+ iXLen, iXLen, iXLen);
+
+define <vscale x 8 x float> @intrinsic_vfwadd.w_mask_wv_nxv8f32_nxv8f32_nxv8bf16(<vscale x 8 x float> %0, <vscale x 8 x float> %1, <vscale x 8 x bfloat> %2, <vscale x 8 x i1> %3, iXLen %4) nounwind {
+; CHECK-LABEL: intrinsic_vfwadd.w_mask_wv_nxv8f32_nxv8f32_nxv8bf16:
+; CHECK: # %bb.0: # %entry
+; CHECK-NEXT: fsrmi a1, 0
+; CHECK-NEXT: vsetvli zero, a0, e16alt, m2, ta, mu
+; CHECK-NEXT: vfwadd.wv v8, v12, v16, v0.t
+; CHECK-NEXT: fsrm a1
+; CHECK-NEXT: ret
+entry:
+ %a = call <vscale x 8 x float> @llvm.riscv.vfwadd.w.mask.nxv8f32.nxv8bf16(
+ <vscale x 8 x float> %0,
+ <vscale x 8 x float> %1,
+ <vscale x 8 x bfloat> %2,
+ <vscale x 8 x i1> %3,
+ iXLen 0, iXLen %4, iXLen 1)
+
+ ret <vscale x 8 x float> %a
+}
+
+declare <vscale x 16 x float> @llvm.riscv.vfwadd.w.nxv16f32.nxv16bf16(
+ <vscale x 16 x float>,
+ <vscale x 16 x float>,
+ <vscale x 16 x bfloat>,
+ iXLen, iXLen);
+
+define <vscale x 16 x float> @intrinsic_vfwadd.w_wv_nxv16f32_nxv16f32_nxv16bf16(<vscale x 16 x float> %0, <vscale x 16 x bfloat> %1, iXLen %2) nounwind {
+; CHECK-LABEL: intrinsic_vfwadd.w_wv_nxv16f32_nxv16f32_nxv16bf16:
+; CHECK: # %bb.0: # %entry
+; CHECK-NEXT: fsrmi a1, 0
+; CHECK-NEXT: vsetvli zero, a0, e16alt, m4, ta, ma
+; CHECK-NEXT: vfwadd.wv v8, v8, v16
+; CHECK-NEXT: fsrm a1
+; CHECK-NEXT: ret
+entry:
+ %a = call <vscale x 16 x float> @llvm.riscv.vfwadd.w.nxv16f32.nxv16bf16(
+ <vscale x 16 x float> poison,
+ <vscale x 16 x float> %0,
+ <vscale x 16 x bfloat> %1,
+ iXLen 0, iXLen %2)
+
+ ret <vscale x 16 x float> %a
+}
+
+declare <vscale x 16 x float> @llvm.riscv.vfwadd.w.mask.nxv16f32.nxv16bf16(
+ <vscale x 16 x float>,
+ <vscale x 16 x float>,
+ <vscale x 16 x bfloat>,
+ <vscale x 16 x i1>,
+ iXLen, iXLen, iXLen);
+
+define <vscale x 16 x float> @intrinsic_vfwadd.w_mask_wv_nxv16f32_nxv16f32_nxv16bf16(<vscale x 16 x float> %0, <vscale x 16 x float> %1, <vscale x 16 x bfloat> %2, <vscale x 16 x i1> %3, iXLen %4) nounwind {
+; CHECK-LABEL: intrinsic_vfwadd.w_mask_wv_nxv16f32_nxv16f32_nxv16bf16:
+; CHECK: # %bb.0: # %entry
+; CHECK-NEXT: vl4re16.v v24, (a0)
+; CHECK-NEXT: fsrmi a0, 0
+; CHECK-NEXT: vsetvli zero, a1, e16alt, m4, ta, mu
+; CHECK-NEXT: vfwadd.wv v8, v16, v24, v0.t
+; CHECK-NEXT: fsrm a0
+; CHECK-NEXT: ret
+entry:
+ %a = call <vscale x 16 x float> @llvm.riscv.vfwadd.w.mask.nxv16f32.nxv16bf16(
+ <vscale x 16 x float> %0,
+ <vscale x 16 x float> %1,
+ <vscale x 16 x bfloat> %2,
+ <vscale x 16 x i1> %3,
+ iXLen 0, iXLen %4, iXLen 1)
+
+ ret <vscale x 16 x float> %a
+}
+
+declare <vscale x 1 x float> @llvm.riscv.vfwadd.w.nxv1f32.bf16(
+ <vscale x 1 x float>,
+ <vscale x 1 x float>,
+ bfloat,
+ iXLen, iXLen);
+
+define <vscale x 1 x float> @intrinsic_vfwadd.w_wf_nxv1f32_nxv1f32_bf16(<vscale x 1 x float> %0, bfloat %1, iXLen %2) nounwind {
+; CHECK-LABEL: intrinsic_vfwadd.w_wf_nxv1f32_nxv1f32_bf16:
+; CHECK: # %bb.0: # %entry
+; CHECK-NEXT: fsrmi a1, 0
+; CHECK-NEXT: vsetvli zero, a0, e16alt, mf4, ta, ma
+; CHECK-NEXT: vfwadd.wf v8, v8, fa0
+; CHECK-NEXT: fsrm a1
+; CHECK-NEXT: ret
+entry:
+ %a = call <vscale x 1 x float> @llvm.riscv.vfwadd.w.nxv1f32.bf16(
+ <vscale x 1 x float> poison,
+ <vscale x 1 x float> %0,
+ bfloat %1,
+ iXLen 0, iXLen %2)
+
+ ret <vscale x 1 x float> %a
+}
+
+declare <vscale x 1 x float> @llvm.riscv.vfwadd.w.mask.nxv1f32.bf16(
+ <vscale x 1 x float>,
+ <vscale x 1 x float>,
+ bfloat,
+ <vscale x 1 x i1>,
+ iXLen, iXLen, iXLen);
+
+define <vscale x 1 x float> @intrinsic_vfwadd.w_mask_wf_nxv1f32_nxv1f32_bf16(<vscale x 1 x float> %0, <vscale x 1 x float> %1, bfloat %2, <vscale x 1 x i1> %3, iXLen %4) nounwind {
+; CHECK-LABEL: intrinsic_vfwadd.w_mask_wf_nxv1f32_nxv1f32_bf16:
+; CHECK: # %bb.0: # %entry
+; CHECK-NEXT: fsrmi a1, 0
+; CHECK-NEXT: vsetvli zero, a0, e16alt, mf4, ta, mu
+; CHECK-NEXT: vfwadd.wf v8, v9, fa0, v0.t
+; CHECK-NEXT: fsrm a1
+; CHECK-NEXT: ret
+entry:
+ %a = call <vscale x 1 x float> @llvm.riscv.vfwadd.w.mask.nxv1f32.bf16(
+ <vscale x 1 x float> %0,
+ <vscale x 1 x float> %1,
+ bfloat %2,
+ <vscale x 1 x i1> %3,
+ iXLen 0, iXLen %4, iXLen 1)
+
+ ret <vscale x 1 x float> %a
+}
+
+declare <vscale x 2 x float> @llvm.riscv.vfwadd.w.nxv2f32.bf16(
+ <vscale x 2 x float>,
+ <vscale x 2 x float>,
+ bfloat,
+ iXLen, iXLen);
+
+define <vscale x 2 x float> @intrinsic_vfwadd.w_wf_nxv2f32_nxv2f32_bf16(<vscale x 2 x float> %0, bfloat %1, iXLen %2) nounwind {
+; CHECK-LABEL: intrinsic_vfwadd.w_wf_nxv2f32_nxv2f32_bf16:
+; CHECK: # %bb.0: # %entry
+; CHECK-NEXT: fsrmi a1, 0
+; CHECK-NEXT: vsetvli zero, a0, e16alt, mf2, ta, ma
+; CHECK-NEXT: vfwadd.wf v8, v8, fa0
+; CHECK-NEXT: fsrm a1
+; CHECK-NEXT: ret
+entry:
+ %a = call <vscale x 2 x float> @llvm.riscv.vfwadd.w.nxv2f32.bf16(
+ <vscale x 2 x float> poison,
+ <vscale x 2 x float> %0,
+ bfloat %1,
+ iXLen 0, iXLen %2)
+
+ ret <vscale x 2 x float> %a
+}
+
+declare <vscale x 2 x float> @llvm.riscv.vfwadd.w.mask.nxv2f32.bf16(
+ <vscale x 2 x float>,
+ <vscale x 2 x float>,
+ bfloat,
+ <vscale x 2 x i1>,
+ iXLen, iXLen, iXLen);
+
+define <vscale x 2 x float> @intrinsic_vfwadd.w_mask_wf_nxv2f32_nxv2f32_bf16(<vscale x 2 x float> %0, <vscale x 2 x float> %1, bfloat %2, <vscale x 2 x i1> %3, iXLen %4) nounwind {
+; CHECK-LABEL: intrinsic_vfwadd.w_mask_wf_nxv2f32_nxv2f32_bf16:
+; CHECK: # %bb.0: # %entry
+; CHECK-NEXT: fsrmi a1, 0
+; CHECK-NEXT: vsetvli zero, a0, e16alt, mf2, ta, mu
+; CHECK-NEXT: vfwadd.wf v8, v9, fa0, v0.t
+; CHECK-NEXT: fsrm a1
+; CHECK-NEXT: ret
+entry:
+ %a = call <vscale x 2 x float> @llvm.riscv.vfwadd.w.mask.nxv2f32.bf16(
+ <vscale x 2 x float> %0,
+ <vscale x 2 x float> %1,
+ bfloat %2,
+ <vscale x 2 x i1> %3,
+ iXLen 0, iXLen %4, iXLen 1)
+
+ ret <vscale x 2 x float> %a
+}
+
+declare <vscale x 4 x float> @llvm.riscv.vfwadd.w.nxv4f32.bf16(
+ <vscale x 4 x float>,
+ <vscale x 4 x float>,
+ bfloat,
+ iXLen, iXLen);
+
+define <vscale x 4 x float> @intrinsic_vfwadd.w_wf_nxv4f32_nxv4f32_bf16(<vscale x 4 x float> %0, bfloat %1, iXLen %2) nounwind {
+; CHECK-LABEL: intrinsic_vfwadd.w_wf_nxv4f32_nxv4f32_bf16:
+; CHECK: # %bb.0: # %entry
+; CHECK-NEXT: fsrmi a1, 0
+; CHECK-NEXT: vsetvli zero, a0, e16alt, m1, ta, ma
+; CHECK-NEXT: vfwadd.wf v8, v8, fa0
+; CHECK-NEXT: fsrm a1
+; CHECK-NEXT: ret
+entry:
+ %a = call <vscale x 4 x float> @llvm.riscv.vfwadd.w.nxv4f32.bf16(
+ <vscale x 4 x float> poison,
+ <vscale x 4 x float> %0,
+ bfloat %1,
+ iXLen 0, iXLen %2)
+
+ ret <vscale x 4 x float> %a
+}
+
+declare <vscale x 4 x float> @llvm.riscv.vfwadd.w.mask.nxv4f32.bf16(
+ <vscale x 4 x float>,
+ <vscale x 4 x float>,
+ bfloat,
+ <vscale x 4 x i1>,
+ iXLen, iXLen, iXLen);
+
+define <vscale x 4 x float> @intrinsic_vfwadd.w_mask_wf_nxv4f32_nxv4f32_bf16(<vscale x 4 x float> %0, <vscale x 4 x float> %1, bfloat %2, <vscale x 4 x i1> %3, iXLen %4) nounwind {
+; CHECK-LABEL: intrinsic_vfwadd.w_mask_wf_nxv4f32_nxv4f32_bf16:
+; CHECK: # %bb.0: # %entry
+; CHECK-NEXT: fsrmi a1, 0
+; CHECK-NEXT: vsetvli zero, a0, e16alt, m1, ta, mu
+; CHECK-NEXT: vfwadd.wf v8, v10, fa0, v0.t
+; CHECK-NEXT: fsrm a1
+; CHECK-NEXT: ret
+entry:
+ %a = call <vscale x 4 x float> @llvm.riscv.vfwadd.w.mask.nxv4f32.bf16(
+ <vscale x 4 x float> %0,
+ <vscale x 4 x float> %1,
+ bfloat %2,
+ <vscale x 4 x i1> %3,
+ iXLen 0, iXLen %4, iXLen 1)
+
+ ret <vscale x 4 x float> %a
+}
+
+declare <vscale x 8 x float> @llvm.riscv.vfwadd.w.nxv8f32.bf16(
+ <vscale x 8 x float>,
+ <vscale x 8 x float>,
+ bfloat,
+ iXLen, iXLen);
+
+define <vscale x 8 x float> @intrinsic_vfwadd.w_wf_nxv8f32_nxv8f32_bf16(<vscale x 8 x float> %0, bfloat %1, iXLen %2) nounwind {
+; CHECK-LABEL: intrinsic_vfwadd.w_wf_nxv8f32_nxv8f32_bf16:
+; CHECK: # %bb.0: # %entry
+; CHECK-NEXT: fsrmi a1, 0
+; CHECK-NEXT: vsetvli zero, a0, e16alt, m2, ta, ma
+; CHECK-NEXT: vfwadd.wf v8, v8, fa0
+; CHECK-NEXT: fsrm a1
+; CHECK-NEXT: ret
+entry:
+ %a = call <vscale x 8 x float> @llvm.riscv.vfwadd.w.nxv8f32.bf16(
+ <vscale x 8 x float> poison,
+ <vscale x 8 x float> %0,
+ bfloat %1,
+ iXLen 0, iXLen %2)
+
+ ret <vscale x 8 x float> %a
+}
+
+declare <vscale x 8 x float> @llvm.riscv.vfwadd.w.mask.nxv8f32.bf16(
+ <vscale x 8 x float>,
+ <vscale x 8 x float>,
+ bfloat,
+ <vscale x 8 x i1>,
+ iXLen, iXLen, iXLen);
+
+define <vscale x 8 x float> @intrinsic_vfwadd.w_mask_wf_nxv8f32_nxv8f32_bf16(<vscale x 8 x float> %0, <vscale x 8 x float> %1, bfloat %2, <vscale x 8 x i1> %3, iXLen %4) nounwind {
+; CHECK-LABEL: intrinsic_vfwadd.w_mask_wf_nxv8f32_nxv8f32_bf16:
+; CHECK: # %bb.0: # %entry
+; CHECK-NEXT: fsrmi a1, 0
+; CHECK-NEXT: vsetvli zero, a0, e16alt, m2, ta, mu
+; CHECK-NEXT: vfwadd.wf v8, v12, fa0, v0.t
+; CHECK-NEXT: fsrm a1
+; CHECK-NEXT: ret
+entry:
+ %a = call <vscale x 8 x float> @llvm.riscv.vfwadd.w.mask.nxv8f32.bf16(
+ <vscale x 8 x float> %0,
+ <vscale x 8 x float> %1,
+ bfloat %2,
+ <vscale x 8 x i1> %3,
+ iXLen 0, iXLen %4, iXLen 1)
+
+ ret <vscale x 8 x float> %a
+}
+
+declare <vscale x 16 x float> @llvm.riscv.vfwadd.w.nxv16f32.bf16(
+ <vscale x 16 x float>,
+ <vscale x 16 x float>,
+ bfloat,
+ iXLen, iXLen);
+
+define <vscale x 16 x float> @intrinsic_vfwadd.w_wf_nxv16f32_nxv16f32_bf16(<vscale x 16 x float> %0, bfloat %1, iXLen %2) nounwind {
+; CHECK-LABEL: intrinsic_vfwadd.w_wf_nxv16f32_nxv16f32_bf16:
+; CHECK: # %bb.0: # %entry
+; CHECK-NEXT: fsrmi a1, 0
+; CHECK-NEXT: vsetvli zero, a0, e16alt, m4, ta, ma
+; CHECK-NEXT: vfwadd.wf v8, v8, fa0
+; CHECK-NEXT: fsrm a1
+; CHECK-NEXT: ret
+entry:
+ %a = call <vscale x 16 x float> @llvm.riscv.vfwadd.w.nxv16f32.bf16(
+ <vscale x 16 x float> poison,
+ <vscale x 16 x float> %0,
+ bfloat %1,
+ iXLen 0, iXLen %2)
+
+ ret <vscale x 16 x float> %a
+}
+
+declare <vscale x 16 x float> @llvm.riscv.vfwadd.w.mask.nxv16f32.bf16(
+ <vscale x 16 x float>,
+ <vscale x 16 x float>,
+ bfloat,
+ <vscale x 16 x i1>,
+ iXLen, iXLen, iXLen);
+
+define <vscale x 16 x float> @intrinsic_vfwadd.w_mask_wf_nxv16f32_nxv16f32_bf16(<vscale x 16 x float> %0, <vscale x 16 x float> %1, bfloat %2, <vscale x 16 x i1> %3, iXLen %4) nounwind {
+; CHECK-LABEL: intrinsic_vfwadd.w_mask_wf_nxv16f32_nxv16f32_bf16:
+; CHECK: # %bb.0: # %entry
+; CHECK-NEXT: fsrmi a1, 0
+; CHECK-NEXT: vsetvli zero, a0, e16alt, m4, ta, mu
+; CHECK-NEXT: vfwadd.wf v8, v16, fa0, v0.t
+; CHECK-NEXT: fsrm a1
+; CHECK-NEXT: ret
+entry:
+ %a = call <vscale x 16 x float> @llvm.riscv.vfwadd.w.mask.nxv16f32.bf16(
+ <vscale x 16 x float> %0,
+ <vscale x 16 x float> %1,
+ bfloat %2,
+ <vscale x 16 x i1> %3,
+ iXLen 0, iXLen %4, iXLen 1)
+
+ ret <vscale x 16 x float> %a
+}
+
+define <vscale x 1 x float> @intrinsic_vfwadd.w_mask_wv_tie_nxv1f32_nxv1f32_nxv1bf16(<vscale x 1 x float> %0, <vscale x 1 x bfloat> %1, <vscale x 1 x i1> %2, iXLen %3) nounwind {
+; CHECK-LABEL: intrinsic_vfwadd.w_mask_wv_tie_nxv1f32_nxv1f32_nxv1bf16:
+; CHECK: # %bb.0: # %entry
+; CHECK-NEXT: fsrmi a1, 0
+; CHECK-NEXT: vsetvli zero, a0, e16alt, mf4, ta, mu
+; CHECK-NEXT: vfwadd.wv v8, v8, v9, v0.t
+; CHECK-NEXT: fsrm a1
+; CHECK-NEXT: ret
+entry:
+ %a = call <vscale x 1 x float> @llvm.riscv.vfwadd.w.mask.nxv1f32.nxv1bf16(
+ <vscale x 1 x float> %0,
+ <vscale x 1 x float> %0,
+ <vscale x 1 x bfloat> %1,
+ <vscale x 1 x i1> %2,
+ iXLen 0, iXLen %3, iXLen 1)
+
+ ret <vscale x 1 x float> %a
+}
+
+define <vscale x 2 x float> @intrinsic_vfwadd.w_mask_wv_tie_nxv2f32_nxv2f32_nxv2bf16(<vscale x 2 x float> %0, <vscale x 2 x bfloat> %1, <vscale x 2 x i1> %2, iXLen %3) nounwind {
+; CHECK-LABEL: intrinsic_vfwadd.w_mask_wv_tie_nxv2f32_nxv2f32_nxv2bf16:
+; CHECK: # %bb.0: # %entry
+; CHECK-NEXT: fsrmi a1, 0
+; CHECK-NEXT: vsetvli zero, a0, e16alt, mf2, ta, mu
+; CHECK-NEXT: vfwadd.wv v8, v8, v9, v0.t
+; CHECK-NEXT: fsrm a1
+; CHECK-NEXT: ret
+entry:
+ %a = call <vscale x 2 x float> @llvm.riscv.vfwadd.w.mask.nxv2f32.nxv2bf16(
+ <vscale x 2 x float> %0,
+ <vscale x 2 x float> %0,
+ <vscale x 2 x bfloat> %1,
+ <vscale x 2 x i1> %2,
+ iXLen 0, iXLen %3, iXLen 1)
+
+ ret <vscale x 2 x float> %a
+}
+
+define <vscale x 4 x float> @intrinsic_vfwadd.w_mask_wv_tie_nxv4f32_nxv4f32_nxv4bf16(<vscale x 4 x float> %0, <vscale x 4 x bfloat> %1, <vscale x 4 x i1> %2, iXLen %3) nounwind {
+; CHECK-LABEL: intrinsic_vfwadd.w_mask_wv_tie_nxv4f32_nxv4f32_nxv4bf16:
+; CHECK: # %bb.0: # %entry
+; CHECK-NEXT: fsrmi a1, 0
+; CHECK-NEXT: vsetvli zero, a0, e16alt, m1, ta, mu
+; CHECK-NEXT: vfwadd.wv v8, v8, v10, v0.t
+; CHECK-NEXT: fsrm a1
+; CHECK-NEXT: ret
+entry:
+ %a = call <vscale x 4 x float> @llvm.riscv.vfwadd.w.mask.nxv4f32.nxv4bf16(
+ <vscale x 4 x float> %0,
+ <vscale x 4 x float> %0,
+ <vscale x 4 x bfloat> %1,
+ <vscale x 4 x i1> %2,
+ iXLen 0, iXLen %3, iXLen 1)
+
+ ret <vscale x 4 x float> %a
+}
+
+define <vscale x 8 x float> @intrinsic_vfwadd.w_mask_wv_tie_nxv8f32_nxv8f32_nxv8bf16(<vscale x 8 x float> %0, <vscale x 8 x bfloat> %1, <vscale x 8 x i1> %2, iXLen %3) nounwind {
+; CHECK-LABEL: intrinsic_vfwadd.w_mask_wv_tie_nxv8f32_nxv8f32_nxv8bf16:
+; CHECK: # %bb.0: # %entry
+; CHECK-NEXT: fsrmi a1, 0
+; CHECK-NEXT: vsetvli zero, a0, e16alt, m2, ta, mu
+; CHECK-NEXT: vfwadd.wv v8, v8, v12, v0.t
+; CHECK-NEXT: fsrm a1
+; CHECK-NEXT: ret
+entry:
+ %a = call <vscale x 8 x float> @llvm.riscv.vfwadd.w.mask.nxv8f32.nxv8bf16(
+ <vscale x 8 x float> %0,
+ <vscale x 8 x float> %0,
+ <vscale x 8 x bfloat> %1,
+ <vscale x 8 x i1> %2,
+ iXLen 0, iXLen %3, iXLen 1)
+
+ ret <vscale x 8 x float> %a
+}
+
+define <vscale x 16 x float> @intrinsic_vfwadd.w_mask_wv_tie_nxv16f32_nxv16f32_nxv16bf16(<vscale x 16 x float> %0, <vscale x 16 x bfloat> %1, <vscale x 16 x i1> %2, iXLen %3) nounwind {
+; CHECK-LABEL: intrinsic_vfwadd.w_mask_wv_tie_nxv16f32_nxv16f32_nxv16bf16:
+; CHECK: # %bb.0: # %entry
+; CHECK-NEXT: fsrmi a1, 0
+; CHECK-NEXT: vsetvli zero, a0, e16alt, m4, ta, mu
+; CHECK-NEXT: vfwadd.wv v8, v8, v16, v0.t
+; CHECK-NEXT: fsrm a1
+; CHECK-NEXT: ret
+entry:
+ %a = call <vscale x 16 x float> @llvm.riscv.vfwadd.w.mask.nxv16f32.nxv16bf16(
+ <vscale x 16 x float> %0,
+ <vscale x 16 x float> %0,
+ <vscale x 16 x bfloat> %1,
+ <vscale x 16 x i1> %2,
+ iXLen 0, iXLen %3, iXLen 1)
+
+ ret <vscale x 16 x float> %a
+}
+
+define <vscale x 1 x float> @intrinsic_vfwadd.w_mask_wf_tie_nxv1f32_nxv1f32_bf16(<vscale x 1 x float> %0, bfloat %1, <vscale x 1 x i1> %2, iXLen %3) nounwind {
+; CHECK-LABEL: intrinsic_vfwadd.w_mask_wf_tie_nxv1f32_nxv1f32_bf16:
+; CHECK: # %bb.0: # %entry
+; CHECK-NEXT: fsrmi a1, 0
+; CHECK-NEXT: vsetvli zero, a0, e16alt, mf4, ta, mu
+; CHECK-NEXT: vfwadd.wf v8, v8, fa0, v0.t
+; CHECK-NEXT: fsrm a1
+; CHECK-NEXT: ret
+entry:
+ %a = call <vscale x 1 x float> @llvm.riscv.vfwadd.w.mask.nxv1f32.bf16(
+ <vscale x 1 x float> %0,
+ <vscale x 1 x float> %0,
+ bfloat %1,
+ <vscale x 1 x i1> %2,
+ iXLen 0, iXLen %3, iXLen 1)
+
+ ret <vscale x 1 x float> %a
+}
+
+define <vscale x 2 x float> @intrinsic_vfwadd.w_mask_wf_tie_nxv2f32_nxv2f32_bf16(<vscale x 2 x float> %0, bfloat %1, <vscale x 2 x i1> %2, iXLen %3) nounwind {
+; CHECK-LABEL: intrinsic_vfwadd.w_mask_wf_tie_nxv2f32_nxv2f32_bf16:
+; CHECK: # %bb.0: # %entry
+; CHECK-NEXT: fsrmi a1, 0
+; CHECK-NEXT: vsetvli zero, a0, e16alt, mf2, ta, mu
+; CHECK-NEXT: vfwadd.wf v8, v8, fa0, v0.t
+; CHECK-NEXT: fsrm a1
+; CHECK-NEXT: ret
+entry:
+ %a = call <vscale x 2 x float> @llvm.riscv.vfwadd.w.mask.nxv2f32.bf16(
+ <vscale x 2 x float> %0,
+ <vscale x 2 x float> %0,
+ bfloat %1,
+ <vscale x 2 x i1> %2,
+ iXLen 0, iXLen %3, iXLen 1)
+
+ ret <vscale x 2 x float> %a
+}
+
+define <vscale x 4 x float> @intrinsic_vfwadd.w_mask_wf_tie_nxv4f32_nxv4f32_bf16(<vscale x 4 x float> %0, bfloat %1, <vscale x 4 x i1> %2, iXLen %3) nounwind {
+; CHECK-LABEL: intrinsic_vfwadd.w_mask_wf_tie_nxv4f32_nxv4f32_bf16:
+; CHECK: # %bb.0: # %entry
+; CHECK-NEXT: fsrmi a1, 0
+; CHECK-NEXT: vsetvli zero, a0, e16alt, m1, ta, mu
+; CHECK-NEXT: vfwadd.wf v8, v8, fa0, v0.t
+; CHECK-NEXT: fsrm a1
+; CHECK-NEXT: ret
+entry:
+ %a = call <vscale x 4 x float> @llvm.riscv.vfwadd.w.mask.nxv4f32.bf16(
+ <vscale x 4 x float> %0,
+ <vscale x 4 x float> %0,
+ bfloat %1,
+ <vscale x 4 x i1> %2,
+ iXLen 0, iXLen %3, iXLen 1)
+
+ ret <vscale x 4 x float> %a
+}
+
+define <vscale x 8 x float> @intrinsic_vfwadd.w_mask_wf_tie_nxv8f32_nxv8f32_bf16(<vscale x 8 x float> %0, bfloat %1, <vscale x 8 x i1> %2, iXLen %3) nounwind {
+; CHECK-LABEL: intrinsic_vfwadd.w_mask_wf_tie_nxv8f32_nxv8f32_bf16:
+; CHECK: # %bb.0: # %entry
+; CHECK-NEXT: fsrmi a1, 0
+; CHECK-NEXT: vsetvli zero, a0, e16alt, m2, ta, mu
+; CHECK-NEXT: vfwadd.wf v8, v8, fa0, v0.t
+; CHECK-NEXT: fsrm a1
+; CHECK-NEXT: ret
+entry:
+ %a = call <vscale x 8 x float> @llvm.riscv.vfwadd.w.mask.nxv8f32.bf16(
+ <vscale x 8 x float> %0,
+ <vscale x 8 x float> %0,
+ bfloat %1,
+ <vscale x 8 x i1> %2,
+ iXLen 0, iXLen %3, iXLen 1)
+
+ ret <vscale x 8 x float> %a
+}
+
+define <vscale x 16 x float> @intrinsic_vfwadd.w_mask_wf_tie_nxv16f32_nxv16f32_bf16(<vscale x 16 x float> %0, bfloat %1, <vscale x 16 x i1> %2, iXLen %3) nounwind {
+; CHECK-LABEL: intrinsic_vfwadd.w_mask_wf_tie_nxv16f32_nxv16f32_bf16:
+; CHECK: # %bb.0: # %entry
+; CHECK-NEXT: fsrmi a1, 0
+; CHECK-NEXT: vsetvli zero, a0, e16alt, m4, ta, mu
+; CHECK-NEXT: vfwadd.wf v8, v8, fa0, v0.t
+; CHECK-NEXT: fsrm a1
+; CHECK-NEXT: ret
+entry:
+ %a = call <vscale x 16 x float> @llvm.riscv.vfwadd.w.mask.nxv16f32.bf16(
+ <vscale x 16 x float> %0,
+ <vscale x 16 x float> %0,
+ bfloat %1,
+ <vscale x 16 x i1> %2,
+ iXLen 0, iXLen %3, iXLen 1)
+
+ ret <vscale x 16 x float> %a
+}
+
+define <vscale x 1 x float> @intrinsic_vfwadd.w_wv_untie_nxv1f32_nxv1f32_nxv1bf16(<vscale x 1 x bfloat> %0, <vscale x 1 x float> %1, iXLen %2) nounwind {
+; CHECK-LABEL: intrinsic_vfwadd.w_wv_untie_nxv1f32_nxv1f32_nxv1bf16:
+; CHECK: # %bb.0: # %entry
+; CHECK-NEXT: fsrmi a1, 0
+; CHECK-NEXT: vsetvli zero, a0, e16alt, mf4, ta, ma
+; CHECK-NEXT: vfwadd.wv v10, v9, v8
+; CHECK-NEXT: fsrm a1
+; CHECK-NEXT: vmv1r.v v8, v10
+; CHECK-NEXT: ret
+entry:
+ %a = call <vscale x 1 x float> @llvm.riscv.vfwadd.w.nxv1f32.nxv1bf16(
+ <vscale x 1 x float> poison,
+ <vscale x 1 x float> %1,
+ <vscale x 1 x bfloat> %0,
+ iXLen 0, iXLen %2)
+
+ ret <vscale x 1 x float> %a
+}
+
+define <vscale x 2 x float> @intrinsic_vfwadd.w_wv_untie_nxv2f32_nxv2f32_nxv2bf16(<vscale x 2 x bfloat> %0, <vscale x 2 x float> %1, iXLen %2) nounwind {
+; CHECK-LABEL: intrinsic_vfwadd.w_wv_untie_nxv2f32_nxv2f32_nxv2bf16:
+; CHECK: # %bb.0: # %entry
+; CHECK-NEXT: fsrmi a1, 0
+; CHECK-NEXT: vsetvli zero, a0, e16alt, mf2, ta, ma
+; CHECK-NEXT: vfwadd.wv v10, v9, v8
+; CHECK-NEXT: fsrm a1
+; CHECK-NEXT: vmv1r.v v8, v10
+; CHECK-NEXT: ret
+entry:
+ %a = call <vscale x 2 x float> @llvm.riscv.vfwadd.w.nxv2f32.nxv2bf16(
+ <vscale x 2 x float> poison,
+ <vscale x 2 x float> %1,
+ <vscale x 2 x bfloat> %0,
+ iXLen 0, iXLen %2)
+
+ ret <vscale x 2 x float> %a
+}
+
+define <vscale x 4 x float> @intrinsic_vfwadd.w_wv_untie_nxv4f32_nxv4f32_nxv4bf16(<vscale x 4 x bfloat> %0, <vscale x 4 x float> %1, iXLen %2) nounwind {
+; CHECK-LABEL: intrinsic_vfwadd.w_wv_untie_nxv4f32_nxv4f32_nxv4bf16:
+; CHECK: # %bb.0: # %entry
+; CHECK-NEXT: vsetvli zero, a0, e16alt, m1, ta, ma
+; CHECK-NEXT: vmv1r.v v12, v8
+; CHECK-NEXT: fsrmi a0, 0
+; CHECK-NEXT: vfwadd.wv v8, v10, v12
+; CHECK-NEXT: fsrm a0
+; CHECK-NEXT: ret
+entry:
+ %a = call <vscale x 4 x float> @llvm.riscv.vfwadd.w.nxv4f32.nxv4bf16(
+ <vscale x 4 x float> poison,
+ <vscale x 4 x float> %1,
+ <vscale x 4 x bfloat> %0,
+ iXLen 0, iXLen %2)
+
+ ret <vscale x 4 x float> %a
+}
+
+define <vscale x 8 x float> @intrinsic_vfwadd.w_wv_untie_nxv8f32_nxv8f32_nxv8bf16(<vscale x 8 x bfloat> %0, <vscale x 8 x float> %1, iXLen %2) nounwind {
+; CHECK-LABEL: intrinsic_vfwadd.w_wv_untie_nxv8f32_nxv8f32_nxv8bf16:
+; CHECK: # %bb.0: # %entry
+; CHECK-NEXT: vsetvli zero, a0, e16alt, m2, ta, ma
+; CHECK-NEXT: vmv2r.v v16, v8
+; CHECK-NEXT: fsrmi a0, 0
+; CHECK-NEXT: vfwadd.wv v8, v12, v16
+; CHECK-NEXT: fsrm a0
+; CHECK-NEXT: ret
+entry:
+ %a = call <vscale x 8 x float> @llvm.riscv.vfwadd.w.nxv8f32.nxv8bf16(
+ <vscale x 8 x float> poison,
+ <vscale x 8 x float> %1,
+ <vscale x 8 x bfloat> %0,
+ iXLen 0, iXLen %2)
+
+ ret <vscale x 8 x float> %a
+}
+
diff --git a/llvm/test/CodeGen/RISCV/rvv/vfwcvt-bf-x.ll b/llvm/test/CodeGen/RISCV/rvv/vfwcvt-bf-x.ll
new file mode 100644
index 0000000..b7df45b
--- /dev/null
+++ b/llvm/test/CodeGen/RISCV/rvv/vfwcvt-bf-x.ll
@@ -0,0 +1,264 @@
+; NOTE: Assertions have been autogenerated by utils/update_llc_test_checks.py
+; RUN: sed 's/iXLen/i32/g' %s | llc -mtriple=riscv32 -mattr=+v,+experimental-zvfbfa \
+; RUN: -verify-machineinstrs -target-abi=ilp32d | FileCheck %s
+; RUN: sed 's/iXLen/i64/g' %s | llc -mtriple=riscv64 -mattr=+v,+experimental-zvfbfa \
+; RUN: -verify-machineinstrs -target-abi=lp64d | FileCheck %s
+
+declare <vscale x 1 x bfloat> @llvm.riscv.vfwcvt.f.x.v.nxv1bf16.nxv1i8(
+ <vscale x 1 x bfloat>,
+ <vscale x 1 x i8>,
+ iXLen);
+
+define <vscale x 1 x bfloat> @intrinsic_vfwcvt_f.x.v_nxv1bf16_nxv1i8(<vscale x 1 x i8> %0, iXLen %1) nounwind {
+; CHECK-LABEL: intrinsic_vfwcvt_f.x.v_nxv1bf16_nxv1i8:
+; CHECK: # %bb.0: # %entry
+; CHECK-NEXT: vsetvli zero, a0, e8alt, mf8, ta, ma
+; CHECK-NEXT: vfwcvt.f.x.v v9, v8
+; CHECK-NEXT: vmv1r.v v8, v9
+; CHECK-NEXT: ret
+entry:
+ %a = call <vscale x 1 x bfloat> @llvm.riscv.vfwcvt.f.x.v.nxv1bf16.nxv1i8(
+ <vscale x 1 x bfloat> poison,
+ <vscale x 1 x i8> %0,
+ iXLen %1)
+
+ ret <vscale x 1 x bfloat> %a
+}
+
+declare <vscale x 1 x bfloat> @llvm.riscv.vfwcvt.f.x.v.mask.nxv1bf16.nxv1i8(
+ <vscale x 1 x bfloat>,
+ <vscale x 1 x i8>,
+ <vscale x 1 x i1>,
+ iXLen, iXLen);
+
+define <vscale x 1 x bfloat> @intrinsic_vfwcvt_mask_f.x.v_nxv1bf16_nxv1i8(<vscale x 1 x bfloat> %0, <vscale x 1 x i8> %1, <vscale x 1 x i1> %2, iXLen %3) nounwind {
+; CHECK-LABEL: intrinsic_vfwcvt_mask_f.x.v_nxv1bf16_nxv1i8:
+; CHECK: # %bb.0: # %entry
+; CHECK-NEXT: vsetvli zero, a0, e8alt, mf8, ta, mu
+; CHECK-NEXT: vfwcvt.f.x.v v8, v9, v0.t
+; CHECK-NEXT: ret
+entry:
+ %a = call <vscale x 1 x bfloat> @llvm.riscv.vfwcvt.f.x.v.mask.nxv1bf16.nxv1i8(
+ <vscale x 1 x bfloat> %0,
+ <vscale x 1 x i8> %1,
+ <vscale x 1 x i1> %2,
+ iXLen %3, iXLen 1)
+
+ ret <vscale x 1 x bfloat> %a
+}
+
+declare <vscale x 2 x bfloat> @llvm.riscv.vfwcvt.f.x.v.nxv2bf16.nxv2i8(
+ <vscale x 2 x bfloat>,
+ <vscale x 2 x i8>,
+ iXLen);
+
+define <vscale x 2 x bfloat> @intrinsic_vfwcvt_f.x.v_nxv2bf16_nxv2i8(<vscale x 2 x i8> %0, iXLen %1) nounwind {
+; CHECK-LABEL: intrinsic_vfwcvt_f.x.v_nxv2bf16_nxv2i8:
+; CHECK: # %bb.0: # %entry
+; CHECK-NEXT: vsetvli zero, a0, e8alt, mf4, ta, ma
+; CHECK-NEXT: vfwcvt.f.x.v v9, v8
+; CHECK-NEXT: vmv1r.v v8, v9
+; CHECK-NEXT: ret
+entry:
+ %a = call <vscale x 2 x bfloat> @llvm.riscv.vfwcvt.f.x.v.nxv2bf16.nxv2i8(
+ <vscale x 2 x bfloat> poison,
+ <vscale x 2 x i8> %0,
+ iXLen %1)
+
+ ret <vscale x 2 x bfloat> %a
+}
+
+declare <vscale x 2 x bfloat> @llvm.riscv.vfwcvt.f.x.v.mask.nxv2bf16.nxv2i8(
+ <vscale x 2 x bfloat>,
+ <vscale x 2 x i8>,
+ <vscale x 2 x i1>,
+ iXLen, iXLen);
+
+define <vscale x 2 x bfloat> @intrinsic_vfwcvt_mask_f.x.v_nxv2bf16_nxv2i8(<vscale x 2 x bfloat> %0, <vscale x 2 x i8> %1, <vscale x 2 x i1> %2, iXLen %3) nounwind {
+; CHECK-LABEL: intrinsic_vfwcvt_mask_f.x.v_nxv2bf16_nxv2i8:
+; CHECK: # %bb.0: # %entry
+; CHECK-NEXT: vsetvli zero, a0, e8alt, mf4, ta, mu
+; CHECK-NEXT: vfwcvt.f.x.v v8, v9, v0.t
+; CHECK-NEXT: ret
+entry:
+ %a = call <vscale x 2 x bfloat> @llvm.riscv.vfwcvt.f.x.v.mask.nxv2bf16.nxv2i8(
+ <vscale x 2 x bfloat> %0,
+ <vscale x 2 x i8> %1,
+ <vscale x 2 x i1> %2,
+ iXLen %3, iXLen 1)
+
+ ret <vscale x 2 x bfloat> %a
+}
+
+declare <vscale x 4 x bfloat> @llvm.riscv.vfwcvt.f.x.v.nxv4bf16.nxv4i8(
+ <vscale x 4 x bfloat>,
+ <vscale x 4 x i8>,
+ iXLen);
+
+define <vscale x 4 x bfloat> @intrinsic_vfwcvt_f.x.v_nxv4bf16_nxv4i8(<vscale x 4 x i8> %0, iXLen %1) nounwind {
+; CHECK-LABEL: intrinsic_vfwcvt_f.x.v_nxv4bf16_nxv4i8:
+; CHECK: # %bb.0: # %entry
+; CHECK-NEXT: vsetvli zero, a0, e8alt, mf2, ta, ma
+; CHECK-NEXT: vfwcvt.f.x.v v9, v8
+; CHECK-NEXT: vmv1r.v v8, v9
+; CHECK-NEXT: ret
+entry:
+ %a = call <vscale x 4 x bfloat> @llvm.riscv.vfwcvt.f.x.v.nxv4bf16.nxv4i8(
+ <vscale x 4 x bfloat> poison,
+ <vscale x 4 x i8> %0,
+ iXLen %1)
+
+ ret <vscale x 4 x bfloat> %a
+}
+
+declare <vscale x 4 x bfloat> @llvm.riscv.vfwcvt.f.x.v.mask.nxv4bf16.nxv4i8(
+ <vscale x 4 x bfloat>,
+ <vscale x 4 x i8>,
+ <vscale x 4 x i1>,
+ iXLen, iXLen);
+
+define <vscale x 4 x bfloat> @intrinsic_vfwcvt_mask_f.x.v_nxv4bf16_nxv4i8(<vscale x 4 x bfloat> %0, <vscale x 4 x i8> %1, <vscale x 4 x i1> %2, iXLen %3) nounwind {
+; CHECK-LABEL: intrinsic_vfwcvt_mask_f.x.v_nxv4bf16_nxv4i8:
+; CHECK: # %bb.0: # %entry
+; CHECK-NEXT: vsetvli zero, a0, e8alt, mf2, ta, mu
+; CHECK-NEXT: vfwcvt.f.x.v v8, v9, v0.t
+; CHECK-NEXT: ret
+entry:
+ %a = call <vscale x 4 x bfloat> @llvm.riscv.vfwcvt.f.x.v.mask.nxv4bf16.nxv4i8(
+ <vscale x 4 x bfloat> %0,
+ <vscale x 4 x i8> %1,
+ <vscale x 4 x i1> %2,
+ iXLen %3, iXLen 1)
+
+ ret <vscale x 4 x bfloat> %a
+}
+
+declare <vscale x 8 x bfloat> @llvm.riscv.vfwcvt.f.x.v.nxv8bf16.nxv8i8(
+ <vscale x 8 x bfloat>,
+ <vscale x 8 x i8>,
+ iXLen);
+
+define <vscale x 8 x bfloat> @intrinsic_vfwcvt_f.x.v_nxv8bf16_nxv8i8(<vscale x 8 x i8> %0, iXLen %1) nounwind {
+; CHECK-LABEL: intrinsic_vfwcvt_f.x.v_nxv8bf16_nxv8i8:
+; CHECK: # %bb.0: # %entry
+; CHECK-NEXT: vsetvli zero, a0, e8alt, m1, ta, ma
+; CHECK-NEXT: vmv1r.v v10, v8
+; CHECK-NEXT: vfwcvt.f.x.v v8, v10
+; CHECK-NEXT: ret
+entry:
+ %a = call <vscale x 8 x bfloat> @llvm.riscv.vfwcvt.f.x.v.nxv8bf16.nxv8i8(
+ <vscale x 8 x bfloat> poison,
+ <vscale x 8 x i8> %0,
+ iXLen %1)
+
+ ret <vscale x 8 x bfloat> %a
+}
+
+declare <vscale x 8 x bfloat> @llvm.riscv.vfwcvt.f.x.v.mask.nxv8bf16.nxv8i8(
+ <vscale x 8 x bfloat>,
+ <vscale x 8 x i8>,
+ <vscale x 8 x i1>,
+ iXLen, iXLen);
+
+define <vscale x 8 x bfloat> @intrinsic_vfwcvt_mask_f.x.v_nxv8bf16_nxv8i8(<vscale x 8 x bfloat> %0, <vscale x 8 x i8> %1, <vscale x 8 x i1> %2, iXLen %3) nounwind {
+; CHECK-LABEL: intrinsic_vfwcvt_mask_f.x.v_nxv8bf16_nxv8i8:
+; CHECK: # %bb.0: # %entry
+; CHECK-NEXT: vsetvli zero, a0, e8alt, m1, ta, mu
+; CHECK-NEXT: vfwcvt.f.x.v v8, v10, v0.t
+; CHECK-NEXT: ret
+entry:
+ %a = call <vscale x 8 x bfloat> @llvm.riscv.vfwcvt.f.x.v.mask.nxv8bf16.nxv8i8(
+ <vscale x 8 x bfloat> %0,
+ <vscale x 8 x i8> %1,
+ <vscale x 8 x i1> %2,
+ iXLen %3, iXLen 1)
+
+ ret <vscale x 8 x bfloat> %a
+}
+
+declare <vscale x 16 x bfloat> @llvm.riscv.vfwcvt.f.x.v.nxv16bf16.nxv16i8(
+ <vscale x 16 x bfloat>,
+ <vscale x 16 x i8>,
+ iXLen);
+
+define <vscale x 16 x bfloat> @intrinsic_vfwcvt_f.x.v_nxv16bf16_nxv16i8(<vscale x 16 x i8> %0, iXLen %1) nounwind {
+; CHECK-LABEL: intrinsic_vfwcvt_f.x.v_nxv16bf16_nxv16i8:
+; CHECK: # %bb.0: # %entry
+; CHECK-NEXT: vsetvli zero, a0, e8alt, m2, ta, ma
+; CHECK-NEXT: vmv2r.v v12, v8
+; CHECK-NEXT: vfwcvt.f.x.v v8, v12
+; CHECK-NEXT: ret
+entry:
+ %a = call <vscale x 16 x bfloat> @llvm.riscv.vfwcvt.f.x.v.nxv16bf16.nxv16i8(
+ <vscale x 16 x bfloat> poison,
+ <vscale x 16 x i8> %0,
+ iXLen %1)
+
+ ret <vscale x 16 x bfloat> %a
+}
+
+declare <vscale x 16 x bfloat> @llvm.riscv.vfwcvt.f.x.v.mask.nxv16bf16.nxv16i8(
+ <vscale x 16 x bfloat>,
+ <vscale x 16 x i8>,
+ <vscale x 16 x i1>,
+ iXLen, iXLen);
+
+define <vscale x 16 x bfloat> @intrinsic_vfwcvt_mask_f.x.v_nxv16bf16_nxv16i8(<vscale x 16 x bfloat> %0, <vscale x 16 x i8> %1, <vscale x 16 x i1> %2, iXLen %3) nounwind {
+; CHECK-LABEL: intrinsic_vfwcvt_mask_f.x.v_nxv16bf16_nxv16i8:
+; CHECK: # %bb.0: # %entry
+; CHECK-NEXT: vsetvli zero, a0, e8alt, m2, ta, mu
+; CHECK-NEXT: vfwcvt.f.x.v v8, v12, v0.t
+; CHECK-NEXT: ret
+entry:
+ %a = call <vscale x 16 x bfloat> @llvm.riscv.vfwcvt.f.x.v.mask.nxv16bf16.nxv16i8(
+ <vscale x 16 x bfloat> %0,
+ <vscale x 16 x i8> %1,
+ <vscale x 16 x i1> %2,
+ iXLen %3, iXLen 1)
+
+ ret <vscale x 16 x bfloat> %a
+}
+
+declare <vscale x 32 x bfloat> @llvm.riscv.vfwcvt.f.x.v.nxv32bf16.nxv32i8(
+ <vscale x 32 x bfloat>,
+ <vscale x 32 x i8>,
+ iXLen);
+
+define <vscale x 32 x bfloat> @intrinsic_vfwcvt_f.x.v_nxv32bf16_nxv32i8(<vscale x 32 x i8> %0, iXLen %1) nounwind {
+; CHECK-LABEL: intrinsic_vfwcvt_f.x.v_nxv32bf16_nxv32i8:
+; CHECK: # %bb.0: # %entry
+; CHECK-NEXT: vsetvli zero, a0, e8alt, m4, ta, ma
+; CHECK-NEXT: vmv4r.v v16, v8
+; CHECK-NEXT: vfwcvt.f.x.v v8, v16
+; CHECK-NEXT: ret
+entry:
+ %a = call <vscale x 32 x bfloat> @llvm.riscv.vfwcvt.f.x.v.nxv32bf16.nxv32i8(
+ <vscale x 32 x bfloat> poison,
+ <vscale x 32 x i8> %0,
+ iXLen %1)
+
+ ret <vscale x 32 x bfloat> %a
+}
+
+declare <vscale x 32 x bfloat> @llvm.riscv.vfwcvt.f.x.v.mask.nxv32bf16.nxv32i8(
+ <vscale x 32 x bfloat>,
+ <vscale x 32 x i8>,
+ <vscale x 32 x i1>,
+ iXLen, iXLen);
+
+define <vscale x 32 x bfloat> @intrinsic_vfwcvt_mask_f.x.v_nxv32bf16_nxv32i8(<vscale x 32 x bfloat> %0, <vscale x 32 x i8> %1, <vscale x 32 x i1> %2, iXLen %3) nounwind {
+; CHECK-LABEL: intrinsic_vfwcvt_mask_f.x.v_nxv32bf16_nxv32i8:
+; CHECK: # %bb.0: # %entry
+; CHECK-NEXT: vsetvli zero, a0, e8alt, m4, ta, mu
+; CHECK-NEXT: vfwcvt.f.x.v v8, v16, v0.t
+; CHECK-NEXT: ret
+entry:
+ %a = call <vscale x 32 x bfloat> @llvm.riscv.vfwcvt.f.x.v.mask.nxv32bf16.nxv32i8(
+ <vscale x 32 x bfloat> %0,
+ <vscale x 32 x i8> %1,
+ <vscale x 32 x i1> %2,
+ iXLen %3, iXLen 1)
+
+ ret <vscale x 32 x bfloat> %a
+}
+
diff --git a/llvm/test/CodeGen/RISCV/rvv/vfwcvt-bf-xu.ll b/llvm/test/CodeGen/RISCV/rvv/vfwcvt-bf-xu.ll
new file mode 100644
index 0000000..c370261
--- /dev/null
+++ b/llvm/test/CodeGen/RISCV/rvv/vfwcvt-bf-xu.ll
@@ -0,0 +1,264 @@
+; NOTE: Assertions have been autogenerated by utils/update_llc_test_checks.py
+; RUN: sed 's/iXLen/i32/g' %s | llc -mtriple=riscv32 -mattr=+v,+experimental-zvfbfa \
+; RUN: -verify-machineinstrs -target-abi=ilp32d | FileCheck %s
+; RUN: sed 's/iXLen/i64/g' %s | llc -mtriple=riscv64 -mattr=+v,+experimental-zvfbfa \
+; RUN: -verify-machineinstrs -target-abi=lp64d | FileCheck %s
+
+declare <vscale x 1 x bfloat> @llvm.riscv.vfwcvt.f.xu.v.nxv1bf16.nxv1i8(
+ <vscale x 1 x bfloat>,
+ <vscale x 1 x i8>,
+ iXLen);
+
+define <vscale x 1 x bfloat> @intrinsic_vfwcvt_f.xu.v_nxv1bf16_nxv1i8(<vscale x 1 x i8> %0, iXLen %1) nounwind {
+; CHECK-LABEL: intrinsic_vfwcvt_f.xu.v_nxv1bf16_nxv1i8:
+; CHECK: # %bb.0: # %entry
+; CHECK-NEXT: vsetvli zero, a0, e8alt, mf8, ta, ma
+; CHECK-NEXT: vfwcvt.f.xu.v v9, v8
+; CHECK-NEXT: vmv1r.v v8, v9
+; CHECK-NEXT: ret
+entry:
+ %a = call <vscale x 1 x bfloat> @llvm.riscv.vfwcvt.f.xu.v.nxv1bf16.nxv1i8(
+ <vscale x 1 x bfloat> poison,
+ <vscale x 1 x i8> %0,
+ iXLen %1)
+
+ ret <vscale x 1 x bfloat> %a
+}
+
+declare <vscale x 1 x bfloat> @llvm.riscv.vfwcvt.f.xu.v.mask.nxv1bf16.nxv1i8(
+ <vscale x 1 x bfloat>,
+ <vscale x 1 x i8>,
+ <vscale x 1 x i1>,
+ iXLen, iXLen);
+
+define <vscale x 1 x bfloat> @intrinsic_vfwcvt_mask_f.xu.v_nxv1bf16_nxv1i8(<vscale x 1 x bfloat> %0, <vscale x 1 x i8> %1, <vscale x 1 x i1> %2, iXLen %3) nounwind {
+; CHECK-LABEL: intrinsic_vfwcvt_mask_f.xu.v_nxv1bf16_nxv1i8:
+; CHECK: # %bb.0: # %entry
+; CHECK-NEXT: vsetvli zero, a0, e8alt, mf8, ta, mu
+; CHECK-NEXT: vfwcvt.f.xu.v v8, v9, v0.t
+; CHECK-NEXT: ret
+entry:
+ %a = call <vscale x 1 x bfloat> @llvm.riscv.vfwcvt.f.xu.v.mask.nxv1bf16.nxv1i8(
+ <vscale x 1 x bfloat> %0,
+ <vscale x 1 x i8> %1,
+ <vscale x 1 x i1> %2,
+ iXLen %3, iXLen 1)
+
+ ret <vscale x 1 x bfloat> %a
+}
+
+declare <vscale x 2 x bfloat> @llvm.riscv.vfwcvt.f.xu.v.nxv2bf16.nxv2i8(
+ <vscale x 2 x bfloat>,
+ <vscale x 2 x i8>,
+ iXLen);
+
+define <vscale x 2 x bfloat> @intrinsic_vfwcvt_f.xu.v_nxv2bf16_nxv2i8(<vscale x 2 x i8> %0, iXLen %1) nounwind {
+; CHECK-LABEL: intrinsic_vfwcvt_f.xu.v_nxv2bf16_nxv2i8:
+; CHECK: # %bb.0: # %entry
+; CHECK-NEXT: vsetvli zero, a0, e8alt, mf4, ta, ma
+; CHECK-NEXT: vfwcvt.f.xu.v v9, v8
+; CHECK-NEXT: vmv1r.v v8, v9
+; CHECK-NEXT: ret
+entry:
+ %a = call <vscale x 2 x bfloat> @llvm.riscv.vfwcvt.f.xu.v.nxv2bf16.nxv2i8(
+ <vscale x 2 x bfloat> poison,
+ <vscale x 2 x i8> %0,
+ iXLen %1)
+
+ ret <vscale x 2 x bfloat> %a
+}
+
+declare <vscale x 2 x bfloat> @llvm.riscv.vfwcvt.f.xu.v.mask.nxv2bf16.nxv2i8(
+ <vscale x 2 x bfloat>,
+ <vscale x 2 x i8>,
+ <vscale x 2 x i1>,
+ iXLen, iXLen);
+
+define <vscale x 2 x bfloat> @intrinsic_vfwcvt_mask_f.xu.v_nxv2bf16_nxv2i8(<vscale x 2 x bfloat> %0, <vscale x 2 x i8> %1, <vscale x 2 x i1> %2, iXLen %3) nounwind {
+; CHECK-LABEL: intrinsic_vfwcvt_mask_f.xu.v_nxv2bf16_nxv2i8:
+; CHECK: # %bb.0: # %entry
+; CHECK-NEXT: vsetvli zero, a0, e8alt, mf4, ta, mu
+; CHECK-NEXT: vfwcvt.f.xu.v v8, v9, v0.t
+; CHECK-NEXT: ret
+entry:
+ %a = call <vscale x 2 x bfloat> @llvm.riscv.vfwcvt.f.xu.v.mask.nxv2bf16.nxv2i8(
+ <vscale x 2 x bfloat> %0,
+ <vscale x 2 x i8> %1,
+ <vscale x 2 x i1> %2,
+ iXLen %3, iXLen 1)
+
+ ret <vscale x 2 x bfloat> %a
+}
+
+declare <vscale x 4 x bfloat> @llvm.riscv.vfwcvt.f.xu.v.nxv4bf16.nxv4i8(
+ <vscale x 4 x bfloat>,
+ <vscale x 4 x i8>,
+ iXLen);
+
+define <vscale x 4 x bfloat> @intrinsic_vfwcvt_f.xu.v_nxv4bf16_nxv4i8(<vscale x 4 x i8> %0, iXLen %1) nounwind {
+; CHECK-LABEL: intrinsic_vfwcvt_f.xu.v_nxv4bf16_nxv4i8:
+; CHECK: # %bb.0: # %entry
+; CHECK-NEXT: vsetvli zero, a0, e8alt, mf2, ta, ma
+; CHECK-NEXT: vfwcvt.f.xu.v v9, v8
+; CHECK-NEXT: vmv1r.v v8, v9
+; CHECK-NEXT: ret
+entry:
+ %a = call <vscale x 4 x bfloat> @llvm.riscv.vfwcvt.f.xu.v.nxv4bf16.nxv4i8(
+ <vscale x 4 x bfloat> poison,
+ <vscale x 4 x i8> %0,
+ iXLen %1)
+
+ ret <vscale x 4 x bfloat> %a
+}
+
+declare <vscale x 4 x bfloat> @llvm.riscv.vfwcvt.f.xu.v.mask.nxv4bf16.nxv4i8(
+ <vscale x 4 x bfloat>,
+ <vscale x 4 x i8>,
+ <vscale x 4 x i1>,
+ iXLen, iXLen);
+
+define <vscale x 4 x bfloat> @intrinsic_vfwcvt_mask_f.xu.v_nxv4bf16_nxv4i8(<vscale x 4 x bfloat> %0, <vscale x 4 x i8> %1, <vscale x 4 x i1> %2, iXLen %3) nounwind {
+; CHECK-LABEL: intrinsic_vfwcvt_mask_f.xu.v_nxv4bf16_nxv4i8:
+; CHECK: # %bb.0: # %entry
+; CHECK-NEXT: vsetvli zero, a0, e8alt, mf2, ta, mu
+; CHECK-NEXT: vfwcvt.f.xu.v v8, v9, v0.t
+; CHECK-NEXT: ret
+entry:
+ %a = call <vscale x 4 x bfloat> @llvm.riscv.vfwcvt.f.xu.v.mask.nxv4bf16.nxv4i8(
+ <vscale x 4 x bfloat> %0,
+ <vscale x 4 x i8> %1,
+ <vscale x 4 x i1> %2,
+ iXLen %3, iXLen 1)
+
+ ret <vscale x 4 x bfloat> %a
+}
+
+declare <vscale x 8 x bfloat> @llvm.riscv.vfwcvt.f.xu.v.nxv8bf16.nxv8i8(
+ <vscale x 8 x bfloat>,
+ <vscale x 8 x i8>,
+ iXLen);
+
+define <vscale x 8 x bfloat> @intrinsic_vfwcvt_f.xu.v_nxv8bf16_nxv8i8(<vscale x 8 x i8> %0, iXLen %1) nounwind {
+; CHECK-LABEL: intrinsic_vfwcvt_f.xu.v_nxv8bf16_nxv8i8:
+; CHECK: # %bb.0: # %entry
+; CHECK-NEXT: vsetvli zero, a0, e8alt, m1, ta, ma
+; CHECK-NEXT: vmv1r.v v10, v8
+; CHECK-NEXT: vfwcvt.f.xu.v v8, v10
+; CHECK-NEXT: ret
+entry:
+ %a = call <vscale x 8 x bfloat> @llvm.riscv.vfwcvt.f.xu.v.nxv8bf16.nxv8i8(
+ <vscale x 8 x bfloat> poison,
+ <vscale x 8 x i8> %0,
+ iXLen %1)
+
+ ret <vscale x 8 x bfloat> %a
+}
+
+declare <vscale x 8 x bfloat> @llvm.riscv.vfwcvt.f.xu.v.mask.nxv8bf16.nxv8i8(
+ <vscale x 8 x bfloat>,
+ <vscale x 8 x i8>,
+ <vscale x 8 x i1>,
+ iXLen, iXLen);
+
+define <vscale x 8 x bfloat> @intrinsic_vfwcvt_mask_f.xu.v_nxv8bf16_nxv8i8(<vscale x 8 x bfloat> %0, <vscale x 8 x i8> %1, <vscale x 8 x i1> %2, iXLen %3) nounwind {
+; CHECK-LABEL: intrinsic_vfwcvt_mask_f.xu.v_nxv8bf16_nxv8i8:
+; CHECK: # %bb.0: # %entry
+; CHECK-NEXT: vsetvli zero, a0, e8alt, m1, ta, mu
+; CHECK-NEXT: vfwcvt.f.xu.v v8, v10, v0.t
+; CHECK-NEXT: ret
+entry:
+ %a = call <vscale x 8 x bfloat> @llvm.riscv.vfwcvt.f.xu.v.mask.nxv8bf16.nxv8i8(
+ <vscale x 8 x bfloat> %0,
+ <vscale x 8 x i8> %1,
+ <vscale x 8 x i1> %2,
+ iXLen %3, iXLen 1)
+
+ ret <vscale x 8 x bfloat> %a
+}
+
+declare <vscale x 16 x bfloat> @llvm.riscv.vfwcvt.f.xu.v.nxv16bf16.nxv16i8(
+ <vscale x 16 x bfloat>,
+ <vscale x 16 x i8>,
+ iXLen);
+
+define <vscale x 16 x bfloat> @intrinsic_vfwcvt_f.xu.v_nxv16bf16_nxv16i8(<vscale x 16 x i8> %0, iXLen %1) nounwind {
+; CHECK-LABEL: intrinsic_vfwcvt_f.xu.v_nxv16bf16_nxv16i8:
+; CHECK: # %bb.0: # %entry
+; CHECK-NEXT: vsetvli zero, a0, e8alt, m2, ta, ma
+; CHECK-NEXT: vmv2r.v v12, v8
+; CHECK-NEXT: vfwcvt.f.xu.v v8, v12
+; CHECK-NEXT: ret
+entry:
+ %a = call <vscale x 16 x bfloat> @llvm.riscv.vfwcvt.f.xu.v.nxv16bf16.nxv16i8(
+ <vscale x 16 x bfloat> poison,
+ <vscale x 16 x i8> %0,
+ iXLen %1)
+
+ ret <vscale x 16 x bfloat> %a
+}
+
+declare <vscale x 16 x bfloat> @llvm.riscv.vfwcvt.f.xu.v.mask.nxv16bf16.nxv16i8(
+ <vscale x 16 x bfloat>,
+ <vscale x 16 x i8>,
+ <vscale x 16 x i1>,
+ iXLen, iXLen);
+
+define <vscale x 16 x bfloat> @intrinsic_vfwcvt_mask_f.xu.v_nxv16bf16_nxv16i8(<vscale x 16 x bfloat> %0, <vscale x 16 x i8> %1, <vscale x 16 x i1> %2, iXLen %3) nounwind {
+; CHECK-LABEL: intrinsic_vfwcvt_mask_f.xu.v_nxv16bf16_nxv16i8:
+; CHECK: # %bb.0: # %entry
+; CHECK-NEXT: vsetvli zero, a0, e8alt, m2, ta, mu
+; CHECK-NEXT: vfwcvt.f.xu.v v8, v12, v0.t
+; CHECK-NEXT: ret
+entry:
+ %a = call <vscale x 16 x bfloat> @llvm.riscv.vfwcvt.f.xu.v.mask.nxv16bf16.nxv16i8(
+ <vscale x 16 x bfloat> %0,
+ <vscale x 16 x i8> %1,
+ <vscale x 16 x i1> %2,
+ iXLen %3, iXLen 1)
+
+ ret <vscale x 16 x bfloat> %a
+}
+
+declare <vscale x 32 x bfloat> @llvm.riscv.vfwcvt.f.xu.v.nxv32bf16.nxv32i8(
+ <vscale x 32 x bfloat>,
+ <vscale x 32 x i8>,
+ iXLen);
+
+define <vscale x 32 x bfloat> @intrinsic_vfwcvt_f.xu.v_nxv32bf16_nxv32i8(<vscale x 32 x i8> %0, iXLen %1) nounwind {
+; CHECK-LABEL: intrinsic_vfwcvt_f.xu.v_nxv32bf16_nxv32i8:
+; CHECK: # %bb.0: # %entry
+; CHECK-NEXT: vsetvli zero, a0, e8alt, m4, ta, ma
+; CHECK-NEXT: vmv4r.v v16, v8
+; CHECK-NEXT: vfwcvt.f.xu.v v8, v16
+; CHECK-NEXT: ret
+entry:
+ %a = call <vscale x 32 x bfloat> @llvm.riscv.vfwcvt.f.xu.v.nxv32bf16.nxv32i8(
+ <vscale x 32 x bfloat> poison,
+ <vscale x 32 x i8> %0,
+ iXLen %1)
+
+ ret <vscale x 32 x bfloat> %a
+}
+
+declare <vscale x 32 x bfloat> @llvm.riscv.vfwcvt.f.xu.v.mask.nxv32bf16.nxv32i8(
+ <vscale x 32 x bfloat>,
+ <vscale x 32 x i8>,
+ <vscale x 32 x i1>,
+ iXLen, iXLen);
+
+define <vscale x 32 x bfloat> @intrinsic_vfwcvt_mask_f.xu.v_nxv32bf16_nxv32i8(<vscale x 32 x bfloat> %0, <vscale x 32 x i8> %1, <vscale x 32 x i1> %2, iXLen %3) nounwind {
+; CHECK-LABEL: intrinsic_vfwcvt_mask_f.xu.v_nxv32bf16_nxv32i8:
+; CHECK: # %bb.0: # %entry
+; CHECK-NEXT: vsetvli zero, a0, e8alt, m4, ta, mu
+; CHECK-NEXT: vfwcvt.f.xu.v v8, v16, v0.t
+; CHECK-NEXT: ret
+entry:
+ %a = call <vscale x 32 x bfloat> @llvm.riscv.vfwcvt.f.xu.v.mask.nxv32bf16.nxv32i8(
+ <vscale x 32 x bfloat> %0,
+ <vscale x 32 x i8> %1,
+ <vscale x 32 x i1> %2,
+ iXLen %3, iXLen 1)
+
+ ret <vscale x 32 x bfloat> %a
+}
+
diff --git a/llvm/test/CodeGen/RISCV/rvv/vfwmsac-bf.ll b/llvm/test/CodeGen/RISCV/rvv/vfwmsac-bf.ll
new file mode 100644
index 0000000..a3f6678
--- /dev/null
+++ b/llvm/test/CodeGen/RISCV/rvv/vfwmsac-bf.ll
@@ -0,0 +1,506 @@
+; NOTE: Assertions have been autogenerated by utils/update_llc_test_checks.py
+; RUN: sed 's/iXLen/i32/g' %s | llc -mtriple=riscv32 -mattr=+v,+experimental-zvfbfa \
+; RUN: -verify-machineinstrs -target-abi=ilp32d | FileCheck %s
+; RUN: sed 's/iXLen/i64/g' %s | llc -mtriple=riscv64 -mattr=+v,+experimental-zvfbfa \
+; RUN: -verify-machineinstrs -target-abi=lp64d | FileCheck %s
+
+declare <vscale x 1 x float> @llvm.riscv.vfwmsac.nxv1f32.nxv1bf16(
+ <vscale x 1 x float>,
+ <vscale x 1 x bfloat>,
+ <vscale x 1 x bfloat>,
+ iXLen, iXLen, iXLen);
+
+define <vscale x 1 x float> @intrinsic_vfwmsac_vv_nxv1f32_nxv1bf16_nxv1bf16(<vscale x 1 x float> %0, <vscale x 1 x bfloat> %1, <vscale x 1 x bfloat> %2, iXLen %3) nounwind {
+; CHECK-LABEL: intrinsic_vfwmsac_vv_nxv1f32_nxv1bf16_nxv1bf16:
+; CHECK: # %bb.0: # %entry
+; CHECK-NEXT: fsrmi a1, 0
+; CHECK-NEXT: vsetvli zero, a0, e16alt, mf4, tu, ma
+; CHECK-NEXT: vfwmsac.vv v8, v9, v10
+; CHECK-NEXT: fsrm a1
+; CHECK-NEXT: ret
+entry:
+ %a = call <vscale x 1 x float> @llvm.riscv.vfwmsac.nxv1f32.nxv1bf16(
+ <vscale x 1 x float> %0,
+ <vscale x 1 x bfloat> %1,
+ <vscale x 1 x bfloat> %2,
+ iXLen 0, iXLen %3, iXLen 0)
+
+ ret <vscale x 1 x float> %a
+}
+
+declare <vscale x 1 x float> @llvm.riscv.vfwmsac.mask.nxv1f32.nxv1bf16(
+ <vscale x 1 x float>,
+ <vscale x 1 x bfloat>,
+ <vscale x 1 x bfloat>,
+ <vscale x 1 x i1>,
+ iXLen, iXLen, iXLen);
+
+define <vscale x 1 x float> @intrinsic_vfwmsac_mask_vv_nxv1f32_nxv1bf16_nxv1bf16(<vscale x 1 x float> %0, <vscale x 1 x bfloat> %1, <vscale x 1 x bfloat> %2, <vscale x 1 x i1> %3, iXLen %4) nounwind {
+; CHECK-LABEL: intrinsic_vfwmsac_mask_vv_nxv1f32_nxv1bf16_nxv1bf16:
+; CHECK: # %bb.0: # %entry
+; CHECK-NEXT: fsrmi a1, 0
+; CHECK-NEXT: vsetvli zero, a0, e16alt, mf4, tu, mu
+; CHECK-NEXT: vfwmsac.vv v8, v9, v10, v0.t
+; CHECK-NEXT: fsrm a1
+; CHECK-NEXT: ret
+entry:
+ %a = call <vscale x 1 x float> @llvm.riscv.vfwmsac.mask.nxv1f32.nxv1bf16(
+ <vscale x 1 x float> %0,
+ <vscale x 1 x bfloat> %1,
+ <vscale x 1 x bfloat> %2,
+ <vscale x 1 x i1> %3,
+ iXLen 0, iXLen %4, iXLen 0)
+
+ ret <vscale x 1 x float> %a
+}
+
+declare <vscale x 2 x float> @llvm.riscv.vfwmsac.nxv2f32.nxv2bf16(
+ <vscale x 2 x float>,
+ <vscale x 2 x bfloat>,
+ <vscale x 2 x bfloat>,
+ iXLen, iXLen, iXLen);
+
+define <vscale x 2 x float> @intrinsic_vfwmsac_vv_nxv2f32_nxv2bf16_nxv2bf16(<vscale x 2 x float> %0, <vscale x 2 x bfloat> %1, <vscale x 2 x bfloat> %2, iXLen %3) nounwind {
+; CHECK-LABEL: intrinsic_vfwmsac_vv_nxv2f32_nxv2bf16_nxv2bf16:
+; CHECK: # %bb.0: # %entry
+; CHECK-NEXT: fsrmi a1, 0
+; CHECK-NEXT: vsetvli zero, a0, e16alt, mf2, tu, ma
+; CHECK-NEXT: vfwmsac.vv v8, v9, v10
+; CHECK-NEXT: fsrm a1
+; CHECK-NEXT: ret
+entry:
+ %a = call <vscale x 2 x float> @llvm.riscv.vfwmsac.nxv2f32.nxv2bf16(
+ <vscale x 2 x float> %0,
+ <vscale x 2 x bfloat> %1,
+ <vscale x 2 x bfloat> %2,
+ iXLen 0, iXLen %3, iXLen 0)
+
+ ret <vscale x 2 x float> %a
+}
+
+declare <vscale x 2 x float> @llvm.riscv.vfwmsac.mask.nxv2f32.nxv2bf16(
+ <vscale x 2 x float>,
+ <vscale x 2 x bfloat>,
+ <vscale x 2 x bfloat>,
+ <vscale x 2 x i1>,
+ iXLen, iXLen, iXLen);
+
+define <vscale x 2 x float> @intrinsic_vfwmsac_mask_vv_nxv2f32_nxv2bf16_nxv2bf16(<vscale x 2 x float> %0, <vscale x 2 x bfloat> %1, <vscale x 2 x bfloat> %2, <vscale x 2 x i1> %3, iXLen %4) nounwind {
+; CHECK-LABEL: intrinsic_vfwmsac_mask_vv_nxv2f32_nxv2bf16_nxv2bf16:
+; CHECK: # %bb.0: # %entry
+; CHECK-NEXT: fsrmi a1, 0
+; CHECK-NEXT: vsetvli zero, a0, e16alt, mf2, tu, mu
+; CHECK-NEXT: vfwmsac.vv v8, v9, v10, v0.t
+; CHECK-NEXT: fsrm a1
+; CHECK-NEXT: ret
+entry:
+ %a = call <vscale x 2 x float> @llvm.riscv.vfwmsac.mask.nxv2f32.nxv2bf16(
+ <vscale x 2 x float> %0,
+ <vscale x 2 x bfloat> %1,
+ <vscale x 2 x bfloat> %2,
+ <vscale x 2 x i1> %3,
+ iXLen 0, iXLen %4, iXLen 0)
+
+ ret <vscale x 2 x float> %a
+}
+
+declare <vscale x 4 x float> @llvm.riscv.vfwmsac.nxv4f32.nxv4bf16(
+ <vscale x 4 x float>,
+ <vscale x 4 x bfloat>,
+ <vscale x 4 x bfloat>,
+ iXLen, iXLen, iXLen);
+
+define <vscale x 4 x float> @intrinsic_vfwmsac_vv_nxv4f32_nxv4bf16_nxv4bf16(<vscale x 4 x float> %0, <vscale x 4 x bfloat> %1, <vscale x 4 x bfloat> %2, iXLen %3) nounwind {
+; CHECK-LABEL: intrinsic_vfwmsac_vv_nxv4f32_nxv4bf16_nxv4bf16:
+; CHECK: # %bb.0: # %entry
+; CHECK-NEXT: fsrmi a1, 0
+; CHECK-NEXT: vsetvli zero, a0, e16alt, m1, tu, ma
+; CHECK-NEXT: vfwmsac.vv v8, v10, v11
+; CHECK-NEXT: fsrm a1
+; CHECK-NEXT: ret
+entry:
+ %a = call <vscale x 4 x float> @llvm.riscv.vfwmsac.nxv4f32.nxv4bf16(
+ <vscale x 4 x float> %0,
+ <vscale x 4 x bfloat> %1,
+ <vscale x 4 x bfloat> %2,
+ iXLen 0, iXLen %3, iXLen 0)
+
+ ret <vscale x 4 x float> %a
+}
+
+declare <vscale x 4 x float> @llvm.riscv.vfwmsac.mask.nxv4f32.nxv4bf16(
+ <vscale x 4 x float>,
+ <vscale x 4 x bfloat>,
+ <vscale x 4 x bfloat>,
+ <vscale x 4 x i1>,
+ iXLen, iXLen, iXLen);
+
+define <vscale x 4 x float> @intrinsic_vfwmsac_mask_vv_nxv4f32_nxv4bf16_nxv4bf16(<vscale x 4 x float> %0, <vscale x 4 x bfloat> %1, <vscale x 4 x bfloat> %2, <vscale x 4 x i1> %3, iXLen %4) nounwind {
+; CHECK-LABEL: intrinsic_vfwmsac_mask_vv_nxv4f32_nxv4bf16_nxv4bf16:
+; CHECK: # %bb.0: # %entry
+; CHECK-NEXT: fsrmi a1, 0
+; CHECK-NEXT: vsetvli zero, a0, e16alt, m1, tu, mu
+; CHECK-NEXT: vfwmsac.vv v8, v10, v11, v0.t
+; CHECK-NEXT: fsrm a1
+; CHECK-NEXT: ret
+entry:
+ %a = call <vscale x 4 x float> @llvm.riscv.vfwmsac.mask.nxv4f32.nxv4bf16(
+ <vscale x 4 x float> %0,
+ <vscale x 4 x bfloat> %1,
+ <vscale x 4 x bfloat> %2,
+ <vscale x 4 x i1> %3,
+ iXLen 0, iXLen %4, iXLen 0)
+
+ ret <vscale x 4 x float> %a
+}
+
+declare <vscale x 8 x float> @llvm.riscv.vfwmsac.nxv8f32.nxv8bf16(
+ <vscale x 8 x float>,
+ <vscale x 8 x bfloat>,
+ <vscale x 8 x bfloat>,
+ iXLen, iXLen, iXLen);
+
+define <vscale x 8 x float> @intrinsic_vfwmsac_vv_nxv8f32_nxv8bf16_nxv8bf16(<vscale x 8 x float> %0, <vscale x 8 x bfloat> %1, <vscale x 8 x bfloat> %2, iXLen %3) nounwind {
+; CHECK-LABEL: intrinsic_vfwmsac_vv_nxv8f32_nxv8bf16_nxv8bf16:
+; CHECK: # %bb.0: # %entry
+; CHECK-NEXT: fsrmi a1, 0
+; CHECK-NEXT: vsetvli zero, a0, e16alt, m2, tu, ma
+; CHECK-NEXT: vfwmsac.vv v8, v12, v14
+; CHECK-NEXT: fsrm a1
+; CHECK-NEXT: ret
+entry:
+ %a = call <vscale x 8 x float> @llvm.riscv.vfwmsac.nxv8f32.nxv8bf16(
+ <vscale x 8 x float> %0,
+ <vscale x 8 x bfloat> %1,
+ <vscale x 8 x bfloat> %2,
+ iXLen 0, iXLen %3, iXLen 0)
+
+ ret <vscale x 8 x float> %a
+}
+
+declare <vscale x 8 x float> @llvm.riscv.vfwmsac.mask.nxv8f32.nxv8bf16(
+ <vscale x 8 x float>,
+ <vscale x 8 x bfloat>,
+ <vscale x 8 x bfloat>,
+ <vscale x 8 x i1>,
+ iXLen, iXLen, iXLen);
+
+define <vscale x 8 x float> @intrinsic_vfwmsac_mask_vv_nxv8f32_nxv8bf16_nxv8bf16(<vscale x 8 x float> %0, <vscale x 8 x bfloat> %1, <vscale x 8 x bfloat> %2, <vscale x 8 x i1> %3, iXLen %4) nounwind {
+; CHECK-LABEL: intrinsic_vfwmsac_mask_vv_nxv8f32_nxv8bf16_nxv8bf16:
+; CHECK: # %bb.0: # %entry
+; CHECK-NEXT: fsrmi a1, 0
+; CHECK-NEXT: vsetvli zero, a0, e16alt, m2, tu, mu
+; CHECK-NEXT: vfwmsac.vv v8, v12, v14, v0.t
+; CHECK-NEXT: fsrm a1
+; CHECK-NEXT: ret
+entry:
+ %a = call <vscale x 8 x float> @llvm.riscv.vfwmsac.mask.nxv8f32.nxv8bf16(
+ <vscale x 8 x float> %0,
+ <vscale x 8 x bfloat> %1,
+ <vscale x 8 x bfloat> %2,
+ <vscale x 8 x i1> %3,
+ iXLen 0, iXLen %4, iXLen 0)
+
+ ret <vscale x 8 x float> %a
+}
+
+declare <vscale x 16 x float> @llvm.riscv.vfwmsac.nxv16f32.nxv16bf16(
+ <vscale x 16 x float>,
+ <vscale x 16 x bfloat>,
+ <vscale x 16 x bfloat>,
+ iXLen, iXLen, iXLen);
+
+define <vscale x 16 x float> @intrinsic_vfwmsac_vv_nxv16f32_nxv16bf16_nxv16bf16(<vscale x 16 x float> %0, <vscale x 16 x bfloat> %1, <vscale x 16 x bfloat> %2, iXLen %3) nounwind {
+; CHECK-LABEL: intrinsic_vfwmsac_vv_nxv16f32_nxv16bf16_nxv16bf16:
+; CHECK: # %bb.0: # %entry
+; CHECK-NEXT: fsrmi a1, 0
+; CHECK-NEXT: vsetvli zero, a0, e16alt, m4, tu, ma
+; CHECK-NEXT: vfwmsac.vv v8, v16, v20
+; CHECK-NEXT: fsrm a1
+; CHECK-NEXT: ret
+entry:
+ %a = call <vscale x 16 x float> @llvm.riscv.vfwmsac.nxv16f32.nxv16bf16(
+ <vscale x 16 x float> %0,
+ <vscale x 16 x bfloat> %1,
+ <vscale x 16 x bfloat> %2,
+ iXLen 0, iXLen %3, iXLen 0)
+
+ ret <vscale x 16 x float> %a
+}
+
+declare <vscale x 16 x float> @llvm.riscv.vfwmsac.mask.nxv16f32.nxv16bf16(
+ <vscale x 16 x float>,
+ <vscale x 16 x bfloat>,
+ <vscale x 16 x bfloat>,
+ <vscale x 16 x i1>,
+ iXLen, iXLen, iXLen);
+
+define <vscale x 16 x float> @intrinsic_vfwmsac_mask_vv_nxv16f32_nxv16bf16_nxv16bf16(<vscale x 16 x float> %0, <vscale x 16 x bfloat> %1, <vscale x 16 x bfloat> %2, <vscale x 16 x i1> %3, iXLen %4) nounwind {
+; CHECK-LABEL: intrinsic_vfwmsac_mask_vv_nxv16f32_nxv16bf16_nxv16bf16:
+; CHECK: # %bb.0: # %entry
+; CHECK-NEXT: fsrmi a1, 0
+; CHECK-NEXT: vsetvli zero, a0, e16alt, m4, tu, mu
+; CHECK-NEXT: vfwmsac.vv v8, v16, v20, v0.t
+; CHECK-NEXT: fsrm a1
+; CHECK-NEXT: ret
+entry:
+ %a = call <vscale x 16 x float> @llvm.riscv.vfwmsac.mask.nxv16f32.nxv16bf16(
+ <vscale x 16 x float> %0,
+ <vscale x 16 x bfloat> %1,
+ <vscale x 16 x bfloat> %2,
+ <vscale x 16 x i1> %3,
+ iXLen 0, iXLen %4, iXLen 0)
+
+ ret <vscale x 16 x float> %a
+}
+
+declare <vscale x 1 x float> @llvm.riscv.vfwmsac.nxv1f32.bf16(
+ <vscale x 1 x float>,
+ bfloat,
+ <vscale x 1 x bfloat>,
+ iXLen, iXLen, iXLen);
+
+define <vscale x 1 x float> @intrinsic_vfwmsac_vf_nxv1f32_bf16_nxv1bf16(<vscale x 1 x float> %0, bfloat %1, <vscale x 1 x bfloat> %2, iXLen %3) nounwind {
+; CHECK-LABEL: intrinsic_vfwmsac_vf_nxv1f32_bf16_nxv1bf16:
+; CHECK: # %bb.0: # %entry
+; CHECK-NEXT: fsrmi a1, 0
+; CHECK-NEXT: vsetvli zero, a0, e16alt, mf4, tu, ma
+; CHECK-NEXT: vfwmsac.vf v8, fa0, v9
+; CHECK-NEXT: fsrm a1
+; CHECK-NEXT: ret
+entry:
+ %a = call <vscale x 1 x float> @llvm.riscv.vfwmsac.nxv1f32.bf16(
+ <vscale x 1 x float> %0,
+ bfloat %1,
+ <vscale x 1 x bfloat> %2,
+ iXLen 0, iXLen %3, iXLen 0)
+
+ ret <vscale x 1 x float> %a
+}
+
+declare <vscale x 1 x float> @llvm.riscv.vfwmsac.mask.nxv1f32.bf16(
+ <vscale x 1 x float>,
+ bfloat,
+ <vscale x 1 x bfloat>,
+ <vscale x 1 x i1>,
+ iXLen, iXLen, iXLen);
+
+define <vscale x 1 x float> @intrinsic_vfwmsac_mask_vf_nxv1f32_bf16_nxv1bf16(<vscale x 1 x float> %0, bfloat %1, <vscale x 1 x bfloat> %2, <vscale x 1 x i1> %3, iXLen %4) nounwind {
+; CHECK-LABEL: intrinsic_vfwmsac_mask_vf_nxv1f32_bf16_nxv1bf16:
+; CHECK: # %bb.0: # %entry
+; CHECK-NEXT: fsrmi a1, 0
+; CHECK-NEXT: vsetvli zero, a0, e16alt, mf4, tu, mu
+; CHECK-NEXT: vfwmsac.vf v8, fa0, v9, v0.t
+; CHECK-NEXT: fsrm a1
+; CHECK-NEXT: ret
+entry:
+ %a = call <vscale x 1 x float> @llvm.riscv.vfwmsac.mask.nxv1f32.bf16(
+ <vscale x 1 x float> %0,
+ bfloat %1,
+ <vscale x 1 x bfloat> %2,
+ <vscale x 1 x i1> %3,
+ iXLen 0, iXLen %4, iXLen 0)
+
+ ret <vscale x 1 x float> %a
+}
+
+declare <vscale x 2 x float> @llvm.riscv.vfwmsac.nxv2f32.bf16(
+ <vscale x 2 x float>,
+ bfloat,
+ <vscale x 2 x bfloat>,
+ iXLen, iXLen, iXLen);
+
+define <vscale x 2 x float> @intrinsic_vfwmsac_vf_nxv2f32_bf16_nxv2bf16(<vscale x 2 x float> %0, bfloat %1, <vscale x 2 x bfloat> %2, iXLen %3) nounwind {
+; CHECK-LABEL: intrinsic_vfwmsac_vf_nxv2f32_bf16_nxv2bf16:
+; CHECK: # %bb.0: # %entry
+; CHECK-NEXT: fsrmi a1, 0
+; CHECK-NEXT: vsetvli zero, a0, e16alt, mf2, tu, ma
+; CHECK-NEXT: vfwmsac.vf v8, fa0, v9
+; CHECK-NEXT: fsrm a1
+; CHECK-NEXT: ret
+entry:
+ %a = call <vscale x 2 x float> @llvm.riscv.vfwmsac.nxv2f32.bf16(
+ <vscale x 2 x float> %0,
+ bfloat %1,
+ <vscale x 2 x bfloat> %2,
+ iXLen 0, iXLen %3, iXLen 0)
+
+ ret <vscale x 2 x float> %a
+}
+
+declare <vscale x 2 x float> @llvm.riscv.vfwmsac.mask.nxv2f32.bf16(
+ <vscale x 2 x float>,
+ bfloat,
+ <vscale x 2 x bfloat>,
+ <vscale x 2 x i1>,
+ iXLen, iXLen, iXLen);
+
+define <vscale x 2 x float> @intrinsic_vfwmsac_mask_vf_nxv2f32_bf16_nxv2bf16(<vscale x 2 x float> %0, bfloat %1, <vscale x 2 x bfloat> %2, <vscale x 2 x i1> %3, iXLen %4) nounwind {
+; CHECK-LABEL: intrinsic_vfwmsac_mask_vf_nxv2f32_bf16_nxv2bf16:
+; CHECK: # %bb.0: # %entry
+; CHECK-NEXT: fsrmi a1, 0
+; CHECK-NEXT: vsetvli zero, a0, e16alt, mf2, tu, mu
+; CHECK-NEXT: vfwmsac.vf v8, fa0, v9, v0.t
+; CHECK-NEXT: fsrm a1
+; CHECK-NEXT: ret
+entry:
+ %a = call <vscale x 2 x float> @llvm.riscv.vfwmsac.mask.nxv2f32.bf16(
+ <vscale x 2 x float> %0,
+ bfloat %1,
+ <vscale x 2 x bfloat> %2,
+ <vscale x 2 x i1> %3,
+ iXLen 0, iXLen %4, iXLen 0)
+
+ ret <vscale x 2 x float> %a
+}
+
+declare <vscale x 4 x float> @llvm.riscv.vfwmsac.nxv4f32.bf16(
+ <vscale x 4 x float>,
+ bfloat,
+ <vscale x 4 x bfloat>,
+ iXLen, iXLen, iXLen);
+
+define <vscale x 4 x float> @intrinsic_vfwmsac_vf_nxv4f32_bf16_nxv4bf16(<vscale x 4 x float> %0, bfloat %1, <vscale x 4 x bfloat> %2, iXLen %3) nounwind {
+; CHECK-LABEL: intrinsic_vfwmsac_vf_nxv4f32_bf16_nxv4bf16:
+; CHECK: # %bb.0: # %entry
+; CHECK-NEXT: fsrmi a1, 0
+; CHECK-NEXT: vsetvli zero, a0, e16alt, m1, tu, ma
+; CHECK-NEXT: vfwmsac.vf v8, fa0, v10
+; CHECK-NEXT: fsrm a1
+; CHECK-NEXT: ret
+entry:
+ %a = call <vscale x 4 x float> @llvm.riscv.vfwmsac.nxv4f32.bf16(
+ <vscale x 4 x float> %0,
+ bfloat %1,
+ <vscale x 4 x bfloat> %2,
+ iXLen 0, iXLen %3, iXLen 0)
+
+ ret <vscale x 4 x float> %a
+}
+
+declare <vscale x 4 x float> @llvm.riscv.vfwmsac.mask.nxv4f32.bf16(
+ <vscale x 4 x float>,
+ bfloat,
+ <vscale x 4 x bfloat>,
+ <vscale x 4 x i1>,
+ iXLen, iXLen, iXLen);
+
+define <vscale x 4 x float> @intrinsic_vfwmsac_mask_vf_nxv4f32_bf16_nxv4bf16(<vscale x 4 x float> %0, bfloat %1, <vscale x 4 x bfloat> %2, <vscale x 4 x i1> %3, iXLen %4) nounwind {
+; CHECK-LABEL: intrinsic_vfwmsac_mask_vf_nxv4f32_bf16_nxv4bf16:
+; CHECK: # %bb.0: # %entry
+; CHECK-NEXT: fsrmi a1, 0
+; CHECK-NEXT: vsetvli zero, a0, e16alt, m1, tu, mu
+; CHECK-NEXT: vfwmsac.vf v8, fa0, v10, v0.t
+; CHECK-NEXT: fsrm a1
+; CHECK-NEXT: ret
+entry:
+ %a = call <vscale x 4 x float> @llvm.riscv.vfwmsac.mask.nxv4f32.bf16(
+ <vscale x 4 x float> %0,
+ bfloat %1,
+ <vscale x 4 x bfloat> %2,
+ <vscale x 4 x i1> %3,
+ iXLen 0, iXLen %4, iXLen 0)
+
+ ret <vscale x 4 x float> %a
+}
+
+declare <vscale x 8 x float> @llvm.riscv.vfwmsac.nxv8f32.bf16(
+ <vscale x 8 x float>,
+ bfloat,
+ <vscale x 8 x bfloat>,
+ iXLen, iXLen, iXLen);
+
+define <vscale x 8 x float> @intrinsic_vfwmsac_vf_nxv8f32_bf16_nxv8bf16(<vscale x 8 x float> %0, bfloat %1, <vscale x 8 x bfloat> %2, iXLen %3) nounwind {
+; CHECK-LABEL: intrinsic_vfwmsac_vf_nxv8f32_bf16_nxv8bf16:
+; CHECK: # %bb.0: # %entry
+; CHECK-NEXT: fsrmi a1, 0
+; CHECK-NEXT: vsetvli zero, a0, e16alt, m2, tu, ma
+; CHECK-NEXT: vfwmsac.vf v8, fa0, v12
+; CHECK-NEXT: fsrm a1
+; CHECK-NEXT: ret
+entry:
+ %a = call <vscale x 8 x float> @llvm.riscv.vfwmsac.nxv8f32.bf16(
+ <vscale x 8 x float> %0,
+ bfloat %1,
+ <vscale x 8 x bfloat> %2,
+ iXLen 0, iXLen %3, iXLen 0)
+
+ ret <vscale x 8 x float> %a
+}
+
+declare <vscale x 8 x float> @llvm.riscv.vfwmsac.mask.nxv8f32.bf16(
+ <vscale x 8 x float>,
+ bfloat,
+ <vscale x 8 x bfloat>,
+ <vscale x 8 x i1>,
+ iXLen, iXLen, iXLen);
+
+define <vscale x 8 x float> @intrinsic_vfwmsac_mask_vf_nxv8f32_bf16_nxv8bf16(<vscale x 8 x float> %0, bfloat %1, <vscale x 8 x bfloat> %2, <vscale x 8 x i1> %3, iXLen %4) nounwind {
+; CHECK-LABEL: intrinsic_vfwmsac_mask_vf_nxv8f32_bf16_nxv8bf16:
+; CHECK: # %bb.0: # %entry
+; CHECK-NEXT: fsrmi a1, 0
+; CHECK-NEXT: vsetvli zero, a0, e16alt, m2, tu, mu
+; CHECK-NEXT: vfwmsac.vf v8, fa0, v12, v0.t
+; CHECK-NEXT: fsrm a1
+; CHECK-NEXT: ret
+entry:
+ %a = call <vscale x 8 x float> @llvm.riscv.vfwmsac.mask.nxv8f32.bf16(
+ <vscale x 8 x float> %0,
+ bfloat %1,
+ <vscale x 8 x bfloat> %2,
+ <vscale x 8 x i1> %3,
+ iXLen 0, iXLen %4, iXLen 0)
+
+ ret <vscale x 8 x float> %a
+}
+
+declare <vscale x 16 x float> @llvm.riscv.vfwmsac.nxv16f32.bf16(
+ <vscale x 16 x float>,
+ bfloat,
+ <vscale x 16 x bfloat>,
+ iXLen, iXLen, iXLen);
+
+define <vscale x 16 x float> @intrinsic_vfwmsac_vf_nxv16f32_bf16_nxv16bf16(<vscale x 16 x float> %0, bfloat %1, <vscale x 16 x bfloat> %2, iXLen %3) nounwind {
+; CHECK-LABEL: intrinsic_vfwmsac_vf_nxv16f32_bf16_nxv16bf16:
+; CHECK: # %bb.0: # %entry
+; CHECK-NEXT: fsrmi a1, 0
+; CHECK-NEXT: vsetvli zero, a0, e16alt, m4, tu, ma
+; CHECK-NEXT: vfwmsac.vf v8, fa0, v16
+; CHECK-NEXT: fsrm a1
+; CHECK-NEXT: ret
+entry:
+ %a = call <vscale x 16 x float> @llvm.riscv.vfwmsac.nxv16f32.bf16(
+ <vscale x 16 x float> %0,
+ bfloat %1,
+ <vscale x 16 x bfloat> %2,
+ iXLen 0, iXLen %3, iXLen 0)
+
+ ret <vscale x 16 x float> %a
+}
+
+declare <vscale x 16 x float> @llvm.riscv.vfwmsac.mask.nxv16f32.bf16(
+ <vscale x 16 x float>,
+ bfloat,
+ <vscale x 16 x bfloat>,
+ <vscale x 16 x i1>,
+ iXLen, iXLen, iXLen);
+
+define <vscale x 16 x float> @intrinsic_vfwmsac_mask_vf_nxv16f32_bf16_nxv16bf16(<vscale x 16 x float> %0, bfloat %1, <vscale x 16 x bfloat> %2, <vscale x 16 x i1> %3, iXLen %4) nounwind {
+; CHECK-LABEL: intrinsic_vfwmsac_mask_vf_nxv16f32_bf16_nxv16bf16:
+; CHECK: # %bb.0: # %entry
+; CHECK-NEXT: fsrmi a1, 0
+; CHECK-NEXT: vsetvli zero, a0, e16alt, m4, tu, mu
+; CHECK-NEXT: vfwmsac.vf v8, fa0, v16, v0.t
+; CHECK-NEXT: fsrm a1
+; CHECK-NEXT: ret
+entry:
+ %a = call <vscale x 16 x float> @llvm.riscv.vfwmsac.mask.nxv16f32.bf16(
+ <vscale x 16 x float> %0,
+ bfloat %1,
+ <vscale x 16 x bfloat> %2,
+ <vscale x 16 x i1> %3,
+ iXLen 0, iXLen %4, iXLen 0)
+
+ ret <vscale x 16 x float> %a
+}
+
diff --git a/llvm/test/CodeGen/RISCV/rvv/vfwmul-bf.ll b/llvm/test/CodeGen/RISCV/rvv/vfwmul-bf.ll
new file mode 100644
index 0000000..577b93a
--- /dev/null
+++ b/llvm/test/CodeGen/RISCV/rvv/vfwmul-bf.ll
@@ -0,0 +1,519 @@
+; NOTE: Assertions have been autogenerated by utils/update_llc_test_checks.py
+; RUN: sed 's/iXLen/i32/g' %s | llc -mtriple=riscv32 -mattr=+v,+experimental-zvfbfa \
+; RUN: -verify-machineinstrs -target-abi=ilp32d | FileCheck %s
+; RUN: sed 's/iXLen/i64/g' %s | llc -mtriple=riscv64 -mattr=+v,+experimental-zvfbfa \
+; RUN: -verify-machineinstrs -target-abi=lp64d | FileCheck %s
+
+declare <vscale x 1 x float> @llvm.riscv.vfwmul.nxv1f32.nxv1bf16.nxv1bf16(
+ <vscale x 1 x float>,
+ <vscale x 1 x bfloat>,
+ <vscale x 1 x bfloat>,
+ iXLen, iXLen);
+
+define <vscale x 1 x float> @intrinsic_vfwmul_vv_nxv1f32_nxv1bf16_nxv1bf16(<vscale x 1 x bfloat> %0, <vscale x 1 x bfloat> %1, iXLen %2) nounwind {
+; CHECK-LABEL: intrinsic_vfwmul_vv_nxv1f32_nxv1bf16_nxv1bf16:
+; CHECK: # %bb.0: # %entry
+; CHECK-NEXT: fsrmi a1, 0
+; CHECK-NEXT: vsetvli zero, a0, e16alt, mf4, ta, ma
+; CHECK-NEXT: vfwmul.vv v10, v8, v9
+; CHECK-NEXT: fsrm a1
+; CHECK-NEXT: vmv1r.v v8, v10
+; CHECK-NEXT: ret
+entry:
+ %a = call <vscale x 1 x float> @llvm.riscv.vfwmul.nxv1f32.nxv1bf16.nxv1bf16(
+ <vscale x 1 x float> poison,
+ <vscale x 1 x bfloat> %0,
+ <vscale x 1 x bfloat> %1,
+ iXLen 0, iXLen %2)
+
+ ret <vscale x 1 x float> %a
+}
+
+declare <vscale x 1 x float> @llvm.riscv.vfwmul.mask.nxv1f32.nxv1bf16.nxv1bf16(
+ <vscale x 1 x float>,
+ <vscale x 1 x bfloat>,
+ <vscale x 1 x bfloat>,
+ <vscale x 1 x i1>,
+ iXLen, iXLen, iXLen);
+
+define <vscale x 1 x float> @intrinsic_vfwmul_mask_vv_nxv1f32_nxv1bf16_nxv1bf16(<vscale x 1 x float> %0, <vscale x 1 x bfloat> %1, <vscale x 1 x bfloat> %2, <vscale x 1 x i1> %3, iXLen %4) nounwind {
+; CHECK-LABEL: intrinsic_vfwmul_mask_vv_nxv1f32_nxv1bf16_nxv1bf16:
+; CHECK: # %bb.0: # %entry
+; CHECK-NEXT: fsrmi a1, 0
+; CHECK-NEXT: vsetvli zero, a0, e16alt, mf4, ta, mu
+; CHECK-NEXT: vfwmul.vv v8, v9, v10, v0.t
+; CHECK-NEXT: fsrm a1
+; CHECK-NEXT: ret
+entry:
+ %a = call <vscale x 1 x float> @llvm.riscv.vfwmul.mask.nxv1f32.nxv1bf16.nxv1bf16(
+ <vscale x 1 x float> %0,
+ <vscale x 1 x bfloat> %1,
+ <vscale x 1 x bfloat> %2,
+ <vscale x 1 x i1> %3,
+ iXLen 0, iXLen %4, iXLen 1)
+
+ ret <vscale x 1 x float> %a
+}
+
+declare <vscale x 2 x float> @llvm.riscv.vfwmul.nxv2f32.nxv2bf16.nxv2bf16(
+ <vscale x 2 x float>,
+ <vscale x 2 x bfloat>,
+ <vscale x 2 x bfloat>,
+ iXLen, iXLen);
+
+define <vscale x 2 x float> @intrinsic_vfwmul_vv_nxv2f32_nxv2bf16_nxv2bf16(<vscale x 2 x bfloat> %0, <vscale x 2 x bfloat> %1, iXLen %2) nounwind {
+; CHECK-LABEL: intrinsic_vfwmul_vv_nxv2f32_nxv2bf16_nxv2bf16:
+; CHECK: # %bb.0: # %entry
+; CHECK-NEXT: fsrmi a1, 0
+; CHECK-NEXT: vsetvli zero, a0, e16alt, mf2, ta, ma
+; CHECK-NEXT: vfwmul.vv v10, v8, v9
+; CHECK-NEXT: fsrm a1
+; CHECK-NEXT: vmv1r.v v8, v10
+; CHECK-NEXT: ret
+entry:
+ %a = call <vscale x 2 x float> @llvm.riscv.vfwmul.nxv2f32.nxv2bf16.nxv2bf16(
+ <vscale x 2 x float> poison,
+ <vscale x 2 x bfloat> %0,
+ <vscale x 2 x bfloat> %1,
+ iXLen 0, iXLen %2)
+
+ ret <vscale x 2 x float> %a
+}
+
+declare <vscale x 2 x float> @llvm.riscv.vfwmul.mask.nxv2f32.nxv2bf16.nxv2bf16(
+ <vscale x 2 x float>,
+ <vscale x 2 x bfloat>,
+ <vscale x 2 x bfloat>,
+ <vscale x 2 x i1>,
+ iXLen, iXLen, iXLen);
+
+define <vscale x 2 x float> @intrinsic_vfwmul_mask_vv_nxv2f32_nxv2bf16_nxv2bf16(<vscale x 2 x float> %0, <vscale x 2 x bfloat> %1, <vscale x 2 x bfloat> %2, <vscale x 2 x i1> %3, iXLen %4) nounwind {
+; CHECK-LABEL: intrinsic_vfwmul_mask_vv_nxv2f32_nxv2bf16_nxv2bf16:
+; CHECK: # %bb.0: # %entry
+; CHECK-NEXT: fsrmi a1, 0
+; CHECK-NEXT: vsetvli zero, a0, e16alt, mf2, ta, mu
+; CHECK-NEXT: vfwmul.vv v8, v9, v10, v0.t
+; CHECK-NEXT: fsrm a1
+; CHECK-NEXT: ret
+entry:
+ %a = call <vscale x 2 x float> @llvm.riscv.vfwmul.mask.nxv2f32.nxv2bf16.nxv2bf16(
+ <vscale x 2 x float> %0,
+ <vscale x 2 x bfloat> %1,
+ <vscale x 2 x bfloat> %2,
+ <vscale x 2 x i1> %3,
+ iXLen 0, iXLen %4, iXLen 1)
+
+ ret <vscale x 2 x float> %a
+}
+
+declare <vscale x 4 x float> @llvm.riscv.vfwmul.nxv4f32.nxv4bf16.nxv4bf16(
+ <vscale x 4 x float>,
+ <vscale x 4 x bfloat>,
+ <vscale x 4 x bfloat>,
+ iXLen, iXLen);
+
+define <vscale x 4 x float> @intrinsic_vfwmul_vv_nxv4f32_nxv4bf16_nxv4bf16(<vscale x 4 x bfloat> %0, <vscale x 4 x bfloat> %1, iXLen %2) nounwind {
+; CHECK-LABEL: intrinsic_vfwmul_vv_nxv4f32_nxv4bf16_nxv4bf16:
+; CHECK: # %bb.0: # %entry
+; CHECK-NEXT: vsetvli zero, a0, e16alt, m1, ta, ma
+; CHECK-NEXT: vmv1r.v v10, v9
+; CHECK-NEXT: vmv1r.v v11, v8
+; CHECK-NEXT: fsrmi a0, 0
+; CHECK-NEXT: vfwmul.vv v8, v11, v10
+; CHECK-NEXT: fsrm a0
+; CHECK-NEXT: ret
+entry:
+ %a = call <vscale x 4 x float> @llvm.riscv.vfwmul.nxv4f32.nxv4bf16.nxv4bf16(
+ <vscale x 4 x float> poison,
+ <vscale x 4 x bfloat> %0,
+ <vscale x 4 x bfloat> %1,
+ iXLen 0, iXLen %2)
+
+ ret <vscale x 4 x float> %a
+}
+
+declare <vscale x 4 x float> @llvm.riscv.vfwmul.mask.nxv4f32.nxv4bf16.nxv4bf16(
+ <vscale x 4 x float>,
+ <vscale x 4 x bfloat>,
+ <vscale x 4 x bfloat>,
+ <vscale x 4 x i1>,
+ iXLen, iXLen, iXLen);
+
+define <vscale x 4 x float> @intrinsic_vfwmul_mask_vv_nxv4f32_nxv4bf16_nxv4bf16(<vscale x 4 x float> %0, <vscale x 4 x bfloat> %1, <vscale x 4 x bfloat> %2, <vscale x 4 x i1> %3, iXLen %4) nounwind {
+; CHECK-LABEL: intrinsic_vfwmul_mask_vv_nxv4f32_nxv4bf16_nxv4bf16:
+; CHECK: # %bb.0: # %entry
+; CHECK-NEXT: fsrmi a1, 0
+; CHECK-NEXT: vsetvli zero, a0, e16alt, m1, ta, mu
+; CHECK-NEXT: vfwmul.vv v8, v10, v11, v0.t
+; CHECK-NEXT: fsrm a1
+; CHECK-NEXT: ret
+entry:
+ %a = call <vscale x 4 x float> @llvm.riscv.vfwmul.mask.nxv4f32.nxv4bf16.nxv4bf16(
+ <vscale x 4 x float> %0,
+ <vscale x 4 x bfloat> %1,
+ <vscale x 4 x bfloat> %2,
+ <vscale x 4 x i1> %3,
+ iXLen 0, iXLen %4, iXLen 1)
+
+ ret <vscale x 4 x float> %a
+}
+
+declare <vscale x 8 x float> @llvm.riscv.vfwmul.nxv8f32.nxv8bf16.nxv8bf16(
+ <vscale x 8 x float>,
+ <vscale x 8 x bfloat>,
+ <vscale x 8 x bfloat>,
+ iXLen, iXLen);
+
+define <vscale x 8 x float> @intrinsic_vfwmul_vv_nxv8f32_nxv8bf16_nxv8bf16(<vscale x 8 x bfloat> %0, <vscale x 8 x bfloat> %1, iXLen %2) nounwind {
+; CHECK-LABEL: intrinsic_vfwmul_vv_nxv8f32_nxv8bf16_nxv8bf16:
+; CHECK: # %bb.0: # %entry
+; CHECK-NEXT: vsetvli zero, a0, e16alt, m2, ta, ma
+; CHECK-NEXT: vmv2r.v v12, v10
+; CHECK-NEXT: vmv2r.v v14, v8
+; CHECK-NEXT: fsrmi a0, 0
+; CHECK-NEXT: vfwmul.vv v8, v14, v12
+; CHECK-NEXT: fsrm a0
+; CHECK-NEXT: ret
+entry:
+ %a = call <vscale x 8 x float> @llvm.riscv.vfwmul.nxv8f32.nxv8bf16.nxv8bf16(
+ <vscale x 8 x float> poison,
+ <vscale x 8 x bfloat> %0,
+ <vscale x 8 x bfloat> %1,
+ iXLen 0, iXLen %2)
+
+ ret <vscale x 8 x float> %a
+}
+
+declare <vscale x 8 x float> @llvm.riscv.vfwmul.mask.nxv8f32.nxv8bf16.nxv8bf16(
+ <vscale x 8 x float>,
+ <vscale x 8 x bfloat>,
+ <vscale x 8 x bfloat>,
+ <vscale x 8 x i1>,
+ iXLen, iXLen, iXLen);
+
+define <vscale x 8 x float> @intrinsic_vfwmul_mask_vv_nxv8f32_nxv8bf16_nxv8bf16(<vscale x 8 x float> %0, <vscale x 8 x bfloat> %1, <vscale x 8 x bfloat> %2, <vscale x 8 x i1> %3, iXLen %4) nounwind {
+; CHECK-LABEL: intrinsic_vfwmul_mask_vv_nxv8f32_nxv8bf16_nxv8bf16:
+; CHECK: # %bb.0: # %entry
+; CHECK-NEXT: fsrmi a1, 0
+; CHECK-NEXT: vsetvli zero, a0, e16alt, m2, ta, mu
+; CHECK-NEXT: vfwmul.vv v8, v12, v14, v0.t
+; CHECK-NEXT: fsrm a1
+; CHECK-NEXT: ret
+entry:
+ %a = call <vscale x 8 x float> @llvm.riscv.vfwmul.mask.nxv8f32.nxv8bf16.nxv8bf16(
+ <vscale x 8 x float> %0,
+ <vscale x 8 x bfloat> %1,
+ <vscale x 8 x bfloat> %2,
+ <vscale x 8 x i1> %3,
+ iXLen 0, iXLen %4, iXLen 1)
+
+ ret <vscale x 8 x float> %a
+}
+
+declare <vscale x 16 x float> @llvm.riscv.vfwmul.nxv16f32.nxv16bf16.nxv16bf16(
+ <vscale x 16 x float>,
+ <vscale x 16 x bfloat>,
+ <vscale x 16 x bfloat>,
+ iXLen, iXLen);
+
+define <vscale x 16 x float> @intrinsic_vfwmul_vv_nxv16f32_nxv16bf16_nxv16bf16(<vscale x 16 x bfloat> %0, <vscale x 16 x bfloat> %1, iXLen %2) nounwind {
+; CHECK-LABEL: intrinsic_vfwmul_vv_nxv16f32_nxv16bf16_nxv16bf16:
+; CHECK: # %bb.0: # %entry
+; CHECK-NEXT: vsetvli zero, a0, e16alt, m4, ta, ma
+; CHECK-NEXT: vmv4r.v v16, v12
+; CHECK-NEXT: vmv4r.v v20, v8
+; CHECK-NEXT: fsrmi a0, 0
+; CHECK-NEXT: vfwmul.vv v8, v20, v16
+; CHECK-NEXT: fsrm a0
+; CHECK-NEXT: ret
+entry:
+ %a = call <vscale x 16 x float> @llvm.riscv.vfwmul.nxv16f32.nxv16bf16.nxv16bf16(
+ <vscale x 16 x float> poison,
+ <vscale x 16 x bfloat> %0,
+ <vscale x 16 x bfloat> %1,
+ iXLen 0, iXLen %2)
+
+ ret <vscale x 16 x float> %a
+}
+
+declare <vscale x 16 x float> @llvm.riscv.vfwmul.mask.nxv16f32.nxv16bf16.nxv16bf16(
+ <vscale x 16 x float>,
+ <vscale x 16 x bfloat>,
+ <vscale x 16 x bfloat>,
+ <vscale x 16 x i1>,
+ iXLen, iXLen, iXLen);
+
+define <vscale x 16 x float> @intrinsic_vfwmul_mask_vv_nxv16f32_nxv16bf16_nxv16bf16(<vscale x 16 x float> %0, <vscale x 16 x bfloat> %1, <vscale x 16 x bfloat> %2, <vscale x 16 x i1> %3, iXLen %4) nounwind {
+; CHECK-LABEL: intrinsic_vfwmul_mask_vv_nxv16f32_nxv16bf16_nxv16bf16:
+; CHECK: # %bb.0: # %entry
+; CHECK-NEXT: fsrmi a1, 0
+; CHECK-NEXT: vsetvli zero, a0, e16alt, m4, ta, mu
+; CHECK-NEXT: vfwmul.vv v8, v16, v20, v0.t
+; CHECK-NEXT: fsrm a1
+; CHECK-NEXT: ret
+entry:
+ %a = call <vscale x 16 x float> @llvm.riscv.vfwmul.mask.nxv16f32.nxv16bf16.nxv16bf16(
+ <vscale x 16 x float> %0,
+ <vscale x 16 x bfloat> %1,
+ <vscale x 16 x bfloat> %2,
+ <vscale x 16 x i1> %3,
+ iXLen 0, iXLen %4, iXLen 1)
+
+ ret <vscale x 16 x float> %a
+}
+
+declare <vscale x 1 x float> @llvm.riscv.vfwmul.nxv1f32.nxv1bf16.bf16(
+ <vscale x 1 x float>,
+ <vscale x 1 x bfloat>,
+ bfloat,
+ iXLen, iXLen);
+
+define <vscale x 1 x float> @intrinsic_vfwmul_vf_nxv1f32_nxv1bf16_bf16(<vscale x 1 x bfloat> %0, bfloat %1, iXLen %2) nounwind {
+; CHECK-LABEL: intrinsic_vfwmul_vf_nxv1f32_nxv1bf16_bf16:
+; CHECK: # %bb.0: # %entry
+; CHECK-NEXT: fsrmi a1, 0
+; CHECK-NEXT: vsetvli zero, a0, e16alt, mf4, ta, ma
+; CHECK-NEXT: vfwmul.vf v9, v8, fa0
+; CHECK-NEXT: fsrm a1
+; CHECK-NEXT: vmv1r.v v8, v9
+; CHECK-NEXT: ret
+entry:
+ %a = call <vscale x 1 x float> @llvm.riscv.vfwmul.nxv1f32.nxv1bf16.bf16(
+ <vscale x 1 x float> poison,
+ <vscale x 1 x bfloat> %0,
+ bfloat %1,
+ iXLen 0, iXLen %2)
+
+ ret <vscale x 1 x float> %a
+}
+
+declare <vscale x 1 x float> @llvm.riscv.vfwmul.mask.nxv1f32.nxv1bf16.bf16(
+ <vscale x 1 x float>,
+ <vscale x 1 x bfloat>,
+ bfloat,
+ <vscale x 1 x i1>,
+ iXLen, iXLen, iXLen);
+
+define <vscale x 1 x float> @intrinsic_vfwmul_mask_vf_nxv1f32_nxv1bf16_bf16(<vscale x 1 x float> %0, <vscale x 1 x bfloat> %1, bfloat %2, <vscale x 1 x i1> %3, iXLen %4) nounwind {
+; CHECK-LABEL: intrinsic_vfwmul_mask_vf_nxv1f32_nxv1bf16_bf16:
+; CHECK: # %bb.0: # %entry
+; CHECK-NEXT: fsrmi a1, 0
+; CHECK-NEXT: vsetvli zero, a0, e16alt, mf4, ta, mu
+; CHECK-NEXT: vfwmul.vf v8, v9, fa0, v0.t
+; CHECK-NEXT: fsrm a1
+; CHECK-NEXT: ret
+entry:
+ %a = call <vscale x 1 x float> @llvm.riscv.vfwmul.mask.nxv1f32.nxv1bf16.bf16(
+ <vscale x 1 x float> %0,
+ <vscale x 1 x bfloat> %1,
+ bfloat %2,
+ <vscale x 1 x i1> %3,
+ iXLen 0, iXLen %4, iXLen 1)
+
+ ret <vscale x 1 x float> %a
+}
+
+declare <vscale x 2 x float> @llvm.riscv.vfwmul.nxv2f32.nxv2bf16.bf16(
+ <vscale x 2 x float>,
+ <vscale x 2 x bfloat>,
+ bfloat,
+ iXLen, iXLen);
+
+define <vscale x 2 x float> @intrinsic_vfwmul_vf_nxv2f32_nxv2bf16_bf16(<vscale x 2 x bfloat> %0, bfloat %1, iXLen %2) nounwind {
+; CHECK-LABEL: intrinsic_vfwmul_vf_nxv2f32_nxv2bf16_bf16:
+; CHECK: # %bb.0: # %entry
+; CHECK-NEXT: fsrmi a1, 0
+; CHECK-NEXT: vsetvli zero, a0, e16alt, mf2, ta, ma
+; CHECK-NEXT: vfwmul.vf v9, v8, fa0
+; CHECK-NEXT: fsrm a1
+; CHECK-NEXT: vmv1r.v v8, v9
+; CHECK-NEXT: ret
+entry:
+ %a = call <vscale x 2 x float> @llvm.riscv.vfwmul.nxv2f32.nxv2bf16.bf16(
+ <vscale x 2 x float> poison,
+ <vscale x 2 x bfloat> %0,
+ bfloat %1,
+ iXLen 0, iXLen %2)
+
+ ret <vscale x 2 x float> %a
+}
+
+declare <vscale x 2 x float> @llvm.riscv.vfwmul.mask.nxv2f32.nxv2bf16.bf16(
+ <vscale x 2 x float>,
+ <vscale x 2 x bfloat>,
+ bfloat,
+ <vscale x 2 x i1>,
+ iXLen, iXLen, iXLen);
+
+define <vscale x 2 x float> @intrinsic_vfwmul_mask_vf_nxv2f32_nxv2bf16_bf16(<vscale x 2 x float> %0, <vscale x 2 x bfloat> %1, bfloat %2, <vscale x 2 x i1> %3, iXLen %4) nounwind {
+; CHECK-LABEL: intrinsic_vfwmul_mask_vf_nxv2f32_nxv2bf16_bf16:
+; CHECK: # %bb.0: # %entry
+; CHECK-NEXT: fsrmi a1, 0
+; CHECK-NEXT: vsetvli zero, a0, e16alt, mf2, ta, mu
+; CHECK-NEXT: vfwmul.vf v8, v9, fa0, v0.t
+; CHECK-NEXT: fsrm a1
+; CHECK-NEXT: ret
+entry:
+ %a = call <vscale x 2 x float> @llvm.riscv.vfwmul.mask.nxv2f32.nxv2bf16.bf16(
+ <vscale x 2 x float> %0,
+ <vscale x 2 x bfloat> %1,
+ bfloat %2,
+ <vscale x 2 x i1> %3,
+ iXLen 0, iXLen %4, iXLen 1)
+
+ ret <vscale x 2 x float> %a
+}
+
+declare <vscale x 4 x float> @llvm.riscv.vfwmul.nxv4f32.nxv4bf16.bf16(
+ <vscale x 4 x float>,
+ <vscale x 4 x bfloat>,
+ bfloat,
+ iXLen, iXLen);
+
+define <vscale x 4 x float> @intrinsic_vfwmul_vf_nxv4f32_nxv4bf16_bf16(<vscale x 4 x bfloat> %0, bfloat %1, iXLen %2) nounwind {
+; CHECK-LABEL: intrinsic_vfwmul_vf_nxv4f32_nxv4bf16_bf16:
+; CHECK: # %bb.0: # %entry
+; CHECK-NEXT: vsetvli zero, a0, e16alt, m1, ta, ma
+; CHECK-NEXT: vmv1r.v v10, v8
+; CHECK-NEXT: fsrmi a0, 0
+; CHECK-NEXT: vfwmul.vf v8, v10, fa0
+; CHECK-NEXT: fsrm a0
+; CHECK-NEXT: ret
+entry:
+ %a = call <vscale x 4 x float> @llvm.riscv.vfwmul.nxv4f32.nxv4bf16.bf16(
+ <vscale x 4 x float> poison,
+ <vscale x 4 x bfloat> %0,
+ bfloat %1,
+ iXLen 0, iXLen %2)
+
+ ret <vscale x 4 x float> %a
+}
+
+declare <vscale x 4 x float> @llvm.riscv.vfwmul.mask.nxv4f32.nxv4bf16.bf16(
+ <vscale x 4 x float>,
+ <vscale x 4 x bfloat>,
+ bfloat,
+ <vscale x 4 x i1>,
+ iXLen, iXLen, iXLen);
+
+define <vscale x 4 x float> @intrinsic_vfwmul_mask_vf_nxv4f32_nxv4bf16_bf16(<vscale x 4 x float> %0, <vscale x 4 x bfloat> %1, bfloat %2, <vscale x 4 x i1> %3, iXLen %4) nounwind {
+; CHECK-LABEL: intrinsic_vfwmul_mask_vf_nxv4f32_nxv4bf16_bf16:
+; CHECK: # %bb.0: # %entry
+; CHECK-NEXT: fsrmi a1, 0
+; CHECK-NEXT: vsetvli zero, a0, e16alt, m1, ta, mu
+; CHECK-NEXT: vfwmul.vf v8, v10, fa0, v0.t
+; CHECK-NEXT: fsrm a1
+; CHECK-NEXT: ret
+entry:
+ %a = call <vscale x 4 x float> @llvm.riscv.vfwmul.mask.nxv4f32.nxv4bf16.bf16(
+ <vscale x 4 x float> %0,
+ <vscale x 4 x bfloat> %1,
+ bfloat %2,
+ <vscale x 4 x i1> %3,
+ iXLen 0, iXLen %4, iXLen 1)
+
+ ret <vscale x 4 x float> %a
+}
+
+declare <vscale x 8 x float> @llvm.riscv.vfwmul.nxv8f32.nxv8bf16.bf16(
+ <vscale x 8 x float>,
+ <vscale x 8 x bfloat>,
+ bfloat,
+ iXLen, iXLen);
+
+define <vscale x 8 x float> @intrinsic_vfwmul_vf_nxv8f32_nxv8bf16_bf16(<vscale x 8 x bfloat> %0, bfloat %1, iXLen %2) nounwind {
+; CHECK-LABEL: intrinsic_vfwmul_vf_nxv8f32_nxv8bf16_bf16:
+; CHECK: # %bb.0: # %entry
+; CHECK-NEXT: vsetvli zero, a0, e16alt, m2, ta, ma
+; CHECK-NEXT: vmv2r.v v12, v8
+; CHECK-NEXT: fsrmi a0, 0
+; CHECK-NEXT: vfwmul.vf v8, v12, fa0
+; CHECK-NEXT: fsrm a0
+; CHECK-NEXT: ret
+entry:
+ %a = call <vscale x 8 x float> @llvm.riscv.vfwmul.nxv8f32.nxv8bf16.bf16(
+ <vscale x 8 x float> poison,
+ <vscale x 8 x bfloat> %0,
+ bfloat %1,
+ iXLen 0, iXLen %2)
+
+ ret <vscale x 8 x float> %a
+}
+
+declare <vscale x 8 x float> @llvm.riscv.vfwmul.mask.nxv8f32.nxv8bf16.bf16(
+ <vscale x 8 x float>,
+ <vscale x 8 x bfloat>,
+ bfloat,
+ <vscale x 8 x i1>,
+ iXLen, iXLen, iXLen);
+
+define <vscale x 8 x float> @intrinsic_vfwmul_mask_vf_nxv8f32_nxv8bf16_bf16(<vscale x 8 x float> %0, <vscale x 8 x bfloat> %1, bfloat %2, <vscale x 8 x i1> %3, iXLen %4) nounwind {
+; CHECK-LABEL: intrinsic_vfwmul_mask_vf_nxv8f32_nxv8bf16_bf16:
+; CHECK: # %bb.0: # %entry
+; CHECK-NEXT: fsrmi a1, 0
+; CHECK-NEXT: vsetvli zero, a0, e16alt, m2, ta, mu
+; CHECK-NEXT: vfwmul.vf v8, v12, fa0, v0.t
+; CHECK-NEXT: fsrm a1
+; CHECK-NEXT: ret
+entry:
+ %a = call <vscale x 8 x float> @llvm.riscv.vfwmul.mask.nxv8f32.nxv8bf16.bf16(
+ <vscale x 8 x float> %0,
+ <vscale x 8 x bfloat> %1,
+ bfloat %2,
+ <vscale x 8 x i1> %3,
+ iXLen 0, iXLen %4, iXLen 1)
+
+ ret <vscale x 8 x float> %a
+}
+
+declare <vscale x 16 x float> @llvm.riscv.vfwmul.nxv16f32.nxv16bf16.bf16(
+ <vscale x 16 x float>,
+ <vscale x 16 x bfloat>,
+ bfloat,
+ iXLen, iXLen);
+
+define <vscale x 16 x float> @intrinsic_vfwmul_vf_nxv16f32_nxv16bf16_bf16(<vscale x 16 x bfloat> %0, bfloat %1, iXLen %2) nounwind {
+; CHECK-LABEL: intrinsic_vfwmul_vf_nxv16f32_nxv16bf16_bf16:
+; CHECK: # %bb.0: # %entry
+; CHECK-NEXT: vsetvli zero, a0, e16alt, m4, ta, ma
+; CHECK-NEXT: vmv4r.v v16, v8
+; CHECK-NEXT: fsrmi a0, 0
+; CHECK-NEXT: vfwmul.vf v8, v16, fa0
+; CHECK-NEXT: fsrm a0
+; CHECK-NEXT: ret
+entry:
+ %a = call <vscale x 16 x float> @llvm.riscv.vfwmul.nxv16f32.nxv16bf16.bf16(
+ <vscale x 16 x float> poison,
+ <vscale x 16 x bfloat> %0,
+ bfloat %1,
+ iXLen 0, iXLen %2)
+
+ ret <vscale x 16 x float> %a
+}
+
+declare <vscale x 16 x float> @llvm.riscv.vfwmul.mask.nxv16f32.nxv16bf16.bf16(
+ <vscale x 16 x float>,
+ <vscale x 16 x bfloat>,
+ bfloat,
+ <vscale x 16 x i1>,
+ iXLen, iXLen, iXLen);
+
+define <vscale x 16 x float> @intrinsic_vfwmul_mask_vf_nxv16f32_nxv16bf16_bf16(<vscale x 16 x float> %0, <vscale x 16 x bfloat> %1, bfloat %2, <vscale x 16 x i1> %3, iXLen %4) nounwind {
+; CHECK-LABEL: intrinsic_vfwmul_mask_vf_nxv16f32_nxv16bf16_bf16:
+; CHECK: # %bb.0: # %entry
+; CHECK-NEXT: fsrmi a1, 0
+; CHECK-NEXT: vsetvli zero, a0, e16alt, m4, ta, mu
+; CHECK-NEXT: vfwmul.vf v8, v16, fa0, v0.t
+; CHECK-NEXT: fsrm a1
+; CHECK-NEXT: ret
+entry:
+ %a = call <vscale x 16 x float> @llvm.riscv.vfwmul.mask.nxv16f32.nxv16bf16.bf16(
+ <vscale x 16 x float> %0,
+ <vscale x 16 x bfloat> %1,
+ bfloat %2,
+ <vscale x 16 x i1> %3,
+ iXLen 0, iXLen %4, iXLen 1)
+
+ ret <vscale x 16 x float> %a
+}
+
diff --git a/llvm/test/CodeGen/RISCV/rvv/vfwnmacc-bf.ll b/llvm/test/CodeGen/RISCV/rvv/vfwnmacc-bf.ll
new file mode 100644
index 0000000..1e05e4c
--- /dev/null
+++ b/llvm/test/CodeGen/RISCV/rvv/vfwnmacc-bf.ll
@@ -0,0 +1,506 @@
+; NOTE: Assertions have been autogenerated by utils/update_llc_test_checks.py
+; RUN: sed 's/iXLen/i32/g' %s | llc -mtriple=riscv32 -mattr=+v,+experimental-zvfbfa \
+; RUN: -verify-machineinstrs -target-abi=ilp32d | FileCheck %s
+; RUN: sed 's/iXLen/i64/g' %s | llc -mtriple=riscv64 -mattr=+v,+experimental-zvfbfa \
+; RUN: -verify-machineinstrs -target-abi=lp64d | FileCheck %s
+
+declare <vscale x 1 x float> @llvm.riscv.vfwnmacc.nxv1f32.nxv1bf16(
+ <vscale x 1 x float>,
+ <vscale x 1 x bfloat>,
+ <vscale x 1 x bfloat>,
+ iXLen, iXLen, iXLen);
+
+define <vscale x 1 x float> @intrinsic_vfwnmacc_vv_nxv1f32_nxv1bf16_nxv1bf16(<vscale x 1 x float> %0, <vscale x 1 x bfloat> %1, <vscale x 1 x bfloat> %2, iXLen %3) nounwind {
+; CHECK-LABEL: intrinsic_vfwnmacc_vv_nxv1f32_nxv1bf16_nxv1bf16:
+; CHECK: # %bb.0: # %entry
+; CHECK-NEXT: fsrmi a1, 0
+; CHECK-NEXT: vsetvli zero, a0, e16alt, mf4, tu, ma
+; CHECK-NEXT: vfwnmacc.vv v8, v9, v10
+; CHECK-NEXT: fsrm a1
+; CHECK-NEXT: ret
+entry:
+ %a = call <vscale x 1 x float> @llvm.riscv.vfwnmacc.nxv1f32.nxv1bf16(
+ <vscale x 1 x float> %0,
+ <vscale x 1 x bfloat> %1,
+ <vscale x 1 x bfloat> %2,
+ iXLen 0, iXLen %3, iXLen 0)
+
+ ret <vscale x 1 x float> %a
+}
+
+declare <vscale x 1 x float> @llvm.riscv.vfwnmacc.mask.nxv1f32.nxv1bf16(
+ <vscale x 1 x float>,
+ <vscale x 1 x bfloat>,
+ <vscale x 1 x bfloat>,
+ <vscale x 1 x i1>,
+ iXLen, iXLen, iXLen);
+
+define <vscale x 1 x float> @intrinsic_vfwnmacc_mask_vv_nxv1f32_nxv1bf16_nxv1bf16(<vscale x 1 x float> %0, <vscale x 1 x bfloat> %1, <vscale x 1 x bfloat> %2, <vscale x 1 x i1> %3, iXLen %4) nounwind {
+; CHECK-LABEL: intrinsic_vfwnmacc_mask_vv_nxv1f32_nxv1bf16_nxv1bf16:
+; CHECK: # %bb.0: # %entry
+; CHECK-NEXT: fsrmi a1, 0
+; CHECK-NEXT: vsetvli zero, a0, e16alt, mf4, tu, mu
+; CHECK-NEXT: vfwnmacc.vv v8, v9, v10, v0.t
+; CHECK-NEXT: fsrm a1
+; CHECK-NEXT: ret
+entry:
+ %a = call <vscale x 1 x float> @llvm.riscv.vfwnmacc.mask.nxv1f32.nxv1bf16(
+ <vscale x 1 x float> %0,
+ <vscale x 1 x bfloat> %1,
+ <vscale x 1 x bfloat> %2,
+ <vscale x 1 x i1> %3,
+ iXLen 0, iXLen %4, iXLen 0)
+
+ ret <vscale x 1 x float> %a
+}
+
+declare <vscale x 2 x float> @llvm.riscv.vfwnmacc.nxv2f32.nxv2bf16(
+ <vscale x 2 x float>,
+ <vscale x 2 x bfloat>,
+ <vscale x 2 x bfloat>,
+ iXLen, iXLen, iXLen);
+
+define <vscale x 2 x float> @intrinsic_vfwnmacc_vv_nxv2f32_nxv2bf16_nxv2bf16(<vscale x 2 x float> %0, <vscale x 2 x bfloat> %1, <vscale x 2 x bfloat> %2, iXLen %3) nounwind {
+; CHECK-LABEL: intrinsic_vfwnmacc_vv_nxv2f32_nxv2bf16_nxv2bf16:
+; CHECK: # %bb.0: # %entry
+; CHECK-NEXT: fsrmi a1, 0
+; CHECK-NEXT: vsetvli zero, a0, e16alt, mf2, tu, ma
+; CHECK-NEXT: vfwnmacc.vv v8, v9, v10
+; CHECK-NEXT: fsrm a1
+; CHECK-NEXT: ret
+entry:
+ %a = call <vscale x 2 x float> @llvm.riscv.vfwnmacc.nxv2f32.nxv2bf16(
+ <vscale x 2 x float> %0,
+ <vscale x 2 x bfloat> %1,
+ <vscale x 2 x bfloat> %2,
+ iXLen 0, iXLen %3, iXLen 0)
+
+ ret <vscale x 2 x float> %a
+}
+
+declare <vscale x 2 x float> @llvm.riscv.vfwnmacc.mask.nxv2f32.nxv2bf16(
+ <vscale x 2 x float>,
+ <vscale x 2 x bfloat>,
+ <vscale x 2 x bfloat>,
+ <vscale x 2 x i1>,
+ iXLen, iXLen, iXLen);
+
+define <vscale x 2 x float> @intrinsic_vfwnmacc_mask_vv_nxv2f32_nxv2bf16_nxv2bf16(<vscale x 2 x float> %0, <vscale x 2 x bfloat> %1, <vscale x 2 x bfloat> %2, <vscale x 2 x i1> %3, iXLen %4) nounwind {
+; CHECK-LABEL: intrinsic_vfwnmacc_mask_vv_nxv2f32_nxv2bf16_nxv2bf16:
+; CHECK: # %bb.0: # %entry
+; CHECK-NEXT: fsrmi a1, 0
+; CHECK-NEXT: vsetvli zero, a0, e16alt, mf2, tu, mu
+; CHECK-NEXT: vfwnmacc.vv v8, v9, v10, v0.t
+; CHECK-NEXT: fsrm a1
+; CHECK-NEXT: ret
+entry:
+ %a = call <vscale x 2 x float> @llvm.riscv.vfwnmacc.mask.nxv2f32.nxv2bf16(
+ <vscale x 2 x float> %0,
+ <vscale x 2 x bfloat> %1,
+ <vscale x 2 x bfloat> %2,
+ <vscale x 2 x i1> %3,
+ iXLen 0, iXLen %4, iXLen 0)
+
+ ret <vscale x 2 x float> %a
+}
+
+declare <vscale x 4 x float> @llvm.riscv.vfwnmacc.nxv4f32.nxv4bf16(
+ <vscale x 4 x float>,
+ <vscale x 4 x bfloat>,
+ <vscale x 4 x bfloat>,
+ iXLen, iXLen, iXLen);
+
+define <vscale x 4 x float> @intrinsic_vfwnmacc_vv_nxv4f32_nxv4bf16_nxv4bf16(<vscale x 4 x float> %0, <vscale x 4 x bfloat> %1, <vscale x 4 x bfloat> %2, iXLen %3) nounwind {
+; CHECK-LABEL: intrinsic_vfwnmacc_vv_nxv4f32_nxv4bf16_nxv4bf16:
+; CHECK: # %bb.0: # %entry
+; CHECK-NEXT: fsrmi a1, 0
+; CHECK-NEXT: vsetvli zero, a0, e16alt, m1, tu, ma
+; CHECK-NEXT: vfwnmacc.vv v8, v10, v11
+; CHECK-NEXT: fsrm a1
+; CHECK-NEXT: ret
+entry:
+ %a = call <vscale x 4 x float> @llvm.riscv.vfwnmacc.nxv4f32.nxv4bf16(
+ <vscale x 4 x float> %0,
+ <vscale x 4 x bfloat> %1,
+ <vscale x 4 x bfloat> %2,
+ iXLen 0, iXLen %3, iXLen 0)
+
+ ret <vscale x 4 x float> %a
+}
+
+declare <vscale x 4 x float> @llvm.riscv.vfwnmacc.mask.nxv4f32.nxv4bf16(
+ <vscale x 4 x float>,
+ <vscale x 4 x bfloat>,
+ <vscale x 4 x bfloat>,
+ <vscale x 4 x i1>,
+ iXLen, iXLen, iXLen);
+
+define <vscale x 4 x float> @intrinsic_vfwnmacc_mask_vv_nxv4f32_nxv4bf16_nxv4bf16(<vscale x 4 x float> %0, <vscale x 4 x bfloat> %1, <vscale x 4 x bfloat> %2, <vscale x 4 x i1> %3, iXLen %4) nounwind {
+; CHECK-LABEL: intrinsic_vfwnmacc_mask_vv_nxv4f32_nxv4bf16_nxv4bf16:
+; CHECK: # %bb.0: # %entry
+; CHECK-NEXT: fsrmi a1, 0
+; CHECK-NEXT: vsetvli zero, a0, e16alt, m1, tu, mu
+; CHECK-NEXT: vfwnmacc.vv v8, v10, v11, v0.t
+; CHECK-NEXT: fsrm a1
+; CHECK-NEXT: ret
+entry:
+ %a = call <vscale x 4 x float> @llvm.riscv.vfwnmacc.mask.nxv4f32.nxv4bf16(
+ <vscale x 4 x float> %0,
+ <vscale x 4 x bfloat> %1,
+ <vscale x 4 x bfloat> %2,
+ <vscale x 4 x i1> %3,
+ iXLen 0, iXLen %4, iXLen 0)
+
+ ret <vscale x 4 x float> %a
+}
+
+declare <vscale x 8 x float> @llvm.riscv.vfwnmacc.nxv8f32.nxv8bf16(
+ <vscale x 8 x float>,
+ <vscale x 8 x bfloat>,
+ <vscale x 8 x bfloat>,
+ iXLen, iXLen, iXLen);
+
+define <vscale x 8 x float> @intrinsic_vfwnmacc_vv_nxv8f32_nxv8bf16_nxv8bf16(<vscale x 8 x float> %0, <vscale x 8 x bfloat> %1, <vscale x 8 x bfloat> %2, iXLen %3) nounwind {
+; CHECK-LABEL: intrinsic_vfwnmacc_vv_nxv8f32_nxv8bf16_nxv8bf16:
+; CHECK: # %bb.0: # %entry
+; CHECK-NEXT: fsrmi a1, 0
+; CHECK-NEXT: vsetvli zero, a0, e16alt, m2, tu, ma
+; CHECK-NEXT: vfwnmacc.vv v8, v12, v14
+; CHECK-NEXT: fsrm a1
+; CHECK-NEXT: ret
+entry:
+ %a = call <vscale x 8 x float> @llvm.riscv.vfwnmacc.nxv8f32.nxv8bf16(
+ <vscale x 8 x float> %0,
+ <vscale x 8 x bfloat> %1,
+ <vscale x 8 x bfloat> %2,
+ iXLen 0, iXLen %3, iXLen 0)
+
+ ret <vscale x 8 x float> %a
+}
+
+declare <vscale x 8 x float> @llvm.riscv.vfwnmacc.mask.nxv8f32.nxv8bf16(
+ <vscale x 8 x float>,
+ <vscale x 8 x bfloat>,
+ <vscale x 8 x bfloat>,
+ <vscale x 8 x i1>,
+ iXLen, iXLen, iXLen);
+
+define <vscale x 8 x float> @intrinsic_vfwnmacc_mask_vv_nxv8f32_nxv8bf16_nxv8bf16(<vscale x 8 x float> %0, <vscale x 8 x bfloat> %1, <vscale x 8 x bfloat> %2, <vscale x 8 x i1> %3, iXLen %4) nounwind {
+; CHECK-LABEL: intrinsic_vfwnmacc_mask_vv_nxv8f32_nxv8bf16_nxv8bf16:
+; CHECK: # %bb.0: # %entry
+; CHECK-NEXT: fsrmi a1, 0
+; CHECK-NEXT: vsetvli zero, a0, e16alt, m2, tu, mu
+; CHECK-NEXT: vfwnmacc.vv v8, v12, v14, v0.t
+; CHECK-NEXT: fsrm a1
+; CHECK-NEXT: ret
+entry:
+ %a = call <vscale x 8 x float> @llvm.riscv.vfwnmacc.mask.nxv8f32.nxv8bf16(
+ <vscale x 8 x float> %0,
+ <vscale x 8 x bfloat> %1,
+ <vscale x 8 x bfloat> %2,
+ <vscale x 8 x i1> %3,
+ iXLen 0, iXLen %4, iXLen 0)
+
+ ret <vscale x 8 x float> %a
+}
+
+declare <vscale x 16 x float> @llvm.riscv.vfwnmacc.nxv16f32.nxv16bf16(
+ <vscale x 16 x float>,
+ <vscale x 16 x bfloat>,
+ <vscale x 16 x bfloat>,
+ iXLen, iXLen, iXLen);
+
+define <vscale x 16 x float> @intrinsic_vfwnmacc_vv_nxv16f32_nxv16bf16_nxv16bf16(<vscale x 16 x float> %0, <vscale x 16 x bfloat> %1, <vscale x 16 x bfloat> %2, iXLen %3) nounwind {
+; CHECK-LABEL: intrinsic_vfwnmacc_vv_nxv16f32_nxv16bf16_nxv16bf16:
+; CHECK: # %bb.0: # %entry
+; CHECK-NEXT: fsrmi a1, 0
+; CHECK-NEXT: vsetvli zero, a0, e16alt, m4, tu, ma
+; CHECK-NEXT: vfwnmacc.vv v8, v16, v20
+; CHECK-NEXT: fsrm a1
+; CHECK-NEXT: ret
+entry:
+ %a = call <vscale x 16 x float> @llvm.riscv.vfwnmacc.nxv16f32.nxv16bf16(
+ <vscale x 16 x float> %0,
+ <vscale x 16 x bfloat> %1,
+ <vscale x 16 x bfloat> %2,
+ iXLen 0, iXLen %3, iXLen 0)
+
+ ret <vscale x 16 x float> %a
+}
+
+declare <vscale x 16 x float> @llvm.riscv.vfwnmacc.mask.nxv16f32.nxv16bf16(
+ <vscale x 16 x float>,
+ <vscale x 16 x bfloat>,
+ <vscale x 16 x bfloat>,
+ <vscale x 16 x i1>,
+ iXLen, iXLen, iXLen);
+
+define <vscale x 16 x float> @intrinsic_vfwnmacc_mask_vv_nxv16f32_nxv16bf16_nxv16bf16(<vscale x 16 x float> %0, <vscale x 16 x bfloat> %1, <vscale x 16 x bfloat> %2, <vscale x 16 x i1> %3, iXLen %4) nounwind {
+; CHECK-LABEL: intrinsic_vfwnmacc_mask_vv_nxv16f32_nxv16bf16_nxv16bf16:
+; CHECK: # %bb.0: # %entry
+; CHECK-NEXT: fsrmi a1, 0
+; CHECK-NEXT: vsetvli zero, a0, e16alt, m4, tu, mu
+; CHECK-NEXT: vfwnmacc.vv v8, v16, v20, v0.t
+; CHECK-NEXT: fsrm a1
+; CHECK-NEXT: ret
+entry:
+ %a = call <vscale x 16 x float> @llvm.riscv.vfwnmacc.mask.nxv16f32.nxv16bf16(
+ <vscale x 16 x float> %0,
+ <vscale x 16 x bfloat> %1,
+ <vscale x 16 x bfloat> %2,
+ <vscale x 16 x i1> %3,
+ iXLen 0, iXLen %4, iXLen 0)
+
+ ret <vscale x 16 x float> %a
+}
+
+declare <vscale x 1 x float> @llvm.riscv.vfwnmacc.nxv1f32.bf16(
+ <vscale x 1 x float>,
+ bfloat,
+ <vscale x 1 x bfloat>,
+ iXLen, iXLen, iXLen);
+
+define <vscale x 1 x float> @intrinsic_vfwnmacc_vf_nxv1f32_bf16_nxv1bf16(<vscale x 1 x float> %0, bfloat %1, <vscale x 1 x bfloat> %2, iXLen %3) nounwind {
+; CHECK-LABEL: intrinsic_vfwnmacc_vf_nxv1f32_bf16_nxv1bf16:
+; CHECK: # %bb.0: # %entry
+; CHECK-NEXT: fsrmi a1, 0
+; CHECK-NEXT: vsetvli zero, a0, e16alt, mf4, tu, ma
+; CHECK-NEXT: vfwnmacc.vf v8, fa0, v9
+; CHECK-NEXT: fsrm a1
+; CHECK-NEXT: ret
+entry:
+ %a = call <vscale x 1 x float> @llvm.riscv.vfwnmacc.nxv1f32.bf16(
+ <vscale x 1 x float> %0,
+ bfloat %1,
+ <vscale x 1 x bfloat> %2,
+ iXLen 0, iXLen %3, iXLen 0)
+
+ ret <vscale x 1 x float> %a
+}
+
+declare <vscale x 1 x float> @llvm.riscv.vfwnmacc.mask.nxv1f32.bf16(
+ <vscale x 1 x float>,
+ bfloat,
+ <vscale x 1 x bfloat>,
+ <vscale x 1 x i1>,
+ iXLen, iXLen, iXLen);
+
+define <vscale x 1 x float> @intrinsic_vfwnmacc_mask_vf_nxv1f32_bf16_nxv1bf16(<vscale x 1 x float> %0, bfloat %1, <vscale x 1 x bfloat> %2, <vscale x 1 x i1> %3, iXLen %4) nounwind {
+; CHECK-LABEL: intrinsic_vfwnmacc_mask_vf_nxv1f32_bf16_nxv1bf16:
+; CHECK: # %bb.0: # %entry
+; CHECK-NEXT: fsrmi a1, 0
+; CHECK-NEXT: vsetvli zero, a0, e16alt, mf4, tu, mu
+; CHECK-NEXT: vfwnmacc.vf v8, fa0, v9, v0.t
+; CHECK-NEXT: fsrm a1
+; CHECK-NEXT: ret
+entry:
+ %a = call <vscale x 1 x float> @llvm.riscv.vfwnmacc.mask.nxv1f32.bf16(
+ <vscale x 1 x float> %0,
+ bfloat %1,
+ <vscale x 1 x bfloat> %2,
+ <vscale x 1 x i1> %3,
+ iXLen 0, iXLen %4, iXLen 0)
+
+ ret <vscale x 1 x float> %a
+}
+
+declare <vscale x 2 x float> @llvm.riscv.vfwnmacc.nxv2f32.bf16(
+ <vscale x 2 x float>,
+ bfloat,
+ <vscale x 2 x bfloat>,
+ iXLen, iXLen, iXLen);
+
+define <vscale x 2 x float> @intrinsic_vfwnmacc_vf_nxv2f32_bf16_nxv2bf16(<vscale x 2 x float> %0, bfloat %1, <vscale x 2 x bfloat> %2, iXLen %3) nounwind {
+; CHECK-LABEL: intrinsic_vfwnmacc_vf_nxv2f32_bf16_nxv2bf16:
+; CHECK: # %bb.0: # %entry
+; CHECK-NEXT: fsrmi a1, 0
+; CHECK-NEXT: vsetvli zero, a0, e16alt, mf2, tu, ma
+; CHECK-NEXT: vfwnmacc.vf v8, fa0, v9
+; CHECK-NEXT: fsrm a1
+; CHECK-NEXT: ret
+entry:
+ %a = call <vscale x 2 x float> @llvm.riscv.vfwnmacc.nxv2f32.bf16(
+ <vscale x 2 x float> %0,
+ bfloat %1,
+ <vscale x 2 x bfloat> %2,
+ iXLen 0, iXLen %3, iXLen 0)
+
+ ret <vscale x 2 x float> %a
+}
+
+declare <vscale x 2 x float> @llvm.riscv.vfwnmacc.mask.nxv2f32.bf16(
+ <vscale x 2 x float>,
+ bfloat,
+ <vscale x 2 x bfloat>,
+ <vscale x 2 x i1>,
+ iXLen, iXLen, iXLen);
+
+define <vscale x 2 x float> @intrinsic_vfwnmacc_mask_vf_nxv2f32_bf16_nxv2bf16(<vscale x 2 x float> %0, bfloat %1, <vscale x 2 x bfloat> %2, <vscale x 2 x i1> %3, iXLen %4) nounwind {
+; CHECK-LABEL: intrinsic_vfwnmacc_mask_vf_nxv2f32_bf16_nxv2bf16:
+; CHECK: # %bb.0: # %entry
+; CHECK-NEXT: fsrmi a1, 0
+; CHECK-NEXT: vsetvli zero, a0, e16alt, mf2, tu, mu
+; CHECK-NEXT: vfwnmacc.vf v8, fa0, v9, v0.t
+; CHECK-NEXT: fsrm a1
+; CHECK-NEXT: ret
+entry:
+ %a = call <vscale x 2 x float> @llvm.riscv.vfwnmacc.mask.nxv2f32.bf16(
+ <vscale x 2 x float> %0,
+ bfloat %1,
+ <vscale x 2 x bfloat> %2,
+ <vscale x 2 x i1> %3,
+ iXLen 0, iXLen %4, iXLen 0)
+
+ ret <vscale x 2 x float> %a
+}
+
+declare <vscale x 4 x float> @llvm.riscv.vfwnmacc.nxv4f32.bf16(
+ <vscale x 4 x float>,
+ bfloat,
+ <vscale x 4 x bfloat>,
+ iXLen, iXLen, iXLen);
+
+define <vscale x 4 x float> @intrinsic_vfwnmacc_vf_nxv4f32_bf16_nxv4bf16(<vscale x 4 x float> %0, bfloat %1, <vscale x 4 x bfloat> %2, iXLen %3) nounwind {
+; CHECK-LABEL: intrinsic_vfwnmacc_vf_nxv4f32_bf16_nxv4bf16:
+; CHECK: # %bb.0: # %entry
+; CHECK-NEXT: fsrmi a1, 0
+; CHECK-NEXT: vsetvli zero, a0, e16alt, m1, tu, ma
+; CHECK-NEXT: vfwnmacc.vf v8, fa0, v10
+; CHECK-NEXT: fsrm a1
+; CHECK-NEXT: ret
+entry:
+ %a = call <vscale x 4 x float> @llvm.riscv.vfwnmacc.nxv4f32.bf16(
+ <vscale x 4 x float> %0,
+ bfloat %1,
+ <vscale x 4 x bfloat> %2,
+ iXLen 0, iXLen %3, iXLen 0)
+
+ ret <vscale x 4 x float> %a
+}
+
+declare <vscale x 4 x float> @llvm.riscv.vfwnmacc.mask.nxv4f32.bf16(
+ <vscale x 4 x float>,
+ bfloat,
+ <vscale x 4 x bfloat>,
+ <vscale x 4 x i1>,
+ iXLen, iXLen, iXLen);
+
+define <vscale x 4 x float> @intrinsic_vfwnmacc_mask_vf_nxv4f32_bf16_nxv4bf16(<vscale x 4 x float> %0, bfloat %1, <vscale x 4 x bfloat> %2, <vscale x 4 x i1> %3, iXLen %4) nounwind {
+; CHECK-LABEL: intrinsic_vfwnmacc_mask_vf_nxv4f32_bf16_nxv4bf16:
+; CHECK: # %bb.0: # %entry
+; CHECK-NEXT: fsrmi a1, 0
+; CHECK-NEXT: vsetvli zero, a0, e16alt, m1, tu, mu
+; CHECK-NEXT: vfwnmacc.vf v8, fa0, v10, v0.t
+; CHECK-NEXT: fsrm a1
+; CHECK-NEXT: ret
+entry:
+ %a = call <vscale x 4 x float> @llvm.riscv.vfwnmacc.mask.nxv4f32.bf16(
+ <vscale x 4 x float> %0,
+ bfloat %1,
+ <vscale x 4 x bfloat> %2,
+ <vscale x 4 x i1> %3,
+ iXLen 0, iXLen %4, iXLen 0)
+
+ ret <vscale x 4 x float> %a
+}
+
+declare <vscale x 8 x float> @llvm.riscv.vfwnmacc.nxv8f32.bf16(
+ <vscale x 8 x float>,
+ bfloat,
+ <vscale x 8 x bfloat>,
+ iXLen, iXLen, iXLen);
+
+define <vscale x 8 x float> @intrinsic_vfwnmacc_vf_nxv8f32_bf16_nxv8bf16(<vscale x 8 x float> %0, bfloat %1, <vscale x 8 x bfloat> %2, iXLen %3) nounwind {
+; CHECK-LABEL: intrinsic_vfwnmacc_vf_nxv8f32_bf16_nxv8bf16:
+; CHECK: # %bb.0: # %entry
+; CHECK-NEXT: fsrmi a1, 0
+; CHECK-NEXT: vsetvli zero, a0, e16alt, m2, tu, ma
+; CHECK-NEXT: vfwnmacc.vf v8, fa0, v12
+; CHECK-NEXT: fsrm a1
+; CHECK-NEXT: ret
+entry:
+ %a = call <vscale x 8 x float> @llvm.riscv.vfwnmacc.nxv8f32.bf16(
+ <vscale x 8 x float> %0,
+ bfloat %1,
+ <vscale x 8 x bfloat> %2,
+ iXLen 0, iXLen %3, iXLen 0)
+
+ ret <vscale x 8 x float> %a
+}
+
+declare <vscale x 8 x float> @llvm.riscv.vfwnmacc.mask.nxv8f32.bf16(
+ <vscale x 8 x float>,
+ bfloat,
+ <vscale x 8 x bfloat>,
+ <vscale x 8 x i1>,
+ iXLen, iXLen, iXLen);
+
+define <vscale x 8 x float> @intrinsic_vfwnmacc_mask_vf_nxv8f32_bf16_nxv8bf16(<vscale x 8 x float> %0, bfloat %1, <vscale x 8 x bfloat> %2, <vscale x 8 x i1> %3, iXLen %4) nounwind {
+; CHECK-LABEL: intrinsic_vfwnmacc_mask_vf_nxv8f32_bf16_nxv8bf16:
+; CHECK: # %bb.0: # %entry
+; CHECK-NEXT: fsrmi a1, 0
+; CHECK-NEXT: vsetvli zero, a0, e16alt, m2, tu, mu
+; CHECK-NEXT: vfwnmacc.vf v8, fa0, v12, v0.t
+; CHECK-NEXT: fsrm a1
+; CHECK-NEXT: ret
+entry:
+ %a = call <vscale x 8 x float> @llvm.riscv.vfwnmacc.mask.nxv8f32.bf16(
+ <vscale x 8 x float> %0,
+ bfloat %1,
+ <vscale x 8 x bfloat> %2,
+ <vscale x 8 x i1> %3,
+ iXLen 0, iXLen %4, iXLen 0)
+
+ ret <vscale x 8 x float> %a
+}
+
+declare <vscale x 16 x float> @llvm.riscv.vfwnmacc.nxv16f32.bf16(
+ <vscale x 16 x float>,
+ bfloat,
+ <vscale x 16 x bfloat>,
+ iXLen, iXLen, iXLen);
+
+define <vscale x 16 x float> @intrinsic_vfwnmacc_vf_nxv16f32_bf16_nxv16bf16(<vscale x 16 x float> %0, bfloat %1, <vscale x 16 x bfloat> %2, iXLen %3) nounwind {
+; CHECK-LABEL: intrinsic_vfwnmacc_vf_nxv16f32_bf16_nxv16bf16:
+; CHECK: # %bb.0: # %entry
+; CHECK-NEXT: fsrmi a1, 0
+; CHECK-NEXT: vsetvli zero, a0, e16alt, m4, tu, ma
+; CHECK-NEXT: vfwnmacc.vf v8, fa0, v16
+; CHECK-NEXT: fsrm a1
+; CHECK-NEXT: ret
+entry:
+ %a = call <vscale x 16 x float> @llvm.riscv.vfwnmacc.nxv16f32.bf16(
+ <vscale x 16 x float> %0,
+ bfloat %1,
+ <vscale x 16 x bfloat> %2,
+ iXLen 0, iXLen %3, iXLen 0)
+
+ ret <vscale x 16 x float> %a
+}
+
+declare <vscale x 16 x float> @llvm.riscv.vfwnmacc.mask.nxv16f32.bf16(
+ <vscale x 16 x float>,
+ bfloat,
+ <vscale x 16 x bfloat>,
+ <vscale x 16 x i1>,
+ iXLen, iXLen, iXLen);
+
+define <vscale x 16 x float> @intrinsic_vfwnmacc_mask_vf_nxv16f32_bf16_nxv16bf16(<vscale x 16 x float> %0, bfloat %1, <vscale x 16 x bfloat> %2, <vscale x 16 x i1> %3, iXLen %4) nounwind {
+; CHECK-LABEL: intrinsic_vfwnmacc_mask_vf_nxv16f32_bf16_nxv16bf16:
+; CHECK: # %bb.0: # %entry
+; CHECK-NEXT: fsrmi a1, 0
+; CHECK-NEXT: vsetvli zero, a0, e16alt, m4, tu, mu
+; CHECK-NEXT: vfwnmacc.vf v8, fa0, v16, v0.t
+; CHECK-NEXT: fsrm a1
+; CHECK-NEXT: ret
+entry:
+ %a = call <vscale x 16 x float> @llvm.riscv.vfwnmacc.mask.nxv16f32.bf16(
+ <vscale x 16 x float> %0,
+ bfloat %1,
+ <vscale x 16 x bfloat> %2,
+ <vscale x 16 x i1> %3,
+ iXLen 0, iXLen %4, iXLen 0)
+
+ ret <vscale x 16 x float> %a
+}
+
diff --git a/llvm/test/CodeGen/RISCV/rvv/vfwnmsac-bf.ll b/llvm/test/CodeGen/RISCV/rvv/vfwnmsac-bf.ll
new file mode 100644
index 0000000..223ad4f
--- /dev/null
+++ b/llvm/test/CodeGen/RISCV/rvv/vfwnmsac-bf.ll
@@ -0,0 +1,506 @@
+; NOTE: Assertions have been autogenerated by utils/update_llc_test_checks.py
+; RUN: sed 's/iXLen/i32/g' %s | llc -mtriple=riscv32 -mattr=+v,+experimental-zvfbfa \
+; RUN: -verify-machineinstrs -target-abi=ilp32d | FileCheck %s
+; RUN: sed 's/iXLen/i64/g' %s | llc -mtriple=riscv64 -mattr=+v,+experimental-zvfbfa \
+; RUN: -verify-machineinstrs -target-abi=lp64d | FileCheck %s
+
+declare <vscale x 1 x float> @llvm.riscv.vfwnmsac.nxv1f32.nxv1bf16(
+ <vscale x 1 x float>,
+ <vscale x 1 x bfloat>,
+ <vscale x 1 x bfloat>,
+ iXLen, iXLen, iXLen);
+
+define <vscale x 1 x float> @intrinsic_vfwnmsac_vv_nxv1f32_nxv1bf16_nxv1bf16(<vscale x 1 x float> %0, <vscale x 1 x bfloat> %1, <vscale x 1 x bfloat> %2, iXLen %3) nounwind {
+; CHECK-LABEL: intrinsic_vfwnmsac_vv_nxv1f32_nxv1bf16_nxv1bf16:
+; CHECK: # %bb.0: # %entry
+; CHECK-NEXT: fsrmi a1, 0
+; CHECK-NEXT: vsetvli zero, a0, e16alt, mf4, tu, ma
+; CHECK-NEXT: vfwnmsac.vv v8, v9, v10
+; CHECK-NEXT: fsrm a1
+; CHECK-NEXT: ret
+entry:
+ %a = call <vscale x 1 x float> @llvm.riscv.vfwnmsac.nxv1f32.nxv1bf16(
+ <vscale x 1 x float> %0,
+ <vscale x 1 x bfloat> %1,
+ <vscale x 1 x bfloat> %2,
+ iXLen 0, iXLen %3, iXLen 0)
+
+ ret <vscale x 1 x float> %a
+}
+
+declare <vscale x 1 x float> @llvm.riscv.vfwnmsac.mask.nxv1f32.nxv1bf16(
+ <vscale x 1 x float>,
+ <vscale x 1 x bfloat>,
+ <vscale x 1 x bfloat>,
+ <vscale x 1 x i1>,
+ iXLen, iXLen, iXLen);
+
+define <vscale x 1 x float> @intrinsic_vfwnmsac_mask_vv_nxv1f32_nxv1bf16_nxv1bf16(<vscale x 1 x float> %0, <vscale x 1 x bfloat> %1, <vscale x 1 x bfloat> %2, <vscale x 1 x i1> %3, iXLen %4) nounwind {
+; CHECK-LABEL: intrinsic_vfwnmsac_mask_vv_nxv1f32_nxv1bf16_nxv1bf16:
+; CHECK: # %bb.0: # %entry
+; CHECK-NEXT: fsrmi a1, 0
+; CHECK-NEXT: vsetvli zero, a0, e16alt, mf4, tu, mu
+; CHECK-NEXT: vfwnmsac.vv v8, v9, v10, v0.t
+; CHECK-NEXT: fsrm a1
+; CHECK-NEXT: ret
+entry:
+ %a = call <vscale x 1 x float> @llvm.riscv.vfwnmsac.mask.nxv1f32.nxv1bf16(
+ <vscale x 1 x float> %0,
+ <vscale x 1 x bfloat> %1,
+ <vscale x 1 x bfloat> %2,
+ <vscale x 1 x i1> %3,
+ iXLen 0, iXLen %4, iXLen 0)
+
+ ret <vscale x 1 x float> %a
+}
+
+declare <vscale x 2 x float> @llvm.riscv.vfwnmsac.nxv2f32.nxv2bf16(
+ <vscale x 2 x float>,
+ <vscale x 2 x bfloat>,
+ <vscale x 2 x bfloat>,
+ iXLen, iXLen, iXLen);
+
+define <vscale x 2 x float> @intrinsic_vfwnmsac_vv_nxv2f32_nxv2bf16_nxv2bf16(<vscale x 2 x float> %0, <vscale x 2 x bfloat> %1, <vscale x 2 x bfloat> %2, iXLen %3) nounwind {
+; CHECK-LABEL: intrinsic_vfwnmsac_vv_nxv2f32_nxv2bf16_nxv2bf16:
+; CHECK: # %bb.0: # %entry
+; CHECK-NEXT: fsrmi a1, 0
+; CHECK-NEXT: vsetvli zero, a0, e16alt, mf2, tu, ma
+; CHECK-NEXT: vfwnmsac.vv v8, v9, v10
+; CHECK-NEXT: fsrm a1
+; CHECK-NEXT: ret
+entry:
+ %a = call <vscale x 2 x float> @llvm.riscv.vfwnmsac.nxv2f32.nxv2bf16(
+ <vscale x 2 x float> %0,
+ <vscale x 2 x bfloat> %1,
+ <vscale x 2 x bfloat> %2,
+ iXLen 0, iXLen %3, iXLen 0)
+
+ ret <vscale x 2 x float> %a
+}
+
+declare <vscale x 2 x float> @llvm.riscv.vfwnmsac.mask.nxv2f32.nxv2bf16(
+ <vscale x 2 x float>,
+ <vscale x 2 x bfloat>,
+ <vscale x 2 x bfloat>,
+ <vscale x 2 x i1>,
+ iXLen, iXLen, iXLen);
+
+define <vscale x 2 x float> @intrinsic_vfwnmsac_mask_vv_nxv2f32_nxv2bf16_nxv2bf16(<vscale x 2 x float> %0, <vscale x 2 x bfloat> %1, <vscale x 2 x bfloat> %2, <vscale x 2 x i1> %3, iXLen %4) nounwind {
+; CHECK-LABEL: intrinsic_vfwnmsac_mask_vv_nxv2f32_nxv2bf16_nxv2bf16:
+; CHECK: # %bb.0: # %entry
+; CHECK-NEXT: fsrmi a1, 0
+; CHECK-NEXT: vsetvli zero, a0, e16alt, mf2, tu, mu
+; CHECK-NEXT: vfwnmsac.vv v8, v9, v10, v0.t
+; CHECK-NEXT: fsrm a1
+; CHECK-NEXT: ret
+entry:
+ %a = call <vscale x 2 x float> @llvm.riscv.vfwnmsac.mask.nxv2f32.nxv2bf16(
+ <vscale x 2 x float> %0,
+ <vscale x 2 x bfloat> %1,
+ <vscale x 2 x bfloat> %2,
+ <vscale x 2 x i1> %3,
+ iXLen 0, iXLen %4, iXLen 0)
+
+ ret <vscale x 2 x float> %a
+}
+
+declare <vscale x 4 x float> @llvm.riscv.vfwnmsac.nxv4f32.nxv4bf16(
+ <vscale x 4 x float>,
+ <vscale x 4 x bfloat>,
+ <vscale x 4 x bfloat>,
+ iXLen, iXLen, iXLen);
+
+define <vscale x 4 x float> @intrinsic_vfwnmsac_vv_nxv4f32_nxv4bf16_nxv4bf16(<vscale x 4 x float> %0, <vscale x 4 x bfloat> %1, <vscale x 4 x bfloat> %2, iXLen %3) nounwind {
+; CHECK-LABEL: intrinsic_vfwnmsac_vv_nxv4f32_nxv4bf16_nxv4bf16:
+; CHECK: # %bb.0: # %entry
+; CHECK-NEXT: fsrmi a1, 0
+; CHECK-NEXT: vsetvli zero, a0, e16alt, m1, tu, ma
+; CHECK-NEXT: vfwnmsac.vv v8, v10, v11
+; CHECK-NEXT: fsrm a1
+; CHECK-NEXT: ret
+entry:
+ %a = call <vscale x 4 x float> @llvm.riscv.vfwnmsac.nxv4f32.nxv4bf16(
+ <vscale x 4 x float> %0,
+ <vscale x 4 x bfloat> %1,
+ <vscale x 4 x bfloat> %2,
+ iXLen 0, iXLen %3, iXLen 0)
+
+ ret <vscale x 4 x float> %a
+}
+
+declare <vscale x 4 x float> @llvm.riscv.vfwnmsac.mask.nxv4f32.nxv4bf16(
+ <vscale x 4 x float>,
+ <vscale x 4 x bfloat>,
+ <vscale x 4 x bfloat>,
+ <vscale x 4 x i1>,
+ iXLen, iXLen, iXLen);
+
+define <vscale x 4 x float> @intrinsic_vfwnmsac_mask_vv_nxv4f32_nxv4bf16_nxv4bf16(<vscale x 4 x float> %0, <vscale x 4 x bfloat> %1, <vscale x 4 x bfloat> %2, <vscale x 4 x i1> %3, iXLen %4) nounwind {
+; CHECK-LABEL: intrinsic_vfwnmsac_mask_vv_nxv4f32_nxv4bf16_nxv4bf16:
+; CHECK: # %bb.0: # %entry
+; CHECK-NEXT: fsrmi a1, 0
+; CHECK-NEXT: vsetvli zero, a0, e16alt, m1, tu, mu
+; CHECK-NEXT: vfwnmsac.vv v8, v10, v11, v0.t
+; CHECK-NEXT: fsrm a1
+; CHECK-NEXT: ret
+entry:
+ %a = call <vscale x 4 x float> @llvm.riscv.vfwnmsac.mask.nxv4f32.nxv4bf16(
+ <vscale x 4 x float> %0,
+ <vscale x 4 x bfloat> %1,
+ <vscale x 4 x bfloat> %2,
+ <vscale x 4 x i1> %3,
+ iXLen 0, iXLen %4, iXLen 0)
+
+ ret <vscale x 4 x float> %a
+}
+
+declare <vscale x 8 x float> @llvm.riscv.vfwnmsac.nxv8f32.nxv8bf16(
+ <vscale x 8 x float>,
+ <vscale x 8 x bfloat>,
+ <vscale x 8 x bfloat>,
+ iXLen, iXLen, iXLen);
+
+define <vscale x 8 x float> @intrinsic_vfwnmsac_vv_nxv8f32_nxv8bf16_nxv8bf16(<vscale x 8 x float> %0, <vscale x 8 x bfloat> %1, <vscale x 8 x bfloat> %2, iXLen %3) nounwind {
+; CHECK-LABEL: intrinsic_vfwnmsac_vv_nxv8f32_nxv8bf16_nxv8bf16:
+; CHECK: # %bb.0: # %entry
+; CHECK-NEXT: fsrmi a1, 0
+; CHECK-NEXT: vsetvli zero, a0, e16alt, m2, tu, ma
+; CHECK-NEXT: vfwnmsac.vv v8, v12, v14
+; CHECK-NEXT: fsrm a1
+; CHECK-NEXT: ret
+entry:
+ %a = call <vscale x 8 x float> @llvm.riscv.vfwnmsac.nxv8f32.nxv8bf16(
+ <vscale x 8 x float> %0,
+ <vscale x 8 x bfloat> %1,
+ <vscale x 8 x bfloat> %2,
+ iXLen 0, iXLen %3, iXLen 0)
+
+ ret <vscale x 8 x float> %a
+}
+
+declare <vscale x 8 x float> @llvm.riscv.vfwnmsac.mask.nxv8f32.nxv8bf16(
+ <vscale x 8 x float>,
+ <vscale x 8 x bfloat>,
+ <vscale x 8 x bfloat>,
+ <vscale x 8 x i1>,
+ iXLen, iXLen, iXLen);
+
+define <vscale x 8 x float> @intrinsic_vfwnmsac_mask_vv_nxv8f32_nxv8bf16_nxv8bf16(<vscale x 8 x float> %0, <vscale x 8 x bfloat> %1, <vscale x 8 x bfloat> %2, <vscale x 8 x i1> %3, iXLen %4) nounwind {
+; CHECK-LABEL: intrinsic_vfwnmsac_mask_vv_nxv8f32_nxv8bf16_nxv8bf16:
+; CHECK: # %bb.0: # %entry
+; CHECK-NEXT: fsrmi a1, 0
+; CHECK-NEXT: vsetvli zero, a0, e16alt, m2, tu, mu
+; CHECK-NEXT: vfwnmsac.vv v8, v12, v14, v0.t
+; CHECK-NEXT: fsrm a1
+; CHECK-NEXT: ret
+entry:
+ %a = call <vscale x 8 x float> @llvm.riscv.vfwnmsac.mask.nxv8f32.nxv8bf16(
+ <vscale x 8 x float> %0,
+ <vscale x 8 x bfloat> %1,
+ <vscale x 8 x bfloat> %2,
+ <vscale x 8 x i1> %3,
+ iXLen 0, iXLen %4, iXLen 0)
+
+ ret <vscale x 8 x float> %a
+}
+
+declare <vscale x 16 x float> @llvm.riscv.vfwnmsac.nxv16f32.nxv16bf16(
+ <vscale x 16 x float>,
+ <vscale x 16 x bfloat>,
+ <vscale x 16 x bfloat>,
+ iXLen, iXLen, iXLen);
+
+define <vscale x 16 x float> @intrinsic_vfwnmsac_vv_nxv16f32_nxv16bf16_nxv16bf16(<vscale x 16 x float> %0, <vscale x 16 x bfloat> %1, <vscale x 16 x bfloat> %2, iXLen %3) nounwind {
+; CHECK-LABEL: intrinsic_vfwnmsac_vv_nxv16f32_nxv16bf16_nxv16bf16:
+; CHECK: # %bb.0: # %entry
+; CHECK-NEXT: fsrmi a1, 0
+; CHECK-NEXT: vsetvli zero, a0, e16alt, m4, tu, ma
+; CHECK-NEXT: vfwnmsac.vv v8, v16, v20
+; CHECK-NEXT: fsrm a1
+; CHECK-NEXT: ret
+entry:
+ %a = call <vscale x 16 x float> @llvm.riscv.vfwnmsac.nxv16f32.nxv16bf16(
+ <vscale x 16 x float> %0,
+ <vscale x 16 x bfloat> %1,
+ <vscale x 16 x bfloat> %2,
+ iXLen 0, iXLen %3, iXLen 0)
+
+ ret <vscale x 16 x float> %a
+}
+
+declare <vscale x 16 x float> @llvm.riscv.vfwnmsac.mask.nxv16f32.nxv16bf16(
+ <vscale x 16 x float>,
+ <vscale x 16 x bfloat>,
+ <vscale x 16 x bfloat>,
+ <vscale x 16 x i1>,
+ iXLen, iXLen, iXLen);
+
+define <vscale x 16 x float> @intrinsic_vfwnmsac_mask_vv_nxv16f32_nxv16bf16_nxv16bf16(<vscale x 16 x float> %0, <vscale x 16 x bfloat> %1, <vscale x 16 x bfloat> %2, <vscale x 16 x i1> %3, iXLen %4) nounwind {
+; CHECK-LABEL: intrinsic_vfwnmsac_mask_vv_nxv16f32_nxv16bf16_nxv16bf16:
+; CHECK: # %bb.0: # %entry
+; CHECK-NEXT: fsrmi a1, 0
+; CHECK-NEXT: vsetvli zero, a0, e16alt, m4, tu, mu
+; CHECK-NEXT: vfwnmsac.vv v8, v16, v20, v0.t
+; CHECK-NEXT: fsrm a1
+; CHECK-NEXT: ret
+entry:
+ %a = call <vscale x 16 x float> @llvm.riscv.vfwnmsac.mask.nxv16f32.nxv16bf16(
+ <vscale x 16 x float> %0,
+ <vscale x 16 x bfloat> %1,
+ <vscale x 16 x bfloat> %2,
+ <vscale x 16 x i1> %3,
+ iXLen 0, iXLen %4, iXLen 0)
+
+ ret <vscale x 16 x float> %a
+}
+
+declare <vscale x 1 x float> @llvm.riscv.vfwnmsac.nxv1f32.bf16(
+ <vscale x 1 x float>,
+ bfloat,
+ <vscale x 1 x bfloat>,
+ iXLen, iXLen, iXLen);
+
+define <vscale x 1 x float> @intrinsic_vfwnmsac_vf_nxv1f32_bf16_nxv1bf16(<vscale x 1 x float> %0, bfloat %1, <vscale x 1 x bfloat> %2, iXLen %3) nounwind {
+; CHECK-LABEL: intrinsic_vfwnmsac_vf_nxv1f32_bf16_nxv1bf16:
+; CHECK: # %bb.0: # %entry
+; CHECK-NEXT: fsrmi a1, 0
+; CHECK-NEXT: vsetvli zero, a0, e16alt, mf4, tu, ma
+; CHECK-NEXT: vfwnmsac.vf v8, fa0, v9
+; CHECK-NEXT: fsrm a1
+; CHECK-NEXT: ret
+entry:
+ %a = call <vscale x 1 x float> @llvm.riscv.vfwnmsac.nxv1f32.bf16(
+ <vscale x 1 x float> %0,
+ bfloat %1,
+ <vscale x 1 x bfloat> %2,
+ iXLen 0, iXLen %3, iXLen 0)
+
+ ret <vscale x 1 x float> %a
+}
+
+declare <vscale x 1 x float> @llvm.riscv.vfwnmsac.mask.nxv1f32.bf16(
+ <vscale x 1 x float>,
+ bfloat,
+ <vscale x 1 x bfloat>,
+ <vscale x 1 x i1>,
+ iXLen, iXLen, iXLen);
+
+define <vscale x 1 x float> @intrinsic_vfwnmsac_mask_vf_nxv1f32_bf16_nxv1bf16(<vscale x 1 x float> %0, bfloat %1, <vscale x 1 x bfloat> %2, <vscale x 1 x i1> %3, iXLen %4) nounwind {
+; CHECK-LABEL: intrinsic_vfwnmsac_mask_vf_nxv1f32_bf16_nxv1bf16:
+; CHECK: # %bb.0: # %entry
+; CHECK-NEXT: fsrmi a1, 0
+; CHECK-NEXT: vsetvli zero, a0, e16alt, mf4, tu, mu
+; CHECK-NEXT: vfwnmsac.vf v8, fa0, v9, v0.t
+; CHECK-NEXT: fsrm a1
+; CHECK-NEXT: ret
+entry:
+ %a = call <vscale x 1 x float> @llvm.riscv.vfwnmsac.mask.nxv1f32.bf16(
+ <vscale x 1 x float> %0,
+ bfloat %1,
+ <vscale x 1 x bfloat> %2,
+ <vscale x 1 x i1> %3,
+ iXLen 0, iXLen %4, iXLen 0)
+
+ ret <vscale x 1 x float> %a
+}
+
+declare <vscale x 2 x float> @llvm.riscv.vfwnmsac.nxv2f32.bf16(
+ <vscale x 2 x float>,
+ bfloat,
+ <vscale x 2 x bfloat>,
+ iXLen, iXLen, iXLen);
+
+define <vscale x 2 x float> @intrinsic_vfwnmsac_vf_nxv2f32_bf16_nxv2bf16(<vscale x 2 x float> %0, bfloat %1, <vscale x 2 x bfloat> %2, iXLen %3) nounwind {
+; CHECK-LABEL: intrinsic_vfwnmsac_vf_nxv2f32_bf16_nxv2bf16:
+; CHECK: # %bb.0: # %entry
+; CHECK-NEXT: fsrmi a1, 0
+; CHECK-NEXT: vsetvli zero, a0, e16alt, mf2, tu, ma
+; CHECK-NEXT: vfwnmsac.vf v8, fa0, v9
+; CHECK-NEXT: fsrm a1
+; CHECK-NEXT: ret
+entry:
+ %a = call <vscale x 2 x float> @llvm.riscv.vfwnmsac.nxv2f32.bf16(
+ <vscale x 2 x float> %0,
+ bfloat %1,
+ <vscale x 2 x bfloat> %2,
+ iXLen 0, iXLen %3, iXLen 0)
+
+ ret <vscale x 2 x float> %a
+}
+
+declare <vscale x 2 x float> @llvm.riscv.vfwnmsac.mask.nxv2f32.bf16(
+ <vscale x 2 x float>,
+ bfloat,
+ <vscale x 2 x bfloat>,
+ <vscale x 2 x i1>,
+ iXLen, iXLen, iXLen);
+
+define <vscale x 2 x float> @intrinsic_vfwnmsac_mask_vf_nxv2f32_bf16_nxv2bf16(<vscale x 2 x float> %0, bfloat %1, <vscale x 2 x bfloat> %2, <vscale x 2 x i1> %3, iXLen %4) nounwind {
+; CHECK-LABEL: intrinsic_vfwnmsac_mask_vf_nxv2f32_bf16_nxv2bf16:
+; CHECK: # %bb.0: # %entry
+; CHECK-NEXT: fsrmi a1, 0
+; CHECK-NEXT: vsetvli zero, a0, e16alt, mf2, tu, mu
+; CHECK-NEXT: vfwnmsac.vf v8, fa0, v9, v0.t
+; CHECK-NEXT: fsrm a1
+; CHECK-NEXT: ret
+entry:
+ %a = call <vscale x 2 x float> @llvm.riscv.vfwnmsac.mask.nxv2f32.bf16(
+ <vscale x 2 x float> %0,
+ bfloat %1,
+ <vscale x 2 x bfloat> %2,
+ <vscale x 2 x i1> %3,
+ iXLen 0, iXLen %4, iXLen 0)
+
+ ret <vscale x 2 x float> %a
+}
+
+declare <vscale x 4 x float> @llvm.riscv.vfwnmsac.nxv4f32.bf16(
+ <vscale x 4 x float>,
+ bfloat,
+ <vscale x 4 x bfloat>,
+ iXLen, iXLen, iXLen);
+
+define <vscale x 4 x float> @intrinsic_vfwnmsac_vf_nxv4f32_bf16_nxv4bf16(<vscale x 4 x float> %0, bfloat %1, <vscale x 4 x bfloat> %2, iXLen %3) nounwind {
+; CHECK-LABEL: intrinsic_vfwnmsac_vf_nxv4f32_bf16_nxv4bf16:
+; CHECK: # %bb.0: # %entry
+; CHECK-NEXT: fsrmi a1, 0
+; CHECK-NEXT: vsetvli zero, a0, e16alt, m1, tu, ma
+; CHECK-NEXT: vfwnmsac.vf v8, fa0, v10
+; CHECK-NEXT: fsrm a1
+; CHECK-NEXT: ret
+entry:
+ %a = call <vscale x 4 x float> @llvm.riscv.vfwnmsac.nxv4f32.bf16(
+ <vscale x 4 x float> %0,
+ bfloat %1,
+ <vscale x 4 x bfloat> %2,
+ iXLen 0, iXLen %3, iXLen 0)
+
+ ret <vscale x 4 x float> %a
+}
+
+declare <vscale x 4 x float> @llvm.riscv.vfwnmsac.mask.nxv4f32.bf16(
+ <vscale x 4 x float>,
+ bfloat,
+ <vscale x 4 x bfloat>,
+ <vscale x 4 x i1>,
+ iXLen, iXLen, iXLen);
+
+define <vscale x 4 x float> @intrinsic_vfwnmsac_mask_vf_nxv4f32_bf16_nxv4bf16(<vscale x 4 x float> %0, bfloat %1, <vscale x 4 x bfloat> %2, <vscale x 4 x i1> %3, iXLen %4) nounwind {
+; CHECK-LABEL: intrinsic_vfwnmsac_mask_vf_nxv4f32_bf16_nxv4bf16:
+; CHECK: # %bb.0: # %entry
+; CHECK-NEXT: fsrmi a1, 0
+; CHECK-NEXT: vsetvli zero, a0, e16alt, m1, tu, mu
+; CHECK-NEXT: vfwnmsac.vf v8, fa0, v10, v0.t
+; CHECK-NEXT: fsrm a1
+; CHECK-NEXT: ret
+entry:
+ %a = call <vscale x 4 x float> @llvm.riscv.vfwnmsac.mask.nxv4f32.bf16(
+ <vscale x 4 x float> %0,
+ bfloat %1,
+ <vscale x 4 x bfloat> %2,
+ <vscale x 4 x i1> %3,
+ iXLen 0, iXLen %4, iXLen 0)
+
+ ret <vscale x 4 x float> %a
+}
+
+declare <vscale x 8 x float> @llvm.riscv.vfwnmsac.nxv8f32.bf16(
+ <vscale x 8 x float>,
+ bfloat,
+ <vscale x 8 x bfloat>,
+ iXLen, iXLen, iXLen);
+
+define <vscale x 8 x float> @intrinsic_vfwnmsac_vf_nxv8f32_bf16_nxv8bf16(<vscale x 8 x float> %0, bfloat %1, <vscale x 8 x bfloat> %2, iXLen %3) nounwind {
+; CHECK-LABEL: intrinsic_vfwnmsac_vf_nxv8f32_bf16_nxv8bf16:
+; CHECK: # %bb.0: # %entry
+; CHECK-NEXT: fsrmi a1, 0
+; CHECK-NEXT: vsetvli zero, a0, e16alt, m2, tu, ma
+; CHECK-NEXT: vfwnmsac.vf v8, fa0, v12
+; CHECK-NEXT: fsrm a1
+; CHECK-NEXT: ret
+entry:
+ %a = call <vscale x 8 x float> @llvm.riscv.vfwnmsac.nxv8f32.bf16(
+ <vscale x 8 x float> %0,
+ bfloat %1,
+ <vscale x 8 x bfloat> %2,
+ iXLen 0, iXLen %3, iXLen 0)
+
+ ret <vscale x 8 x float> %a
+}
+
+declare <vscale x 8 x float> @llvm.riscv.vfwnmsac.mask.nxv8f32.bf16(
+ <vscale x 8 x float>,
+ bfloat,
+ <vscale x 8 x bfloat>,
+ <vscale x 8 x i1>,
+ iXLen, iXLen, iXLen);
+
+define <vscale x 8 x float> @intrinsic_vfwnmsac_mask_vf_nxv8f32_bf16_nxv8bf16(<vscale x 8 x float> %0, bfloat %1, <vscale x 8 x bfloat> %2, <vscale x 8 x i1> %3, iXLen %4) nounwind {
+; CHECK-LABEL: intrinsic_vfwnmsac_mask_vf_nxv8f32_bf16_nxv8bf16:
+; CHECK: # %bb.0: # %entry
+; CHECK-NEXT: fsrmi a1, 0
+; CHECK-NEXT: vsetvli zero, a0, e16alt, m2, tu, mu
+; CHECK-NEXT: vfwnmsac.vf v8, fa0, v12, v0.t
+; CHECK-NEXT: fsrm a1
+; CHECK-NEXT: ret
+entry:
+ %a = call <vscale x 8 x float> @llvm.riscv.vfwnmsac.mask.nxv8f32.bf16(
+ <vscale x 8 x float> %0,
+ bfloat %1,
+ <vscale x 8 x bfloat> %2,
+ <vscale x 8 x i1> %3,
+ iXLen 0, iXLen %4, iXLen 0)
+
+ ret <vscale x 8 x float> %a
+}
+
+declare <vscale x 16 x float> @llvm.riscv.vfwnmsac.nxv16f32.bf16(
+ <vscale x 16 x float>,
+ bfloat,
+ <vscale x 16 x bfloat>,
+ iXLen, iXLen, iXLen);
+
+define <vscale x 16 x float> @intrinsic_vfwnmsac_vf_nxv16f32_bf16_nxv16bf16(<vscale x 16 x float> %0, bfloat %1, <vscale x 16 x bfloat> %2, iXLen %3) nounwind {
+; CHECK-LABEL: intrinsic_vfwnmsac_vf_nxv16f32_bf16_nxv16bf16:
+; CHECK: # %bb.0: # %entry
+; CHECK-NEXT: fsrmi a1, 0
+; CHECK-NEXT: vsetvli zero, a0, e16alt, m4, tu, ma
+; CHECK-NEXT: vfwnmsac.vf v8, fa0, v16
+; CHECK-NEXT: fsrm a1
+; CHECK-NEXT: ret
+entry:
+ %a = call <vscale x 16 x float> @llvm.riscv.vfwnmsac.nxv16f32.bf16(
+ <vscale x 16 x float> %0,
+ bfloat %1,
+ <vscale x 16 x bfloat> %2,
+ iXLen 0, iXLen %3, iXLen 0)
+
+ ret <vscale x 16 x float> %a
+}
+
+declare <vscale x 16 x float> @llvm.riscv.vfwnmsac.mask.nxv16f32.bf16(
+ <vscale x 16 x float>,
+ bfloat,
+ <vscale x 16 x bfloat>,
+ <vscale x 16 x i1>,
+ iXLen, iXLen, iXLen);
+
+define <vscale x 16 x float> @intrinsic_vfwnmsac_mask_vf_nxv16f32_bf16_nxv16bf16(<vscale x 16 x float> %0, bfloat %1, <vscale x 16 x bfloat> %2, <vscale x 16 x i1> %3, iXLen %4) nounwind {
+; CHECK-LABEL: intrinsic_vfwnmsac_mask_vf_nxv16f32_bf16_nxv16bf16:
+; CHECK: # %bb.0: # %entry
+; CHECK-NEXT: fsrmi a1, 0
+; CHECK-NEXT: vsetvli zero, a0, e16alt, m4, tu, mu
+; CHECK-NEXT: vfwnmsac.vf v8, fa0, v16, v0.t
+; CHECK-NEXT: fsrm a1
+; CHECK-NEXT: ret
+entry:
+ %a = call <vscale x 16 x float> @llvm.riscv.vfwnmsac.mask.nxv16f32.bf16(
+ <vscale x 16 x float> %0,
+ bfloat %1,
+ <vscale x 16 x bfloat> %2,
+ <vscale x 16 x i1> %3,
+ iXLen 0, iXLen %4, iXLen 0)
+
+ ret <vscale x 16 x float> %a
+}
+
diff --git a/llvm/test/CodeGen/RISCV/rvv/vfwsub-bf.ll b/llvm/test/CodeGen/RISCV/rvv/vfwsub-bf.ll
new file mode 100644
index 0000000..d993e4e
--- /dev/null
+++ b/llvm/test/CodeGen/RISCV/rvv/vfwsub-bf.ll
@@ -0,0 +1,519 @@
+; NOTE: Assertions have been autogenerated by utils/update_llc_test_checks.py
+; RUN: sed 's/iXLen/i32/g' %s | llc -mtriple=riscv32 -mattr=+v,+experimental-zvfbfa \
+; RUN: -verify-machineinstrs -target-abi=ilp32d | FileCheck %s
+; RUN: sed 's/iXLen/i64/g' %s | llc -mtriple=riscv64 -mattr=+v,+experimental-zvfbfa \
+; RUN: -verify-machineinstrs -target-abi=lp64d | FileCheck %s
+
+declare <vscale x 1 x float> @llvm.riscv.vfwsub.nxv1f32.nxv1bf16.nxv1bf16(
+ <vscale x 1 x float>,
+ <vscale x 1 x bfloat>,
+ <vscale x 1 x bfloat>,
+ iXLen, iXLen);
+
+define <vscale x 1 x float> @intrinsic_vfwsub_vv_nxv1f32_nxv1bf16_nxv1bf16(<vscale x 1 x bfloat> %0, <vscale x 1 x bfloat> %1, iXLen %2) nounwind {
+; CHECK-LABEL: intrinsic_vfwsub_vv_nxv1f32_nxv1bf16_nxv1bf16:
+; CHECK: # %bb.0: # %entry
+; CHECK-NEXT: fsrmi a1, 0
+; CHECK-NEXT: vsetvli zero, a0, e16alt, mf4, ta, ma
+; CHECK-NEXT: vfwsub.vv v10, v8, v9
+; CHECK-NEXT: fsrm a1
+; CHECK-NEXT: vmv1r.v v8, v10
+; CHECK-NEXT: ret
+entry:
+ %a = call <vscale x 1 x float> @llvm.riscv.vfwsub.nxv1f32.nxv1bf16.nxv1bf16(
+ <vscale x 1 x float> poison,
+ <vscale x 1 x bfloat> %0,
+ <vscale x 1 x bfloat> %1,
+ iXLen 0, iXLen %2)
+
+ ret <vscale x 1 x float> %a
+}
+
+declare <vscale x 1 x float> @llvm.riscv.vfwsub.mask.nxv1f32.nxv1bf16.nxv1bf16(
+ <vscale x 1 x float>,
+ <vscale x 1 x bfloat>,
+ <vscale x 1 x bfloat>,
+ <vscale x 1 x i1>,
+ iXLen, iXLen, iXLen);
+
+define <vscale x 1 x float> @intrinsic_vfwsub_mask_vv_nxv1f32_nxv1bf16_nxv1bf16(<vscale x 1 x float> %0, <vscale x 1 x bfloat> %1, <vscale x 1 x bfloat> %2, <vscale x 1 x i1> %3, iXLen %4) nounwind {
+; CHECK-LABEL: intrinsic_vfwsub_mask_vv_nxv1f32_nxv1bf16_nxv1bf16:
+; CHECK: # %bb.0: # %entry
+; CHECK-NEXT: fsrmi a1, 0
+; CHECK-NEXT: vsetvli zero, a0, e16alt, mf4, ta, mu
+; CHECK-NEXT: vfwsub.vv v8, v9, v10, v0.t
+; CHECK-NEXT: fsrm a1
+; CHECK-NEXT: ret
+entry:
+ %a = call <vscale x 1 x float> @llvm.riscv.vfwsub.mask.nxv1f32.nxv1bf16.nxv1bf16(
+ <vscale x 1 x float> %0,
+ <vscale x 1 x bfloat> %1,
+ <vscale x 1 x bfloat> %2,
+ <vscale x 1 x i1> %3,
+ iXLen 0, iXLen %4, iXLen 1)
+
+ ret <vscale x 1 x float> %a
+}
+
+declare <vscale x 2 x float> @llvm.riscv.vfwsub.nxv2f32.nxv2bf16.nxv2bf16(
+ <vscale x 2 x float>,
+ <vscale x 2 x bfloat>,
+ <vscale x 2 x bfloat>,
+ iXLen, iXLen);
+
+define <vscale x 2 x float> @intrinsic_vfwsub_vv_nxv2f32_nxv2bf16_nxv2bf16(<vscale x 2 x bfloat> %0, <vscale x 2 x bfloat> %1, iXLen %2) nounwind {
+; CHECK-LABEL: intrinsic_vfwsub_vv_nxv2f32_nxv2bf16_nxv2bf16:
+; CHECK: # %bb.0: # %entry
+; CHECK-NEXT: fsrmi a1, 0
+; CHECK-NEXT: vsetvli zero, a0, e16alt, mf2, ta, ma
+; CHECK-NEXT: vfwsub.vv v10, v8, v9
+; CHECK-NEXT: fsrm a1
+; CHECK-NEXT: vmv1r.v v8, v10
+; CHECK-NEXT: ret
+entry:
+ %a = call <vscale x 2 x float> @llvm.riscv.vfwsub.nxv2f32.nxv2bf16.nxv2bf16(
+ <vscale x 2 x float> poison,
+ <vscale x 2 x bfloat> %0,
+ <vscale x 2 x bfloat> %1,
+ iXLen 0, iXLen %2)
+
+ ret <vscale x 2 x float> %a
+}
+
+declare <vscale x 2 x float> @llvm.riscv.vfwsub.mask.nxv2f32.nxv2bf16.nxv2bf16(
+ <vscale x 2 x float>,
+ <vscale x 2 x bfloat>,
+ <vscale x 2 x bfloat>,
+ <vscale x 2 x i1>,
+ iXLen, iXLen, iXLen);
+
+define <vscale x 2 x float> @intrinsic_vfwsub_mask_vv_nxv2f32_nxv2bf16_nxv2bf16(<vscale x 2 x float> %0, <vscale x 2 x bfloat> %1, <vscale x 2 x bfloat> %2, <vscale x 2 x i1> %3, iXLen %4) nounwind {
+; CHECK-LABEL: intrinsic_vfwsub_mask_vv_nxv2f32_nxv2bf16_nxv2bf16:
+; CHECK: # %bb.0: # %entry
+; CHECK-NEXT: fsrmi a1, 0
+; CHECK-NEXT: vsetvli zero, a0, e16alt, mf2, ta, mu
+; CHECK-NEXT: vfwsub.vv v8, v9, v10, v0.t
+; CHECK-NEXT: fsrm a1
+; CHECK-NEXT: ret
+entry:
+ %a = call <vscale x 2 x float> @llvm.riscv.vfwsub.mask.nxv2f32.nxv2bf16.nxv2bf16(
+ <vscale x 2 x float> %0,
+ <vscale x 2 x bfloat> %1,
+ <vscale x 2 x bfloat> %2,
+ <vscale x 2 x i1> %3,
+ iXLen 0, iXLen %4, iXLen 1)
+
+ ret <vscale x 2 x float> %a
+}
+
+declare <vscale x 4 x float> @llvm.riscv.vfwsub.nxv4f32.nxv4bf16.nxv4bf16(
+ <vscale x 4 x float>,
+ <vscale x 4 x bfloat>,
+ <vscale x 4 x bfloat>,
+ iXLen, iXLen);
+
+define <vscale x 4 x float> @intrinsic_vfwsub_vv_nxv4f32_nxv4bf16_nxv4bf16(<vscale x 4 x bfloat> %0, <vscale x 4 x bfloat> %1, iXLen %2) nounwind {
+; CHECK-LABEL: intrinsic_vfwsub_vv_nxv4f32_nxv4bf16_nxv4bf16:
+; CHECK: # %bb.0: # %entry
+; CHECK-NEXT: vsetvli zero, a0, e16alt, m1, ta, ma
+; CHECK-NEXT: vmv1r.v v10, v9
+; CHECK-NEXT: vmv1r.v v11, v8
+; CHECK-NEXT: fsrmi a0, 0
+; CHECK-NEXT: vfwsub.vv v8, v11, v10
+; CHECK-NEXT: fsrm a0
+; CHECK-NEXT: ret
+entry:
+ %a = call <vscale x 4 x float> @llvm.riscv.vfwsub.nxv4f32.nxv4bf16.nxv4bf16(
+ <vscale x 4 x float> poison,
+ <vscale x 4 x bfloat> %0,
+ <vscale x 4 x bfloat> %1,
+ iXLen 0, iXLen %2)
+
+ ret <vscale x 4 x float> %a
+}
+
+declare <vscale x 4 x float> @llvm.riscv.vfwsub.mask.nxv4f32.nxv4bf16.nxv4bf16(
+ <vscale x 4 x float>,
+ <vscale x 4 x bfloat>,
+ <vscale x 4 x bfloat>,
+ <vscale x 4 x i1>,
+ iXLen, iXLen, iXLen);
+
+define <vscale x 4 x float> @intrinsic_vfwsub_mask_vv_nxv4f32_nxv4bf16_nxv4bf16(<vscale x 4 x float> %0, <vscale x 4 x bfloat> %1, <vscale x 4 x bfloat> %2, <vscale x 4 x i1> %3, iXLen %4) nounwind {
+; CHECK-LABEL: intrinsic_vfwsub_mask_vv_nxv4f32_nxv4bf16_nxv4bf16:
+; CHECK: # %bb.0: # %entry
+; CHECK-NEXT: fsrmi a1, 0
+; CHECK-NEXT: vsetvli zero, a0, e16alt, m1, ta, mu
+; CHECK-NEXT: vfwsub.vv v8, v10, v11, v0.t
+; CHECK-NEXT: fsrm a1
+; CHECK-NEXT: ret
+entry:
+ %a = call <vscale x 4 x float> @llvm.riscv.vfwsub.mask.nxv4f32.nxv4bf16.nxv4bf16(
+ <vscale x 4 x float> %0,
+ <vscale x 4 x bfloat> %1,
+ <vscale x 4 x bfloat> %2,
+ <vscale x 4 x i1> %3,
+ iXLen 0, iXLen %4, iXLen 1)
+
+ ret <vscale x 4 x float> %a
+}
+
+declare <vscale x 8 x float> @llvm.riscv.vfwsub.nxv8f32.nxv8bf16.nxv8bf16(
+ <vscale x 8 x float>,
+ <vscale x 8 x bfloat>,
+ <vscale x 8 x bfloat>,
+ iXLen, iXLen);
+
+define <vscale x 8 x float> @intrinsic_vfwsub_vv_nxv8f32_nxv8bf16_nxv8bf16(<vscale x 8 x bfloat> %0, <vscale x 8 x bfloat> %1, iXLen %2) nounwind {
+; CHECK-LABEL: intrinsic_vfwsub_vv_nxv8f32_nxv8bf16_nxv8bf16:
+; CHECK: # %bb.0: # %entry
+; CHECK-NEXT: vsetvli zero, a0, e16alt, m2, ta, ma
+; CHECK-NEXT: vmv2r.v v12, v10
+; CHECK-NEXT: vmv2r.v v14, v8
+; CHECK-NEXT: fsrmi a0, 0
+; CHECK-NEXT: vfwsub.vv v8, v14, v12
+; CHECK-NEXT: fsrm a0
+; CHECK-NEXT: ret
+entry:
+ %a = call <vscale x 8 x float> @llvm.riscv.vfwsub.nxv8f32.nxv8bf16.nxv8bf16(
+ <vscale x 8 x float> poison,
+ <vscale x 8 x bfloat> %0,
+ <vscale x 8 x bfloat> %1,
+ iXLen 0, iXLen %2)
+
+ ret <vscale x 8 x float> %a
+}
+
+declare <vscale x 8 x float> @llvm.riscv.vfwsub.mask.nxv8f32.nxv8bf16.nxv8bf16(
+ <vscale x 8 x float>,
+ <vscale x 8 x bfloat>,
+ <vscale x 8 x bfloat>,
+ <vscale x 8 x i1>,
+ iXLen, iXLen, iXLen);
+
+define <vscale x 8 x float> @intrinsic_vfwsub_mask_vv_nxv8f32_nxv8bf16_nxv8bf16(<vscale x 8 x float> %0, <vscale x 8 x bfloat> %1, <vscale x 8 x bfloat> %2, <vscale x 8 x i1> %3, iXLen %4) nounwind {
+; CHECK-LABEL: intrinsic_vfwsub_mask_vv_nxv8f32_nxv8bf16_nxv8bf16:
+; CHECK: # %bb.0: # %entry
+; CHECK-NEXT: fsrmi a1, 0
+; CHECK-NEXT: vsetvli zero, a0, e16alt, m2, ta, mu
+; CHECK-NEXT: vfwsub.vv v8, v12, v14, v0.t
+; CHECK-NEXT: fsrm a1
+; CHECK-NEXT: ret
+entry:
+ %a = call <vscale x 8 x float> @llvm.riscv.vfwsub.mask.nxv8f32.nxv8bf16.nxv8bf16(
+ <vscale x 8 x float> %0,
+ <vscale x 8 x bfloat> %1,
+ <vscale x 8 x bfloat> %2,
+ <vscale x 8 x i1> %3,
+ iXLen 0, iXLen %4, iXLen 1)
+
+ ret <vscale x 8 x float> %a
+}
+
+declare <vscale x 16 x float> @llvm.riscv.vfwsub.nxv16f32.nxv16bf16.nxv16bf16(
+ <vscale x 16 x float>,
+ <vscale x 16 x bfloat>,
+ <vscale x 16 x bfloat>,
+ iXLen, iXLen);
+
+define <vscale x 16 x float> @intrinsic_vfwsub_vv_nxv16f32_nxv16bf16_nxv16bf16(<vscale x 16 x bfloat> %0, <vscale x 16 x bfloat> %1, iXLen %2) nounwind {
+; CHECK-LABEL: intrinsic_vfwsub_vv_nxv16f32_nxv16bf16_nxv16bf16:
+; CHECK: # %bb.0: # %entry
+; CHECK-NEXT: vsetvli zero, a0, e16alt, m4, ta, ma
+; CHECK-NEXT: vmv4r.v v16, v12
+; CHECK-NEXT: vmv4r.v v20, v8
+; CHECK-NEXT: fsrmi a0, 0
+; CHECK-NEXT: vfwsub.vv v8, v20, v16
+; CHECK-NEXT: fsrm a0
+; CHECK-NEXT: ret
+entry:
+ %a = call <vscale x 16 x float> @llvm.riscv.vfwsub.nxv16f32.nxv16bf16.nxv16bf16(
+ <vscale x 16 x float> poison,
+ <vscale x 16 x bfloat> %0,
+ <vscale x 16 x bfloat> %1,
+ iXLen 0, iXLen %2)
+
+ ret <vscale x 16 x float> %a
+}
+
+declare <vscale x 16 x float> @llvm.riscv.vfwsub.mask.nxv16f32.nxv16bf16.nxv16bf16(
+ <vscale x 16 x float>,
+ <vscale x 16 x bfloat>,
+ <vscale x 16 x bfloat>,
+ <vscale x 16 x i1>,
+ iXLen, iXLen, iXLen);
+
+define <vscale x 16 x float> @intrinsic_vfwsub_mask_vv_nxv16f32_nxv16bf16_nxv16bf16(<vscale x 16 x float> %0, <vscale x 16 x bfloat> %1, <vscale x 16 x bfloat> %2, <vscale x 16 x i1> %3, iXLen %4) nounwind {
+; CHECK-LABEL: intrinsic_vfwsub_mask_vv_nxv16f32_nxv16bf16_nxv16bf16:
+; CHECK: # %bb.0: # %entry
+; CHECK-NEXT: fsrmi a1, 0
+; CHECK-NEXT: vsetvli zero, a0, e16alt, m4, ta, mu
+; CHECK-NEXT: vfwsub.vv v8, v16, v20, v0.t
+; CHECK-NEXT: fsrm a1
+; CHECK-NEXT: ret
+entry:
+ %a = call <vscale x 16 x float> @llvm.riscv.vfwsub.mask.nxv16f32.nxv16bf16.nxv16bf16(
+ <vscale x 16 x float> %0,
+ <vscale x 16 x bfloat> %1,
+ <vscale x 16 x bfloat> %2,
+ <vscale x 16 x i1> %3,
+ iXLen 0, iXLen %4, iXLen 1)
+
+ ret <vscale x 16 x float> %a
+}
+
+declare <vscale x 1 x float> @llvm.riscv.vfwsub.nxv1f32.nxv1bf16.bf16(
+ <vscale x 1 x float>,
+ <vscale x 1 x bfloat>,
+ bfloat,
+ iXLen, iXLen);
+
+define <vscale x 1 x float> @intrinsic_vfwsub_vf_nxv1f32_nxv1bf16_bf16(<vscale x 1 x bfloat> %0, bfloat %1, iXLen %2) nounwind {
+; CHECK-LABEL: intrinsic_vfwsub_vf_nxv1f32_nxv1bf16_bf16:
+; CHECK: # %bb.0: # %entry
+; CHECK-NEXT: fsrmi a1, 0
+; CHECK-NEXT: vsetvli zero, a0, e16alt, mf4, ta, ma
+; CHECK-NEXT: vfwsub.vf v9, v8, fa0
+; CHECK-NEXT: fsrm a1
+; CHECK-NEXT: vmv1r.v v8, v9
+; CHECK-NEXT: ret
+entry:
+ %a = call <vscale x 1 x float> @llvm.riscv.vfwsub.nxv1f32.nxv1bf16.bf16(
+ <vscale x 1 x float> poison,
+ <vscale x 1 x bfloat> %0,
+ bfloat %1,
+ iXLen 0, iXLen %2)
+
+ ret <vscale x 1 x float> %a
+}
+
+declare <vscale x 1 x float> @llvm.riscv.vfwsub.mask.nxv1f32.nxv1bf16.bf16(
+ <vscale x 1 x float>,
+ <vscale x 1 x bfloat>,
+ bfloat,
+ <vscale x 1 x i1>,
+ iXLen, iXLen, iXLen);
+
+define <vscale x 1 x float> @intrinsic_vfwsub_mask_vf_nxv1f32_nxv1bf16_bf16(<vscale x 1 x float> %0, <vscale x 1 x bfloat> %1, bfloat %2, <vscale x 1 x i1> %3, iXLen %4) nounwind {
+; CHECK-LABEL: intrinsic_vfwsub_mask_vf_nxv1f32_nxv1bf16_bf16:
+; CHECK: # %bb.0: # %entry
+; CHECK-NEXT: fsrmi a1, 0
+; CHECK-NEXT: vsetvli zero, a0, e16alt, mf4, ta, mu
+; CHECK-NEXT: vfwsub.vf v8, v9, fa0, v0.t
+; CHECK-NEXT: fsrm a1
+; CHECK-NEXT: ret
+entry:
+ %a = call <vscale x 1 x float> @llvm.riscv.vfwsub.mask.nxv1f32.nxv1bf16.bf16(
+ <vscale x 1 x float> %0,
+ <vscale x 1 x bfloat> %1,
+ bfloat %2,
+ <vscale x 1 x i1> %3,
+ iXLen 0, iXLen %4, iXLen 1)
+
+ ret <vscale x 1 x float> %a
+}
+
+declare <vscale x 2 x float> @llvm.riscv.vfwsub.nxv2f32.nxv2bf16.bf16(
+ <vscale x 2 x float>,
+ <vscale x 2 x bfloat>,
+ bfloat,
+ iXLen, iXLen);
+
+define <vscale x 2 x float> @intrinsic_vfwsub_vf_nxv2f32_nxv2bf16_bf16(<vscale x 2 x bfloat> %0, bfloat %1, iXLen %2) nounwind {
+; CHECK-LABEL: intrinsic_vfwsub_vf_nxv2f32_nxv2bf16_bf16:
+; CHECK: # %bb.0: # %entry
+; CHECK-NEXT: fsrmi a1, 0
+; CHECK-NEXT: vsetvli zero, a0, e16alt, mf2, ta, ma
+; CHECK-NEXT: vfwsub.vf v9, v8, fa0
+; CHECK-NEXT: fsrm a1
+; CHECK-NEXT: vmv1r.v v8, v9
+; CHECK-NEXT: ret
+entry:
+ %a = call <vscale x 2 x float> @llvm.riscv.vfwsub.nxv2f32.nxv2bf16.bf16(
+ <vscale x 2 x float> poison,
+ <vscale x 2 x bfloat> %0,
+ bfloat %1,
+ iXLen 0, iXLen %2)
+
+ ret <vscale x 2 x float> %a
+}
+
+declare <vscale x 2 x float> @llvm.riscv.vfwsub.mask.nxv2f32.nxv2bf16.bf16(
+ <vscale x 2 x float>,
+ <vscale x 2 x bfloat>,
+ bfloat,
+ <vscale x 2 x i1>,
+ iXLen, iXLen, iXLen);
+
+define <vscale x 2 x float> @intrinsic_vfwsub_mask_vf_nxv2f32_nxv2bf16_bf16(<vscale x 2 x float> %0, <vscale x 2 x bfloat> %1, bfloat %2, <vscale x 2 x i1> %3, iXLen %4) nounwind {
+; CHECK-LABEL: intrinsic_vfwsub_mask_vf_nxv2f32_nxv2bf16_bf16:
+; CHECK: # %bb.0: # %entry
+; CHECK-NEXT: fsrmi a1, 0
+; CHECK-NEXT: vsetvli zero, a0, e16alt, mf2, ta, mu
+; CHECK-NEXT: vfwsub.vf v8, v9, fa0, v0.t
+; CHECK-NEXT: fsrm a1
+; CHECK-NEXT: ret
+entry:
+ %a = call <vscale x 2 x float> @llvm.riscv.vfwsub.mask.nxv2f32.nxv2bf16.bf16(
+ <vscale x 2 x float> %0,
+ <vscale x 2 x bfloat> %1,
+ bfloat %2,
+ <vscale x 2 x i1> %3,
+ iXLen 0, iXLen %4, iXLen 1)
+
+ ret <vscale x 2 x float> %a
+}
+
+declare <vscale x 4 x float> @llvm.riscv.vfwsub.nxv4f32.nxv4bf16.bf16(
+ <vscale x 4 x float>,
+ <vscale x 4 x bfloat>,
+ bfloat,
+ iXLen, iXLen);
+
+define <vscale x 4 x float> @intrinsic_vfwsub_vf_nxv4f32_nxv4bf16_bf16(<vscale x 4 x bfloat> %0, bfloat %1, iXLen %2) nounwind {
+; CHECK-LABEL: intrinsic_vfwsub_vf_nxv4f32_nxv4bf16_bf16:
+; CHECK: # %bb.0: # %entry
+; CHECK-NEXT: vsetvli zero, a0, e16alt, m1, ta, ma
+; CHECK-NEXT: vmv1r.v v10, v8
+; CHECK-NEXT: fsrmi a0, 0
+; CHECK-NEXT: vfwsub.vf v8, v10, fa0
+; CHECK-NEXT: fsrm a0
+; CHECK-NEXT: ret
+entry:
+ %a = call <vscale x 4 x float> @llvm.riscv.vfwsub.nxv4f32.nxv4bf16.bf16(
+ <vscale x 4 x float> poison,
+ <vscale x 4 x bfloat> %0,
+ bfloat %1,
+ iXLen 0, iXLen %2)
+
+ ret <vscale x 4 x float> %a
+}
+
+declare <vscale x 4 x float> @llvm.riscv.vfwsub.mask.nxv4f32.nxv4bf16.bf16(
+ <vscale x 4 x float>,
+ <vscale x 4 x bfloat>,
+ bfloat,
+ <vscale x 4 x i1>,
+ iXLen, iXLen, iXLen);
+
+define <vscale x 4 x float> @intrinsic_vfwsub_mask_vf_nxv4f32_nxv4bf16_bf16(<vscale x 4 x float> %0, <vscale x 4 x bfloat> %1, bfloat %2, <vscale x 4 x i1> %3, iXLen %4) nounwind {
+; CHECK-LABEL: intrinsic_vfwsub_mask_vf_nxv4f32_nxv4bf16_bf16:
+; CHECK: # %bb.0: # %entry
+; CHECK-NEXT: fsrmi a1, 0
+; CHECK-NEXT: vsetvli zero, a0, e16alt, m1, ta, mu
+; CHECK-NEXT: vfwsub.vf v8, v10, fa0, v0.t
+; CHECK-NEXT: fsrm a1
+; CHECK-NEXT: ret
+entry:
+ %a = call <vscale x 4 x float> @llvm.riscv.vfwsub.mask.nxv4f32.nxv4bf16.bf16(
+ <vscale x 4 x float> %0,
+ <vscale x 4 x bfloat> %1,
+ bfloat %2,
+ <vscale x 4 x i1> %3,
+ iXLen 0, iXLen %4, iXLen 1)
+
+ ret <vscale x 4 x float> %a
+}
+
+declare <vscale x 8 x float> @llvm.riscv.vfwsub.nxv8f32.nxv8bf16.bf16(
+ <vscale x 8 x float>,
+ <vscale x 8 x bfloat>,
+ bfloat,
+ iXLen, iXLen);
+
+define <vscale x 8 x float> @intrinsic_vfwsub_vf_nxv8f32_nxv8bf16_bf16(<vscale x 8 x bfloat> %0, bfloat %1, iXLen %2) nounwind {
+; CHECK-LABEL: intrinsic_vfwsub_vf_nxv8f32_nxv8bf16_bf16:
+; CHECK: # %bb.0: # %entry
+; CHECK-NEXT: vsetvli zero, a0, e16alt, m2, ta, ma
+; CHECK-NEXT: vmv2r.v v12, v8
+; CHECK-NEXT: fsrmi a0, 0
+; CHECK-NEXT: vfwsub.vf v8, v12, fa0
+; CHECK-NEXT: fsrm a0
+; CHECK-NEXT: ret
+entry:
+ %a = call <vscale x 8 x float> @llvm.riscv.vfwsub.nxv8f32.nxv8bf16.bf16(
+ <vscale x 8 x float> poison,
+ <vscale x 8 x bfloat> %0,
+ bfloat %1,
+ iXLen 0, iXLen %2)
+
+ ret <vscale x 8 x float> %a
+}
+
+declare <vscale x 8 x float> @llvm.riscv.vfwsub.mask.nxv8f32.nxv8bf16.bf16(
+ <vscale x 8 x float>,
+ <vscale x 8 x bfloat>,
+ bfloat,
+ <vscale x 8 x i1>,
+ iXLen, iXLen, iXLen);
+
+define <vscale x 8 x float> @intrinsic_vfwsub_mask_vf_nxv8f32_nxv8bf16_bf16(<vscale x 8 x float> %0, <vscale x 8 x bfloat> %1, bfloat %2, <vscale x 8 x i1> %3, iXLen %4) nounwind {
+; CHECK-LABEL: intrinsic_vfwsub_mask_vf_nxv8f32_nxv8bf16_bf16:
+; CHECK: # %bb.0: # %entry
+; CHECK-NEXT: fsrmi a1, 0
+; CHECK-NEXT: vsetvli zero, a0, e16alt, m2, ta, mu
+; CHECK-NEXT: vfwsub.vf v8, v12, fa0, v0.t
+; CHECK-NEXT: fsrm a1
+; CHECK-NEXT: ret
+entry:
+ %a = call <vscale x 8 x float> @llvm.riscv.vfwsub.mask.nxv8f32.nxv8bf16.bf16(
+ <vscale x 8 x float> %0,
+ <vscale x 8 x bfloat> %1,
+ bfloat %2,
+ <vscale x 8 x i1> %3,
+ iXLen 0, iXLen %4, iXLen 1)
+
+ ret <vscale x 8 x float> %a
+}
+
+declare <vscale x 16 x float> @llvm.riscv.vfwsub.nxv16f32.nxv16bf16.bf16(
+ <vscale x 16 x float>,
+ <vscale x 16 x bfloat>,
+ bfloat,
+ iXLen, iXLen);
+
+define <vscale x 16 x float> @intrinsic_vfwsub_vf_nxv16f32_nxv16bf16_bf16(<vscale x 16 x bfloat> %0, bfloat %1, iXLen %2) nounwind {
+; CHECK-LABEL: intrinsic_vfwsub_vf_nxv16f32_nxv16bf16_bf16:
+; CHECK: # %bb.0: # %entry
+; CHECK-NEXT: vsetvli zero, a0, e16alt, m4, ta, ma
+; CHECK-NEXT: vmv4r.v v16, v8
+; CHECK-NEXT: fsrmi a0, 0
+; CHECK-NEXT: vfwsub.vf v8, v16, fa0
+; CHECK-NEXT: fsrm a0
+; CHECK-NEXT: ret
+entry:
+ %a = call <vscale x 16 x float> @llvm.riscv.vfwsub.nxv16f32.nxv16bf16.bf16(
+ <vscale x 16 x float> poison,
+ <vscale x 16 x bfloat> %0,
+ bfloat %1,
+ iXLen 0, iXLen %2)
+
+ ret <vscale x 16 x float> %a
+}
+
+declare <vscale x 16 x float> @llvm.riscv.vfwsub.mask.nxv16f32.nxv16bf16.bf16(
+ <vscale x 16 x float>,
+ <vscale x 16 x bfloat>,
+ bfloat,
+ <vscale x 16 x i1>,
+ iXLen, iXLen, iXLen);
+
+define <vscale x 16 x float> @intrinsic_vfwsub_mask_vf_nxv16f32_nxv16bf16_bf16(<vscale x 16 x float> %0, <vscale x 16 x bfloat> %1, bfloat %2, <vscale x 16 x i1> %3, iXLen %4) nounwind {
+; CHECK-LABEL: intrinsic_vfwsub_mask_vf_nxv16f32_nxv16bf16_bf16:
+; CHECK: # %bb.0: # %entry
+; CHECK-NEXT: fsrmi a1, 0
+; CHECK-NEXT: vsetvli zero, a0, e16alt, m4, ta, mu
+; CHECK-NEXT: vfwsub.vf v8, v16, fa0, v0.t
+; CHECK-NEXT: fsrm a1
+; CHECK-NEXT: ret
+entry:
+ %a = call <vscale x 16 x float> @llvm.riscv.vfwsub.mask.nxv16f32.nxv16bf16.bf16(
+ <vscale x 16 x float> %0,
+ <vscale x 16 x bfloat> %1,
+ bfloat %2,
+ <vscale x 16 x i1> %3,
+ iXLen 0, iXLen %4, iXLen 1)
+
+ ret <vscale x 16 x float> %a
+}
+
diff --git a/llvm/test/CodeGen/RISCV/rvv/vfwsub-w-bf.ll b/llvm/test/CodeGen/RISCV/rvv/vfwsub-w-bf.ll
new file mode 100644
index 0000000..b22899a
--- /dev/null
+++ b/llvm/test/CodeGen/RISCV/rvv/vfwsub-w-bf.ll
@@ -0,0 +1,773 @@
+; NOTE: Assertions have been autogenerated by utils/update_llc_test_checks.py
+; RUN: sed 's/iXLen/i32/g' %s | llc -mtriple=riscv32 -mattr=+v,+experimental-zvfbfa \
+; RUN: -verify-machineinstrs -target-abi=ilp32d | FileCheck %s
+; RUN: sed 's/iXLen/i64/g' %s | llc -mtriple=riscv64 -mattr=+v,+experimental-zvfbfa \
+; RUN: -verify-machineinstrs -target-abi=lp64d | FileCheck %s
+
+declare <vscale x 1 x float> @llvm.riscv.vfwsub.w.nxv1f32.nxv1bf16(
+ <vscale x 1 x float>,
+ <vscale x 1 x float>,
+ <vscale x 1 x bfloat>,
+ iXLen, iXLen);
+
+define <vscale x 1 x float> @intrinsic_vfwsub.w_wv_nxv1f32_nxv1f32_nxv1bf16(<vscale x 1 x float> %0, <vscale x 1 x bfloat> %1, iXLen %2) nounwind {
+; CHECK-LABEL: intrinsic_vfwsub.w_wv_nxv1f32_nxv1f32_nxv1bf16:
+; CHECK: # %bb.0: # %entry
+; CHECK-NEXT: fsrmi a1, 0
+; CHECK-NEXT: vsetvli zero, a0, e16alt, mf4, ta, ma
+; CHECK-NEXT: vfwsub.wv v8, v8, v9
+; CHECK-NEXT: fsrm a1
+; CHECK-NEXT: ret
+entry:
+ %a = call <vscale x 1 x float> @llvm.riscv.vfwsub.w.nxv1f32.nxv1bf16(
+ <vscale x 1 x float> poison,
+ <vscale x 1 x float> %0,
+ <vscale x 1 x bfloat> %1,
+ iXLen 0, iXLen %2)
+
+ ret <vscale x 1 x float> %a
+}
+
+declare <vscale x 1 x float> @llvm.riscv.vfwsub.w.mask.nxv1f32.nxv1bf16(
+ <vscale x 1 x float>,
+ <vscale x 1 x float>,
+ <vscale x 1 x bfloat>,
+ <vscale x 1 x i1>,
+ iXLen, iXLen, iXLen);
+
+define <vscale x 1 x float> @intrinsic_vfwsub.w_mask_wv_nxv1f32_nxv1f32_nxv1bf16(<vscale x 1 x float> %0, <vscale x 1 x float> %1, <vscale x 1 x bfloat> %2, <vscale x 1 x i1> %3, iXLen %4) nounwind {
+; CHECK-LABEL: intrinsic_vfwsub.w_mask_wv_nxv1f32_nxv1f32_nxv1bf16:
+; CHECK: # %bb.0: # %entry
+; CHECK-NEXT: fsrmi a1, 0
+; CHECK-NEXT: vsetvli zero, a0, e16alt, mf4, ta, mu
+; CHECK-NEXT: vfwsub.wv v8, v9, v10, v0.t
+; CHECK-NEXT: fsrm a1
+; CHECK-NEXT: ret
+entry:
+ %a = call <vscale x 1 x float> @llvm.riscv.vfwsub.w.mask.nxv1f32.nxv1bf16(
+ <vscale x 1 x float> %0,
+ <vscale x 1 x float> %1,
+ <vscale x 1 x bfloat> %2,
+ <vscale x 1 x i1> %3,
+ iXLen 0, iXLen %4, iXLen 1)
+
+ ret <vscale x 1 x float> %a
+}
+
+declare <vscale x 2 x float> @llvm.riscv.vfwsub.w.nxv2f32.nxv2bf16(
+ <vscale x 2 x float>,
+ <vscale x 2 x float>,
+ <vscale x 2 x bfloat>,
+ iXLen, iXLen);
+
+define <vscale x 2 x float> @intrinsic_vfwsub.w_wv_nxv2f32_nxv2f32_nxv2bf16(<vscale x 2 x float> %0, <vscale x 2 x bfloat> %1, iXLen %2) nounwind {
+; CHECK-LABEL: intrinsic_vfwsub.w_wv_nxv2f32_nxv2f32_nxv2bf16:
+; CHECK: # %bb.0: # %entry
+; CHECK-NEXT: fsrmi a1, 0
+; CHECK-NEXT: vsetvli zero, a0, e16alt, mf2, ta, ma
+; CHECK-NEXT: vfwsub.wv v8, v8, v9
+; CHECK-NEXT: fsrm a1
+; CHECK-NEXT: ret
+entry:
+ %a = call <vscale x 2 x float> @llvm.riscv.vfwsub.w.nxv2f32.nxv2bf16(
+ <vscale x 2 x float> poison,
+ <vscale x 2 x float> %0,
+ <vscale x 2 x bfloat> %1,
+ iXLen 0, iXLen %2)
+
+ ret <vscale x 2 x float> %a
+}
+
+declare <vscale x 2 x float> @llvm.riscv.vfwsub.w.mask.nxv2f32.nxv2bf16(
+ <vscale x 2 x float>,
+ <vscale x 2 x float>,
+ <vscale x 2 x bfloat>,
+ <vscale x 2 x i1>,
+ iXLen, iXLen, iXLen);
+
+define <vscale x 2 x float> @intrinsic_vfwsub.w_mask_wv_nxv2f32_nxv2f32_nxv2bf16(<vscale x 2 x float> %0, <vscale x 2 x float> %1, <vscale x 2 x bfloat> %2, <vscale x 2 x i1> %3, iXLen %4) nounwind {
+; CHECK-LABEL: intrinsic_vfwsub.w_mask_wv_nxv2f32_nxv2f32_nxv2bf16:
+; CHECK: # %bb.0: # %entry
+; CHECK-NEXT: fsrmi a1, 0
+; CHECK-NEXT: vsetvli zero, a0, e16alt, mf2, ta, mu
+; CHECK-NEXT: vfwsub.wv v8, v9, v10, v0.t
+; CHECK-NEXT: fsrm a1
+; CHECK-NEXT: ret
+entry:
+ %a = call <vscale x 2 x float> @llvm.riscv.vfwsub.w.mask.nxv2f32.nxv2bf16(
+ <vscale x 2 x float> %0,
+ <vscale x 2 x float> %1,
+ <vscale x 2 x bfloat> %2,
+ <vscale x 2 x i1> %3,
+ iXLen 0, iXLen %4, iXLen 1)
+
+ ret <vscale x 2 x float> %a
+}
+
+declare <vscale x 4 x float> @llvm.riscv.vfwsub.w.nxv4f32.nxv4bf16(
+ <vscale x 4 x float>,
+ <vscale x 4 x float>,
+ <vscale x 4 x bfloat>,
+ iXLen, iXLen);
+
+define <vscale x 4 x float> @intrinsic_vfwsub.w_wv_nxv4f32_nxv4f32_nxv4bf16(<vscale x 4 x float> %0, <vscale x 4 x bfloat> %1, iXLen %2) nounwind {
+; CHECK-LABEL: intrinsic_vfwsub.w_wv_nxv4f32_nxv4f32_nxv4bf16:
+; CHECK: # %bb.0: # %entry
+; CHECK-NEXT: fsrmi a1, 0
+; CHECK-NEXT: vsetvli zero, a0, e16alt, m1, ta, ma
+; CHECK-NEXT: vfwsub.wv v8, v8, v10
+; CHECK-NEXT: fsrm a1
+; CHECK-NEXT: ret
+entry:
+ %a = call <vscale x 4 x float> @llvm.riscv.vfwsub.w.nxv4f32.nxv4bf16(
+ <vscale x 4 x float> poison,
+ <vscale x 4 x float> %0,
+ <vscale x 4 x bfloat> %1,
+ iXLen 0, iXLen %2)
+
+ ret <vscale x 4 x float> %a
+}
+
+declare <vscale x 4 x float> @llvm.riscv.vfwsub.w.mask.nxv4f32.nxv4bf16(
+ <vscale x 4 x float>,
+ <vscale x 4 x float>,
+ <vscale x 4 x bfloat>,
+ <vscale x 4 x i1>,
+ iXLen, iXLen, iXLen);
+
+define <vscale x 4 x float> @intrinsic_vfwsub.w_mask_wv_nxv4f32_nxv4f32_nxv4bf16(<vscale x 4 x float> %0, <vscale x 4 x float> %1, <vscale x 4 x bfloat> %2, <vscale x 4 x i1> %3, iXLen %4) nounwind {
+; CHECK-LABEL: intrinsic_vfwsub.w_mask_wv_nxv4f32_nxv4f32_nxv4bf16:
+; CHECK: # %bb.0: # %entry
+; CHECK-NEXT: fsrmi a1, 0
+; CHECK-NEXT: vsetvli zero, a0, e16alt, m1, ta, mu
+; CHECK-NEXT: vfwsub.wv v8, v10, v12, v0.t
+; CHECK-NEXT: fsrm a1
+; CHECK-NEXT: ret
+entry:
+ %a = call <vscale x 4 x float> @llvm.riscv.vfwsub.w.mask.nxv4f32.nxv4bf16(
+ <vscale x 4 x float> %0,
+ <vscale x 4 x float> %1,
+ <vscale x 4 x bfloat> %2,
+ <vscale x 4 x i1> %3,
+ iXLen 0, iXLen %4, iXLen 1)
+
+ ret <vscale x 4 x float> %a
+}
+
+declare <vscale x 8 x float> @llvm.riscv.vfwsub.w.nxv8f32.nxv8bf16(
+ <vscale x 8 x float>,
+ <vscale x 8 x float>,
+ <vscale x 8 x bfloat>,
+ iXLen, iXLen);
+
+define <vscale x 8 x float> @intrinsic_vfwsub.w_wv_nxv8f32_nxv8f32_nxv8bf16(<vscale x 8 x float> %0, <vscale x 8 x bfloat> %1, iXLen %2) nounwind {
+; CHECK-LABEL: intrinsic_vfwsub.w_wv_nxv8f32_nxv8f32_nxv8bf16:
+; CHECK: # %bb.0: # %entry
+; CHECK-NEXT: fsrmi a1, 0
+; CHECK-NEXT: vsetvli zero, a0, e16alt, m2, ta, ma
+; CHECK-NEXT: vfwsub.wv v8, v8, v12
+; CHECK-NEXT: fsrm a1
+; CHECK-NEXT: ret
+entry:
+ %a = call <vscale x 8 x float> @llvm.riscv.vfwsub.w.nxv8f32.nxv8bf16(
+ <vscale x 8 x float> poison,
+ <vscale x 8 x float> %0,
+ <vscale x 8 x bfloat> %1,
+ iXLen 0, iXLen %2)
+
+ ret <vscale x 8 x float> %a
+}
+
+declare <vscale x 8 x float> @llvm.riscv.vfwsub.w.mask.nxv8f32.nxv8bf16(
+ <vscale x 8 x float>,
+ <vscale x 8 x float>,
+ <vscale x 8 x bfloat>,
+ <vscale x 8 x i1>,
+ iXLen, iXLen, iXLen);
+
+define <vscale x 8 x float> @intrinsic_vfwsub.w_mask_wv_nxv8f32_nxv8f32_nxv8bf16(<vscale x 8 x float> %0, <vscale x 8 x float> %1, <vscale x 8 x bfloat> %2, <vscale x 8 x i1> %3, iXLen %4) nounwind {
+; CHECK-LABEL: intrinsic_vfwsub.w_mask_wv_nxv8f32_nxv8f32_nxv8bf16:
+; CHECK: # %bb.0: # %entry
+; CHECK-NEXT: fsrmi a1, 0
+; CHECK-NEXT: vsetvli zero, a0, e16alt, m2, ta, mu
+; CHECK-NEXT: vfwsub.wv v8, v12, v16, v0.t
+; CHECK-NEXT: fsrm a1
+; CHECK-NEXT: ret
+entry:
+ %a = call <vscale x 8 x float> @llvm.riscv.vfwsub.w.mask.nxv8f32.nxv8bf16(
+ <vscale x 8 x float> %0,
+ <vscale x 8 x float> %1,
+ <vscale x 8 x bfloat> %2,
+ <vscale x 8 x i1> %3,
+ iXLen 0, iXLen %4, iXLen 1)
+
+ ret <vscale x 8 x float> %a
+}
+
+declare <vscale x 16 x float> @llvm.riscv.vfwsub.w.nxv16f32.nxv16bf16(
+ <vscale x 16 x float>,
+ <vscale x 16 x float>,
+ <vscale x 16 x bfloat>,
+ iXLen, iXLen);
+
+define <vscale x 16 x float> @intrinsic_vfwsub.w_wv_nxv16f32_nxv16f32_nxv16bf16(<vscale x 16 x float> %0, <vscale x 16 x bfloat> %1, iXLen %2) nounwind {
+; CHECK-LABEL: intrinsic_vfwsub.w_wv_nxv16f32_nxv16f32_nxv16bf16:
+; CHECK: # %bb.0: # %entry
+; CHECK-NEXT: fsrmi a1, 0
+; CHECK-NEXT: vsetvli zero, a0, e16alt, m4, ta, ma
+; CHECK-NEXT: vfwsub.wv v8, v8, v16
+; CHECK-NEXT: fsrm a1
+; CHECK-NEXT: ret
+entry:
+ %a = call <vscale x 16 x float> @llvm.riscv.vfwsub.w.nxv16f32.nxv16bf16(
+ <vscale x 16 x float> poison,
+ <vscale x 16 x float> %0,
+ <vscale x 16 x bfloat> %1,
+ iXLen 0, iXLen %2)
+
+ ret <vscale x 16 x float> %a
+}
+
+declare <vscale x 16 x float> @llvm.riscv.vfwsub.w.mask.nxv16f32.nxv16bf16(
+ <vscale x 16 x float>,
+ <vscale x 16 x float>,
+ <vscale x 16 x bfloat>,
+ <vscale x 16 x i1>,
+ iXLen, iXLen, iXLen);
+
+define <vscale x 16 x float> @intrinsic_vfwsub.w_mask_wv_nxv16f32_nxv16f32_nxv16bf16(<vscale x 16 x float> %0, <vscale x 16 x float> %1, <vscale x 16 x bfloat> %2, <vscale x 16 x i1> %3, iXLen %4) nounwind {
+; CHECK-LABEL: intrinsic_vfwsub.w_mask_wv_nxv16f32_nxv16f32_nxv16bf16:
+; CHECK: # %bb.0: # %entry
+; CHECK-NEXT: vl4re16.v v24, (a0)
+; CHECK-NEXT: fsrmi a0, 0
+; CHECK-NEXT: vsetvli zero, a1, e16alt, m4, ta, mu
+; CHECK-NEXT: vfwsub.wv v8, v16, v24, v0.t
+; CHECK-NEXT: fsrm a0
+; CHECK-NEXT: ret
+entry:
+ %a = call <vscale x 16 x float> @llvm.riscv.vfwsub.w.mask.nxv16f32.nxv16bf16(
+ <vscale x 16 x float> %0,
+ <vscale x 16 x float> %1,
+ <vscale x 16 x bfloat> %2,
+ <vscale x 16 x i1> %3,
+ iXLen 0, iXLen %4, iXLen 1)
+
+ ret <vscale x 16 x float> %a
+}
+
+declare <vscale x 1 x float> @llvm.riscv.vfwsub.w.nxv1f32.bf16(
+ <vscale x 1 x float>,
+ <vscale x 1 x float>,
+ bfloat,
+ iXLen, iXLen);
+
+define <vscale x 1 x float> @intrinsic_vfwsub.w_wf_nxv1f32_nxv1f32_bf16(<vscale x 1 x float> %0, bfloat %1, iXLen %2) nounwind {
+; CHECK-LABEL: intrinsic_vfwsub.w_wf_nxv1f32_nxv1f32_bf16:
+; CHECK: # %bb.0: # %entry
+; CHECK-NEXT: fsrmi a1, 0
+; CHECK-NEXT: vsetvli zero, a0, e16alt, mf4, ta, ma
+; CHECK-NEXT: vfwsub.wf v8, v8, fa0
+; CHECK-NEXT: fsrm a1
+; CHECK-NEXT: ret
+entry:
+ %a = call <vscale x 1 x float> @llvm.riscv.vfwsub.w.nxv1f32.bf16(
+ <vscale x 1 x float> poison,
+ <vscale x 1 x float> %0,
+ bfloat %1,
+ iXLen 0, iXLen %2)
+
+ ret <vscale x 1 x float> %a
+}
+
+declare <vscale x 1 x float> @llvm.riscv.vfwsub.w.mask.nxv1f32.bf16(
+ <vscale x 1 x float>,
+ <vscale x 1 x float>,
+ bfloat,
+ <vscale x 1 x i1>,
+ iXLen, iXLen, iXLen);
+
+define <vscale x 1 x float> @intrinsic_vfwsub.w_mask_wf_nxv1f32_nxv1f32_bf16(<vscale x 1 x float> %0, <vscale x 1 x float> %1, bfloat %2, <vscale x 1 x i1> %3, iXLen %4) nounwind {
+; CHECK-LABEL: intrinsic_vfwsub.w_mask_wf_nxv1f32_nxv1f32_bf16:
+; CHECK: # %bb.0: # %entry
+; CHECK-NEXT: fsrmi a1, 0
+; CHECK-NEXT: vsetvli zero, a0, e16alt, mf4, ta, mu
+; CHECK-NEXT: vfwsub.wf v8, v9, fa0, v0.t
+; CHECK-NEXT: fsrm a1
+; CHECK-NEXT: ret
+entry:
+ %a = call <vscale x 1 x float> @llvm.riscv.vfwsub.w.mask.nxv1f32.bf16(
+ <vscale x 1 x float> %0,
+ <vscale x 1 x float> %1,
+ bfloat %2,
+ <vscale x 1 x i1> %3,
+ iXLen 0, iXLen %4, iXLen 1)
+
+ ret <vscale x 1 x float> %a
+}
+
+declare <vscale x 2 x float> @llvm.riscv.vfwsub.w.nxv2f32.bf16(
+ <vscale x 2 x float>,
+ <vscale x 2 x float>,
+ bfloat,
+ iXLen, iXLen);
+
+define <vscale x 2 x float> @intrinsic_vfwsub.w_wf_nxv2f32_nxv2f32_bf16(<vscale x 2 x float> %0, bfloat %1, iXLen %2) nounwind {
+; CHECK-LABEL: intrinsic_vfwsub.w_wf_nxv2f32_nxv2f32_bf16:
+; CHECK: # %bb.0: # %entry
+; CHECK-NEXT: fsrmi a1, 0
+; CHECK-NEXT: vsetvli zero, a0, e16alt, mf2, ta, ma
+; CHECK-NEXT: vfwsub.wf v8, v8, fa0
+; CHECK-NEXT: fsrm a1
+; CHECK-NEXT: ret
+entry:
+ %a = call <vscale x 2 x float> @llvm.riscv.vfwsub.w.nxv2f32.bf16(
+ <vscale x 2 x float> poison,
+ <vscale x 2 x float> %0,
+ bfloat %1,
+ iXLen 0, iXLen %2)
+
+ ret <vscale x 2 x float> %a
+}
+
+declare <vscale x 2 x float> @llvm.riscv.vfwsub.w.mask.nxv2f32.bf16(
+ <vscale x 2 x float>,
+ <vscale x 2 x float>,
+ bfloat,
+ <vscale x 2 x i1>,
+ iXLen, iXLen, iXLen);
+
+define <vscale x 2 x float> @intrinsic_vfwsub.w_mask_wf_nxv2f32_nxv2f32_bf16(<vscale x 2 x float> %0, <vscale x 2 x float> %1, bfloat %2, <vscale x 2 x i1> %3, iXLen %4) nounwind {
+; CHECK-LABEL: intrinsic_vfwsub.w_mask_wf_nxv2f32_nxv2f32_bf16:
+; CHECK: # %bb.0: # %entry
+; CHECK-NEXT: fsrmi a1, 0
+; CHECK-NEXT: vsetvli zero, a0, e16alt, mf2, ta, mu
+; CHECK-NEXT: vfwsub.wf v8, v9, fa0, v0.t
+; CHECK-NEXT: fsrm a1
+; CHECK-NEXT: ret
+entry:
+ %a = call <vscale x 2 x float> @llvm.riscv.vfwsub.w.mask.nxv2f32.bf16(
+ <vscale x 2 x float> %0,
+ <vscale x 2 x float> %1,
+ bfloat %2,
+ <vscale x 2 x i1> %3,
+ iXLen 0, iXLen %4, iXLen 1)
+
+ ret <vscale x 2 x float> %a
+}
+
+declare <vscale x 4 x float> @llvm.riscv.vfwsub.w.nxv4f32.bf16(
+ <vscale x 4 x float>,
+ <vscale x 4 x float>,
+ bfloat,
+ iXLen, iXLen);
+
+define <vscale x 4 x float> @intrinsic_vfwsub.w_wf_nxv4f32_nxv4f32_bf16(<vscale x 4 x float> %0, bfloat %1, iXLen %2) nounwind {
+; CHECK-LABEL: intrinsic_vfwsub.w_wf_nxv4f32_nxv4f32_bf16:
+; CHECK: # %bb.0: # %entry
+; CHECK-NEXT: fsrmi a1, 0
+; CHECK-NEXT: vsetvli zero, a0, e16alt, m1, ta, ma
+; CHECK-NEXT: vfwsub.wf v8, v8, fa0
+; CHECK-NEXT: fsrm a1
+; CHECK-NEXT: ret
+entry:
+ %a = call <vscale x 4 x float> @llvm.riscv.vfwsub.w.nxv4f32.bf16(
+ <vscale x 4 x float> poison,
+ <vscale x 4 x float> %0,
+ bfloat %1,
+ iXLen 0, iXLen %2)
+
+ ret <vscale x 4 x float> %a
+}
+
+declare <vscale x 4 x float> @llvm.riscv.vfwsub.w.mask.nxv4f32.bf16(
+ <vscale x 4 x float>,
+ <vscale x 4 x float>,
+ bfloat,
+ <vscale x 4 x i1>,
+ iXLen, iXLen, iXLen);
+
+define <vscale x 4 x float> @intrinsic_vfwsub.w_mask_wf_nxv4f32_nxv4f32_bf16(<vscale x 4 x float> %0, <vscale x 4 x float> %1, bfloat %2, <vscale x 4 x i1> %3, iXLen %4) nounwind {
+; CHECK-LABEL: intrinsic_vfwsub.w_mask_wf_nxv4f32_nxv4f32_bf16:
+; CHECK: # %bb.0: # %entry
+; CHECK-NEXT: fsrmi a1, 0
+; CHECK-NEXT: vsetvli zero, a0, e16alt, m1, ta, mu
+; CHECK-NEXT: vfwsub.wf v8, v10, fa0, v0.t
+; CHECK-NEXT: fsrm a1
+; CHECK-NEXT: ret
+entry:
+ %a = call <vscale x 4 x float> @llvm.riscv.vfwsub.w.mask.nxv4f32.bf16(
+ <vscale x 4 x float> %0,
+ <vscale x 4 x float> %1,
+ bfloat %2,
+ <vscale x 4 x i1> %3,
+ iXLen 0, iXLen %4, iXLen 1)
+
+ ret <vscale x 4 x float> %a
+}
+
+declare <vscale x 8 x float> @llvm.riscv.vfwsub.w.nxv8f32.bf16(
+ <vscale x 8 x float>,
+ <vscale x 8 x float>,
+ bfloat,
+ iXLen, iXLen);
+
+define <vscale x 8 x float> @intrinsic_vfwsub.w_wf_nxv8f32_nxv8f32_bf16(<vscale x 8 x float> %0, bfloat %1, iXLen %2) nounwind {
+; CHECK-LABEL: intrinsic_vfwsub.w_wf_nxv8f32_nxv8f32_bf16:
+; CHECK: # %bb.0: # %entry
+; CHECK-NEXT: fsrmi a1, 0
+; CHECK-NEXT: vsetvli zero, a0, e16alt, m2, ta, ma
+; CHECK-NEXT: vfwsub.wf v8, v8, fa0
+; CHECK-NEXT: fsrm a1
+; CHECK-NEXT: ret
+entry:
+ %a = call <vscale x 8 x float> @llvm.riscv.vfwsub.w.nxv8f32.bf16(
+ <vscale x 8 x float> poison,
+ <vscale x 8 x float> %0,
+ bfloat %1,
+ iXLen 0, iXLen %2)
+
+ ret <vscale x 8 x float> %a
+}
+
+declare <vscale x 8 x float> @llvm.riscv.vfwsub.w.mask.nxv8f32.bf16(
+ <vscale x 8 x float>,
+ <vscale x 8 x float>,
+ bfloat,
+ <vscale x 8 x i1>,
+ iXLen, iXLen, iXLen);
+
+define <vscale x 8 x float> @intrinsic_vfwsub.w_mask_wf_nxv8f32_nxv8f32_bf16(<vscale x 8 x float> %0, <vscale x 8 x float> %1, bfloat %2, <vscale x 8 x i1> %3, iXLen %4) nounwind {
+; CHECK-LABEL: intrinsic_vfwsub.w_mask_wf_nxv8f32_nxv8f32_bf16:
+; CHECK: # %bb.0: # %entry
+; CHECK-NEXT: fsrmi a1, 0
+; CHECK-NEXT: vsetvli zero, a0, e16alt, m2, ta, mu
+; CHECK-NEXT: vfwsub.wf v8, v12, fa0, v0.t
+; CHECK-NEXT: fsrm a1
+; CHECK-NEXT: ret
+entry:
+ %a = call <vscale x 8 x float> @llvm.riscv.vfwsub.w.mask.nxv8f32.bf16(
+ <vscale x 8 x float> %0,
+ <vscale x 8 x float> %1,
+ bfloat %2,
+ <vscale x 8 x i1> %3,
+ iXLen 0, iXLen %4, iXLen 1)
+
+ ret <vscale x 8 x float> %a
+}
+
+declare <vscale x 16 x float> @llvm.riscv.vfwsub.w.nxv16f32.bf16(
+ <vscale x 16 x float>,
+ <vscale x 16 x float>,
+ bfloat,
+ iXLen, iXLen);
+
+define <vscale x 16 x float> @intrinsic_vfwsub.w_wf_nxv16f32_nxv16f32_bf16(<vscale x 16 x float> %0, bfloat %1, iXLen %2) nounwind {
+; CHECK-LABEL: intrinsic_vfwsub.w_wf_nxv16f32_nxv16f32_bf16:
+; CHECK: # %bb.0: # %entry
+; CHECK-NEXT: fsrmi a1, 0
+; CHECK-NEXT: vsetvli zero, a0, e16alt, m4, ta, ma
+; CHECK-NEXT: vfwsub.wf v8, v8, fa0
+; CHECK-NEXT: fsrm a1
+; CHECK-NEXT: ret
+entry:
+ %a = call <vscale x 16 x float> @llvm.riscv.vfwsub.w.nxv16f32.bf16(
+ <vscale x 16 x float> poison,
+ <vscale x 16 x float> %0,
+ bfloat %1,
+ iXLen 0, iXLen %2)
+
+ ret <vscale x 16 x float> %a
+}
+
+declare <vscale x 16 x float> @llvm.riscv.vfwsub.w.mask.nxv16f32.bf16(
+ <vscale x 16 x float>,
+ <vscale x 16 x float>,
+ bfloat,
+ <vscale x 16 x i1>,
+ iXLen, iXLen, iXLen);
+
+define <vscale x 16 x float> @intrinsic_vfwsub.w_mask_wf_nxv16f32_nxv16f32_bf16(<vscale x 16 x float> %0, <vscale x 16 x float> %1, bfloat %2, <vscale x 16 x i1> %3, iXLen %4) nounwind {
+; CHECK-LABEL: intrinsic_vfwsub.w_mask_wf_nxv16f32_nxv16f32_bf16:
+; CHECK: # %bb.0: # %entry
+; CHECK-NEXT: fsrmi a1, 0
+; CHECK-NEXT: vsetvli zero, a0, e16alt, m4, ta, mu
+; CHECK-NEXT: vfwsub.wf v8, v16, fa0, v0.t
+; CHECK-NEXT: fsrm a1
+; CHECK-NEXT: ret
+entry:
+ %a = call <vscale x 16 x float> @llvm.riscv.vfwsub.w.mask.nxv16f32.bf16(
+ <vscale x 16 x float> %0,
+ <vscale x 16 x float> %1,
+ bfloat %2,
+ <vscale x 16 x i1> %3,
+ iXLen 0, iXLen %4, iXLen 1)
+
+ ret <vscale x 16 x float> %a
+}
+
+define <vscale x 1 x float> @intrinsic_vfwsub.w_mask_wv_tie_nxv1f32_nxv1f32_nxv1bf16(<vscale x 1 x float> %0, <vscale x 1 x bfloat> %1, <vscale x 1 x i1> %2, iXLen %3) nounwind {
+; CHECK-LABEL: intrinsic_vfwsub.w_mask_wv_tie_nxv1f32_nxv1f32_nxv1bf16:
+; CHECK: # %bb.0: # %entry
+; CHECK-NEXT: fsrmi a1, 0
+; CHECK-NEXT: vsetvli zero, a0, e16alt, mf4, ta, mu
+; CHECK-NEXT: vfwsub.wv v8, v8, v9, v0.t
+; CHECK-NEXT: fsrm a1
+; CHECK-NEXT: ret
+entry:
+ %a = call <vscale x 1 x float> @llvm.riscv.vfwsub.w.mask.nxv1f32.nxv1bf16(
+ <vscale x 1 x float> %0,
+ <vscale x 1 x float> %0,
+ <vscale x 1 x bfloat> %1,
+ <vscale x 1 x i1> %2,
+ iXLen 0, iXLen %3, iXLen 1)
+
+ ret <vscale x 1 x float> %a
+}
+
+define <vscale x 2 x float> @intrinsic_vfwsub.w_mask_wv_tie_nxv2f32_nxv2f32_nxv2bf16(<vscale x 2 x float> %0, <vscale x 2 x bfloat> %1, <vscale x 2 x i1> %2, iXLen %3) nounwind {
+; CHECK-LABEL: intrinsic_vfwsub.w_mask_wv_tie_nxv2f32_nxv2f32_nxv2bf16:
+; CHECK: # %bb.0: # %entry
+; CHECK-NEXT: fsrmi a1, 0
+; CHECK-NEXT: vsetvli zero, a0, e16alt, mf2, ta, mu
+; CHECK-NEXT: vfwsub.wv v8, v8, v9, v0.t
+; CHECK-NEXT: fsrm a1
+; CHECK-NEXT: ret
+entry:
+ %a = call <vscale x 2 x float> @llvm.riscv.vfwsub.w.mask.nxv2f32.nxv2bf16(
+ <vscale x 2 x float> %0,
+ <vscale x 2 x float> %0,
+ <vscale x 2 x bfloat> %1,
+ <vscale x 2 x i1> %2,
+ iXLen 0, iXLen %3, iXLen 1)
+
+ ret <vscale x 2 x float> %a
+}
+
+define <vscale x 4 x float> @intrinsic_vfwsub.w_mask_wv_tie_nxv4f32_nxv4f32_nxv4bf16(<vscale x 4 x float> %0, <vscale x 4 x bfloat> %1, <vscale x 4 x i1> %2, iXLen %3) nounwind {
+; CHECK-LABEL: intrinsic_vfwsub.w_mask_wv_tie_nxv4f32_nxv4f32_nxv4bf16:
+; CHECK: # %bb.0: # %entry
+; CHECK-NEXT: fsrmi a1, 0
+; CHECK-NEXT: vsetvli zero, a0, e16alt, m1, ta, mu
+; CHECK-NEXT: vfwsub.wv v8, v8, v10, v0.t
+; CHECK-NEXT: fsrm a1
+; CHECK-NEXT: ret
+entry:
+ %a = call <vscale x 4 x float> @llvm.riscv.vfwsub.w.mask.nxv4f32.nxv4bf16(
+ <vscale x 4 x float> %0,
+ <vscale x 4 x float> %0,
+ <vscale x 4 x bfloat> %1,
+ <vscale x 4 x i1> %2,
+ iXLen 0, iXLen %3, iXLen 1)
+
+ ret <vscale x 4 x float> %a
+}
+
+define <vscale x 8 x float> @intrinsic_vfwsub.w_mask_wv_tie_nxv8f32_nxv8f32_nxv8bf16(<vscale x 8 x float> %0, <vscale x 8 x bfloat> %1, <vscale x 8 x i1> %2, iXLen %3) nounwind {
+; CHECK-LABEL: intrinsic_vfwsub.w_mask_wv_tie_nxv8f32_nxv8f32_nxv8bf16:
+; CHECK: # %bb.0: # %entry
+; CHECK-NEXT: fsrmi a1, 0
+; CHECK-NEXT: vsetvli zero, a0, e16alt, m2, ta, mu
+; CHECK-NEXT: vfwsub.wv v8, v8, v12, v0.t
+; CHECK-NEXT: fsrm a1
+; CHECK-NEXT: ret
+entry:
+ %a = call <vscale x 8 x float> @llvm.riscv.vfwsub.w.mask.nxv8f32.nxv8bf16(
+ <vscale x 8 x float> %0,
+ <vscale x 8 x float> %0,
+ <vscale x 8 x bfloat> %1,
+ <vscale x 8 x i1> %2,
+ iXLen 0, iXLen %3, iXLen 1)
+
+ ret <vscale x 8 x float> %a
+}
+
+define <vscale x 16 x float> @intrinsic_vfwsub.w_mask_wv_tie_nxv16f32_nxv16f32_nxv16bf16(<vscale x 16 x float> %0, <vscale x 16 x bfloat> %1, <vscale x 16 x i1> %2, iXLen %3) nounwind {
+; CHECK-LABEL: intrinsic_vfwsub.w_mask_wv_tie_nxv16f32_nxv16f32_nxv16bf16:
+; CHECK: # %bb.0: # %entry
+; CHECK-NEXT: fsrmi a1, 0
+; CHECK-NEXT: vsetvli zero, a0, e16alt, m4, ta, mu
+; CHECK-NEXT: vfwsub.wv v8, v8, v16, v0.t
+; CHECK-NEXT: fsrm a1
+; CHECK-NEXT: ret
+entry:
+ %a = call <vscale x 16 x float> @llvm.riscv.vfwsub.w.mask.nxv16f32.nxv16bf16(
+ <vscale x 16 x float> %0,
+ <vscale x 16 x float> %0,
+ <vscale x 16 x bfloat> %1,
+ <vscale x 16 x i1> %2,
+ iXLen 0, iXLen %3, iXLen 1)
+
+ ret <vscale x 16 x float> %a
+}
+
+define <vscale x 1 x float> @intrinsic_vfwsub.w_mask_wf_tie_nxv1f32_nxv1f32_bf16(<vscale x 1 x float> %0, bfloat %1, <vscale x 1 x i1> %2, iXLen %3) nounwind {
+; CHECK-LABEL: intrinsic_vfwsub.w_mask_wf_tie_nxv1f32_nxv1f32_bf16:
+; CHECK: # %bb.0: # %entry
+; CHECK-NEXT: fsrmi a1, 0
+; CHECK-NEXT: vsetvli zero, a0, e16alt, mf4, ta, mu
+; CHECK-NEXT: vfwsub.wf v8, v8, fa0, v0.t
+; CHECK-NEXT: fsrm a1
+; CHECK-NEXT: ret
+entry:
+ %a = call <vscale x 1 x float> @llvm.riscv.vfwsub.w.mask.nxv1f32.bf16(
+ <vscale x 1 x float> %0,
+ <vscale x 1 x float> %0,
+ bfloat %1,
+ <vscale x 1 x i1> %2,
+ iXLen 0, iXLen %3, iXLen 1)
+
+ ret <vscale x 1 x float> %a
+}
+
+define <vscale x 2 x float> @intrinsic_vfwsub.w_mask_wf_tie_nxv2f32_nxv2f32_bf16(<vscale x 2 x float> %0, bfloat %1, <vscale x 2 x i1> %2, iXLen %3) nounwind {
+; CHECK-LABEL: intrinsic_vfwsub.w_mask_wf_tie_nxv2f32_nxv2f32_bf16:
+; CHECK: # %bb.0: # %entry
+; CHECK-NEXT: fsrmi a1, 0
+; CHECK-NEXT: vsetvli zero, a0, e16alt, mf2, ta, mu
+; CHECK-NEXT: vfwsub.wf v8, v8, fa0, v0.t
+; CHECK-NEXT: fsrm a1
+; CHECK-NEXT: ret
+entry:
+ %a = call <vscale x 2 x float> @llvm.riscv.vfwsub.w.mask.nxv2f32.bf16(
+ <vscale x 2 x float> %0,
+ <vscale x 2 x float> %0,
+ bfloat %1,
+ <vscale x 2 x i1> %2,
+ iXLen 0, iXLen %3, iXLen 1)
+
+ ret <vscale x 2 x float> %a
+}
+
+define <vscale x 4 x float> @intrinsic_vfwsub.w_mask_wf_tie_nxv4f32_nxv4f32_bf16(<vscale x 4 x float> %0, bfloat %1, <vscale x 4 x i1> %2, iXLen %3) nounwind {
+; CHECK-LABEL: intrinsic_vfwsub.w_mask_wf_tie_nxv4f32_nxv4f32_bf16:
+; CHECK: # %bb.0: # %entry
+; CHECK-NEXT: fsrmi a1, 0
+; CHECK-NEXT: vsetvli zero, a0, e16alt, m1, ta, mu
+; CHECK-NEXT: vfwsub.wf v8, v8, fa0, v0.t
+; CHECK-NEXT: fsrm a1
+; CHECK-NEXT: ret
+entry:
+ %a = call <vscale x 4 x float> @llvm.riscv.vfwsub.w.mask.nxv4f32.bf16(
+ <vscale x 4 x float> %0,
+ <vscale x 4 x float> %0,
+ bfloat %1,
+ <vscale x 4 x i1> %2,
+ iXLen 0, iXLen %3, iXLen 1)
+
+ ret <vscale x 4 x float> %a
+}
+
+define <vscale x 8 x float> @intrinsic_vfwsub.w_mask_wf_tie_nxv8f32_nxv8f32_bf16(<vscale x 8 x float> %0, bfloat %1, <vscale x 8 x i1> %2, iXLen %3) nounwind {
+; CHECK-LABEL: intrinsic_vfwsub.w_mask_wf_tie_nxv8f32_nxv8f32_bf16:
+; CHECK: # %bb.0: # %entry
+; CHECK-NEXT: fsrmi a1, 0
+; CHECK-NEXT: vsetvli zero, a0, e16alt, m2, ta, mu
+; CHECK-NEXT: vfwsub.wf v8, v8, fa0, v0.t
+; CHECK-NEXT: fsrm a1
+; CHECK-NEXT: ret
+entry:
+ %a = call <vscale x 8 x float> @llvm.riscv.vfwsub.w.mask.nxv8f32.bf16(
+ <vscale x 8 x float> %0,
+ <vscale x 8 x float> %0,
+ bfloat %1,
+ <vscale x 8 x i1> %2,
+ iXLen 0, iXLen %3, iXLen 1)
+
+ ret <vscale x 8 x float> %a
+}
+
+define <vscale x 16 x float> @intrinsic_vfwsub.w_mask_wf_tie_nxv16f32_nxv16f32_bf16(<vscale x 16 x float> %0, bfloat %1, <vscale x 16 x i1> %2, iXLen %3) nounwind {
+; CHECK-LABEL: intrinsic_vfwsub.w_mask_wf_tie_nxv16f32_nxv16f32_bf16:
+; CHECK: # %bb.0: # %entry
+; CHECK-NEXT: fsrmi a1, 0
+; CHECK-NEXT: vsetvli zero, a0, e16alt, m4, ta, mu
+; CHECK-NEXT: vfwsub.wf v8, v8, fa0, v0.t
+; CHECK-NEXT: fsrm a1
+; CHECK-NEXT: ret
+entry:
+ %a = call <vscale x 16 x float> @llvm.riscv.vfwsub.w.mask.nxv16f32.bf16(
+ <vscale x 16 x float> %0,
+ <vscale x 16 x float> %0,
+ bfloat %1,
+ <vscale x 16 x i1> %2,
+ iXLen 0, iXLen %3, iXLen 1)
+
+ ret <vscale x 16 x float> %a
+}
+
+define <vscale x 1 x float> @intrinsic_vfwsub.w_wv_untie_nxv1f32_nxv1f32_nxv1bf16(<vscale x 1 x bfloat> %0, <vscale x 1 x float> %1, iXLen %2) nounwind {
+; CHECK-LABEL: intrinsic_vfwsub.w_wv_untie_nxv1f32_nxv1f32_nxv1bf16:
+; CHECK: # %bb.0: # %entry
+; CHECK-NEXT: fsrmi a1, 0
+; CHECK-NEXT: vsetvli zero, a0, e16alt, mf4, ta, ma
+; CHECK-NEXT: vfwsub.wv v10, v9, v8
+; CHECK-NEXT: fsrm a1
+; CHECK-NEXT: vmv1r.v v8, v10
+; CHECK-NEXT: ret
+entry:
+ %a = call <vscale x 1 x float> @llvm.riscv.vfwsub.w.nxv1f32.nxv1bf16(
+ <vscale x 1 x float> poison,
+ <vscale x 1 x float> %1,
+ <vscale x 1 x bfloat> %0,
+ iXLen 0, iXLen %2)
+
+ ret <vscale x 1 x float> %a
+}
+
+define <vscale x 2 x float> @intrinsic_vfwsub.w_wv_untie_nxv2f32_nxv2f32_nxv2bf16(<vscale x 2 x bfloat> %0, <vscale x 2 x float> %1, iXLen %2) nounwind {
+; CHECK-LABEL: intrinsic_vfwsub.w_wv_untie_nxv2f32_nxv2f32_nxv2bf16:
+; CHECK: # %bb.0: # %entry
+; CHECK-NEXT: fsrmi a1, 0
+; CHECK-NEXT: vsetvli zero, a0, e16alt, mf2, ta, ma
+; CHECK-NEXT: vfwsub.wv v10, v9, v8
+; CHECK-NEXT: fsrm a1
+; CHECK-NEXT: vmv1r.v v8, v10
+; CHECK-NEXT: ret
+entry:
+ %a = call <vscale x 2 x float> @llvm.riscv.vfwsub.w.nxv2f32.nxv2bf16(
+ <vscale x 2 x float> poison,
+ <vscale x 2 x float> %1,
+ <vscale x 2 x bfloat> %0,
+ iXLen 0, iXLen %2)
+
+ ret <vscale x 2 x float> %a
+}
+
+define <vscale x 4 x float> @intrinsic_vfwsub.w_wv_untie_nxv4f32_nxv4f32_nxv4bf16(<vscale x 4 x bfloat> %0, <vscale x 4 x float> %1, iXLen %2) nounwind {
+; CHECK-LABEL: intrinsic_vfwsub.w_wv_untie_nxv4f32_nxv4f32_nxv4bf16:
+; CHECK: # %bb.0: # %entry
+; CHECK-NEXT: vsetvli zero, a0, e16alt, m1, ta, ma
+; CHECK-NEXT: vmv1r.v v12, v8
+; CHECK-NEXT: fsrmi a0, 0
+; CHECK-NEXT: vfwsub.wv v8, v10, v12
+; CHECK-NEXT: fsrm a0
+; CHECK-NEXT: ret
+entry:
+ %a = call <vscale x 4 x float> @llvm.riscv.vfwsub.w.nxv4f32.nxv4bf16(
+ <vscale x 4 x float> poison,
+ <vscale x 4 x float> %1,
+ <vscale x 4 x bfloat> %0,
+ iXLen 0, iXLen %2)
+
+ ret <vscale x 4 x float> %a
+}
+
+define <vscale x 8 x float> @intrinsic_vfwsub.w_wv_untie_nxv8f32_nxv8f32_nxv8bf16(<vscale x 8 x bfloat> %0, <vscale x 8 x float> %1, iXLen %2) nounwind {
+; CHECK-LABEL: intrinsic_vfwsub.w_wv_untie_nxv8f32_nxv8f32_nxv8bf16:
+; CHECK: # %bb.0: # %entry
+; CHECK-NEXT: vsetvli zero, a0, e16alt, m2, ta, ma
+; CHECK-NEXT: vmv2r.v v16, v8
+; CHECK-NEXT: fsrmi a0, 0
+; CHECK-NEXT: vfwsub.wv v8, v12, v16
+; CHECK-NEXT: fsrm a0
+; CHECK-NEXT: ret
+entry:
+ %a = call <vscale x 8 x float> @llvm.riscv.vfwsub.w.nxv8f32.nxv8bf16(
+ <vscale x 8 x float> poison,
+ <vscale x 8 x float> %1,
+ <vscale x 8 x bfloat> %0,
+ iXLen 0, iXLen %2)
+
+ ret <vscale x 8 x float> %a
+}
+
diff --git a/llvm/test/CodeGen/RISCV/rvv/vmfeq-bf.ll b/llvm/test/CodeGen/RISCV/rvv/vmfeq-bf.ll
new file mode 100644
index 0000000..9bd859b
--- /dev/null
+++ b/llvm/test/CodeGen/RISCV/rvv/vmfeq-bf.ll
@@ -0,0 +1,496 @@
+; NOTE: Assertions have been autogenerated by utils/update_llc_test_checks.py
+; RUN: sed 's/iXLen/i32/g' %s | llc -mtriple=riscv32 -mattr=+v,+experimental-zvfbfa \
+; RUN: -verify-machineinstrs -target-abi=ilp32d | FileCheck %s
+; RUN: sed 's/iXLen/i64/g' %s | llc -mtriple=riscv64 -mattr=+v,+experimental-zvfbfa \
+; RUN: -verify-machineinstrs -target-abi=lp64d | FileCheck %s
+
+declare <vscale x 1 x i1> @llvm.riscv.vmfeq.nxv1bf16(
+ <vscale x 1 x bfloat>,
+ <vscale x 1 x bfloat>,
+ iXLen);
+
+define <vscale x 1 x i1> @intrinsic_vmfeq_vv_nxv1bf16_nxv1bf16(<vscale x 1 x bfloat> %0, <vscale x 1 x bfloat> %1, iXLen %2) nounwind {
+; CHECK-LABEL: intrinsic_vmfeq_vv_nxv1bf16_nxv1bf16:
+; CHECK: # %bb.0: # %entry
+; CHECK-NEXT: vsetvli zero, a0, e16alt, mf4, ta, ma
+; CHECK-NEXT: vmfeq.vv v0, v8, v9
+; CHECK-NEXT: ret
+entry:
+ %a = call <vscale x 1 x i1> @llvm.riscv.vmfeq.nxv1bf16(
+ <vscale x 1 x bfloat> %0,
+ <vscale x 1 x bfloat> %1,
+ iXLen %2)
+
+ ret <vscale x 1 x i1> %a
+}
+
+declare <vscale x 1 x i1> @llvm.riscv.vmfeq.mask.nxv1bf16(
+ <vscale x 1 x i1>,
+ <vscale x 1 x bfloat>,
+ <vscale x 1 x bfloat>,
+ <vscale x 1 x i1>,
+ iXLen);
+
+define <vscale x 1 x i1> @intrinsic_vmfeq_mask_vv_nxv1bf16_nxv1bf16(<vscale x 1 x i1> %0, <vscale x 1 x bfloat> %1, <vscale x 1 x bfloat> %2, <vscale x 1 x bfloat> %3, iXLen %4) nounwind {
+; CHECK-LABEL: intrinsic_vmfeq_mask_vv_nxv1bf16_nxv1bf16:
+; CHECK: # %bb.0: # %entry
+; CHECK-NEXT: vsetvli zero, a0, e16alt, mf4, ta, mu
+; CHECK-NEXT: vmv1r.v v11, v0
+; CHECK-NEXT: vmfeq.vv v0, v8, v9
+; CHECK-NEXT: vmfeq.vv v11, v9, v10, v0.t
+; CHECK-NEXT: vmv1r.v v0, v11
+; CHECK-NEXT: ret
+entry:
+ %mask = call <vscale x 1 x i1> @llvm.riscv.vmfeq.nxv1bf16(
+ <vscale x 1 x bfloat> %1,
+ <vscale x 1 x bfloat> %2,
+ iXLen %4)
+ %a = call <vscale x 1 x i1> @llvm.riscv.vmfeq.mask.nxv1bf16(
+ <vscale x 1 x i1> %0,
+ <vscale x 1 x bfloat> %2,
+ <vscale x 1 x bfloat> %3,
+ <vscale x 1 x i1> %mask,
+ iXLen %4)
+
+ ret <vscale x 1 x i1> %a
+}
+
+declare <vscale x 2 x i1> @llvm.riscv.vmfeq.nxv2bf16(
+ <vscale x 2 x bfloat>,
+ <vscale x 2 x bfloat>,
+ iXLen);
+
+define <vscale x 2 x i1> @intrinsic_vmfeq_vv_nxv2bf16_nxv2bf16(<vscale x 2 x bfloat> %0, <vscale x 2 x bfloat> %1, iXLen %2) nounwind {
+; CHECK-LABEL: intrinsic_vmfeq_vv_nxv2bf16_nxv2bf16:
+; CHECK: # %bb.0: # %entry
+; CHECK-NEXT: vsetvli zero, a0, e16alt, mf2, ta, ma
+; CHECK-NEXT: vmfeq.vv v0, v8, v9
+; CHECK-NEXT: ret
+entry:
+ %a = call <vscale x 2 x i1> @llvm.riscv.vmfeq.nxv2bf16(
+ <vscale x 2 x bfloat> %0,
+ <vscale x 2 x bfloat> %1,
+ iXLen %2)
+
+ ret <vscale x 2 x i1> %a
+}
+
+declare <vscale x 2 x i1> @llvm.riscv.vmfeq.mask.nxv2bf16(
+ <vscale x 2 x i1>,
+ <vscale x 2 x bfloat>,
+ <vscale x 2 x bfloat>,
+ <vscale x 2 x i1>,
+ iXLen);
+
+define <vscale x 2 x i1> @intrinsic_vmfeq_mask_vv_nxv2bf16_nxv2bf16(<vscale x 2 x i1> %0, <vscale x 2 x bfloat> %1, <vscale x 2 x bfloat> %2, <vscale x 2 x bfloat> %3, iXLen %4) nounwind {
+; CHECK-LABEL: intrinsic_vmfeq_mask_vv_nxv2bf16_nxv2bf16:
+; CHECK: # %bb.0: # %entry
+; CHECK-NEXT: vsetvli zero, a0, e16alt, mf2, ta, mu
+; CHECK-NEXT: vmv1r.v v11, v0
+; CHECK-NEXT: vmfeq.vv v0, v8, v9
+; CHECK-NEXT: vmfeq.vv v11, v9, v10, v0.t
+; CHECK-NEXT: vmv1r.v v0, v11
+; CHECK-NEXT: ret
+entry:
+ %mask = call <vscale x 2 x i1> @llvm.riscv.vmfeq.nxv2bf16(
+ <vscale x 2 x bfloat> %1,
+ <vscale x 2 x bfloat> %2,
+ iXLen %4)
+ %a = call <vscale x 2 x i1> @llvm.riscv.vmfeq.mask.nxv2bf16(
+ <vscale x 2 x i1> %0,
+ <vscale x 2 x bfloat> %2,
+ <vscale x 2 x bfloat> %3,
+ <vscale x 2 x i1> %mask,
+ iXLen %4)
+
+ ret <vscale x 2 x i1> %a
+}
+
+declare <vscale x 4 x i1> @llvm.riscv.vmfeq.nxv4bf16(
+ <vscale x 4 x bfloat>,
+ <vscale x 4 x bfloat>,
+ iXLen);
+
+define <vscale x 4 x i1> @intrinsic_vmfeq_vv_nxv4bf16_nxv4bf16(<vscale x 4 x bfloat> %0, <vscale x 4 x bfloat> %1, iXLen %2) nounwind {
+; CHECK-LABEL: intrinsic_vmfeq_vv_nxv4bf16_nxv4bf16:
+; CHECK: # %bb.0: # %entry
+; CHECK-NEXT: vsetvli zero, a0, e16alt, m1, ta, ma
+; CHECK-NEXT: vmfeq.vv v0, v8, v9
+; CHECK-NEXT: ret
+entry:
+ %a = call <vscale x 4 x i1> @llvm.riscv.vmfeq.nxv4bf16(
+ <vscale x 4 x bfloat> %0,
+ <vscale x 4 x bfloat> %1,
+ iXLen %2)
+
+ ret <vscale x 4 x i1> %a
+}
+
+declare <vscale x 4 x i1> @llvm.riscv.vmfeq.mask.nxv4bf16(
+ <vscale x 4 x i1>,
+ <vscale x 4 x bfloat>,
+ <vscale x 4 x bfloat>,
+ <vscale x 4 x i1>,
+ iXLen);
+
+define <vscale x 4 x i1> @intrinsic_vmfeq_mask_vv_nxv4bf16_nxv4bf16(<vscale x 4 x i1> %0, <vscale x 4 x bfloat> %1, <vscale x 4 x bfloat> %2, <vscale x 4 x bfloat> %3, iXLen %4) nounwind {
+; CHECK-LABEL: intrinsic_vmfeq_mask_vv_nxv4bf16_nxv4bf16:
+; CHECK: # %bb.0: # %entry
+; CHECK-NEXT: vsetvli zero, a0, e16alt, m1, ta, mu
+; CHECK-NEXT: vmv1r.v v11, v0
+; CHECK-NEXT: vmfeq.vv v0, v8, v9
+; CHECK-NEXT: vmfeq.vv v11, v9, v10, v0.t
+; CHECK-NEXT: vmv.v.v v0, v11
+; CHECK-NEXT: ret
+entry:
+ %mask = call <vscale x 4 x i1> @llvm.riscv.vmfeq.nxv4bf16(
+ <vscale x 4 x bfloat> %1,
+ <vscale x 4 x bfloat> %2,
+ iXLen %4)
+ %a = call <vscale x 4 x i1> @llvm.riscv.vmfeq.mask.nxv4bf16(
+ <vscale x 4 x i1> %0,
+ <vscale x 4 x bfloat> %2,
+ <vscale x 4 x bfloat> %3,
+ <vscale x 4 x i1> %mask,
+ iXLen %4)
+
+ ret <vscale x 4 x i1> %a
+}
+
+declare <vscale x 8 x i1> @llvm.riscv.vmfeq.nxv8bf16(
+ <vscale x 8 x bfloat>,
+ <vscale x 8 x bfloat>,
+ iXLen);
+
+define <vscale x 8 x i1> @intrinsic_vmfeq_vv_nxv8bf16_nxv8bf16(<vscale x 8 x bfloat> %0, <vscale x 8 x bfloat> %1, iXLen %2) nounwind {
+; CHECK-LABEL: intrinsic_vmfeq_vv_nxv8bf16_nxv8bf16:
+; CHECK: # %bb.0: # %entry
+; CHECK-NEXT: vsetvli zero, a0, e16alt, m2, ta, ma
+; CHECK-NEXT: vmfeq.vv v0, v8, v10
+; CHECK-NEXT: ret
+entry:
+ %a = call <vscale x 8 x i1> @llvm.riscv.vmfeq.nxv8bf16(
+ <vscale x 8 x bfloat> %0,
+ <vscale x 8 x bfloat> %1,
+ iXLen %2)
+
+ ret <vscale x 8 x i1> %a
+}
+
+declare <vscale x 8 x i1> @llvm.riscv.vmfeq.mask.nxv8bf16(
+ <vscale x 8 x i1>,
+ <vscale x 8 x bfloat>,
+ <vscale x 8 x bfloat>,
+ <vscale x 8 x i1>,
+ iXLen);
+
+define <vscale x 8 x i1> @intrinsic_vmfeq_mask_vv_nxv8bf16_nxv8bf16(<vscale x 8 x i1> %0, <vscale x 8 x bfloat> %1, <vscale x 8 x bfloat> %2, <vscale x 8 x bfloat> %3, iXLen %4) nounwind {
+; CHECK-LABEL: intrinsic_vmfeq_mask_vv_nxv8bf16_nxv8bf16:
+; CHECK: # %bb.0: # %entry
+; CHECK-NEXT: vsetvli zero, a0, e16alt, m2, ta, mu
+; CHECK-NEXT: vmv1r.v v14, v0
+; CHECK-NEXT: vmfeq.vv v0, v8, v10
+; CHECK-NEXT: vmfeq.vv v14, v10, v12, v0.t
+; CHECK-NEXT: vmv1r.v v0, v14
+; CHECK-NEXT: ret
+entry:
+ %mask = call <vscale x 8 x i1> @llvm.riscv.vmfeq.nxv8bf16(
+ <vscale x 8 x bfloat> %1,
+ <vscale x 8 x bfloat> %2,
+ iXLen %4)
+ %a = call <vscale x 8 x i1> @llvm.riscv.vmfeq.mask.nxv8bf16(
+ <vscale x 8 x i1> %0,
+ <vscale x 8 x bfloat> %2,
+ <vscale x 8 x bfloat> %3,
+ <vscale x 8 x i1> %mask,
+ iXLen %4)
+
+ ret <vscale x 8 x i1> %a
+}
+
+declare <vscale x 16 x i1> @llvm.riscv.vmfeq.nxv16bf16(
+ <vscale x 16 x bfloat>,
+ <vscale x 16 x bfloat>,
+ iXLen);
+
+define <vscale x 16 x i1> @intrinsic_vmfeq_vv_nxv16bf16_nxv16bf16(<vscale x 16 x bfloat> %0, <vscale x 16 x bfloat> %1, iXLen %2) nounwind {
+; CHECK-LABEL: intrinsic_vmfeq_vv_nxv16bf16_nxv16bf16:
+; CHECK: # %bb.0: # %entry
+; CHECK-NEXT: vsetvli zero, a0, e16alt, m4, ta, ma
+; CHECK-NEXT: vmfeq.vv v0, v8, v12
+; CHECK-NEXT: ret
+entry:
+ %a = call <vscale x 16 x i1> @llvm.riscv.vmfeq.nxv16bf16(
+ <vscale x 16 x bfloat> %0,
+ <vscale x 16 x bfloat> %1,
+ iXLen %2)
+
+ ret <vscale x 16 x i1> %a
+}
+
+declare <vscale x 16 x i1> @llvm.riscv.vmfeq.mask.nxv16bf16(
+ <vscale x 16 x i1>,
+ <vscale x 16 x bfloat>,
+ <vscale x 16 x bfloat>,
+ <vscale x 16 x i1>,
+ iXLen);
+
+define <vscale x 16 x i1> @intrinsic_vmfeq_mask_vv_nxv16bf16_nxv16bf16(<vscale x 16 x i1> %0, <vscale x 16 x bfloat> %1, <vscale x 16 x bfloat> %2, <vscale x 16 x bfloat> %3, iXLen %4) nounwind {
+; CHECK-LABEL: intrinsic_vmfeq_mask_vv_nxv16bf16_nxv16bf16:
+; CHECK: # %bb.0: # %entry
+; CHECK-NEXT: vsetvli zero, a0, e16alt, m4, ta, mu
+; CHECK-NEXT: vmv1r.v v20, v0
+; CHECK-NEXT: vmfeq.vv v0, v8, v12
+; CHECK-NEXT: vmfeq.vv v20, v12, v16, v0.t
+; CHECK-NEXT: vmv1r.v v0, v20
+; CHECK-NEXT: ret
+entry:
+ %mask = call <vscale x 16 x i1> @llvm.riscv.vmfeq.nxv16bf16(
+ <vscale x 16 x bfloat> %1,
+ <vscale x 16 x bfloat> %2,
+ iXLen %4)
+ %a = call <vscale x 16 x i1> @llvm.riscv.vmfeq.mask.nxv16bf16(
+ <vscale x 16 x i1> %0,
+ <vscale x 16 x bfloat> %2,
+ <vscale x 16 x bfloat> %3,
+ <vscale x 16 x i1> %mask,
+ iXLen %4)
+
+ ret <vscale x 16 x i1> %a
+}
+
+declare <vscale x 1 x i1> @llvm.riscv.vmfeq.nxv1bf16.bf16(
+ <vscale x 1 x bfloat>,
+ bfloat,
+ iXLen);
+
+define <vscale x 1 x i1> @intrinsic_vmfeq_vf_nxv1bf16_bf16(<vscale x 1 x bfloat> %0, bfloat %1, iXLen %2) nounwind {
+; CHECK-LABEL: intrinsic_vmfeq_vf_nxv1bf16_bf16:
+; CHECK: # %bb.0: # %entry
+; CHECK-NEXT: vsetvli zero, a0, e16alt, mf4, ta, ma
+; CHECK-NEXT: vmfeq.vf v0, v8, fa0
+; CHECK-NEXT: ret
+entry:
+ %a = call <vscale x 1 x i1> @llvm.riscv.vmfeq.nxv1bf16.bf16(
+ <vscale x 1 x bfloat> %0,
+ bfloat %1,
+ iXLen %2)
+
+ ret <vscale x 1 x i1> %a
+}
+
+declare <vscale x 1 x i1> @llvm.riscv.vmfeq.mask.nxv1bf16.bf16(
+ <vscale x 1 x i1>,
+ <vscale x 1 x bfloat>,
+ bfloat,
+ <vscale x 1 x i1>,
+ iXLen);
+
+define <vscale x 1 x i1> @intrinsic_vmfeq_mask_vf_nxv1bf16_bf16(<vscale x 1 x i1> %0, <vscale x 1 x bfloat> %1, bfloat %2, <vscale x 1 x i1> %3, iXLen %4) nounwind {
+; CHECK-LABEL: intrinsic_vmfeq_mask_vf_nxv1bf16_bf16:
+; CHECK: # %bb.0: # %entry
+; CHECK-NEXT: vsetvli zero, a0, e16alt, mf4, ta, mu
+; CHECK-NEXT: vmv1r.v v10, v0
+; CHECK-NEXT: vmv1r.v v0, v9
+; CHECK-NEXT: vmfeq.vf v10, v8, fa0, v0.t
+; CHECK-NEXT: vmv1r.v v0, v10
+; CHECK-NEXT: ret
+entry:
+ %a = call <vscale x 1 x i1> @llvm.riscv.vmfeq.mask.nxv1bf16.bf16(
+ <vscale x 1 x i1> %0,
+ <vscale x 1 x bfloat> %1,
+ bfloat %2,
+ <vscale x 1 x i1> %3,
+ iXLen %4)
+
+ ret <vscale x 1 x i1> %a
+}
+
+declare <vscale x 2 x i1> @llvm.riscv.vmfeq.nxv2bf16.bf16(
+ <vscale x 2 x bfloat>,
+ bfloat,
+ iXLen);
+
+define <vscale x 2 x i1> @intrinsic_vmfeq_vf_nxv2bf16_bf16(<vscale x 2 x bfloat> %0, bfloat %1, iXLen %2) nounwind {
+; CHECK-LABEL: intrinsic_vmfeq_vf_nxv2bf16_bf16:
+; CHECK: # %bb.0: # %entry
+; CHECK-NEXT: vsetvli zero, a0, e16alt, mf2, ta, ma
+; CHECK-NEXT: vmfeq.vf v0, v8, fa0
+; CHECK-NEXT: ret
+entry:
+ %a = call <vscale x 2 x i1> @llvm.riscv.vmfeq.nxv2bf16.bf16(
+ <vscale x 2 x bfloat> %0,
+ bfloat %1,
+ iXLen %2)
+
+ ret <vscale x 2 x i1> %a
+}
+
+declare <vscale x 2 x i1> @llvm.riscv.vmfeq.mask.nxv2bf16.bf16(
+ <vscale x 2 x i1>,
+ <vscale x 2 x bfloat>,
+ bfloat,
+ <vscale x 2 x i1>,
+ iXLen);
+
+define <vscale x 2 x i1> @intrinsic_vmfeq_mask_vf_nxv2bf16_bf16(<vscale x 2 x i1> %0, <vscale x 2 x bfloat> %1, bfloat %2, <vscale x 2 x i1> %3, iXLen %4) nounwind {
+; CHECK-LABEL: intrinsic_vmfeq_mask_vf_nxv2bf16_bf16:
+; CHECK: # %bb.0: # %entry
+; CHECK-NEXT: vsetvli zero, a0, e16alt, mf2, ta, mu
+; CHECK-NEXT: vmv1r.v v10, v0
+; CHECK-NEXT: vmv1r.v v0, v9
+; CHECK-NEXT: vmfeq.vf v10, v8, fa0, v0.t
+; CHECK-NEXT: vmv1r.v v0, v10
+; CHECK-NEXT: ret
+entry:
+ %a = call <vscale x 2 x i1> @llvm.riscv.vmfeq.mask.nxv2bf16.bf16(
+ <vscale x 2 x i1> %0,
+ <vscale x 2 x bfloat> %1,
+ bfloat %2,
+ <vscale x 2 x i1> %3,
+ iXLen %4)
+
+ ret <vscale x 2 x i1> %a
+}
+
+declare <vscale x 4 x i1> @llvm.riscv.vmfeq.nxv4bf16.bf16(
+ <vscale x 4 x bfloat>,
+ bfloat,
+ iXLen);
+
+define <vscale x 4 x i1> @intrinsic_vmfeq_vf_nxv4bf16_bf16(<vscale x 4 x bfloat> %0, bfloat %1, iXLen %2) nounwind {
+; CHECK-LABEL: intrinsic_vmfeq_vf_nxv4bf16_bf16:
+; CHECK: # %bb.0: # %entry
+; CHECK-NEXT: vsetvli zero, a0, e16alt, m1, ta, ma
+; CHECK-NEXT: vmfeq.vf v0, v8, fa0
+; CHECK-NEXT: ret
+entry:
+ %a = call <vscale x 4 x i1> @llvm.riscv.vmfeq.nxv4bf16.bf16(
+ <vscale x 4 x bfloat> %0,
+ bfloat %1,
+ iXLen %2)
+
+ ret <vscale x 4 x i1> %a
+}
+
+declare <vscale x 4 x i1> @llvm.riscv.vmfeq.mask.nxv4bf16.bf16(
+ <vscale x 4 x i1>,
+ <vscale x 4 x bfloat>,
+ bfloat,
+ <vscale x 4 x i1>,
+ iXLen);
+
+define <vscale x 4 x i1> @intrinsic_vmfeq_mask_vf_nxv4bf16_bf16(<vscale x 4 x i1> %0, <vscale x 4 x bfloat> %1, bfloat %2, <vscale x 4 x i1> %3, iXLen %4) nounwind {
+; CHECK-LABEL: intrinsic_vmfeq_mask_vf_nxv4bf16_bf16:
+; CHECK: # %bb.0: # %entry
+; CHECK-NEXT: vsetvli zero, a0, e16alt, m1, ta, mu
+; CHECK-NEXT: vmv1r.v v10, v0
+; CHECK-NEXT: vmv1r.v v0, v9
+; CHECK-NEXT: vmfeq.vf v10, v8, fa0, v0.t
+; CHECK-NEXT: vmv.v.v v0, v10
+; CHECK-NEXT: ret
+entry:
+ %a = call <vscale x 4 x i1> @llvm.riscv.vmfeq.mask.nxv4bf16.bf16(
+ <vscale x 4 x i1> %0,
+ <vscale x 4 x bfloat> %1,
+ bfloat %2,
+ <vscale x 4 x i1> %3,
+ iXLen %4)
+
+ ret <vscale x 4 x i1> %a
+}
+
+declare <vscale x 8 x i1> @llvm.riscv.vmfeq.nxv8bf16.bf16(
+ <vscale x 8 x bfloat>,
+ bfloat,
+ iXLen);
+
+define <vscale x 8 x i1> @intrinsic_vmfeq_vf_nxv8bf16_bf16(<vscale x 8 x bfloat> %0, bfloat %1, iXLen %2) nounwind {
+; CHECK-LABEL: intrinsic_vmfeq_vf_nxv8bf16_bf16:
+; CHECK: # %bb.0: # %entry
+; CHECK-NEXT: vsetvli zero, a0, e16alt, m2, ta, ma
+; CHECK-NEXT: vmfeq.vf v0, v8, fa0
+; CHECK-NEXT: ret
+entry:
+ %a = call <vscale x 8 x i1> @llvm.riscv.vmfeq.nxv8bf16.bf16(
+ <vscale x 8 x bfloat> %0,
+ bfloat %1,
+ iXLen %2)
+
+ ret <vscale x 8 x i1> %a
+}
+
+declare <vscale x 8 x i1> @llvm.riscv.vmfeq.mask.nxv8bf16.bf16(
+ <vscale x 8 x i1>,
+ <vscale x 8 x bfloat>,
+ bfloat,
+ <vscale x 8 x i1>,
+ iXLen);
+
+define <vscale x 8 x i1> @intrinsic_vmfeq_mask_vf_nxv8bf16_bf16(<vscale x 8 x i1> %0, <vscale x 8 x bfloat> %1, bfloat %2, <vscale x 8 x i1> %3, iXLen %4) nounwind {
+; CHECK-LABEL: intrinsic_vmfeq_mask_vf_nxv8bf16_bf16:
+; CHECK: # %bb.0: # %entry
+; CHECK-NEXT: vsetvli zero, a0, e16alt, m2, ta, mu
+; CHECK-NEXT: vmv1r.v v11, v0
+; CHECK-NEXT: vmv1r.v v0, v10
+; CHECK-NEXT: vmfeq.vf v11, v8, fa0, v0.t
+; CHECK-NEXT: vmv1r.v v0, v11
+; CHECK-NEXT: ret
+entry:
+ %a = call <vscale x 8 x i1> @llvm.riscv.vmfeq.mask.nxv8bf16.bf16(
+ <vscale x 8 x i1> %0,
+ <vscale x 8 x bfloat> %1,
+ bfloat %2,
+ <vscale x 8 x i1> %3,
+ iXLen %4)
+
+ ret <vscale x 8 x i1> %a
+}
+
+declare <vscale x 16 x i1> @llvm.riscv.vmfeq.nxv16bf16.bf16(
+ <vscale x 16 x bfloat>,
+ bfloat,
+ iXLen);
+
+define <vscale x 16 x i1> @intrinsic_vmfeq_vf_nxv16bf16_bf16(<vscale x 16 x bfloat> %0, bfloat %1, iXLen %2) nounwind {
+; CHECK-LABEL: intrinsic_vmfeq_vf_nxv16bf16_bf16:
+; CHECK: # %bb.0: # %entry
+; CHECK-NEXT: vsetvli zero, a0, e16alt, m4, ta, ma
+; CHECK-NEXT: vmfeq.vf v0, v8, fa0
+; CHECK-NEXT: ret
+entry:
+ %a = call <vscale x 16 x i1> @llvm.riscv.vmfeq.nxv16bf16.bf16(
+ <vscale x 16 x bfloat> %0,
+ bfloat %1,
+ iXLen %2)
+
+ ret <vscale x 16 x i1> %a
+}
+
+declare <vscale x 16 x i1> @llvm.riscv.vmfeq.mask.nxv16bf16.bf16(
+ <vscale x 16 x i1>,
+ <vscale x 16 x bfloat>,
+ bfloat,
+ <vscale x 16 x i1>,
+ iXLen);
+
+define <vscale x 16 x i1> @intrinsic_vmfeq_mask_vf_nxv16bf16_bf16(<vscale x 16 x i1> %0, <vscale x 16 x bfloat> %1, bfloat %2, <vscale x 16 x i1> %3, iXLen %4) nounwind {
+; CHECK-LABEL: intrinsic_vmfeq_mask_vf_nxv16bf16_bf16:
+; CHECK: # %bb.0: # %entry
+; CHECK-NEXT: vsetvli zero, a0, e16alt, m4, ta, mu
+; CHECK-NEXT: vmv1r.v v13, v0
+; CHECK-NEXT: vmv1r.v v0, v12
+; CHECK-NEXT: vmfeq.vf v13, v8, fa0, v0.t
+; CHECK-NEXT: vmv1r.v v0, v13
+; CHECK-NEXT: ret
+entry:
+ %a = call <vscale x 16 x i1> @llvm.riscv.vmfeq.mask.nxv16bf16.bf16(
+ <vscale x 16 x i1> %0,
+ <vscale x 16 x bfloat> %1,
+ bfloat %2,
+ <vscale x 16 x i1> %3,
+ iXLen %4)
+
+ ret <vscale x 16 x i1> %a
+}
+
diff --git a/llvm/test/CodeGen/RISCV/rvv/vmfge-bf.ll b/llvm/test/CodeGen/RISCV/rvv/vmfge-bf.ll
new file mode 100644
index 0000000..73946dc
--- /dev/null
+++ b/llvm/test/CodeGen/RISCV/rvv/vmfge-bf.ll
@@ -0,0 +1,496 @@
+; NOTE: Assertions have been autogenerated by utils/update_llc_test_checks.py
+; RUN: sed 's/iXLen/i32/g' %s | llc -mtriple=riscv32 -mattr=+v,+experimental-zvfbfa \
+; RUN: -verify-machineinstrs -target-abi=ilp32d | FileCheck %s
+; RUN: sed 's/iXLen/i64/g' %s | llc -mtriple=riscv64 -mattr=+v,+experimental-zvfbfa \
+; RUN: -verify-machineinstrs -target-abi=lp64d | FileCheck %s
+
+declare <vscale x 1 x i1> @llvm.riscv.vmfge.nxv1bf16(
+ <vscale x 1 x bfloat>,
+ <vscale x 1 x bfloat>,
+ iXLen);
+
+define <vscale x 1 x i1> @intrinsic_vmfge_vv_nxv1bf16_nxv1bf16(<vscale x 1 x bfloat> %0, <vscale x 1 x bfloat> %1, iXLen %2) nounwind {
+; CHECK-LABEL: intrinsic_vmfge_vv_nxv1bf16_nxv1bf16:
+; CHECK: # %bb.0: # %entry
+; CHECK-NEXT: vsetvli zero, a0, e16alt, mf4, ta, ma
+; CHECK-NEXT: vmfle.vv v0, v9, v8
+; CHECK-NEXT: ret
+entry:
+ %a = call <vscale x 1 x i1> @llvm.riscv.vmfge.nxv1bf16(
+ <vscale x 1 x bfloat> %0,
+ <vscale x 1 x bfloat> %1,
+ iXLen %2)
+
+ ret <vscale x 1 x i1> %a
+}
+
+declare <vscale x 1 x i1> @llvm.riscv.vmfge.mask.nxv1bf16(
+ <vscale x 1 x i1>,
+ <vscale x 1 x bfloat>,
+ <vscale x 1 x bfloat>,
+ <vscale x 1 x i1>,
+ iXLen);
+
+define <vscale x 1 x i1> @intrinsic_vmfge_mask_vv_nxv1bf16_nxv1bf16(<vscale x 1 x i1> %0, <vscale x 1 x bfloat> %1, <vscale x 1 x bfloat> %2, <vscale x 1 x bfloat> %3, iXLen %4) nounwind {
+; CHECK-LABEL: intrinsic_vmfge_mask_vv_nxv1bf16_nxv1bf16:
+; CHECK: # %bb.0: # %entry
+; CHECK-NEXT: vsetvli zero, a0, e16alt, mf4, ta, mu
+; CHECK-NEXT: vmv1r.v v11, v0
+; CHECK-NEXT: vmfle.vv v0, v9, v8
+; CHECK-NEXT: vmfle.vv v11, v10, v9, v0.t
+; CHECK-NEXT: vmv1r.v v0, v11
+; CHECK-NEXT: ret
+entry:
+ %mask = call <vscale x 1 x i1> @llvm.riscv.vmfge.nxv1bf16(
+ <vscale x 1 x bfloat> %1,
+ <vscale x 1 x bfloat> %2,
+ iXLen %4)
+ %a = call <vscale x 1 x i1> @llvm.riscv.vmfge.mask.nxv1bf16(
+ <vscale x 1 x i1> %0,
+ <vscale x 1 x bfloat> %2,
+ <vscale x 1 x bfloat> %3,
+ <vscale x 1 x i1> %mask,
+ iXLen %4)
+
+ ret <vscale x 1 x i1> %a
+}
+
+declare <vscale x 2 x i1> @llvm.riscv.vmfge.nxv2bf16(
+ <vscale x 2 x bfloat>,
+ <vscale x 2 x bfloat>,
+ iXLen);
+
+define <vscale x 2 x i1> @intrinsic_vmfge_vv_nxv2bf16_nxv2bf16(<vscale x 2 x bfloat> %0, <vscale x 2 x bfloat> %1, iXLen %2) nounwind {
+; CHECK-LABEL: intrinsic_vmfge_vv_nxv2bf16_nxv2bf16:
+; CHECK: # %bb.0: # %entry
+; CHECK-NEXT: vsetvli zero, a0, e16alt, mf2, ta, ma
+; CHECK-NEXT: vmfle.vv v0, v9, v8
+; CHECK-NEXT: ret
+entry:
+ %a = call <vscale x 2 x i1> @llvm.riscv.vmfge.nxv2bf16(
+ <vscale x 2 x bfloat> %0,
+ <vscale x 2 x bfloat> %1,
+ iXLen %2)
+
+ ret <vscale x 2 x i1> %a
+}
+
+declare <vscale x 2 x i1> @llvm.riscv.vmfge.mask.nxv2bf16(
+ <vscale x 2 x i1>,
+ <vscale x 2 x bfloat>,
+ <vscale x 2 x bfloat>,
+ <vscale x 2 x i1>,
+ iXLen);
+
+define <vscale x 2 x i1> @intrinsic_vmfge_mask_vv_nxv2bf16_nxv2bf16(<vscale x 2 x i1> %0, <vscale x 2 x bfloat> %1, <vscale x 2 x bfloat> %2, <vscale x 2 x bfloat> %3, iXLen %4) nounwind {
+; CHECK-LABEL: intrinsic_vmfge_mask_vv_nxv2bf16_nxv2bf16:
+; CHECK: # %bb.0: # %entry
+; CHECK-NEXT: vsetvli zero, a0, e16alt, mf2, ta, mu
+; CHECK-NEXT: vmv1r.v v11, v0
+; CHECK-NEXT: vmfle.vv v0, v9, v8
+; CHECK-NEXT: vmfle.vv v11, v10, v9, v0.t
+; CHECK-NEXT: vmv1r.v v0, v11
+; CHECK-NEXT: ret
+entry:
+ %mask = call <vscale x 2 x i1> @llvm.riscv.vmfge.nxv2bf16(
+ <vscale x 2 x bfloat> %1,
+ <vscale x 2 x bfloat> %2,
+ iXLen %4)
+ %a = call <vscale x 2 x i1> @llvm.riscv.vmfge.mask.nxv2bf16(
+ <vscale x 2 x i1> %0,
+ <vscale x 2 x bfloat> %2,
+ <vscale x 2 x bfloat> %3,
+ <vscale x 2 x i1> %mask,
+ iXLen %4)
+
+ ret <vscale x 2 x i1> %a
+}
+
+declare <vscale x 4 x i1> @llvm.riscv.vmfge.nxv4bf16(
+ <vscale x 4 x bfloat>,
+ <vscale x 4 x bfloat>,
+ iXLen);
+
+define <vscale x 4 x i1> @intrinsic_vmfge_vv_nxv4bf16_nxv4bf16(<vscale x 4 x bfloat> %0, <vscale x 4 x bfloat> %1, iXLen %2) nounwind {
+; CHECK-LABEL: intrinsic_vmfge_vv_nxv4bf16_nxv4bf16:
+; CHECK: # %bb.0: # %entry
+; CHECK-NEXT: vsetvli zero, a0, e16alt, m1, ta, ma
+; CHECK-NEXT: vmfle.vv v0, v9, v8
+; CHECK-NEXT: ret
+entry:
+ %a = call <vscale x 4 x i1> @llvm.riscv.vmfge.nxv4bf16(
+ <vscale x 4 x bfloat> %0,
+ <vscale x 4 x bfloat> %1,
+ iXLen %2)
+
+ ret <vscale x 4 x i1> %a
+}
+
+declare <vscale x 4 x i1> @llvm.riscv.vmfge.mask.nxv4bf16(
+ <vscale x 4 x i1>,
+ <vscale x 4 x bfloat>,
+ <vscale x 4 x bfloat>,
+ <vscale x 4 x i1>,
+ iXLen);
+
+define <vscale x 4 x i1> @intrinsic_vmfge_mask_vv_nxv4bf16_nxv4bf16(<vscale x 4 x i1> %0, <vscale x 4 x bfloat> %1, <vscale x 4 x bfloat> %2, <vscale x 4 x bfloat> %3, iXLen %4) nounwind {
+; CHECK-LABEL: intrinsic_vmfge_mask_vv_nxv4bf16_nxv4bf16:
+; CHECK: # %bb.0: # %entry
+; CHECK-NEXT: vsetvli zero, a0, e16alt, m1, ta, mu
+; CHECK-NEXT: vmv1r.v v11, v0
+; CHECK-NEXT: vmfle.vv v0, v9, v8
+; CHECK-NEXT: vmfle.vv v11, v10, v9, v0.t
+; CHECK-NEXT: vmv.v.v v0, v11
+; CHECK-NEXT: ret
+entry:
+ %mask = call <vscale x 4 x i1> @llvm.riscv.vmfge.nxv4bf16(
+ <vscale x 4 x bfloat> %1,
+ <vscale x 4 x bfloat> %2,
+ iXLen %4)
+ %a = call <vscale x 4 x i1> @llvm.riscv.vmfge.mask.nxv4bf16(
+ <vscale x 4 x i1> %0,
+ <vscale x 4 x bfloat> %2,
+ <vscale x 4 x bfloat> %3,
+ <vscale x 4 x i1> %mask,
+ iXLen %4)
+
+ ret <vscale x 4 x i1> %a
+}
+
+declare <vscale x 8 x i1> @llvm.riscv.vmfge.nxv8bf16(
+ <vscale x 8 x bfloat>,
+ <vscale x 8 x bfloat>,
+ iXLen);
+
+define <vscale x 8 x i1> @intrinsic_vmfge_vv_nxv8bf16_nxv8bf16(<vscale x 8 x bfloat> %0, <vscale x 8 x bfloat> %1, iXLen %2) nounwind {
+; CHECK-LABEL: intrinsic_vmfge_vv_nxv8bf16_nxv8bf16:
+; CHECK: # %bb.0: # %entry
+; CHECK-NEXT: vsetvli zero, a0, e16alt, m2, ta, ma
+; CHECK-NEXT: vmfle.vv v0, v10, v8
+; CHECK-NEXT: ret
+entry:
+ %a = call <vscale x 8 x i1> @llvm.riscv.vmfge.nxv8bf16(
+ <vscale x 8 x bfloat> %0,
+ <vscale x 8 x bfloat> %1,
+ iXLen %2)
+
+ ret <vscale x 8 x i1> %a
+}
+
+declare <vscale x 8 x i1> @llvm.riscv.vmfge.mask.nxv8bf16(
+ <vscale x 8 x i1>,
+ <vscale x 8 x bfloat>,
+ <vscale x 8 x bfloat>,
+ <vscale x 8 x i1>,
+ iXLen);
+
+define <vscale x 8 x i1> @intrinsic_vmfge_mask_vv_nxv8bf16_nxv8bf16(<vscale x 8 x i1> %0, <vscale x 8 x bfloat> %1, <vscale x 8 x bfloat> %2, <vscale x 8 x bfloat> %3, iXLen %4) nounwind {
+; CHECK-LABEL: intrinsic_vmfge_mask_vv_nxv8bf16_nxv8bf16:
+; CHECK: # %bb.0: # %entry
+; CHECK-NEXT: vsetvli zero, a0, e16alt, m2, ta, mu
+; CHECK-NEXT: vmv1r.v v14, v0
+; CHECK-NEXT: vmfle.vv v0, v10, v8
+; CHECK-NEXT: vmfle.vv v14, v12, v10, v0.t
+; CHECK-NEXT: vmv1r.v v0, v14
+; CHECK-NEXT: ret
+entry:
+ %mask = call <vscale x 8 x i1> @llvm.riscv.vmfge.nxv8bf16(
+ <vscale x 8 x bfloat> %1,
+ <vscale x 8 x bfloat> %2,
+ iXLen %4)
+ %a = call <vscale x 8 x i1> @llvm.riscv.vmfge.mask.nxv8bf16(
+ <vscale x 8 x i1> %0,
+ <vscale x 8 x bfloat> %2,
+ <vscale x 8 x bfloat> %3,
+ <vscale x 8 x i1> %mask,
+ iXLen %4)
+
+ ret <vscale x 8 x i1> %a
+}
+
+declare <vscale x 16 x i1> @llvm.riscv.vmfge.nxv16bf16(
+ <vscale x 16 x bfloat>,
+ <vscale x 16 x bfloat>,
+ iXLen);
+
+define <vscale x 16 x i1> @intrinsic_vmfge_vv_nxv16bf16_nxv16bf16(<vscale x 16 x bfloat> %0, <vscale x 16 x bfloat> %1, iXLen %2) nounwind {
+; CHECK-LABEL: intrinsic_vmfge_vv_nxv16bf16_nxv16bf16:
+; CHECK: # %bb.0: # %entry
+; CHECK-NEXT: vsetvli zero, a0, e16alt, m4, ta, ma
+; CHECK-NEXT: vmfle.vv v0, v12, v8
+; CHECK-NEXT: ret
+entry:
+ %a = call <vscale x 16 x i1> @llvm.riscv.vmfge.nxv16bf16(
+ <vscale x 16 x bfloat> %0,
+ <vscale x 16 x bfloat> %1,
+ iXLen %2)
+
+ ret <vscale x 16 x i1> %a
+}
+
+declare <vscale x 16 x i1> @llvm.riscv.vmfge.mask.nxv16bf16(
+ <vscale x 16 x i1>,
+ <vscale x 16 x bfloat>,
+ <vscale x 16 x bfloat>,
+ <vscale x 16 x i1>,
+ iXLen);
+
+define <vscale x 16 x i1> @intrinsic_vmfge_mask_vv_nxv16bf16_nxv16bf16(<vscale x 16 x i1> %0, <vscale x 16 x bfloat> %1, <vscale x 16 x bfloat> %2, <vscale x 16 x bfloat> %3, iXLen %4) nounwind {
+; CHECK-LABEL: intrinsic_vmfge_mask_vv_nxv16bf16_nxv16bf16:
+; CHECK: # %bb.0: # %entry
+; CHECK-NEXT: vsetvli zero, a0, e16alt, m4, ta, mu
+; CHECK-NEXT: vmv1r.v v20, v0
+; CHECK-NEXT: vmfle.vv v0, v12, v8
+; CHECK-NEXT: vmfle.vv v20, v16, v12, v0.t
+; CHECK-NEXT: vmv1r.v v0, v20
+; CHECK-NEXT: ret
+entry:
+ %mask = call <vscale x 16 x i1> @llvm.riscv.vmfge.nxv16bf16(
+ <vscale x 16 x bfloat> %1,
+ <vscale x 16 x bfloat> %2,
+ iXLen %4)
+ %a = call <vscale x 16 x i1> @llvm.riscv.vmfge.mask.nxv16bf16(
+ <vscale x 16 x i1> %0,
+ <vscale x 16 x bfloat> %2,
+ <vscale x 16 x bfloat> %3,
+ <vscale x 16 x i1> %mask,
+ iXLen %4)
+
+ ret <vscale x 16 x i1> %a
+}
+
+declare <vscale x 1 x i1> @llvm.riscv.vmfge.nxv1bf16.bf16(
+ <vscale x 1 x bfloat>,
+ bfloat,
+ iXLen);
+
+define <vscale x 1 x i1> @intrinsic_vmfge_vf_nxv1bf16_bf16(<vscale x 1 x bfloat> %0, bfloat %1, iXLen %2) nounwind {
+; CHECK-LABEL: intrinsic_vmfge_vf_nxv1bf16_bf16:
+; CHECK: # %bb.0: # %entry
+; CHECK-NEXT: vsetvli zero, a0, e16alt, mf4, ta, ma
+; CHECK-NEXT: vmfge.vf v0, v8, fa0
+; CHECK-NEXT: ret
+entry:
+ %a = call <vscale x 1 x i1> @llvm.riscv.vmfge.nxv1bf16.bf16(
+ <vscale x 1 x bfloat> %0,
+ bfloat %1,
+ iXLen %2)
+
+ ret <vscale x 1 x i1> %a
+}
+
+declare <vscale x 1 x i1> @llvm.riscv.vmfge.mask.nxv1bf16.bf16(
+ <vscale x 1 x i1>,
+ <vscale x 1 x bfloat>,
+ bfloat,
+ <vscale x 1 x i1>,
+ iXLen);
+
+define <vscale x 1 x i1> @intrinsic_vmfge_mask_vf_nxv1bf16_bf16(<vscale x 1 x i1> %0, <vscale x 1 x bfloat> %1, bfloat %2, <vscale x 1 x i1> %3, iXLen %4) nounwind {
+; CHECK-LABEL: intrinsic_vmfge_mask_vf_nxv1bf16_bf16:
+; CHECK: # %bb.0: # %entry
+; CHECK-NEXT: vsetvli zero, a0, e16alt, mf4, ta, mu
+; CHECK-NEXT: vmv1r.v v10, v0
+; CHECK-NEXT: vmv1r.v v0, v9
+; CHECK-NEXT: vmfge.vf v10, v8, fa0, v0.t
+; CHECK-NEXT: vmv1r.v v0, v10
+; CHECK-NEXT: ret
+entry:
+ %a = call <vscale x 1 x i1> @llvm.riscv.vmfge.mask.nxv1bf16.bf16(
+ <vscale x 1 x i1> %0,
+ <vscale x 1 x bfloat> %1,
+ bfloat %2,
+ <vscale x 1 x i1> %3,
+ iXLen %4)
+
+ ret <vscale x 1 x i1> %a
+}
+
+declare <vscale x 2 x i1> @llvm.riscv.vmfge.nxv2bf16.bf16(
+ <vscale x 2 x bfloat>,
+ bfloat,
+ iXLen);
+
+define <vscale x 2 x i1> @intrinsic_vmfge_vf_nxv2bf16_bf16(<vscale x 2 x bfloat> %0, bfloat %1, iXLen %2) nounwind {
+; CHECK-LABEL: intrinsic_vmfge_vf_nxv2bf16_bf16:
+; CHECK: # %bb.0: # %entry
+; CHECK-NEXT: vsetvli zero, a0, e16alt, mf2, ta, ma
+; CHECK-NEXT: vmfge.vf v0, v8, fa0
+; CHECK-NEXT: ret
+entry:
+ %a = call <vscale x 2 x i1> @llvm.riscv.vmfge.nxv2bf16.bf16(
+ <vscale x 2 x bfloat> %0,
+ bfloat %1,
+ iXLen %2)
+
+ ret <vscale x 2 x i1> %a
+}
+
+declare <vscale x 2 x i1> @llvm.riscv.vmfge.mask.nxv2bf16.bf16(
+ <vscale x 2 x i1>,
+ <vscale x 2 x bfloat>,
+ bfloat,
+ <vscale x 2 x i1>,
+ iXLen);
+
+define <vscale x 2 x i1> @intrinsic_vmfge_mask_vf_nxv2bf16_bf16(<vscale x 2 x i1> %0, <vscale x 2 x bfloat> %1, bfloat %2, <vscale x 2 x i1> %3, iXLen %4) nounwind {
+; CHECK-LABEL: intrinsic_vmfge_mask_vf_nxv2bf16_bf16:
+; CHECK: # %bb.0: # %entry
+; CHECK-NEXT: vsetvli zero, a0, e16alt, mf2, ta, mu
+; CHECK-NEXT: vmv1r.v v10, v0
+; CHECK-NEXT: vmv1r.v v0, v9
+; CHECK-NEXT: vmfge.vf v10, v8, fa0, v0.t
+; CHECK-NEXT: vmv1r.v v0, v10
+; CHECK-NEXT: ret
+entry:
+ %a = call <vscale x 2 x i1> @llvm.riscv.vmfge.mask.nxv2bf16.bf16(
+ <vscale x 2 x i1> %0,
+ <vscale x 2 x bfloat> %1,
+ bfloat %2,
+ <vscale x 2 x i1> %3,
+ iXLen %4)
+
+ ret <vscale x 2 x i1> %a
+}
+
+declare <vscale x 4 x i1> @llvm.riscv.vmfge.nxv4bf16.bf16(
+ <vscale x 4 x bfloat>,
+ bfloat,
+ iXLen);
+
+define <vscale x 4 x i1> @intrinsic_vmfge_vf_nxv4bf16_bf16(<vscale x 4 x bfloat> %0, bfloat %1, iXLen %2) nounwind {
+; CHECK-LABEL: intrinsic_vmfge_vf_nxv4bf16_bf16:
+; CHECK: # %bb.0: # %entry
+; CHECK-NEXT: vsetvli zero, a0, e16alt, m1, ta, ma
+; CHECK-NEXT: vmfge.vf v0, v8, fa0
+; CHECK-NEXT: ret
+entry:
+ %a = call <vscale x 4 x i1> @llvm.riscv.vmfge.nxv4bf16.bf16(
+ <vscale x 4 x bfloat> %0,
+ bfloat %1,
+ iXLen %2)
+
+ ret <vscale x 4 x i1> %a
+}
+
+declare <vscale x 4 x i1> @llvm.riscv.vmfge.mask.nxv4bf16.bf16(
+ <vscale x 4 x i1>,
+ <vscale x 4 x bfloat>,
+ bfloat,
+ <vscale x 4 x i1>,
+ iXLen);
+
+define <vscale x 4 x i1> @intrinsic_vmfge_mask_vf_nxv4bf16_bf16(<vscale x 4 x i1> %0, <vscale x 4 x bfloat> %1, bfloat %2, <vscale x 4 x i1> %3, iXLen %4) nounwind {
+; CHECK-LABEL: intrinsic_vmfge_mask_vf_nxv4bf16_bf16:
+; CHECK: # %bb.0: # %entry
+; CHECK-NEXT: vsetvli zero, a0, e16alt, m1, ta, mu
+; CHECK-NEXT: vmv1r.v v10, v0
+; CHECK-NEXT: vmv1r.v v0, v9
+; CHECK-NEXT: vmfge.vf v10, v8, fa0, v0.t
+; CHECK-NEXT: vmv.v.v v0, v10
+; CHECK-NEXT: ret
+entry:
+ %a = call <vscale x 4 x i1> @llvm.riscv.vmfge.mask.nxv4bf16.bf16(
+ <vscale x 4 x i1> %0,
+ <vscale x 4 x bfloat> %1,
+ bfloat %2,
+ <vscale x 4 x i1> %3,
+ iXLen %4)
+
+ ret <vscale x 4 x i1> %a
+}
+
+declare <vscale x 8 x i1> @llvm.riscv.vmfge.nxv8bf16.bf16(
+ <vscale x 8 x bfloat>,
+ bfloat,
+ iXLen);
+
+define <vscale x 8 x i1> @intrinsic_vmfge_vf_nxv8bf16_bf16(<vscale x 8 x bfloat> %0, bfloat %1, iXLen %2) nounwind {
+; CHECK-LABEL: intrinsic_vmfge_vf_nxv8bf16_bf16:
+; CHECK: # %bb.0: # %entry
+; CHECK-NEXT: vsetvli zero, a0, e16alt, m2, ta, ma
+; CHECK-NEXT: vmfge.vf v0, v8, fa0
+; CHECK-NEXT: ret
+entry:
+ %a = call <vscale x 8 x i1> @llvm.riscv.vmfge.nxv8bf16.bf16(
+ <vscale x 8 x bfloat> %0,
+ bfloat %1,
+ iXLen %2)
+
+ ret <vscale x 8 x i1> %a
+}
+
+declare <vscale x 8 x i1> @llvm.riscv.vmfge.mask.nxv8bf16.bf16(
+ <vscale x 8 x i1>,
+ <vscale x 8 x bfloat>,
+ bfloat,
+ <vscale x 8 x i1>,
+ iXLen);
+
+define <vscale x 8 x i1> @intrinsic_vmfge_mask_vf_nxv8bf16_bf16(<vscale x 8 x i1> %0, <vscale x 8 x bfloat> %1, bfloat %2, <vscale x 8 x i1> %3, iXLen %4) nounwind {
+; CHECK-LABEL: intrinsic_vmfge_mask_vf_nxv8bf16_bf16:
+; CHECK: # %bb.0: # %entry
+; CHECK-NEXT: vsetvli zero, a0, e16alt, m2, ta, mu
+; CHECK-NEXT: vmv1r.v v11, v0
+; CHECK-NEXT: vmv1r.v v0, v10
+; CHECK-NEXT: vmfge.vf v11, v8, fa0, v0.t
+; CHECK-NEXT: vmv1r.v v0, v11
+; CHECK-NEXT: ret
+entry:
+ %a = call <vscale x 8 x i1> @llvm.riscv.vmfge.mask.nxv8bf16.bf16(
+ <vscale x 8 x i1> %0,
+ <vscale x 8 x bfloat> %1,
+ bfloat %2,
+ <vscale x 8 x i1> %3,
+ iXLen %4)
+
+ ret <vscale x 8 x i1> %a
+}
+
+declare <vscale x 16 x i1> @llvm.riscv.vmfge.nxv16bf16.bf16(
+ <vscale x 16 x bfloat>,
+ bfloat,
+ iXLen);
+
+define <vscale x 16 x i1> @intrinsic_vmfge_vf_nxv16bf16_bf16(<vscale x 16 x bfloat> %0, bfloat %1, iXLen %2) nounwind {
+; CHECK-LABEL: intrinsic_vmfge_vf_nxv16bf16_bf16:
+; CHECK: # %bb.0: # %entry
+; CHECK-NEXT: vsetvli zero, a0, e16alt, m4, ta, ma
+; CHECK-NEXT: vmfge.vf v0, v8, fa0
+; CHECK-NEXT: ret
+entry:
+ %a = call <vscale x 16 x i1> @llvm.riscv.vmfge.nxv16bf16.bf16(
+ <vscale x 16 x bfloat> %0,
+ bfloat %1,
+ iXLen %2)
+
+ ret <vscale x 16 x i1> %a
+}
+
+declare <vscale x 16 x i1> @llvm.riscv.vmfge.mask.nxv16bf16.bf16(
+ <vscale x 16 x i1>,
+ <vscale x 16 x bfloat>,
+ bfloat,
+ <vscale x 16 x i1>,
+ iXLen);
+
+define <vscale x 16 x i1> @intrinsic_vmfge_mask_vf_nxv16bf16_bf16(<vscale x 16 x i1> %0, <vscale x 16 x bfloat> %1, bfloat %2, <vscale x 16 x i1> %3, iXLen %4) nounwind {
+; CHECK-LABEL: intrinsic_vmfge_mask_vf_nxv16bf16_bf16:
+; CHECK: # %bb.0: # %entry
+; CHECK-NEXT: vsetvli zero, a0, e16alt, m4, ta, mu
+; CHECK-NEXT: vmv1r.v v13, v0
+; CHECK-NEXT: vmv1r.v v0, v12
+; CHECK-NEXT: vmfge.vf v13, v8, fa0, v0.t
+; CHECK-NEXT: vmv1r.v v0, v13
+; CHECK-NEXT: ret
+entry:
+ %a = call <vscale x 16 x i1> @llvm.riscv.vmfge.mask.nxv16bf16.bf16(
+ <vscale x 16 x i1> %0,
+ <vscale x 16 x bfloat> %1,
+ bfloat %2,
+ <vscale x 16 x i1> %3,
+ iXLen %4)
+
+ ret <vscale x 16 x i1> %a
+}
+
diff --git a/llvm/test/CodeGen/RISCV/rvv/vmfgt-bf.ll b/llvm/test/CodeGen/RISCV/rvv/vmfgt-bf.ll
new file mode 100644
index 0000000..fac324c
--- /dev/null
+++ b/llvm/test/CodeGen/RISCV/rvv/vmfgt-bf.ll
@@ -0,0 +1,496 @@
+; NOTE: Assertions have been autogenerated by utils/update_llc_test_checks.py
+; RUN: sed 's/iXLen/i32/g' %s | llc -mtriple=riscv32 -mattr=+v,+experimental-zvfbfa \
+; RUN: -verify-machineinstrs -target-abi=ilp32d | FileCheck %s
+; RUN: sed 's/iXLen/i64/g' %s | llc -mtriple=riscv64 -mattr=+v,+experimental-zvfbfa \
+; RUN: -verify-machineinstrs -target-abi=lp64d | FileCheck %s
+
+declare <vscale x 1 x i1> @llvm.riscv.vmfgt.nxv1bf16(
+ <vscale x 1 x bfloat>,
+ <vscale x 1 x bfloat>,
+ iXLen);
+
+define <vscale x 1 x i1> @intrinsic_vmfgt_vv_nxv1bf16_nxv1bf16(<vscale x 1 x bfloat> %0, <vscale x 1 x bfloat> %1, iXLen %2) nounwind {
+; CHECK-LABEL: intrinsic_vmfgt_vv_nxv1bf16_nxv1bf16:
+; CHECK: # %bb.0: # %entry
+; CHECK-NEXT: vsetvli zero, a0, e16alt, mf4, ta, ma
+; CHECK-NEXT: vmflt.vv v0, v9, v8
+; CHECK-NEXT: ret
+entry:
+ %a = call <vscale x 1 x i1> @llvm.riscv.vmfgt.nxv1bf16(
+ <vscale x 1 x bfloat> %0,
+ <vscale x 1 x bfloat> %1,
+ iXLen %2)
+
+ ret <vscale x 1 x i1> %a
+}
+
+declare <vscale x 1 x i1> @llvm.riscv.vmfgt.mask.nxv1bf16(
+ <vscale x 1 x i1>,
+ <vscale x 1 x bfloat>,
+ <vscale x 1 x bfloat>,
+ <vscale x 1 x i1>,
+ iXLen);
+
+define <vscale x 1 x i1> @intrinsic_vmfgt_mask_vv_nxv1bf16_nxv1bf16(<vscale x 1 x i1> %0, <vscale x 1 x bfloat> %1, <vscale x 1 x bfloat> %2, <vscale x 1 x bfloat> %3, iXLen %4) nounwind {
+; CHECK-LABEL: intrinsic_vmfgt_mask_vv_nxv1bf16_nxv1bf16:
+; CHECK: # %bb.0: # %entry
+; CHECK-NEXT: vsetvli zero, a0, e16alt, mf4, ta, mu
+; CHECK-NEXT: vmv1r.v v11, v0
+; CHECK-NEXT: vmflt.vv v0, v9, v8
+; CHECK-NEXT: vmflt.vv v11, v10, v9, v0.t
+; CHECK-NEXT: vmv1r.v v0, v11
+; CHECK-NEXT: ret
+entry:
+ %mask = call <vscale x 1 x i1> @llvm.riscv.vmfgt.nxv1bf16(
+ <vscale x 1 x bfloat> %1,
+ <vscale x 1 x bfloat> %2,
+ iXLen %4)
+ %a = call <vscale x 1 x i1> @llvm.riscv.vmfgt.mask.nxv1bf16(
+ <vscale x 1 x i1> %0,
+ <vscale x 1 x bfloat> %2,
+ <vscale x 1 x bfloat> %3,
+ <vscale x 1 x i1> %mask,
+ iXLen %4)
+
+ ret <vscale x 1 x i1> %a
+}
+
+declare <vscale x 2 x i1> @llvm.riscv.vmfgt.nxv2bf16(
+ <vscale x 2 x bfloat>,
+ <vscale x 2 x bfloat>,
+ iXLen);
+
+define <vscale x 2 x i1> @intrinsic_vmfgt_vv_nxv2bf16_nxv2bf16(<vscale x 2 x bfloat> %0, <vscale x 2 x bfloat> %1, iXLen %2) nounwind {
+; CHECK-LABEL: intrinsic_vmfgt_vv_nxv2bf16_nxv2bf16:
+; CHECK: # %bb.0: # %entry
+; CHECK-NEXT: vsetvli zero, a0, e16alt, mf2, ta, ma
+; CHECK-NEXT: vmflt.vv v0, v9, v8
+; CHECK-NEXT: ret
+entry:
+ %a = call <vscale x 2 x i1> @llvm.riscv.vmfgt.nxv2bf16(
+ <vscale x 2 x bfloat> %0,
+ <vscale x 2 x bfloat> %1,
+ iXLen %2)
+
+ ret <vscale x 2 x i1> %a
+}
+
+declare <vscale x 2 x i1> @llvm.riscv.vmfgt.mask.nxv2bf16(
+ <vscale x 2 x i1>,
+ <vscale x 2 x bfloat>,
+ <vscale x 2 x bfloat>,
+ <vscale x 2 x i1>,
+ iXLen);
+
+define <vscale x 2 x i1> @intrinsic_vmfgt_mask_vv_nxv2bf16_nxv2bf16(<vscale x 2 x i1> %0, <vscale x 2 x bfloat> %1, <vscale x 2 x bfloat> %2, <vscale x 2 x bfloat> %3, iXLen %4) nounwind {
+; CHECK-LABEL: intrinsic_vmfgt_mask_vv_nxv2bf16_nxv2bf16:
+; CHECK: # %bb.0: # %entry
+; CHECK-NEXT: vsetvli zero, a0, e16alt, mf2, ta, mu
+; CHECK-NEXT: vmv1r.v v11, v0
+; CHECK-NEXT: vmflt.vv v0, v9, v8
+; CHECK-NEXT: vmflt.vv v11, v10, v9, v0.t
+; CHECK-NEXT: vmv1r.v v0, v11
+; CHECK-NEXT: ret
+entry:
+ %mask = call <vscale x 2 x i1> @llvm.riscv.vmfgt.nxv2bf16(
+ <vscale x 2 x bfloat> %1,
+ <vscale x 2 x bfloat> %2,
+ iXLen %4)
+ %a = call <vscale x 2 x i1> @llvm.riscv.vmfgt.mask.nxv2bf16(
+ <vscale x 2 x i1> %0,
+ <vscale x 2 x bfloat> %2,
+ <vscale x 2 x bfloat> %3,
+ <vscale x 2 x i1> %mask,
+ iXLen %4)
+
+ ret <vscale x 2 x i1> %a
+}
+
+declare <vscale x 4 x i1> @llvm.riscv.vmfgt.nxv4bf16(
+ <vscale x 4 x bfloat>,
+ <vscale x 4 x bfloat>,
+ iXLen);
+
+define <vscale x 4 x i1> @intrinsic_vmfgt_vv_nxv4bf16_nxv4bf16(<vscale x 4 x bfloat> %0, <vscale x 4 x bfloat> %1, iXLen %2) nounwind {
+; CHECK-LABEL: intrinsic_vmfgt_vv_nxv4bf16_nxv4bf16:
+; CHECK: # %bb.0: # %entry
+; CHECK-NEXT: vsetvli zero, a0, e16alt, m1, ta, ma
+; CHECK-NEXT: vmflt.vv v0, v9, v8
+; CHECK-NEXT: ret
+entry:
+ %a = call <vscale x 4 x i1> @llvm.riscv.vmfgt.nxv4bf16(
+ <vscale x 4 x bfloat> %0,
+ <vscale x 4 x bfloat> %1,
+ iXLen %2)
+
+ ret <vscale x 4 x i1> %a
+}
+
+declare <vscale x 4 x i1> @llvm.riscv.vmfgt.mask.nxv4bf16(
+ <vscale x 4 x i1>,
+ <vscale x 4 x bfloat>,
+ <vscale x 4 x bfloat>,
+ <vscale x 4 x i1>,
+ iXLen);
+
+define <vscale x 4 x i1> @intrinsic_vmfgt_mask_vv_nxv4bf16_nxv4bf16(<vscale x 4 x i1> %0, <vscale x 4 x bfloat> %1, <vscale x 4 x bfloat> %2, <vscale x 4 x bfloat> %3, iXLen %4) nounwind {
+; CHECK-LABEL: intrinsic_vmfgt_mask_vv_nxv4bf16_nxv4bf16:
+; CHECK: # %bb.0: # %entry
+; CHECK-NEXT: vsetvli zero, a0, e16alt, m1, ta, mu
+; CHECK-NEXT: vmv1r.v v11, v0
+; CHECK-NEXT: vmflt.vv v0, v9, v8
+; CHECK-NEXT: vmflt.vv v11, v10, v9, v0.t
+; CHECK-NEXT: vmv.v.v v0, v11
+; CHECK-NEXT: ret
+entry:
+ %mask = call <vscale x 4 x i1> @llvm.riscv.vmfgt.nxv4bf16(
+ <vscale x 4 x bfloat> %1,
+ <vscale x 4 x bfloat> %2,
+ iXLen %4)
+ %a = call <vscale x 4 x i1> @llvm.riscv.vmfgt.mask.nxv4bf16(
+ <vscale x 4 x i1> %0,
+ <vscale x 4 x bfloat> %2,
+ <vscale x 4 x bfloat> %3,
+ <vscale x 4 x i1> %mask,
+ iXLen %4)
+
+ ret <vscale x 4 x i1> %a
+}
+
+declare <vscale x 8 x i1> @llvm.riscv.vmfgt.nxv8bf16(
+ <vscale x 8 x bfloat>,
+ <vscale x 8 x bfloat>,
+ iXLen);
+
+define <vscale x 8 x i1> @intrinsic_vmfgt_vv_nxv8bf16_nxv8bf16(<vscale x 8 x bfloat> %0, <vscale x 8 x bfloat> %1, iXLen %2) nounwind {
+; CHECK-LABEL: intrinsic_vmfgt_vv_nxv8bf16_nxv8bf16:
+; CHECK: # %bb.0: # %entry
+; CHECK-NEXT: vsetvli zero, a0, e16alt, m2, ta, ma
+; CHECK-NEXT: vmflt.vv v0, v10, v8
+; CHECK-NEXT: ret
+entry:
+ %a = call <vscale x 8 x i1> @llvm.riscv.vmfgt.nxv8bf16(
+ <vscale x 8 x bfloat> %0,
+ <vscale x 8 x bfloat> %1,
+ iXLen %2)
+
+ ret <vscale x 8 x i1> %a
+}
+
+declare <vscale x 8 x i1> @llvm.riscv.vmfgt.mask.nxv8bf16(
+ <vscale x 8 x i1>,
+ <vscale x 8 x bfloat>,
+ <vscale x 8 x bfloat>,
+ <vscale x 8 x i1>,
+ iXLen);
+
+define <vscale x 8 x i1> @intrinsic_vmfgt_mask_vv_nxv8bf16_nxv8bf16(<vscale x 8 x i1> %0, <vscale x 8 x bfloat> %1, <vscale x 8 x bfloat> %2, <vscale x 8 x bfloat> %3, iXLen %4) nounwind {
+; CHECK-LABEL: intrinsic_vmfgt_mask_vv_nxv8bf16_nxv8bf16:
+; CHECK: # %bb.0: # %entry
+; CHECK-NEXT: vsetvli zero, a0, e16alt, m2, ta, mu
+; CHECK-NEXT: vmv1r.v v14, v0
+; CHECK-NEXT: vmflt.vv v0, v10, v8
+; CHECK-NEXT: vmflt.vv v14, v12, v10, v0.t
+; CHECK-NEXT: vmv1r.v v0, v14
+; CHECK-NEXT: ret
+entry:
+ %mask = call <vscale x 8 x i1> @llvm.riscv.vmfgt.nxv8bf16(
+ <vscale x 8 x bfloat> %1,
+ <vscale x 8 x bfloat> %2,
+ iXLen %4)
+ %a = call <vscale x 8 x i1> @llvm.riscv.vmfgt.mask.nxv8bf16(
+ <vscale x 8 x i1> %0,
+ <vscale x 8 x bfloat> %2,
+ <vscale x 8 x bfloat> %3,
+ <vscale x 8 x i1> %mask,
+ iXLen %4)
+
+ ret <vscale x 8 x i1> %a
+}
+
+declare <vscale x 16 x i1> @llvm.riscv.vmfgt.nxv16bf16(
+ <vscale x 16 x bfloat>,
+ <vscale x 16 x bfloat>,
+ iXLen);
+
+define <vscale x 16 x i1> @intrinsic_vmfgt_vv_nxv16bf16_nxv16bf16(<vscale x 16 x bfloat> %0, <vscale x 16 x bfloat> %1, iXLen %2) nounwind {
+; CHECK-LABEL: intrinsic_vmfgt_vv_nxv16bf16_nxv16bf16:
+; CHECK: # %bb.0: # %entry
+; CHECK-NEXT: vsetvli zero, a0, e16alt, m4, ta, ma
+; CHECK-NEXT: vmflt.vv v0, v12, v8
+; CHECK-NEXT: ret
+entry:
+ %a = call <vscale x 16 x i1> @llvm.riscv.vmfgt.nxv16bf16(
+ <vscale x 16 x bfloat> %0,
+ <vscale x 16 x bfloat> %1,
+ iXLen %2)
+
+ ret <vscale x 16 x i1> %a
+}
+
+declare <vscale x 16 x i1> @llvm.riscv.vmfgt.mask.nxv16bf16(
+ <vscale x 16 x i1>,
+ <vscale x 16 x bfloat>,
+ <vscale x 16 x bfloat>,
+ <vscale x 16 x i1>,
+ iXLen);
+
+define <vscale x 16 x i1> @intrinsic_vmfgt_mask_vv_nxv16bf16_nxv16bf16(<vscale x 16 x i1> %0, <vscale x 16 x bfloat> %1, <vscale x 16 x bfloat> %2, <vscale x 16 x bfloat> %3, iXLen %4) nounwind {
+; CHECK-LABEL: intrinsic_vmfgt_mask_vv_nxv16bf16_nxv16bf16:
+; CHECK: # %bb.0: # %entry
+; CHECK-NEXT: vsetvli zero, a0, e16alt, m4, ta, mu
+; CHECK-NEXT: vmv1r.v v20, v0
+; CHECK-NEXT: vmflt.vv v0, v12, v8
+; CHECK-NEXT: vmflt.vv v20, v16, v12, v0.t
+; CHECK-NEXT: vmv1r.v v0, v20
+; CHECK-NEXT: ret
+entry:
+ %mask = call <vscale x 16 x i1> @llvm.riscv.vmfgt.nxv16bf16(
+ <vscale x 16 x bfloat> %1,
+ <vscale x 16 x bfloat> %2,
+ iXLen %4)
+ %a = call <vscale x 16 x i1> @llvm.riscv.vmfgt.mask.nxv16bf16(
+ <vscale x 16 x i1> %0,
+ <vscale x 16 x bfloat> %2,
+ <vscale x 16 x bfloat> %3,
+ <vscale x 16 x i1> %mask,
+ iXLen %4)
+
+ ret <vscale x 16 x i1> %a
+}
+
+declare <vscale x 1 x i1> @llvm.riscv.vmfgt.nxv1bf16.bf16(
+ <vscale x 1 x bfloat>,
+ bfloat,
+ iXLen);
+
+define <vscale x 1 x i1> @intrinsic_vmfgt_vf_nxv1bf16_bf16(<vscale x 1 x bfloat> %0, bfloat %1, iXLen %2) nounwind {
+; CHECK-LABEL: intrinsic_vmfgt_vf_nxv1bf16_bf16:
+; CHECK: # %bb.0: # %entry
+; CHECK-NEXT: vsetvli zero, a0, e16alt, mf4, ta, ma
+; CHECK-NEXT: vmfgt.vf v0, v8, fa0
+; CHECK-NEXT: ret
+entry:
+ %a = call <vscale x 1 x i1> @llvm.riscv.vmfgt.nxv1bf16.bf16(
+ <vscale x 1 x bfloat> %0,
+ bfloat %1,
+ iXLen %2)
+
+ ret <vscale x 1 x i1> %a
+}
+
+declare <vscale x 1 x i1> @llvm.riscv.vmfgt.mask.nxv1bf16.bf16(
+ <vscale x 1 x i1>,
+ <vscale x 1 x bfloat>,
+ bfloat,
+ <vscale x 1 x i1>,
+ iXLen);
+
+define <vscale x 1 x i1> @intrinsic_vmfgt_mask_vf_nxv1bf16_bf16(<vscale x 1 x i1> %0, <vscale x 1 x bfloat> %1, bfloat %2, <vscale x 1 x i1> %3, iXLen %4) nounwind {
+; CHECK-LABEL: intrinsic_vmfgt_mask_vf_nxv1bf16_bf16:
+; CHECK: # %bb.0: # %entry
+; CHECK-NEXT: vsetvli zero, a0, e16alt, mf4, ta, mu
+; CHECK-NEXT: vmv1r.v v10, v0
+; CHECK-NEXT: vmv1r.v v0, v9
+; CHECK-NEXT: vmfgt.vf v10, v8, fa0, v0.t
+; CHECK-NEXT: vmv1r.v v0, v10
+; CHECK-NEXT: ret
+entry:
+ %a = call <vscale x 1 x i1> @llvm.riscv.vmfgt.mask.nxv1bf16.bf16(
+ <vscale x 1 x i1> %0,
+ <vscale x 1 x bfloat> %1,
+ bfloat %2,
+ <vscale x 1 x i1> %3,
+ iXLen %4)
+
+ ret <vscale x 1 x i1> %a
+}
+
+declare <vscale x 2 x i1> @llvm.riscv.vmfgt.nxv2bf16.bf16(
+ <vscale x 2 x bfloat>,
+ bfloat,
+ iXLen);
+
+define <vscale x 2 x i1> @intrinsic_vmfgt_vf_nxv2bf16_bf16(<vscale x 2 x bfloat> %0, bfloat %1, iXLen %2) nounwind {
+; CHECK-LABEL: intrinsic_vmfgt_vf_nxv2bf16_bf16:
+; CHECK: # %bb.0: # %entry
+; CHECK-NEXT: vsetvli zero, a0, e16alt, mf2, ta, ma
+; CHECK-NEXT: vmfgt.vf v0, v8, fa0
+; CHECK-NEXT: ret
+entry:
+ %a = call <vscale x 2 x i1> @llvm.riscv.vmfgt.nxv2bf16.bf16(
+ <vscale x 2 x bfloat> %0,
+ bfloat %1,
+ iXLen %2)
+
+ ret <vscale x 2 x i1> %a
+}
+
+declare <vscale x 2 x i1> @llvm.riscv.vmfgt.mask.nxv2bf16.bf16(
+ <vscale x 2 x i1>,
+ <vscale x 2 x bfloat>,
+ bfloat,
+ <vscale x 2 x i1>,
+ iXLen);
+
+define <vscale x 2 x i1> @intrinsic_vmfgt_mask_vf_nxv2bf16_bf16(<vscale x 2 x i1> %0, <vscale x 2 x bfloat> %1, bfloat %2, <vscale x 2 x i1> %3, iXLen %4) nounwind {
+; CHECK-LABEL: intrinsic_vmfgt_mask_vf_nxv2bf16_bf16:
+; CHECK: # %bb.0: # %entry
+; CHECK-NEXT: vsetvli zero, a0, e16alt, mf2, ta, mu
+; CHECK-NEXT: vmv1r.v v10, v0
+; CHECK-NEXT: vmv1r.v v0, v9
+; CHECK-NEXT: vmfgt.vf v10, v8, fa0, v0.t
+; CHECK-NEXT: vmv1r.v v0, v10
+; CHECK-NEXT: ret
+entry:
+ %a = call <vscale x 2 x i1> @llvm.riscv.vmfgt.mask.nxv2bf16.bf16(
+ <vscale x 2 x i1> %0,
+ <vscale x 2 x bfloat> %1,
+ bfloat %2,
+ <vscale x 2 x i1> %3,
+ iXLen %4)
+
+ ret <vscale x 2 x i1> %a
+}
+
+declare <vscale x 4 x i1> @llvm.riscv.vmfgt.nxv4bf16.bf16(
+ <vscale x 4 x bfloat>,
+ bfloat,
+ iXLen);
+
+define <vscale x 4 x i1> @intrinsic_vmfgt_vf_nxv4bf16_bf16(<vscale x 4 x bfloat> %0, bfloat %1, iXLen %2) nounwind {
+; CHECK-LABEL: intrinsic_vmfgt_vf_nxv4bf16_bf16:
+; CHECK: # %bb.0: # %entry
+; CHECK-NEXT: vsetvli zero, a0, e16alt, m1, ta, ma
+; CHECK-NEXT: vmfgt.vf v0, v8, fa0
+; CHECK-NEXT: ret
+entry:
+ %a = call <vscale x 4 x i1> @llvm.riscv.vmfgt.nxv4bf16.bf16(
+ <vscale x 4 x bfloat> %0,
+ bfloat %1,
+ iXLen %2)
+
+ ret <vscale x 4 x i1> %a
+}
+
+declare <vscale x 4 x i1> @llvm.riscv.vmfgt.mask.nxv4bf16.bf16(
+ <vscale x 4 x i1>,
+ <vscale x 4 x bfloat>,
+ bfloat,
+ <vscale x 4 x i1>,
+ iXLen);
+
+define <vscale x 4 x i1> @intrinsic_vmfgt_mask_vf_nxv4bf16_bf16(<vscale x 4 x i1> %0, <vscale x 4 x bfloat> %1, bfloat %2, <vscale x 4 x i1> %3, iXLen %4) nounwind {
+; CHECK-LABEL: intrinsic_vmfgt_mask_vf_nxv4bf16_bf16:
+; CHECK: # %bb.0: # %entry
+; CHECK-NEXT: vsetvli zero, a0, e16alt, m1, ta, mu
+; CHECK-NEXT: vmv1r.v v10, v0
+; CHECK-NEXT: vmv1r.v v0, v9
+; CHECK-NEXT: vmfgt.vf v10, v8, fa0, v0.t
+; CHECK-NEXT: vmv.v.v v0, v10
+; CHECK-NEXT: ret
+entry:
+ %a = call <vscale x 4 x i1> @llvm.riscv.vmfgt.mask.nxv4bf16.bf16(
+ <vscale x 4 x i1> %0,
+ <vscale x 4 x bfloat> %1,
+ bfloat %2,
+ <vscale x 4 x i1> %3,
+ iXLen %4)
+
+ ret <vscale x 4 x i1> %a
+}
+
+declare <vscale x 8 x i1> @llvm.riscv.vmfgt.nxv8bf16.bf16(
+ <vscale x 8 x bfloat>,
+ bfloat,
+ iXLen);
+
+define <vscale x 8 x i1> @intrinsic_vmfgt_vf_nxv8bf16_bf16(<vscale x 8 x bfloat> %0, bfloat %1, iXLen %2) nounwind {
+; CHECK-LABEL: intrinsic_vmfgt_vf_nxv8bf16_bf16:
+; CHECK: # %bb.0: # %entry
+; CHECK-NEXT: vsetvli zero, a0, e16alt, m2, ta, ma
+; CHECK-NEXT: vmfgt.vf v0, v8, fa0
+; CHECK-NEXT: ret
+entry:
+ %a = call <vscale x 8 x i1> @llvm.riscv.vmfgt.nxv8bf16.bf16(
+ <vscale x 8 x bfloat> %0,
+ bfloat %1,
+ iXLen %2)
+
+ ret <vscale x 8 x i1> %a
+}
+
+declare <vscale x 8 x i1> @llvm.riscv.vmfgt.mask.nxv8bf16.bf16(
+ <vscale x 8 x i1>,
+ <vscale x 8 x bfloat>,
+ bfloat,
+ <vscale x 8 x i1>,
+ iXLen);
+
+define <vscale x 8 x i1> @intrinsic_vmfgt_mask_vf_nxv8bf16_bf16(<vscale x 8 x i1> %0, <vscale x 8 x bfloat> %1, bfloat %2, <vscale x 8 x i1> %3, iXLen %4) nounwind {
+; CHECK-LABEL: intrinsic_vmfgt_mask_vf_nxv8bf16_bf16:
+; CHECK: # %bb.0: # %entry
+; CHECK-NEXT: vsetvli zero, a0, e16alt, m2, ta, mu
+; CHECK-NEXT: vmv1r.v v11, v0
+; CHECK-NEXT: vmv1r.v v0, v10
+; CHECK-NEXT: vmfgt.vf v11, v8, fa0, v0.t
+; CHECK-NEXT: vmv1r.v v0, v11
+; CHECK-NEXT: ret
+entry:
+ %a = call <vscale x 8 x i1> @llvm.riscv.vmfgt.mask.nxv8bf16.bf16(
+ <vscale x 8 x i1> %0,
+ <vscale x 8 x bfloat> %1,
+ bfloat %2,
+ <vscale x 8 x i1> %3,
+ iXLen %4)
+
+ ret <vscale x 8 x i1> %a
+}
+
+declare <vscale x 16 x i1> @llvm.riscv.vmfgt.nxv16bf16.bf16(
+ <vscale x 16 x bfloat>,
+ bfloat,
+ iXLen);
+
+define <vscale x 16 x i1> @intrinsic_vmfgt_vf_nxv16bf16_bf16(<vscale x 16 x bfloat> %0, bfloat %1, iXLen %2) nounwind {
+; CHECK-LABEL: intrinsic_vmfgt_vf_nxv16bf16_bf16:
+; CHECK: # %bb.0: # %entry
+; CHECK-NEXT: vsetvli zero, a0, e16alt, m4, ta, ma
+; CHECK-NEXT: vmfgt.vf v0, v8, fa0
+; CHECK-NEXT: ret
+entry:
+ %a = call <vscale x 16 x i1> @llvm.riscv.vmfgt.nxv16bf16.bf16(
+ <vscale x 16 x bfloat> %0,
+ bfloat %1,
+ iXLen %2)
+
+ ret <vscale x 16 x i1> %a
+}
+
+declare <vscale x 16 x i1> @llvm.riscv.vmfgt.mask.nxv16bf16.bf16(
+ <vscale x 16 x i1>,
+ <vscale x 16 x bfloat>,
+ bfloat,
+ <vscale x 16 x i1>,
+ iXLen);
+
+define <vscale x 16 x i1> @intrinsic_vmfgt_mask_vf_nxv16bf16_bf16(<vscale x 16 x i1> %0, <vscale x 16 x bfloat> %1, bfloat %2, <vscale x 16 x i1> %3, iXLen %4) nounwind {
+; CHECK-LABEL: intrinsic_vmfgt_mask_vf_nxv16bf16_bf16:
+; CHECK: # %bb.0: # %entry
+; CHECK-NEXT: vsetvli zero, a0, e16alt, m4, ta, mu
+; CHECK-NEXT: vmv1r.v v13, v0
+; CHECK-NEXT: vmv1r.v v0, v12
+; CHECK-NEXT: vmfgt.vf v13, v8, fa0, v0.t
+; CHECK-NEXT: vmv1r.v v0, v13
+; CHECK-NEXT: ret
+entry:
+ %a = call <vscale x 16 x i1> @llvm.riscv.vmfgt.mask.nxv16bf16.bf16(
+ <vscale x 16 x i1> %0,
+ <vscale x 16 x bfloat> %1,
+ bfloat %2,
+ <vscale x 16 x i1> %3,
+ iXLen %4)
+
+ ret <vscale x 16 x i1> %a
+}
+
diff --git a/llvm/test/CodeGen/RISCV/rvv/vmfle-bf.ll b/llvm/test/CodeGen/RISCV/rvv/vmfle-bf.ll
new file mode 100644
index 0000000..8356b7b
--- /dev/null
+++ b/llvm/test/CodeGen/RISCV/rvv/vmfle-bf.ll
@@ -0,0 +1,496 @@
+; NOTE: Assertions have been autogenerated by utils/update_llc_test_checks.py
+; RUN: sed 's/iXLen/i32/g' %s | llc -mtriple=riscv32 -mattr=+v,+experimental-zvfbfa \
+; RUN: -verify-machineinstrs -target-abi=ilp32d | FileCheck %s
+; RUN: sed 's/iXLen/i64/g' %s | llc -mtriple=riscv64 -mattr=+v,+experimental-zvfbfa \
+; RUN: -verify-machineinstrs -target-abi=lp64d | FileCheck %s
+
+declare <vscale x 1 x i1> @llvm.riscv.vmfle.nxv1bf16(
+ <vscale x 1 x bfloat>,
+ <vscale x 1 x bfloat>,
+ iXLen);
+
+define <vscale x 1 x i1> @intrinsic_vmfle_vv_nxv1bf16_nxv1bf16(<vscale x 1 x bfloat> %0, <vscale x 1 x bfloat> %1, iXLen %2) nounwind {
+; CHECK-LABEL: intrinsic_vmfle_vv_nxv1bf16_nxv1bf16:
+; CHECK: # %bb.0: # %entry
+; CHECK-NEXT: vsetvli zero, a0, e16alt, mf4, ta, ma
+; CHECK-NEXT: vmfle.vv v0, v8, v9
+; CHECK-NEXT: ret
+entry:
+ %a = call <vscale x 1 x i1> @llvm.riscv.vmfle.nxv1bf16(
+ <vscale x 1 x bfloat> %0,
+ <vscale x 1 x bfloat> %1,
+ iXLen %2)
+
+ ret <vscale x 1 x i1> %a
+}
+
+declare <vscale x 1 x i1> @llvm.riscv.vmfle.mask.nxv1bf16(
+ <vscale x 1 x i1>,
+ <vscale x 1 x bfloat>,
+ <vscale x 1 x bfloat>,
+ <vscale x 1 x i1>,
+ iXLen);
+
+define <vscale x 1 x i1> @intrinsic_vmfle_mask_vv_nxv1bf16_nxv1bf16(<vscale x 1 x i1> %0, <vscale x 1 x bfloat> %1, <vscale x 1 x bfloat> %2, <vscale x 1 x bfloat> %3, iXLen %4) nounwind {
+; CHECK-LABEL: intrinsic_vmfle_mask_vv_nxv1bf16_nxv1bf16:
+; CHECK: # %bb.0: # %entry
+; CHECK-NEXT: vsetvli zero, a0, e16alt, mf4, ta, mu
+; CHECK-NEXT: vmv1r.v v11, v0
+; CHECK-NEXT: vmfle.vv v0, v8, v9
+; CHECK-NEXT: vmfle.vv v11, v9, v10, v0.t
+; CHECK-NEXT: vmv1r.v v0, v11
+; CHECK-NEXT: ret
+entry:
+ %mask = call <vscale x 1 x i1> @llvm.riscv.vmfle.nxv1bf16(
+ <vscale x 1 x bfloat> %1,
+ <vscale x 1 x bfloat> %2,
+ iXLen %4)
+ %a = call <vscale x 1 x i1> @llvm.riscv.vmfle.mask.nxv1bf16(
+ <vscale x 1 x i1> %0,
+ <vscale x 1 x bfloat> %2,
+ <vscale x 1 x bfloat> %3,
+ <vscale x 1 x i1> %mask,
+ iXLen %4)
+
+ ret <vscale x 1 x i1> %a
+}
+
+declare <vscale x 2 x i1> @llvm.riscv.vmfle.nxv2bf16(
+ <vscale x 2 x bfloat>,
+ <vscale x 2 x bfloat>,
+ iXLen);
+
+define <vscale x 2 x i1> @intrinsic_vmfle_vv_nxv2bf16_nxv2bf16(<vscale x 2 x bfloat> %0, <vscale x 2 x bfloat> %1, iXLen %2) nounwind {
+; CHECK-LABEL: intrinsic_vmfle_vv_nxv2bf16_nxv2bf16:
+; CHECK: # %bb.0: # %entry
+; CHECK-NEXT: vsetvli zero, a0, e16alt, mf2, ta, ma
+; CHECK-NEXT: vmfle.vv v0, v8, v9
+; CHECK-NEXT: ret
+entry:
+ %a = call <vscale x 2 x i1> @llvm.riscv.vmfle.nxv2bf16(
+ <vscale x 2 x bfloat> %0,
+ <vscale x 2 x bfloat> %1,
+ iXLen %2)
+
+ ret <vscale x 2 x i1> %a
+}
+
+declare <vscale x 2 x i1> @llvm.riscv.vmfle.mask.nxv2bf16(
+ <vscale x 2 x i1>,
+ <vscale x 2 x bfloat>,
+ <vscale x 2 x bfloat>,
+ <vscale x 2 x i1>,
+ iXLen);
+
+define <vscale x 2 x i1> @intrinsic_vmfle_mask_vv_nxv2bf16_nxv2bf16(<vscale x 2 x i1> %0, <vscale x 2 x bfloat> %1, <vscale x 2 x bfloat> %2, <vscale x 2 x bfloat> %3, iXLen %4) nounwind {
+; CHECK-LABEL: intrinsic_vmfle_mask_vv_nxv2bf16_nxv2bf16:
+; CHECK: # %bb.0: # %entry
+; CHECK-NEXT: vsetvli zero, a0, e16alt, mf2, ta, mu
+; CHECK-NEXT: vmv1r.v v11, v0
+; CHECK-NEXT: vmfle.vv v0, v8, v9
+; CHECK-NEXT: vmfle.vv v11, v9, v10, v0.t
+; CHECK-NEXT: vmv1r.v v0, v11
+; CHECK-NEXT: ret
+entry:
+ %mask = call <vscale x 2 x i1> @llvm.riscv.vmfle.nxv2bf16(
+ <vscale x 2 x bfloat> %1,
+ <vscale x 2 x bfloat> %2,
+ iXLen %4)
+ %a = call <vscale x 2 x i1> @llvm.riscv.vmfle.mask.nxv2bf16(
+ <vscale x 2 x i1> %0,
+ <vscale x 2 x bfloat> %2,
+ <vscale x 2 x bfloat> %3,
+ <vscale x 2 x i1> %mask,
+ iXLen %4)
+
+ ret <vscale x 2 x i1> %a
+}
+
+declare <vscale x 4 x i1> @llvm.riscv.vmfle.nxv4bf16(
+ <vscale x 4 x bfloat>,
+ <vscale x 4 x bfloat>,
+ iXLen);
+
+define <vscale x 4 x i1> @intrinsic_vmfle_vv_nxv4bf16_nxv4bf16(<vscale x 4 x bfloat> %0, <vscale x 4 x bfloat> %1, iXLen %2) nounwind {
+; CHECK-LABEL: intrinsic_vmfle_vv_nxv4bf16_nxv4bf16:
+; CHECK: # %bb.0: # %entry
+; CHECK-NEXT: vsetvli zero, a0, e16alt, m1, ta, ma
+; CHECK-NEXT: vmfle.vv v0, v8, v9
+; CHECK-NEXT: ret
+entry:
+ %a = call <vscale x 4 x i1> @llvm.riscv.vmfle.nxv4bf16(
+ <vscale x 4 x bfloat> %0,
+ <vscale x 4 x bfloat> %1,
+ iXLen %2)
+
+ ret <vscale x 4 x i1> %a
+}
+
+declare <vscale x 4 x i1> @llvm.riscv.vmfle.mask.nxv4bf16(
+ <vscale x 4 x i1>,
+ <vscale x 4 x bfloat>,
+ <vscale x 4 x bfloat>,
+ <vscale x 4 x i1>,
+ iXLen);
+
+define <vscale x 4 x i1> @intrinsic_vmfle_mask_vv_nxv4bf16_nxv4bf16(<vscale x 4 x i1> %0, <vscale x 4 x bfloat> %1, <vscale x 4 x bfloat> %2, <vscale x 4 x bfloat> %3, iXLen %4) nounwind {
+; CHECK-LABEL: intrinsic_vmfle_mask_vv_nxv4bf16_nxv4bf16:
+; CHECK: # %bb.0: # %entry
+; CHECK-NEXT: vsetvli zero, a0, e16alt, m1, ta, mu
+; CHECK-NEXT: vmv1r.v v11, v0
+; CHECK-NEXT: vmfle.vv v0, v8, v9
+; CHECK-NEXT: vmfle.vv v11, v9, v10, v0.t
+; CHECK-NEXT: vmv.v.v v0, v11
+; CHECK-NEXT: ret
+entry:
+ %mask = call <vscale x 4 x i1> @llvm.riscv.vmfle.nxv4bf16(
+ <vscale x 4 x bfloat> %1,
+ <vscale x 4 x bfloat> %2,
+ iXLen %4)
+ %a = call <vscale x 4 x i1> @llvm.riscv.vmfle.mask.nxv4bf16(
+ <vscale x 4 x i1> %0,
+ <vscale x 4 x bfloat> %2,
+ <vscale x 4 x bfloat> %3,
+ <vscale x 4 x i1> %mask,
+ iXLen %4)
+
+ ret <vscale x 4 x i1> %a
+}
+
+declare <vscale x 8 x i1> @llvm.riscv.vmfle.nxv8bf16(
+ <vscale x 8 x bfloat>,
+ <vscale x 8 x bfloat>,
+ iXLen);
+
+define <vscale x 8 x i1> @intrinsic_vmfle_vv_nxv8bf16_nxv8bf16(<vscale x 8 x bfloat> %0, <vscale x 8 x bfloat> %1, iXLen %2) nounwind {
+; CHECK-LABEL: intrinsic_vmfle_vv_nxv8bf16_nxv8bf16:
+; CHECK: # %bb.0: # %entry
+; CHECK-NEXT: vsetvli zero, a0, e16alt, m2, ta, ma
+; CHECK-NEXT: vmfle.vv v0, v8, v10
+; CHECK-NEXT: ret
+entry:
+ %a = call <vscale x 8 x i1> @llvm.riscv.vmfle.nxv8bf16(
+ <vscale x 8 x bfloat> %0,
+ <vscale x 8 x bfloat> %1,
+ iXLen %2)
+
+ ret <vscale x 8 x i1> %a
+}
+
+declare <vscale x 8 x i1> @llvm.riscv.vmfle.mask.nxv8bf16(
+ <vscale x 8 x i1>,
+ <vscale x 8 x bfloat>,
+ <vscale x 8 x bfloat>,
+ <vscale x 8 x i1>,
+ iXLen);
+
+define <vscale x 8 x i1> @intrinsic_vmfle_mask_vv_nxv8bf16_nxv8bf16(<vscale x 8 x i1> %0, <vscale x 8 x bfloat> %1, <vscale x 8 x bfloat> %2, <vscale x 8 x bfloat> %3, iXLen %4) nounwind {
+; CHECK-LABEL: intrinsic_vmfle_mask_vv_nxv8bf16_nxv8bf16:
+; CHECK: # %bb.0: # %entry
+; CHECK-NEXT: vsetvli zero, a0, e16alt, m2, ta, mu
+; CHECK-NEXT: vmv1r.v v14, v0
+; CHECK-NEXT: vmfle.vv v0, v8, v10
+; CHECK-NEXT: vmfle.vv v14, v10, v12, v0.t
+; CHECK-NEXT: vmv1r.v v0, v14
+; CHECK-NEXT: ret
+entry:
+ %mask = call <vscale x 8 x i1> @llvm.riscv.vmfle.nxv8bf16(
+ <vscale x 8 x bfloat> %1,
+ <vscale x 8 x bfloat> %2,
+ iXLen %4)
+ %a = call <vscale x 8 x i1> @llvm.riscv.vmfle.mask.nxv8bf16(
+ <vscale x 8 x i1> %0,
+ <vscale x 8 x bfloat> %2,
+ <vscale x 8 x bfloat> %3,
+ <vscale x 8 x i1> %mask,
+ iXLen %4)
+
+ ret <vscale x 8 x i1> %a
+}
+
+declare <vscale x 16 x i1> @llvm.riscv.vmfle.nxv16bf16(
+ <vscale x 16 x bfloat>,
+ <vscale x 16 x bfloat>,
+ iXLen);
+
+define <vscale x 16 x i1> @intrinsic_vmfle_vv_nxv16bf16_nxv16bf16(<vscale x 16 x bfloat> %0, <vscale x 16 x bfloat> %1, iXLen %2) nounwind {
+; CHECK-LABEL: intrinsic_vmfle_vv_nxv16bf16_nxv16bf16:
+; CHECK: # %bb.0: # %entry
+; CHECK-NEXT: vsetvli zero, a0, e16alt, m4, ta, ma
+; CHECK-NEXT: vmfle.vv v0, v8, v12
+; CHECK-NEXT: ret
+entry:
+ %a = call <vscale x 16 x i1> @llvm.riscv.vmfle.nxv16bf16(
+ <vscale x 16 x bfloat> %0,
+ <vscale x 16 x bfloat> %1,
+ iXLen %2)
+
+ ret <vscale x 16 x i1> %a
+}
+
+declare <vscale x 16 x i1> @llvm.riscv.vmfle.mask.nxv16bf16(
+ <vscale x 16 x i1>,
+ <vscale x 16 x bfloat>,
+ <vscale x 16 x bfloat>,
+ <vscale x 16 x i1>,
+ iXLen);
+
+define <vscale x 16 x i1> @intrinsic_vmfle_mask_vv_nxv16bf16_nxv16bf16(<vscale x 16 x i1> %0, <vscale x 16 x bfloat> %1, <vscale x 16 x bfloat> %2, <vscale x 16 x bfloat> %3, iXLen %4) nounwind {
+; CHECK-LABEL: intrinsic_vmfle_mask_vv_nxv16bf16_nxv16bf16:
+; CHECK: # %bb.0: # %entry
+; CHECK-NEXT: vsetvli zero, a0, e16alt, m4, ta, mu
+; CHECK-NEXT: vmv1r.v v20, v0
+; CHECK-NEXT: vmfle.vv v0, v8, v12
+; CHECK-NEXT: vmfle.vv v20, v12, v16, v0.t
+; CHECK-NEXT: vmv1r.v v0, v20
+; CHECK-NEXT: ret
+entry:
+ %mask = call <vscale x 16 x i1> @llvm.riscv.vmfle.nxv16bf16(
+ <vscale x 16 x bfloat> %1,
+ <vscale x 16 x bfloat> %2,
+ iXLen %4)
+ %a = call <vscale x 16 x i1> @llvm.riscv.vmfle.mask.nxv16bf16(
+ <vscale x 16 x i1> %0,
+ <vscale x 16 x bfloat> %2,
+ <vscale x 16 x bfloat> %3,
+ <vscale x 16 x i1> %mask,
+ iXLen %4)
+
+ ret <vscale x 16 x i1> %a
+}
+
+declare <vscale x 1 x i1> @llvm.riscv.vmfle.nxv1bf16.bf16(
+ <vscale x 1 x bfloat>,
+ bfloat,
+ iXLen);
+
+define <vscale x 1 x i1> @intrinsic_vmfle_vf_nxv1bf16_bf16(<vscale x 1 x bfloat> %0, bfloat %1, iXLen %2) nounwind {
+; CHECK-LABEL: intrinsic_vmfle_vf_nxv1bf16_bf16:
+; CHECK: # %bb.0: # %entry
+; CHECK-NEXT: vsetvli zero, a0, e16alt, mf4, ta, ma
+; CHECK-NEXT: vmfle.vf v0, v8, fa0
+; CHECK-NEXT: ret
+entry:
+ %a = call <vscale x 1 x i1> @llvm.riscv.vmfle.nxv1bf16.bf16(
+ <vscale x 1 x bfloat> %0,
+ bfloat %1,
+ iXLen %2)
+
+ ret <vscale x 1 x i1> %a
+}
+
+declare <vscale x 1 x i1> @llvm.riscv.vmfle.mask.nxv1bf16.bf16(
+ <vscale x 1 x i1>,
+ <vscale x 1 x bfloat>,
+ bfloat,
+ <vscale x 1 x i1>,
+ iXLen);
+
+define <vscale x 1 x i1> @intrinsic_vmfle_mask_vf_nxv1bf16_bf16(<vscale x 1 x i1> %0, <vscale x 1 x bfloat> %1, bfloat %2, <vscale x 1 x i1> %3, iXLen %4) nounwind {
+; CHECK-LABEL: intrinsic_vmfle_mask_vf_nxv1bf16_bf16:
+; CHECK: # %bb.0: # %entry
+; CHECK-NEXT: vsetvli zero, a0, e16alt, mf4, ta, mu
+; CHECK-NEXT: vmv1r.v v10, v0
+; CHECK-NEXT: vmv1r.v v0, v9
+; CHECK-NEXT: vmfle.vf v10, v8, fa0, v0.t
+; CHECK-NEXT: vmv1r.v v0, v10
+; CHECK-NEXT: ret
+entry:
+ %a = call <vscale x 1 x i1> @llvm.riscv.vmfle.mask.nxv1bf16.bf16(
+ <vscale x 1 x i1> %0,
+ <vscale x 1 x bfloat> %1,
+ bfloat %2,
+ <vscale x 1 x i1> %3,
+ iXLen %4)
+
+ ret <vscale x 1 x i1> %a
+}
+
+declare <vscale x 2 x i1> @llvm.riscv.vmfle.nxv2bf16.bf16(
+ <vscale x 2 x bfloat>,
+ bfloat,
+ iXLen);
+
+define <vscale x 2 x i1> @intrinsic_vmfle_vf_nxv2bf16_bf16(<vscale x 2 x bfloat> %0, bfloat %1, iXLen %2) nounwind {
+; CHECK-LABEL: intrinsic_vmfle_vf_nxv2bf16_bf16:
+; CHECK: # %bb.0: # %entry
+; CHECK-NEXT: vsetvli zero, a0, e16alt, mf2, ta, ma
+; CHECK-NEXT: vmfle.vf v0, v8, fa0
+; CHECK-NEXT: ret
+entry:
+ %a = call <vscale x 2 x i1> @llvm.riscv.vmfle.nxv2bf16.bf16(
+ <vscale x 2 x bfloat> %0,
+ bfloat %1,
+ iXLen %2)
+
+ ret <vscale x 2 x i1> %a
+}
+
+declare <vscale x 2 x i1> @llvm.riscv.vmfle.mask.nxv2bf16.bf16(
+ <vscale x 2 x i1>,
+ <vscale x 2 x bfloat>,
+ bfloat,
+ <vscale x 2 x i1>,
+ iXLen);
+
+define <vscale x 2 x i1> @intrinsic_vmfle_mask_vf_nxv2bf16_bf16(<vscale x 2 x i1> %0, <vscale x 2 x bfloat> %1, bfloat %2, <vscale x 2 x i1> %3, iXLen %4) nounwind {
+; CHECK-LABEL: intrinsic_vmfle_mask_vf_nxv2bf16_bf16:
+; CHECK: # %bb.0: # %entry
+; CHECK-NEXT: vsetvli zero, a0, e16alt, mf2, ta, mu
+; CHECK-NEXT: vmv1r.v v10, v0
+; CHECK-NEXT: vmv1r.v v0, v9
+; CHECK-NEXT: vmfle.vf v10, v8, fa0, v0.t
+; CHECK-NEXT: vmv1r.v v0, v10
+; CHECK-NEXT: ret
+entry:
+ %a = call <vscale x 2 x i1> @llvm.riscv.vmfle.mask.nxv2bf16.bf16(
+ <vscale x 2 x i1> %0,
+ <vscale x 2 x bfloat> %1,
+ bfloat %2,
+ <vscale x 2 x i1> %3,
+ iXLen %4)
+
+ ret <vscale x 2 x i1> %a
+}
+
+declare <vscale x 4 x i1> @llvm.riscv.vmfle.nxv4bf16.bf16(
+ <vscale x 4 x bfloat>,
+ bfloat,
+ iXLen);
+
+define <vscale x 4 x i1> @intrinsic_vmfle_vf_nxv4bf16_bf16(<vscale x 4 x bfloat> %0, bfloat %1, iXLen %2) nounwind {
+; CHECK-LABEL: intrinsic_vmfle_vf_nxv4bf16_bf16:
+; CHECK: # %bb.0: # %entry
+; CHECK-NEXT: vsetvli zero, a0, e16alt, m1, ta, ma
+; CHECK-NEXT: vmfle.vf v0, v8, fa0
+; CHECK-NEXT: ret
+entry:
+ %a = call <vscale x 4 x i1> @llvm.riscv.vmfle.nxv4bf16.bf16(
+ <vscale x 4 x bfloat> %0,
+ bfloat %1,
+ iXLen %2)
+
+ ret <vscale x 4 x i1> %a
+}
+
+declare <vscale x 4 x i1> @llvm.riscv.vmfle.mask.nxv4bf16.bf16(
+ <vscale x 4 x i1>,
+ <vscale x 4 x bfloat>,
+ bfloat,
+ <vscale x 4 x i1>,
+ iXLen);
+
+define <vscale x 4 x i1> @intrinsic_vmfle_mask_vf_nxv4bf16_bf16(<vscale x 4 x i1> %0, <vscale x 4 x bfloat> %1, bfloat %2, <vscale x 4 x i1> %3, iXLen %4) nounwind {
+; CHECK-LABEL: intrinsic_vmfle_mask_vf_nxv4bf16_bf16:
+; CHECK: # %bb.0: # %entry
+; CHECK-NEXT: vsetvli zero, a0, e16alt, m1, ta, mu
+; CHECK-NEXT: vmv1r.v v10, v0
+; CHECK-NEXT: vmv1r.v v0, v9
+; CHECK-NEXT: vmfle.vf v10, v8, fa0, v0.t
+; CHECK-NEXT: vmv.v.v v0, v10
+; CHECK-NEXT: ret
+entry:
+ %a = call <vscale x 4 x i1> @llvm.riscv.vmfle.mask.nxv4bf16.bf16(
+ <vscale x 4 x i1> %0,
+ <vscale x 4 x bfloat> %1,
+ bfloat %2,
+ <vscale x 4 x i1> %3,
+ iXLen %4)
+
+ ret <vscale x 4 x i1> %a
+}
+
+declare <vscale x 8 x i1> @llvm.riscv.vmfle.nxv8bf16.bf16(
+ <vscale x 8 x bfloat>,
+ bfloat,
+ iXLen);
+
+define <vscale x 8 x i1> @intrinsic_vmfle_vf_nxv8bf16_bf16(<vscale x 8 x bfloat> %0, bfloat %1, iXLen %2) nounwind {
+; CHECK-LABEL: intrinsic_vmfle_vf_nxv8bf16_bf16:
+; CHECK: # %bb.0: # %entry
+; CHECK-NEXT: vsetvli zero, a0, e16alt, m2, ta, ma
+; CHECK-NEXT: vmfle.vf v0, v8, fa0
+; CHECK-NEXT: ret
+entry:
+ %a = call <vscale x 8 x i1> @llvm.riscv.vmfle.nxv8bf16.bf16(
+ <vscale x 8 x bfloat> %0,
+ bfloat %1,
+ iXLen %2)
+
+ ret <vscale x 8 x i1> %a
+}
+
+declare <vscale x 8 x i1> @llvm.riscv.vmfle.mask.nxv8bf16.bf16(
+ <vscale x 8 x i1>,
+ <vscale x 8 x bfloat>,
+ bfloat,
+ <vscale x 8 x i1>,
+ iXLen);
+
+define <vscale x 8 x i1> @intrinsic_vmfle_mask_vf_nxv8bf16_bf16(<vscale x 8 x i1> %0, <vscale x 8 x bfloat> %1, bfloat %2, <vscale x 8 x i1> %3, iXLen %4) nounwind {
+; CHECK-LABEL: intrinsic_vmfle_mask_vf_nxv8bf16_bf16:
+; CHECK: # %bb.0: # %entry
+; CHECK-NEXT: vsetvli zero, a0, e16alt, m2, ta, mu
+; CHECK-NEXT: vmv1r.v v11, v0
+; CHECK-NEXT: vmv1r.v v0, v10
+; CHECK-NEXT: vmfle.vf v11, v8, fa0, v0.t
+; CHECK-NEXT: vmv1r.v v0, v11
+; CHECK-NEXT: ret
+entry:
+ %a = call <vscale x 8 x i1> @llvm.riscv.vmfle.mask.nxv8bf16.bf16(
+ <vscale x 8 x i1> %0,
+ <vscale x 8 x bfloat> %1,
+ bfloat %2,
+ <vscale x 8 x i1> %3,
+ iXLen %4)
+
+ ret <vscale x 8 x i1> %a
+}
+
+declare <vscale x 16 x i1> @llvm.riscv.vmfle.nxv16bf16.bf16(
+ <vscale x 16 x bfloat>,
+ bfloat,
+ iXLen);
+
+define <vscale x 16 x i1> @intrinsic_vmfle_vf_nxv16bf16_bf16(<vscale x 16 x bfloat> %0, bfloat %1, iXLen %2) nounwind {
+; CHECK-LABEL: intrinsic_vmfle_vf_nxv16bf16_bf16:
+; CHECK: # %bb.0: # %entry
+; CHECK-NEXT: vsetvli zero, a0, e16alt, m4, ta, ma
+; CHECK-NEXT: vmfle.vf v0, v8, fa0
+; CHECK-NEXT: ret
+entry:
+ %a = call <vscale x 16 x i1> @llvm.riscv.vmfle.nxv16bf16.bf16(
+ <vscale x 16 x bfloat> %0,
+ bfloat %1,
+ iXLen %2)
+
+ ret <vscale x 16 x i1> %a
+}
+
+declare <vscale x 16 x i1> @llvm.riscv.vmfle.mask.nxv16bf16.bf16(
+ <vscale x 16 x i1>,
+ <vscale x 16 x bfloat>,
+ bfloat,
+ <vscale x 16 x i1>,
+ iXLen);
+
+define <vscale x 16 x i1> @intrinsic_vmfle_mask_vf_nxv16bf16_bf16(<vscale x 16 x i1> %0, <vscale x 16 x bfloat> %1, bfloat %2, <vscale x 16 x i1> %3, iXLen %4) nounwind {
+; CHECK-LABEL: intrinsic_vmfle_mask_vf_nxv16bf16_bf16:
+; CHECK: # %bb.0: # %entry
+; CHECK-NEXT: vsetvli zero, a0, e16alt, m4, ta, mu
+; CHECK-NEXT: vmv1r.v v13, v0
+; CHECK-NEXT: vmv1r.v v0, v12
+; CHECK-NEXT: vmfle.vf v13, v8, fa0, v0.t
+; CHECK-NEXT: vmv1r.v v0, v13
+; CHECK-NEXT: ret
+entry:
+ %a = call <vscale x 16 x i1> @llvm.riscv.vmfle.mask.nxv16bf16.bf16(
+ <vscale x 16 x i1> %0,
+ <vscale x 16 x bfloat> %1,
+ bfloat %2,
+ <vscale x 16 x i1> %3,
+ iXLen %4)
+
+ ret <vscale x 16 x i1> %a
+}
+
diff --git a/llvm/test/CodeGen/RISCV/rvv/vmflt-bf.ll b/llvm/test/CodeGen/RISCV/rvv/vmflt-bf.ll
new file mode 100644
index 0000000..2e1bcc5
--- /dev/null
+++ b/llvm/test/CodeGen/RISCV/rvv/vmflt-bf.ll
@@ -0,0 +1,496 @@
+; NOTE: Assertions have been autogenerated by utils/update_llc_test_checks.py
+; RUN: sed 's/iXLen/i32/g' %s | llc -mtriple=riscv32 -mattr=+v,+experimental-zvfbfa \
+; RUN: -verify-machineinstrs -target-abi=ilp32d | FileCheck %s
+; RUN: sed 's/iXLen/i64/g' %s | llc -mtriple=riscv64 -mattr=+v,+experimental-zvfbfa \
+; RUN: -verify-machineinstrs -target-abi=lp64d | FileCheck %s
+
+declare <vscale x 1 x i1> @llvm.riscv.vmflt.nxv1bf16(
+ <vscale x 1 x bfloat>,
+ <vscale x 1 x bfloat>,
+ iXLen);
+
+define <vscale x 1 x i1> @intrinsic_vmflt_vv_nxv1bf16_nxv1bf16(<vscale x 1 x bfloat> %0, <vscale x 1 x bfloat> %1, iXLen %2) nounwind {
+; CHECK-LABEL: intrinsic_vmflt_vv_nxv1bf16_nxv1bf16:
+; CHECK: # %bb.0: # %entry
+; CHECK-NEXT: vsetvli zero, a0, e16alt, mf4, ta, ma
+; CHECK-NEXT: vmflt.vv v0, v8, v9
+; CHECK-NEXT: ret
+entry:
+ %a = call <vscale x 1 x i1> @llvm.riscv.vmflt.nxv1bf16(
+ <vscale x 1 x bfloat> %0,
+ <vscale x 1 x bfloat> %1,
+ iXLen %2)
+
+ ret <vscale x 1 x i1> %a
+}
+
+declare <vscale x 1 x i1> @llvm.riscv.vmflt.mask.nxv1bf16(
+ <vscale x 1 x i1>,
+ <vscale x 1 x bfloat>,
+ <vscale x 1 x bfloat>,
+ <vscale x 1 x i1>,
+ iXLen);
+
+define <vscale x 1 x i1> @intrinsic_vmflt_mask_vv_nxv1bf16_nxv1bf16(<vscale x 1 x i1> %0, <vscale x 1 x bfloat> %1, <vscale x 1 x bfloat> %2, <vscale x 1 x bfloat> %3, iXLen %4) nounwind {
+; CHECK-LABEL: intrinsic_vmflt_mask_vv_nxv1bf16_nxv1bf16:
+; CHECK: # %bb.0: # %entry
+; CHECK-NEXT: vsetvli zero, a0, e16alt, mf4, ta, mu
+; CHECK-NEXT: vmv1r.v v11, v0
+; CHECK-NEXT: vmflt.vv v0, v8, v9
+; CHECK-NEXT: vmflt.vv v11, v9, v10, v0.t
+; CHECK-NEXT: vmv1r.v v0, v11
+; CHECK-NEXT: ret
+entry:
+ %mask = call <vscale x 1 x i1> @llvm.riscv.vmflt.nxv1bf16(
+ <vscale x 1 x bfloat> %1,
+ <vscale x 1 x bfloat> %2,
+ iXLen %4)
+ %a = call <vscale x 1 x i1> @llvm.riscv.vmflt.mask.nxv1bf16(
+ <vscale x 1 x i1> %0,
+ <vscale x 1 x bfloat> %2,
+ <vscale x 1 x bfloat> %3,
+ <vscale x 1 x i1> %mask,
+ iXLen %4)
+
+ ret <vscale x 1 x i1> %a
+}
+
+declare <vscale x 2 x i1> @llvm.riscv.vmflt.nxv2bf16(
+ <vscale x 2 x bfloat>,
+ <vscale x 2 x bfloat>,
+ iXLen);
+
+define <vscale x 2 x i1> @intrinsic_vmflt_vv_nxv2bf16_nxv2bf16(<vscale x 2 x bfloat> %0, <vscale x 2 x bfloat> %1, iXLen %2) nounwind {
+; CHECK-LABEL: intrinsic_vmflt_vv_nxv2bf16_nxv2bf16:
+; CHECK: # %bb.0: # %entry
+; CHECK-NEXT: vsetvli zero, a0, e16alt, mf2, ta, ma
+; CHECK-NEXT: vmflt.vv v0, v8, v9
+; CHECK-NEXT: ret
+entry:
+ %a = call <vscale x 2 x i1> @llvm.riscv.vmflt.nxv2bf16(
+ <vscale x 2 x bfloat> %0,
+ <vscale x 2 x bfloat> %1,
+ iXLen %2)
+
+ ret <vscale x 2 x i1> %a
+}
+
+declare <vscale x 2 x i1> @llvm.riscv.vmflt.mask.nxv2bf16(
+ <vscale x 2 x i1>,
+ <vscale x 2 x bfloat>,
+ <vscale x 2 x bfloat>,
+ <vscale x 2 x i1>,
+ iXLen);
+
+define <vscale x 2 x i1> @intrinsic_vmflt_mask_vv_nxv2bf16_nxv2bf16(<vscale x 2 x i1> %0, <vscale x 2 x bfloat> %1, <vscale x 2 x bfloat> %2, <vscale x 2 x bfloat> %3, iXLen %4) nounwind {
+; CHECK-LABEL: intrinsic_vmflt_mask_vv_nxv2bf16_nxv2bf16:
+; CHECK: # %bb.0: # %entry
+; CHECK-NEXT: vsetvli zero, a0, e16alt, mf2, ta, mu
+; CHECK-NEXT: vmv1r.v v11, v0
+; CHECK-NEXT: vmflt.vv v0, v8, v9
+; CHECK-NEXT: vmflt.vv v11, v9, v10, v0.t
+; CHECK-NEXT: vmv1r.v v0, v11
+; CHECK-NEXT: ret
+entry:
+ %mask = call <vscale x 2 x i1> @llvm.riscv.vmflt.nxv2bf16(
+ <vscale x 2 x bfloat> %1,
+ <vscale x 2 x bfloat> %2,
+ iXLen %4)
+ %a = call <vscale x 2 x i1> @llvm.riscv.vmflt.mask.nxv2bf16(
+ <vscale x 2 x i1> %0,
+ <vscale x 2 x bfloat> %2,
+ <vscale x 2 x bfloat> %3,
+ <vscale x 2 x i1> %mask,
+ iXLen %4)
+
+ ret <vscale x 2 x i1> %a
+}
+
+declare <vscale x 4 x i1> @llvm.riscv.vmflt.nxv4bf16(
+ <vscale x 4 x bfloat>,
+ <vscale x 4 x bfloat>,
+ iXLen);
+
+define <vscale x 4 x i1> @intrinsic_vmflt_vv_nxv4bf16_nxv4bf16(<vscale x 4 x bfloat> %0, <vscale x 4 x bfloat> %1, iXLen %2) nounwind {
+; CHECK-LABEL: intrinsic_vmflt_vv_nxv4bf16_nxv4bf16:
+; CHECK: # %bb.0: # %entry
+; CHECK-NEXT: vsetvli zero, a0, e16alt, m1, ta, ma
+; CHECK-NEXT: vmflt.vv v0, v8, v9
+; CHECK-NEXT: ret
+entry:
+ %a = call <vscale x 4 x i1> @llvm.riscv.vmflt.nxv4bf16(
+ <vscale x 4 x bfloat> %0,
+ <vscale x 4 x bfloat> %1,
+ iXLen %2)
+
+ ret <vscale x 4 x i1> %a
+}
+
+declare <vscale x 4 x i1> @llvm.riscv.vmflt.mask.nxv4bf16(
+ <vscale x 4 x i1>,
+ <vscale x 4 x bfloat>,
+ <vscale x 4 x bfloat>,
+ <vscale x 4 x i1>,
+ iXLen);
+
+define <vscale x 4 x i1> @intrinsic_vmflt_mask_vv_nxv4bf16_nxv4bf16(<vscale x 4 x i1> %0, <vscale x 4 x bfloat> %1, <vscale x 4 x bfloat> %2, <vscale x 4 x bfloat> %3, iXLen %4) nounwind {
+; CHECK-LABEL: intrinsic_vmflt_mask_vv_nxv4bf16_nxv4bf16:
+; CHECK: # %bb.0: # %entry
+; CHECK-NEXT: vsetvli zero, a0, e16alt, m1, ta, mu
+; CHECK-NEXT: vmv1r.v v11, v0
+; CHECK-NEXT: vmflt.vv v0, v8, v9
+; CHECK-NEXT: vmflt.vv v11, v9, v10, v0.t
+; CHECK-NEXT: vmv.v.v v0, v11
+; CHECK-NEXT: ret
+entry:
+ %mask = call <vscale x 4 x i1> @llvm.riscv.vmflt.nxv4bf16(
+ <vscale x 4 x bfloat> %1,
+ <vscale x 4 x bfloat> %2,
+ iXLen %4)
+ %a = call <vscale x 4 x i1> @llvm.riscv.vmflt.mask.nxv4bf16(
+ <vscale x 4 x i1> %0,
+ <vscale x 4 x bfloat> %2,
+ <vscale x 4 x bfloat> %3,
+ <vscale x 4 x i1> %mask,
+ iXLen %4)
+
+ ret <vscale x 4 x i1> %a
+}
+
+declare <vscale x 8 x i1> @llvm.riscv.vmflt.nxv8bf16(
+ <vscale x 8 x bfloat>,
+ <vscale x 8 x bfloat>,
+ iXLen);
+
+define <vscale x 8 x i1> @intrinsic_vmflt_vv_nxv8bf16_nxv8bf16(<vscale x 8 x bfloat> %0, <vscale x 8 x bfloat> %1, iXLen %2) nounwind {
+; CHECK-LABEL: intrinsic_vmflt_vv_nxv8bf16_nxv8bf16:
+; CHECK: # %bb.0: # %entry
+; CHECK-NEXT: vsetvli zero, a0, e16alt, m2, ta, ma
+; CHECK-NEXT: vmflt.vv v0, v8, v10
+; CHECK-NEXT: ret
+entry:
+ %a = call <vscale x 8 x i1> @llvm.riscv.vmflt.nxv8bf16(
+ <vscale x 8 x bfloat> %0,
+ <vscale x 8 x bfloat> %1,
+ iXLen %2)
+
+ ret <vscale x 8 x i1> %a
+}
+
+declare <vscale x 8 x i1> @llvm.riscv.vmflt.mask.nxv8bf16(
+ <vscale x 8 x i1>,
+ <vscale x 8 x bfloat>,
+ <vscale x 8 x bfloat>,
+ <vscale x 8 x i1>,
+ iXLen);
+
+define <vscale x 8 x i1> @intrinsic_vmflt_mask_vv_nxv8bf16_nxv8bf16(<vscale x 8 x i1> %0, <vscale x 8 x bfloat> %1, <vscale x 8 x bfloat> %2, <vscale x 8 x bfloat> %3, iXLen %4) nounwind {
+; CHECK-LABEL: intrinsic_vmflt_mask_vv_nxv8bf16_nxv8bf16:
+; CHECK: # %bb.0: # %entry
+; CHECK-NEXT: vsetvli zero, a0, e16alt, m2, ta, mu
+; CHECK-NEXT: vmv1r.v v14, v0
+; CHECK-NEXT: vmflt.vv v0, v8, v10
+; CHECK-NEXT: vmflt.vv v14, v10, v12, v0.t
+; CHECK-NEXT: vmv1r.v v0, v14
+; CHECK-NEXT: ret
+entry:
+ %mask = call <vscale x 8 x i1> @llvm.riscv.vmflt.nxv8bf16(
+ <vscale x 8 x bfloat> %1,
+ <vscale x 8 x bfloat> %2,
+ iXLen %4)
+ %a = call <vscale x 8 x i1> @llvm.riscv.vmflt.mask.nxv8bf16(
+ <vscale x 8 x i1> %0,
+ <vscale x 8 x bfloat> %2,
+ <vscale x 8 x bfloat> %3,
+ <vscale x 8 x i1> %mask,
+ iXLen %4)
+
+ ret <vscale x 8 x i1> %a
+}
+
+declare <vscale x 16 x i1> @llvm.riscv.vmflt.nxv16bf16(
+ <vscale x 16 x bfloat>,
+ <vscale x 16 x bfloat>,
+ iXLen);
+
+define <vscale x 16 x i1> @intrinsic_vmflt_vv_nxv16bf16_nxv16bf16(<vscale x 16 x bfloat> %0, <vscale x 16 x bfloat> %1, iXLen %2) nounwind {
+; CHECK-LABEL: intrinsic_vmflt_vv_nxv16bf16_nxv16bf16:
+; CHECK: # %bb.0: # %entry
+; CHECK-NEXT: vsetvli zero, a0, e16alt, m4, ta, ma
+; CHECK-NEXT: vmflt.vv v0, v8, v12
+; CHECK-NEXT: ret
+entry:
+ %a = call <vscale x 16 x i1> @llvm.riscv.vmflt.nxv16bf16(
+ <vscale x 16 x bfloat> %0,
+ <vscale x 16 x bfloat> %1,
+ iXLen %2)
+
+ ret <vscale x 16 x i1> %a
+}
+
+declare <vscale x 16 x i1> @llvm.riscv.vmflt.mask.nxv16bf16(
+ <vscale x 16 x i1>,
+ <vscale x 16 x bfloat>,
+ <vscale x 16 x bfloat>,
+ <vscale x 16 x i1>,
+ iXLen);
+
+define <vscale x 16 x i1> @intrinsic_vmflt_mask_vv_nxv16bf16_nxv16bf16(<vscale x 16 x i1> %0, <vscale x 16 x bfloat> %1, <vscale x 16 x bfloat> %2, <vscale x 16 x bfloat> %3, iXLen %4) nounwind {
+; CHECK-LABEL: intrinsic_vmflt_mask_vv_nxv16bf16_nxv16bf16:
+; CHECK: # %bb.0: # %entry
+; CHECK-NEXT: vsetvli zero, a0, e16alt, m4, ta, mu
+; CHECK-NEXT: vmv1r.v v20, v0
+; CHECK-NEXT: vmflt.vv v0, v8, v12
+; CHECK-NEXT: vmflt.vv v20, v12, v16, v0.t
+; CHECK-NEXT: vmv1r.v v0, v20
+; CHECK-NEXT: ret
+entry:
+ %mask = call <vscale x 16 x i1> @llvm.riscv.vmflt.nxv16bf16(
+ <vscale x 16 x bfloat> %1,
+ <vscale x 16 x bfloat> %2,
+ iXLen %4)
+ %a = call <vscale x 16 x i1> @llvm.riscv.vmflt.mask.nxv16bf16(
+ <vscale x 16 x i1> %0,
+ <vscale x 16 x bfloat> %2,
+ <vscale x 16 x bfloat> %3,
+ <vscale x 16 x i1> %mask,
+ iXLen %4)
+
+ ret <vscale x 16 x i1> %a
+}
+
+declare <vscale x 1 x i1> @llvm.riscv.vmflt.nxv1bf16.bf16(
+ <vscale x 1 x bfloat>,
+ bfloat,
+ iXLen);
+
+define <vscale x 1 x i1> @intrinsic_vmflt_vf_nxv1bf16_bf16(<vscale x 1 x bfloat> %0, bfloat %1, iXLen %2) nounwind {
+; CHECK-LABEL: intrinsic_vmflt_vf_nxv1bf16_bf16:
+; CHECK: # %bb.0: # %entry
+; CHECK-NEXT: vsetvli zero, a0, e16alt, mf4, ta, ma
+; CHECK-NEXT: vmflt.vf v0, v8, fa0
+; CHECK-NEXT: ret
+entry:
+ %a = call <vscale x 1 x i1> @llvm.riscv.vmflt.nxv1bf16.bf16(
+ <vscale x 1 x bfloat> %0,
+ bfloat %1,
+ iXLen %2)
+
+ ret <vscale x 1 x i1> %a
+}
+
+declare <vscale x 1 x i1> @llvm.riscv.vmflt.mask.nxv1bf16.bf16(
+ <vscale x 1 x i1>,
+ <vscale x 1 x bfloat>,
+ bfloat,
+ <vscale x 1 x i1>,
+ iXLen);
+
+define <vscale x 1 x i1> @intrinsic_vmflt_mask_vf_nxv1bf16_bf16(<vscale x 1 x i1> %0, <vscale x 1 x bfloat> %1, bfloat %2, <vscale x 1 x i1> %3, iXLen %4) nounwind {
+; CHECK-LABEL: intrinsic_vmflt_mask_vf_nxv1bf16_bf16:
+; CHECK: # %bb.0: # %entry
+; CHECK-NEXT: vsetvli zero, a0, e16alt, mf4, ta, mu
+; CHECK-NEXT: vmv1r.v v10, v0
+; CHECK-NEXT: vmv1r.v v0, v9
+; CHECK-NEXT: vmflt.vf v10, v8, fa0, v0.t
+; CHECK-NEXT: vmv1r.v v0, v10
+; CHECK-NEXT: ret
+entry:
+ %a = call <vscale x 1 x i1> @llvm.riscv.vmflt.mask.nxv1bf16.bf16(
+ <vscale x 1 x i1> %0,
+ <vscale x 1 x bfloat> %1,
+ bfloat %2,
+ <vscale x 1 x i1> %3,
+ iXLen %4)
+
+ ret <vscale x 1 x i1> %a
+}
+
+declare <vscale x 2 x i1> @llvm.riscv.vmflt.nxv2bf16.bf16(
+ <vscale x 2 x bfloat>,
+ bfloat,
+ iXLen);
+
+define <vscale x 2 x i1> @intrinsic_vmflt_vf_nxv2bf16_bf16(<vscale x 2 x bfloat> %0, bfloat %1, iXLen %2) nounwind {
+; CHECK-LABEL: intrinsic_vmflt_vf_nxv2bf16_bf16:
+; CHECK: # %bb.0: # %entry
+; CHECK-NEXT: vsetvli zero, a0, e16alt, mf2, ta, ma
+; CHECK-NEXT: vmflt.vf v0, v8, fa0
+; CHECK-NEXT: ret
+entry:
+ %a = call <vscale x 2 x i1> @llvm.riscv.vmflt.nxv2bf16.bf16(
+ <vscale x 2 x bfloat> %0,
+ bfloat %1,
+ iXLen %2)
+
+ ret <vscale x 2 x i1> %a
+}
+
+declare <vscale x 2 x i1> @llvm.riscv.vmflt.mask.nxv2bf16.bf16(
+ <vscale x 2 x i1>,
+ <vscale x 2 x bfloat>,
+ bfloat,
+ <vscale x 2 x i1>,
+ iXLen);
+
+define <vscale x 2 x i1> @intrinsic_vmflt_mask_vf_nxv2bf16_bf16(<vscale x 2 x i1> %0, <vscale x 2 x bfloat> %1, bfloat %2, <vscale x 2 x i1> %3, iXLen %4) nounwind {
+; CHECK-LABEL: intrinsic_vmflt_mask_vf_nxv2bf16_bf16:
+; CHECK: # %bb.0: # %entry
+; CHECK-NEXT: vsetvli zero, a0, e16alt, mf2, ta, mu
+; CHECK-NEXT: vmv1r.v v10, v0
+; CHECK-NEXT: vmv1r.v v0, v9
+; CHECK-NEXT: vmflt.vf v10, v8, fa0, v0.t
+; CHECK-NEXT: vmv1r.v v0, v10
+; CHECK-NEXT: ret
+entry:
+ %a = call <vscale x 2 x i1> @llvm.riscv.vmflt.mask.nxv2bf16.bf16(
+ <vscale x 2 x i1> %0,
+ <vscale x 2 x bfloat> %1,
+ bfloat %2,
+ <vscale x 2 x i1> %3,
+ iXLen %4)
+
+ ret <vscale x 2 x i1> %a
+}
+
+declare <vscale x 4 x i1> @llvm.riscv.vmflt.nxv4bf16.bf16(
+ <vscale x 4 x bfloat>,
+ bfloat,
+ iXLen);
+
+define <vscale x 4 x i1> @intrinsic_vmflt_vf_nxv4bf16_bf16(<vscale x 4 x bfloat> %0, bfloat %1, iXLen %2) nounwind {
+; CHECK-LABEL: intrinsic_vmflt_vf_nxv4bf16_bf16:
+; CHECK: # %bb.0: # %entry
+; CHECK-NEXT: vsetvli zero, a0, e16alt, m1, ta, ma
+; CHECK-NEXT: vmflt.vf v0, v8, fa0
+; CHECK-NEXT: ret
+entry:
+ %a = call <vscale x 4 x i1> @llvm.riscv.vmflt.nxv4bf16.bf16(
+ <vscale x 4 x bfloat> %0,
+ bfloat %1,
+ iXLen %2)
+
+ ret <vscale x 4 x i1> %a
+}
+
+declare <vscale x 4 x i1> @llvm.riscv.vmflt.mask.nxv4bf16.bf16(
+ <vscale x 4 x i1>,
+ <vscale x 4 x bfloat>,
+ bfloat,
+ <vscale x 4 x i1>,
+ iXLen);
+
+define <vscale x 4 x i1> @intrinsic_vmflt_mask_vf_nxv4bf16_bf16(<vscale x 4 x i1> %0, <vscale x 4 x bfloat> %1, bfloat %2, <vscale x 4 x i1> %3, iXLen %4) nounwind {
+; CHECK-LABEL: intrinsic_vmflt_mask_vf_nxv4bf16_bf16:
+; CHECK: # %bb.0: # %entry
+; CHECK-NEXT: vsetvli zero, a0, e16alt, m1, ta, mu
+; CHECK-NEXT: vmv1r.v v10, v0
+; CHECK-NEXT: vmv1r.v v0, v9
+; CHECK-NEXT: vmflt.vf v10, v8, fa0, v0.t
+; CHECK-NEXT: vmv.v.v v0, v10
+; CHECK-NEXT: ret
+entry:
+ %a = call <vscale x 4 x i1> @llvm.riscv.vmflt.mask.nxv4bf16.bf16(
+ <vscale x 4 x i1> %0,
+ <vscale x 4 x bfloat> %1,
+ bfloat %2,
+ <vscale x 4 x i1> %3,
+ iXLen %4)
+
+ ret <vscale x 4 x i1> %a
+}
+
+declare <vscale x 8 x i1> @llvm.riscv.vmflt.nxv8bf16.bf16(
+ <vscale x 8 x bfloat>,
+ bfloat,
+ iXLen);
+
+define <vscale x 8 x i1> @intrinsic_vmflt_vf_nxv8bf16_bf16(<vscale x 8 x bfloat> %0, bfloat %1, iXLen %2) nounwind {
+; CHECK-LABEL: intrinsic_vmflt_vf_nxv8bf16_bf16:
+; CHECK: # %bb.0: # %entry
+; CHECK-NEXT: vsetvli zero, a0, e16alt, m2, ta, ma
+; CHECK-NEXT: vmflt.vf v0, v8, fa0
+; CHECK-NEXT: ret
+entry:
+ %a = call <vscale x 8 x i1> @llvm.riscv.vmflt.nxv8bf16.bf16(
+ <vscale x 8 x bfloat> %0,
+ bfloat %1,
+ iXLen %2)
+
+ ret <vscale x 8 x i1> %a
+}
+
+declare <vscale x 8 x i1> @llvm.riscv.vmflt.mask.nxv8bf16.bf16(
+ <vscale x 8 x i1>,
+ <vscale x 8 x bfloat>,
+ bfloat,
+ <vscale x 8 x i1>,
+ iXLen);
+
+define <vscale x 8 x i1> @intrinsic_vmflt_mask_vf_nxv8bf16_bf16(<vscale x 8 x i1> %0, <vscale x 8 x bfloat> %1, bfloat %2, <vscale x 8 x i1> %3, iXLen %4) nounwind {
+; CHECK-LABEL: intrinsic_vmflt_mask_vf_nxv8bf16_bf16:
+; CHECK: # %bb.0: # %entry
+; CHECK-NEXT: vsetvli zero, a0, e16alt, m2, ta, mu
+; CHECK-NEXT: vmv1r.v v11, v0
+; CHECK-NEXT: vmv1r.v v0, v10
+; CHECK-NEXT: vmflt.vf v11, v8, fa0, v0.t
+; CHECK-NEXT: vmv1r.v v0, v11
+; CHECK-NEXT: ret
+entry:
+ %a = call <vscale x 8 x i1> @llvm.riscv.vmflt.mask.nxv8bf16.bf16(
+ <vscale x 8 x i1> %0,
+ <vscale x 8 x bfloat> %1,
+ bfloat %2,
+ <vscale x 8 x i1> %3,
+ iXLen %4)
+
+ ret <vscale x 8 x i1> %a
+}
+
+declare <vscale x 16 x i1> @llvm.riscv.vmflt.nxv16bf16.bf16(
+ <vscale x 16 x bfloat>,
+ bfloat,
+ iXLen);
+
+define <vscale x 16 x i1> @intrinsic_vmflt_vf_nxv16bf16_bf16(<vscale x 16 x bfloat> %0, bfloat %1, iXLen %2) nounwind {
+; CHECK-LABEL: intrinsic_vmflt_vf_nxv16bf16_bf16:
+; CHECK: # %bb.0: # %entry
+; CHECK-NEXT: vsetvli zero, a0, e16alt, m4, ta, ma
+; CHECK-NEXT: vmflt.vf v0, v8, fa0
+; CHECK-NEXT: ret
+entry:
+ %a = call <vscale x 16 x i1> @llvm.riscv.vmflt.nxv16bf16.bf16(
+ <vscale x 16 x bfloat> %0,
+ bfloat %1,
+ iXLen %2)
+
+ ret <vscale x 16 x i1> %a
+}
+
+declare <vscale x 16 x i1> @llvm.riscv.vmflt.mask.nxv16bf16.bf16(
+ <vscale x 16 x i1>,
+ <vscale x 16 x bfloat>,
+ bfloat,
+ <vscale x 16 x i1>,
+ iXLen);
+
+define <vscale x 16 x i1> @intrinsic_vmflt_mask_vf_nxv16bf16_bf16(<vscale x 16 x i1> %0, <vscale x 16 x bfloat> %1, bfloat %2, <vscale x 16 x i1> %3, iXLen %4) nounwind {
+; CHECK-LABEL: intrinsic_vmflt_mask_vf_nxv16bf16_bf16:
+; CHECK: # %bb.0: # %entry
+; CHECK-NEXT: vsetvli zero, a0, e16alt, m4, ta, mu
+; CHECK-NEXT: vmv1r.v v13, v0
+; CHECK-NEXT: vmv1r.v v0, v12
+; CHECK-NEXT: vmflt.vf v13, v8, fa0, v0.t
+; CHECK-NEXT: vmv1r.v v0, v13
+; CHECK-NEXT: ret
+entry:
+ %a = call <vscale x 16 x i1> @llvm.riscv.vmflt.mask.nxv16bf16.bf16(
+ <vscale x 16 x i1> %0,
+ <vscale x 16 x bfloat> %1,
+ bfloat %2,
+ <vscale x 16 x i1> %3,
+ iXLen %4)
+
+ ret <vscale x 16 x i1> %a
+}
+
diff --git a/llvm/test/CodeGen/RISCV/rvv/vmfne-bf.ll b/llvm/test/CodeGen/RISCV/rvv/vmfne-bf.ll
new file mode 100644
index 0000000..283ffc5
--- /dev/null
+++ b/llvm/test/CodeGen/RISCV/rvv/vmfne-bf.ll
@@ -0,0 +1,496 @@
+; NOTE: Assertions have been autogenerated by utils/update_llc_test_checks.py
+; RUN: sed 's/iXLen/i32/g' %s | llc -mtriple=riscv32 -mattr=+v,+experimental-zvfbfa \
+; RUN: -verify-machineinstrs -target-abi=ilp32d | FileCheck %s
+; RUN: sed 's/iXLen/i64/g' %s | llc -mtriple=riscv64 -mattr=+v,+experimental-zvfbfa \
+; RUN: -verify-machineinstrs -target-abi=lp64d | FileCheck %s
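+; iXLen above is a stand-in for the target's XLEN integer type: the sed invocations in the RUN
+; lines rewrite it to i32 for riscv32 and to i64 for riscv64, so a single test body covers both targets.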
+
+declare <vscale x 1 x i1> @llvm.riscv.vmfne.nxv1bf16(
+ <vscale x 1 x bfloat>,
+ <vscale x 1 x bfloat>,
+ iXLen);
+
+define <vscale x 1 x i1> @intrinsic_vmfne_vv_nxv1bf16_nxv1bf16(<vscale x 1 x bfloat> %0, <vscale x 1 x bfloat> %1, iXLen %2) nounwind {
+; CHECK-LABEL: intrinsic_vmfne_vv_nxv1bf16_nxv1bf16:
+; CHECK: # %bb.0: # %entry
+; CHECK-NEXT: vsetvli zero, a0, e16alt, mf4, ta, ma
+; CHECK-NEXT: vmfne.vv v0, v8, v9
+; CHECK-NEXT: ret
+entry:
+ %a = call <vscale x 1 x i1> @llvm.riscv.vmfne.nxv1bf16(
+ <vscale x 1 x bfloat> %0,
+ <vscale x 1 x bfloat> %1,
+ iXLen %2)
+
+ ret <vscale x 1 x i1> %a
+}
+
+declare <vscale x 1 x i1> @llvm.riscv.vmfne.mask.nxv1bf16(
+ <vscale x 1 x i1>,
+ <vscale x 1 x bfloat>,
+ <vscale x 1 x bfloat>,
+ <vscale x 1 x i1>,
+ iXLen);
+
+define <vscale x 1 x i1> @intrinsic_vmfne_mask_vv_nxv1bf16_nxv1bf16(<vscale x 1 x i1> %0, <vscale x 1 x bfloat> %1, <vscale x 1 x bfloat> %2, <vscale x 1 x bfloat> %3, iXLen %4) nounwind {
+; CHECK-LABEL: intrinsic_vmfne_mask_vv_nxv1bf16_nxv1bf16:
+; CHECK: # %bb.0: # %entry
+; CHECK-NEXT: vsetvli zero, a0, e16alt, mf4, ta, mu
+; CHECK-NEXT: vmv1r.v v11, v0
+; CHECK-NEXT: vmfne.vv v0, v8, v9
+; CHECK-NEXT: vmfne.vv v11, v9, v10, v0.t
+; CHECK-NEXT: vmv1r.v v0, v11
+; CHECK-NEXT: ret
+entry:
+ %mask = call <vscale x 1 x i1> @llvm.riscv.vmfne.nxv1bf16(
+ <vscale x 1 x bfloat> %1,
+ <vscale x 1 x bfloat> %2,
+ iXLen %4)
+ %a = call <vscale x 1 x i1> @llvm.riscv.vmfne.mask.nxv1bf16(
+ <vscale x 1 x i1> %0,
+ <vscale x 1 x bfloat> %2,
+ <vscale x 1 x bfloat> %3,
+ <vscale x 1 x i1> %mask,
+ iXLen %4)
+
+ ret <vscale x 1 x i1> %a
+}
+
+declare <vscale x 2 x i1> @llvm.riscv.vmfne.nxv2bf16(
+ <vscale x 2 x bfloat>,
+ <vscale x 2 x bfloat>,
+ iXLen);
+
+define <vscale x 2 x i1> @intrinsic_vmfne_vv_nxv2bf16_nxv2bf16(<vscale x 2 x bfloat> %0, <vscale x 2 x bfloat> %1, iXLen %2) nounwind {
+; CHECK-LABEL: intrinsic_vmfne_vv_nxv2bf16_nxv2bf16:
+; CHECK: # %bb.0: # %entry
+; CHECK-NEXT: vsetvli zero, a0, e16alt, mf2, ta, ma
+; CHECK-NEXT: vmfne.vv v0, v8, v9
+; CHECK-NEXT: ret
+entry:
+ %a = call <vscale x 2 x i1> @llvm.riscv.vmfne.nxv2bf16(
+ <vscale x 2 x bfloat> %0,
+ <vscale x 2 x bfloat> %1,
+ iXLen %2)
+
+ ret <vscale x 2 x i1> %a
+}
+
+declare <vscale x 2 x i1> @llvm.riscv.vmfne.mask.nxv2bf16(
+ <vscale x 2 x i1>,
+ <vscale x 2 x bfloat>,
+ <vscale x 2 x bfloat>,
+ <vscale x 2 x i1>,
+ iXLen);
+
+define <vscale x 2 x i1> @intrinsic_vmfne_mask_vv_nxv2bf16_nxv2bf16(<vscale x 2 x i1> %0, <vscale x 2 x bfloat> %1, <vscale x 2 x bfloat> %2, <vscale x 2 x bfloat> %3, iXLen %4) nounwind {
+; CHECK-LABEL: intrinsic_vmfne_mask_vv_nxv2bf16_nxv2bf16:
+; CHECK: # %bb.0: # %entry
+; CHECK-NEXT: vsetvli zero, a0, e16alt, mf2, ta, mu
+; CHECK-NEXT: vmv1r.v v11, v0
+; CHECK-NEXT: vmfne.vv v0, v8, v9
+; CHECK-NEXT: vmfne.vv v11, v9, v10, v0.t
+; CHECK-NEXT: vmv1r.v v0, v11
+; CHECK-NEXT: ret
+entry:
+ %mask = call <vscale x 2 x i1> @llvm.riscv.vmfne.nxv2bf16(
+ <vscale x 2 x bfloat> %1,
+ <vscale x 2 x bfloat> %2,
+ iXLen %4)
+ %a = call <vscale x 2 x i1> @llvm.riscv.vmfne.mask.nxv2bf16(
+ <vscale x 2 x i1> %0,
+ <vscale x 2 x bfloat> %2,
+ <vscale x 2 x bfloat> %3,
+ <vscale x 2 x i1> %mask,
+ iXLen %4)
+
+ ret <vscale x 2 x i1> %a
+}
+
+declare <vscale x 4 x i1> @llvm.riscv.vmfne.nxv4bf16(
+ <vscale x 4 x bfloat>,
+ <vscale x 4 x bfloat>,
+ iXLen);
+
+define <vscale x 4 x i1> @intrinsic_vmfne_vv_nxv4bf16_nxv4bf16(<vscale x 4 x bfloat> %0, <vscale x 4 x bfloat> %1, iXLen %2) nounwind {
+; CHECK-LABEL: intrinsic_vmfne_vv_nxv4bf16_nxv4bf16:
+; CHECK: # %bb.0: # %entry
+; CHECK-NEXT: vsetvli zero, a0, e16alt, m1, ta, ma
+; CHECK-NEXT: vmfne.vv v0, v8, v9
+; CHECK-NEXT: ret
+entry:
+ %a = call <vscale x 4 x i1> @llvm.riscv.vmfne.nxv4bf16(
+ <vscale x 4 x bfloat> %0,
+ <vscale x 4 x bfloat> %1,
+ iXLen %2)
+
+ ret <vscale x 4 x i1> %a
+}
+
+declare <vscale x 4 x i1> @llvm.riscv.vmfne.mask.nxv4bf16(
+ <vscale x 4 x i1>,
+ <vscale x 4 x bfloat>,
+ <vscale x 4 x bfloat>,
+ <vscale x 4 x i1>,
+ iXLen);
+
+define <vscale x 4 x i1> @intrinsic_vmfne_mask_vv_nxv4bf16_nxv4bf16(<vscale x 4 x i1> %0, <vscale x 4 x bfloat> %1, <vscale x 4 x bfloat> %2, <vscale x 4 x bfloat> %3, iXLen %4) nounwind {
+; CHECK-LABEL: intrinsic_vmfne_mask_vv_nxv4bf16_nxv4bf16:
+; CHECK: # %bb.0: # %entry
+; CHECK-NEXT: vsetvli zero, a0, e16alt, m1, ta, mu
+; CHECK-NEXT: vmv1r.v v11, v0
+; CHECK-NEXT: vmfne.vv v0, v8, v9
+; CHECK-NEXT: vmfne.vv v11, v9, v10, v0.t
+; CHECK-NEXT: vmv.v.v v0, v11
+; CHECK-NEXT: ret
+entry:
+ %mask = call <vscale x 4 x i1> @llvm.riscv.vmfne.nxv4bf16(
+ <vscale x 4 x bfloat> %1,
+ <vscale x 4 x bfloat> %2,
+ iXLen %4)
+ %a = call <vscale x 4 x i1> @llvm.riscv.vmfne.mask.nxv4bf16(
+ <vscale x 4 x i1> %0,
+ <vscale x 4 x bfloat> %2,
+ <vscale x 4 x bfloat> %3,
+ <vscale x 4 x i1> %mask,
+ iXLen %4)
+
+ ret <vscale x 4 x i1> %a
+}
+
+declare <vscale x 8 x i1> @llvm.riscv.vmfne.nxv8bf16(
+ <vscale x 8 x bfloat>,
+ <vscale x 8 x bfloat>,
+ iXLen);
+
+define <vscale x 8 x i1> @intrinsic_vmfne_vv_nxv8bf16_nxv8bf16(<vscale x 8 x bfloat> %0, <vscale x 8 x bfloat> %1, iXLen %2) nounwind {
+; CHECK-LABEL: intrinsic_vmfne_vv_nxv8bf16_nxv8bf16:
+; CHECK: # %bb.0: # %entry
+; CHECK-NEXT: vsetvli zero, a0, e16alt, m2, ta, ma
+; CHECK-NEXT: vmfne.vv v0, v8, v10
+; CHECK-NEXT: ret
+entry:
+ %a = call <vscale x 8 x i1> @llvm.riscv.vmfne.nxv8bf16(
+ <vscale x 8 x bfloat> %0,
+ <vscale x 8 x bfloat> %1,
+ iXLen %2)
+
+ ret <vscale x 8 x i1> %a
+}
+
+declare <vscale x 8 x i1> @llvm.riscv.vmfne.mask.nxv8bf16(
+ <vscale x 8 x i1>,
+ <vscale x 8 x bfloat>,
+ <vscale x 8 x bfloat>,
+ <vscale x 8 x i1>,
+ iXLen);
+
+define <vscale x 8 x i1> @intrinsic_vmfne_mask_vv_nxv8bf16_nxv8bf16(<vscale x 8 x i1> %0, <vscale x 8 x bfloat> %1, <vscale x 8 x bfloat> %2, <vscale x 8 x bfloat> %3, iXLen %4) nounwind {
+; CHECK-LABEL: intrinsic_vmfne_mask_vv_nxv8bf16_nxv8bf16:
+; CHECK: # %bb.0: # %entry
+; CHECK-NEXT: vsetvli zero, a0, e16alt, m2, ta, mu
+; CHECK-NEXT: vmv1r.v v14, v0
+; CHECK-NEXT: vmfne.vv v0, v8, v10
+; CHECK-NEXT: vmfne.vv v14, v10, v12, v0.t
+; CHECK-NEXT: vmv1r.v v0, v14
+; CHECK-NEXT: ret
+entry:
+ %mask = call <vscale x 8 x i1> @llvm.riscv.vmfne.nxv8bf16(
+ <vscale x 8 x bfloat> %1,
+ <vscale x 8 x bfloat> %2,
+ iXLen %4)
+ %a = call <vscale x 8 x i1> @llvm.riscv.vmfne.mask.nxv8bf16(
+ <vscale x 8 x i1> %0,
+ <vscale x 8 x bfloat> %2,
+ <vscale x 8 x bfloat> %3,
+ <vscale x 8 x i1> %mask,
+ iXLen %4)
+
+ ret <vscale x 8 x i1> %a
+}
+
+declare <vscale x 16 x i1> @llvm.riscv.vmfne.nxv16bf16(
+ <vscale x 16 x bfloat>,
+ <vscale x 16 x bfloat>,
+ iXLen);
+
+define <vscale x 16 x i1> @intrinsic_vmfne_vv_nxv16bf16_nxv16bf16(<vscale x 16 x bfloat> %0, <vscale x 16 x bfloat> %1, iXLen %2) nounwind {
+; CHECK-LABEL: intrinsic_vmfne_vv_nxv16bf16_nxv16bf16:
+; CHECK: # %bb.0: # %entry
+; CHECK-NEXT: vsetvli zero, a0, e16alt, m4, ta, ma
+; CHECK-NEXT: vmfne.vv v0, v8, v12
+; CHECK-NEXT: ret
+entry:
+ %a = call <vscale x 16 x i1> @llvm.riscv.vmfne.nxv16bf16(
+ <vscale x 16 x bfloat> %0,
+ <vscale x 16 x bfloat> %1,
+ iXLen %2)
+
+ ret <vscale x 16 x i1> %a
+}
+
+declare <vscale x 16 x i1> @llvm.riscv.vmfne.mask.nxv16bf16(
+ <vscale x 16 x i1>,
+ <vscale x 16 x bfloat>,
+ <vscale x 16 x bfloat>,
+ <vscale x 16 x i1>,
+ iXLen);
+
+define <vscale x 16 x i1> @intrinsic_vmfne_mask_vv_nxv16bf16_nxv16bf16(<vscale x 16 x i1> %0, <vscale x 16 x bfloat> %1, <vscale x 16 x bfloat> %2, <vscale x 16 x bfloat> %3, iXLen %4) nounwind {
+; CHECK-LABEL: intrinsic_vmfne_mask_vv_nxv16bf16_nxv16bf16:
+; CHECK: # %bb.0: # %entry
+; CHECK-NEXT: vsetvli zero, a0, e16alt, m4, ta, mu
+; CHECK-NEXT: vmv1r.v v20, v0
+; CHECK-NEXT: vmfne.vv v0, v8, v12
+; CHECK-NEXT: vmfne.vv v20, v12, v16, v0.t
+; CHECK-NEXT: vmv1r.v v0, v20
+; CHECK-NEXT: ret
+entry:
+ %mask = call <vscale x 16 x i1> @llvm.riscv.vmfne.nxv16bf16(
+ <vscale x 16 x bfloat> %1,
+ <vscale x 16 x bfloat> %2,
+ iXLen %4)
+ %a = call <vscale x 16 x i1> @llvm.riscv.vmfne.mask.nxv16bf16(
+ <vscale x 16 x i1> %0,
+ <vscale x 16 x bfloat> %2,
+ <vscale x 16 x bfloat> %3,
+ <vscale x 16 x i1> %mask,
+ iXLen %4)
+
+ ret <vscale x 16 x i1> %a
+}
+
+declare <vscale x 1 x i1> @llvm.riscv.vmfne.nxv1bf16.bf16(
+ <vscale x 1 x bfloat>,
+ bfloat,
+ iXLen);
+
+define <vscale x 1 x i1> @intrinsic_vmfne_vf_nxv1bf16_bf16(<vscale x 1 x bfloat> %0, bfloat %1, iXLen %2) nounwind {
+; CHECK-LABEL: intrinsic_vmfne_vf_nxv1bf16_bf16:
+; CHECK: # %bb.0: # %entry
+; CHECK-NEXT: vsetvli zero, a0, e16alt, mf4, ta, ma
+; CHECK-NEXT: vmfne.vf v0, v8, fa0
+; CHECK-NEXT: ret
+entry:
+ %a = call <vscale x 1 x i1> @llvm.riscv.vmfne.nxv1bf16.bf16(
+ <vscale x 1 x bfloat> %0,
+ bfloat %1,
+ iXLen %2)
+
+ ret <vscale x 1 x i1> %a
+}
+
+declare <vscale x 1 x i1> @llvm.riscv.vmfne.mask.nxv1bf16.bf16(
+ <vscale x 1 x i1>,
+ <vscale x 1 x bfloat>,
+ bfloat,
+ <vscale x 1 x i1>,
+ iXLen);
+
+define <vscale x 1 x i1> @intrinsic_vmfne_mask_vf_nxv1bf16_bf16(<vscale x 1 x i1> %0, <vscale x 1 x bfloat> %1, bfloat %2, <vscale x 1 x i1> %3, iXLen %4) nounwind {
+; CHECK-LABEL: intrinsic_vmfne_mask_vf_nxv1bf16_bf16:
+; CHECK: # %bb.0: # %entry
+; CHECK-NEXT: vsetvli zero, a0, e16alt, mf4, ta, mu
+; CHECK-NEXT: vmv1r.v v10, v0
+; CHECK-NEXT: vmv1r.v v0, v9
+; CHECK-NEXT: vmfne.vf v10, v8, fa0, v0.t
+; CHECK-NEXT: vmv1r.v v0, v10
+; CHECK-NEXT: ret
+entry:
+ %a = call <vscale x 1 x i1> @llvm.riscv.vmfne.mask.nxv1bf16.bf16(
+ <vscale x 1 x i1> %0,
+ <vscale x 1 x bfloat> %1,
+ bfloat %2,
+ <vscale x 1 x i1> %3,
+ iXLen %4)
+
+ ret <vscale x 1 x i1> %a
+}
+
+declare <vscale x 2 x i1> @llvm.riscv.vmfne.nxv2bf16.bf16(
+ <vscale x 2 x bfloat>,
+ bfloat,
+ iXLen);
+
+define <vscale x 2 x i1> @intrinsic_vmfne_vf_nxv2bf16_bf16(<vscale x 2 x bfloat> %0, bfloat %1, iXLen %2) nounwind {
+; CHECK-LABEL: intrinsic_vmfne_vf_nxv2bf16_bf16:
+; CHECK: # %bb.0: # %entry
+; CHECK-NEXT: vsetvli zero, a0, e16alt, mf2, ta, ma
+; CHECK-NEXT: vmfne.vf v0, v8, fa0
+; CHECK-NEXT: ret
+entry:
+ %a = call <vscale x 2 x i1> @llvm.riscv.vmfne.nxv2bf16.bf16(
+ <vscale x 2 x bfloat> %0,
+ bfloat %1,
+ iXLen %2)
+
+ ret <vscale x 2 x i1> %a
+}
+
+declare <vscale x 2 x i1> @llvm.riscv.vmfne.mask.nxv2bf16.bf16(
+ <vscale x 2 x i1>,
+ <vscale x 2 x bfloat>,
+ bfloat,
+ <vscale x 2 x i1>,
+ iXLen);
+
+define <vscale x 2 x i1> @intrinsic_vmfne_mask_vf_nxv2bf16_bf16(<vscale x 2 x i1> %0, <vscale x 2 x bfloat> %1, bfloat %2, <vscale x 2 x i1> %3, iXLen %4) nounwind {
+; CHECK-LABEL: intrinsic_vmfne_mask_vf_nxv2bf16_bf16:
+; CHECK: # %bb.0: # %entry
+; CHECK-NEXT: vsetvli zero, a0, e16alt, mf2, ta, mu
+; CHECK-NEXT: vmv1r.v v10, v0
+; CHECK-NEXT: vmv1r.v v0, v9
+; CHECK-NEXT: vmfne.vf v10, v8, fa0, v0.t
+; CHECK-NEXT: vmv1r.v v0, v10
+; CHECK-NEXT: ret
+entry:
+ %a = call <vscale x 2 x i1> @llvm.riscv.vmfne.mask.nxv2bf16.bf16(
+ <vscale x 2 x i1> %0,
+ <vscale x 2 x bfloat> %1,
+ bfloat %2,
+ <vscale x 2 x i1> %3,
+ iXLen %4)
+
+ ret <vscale x 2 x i1> %a
+}
+
+declare <vscale x 4 x i1> @llvm.riscv.vmfne.nxv4bf16.bf16(
+ <vscale x 4 x bfloat>,
+ bfloat,
+ iXLen);
+
+define <vscale x 4 x i1> @intrinsic_vmfne_vf_nxv4bf16_bf16(<vscale x 4 x bfloat> %0, bfloat %1, iXLen %2) nounwind {
+; CHECK-LABEL: intrinsic_vmfne_vf_nxv4bf16_bf16:
+; CHECK: # %bb.0: # %entry
+; CHECK-NEXT: vsetvli zero, a0, e16alt, m1, ta, ma
+; CHECK-NEXT: vmfne.vf v0, v8, fa0
+; CHECK-NEXT: ret
+entry:
+ %a = call <vscale x 4 x i1> @llvm.riscv.vmfne.nxv4bf16.bf16(
+ <vscale x 4 x bfloat> %0,
+ bfloat %1,
+ iXLen %2)
+
+ ret <vscale x 4 x i1> %a
+}
+
+declare <vscale x 4 x i1> @llvm.riscv.vmfne.mask.nxv4bf16.bf16(
+ <vscale x 4 x i1>,
+ <vscale x 4 x bfloat>,
+ bfloat,
+ <vscale x 4 x i1>,
+ iXLen);
+
+define <vscale x 4 x i1> @intrinsic_vmfne_mask_vf_nxv4bf16_bf16(<vscale x 4 x i1> %0, <vscale x 4 x bfloat> %1, bfloat %2, <vscale x 4 x i1> %3, iXLen %4) nounwind {
+; CHECK-LABEL: intrinsic_vmfne_mask_vf_nxv4bf16_bf16:
+; CHECK: # %bb.0: # %entry
+; CHECK-NEXT: vsetvli zero, a0, e16alt, m1, ta, mu
+; CHECK-NEXT: vmv1r.v v10, v0
+; CHECK-NEXT: vmv1r.v v0, v9
+; CHECK-NEXT: vmfne.vf v10, v8, fa0, v0.t
+; CHECK-NEXT: vmv.v.v v0, v10
+; CHECK-NEXT: ret
+entry:
+ %a = call <vscale x 4 x i1> @llvm.riscv.vmfne.mask.nxv4bf16.bf16(
+ <vscale x 4 x i1> %0,
+ <vscale x 4 x bfloat> %1,
+ bfloat %2,
+ <vscale x 4 x i1> %3,
+ iXLen %4)
+
+ ret <vscale x 4 x i1> %a
+}
+
+declare <vscale x 8 x i1> @llvm.riscv.vmfne.nxv8bf16.bf16(
+ <vscale x 8 x bfloat>,
+ bfloat,
+ iXLen);
+
+define <vscale x 8 x i1> @intrinsic_vmfne_vf_nxv8bf16_bf16(<vscale x 8 x bfloat> %0, bfloat %1, iXLen %2) nounwind {
+; CHECK-LABEL: intrinsic_vmfne_vf_nxv8bf16_bf16:
+; CHECK: # %bb.0: # %entry
+; CHECK-NEXT: vsetvli zero, a0, e16alt, m2, ta, ma
+; CHECK-NEXT: vmfne.vf v0, v8, fa0
+; CHECK-NEXT: ret
+entry:
+ %a = call <vscale x 8 x i1> @llvm.riscv.vmfne.nxv8bf16.bf16(
+ <vscale x 8 x bfloat> %0,
+ bfloat %1,
+ iXLen %2)
+
+ ret <vscale x 8 x i1> %a
+}
+
+declare <vscale x 8 x i1> @llvm.riscv.vmfne.mask.nxv8bf16.bf16(
+ <vscale x 8 x i1>,
+ <vscale x 8 x bfloat>,
+ bfloat,
+ <vscale x 8 x i1>,
+ iXLen);
+
+define <vscale x 8 x i1> @intrinsic_vmfne_mask_vf_nxv8bf16_bf16(<vscale x 8 x i1> %0, <vscale x 8 x bfloat> %1, bfloat %2, <vscale x 8 x i1> %3, iXLen %4) nounwind {
+; CHECK-LABEL: intrinsic_vmfne_mask_vf_nxv8bf16_bf16:
+; CHECK: # %bb.0: # %entry
+; CHECK-NEXT: vsetvli zero, a0, e16alt, m2, ta, mu
+; CHECK-NEXT: vmv1r.v v11, v0
+; CHECK-NEXT: vmv1r.v v0, v10
+; CHECK-NEXT: vmfne.vf v11, v8, fa0, v0.t
+; CHECK-NEXT: vmv1r.v v0, v11
+; CHECK-NEXT: ret
+entry:
+ %a = call <vscale x 8 x i1> @llvm.riscv.vmfne.mask.nxv8bf16.bf16(
+ <vscale x 8 x i1> %0,
+ <vscale x 8 x bfloat> %1,
+ bfloat %2,
+ <vscale x 8 x i1> %3,
+ iXLen %4)
+
+ ret <vscale x 8 x i1> %a
+}
+
+declare <vscale x 16 x i1> @llvm.riscv.vmfne.nxv16bf16.bf16(
+ <vscale x 16 x bfloat>,
+ bfloat,
+ iXLen);
+
+define <vscale x 16 x i1> @intrinsic_vmfne_vf_nxv16bf16_bf16(<vscale x 16 x bfloat> %0, bfloat %1, iXLen %2) nounwind {
+; CHECK-LABEL: intrinsic_vmfne_vf_nxv16bf16_bf16:
+; CHECK: # %bb.0: # %entry
+; CHECK-NEXT: vsetvli zero, a0, e16alt, m4, ta, ma
+; CHECK-NEXT: vmfne.vf v0, v8, fa0
+; CHECK-NEXT: ret
+entry:
+ %a = call <vscale x 16 x i1> @llvm.riscv.vmfne.nxv16bf16.bf16(
+ <vscale x 16 x bfloat> %0,
+ bfloat %1,
+ iXLen %2)
+
+ ret <vscale x 16 x i1> %a
+}
+
+declare <vscale x 16 x i1> @llvm.riscv.vmfne.mask.nxv16bf16.bf16(
+ <vscale x 16 x i1>,
+ <vscale x 16 x bfloat>,
+ bfloat,
+ <vscale x 16 x i1>,
+ iXLen);
+
+define <vscale x 16 x i1> @intrinsic_vmfne_mask_vf_nxv16bf16_bf16(<vscale x 16 x i1> %0, <vscale x 16 x bfloat> %1, bfloat %2, <vscale x 16 x i1> %3, iXLen %4) nounwind {
+; CHECK-LABEL: intrinsic_vmfne_mask_vf_nxv16bf16_bf16:
+; CHECK: # %bb.0: # %entry
+; CHECK-NEXT: vsetvli zero, a0, e16alt, m4, ta, mu
+; CHECK-NEXT: vmv1r.v v13, v0
+; CHECK-NEXT: vmv1r.v v0, v12
+; CHECK-NEXT: vmfne.vf v13, v8, fa0, v0.t
+; CHECK-NEXT: vmv1r.v v0, v13
+; CHECK-NEXT: ret
+entry:
+ %a = call <vscale x 16 x i1> @llvm.riscv.vmfne.mask.nxv16bf16.bf16(
+ <vscale x 16 x i1> %0,
+ <vscale x 16 x bfloat> %1,
+ bfloat %2,
+ <vscale x 16 x i1> %3,
+ iXLen %4)
+
+ ret <vscale x 16 x i1> %a
+}
+
diff --git a/llvm/test/CodeGen/SPIRV/hlsl-resources/DynamicIdx/RWBufferDynamicIdx.ll b/llvm/test/CodeGen/SPIRV/hlsl-resources/DynamicIdx/RWBufferDynamicIdx.ll
new file mode 100644
index 0000000..cce1eda
--- /dev/null
+++ b/llvm/test/CodeGen/SPIRV/hlsl-resources/DynamicIdx/RWBufferDynamicIdx.ll
@@ -0,0 +1,22 @@
+; RUN: llc -O0 -mtriple=spirv1.6-unknown-vulkan1.3-compute %s -o - | FileCheck %s --match-full-lines
+
+%"__cblayout_$Globals" = type <{ i32 }>
+
+@i = external hidden local_unnamed_addr addrspace(12) global i32, align 4
+@ReadWriteBuf.str = private unnamed_addr constant [13 x i8] c"ReadWriteBuf\00", align 1
+@"$Globals.cb" = local_unnamed_addr global target("spirv.VulkanBuffer", target("spirv.Layout", %"__cblayout_$Globals", 4, 0), 2, 0) poison
+@"$Globals.str" = private unnamed_addr constant [9 x i8] c"$Globals\00", align 1
+
+; CHECK: OpCapability Shader
+; CHECK: OpCapability StorageTexelBufferArrayDynamicIndexingEXT
+
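+; The RWBuffer handle below is created with a runtime index (%0, loaded from @i) rather than a
+; constant, which is why the dynamic-indexing capability above must be present.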
+define void @main() local_unnamed_addr #0 {
+entry:
+ %"$Globals.cb_h.i.i" = tail call target("spirv.VulkanBuffer", target("spirv.Layout", %"__cblayout_$Globals", 4, 0), 2, 0) @"llvm.spv.resource.handlefromimplicitbinding.tspirv.VulkanBuffer_tspirv.Layout_s___cblayout_$Globalss_4_0t_2_0t"(i32 1, i32 0, i32 1, i32 0, ptr nonnull @"$Globals.str")
+ store target("spirv.VulkanBuffer", target("spirv.Layout", %"__cblayout_$Globals", 4, 0), 2, 0) %"$Globals.cb_h.i.i", ptr @"$Globals.cb", align 8
+ %0 = load i32, ptr addrspace(12) @i, align 4
+ %1 = tail call target("spirv.Image", i32, 5, 2, 0, 0, 2, 33) @llvm.spv.resource.handlefromimplicitbinding.tspirv.Image_i32_5_2_0_0_2_33t(i32 0, i32 0, i32 64, i32 %0, ptr nonnull @ReadWriteBuf.str)
+ %2 = tail call noundef align 4 dereferenceable(4) ptr addrspace(11) @llvm.spv.resource.getpointer.p11.tspirv.Image_i32_5_2_0_0_2_33t(target("spirv.Image", i32, 5, 2, 0, 0, 2, 33) %1, i32 98)
+ store i32 99, ptr addrspace(11) %2, align 4
+ ret void
+}
\ No newline at end of file
diff --git a/llvm/test/CodeGen/SPIRV/hlsl-resources/DynamicIdx/RWStructuredBufferDynamicIdx.ll b/llvm/test/CodeGen/SPIRV/hlsl-resources/DynamicIdx/RWStructuredBufferDynamicIdx.ll
new file mode 100644
index 0000000..da69a2f
--- /dev/null
+++ b/llvm/test/CodeGen/SPIRV/hlsl-resources/DynamicIdx/RWStructuredBufferDynamicIdx.ll
@@ -0,0 +1,21 @@
+; RUN: llc -O0 -mtriple=spirv1.6-unknown-vulkan1.3-compute %s -o - | FileCheck %s --match-full-lines
+
+%"__cblayout_$Globals" = type <{ i32 }>
+
+@i = external hidden local_unnamed_addr addrspace(12) global i32, align 4
+@ReadWriteStructuredBuf.str = private unnamed_addr constant [23 x i8] c"ReadWriteStructuredBuf\00", align 1
+@"$Globals.cb" = local_unnamed_addr global target("spirv.VulkanBuffer", target("spirv.Layout", %"__cblayout_$Globals", 4, 0), 2, 0) poison
+@"$Globals.str" = private unnamed_addr constant [9 x i8] c"$Globals\00", align 1
+
+; CHECK: OpCapability Shader
+; CHECK: OpCapability StorageBufferArrayDynamicIndexing
+define void @main() local_unnamed_addr #0 {
+entry:
+ %"$Globals.cb_h.i.i" = tail call target("spirv.VulkanBuffer", target("spirv.Layout", %"__cblayout_$Globals", 4, 0), 2, 0) @"llvm.spv.resource.handlefromimplicitbinding.tspirv.VulkanBuffer_tspirv.Layout_s___cblayout_$Globalss_4_0t_2_0t"(i32 2, i32 0, i32 1, i32 0, ptr nonnull @"$Globals.str")
+ store target("spirv.VulkanBuffer", target("spirv.Layout", %"__cblayout_$Globals", 4, 0), 2, 0) %"$Globals.cb_h.i.i", ptr @"$Globals.cb", align 8
+ %0 = load i32, ptr addrspace(12) @i, align 4
+ %1 = tail call target("spirv.VulkanBuffer", [0 x i32], 12, 1) @llvm.spv.resource.handlefromimplicitbinding.tspirv.VulkanBuffer_a0i32_12_1t(i32 0, i32 0, i32 64, i32 %0, ptr nonnull @ReadWriteStructuredBuf.str)
+ %2 = tail call noundef align 4 dereferenceable(4) ptr addrspace(11) @llvm.spv.resource.getpointer.p11.tspirv.VulkanBuffer_a0i32_12_1t(target("spirv.VulkanBuffer", [0 x i32], 12, 1) %1, i32 99)
+ store i32 98, ptr addrspace(11) %2, align 4
+ ret void
+}
diff --git a/llvm/test/CodeGen/SPIRV/hlsl-resources/NonUniformIdx/StructuredBufferNonUniformIdx.ll b/llvm/test/CodeGen/SPIRV/hlsl-resources/NonUniformIdx/RWBufferNonUniformIdx.ll
index 92efad9..92efad9 100644
--- a/llvm/test/CodeGen/SPIRV/hlsl-resources/NonUniformIdx/StructuredBufferNonUniformIdx.ll
+++ b/llvm/test/CodeGen/SPIRV/hlsl-resources/NonUniformIdx/RWBufferNonUniformIdx.ll
diff --git a/llvm/test/CodeGen/SPIRV/hlsl-resources/NonUniformIdx/RWStructuredBufferNonUniformIdx.ll b/llvm/test/CodeGen/SPIRV/hlsl-resources/NonUniformIdx/RWStructuredBufferNonUniformIdx.ll
index 2a12baf..a820e7a 100644
--- a/llvm/test/CodeGen/SPIRV/hlsl-resources/NonUniformIdx/RWStructuredBufferNonUniformIdx.ll
+++ b/llvm/test/CodeGen/SPIRV/hlsl-resources/NonUniformIdx/RWStructuredBufferNonUniformIdx.ll
@@ -3,6 +3,7 @@
; CHECK-DAG: OpCapability Shader
; CHECK-DAG: OpCapability ShaderNonUniformEXT
+; CHECK-DAG: OpCapability StorageBufferArrayNonUniformIndexingEXT
; CHECK-DAG: OpDecorate {{%[0-9]+}} NonUniformEXT
; CHECK-DAG: OpDecorate {{%[0-9]+}} NonUniformEXT
; CHECK-DAG: OpDecorate {{%[0-9]+}} NonUniformEXT
diff --git a/llvm/test/CodeGen/SPIRV/hlsl-resources/StorageImageDynIdx.ll b/llvm/test/CodeGen/SPIRV/hlsl-resources/StorageImageConstIdx.ll
index d002097..e4ec231 100644
--- a/llvm/test/CodeGen/SPIRV/hlsl-resources/StorageImageDynIdx.ll
+++ b/llvm/test/CodeGen/SPIRV/hlsl-resources/StorageImageConstIdx.ll
@@ -4,8 +4,8 @@
@.str.b0 = private unnamed_addr constant [3 x i8] c"B0\00", align 1
; CHECK-DAG: OpCapability Shader
-; CHECK-DAG: OpCapability StorageImageArrayDynamicIndexing
; CHECK-DAG: OpCapability Image1D
+; CHECK-DAG: OpCapability Int8
; CHECK-NOT: OpCapability
; CHECK-DAG: OpDecorate [[Var:%[0-9]+]] DescriptorSet 3
diff --git a/llvm/test/CodeGen/X86/combine-umax.ll b/llvm/test/CodeGen/X86/combine-umax.ll
index 25f8ec8..482b4fc 100644
--- a/llvm/test/CodeGen/X86/combine-umax.ll
+++ b/llvm/test/CodeGen/X86/combine-umax.ll
@@ -60,7 +60,7 @@ define <16 x i8> @test_v16i8_reassociation(<16 x i8> %a) {
define <16 x i8> @test_v16i8_demandedbits(<16 x i8> %x, <16 x i8> %y, <16 x i8> %a, <16 x i8> %b) {
; SSE2-LABEL: test_v16i8_demandedbits:
; SSE2: # %bb.0:
-; SSE2-NEXT: pmaxub %xmm1, %xmm0
+; SSE2-NEXT: por %xmm1, %xmm0
; SSE2-NEXT: pxor %xmm1, %xmm1
; SSE2-NEXT: pcmpgtb %xmm0, %xmm1
; SSE2-NEXT: pand %xmm1, %xmm3
diff --git a/llvm/test/CodeGen/X86/combine-umin.ll b/llvm/test/CodeGen/X86/combine-umin.ll
index 76dbcb5..e2757d0 100644
--- a/llvm/test/CodeGen/X86/combine-umin.ll
+++ b/llvm/test/CodeGen/X86/combine-umin.ll
@@ -77,7 +77,7 @@ define <16 x i8> @test_v16i8_reassociation(<16 x i8> %a) {
define <16 x i8> @test_v16i8_demandedbits(<16 x i8> %x, <16 x i8> %y, <16 x i8> %a, <16 x i8> %b) {
; SSE2-LABEL: test_v16i8_demandedbits:
; SSE2: # %bb.0:
-; SSE2-NEXT: pminub %xmm1, %xmm0
+; SSE2-NEXT: pand %xmm1, %xmm0
; SSE2-NEXT: pxor %xmm1, %xmm1
; SSE2-NEXT: pcmpgtb %xmm0, %xmm1
; SSE2-NEXT: pand %xmm1, %xmm3
diff --git a/llvm/test/CodeGen/X86/vector-compress.ll b/llvm/test/CodeGen/X86/vector-compress.ll
index ac932d5..1a63515 100644
--- a/llvm/test/CodeGen/X86/vector-compress.ll
+++ b/llvm/test/CodeGen/X86/vector-compress.ll
@@ -1090,7 +1090,6 @@ define <16 x i8> @test_compress_v16i8(<16 x i8> %vec, <16 x i1> %mask, <16 x i8>
; AVX2-NEXT: pushq %r12
; AVX2-NEXT: pushq %rbx
; AVX2-NEXT: vpsllw $7, %xmm1, %xmm1
-; AVX2-NEXT: vpand {{\.?LCPI[0-9]+_[0-9]+}}(%rip), %xmm1, %xmm1
; AVX2-NEXT: vpxor %xmm3, %xmm3, %xmm3
; AVX2-NEXT: vpcmpgtb %xmm1, %xmm3, %xmm1
; AVX2-NEXT: vmovaps %xmm2, -{{[0-9]+}}(%rsp)
@@ -1335,7 +1334,6 @@ define <32 x i8> @test_compress_v32i8(<32 x i8> %vec, <32 x i1> %mask, <32 x i8>
; AVX2-NEXT: andq $-32, %rsp
; AVX2-NEXT: subq $64, %rsp
; AVX2-NEXT: vpsllw $7, %ymm1, %ymm1
-; AVX2-NEXT: vpand {{\.?LCPI[0-9]+_[0-9]+}}(%rip), %ymm1, %ymm1
; AVX2-NEXT: vpxor %xmm3, %xmm3, %xmm3
; AVX2-NEXT: vpcmpgtb %ymm1, %ymm3, %ymm3
; AVX2-NEXT: vmovaps %ymm2, (%rsp)
@@ -4733,7 +4731,6 @@ define <4 x i8> @test_compress_small(<4 x i8> %vec, <4 x i1> %mask) nounwind {
; AVX2: # %bb.0:
; AVX2-NEXT: vpshufb {{.*#+}} xmm1 = xmm1[0,4,8,12],zero,zero,zero,zero,zero,zero,zero,zero,zero,zero,zero,zero
; AVX2-NEXT: vpsllw $7, %xmm1, %xmm1
-; AVX2-NEXT: vpand {{\.?LCPI[0-9]+_[0-9]+}}(%rip), %xmm1, %xmm1
; AVX2-NEXT: vpxor %xmm2, %xmm2, %xmm2
; AVX2-NEXT: vpcmpgtb %xmm1, %xmm2, %xmm1
; AVX2-NEXT: vpextrb $0, %xmm0, -{{[0-9]+}}(%rsp)
@@ -4751,72 +4748,7 @@ define <4 x i8> @test_compress_small(<4 x i8> %vec, <4 x i1> %mask) nounwind {
; AVX2-NEXT: vpextrb $3, %xmm1, %ecx
; AVX2-NEXT: andl $1, %ecx
; AVX2-NEXT: addq %rax, %rcx
-; AVX2-NEXT: vpextrb $4, %xmm0, -24(%rsp,%rcx)
-; AVX2-NEXT: vpextrb $4, %xmm1, %eax
-; AVX2-NEXT: andl $1, %eax
-; AVX2-NEXT: addq %rcx, %rax
-; AVX2-NEXT: vpextrb $5, %xmm1, %ecx
-; AVX2-NEXT: andl $1, %ecx
-; AVX2-NEXT: addq %rax, %rcx
-; AVX2-NEXT: # kill: def $eax killed $eax killed $rax def $rax
-; AVX2-NEXT: andl $15, %eax
-; AVX2-NEXT: vpextrb $5, %xmm0, -24(%rsp,%rax)
-; AVX2-NEXT: vpextrb $6, %xmm1, %eax
-; AVX2-NEXT: andl $1, %eax
-; AVX2-NEXT: addq %rcx, %rax
-; AVX2-NEXT: # kill: def $ecx killed $ecx killed $rcx def $rcx
-; AVX2-NEXT: andl $15, %ecx
-; AVX2-NEXT: vpextrb $6, %xmm0, -24(%rsp,%rcx)
-; AVX2-NEXT: vpextrb $7, %xmm1, %ecx
-; AVX2-NEXT: andl $1, %ecx
-; AVX2-NEXT: addq %rax, %rcx
-; AVX2-NEXT: # kill: def $eax killed $eax killed $rax def $rax
-; AVX2-NEXT: andl $15, %eax
-; AVX2-NEXT: vpextrb $7, %xmm0, -24(%rsp,%rax)
-; AVX2-NEXT: vpextrb $8, %xmm1, %eax
-; AVX2-NEXT: andl $1, %eax
-; AVX2-NEXT: addq %rcx, %rax
-; AVX2-NEXT: # kill: def $ecx killed $ecx killed $rcx def $rcx
-; AVX2-NEXT: andl $15, %ecx
-; AVX2-NEXT: vpextrb $8, %xmm0, -24(%rsp,%rcx)
-; AVX2-NEXT: vpextrb $9, %xmm1, %ecx
-; AVX2-NEXT: andl $1, %ecx
-; AVX2-NEXT: addq %rax, %rcx
-; AVX2-NEXT: # kill: def $eax killed $eax killed $rax def $rax
-; AVX2-NEXT: andl $15, %eax
-; AVX2-NEXT: vpextrb $9, %xmm0, -24(%rsp,%rax)
-; AVX2-NEXT: vpextrb $10, %xmm1, %eax
-; AVX2-NEXT: andl $1, %eax
-; AVX2-NEXT: addq %rcx, %rax
-; AVX2-NEXT: # kill: def $ecx killed $ecx killed $rcx def $rcx
-; AVX2-NEXT: andl $15, %ecx
-; AVX2-NEXT: vpextrb $10, %xmm0, -24(%rsp,%rcx)
-; AVX2-NEXT: vpextrb $11, %xmm1, %ecx
-; AVX2-NEXT: andl $1, %ecx
-; AVX2-NEXT: addq %rax, %rcx
-; AVX2-NEXT: # kill: def $eax killed $eax killed $rax def $rax
-; AVX2-NEXT: andl $15, %eax
-; AVX2-NEXT: vpextrb $11, %xmm0, -24(%rsp,%rax)
-; AVX2-NEXT: vpextrb $12, %xmm1, %eax
-; AVX2-NEXT: andl $1, %eax
-; AVX2-NEXT: addq %rcx, %rax
-; AVX2-NEXT: # kill: def $ecx killed $ecx killed $rcx def $rcx
-; AVX2-NEXT: andl $15, %ecx
-; AVX2-NEXT: vpextrb $12, %xmm0, -24(%rsp,%rcx)
-; AVX2-NEXT: vpextrb $13, %xmm1, %ecx
-; AVX2-NEXT: andl $1, %ecx
-; AVX2-NEXT: addq %rax, %rcx
-; AVX2-NEXT: # kill: def $eax killed $eax killed $rax def $rax
-; AVX2-NEXT: andl $15, %eax
-; AVX2-NEXT: vpextrb $13, %xmm0, -24(%rsp,%rax)
-; AVX2-NEXT: vpextrb $14, %xmm1, %eax
-; AVX2-NEXT: andl $1, %eax
-; AVX2-NEXT: addl %ecx, %eax
-; AVX2-NEXT: # kill: def $ecx killed $ecx killed $rcx def $rcx
-; AVX2-NEXT: andl $15, %ecx
-; AVX2-NEXT: vpextrb $14, %xmm0, -24(%rsp,%rcx)
-; AVX2-NEXT: andl $15, %eax
-; AVX2-NEXT: vpextrb $15, %xmm0, -24(%rsp,%rax)
+; AVX2-NEXT: vpextrb $15, %xmm0, -24(%rsp,%rcx)
; AVX2-NEXT: vmovaps -{{[0-9]+}}(%rsp), %xmm0
; AVX2-NEXT: retq
;
diff --git a/llvm/test/CodeGen/X86/vselect-avx.ll b/llvm/test/CodeGen/X86/vselect-avx.ll
index 1c5be03..ac330a7 100644
--- a/llvm/test/CodeGen/X86/vselect-avx.ll
+++ b/llvm/test/CodeGen/X86/vselect-avx.ll
@@ -151,23 +151,19 @@ define <32 x i8> @PR22706(<32 x i1> %x) {
; AVX1: ## %bb.0:
; AVX1-NEXT: vextractf128 $1, %ymm0, %xmm1
; AVX1-NEXT: vpsllw $7, %xmm1, %xmm1
-; AVX1-NEXT: vbroadcastss {{.*#+}} xmm2 = [128,128,128,128,128,128,128,128,128,128,128,128,128,128,128,128]
-; AVX1-NEXT: vpand %xmm2, %xmm1, %xmm1
-; AVX1-NEXT: vpxor %xmm3, %xmm3, %xmm3
-; AVX1-NEXT: vpcmpgtb %xmm1, %xmm3, %xmm1
-; AVX1-NEXT: vbroadcastss {{.*#+}} xmm4 = [2,2,2,2,2,2,2,2,2,2,2,2,2,2,2,2]
-; AVX1-NEXT: vpaddb %xmm4, %xmm1, %xmm1
+; AVX1-NEXT: vpxor %xmm2, %xmm2, %xmm2
+; AVX1-NEXT: vpcmpgtb %xmm1, %xmm2, %xmm1
+; AVX1-NEXT: vbroadcastss {{.*#+}} xmm3 = [2,2,2,2,2,2,2,2,2,2,2,2,2,2,2,2]
+; AVX1-NEXT: vpaddb %xmm3, %xmm1, %xmm1
; AVX1-NEXT: vpsllw $7, %xmm0, %xmm0
-; AVX1-NEXT: vpand %xmm2, %xmm0, %xmm0
-; AVX1-NEXT: vpcmpgtb %xmm0, %xmm3, %xmm0
-; AVX1-NEXT: vpaddb %xmm4, %xmm0, %xmm0
+; AVX1-NEXT: vpcmpgtb %xmm0, %xmm2, %xmm0
+; AVX1-NEXT: vpaddb %xmm3, %xmm0, %xmm0
; AVX1-NEXT: vinsertf128 $1, %xmm1, %ymm0, %ymm0
; AVX1-NEXT: retq
;
; AVX2-LABEL: PR22706:
; AVX2: ## %bb.0:
; AVX2-NEXT: vpsllw $7, %ymm0, %ymm0
-; AVX2-NEXT: vpand {{\.?LCPI[0-9]+_[0-9]+}}(%rip), %ymm0, %ymm0
; AVX2-NEXT: vpxor %xmm1, %xmm1, %xmm1
; AVX2-NEXT: vpcmpgtb %ymm0, %ymm1, %ymm0
; AVX2-NEXT: vpaddb {{\.?LCPI[0-9]+_[0-9]+}}(%rip), %ymm0, %ymm0
diff --git a/llvm/test/CodeGen/X86/zero-call-used-regs-simd.ll b/llvm/test/CodeGen/X86/zero-call-used-regs-simd.ll
new file mode 100644
index 0000000..d9253e0
--- /dev/null
+++ b/llvm/test/CodeGen/X86/zero-call-used-regs-simd.ll
@@ -0,0 +1,216 @@
+; NOTE: Assertions have been autogenerated by utils/update_llc_test_checks.py UTC_ARGS: --version 6
+; RUN: llc < %s -mtriple=x86_64-- -mattr=+sse2 -verify-machineinstrs | FileCheck %s --check-prefixes=SSE
+; RUN: llc < %s -mtriple=x86_64-- -mattr=+avx -verify-machineinstrs | FileCheck %s --check-prefixes=AVX,AVX1
+; RUN: llc < %s -mtriple=x86_64-- -mattr=+avx2 -verify-machineinstrs | FileCheck %s --check-prefixes=AVX,AVX2
+; RUN: llc < %s -mtriple=x86_64-- -mattr=+avx512f,+avx512vl -verify-machineinstrs | FileCheck %s --check-prefixes=AVX512,AVX512VL
+; RUN: llc < %s -mtriple=x86_64-- -mattr=+avx512f,+avx512vl,+avx512bw -verify-machineinstrs | FileCheck %s --check-prefixes=AVX512,AVX512BW
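+;
+; The "zero-call-used-regs"="used" attribute (see the bottom of this file) asks the backend to zero
+; the call-clobbered registers the function actually used before returning; the checks below verify
+; that the vector (and, for AVX-512, mask) registers holding the argument are cleared after the store.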
+
+define void @zero_xmm(<4 x i32> %arg) #0 {
+; SSE-LABEL: zero_xmm:
+; SSE: # %bb.0:
+; SSE-NEXT: movaps %xmm0, 0
+; SSE-NEXT: xorps %xmm0, %xmm0
+; SSE-NEXT: retq
+;
+; AVX-LABEL: zero_xmm:
+; AVX: # %bb.0:
+; AVX-NEXT: vmovaps %xmm0, 0
+; AVX-NEXT: vxorps %xmm0, %xmm0, %xmm0
+; AVX-NEXT: retq
+;
+; AVX512-LABEL: zero_xmm:
+; AVX512: # %bb.0:
+; AVX512-NEXT: vmovaps %xmm0, 0
+; AVX512-NEXT: vxorps %xmm0, %xmm0, %xmm0
+; AVX512-NEXT: retq
+ store <4 x i32> %arg, ptr null, align 32
+ ret void
+}
+
+define void @zero_ymm(<8 x i32> %arg) #0 {
+; SSE-LABEL: zero_ymm:
+; SSE: # %bb.0:
+; SSE-NEXT: movaps %xmm1, 16
+; SSE-NEXT: movaps %xmm0, 0
+; SSE-NEXT: xorps %xmm0, %xmm0
+; SSE-NEXT: xorps %xmm1, %xmm1
+; SSE-NEXT: retq
+;
+; AVX-LABEL: zero_ymm:
+; AVX: # %bb.0:
+; AVX-NEXT: vmovaps %ymm0, 0
+; AVX-NEXT: vxorps %xmm0, %xmm0, %xmm0
+; AVX-NEXT: vzeroupper
+; AVX-NEXT: retq
+;
+; AVX512-LABEL: zero_ymm:
+; AVX512: # %bb.0:
+; AVX512-NEXT: vmovaps %ymm0, 0
+; AVX512-NEXT: vxorps %xmm0, %xmm0, %xmm0
+; AVX512-NEXT: vzeroupper
+; AVX512-NEXT: retq
+ store <8 x i32> %arg, ptr null, align 32
+ ret void
+}
+
+define void @zero_zmm(<16 x i32> %arg) #0 {
+; SSE-LABEL: zero_zmm:
+; SSE: # %bb.0:
+; SSE-NEXT: movaps %xmm3, 48
+; SSE-NEXT: movaps %xmm2, 32
+; SSE-NEXT: movaps %xmm1, 16
+; SSE-NEXT: movaps %xmm0, 0
+; SSE-NEXT: xorps %xmm0, %xmm0
+; SSE-NEXT: xorps %xmm1, %xmm1
+; SSE-NEXT: xorps %xmm2, %xmm2
+; SSE-NEXT: xorps %xmm3, %xmm3
+; SSE-NEXT: retq
+;
+; AVX-LABEL: zero_zmm:
+; AVX: # %bb.0:
+; AVX-NEXT: vmovaps %ymm1, 32
+; AVX-NEXT: vmovaps %ymm0, 0
+; AVX-NEXT: vxorps %xmm0, %xmm0, %xmm0
+; AVX-NEXT: vxorps %xmm1, %xmm1, %xmm1
+; AVX-NEXT: vzeroupper
+; AVX-NEXT: retq
+;
+; AVX512-LABEL: zero_zmm:
+; AVX512: # %bb.0:
+; AVX512-NEXT: vmovups %zmm0, 0
+; AVX512-NEXT: vxorps %xmm0, %xmm0, %xmm0
+; AVX512-NEXT: vzeroupper
+; AVX512-NEXT: retq
+ store <16 x i32> %arg, ptr null, align 32
+ ret void
+}
+
+define void @zero_k(<8 x i32> %arg, <8 x i1> %mask) #0 {
+; SSE-LABEL: zero_k:
+; SSE: # %bb.0:
+; SSE-NEXT: psllw $15, %xmm2
+; SSE-NEXT: packsswb %xmm2, %xmm2
+; SSE-NEXT: pmovmskb %xmm2, %eax
+; SSE-NEXT: testb $1, %al
+; SSE-NEXT: jne .LBB3_1
+; SSE-NEXT: # %bb.2: # %else
+; SSE-NEXT: testb $2, %al
+; SSE-NEXT: jne .LBB3_3
+; SSE-NEXT: .LBB3_4: # %else2
+; SSE-NEXT: testb $4, %al
+; SSE-NEXT: jne .LBB3_5
+; SSE-NEXT: .LBB3_6: # %else4
+; SSE-NEXT: testb $8, %al
+; SSE-NEXT: jne .LBB3_7
+; SSE-NEXT: .LBB3_8: # %else6
+; SSE-NEXT: testb $16, %al
+; SSE-NEXT: jne .LBB3_9
+; SSE-NEXT: .LBB3_10: # %else8
+; SSE-NEXT: testb $32, %al
+; SSE-NEXT: jne .LBB3_11
+; SSE-NEXT: .LBB3_12: # %else10
+; SSE-NEXT: testb $64, %al
+; SSE-NEXT: jne .LBB3_13
+; SSE-NEXT: .LBB3_14: # %else12
+; SSE-NEXT: testb $-128, %al
+; SSE-NEXT: je .LBB3_16
+; SSE-NEXT: .LBB3_15: # %cond.store13
+; SSE-NEXT: pshufd {{.*#+}} xmm0 = xmm1[3,3,3,3]
+; SSE-NEXT: movd %xmm0, 28
+; SSE-NEXT: .LBB3_16: # %else14
+; SSE-NEXT: xorl %eax, %eax
+; SSE-NEXT: pxor %xmm0, %xmm0
+; SSE-NEXT: pxor %xmm1, %xmm1
+; SSE-NEXT: pxor %xmm2, %xmm2
+; SSE-NEXT: retq
+; SSE-NEXT: .LBB3_1: # %cond.store
+; SSE-NEXT: movd %xmm0, 0
+; SSE-NEXT: testb $2, %al
+; SSE-NEXT: je .LBB3_4
+; SSE-NEXT: .LBB3_3: # %cond.store1
+; SSE-NEXT: pshufd {{.*#+}} xmm2 = xmm0[1,1,1,1]
+; SSE-NEXT: movd %xmm2, 4
+; SSE-NEXT: testb $4, %al
+; SSE-NEXT: je .LBB3_6
+; SSE-NEXT: .LBB3_5: # %cond.store3
+; SSE-NEXT: pshufd {{.*#+}} xmm2 = xmm0[2,3,2,3]
+; SSE-NEXT: movd %xmm2, 8
+; SSE-NEXT: testb $8, %al
+; SSE-NEXT: je .LBB3_8
+; SSE-NEXT: .LBB3_7: # %cond.store5
+; SSE-NEXT: pshufd {{.*#+}} xmm0 = xmm0[3,3,3,3]
+; SSE-NEXT: movd %xmm0, 12
+; SSE-NEXT: testb $16, %al
+; SSE-NEXT: je .LBB3_10
+; SSE-NEXT: .LBB3_9: # %cond.store7
+; SSE-NEXT: movd %xmm1, 16
+; SSE-NEXT: testb $32, %al
+; SSE-NEXT: je .LBB3_12
+; SSE-NEXT: .LBB3_11: # %cond.store9
+; SSE-NEXT: pshufd {{.*#+}} xmm0 = xmm1[1,1,1,1]
+; SSE-NEXT: movd %xmm0, 20
+; SSE-NEXT: testb $64, %al
+; SSE-NEXT: je .LBB3_14
+; SSE-NEXT: .LBB3_13: # %cond.store11
+; SSE-NEXT: pshufd {{.*#+}} xmm0 = xmm1[2,3,2,3]
+; SSE-NEXT: movd %xmm0, 24
+; SSE-NEXT: testb $-128, %al
+; SSE-NEXT: jne .LBB3_15
+; SSE-NEXT: jmp .LBB3_16
+;
+; AVX1-LABEL: zero_k:
+; AVX1: # %bb.0:
+; AVX1-NEXT: vpmovzxwd {{.*#+}} xmm2 = xmm1[0],zero,xmm1[1],zero,xmm1[2],zero,xmm1[3],zero
+; AVX1-NEXT: vpslld $31, %xmm2, %xmm2
+; AVX1-NEXT: vpunpckhwd {{.*#+}} xmm1 = xmm1[4,4,5,5,6,6,7,7]
+; AVX1-NEXT: vpslld $31, %xmm1, %xmm1
+; AVX1-NEXT: vinsertf128 $1, %xmm1, %ymm2, %ymm1
+; AVX1-NEXT: vmaskmovps %ymm0, %ymm1, 0
+; AVX1-NEXT: vxorps %xmm0, %xmm0, %xmm0
+; AVX1-NEXT: vxorps %xmm1, %xmm1, %xmm1
+; AVX1-NEXT: vpxor %xmm2, %xmm2, %xmm2
+; AVX1-NEXT: vxorps %xmm0, %xmm0, %xmm0
+; AVX1-NEXT: vxorps %xmm1, %xmm1, %xmm1
+; AVX1-NEXT: vpxor %xmm2, %xmm2, %xmm2
+; AVX1-NEXT: vzeroupper
+; AVX1-NEXT: retq
+;
+; AVX2-LABEL: zero_k:
+; AVX2: # %bb.0:
+; AVX2-NEXT: vpmovzxwd {{.*#+}} ymm1 = xmm1[0],zero,xmm1[1],zero,xmm1[2],zero,xmm1[3],zero,xmm1[4],zero,xmm1[5],zero,xmm1[6],zero,xmm1[7],zero
+; AVX2-NEXT: vpslld $31, %ymm1, %ymm1
+; AVX2-NEXT: vpmaskmovd %ymm0, %ymm1, 0
+; AVX2-NEXT: vpxor %xmm1, %xmm1, %xmm1
+; AVX2-NEXT: vpxor %xmm0, %xmm0, %xmm0
+; AVX2-NEXT: vpxor %xmm1, %xmm1, %xmm1
+; AVX2-NEXT: vzeroupper
+; AVX2-NEXT: retq
+;
+; AVX512VL-LABEL: zero_k:
+; AVX512VL: # %bb.0:
+; AVX512VL-NEXT: vpmovsxwd %xmm1, %ymm1
+; AVX512VL-NEXT: vpslld $31, %ymm1, %ymm1
+; AVX512VL-NEXT: vptestmd %ymm1, %ymm1, %k1
+; AVX512VL-NEXT: vmovdqa32 %ymm0, 0 {%k1}
+; AVX512VL-NEXT: vpxor %xmm1, %xmm1, %xmm1
+; AVX512VL-NEXT: vpxor %xmm0, %xmm0, %xmm0
+; AVX512VL-NEXT: vpxor %xmm1, %xmm1, %xmm1
+; AVX512VL-NEXT: kxorw %k0, %k0, %k1
+; AVX512VL-NEXT: vzeroupper
+; AVX512VL-NEXT: retq
+;
+; AVX512BW-LABEL: zero_k:
+; AVX512BW: # %bb.0:
+; AVX512BW-NEXT: vpsllw $15, %xmm1, %xmm1
+; AVX512BW-NEXT: vpmovw2m %xmm1, %k1
+; AVX512BW-NEXT: vmovdqa32 %ymm0, 0 {%k1}
+; AVX512BW-NEXT: vpxor %xmm1, %xmm1, %xmm1
+; AVX512BW-NEXT: vpxor %xmm0, %xmm0, %xmm0
+; AVX512BW-NEXT: kxorq %k0, %k0, %k1
+; AVX512BW-NEXT: vzeroupper
+; AVX512BW-NEXT: retq
+ tail call void @llvm.masked.store.v8i32.p0(<8 x i32> %arg, ptr null, i32 32, <8 x i1> %mask)
+ ret void
+}
+
+attributes #0 = { "zero-call-used-regs"="used" }
diff --git a/llvm/test/DebugInfo/Generic/compileunit-source-language-name.ll b/llvm/test/DebugInfo/Generic/compileunit-source-language-name.ll
index e2b6167..c8cc871 100644
--- a/llvm/test/DebugInfo/Generic/compileunit-source-language-name.ll
+++ b/llvm/test/DebugInfo/Generic/compileunit-source-language-name.ll
@@ -1,6 +1,10 @@
; RUN: %llc_dwarf -filetype=obj -O0 < %s | llvm-dwarfdump -debug-info - | FileCheck %s --implicit-check-not "DW_AT_language"
-; CHECK: DW_AT_language_name (DW_LNAME_ObjC_plus_plus)
+; CHECK: DW_AT_language_name (DW_LNAME_ObjC_plus_plus)
+; CHECK: DW_AT_language_name (DW_LNAME_C_plus_plus)
+; CHECK: DW_AT_language_version (201100)
+; CHECK: DW_AT_language_name (DW_LNAME_Rust)
+; CHECK-NOT: DW_AT_language_version
@x = global i32 0, align 4, !dbg !0
@@ -9,7 +13,7 @@ define void @_Z4funcv() !dbg !8 {
ret void, !dbg !11
}
-!llvm.dbg.cu = !{!2}
+!llvm.dbg.cu = !{!2, !12, !13}
!llvm.module.flags = !{!6, !7}
!0 = !DIGlobalVariableExpression(var: !1, expr: !DIExpression())
@@ -24,3 +28,5 @@ define void @_Z4funcv() !dbg !8 {
!9 = !DISubroutineType(types: !10)
!10 = !{null}
!11 = !DILocation(line: 2, column: 14, scope: !8)
+!12 = distinct !DICompileUnit(sourceLanguageName: DW_LNAME_C_plus_plus, sourceLanguageVersion: 201100, file: !3, producer: "handwritten", isOptimized: false, runtimeVersion: 0, emissionKind: FullDebug, globals: !4, splitDebugInlining: false, nameTableKind: Apple, sysroot: "/")
+!13 = distinct !DICompileUnit(sourceLanguageName: DW_LNAME_Rust, sourceLanguageVersion: 0, file: !3, producer: "handwritten", isOptimized: false, runtimeVersion: 0, emissionKind: FullDebug, globals: !4, splitDebugInlining: false, nameTableKind: Apple, sysroot: "/")
diff --git a/llvm/test/Instrumentation/AddressSanitizer/asan-win-dont-instrument-catchpad.ll b/llvm/test/Instrumentation/AddressSanitizer/asan-win-dont-instrument-catchpad.ll
new file mode 100644
index 0000000..e38da0b
--- /dev/null
+++ b/llvm/test/Instrumentation/AddressSanitizer/asan-win-dont-instrument-catchpad.ll
@@ -0,0 +1,63 @@
+; RUN: opt < %s -passes=asan -S | FileCheck %s
+; CHECK: %ex = alloca i32, align 4
+; CHECK: catchpad within %{{.*}} [ptr @"??_R0H@8", i32 0, ptr %ex]
+
+; This test ensures that catch parameters are not instrumented on Windows.
+
+; This file was generated using the following source
+;
+; ```C++
+; #include <exception>
+; #include <cstdio>
+;
+; int main() {
+; try {
+; throw 1;
+; } catch (const int ex) {
+; printf("%d\n", ex);
+; return -1;
+; }
+; return 0;
+; }
+;
+; ```
+; then running the following sequence of commands
+;
+; ```
+; clang.exe -g0 -O0 -emit-llvm -c main.cpp -o main.bc
+; llvm-extract.exe -func=main main.bc -o main_func.bc
+; llvm-dis.exe main_func.bc -o main_func_dis.ll
+; ```
+; and finally manually trimming the resulting `.ll` file to remove
+; unnecessary metadata, and manually adding the `sanitize_address` annotation,
+; which is needed for the ASan pass to run.
+
+target triple = "x86_64-pc-windows-msvc"
+
+@"??_R0H@8" = external global ptr
+
+; Function Attrs: sanitize_address
+define i32 @main() sanitize_address personality ptr @__CxxFrameHandler3 {
+entry:
+ %ex = alloca i32, align 4
+ invoke void @throw()
+ to label %unreachable unwind label %catch.dispatch
+
+catch.dispatch: ; preds = %entry
+ %0 = catchswitch within none [label %catch] unwind to caller
+
+catch: ; preds = %catch.dispatch
+ %1 = catchpad within %0 [ptr @"??_R0H@8", i32 0, ptr %ex]
+ call void @opaque() [ "funclet"(token %1) ]
+ catchret from %1 to label %return
+
+return: ; preds = %catch
+ ret i32 0
+
+unreachable: ; preds = %entry
+ unreachable
+}
+
+declare void @throw() noreturn
+declare void @opaque()
+declare i32 @__CxxFrameHandler3(...)
diff --git a/llvm/test/MC/Disassembler/AMDGPU/gfx8_vop3cx_nowarn.txt b/llvm/test/MC/Disassembler/AMDGPU/gfx8_vop3cx_nowarn.txt
new file mode 100644
index 0000000..d4888ad
--- /dev/null
+++ b/llvm/test/MC/Disassembler/AMDGPU/gfx8_vop3cx_nowarn.txt
@@ -0,0 +1,422 @@
+# RUN: llvm-mc -triple=amdgcn -mcpu=tonga -disassemble -show-encoding < %s | FileCheck -strict-whitespace %s
+
+# In GFX10+, v_cmpx_* use EXEC as the implicit dst. The disassembler issues a warning when the dst
+# is not 0x7e (EXEC). In GFX9 and earlier, these instructions have an explicit dst. Therefore, such
+# warnings should not be issued.
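+# As a reading aid: the first byte of each encoding below is the explicit sdst field, e.g. 0x0a is
+# s[10:11], 0x6a is vcc and 0x7e is exec, so only a 0x7e dst would correspond to the GFX10+ implicit
+# EXEC form.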
+
+# CHECK-NOT: [[@LINE+2]]:1: warning: potentially undefined instruction encoding
+# CHECK: v_cmpx_class_f32_e64 s[10:11], v1, v2 ; encoding: [0x0a,0x00,0x11,0xd0,0x01,0x05,0x02,0x00]
+0x0a,0x00,0x11,0xd0,0x01,0x05,0x02,0x00
+
+# CHECK-NOT: [[@LINE+2]]:1: warning: potentially undefined instruction encoding
+# CHECK: v_cmpx_class_f32_e64 flat_scratch, v1, v2 ; encoding: [0x66,0x00,0x11,0xd0,0x01,0x05,0x02,0x00]
+0x66,0x00,0x11,0xd0,0x01,0x05,0x02,0x00
+
+# CHECK-NOT: [[@LINE+2]]:1: warning: potentially undefined instruction encoding
+# CHECK: v_cmpx_class_f32_e64 vcc, v1, v2 ; encoding: [0x6a,0x00,0x11,0xd0,0x01,0x05,0x02,0x00]
+0x6a,0x00,0x11,0xd0,0x01,0x05,0x02,0x00
+
+# CHECK-NOT: [[@LINE+2]]:1: warning: potentially undefined instruction encoding
+# CHECK: v_cmpx_class_f32_e64 tba, v1, v2 ; encoding: [0x6c,0x00,0x11,0xd0,0x01,0x05,0x02,0x00]
+0x6c,0x00,0x11,0xd0,0x01,0x05,0x02,0x00
+
+# CHECK-NOT: [[@LINE+2]]:1: warning: potentially undefined instruction encoding
+# CHECK: v_cmpx_class_f32_e64 tma, v1, v2 ; encoding: [0x6e,0x00,0x11,0xd0,0x01,0x05,0x02,0x00]
+0x6e,0x00,0x11,0xd0,0x01,0x05,0x02,0x00
+
+# CHECK-NOT: [[@LINE+2]]:1: warning: potentially undefined instruction encoding
+# CHECK: v_cmpx_class_f32_e64 ttmp[10:11], v1, v2 ; encoding: [0x7a,0x00,0x11,0xd0,0x01,0x05,0x02,0x00]
+0x7a,0x00,0x11,0xd0,0x01,0x05,0x02,0x00
+
+# CHECK-NOT: [[@LINE+2]]:1: warning: potentially undefined instruction encoding
+# CHECK: v_cmpx_class_f64_e64 s[0:1], v[1:2], v2 ; encoding: [0x00,0x00,0x13,0xd0,0x01,0x05,0x02,0x00]
+0x00,0x00,0x13,0xd0,0x01,0x05,0x02,0x00
+
+# CHECK-NOT: [[@LINE+2]]:1: warning: potentially undefined instruction encoding
+# CHECK: v_cmpx_class_f16_e64 s[2:3], v1, v2 ; encoding: [0x02,0x00,0x15,0xd0,0x01,0x05,0x02,0x00]
+0x02,0x00,0x15,0xd0,0x01,0x05,0x02,0x00
+
+# CHECK-NOT: [[@LINE+2]]:1: warning: potentially undefined instruction encoding
+# CHECK: v_cmpx_f_f16_e64 s[4:5], v1, v2 ; encoding: [0x04,0x00,0x30,0xd0,0x01,0x05,0x02,0x00]
+0x04,0x00,0x30,0xd0,0x01,0x05,0x02,0x00
+
+# CHECK-NOT: [[@LINE+2]]:1: warning: potentially undefined instruction encoding
+# CHECK: v_cmpx_lt_f16_e64 s[6:7], v1, v2 ; encoding: [0x06,0x00,0x31,0xd0,0x01,0x05,0x02,0x00]
+0x06,0x00,0x31,0xd0,0x01,0x05,0x02,0x00
+
+# CHECK-NOT: [[@LINE+2]]:1: warning: potentially undefined instruction encoding
+# CHECK: v_cmpx_eq_f16_e64 s[8:9], v1, v2 ; encoding: [0x08,0x00,0x32,0xd0,0x01,0x05,0x02,0x00]
+0x08,0x00,0x32,0xd0,0x01,0x05,0x02,0x00
+
+# CHECK-NOT: [[@LINE+2]]:1: warning: potentially undefined instruction encoding
+# CHECK: v_cmpx_le_f16_e64 s[10:11], v1, v2 ; encoding: [0x0a,0x00,0x33,0xd0,0x01,0x05,0x02,0x00]
+0x0a,0x00,0x33,0xd0,0x01,0x05,0x02,0x00
+
+# CHECK-NOT: [[@LINE+2]]:1: warning: potentially undefined instruction encoding
+# CHECK: v_cmpx_gt_f16_e64 s[12:13], v1, v2 ; encoding: [0x0c,0x00,0x34,0xd0,0x01,0x05,0x02,0x00]
+0x0c,0x00,0x34,0xd0,0x01,0x05,0x02,0x00
+
+# CHECK-NOT: [[@LINE+2]]:1: warning: potentially undefined instruction encoding
+# CHECK: v_cmpx_lg_f16_e64 s[14:15], v1, v2 ; encoding: [0x0e,0x00,0x35,0xd0,0x01,0x05,0x02,0x00]
+0x0e,0x00,0x35,0xd0,0x01,0x05,0x02,0x00
+
+# CHECK-NOT: [[@LINE+2]]:1: warning: potentially undefined instruction encoding
+# CHECK: v_cmpx_ge_f16_e64 s[16:17], v1, v2 ; encoding: [0x10,0x00,0x36,0xd0,0x01,0x05,0x02,0x00]
+0x10,0x00,0x36,0xd0,0x01,0x05,0x02,0x00
+
+# CHECK-NOT: [[@LINE+2]]:1: warning: potentially undefined instruction encoding
+# CHECK: v_cmpx_o_f16_e64 s[18:19], v1, v2 ; encoding: [0x12,0x00,0x37,0xd0,0x01,0x05,0x02,0x00]
+0x12,0x00,0x37,0xd0,0x01,0x05,0x02,0x00
+
+# CHECK-NOT: [[@LINE+2]]:1: warning: potentially undefined instruction encoding
+# CHECK: v_cmpx_u_f16_e64 s[20:21], v1, v2 ; encoding: [0x14,0x00,0x38,0xd0,0x01,0x05,0x02,0x00]
+0x14,0x00,0x38,0xd0,0x01,0x05,0x02,0x00
+
+# CHECK-NOT: [[@LINE+2]]:1: warning: potentially undefined instruction encoding
+# CHECK: v_cmpx_nge_f16_e64 s[22:23], v1, v2 ; encoding: [0x16,0x00,0x39,0xd0,0x01,0x05,0x02,0x00]
+0x16,0x00,0x39,0xd0,0x01,0x05,0x02,0x00
+
+# CHECK-NOT: [[@LINE+2]]:1: warning: potentially undefined instruction encoding
+# CHECK: v_cmpx_nlg_f16_e64 s[24:25], v1, v2 ; encoding: [0x18,0x00,0x3a,0xd0,0x01,0x05,0x02,0x00]
+0x18,0x00,0x3a,0xd0,0x01,0x05,0x02,0x00
+
+# CHECK-NOT: [[@LINE+2]]:1: warning: potentially undefined instruction encoding
+# CHECK: v_cmpx_ngt_f16_e64 s[26:27], v1, v2 ; encoding: [0x1a,0x00,0x3b,0xd0,0x01,0x05,0x02,0x00]
+0x1a,0x00,0x3b,0xd0,0x01,0x05,0x02,0x00
+
+# CHECK-NOT: [[@LINE+2]]:1: warning: potentially undefined instruction encoding
+# CHECK: v_cmpx_nle_f16_e64 s[28:29], v1, v2 ; encoding: [0x1c,0x00,0x3c,0xd0,0x01,0x05,0x02,0x00]
+0x1c,0x00,0x3c,0xd0,0x01,0x05,0x02,0x00
+
+# CHECK-NOT: [[@LINE+2]]:1: warning: potentially undefined instruction encoding
+# CHECK: v_cmpx_neq_f16_e64 s[30:31], v1, v2 ; encoding: [0x1e,0x00,0x3d,0xd0,0x01,0x05,0x02,0x00]
+0x1e,0x00,0x3d,0xd0,0x01,0x05,0x02,0x00
+
+# CHECK-NOT: [[@LINE+2]]:1: warning: potentially undefined instruction encoding
+# CHECK: v_cmpx_nlt_f16_e64 s[32:33], v1, v2 ; encoding: [0x20,0x00,0x3e,0xd0,0x01,0x05,0x02,0x00]
+0x20,0x00,0x3e,0xd0,0x01,0x05,0x02,0x00
+
+# CHECK-NOT: [[@LINE+2]]:1: warning: potentially undefined instruction encoding
+# CHECK: v_cmpx_tru_f16_e64 s[34:35], v1, v2 ; encoding: [0x22,0x00,0x3f,0xd0,0x01,0x05,0x02,0x00]
+0x22,0x00,0x3f,0xd0,0x01,0x05,0x02,0x00
+
+# CHECK-NOT: [[@LINE+2]]:1: warning: potentially undefined instruction encoding
+# CHECK: v_cmpx_f_f32_e64 s[36:37], v1, v2 ; encoding: [0x24,0x00,0x50,0xd0,0x01,0x05,0x02,0x00]
+0x24,0x00,0x50,0xd0,0x01,0x05,0x02,0x00
+
+# CHECK-NOT: [[@LINE+2]]:1: warning: potentially undefined instruction encoding
+# CHECK: v_cmpx_lt_f32_e64 s[38:39], v1, v2 ; encoding: [0x26,0x00,0x51,0xd0,0x01,0x05,0x02,0x00]
+0x26,0x00,0x51,0xd0,0x01,0x05,0x02,0x00
+
+# CHECK-NOT: [[@LINE+2]]:1: warning: potentially undefined instruction encoding
+# CHECK: v_cmpx_eq_f32_e64 s[40:41], v1, v2 ; encoding: [0x28,0x00,0x52,0xd0,0x01,0x05,0x02,0x00]
+0x28,0x00,0x52,0xd0,0x01,0x05,0x02,0x00
+
+# CHECK-NOT: [[@LINE+2]]:1: warning: potentially undefined instruction encoding
+# CHECK: v_cmpx_le_f32_e64 s[42:43], v1, v2 ; encoding: [0x2a,0x00,0x53,0xd0,0x01,0x05,0x02,0x00]
+0x2a,0x00,0x53,0xd0,0x01,0x05,0x02,0x00
+
+# CHECK-NOT: [[@LINE+2]]:1: warning: potentially undefined instruction encoding
+# CHECK: v_cmpx_gt_f32_e64 s[44:45], v1, v2 ; encoding: [0x2c,0x00,0x54,0xd0,0x01,0x05,0x02,0x00]
+0x2c,0x00,0x54,0xd0,0x01,0x05,0x02,0x00
+
+# CHECK-NOT: [[@LINE+2]]:1: warning: potentially undefined instruction encoding
+# CHECK: v_cmpx_lg_f32_e64 s[46:47], v1, v2 ; encoding: [0x2e,0x00,0x55,0xd0,0x01,0x05,0x02,0x00]
+0x2e,0x00,0x55,0xd0,0x01,0x05,0x02,0x00
+
+# CHECK-NOT: [[@LINE+2]]:1: warning: potentially undefined instruction encoding
+# CHECK: v_cmpx_ge_f32_e64 s[48:49], v1, v2 ; encoding: [0x30,0x00,0x56,0xd0,0x01,0x05,0x02,0x00]
+0x30,0x00,0x56,0xd0,0x01,0x05,0x02,0x00
+
+# CHECK-NOT: [[@LINE+2]]:1: warning: potentially undefined instruction encoding
+# CHECK: v_cmpx_o_f32_e64 s[50:51], v1, v2 ; encoding: [0x32,0x00,0x57,0xd0,0x01,0x05,0x02,0x00]
+0x32,0x00,0x57,0xd0,0x01,0x05,0x02,0x00
+
+# CHECK-NOT: [[@LINE+2]]:1: warning: potentially undefined instruction encoding
+# CHECK: v_cmpx_u_f32_e64 s[52:53], v1, v2 ; encoding: [0x34,0x00,0x58,0xd0,0x01,0x05,0x02,0x00]
+0x34,0x00,0x58,0xd0,0x01,0x05,0x02,0x00
+
+# CHECK-NOT: [[@LINE+2]]:1: warning: potentially undefined instruction encoding
+# CHECK: v_cmpx_nge_f32_e64 s[54:55], v1, v2 ; encoding: [0x36,0x00,0x59,0xd0,0x01,0x05,0x02,0x00]
+0x36,0x00,0x59,0xd0,0x01,0x05,0x02,0x00
+
+# CHECK-NOT: [[@LINE+2]]:1: warning: potentially undefined instruction encoding
+# CHECK: v_cmpx_nlg_f32_e64 s[56:57], v1, v2 ; encoding: [0x38,0x00,0x5a,0xd0,0x01,0x05,0x02,0x00]
+0x38,0x00,0x5a,0xd0,0x01,0x05,0x02,0x00
+
+# CHECK-NOT: [[@LINE+2]]:1: warning: potentially undefined instruction encoding
+# CHECK: v_cmpx_ngt_f32_e64 s[58:59], v1, v2 ; encoding: [0x3a,0x00,0x5b,0xd0,0x01,0x05,0x02,0x00]
+0x3a,0x00,0x5b,0xd0,0x01,0x05,0x02,0x00
+
+# CHECK-NOT: [[@LINE+2]]:1: warning: potentially undefined instruction encoding
+# CHECK: v_cmpx_nle_f32_e64 s[60:61], v1, v2 ; encoding: [0x3c,0x00,0x5c,0xd0,0x01,0x05,0x02,0x00]
+0x3c,0x00,0x5c,0xd0,0x01,0x05,0x02,0x00
+
+# CHECK-NOT: [[@LINE+2]]:1: warning: potentially undefined instruction encoding
+# CHECK: v_cmpx_neq_f32_e64 s[62:63], v1, v2 ; encoding: [0x3e,0x00,0x5d,0xd0,0x01,0x05,0x02,0x00]
+0x3e,0x00,0x5d,0xd0,0x01,0x05,0x02,0x00
+
+# CHECK-NOT: [[@LINE+2]]:1: warning: potentially undefined instruction encoding
+# CHECK: v_cmpx_nlt_f32_e64 s[64:65], v1, v2 ; encoding: [0x40,0x00,0x5e,0xd0,0x01,0x05,0x02,0x00]
+0x40,0x00,0x5e,0xd0,0x01,0x05,0x02,0x00
+
+# CHECK-NOT: [[@LINE+2]]:1: warning: potentially undefined instruction encoding
+# CHECK: v_cmpx_tru_f32_e64 s[66:67], v1, v2 ; encoding: [0x42,0x00,0x5f,0xd0,0x01,0x05,0x02,0x00]
+0x42,0x00,0x5f,0xd0,0x01,0x05,0x02,0x00
+
+# CHECK-NOT: [[@LINE+2]]:1: warning: potentially undefined instruction encoding
+# CHECK: v_cmpx_f_f64_e64 s[68:69], v[1:2], v[2:3] ; encoding: [0x44,0x00,0x70,0xd0,0x01,0x05,0x02,0x00]
+0x44,0x00,0x70,0xd0,0x01,0x05,0x02,0x00
+
+# CHECK-NOT: [[@LINE+2]]:1: warning: potentially undefined instruction encoding
+# CHECK: v_cmpx_lt_f64_e64 s[70:71], v[1:2], v[2:3] ; encoding: [0x46,0x00,0x71,0xd0,0x01,0x05,0x02,0x00]
+0x46,0x00,0x71,0xd0,0x01,0x05,0x02,0x00
+
+# CHECK-NOT: [[@LINE+2]]:1: warning: potentially undefined instruction encoding
+# CHECK: v_cmpx_eq_f64_e64 s[72:73], v[1:2], v[2:3] ; encoding: [0x48,0x00,0x72,0xd0,0x01,0x05,0x02,0x00]
+0x48,0x00,0x72,0xd0,0x01,0x05,0x02,0x00
+
+# CHECK-NOT: [[@LINE+2]]:1: warning: potentially undefined instruction encoding
+# CHECK: v_cmpx_le_f64_e64 s[74:75], v[1:2], v[2:3] ; encoding: [0x4a,0x00,0x73,0xd0,0x01,0x05,0x02,0x00]
+0x4a,0x00,0x73,0xd0,0x01,0x05,0x02,0x00
+
+# CHECK-NOT: [[@LINE+2]]:1: warning: potentially undefined instruction encoding
+# CHECK: v_cmpx_gt_f64_e64 s[76:77], v[1:2], v[2:3] ; encoding: [0x4c,0x00,0x74,0xd0,0x01,0x05,0x02,0x00]
+0x4c,0x00,0x74,0xd0,0x01,0x05,0x02,0x00
+
+# CHECK-NOT: [[@LINE+2]]:1: warning: potentially undefined instruction encoding
+# CHECK: v_cmpx_lg_f64_e64 s[78:79], v[1:2], v[2:3] ; encoding: [0x4e,0x00,0x75,0xd0,0x01,0x05,0x02,0x00]
+0x4e,0x00,0x75,0xd0,0x01,0x05,0x02,0x00
+
+# CHECK-NOT: [[@LINE+2]]:1: warning: potentially undefined instruction encoding
+# CHECK: v_cmpx_ge_f64_e64 s[80:81], v[1:2], v[2:3] ; encoding: [0x50,0x00,0x76,0xd0,0x01,0x05,0x02,0x00]
+0x50,0x00,0x76,0xd0,0x01,0x05,0x02,0x00
+
+# CHECK-NOT: [[@LINE+2]]:1: warning: potentially undefined instruction encoding
+# CHECK: v_cmpx_o_f64_e64 s[82:83], v[1:2], v[2:3] ; encoding: [0x52,0x00,0x77,0xd0,0x01,0x05,0x02,0x00]
+0x52,0x00,0x77,0xd0,0x01,0x05,0x02,0x00
+
+# CHECK-NOT: [[@LINE+2]]:1: warning: potentially undefined instruction encoding
+# CHECK: v_cmpx_u_f64_e64 s[84:85], v[1:2], v[2:3] ; encoding: [0x54,0x00,0x78,0xd0,0x01,0x05,0x02,0x00]
+0x54,0x00,0x78,0xd0,0x01,0x05,0x02,0x00
+
+# CHECK-NOT: [[@LINE+2]]:1: warning: potentially undefined instruction encoding
+# CHECK: v_cmpx_nge_f64_e64 s[86:87], v[1:2], v[2:3] ; encoding: [0x56,0x00,0x79,0xd0,0x01,0x05,0x02,0x00]
+0x56,0x00,0x79,0xd0,0x01,0x05,0x02,0x00
+
+# CHECK-NOT: [[@LINE+2]]:1: warning: potentially undefined instruction encoding
+# CHECK: v_cmpx_nlg_f64_e64 s[88:89], v[1:2], v[2:3] ; encoding: [0x58,0x00,0x7a,0xd0,0x01,0x05,0x02,0x00]
+0x58,0x00,0x7a,0xd0,0x01,0x05,0x02,0x00
+
+# CHECK-NOT: [[@LINE+2]]:1: warning: potentially undefined instruction encoding
+# CHECK: v_cmpx_ngt_f64_e64 s[90:91], v[1:2], v[2:3] ; encoding: [0x5a,0x00,0x7b,0xd0,0x01,0x05,0x02,0x00]
+0x5a,0x00,0x7b,0xd0,0x01,0x05,0x02,0x00
+
+# CHECK-NOT: [[@LINE+2]]:1: warning: potentially undefined instruction encoding
+# CHECK: v_cmpx_nle_f64_e64 s[92:93], v[1:2], v[2:3] ; encoding: [0x5c,0x00,0x7c,0xd0,0x01,0x05,0x02,0x00]
+0x5c,0x00,0x7c,0xd0,0x01,0x05,0x02,0x00
+
+# CHECK-NOT: [[@LINE+2]]:1: warning: potentially undefined instruction encoding
+# CHECK: v_cmpx_neq_f64_e64 s[94:95], v[1:2], v[2:3] ; encoding: [0x5e,0x00,0x7d,0xd0,0x01,0x05,0x02,0x00]
+0x5e,0x00,0x7d,0xd0,0x01,0x05,0x02,0x00
+
+# CHECK-NOT: [[@LINE+2]]:1: warning: potentially undefined instruction encoding
+# CHECK: v_cmpx_nlt_f64_e64 s[96:97], v[1:2], v[2:3] ; encoding: [0x60,0x00,0x7e,0xd0,0x01,0x05,0x02,0x00]
+0x60,0x00,0x7e,0xd0,0x01,0x05,0x02,0x00
+
+# CHECK-NOT: [[@LINE+2]]:1: warning: potentially undefined instruction encoding
+# CHECK: v_cmpx_tru_f64_e64 s[98:99], v[1:2], v[2:3] ; encoding: [0x62,0x00,0x7f,0xd0,0x01,0x05,0x02,0x00]
+0x62,0x00,0x7f,0xd0,0x01,0x05,0x02,0x00
+
+# CHECK-NOT: [[@LINE+2]]:1: warning: potentially undefined instruction encoding
+# CHECK: v_cmpx_f_i16_e64 s[100:101], v1, v2 ; encoding: [0x64,0x00,0xb0,0xd0,0x01,0x05,0x02,0x00]
+0x64,0x00,0xb0,0xd0,0x01,0x05,0x02,0x00
+
+# CHECK-NOT: [[@LINE+2]]:1: warning: potentially undefined instruction encoding
+# CHECK: v_cmpx_lt_i16_e64 flat_scratch, v1, v2 ; encoding: [0x66,0x00,0xb1,0xd0,0x01,0x05,0x02,0x00]
+0x66,0x00,0xb1,0xd0,0x01,0x05,0x02,0x00
+
+# CHECK-NOT: [[@LINE+2]]:1: warning: potentially undefined instruction encoding
+# CHECK: v_cmpx_eq_i16_e64 xnack_mask, v1, v2 ; encoding: [0x68,0x00,0xb2,0xd0,0x01,0x05,0x02,0x00]
+0x68,0x00,0xb2,0xd0,0x01,0x05,0x02,0x00
+
+# CHECK-NOT: [[@LINE+2]]:1: warning: potentially undefined instruction encoding
+# CHECK: v_cmpx_le_i16_e64 vcc, v1, v2 ; encoding: [0x6a,0x00,0xb3,0xd0,0x01,0x05,0x02,0x00]
+0x6a,0x00,0xb3,0xd0,0x01,0x05,0x02,0x00
+
+# CHECK-NOT: [[@LINE+2]]:1: warning: potentially undefined instruction encoding
+# CHECK: v_cmpx_gt_i16_e64 tba, v1, v2 ; encoding: [0x6c,0x00,0xb4,0xd0,0x01,0x05,0x02,0x00]
+0x6c,0x00,0xb4,0xd0,0x01,0x05,0x02,0x00
+
+# CHECK-NOT: [[@LINE+2]]:1: warning: potentially undefined instruction encoding
+# CHECK: v_cmpx_ne_i16_e64 tma, v1, v2 ; encoding: [0x6e,0x00,0xb5,0xd0,0x01,0x05,0x02,0x00]
+0x6e,0x00,0xb5,0xd0,0x01,0x05,0x02,0x00
+
+# CHECK-NOT: [[@LINE+2]]:1: warning: potentially undefined instruction encoding
+# CHECK: v_cmpx_ge_i16_e64 ttmp[0:1], v1, v2 ; encoding: [0x70,0x00,0xb6,0xd0,0x01,0x05,0x02,0x00]
+0x70,0x00,0xb6,0xd0,0x01,0x05,0x02,0x00
+
+# CHECK-NOT: [[@LINE+2]]:1: warning: potentially undefined instruction encoding
+# CHECK: v_cmpx_t_i16_e64 ttmp[2:3], v1, v2 ; encoding: [0x72,0x00,0xb7,0xd0,0x01,0x05,0x02,0x00]
+0x72,0x00,0xb7,0xd0,0x01,0x05,0x02,0x00
+
+# CHECK-NOT: [[@LINE+2]]:1: warning: potentially undefined instruction encoding
+# CHECK: v_cmpx_f_u16_e64 ttmp[4:5], v1, v2 ; encoding: [0x74,0x00,0xb8,0xd0,0x01,0x05,0x02,0x00]
+0x74,0x00,0xb8,0xd0,0x01,0x05,0x02,0x00
+
+# CHECK-NOT: [[@LINE+2]]:1: warning: potentially undefined instruction encoding
+# CHECK: v_cmpx_lt_u16_e64 ttmp[6:7], v1, v2 ; encoding: [0x76,0x00,0xb9,0xd0,0x01,0x05,0x02,0x00]
+0x76,0x00,0xb9,0xd0,0x01,0x05,0x02,0x00
+
+# CHECK-NOT: [[@LINE+2]]:1: warning: potentially undefined instruction encoding
+# CHECK: v_cmpx_eq_u16_e64 ttmp[8:9], v1, v2 ; encoding: [0x78,0x00,0xba,0xd0,0x01,0x05,0x02,0x00]
+0x78,0x00,0xba,0xd0,0x01,0x05,0x02,0x00
+
+# CHECK-NOT: [[@LINE+2]]:1: warning: potentially undefined instruction encoding
+# CHECK: v_cmpx_le_u16_e64 ttmp[10:11], v1, v2 ; encoding: [0x7a,0x00,0xbb,0xd0,0x01,0x05,0x02,0x00]
+0x7a,0x00,0xbb,0xd0,0x01,0x05,0x02,0x00
+
+# CHECK-NOT: [[@LINE+2]]:1: warning: potentially undefined instruction encoding
+# CHECK: v_cmpx_gt_u16_e64 exec, v1, v2 ; encoding: [0x7e,0x00,0xbc,0xd0,0x01,0x05,0x02,0x00]
+0x7e,0x00,0xbc,0xd0,0x01,0x05,0x02,0x00
+
+# CHECK-NOT: [[@LINE+2]]:1: warning: potentially undefined instruction encoding
+# CHECK: v_cmpx_ne_u16_e64 s[10:11], v1, v2 ; encoding: [0x0a,0x00,0xbd,0xd0,0x01,0x05,0x02,0x00]
+0x0a,0x00,0xbd,0xd0,0x01,0x05,0x02,0x00
+
+# CHECK-NOT: [[@LINE+2]]:1: warning: potentially undefined instruction encoding
+# CHECK: v_cmpx_ge_u16_e64 s[10:11], v1, v2 ; encoding: [0x0a,0x00,0xbe,0xd0,0x01,0x05,0x02,0x00]
+0x0a,0x00,0xbe,0xd0,0x01,0x05,0x02,0x00
+
+# CHECK-NOT: [[@LINE+2]]:1: warning: potentially undefined instruction encoding
+# CHECK: v_cmpx_t_u16_e64 s[10:11], v1, v2 ; encoding: [0x0a,0x00,0xbf,0xd0,0x01,0x05,0x02,0x00]
+0x0a,0x00,0xbf,0xd0,0x01,0x05,0x02,0x00
+
+# CHECK-NOT: [[@LINE+2]]:1: warning: potentially undefined instruction encoding
+# CHECK: v_cmpx_f_i32_e64 s[10:11], v1, v2 ; encoding: [0x0a,0x00,0xd0,0xd0,0x01,0x05,0x02,0x00]
+0x0a,0x00,0xd0,0xd0,0x01,0x05,0x02,0x00
+
+# CHECK-NOT: [[@LINE+2]]:1: warning: potentially undefined instruction encoding
+# CHECK: v_cmpx_lt_i32_e64 s[10:11], v1, v2 ; encoding: [0x0a,0x00,0xd1,0xd0,0x01,0x05,0x02,0x00]
+0x0a,0x00,0xd1,0xd0,0x01,0x05,0x02,0x00
+
+# CHECK-NOT: [[@LINE+2]]:1: warning: potentially undefined instruction encoding
+# CHECK: v_cmpx_eq_i32_e64 s[10:11], v1, v2 ; encoding: [0x0a,0x00,0xd2,0xd0,0x01,0x05,0x02,0x00]
+0x0a,0x00,0xd2,0xd0,0x01,0x05,0x02,0x00
+
+# CHECK-NOT: [[@LINE+2]]:1: warning: potentially undefined instruction encoding
+# CHECK: v_cmpx_le_i32_e64 s[10:11], v1, v2 ; encoding: [0x0a,0x00,0xd3,0xd0,0x01,0x05,0x02,0x00]
+0x0a,0x00,0xd3,0xd0,0x01,0x05,0x02,0x00
+
+# CHECK-NOT: [[@LINE+2]]:1: warning: potentially undefined instruction encoding
+# CHECK: v_cmpx_gt_i32_e64 s[10:11], v1, v2 ; encoding: [0x0a,0x00,0xd4,0xd0,0x01,0x05,0x02,0x00]
+0x0a,0x00,0xd4,0xd0,0x01,0x05,0x02,0x00
+
+# CHECK-NOT: [[@LINE+2]]:1: warning: potentially undefined instruction encoding
+# CHECK: v_cmpx_ne_i32_e64 s[10:11], v1, v2 ; encoding: [0x0a,0x00,0xd5,0xd0,0x01,0x05,0x02,0x00]
+0x0a,0x00,0xd5,0xd0,0x01,0x05,0x02,0x00
+
+# CHECK-NOT: [[@LINE+2]]:1: warning: potentially undefined instruction encoding
+# CHECK: v_cmpx_ge_i32_e64 s[10:11], v1, v2 ; encoding: [0x0a,0x00,0xd6,0xd0,0x01,0x05,0x02,0x00]
+0x0a,0x00,0xd6,0xd0,0x01,0x05,0x02,0x00
+
+# CHECK-NOT: [[@LINE+2]]:1: warning: potentially undefined instruction encoding
+# CHECK: v_cmpx_t_i32_e64 s[10:11], v1, v2 ; encoding: [0x0a,0x00,0xd7,0xd0,0x01,0x05,0x02,0x00]
+0x0a,0x00,0xd7,0xd0,0x01,0x05,0x02,0x00
+
+# CHECK-NOT: [[@LINE+2]]:1: warning: potentially undefined instruction encoding
+# CHECK: v_cmpx_f_u32_e64 s[10:11], v1, v2 ; encoding: [0x0a,0x00,0xd8,0xd0,0x01,0x05,0x02,0x00]
+0x0a,0x00,0xd8,0xd0,0x01,0x05,0x02,0x00
+
+# CHECK-NOT: [[@LINE+2]]:1: warning: potentially undefined instruction encoding
+# CHECK: v_cmpx_lt_u32_e64 s[10:11], v1, v2 ; encoding: [0x0a,0x00,0xd9,0xd0,0x01,0x05,0x02,0x00]
+0x0a,0x00,0xd9,0xd0,0x01,0x05,0x02,0x00
+
+# CHECK-NOT: [[@LINE+2]]:1: warning: potentially undefined instruction encoding
+# CHECK: v_cmpx_eq_u32_e64 s[10:11], v1, v2 ; encoding: [0x0a,0x00,0xda,0xd0,0x01,0x05,0x02,0x00]
+0x0a,0x00,0xda,0xd0,0x01,0x05,0x02,0x00
+
+# CHECK-NOT: [[@LINE+2]]:1: warning: potentially undefined instruction encoding
+# CHECK: v_cmpx_le_u32_e64 s[10:11], v1, v2 ; encoding: [0x0a,0x00,0xdb,0xd0,0x01,0x05,0x02,0x00]
+0x0a,0x00,0xdb,0xd0,0x01,0x05,0x02,0x00
+
+# CHECK-NOT: [[@LINE+2]]:1: warning: potentially undefined instruction encoding
+# CHECK: v_cmpx_gt_u32_e64 s[10:11], v1, v2 ; encoding: [0x0a,0x00,0xdc,0xd0,0x01,0x05,0x02,0x00]
+0x0a,0x00,0xdc,0xd0,0x01,0x05,0x02,0x00
+
+# CHECK-NOT: [[@LINE+2]]:1: warning: potentially undefined instruction encoding
+# CHECK: v_cmpx_ne_u32_e64 s[10:11], v1, v2 ; encoding: [0x0a,0x00,0xdd,0xd0,0x01,0x05,0x02,0x00]
+0x0a,0x00,0xdd,0xd0,0x01,0x05,0x02,0x00
+
+# CHECK-NOT: [[@LINE+2]]:1: warning: potentially undefined instruction encoding
+# CHECK: v_cmpx_ge_u32_e64 s[10:11], v1, v2 ; encoding: [0x0a,0x00,0xde,0xd0,0x01,0x05,0x02,0x00]
+0x0a,0x00,0xde,0xd0,0x01,0x05,0x02,0x00
+
+# CHECK-NOT: [[@LINE+2]]:1: warning: potentially undefined instruction encoding
+# CHECK: v_cmpx_t_u32_e64 s[10:11], v1, v2 ; encoding: [0x0a,0x00,0xdf,0xd0,0x01,0x05,0x02,0x00]
+0x0a,0x00,0xdf,0xd0,0x01,0x05,0x02,0x00
+
+# CHECK-NOT: [[@LINE+2]]:1: warning: potentially undefined instruction encoding
+# CHECK: v_cmpx_f_i64_e64 s[10:11], v[1:2], v[2:3] ; encoding: [0x0a,0x00,0xf0,0xd0,0x01,0x05,0x02,0x00]
+0x0a,0x00,0xf0,0xd0,0x01,0x05,0x02,0x00
+
+# CHECK-NOT: [[@LINE+2]]:1: warning: potentially undefined instruction encoding
+# CHECK: v_cmpx_lt_i64_e64 s[10:11], v[1:2], v[2:3] ; encoding: [0x0a,0x00,0xf1,0xd0,0x01,0x05,0x02,0x00]
+0x0a,0x00,0xf1,0xd0,0x01,0x05,0x02,0x00
+
+# CHECK-NOT: [[@LINE+2]]:1: warning: potentially undefined instruction encoding
+# CHECK: v_cmpx_eq_i64_e64 s[10:11], v[1:2], v[2:3] ; encoding: [0x0a,0x00,0xf2,0xd0,0x01,0x05,0x02,0x00]
+0x0a,0x00,0xf2,0xd0,0x01,0x05,0x02,0x00
+
+# CHECK-NOT: [[@LINE+2]]:1: warning: potentially undefined instruction encoding
+# CHECK: v_cmpx_le_i64_e64 s[10:11], v[1:2], v[2:3] ; encoding: [0x0a,0x00,0xf3,0xd0,0x01,0x05,0x02,0x00]
+0x0a,0x00,0xf3,0xd0,0x01,0x05,0x02,0x00
+
+# CHECK-NOT: [[@LINE+2]]:1: warning: potentially undefined instruction encoding
+# CHECK: v_cmpx_gt_i64_e64 s[10:11], v[1:2], v[2:3] ; encoding: [0x0a,0x00,0xf4,0xd0,0x01,0x05,0x02,0x00]
+0x0a,0x00,0xf4,0xd0,0x01,0x05,0x02,0x00
+
+# CHECK-NOT: [[@LINE+2]]:1: warning: potentially undefined instruction encoding
+# CHECK: v_cmpx_ne_i64_e64 s[10:11], v[1:2], v[2:3] ; encoding: [0x0a,0x00,0xf5,0xd0,0x01,0x05,0x02,0x00]
+0x0a,0x00,0xf5,0xd0,0x01,0x05,0x02,0x00
+
+# CHECK-NOT: [[@LINE+2]]:1: warning: potentially undefined instruction encoding
+# CHECK: v_cmpx_ge_i64_e64 s[10:11], v[1:2], v[2:3] ; encoding: [0x0a,0x00,0xf6,0xd0,0x01,0x05,0x02,0x00]
+0x0a,0x00,0xf6,0xd0,0x01,0x05,0x02,0x00
+
+# CHECK-NOT: [[@LINE+2]]:1: warning: potentially undefined instruction encoding
+# CHECK: v_cmpx_t_i64_e64 s[10:11], v[1:2], v[2:3] ; encoding: [0x0a,0x00,0xf7,0xd0,0x01,0x05,0x02,0x00]
+0x0a,0x00,0xf7,0xd0,0x01,0x05,0x02,0x00
+
+# CHECK-NOT: [[@LINE+2]]:1: warning: potentially undefined instruction encoding
+# CHECK: v_cmpx_f_u64_e64 s[10:11], v[1:2], v[2:3] ; encoding: [0x0a,0x00,0xf8,0xd0,0x01,0x05,0x02,0x00]
+0x0a,0x00,0xf8,0xd0,0x01,0x05,0x02,0x00
+
+# CHECK-NOT: [[@LINE+2]]:1: warning: potentially undefined instruction encoding
+# CHECK: v_cmpx_lt_u64_e64 s[10:11], v[1:2], v[2:3] ; encoding: [0x0a,0x00,0xf9,0xd0,0x01,0x05,0x02,0x00]
+0x0a,0x00,0xf9,0xd0,0x01,0x05,0x02,0x00
+
+# CHECK-NOT: [[@LINE+2]]:1: warning: potentially undefined instruction encoding
+# CHECK: v_cmpx_eq_u64_e64 s[10:11], v[1:2], v[2:3] ; encoding: [0x0a,0x00,0xfa,0xd0,0x01,0x05,0x02,0x00]
+0x0a,0x00,0xfa,0xd0,0x01,0x05,0x02,0x00
+
+# CHECK-NOT: [[@LINE+2]]:1: warning: potentially undefined instruction encoding
+# CHECK: v_cmpx_le_u64_e64 s[10:11], v[1:2], v[2:3] ; encoding: [0x0a,0x00,0xfb,0xd0,0x01,0x05,0x02,0x00]
+0x0a,0x00,0xfb,0xd0,0x01,0x05,0x02,0x00
+
+# CHECK-NOT: [[@LINE+2]]:1: warning: potentially undefined instruction encoding
+# CHECK: v_cmpx_gt_u64_e64 s[10:11], v[1:2], v[2:3] ; encoding: [0x0a,0x00,0xfc,0xd0,0x01,0x05,0x02,0x00]
+0x0a,0x00,0xfc,0xd0,0x01,0x05,0x02,0x00
+
+# CHECK-NOT: [[@LINE+2]]:1: warning: potentially undefined instruction encoding
+# CHECK: v_cmpx_ne_u64_e64 s[10:11], v[1:2], v[2:3] ; encoding: [0x0a,0x00,0xfd,0xd0,0x01,0x05,0x02,0x00]
+0x0a,0x00,0xfd,0xd0,0x01,0x05,0x02,0x00
+
+# CHECK-NOT: [[@LINE+2]]:1: warning: potentially undefined instruction encoding
+# CHECK: v_cmpx_ge_u64_e64 s[10:11], v[1:2], v[2:3] ; encoding: [0x0a,0x00,0xfe,0xd0,0x01,0x05,0x02,0x00]
+0x0a,0x00,0xfe,0xd0,0x01,0x05,0x02,0x00
+
+# CHECK-NOT: [[@LINE+2]]:1: warning: potentially undefined instruction encoding
+# CHECK: v_cmpx_t_u64_e64 s[10:11], v[1:2], v[2:3] ; encoding: [0x0a,0x00,0xff,0xd0,0x01,0x05,0x02,0x00]
+0x0a,0x00,0xff,0xd0,0x01,0x05,0x02,0x00
+
diff --git a/llvm/test/MC/Disassembler/AMDGPU/gfx9_vop3c_nowarn.txt b/llvm/test/MC/Disassembler/AMDGPU/gfx9_vop3c_nowarn.txt
new file mode 100644
index 0000000..0c4f107
--- /dev/null
+++ b/llvm/test/MC/Disassembler/AMDGPU/gfx9_vop3c_nowarn.txt
@@ -0,0 +1,402 @@
+# RUN: llvm-mc -triple=amdgcn -mcpu=gfx900 -disassemble -show-encoding < %s 2>&1 | FileCheck -strict-whitespace %s
+
+# In GFX10+, v_cmpx_* instructions use EXEC as the implicit dst, and the disassembler issues a
+# warning when the encoded dst is not 0x7e (EXEC). In GFX9 and earlier, these instructions have an
+# explicit dst, so no such warning should be issued.
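+# Note: in these VOP3 encodings the first byte selects the explicit dst register, e.g.
+# 0x0a -> s[10:11], 0x66 -> flat_scratch, 0x6a -> vcc; only 0x7e would denote EXEC.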
+
+# CHECK-NOT: [[@LINE+2]]:1: warning: potentially undefined instruction encoding
+# CHECK: v_cmp_class_f32_e64 s[10:11], v1, v2 ; encoding: [0x0a,0x00,0x10,0xd0,0x01,0x05,0x02,0x00]
+0x0a,0x00,0x10,0xd0,0x01,0x05,0x02,0x00
+
+# CHECK-NOT: [[@LINE+2]]:1: warning: potentially undefined instruction encoding
+# CHECK: v_cmp_class_f32_e64 flat_scratch, v1, v2 ; encoding: [0x66,0x00,0x10,0xd0,0x01,0x05,0x02,0x00]
+0x66,0x00,0x10,0xd0,0x01,0x05,0x02,0x00
+
+# CHECK-NOT: [[@LINE+2]]:1: warning: potentially undefined instruction encoding
+# CHECK: v_cmp_class_f32_e64 vcc, v1, v2 ; encoding: [0x6a,0x00,0x10,0xd0,0x01,0x05,0x02,0x00]
+0x6a,0x00,0x10,0xd0,0x01,0x05,0x02,0x00
+
+# CHECK-NOT: [[@LINE+2]]:1: warning: potentially undefined instruction encoding
+# CHECK: v_cmpx_class_f64_e64 s[0:1], v[1:2], v2 ; encoding: [0x00,0x00,0x13,0xd0,0x01,0x05,0x02,0x00]
+0x00,0x00,0x13,0xd0,0x01,0x05,0x02,0x00
+
+# CHECK-NOT: [[@LINE+2]]:1: warning: potentially undefined instruction encoding
+# CHECK: v_cmpx_class_f16_e64 s[2:3], v1, v2 ; encoding: [0x02,0x00,0x15,0xd0,0x01,0x05,0x02,0x00]
+0x02,0x00,0x15,0xd0,0x01,0x05,0x02,0x00
+
+# CHECK-NOT: [[@LINE+2]]:1: warning: potentially undefined instruction encoding
+# CHECK: v_cmpx_f_f16_e64 s[4:5], v1, v2 ; encoding: [0x04,0x00,0x30,0xd0,0x01,0x05,0x02,0x00]
+0x04,0x00,0x30,0xd0,0x01,0x05,0x02,0x00
+
+# CHECK-NOT: [[@LINE+2]]:1: warning: potentially undefined instruction encoding
+# CHECK: v_cmpx_lt_f16_e64 s[6:7], v1, v2 ; encoding: [0x06,0x00,0x31,0xd0,0x01,0x05,0x02,0x00]
+0x06,0x00,0x31,0xd0,0x01,0x05,0x02,0x00
+
+# CHECK-NOT: [[@LINE+2]]:1: warning: potentially undefined instruction encoding
+# CHECK: v_cmpx_eq_f16_e64 s[8:9], v1, v2 ; encoding: [0x08,0x00,0x32,0xd0,0x01,0x05,0x02,0x00]
+0x08,0x00,0x32,0xd0,0x01,0x05,0x02,0x00
+
+# CHECK-NOT: [[@LINE+2]]:1: warning: potentially undefined instruction encoding
+# CHECK: v_cmpx_le_f16_e64 s[10:11], v1, v2 ; encoding: [0x0a,0x00,0x33,0xd0,0x01,0x05,0x02,0x00]
+0x0a,0x00,0x33,0xd0,0x01,0x05,0x02,0x00
+
+# CHECK-NOT: [[@LINE+2]]:1: warning: potentially undefined instruction encoding
+# CHECK: v_cmpx_gt_f16_e64 s[12:13], v1, v2 ; encoding: [0x0c,0x00,0x34,0xd0,0x01,0x05,0x02,0x00]
+0x0c,0x00,0x34,0xd0,0x01,0x05,0x02,0x00
+
+# CHECK-NOT: [[@LINE+2]]:1: warning: potentially undefined instruction encoding
+# CHECK: v_cmpx_lg_f16_e64 s[14:15], v1, v2 ; encoding: [0x0e,0x00,0x35,0xd0,0x01,0x05,0x02,0x00]
+0x0e,0x00,0x35,0xd0,0x01,0x05,0x02,0x00
+
+# CHECK-NOT: [[@LINE+2]]:1: warning: potentially undefined instruction encoding
+# CHECK: v_cmpx_ge_f16_e64 s[16:17], v1, v2 ; encoding: [0x10,0x00,0x36,0xd0,0x01,0x05,0x02,0x00]
+0x10,0x00,0x36,0xd0,0x01,0x05,0x02,0x00
+
+# CHECK-NOT: [[@LINE+2]]:1: warning: potentially undefined instruction encoding
+# CHECK: v_cmpx_o_f16_e64 s[18:19], v1, v2 ; encoding: [0x12,0x00,0x37,0xd0,0x01,0x05,0x02,0x00]
+0x12,0x00,0x37,0xd0,0x01,0x05,0x02,0x00
+
+# CHECK-NOT: [[@LINE+2]]:1: warning: potentially undefined instruction encoding
+# CHECK: v_cmpx_u_f16_e64 s[20:21], v1, v2 ; encoding: [0x14,0x00,0x38,0xd0,0x01,0x05,0x02,0x00]
+0x14,0x00,0x38,0xd0,0x01,0x05,0x02,0x00
+
+# CHECK-NOT: [[@LINE+2]]:1: warning: potentially undefined instruction encoding
+# CHECK: v_cmpx_nge_f16_e64 s[22:23], v1, v2 ; encoding: [0x16,0x00,0x39,0xd0,0x01,0x05,0x02,0x00]
+0x16,0x00,0x39,0xd0,0x01,0x05,0x02,0x00
+
+# CHECK-NOT: [[@LINE+2]]:1: warning: potentially undefined instruction encoding
+# CHECK: v_cmpx_nlg_f16_e64 s[24:25], v1, v2 ; encoding: [0x18,0x00,0x3a,0xd0,0x01,0x05,0x02,0x00]
+0x18,0x00,0x3a,0xd0,0x01,0x05,0x02,0x00
+
+# CHECK-NOT: [[@LINE+2]]:1: warning: potentially undefined instruction encoding
+# CHECK: v_cmpx_ngt_f16_e64 s[26:27], v1, v2 ; encoding: [0x1a,0x00,0x3b,0xd0,0x01,0x05,0x02,0x00]
+0x1a,0x00,0x3b,0xd0,0x01,0x05,0x02,0x00
+
+# CHECK-NOT: [[@LINE+2]]:1: warning: potentially undefined instruction encoding
+# CHECK: v_cmpx_nle_f16_e64 s[28:29], v1, v2 ; encoding: [0x1c,0x00,0x3c,0xd0,0x01,0x05,0x02,0x00]
+0x1c,0x00,0x3c,0xd0,0x01,0x05,0x02,0x00
+
+# CHECK-NOT: [[@LINE+2]]:1: warning: potentially undefined instruction encoding
+# CHECK: v_cmpx_neq_f16_e64 s[30:31], v1, v2 ; encoding: [0x1e,0x00,0x3d,0xd0,0x01,0x05,0x02,0x00]
+0x1e,0x00,0x3d,0xd0,0x01,0x05,0x02,0x00
+
+# CHECK-NOT: [[@LINE+2]]:1: warning: potentially undefined instruction encoding
+# CHECK: v_cmpx_nlt_f16_e64 s[32:33], v1, v2 ; encoding: [0x20,0x00,0x3e,0xd0,0x01,0x05,0x02,0x00]
+0x20,0x00,0x3e,0xd0,0x01,0x05,0x02,0x00
+
+# CHECK-NOT: [[@LINE+2]]:1: warning: potentially undefined instruction encoding
+# CHECK: v_cmpx_tru_f16_e64 s[34:35], v1, v2 ; encoding: [0x22,0x00,0x3f,0xd0,0x01,0x05,0x02,0x00]
+0x22,0x00,0x3f,0xd0,0x01,0x05,0x02,0x00
+
+# CHECK-NOT: [[@LINE+2]]:1: warning: potentially undefined instruction encoding
+# CHECK: v_cmpx_f_f32_e64 s[36:37], v1, v2 ; encoding: [0x24,0x00,0x50,0xd0,0x01,0x05,0x02,0x00]
+0x24,0x00,0x50,0xd0,0x01,0x05,0x02,0x00
+
+# CHECK-NOT: [[@LINE+2]]:1: warning: potentially undefined instruction encoding
+# CHECK: v_cmpx_lt_f32_e64 s[38:39], v1, v2 ; encoding: [0x26,0x00,0x51,0xd0,0x01,0x05,0x02,0x00]
+0x26,0x00,0x51,0xd0,0x01,0x05,0x02,0x00
+
+# CHECK-NOT: [[@LINE+2]]:1: warning: potentially undefined instruction encoding
+# CHECK: v_cmpx_eq_f32_e64 s[40:41], v1, v2 ; encoding: [0x28,0x00,0x52,0xd0,0x01,0x05,0x02,0x00]
+0x28,0x00,0x52,0xd0,0x01,0x05,0x02,0x00
+
+# CHECK-NOT: [[@LINE+2]]:1: warning: potentially undefined instruction encoding
+# CHECK: v_cmpx_le_f32_e64 s[42:43], v1, v2 ; encoding: [0x2a,0x00,0x53,0xd0,0x01,0x05,0x02,0x00]
+0x2a,0x00,0x53,0xd0,0x01,0x05,0x02,0x00
+
+# CHECK-NOT: [[@LINE+2]]:1: warning: potentially undefined instruction encoding
+# CHECK: v_cmpx_gt_f32_e64 s[44:45], v1, v2 ; encoding: [0x2c,0x00,0x54,0xd0,0x01,0x05,0x02,0x00]
+0x2c,0x00,0x54,0xd0,0x01,0x05,0x02,0x00
+
+# CHECK-NOT: [[@LINE+2]]:1: warning: potentially undefined instruction encoding
+# CHECK: v_cmpx_lg_f32_e64 s[46:47], v1, v2 ; encoding: [0x2e,0x00,0x55,0xd0,0x01,0x05,0x02,0x00]
+0x2e,0x00,0x55,0xd0,0x01,0x05,0x02,0x00
+
+# CHECK-NOT: [[@LINE+2]]:1: warning: potentially undefined instruction encoding
+# CHECK: v_cmpx_ge_f32_e64 s[48:49], v1, v2 ; encoding: [0x30,0x00,0x56,0xd0,0x01,0x05,0x02,0x00]
+0x30,0x00,0x56,0xd0,0x01,0x05,0x02,0x00
+
+# CHECK-NOT: [[@LINE+2]]:1: warning: potentially undefined instruction encoding
+# CHECK: v_cmpx_o_f32_e64 s[50:51], v1, v2 ; encoding: [0x32,0x00,0x57,0xd0,0x01,0x05,0x02,0x00]
+0x32,0x00,0x57,0xd0,0x01,0x05,0x02,0x00
+
+# CHECK-NOT: [[@LINE+2]]:1: warning: potentially undefined instruction encoding
+# CHECK: v_cmpx_u_f32_e64 s[52:53], v1, v2 ; encoding: [0x34,0x00,0x58,0xd0,0x01,0x05,0x02,0x00]
+0x34,0x00,0x58,0xd0,0x01,0x05,0x02,0x00
+
+# CHECK-NOT: [[@LINE+2]]:1: warning: potentially undefined instruction encoding
+# CHECK: v_cmpx_nge_f32_e64 s[54:55], v1, v2 ; encoding: [0x36,0x00,0x59,0xd0,0x01,0x05,0x02,0x00]
+0x36,0x00,0x59,0xd0,0x01,0x05,0x02,0x00
+
+# CHECK-NOT: [[@LINE+2]]:1: warning: potentially undefined instruction encoding
+# CHECK: v_cmpx_nlg_f32_e64 s[56:57], v1, v2 ; encoding: [0x38,0x00,0x5a,0xd0,0x01,0x05,0x02,0x00]
+0x38,0x00,0x5a,0xd0,0x01,0x05,0x02,0x00
+
+# CHECK-NOT: [[@LINE+2]]:1: warning: potentially undefined instruction encoding
+# CHECK: v_cmpx_ngt_f32_e64 s[58:59], v1, v2 ; encoding: [0x3a,0x00,0x5b,0xd0,0x01,0x05,0x02,0x00]
+0x3a,0x00,0x5b,0xd0,0x01,0x05,0x02,0x00
+
+# CHECK-NOT: [[@LINE+2]]:1: warning: potentially undefined instruction encoding
+# CHECK: v_cmpx_nle_f32_e64 s[60:61], v1, v2 ; encoding: [0x3c,0x00,0x5c,0xd0,0x01,0x05,0x02,0x00]
+0x3c,0x00,0x5c,0xd0,0x01,0x05,0x02,0x00
+
+# CHECK-NOT: [[@LINE+2]]:1: warning: potentially undefined instruction encoding
+# CHECK: v_cmpx_neq_f32_e64 s[62:63], v1, v2 ; encoding: [0x3e,0x00,0x5d,0xd0,0x01,0x05,0x02,0x00]
+0x3e,0x00,0x5d,0xd0,0x01,0x05,0x02,0x00
+
+# CHECK-NOT: [[@LINE+2]]:1: warning: potentially undefined instruction encoding
+# CHECK: v_cmpx_nlt_f32_e64 s[64:65], v1, v2 ; encoding: [0x40,0x00,0x5e,0xd0,0x01,0x05,0x02,0x00]
+0x40,0x00,0x5e,0xd0,0x01,0x05,0x02,0x00
+
+# CHECK-NOT: [[@LINE+2]]:1: warning: potentially undefined instruction encoding
+# CHECK: v_cmpx_f_f64_e64 s[66:67], v[1:2], v[2:3] ; encoding: [0x42,0x00,0x70,0xd0,0x01,0x05,0x02,0x00]
+0x42,0x00,0x70,0xd0,0x01,0x05,0x02,0x00
+
+# CHECK-NOT: [[@LINE+2]]:1: warning: potentially undefined instruction encoding
+# CHECK: v_cmpx_eq_f64_e64 s[68:69], v[1:2], v[2:3] ; encoding: [0x44,0x00,0x72,0xd0,0x01,0x05,0x02,0x00]
+0x44,0x00,0x72,0xd0,0x01,0x05,0x02,0x00
+
+# CHECK-NOT: [[@LINE+2]]:1: warning: potentially undefined instruction encoding
+# CHECK: v_cmpx_le_f64_e64 s[70:71], v[1:2], v[2:3] ; encoding: [0x46,0x00,0x73,0xd0,0x01,0x05,0x02,0x00]
+0x46,0x00,0x73,0xd0,0x01,0x05,0x02,0x00
+
+# CHECK-NOT: [[@LINE+2]]:1: warning: potentially undefined instruction encoding
+# CHECK: v_cmpx_gt_f64_e64 s[72:73], v[1:2], v[2:3] ; encoding: [0x48,0x00,0x74,0xd0,0x01,0x05,0x02,0x00]
+0x48,0x00,0x74,0xd0,0x01,0x05,0x02,0x00
+
+# CHECK-NOT: [[@LINE+2]]:1: warning: potentially undefined instruction encoding
+# CHECK: v_cmpx_lg_f64_e64 s[74:75], v[1:2], v[2:3] ; encoding: [0x4a,0x00,0x75,0xd0,0x01,0x05,0x02,0x00]
+0x4a,0x00,0x75,0xd0,0x01,0x05,0x02,0x00
+
+# CHECK-NOT: [[@LINE+2]]:1: warning: potentially undefined instruction encoding
+# CHECK: v_cmpx_ge_f64_e64 s[76:77], v[1:2], v[2:3] ; encoding: [0x4c,0x00,0x76,0xd0,0x01,0x05,0x02,0x00]
+0x4c,0x00,0x76,0xd0,0x01,0x05,0x02,0x00
+
+# CHECK-NOT: [[@LINE+2]]:1: warning: potentially undefined instruction encoding
+# CHECK: v_cmpx_o_f64_e64 s[78:79], v[1:2], v[2:3] ; encoding: [0x4e,0x00,0x77,0xd0,0x01,0x05,0x02,0x00]
+0x4e,0x00,0x77,0xd0,0x01,0x05,0x02,0x00
+
+# CHECK-NOT: [[@LINE+2]]:1: warning: potentially undefined instruction encoding
+# CHECK: v_cmpx_u_f64_e64 s[80:81], v[1:2], v[2:3] ; encoding: [0x50,0x00,0x78,0xd0,0x01,0x05,0x02,0x00]
+0x50,0x00,0x78,0xd0,0x01,0x05,0x02,0x00
+
+# CHECK-NOT: [[@LINE+2]]:1: warning: potentially undefined instruction encoding
+# CHECK: v_cmpx_nge_f64_e64 s[82:83], v[1:2], v[2:3] ; encoding: [0x52,0x00,0x79,0xd0,0x01,0x05,0x02,0x00]
+0x52,0x00,0x79,0xd0,0x01,0x05,0x02,0x00
+
+# CHECK-NOT: [[@LINE+2]]:1: warning: potentially undefined instruction encoding
+# CHECK: v_cmpx_nlg_f64_e64 s[84:85], v[1:2], v[2:3] ; encoding: [0x54,0x00,0x7a,0xd0,0x01,0x05,0x02,0x00]
+0x54,0x00,0x7a,0xd0,0x01,0x05,0x02,0x00
+
+# CHECK-NOT: [[@LINE+2]]:1: warning: potentially undefined instruction encoding
+# CHECK: v_cmpx_ngt_f64_e64 s[86:87], v[1:2], v[2:3] ; encoding: [0x56,0x00,0x7b,0xd0,0x01,0x05,0x02,0x00]
+0x56,0x00,0x7b,0xd0,0x01,0x05,0x02,0x00
+
+# CHECK-NOT: [[@LINE+2]]:1: warning: potentially undefined instruction encoding
+# CHECK: v_cmpx_nle_f64_e64 s[88:89], v[1:2], v[2:3] ; encoding: [0x58,0x00,0x7c,0xd0,0x01,0x05,0x02,0x00]
+0x58,0x00,0x7c,0xd0,0x01,0x05,0x02,0x00
+
+# CHECK-NOT: [[@LINE+2]]:1: warning: potentially undefined instruction encoding
+# CHECK: v_cmpx_neq_f64_e64 s[90:91], v[1:2], v[2:3] ; encoding: [0x5a,0x00,0x7d,0xd0,0x01,0x05,0x02,0x00]
+0x5a,0x00,0x7d,0xd0,0x01,0x05,0x02,0x00
+
+# CHECK-NOT: [[@LINE+2]]:1: warning: potentially undefined instruction encoding
+# CHECK: v_cmpx_nlt_f64_e64 s[92:93], v[1:2], v[2:3] ; encoding: [0x5c,0x00,0x7e,0xd0,0x01,0x05,0x02,0x00]
+0x5c,0x00,0x7e,0xd0,0x01,0x05,0x02,0x00
+
+# CHECK-NOT: [[@LINE+2]]:1: warning: potentially undefined instruction encoding
+# CHECK: v_cmpx_tru_f64_e64 s[94:95], v[1:2], v[2:3] ; encoding: [0x5e,0x00,0x7f,0xd0,0x01,0x05,0x02,0x00]
+0x5e,0x00,0x7f,0xd0,0x01,0x05,0x02,0x00
+
+# CHECK-NOT: [[@LINE+2]]:1: warning: potentially undefined instruction encoding
+# CHECK: v_cmpx_f_i16_e64 s[96:97], v1, v2 ; encoding: [0x60,0x00,0xb0,0xd0,0x01,0x05,0x02,0x00]
+0x60,0x00,0xb0,0xd0,0x01,0x05,0x02,0x00
+
+# CHECK-NOT: [[@LINE+2]]:1: warning: potentially undefined instruction encoding
+# CHECK: v_cmpx_lt_i16_e64 s[98:99], v1, v2 ; encoding: [0x62,0x00,0xb1,0xd0,0x01,0x05,0x02,0x00]
+0x62,0x00,0xb1,0xd0,0x01,0x05,0x02,0x00
+
+# CHECK-NOT: [[@LINE+2]]:1: warning: potentially undefined instruction encoding
+# CHECK: v_cmpx_eq_i16_e64 s[100:101], v1, v2 ; encoding: [0x64,0x00,0xb2,0xd0,0x01,0x05,0x02,0x00]
+0x64,0x00,0xb2,0xd0,0x01,0x05,0x02,0x00
+
+# CHECK-NOT: [[@LINE+2]]:1: warning: potentially undefined instruction encoding
+# CHECK: v_cmpx_le_i16_e64 flat_scratch, v1, v2 ; encoding: [0x66,0x00,0xb3,0xd0,0x01,0x05,0x02,0x00]
+0x66,0x00,0xb3,0xd0,0x01,0x05,0x02,0x00
+
+# CHECK-NOT: [[@LINE+2]]:1: warning: potentially undefined instruction encoding
+# CHECK: v_cmpx_gt_i16_e64 xnack_mask, v1, v2 ; encoding: [0x68,0x00,0xb4,0xd0,0x01,0x05,0x02,0x00]
+0x68,0x00,0xb4,0xd0,0x01,0x05,0x02,0x00
+
+# CHECK-NOT: [[@LINE+2]]:1: warning: potentially undefined instruction encoding
+# CHECK: v_cmpx_ne_i16_e64 vcc, v1, v2 ; encoding: [0x6a,0x00,0xb5,0xd0,0x01,0x05,0x02,0x00]
+0x6a,0x00,0xb5,0xd0,0x01,0x05,0x02,0x00
+
+# CHECK-NOT: [[@LINE+2]]:1: warning: potentially undefined instruction encoding
+# CHECK: v_cmpx_ge_i16_e64 ttmp[0:1], v1, v2 ; encoding: [0x6c,0x00,0xb6,0xd0,0x01,0x05,0x02,0x00]
+0x6c,0x00,0xb6,0xd0,0x01,0x05,0x02,0x00
+
+# CHECK-NOT: [[@LINE+2]]:1: warning: potentially undefined instruction encoding
+# CHECK: v_cmpx_t_i16_e64 ttmp[2:3], v1, v2 ; encoding: [0x6e,0x00,0xb7,0xd0,0x01,0x05,0x02,0x00]
+0x6e,0x00,0xb7,0xd0,0x01,0x05,0x02,0x00
+
+# CHECK-NOT: [[@LINE+2]]:1: warning: potentially undefined instruction encoding
+# CHECK: v_cmpx_f_u16_e64 ttmp[4:5], v1, v2 ; encoding: [0x70,0x00,0xb8,0xd0,0x01,0x05,0x02,0x00]
+0x70,0x00,0xb8,0xd0,0x01,0x05,0x02,0x00
+
+# CHECK-NOT: [[@LINE+2]]:1: warning: potentially undefined instruction encoding
+# CHECK: v_cmpx_lt_u16_e64 ttmp[6:7], v1, v2 ; encoding: [0x72,0x00,0xb9,0xd0,0x01,0x05,0x02,0x00]
+0x72,0x00,0xb9,0xd0,0x01,0x05,0x02,0x00
+
+# CHECK-NOT: [[@LINE+2]]:1: warning: potentially undefined instruction encoding
+# CHECK: v_cmpx_eq_u16_e64 ttmp[8:9], v1, v2 ; encoding: [0x74,0x00,0xba,0xd0,0x01,0x05,0x02,0x00]
+0x74,0x00,0xba,0xd0,0x01,0x05,0x02,0x00
+
+# CHECK-NOT: [[@LINE+2]]:1: warning: potentially undefined instruction encoding
+# CHECK: v_cmpx_le_u16_e64 ttmp[10:11], v1, v2 ; encoding: [0x76,0x00,0xbb,0xd0,0x01,0x05,0x02,0x00]
+0x76,0x00,0xbb,0xd0,0x01,0x05,0x02,0x00
+
+# CHECK-NOT: [[@LINE+2]]:1: warning: potentially undefined instruction encoding
+# CHECK: v_cmpx_gt_u16_e64 ttmp[12:13], v1, v2 ; encoding: [0x78,0x00,0xbc,0xd0,0x01,0x05,0x02,0x00]
+0x78,0x00,0xbc,0xd0,0x01,0x05,0x02,0x00
+
+# CHECK-NOT: [[@LINE+2]]:1: warning: potentially undefined instruction encoding
+# CHECK: v_cmpx_ne_u16_e64 ttmp[14:15], v1, v2 ; encoding: [0x7a,0x00,0xbd,0xd0,0x01,0x05,0x02,0x00]
+0x7a,0x00,0xbd,0xd0,0x01,0x05,0x02,0x00
+
+# CHECK-NOT: [[@LINE+2]]:1: warning: potentially undefined instruction encoding
+# CHECK: v_cmpx_ge_u16_e64 s[10:11], v1, v2 ; encoding: [0x0a,0x00,0xbe,0xd0,0x01,0x05,0x02,0x00]
+0x0a,0x00,0xbe,0xd0,0x01,0x05,0x02,0x00
+
+# CHECK-NOT: [[@LINE+2]]:1: warning: potentially undefined instruction encoding
+# CHECK: v_cmpx_t_u16_e64 s[10:11], v1, v2 ; encoding: [0x0a,0x00,0xbf,0xd0,0x01,0x05,0x02,0x00]
+0x0a,0x00,0xbf,0xd0,0x01,0x05,0x02,0x00
+
+# CHECK-NOT: [[@LINE+2]]:1: warning: potentially undefined instruction encoding
+# CHECK: v_cmpx_f_i32_e64 s[10:11], v1, v2 ; encoding: [0x0a,0x00,0xd0,0xd0,0x01,0x05,0x02,0x00]
+0x0a,0x00,0xd0,0xd0,0x01,0x05,0x02,0x00
+
+# CHECK-NOT: [[@LINE+2]]:1: warning: potentially undefined instruction encoding
+# CHECK: v_cmpx_lt_i32_e64 s[10:11], v1, v2 ; encoding: [0x0a,0x00,0xd1,0xd0,0x01,0x05,0x02,0x00]
+0x0a,0x00,0xd1,0xd0,0x01,0x05,0x02,0x00
+
+# CHECK-NOT: [[@LINE+2]]:1: warning: potentially undefined instruction encoding
+# CHECK: v_cmpx_eq_i32_e64 s[10:11], v1, v2 ; encoding: [0x0a,0x00,0xd2,0xd0,0x01,0x05,0x02,0x00]
+0x0a,0x00,0xd2,0xd0,0x01,0x05,0x02,0x00
+
+# CHECK-NOT: [[@LINE+2]]:1: warning: potentially undefined instruction encoding
+# CHECK: v_cmpx_le_i32_e64 s[10:11], v1, v2 ; encoding: [0x0a,0x00,0xd3,0xd0,0x01,0x05,0x02,0x00]
+0x0a,0x00,0xd3,0xd0,0x01,0x05,0x02,0x00
+
+# CHECK-NOT: [[@LINE+2]]:1: warning: potentially undefined instruction encoding
+# CHECK: v_cmpx_gt_i32_e64 s[10:11], v1, v2 ; encoding: [0x0a,0x00,0xd4,0xd0,0x01,0x05,0x02,0x00]
+0x0a,0x00,0xd4,0xd0,0x01,0x05,0x02,0x00
+
+# CHECK-NOT: [[@LINE+2]]:1: warning: potentially undefined instruction encoding
+# CHECK: v_cmpx_ne_i32_e64 s[10:11], v1, v2 ; encoding: [0x0a,0x00,0xd5,0xd0,0x01,0x05,0x02,0x00]
+0x0a,0x00,0xd5,0xd0,0x01,0x05,0x02,0x00
+
+# CHECK-NOT: [[@LINE+2]]:1: warning: potentially undefined instruction encoding
+# CHECK: v_cmpx_ge_i32_e64 s[10:11], v1, v2 ; encoding: [0x0a,0x00,0xd6,0xd0,0x01,0x05,0x02,0x00]
+0x0a,0x00,0xd6,0xd0,0x01,0x05,0x02,0x00
+
+# CHECK-NOT: [[@LINE+2]]:1: warning: potentially undefined instruction encoding
+# CHECK: v_cmpx_t_i32_e64 s[10:11], v1, v2 ; encoding: [0x0a,0x00,0xd7,0xd0,0x01,0x05,0x02,0x00]
+0x0a,0x00,0xd7,0xd0,0x01,0x05,0x02,0x00
+
+# CHECK-NOT: [[@LINE+2]]:1: warning: potentially undefined instruction encoding
+# CHECK: v_cmpx_f_u32_e64 s[10:11], v1, v2 ; encoding: [0x0a,0x00,0xd8,0xd0,0x01,0x05,0x02,0x00]
+0x0a,0x00,0xd8,0xd0,0x01,0x05,0x02,0x00
+
+# CHECK-NOT: [[@LINE+2]]:1: warning: potentially undefined instruction encoding
+# CHECK: v_cmpx_lt_u32_e64 s[10:11], v1, v2 ; encoding: [0x0a,0x00,0xd9,0xd0,0x01,0x05,0x02,0x00]
+0x0a,0x00,0xd9,0xd0,0x01,0x05,0x02,0x00
+
+# CHECK-NOT: [[@LINE+2]]:1: warning: potentially undefined instruction encoding
+# CHECK: v_cmpx_eq_u32_e64 s[10:11], v1, v2 ; encoding: [0x0a,0x00,0xda,0xd0,0x01,0x05,0x02,0x00]
+0x0a,0x00,0xda,0xd0,0x01,0x05,0x02,0x00
+
+# CHECK-NOT: [[@LINE+2]]:1: warning: potentially undefined instruction encoding
+# CHECK: v_cmpx_le_u32_e64 s[10:11], v1, v2 ; encoding: [0x0a,0x00,0xdb,0xd0,0x01,0x05,0x02,0x00]
+0x0a,0x00,0xdb,0xd0,0x01,0x05,0x02,0x00
+
+# CHECK-NOT: [[@LINE+2]]:1: warning: potentially undefined instruction encoding
+# CHECK: v_cmpx_gt_u32_e64 s[10:11], v1, v2 ; encoding: [0x0a,0x00,0xdc,0xd0,0x01,0x05,0x02,0x00]
+0x0a,0x00,0xdc,0xd0,0x01,0x05,0x02,0x00
+
+# CHECK-NOT: [[@LINE+2]]:1: warning: potentially undefined instruction encoding
+# CHECK: v_cmpx_ne_u32_e64 s[10:11], v1, v2 ; encoding: [0x0a,0x00,0xdd,0xd0,0x01,0x05,0x02,0x00]
+0x0a,0x00,0xdd,0xd0,0x01,0x05,0x02,0x00
+
+# CHECK-NOT: [[@LINE+2]]:1: warning: potentially undefined instruction encoding
+# CHECK: v_cmpx_ge_u32_e64 s[10:11], v1, v2 ; encoding: [0x0a,0x00,0xde,0xd0,0x01,0x05,0x02,0x00]
+0x0a,0x00,0xde,0xd0,0x01,0x05,0x02,0x00
+
+# CHECK-NOT: [[@LINE+2]]:1: warning: potentially undefined instruction encoding
+# CHECK: v_cmpx_t_u32_e64 s[10:11], v1, v2 ; encoding: [0x0a,0x00,0xdf,0xd0,0x01,0x05,0x02,0x00]
+0x0a,0x00,0xdf,0xd0,0x01,0x05,0x02,0x00
+
+# CHECK-NOT: [[@LINE+2]]:1: warning: potentially undefined instruction encoding
+# CHECK: v_cmpx_f_i64_e64 s[10:11], v[1:2], v[2:3] ; encoding: [0x0a,0x00,0xf0,0xd0,0x01,0x05,0x02,0x00]
+0x0a,0x00,0xf0,0xd0,0x01,0x05,0x02,0x00
+
+# CHECK-NOT: [[@LINE+2]]:1: warning: potentially undefined instruction encoding
+# CHECK: v_cmpx_lt_i64_e64 s[10:11], v[1:2], v[2:3] ; encoding: [0x0a,0x00,0xf1,0xd0,0x01,0x05,0x02,0x00]
+0x0a,0x00,0xf1,0xd0,0x01,0x05,0x02,0x00
+
+# CHECK-NOT: [[@LINE+2]]:1: warning: potentially undefined instruction encoding
+# CHECK: v_cmpx_eq_i64_e64 s[10:11], v[1:2], v[2:3] ; encoding: [0x0a,0x00,0xf2,0xd0,0x01,0x05,0x02,0x00]
+0x0a,0x00,0xf2,0xd0,0x01,0x05,0x02,0x00
+
+# CHECK-NOT: [[@LINE+2]]:1: warning: potentially undefined instruction encoding
+# CHECK: v_cmpx_le_i64_e64 s[10:11], v[1:2], v[2:3] ; encoding: [0x0a,0x00,0xf3,0xd0,0x01,0x05,0x02,0x00]
+0x0a,0x00,0xf3,0xd0,0x01,0x05,0x02,0x00
+
+# CHECK-NOT: [[@LINE+2]]:1: warning: potentially undefined instruction encoding
+# CHECK: v_cmpx_gt_i64_e64 s[10:11], v[1:2], v[2:3] ; encoding: [0x0a,0x00,0xf4,0xd0,0x01,0x05,0x02,0x00]
+0x0a,0x00,0xf4,0xd0,0x01,0x05,0x02,0x00
+
+# CHECK-NOT: [[@LINE+2]]:1: warning: potentially undefined instruction encoding
+# CHECK: v_cmpx_ne_i64_e64 s[10:11], v[1:2], v[2:3] ; encoding: [0x0a,0x00,0xf5,0xd0,0x01,0x05,0x02,0x00]
+0x0a,0x00,0xf5,0xd0,0x01,0x05,0x02,0x00
+
+# CHECK-NOT: [[@LINE+2]]:1: warning: potentially undefined instruction encoding
+# CHECK: v_cmpx_ge_i64_e64 s[10:11], v[1:2], v[2:3] ; encoding: [0x0a,0x00,0xf6,0xd0,0x01,0x05,0x02,0x00]
+0x0a,0x00,0xf6,0xd0,0x01,0x05,0x02,0x00
+
+# CHECK-NOT: [[@LINE+2]]:1: warning: potentially undefined instruction encoding
+# CHECK: v_cmpx_t_i64_e64 s[10:11], v[1:2], v[2:3] ; encoding: [0x0a,0x00,0xf7,0xd0,0x01,0x05,0x02,0x00]
+0x0a,0x00,0xf7,0xd0,0x01,0x05,0x02,0x00
+
+# CHECK-NOT: [[@LINE+2]]:1: warning: potentially undefined instruction encoding
+# CHECK: v_cmpx_f_u64_e64 s[10:11], v[1:2], v[2:3] ; encoding: [0x0a,0x00,0xf8,0xd0,0x01,0x05,0x02,0x00]
+0x0a,0x00,0xf8,0xd0,0x01,0x05,0x02,0x00
+
+# CHECK-NOT: [[@LINE+2]]:1: warning: potentially undefined instruction encoding
+# CHECK: v_cmpx_lt_u64_e64 s[10:11], v[1:2], v[2:3] ; encoding: [0x0a,0x00,0xf9,0xd0,0x01,0x05,0x02,0x00]
+0x0a,0x00,0xf9,0xd0,0x01,0x05,0x02,0x00
+
+# CHECK-NOT: [[@LINE+2]]:1: warning: potentially undefined instruction encoding
+# CHECK: v_cmpx_eq_u64_e64 s[10:11], v[1:2], v[2:3] ; encoding: [0x0a,0x00,0xfa,0xd0,0x01,0x05,0x02,0x00]
+0x0a,0x00,0xfa,0xd0,0x01,0x05,0x02,0x00
+
+# CHECK-NOT: [[@LINE+2]]:1: warning: potentially undefined instruction encoding
+# CHECK: v_cmpx_le_u64_e64 s[10:11], v[1:2], v[2:3] ; encoding: [0x0a,0x00,0xfb,0xd0,0x01,0x05,0x02,0x00]
+0x0a,0x00,0xfb,0xd0,0x01,0x05,0x02,0x00
+
+# CHECK-NOT: [[@LINE+2]]:1: warning: potentially undefined instruction encoding
+# CHECK: v_cmpx_gt_u64_e64 s[10:11], v[1:2], v[2:3] ; encoding: [0x0a,0x00,0xfc,0xd0,0x01,0x05,0x02,0x00]
+0x0a,0x00,0xfc,0xd0,0x01,0x05,0x02,0x00
+
+# CHECK-NOT: [[@LINE+2]]:1: warning: potentially undefined instruction encoding
+# CHECK: v_cmpx_ne_u64_e64 s[10:11], v[1:2], v[2:3] ; encoding: [0x0a,0x00,0xfd,0xd0,0x01,0x05,0x02,0x00]
+0x0a,0x00,0xfd,0xd0,0x01,0x05,0x02,0x00
+
+# CHECK-NOT: [[@LINE+2]]:1: warning: potentially undefined instruction encoding
+# CHECK: v_cmpx_ge_u64_e64 s[10:11], v[1:2], v[2:3] ; encoding: [0x0a,0x00,0xfe,0xd0,0x01,0x05,0x02,0x00]
+0x0a,0x00,0xfe,0xd0,0x01,0x05,0x02,0x00
+
+# CHECK-NOT: [[@LINE+2]]:1: warning: potentially undefined instruction encoding
+# CHECK: v_cmpx_t_u64_e64 s[10:11], v[1:2], v[2:3] ; encoding: [0x0a,0x00,0xff,0xd0,0x01,0x05,0x02,0x00]
+0x0a,0x00,0xff,0xd0,0x01,0x05,0x02,0x00
+
diff --git a/llvm/test/Other/loop-pm-invalidation.ll b/llvm/test/Other/loop-pm-invalidation.ll
index 4bead0b..25552f7 100644
--- a/llvm/test/Other/loop-pm-invalidation.ll
+++ b/llvm/test/Other/loop-pm-invalidation.ll
@@ -16,11 +16,6 @@
; RUN: opt -disable-output -disable-verify -verify-analysis-invalidation=0 -debug-pass-manager %s -aa-pipeline= 2>&1 \
; RUN: -passes='loop(no-op-loop,loop-deletion),invalidate<scalar-evolution>,loop(no-op-loop)' \
; RUN: | FileCheck %s --check-prefix=CHECK-SCEV-INV-AFTER-DELETE
-;
-; Test that BFI is invalidated after the loop adapter if any of the loop passes
-; invalidated it.
-; RUN: opt -disable-output -disable-verify -verify-analysis-invalidation=0 -debug-pass-manager %s -aa-pipeline= 2>&1 \
-; RUN: -O1 | FileCheck %s --check-prefix=CHECK-BFI-INV
define void @no_loops() {
; CHECK-LOOP-INV: Running pass: LoopSimplifyPass
@@ -247,28 +242,3 @@ l0.header:
exit:
ret void
}
-
-; CHECK-BFI-INV-LABEL: Running analysis: OuterAnalysisManagerProxy<{{.*}}> on loop %l0.header in function simplifiable_loop
-; CHECK-BFI-INV-NEXT: Running pass: LoopInstSimplifyPass on loop %l0.header in function simplifiable_loop
-; CHECK-BFI-INV-NEXT: Running pass: LoopSimplifyCFGPass on loop %l0.header in function simplifiable_loop
-; CHECK-BFI-INV-NEXT: Running pass: LICMPass on loop %l0.header in function simplifiable_loop
-; CHECK-BFI-INV-NEXT: Running pass: LoopRotatePass on loop %l0.header in function simplifiable_loop
-; CHECK-BFI-INV-NEXT: Running pass: LICMPass on loop %l0.header in function simplifiable_loop
-; CHECK-BFI-INV-NEXT: Running pass: SimpleLoopUnswitchPass on loop %l0.header in function simplifiable_loop
-; CHECK-BFI-INV-NEXT: Invalidating analysis: PostDominatorTreeAnalysis on simplifiable_loop
-; CHECK-BFI-INV-NEXT: Invalidating analysis: BranchProbabilityAnalysis on simplifiable_loop
-; CHECK-BFI-INV-NEXT: Invalidating analysis: BlockFrequencyAnalysis on simplifiable_loop
-; CHECK-BFI-INV-NEXT: Running pass: SimplifyCFGPass on simplifiable_loop (5 instructions)
-
-define void @simplifiable_loop(i1 %c) !prof !0 {
-entry:
- br label %l0.header
-
-l0.header:
- br label %l0.latch
-
-l0.latch:
- br i1 %c, label %l0.header, label %l0.latch
-}
-
-!0 = !{!"function_entry_count", i64 1}
diff --git a/llvm/test/Other/new-pm-defaults.ll b/llvm/test/Other/new-pm-defaults.ll
index 94e860b..65b96c8 100644
--- a/llvm/test/Other/new-pm-defaults.ll
+++ b/llvm/test/Other/new-pm-defaults.ll
@@ -186,7 +186,6 @@
; CHECK-O-NEXT: Running pass: LoopRotatePass
; CHECK-O-NEXT: Running pass: LICM
; CHECK-O-NEXT: Running pass: SimpleLoopUnswitchPass
-; CHECK-O-NEXT: Running analysis: OuterAnalysisManagerProxy
; CHECK-O-NEXT: Running pass: SimplifyCFGPass
; CHECK-O-NEXT: Running pass: InstCombinePass
; CHECK-O-NEXT: Running pass: LoopSimplifyPass
diff --git a/llvm/test/Other/new-pm-thinlto-postlink-defaults.ll b/llvm/test/Other/new-pm-thinlto-postlink-defaults.ll
index a08a140..3a0fffe 100644
--- a/llvm/test/Other/new-pm-thinlto-postlink-defaults.ll
+++ b/llvm/test/Other/new-pm-thinlto-postlink-defaults.ll
@@ -114,7 +114,6 @@
; CHECK-O-NEXT: Running pass: LoopRotatePass
; CHECK-O-NEXT: Running pass: LICM
; CHECK-O-NEXT: Running pass: SimpleLoopUnswitchPass
-; CHECK-O-NEXT: Running analysis: OuterAnalysisManagerProxy
; CHECK-O-NEXT: Running pass: SimplifyCFGPass
; CHECK-O-NEXT: Running pass: InstCombinePass
; CHECK-O-NEXT: Running pass: LoopSimplifyPass
diff --git a/llvm/test/Other/new-pm-thinlto-postlink-pgo-defaults.ll b/llvm/test/Other/new-pm-thinlto-postlink-pgo-defaults.ll
index d9e2dd3..4623edc 100644
--- a/llvm/test/Other/new-pm-thinlto-postlink-pgo-defaults.ll
+++ b/llvm/test/Other/new-pm-thinlto-postlink-pgo-defaults.ll
@@ -100,7 +100,6 @@
; CHECK-O-NEXT: Running pass: LoopRotatePass
; CHECK-O-NEXT: Running pass: LICM
; CHECK-O-NEXT: Running pass: SimpleLoopUnswitchPass
-; CHECK-O-NEXT: Running analysis: OuterAnalysisManagerProxy
; CHECK-O-NEXT: Running pass: SimplifyCFGPass
; CHECK-O-NEXT: Running pass: InstCombinePass
; CHECK-O-NEXT: Running pass: LoopSimplifyPass
diff --git a/llvm/test/Other/new-pm-thinlto-postlink-samplepgo-defaults.ll b/llvm/test/Other/new-pm-thinlto-postlink-samplepgo-defaults.ll
index 2f6fa4b..590afd9 100644
--- a/llvm/test/Other/new-pm-thinlto-postlink-samplepgo-defaults.ll
+++ b/llvm/test/Other/new-pm-thinlto-postlink-samplepgo-defaults.ll
@@ -109,7 +109,6 @@
; CHECK-O-NEXT: Running pass: LoopRotatePass
; CHECK-O-NEXT: Running pass: LICM
; CHECK-O-NEXT: Running pass: SimpleLoopUnswitchPass
-; CHECK-O-NEXT: Running analysis: OuterAnalysisManagerProxy
; CHECK-O-NEXT: Running pass: SimplifyCFGPass
; CHECK-O-NEXT: Running pass: InstCombinePass
; CHECK-O-NEXT: Running pass: LoopSimplifyPass
diff --git a/llvm/test/Other/new-pm-thinlto-prelink-defaults.ll b/llvm/test/Other/new-pm-thinlto-prelink-defaults.ll
index 5aacd26..dd6acd2 100644
--- a/llvm/test/Other/new-pm-thinlto-prelink-defaults.ll
+++ b/llvm/test/Other/new-pm-thinlto-prelink-defaults.ll
@@ -146,7 +146,6 @@
; CHECK-O-NEXT: Running pass: LoopRotatePass
; CHECK-O-NEXT: Running pass: LICM
; CHECK-O-NEXT: Running pass: SimpleLoopUnswitchPass
-; CHECK-O-NEXT: Running analysis: OuterAnalysisManagerProxy
; CHECK-O-NEXT: Running pass: SimplifyCFGPass
; CHECK-O-NEXT: Running pass: InstCombinePass
; CHECK-O-NEXT: Running pass: LoopSimplifyPass
diff --git a/llvm/test/Other/new-pm-thinlto-prelink-pgo-defaults.ll b/llvm/test/Other/new-pm-thinlto-prelink-pgo-defaults.ll
index f6a9406..ee05452 100644
--- a/llvm/test/Other/new-pm-thinlto-prelink-pgo-defaults.ll
+++ b/llvm/test/Other/new-pm-thinlto-prelink-pgo-defaults.ll
@@ -149,7 +149,6 @@
; CHECK-O-NEXT: Running pass: LoopRotatePass
; CHECK-O-NEXT: Running pass: LICM
; CHECK-O-NEXT: Running pass: SimpleLoopUnswitchPass
-; CHECK-O-NEXT: Running analysis: OuterAnalysisManagerProxy
; CHECK-O-NEXT: Running pass: SimplifyCFGPass
; CHECK-O-NEXT: Running pass: InstCombinePass
; CHECK-O-NEXT: Running pass: LoopSimplifyPass
diff --git a/llvm/test/Other/new-pm-thinlto-prelink-samplepgo-defaults.ll b/llvm/test/Other/new-pm-thinlto-prelink-samplepgo-defaults.ll
index 48a9433..fd95e94 100644
--- a/llvm/test/Other/new-pm-thinlto-prelink-samplepgo-defaults.ll
+++ b/llvm/test/Other/new-pm-thinlto-prelink-samplepgo-defaults.ll
@@ -114,7 +114,6 @@
; CHECK-O-NEXT: Running pass: LoopRotatePass
; CHECK-O-NEXT: Running pass: LICM
; CHECK-O-NEXT: Running pass: SimpleLoopUnswitchPass
-; CHECK-O-NEXT: Running analysis: OuterAnalysisManagerProxy
; CHECK-O-NEXT: Running pass: SimplifyCFGPass
; CHECK-O-NEXT: Running pass: InstCombinePass
; CHECK-O-NEXT: Running pass: LoopSimplifyPass
diff --git a/llvm/test/Transforms/IndVarSimplify/pointer-loop-guards.ll b/llvm/test/Transforms/IndVarSimplify/pointer-loop-guards.ll
index 6732efc..dbd572d 100644
--- a/llvm/test/Transforms/IndVarSimplify/pointer-loop-guards.ll
+++ b/llvm/test/Transforms/IndVarSimplify/pointer-loop-guards.ll
@@ -111,7 +111,6 @@ define void @test_sub_cmp(ptr align 8 %start, ptr %end) {
; N32-NEXT: [[CMP_ENTRY:%.*]] = icmp eq ptr [[START]], [[END]]
; N32-NEXT: br i1 [[CMP_ENTRY]], label %[[EXIT:.*]], label %[[LOOP_HEADER_PREHEADER:.*]]
; N32: [[LOOP_HEADER_PREHEADER]]:
-; N32-NEXT: [[UMAX:%.*]] = call i64 @llvm.umax.i64(i64 [[PTR_DIFF]], i64 1)
; N32-NEXT: br label %[[LOOP_HEADER:.*]]
; N32: [[LOOP_HEADER]]:
; N32-NEXT: [[IV:%.*]] = phi i64 [ [[IV_NEXT:%.*]], %[[LOOP_LATCH:.*]] ], [ 0, %[[LOOP_HEADER_PREHEADER]] ]
@@ -119,7 +118,7 @@ define void @test_sub_cmp(ptr align 8 %start, ptr %end) {
; N32-NEXT: br i1 [[C_1]], label %[[EXIT_EARLY:.*]], label %[[LOOP_LATCH]]
; N32: [[LOOP_LATCH]]:
; N32-NEXT: [[IV_NEXT]] = add nuw i64 [[IV]], 1
-; N32-NEXT: [[EXITCOND:%.*]] = icmp ne i64 [[IV_NEXT]], [[UMAX]]
+; N32-NEXT: [[EXITCOND:%.*]] = icmp ne i64 [[IV_NEXT]], [[PTR_DIFF]]
; N32-NEXT: br i1 [[EXITCOND]], label %[[LOOP_HEADER]], label %[[EXIT_LOOPEXIT:.*]]
; N32: [[EXIT_EARLY]]:
; N32-NEXT: br label %[[EXIT]]
@@ -162,13 +161,17 @@ define void @test_ptr_diff_with_assume(ptr align 8 %start, ptr align 8 %end, ptr
; CHECK-NEXT: [[PTR_DIFF:%.*]] = sub i64 [[START_INT]], [[END_INT]]
; CHECK-NEXT: [[DIFF_CMP:%.*]] = icmp ult i64 [[PTR_DIFF]], 2
; CHECK-NEXT: call void @llvm.assume(i1 [[DIFF_CMP]])
+; CHECK-NEXT: [[COMPUTED_END:%.*]] = getelementptr i8, ptr [[START]], i64 [[PTR_DIFF]]
; CHECK-NEXT: [[ENTRY_CMP:%.*]] = icmp eq ptr [[START]], [[END]]
; CHECK-NEXT: br i1 [[ENTRY_CMP]], label %[[EXIT:.*]], label %[[LOOP_BODY_PREHEADER:.*]]
; CHECK: [[LOOP_BODY_PREHEADER]]:
; CHECK-NEXT: br label %[[LOOP_BODY:.*]]
; CHECK: [[LOOP_BODY]]:
+; CHECK-NEXT: [[IV:%.*]] = phi ptr [ [[IV_NEXT:%.*]], %[[LOOP_BODY]] ], [ [[START]], %[[LOOP_BODY_PREHEADER]] ]
; CHECK-NEXT: [[TMP0:%.*]] = call i1 @cond()
-; CHECK-NEXT: br i1 true, label %[[EXIT_LOOPEXIT:.*]], label %[[LOOP_BODY]]
+; CHECK-NEXT: [[IV_NEXT]] = getelementptr i8, ptr [[IV]], i64 1
+; CHECK-NEXT: [[LOOP_CMP:%.*]] = icmp eq ptr [[IV_NEXT]], [[COMPUTED_END]]
+; CHECK-NEXT: br i1 [[LOOP_CMP]], label %[[EXIT_LOOPEXIT:.*]], label %[[LOOP_BODY]]
; CHECK: [[EXIT_LOOPEXIT]]:
; CHECK-NEXT: br label %[[EXIT]]
; CHECK: [[EXIT]]:
@@ -182,13 +185,17 @@ define void @test_ptr_diff_with_assume(ptr align 8 %start, ptr align 8 %end, ptr
; N32-NEXT: [[PTR_DIFF:%.*]] = sub i64 [[START_INT]], [[END_INT]]
; N32-NEXT: [[DIFF_CMP:%.*]] = icmp ult i64 [[PTR_DIFF]], 2
; N32-NEXT: call void @llvm.assume(i1 [[DIFF_CMP]])
+; N32-NEXT: [[COMPUTED_END:%.*]] = getelementptr i8, ptr [[START]], i64 [[PTR_DIFF]]
; N32-NEXT: [[ENTRY_CMP:%.*]] = icmp eq ptr [[START]], [[END]]
; N32-NEXT: br i1 [[ENTRY_CMP]], label %[[EXIT:.*]], label %[[LOOP_BODY_PREHEADER:.*]]
; N32: [[LOOP_BODY_PREHEADER]]:
; N32-NEXT: br label %[[LOOP_BODY:.*]]
; N32: [[LOOP_BODY]]:
+; N32-NEXT: [[IV:%.*]] = phi ptr [ [[IV_NEXT:%.*]], %[[LOOP_BODY]] ], [ [[START]], %[[LOOP_BODY_PREHEADER]] ]
; N32-NEXT: [[TMP0:%.*]] = call i1 @cond()
-; N32-NEXT: br i1 true, label %[[EXIT_LOOPEXIT:.*]], label %[[LOOP_BODY]]
+; N32-NEXT: [[IV_NEXT]] = getelementptr i8, ptr [[IV]], i64 1
+; N32-NEXT: [[LOOP_CMP:%.*]] = icmp eq ptr [[IV_NEXT]], [[COMPUTED_END]]
+; N32-NEXT: br i1 [[LOOP_CMP]], label %[[EXIT_LOOPEXIT:.*]], label %[[LOOP_BODY]]
; N32: [[EXIT_LOOPEXIT]]:
; N32-NEXT: br label %[[EXIT]]
; N32: [[EXIT]]:
diff --git a/llvm/test/Transforms/InstCombine/scmp.ll b/llvm/test/Transforms/InstCombine/scmp.ll
index 2bf22ae..c0be5b9 100644
--- a/llvm/test/Transforms/InstCombine/scmp.ll
+++ b/llvm/test/Transforms/InstCombine/scmp.ll
@@ -423,6 +423,86 @@ define i8 @scmp_from_select_eq_and_gt_commuted3(i32 %x, i32 %y) {
ret i8 %r
}
+; Commutative tests for (x != y) ? (x > y ? 1 : -1) : 0
+define i8 @scmp_from_select_ne_and_gt_commuted1(i32 %x, i32 %y) {
+; CHECK-LABEL: define i8 @scmp_from_select_ne_and_gt_commuted1(
+; CHECK-SAME: i32 [[X:%.*]], i32 [[Y:%.*]]) {
+; CHECK-NEXT: [[R:%.*]] = call i8 @llvm.scmp.i8.i32(i32 [[Y]], i32 [[X]])
+; CHECK-NEXT: ret i8 [[R]]
+;
+ %ne = icmp ne i32 %x, %y
+ %gt = icmp slt i32 %x, %y
+ %sel1 = select i1 %gt, i8 1, i8 -1
+ %r = select i1 %ne, i8 %sel1, i8 0
+ ret i8 %r
+}
+
+define i8 @scmp_from_select_ne_and_gt_commuted2(i32 %x, i32 %y) {
+; CHECK-LABEL: define i8 @scmp_from_select_ne_and_gt_commuted2(
+; CHECK-SAME: i32 [[X:%.*]], i32 [[Y:%.*]]) {
+; CHECK-NEXT: [[R:%.*]] = call i8 @llvm.scmp.i8.i32(i32 [[Y]], i32 [[X]])
+; CHECK-NEXT: ret i8 [[R]]
+;
+ %ne = icmp ne i32 %x, %y
+ %gt = icmp sgt i32 %x, %y
+ %sel1 = select i1 %gt, i8 -1, i8 1
+ %r = select i1 %ne, i8 %sel1, i8 0
+ ret i8 %r
+}
+
+define i8 @scmp_from_select_ne_and_gt_commuted3(i32 %x, i32 %y) {
+; CHECK-LABEL: define i8 @scmp_from_select_ne_and_gt_commuted3(
+; CHECK-SAME: i32 [[X:%.*]], i32 [[Y:%.*]]) {
+; CHECK-NEXT: [[R:%.*]] = call i8 @llvm.scmp.i8.i32(i32 [[X]], i32 [[Y]])
+; CHECK-NEXT: ret i8 [[R]]
+;
+ %ne = icmp ne i32 %x, %y
+ %gt = icmp sgt i32 %x, %y
+ %sel1 = select i1 %gt, i8 1, i8 -1
+ %r = select i1 %ne, i8 %sel1, i8 0
+ ret i8 %r
+}
+
+; Commutative tests for x != C ? (x > C - 1 ? 1 : -1) : 0
+define i8 @scmp_from_select_ne_const_and_gt_commuted1(i32 %x) {
+; CHECK-LABEL: define i8 @scmp_from_select_ne_const_and_gt_commuted1(
+; CHECK-SAME: i32 [[X:%.*]]) {
+; CHECK-NEXT: [[R:%.*]] = call i8 @llvm.scmp.i8.i32(i32 [[X]], i32 5)
+; CHECK-NEXT: ret i8 [[R]]
+;
+ %ne = icmp ne i32 %x, 5
+ %gt = icmp sgt i32 %x, 4
+ %sel1 = select i1 %gt, i8 1, i8 -1
+ %r = select i1 %ne, i8 %sel1, i8 0
+ ret i8 %r
+}
+
+define i8 @scmp_from_select_ne_const_and_gt_commuted2(i32 %x) {
+; CHECK-LABEL: define i8 @scmp_from_select_ne_const_and_gt_commuted2(
+; CHECK-SAME: i32 [[X:%.*]]) {
+; CHECK-NEXT: [[R:%.*]] = call i8 @llvm.scmp.i8.i32(i32 [[X]], i32 5)
+; CHECK-NEXT: ret i8 [[R]]
+;
+ %ne = icmp ne i32 %x, 5
+ %gt = icmp sgt i32 %x, 4
+ %sel1 = select i1 %gt, i8 1, i8 -1
+ %r = select i1 %ne, i8 %sel1, i8 0
+ ret i8 %r
+}
+
+define i8 @scmp_from_select_ne_const_and_gt_commuted3(i32 %x) {
+; CHECK-LABEL: define i8 @scmp_from_select_ne_const_and_gt_commuted3(
+; CHECK-SAME: i32 [[X:%.*]]) {
+; CHECK-NEXT: [[R:%.*]] = call i8 @llvm.scmp.i8.i32(i32 [[X]], i32 5)
+; CHECK-NEXT: ret i8 [[R]]
+;
+ %ne = icmp ne i32 %x, 5
+ %gt = icmp sgt i32 %x, 4
+ %sel1 = select i1 %gt, i8 1, i8 -1
+ %r = select i1 %ne, i8 %sel1, i8 0
+ ret i8 %r
+}
+
define <3 x i2> @scmp_unary_shuffle_ops(<3 x i8> %x, <3 x i8> %y) {
; CHECK-LABEL: define <3 x i2> @scmp_unary_shuffle_ops(
; CHECK-SAME: <3 x i8> [[X:%.*]], <3 x i8> [[Y:%.*]]) {
@@ -436,6 +516,187 @@ define <3 x i2> @scmp_unary_shuffle_ops(<3 x i8> %x, <3 x i8> %y) {
ret <3 x i2> %r
}
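+; Select chains of the form (a > b ? 1 : (a < b ? -1 : 0)), against zero or a second operand;
+; most should fold to @llvm.scmp/@llvm.ucmp, while the *_neg variants cover patterns that must
+; not become a cmp intrinsic.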
+define i32 @scmp_sgt_slt(i32 %a) {
+; CHECK-LABEL: define i32 @scmp_sgt_slt(
+; CHECK-SAME: i32 [[A:%.*]]) {
+; CHECK-NEXT: [[A_LOBIT:%.*]] = ashr i32 [[A]], 31
+; CHECK-NEXT: [[CMP_INV:%.*]] = icmp slt i32 [[A]], 1
+; CHECK-NEXT: [[RETVAL_0:%.*]] = select i1 [[CMP_INV]], i32 [[A_LOBIT]], i32 1
+; CHECK-NEXT: ret i32 [[RETVAL_0]]
+;
+ %cmp = icmp sgt i32 %a, 0
+ %cmp1 = icmp slt i32 %a, 0
+ %. = select i1 %cmp1, i32 -1, i32 0
+ %retval.0 = select i1 %cmp, i32 1, i32 %.
+ ret i32 %retval.0
+}
+
+define i32 @scmp_zero_slt(i32 %a) {
+; CHECK-LABEL: define i32 @scmp_zero_slt(
+; CHECK-SAME: i32 [[A:%.*]]) {
+; CHECK-NEXT: [[RETVAL_0:%.*]] = call i32 @llvm.scmp.i32.i32(i32 [[A]], i32 0)
+; CHECK-NEXT: ret i32 [[RETVAL_0]]
+;
+ %cmp = icmp eq i32 %a, 0
+ %cmp1.inv = icmp slt i32 %a, 1
+ %. = select i1 %cmp1.inv, i32 -1, i32 1
+ %retval.0 = select i1 %cmp, i32 0, i32 %.
+ ret i32 %retval.0
+}
+
+define i32 @scmp_zero_sgt(i32 %a) {
+; CHECK-LABEL: define i32 @scmp_zero_sgt(
+; CHECK-SAME: i32 [[A:%.*]]) {
+; CHECK-NEXT: [[RETVAL_0:%.*]] = call i32 @llvm.scmp.i32.i32(i32 [[A]], i32 0)
+; CHECK-NEXT: ret i32 [[RETVAL_0]]
+;
+ %cmp = icmp eq i32 %a, 0
+ %cmp1.inv = icmp sgt i32 %a, -1
+ %. = select i1 %cmp1.inv, i32 1, i32 -1
+ %retval.0 = select i1 %cmp, i32 0, i32 %.
+ ret i32 %retval.0
+}
+
+
+define i32 @scmp_zero_sgt_1(i32 %a) {
+; CHECK-LABEL: define i32 @scmp_zero_sgt_1(
+; CHECK-SAME: i32 [[A:%.*]]) {
+; CHECK-NEXT: [[COND2:%.*]] = call i32 @llvm.scmp.i32.i32(i32 [[A]], i32 0)
+; CHECK-NEXT: ret i32 [[COND2]]
+;
+ %cmp = icmp eq i32 %a, 0
+ %cmp1 = icmp sgt i32 %a, -1
+ %cond = select i1 %cmp1, i32 1, i32 -1
+ %cond2 = select i1 %cmp, i32 0, i32 %cond
+ ret i32 %cond2
+}
+
+define i32 @scmp_zero_slt_1(i32 %a) {
+; CHECK-LABEL: define i32 @scmp_zero_slt_1(
+; CHECK-SAME: i32 [[A:%.*]]) {
+; CHECK-NEXT: [[COND2:%.*]] = call i32 @llvm.scmp.i32.i32(i32 [[A]], i32 0)
+; CHECK-NEXT: ret i32 [[COND2]]
+;
+ %cmp = icmp eq i32 %a, 0
+ %cmp1 = icmp slt i32 %a, 1
+ %cond = select i1 %cmp1, i32 -1, i32 1
+ %cond2 = select i1 %cmp, i32 0, i32 %cond
+ ret i32 %cond2
+}
+
+define i32 @scmp_zero_slt_neg(i32 %a) {
+; CHECK-LABEL: define i32 @scmp_zero_slt_neg(
+; CHECK-SAME: i32 [[A:%.*]]) {
+; CHECK-NEXT: [[CMP:%.*]] = icmp eq i32 [[A]], 0
+; CHECK-NEXT: [[CMP1:%.*]] = icmp slt i32 [[A]], -1
+; CHECK-NEXT: [[COND:%.*]] = select i1 [[CMP1]], i32 -1, i32 1
+; CHECK-NEXT: [[COND2:%.*]] = select i1 [[CMP]], i32 0, i32 [[COND]]
+; CHECK-NEXT: ret i32 [[COND2]]
+;
+ %cmp = icmp eq i32 %a, 0
+ %cmp1 = icmp slt i32 %a, -1
+ %cond = select i1 %cmp1, i32 -1, i32 1
+ %cond2 = select i1 %cmp, i32 0, i32 %cond
+ ret i32 %cond2
+}
+
+define i32 @scmp_zero_sgt_neg(i32 %a) {
+; CHECK-LABEL: define i32 @scmp_zero_sgt_neg(
+; CHECK-SAME: i32 [[A:%.*]]) {
+; CHECK-NEXT: [[CMP:%.*]] = icmp eq i32 [[A]], 0
+; CHECK-NEXT: [[CMP1:%.*]] = icmp sgt i32 [[A]], 1
+; CHECK-NEXT: [[COND:%.*]] = select i1 [[CMP1]], i32 1, i32 -1
+; CHECK-NEXT: [[COND2:%.*]] = select i1 [[CMP]], i32 0, i32 [[COND]]
+; CHECK-NEXT: ret i32 [[COND2]]
+;
+ %cmp = icmp eq i32 %a, 0
+ %cmp1 = icmp sgt i32 %a, 1
+ %cond = select i1 %cmp1, i32 1, i32 -1
+ %cond2 = select i1 %cmp, i32 0, i32 %cond
+ ret i32 %cond2
+}
+
+define i32 @ucmp_ugt_ult_neg(i32 %a) {
+; CHECK-LABEL: define i32 @ucmp_ugt_ult_neg(
+; CHECK-SAME: i32 [[A:%.*]]) {
+; CHECK-NEXT: [[CMP_NOT:%.*]] = icmp ne i32 [[A]], 0
+; CHECK-NEXT: [[RETVAL_0:%.*]] = zext i1 [[CMP_NOT]] to i32
+; CHECK-NEXT: ret i32 [[RETVAL_0]]
+;
+ %cmp = icmp ugt i32 %a, 0
+ %cmp1 = icmp ult i32 %a, 0
+ %. = select i1 %cmp1, i32 -1, i32 0
+ %retval.0 = select i1 %cmp, i32 1, i32 %.
+ ret i32 %retval.0
+}
+
+define i32 @ucmp_zero_ult_neg(i32 %a) {
+; CHECK-LABEL: define i32 @ucmp_zero_ult_neg(
+; CHECK-SAME: i32 [[A:%.*]]) {
+; CHECK-NEXT: [[CMP:%.*]] = icmp ne i32 [[A]], 0
+; CHECK-NEXT: [[RETVAL_0:%.*]] = zext i1 [[CMP]] to i32
+; CHECK-NEXT: ret i32 [[RETVAL_0]]
+;
+ %cmp = icmp eq i32 %a, 0
+ %cmp1.inv = icmp ult i32 %a, 1
+ %. = select i1 %cmp1.inv, i32 -1, i32 1
+ %retval.0 = select i1 %cmp, i32 0, i32 %.
+ ret i32 %retval.0
+}
+
+define i32 @ucmp_zero_ugt_neg(i32 %a) {
+; CHECK-LABEL: define i32 @ucmp_zero_ugt_neg(
+; CHECK-SAME: i32 [[A:%.*]]) {
+; CHECK-NEXT: [[CMP:%.*]] = icmp ne i32 [[A]], 0
+; CHECK-NEXT: [[RETVAL_0:%.*]] = sext i1 [[CMP]] to i32
+; CHECK-NEXT: ret i32 [[RETVAL_0]]
+;
+ %cmp = icmp eq i32 %a, 0
+ %cmp1.inv = icmp ugt i32 %a, -1
+ %. = select i1 %cmp1.inv, i32 1, i32 -1
+ %retval.0 = select i1 %cmp, i32 0, i32 %.
+ ret i32 %retval.0
+}
+
+define i32 @scmp_sgt_slt_ab(i32 %a, i32 %b) {
+; CHECK-LABEL: define i32 @scmp_sgt_slt_ab(
+; CHECK-SAME: i32 [[A:%.*]], i32 [[B:%.*]]) {
+; CHECK-NEXT: [[RETVAL_0:%.*]] = call i32 @llvm.scmp.i32.i32(i32 [[A]], i32 [[B]])
+; CHECK-NEXT: ret i32 [[RETVAL_0]]
+;
+ %cmp = icmp sgt i32 %a, %b
+ %cmp1 = icmp slt i32 %a, %b
+ %. = select i1 %cmp1, i32 -1, i32 0
+ %retval.0 = select i1 %cmp, i32 1, i32 %.
+ ret i32 %retval.0
+}
+
+define i32 @scmp_zero_slt_ab(i32 %a, i32 %b) {
+; CHECK-LABEL: define i32 @scmp_zero_slt_ab(
+; CHECK-SAME: i32 [[A:%.*]], i32 [[B:%.*]]) {
+; CHECK-NEXT: [[RETVAL_0:%.*]] = call i32 @llvm.scmp.i32.i32(i32 [[A]], i32 [[B]])
+; CHECK-NEXT: ret i32 [[RETVAL_0]]
+;
+ %cmp = icmp eq i32 %a, %b
+ %cmp1.inv = icmp slt i32 %a, %b
+ %. = select i1 %cmp1.inv, i32 -1, i32 1
+ %retval.0 = select i1 %cmp, i32 0, i32 %.
+ ret i32 %retval.0
+}
+
+define i32 @scmp_zero_sgt_ab(i32 %a, i32 %b) {
+; CHECK-LABEL: define i32 @scmp_zero_sgt_ab(
+; CHECK-SAME: i32 [[A:%.*]], i32 [[B:%.*]]) {
+; CHECK-NEXT: [[RETVAL_0:%.*]] = call i32 @llvm.scmp.i32.i32(i32 [[A]], i32 [[B]])
+; CHECK-NEXT: ret i32 [[RETVAL_0]]
+;
+ %cmp = icmp eq i32 %a, %b
+ %cmp1.inv = icmp sgt i32 %a, %b
+ %. = select i1 %cmp1.inv, i32 1, i32 -1
+ %retval.0 = select i1 %cmp, i32 0, i32 %.
+ ret i32 %retval.0
+}
+
; Negative test: true value of outer select is not zero
define i8 @scmp_from_select_eq_and_gt_neg1(i32 %x, i32 %y) {
; CHECK-LABEL: define i8 @scmp_from_select_eq_and_gt_neg1(
diff --git a/llvm/test/Transforms/LoopPredication/preserve-bpi.ll b/llvm/test/Transforms/LoopPredication/preserve-bpi.ll
deleted file mode 100644
index 7fbb197..0000000
--- a/llvm/test/Transforms/LoopPredication/preserve-bpi.ll
+++ /dev/null
@@ -1,60 +0,0 @@
-; RUN: opt -mtriple=x86_64 -passes='loop-mssa(loop-predication,licm,simple-loop-unswitch<nontrivial>,loop-simplifycfg)' -debug-pass-manager -debug-only=branch-prob -S < %s 2>&1 | FileCheck %s
-
-; REQUIRES: asserts
-
-; This test is to solely check that we do not run BPI every single time loop
-; predication is invoked (since BPI is preserved as part of
-; LoopStandardAnalysisResults).
-declare void @llvm.experimental.guard(i1, ...)
-
-; CHECK: Running pass: LoopPredicationPass on loop
-; CHECK-NEXT: Running pass: LICMPass on loop
-; CHECK-NEXT: Running pass: SimpleLoopUnswitchPass on loop
-; CHECK-NEXT: Running analysis: OuterAnalysisManagerProxy
-; CHECK-NEXT: Running pass: LoopPredicationPass on loop
-; CHECK-NEXT: Running pass: LICMPass on loop
-; CHECK-NEXT: Running pass: SimpleLoopUnswitchPass on loop
-; CHECK-NEXT: Running pass: LoopSimplifyCFGPass on loop
-
-define i32 @unsigned_loop_0_to_n_ult_check(ptr %array, i32 %length, i32 %n) {
-entry:
- %tmp5 = icmp eq i32 %n, 0
- br i1 %tmp5, label %exit, label %loop.preheader
-
-loop.preheader: ; preds = %entry
- br label %loop
-
-loop: ; preds = %guarded, %loop.preheader
- %loop.acc = phi i32 [ %loop.acc.next, %guarded ], [ 0, %loop.preheader ]
- %i = phi i32 [ %i.next, %guarded ], [ 0, %loop.preheader ]
- %within.bounds = icmp ult i32 %i, %length
- %widenable_cond = call i1 @llvm.experimental.widenable.condition()
- %exiplicit_guard_cond = and i1 %within.bounds, %widenable_cond
- br i1 %exiplicit_guard_cond, label %guarded, label %deopt, !prof !0
-
-deopt: ; preds = %loop
- %deoptcall = call i32 (...) @llvm.experimental.deoptimize.i32(i32 9) [ "deopt"() ]
- ret i32 %deoptcall
-
-guarded: ; preds = %loop
- %i.i64 = zext i32 %i to i64
- %array.i.ptr = getelementptr inbounds i32, ptr %array, i64 %i.i64
- %array.i = load i32, ptr %array.i.ptr, align 4
- %loop.acc.next = add i32 %loop.acc, %array.i
- %i.next = add nuw i32 %i, 1
- %continue = icmp ult i32 %i.next, %n
- br i1 %continue, label %loop, label %exit, !prof !2
-
-exit: ; preds = %guarded, %entry
- %result = phi i32 [ 0, %entry ], [ %loop.acc.next, %guarded ]
- ret i32 %result
-}
-
-declare i32 @llvm.experimental.deoptimize.i32(...)
-declare i1 @llvm.experimental.widenable.condition() #0
-
-attributes #0 = { inaccessiblememonly nounwind }
-
-!0 = !{!"branch_weights", i32 1048576, i32 1}
-!1 = !{i32 1, i32 -2147483648}
-!2 = !{!"branch_weights", i32 1024, i32 1}
diff --git a/llvm/test/Transforms/LoopVectorize/SystemZ/addressing.ll b/llvm/test/Transforms/LoopVectorize/SystemZ/addressing.ll
index b106f99..1153d18 100644
--- a/llvm/test/Transforms/LoopVectorize/SystemZ/addressing.ll
+++ b/llvm/test/Transforms/LoopVectorize/SystemZ/addressing.ll
@@ -6,7 +6,7 @@
; Check that the addresses for a scalarized memory access are not extracted
; from a vector register.
-define i32 @foo(ptr nocapture %A) {
+define void @foo(ptr nocapture %A) {
; CHECK-LABEL: @foo(
; CHECK-NEXT: entry:
; CHECK-NEXT: br label [[VECTOR_PH:%.*]]
@@ -27,7 +27,7 @@ define i32 @foo(ptr nocapture %A) {
; CHECK: middle.block:
; CHECK-NEXT: br label [[FOR_BODY:%.*]]
; CHECK: for.end:
-; CHECK-NEXT: ret i32 poison
+; CHECK-NEXT: ret void
;
entry:
@@ -44,12 +44,12 @@ for.body:
br i1 %exitcond, label %for.end, label %for.body
for.end:
- ret i32 poison
+ ret void
}
; Check that a load of address is scalarized.
-define i32 @foo1(ptr nocapture noalias %A, ptr nocapture %PtrPtr) {
+define void @foo1(ptr nocapture noalias %A, ptr nocapture %PtrPtr) {
; CHECK-LABEL: @foo1(
; CHECK-NEXT: entry:
; CHECK-NEXT: br label [[VECTOR_PH:%.*]]
@@ -74,7 +74,7 @@ define i32 @foo1(ptr nocapture noalias %A, ptr nocapture %PtrPtr) {
; CHECK: middle.block:
; CHECK-NEXT: br label [[FOR_BODY:%.*]]
; CHECK: for.end:
-; CHECK-NEXT: ret i32 poison
+; CHECK-NEXT: ret void
;
entry:
@@ -93,5 +93,5 @@ for.body:
br i1 %exitcond, label %for.end, label %for.body
for.end:
- ret i32 poison
+ ret void
}
diff --git a/llvm/test/Transforms/LoopVectorize/pr48832.ll b/llvm/test/Transforms/LoopVectorize/pr48832.ll
index b89be88..c6ebe85 100644
--- a/llvm/test/Transforms/LoopVectorize/pr48832.ll
+++ b/llvm/test/Transforms/LoopVectorize/pr48832.ll
@@ -23,7 +23,7 @@ for.body: ; preds = %for.cond
br i1 true, label %cond.false, label %land.rhs
land.rhs: ; preds = %for.body
- br i1 poison, label %cond.end, label %cond.false
+ br i1 false, label %cond.end, label %cond.false
cond.false: ; preds = %for.body, %land.rhs
br label %cond.end
diff --git a/llvm/test/Transforms/LowerMatrixIntrinsics/multiply-remainder-rm.ll b/llvm/test/Transforms/LowerMatrixIntrinsics/multiply-remainder-rm.ll
new file mode 100644
index 0000000..4ec5898
--- /dev/null
+++ b/llvm/test/Transforms/LowerMatrixIntrinsics/multiply-remainder-rm.ll
@@ -0,0 +1,96 @@
+; NOTE: Assertions have been autogenerated by utils/update_test_checks.py UTC_ARGS: --version 5
+; RUN: opt -passes='lower-matrix-intrinsics' -matrix-default-layout=row-major -S < %s | FileCheck --check-prefix=SPLIT_REMAINDER %s
+; RUN: opt -passes='lower-matrix-intrinsics' -matrix-split-matmul-remainder-over-threshold=96 -matrix-default-layout=row-major -S < %s | FileCheck --check-prefix=NO_SPLIT_REMAINDER %s
+; RUN: opt -passes='lower-matrix-intrinsics' -matrix-split-matmul-remainder-over-threshold=64 -matrix-default-layout=row-major -S < %s | FileCheck --check-prefix=SPLIT_REMAINDER %s
+
+; REQUIRES: aarch64-registered-target
+
+target datalayout = "e-m:o-i64:64-f80:128-n8:8:32:64-S128"
+target triple = "aarch64-apple-ios"
+
+define void @matmul(ptr %a, ptr %b, ptr %c) {
+; SPLIT_REMAINDER-LABEL: define void @matmul(
+; SPLIT_REMAINDER-SAME: ptr [[A:%.*]], ptr [[B:%.*]], ptr [[C:%.*]]) {
+; SPLIT_REMAINDER-NEXT: [[COL_LOAD:%.*]] = load <3 x float>, ptr [[A]], align 4
+; SPLIT_REMAINDER-NEXT: [[COL_LOAD1:%.*]] = load <3 x float>, ptr [[B]], align 4
+; SPLIT_REMAINDER-NEXT: [[VEC_GEP:%.*]] = getelementptr float, ptr [[B]], i64 3
+; SPLIT_REMAINDER-NEXT: [[COL_LOAD2:%.*]] = load <3 x float>, ptr [[VEC_GEP]], align 4
+; SPLIT_REMAINDER-NEXT: [[VEC_GEP3:%.*]] = getelementptr float, ptr [[B]], i64 6
+; SPLIT_REMAINDER-NEXT: [[COL_LOAD4:%.*]] = load <3 x float>, ptr [[VEC_GEP3]], align 4
+; SPLIT_REMAINDER-NEXT: [[BLOCK:%.*]] = shufflevector <3 x float> [[COL_LOAD1]], <3 x float> poison, <2 x i32> <i32 0, i32 1>
+; SPLIT_REMAINDER-NEXT: [[TMP1:%.*]] = extractelement <3 x float> [[COL_LOAD]], i64 0
+; SPLIT_REMAINDER-NEXT: [[SPLAT_SPLATINSERT:%.*]] = insertelement <2 x float> poison, float [[TMP1]], i64 0
+; SPLIT_REMAINDER-NEXT: [[SPLAT_SPLAT:%.*]] = shufflevector <2 x float> [[SPLAT_SPLATINSERT]], <2 x float> poison, <2 x i32> zeroinitializer
+; SPLIT_REMAINDER-NEXT: [[TMP2:%.*]] = fmul <2 x float> [[SPLAT_SPLAT]], [[BLOCK]]
+; SPLIT_REMAINDER-NEXT: [[BLOCK5:%.*]] = shufflevector <3 x float> [[COL_LOAD2]], <3 x float> poison, <2 x i32> <i32 0, i32 1>
+; SPLIT_REMAINDER-NEXT: [[TMP3:%.*]] = extractelement <3 x float> [[COL_LOAD]], i64 1
+; SPLIT_REMAINDER-NEXT: [[SPLAT_SPLATINSERT6:%.*]] = insertelement <2 x float> poison, float [[TMP3]], i64 0
+; SPLIT_REMAINDER-NEXT: [[SPLAT_SPLAT7:%.*]] = shufflevector <2 x float> [[SPLAT_SPLATINSERT6]], <2 x float> poison, <2 x i32> zeroinitializer
+; SPLIT_REMAINDER-NEXT: [[TMP4:%.*]] = fmul <2 x float> [[SPLAT_SPLAT7]], [[BLOCK5]]
+; SPLIT_REMAINDER-NEXT: [[TMP5:%.*]] = fadd <2 x float> [[TMP2]], [[TMP4]]
+; SPLIT_REMAINDER-NEXT: [[BLOCK8:%.*]] = shufflevector <3 x float> [[COL_LOAD4]], <3 x float> poison, <2 x i32> <i32 0, i32 1>
+; SPLIT_REMAINDER-NEXT: [[TMP6:%.*]] = extractelement <3 x float> [[COL_LOAD]], i64 2
+; SPLIT_REMAINDER-NEXT: [[SPLAT_SPLATINSERT9:%.*]] = insertelement <2 x float> poison, float [[TMP6]], i64 0
+; SPLIT_REMAINDER-NEXT: [[SPLAT_SPLAT10:%.*]] = shufflevector <2 x float> [[SPLAT_SPLATINSERT9]], <2 x float> poison, <2 x i32> zeroinitializer
+; SPLIT_REMAINDER-NEXT: [[TMP7:%.*]] = fmul <2 x float> [[SPLAT_SPLAT10]], [[BLOCK8]]
+; SPLIT_REMAINDER-NEXT: [[TMP8:%.*]] = fadd <2 x float> [[TMP5]], [[TMP7]]
+; SPLIT_REMAINDER-NEXT: [[TMP9:%.*]] = shufflevector <2 x float> [[TMP8]], <2 x float> poison, <3 x i32> <i32 0, i32 1, i32 poison>
+; SPLIT_REMAINDER-NEXT: [[TMP10:%.*]] = shufflevector <3 x float> poison, <3 x float> [[TMP9]], <3 x i32> <i32 3, i32 4, i32 2>
+; SPLIT_REMAINDER-NEXT: [[BLOCK11:%.*]] = shufflevector <3 x float> [[COL_LOAD1]], <3 x float> poison, <1 x i32> <i32 2>
+; SPLIT_REMAINDER-NEXT: [[TMP11:%.*]] = extractelement <3 x float> [[COL_LOAD]], i64 0
+; SPLIT_REMAINDER-NEXT: [[SPLAT_SPLATINSERT12:%.*]] = insertelement <1 x float> poison, float [[TMP11]], i64 0
+; SPLIT_REMAINDER-NEXT: [[SPLAT_SPLAT13:%.*]] = shufflevector <1 x float> [[SPLAT_SPLATINSERT12]], <1 x float> poison, <1 x i32> zeroinitializer
+; SPLIT_REMAINDER-NEXT: [[TMP12:%.*]] = fmul <1 x float> [[SPLAT_SPLAT13]], [[BLOCK11]]
+; SPLIT_REMAINDER-NEXT: [[BLOCK14:%.*]] = shufflevector <3 x float> [[COL_LOAD2]], <3 x float> poison, <1 x i32> <i32 2>
+; SPLIT_REMAINDER-NEXT: [[TMP13:%.*]] = extractelement <3 x float> [[COL_LOAD]], i64 1
+; SPLIT_REMAINDER-NEXT: [[SPLAT_SPLATINSERT15:%.*]] = insertelement <1 x float> poison, float [[TMP13]], i64 0
+; SPLIT_REMAINDER-NEXT: [[SPLAT_SPLAT16:%.*]] = shufflevector <1 x float> [[SPLAT_SPLATINSERT15]], <1 x float> poison, <1 x i32> zeroinitializer
+; SPLIT_REMAINDER-NEXT: [[TMP14:%.*]] = fmul <1 x float> [[SPLAT_SPLAT16]], [[BLOCK14]]
+; SPLIT_REMAINDER-NEXT: [[TMP15:%.*]] = fadd <1 x float> [[TMP12]], [[TMP14]]
+; SPLIT_REMAINDER-NEXT: [[BLOCK17:%.*]] = shufflevector <3 x float> [[COL_LOAD4]], <3 x float> poison, <1 x i32> <i32 2>
+; SPLIT_REMAINDER-NEXT: [[TMP16:%.*]] = extractelement <3 x float> [[COL_LOAD]], i64 2
+; SPLIT_REMAINDER-NEXT: [[SPLAT_SPLATINSERT18:%.*]] = insertelement <1 x float> poison, float [[TMP16]], i64 0
+; SPLIT_REMAINDER-NEXT: [[SPLAT_SPLAT19:%.*]] = shufflevector <1 x float> [[SPLAT_SPLATINSERT18]], <1 x float> poison, <1 x i32> zeroinitializer
+; SPLIT_REMAINDER-NEXT: [[TMP17:%.*]] = fmul <1 x float> [[SPLAT_SPLAT19]], [[BLOCK17]]
+; SPLIT_REMAINDER-NEXT: [[TMP18:%.*]] = fadd <1 x float> [[TMP15]], [[TMP17]]
+; SPLIT_REMAINDER-NEXT: [[TMP19:%.*]] = shufflevector <1 x float> [[TMP18]], <1 x float> poison, <3 x i32> <i32 0, i32 poison, i32 poison>
+; SPLIT_REMAINDER-NEXT: [[TMP20:%.*]] = shufflevector <3 x float> [[TMP10]], <3 x float> [[TMP19]], <3 x i32> <i32 0, i32 1, i32 3>
+; SPLIT_REMAINDER-NEXT: store <3 x float> [[TMP20]], ptr [[C]], align 4
+; SPLIT_REMAINDER-NEXT: ret void
+;
+; NO_SPLIT_REMAINDER-LABEL: define void @matmul(
+; NO_SPLIT_REMAINDER-SAME: ptr [[A:%.*]], ptr [[B:%.*]], ptr [[C:%.*]]) {
+; NO_SPLIT_REMAINDER-NEXT: [[COL_LOAD:%.*]] = load <3 x float>, ptr [[A]], align 4
+; NO_SPLIT_REMAINDER-NEXT: [[COL_LOAD1:%.*]] = load <3 x float>, ptr [[B]], align 4
+; NO_SPLIT_REMAINDER-NEXT: [[VEC_GEP:%.*]] = getelementptr float, ptr [[B]], i64 3
+; NO_SPLIT_REMAINDER-NEXT: [[COL_LOAD2:%.*]] = load <3 x float>, ptr [[VEC_GEP]], align 4
+; NO_SPLIT_REMAINDER-NEXT: [[VEC_GEP3:%.*]] = getelementptr float, ptr [[B]], i64 6
+; NO_SPLIT_REMAINDER-NEXT: [[COL_LOAD4:%.*]] = load <3 x float>, ptr [[VEC_GEP3]], align 4
+; NO_SPLIT_REMAINDER-NEXT: [[BLOCK:%.*]] = shufflevector <3 x float> [[COL_LOAD1]], <3 x float> poison, <3 x i32> <i32 0, i32 1, i32 2>
+; NO_SPLIT_REMAINDER-NEXT: [[TMP1:%.*]] = extractelement <3 x float> [[COL_LOAD]], i64 0
+; NO_SPLIT_REMAINDER-NEXT: [[SPLAT_SPLATINSERT:%.*]] = insertelement <3 x float> poison, float [[TMP1]], i64 0
+; NO_SPLIT_REMAINDER-NEXT: [[SPLAT_SPLAT:%.*]] = shufflevector <3 x float> [[SPLAT_SPLATINSERT]], <3 x float> poison, <3 x i32> zeroinitializer
+; NO_SPLIT_REMAINDER-NEXT: [[TMP2:%.*]] = fmul <3 x float> [[SPLAT_SPLAT]], [[BLOCK]]
+; NO_SPLIT_REMAINDER-NEXT: [[BLOCK5:%.*]] = shufflevector <3 x float> [[COL_LOAD2]], <3 x float> poison, <3 x i32> <i32 0, i32 1, i32 2>
+; NO_SPLIT_REMAINDER-NEXT: [[TMP3:%.*]] = extractelement <3 x float> [[COL_LOAD]], i64 1
+; NO_SPLIT_REMAINDER-NEXT: [[SPLAT_SPLATINSERT6:%.*]] = insertelement <3 x float> poison, float [[TMP3]], i64 0
+; NO_SPLIT_REMAINDER-NEXT: [[SPLAT_SPLAT7:%.*]] = shufflevector <3 x float> [[SPLAT_SPLATINSERT6]], <3 x float> poison, <3 x i32> zeroinitializer
+; NO_SPLIT_REMAINDER-NEXT: [[TMP4:%.*]] = fmul <3 x float> [[SPLAT_SPLAT7]], [[BLOCK5]]
+; NO_SPLIT_REMAINDER-NEXT: [[TMP5:%.*]] = fadd <3 x float> [[TMP2]], [[TMP4]]
+; NO_SPLIT_REMAINDER-NEXT: [[BLOCK8:%.*]] = shufflevector <3 x float> [[COL_LOAD4]], <3 x float> poison, <3 x i32> <i32 0, i32 1, i32 2>
+; NO_SPLIT_REMAINDER-NEXT: [[TMP6:%.*]] = extractelement <3 x float> [[COL_LOAD]], i64 2
+; NO_SPLIT_REMAINDER-NEXT: [[SPLAT_SPLATINSERT9:%.*]] = insertelement <3 x float> poison, float [[TMP6]], i64 0
+; NO_SPLIT_REMAINDER-NEXT: [[SPLAT_SPLAT10:%.*]] = shufflevector <3 x float> [[SPLAT_SPLATINSERT9]], <3 x float> poison, <3 x i32> zeroinitializer
+; NO_SPLIT_REMAINDER-NEXT: [[TMP7:%.*]] = fmul <3 x float> [[SPLAT_SPLAT10]], [[BLOCK8]]
+; NO_SPLIT_REMAINDER-NEXT: [[TMP8:%.*]] = fadd <3 x float> [[TMP5]], [[TMP7]]
+; NO_SPLIT_REMAINDER-NEXT: [[TMP9:%.*]] = shufflevector <3 x float> [[TMP8]], <3 x float> poison, <3 x i32> <i32 0, i32 1, i32 2>
+; NO_SPLIT_REMAINDER-NEXT: [[TMP10:%.*]] = shufflevector <3 x float> poison, <3 x float> [[TMP9]], <3 x i32> <i32 3, i32 4, i32 5>
+; NO_SPLIT_REMAINDER-NEXT: store <3 x float> [[TMP10]], ptr [[C]], align 4
+; NO_SPLIT_REMAINDER-NEXT: ret void
+;
+ %a_load = load <3 x float>, ptr %a, align 4
+ %b_load = load <9 x float>, ptr %b, align 4
+ %matmul = tail call <3 x float> @llvm.matrix.multiply.v3f32.v9f32.v3f32(<3 x float> %a_load, <9 x float> %b_load, i32 1, i32 3, i32 3)
+ store <3 x float> %matmul, ptr %c, align 4
+ ret void
+}
diff --git a/llvm/test/Transforms/LowerMatrixIntrinsics/multiply-remainder.ll b/llvm/test/Transforms/LowerMatrixIntrinsics/multiply-remainder.ll
new file mode 100644
index 0000000..fbc2cbc
--- /dev/null
+++ b/llvm/test/Transforms/LowerMatrixIntrinsics/multiply-remainder.ll
@@ -0,0 +1,96 @@
+; NOTE: Assertions have been autogenerated by utils/update_test_checks.py UTC_ARGS: --version 5
+; RUN: opt -passes='lower-matrix-intrinsics' -S < %s | FileCheck --check-prefix=SPLIT_REMAINDER %s
+; RUN: opt -passes='lower-matrix-intrinsics' -matrix-split-matmul-remainder-over-threshold=96 -S < %s | FileCheck --check-prefix=NO_SPLIT_REMAINDER %s
+; RUN: opt -passes='lower-matrix-intrinsics' -matrix-split-matmul-remainder-over-threshold=64 -S < %s | FileCheck --check-prefix=SPLIT_REMAINDER %s
+
+; REQUIRES: aarch64-registered-target
+
+target datalayout = "e-m:o-i64:64-f80:128-n8:8:32:64-S128"
+target triple = "aarch64-apple-ios"
+
+define void @matmul(ptr %a, ptr %b, ptr %c) {
+; SPLIT_REMAINDER-LABEL: define void @matmul(
+; SPLIT_REMAINDER-SAME: ptr [[A:%.*]], ptr [[B:%.*]], ptr [[C:%.*]]) {
+; SPLIT_REMAINDER-NEXT: [[COL_LOAD:%.*]] = load <3 x float>, ptr [[A]], align 4
+; SPLIT_REMAINDER-NEXT: [[VEC_GEP:%.*]] = getelementptr float, ptr [[A]], i64 3
+; SPLIT_REMAINDER-NEXT: [[COL_LOAD1:%.*]] = load <3 x float>, ptr [[VEC_GEP]], align 4
+; SPLIT_REMAINDER-NEXT: [[VEC_GEP2:%.*]] = getelementptr float, ptr [[A]], i64 6
+; SPLIT_REMAINDER-NEXT: [[COL_LOAD3:%.*]] = load <3 x float>, ptr [[VEC_GEP2]], align 4
+; SPLIT_REMAINDER-NEXT: [[COL_LOAD4:%.*]] = load <3 x float>, ptr [[B]], align 4
+; SPLIT_REMAINDER-NEXT: [[BLOCK:%.*]] = shufflevector <3 x float> [[COL_LOAD]], <3 x float> poison, <2 x i32> <i32 0, i32 1>
+; SPLIT_REMAINDER-NEXT: [[TMP1:%.*]] = extractelement <3 x float> [[COL_LOAD4]], i64 0
+; SPLIT_REMAINDER-NEXT: [[SPLAT_SPLATINSERT:%.*]] = insertelement <2 x float> poison, float [[TMP1]], i64 0
+; SPLIT_REMAINDER-NEXT: [[SPLAT_SPLAT:%.*]] = shufflevector <2 x float> [[SPLAT_SPLATINSERT]], <2 x float> poison, <2 x i32> zeroinitializer
+; SPLIT_REMAINDER-NEXT: [[TMP2:%.*]] = fmul <2 x float> [[BLOCK]], [[SPLAT_SPLAT]]
+; SPLIT_REMAINDER-NEXT: [[BLOCK5:%.*]] = shufflevector <3 x float> [[COL_LOAD1]], <3 x float> poison, <2 x i32> <i32 0, i32 1>
+; SPLIT_REMAINDER-NEXT: [[TMP3:%.*]] = extractelement <3 x float> [[COL_LOAD4]], i64 1
+; SPLIT_REMAINDER-NEXT: [[SPLAT_SPLATINSERT6:%.*]] = insertelement <2 x float> poison, float [[TMP3]], i64 0
+; SPLIT_REMAINDER-NEXT: [[SPLAT_SPLAT7:%.*]] = shufflevector <2 x float> [[SPLAT_SPLATINSERT6]], <2 x float> poison, <2 x i32> zeroinitializer
+; SPLIT_REMAINDER-NEXT: [[TMP4:%.*]] = fmul <2 x float> [[BLOCK5]], [[SPLAT_SPLAT7]]
+; SPLIT_REMAINDER-NEXT: [[TMP5:%.*]] = fadd <2 x float> [[TMP2]], [[TMP4]]
+; SPLIT_REMAINDER-NEXT: [[BLOCK8:%.*]] = shufflevector <3 x float> [[COL_LOAD3]], <3 x float> poison, <2 x i32> <i32 0, i32 1>
+; SPLIT_REMAINDER-NEXT: [[TMP6:%.*]] = extractelement <3 x float> [[COL_LOAD4]], i64 2
+; SPLIT_REMAINDER-NEXT: [[SPLAT_SPLATINSERT9:%.*]] = insertelement <2 x float> poison, float [[TMP6]], i64 0
+; SPLIT_REMAINDER-NEXT: [[SPLAT_SPLAT10:%.*]] = shufflevector <2 x float> [[SPLAT_SPLATINSERT9]], <2 x float> poison, <2 x i32> zeroinitializer
+; SPLIT_REMAINDER-NEXT: [[TMP7:%.*]] = fmul <2 x float> [[BLOCK8]], [[SPLAT_SPLAT10]]
+; SPLIT_REMAINDER-NEXT: [[TMP8:%.*]] = fadd <2 x float> [[TMP5]], [[TMP7]]
+; SPLIT_REMAINDER-NEXT: [[TMP9:%.*]] = shufflevector <2 x float> [[TMP8]], <2 x float> poison, <3 x i32> <i32 0, i32 1, i32 poison>
+; SPLIT_REMAINDER-NEXT: [[TMP10:%.*]] = shufflevector <3 x float> poison, <3 x float> [[TMP9]], <3 x i32> <i32 3, i32 4, i32 2>
+; SPLIT_REMAINDER-NEXT: [[BLOCK11:%.*]] = shufflevector <3 x float> [[COL_LOAD]], <3 x float> poison, <1 x i32> <i32 2>
+; SPLIT_REMAINDER-NEXT: [[TMP11:%.*]] = extractelement <3 x float> [[COL_LOAD4]], i64 0
+; SPLIT_REMAINDER-NEXT: [[SPLAT_SPLATINSERT12:%.*]] = insertelement <1 x float> poison, float [[TMP11]], i64 0
+; SPLIT_REMAINDER-NEXT: [[SPLAT_SPLAT13:%.*]] = shufflevector <1 x float> [[SPLAT_SPLATINSERT12]], <1 x float> poison, <1 x i32> zeroinitializer
+; SPLIT_REMAINDER-NEXT: [[TMP12:%.*]] = fmul <1 x float> [[BLOCK11]], [[SPLAT_SPLAT13]]
+; SPLIT_REMAINDER-NEXT: [[BLOCK14:%.*]] = shufflevector <3 x float> [[COL_LOAD1]], <3 x float> poison, <1 x i32> <i32 2>
+; SPLIT_REMAINDER-NEXT: [[TMP13:%.*]] = extractelement <3 x float> [[COL_LOAD4]], i64 1
+; SPLIT_REMAINDER-NEXT: [[SPLAT_SPLATINSERT15:%.*]] = insertelement <1 x float> poison, float [[TMP13]], i64 0
+; SPLIT_REMAINDER-NEXT: [[SPLAT_SPLAT16:%.*]] = shufflevector <1 x float> [[SPLAT_SPLATINSERT15]], <1 x float> poison, <1 x i32> zeroinitializer
+; SPLIT_REMAINDER-NEXT: [[TMP14:%.*]] = fmul <1 x float> [[BLOCK14]], [[SPLAT_SPLAT16]]
+; SPLIT_REMAINDER-NEXT: [[TMP15:%.*]] = fadd <1 x float> [[TMP12]], [[TMP14]]
+; SPLIT_REMAINDER-NEXT: [[BLOCK17:%.*]] = shufflevector <3 x float> [[COL_LOAD3]], <3 x float> poison, <1 x i32> <i32 2>
+; SPLIT_REMAINDER-NEXT: [[TMP16:%.*]] = extractelement <3 x float> [[COL_LOAD4]], i64 2
+; SPLIT_REMAINDER-NEXT: [[SPLAT_SPLATINSERT18:%.*]] = insertelement <1 x float> poison, float [[TMP16]], i64 0
+; SPLIT_REMAINDER-NEXT: [[SPLAT_SPLAT19:%.*]] = shufflevector <1 x float> [[SPLAT_SPLATINSERT18]], <1 x float> poison, <1 x i32> zeroinitializer
+; SPLIT_REMAINDER-NEXT: [[TMP17:%.*]] = fmul <1 x float> [[BLOCK17]], [[SPLAT_SPLAT19]]
+; SPLIT_REMAINDER-NEXT: [[TMP18:%.*]] = fadd <1 x float> [[TMP15]], [[TMP17]]
+; SPLIT_REMAINDER-NEXT: [[TMP19:%.*]] = shufflevector <1 x float> [[TMP18]], <1 x float> poison, <3 x i32> <i32 0, i32 poison, i32 poison>
+; SPLIT_REMAINDER-NEXT: [[TMP20:%.*]] = shufflevector <3 x float> [[TMP10]], <3 x float> [[TMP19]], <3 x i32> <i32 0, i32 1, i32 3>
+; SPLIT_REMAINDER-NEXT: store <3 x float> [[TMP20]], ptr [[C]], align 4
+; SPLIT_REMAINDER-NEXT: ret void
+;
+; NO_SPLIT_REMAINDER-LABEL: define void @matmul(
+; NO_SPLIT_REMAINDER-SAME: ptr [[A:%.*]], ptr [[B:%.*]], ptr [[C:%.*]]) {
+; NO_SPLIT_REMAINDER-NEXT: [[COL_LOAD:%.*]] = load <3 x float>, ptr [[A]], align 4
+; NO_SPLIT_REMAINDER-NEXT: [[VEC_GEP:%.*]] = getelementptr float, ptr [[A]], i64 3
+; NO_SPLIT_REMAINDER-NEXT: [[COL_LOAD1:%.*]] = load <3 x float>, ptr [[VEC_GEP]], align 4
+; NO_SPLIT_REMAINDER-NEXT: [[VEC_GEP2:%.*]] = getelementptr float, ptr [[A]], i64 6
+; NO_SPLIT_REMAINDER-NEXT: [[COL_LOAD3:%.*]] = load <3 x float>, ptr [[VEC_GEP2]], align 4
+; NO_SPLIT_REMAINDER-NEXT: [[COL_LOAD4:%.*]] = load <3 x float>, ptr [[B]], align 4
+; NO_SPLIT_REMAINDER-NEXT: [[BLOCK:%.*]] = shufflevector <3 x float> [[COL_LOAD]], <3 x float> poison, <3 x i32> <i32 0, i32 1, i32 2>
+; NO_SPLIT_REMAINDER-NEXT: [[TMP1:%.*]] = extractelement <3 x float> [[COL_LOAD4]], i64 0
+; NO_SPLIT_REMAINDER-NEXT: [[SPLAT_SPLATINSERT:%.*]] = insertelement <3 x float> poison, float [[TMP1]], i64 0
+; NO_SPLIT_REMAINDER-NEXT: [[SPLAT_SPLAT:%.*]] = shufflevector <3 x float> [[SPLAT_SPLATINSERT]], <3 x float> poison, <3 x i32> zeroinitializer
+; NO_SPLIT_REMAINDER-NEXT: [[TMP2:%.*]] = fmul <3 x float> [[BLOCK]], [[SPLAT_SPLAT]]
+; NO_SPLIT_REMAINDER-NEXT: [[BLOCK5:%.*]] = shufflevector <3 x float> [[COL_LOAD1]], <3 x float> poison, <3 x i32> <i32 0, i32 1, i32 2>
+; NO_SPLIT_REMAINDER-NEXT: [[TMP3:%.*]] = extractelement <3 x float> [[COL_LOAD4]], i64 1
+; NO_SPLIT_REMAINDER-NEXT: [[SPLAT_SPLATINSERT6:%.*]] = insertelement <3 x float> poison, float [[TMP3]], i64 0
+; NO_SPLIT_REMAINDER-NEXT: [[SPLAT_SPLAT7:%.*]] = shufflevector <3 x float> [[SPLAT_SPLATINSERT6]], <3 x float> poison, <3 x i32> zeroinitializer
+; NO_SPLIT_REMAINDER-NEXT: [[TMP4:%.*]] = fmul <3 x float> [[BLOCK5]], [[SPLAT_SPLAT7]]
+; NO_SPLIT_REMAINDER-NEXT: [[TMP5:%.*]] = fadd <3 x float> [[TMP2]], [[TMP4]]
+; NO_SPLIT_REMAINDER-NEXT: [[BLOCK8:%.*]] = shufflevector <3 x float> [[COL_LOAD3]], <3 x float> poison, <3 x i32> <i32 0, i32 1, i32 2>
+; NO_SPLIT_REMAINDER-NEXT: [[TMP6:%.*]] = extractelement <3 x float> [[COL_LOAD4]], i64 2
+; NO_SPLIT_REMAINDER-NEXT: [[SPLAT_SPLATINSERT9:%.*]] = insertelement <3 x float> poison, float [[TMP6]], i64 0
+; NO_SPLIT_REMAINDER-NEXT: [[SPLAT_SPLAT10:%.*]] = shufflevector <3 x float> [[SPLAT_SPLATINSERT9]], <3 x float> poison, <3 x i32> zeroinitializer
+; NO_SPLIT_REMAINDER-NEXT: [[TMP7:%.*]] = fmul <3 x float> [[BLOCK8]], [[SPLAT_SPLAT10]]
+; NO_SPLIT_REMAINDER-NEXT: [[TMP8:%.*]] = fadd <3 x float> [[TMP5]], [[TMP7]]
+; NO_SPLIT_REMAINDER-NEXT: [[TMP9:%.*]] = shufflevector <3 x float> [[TMP8]], <3 x float> poison, <3 x i32> <i32 0, i32 1, i32 2>
+; NO_SPLIT_REMAINDER-NEXT: [[TMP10:%.*]] = shufflevector <3 x float> poison, <3 x float> [[TMP9]], <3 x i32> <i32 3, i32 4, i32 5>
+; NO_SPLIT_REMAINDER-NEXT: store <3 x float> [[TMP10]], ptr [[C]], align 4
+; NO_SPLIT_REMAINDER-NEXT: ret void
+;
+ %a_load = load <9 x float>, ptr %a, align 4
+ %b_load = load <3 x float>, ptr %b, align 4
+ %matmul = tail call <3 x float> @llvm.matrix.multiply.v9f32.v3f32.v3f32(<9 x float> %a_load, <3 x float> %b_load, i32 3, i32 3, i32 1)
+ store <3 x float> %matmul, ptr %c, align 4
+ ret void
+}
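
The two new tests above exercise -matrix-split-matmul-remainder-over-threshold: depending on the threshold, the lowering either emits one 3-wide block (NO_SPLIT_REMAINDER) or splits the odd-sized result into a 2-wide block plus a 1-wide remainder block (the <2 x float> / <1 x float> operations in the SPLIT_REMAINDER checks). The following is a minimal scalar model of that blocking for the column-major 3x3 x 3x1 case; it only illustrates the split, it is not the pass itself.

    // Scalar model of the split lowering (illustration only).
    #include <array>
    #include <cstdio>

    int main() {
      std::array<float, 9> A = {1, 2, 3, 4, 5, 6, 7, 8, 9}; // column-major 3x3
      std::array<float, 3> B = {1, 2, 3};                   // 3x1
      std::array<float, 3> C = {};

      // 2-wide block: result rows 0 and 1, as in the <2 x float> fmul/fadd chain.
      for (int k = 0; k < 3; ++k)
        for (int row = 0; row < 2; ++row)
          C[row] += A[k * 3 + row] * B[k];

      // 1-wide remainder block: result row 2, as in the <1 x float> chain.
      for (int k = 0; k < 3; ++k)
        C[2] += A[k * 3 + 2] * B[k];

      std::printf("%f %f %f\n", C[0], C[1], C[2]);
      return 0;
    }
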
diff --git a/llvm/test/Transforms/SimpleLoopUnswitch/PGO-nontrivial-unswitch.ll b/llvm/test/Transforms/PhaseOrdering/unswitch-cold-func.ll
index 239397b..a6ebdf0 100644
--- a/llvm/test/Transforms/SimpleLoopUnswitch/PGO-nontrivial-unswitch.ll
+++ b/llvm/test/Transforms/PhaseOrdering/unswitch-cold-func.ll
@@ -1,13 +1,14 @@
; NOTE: Assertions have been autogenerated by utils/update_test_checks.py UTC_ARGS: --version 2
-; RUN: opt < %s -passes='require<profile-summary>,function(loop-mssa(simple-loop-unswitch<nontrivial>))' -S | FileCheck %s
+; RUN: opt < %s -passes='pgo-force-function-attrs,function(loop-mssa(simple-loop-unswitch<nontrivial>))' -pgo-kind=pgo-instr-use-pipeline -pgo-cold-func-opt=optsize -S | FileCheck %s
+; RUN: opt < %s -passes='pgo-force-function-attrs,function(loop-mssa(simple-loop-unswitch<nontrivial>))' -pgo-kind=pgo-instr-use-pipeline -pgo-cold-func-opt=minsize -S | FileCheck %s
;; Check that non-trivial loop unswitching is not applied to a cold loop in a
;; cold loop nest.
;; IR was generated from the following loop nest, profiled when called
;; with M=0 and N=0.
-;; void hotFunction(bool cond, int M, int N, int * A, int *B, int *C) {
+;; void function(bool cond, int M, int N, int * A, int *B, int *C) {
;; for (unsigned j = 0; j < M; j++)
;; for (unsigned i=0; i < N; i++) {
;; A[i] = B[i] + C[i];
@@ -15,8 +16,8 @@
;; }
;; }
-define void @_Z11hotFunctionbiiPiS_S_(i1 %cond, i32 %M, i32 %N, ptr %A, ptr %B, ptr %C) !prof !36 {
-; CHECK-LABEL: define void @_Z11hotFunctionbiiPiS_S_
+define void @_Z11functionbiiPiS_S_(i1 %cond, i32 %M, i32 %N, ptr %A, ptr %B, ptr %C) !prof !36 {
+; CHECK-LABEL: define void @_Z11functionbiiPiS_S_
; CHECK-SAME: (i1 [[COND:%.*]], i32 [[M:%.*]], i32 [[N:%.*]], ptr [[A:%.*]], ptr [[B:%.*]], ptr [[C:%.*]]) {{.*}}{
; CHECK-NEXT: entry:
; CHECK-NEXT: [[CMP19_NOT:%.*]] = icmp eq i32 [[M]], 0
diff --git a/llvm/test/Transforms/SimpleLoopUnswitch/nontrivial-unswitch-markloopasdeleted.ll b/llvm/test/Transforms/SimpleLoopUnswitch/nontrivial-unswitch-markloopasdeleted.ll
index 9ab713c..383407b 100644
--- a/llvm/test/Transforms/SimpleLoopUnswitch/nontrivial-unswitch-markloopasdeleted.ll
+++ b/llvm/test/Transforms/SimpleLoopUnswitch/nontrivial-unswitch-markloopasdeleted.ll
@@ -18,7 +18,6 @@
; the analysis caches.
;
; CHECK: Running pass: SimpleLoopUnswitchPass on loop %loop_begin in function test6
-; CHECK-NEXT: Running analysis: OuterAnalysisManagerProxy
; CHECK-NEXT: Clearing all analysis results for: loop_a_inner
diff --git a/llvm/test/tools/llvm-reduce/reduce-instructions-alloca.ll b/llvm/test/tools/llvm-reduce/reduce-instructions-alloca.ll
new file mode 100644
index 0000000..94b45d2
--- /dev/null
+++ b/llvm/test/tools/llvm-reduce/reduce-instructions-alloca.ll
@@ -0,0 +1,16 @@
+; RUN: llvm-reduce --abort-on-invalid-reduction --delta-passes=instructions --test FileCheck --test-arg --check-prefixes=CHECK,INTERESTING --test-arg %s --test-arg --input-file %s -o %t
+; RUN: FileCheck -check-prefixes=CHECK,RESULT %s < %t
+
+; CHECK-LABEL: define void @alloca(
+; INTERESTING: call void @llvm.lifetime.start.p0(
+; INTERESTING: call void @llvm.lifetime.end.p0(
+
+; RESULT: call void @llvm.lifetime.start.p0(ptr poison)
+; RESULT-NEXT: call void @llvm.lifetime.end.p0(ptr poison)
+; RESULT-NEXT: ret void
+define void @alloca(ptr %ptr) {
+ %alloca = alloca i32, align 4
+ call void @llvm.lifetime.start.p0(ptr %alloca)
+ call void @llvm.lifetime.end.p0(ptr %alloca)
+ ret void
+}
diff --git a/llvm/tools/llvm-reduce/deltas/ReduceInstructions.cpp b/llvm/tools/llvm-reduce/deltas/ReduceInstructions.cpp
index f1f5d6b..19b69e8 100644
--- a/llvm/tools/llvm-reduce/deltas/ReduceInstructions.cpp
+++ b/llvm/tools/llvm-reduce/deltas/ReduceInstructions.cpp
@@ -13,6 +13,8 @@
#include "ReduceInstructions.h"
#include "Utils.h"
+#include "llvm/IR/Constants.h"
+#include "llvm/IR/Instructions.h"
using namespace llvm;
@@ -37,7 +39,9 @@ void llvm::reduceInstructionsDeltaPass(Oracle &O, ReducerWorkItem &WorkItem) {
for (auto &Inst :
make_early_inc_range(make_range(BB.begin(), std::prev(BB.end())))) {
if (!shouldAlwaysKeep(Inst) && !O.shouldKeep()) {
- Inst.replaceAllUsesWith(getDefaultValue(Inst.getType()));
+ Inst.replaceAllUsesWith(isa<AllocaInst>(Inst)
+ ? PoisonValue::get(Inst.getType())
+ : getDefaultValue(Inst.getType()));
Inst.eraseFromParent();
}
}
diff --git a/llvm/utils/gn/secondary/lld/test/BUILD.gn b/llvm/utils/gn/secondary/lld/test/BUILD.gn
index dabc578..585e0a4 100644
--- a/llvm/utils/gn/secondary/lld/test/BUILD.gn
+++ b/llvm/utils/gn/secondary/lld/test/BUILD.gn
@@ -1,5 +1,6 @@
import("//llvm/lib/DebugInfo/PDB/enable_dia.gni")
import("//llvm/triples.gni")
+import("//llvm/utils/gn/build/libs/pthread/enable.gni")
import("//llvm/utils/gn/build/libs/xml/enable.gni")
import("//llvm/utils/gn/build/libs/zlib/enable.gni")
import("//llvm/utils/gn/build/libs/zstd/enable.gni")
@@ -88,6 +89,12 @@ write_lit_cfg("lit_site_cfg") {
extra_values += [ "LLVM_ENABLE_LIBXML2=0" ] # Must be 0.
}
+ if (llvm_enable_threads) {
+ extra_values += [ "LLVM_ENABLE_THREADS=1" ]
+ } else {
+ extra_values += [ "LLVM_ENABLE_THREADS=0" ] # Must be 0.
+ }
+
if (llvm_enable_zlib) {
extra_values += [ "LLVM_ENABLE_ZLIB=1" ]
} else {
diff --git a/llvm/utils/profcheck-xfail.txt b/llvm/utils/profcheck-xfail.txt
index 3f8be5e..b570f8d 100644
--- a/llvm/utils/profcheck-xfail.txt
+++ b/llvm/utils/profcheck-xfail.txt
@@ -107,6 +107,7 @@ Instrumentation/AddressSanitizer/asan-stack-safety.ll
Instrumentation/AddressSanitizer/asan-struct-scalable.ll
Instrumentation/AddressSanitizer/asan-vp-load-store.ll
Instrumentation/AddressSanitizer/asan-vs-gvn.ll
+Instrumentation/AddressSanitizer/asan-win-dont-instrument-catchpad.ll
Instrumentation/AddressSanitizer/basic.ll
Instrumentation/AddressSanitizer/basic-msvc64.ll
Instrumentation/AddressSanitizer/byref-args.ll
diff --git a/mlir/Maintainers.md b/mlir/Maintainers.md
index 5d3b576c..b495d25 100644
--- a/mlir/Maintainers.md
+++ b/mlir/Maintainers.md
@@ -97,7 +97,7 @@ available, should be contacted first, as they're more active in those areas.
* ‘rocdl’ Dialect ([@krzysz00](https://github.com/krzysz00))
* ‘nvgpu’ Dialect ([@grypp](https://github.com/grypp))
* ‘nvvm’ Dialect ([@grypp](https://github.com/grypp))
-* ‘xegpu’ Dialect ([@chencha3](https://github.com/chencha3), [@Jianhui-Li](https://github.com/Jianhui-Li))
+* ‘xegpu’ Dialect ([@charithaintc](https://github.com/charithaintc), [@Jianhui-Li](https://github.com/Jianhui-Li))
* 'xevm' Dialect ([@silee2](https://github.com/silee2))
#### CPU Dialects
diff --git a/mlir/include/mlir/Conversion/Passes.td b/mlir/include/mlir/Conversion/Passes.td
index 9f76f5d..70e3e45 100644
--- a/mlir/include/mlir/Conversion/Passes.td
+++ b/mlir/include/mlir/Conversion/Passes.td
@@ -807,7 +807,7 @@ def ConvertMathToSPIRVPass : Pass<"convert-math-to-spirv"> {
// MathToXeVM
//===----------------------------------------------------------------------===//
-def ConvertMathToXeVM : Pass<"convert-math-to-xevm", "ModuleOp"> {
+def ConvertMathToXeVM : Pass<"convert-math-to-xevm"> {
let summary =
"Convert (fast) math operations to native XeVM/SPIRV equivalents";
let description = [{
diff --git a/mlir/include/mlir/Dialect/Affine/IR/AffineOps.td b/mlir/include/mlir/Dialect/Affine/IR/AffineOps.td
index e52b7d2..12a7935 100644
--- a/mlir/include/mlir/Dialect/Affine/IR/AffineOps.td
+++ b/mlir/include/mlir/Dialect/Affine/IR/AffineOps.td
@@ -330,7 +330,6 @@ def AffineForOp : Affine_Op<"for",
Speculation::Speculatability getSpeculatability();
}];
- let hasCanonicalizer = 1;
let hasCustomAssemblyFormat = 1;
let hasFolder = 1;
let hasRegionVerifier = 1;
diff --git a/mlir/include/mlir/Dialect/GPU/Pipelines/Passes.h b/mlir/include/mlir/Dialect/GPU/Pipelines/Passes.h
index 035235f..fccb49d 100644
--- a/mlir/include/mlir/Dialect/GPU/Pipelines/Passes.h
+++ b/mlir/include/mlir/Dialect/GPU/Pipelines/Passes.h
@@ -1,4 +1,4 @@
-//===- Passes.h - GPU NVVM pipeline entry points --------------------------===//
+//===- Passes.h - GPU pipeline entry points -------------------------------===//
//
// Part of the LLVM Project, under the Apache License v2.0 with LLVM Exceptions.
// See https://llvm.org/LICENSE.txt for license information.
@@ -60,6 +60,52 @@ struct GPUToNVVMPipelineOptions
llvm::cl::init(false)};
};
+// Options for the gpu to xevm pipeline.
+struct GPUToXeVMPipelineOptions
+ : public PassPipelineOptions<GPUToXeVMPipelineOptions> {
+ PassOptions::Option<std::string> xegpuOpLevel{
+ *this, "xegpu-op-level",
+ llvm::cl::desc("Granularity of XeGPU operations to target: workgroup | "
+ "subgroup | lane"),
+ llvm::cl::init("workgroup")};
+ // General lowering controls.
+ PassOptions::Option<bool> use64bitIndex{
+ *this, "use-64bit-index",
+ llvm::cl::desc("Bitwidth of the index type (host & device)"),
+ llvm::cl::init(true)};
+ PassOptions::Option<bool> kernelBarePtrCallConv{
+ *this, "kernel-bare-ptr-calling-convention",
+ llvm::cl::desc("Use bare pointer calling convention for device kernels"),
+ llvm::cl::init(false)};
+ PassOptions::Option<bool> hostBarePtrCallConv{
+ *this, "host-bare-ptr-calling-convention",
+ llvm::cl::desc("Use bare pointer calling convention for host launches"),
+ llvm::cl::init(false)};
+ PassOptions::Option<std::string> binaryFormat{
+ *this, "binary-format",
+ llvm::cl::desc("Final GPU binary emission format (e.g. fatbin)"),
+ llvm::cl::init("fatbin")};
+ // Options mirroring xevm-attach-target (GpuXeVMAttachTarget).
+ PassOptions::Option<std::string> xevmModuleMatcher{
+ *this, "xevm-module-matcher",
+ llvm::cl::desc("Regex to match gpu.module names for XeVM target attach"),
+ llvm::cl::init("")};
+ PassOptions::Option<std::string> zebinTriple{
+ *this, "zebin-triple", llvm::cl::desc("Target triple for XeVM codegen"),
+ llvm::cl::init("spirv64-unknown-unknown")};
+ PassOptions::Option<std::string> zebinChip{
+ *this, "zebin-chip", llvm::cl::desc("Target chip (e.g. pvc, bmg)"),
+ llvm::cl::init("bmg")};
+ PassOptions::Option<unsigned> optLevel{
+ *this, "opt-level",
+ llvm::cl::desc("Optimization level for attached target/codegen"),
+ llvm::cl::init(2)};
+ PassOptions::Option<std::string> cmdOptions{
+ *this, "igc-cmd-options",
+ llvm::cl::desc("Additional downstream compiler command line options"),
+ llvm::cl::init("")};
+};
+
//===----------------------------------------------------------------------===//
// Building and Registering.
//===----------------------------------------------------------------------===//
@@ -70,8 +116,15 @@ struct GPUToNVVMPipelineOptions
void buildLowerToNVVMPassPipeline(OpPassManager &pm,
const GPUToNVVMPipelineOptions &options);
-/// Register all pipeleines for the `gpu` dialect.
+/// Adds the GPU to XeVM pipeline to the given pass manager. Transforms main
+/// dialects into XeVM targets. Begins with GPU code regions, then handles host
+/// code.
+void buildLowerToXeVMPassPipeline(OpPassManager &pm,
+ const GPUToXeVMPipelineOptions &options);
+
+/// Register all pipelines for the `gpu` dialect.
void registerGPUToNVVMPipeline();
+void registerGPUToXeVMPipeline();
} // namespace gpu
} // namespace mlir
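
The new GPUToXeVMPipelineOptions and buildLowerToXeVMPassPipeline declared above can be driven programmatically. The sketch below is a hypothetical driver using only the default options; the wrapper function and its name are illustrative and not part of this patch.

    #include "mlir/Dialect/GPU/Pipelines/Passes.h"
    #include "mlir/IR/BuiltinOps.h"
    #include "mlir/Pass/PassManager.h"

    // Hypothetical driver: build the XeVM lowering pipeline with its defaults
    // (workgroup-level XeGPU ops, fatbin output) and run it on a module.
    mlir::LogicalResult lowerToXeVM(mlir::ModuleOp module) {
      mlir::PassManager pm(module.getContext());
      mlir::gpu::GPUToXeVMPipelineOptions options;
      mlir::gpu::buildLowerToXeVMPassPipeline(pm, options);
      return pm.run(module);
    }

The same pipeline is registered later in this patch under the name gpu-lower-to-xevm-pipeline, so the textual pass-pipeline form with options such as xegpu-op-level and binary-format is an alternative to the programmatic call above.
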
diff --git a/mlir/include/mlir/Dialect/LLVMIR/ROCDLOps.td b/mlir/include/mlir/Dialect/LLVMIR/ROCDLOps.td
index 68f31e6..d2df244 100644
--- a/mlir/include/mlir/Dialect/LLVMIR/ROCDLOps.td
+++ b/mlir/include/mlir/Dialect/LLVMIR/ROCDLOps.td
@@ -574,6 +574,30 @@ def ROCDL_wmma_f32_16x16x16_fp8_bf8 : ROCDL_Wmma_IntrOp<"wmma.f32.16x16x16.fp8_b
def ROCDL_wmma_f32_16x16x16_bf8_bf8 : ROCDL_Wmma_IntrOp<"wmma.f32.16x16x16.bf8_bf8", [1]>;
def ROCDL_wmma_f32_16x16x16_bf8_fp8 : ROCDL_Wmma_IntrOp<"wmma.f32.16x16x16.bf8_fp8", [1]>;
def ROCDL_wmma_i32_16x16x32_iu4 : ROCDL_Wmma_IntrOp<"wmma.i32.16x16x32.iu4", [1]>;
+// Available from gfx1250
+def ROCDL_wmma_f32_16x16x4_f32 : ROCDL_Wmma_IntrOp<"wmma.f32.16x16x4.f32", [1]>;
+def ROCDL_wmma_f32_16x16x32_bf16 : ROCDL_Wmma_IntrOp<"wmma.f32.16x16x32.bf16", [1]>;
+def ROCDL_wmma_f32_16x16x32_f16 : ROCDL_Wmma_IntrOp<"wmma.f32.16x16x32.f16", [1]>;
+def ROCDL_wmma_f16_16x16x32_f16 : ROCDL_Wmma_IntrOp<"wmma.f16.16x16x32.f16", [1]>;
+def ROCDL_wmma_bf16_16x16x32_bf16 : ROCDL_Wmma_IntrOp<"wmma.bf16.16x16x32.bf16", [1]>;
+def ROCDL_wmma_bf16f32_16x16x32_bf16 : ROCDL_Wmma_IntrOp<"wmma.bf16f32.16x16x32.bf16", [1,5]>;
+def ROCDL_wmma_f32_16x16x64_fp8_fp8 : ROCDL_Wmma_IntrOp<"wmma.f32.16x16x64.fp8_fp8", [0]>;
+def ROCDL_wmma_f32_16x16x64_fp8_bf8 : ROCDL_Wmma_IntrOp<"wmma.f32.16x16x64.fp8_bf8", [0]>;
+def ROCDL_wmma_f32_16x16x64_bf8_fp8 : ROCDL_Wmma_IntrOp<"wmma.f32.16x16x64.bf8_fp8", [0]>;
+def ROCDL_wmma_f32_16x16x64_bf8_bf8 : ROCDL_Wmma_IntrOp<"wmma.f32.16x16x64.bf8_bf8", [0]>;
+def ROCDL_wmma_f16_16x16x64_fp8_fp8 : ROCDL_Wmma_IntrOp<"wmma.f16.16x16x64.fp8_fp8", [0]>;
+def ROCDL_wmma_f16_16x16x64_fp8_bf8 : ROCDL_Wmma_IntrOp<"wmma.f16.16x16x64.fp8_bf8", [0]>;
+def ROCDL_wmma_f16_16x16x64_bf8_fp8 : ROCDL_Wmma_IntrOp<"wmma.f16.16x16x64.bf8_fp8", [0]>;
+def ROCDL_wmma_f16_16x16x64_bf8_bf8 : ROCDL_Wmma_IntrOp<"wmma.f16.16x16x64.bf8_bf8", [0]>;
+def ROCDL_wmma_f32_16x16x128_fp8_fp8 : ROCDL_Wmma_IntrOp<"wmma.f32.16x16x128.fp8_fp8", [0]>;
+def ROCDL_wmma_f32_16x16x128_fp8_bf8 : ROCDL_Wmma_IntrOp<"wmma.f32.16x16x128.fp8_bf8", [0]>;
+def ROCDL_wmma_f32_16x16x128_bf8_fp8 : ROCDL_Wmma_IntrOp<"wmma.f32.16x16x128.bf8_fp8", [0]>;
+def ROCDL_wmma_f32_16x16x128_bf8_bf8 : ROCDL_Wmma_IntrOp<"wmma.f32.16x16x128.bf8_bf8", [0]>;
+def ROCDL_wmma_f16_16x16x128_fp8_fp8 : ROCDL_Wmma_IntrOp<"wmma.f16.16x16x128.fp8_fp8", [0]>;
+def ROCDL_wmma_f16_16x16x128_fp8_bf8 : ROCDL_Wmma_IntrOp<"wmma.f16.16x16x128.fp8_bf8", [0]>;
+def ROCDL_wmma_f16_16x16x128_bf8_fp8 : ROCDL_Wmma_IntrOp<"wmma.f16.16x16x128.bf8_fp8", [0]>;
+def ROCDL_wmma_f16_16x16x128_bf8_bf8 : ROCDL_Wmma_IntrOp<"wmma.f16.16x16x128.bf8_bf8", [0]>;
+def ROCDL_wmma_i32_16x16x64_iu8 : ROCDL_Wmma_IntrOp<"wmma.i32.16x16x64.iu8", [1]>;
//===---------------------------------------------------------------------===//
// LDS transpose intrinsics (available in GFX950)
@@ -1143,6 +1167,7 @@ foreach smallT = [
ScaleArgInfo<ROCDL_V16BF16Type, "Bf16">,
ScaleArgInfo<ROCDL_V16F32Type, "F32">,
] in {
+ // Up-scaling
def ROCDL_CvtPkScalePk16 # largeT.nameForOp # smallT.nameForOp # Op :
ROCDL_ConcreteNonMemIntrOp<"cvt.scale.pk16." # largeT.name # "." # smallT.name,
[Pure], 1, [2], ["scaleSel"]>,
@@ -1158,6 +1183,42 @@ foreach smallT = [
}];
}
+
+ // Down-scaling
+ def ROCDL_CvtScaleF32Pk16 # smallT.nameForOp # largeT.nameForOp # Op :
+ ROCDL_ConcreteNonMemIntrOp<"cvt.scalef32.pk16." # smallT.name # "." # largeT.name,
+ [Pure], 1>,
+ Arguments<(ins largeT.type:$src, F32:$scale)> {
+ let results = (outs smallT.type:$res);
+ let summary = "Scale and convert packed "
+ # largeT.name # " to packed " # smallT.name ;
+ let description = [{
+ Convert 8 packed }] # largeT.name # [{ values to packed }]
+ # smallT.name # [{, multiplying by the exponent part of `scale`
+ before doing so. This op is for gfx1250+ arch.
+ }];
+ let assemblyFormat = [{
+ attr-dict $src `,` $scale `:` type($res)
+ }];
+ }
+
+ def ROCDL_CvtScaleF32SrPk16 # smallT.nameForOp # largeT.nameForOp # Op :
+ ROCDL_ConcreteNonMemIntrOp<"cvt.scalef32.sr.pk16." # smallT.name # "." # largeT.name,
+ [Pure], 1>,
+ Arguments<(ins largeT.type:$src, I32:$seed, F32:$scale)> {
+ let results = (outs smallT.type:$res);
+ let summary = "Scale and convert packed "
+ # largeT.name # " to packed " # smallT.name # " with stochastic rounding";
+ let description = [{
+ Convert 8 packed }] # largeT.name # [{ values to packed }]
+ # smallT.name # [{, multiplying by the exponent part of `scale`
+    before doing so and applying stochastic rounding. This op is for gfx1250+ arch.
+ }];
+ let assemblyFormat = [{
+ attr-dict $src `,` $seed `,` $scale `:` type($res)
+ }];
+ }
+
} // foreach largeT
} // foreach smallTOp
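
The cvt.scalef32.pk16 descriptions above say the inputs are multiplied by "the exponent part of `scale`". As a hedged reading of that phrase (an interpretation of the wording, not a statement about the hardware encoding), only the power-of-two component of the f32 scale is applied and its mantissa bits are ignored:

    #include <cmath>
    #include <cstdio>

    // Interpretation sketch: extract the power-of-two component of `scale`.
    static float exponentPartOf(float scale) {
      int exp;
      std::frexp(scale, &exp);          // scale = m * 2^exp with m in [0.5, 1)
      return std::ldexp(1.0f, exp - 1); // 2^(exp-1): drop the mantissa
    }

    int main() {
      // scale = 6.0 = 1.5 * 2^2, so the applied factor would be 4.0, not 6.0.
      std::printf("%g\n", exponentPartOf(6.0f));
      return 0;
    }
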
diff --git a/mlir/include/mlir/Dialect/Linalg/Transforms/Transforms.h b/mlir/include/mlir/Dialect/Linalg/Transforms/Transforms.h
index ae7a085..c89fc59 100644
--- a/mlir/include/mlir/Dialect/Linalg/Transforms/Transforms.h
+++ b/mlir/include/mlir/Dialect/Linalg/Transforms/Transforms.h
@@ -25,7 +25,6 @@
#include "mlir/Interfaces/TilingInterface.h"
#include "mlir/Transforms/DialectConversion.h"
#include "llvm/ADT/SmallBitVector.h"
-#include "llvm/ADT/SmallSet.h"
namespace mlir {
namespace bufferization {
@@ -621,35 +620,43 @@ LogicalResult rewriteAsPaddedOp(RewriterBase &rewriter, LinalgOp opToPad,
/// In the future, more general interfaces can be devised to encode similar
/// shape evolutions and map between an op and its operands.
SmallVector<OpFoldResult>
-computePaddedShape(RewriterBase &rewriter, TypedValue<RankedTensorType> v,
+computePaddedShape(OpBuilder &, TypedValue<RankedTensorType> v,
AffineMap indexingMap, ArrayRef<OpFoldResult> indexingSizes,
const PadTilingInterfaceOptions &options);
using PadSizeComputationFunction =
std::function<FailureOr<SmallVector<OpFoldResult>>(
- RewriterBase &, OpOperand &, ArrayRef<Range>,
+ OpBuilder &, OpOperand &, ArrayRef<Range>,
const PadTilingInterfaceOptions &)>;
/// Specific helper for Linalg ops.
-FailureOr<SmallVector<OpFoldResult>> computeIndexingMapOpInterfacePaddedShape(
- RewriterBase &rewriter, OpOperand &operandToPad,
- ArrayRef<Range> iterationDomain, const PadTilingInterfaceOptions &options);
+FailureOr<SmallVector<OpFoldResult>>
+computeIndexingMapOpInterfacePaddedShape(OpBuilder &, OpOperand &operandToPad,
+ ArrayRef<Range> iterationDomain,
+ const PadTilingInterfaceOptions &);
+
+/// Operations and values created in the process of padding a TilingInterface
+/// operation.
+struct PadTilingInterfaceResult {
+ /// The operands of the padded op.
+ SmallVector<tensor::PadOp> padOps;
+ /// The padded op, a clone of `toPad` with padded operands.
+ TilingInterface paddedOp;
+ /// Slices of the padded op's results, same types as `toPad`.
+ SmallVector<Value> replacements;
+};
-/// Pad the iterator dimensions `options.paddingDimensions` of `opToPad`.
-///
+/// Pad the iterator dimensions of `toPad`.
/// * "options.paddingSizes" indicates that each padding dimension should be
/// padded to the specified padding size.
/// * "options.padToMultipleOf" indicates that the paddingSizes should be
// interpreted as the bounding box (dynamic) value to pad to.
/// * Use "options.paddingValues" to set the padding value of the created
// tensor::PadOp.
-/// * The tensor::PadOp is returned on success.
-
-FailureOr<TilingInterface>
-rewriteAsPaddedOp(RewriterBase &rewriter, TilingInterface opToPad,
- const PadTilingInterfaceOptions &constOptions,
- SmallVector<tensor::PadOp> &padOps,
- const PadSizeComputationFunction &computePaddingSizeFun =
+FailureOr<PadTilingInterfaceResult>
+rewriteAsPaddedOp(OpBuilder &, TilingInterface toPad,
+ PadTilingInterfaceOptions options,
+ const PadSizeComputationFunction & =
&computeIndexingMapOpInterfacePaddedShape);
namespace detail {
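
Since the Transforms.h hunk above changes rewriteAsPaddedOp from an out-parameter style to returning a PadTilingInterfaceResult, a caller now looks roughly like the sketch below; the same pattern appears in the LinalgTransformOps.cpp hunk later in this patch. `rewriter`, `op`, and `options` are assumed to already be in scope.

    // Caller sketch for the new result-struct API (fragment, not a full pass).
    FailureOr<linalg::PadTilingInterfaceResult> maybePadded =
        linalg::rewriteAsPaddedOp(rewriter, cast<TilingInterface>(op), options);
    if (failed(maybePadded))
      return failure();
    const auto &[padOps, paddedOp, replacements] = *maybePadded;
    // Unlike the old entry point, the caller now performs the replacement itself.
    rewriter.replaceOp(op, replacements);
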
diff --git a/mlir/lib/Dialect/Affine/IR/AffineOps.cpp b/mlir/lib/Dialect/Affine/IR/AffineOps.cpp
index 749e2ba..e0a53cd 100644
--- a/mlir/lib/Dialect/Affine/IR/AffineOps.cpp
+++ b/mlir/lib/Dialect/Affine/IR/AffineOps.cpp
@@ -2600,6 +2600,65 @@ static LogicalResult foldLoopBounds(AffineForOp forOp) {
return success(folded);
}
+/// Returns constant trip count in trivial cases.
+static std::optional<uint64_t> getTrivialConstantTripCount(AffineForOp forOp) {
+ int64_t step = forOp.getStepAsInt();
+ if (!forOp.hasConstantBounds() || step <= 0)
+ return std::nullopt;
+ int64_t lb = forOp.getConstantLowerBound();
+ int64_t ub = forOp.getConstantUpperBound();
+ return ub - lb <= 0 ? 0 : (ub - lb + step - 1) / step;
+}
+
+/// Fold the empty loop.
+static SmallVector<OpFoldResult> AffineForEmptyLoopFolder(AffineForOp forOp) {
+ if (!llvm::hasSingleElement(*forOp.getBody()))
+ return {};
+ if (forOp.getNumResults() == 0)
+ return {};
+ std::optional<uint64_t> tripCount = getTrivialConstantTripCount(forOp);
+ if (tripCount == 0) {
+ // The initial values of the iteration arguments would be the op's
+ // results.
+ return forOp.getInits();
+ }
+ SmallVector<Value, 4> replacements;
+ auto yieldOp = cast<AffineYieldOp>(forOp.getBody()->getTerminator());
+ auto iterArgs = forOp.getRegionIterArgs();
+ bool hasValDefinedOutsideLoop = false;
+ bool iterArgsNotInOrder = false;
+ for (unsigned i = 0, e = yieldOp->getNumOperands(); i < e; ++i) {
+ Value val = yieldOp.getOperand(i);
+ BlockArgument *iterArgIt = llvm::find(iterArgs, val);
+ // TODO: It should be possible to perform a replacement by computing the
+ // last value of the IV based on the bounds and the step.
+ if (val == forOp.getInductionVar())
+ return {};
+ if (iterArgIt == iterArgs.end()) {
+ // `val` is defined outside of the loop.
+ assert(forOp.isDefinedOutsideOfLoop(val) &&
+ "must be defined outside of the loop");
+ hasValDefinedOutsideLoop = true;
+ replacements.push_back(val);
+ } else {
+ unsigned pos = std::distance(iterArgs.begin(), iterArgIt);
+ if (pos != i)
+ iterArgsNotInOrder = true;
+ replacements.push_back(forOp.getInits()[pos]);
+ }
+ }
+ // Bail out when the trip count is unknown and the loop returns any value
+ // defined outside of the loop or any iterArg out of order.
+ if (!tripCount.has_value() &&
+ (hasValDefinedOutsideLoop || iterArgsNotInOrder))
+ return {};
+ // Bail out when the loop iterates more than once and it returns any iterArg
+ // out of order.
+ if (tripCount.has_value() && tripCount.value() >= 2 && iterArgsNotInOrder)
+ return {};
+ return llvm::to_vector_of<OpFoldResult>(replacements);
+}
+
/// Canonicalize the bounds of the given loop.
static LogicalResult canonicalizeLoopBounds(AffineForOp forOp) {
SmallVector<Value, 4> lbOperands(forOp.getLowerBoundOperands());
@@ -2631,79 +2690,30 @@ static LogicalResult canonicalizeLoopBounds(AffineForOp forOp) {
return success();
}
-namespace {
-/// Returns constant trip count in trivial cases.
-static std::optional<uint64_t> getTrivialConstantTripCount(AffineForOp forOp) {
- int64_t step = forOp.getStepAsInt();
- if (!forOp.hasConstantBounds() || step <= 0)
- return std::nullopt;
- int64_t lb = forOp.getConstantLowerBound();
- int64_t ub = forOp.getConstantUpperBound();
- return ub - lb <= 0 ? 0 : (ub - lb + step - 1) / step;
+/// Returns true if the affine.for has zero iterations in trivial cases.
+static bool hasTrivialZeroTripCount(AffineForOp op) {
+ return getTrivialConstantTripCount(op) == 0;
}
-/// This is a pattern to fold trivially empty loop bodies.
-/// TODO: This should be moved into the folding hook.
-struct AffineForEmptyLoopFolder : public OpRewritePattern<AffineForOp> {
- using OpRewritePattern<AffineForOp>::OpRewritePattern;
-
- LogicalResult matchAndRewrite(AffineForOp forOp,
- PatternRewriter &rewriter) const override {
- // Check that the body only contains a yield.
- if (!llvm::hasSingleElement(*forOp.getBody()))
- return failure();
- if (forOp.getNumResults() == 0)
- return success();
- std::optional<uint64_t> tripCount = getTrivialConstantTripCount(forOp);
- if (tripCount == 0) {
- // The initial values of the iteration arguments would be the op's
- // results.
- rewriter.replaceOp(forOp, forOp.getInits());
- return success();
- }
- SmallVector<Value, 4> replacements;
- auto yieldOp = cast<AffineYieldOp>(forOp.getBody()->getTerminator());
- auto iterArgs = forOp.getRegionIterArgs();
- bool hasValDefinedOutsideLoop = false;
- bool iterArgsNotInOrder = false;
- for (unsigned i = 0, e = yieldOp->getNumOperands(); i < e; ++i) {
- Value val = yieldOp.getOperand(i);
- auto *iterArgIt = llvm::find(iterArgs, val);
- // TODO: It should be possible to perform a replacement by computing the
- // last value of the IV based on the bounds and the step.
- if (val == forOp.getInductionVar())
- return failure();
- if (iterArgIt == iterArgs.end()) {
- // `val` is defined outside of the loop.
- assert(forOp.isDefinedOutsideOfLoop(val) &&
- "must be defined outside of the loop");
- hasValDefinedOutsideLoop = true;
- replacements.push_back(val);
- } else {
- unsigned pos = std::distance(iterArgs.begin(), iterArgIt);
- if (pos != i)
- iterArgsNotInOrder = true;
- replacements.push_back(forOp.getInits()[pos]);
- }
- }
- // Bail out when the trip count is unknown and the loop returns any value
- // defined outside of the loop or any iterArg out of order.
- if (!tripCount.has_value() &&
- (hasValDefinedOutsideLoop || iterArgsNotInOrder))
- return failure();
- // Bail out when the loop iterates more than once and it returns any iterArg
- // out of order.
- if (tripCount.has_value() && tripCount.value() >= 2 && iterArgsNotInOrder)
- return failure();
- rewriter.replaceOp(forOp, replacements);
- return success();
+LogicalResult AffineForOp::fold(FoldAdaptor adaptor,
+ SmallVectorImpl<OpFoldResult> &results) {
+ bool folded = succeeded(foldLoopBounds(*this));
+ folded |= succeeded(canonicalizeLoopBounds(*this));
+ if (hasTrivialZeroTripCount(*this) && getNumResults() != 0) {
+ // The initial values of the loop-carried variables (iter_args) are the
+ // results of the op. But this must be avoided for an affine.for op that
+ // does not return any results. Since ops that do not return results cannot
+ // be folded away, we would enter an infinite loop of folds on the same
+ // affine.for op.
+ results.assign(getInits().begin(), getInits().end());
+ folded = true;
}
-};
-} // namespace
-
-void AffineForOp::getCanonicalizationPatterns(RewritePatternSet &results,
- MLIRContext *context) {
- results.add<AffineForEmptyLoopFolder>(context);
+ SmallVector<OpFoldResult> foldResults = AffineForEmptyLoopFolder(*this);
+ if (!foldResults.empty()) {
+ results.assign(foldResults);
+ folded = true;
+ }
+ return success(folded);
}
OperandRange AffineForOp::getEntrySuccessorOperands(RegionBranchPoint point) {
@@ -2746,27 +2756,6 @@ void AffineForOp::getSuccessorRegions(
regions.push_back(RegionSuccessor(getResults()));
}
-/// Returns true if the affine.for has zero iterations in trivial cases.
-static bool hasTrivialZeroTripCount(AffineForOp op) {
- return getTrivialConstantTripCount(op) == 0;
-}
-
-LogicalResult AffineForOp::fold(FoldAdaptor adaptor,
- SmallVectorImpl<OpFoldResult> &results) {
- bool folded = succeeded(foldLoopBounds(*this));
- folded |= succeeded(canonicalizeLoopBounds(*this));
- if (hasTrivialZeroTripCount(*this) && getNumResults() != 0) {
- // The initial values of the loop-carried variables (iter_args) are the
- // results of the op. But this must be avoided for an affine.for op that
- // does not return any results. Since ops that do not return results cannot
- // be folded away, we would enter an infinite loop of folds on the same
- // affine.for op.
- results.assign(getInits().begin(), getInits().end());
- folded = true;
- }
- return success(folded);
-}
-
AffineBound AffineForOp::getLowerBound() {
return AffineBound(*this, getLowerBoundOperands(), getLowerBoundMap());
}
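
For reference, the trip-count logic the refactored fold above relies on is small enough to restate standalone. The sketch below mirrors getTrivialConstantTripCount and shows why a trivially zero trip count lets fold() return the loop's inits directly instead of going through a separate canonicalization pattern.

    #include <cstdint>
    #include <cstdio>

    // Trivial trip count for constant bounds and positive step:
    // max(0, ceil((ub - lb) / step)).
    static uint64_t trivialTripCount(int64_t lb, int64_t ub, int64_t step) {
      return ub - lb <= 0 ? 0 : (ub - lb + step - 1) / step;
    }

    int main() {
      // lb=0, ub=10, step=3 -> 4 iterations.
      std::printf("%llu\n", (unsigned long long)trivialTripCount(0, 10, 3));
      // lb=ub -> 0 iterations: the loop's results fold to its iter_arg inits.
      std::printf("%llu\n", (unsigned long long)trivialTripCount(5, 5, 1));
      return 0;
    }
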
diff --git a/mlir/lib/Dialect/GPU/Pipelines/CMakeLists.txt b/mlir/lib/Dialect/GPU/Pipelines/CMakeLists.txt
index 70a9c77..ec68acf 100644
--- a/mlir/lib/Dialect/GPU/Pipelines/CMakeLists.txt
+++ b/mlir/lib/Dialect/GPU/Pipelines/CMakeLists.txt
@@ -1,5 +1,6 @@
add_mlir_dialect_library(MLIRGPUPipelines
GPUToNVVMPipeline.cpp
+ GPUToXeVMPipeline.cpp
ADDITIONAL_HEADER_DIRS
${MLIR_MAIN_INCLUDE_DIR}/mlir/Dialect/GPU
@@ -11,12 +12,17 @@ add_mlir_dialect_library(MLIRGPUPipelines
MLIRTransforms
MLIRLinalgTransforms
MLIRAffineToStandard
+ MLIRGPUToLLVMSPV
MLIRGPUToNVVMTransforms
MLIRIndexToLLVM
MLIRMathToLLVM
+ MLIRMathToXeVM
MLIRNVGPUToNVVM
MLIRNVVMToLLVM
MLIRReconcileUnrealizedCasts
MLIRSCFToControlFlow
MLIRVectorToSCF
+ MLIRXeGPUTransforms
+ MLIRXeGPUToXeVM
+ MLIRXeVMToLLVM
)
diff --git a/mlir/lib/Dialect/GPU/Pipelines/GPUToXeVMPipeline.cpp b/mlir/lib/Dialect/GPU/Pipelines/GPUToXeVMPipeline.cpp
new file mode 100644
index 0000000..1a1485b
--- /dev/null
+++ b/mlir/lib/Dialect/GPU/Pipelines/GPUToXeVMPipeline.cpp
@@ -0,0 +1,139 @@
+//===- GPUToXeVMPipeline.cpp - Lowering pipeline to XeVM/LLVM -------------===//
+//
+// Part of the LLVM Project, under the Apache License v2.0 with LLVM Exceptions.
+// See https://llvm.org/LICENSE.txt for license information.
+// SPDX-License-Identifier: Apache-2.0 WITH LLVM-exception
+//
+//===----------------------------------------------------------------------===//
+//
+// This file implements a generally usable lowering pipeline to XeVM. If XeGPU
+// ops are used, the input MLIR is expected to already have them embedded in
+// the gpu code.
+//
+//===----------------------------------------------------------------------===//
+
+#include "mlir/Conversion/AffineToStandard/AffineToStandard.h"
+#include "mlir/Conversion/MathToXeVM/MathToXeVM.h"
+#include "mlir/Conversion/Passes.h"
+#include "mlir/Conversion/SCFToControlFlow/SCFToControlFlow.h"
+#include "mlir/Conversion/VectorToSCF/VectorToSCF.h"
+#include "mlir/Conversion/XeGPUToXeVM/XeGPUToXeVM.h"
+#include "mlir/Conversion/XeVMToLLVM/XeVMToLLVM.h"
+#include "mlir/Dialect/Func/IR/FuncOps.h"
+#include "mlir/Dialect/GPU/IR/GPUDialect.h"
+#include "mlir/Dialect/GPU/Pipelines/Passes.h"
+#include "mlir/Dialect/GPU/Transforms/Passes.h"
+#include "mlir/Dialect/LLVMIR/Transforms/RequestCWrappers.h"
+#include "mlir/Dialect/MemRef/Transforms/Passes.h"
+#include "mlir/Dialect/XeGPU/Transforms/Passes.h"
+#include "mlir/Pass/PassManager.h"
+#include "mlir/Pass/PassOptions.h"
+#include "mlir/Target/LLVM/XeVM/Target.h"
+#include "mlir/Transforms/Passes.h"
+
+using namespace mlir;
+
+namespace {
+//===----------------------------------------------------------------------===//
+// Pre-GPU common pipeline for both Host and GPU.
+//===----------------------------------------------------------------------===//
+void buildPreGPUCommonPassPipeline(
+ OpPassManager &pm, const mlir::gpu::GPUToXeVMPipelineOptions &options) {
+ // builtin.module scope passes.
+ pm.addPass(createCSEPass());
+ pm.addPass(createConvertVectorToSCFPass());
+ {
+ GpuXeVMAttachTargetOptions xevmTargetOptions;
+ xevmTargetOptions.moduleMatcher = options.xevmModuleMatcher;
+ xevmTargetOptions.triple = options.zebinTriple;
+ xevmTargetOptions.chip = options.zebinChip;
+ xevmTargetOptions.optLevel = options.optLevel;
+ xevmTargetOptions.cmdOptions = options.cmdOptions;
+ pm.addPass(createGpuXeVMAttachTarget(xevmTargetOptions));
+ }
+ pm.addPass(createLowerAffinePass());
+ pm.addNestedPass<func::FuncOp>(createGpuAsyncRegionPass());
+}
+
+//===----------------------------------------------------------------------===//
+// GPUModule-specific stuff.
+//===----------------------------------------------------------------------===//
+void buildGPUPassPipeline(OpPassManager &pm,
+ const mlir::gpu::GPUToXeVMPipelineOptions &options) {
+ if (options.xegpuOpLevel == "workgroup") {
+ pm.addNestedPass<gpu::GPUModuleOp>(xegpu::createXeGPUWgToSgDistribute());
+ pm.addNestedPass<gpu::GPUModuleOp>(createCSEPass());
+ pm.addNestedPass<gpu::GPUModuleOp>(xegpu::createXeGPUBlocking());
+ pm.addNestedPass<gpu::GPUModuleOp>(createCanonicalizerPass());
+ pm.addNestedPass<gpu::GPUModuleOp>(createCSEPass());
+ }
+ if (options.xegpuOpLevel == "subgroup" ||
+ options.xegpuOpLevel == "workgroup") {
+ pm.addNestedPass<gpu::GPUModuleOp>(xegpu::createXeGPUPropagateLayout());
+ pm.addNestedPass<gpu::GPUModuleOp>(xegpu::createXeGPUSubgroupDistribute());
+ pm.addNestedPass<gpu::GPUModuleOp>(createCanonicalizerPass());
+ pm.addNestedPass<gpu::GPUModuleOp>(createCSEPass());
+ pm.addNestedPass<gpu::GPUModuleOp>(createLoopInvariantCodeMotionPass());
+ pm.addNestedPass<gpu::GPUModuleOp>(createCSEPass());
+ pm.addNestedPass<gpu::GPUModuleOp>(xegpu::createXeGPUVectorLinearize());
+ }
+ pm.addNestedPass<gpu::GPUModuleOp>(createConvertMathToXeVM());
+ pm.addNestedPass<gpu::GPUModuleOp>(createConvertXeGPUToXeVMPass());
+ {
+ ConvertGpuOpsToLLVMSPVOpsOptions gpuToLLVMSPVOptions;
+ gpuToLLVMSPVOptions.use64bitIndex = options.use64bitIndex;
+ pm.addNestedPass<gpu::GPUModuleOp>(
+ createConvertGpuOpsToLLVMSPVOps(gpuToLLVMSPVOptions));
+ }
+ pm.addNestedPass<gpu::GPUModuleOp>(createCSEPass());
+ pm.addNestedPass<gpu::GPUModuleOp>(createReconcileUnrealizedCastsPass());
+}
+
+//===----------------------------------------------------------------------===//
+// Post-GPU pipeline for both Host and GPU.
+//===----------------------------------------------------------------------===//
+void buildPostGPUCommonPassPipeline(
+ OpPassManager &pm, const mlir::gpu::GPUToXeVMPipelineOptions &options) {
+ // builtin.module scope passes.
+ pm.addPass(createSCFToControlFlowPass());
+ pm.addPass(memref::createExpandStridedMetadataPass());
+ {
+ GpuToLLVMConversionPassOptions gpuToLLVMOptions;
+ gpuToLLVMOptions.hostBarePtrCallConv = options.hostBarePtrCallConv;
+ gpuToLLVMOptions.kernelBarePtrCallConv = options.kernelBarePtrCallConv;
+ pm.addPass(createGpuToLLVMConversionPass(gpuToLLVMOptions));
+ }
+ pm.addPass(createLowerAffinePass());
+ pm.addPass(createConvertToLLVMPass());
+ pm.addPass(createReconcileUnrealizedCastsPass());
+ // gpu-module-to-binary
+ {
+ GpuModuleToBinaryPassOptions gpuToModuleBinOptions;
+ gpuToModuleBinOptions.compilationTarget = options.binaryFormat;
+ gpuToModuleBinOptions.cmdOptions = options.cmdOptions;
+ pm.addPass(createGpuModuleToBinaryPass(gpuToModuleBinOptions));
+ }
+}
+} // namespace
+
+void mlir::gpu::buildLowerToXeVMPassPipeline(
+ OpPassManager &pm, const GPUToXeVMPipelineOptions &options) {
+ // Pre-GPU common pipelines.
+ buildPreGPUCommonPassPipeline(pm, options);
+
+ // GPUModule-specific stuff.
+ buildGPUPassPipeline(pm, options);
+
+ // Post-GPU pipeline for both Host and GPU.
+ buildPostGPUCommonPassPipeline(pm, options);
+}
+
+void mlir::gpu::registerGPUToXeVMPipeline() {
+ PassPipelineRegistration<GPUToXeVMPipelineOptions>(
+ "gpu-lower-to-xevm-pipeline",
+ "The default GPU to XeVM lowering pipeline. It starts by lowering GPU "
+ "code to the "
+ "specified compilation target (default is fatbin) then lowers the host "
+ "code.",
+ buildLowerToXeVMPassPipeline);
+}
diff --git a/mlir/lib/Dialect/Linalg/TransformOps/LinalgTransformOps.cpp b/mlir/lib/Dialect/Linalg/TransformOps/LinalgTransformOps.cpp
index 6192d79..9a8a63e 100644
--- a/mlir/lib/Dialect/Linalg/TransformOps/LinalgTransformOps.cpp
+++ b/mlir/lib/Dialect/Linalg/TransformOps/LinalgTransformOps.cpp
@@ -2457,26 +2457,24 @@ transform::PadTilingInterfaceOp::apply(transform::TransformRewriter &rewriter,
}
// Set options.
- TilingInterface paddedOp;
PadTilingInterfaceOptions options;
options.setPaddingValues(paddingValues)
.setPaddingSizes(getMixedPaddingSizes())
.setPadToMultipleOf(getPadToMultipleOf());
- // Apply padding.
- SmallVector<tensor::PadOp> newPadOps;
- FailureOr<TilingInterface> maybePaddedOp = rewriteAsPaddedOp(
- rewriter, cast<TilingInterface>(targetOp.getOperation()), options,
- newPadOps);
- if (failed(maybePaddedOp)) {
+ auto maybePadOps = rewriteAsPaddedOp(
+ rewriter, cast<TilingInterface>(targetOp.getOperation()), options);
+ if (failed(maybePadOps)) {
auto diag = emitSilenceableError() << "failed to pad op";
diag.attachNote(target->getLoc()) << "target op";
return diag;
}
+ const auto &[paddedOperands, paddedOp, slicedResults] = maybePadOps.value();
// Set transform results.
- paddedOps.push_back(cast<TilingInterface>(maybePaddedOp->getOperation()));
- padOps.append(newPadOps.begin(), newPadOps.end());
+ paddedOps.push_back(paddedOp);
+ padOps.append(paddedOperands.begin(), paddedOperands.end());
+ rewriter.replaceOp(targetOp.getOperation(), slicedResults);
}
results.set(cast<OpResult>(getPadded()), paddedOps);
diff --git a/mlir/lib/Dialect/Linalg/Transforms/PadTilingInterface.cpp b/mlir/lib/Dialect/Linalg/Transforms/PadTilingInterface.cpp
index 0956c5d..3e787a2 100644
--- a/mlir/lib/Dialect/Linalg/Transforms/PadTilingInterface.cpp
+++ b/mlir/lib/Dialect/Linalg/Transforms/PadTilingInterface.cpp
@@ -95,10 +95,11 @@ static int64_t extractConstantMultiplier(AffineExpr expr) {
/// - affine_map<(d0, d1) -> (d0 * 3 + d1)>
/// In the future, more general interfaces can be devised to encode similar
/// shape evolutions and map between an op and its operands.
-SmallVector<OpFoldResult> linalg::computePaddedShape(
- RewriterBase &rewriter, TypedValue<RankedTensorType> v,
- AffineMap indexingMap, ArrayRef<OpFoldResult> indexingSizes,
- const PadTilingInterfaceOptions &options) {
+SmallVector<OpFoldResult>
+linalg::computePaddedShape(OpBuilder &builder, TypedValue<RankedTensorType> v,
+ AffineMap indexingMap,
+ ArrayRef<OpFoldResult> indexingSizes,
+ const PadTilingInterfaceOptions &options) {
Location loc = v.getLoc();
SmallVector<OpFoldResult> paddedShape;
auto tensorType = cast<RankedTensorType>(v.getType());
@@ -109,7 +110,7 @@ SmallVector<OpFoldResult> linalg::computePaddedShape(
// "Full-rank" padding specification.
SmallVector<OpFoldResult> paddingSizes =
- getFullRankPaddingSizes(rewriter, indexingSizes, options);
+ getFullRankPaddingSizes(builder, indexingSizes, options);
// For each dimension in the operand's shape, iterate over indexingSizes and
// add the various term contributions.
@@ -147,28 +148,27 @@ SmallVector<OpFoldResult> linalg::computePaddedShape(
OpFoldResult paddingDimOfr;
if (options.padToMultipleOf) {
AffineExpr d0, s0;
- bindDims(rewriter.getContext(), d0);
- bindSymbols(rewriter.getContext(), s0);
+ bindDims(builder.getContext(), d0);
+ bindSymbols(builder.getContext(), s0);
AffineMap ceilMap = AffineMap::get(1, 1, d0.ceilDiv(s0) * s0);
AffineMap composedMap = projectedMap.compose(ceilMap);
paddingDimOfr = affine::makeComposedFoldedAffineApply(
- rewriter, loc, composedMap,
- {indexingSizes[paddingDim], paddingSize},
+ builder, loc, composedMap, {indexingSizes[paddingDim], paddingSize},
/*composeAffineMin=*/true);
} else {
// Otherwise just set to paddingSize.
paddingDimOfr = affine::makeComposedFoldedAffineApply(
- rewriter, loc, projectedMap, paddingSize);
+ builder, loc, projectedMap, paddingSize);
}
// Adjust for the maximum accessed index, which is (paddingSize - 1) *
// multiplier.
AffineExpr d0;
- bindDims(rewriter.getContext(), d0);
+ bindDims(builder.getContext(), d0);
int64_t multiplier = extractConstantMultiplier(projectedMap.getResult(0));
AffineMap subtractMap = AffineMap::get(1, 0, d0 - multiplier);
OpFoldResult maxAccessIdx = affine::makeComposedFoldedAffineApply(
- rewriter, loc, subtractMap, {paddingDimOfr});
+ builder, loc, subtractMap, {paddingDimOfr});
terms.push_back(maxAccessIdx);
LLVM_DEBUG(DBGS() << "------new term: " << terms.back() << "\n");
@@ -177,19 +177,19 @@ SmallVector<OpFoldResult> linalg::computePaddedShape(
// If there are no terms, just return the dim.
if (terms.empty()) {
paddedShape[resultIndex] =
- createFoldedDimOp(rewriter, loc, v, resultIndex);
+ createFoldedDimOp(builder, loc, v, resultIndex);
continue;
}
// Sum individual terms' contributions.
SmallVector<AffineExpr> dims(terms.size());
- bindDimsList(rewriter.getContext(), MutableArrayRef{dims});
+ bindDimsList(builder.getContext(), MutableArrayRef{dims});
AffineExpr sumExpr = dims.front();
for (unsigned i = 1; i < dims.size(); ++i)
sumExpr = sumExpr + dims[i];
// Add 1 to the maximum accessed index and get the final padded size.
- OpFoldResult paddedDimOfr = affine::makeComposedFoldedAffineApply(
- rewriter, loc, sumExpr + 1, terms);
+ OpFoldResult paddedDimOfr =
+ affine::makeComposedFoldedAffineApply(builder, loc, sumExpr + 1, terms);
paddedShape[resultIndex] = paddedDimOfr;
}
@@ -198,7 +198,7 @@ SmallVector<OpFoldResult> linalg::computePaddedShape(
FailureOr<SmallVector<OpFoldResult>>
linalg::computeIndexingMapOpInterfacePaddedShape(
- RewriterBase &rewriter, OpOperand &operandToPad,
+ OpBuilder &builder, OpOperand &operandToPad,
ArrayRef<Range> iterationDomain, const PadTilingInterfaceOptions &options) {
auto transferOp =
llvm::dyn_cast<IndexingMapOpInterface>(operandToPad.getOwner());
@@ -206,9 +206,9 @@ linalg::computeIndexingMapOpInterfacePaddedShape(
return failure();
// clang-format off
- assert(llvm::all_of(iterationDomain, [&rewriter](Range r) {
- return r.offset == OpFoldResult(rewriter.getIndexAttr(0)) &&
- r.stride == OpFoldResult(rewriter.getIndexAttr(1));
+ assert(llvm::all_of(iterationDomain, [&builder](Range r) {
+ return r.offset == OpFoldResult(builder.getIndexAttr(0)) &&
+ r.stride == OpFoldResult(builder.getIndexAttr(1));
}) && "expected 0-offset 1-stride loop ranges");
// clang-format on
SmallVector<OpFoldResult> loopUpperBounds;
@@ -218,13 +218,13 @@ linalg::computeIndexingMapOpInterfacePaddedShape(
AffineMap indexingMap = transferOp.getMatchingIndexingMap(&operandToPad);
return computePaddedShape(
- rewriter, cast<TypedValue<RankedTensorType>>(operandToPad.get()),
+ builder, cast<TypedValue<RankedTensorType>>(operandToPad.get()),
indexingMap, loopUpperBounds, options);
}
/// Pad a single operand to `paddedShape` using `paddingValueAttr` as padding
/// Value.
-static Value padOperand(RewriterBase &rewriter, TilingInterface opToPad,
+static Value padOperand(OpBuilder &builder, TilingInterface opToPad,
TypedValue<RankedTensorType> v,
ArrayRef<OpFoldResult> paddedShape,
Attribute paddingValueAttr) {
@@ -232,15 +232,15 @@ static Value padOperand(RewriterBase &rewriter, TilingInterface opToPad,
if (auto complexTy =
dyn_cast<ComplexType>(getElementTypeOrSelf(v.getType()))) {
if (auto complexAttr = dyn_cast<ArrayAttr>(paddingValueAttr)) {
- paddingValue = complex::ConstantOp::create(rewriter, opToPad.getLoc(),
+ paddingValue = complex::ConstantOp::create(builder, opToPad.getLoc(),
complexTy, complexAttr);
}
} else if (isa<ub::PoisonAttr>(paddingValueAttr)) {
- paddingValue = ub::PoisonOp::create(rewriter, opToPad.getLoc(),
+ paddingValue = ub::PoisonOp::create(builder, opToPad.getLoc(),
getElementTypeOrSelf(v.getType()));
} else if (auto typedAttr = dyn_cast<TypedAttr>(paddingValueAttr)) {
paddingValue =
- arith::ConstantOp::create(rewriter, opToPad.getLoc(), typedAttr);
+ arith::ConstantOp::create(builder, opToPad.getLoc(), typedAttr);
}
assert(paddingValue && "failed to create value from padding attribute");
@@ -259,49 +259,48 @@ static Value padOperand(RewriterBase &rewriter, TilingInterface opToPad,
RankedTensorType::get(tensorShape, getElementTypeOrSelf(v));
LLVM_DEBUG(DBGS() << "--SUCCESS, makeComposedPadHighOp with type: "
<< paddedTensorType);
- return makeComposedPadHighOp(rewriter, opToPad.getLoc(), paddedTensorType, v,
+ return makeComposedPadHighOp(builder, opToPad.getLoc(), paddedTensorType, v,
paddingValue, /*nofold=*/false, dynDims);
}
-FailureOr<TilingInterface> linalg::rewriteAsPaddedOp(
- RewriterBase &rewriter, TilingInterface opToPad,
- const PadTilingInterfaceOptions &constOptions,
- SmallVector<tensor::PadOp> &padOps,
+FailureOr<PadTilingInterfaceResult> linalg::rewriteAsPaddedOp(
+ OpBuilder &builder, TilingInterface toPad,
+ PadTilingInterfaceOptions options,
const PadSizeComputationFunction &computePaddingSizeFun) {
- LLVM_DEBUG(DBGS() << "Start rewriteAsPaddedOp : " << opToPad << "\n");
+ LLVM_DEBUG(DBGS() << "Start rewriteAsPaddedOp : " << toPad << "\n");
+ SmallVector<tensor::PadOp> padOps;
+ Location loc = toPad.getLoc();
- Location loc = opToPad.getLoc();
- PadTilingInterfaceOptions options(constOptions);
// Allow inference of pad values if they are not explicitly specified.
// TODO: be mindful about the value depending on the actual operation.
if (options.paddingValues.empty()) {
- SmallVector<Type> types(opToPad->getOperandTypes());
- llvm::append_range(types, opToPad->getResultTypes());
+ SmallVector<Type> types(toPad->getOperandTypes());
+ llvm::append_range(types, toPad->getResultTypes());
for (Type t : types) {
options.paddingValues.push_back(
- rewriter.getZeroAttr(getElementTypeOrSelf(t)));
+ builder.getZeroAttr(getElementTypeOrSelf(t)));
}
}
- if (llvm::any_of(opToPad->getOperands(),
+ if (llvm::any_of(toPad->getOperands(),
[](Value v) { return isa<MemRefType>(v.getType()); })) {
- return rewriter.notifyMatchFailure(opToPad,
- "expected operation on tensors");
+ LLVM_DEBUG(DBGS() << "Not an operation on tensors: FAIL\n");
+ return failure();
}
- OpBuilder::InsertionGuard g(rewriter);
- // Set IP after opToPad because we also take the dims of opToPad's output.
- rewriter.setInsertionPointAfter(opToPad);
+ OpBuilder::InsertionGuard g(builder);
+ // Set IP after toPad because we also take the dims of toPad's output.
+ builder.setInsertionPointAfter(toPad);
// 1. Get the loopUpperBounds from the TilingInterface.
- SmallVector<Range> iterationDomain = opToPad.getIterationDomain(rewriter);
+ SmallVector<Range> iterationDomain = toPad.getIterationDomain(builder);
// 2. For each operand.
SmallVector<Value> newOperands;
- newOperands.reserve(opToPad->getNumOperands());
- for (OpOperand &opOperand : opToPad->getOpOperands()) {
+ newOperands.reserve(toPad->getNumOperands());
+ for (OpOperand &opOperand : toPad->getOpOperands()) {
Value operand = opOperand.get();
- LLVM_DEBUG(DBGS() << "--start padding oprd: " << operand << "\n");
+ LLVM_DEBUG(DBGS() << "--start padding operand: " << operand << "\n");
// 2.a. Skip scalar-like operands.
Type operandType = operand.getType();
@@ -311,30 +310,31 @@ FailureOr<TilingInterface> linalg::rewriteAsPaddedOp(
newOperands.push_back(operand);
continue;
}
+
// 2.a. Compute padded shape.
FailureOr<SmallVector<OpFoldResult>> maybePaddedShape =
- computePaddingSizeFun(rewriter, opOperand, iterationDomain, options);
+ computePaddingSizeFun(builder, opOperand, iterationDomain, options);
if (failed(maybePaddedShape)) {
- return rewriter.notifyMatchFailure(opToPad, "could not pad op");
+ LLVM_DEBUG(DBGS() << "Could not get padded shape of operand: FAIL\n");
+ return failure();
}
// 2.b. Expect proper `paddingValues`.
// TODO: we may want to allow garbage padding in the future, in which case
// we would just not assert.
if (opOperand.getOperandNumber() >= options.paddingValues.size()) {
- return rewriter.notifyMatchFailure(opToPad,
- "--no padding value specified");
+ LLVM_DEBUG(DBGS() << "Too few padding values specified: FAIL\n");
+ return failure();
}
Attribute paddingValueAttr =
options.paddingValues[opOperand.getOperandNumber()];
// 2.c. Perform actual padding.
- Value paddedOperand = padOperand(
- rewriter, opToPad, cast<TypedValue<RankedTensorType>>(operand),
- *maybePaddedShape, paddingValueAttr);
+ Value paddedOperand =
+ padOperand(builder, toPad, cast<TypedValue<RankedTensorType>>(operand),
+ *maybePaddedShape, paddingValueAttr);
LLVM_DEBUG(DBGS() << "--done padding operand: " << paddedOperand << "\n");
- // 2.d. Perform actual padding.
newOperands.push_back(paddedOperand);
if (auto padOp = paddedOperand.getDefiningOp<tensor::PadOp>())
padOps.push_back(padOp);
@@ -342,38 +342,34 @@ FailureOr<TilingInterface> linalg::rewriteAsPaddedOp(
// 3. Form the resulting tensor::ExtractSliceOp.
ReifiedRankedShapedTypeDims reifiedResultShapes;
- if (failed(reifyResultShapes(rewriter, opToPad, reifiedResultShapes))) {
- LLVM_DEBUG(DBGS() << "--failed to reify result shapes -> FAIL\n");
- return rewriter.notifyMatchFailure(opToPad,
- "failed to reify result shapes");
+ if (failed(reifyResultShapes(builder, toPad, reifiedResultShapes))) {
+ LLVM_DEBUG(DBGS() << "Failed to reify result shapes: FAIL\n");
+ return failure();
}
- assert(reifiedResultShapes.size() == opToPad->getNumResults() &&
+ assert(reifiedResultShapes.size() == toPad->getNumResults() &&
"expected same number of results");
- // Clone `opToPad` to operate on the statically padded shapes.
+ // Clone `toPad` to operate on the statically padded shapes.
auto resultTensorTypes =
- ValueRange(newOperands).take_back(opToPad->getNumResults()).getTypes();
- // clone **should** properly notify the rewriter.
+ ValueRange(newOperands).take_back(toPad->getNumResults()).getTypes();
+ // clone **should** properly notify the builder.
TilingInterface paddedOp =
- clone(rewriter, opToPad, resultTensorTypes, newOperands);
+ clone(builder, toPad, resultTensorTypes, newOperands);
LLVM_DEBUG(DBGS() << "--cloned padded op: " << paddedOp << "\n");
- // Recover the slice out of the new static results. This keeps the original
- // opToPad around because it uses the dims of the original results.
+ // Recover the slice out of the new static results.
SmallVector<Value> paddedSubtensorResults;
- paddedSubtensorResults.reserve(opToPad->getNumResults());
+ paddedSubtensorResults.reserve(toPad->getNumResults());
for (const auto &en : llvm::enumerate(paddedOp->getResults())) {
Value paddedResult = en.value();
int64_t resultNumber = en.index();
int64_t rank = cast<RankedTensorType>(paddedResult.getType()).getRank();
- SmallVector<OpFoldResult> offsets(rank, rewriter.getIndexAttr(0));
- SmallVector<OpFoldResult> strides(rank, rewriter.getIndexAttr(1));
+ SmallVector<OpFoldResult> offsets(rank, builder.getIndexAttr(0));
+ SmallVector<OpFoldResult> strides(rank, builder.getIndexAttr(1));
paddedSubtensorResults.push_back(tensor::ExtractSliceOp::create(
- rewriter, loc, paddedResult, offsets, reifiedResultShapes[resultNumber],
+ builder, loc, paddedResult, offsets, reifiedResultShapes[resultNumber],
strides));
}
- rewriter.replaceOp(opToPad, paddedSubtensorResults);
-
- return paddedOp;
+ return PadTilingInterfaceResult{padOps, paddedOp, paddedSubtensorResults};
}
diff --git a/mlir/lib/Dialect/OpenACC/IR/OpenACC.cpp b/mlir/lib/Dialect/OpenACC/IR/OpenACC.cpp
index 90cbbd8..dcfe2c7 100644
--- a/mlir/lib/Dialect/OpenACC/IR/OpenACC.cpp
+++ b/mlir/lib/Dialect/OpenACC/IR/OpenACC.cpp
@@ -1030,12 +1030,12 @@ struct RemoveConstantIfConditionWithRegion : public OpRewritePattern<OpTy> {
//===----------------------------------------------------------------------===//
/// Create and populate an init region for privatization recipes.
-/// Returns the init block on success, or nullptr on failure.
+/// Returns success if the region is populated, failure otherwise.
/// Sets needsFree to indicate if the allocated memory requires deallocation.
-static std::unique_ptr<Block> createInitRegion(OpBuilder &builder, Location loc,
- Type varType, StringRef varName,
- ValueRange bounds,
- bool &needsFree) {
+static LogicalResult createInitRegion(OpBuilder &builder, Location loc,
+ Region &initRegion, Type varType,
+ StringRef varName, ValueRange bounds,
+ bool &needsFree) {
// Create init block with arguments: original value + bounds
SmallVector<Type> argTypes{varType};
SmallVector<Location> argLocs{loc};
@@ -1044,9 +1044,9 @@ static std::unique_ptr<Block> createInitRegion(OpBuilder &builder, Location loc,
argLocs.push_back(loc);
}
- auto initBlock = std::make_unique<Block>();
+ Block *initBlock = builder.createBlock(&initRegion);
initBlock->addArguments(argTypes, argLocs);
- builder.setInsertionPointToStart(initBlock.get());
+ builder.setInsertionPointToStart(initBlock);
Value privatizedValue;
@@ -1060,7 +1060,7 @@ static std::unique_ptr<Block> createInitRegion(OpBuilder &builder, Location loc,
privatizedValue = mappableTy.generatePrivateInit(
builder, loc, typedVar, varName, bounds, {}, needsFree);
if (!privatizedValue)
- return nullptr;
+ return failure();
} else {
assert(isa<PointerLikeType>(varType) && "Expected PointerLikeType");
auto pointerLikeTy = cast<PointerLikeType>(varType);
@@ -1068,21 +1068,21 @@ static std::unique_ptr<Block> createInitRegion(OpBuilder &builder, Location loc,
privatizedValue = pointerLikeTy.genAllocate(builder, loc, varName, varType,
blockArgVar, needsFree);
if (!privatizedValue)
- return nullptr;
+ return failure();
}
// Add yield operation to init block
acc::YieldOp::create(builder, loc, privatizedValue);
- return initBlock;
+ return success();
}
/// Create and populate a copy region for firstprivate recipes.
-/// Returns the copy block on success, or nullptr on failure.
+/// Returns success if the region is populated, failure otherwise.
/// TODO: Handle MappableType - it does not yet have a copy API.
-static std::unique_ptr<Block> createCopyRegion(OpBuilder &builder, Location loc,
- Type varType,
- ValueRange bounds) {
+static LogicalResult createCopyRegion(OpBuilder &builder, Location loc,
+ Region &copyRegion, Type varType,
+ ValueRange bounds) {
// Create copy block with arguments: original value + privatized value +
// bounds
SmallVector<Type> copyArgTypes{varType, varType};
@@ -1092,16 +1092,16 @@ static std::unique_ptr<Block> createCopyRegion(OpBuilder &builder, Location loc,
copyArgLocs.push_back(loc);
}
- auto copyBlock = std::make_unique<Block>();
+ Block *copyBlock = builder.createBlock(&copyRegion);
copyBlock->addArguments(copyArgTypes, copyArgLocs);
- builder.setInsertionPointToStart(copyBlock.get());
+ builder.setInsertionPointToStart(copyBlock);
bool isMappable = isa<MappableType>(varType);
bool isPointerLike = isa<PointerLikeType>(varType);
// TODO: Handle MappableType - it does not yet have a copy API.
// Otherwise, for now just fallback to pointer-like behavior.
if (isMappable && !isPointerLike)
- return nullptr;
+ return failure();
// Generate copy region body based on variable type
if (isPointerLike) {
@@ -1113,21 +1113,20 @@ static std::unique_ptr<Block> createCopyRegion(OpBuilder &builder, Location loc,
if (!pointerLikeTy.genCopy(
builder, loc, cast<TypedValue<PointerLikeType>>(privatizedArg),
cast<TypedValue<PointerLikeType>>(originalArg), varType))
- return nullptr;
+ return failure();
}
// Add terminator to copy block
acc::TerminatorOp::create(builder, loc);
- return copyBlock;
+ return success();
}
/// Create and populate a destroy region for privatization recipes.
-/// Returns the destroy block on success, or nullptr if not needed.
-static std::unique_ptr<Block> createDestroyRegion(OpBuilder &builder,
- Location loc, Type varType,
- Value allocRes,
- ValueRange bounds) {
+/// Returns success if the region is populated, failure otherwise.
+static LogicalResult createDestroyRegion(OpBuilder &builder, Location loc,
+ Region &destroyRegion, Type varType,
+ Value allocRes, ValueRange bounds) {
// Create destroy block with arguments: original value + privatized value +
// bounds
SmallVector<Type> destroyArgTypes{varType, varType};
@@ -1137,28 +1136,25 @@ static std::unique_ptr<Block> createDestroyRegion(OpBuilder &builder,
destroyArgLocs.push_back(loc);
}
- auto destroyBlock = std::make_unique<Block>();
+ Block *destroyBlock = builder.createBlock(&destroyRegion);
destroyBlock->addArguments(destroyArgTypes, destroyArgLocs);
- builder.setInsertionPointToStart(destroyBlock.get());
+ builder.setInsertionPointToStart(destroyBlock);
- bool isMappable = isa<MappableType>(varType);
- bool isPointerLike = isa<PointerLikeType>(varType);
- // TODO: Handle MappableType - it does not yet have a deallocation API.
- // Otherwise, for now just fallback to pointer-like behavior.
- if (isMappable && !isPointerLike)
- return nullptr;
-
- assert(isa<PointerLikeType>(varType) && "Expected PointerLikeType");
- auto pointerLikeTy = cast<PointerLikeType>(varType);
- auto privatizedArg =
+ auto varToFree =
cast<TypedValue<PointerLikeType>>(destroyBlock->getArgument(1));
- // Pass allocRes to help determine the allocation type
- if (!pointerLikeTy.genFree(builder, loc, privatizedArg, allocRes, varType))
- return nullptr;
+ if (isa<MappableType>(varType)) {
+ auto mappableTy = cast<MappableType>(varType);
+ if (!mappableTy.generatePrivateDestroy(builder, loc, varToFree))
+ return failure();
+ } else {
+ assert(isa<PointerLikeType>(varType) && "Expected PointerLikeType");
+ auto pointerLikeTy = cast<PointerLikeType>(varType);
+ if (!pointerLikeTy.genFree(builder, loc, varToFree, allocRes, varType))
+ return failure();
+ }
acc::TerminatorOp::create(builder, loc);
-
- return destroyBlock;
+ return success();
}
} // namespace
@@ -1220,40 +1216,33 @@ PrivateRecipeOp::createAndPopulate(OpBuilder &builder, Location loc,
if (!isMappable && !isPointerLike)
return std::nullopt;
- // Create init and destroy blocks using shared helpers
OpBuilder::InsertionGuard guard(builder);
- // Save the original insertion point for creating the recipe operation later
- auto originalInsertionPoint = builder.saveInsertionPoint();
+ // Create the recipe operation first so its regions have a proper parent context
+ auto recipe = PrivateRecipeOp::create(builder, loc, recipeName, varType);
+ // Populate the init region
bool needsFree = false;
- auto initBlock =
- createInitRegion(builder, loc, varType, varName, bounds, needsFree);
- if (!initBlock)
+ if (failed(createInitRegion(builder, loc, recipe.getInitRegion(), varType,
+ varName, bounds, needsFree))) {
+ recipe.erase();
return std::nullopt;
+ }
// Only create destroy region if the allocation needs deallocation
- std::unique_ptr<Block> destroyBlock;
if (needsFree) {
// Extract the allocated value from the init block's yield operation
- auto yieldOp = cast<acc::YieldOp>(initBlock->getTerminator());
+ auto yieldOp =
+ cast<acc::YieldOp>(recipe.getInitRegion().front().getTerminator());
Value allocRes = yieldOp.getOperand(0);
- destroyBlock = createDestroyRegion(builder, loc, varType, allocRes, bounds);
- if (!destroyBlock)
+ if (failed(createDestroyRegion(builder, loc, recipe.getDestroyRegion(),
+ varType, allocRes, bounds))) {
+ recipe.erase();
return std::nullopt;
+ }
}
- // Now create the recipe operation at the original insertion point and attach
- // the blocks
- builder.restoreInsertionPoint(originalInsertionPoint);
- auto recipe = PrivateRecipeOp::create(builder, loc, recipeName, varType);
-
- // Move the blocks into the recipe's regions
- recipe.getInitRegion().push_back(initBlock.release());
- if (destroyBlock)
- recipe.getDestroyRegion().push_back(destroyBlock.release());
-
return recipe;
}
@@ -1299,45 +1288,40 @@ FirstprivateRecipeOp::createAndPopulate(OpBuilder &builder, Location loc,
if (!isMappable && !isPointerLike)
return std::nullopt;
- // Create init, copy, and destroy blocks using shared helpers
OpBuilder::InsertionGuard guard(builder);
- // Save the original insertion point for creating the recipe operation later
- auto originalInsertionPoint = builder.saveInsertionPoint();
+ // Create the recipe operation first so its regions have a proper parent context
+ auto recipe = FirstprivateRecipeOp::create(builder, loc, recipeName, varType);
+ // Populate the init region
bool needsFree = false;
- auto initBlock =
- createInitRegion(builder, loc, varType, varName, bounds, needsFree);
- if (!initBlock)
+ if (failed(createInitRegion(builder, loc, recipe.getInitRegion(), varType,
+ varName, bounds, needsFree))) {
+ recipe.erase();
return std::nullopt;
+ }
- auto copyBlock = createCopyRegion(builder, loc, varType, bounds);
- if (!copyBlock)
+ // Populate the copy region
+ if (failed(createCopyRegion(builder, loc, recipe.getCopyRegion(), varType,
+ bounds))) {
+ recipe.erase();
return std::nullopt;
+ }
// Only create destroy region if the allocation needs deallocation
- std::unique_ptr<Block> destroyBlock;
if (needsFree) {
// Extract the allocated value from the init block's yield operation
- auto yieldOp = cast<acc::YieldOp>(initBlock->getTerminator());
+ auto yieldOp =
+ cast<acc::YieldOp>(recipe.getInitRegion().front().getTerminator());
Value allocRes = yieldOp.getOperand(0);
- destroyBlock = createDestroyRegion(builder, loc, varType, allocRes, bounds);
- if (!destroyBlock)
+ if (failed(createDestroyRegion(builder, loc, recipe.getDestroyRegion(),
+ varType, allocRes, bounds))) {
+ recipe.erase();
return std::nullopt;
+ }
}
- // Now create the recipe operation at the original insertion point and attach
- // the blocks
- builder.restoreInsertionPoint(originalInsertionPoint);
- auto recipe = FirstprivateRecipeOp::create(builder, loc, recipeName, varType);
-
- // Move the blocks into the recipe's regions
- recipe.getInitRegion().push_back(initBlock.release());
- recipe.getCopyRegion().push_back(copyBlock.release());
- if (destroyBlock)
- recipe.getDestroyRegion().push_back(destroyBlock.release());
-
return recipe;
}
diff --git a/mlir/lib/Dialect/Vector/Transforms/VectorLinearize.cpp b/mlir/lib/Dialect/Vector/Transforms/VectorLinearize.cpp
index 1b656d8..ea93085 100644
--- a/mlir/lib/Dialect/Vector/Transforms/VectorLinearize.cpp
+++ b/mlir/lib/Dialect/Vector/Transforms/VectorLinearize.cpp
@@ -817,6 +817,50 @@ struct LinearizeVectorToElements final
}
};
+/// Convert broadcasts from scalars or 1-element vectors, such as
+///
+/// ```mlir
+/// vector.broadcast %value : f32 to vector<4x4xf32>
+/// ```
+///
+/// to broadcasts to rank-1 vectors, with shape_casts before/after as needed.
+/// The above becomes,
+///
+/// ```mlir
+/// %out_1d = vector.broadcast %value : f32 to vector<16xf32>
+/// %out_nd = vector.shape_cast %out_1d : vector<16xf32> to vector<4x4xf32>
+/// ```
+struct LinearizeVectorBroadcast final
+ : public OpConversionPattern<vector::BroadcastOp> {
+ using Base::Base;
+
+ LinearizeVectorBroadcast(const TypeConverter &typeConverter,
+ MLIRContext *context, PatternBenefit benefit = 1)
+ : OpConversionPattern(typeConverter, context, benefit) {}
+
+ LogicalResult
+ matchAndRewrite(vector::BroadcastOp broadcastOp, OpAdaptor adaptor,
+ ConversionPatternRewriter &rewriter) const override {
+
+ int numElements = 1;
+ Type sourceType = broadcastOp.getSourceType();
+ if (auto vecType = dyn_cast<VectorType>(sourceType)) {
+ numElements = vecType.getNumElements();
+ }
+
+ if (numElements != 1) {
+ return rewriter.notifyMatchFailure(
+ broadcastOp, "only broadcasts of single elements can be linearized.");
+ }
+
+ auto dstTy = getTypeConverter()->convertType(broadcastOp.getType());
+ rewriter.replaceOpWithNewOp<vector::BroadcastOp>(broadcastOp, dstTy,
+ adaptor.getSource());
+
+ return success();
+ }
+};
+
} // namespace
/// This method defines the set of operations that are linearizable, and hence
@@ -909,8 +953,8 @@ void mlir::vector::populateVectorLinearizeBasePatterns(
patterns
.add<LinearizeConstantLike, LinearizeVectorizable, LinearizeVectorBitCast,
LinearizeVectorCreateMask, LinearizeVectorLoad, LinearizeVectorStore,
- LinearizeVectorFromElements, LinearizeVectorToElements>(
- typeConverter, patterns.getContext());
+ LinearizeVectorBroadcast, LinearizeVectorFromElements,
+ LinearizeVectorToElements>(typeConverter, patterns.getContext());
}
void mlir::vector::populateVectorLinearizeShuffleLikeOpsPatterns(
diff --git a/mlir/lib/RegisterAllPasses.cpp b/mlir/lib/RegisterAllPasses.cpp
index c67b242..dd413d2de 100644
--- a/mlir/lib/RegisterAllPasses.cpp
+++ b/mlir/lib/RegisterAllPasses.cpp
@@ -98,4 +98,5 @@ void mlir::registerAllPasses() {
sparse_tensor::registerSparseTensorPipelines();
tosa::registerTosaToLinalgPipelines();
gpu::registerGPUToNVVMPipeline();
+ gpu::registerGPUToXeVMPipeline();
}
diff --git a/mlir/test/Conversion/MathToXeVM/math-to-xevm.mlir b/mlir/test/Conversion/MathToXeVM/math-to-xevm.mlir
index d76627b..c61640c 100644
--- a/mlir/test/Conversion/MathToXeVM/math-to-xevm.mlir
+++ b/mlir/test/Conversion/MathToXeVM/math-to-xevm.mlir
@@ -3,6 +3,15 @@
// RUN: mlir-opt %s -convert-math-to-xevm='convert-arith=false' \
// RUN: | FileCheck %s -check-prefixes='CHECK,CHECK-NO-ARITH'
+// RUN: mlir-opt --pass-pipeline="builtin.module(convert-math-to-xevm)" %s \
+// RUN: | FileCheck %s -check-prefixes='CHECK-MODULE,CHECK-ENTIRE-MODULE'
+// RUN: mlir-opt --pass-pipeline="builtin.module(gpu.module(convert-math-to-xevm))" %s \
+// RUN: | FileCheck %s -check-prefixes='CHECK-MODULE,CHECK-ONLY-GPU'
+
+// This test:
+// - checks that MathToXeVM converts fastmath math/arith ops properly;
+// - checks that MathToXeVM handles nested modules while respecting the pass manager.
+
module @test_module {
// CHECK-DAG: llvm.func @_Z22__spirv_ocl_native_expDh(f16) -> f16
// CHECK-DAG: llvm.func @_Z22__spirv_ocl_native_expf(f32) -> f32
@@ -152,4 +161,39 @@ module @test_module {
return
}
+
+ // Check that MathToXeVM handles nested modules while respecting the pass manager:
+
+ // CHECK-ENTIRE-MODULE: llvm.func @_Z22__spirv_ocl_native_expf(f32) -> f32
+ // CHECK-ONLY-GPU-NOT: llvm.func @_Z22__spirv_ocl_native_expf(f32) -> f32
+
+ // CHECK-MODULE-LABEL: @test_gpu
+ gpu.module @test_gpu {
+ // CHECK-MODULE: llvm.func @_Z22__spirv_ocl_native_expf(f32) -> f32
+ gpu.func @exp_gpu() {
+ %c1_f32 = arith.constant 1. : f32
+
+ // CHECK-MODULE: math.exp
+ %exp_normal_f32 = math.exp %c1_f32 : f32
+
+ // CHECK-MODULE: llvm.call @_Z22__spirv_ocl_native_expf(%{{.*}}) {fastmathFlags = #llvm.fastmath<afn>} : (f32) -> f32
+ %exp_fast_f32 = math.exp %c1_f32 fastmath<afn> : f32
+
+ gpu.return
+ }
+ }
+
+ // CHECK-MODULE-LABEL: @exp_func
+ func.func @exp_func() {
+ %c1_f32 = arith.constant 1. : f32
+
+ // CHECK-MODULE: math.exp
+ %exp_normal_f32 = math.exp %c1_f32 : f32
+
+ // CHECK-ENTIRE-MODULE: llvm.call @_Z22__spirv_ocl_native_expf(%{{.*}}) {fastmathFlags = #llvm.fastmath<afn>} : (f32) -> f32
+ // CHECK-ONLY-GPU: math.exp
+ %exp_fast_f32 = math.exp %c1_f32 fastmath<afn> : f32
+
+ return
+ }
}
diff --git a/mlir/test/Dialect/LLVMIR/rocdl.mlir b/mlir/test/Dialect/LLVMIR/rocdl.mlir
index 242c04f..d270ee8 100644
--- a/mlir/test/Dialect/LLVMIR/rocdl.mlir
+++ b/mlir/test/Dialect/LLVMIR/rocdl.mlir
@@ -1211,6 +1211,57 @@ llvm.func @rocdl.cvt.scale.pk16(%v3xi32: vector<3xi32>, %scale:i32) {
// -----
+// CHECK-LABEL: rocdl.cvt.scalef32.pk16
+llvm.func @rocdl.cvt.scalef32.pk16(%v16xf32: vector<16xf32>,
+ %v16xf16: vector<16xf16>,
+ %v16xbf16: vector<16xbf16>,
+ %scale: f32) {
+
+ // CHECK: rocdl.cvt.scalef32.pk16.fp6.f16
+ %0 = rocdl.cvt.scalef32.pk16.fp6.f16 %v16xf16, %scale : vector<3xi32>
+ // CHECK: rocdl.cvt.scalef32.pk16.fp6.bf16
+ %1 = rocdl.cvt.scalef32.pk16.fp6.bf16 %v16xbf16, %scale : vector<3xi32>
+ // CHECK: rocdl.cvt.scalef32.pk16.fp6.f32
+ %2 = rocdl.cvt.scalef32.pk16.fp6.f32 %v16xf32, %scale : vector<3xi32>
+
+ // CHECK: rocdl.cvt.scalef32.pk16.bf6.f16
+ %3 = rocdl.cvt.scalef32.pk16.bf6.f16 %v16xf16, %scale : vector<3xi32>
+ // CHECK: rocdl.cvt.scalef32.pk16.bf6.bf16
+ %4 = rocdl.cvt.scalef32.pk16.bf6.bf16 %v16xbf16, %scale : vector<3xi32>
+ // CHECK: rocdl.cvt.scalef32.pk16.bf6.f32
+ %5 = rocdl.cvt.scalef32.pk16.bf6.f32 %v16xf32, %scale : vector<3xi32>
+
+ llvm.return
+}
+
+// -----
+
+// CHECK-LABEL: rocdl.cvt.scalef32.sr.pk16
+llvm.func @rocdl.cvt.scalef32.sr.pk16(%v16xf32: vector<16xf32>,
+ %v16xf16: vector<16xf16>,
+ %v16xbf16: vector<16xbf16>,
+ %seed: i32,
+ %scale: f32) {
+
+ // CHECK: rocdl.cvt.scalef32.sr.pk16.fp6.f16
+ %0 = rocdl.cvt.scalef32.sr.pk16.fp6.f16 %v16xf16, %seed, %scale : vector<3xi32>
+ // CHECK: rocdl.cvt.scalef32.sr.pk16.fp6.bf16
+ %1 = rocdl.cvt.scalef32.sr.pk16.fp6.bf16 %v16xbf16, %seed, %scale : vector<3xi32>
+ // CHECK: rocdl.cvt.scalef32.sr.pk16.fp6.f32
+ %2 = rocdl.cvt.scalef32.sr.pk16.fp6.f32 %v16xf32, %seed, %scale : vector<3xi32>
+
+ // CHECK: rocdl.cvt.scalef32.sr.pk16.bf6.f16
+ %3 = rocdl.cvt.scalef32.sr.pk16.bf6.f16 %v16xf16, %seed, %scale : vector<3xi32>
+ // CHECK: rocdl.cvt.scalef32.sr.pk16.bf6.bf16
+ %4 = rocdl.cvt.scalef32.sr.pk16.bf6.bf16 %v16xbf16, %seed, %scale : vector<3xi32>
+ // CHECK: rocdl.cvt.scalef32.sr.pk16.bf6.f32
+ %5 = rocdl.cvt.scalef32.sr.pk16.bf6.f32 %v16xf32, %seed, %scale : vector<3xi32>
+
+ llvm.return
+}
+
+// -----
+
// expected-error@below {{attribute attached to unexpected op}}
func.func private @expected_llvm_func() attributes { rocdl.kernel }
diff --git a/mlir/test/Dialect/Vector/linearize.mlir b/mlir/test/Dialect/Vector/linearize.mlir
index ee5cfbcd..cbbc833 100644
--- a/mlir/test/Dialect/Vector/linearize.mlir
+++ b/mlir/test/Dialect/Vector/linearize.mlir
@@ -428,6 +428,47 @@ func.func @test_linearize_across_for(%arg0 : vector<4xi8>) -> vector<4xi8> {
// -----
+// CHECK-LABEL: linearize_vector_broadcast_scalar_source
+// CHECK-SAME: (%[[ARG:.*]]: i32) -> vector<4x2xi32>
+func.func @linearize_vector_broadcast_scalar_source(%arg0: i32) -> vector<4x2xi32> {
+
+ // CHECK: %[[BROADCAST:.*]] = vector.broadcast %[[ARG]] : i32 to vector<8xi32>
+ // CHECK: %[[CAST:.*]] = vector.shape_cast %[[BROADCAST]] : vector<8xi32> to vector<4x2xi32>
+ // CHECK: return %[[CAST]] : vector<4x2xi32>
+ %0 = vector.broadcast %arg0 : i32 to vector<4x2xi32>
+ return %0 : vector<4x2xi32>
+}
+
+// -----
+
+// CHECK-LABEL: linearize_vector_broadcast_rank_two_source
+// CHECK-SAME: (%[[ARG:.*]]: vector<1x1xi32>) -> vector<4x2xi32>
+func.func @linearize_vector_broadcast_rank_two_source(%arg0: vector<1x1xi32>) -> vector<4x2xi32> {
+
+ // CHECK: %[[CAST0:.*]] = vector.shape_cast %[[ARG]] : vector<1x1xi32> to vector<1xi32>
+ // CHECK: %[[BROADCAST:.*]] = vector.broadcast %[[CAST0]] : vector<1xi32> to vector<8xi32>
+ // CHECK: %[[CAST1:.*]] = vector.shape_cast %[[BROADCAST]] : vector<8xi32> to vector<4x2xi32>
+ // CHECK: return %[[CAST1]] : vector<4x2xi32>
+ %0 = vector.broadcast %arg0 : vector<1x1xi32> to vector<4x2xi32>
+ return %0 : vector<4x2xi32>
+}
+
+// -----
+
+// CHECK-LABEL: linearize_scalable_vector_broadcast
+// CHECK-SAME: (%[[ARG:.*]]: i32) -> vector<4x[2]xi32>
+func.func @linearize_scalable_vector_broadcast(%arg0: i32) -> vector<4x[2]xi32> {
+
+ // CHECK: %[[BROADCAST:.*]] = vector.broadcast %[[ARG]] : i32 to vector<[8]xi32>
+ // CHECK: %[[CAST:.*]] = vector.shape_cast %[[BROADCAST]] : vector<[8]xi32> to vector<4x[2]xi32>
+ // CHECK: return %[[CAST]] : vector<4x[2]xi32>
+ %0 = vector.broadcast %arg0 : i32 to vector<4x[2]xi32>
+ return %0 : vector<4x[2]xi32>
+
+}
+
+// -----
+
// CHECK-LABEL: linearize_create_mask
// CHECK-SAME: (%[[ARG0:.*]]: index, %[[ARG1:.*]]: index) -> vector<1x16xi1>
func.func @linearize_create_mask(%arg0 : index, %arg1 : index) -> vector<1x16xi1> {
diff --git a/mlir/test/Integration/Dialect/XeGPU/LANE/lit.local.cfg b/mlir/test/Integration/Dialect/XeGPU/LANE/lit.local.cfg
new file mode 100644
index 0000000..d0d51c6
--- /dev/null
+++ b/mlir/test/Integration/Dialect/XeGPU/LANE/lit.local.cfg
@@ -0,0 +1,4 @@
+if not config.run_xevm_tests:
+ config.unsupported = True
+if not config.enable_levelzero_runner:
+ config.unsupported = True
diff --git a/mlir/test/Integration/Dialect/XeGPU/LANE/simple_gemm.mlir b/mlir/test/Integration/Dialect/XeGPU/LANE/simple_gemm.mlir
new file mode 100644
index 0000000..ffe29ef
--- /dev/null
+++ b/mlir/test/Integration/Dialect/XeGPU/LANE/simple_gemm.mlir
@@ -0,0 +1,121 @@
+// RUN: mlir-opt %s --gpu-lower-to-xevm-pipeline="xegpu-op-level=lane" \
+// RUN: | mlir-runner \
+// RUN: --shared-libs=%mlir_levelzero_runtime \
+// RUN: --shared-libs=%mlir_runner_utils \
+// RUN: --entry-point-result=void \
+// RUN: | FileCheck %s
+
+module @gemm attributes {gpu.container_module} {
+ gpu.module @kernel {
+ gpu.func @simple_gemm(%a: memref<256x256xf16>, %b: memref<256x256xf16>, %c: memref<256x256xf32>) kernel {
+ %c0 = arith.constant 0 : index
+ %c1 = arith.constant 1 : index
+ %c8 = arith.constant 8 : index
+ %c16 = arith.constant 16 : index
+ %c32 = arith.constant 32 : index
+ %c256 = arith.constant 256 : index
+ %block_x = gpu.block_id x
+ %block_y = gpu.block_id y
+ %x_block_offset = arith.muli %block_x, %c8 : index
+ %y_block_offset = arith.muli %block_y, %c16 : index
+
+ %c_tdesc = xegpu.create_nd_tdesc %c : memref<256x256xf32> -> !xegpu.tensor_desc<8x16xf32>
+ %c_init_value = xegpu.load_nd %c_tdesc[%x_block_offset, %y_block_offset] : !xegpu.tensor_desc<8x16xf32> -> vector<8xf32>
+ %a_tdesc = xegpu.create_nd_tdesc %a : memref<256x256xf16> -> !xegpu.tensor_desc<8x16xf16>
+ %b_tdesc = xegpu.create_nd_tdesc %b : memref<256x256xf16> -> !xegpu.tensor_desc<16x16xf16>
+
+ %r = scf.for %k = %c0 to %c256 step %c16 iter_args(%arg_c = %c_init_value) -> (vector<8xf32>) {
+ %a_val = xegpu.load_nd %a_tdesc[%x_block_offset, %k] : !xegpu.tensor_desc<8x16xf16> -> vector<8xf16>
+ %b_val = xegpu.load_nd %b_tdesc[%k, %y_block_offset] : !xegpu.tensor_desc<16x16xf16> -> vector<16xf16>
+ %dpas = xegpu.dpas %a_val, %b_val, %arg_c : vector<8xf16>, vector<16xf16>, vector<8xf32> -> vector<8xf32>
+ scf.yield %dpas : vector<8xf32>
+ }
+ xegpu.store_nd %r, %c_tdesc[%x_block_offset, %y_block_offset] <{l1_hint = #xegpu.cache_hint<write_back>, l2_hint = #xegpu.cache_hint<uncached>}>: vector<8xf32>, !xegpu.tensor_desc<8x16xf32>
+ gpu.return
+ }
+ }
+
+ func.func @test(%a : memref<256x256xf16>, %b : memref<256x256xf16>, %c : memref<256x256xf32>) -> memref<256x256xf32> attributes {llvm.emit_c_interface} {
+ %c1 = arith.constant 1 : index
+ %c16 = arith.constant 16 : index
+ %c32 = arith.constant 32 : index
+ %memref_a = gpu.alloc () : memref<256x256xf16>
+ gpu.memcpy %memref_a, %a : memref<256x256xf16>, memref<256x256xf16>
+ %memref_b = gpu.alloc () : memref<256x256xf16>
+ gpu.memcpy %memref_b, %b : memref<256x256xf16>, memref<256x256xf16>
+ %memref_c = gpu.alloc () : memref<256x256xf32>
+ gpu.memcpy %memref_c, %c : memref<256x256xf32>, memref<256x256xf32>
+ gpu.launch_func @kernel::@simple_gemm blocks in (%c32, %c16, %c1) threads in (%c16, %c1, %c1) args(%memref_a : memref<256x256xf16>, %memref_b : memref<256x256xf16>, %memref_c : memref<256x256xf32>)
+ gpu.wait // Wait for the kernel to finish.
+ gpu.memcpy %c, %memref_c : memref<256x256xf32>, memref<256x256xf32>
+ gpu.dealloc %memref_a : memref<256x256xf16>
+ gpu.dealloc %memref_b : memref<256x256xf16>
+ gpu.dealloc %memref_c : memref<256x256xf32>
+ return %c : memref<256x256xf32>
+ }
+
+ func.func @main() attributes {llvm.emit_c_interface} {
+ %c0 = arith.constant 0 : index
+ %c1 = arith.constant 1 : index
+ %c1_f16 = arith.constant 1.0 : f16
+ %c2_f16 = arith.constant 2.0 : f16
+ %c256 = arith.constant 256 : index
+ %cf_0 = arith.constant 0.0 : f16
+ %cf_1 = arith.constant 1.0 : f16
+ %A = memref.alloc() : memref<256x256xf16>
+ %B = memref.alloc() : memref<256x256xf16>
+ %C = memref.alloc() : memref<256x256xf32>
+ %C_ref = memref.alloc() : memref<256x256xf32>
+ %c_gen_int = arith.constant 0 : i1
+ %cf_lower = arith.constant -0.5 : f32
+ %cf_upper = arith.constant 0.5 : f32
+
+ // Initialize matrix A ; A[i, j] = j
+ scf.for %i = %c0 to %c256 step %c1 {
+ scf.for %j = %c0 to %c256 step %c1 {
+ %t = index.castu %j : index to i16
+ %val = arith.uitofp %t : i16 to f16
+ memref.store %val, %A[%i, %j] : memref<256x256xf16>
+ }
+ }
+
+ // Initialize the B matrix.
+ // Make matrix B an identity matrix.
+ scf.for %i = %c0 to %c256 step %c1 {
+ scf.for %j = %c0 to %c256 step %c1 {
+ %i_i32 = index.castu %i : index to i32
+ %j_i32 = index.castu %j : index to i32
+ %i_j_same = arith.cmpi eq, %i_i32, %j_i32 : i32
+
+ scf.if %i_j_same {
+ memref.store %cf_1, %B[%i, %j] : memref<256x256xf16>
+ } else {
+ memref.store %cf_0, %B[%i, %j] : memref<256x256xf16>
+ }
+ }
+ }
+
+ // Initialize matrix C and C_ref ; C[i, j] = 0
+ %c0_f32 = arith.constant 0.0 : f32
+ scf.for %i = %c0 to %c256 step %c1 {
+ scf.for %j = %c0 to %c256 step %c1 {
+ memref.store %c0_f32, %C[%i, %j] : memref<256x256xf32>
+ memref.store %c0_f32, %C_ref[%i, %j] : memref<256x256xf32>
+ }
+ }
+
+ // Run GPU version.
+ %2 = call @test(%A, %B, %C) : (memref<256x256xf16>, memref<256x256xf16>, memref<256x256xf32>) -> memref<256x256xf32>
+ %gpu_result_cast = memref.cast %2 : memref<256x256xf32> to memref<*xf32>
+
+ // CHECK: Unranked Memref base@ = 0x{{[0-9a-f]+}}
+ // CHECK-COUNT-256: [0, 1, 2, 3, 4, 5, 6, 7, 8, 9, 10, 11, 12, 13, 14, 15, 16, 17, 18, 19, 20, 21, 22, 23, 24, 25, 26, 27, 28, 29, 30, 31, 32, 33, 34, 35, 36, 37, 38, 39, 40, 41, 42, 43, 44, 45, 46, 47, 48, 49, 50, 51, 52, 53, 54, 55, 56, 57, 58, 59, 60, 61, 62, 63, 64, 65, 66, 67, 68, 69, 70, 71, 72, 73, 74, 75, 76, 77, 78, 79, 80, 81, 82, 83, 84, 85, 86, 87, 88, 89, 90, 91, 92, 93, 94, 95, 96, 97, 98, 99, 100, 101, 102, 103, 104, 105, 106, 107, 108, 109, 110, 111, 112, 113, 114, 115, 116, 117, 118, 119, 120, 121, 122, 123, 124, 125, 126, 127, 128, 129, 130, 131, 132, 133, 134, 135, 136, 137, 138, 139, 140, 141, 142, 143, 144, 145, 146, 147, 148, 149, 150, 151, 152, 153, 154, 155, 156, 157, 158, 159, 160, 161, 162, 163, 164, 165, 166, 167, 168, 169, 170, 171, 172, 173, 174, 175, 176, 177, 178, 179, 180, 181, 182, 183, 184, 185, 186, 187, 188, 189, 190, 191, 192, 193, 194, 195, 196, 197, 198, 199, 200, 201, 202, 203, 204, 205, 206, 207, 208, 209, 210, 211, 212, 213, 214, 215, 216, 217, 218, 219, 220, 221, 222, 223, 224, 225, 226, 227, 228, 229, 230, 231, 232, 233, 234, 235, 236, 237, 238, 239, 240, 241, 242, 243, 244, 245, 246, 247, 248, 249, 250, 251, 252, 253, 254, 255]
+ call @printMemrefF32(%gpu_result_cast) : (memref<*xf32>) -> ()
+ memref.dealloc %A : memref<256x256xf16>
+ memref.dealloc %B : memref<256x256xf16>
+ memref.dealloc %C : memref<256x256xf32>
+ memref.dealloc %C_ref : memref<256x256xf32>
+ return
+ }
+ func.func private @printMemrefF32(memref<*xf32>) attributes {llvm.emit_c_interface}
+}
diff --git a/mlir/test/Integration/Dialect/XeGPU/SG/lit.local.cfg b/mlir/test/Integration/Dialect/XeGPU/SG/lit.local.cfg
new file mode 100644
index 0000000..d0d51c6
--- /dev/null
+++ b/mlir/test/Integration/Dialect/XeGPU/SG/lit.local.cfg
@@ -0,0 +1,4 @@
+if not config.run_xevm_tests:
+ config.unsupported = True
+if not config.enable_levelzero_runner:
+ config.unsupported = True
diff --git a/mlir/test/Integration/Dialect/XeGPU/SG/simple_gemm.mlir b/mlir/test/Integration/Dialect/XeGPU/SG/simple_gemm.mlir
new file mode 100644
index 0000000..877edf4
--- /dev/null
+++ b/mlir/test/Integration/Dialect/XeGPU/SG/simple_gemm.mlir
@@ -0,0 +1,120 @@
+// RUN: mlir-opt %s --gpu-lower-to-xevm-pipeline="xegpu-op-level=subgroup" \
+// RUN: | mlir-runner \
+// RUN: --shared-libs=%mlir_levelzero_runtime \
+// RUN: --shared-libs=%mlir_runner_utils \
+// RUN: --entry-point-result=void \
+// RUN: | FileCheck %s
+
+module @gemm attributes {gpu.container_module} {
+ gpu.module @kernel {
+ gpu.func @simple_gemm(%a: memref<256x256xf16>, %b: memref<256x256xf16>, %c: memref<256x256xf32>) kernel {
+ %c0 = arith.constant 0 : index
+ %c1 = arith.constant 1 : index
+ %c8 = arith.constant 8 : index
+ %c16 = arith.constant 16 : index
+ %c32 = arith.constant 32 : index
+ %c256 = arith.constant 256 : index
+ %block_x = gpu.block_id x
+ %block_y = gpu.block_id y
+ %x_block_offset = arith.muli %block_x, %c8 : index
+ %y_block_offset = arith.muli %block_y, %c16 : index
+
+ %c_tdesc = xegpu.create_nd_tdesc %c : memref<256x256xf32> -> !xegpu.tensor_desc<8x16xf32>
+ %c_init_value = xegpu.load_nd %c_tdesc[%x_block_offset, %y_block_offset] : !xegpu.tensor_desc<8x16xf32> -> vector<8x16xf32>
+ %a_tdesc = xegpu.create_nd_tdesc %a : memref<256x256xf16> -> !xegpu.tensor_desc<8x16xf16>
+ %b_tdesc = xegpu.create_nd_tdesc %b : memref<256x256xf16> -> !xegpu.tensor_desc<16x16xf16>
+
+ %r = scf.for %k = %c0 to %c256 step %c16 iter_args(%arg_c = %c_init_value) -> (vector<8x16xf32>) {
+ %a_val = xegpu.load_nd %a_tdesc[%x_block_offset, %k] : !xegpu.tensor_desc<8x16xf16> -> vector<8x16xf16>
+ %b_val = xegpu.load_nd %b_tdesc[%k, %y_block_offset] : !xegpu.tensor_desc<16x16xf16> -> vector<16x16xf16>
+ %dpas = xegpu.dpas %a_val, %b_val, %arg_c : vector<8x16xf16>, vector<16x16xf16>, vector<8x16xf32> -> vector<8x16xf32>
+ scf.yield %dpas : vector<8x16xf32>
+ }
+ xegpu.store_nd %r, %c_tdesc[%x_block_offset, %y_block_offset] <{l1_hint = #xegpu.cache_hint<write_back>, l2_hint = #xegpu.cache_hint<uncached>}>: vector<8x16xf32>, !xegpu.tensor_desc<8x16xf32>
+ gpu.return
+ }
+ }
+
+ func.func @test(%a : memref<256x256xf16>, %b : memref<256x256xf16>, %c : memref<256x256xf32>) -> memref<256x256xf32> attributes {llvm.emit_c_interface} {
+ %c1 = arith.constant 1 : index
+ %c16 = arith.constant 16 : index
+ %c32 = arith.constant 32 : index
+ %memref_a = gpu.alloc () : memref<256x256xf16>
+ gpu.memcpy %memref_a, %a : memref<256x256xf16>, memref<256x256xf16>
+ %memref_b = gpu.alloc () : memref<256x256xf16>
+ gpu.memcpy %memref_b, %b : memref<256x256xf16>, memref<256x256xf16>
+ %memref_c = gpu.alloc () : memref<256x256xf32>
+ gpu.memcpy %memref_c, %c : memref<256x256xf32>, memref<256x256xf32>
+ gpu.launch_func @kernel::@simple_gemm blocks in (%c32, %c16, %c1) threads in (%c16, %c1, %c1) args(%memref_a : memref<256x256xf16>, %memref_b : memref<256x256xf16>, %memref_c : memref<256x256xf32>)
+ gpu.wait // Wait for the kernel to finish.
+ gpu.memcpy %c, %memref_c : memref<256x256xf32>, memref<256x256xf32>
+ gpu.dealloc %memref_a : memref<256x256xf16>
+ gpu.dealloc %memref_b : memref<256x256xf16>
+ gpu.dealloc %memref_c : memref<256x256xf32>
+ return %c : memref<256x256xf32>
+ }
+
+
+ func.func @main() attributes {llvm.emit_c_interface} {
+ %c0 = arith.constant 0 : index
+ %c1 = arith.constant 1 : index
+ %c1_f16 = arith.constant 1.0 : f16
+ %c2_f16 = arith.constant 2.0 : f16
+ %c256 = arith.constant 256 : index
+ %cf_0 = arith.constant 0.0 : f16
+ %cf_1 = arith.constant 1.0 : f16
+ %A = memref.alloc() : memref<256x256xf16>
+ %B = memref.alloc() : memref<256x256xf16>
+ %C = memref.alloc() : memref<256x256xf32>
+ %C_ref = memref.alloc() : memref<256x256xf32>
+ %c_gen_int = arith.constant 0 : i1
+ %cf_lower = arith.constant -0.5 : f32
+ %cf_upper = arith.constant 0.5 : f32
+ // Initialize matrix A ; A[i, j] = j
+ scf.for %i = %c0 to %c256 step %c1 {
+ scf.for %j = %c0 to %c256 step %c1 {
+ %t = index.castu %j : index to i16
+ %val = arith.uitofp %t : i16 to f16
+ memref.store %val, %A[%i, %j] : memref<256x256xf16>
+ }
+ }
+
+ // Initialize the B matrix
+ // Make matrix B an identity matrix
+ scf.for %i = %c0 to %c256 step %c1 {
+ scf.for %j = %c0 to %c256 step %c1 {
+ %i_i32 = index.castu %i : index to i32
+ %j_i32 = index.castu %j : index to i32
+ %i_j_same = arith.cmpi eq, %i_i32, %j_i32 : i32
+
+ scf.if %i_j_same {
+ memref.store %cf_1, %B[%i, %j] : memref<256x256xf16>
+ } else {
+ memref.store %cf_0, %B[%i, %j] : memref<256x256xf16>
+ }
+ }
+ }
+ // Initialize matrix C and C_ref ; C[i, j] = 0
+ %c0_f32 = arith.constant 0.0 : f32
+ scf.for %i = %c0 to %c256 step %c1 {
+ scf.for %j = %c0 to %c256 step %c1 {
+ memref.store %c0_f32, %C[%i, %j] : memref<256x256xf32>
+ memref.store %c0_f32, %C_ref[%i, %j] : memref<256x256xf32>
+ }
+ }
+
+ // Run GPU version.
+ %2 = call @test(%A, %B, %C) : (memref<256x256xf16>, memref<256x256xf16>, memref<256x256xf32>) -> memref<256x256xf32>
+ %cast_C = memref.cast %2 : memref<256x256xf32> to memref<*xf32>
+ // CHECK: Unranked Memref base@ = 0x{{[0-9a-f]+}}
+ // CHECK-COUNT-256: [0, 1, 2, 3, 4, 5, 6, 7, 8, 9, 10, 11, 12, 13, 14, 15, 16, 17, 18, 19, 20, 21, 22, 23, 24, 25, 26, 27, 28, 29, 30, 31, 32, 33, 34, 35, 36, 37, 38, 39, 40, 41, 42, 43, 44, 45, 46, 47, 48, 49, 50, 51, 52, 53, 54, 55, 56, 57, 58, 59, 60, 61, 62, 63, 64, 65, 66, 67, 68, 69, 70, 71, 72, 73, 74, 75, 76, 77, 78, 79, 80, 81, 82, 83, 84, 85, 86, 87, 88, 89, 90, 91, 92, 93, 94, 95, 96, 97, 98, 99, 100, 101, 102, 103, 104, 105, 106, 107, 108, 109, 110, 111, 112, 113, 114, 115, 116, 117, 118, 119, 120, 121, 122, 123, 124, 125, 126, 127, 128, 129, 130, 131, 132, 133, 134, 135, 136, 137, 138, 139, 140, 141, 142, 143, 144, 145, 146, 147, 148, 149, 150, 151, 152, 153, 154, 155, 156, 157, 158, 159, 160, 161, 162, 163, 164, 165, 166, 167, 168, 169, 170, 171, 172, 173, 174, 175, 176, 177, 178, 179, 180, 181, 182, 183, 184, 185, 186, 187, 188, 189, 190, 191, 192, 193, 194, 195, 196, 197, 198, 199, 200, 201, 202, 203, 204, 205, 206, 207, 208, 209, 210, 211, 212, 213, 214, 215, 216, 217, 218, 219, 220, 221, 222, 223, 224, 225, 226, 227, 228, 229, 230, 231, 232, 233, 234, 235, 236, 237, 238, 239, 240, 241, 242, 243, 244, 245, 246, 247, 248, 249, 250, 251, 252, 253, 254, 255]
+ call @printMemrefF32(%cast_C) : (memref<*xf32>) -> ()
+
+ memref.dealloc %A : memref<256x256xf16>
+ memref.dealloc %B : memref<256x256xf16>
+ memref.dealloc %C : memref<256x256xf32>
+ memref.dealloc %C_ref : memref<256x256xf32>
+ return
+ }
+ func.func private @printMemrefF32(memref<*xf32>) attributes {llvm.emit_c_interface}
+}
diff --git a/mlir/test/Integration/Dialect/XeGPU/WG/lit.local.cfg b/mlir/test/Integration/Dialect/XeGPU/WG/lit.local.cfg
new file mode 100644
index 0000000..d0d51c6
--- /dev/null
+++ b/mlir/test/Integration/Dialect/XeGPU/WG/lit.local.cfg
@@ -0,0 +1,4 @@
+if not config.run_xevm_tests:
+ config.unsupported = True
+if not config.enable_levelzero_runner:
+ config.unsupported = True
diff --git a/mlir/test/Integration/Dialect/XeGPU/WG/simple_gemm.mlir b/mlir/test/Integration/Dialect/XeGPU/WG/simple_gemm.mlir
new file mode 100644
index 0000000..3f2fff9
--- /dev/null
+++ b/mlir/test/Integration/Dialect/XeGPU/WG/simple_gemm.mlir
@@ -0,0 +1,151 @@
+// RUN: mlir-opt %s --gpu-lower-to-xevm-pipeline="xegpu-op-level=workgroup" \
+// RUN: | mlir-runner \
+// RUN: --shared-libs=%mlir_levelzero_runtime \
+// RUN: --shared-libs=%mlir_runner_utils \
+// RUN: --entry-point-result=void \
+// RUN: | FileCheck %s
+
+#a = #xegpu.layout<sg_layout = [8, 4], sg_data = [32, 32], inst_data = [8, 16]>
+#b = #xegpu.layout<sg_layout = [8, 4], sg_data = [32, 64], inst_data = [16, 16]>
+#c = #xegpu.layout<sg_layout = [8, 4], sg_data = [32, 64], inst_data = [8, 16]>
+#a_prefetch = #xegpu.layout<sg_layout = [32, 1], sg_data = [8, 32], inst_data = [8, 16]>
+#b_prefetch = #xegpu.layout<sg_layout = [4, 8], sg_data = [8, 32], inst_data = [8, 16]>
+module @gemm attributes {gpu.container_module} {
+ func.func @test(%A: memref<256x256xf16>, %B: memref<256x256xf16>, %C: memref<256x256xf32>) -> memref<256x256xf32> attributes {llvm.emit_c_interface} {
+ %c1 = arith.constant 1 : index
+ %c4 = arith.constant 4 : index
+ %c8 = arith.constant 8 : index
+ %c16 = arith.constant 16 : index
+ %c32 = arith.constant 32 : index
+ %c64 = arith.constant 64 : index
+ %c128 = arith.constant 128 : index
+ %c512 = arith.constant 512 : index
+ %A_gpu = gpu.alloc () : memref<256x256xf16>
+ gpu.memcpy %A_gpu, %A : memref<256x256xf16>, memref<256x256xf16>
+ %B_gpu = gpu.alloc () : memref<256x256xf16>
+ gpu.memcpy %B_gpu, %B : memref<256x256xf16>, memref<256x256xf16>
+ %C_gpu = gpu.alloc () : memref<256x256xf32>
+ gpu.memcpy %C_gpu, %C : memref<256x256xf32>, memref<256x256xf32>
+ // NOTE: Here we can't use [8, 64] wi threads following the SG thread
+ // layout of [8, 4], because the runtime linearizes the x dimension first
+ // (we need the y dimension to be linearized first). So just use a
+ // linearized thread layout of [512, 1] wi threads.
+ gpu.launch_func @test_kernel::@test_kernel blocks in (%c1, %c1, %c1) threads in (%c512, %c1, %c1) args(%A_gpu : memref<256x256xf16>, %B_gpu : memref<256x256xf16>, %C_gpu : memref<256x256xf32>)
+ gpu.wait // Wait for the kernel to finish.
+ gpu.memcpy %C, %C_gpu : memref<256x256xf32>, memref<256x256xf32>
+ gpu.dealloc %A_gpu : memref<256x256xf16>
+ gpu.dealloc %B_gpu : memref<256x256xf16>
+ gpu.dealloc %C_gpu : memref<256x256xf32>
+ return %C : memref<256x256xf32>
+ }
+
+ gpu.module @test_kernel {
+ gpu.func @test_kernel(%A: memref<256x256xf16>, %B: memref<256x256xf16>, %C: memref<256x256xf32>) kernel {
+ %c0 = arith.constant 0 : index
+ %c1 = arith.constant 1 : index
+ %c32 = arith.constant 32 : index
+ %c64 = arith.constant 64 : index
+ %c96 = arith.constant 96 : index
+ %c256 = arith.constant 256 : index
+ %c4096 = arith.constant 4096 : index
+ %block_id_x = gpu.block_id x
+ %block_id_y = gpu.block_id y
+ %m = arith.muli %block_id_x, %c256 : index
+ %n = arith.muli %block_id_y, %c256 : index
+ %c_tdesc = xegpu.create_nd_tdesc %C : memref<256x256xf32> -> !xegpu.tensor_desc<256x256xf32, #c>
+ %c_init_value = xegpu.load_nd %c_tdesc[%m, %n] : !xegpu.tensor_desc<256x256xf32, #c> -> vector<256x256xf32>
+ %a_tdesc = xegpu.create_nd_tdesc %A : memref<256x256xf16> -> !xegpu.tensor_desc<256x32xf16, #a>
+ %b_tdesc = xegpu.create_nd_tdesc %B : memref<256x256xf16> -> !xegpu.tensor_desc<32x256xf16, #b>
+ // Prefetch A 3 times.
+ %a_prefetch_tdesc = xegpu.create_nd_tdesc %A : memref<256x256xf16> -> !xegpu.tensor_desc<256x32xf16, #a_prefetch>
+ xegpu.prefetch_nd %a_prefetch_tdesc[%m, %c0] : !xegpu.tensor_desc<256x32xf16, #a_prefetch>
+ xegpu.prefetch_nd %a_prefetch_tdesc[%m, %c32] : !xegpu.tensor_desc<256x32xf16, #a_prefetch>
+ xegpu.prefetch_nd %a_prefetch_tdesc[%m, %c64] : !xegpu.tensor_desc<256x32xf16, #a_prefetch>
+ // Prefetch B 3 times.
+ %b_prefetch_tdesc = xegpu.create_nd_tdesc %B : memref<256x256xf16> -> !xegpu.tensor_desc<32x256xf16, #b_prefetch>
+ xegpu.prefetch_nd %b_prefetch_tdesc[%c0, %n] : !xegpu.tensor_desc<32x256xf16, #b_prefetch>
+ xegpu.prefetch_nd %b_prefetch_tdesc[%c32, %n] : !xegpu.tensor_desc<32x256xf16, #b_prefetch>
+ xegpu.prefetch_nd %b_prefetch_tdesc[%c64, %n] : !xegpu.tensor_desc<32x256xf16, #b_prefetch>
+
+ %out = scf.for %k = %c0 to %c256 step %c32
+ iter_args(%c_value = %c_init_value)
+ -> (vector<256x256xf32>) {
+ %a_value = xegpu.load_nd %a_tdesc[%m, %k] : !xegpu.tensor_desc<256x32xf16, #a> -> vector<256x32xf16>
+ %b_value = xegpu.load_nd %b_tdesc[%k, %n] : !xegpu.tensor_desc<32x256xf16, #b> -> vector<32x256xf16>
+ // Prefetch next tiles.
+ %prefetch_offset = arith.addi %k, %c96 : index
+ xegpu.prefetch_nd %a_prefetch_tdesc[%m, %prefetch_offset] : !xegpu.tensor_desc<256x32xf16, #a_prefetch>
+ xegpu.prefetch_nd %b_prefetch_tdesc[%prefetch_offset, %n] : !xegpu.tensor_desc<32x256xf16, #b_prefetch>
+ %c_new_value = xegpu.dpas %a_value, %b_value, %c_value {layout_result_0 = #c}
+ : vector<256x32xf16>, vector<32x256xf16>, vector<256x256xf32> -> vector<256x256xf32>
+ scf.yield %c_new_value : vector<256x256xf32>
+ }
+ xegpu.store_nd %out, %c_tdesc[%m, %n] : vector<256x256xf32>, !xegpu.tensor_desc<256x256xf32, #c>
+ gpu.return
+ }
+ }
+
+ func.func @main() attributes {llvm.emit_c_interface} {
+ %c0 = arith.constant 0 : index
+ %c1 = arith.constant 1 : index
+ %c1_f16 = arith.constant 1.0 : f16
+ %c2_f16 = arith.constant 2.0 : f16
+ %c256 = arith.constant 256 : index
+ %cf_0 = arith.constant 0.0 : f16
+ %cf_1 = arith.constant 1.0 : f16
+ %A = memref.alloc() : memref<256x256xf16>
+ %B = memref.alloc() : memref<256x256xf16>
+ %C = memref.alloc() : memref<256x256xf32>
+ %C_ref = memref.alloc() : memref<256x256xf32>
+ %c_gen_int = arith.constant 0 : i1
+ %cf_lower = arith.constant -0.5 : f32
+ %cf_upper = arith.constant 0.5 : f32
+ // Initialize matrix A ; A[i, j] = j
+ scf.for %i = %c0 to %c256 step %c1 {
+ scf.for %j = %c0 to %c256 step %c1 {
+ %t = index.castu %j : index to i16
+ %val = arith.uitofp %t : i16 to f16
+ memref.store %val, %A[%i, %j] : memref<256x256xf16>
+ }
+ }
+
+ // Initialize the B matrix
+ // Make matrix B an identity matrix
+ scf.for %i = %c0 to %c256 step %c1 {
+ scf.for %j = %c0 to %c256 step %c1 {
+ %i_i32 = index.castu %i : index to i32
+ %j_i32 = index.castu %j : index to i32
+ %i_j_same = arith.cmpi eq, %i_i32, %j_i32 : i32
+
+ scf.if %i_j_same {
+ memref.store %cf_1, %B[%i, %j] : memref<256x256xf16>
+ } else {
+ memref.store %cf_0, %B[%i, %j] : memref<256x256xf16>
+ }
+ }
+ }
+
+ // Initialize matrix C and C_ref ; C[i, j] = 0
+ %c0_f32 = arith.constant 0.0 : f32
+ scf.for %i = %c0 to %c256 step %c1 {
+ scf.for %j = %c0 to %c256 step %c1 {
+ memref.store %c0_f32, %C[%i, %j] : memref<256x256xf32>
+ memref.store %c0_f32, %C_ref[%i, %j] : memref<256x256xf32>
+ }
+ }
+
+ // Run GPU version.
+ %2 = call @test(%A, %B, %C) : (memref<256x256xf16>, memref<256x256xf16>, memref<256x256xf32>) -> memref<256x256xf32>
+ %gpu_result_cast = memref.cast %2 : memref<256x256xf32> to memref<*xf32>
+ // CHECK: Unranked Memref base@ = 0x{{[0-9a-f]+}}
+ // CHECK-COUNT-256: [0, 1, 2, 3, 4, 5, 6, 7, 8, 9, 10, 11, 12, 13, 14, 15, 16, 17, 18, 19, 20, 21, 22, 23, 24, 25, 26, 27, 28, 29, 30, 31, 32, 33, 34, 35, 36, 37, 38, 39, 40, 41, 42, 43, 44, 45, 46, 47, 48, 49, 50, 51, 52, 53, 54, 55, 56, 57, 58, 59, 60, 61, 62, 63, 64, 65, 66, 67, 68, 69, 70, 71, 72, 73, 74, 75, 76, 77, 78, 79, 80, 81, 82, 83, 84, 85, 86, 87, 88, 89, 90, 91, 92, 93, 94, 95, 96, 97, 98, 99, 100, 101, 102, 103, 104, 105, 106, 107, 108, 109, 110, 111, 112, 113, 114, 115, 116, 117, 118, 119, 120, 121, 122, 123, 124, 125, 126, 127, 128, 129, 130, 131, 132, 133, 134, 135, 136, 137, 138, 139, 140, 141, 142, 143, 144, 145, 146, 147, 148, 149, 150, 151, 152, 153, 154, 155, 156, 157, 158, 159, 160, 161, 162, 163, 164, 165, 166, 167, 168, 169, 170, 171, 172, 173, 174, 175, 176, 177, 178, 179, 180, 181, 182, 183, 184, 185, 186, 187, 188, 189, 190, 191, 192, 193, 194, 195, 196, 197, 198, 199, 200, 201, 202, 203, 204, 205, 206, 207, 208, 209, 210, 211, 212, 213, 214, 215, 216, 217, 218, 219, 220, 221, 222, 223, 224, 225, 226, 227, 228, 229, 230, 231, 232, 233, 234, 235, 236, 237, 238, 239, 240, 241, 242, 243, 244, 245, 246, 247, 248, 249, 250, 251, 252, 253, 254, 255]
+ call @printMemrefF32(%gpu_result_cast) : (memref<*xf32>) -> ()
+
+ memref.dealloc %A : memref<256x256xf16>
+ memref.dealloc %B : memref<256x256xf16>
+ memref.dealloc %C : memref<256x256xf32>
+ memref.dealloc %C_ref : memref<256x256xf32>
+ return
+ }
+ func.func private @printMemrefF32(memref<*xf32>) attributes {llvm.emit_c_interface}
+}
diff --git a/mlir/test/Target/LLVMIR/rocdl.mlir b/mlir/test/Target/LLVMIR/rocdl.mlir
index 6536fac..30126f6 100644
--- a/mlir/test/Target/LLVMIR/rocdl.mlir
+++ b/mlir/test/Target/LLVMIR/rocdl.mlir
@@ -872,9 +872,11 @@ llvm.func @rocdl.mfma.scale.f32.16x16x128.f8f6f4(%arg0 : i32,
}
llvm.func @rocdl.wmma(%arg0 : vector<8xf32>, %arg1 : vector<16 x f16>, %arg2 : vector<16 x i16>, %arg3 : vector<8 x i32>,
- %arg4 : vector<2xi32>, %arg5 : vector<4xi32>, %arg6 : vector<4xf32>, %arg7 : vector<8xf16>, %arg8 : vector<8xi16>) -> vector<8xf32> {
+ %arg4 : vector<2xi32>, %arg5 : vector<4xi32>, %arg6 : vector<4xf32>, %arg7 : vector<8xf16>, %arg8 : vector<8xi16>,
+ %arg9 : vector<32xf16>, %arg10 : vector<16xf32>, %arg11 : vector<4xf32>, %arg12 : vector<32xf32>, %arg13 : vector<64xf32>,
+ %arg14 : vector<64xi32>, %arg15 : vector<64xf16>, %arg16 : vector<16xbf16>, %arg17 : vector<32xbf16>) -> vector<8xf32> {
%zero = llvm.mlir.constant(false) : i1
-
+ %zero_i16 = llvm.mlir.constant(0 : i16) : i16
// ---- Wave32 -----
// f16 -> f32
@@ -905,6 +907,83 @@ llvm.func @rocdl.wmma(%arg0 : vector<8xf32>, %arg1 : vector<16 x f16>, %arg2 : v
// CHECK: call <8 x i32> @llvm.amdgcn.wmma.i32.16x16x32.iu4.v8i32.v2i32(i1 {{.*}}, <2 x i32> %{{.*}}, i1 {{.*}}, <2 x i32> %{{.*}}, <8 x i32> %{{.*}}, i1 {{.*}})
%r6.gfx12 = rocdl.wmma.i32.16x16x32.iu4 %zero, %arg4, %zero, %arg4, %arg3, %zero : (i1, vector<2xi32>, i1, vector<2xi32>, vector<8xi32>, i1) -> vector<8xi32>
+ // f32 -> f32
+ // CHECK: call <4 x float> @llvm.amdgcn.wmma.f32.16x16x4.f32.v4f32.v16f32(i1 {{.*}}, <16 x float> %{{.*}}, i1 {{.*}}, <16 x float> %{{.*}}, i16 0, <4 x float> %{{.*}}, i1 {{.*}}, i1 {{.*}})
+ %r1.gfx1250 = rocdl.wmma.f32.16x16x4.f32 %zero, %arg10, %zero, %arg10, %zero_i16, %arg11, %zero, %zero : (i1, vector<16xf32>, i1, vector<16xf32>, i16, vector<4xf32>, i1, i1) -> vector<4xf32>
+
+ // f16 -> f32
+ // CHECK: call <32 x float> @llvm.amdgcn.wmma.f32.16x16x32.f16.v32f32.v16f16(i1 {{.*}}, <16 x half> %{{.*}}, i1 {{.*}}, <16 x half> %{{.*}}, i16 0, <32 x float> %{{.*}}, i1 {{.*}}, i1 {{.*}})
+ %r2.gfx1250 = rocdl.wmma.f32.16x16x32.f16 %zero, %arg1, %zero, %arg1, %zero_i16, %arg12, %zero, %zero : (i1, vector<16xf16>, i1, vector<16xf16>, i16, vector<32xf32>, i1, i1) -> vector<32xf32>
+
+ // bf16 -> f32
+ // CHECK: call <32 x float> @llvm.amdgcn.wmma.f32.16x16x32.bf16.v32f32.v16bf16(i1 {{.*}}, <16 x bfloat> %{{.*}}, i1 {{.*}}, <16 x bfloat> %{{.*}}, i16 0, <32 x float> %{{.*}}, i1 {{.*}}, i1 {{.*}})
+ %r3.gfx1250 = rocdl.wmma.f32.16x16x32.bf16 %zero, %arg16, %zero, %arg16, %zero_i16, %arg12, %zero, %zero : (i1, vector<16xbf16>, i1, vector<16xbf16>, i16, vector<32xf32>, i1, i1) -> vector<32xf32>
+
+ // f16 -> f16
+ // CHECK: call <32 x half> @llvm.amdgcn.wmma.f16.16x16x32.f16.v32f16.v16f16(i1 {{.*}}, <16 x half> %{{.*}}, i1 {{.*}}, <16 x half> %{{.*}}, i16 0, <32 x half> %{{.*}}, i1 {{.*}}, i1 {{.*}})
+ %r4.gfx1250 = rocdl.wmma.f16.16x16x32.f16 %zero, %arg1, %zero, %arg1, %zero_i16, %arg9, %zero, %zero : (i1, vector<16xf16>, i1, vector<16xf16>, i16, vector<32xf16>, i1, i1) -> vector<32xf16>
+
+ // bf16 -> bf16
+ // CHECK: call <32 x bfloat> @llvm.amdgcn.wmma.bf16.16x16x32.bf16.v32bf16.v16bf16(i1 {{.*}}, <16 x bfloat> %{{.*}}, i1 {{.*}}, <16 x bfloat> %{{.*}}, i16 0, <32 x bfloat> %{{.*}}, i1 {{.*}}, i1 {{.*}})
+ %r5.gfx1250 = rocdl.wmma.bf16.16x16x32.bf16 %zero, %arg16, %zero, %arg16, %zero_i16, %arg17, %zero, %zero : (i1, vector<16xbf16>, i1, vector<16xbf16>, i16, vector<32xbf16>, i1, i1) -> vector<32xbf16>
+
+ // bf16 -> bf16 / f32
+ // CHECK: call <32 x bfloat> @llvm.amdgcn.wmma.bf16f32.16x16x32.bf16.v32bf16.v16bf16.v32f32(i1 {{.*}}, <16 x bfloat> %{{.*}}, i1 {{.*}}, <16 x bfloat> %{{.*}}, i16 0, <32 x float> %{{.*}}, i1 {{.*}}, i1 {{.*}})
+ %r6.gfx1250 = rocdl.wmma.bf16f32.16x16x32.bf16 %zero, %arg16, %zero, %arg16, %zero_i16, %arg12, %zero, %zero : (i1, vector<16xbf16>, i1, vector<16xbf16>, i16, vector<32xf32>, i1, i1) -> vector<32xbf16>
+
+ // f8/bf8 -> f16/f32
+ // CHECK: call <64 x float> @llvm.amdgcn.wmma.f32.16x16x64.fp8.fp8.v64f32.v4i32(<4 x i32> %{{.*}}, <4 x i32> %{{.*}}, i16 0, <64 x float> %{{.*}}, i1 {{.*}}, i1 {{.*}})
+ %r7.gfx1250 = rocdl.wmma.f32.16x16x64.fp8_fp8 %arg5, %arg5, %zero_i16, %arg13, %zero, %zero : (vector<4xi32>, vector<4xi32>, i16, vector<64xf32>, i1, i1) -> vector<64xf32>
+
+ // CHECK: call <64 x float> @llvm.amdgcn.wmma.f32.16x16x64.fp8.bf8.v64f32.v4i32(<4 x i32> %{{.*}}, <4 x i32> %{{.*}}, i16 0, <64 x float> %{{.*}}, i1 {{.*}}, i1 {{.*}})
+ %r8.gfx1250 = rocdl.wmma.f32.16x16x64.fp8_bf8 %arg5, %arg5, %zero_i16, %arg13, %zero, %zero : (vector<4xi32>, vector<4xi32>, i16, vector<64xf32>, i1, i1) -> vector<64xf32>
+
+ // CHECK: call <64 x float> @llvm.amdgcn.wmma.f32.16x16x64.bf8.fp8.v64f32.v4i32(<4 x i32> %{{.*}}, <4 x i32> %{{.*}}, i16 0, <64 x float> %{{.*}}, i1 {{.*}}, i1 {{.*}})
+ %r9.gfx1250 = rocdl.wmma.f32.16x16x64.bf8_fp8 %arg5, %arg5, %zero_i16, %arg13, %zero, %zero : (vector<4xi32>, vector<4xi32>, i16, vector<64xf32>, i1, i1) -> vector<64xf32>
+
+ // CHECK: call <64 x float> @llvm.amdgcn.wmma.f32.16x16x64.bf8.bf8.v64f32.v4i32(<4 x i32> %{{.*}}, <4 x i32> %{{.*}}, i16 0, <64 x float> %{{.*}}, i1 {{.*}}, i1 {{.*}})
+ %r10.gfx1250 = rocdl.wmma.f32.16x16x64.bf8_bf8 %arg5, %arg5, %zero_i16, %arg13, %zero, %zero : (vector<4xi32>, vector<4xi32>, i16, vector<64xf32>, i1, i1) -> vector<64xf32>
+
+ // CHECK: call <64 x half> @llvm.amdgcn.wmma.f16.16x16x64.fp8.fp8.v64f16.v4i32(<4 x i32> %{{.*}}, <4 x i32> %{{.*}}, i16 0, <64 x half> %{{.*}}, i1 {{.*}}, i1 {{.*}})
+ %r11.gfx1250 = rocdl.wmma.f16.16x16x64.fp8_fp8 %arg5, %arg5, %zero_i16, %arg15, %zero, %zero : (vector<4xi32>, vector<4xi32>, i16, vector<64xf16>, i1, i1) -> vector<64xf16>
+
+ // CHECK: call <64 x half> @llvm.amdgcn.wmma.f16.16x16x64.fp8.bf8.v64f16.v4i32(<4 x i32> %{{.*}}, <4 x i32> %{{.*}}, i16 0, <64 x half> %{{.*}}, i1 {{.*}}, i1 {{.*}})
+ %r12.gfx1250 = rocdl.wmma.f16.16x16x64.fp8_bf8 %arg5, %arg5, %zero_i16, %arg15, %zero, %zero : (vector<4xi32>, vector<4xi32>, i16, vector<64xf16>, i1, i1) -> vector<64xf16>
+
+ // CHECK: call <64 x half> @llvm.amdgcn.wmma.f16.16x16x64.bf8.fp8.v64f16.v4i32(<4 x i32> %{{.*}}, <4 x i32> %{{.*}}, i16 0, <64 x half> %{{.*}}, i1 {{.*}}, i1 {{.*}})
+ %r13.gfx1250 = rocdl.wmma.f16.16x16x64.bf8_fp8 %arg5, %arg5, %zero_i16, %arg15, %zero, %zero : (vector<4xi32>, vector<4xi32>, i16, vector<64xf16>, i1, i1) -> vector<64xf16>
+
+ // CHECK: call <64 x half> @llvm.amdgcn.wmma.f16.16x16x64.bf8.bf8.v64f16.v4i32(<4 x i32> %{{.*}}, <4 x i32> %{{.*}}, i16 0, <64 x half> %{{.*}}, i1 {{.*}}, i1 {{.*}})
+ %r14.gfx1250 = rocdl.wmma.f16.16x16x64.bf8_bf8 %arg5, %arg5, %zero_i16, %arg15, %zero, %zero : (vector<4xi32>, vector<4xi32>, i16, vector<64xf16>, i1, i1) -> vector<64xf16>
+
+ // CHECK: call <64 x float> @llvm.amdgcn.wmma.f32.16x16x128.fp8.fp8.v64f32.v4i32(<4 x i32> %{{.*}}, <4 x i32> %{{.*}}, i16 0, <64 x float> %{{.*}}, i1 {{.*}}, i1 {{.*}})
+ %r15.gfx1250 = rocdl.wmma.f32.16x16x128.fp8_fp8 %arg5, %arg5, %zero_i16, %arg13, %zero, %zero : (vector<4xi32>, vector<4xi32>, i16, vector<64xf32>, i1, i1) -> vector<64xf32>
+
+ // CHECK: call <64 x float> @llvm.amdgcn.wmma.f32.16x16x128.fp8.bf8.v64f32.v4i32(<4 x i32> %{{.*}}, <4 x i32> %{{.*}}, i16 0, <64 x float> %{{.*}}, i1 {{.*}}, i1 {{.*}})
+ %r16.gfx1250 = rocdl.wmma.f32.16x16x128.fp8_bf8 %arg5, %arg5, %zero_i16, %arg13, %zero, %zero : (vector<4xi32>, vector<4xi32>, i16, vector<64xf32>, i1, i1) -> vector<64xf32>
+
+ // CHECK: call <64 x float> @llvm.amdgcn.wmma.f32.16x16x128.bf8.fp8.v64f32.v4i32(<4 x i32> %{{.*}}, <4 x i32> %{{.*}}, i16 0, <64 x float> %{{.*}}, i1 {{.*}}, i1 {{.*}})
+ %r17.gfx1250 = rocdl.wmma.f32.16x16x128.bf8_fp8 %arg5, %arg5, %zero_i16, %arg13, %zero, %zero : (vector<4xi32>, vector<4xi32>, i16, vector<64xf32>, i1, i1) -> vector<64xf32>
+
+ // CHECK: call <64 x float> @llvm.amdgcn.wmma.f32.16x16x128.bf8.bf8.v64f32.v4i32(<4 x i32> %{{.*}}, <4 x i32> %{{.*}}, i16 0, <64 x float> %{{.*}}, i1 {{.*}}, i1 {{.*}})
+ %r18.gfx1250 = rocdl.wmma.f32.16x16x128.bf8_bf8 %arg5, %arg5, %zero_i16, %arg13, %zero, %zero : (vector<4xi32>, vector<4xi32>, i16, vector<64xf32>, i1, i1) -> vector<64xf32>
+
+ // CHECK: call <64 x half> @llvm.amdgcn.wmma.f16.16x16x128.fp8.fp8.v64f16.v4i32(<4 x i32> %{{.*}}, <4 x i32> %{{.*}}, i16 0, <64 x half> %{{.*}}, i1 {{.*}}, i1 {{.*}})
+ %r19.gfx1250 = rocdl.wmma.f16.16x16x128.fp8_fp8 %arg5, %arg5, %zero_i16, %arg15, %zero, %zero : (vector<4xi32>, vector<4xi32>, i16, vector<64xf16>, i1, i1) -> vector<64xf16>
+
+ // CHECK: call <64 x half> @llvm.amdgcn.wmma.f16.16x16x128.fp8.bf8.v64f16.v4i32(<4 x i32> %{{.*}}, <4 x i32> %{{.*}}, i16 0, <64 x half> %{{.*}}, i1 {{.*}}, i1 {{.*}})
+ %r20.gfx1250 = rocdl.wmma.f16.16x16x128.fp8_bf8 %arg5, %arg5, %zero_i16, %arg15, %zero, %zero : (vector<4xi32>, vector<4xi32>, i16, vector<64xf16>, i1, i1) -> vector<64xf16>
+
+ // CHECK: call <64 x half> @llvm.amdgcn.wmma.f16.16x16x128.bf8.fp8.v64f16.v4i32(<4 x i32> %{{.*}}, <4 x i32> %{{.*}}, i16 0, <64 x half> %{{.*}}, i1 {{.*}}, i1 {{.*}})
+ %r21.gfx1250 = rocdl.wmma.f16.16x16x128.bf8_fp8 %arg5, %arg5, %zero_i16, %arg15, %zero, %zero : (vector<4xi32>, vector<4xi32>, i16, vector<64xf16>, i1, i1) -> vector<64xf16>
+
+ // CHECK: call <64 x half> @llvm.amdgcn.wmma.f16.16x16x128.bf8.bf8.v64f16.v4i32(<4 x i32> %{{.*}}, <4 x i32> %{{.*}}, i16 0, <64 x half> %{{.*}}, i1 {{.*}}, i1 {{.*}})
+ %r22.gfx1250 = rocdl.wmma.f16.16x16x128.bf8_bf8 %arg5, %arg5, %zero_i16, %arg15, %zero, %zero : (vector<4xi32>, vector<4xi32>, i16, vector<64xf16>, i1, i1) -> vector<64xf16>
+
+ // iu8 -> i32
+ // CHECK: call <64 x i32> @llvm.amdgcn.wmma.i32.16x16x64.iu8.v64i32.v4i32(i1 {{.*}}, <4 x i32> %{{.*}}, i1 {{.*}}, <4 x i32> %{{.*}}, <64 x i32> %{{.*}}, i1 {{.*}}, i1 {{.*}})
+ %r23.gfx1250 = rocdl.wmma.i32.16x16x64.iu8 %zero, %arg5, %zero, %arg5, %arg14, %zero, %zero : (i1, vector<4xi32>, i1, vector<4xi32>, vector<64xi32>, i1, i1) -> vector<64xi32>
+
// ---- Wave64 -----
// f16 -> f32
@@ -1477,6 +1556,52 @@ llvm.func @rocdl.cvt.scale.pk16(%v3xi32: vector<3xi32>, %scale:i32) {
llvm.return
}
+// CHECK-LABEL: rocdl.cvt.scalef32.pk16
+// CHECK-SAME:(<16 x float> %[[V16F32:.+]], <16 x half> %[[V16F16:.+]], <16 x bfloat> %[[V16BF16:.+]], float %[[SCALE:.+]])
+llvm.func @rocdl.cvt.scalef32.pk16(%v16xf32: vector<16xf32>, %v16xf16: vector<16xf16>, %v16xbf16: vector<16xbf16>, %scale: f32) {
+
+ // CHECK: call <3 x i32> @llvm.amdgcn.cvt.scalef32.pk16.fp6.f16(<16 x half> %[[V16F16]], float %[[SCALE]])
+ %0 = rocdl.cvt.scalef32.pk16.fp6.f16 %v16xf16, %scale : vector<3xi32>
+ // CHECK: call <3 x i32> @llvm.amdgcn.cvt.scalef32.pk16.fp6.bf16(<16 x bfloat> %[[V16BF16]], float %[[SCALE]])
+ %1 = rocdl.cvt.scalef32.pk16.fp6.bf16 %v16xbf16, %scale : vector<3xi32>
+ // CHECK: call <3 x i32> @llvm.amdgcn.cvt.scalef32.pk16.fp6.f32(<16 x float> %[[V16F32]], float %[[SCALE]])
+ %2 = rocdl.cvt.scalef32.pk16.fp6.f32 %v16xf32, %scale : vector<3xi32>
+
+ // CHECK: call <3 x i32> @llvm.amdgcn.cvt.scalef32.pk16.bf6.f16(<16 x half> %[[V16F16]], float %[[SCALE]])
+ %3 = rocdl.cvt.scalef32.pk16.bf6.f16 %v16xf16, %scale : vector<3xi32>
+ // CHECK: call <3 x i32> @llvm.amdgcn.cvt.scalef32.pk16.bf6.bf16(<16 x bfloat> %[[V16BF16]], float %[[SCALE]])
+ %4 = rocdl.cvt.scalef32.pk16.bf6.bf16 %v16xbf16, %scale : vector<3xi32>
+ // CHECK: call <3 x i32> @llvm.amdgcn.cvt.scalef32.pk16.bf6.f32(<16 x float> %[[V16F32]], float %[[SCALE]])
+ %5 = rocdl.cvt.scalef32.pk16.bf6.f32 %v16xf32, %scale : vector<3xi32>
+
+ llvm.return
+}
+
+// CHECK-LABEL: rocdl.cvt.scalef32.sr.pk16
+// CHECK-SAME:(<16 x float> %[[V16F32:.+]], <16 x half> %[[V16F16:.+]], <16 x bfloat> %[[V16BF16:.+]], i32 %[[SEED:.+]], float %[[SCALE:.+]])
+llvm.func @rocdl.cvt.scalef32.sr.pk16(%v16xf32: vector<16xf32>,
+ %v16xf16: vector<16xf16>,
+ %v16xbf16: vector<16xbf16>,
+ %seed: i32,
+ %scale: f32) {
+
+ // CHECK: call <3 x i32> @llvm.amdgcn.cvt.scalef32.sr.pk16.fp6.f16(<16 x half> %[[V16F16]], i32 %[[SEED]], float %[[SCALE]])
+ %0 = rocdl.cvt.scalef32.sr.pk16.fp6.f16 %v16xf16, %seed, %scale : vector<3xi32>
+ // CHECK: call <3 x i32> @llvm.amdgcn.cvt.scalef32.sr.pk16.fp6.bf16(<16 x bfloat> %[[V16BF16]], i32 %[[SEED]], float %[[SCALE]])
+ %1 = rocdl.cvt.scalef32.sr.pk16.fp6.bf16 %v16xbf16, %seed, %scale : vector<3xi32>
+ // CHECK: call <3 x i32> @llvm.amdgcn.cvt.scalef32.sr.pk16.fp6.f32(<16 x float> %[[V16F32]], i32 %[[SEED]], float %[[SCALE]])
+ %2 = rocdl.cvt.scalef32.sr.pk16.fp6.f32 %v16xf32, %seed, %scale : vector<3xi32>
+
+ // CHECK: call <3 x i32> @llvm.amdgcn.cvt.scalef32.sr.pk16.bf6.f16(<16 x half> %[[V16F16]], i32 %[[SEED]], float %[[SCALE]])
+ %3 = rocdl.cvt.scalef32.sr.pk16.bf6.f16 %v16xf16, %seed, %scale : vector<3xi32>
+ // CHECK: call <3 x i32> @llvm.amdgcn.cvt.scalef32.sr.pk16.bf6.bf16(<16 x bfloat> %[[V16BF16]], i32 %[[SEED]], float %[[SCALE]])
+ %4 = rocdl.cvt.scalef32.sr.pk16.bf6.bf16 %v16xbf16, %seed, %scale : vector<3xi32>
+ // CHECK: call <3 x i32> @llvm.amdgcn.cvt.scalef32.sr.pk16.bf6.f32(<16 x float> %[[V16F32]], i32 %[[SEED]], float %[[SCALE]])
+ %5 = rocdl.cvt.scalef32.sr.pk16.bf6.f32 %v16xf32, %seed, %scale : vector<3xi32>
+
+ llvm.return
+}
+
// CHECK-DAG: attributes #[[$KERNEL_ATTRS]] = { "amdgpu-flat-work-group-size"="1,256" "uniform-work-group-size"="true" }
// CHECK-DAG: attributes #[[$KERNEL_WORKGROUP_ATTRS]] = { "amdgpu-flat-work-group-size"="1,1024"
// CHECK-DAG: attributes #[[$KNOWN_BLOCK_SIZE_ATTRS]] = { "amdgpu-flat-work-group-size"="128,128"
diff --git a/polly/lib/Transform/Canonicalization.cpp b/polly/lib/Transform/Canonicalization.cpp
index 748d710..1be560e 100644
--- a/polly/lib/Transform/Canonicalization.cpp
+++ b/polly/lib/Transform/Canonicalization.cpp
@@ -104,8 +104,7 @@ polly::buildCanonicalicationPassesForNPM(llvm::ModulePassManager &MPM,
LoopPassManager LPM;
LPM.addPass(LoopRotatePass(Level != OptimizationLevel::Oz));
FPM.addPass(createFunctionToLoopPassAdaptor<LoopPassManager>(
- std::move(LPM), /*UseMemorySSA=*/false,
- /*UseBlockFrequencyInfo=*/false));
+ std::move(LPM), /*UseMemorySSA=*/false));
}
if (PollyInliner) {
MPM.addPass(createModuleToFunctionPassAdaptor(std::move(FPM)));
@@ -121,8 +120,7 @@ polly::buildCanonicalicationPassesForNPM(llvm::ModulePassManager &MPM,
LoopPassManager LPM;
LPM.addPass(IndVarSimplifyPass());
FPM.addPass(createFunctionToLoopPassAdaptor<LoopPassManager>(
- std::move(LPM), /*UseMemorySSA=*/false,
- /*UseBlockFrequencyInfo=*/true));
+ std::move(LPM), /*UseMemorySSA=*/false));
}
return FPM;
diff --git a/utils/bazel/llvm-project-overlay/mlir/BUILD.bazel b/utils/bazel/llvm-project-overlay/mlir/BUILD.bazel
index ac58e39..a4724b9 100644
--- a/utils/bazel/llvm-project-overlay/mlir/BUILD.bazel
+++ b/utils/bazel/llvm-project-overlay/mlir/BUILD.bazel
@@ -5512,12 +5512,13 @@ gentbl_cc_library(
cc_library(
name = "GPUPipelines",
- srcs = ["lib/Dialect/GPU/Pipelines/GPUToNVVMPipeline.cpp"],
+ srcs = glob(["lib/Dialect/GPU/Pipelines/*.cpp"]),
hdrs = ["include/mlir/Dialect/GPU/Pipelines/Passes.h"],
includes = ["include"],
deps = [
":AffineToStandard",
":ArithToLLVM",
+ ":ConversionPasses",
":FuncDialect",
":FuncToLLVM",
":GPUDialect",
@@ -5526,8 +5527,10 @@ cc_library(
":GPUTransforms",
":IndexToLLVM",
":LLVMDialect",
+ ":LLVMIRTransforms",
":LinalgTransforms",
":MathToLLVM",
+ ":MathToXeVM",
":MemRefToLLVM",
":MemRefTransforms",
":NVGPUToNVVM",
@@ -5538,6 +5541,10 @@ cc_library(
":Transforms",
":VectorToLLVM",
":VectorToSCF",
+ ":XeGPUToXeVM",
+ ":XeGPUTransforms",
+ ":XeVMTarget",
+ ":XeVMToLLVM",
],
)
@@ -7066,6 +7073,7 @@ cc_library(
]),
includes = ["include"],
deps = [
+ ":AMDGPUUtils",
":ConversionPassIncGen",
":DialectUtils",
":FuncDialect",
@@ -7079,6 +7087,7 @@ cc_library(
":ROCDLDialect",
":TransformUtils",
":VectorDialect",
+ "//llvm:Support",
],
)